author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-09-11 04:34:46 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-09-11 04:34:46 -0300
commit     863981e96738983919de841ec669e157e6bdaeb0 (patch)
tree       d6d89a12e7eb8017837c057935a2271290907f76 /drivers
parent     8dec7c70575785729a6a9e6719a955e9c545bcab (diff)
Linux-libre 4.7.1-gnupck-4.7.1-gnu
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig | 4
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/acpi/Kconfig | 12
-rw-r--r--  drivers/acpi/Makefile | 3
-rw-r--r--  drivers/acpi/acpi_amba.c | 3
-rw-r--r--  drivers/acpi/acpi_apd.c | 3
-rw-r--r--  drivers/acpi/acpi_dbg.c | 22
-rw-r--r--  drivers/acpi/acpi_processor.c | 9
-rw-r--r--  drivers/acpi/acpi_video.c | 88
-rw-r--r--  drivers/acpi/acpica/Makefile | 2
-rw-r--r--  drivers/acpi/acpica/acdebug.h | 10
-rw-r--r--  drivers/acpi/acpica/acevents.h | 3
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 11
-rw-r--r--  drivers/acpi/acpica/acinterp.h | 4
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 62
-rw-r--r--  drivers/acpi/acpica/acmacros.h | 21
-rw-r--r--  drivers/acpi/acpica/acnamesp.h | 5
-rw-r--r--  drivers/acpi/acpica/acparser.h | 2
-rw-r--r--  drivers/acpi/acpica/acpredef.h | 14
-rw-r--r--  drivers/acpi/acpica/acresrc.h | 12
-rw-r--r--  drivers/acpi/acpica/acstruct.h | 2
-rw-r--r--  drivers/acpi/acpica/actables.h | 2
-rw-r--r--  drivers/acpi/acpica/acutils.h | 58
-rw-r--r--  drivers/acpi/acpica/dbcmds.c | 4
-rw-r--r--  drivers/acpi/acpica/dbconvert.c | 8
-rw-r--r--  drivers/acpi/acpica/dbexec.c | 2
-rw-r--r--  drivers/acpi/acpica/dbinput.c | 19
-rw-r--r--  drivers/acpi/acpica/dbnames.c | 4
-rw-r--r--  drivers/acpi/acpica/dbutils.c | 9
-rw-r--r--  drivers/acpi/acpica/dbxface.c | 4
-rw-r--r--  drivers/acpi/acpica/dscontrol.c | 4
-rw-r--r--  drivers/acpi/acpica/dsinit.c | 2
-rw-r--r--  drivers/acpi/acpica/dsmethod.c | 2
-rw-r--r--  drivers/acpi/acpica/dsutils.c | 2
-rw-r--r--  drivers/acpi/acpica/dswload.c | 4
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 4
-rw-r--r--  drivers/acpi/acpica/dswstate.c | 10
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 4
-rw-r--r--  drivers/acpi/acpica/evgpeblk.c | 4
-rw-r--r--  drivers/acpi/acpica/evgpeutil.c | 4
-rw-r--r--  drivers/acpi/acpica/evhandler.c | 2
-rw-r--r--  drivers/acpi/acpica/evmisc.c | 3
-rw-r--r--  drivers/acpi/acpica/evregion.c | 74
-rw-r--r--  drivers/acpi/acpica/evrgnini.c | 3
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c | 2
-rw-r--r--  drivers/acpi/acpica/exconcat.c | 439
-rw-r--r--  drivers/acpi/acpica/exconfig.c | 4
-rw-r--r--  drivers/acpi/acpica/exconvrt.c | 8
-rw-r--r--  drivers/acpi/acpica/excreate.c | 2
-rw-r--r--  drivers/acpi/acpica/exdump.c | 15
-rw-r--r--  drivers/acpi/acpica/exfield.c | 4
-rw-r--r--  drivers/acpi/acpica/exfldio.c | 14
-rw-r--r--  drivers/acpi/acpica/exmisc.c | 290
-rw-r--r--  drivers/acpi/acpica/exnames.c | 2
-rw-r--r--  drivers/acpi/acpica/exoparg3.c | 8
-rw-r--r--  drivers/acpi/acpica/exoparg6.c | 2
-rw-r--r--  drivers/acpi/acpica/exregion.c | 6
-rw-r--r--  drivers/acpi/acpica/exresnte.c | 4
-rw-r--r--  drivers/acpi/acpica/exresolv.c | 2
-rw-r--r--  drivers/acpi/acpica/exresop.c | 4
-rw-r--r--  drivers/acpi/acpica/exstorob.c | 4
-rw-r--r--  drivers/acpi/acpica/exutils.c | 12
-rw-r--r--  drivers/acpi/acpica/hwgpe.c | 6
-rw-r--r--  drivers/acpi/acpica/hwregs.c | 140
-rw-r--r--  drivers/acpi/acpica/hwxface.c | 11
-rw-r--r--  drivers/acpi/acpica/nsaccess.c | 7
-rw-r--r--  drivers/acpi/acpica/nsconvert.c | 9
-rw-r--r--  drivers/acpi/acpica/nsdump.c | 9
-rw-r--r--  drivers/acpi/acpica/nsinit.c | 76
-rw-r--r--  drivers/acpi/acpica/nsload.c | 2
-rw-r--r--  drivers/acpi/acpica/nsnames.c | 2
-rw-r--r--  drivers/acpi/acpica/nsobject.c | 4
-rw-r--r--  drivers/acpi/acpica/nsprepkg.c | 92
-rw-r--r--  drivers/acpi/acpica/nsrepair.c | 2
-rw-r--r--  drivers/acpi/acpica/nsrepair2.c | 6
-rw-r--r--  drivers/acpi/acpica/nsutils.c | 8
-rw-r--r--  drivers/acpi/acpica/nsxfeval.c | 113
-rw-r--r--  drivers/acpi/acpica/nsxfname.c | 6
-rw-r--r--  drivers/acpi/acpica/nsxfobj.c | 6
-rw-r--r--  drivers/acpi/acpica/psargs.c | 2
-rw-r--r--  drivers/acpi/acpica/psopinfo.c | 2
-rw-r--r--  drivers/acpi/acpica/psparse.c | 4
-rw-r--r--  drivers/acpi/acpica/psutils.c | 2
-rw-r--r--  drivers/acpi/acpica/psxface.c | 2
-rw-r--r--  drivers/acpi/acpica/rscalc.c | 90
-rw-r--r--  drivers/acpi/acpica/rscreate.c | 2
-rw-r--r--  drivers/acpi/acpica/rsdump.c | 50
-rw-r--r--  drivers/acpi/acpica/rsdumpinfo.c | 9
-rw-r--r--  drivers/acpi/acpica/rsmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/rsserial.c | 21
-rw-r--r--  drivers/acpi/acpica/rsutils.c | 14
-rw-r--r--  drivers/acpi/acpica/rsxface.c | 6
-rw-r--r--  drivers/acpi/acpica/tbdata.c | 15
-rw-r--r--  drivers/acpi/acpica/tbfadt.c | 28
-rw-r--r--  drivers/acpi/acpica/tbfind.c | 2
-rw-r--r--  drivers/acpi/acpica/tbinstal.c | 6
-rw-r--r--  drivers/acpi/acpica/tbutils.c | 33
-rw-r--r--  drivers/acpi/acpica/tbxface.c | 6
-rw-r--r--  drivers/acpi/acpica/tbxfload.c | 2
-rw-r--r--  drivers/acpi/acpica/tbxfroot.c | 8
-rw-r--r--  drivers/acpi/acpica/utalloc.c | 5
-rw-r--r--  drivers/acpi/acpica/utascii.c | 140
-rw-r--r--  drivers/acpi/acpica/utbuffer.c | 24
-rw-r--r--  drivers/acpi/acpica/utcache.c | 7
-rw-r--r--  drivers/acpi/acpica/utcopy.c | 16
-rw-r--r--  drivers/acpi/acpica/utdebug.c | 47
-rw-r--r--  drivers/acpi/acpica/utdecode.c | 30
-rw-r--r--  drivers/acpi/acpica/uteval.c | 4
-rw-r--r--  drivers/acpi/acpica/utglobal.c | 48
-rw-r--r--  drivers/acpi/acpica/utids.c | 8
-rw-r--r--  drivers/acpi/acpica/utmath.c | 4
-rw-r--r--  drivers/acpi/acpica/utmisc.c | 2
-rw-r--r--  drivers/acpi/acpica/utnonansi.c | 67
-rw-r--r--  drivers/acpi/acpica/utobject.c | 18
-rw-r--r--  drivers/acpi/acpica/utosi.c | 4
-rw-r--r--  drivers/acpi/acpica/utownerid.c | 6
-rw-r--r--  drivers/acpi/acpica/utprint.c | 19
-rw-r--r--  drivers/acpi/acpica/utstring.c | 71
-rw-r--r--  drivers/acpi/acpica/uttrack.c | 2
-rw-r--r--  drivers/acpi/acpica/utxface.c | 4
-rw-r--r--  drivers/acpi/battery.c | 2
-rw-r--r--  drivers/acpi/blacklist.c | 196
-rw-r--r--  drivers/acpi/bus.c | 41
-rw-r--r--  drivers/acpi/device_sysfs.c | 44
-rw-r--r--  drivers/acpi/ec.c | 279
-rw-r--r--  drivers/acpi/evged.c | 154
-rw-r--r--  drivers/acpi/internal.h | 5
-rw-r--r--  drivers/acpi/nfit.c | 283
-rw-r--r--  drivers/acpi/nfit.h | 31
-rw-r--r--  drivers/acpi/numa.c | 16
-rw-r--r--  drivers/acpi/osi.c | 522
-rw-r--r--  drivers/acpi/osl.c | 511
-rw-r--r--  drivers/acpi/pci_link.c | 124
-rw-r--r--  drivers/acpi/processor_throttling.c | 9
-rw-r--r--  drivers/acpi/sleep.c | 7
-rw-r--r--  drivers/acpi/sysfs.c | 7
-rw-r--r--  drivers/acpi/tables.c | 316
-rw-r--r--  drivers/acpi/utils.c | 10
-rw-r--r--  drivers/acpi/video_detect.c | 2
-rw-r--r--  drivers/amba/bus.c | 100
-rw-r--r--  drivers/ata/Kconfig | 13
-rw-r--r--  drivers/ata/ahci_seattle.c | 2
-rw-r--r--  drivers/ata/libahci.c | 4
-rw-r--r--  drivers/ata/libata-core.c | 243
-rw-r--r--  drivers/ata/libata-eh.c | 113
-rw-r--r--  drivers/ata/libata-scsi.c | 761
-rw-r--r--  drivers/ata/libata-trace.c | 72
-rw-r--r--  drivers/ata/libata.h | 8
-rw-r--r--  drivers/ata/pata_icside.c | 2
-rw-r--r--  drivers/ata/sata_dwc_460ex.c | 552
-rw-r--r--  drivers/ata/sata_highbank.c | 2
-rw-r--r--  drivers/ata/sata_mv.c | 2
-rw-r--r--  drivers/atm/firestream.c | 6
-rw-r--r--  drivers/atm/iphase.c | 2
-rw-r--r--  drivers/base/Makefile | 2
-rw-r--r--  drivers/base/devcoredump.c | 83
-rw-r--r--  drivers/base/isa.c | 2
-rw-r--r--  drivers/base/platform.c | 19
-rw-r--r--  drivers/base/power/clock_ops.c | 2
-rw-r--r--  drivers/base/power/domain.c | 145
-rw-r--r--  drivers/base/power/domain_governor.c | 20
-rw-r--r--  drivers/base/power/main.c | 18
-rw-r--r--  drivers/base/power/opp/Makefile | 1
-rw-r--r--  drivers/base/power/opp/core.c | 440
-rw-r--r--  drivers/base/power/opp/cpu.c | 201
-rw-r--r--  drivers/base/power/opp/of.c | 597
-rw-r--r--  drivers/base/power/opp/opp.h | 22
-rw-r--r--  drivers/base/property.c | 34
-rw-r--r--  drivers/base/regmap/regcache-flat.c | 2
-rw-r--r--  drivers/base/regmap/regcache.c | 2
-rw-r--r--  drivers/bcma/bcma_private.h | 2
-rw-r--r--  drivers/bcma/driver_chipcommon_sflash.c | 1
-rw-r--r--  drivers/block/aoe/aoecmd.c | 4
-rw-r--r--  drivers/block/brd.c | 2
-rw-r--r--  drivers/block/drbd/drbd_main.c | 2
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 28
-rw-r--r--  drivers/block/loop.c | 2
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 12
-rw-r--r--  drivers/block/nbd.c | 6
-rw-r--r--  drivers/block/osdblk.c | 2
-rw-r--r--  drivers/block/ps3disk.c | 2
-rw-r--r--  drivers/block/rbd.c | 305
-rw-r--r--  drivers/block/skd_main.c | 61
-rw-r--r--  drivers/block/virtio_blk.c | 6
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 129
-rw-r--r--  drivers/block/zram/zcomp.c | 300
-rw-r--r--  drivers/block/zram/zcomp.h | 14
-rw-r--r--  drivers/block/zram/zram_drv.c | 104
-rw-r--r--  drivers/block/zram/zram_drv.h | 2
-rw-r--r--  drivers/bluetooth/ath3k.c | 8
-rw-r--r--  drivers/bluetooth/btmrvl_drv.h | 11
-rw-r--r--  drivers/bluetooth/btmrvl_main.c | 35
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 79
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.h | 6
-rw-r--r--  drivers/bluetooth/btusb.c | 12
-rw-r--r--  drivers/bluetooth/hci_bcm.c | 1
-rw-r--r--  drivers/bluetooth/hci_bcsp.c | 57
-rw-r--r--  drivers/bluetooth/hci_intel.c | 6
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 11
-rw-r--r--  drivers/bluetooth/hci_uart.h | 1
-rw-r--r--  drivers/bus/Kconfig | 5
-rw-r--r--  drivers/bus/arm-ccn.c | 7
-rw-r--r--  drivers/bus/brcmstb_gisb.c | 30
-rw-r--r--  drivers/bus/mips_cdmm.c | 12
-rw-r--r--  drivers/char/Kconfig | 4
-rw-r--r--  drivers/char/hw_random/Kconfig | 29
-rw-r--r--  drivers/char/hw_random/Makefile | 2
-rw-r--r--  drivers/char/hw_random/exynos-rng.c | 23
-rw-r--r--  drivers/char/hw_random/hisi-rng.c | 126
-rw-r--r--  drivers/char/hw_random/ppc4xx-rng.c | 147
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 65
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 2
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 33
-rw-r--r--  drivers/char/random.c | 34
-rw-r--r--  drivers/char/xillybus/xillybus_of.c | 11
-rw-r--r--  drivers/char/xillybus/xillybus_pcie.c | 10
-rw-r--r--  drivers/clk/Kconfig | 11
-rw-r--r--  drivers/clk/Makefile | 6
-rw-r--r--  drivers/clk/at91/clk-programmable.c | 2
-rw-r--r--  drivers/clk/axis/Makefile | 1
-rw-r--r--  drivers/clk/axis/clk-artpec6.c | 242
-rw-r--r--  drivers/clk/axs10x/Makefile | 1
-rw-r--r--  drivers/clk/axs10x/i2s_pll_clock.c | 228
-rw-r--r--  drivers/clk/bcm/clk-bcm2835.c | 1155
-rw-r--r--  drivers/clk/bcm/clk-kona-setup.c | 3
-rw-r--r--  drivers/clk/clk-clps711x.c | 19
-rw-r--r--  drivers/clk/clk-composite.c | 93
-rw-r--r--  drivers/clk/clk-divider.c | 91
-rw-r--r--  drivers/clk/clk-fixed-factor.c | 42
-rw-r--r--  drivers/clk/clk-fixed-rate.c | 44
-rw-r--r--  drivers/clk/clk-fractional-divider.c | 40
-rw-r--r--  drivers/clk/clk-gate.c | 43
-rw-r--r--  drivers/clk/clk-gpio.c | 52
-rw-r--r--  drivers/clk/clk-ls1x.c | 3
-rw-r--r--  drivers/clk/clk-mux.c | 57
-rw-r--r--  drivers/clk/clk-nspire.c | 3
-rw-r--r--  drivers/clk/clk-oxnas.c | 195
-rw-r--r--  drivers/clk/clk-palmas.c | 4
-rw-r--r--  drivers/clk/clk-pwm.c | 17
-rw-r--r--  drivers/clk/clk-qoriq.c | 5
-rw-r--r--  drivers/clk/clk-rk808.c | 1
-rw-r--r--  drivers/clk/clk-tango4.c | 73
-rw-r--r--  drivers/clk/clk-twl6040.c | 1
-rw-r--r--  drivers/clk/clk-wm831x.c | 1
-rw-r--r--  drivers/clk/clk-xgene.c | 2
-rw-r--r--  drivers/clk/clk.c | 222
-rw-r--r--  drivers/clk/clkdev.c | 75
-rw-r--r--  drivers/clk/hisilicon/Kconfig | 15
-rw-r--r--  drivers/clk/hisilicon/Makefile | 2
-rw-r--r--  drivers/clk/hisilicon/clk-hi3519.c | 131
-rw-r--r--  drivers/clk/hisilicon/clk.c | 23
-rw-r--r--  drivers/clk/hisilicon/clk.h | 14
-rw-r--r--  drivers/clk/hisilicon/reset.c | 134
-rw-r--r--  drivers/clk/hisilicon/reset.h | 36
-rw-r--r--  drivers/clk/imx/clk-gate2.c | 7
-rw-r--r--  drivers/clk/imx/clk-imx6sx.c | 10
-rw-r--r--  drivers/clk/imx/clk-imx7d.c | 5
-rw-r--r--  drivers/clk/imx/clk-pllv3.c | 9
-rw-r--r--  drivers/clk/imx/clk-vf610.c | 60
-rw-r--r--  drivers/clk/imx/clk.h | 13
-rw-r--r--  drivers/clk/ingenic/cgu.c | 11
-rw-r--r--  drivers/clk/ingenic/cgu.h | 6
-rw-r--r--  drivers/clk/ingenic/jz4740-cgu.c | 24
-rw-r--r--  drivers/clk/ingenic/jz4780-cgu.c | 40
-rw-r--r--  drivers/clk/mediatek/clk-mt8173.c | 12
-rw-r--r--  drivers/clk/mediatek/clk-mtk.h | 15
-rw-r--r--  drivers/clk/meson/meson8b-clkc.c | 6
-rw-r--r--  drivers/clk/microchip/Makefile | 2
-rw-r--r--  drivers/clk/microchip/clk-core.c | 1031
-rw-r--r--  drivers/clk/microchip/clk-core.h | 84
-rw-r--r--  drivers/clk/microchip/clk-pic32mzda.c | 275
-rw-r--r--  drivers/clk/mmp/clk-mmp2.c | 14
-rw-r--r--  drivers/clk/mmp/clk-of-mmp2.c | 10
-rw-r--r--  drivers/clk/mmp/clk-of-pxa168.c | 8
-rw-r--r--  drivers/clk/mmp/clk-of-pxa1928.c | 12
-rw-r--r--  drivers/clk/mmp/clk-of-pxa910.c | 8
-rw-r--r--  drivers/clk/mmp/clk-pxa168.c | 8
-rw-r--r--  drivers/clk/mmp/clk-pxa910.c | 8
-rw-r--r--  drivers/clk/mvebu/Kconfig | 6
-rw-r--r--  drivers/clk/mvebu/Makefile | 2
-rw-r--r--  drivers/clk/mvebu/ap806-system-controller.c | 168
-rw-r--r--  drivers/clk/mvebu/cp110-system-controller.c | 406
-rw-r--r--  drivers/clk/nxp/clk-lpc18xx-creg.c | 1
-rw-r--r--  drivers/clk/qcom/mmcc-msm8996.c | 32
-rw-r--r--  drivers/clk/renesas/Kconfig | 16
-rw-r--r--  drivers/clk/renesas/Makefile | 26
-rw-r--r--  drivers/clk/renesas/clk-mstp.c | 10
-rw-r--r--  drivers/clk/renesas/r8a7795-cpg-mssr.c | 47
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.c | 49
-rw-r--r--  drivers/clk/renesas/renesas-cpg-mssr.h | 6
-rw-r--r--  drivers/clk/rockchip/Makefile | 1
-rw-r--r--  drivers/clk/rockchip/clk-cpu.c | 33
-rw-r--r--  drivers/clk/rockchip/clk-mmc-phase.c | 15
-rw-r--r--  drivers/clk/rockchip/clk-pll.c | 336
-rw-r--r--  drivers/clk/rockchip/clk-rk3036.c | 21
-rw-r--r--  drivers/clk/rockchip/clk-rk3188.c | 51
-rw-r--r--  drivers/clk/rockchip/clk-rk3228.c | 21
-rw-r--r--  drivers/clk/rockchip/clk-rk3288.c | 23
-rw-r--r--  drivers/clk/rockchip/clk-rk3368.c | 28
-rw-r--r--  drivers/clk/rockchip/clk-rk3399.c | 1577
-rw-r--r--  drivers/clk/rockchip/clk.c | 151
-rw-r--r--  drivers/clk/rockchip/clk.h | 104
-rw-r--r--  drivers/clk/samsung/clk-exynos3250.c | 15
-rw-r--r--  drivers/clk/samsung/clk-exynos5420.c | 77
-rw-r--r--  drivers/clk/sirf/clk-atlas6.c | 7
-rw-r--r--  drivers/clk/sirf/clk-prima2.c | 7
-rw-r--r--  drivers/clk/sunxi/Makefile | 3
-rw-r--r--  drivers/clk/sunxi/clk-a10-hosc.c | 3
-rw-r--r--  drivers/clk/sunxi/clk-a10-mod1.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-sun4i-display.c | 264
-rw-r--r--  drivers/clk/sunxi/clk-sun4i-pll3.c | 98
-rw-r--r--  drivers/clk/sunxi/clk-sun4i-tcon-ch1.c | 296
-rw-r--r--  drivers/clk/sunxi/clk-sun9i-mmc.c | 2
-rw-r--r--  drivers/clk/sunxi/clk-sunxi.c | 79
-rw-r--r--  drivers/clk/tegra/Makefile | 1
-rw-r--r--  drivers/clk/tegra/clk-dfll.c | 11
-rw-r--r--  drivers/clk/tegra/clk-dfll.h | 22
-rw-r--r--  drivers/clk/tegra/clk-id.h | 2
-rw-r--r--  drivers/clk/tegra/clk-periph-fixed.c | 120
-rw-r--r--  drivers/clk/tegra/clk-periph-gate.c | 2
-rw-r--r--  drivers/clk/tegra/clk-periph.c | 2
-rw-r--r--  drivers/clk/tegra/clk-pll.c | 46
-rw-r--r--  drivers/clk/tegra/clk-tegra-fixed.c | 1
-rw-r--r--  drivers/clk/tegra/clk-tegra-periph.c | 5
-rw-r--r--  drivers/clk/tegra/clk-tegra114.c | 6
-rw-r--r--  drivers/clk/tegra/clk-tegra124-dfll-fcpu.c | 103
-rw-r--r--  drivers/clk/tegra/clk-tegra124.c | 4
-rw-r--r--  drivers/clk/tegra/clk-tegra20.c | 2
-rw-r--r--  drivers/clk/tegra/clk-tegra210.c | 89
-rw-r--r--  drivers/clk/tegra/clk-tegra30.c | 12
-rw-r--r--  drivers/clk/tegra/clk.c | 4
-rw-r--r--  drivers/clk/tegra/clk.h | 27
-rw-r--r--  drivers/clk/tegra/cvb.c | 71
-rw-r--r--  drivers/clk/tegra/cvb.h | 15
-rw-r--r--  drivers/clk/ti/clk-54xx.c | 1
-rw-r--r--  drivers/clk/ti/clk-7xx.c | 3
-rw-r--r--  drivers/clk/ti/clk-dra7-atl.c | 2
-rw-r--r--  drivers/clk/ti/clkt_dflt.c | 2
-rw-r--r--  drivers/clk/ti/clkt_dpll.c | 3
-rw-r--r--  drivers/clk/ti/dpll.c | 5
-rw-r--r--  drivers/clk/zte/clk-zx296702.c | 3
-rw-r--r--  drivers/clocksource/Kconfig | 16
-rw-r--r--  drivers/clocksource/Makefile | 2
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 11
-rw-r--r--  drivers/clocksource/dw_apb_timer.c | 1
-rw-r--r--  drivers/clocksource/mps2-timer.c | 275
-rw-r--r--  drivers/clocksource/mtk_timer.c | 2
-rw-r--r--  drivers/clocksource/tegra20_timer.c | 14
-rw-r--r--  drivers/clocksource/timer-nps.c | 98
-rw-r--r--  drivers/connector/cn_proc.c | 43
-rw-r--r--  drivers/cpufreq/Kconfig | 45
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 9
-rw-r--r--  drivers/cpufreq/Kconfig.x86 | 1
-rw-r--r--  drivers/cpufreq/Makefile | 6
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 129
-rw-r--r--  drivers/cpufreq/arm_big_little.c | 54
-rw-r--r--  drivers/cpufreq/arm_big_little.h | 4
-rw-r--r--  drivers/cpufreq/arm_big_little_dt.c | 21
-rw-r--r--  drivers/cpufreq/cppc_cpufreq.c | 21
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 95
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c | 22
-rw-r--r--  drivers/cpufreq/cpufreq-nforce2.c | 28
-rw-r--r--  drivers/cpufreq/cpufreq.c | 244
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 25
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 271
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h | 46
-rw-r--r--  drivers/cpufreq/cpufreq_governor_attr_set.c | 84
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 29
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c | 43
-rw-r--r--  drivers/cpufreq/e_powersaver.c | 76
-rw-r--r--  drivers/cpufreq/elanfreq.c | 4
-rw-r--r--  drivers/cpufreq/hisi-acpu-cpufreq.c | 42
-rw-r--r--  drivers/cpufreq/ia64-acpi-cpufreq.c | 10
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 299
-rw-r--r--  drivers/cpufreq/longhaul.c | 86
-rw-r--r--  drivers/cpufreq/loongson1-cpufreq.c (renamed from drivers/cpufreq/ls1x-cpufreq.c) | 114
-rw-r--r--  drivers/cpufreq/loongson2_cpufreq.c | 7
-rw-r--r--  drivers/cpufreq/maple-cpufreq.c | 11
-rw-r--r--  drivers/cpufreq/mt8173-cpufreq.c | 25
-rw-r--r--  drivers/cpufreq/mvebu-cpufreq.c | 107
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c | 9
-rw-r--r--  drivers/cpufreq/p4-clockmod.c | 19
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/pmac32-cpufreq.c | 16
-rw-r--r--  drivers/cpufreq/pmac64-cpufreq.c | 47
-rw-r--r--  drivers/cpufreq/powernow-k6.c | 16
-rw-r--r--  drivers/cpufreq/powernow-k7.c | 70
-rw-r--r--  drivers/cpufreq/powernv-cpufreq.c | 272
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq.h | 2
-rw-r--r--  drivers/cpufreq/ppc_cbe_cpufreq_pmi.c | 15
-rw-r--r--  drivers/cpufreq/pxa2xx-cpufreq.c | 18
-rw-r--r--  drivers/cpufreq/qoriq-cpufreq.c | 9
-rw-r--r--  drivers/cpufreq/s3c2412-cpufreq.c | 15
-rw-r--r--  drivers/cpufreq/s3c2440-cpufreq.c | 6
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq-debugfs.c | 4
-rw-r--r--  drivers/cpufreq/s3c24xx-cpufreq.c | 59
-rw-r--r--  drivers/cpufreq/s5pv210-cpufreq.c | 10
-rw-r--r--  drivers/cpufreq/sc520_freq.c | 10
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c | 47
-rw-r--r--  drivers/cpufreq/speedstep-centrino.c | 6
-rw-r--r--  drivers/cpufreq/speedstep-ich.c | 8
-rw-r--r--  drivers/cpufreq/speedstep-lib.c | 11
-rw-r--r--  drivers/cpufreq/speedstep-smi.c | 7
-rw-r--r--  drivers/cpufreq/tegra124-cpufreq.c | 7
-rw-r--r--  drivers/cpufreq/vexpress-spc-cpufreq.c | 4
-rw-r--r--  drivers/cpuidle/cpuidle.c | 6
-rw-r--r--  drivers/crypto/Kconfig | 27
-rw-r--r--  drivers/crypto/Makefile | 1
-rw-r--r--  drivers/crypto/amcc/Makefile | 1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 7
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.h | 4
-rw-r--r--  drivers/crypto/amcc/crypto4xx_reg_def.h | 1
-rw-r--r--  drivers/crypto/amcc/crypto4xx_trng.c | 131
-rw-r--r--  drivers/crypto/amcc/crypto4xx_trng.h | 34
-rw-r--r--  drivers/crypto/caam/ctrl.c | 2
-rw-r--r--  drivers/crypto/ccp/Kconfig | 2
-rw-r--r--  drivers/crypto/ccp/Makefile | 6
-rw-r--r--  drivers/crypto/ccp/ccp-dev-v3.c | 13
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 2
-rw-r--r--  drivers/crypto/ccp/ccp-dev.h | 49
-rw-r--r--  drivers/crypto/ccp/ccp-dmaengine.c | 727
-rw-r--r--  drivers/crypto/ccp/ccp-ops.c | 69
-rw-r--r--  drivers/crypto/marvell/cesa.c | 10
-rw-r--r--  drivers/crypto/marvell/hash.c | 3
-rw-r--r--  drivers/crypto/marvell/tdma.c | 5
-rw-r--r--  drivers/crypto/mxc-scc.c | 765
-rw-r--r--  drivers/crypto/n2_core.c | 2
-rw-r--r--  drivers/crypto/omap-aes.c | 62
-rw-r--r--  drivers/crypto/omap-des.c | 165
-rw-r--r--  drivers/crypto/omap-sham.c | 25
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_drv.c | 4
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | 23
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_drv.c | 4
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c | 23
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_common/Makefile | 4
-rw-r--r--  drivers/crypto/qat/qat_common/adf_admin.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg_strings.h | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h | 28
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c | 40
-rw-r--r--  drivers/crypto/qat/qat_common/adf_init.c | 15
-rw-r--r--  drivers/crypto/qat/qat_common/adf_isr.c | 4
-rw-r--r--  drivers/crypto/qat/qat_common/adf_sriov.c | 8
-rw-r--r--  drivers/crypto/qat/qat_common/adf_vf2pf_msg.c | 92
-rw-r--r--  drivers/crypto/qat/qat_common/adf_vf_isr.c | 61
-rw-r--r--  drivers/crypto/qat/qat_common/qat_asym_algs.c | 4
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 4
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 23
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 6
-rw-r--r--  drivers/crypto/s5p-sss.c | 368
-rw-r--r--  drivers/crypto/vmx/ppc-xlate.pl | 20
-rw-r--r--  drivers/dax/Kconfig | 26
-rw-r--r--  drivers/dax/Makefile | 4
-rw-r--r--  drivers/dax/dax.c | 575
-rw-r--r--  drivers/dax/dax.h | 24
-rw-r--r--  drivers/dax/pmem.c | 158
-rw-r--r--  drivers/devfreq/Kconfig | 36
-rw-r--r--  drivers/devfreq/Makefile | 4
-rw-r--r--  drivers/devfreq/devfreq-event.c | 5
-rw-r--r--  drivers/devfreq/devfreq.c | 232
-rw-r--r--  drivers/devfreq/event/Kconfig | 8
-rw-r--r--  drivers/devfreq/event/Makefile | 2
-rw-r--r--  drivers/devfreq/event/exynos-nocp.c | 301
-rw-r--r--  drivers/devfreq/event/exynos-nocp.h | 78
-rw-r--r--  drivers/devfreq/exynos-bus.c | 570
-rw-r--r--  drivers/devfreq/exynos/Makefile | 3
-rw-r--r--  drivers/devfreq/exynos/exynos4_bus.c | 1055
-rw-r--r--  drivers/devfreq/exynos/exynos4_bus.h | 110
-rw-r--r--  drivers/devfreq/exynos/exynos5_bus.c | 431
-rw-r--r--  drivers/devfreq/exynos/exynos_ppmu.c | 119
-rw-r--r--  drivers/devfreq/exynos/exynos_ppmu.h | 86
-rw-r--r--  drivers/devfreq/governor_passive.c | 205
-rw-r--r--  drivers/dma-buf/Kconfig | 11
-rw-r--r--  drivers/dma-buf/Makefile | 1
-rw-r--r--  drivers/dma-buf/dma-buf.c | 7
-rw-r--r--  drivers/dma-buf/reservation.c | 72
-rw-r--r--  drivers/dma-buf/sync_file.c | 395
-rw-r--r--  drivers/dma/Kconfig | 18
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/amba-pl08x.c | 86
-rw-r--r--  drivers/dma/at_xdmac.c | 82
-rw-r--r--  drivers/dma/bcm2835-dma.c | 604
-rw-r--r--  drivers/dma/dmaengine.c | 37
-rw-r--r--  drivers/dma/dw/core.c | 495
-rw-r--r--  drivers/dma/dw/pci.c | 5
-rw-r--r--  drivers/dma/dw/platform.c | 40
-rw-r--r--  drivers/dma/dw/regs.h | 56
-rw-r--r--  drivers/dma/edma.c | 11
-rw-r--r--  drivers/dma/fsldma.c | 3
-rw-r--r--  drivers/dma/hsu/hsu.c | 8
-rw-r--r--  drivers/dma/hsu/hsu.h | 4
-rw-r--r--  drivers/dma/ioat/init.c | 17
-rw-r--r--  drivers/dma/ioat/registers.h | 7
-rw-r--r--  drivers/dma/mmp_pdma.c | 3
-rw-r--r--  drivers/dma/mpc512x_dma.c | 174
-rw-r--r--  drivers/dma/mv_xor.c | 108
-rw-r--r--  drivers/dma/mv_xor.h | 1
-rw-r--r--  drivers/dma/of-dma.c | 12
-rw-r--r--  drivers/dma/pxa_dma.c | 16
-rw-r--r--  drivers/dma/qcom/Makefile | 2
-rw-r--r--  drivers/dma/qcom/bam_dma.c | 38
-rw-r--r--  drivers/dma/qcom/hidma.c | 52
-rw-r--r--  drivers/dma/qcom/hidma.h | 40
-rw-r--r--  drivers/dma/qcom/hidma_dbg.c | 217
-rw-r--r--  drivers/dma/qcom/hidma_ll.c | 872
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c | 113
-rw-r--r--  drivers/dma/sun4i-dma.c | 16
-rw-r--r--  drivers/dma/sun6i-dma.c | 254
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 16
-rw-r--r--  drivers/dma/tegra210-adma.c | 840
-rw-r--r--  drivers/dma/xilinx/xilinx_vdma.c | 1663
-rw-r--r--  drivers/edac/Kconfig | 5
-rw-r--r--  drivers/edac/altera_edac.c | 412
-rw-r--r--  drivers/edac/altera_edac.h | 128
-rw-r--r--  drivers/edac/amd64_edac.c | 131
-rw-r--r--  drivers/edac/amd64_edac.h | 2
-rw-r--r--  drivers/edac/edac_mc.c | 2
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 3
-rw-r--r--  drivers/edac/i7core_edac.c | 81
-rw-r--r--  drivers/edac/ie31200_edac.c | 121
-rw-r--r--  drivers/edac/mce_amd.c | 9
-rw-r--r--  drivers/edac/sb_edac.c | 260
-rw-r--r--  drivers/firewire/net.c | 2
-rw-r--r--  drivers/firmware/broadcom/Kconfig | 11
-rw-r--r--  drivers/firmware/broadcom/Makefile | 1
-rw-r--r--  drivers/firmware/broadcom/bcm47xx_sprom.c | 737
-rw-r--r--  drivers/firmware/efi/Kconfig | 25
-rw-r--r--  drivers/firmware/efi/Makefile | 5
-rw-r--r--  drivers/firmware/efi/arm-init.c | 118
-rw-r--r--  drivers/firmware/efi/arm-runtime.c | 45
-rw-r--r--  drivers/firmware/efi/capsule-loader.c | 343
-rw-r--r--  drivers/firmware/efi/capsule.c | 308
-rw-r--r--  drivers/firmware/efi/efi.c | 48
-rw-r--r--  drivers/firmware/efi/efibc.c | 113
-rw-r--r--  drivers/firmware/efi/efivars.c | 5
-rw-r--r--  drivers/firmware/efi/fake_mem.c | 43
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 2
-rw-r--r--  drivers/firmware/efi/libstub/arm-stub.c | 77
-rw-r--r--  drivers/firmware/efi/libstub/arm32-stub.c | 37
-rw-r--r--  drivers/firmware/efi/libstub/arm64-stub.c | 15
-rw-r--r--  drivers/firmware/efi/libstub/efi-stub-helper.c | 6
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 24
-rw-r--r--  drivers/firmware/efi/libstub/gop.c | 354
-rw-r--r--  drivers/firmware/efi/memattr.c | 182
-rw-r--r--  drivers/firmware/efi/reboot.c | 12
-rw-r--r--  drivers/firmware/efi/runtime-wrappers.c | 60
-rw-r--r--  drivers/firmware/efi/vars.c | 56
-rw-r--r--  drivers/firmware/iscsi_ibft.c | 66
-rw-r--r--  drivers/firmware/psci.c | 8
-rw-r--r--  drivers/firmware/qemu_fw_cfg.c | 4
-rw-r--r--  drivers/gpio/Kconfig | 62
-rw-r--r--  drivers/gpio/Makefile | 6
-rw-r--r--  drivers/gpio/gpio-104-dio-48e.c | 110
-rw-r--r--  drivers/gpio/gpio-104-idi-48.c | 87
-rw-r--r--  drivers/gpio/gpio-104-idio-16.c | 85
-rw-r--r--  drivers/gpio/gpio-74x164.c | 25
-rw-r--r--  drivers/gpio/gpio-amdpt.c | 123
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c | 14
-rw-r--r--  drivers/gpio/gpio-brcmstb.c | 1
-rw-r--r--  drivers/gpio/gpio-dwapb.c | 78
-rw-r--r--  drivers/gpio/gpio-f7188x.c | 52
-rw-r--r--  drivers/gpio/gpio-it87.c | 10
-rw-r--r--  drivers/gpio/gpio-loongson1.c | 102
-rw-r--r--  drivers/gpio/gpio-lpc32xx.c | 48
-rw-r--r--  drivers/gpio/gpio-mb86s7x.c | 10
-rw-r--r--  drivers/gpio/gpio-mc9s08dz60.c | 12
-rw-r--r--  drivers/gpio/gpio-mcp23s08.c | 111
-rw-r--r--  drivers/gpio/gpio-menz127.c | 22
-rw-r--r--  drivers/gpio/gpio-mmio.c (renamed from drivers/gpio/gpio-generic.c) | 2
-rw-r--r--  drivers/gpio/gpio-moxart.c | 7
-rw-r--r--  drivers/gpio/gpio-mvebu.c | 5
-rw-r--r--  drivers/gpio/gpio-octeon.c | 26
-rw-r--r--  drivers/gpio/gpio-omap.c | 42
-rw-r--r--  drivers/gpio/gpio-palmas.c | 13
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 42
-rw-r--r--  drivers/gpio/gpio-pl061.c | 26
-rw-r--r--  drivers/gpio/gpio-rc5t583.c | 12
-rw-r--r--  drivers/gpio/gpio-rcar.c | 20
-rw-r--r--  drivers/gpio/gpio-sodaville.c | 28
-rw-r--r--  drivers/gpio/gpio-sta2x11.c | 8
-rw-r--r--  drivers/gpio/gpio-stmpe.c | 31
-rw-r--r--  drivers/gpio/gpio-sx150x.c | 100
-rw-r--r--  drivers/gpio/gpio-tc3589x.c | 69
-rw-r--r--  drivers/gpio/gpio-tegra.c | 482
-rw-r--r--  drivers/gpio/gpio-timberdale.c | 35
-rw-r--r--  drivers/gpio/gpio-tpic2810.c | 35
-rw-r--r--  drivers/gpio/gpio-tps65218.c | 45
-rw-r--r--  drivers/gpio/gpio-tps6586x.c | 13
-rw-r--r--  drivers/gpio/gpio-tps65910.c | 16
-rw-r--r--  drivers/gpio/gpio-vx855.c | 23
-rw-r--r--  drivers/gpio/gpio-wm831x.c | 25
-rw-r--r--  drivers/gpio/gpio-wm8994.c | 25
-rw-r--r--  drivers/gpio/gpio-ws16c48.c | 88
-rw-r--r--  drivers/gpio/gpio-xgene-sb.c | 15
-rw-r--r--  drivers/gpio/gpio-xgene.c | 30
-rw-r--r--  drivers/gpio/gpio-xlp.c | 23
-rw-r--r--  drivers/gpio/gpio-zevio.c | 21
-rw-r--r--  drivers/gpio/gpio-zx.c | 14
-rw-r--r--  drivers/gpio/gpiolib-of.c | 67
-rw-r--r--  drivers/gpio/gpiolib.c | 173
-rw-r--r--  drivers/gpio/gpiolib.h | 4
-rw-r--r--  drivers/gpu/drm/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/acp/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/amd/acp/acp_hw.c | 2
-rw-r--r--  drivers/gpu/drm/amd/acp/include/acp_gfx_if.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Kconfig | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 218
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 172
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 145
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 306
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 95
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 71
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 81
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 39
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 85
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 105
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 131
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 395
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | 98
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_crtc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 93
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_i2c.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_i2c.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 227
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 88
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_smumgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 235
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 346
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 1626
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 114
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 152
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 84
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 271
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 123
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 241
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 420
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 98
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 85
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 211
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vid.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 2
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 8
-rwxr-xr-x  drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h | 10075
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_enum.h | 6813
-rwxr-xr-x  drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h | 18687
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h | 1
-rw-r--r--  drivers/gpu/drm/amd/include/atombios.h | 735
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_common.h | 84
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_linux.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 282
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 34
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 26
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c | 11
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | 171
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 52
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c | 430
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h | 40
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h | 62
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | 5060
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h | 361
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c | 398
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h | 70
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c | 716
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h | 62
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c | 23
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 158
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | 70
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | 203
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h | 20
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h | 30
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c | 261
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/eventmgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 14
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h | 412
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h | 10088
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu74.h | 833
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h | 849
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h | 22
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 1007
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h | 68
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 9
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 121
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 42
-rw-r--r--  drivers/gpu/drm/amd/scheduler/sched_fence.c | 10
-rw-r--r--  drivers/gpu/drm/arc/Kconfig | 10
-rw-r--r--  drivers/gpu/drm/arc/Makefile | 2
-rw-r--r--  drivers/gpu/drm/arc/arcpgu.h | 50
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_crtc.c | 257
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_drv.c | 288
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_hdmi.c | 201
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_regs.h | 40
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_crtc.c | 86
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 71
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.h | 5
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 1
-rw-r--r--  drivers/gpu/drm/armada/armada_fb.c | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 6
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.h | 4
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 4
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 158
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 155
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h | 15
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 251
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 95
-rw-r--r--  drivers/gpu/drm/bochs/bochs_fbdev.c | 17
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 11
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 2
-rw-r--r--  drivers/gpu/drm/bridge/analogix-anx78xx.c | 1514
-rw-r--r--  drivers/gpu/drm/bridge/analogix-anx78xx.h | 719
-rw-r--r--  drivers/gpu/drm/bridge/analogix/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/bridge/analogix/Makefile | 2
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 1430
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.h | 281
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c | 1320
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h (renamed from drivers/gpu/drm/exynos/exynos_dp_reg.h) | 270
-rw-r--r--  drivers/gpu/drm/bridge/dw-hdmi.c | 7
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 4
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 65
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 138
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 91
-rw-r--r--  drivers/gpu/drm/drm_cache.c | 6
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 556
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 106
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 4
-rw-r--r--  drivers/gpu/drm/drm_dp_aux_dev.c | 12
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 66
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 29
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 31
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 335
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 207
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 147
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 51
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 104
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_info.c | 4
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 4
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 54
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 11
-rw-r--r--  drivers/gpu/drm/drm_legacy.h | 2
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 2
-rw-r--r--  drivers/gpu/drm/drm_panel.c | 61
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 2
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 2
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 167
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 16
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 10
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 17
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 7
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 11
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 8
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 92
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp.c | 312
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c | 1499
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.h | 282
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_reg.c | 1263
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 25
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 69
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 14
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 30
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 84
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 14
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 106
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 22
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 99
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 801
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 69
-rw-r--r--  drivers/gpu/drm/exynos/regs-hdmi.h | 9
-rw-r--r--  drivers/gpu/drm/fsl-dcu/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/fsl-dcu/Makefile | 3
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 126
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h | 6
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 38
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_tcon.c | 111
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_tcon.h | 33
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 15
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/gma_display.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_dpi.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/Kconfig | 5
-rw-r--r--  drivers/gpu/drm/hisilicon/Makefile | 5
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/Kconfig | 18
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/Makefile | 6
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 858
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h | 103
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h | 230
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 1057
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 343
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h | 31
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 41
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 211
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 458
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 524
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 208
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 521
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 781
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 340
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 53
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 179
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 576
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 125
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 118
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 84
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 210
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_reg.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 165
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 859
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 288
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 52
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 397
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 861
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 553
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 173
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 1385
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 3413
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 589
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 47
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 1786
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.h | 164
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 200
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 455
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 177
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c | 282
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 71
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 42
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 25
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 1353
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 67
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c | 169
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 69
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 80
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 65
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 616
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 1900
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 134
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 519
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 61
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 58
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 672
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 845
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 22
-rw-r--r--  drivers/gpu/drm/imx/imx-drm.h | 7
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 78
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c | 6
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 10
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 5
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 40
-rw-r--r--  drivers/gpu/drm/mediatek/Kconfig | 16
-rw-r--r--  drivers/gpu/drm/mediatek/Makefile | 14
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 302
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 240
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dpi.c | 764
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dpi_regs.h | 228
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 582
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.h | 32
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 353
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp.h | 41
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 225
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 150
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 567
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.h | 60
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_fb.c | 165
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_fb.h | 23
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_gem.c | 269
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_gem.h | 59
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_plane.c | 240
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_plane.h | 59
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 911
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mipi_tx.c | 463
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_cursor.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 4
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 5
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 18
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.c | 34
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 12
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 27
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 13
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 2
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_connector.c | 20
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_ctrl.c | 29
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 11
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 26
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 34
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c | 16
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_format.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 56
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c | 168
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.h | 26
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 406
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 64
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.c | 163
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.h | 46
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 140
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 19
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_prime.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 143
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 123
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/arb.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/cursor.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dac.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/dfp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/hw.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/overlay.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv04.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/tvnv17.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/device.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/engine.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h (renamed from drivers/gpu/drm/nouveau/nouveau_drm.h) | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwmon.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_nvif.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_platform.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_usif.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/engine.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/subdev.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 128
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/falcon.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c | 395
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 67
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c | 60
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c | 60
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c | 261
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c | 48
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c | 68
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c | 64
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c | 70
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c | 148
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c | 10
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.c | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4.c | 11
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5.c | 11
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5_core.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_phy.c | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_pll.c | 1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_wp.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c156
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c15
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h7
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c230
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c11
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h6
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c29
-rw-r--r--drivers/gpu/drm/radeon/cik.c252
-rw-r--r--drivers/gpu/drm/radeon/cikd.h1
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c120
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c66
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h43
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/ni.c246
-rw-r--r--drivers/gpu/drm/radeon/r100.c10
-rw-r--r--drivers/gpu/drm/radeon/r300.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c112
-rw-r--r--drivers/gpu/drm/radeon/radeon.h26
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h25
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c85
-rw-r--r--drivers/gpu/drm/radeon/rs600.c12
-rw-r--r--drivers/gpu/drm/radeon/rv770.c106
-rw-r--r--drivers/gpu/drm/radeon/si.c250
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c5
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c5
-rw-r--r--drivers/gpu/drm/radeon/uvd_v4_2.c16
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c15
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c2
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig9
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c390
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c38
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c17
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c20
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c73
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h10
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c97
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c10
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c6
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c1
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c1
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c1
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c1
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c4
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig14
-rw-r--r--drivers/gpu/drm/sun4i/Makefile13
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c364
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.h165
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c128
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.h30
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_dotclock.c191
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_dotclock.h21
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c351
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.h30
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.c54
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.h19
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c161
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.h30
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c264
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.h18
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c566
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h186
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c708
-rw-r--r--drivers/gpu/drm/tegra/dc.c9
-rw-r--r--drivers/gpu/drm/tegra/drm.c31
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/dsi.c15
-rw-r--r--drivers/gpu/drm/tegra/fb.c2
-rw-r--r--drivers/gpu/drm/tegra/gem.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c2
-rw-r--r--drivers/gpu/drm/ttm/Makefile3
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c86
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c8
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c2
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c142
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/vc4/Kconfig1
-rw-r--r--drivers/gpu/drm/vc4/Makefile1
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c92
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c520
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c36
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h5
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c24
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h10
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c39
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c13
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c421
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.h191
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c2
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c2
-rw-r--r--drivers/hid/Kconfig10
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-asus.c52
-rw-r--r--drivers/hid/hid-core.c34
-rw-r--r--drivers/hid/hid-ids.h8
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-roccat.c5
-rw-r--r--drivers/hid/hid-thingm.c49
-rw-r--r--drivers/hid/hidraw.c18
-rw-r--r--drivers/hid/usbhid/hid-quirks.c5
-rw-r--r--drivers/hid/wacom_sys.c3
-rw-r--r--drivers/hid/wacom_wac.c13
-rw-r--r--drivers/hid/wacom_wac.h1
-rw-r--r--drivers/hsi/controllers/Kconfig8
-rw-r--r--drivers/hsi/controllers/Makefile4
-rw-r--r--drivers/hsi/controllers/omap_ssi.h12
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c (renamed from drivers/hsi/controllers/omap_ssi.c)107
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c100
-rw-r--r--drivers/hv/channel_mgmt.c58
-rw-r--r--drivers/hv/connection.c1
-rw-r--r--drivers/hv/hv_balloon.c5
-rw-r--r--drivers/hv/hv_kvp.c31
-rw-r--r--drivers/hv/hyperv_vmbus.h23
-rw-r--r--drivers/hv/ring_buffer.c95
-rw-r--r--drivers/hv/vmbus_drv.c150
-rw-r--r--drivers/hwmon/Kconfig15
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c5
-rw-r--r--drivers/hwmon/emc2103.c2
-rw-r--r--drivers/hwmon/fam15h_power.c211
-rw-r--r--drivers/hwmon/it87.c2261
-rw-r--r--drivers/hwmon/lm75.c10
-rw-r--r--drivers/hwmon/lm90.c2
-rw-r--r--drivers/hwmon/max31722.c165
-rw-r--r--drivers/hwmon/ntc_thermistor.c12
-rw-r--r--drivers/hwmon/pwm-fan.c26
-rw-r--r--drivers/hwmon/sch5636.c2
-rw-r--r--drivers/hwmon/scpi-hwmon.c48
-rw-r--r--drivers/hwmon/tmp102.c8
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c2
-rw-r--r--drivers/hwtracing/coresight/Kconfig11
-rw-r--r--drivers/hwtracing/coresight/Makefile13
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c107
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c2126
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c2402
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h222
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h30
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c920
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c604
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c326
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c604
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h140
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c1
-rw-r--r--drivers/hwtracing/coresight/coresight.c157
-rw-r--r--drivers/hwtracing/intel_th/core.c29
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h6
-rw-r--r--drivers/hwtracing/intel_th/msu.c118
-rw-r--r--drivers/hwtracing/intel_th/pci.c5
-rw-r--r--drivers/hwtracing/intel_th/pti.c6
-rw-r--r--drivers/hwtracing/stm/core.c36
-rw-r--r--drivers/hwtracing/stm/dummy_stm.c14
-rw-r--r--drivers/hwtracing/stm/heartbeat.c14
-rw-r--r--drivers/hwtracing/stm/policy.c5
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c2
-rw-r--r--drivers/i2c/busses/Kconfig5
-rw-r--r--drivers/i2c/busses/i2c-at91.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-kona.c5
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c4
-rw-r--r--drivers/i2c/busses/i2c-cpm.c4
-rw-r--r--drivers/i2c/busses/i2c-dln2.c2
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c10
-rw-r--r--drivers/i2c/busses/i2c-i801.c151
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c2
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c4
-rw-r--r--drivers/i2c/busses/i2c-imx.c2
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c5
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c4
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c5
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c45
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c2
-rw-r--r--drivers/i2c/busses/i2c-ocores.c5
-rw-r--r--drivers/i2c/busses/i2c-octeon.c1011
-rw-r--r--drivers/i2c/busses/i2c-omap.c12
-rw-r--r--drivers/i2c/busses/i2c-powermac.c4
-rw-r--r--drivers/i2c/busses/i2c-qup.c4
-rw-r--r--drivers/i2c/busses/i2c-rcar.c233
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c87
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c244
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c3
-rw-r--r--drivers/i2c/busses/i2c-sirf.c4
-rw-r--r--drivers/i2c/busses/i2c-st.c48
-rw-r--r--drivers/i2c/busses/i2c-tegra.c83
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c2
-rw-r--r--drivers/i2c/i2c-boardinfo.c4
-rw-r--r--drivers/i2c/i2c-core.c72
-rw-r--r--drivers/i2c/i2c-dev.c25
-rw-r--r--drivers/i2c/i2c-mux.c300
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c47
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c73
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c58
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c61
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c135
-rw-r--r--drivers/i2c/muxes/i2c-mux-reg.c72
-rw-r--r--drivers/ide/ide-disk.c6
-rw-r--r--drivers/idle/intel_idle.c137
-rw-r--r--drivers/iio/accel/Kconfig5
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c127
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c7
-rw-r--r--drivers/iio/accel/bmc150-accel-spi.c8
-rw-r--r--drivers/iio/accel/bmc150-accel.h1
-rw-r--r--drivers/iio/accel/kxcjk-1013.c25
-rw-r--r--drivers/iio/accel/mma7455_core.c5
-rw-r--r--drivers/iio/accel/mma8452.c188
-rw-r--r--drivers/iio/accel/mma9553.c1
-rw-r--r--drivers/iio/accel/mxc4005.c29
-rw-r--r--drivers/iio/accel/st_accel.h1
-rw-r--r--drivers/iio/accel/st_accel_buffer.c2
-rw-r--r--drivers/iio/accel/st_accel_core.c106
-rw-r--r--drivers/iio/accel/st_accel_i2c.c4
-rw-r--r--drivers/iio/accel/stk8312.c1
-rw-r--r--drivers/iio/accel/stk8ba50.c1
-rw-r--r--drivers/iio/adc/Kconfig16
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/ad799x.c2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c102
-rw-r--r--drivers/iio/adc/at91_adc.c8
-rw-r--r--drivers/iio/adc/ina2xx-adc.c43
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c231
-rw-r--r--drivers/iio/adc/mcp3422.c6
-rw-r--r--drivers/iio/adc/mxs-lradc.c37
-rw-r--r--drivers/iio/adc/rockchip_saradc.c19
-rw-r--r--drivers/iio/adc/ti-adc081c.c118
-rw-r--r--drivers/iio/adc/vf610_adc.c24
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c2
-rw-r--r--drivers/iio/common/ms_sensors/ms_sensors_i2c.c2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_buffer.c88
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c28
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c109
-rw-r--r--drivers/iio/dac/Kconfig39
-rw-r--r--drivers/iio/dac/Makefile4
-rw-r--r--drivers/iio/dac/ad5592r-base.c691
-rw-r--r--drivers/iio/dac/ad5592r-base.h76
-rw-r--r--drivers/iio/dac/ad5592r.c164
-rw-r--r--drivers/iio/dac/ad5593r.c131
-rw-r--r--drivers/iio/dac/lpc18xx_dac.c210
-rw-r--r--drivers/iio/dac/stx104.c24
-rw-r--r--drivers/iio/frequency/ad9523.c19
-rw-r--r--drivers/iio/gyro/Kconfig2
-rw-r--r--drivers/iio/gyro/bmg160_core.c137
-rw-r--r--drivers/iio/gyro/st_gyro.h1
-rw-r--r--drivers/iio/gyro/st_gyro_buffer.c2
-rw-r--r--drivers/iio/gyro/st_gyro_core.c5
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c5
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c1
-rw-r--r--drivers/iio/humidity/Kconfig10
-rw-r--r--drivers/iio/humidity/Makefile1
-rw-r--r--drivers/iio/humidity/am2315.c301
-rw-r--r--drivers/iio/humidity/dht11.c40
-rw-r--r--drivers/iio/imu/Kconfig2
-rw-r--r--drivers/iio/imu/Makefile1
-rw-r--r--drivers/iio/imu/adis.c7
-rw-r--r--drivers/iio/imu/bmi160/Kconfig32
-rw-r--r--drivers/iio/imu/bmi160/Makefile6
-rw-r--r--drivers/iio/imu/bmi160/bmi160.h10
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c596
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c72
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c63
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig10
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c7
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c74
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c82
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h19
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c20
-rw-r--r--drivers/iio/imu/kmx61.c1
-rw-r--r--drivers/iio/industrialio-core.c123
-rw-r--r--drivers/iio/inkern.c86
-rw-r--r--drivers/iio/light/Kconfig32
-rw-r--r--drivers/iio/light/Makefile3
-rw-r--r--drivers/iio/light/apds9960.c13
-rw-r--r--drivers/iio/light/bh1780.c299
-rw-r--r--drivers/iio/light/max44000.c638
-rw-r--r--drivers/iio/light/stk3310.c1
-rw-r--r--drivers/iio/light/tsl2563.c3
-rw-r--r--drivers/iio/light/veml6070.c218
-rw-r--r--drivers/iio/magnetometer/Kconfig33
-rw-r--r--drivers/iio/magnetometer/Makefile3
-rw-r--r--drivers/iio/magnetometer/ak8975.c232
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c156
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.h11
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c77
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_spi.c68
-rw-r--r--drivers/iio/magnetometer/st_magn_buffer.c2
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c2
-rw-r--r--drivers/iio/potentiometer/Kconfig28
-rw-r--r--drivers/iio/potentiometer/Makefile2
-rw-r--r--drivers/iio/potentiometer/ds1803.c173
-rw-r--r--drivers/iio/potentiometer/mcp4131.c494
-rw-r--r--drivers/iio/potentiometer/mcp4531.c13
-rw-r--r--drivers/iio/potentiometer/tpl0102.c2
-rw-r--r--drivers/iio/pressure/Kconfig28
-rw-r--r--drivers/iio/pressure/Makefile2
-rw-r--r--drivers/iio/pressure/bmp280.c568
-rw-r--r--drivers/iio/pressure/hp03.c312
-rw-r--r--drivers/iio/pressure/hp206c.c426
-rw-r--r--drivers/iio/pressure/ms5611.h23
-rw-r--r--drivers/iio/pressure/ms5611_core.c148
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c25
-rw-r--r--drivers/iio/pressure/ms5611_spi.c34
-rw-r--r--drivers/iio/pressure/st_pressure_buffer.c2
-rw-r--r--drivers/iio/pressure/st_pressure_core.c11
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/core/Makefile14
-rw-r--r--drivers/infiniband/core/addr.c226
-rw-r--r--drivers/infiniband/core/cache.c14
-rw-r--r--drivers/infiniband/core/cma.c66
-rw-r--r--drivers/infiniband/core/core_priv.h16
-rw-r--r--drivers/infiniband/core/device.c62
-rw-r--r--drivers/infiniband/core/iwcm.c4
-rw-r--r--drivers/infiniband/core/iwpm_msg.c2
-rw-r--r--drivers/infiniband/core/iwpm_util.c1
-rw-r--r--drivers/infiniband/core/mad.c19
-rw-r--r--drivers/infiniband/core/mr_pool.c86
-rw-r--r--drivers/infiniband/core/multicast.c23
-rw-r--r--drivers/infiniband/core/netlink.c5
-rw-r--r--drivers/infiniband/core/rw.c727
-rw-r--r--drivers/infiniband/core/sa_query.c213
-rw-r--r--drivers/infiniband/core/sysfs.c378
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c13
-rw-r--r--drivers/infiniband/core/verbs.c188
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c154
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c611
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h14
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c12
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c58
-rw-r--r--drivers/infiniband/hw/hfi1/Kconfig (renamed from drivers/staging/rdma/hfi1/Kconfig)1
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile (renamed from drivers/staging/rdma/hfi1/Makefile)2
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c (renamed from drivers/staging/rdma/hfi1/affinity.c)124
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h (renamed from drivers/staging/rdma/hfi1/affinity.h)19
-rw-r--r--drivers/infiniband/hw/hfi1/aspm.h (renamed from drivers/staging/rdma/hfi1/aspm.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c (renamed from drivers/staging/rdma/hfi1/chip.c)722
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h (renamed from drivers/staging/rdma/hfi1/chip.h)13
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h (renamed from drivers/staging/rdma/hfi1/chip_registers.h)1
-rw-r--r--drivers/infiniband/hw/hfi1/common.h (renamed from drivers/staging/rdma/hfi1/common.h)5
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c (renamed from drivers/staging/rdma/hfi1/debugfs.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.h (renamed from drivers/staging/rdma/hfi1/debugfs.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/device.c (renamed from drivers/staging/rdma/hfi1/device.c)18
-rw-r--r--drivers/infiniband/hw/hfi1/device.h (renamed from drivers/staging/rdma/hfi1/device.h)3
-rw-r--r--drivers/infiniband/hw/hfi1/dma.c (renamed from drivers/staging/rdma/hfi1/dma.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c (renamed from drivers/staging/rdma/hfi1/driver.c)5
-rw-r--r--drivers/infiniband/hw/hfi1/efivar.c (renamed from drivers/staging/rdma/hfi1/efivar.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/efivar.h (renamed from drivers/staging/rdma/hfi1/efivar.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.c102
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.h (renamed from drivers/staging/rdma/hfi1/eprom.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c (renamed from drivers/staging/rdma/hfi1/file_ops.c)550
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c (renamed from drivers/staging/rdma/hfi1/firmware.c)9
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h (renamed from drivers/staging/rdma/hfi1/hfi.h)18
-rw-r--r--drivers/infiniband/hw/hfi1/init.c (renamed from drivers/staging/rdma/hfi1/init.c)51
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c (renamed from drivers/staging/rdma/hfi1/intr.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h (renamed from drivers/staging/rdma/hfi1/iowait.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c (renamed from drivers/staging/rdma/hfi1/mad.c)134
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h (renamed from drivers/staging/rdma/hfi1/mad.h)2
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c (renamed from drivers/staging/rdma/hfi1/mmu_rb.c)57
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.h (renamed from drivers/staging/rdma/hfi1/mmu_rb.h)2
-rw-r--r--drivers/infiniband/hw/hfi1/opa_compat.h (renamed from drivers/staging/rdma/hfi1/opa_compat.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c (renamed from drivers/staging/rdma/hfi1/pcie.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c (renamed from drivers/staging/rdma/hfi1/pio.c)81
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h (renamed from drivers/staging/rdma/hfi1/pio.h)8
-rw-r--r--drivers/infiniband/hw/hfi1/pio_copy.c (renamed from drivers/staging/rdma/hfi1/pio_copy.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c (renamed from drivers/staging/rdma/hfi1/platform.c)126
-rw-r--r--drivers/infiniband/hw/hfi1/platform.h (renamed from drivers/staging/rdma/hfi1/platform.h)1
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c (renamed from drivers/staging/rdma/hfi1/qp.c)10
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h (renamed from drivers/staging/rdma/hfi1/qp.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.c (renamed from drivers/staging/rdma/hfi1/qsfp.c)61
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.h (renamed from drivers/staging/rdma/hfi1/qsfp.h)15
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c (renamed from drivers/staging/rdma/hfi1/rc.c)9
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c (renamed from drivers/staging/rdma/hfi1/ruc.c)20
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c (renamed from drivers/staging/rdma/hfi1/sdma.c)4
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h (renamed from drivers/staging/rdma/hfi1/sdma.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/sdma_txreq.h (renamed from drivers/staging/rdma/hfi1/sdma_txreq.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c (renamed from drivers/staging/rdma/hfi1/sysfs.c)8
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c (renamed from drivers/staging/rdma/hfi1/trace.c)21
-rw-r--r--drivers/infiniband/hw/hfi1/trace.h (renamed from drivers/staging/rdma/hfi1/trace.h)5
-rw-r--r--drivers/infiniband/hw/hfi1/twsi.c (renamed from drivers/staging/rdma/hfi1/twsi.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/twsi.h (renamed from drivers/staging/rdma/hfi1/twsi.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c (renamed from drivers/staging/rdma/hfi1/uc.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c (renamed from drivers/staging/rdma/hfi1/ud.c)31
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c (renamed from drivers/staging/rdma/hfi1/user_exp_rcv.c)7
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.h (renamed from drivers/staging/rdma/hfi1/user_exp_rcv.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c (renamed from drivers/staging/rdma/hfi1/user_pages.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c (renamed from drivers/staging/rdma/hfi1/user_sdma.c)119
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h (renamed from drivers/staging/rdma/hfi1/user_sdma.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c (renamed from drivers/staging/rdma/hfi1/verbs.c)112
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h (renamed from drivers/staging/rdma/hfi1/verbs.h)5
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.c (renamed from drivers/staging/rdma/hfi1/verbs_txreq.c)4
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h (renamed from drivers/staging/rdma/hfi1/verbs_txreq.h)1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw.h9
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c148
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.h10
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c185
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_hw.c14
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c61
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_osdep.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_pble.c9
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_status.h1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_type.h14
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c106
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_user.h36
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c47
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c449
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.h3
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_vf.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_vf.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_virtchnl.c102
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c24
-rw-r--r--drivers/infiniband/hw/mlx4/main.c9
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c9
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h7
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c41
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c33
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c17
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c127
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h8
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c25
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c68
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c16
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c60
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c43
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c7
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h5
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c15
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h7
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c5
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c1
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c4
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c40
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c17
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c67
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c113
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c149
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c48
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c8
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c4
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c852
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h69
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c226
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c737
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h30
-rw-r--r--drivers/input/joystick/analog.c6
-rw-r--r--drivers/input/joystick/xpad.c54
-rw-r--r--drivers/input/keyboard/adp5588-keys.c10
-rw-r--r--drivers/input/keyboard/adp5589-keys.c12
-rw-r--r--drivers/input/keyboard/omap-keypad.c52
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c28
-rw-r--r--drivers/input/misc/cm109.c47
-rw-r--r--drivers/input/misc/max77693-haptic.c17
-rw-r--r--drivers/input/misc/max8997_haptic.c6
-rw-r--r--drivers/input/misc/pwm-beeper.c6
-rw-r--r--drivers/input/misc/rotary_encoder.c8
-rw-r--r--drivers/input/misc/twl6040-vibra.c15
-rw-r--r--drivers/input/mouse/byd.c1
-rw-r--r--drivers/input/mouse/elantech.c8
-rw-r--r--drivers/input/mouse/vmmouse.c22
-rw-r--r--drivers/input/rmi4/rmi_bus.c4
-rw-r--r--drivers/input/rmi4/rmi_f12.c9
-rw-r--r--drivers/input/tablet/acecad.c12
-rw-r--r--drivers/input/tablet/aiptek.c20
-rw-r--r--drivers/input/tablet/gtco.c24
-rw-r--r--drivers/input/tablet/kbtab.c8
-rw-r--r--drivers/input/touchscreen/ad7879.c10
-rw-r--r--drivers/input/touchscreen/bcm_iproc_tsc.c77
-rw-r--r--drivers/input/touchscreen/cyttsp4_core.c2
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c9
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c16
-rw-r--r--drivers/input/touchscreen/ts4800-ts.c13
-rw-r--r--drivers/input/touchscreen/tsc2004.c7
-rw-r--r--drivers/input/touchscreen/tsc2005.c7
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c15
-rw-r--r--drivers/input/touchscreen/tsc200x-core.h2
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c14
-rw-r--r--drivers/iommu/Kconfig13
-rw-r--r--drivers/iommu/amd_iommu.c270
-rw-r--r--drivers/iommu/amd_iommu_init.c329
-rw-r--r--drivers/iommu/amd_iommu_types.h40
-rw-r--r--drivers/iommu/arm-smmu-v3.c37
-rw-r--r--drivers/iommu/arm-smmu.c352
-rw-r--r--drivers/iommu/dma-iommu.c146
-rw-r--r--drivers/iommu/dmar.c47
-rw-r--r--drivers/iommu/intel-iommu.c320
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c29
-rw-r--r--drivers/iommu/io-pgtable-arm.c9
-rw-r--r--drivers/iommu/io-pgtable.c3
-rw-r--r--drivers/iommu/io-pgtable.h6
-rw-r--r--drivers/iommu/iommu.c32
-rw-r--r--drivers/iommu/iova.c421
-rw-r--r--drivers/iommu/irq_remapping.c2
-rw-r--r--drivers/iommu/mtk_iommu.c16
-rw-r--r--drivers/iommu/of_iommu.c14
-rw-r--r--drivers/iommu/omap-iommu-debug.c2
-rw-r--r--drivers/iommu/omap-iommu.c10
-rw-r--r--drivers/iommu/rockchip-iommu.c2
-rw-r--r--drivers/ipack/devices/ipoctal.c5
-rw-r--r--drivers/irqchip/Kconfig16
-rw-r--r--drivers/irqchip/Makefile4
-rw-r--r--drivers/irqchip/irq-alpine-msi.c2
-rw-r--r--drivers/irqchip/irq-bcm2836.c10
-rw-r--r--drivers/irqchip/irq-clps711x.c2
-rw-r--r--drivers/irqchip/irq-crossbar.c2
-rw-r--r--drivers/irqchip/irq-eznps.c165
-rw-r--r--drivers/irqchip/irq-gic-common.c33
-rw-r--r--drivers/irqchip/irq-gic-common.h3
-rw-r--r--drivers/irqchip/irq-gic-v2m.c19
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c91
-rw-r--r--drivers/irqchip/irq-gic-v3.c351
-rw-r--r--drivers/irqchip/irq-gic.c406
-rw-r--r--drivers/irqchip/irq-hip04.c2
-rw-r--r--drivers/irqchip/irq-lpc32xx.c238
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c240
-rw-r--r--drivers/irqchip/irq-mbigen.c4
-rw-r--r--drivers/irqchip/irq-mips-gic.c36
-rw-r--r--drivers/irqchip/irq-partition-percpu.c256
-rw-r--r--drivers/irqchip/irq-pic32-evic.c2
-rw-r--r--drivers/irqchip/irq-tegra.c2
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c1
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/isdn/hardware/eicon/message.c21
-rw-r--r--drivers/isdn/hysdn/hysdn_net.c2
-rw-r--r--drivers/isdn/i4l/isdn_net.c4
-rw-r--r--drivers/isdn/i4l/isdn_tty.c44
-rw-r--r--drivers/isdn/i4l/isdn_x25iface.c2
-rw-r--r--drivers/leds/Kconfig5
-rw-r--r--drivers/leds/led-core.c9
-rw-r--r--drivers/leds/led-triggers.c2
-rw-r--r--drivers/leds/leds-gpio.c4
-rw-r--r--drivers/leds/leds-pwm.c11
-rw-r--r--drivers/leds/leds-ss4200.c13
-rw-r--r--drivers/leds/leds-tca6507.c2
-rw-r--r--drivers/leds/leds.h1
-rw-r--r--drivers/leds/trigger/Kconfig18
-rw-r--r--drivers/leds/trigger/Makefile2
-rw-r--r--drivers/leds/trigger/ledtrig-heartbeat.c31
-rw-r--r--drivers/leds/trigger/ledtrig-ide-disk.c3
-rw-r--r--drivers/leds/trigger/ledtrig-mtd.c45
-rw-r--r--drivers/leds/trigger/ledtrig-panic.c77
-rw-r--r--drivers/lguest/x86/core.c2
-rw-r--r--drivers/lightnvm/core.c370
-rw-r--r--drivers/lightnvm/gennvm.c100
-rw-r--r--drivers/lightnvm/rrpc.c42
-rw-r--r--drivers/lightnvm/rrpc.h2
-rw-r--r--drivers/lightnvm/sysblk.c284
-rw-r--r--drivers/macintosh/rack-meter.c5
-rw-r--r--drivers/macintosh/via-pmu.c4
-rw-r--r--drivers/mailbox/mailbox-sti.c4
-rw-r--r--drivers/mailbox/omap-mailbox.c220
-rw-r--r--drivers/mcb/mcb-core.c116
-rw-r--r--drivers/mcb/mcb-internal.h1
-rw-r--r--drivers/mcb/mcb-parse.c15
-rw-r--r--drivers/mcb/mcb-pci.c23
-rw-r--r--drivers/md/bcache/alloc.c2
-rw-r--r--drivers/md/bcache/btree.c2
-rw-r--r--drivers/md/bcache/super.c2
-rw-r--r--drivers/md/bcache/writeback.c3
-rw-r--r--drivers/md/bitmap.c88
-rw-r--r--drivers/md/bitmap.h3
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/dm-mpath.c351
-rw-r--r--drivers/md/dm-raid.c7
-rw-r--r--drivers/md/dm-table.c20
-rw-r--r--drivers/md/dm-thin.c165
-rw-r--r--drivers/md/dm.c10
-rw-r--r--drivers/md/md-cluster.c96
-rw-r--r--drivers/md/md-cluster.h1
-rw-r--r--drivers/md/md.c86
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/md/raid10.c20
-rw-r--r--drivers/md/raid5-cache.c7
-rw-r--r--drivers/md/raid5.c10
-rw-r--r--drivers/media/common/Kconfig1
-rw-r--r--drivers/media/common/Makefile2
-rw-r--r--drivers/media/common/v4l2-tpg/Kconfig2
-rw-r--r--drivers/media/common/v4l2-tpg/Makefile3
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c (renamed from drivers/media/platform/vivid/vivid-tpg-colors.c)7
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c (renamed from drivers/media/platform/vivid/vivid-tpg.c)25
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h14
-rw-r--r--drivers/media/dvb-core/dvbdev.c4
-rw-r--r--drivers/media/dvb-frontends/dib0090.c2
-rw-r--r--drivers/media/dvb-frontends/ds3000.c14
-rw-r--r--drivers/media/dvb-frontends/m88ds3103.c19
-rw-r--r--drivers/media/dvb-frontends/m88ds3103_priv.h4
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c20
-rw-r--r--drivers/media/dvb-frontends/rtl2830_priv.h2
-rw-r--r--drivers/media/dvb-frontends/rtl2832.c243
-rw-r--r--drivers/media/dvb-frontends/rtl2832.h4
-rw-r--r--drivers/media/dvb-frontends/rtl2832_priv.h3
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c303
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.h5
-rw-r--r--drivers/media/dvb-frontends/si2168.c106
-rw-r--r--drivers/media/dvb-frontends/si2168_priv.h3
-rw-r--r--drivers/media/dvb-frontends/zl10353.c6
-rw-r--r--drivers/media/i2c/ad9389b.c8
-rw-r--r--drivers/media/i2c/adp1653.c14
-rw-r--r--drivers/media/i2c/adv7180.c160
-rw-r--r--drivers/media/i2c/adv7511.c6
-rw-r--r--drivers/media/i2c/adv7604.c54
-rw-r--r--drivers/media/i2c/adv7842.c6
-rw-r--r--drivers/media/i2c/m5mols/m5mols_controls.c2
-rw-r--r--drivers/media/i2c/saa7115.c15
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c12
-rw-r--r--drivers/media/i2c/smiapp/smiapp.h1
-rw-r--r--drivers/media/i2c/tc358743.c5
-rw-r--r--drivers/media/i2c/ths7303.c2
-rw-r--r--drivers/media/i2c/tvp5150.c9
-rw-r--r--drivers/media/media-device.c50
-rw-r--r--drivers/media/media-devnode.c6
-rw-r--r--drivers/media/media-entity.c18
-rw-r--r--drivers/media/pci/Kconfig1
-rw-r--r--drivers/media/pci/Makefile1
-rw-r--r--drivers/media/pci/cobalt/Kconfig1
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h13
-rw-r--r--drivers/media/pci/cx23885/cx23885-av.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.h13
-rw-r--r--drivers/media/pci/smipcie/smipcie-ir.c2
-rw-r--r--drivers/media/pci/smipcie/smipcie-main.c17
-rw-r--r--drivers/media/pci/smipcie/smipcie.h2
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c28
-rw-r--r--drivers/media/pci/tw686x/Kconfig18
-rw-r--r--drivers/media/pci/tw686x/Makefile3
-rw-r--r--drivers/media/pci/tw686x/tw686x-audio.c386
-rw-r--r--drivers/media/pci/tw686x/tw686x-core.c415
-rw-r--r--drivers/media/pci/tw686x/tw686x-regs.h122
-rw-r--r--drivers/media/pci/tw686x/tw686x-video.c937
-rw-r--r--drivers/media/pci/tw686x/tw686x.h158
-rw-r--r--drivers/media/pci/zoran/videocodec.c5
-rw-r--r--drivers/media/platform/Kconfig4
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c4
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c35
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h1
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c50
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c8
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c6
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c2
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c27
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.h5
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c7
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c37
-rw-r--r--drivers/media/platform/s5p-tv/mixer.h2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_drv.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_grp_layer.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_vp_layer.c2
-rw-r--r--drivers/media/platform/soc_camera/Kconfig4
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c2
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c69
-rw-r--r--drivers/media/platform/vivid/Kconfig1
-rw-r--r--drivers/media/platform/vivid/Makefile2
-rw-r--r--drivers/media/platform/vivid/vivid-core.c22
-rw-r--r--drivers/media/platform/vivid/vivid-core.h2
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c13
-rw-r--r--drivers/media/platform/vivid/vivid-rds-gen.c19
-rw-r--r--drivers/media/platform/vivid/vivid-tpg-colors.h68
-rw-r--r--drivers/media/platform/vivid/vivid-tpg.h598
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c101
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c97
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.h9
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c103
-rw-r--r--drivers/media/platform/vsp1/vsp1.h14
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.c359
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.h3
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c567
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.h49
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c234
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.h27
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c34
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c288
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h63
-rw-r--r--drivers/media/platform/vsp1/vsp1_hsit.c130
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c179
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.c172
-rw-r--r--drivers/media/platform/vsp1/vsp1_lut.h6
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.c71
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.h19
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h10
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c275
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c171
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h64
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c214
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c223
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.h3
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c493
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c279
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c8
-rw-r--r--drivers/media/rc/ati_remote.c11
-rw-r--r--drivers/media/rc/mceusb.c6
-rw-r--r--drivers/media/rc/rc-main.c9
-rw-r--r--drivers/media/tuners/qm1d1c0042.c38
-rw-r--r--drivers/media/tuners/si2157.c19
-rw-r--r--drivers/media/tuners/si2157_priv.h1
-rw-r--r--drivers/media/usb/airspy/airspy.c3
-rw-r--r--drivers/media/usb/au0828/au0828-core.c38
-rw-r--r--drivers/media/usb/au0828/au0828-video.c4
-rw-r--r--drivers/media/usb/au0828/au0828.h1
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c31
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c9
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c47
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.h24
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c5
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c7
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c4
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c4
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c63
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c4
-rw-r--r--drivers/media/usb/em28xx/Kconfig2
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c88
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c185
-rw-r--r--drivers/media/usb/em28xx/em28xx-reg.h13
-rw-r--r--drivers/media/usb/em28xx/em28xx.h3
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c9
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c58
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c73
-rw-r--r--drivers/media/v4l2-core/v4l2-mc.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c44
-rw-r--r--drivers/memory/Kconfig2
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/fsl_ifc.c36
-rw-r--r--drivers/memory/mtk-smi.c2
-rw-r--r--drivers/memory/of_memory.c2
-rw-r--r--drivers/memory/omap-gpmc.c657
-rw-r--r--drivers/memory/samsung/Kconfig13
-rw-r--r--drivers/memory/samsung/Makefile1
-rw-r--r--drivers/memory/samsung/exynos-srom.c231
-rw-r--r--drivers/memory/samsung/exynos-srom.h51
-rw-r--r--drivers/memstick/core/ms_block.c16
-rw-r--r--drivers/memstick/core/ms_block.h2
-rw-r--r--drivers/memstick/core/mspro_block.c3
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c2
-rw-r--r--drivers/message/fusion/mptlan.c2
-rw-r--r--drivers/message/fusion/mptsas.c4
-rw-r--r--drivers/message/fusion/mptspi.c2
-rw-r--r--drivers/mfd/Kconfig33
-rw-r--r--drivers/mfd/Makefile2
-rw-r--r--drivers/mfd/ab8500-debugfs.c2
-rw-r--r--drivers/mfd/act8945a.c13
-rw-r--r--drivers/mfd/arizona-core.c10
-rw-r--r--drivers/mfd/arizona-irq.c3
-rw-r--r--drivers/mfd/as3711.c13
-rw-r--r--drivers/mfd/as3722.c31
-rw-r--r--drivers/mfd/asic3.c10
-rw-r--r--drivers/mfd/atmel-hlcdc.c14
-rw-r--r--drivers/mfd/axp20x-rsb.c1
-rw-r--r--drivers/mfd/axp20x.c90
-rw-r--r--drivers/mfd/bcm590xx.c11
-rw-r--r--drivers/mfd/da9063-irq.c8
-rw-r--r--drivers/mfd/dm355evm_msp.c10
-rw-r--r--drivers/mfd/hi6421-pmic-core.c12
-rw-r--r--drivers/mfd/hi655x-pmic.c162
-rw-r--r--drivers/mfd/htc-egpio.c10
-rw-r--r--drivers/mfd/htc-i2cpld.c15
-rw-r--r--drivers/mfd/intel-lpss-acpi.c12
-rw-r--r--drivers/mfd/intel-lpss-pci.c20
-rw-r--r--drivers/mfd/intel-lpss.c6
-rw-r--r--drivers/mfd/intel-lpss.h4
-rw-r--r--drivers/mfd/intel_quark_i2c_gpio.c27
-rw-r--r--drivers/mfd/lp3943.c14
-rw-r--r--drivers/mfd/lp8788-irq.c2
-rw-r--r--drivers/mfd/max77620.c592
-rw-r--r--drivers/mfd/max77686.c46
-rw-r--r--drivers/mfd/max77693.c16
-rw-r--r--drivers/mfd/menf21bmc.c11
-rw-r--r--drivers/mfd/mfd-core.c44
-rw-r--r--drivers/mfd/mt6397-core.c40
-rw-r--r--drivers/mfd/rc5t583-irq.c11
-rw-r--r--drivers/mfd/rc5t583.c24
-rw-r--r--drivers/mfd/rdc321x-southbridge.c13
-rw-r--r--drivers/mfd/rk808.c7
-rw-r--r--drivers/mfd/rn5t618.c5
-rw-r--r--drivers/mfd/rt5033.c14
-rw-r--r--drivers/mfd/sec-core.c20
-rw-r--r--drivers/mfd/sec-irq.c14
-rw-r--r--drivers/mfd/sky81452.c10
-rw-r--r--drivers/mfd/sm501.c15
-rw-r--r--drivers/mfd/smsc-ece1099.c10
-rw-r--r--drivers/mfd/stw481x.c11
-rw-r--r--drivers/mfd/tc6393xb.c14
-rw-r--r--drivers/mfd/tps6105x.c1
-rw-r--r--drivers/mfd/tps65010.c8
-rw-r--r--drivers/mfd/tps6507x.c13
-rw-r--r--drivers/mfd/tps65217.c14
-rw-r--r--drivers/mfd/tps65910.c34
-rw-r--r--drivers/mfd/twl4030-irq.c2
-rw-r--r--drivers/mfd/twl4030-power.c1
-rw-r--r--drivers/mfd/twl6040.c8
-rw-r--r--drivers/mfd/ucb1x00-core.c14
-rw-r--r--drivers/mfd/vexpress-sysreg.c2
-rw-r--r--drivers/mfd/wl1273-core.c14
-rw-r--r--drivers/mfd/wm5110-tables.c1
-rw-r--r--drivers/mfd/wm8400-core.c52
-rw-r--r--drivers/misc/cxl/api.c28
-rw-r--r--drivers/misc/cxl/context.c3
-rw-r--r--drivers/misc/cxl/cxl.h15
-rw-r--r--drivers/misc/cxl/fault.c10
-rw-r--r--drivers/misc/cxl/guest.c78
-rw-r--r--drivers/misc/cxl/native.c29
-rw-r--r--drivers/misc/cxl/pci.c64
-rw-r--r--drivers/misc/cxl/sysfs.c10
-rw-r--r--drivers/misc/eeprom/Kconfig2
-rw-r--r--drivers/misc/eeprom/at24.c112
-rw-r--r--drivers/misc/eeprom/at25.c91
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c92
-rw-r--r--drivers/misc/mei/bus.c27
-rw-r--r--drivers/misc/mei/client.c26
-rw-r--r--drivers/misc/mei/hbm.c23
-rw-r--r--drivers/misc/mei/mei_dev.h2
-rw-r--r--drivers/misc/mic/Kconfig1
-rw-r--r--drivers/misc/mic/host/mic_boot.c6
-rw-r--r--drivers/misc/mic/scif/scif_fence.c3
-rw-r--r--drivers/misc/mic/vop/vop_vringh.c5
-rw-r--r--drivers/misc/qcom-coincell.c3
-rw-r--r--drivers/misc/sgi-gru/grukservices.c38
-rw-r--r--drivers/misc/sram.c4
-rw-r--r--drivers/misc/ti-st/st_kim.c1
-rw-r--r--drivers/mmc/card/block.c82
-rw-r--r--drivers/mmc/card/sdio_uart.c2
-rw-r--r--drivers/mmc/core/Kconfig21
-rw-r--r--drivers/mmc/core/Makefile4
-rw-r--r--drivers/mmc/core/core.c12
-rw-r--r--drivers/mmc/core/host.c46
-rw-r--r--drivers/mmc/core/mmc.c61
-rw-r--r--drivers/mmc/core/pwrseq.c108
-rw-r--r--drivers/mmc/core/pwrseq.h19
-rw-r--r--drivers/mmc/core/pwrseq_emmc.c81
-rw-r--r--drivers/mmc/core/pwrseq_simple.c91
-rw-r--r--drivers/mmc/core/sdio_cis.c7
-rw-r--r--drivers/mmc/host/Kconfig4
-rw-r--r--drivers/mmc/host/atmel-mci.c9
-rw-r--r--drivers/mmc/host/davinci_mmc.c151
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c23
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c107
-rw-r--r--drivers/mmc/host/dw_mmc.c23
-rw-r--r--drivers/mmc/host/dw_mmc.h2
-rw-r--r--drivers/mmc/host/mmci.c20
-rw-r--r--drivers/mmc/host/mtk-sd.c19
-rw-r--r--drivers/mmc/host/omap.c48
-rw-r--r--drivers/mmc/host/omap_hsmmc.c90
-rw-r--r--drivers/mmc/host/pxamci.c16
-rw-r--r--drivers/mmc/host/sdhci-acpi.c9
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c2
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c26
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c76
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c9
-rw-r--r--drivers/mmc/host/sdhci-pic32.c1
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c42
-rw-r--r--drivers/mmc/host/sdhci.c324
-rw-r--r--drivers/mmc/host/sdhci.h10
-rw-r--r--drivers/mmc/host/sh_mmcif.c74
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c194
-rw-r--r--drivers/mmc/host/sunxi-mmc.c9
-rw-r--r--drivers/mmc/host/tmio_mmc.h75
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c1
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c196
-rw-r--r--drivers/mmc/host/toshsd.c1
-rw-r--r--drivers/mmc/host/usdhi6rol0.c60
-rw-r--r--drivers/mtd/chips/Kconfig1
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c29
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.h3
-rw-r--r--drivers/mtd/devices/docg3.c46
-rw-r--r--drivers/mtd/devices/m25p80.c22
-rw-r--r--drivers/mtd/devices/pmc551.c2
-rw-r--r--drivers/mtd/maps/Kconfig10
-rw-r--r--drivers/mtd/maps/Makefile3
-rw-r--r--drivers/mtd/maps/ck804xrom.c4
-rw-r--r--drivers/mtd/maps/esb2rom.c4
-rw-r--r--drivers/mtd/maps/ichxrom.c4
-rw-r--r--drivers/mtd/maps/physmap_of.c6
-rw-r--r--drivers/mtd/maps/physmap_of_versatile.c255
-rw-r--r--drivers/mtd/maps/physmap_of_versatile.h16
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c6
-rw-r--r--drivers/mtd/maps/uclinux.c27
-rw-r--r--drivers/mtd/mtd_blkdevs.c2
-rw-r--r--drivers/mtd/mtdchar.c123
-rw-r--r--drivers/mtd/mtdconcat.c2
-rw-r--r--drivers/mtd/mtdcore.c379
-rw-r--r--drivers/mtd/mtdpart.c23
-rw-r--r--drivers/mtd/nand/ams-delta.c1
-rw-r--r--drivers/mtd/nand/atmel_nand.c350
-rw-r--r--drivers/mtd/nand/atmel_nand_nfc.h3
-rw-r--r--drivers/mtd/nand/au1550nd.c1
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c52
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c290
-rw-r--r--drivers/mtd/nand/cafe_nand.c44
-rw-r--r--drivers/mtd/nand/cmx270_nand.c1
-rw-r--r--drivers/mtd/nand/davinci_nand.c210
-rw-r--r--drivers/mtd/nand/denali.c50
-rw-r--r--drivers/mtd/nand/diskonchip.c60
-rw-r--r--drivers/mtd/nand/docg4.c33
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c84
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c317
-rw-r--r--drivers/mtd/nand/fsl_upm.c1
-rw-r--r--drivers/mtd/nand/fsmc_nand.c332
-rw-r--r--drivers/mtd/nand/gpio.c1
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c161
-rw-r--r--drivers/mtd/nand/hisi504_nand.c40
-rw-r--r--drivers/mtd/nand/jz4740_nand.c3
-rw-r--r--drivers/mtd/nand/jz4780_bch.c1
-rw-r--r--drivers/mtd/nand/jz4780_nand.c21
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c51
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c83
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c1
-rw-r--r--drivers/mtd/nand/mxc_nand.c257
-rw-r--r--drivers/mtd/nand/nand_base.c664
-rw-r--r--drivers/mtd/nand/nand_bch.c48
-rw-r--r--drivers/mtd/nand/nandsim.c10
-rw-r--r--drivers/mtd/nand/nuc900_nand.c1
-rw-r--r--drivers/mtd/nand/omap2.c451
-rw-r--r--drivers/mtd/nand/orion_nand.c1
-rw-r--r--drivers/mtd/nand/pasemi_nand.c16
-rw-r--r--drivers/mtd/nand/plat_nand.c1
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c132
-rw-r--r--drivers/mtd/nand/qcom_nandc.c88
-rw-r--r--drivers/mtd/nand/s3c2410.c36
-rw-r--r--drivers/mtd/nand/sh_flctl.c115
-rw-r--r--drivers/mtd/nand/sharpsl.c2
-rw-r--r--drivers/mtd/nand/sm_common.c93
-rw-r--r--drivers/mtd/nand/socrates_nand.c1
-rw-r--r--drivers/mtd/nand/sunxi_nand.c600
-rw-r--r--drivers/mtd/nand/vf610_nfc.c35
-rw-r--r--drivers/mtd/onenand/onenand_base.c235
-rw-r--r--drivers/mtd/sm_ftl.c2
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c1
-rw-r--r--drivers/mtd/ubi/build.c21
-rw-r--r--drivers/mtd/ubi/debug.c3
-rw-r--r--drivers/mtd/ubi/eba.c26
-rw-r--r--drivers/mtd/ubi/kapi.c19
-rw-r--r--drivers/mtd/ubi/vmt.c2
-rw-r--r--drivers/mtd/ubi/wl.c2
-rw-r--r--drivers/net/Kconfig17
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/Space.c21
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/arcnet/com90xx.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c75
-rw-r--r--drivers/net/bonding/bond_alb.c7
-rw-r--r--drivers/net/bonding/bond_main.c12
-rw-r--r--drivers/net/bonding/bond_netlink.c6
-rw-r--r--drivers/net/can/at91_can.c5
-rw-r--r--drivers/net/can/c_can/c_can.c38
-rw-r--r--drivers/net/can/dev.c9
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c187
-rw-r--r--drivers/net/can/janz-ican3.c104
-rw-r--r--drivers/net/can/mscan/mscan.c4
-rw-r--r--drivers/net/can/sja1000/plx_pci.c64
-rw-r--r--drivers/net/can/sja1000/sja1000.c6
-rw-r--r--drivers/net/can/spi/mcp251x.c3
-rw-r--r--drivers/net/can/usb/Kconfig5
-rw-r--r--drivers/net/can/usb/ems_usb.c4
-rw-r--r--drivers/net/can/usb/esd_usb2.c4
-rw-r--r--drivers/net/can/usb/gs_usb.c17
-rw-r--r--drivers/net/can/usb/kvaser_usb.c8
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c4
-rw-r--r--drivers/net/cris/eth_v10.c2
-rw-r--r--drivers/net/dsa/Kconfig45
-rw-r--r--drivers/net/dsa/Makefile15
-rw-r--r--drivers/net/dsa/bcm_sf2.c62
-rw-r--r--drivers/net/dsa/mv88e6060.c47
-rw-r--r--drivers/net/dsa/mv88e6060.h11
-rw-r--r--drivers/net/dsa/mv88e6123.c124
-rw-r--r--drivers/net/dsa/mv88e6131.c177
-rw-r--r--drivers/net/dsa/mv88e6171.c123
-rw-r--r--drivers/net/dsa/mv88e6352.c345
-rw-r--r--drivers/net/dsa/mv88e6xxx.c2280
-rw-r--r--drivers/net/dsa/mv88e6xxx.h381
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c515.c2
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c6
-rw-r--r--drivers/net/ethernet/8390/lib8390.c4
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c2
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c2
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c2
-rw-r--r--drivers/net/ethernet/agere/et131x.c6
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c6
-rw-r--r--drivers/net/ethernet/amd/7990.c12
-rw-r--r--drivers/net/ethernet/amd/a2065.c9
-rw-r--r--drivers/net/ethernet/amd/atarilance.c2
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c8
-rw-r--r--drivers/net/ethernet/amd/declance.c2
-rw-r--r--drivers/net/ethernet/amd/lance.c2
-rw-r--r--drivers/net/ethernet/amd/ni65.c4
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c4
-rw-r--r--drivers/net/ethernet/amd/sunlance.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_cle.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_cle.h2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c15
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h2
-rw-r--r--drivers/net/ethernet/arc/emac_mdio.c2
-rw-r--r--drivers/net/ethernet/atheros/alx/alx.h4
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c63
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h3
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c11
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e.h1
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c12
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c10
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c173
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c396
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h35
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c300
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h467
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c44
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h3
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c5
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c54
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.c152
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c13
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c4
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c16
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c95
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h40
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c100
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c147
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c330
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c115
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h29
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c72
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c1
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c15
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c11
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c45
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c2
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c2
-rw-r--r--drivers/net/ethernet/dlink/sundance.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c10
-rw-r--r--drivers/net/ethernet/ethoc.c23
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c1
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c36
-rw-r--r--drivers/net/ethernet/fealnx.c2
-rw-r--r--drivers/net/ethernet/freescale/fec.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c87
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c57
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.h4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c45
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fcc.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c6
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c47
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h1
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c52
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c17
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c93
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c260
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c209
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h21
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c171
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c67
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c187
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h44
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c11
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/i825xx/82596.c2
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c2
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c9
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/phy.c26
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c478
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h6
-rw-r--r--drivers/net/ethernet/intel/Kconfig80
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c12
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c30
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h111
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c61
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c44
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h8
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c173
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h10
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/Makefile7
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h51
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.h4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c352
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_iov.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c150
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.h4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c50
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c226
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c141
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.h21
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ptp.c462
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.c44
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.h4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_type.h28
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c63
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.h14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h33
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c37
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h82
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c168
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c386
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_fcoe.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c332
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c86
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h20
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c1083
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h114
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h42
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl.h45
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c509
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h11
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h70
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_devids.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c1039
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h114
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h51
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h45
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h49
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c329
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c503
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c145
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c8
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h30
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h108
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c10
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h6
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h40
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c21
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c221
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c42
-rw-r--r--drivers/net/ethernet/intel/igbvf/defines.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h4
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c196
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h99
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c29
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c161
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c50
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c1054
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_model.h14
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c117
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h302
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c35
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c693
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h29
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c230
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h37
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c298
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c224
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h6
-rw-r--r--drivers/net/ethernet/jme.c2
-rw-r--r--drivers/net/ethernet/korina.c8
-rw-r--r--drivers/net/ethernet/lantiq_etop.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c16
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c201
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c93
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c209
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c439
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h591
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c752
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c341
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c1060
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c1366
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c779
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h337
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c1007
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c135
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c307
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c226
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c138
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vxlan.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c736
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h82
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h682
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c581
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h139
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c1001
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c484
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c50
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c7
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c4
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c41
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c4
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c2
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c1101
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h10
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c30
-rw-r--r--drivers/net/ethernet/netx-eth.c12
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c12
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c2
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c2
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig31
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h84
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c186
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c563
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.h80
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c788
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h148
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.c67
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c167
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c160
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h36
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c725
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h239
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c328
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c356
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h80
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h53
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_selftest.c76
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_selftest.h40
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h36
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c322
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c57
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c3613
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h388
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c1102
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h990
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h45
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c628
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c547
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c5
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c4
-rw-r--r--drivers/net/ethernet/realtek/atp.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c44
-rw-r--r--drivers/net/ethernet/renesas/ravb.h206
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c289
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c26
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c39
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c4
-rw-r--r--drivers/net/ethernet/sfc/efx.c32
-rw-r--r--drivers/net/ethernet/sfc/farch.c3
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c7
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h12
-rw-r--r--drivers/net/ethernet/sfc/rx.c102
-rw-r--r--drivers/net/ethernet/sgi/meth.c4
-rw-r--r--drivers/net/ethernet/sis/sis900.c2
-rw-r--r--drivers/net/ethernet/smsc/epic100.c2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h64
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c113
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h255
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c407
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c389
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h129
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c354
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h202
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c225
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c349
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c659
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c102
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c13
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c4
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c10
-rw-r--r--drivers/net/ethernet/ti/cpsw.c60
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c4
-rw-r--r--drivers/net/ethernet/ti/tlan.c2
-rw-r--r--drivers/net/ethernet/tile/tilegx.c6
-rw-r--r--drivers/net/ethernet/tile/tilepro.c4
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c4
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c3
-rw-r--r--drivers/net/ethernet/via/via-rhine.c2
-rw-r--r--drivers/net/ethernet/wiznet/Kconfig14
-rw-r--r--drivers/net/ethernet/wiznet/Makefile1
-rw-r--r--drivers/net/ethernet/wiznet/w5100-spi.c466
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c1076
-rw-r--r--drivers/net/ethernet/wiznet/w5100.h37
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c4
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/fddi/skfp/Makefile2
-rw-r--r--drivers/net/fjes/fjes_hw.c30
-rw-r--r--drivers/net/fjes/fjes_hw.h9
-rw-r--r--drivers/net/fjes/fjes_main.c137
-rw-r--r--drivers/net/geneve.c113
-rw-r--r--drivers/net/gtp.c1375
-rw-r--r--drivers/net/hamradio/baycom_epp.c14
-rw-r--r--drivers/net/hamradio/hdlcdrv.c6
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/hamradio/scc.c2
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h29
-rw-r--r--drivers/net/hyperv/netvsc.c161
-rw-r--r--drivers/net/hyperv/netvsc_drv.c428
-rw-r--r--drivers/net/hyperv/rndis_filter.c88
-rw-r--r--drivers/net/ieee802154/adf7242.c3
-rw-r--r--drivers/net/ieee802154/at86rf230.c6
-rw-r--r--drivers/net/ieee802154/atusb.c91
-rw-r--r--drivers/net/ieee802154/mrf24j40.c14
-rw-r--r--drivers/net/ifb.c3
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c19
-rw-r--r--drivers/net/irda/Kconfig7
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/ali-ircc.c8
-rw-r--r--drivers/net/irda/bfin_sir.c2
-rw-r--r--drivers/net/irda/irda-usb.c4
-rw-r--r--drivers/net/irda/nsc-ircc.c11
-rw-r--r--drivers/net/irda/sh_irda.c875
-rw-r--r--drivers/net/irda/smsc-ircc2.c2
-rw-r--r--drivers/net/irda/stir4200.c2
-rw-r--r--drivers/net/irda/via-ircc.c8
-rw-r--r--drivers/net/macsec.c169
-rw-r--r--drivers/net/macvlan.c10
-rw-r--r--drivers/net/macvtap.c39
-rw-r--r--drivers/net/phy/dp83867.c13
-rw-r--r--drivers/net/phy/fixed_phy.c30
-rw-r--r--drivers/net/phy/lxt.c22
-rw-r--r--drivers/net/phy/marvell.c82
-rw-r--r--drivers/net/phy/mdio-mux.c10
-rw-r--r--drivers/net/phy/mdio_bus.c10
-rw-r--r--drivers/net/phy/micrel.c34
-rw-r--r--drivers/net/phy/phy.c105
-rw-r--r--drivers/net/phy/phy_device.c5
-rw-r--r--drivers/net/phy/smsc.c17
-rw-r--r--drivers/net/ppp/ppp_generic.c320
-rw-r--r--drivers/net/rionet.c6
-rw-r--r--drivers/net/slip/slip.c2
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/tun.c118
-rw-r--r--drivers/net/usb/catc.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c6
-rw-r--r--drivers/net/usb/ch9200.c3
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/lan78xx.c4
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/r8152.c158
-rw-r--r--drivers/net/usb/rtl8150.c4
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/smsc95xx.c55
-rw-r--r--drivers/net/usb/usbnet.c15
-rw-r--r--drivers/net/veth.c7
-rw-r--r--drivers/net/virtio_net.c18
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vrf.c272
-rw-r--r--drivers/net/vxlan.c355
-rw-r--r--drivers/net/wan/cosa.c2
-rw-r--r--drivers/net/wan/farsync.c6
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/sbni.c8
-rw-r--r--drivers/net/wimax/i2400m/netdev.c2
-rw-r--r--drivers/net/wireless/admtek/adm8211.c4
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c4
-rw-r--r--drivers/net/wireless/ath/ath.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c50
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h17
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c482
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h118
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c129
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h60
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c717
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c300
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h18
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c729
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c270
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h17
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.c44
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c197
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c55
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h44
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c250
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h128
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h10
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c8
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c32
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c32
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c8
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c10
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c22
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h3
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c9
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c27
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig40
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c44
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c82
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar953x_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar956x_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c138
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/common-init.c28
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/dynack.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c69
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c277
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h90
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c12
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c6
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.c18
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c8
-rw-r--r--drivers/net/wireless/ath/regd.c16
-rw-r--r--drivers/net/wireless/ath/regd.h2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c12
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h55
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c145
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c228
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h14
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c12
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h20
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c337
-rw-r--r--drivers/net/wireless/ath/wil6210/debug.c22
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c196
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c99
-rw-r--r--drivers/net/wireless/ath/wil6210/ioctl.c11
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c170
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c9
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c259
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c1
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c204
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h19
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c69
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h12
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h177
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_platform.h8
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c233
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h1325
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c6
-rw-r--r--drivers/net/wireless/atmel/atmel.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/b43.h4
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c34
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_ac.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_common.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_ht.c16
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_lcn.c10
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_lp.c30
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c176
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_lpphy.c14
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_nphy.c16
-rw-r--r--drivers/net/wireless/broadcom/b43/tables_phy_lcn.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/xmit.c8
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c12
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c157
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c38
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c247
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c30
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c209
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c48
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c41
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c4
-rw-r--r--drivers/net/wireless/cisco/airo.c14
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c10
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c18
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c30
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c22
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c20
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c41
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c22
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c92
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h30
-rw-r--r--drivers/net/wireless/intel/iwlegacy/debug.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-1000.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-2000.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-5000.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-6000.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-7000.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-9000.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c121
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c1315
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c171
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c198
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h149
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c371
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c666
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c257
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c210
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c123
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c64
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c97
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/cfg.c6
-rw-r--r--drivers/net/wireless/intersil/orinoco/hw.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/scan.c4
-rw-r--r--drivers/net/wireless/intersil/p54/eeprom.c32
-rw-r--r--drivers/net/wireless/intersil/p54/main.c4
-rw-r--r--drivers/net/wireless/intersil/p54/p54.h2
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c4
-rw-r--r--drivers/net/wireless/intersil/prism54/isl_38xx.c35
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c21
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h1
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c10
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c4
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c74
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c127
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h11
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c24
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c113
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h22
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c202
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c88
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c42
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c15
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c47
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c96
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c11
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c88
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/init.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c34
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c43
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c21
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c22
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c22
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c16
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c4
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h99
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c93
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Makefile3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h301
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c586
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c1525
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c397
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c1682
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c)3601
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h46
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c44
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c847
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c611
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c856
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c652
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c851
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c41
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h6
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c100
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_pkt.c24
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h2
-rw-r--r--drivers/net/wireless/st/cw1200/main.c10
-rw-r--r--drivers/net/wireless/st/cw1200/scan.c2
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c6
-rw-r--r--drivers/net/wireless/st/cw1200/txrx.c2
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/ps.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/rx.c2
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c12
-rw-r--r--drivers/net/wireless/ti/wl12xx/scan.c22
-rw-r--r--drivers/net/wireless/ti/wl18xx/cmd.c6
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c6
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c22
-rw-r--r--drivers/net/wireless/ti/wl18xx/scan.c8
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c36
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h6
-rw-r--r--drivers/net/wireless/ti/wlcore/io.c17
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c42
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c16
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h2
-rw-r--r--drivers/net/wireless/wl3501_cs.c4
-rw-r--r--drivers/net/wireless/zydas/zd1201.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c4
-rw-r--r--drivers/net/xen-netback/Makefile2
-rw-r--r--drivers/net/xen-netback/common.h74
-rw-r--r--drivers/net/xen-netback/hash.c384
-rw-r--r--drivers/net/xen-netback/interface.c133
-rw-r--r--drivers/net/xen-netback/netback.c249
-rw-r--r--drivers/net/xen-netback/xenbus.c79
-rw-r--r--drivers/nfc/Kconfig11
-rw-r--r--drivers/nfc/Makefile2
-rw-r--r--drivers/nfc/fdp/fdp.c3
-rw-r--r--drivers/nfc/nxp-nci/i2c.c1
-rw-r--r--drivers/nfc/pn533/Kconfig27
-rw-r--r--drivers/nfc/pn533/Makefile9
-rw-r--r--drivers/nfc/pn533/i2c.c281
-rw-r--r--drivers/nfc/pn533/pn533.c (renamed from drivers/nfc/pn533.c)1220
-rw-r--r--drivers/nfc/pn533/pn533.h238
-rw-r--r--drivers/nfc/pn533/usb.c597
-rw-r--r--drivers/nfc/pn544/i2c.c1
-rw-r--r--drivers/nfc/st-nci/i2c.c33
-rw-r--r--drivers/nfc/st-nci/se.c28
-rw-r--r--drivers/nfc/st-nci/spi.c32
-rw-r--r--drivers/nfc/st-nci/st-nci.h14
-rw-r--r--drivers/nfc/st-nci/vendor_cmds.c62
-rw-r--r--drivers/nfc/st21nfca/core.c13
-rw-r--r--drivers/nfc/st21nfca/i2c.c33
-rw-r--r--drivers/nfc/st21nfca/se.c2
-rw-r--r--drivers/nvdimm/Kconfig13
-rw-r--r--drivers/nvdimm/Makefile1
-rw-r--r--drivers/nvdimm/blk.c208
-rw-r--r--drivers/nvdimm/btt.c26
-rw-r--r--drivers/nvdimm/btt_devs.c24
-rw-r--r--drivers/nvdimm/bus.c63
-rw-r--r--drivers/nvdimm/claim.c86
-rw-r--r--drivers/nvdimm/core.c5
-rw-r--r--drivers/nvdimm/dax_devs.c134
-rw-r--r--drivers/nvdimm/dimm_devs.c23
-rw-r--r--drivers/nvdimm/namespace_devs.c38
-rw-r--r--drivers/nvdimm/nd-core.h6
-rw-r--r--drivers/nvdimm/nd.h83
-rw-r--r--drivers/nvdimm/pfn.h5
-rw-r--r--drivers/nvdimm/pfn_devs.c358
-rw-r--r--drivers/nvdimm/pmem.c503
-rw-r--r--drivers/nvdimm/region.c2
-rw-r--r--drivers/nvdimm/region_devs.c34
-rw-r--r--drivers/nvme/host/Kconfig2
-rw-r--r--drivers/nvme/host/core.c329
-rw-r--r--drivers/nvme/host/lightnvm.c82
-rw-r--r--drivers/nvme/host/nvme.h92
-rw-r--r--drivers/nvme/host/pci.c281
-rw-r--r--drivers/nvmem/Kconfig5
-rw-r--r--drivers/nvmem/core.c89
-rw-r--r--drivers/nvmem/imx-ocotp.c55
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c94
-rw-r--r--drivers/nvmem/qfprom.c56
-rw-r--r--drivers/nvmem/rockchip-efuse.c49
-rw-r--r--drivers/nvmem/sunxi_sid.c54
-rw-r--r--drivers/nvmem/vf610-ocotp.c44
-rw-r--r--drivers/of/Kconfig3
-rw-r--r--drivers/of/Makefile2
-rw-r--r--drivers/of/address.c116
-rw-r--r--drivers/of/base.c212
-rw-r--r--drivers/of/device.c2
-rw-r--r--drivers/of/dynamic.c6
-rw-r--r--drivers/of/fdt.c393
-rw-r--r--drivers/of/of_mdio.c27
-rw-r--r--drivers/of/of_mtd.c119
-rw-r--r--drivers/of/of_numa.c211
-rw-r--r--drivers/of/of_reserved_mem.c11
-rw-r--r--drivers/of/platform.c28
-rw-r--r--drivers/of/unittest.c40
-rw-r--r--drivers/parport/procfs.c2
-rw-r--r--drivers/pci/Kconfig3
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/ecam.c164
-rw-r--r--drivers/pci/ecam.h67
-rw-r--r--drivers/pci/host/Kconfig18
-rw-r--r--drivers/pci/host/Makefile3
-rw-r--r--drivers/pci/host/pci-dra7xx.c4
-rw-r--r--drivers/pci/host/pci-host-common.c114
-rw-r--r--drivers/pci/host/pci-host-common.h47
-rw-r--r--drivers/pci/host/pci-host-generic.c52
-rw-r--r--drivers/pci/host/pci-hyperv.c38
-rw-r--r--drivers/pci/host/pci-imx6.c213
-rw-r--r--drivers/pci/host/pci-keystone-dw.c38
-rw-r--r--drivers/pci/host/pci-keystone.c52
-rw-r--r--drivers/pci/host/pci-keystone.h6
-rw-r--r--drivers/pci/host/pci-mvebu.c7
-rw-r--r--drivers/pci/host/pci-tegra.c244
-rw-r--r--drivers/pci/host/pci-thunder-ecam.c39
-rw-r--r--drivers/pci/host/pci-thunder-pem.c134
-rw-r--r--drivers/pci/host/pcie-armada8k.c262
-rw-r--r--drivers/pci/host/pcie-designware.c47
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c2
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c2
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c8
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c4
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c4
-rw-r--r--drivers/pci/pci-sysfs.c7
-rw-r--r--drivers/pci/pci.c160
-rw-r--r--drivers/pci/pcie/Kconfig14
-rw-r--r--drivers/pci/pcie/Makefile2
-rw-r--r--drivers/pci/pcie/pcie-dpc.c163
-rw-r--r--drivers/pci/pcie/portdrv.h15
-rw-r--r--drivers/pci/pcie/portdrv_acpi.c12
-rw-r--r--drivers/pci/pcie/portdrv_core.c36
-rw-r--r--drivers/pci/probe.c1
-rw-r--r--drivers/pci/quirks.c196
-rw-r--r--drivers/pci/search.c14
-rw-r--r--drivers/pcmcia/electra_cf.c2
-rw-r--r--drivers/perf/arm_pmu.c16
-rw-r--r--drivers/phy/Kconfig35
-rw-r--r--drivers/phy/Makefile5
-rw-r--r--drivers/phy/phy-bcm-ns-usb2.c137
-rw-r--r--drivers/phy/phy-brcm-sata.c412
-rw-r--r--drivers/phy/phy-brcmstb-sata.c250
-rw-r--r--drivers/phy/phy-core.c50
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c325
-rw-r--r--drivers/phy/phy-miphy28lp.c3
-rw-r--r--drivers/phy/phy-mt65xx-usb3.c77
-rw-r--r--drivers/phy/phy-rcar-gen2.c1
-rw-r--r--drivers/phy/phy-rcar-gen3-usb2.c98
-rw-r--r--drivers/phy/phy-rockchip-dp.c2
-rw-r--r--drivers/phy/phy-rockchip-usb.c2
-rw-r--r--drivers/phy/phy-stih407-usb.c4
-rw-r--r--drivers/phy/phy-sun4i-usb.c14
-rw-r--r--drivers/phy/phy-ti-pipe3.c15
-rw-r--r--drivers/phy/phy-twl4030-usb.c14
-rw-r--r--drivers/phy/tegra/Kconfig8
-rw-r--r--drivers/phy/tegra/Makefile6
-rw-r--r--drivers/phy/tegra/xusb-tegra124.c1752
-rw-r--r--drivers/phy/tegra/xusb-tegra210.c2045
-rw-r--r--drivers/phy/tegra/xusb.c1021
-rw-r--r--drivers/phy/tegra/xusb.h421
-rw-r--r--drivers/pinctrl/Makefile2
-rw-r--r--drivers/pinctrl/bcm/Kconfig13
-rw-r--r--drivers/pinctrl/bcm/Makefile1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c16
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-mux.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c14
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns2-mux.c1117
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c4
-rw-r--r--drivers/pinctrl/berlin/berlin.c5
-rw-r--r--drivers/pinctrl/core.c63
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c15
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c11
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1.h1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx21.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx25.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx27.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx35.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx50.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx51.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx53.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6dl.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6q.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sl.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6sx.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx6ul.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7d.c1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-vf610.c1
-rw-r--r--drivers/pinctrl/intel/Kconfig3
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c1697
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c8
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c8
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c21
-rw-r--r--drivers/pinctrl/meson/Makefile2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxbb.c432
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c12
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.h2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-370.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-375.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-38x.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-39x.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-xp.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c5
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-kirkwood.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c9
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.h1
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-orion.c6
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c7
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c168
-rw-r--r--drivers/pinctrl/pinconf-generic.c2
-rw-r--r--drivers/pinctrl/pinctrl-adi2.c13
-rw-r--r--drivers/pinctrl/pinctrl-amd.c12
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c11
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c30
-rw-r--r--drivers/pinctrl/pinctrl-at91.c28
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c15
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.c2
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c5
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c14
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c5
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c6
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c192
-rw-r--r--drivers/pinctrl/pinctrl-single.c3
-rw-r--r--drivers/pinctrl/pinctrl-st.c2
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c5
-rw-r--r--drivers/pinctrl/pinctrl-tz1090-pdc.c13
-rw-r--r--drivers/pinctrl/pinctrl-tz1090.c13
-rw-r--r--drivers/pinctrl/pinctrl-u300.c12
-rw-r--r--drivers/pinctrl/pinctrl-utils.c4
-rw-r--r--drivers/pinctrl/pinctrl-utils.h2
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c14
-rw-r--r--drivers/pinctrl/pxa/Kconfig10
-rw-r--r--drivers/pinctrl/pxa/Makefile1
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa25x.c274
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c10
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c9
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c9
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c11
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c11
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos5440.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c3
-rw-r--r--drivers/pinctrl/sh-pfc/core.c20
-rw-r--r--drivers/pinctrl/sh-pfc/core.h5
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c54
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7794.c217
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c218
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c124
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h19
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c11
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h1
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c6
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c6
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c6
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c6
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c6
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c185
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c13
-rw-r--r--drivers/pinctrl/tegra/Makefile2
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c35
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c36
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.h8
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra114.c3
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra124.c3
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra20.c4
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra210.c5
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra30.c3
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-core.c14
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c1
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c1
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c1
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c1
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c1
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c1
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier.h2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c7
-rw-r--r--drivers/platform/chrome/Kconfig10
-rw-r--r--drivers/platform/chrome/Makefile15
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c22
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c55
-rw-r--r--drivers/platform/chrome/cros_ec_dev.c15
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c10
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c4
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c122
-rw-r--r--drivers/platform/mips/Kconfig4
-rw-r--r--drivers/platform/mips/Makefile1
-rw-r--r--drivers/platform/mips/acpi_init.c150
-rw-r--r--drivers/platform/mips/cpu_hwmon.c10
-rw-r--r--drivers/platform/x86/Kconfig22
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acer-wmi.c16
-rw-r--r--drivers/platform/x86/asus-laptop.c15
-rw-r--r--drivers/platform/x86/asus-wmi.c5
-rw-r--r--drivers/platform/x86/eeepc-wmi.c24
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c63
-rw-r--r--drivers/platform/x86/ideapad-laptop.c21
-rw-r--r--drivers/platform/x86/intel_menlow.c49
-rw-r--r--drivers/platform/x86/intel_pmc_core.c200
-rw-r--r--drivers/platform/x86/intel_pmc_core.h51
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c6
-rw-r--r--drivers/platform/x86/intel_telemetry_core.c6
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c3
-rw-r--r--drivers/platform/x86/surfacepro3_button.c9
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c130
-rw-r--r--drivers/platform/x86/wmi.c104
-rw-r--r--drivers/pnp/pnpbios/Kconfig2
-rw-r--r--drivers/pnp/pnpbios/core.c3
-rw-r--r--drivers/power/avs/rockchip-io-domain.c10
-rw-r--r--drivers/power/ipaq_micro_battery.c2
-rw-r--r--drivers/power/max8925_power.c10
-rw-r--r--drivers/power/power_supply_core.c27
-rw-r--r--drivers/power/reset/Kconfig8
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c282
-rw-r--r--drivers/power/sbs-battery.c4
-rw-r--r--drivers/power/tps65217_charger.c6
-rw-r--r--drivers/powercap/intel_rapl.c71
-rw-r--r--drivers/pps/clients/pps_parport.c2
-rw-r--r--drivers/ptp/ptp_chardev.c12
-rw-r--r--drivers/pwm/core.c224
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c2
-rw-r--r--drivers/pwm/pwm-clps711x.c2
-rw-r--r--drivers/pwm/pwm-crc.c2
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c2
-rw-r--r--drivers/pwm/pwm-omap-dmtimer.c2
-rw-r--r--drivers/pwm/pwm-pxa.c2
-rw-r--r--drivers/pwm/pwm-rcar.c2
-rw-r--r--drivers/pwm/pwm-sun4i.c3
-rw-r--r--drivers/pwm/sysfs.c70
-rw-r--r--drivers/regulator/Kconfig17
-rw-r--r--drivers/regulator/Makefile8
-rw-r--r--drivers/regulator/act8865-regulator.c113
-rw-r--r--drivers/regulator/anatop-regulator.c2
-rw-r--r--drivers/regulator/as3722-regulator.c65
-rw-r--r--drivers/regulator/core.c268
-rw-r--r--drivers/regulator/fan53555.c24
-rw-r--r--drivers/regulator/helpers.c2
-rw-r--r--drivers/regulator/lp3971.c2
-rw-r--r--drivers/regulator/lp3972.c2
-rw-r--r--drivers/regulator/lp873x-regulator.c241
-rw-r--r--drivers/regulator/max14577-regulator.c (renamed from drivers/regulator/max14577.c)0
-rw-r--r--drivers/regulator/max77620-regulator.c90
-rw-r--r--drivers/regulator/max77686-regulator.c17
-rw-r--r--drivers/regulator/max77693-regulator.c (renamed from drivers/regulator/max77693.c)0
-rw-r--r--drivers/regulator/max77802-regulator.c2
-rw-r--r--drivers/regulator/max8973-regulator.c97
-rw-r--r--drivers/regulator/max8997-regulator.c (renamed from drivers/regulator/max8997.c)2
-rw-r--r--drivers/regulator/of_regulator.c6
-rw-r--r--drivers/regulator/palmas-regulator.c67
-rw-r--r--drivers/regulator/pv88080-regulator.c419
-rw-r--r--drivers/regulator/pv88080-regulator.h92
-rw-r--r--drivers/regulator/pwm-regulator.c78
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c275
-rw-r--r--drivers/regulator/rk808-regulator.c310
-rw-r--r--drivers/regulator/s2mps11.c13
-rw-r--r--drivers/regulator/tps51632-regulator.c9
-rw-r--r--drivers/regulator/tps6524x-regulator.c2
-rw-r--r--drivers/regulator/twl-regulator.c106
-rw-r--r--drivers/remoteproc/remoteproc_core.c39
-rw-r--r--drivers/remoteproc/remoteproc_internal.h1
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c2
-rw-r--r--drivers/reset/Kconfig3
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c217
-rw-r--r--drivers/reset/reset-lpc18xx.c22
-rw-r--r--drivers/reset/reset-oxnas.c136
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c8
-rw-r--r--drivers/rtc/Kconfig52
-rw-r--r--drivers/rtc/rtc-at91sam9.c2
-rw-r--r--drivers/rtc/rtc-cmos.c2
-rw-r--r--drivers/rtc/rtc-da9052.c13
-rw-r--r--drivers/rtc/rtc-ds1216.c3
-rw-r--r--drivers/rtc/rtc-ds1286.c3
-rw-r--r--drivers/rtc/rtc-ds1302.c348
-rw-r--r--drivers/rtc/rtc-ds1307.c23
-rw-r--r--drivers/rtc/rtc-ds1343.c2
-rw-r--r--drivers/rtc/rtc-ds1511.c3
-rw-r--r--drivers/rtc/rtc-ds1553.c3
-rw-r--r--drivers/rtc/rtc-ds1672.c5
-rw-r--r--drivers/rtc/rtc-ds1685.c4
-rw-r--r--drivers/rtc/rtc-ds1742.c3
-rw-r--r--drivers/rtc/rtc-ds3232.c9
-rw-r--r--drivers/rtc/rtc-ep93xx.c3
-rw-r--r--drivers/rtc/rtc-gemini.c1
-rw-r--r--drivers/rtc/rtc-hym8563.c2
-rw-r--r--drivers/rtc/rtc-isl12022.c5
-rw-r--r--drivers/rtc/rtc-isl1208.c6
-rw-r--r--drivers/rtc/rtc-m41t80.c447
-rw-r--r--drivers/rtc/rtc-m48t35.c3
-rw-r--r--drivers/rtc/rtc-m48t86.c4
-rw-r--r--drivers/rtc/rtc-max6900.c5
-rw-r--r--drivers/rtc/rtc-mc13xxx.c19
-rw-r--r--drivers/rtc/rtc-mrst.c2
-rw-r--r--drivers/rtc/rtc-mxc.c3
-rw-r--r--drivers/rtc/rtc-pcf2123.c4
-rw-r--r--drivers/rtc/rtc-pcf8563.c7
-rw-r--r--drivers/rtc/rtc-rs5c313.c2
-rw-r--r--drivers/rtc/rtc-rs5c348.c4
-rw-r--r--drivers/rtc/rtc-rs5c372.c18
-rw-r--r--drivers/rtc/rtc-rv3029c2.c596
-rw-r--r--drivers/rtc/rtc-rx8581.c5
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/rtc/rtc-snvs.c2
-rw-r--r--drivers/rtc/rtc-stk17ta8.c3
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c7
-rw-r--r--drivers/rtc/rtc-tps6586x.c2
-rw-r--r--drivers/rtc/rtc-x1205.c5
-rw-r--r--drivers/rtc/rtc-zynqmp.c74
-rw-r--r--drivers/s390/block/dasd.c92
-rw-r--r--drivers/s390/block/dasd_3990_erp.c20
-rw-r--r--drivers/s390/block/dasd_devmap.c27
-rw-r--r--drivers/s390/block/dasd_eckd.c699
-rw-r--r--drivers/s390/block/dasd_eckd.h34
-rw-r--r--drivers/s390/block/dasd_int.h17
-rw-r--r--drivers/s390/block/dasd_ioctl.c61
-rw-r--r--drivers/s390/block/dcssblk.c4
-rw-r--r--drivers/s390/char/Makefile2
-rw-r--r--drivers/s390/char/con3215.c22
-rw-r--r--drivers/s390/char/con3270.c3
-rw-r--r--drivers/s390/char/fs3270.c3
-rw-r--r--drivers/s390/char/raw3270.c131
-rw-r--r--drivers/s390/char/raw3270.h8
-rw-r--r--drivers/s390/char/sclp.h38
-rw-r--r--drivers/s390/char/sclp_cmd.c61
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c2
-rw-r--r--drivers/s390/char/sclp_early.c6
-rw-r--r--drivers/s390/char/sclp_pci.c193
-rw-r--r--drivers/s390/char/tty3270.c90
-rw-r--r--drivers/s390/crypto/ap_bus.c2
-rw-r--r--drivers/s390/net/ctcm_main.c6
-rw-r--r--drivers/s390/net/ctcm_mpc.c2
-rw-r--r--drivers/s390/net/netiucv.c2
-rw-r--r--drivers/s390/net/qeth_core_main.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c1
-rw-r--r--drivers/s390/net/qeth_l3_main.c1
-rw-r--r--drivers/s390/scsi/zfcp_unit.c3
-rw-r--r--drivers/sbus/char/openprom.c40
-rw-r--r--drivers/scsi/Kconfig16
-rw-r--r--drivers/scsi/NCR5380.c657
-rw-r--r--drivers/scsi/NCR5380.h143
-rw-r--r--drivers/scsi/aacraid/aachba.c22
-rw-r--r--drivers/scsi/aacraid/aacraid.h13
-rw-r--r--drivers/scsi/aacraid/comminit.c19
-rw-r--r--drivers/scsi/aacraid/commsup.c27
-rw-r--r--drivers/scsi/aacraid/dpcsup.c7
-rw-r--r--drivers/scsi/aacraid/linit.c11
-rw-r--r--drivers/scsi/aacraid/src.c3
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_seq.c2
-rw-r--r--drivers/scsi/arm/cumana_1.c25
-rw-r--r--drivers/scsi/arm/cumana_2.c2
-rw-r--r--drivers/scsi/arm/eesox.c2
-rw-r--r--drivers/scsi/arm/oak.c22
-rw-r--r--drivers/scsi/arm/powertec.c2
-rw-r--r--drivers/scsi/atari_NCR5380.c2676
-rw-r--r--drivers/scsi/atari_scsi.c144
-rw-r--r--drivers/scsi/bfa/bfa_fcs.h4
-rw-r--r--drivers/scsi/bfa/bfa_fcs_fcpim.c5
-rw-r--r--drivers/scsi/bfa/bfad_im.c5
-rw-r--r--drivers/scsi/bfa/bfi.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c100
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c14
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c4
-rw-r--r--drivers/scsi/constants.c859
-rw-r--r--drivers/scsi/cxlflash/superpipe.c15
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c33
-rw-r--r--drivers/scsi/dmx3191d.c10
-rw-r--r--drivers/scsi/dtc.c27
-rw-r--r--drivers/scsi/dtc.h7
-rw-r--r--drivers/scsi/eata_pio.c1
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c4
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c91
-rw-r--r--drivers/scsi/g_NCR5380.c141
-rw-r--r--drivers/scsi/g_NCR5380.h26
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h7
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c11
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c93
-rw-r--r--drivers/scsi/hpsa.c187
-rw-r--r--drivers/scsi/hpsa.h1
-rw-r--r--drivers/scsi/ipr.c1
-rw-r--r--drivers/scsi/isci/port.c2
-rw-r--r--drivers/scsi/isci/request.c5
-rw-r--r--drivers/scsi/iscsi_boot_sysfs.c62
-rw-r--r--drivers/scsi/iscsi_tcp.c12
-rw-r--r--drivers/scsi/libiscsi.c9
-rw-r--r--drivers/scsi/libsas/sas_ata.c7
-rw-r--r--drivers/scsi/lpfc/lpfc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c26
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c176
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h75
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h29
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c27
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c140
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c5
-rw-r--r--drivers/scsi/mac_scsi.c241
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c117
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c7
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2.h7
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h18
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_init.h15
-rw-r--r--drivers/scsi/mpt3sas/mpi/mpi2_ioc.h40
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c32
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h11
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c38
-rw-r--r--drivers/scsi/mvsas/mv_init.c19
-rw-r--r--drivers/scsi/mvsas/mv_sas.c5
-rw-r--r--drivers/scsi/pas16.c27
-rw-r--r--drivers/scsi/pas16.h5
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c5
-rw-r--r--drivers/scsi/qla2xxx/Kconfig9
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c56
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c59
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h1
-rw-r--r--drivers/scsi/scsi.c3
-rw-r--r--drivers/scsi/scsi_common.c53
-rw-r--r--drivers/scsi/scsi_debug.c2801
-rw-r--r--drivers/scsi/scsi_devinfo.c10
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/scsi/scsi_lib.c188
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_proc.c3
-rw-r--r--drivers/scsi/scsi_scan.c44
-rw-r--r--drivers/scsi/scsi_sysfs.c3
-rw-r--r--drivers/scsi/scsi_trace.c161
-rw-r--r--drivers/scsi/scsi_transport_fc.c9
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c19
-rw-r--r--drivers/scsi/scsi_transport_sas.c7
-rw-r--r--drivers/scsi/sd.c19
-rw-r--r--drivers/scsi/sense_codes.h826
-rw-r--r--drivers/scsi/snic/snic.h5
-rw-r--r--drivers/scsi/snic/snic_ctl.c8
-rw-r--r--drivers/scsi/snic/snic_debugfs.c20
-rw-r--r--drivers/scsi/snic/snic_disc.c19
-rw-r--r--drivers/scsi/snic/snic_fwint.h4
-rw-r--r--drivers/scsi/snic/snic_io.c62
-rw-r--r--drivers/scsi/snic/snic_isr.c6
-rw-r--r--drivers/scsi/snic/snic_main.c44
-rw-r--r--drivers/scsi/snic/snic_scsi.c56
-rw-r--r--drivers/scsi/snic/snic_stats.h12
-rw-r--r--drivers/scsi/snic/vnic_dev.c44
-rw-r--r--drivers/scsi/st.c9
-rw-r--r--drivers/scsi/sun3_scsi.c47
-rw-r--r--drivers/scsi/t128.c19
-rw-r--r--drivers/scsi/t128.h7
-rw-r--r--drivers/soc/Makefile3
-rw-r--r--drivers/soc/brcmstb/Kconfig1
-rw-r--r--drivers/soc/brcmstb/common.c66
-rw-r--r--drivers/soc/fsl/qe/gpio.c20
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c544
-rw-r--r--drivers/soc/qcom/smd-rpm.c9
-rw-r--r--drivers/soc/qcom/smd.c247
-rw-r--r--drivers/soc/qcom/smem.c3
-rw-r--r--drivers/soc/qcom/spm.c10
-rw-r--r--drivers/soc/qcom/wcnss_ctrl.c8
-rw-r--r--drivers/soc/renesas/Makefile7
-rw-r--r--drivers/soc/renesas/r8a7779-sysc.c34
-rw-r--r--drivers/soc/renesas/r8a7790-sysc.c48
-rw-r--r--drivers/soc/renesas/r8a7791-sysc.c33
-rw-r--r--drivers/soc/renesas/r8a7794-sysc.c33
-rw-r--r--drivers/soc/renesas/r8a7795-sysc.c56
-rw-r--r--drivers/soc/renesas/rcar-sysc.c401
-rw-r--r--drivers/soc/renesas/rcar-sysc.h58
-rw-r--r--drivers/soc/rockchip/pm_domains.c247
-rw-r--r--drivers/soc/tegra/Kconfig2
-rw-r--r--drivers/soc/tegra/pmc.c613
-rw-r--r--drivers/soc/versatile/soc-realview.c19
-rw-r--r--drivers/spi/Kconfig21
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/spi-axi-spi-engine.c1
-rw-r--r--drivers/spi/spi-bcm53xx.c78
-rw-r--r--drivers/spi/spi-cadence.c244
-rw-r--r--drivers/spi/spi-davinci.c76
-rw-r--r--drivers/spi/spi-dln2.c2
-rw-r--r--drivers/spi/spi-dw-pci.c2
-rw-r--r--drivers/spi/spi-ep93xx.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c7
-rw-r--r--drivers/spi/spi-fsl-espi.c30
-rw-r--r--drivers/spi/spi-octeon.c17
-rw-r--r--drivers/spi/spi-omap2-mcspi.c83
-rw-r--r--drivers/spi/spi-pic32-sqi.c727
-rw-r--r--drivers/spi/spi-pic32.c878
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c28
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c12
-rw-r--r--drivers/spi/spi-pxa2xx.c15
-rw-r--r--drivers/spi/spi-pxa2xx.h3
-rw-r--r--drivers/spi/spi-qup.c15
-rw-r--r--drivers/spi/spi-rockchip.c13
-rw-r--r--drivers/spi/spi-st-ssc4.c8
-rw-r--r--drivers/spi/spi-sun4i.c23
-rw-r--r--drivers/spi/spi-sun6i.c10
-rw-r--r--drivers/spi/spi-ti-qspi.c7
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c3
-rw-r--r--drivers/spi/spi.c7
-rw-r--r--drivers/spmi/spmi.c12
-rw-r--r--drivers/ssb/driver_gpio.c33
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/Kconfig17
-rw-r--r--drivers/staging/android/Makefile2
-rw-r--r--drivers/staging/android/ion/ion.c16
-rw-r--r--drivers/staging/android/ion/ion_chunk_heap.c4
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c2
-rw-r--r--drivers/staging/android/ion/ion_test.c2
-rw-r--r--drivers/staging/android/lowmemorykiller.c9
-rw-r--r--drivers/staging/android/sync.c356
-rw-r--r--drivers/staging/android/sync.h91
-rw-r--r--drivers/staging/android/sync_debug.c8
-rw-r--r--drivers/staging/android/timed_gpio.c166
-rw-r--r--drivers/staging/android/timed_gpio.h33
-rw-r--r--drivers/staging/android/timed_output.c110
-rw-r--r--drivers/staging/android/timed_output.h37
-rw-r--r--drivers/staging/android/uapi/sync.h84
-rw-r--r--drivers/staging/board/armadillo800eva.c8
-rw-r--r--drivers/staging/comedi/comedi_buf.c10
-rw-r--r--drivers/staging/comedi/comedi_fops.c54
-rw-r--r--drivers/staging/comedi/comedidev.h4
-rw-r--r--drivers/staging/comedi/drivers.c40
-rw-r--r--drivers/staging/comedi/drivers/amcc_s5933.h24
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_common.c12
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c104
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c71
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c189
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci263.c86
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c4
-rw-r--r--drivers/staging/comedi/drivers/comedi_8254.h14
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c2
-rw-r--r--drivers/staging/comedi/drivers/das1800.c1365
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c119
-rw-r--r--drivers/staging/comedi/drivers/mite.c1113
-rw-r--r--drivers/staging/comedi/drivers/mite.h329
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c1174
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.h33
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_common.c65
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c95
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_pci.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_regs.h82
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_c_common.c0
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c981
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c37
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c36
-rw-r--r--drivers/staging/comedi/drivers/ni_stc.h56
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c807
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.h66
-rw-r--r--drivers/staging/comedi/drivers/ni_tio_internal.h322
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c127
-rw-r--r--drivers/staging/comedi/drivers/plx9052.h122
-rw-r--r--drivers/staging/comedi/drivers/plx9080.h2
-rw-r--r--drivers/staging/comedi/drivers/z8536.h89
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c2
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c52
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h23
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c28
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c131
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.c22
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c279
-rw-r--r--drivers/staging/dgnc/digi.h4
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c24
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.h40
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c2
-rw-r--r--drivers/staging/fbtft/fbtft-io.c8
-rw-r--r--drivers/staging/fbtft/fbtft_device.c6
-rw-r--r--drivers/staging/fsl-mc/README.txt138
-rw-r--r--drivers/staging/fsl-mc/TODO13
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp.c77
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp-cmd.h7
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.c35
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.h10
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-cmd.h6
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c33
-rw-r--r--drivers/staging/fsl-mc/bus/dprc.c26
-rw-r--r--drivers/staging/fsl-mc/bus/mc-allocator.c79
-rw-r--r--drivers/staging/fsl-mc/bus/mc-bus.c90
-rw-r--r--drivers/staging/fsl-mc/bus/mc-msi.c14
-rw-r--r--drivers/staging/fsl-mc/include/dpbp-cmd.h4
-rw-r--r--drivers/staging/fsl-mc/include/dpbp.h51
-rw-r--r--drivers/staging/fsl-mc/include/dprc.h19
-rw-r--r--drivers/staging/fsl-mc/include/mc-private.h2
-rw-r--r--drivers/staging/fwserial/dma_fifo.c8
-rw-r--r--drivers/staging/fwserial/dma_fifo.h16
-rw-r--r--drivers/staging/fwserial/fwserial.c44
-rw-r--r--drivers/staging/fwserial/fwserial.h42
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c5
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c6
-rw-r--r--drivers/staging/gdm724x/hci_packet.h2
-rw-r--r--drivers/staging/gdm724x/netlink_k.c3
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c8
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.h2
-rw-r--r--drivers/staging/gs_fpgaboot/io.c1
-rw-r--r--drivers/staging/i4l/act2000/act2000_isa.c24
-rw-r--r--drivers/staging/i4l/pcbit/capi.h2
-rw-r--r--drivers/staging/i4l/pcbit/drv.c8
-rw-r--r--drivers/staging/i4l/pcbit/edss1.c2
-rw-r--r--drivers/staging/i4l/pcbit/layer2.h2
-rw-r--r--drivers/staging/iio/accel/Kconfig23
-rw-r--r--drivers/staging/iio/accel/Makefile6
-rw-r--r--drivers/staging/iio/accel/adis16201.h156
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c1
-rw-r--r--drivers/staging/iio/accel/adis16203.h132
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c1
-rw-r--r--drivers/staging/iio/accel/adis16204.h68
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c253
-rw-r--r--drivers/staging/iio/accel/adis16209.h39
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c1
-rw-r--r--drivers/staging/iio/accel/adis16220.h140
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c494
-rw-r--r--drivers/staging/iio/accel/adis16240.h50
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c5
-rw-r--r--drivers/staging/iio/adc/ad7192.c50
-rw-r--r--drivers/staging/iio/adc/ad7280a.c40
-rw-r--r--drivers/staging/iio/adc/ad7280a.h8
-rw-r--r--drivers/staging/iio/adc/ad7606.h28
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c18
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c3
-rw-r--r--drivers/staging/iio/adc/ad7780.c2
-rw-r--r--drivers/staging/iio/frequency/ad9832.c2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c51
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.h28
-rw-r--r--drivers/staging/iio/light/isl29028.c55
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c211
-rw-r--r--drivers/staging/iio/meter/ade7753.c4
-rw-r--r--drivers/staging/iio/meter/ade7754.c4
-rw-r--r--drivers/staging/iio/meter/ade7758.h16
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c77
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c4
-rw-r--r--drivers/staging/iio/meter/ade7759.c4
-rw-r--r--drivers/staging/iio/meter/ade7854.c3
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.h8
-rw-r--r--drivers/staging/iio/trigger/iio-trig-bfin-timer.c15
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h51
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h79
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h136
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h18
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h15
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h161
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h31
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h75
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h12
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h80
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h4
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-dlc.h29
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h9
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c405
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h134
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c101
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c139
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c3
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c126
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c3
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c54
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c28
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c9
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c283
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c154
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c31
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c132
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c17
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c12
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c143
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c10
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c7
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c82
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c52
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c215
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h40
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c282
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h47
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c270
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c44
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c133
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h156
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h204
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c12
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c12
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c3
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h9
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c94
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h978
-rw-r--r--drivers/staging/lustre/lustre/include/lclient.h408
-rw-r--r--drivers/staging/lustre/lustre/include/linux/obd.h125
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h75
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h112
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h54
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_cfg.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h14
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h120
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h22
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h60
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h18
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_param.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h3
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h77
-rw-r--r--drivers/staging/lustre/lustre/include/obd_cksum.h1
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h5
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h4
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c1203
-rw-r--r--drivers/staging/lustre/lustre/ldlm/l_lock.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c30
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h19
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c14
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c115
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c28
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c163
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c19
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c15
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c99
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c277
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c (renamed from drivers/staging/lustre/lustre/lclient/glimpse.c)87
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c327
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c (renamed from drivers/staging/lustre/lustre/lclient/lcommon_misc.c)45
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c71
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h274
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c176
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c48
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c29
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c33
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c143
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c367
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c318
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c17
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c14
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c270
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h332
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c928
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_lock.c53
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c141
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c211
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_req.c121
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c45
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c182
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h105
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c15
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c5
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h34
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c246
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c996
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c11
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c26
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c54
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c12
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c8
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c183
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c62
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c11
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c9
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c386
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c7
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_page.c4
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c8
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c24
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c5
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c26
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c12
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c430
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c2086
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c303
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c659
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c72
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c9
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c26
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c15
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c3
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c173
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c68
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c531
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h159
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h27
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c283
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c1698
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c38
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c544
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c423
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c11
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c12
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c31
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c11
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c3
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c21
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c52
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c12
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c2
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c2
-rw-r--r--drivers/staging/media/omap1/omap1_camera.c68
-rw-r--r--drivers/staging/media/omap4iss/iss.c2
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c2
-rw-r--r--drivers/staging/media/tw686x-kh/Kconfig17
-rw-r--r--drivers/staging/media/tw686x-kh/Makefile3
-rw-r--r--drivers/staging/media/tw686x-kh/TODO6
-rw-r--r--drivers/staging/media/tw686x-kh/tw686x-kh-core.c140
-rw-r--r--drivers/staging/media/tw686x-kh/tw686x-kh-regs.h103
-rw-r--r--drivers/staging/media/tw686x-kh/tw686x-kh-video.c821
-rw-r--r--drivers/staging/media/tw686x-kh/tw686x-kh.h118
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_errors.h8
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hal.h14
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_reg.h8
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c49
-rw-r--r--drivers/staging/netlogic/xlr_net.c2
-rw-r--r--drivers/staging/nvec/nvec.c11
-rw-r--r--drivers/staging/nvec/nvec_power.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c7
-rw-r--r--drivers/staging/octeon/ethernet-rx.h2
-rw-r--r--drivers/staging/octeon/ethernet-tx.c15
-rw-r--r--drivers/staging/octeon/ethernet.c4
-rw-r--r--drivers/staging/rdma/Kconfig27
-rw-r--r--drivers/staging/rdma/Makefile2
-rw-r--r--drivers/staging/rdma/hfi1/TODO6
-rw-r--r--drivers/staging/rdma/hfi1/diag.c1924
-rw-r--r--drivers/staging/rdma/hfi1/eprom.c471
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c49
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c13
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c49
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_rf.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sreset.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/bb_cfg.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/fw.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_com.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_intf.c7
-rw-r--r--drivers/staging/rtl8188eu/hal/mac_cfg.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_HWConfig.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RTL8188E.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/pwrseq.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/pwrseqcmd.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c9
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_led.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c73
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h5
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h5
-rw-r--r--drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h5
-rw-r--r--drivers/staging/rtl8188eu/include/HalVerDef.h5
-rw-r--r--drivers/staging/rtl8188eu/include/basic_types.h5
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h5
-rw-r--r--drivers/staging/rtl8188eu/include/fw.h4
-rw-r--r--drivers/staging/rtl8188eu/include/hal_com.h5
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h5
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h5
-rw-r--r--drivers/staging/rtl8188eu/include/mlme_osdep.h5
-rw-r--r--drivers/staging/rtl8188eu/include/mp_custom_oid.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_HWConfig.h4
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RTL8188E.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RegDefine11N.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_debug.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_precomp.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_reg.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_types.h5
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_intf.h5
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h5
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseq.h5
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseqcmd.h5
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_cmd.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_dm.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_led.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_xmit.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_android.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ap.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_eeprom.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_efuse.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_event.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ht.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_set.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_iol.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_qos.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_rf.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_security.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_sreset.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h5
-rw-r--r--drivers/staging/rtl8188eu/include/sta_info.h5
-rw-r--r--drivers/staging/rtl8188eu/include/usb_hal.h5
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops_linux.h5
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h5
-rw-r--r--drivers/staging/rtl8188eu/include/wlan_bssdef.h5
-rw-r--r--drivers/staging/rtl8188eu/include/xmit_osdep.h5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c13
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mlme_linux.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mon.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c7
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c4
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c5
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c8
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c2
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c77
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c22
-rw-r--r--drivers/staging/rtl8712/basic_types.h4
-rw-r--r--drivers/staging/rtl8712/drv_types.h4
-rw-r--r--drivers/staging/rtl8712/ethernet.h4
-rw-r--r--drivers/staging/rtl8712/hal_init.c25
-rw-r--r--drivers/staging/rtl8712/ieee80211.c4
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c2
-rw-r--r--drivers/staging/rtl8712/os_intfs.c4
-rw-r--r--drivers/staging/rtl8712/osdep_service.h3
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c18
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c10
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c8
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c80
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c16
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c16
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c2
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c2
-rw-r--r--drivers/staging/rtl8723au/Kconfig7
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ap.c3
-rw-r--r--drivers/staging/rtl8723au/core/rtw_mlme_ext.c4
-rw-r--r--drivers/staging/rtl8723au/core/rtw_recv.c25
-rw-r--r--drivers/staging/rtl8723au/core/rtw_wlan_util.c10
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c2
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c2
-rw-r--r--drivers/staging/rtl8723au/include/ieee80211.h2
-rw-r--r--drivers/staging/rtl8723au/include/rtw_mlme_ext.h2
-rw-r--r--drivers/staging/rtl8723au/include/rtw_recv.h2
-rw-r--r--drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c54
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_intf.c5
-rw-r--r--drivers/staging/rts5208/ms.c16
-rw-r--r--drivers/staging/rts5208/rtsx_card.c21
-rw-r--r--drivers/staging/rts5208/rtsx_card.h2
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c35
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h3
-rw-r--r--drivers/staging/rts5208/sd.c16
-rw-r--r--drivers/staging/skein/skein_api.c3
-rw-r--r--drivers/staging/skein/skein_base.c90
-rw-r--r--drivers/staging/skein/skein_base.h45
-rw-r--r--drivers/staging/skein/skein_block.c92
-rw-r--r--drivers/staging/skein/skein_generic.c6
-rw-r--r--drivers/staging/skein/threefish_api.h2
-rw-r--r--drivers/staging/skein/threefish_block.c2144
-rw-r--r--drivers/staging/slicoss/slicoss.c8
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c2
-rw-r--r--drivers/staging/speakup/main.c6
-rw-r--r--drivers/staging/speakup/selection.c2
-rw-r--r--drivers/staging/speakup/serialio.h3
-rw-r--r--drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset14
-rw-r--r--drivers/staging/unisys/Documentation/overview.txt19
-rw-r--r--drivers/staging/unisys/Documentation/proc-entries.txt93
-rw-r--r--drivers/staging/unisys/MAINTAINERS1
-rw-r--r--drivers/staging/unisys/include/channel.h10
-rw-r--r--drivers/staging/unisys/include/iochannel.h42
-rw-r--r--drivers/staging/unisys/include/visorbus.h127
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c394
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c5
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c444
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c114
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c24
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c223
-rw-r--r--drivers/staging/vme/devices/vme_pio2_gpio.c22
-rw-r--r--drivers/staging/vt6655/baseband.c24
-rw-r--r--drivers/staging/vt6655/baseband.h6
-rw-r--r--drivers/staging/vt6655/card.c95
-rw-r--r--drivers/staging/vt6655/card.h9
-rw-r--r--drivers/staging/vt6655/channel.c4
-rw-r--r--drivers/staging/vt6655/desc.h3
-rw-r--r--drivers/staging/vt6655/device_main.c4
-rw-r--r--drivers/staging/vt6655/mac.c15
-rw-r--r--drivers/staging/vt6655/rxtx.c2
-rw-r--r--drivers/staging/vt6655/srom.c9
-rw-r--r--drivers/staging/vt6656/baseband.c26
-rw-r--r--drivers/staging/vt6656/channel.c4
-rw-r--r--drivers/staging/vt6656/int.c2
-rw-r--r--drivers/staging/vt6656/main_usb.c8
-rw-r--r--drivers/staging/vt6656/rxtx.c2
-rw-r--r--drivers/staging/vt6656/wcmd.c8
-rw-r--r--drivers/staging/wilc1000/Kconfig1
-rw-r--r--drivers/staging/wilc1000/host_interface.c438
-rw-r--r--drivers/staging/wilc1000/host_interface.h8
-rw-r--r--drivers/staging/wilc1000/linux_mon.c24
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c98
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c3
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c81
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h15
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c53
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h6
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.c7
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h21
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c10
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c8
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c5
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c6
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h1
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c28
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c2
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c5
-rw-r--r--drivers/staging/xgifb/vb_init.c16
-rw-r--r--drivers/staging/xgifb/vb_setmode.c22
-rw-r--r--drivers/staging/xgifb/vb_table.h135
-rw-r--r--drivers/staging/xgifb/vb_util.h8
-rw-r--r--drivers/target/iscsi/Kconfig2
-rw-r--r--drivers/target/iscsi/Makefile1
-rw-r--r--drivers/target/iscsi/cxgbit/Kconfig7
-rw-r--r--drivers/target/iscsi/cxgbit/Makefile6
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit.h353
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c2086
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_ddp.c325
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_lro.h72
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c702
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c1561
-rw-r--r--drivers/target/iscsi/iscsi_target.c701
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c158
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c19
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c10
-rw-r--r--drivers/target/loopback/tcm_loop.c12
-rw-r--r--drivers/target/sbp/sbp_target.c12
-rw-r--r--drivers/target/target_core_alua.c6
-rw-r--r--drivers/target/target_core_configfs.c70
-rw-r--r--drivers/target/target_core_iblock.c6
-rw-r--r--drivers/target/target_core_internal.h6
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/target/target_core_rd.c4
-rw-r--r--drivers/target/target_core_tpg.c83
-rw-r--r--drivers/target/target_core_transport.c58
-rw-r--r--drivers/target/target_core_xcopy.c2
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h1
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c1
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c12
-rw-r--r--drivers/thermal/Kconfig59
-rw-r--r--drivers/thermal/Makefile4
-rw-r--r--drivers/thermal/gov_bang_bang.c8
-rw-r--r--drivers/thermal/hisi_thermal.c45
-rw-r--r--drivers/thermal/int340x_thermal/Kconfig42
-rw-r--r--drivers/thermal/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/int340x_thermal/int3406_thermal.c236
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c108
-rw-r--r--drivers/thermal/intel_powerclamp.c47
-rw-r--r--drivers/thermal/mtk_thermal.c12
-rw-r--r--drivers/thermal/of-thermal.c10
-rw-r--r--drivers/thermal/qcom-spmi-temp-alarm.c3
-rw-r--r--drivers/thermal/rcar_thermal.c2
-rw-r--r--drivers/thermal/rockchip_thermal.c280
-rw-r--r--drivers/thermal/tango_thermal.c109
-rw-r--r--drivers/thermal/tegra/Kconfig13
-rw-r--r--drivers/thermal/tegra/Makefile6
-rw-r--r--drivers/thermal/tegra/soctherm-fuse.c169
-rw-r--r--drivers/thermal/tegra/soctherm.c685
-rw-r--r--drivers/thermal/tegra/soctherm.h127
-rw-r--r--drivers/thermal/tegra/tegra124-soctherm.c196
-rw-r--r--drivers/thermal/tegra/tegra132-soctherm.c196
-rw-r--r--drivers/thermal/tegra/tegra210-soctherm.c197
-rw-r--r--drivers/thermal/tegra_soctherm.c476
-rw-r--r--drivers/thermal/thermal-generic-adc.c182
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c5
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c2
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/eeprom.c7
-rw-r--r--drivers/thunderbolt/nhi.c19
-rw-r--r--drivers/thunderbolt/switch.c20
-rw-r--r--drivers/thunderbolt/tb.c2
-rw-r--r--drivers/thunderbolt/tb.h2
-rw-r--r--drivers/thunderbolt/tb_regs.h2
-rw-r--r--drivers/tty/Kconfig11
-rw-r--r--drivers/tty/amiserial.c39
-rw-r--r--drivers/tty/cyclades.c38
-rw-r--r--drivers/tty/hvc/hvc_console.c4
-rw-r--r--drivers/tty/hvc/hvcs.c2
-rw-r--r--drivers/tty/hvc/hvsi.c2
-rw-r--r--drivers/tty/ipwireless/hardware.c5
-rw-r--r--drivers/tty/isicom.c19
-rw-r--r--drivers/tty/moxa.c12
-rw-r--r--drivers/tty/mxser.c35
-rw-r--r--drivers/tty/n_gsm.c10
-rw-r--r--drivers/tty/nozomi.c2
-rw-r--r--drivers/tty/pty.c22
-rw-r--r--drivers/tty/rocket.c16
-rw-r--r--drivers/tty/serial/8250/8250.h15
-rw-r--r--drivers/tty/serial/8250/8250_core.c3
-rw-r--r--drivers/tty/serial/8250/8250_dma.c68
-rw-r--r--drivers/tty/serial/8250/8250_dw.c8
-rw-r--r--drivers/tty/serial/8250/8250_fintek.c118
-rw-r--r--drivers/tty/serial/8250/8250_mid.c9
-rw-r--r--drivers/tty/serial/8250/8250_of.c2
-rw-r--r--drivers/tty/serial/8250/8250_omap.c93
-rw-r--r--drivers/tty/serial/8250/8250_pci.c20
-rw-r--r--drivers/tty/serial/8250/8250_port.c38
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c2
-rw-r--r--drivers/tty/serial/8250/Kconfig24
-rw-r--r--drivers/tty/serial/8250/Makefile2
-rw-r--r--drivers/tty/serial/Kconfig41
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/amba-pl011.c3
-rw-r--r--drivers/tty/serial/crisv10.c30
-rw-r--r--drivers/tty/serial/ifx6x60.c2
-rw-r--r--drivers/tty/serial/imx.c174
-rw-r--r--drivers/tty/serial/max310x.c12
-rw-r--r--drivers/tty/serial/meson_uart.c42
-rw-r--r--drivers/tty/serial/mps2-uart.c625
-rw-r--r--drivers/tty/serial/msm_serial.c101
-rw-r--r--drivers/tty/serial/mvebu-uart.c31
-rw-r--r--drivers/tty/serial/mxs-auart.c644
-rw-r--r--drivers/tty/serial/pic32_uart.c960
-rw-r--r--drivers/tty/serial/pic32_uart.h126
-rw-r--r--drivers/tty/serial/sc16is7xx.c29
-rw-r--r--drivers/tty/serial/serial-tegra.c2
-rw-r--r--drivers/tty/serial/serial_core.c430
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c7
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.h6
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c39
-rw-r--r--drivers/tty/serial/sprd_serial.c2
-rw-r--r--drivers/tty/serial/uartlite.c12
-rw-r--r--drivers/tty/synclink.c78
-rw-r--r--drivers/tty/synclink_gt.c45
-rw-r--r--drivers/tty/synclinkmp.c45
-rw-r--r--drivers/tty/tty_io.c9
-rw-r--r--drivers/tty/tty_ioctl.c4
-rw-r--r--drivers/tty/tty_port.c27
-rw-r--r--drivers/tty/vt/selection.c2
-rw-r--r--drivers/tty/vt/vt.c110
-rw-r--r--drivers/uio/uio.c16
-rw-r--r--drivers/usb/Kconfig3
-rw-r--r--drivers/usb/atm/ueagle-atm.c10
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c4
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/common/usb-otg-fsm.c10
-rw-r--r--drivers/usb/core/buffer.c3
-rw-r--r--drivers/usb/core/devio.c11
-rw-r--r--drivers/usb/core/hcd.c1
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/core/message.c48
-rw-r--r--drivers/usb/core/usb.c1
-rw-r--r--drivers/usb/dwc2/gadget.c32
-rw-r--r--drivers/usb/dwc2/hcd.c1
-rw-r--r--drivers/usb/dwc2/hcd.h1
-rw-r--r--drivers/usb/dwc2/hcd_queue.c3
-rw-r--r--drivers/usb/dwc2/platform.c2
-rw-r--r--drivers/usb/dwc3/core.c118
-rw-r--r--drivers/usb/dwc3/core.h85
-rw-r--r--drivers/usb/dwc3/debug.h6
-rw-r--r--drivers/usb/dwc3/debugfs.c358
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c9
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c18
-rw-r--r--drivers/usb/dwc3/dwc3-st.c16
-rw-r--r--drivers/usb/dwc3/ep0.c43
-rw-r--r--drivers/usb/dwc3/gadget.c457
-rw-r--r--drivers/usb/dwc3/gadget.h6
-rw-r--r--drivers/usb/dwc3/platform_data.h2
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/composite.c43
-rw-r--r--drivers/usb/gadget/configfs.c1
-rw-r--r--drivers/usb/gadget/function/f_fs.c30
-rw-r--r--drivers/usb/gadget/function/f_printer.c8
-rw-r--r--drivers/usb/gadget/function/f_tcm.c31
-rw-r--r--drivers/usb/gadget/function/f_uac2.c13
-rw-r--r--drivers/usb/gadget/function/storage_common.c4
-rw-r--r--drivers/usb/gadget/function/u_ether.c2
-rw-r--r--drivers/usb/gadget/function/u_serial.c4
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c5
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c175
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c2
-rw-r--r--drivers/usb/gadget/udc/udc-core.c36
-rw-r--r--drivers/usb/host/Kconfig9
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/bcma-hcd.c6
-rw-r--r--drivers/usb/host/ehci-dbg.c86
-rw-r--r--drivers/usb/host/ehci-exynos.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c9
-rw-r--r--drivers/usb/host/ehci-hub.c14
-rw-r--r--drivers/usb/host/ehci-msm.c16
-rw-r--r--drivers/usb/host/ehci-omap.c2
-rw-r--r--drivers/usb/host/ehci-spear.c2
-rw-r--r--drivers/usb/host/ehci-st.c6
-rw-r--r--drivers/usb/host/ehci-tegra.c16
-rw-r--r--drivers/usb/host/fhci-sched.c2
-rw-r--r--drivers/usb/host/fotg210-hcd.c8
-rw-r--r--drivers/usb/host/ohci-hcd.c5
-rw-r--r--drivers/usb/host/ohci-jz4740.c245
-rw-r--r--drivers/usb/host/ohci-q.c3
-rw-r--r--drivers/usb/host/ohci-st.c6
-rw-r--r--drivers/usb/host/whci/hcd.c7
-rw-r--r--drivers/usb/host/whci/qset.c8
-rw-r--r--drivers/usb/host/xhci-mvebu.c7
-rw-r--r--drivers/usb/host/xhci-mvebu.h7
-rw-r--r--drivers/usb/host/xhci-plat.c60
-rw-r--r--drivers/usb/host/xhci-plat.h20
-rw-r--r--drivers/usb/host/xhci-rcar.c34
-rw-r--r--drivers/usb/host/xhci-ring.c478
-rw-r--r--drivers/usb/host/xhci-tegra.c1331
-rw-r--r--drivers/usb/host/xhci.c43
-rw-r--r--drivers/usb/host/xhci.h14
-rw-r--r--drivers/usb/isp1760/isp1760-if.c2
-rw-r--r--drivers/usb/misc/Kconfig26
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c4
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_con.c2
-rw-r--r--drivers/usb/misc/ucsi.c478
-rw-r--r--drivers/usb/misc/ucsi.h215
-rw-r--r--drivers/usb/misc/usbtest.c3
-rw-r--r--drivers/usb/musb/musb_core.c82
-rw-r--r--drivers/usb/musb/musb_core.h3
-rw-r--r--drivers/usb/musb/musb_gadget.c34
-rw-r--r--drivers/usb/musb/musb_host.c45
-rw-r--r--drivers/usb/musb/omap2430.c257
-rw-r--r--drivers/usb/musb/sunxi.c54
-rw-r--r--drivers/usb/phy/phy-qcom-8x16-usb.c5
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c39
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c16
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c9
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c3
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c30
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h6
-rw-r--r--drivers/usb/serial/console.c4
-rw-r--r--drivers/usb/serial/cp210x.c101
-rw-r--r--drivers/usb/serial/digi_acceleport.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c16
-rw-r--r--drivers/usb/serial/generic.c6
-rw-r--r--drivers/usb/serial/keyspan.c68
-rw-r--r--drivers/usb/serial/mxuport.c6
-rw-r--r--drivers/usb/serial/sierra.c4
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c50
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.h8
-rw-r--r--drivers/usb/serial/usb-serial.c7
-rw-r--r--drivers/usb/serial/usb_wwan.c4
-rw-r--r--drivers/usb/storage/alauda.c22
-rw-r--r--drivers/usb/storage/cypress_atacb.c34
-rw-r--r--drivers/usb/storage/datafab.c22
-rw-r--r--drivers/usb/storage/debug.c3
-rw-r--r--drivers/usb/storage/debug.h3
-rw-r--r--drivers/usb/storage/ene_ub6250.c25
-rw-r--r--drivers/usb/storage/freecom.c75
-rw-r--r--drivers/usb/storage/initializers.c15
-rw-r--r--drivers/usb/storage/initializers.h15
-rw-r--r--drivers/usb/storage/isd200.c51
-rw-r--r--drivers/usb/storage/jumpshot.c22
-rw-r--r--drivers/usb/storage/karma.c3
-rw-r--r--drivers/usb/storage/option_ms.c6
-rw-r--r--drivers/usb/storage/protocol.c12
-rw-r--r--drivers/usb/storage/protocol.h3
-rw-r--r--drivers/usb/storage/realtek_cr.c12
-rw-r--r--drivers/usb/storage/scsiglue.c171
-rw-r--r--drivers/usb/storage/scsiglue.h3
-rw-r--r--drivers/usb/storage/sddr09.c82
-rw-r--r--drivers/usb/storage/sddr55.c45
-rw-r--r--drivers/usb/storage/shuttle_usbat.c16
-rw-r--r--drivers/usb/storage/sierra_ms.c3
-rw-r--r--drivers/usb/storage/transport.c165
-rw-r--r--drivers/usb/storage/transport.h3
-rw-r--r--drivers/usb/storage/uas.c4
-rw-r--r--drivers/usb/storage/unusual_alauda.h3
-rw-r--r--drivers/usb/storage/unusual_cypress.h3
-rw-r--r--drivers/usb/storage/unusual_datafab.h6
-rw-r--r--drivers/usb/storage/unusual_devs.h334
-rw-r--r--drivers/usb/storage/unusual_freecom.h3
-rw-r--r--drivers/usb/storage/unusual_isd200.h3
-rw-r--r--drivers/usb/storage/unusual_jumpshot.h3
-rw-r--r--drivers/usb/storage/unusual_karma.h3
-rw-r--r--drivers/usb/storage/unusual_onetouch.h6
-rw-r--r--drivers/usb/storage/unusual_realtek.h3
-rw-r--r--drivers/usb/storage/unusual_sddr09.h3
-rw-r--r--drivers/usb/storage/unusual_sddr55.h3
-rw-r--r--drivers/usb/storage/unusual_uas.h3
-rw-r--r--drivers/usb/storage/unusual_usbat.h3
-rw-r--r--drivers/usb/storage/usb.c98
-rw-r--r--drivers/usb/storage/usb.h14
-rw-r--r--drivers/usb/storage/usual-tables.c3
-rw-r--r--drivers/usb/usbip/Kconfig17
-rw-r--r--drivers/usb/usbip/Makefile3
-rw-r--r--drivers/usb/usbip/stub.h1
-rw-r--r--drivers/usb/usbip/stub_dev.c7
-rw-r--r--drivers/usb/usbip/stub_rx.c19
-rw-r--r--drivers/usb/usbip/stub_tx.c11
-rw-r--r--drivers/usb/usbip/usbip_common.c17
-rw-r--r--drivers/usb/usbip/usbip_common.h14
-rw-r--r--drivers/usb/usbip/usbip_event.c168
-rw-r--r--drivers/usb/usbip/vhci_hcd.c2
-rw-r--r--drivers/usb/usbip/vudc.h190
-rw-r--r--drivers/usb/usbip/vudc_dev.c661
-rw-r--r--drivers/usb/usbip/vudc_main.c113
-rw-r--r--drivers/usb/usbip/vudc_rx.c234
-rw-r--r--drivers/usb/usbip/vudc_sysfs.c229
-rw-r--r--drivers/usb/usbip/vudc_transfer.c506
-rw-r--r--drivers/usb/usbip/vudc_tx.c289
-rw-r--r--drivers/usb/wusbcore/crypto.c6
-rw-r--r--drivers/usb/wusbcore/devconnect.c1
-rw-r--r--drivers/vfio/pci/vfio_pci.c55
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c49
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c6
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h1
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c5
-rw-r--r--drivers/vfio/vfio_iommu_type1.c4
-rw-r--r--drivers/vhost/scsi.c12
-rw-r--r--drivers/video/Kconfig4
-rw-r--r--drivers/video/backlight/backlight.c39
-rw-r--r--drivers/video/backlight/lm3630a_bl.c9
-rw-r--r--drivers/video/backlight/lp855x_bl.c6
-rw-r--r--drivers/video/backlight/lp8788_bl.c6
-rw-r--r--drivers/video/backlight/pwm_bl.c14
-rw-r--r--drivers/video/console/fbcon.c4
-rw-r--r--drivers/video/console/mdacon.c2
-rw-r--r--drivers/video/console/newport_con.c2
-rw-r--r--drivers/video/console/sticon.c2
-rw-r--r--drivers/video/console/vgacon.c5
-rw-r--r--drivers/video/fbdev/Kconfig3
-rw-r--r--drivers/video/fbdev/Makefile1
-rw-r--r--drivers/video/fbdev/amba-clcd.c2
-rw-r--r--drivers/video/fbdev/core/fb_defio.c3
-rw-r--r--drivers/video/fbdev/core/fbmem.c20
-rw-r--r--drivers/video/fbdev/da8xx-fb.c4
-rw-r--r--drivers/video/fbdev/efifb.c27
-rw-r--r--drivers/video/fbdev/hyperv_fb.c4
-rw-r--r--drivers/video/fbdev/imxfb.c44
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c12
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c12
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c12
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c6
-rw-r--r--drivers/video/fbdev/sh_mipi_dsi.c587
-rw-r--r--drivers/video/fbdev/ssd1307fb.c13
-rw-r--r--drivers/video/fbdev/via/accel.c2
-rw-r--r--drivers/video/fbdev/via/via-core.c4
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c9
-rw-r--r--drivers/vme/bridges/vme_tsi148.c9
-rw-r--r--drivers/vme/vme.c26
-rw-r--r--drivers/vme/vme_bridge.h1
-rw-r--r--drivers/w1/masters/ds2482.c18
-rw-r--r--drivers/w1/slaves/w1_therm.c218
-rw-r--r--drivers/w1/w1.c2
-rw-r--r--drivers/w1/w1.h2
-rw-r--r--drivers/w1/w1_io.c2
-rw-r--r--drivers/watchdog/Kconfig36
-rw-r--r--drivers/watchdog/Makefile3
-rw-r--r--drivers/watchdog/cpwd.c4
-rw-r--r--drivers/watchdog/ebc-c384_wdt.c43
-rw-r--r--drivers/watchdog/f71808e_wdt.c30
-rw-r--r--drivers/watchdog/imx2_wdt.c19
-rw-r--r--drivers/watchdog/jz4740_wdt.c4
-rw-r--r--drivers/watchdog/octeon-wdt-main.c2
-rw-r--r--drivers/watchdog/pic32-dmt.c257
-rw-r--r--drivers/watchdog/pic32-wdt.c263
-rw-r--r--drivers/watchdog/qcom-wdt.c7
-rw-r--r--drivers/watchdog/renesas_wdt.c213
-rw-r--r--drivers/watchdog/shwdt.c4
-rw-r--r--drivers/watchdog/watchdog_core.c4
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/efi.c1
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/xen-pciback/conf_space.c6
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c18
-rw-r--r--drivers/xen/xen-scsiback.c11
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c14
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c10
4688 files changed, 304978 insertions, 126942 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index d2ac339de..e1e2066ce 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -114,6 +114,8 @@ source "drivers/rtc/Kconfig"
source "drivers/dma/Kconfig"
+source "drivers/dma-buf/Kconfig"
+
source "drivers/dca/Kconfig"
source "drivers/auxdisplay/Kconfig"
@@ -190,6 +192,8 @@ source "drivers/android/Kconfig"
source "drivers/nvdimm/Kconfig"
+source "drivers/dax/Kconfig"
+
source "drivers/nvmem/Kconfig"
source "drivers/hwtracing/stm/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 8f5d076ba..0b6f3d601 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_PARPORT) += parport/
obj-$(CONFIG_NVM) += lightnvm/
obj-y += base/ block/ misc/ mfd/ nfc/
obj-$(CONFIG_LIBNVDIMM) += nvdimm/
+obj-$(CONFIG_DEV_DAX) += dax/
obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 82b96ee86..b7e2e7763 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -5,10 +5,10 @@
menuconfig ACPI
bool "ACPI (Advanced Configuration and Power Interface) Support"
depends on !IA64_HP_SIM
- depends on IA64 || X86 || (ARM64 && EXPERT)
+ depends on IA64 || X86 || ARM64
depends on PCI
select PNP
- default y
+ default y if (IA64 || X86)
help
Advanced Configuration and Power Interface (ACPI) support for
Linux requires an ACPI-compliant platform (hardware/firmware),
@@ -311,12 +311,12 @@ config ACPI_CUSTOM_DSDT
bool
default ACPI_CUSTOM_DSDT_FILE != ""
-config ACPI_INITRD_TABLE_OVERRIDE
- bool "ACPI tables override via initrd"
+config ACPI_TABLE_UPGRADE
+ bool "Allow upgrading ACPI tables via initrd"
depends on BLK_DEV_INITRD && X86
- default n
+ default y
help
- This option provides functionality to override arbitrary ACPI tables
+ This option provides functionality to upgrade arbitrary ACPI tables
via initrd. No functional change if no ACPI tables are passed via
initrd; therefore, it's safe to say Y.
See Documentation/acpi/initrd_table_override.txt for details.
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index edeb2d1d9..251ce85a6 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_ACPI) += acpi.o \
acpica/
# All the builtin files are in the "acpi." module_param namespace.
-acpi-y += osl.o utils.o reboot.o
+acpi-y += osi.o osl.o utils.o reboot.o
acpi-y += nvs.o
# Power management related files
@@ -47,6 +47,7 @@ acpi-$(CONFIG_ARM_AMBA) += acpi_amba.o
acpi-y += int340x_thermal.o
acpi-y += power.o
acpi-y += event.o
+acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o
acpi-y += sysfs.o
acpi-y += property.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
diff --git a/drivers/acpi/acpi_amba.c b/drivers/acpi/acpi_amba.c
index 2a61b54ab..7f77c0717 100644
--- a/drivers/acpi/acpi_amba.c
+++ b/drivers/acpi/acpi_amba.c
@@ -35,8 +35,7 @@ static void amba_register_dummy_clk(void)
if (amba_dummy_clk)
return;
- amba_dummy_clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL,
- CLK_IS_ROOT, 0);
+ amba_dummy_clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, 0, 0);
clk_register_clkdev(amba_dummy_clk, "apb_pclk", NULL);
}
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index f245bf35b..1daf9c46d 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -62,8 +62,7 @@ static int acpi_apd_setup(struct apd_private_data *pdata)
if (dev_desc->fixed_clk_rate) {
clk = clk_register_fixed_rate(&pdata->adev->dev,
dev_name(&pdata->adev->dev),
- NULL, CLK_IS_ROOT,
- dev_desc->fixed_clk_rate);
+ NULL, 0, dev_desc->fixed_clk_rate);
clk_register_clkdev(clk, NULL, dev_name(&pdata->adev->dev));
pdata->clk = clk;
}
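Editor's note: both hunks above make the same mechanical substitution. CLK_IS_ROOT became a no-op once the common clock framework started treating any clock registered without a parent as a root clock, so the flags argument is simply 0 now. A minimal sketch of the post-removal idiom (the "example_pclk" name, the device pointer, and the 100 MHz rate are invented for illustration, not taken from the patch):

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/err.h>

static struct clk *example_register_pclk(struct device *dev)
{
	struct clk *clk;

	/* parent_name == NULL and flags == 0: a fixed-rate root clock */
	clk = clk_register_fixed_rate(dev, "example_pclk", NULL, 0, 100000000);
	if (IS_ERR(clk))
		return clk;

	/* Make it reachable via clk_get(NULL, "example_pclk") */
	clk_register_clkdev(clk, "example_pclk", NULL);
	return clk;
}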
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 15e4604ef..dee86925a 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -265,7 +265,7 @@ static int acpi_aml_write_kern(const char *buf, int len)
char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/* sync tail before inserting logs */
smp_mb();
@@ -286,7 +286,7 @@ static int acpi_aml_readb_kern(void)
char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/* sync head before removing cmds */
smp_rmb();
@@ -330,7 +330,7 @@ again:
goto again;
break;
}
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
break;
size += ret;
count -= ret;
@@ -373,7 +373,7 @@ again:
if (ret == 0)
goto again;
}
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
break;
*(msg + size) = (char)ret;
size++;
@@ -526,7 +526,7 @@ static int acpi_aml_open(struct inode *inode, struct file *file)
}
acpi_aml_io.users++;
err_lock:
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
if (acpi_aml_active_reader == file)
acpi_aml_active_reader = NULL;
}
@@ -587,7 +587,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/* sync head before removing logs */
smp_rmb();
@@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
- acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !IS_ERR_VALUE(ret));
+ acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
return ret;
}
@@ -634,7 +634,7 @@ again:
goto again;
}
}
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
if (!acpi_aml_running())
ret = 0;
break;
@@ -657,7 +657,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/* sync tail before inserting cmds */
smp_mb();
@@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
- acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !IS_ERR_VALUE(ret));
+ acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
return n;
}
@@ -704,7 +704,7 @@ again:
goto again;
}
}
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
if (!acpi_aml_running())
ret = 0;
break;
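Editor's note: the acpi_dbg.c hunks above all swap IS_ERR_VALUE(ret) for a plain sign test. IS_ERR_VALUE() is intended for unsigned long values that may encode an error pointer in the -MAX_ERRNO..-1 range; applying it to a plain int return is misleading and fragile. A self-contained sketch of the preferred idiom (example_produce and example_consume are hypothetical helpers, not kernel APIs):

#include <errno.h>

/* Hypothetical producer: returns bytes handled, or a negative errno */
static int example_produce(int len)
{
	return len > 0 ? len : -EINVAL;
}

static int example_consume(int len)
{
	int ret = example_produce(len);

	if (ret < 0)		/* plain sign test; no IS_ERR_VALUE() needed */
		return ret;

	return 0;		/* success: ret bytes were handled */
}

int main(void)
{
	return example_consume(16) ? 1 : 0;
}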
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 0d92d0f91..c7ba948d2 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
pr->pblk = object.processor.pblk_address;
-
- /*
- * We don't care about error returns - we just try to mark
- * these reserved so that nobody else is confused into thinking
- * that this region might be unused..
- *
- * (In particular, allocating the IO range for Cardbus)
- */
- request_region(pr->throttling.address, 6, "ACPI CPU throttle");
}
/*
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 4361bc98e..c1d138e12 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -191,19 +191,6 @@ struct acpi_video_device_cap {
u8 _DDC:1; /* Return the EDID for this device */
};
-struct acpi_video_brightness_flags {
- u8 _BCL_no_ac_battery_levels:1; /* no AC/Battery levels in _BCL */
- u8 _BCL_reversed:1; /* _BCL package is in a reversed order */
- u8 _BQC_use_index:1; /* _BQC returns an index value */
-};
-
-struct acpi_video_device_brightness {
- int curr;
- int count;
- int *levels;
- struct acpi_video_brightness_flags flags;
-};
-
struct acpi_video_device {
unsigned long device_id;
struct acpi_video_device_flags flags;
@@ -325,7 +312,7 @@ static const struct thermal_cooling_device_ops video_cooling_ops = {
*/
static int
-acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
+acpi_video_device_lcd_query_levels(acpi_handle handle,
union acpi_object **levels)
{
int status;
@@ -335,7 +322,7 @@ acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
*levels = NULL;
- status = acpi_evaluate_object(device->dev->handle, "_BCL", NULL, &buffer);
+ status = acpi_evaluate_object(handle, "_BCL", NULL, &buffer);
if (!ACPI_SUCCESS(status))
return status;
obj = (union acpi_object *)buffer.pointer;
@@ -766,36 +753,29 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
return 0;
}
-
-/*
- * Arg:
- * device : video output device (LCD, CRT, ..)
- *
- * Return Value:
- * Maximum brightness level
- *
- * Allocate and initialize device->brightness.
- */
-
-static int
-acpi_video_init_brightness(struct acpi_video_device *device)
+int acpi_video_get_levels(struct acpi_device *device,
+ struct acpi_video_device_brightness **dev_br,
+ int *pmax_level)
{
union acpi_object *obj = NULL;
int i, max_level = 0, count = 0, level_ac_battery = 0;
- unsigned long long level, level_old;
union acpi_object *o;
struct acpi_video_device_brightness *br = NULL;
- int result = -EINVAL;
+ int result = 0;
u32 value;
- if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
+ if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device->handle,
+ &obj))) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
"LCD brightness level\n"));
+ result = -ENODEV;
goto out;
}
- if (obj->package.count < 2)
+ if (obj->package.count < 2) {
+ result = -EINVAL;
goto out;
+ }
br = kzalloc(sizeof(*br), GFP_KERNEL);
if (!br) {
@@ -861,6 +841,40 @@ acpi_video_init_brightness(struct acpi_video_device *device)
"Found unordered _BCL package"));
br->count = count;
+ *dev_br = br;
+ if (pmax_level)
+ *pmax_level = max_level;
+
+out:
+ kfree(obj);
+ return result;
+out_free:
+ kfree(br);
+ goto out;
+}
+EXPORT_SYMBOL(acpi_video_get_levels);
+
+/*
+ * Arg:
+ * device : video output device (LCD, CRT, ..)
+ *
+ * Return Value:
+ * Maximum brightness level
+ *
+ * Allocate and initialize device->brightness.
+ */
+
+static int
+acpi_video_init_brightness(struct acpi_video_device *device)
+{
+ int i, max_level = 0;
+ unsigned long long level, level_old;
+ struct acpi_video_device_brightness *br = NULL;
+ int result = -EINVAL;
+
+ result = acpi_video_get_levels(device->dev, &br, &max_level);
+ if (result)
+ return result;
device->brightness = br;
/* _BQC uses INDEX while _BCL uses VALUE in some laptops */
@@ -903,17 +917,13 @@ set_level:
goto out_free_levels;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "found %d brightness levels\n", count - 2));
- kfree(obj);
- return result;
+ "found %d brightness levels\n", br->count - 2));
+ return 0;
out_free_levels:
kfree(br->levels);
-out_free:
kfree(br);
-out:
device->brightness = NULL;
- kfree(obj);
return result;
}
@@ -1730,7 +1740,7 @@ static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video)
mutex_lock(&video->device_list_lock);
list_for_each_entry(dev, &video->video_device_list, entry) {
- if (!acpi_video_device_lcd_query_levels(dev, &levels))
+ if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels))
kfree(levels);
}
mutex_unlock(&video->device_list_lock);
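Editor's note: the net effect of the acpi_video.c changes is that the _BCL parsing that used to live inside acpi_video_init_brightness() is now a standalone, exported acpi_video_get_levels(), so code outside the ACPI video driver can enumerate brightness levels. A hedged usage sketch, assuming the caller owns and frees the returned allocation as the error paths in the patch suggest (example_dump_levels and its printout are invented):

#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/video.h>

static int example_dump_levels(struct acpi_device *adev)
{
	struct acpi_video_device_brightness *br;
	int max_level, i, ret;

	ret = acpi_video_get_levels(adev, &br, &max_level);
	if (ret)
		return ret;	/* -ENODEV: no _BCL; -EINVAL: malformed package */

	/* Per _BCL, entries 0 and 1 are the full-AC/battery levels */
	for (i = 0; i < br->count; i++)
		pr_info("brightness level %d: %d (max %d)\n",
			i, br->levels[i], max_level);

	kfree(br->levels);
	kfree(br);
	return 0;
}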
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index f682374c1..227bb7bb1 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -43,6 +43,7 @@ acpi-y += \
evxfregn.o
acpi-y += \
+ exconcat.o \
exconfig.o \
exconvrt.o \
excreate.o \
@@ -149,6 +150,7 @@ acpi-y += \
acpi-y += \
utaddress.o \
utalloc.o \
+ utascii.o \
utbuffer.o \
utcopy.o \
utexcep.o \
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 993af9eb0..f6404ea92 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -53,7 +53,7 @@
#define ACPI_DEBUG_BUFFER_SIZE 0x4000 /* 16K buffer for return objects */
struct acpi_db_command_info {
- char *name; /* Command Name */
+ const char *name; /* Command Name */
u8 min_args; /* Minimum arguments required */
};
@@ -64,7 +64,7 @@ struct acpi_db_command_help {
};
struct acpi_db_argument_info {
- char *name; /* Argument Name */
+ const char *name; /* Argument Name */
};
struct acpi_db_execute_walk {
@@ -196,7 +196,7 @@ ACPI_DBR_DEPENDENT_RETURN_VOID(void
acpi_walk_state
*walk_state))
- acpi_status acpi_db_display_all_methods(char *display_count_arg);
+acpi_status acpi_db_display_all_methods(char *display_count_arg);
void acpi_db_display_arguments(void);
@@ -220,7 +220,7 @@ ACPI_DBR_DEPENDENT_RETURN_VOID(void
* dbexec - debugger control method execution
*/
void
-acpi_db_execute(char *name, char **args, acpi_object_type * types, u32 flags);
+acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags);
void
acpi_db_create_execution_threads(char *num_threads_arg,
@@ -271,7 +271,7 @@ void ACPI_SYSTEM_XFACE acpi_db_execute_thread(void *context);
acpi_status acpi_db_user_commands(void);
char *acpi_db_get_next_token(char *string,
- char **next, acpi_object_type * return_type);
+ char **next, acpi_object_type *return_type);
/*
* dbobject
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 010cf81ba..77af91cf4 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -72,6 +72,7 @@ acpi_status acpi_ev_init_global_lock_handler(void);
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status
acpi_ev_acquire_global_lock(u16 timeout))
ACPI_HW_DEPENDENT_RETURN_OK(acpi_status acpi_ev_release_global_lock(void))
+
acpi_status acpi_ev_remove_global_lock_handler(void);
/*
@@ -198,8 +199,6 @@ void
acpi_ev_detach_region(union acpi_operand_object *region_obj,
u8 acpi_ns_is_locked);
-void acpi_ev_associate_reg_method(union acpi_operand_object *region_obj);
-
void
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
acpi_adr_space_type space_id, u32 function);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 51b073b68..fded77623 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -187,6 +187,8 @@ extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
extern const char *acpi_gbl_lowest_dstate_names[ACPI_NUM_sx_w_METHODS];
extern const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS];
extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
+extern const char acpi_gbl_lower_hex_digits[];
+extern const char acpi_gbl_upper_hex_digits[];
extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
@@ -361,6 +363,15 @@ ACPI_GLOBAL(u32, acpi_gbl_num_objects);
#endif /* ACPI_DEBUGGER */
+#if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER)
+
+ACPI_GLOBAL(const char, *acpi_gbl_pld_panel_list[]);
+ACPI_GLOBAL(const char, *acpi_gbl_pld_vertical_position_list[]);
+ACPI_GLOBAL(const char, *acpi_gbl_pld_horizontal_position_list[]);
+ACPI_GLOBAL(const char, *acpi_gbl_pld_shape_list[]);
+
+#endif
+
/*****************************************************************************
*
* Application globals
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index bae1a35c3..7ead23555 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -67,7 +67,7 @@
typedef const struct acpi_exdump_info {
u8 opcode;
u8 offset;
- char *name;
+ const char *name;
} acpi_exdump_info;
@@ -370,7 +370,7 @@ acpi_ex_resolve_to_value(union acpi_operand_object **stack_ptr,
acpi_status
acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
union acpi_operand_object *operand,
- acpi_object_type * return_type,
+ acpi_object_type *return_type,
union acpi_operand_object **return_desc);
/*
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 9562a10a1..13331d70d 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -278,7 +278,7 @@ struct acpi_create_field_info {
};
typedef
-acpi_status(*acpi_internal_method) (struct acpi_walk_state * walk_state);
+acpi_status (*acpi_internal_method) (struct acpi_walk_state * walk_state);
/*
* Bitmapped ACPI types. Used internally only
@@ -395,11 +395,12 @@ union acpi_predefined_info {
/* Return object auto-repair info */
-typedef acpi_status(*acpi_object_converter) (struct acpi_namespace_node * scope,
- union acpi_operand_object
- *original_object,
- union acpi_operand_object
- **converted_object);
+typedef acpi_status (*acpi_object_converter) (struct acpi_namespace_node *
+ scope,
+ union acpi_operand_object *
+ original_object,
+ union acpi_operand_object **
+ converted_object);
struct acpi_simple_repair_info {
char name[ACPI_NAME_SIZE];
@@ -539,10 +540,10 @@ struct acpi_gpe_device_info {
struct acpi_namespace_node *gpe_device;
};
-typedef acpi_status(*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *
- gpe_xrupt_info,
- struct acpi_gpe_block_info *gpe_block,
- void *context);
+typedef acpi_status (*acpi_gpe_callback) (struct acpi_gpe_xrupt_info *
+ gpe_xrupt_info,
+ struct acpi_gpe_block_info *
+ gpe_block, void *context);
/* Information about each particular fixed event */
@@ -657,10 +658,11 @@ struct acpi_result_values {
};
typedef
-acpi_status(*acpi_parse_downwards) (struct acpi_walk_state * walk_state,
- union acpi_parse_object ** out_op);
+acpi_status (*acpi_parse_downwards) (struct acpi_walk_state * walk_state,
+ union acpi_parse_object ** out_op);
-typedef acpi_status(*acpi_parse_upwards) (struct acpi_walk_state * walk_state);
+typedef
+acpi_status (*acpi_parse_upwards) (struct acpi_walk_state * walk_state);
/* Global handlers for AML Notifies */
@@ -700,7 +702,8 @@ union acpi_generic_state {
*
****************************************************************************/
-typedef acpi_status(*acpi_execute_op) (struct acpi_walk_state * walk_state);
+typedef
+acpi_status (*acpi_execute_op) (struct acpi_walk_state * walk_state);
/* Address Range info block */
@@ -853,24 +856,24 @@ struct acpi_parse_state {
/* Parse object flags */
-#define ACPI_PARSEOP_GENERIC 0x01
-#define ACPI_PARSEOP_NAMED 0x02
-#define ACPI_PARSEOP_DEFERRED 0x04
-#define ACPI_PARSEOP_BYTELIST 0x08
-#define ACPI_PARSEOP_IN_STACK 0x10
-#define ACPI_PARSEOP_TARGET 0x20
-#define ACPI_PARSEOP_IN_CACHE 0x80
+#define ACPI_PARSEOP_GENERIC 0x01
+#define ACPI_PARSEOP_NAMED_OBJECT 0x02
+#define ACPI_PARSEOP_DEFERRED 0x04
+#define ACPI_PARSEOP_BYTELIST 0x08
+#define ACPI_PARSEOP_IN_STACK 0x10
+#define ACPI_PARSEOP_TARGET 0x20
+#define ACPI_PARSEOP_IN_CACHE 0x80
/* Parse object disasm_flags */
-#define ACPI_PARSEOP_IGNORE 0x01
-#define ACPI_PARSEOP_PARAMLIST 0x02
-#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04
-#define ACPI_PARSEOP_PREDEF_CHECKED 0x08
-#define ACPI_PARSEOP_CLOSING_PAREN 0x10
-#define ACPI_PARSEOP_COMPOUND 0x20
-#define ACPI_PARSEOP_ASSIGNMENT 0x40
-#define ACPI_PARSEOP_ELSEIF 0x80
+#define ACPI_PARSEOP_IGNORE 0x01
+#define ACPI_PARSEOP_PARAMETER_LIST 0x02
+#define ACPI_PARSEOP_EMPTY_TERMLIST 0x04
+#define ACPI_PARSEOP_PREDEFINED_CHECKED 0x08
+#define ACPI_PARSEOP_CLOSING_PAREN 0x10
+#define ACPI_PARSEOP_COMPOUND_ASSIGNMENT 0x20
+#define ACPI_PARSEOP_ASSIGNMENT 0x40
+#define ACPI_PARSEOP_ELSEIF 0x80
/*****************************************************************************
*
@@ -1096,6 +1099,7 @@ struct acpi_external_list {
#define ACPI_EXT_ORIGIN_FROM_FILE 0x02 /* External came from a file */
#define ACPI_EXT_INTERNAL_PATH_ALLOCATED 0x04 /* Deallocate internal path on completion */
#define ACPI_EXT_EXTERNAL_EMITTED 0x08 /* External() statement has been emitted */
+#define ACPI_EXT_ORIGIN_FROM_OPCODE 0x10 /* External came from an External() opcode */
struct acpi_external_file {
char *path;
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 411c18b7d..a3b95431b 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -260,14 +260,31 @@
#define ACPI_IS_MISALIGNED(value) (((acpi_size) value) & (sizeof(acpi_size)-1))
+/* Generic (power-of-two) rounding */
+
+#define ACPI_IS_ALIGNED(a, s) (((a) & ((s) - 1)) == 0)
+#define ACPI_IS_POWER_OF_TWO(a) ACPI_IS_ALIGNED(a, a)
+
/*
* Bitmask creation
* Bit positions start at zero.
* MASK_BITS_ABOVE creates a mask starting AT the position and above
* MASK_BITS_BELOW creates a mask starting one bit BELOW the position
+ * MASK_BITS_ABOVE/BELOW accepts a bit offset to create a mask
+ * MASK_BITS_ABOVE/BELOW_32/64 accepts a bit width to create a mask
+ * Note: The ACPI_INTEGER_BIT_SIZE check is used to bypass compiler
+ * differences with the shift operator
*/
#define ACPI_MASK_BITS_ABOVE(position) (~((ACPI_UINT64_MAX) << ((u32) (position))))
#define ACPI_MASK_BITS_BELOW(position) ((ACPI_UINT64_MAX) << ((u32) (position)))
+#define ACPI_MASK_BITS_ABOVE_32(width) ((u32) ACPI_MASK_BITS_ABOVE(width))
+#define ACPI_MASK_BITS_BELOW_32(width) ((u32) ACPI_MASK_BITS_BELOW(width))
+#define ACPI_MASK_BITS_ABOVE_64(width) ((width) == ACPI_INTEGER_BIT_SIZE ? \
+ ACPI_UINT64_MAX : \
+ ACPI_MASK_BITS_ABOVE(width))
+#define ACPI_MASK_BITS_BELOW_64(width) ((width) == ACPI_INTEGER_BIT_SIZE ? \
+ (u64) 0 : \
+ ACPI_MASK_BITS_BELOW(width))
/* Bitfields within ACPI registers */
@@ -283,10 +300,10 @@
/* Generic bitfield macros and masks */
#define ACPI_GET_BITS(source_ptr, position, mask) \
- ((*source_ptr >> position) & mask)
+ ((*(source_ptr) >> (position)) & (mask))
#define ACPI_SET_BITS(target_ptr, position, mask, value) \
- (*target_ptr |= ((value & mask) << position))
+ (*(target_ptr) |= (((value) & (mask)) << (position)))
#define ACPI_1BIT_MASK 0x00000001
#define ACPI_2BIT_MASK 0x00000003
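Editor's note: a worked example of the new width-suffixed mask helpers above. The ACPI_INTEGER_BIT_SIZE guard exists because shifting a 64-bit value by its full width is undefined behaviour in C; the added parentheses in ACPI_GET_BITS/ACPI_SET_BITS likewise protect callers that pass expressions rather than plain identifiers. Standalone sketch with userspace names, not the ACPICA macros themselves:

#include <stdint.h>
#include <stdio.h>

/* Userspace restatements of the macros above */
#define MASK_BITS_ABOVE(pos)	(~(UINT64_MAX << (uint32_t)(pos)))
#define MASK_BITS_ABOVE_64(w)	((w) == 64 ? UINT64_MAX : MASK_BITS_ABOVE(w))

int main(void)
{
	/* Keep bits 0..7 (mask off bit 8 and above): 00000000000000ff */
	printf("%016llx\n", (unsigned long long)MASK_BITS_ABOVE(8));

	/* Full width: the guard returns all-ones instead of shifting by 64 */
	printf("%016llx\n", (unsigned long long)MASK_BITS_ABOVE_64(64));
	return 0;
}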
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 022d69cb3..f33a4ba8e 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -206,9 +206,10 @@ void acpi_ns_dump_tables(acpi_handle search_base, u32 max_depth);
void acpi_ns_dump_entry(acpi_handle handle, u32 debug_level);
void
-acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component);
+acpi_ns_dump_pathname(acpi_handle handle,
+ const char *msg, u32 level, u32 component);
-void acpi_ns_print_pathname(u32 num_segments, char *pathname);
+void acpi_ns_print_pathname(u32 num_segments, const char *pathname);
acpi_status
acpi_ns_dump_one_object(acpi_handle obj_handle,
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 7da639d62..fc305775c 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -139,7 +139,7 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
*/
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode);
-char *acpi_ps_get_opcode_name(u16 opcode);
+const char *acpi_ps_get_opcode_name(u16 opcode);
u8 acpi_ps_get_argument_count(u32 op_type);
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 5faeab41e..888440b2c 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -129,7 +129,8 @@ enum acpi_return_package_types {
ACPI_PTYPE2_REV_FIXED = 9,
ACPI_PTYPE2_FIX_VAR = 10,
ACPI_PTYPE2_VAR_VAR = 11,
- ACPI_PTYPE2_UUID_PAIR = 12
+ ACPI_PTYPE2_UUID_PAIR = 12,
+ ACPI_PTYPE_CUSTOM = 13
};
/* Support macros for users of the predefined info table */
@@ -340,7 +341,7 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_BIX", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (16 Int),(4 Str) */
- PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,
+ PACKAGE_INFO(ACPI_PTYPE_CUSTOM, ACPI_RTYPE_INTEGER, 16,
ACPI_RTYPE_STRING, 4, 0),
{{"_BLT",
@@ -523,6 +524,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */
PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
+ {{"_FIT", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_BUFFER)}}, /* ACPI 6.0 */
+
{{"_FIX", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Ints) */
PACKAGE_INFO(ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0, 0, 0, 0),
@@ -1053,6 +1057,12 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
METHOD_RETURNS(ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING |
ACPI_RTYPE_BUFFER)}},
+ {{"_WPC", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* ACPI 6.1 */
+
+ {{"_WPP", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* ACPI 6.1 */
+
PACKAGE_INFO(0, 0, 0, 0, 0, 0) /* Table terminator */
};
#else
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 5dd58beaf..63da1e37c 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -124,7 +124,7 @@ typedef enum {
typedef const struct acpi_rsdump_info {
u8 opcode;
u8 offset;
- char *name;
+ const char *name;
const char **pointer;
} acpi_rsdump_info;
@@ -209,7 +209,7 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
acpi_status
acpi_rs_get_method_data(acpi_handle handle,
- char *path, struct acpi_buffer *ret_buffer);
+ const char *path, struct acpi_buffer *ret_buffer);
acpi_status
acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
@@ -223,16 +223,16 @@ acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
* rscalc
*/
acpi_status
-acpi_rs_get_list_length(u8 * aml_buffer,
- u32 aml_buffer_length, acpi_size * size_needed);
+acpi_rs_get_list_length(u8 *aml_buffer,
+ u32 aml_buffer_length, acpi_size *size_needed);
acpi_status
acpi_rs_get_aml_length(struct acpi_resource *resource_list,
- acpi_size resource_list_size, acpi_size * size_needed);
+ acpi_size resource_list_size, acpi_size *size_needed);
acpi_status
acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
- acpi_size * buffer_size_needed);
+ acpi_size *buffer_size_needed);
acpi_status
acpi_rs_convert_aml_to_resources(u8 * aml,
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index b3b386e0b..6235642e3 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -184,7 +184,7 @@ struct acpi_evaluate_info {
/* The first 3 elements are passed by the caller to acpi_ns_evaluate */
struct acpi_namespace_node *prefix_node; /* Input: starting node */
- char *relative_pathname; /* Input: path relative to prefix_node */
+ const char *relative_pathname; /* Input: path relative to prefix_node */
union acpi_operand_object **parameters; /* Input: argument list */
struct acpi_namespace_node *node; /* Resolved node (prefix_node:relative_pathname) */
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 848ad3ac9..cd5a135fc 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -161,8 +161,6 @@ acpi_tb_install_fixed_table(acpi_physical_address address,
acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address);
-u8 acpi_is_valid_signature(char *signature);
-
/*
* tbxfload
*/
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index e43ab6f2a..a7dbb2b88 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -136,16 +136,16 @@ extern const char *acpi_gbl_pt_decode[];
#define ACPI_SMALL_VARIABLE_LENGTH 3
typedef
-acpi_status(*acpi_walk_aml_callback) (u8 *aml,
- u32 length,
- u32 offset,
- u8 resource_index, void **context);
+acpi_status (*acpi_walk_aml_callback) (u8 *aml,
+ u32 length,
+ u32 offset,
+ u8 resource_index, void **context);
typedef
-acpi_status(*acpi_pkg_callback) (u8 object_type,
- union acpi_operand_object *source_object,
- union acpi_generic_state * state,
- void *context);
+acpi_status (*acpi_pkg_callback) (u8 object_type,
+ union acpi_operand_object * source_object,
+ union acpi_generic_state * state,
+ void *context);
struct acpi_pkg_info {
u8 *free_space;
@@ -167,6 +167,15 @@ struct acpi_pkg_info {
#define DB_QWORD_DISPLAY 8
/*
+ * utascii - ASCII utilities
+ */
+u8 acpi_ut_valid_nameseg(char *signature);
+
+u8 acpi_ut_valid_name_char(char character, u32 position);
+
+void acpi_ut_check_and_repair_ascii(u8 *name, char *repaired_name, u32 count);
+
+/*
* utnonansi - Non-ANSI C library functions
*/
void acpi_ut_strupr(char *src_string);
@@ -175,7 +184,14 @@ void acpi_ut_strlwr(char *src_string);
int acpi_ut_stricmp(char *string1, char *string2);
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
+acpi_status
+acpi_ut_strtoul64(char *string,
+ u32 base, u32 max_integer_byte_width, u64 *ret_integer);
+
+/* Values for max_integer_byte_width above */
+
+#define ACPI_MAX32_BYTE_WIDTH 4
+#define ACPI_MAX64_BYTE_WIDTH 8
/*
* utglobal - Global data structures and procedures
@@ -266,7 +282,8 @@ acpi_ut_trace(u32 line_number,
void
acpi_ut_trace_ptr(u32 line_number,
const char *function_name,
- const char *module_name, u32 component_id, void *pointer);
+ const char *module_name,
+ u32 component_id, const void *pointer);
void
acpi_ut_trace_u32(u32 line_number,
@@ -276,7 +293,8 @@ acpi_ut_trace_u32(u32 line_number,
void
acpi_ut_trace_str(u32 line_number,
const char *function_name,
- const char *module_name, u32 component_id, char *string);
+ const char *module_name,
+ u32 component_id, const char *string);
void
acpi_ut_exit(u32 line_number,
@@ -335,12 +353,12 @@ void acpi_ut_delete_internal_object_list(union acpi_operand_object **obj_list);
*/
acpi_status
acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
- char *path,
+ const char *path,
u32 expected_return_btypes,
union acpi_operand_object **return_desc);
acpi_status
-acpi_ut_evaluate_numeric_object(char *object_name,
+acpi_ut_evaluate_numeric_object(const char *object_name,
struct acpi_namespace_node *device_node,
u64 *value);
@@ -415,7 +433,7 @@ union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size);
union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size);
acpi_status
-acpi_ut_get_object_size(union acpi_operand_object *obj, acpi_size * obj_length);
+acpi_ut_get_object_size(union acpi_operand_object *obj, acpi_size *obj_length);
/*
* utosi - Support for the _OSI predefined control method
@@ -526,15 +544,15 @@ void acpi_ut_set_integer_width(u8 revision);
void
acpi_ut_display_init_pathname(u8 type,
struct acpi_namespace_node *obj_handle,
- char *path);
+ const char *path);
#endif
/*
* utownerid - Support for Table/Method Owner IDs
*/
-acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id);
+acpi_status acpi_ut_allocate_owner_id(acpi_owner_id *owner_id);
-void acpi_ut_release_owner_id(acpi_owner_id * owner_id);
+void acpi_ut_release_owner_id(acpi_owner_id *owner_id);
/*
* utresrc
@@ -570,10 +588,6 @@ void acpi_ut_print_string(char *string, u16 max_length);
void ut_convert_backslashes(char *pathname);
#endif
-u8 acpi_ut_valid_acpi_name(char *name);
-
-u8 acpi_ut_valid_acpi_char(char character, u32 position);
-
void acpi_ut_repair_name(char *name);
#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
@@ -628,7 +642,7 @@ void acpi_ut_dump_allocation_info(void);
void acpi_ut_dump_allocations(u32 component, const char *module);
acpi_status
-acpi_ut_create_list(char *list_name,
+acpi_ut_create_list(const char *list_name,
u16 object_size, struct acpi_memory_list **return_cache);
#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
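Editor's note: among the acutils.h changes, acpi_ut_strtoul64() gains a max_integer_byte_width argument (ACPI_MAX32_BYTE_WIDTH or ACPI_MAX64_BYTE_WIDTH) so string conversion can honor the table's integer width; the dbconvert.c hunk below shows a caller passing acpi_gbl_integer_byte_width. A standalone sketch of a width-bounded hex parser in the same spirit (strtohex64_bounded is an invented name; overflow past 64 bits is deliberately not checked here):

#include <stdint.h>
#include <stddef.h>

static int strtohex64_bounded(const char *s, unsigned max_byte_width,
			      uint64_t *out)
{
	uint64_t value = 0;

	if (!s || !*s || (max_byte_width != 4 && max_byte_width != 8))
		return -1;

	for (; *s; s++) {
		int digit;

		if (*s >= '0' && *s <= '9')
			digit = *s - '0';
		else if (*s >= 'a' && *s <= 'f')
			digit = *s - 'a' + 10;
		else if (*s >= 'A' && *s <= 'F')
			digit = *s - 'A' + 10;
		else
			return -1;

		value = (value << 4) | digit;
	}

	/* Truncate to the requested width, as 32-bit tables require */
	if (max_byte_width == 4)
		value &= 0xFFFFFFFFull;

	*out = value;
	return 0;
}

int main(void)
{
	uint64_t v;

	return strtohex64_bounded("1122334455667788", 4, &v) == 0 &&
	       v == 0x55667788ull ? 0 : 1;
}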
diff --git a/drivers/acpi/acpica/dbcmds.c b/drivers/acpi/acpica/dbcmds.c
index 772178c96..62bd44653 100644
--- a/drivers/acpi/acpica/dbcmds.c
+++ b/drivers/acpi/acpica/dbcmds.c
@@ -738,9 +738,9 @@ acpi_dm_test_resource_conversion(struct acpi_namespace_node *node, char *name)
original_aml = return_buffer.pointer;
acpi_dm_compare_aml_resources(original_aml->buffer.pointer,
- (acpi_rsdesc_size) original_aml->buffer.
+ (acpi_rsdesc_size)original_aml->buffer.
length, new_aml.pointer,
- (acpi_rsdesc_size) new_aml.length);
+ (acpi_rsdesc_size)new_aml.length);
/* Cleanup and exit */
diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
index 68f4e0f4b..7cd07b27f 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -194,7 +194,7 @@ acpi_db_convert_to_buffer(char *string, union acpi_object *object)
*
******************************************************************************/
-acpi_status acpi_db_convert_to_package(char *string, union acpi_object * object)
+acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object)
{
char *this;
char *next;
@@ -252,7 +252,7 @@ acpi_status acpi_db_convert_to_package(char *string, union acpi_object * object)
acpi_status
acpi_db_convert_to_object(acpi_object_type type,
- char *string, union acpi_object * object)
+ char *string, union acpi_object *object)
{
acpi_status status = AE_OK;
@@ -277,7 +277,9 @@ acpi_db_convert_to_object(acpi_object_type type,
default:
object->type = ACPI_TYPE_INTEGER;
- status = acpi_ut_strtoul64(string, 16, &object->integer.value);
+ status =
+ acpi_ut_strtoul64(string, 16, acpi_gbl_integer_byte_width,
+ &object->integer.value);
break;
}
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index c81485537..12df2915a 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -361,7 +361,7 @@ acpi_db_execution_walk(acpi_handle obj_handle,
******************************************************************************/
void
-acpi_db_execute(char *name, char **args, acpi_object_type * types, u32 flags)
+acpi_db_execute(char *name, char **args, acpi_object_type *types, u32 flags)
{
acpi_status status;
struct acpi_buffer return_obj;
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 417c02a89..7cd5d2e02 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -57,12 +57,12 @@ static u32 acpi_db_get_line(char *input_buffer);
static u32 acpi_db_match_command(char *user_command);
-static void acpi_db_display_command_info(char *command, u8 display_all);
+static void acpi_db_display_command_info(const char *command, u8 display_all);
static void acpi_db_display_help(char *command);
static u8
-acpi_db_match_command_help(char *command,
+acpi_db_match_command_help(const char *command,
const struct acpi_db_command_help *help);
/*
@@ -348,7 +348,7 @@ static const struct acpi_db_command_help acpi_gbl_db_command_help[] = {
******************************************************************************/
static u8
-acpi_db_match_command_help(char *command,
+acpi_db_match_command_help(const char *command,
const struct acpi_db_command_help *help)
{
char *invocation = help->invocation;
@@ -402,7 +402,7 @@ acpi_db_match_command_help(char *command,
*
******************************************************************************/
-static void acpi_db_display_command_info(char *command, u8 display_all)
+static void acpi_db_display_command_info(const char *command, u8 display_all)
{
const struct acpi_db_command_help *next;
u8 matched;
@@ -466,7 +466,7 @@ static void acpi_db_display_help(char *command)
******************************************************************************/
char *acpi_db_get_next_token(char *string,
- char **next, acpi_object_type * return_type)
+ char **next, acpi_object_type *return_type)
{
char *start;
u32 depth;
@@ -656,8 +656,9 @@ static u32 acpi_db_match_command(char *user_command)
}
for (i = CMD_FIRST_VALID; acpi_gbl_db_commands[i].name; i++) {
- if (strstr(acpi_gbl_db_commands[i].name, user_command) ==
- acpi_gbl_db_commands[i].name) {
+ if (strstr
+ (ACPI_CAST_PTR(char, acpi_gbl_db_commands[i].name),
+ user_command) == acpi_gbl_db_commands[i].name) {
return (i);
}
}
@@ -683,8 +684,8 @@ static u32 acpi_db_match_command(char *user_command)
acpi_status
acpi_db_command_dispatch(char *input_buffer,
- struct acpi_walk_state * walk_state,
- union acpi_parse_object * op)
+ struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
{
u32 temp;
u32 command_index;
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 3c23b5a10..8667f14d5 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -285,7 +285,7 @@ void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg)
u32 max_depth = ACPI_UINT32_MAX;
acpi_owner_id owner_id;
- owner_id = (acpi_owner_id) strtoul(owner_arg, NULL, 0);
+ owner_id = (acpi_owner_id)strtoul(owner_arg, NULL, 0);
/* Now we can check for the depth argument */
@@ -709,7 +709,7 @@ acpi_db_integrity_walk(acpi_handle obj_handle,
return (AE_OK);
}
- if (!acpi_ut_valid_acpi_name(node->name.ascii)) {
+ if (!acpi_ut_valid_nameseg(node->name.ascii)) {
acpi_os_printf("Invalid AcpiName for Node %p\n", node);
return (AE_OK);
}
diff --git a/drivers/acpi/acpica/dbutils.c b/drivers/acpi/acpica/dbutils.c
index b37a2c77b..ae80106d1 100644
--- a/drivers/acpi/acpica/dbutils.c
+++ b/drivers/acpi/acpica/dbutils.c
@@ -56,8 +56,6 @@ acpi_status acpi_db_second_pass_parse(union acpi_parse_object *root);
void acpi_db_dump_buffer(u32 address);
#endif
-static char *gbl_hex_to_ascii = "0123456789ABCDEF";
-
/*******************************************************************************
*
* FUNCTION: acpi_db_match_argument
@@ -82,8 +80,9 @@ acpi_db_match_argument(char *user_argument,
}
for (i = 0; arguments[i].name; i++) {
- if (strstr(arguments[i].name, user_argument) ==
- arguments[i].name) {
+ if (strstr(ACPI_CAST_PTR(char, arguments[i].name),
+ ACPI_CAST_PTR(char,
+ user_argument)) == arguments[i].name) {
return (i);
}
}
@@ -339,7 +338,7 @@ void acpi_db_uint32_to_hex_string(u32 value, char *buffer)
buffer[8] = '\0';
for (i = 7; i >= 0; i--) {
- buffer[i] = gbl_hex_to_ascii[value & 0x0F];
+ buffer[i] = acpi_gbl_upper_hex_digits[value & 0x0F];
value = value >> 4;
}
}
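Editor's note: the dbutils.c change retires the file-local gbl_hex_to_ascii table in favor of the shared acpi_gbl_upper_hex_digits declared in the acglobal.h hunk earlier. The conversion itself is a straightforward nibble-at-a-time table lookup, filling the buffer from the right; a standalone restatement (u32_to_hex is an invented name):

#include <stdint.h>

static const char upper_hex_digits[] = "0123456789ABCDEF";

static void u32_to_hex(uint32_t value, char buffer[9])
{
	int i;

	buffer[8] = '\0';
	for (i = 7; i >= 0; i--) {
		buffer[i] = upper_hex_digits[value & 0x0F];
		value >>= 4;
	}
}

int main(void)
{
	char buf[9];

	u32_to_hex(0xDEADBEEF, buf);	/* buf now holds "DEADBEEF" */
	return buf[0] == 'D' ? 0 : 1;
}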
diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c
index e94e0d80b..124db2377 100644
--- a/drivers/acpi/acpica/dbxface.c
+++ b/drivers/acpi/acpica/dbxface.c
@@ -162,8 +162,8 @@ void acpi_db_signal_break_point(struct acpi_walk_state *walk_state)
******************************************************************************/
acpi_status
-acpi_db_single_step(struct acpi_walk_state * walk_state,
- union acpi_parse_object * op, u32 opcode_class)
+acpi_db_single_step(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op, u32 opcode_class)
{
union acpi_parse_object *next;
acpi_status status = AE_OK;
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index c9a663f21..4ddcbf100 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -163,8 +163,8 @@ acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
******************************************************************************/
acpi_status
-acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
- union acpi_parse_object * op)
+acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
{
acpi_status status = AE_OK;
union acpi_generic_state *control_state;
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 5aa1c5fee..f1e6dcc7a 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -188,7 +188,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
acpi_status
acpi_ds_initialize_objects(u32 table_index,
- struct acpi_namespace_node * start_node)
+ struct acpi_namespace_node *start_node)
{
acpi_status status;
struct acpi_init_walk_info info;
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index da198b864..47c7b52a5 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -209,7 +209,7 @@ acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
******************************************************************************/
acpi_status
-acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state)
+acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
u32 aml_offset;
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 8ca941632..f393de9f5 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -569,7 +569,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
/* TBD: May only be temporary */
obj_desc =
- acpi_ut_create_string_object((acpi_size) name_length);
+ acpi_ut_create_string_object((acpi_size)name_length);
strncpy(obj_desc->string.pointer,
name_string, name_length);
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index d1cedcfda..fd34040d4 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -137,8 +137,8 @@ acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
******************************************************************************/
acpi_status
-acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
- union acpi_parse_object ** out_op)
+acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object **out_op)
{
union acpi_parse_object *op;
struct acpi_namespace_node *node;
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 0bac6e141..762db3fa7 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -490,8 +490,8 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
status =
acpi_ds_create_index_field(op,
- (acpi_handle) arg->
- common.node, walk_state);
+ (acpi_handle)arg->common.
+ node, walk_state);
break;
case AML_BANK_FIELD_OP:
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 3a26ddbae..e3338698e 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -143,8 +143,8 @@ acpi_ds_result_pop(union acpi_operand_object **object,
******************************************************************************/
acpi_status
-acpi_ds_result_push(union acpi_operand_object * object,
- struct acpi_walk_state * walk_state)
+acpi_ds_result_push(union acpi_operand_object *object,
+ struct acpi_walk_state *walk_state)
{
union acpi_generic_state *state;
acpi_status status;
@@ -307,7 +307,7 @@ static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state)
******************************************************************************/
acpi_status
-acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
+acpi_ds_obj_stack_push(void *object, struct acpi_walk_state *walk_state)
{
ACPI_FUNCTION_NAME(ds_obj_stack_push);
@@ -354,7 +354,7 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
******************************************************************************/
acpi_status
-acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
+acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state *walk_state)
{
u32 i;
@@ -411,7 +411,7 @@ acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
return;
}
- for (i = (s32) pop_count - 1; i >= 0; i--) {
+ for (i = (s32)pop_count - 1; i >= 0; i--) {
if (walk_state->num_operands == 0) {
return;
}
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index b47e62aaf..4b4949ce0 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -440,7 +440,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
gpe_event_info =
&gpe_block->
- event_info[((acpi_size) i *
+ event_info[((acpi_size)i *
ACPI_GPE_REGISTER_WIDTH) + j];
gpe_number =
j + gpe_register_info->base_gpe_number;
@@ -652,7 +652,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
*
******************************************************************************/
-acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info * gpe_event_info)
+acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 447fa1cac..d54014cab 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -211,7 +211,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
/* Allocate the GPE register information block */
- gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->
+ gpe_register_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->
register_count *
sizeof(struct
acpi_gpe_register_info));
@@ -225,7 +225,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
* Allocate the GPE event_info block. There are eight distinct GPEs
* per register. Initialization to zeros is sufficient.
*/
- gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->gpe_count *
+ gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size)gpe_block->gpe_count *
sizeof(struct
acpi_gpe_event_info));
if (!gpe_event_info) {
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 66c4b5b7c..3f150d567 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -163,7 +163,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_status
acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
- struct acpi_gpe_xrupt_info ** gpe_xrupt_block)
+ struct acpi_gpe_xrupt_info **gpe_xrupt_block)
{
struct acpi_gpe_xrupt_info *next_gpe_xrupt;
struct acpi_gpe_xrupt_info *gpe_xrupt;
@@ -320,7 +320,7 @@ acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/* Now look at the individual GPEs in this byte register */
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
- gpe_event_info = &gpe_block->event_info[((acpi_size) i *
+ gpe_event_info = &gpe_block->event_info[((acpi_size)i *
ACPI_GPE_REGISTER_WIDTH)
+ j];
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 0f6be8956..24768ca03 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -359,7 +359,7 @@ union acpi_operand_object *acpi_ev_find_region_handler(acpi_adr_space_type
******************************************************************************/
acpi_status
-acpi_ev_install_space_handler(struct acpi_namespace_node * node,
+acpi_ev_install_space_handler(struct acpi_namespace_node *node,
acpi_adr_space_type space_id,
acpi_adr_space_handler handler,
acpi_adr_space_setup setup, void *context)
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index c67d78c59..f51d43adb 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -99,8 +99,7 @@ u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
******************************************************************************/
acpi_status
-acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
- u32 notify_value)
+acpi_ev_queue_notify_request(struct acpi_namespace_node *node, u32 notify_value)
{
union acpi_operand_object *obj_desc;
union acpi_operand_object *handler_list_head = NULL;
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 63924d1c7..4c6f79514 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -526,81 +526,59 @@ acpi_ev_attach_region(union acpi_operand_object *handler_obj,
/*******************************************************************************
*
- * FUNCTION: acpi_ev_associate_reg_method
+ * FUNCTION: acpi_ev_execute_reg_method
*
* PARAMETERS: region_obj - Region object
+ * function - Passed to _REG: On (1) or Off (0)
*
* RETURN: Status
*
- * DESCRIPTION: Find and associate _REG method to a region
+ * DESCRIPTION: Execute _REG method for a region
*
******************************************************************************/
-void acpi_ev_associate_reg_method(union acpi_operand_object *region_obj)
+acpi_status
+acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
{
- acpi_name *reg_name_ptr = (acpi_name *) METHOD_NAME__REG;
+ struct acpi_evaluate_info *info;
+ union acpi_operand_object *args[3];
+ union acpi_operand_object *region_obj2;
+ const acpi_name *reg_name_ptr =
+ ACPI_CAST_PTR(acpi_name, METHOD_NAME__REG);
struct acpi_namespace_node *method_node;
struct acpi_namespace_node *node;
- union acpi_operand_object *region_obj2;
acpi_status status;
- ACPI_FUNCTION_TRACE(ev_associate_reg_method);
+ ACPI_FUNCTION_TRACE(ev_execute_reg_method);
+
+ if (!acpi_gbl_namespace_initialized ||
+ region_obj->region.handler == NULL) {
+ return_ACPI_STATUS(AE_OK);
+ }
region_obj2 = acpi_ns_get_secondary_object(region_obj);
if (!region_obj2) {
- return_VOID;
+ return_ACPI_STATUS(AE_NOT_EXIST);
}
+ /*
+ * Find any "_REG" method associated with this region definition.
+ * The method should always be updated as this function may be
+ * invoked after a namespace change.
+ */
node = region_obj->region.node->parent;
-
- /* Find any "_REG" method associated with this region definition */
-
status =
acpi_ns_search_one_scope(*reg_name_ptr, node, ACPI_TYPE_METHOD,
&method_node);
if (ACPI_SUCCESS(status)) {
/*
- * The _REG method is optional and there can be only one per region
- * definition. This will be executed when the handler is attached
- * or removed
+ * The _REG method is optional and there can be only one per
+ * region definition. This will be executed when the handler is
+ * attached or removed.
*/
region_obj2->extra.method_REG = method_node;
}
-
- return_VOID;
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ev_execute_reg_method
- *
- * PARAMETERS: region_obj - Region object
- * function - Passed to _REG: On (1) or Off (0)
- *
- * RETURN: Status
- *
- * DESCRIPTION: Execute _REG method for a region
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
-{
- struct acpi_evaluate_info *info;
- union acpi_operand_object *args[3];
- union acpi_operand_object *region_obj2;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ev_execute_reg_method);
-
- region_obj2 = acpi_ns_get_secondary_object(region_obj);
- if (!region_obj2) {
- return_ACPI_STATUS(AE_NOT_EXIST);
- }
-
- if (region_obj2->extra.method_REG == NULL ||
- region_obj->region.handler == NULL ||
- !acpi_gbl_namespace_initialized) {
+ if (region_obj2->extra.method_REG == NULL) {
return_ACPI_STATUS(AE_OK);
}
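Editor's note: the evregion.c rework folds acpi_ev_associate_reg_method() into acpi_ev_execute_reg_method(). Instead of caching the _REG method node once at region initialization, the node is re-resolved on every invocation, which keeps the association correct after dynamic namespace changes, as the new comment in the hunk states. A standalone sketch of that design choice, with stand-in types and a stubbed lookup rather than ACPICA APIs:

#include <stddef.h>

struct node;			/* stand-in for a namespace node */

struct region {
	struct node *parent;	/* scope in which _REG would live */
};

/* Stubbed lookup; the real code calls acpi_ns_search_one_scope() */
static struct node *search_one_scope(struct node *scope, const char *name)
{
	(void)scope;
	(void)name;
	return NULL;
}

static int execute_reg(struct region *r, int function)
{
	struct node *method_reg;

	/* Re-resolve on every call: the namespace may have changed */
	method_reg = search_one_scope(r->parent, "_REG");
	if (!method_reg)
		return 0;	/* _REG is optional; nothing to evaluate */

	/* ...evaluate _REG(space_id, function) here... */
	(void)function;
	return 0;
}

int main(void)
{
	struct region r = { NULL };

	return execute_reg(&r, 1);
}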
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index fda869c9a..b6ea9c0d0 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -227,7 +227,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
/* Install a handler for this PCI root bridge */
- status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
+ status = acpi_install_address_space_handler((acpi_handle)pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
if (ACPI_FAILURE(status)) {
if (status == AE_SAME_HANDLER) {
/*
@@ -518,7 +518,6 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
return_ACPI_STATUS(AE_OK);
}
- acpi_ev_associate_reg_method(region_obj);
region_obj->common.flags |= AOPOBJ_OBJECT_INITIALIZED;
node = region_obj->region.node->parent;
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 904567148..17cfef721 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -917,7 +917,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block)
* the FADT-defined gpe blocks. Otherwise, the GPE block device.
*
******************************************************************************/
-acpi_status acpi_get_gpe_device(u32 index, acpi_handle * gpe_device)
+acpi_status acpi_get_gpe_device(u32 index, acpi_handle *gpe_device)
{
struct acpi_gpe_device_info info;
acpi_status status;
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
new file mode 100644
index 000000000..2423fe03e
--- /dev/null
+++ b/drivers/acpi/acpica/exconcat.c
@@ -0,0 +1,439 @@
+/******************************************************************************
+ *
+ * Module Name: exconcat - Concatenate-type AML operators
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2016, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acinterp.h"
+#include "amlresrc.h"
+
+#define _COMPONENT ACPI_EXECUTER
+ACPI_MODULE_NAME("exconcat")
+
+/* Local Prototypes */
+static acpi_status
+acpi_ex_convert_to_object_type_string(union acpi_operand_object *obj_desc,
+ union acpi_operand_object **result_desc);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_do_concatenate
+ *
+ * PARAMETERS: operand0 - First source object
+ * operand1 - Second source object
+ * actual_return_desc - Where to place the return object
+ * walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Concatenate two objects with the ACPI-defined conversion
+ * rules as necessary.
+ * NOTE:
+ * Per the ACPI spec (up to 6.1), Concatenate only supports Integer,
+ * String, and Buffer objects. However, we support all objects here
+ * as an extension. This improves the usefulness of both Concatenate
+ * and the Printf/Fprintf macros. The extension returns a string
+ * describing the object type for the other objects.
+ * 02/2016.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_do_concatenate(union acpi_operand_object *operand0,
+ union acpi_operand_object *operand1,
+ union acpi_operand_object **actual_return_desc,
+ struct acpi_walk_state *walk_state)
+{
+ union acpi_operand_object *local_operand0 = operand0;
+ union acpi_operand_object *local_operand1 = operand1;
+ union acpi_operand_object *temp_operand1 = NULL;
+ union acpi_operand_object *return_desc;
+ char *buffer;
+ acpi_object_type operand0_type;
+ acpi_object_type operand1_type;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ex_do_concatenate);
+
+ /* Operand 0 preprocessing */
+
+ switch (operand0->common.type) {
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+
+ operand0_type = operand0->common.type;
+ break;
+
+ default:
+
+ /* For all other types, get the "object type" string */
+
+ status =
+ acpi_ex_convert_to_object_type_string(operand0,
+ &local_operand0);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ operand0_type = ACPI_TYPE_STRING;
+ break;
+ }
+
+ /* Operand 1 preprocessing */
+
+ switch (operand1->common.type) {
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+
+ operand1_type = operand1->common.type;
+ break;
+
+ default:
+
+ /* For all other types, get the "object type" string */
+
+ status =
+ acpi_ex_convert_to_object_type_string(operand1,
+ &local_operand1);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ operand1_type = ACPI_TYPE_STRING;
+ break;
+ }
+
+ /*
+ * Convert the second operand if necessary. The first operand (0)
+ * determines the type of the second operand (1) (See the Data Types
+ * section of the ACPI specification). Both object types are
+ * guaranteed to be either Integer/String/Buffer by the operand
+ * resolution mechanism.
+ */
+ switch (operand0_type) {
+ case ACPI_TYPE_INTEGER:
+
+ status =
+ acpi_ex_convert_to_integer(local_operand1, &temp_operand1,
+ 16);
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ status =
+ acpi_ex_convert_to_buffer(local_operand1, &temp_operand1);
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ switch (operand1_type) {
+ case ACPI_TYPE_INTEGER:
+ case ACPI_TYPE_STRING:
+ case ACPI_TYPE_BUFFER:
+
+ /* Other types have already been converted to string */
+
+ status =
+ acpi_ex_convert_to_string(local_operand1,
+ &temp_operand1,
+ ACPI_IMPLICIT_CONVERT_HEX);
+ break;
+
+ default:
+
+ status = AE_OK;
+ break;
+ }
+ break;
+
+ default:
+
+ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
+ operand0->common.type));
+ status = AE_AML_INTERNAL;
+ }
+
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ /* Take care with any newly created operand objects */
+
+ if ((local_operand1 != operand1) && (local_operand1 != temp_operand1)) {
+ acpi_ut_remove_reference(local_operand1);
+ }
+
+ local_operand1 = temp_operand1;
+
+ /*
+ * Both operands are now known to be the same object type
+ * (Both are Integer, String, or Buffer), and we can now perform
+ * the concatenation.
+ *
+ * There are three cases to handle, as per the ACPI spec:
+ *
+ * 1) Two Integers concatenated to produce a new Buffer
+ * 2) Two Strings concatenated to produce a new String
+ * 3) Two Buffers concatenated to produce a new Buffer
+ */
+ switch (operand0_type) {
+ case ACPI_TYPE_INTEGER:
+
+ /* Result of two Integers is a Buffer */
+ /* Need enough buffer space for two integers */
+
+ return_desc = acpi_ut_create_buffer_object((acpi_size)
+ ACPI_MUL_2
+ (acpi_gbl_integer_byte_width));
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ buffer = (char *)return_desc->buffer.pointer;
+
+ /* Copy the first integer, LSB first */
+
+ memcpy(buffer, &operand0->integer.value,
+ acpi_gbl_integer_byte_width);
+
+ /* Copy the second integer (LSB first) after the first */
+
+ memcpy(buffer + acpi_gbl_integer_byte_width,
+ &local_operand1->integer.value,
+ acpi_gbl_integer_byte_width);
+ break;
+
+ case ACPI_TYPE_STRING:
+
+ /* Result of two Strings is a String */
+
+ return_desc = acpi_ut_create_string_object(((acpi_size)
+ local_operand0->
+ string.length +
+ local_operand1->
+ string.length));
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ buffer = return_desc->string.pointer;
+
+ /* Concatenate the strings */
+
+ strcpy(buffer, local_operand0->string.pointer);
+ strcat(buffer, local_operand1->string.pointer);
+ break;
+
+ case ACPI_TYPE_BUFFER:
+
+ /* Result of two Buffers is a Buffer */
+
+ return_desc = acpi_ut_create_buffer_object(((acpi_size)
+ operand0->buffer.
+ length +
+ local_operand1->
+ buffer.length));
+ if (!return_desc) {
+ status = AE_NO_MEMORY;
+ goto cleanup;
+ }
+
+ buffer = (char *)return_desc->buffer.pointer;
+
+ /* Concatenate the buffers */
+
+ memcpy(buffer, operand0->buffer.pointer,
+ operand0->buffer.length);
+ memcpy(buffer + operand0->buffer.length,
+ local_operand1->buffer.pointer,
+ local_operand1->buffer.length);
+ break;
+
+ default:
+
+ /* Invalid object type, should not happen here */
+
+ ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
+ operand0->common.type));
+ status = AE_AML_INTERNAL;
+ goto cleanup;
+ }
+
+ *actual_return_desc = return_desc;
+
+cleanup:
+ if (local_operand0 != operand0) {
+ acpi_ut_remove_reference(local_operand0);
+ }
+
+ if (local_operand1 != operand1) {
+ acpi_ut_remove_reference(local_operand1);
+ }
+
+ return_ACPI_STATUS(status);
+}
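
The conversion rule above is easiest to see with concrete values: the type of Operand0 selects the result type, and Operand1 is implicitly converted to match. A minimal standalone sketch of the three result cases in plain C (userspace only; a little-endian host is assumed for the LSB-first copies, and none of these names are ACPICA APIs):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define INTEGER_BYTE_WIDTH 8	/* assume 64-bit AML integers (DSDT rev >= 2) */

int main(void)
{
	/* Case 1: Integer..Integer -> Buffer of 2 * integer width, LSB first */
	uint64_t op0 = 0x1122334455667788ULL, op1 = 0xAABBCCDDEEFF0011ULL;
	uint8_t buf[2 * INTEGER_BYTE_WIDTH];
	char str[16];

	memcpy(buf, &op0, INTEGER_BYTE_WIDTH);			/* first integer */
	memcpy(buf + INTEGER_BYTE_WIDTH, &op1, INTEGER_BYTE_WIDTH); /* second */
	printf("Integer case: %zu-byte buffer, buf[0] = 0x%02X\n",
	       sizeof(buf), (unsigned)buf[0]);	/* 0x88 on little-endian */

	/* Case 2: String..String -> String. Operand1 would first be
	 * converted if needed, e.g. Integer 0xABCD -> string "ABCD". */
	strcpy(str, "ABC");
	strcat(str, "DEF");
	printf("String case: \"%s\"\n", str);	/* "ABCDEF" */

	/* Case 3: Buffer..Buffer -> Buffer is a byte-wise append, done
	 * with the same two memcpy() calls as Case 1. */
	return 0;
}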
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_convert_to_object_type_string
+ *
+ * PARAMETERS: obj_desc - Object to be converted
+ * return_desc - Where to place the return object
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert an object of arbitrary type to a string object that
+ * contains the namestring for the object. Used for the
+ * concatenate operator.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_convert_to_object_type_string(union acpi_operand_object *obj_desc,
+ union acpi_operand_object **result_desc)
+{
+ union acpi_operand_object *return_desc;
+ const char *type_string;
+
+ type_string = acpi_ut_get_type_name(obj_desc->common.type);
+
+ return_desc = acpi_ut_create_string_object(((acpi_size)strlen(type_string) + 9)); /* 9 For "[ Object]" */
+ if (!return_desc) {
+ return (AE_NO_MEMORY);
+ }
+
+ strcpy(return_desc->string.pointer, "[");
+ strcat(return_desc->string.pointer, type_string);
+ strcat(return_desc->string.pointer, " Object]");
+
+ *result_desc = return_desc;
+ return (AE_OK);
+}
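
The extension output looks like "[Device Object]", "[Power Object]", and so on. A trivial standalone equivalent of the formatting (snprintf-based; type_to_object_string is a hypothetical name, not an ACPICA function, which instead builds the string with the strcpy/strcat sequence above):

#include <stdio.h>

static void type_to_object_string(const char *type_name, char *out, size_t size)
{
	/* Same result as the "[" + type name + " Object]" sequence above */
	snprintf(out, size, "[%s Object]", type_name);
}

int main(void)
{
	char s[32];

	type_to_object_string("Device", s, sizeof(s));
	printf("%s\n", s);		/* prints: [Device Object] */
	return 0;
}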
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ex_concat_template
+ *
+ * PARAMETERS: operand0 - First source object
+ * operand1 - Second source object
+ * actual_return_desc - Where to place the return object
+ * walk_state - Current walk state
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Concatenate two resource templates
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_concat_template(union acpi_operand_object *operand0,
+ union acpi_operand_object *operand1,
+ union acpi_operand_object **actual_return_desc,
+ struct acpi_walk_state *walk_state)
+{
+ acpi_status status;
+ union acpi_operand_object *return_desc;
+ u8 *new_buf;
+ u8 *end_tag;
+ acpi_size length0;
+ acpi_size length1;
+ acpi_size new_length;
+
+ ACPI_FUNCTION_TRACE(ex_concat_template);
+
+ /*
+ * Find the end_tag descriptor in each resource template.
+ * Note1: returned pointers point TO the end_tag, not past it.
+ * Note2: zero-length buffers are allowed; treated like one end_tag
+ */
+
+ /* Get the length of the first resource template */
+
+ status = acpi_ut_get_resource_end_tag(operand0, &end_tag);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer);
+
+ /* Get the length of the second resource template */
+
+ status = acpi_ut_get_resource_end_tag(operand1, &end_tag);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer);
+
+ /* Combine both lengths, minimum size will be 2 for end_tag */
+
+ new_length = length0 + length1 + sizeof(struct aml_resource_end_tag);
+
+ /* Create a new buffer object for the result (with one end_tag) */
+
+ return_desc = acpi_ut_create_buffer_object(new_length);
+ if (!return_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ /*
+	 * Copy the templates to the new buffer, Operand0 first, then Operand1.
+	 * Neither copy includes its end_tag; a single new end_tag is written
+	 * below.
+ */
+ new_buf = return_desc->buffer.pointer;
+ memcpy(new_buf, operand0->buffer.pointer, length0);
+ memcpy(new_buf + length0, operand1->buffer.pointer, length1);
+
+	/* Insert one end_tag; a checksum byte of zero means "ignore checksum" */
+
+ new_buf[new_length - 1] = 0;
+ new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1;
+
+ /* Return the completed resource template */
+
+ *actual_return_desc = return_desc;
+ return_ACPI_STATUS(AE_OK);
+}
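
The subtle part above is the end_tag handling: each input's length is measured up to (but not including) its end_tag, and exactly one fresh end_tag plus checksum byte is appended. A standalone sketch over raw bytes (the small-resource end_tag byte is 0x79, i.e. name 0x78 with length 1; the naive scanner below is only illustrative, where the real acpi_ut_get_resource_end_tag() walks descriptors properly):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define END_TAG_NAME 0x78	/* ACPI_RESOURCE_NAME_END_TAG */

/* Offset of the end_tag byte within a template (naive byte scan) */
static size_t find_end_tag(const uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i + 1 < len; i++) {
		if (buf[i] == (END_TAG_NAME | 1))
			return i;
	}
	return len;	/* zero-length buffer: treat as all payload */
}

int main(void)
{
	/* Two tiny templates: one IRQ descriptor each, then end_tag + checksum */
	uint8_t t0[] = { 0x22, 0x10, 0x00, END_TAG_NAME | 1, 0x00 };
	uint8_t t1[] = { 0x22, 0x20, 0x00, END_TAG_NAME | 1, 0x00 };
	uint8_t out[16];
	size_t len0 = find_end_tag(t0, sizeof(t0));
	size_t len1 = find_end_tag(t1, sizeof(t1));
	size_t new_len = len0 + len1 + 2;	/* room for one end_tag */

	memcpy(out, t0, len0);			/* payload of template 0 */
	memcpy(out + len0, t1, len1);		/* payload of template 1 */
	out[new_len - 2] = END_TAG_NAME | 1;	/* single fresh end_tag */
	out[new_len - 1] = 0;			/* checksum 0 = "ignore" */

	printf("concatenated template: %zu bytes\n", new_len);	/* 8 */
	return 0;
}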
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index f74161301..a1d177d58 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -118,7 +118,9 @@ acpi_ex_add_table(u32 table_index,
/* Execute any module-level code that was found in the table */
acpi_ex_exit_interpreter();
- acpi_ns_exec_module_code_list();
+ if (acpi_gbl_group_module_level_code) {
+ acpi_ns_exec_module_code_list();
+ }
acpi_ex_enter_interpreter();
/*
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 0b9f2c13b..b7e9b3d80 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -124,7 +124,9 @@ acpi_ex_convert_to_integer(union acpi_operand_object *obj_desc,
* of ACPI 3.0) is that the to_integer() operator allows both decimal
* and hexadecimal strings (hex prefixed with "0x").
*/
- status = acpi_ut_strtoul64((char *)pointer, flags, &result);
+ status = acpi_ut_strtoul64((char *)pointer, flags,
+ acpi_gbl_integer_byte_width,
+ &result);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
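
The extra width argument matters because AML integers are 32-bit when the DSDT revision is below 2 and 64-bit otherwise, so string-to-integer conversion must truncate accordingly. A standalone sketch of width-limited parsing (strtoull-based; this is not the real acpi_ut_strtoul64 implementation):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Parse a decimal or "0x"-prefixed hex string, truncated to the table's
 * integer width: 4 bytes for DSDT revision < 2, 8 bytes otherwise. */
static uint64_t parse_acpi_integer(const char *s, unsigned byte_width)
{
	uint64_t value = strtoull(s, NULL, 0);	/* base 0: decimal or 0x... */

	if (byte_width < 8)			/* avoid 1ULL << 64 (undefined) */
		value &= (1ULL << (byte_width * 8)) - 1;
	return value;
}

int main(void)
{
	const char *s = "0x1122334455667788";

	printf("8-byte width: 0x%llx\n",
	       (unsigned long long)parse_acpi_integer(s, 8));
	printf("4-byte width: 0x%llx\n",	/* 0x55667788 */
	       (unsigned long long)parse_acpi_integer(s, 4));
	return 0;
}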
@@ -439,7 +441,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
* Need enough space for one ASCII integer (plus null terminator)
*/
return_desc =
- acpi_ut_create_string_object((acpi_size) string_length);
+ acpi_ut_create_string_object((acpi_size)string_length);
if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
@@ -518,7 +520,7 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
}
return_desc =
- acpi_ut_create_string_object((acpi_size) string_length);
+ acpi_ut_create_string_object((acpi_size)string_length);
if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index bea9612e4..613ba6eb0 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -394,7 +394,7 @@ acpi_status acpi_ex_create_processor(struct acpi_walk_state *walk_state)
obj_desc->processor.proc_id = (u8) operand[1]->integer.value;
obj_desc->processor.length = (u8) operand[3]->integer.value;
obj_desc->processor.address =
- (acpi_io_address) operand[2]->integer.value;
+ (acpi_io_address)operand[2]->integer.value;
/* Install the processor object in the parent Node */
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index ee30974b2..fce6b2e10 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -55,9 +55,9 @@ ACPI_MODULE_NAME("exdump")
*/
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
/* Local prototypes */
-static void acpi_ex_out_string(char *title, char *value);
+static void acpi_ex_out_string(const char *title, const char *value);
-static void acpi_ex_out_pointer(char *title, void *value);
+static void acpi_ex_out_pointer(const char *title, const void *value);
static void
acpi_ex_dump_object(union acpi_operand_object *obj_desc,
@@ -365,8 +365,7 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
struct acpi_exdump_info *info)
{
u8 *target;
- char *name;
- const char *reference_name;
+ const char *name;
u8 count;
union acpi_operand_object *start;
union acpi_operand_object *data = NULL;
@@ -459,9 +458,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
case ACPI_EXD_REFERENCE:
- reference_name = acpi_ut_get_reference_name(obj_desc);
acpi_ex_out_string("Class Name",
- ACPI_CAST_PTR(char, reference_name));
+ acpi_ut_get_reference_name
+ (obj_desc));
acpi_ex_dump_reference_obj(obj_desc);
break;
@@ -934,12 +933,12 @@ acpi_ex_dump_operands(union acpi_operand_object **operands,
*
******************************************************************************/
-static void acpi_ex_out_string(char *title, char *value)
+static void acpi_ex_out_string(const char *title, const char *value)
{
acpi_os_printf("%20s : %s\n", title, value);
}
-static void acpi_ex_out_pointer(char *title, void *value)
+static void acpi_ex_out_pointer(const char *title, const void *value)
{
acpi_os_printf("%20s : %p\n", title, value);
}
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index d5d8020a8..d7d3ee363 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -126,7 +126,7 @@ acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length)
******************************************************************************/
acpi_status
-acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
+acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
union acpi_operand_object *obj_desc,
union acpi_operand_object **ret_buffer_desc)
{
@@ -233,7 +233,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
* Note: Field.length is in bits.
*/
length =
- (acpi_size) ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
+ (acpi_size)ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->field.bit_length);
if (length > acpi_gbl_integer_byte_width) {
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index f0c5ed0b7..ee76d299b 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -164,7 +164,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
if (ACPI_ROUND_UP(rgn_desc->region.length,
obj_desc->common_field.
access_byte_width) >=
- ((acpi_size) obj_desc->common_field.
+ ((acpi_size)obj_desc->common_field.
base_byte_offset +
obj_desc->common_field.access_byte_width +
field_datum_byte_offset)) {
@@ -897,17 +897,9 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
access_bit_width = ACPI_MUL_8(obj_desc->common_field.access_byte_width);
- /*
- * Create the bitmasks used for bit insertion.
- * Note: This if/else is used to bypass compiler differences with the
- * shift operator
- */
- if (access_bit_width == ACPI_INTEGER_BIT_SIZE) {
- width_mask = ACPI_UINT64_MAX;
- } else {
- width_mask = ACPI_MASK_BITS_ABOVE(access_bit_width);
- }
+ /* Create the bitmasks used for bit insertion */
+ width_mask = ACPI_MASK_BITS_ABOVE_64(access_bit_width);
mask = width_mask &
ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset);
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index db30ae43d..4f7e66762 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -45,7 +45,6 @@
#include "accommon.h"
#include "acinterp.h"
#include "amlcode.h"
-#include "amlresrc.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exmisc")
@@ -140,295 +139,6 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
/*******************************************************************************
*
- * FUNCTION: acpi_ex_concat_template
- *
- * PARAMETERS: operand0 - First source object
- * operand1 - Second source object
- * actual_return_desc - Where to place the return object
- * walk_state - Current walk state
- *
- * RETURN: Status
- *
- * DESCRIPTION: Concatenate two resource templates
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_concat_template(union acpi_operand_object *operand0,
- union acpi_operand_object *operand1,
- union acpi_operand_object **actual_return_desc,
- struct acpi_walk_state *walk_state)
-{
- acpi_status status;
- union acpi_operand_object *return_desc;
- u8 *new_buf;
- u8 *end_tag;
- acpi_size length0;
- acpi_size length1;
- acpi_size new_length;
-
- ACPI_FUNCTION_TRACE(ex_concat_template);
-
- /*
- * Find the end_tag descriptor in each resource template.
- * Note1: returned pointers point TO the end_tag, not past it.
- * Note2: zero-length buffers are allowed; treated like one end_tag
- */
-
- /* Get the length of the first resource template */
-
- status = acpi_ut_get_resource_end_tag(operand0, &end_tag);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- length0 = ACPI_PTR_DIFF(end_tag, operand0->buffer.pointer);
-
- /* Get the length of the second resource template */
-
- status = acpi_ut_get_resource_end_tag(operand1, &end_tag);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- length1 = ACPI_PTR_DIFF(end_tag, operand1->buffer.pointer);
-
- /* Combine both lengths, minimum size will be 2 for end_tag */
-
- new_length = length0 + length1 + sizeof(struct aml_resource_end_tag);
-
- /* Create a new buffer object for the result (with one end_tag) */
-
- return_desc = acpi_ut_create_buffer_object(new_length);
- if (!return_desc) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- /*
- * Copy the templates to the new buffer, 0 first, then 1 follows. One
- * end_tag descriptor is copied from Operand1.
- */
- new_buf = return_desc->buffer.pointer;
- memcpy(new_buf, operand0->buffer.pointer, length0);
- memcpy(new_buf + length0, operand1->buffer.pointer, length1);
-
- /* Insert end_tag and set the checksum to zero, means "ignore checksum" */
-
- new_buf[new_length - 1] = 0;
- new_buf[new_length - 2] = ACPI_RESOURCE_NAME_END_TAG | 1;
-
- /* Return the completed resource template */
-
- *actual_return_desc = return_desc;
- return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ex_do_concatenate
- *
- * PARAMETERS: operand0 - First source object
- * operand1 - Second source object
- * actual_return_desc - Where to place the return object
- * walk_state - Current walk state
- *
- * RETURN: Status
- *
- * DESCRIPTION: Concatenate two objects OF THE SAME TYPE.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ex_do_concatenate(union acpi_operand_object *operand0,
- union acpi_operand_object *operand1,
- union acpi_operand_object **actual_return_desc,
- struct acpi_walk_state *walk_state)
-{
- union acpi_operand_object *local_operand1 = operand1;
- union acpi_operand_object *return_desc;
- char *new_buf;
- const char *type_string;
- acpi_status status;
-
- ACPI_FUNCTION_TRACE(ex_do_concatenate);
-
- /*
- * Convert the second operand if necessary. The first operand
- * determines the type of the second operand, (See the Data Types
- * section of the ACPI specification.) Both object types are
- * guaranteed to be either Integer/String/Buffer by the operand
- * resolution mechanism.
- */
- switch (operand0->common.type) {
- case ACPI_TYPE_INTEGER:
-
- status =
- acpi_ex_convert_to_integer(operand1, &local_operand1, 16);
- break;
-
- case ACPI_TYPE_STRING:
- /*
- * Per the ACPI spec, Concatenate only supports int/str/buf.
- * However, we support all objects here as an extension.
- * This improves the usefulness of the Printf() macro.
- * 12/2015.
- */
- switch (operand1->common.type) {
- case ACPI_TYPE_INTEGER:
- case ACPI_TYPE_STRING:
- case ACPI_TYPE_BUFFER:
-
- status =
- acpi_ex_convert_to_string(operand1, &local_operand1,
- ACPI_IMPLICIT_CONVERT_HEX);
- break;
-
- default:
- /*
- * Just emit a string containing the object type.
- */
- type_string =
- acpi_ut_get_type_name(operand1->common.type);
-
- local_operand1 = acpi_ut_create_string_object(((acpi_size) strlen(type_string) + 9)); /* 9 For "[Object]" */
- if (!local_operand1) {
- status = AE_NO_MEMORY;
- goto cleanup;
- }
-
- strcpy(local_operand1->string.pointer, "[");
- strcat(local_operand1->string.pointer, type_string);
- strcat(local_operand1->string.pointer, " Object]");
- status = AE_OK;
- break;
- }
- break;
-
- case ACPI_TYPE_BUFFER:
-
- status = acpi_ex_convert_to_buffer(operand1, &local_operand1);
- break;
-
- default:
-
- ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
- operand0->common.type));
- status = AE_AML_INTERNAL;
- }
-
- if (ACPI_FAILURE(status)) {
- goto cleanup;
- }
-
- /*
- * Both operands are now known to be the same object type
- * (Both are Integer, String, or Buffer), and we can now perform the
- * concatenation.
- */
-
- /*
- * There are three cases to handle:
- *
- * 1) Two Integers concatenated to produce a new Buffer
- * 2) Two Strings concatenated to produce a new String
- * 3) Two Buffers concatenated to produce a new Buffer
- */
- switch (operand0->common.type) {
- case ACPI_TYPE_INTEGER:
-
- /* Result of two Integers is a Buffer */
- /* Need enough buffer space for two integers */
-
- return_desc = acpi_ut_create_buffer_object((acpi_size)
- ACPI_MUL_2
- (acpi_gbl_integer_byte_width));
- if (!return_desc) {
- status = AE_NO_MEMORY;
- goto cleanup;
- }
-
- new_buf = (char *)return_desc->buffer.pointer;
-
- /* Copy the first integer, LSB first */
-
- memcpy(new_buf, &operand0->integer.value,
- acpi_gbl_integer_byte_width);
-
- /* Copy the second integer (LSB first) after the first */
-
- memcpy(new_buf + acpi_gbl_integer_byte_width,
- &local_operand1->integer.value,
- acpi_gbl_integer_byte_width);
- break;
-
- case ACPI_TYPE_STRING:
-
- /* Result of two Strings is a String */
-
- return_desc = acpi_ut_create_string_object(((acpi_size)
- operand0->string.
- length +
- local_operand1->
- string.length));
- if (!return_desc) {
- status = AE_NO_MEMORY;
- goto cleanup;
- }
-
- new_buf = return_desc->string.pointer;
-
- /* Concatenate the strings */
-
- strcpy(new_buf, operand0->string.pointer);
- strcat(new_buf, local_operand1->string.pointer);
- break;
-
- case ACPI_TYPE_BUFFER:
-
- /* Result of two Buffers is a Buffer */
-
- return_desc = acpi_ut_create_buffer_object(((acpi_size)
- operand0->buffer.
- length +
- local_operand1->
- buffer.length));
- if (!return_desc) {
- status = AE_NO_MEMORY;
- goto cleanup;
- }
-
- new_buf = (char *)return_desc->buffer.pointer;
-
- /* Concatenate the buffers */
-
- memcpy(new_buf, operand0->buffer.pointer,
- operand0->buffer.length);
- memcpy(new_buf + operand0->buffer.length,
- local_operand1->buffer.pointer,
- local_operand1->buffer.length);
- break;
-
- default:
-
- /* Invalid object type, should not happen here */
-
- ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
- operand0->common.type));
- status = AE_AML_INTERNAL;
- goto cleanup;
- }
-
- *actual_return_desc = return_desc;
-
-cleanup:
- if (local_operand1 != operand1) {
- acpi_ut_remove_reference(local_operand1);
- }
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ex_do_math_op
*
* PARAMETERS: opcode - AML opcode
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 27c11ab5e..3d6af93fe 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -178,7 +178,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
for (index = 0;
(index < ACPI_NAME_SIZE)
- && (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
+ && (acpi_ut_valid_name_char(*aml_address, 0)); index++) {
char_buf[index] = *aml_address++;
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
}
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 5aa21c4ed..69e4e269a 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -184,7 +184,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
/* Get the Integer values from the objects */
index = operand[1]->integer.value;
- length = (acpi_size) operand[2]->integer.value;
+ length = (acpi_size)operand[2]->integer.value;
/*
* If the index is beyond the length of the String/Buffer, or if the
@@ -198,8 +198,8 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
else if ((index + length) > operand[0]->string.length) {
length =
- (acpi_size) operand[0]->string.length -
- (acpi_size) index;
+ (acpi_size)operand[0]->string.length -
+ (acpi_size)index;
}
/* Strings always have a sub-pointer, not so for buffers */
@@ -209,7 +209,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
/* Always allocate a new buffer for the String */
- buffer = ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);
+ buffer = ACPI_ALLOCATE_ZEROED((acpi_size)length + 1);
if (!buffer) {
status = AE_NO_MEMORY;
goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index e2b634838..786d53b0b 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -207,7 +207,7 @@ acpi_ex_do_match(u32 match_op,
*
******************************************************************************/
-acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
+acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state *walk_state)
{
union acpi_operand_object **operand = &walk_state->operands[0];
union acpi_operand_object *return_desc = NULL;
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 076074daf..31b381cae 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -325,15 +325,15 @@ acpi_ex_system_io_space_handler(u32 function,
switch (function) {
case ACPI_READ:
- status = acpi_hw_read_port((acpi_io_address) address,
+ status = acpi_hw_read_port((acpi_io_address)address,
&value32, bit_width);
*value = value32;
break;
case ACPI_WRITE:
- status = acpi_hw_write_port((acpi_io_address) address,
- (u32) * value, bit_width);
+ status = acpi_hw_write_port((acpi_io_address)address,
+ (u32)*value, bit_width);
break;
default:
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index c1e8bfb0f..a183cb740 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -93,7 +93,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
*/
node = *object_ptr;
source_desc = acpi_ns_get_attached_object(node);
- entry_type = acpi_ns_get_type((acpi_handle) node);
+ entry_type = acpi_ns_get_type((acpi_handle)node);
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Entry=%p SourceDesc=%p [%s]\n",
node, source_desc,
@@ -106,7 +106,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
node = ACPI_CAST_PTR(struct acpi_namespace_node, node->object);
source_desc = acpi_ns_get_attached_object(node);
- entry_type = acpi_ns_get_type((acpi_handle) node);
+ entry_type = acpi_ns_get_type((acpi_handle)node);
*object_ptr = node;
}
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index fedacf13d..e1d3878be 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -334,7 +334,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
acpi_status
acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
union acpi_operand_object *operand,
- acpi_object_type * return_type,
+ acpi_object_type *return_type,
union acpi_operand_object **return_desc)
{
union acpi_operand_object *obj_desc = ACPI_CAST_PTR(void, operand);
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index cc2c26c46..27b41fd75 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -131,8 +131,8 @@ acpi_ex_check_object_type(acpi_object_type type_needed,
acpi_status
acpi_ex_resolve_operands(u16 opcode,
- union acpi_operand_object ** stack_ptr,
- struct acpi_walk_state * walk_state)
+ union acpi_operand_object **stack_ptr,
+ struct acpi_walk_state *walk_state)
{
union acpi_operand_object *obj_desc;
acpi_status status = AE_OK;
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 28b724827..1dab82746 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -188,7 +188,7 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
* Clear old string and copy in the new one
*/
memset(target_desc->string.pointer, 0,
- (acpi_size) target_desc->string.length + 1);
+ (acpi_size)target_desc->string.length + 1);
memcpy(target_desc->string.pointer, buffer, length);
} else {
/*
@@ -204,7 +204,7 @@ acpi_ex_store_string_to_string(union acpi_operand_object *source_desc,
}
target_desc->string.pointer =
- ACPI_ALLOCATE_ZEROED((acpi_size) length + 1);
+ ACPI_ALLOCATE_ZEROED((acpi_size)length + 1);
if (!target_desc->string.pointer) {
return_ACPI_STATUS(AE_NO_MEMORY);
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 4d44bc1cb..425f13372 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -301,8 +301,8 @@ static u32 acpi_ex_digits_needed(u64 value, u32 base)
*
* FUNCTION: acpi_ex_eisa_id_to_string
*
- * PARAMETERS: compressed_id - EISAID to be converted
- * out_string - Where to put the converted string (8 bytes)
+ * PARAMETERS: out_string - Where to put the converted string (8 bytes)
+ * compressed_id - EISAID to be converted
*
* RETURN: None
*
@@ -354,7 +354,7 @@ void acpi_ex_eisa_id_to_string(char *out_string, u64 compressed_id)
* possible 64-bit integer.
* value - Value to be converted
*
- * RETURN: None, string
+ * RETURN: Converted string in out_string
*
* DESCRIPTION: Convert a 64-bit integer to decimal string representation.
* Assumes string buffer is large enough to hold the string. The
@@ -384,9 +384,9 @@ void acpi_ex_integer_to_string(char *out_string, u64 value)
* FUNCTION: acpi_ex_pci_cls_to_string
*
* PARAMETERS: out_string - Where to put the converted string (7 bytes)
- * PARAMETERS: class_code - PCI class code to be converted (3 bytes)
+ * class_code - PCI class code to be converted (3 bytes)
*
- * RETURN: None
+ * RETURN: Converted string in out_string
*
* DESCRIPTION: Convert 3-bytes PCI class code to string representation.
* Return buffer must be large enough to hold the string. The
@@ -417,7 +417,7 @@ void acpi_ex_pci_cls_to_string(char *out_string, u8 class_code[3])
*
* PARAMETERS: space_id - ID to be validated
*
- * RETURN: TRUE if valid/supported ID.
+ * RETURN: TRUE if space_id is a valid/supported ID.
*
* DESCRIPTION: Validate an operation region space_ID.
*
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 1c4f45186..bdecd5e76 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -166,7 +166,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
*
******************************************************************************/
-acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
+acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
struct acpi_gpe_register_info *gpe_register_info;
acpi_status status;
@@ -206,7 +206,7 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
******************************************************************************/
acpi_status
-acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
+acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
acpi_event_status *event_status)
{
u32 in_byte;
@@ -391,7 +391,7 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
acpi_status
acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
- struct acpi_gpe_block_info * gpe_block,
+ struct acpi_gpe_block_info *gpe_block,
void *context)
{
u32 i;
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 5ba049841..3b7fb9936 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -51,6 +51,10 @@ ACPI_MODULE_NAME("hwregs")
#if (!ACPI_REDUCED_HARDWARE)
/* Local Prototypes */
+static u8
+acpi_hw_get_access_bit_width(struct acpi_generic_address *reg,
+ u8 max_bit_width);
+
static acpi_status
acpi_hw_read_multiple(u32 *value,
struct acpi_generic_address *register_a,
@@ -65,6 +69,43 @@ acpi_hw_write_multiple(u32 value,
/******************************************************************************
*
+ * FUNCTION: acpi_hw_get_access_bit_width
+ *
+ * PARAMETERS: reg - GAS register structure
+ * max_bit_width - Max bit_width supported (32 or 64)
+ *
+ * RETURN:      Access bit width (8, 16, 32, or 64)
+ *
+ * DESCRIPTION: Obtain optimal access bit width
+ *
+ ******************************************************************************/
+
+static u8
+acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width)
+{
+ if (!reg->access_width) {
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
+ max_bit_width = 32;
+ }
+
+ /*
+ * Detect old register descriptors where only the bit_width field
+		 * makes sense.
+ */
+ if (reg->bit_width < max_bit_width &&
+ !reg->bit_offset && reg->bit_width &&
+ ACPI_IS_POWER_OF_TWO(reg->bit_width) &&
+ ACPI_IS_ALIGNED(reg->bit_width, 8)) {
+ return (reg->bit_width);
+ }
+ return (max_bit_width);
+ } else {
+ return (1 << (reg->access_width + 2));
+ }
+}
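
The GAS access_width field is an encoded size: zero means "unspecified", and 1..4 map to 8/16/32/64-bit accesses via 1 << (access_width + 2). A standalone sketch of the decode plus the legacy-descriptor fallback (the SYSTEM_IO clamp to 32 bits from the function above is omitted for brevity):

#include <stdio.h>
#include <stdint.h>

#define IS_POWER_OF_TWO(x)	((x) && !((x) & ((x) - 1)))

/* Honor access_width when present; otherwise accept a plausible
 * bit_width from an old-style descriptor, else use the maximum. */
static uint8_t access_bit_width(uint8_t access_width, uint8_t bit_width,
				uint8_t bit_offset, uint8_t max_bit_width)
{
	if (access_width)
		return 1 << (access_width + 2);	/* 1->8, 2->16, 3->32, 4->64 */

	if (bit_width < max_bit_width && !bit_offset && bit_width &&
	    IS_POWER_OF_TWO(bit_width) && (bit_width % 8) == 0)
		return bit_width;

	return max_bit_width;
}

int main(void)
{
	printf("%u\n", (unsigned)access_bit_width(2, 0, 0, 32));  /* 16 (encoded) */
	printf("%u\n", (unsigned)access_bit_width(0, 8, 0, 32));  /* 8  (legacy)  */
	printf("%u\n", (unsigned)access_bit_width(0, 0, 0, 32));  /* 32 (default) */
	return 0;
}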
+
+/******************************************************************************
+ *
* FUNCTION: acpi_hw_validate_register
*
* PARAMETERS: reg - GAS register structure
@@ -83,6 +124,8 @@ acpi_status
acpi_hw_validate_register(struct acpi_generic_address *reg,
u8 max_bit_width, u64 *address)
{
+ u8 bit_width;
+ u8 access_width;
/* Must have a valid pointer to a GAS structure */
@@ -109,23 +152,25 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
return (AE_SUPPORT);
}
- /* Validate the bit_width */
+ /* Validate the access_width */
- if ((reg->bit_width != 8) &&
- (reg->bit_width != 16) &&
- (reg->bit_width != 32) && (reg->bit_width != max_bit_width)) {
+ if (reg->access_width > 4) {
ACPI_ERROR((AE_INFO,
- "Unsupported register bit width: 0x%X",
- reg->bit_width));
+ "Unsupported register access width: 0x%X",
+ reg->access_width));
return (AE_SUPPORT);
}
- /* Validate the bit_offset. Just a warning for now. */
+ /* Validate the bit_width, convert access_width into number of bits */
- if (reg->bit_offset != 0) {
+ access_width = acpi_hw_get_access_bit_width(reg, max_bit_width);
+ bit_width =
+ ACPI_ROUND_UP(reg->bit_offset + reg->bit_width, access_width);
+ if (max_bit_width < bit_width) {
ACPI_WARNING((AE_INFO,
- "Unsupported register bit offset: 0x%X",
- reg->bit_offset));
+ "Requested bit width 0x%X is smaller than register bit width 0x%X",
+ max_bit_width, bit_width));
+ return (AE_SUPPORT);
}
return (AE_OK);
@@ -145,17 +190,19 @@ acpi_hw_validate_register(struct acpi_generic_address *reg,
* 64-bit values is not needed.
*
* LIMITATIONS: <These limitations also apply to acpi_hw_write>
- * bit_width must be exactly 8, 16, or 32.
* space_ID must be system_memory or system_IO.
- * bit_offset and access_width are currently ignored, as there has
- * not been a need to implement these.
*
******************************************************************************/
acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
{
u64 address;
+ u8 access_width;
+ u32 bit_width;
+ u8 bit_offset;
u64 value64;
+ u32 value32;
+ u8 index;
acpi_status status;
ACPI_FUNCTION_NAME(hw_read);
@@ -167,28 +214,75 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
return (status);
}
- /* Initialize entire 32-bit return value to zero */
-
+ /*
+	 * Initialize the entire 32-bit return value to zero, then convert
+	 * access_width into a number of bits
+ */
*value = 0;
+ access_width = acpi_hw_get_access_bit_width(reg, 32);
+ bit_width = reg->bit_offset + reg->bit_width;
+ bit_offset = reg->bit_offset;
/*
* Two address spaces supported: Memory or IO. PCI_Config is
* not supported here because the GAS structure is insufficient
*/
- if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- status = acpi_os_read_memory((acpi_physical_address)
- address, &value64, reg->bit_width);
+ index = 0;
+ while (bit_width) {
+ if (bit_offset >= access_width) {
+ value32 = 0;
+ bit_offset -= access_width;
+ } else {
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ status =
+ acpi_os_read_memory((acpi_physical_address)
+ address +
+ index *
+ ACPI_DIV_8
+ (access_width),
+ &value64, access_width);
+ value32 = (u32)value64;
+ } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+ status = acpi_hw_read_port((acpi_io_address)
+ address +
+ index *
+ ACPI_DIV_8
+ (access_width),
+ &value32,
+ access_width);
+ }
+
+ /*
+ * Use offset style bit masks because:
+			 * bit_offset < access_width, bit_width < access_width,
+			 * and access_width is ensured to be less than 32 bits by
+ * acpi_hw_validate_register().
+ */
+ if (bit_offset) {
+ value32 &= ACPI_MASK_BITS_BELOW(bit_offset);
+ bit_offset = 0;
+ }
+ if (bit_width < access_width) {
+ value32 &= ACPI_MASK_BITS_ABOVE(bit_width);
+ }
+ }
- *value = (u32)value64;
- } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+ /*
+ * Use offset style bit writes because "Index * AccessWidth" is
+	 * ensured to be less than 32 bits by acpi_hw_validate_register().
+ */
+ ACPI_SET_BITS(value, index * access_width,
+ ACPI_MASK_BITS_ABOVE_32(access_width), value32);
- status = acpi_hw_read_port((acpi_io_address)
- address, value, reg->bit_width);
+ bit_width -=
+ bit_width > access_width ? access_width : bit_width;
+ index++;
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
"Read: %8.8X width %2d from %8.8X%8.8X (%s)\n",
- *value, reg->bit_width, ACPI_FORMAT_UINT64(address),
+ *value, access_width, ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
return (status);
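
The rewritten read path splits a wide or oddly-aligned register into access-width chunks, masks off bits outside [bit_offset, bit_offset + bit_width), and ORs each chunk into place without ever shifting the field down. A standalone sketch of the same reassembly with the hardware access replaced by an in-memory byte array (the mask macros are simplified 32-bit versions of the ACPI_MASK_BITS_* idea):

#include <stdio.h>
#include <stdint.h>

#define MASK_BITS_BELOW(pos)	(~((1u << (pos)) - 1))	/* clear bits < pos  */
#define MASK_BITS_ABOVE(pos)	((1u << (pos)) - 1)	/* clear bits >= pos */

int main(void)
{
	/* Fake 32-bit register 0x55667788 backed by four 8-bit "ports" */
	const uint8_t port[4] = { 0x88, 0x77, 0x66, 0x55 };
	unsigned access_width = 8;		/* bits per access */
	unsigned bit_offset = 4;		/* field starts at bit 4 */
	unsigned bit_width = bit_offset + 12;	/* field covers bits 4..15 */
	uint32_t value = 0, chunk;
	unsigned index = 0;

	while (bit_width) {
		if (bit_offset >= access_width) {
			chunk = 0;		/* chunk is below the field */
			bit_offset -= access_width;
		} else {
			chunk = port[index];	/* "read" one access unit */
			if (bit_offset) {
				chunk &= MASK_BITS_BELOW(bit_offset);
				bit_offset = 0;
			}
			if (bit_width < access_width)
				chunk &= MASK_BITS_ABOVE(bit_width);
		}
		value |= chunk << (index * access_width);
		bit_width -= bit_width > access_width ? access_width : bit_width;
		index++;
	}

	/* Bits 4..15 of 0x55667788, kept in place: 0x7780 */
	printf("value = 0x%X\n", value);
	return 0;
}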
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index a01ddb393..98c26ff39 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -91,10 +91,9 @@ acpi_status acpi_reset(void)
* compatibility with other ACPI implementations that have allowed
* BIOS code with bad register width values to go unnoticed.
*/
- status =
- acpi_os_write_port((acpi_io_address) reset_reg->address,
- acpi_gbl_FADT.reset_value,
- ACPI_RESET_REGISTER_WIDTH);
+ status = acpi_os_write_port((acpi_io_address)reset_reg->address,
+ acpi_gbl_FADT.reset_value,
+ ACPI_RESET_REGISTER_WIDTH);
} else {
/* Write the reset value to the reset register */
@@ -504,9 +503,7 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
* Evaluate the \_Sx namespace object containing the register values
* for this state
*/
- info->relative_pathname = ACPI_CAST_PTR(char,
- acpi_gbl_sleep_state_names
- [sleep_state]);
+ info->relative_pathname = acpi_gbl_sleep_state_names[sleep_state];
status = acpi_ns_evaluate(info);
if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 697af810e..426a6307e 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -107,9 +107,10 @@ acpi_status acpi_ns_root_initialize(void)
continue;
}
- status = acpi_ns_lookup(NULL, init_val->name, init_val->type,
- ACPI_IMODE_LOAD_PASS2,
- ACPI_NS_NO_UPSEARCH, NULL, &new_node);
+ status =
+ acpi_ns_lookup(NULL, (char *)init_val->name, init_val->type,
+ ACPI_IMODE_LOAD_PASS2, ACPI_NS_NO_UPSEARCH,
+ NULL, &new_node);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not create predefined name %s",
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index 878e8fb6a..c803bda79 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -79,7 +79,8 @@ acpi_ns_convert_to_integer(union acpi_operand_object *original_object,
/* String-to-Integer conversion */
status = acpi_ut_strtoul64(original_object->string.pointer,
- ACPI_ANY_BASE, &value);
+ ACPI_ANY_BASE,
+ acpi_gbl_integer_byte_width, &value);
if (ACPI_FAILURE(status)) {
return (status);
}
@@ -317,7 +318,7 @@ acpi_ns_convert_to_buffer(union acpi_operand_object *original_object,
******************************************************************************/
acpi_status
-acpi_ns_convert_to_unicode(struct acpi_namespace_node * scope,
+acpi_ns_convert_to_unicode(struct acpi_namespace_node *scope,
union acpi_operand_object *original_object,
union acpi_operand_object **return_object)
{
@@ -384,7 +385,7 @@ acpi_ns_convert_to_unicode(struct acpi_namespace_node * scope,
******************************************************************************/
acpi_status
-acpi_ns_convert_to_resource(struct acpi_namespace_node * scope,
+acpi_ns_convert_to_resource(struct acpi_namespace_node *scope,
union acpi_operand_object *original_object,
union acpi_operand_object **return_object)
{
@@ -463,7 +464,7 @@ acpi_ns_convert_to_resource(struct acpi_namespace_node * scope,
******************************************************************************/
acpi_status
-acpi_ns_convert_to_reference(struct acpi_namespace_node * scope,
+acpi_ns_convert_to_reference(struct acpi_namespace_node *scope,
union acpi_operand_object *original_object,
union acpi_operand_object **return_object)
{
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index af236e348..ce1f8605d 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -81,7 +81,7 @@ acpi_ns_get_max_depth(acpi_handle obj_handle,
*
******************************************************************************/
-void acpi_ns_print_pathname(u32 num_segments, char *pathname)
+void acpi_ns_print_pathname(u32 num_segments, const char *pathname)
{
u32 i;
@@ -114,6 +114,9 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
acpi_os_printf("]\n");
}
+#ifdef ACPI_OBSOLETE_FUNCTIONS
+/* Not used at this time, perhaps later */
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_dump_pathname
@@ -131,7 +134,8 @@ void acpi_ns_print_pathname(u32 num_segments, char *pathname)
******************************************************************************/
void
-acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
+acpi_ns_dump_pathname(acpi_handle handle,
+ const char *msg, u32 level, u32 component)
{
ACPI_FUNCTION_TRACE(ns_dump_pathname);
@@ -148,6 +152,7 @@ acpi_ns_dump_pathname(acpi_handle handle, char *msg, u32 level, u32 component)
acpi_os_printf("\n");
return_VOID;
}
+#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index d4aa8b696..36643a8cf 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -140,6 +140,7 @@ acpi_status acpi_ns_initialize_devices(u32 flags)
{
acpi_status status = AE_OK;
struct acpi_device_walk_info info;
+ acpi_handle handle;
ACPI_FUNCTION_TRACE(ns_initialize_devices);
@@ -190,6 +191,27 @@ acpi_status acpi_ns_initialize_devices(u32 flags)
if (ACPI_SUCCESS(status)) {
info.num_INI++;
}
+
+ /*
+ * Execute \_SB._INI.
+ * There appears to be a strict order requirement for \_SB._INI,
+ * which should be evaluated before any _REG evaluations.
+ */
+ status = acpi_get_handle(NULL, "\\_SB", &handle);
+ if (ACPI_SUCCESS(status)) {
+ memset(info.evaluate_info, 0,
+ sizeof(struct acpi_evaluate_info));
+ info.evaluate_info->prefix_node = handle;
+ info.evaluate_info->relative_pathname =
+ METHOD_NAME__INI;
+ info.evaluate_info->parameters = NULL;
+ info.evaluate_info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ status = acpi_ns_evaluate(info.evaluate_info);
+ if (ACPI_SUCCESS(status)) {
+ info.num_INI++;
+ }
+ }
}
/*
@@ -198,6 +220,12 @@ acpi_status acpi_ns_initialize_devices(u32 flags)
* Note: Any objects accessed by the _REG methods will be automatically
* initialized, even if they contain executable AML (see the call to
* acpi_ns_initialize_objects below).
+ *
+	 * Note: According to the ACPI specification, we do not need to execute
+	 * _REG for system_memory/system_io operation regions. However, _REG
+	 * must still be evaluated for PCI_Config operation regions on a PCI
+	 * root bus that does not contain a _BBN object, so this code is kept
+	 * here to avoid breaking such platforms.
*/
if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@@ -592,33 +620,37 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
* Note: We know there is an _INI within this subtree, but it may not be
* under this particular device, it may be lower in the branch.
*/
- ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
- (ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI));
-
- memset(info, 0, sizeof(struct acpi_evaluate_info));
- info->prefix_node = device_node;
- info->relative_pathname = METHOD_NAME__INI;
- info->parameters = NULL;
- info->flags = ACPI_IGNORE_RETURN_VALUE;
-
- status = acpi_ns_evaluate(info);
-
- if (ACPI_SUCCESS(status)) {
- walk_info->num_INI++;
- }
+ if (!ACPI_COMPARE_NAME(device_node->name.ascii, "_SB_") ||
+ device_node->parent != acpi_gbl_root_node) {
+ ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+ (ACPI_TYPE_METHOD, device_node,
+ METHOD_NAME__INI));
+
+ memset(info, 0, sizeof(struct acpi_evaluate_info));
+ info->prefix_node = device_node;
+ info->relative_pathname = METHOD_NAME__INI;
+ info->parameters = NULL;
+ info->flags = ACPI_IGNORE_RETURN_VALUE;
+
+ status = acpi_ns_evaluate(info);
+ if (ACPI_SUCCESS(status)) {
+ walk_info->num_INI++;
+ }
#ifdef ACPI_DEBUG_OUTPUT
- else if (status != AE_NOT_FOUND) {
+ else if (status != AE_NOT_FOUND) {
- /* Ignore error and move on to next device */
+ /* Ignore error and move on to next device */
- char *scope_name =
- acpi_ns_get_normalized_pathname(device_node, TRUE);
+ char *scope_name =
+ acpi_ns_get_normalized_pathname(device_node, TRUE);
- ACPI_EXCEPTION((AE_INFO, status, "during %s._INI execution",
- scope_name));
- ACPI_FREE(scope_name);
- }
+ ACPI_EXCEPTION((AE_INFO, status,
+ "during %s._INI execution",
+ scope_name));
+ ACPI_FREE(scope_name);
+ }
#endif
+ }
/* Ignore errors from above */
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 75cdb8790..b5e2b0ada 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -123,8 +123,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
acpi_ns_delete_namespace_by_owner(acpi_gbl_root_table_list.
tables[table_index].owner_id);
- acpi_tb_release_owner_id(table_index);
+ acpi_tb_release_owner_id(table_index);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index eb6e1b88a..f03dd41e8 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -113,7 +113,7 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
acpi_status
acpi_ns_handle_to_pathname(acpi_handle target_handle,
- struct acpi_buffer * buffer, u8 no_trailing)
+ struct acpi_buffer *buffer, u8 no_trailing)
{
acpi_status status;
struct acpi_namespace_node *node;
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 051306f0d..cfa2bb716 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -399,7 +399,7 @@ acpi_ns_attach_data(struct acpi_namespace_node *node,
******************************************************************************/
acpi_status
-acpi_ns_detach_data(struct acpi_namespace_node * node,
+acpi_ns_detach_data(struct acpi_namespace_node *node,
acpi_object_handler handler)
{
union acpi_operand_object *obj_desc;
@@ -444,7 +444,7 @@ acpi_ns_detach_data(struct acpi_namespace_node * node,
******************************************************************************/
acpi_status
-acpi_ns_get_attached_data(struct acpi_namespace_node * node,
+acpi_ns_get_attached_data(struct acpi_namespace_node *node,
acpi_object_handler handler, void **data)
{
union acpi_operand_object *obj_desc;
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 9047f2808..fbedc6e8a 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -62,6 +62,10 @@ acpi_ns_check_package_elements(struct acpi_evaluate_info *info,
u32 count1,
u8 type2, u32 count2, u32 start_index);
+static acpi_status
+acpi_ns_custom_package(struct acpi_evaluate_info *info,
+ union acpi_operand_object **elements, u32 count);
+
/*******************************************************************************
*
* FUNCTION: acpi_ns_check_package
@@ -135,6 +139,11 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
* PTYPE2 packages contain subpackages
*/
switch (package->ret_info.type) {
+ case ACPI_PTYPE_CUSTOM:
+
+ status = acpi_ns_custom_package(info, elements, count);
+ break;
+
case ACPI_PTYPE1_FIXED:
/*
* The package count is fixed and there are no subpackages
@@ -179,6 +188,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
if (ACPI_FAILURE(status)) {
return (status);
}
+
elements++;
}
break;
@@ -225,6 +235,7 @@ acpi_ns_check_package(struct acpi_evaluate_info *info,
return (status);
}
}
+
elements++;
}
break;
@@ -569,11 +580,13 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
if (sub_package->package.count < expected_count) {
goto package_too_small;
}
+
if (sub_package->package.count <
package->ret_info.count1) {
expected_count = package->ret_info.count1;
goto package_too_small;
}
+
if (expected_count == 0) {
/*
* Either the num_entries element was originally zero or it was
@@ -622,6 +635,83 @@ package_too_small:
/*******************************************************************************
*
+ * FUNCTION: acpi_ns_custom_package
+ *
+ * PARAMETERS: info - Method execution information block
+ * elements - Pointer to the package elements array
+ * count - Element count for the package
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check a returned package object for the correct count and
+ * correct type of all sub-objects.
+ *
+ * NOTE: Currently used for the _BIX method only. When needed for two or more
+ * methods, a detect/dispatch mechanism will probably be required.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_custom_package(struct acpi_evaluate_info *info,
+ union acpi_operand_object **elements, u32 count)
+{
+ u32 expected_count;
+ u32 version;
+ acpi_status status = AE_OK;
+
+ ACPI_FUNCTION_NAME(ns_custom_package);
+
+ /* Get version number, must be Integer */
+
+ if ((*elements)->common.type != ACPI_TYPE_INTEGER) {
+ ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
+ info->node_flags,
+ "Return Package has invalid object type for version number"));
+ return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
+ }
+
+ version = (u32)(*elements)->integer.value;
+ expected_count = 21; /* Version 1 */
+
+ if (version == 0) {
+ expected_count = 20; /* Version 0 */
+ }
+
+ if (count < expected_count) {
+ ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
+ info->node_flags,
+ "Return Package is too small - found %u elements, expected %u",
+ count, expected_count));
+ return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
+ } else if (count > expected_count) {
+ ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
+ "%s: Return Package is larger than needed - "
+ "found %u, expected %u\n",
+ info->full_pathname, count, expected_count));
+ }
+
+ /* Validate all elements of the returned package */
+
+ status = acpi_ns_check_package_elements(info, elements,
+ ACPI_RTYPE_INTEGER, 16,
+ ACPI_RTYPE_STRING, 4, 0);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Version 1 has a single trailing integer */
+
+ if (version > 0) {
+ status = acpi_ns_check_package_elements(info, elements + 20,
+ ACPI_RTYPE_INTEGER, 1,
+ 0, 0, 20);
+ }
+
+ return_ACPI_STATUS(status);
+}
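
For reference, the layout being validated: element 0 of the returned _BIX package is the revision Integer; revision 0 packages have 20 elements (16 Integers followed by 4 Strings) and revision 1 appends one more Integer for 21 total. A standalone sketch of the same count/type walk over a toy element stream (check_bix and the enum are hypothetical, not ACPICA types):

#include <stdio.h>

enum elem_type { T_INT, T_STR };

/* Toy type-stream check mirroring the _BIX validation above */
static int check_bix(const enum elem_type *e, unsigned count, unsigned version)
{
	unsigned expected = version ? 21 : 20;
	unsigned i;

	if (count < expected)
		return -1;			/* package too small */
	for (i = 0; i < 16; i++)		/* elements 0..15: Integers */
		if (e[i] != T_INT)
			return -1;
	for (; i < 20; i++)			/* elements 16..19: Strings */
		if (e[i] != T_STR)
			return -1;
	if (version > 0 && e[20] != T_INT)	/* revision 1 trailer */
		return -1;
	return 0;
}

int main(void)
{
	enum elem_type bix[21] = {
		T_INT, T_INT, T_INT, T_INT, T_INT, T_INT, T_INT, T_INT,
		T_INT, T_INT, T_INT, T_INT, T_INT, T_INT, T_INT, T_INT,
		T_STR, T_STR, T_STR, T_STR, T_INT,
	};

	printf("_BIX check: %s\n", check_bix(bix, 21, 1) ? "FAIL" : "ok");
	return 0;
}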
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ns_check_package_elements
*
* PARAMETERS: info - Method execution information block
@@ -661,6 +751,7 @@ acpi_ns_check_package_elements(struct acpi_evaluate_info *info,
if (ACPI_FAILURE(status)) {
return (status);
}
+
this_element++;
}
@@ -671,6 +762,7 @@ acpi_ns_check_package_elements(struct acpi_evaluate_info *info,
if (ACPI_FAILURE(status)) {
return (status);
}
+
this_element++;
}
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 805e36de8..9523d41c7 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -399,7 +399,7 @@ static const struct acpi_simple_repair_info *acpi_ns_match_simple_repair(struct
******************************************************************************/
acpi_status
-acpi_ns_repair_null_element(struct acpi_evaluate_info * info,
+acpi_ns_repair_null_element(struct acpi_evaluate_info *info,
u32 expected_btypes,
u32 package_index,
union acpi_operand_object **return_object_ptr)
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 63edbbbf9..d53361224 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -54,9 +54,9 @@ ACPI_MODULE_NAME("nsrepair2")
* be repaired on a per-name basis.
*/
typedef
-acpi_status(*acpi_repair_function) (struct acpi_evaluate_info * info,
- union acpi_operand_object
- **return_object_ptr);
+acpi_status (*acpi_repair_function) (struct acpi_evaluate_info * info,
+ union acpi_operand_object **
+ return_object_ptr);
typedef struct acpi_repair_info {
char name[ACPI_NAME_SIZE];
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index c72cc62b9..784a30b76 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -272,11 +272,11 @@ acpi_status acpi_ns_build_internal_name(struct acpi_namestring_info *info)
result = &internal_name[i];
} else if (num_segments == 2) {
internal_name[i] = AML_DUAL_NAME_PREFIX;
- result = &internal_name[(acpi_size) i + 1];
+ result = &internal_name[(acpi_size)i + 1];
} else {
internal_name[i] = AML_MULTI_NAME_PREFIX_OP;
- internal_name[(acpi_size) i + 1] = (char)num_segments;
- result = &internal_name[(acpi_size) i + 2];
+ internal_name[(acpi_size)i + 1] = (char)num_segments;
+ result = &internal_name[(acpi_size)i + 2];
}
}
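
For context on the prefixes being indexed here: a single 4-character name segment is stored bare, two segments get the dual-name prefix 0x2E, and three or more get the multi-name prefix 0x2F followed by a segment-count byte. A standalone sketch that encodes pre-split, pre-padded segments (the real code also handles root '\' and parent '^' prefixes and pads short names with '_'):

#include <stdio.h>
#include <string.h>

#define AML_DUAL_NAME_PREFIX	0x2E
#define AML_MULTI_NAME_PREFIX_OP	0x2F

/* Encode 4-char segments into an internal AML name string */
static size_t encode_segments(const char segs[][5], unsigned n, char *out)
{
	size_t i = 0;
	unsigned s;

	if (n == 2) {
		out[i++] = AML_DUAL_NAME_PREFIX;
	} else if (n > 2) {
		out[i++] = AML_MULTI_NAME_PREFIX_OP;
		out[i++] = (char)n;		/* segment count byte */
	}
	for (s = 0; s < n; s++) {
		memcpy(out + i, segs[s], 4);	/* copy exactly 4 chars */
		i += 4;
	}
	return i;
}

int main(void)
{
	const char segs[3][5] = { "_SB_", "PCI0", "GPP0" };
	char out[32];
	size_t len = encode_segments(segs, 3, out);

	/* 0x2F, count 3, then 12 name bytes: 14 bytes total */
	printf("prefix 0x%02X, count %u, %zu bytes\n",
	       (unsigned)(unsigned char)out[0], (unsigned)out[1], len);
	return 0;
}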
@@ -456,7 +456,7 @@ acpi_ns_externalize_name(u32 internal_name_length,
names_index = prefix_length + 2;
num_segments = (u8)
- internal_name[(acpi_size) prefix_length + 1];
+ internal_name[(acpi_size)prefix_length + 1];
break;
case AML_DUAL_NAME_PREFIX:
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index a7deeaa8e..d2a9b4fd7 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -256,7 +256,7 @@ acpi_evaluate_object(acpi_handle handle,
* Allocate a new parameter block for the internal objects
* Add 1 to count to allow for null terminated internal list
*/
- info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size) info->
+ info->parameters = ACPI_ALLOCATE_ZEROED(((acpi_size)info->
param_count +
1) * sizeof(void *));
if (!info->parameters) {
@@ -280,13 +280,12 @@ acpi_evaluate_object(acpi_handle handle,
info->parameters[info->param_count] = NULL;
}
-#if 0
+#ifdef _FUTURE_FEATURE
/*
* Begin incoming argument count analysis. Check for too few args
* and too many args.
*/
-
switch (acpi_ns_get_type(info->node)) {
case ACPI_TYPE_METHOD:
@@ -370,68 +369,68 @@ acpi_evaluate_object(acpi_handle handle,
* If we are expecting a return value, and all went well above,
* copy the return value to an external object.
*/
- if (return_buffer) {
- if (!info->return_object) {
- return_buffer->length = 0;
- } else {
- if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
- ACPI_DESC_TYPE_NAMED) {
- /*
- * If we received a NS Node as a return object, this means that
- * the object we are evaluating has nothing interesting to
- * return (such as a mutex, etc.) We return an error because
- * these types are essentially unsupported by this interface.
- * We don't check up front because this makes it easier to add
- * support for various types at a later date if necessary.
- */
- status = AE_TYPE;
- info->return_object = NULL; /* No need to delete a NS Node */
- return_buffer->length = 0;
- }
+ if (!return_buffer) {
+ goto cleanup_return_object;
+ }
- if (ACPI_SUCCESS(status)) {
+ if (!info->return_object) {
+ return_buffer->length = 0;
+ goto cleanup;
+ }
- /* Dereference Index and ref_of references */
+ if (ACPI_GET_DESCRIPTOR_TYPE(info->return_object) ==
+ ACPI_DESC_TYPE_NAMED) {
+ /*
+ * If we received a NS Node as a return object, this means that
+ * the object we are evaluating has nothing interesting to
+ * return (such as a mutex, etc.) We return an error because
+ * these types are essentially unsupported by this interface.
+ * We don't check up front because this makes it easier to add
+ * support for various types at a later date if necessary.
+ */
+ status = AE_TYPE;
+ info->return_object = NULL; /* No need to delete a NS Node */
+ return_buffer->length = 0;
+ }
- acpi_ns_resolve_references(info);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup_return_object;
+ }
- /* Get the size of the returned object */
+ /* Dereference Index and ref_of references */
- status =
- acpi_ut_get_object_size(info->return_object,
- &buffer_space_needed);
- if (ACPI_SUCCESS(status)) {
-
- /* Validate/Allocate/Clear caller buffer */
-
- status =
- acpi_ut_initialize_buffer
- (return_buffer,
- buffer_space_needed);
- if (ACPI_FAILURE(status)) {
- /*
- * Caller's buffer is too small or a new one can't
- * be allocated
- */
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Needed buffer size %X, %s\n",
- (u32)
- buffer_space_needed,
- acpi_format_exception
- (status)));
- } else {
- /* We have enough space for the object, build it */
-
- status =
- acpi_ut_copy_iobject_to_eobject
- (info->return_object,
- return_buffer);
- }
- }
- }
+ acpi_ns_resolve_references(info);
+
+ /* Get the size of the returned object */
+
+ status = acpi_ut_get_object_size(info->return_object,
+ &buffer_space_needed);
+ if (ACPI_SUCCESS(status)) {
+
+ /* Validate/Allocate/Clear caller buffer */
+
+ status = acpi_ut_initialize_buffer(return_buffer,
+ buffer_space_needed);
+ if (ACPI_FAILURE(status)) {
+ /*
+ * Caller's buffer is too small or a new one can't
+ * be allocated
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Needed buffer size %X, %s\n",
+ (u32)buffer_space_needed,
+ acpi_format_exception(status)));
+ } else {
+ /* We have enough space for the object, build it */
+
+ status =
+ acpi_ut_copy_iobject_to_eobject(info->return_object,
+ return_buffer);
}
}
+cleanup_return_object:
+
if (info->return_object) {
/*
* Delete the internal return object. NOTE: Interpreter must be
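The rewrite above flattens acpi_evaluate_object()'s return-buffer handling into early exits that converge on cleanup labels, so every path releases the internal return object exactly once. A minimal sketch of that error-handling shape (names are illustrative, not the ACPICA code):

/*
 * Early "goto cleanup" exits replace deeply nested if/else, so the
 * cleanup code runs on every path.
 */
#include <stdio.h>
#include <stdlib.h>

static int evaluate(int want_result, int *out)
{
    int status = 0;
    int *scratch = malloc(sizeof(*scratch));

    if (!scratch) {
        status = -1;
        goto cleanup;
    }
    *scratch = 42;

    if (!want_result) {
        goto cleanup;          /* nothing else to do, still free scratch */
    }
    *out = *scratch;

cleanup:
    free(scratch);             /* single exit point releases resources */
    return status;
}

int main(void)
{
    int v = 0;

    printf("status=%d value=%d\n", evaluate(1, &v), v);
    return 0;
}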
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 285b82044..76a1bd4bb 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -78,7 +78,7 @@ static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest,
acpi_status
acpi_get_handle(acpi_handle parent,
- acpi_string pathname, acpi_handle * ret_handle)
+ acpi_string pathname, acpi_handle *ret_handle)
{
acpi_status status;
struct acpi_namespace_node *node = NULL;
@@ -155,7 +155,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_handle)
*
******************************************************************************/
acpi_status
-acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
+acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer *buffer)
{
acpi_status status;
struct acpi_namespace_node *node;
@@ -448,7 +448,7 @@ acpi_get_object_info(acpi_handle handle,
/* Point past the CID PNP_DEVICE_ID array */
next_id_string +=
- ((acpi_size) cid_list->count *
+ ((acpi_size)cid_list->count *
sizeof(struct acpi_pnp_device_id));
}
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index c312cd490..32d372b85 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -63,7 +63,7 @@ ACPI_MODULE_NAME("nsxfobj")
* DESCRIPTION: This routine returns the type associated with a particular handle
*
******************************************************************************/
-acpi_status acpi_get_type(acpi_handle handle, acpi_object_type * ret_type)
+acpi_status acpi_get_type(acpi_handle handle, acpi_object_type *ret_type)
{
struct acpi_namespace_node *node;
acpi_status status;
@@ -115,7 +115,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_type)
* Handle.
*
******************************************************************************/
-acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
+acpi_status acpi_get_parent(acpi_handle handle, acpi_handle *ret_handle)
{
struct acpi_namespace_node *node;
struct acpi_namespace_node *parent_node;
@@ -183,7 +183,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_parent)
acpi_status
acpi_get_next_object(acpi_object_type type,
acpi_handle parent,
- acpi_handle child, acpi_handle * ret_handle)
+ acpi_handle child, acpi_handle *ret_handle)
{
acpi_status status;
struct acpi_namespace_node *node;
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index d48cbed34..c29c930ff 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -87,7 +87,7 @@ acpi_ps_get_next_package_length(struct acpi_parse_state *parser_state)
* used to encode the package length, either 0, 1, 2, or 3
*/
byte_count = (aml[0] >> 6);
- parser_state->aml += ((acpi_size) byte_count + 1);
+ parser_state->aml += ((acpi_size)byte_count + 1);
/* Get bytes 3, 2, 1 as needed */
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index cfd17a4f2..177b05b23 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -158,7 +158,7 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
*
******************************************************************************/
-char *acpi_ps_get_opcode_name(u16 opcode)
+const char *acpi_ps_get_opcode_name(u16 opcode)
{
#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUG_OUTPUT)
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 8038ed2ac..0a23897d8 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -130,8 +130,8 @@ u16 acpi_ps_peek_opcode(struct acpi_parse_state * parser_state)
******************************************************************************/
acpi_status
-acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
- union acpi_parse_object * op)
+acpi_ps_complete_this_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
{
union acpi_parse_object *prev;
union acpi_parse_object *next;
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index b28b0da17..89cb4bffc 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -128,7 +128,7 @@ union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml)
if (op_info->flags & AML_DEFER) {
flags = ACPI_PARSEOP_DEFERRED;
} else if (op_info->flags & AML_NAMED) {
- flags = ACPI_PARSEOP_NAMED;
+ flags = ACPI_PARSEOP_NAMED_OBJECT;
} else if (opcode == AML_INT_BYTELIST_OP) {
flags = ACPI_PARSEOP_BYTELIST;
}
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 04b37fcca..cf30cd821 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -115,7 +115,7 @@ acpi_debug_trace(const char *name, u32 debug_level, u32 debug_layer, u32 flags)
*
******************************************************************************/
-acpi_status acpi_ps_execute_method(struct acpi_evaluate_info * info)
+acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
{
acpi_status status;
union acpi_parse_object *op;
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 2b1209d73..f1e83addd 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -112,7 +112,7 @@ acpi_rs_struct_option_length(struct acpi_resource_source *resource_source)
* resource_source_index (1).
*/
if (resource_source->string_ptr) {
- return ((acpi_rs_length) (resource_source->string_length + 1));
+ return ((acpi_rs_length)(resource_source->string_length + 1));
}
return (0);
@@ -188,7 +188,7 @@ acpi_rs_stream_option_length(u32 resource_length,
acpi_status
acpi_rs_get_aml_length(struct acpi_resource *resource,
- acpi_size resource_list_size, acpi_size * size_needed)
+ acpi_size resource_list_size, acpi_size *size_needed)
{
acpi_size aml_size_needed = 0;
struct acpi_resource *resource_end;
@@ -278,11 +278,11 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
* 16-Bit Address Resource:
* Add the size of the optional resource_source info
*/
- total_size = (acpi_rs_length) (total_size +
- acpi_rs_struct_option_length
- (&resource->data.
- address16.
- resource_source));
+ total_size = (acpi_rs_length)(total_size +
+ acpi_rs_struct_option_length
+ (&resource->data.
+ address16.
+ resource_source));
break;
case ACPI_RESOURCE_TYPE_ADDRESS32:
@@ -290,11 +290,11 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
* 32-Bit Address Resource:
* Add the size of the optional resource_source info
*/
- total_size = (acpi_rs_length) (total_size +
- acpi_rs_struct_option_length
- (&resource->data.
- address32.
- resource_source));
+ total_size = (acpi_rs_length)(total_size +
+ acpi_rs_struct_option_length
+ (&resource->data.
+ address32.
+ resource_source));
break;
case ACPI_RESOURCE_TYPE_ADDRESS64:
@@ -302,11 +302,11 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
* 64-Bit Address Resource:
* Add the size of the optional resource_source info
*/
- total_size = (acpi_rs_length) (total_size +
- acpi_rs_struct_option_length
- (&resource->data.
- address64.
- resource_source));
+ total_size = (acpi_rs_length)(total_size +
+ acpi_rs_struct_option_length
+ (&resource->data.
+ address64.
+ resource_source));
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
@@ -315,28 +315,28 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
* Add the size of each additional optional interrupt beyond the
* required 1 (4 bytes for each u32 interrupt number)
*/
- total_size = (acpi_rs_length) (total_size +
- ((resource->data.
- extended_irq.
- interrupt_count -
- 1) * 4) +
- /* Add the size of the optional resource_source info */
- acpi_rs_struct_option_length
- (&resource->data.
+ total_size = (acpi_rs_length)(total_size +
+ ((resource->data.
extended_irq.
- resource_source));
+ interrupt_count -
+ 1) * 4) +
+ /* Add the size of the optional resource_source info */
+ acpi_rs_struct_option_length
+ (&resource->data.
+ extended_irq.
+ resource_source));
break;
case ACPI_RESOURCE_TYPE_GPIO:
- total_size = (acpi_rs_length) (total_size +
- (resource->data.gpio.
- pin_table_length * 2) +
- resource->data.gpio.
- resource_source.
- string_length +
- resource->data.gpio.
- vendor_length);
+ total_size = (acpi_rs_length)(total_size +
+ (resource->data.gpio.
+ pin_table_length * 2) +
+ resource->data.gpio.
+ resource_source.
+ string_length +
+ resource->data.gpio.
+ vendor_length);
break;
@@ -348,14 +348,14 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
common_serial_bus.
type];
- total_size = (acpi_rs_length) (total_size +
- resource->data.
- i2c_serial_bus.
- resource_source.
- string_length +
- resource->data.
- i2c_serial_bus.
- vendor_length);
+ total_size = (acpi_rs_length)(total_size +
+ resource->data.
+ i2c_serial_bus.
+ resource_source.
+ string_length +
+ resource->data.
+ i2c_serial_bus.
+ vendor_length);
break;
@@ -397,8 +397,8 @@ acpi_rs_get_aml_length(struct acpi_resource *resource,
******************************************************************************/
acpi_status
-acpi_rs_get_list_length(u8 * aml_buffer,
- u32 aml_buffer_length, acpi_size * size_needed)
+acpi_rs_get_list_length(u8 *aml_buffer,
+ u32 aml_buffer_length, acpi_size *size_needed)
{
acpi_status status;
u8 *end_aml;
@@ -610,7 +610,7 @@ acpi_rs_get_list_length(u8 * aml_buffer,
acpi_status
acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
- acpi_size * buffer_size_needed)
+ acpi_size *buffer_size_needed)
{
u32 number_of_elements;
acpi_size temp_size_needed = 0;
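The recurring (acpi_rs_length) expressions reformatted above all add the size of an optional ResourceSource: its string length plus one byte for the ResourceSourceIndex, or nothing when the source is absent, as acpi_rs_struct_option_length() computes at the top of this file. A standalone sketch of that length math (struct and field names are illustrative):

/*
 * Optional ResourceSource contributes string length + 1 index byte.
 */
#include <stdio.h>

struct resource_source {
    const char *string_ptr;          /* optional ResourceSource string */
    unsigned short string_length;    /* its length in bytes */
};

static unsigned short option_length(const struct resource_source *src)
{
    /* string length + 1 byte for the ResourceSourceIndex, else 0 */
    return src->string_ptr ? (unsigned short)(src->string_length + 1) : 0;
}

int main(void)
{
    struct resource_source present = { "\\_SB.PCI0", 10 };
    struct resource_source absent  = { NULL, 0 };

    printf("present: %u bytes, absent: %u bytes\n",
           (unsigned)option_length(&present),
           (unsigned)option_length(&absent));
    return 0;
}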
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 12978891e..809b61c11 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -347,7 +347,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
(u8 *) output_buffer->pointer);
path_buffer.pointer = user_prt->source;
- status = acpi_ns_handle_to_pathname((acpi_handle) node, &path_buffer, FALSE);
+ status = acpi_ns_handle_to_pathname((acpi_handle)node, &path_buffer, FALSE);
/* +1 to include null terminator */
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 23a17c86d..5ffdb5602 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -52,17 +52,17 @@ ACPI_MODULE_NAME("rsdump")
* All functions in this module are used by the AML Debugger only
*/
/* Local prototypes */
-static void acpi_rs_out_string(char *title, char *value);
+static void acpi_rs_out_string(const char *title, const char *value);
-static void acpi_rs_out_integer8(char *title, u8 value);
+static void acpi_rs_out_integer8(const char *title, u8 value);
-static void acpi_rs_out_integer16(char *title, u16 value);
+static void acpi_rs_out_integer16(const char *title, u16 value);
-static void acpi_rs_out_integer32(char *title, u32 value);
+static void acpi_rs_out_integer32(const char *title, u32 value);
-static void acpi_rs_out_integer64(char *title, u64 value);
+static void acpi_rs_out_integer64(const char *title, u64 value);
-static void acpi_rs_out_title(char *title);
+static void acpi_rs_out_title(const char *title);
static void acpi_rs_dump_byte_list(u16 length, u8 *data);
@@ -208,7 +208,7 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
{
u8 *target = NULL;
u8 *previous_target;
- char *name;
+ const char *name;
u8 count;
/* First table entry must contain the table length (# of table entries) */
@@ -248,10 +248,8 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
case ACPI_RSD_UINT8:
if (table->pointer) {
- acpi_rs_out_string(name, ACPI_CAST_PTR(char,
- table->
- pointer
- [*target]));
+ acpi_rs_out_string(name,
+ table->pointer[*target]);
} else {
acpi_rs_out_integer8(name, ACPI_GET8(target));
}
@@ -276,26 +274,20 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
case ACPI_RSD_1BITFLAG:
- acpi_rs_out_string(name, ACPI_CAST_PTR(char,
- table->
- pointer[*target &
- 0x01]));
+ acpi_rs_out_string(name,
+ table->pointer[*target & 0x01]);
break;
case ACPI_RSD_2BITFLAG:
- acpi_rs_out_string(name, ACPI_CAST_PTR(char,
- table->
- pointer[*target &
- 0x03]));
+ acpi_rs_out_string(name,
+ table->pointer[*target & 0x03]);
break;
case ACPI_RSD_3BITFLAG:
- acpi_rs_out_string(name, ACPI_CAST_PTR(char,
- table->
- pointer[*target &
- 0x07]));
+ acpi_rs_out_string(name,
+ table->pointer[*target & 0x07]);
break;
case ACPI_RSD_SHORTLIST:
@@ -481,7 +473,7 @@ static void acpi_rs_dump_address_common(union acpi_resource_data *resource)
*
******************************************************************************/
-static void acpi_rs_out_string(char *title, char *value)
+static void acpi_rs_out_string(const char *title, const char *value)
{
acpi_os_printf("%27s : %s", title, value);
@@ -491,30 +483,30 @@ static void acpi_rs_out_string(char *title, char *value)
acpi_os_printf("\n");
}
-static void acpi_rs_out_integer8(char *title, u8 value)
+static void acpi_rs_out_integer8(const char *title, u8 value)
{
acpi_os_printf("%27s : %2.2X\n", title, value);
}
-static void acpi_rs_out_integer16(char *title, u16 value)
+static void acpi_rs_out_integer16(const char *title, u16 value)
{
acpi_os_printf("%27s : %4.4X\n", title, value);
}
-static void acpi_rs_out_integer32(char *title, u32 value)
+static void acpi_rs_out_integer32(const char *title, u32 value)
{
acpi_os_printf("%27s : %8.8X\n", title, value);
}
-static void acpi_rs_out_integer64(char *title, u64 value)
+static void acpi_rs_out_integer64(const char *title, u64 value)
{
acpi_os_printf("%27s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value));
}
-static void acpi_rs_out_title(char *title)
+static void acpi_rs_out_title(const char *title)
{
acpi_os_printf("%27s : ", title);
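With the const char * signatures above, the flag decoders reduce to indexing a table of read-only strings with a masked flag value, which is why the ACPI_CAST_PTR(char, ...) wrappers could be dropped. A minimal sketch of the pattern (table contents are illustrative):

/*
 * Decode a 1-bit flag by indexing a const string table with the
 * masked value, mirroring acpi_rs_out_string(name, table->pointer[*target & 0x01]).
 */
#include <stdio.h>

static const char *const consume_decode[] = { "Producer", "Consumer" };

static void out_string(const char *title, const char *value)
{
    printf("%27s : %s\n", title, value);
}

int main(void)
{
    unsigned char flags = 0x01;

    out_string("ProducerConsumer", consume_decode[flags & 0x01]);
    return 0;
}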
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
index 5c3491387..61e8f16c8 100644
--- a/drivers/acpi/acpica/rsdumpinfo.c
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -330,19 +330,20 @@ struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \
{ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.connection_sharing),"ConnectionSharing", acpi_gbl_shr_decode}, \
{ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \
{ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \
{ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \
{ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \
{ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL},
-struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
+struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[11] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
"Common Serial Bus", NULL},
ACPI_RS_DUMP_COMMON_SERIAL_BUS
};
-struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
+struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[14] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
"I2C Serial Bus", NULL},
ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
@@ -355,7 +356,7 @@ struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
"SlaveAddress", NULL},
};
-struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
+struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[18] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
"Spi Serial Bus", NULL},
ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
@@ -376,7 +377,7 @@ struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
"ConnectionSpeed", NULL},
};
-struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
+struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[20] = {
{ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
"Uart Serial Bus", NULL},
ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index ce3d0b77e..25165ca42 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -87,7 +87,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- if (((acpi_size) resource) & 0x3) {
+ if (((acpi_size)resource) & 0x3) {
/* Each internal resource struct is expected to be 32-bit aligned */
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index 8a01296ac..b82c061f2 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -151,7 +151,7 @@ struct acpi_rsconvert_info acpi_rs_convert_gpio[18] = {
*
******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
+struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[17] = {
{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
ACPI_RSC_TABLE_SIZE(acpi_rs_convert_i2c_serial_bus)},
@@ -177,6 +177,11 @@ struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
AML_OFFSET(common_serial_bus.flags),
1},
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.connection_sharing),
+ AML_OFFSET(common_serial_bus.flags),
+ 2},
+
{ACPI_RSC_MOVE8,
ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
AML_OFFSET(common_serial_bus.type_revision_id),
@@ -237,7 +242,7 @@ struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
*
******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
+struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[21] = {
{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
ACPI_RSC_TABLE_SIZE(acpi_rs_convert_spi_serial_bus)},
@@ -263,6 +268,11 @@ struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
AML_OFFSET(common_serial_bus.flags),
1},
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.connection_sharing),
+ AML_OFFSET(common_serial_bus.flags),
+ 2},
+
{ACPI_RSC_MOVE8,
ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
AML_OFFSET(common_serial_bus.type_revision_id),
@@ -339,7 +349,7 @@ struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
*
******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[22] = {
+struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[23] = {
{ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
ACPI_RSC_TABLE_SIZE(acpi_rs_convert_uart_serial_bus)},
@@ -365,6 +375,11 @@ struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[22] = {
AML_OFFSET(common_serial_bus.flags),
1},
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.connection_sharing),
+ AML_OFFSET(common_serial_bus.flags),
+ 2},
+
{ACPI_RSC_MOVE8,
ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
AML_OFFSET(common_serial_bus.type_revision_id),
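Each of the three new table entries above binds a ConnectionSharing field to bit 2 of the serial-bus Flags byte (the trailing 2 in the ACPI_RSC_1BITFLAG entry). A standalone sketch of the bit extraction those entries describe (the other field-to-bit assignments here are illustrative placeholders for what the existing entries cover):

/*
 * Pull individual bits out of the serial-bus Flags byte; bit 2 is the
 * ConnectionSharing flag added by this change.
 */
#include <stdio.h>

struct serial_bus_flags {
    unsigned char slave_mode;          /* illustrative: an existing flag bit */
    unsigned char producer_consumer;   /* illustrative: an existing flag bit */
    unsigned char connection_sharing;  /* flags bit 2, added here */
};

static void decode_flags(unsigned char aml_flags, struct serial_bus_flags *out)
{
    out->slave_mode         = (aml_flags >> 0) & 1;
    out->producer_consumer  = (aml_flags >> 1) & 1;
    out->connection_sharing = (aml_flags >> 2) & 1;
}

int main(void)
{
    struct serial_bus_flags f;

    decode_flags(0x04, &f);
    printf("shared=%u\n", f.connection_sharing);
    return 0;
}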
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index cf06e49cd..fa491c64c 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -338,7 +338,7 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
* Note: Some resource descriptors will have an additional null, so
* we add 1 to the minimum length.
*/
- if (total_length > (acpi_rsdesc_size) (minimum_length + 1)) {
+ if (total_length > (acpi_rsdesc_size)(minimum_length + 1)) {
/* Get the resource_source_index */
@@ -377,7 +377,7 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
ACPI_CAST_PTR(char,
&aml_resource_source[1]));
- return ((acpi_rs_length) total_length);
+ return ((acpi_rs_length)total_length);
}
/* resource_source is not present */
@@ -406,9 +406,9 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
******************************************************************************/
acpi_rsdesc_size
-acpi_rs_set_resource_source(union aml_resource * aml,
+acpi_rs_set_resource_source(union aml_resource *aml,
acpi_rs_length minimum_length,
- struct acpi_resource_source * resource_source)
+ struct acpi_resource_source *resource_source)
{
u8 *aml_resource_source;
acpi_rsdesc_size descriptor_length;
@@ -466,8 +466,8 @@ acpi_rs_set_resource_source(union aml_resource * aml,
******************************************************************************/
acpi_status
-acpi_rs_get_prt_method_data(struct acpi_namespace_node * node,
- struct acpi_buffer * ret_buffer)
+acpi_rs_get_prt_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer)
{
union acpi_operand_object *obj_desc;
acpi_status status;
@@ -671,7 +671,7 @@ acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
acpi_status
acpi_rs_get_method_data(acpi_handle handle,
- char *path, struct acpi_buffer *ret_buffer)
+ const char *path, struct acpi_buffer *ret_buffer)
{
union acpi_operand_object *obj_desc;
acpi_status status;
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 900933be9..465ed8137 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -433,8 +433,8 @@ ACPI_EXPORT_SYMBOL(acpi_resource_to_address64)
acpi_status
acpi_get_vendor_resource(acpi_handle device_handle,
char *name,
- struct acpi_vendor_uuid * uuid,
- struct acpi_buffer * ret_buffer)
+ struct acpi_vendor_uuid *uuid,
+ struct acpi_buffer *ret_buffer)
{
struct acpi_vendor_walk_info info;
acpi_status status;
@@ -539,7 +539,7 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
******************************************************************************/
acpi_status
-acpi_walk_resource_buffer(struct acpi_buffer * buffer,
+acpi_walk_resource_buffer(struct acpi_buffer *buffer,
acpi_walk_resource_callback user_function,
void *context)
{
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 7da79ce74..1388a19e5 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -368,7 +368,7 @@ acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc)
*****************************************************************************/
acpi_status
-acpi_tb_verify_temp_table(struct acpi_table_desc * table_desc, char *signature)
+acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
{
acpi_status status = AE_OK;
@@ -401,9 +401,9 @@ acpi_tb_verify_temp_table(struct acpi_table_desc * table_desc, char *signature)
ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
"%4.4s 0x%8.8X%8.8X"
" Attempted table install failed",
- acpi_ut_valid_acpi_name(table_desc->
- signature.
- ascii) ?
+ acpi_ut_valid_nameseg(table_desc->
+ signature.
+ ascii) ?
table_desc->signature.ascii : "????",
ACPI_FORMAT_UINT64(table_desc->
address)));
@@ -454,7 +454,7 @@ acpi_status acpi_tb_resize_root_table_list(void)
table_count = acpi_gbl_root_table_list.current_table_count;
}
- tables = ACPI_ALLOCATE_ZEROED(((acpi_size) table_count +
+ tables = ACPI_ALLOCATE_ZEROED(((acpi_size)table_count +
ACPI_ROOT_TABLE_SIZE_INCREMENT) *
sizeof(struct acpi_table_desc));
if (!tables) {
@@ -467,8 +467,7 @@ acpi_status acpi_tb_resize_root_table_list(void)
if (acpi_gbl_root_table_list.tables) {
memcpy(tables, acpi_gbl_root_table_list.tables,
- (acpi_size) table_count *
- sizeof(struct acpi_table_desc));
+ (acpi_size)table_count * sizeof(struct acpi_table_desc));
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
ACPI_FREE(acpi_gbl_root_table_list.tables);
@@ -701,7 +700,7 @@ acpi_status acpi_tb_release_owner_id(u32 table_index)
*
******************************************************************************/
-acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id * owner_id)
+acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id)
{
acpi_status status = AE_BAD_PARAMETER;
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index a79e4f30b..620806965 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -53,7 +53,7 @@ static void
acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
u8 space_id,
u8 byte_width,
- u64 address, char *register_name, u8 flags);
+ u64 address, const char *register_name, u8 flags);
static void acpi_tb_convert_fadt(void);
@@ -65,7 +65,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64);
/* Table for conversion of FADT to common internal format and FADT validation */
typedef struct acpi_fadt_info {
- char *name;
+ const char *name;
u16 address64;
u16 address32;
u16 length;
@@ -192,7 +192,7 @@ static void
acpi_tb_init_generic_address(struct acpi_generic_address *generic_address,
u8 space_id,
u8 byte_width,
- u64 address, char *register_name, u8 flags)
+ u64 address, const char *register_name, u8 flags)
{
u8 bit_width;
@@ -344,7 +344,7 @@ void acpi_tb_parse_fadt(void)
/* Obtain the DSDT and FACS tables via their addresses within the FADT */
- acpi_tb_install_fixed_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
+ acpi_tb_install_fixed_table((acpi_physical_address)acpi_gbl_FADT.Xdsdt,
ACPI_SIG_DSDT, &acpi_gbl_dsdt_index);
/* If Hardware Reduced flag is set, there is no FACS */
@@ -385,14 +385,15 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
{
/*
* Check if the FADT is larger than the largest table that we expect
- * (the ACPI 5.0 version). If so, truncate the table, and issue
- * a warning.
+ * (typically the current ACPI specification version). If so, truncate
+ * the table, and issue a warning.
*/
if (length > sizeof(struct acpi_table_fadt)) {
ACPI_BIOS_WARNING((AE_INFO,
- "FADT (revision %u) is longer than ACPI 5.0 version, "
+ "FADT (revision %u) is longer than %s length, "
"truncating length %u to %u",
- table->revision, length,
+ table->revision, ACPI_FADT_CONFORMANCE,
+ length,
(u32)sizeof(struct acpi_table_fadt)));
}
@@ -467,7 +468,7 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
static void acpi_tb_convert_fadt(void)
{
- char *name;
+ const char *name;
struct acpi_generic_address *address64;
u32 address32;
u8 length;
@@ -646,9 +647,12 @@ static void acpi_tb_convert_fadt(void)
if ((address64->address && !length) ||
(!address64->address && length)) {
ACPI_BIOS_WARNING((AE_INFO,
- "Optional FADT field %s has zero address or length: "
- "0x%8.8X%8.8X/0x%X",
- name,
+ "Optional FADT field %s has valid %s but zero %s: "
+ "0x%8.8X%8.8X/0x%X", name,
+ (length ? "Length" :
+ "Address"),
+ (length ? "Address" :
+ "Length"),
ACPI_FORMAT_UINT64
(address64->address),
length));
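The reworked warning above fires when exactly one of a generic address entry's Address and Length is zero, and the message now names which half is the valid one. A standalone sketch of that consistency check (field values are made up for illustration):

/*
 * A generic address entry is suspect when exactly one of Address and
 * Length is zero; report which half is valid.
 */
#include <stdint.h>
#include <stdio.h>

static void check_gas(const char *name, uint64_t address, uint8_t length)
{
    if ((address && !length) || (!address && length)) {
        printf("Optional FADT field %s has valid %s but zero %s\n",
               name,
               length ? "Length" : "Address",
               length ? "Address" : "Length");
    }
}

int main(void)
{
    check_gas("Pm2ControlBlock", 0, 1);      /* inconsistent: warns */
    check_gas("PmTimerBlock", 0x408, 4);     /* consistent: silent */
    return 0;
}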
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index f2d080346..e348d616e 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -76,7 +76,7 @@ acpi_tb_find_table(char *signature,
/* Validate the input table signature */
- if (!acpi_is_valid_signature(signature)) {
+ if (!acpi_ut_valid_nameseg(signature)) {
return_ACPI_STATUS(AE_BAD_SIGNATURE);
}
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 4dc6108de..8b1305212 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -299,9 +299,9 @@ acpi_tb_install_standard_table(acpi_physical_address address,
ACPI_BIOS_ERROR((AE_INFO,
"Table has invalid signature [%4.4s] (0x%8.8X), "
"must be SSDT or OEMx",
- acpi_ut_valid_acpi_name(new_table_desc.
- signature.
- ascii) ?
+ acpi_ut_valid_nameseg(new_table_desc.
+ signature.
+ ascii) ?
new_table_desc.signature.
ascii : "????",
new_table_desc.signature.integer));
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 9240c76d2..e28553914 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -231,7 +231,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
ACPI_FORMAT_UINT64(address64)));
}
#endif
- return ((acpi_physical_address) (address64));
+ return ((acpi_physical_address)(address64));
}
}
@@ -287,12 +287,12 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
* the XSDT if the revision is > 1 and the XSDT pointer is present,
* as per the ACPI specification.
*/
- address = (acpi_physical_address) rsdp->xsdt_physical_address;
+ address = (acpi_physical_address)rsdp->xsdt_physical_address;
table_entry_size = ACPI_XSDT_ENTRY_SIZE;
} else {
/* Root table is an RSDT (32-bit physical addresses) */
- address = (acpi_physical_address) rsdp->rsdt_physical_address;
+ address = (acpi_physical_address)rsdp->rsdt_physical_address;
table_entry_size = ACPI_RSDT_ENTRY_SIZE;
}
@@ -380,30 +380,3 @@ next_table:
acpi_os_unmap_memory(table, length);
return_ACPI_STATUS(AE_OK);
}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_is_valid_signature
- *
- * PARAMETERS: signature - Sig string to be validated
- *
- * RETURN: TRUE if signature is has 4 valid ACPI characters
- *
- * DESCRIPTION: Validate an ACPI table signature.
- *
- ******************************************************************************/
-
-u8 acpi_is_valid_signature(char *signature)
-{
- u32 i;
-
- /* Validate each character in the signature */
-
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
- if (!acpi_ut_valid_acpi_char(signature[i], i)) {
- return (FALSE);
- }
- }
-
- return (TRUE);
-}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 326df65de..3ecec937e 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -99,7 +99,7 @@ acpi_status acpi_allocate_root_table(u32 initial_table_count)
******************************************************************************/
acpi_status __init
-acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
+acpi_initialize_tables(struct acpi_table_desc *initial_table_array,
u32 initial_table_count, u8 allow_resize)
{
acpi_physical_address rsdp_address;
@@ -120,7 +120,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
/* Root Table Array has been statically allocated by the host */
memset(initial_table_array, 0,
- (acpi_size) initial_table_count *
+ (acpi_size)initial_table_count *
sizeof(struct acpi_table_desc));
acpi_gbl_root_table_list.tables = initial_table_array;
@@ -352,7 +352,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_table)
*
******************************************************************************/
acpi_status
-acpi_get_table_by_index(u32 table_index, struct acpi_table_header ** table)
+acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 3151968c1..ac71abcd3 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -82,7 +82,7 @@ acpi_status __init acpi_load_tables(void)
* their customized default region handlers.
*/
status = acpi_ev_install_region_handlers();
- if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
+ if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"During Region initialization"));
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index b9a78e457..adb6cfc54 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -90,7 +90,7 @@ u32 acpi_tb_get_rsdp_length(struct acpi_table_rsdp *rsdp)
*
******************************************************************************/
-acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp * rsdp)
+acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
{
/*
@@ -142,7 +142,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp * rsdp)
*
******************************************************************************/
-acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address)
+acpi_status __init acpi_find_root_pointer(acpi_physical_address *table_address)
{
u8 *table_ptr;
u8 *mem_rover;
@@ -201,7 +201,7 @@ acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address)
(u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
*table_address =
- (acpi_physical_address) physical_address;
+ (acpi_physical_address)physical_address;
return_ACPI_STATUS(AE_OK);
}
}
@@ -234,7 +234,7 @@ acpi_status __init acpi_find_root_pointer(acpi_physical_address * table_address)
(ACPI_HI_RSDP_WINDOW_BASE +
ACPI_PTR_DIFF(mem_rover, table_ptr));
- *table_address = (acpi_physical_address) physical_address;
+ *table_address = (acpi_physical_address)physical_address;
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 3dbdc3ab8..13324a27b 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -231,7 +231,7 @@ acpi_status acpi_ut_delete_caches(void)
*
******************************************************************************/
-acpi_status acpi_ut_validate_buffer(struct acpi_buffer * buffer)
+acpi_status acpi_ut_validate_buffer(struct acpi_buffer *buffer)
{
/* Obviously, the structure pointer must be valid */
@@ -272,8 +272,7 @@ acpi_status acpi_ut_validate_buffer(struct acpi_buffer * buffer)
******************************************************************************/
acpi_status
-acpi_ut_initialize_buffer(struct acpi_buffer * buffer,
- acpi_size required_length)
+acpi_ut_initialize_buffer(struct acpi_buffer *buffer, acpi_size required_length)
{
acpi_size input_buffer_length;
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
new file mode 100644
index 000000000..706c1f346
--- /dev/null
+++ b/drivers/acpi/acpica/utascii.c
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * Module Name: utascii - Utility ascii functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2016, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_valid_nameseg
+ *
+ * PARAMETERS: name - The name or table signature to be examined.
+ * Four characters, does not have to be a
+ * NULL terminated string.
+ *
+ * RETURN: TRUE if signature has 4 valid ACPI characters
+ *
+ * DESCRIPTION: Validate an ACPI table signature.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_nameseg(char *name)
+{
+ u32 i;
+
+ /* Validate each character in the signature */
+
+ for (i = 0; i < ACPI_NAME_SIZE; i++) {
+ if (!acpi_ut_valid_name_char(name[i], i)) {
+ return (FALSE);
+ }
+ }
+
+ return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_valid_name_char
+ *
+ * PARAMETERS: character - The character to be examined
+ * position - Byte position (0-3)
+ *
+ * RETURN: TRUE if the character is valid, FALSE otherwise
+ *
+ * DESCRIPTION: Check for a valid ACPI character. Must be one of:
+ * 1) Upper case alpha
+ * 2) numeric
+ * 3) underscore
+ *
+ * We allow a '!' as the last character because of the ASF! table
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_valid_name_char(char character, u32 position)
+{
+
+ if (!((character >= 'A' && character <= 'Z') ||
+ (character >= '0' && character <= '9') || (character == '_'))) {
+
+ /* Allow a '!' in the last position */
+
+ if (character == '!' && position == 3) {
+ return (TRUE);
+ }
+
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_check_and_repair_ascii
+ *
+ * PARAMETERS: name - Ascii string
+ * count - Number of characters to check
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Ensure that the requested number of characters are printable
+ * ASCII characters. Replaces non-printable characters with
+ * <space>; copying stops once a null is reached.
+ *
+ ******************************************************************************/
+
+void acpi_ut_check_and_repair_ascii(u8 *name, char *repaired_name, u32 count)
+{
+ u32 i;
+
+ for (i = 0; i < count; i++) {
+ repaired_name[i] = (char)name[i];
+
+ if (!name[i]) {
+ return;
+ }
+ if (!isprint(name[i])) {
+ repaired_name[i] = ' ';
+ }
+ }
+}
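The new utascii.c module centralizes the signature-validation helpers that tbutils.c, tbdata.c, and tbinstal.c now call as acpi_ut_valid_nameseg(). A hedged standalone re-implementation showing the intended use (same logic as the file above, not the ACPICA build):

/*
 * Validate a 4-character ACPI name/table signature: uppercase alpha,
 * digits, underscore, plus '!' in the last position for the ASF! table.
 */
#include <stdio.h>

#define NAME_SIZE 4

static int valid_name_char(char c, unsigned pos)
{
    if ((c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_') {
        return 1;
    }
    return c == '!' && pos == 3;   /* allow ASF! as a signature */
}

static int valid_nameseg(const char *name)
{
    unsigned i;

    for (i = 0; i < NAME_SIZE; i++) {
        if (!valid_name_char(name[i], i)) {
            return 0;
        }
    }
    return 1;
}

int main(void)
{
    printf("DSDT: %d, ASF!: %d, dsdt: %d\n",
           valid_nameseg("DSDT"), valid_nameseg("ASF!"),
           valid_nameseg("dsdt"));
    return 0;
}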
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 0cfb2b8ed..bd31faf5d 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -106,31 +106,31 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
default: /* Default is BYTE display */
acpi_os_printf("%02X ",
- buffer[(acpi_size) i + j]);
+ buffer[(acpi_size)i + j]);
break;
case DB_WORD_DISPLAY:
ACPI_MOVE_16_TO_32(&temp32,
- &buffer[(acpi_size) i + j]);
+ &buffer[(acpi_size)i + j]);
acpi_os_printf("%04X ", temp32);
break;
case DB_DWORD_DISPLAY:
ACPI_MOVE_32_TO_32(&temp32,
- &buffer[(acpi_size) i + j]);
+ &buffer[(acpi_size)i + j]);
acpi_os_printf("%08X ", temp32);
break;
case DB_QWORD_DISPLAY:
ACPI_MOVE_32_TO_32(&temp32,
- &buffer[(acpi_size) i + j]);
+ &buffer[(acpi_size)i + j]);
acpi_os_printf("%08X", temp32);
ACPI_MOVE_32_TO_32(&temp32,
- &buffer[(acpi_size) i + j +
+ &buffer[(acpi_size)i + j +
4]);
acpi_os_printf("%08X ", temp32);
break;
@@ -158,7 +158,7 @@ void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 base_offset)
acpi_os_printf("// ");
}
- buf_char = buffer[(acpi_size) i + j];
+ buf_char = buffer[(acpi_size)i + j];
if (isprint(buf_char)) {
acpi_os_printf("%c", buf_char);
} else {
@@ -274,31 +274,31 @@ acpi_ut_dump_buffer_to_file(ACPI_FILE file,
default: /* Default is BYTE display */
acpi_ut_file_printf(file, "%02X ",
- buffer[(acpi_size) i + j]);
+ buffer[(acpi_size)i + j]);
break;
case DB_WORD_DISPLAY:
ACPI_MOVE_16_TO_32(&temp32,
- &buffer[(acpi_size) i + j]);
+ &buffer[(acpi_size)i + j]);
acpi_ut_file_printf(file, "%04X ", temp32);
break;
case DB_DWORD_DISPLAY:
ACPI_MOVE_32_TO_32(&temp32,
- &buffer[(acpi_size) i + j]);
+ &buffer[(acpi_size)i + j]);
acpi_ut_file_printf(file, "%08X ", temp32);
break;
case DB_QWORD_DISPLAY:
ACPI_MOVE_32_TO_32(&temp32,
- &buffer[(acpi_size) i + j]);
+ &buffer[(acpi_size)i + j]);
acpi_ut_file_printf(file, "%08X", temp32);
ACPI_MOVE_32_TO_32(&temp32,
- &buffer[(acpi_size) i + j +
+ &buffer[(acpi_size)i + j +
4]);
acpi_ut_file_printf(file, "%08X ", temp32);
break;
@@ -318,7 +318,7 @@ acpi_ut_dump_buffer_to_file(ACPI_FILE file,
return;
}
- buf_char = buffer[(acpi_size) i + j];
+ buf_char = buffer[(acpi_size)i + j];
if (isprint(buf_char)) {
acpi_ut_file_printf(file, "%c", buf_char);
} else {
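The hunks above only reformat casts inside acpi_ut_dump_buffer()'s classic dump loop: hex columns in the selected width, then a printable-ASCII column with '.' standing in for non-printable bytes (the isprint() check shown). A minimal standalone sketch of that loop for the default BYTE display:

/*
 * Print 16 bytes per line as hex, then the printable-ASCII rendering.
 */
#include <ctype.h>
#include <stdio.h>

static void dump_buffer(const unsigned char *buf, size_t count)
{
    size_t i, j;

    for (i = 0; i < count; i += 16) {
        printf("%04zX: ", i);
        for (j = 0; j < 16 && i + j < count; j++) {
            printf("%02X ", buf[i + j]);
        }
        printf(" ");
        for (j = 0; j < 16 && i + j < count; j++) {
            int c = buf[i + j];

            putchar(isprint(c) ? c : '.');   /* '.' for non-printable */
        }
        putchar('\n');
    }
}

int main(void)
{
    dump_buffer((const unsigned char *)"ACPI table dump example", 23);
    return 0;
}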
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index f8e997888..3b8d23ef3 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -105,7 +105,7 @@ acpi_os_create_cache(char *cache_name,
*
******************************************************************************/
-acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
+acpi_status acpi_os_purge_cache(struct acpi_memory_list *cache)
{
void *next;
acpi_status status;
@@ -151,7 +151,7 @@ acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache)
*
******************************************************************************/
-acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache)
+acpi_status acpi_os_delete_cache(struct acpi_memory_list *cache)
{
acpi_status status;
@@ -184,8 +184,7 @@ acpi_status acpi_os_delete_cache(struct acpi_memory_list * cache)
*
******************************************************************************/
-acpi_status
-acpi_os_release_object(struct acpi_memory_list * cache, void *object)
+acpi_status acpi_os_release_object(struct acpi_memory_list *cache, void *object)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 98d53e59c..82f971402 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -53,7 +53,7 @@ ACPI_MODULE_NAME("utcopy")
static acpi_status
acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
union acpi_object *external_object,
- u8 * data_space, acpi_size * buffer_space_used);
+ u8 *data_space, acpi_size *buffer_space_used);
static acpi_status
acpi_ut_copy_ielement_to_ielement(u8 object_type,
@@ -63,7 +63,7 @@ acpi_ut_copy_ielement_to_ielement(u8 object_type,
static acpi_status
acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
- u8 * buffer, acpi_size * space_used);
+ u8 *buffer, acpi_size *space_used);
static acpi_status
acpi_ut_copy_esimple_to_isimple(union acpi_object *user_obj,
@@ -111,7 +111,7 @@ acpi_ut_copy_ipackage_to_ipackage(union acpi_operand_object *source_obj,
static acpi_status
acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
union acpi_object *external_object,
- u8 * data_space, acpi_size * buffer_space_used)
+ u8 *data_space, acpi_size *buffer_space_used)
{
acpi_status status = AE_OK;
@@ -151,7 +151,7 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
memcpy((void *)data_space,
(void *)internal_object->string.pointer,
- (acpi_size) internal_object->string.length + 1);
+ (acpi_size)internal_object->string.length + 1);
break;
case ACPI_TYPE_BUFFER:
@@ -331,7 +331,7 @@ acpi_ut_copy_ielement_to_eelement(u8 object_type,
static acpi_status
acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
- u8 * buffer, acpi_size * space_used)
+ u8 *buffer, acpi_size *space_used)
{
union acpi_object *external_object;
acpi_status status;
@@ -362,7 +362,7 @@ acpi_ut_copy_ipackage_to_epackage(union acpi_operand_object *internal_object,
* Leave room for an array of ACPI_OBJECTS in the buffer
* and move the free space past it
*/
- info.length += (acpi_size) external_object->package.count *
+ info.length += (acpi_size)external_object->package.count *
ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
info.free_space += external_object->package.count *
ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object));
@@ -738,7 +738,7 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
*/
if (source_desc->string.pointer) {
dest_desc->string.pointer =
- ACPI_ALLOCATE((acpi_size) source_desc->string.
+ ACPI_ALLOCATE((acpi_size)source_desc->string.
length + 1);
if (!dest_desc->string.pointer) {
return (AE_NO_MEMORY);
@@ -748,7 +748,7 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
memcpy(dest_desc->string.pointer,
source_desc->string.pointer,
- (acpi_size) source_desc->string.length + 1);
+ (acpi_size)source_desc->string.length + 1);
}
break;
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 1cfc5f69b..574422205 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -51,13 +51,9 @@
ACPI_MODULE_NAME("utdebug")
#ifdef ACPI_DEBUG_OUTPUT
-static acpi_thread_id acpi_gbl_prev_thread_id = (acpi_thread_id) 0xFFFFFFFF;
-static char *acpi_gbl_fn_entry_str = "----Entry";
-static char *acpi_gbl_fn_exit_str = "----Exit-";
-
-/* Local prototypes */
-
-static const char *acpi_ut_trim_function_name(const char *function_name);
+static acpi_thread_id acpi_gbl_previous_thread_id = (acpi_thread_id) 0xFFFFFFFF;
+static const char *acpi_gbl_function_entry_prefix = "----Entry";
+static const char *acpi_gbl_function_exit_prefix = "----Exit-";
/*******************************************************************************
*
@@ -178,14 +174,14 @@ acpi_debug_print(u32 requested_debug_level,
* Thread tracking and context switch notification
*/
thread_id = acpi_os_get_thread_id();
- if (thread_id != acpi_gbl_prev_thread_id) {
+ if (thread_id != acpi_gbl_previous_thread_id) {
if (ACPI_LV_THREADS & acpi_dbg_level) {
acpi_os_printf
("\n**** Context Switch from TID %u to TID %u ****\n\n",
- (u32)acpi_gbl_prev_thread_id, (u32)thread_id);
+ (u32)acpi_gbl_previous_thread_id, (u32)thread_id);
}
- acpi_gbl_prev_thread_id = thread_id;
+ acpi_gbl_previous_thread_id = thread_id;
acpi_gbl_nesting_level = 0;
}
@@ -287,7 +283,8 @@ acpi_ut_trace(u32 line_number,
if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name, module_name,
- component_id, "%s\n", acpi_gbl_fn_entry_str);
+ component_id, "%s\n",
+ acpi_gbl_function_entry_prefix);
}
}
@@ -312,7 +309,8 @@ ACPI_EXPORT_SYMBOL(acpi_ut_trace)
void
acpi_ut_trace_ptr(u32 line_number,
const char *function_name,
- const char *module_name, u32 component_id, void *pointer)
+ const char *module_name,
+ u32 component_id, const void *pointer)
{
acpi_gbl_nesting_level++;
@@ -323,8 +321,8 @@ acpi_ut_trace_ptr(u32 line_number,
if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name, module_name,
- component_id, "%s %p\n", acpi_gbl_fn_entry_str,
- pointer);
+ component_id, "%s %p\n",
+ acpi_gbl_function_entry_prefix, pointer);
}
}
@@ -348,7 +346,7 @@ acpi_ut_trace_ptr(u32 line_number,
void
acpi_ut_trace_str(u32 line_number,
const char *function_name,
- const char *module_name, u32 component_id, char *string)
+ const char *module_name, u32 component_id, const char *string)
{
acpi_gbl_nesting_level++;
@@ -359,8 +357,8 @@ acpi_ut_trace_str(u32 line_number,
if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name, module_name,
- component_id, "%s %s\n", acpi_gbl_fn_entry_str,
- string);
+ component_id, "%s %s\n",
+ acpi_gbl_function_entry_prefix, string);
}
}
@@ -396,7 +394,7 @@ acpi_ut_trace_u32(u32 line_number,
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name, module_name,
component_id, "%s %08X\n",
- acpi_gbl_fn_entry_str, integer);
+ acpi_gbl_function_entry_prefix, integer);
}
}
@@ -427,7 +425,8 @@ acpi_ut_exit(u32 line_number,
if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name, module_name,
- component_id, "%s\n", acpi_gbl_fn_exit_str);
+ component_id, "%s\n",
+ acpi_gbl_function_exit_prefix);
}
if (acpi_gbl_nesting_level) {
@@ -467,14 +466,14 @@ acpi_ut_status_exit(u32 line_number,
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name,
module_name, component_id, "%s %s\n",
- acpi_gbl_fn_exit_str,
+ acpi_gbl_function_exit_prefix,
acpi_format_exception(status));
} else {
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name,
module_name, component_id,
"%s ****Exception****: %s\n",
- acpi_gbl_fn_exit_str,
+ acpi_gbl_function_exit_prefix,
acpi_format_exception(status));
}
}
@@ -514,7 +513,7 @@ acpi_ut_value_exit(u32 line_number,
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name, module_name,
component_id, "%s %8.8X%8.8X\n",
- acpi_gbl_fn_exit_str,
+ acpi_gbl_function_exit_prefix,
ACPI_FORMAT_UINT64(value));
}
@@ -552,8 +551,8 @@ acpi_ut_ptr_exit(u32 line_number,
if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_FUNCTIONS, component_id)) {
acpi_debug_print(ACPI_LV_FUNCTIONS,
line_number, function_name, module_name,
- component_id, "%s %p\n", acpi_gbl_fn_exit_str,
- ptr);
+ component_id, "%s %p\n",
+ acpi_gbl_function_exit_prefix, ptr);
}
if (acpi_gbl_nesting_level) {
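The renames above (acpi_gbl_fn_entry_str to acpi_gbl_function_entry_prefix, and the exit counterpart) keep the same tracing idiom: fixed prefix strings printed at function entry and exit under a nesting counter. A toy sketch of that idiom (illustrative, far simpler than ACPICA's trace macros):

/*
 * Entry/exit trace lines with a shared prefix and nesting-based indent.
 */
#include <stdio.h>

static const char *entry_prefix = "----Entry";
static const char *exit_prefix  = "----Exit-";
static int nesting;

static void trace(const char *prefix, const char *func)
{
    printf("%*s%s %s\n", nesting * 2, "", prefix, func);
}

static int square(int x)
{
    int r;

    nesting++;
    trace(entry_prefix, __func__);
    r = x * x;
    trace(exit_prefix, __func__);
    nesting--;
    return r;
}

int main(void)
{
    printf("%d\n", square(7));
    return 0;
}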
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 6ba65b025..efd7988e3 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -446,7 +446,7 @@ const char *acpi_ut_get_mutex_name(u32 mutex_id)
/* Names for Notify() values, used for debug output */
-static const char *acpi_gbl_generic_notify[ACPI_NOTIFY_MAX + 1] = {
+static const char *acpi_gbl_generic_notify[ACPI_GENERIC_NOTIFY_MAX + 1] = {
/* 00 */ "Bus Check",
/* 01 */ "Device Check",
/* 02 */ "Device Wake",
@@ -459,49 +459,53 @@ static const char *acpi_gbl_generic_notify[ACPI_NOTIFY_MAX + 1] = {
/* 09 */ "Device PLD Check",
/* 0A */ "Reserved",
/* 0B */ "System Locality Update",
- /* 0C */ "Shutdown Request",
+ /* 0C */ "Shutdown Request",
+ /* Reserved in ACPI 6.0 */
/* 0D */ "System Resource Affinity Update"
};
-static const char *acpi_gbl_device_notify[4] = {
+static const char *acpi_gbl_device_notify[5] = {
/* 80 */ "Status Change",
/* 81 */ "Information Change",
/* 82 */ "Device-Specific Change",
- /* 83 */ "Device-Specific Change"
+ /* 83 */ "Device-Specific Change",
+ /* 84 */ "Reserved"
};
-static const char *acpi_gbl_processor_notify[4] = {
+static const char *acpi_gbl_processor_notify[5] = {
/* 80 */ "Performance Capability Change",
/* 81 */ "C-State Change",
/* 82 */ "Throttling Capability Change",
- /* 83 */ "Device-Specific Change"
+ /* 83 */ "Guaranteed Change",
+ /* 84 */ "Minimum Excursion"
};
-static const char *acpi_gbl_thermal_notify[4] = {
+static const char *acpi_gbl_thermal_notify[5] = {
/* 80 */ "Thermal Status Change",
/* 81 */ "Thermal Trip Point Change",
/* 82 */ "Thermal Device List Change",
- /* 83 */ "Thermal Relationship Change"
+ /* 83 */ "Thermal Relationship Change",
+ /* 84 */ "Reserved"
};
const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type)
{
- /* 00 - 0D are common to all object types */
+ /* 00 - 0D are "common to all object types" (from ACPI Spec) */
- if (notify_value <= ACPI_NOTIFY_MAX) {
+ if (notify_value <= ACPI_GENERIC_NOTIFY_MAX) {
return (acpi_gbl_generic_notify[notify_value]);
}
- /* 0D - 7F are reserved */
+ /* 0E - 7F are reserved */
if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
return ("Reserved");
}
- /* 80 - 83 are per-object-type */
+ /* 80 - 84 are per-object-type */
- if (notify_value <= 0x83) {
+ if (notify_value <= ACPI_SPECIFIC_NOTIFY_MAX) {
switch (type) {
case ACPI_TYPE_ANY:
case ACPI_TYPE_DEVICE:
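The decode logic adjusted above partitions notify values into a generic range (0x00-0x0D), a reserved range (0x0E-0x7F), and per-object-type values from 0x80 up. A standalone sketch of that dispatch (tables trimmed for illustration):

/*
 * Map a notify value to a name: generic table first, then the reserved
 * gap, then the per-object-type table.
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *generic_notify[] = {
    "Bus Check", "Device Check", "Device Wake"   /* ...through 0x0D */
};
static const char *device_notify[] = {
    "Status Change", "Information Change"        /* ...through 0x84 */
};

static const char *notify_name(unsigned value)
{
    if (value < ARRAY_SIZE(generic_notify)) {
        return generic_notify[value];
    }
    if (value < 0x80) {
        return "Reserved";                       /* the 0x0E - 0x7F gap */
    }
    if (value - 0x80 < ARRAY_SIZE(device_notify)) {
        return device_notify[value - 0x80];
    }
    return "Hardware/Device Specific";
}

int main(void)
{
    printf("0x00=%s 0x20=%s 0x81=%s\n",
           notify_name(0x00), notify_name(0x20), notify_name(0x81));
    return 0;
}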
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 17b9f3e6e..7bad13f2e 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -69,7 +69,7 @@ ACPI_MODULE_NAME("uteval")
acpi_status
acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
- char *path,
+ const char *path,
u32 expected_return_btypes,
union acpi_operand_object **return_desc)
{
@@ -204,7 +204,7 @@ cleanup:
******************************************************************************/
acpi_status
-acpi_ut_evaluate_numeric_object(char *object_name,
+acpi_ut_evaluate_numeric_object(const char *object_name,
struct acpi_namespace_node *device_node,
u64 *value)
{
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 48fffcfe9..dd3fd7f97 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -80,6 +80,11 @@ const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS] = {
"_S4D"
};
+/* Hex-to-ascii */
+
+const char acpi_gbl_lower_hex_digits[] = "0123456789abcdef";
+const char acpi_gbl_upper_hex_digits[] = "0123456789ABCDEF";
+
/*******************************************************************************
*
* Namespace globals
@@ -221,6 +226,49 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
};
#endif /* !ACPI_REDUCED_HARDWARE */
+#if defined (ACPI_DISASSEMBLER) || defined (ACPI_ASL_COMPILER)
+
+/* to_pld macro: compile/disassemble strings */
+
+const char *acpi_gbl_pld_panel_list[] = {
+ "TOP",
+ "BOTTOM",
+ "LEFT",
+ "RIGHT",
+ "FRONT",
+ "BACK",
+ "UNKNOWN",
+ NULL
+};
+
+const char *acpi_gbl_pld_vertical_position_list[] = {
+ "UPPER",
+ "CENTER",
+ "LOWER",
+ NULL
+};
+
+const char *acpi_gbl_pld_horizontal_position_list[] = {
+ "LEFT",
+ "CENTER",
+ "RIGHT",
+ NULL
+};
+
+const char *acpi_gbl_pld_shape_list[] = {
+ "ROUND",
+ "OVAL",
+ "SQUARE",
+ "VERTICALRECTANGLE",
+ "HORIZONTALRECTANGLE",
+ "VERTICALTRAPEZOID",
+ "HORIZONTALTRAPEZOID",
+ "UNKNOWN",
+ "CHAMFERED",
+ NULL
+};
+#endif
+
/* Public globals */
ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
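The new acpi_gbl_lower_hex_digits/acpi_gbl_upper_hex_digits tables support nibble-to-ASCII conversion by lookup. A standalone sketch of the kind of use they enable (the conversion helper here is illustrative, not an ACPICA call site):

/*
 * Convert a byte to two hex digits by table lookup, in either case.
 */
#include <stdio.h>

static const char lower_hex_digits[] = "0123456789abcdef";
static const char upper_hex_digits[] = "0123456789ABCDEF";

static void to_hex(unsigned char byte, char out[2], int upper)
{
    const char *digits = upper ? upper_hex_digits : lower_hex_digits;

    out[0] = digits[(byte >> 4) & 0x0F];   /* high nibble */
    out[1] = digits[byte & 0x0F];          /* low nibble */
}

int main(void)
{
    char hi[2], lo[2];

    to_hex(0xAB, hi, 1);
    to_hex(0xAB, lo, 0);
    printf("%.2s %.2s\n", hi, lo);         /* AB ab */
    return 0;
}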
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index 6fb4ec365..f7cd2d526 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -95,7 +95,7 @@ acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
hid =
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
- (acpi_size) length);
+ (acpi_size)length);
if (!hid) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -173,7 +173,7 @@ acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
uid =
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
- (acpi_size) length);
+ (acpi_size)length);
if (!uid) {
status = AE_NO_MEMORY;
goto cleanup;
@@ -309,7 +309,7 @@ acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
/* Area for CID strings starts after the CID PNP_DEVICE_ID array */
next_id_string = ACPI_CAST_PTR(char, cid_list->ids) +
- ((acpi_size) count * sizeof(struct acpi_pnp_device_id));
+ ((acpi_size)count * sizeof(struct acpi_pnp_device_id));
/* Copy/convert the CIDs to the return buffer */
@@ -413,7 +413,7 @@ acpi_ut_execute_CLS(struct acpi_namespace_node *device_node,
cls =
ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
- (acpi_size) length);
+ (acpi_size)length);
if (!cls) {
status = AE_NO_MEMORY;
goto cleanup;
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 667372093..2d6530ee7 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -236,8 +236,8 @@ acpi_ut_divide(u64 in_dividend,
}
remainder.full = remainder.full - dividend.full;
- remainder.part.hi = (u32) - ((s32) remainder.part.hi);
- remainder.part.lo = (u32) - ((s32) remainder.part.lo);
+ remainder.part.hi = (u32)-((s32)remainder.part.hi);
+ remainder.part.lo = (u32)-((s32)remainder.part.lo);
if (remainder.part.lo) {
remainder.part.hi--;
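The utmath.c change is whitespace-only, but the code it touches repays a gloss: it negates a 64-bit remainder held as two 32-bit halves — negate both halves, then borrow from the high half unless the low half was zero. A self-contained sketch of the same two's-complement fix-up (hypothetical names):

#include <assert.h>
#include <stdint.h>

struct uint64_parts {
        uint32_t lo;
        uint32_t hi;
};

static void negate_parts(struct uint64_parts *v)
{
        v->hi = (uint32_t)-((int32_t)v->hi);
        v->lo = (uint32_t)-((int32_t)v->lo);
        if (v->lo)              /* borrow from the high half unless lo was 0 */
                v->hi--;
}

int main(void)
{
        uint64_t x = 0x123456789abcdef0ULL;
        struct uint64_parts p = { (uint32_t)x, (uint32_t)(x >> 32) };

        negate_parts(&p);
        assert((((uint64_t)p.hi << 32) | p.lo) == (uint64_t)-x);
        return 0;
}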
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index d938c27cc..389de3bd1 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -361,7 +361,7 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
void
acpi_ut_display_init_pathname(u8 type,
struct acpi_namespace_node *obj_handle,
- char *path)
+ const char *path)
{
acpi_status status;
struct acpi_buffer buffer;
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index d5c3adf19..3465fe2c5 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -205,37 +205,41 @@ acpi_ut_safe_strncat(char *dest,
*
* FUNCTION: acpi_ut_strtoul64
*
- * PARAMETERS: string - Null terminated string
- * base - Radix of the string: 16 or ACPI_ANY_BASE;
- * ACPI_ANY_BASE means 'in behalf of to_integer'
- * ret_integer - Where the converted integer is returned
+ * PARAMETERS: string - Null terminated string
+ * base - Radix of the string: 16 or 10 or
+ * ACPI_ANY_BASE
+ * max_integer_byte_width - Maximum allowable integer,in bytes:
+ * 4 or 8 (32 or 64 bits)
+ * ret_integer - Where the converted integer is
+ * returned
*
* RETURN: Status and Converted value
*
* DESCRIPTION: Convert a string into an unsigned value. Performs either a
- * 32-bit or 64-bit conversion, depending on the current mode
- * of the interpreter.
+ * 32-bit or 64-bit conversion, depending on the input integer
+ * size (often the current mode of the interpreter).
*
- * NOTES: acpi_gbl_integer_byte_width should be set to the proper width.
+ * NOTES: Negative numbers are not supported, as they are not supported
+ * by ACPI.
+ *
+ * acpi_gbl_integer_byte_width should be set to the proper width.
* For the core ACPICA code, this width depends on the DSDT
- * version. For iASL, the default byte width is always 8.
+ * version. For iASL, the default byte width is always 8 for the
+ * parser, but error checking is performed later to flag cases
+ * where a 64-bit constant is defined in a 32-bit DSDT/SSDT.
*
* Does not support Octal strings, not needed at this time.
*
- * There is an earlier version of the function after this one,
- * below. It is slightly different than this one, and the two
- * may eventually may need to be merged. (01/2016).
- *
******************************************************************************/
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
+acpi_status
+acpi_ut_strtoul64(char *string,
+ u32 base, u32 max_integer_byte_width, u64 *ret_integer)
{
u32 this_digit = 0;
u64 return_value = 0;
u64 quotient;
u64 dividend;
- u32 to_integer_op = (base == ACPI_ANY_BASE);
- u32 mode32 = (acpi_gbl_integer_byte_width == 4);
u8 valid_digits = 0;
u8 sign_of0x = 0;
u8 term = 0;
@@ -244,6 +248,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
switch (base) {
case ACPI_ANY_BASE:
+ case 10:
case 16:
break;
@@ -265,9 +270,9 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
string++;
}
- if (to_integer_op) {
+ if (base == ACPI_ANY_BASE) {
/*
- * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
+ * Base equal to ACPI_ANY_BASE means 'Either decimal or hex'.
* We need to determine if it is decimal or hexadecimal.
*/
if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) {
@@ -284,7 +289,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
/* Any string left? Check that '0x' is not followed by white space. */
if (!(*string) || isspace((int)*string) || *string == '\t') {
- if (to_integer_op) {
+ if (base == ACPI_ANY_BASE) {
goto error_exit;
} else {
goto all_done;
@@ -292,10 +297,11 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
}
/*
- * Perform a 32-bit or 64-bit conversion, depending upon the current
- * execution mode of the interpreter
+ * Perform a 32-bit or 64-bit conversion, depending upon the input
+ * byte width
*/
- dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
+ dividend = (max_integer_byte_width <= ACPI_MAX32_BYTE_WIDTH) ?
+ ACPI_UINT32_MAX : ACPI_UINT64_MAX;
/* Main loop: convert the string to a 32- or 64-bit integer */
@@ -323,7 +329,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
}
if (term) {
- if (to_integer_op) {
+ if (base == ACPI_ANY_BASE) {
goto error_exit;
} else {
break;
@@ -338,12 +344,13 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
valid_digits++;
- if (sign_of0x
- && ((valid_digits > 16)
- || ((valid_digits > 8) && mode32))) {
+ if (sign_of0x && ((valid_digits > 16) ||
+ ((valid_digits > 8)
+ && (max_integer_byte_width <=
+ ACPI_MAX32_BYTE_WIDTH)))) {
/*
* This is to_integer operation case.
- * No any restrictions for string-to-integer conversion,
+ * No restrictions for string-to-integer conversion,
* see ACPI spec.
*/
goto error_exit;
@@ -355,7 +362,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
&quotient, NULL);
if (return_value > quotient) {
- if (to_integer_op) {
+ if (base == ACPI_ANY_BASE) {
goto error_exit;
} else {
break;
@@ -378,7 +385,8 @@ all_done:
return_ACPI_STATUS(AE_OK);
error_exit:
- /* Base was set/validated above */
+
+ /* Base was set/validated above (10 or 16) */
if (base == 10) {
return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
@@ -388,8 +396,7 @@ error_exit:
}
#ifdef _OBSOLETE_FUNCTIONS
-/* TBD: use version in ACPICA main code base? */
-/* DONE: 01/2016 */
+/* Removed: 01/2016 */
/*******************************************************************************
*
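The reworked acpi_ut_strtoul64() above takes an explicit max_integer_byte_width instead of consulting interpreter state, and detects overflow before each multiply. A standalone sketch of a width-bounded conversion in the same spirit — the overflow test here is a common variant, so this is illustrative rather than the ACPICA algorithm verbatim:

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

static int strtou64_bounded(const char *s, unsigned base,
                            unsigned max_byte_width, uint64_t *out)
{
        uint64_t max = (max_byte_width <= 4) ? UINT32_MAX : UINT64_MAX;
        uint64_t value = 0;

        if (base != 10 && base != 16)
                return -1;
        for (; *s; s++) {
                unsigned digit;

                if (isdigit((unsigned char)*s))
                        digit = *s - '0';
                else if (base == 16 && isxdigit((unsigned char)*s))
                        digit = tolower((unsigned char)*s) - 'a' + 10;
                else
                        return -1;      /* invalid character for this base */
                if (value > (max - digit) / base)
                        return -1;      /* would overflow the given width */
                value = value * base + digit;
        }
        *out = value;
        return 0;
}

int main(void)
{
        uint64_t v;

        if (!strtou64_bounded("1a2b3c", 16, 4, &v))
                printf("0x%llx\n", (unsigned long long)v);
        return 0;
}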
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index edad3f043..72b9a062b 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -51,11 +51,11 @@ ACPI_MODULE_NAME("utobject")
/* Local prototypes */
static acpi_status
acpi_ut_get_simple_object_size(union acpi_operand_object *obj,
- acpi_size * obj_length);
+ acpi_size *obj_length);
static acpi_status
acpi_ut_get_package_object_size(union acpi_operand_object *obj,
- acpi_size * obj_length);
+ acpi_size *obj_length);
static acpi_status
acpi_ut_get_element_length(u8 object_type,
@@ -177,7 +177,7 @@ union acpi_operand_object *acpi_ut_create_package_object(u32 count)
* Create the element array. Count+1 allows the array to be null
* terminated.
*/
- package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size) count +
+ package_elements = ACPI_ALLOCATE_ZEROED(((acpi_size)count +
1) * sizeof(void *));
if (!package_elements) {
ACPI_FREE(package_desc);
@@ -454,7 +454,7 @@ void acpi_ut_delete_object_desc(union acpi_operand_object *object)
static acpi_status
acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
- acpi_size * obj_length)
+ acpi_size *obj_length)
{
acpi_size length;
acpi_size size;
@@ -495,12 +495,12 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
switch (internal_object->common.type) {
case ACPI_TYPE_STRING:
- length += (acpi_size) internal_object->string.length + 1;
+ length += (acpi_size)internal_object->string.length + 1;
break;
case ACPI_TYPE_BUFFER:
- length += (acpi_size) internal_object->buffer.length;
+ length += (acpi_size)internal_object->buffer.length;
break;
case ACPI_TYPE_INTEGER:
@@ -640,7 +640,7 @@ acpi_ut_get_element_length(u8 object_type,
static acpi_status
acpi_ut_get_package_object_size(union acpi_operand_object *internal_object,
- acpi_size * obj_length)
+ acpi_size *obj_length)
{
acpi_status status;
struct acpi_pkg_info info;
@@ -665,7 +665,7 @@ acpi_ut_get_package_object_size(union acpi_operand_object *internal_object,
*/
info.length +=
ACPI_ROUND_UP_TO_NATIVE_WORD(sizeof(union acpi_object)) *
- (acpi_size) info.num_packages;
+ (acpi_size)info.num_packages;
/* Return the total package length */
@@ -689,7 +689,7 @@ acpi_ut_get_package_object_size(union acpi_operand_object *internal_object,
acpi_status
acpi_ut_get_object_size(union acpi_operand_object *internal_object,
- acpi_size * obj_length)
+ acpi_size *obj_length)
{
acpi_status status;
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index b5cfe577f..3f5fed670 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -150,7 +150,7 @@ acpi_status acpi_ut_initialize_interfaces(void)
i < (ACPI_ARRAY_LENGTH(acpi_default_supported_interfaces) - 1);
i++) {
acpi_default_supported_interfaces[i].next =
- &acpi_default_supported_interfaces[(acpi_size) i + 1];
+ &acpi_default_supported_interfaces[(acpi_size)i + 1];
}
acpi_os_release_mutex(acpi_gbl_osi_mutex);
@@ -397,7 +397,7 @@ struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name)
*
******************************************************************************/
-acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state)
+acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state)
{
union acpi_operand_object *string_desc;
union acpi_operand_object *return_desc;
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index 813520ab8..3cd573c5f 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -61,7 +61,7 @@ ACPI_MODULE_NAME("utownerid")
* when the method exits or the table is unloaded.
*
******************************************************************************/
-acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
+acpi_status acpi_ut_allocate_owner_id(acpi_owner_id *owner_id)
{
u32 i;
u32 j;
@@ -122,7 +122,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
* permanently allocated (prevents +1 overflow)
*/
*owner_id =
- (acpi_owner_id) ((k + 1) + ACPI_MUL_32(j));
+ (acpi_owner_id)((k + 1) + ACPI_MUL_32(j));
ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
"Allocated OwnerId: %2.2X\n",
@@ -167,7 +167,7 @@ exit:
*
******************************************************************************/
-void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
+void acpi_ut_release_owner_id(acpi_owner_id *owner_id_ptr)
{
acpi_owner_id owner_id = *owner_id_ptr;
acpi_status status;
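The utownerid.c hunk keeps the owner-ID encoding (k + 1) + 32 * j, where j indexes a 32-bit mask word and k a bit within it; the +1 keeps ID 0 permanently reserved. A small standalone sketch of that bitmap allocator (hypothetical names, fixed-size table):

#include <stdint.h>
#include <stdio.h>

#define ID_WORDS 2                      /* 64 possible IDs, 1..64 */

static uint32_t id_mask[ID_WORDS];

/* Returns a 1-based ID so that 0 can mean "none available". */
static int alloc_id(void)
{
        for (unsigned j = 0; j < ID_WORDS; j++) {
                if (id_mask[j] == UINT32_MAX)
                        continue;       /* this word is full */
                for (unsigned k = 0; k < 32; k++) {
                        if (!(id_mask[j] & (1u << k))) {
                                id_mask[j] |= 1u << k;
                                return (int)(k + 1 + 32 * j);
                        }
                }
        }
        return 0;                       /* all IDs in use */
}

static void release_id(int id)
{
        unsigned bit = (unsigned)(id - 1);

        id_mask[bit / 32] &= ~(1u << (bit % 32));
}

int main(void)
{
        int a = alloc_id(), b = alloc_id();

        printf("%d %d\n", a, b);        /* prints "1 2" */
        release_id(a);
        printf("%d\n", alloc_id());     /* reuses 1 */
        return 0;
}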
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 8c218ad78..dd084cf52 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -67,11 +67,6 @@ static char *acpi_ut_format_number(char *string,
static char *acpi_ut_put_number(char *string, u64 number, u8 base, u8 upper);
-/* Module globals */
-
-static const char acpi_gbl_lower_hex_digits[] = "0123456789abcdef";
-static const char acpi_gbl_upper_hex_digits[] = "0123456789ABCDEF";
-
/*******************************************************************************
*
* FUNCTION: acpi_ut_bound_string_length
@@ -269,9 +264,9 @@ static char *acpi_ut_format_number(char *string,
sign = '\0';
if (type & ACPI_FORMAT_SIGN) {
- if ((s64) number < 0) {
+ if ((s64)number < 0) {
sign = '-';
- number = -(s64) number;
+ number = -(s64)number;
width--;
} else if (type & ACPI_FORMAT_SIGN_PLUS) {
sign = '+';
@@ -409,7 +404,7 @@ acpi_ut_vsnprintf(char *string,
width = -1;
if (isdigit((int)*format)) {
format = acpi_ut_scan_number(format, &number);
- width = (s32) number;
+ width = (s32)number;
} else if (*format == '*') {
++format;
width = va_arg(args, int);
@@ -426,7 +421,7 @@ acpi_ut_vsnprintf(char *string,
++format;
if (isdigit((int)*format)) {
format = acpi_ut_scan_number(format, &number);
- precision = (s32) number;
+ precision = (s32)number;
} else if (*format == '*') {
++format;
precision = va_arg(args, int);
@@ -555,17 +550,17 @@ acpi_ut_vsnprintf(char *string,
if (qualifier == 'L') {
number = va_arg(args, u64);
if (type & ACPI_FORMAT_SIGN) {
- number = (s64) number;
+ number = (s64)number;
}
} else if (qualifier == 'l') {
number = va_arg(args, unsigned long);
if (type & ACPI_FORMAT_SIGN) {
- number = (s32) number;
+ number = (s32)number;
}
} else if (qualifier == 'h') {
number = (u16)va_arg(args, int);
if (type & ACPI_FORMAT_SIGN) {
- number = (s16) number;
+ number = (s16)number;
}
} else {
number = va_arg(args, unsigned int);
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 0b005728d..288913a0e 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -130,7 +130,7 @@ void acpi_ut_print_string(char *string, u16 max_length)
} else {
/* All others will be Hex escapes */
- acpi_os_printf("\\x%2.2X", (s32) string[i]);
+ acpi_os_printf("\\x%2.2X", (s32)string[i]);
}
break;
}
@@ -145,73 +145,6 @@ void acpi_ut_print_string(char *string, u16 max_length)
/*******************************************************************************
*
- * FUNCTION: acpi_ut_valid_acpi_char
- *
- * PARAMETERS: char - The character to be examined
- * position - Byte position (0-3)
- *
- * RETURN: TRUE if the character is valid, FALSE otherwise
- *
- * DESCRIPTION: Check for a valid ACPI character. Must be one of:
- * 1) Upper case alpha
- * 2) numeric
- * 3) underscore
- *
- * We allow a '!' as the last character because of the ASF! table
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_acpi_char(char character, u32 position)
-{
-
- if (!((character >= 'A' && character <= 'Z') ||
- (character >= '0' && character <= '9') || (character == '_'))) {
-
- /* Allow a '!' in the last position */
-
- if (character == '!' && position == 3) {
- return (TRUE);
- }
-
- return (FALSE);
- }
-
- return (TRUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_valid_acpi_name
- *
- * PARAMETERS: name - The name to be examined. Does not have to
- * be NULL terminated string.
- *
- * RETURN: TRUE if the name is valid, FALSE otherwise
- *
- * DESCRIPTION: Check for a valid ACPI name. Each character must be one of:
- * 1) Upper case alpha
- * 2) numeric
- * 3) underscore
- *
- ******************************************************************************/
-
-u8 acpi_ut_valid_acpi_name(char *name)
-{
- u32 i;
-
- ACPI_FUNCTION_ENTRY();
-
- for (i = 0; i < ACPI_NAME_SIZE; i++) {
- if (!acpi_ut_valid_acpi_char(name[i], i)) {
- return (FALSE);
- }
- }
-
- return (TRUE);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ut_repair_name
*
* PARAMETERS: name - The ACPI name to be repaired
@@ -253,7 +186,7 @@ void acpi_ut_repair_name(char *name)
/* Check each character in the name */
for (i = 0; i < ACPI_NAME_SIZE; i++) {
- if (acpi_ut_valid_acpi_char(name[i], i)) {
+ if (acpi_ut_valid_name_char(name[i], i)) {
continue;
}
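The utstring.c hunk drops the local validators in favor of the renamed acpi_ut_valid_name_char(), while acpi_ut_repair_name() keeps substituting for invalid bytes. A standalone sketch of the validate-and-repair pair, following the rules in the removed comment block (upper-case alpha, digit, underscore, and '!' in the last position), with a '*' substitution for bad bytes:

#include <stdio.h>

#define ACPI_NAME_SIZE 4

static int valid_name_char(char c, unsigned pos)
{
        if ((c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_')
                return 1;
        return c == '!' && pos == 3;    /* allow the ASF! table name */
}

static void repair_name(char name[ACPI_NAME_SIZE + 1])
{
        for (unsigned i = 0; i < ACPI_NAME_SIZE; i++)
                if (!valid_name_char(name[i], i))
                        name[i] = '*';  /* substitute for invalid bytes */
}

int main(void)
{
        char name[ACPI_NAME_SIZE + 1] = "AB?3";

        repair_name(name);
        printf("%s\n", name);           /* prints "AB*3" */
        return 0;
}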
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 60c406a8e..0df07dfa5 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -90,7 +90,7 @@ acpi_ut_remove_allocation(struct acpi_debug_mem_block *address,
******************************************************************************/
acpi_status
-acpi_ut_create_list(char *list_name,
+acpi_ut_create_list(const char *list_name,
u16 object_size, struct acpi_memory_list **return_cache)
{
struct acpi_memory_list *cache;
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 68d4673f6..d9e6aac7d 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -127,7 +127,7 @@ ACPI_EXPORT_SYMBOL(acpi_subsystem_status)
* and the value of out_buffer is undefined.
*
******************************************************************************/
-acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
+acpi_status acpi_get_system_info(struct acpi_buffer *out_buffer)
{
struct acpi_system_info *info_ptr;
acpi_status status;
@@ -483,7 +483,7 @@ ACPI_EXPORT_SYMBOL(acpi_check_address_range)
******************************************************************************/
acpi_status
acpi_decode_pld_buffer(u8 *in_buffer,
- acpi_size length, struct acpi_pld_info ** return_buffer)
+ acpi_size length, struct acpi_pld_info **return_buffer)
{
struct acpi_pld_info *pld_info;
u32 *buffer = ACPI_CAST_PTR(u32, in_buffer);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b719ab309..ab234791a 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1316,7 +1316,7 @@ static int __init acpi_battery_init(void)
static void __exit acpi_battery_exit(void)
{
- async_synchronize_cookie(async_cookie);
+ async_synchronize_cookie(async_cookie + 1);
acpi_bus_unregister_driver(&acpi_battery_driver);
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_unlock_battery_dir(acpi_battery_dir);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 96809cd99..bdc67bad6 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -3,7 +3,7 @@
*
* Check to see if the given machine has a known bad ACPI BIOS
* or if the BIOS is too old.
- * Check given machine against acpi_osi_dmi_table[].
+ * Check given machine against acpi_rev_dmi_table[].
*
* Copyright (C) 2004 Len Brown <len.brown@intel.com>
* Copyright (C) 2002 Andy Grover <andrew.grover@intel.com>
@@ -47,7 +47,7 @@ struct acpi_blacklist_item {
u32 is_critical_error;
};
-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
+static struct dmi_system_id acpi_rev_dmi_table[] __initdata;
/*
* POLICY: If *anything* doesn't work, put it on the blacklist.
@@ -128,36 +128,12 @@ int __init acpi_blacklisted(void)
}
}
- dmi_check_system(acpi_osi_dmi_table);
+ (void)early_acpi_osi_init();
+ dmi_check_system(acpi_rev_dmi_table);
return blacklisted;
}
#ifdef CONFIG_DMI
-static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
-{
- acpi_dmi_osi_linux(1, d); /* enable */
- return 0;
-}
-static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
-{
- printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
- acpi_osi_setup("!Windows 2006");
- acpi_osi_setup("!Windows 2006 SP1");
- acpi_osi_setup("!Windows 2006 SP2");
- return 0;
-}
-static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
-{
- printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
- acpi_osi_setup("!Windows 2009");
- return 0;
-}
-static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
-{
- printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
- acpi_osi_setup("!Windows 2012");
- return 0;
-}
#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
{
@@ -168,169 +144,7 @@ static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
}
#endif
-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
- {
- .callback = dmi_disable_osi_vista,
- .ident = "Fujitsu Siemens",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
- DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
- },
- },
- {
- /*
- * There have a NVIF method in MSI GX723 DSDT need call by Nvidia
- * driver (e.g. nouveau) when user press brightness hotkey.
- * Currently, nouveau driver didn't do the job and it causes there
- * have a infinite while loop in DSDT when user press hotkey.
- * We add MSI GX723's dmi information to this table for workaround
- * this issue.
- * Will remove MSI GX723 from the table after nouveau grows support.
- */
- .callback = dmi_disable_osi_vista,
- .ident = "MSI GX723",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
- DMI_MATCH(DMI_PRODUCT_NAME, "GX723"),
- },
- },
- {
- .callback = dmi_disable_osi_vista,
- .ident = "Sony VGN-NS10J_S",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS10J_S"),
- },
- },
- {
- .callback = dmi_disable_osi_vista,
- .ident = "Sony VGN-SR290J",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR290J"),
- },
- },
- {
- .callback = dmi_disable_osi_vista,
- .ident = "VGN-NS50B_L",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS50B_L"),
- },
- },
- {
- .callback = dmi_disable_osi_vista,
- .ident = "VGN-SR19XN",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR19XN"),
- },
- },
- {
- .callback = dmi_disable_osi_vista,
- .ident = "Toshiba Satellite L355",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
- },
- },
- {
- .callback = dmi_disable_osi_win7,
- .ident = "ASUS K50IJ",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
- },
- },
- {
- .callback = dmi_disable_osi_vista,
- .ident = "Toshiba P305D",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
- },
- },
- {
- .callback = dmi_disable_osi_vista,
- .ident = "Toshiba NB100",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
- DMI_MATCH(DMI_PRODUCT_NAME, "NB100"),
- },
- },
-
- /*
- * The wireless hotkey does not work on those machines when
- * returning true for _OSI("Windows 2012")
- */
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Dell Inspiron 7737",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Dell Inspiron 7537",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Dell Inspiron 5437",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5437"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Dell Inspiron 3437",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3437"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Dell Vostro 3446",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Dell Vostro 3546",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3546"),
- },
- },
-
- /*
- * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
- * Linux ignores it, except for the machines enumerated below.
- */
-
- /*
- * Without this this EEEpc exports a non working WMI interface, with
- * this it exports a working "good old" eeepc_laptop interface, fixing
- * both brightness control, and rfkill not working.
- */
- {
- .callback = dmi_enable_osi_linux,
- .ident = "Asus EEE PC 1015PX",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
- },
- },
-
+static struct dmi_system_id acpi_rev_dmi_table[] __initdata = {
#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
/*
* DELL XPS 13 (2015) switches sound between HDA and I2S
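blacklist.c now keeps only the _REV quirks; the _OSI entries move behind early_acpi_osi_init(). The surviving mechanism is the DMI quirk table: an array of matchers with a callback, terminated by an empty entry. A standalone sketch of that pattern with hypothetical types (the kernel's is struct dmi_system_id):

#include <stdio.h>
#include <string.h>

struct quirk_id {
        int (*callback)(const struct quirk_id *id);
        const char *ident;
        const char *vendor;             /* match key, like DMI_SYS_VENDOR */
        const char *product;            /* match key, like DMI_PRODUCT_NAME */
};

static int enable_rev_override(const struct quirk_id *id)
{
        printf("quirk matched: %s\n", id->ident);
        return 0;
}

static const struct quirk_id quirk_table[] = {
        { enable_rev_override, "Example XPS 13", "Example Inc.", "XPS 13" },
        { NULL, NULL, NULL, NULL }      /* table terminator */
};

static void check_system(const char *vendor, const char *product)
{
        for (const struct quirk_id *q = quirk_table; q->callback; q++)
                if (!strcmp(vendor, q->vendor) && !strcmp(product, q->product))
                        q->callback(q);
}

int main(void)
{
        check_system("Example Inc.", "XPS 13");
        return 0;
}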
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index c068c829b..262ca31b8 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -925,11 +925,13 @@ void __init acpi_early_init(void)
goto error0;
}
- status = acpi_load_tables();
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to load the System Description Tables\n");
- goto error0;
+ if (acpi_gbl_group_module_level_code) {
+ status = acpi_load_tables();
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Unable to load the System Description Tables\n");
+ goto error0;
+ }
}
#ifdef CONFIG_X86
@@ -995,17 +997,10 @@ static int __init acpi_bus_init(void)
acpi_os_initialize1();
- status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
- if (ACPI_FAILURE(status)) {
- printk(KERN_ERR PREFIX
- "Unable to start the ACPI Interpreter\n");
- goto error1;
- }
-
/*
* ACPI 2.0 requires the EC driver to be loaded and work before
- * the EC device is found in the namespace (i.e. before acpi_initialize_objects()
- * is called).
+ * the EC device is found in the namespace (i.e. before
+ * acpi_load_tables() is called).
*
* This is accomplished by looking for the ECDT table, and getting
* the EC parameters out of that.
@@ -1013,6 +1008,22 @@ static int __init acpi_bus_init(void)
status = acpi_ec_ecdt_probe();
/* Ignore result. Not having an ECDT is not fatal. */
+ if (!acpi_gbl_group_module_level_code) {
+ status = acpi_load_tables();
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Unable to load the System Description Tables\n");
+ goto error1;
+ }
+ }
+
+ status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX
+ "Unable to start the ACPI Interpreter\n");
+ goto error1;
+ }
+
status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
@@ -1040,7 +1051,7 @@ static int __init acpi_bus_init(void)
* Maybe EC region is required at bus_scan/acpi_get_devices. So it
* is necessary to enable it as early as possible.
*/
- acpi_boot_ec_enable();
+ acpi_ec_dsdt_probe();
printk(KERN_INFO PREFIX "Interpreter enabled\n");
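The bus.c reordering makes table loading conditional on acpi_gbl_group_module_level_code: either early (before ECDT probing) or late (after it, so the ECDT-described EC is usable while the DSDT loads), and only then is the interpreter enabled. A toy sketch of the two-phase ordering with stand-ins for the ACPI calls:

#include <stdio.h>

static int group_module_level_code = 1; /* mirrors acpi_gbl_group_module_level_code */
static int tables_loaded;

static void load_tables(void) { tables_loaded = 1; puts("tables loaded"); }
static void ecdt_probe(void)  { puts("ECDT EC probed"); }

static void early_init(void)
{
        if (group_module_level_code)
                load_tables();          /* early path */
}

static void bus_init(void)
{
        ecdt_probe();                   /* EC must work before the namespace scan */
        if (!group_module_level_code)
                load_tables();          /* late path */
        if (tables_loaded)
                puts("interpreter enabled");
}

int main(void)
{
        early_init();
        bus_init();
        return 0;
}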
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index b9afb47db..7b2c48fde 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -35,7 +35,7 @@ static ssize_t acpi_object_path(acpi_handle handle, char *buf)
if (result)
return result;
- result = sprintf(buf, "%s\n", (char*)path.pointer);
+ result = sprintf(buf, "%s\n", (char *)path.pointer);
kfree(path.pointer);
return result;
}
@@ -333,7 +333,8 @@ int acpi_device_modalias(struct device *dev, char *buf, int size)
EXPORT_SYMBOL_GPL(acpi_device_modalias);
static ssize_t
-acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
+acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
@@ -397,7 +398,8 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
static ssize_t
-acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
+acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
@@ -467,12 +469,27 @@ acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ return -EIO;
return sprintf(buf, "%llu\n", sun);
}
static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
+static ssize_t
+acpi_device_hrv_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *acpi_dev = to_acpi_device(dev);
+ acpi_status status;
+ unsigned long long hrv;
+
+ status = acpi_evaluate_integer(acpi_dev->handle, "_HRV", NULL, &hrv);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return sprintf(buf, "%llu\n", hrv);
+}
+static DEVICE_ATTR(hrv, 0444, acpi_device_hrv_show, NULL);
+
static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -481,7 +498,7 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status))
- return -ENODEV;
+ return -EIO;
return sprintf(buf, "%llu\n", sta);
}
@@ -541,16 +558,22 @@ int acpi_device_setup_files(struct acpi_device *dev)
goto end;
}
+ if (acpi_has_method(dev->handle, "_HRV")) {
+ result = device_create_file(&dev->dev, &dev_attr_hrv);
+ if (result)
+ goto end;
+ }
+
if (acpi_has_method(dev->handle, "_STA")) {
result = device_create_file(&dev->dev, &dev_attr_status);
if (result)
goto end;
}
- /*
- * If device has _EJ0, 'eject' file is created that is used to trigger
- * hot-removal function from userland.
- */
+ /*
+ * If the device has _EJ0, an 'eject' file is created; it is used to
+ * trigger the hot-removal function from userland.
+ */
if (acpi_has_method(dev->handle, "_EJ0")) {
result = device_create_file(&dev->dev, &dev_attr_eject);
if (result)
@@ -604,6 +627,9 @@ void acpi_device_remove_files(struct acpi_device *dev)
if (acpi_has_method(dev->handle, "_SUN"))
device_remove_file(&dev->dev, &dev_attr_sun);
+ if (acpi_has_method(dev->handle, "_HRV"))
+ device_remove_file(&dev->dev, &dev_attr_hrv);
+
if (dev->pnp.unique_id)
device_remove_file(&dev->dev, &dev_attr_uid);
if (dev->pnp.type.bus_address)
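The device_sysfs.c additions follow one pattern per attribute: evaluate an integer-returning ACPI method, map failure to -EIO, and print the value into the sysfs buffer. A standalone sketch of that show-handler shape with a stubbed-out evaluator (hypothetical names):

#include <stdio.h>

#define EIO 5

/* Stand-in for acpi_evaluate_integer(); returns 0 on success. */
static int evaluate_integer(const char *method, unsigned long long *val)
{
        if (method[0] != '_')
                return -1;              /* pretend the lookup failed */
        *val = 3;                       /* pretend the method returned 3 */
        return 0;
}

static int hrv_show(char *buf, unsigned long n)
{
        unsigned long long hrv;

        if (evaluate_integer("_HRV", &hrv))
                return -EIO;
        return snprintf(buf, n, "%llu\n", hrv);
}

int main(void)
{
        char buf[32];

        if (hrv_show(buf, sizeof(buf)) > 0)
                fputs(buf, stdout);
        return 0;
}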
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index b420fb466..290d6f5be 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -105,8 +105,8 @@ enum ec_command {
enum {
EC_FLAGS_QUERY_PENDING, /* Query is pending */
EC_FLAGS_QUERY_GUARDING, /* Guard for SCI_EVT check */
- EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and
- * OpReg are installed */
+ EC_FLAGS_GPE_HANDLER_INSTALLED, /* GPE handler installed */
+ EC_FLAGS_EC_HANDLER_INSTALLED, /* OpReg handler installed */
EC_FLAGS_STARTED, /* Driver is started */
EC_FLAGS_STOPPED, /* Driver is stopped */
EC_FLAGS_COMMAND_STORM, /* GPE storms occurred to the
@@ -175,10 +175,9 @@ static void acpi_ec_event_processor(struct work_struct *work);
struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);
-static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
-static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
+static int EC_FLAGS_CORRECT_ECDT; /* Needs ECDT port address correction */
/* --------------------------------------------------------------------------
* Logging/Debugging
@@ -367,7 +366,8 @@ static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
static void acpi_ec_submit_request(struct acpi_ec *ec)
{
ec->reference_count++;
- if (ec->reference_count == 1)
+ if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
+ ec->reference_count == 1)
acpi_ec_enable_gpe(ec, true);
}
@@ -376,7 +376,8 @@ static void acpi_ec_complete_request(struct acpi_ec *ec)
bool flushed = false;
ec->reference_count--;
- if (ec->reference_count == 0)
+ if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags) &&
+ ec->reference_count == 0)
acpi_ec_disable_gpe(ec, true);
flushed = acpi_ec_flushed(ec);
if (flushed)
@@ -1287,52 +1288,75 @@ static int ec_install_handlers(struct acpi_ec *ec)
{
acpi_status status;
- if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
- return 0;
- status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
- ACPI_GPE_EDGE_TRIGGERED,
- &acpi_ec_gpe_handler, ec);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
acpi_ec_start(ec, false);
- status = acpi_install_address_space_handler(ec->handle,
- ACPI_ADR_SPACE_EC,
- &acpi_ec_space_handler,
- NULL, ec);
- if (ACPI_FAILURE(status)) {
- if (status == AE_NOT_FOUND) {
- /*
- * Maybe OS fails in evaluating the _REG object.
- * The AE_NOT_FOUND error will be ignored and OS
- * continue to initialize EC.
- */
- pr_err("Fail in evaluating the _REG object"
- " of EC device. Broken bios is suspected.\n");
- } else {
- acpi_ec_stop(ec, false);
- acpi_remove_gpe_handler(NULL, ec->gpe,
- &acpi_ec_gpe_handler);
- return -ENODEV;
+
+ if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+ status = acpi_install_address_space_handler(ec->handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_ec_space_handler,
+ NULL, ec);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_NOT_FOUND) {
+ /*
+ * Maybe the OS fails in evaluating the _REG
+ * object. The AE_NOT_FOUND error will be
+ * ignored and the OS will continue to
+ * initialize the EC.
+ */
+ pr_err("Failed to evaluate the _REG object"
+ " of the EC device; a broken BIOS is suspected.\n");
+ } else {
+ acpi_ec_stop(ec, false);
+ return -ENODEV;
+ }
+ }
+ set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
+ }
+
+ if (!test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
+ status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
+ ACPI_GPE_EDGE_TRIGGERED,
+ &acpi_ec_gpe_handler, ec);
+ /* This is not fatal as we can poll EC events */
+ if (ACPI_SUCCESS(status)) {
+ set_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
+ if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
+ ec->reference_count >= 1)
+ acpi_ec_enable_gpe(ec, true);
}
}
- set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
return 0;
}
static void ec_remove_handlers(struct acpi_ec *ec)
{
- if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
- return;
+ if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
+ if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
+ ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
+ pr_err("failed to remove space handler\n");
+ clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
+ }
+
+ /*
+ * Stops handling the EC transactions after removing the operation
+ * region handler. This is required because _REG(DISCONNECT)
+ * invoked during the removal can result in new EC transactions.
+ *
+ * Flushes the EC requests and thus disables the GPE before
+ * removing the GPE handler. This is required by the current ACPICA
+ * GPE core. ACPICA GPE core will automatically disable a GPE when
+ * it is indicated but there is no way to handle it. So the drivers
+ * must disable the GPEs prior to removing the GPE handlers.
+ */
acpi_ec_stop(ec, false);
- if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
- ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
- pr_err("failed to remove space handler\n");
- if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
- &acpi_ec_gpe_handler)))
- pr_err("failed to remove gpe handler\n");
- clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
+
+ if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
+ if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
+ &acpi_ec_gpe_handler)))
+ pr_err("failed to remove gpe handler\n");
+ clear_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags);
+ }
}
static int acpi_ec_add(struct acpi_device *device)
@@ -1344,11 +1368,12 @@ static int acpi_ec_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
/* Check for boot EC */
- if (boot_ec &&
- (boot_ec->handle == device->handle ||
- boot_ec->handle == ACPI_ROOT_OBJECT)) {
+ if (boot_ec) {
ec = boot_ec;
boot_ec = NULL;
+ ec_remove_handlers(ec);
+ if (first_ec == ec)
+ first_ec = NULL;
} else {
ec = make_acpi_ec();
if (!ec)
@@ -1432,34 +1457,35 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
return AE_OK;
}
-int __init acpi_boot_ec_enable(void)
-{
- if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
- return 0;
- if (!ec_install_handlers(boot_ec)) {
- first_ec = boot_ec;
- return 0;
- }
- return -EFAULT;
-}
-
static const struct acpi_device_id ec_device_ids[] = {
{"PNP0C09", 0},
{"", 0},
};
-/* Some BIOS do not survive early DSDT scan, skip it */
-static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
+int __init acpi_ec_dsdt_probe(void)
{
- EC_FLAGS_SKIP_DSDT_SCAN = 1;
- return 0;
-}
+ acpi_status status;
-/* ASUStek often supplies us with broken ECDT, validate it */
-static int ec_validate_ecdt(const struct dmi_system_id *id)
-{
- EC_FLAGS_VALIDATE_ECDT = 1;
- return 0;
+ if (boot_ec)
+ return 0;
+
+ /*
+ * Find the EC in the DSDT if there is no ECDT EC available. When
+ * this function is invoked, the ACPI tables have been fully loaded
+ * and we can walk the namespace.
+ */
+ boot_ec = make_acpi_ec();
+ if (!boot_ec)
+ return -ENOMEM;
+ status = acpi_get_devices(ec_device_ids[0].id,
+ ec_parse_device, boot_ec, NULL);
+ if (ACPI_FAILURE(status) || !boot_ec->handle)
+ return -ENODEV;
+ if (!ec_install_handlers(boot_ec)) {
+ first_ec = boot_ec;
+ return 0;
+ }
+ return -EFAULT;
}
#if 0
@@ -1503,30 +1529,29 @@ static int ec_clear_on_resume(const struct dmi_system_id *id)
return 0;
}
+static int ec_correct_ecdt(const struct dmi_system_id *id)
+{
+ pr_debug("Detected system needing ECDT address correction.\n");
+ EC_FLAGS_CORRECT_ECDT = 1;
+ return 0;
+}
+
static struct dmi_system_id ec_dmi_table[] __initdata = {
{
- ec_skip_dsdt_scan, "Compal JFL92", {
- DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
- DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
+ ec_correct_ecdt, "Asus L4R", {
+ DMI_MATCH(DMI_BIOS_VERSION, "1008.006"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),
+ DMI_MATCH(DMI_BOARD_NAME, "L4R") }, NULL},
{
- ec_validate_ecdt, "MSI MS-171F", {
+ ec_correct_ecdt, "Asus M6R", {
+ DMI_MATCH(DMI_BIOS_VERSION, "0207"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M6R"),
+ DMI_MATCH(DMI_BOARD_NAME, "M6R") }, NULL},
+ {
+ ec_correct_ecdt, "MSI MS-171F", {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
{
- ec_validate_ecdt, "ASUS hardware", {
- DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
- {
- ec_validate_ecdt, "ASUS hardware", {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
- {
- ec_skip_dsdt_scan, "HP Folio 13", {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
- {
- ec_validate_ecdt, "ASUS hardware", {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
- {
ec_clear_on_resume, "Samsung hardware", {
DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
{},
@@ -1534,8 +1559,8 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
int __init acpi_ec_ecdt_probe(void)
{
+ int ret = 0;
acpi_status status;
- struct acpi_ec *saved_ec = NULL;
struct acpi_table_ecdt *ecdt_ptr;
boot_ec = make_acpi_ec();
@@ -1547,67 +1572,45 @@ int __init acpi_ec_ecdt_probe(void)
dmi_check_system(ec_dmi_table);
status = acpi_get_table(ACPI_SIG_ECDT, 1,
(struct acpi_table_header **)&ecdt_ptr);
- if (ACPI_SUCCESS(status)) {
- pr_info("EC description table is found, configuring boot EC\n");
- boot_ec->command_addr = ecdt_ptr->control.address;
- boot_ec->data_addr = ecdt_ptr->data.address;
- boot_ec->gpe = ecdt_ptr->gpe;
- boot_ec->handle = ACPI_ROOT_OBJECT;
- acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id,
- &boot_ec->handle);
- /* Don't trust ECDT, which comes from ASUSTek */
- if (!EC_FLAGS_VALIDATE_ECDT)
- goto install;
- saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
- if (!saved_ec)
- return -ENOMEM;
- /* fall through */
+ if (ACPI_FAILURE(status)) {
+ ret = -ENODEV;
+ goto error;
}
- if (EC_FLAGS_SKIP_DSDT_SCAN) {
- kfree(saved_ec);
- return -ENODEV;
+ if (!ecdt_ptr->control.address || !ecdt_ptr->data.address) {
+ /*
+ * Asus X50GL:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=11880
+ */
+ ret = -ENODEV;
+ goto error;
}
- /* This workaround is needed only on some broken machines,
- * which require early EC, but fail to provide ECDT */
- pr_debug("Look up EC in DSDT\n");
- status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
- boot_ec, NULL);
- /* Check that acpi_get_devices actually find something */
- if (ACPI_FAILURE(status) || !boot_ec->handle)
- goto error;
- if (saved_ec) {
- /* try to find good ECDT from ASUSTek */
- if (saved_ec->command_addr != boot_ec->command_addr ||
- saved_ec->data_addr != boot_ec->data_addr ||
- saved_ec->gpe != boot_ec->gpe ||
- saved_ec->handle != boot_ec->handle)
- pr_info("ASUSTek keeps feeding us with broken "
- "ECDT tables, which are very hard to workaround. "
- "Trying to use DSDT EC info instead. Please send "
- "output of acpidump to linux-acpi@vger.kernel.org\n");
- kfree(saved_ec);
- saved_ec = NULL;
+ pr_info("EC description table is found, configuring boot EC\n");
+ if (EC_FLAGS_CORRECT_ECDT) {
+ /*
+ * Asus L4R, Asus M6R
+ * https://bugzilla.kernel.org/show_bug.cgi?id=9399
+ * MSI MS-171F
+ * https://bugzilla.kernel.org/show_bug.cgi?id=12461
+ */
+ boot_ec->command_addr = ecdt_ptr->data.address;
+ boot_ec->data_addr = ecdt_ptr->control.address;
} else {
- /* We really need to limit this workaround, the only ASUS,
- * which needs it, has fake EC._INI method, so use it as flag.
- * Keep boot_ec struct as it will be needed soon.
- */
- if (!dmi_name_in_vendors("ASUS") ||
- !acpi_has_method(boot_ec->handle, "_INI"))
- return -ENODEV;
+ boot_ec->command_addr = ecdt_ptr->control.address;
+ boot_ec->data_addr = ecdt_ptr->data.address;
}
-install:
- if (!ec_install_handlers(boot_ec)) {
+ boot_ec->gpe = ecdt_ptr->gpe;
+ boot_ec->handle = ACPI_ROOT_OBJECT;
+ ret = ec_install_handlers(boot_ec);
+ if (!ret)
first_ec = boot_ec;
- return 0;
- }
error:
- kfree(boot_ec);
- kfree(saved_ec);
- boot_ec = NULL;
- return -ENODEV;
+ if (ret) {
+ kfree(boot_ec);
+ boot_ec = NULL;
+ }
+ return ret;
}
static int param_set_event_clearing(const char *val, struct kernel_param *kp)
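The ec.c rework splits the single handlers-installed flag into one bit per handler, makes ec_install_handlers() idempotent, and documents why removal must drop the operation-region handler before stopping transactions and only then the GPE handler. A toy sketch of that flag-guarded install/remove discipline:

#include <stdio.h>

#define BIT(n) (1ul << (n))

enum { FLAG_OPREG_INSTALLED, FLAG_GPE_INSTALLED };

static unsigned long flags;

static void install_handlers(void)
{
        if (!(flags & BIT(FLAG_OPREG_INSTALLED))) {
                puts("install opregion handler");
                flags |= BIT(FLAG_OPREG_INSTALLED);
        }
        if (!(flags & BIT(FLAG_GPE_INSTALLED))) {
                /* GPE handler failure is tolerated: events can be polled */
                puts("install gpe handler");
                flags |= BIT(FLAG_GPE_INSTALLED);
        }
}

static void remove_handlers(void)
{
        /* Remove the opregion handler first: _REG(DISCONNECT) may still
         * trigger transactions that need the rest of the machinery. */
        if (flags & BIT(FLAG_OPREG_INSTALLED)) {
                puts("remove opregion handler");
                flags &= ~BIT(FLAG_OPREG_INSTALLED);
        }
        if (flags & BIT(FLAG_GPE_INSTALLED)) {
                puts("remove gpe handler");
                flags &= ~BIT(FLAG_GPE_INSTALLED);
        }
}

int main(void)
{
        install_handlers();
        install_handlers();             /* second call is a no-op */
        remove_handlers();
        return 0;
}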
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
new file mode 100644
index 000000000..46f060356
--- /dev/null
+++ b/drivers/acpi/evged.c
@@ -0,0 +1,154 @@
+/*
+ * Generic Event Device for ACPI.
+ *
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Generic Event Device allows platforms to handle interrupts in ACPI
+ * ASL statements. It follows an approach very similar to the _EVT
+ * method used for GPIO events. All interrupts are listed in _CRS and
+ * the handler is written in the _EVT method. Here is an example.
+ *
+ * Device (GED0)
+ * {
+ *
+ * Name (_HID, "ACPI0013")
+ * Name (_UID, 0)
+ * Method (_CRS, 0x0, Serialized)
+ * {
+ * Name (RBUF, ResourceTemplate ()
+ * {
+ * Interrupt(ResourceConsumer, Edge, ActiveHigh, Shared, , , )
+ * {123}
+ * }
+ * })
+ *
+ * Method (_EVT, 1) {
+ * if (Lequal(123, Arg0))
+ * {
+ * }
+ * }
+ * }
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+
+#define MODULE_NAME "acpi-ged"
+
+struct acpi_ged_event {
+ struct list_head node;
+ struct device *dev;
+ unsigned int gsi;
+ unsigned int irq;
+ acpi_handle handle;
+};
+
+static irqreturn_t acpi_ged_irq_handler(int irq, void *data)
+{
+ struct acpi_ged_event *event = data;
+ acpi_status acpi_ret;
+
+ acpi_ret = acpi_execute_simple_method(event->handle, NULL, event->gsi);
+ if (ACPI_FAILURE(acpi_ret))
+ dev_err_once(event->dev, "IRQ method execution failed\n");
+
+ return IRQ_HANDLED;
+}
+
+static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
+ void *context)
+{
+ struct acpi_ged_event *event;
+ unsigned int irq;
+ unsigned int gsi;
+ unsigned int irqflags = IRQF_ONESHOT;
+ struct device *dev = context;
+ acpi_handle handle = ACPI_HANDLE(dev);
+ acpi_handle evt_handle;
+ struct resource r;
+ struct acpi_resource_irq *p = &ares->data.irq;
+ struct acpi_resource_extended_irq *pext = &ares->data.extended_irq;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_END_TAG)
+ return AE_OK;
+
+ if (!acpi_dev_resource_interrupt(ares, 0, &r)) {
+ dev_err(dev, "unable to parse IRQ resource\n");
+ return AE_ERROR;
+ }
+ if (ares->type == ACPI_RESOURCE_TYPE_IRQ)
+ gsi = p->interrupts[0];
+ else
+ gsi = pext->interrupts[0];
+
+ irq = r.start;
+
+ if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) {
+ dev_err(dev, "cannot locate _EVT method\n");
+ return AE_ERROR;
+ }
+
+ dev_info(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);
+
+ event = devm_kzalloc(dev, sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return AE_ERROR;
+
+ event->gsi = gsi;
+ event->dev = dev;
+ event->irq = irq;
+ event->handle = evt_handle;
+
+ if (r.flags & IORESOURCE_IRQ_SHAREABLE)
+ irqflags |= IRQF_SHARED;
+
+ if (devm_request_threaded_irq(dev, irq, NULL, acpi_ged_irq_handler,
+ irqflags, "ACPI:Ged", event)) {
+ dev_err(dev, "failed to setup event handler for irq %u\n", irq);
+ return AE_ERROR;
+ }
+
+ return AE_OK;
+}
+
+static int ged_probe(struct platform_device *pdev)
+{
+ acpi_status acpi_ret;
+
+ acpi_ret = acpi_walk_resources(ACPI_HANDLE(&pdev->dev), "_CRS",
+ acpi_ged_request_interrupt, &pdev->dev);
+ if (ACPI_FAILURE(acpi_ret)) {
+ dev_err(&pdev->dev, "unable to parse the _CRS record\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct acpi_device_id ged_acpi_ids[] = {
+ {"ACPI0013"},
+ {},
+};
+
+static struct platform_driver ged_driver = {
+ .probe = ged_probe,
+ .driver = {
+ .name = MODULE_NAME,
+ .acpi_match_table = ACPI_PTR(ged_acpi_ids),
+ },
+};
+builtin_platform_driver(ged_driver);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7c188472d..27cc7feab 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -20,7 +20,8 @@
#define PREFIX "ACPI: "
-void acpi_initrd_initialize_tables(void);
+int early_acpi_osi_init(void);
+int acpi_osi_init(void);
acpi_status acpi_os_initialize1(void);
void init_acpi_device_notify(void);
int acpi_scan_init(void);
@@ -180,7 +181,7 @@ typedef int (*acpi_ec_query_func) (void *data);
int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
-int acpi_boot_ec_enable(void);
+int acpi_ec_dsdt_probe(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
void acpi_ec_unblock_transactions_early(void);
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 63cc9dbe4..1f0e06065 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -45,6 +45,11 @@ module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
"Number of times we overflow ARS results before abort");
+static bool disable_vendor_specific;
+module_param(disable_vendor_specific, bool, S_IRUGO);
+MODULE_PARM_DESC(disable_vendor_specific,
+ "Limit commands to the publicly specified set");
+
static struct workqueue_struct *nfit_wq;
struct nfit_table_prev {
@@ -171,33 +176,46 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
unsigned int buf_len, int *cmd_rc)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
- const struct nd_cmd_desc *desc = NULL;
union acpi_object in_obj, in_buf, *out_obj;
+ const struct nd_cmd_desc *desc = NULL;
struct device *dev = acpi_desc->dev;
+ struct nd_cmd_pkg *call_pkg = NULL;
const char *cmd_name, *dimm_name;
- unsigned long dsm_mask;
+ unsigned long cmd_mask, dsm_mask;
acpi_handle handle;
+ unsigned int func;
const u8 *uuid;
u32 offset;
int rc, i;
+ func = cmd;
+ if (cmd == ND_CMD_CALL) {
+ call_pkg = buf;
+ func = call_pkg->nd_command;
+ }
+
if (nvdimm) {
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
struct acpi_device *adev = nfit_mem->adev;
if (!adev)
return -ENOTTY;
+ if (call_pkg && nfit_mem->family != call_pkg->nd_family)
+ return -ENOTTY;
+
dimm_name = nvdimm_name(nvdimm);
cmd_name = nvdimm_cmd_name(cmd);
+ cmd_mask = nvdimm_cmd_mask(nvdimm);
dsm_mask = nfit_mem->dsm_mask;
desc = nd_cmd_dimm_desc(cmd);
- uuid = to_nfit_uuid(NFIT_DEV_DIMM);
+ uuid = to_nfit_uuid(nfit_mem->family);
handle = adev->handle;
} else {
struct acpi_device *adev = to_acpi_dev(acpi_desc);
cmd_name = nvdimm_bus_cmd_name(cmd);
- dsm_mask = nd_desc->dsm_mask;
+ cmd_mask = nd_desc->cmd_mask;
+ dsm_mask = cmd_mask;
desc = nd_cmd_bus_desc(cmd);
uuid = to_nfit_uuid(NFIT_DEV_BUS);
handle = adev->handle;
@@ -207,7 +225,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
return -ENOTTY;
- if (!test_bit(cmd, &dsm_mask))
+ if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
return -ENOTTY;
in_obj.type = ACPI_TYPE_PACKAGE;
@@ -222,21 +240,44 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
i, buf);
+ if (call_pkg) {
+ /* skip over package wrapper */
+ in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
+ in_buf.buffer.length = call_pkg->nd_size_in;
+ }
+
if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) {
- dev_dbg(dev, "%s:%s cmd: %s input length: %d\n", __func__,
- dimm_name, cmd_name, in_buf.buffer.length);
- print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4,
- 4, in_buf.buffer.pointer, min_t(u32, 128,
- in_buf.buffer.length), true);
+ dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
+ __func__, dimm_name, cmd, func,
+ in_buf.buffer.length);
+ print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4,
+ in_buf.buffer.pointer,
+ min_t(u32, 256, in_buf.buffer.length), true);
}
- out_obj = acpi_evaluate_dsm(handle, uuid, 1, cmd, &in_obj);
+ out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj);
if (!out_obj) {
dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
cmd_name);
return -EINVAL;
}
+ if (call_pkg) {
+ call_pkg->nd_fw_size = out_obj->buffer.length;
+ memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
+ out_obj->buffer.pointer,
+ min(call_pkg->nd_fw_size, call_pkg->nd_size_out));
+
+ ACPI_FREE(out_obj);
+ /*
+ * We need to support FW functions whose output size is not known
+ * in advance. The caller can determine the required size from
+ * nd_fw_size; if we returned an error here (as elsewhere), the
+ * caller could not rely on the returned data for that calculation.
+ */
+ return 0;
+ }
+
if (out_obj->package.type != ACPI_TYPE_BUFFER) {
dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
__func__, dimm_name, cmd_name, out_obj->type);
@@ -658,6 +699,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
if (!nfit_mem)
return -ENOMEM;
INIT_LIST_HEAD(&nfit_mem->list);
+ nfit_mem->acpi_desc = acpi_desc;
list_add(&nfit_mem->list, &acpi_desc->dimms);
}
@@ -819,7 +861,7 @@ static ssize_t vendor_show(struct device *dev,
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "%#x\n", dcr->vendor_id);
+ return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);
@@ -828,7 +870,7 @@ static ssize_t rev_id_show(struct device *dev,
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "%#x\n", dcr->revision_id);
+ return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);
@@ -837,28 +879,142 @@ static ssize_t device_show(struct device *dev,
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "%#x\n", dcr->device_id);
+ return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);
+static ssize_t subsystem_vendor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
+}
+static DEVICE_ATTR_RO(subsystem_vendor);
+
+static ssize_t subsystem_rev_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ return sprintf(buf, "0x%04x\n",
+ be16_to_cpu(dcr->subsystem_revision_id));
+}
+static DEVICE_ATTR_RO(subsystem_rev_id);
+
+static ssize_t subsystem_device_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
+}
+static DEVICE_ATTR_RO(subsystem_device);
+
+static int num_nvdimm_formats(struct nvdimm *nvdimm)
+{
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ int formats = 0;
+
+ if (nfit_mem->memdev_pmem)
+ formats++;
+ if (nfit_mem->memdev_bdw)
+ formats++;
+ return formats;
+}
+
static ssize_t format_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "%#x\n", dcr->code);
+ return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);
+static ssize_t format1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 handle;
+ ssize_t rc = -ENXIO;
+ struct nfit_mem *nfit_mem;
+ struct nfit_memdev *nfit_memdev;
+ struct acpi_nfit_desc *acpi_desc;
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ nfit_mem = nvdimm_provider_data(nvdimm);
+ acpi_desc = nfit_mem->acpi_desc;
+ handle = to_nfit_memdev(dev)->device_handle;
+
+ /* assumes DIMMs have at most 2 published interface codes */
+ mutex_lock(&acpi_desc->init_mutex);
+ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
+ struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
+ struct nfit_dcr *nfit_dcr;
+
+ if (memdev->device_handle != handle)
+ continue;
+
+ list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
+ if (nfit_dcr->dcr->region_index != memdev->region_index)
+ continue;
+ if (nfit_dcr->dcr->code == dcr->code)
+ continue;
+ rc = sprintf(buf, "0x%04x\n",
+ le16_to_cpu(nfit_dcr->dcr->code));
+ break;
+ }
+ if (rc != -ENXIO)
+ break;
+ }
+ mutex_unlock(&acpi_desc->init_mutex);
+ return rc;
+}
+static DEVICE_ATTR_RO(format1);
+
+static ssize_t formats_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+
+ return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
+}
+static DEVICE_ATTR_RO(formats);
+
static ssize_t serial_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
- return sprintf(buf, "%#x\n", dcr->serial_number);
+ return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);
+static ssize_t family_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
+ if (nfit_mem->family < 0)
+ return -ENXIO;
+ return sprintf(buf, "%d\n", nfit_mem->family);
+}
+static DEVICE_ATTR_RO(family);
+
+static ssize_t dsm_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
+ if (nfit_mem->family < 0)
+ return -ENXIO;
+ return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
+}
+static DEVICE_ATTR_RO(dsm_mask);
+
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -873,15 +1029,41 @@ static ssize_t flags_show(struct device *dev,
}
static DEVICE_ATTR_RO(flags);
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
+
+ if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
+ return sprintf(buf, "%04x-%02x-%04x-%08x\n",
+ be16_to_cpu(dcr->vendor_id),
+ dcr->manufacturing_location,
+ be16_to_cpu(dcr->manufacturing_date),
+ be32_to_cpu(dcr->serial_number));
+ else
+ return sprintf(buf, "%04x-%08x\n",
+ be16_to_cpu(dcr->vendor_id),
+ be32_to_cpu(dcr->serial_number));
+}
+static DEVICE_ATTR_RO(id);
+
static struct attribute *acpi_nfit_dimm_attributes[] = {
&dev_attr_handle.attr,
&dev_attr_phys_id.attr,
&dev_attr_vendor.attr,
&dev_attr_device.attr,
+ &dev_attr_rev_id.attr,
+ &dev_attr_subsystem_vendor.attr,
+ &dev_attr_subsystem_device.attr,
+ &dev_attr_subsystem_rev_id.attr,
&dev_attr_format.attr,
+ &dev_attr_formats.attr,
+ &dev_attr_format1.attr,
&dev_attr_serial.attr,
- &dev_attr_rev_id.attr,
&dev_attr_flags.attr,
+ &dev_attr_id.attr,
+ &dev_attr_family.attr,
+ &dev_attr_dsm_mask.attr,
NULL,
};
@@ -889,11 +1071,13 @@ static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
+ struct nvdimm *nvdimm = to_nvdimm(dev);
- if (to_nfit_dcr(dev))
- return a->mode;
- else
+ if (!to_nfit_dcr(dev))
return 0;
+ if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
+ return 0;
+ return a->mode;
}
static struct attribute_group acpi_nfit_dimm_attribute_group = {
@@ -926,10 +1110,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
{
struct acpi_device *adev, *adev_dimm;
struct device *dev = acpi_desc->dev;
- const u8 *uuid = to_nfit_uuid(NFIT_DEV_DIMM);
+ unsigned long dsm_mask;
+ const u8 *uuid;
int i;
- nfit_mem->dsm_mask = acpi_desc->dimm_dsm_force_en;
+ /* nfit test assumes 1:1 relationship between commands and dsms */
+ nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
+ nfit_mem->family = NVDIMM_FAMILY_INTEL;
adev = to_acpi_dev(acpi_desc);
if (!adev)
return 0;
@@ -942,7 +1129,36 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
return force_enable_dimms ? 0 : -ENODEV;
}
- for (i = ND_CMD_SMART; i <= ND_CMD_VENDOR; i++)
+ /*
+ * Until standardization materializes we need to consider up to 3
+ * different command sets. Note that checking for function 0 (bit 0)
+ * tells us if any commands are reachable through this uuid.
+ */
+ for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++)
+ if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
+ break;
+
+ /* limit the supported commands to those that are publicly documented */
+ nfit_mem->family = i;
+ if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
+ dsm_mask = 0x3fe;
+ if (disable_vendor_specific)
+ dsm_mask &= ~(1 << ND_CMD_VENDOR);
+ } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1)
+ dsm_mask = 0x1c3c76;
+ else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
+ dsm_mask = 0x1fe;
+ if (disable_vendor_specific)
+ dsm_mask &= ~(1 << 8);
+ } else {
+ dev_dbg(dev, "unknown dimm command family\n");
+ nfit_mem->family = -1;
+ /* DSMs are optional, continue loading the driver... */
+ return 0;
+ }
+
+ uuid = to_nfit_uuid(nfit_mem->family);
+ for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
set_bit(i, &nfit_mem->dsm_mask);
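The crux of the new probing is the per-family mask: each command family carries a bitmap of its publicly documented DSM functions, and disable_vendor_specific prunes the vendor passthrough bit before the per-function acpi_check_dsm() probe above. A minimal standalone sketch of the Intel-family pruning, assuming ND_CMD_VENDOR is 9 as in <linux/ndctl.h> of this vintage (illustration only, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long dsm_mask = 0x3fe;    /* Intel family: functions 1..9 */
            int disable_vendor_specific = 1;
            const int nd_cmd_vendor = 9;       /* assumed ND_CMD_VENDOR value */

            if (disable_vendor_specific)
                    dsm_mask &= ~(1UL << nd_cmd_vendor);
            printf("effective dsm_mask: %#lx\n", dsm_mask);    /* prints 0x1fe */
            return 0;
    }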
@@ -955,8 +1171,8 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
int dimm_count = 0;
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
+ unsigned long flags = 0, cmd_mask;
struct nvdimm *nvdimm;
- unsigned long flags = 0;
u32 device_handle;
u16 mem_flags;
int rc;
@@ -979,9 +1195,18 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
if (rc)
continue;
+ /*
+ * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
+ * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
+ * userspace interface.
+ */
+ cmd_mask = 1UL << ND_CMD_CALL;
+ if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
+ cmd_mask |= nfit_mem->dsm_mask;
+
nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
acpi_nfit_dimm_attribute_groups,
- flags, &nfit_mem->dsm_mask);
+ flags, cmd_mask);
if (!nvdimm)
return -ENOMEM;
@@ -1010,14 +1235,14 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
struct acpi_device *adev;
int i;
- nd_desc->dsm_mask = acpi_desc->bus_dsm_force_en;
+ nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
adev = to_acpi_dev(acpi_desc);
if (!adev)
return;
for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i))
- set_bit(i, &nd_desc->dsm_mask);
+ set_bit(i, &nd_desc->cmd_mask);
}
static ssize_t range_index_show(struct device *dev,
@@ -2309,7 +2534,7 @@ static int acpi_nfit_add(struct acpi_device *adev)
acpi_size sz;
int rc;
- status = acpi_get_table_with_size("NFIT", 0, &tbl, &sz);
+ status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz);
if (ACPI_FAILURE(status)) {
/* This is ok, we could have an nvdimm hotplugged later */
dev_dbg(dev, "failed to find NFIT at startup\n");
@@ -2466,6 +2691,8 @@ static __init int nfit_init(void)
acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]);
acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]);
acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]);
+ acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
+ acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
nfit_wq = create_singlethread_workqueue("nfit");
if (!nfit_wq)
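For reference, the new per-DIMM attributes land in the nvdimm device's sysfs group. A hedged userspace sketch reading the family attribute, assuming the attribute group is named "nfit" as elsewhere in this driver and that an nmem0 device exists (actual paths depend on enumeration):

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            FILE *f = fopen("/sys/bus/nd/devices/nmem0/nfit/family", "r");

            if (f) {
                    if (fgets(buf, sizeof(buf), f))
                            printf("DSM family: %s", buf);
                    fclose(f);
            }
            return 0;
    }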
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index c75576b2d..02b9ea1e8 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -21,13 +21,25 @@
#include <linux/acpi.h>
#include <acpi/acuuid.h>
+/* ACPI 6.1 */
#define UUID_NFIT_BUS "2f10e7a4-9e91-11e4-89d3-123b93f75cba"
+
+/* http://pmem.io/documents/NVDIMM_DSM_Interface_Example.pdf */
#define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66"
+
+/* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */
+#define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6"
+#define UUID_NFIT_DIMM_N_HPE2 "5008664b-b758-41a0-a03c-27c2f2d04f7e"
+
#define ACPI_NFIT_MEM_FAILED_MASK (ACPI_NFIT_MEM_SAVE_FAILED \
| ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \
| ACPI_NFIT_MEM_NOT_ARMED)
enum nfit_uuids {
+ /* for simplicity, alias the uuid index with the family id */
+ NFIT_DEV_DIMM = NVDIMM_FAMILY_INTEL,
+ NFIT_DEV_DIMM_N_HPE1 = NVDIMM_FAMILY_HPE1,
+ NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2,
NFIT_SPA_VOLATILE,
NFIT_SPA_PM,
NFIT_SPA_DCR,
@@ -37,15 +49,16 @@ enum nfit_uuids {
NFIT_SPA_PDISK,
NFIT_SPA_PCD,
NFIT_DEV_BUS,
- NFIT_DEV_DIMM,
NFIT_UUID_MAX,
};
-enum nfit_fic {
- NFIT_FIC_BYTE = 0x101, /* byte-addressable energy backed */
- NFIT_FIC_BLK = 0x201, /* block-addressable non-energy backed */
- NFIT_FIC_BYTEN = 0x301, /* byte-addressable non-energy backed */
-};
+/*
+ * Region format interface codes are stored with the interface as the
+ * LSB and the function as the MSB.
+ */
+#define NFIT_FIC_BYTE cpu_to_le16(0x101) /* byte-addressable energy backed */
+#define NFIT_FIC_BLK cpu_to_le16(0x201) /* block-addressable non-energy backed */
+#define NFIT_FIC_BYTEN cpu_to_le16(0x301) /* byte-addressable non-energy backed */
enum {
NFIT_BLK_READ_FLUSH = 1,
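Decoding one of these codes is a byte split, since the interface sits in the low byte and the function in the high byte of the little-endian value. A minimal sketch (illustration only):

    #include <stdio.h>

    int main(void)
    {
            unsigned short fic = 0x0101;            /* NFIT_FIC_BYTE, CPU order */
            unsigned char iface = fic & 0xff;       /* low byte: interface */
            unsigned char function = fic >> 8;      /* high byte: function */

            printf("interface %#x, function %#x\n", iface, function);
            return 0;
    }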
@@ -109,7 +122,9 @@ struct nfit_mem {
struct nfit_flush *nfit_flush;
struct list_head list;
struct acpi_device *adev;
+ struct acpi_nfit_desc *acpi_desc;
unsigned long dsm_mask;
+ int family;
};
struct acpi_nfit_desc {
@@ -132,8 +147,8 @@ struct acpi_nfit_desc {
size_t ars_status_size;
struct work_struct work;
unsigned int cancel:1;
- unsigned long dimm_dsm_force_en;
- unsigned long bus_dsm_force_en;
+ unsigned long dimm_cmd_force_en;
+ unsigned long bus_cmd_force_en;
int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
void *iobuf, u64 len, int rw);
};
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 72b6e9ef0..d176e0ece 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -327,10 +327,18 @@ int __init acpi_numa_init(void)
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
- acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
- acpi_parse_x2apic_affinity, 0);
- acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, 0);
+ struct acpi_subtable_proc srat_proc[2];
+
+ memset(srat_proc, 0, sizeof(srat_proc));
+ srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
+ srat_proc[0].handler = acpi_parse_processor_affinity;
+ srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY;
+ srat_proc[1].handler = acpi_parse_x2apic_affinity;
+
+ acpi_table_parse_entries_array(ACPI_SIG_SRAT,
+ sizeof(struct acpi_table_srat),
+ srat_proc, ARRAY_SIZE(srat_proc), 0);
+
cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
acpi_parse_memory_affinity,
NR_NODE_MEMBLKS);
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
new file mode 100644
index 000000000..849f9d224
--- /dev/null
+++ b/drivers/acpi/osi.c
@@ -0,0 +1,522 @@
+/*
+ * osi.c - _OSI implementation
+ *
+ * Copyright (C) 2016 Intel Corporation
+ * Author: Lv Zheng <lv.zheng@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+/* Uncomment next line to get verbose printout */
+/* #define DEBUG */
+#define pr_fmt(fmt) "ACPI: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+
+#include "internal.h"
+
+
+#define OSI_STRING_LENGTH_MAX 64
+#define OSI_STRING_ENTRIES_MAX 16
+
+struct acpi_osi_entry {
+ char string[OSI_STRING_LENGTH_MAX];
+ bool enable;
+};
+
+static struct acpi_osi_config {
+ u8 default_disabling;
+ unsigned int linux_enable:1;
+ unsigned int linux_dmi:1;
+ unsigned int linux_cmdline:1;
+ unsigned int darwin_enable:1;
+ unsigned int darwin_dmi:1;
+ unsigned int darwin_cmdline:1;
+} osi_config;
+
+static struct acpi_osi_entry
+osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
+ {"Module Device", true},
+ {"Processor Device", true},
+ {"3.0 _SCP Extensions", true},
+ {"Processor Aggregator Device", true},
+};
+
+static u32 acpi_osi_handler(acpi_string interface, u32 supported)
+{
+ if (!strcmp("Linux", interface)) {
+ pr_notice_once(FW_BUG
+ "BIOS _OSI(Linux) query %s%s\n",
+ osi_config.linux_enable ? "honored" : "ignored",
+ osi_config.linux_cmdline ? " via cmdline" :
+ osi_config.linux_dmi ? " via DMI" : "");
+ }
+ if (!strcmp("Darwin", interface)) {
+ pr_notice_once(
+ "BIOS _OSI(Darwin) query %s%s\n",
+ osi_config.darwin_enable ? "honored" : "ignored",
+ osi_config.darwin_cmdline ? " via cmdline" :
+ osi_config.darwin_dmi ? " via DMI" : "");
+ }
+
+ return supported;
+}
+
+void __init acpi_osi_setup(char *str)
+{
+ struct acpi_osi_entry *osi;
+ bool enable = true;
+ int i;
+
+ if (!acpi_gbl_create_osi_method)
+ return;
+
+ if (str == NULL || *str == '\0') {
+ pr_info("_OSI method disabled\n");
+ acpi_gbl_create_osi_method = FALSE;
+ return;
+ }
+
+ if (*str == '!') {
+ str++;
+ if (*str == '\0') {
+ /* Do not override acpi_osi=!* */
+ if (!osi_config.default_disabling)
+ osi_config.default_disabling =
+ ACPI_DISABLE_ALL_VENDOR_STRINGS;
+ return;
+ } else if (*str == '*') {
+ osi_config.default_disabling = ACPI_DISABLE_ALL_STRINGS;
+ for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+ osi = &osi_setup_entries[i];
+ osi->enable = false;
+ }
+ return;
+ } else if (*str == '!') {
+ osi_config.default_disabling = 0;
+ return;
+ }
+ enable = false;
+ }
+
+ for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+ osi = &osi_setup_entries[i];
+ if (!strcmp(osi->string, str)) {
+ osi->enable = enable;
+ break;
+ } else if (osi->string[0] == '\0') {
+ osi->enable = enable;
+ strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
+ break;
+ }
+ }
+}
+
+static void __init __acpi_osi_setup_darwin(bool enable)
+{
+ osi_config.darwin_enable = !!enable;
+ if (enable) {
+ acpi_osi_setup("!");
+ acpi_osi_setup("Darwin");
+ } else {
+ acpi_osi_setup("!!");
+ acpi_osi_setup("!Darwin");
+ }
+}
+
+static void __init acpi_osi_setup_darwin(bool enable)
+{
+ /* Override acpi_osi_dmi_blacklisted() */
+ osi_config.darwin_dmi = 0;
+ osi_config.darwin_cmdline = 1;
+ __acpi_osi_setup_darwin(enable);
+}
+
+/*
+ * The story of _OSI(Linux)
+ *
+ * From pre-history through Linux-2.6.22, Linux responded TRUE upon a BIOS
+ * OSI(Linux) query.
+ *
+ * Unfortunately, reference BIOS writers got wind of this and put
+ * OSI(Linux) in their example code, quickly exposing this string as
+ * ill-conceived and opening the door to an un-bounded number of BIOS
+ * incompatibilities.
+ *
+ * For example, OSI(Linux) was used on resume to re-POST a video card on
+ * one system, because Linux at that time could not do a speedy restore in
+ * its native driver. But then upon gaining quick native restore
+ * capability, Linux has no way to tell the BIOS to skip the time-consuming
+ * POST -- putting Linux at a permanent performance disadvantage. On
+ * another system, the BIOS writer used OSI(Linux) to infer native OS
+ * support for IPMI! On other systems, OSI(Linux) simply got in the way of
+ * Linux claiming to be compatible with other operating systems, exposing
+ * BIOS issues such as skipped device initialization.
+ *
+ * So "Linux" turned out to be a really poor chose of OSI string, and from
+ * Linux-2.6.23 onward we respond FALSE.
+ *
+ * BIOS writers should NOT query _OSI(Linux) on future systems. Linux will
+ * complain on the console when it sees it, and return FALSE. To get Linux
+ * to return TRUE for your system will require a kernel source update to
+ * add a DMI entry, or boot with "acpi_osi=Linux"
+ */
+static void __init __acpi_osi_setup_linux(bool enable)
+{
+ osi_config.linux_enable = !!enable;
+ if (enable)
+ acpi_osi_setup("Linux");
+ else
+ acpi_osi_setup("!Linux");
+}
+
+static void __init acpi_osi_setup_linux(bool enable)
+{
+ /* Override acpi_osi_dmi_blacklisted() */
+ osi_config.linux_dmi = 0;
+ osi_config.linux_cmdline = 1;
+ __acpi_osi_setup_linux(enable);
+}
+
+/*
+ * Modify the list of "OS Interfaces" reported to BIOS via _OSI
+ *
+ * empty string disables _OSI
+ * string starting with '!' disables that string
+ * otherwise string is added to list, augmenting built-in strings
+ */
+static void __init acpi_osi_setup_late(void)
+{
+ struct acpi_osi_entry *osi;
+ char *str;
+ int i;
+ acpi_status status;
+
+ if (osi_config.default_disabling) {
+ status = acpi_update_interfaces(osi_config.default_disabling);
+ if (ACPI_SUCCESS(status))
+ pr_info("Disabled all _OSI OS vendors%s\n",
+ osi_config.default_disabling ==
+ ACPI_DISABLE_ALL_STRINGS ?
+ " and feature groups" : "");
+ }
+
+ for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+ osi = &osi_setup_entries[i];
+ str = osi->string;
+ if (*str == '\0')
+ break;
+ if (osi->enable) {
+ status = acpi_install_interface(str);
+ if (ACPI_SUCCESS(status))
+ pr_info("Added _OSI(%s)\n", str);
+ } else {
+ status = acpi_remove_interface(str);
+ if (ACPI_SUCCESS(status))
+ pr_info("Deleted _OSI(%s)\n", str);
+ }
+ }
+}
+
+static int __init osi_setup(char *str)
+{
+ if (str && !strcmp("Linux", str))
+ acpi_osi_setup_linux(true);
+ else if (str && !strcmp("!Linux", str))
+ acpi_osi_setup_linux(false);
+ else if (str && !strcmp("Darwin", str))
+ acpi_osi_setup_darwin(true);
+ else if (str && !strcmp("!Darwin", str))
+ acpi_osi_setup_darwin(false);
+ else
+ acpi_osi_setup(str);
+
+ return 1;
+}
+__setup("acpi_osi=", osi_setup);
+
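Taken together with the parser above, the accepted forms (per kernel-parameters.txt) are:

    acpi_osi="string1"    # add string1
    acpi_osi="!string2"   # remove string2
    acpi_osi=!*           # remove all strings
    acpi_osi=!            # disable all built-in OS vendor strings
    acpi_osi=!!           # re-enable all built-in OS vendor strings
    acpi_osi=             # disable the _OSI method entirely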
+bool acpi_osi_is_win8(void)
+{
+ return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
+}
+EXPORT_SYMBOL(acpi_osi_is_win8);
+
+static void __init acpi_osi_dmi_darwin(bool enable,
+ const struct dmi_system_id *d)
+{
+ pr_notice("DMI detected to setup _OSI(\"Darwin\"): %s\n", d->ident);
+ osi_config.darwin_dmi = 1;
+ __acpi_osi_setup_darwin(enable);
+}
+
+void __init acpi_osi_dmi_linux(bool enable, const struct dmi_system_id *d)
+{
+ pr_notice("DMI detected to setup _OSI(\"Linux\"): %s\n", d->ident);
+ osi_config.linux_dmi = 1;
+ __acpi_osi_setup_linux(enable);
+}
+
+static int __init dmi_enable_osi_darwin(const struct dmi_system_id *d)
+{
+ acpi_osi_dmi_darwin(true, d);
+
+ return 0;
+}
+
+static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
+{
+ acpi_osi_dmi_linux(true, d);
+
+ return 0;
+}
+
+static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
+{
+ pr_notice("DMI detected: %s\n", d->ident);
+ acpi_osi_setup("!Windows 2006");
+ acpi_osi_setup("!Windows 2006 SP1");
+ acpi_osi_setup("!Windows 2006 SP2");
+
+ return 0;
+}
+
+static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
+{
+ pr_notice("DMI detected: %s\n", d->ident);
+ acpi_osi_setup("!Windows 2009");
+
+ return 0;
+}
+
+static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
+{
+ pr_notice("DMI detected: %s\n", d->ident);
+ acpi_osi_setup("!Windows 2012");
+
+ return 0;
+}
+
+/*
+ * Linux default _OSI response behavior is determined by this DMI table.
+ *
+ * Note that _OSI("Linux")/_OSI("Darwin") determined here can be overridden
+ * by acpi_osi=!Linux/acpi_osi=!Darwin command line options.
+ */
+static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Fujitsu Siemens",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Mobile V5505"),
+ },
+ },
+ {
+ /*
+ * The MSI GX723 DSDT has an NVIF method that the Nvidia driver
+ * (e.g. nouveau) needs to call when the user presses the
+ * brightness hotkey. Currently nouveau does not do this, which
+ * causes an infinite while loop in the DSDT when the hotkey is
+ * pressed. Add the MSI GX723's DMI information to this table to
+ * work around the issue; remove it once nouveau grows support.
+ */
+ .callback = dmi_disable_osi_vista,
+ .ident = "MSI GX723",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GX723"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Sony VGN-NS10J_S",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS10J_S"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Sony VGN-SR290J",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR290J"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "VGN-NS50B_L",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NS50B_L"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "VGN-SR19XN",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR19XN"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba Satellite L355",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win7,
+ .ident = "ASUS K50IJ",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba P305D",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_vista,
+ .ident = "Toshiba NB100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "NB100"),
+ },
+ },
+
+ /*
+ * The wireless hotkey does not work on those machines when
+ * returning true for _OSI("Windows 2012")
+ */
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Inspiron 7737",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Inspiron 7537",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Inspiron 5437",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5437"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Inspiron 3437",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3437"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Vostro 3446",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Vostro 3546",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3546"),
+ },
+ },
+
+ /*
+ * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+ * Linux ignores it, except for the machines enumerated below.
+ */
+
+ /*
+ * Without this entry the EeePC exports a non-working WMI interface;
+ * with it, it exports the working "good old" eeepc_laptop interface,
+ * fixing both brightness control and rfkill.
+ */
+ {
+ .callback = dmi_enable_osi_linux,
+ .ident = "Asus EEE PC 1015PX",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
+ },
+ },
+
+ /*
+ * Enable _OSI("Darwin") for all apple platforms.
+ */
+ {
+ .callback = dmi_enable_osi_darwin,
+ .ident = "Apple hardware",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ },
+ },
+ {
+ .callback = dmi_enable_osi_darwin,
+ .ident = "Apple hardware",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
+ },
+ },
+ {}
+};
+
+static __init void acpi_osi_dmi_blacklisted(void)
+{
+ dmi_check_system(acpi_osi_dmi_table);
+}
+
+int __init early_acpi_osi_init(void)
+{
+ acpi_osi_dmi_blacklisted();
+
+ return 0;
+}
+
+int __init acpi_osi_init(void)
+{
+ acpi_install_interface_handler(acpi_osi_handler);
+ acpi_osi_setup_late();
+
+ return 0;
+}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index f03677588..b108f1358 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -56,10 +56,6 @@ struct acpi_os_dpc {
struct work_struct work;
};
-#ifdef CONFIG_ACPI_CUSTOM_DSDT
-#include CONFIG_ACPI_CUSTOM_DSDT_FILE
-#endif
-
#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>
@@ -96,72 +92,6 @@ struct acpi_ioremap {
static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
-static void __init acpi_osi_setup_late(void);
-
-/*
- * The story of _OSI(Linux)
- *
- * From pre-history through Linux-2.6.22,
- * Linux responded TRUE upon a BIOS OSI(Linux) query.
- *
- * Unfortunately, reference BIOS writers got wind of this
- * and put OSI(Linux) in their example code, quickly exposing
- * this string as ill-conceived and opening the door to
- * an un-bounded number of BIOS incompatibilities.
- *
- * For example, OSI(Linux) was used on resume to re-POST a
- * video card on one system, because Linux at that time
- * could not do a speedy restore in its native driver.
- * But then upon gaining quick native restore capability,
- * Linux has no way to tell the BIOS to skip the time-consuming
- * POST -- putting Linux at a permanent performance disadvantage.
- * On another system, the BIOS writer used OSI(Linux)
- * to infer native OS support for IPMI! On other systems,
- * OSI(Linux) simply got in the way of Linux claiming to
- * be compatible with other operating systems, exposing
- * BIOS issues such as skipped device initialization.
- *
- * So "Linux" turned out to be a really poor chose of
- * OSI string, and from Linux-2.6.23 onward we respond FALSE.
- *
- * BIOS writers should NOT query _OSI(Linux) on future systems.
- * Linux will complain on the console when it sees it, and return FALSE.
- * To get Linux to return TRUE for your system will require
- * a kernel source update to add a DMI entry,
- * or boot with "acpi_osi=Linux"
- */
-
-static struct osi_linux {
- unsigned int enable:1;
- unsigned int dmi:1;
- unsigned int cmdline:1;
- u8 default_disabling;
-} osi_linux = {0, 0, 0, 0};
-
-static u32 acpi_osi_handler(acpi_string interface, u32 supported)
-{
- if (!strcmp("Linux", interface)) {
-
- printk_once(KERN_NOTICE FW_BUG PREFIX
- "BIOS _OSI(Linux) query %s%s\n",
- osi_linux.enable ? "honored" : "ignored",
- osi_linux.cmdline ? " via cmdline" :
- osi_linux.dmi ? " via DMI" : "");
- }
-
- if (!strcmp("Darwin", interface)) {
- /*
- * Apple firmware will behave poorly if it receives positive
- * answers to "Darwin" and any other OS. Respond positively
- * to Darwin and then disable all other vendor strings.
- */
- acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
- supported = ACPI_UINT32_MAX;
- }
-
- return supported;
-}
-
static void __init acpi_request_region (struct acpi_generic_address *gas,
unsigned int length, char *desc)
{
@@ -582,7 +512,7 @@ static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];
acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
- char **new_val)
+ acpi_string *new_val)
{
if (!init_val || !new_val)
return AE_BAD_PARAMETER;
@@ -602,280 +532,6 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
return AE_OK;
}
-static void acpi_table_taint(struct acpi_table_header *table)
-{
- pr_warn(PREFIX
- "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
- table->signature, table->oem_table_id);
- add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
-}
-
-#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
-#include <linux/earlycpio.h>
-#include <linux/memblock.h>
-
-static u64 acpi_tables_addr;
-static int all_tables_size;
-
-/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
-static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
-{
- u8 sum = 0;
- u8 *end = buffer + length;
-
- while (buffer < end)
- sum = (u8) (sum + *(buffer++));
- return sum;
-}
-
-/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
-static const char * const table_sigs[] = {
- ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
- ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
- ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
- ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
- ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
- ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
- ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
- ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
- ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
-
-#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
-
-#define ACPI_OVERRIDE_TABLES 64
-static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
-static DECLARE_BITMAP(acpi_initrd_installed, ACPI_OVERRIDE_TABLES);
-
-#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)
-
-void __init acpi_initrd_override(void *data, size_t size)
-{
- int sig, no, table_nr = 0, total_offset = 0;
- long offset = 0;
- struct acpi_table_header *table;
- char cpio_path[32] = "kernel/firmware/acpi/";
- struct cpio_data file;
-
- if (data == NULL || size == 0)
- return;
-
- for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
- file = find_cpio_data(cpio_path, data, size, &offset);
- if (!file.data)
- break;
-
- data += offset;
- size -= offset;
-
- if (file.size < sizeof(struct acpi_table_header)) {
- pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
- cpio_path, file.name);
- continue;
- }
-
- table = file.data;
-
- for (sig = 0; table_sigs[sig]; sig++)
- if (!memcmp(table->signature, table_sigs[sig], 4))
- break;
-
- if (!table_sigs[sig]) {
- pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
- cpio_path, file.name);
- continue;
- }
- if (file.size != table->length) {
- pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
- cpio_path, file.name);
- continue;
- }
- if (acpi_table_checksum(file.data, table->length)) {
- pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
- cpio_path, file.name);
- continue;
- }
-
- pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
- table->signature, cpio_path, file.name, table->length);
-
- all_tables_size += table->length;
- acpi_initrd_files[table_nr].data = file.data;
- acpi_initrd_files[table_nr].size = file.size;
- table_nr++;
- }
- if (table_nr == 0)
- return;
-
- acpi_tables_addr =
- memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
- all_tables_size, PAGE_SIZE);
- if (!acpi_tables_addr) {
- WARN_ON(1);
- return;
- }
- /*
- * Only calling e820_add_reserve does not work and the
- * tables are invalid (memory got used) later.
- * memblock_reserve works as expected and the tables won't get modified.
- * But it's not enough on X86 because ioremap will
- * complain later (used by acpi_os_map_memory) that the pages
- * that should get mapped are not marked "reserved".
- * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
- * works fine.
- */
- memblock_reserve(acpi_tables_addr, all_tables_size);
- arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
-
- /*
- * early_ioremap only can remap 256k one time. If we map all
- * tables one time, we will hit the limit. Need to map chunks
- * one by one during copying the same as that in relocate_initrd().
- */
- for (no = 0; no < table_nr; no++) {
- unsigned char *src_p = acpi_initrd_files[no].data;
- phys_addr_t size = acpi_initrd_files[no].size;
- phys_addr_t dest_addr = acpi_tables_addr + total_offset;
- phys_addr_t slop, clen;
- char *dest_p;
-
- total_offset += size;
-
- while (size) {
- slop = dest_addr & ~PAGE_MASK;
- clen = size;
- if (clen > MAP_CHUNK_SIZE - slop)
- clen = MAP_CHUNK_SIZE - slop;
- dest_p = early_ioremap(dest_addr & PAGE_MASK,
- clen + slop);
- memcpy(dest_p + slop, src_p, clen);
- early_iounmap(dest_p, clen + slop);
- src_p += clen;
- dest_addr += clen;
- size -= clen;
- }
- }
-}
-
-acpi_status
-acpi_os_physical_table_override(struct acpi_table_header *existing_table,
- acpi_physical_address *address, u32 *length)
-{
- int table_offset = 0;
- int table_index = 0;
- struct acpi_table_header *table;
- u32 table_length;
-
- *length = 0;
- *address = 0;
- if (!acpi_tables_addr)
- return AE_OK;
-
- while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
- table = acpi_os_map_memory(acpi_tables_addr + table_offset,
- ACPI_HEADER_SIZE);
- if (table_offset + table->length > all_tables_size) {
- acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- WARN_ON(1);
- return AE_OK;
- }
-
- table_length = table->length;
-
- /* Only override tables matched */
- if (test_bit(table_index, acpi_initrd_installed) ||
- memcmp(existing_table->signature, table->signature, 4) ||
- memcmp(table->oem_table_id, existing_table->oem_table_id,
- ACPI_OEM_TABLE_ID_SIZE)) {
- acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- goto next_table;
- }
-
- *length = table_length;
- *address = acpi_tables_addr + table_offset;
- acpi_table_taint(existing_table);
- acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- set_bit(table_index, acpi_initrd_installed);
- break;
-
-next_table:
- table_offset += table_length;
- table_index++;
- }
- return AE_OK;
-}
-
-void __init acpi_initrd_initialize_tables(void)
-{
- int table_offset = 0;
- int table_index = 0;
- u32 table_length;
- struct acpi_table_header *table;
-
- if (!acpi_tables_addr)
- return;
-
- while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
- table = acpi_os_map_memory(acpi_tables_addr + table_offset,
- ACPI_HEADER_SIZE);
- if (table_offset + table->length > all_tables_size) {
- acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- WARN_ON(1);
- return;
- }
-
- table_length = table->length;
-
- /* Skip RSDT/XSDT which should only be used for override */
- if (test_bit(table_index, acpi_initrd_installed) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
- acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- goto next_table;
- }
-
- acpi_table_taint(table);
- acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
- acpi_install_table(acpi_tables_addr + table_offset, TRUE);
- set_bit(table_index, acpi_initrd_installed);
-next_table:
- table_offset += table_length;
- table_index++;
- }
-}
-#else
-acpi_status
-acpi_os_physical_table_override(struct acpi_table_header *existing_table,
- acpi_physical_address *address,
- u32 *table_length)
-{
- *table_length = 0;
- *address = 0;
- return AE_OK;
-}
-
-void __init acpi_initrd_initialize_tables(void)
-{
-}
-#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
-
-acpi_status
-acpi_os_table_override(struct acpi_table_header *existing_table,
- struct acpi_table_header **new_table)
-{
- if (!existing_table || !new_table)
- return AE_BAD_PARAMETER;
-
- *new_table = NULL;
-
-#ifdef CONFIG_ACPI_CUSTOM_DSDT
- if (strncmp(existing_table->signature, "DSDT", 4) == 0)
- *new_table = (struct acpi_table_header *)AmlCode;
-#endif
- if (*new_table != NULL)
- acpi_table_taint(existing_table);
- return AE_OK;
-}
-
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
u32 handled;
@@ -1717,162 +1373,6 @@ static int __init acpi_os_name_setup(char *str)
__setup("acpi_os_name=", acpi_os_name_setup);
-#define OSI_STRING_LENGTH_MAX 64 /* arbitrary */
-#define OSI_STRING_ENTRIES_MAX 16 /* arbitrary */
-
-struct osi_setup_entry {
- char string[OSI_STRING_LENGTH_MAX];
- bool enable;
-};
-
-static struct osi_setup_entry
- osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
- {"Module Device", true},
- {"Processor Device", true},
- {"3.0 _SCP Extensions", true},
- {"Processor Aggregator Device", true},
-};
-
-void __init acpi_osi_setup(char *str)
-{
- struct osi_setup_entry *osi;
- bool enable = true;
- int i;
-
- if (!acpi_gbl_create_osi_method)
- return;
-
- if (str == NULL || *str == '\0') {
- printk(KERN_INFO PREFIX "_OSI method disabled\n");
- acpi_gbl_create_osi_method = FALSE;
- return;
- }
-
- if (*str == '!') {
- str++;
- if (*str == '\0') {
- /* Do not override acpi_osi=!* */
- if (!osi_linux.default_disabling)
- osi_linux.default_disabling =
- ACPI_DISABLE_ALL_VENDOR_STRINGS;
- return;
- } else if (*str == '*') {
- osi_linux.default_disabling = ACPI_DISABLE_ALL_STRINGS;
- for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
- osi = &osi_setup_entries[i];
- osi->enable = false;
- }
- return;
- }
- enable = false;
- }
-
- for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
- osi = &osi_setup_entries[i];
- if (!strcmp(osi->string, str)) {
- osi->enable = enable;
- break;
- } else if (osi->string[0] == '\0') {
- osi->enable = enable;
- strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
- break;
- }
- }
-}
-
-static void __init set_osi_linux(unsigned int enable)
-{
- if (osi_linux.enable != enable)
- osi_linux.enable = enable;
-
- if (osi_linux.enable)
- acpi_osi_setup("Linux");
- else
- acpi_osi_setup("!Linux");
-
- return;
-}
-
-static void __init acpi_cmdline_osi_linux(unsigned int enable)
-{
- osi_linux.cmdline = 1; /* cmdline set the default and override DMI */
- osi_linux.dmi = 0;
- set_osi_linux(enable);
-
- return;
-}
-
-void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
-{
- printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
-
- if (enable == -1)
- return;
-
- osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */
- set_osi_linux(enable);
-
- return;
-}
-
-/*
- * Modify the list of "OS Interfaces" reported to BIOS via _OSI
- *
- * empty string disables _OSI
- * string starting with '!' disables that string
- * otherwise string is added to list, augmenting built-in strings
- */
-static void __init acpi_osi_setup_late(void)
-{
- struct osi_setup_entry *osi;
- char *str;
- int i;
- acpi_status status;
-
- if (osi_linux.default_disabling) {
- status = acpi_update_interfaces(osi_linux.default_disabling);
-
- if (ACPI_SUCCESS(status))
- printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors%s\n",
- osi_linux.default_disabling ==
- ACPI_DISABLE_ALL_STRINGS ?
- " and feature groups" : "");
- }
-
- for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
- osi = &osi_setup_entries[i];
- str = osi->string;
-
- if (*str == '\0')
- break;
- if (osi->enable) {
- status = acpi_install_interface(str);
-
- if (ACPI_SUCCESS(status))
- printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
- } else {
- status = acpi_remove_interface(str);
-
- if (ACPI_SUCCESS(status))
- printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
- }
- }
-}
-
-static int __init osi_setup(char *str)
-{
- if (str && !strcmp("Linux", str))
- acpi_cmdline_osi_linux(1);
- else if (str && !strcmp("!Linux", str))
- acpi_cmdline_osi_linux(0);
- else
- acpi_osi_setup(str);
-
- return 1;
-}
-
-__setup("acpi_osi=", osi_setup);
-
/*
* Disable the auto-serialization of named objects creation methods.
*
@@ -1992,12 +1492,6 @@ int acpi_resources_are_enforced(void)
}
EXPORT_SYMBOL(acpi_resources_are_enforced);
-bool acpi_osi_is_win8(void)
-{
- return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
-}
-EXPORT_SYMBOL(acpi_osi_is_win8);
-
/*
* Deallocate the memory for a spinlock.
*/
@@ -2163,8 +1657,7 @@ acpi_status __init acpi_os_initialize1(void)
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
BUG_ON(!kacpi_hotplug_wq);
- acpi_install_interface_handler(acpi_osi_handler);
- acpi_osi_setup_late();
+ acpi_osi_init();
return AE_OK;
}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index ededa909d..c983bf733 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -36,6 +36,7 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/irq.h>
#include "internal.h"
@@ -437,17 +438,15 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
* enabled system.
*/
-#define ACPI_MAX_IRQS 256
-#define ACPI_MAX_ISA_IRQ 16
+#define ACPI_MAX_ISA_IRQS 16
-#define PIRQ_PENALTY_PCI_AVAILABLE (0)
#define PIRQ_PENALTY_PCI_POSSIBLE (16*16)
#define PIRQ_PENALTY_PCI_USING (16*16*16)
#define PIRQ_PENALTY_ISA_TYPICAL (16*16*16*16)
#define PIRQ_PENALTY_ISA_USED (16*16*16*16*16)
#define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16)
-static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
+static int acpi_isa_irq_penalty[ACPI_MAX_ISA_IRQS] = {
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */
@@ -457,9 +456,9 @@ static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
PIRQ_PENALTY_ISA_TYPICAL, /* IRQ6 */
PIRQ_PENALTY_ISA_TYPICAL, /* IRQ7 parallel, spurious */
PIRQ_PENALTY_ISA_TYPICAL, /* IRQ8 rtc, sometimes */
- PIRQ_PENALTY_PCI_AVAILABLE, /* IRQ9 PCI, often acpi */
- PIRQ_PENALTY_PCI_AVAILABLE, /* IRQ10 PCI */
- PIRQ_PENALTY_PCI_AVAILABLE, /* IRQ11 PCI */
+ 0, /* IRQ9 PCI, often acpi */
+ 0, /* IRQ10 PCI */
+ 0, /* IRQ11 PCI */
PIRQ_PENALTY_ISA_USED, /* IRQ12 mouse */
PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */
PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */
@@ -467,6 +466,57 @@ static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
/* >IRQ15 */
};
+static int acpi_irq_pci_sharing_penalty(int irq)
+{
+ struct acpi_pci_link *link;
+ int penalty = 0;
+ int i;
+
+ list_for_each_entry(link, &acpi_link_list, list) {
+ /*
+ * If a link is active, penalize its IRQ heavily
+ * so we try to choose a different IRQ.
+ */
+ if (link->irq.active && link->irq.active == irq)
+ penalty += PIRQ_PENALTY_PCI_USING;
+
+ /*
+ * penalize the IRQs PCI might use, but not as severely.
+ */
+ for (i = 0; i < link->irq.possible_count; i++)
+ if (link->irq.possible[i] == irq)
+ penalty += PIRQ_PENALTY_PCI_POSSIBLE /
+ link->irq.possible_count;
+ }
+
+ return penalty;
+}
+
+static int acpi_irq_get_penalty(int irq)
+{
+ int penalty = 0;
+
+ /*
+ * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
+ * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
+ * used for PCI IRQs.
+ */
+ if (irq == acpi_gbl_FADT.sci_interrupt) {
+ u32 type = irq_get_trigger_type(irq) & IRQ_TYPE_SENSE_MASK;
+
+ if (type != IRQ_TYPE_LEVEL_LOW)
+ penalty += PIRQ_PENALTY_ISA_ALWAYS;
+ else
+ penalty += PIRQ_PENALTY_PCI_USING;
+ }
+
+ if (irq < ACPI_MAX_ISA_IRQS)
+ return penalty + acpi_isa_irq_penalty[irq];
+
+ penalty += acpi_irq_pci_sharing_penalty(irq);
+ return penalty;
+}
+
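The penalty constants are spaced by powers of 16 so the classes never overlap: a level-low SCI on, say, IRQ 9 accrues only PIRQ_PENALTY_PCI_USING (16^3 = 4096) and stays eligible for PCI links, while any other trigger type adds PIRQ_PENALTY_ISA_ALWAYS (16^6 = 16777216) and takes it out of the running. A standalone sketch of that arithmetic (illustration only):

    #include <stdio.h>

    #define PIRQ_PENALTY_PCI_USING  (16*16*16)
    #define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16)

    int main(void)
    {
            int level_low = 1;      /* SCI trigger matches ACPI's expectation */
            int penalty = level_low ? PIRQ_PENALTY_PCI_USING
                                    : PIRQ_PENALTY_ISA_ALWAYS;

            printf("SCI penalty %d, usable for PCI: %d\n",
                   penalty, penalty < PIRQ_PENALTY_ISA_ALWAYS);
            return 0;
    }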
int __init acpi_irq_penalty_init(void)
{
struct acpi_pci_link *link;
@@ -487,14 +537,15 @@ int __init acpi_irq_penalty_init(void)
link->irq.possible_count;
for (i = 0; i < link->irq.possible_count; i++) {
- if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ)
- acpi_irq_penalty[link->irq.
+ if (link->irq.possible[i] < ACPI_MAX_ISA_IRQS)
+ acpi_isa_irq_penalty[link->irq.
possible[i]] +=
penalty;
}
- } else if (link->irq.active) {
- acpi_irq_penalty[link->irq.active] +=
+ } else if (link->irq.active &&
+ (link->irq.active < ACPI_MAX_ISA_IRQS)) {
+ acpi_isa_irq_penalty[link->irq.active] +=
PIRQ_PENALTY_PCI_POSSIBLE;
}
}
@@ -547,12 +598,12 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
* the use of IRQs 9, 10, 11, and >15.
*/
for (i = (link->irq.possible_count - 1); i >= 0; i--) {
- if (acpi_irq_penalty[irq] >
- acpi_irq_penalty[link->irq.possible[i]])
+ if (acpi_irq_get_penalty(irq) >
+ acpi_irq_get_penalty(link->irq.possible[i]))
irq = link->irq.possible[i];
}
}
- if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
+ if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) {
printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
"Try pci=noacpi or acpi=off\n",
acpi_device_name(link->device),
@@ -568,7 +619,6 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
acpi_device_bid(link->device));
return -ENODEV;
} else {
- acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING;
printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
@@ -778,7 +828,7 @@ static void acpi_pci_link_remove(struct acpi_device *device)
}
/*
- * modify acpi_irq_penalty[] from cmdline
+ * modify acpi_isa_irq_penalty[] from cmdline
*/
static int __init acpi_irq_penalty_update(char *str, int used)
{
@@ -787,23 +837,24 @@ static int __init acpi_irq_penalty_update(char *str, int used)
for (i = 0; i < 16; i++) {
int retval;
int irq;
+ int new_penalty;
retval = get_option(&str, &irq);
if (!retval)
break; /* no number found */
- if (irq < 0)
- continue;
-
- if (irq >= ARRAY_SIZE(acpi_irq_penalty))
+ /* see if this is an ISA IRQ */
+ if ((irq < 0) || (irq >= ACPI_MAX_ISA_IRQS))
continue;
if (used)
- acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
+ new_penalty = acpi_irq_get_penalty(irq) +
+ PIRQ_PENALTY_ISA_USED;
else
- acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE;
+ new_penalty = 0;
+ acpi_isa_irq_penalty[irq] = new_penalty;
if (retval != 2) /* no next number */
break;
}
@@ -819,34 +870,15 @@ static int __init acpi_irq_penalty_update(char *str, int used)
*/
void acpi_penalize_isa_irq(int irq, int active)
{
- if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
- if (active)
- acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
- else
- acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
- }
+ if ((irq >= 0) && (irq < ARRAY_SIZE(acpi_isa_irq_penalty)))
+ acpi_isa_irq_penalty[irq] = acpi_irq_get_penalty(irq) +
+ (active ? PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
}
bool acpi_isa_irq_available(int irq)
{
- return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
- acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
-}
-
-/*
- * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
- * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for
- * PCI IRQs.
- */
-void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
-{
- if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
- if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
- polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
- acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
- else
- acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
- }
+ return irq >= 0 && (irq >= ARRAY_SIZE(acpi_isa_irq_penalty) ||
+ acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
}
/*
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index f170d7463..c72e64893 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
if (!pr->flags.throttling)
return -ENODEV;
+ /*
+ * We don't care about error returns - we just try to mark
+ * these reserved so that nobody else is confused into thinking
+ * that this region might be unused.
+ *
+ * (In particular, allocating the IO range for Cardbus)
+ */
+ request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+
pr->throttling.state = 0;
duty_mask = pr->throttling.state_count - 1;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 2a8b59644..7a2e4d45b 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -26,6 +26,11 @@
#include "internal.h"
#include "sleep.h"
+/*
+ * Some HW-full platforms do not have _S5, so they may need
+ * to leverage efi power off for a shutdown.
+ */
+bool acpi_no_s5;
static u8 sleep_states[ACPI_S_STATE_COUNT];
static void acpi_sleep_tts_switch(u32 acpi_state)
@@ -882,6 +887,8 @@ int __init acpi_sleep_init(void)
sleep_states[ACPI_STATE_S5] = 1;
pm_power_off_prepare = acpi_power_off_prepare;
pm_power_off = acpi_power_off;
+ } else {
+ acpi_no_s5 = true;
}
supported[0] = 0;
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 0243d375c..4b3a9e27f 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -555,23 +555,22 @@ static void acpi_global_event_handler(u32 event_type, acpi_handle device,
static int get_status(u32 index, acpi_event_status *status,
acpi_handle *handle)
{
- int result = 0;
+ int result;
if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
- goto end;
+ return -EINVAL;
if (index < num_gpes) {
result = acpi_get_gpe_device(index, handle);
if (result) {
ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
"Invalid GPE 0x%x", index));
- goto end;
+ return result;
}
result = acpi_get_gpe_status(*handle, index, status);
} else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
result = acpi_get_event_status(index - num_gpes, status);
-end:
return result;
}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f49c02442..a372f9eaa 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -32,8 +32,14 @@
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
+#include <linux/earlycpio.h>
+#include <linux/memblock.h>
#include "internal.h"
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+#include CONFIG_ACPI_CUSTOM_DSDT_FILE
+#endif
+
#define ACPI_MAX_TABLES 128
static char *mps_inti_flags_polarity[] = { "dfl", "high", "res", "low" };
@@ -433,6 +439,314 @@ static void __init check_multiple_madt(void)
return;
}
+static void acpi_table_taint(struct acpi_table_header *table)
+{
+ pr_warn("Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
+ table->signature, table->oem_table_id);
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
+}
+
+#ifdef CONFIG_ACPI_TABLE_UPGRADE
+static u64 acpi_tables_addr;
+static int all_tables_size;
+
+/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
+static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
+{
+ u8 sum = 0;
+ u8 *end = buffer + length;
+
+ while (buffer < end)
+ sum = (u8) (sum + *(buffer++));
+ return sum;
+}
+
+/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
+static const char * const table_sigs[] = {
+ ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
+ ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
+ ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
+ ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
+ ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
+ ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
+ ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
+ ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
+ ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
+
+#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
+
+#define NR_ACPI_INITRD_TABLES 64
+static struct cpio_data __initdata acpi_initrd_files[NR_ACPI_INITRD_TABLES];
+static DECLARE_BITMAP(acpi_initrd_installed, NR_ACPI_INITRD_TABLES);
+
+#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)
+
+static void __init acpi_table_initrd_init(void *data, size_t size)
+{
+ int sig, no, table_nr = 0, total_offset = 0;
+ long offset = 0;
+ struct acpi_table_header *table;
+ char cpio_path[32] = "kernel/firmware/acpi/";
+ struct cpio_data file;
+
+ if (data == NULL || size == 0)
+ return;
+
+ for (no = 0; no < NR_ACPI_INITRD_TABLES; no++) {
+ file = find_cpio_data(cpio_path, data, size, &offset);
+ if (!file.data)
+ break;
+
+ data += offset;
+ size -= offset;
+
+ if (file.size < sizeof(struct acpi_table_header)) {
+ pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
+ cpio_path, file.name);
+ continue;
+ }
+
+ table = file.data;
+
+ for (sig = 0; table_sigs[sig]; sig++)
+ if (!memcmp(table->signature, table_sigs[sig], 4))
+ break;
+
+ if (!table_sigs[sig]) {
+ pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
+ cpio_path, file.name);
+ continue;
+ }
+ if (file.size != table->length) {
+ pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
+ cpio_path, file.name);
+ continue;
+ }
+ if (acpi_table_checksum(file.data, table->length)) {
+ pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
+ cpio_path, file.name);
+ continue;
+ }
+
+ pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
+ table->signature, cpio_path, file.name, table->length);
+
+ all_tables_size += table->length;
+ acpi_initrd_files[table_nr].data = file.data;
+ acpi_initrd_files[table_nr].size = file.size;
+ table_nr++;
+ }
+ if (table_nr == 0)
+ return;
+
+ acpi_tables_addr =
+ memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
+ all_tables_size, PAGE_SIZE);
+ if (!acpi_tables_addr) {
+ WARN_ON(1);
+ return;
+ }
+ /*
+ * Only calling e820_add_reserve does not work and the
+ * tables are invalid (memory got used) later.
+ * memblock_reserve works as expected and the tables won't get modified.
+ * But it's not enough on X86 because ioremap will
+ * complain later (used by acpi_os_map_memory) that the pages
+ * that should get mapped are not marked "reserved".
+ * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
+ * works fine.
+ */
+ memblock_reserve(acpi_tables_addr, all_tables_size);
+ arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
+
+ /*
+ * early_ioremap only can remap 256k one time. If we map all
+ * tables one time, we will hit the limit. Need to map chunks
+ * one by one during copying the same as that in relocate_initrd().
+ */
+ for (no = 0; no < table_nr; no++) {
+ unsigned char *src_p = acpi_initrd_files[no].data;
+ phys_addr_t size = acpi_initrd_files[no].size;
+ phys_addr_t dest_addr = acpi_tables_addr + total_offset;
+ phys_addr_t slop, clen;
+ char *dest_p;
+
+ total_offset += size;
+
+ while (size) {
+ slop = dest_addr & ~PAGE_MASK;
+ clen = size;
+ if (clen > MAP_CHUNK_SIZE - slop)
+ clen = MAP_CHUNK_SIZE - slop;
+ dest_p = early_ioremap(dest_addr & PAGE_MASK,
+ clen + slop);
+ memcpy(dest_p + slop, src_p, clen);
+ early_iounmap(dest_p, clen + slop);
+ src_p += clen;
+ dest_addr += clen;
+ size -= clen;
+ }
+ }
+}
+
+static acpi_status
+acpi_table_initrd_override(struct acpi_table_header *existing_table,
+ acpi_physical_address *address, u32 *length)
+{
+ int table_offset = 0;
+ int table_index = 0;
+ struct acpi_table_header *table;
+ u32 table_length;
+
+ *length = 0;
+ *address = 0;
+ if (!acpi_tables_addr)
+ return AE_OK;
+
+ while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
+ table = acpi_os_map_memory(acpi_tables_addr + table_offset,
+ ACPI_HEADER_SIZE);
+ if (table_offset + table->length > all_tables_size) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ WARN_ON(1);
+ return AE_OK;
+ }
+
+ table_length = table->length;
+
+ /* Only override tables matched */
+ if (memcmp(existing_table->signature, table->signature, 4) ||
+ memcmp(table->oem_id, existing_table->oem_id,
+ ACPI_OEM_ID_SIZE) ||
+ memcmp(table->oem_table_id, existing_table->oem_table_id,
+ ACPI_OEM_TABLE_ID_SIZE)) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ goto next_table;
+ }
+ /*
+ * Mark the table to avoid being used in
+ * acpi_table_initrd_scan() and check the revision.
+ */
+ if (test_and_set_bit(table_index, acpi_initrd_installed) ||
+ existing_table->oem_revision >= table->oem_revision) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ goto next_table;
+ }
+
+ *length = table_length;
+ *address = acpi_tables_addr + table_offset;
+ pr_info("Table Upgrade: override [%4.4s-%6.6s-%8.8s]\n",
+ table->signature, table->oem_id,
+ table->oem_table_id);
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ break;
+
+next_table:
+ table_offset += table_length;
+ table_index++;
+ }
+ return AE_OK;
+}
+
+static void __init acpi_table_initrd_scan(void)
+{
+ int table_offset = 0;
+ int table_index = 0;
+ u32 table_length;
+ struct acpi_table_header *table;
+
+ if (!acpi_tables_addr)
+ return;
+
+ while (table_offset + ACPI_HEADER_SIZE <= all_tables_size) {
+ table = acpi_os_map_memory(acpi_tables_addr + table_offset,
+ ACPI_HEADER_SIZE);
+ if (table_offset + table->length > all_tables_size) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ WARN_ON(1);
+ return;
+ }
+
+ table_length = table->length;
+
+ /* Skip RSDT/XSDT which should only be used for override */
+ if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_RSDT) ||
+ ACPI_COMPARE_NAME(table->signature, ACPI_SIG_XSDT)) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ goto next_table;
+ }
+ /*
+ * Mark the table to avoid being used in
+ * acpi_table_initrd_override(). Though this is not possible
+ * because override is disabled in acpi_install_table().
+ */
+ if (test_and_set_bit(table_index, acpi_initrd_installed)) {
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ goto next_table;
+ }
+
+ pr_info("Table Upgrade: install [%4.4s-%6.6s-%8.8s]\n",
+ table->signature, table->oem_id,
+ table->oem_table_id);
+ acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+ acpi_install_table(acpi_tables_addr + table_offset, TRUE);
+next_table:
+ table_offset += table_length;
+ table_index++;
+ }
+}
+#else
+static void __init acpi_table_initrd_init(void *data, size_t size)
+{
+}
+
+static acpi_status
+acpi_table_initrd_override(struct acpi_table_header *existing_table,
+ acpi_physical_address *address,
+ u32 *table_length)
+{
+ *table_length = 0;
+ *address = 0;
+ return AE_OK;
+}
+
+static void __init acpi_table_initrd_scan(void)
+{
+}
+#endif /* CONFIG_ACPI_TABLE_UPGRADE */
+
+acpi_status
+acpi_os_physical_table_override(struct acpi_table_header *existing_table,
+ acpi_physical_address *address,
+ u32 *table_length)
+{
+ return acpi_table_initrd_override(existing_table, address,
+ table_length);
+}
+
+acpi_status
+acpi_os_table_override(struct acpi_table_header *existing_table,
+ struct acpi_table_header **new_table)
+{
+ if (!existing_table || !new_table)
+ return AE_BAD_PARAMETER;
+
+ *new_table = NULL;
+
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+ if (strncmp(existing_table->signature, "DSDT", 4) == 0)
+ *new_table = (struct acpi_table_header *)AmlCode;
+#endif
+ if (*new_table != NULL)
+ acpi_table_taint(existing_table);
+ return AE_OK;
+}
+
+void __init early_acpi_table_init(void *data, size_t size)
+{
+ acpi_table_initrd_init(data, size);
+}
+
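Feeding upgraded tables into this path follows the long-standing initrd recipe: place the AML files under kernel/firmware/acpi/ in an uncompressed cpio archive and prepend it to the regular initrd. A sketch along the lines of the initrd table override documentation (file names are examples):

    mkdir -p kernel/firmware/acpi
    cp DSDT.aml kernel/firmware/acpi
    find kernel | cpio -H newc --create > /boot/instrumented_initrd
    cat /boot/instrumented_initrd /boot/initrd > /boot/new_initrd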
/*
* acpi_table_init()
*
@@ -457,7 +771,7 @@ int __init acpi_table_init(void)
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
if (ACPI_FAILURE(status))
return -EINVAL;
- acpi_initrd_initialize_tables();
+ acpi_table_initrd_scan();
check_multiple_madt();
return 0;
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 050673f0c..22c09952e 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -625,7 +625,7 @@ acpi_status acpi_evaluate_lck(acpi_handle handle, int lock)
* some old BIOSes do expect a buffer or an integer etc.
*/
union acpi_object *
-acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, int rev, int func,
+acpi_evaluate_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
union acpi_object *argv4)
{
acpi_status ret;
@@ -674,7 +674,7 @@ EXPORT_SYMBOL(acpi_evaluate_dsm);
* functions. Currently only support 64 functions at maximum, should be
* enough for now.
*/
-bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs)
+bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
{
int i;
u64 mask = 0;
@@ -707,7 +707,7 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, int rev, u64 funcs)
EXPORT_SYMBOL(acpi_check_dsm);
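With rev and func widened to u64, a typical caller probes for a function before evaluating it. A minimal sketch against the signatures above, with handle and uuid assumed to be in scope:

    u64 rev = 1, func = 1;

    if (acpi_check_dsm(handle, uuid, rev, 1ULL << func)) {
            union acpi_object *obj;

            obj = acpi_evaluate_dsm(handle, uuid, rev, func, NULL);
            if (obj)
                    ACPI_FREE(obj);    /* caller owns the returned object */
    }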
/**
- * acpi_dev_present - Detect presence of a given ACPI device in the system.
+ * acpi_dev_found - Detect presence of a given ACPI device in the namespace.
* @hid: Hardware ID of the device.
*
* Return %true if the device was present at the moment of invocation.
@@ -719,7 +719,7 @@ EXPORT_SYMBOL(acpi_check_dsm);
* instead). Calling from module_init() is fine (which is synonymous
* with device_initcall()).
*/
-bool acpi_dev_present(const char *hid)
+bool acpi_dev_found(const char *hid)
{
struct acpi_device_bus_id *acpi_device_bus_id;
bool found = false;
@@ -734,7 +734,7 @@ bool acpi_dev_present(const char *hid)
return found;
}
-EXPORT_SYMBOL(acpi_dev_present);
+EXPORT_SYMBOL(acpi_dev_found);
/*
* acpi_backlight= handling, this is done here rather then in video_detect.c
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 1316ddd92..3d1327615 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -358,7 +358,7 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
if (!(video_caps & ACPI_VIDEO_BACKLIGHT))
return acpi_backlight_vendor;
- if (acpi_osi_is_win8() && backlight_device_registered(BACKLIGHT_RAW))
+ if (acpi_osi_is_win8() && backlight_device_get_by_type(BACKLIGHT_RAW))
return acpi_backlight_native;
return acpi_backlight_video;
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index f00993600..a5b5c87e2 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -336,16 +336,7 @@ static void amba_device_release(struct device *dev)
kfree(d);
}
-/**
- * amba_device_add - add a previously allocated AMBA device structure
- * @dev: AMBA device allocated by amba_device_alloc
- * @parent: resource parent for this devices resources
- *
- * Claim the resource, and read the device cell ID if not already
- * initialized. Register the AMBA device with the Linux device
- * manager.
- */
-int amba_device_add(struct amba_device *dev, struct resource *parent)
+static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
{
u32 size;
void __iomem *tmp;
@@ -373,6 +364,12 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
goto err_release;
}
+ ret = dev_pm_domain_attach(&dev->dev, true);
+ if (ret == -EPROBE_DEFER) {
+ iounmap(tmp);
+ goto err_release;
+ }
+
ret = amba_get_enable_pclk(dev);
if (ret == 0) {
u32 pid, cid;
@@ -398,6 +395,7 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
}
iounmap(tmp);
+ dev_pm_domain_detach(&dev->dev, true);
if (ret)
goto err_release;
@@ -421,6 +419,88 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
err_out:
return ret;
}
+
+/*
+ * Registration of an AMBA device requires reading its pid and cid registers.
+ * To do this, the device must be turned on (if it is part of a power domain)
+ * and have its clocks enabled. However, in some cases those resources might
+ * not yet be available. Returning EPROBE_DEFER is not a solution here,
+ * because callers do not handle this special error code. Instead, such
+ * devices are added to a special list and their registration is retried
+ * from a periodic worker until all resources are available and registration
+ * succeeds.
+ */
+struct deferred_device {
+ struct amba_device *dev;
+ struct resource *parent;
+ struct list_head node;
+};
+
+static LIST_HEAD(deferred_devices);
+static DEFINE_MUTEX(deferred_devices_lock);
+
+static void amba_deferred_retry_func(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(deferred_retry_work, amba_deferred_retry_func);
+
+#define DEFERRED_DEVICE_TIMEOUT (msecs_to_jiffies(5 * 1000))
+
+static void amba_deferred_retry_func(struct work_struct *dummy)
+{
+ struct deferred_device *ddev, *tmp;
+
+ mutex_lock(&deferred_devices_lock);
+
+ list_for_each_entry_safe(ddev, tmp, &deferred_devices, node) {
+ int ret = amba_device_try_add(ddev->dev, ddev->parent);
+
+ if (ret == -EPROBE_DEFER)
+ continue;
+
+ list_del_init(&ddev->node);
+ kfree(ddev);
+ }
+
+ if (!list_empty(&deferred_devices))
+ schedule_delayed_work(&deferred_retry_work,
+ DEFERRED_DEVICE_TIMEOUT);
+
+ mutex_unlock(&deferred_devices_lock);
+}
+
+/**
+ * amba_device_add - add a previously allocated AMBA device structure
+ * @dev: AMBA device allocated by amba_device_alloc
+ * @parent: resource parent for this device's resources
+ *
+ * Claim the resource, and read the device cell ID if not already
+ * initialized. Register the AMBA device with the Linux device
+ * manager.
+ */
+int amba_device_add(struct amba_device *dev, struct resource *parent)
+{
+ int ret = amba_device_try_add(dev, parent);
+
+ if (ret == -EPROBE_DEFER) {
+ struct deferred_device *ddev;
+
+ ddev = kmalloc(sizeof(*ddev), GFP_KERNEL);
+ if (!ddev)
+ return -ENOMEM;
+
+ ddev->dev = dev;
+ ddev->parent = parent;
+ ret = 0;
+
+ mutex_lock(&deferred_devices_lock);
+
+ if (list_empty(&deferred_devices))
+ schedule_delayed_work(&deferred_retry_work,
+ DEFERRED_DEVICE_TIMEOUT);
+ list_add_tail(&ddev->node, &deferred_devices);
+
+ mutex_unlock(&deferred_devices_lock);
+ }
+ return ret;
+}
EXPORT_SYMBOL_GPL(amba_device_add);
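/*
 * A caller-side sketch, assuming an already-allocated device:
 * amba_device_add() now absorbs -EPROBE_DEFER into the retry list,
 * so callers only ever see genuine errors.
 */
static int example_register(struct amba_device *adev)
{
	int ret = amba_device_add(adev, &iomem_resource);

	if (ret)		/* real failure; deferral is retried internally */
		amba_device_put(adev);
	return ret;
}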
static struct amba_device *
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cfa936a32..e2dc4c045 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -313,14 +313,23 @@ config ATA_PIIX
config SATA_DWC
tristate "DesignWare Cores SATA support"
- depends on 460EX
- select DW_DMAC
+ depends on DMADEVICES
+ select GENERIC_PHY
help
This option enables support for the on-chip SATA controller of the
AppliedMicro processor 460EX.
If unsure, say N.
+config SATA_DWC_OLD_DMA
+ bool "Support old device trees"
+ depends on SATA_DWC
+ select DW_DMAC_CORE
+ default y if 460EX
+ help
+ This option enables support for old device trees without the
+ "dmas" property.
+
config SATA_DWC_DEBUG
bool "Debugging driver version"
depends on SATA_DWC
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
index 6e702ab57..1d31c0c0f 100644
--- a/drivers/ata/ahci_seattle.c
+++ b/drivers/ata/ahci_seattle.c
@@ -137,7 +137,7 @@ static const struct ata_port_info *ahci_seattle_get_port_info(
u32 val;
plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
- if (IS_ERR(plat_data))
+ if (!plat_data)
return &ahci_port_info;
plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index a5d7c1c2a..71b07198e 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2550,8 +2550,8 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
if (hpriv->irq_handler)
- dev_warn(host->dev, "both AHCI_HFLAG_MULTI_MSI flag set \
- and custom irq handler implemented\n");
+ dev_warn(host->dev,
+ "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
rc = ahci_host_activate_multi_irqs(host, sht);
} else {
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 474f9f473..0506c49da 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -66,6 +66,7 @@
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
@@ -695,7 +696,7 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
* RETURNS:
* Block address read from @tf.
*/
-u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
u64 block = 0;
@@ -720,7 +721,7 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
if (!sect) {
ata_dev_warn(dev,
"device reported invalid CHS sector 0\n");
- sect = 1; /* oh well */
+ return U64_MAX;
}
block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
@@ -884,7 +885,7 @@ unsigned long ata_pack_xfermask(unsigned long pio_mask,
* @udma_mask: resulting udma_mask
*
* Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
- * Any NULL distination masks will be ignored.
+ * Any NULL destination masks will be ignored.
*/
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
unsigned long *mwdma_mask, unsigned long *udma_mask)
@@ -2079,6 +2080,81 @@ static inline u8 ata_dev_knobble(struct ata_device *dev)
return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}
+static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ unsigned int err_mask;
+ int log_index = ATA_LOG_NCQ_SEND_RECV * 2;
+ u16 log_pages;
+
+ err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
+ 0, ap->sector_buf, 1);
+ if (err_mask) {
+ ata_dev_dbg(dev,
+ "failed to get Log Directory Emask 0x%x\n",
+ err_mask);
+ return;
+ }
+ log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
+ if (!log_pages) {
+ ata_dev_warn(dev,
+ "NCQ Send/Recv Log not supported\n");
+ return;
+ }
+ err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
+ 0, ap->sector_buf, 1);
+ if (err_mask) {
+ ata_dev_dbg(dev,
+ "failed to get NCQ Send/Recv Log Emask 0x%x\n",
+ err_mask);
+ } else {
+ u8 *cmds = dev->ncq_send_recv_cmds;
+
+ dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
+ memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+
+ if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
+ ata_dev_dbg(dev, "disabling queued TRIM support\n");
+ cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
+ ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
+ }
+ }
+}
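/*
 * Both new helpers probe the General Purpose Log Directory (log 0x00)
 * first: it holds one little-endian u16 page count per log address, at
 * byte offset (log address * 2). A self-contained sketch of that lookup:
 */
static u16 log_directory_pages(const u8 *dir, u8 log_addr)
{
	unsigned int off = log_addr * 2;

	return dir[off] | (dir[off + 1] << 8);	/* little-endian u16 */
}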
+
+static void ata_dev_config_ncq_non_data(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ unsigned int err_mask;
+ int log_index = ATA_LOG_NCQ_NON_DATA * 2;
+ u16 log_pages;
+
+ err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
+ 0, ap->sector_buf, 1);
+ if (err_mask) {
+ ata_dev_dbg(dev,
+ "failed to get Log Directory Emask 0x%x\n",
+ err_mask);
+ return;
+ }
+ log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
+ if (!log_pages) {
+ ata_dev_warn(dev,
+ "NCQ Send/Recv Log not supported\n");
+ return;
+ }
+ err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
+ 0, ap->sector_buf, 1);
+ if (err_mask) {
+ ata_dev_dbg(dev,
+ "failed to get NCQ Non-Data Log Emask 0x%x\n",
+ err_mask);
+ } else {
+ u8 *cmds = dev->ncq_non_data_cmds;
+
+ memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
+ }
+}
+
static int ata_dev_config_ncq(struct ata_device *dev,
char *desc, size_t desc_sz)
{
@@ -2127,29 +2203,125 @@ static int ata_dev_config_ncq(struct ata_device *dev,
snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
ddepth, aa_desc);
- if ((ap->flags & ATA_FLAG_FPDMA_AUX) &&
- ata_id_has_ncq_send_and_recv(dev->id)) {
- err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
- 0, ap->sector_buf, 1);
- if (err_mask) {
- ata_dev_dbg(dev,
- "failed to get NCQ Send/Recv Log Emask 0x%x\n",
- err_mask);
- } else {
- u8 *cmds = dev->ncq_send_recv_cmds;
+ if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
+ if (ata_id_has_ncq_send_and_recv(dev->id))
+ ata_dev_config_ncq_send_recv(dev);
+ if (ata_id_has_ncq_non_data(dev->id))
+ ata_dev_config_ncq_non_data(dev);
+ }
+
+ return 0;
+}
- dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
- memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+static void ata_dev_config_sense_reporting(struct ata_device *dev)
+{
+ unsigned int err_mask;
- if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
- ata_dev_dbg(dev, "disabling queued TRIM support\n");
- cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
- ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
- }
+ if (!ata_id_has_sense_reporting(dev->id))
+ return;
+
+ if (ata_id_sense_reporting_enabled(dev->id))
+ return;
+
+ err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
+ if (err_mask) {
+ ata_dev_dbg(dev,
+ "failed to enable Sense Data Reporting, Emask 0x%x\n",
+ err_mask);
+ }
+}
+
+static void ata_dev_config_zac(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ unsigned int err_mask;
+ u8 *identify_buf = ap->sector_buf;
+ int log_index = ATA_LOG_SATA_ID_DEV_DATA * 2, i, found = 0;
+ u16 log_pages;
+
+ dev->zac_zones_optimal_open = U32_MAX;
+ dev->zac_zones_optimal_nonseq = U32_MAX;
+ dev->zac_zones_max_open = U32_MAX;
+
+ /*
+ * Always set the 'ZAC' flag for Host-managed devices.
+ */
+ if (dev->class == ATA_DEV_ZAC)
+ dev->flags |= ATA_DFLAG_ZAC;
+ else if (ata_id_zoned_cap(dev->id) == 0x01)
+ /*
+ * Check for host-aware devices.
+ */
+ dev->flags |= ATA_DFLAG_ZAC;
+
+ if (!(dev->flags & ATA_DFLAG_ZAC))
+ return;
+
+ /*
+ * Read Log Directory to figure out if IDENTIFY DEVICE log
+ * is supported.
+ */
+ err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY,
+ 0, ap->sector_buf, 1);
+ if (err_mask) {
+ ata_dev_info(dev,
+ "failed to get Log Directory Emask 0x%x\n",
+ err_mask);
+ return;
+ }
+ log_pages = get_unaligned_le16(&ap->sector_buf[log_index]);
+ if (log_pages == 0) {
+ ata_dev_warn(dev,
+ "ATA Identify Device Log not supported\n");
+ return;
+ }
+ /*
+ * Read IDENTIFY DEVICE data log, page 0, to figure out
+ * if page 9 is supported.
+ */
+ err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, 0,
+ identify_buf, 1);
+ if (err_mask) {
+ ata_dev_info(dev,
+ "failed to get Device Identify Log Emask 0x%x\n",
+ err_mask);
+ return;
+ }
+ log_pages = identify_buf[8];
+ for (i = 0; i < log_pages; i++) {
+ if (identify_buf[9 + i] == ATA_LOG_ZONED_INFORMATION) {
+ found++;
+ break;
}
}
+ if (!found) {
+ ata_dev_warn(dev,
+ "ATA Zoned Information Log not supported\n");
+ return;
+ }
- return 0;
+ /*
+ * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
+ */
+ err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA,
+ ATA_LOG_ZONED_INFORMATION,
+ identify_buf, 1);
+ if (!err_mask) {
+ u64 zoned_cap, opt_open, opt_nonseq, max_open;
+
+ zoned_cap = get_unaligned_le64(&identify_buf[8]);
+ if ((zoned_cap >> 63))
+ dev->zac_zoned_cap = (zoned_cap & 1);
+ opt_open = get_unaligned_le64(&identify_buf[24]);
+ if ((opt_open >> 63))
+ dev->zac_zones_optimal_open = (u32)opt_open;
+ opt_nonseq = get_unaligned_le64(&identify_buf[32]);
+ if ((opt_nonseq >> 63))
+ dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
+ max_open = get_unaligned_le64(&identify_buf[40]);
+ if ((max_open >> 63))
+ dev->zac_zones_max_open = (u32)max_open;
+ }
}
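/*
 * Each qword in the Zoned Device Information page carries a "valid" flag
 * in bit 63, and the value is only consumed when that bit is set, as the
 * checks above show. A minimal sketch of the pattern:
 */
static u32 zac_field_or(u64 qword, u32 fallback)
{
	return (qword >> 63) ? (u32)qword : fallback;	/* bit 63 == valid */
}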
/**
@@ -2374,7 +2546,8 @@ int ata_dev_configure(struct ata_device *dev)
dev->devslp_timing[i] = sata_setting[j];
}
}
-
+ ata_dev_config_sense_reporting(dev);
+ ata_dev_config_zac(dev);
dev->cdb_len = 16;
}
@@ -3403,7 +3576,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
* EH context.
*
* RETURNS:
- * 0 if @linke is ready before @deadline; otherwise, -errno.
+ * 0 if @link is ready before @deadline; otherwise, -errno.
*/
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link))
@@ -3484,7 +3657,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
* EH context.
*
* RETURNS:
- * 0 if @linke is ready before @deadline; otherwise, -errno.
+ * 0 if @link is ready before @deadline; otherwise, -errno.
*/
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link))
@@ -3497,7 +3670,7 @@ int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
/**
* sata_link_debounce - debounce SATA phy status
* @link: ATA link to debounce SATA phy status for
- * @params: timing parameters { interval, duratinon, timeout } in msec
+ * @params: timing parameters { interval, duration, timeout } in msec
* @deadline: deadline jiffies for the operation
*
* Make sure SStatus of @link reaches stable state, determined by
@@ -3567,7 +3740,7 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params,
/**
* sata_link_resume - resume SATA link
* @link: ATA link to resume SATA
- * @params: timing parameters { interval, duratinon, timeout } in msec
+ * @params: timing parameters { interval, duration, timeout } in msec
* @deadline: deadline jiffies for the operation
*
* Resume SATA phy @link and debounce it.
@@ -3750,7 +3923,7 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline)
/**
* sata_link_hardreset - reset link via SATA phy reset
* @link: link to reset
- * @timing: timing parameters { interval, duratinon, timeout } in msec
+ * @timing: timing parameters { interval, duration, timeout } in msec
* @deadline: deadline jiffies for the operation
* @online: optional out parameter indicating link onlineness
* @check_ready: optional callback to check link readiness
@@ -4145,6 +4318,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*/
{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+ /*
+ * Device times out with higher max sects.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+ */
+ { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
@@ -4532,6 +4711,7 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
{
struct ata_taskfile tf;
unsigned int err_mask;
+ unsigned long timeout = 0;
/* set up set-features taskfile */
DPRINTK("set features - SATA features\n");
@@ -4543,7 +4723,10 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
tf.protocol = ATA_PROT_NODATA;
tf.nsect = feature;
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (enable == SETFEATURES_SPINUP)
+ timeout = ata_probe_timeout ?
+ ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
@@ -6212,7 +6395,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
*
* After allocating an ATA host and initializing it, most libata
* LLDs perform three steps to activate the host - start host,
- * request IRQ and register it. This helper takes necessasry
+ * request IRQ and register it. This helper takes necessary
* arguments and performs the three steps in one go.
*
* An invalid IRQ skips the IRQ registration and expects the host to
@@ -6265,7 +6448,7 @@ int ata_host_activate(struct ata_host *host, int irq,
}
/**
- * ata_port_detach - Detach ATA port in prepration of device removal
+ * ata_port_detach - Detach ATA port in preparation of device removal
* @ap: ATA port to be detached
*
* Detach all ATA devices and the associated SCSI devices of @ap;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 91a9e6af2..c6f017458 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1600,6 +1600,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
tf->hob_lbah = buf[10];
tf->nsect = buf[12];
tf->hob_nsect = buf[13];
+ if (ata_id_has_ncq_autosense(dev->id))
+ tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
return 0;
}
@@ -1636,6 +1638,56 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
}
/**
+ * ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
+ * @qc: qc to perform REQUEST_SENSE_DATA_EXT to
+ * @cmd: scsi command for which the sense code should be set
+ *
+ * Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
+ * SENSE. This function is an EH helper.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+static void ata_eh_request_sense(struct ata_queued_cmd *qc,
+ struct scsi_cmnd *cmd)
+{
+ struct ata_device *dev = qc->dev;
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ if (qc->ap->pflags & ATA_PFLAG_FROZEN) {
+ ata_dev_warn(dev, "sense data available but port frozen\n");
+ return;
+ }
+
+ if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID)
+ return;
+
+ if (!ata_id_sense_reporting_enabled(dev->id)) {
+ ata_dev_warn(qc->dev, "sense data reporting disabled\n");
+ return;
+ }
+
+ DPRINTK("ATA request sense\n");
+
+ ata_tf_init(dev, &tf);
+ tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+ tf.command = ATA_CMD_REQ_SENSE_DATA;
+ tf.protocol = ATA_PROT_NODATA;
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ /* Ignore err_mask; ATA_ERR might be set */
+ if (tf.command & ATA_SENSE) {
+ ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal);
+ qc->flags |= ATA_QCFLAG_SENSE_VALID;
+ } else {
+ ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
+ tf.command, err_mask);
+ }
+}
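/*
 * REQUEST SENSE DATA EXT returns the sense triple in the LBA registers,
 * which is why the call above passes tf.lbah/tf.lbam/tf.lbal: the sense
 * key in LBA(23:16), asc in LBA(15:8), ascq in LBA(7:0). A sketch:
 */
static void sense_from_lba(u8 lbah, u8 lbam, u8 lbal,
			   u8 *sk, u8 *asc, u8 *ascq)
{
	*sk = lbah;
	*asc = lbam;
	*ascq = lbal;
}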
+
+/**
* atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
* @dev: device to perform REQUEST_SENSE to
* @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
@@ -1797,6 +1849,18 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
memcpy(&qc->result_tf, &tf, sizeof(tf));
qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
+ if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) {
+ char sense_key, asc, ascq;
+
+ sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
+ asc = (qc->result_tf.auxiliary >> 8) & 0xff;
+ ascq = qc->result_tf.auxiliary & 0xff;
+ ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc, ascq);
+ ata_scsi_set_sense_information(dev, qc->scsicmd,
+ &qc->result_tf);
+ qc->flags |= ATA_QCFLAG_SENSE_VALID;
+ }
+
ehc->i.err_mask &= ~AC_ERR_DEV;
}
@@ -1826,14 +1890,23 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
return ATA_EH_RESET;
}
- if (stat & (ATA_ERR | ATA_DF))
+ if (stat & (ATA_ERR | ATA_DF)) {
qc->err_mask |= AC_ERR_DEV;
- else
+ /*
+ * Sense data reporting does not work if the
+ * device fault bit is set.
+ */
+ if (stat & ATA_DF)
+ stat &= ~ATA_SENSE;
+ } else {
return 0;
+ }
switch (qc->dev->class) {
case ATA_DEV_ATA:
case ATA_DEV_ZAC:
+ if (stat & ATA_SENSE)
+ ata_eh_request_sense(qc, qc->scsicmd);
if (err & ATA_ICRC)
qc->err_mask |= AC_ERR_ATA_BUS;
if (err & (ATA_UNC | ATA_AMNF))
@@ -1847,20 +1920,31 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
tmp = atapi_eh_request_sense(qc->dev,
qc->scsicmd->sense_buffer,
qc->result_tf.feature >> 4);
- if (!tmp) {
- /* ATA_QCFLAG_SENSE_VALID is used to
- * tell atapi_qc_complete() that sense
- * data is already valid.
- *
- * TODO: interpret sense data and set
- * appropriate err_mask.
- */
+ if (!tmp)
qc->flags |= ATA_QCFLAG_SENSE_VALID;
- } else
+ else
qc->err_mask |= tmp;
}
}
+ if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
+ int ret = scsi_check_sense(qc->scsicmd);
+ /*
+ * SUCCESS here means that the sense code could be
+ * evaluated and should be passed to the upper layers
+ * for correct evaluation.
+ * FAILED means the sense code could not be interpreted
+ * and the device would need to be reset.
+ * NEEDS_RETRY and ADD_TO_MLQUEUE mean that the
+ * command would need to be retried.
+ */
+ if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) {
+ qc->flags |= ATA_QCFLAG_RETRY;
+ qc->err_mask |= AC_ERR_OTHER;
+ } else if (ret != SUCCESS) {
+ qc->err_mask |= AC_ERR_HSM;
+ }
+ }
if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
action |= ATA_EH_RESET;
@@ -2398,6 +2482,8 @@ const char *ata_get_cmd_descript(u8 command)
{ ATA_CMD_CFA_WRITE_MULT_NE, "CFA WRITE MULTIPLE WITHOUT ERASE" },
{ ATA_CMD_REQ_SENSE_DATA, "REQUEST SENSE DATA EXT" },
{ ATA_CMD_SANITIZE_DEVICE, "SANITIZE DEVICE" },
+ { ATA_CMD_ZAC_MGMT_IN, "ZAC MANAGEMENT IN" },
+ { ATA_CMD_ZAC_MGMT_OUT, "ZAC MANAGEMENT OUT" },
{ ATA_CMD_READ_LONG, "READ LONG (with retries)" },
{ ATA_CMD_READ_LONG_ONCE, "READ LONG (without retries)" },
{ ATA_CMD_WRITE_LONG, "WRITE LONG (with retries)" },
@@ -2569,14 +2655,15 @@ static void ata_eh_link_report(struct ata_link *link)
#ifdef CONFIG_ATA_VERBOSE_ERROR
if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
- ATA_ERR)) {
+ ATA_SENSE | ATA_ERR)) {
if (res->command & ATA_BUSY)
ata_dev_err(qc->dev, "status: { Busy }\n");
else
- ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
+ ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
res->command & ATA_DRDY ? "DRDY " : "",
res->command & ATA_DF ? "DF " : "",
res->command & ATA_DRQ ? "DRQ " : "",
+ res->command & ATA_SENSE ? "SENSE " : "",
res->command & ATA_ERR ? "ERR " : "");
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 567859ce0..bfec66fb2 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -270,11 +270,52 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
ata_scsi_park_show, ata_scsi_park_store);
EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
-static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
+ u8 sk, u8 asc, u8 ascq)
{
+ bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);
+
+ if (!cmd)
+ return;
+
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
- scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
+ scsi_build_sense_buffer(d_sense, cmd->sense_buffer, sk, asc, ascq);
+}
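/*
 * The D_SENSE flag selects the sense format: scsi_build_sense_buffer()
 * emits descriptor format (response code 0x72) when its first argument
 * is non-zero, and fixed format (0x70) otherwise. A usage sketch:
 */
static void sketch_sense_formats(u8 *sb)
{
	scsi_build_sense_buffer(1, sb, ILLEGAL_REQUEST, 0x24, 0x0); /* 0x72 */
	scsi_build_sense_buffer(0, sb, ILLEGAL_REQUEST, 0x24, 0x0); /* 0x70 */
}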
+
+void ata_scsi_set_sense_information(struct ata_device *dev,
+ struct scsi_cmnd *cmd,
+ const struct ata_taskfile *tf)
+{
+ u64 information;
+
+ if (!cmd)
+ return;
+
+ information = ata_tf_read_block(tf, dev);
+ if (information == U64_MAX)
+ return;
+
+ scsi_set_sense_information(cmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE, information);
+}
+
+static void ata_scsi_set_invalid_field(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u16 field, u8 bit)
+{
+ ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x24, 0x0);
+ /* "Invalid field in cbd" */
+ scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+ field, bit, 1);
+}
+
+static void ata_scsi_set_invalid_parameter(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u16 field)
+{
+ /* "Invalid field in parameter list" */
+ ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x26, 0x0);
+ scsi_set_sense_field_pointer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+ field, 0xff, 0);
}
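/*
 * Both helpers rely on the sense-key-specific "field pointer" bytes from
 * SPC: SKSV and C/D flags plus an optional bit pointer in the first byte,
 * then a big-endian field (byte) pointer. A sketch of that layout,
 * assuming a caller that fills the three bytes directly:
 */
static void sketch_field_pointer(u8 *sks, u16 field, u8 bit, bool in_cdb)
{
	sks[0] = 0x80;				/* SKSV: field pointer valid */
	if (in_cdb)
		sks[0] |= 0x40;			/* C/D: error is in the CDB  */
	if (bit != 0xff)
		sks[0] |= 0x08 | (bit & 0x7);	/* BPV + bit pointer         */
	sks[1] = field >> 8;			/* field pointer, big-endian */
	sks[2] = field & 0xff;
}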
static ssize_t
@@ -364,10 +405,10 @@ struct device_attribute *ata_common_sdev_attrs[] = {
};
EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
-static void ata_scsi_invalid_field(struct scsi_cmnd *cmd)
+static void ata_scsi_invalid_field(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u16 field)
{
- ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
- /* "Invalid field in cbd" */
+ ata_scsi_set_invalid_field(dev, cmd, field, 0xff);
cmd->scsi_done(cmd);
}
@@ -980,6 +1021,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
unsigned char *sb = cmd->sense_buffer;
unsigned char *desc = sb + 8;
int verbose = qc->ap->ops->error_handler == NULL;
+ u8 sense_key, asc, ascq;
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
@@ -992,47 +1034,71 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
if (qc->err_mask ||
tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
- &sb[1], &sb[2], &sb[3], verbose);
- sb[1] &= 0x0f;
+ &sense_key, &asc, &ascq, verbose);
+ ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
} else {
- sb[1] = RECOVERED_ERROR;
- sb[2] = 0;
- sb[3] = 0x1D;
+ /*
+ * ATA PASS-THROUGH INFORMATION AVAILABLE
+ * Always in descriptor format sense.
+ */
+ scsi_build_sense_buffer(1, cmd->sense_buffer,
+ RECOVERED_ERROR, 0, 0x1D);
}
- /*
- * Sense data is current and format is descriptor.
- */
- sb[0] = 0x72;
-
- desc[0] = 0x09;
-
- /* set length of additional sense data */
- sb[7] = 14;
- desc[1] = 12;
-
- /*
- * Copy registers into sense buffer.
- */
- desc[2] = 0x00;
- desc[3] = tf->feature; /* == error reg */
- desc[5] = tf->nsect;
- desc[7] = tf->lbal;
- desc[9] = tf->lbam;
- desc[11] = tf->lbah;
- desc[12] = tf->device;
- desc[13] = tf->command; /* == status reg */
+ if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {
+ u8 len;
+
+ /* descriptor format */
+ len = sb[7];
+ desc = (char *)scsi_sense_desc_find(sb, len + 8, 9);
+ if (!desc) {
+ if (SCSI_SENSE_BUFFERSIZE < len + 14)
+ return;
+ sb[7] = len + 14;
+ desc = sb + 8 + len;
+ }
+ desc[0] = 9;
+ desc[1] = 12;
+ /*
+ * Copy registers into sense buffer.
+ */
+ desc[2] = 0x00;
+ desc[3] = tf->feature; /* == error reg */
+ desc[5] = tf->nsect;
+ desc[7] = tf->lbal;
+ desc[9] = tf->lbam;
+ desc[11] = tf->lbah;
+ desc[12] = tf->device;
+ desc[13] = tf->command; /* == status reg */
- /*
- * Fill in Extend bit, and the high order bytes
- * if applicable.
- */
- if (tf->flags & ATA_TFLAG_LBA48) {
- desc[2] |= 0x01;
- desc[4] = tf->hob_nsect;
- desc[6] = tf->hob_lbal;
- desc[8] = tf->hob_lbam;
- desc[10] = tf->hob_lbah;
+ /*
+ * Fill in Extend bit, and the high order bytes
+ * if applicable.
+ */
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ desc[2] |= 0x01;
+ desc[4] = tf->hob_nsect;
+ desc[6] = tf->hob_lbal;
+ desc[8] = tf->hob_lbam;
+ desc[10] = tf->hob_lbah;
+ }
+ } else {
+ /* Fixed sense format */
+ desc[0] = tf->feature;
+ desc[1] = tf->command; /* status */
+ desc[2] = tf->device;
+ desc[3] = tf->nsect;
+ desc[7] = 0;
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ desc[8] |= 0x80;
+ if (tf->hob_nsect)
+ desc[8] |= 0x40;
+ if (tf->hob_lbal || tf->hob_lbam || tf->hob_lbah)
+ desc[8] |= 0x20;
+ }
+ desc[9] = tf->lbal;
+ desc[10] = tf->lbam;
+ desc[11] = tf->lbah;
}
}
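/*
 * The branch above keys off the sense response code: 0x70/0x71 are fixed
 * format, 0x72/0x73 descriptor format, and bit 7 is the historical VALID
 * bit, hence the 0x7f mask. A sketch of the predicate:
 */
static bool sense_is_descriptor_format(const u8 *sb)
{
	return (sb[0] & 0x7f) >= 0x72;
}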
@@ -1052,41 +1118,41 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
unsigned char *sb = cmd->sense_buffer;
- unsigned char *desc = sb + 8;
int verbose = qc->ap->ops->error_handler == NULL;
u64 block;
+ u8 sense_key, asc, ascq;
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
- /* sense data is current and format is descriptor */
- sb[0] = 0x72;
-
+ if (ata_dev_disabled(dev)) {
+ /* Device disabled after error recovery */
+ /* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */
+ ata_scsi_set_sense(dev, cmd, NOT_READY, 0x04, 0x21);
+ return;
+ }
/* Use ata_to_sense_error() to map status register bits
* onto sense key, asc & ascq.
*/
if (qc->err_mask ||
tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
- &sb[1], &sb[2], &sb[3], verbose);
- sb[1] &= 0x0f;
+ &sense_key, &asc, &ascq, verbose);
+ ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
+ } else {
+ /* Could not decode error */
+ ata_dev_warn(dev, "could not decode error status 0x%x err_mask 0x%x\n",
+ tf->command, qc->err_mask);
+ ata_scsi_set_sense(dev, cmd, ABORTED_COMMAND, 0, 0);
+ return;
}
block = ata_tf_read_block(&qc->result_tf, dev);
+ if (block == U64_MAX)
+ return;
- /* information sense data descriptor */
- sb[7] = 12;
- desc[0] = 0x00;
- desc[1] = 10;
-
- desc[2] |= 0x80; /* valid */
- desc[6] = block >> 40;
- desc[7] = block >> 32;
- desc[8] = block >> 24;
- desc[9] = block >> 16;
- desc[10] = block >> 8;
- desc[11] = block;
+ scsi_set_sense_information(sb, SCSI_SENSE_BUFFERSIZE, block);
}
static void ata_scsi_sdev_config(struct scsi_device *sdev)
@@ -1109,7 +1175,7 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
* @rq: request to be checked
*
* ATAPI commands which transfer variable length data to host
- * might overflow due to application error or hardare bug. This
+ * might overflow due to application error or hardware bug. This
* function checks whether overflow should be drained and ignored
* for @request.
*
@@ -1343,19 +1409,29 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->tf;
const u8 *cdb = scmd->cmnd;
+ u16 fp;
+ u8 bp = 0xff;
- if (scmd->cmd_len < 5)
+ if (scmd->cmd_len < 5) {
+ fp = 4;
goto invalid_fld;
+ }
tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
tf->protocol = ATA_PROT_NODATA;
if (cdb[1] & 0x1) {
; /* ignore IMMED bit, violates sat-r05 */
}
- if (cdb[4] & 0x2)
+ if (cdb[4] & 0x2) {
+ fp = 4;
+ bp = 1;
goto invalid_fld; /* LOEJ bit set not supported */
- if (((cdb[4] >> 4) & 0xf) != 0)
+ }
+ if (((cdb[4] >> 4) & 0xf) != 0) {
+ fp = 4;
+ bp = 3;
goto invalid_fld; /* power conditions not supported */
+ }
if (cdb[4] & 0x1) {
tf->nsect = 1; /* 1 sector, lba=0 */
@@ -1401,8 +1477,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
return 0;
invalid_fld:
- ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
- /* "Invalid field in cbd" */
+ ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
return 1;
skip:
scmd->result = SAM_STAT_GOOD;
@@ -1553,20 +1628,27 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
const u8 *cdb = scmd->cmnd;
u64 block;
u32 n_block;
+ u16 fp;
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf->protocol = ATA_PROT_NODATA;
if (cdb[0] == VERIFY) {
- if (scmd->cmd_len < 10)
+ if (scmd->cmd_len < 10) {
+ fp = 9;
goto invalid_fld;
+ }
scsi_10_lba_len(cdb, &block, &n_block);
} else if (cdb[0] == VERIFY_16) {
- if (scmd->cmd_len < 16)
+ if (scmd->cmd_len < 16) {
+ fp = 15;
goto invalid_fld;
+ }
scsi_16_lba_len(cdb, &block, &n_block);
- } else
+ } else {
+ fp = 0;
goto invalid_fld;
+ }
if (!n_block)
goto nothing_to_do;
@@ -1640,12 +1722,11 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
return 0;
invalid_fld:
- ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
- /* "Invalid field in cbd" */
+ ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
return 1;
out_of_range:
- ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
+ ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0);
/* "Logical Block Address out of range" */
return 1;
@@ -1680,6 +1761,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
u64 block;
u32 n_block;
int rc;
+ u16 fp = 0;
if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
tf_flags |= ATA_TFLAG_WRITE;
@@ -1688,16 +1770,20 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
switch (cdb[0]) {
case READ_10:
case WRITE_10:
- if (unlikely(scmd->cmd_len < 10))
+ if (unlikely(scmd->cmd_len < 10)) {
+ fp = 9;
goto invalid_fld;
+ }
scsi_10_lba_len(cdb, &block, &n_block);
if (cdb[1] & (1 << 3))
tf_flags |= ATA_TFLAG_FUA;
break;
case READ_6:
case WRITE_6:
- if (unlikely(scmd->cmd_len < 6))
+ if (unlikely(scmd->cmd_len < 6)) {
+ fp = 5;
goto invalid_fld;
+ }
scsi_6_lba_len(cdb, &block, &n_block);
/* for 6-byte r/w commands, transfer length 0
@@ -1708,14 +1794,17 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
break;
case READ_16:
case WRITE_16:
- if (unlikely(scmd->cmd_len < 16))
+ if (unlikely(scmd->cmd_len < 16)) {
+ fp = 15;
goto invalid_fld;
+ }
scsi_16_lba_len(cdb, &block, &n_block);
if (cdb[1] & (1 << 3))
tf_flags |= ATA_TFLAG_FUA;
break;
default:
DPRINTK("no-byte command\n");
+ fp = 0;
goto invalid_fld;
}
@@ -1742,12 +1831,11 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
goto out_of_range;
/* treat all other errors as -EINVAL, fall through */
invalid_fld:
- ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
- /* "Invalid field in cbd" */
+ ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
return 1;
out_of_range:
- ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
+ ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x0);
/* "Logical Block Address out of range" */
return 1;
@@ -1784,6 +1872,8 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
((cdb[2] & 0x20) || need_sense))
ata_gen_passthru_sense(qc);
+ else if (qc->flags & ATA_QCFLAG_SENSE_VALID)
+ cmd->result = SAM_STAT_CHECK_CONDITION;
else if (need_sense)
ata_gen_ata_sense(qc);
else
@@ -1992,14 +2082,14 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
0x00,
0xA0, /* SAM-5 (no version claimed) */
- 0x04,
- 0xC0, /* SBC-3 (no version claimed) */
+ 0x06,
+ 0x00, /* SBC-4 (no version claimed) */
- 0x04,
- 0x60, /* SPC-4 (no version claimed) */
+ 0x05,
+ 0xC0, /* SPC-5 (no version claimed) */
0x60,
- 0x20, /* ZBC (no version claimed) */
+ 0x24, /* ZBC r05 */
};
u8 hdr[] = {
@@ -2019,10 +2109,8 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
(args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
hdr[1] |= (1 << 7);
- if (args->dev->class == ATA_DEV_ZAC) {
+ if (args->dev->class == ATA_DEV_ZAC)
hdr[0] = TYPE_ZBC;
- hdr[2] = 0x6; /* ZBC is defined in SPC-4 */
- }
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
@@ -2036,7 +2124,7 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
if (rbuf[32] == 0 || rbuf[32] == ' ')
memcpy(&rbuf[32], "n/a ", 4);
- if (args->dev->class == ATA_DEV_ZAC)
+ if (ata_id_zoned_cap(args->id) || args->dev->class == ATA_DEV_ZAC)
memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
else
memcpy(rbuf + 58, versions, sizeof(versions));
@@ -2056,6 +2144,7 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
*/
static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
{
+ int num_pages;
const u8 pages[] = {
0x00, /* page 0x00, this page */
0x80, /* page 0x80, unit serial no page */
@@ -2064,10 +2153,14 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
0xb0, /* page 0xb0, block limits page */
0xb1, /* page 0xb1, block device characteristics page */
0xb2, /* page 0xb2, thin provisioning page */
+ 0xb6, /* page 0xb6, zoned block device characteristics */
};
- rbuf[3] = sizeof(pages); /* number of supported VPD pages */
- memcpy(rbuf + 4, pages, sizeof(pages));
+ num_pages = sizeof(pages);
+ if (!(args->dev->flags & ATA_DFLAG_ZAC))
+ num_pages--;
+ rbuf[3] = num_pages; /* number of supported VPD pages */
+ memcpy(rbuf + 4, pages, num_pages);
return 0;
}
@@ -2232,12 +2325,15 @@ static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
{
int form_factor = ata_id_form_factor(args->id);
int media_rotation_rate = ata_id_rotation_rate(args->id);
+ u8 zoned = ata_id_zoned_cap(args->id);
rbuf[1] = 0xb1;
rbuf[3] = 0x3c;
rbuf[4] = media_rotation_rate >> 8;
rbuf[5] = media_rotation_rate;
rbuf[7] = form_factor;
+ if (zoned)
+ rbuf[8] = (zoned << 4);
return 0;
}
@@ -2252,6 +2348,26 @@ static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
return 0;
}
+static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
+{
+ /*
+ * ZBC-r05 SCSI Zoned Block Device Characteristics VPD page
+ */
+ rbuf[1] = 0xb6;
+ rbuf[3] = 0x3C;
+
+ /*
+ * URSWRZ bit is only meaningful for host-managed ZAC drives
+ */
+ if (args->dev->zac_zoned_cap & 1)
+ rbuf[4] |= 1;
+ put_unaligned_be32(args->dev->zac_zones_optimal_open, &rbuf[8]);
+ put_unaligned_be32(args->dev->zac_zones_optimal_nonseq, &rbuf[12]);
+ put_unaligned_be32(args->dev->zac_zones_max_open, &rbuf[16]);
+
+ return 0;
+}
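/*
 * The three zone counts land as big-endian u32s at fixed offsets 8, 12
 * and 16 of the B6h page, per ZBC. A sketch of one such store without
 * the unaligned helpers:
 */
static void sketch_be32_store(u8 *p, u32 v)
{
	p[0] = v >> 24;
	p[1] = v >> 16;
	p[2] = v >> 8;
	p[3] = v;
}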
+
/**
* ata_scsiop_noop - Command handler that simply returns success.
* @args: device IDENTIFY data / SCSI command of interest.
@@ -2317,6 +2433,7 @@ static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
/**
* ata_msense_ctl_mode - Simulate MODE SENSE control mode page
+ * @dev: ATA device of interest
* @buf: output buffer
* @changeable: whether changeable parameters are requested
*
@@ -2325,9 +2442,12 @@ static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
* LOCKING:
* None.
*/
-static unsigned int ata_msense_ctl_mode(u8 *buf, bool changeable)
+static unsigned int ata_msense_ctl_mode(struct ata_device *dev, u8 *buf,
+ bool changeable)
{
modecpy(buf, def_control_mpage, sizeof(def_control_mpage), changeable);
+ if (changeable && (dev->flags & ATA_DFLAG_D_SENSE))
+ buf[2] |= (1 << 2); /* Descriptor sense requested */
return sizeof(def_control_mpage);
}
@@ -2395,7 +2515,8 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
};
u8 pg, spg;
unsigned int ebd, page_control, six_byte;
- u8 dpofua;
+ u8 dpofua, bp = 0xff;
+ u16 fp;
VPRINTK("ENTER\n");
@@ -2414,6 +2535,8 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
case 3: /* saved */
goto saving_not_supp;
default:
+ fp = 2;
+ bp = 6;
goto invalid_fld;
}
@@ -2428,8 +2551,10 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
* No mode subpages supported (yet) but asking for _all_
* subpages may be valid
*/
- if (spg && (spg != ALL_SUB_MPAGES))
+ if (spg && (spg != ALL_SUB_MPAGES)) {
+ fp = 3;
goto invalid_fld;
+ }
switch(pg) {
case RW_RECOVERY_MPAGE:
@@ -2441,16 +2566,17 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
break;
case CONTROL_MPAGE:
- p += ata_msense_ctl_mode(p, page_control == 1);
+ p += ata_msense_ctl_mode(args->dev, p, page_control == 1);
break;
case ALL_MPAGES:
p += ata_msense_rw_recovery(p, page_control == 1);
p += ata_msense_caching(args->id, p, page_control == 1);
- p += ata_msense_ctl_mode(p, page_control == 1);
+ p += ata_msense_ctl_mode(args->dev, p, page_control == 1);
break;
default: /* invalid page code */
+ fp = 2;
goto invalid_fld;
}
@@ -2480,12 +2606,11 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
return 0;
invalid_fld:
- ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
- /* "Invalid field in cbd" */
+ ata_scsi_set_invalid_field(dev, args->cmd, fp, bp);
return 1;
saving_not_supp:
- ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
+ ata_scsi_set_sense(dev, args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
/* "Saving parameters not supported" */
return 1;
}
@@ -2561,6 +2686,9 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[14] |= 0x40; /* LBPRZ */
}
}
+ if (ata_id_zoned_cap(args->id) ||
+ args->dev->class == ATA_DEV_ZAC)
+ rbuf[12] = (1 << 4); /* RC_BASIS */
}
return 0;
}
@@ -2942,9 +3070,12 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
struct scsi_cmnd *scmd = qc->scsicmd;
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
+ u16 fp;
- if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
+ if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN) {
+ fp = 1;
goto invalid_fld;
+ }
/* enable LBA */
tf->flags |= ATA_TFLAG_LBA;
@@ -3008,8 +3139,10 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
case ATA_CMD_READ_LONG_ONCE:
case ATA_CMD_WRITE_LONG:
case ATA_CMD_WRITE_LONG_ONCE:
- if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
+ if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) {
+ fp = 1;
goto invalid_fld;
+ }
qc->sect_size = scsi_bufflen(scmd);
break;
@@ -3072,12 +3205,16 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
ata_qc_set_pc_nbytes(qc);
/* We may not issue DMA commands if no DMA mode is set */
- if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
+ if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) {
+ fp = 1;
goto invalid_fld;
+ }
/* sanity check for pio multi commands */
- if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf))
+ if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) {
+ fp = 1;
goto invalid_fld;
+ }
if (is_multi_taskfile(tf)) {
unsigned int multi_count = 1 << (cdb[1] >> 5);
@@ -3098,8 +3235,10 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
* ->set_dmamode(), and ->post_set_mode() hooks).
*/
if (tf->command == ATA_CMD_SET_FEATURES &&
- tf->feature == SETFEATURES_XFER)
+ tf->feature == SETFEATURES_XFER) {
+ fp = (cdb[0] == ATA_16) ? 4 : 3;
goto invalid_fld;
+ }
/*
* Filter TPM commands by default. These provide an
@@ -3116,14 +3255,15 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
* so that we comply with the TC consortium stated goal that the user
* can turn off TC features of their system.
*/
- if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
+ if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) {
+ fp = (cdb[0] == ATA_16) ? 14 : 9;
goto invalid_fld;
+ }
return 0;
invalid_fld:
- ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00);
- /* "Invalid field in cdb" */
+ ata_scsi_set_invalid_field(dev, scmd, fp, 0xff);
return 1;
}
@@ -3137,25 +3277,32 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
u32 n_block;
u32 size;
void *buf;
+ u16 fp;
+ u8 bp = 0xff;
/* we may not issue DMA commands if no DMA mode is set */
if (unlikely(!dev->dma_mode))
- goto invalid_fld;
+ goto invalid_opcode;
- if (unlikely(scmd->cmd_len < 16))
+ if (unlikely(scmd->cmd_len < 16)) {
+ fp = 15;
goto invalid_fld;
+ }
scsi_16_lba_len(cdb, &block, &n_block);
/* for now we only support WRITE SAME with the unmap bit set */
- if (unlikely(!(cdb[1] & 0x8)))
+ if (unlikely(!(cdb[1] & 0x8))) {
+ fp = 1;
+ bp = 3;
goto invalid_fld;
+ }
/*
* WRITE SAME always has a sector sized buffer as payload, this
* should never be a multiple entry S/G list.
*/
if (!scsi_sg_count(scmd))
- goto invalid_fld;
+ goto invalid_param_len;
buf = page_address(sg_page(scsi_sglist(scmd)));
size = ata_set_lba_range_entries(buf, 512, block, n_block);
@@ -3186,9 +3333,242 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
return 0;
+invalid_fld:
+ ata_scsi_set_invalid_field(dev, scmd, fp, bp);
+ return 1;
+invalid_param_len:
+ /* "Parameter list length error" */
+ ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
+ return 1;
+invalid_opcode:
+ /* "Invalid command operation code" */
+ ata_scsi_set_sense(dev, scmd, ILLEGAL_REQUEST, 0x20, 0x0);
+ return 1;
+}
+
+/**
+ * ata_scsi_report_zones_complete - convert ATA output
+ * @qc: command structure returning the data
+ *
+ * Convert T-13 little-endian field representation into
+ * T-10 big-endian field representation.
+ * What a mess.
+ */
+static void ata_scsi_report_zones_complete(struct ata_queued_cmd *qc)
+{
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ struct sg_mapping_iter miter;
+ unsigned long flags;
+ unsigned int bytes = 0;
+
+ sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
+ SG_MITER_TO_SG | SG_MITER_ATOMIC);
+
+ local_irq_save(flags);
+ while (sg_miter_next(&miter)) {
+ unsigned int offset = 0;
+
+ if (bytes == 0) {
+ char *hdr;
+ u32 list_length;
+ u64 max_lba, opt_lba;
+ u16 same;
+
+ /* Swizzle header */
+ hdr = miter.addr;
+ list_length = get_unaligned_le32(&hdr[0]);
+ same = get_unaligned_le16(&hdr[4]);
+ max_lba = get_unaligned_le64(&hdr[8]);
+ opt_lba = get_unaligned_le64(&hdr[16]);
+ put_unaligned_be32(list_length, &hdr[0]);
+ hdr[4] = same & 0xf;
+ put_unaligned_be64(max_lba, &hdr[8]);
+ put_unaligned_be64(opt_lba, &hdr[16]);
+ offset += 64;
+ bytes += 64;
+ }
+ while (offset < miter.length) {
+ char *rec;
+ u8 cond, type, non_seq, reset;
+ u64 size, start, wp;
+
+ /* Swizzle zone descriptor */
+ rec = miter.addr + offset;
+ type = rec[0] & 0xf;
+ cond = (rec[1] >> 4) & 0xf;
+ non_seq = (rec[1] & 2);
+ reset = (rec[1] & 1);
+ size = get_unaligned_le64(&rec[8]);
+ start = get_unaligned_le64(&rec[16]);
+ wp = get_unaligned_le64(&rec[24]);
+ rec[0] = type;
+ rec[1] = (cond << 4) | non_seq | reset;
+ put_unaligned_be64(size, &rec[8]);
+ put_unaligned_be64(start, &rec[16]);
+ put_unaligned_be64(wp, &rec[24]);
+ WARN_ON(offset + 64 > miter.length);
+ offset += 64;
+ bytes += 64;
+ }
+ }
+ sg_miter_stop(&miter);
+ local_irq_restore(flags);
+
+ ata_scsi_qc_complete(qc);
+}
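/*
 * The swizzle above rewrites each 64-bit field in place: read it T-13
 * little-endian, store it back T-10 big-endian. A self-contained sketch
 * of one field, without the unaligned helpers:
 */
static void le64_to_be64_inplace(u8 *p)
{
	u64 v = 0;
	int i;

	for (i = 7; i >= 0; i--)		/* gather little-endian */
		v = (v << 8) | p[i];
	for (i = 0; i < 8; i++)			/* scatter big-endian   */
		p[i] = v >> (56 - 8 * i);
}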
+
+static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc)
+{
+ struct ata_taskfile *tf = &qc->tf;
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ const u8 *cdb = scmd->cmnd;
+ u16 sect, fp = (u16)-1;
+ u8 sa, options, bp = 0xff;
+ u64 block;
+ u32 n_block;
+
+ if (unlikely(scmd->cmd_len < 16)) {
+ ata_dev_warn(qc->dev, "invalid cdb length %d\n",
+ scmd->cmd_len);
+ fp = 15;
+ goto invalid_fld;
+ }
+ scsi_16_lba_len(cdb, &block, &n_block);
+ if (n_block != scsi_bufflen(scmd)) {
+ ata_dev_warn(qc->dev, "non-matching transfer count (%d/%d)\n",
+ n_block, scsi_bufflen(scmd));
+ goto invalid_param_len;
+ }
+ sa = cdb[1] & 0x1f;
+ if (sa != ZI_REPORT_ZONES) {
+ ata_dev_warn(qc->dev, "invalid service action %d\n", sa);
+ fp = 1;
+ goto invalid_fld;
+ }
+ /*
+ * ZAC only allows transfers in 512-byte blocks and uses
+ * a 16-bit value for the transfer count.
+ */
+ if ((n_block / 512) > 0xffff || n_block < 512 || (n_block % 512)) {
+ ata_dev_warn(qc->dev, "invalid transfer count %d\n", n_block);
+ goto invalid_param_len;
+ }
+ sect = n_block / 512;
+ options = cdb[14];
+
+ if (ata_ncq_enabled(qc->dev) &&
+ ata_fpdma_zac_mgmt_in_supported(qc->dev)) {
+ tf->protocol = ATA_PROT_NCQ;
+ tf->command = ATA_CMD_FPDMA_RECV;
+ tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f;
+ tf->nsect = qc->tag << 3;
+ tf->feature = sect & 0xff;
+ tf->hob_feature = (sect >> 8) & 0xff;
+ tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES;
+ } else {
+ tf->command = ATA_CMD_ZAC_MGMT_IN;
+ tf->feature = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES;
+ tf->protocol = ATA_PROT_DMA;
+ tf->hob_feature = options;
+ tf->hob_nsect = (sect >> 8) & 0xff;
+ tf->nsect = sect & 0xff;
+ }
+ tf->device = ATA_LBA;
+ tf->lbah = (block >> 16) & 0xff;
+ tf->lbam = (block >> 8) & 0xff;
+ tf->lbal = block & 0xff;
+ tf->hob_lbah = (block >> 40) & 0xff;
+ tf->hob_lbam = (block >> 32) & 0xff;
+ tf->hob_lbal = (block >> 24) & 0xff;
+
+ tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
+ qc->flags |= ATA_QCFLAG_RESULT_TF;
+
+ ata_qc_set_pc_nbytes(qc);
+
+ qc->complete_fn = ata_scsi_report_zones_complete;
+
+ return 0;
+
+invalid_fld:
+ ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
+ return 1;
+
+invalid_param_len:
+ /* "Parameter list length error" */
+ ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
+ return 1;
+}
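/*
 * In the NCQ-encapsulated path above the queue tag travels in SECTOR
 * COUNT bits 7:3 (qc->tag << 3), with the subcommand in the HOB sector
 * count. A sketch of the tag encoding:
 */
static u8 ncq_tag_to_nsect(u8 tag)
{
	return (tag & 0x1f) << 3;	/* 32 tags, bits 7:3 */
}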
+
+static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
+{
+ struct ata_taskfile *tf = &qc->tf;
+ struct scsi_cmnd *scmd = qc->scsicmd;
+ struct ata_device *dev = qc->dev;
+ const u8 *cdb = scmd->cmnd;
+ u8 reset_all, sa;
+ u64 block;
+ u32 n_block;
+ u16 fp = (u16)-1;
+
+ if (unlikely(scmd->cmd_len < 16)) {
+ fp = 15;
+ goto invalid_fld;
+ }
+
+ sa = cdb[1] & 0x1f;
+ if ((sa != ZO_CLOSE_ZONE) && (sa != ZO_FINISH_ZONE) &&
+ (sa != ZO_OPEN_ZONE) && (sa != ZO_RESET_WRITE_POINTER)) {
+ fp = 1;
+ goto invalid_fld;
+ }
+
+ scsi_16_lba_len(cdb, &block, &n_block);
+ if (n_block) {
+ /*
+ * ZAC MANAGEMENT OUT doesn't define any length
+ */
+ goto invalid_param_len;
+ }
+ if (block > dev->n_sectors)
+ goto out_of_range;
+
+ reset_all = cdb[14] & 0x1;
+
+ if (ata_ncq_enabled(qc->dev) &&
+ ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
+ tf->protocol = ATA_PROT_NCQ;
+ tf->command = ATA_CMD_NCQ_NON_DATA;
+ tf->hob_nsect = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT;
+ tf->nsect = qc->tag << 3;
+ tf->auxiliary = sa | (reset_all & 0x1) << 8;
+ } else {
+ tf->protocol = ATA_PROT_NODATA;
+ tf->command = ATA_CMD_ZAC_MGMT_OUT;
+ tf->feature = sa;
+ tf->hob_feature = reset_all & 0x1;
+ }
+ tf->lbah = (block >> 16) & 0xff;
+ tf->lbam = (block >> 8) & 0xff;
+ tf->lbal = block & 0xff;
+ tf->hob_lbah = (block >> 40) & 0xff;
+ tf->hob_lbam = (block >> 32) & 0xff;
+ tf->hob_lbal = (block >> 24) & 0xff;
+ tf->device = ATA_LBA;
+ tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48;
+
+ return 0;
+
invalid_fld:
- ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00);
- /* "Invalid field in cdb" */
+ ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
+ return 1;
+ out_of_range:
+ /* "Logical Block Address out of range" */
+ ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00);
+ return 1;
+invalid_param_len:
+ /* "Parameter list length error" */
+ ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
return 1;
}
@@ -3197,6 +3577,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
* @qc: Storage for translated ATA taskfile
* @buf: input buffer
* @len: number of valid bytes in the input buffer
+ * @fp: out parameter for the failed field on error
*
* Prepare a taskfile to modify caching information for the device.
*
@@ -3204,20 +3585,26 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
* None.
*/
static int ata_mselect_caching(struct ata_queued_cmd *qc,
- const u8 *buf, int len)
+ const u8 *buf, int len, u16 *fp)
{
struct ata_taskfile *tf = &qc->tf;
struct ata_device *dev = qc->dev;
char mpage[CACHE_MPAGE_LEN];
u8 wce;
+ int i;
/*
* The first two bytes of def_cache_mpage are a header, so offsets
* in mpage are off by 2 compared to buf. Same for len.
*/
- if (len != CACHE_MPAGE_LEN - 2)
+ if (len != CACHE_MPAGE_LEN - 2) {
+ if (len < CACHE_MPAGE_LEN - 2)
+ *fp = len;
+ else
+ *fp = CACHE_MPAGE_LEN - 2;
return -EINVAL;
+ }
wce = buf[0] & (1 << 2);
@@ -3225,10 +3612,14 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc,
* Check that read-only bits are not modified.
*/
ata_msense_caching(dev->id, mpage, false);
- mpage[2] &= ~(1 << 2);
- mpage[2] |= wce;
- if (memcmp(mpage + 2, buf, CACHE_MPAGE_LEN - 2) != 0)
- return -EINVAL;
+ for (i = 0; i < CACHE_MPAGE_LEN - 2; i++) {
+ if (i == 0)
+ continue;
+ if (mpage[i + 2] != buf[i]) {
+ *fp = i;
+ return -EINVAL;
+ }
+ }
tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
tf->protocol = ATA_PROT_NODATA;
@@ -3239,6 +3630,62 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc,
}
/**
+ * ata_mselect_control - Simulate MODE SELECT for control page
+ * @qc: Storage for translated ATA taskfile
+ * @buf: input buffer
+ * @len: number of valid bytes in the input buffer
+ * @fp: out parameter for the failed field on error
+ *
+ * Apply the control mode page from a MODE SELECT command; currently
+ * only the D_SENSE bit is handled.
+ *
+ * LOCKING:
+ * None.
+ */
+static int ata_mselect_control(struct ata_queued_cmd *qc,
+ const u8 *buf, int len, u16 *fp)
+{
+ struct ata_device *dev = qc->dev;
+ char mpage[CONTROL_MPAGE_LEN];
+ u8 d_sense;
+ int i;
+
+ /*
+ * The first two bytes of def_control_mpage are a header, so offsets
+ * in mpage are off by 2 compared to buf. Same for len.
+ */
+
+ if (len != CONTROL_MPAGE_LEN - 2) {
+ if (len < CONTROL_MPAGE_LEN - 2)
+ *fp = len;
+ else
+ *fp = CONTROL_MPAGE_LEN - 2;
+ return -EINVAL;
+ }
+
+ d_sense = buf[0] & (1 << 2);
+
+ /*
+ * Check that read-only bits are not modified.
+ */
+ ata_msense_ctl_mode(dev, mpage, false);
+ for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) {
+ if (i == 0)
+ continue;
+ if (mpage[2 + i] != buf[i]) {
+ *fp = i;
+ return -EINVAL;
+ }
+ }
+ if (d_sense & (1 << 2))
+ dev->flags |= ATA_DFLAG_D_SENSE;
+ else
+ dev->flags &= ~ATA_DFLAG_D_SENSE;
+ qc->scsicmd->result = SAM_STAT_GOOD;
+ qc->scsicmd->scsi_done(qc->scsicmd);
+ return 0;
+}
+
+/**
* ata_scsiop_mode_select - Simulate MODE SELECT 6, 10 commands
* @qc: Storage for translated ATA taskfile
*
@@ -3257,27 +3704,36 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
u8 pg, spg;
unsigned six_byte, pg_len, hdr_len, bd_len;
int len;
+ u16 fp = (u16)-1;
+ u8 bp = 0xff;
VPRINTK("ENTER\n");
six_byte = (cdb[0] == MODE_SELECT);
if (six_byte) {
- if (scmd->cmd_len < 5)
+ if (scmd->cmd_len < 5) {
+ fp = 4;
goto invalid_fld;
+ }
len = cdb[4];
hdr_len = 4;
} else {
- if (scmd->cmd_len < 9)
+ if (scmd->cmd_len < 9) {
+ fp = 8;
goto invalid_fld;
+ }
len = (cdb[7] << 8) + cdb[8];
hdr_len = 8;
}
/* We only support PF=1, SP=0. */
- if ((cdb[1] & 0x11) != 0x10)
+ if ((cdb[1] & 0x11) != 0x10) {
+ fp = 1;
+ bp = (cdb[1] & 0x01) ? 1 : 5;
goto invalid_fld;
+ }
/* Test early for possible overrun. */
if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
@@ -3298,8 +3754,11 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
p += hdr_len;
if (len < bd_len)
goto invalid_param_len;
- if (bd_len != 0 && bd_len != 8)
+ if (bd_len != 0 && bd_len != 8) {
+ fp = (six_byte) ? 3 : 6;
+ fp += bd_len + hdr_len;
goto invalid_param;
+ }
len -= bd_len;
p += bd_len;
@@ -3330,18 +3789,29 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
* No mode subpages supported (yet) but asking for _all_
* subpages may be valid
*/
- if (spg && (spg != ALL_SUB_MPAGES))
+ if (spg && (spg != ALL_SUB_MPAGES)) {
+ fp = (p[0] & 0x40) ? 1 : 0;
+ fp += hdr_len + bd_len;
goto invalid_param;
+ }
if (pg_len > len)
goto invalid_param_len;
switch (pg) {
case CACHE_MPAGE:
- if (ata_mselect_caching(qc, p, pg_len) < 0)
+ if (ata_mselect_caching(qc, p, pg_len, &fp) < 0) {
+ fp += hdr_len + bd_len;
goto invalid_param;
+ }
+ break;
+ case CONTROL_MPAGE:
+ if (ata_mselect_control(qc, p, pg_len, &fp) < 0) {
+ fp += hdr_len + bd_len;
+ goto invalid_param;
+ }
break;
-
default: /* invalid page code */
+ fp = bd_len + hdr_len;
goto invalid_param;
}
@@ -3355,18 +3825,16 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
return 0;
invalid_fld:
- /* "Invalid field in CDB" */
- ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
+ ata_scsi_set_invalid_field(qc->dev, scmd, fp, bp);
return 1;
invalid_param:
- /* "Invalid field in parameter list" */
- ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x26, 0x0);
+ ata_scsi_set_invalid_parameter(qc->dev, scmd, fp);
return 1;
invalid_param_len:
/* "Parameter list length error" */
- ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
+ ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
return 1;
skip:
@@ -3419,6 +3887,12 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
return ata_scsi_mode_select_xlat;
break;
+ case ZBC_IN:
+ return ata_scsi_zbc_in_xlat;
+
+ case ZBC_OUT:
+ return ata_scsi_zbc_out_xlat;
+
case START_STOP:
return ata_scsi_start_stop_xlat;
}
@@ -3439,14 +3913,11 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
{
#ifdef ATA_DEBUG
struct scsi_device *scsidev = cmd->device;
- u8 *scsicmd = cmd->cmnd;
- DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ DPRINTK("CDB (%u:%d,%d,%d) %9ph\n",
ap->print_id,
scsidev->channel, scsidev->id, scsidev->lun,
- scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
- scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
- scsicmd[8]);
+ cmd->cmnd);
#endif
}
@@ -3570,12 +4041,12 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
switch(scsicmd[0]) {
/* TODO: worth improving? */
case FORMAT_UNIT:
- ata_scsi_invalid_field(cmd);
+ ata_scsi_invalid_field(dev, cmd, 0);
break;
case INQUIRY:
- if (scsicmd[1] & 2) /* is CmdDt set? */
- ata_scsi_invalid_field(cmd);
+ if (scsicmd[1] & 2) /* is CmdDt set? */
+ ata_scsi_invalid_field(dev, cmd, 1);
else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
else switch (scsicmd[2]) {
@@ -3600,8 +4071,14 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
case 0xb2:
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
break;
+ case 0xb6:
+ if (dev->flags & ATA_DFLAG_ZAC) {
+ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
+ break;
+ }
+ /* Fallthrough */
default:
- ata_scsi_invalid_field(cmd);
+ ata_scsi_invalid_field(dev, cmd, 2);
break;
}
break;
@@ -3619,7 +4096,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
else
- ata_scsi_invalid_field(cmd);
+ ata_scsi_invalid_field(dev, cmd, 1);
break;
case REPORT_LUNS:
@@ -3627,7 +4104,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
break;
case REQUEST_SENSE:
- ata_scsi_set_sense(cmd, 0, 0, 0);
+ ata_scsi_set_sense(dev, cmd, 0, 0, 0);
cmd->result = (DRIVER_SENSE << 24);
cmd->scsi_done(cmd);
break;
@@ -3651,12 +4128,12 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
else
- ata_scsi_invalid_field(cmd);
+ ata_scsi_invalid_field(dev, cmd, 1);
break;
/* all other commands */
default:
- ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
+ ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0);
/* "Invalid command operation code" */
cmd->scsi_done(cmd);
break;
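
The libata-scsi changes above replace the bare "Invalid field" sense codes with sense-key-specific reporting: fp carries the byte offset of the offending field (mode-parameter header + block descriptors + offset within the page). As a point of reference, here is a minimal user-space sketch of how SPC-4 encodes such a field pointer into fixed-format sense data; the helper name and 18-byte layout are illustrative, not the libata implementation.

	#include <stdint.h>
	#include <string.h>

	/*
	 * Hypothetical encoder: "Invalid field in parameter list" with a
	 * sense-key-specific field pointer, in SPC-4 fixed-format sense
	 * data. 'fp' is the byte offset of the bad field from the start of
	 * the parameter list, which is what the mode-select path computes.
	 */
	static void set_invalid_param_sense(uint8_t *sb, uint16_t fp)
	{
		memset(sb, 0, 18);
		sb[0]  = 0x70;		/* fixed format, current error */
		sb[2]  = 0x05;		/* sense key: ILLEGAL REQUEST */
		sb[7]  = 10;		/* additional sense length */
		sb[12] = 0x26;		/* ASC: invalid field in parameter list */
		sb[15] = 0x80;		/* SKSV: field pointer below is valid */
		sb[16] = fp >> 8;	/* field pointer, big endian */
		sb[17] = fp & 0xff;
	}

For CDB errors (the invalid_fld label) the C/D bit (0x40) would additionally be set in byte 15, and a bit pointer such as bp can be encoded in the low bits of that byte.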
diff --git a/drivers/ata/libata-trace.c b/drivers/ata/libata-trace.c
index fd30b8c10..f8c550df0 100644
--- a/drivers/ata/libata-trace.c
+++ b/drivers/ata/libata-trace.c
@@ -149,3 +149,75 @@ libata_trace_parse_qc_flags(struct trace_seq *p, unsigned int qc_flags)
return ret;
}
+
+const char *
+libata_trace_parse_subcmd(struct trace_seq *p, unsigned char cmd,
+ unsigned char feature, unsigned char hob_nsect)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ switch (cmd) {
+ case ATA_CMD_FPDMA_RECV:
+ switch (hob_nsect & 0x5f) {
+ case ATA_SUBCMD_FPDMA_RECV_RD_LOG_DMA_EXT:
+ trace_seq_printf(p, " READ_LOG_DMA_EXT");
+ break;
+ case ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN:
+ trace_seq_printf(p, " ZAC_MGMT_IN");
+ break;
+ }
+ break;
+ case ATA_CMD_FPDMA_SEND:
+ switch (hob_nsect & 0x5f) {
+ case ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT:
+ trace_seq_printf(p, " WRITE_LOG_DMA_EXT");
+ break;
+ case ATA_SUBCMD_FPDMA_SEND_DSM:
+ trace_seq_printf(p, " DATASET_MANAGEMENT");
+ break;
+ }
+ break;
+ case ATA_CMD_NCQ_NON_DATA:
+ switch (feature) {
+ case ATA_SUBCMD_NCQ_NON_DATA_ABORT_QUEUE:
+ trace_seq_printf(p, " ABORT_QUEUE");
+ break;
+ case ATA_SUBCMD_NCQ_NON_DATA_SET_FEATURES:
+ trace_seq_printf(p, " SET_FEATURES");
+ break;
+ case ATA_SUBCMD_NCQ_NON_DATA_ZERO_EXT:
+ trace_seq_printf(p, " ZERO_EXT");
+ break;
+ case ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT:
+ trace_seq_printf(p, " ZAC_MGMT_OUT");
+ break;
+ }
+ break;
+ case ATA_CMD_ZAC_MGMT_IN:
+ switch (feature) {
+ case ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES:
+ trace_seq_printf(p, " REPORT_ZONES");
+ break;
+ }
+ break;
+ case ATA_CMD_ZAC_MGMT_OUT:
+ switch (feature) {
+ case ATA_SUBCMD_ZAC_MGMT_OUT_CLOSE_ZONE:
+ trace_seq_printf(p, " CLOSE_ZONE");
+ break;
+ case ATA_SUBCMD_ZAC_MGMT_OUT_FINISH_ZONE:
+ trace_seq_printf(p, " FINISH_ZONE");
+ break;
+ case ATA_SUBCMD_ZAC_MGMT_OUT_OPEN_ZONE:
+ trace_seq_printf(p, " OPEN_ZONE");
+ break;
+ case ATA_SUBCMD_ZAC_MGMT_OUT_RESET_WRITE_POINTER:
+ trace_seq_printf(p, " RESET_WRITE_POINTER");
+ break;
+ }
+ break;
+ }
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
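
The new libata_trace_parse_subcmd() gives ftrace output symbolic names for commands whose real operation lives in a secondary field (FEATURE or HOB NSECT). A standalone sketch of the same decode for ZAC MANAGEMENT OUT, using the action codes the ATA_SUBCMD_ZAC_MGMT_OUT_* constants are expected to carry; treat the numeric values as illustrative:

	#include <stdio.h>

	/* User-space mirror of the kernel switch above, decoding the ZAC
	 * MANAGEMENT OUT action from the FEATURE field. */
	static const char *zac_mgmt_out_name(unsigned char feature)
	{
		switch (feature) {
		case 0x01: return "CLOSE_ZONE";
		case 0x02: return "FINISH_ZONE";
		case 0x03: return "OPEN_ZONE";
		case 0x04: return "RESET_WRITE_POINTER";
		default:   return "UNKNOWN";
		}
	}

	int main(void)
	{
		printf("feature 0x04 -> %s\n", zac_mgmt_out_name(0x04));
		return 0;
	}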
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index f840ca18a..3b301a480 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -67,7 +67,8 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
u64 block, u32 n_block, unsigned int tf_flags,
unsigned int tag);
-extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
+extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
+ struct ata_device *dev);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen,
@@ -137,6 +138,11 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
extern int ata_scsi_offline_dev(struct ata_device *dev);
+extern void ata_scsi_set_sense(struct ata_device *dev,
+ struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
+extern void ata_scsi_set_sense_information(struct ata_device *dev,
+ struct scsi_cmnd *cmd,
+ const struct ata_taskfile *tf);
extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
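
ata_scsi_set_sense() now takes the ata_device so the sense format can follow the device's preference. A minimal sketch of the likely shape, assuming the kernel's scsi_build_sense_buffer() helper and the ATA_DFLAG_D_SENSE flag (descriptor-format sense enabled); this mirrors the declaration above and is not the verbatim implementation:

	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_common.h>
	#include <linux/libata.h>

	/* Sketch: fill the command's sense buffer in the format the device
	 * prefers: descriptor format if D_SENSE is enabled, fixed otherwise. */
	static void example_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
				      u8 sk, u8 asc, u8 ascq)
	{
		bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);

		scsi_build_sense_buffer(d_sense, cmd->sense_buffer, sk, asc, ascq);
	}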
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index d7c732042..188f2f2eb 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -294,7 +294,7 @@ static int icside_dma_init(struct pata_icside_info *info)
static struct scsi_host_template pata_icside_sht = {
ATA_BASE_SHT(DRV_NAME),
- .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+ .sg_tablesize = SG_MAX_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
};
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 902034991..00c2af1d2 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -30,10 +30,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
#include <linux/libata.h>
#include <linux/slab.h>
@@ -42,10 +44,6 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
-/* Supported DMA engine drivers */
-#include <linux/platform_data/dma-dw.h>
-#include <linux/dma/dw.h>
-
/* These two are defined in "libata.h" */
#undef DRV_NAME
#undef DRV_VERSION
@@ -53,19 +51,14 @@
#define DRV_NAME "sata-dwc"
#define DRV_VERSION "1.3"
-#ifndef out_le32
-#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (void __iomem *)(a))
-#endif
-
-#ifndef in_le32
-#define in_le32(a) __le32_to_cpu(__raw_readl((void __iomem *)(a)))
-#endif
+#define sata_dwc_writel(a, v) writel_relaxed(v, a)
+#define sata_dwc_readl(a) readl_relaxed(a)
#ifndef NO_IRQ
#define NO_IRQ 0
#endif
-#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length*/
+#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */
enum {
SATA_DWC_MAX_PORTS = 1,
@@ -102,7 +95,7 @@ struct sata_dwc_regs {
u32 versionr; /* Version Register */
u32 idr; /* ID Register */
u32 unimpl[192]; /* Unimplemented */
- u32 dmadr[256]; /* FIFO Locations in DMA Mode */
+ u32 dmadr[256]; /* FIFO Locations in DMA Mode */
};
enum {
@@ -146,9 +139,14 @@ struct sata_dwc_device {
struct device *dev; /* generic device struct */
struct ata_probe_ent *pe; /* ptr to probe-ent */
struct ata_host *host;
- u8 __iomem *reg_base;
- struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */
+ struct sata_dwc_regs __iomem *sata_dwc_regs; /* DW SATA specific */
+ u32 sactive_issued;
+ u32 sactive_queued;
+ struct phy *phy;
+ phys_addr_t dmadr;
+#ifdef CONFIG_SATA_DWC_OLD_DMA
struct dw_dma_chip *dma;
+#endif
};
#define SATA_DWC_QCMD_MAX 32
@@ -159,25 +157,19 @@ struct sata_dwc_device_port {
int dma_pending[SATA_DWC_QCMD_MAX];
/* DMA info */
- struct dw_dma_slave *dws;
struct dma_chan *chan;
struct dma_async_tx_descriptor *desc[SATA_DWC_QCMD_MAX];
u32 dma_interrupt_count;
};
/*
- * Commonly used DWC SATA driver Macros
+ * Commonly used DWC SATA driver macros
*/
-#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)\
- (host)->private_data)
-#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)\
- (ap)->host->private_data)
-#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)\
- (ap)->private_data)
-#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)\
- (qc)->ap->host->private_data)
-#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)\
- (hsdevp)->hsdev)
+#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *)(host)->private_data)
+#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *)(ap)->host->private_data)
+#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *)(ap)->private_data)
+#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *)(qc)->ap->host->private_data)
+#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *)(p)->hsdev)
enum {
SATA_DWC_CMD_ISSUED_NOT = 0,
@@ -190,21 +182,6 @@ enum {
SATA_DWC_DMA_PENDING_RX = 2,
};
-struct sata_dwc_host_priv {
- void __iomem *scr_addr_sstatus;
- u32 sata_dwc_sactive_issued ;
- u32 sata_dwc_sactive_queued ;
-};
-
-static struct sata_dwc_host_priv host_pvt;
-
-static struct dw_dma_slave sata_dwc_dma_dws = {
- .src_id = 0,
- .dst_id = 0,
- .src_master = 0,
- .dst_master = 1,
-};
-
/*
* Prototypes
*/
@@ -215,6 +192,93 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
static void sata_dwc_port_stop(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
+#ifdef CONFIG_SATA_DWC_OLD_DMA
+
+#include <linux/platform_data/dma-dw.h>
+#include <linux/dma/dw.h>
+
+static struct dw_dma_slave sata_dwc_dma_dws = {
+ .src_id = 0,
+ .dst_id = 0,
+ .m_master = 1,
+ .p_master = 0,
+};
+
+static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_slave *dws = &sata_dwc_dma_dws;
+
+ if (dws->dma_dev != chan->device->dev)
+ return false;
+
+ chan->private = dws;
+ return true;
+}
+
+static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
+{
+ struct sata_dwc_device *hsdev = hsdevp->hsdev;
+ struct dw_dma_slave *dws = &sata_dwc_dma_dws;
+ dma_cap_mask_t mask;
+
+ dws->dma_dev = hsdev->dev;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Acquire DMA channel */
+ hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
+ if (!hsdevp->chan) {
+ dev_err(hsdev->dev, "%s: dma channel unavailable\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int sata_dwc_dma_init_old(struct platform_device *pdev,
+ struct sata_dwc_device *hsdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+
+ hsdev->dma = devm_kzalloc(&pdev->dev, sizeof(*hsdev->dma), GFP_KERNEL);
+ if (!hsdev->dma)
+ return -ENOMEM;
+
+ hsdev->dma->dev = &pdev->dev;
+
+ /* Get SATA DMA interrupt number */
+ hsdev->dma->irq = irq_of_parse_and_map(np, 1);
+ if (hsdev->dma->irq == NO_IRQ) {
+ dev_err(&pdev->dev, "no SATA DMA irq\n");
+ return -ENODEV;
+ }
+
+ /* Get physical SATA DMA register base address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hsdev->dma->regs)) {
+ dev_err(&pdev->dev,
+ "ioremap failed for AHBDMA register address\n");
+ return PTR_ERR(hsdev->dma->regs);
+ }
+
+ /* Initialize AHB DMAC */
+ return dw_dma_probe(hsdev->dma);
+}
+
+static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
+{
+ if (!hsdev->dma)
+ return;
+
+ dw_dma_remove(hsdev->dma);
+}
+
+#endif
+
static const char *get_prot_descript(u8 protocol)
{
switch ((enum ata_tf_protocols)protocol) {
@@ -305,21 +369,20 @@ static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd
struct ata_port *ap = qc->ap;
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
- dma_addr_t addr = (dma_addr_t)&hsdev->sata_dwc_regs->dmadr;
struct dma_slave_config sconf;
struct dma_async_tx_descriptor *desc;
if (qc->dma_dir == DMA_DEV_TO_MEM) {
- sconf.src_addr = addr;
- sconf.device_fc = true;
+ sconf.src_addr = hsdev->dmadr;
+ sconf.device_fc = false;
} else { /* DMA_MEM_TO_DEV */
- sconf.dst_addr = addr;
+ sconf.dst_addr = hsdev->dmadr;
sconf.device_fc = false;
}
sconf.direction = qc->dma_dir;
- sconf.src_maxburst = AHB_DMA_BRST_DFLT;
- sconf.dst_maxburst = AHB_DMA_BRST_DFLT;
+ sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */
+ sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4; /* in items */
sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -336,8 +399,8 @@ static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd
desc->callback = dma_dwc_xfer_done;
desc->callback_param = hsdev;
- dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pad\n",
- __func__, qc->sg, qc->n_elem, &addr);
+ dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
+ qc->sg, qc->n_elem, &hsdev->dmadr);
return desc;
}
@@ -350,48 +413,38 @@ static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
return -EINVAL;
}
- *val = in_le32(link->ap->ioaddr.scr_addr + (scr * 4));
- dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
- __func__, link->ap->print_id, scr, *val);
+ *val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
+ dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
+ link->ap->print_id, scr, *val);
return 0;
}
static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
- dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
- __func__, link->ap->print_id, scr, val);
+ dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
+ link->ap->print_id, scr, val);
if (scr > SCR_NOTIFICATION) {
dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
__func__, scr);
return -EINVAL;
}
- out_le32(link->ap->ioaddr.scr_addr + (scr * 4), val);
+ sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);
return 0;
}
-static u32 core_scr_read(unsigned int scr)
-{
- return in_le32(host_pvt.scr_addr_sstatus + (scr * 4));
-}
-
-static void core_scr_write(unsigned int scr, u32 val)
-{
- out_le32(host_pvt.scr_addr_sstatus + (scr * 4), val);
-}
-
-static void clear_serror(void)
+static void clear_serror(struct ata_port *ap)
{
u32 val;
- val = core_scr_read(SCR_ERROR);
- core_scr_write(SCR_ERROR, val);
+ sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
+ sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
}
static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
{
- out_le32(&hsdev->sata_dwc_regs->intpr,
- in_le32(&hsdev->sata_dwc_regs->intpr));
+ sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
+ sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
}
static u32 qcmd_tag_to_mask(u8 tag)
@@ -412,7 +465,7 @@ static void sata_dwc_error_intr(struct ata_port *ap,
ata_ehi_clear_desc(ehi);
- serror = core_scr_read(SCR_ERROR);
+ sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
status = ap->ops->sff_check_status(ap);
tag = ap->link.active_tag;
@@ -423,7 +476,7 @@ static void sata_dwc_error_intr(struct ata_port *ap,
hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);
/* Clear error register and interrupt bit */
- clear_serror();
+ clear_serror(ap);
clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);
/* This is the only error happening now. TODO check for exact error */
@@ -462,12 +515,12 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
int handled, num_processed, port = 0;
uint intpr, sactive, sactive2, tag_mask;
struct sata_dwc_device_port *hsdevp;
- host_pvt.sata_dwc_sactive_issued = 0;
+ hsdev->sactive_issued = 0;
spin_lock_irqsave(&host->lock, flags);
/* Read the interrupt register */
- intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
+ intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);
ap = host->ports[port];
hsdevp = HSDEVP_FROM_AP(ap);
@@ -486,12 +539,12 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
if (intpr & SATA_DWC_INTPR_NEWFP) {
clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
- tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
+ tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
- host_pvt.sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag);
+ hsdev->sactive_issued |= qcmd_tag_to_mask(tag);
qc = ata_qc_from_tag(ap, tag);
/*
@@ -505,11 +558,11 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
handled = 1;
goto DONE;
}
- sactive = core_scr_read(SCR_ACTIVE);
- tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
+ sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
+ tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;
/* If no sactive issued and tag_mask is zero then this is not NCQ */
- if (host_pvt.sata_dwc_sactive_issued == 0 && tag_mask == 0) {
+ if (hsdev->sactive_issued == 0 && tag_mask == 0) {
if (ap->link.active_tag == ATA_TAG_POISON)
tag = 0;
else
@@ -579,22 +632,19 @@ DRVSTILLBUSY:
*/
/* process completed commands */
- sactive = core_scr_read(SCR_ACTIVE);
- tag_mask = (host_pvt.sata_dwc_sactive_issued | sactive) ^ sactive;
+ sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
+ tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;
- if (sactive != 0 || (host_pvt.sata_dwc_sactive_issued) > 1 || \
- tag_mask > 1) {
+ if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
dev_dbg(ap->dev,
"%s NCQ:sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
- __func__, sactive, host_pvt.sata_dwc_sactive_issued,
- tag_mask);
+ __func__, sactive, hsdev->sactive_issued, tag_mask);
}
- if ((tag_mask | (host_pvt.sata_dwc_sactive_issued)) != \
- (host_pvt.sata_dwc_sactive_issued)) {
+ if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
dev_warn(ap->dev,
- "Bad tag mask? sactive=0x%08x (host_pvt.sata_dwc_sactive_issued)=0x%08x tag_mask=0x%08x\n",
- sactive, host_pvt.sata_dwc_sactive_issued, tag_mask);
+ "Bad tag mask? sactive=0x%08x sactive_issued=0x%08x tag_mask=0x%08x\n",
+ sactive, hsdev->sactive_issued, tag_mask);
}
/* read just to clear ... not bad if currently still busy */
@@ -656,7 +706,7 @@ STILLBUSY:
* we were processing --we read status as part of processing a completed
* command).
*/
- sactive2 = core_scr_read(SCR_ACTIVE);
+ sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
if (sactive2 != sactive) {
dev_dbg(ap->dev,
"More completed - sactive=0x%x sactive2=0x%x\n",
@@ -672,15 +722,14 @@ DONE:
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
{
struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
+ u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);
if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
- out_le32(&(hsdev->sata_dwc_regs->dmacr),
- SATA_DWC_DMACR_RX_CLEAR(
- in_le32(&(hsdev->sata_dwc_regs->dmacr))));
+ dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
+ sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
- out_le32(&(hsdev->sata_dwc_regs->dmacr),
- SATA_DWC_DMACR_TX_CLEAR(
- in_le32(&(hsdev->sata_dwc_regs->dmacr))));
+ dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
+ sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
} else {
/*
* This should not happen, it indicates the driver is out of
@@ -688,10 +737,9 @@ static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
*/
dev_err(hsdev->dev,
"%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
- __func__, tag, hsdevp->dma_pending[tag],
- in_le32(&hsdev->sata_dwc_regs->dmacr));
- out_le32(&(hsdev->sata_dwc_regs->dmacr),
- SATA_DWC_DMACR_TXRXCH_CLEAR);
+ __func__, tag, hsdevp->dma_pending[tag], dmacr);
+ sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_TXRXCH_CLEAR);
}
}
@@ -716,7 +764,7 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
__func__, qc->tag, qc->tf.command,
get_dma_dir_descript(qc->dma_dir),
get_prot_descript(qc->tf.protocol),
- in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+ sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
}
#endif
@@ -725,7 +773,7 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
dev_err(ap->dev,
"%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
__func__,
- in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+ sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
}
hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
@@ -742,8 +790,9 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
u8 status = 0;
u32 mask = 0x0;
u8 tag = qc->tag;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
- host_pvt.sata_dwc_sactive_queued = 0;
+ hsdev->sactive_queued = 0;
dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
@@ -756,10 +805,8 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
/* clear active bit */
mask = (~(qcmd_tag_to_mask(tag)));
- host_pvt.sata_dwc_sactive_queued = (host_pvt.sata_dwc_sactive_queued) \
- & mask;
- host_pvt.sata_dwc_sactive_issued = (host_pvt.sata_dwc_sactive_issued) \
- & mask;
+ hsdev->sactive_queued = hsdev->sactive_queued & mask;
+ hsdev->sactive_issued = hsdev->sactive_issued & mask;
ata_qc_complete(qc);
return 0;
}
@@ -767,54 +814,62 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
{
/* Enable selective interrupts by setting the interrupt mask register */
- out_le32(&hsdev->sata_dwc_regs->intmr,
- SATA_DWC_INTMR_ERRM |
- SATA_DWC_INTMR_NEWFPM |
- SATA_DWC_INTMR_PMABRTM |
- SATA_DWC_INTMR_DMATM);
+ sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
+ SATA_DWC_INTMR_ERRM |
+ SATA_DWC_INTMR_NEWFPM |
+ SATA_DWC_INTMR_PMABRTM |
+ SATA_DWC_INTMR_DMATM);
/*
* Unmask the error bits that should trigger an error interrupt by
* setting the error mask register.
*/
- out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
+ sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
- __func__, in_le32(&hsdev->sata_dwc_regs->intmr),
- in_le32(&hsdev->sata_dwc_regs->errmr));
+ __func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
+ sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
}
-static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
+static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
{
- struct sata_dwc_device_port *hsdevp = param;
- struct dw_dma_slave *dws = hsdevp->dws;
+ port->cmd_addr = base + 0x00;
+ port->data_addr = base + 0x00;
- if (dws->dma_dev != chan->device->dev)
- return false;
+ port->error_addr = base + 0x04;
+ port->feature_addr = base + 0x04;
- chan->private = dws;
- return true;
-}
+ port->nsect_addr = base + 0x08;
-static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
-{
- port->cmd_addr = (void __iomem *)base + 0x00;
- port->data_addr = (void __iomem *)base + 0x00;
+ port->lbal_addr = base + 0x0c;
+ port->lbam_addr = base + 0x10;
+ port->lbah_addr = base + 0x14;
- port->error_addr = (void __iomem *)base + 0x04;
- port->feature_addr = (void __iomem *)base + 0x04;
+ port->device_addr = base + 0x18;
+ port->command_addr = base + 0x1c;
+ port->status_addr = base + 0x1c;
- port->nsect_addr = (void __iomem *)base + 0x08;
+ port->altstatus_addr = base + 0x20;
+ port->ctl_addr = base + 0x20;
+}
+
+static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
+{
+ struct sata_dwc_device *hsdev = hsdevp->hsdev;
+ struct device *dev = hsdev->dev;
- port->lbal_addr = (void __iomem *)base + 0x0c;
- port->lbam_addr = (void __iomem *)base + 0x10;
- port->lbah_addr = (void __iomem *)base + 0x14;
+#ifdef CONFIG_SATA_DWC_OLD_DMA
+ if (!of_find_property(dev->of_node, "dmas", NULL))
+ return sata_dwc_dma_get_channel_old(hsdevp);
+#endif
- port->device_addr = (void __iomem *)base + 0x18;
- port->command_addr = (void __iomem *)base + 0x1c;
- port->status_addr = (void __iomem *)base + 0x1c;
+ hsdevp->chan = dma_request_chan(dev, "sata-dma");
+ if (IS_ERR(hsdevp->chan)) {
+ dev_err(dev, "failed to allocate dma channel: %ld\n",
+ PTR_ERR(hsdevp->chan));
+ return PTR_ERR(hsdevp->chan);
+ }
- port->altstatus_addr = (void __iomem *)base + 0x20;
- port->ctl_addr = (void __iomem *)base + 0x20;
+ return 0;
}
/*
@@ -829,7 +884,6 @@ static int sata_dwc_port_start(struct ata_port *ap)
struct sata_dwc_device *hsdev;
struct sata_dwc_device_port *hsdevp = NULL;
struct device *pdev;
- dma_cap_mask_t mask;
int i;
hsdev = HSDEV_FROM_AP(ap);
@@ -853,20 +907,13 @@ static int sata_dwc_port_start(struct ata_port *ap)
}
hsdevp->hsdev = hsdev;
- hsdevp->dws = &sata_dwc_dma_dws;
- hsdevp->dws->dma_dev = hsdev->dev;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ err = sata_dwc_dma_get_channel(hsdevp);
+ if (err)
+ goto CLEANUP_ALLOC;
- /* Acquire DMA channel */
- hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
- if (!hsdevp->chan) {
- dev_err(hsdev->dev, "%s: dma channel unavailable\n",
- __func__);
- err = -EAGAIN;
+ err = phy_power_on(hsdev->phy);
+ if (err)
goto CLEANUP_ALLOC;
- }
for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
@@ -877,18 +924,18 @@ static int sata_dwc_port_start(struct ata_port *ap)
if (ap->port_no == 0) {
dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
__func__);
- out_le32(&hsdev->sata_dwc_regs->dmacr,
- SATA_DWC_DMACR_TXRXCH_CLEAR);
+ sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_TXRXCH_CLEAR);
dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
__func__);
- out_le32(&hsdev->sata_dwc_regs->dbtsr,
- (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
- SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
+ sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
+ (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
+ SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
}
/* Clear any error bits before libata starts issuing commands */
- clear_serror();
+ clear_serror(ap);
ap->private_data = hsdevp;
dev_dbg(ap->dev, "%s: done\n", __func__);
return 0;
@@ -903,11 +950,13 @@ CLEANUP:
static void sata_dwc_port_stop(struct ata_port *ap)
{
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
- dmaengine_terminate_all(hsdevp->chan);
+ dmaengine_terminate_sync(hsdevp->chan);
dma_release_channel(hsdevp->chan);
+ phy_power_off(hsdev->phy);
kfree(hsdevp);
ap->private_data = NULL;
@@ -924,22 +973,20 @@ static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
struct ata_taskfile *tf,
u8 tag, u32 cmd_issued)
{
- unsigned long flags;
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
ata_get_cmd_descript(tf->command), tag);
- spin_lock_irqsave(&ap->host->lock, flags);
hsdevp->cmd_issued[tag] = cmd_issued;
- spin_unlock_irqrestore(&ap->host->lock, flags);
+
/*
* Clear SError before executing a new command.
* sata_dwc_scr_write and read cannot be used here. Clearing the PM
* managed SError register for the disk needs to be done before the
* task file is loaded.
*/
- clear_serror();
+ clear_serror(ap);
ata_sff_exec_command(ap, tf);
}
@@ -992,18 +1039,18 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
sata_dwc_tf_dump(ap, &qc->tf);
if (start_dma) {
- reg = core_scr_read(SCR_ERROR);
+ sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
if (reg & SATA_DWC_SERROR_ERR_BITS) {
dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
__func__, reg);
}
if (dir == DMA_TO_DEVICE)
- out_le32(&hsdev->sata_dwc_regs->dmacr,
- SATA_DWC_DMACR_TXCHEN);
+ sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_TXCHEN);
else
- out_le32(&hsdev->sata_dwc_regs->dmacr,
- SATA_DWC_DMACR_RXCHEN);
+ sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_RXCHEN);
/* Enable AHB DMA transfer on the specified channel */
dmaengine_submit(desc);
@@ -1025,36 +1072,12 @@ static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
sata_dwc_bmdma_start_by_tag(qc, tag);
}
-/*
- * Function : sata_dwc_qc_prep_by_tag
- * arguments : ata_queued_cmd *qc, u8 tag
- * Return value : None
- * qc_prep for a particular queued command based on tag
- */
-static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
-{
- struct dma_async_tx_descriptor *desc;
- struct ata_port *ap = qc->ap;
- struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
-
- dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
- __func__, ap->port_no, get_dma_dir_descript(qc->dma_dir),
- qc->n_elem);
-
- desc = dma_dwc_xfer_setup(qc);
- if (!desc) {
- dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns NULL\n",
- __func__);
- return;
- }
- hsdevp->desc[tag] = desc;
-}
-
static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
{
u32 sactive;
u8 tag = qc->tag;
struct ata_port *ap = qc->ap;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
#ifdef DEBUG_NCQ
if (qc->tag > 0 || ap->link.sactive > 1)
@@ -1068,47 +1091,33 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
if (!ata_is_ncq(qc->tf.protocol))
tag = 0;
- sata_dwc_qc_prep_by_tag(qc, tag);
+
+ if (ata_is_dma(qc->tf.protocol)) {
+ hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
+ if (!hsdevp->desc[tag])
+ return AC_ERR_SYSTEM;
+ } else {
+ hsdevp->desc[tag] = NULL;
+ }
if (ata_is_ncq(qc->tf.protocol)) {
- sactive = core_scr_read(SCR_ACTIVE);
+ sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
sactive |= (0x00000001 << tag);
- core_scr_write(SCR_ACTIVE, sactive);
+ sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);
dev_dbg(qc->ap->dev,
"%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n",
__func__, tag, qc->ap->link.sactive, sactive);
ap->ops->sff_tf_load(ap, &qc->tf);
- sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
+ sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
SATA_DWC_CMD_ISSUED_PEND);
} else {
- ata_sff_qc_issue(qc);
+ return ata_bmdma_qc_issue(qc);
}
return 0;
}
-/*
- * Function : sata_dwc_qc_prep
- * arguments : ata_queued_cmd *qc
- * Return value : None
- * qc_prep for a particular queued command
- */
-
-static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
-{
- if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
- return;
-
-#ifdef DEBUG_NCQ
- if (qc->tag > 0)
- dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
- __func__, qc->tag, qc->ap->link.active_tag);
-
- return ;
-#endif
-}
-
static void sata_dwc_error_handler(struct ata_port *ap)
{
ata_sff_error_handler(ap);
@@ -1125,17 +1134,22 @@ static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
sata_dwc_enable_interrupts(hsdev);
/* Reconfigure the DMA control register */
- out_le32(&hsdev->sata_dwc_regs->dmacr,
- SATA_DWC_DMACR_TXRXCH_CLEAR);
+ sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_TXRXCH_CLEAR);
/* Reconfigure the DMA Burst Transaction Size register */
- out_le32(&hsdev->sata_dwc_regs->dbtsr,
- SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
- SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
+ sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
+ SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
+ SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
return ret;
}
+static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
+{
+ /* SATA DWC is master only */
+}
+
/*
* scsi mid-layer and libata interface structures
*/
@@ -1148,7 +1162,13 @@ static struct scsi_host_template sata_dwc_sht = {
*/
.sg_tablesize = LIBATA_MAX_PRD,
/* .can_queue = ATA_MAX_QUEUE, */
- .dma_boundary = ATA_DMA_BOUNDARY,
+ /*
+	 * Make sure an LLI block is not created that spans the 8K (max FIS
+	 * size) boundary. If a block spans such a boundary, there is a chance
+	 * that a DMA burst will cross it, which results in an error in the
+	 * host controller.
+ */
+ .dma_boundary = 0x1fff /* ATA_DMA_BOUNDARY */,
};
static struct ata_port_operations sata_dwc_ops = {
@@ -1157,7 +1177,6 @@ static struct ata_port_operations sata_dwc_ops = {
.error_handler = sata_dwc_error_handler,
.hardreset = sata_dwc_hardreset,
- .qc_prep = sata_dwc_qc_prep,
.qc_issue = sata_dwc_qc_issue,
.scr_read = sata_dwc_scr_read,
@@ -1166,6 +1185,8 @@ static struct ata_port_operations sata_dwc_ops = {
.port_start = sata_dwc_port_start,
.port_stop = sata_dwc_port_stop,
+ .sff_dev_select = sata_dwc_dev_select,
+
.bmdma_setup = sata_dwc_bmdma_setup,
.bmdma_start = sata_dwc_bmdma_start,
};
@@ -1184,13 +1205,14 @@ static int sata_dwc_probe(struct platform_device *ofdev)
struct sata_dwc_device *hsdev;
u32 idr, versionr;
char *ver = (char *)&versionr;
- u8 __iomem *base;
+ void __iomem *base;
int err = 0;
int irq;
struct ata_host *host;
struct ata_port_info pi = sata_dwc_port_info[0];
const struct ata_port_info *ppi[] = { &pi, NULL };
struct device_node *np = ofdev->dev.of_node;
+ struct resource *res;
/* Allocate DWC SATA device */
host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
@@ -1201,57 +1223,33 @@ static int sata_dwc_probe(struct platform_device *ofdev)
host->private_data = hsdev;
/* Ioremap SATA registers */
- base = of_iomap(np, 0);
- if (!base) {
+ res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&ofdev->dev, res);
+ if (IS_ERR(base)) {
dev_err(&ofdev->dev,
"ioremap failed for SATA register address\n");
- return -ENODEV;
+ return PTR_ERR(base);
}
- hsdev->reg_base = base;
dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
/* Synopsys DWC SATA specific Registers */
- hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
+ hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
+ hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);
/* Setup port */
host->ports[0]->ioaddr.cmd_addr = base;
host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
- host_pvt.scr_addr_sstatus = base + SATA_DWC_SCR_OFFSET;
- sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
+ sata_dwc_setup_port(&host->ports[0]->ioaddr, base);
/* Read the ID and Version Registers */
- idr = in_le32(&hsdev->sata_dwc_regs->idr);
- versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
+ idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
+ versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
idr, ver[0], ver[1], ver[2]);
- /* Get SATA DMA interrupt number */
- hsdev->dma->irq = irq_of_parse_and_map(np, 1);
- if (hsdev->dma->irq == NO_IRQ) {
- dev_err(&ofdev->dev, "no SATA DMA irq\n");
- err = -ENODEV;
- goto error_iomap;
- }
-
- /* Get physical SATA DMA register base address */
- hsdev->dma->regs = of_iomap(np, 1);
- if (!hsdev->dma->regs) {
- dev_err(&ofdev->dev,
- "ioremap failed for AHBDMA register address\n");
- err = -ENODEV;
- goto error_iomap;
- }
-
/* Save dev for later use in dev_xxx() routines */
hsdev->dev = &ofdev->dev;
- hsdev->dma->dev = &ofdev->dev;
-
- /* Initialize AHB DMAC */
- err = dw_dma_probe(hsdev->dma, NULL);
- if (err)
- goto error_dma_iomap;
-
/* Enable SATA Interrupts */
sata_dwc_enable_interrupts(hsdev);
@@ -1263,6 +1261,25 @@ static int sata_dwc_probe(struct platform_device *ofdev)
goto error_out;
}
+#ifdef CONFIG_SATA_DWC_OLD_DMA
+ if (!of_find_property(np, "dmas", NULL)) {
+ err = sata_dwc_dma_init_old(ofdev, hsdev);
+ if (err)
+ goto error_out;
+ }
+#endif
+
+ hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
+ if (IS_ERR(hsdev->phy)) {
+ err = PTR_ERR(hsdev->phy);
+ hsdev->phy = NULL;
+ goto error_out;
+ }
+
+ err = phy_init(hsdev->phy);
+ if (err)
+ goto error_out;
+
/*
* Now, register with libATA core, this will also initiate the
* device discovery process, invoking our port_start() handler &
@@ -1276,12 +1293,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
return 0;
error_out:
- /* Free SATA DMA resources */
- dw_dma_remove(hsdev->dma);
-error_dma_iomap:
- iounmap(hsdev->dma->regs);
-error_iomap:
- iounmap(base);
+ phy_exit(hsdev->phy);
return err;
}
@@ -1293,11 +1305,13 @@ static int sata_dwc_remove(struct platform_device *ofdev)
ata_host_detach(host);
+ phy_exit(hsdev->phy);
+
+#ifdef CONFIG_SATA_DWC_OLD_DMA
/* Free SATA DMA resources */
- dw_dma_remove(hsdev->dma);
+ sata_dwc_dma_exit_old(hsdev);
+#endif
- iounmap(hsdev->dma->regs);
- iounmap(hsdev->reg_base);
dev_dbg(&ofdev->dev, "done\n");
return 0;
}
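
The rewritten sata_dwc driver drops its private dw_dma plumbing in favour of the generic dmaengine API: request a named channel, configure it per transfer, then prep, submit, and issue. A condensed sketch of that sequence, assuming a device-tree channel named "sata-dma" as in the code above:

	#include <linux/device.h>
	#include <linux/dmaengine.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>

	/* Sketch of the dmaengine calling sequence the driver now follows
	 * for a device-to-memory (read) transfer. */
	static int example_start_rx(struct device *dev, phys_addr_t fifo,
				    struct scatterlist *sg, unsigned int nents)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,
			.src_addr	= fifo,	/* controller FIFO, physical address */
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 16,	/* counted in items, not bytes */
		};
		struct dma_async_tx_descriptor *desc;
		struct dma_chan *chan;
		int ret;

		chan = dma_request_chan(dev, "sata-dma");
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = dmaengine_slave_config(chan, &cfg);
		if (ret)
			goto release;

		desc = dmaengine_prep_slave_sg(chan, sg, nents, DMA_DEV_TO_MEM,
					       DMA_PREP_INTERRUPT);
		if (!desc) {
			ret = -ENOMEM;
			goto release;
		}

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;

	release:
		dma_release_channel(chan);
		return ret;
	}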
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 8638d575b..aafb8cc03 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -197,7 +197,7 @@ static void highbank_set_em_messages(struct device *dev,
for (i = 0; i < SGPIO_PINS; i++) {
err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
- if (IS_ERR_VALUE(err))
+ if (err < 0)
return;
pdata->sgpio_gpio[i] = err;
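
The highbank fix replaces IS_ERR_VALUE() with a plain sign check because of_get_named_gpio() returns an int errno. IS_ERR_VALUE() is only well-defined on unsigned long; when the value passes through a narrower or unsigned type first, the implicit conversion can defeat the range test. A small user-space demonstration of the pitfall, using a simplified copy of the macro (assumes an LP64 build):

	#include <stdio.h>

	#define MAX_ERRNO	4095
	/* Simplified form of the kernel macro, for demonstration only. */
	#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

	int main(void)
	{
		int err = -22;				/* e.g. -EINVAL from a GPIO lookup */
		unsigned int uerr = (unsigned int)-22;	/* same bits, unsigned type */

		/* The int sign-extends to unsigned long and is caught... */
		printf("int:      %d\n", IS_ERR_VALUE(err));	/* prints 1 */
		/* ...the unsigned value zero-extends and slips through. */
		printf("unsigned: %d\n", IS_ERR_VALUE(uerr));	/* prints 0 */

		/* 'err < 0' has no such trap, hence the one-line fix above. */
		return 0;
	}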
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index bd74ee555..745489a1c 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -986,7 +986,7 @@ static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
* Looks like a lot of fuss, but it avoids an unnecessary
* +1 usec read-after-write delay for unaffected registers.
*/
- laddr = (long)addr & 0xffff;
+ laddr = (unsigned long)addr & 0xffff;
if (laddr >= 0x300 && laddr <= 0x33c) {
laddr &= 0x000f;
if (laddr == 0x4 || laddr == 0xc) {
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index a969a7e44..85aaf2222 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -181,13 +181,17 @@ static char *res_strings[] = {
"reserved 27",
"reserved 28",
"reserved 29",
- "reserved 30",
+ "reserved 30", /* FIXME: The strings between 30-40 might be wrong. */
"reassembly abort: no buffers",
"receive buffer overflow",
"change in GFC",
"receive buffer full",
"low priority discard - no receive descriptor",
"low priority discard - missing end of packet",
+ "reserved 37",
+ "reserved 38",
+ "reserved 39",
+ "reseverd 40",
"reserved 41",
"reserved 42",
"reserved 43",
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 7d00f2994..809dd1e02 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev)
/* make the ptr point to the corresponding buffer desc entry */
buf_desc_ptr += desc;
if (!desc || (desc > iadev->num_rx_desc) ||
- ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
+ ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
free_desc(dev, desc);
IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
return -1;
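
The iphase change is a classic bounds fix: vc_index selects an entry in a table of iadev->num_vc elements, so valid indices run from 0 through num_vc - 1 and a value equal to num_vc must be rejected. The rule in isolation:

	#include <stdbool.h>

	/* Valid indices into a table of 'num_vc' entries are 0 .. num_vc - 1,
	 * so '>= num_vc' (not '> num_vc') is the out-of-range test. */
	static bool vc_index_valid(unsigned int idx, unsigned int num_vc)
	{
		return idx < num_vc;
	}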
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 6b2a84e7f..2609ba20b 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_ISA) += isa.o
+obj-$(CONFIG_ISA_BUS_API) += isa.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 1bd120a0b..240374fd1 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -4,6 +4,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -41,12 +42,12 @@ static bool devcd_disabled;
struct devcd_entry {
struct device devcd_dev;
- const void *data;
+ void *data;
size_t datalen;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
- const void *data, size_t datalen);
- void (*free)(const void *data);
+ void *data, size_t datalen);
+ void (*free)(void *data);
struct delayed_work del_wk;
struct device *failing_dev;
};
@@ -174,7 +175,7 @@ static struct class devcd_class = {
};
static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
- const void *data, size_t datalen)
+ void *data, size_t datalen)
{
if (offset > datalen)
return -EINVAL;
@@ -188,6 +189,11 @@ static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
return count;
}
+static void devcd_freev(void *data)
+{
+ vfree(data);
+}
+
/**
* dev_coredumpv - create device coredump with vmalloc data
* @dev: the struct device for the crashed device
@@ -198,10 +204,10 @@ static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
* This function takes ownership of the vmalloc'ed data and will free
* it when it is no longer used. See dev_coredumpm() for more information.
*/
-void dev_coredumpv(struct device *dev, const void *data, size_t datalen,
+void dev_coredumpv(struct device *dev, void *data, size_t datalen,
gfp_t gfp)
{
- dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, vfree);
+ dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, devcd_freev);
}
EXPORT_SYMBOL_GPL(dev_coredumpv);
@@ -213,6 +219,44 @@ static int devcd_match_failing(struct device *dev, const void *failing)
}
/**
+ * devcd_free_sgtable - free all the memory of the given scatterlist table
+ * (i.e. both pages and scatterlist instances)
+ * NOTE: if two tables allocated with devcd_alloc_sgtable are chained using
+ * the sg_chain function, then this function should be called only once on
+ * the chained table
+ * @data: pointer to the sg_table to free
+ */
+static void devcd_free_sgtable(void *data)
+{
+ _devcd_free_sgtable(data);
+}
+
+/**
+ * devcd_read_from_sgtable - copy data from an sg_table to a given buffer
+ * and return the number of bytes read
+ * @buffer: the buffer to copy the data to
+ * @buf_len: the length of the buffer
+ * @data: the scatterlist table to copy from
+ * @offset: start copying @offset bytes from the head of the data
+ * in the given scatterlist
+ * @data_len: the length of the data in the sg_table
+ */
+static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset,
+ size_t buf_len, void *data,
+ size_t data_len)
+{
+ struct scatterlist *table = data;
+
+ if (offset > data_len)
+ return -EINVAL;
+
+ if (offset + buf_len > data_len)
+ buf_len = data_len - offset;
+ return sg_pcopy_to_buffer(table, sg_nents(table), buffer, buf_len,
+ offset);
+}
+
+/**
* dev_coredumpm - create device coredump with read/free methods
* @dev: the struct device for the crashed device
* @owner: the module that contains the read/free functions, use %THIS_MODULE
@@ -228,10 +272,10 @@ static int devcd_match_failing(struct device *dev, const void *failing)
* function will be called to free the data.
*/
void dev_coredumpm(struct device *dev, struct module *owner,
- const void *data, size_t datalen, gfp_t gfp,
+ void *data, size_t datalen, gfp_t gfp,
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
- const void *data, size_t datalen),
- void (*free)(const void *data))
+ void *data, size_t datalen),
+ void (*free)(void *data))
{
static atomic_t devcd_count = ATOMIC_INIT(0);
struct devcd_entry *devcd;
@@ -291,6 +335,27 @@ void dev_coredumpm(struct device *dev, struct module *owner,
}
EXPORT_SYMBOL_GPL(dev_coredumpm);
+/**
+ * dev_coredumpsg - create device coredump that uses scatterlist as data
+ * parameter
+ * @dev: the struct device for the crashed device
+ * @table: the dump data
+ * @datalen: length of the data
+ * @gfp: allocation flags
+ *
+ * Creates a new device coredump for the given device. If a previous one hasn't
+ * been read yet, the new coredump is discarded. The data lifetime is managed
+ * by the device coredump framework, which frees the data once it is no
+ * longer needed.
+ */
+void dev_coredumpsg(struct device *dev, struct scatterlist *table,
+ size_t datalen, gfp_t gfp)
+{
+ dev_coredumpm(dev, NULL, table, datalen, gfp, devcd_read_from_sgtable,
+ devcd_free_sgtable);
+}
+EXPORT_SYMBOL_GPL(dev_coredumpsg);
+
static int __init devcoredump_init(void)
{
return class_register(&devcd_class);
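
A hedged usage sketch of the new dev_coredumpsg() entry point. Ownership of the table transfers to the framework: devcd_free_sgtable() will free both the scatterlist instances and the pages behind them, so the table must be built from page-backed entries the caller no longer references.

	#include <linux/device.h>
	#include <linux/devcoredump.h>
	#include <linux/gfp.h>
	#include <linux/scatterlist.h>

	/* Sketch: hand a prepared scatterlist dump to the framework. After
	 * the call, the table and its pages belong to devcoredump and must
	 * not be touched again by the caller. */
	static void example_dump(struct device *dev, struct scatterlist *table,
				 size_t datalen)
	{
		dev_coredumpsg(dev, table, datalen, GFP_KERNEL);
	}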
diff --git a/drivers/base/isa.c b/drivers/base/isa.c
index 91dba65d7..cd6ccdcf9 100644
--- a/drivers/base/isa.c
+++ b/drivers/base/isa.c
@@ -180,4 +180,4 @@ static int __init isa_bus_init(void)
return error;
}
-device_initcall(isa_bus_init);
+postcore_initcall(isa_bus_init);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f437afa17..6482d47de 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -322,16 +322,16 @@ EXPORT_SYMBOL_GPL(platform_device_add_data);
/**
* platform_device_add_properties - add built-in properties to a platform device
* @pdev: platform device to add properties to
- * @pset: properties to add
+ * @properties: null terminated array of properties to add
*
- * The function will take deep copy of the properties in @pset and attach
- * the copy to the platform device. The memory associated with properties
- * will be freed when the platform device is released.
+ * The function will take a deep copy of @properties and attach the copy to
+ * the platform device. The memory associated with the properties will be
+ * freed when the platform device is released.
*/
int platform_device_add_properties(struct platform_device *pdev,
- const struct property_set *pset)
+ struct property_entry *properties)
{
- return device_add_property_set(&pdev->dev, pset);
+ return device_add_properties(&pdev->dev, properties);
}
EXPORT_SYMBOL_GPL(platform_device_add_properties);
@@ -447,7 +447,7 @@ void platform_device_del(struct platform_device *pdev)
release_resource(r);
}
- device_remove_property_set(&pdev->dev);
+ device_remove_properties(&pdev->dev);
}
}
EXPORT_SYMBOL_GPL(platform_device_del);
@@ -526,8 +526,9 @@ struct platform_device *platform_device_register_full(
if (ret)
goto err;
- if (pdevinfo->pset) {
- ret = platform_device_add_properties(pdev, pdevinfo->pset);
+ if (pdevinfo->properties) {
+ ret = platform_device_add_properties(pdev,
+ pdevinfo->properties);
if (ret)
goto err;
}
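
platform_device_add_properties() now takes a null-terminated array of struct property_entry instead of a property_set. A minimal sketch using the PROPERTY_ENTRY_* initializers from <linux/property.h>; the property names here are invented for illustration:

	#include <linux/platform_device.h>
	#include <linux/property.h>

	/* Hypothetical built-in properties; the names are purely illustrative. */
	static struct property_entry example_props[] = {
		PROPERTY_ENTRY_U32("example,max-speed-hz", 400000),
		PROPERTY_ENTRY_STRING("example,mode", "fast"),
		{ }	/* terminator required by the new API */
	};

	static int example_attach(struct platform_device *pdev)
	{
		/* The array is deep-copied, so it may live in static data. */
		return platform_device_add_properties(pdev, example_props);
	}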
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 0e64a1b5e..3657ac1cb 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -159,7 +159,7 @@ int of_pm_clk_add_clks(struct device *dev)
count = of_count_phandle_with_args(dev->of_node, "clocks",
"#clock-cells");
- if (count == 0)
+ if (count <= 0)
return -ENODEV;
clks = kcalloc(count, sizeof(*clks), GFP_KERNEL);
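
The clock_ops fix accounts for of_count_phandle_with_args() returning a negative errno (missing or malformed "clocks" property), not just zero; checking only for == 0 let a negative count reach kcalloc(). The guard in isolation:

	#include <linux/errno.h>
	#include <linux/of.h>

	/* Sketch: treat both "no clocks" (0) and lookup errors (< 0) as
	 * "nothing to add", never passing a negative count to an allocator. */
	static int example_count_clocks(struct device_node *np)
	{
		int count = of_count_phandle_with_args(np, "clocks",
						       "#clock-cells");

		if (count <= 0)
			return -ENODEV;

		return count;
	}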
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 56705b527..de23b648f 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -229,17 +229,6 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
return ret;
}
-static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
-}
-
-static int genpd_restore_dev(struct generic_pm_domain *genpd,
- struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
-}
-
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
unsigned long val, void *ptr)
{
@@ -372,17 +361,63 @@ static void genpd_power_off_work_fn(struct work_struct *work)
}
/**
- * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
+ * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
+ * @dev: Device to handle.
+ */
+static int __genpd_runtime_suspend(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+
+ if (dev->type && dev->type->pm)
+ cb = dev->type->pm->runtime_suspend;
+ else if (dev->class && dev->class->pm)
+ cb = dev->class->pm->runtime_suspend;
+ else if (dev->bus && dev->bus->pm)
+ cb = dev->bus->pm->runtime_suspend;
+ else
+ cb = NULL;
+
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = dev->driver->pm->runtime_suspend;
+
+ return cb ? cb(dev) : 0;
+}
+
+/**
+ * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
+ * @dev: Device to handle.
+ */
+static int __genpd_runtime_resume(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+
+ if (dev->type && dev->type->pm)
+ cb = dev->type->pm->runtime_resume;
+ else if (dev->class && dev->class->pm)
+ cb = dev->class->pm->runtime_resume;
+ else if (dev->bus && dev->bus->pm)
+ cb = dev->bus->pm->runtime_resume;
+ else
+ cb = NULL;
+
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = dev->driver->pm->runtime_resume;
+
+ return cb ? cb(dev) : 0;
+}
+
+/**
+ * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
* @dev: Device to suspend.
*
* Carry out a runtime suspend of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a PM domain consisting of I/O devices.
*/
-static int pm_genpd_runtime_suspend(struct device *dev)
+static int genpd_runtime_suspend(struct device *dev)
{
struct generic_pm_domain *genpd;
- bool (*stop_ok)(struct device *__dev);
+ bool (*suspend_ok)(struct device *__dev);
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
bool runtime_pm = pm_runtime_enabled(dev);
ktime_t time_start;
@@ -401,21 +436,21 @@ static int pm_genpd_runtime_suspend(struct device *dev)
* runtime PM is disabled. Under these circumstances, we shall skip
* validating/measuring the PM QoS latency.
*/
- stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
- if (runtime_pm && stop_ok && !stop_ok(dev))
+ suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
+ if (runtime_pm && suspend_ok && !suspend_ok(dev))
return -EBUSY;
/* Measure suspend latency. */
if (runtime_pm)
time_start = ktime_get();
- ret = genpd_save_dev(genpd, dev);
+ ret = __genpd_runtime_suspend(dev);
if (ret)
return ret;
ret = genpd_stop_dev(genpd, dev);
if (ret) {
- genpd_restore_dev(genpd, dev);
+ __genpd_runtime_resume(dev);
return ret;
}
@@ -446,14 +481,14 @@ static int pm_genpd_runtime_suspend(struct device *dev)
}
/**
- * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+ * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
* @dev: Device to resume.
*
* Carry out a runtime resume of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a PM domain consisting of I/O devices.
*/
-static int pm_genpd_runtime_resume(struct device *dev)
+static int genpd_runtime_resume(struct device *dev)
{
struct generic_pm_domain *genpd;
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
@@ -491,7 +526,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
if (ret)
goto err_poweroff;
- ret = genpd_restore_dev(genpd, dev);
+ ret = __genpd_runtime_resume(dev);
if (ret)
goto err_stop;
@@ -695,15 +730,6 @@ static int pm_genpd_prepare(struct device *dev)
* at this point and a system wakeup event should be reported if it's
* set up to wake up the system from sleep states.
*/
- pm_runtime_get_noresume(dev);
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
- pm_wakeup_event(dev, 0);
-
- if (pm_wakeup_pending()) {
- pm_runtime_put(dev);
- return -EBUSY;
- }
-
if (resume_needed(dev, genpd))
pm_runtime_resume(dev);
@@ -716,10 +742,8 @@ static int pm_genpd_prepare(struct device *dev)
mutex_unlock(&genpd->lock);
- if (genpd->suspend_power_off) {
- pm_runtime_put_noidle(dev);
+ if (genpd->suspend_power_off)
return 0;
- }
/*
* The PM domain must be in the GPD_STATE_ACTIVE state at this point,
@@ -741,7 +765,6 @@ static int pm_genpd_prepare(struct device *dev)
pm_runtime_enable(dev);
}
- pm_runtime_put(dev);
return ret;
}
@@ -1427,54 +1450,6 @@ out:
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
-/* Default device callbacks for generic PM domains. */
-
-/**
- * pm_genpd_default_save_state - Default "save device state" for PM domains.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_save_state(struct device *dev)
-{
- int (*cb)(struct device *__dev);
-
- if (dev->type && dev->type->pm)
- cb = dev->type->pm->runtime_suspend;
- else if (dev->class && dev->class->pm)
- cb = dev->class->pm->runtime_suspend;
- else if (dev->bus && dev->bus->pm)
- cb = dev->bus->pm->runtime_suspend;
- else
- cb = NULL;
-
- if (!cb && dev->driver && dev->driver->pm)
- cb = dev->driver->pm->runtime_suspend;
-
- return cb ? cb(dev) : 0;
-}
-
-/**
- * pm_genpd_default_restore_state - Default PM domains "restore device state".
- * @dev: Device to handle.
- */
-static int pm_genpd_default_restore_state(struct device *dev)
-{
- int (*cb)(struct device *__dev);
-
- if (dev->type && dev->type->pm)
- cb = dev->type->pm->runtime_resume;
- else if (dev->class && dev->class->pm)
- cb = dev->class->pm->runtime_resume;
- else if (dev->bus && dev->bus->pm)
- cb = dev->bus->pm->runtime_resume;
- else
- cb = NULL;
-
- if (!cb && dev->driver && dev->driver->pm)
- cb = dev->driver->pm->runtime_resume;
-
- return cb ? cb(dev) : 0;
-}
-
/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
@@ -1498,8 +1473,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->device_count = 0;
genpd->max_off_time_ns = -1;
genpd->max_off_time_changed = true;
- genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
- genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
+ genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
+ genpd->domain.ops.runtime_resume = genpd_runtime_resume;
genpd->domain.ops.prepare = pm_genpd_prepare;
genpd->domain.ops.suspend = pm_genpd_suspend;
genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
@@ -1520,8 +1495,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.restore_early = pm_genpd_resume_early;
genpd->domain.ops.restore = pm_genpd_resume;
genpd->domain.ops.complete = pm_genpd_complete;
- genpd->dev_ops.save_state = pm_genpd_default_save_state;
- genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
if (genpd->flags & GENPD_FLAG_PM_CLK) {
genpd->dev_ops.stop = pm_clk_suspend;
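
With save_state/restore_state gone, the callback walk lives directly in __genpd_runtime_suspend()/__genpd_runtime_resume(), and governors now expose suspend_ok instead of stop_ok. A minimal sketch of registering a domain against the stock QoS governor, whose renamed hook gates genpd_runtime_suspend():

	#include <linux/pm_domain.h>

	static struct generic_pm_domain example_pd = {
		.name = "example-pd",
	};

	/* Sketch: initialize a PM domain with the QoS governor. Its
	 * 'suspend_ok' hook (formerly 'stop_ok') is consulted before each
	 * runtime suspend of a device in the domain. */
	static void example_init(void)
	{
		pm_genpd_init(&example_pd, &simple_qos_governor,
			      true /* is_off */);
	}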
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 00a5436dd..2e0fce711 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -37,10 +37,10 @@ static int dev_update_qos_constraint(struct device *dev, void *data)
}
/**
- * default_stop_ok - Default PM domain governor routine for stopping devices.
+ * default_suspend_ok - Default PM domain governor routine to suspend devices.
* @dev: Device to check.
*/
-static bool default_stop_ok(struct device *dev)
+static bool default_suspend_ok(struct device *dev)
{
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
unsigned long flags;
@@ -51,13 +51,13 @@ static bool default_stop_ok(struct device *dev)
spin_lock_irqsave(&dev->power.lock, flags);
if (!td->constraint_changed) {
- bool ret = td->cached_stop_ok;
+ bool ret = td->cached_suspend_ok;
spin_unlock_irqrestore(&dev->power.lock, flags);
return ret;
}
td->constraint_changed = false;
- td->cached_stop_ok = false;
+ td->cached_suspend_ok = false;
td->effective_constraint_ns = -1;
constraint_ns = __dev_pm_qos_read_value(dev);
@@ -83,13 +83,13 @@ static bool default_stop_ok(struct device *dev)
return false;
}
td->effective_constraint_ns = constraint_ns;
- td->cached_stop_ok = constraint_ns >= 0;
+ td->cached_suspend_ok = constraint_ns >= 0;
/*
* The children have been suspended already, so we don't need to take
- * their stop latencies into account here.
+ * their suspend latencies into account here.
*/
- return td->cached_stop_ok;
+ return td->cached_suspend_ok;
}
/**
@@ -150,7 +150,7 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
*/
td = &to_gpd_data(pdd)->td;
constraint_ns = td->effective_constraint_ns;
- /* default_stop_ok() need not be called before us. */
+ /* default_suspend_ok() need not be called before us. */
if (constraint_ns < 0) {
constraint_ns = dev_pm_qos_read_value(pdd->dev);
constraint_ns *= NSEC_PER_USEC;
@@ -227,7 +227,7 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
}
struct dev_power_governor simple_qos_governor = {
- .stop_ok = default_stop_ok,
+ .suspend_ok = default_suspend_ok,
.power_down_ok = default_power_down_ok,
};
@@ -236,5 +236,5 @@ struct dev_power_governor simple_qos_governor = {
*/
struct dev_power_governor pm_domain_always_on_gov = {
.power_down_ok = always_on_power_down_ok,
- .stop_ok = default_stop_ok,
+ .suspend_ok = default_suspend_ok,
};
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 27aea9603..e44944f4b 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1557,7 +1557,6 @@ int dpm_suspend(pm_message_t state)
static int device_prepare(struct device *dev, pm_message_t state)
{
int (*callback)(struct device *) = NULL;
- char *info = NULL;
int ret = 0;
if (dev->power.syscore)
@@ -1580,24 +1579,17 @@ static int device_prepare(struct device *dev, pm_message_t state)
goto unlock;
}
- if (dev->pm_domain) {
- info = "preparing power domain ";
+ if (dev->pm_domain)
callback = dev->pm_domain->ops.prepare;
- } else if (dev->type && dev->type->pm) {
- info = "preparing type ";
+ else if (dev->type && dev->type->pm)
callback = dev->type->pm->prepare;
- } else if (dev->class && dev->class->pm) {
- info = "preparing class ";
+ else if (dev->class && dev->class->pm)
callback = dev->class->pm->prepare;
- } else if (dev->bus && dev->bus->pm) {
- info = "preparing bus ";
+ else if (dev->bus && dev->bus->pm)
callback = dev->bus->pm->prepare;
- }
- if (!callback && dev->driver && dev->driver->pm) {
- info = "preparing driver ";
+ if (!callback && dev->driver && dev->driver->pm)
callback = dev->driver->pm->prepare;
- }
if (callback)
ret = callback(dev);
diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile
index 19837ef04..e70ceb406 100644
--- a/drivers/base/power/opp/Makefile
+++ b/drivers/base/power/opp/Makefile
@@ -1,3 +1,4 @@
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
obj-y += core.o cpu.o
+obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index d8f4cc228..7c04c8773 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -18,7 +18,6 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
-#include <linux/of.h>
#include <linux/export.h>
#include <linux/regulator/consumer.h>
@@ -29,7 +28,7 @@
* from here, with each opp_table containing the list of opps it supports in
* various states of availability.
*/
-static LIST_HEAD(opp_tables);
+LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
@@ -53,26 +52,6 @@ static struct opp_device *_find_opp_dev(const struct device *dev,
return NULL;
}
-static struct opp_table *_managed_opp(const struct device_node *np)
-{
- struct opp_table *opp_table;
-
- list_for_each_entry_rcu(opp_table, &opp_tables, node) {
- if (opp_table->np == np) {
- /*
- * Multiple devices can point to the same OPP table and
- * so will have same node-pointer, np.
- *
- * But the OPPs will be considered as shared only if the
- * OPP table contains a "opp-shared" property.
- */
- return opp_table->shared_opp ? opp_table : NULL;
- }
- }
-
- return NULL;
-}
-
/**
* _find_opp_table() - find opp_table struct using device pointer
* @dev: device pointer used to lookup OPP table
@@ -757,7 +736,6 @@ static struct opp_table *_add_opp_table(struct device *dev)
{
struct opp_table *opp_table;
struct opp_device *opp_dev;
- struct device_node *np;
int ret;
/* Check for existing table for 'dev' first */
@@ -781,20 +759,7 @@ static struct opp_table *_add_opp_table(struct device *dev)
return NULL;
}
- /*
- * Only required for backward compatibility with v1 bindings, but isn't
- * harmful for other cases. And so we do it unconditionally.
- */
- np = of_node_get(dev->of_node);
- if (np) {
- u32 val;
-
- if (!of_property_read_u32(np, "clock-latency", &val))
- opp_table->clock_latency_ns_max = val;
- of_property_read_u32(np, "voltage-tolerance",
- &opp_table->voltage_tolerance_v1);
- of_node_put(np);
- }
+ _of_init_opp_table(opp_table, dev);
/* Set regulator to a non-NULL error value */
opp_table->regulator = ERR_PTR(-ENXIO);
@@ -890,8 +855,8 @@ static void _kfree_opp_rcu(struct rcu_head *head)
* It is assumed that the caller holds required mutex for an RCU updater
* strategy.
*/
-static void _opp_remove(struct opp_table *opp_table,
- struct dev_pm_opp *opp, bool notify)
+void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
+ bool notify)
{
/*
* Notify the changes in the availability of the operable
@@ -952,8 +917,8 @@ unlock:
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
-static struct dev_pm_opp *_allocate_opp(struct device *dev,
- struct opp_table **opp_table)
+struct dev_pm_opp *_allocate_opp(struct device *dev,
+ struct opp_table **opp_table)
{
struct dev_pm_opp *opp;
@@ -989,8 +954,8 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
return true;
}
-static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
- struct opp_table *opp_table)
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
+ struct opp_table *opp_table)
{
struct dev_pm_opp *opp;
struct list_head *head = &opp_table->opp_list;
@@ -1066,8 +1031,8 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* Duplicate OPPs (both freq and volt are same) and !opp->available
* -ENOMEM Memory allocation failure
*/
-static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
- bool dynamic)
+int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
+ bool dynamic)
{
struct opp_table *opp_table;
struct dev_pm_opp *new_opp;
@@ -1112,83 +1077,6 @@ unlock:
return ret;
}
-/* TODO: Support multiple regulators */
-static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
- struct opp_table *opp_table)
-{
- u32 microvolt[3] = {0};
- u32 val;
- int count, ret;
- struct property *prop = NULL;
- char name[NAME_MAX];
-
- /* Search for "opp-microvolt-<name>" */
- if (opp_table->prop_name) {
- snprintf(name, sizeof(name), "opp-microvolt-%s",
- opp_table->prop_name);
- prop = of_find_property(opp->np, name, NULL);
- }
-
- if (!prop) {
- /* Search for "opp-microvolt" */
- sprintf(name, "opp-microvolt");
- prop = of_find_property(opp->np, name, NULL);
-
- /* Missing property isn't a problem, but an invalid entry is */
- if (!prop)
- return 0;
- }
-
- count = of_property_count_u32_elems(opp->np, name);
- if (count < 0) {
- dev_err(dev, "%s: Invalid %s property (%d)\n",
- __func__, name, count);
- return count;
- }
-
- /* There can be one or three elements here */
- if (count != 1 && count != 3) {
- dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
- __func__, name, count);
- return -EINVAL;
- }
-
- ret = of_property_read_u32_array(opp->np, name, microvolt, count);
- if (ret) {
- dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
- return -EINVAL;
- }
-
- opp->u_volt = microvolt[0];
-
- if (count == 1) {
- opp->u_volt_min = opp->u_volt;
- opp->u_volt_max = opp->u_volt;
- } else {
- opp->u_volt_min = microvolt[1];
- opp->u_volt_max = microvolt[2];
- }
-
- /* Search for "opp-microamp-<name>" */
- prop = NULL;
- if (opp_table->prop_name) {
- snprintf(name, sizeof(name), "opp-microamp-%s",
- opp_table->prop_name);
- prop = of_find_property(opp->np, name, NULL);
- }
-
- if (!prop) {
- /* Search for "opp-microamp" */
- sprintf(name, "opp-microamp");
- prop = of_find_property(opp->np, name, NULL);
- }
-
- if (prop && !of_property_read_u32(opp->np, name, &val))
- opp->u_amp = val;
-
- return 0;
-}
-
/**
* dev_pm_opp_set_supported_hw() - Set supported platforms
* @dev: Device for which supported-hw has to be set.
@@ -1517,144 +1405,6 @@ unlock:
}
EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
-static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
- struct device_node *np)
-{
- unsigned int count = opp_table->supported_hw_count;
- u32 version;
- int ret;
-
- if (!opp_table->supported_hw)
- return true;
-
- while (count--) {
- ret = of_property_read_u32_index(np, "opp-supported-hw", count,
- &version);
- if (ret) {
- dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
- __func__, count, ret);
- return false;
- }
-
- /* Both of these are bitwise masks of the versions */
- if (!(version & opp_table->supported_hw[count]))
- return false;
- }
-
- return true;
-}
-
-/**
- * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
- * @dev: device for which we do this operation
- * @np: device node
- *
- * This function adds an opp definition to the opp table and returns status. The
- * opp can be controlled using dev_pm_opp_enable/disable functions and may be
- * removed by dev_pm_opp_remove.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- *
- * Return:
- * 0 On success OR
- * Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
- * Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
- * -EINVAL Failed parsing the OPP node
- */
-static int _opp_add_static_v2(struct device *dev, struct device_node *np)
-{
- struct opp_table *opp_table;
- struct dev_pm_opp *new_opp;
- u64 rate;
- u32 val;
- int ret;
-
- /* Hold our table modification lock here */
- mutex_lock(&opp_table_lock);
-
- new_opp = _allocate_opp(dev, &opp_table);
- if (!new_opp) {
- ret = -ENOMEM;
- goto unlock;
- }
-
- ret = of_property_read_u64(np, "opp-hz", &rate);
- if (ret < 0) {
- dev_err(dev, "%s: opp-hz not found\n", __func__);
- goto free_opp;
- }
-
- /* Check if the OPP supports hardware's hierarchy of versions or not */
- if (!_opp_is_supported(dev, opp_table, np)) {
- dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
- goto free_opp;
- }
-
- /*
- * Rate is defined as an unsigned long in clk API, and so casting
- * explicitly to its type. Must be fixed once rate is 64 bit
- * guaranteed in clk API.
- */
- new_opp->rate = (unsigned long)rate;
- new_opp->turbo = of_property_read_bool(np, "turbo-mode");
-
- new_opp->np = np;
- new_opp->dynamic = false;
- new_opp->available = true;
-
- if (!of_property_read_u32(np, "clock-latency-ns", &val))
- new_opp->clock_latency_ns = val;
-
- ret = opp_parse_supplies(new_opp, dev, opp_table);
- if (ret)
- goto free_opp;
-
- ret = _opp_add(dev, new_opp, opp_table);
- if (ret)
- goto free_opp;
-
- /* OPP to select on device suspend */
- if (of_property_read_bool(np, "opp-suspend")) {
- if (opp_table->suspend_opp) {
- dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
- __func__, opp_table->suspend_opp->rate,
- new_opp->rate);
- } else {
- new_opp->suspend = true;
- opp_table->suspend_opp = new_opp;
- }
- }
-
- if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
- opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
-
- mutex_unlock(&opp_table_lock);
-
- pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
- __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
- new_opp->u_volt_min, new_opp->u_volt_max,
- new_opp->clock_latency_ns);
-
- /*
- * Notify the changes in the availability of the operable
- * frequency/voltage list.
- */
- srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
- return 0;
-
-free_opp:
- _opp_remove(opp_table, new_opp, false);
-unlock:
- mutex_unlock(&opp_table_lock);
- return ret;
-}
-
/**
* dev_pm_opp_add() - Add an OPP table from a table definitions
* @dev: device for which we do this operation
@@ -1842,21 +1592,11 @@ struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
-#ifdef CONFIG_OF
-/**
- * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
- * entries
- * @dev: device pointer used to lookup OPP table.
- *
- * Free OPPs created using static entries present in DT.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function indirectly uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
+/*
+ * Free OPPs created using static entries present in DT, and also the
+ * dynamically added entries when the remove_all param is set.
*/
-void dev_pm_opp_of_remove_table(struct device *dev)
+void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
{
struct opp_table *opp_table;
struct dev_pm_opp *opp, *tmp;
@@ -1881,7 +1621,7 @@ void dev_pm_opp_of_remove_table(struct device *dev)
if (list_is_singular(&opp_table->dev_list)) {
/* Free static OPPs */
list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
- if (!opp->dynamic)
+ if (remove_all || !opp->dynamic)
_opp_remove(opp_table, opp, true);
}
} else {
@@ -1891,160 +1631,22 @@ void dev_pm_opp_of_remove_table(struct device *dev)
unlock:
mutex_unlock(&opp_table_lock);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-
-/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *_of_get_opp_desc_node(struct device *dev)
-{
- /*
- * TODO: Support for multiple OPP tables.
- *
- * There should be only ONE phandle present in "operating-points-v2"
- * property.
- */
-
- return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
-}
-
-/* Initializes OPP tables based on new bindings */
-static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
-{
- struct device_node *np;
- struct opp_table *opp_table;
- int ret = 0, count = 0;
-
- mutex_lock(&opp_table_lock);
-
- opp_table = _managed_opp(opp_np);
- if (opp_table) {
- /* OPPs are already managed */
- if (!_add_opp_dev(dev, opp_table))
- ret = -ENOMEM;
- mutex_unlock(&opp_table_lock);
- return ret;
- }
- mutex_unlock(&opp_table_lock);
-
- /* We have opp-table node now, iterate over it and add OPPs */
- for_each_available_child_of_node(opp_np, np) {
- count++;
-
- ret = _opp_add_static_v2(dev, np);
- if (ret) {
- dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
- ret);
- goto free_table;
- }
- }
-
- /* There should be one of more OPP defined */
- if (WARN_ON(!count))
- return -ENOENT;
-
- mutex_lock(&opp_table_lock);
-
- opp_table = _find_opp_table(dev);
- if (WARN_ON(IS_ERR(opp_table))) {
- ret = PTR_ERR(opp_table);
- mutex_unlock(&opp_table_lock);
- goto free_table;
- }
-
- opp_table->np = opp_np;
- opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
-
- mutex_unlock(&opp_table_lock);
-
- return 0;
-
-free_table:
- dev_pm_opp_of_remove_table(dev);
-
- return ret;
-}
-
-/* Initializes OPP tables based on old-deprecated bindings */
-static int _of_add_opp_table_v1(struct device *dev)
-{
- const struct property *prop;
- const __be32 *val;
- int nr;
-
- prop = of_find_property(dev->of_node, "operating-points", NULL);
- if (!prop)
- return -ENODEV;
- if (!prop->value)
- return -ENODATA;
-
- /*
- * Each OPP is a set of tuples consisting of frequency and
- * voltage like <freq-kHz vol-uV>.
- */
- nr = prop->length / sizeof(u32);
- if (nr % 2) {
- dev_err(dev, "%s: Invalid OPP table\n", __func__);
- return -EINVAL;
- }
-
- val = prop->value;
- while (nr) {
- unsigned long freq = be32_to_cpup(val++) * 1000;
- unsigned long volt = be32_to_cpup(val++);
-
- if (_opp_add_v1(dev, freq, volt, false))
- dev_warn(dev, "%s: Failed to add OPP %ld\n",
- __func__, freq);
- nr -= 2;
- }
-
- return 0;
-}
/**
- * dev_pm_opp_of_add_table() - Initialize opp table from device tree
+ * dev_pm_opp_remove_table() - Free all OPPs associated with the device
* @dev: device pointer used to lookup OPP table.
*
- * Register the initial OPP table with the OPP library for given device.
+ * Free both the OPPs created using static entries present in DT and the
+ * dynamically added entries.
*
* Locking: The internal opp_table and opp structures are RCU protected.
* Hence this function indirectly uses RCU updater strategy with mutex locks
* to keep the integrity of the internal data structures. Callers should ensure
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
- *
- * Return:
- * 0 On success OR
- * Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST Freq are same and volt are different OR
- * Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM Memory allocation failure
- * -ENODEV when 'operating-points' property is not found or is invalid data
- * in device node.
- * -ENODATA when empty 'operating-points' property is found
- * -EINVAL when invalid entries are found in opp-v2 table
*/
-int dev_pm_opp_of_add_table(struct device *dev)
+void dev_pm_opp_remove_table(struct device *dev)
{
- struct device_node *opp_np;
- int ret;
-
- /*
- * OPPs have two version of bindings now. The older one is deprecated,
- * try for the new binding first.
- */
- opp_np = _of_get_opp_desc_node(dev);
- if (!opp_np) {
- /*
- * Try old-deprecated bindings for backward compatibility with
- * older dtbs.
- */
- return _of_add_opp_table_v1(dev);
- }
-
- ret = _of_add_opp_table_v2(dev, opp_np);
- of_node_put(opp_np);
-
- return ret;
+ _dev_pm_opp_remove_table(dev, true);
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
-#endif
+EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
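
A usage sketch for the newly exported dev_pm_opp_remove_table() (driver, frequencies and voltages are hypothetical; error handling trimmed):

#include <linux/pm_opp.h>

static int my_probe_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 1000000000, 975000);	/* 1 GHz @ 975 mV */
	if (ret)
		return ret;

	return dev_pm_opp_add(dev, 1200000000, 1075000);
}

static void my_remove_opps(struct device *dev)
{
	/* Unlike the OF variant, this also frees dynamically added OPPs. */
	dev_pm_opp_remove_table(dev);
}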
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index ba2bdbd93..8c3434bdb 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -18,7 +18,6 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
-#include <linux/of.h>
#include <linux/slab.h>
#include "opp.h"
@@ -119,8 +118,66 @@ void dev_pm_opp_free_cpufreq_table(struct device *dev,
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
-/* Required only for V1 bindings, as v2 can manage it from DT itself */
-int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
+{
+ struct device *cpu_dev;
+ int cpu;
+
+ WARN_ON(cpumask_empty(cpumask));
+
+ for_each_cpu(cpu, cpumask) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ cpu);
+ continue;
+ }
+
+ if (of)
+ dev_pm_opp_of_remove_table(cpu_dev);
+ else
+ dev_pm_opp_remove_table(cpu_dev);
+ }
+}
+
+/**
+ * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask
+ * @cpumask: cpumask for which OPP table needs to be removed
+ *
+ * This removes the OPP tables for CPUs present in the @cpumask.
+ * This should be used to remove all the OPP entries associated with
+ * the cpus in @cpumask.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
+{
+ _dev_pm_opp_cpumask_remove_table(cpumask, false);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
+
+/**
+ * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by few CPUs
+ * @cpu_dev: CPU device for which we do this operation
+ * @cpumask: cpumask of the CPUs which share the OPP table with @cpu_dev
+ *
+ * This marks OPP table of the @cpu_dev as shared by the CPUs present in
+ * @cpumask.
+ *
+ * Returns -ENODEV if OPP table isn't already present.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
struct opp_device *opp_dev;
struct opp_table *opp_table;
@@ -131,7 +188,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
opp_table = _find_opp_table(cpu_dev);
if (IS_ERR(opp_table)) {
- ret = -EINVAL;
+ ret = PTR_ERR(opp_table);
goto unlock;
}
@@ -152,6 +209,9 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
__func__, cpu);
continue;
}
+
+ /* Mark opp-table as multiple CPUs are sharing it now */
+ opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
}
unlock:
mutex_unlock(&opp_table_lock);
@@ -160,112 +220,53 @@ unlock:
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
-#ifdef CONFIG_OF
-void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
+/**
+ * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
+ * @cpu_dev: CPU device for which we do this operation
+ * @cpumask: cpumask to update with information of sharing CPUs
+ *
+ * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
+ *
+ * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP
+ * table's status is access-unknown.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
- struct device *cpu_dev;
- int cpu;
-
- WARN_ON(cpumask_empty(cpumask));
+ struct opp_device *opp_dev;
+ struct opp_table *opp_table;
+ int ret = 0;
- for_each_cpu(cpu, cpumask) {
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- cpu);
- continue;
- }
+ mutex_lock(&opp_table_lock);
- dev_pm_opp_of_remove_table(cpu_dev);
+ opp_table = _find_opp_table(cpu_dev);
+ if (IS_ERR(opp_table)) {
+ ret = PTR_ERR(opp_table);
+ goto unlock;
}
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
-
-int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
-{
- struct device *cpu_dev;
- int cpu, ret = 0;
- WARN_ON(cpumask_empty(cpumask));
-
- for_each_cpu(cpu, cpumask) {
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- cpu);
- continue;
- }
-
- ret = dev_pm_opp_of_add_table(cpu_dev);
- if (ret) {
- pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
- __func__, cpu, ret);
-
- /* Free all other OPPs */
- dev_pm_opp_of_cpumask_remove_table(cpumask);
- break;
- }
+ if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
+ ret = -EINVAL;
+ goto unlock;
}
- return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
-
-/*
- * Works only for OPP v2 bindings.
- *
- * Returns -ENOENT if operating-points-v2 bindings aren't supported.
- */
-int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
-{
- struct device_node *np, *tmp_np;
- struct device *tcpu_dev;
- int cpu, ret = 0;
+ cpumask_clear(cpumask);
- /* Get OPP descriptor node */
- np = _of_get_opp_desc_node(cpu_dev);
- if (!np) {
- dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
- return -ENOENT;
+ if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
+ list_for_each_entry(opp_dev, &opp_table->dev_list, node)
+ cpumask_set_cpu(opp_dev->dev->id, cpumask);
+ } else {
+ cpumask_set_cpu(cpu_dev->id, cpumask);
}
- cpumask_set_cpu(cpu_dev->id, cpumask);
-
- /* OPPs are shared ? */
- if (!of_property_read_bool(np, "opp-shared"))
- goto put_cpu_node;
-
- for_each_possible_cpu(cpu) {
- if (cpu == cpu_dev->id)
- continue;
-
- tcpu_dev = get_cpu_device(cpu);
- if (!tcpu_dev) {
- dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
- __func__, cpu);
- ret = -ENODEV;
- goto put_cpu_node;
- }
-
- /* Get OPP descriptor node */
- tmp_np = _of_get_opp_desc_node(tcpu_dev);
- if (!tmp_np) {
- dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
- __func__);
- ret = -ENOENT;
- goto put_cpu_node;
- }
-
- /* CPUs are sharing opp node */
- if (np == tmp_np)
- cpumask_set_cpu(cpu, cpumask);
-
- of_node_put(tmp_np);
- }
+unlock:
+ mutex_unlock(&opp_table_lock);
-put_cpu_node:
- of_node_put(np);
return ret;
}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
-#endif
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
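
A sketch of the reworked sharing helpers in use (my_* names hypothetical; a cpufreq driver would typically pass policy->cpus as the mask):

#include <linux/cpumask.h>
#include <linux/pm_opp.h>

static int my_setup_sharing(struct device *cpu_dev, struct cpumask *cpus)
{
	int ret;

	/* Fails with -ENODEV if cpu_dev has no OPP table yet. */
	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpus);
	if (ret)
		return ret;

	/*
	 * Read the sharing info back; -EINVAL means the table's access
	 * mode is still OPP_TABLE_ACCESS_UNKNOWN.
	 */
	return dev_pm_opp_get_sharing_cpus(cpu_dev, cpus);
}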
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
new file mode 100644
index 000000000..1dfd3dd92
--- /dev/null
+++ b/drivers/base/power/opp/of.c
@@ -0,0 +1,597 @@
+/*
+ * Generic OPP OF helpers
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Romit Dasgupta
+ * Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/export.h>
+
+#include "opp.h"
+
+static struct opp_table *_managed_opp(const struct device_node *np)
+{
+ struct opp_table *opp_table;
+
+ list_for_each_entry_rcu(opp_table, &opp_tables, node) {
+ if (opp_table->np == np) {
+ /*
+ * Multiple devices can point to the same OPP table and
+ * so will have same node-pointer, np.
+ *
+ * But the OPPs will be considered as shared only if the
+ * OPP table contains a "opp-shared" property.
+ */
+ if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
+ return opp_table;
+
+ return NULL;
+ }
+ }
+
+ return NULL;
+}
+
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
+{
+ struct device_node *np;
+
+ /*
+ * Only required for backward compatibility with v1 bindings, but isn't
+ * harmful for other cases. And so we do it unconditionally.
+ */
+ np = of_node_get(dev->of_node);
+ if (np) {
+ u32 val;
+
+ if (!of_property_read_u32(np, "clock-latency", &val))
+ opp_table->clock_latency_ns_max = val;
+ of_property_read_u32(np, "voltage-tolerance",
+ &opp_table->voltage_tolerance_v1);
+ of_node_put(np);
+ }
+}
+
+static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
+ struct device_node *np)
+{
+ unsigned int count = opp_table->supported_hw_count;
+ u32 version;
+ int ret;
+
+ if (!opp_table->supported_hw)
+ return true;
+
+ while (count--) {
+ ret = of_property_read_u32_index(np, "opp-supported-hw", count,
+ &version);
+ if (ret) {
+ dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
+ __func__, count, ret);
+ return false;
+ }
+
+ /* Both of these are bitwise masks of the versions */
+ if (!(version & opp_table->supported_hw[count]))
+ return false;
+ }
+
+ return true;
+}
+
+/* TODO: Support multiple regulators */
+static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+ struct opp_table *opp_table)
+{
+ u32 microvolt[3] = {0};
+ u32 val;
+ int count, ret;
+ struct property *prop = NULL;
+ char name[NAME_MAX];
+
+ /* Search for "opp-microvolt-<name>" */
+ if (opp_table->prop_name) {
+ snprintf(name, sizeof(name), "opp-microvolt-%s",
+ opp_table->prop_name);
+ prop = of_find_property(opp->np, name, NULL);
+ }
+
+ if (!prop) {
+ /* Search for "opp-microvolt" */
+ sprintf(name, "opp-microvolt");
+ prop = of_find_property(opp->np, name, NULL);
+
+ /* Missing property isn't a problem, but an invalid entry is */
+ if (!prop)
+ return 0;
+ }
+
+ count = of_property_count_u32_elems(opp->np, name);
+ if (count < 0) {
+ dev_err(dev, "%s: Invalid %s property (%d)\n",
+ __func__, name, count);
+ return count;
+ }
+
+ /* There can be one or three elements here */
+ if (count != 1 && count != 3) {
+ dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
+ __func__, name, count);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_array(opp->np, name, microvolt, count);
+ if (ret) {
+ dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
+ return -EINVAL;
+ }
+
+ opp->u_volt = microvolt[0];
+
+ if (count == 1) {
+ opp->u_volt_min = opp->u_volt;
+ opp->u_volt_max = opp->u_volt;
+ } else {
+ opp->u_volt_min = microvolt[1];
+ opp->u_volt_max = microvolt[2];
+ }
+
+ /* Search for "opp-microamp-<name>" */
+ prop = NULL;
+ if (opp_table->prop_name) {
+ snprintf(name, sizeof(name), "opp-microamp-%s",
+ opp_table->prop_name);
+ prop = of_find_property(opp->np, name, NULL);
+ }
+
+ if (!prop) {
+ /* Search for "opp-microamp" */
+ sprintf(name, "opp-microamp");
+ prop = of_find_property(opp->np, name, NULL);
+ }
+
+ if (prop && !of_property_read_u32(opp->np, name, &val))
+ opp->u_amp = val;
+
+ return 0;
+}
+
+/**
+ * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
+ * entries
+ * @dev: device pointer used to lookup OPP table.
+ *
+ * Free OPPs created using static entries present in DT.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_of_remove_table(struct device *dev)
+{
+ _dev_pm_opp_remove_table(dev, false);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
+
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *_of_get_opp_desc_node(struct device *dev)
+{
+ /*
+ * TODO: Support for multiple OPP tables.
+ *
+ * There should be only ONE phandle present in "operating-points-v2"
+ * property.
+ */
+
+ return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
+}
+
+/**
+ * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
+ * @dev: device for which we do this operation
+ * @np: device node
+ *
+ * This function adds an opp definition to the opp table and returns status. The
+ * opp can be controlled using dev_pm_opp_enable/disable functions and may be
+ * removed by dev_pm_opp_remove.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq is same and volt is different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ * -EINVAL Failed parsing the OPP node
+ */
+static int _opp_add_static_v2(struct device *dev, struct device_node *np)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *new_opp;
+ u64 rate;
+ u32 val;
+ int ret;
+
+ /* Hold our table modification lock here */
+ mutex_lock(&opp_table_lock);
+
+ new_opp = _allocate_opp(dev, &opp_table);
+ if (!new_opp) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = of_property_read_u64(np, "opp-hz", &rate);
+ if (ret < 0) {
+ dev_err(dev, "%s: opp-hz not found\n", __func__);
+ goto free_opp;
+ }
+
+ /* Check if the OPP supports hardware's hierarchy of versions or not */
+ if (!_opp_is_supported(dev, opp_table, np)) {
+ dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
+ goto free_opp;
+ }
+
+ /*
+ * Rate is defined as an unsigned long in the clk API, so cast
+ * explicitly to that type. This must be fixed once a 64 bit rate is
+ * guaranteed by the clk API.
+ */
+ new_opp->rate = (unsigned long)rate;
+ new_opp->turbo = of_property_read_bool(np, "turbo-mode");
+
+ new_opp->np = np;
+ new_opp->dynamic = false;
+ new_opp->available = true;
+
+ if (!of_property_read_u32(np, "clock-latency-ns", &val))
+ new_opp->clock_latency_ns = val;
+
+ ret = opp_parse_supplies(new_opp, dev, opp_table);
+ if (ret)
+ goto free_opp;
+
+ ret = _opp_add(dev, new_opp, opp_table);
+ if (ret)
+ goto free_opp;
+
+ /* OPP to select on device suspend */
+ if (of_property_read_bool(np, "opp-suspend")) {
+ if (opp_table->suspend_opp) {
+ dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
+ __func__, opp_table->suspend_opp->rate,
+ new_opp->rate);
+ } else {
+ new_opp->suspend = true;
+ opp_table->suspend_opp = new_opp;
+ }
+ }
+
+ if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
+ opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
+
+ mutex_unlock(&opp_table_lock);
+
+ pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
+ __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
+ new_opp->u_volt_min, new_opp->u_volt_max,
+ new_opp->clock_latency_ns);
+
+ /*
+ * Notify the changes in the availability of the operable
+ * frequency/voltage list.
+ */
+ srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
+ return 0;
+
+free_opp:
+ _opp_remove(opp_table, new_opp, false);
+unlock:
+ mutex_unlock(&opp_table_lock);
+ return ret;
+}
+
+/* Initializes OPP tables based on new bindings */
+static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
+{
+ struct device_node *np;
+ struct opp_table *opp_table;
+ int ret = 0, count = 0;
+
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _managed_opp(opp_np);
+ if (opp_table) {
+ /* OPPs are already managed */
+ if (!_add_opp_dev(dev, opp_table))
+ ret = -ENOMEM;
+ mutex_unlock(&opp_table_lock);
+ return ret;
+ }
+ mutex_unlock(&opp_table_lock);
+
+ /* We have opp-table node now, iterate over it and add OPPs */
+ for_each_available_child_of_node(opp_np, np) {
+ count++;
+
+ ret = _opp_add_static_v2(dev, np);
+ if (ret) {
+ dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+ ret);
+ goto free_table;
+ }
+ }
+
+ /* There should be one or more OPPs defined */
+ if (WARN_ON(!count))
+ return -ENOENT;
+
+ mutex_lock(&opp_table_lock);
+
+ opp_table = _find_opp_table(dev);
+ if (WARN_ON(IS_ERR(opp_table))) {
+ ret = PTR_ERR(opp_table);
+ mutex_unlock(&opp_table_lock);
+ goto free_table;
+ }
+
+ opp_table->np = opp_np;
+ if (of_property_read_bool(opp_np, "opp-shared"))
+ opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
+ else
+ opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
+
+ mutex_unlock(&opp_table_lock);
+
+ return 0;
+
+free_table:
+ dev_pm_opp_of_remove_table(dev);
+
+ return ret;
+}
+
+/* Initializes OPP tables based on old-deprecated bindings */
+static int _of_add_opp_table_v1(struct device *dev)
+{
+ const struct property *prop;
+ const __be32 *val;
+ int nr;
+
+ prop = of_find_property(dev->of_node, "operating-points", NULL);
+ if (!prop)
+ return -ENODEV;
+ if (!prop->value)
+ return -ENODATA;
+
+ /*
+ * Each OPP is a set of tuples consisting of frequency and
+ * voltage like <freq-kHz vol-uV>.
+ */
+ nr = prop->length / sizeof(u32);
+ if (nr % 2) {
+ dev_err(dev, "%s: Invalid OPP table\n", __func__);
+ return -EINVAL;
+ }
+
+ val = prop->value;
+ while (nr) {
+ unsigned long freq = be32_to_cpup(val++) * 1000;
+ unsigned long volt = be32_to_cpup(val++);
+
+ if (_opp_add_v1(dev, freq, volt, false))
+ dev_warn(dev, "%s: Failed to add OPP %ld\n",
+ __func__, freq);
+ nr -= 2;
+ }
+
+ return 0;
+}
+
+/**
+ * dev_pm_opp_of_add_table() - Initialize opp table from device tree
+ * @dev: device pointer used to lookup OPP table.
+ *
+ * Register the initial OPP table with the OPP library for given device.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function indirectly uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0 On success OR
+ * Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST Freq is same and volt is different OR
+ * Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM Memory allocation failure
+ * -ENODEV when 'operating-points' property is not found or contains invalid
+ * data in device node.
+ * -ENODATA when empty 'operating-points' property is found
+ * -EINVAL when invalid entries are found in opp-v2 table
+ */
+int dev_pm_opp_of_add_table(struct device *dev)
+{
+ struct device_node *opp_np;
+ int ret;
+
+ /*
+ * OPPs have two versions of bindings now. The older one is deprecated,
+ * try for the new binding first.
+ */
+ opp_np = _of_get_opp_desc_node(dev);
+ if (!opp_np) {
+ /*
+ * Try old-deprecated bindings for backward compatibility with
+ * older dtbs.
+ */
+ return _of_add_opp_table_v1(dev);
+ }
+
+ ret = _of_add_opp_table_v2(dev, opp_np);
+ of_node_put(opp_np);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
+
+/* CPU device specific helpers */
+
+/**
+ * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
+ * @cpumask: cpumask for which OPP table needs to be removed
+ *
+ * This removes the OPP tables for CPUs present in the @cpumask.
+ * This should be used only to remove static entries created from DT.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
+{
+ _dev_pm_opp_cpumask_remove_table(cpumask, true);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
+
+/**
+ * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
+ * @cpumask: cpumask for which OPP table needs to be added.
+ *
+ * This adds the OPP tables for CPUs present in the @cpumask.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
+{
+ struct device *cpu_dev;
+ int cpu, ret = 0;
+
+ WARN_ON(cpumask_empty(cpumask));
+
+ for_each_cpu(cpu, cpumask) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ cpu);
+ continue;
+ }
+
+ ret = dev_pm_opp_of_add_table(cpu_dev);
+ if (ret) {
+ pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
+ __func__, cpu, ret);
+
+ /* Free all other OPPs */
+ dev_pm_opp_of_cpumask_remove_table(cpumask);
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
+
+/**
+ * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
+ * @cpu_dev using operating-points-v2
+ * bindings.
+ *
+ * @cpu_dev: CPU device for which we do this operation
+ * @cpumask: cpumask to update with information of sharing CPUs
+ *
+ * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
+ *
+ * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
+ struct cpumask *cpumask)
+{
+ struct device_node *np, *tmp_np;
+ struct device *tcpu_dev;
+ int cpu, ret = 0;
+
+ /* Get OPP descriptor node */
+ np = _of_get_opp_desc_node(cpu_dev);
+ if (!np) {
+ dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
+ return -ENOENT;
+ }
+
+ cpumask_set_cpu(cpu_dev->id, cpumask);
+
+ /* OPPs are shared? */
+ if (!of_property_read_bool(np, "opp-shared"))
+ goto put_cpu_node;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ tcpu_dev = get_cpu_device(cpu);
+ if (!tcpu_dev) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ __func__, cpu);
+ ret = -ENODEV;
+ goto put_cpu_node;
+ }
+
+ /* Get OPP descriptor node */
+ tmp_np = _of_get_opp_desc_node(tcpu_dev);
+ if (!tmp_np) {
+ dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
+ __func__);
+ ret = -ENOENT;
+ goto put_cpu_node;
+ }
+
+ /* CPUs are sharing opp node */
+ if (np == tmp_np)
+ cpumask_set_cpu(cpu, cpumask);
+
+ of_node_put(tmp_np);
+ }
+
+put_cpu_node:
+ of_node_put(np);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
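
Putting the OF helpers together, a CPU driver built on operating-points-v2 might look roughly like this (a sketch; my_* names hypothetical, error paths abbreviated):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/pm_opp.h>

static int my_init_cpu_opps(unsigned int cpu, struct cpumask *shared)
{
	struct device *cpu_dev = get_cpu_device(cpu);
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	ret = dev_pm_opp_of_add_table(cpu_dev);	/* parse OPPs from DT */
	if (ret)
		return ret;

	/* Which CPUs share this table ("opp-shared" in the DT)? */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, shared);
	if (ret)
		dev_pm_opp_of_remove_table(cpu_dev);

	return ret;
}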
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index f67f806fc..fabd5ca1a 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -28,6 +28,8 @@ struct regulator;
/* Lock to allow exclusive modification to the device and opp lists */
extern struct mutex opp_table_lock;
+extern struct list_head opp_tables;
+
/*
* Internal data structure organization with the OPP layer library is as
* follows:
@@ -117,6 +119,12 @@ struct opp_device {
#endif
};
+enum opp_table_access {
+ OPP_TABLE_ACCESS_UNKNOWN = 0,
+ OPP_TABLE_ACCESS_EXCLUSIVE = 1,
+ OPP_TABLE_ACCESS_SHARED = 2,
+};
+
/**
* struct opp_table - Device opp structure
* @node: table node - contains the devices with OPPs that
@@ -164,7 +172,7 @@ struct opp_table {
/* For backward compatibility with v1 bindings */
unsigned int voltage_tolerance_v1;
- bool shared_opp;
+ enum opp_table_access shared_opp;
struct dev_pm_opp *suspend_opp;
unsigned int *supported_hw;
@@ -183,6 +191,18 @@ struct opp_table {
struct opp_table *_find_opp_table(struct device *dev);
struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
struct device_node *_of_get_opp_desc_node(struct device *dev);
+void _dev_pm_opp_remove_table(struct device *dev, bool remove_all);
+struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table);
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
+void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp, bool notify);
+int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, bool dynamic);
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of);
+
+#ifdef CONFIG_OF
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev);
+#else
+static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) {}
+#endif
#ifdef CONFIG_DEBUG_FS
void opp_debug_remove_one(struct dev_pm_opp *opp);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 7f692accd..f38c21de2 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -19,6 +19,11 @@
#include <linux/etherdevice.h>
#include <linux/phy.h>
+struct property_set {
+ struct fwnode_handle fwnode;
+ struct property_entry *properties;
+};
+
static inline bool is_pset_node(struct fwnode_handle *fwnode)
{
return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA;
@@ -801,14 +806,14 @@ static struct property_set *pset_copy_set(const struct property_set *pset)
}
/**
- * device_remove_property_set - Remove properties from a device object.
+ * device_remove_properties - Remove properties from a device object.
* @dev: Device whose properties to remove.
*
* The function removes properties previously associated to the device
- * secondary firmware node with device_add_property_set(). Memory allocated
+ * secondary firmware node with device_add_properties(). Memory allocated
* to the properties will also be released.
*/
-void device_remove_property_set(struct device *dev)
+void device_remove_properties(struct device *dev)
{
struct fwnode_handle *fwnode;
@@ -831,24 +836,27 @@ void device_remove_property_set(struct device *dev)
}
}
}
-EXPORT_SYMBOL_GPL(device_remove_property_set);
+EXPORT_SYMBOL_GPL(device_remove_properties);
/**
- * device_add_property_set - Add a collection of properties to a device object.
+ * device_add_properties - Add a collection of properties to a device object.
* @dev: Device to add properties to.
- * @pset: Collection of properties to add.
+ * @properties: Collection of properties to add.
*
- * Associate a collection of device properties represented by @pset with @dev
- * as its secondary firmware node. The function takes a copy of @pset.
+ * Associate a collection of device properties represented by @properties with
+ * @dev as its secondary firmware node. The function takes a copy of
+ * @properties.
*/
-int device_add_property_set(struct device *dev, const struct property_set *pset)
+int device_add_properties(struct device *dev, struct property_entry *properties)
{
- struct property_set *p;
+ struct property_set *p, pset;
- if (!pset)
+ if (!properties)
return -EINVAL;
- p = pset_copy_set(pset);
+ pset.properties = properties;
+
+ p = pset_copy_set(&pset);
if (IS_ERR(p))
return PTR_ERR(p);
@@ -856,7 +864,7 @@ int device_add_property_set(struct device *dev, const struct property_set *pset)
set_secondary_fwnode(dev, &p->fwnode);
return 0;
}
-EXPORT_SYMBOL_GPL(device_add_property_set);
+EXPORT_SYMBOL_GPL(device_add_properties);
/**
* device_get_next_child_node - Return the next child node handle for a device
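
With the signature change, callers now hand over a bare zero-terminated property_entry array instead of building a property_set themselves; roughly (a sketch assuming the PROPERTY_ENTRY_* helpers from linux/property.h; device and values hypothetical):

#include <linux/property.h>

static struct property_entry my_props[] = {
	PROPERTY_ENTRY_U32("clock-frequency", 400000),
	PROPERTY_ENTRY_STRING("label", "my-device"),
	{ },	/* zero-terminated */
};

static int my_attach_props(struct device *dev)
{
	/* The core wraps and copies the array internally now. */
	return device_add_properties(dev, my_props);
}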
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index 3ee72550b..4d2e50bfc 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -27,7 +27,7 @@ static int regcache_flat_init(struct regmap *map)
int i;
unsigned int *cache;
- if (!map || map->reg_stride_order < 0)
+ if (!map || map->reg_stride_order < 0 || !map->max_register)
return -EINVAL;
map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 4170b7d95..df7ff7290 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -529,7 +529,7 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
* regcache_cache_bypass: Put a register map into cache bypass mode
*
* @map: map to configure
- * @cache_bypass: flag if changes should not be written to the hardware
+ * @cache_bypass: flag if changes should not be written to the cache
*
* When a register map is marked with the cache bypass option, writes
* to the register map API will only update the hardware and not the
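
Usage of the bypass flag, with the semantics the corrected comment describes (register name hypothetical; map is an initialised regmap):

#include <linux/regmap.h>

static void my_trigger(struct regmap *map)
{
	/* Write straight to silicon without dirtying the cache. */
	regcache_cache_bypass(map, true);
	regmap_write(map, MY_TRIGGER_REG, 0x1);
	regcache_cache_bypass(map, false);
}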
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index eda09090c..f642c4264 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -8,8 +8,6 @@
#include <linux/bcma/bcma.h>
#include <linux/delay.h>
-#define BCMA_CORE_SIZE 0x1000
-
#define bcma_err(bus, fmt, ...) \
pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_warn(bus, fmt, ...) \
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 04d706ca5..35b13a08c 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -146,7 +146,6 @@ int bcma_sflash_init(struct bcma_drv_cc *cc)
return -ENOTSUPP;
}
- sflash->window = BCMA_SOC_FLASH2;
sflash->blocksize = e->blocksize;
sflash->numblocks = e->numblocks;
sflash->size = sflash->blocksize * sflash->numblocks;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 437b3a822..ab19adb07 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -861,7 +861,7 @@ rqbiocnt(struct request *r)
* discussion.
*
* We cannot use get_page in the workaround, because it insists on a
- * positive page count as a precondition. So we use _count directly.
+ * positive page count as a precondition. So we use _refcount directly.
*/
static void
bio_pageinc(struct bio *bio)
@@ -1750,7 +1750,7 @@ aoecmd_init(void)
int ret;
/* get_zeroed_page returns page with ref count 1 */
- p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+ p = (void *) get_zeroed_page(GFP_KERNEL);
if (!p)
return -ENOMEM;
empty_page = virt_to_page(p);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 51a071e32..c04bd9bc3 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -381,7 +381,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
#ifdef CONFIG_BLK_DEV_RAM_DAX
static long brd_direct_access(struct block_device *bdev, sector_t sector,
- void __pmem **kaddr, pfn_t *pfn)
+ void __pmem **kaddr, pfn_t *pfn, long size)
{
struct brd_device *brd = bdev->bd_disk->private_data;
struct page *page;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index fa209773d..2ba1494b2 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2761,7 +2761,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
q->backing_dev_info.congested_data = device;
blk_queue_make_request(q, drbd_make_request);
- blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+ blk_queue_write_cache(q, true, true);
/* Setting the max_hw_sectors to an odd value of 8kibyte here
This triggers a max_bio_size message upon first attach or connect */
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1fd1dcceb..0bac9c824 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -3633,14 +3633,15 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
goto nla_put_failure;
if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
nla_put_u32(skb, T_current_state, device->state.i) ||
- nla_put_u64(skb, T_ed_uuid, device->ed_uuid) ||
- nla_put_u64(skb, T_capacity, drbd_get_capacity(device->this_bdev)) ||
- nla_put_u64(skb, T_send_cnt, device->send_cnt) ||
- nla_put_u64(skb, T_recv_cnt, device->recv_cnt) ||
- nla_put_u64(skb, T_read_cnt, device->read_cnt) ||
- nla_put_u64(skb, T_writ_cnt, device->writ_cnt) ||
- nla_put_u64(skb, T_al_writ_cnt, device->al_writ_cnt) ||
- nla_put_u64(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
+ nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
+ nla_put_u64_0pad(skb, T_capacity,
+ drbd_get_capacity(device->this_bdev)) ||
+ nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
+ nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
+ nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
+ nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
+ nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
+ nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
@@ -3657,13 +3658,16 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
goto nla_put_failure;
if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
- nla_put_u64(skb, T_bits_total, drbd_bm_bits(device)) ||
- nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(device)))
+ nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
+ nla_put_u64_0pad(skb, T_bits_oos,
+ drbd_bm_total_weight(device)))
goto nla_put_failure;
if (C_SYNC_SOURCE <= device->state.conn &&
C_PAUSED_SYNC_T >= device->state.conn) {
- if (nla_put_u64(skb, T_bits_rs_total, device->rs_total) ||
- nla_put_u64(skb, T_bits_rs_failed, device->rs_failed))
+ if (nla_put_u64_0pad(skb, T_bits_rs_total,
+ device->rs_total) ||
+ nla_put_u64_0pad(skb, T_bits_rs_failed,
+ device->rs_failed))
goto nla_put_failure;
}
}
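
nla_put_u64_0pad() emits the 8-byte value without inserting a separate pad attribute, which suits drbd's fixed genetlink layout; the call shape matches the hunk above (attribute ID hypothetical):

	if (nla_put_u64_0pad(skb, T_my_counter, counter))
		goto nla_put_failure;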
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ba9e4a722..7339e65f6 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -961,7 +961,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
- blk_queue_flush(lo->lo_queue, REQ_FLUSH);
+ blk_queue_write_cache(lo->lo_queue, true, false);
loop_update_dio(lo);
set_capacity(lo->lo_disk, size);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 25824c169..6053e4659 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3000,14 +3000,14 @@ restart_eh:
"Completion workers still active!");
spin_lock(dd->queue->queue_lock);
- blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ blk_mq_tagset_busy_iter(&dd->tags,
mtip_queue_cmd, dd);
spin_unlock(dd->queue->queue_lock);
set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
if (mtip_device_reset(dd))
- blk_mq_all_tag_busy_iter(*dd->tags.tags,
+ blk_mq_tagset_busy_iter(&dd->tags,
mtip_abort_cmd, dd);
clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);
@@ -4023,12 +4023,6 @@ skip_create_disk:
blk_queue_io_min(dd->queue, 4096);
blk_queue_bounce_limit(dd->queue, dd->pdev->dma_mask);
- /*
- * write back cache is not supported in the device. FUA depends on
- * write back cache support, hence setting flush support to zero.
- */
- blk_queue_flush(dd->queue, 0);
-
/* Signal trim support */
if (dd->trim_supp == true) {
set_bit(QUEUE_FLAG_DISCARD, &dd->queue->queue_flags);
@@ -4174,7 +4168,7 @@ static int mtip_block_remove(struct driver_data *dd)
blk_mq_freeze_queue_start(dd->queue);
blk_mq_stop_hw_queues(dd->queue);
- blk_mq_all_tag_busy_iter(dd->tags.tags[0], mtip_no_dev_cleanup, dd);
+ blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
/*
* Delete our gendisk structure. This also removes the device
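
blk_mq_tagset_busy_iter() visits every in-flight request across the whole tag set, instead of one hardware queue's tags; a sketch of the callback shape it expects in this kernel (my_* names hypothetical):

static void my_abort_rq(struct request *rq, void *data, bool reserved)
{
	/* e.g. fail the request outright */
	blk_mq_complete_request(rq, -EIO);
}

static void my_abort_all(struct blk_mq_tag_set *set)
{
	blk_mq_tagset_busy_iter(set, my_abort_rq, NULL);
}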
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 08afbc7a2..6a48ed419 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -693,9 +693,9 @@ static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
if (nbd->flags & NBD_FLAG_SEND_TRIM)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
if (nbd->flags & NBD_FLAG_SEND_FLUSH)
- blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
+ blk_queue_write_cache(nbd->disk->queue, true, false);
else
- blk_queue_flush(nbd->disk->queue, 0);
+ blk_queue_write_cache(nbd->disk->queue, false, false);
}
static int nbd_dev_dbg_init(struct nbd_device *nbd);
@@ -941,7 +941,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
- debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
+ debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
return 0;
}
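
All of the blk_queue_flush() conversions in this patch follow the same mapping onto blk_queue_write_cache(q, write_cache, fua):

	blk_queue_write_cache(q, true, true);	/* was: blk_queue_flush(q, REQ_FLUSH | REQ_FUA) */
	blk_queue_write_cache(q, true, false);	/* was: blk_queue_flush(q, REQ_FLUSH) */
	blk_queue_write_cache(q, false, false);	/* was: blk_queue_flush(q, 0) */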
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 1b709a4e3..c2854a2bf 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -437,7 +437,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
blk_queue_prep_rq(q, blk_queue_start_tag);
- blk_queue_flush(q, REQ_FLUSH);
+ blk_queue_write_cache(q, true, false);
disk->queue = q;
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index c120d70d3..4b7e40583 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -468,7 +468,7 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_logical_block_size(queue, dev->blk_size);
- blk_queue_flush(queue, REQ_FLUSH);
+ blk_queue_write_cache(queue, true, false);
blk_queue_max_segments(queue, -1);
blk_queue_max_segment_size(queue, dev->bounce_size);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 0ede6d7e2..81666a564 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -350,12 +350,12 @@ struct rbd_device {
struct rbd_spec *spec;
struct rbd_options *opts;
- char *header_name;
+ struct ceph_object_id header_oid;
+ struct ceph_object_locator header_oloc;
struct ceph_file_layout layout;
- struct ceph_osd_event *watch_event;
- struct rbd_obj_request *watch_request;
+ struct ceph_osd_linger_request *watch_handle;
struct rbd_spec *parent_spec;
u64 parent_overlap;
@@ -1596,12 +1596,6 @@ static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
return __rbd_obj_request_wait(obj_request, 0);
}
-static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
- unsigned long timeout)
-{
- return __rbd_obj_request_wait(obj_request, timeout);
-}
-
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
@@ -1751,12 +1745,6 @@ static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
complete_all(&obj_request->completion);
}
-static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
-{
- dout("%s: obj %p\n", __func__, obj_request);
- obj_request_done_set(obj_request);
-}
-
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request = NULL;
@@ -1828,13 +1816,12 @@ static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
obj_request_done_set(obj_request);
}
-static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
- struct ceph_msg *msg)
+static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
struct rbd_obj_request *obj_request = osd_req->r_priv;
u16 opcode;
- dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
+ dout("%s: osd_req %p\n", __func__, osd_req);
rbd_assert(osd_req == obj_request->osd_req);
if (obj_request_img_data_test(obj_request)) {
rbd_assert(obj_request->img_request);
@@ -1878,10 +1865,6 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
case CEPH_OSD_OP_CALL:
rbd_osd_call_callback(obj_request);
break;
- case CEPH_OSD_OP_NOTIFY_ACK:
- case CEPH_OSD_OP_WATCH:
- rbd_osd_trivial_callback(obj_request);
- break;
default:
rbd_warn(NULL, "%s: unsupported op %hu",
obj_request->object_name, (unsigned short) opcode);
@@ -1896,27 +1879,17 @@ static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request = obj_request->img_request;
struct ceph_osd_request *osd_req = obj_request->osd_req;
- u64 snap_id;
- rbd_assert(osd_req != NULL);
-
- snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
- ceph_osdc_build_request(osd_req, obj_request->offset,
- NULL, snap_id, NULL);
+ if (img_request)
+ osd_req->r_snapid = img_request->snap_id;
}
static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
- struct rbd_img_request *img_request = obj_request->img_request;
struct ceph_osd_request *osd_req = obj_request->osd_req;
- struct ceph_snap_context *snapc;
- struct timespec mtime = CURRENT_TIME;
- rbd_assert(osd_req != NULL);
-
- snapc = img_request ? img_request->snapc : NULL;
- ceph_osdc_build_request(osd_req, obj_request->offset,
- snapc, CEPH_NOSNAP, &mtime);
+ osd_req->r_mtime = CURRENT_TIME;
+ osd_req->r_data_offset = obj_request->offset;
}
/*
@@ -1954,7 +1927,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
GFP_NOIO);
if (!osd_req)
- return NULL; /* ENOMEM */
+ goto fail;
if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
@@ -1965,9 +1938,18 @@ static struct ceph_osd_request *rbd_osd_req_create(
osd_req->r_priv = obj_request;
osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
- ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
+ if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
+ obj_request->object_name))
+ goto fail;
+
+ if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
+ goto fail;
return osd_req;
+
+fail:
+ ceph_osdc_put_request(osd_req);
+ return NULL;
}
/*
@@ -2003,16 +1985,25 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
false, GFP_NOIO);
if (!osd_req)
- return NULL; /* ENOMEM */
+ goto fail;
osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
- ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
+ if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
+ obj_request->object_name))
+ goto fail;
+
+ if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
+ goto fail;
return osd_req;
+
+fail:
+ ceph_osdc_put_request(osd_req);
+ return NULL;
}
@@ -2973,17 +2964,20 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
struct rbd_obj_request *obj_request;
struct rbd_obj_request *next_obj_request;
+ int ret = 0;
dout("%s: img %p\n", __func__, img_request);
- for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
- int ret;
+ rbd_img_request_get(img_request);
+ for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
ret = rbd_img_obj_request_submit(obj_request);
if (ret)
- return ret;
+ goto out_put_ireq;
}
- return 0;
+out_put_ireq:
+ rbd_img_request_put(img_request);
+ return ret;
}
static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
@@ -3090,45 +3084,18 @@ out_err:
obj_request_done_set(obj_request);
}
-static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
-{
- struct rbd_obj_request *obj_request;
- struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- int ret;
-
- obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
- OBJ_REQUEST_NODATA);
- if (!obj_request)
- return -ENOMEM;
-
- ret = -ENOMEM;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
- obj_request);
- if (!obj_request->osd_req)
- goto out;
-
- osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
- notify_id, 0, 0);
- rbd_osd_req_format_read(obj_request);
+static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev);
+static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev);
- ret = rbd_obj_request_submit(osdc, obj_request);
- if (ret)
- goto out;
- ret = rbd_obj_request_wait(obj_request);
-out:
- rbd_obj_request_put(obj_request);
-
- return ret;
-}
-
-static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
+static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
+ u64 notifier_id, void *data, size_t data_len)
{
- struct rbd_device *rbd_dev = (struct rbd_device *)data;
+ struct rbd_device *rbd_dev = arg;
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
int ret;
- dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
- rbd_dev->header_name, (unsigned long long)notify_id,
- (unsigned int)opcode);
+ dout("%s rbd_dev %p cookie %llu notify_id %llu\n", __func__, rbd_dev,
+ cookie, notify_id);
/*
* Until adequate refresh error handling is in place, there is
@@ -3140,63 +3107,31 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
if (ret)
rbd_warn(rbd_dev, "refresh failed: %d", ret);
- ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
+ ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, notify_id, cookie,
+ NULL, 0);
if (ret)
rbd_warn(rbd_dev, "notify_ack ret %d", ret);
}
-/*
- * Send a (un)watch request and wait for the ack. Return a request
- * with a ref held on success or error.
- */
-static struct rbd_obj_request *rbd_obj_watch_request_helper(
- struct rbd_device *rbd_dev,
- bool watch)
+static void rbd_watch_errcb(void *arg, u64 cookie, int err)
{
- struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- struct ceph_options *opts = osdc->client->options;
- struct rbd_obj_request *obj_request;
+ struct rbd_device *rbd_dev = arg;
int ret;
- obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
- OBJ_REQUEST_NODATA);
- if (!obj_request)
- return ERR_PTR(-ENOMEM);
-
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
- obj_request);
- if (!obj_request->osd_req) {
- ret = -ENOMEM;
- goto out;
- }
-
- osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
- rbd_dev->watch_event->cookie, 0, watch);
- rbd_osd_req_format_write(obj_request);
+ rbd_warn(rbd_dev, "encountered watch error: %d", err);
- if (watch)
- ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
-
- ret = rbd_obj_request_submit(osdc, obj_request);
- if (ret)
- goto out;
+ __rbd_dev_header_unwatch_sync(rbd_dev);
- ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
- if (ret)
- goto out;
-
- ret = obj_request->result;
+ ret = rbd_dev_header_watch_sync(rbd_dev);
if (ret) {
- if (watch)
- rbd_obj_request_end(obj_request);
- goto out;
+ rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
+ return;
}
- return obj_request;
-
-out:
- rbd_obj_request_put(obj_request);
- return ERR_PTR(ret);
+ ret = rbd_dev_refresh(rbd_dev);
+ if (ret)
+ rbd_warn(rbd_dev, "reregisteration refresh failed: %d", ret);
}
/*
@@ -3205,35 +3140,33 @@ out:
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- struct rbd_obj_request *obj_request;
- int ret;
+ struct ceph_osd_linger_request *handle;
- rbd_assert(!rbd_dev->watch_event);
- rbd_assert(!rbd_dev->watch_request);
+ rbd_assert(!rbd_dev->watch_handle);
- ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
- &rbd_dev->watch_event);
- if (ret < 0)
- return ret;
+ handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, rbd_watch_cb,
+ rbd_watch_errcb, rbd_dev);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
- obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
- if (IS_ERR(obj_request)) {
- ceph_osdc_cancel_event(rbd_dev->watch_event);
- rbd_dev->watch_event = NULL;
- return PTR_ERR(obj_request);
- }
+ rbd_dev->watch_handle = handle;
+ return 0;
+}
- /*
- * A watch request is set to linger, so the underlying osd
- * request won't go away until we unregister it. We retain
- * a pointer to the object request during that time (in
- * rbd_dev->watch_request), so we'll keep a reference to it.
- * We'll drop that reference after we've unregistered it in
- * rbd_dev_header_unwatch_sync().
- */
- rbd_dev->watch_request = obj_request;
+static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
+{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ int ret;
- return 0;
+ if (!rbd_dev->watch_handle)
+ return;
+
+ ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
+ if (ret)
+ rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
+
+ rbd_dev->watch_handle = NULL;
}
/*
@@ -3241,24 +3174,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
*/
static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
- struct rbd_obj_request *obj_request;
-
- rbd_assert(rbd_dev->watch_event);
- rbd_assert(rbd_dev->watch_request);
-
- rbd_obj_request_end(rbd_dev->watch_request);
- rbd_obj_request_put(rbd_dev->watch_request);
- rbd_dev->watch_request = NULL;
-
- obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
- if (!IS_ERR(obj_request))
- rbd_obj_request_put(obj_request);
- else
- rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
- PTR_ERR(obj_request));
-
- ceph_osdc_cancel_event(rbd_dev->watch_event);
- rbd_dev->watch_event = NULL;
+ __rbd_dev_header_unwatch_sync(rbd_dev);
dout("%s flushing notifies\n", __func__);
ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
@@ -3591,7 +3507,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
if (!ondisk)
return -ENOMEM;
- ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
0, size, ondisk);
if (ret < 0)
goto out;
@@ -4033,6 +3949,8 @@ static void rbd_dev_release(struct device *dev)
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
bool need_put = !!rbd_dev->opts;
+ ceph_oid_destroy(&rbd_dev->header_oid);
+
rbd_put_client(rbd_dev->rbd_client);
rbd_spec_put(rbd_dev->spec);
kfree(rbd_dev->opts);
@@ -4063,6 +3981,9 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
INIT_LIST_HEAD(&rbd_dev->node);
init_rwsem(&rbd_dev->header_rwsem);
+ ceph_oid_init(&rbd_dev->header_oid);
+ ceph_oloc_init(&rbd_dev->header_oloc);
+
rbd_dev->dev.bus = &rbd_bus_type;
rbd_dev->dev.type = &rbd_device_type;
rbd_dev->dev.parent = &rbd_root_dev;
@@ -4111,7 +4032,7 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
__le64 size;
} __attribute__ ((packed)) size_buf = { 0 };
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_size",
&snapid, sizeof (snapid),
&size_buf, sizeof (size_buf));
@@ -4151,7 +4072,7 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
if (!reply_buf)
return -ENOMEM;
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_object_prefix", NULL, 0,
reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
@@ -4186,7 +4107,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
u64 unsup;
int ret;
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_features",
&snapid, sizeof (snapid),
&features_buf, sizeof (features_buf));
@@ -4248,7 +4169,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
}
snapid = cpu_to_le64(rbd_dev->spec->snap_id);
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_parent",
&snapid, sizeof (snapid),
reply_buf, size);
@@ -4351,7 +4272,7 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
u64 stripe_count;
int ret;
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_stripe_unit_count", NULL, 0,
(char *)&striping_info_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
@@ -4599,7 +4520,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
if (!reply_buf)
return -ENOMEM;
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_snapcontext", NULL, 0,
reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
@@ -4664,7 +4585,7 @@ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
return ERR_PTR(-ENOMEM);
snapid = cpu_to_le64(snap_id);
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_snapshot_name",
&snapid, sizeof (snapid),
reply_buf, size);
@@ -4975,13 +4896,13 @@ static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
again:
ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
if (ret == -ENOENT && tries++ < 1) {
- ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
- &newest_epoch);
+ ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
+ &newest_epoch);
if (ret < 0)
return ret;
if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
- ceph_monc_request_next_osdmap(&rbdc->client->monc);
+ ceph_osdc_maybe_request_map(&rbdc->client->osdc);
(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
newest_epoch,
opts->mount_timeout);
@@ -5260,35 +5181,26 @@ err_out_unlock:
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
struct rbd_spec *spec = rbd_dev->spec;
- size_t size;
+ int ret;
/* Record the header object name for this rbd image. */
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ rbd_dev->header_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
if (rbd_dev->image_format == 1)
- size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
+ ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
+ spec->image_name, RBD_SUFFIX);
else
- size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
-
- rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
- if (!rbd_dev->header_name)
- return -ENOMEM;
+ ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
+ RBD_HEADER_PREFIX, spec->image_id);
- if (rbd_dev->image_format == 1)
- sprintf(rbd_dev->header_name, "%s%s",
- spec->image_name, RBD_SUFFIX);
- else
- sprintf(rbd_dev->header_name, "%s%s",
- RBD_HEADER_PREFIX, spec->image_id);
- return 0;
+ return ret;
}
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
rbd_dev_unprobe(rbd_dev);
- kfree(rbd_dev->header_name);
- rbd_dev->header_name = NULL;
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
@@ -5327,7 +5239,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
pr_info("image %s/%s does not exist\n",
rbd_dev->spec->pool_name,
rbd_dev->spec->image_name);
- goto out_header_name;
+ goto err_out_format;
}
}
@@ -5373,7 +5285,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
goto err_out_probe;
dout("discovered format %u image, header name is %s\n",
- rbd_dev->image_format, rbd_dev->header_name);
+ rbd_dev->image_format, rbd_dev->header_oid.name);
return 0;
err_out_probe:
@@ -5381,9 +5293,6 @@ err_out_probe:
err_out_watch:
if (!depth)
rbd_dev_header_unwatch_sync(rbd_dev);
-out_header_name:
- kfree(rbd_dev->header_name);
- rbd_dev->header_name = NULL;
err_out_format:
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
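The net effect of the rbd hunks above is that three hand-rolled pieces (a watch event, a lingering CEPH_OSD_OP_WATCH request, and a NOTIFY_ACK round trip) collapse into the new libceph linger API. The full lifecycle, condensed into a sketch using only the entry points visible in this diff (error handling trimmed):

	/* Register: one call replaces ceph_osdc_create_event() plus the
	 * lingering watch request. */
	handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
				 &rbd_dev->header_oloc,
				 rbd_watch_cb, rbd_watch_errcb, rbd_dev);

	/* Notify path: acknowledge directly instead of building a
	 * NOTIFY_ACK object request by hand. */
	ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
			     &rbd_dev->header_oloc, notify_id, cookie,
			     NULL, 0);

	/* Teardown: one call replaces the unwatch request plus
	 * ceph_osdc_cancel_event(). */
	ceph_osdc_unwatch(osdc, handle);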
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 586f9168f..910e06591 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -133,7 +133,6 @@ MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
#define INQ_STD_NBYTES 36
-#define SKD_DISCARD_CDB_LENGTH 24
enum skd_drvr_state {
SKD_DRVR_STATE_LOAD,
@@ -212,7 +211,6 @@ struct skd_request_context {
struct request *req;
u8 flush_cmd;
- u8 discard_page;
u32 timeout_stamp;
u8 sg_data_dir;
@@ -230,7 +228,6 @@ struct skd_request_context {
};
#define SKD_DATA_DIR_HOST_TO_CARD 1
#define SKD_DATA_DIR_CARD_TO_HOST 2
-#define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */
struct skd_special_context {
struct skd_request_context req;
@@ -540,31 +537,6 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
scsi_req->cdb[9] = 0;
}
-static void
-skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
- struct skd_request_context *skreq,
- struct page *page,
- u32 lba, u32 count)
-{
- char *buf;
- unsigned long len;
- struct request *req;
-
- buf = page_address(page);
- len = SKD_DISCARD_CDB_LENGTH;
-
- scsi_req->cdb[0] = UNMAP;
- scsi_req->cdb[8] = len;
-
- put_unaligned_be16(6 + 16, &buf[0]);
- put_unaligned_be16(16, &buf[2]);
- put_unaligned_be64(lba, &buf[8]);
- put_unaligned_be32(count, &buf[16]);
-
- req = skreq->req;
- blk_add_request_payload(req, page, len);
-}
-
static void skd_request_fn_not_online(struct request_queue *q);
static void skd_request_fn(struct request_queue *q)
@@ -575,7 +547,6 @@ static void skd_request_fn(struct request_queue *q)
struct skd_request_context *skreq;
struct request *req = NULL;
struct skd_scsi_request *scsi_req;
- struct page *page;
unsigned long io_flags;
int error;
u32 lba;
@@ -669,7 +640,6 @@ static void skd_request_fn(struct request_queue *q)
skreq->flush_cmd = 0;
skreq->n_sg = 0;
skreq->sg_byte_count = 0;
- skreq->discard_page = 0;
/*
* OK to now dequeue request from q.
@@ -735,18 +705,7 @@ static void skd_request_fn(struct request_queue *q)
else
skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
- if (io_flags & REQ_DISCARD) {
- page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
- if (!page) {
- pr_err("request_fn:Page allocation failed.\n");
- skd_end_request(skdev, skreq, -ENOMEM);
- break;
- }
- skreq->discard_page = 1;
- req->completion_data = page;
- skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
-
- } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
+ if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
skd_prep_zerosize_flush_cdb(scsi_req, skreq);
SKD_ASSERT(skreq->flush_cmd == 1);
@@ -851,16 +810,6 @@ skip_sg:
static void skd_end_request(struct skd_device *skdev,
struct skd_request_context *skreq, int error)
{
- struct request *req = skreq->req;
- unsigned int io_flags = req->cmd_flags;
-
- if ((io_flags & REQ_DISCARD) &&
- (skreq->discard_page == 1)) {
- pr_debug("%s:%s:%d, free the page!",
- skdev->name, __func__, __LINE__);
- __free_page(req->completion_data);
- }
-
if (unlikely(error)) {
struct request *req = skreq->req;
char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
@@ -4412,19 +4361,13 @@ static int skd_cons_disk(struct skd_device *skdev)
disk->queue = q;
q->queuedata = skdev;
- blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+ blk_queue_write_cache(q, true, true);
blk_queue_max_segments(q, skdev->sgs_per_request);
blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
/* set sysfs optimal_io_size to 8K */
blk_queue_io_opt(q, 8192);
- /* DISCARD Flag initialization. */
- q->limits.discard_granularity = 8192;
- q->limits.discard_alignment = 0;
- blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
- q->limits.discard_zeroes_data = 1;
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 28cff0d23..42758b527 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -493,11 +493,7 @@ static void virtblk_update_cache_mode(struct virtio_device *vdev)
u8 writeback = virtblk_get_cache_mode(vdev);
struct virtio_blk *vblk = vdev->priv;
- if (writeback)
- blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
- else
- blk_queue_flush(vblk->disk->queue, 0);
-
+ blk_queue_write_cache(vblk->disk->queue, writeback, false);
revalidate_disk(vblk->disk);
}
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 26aa080e2..3355f1cdd 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -477,7 +477,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
vbd->type |= VDISK_REMOVABLE;
q = bdev_get_queue(bdev);
- if (q && q->flush_flags)
+ if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
vbd->flush_support = true;
if (q && blk_queue_secdiscard(q))
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6405b6557..fcc5b4e0a 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -207,6 +207,9 @@ struct blkfront_info
struct blk_mq_tag_set tag_set;
struct blkfront_ring_info *rinfo;
unsigned int nr_rings;
+ /* Save incomplete reqs and bios for migration. */
+ struct list_head requests;
+ struct bio_list bio_list;
};
static unsigned int nr_minors;
@@ -874,8 +877,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *qd)
{
unsigned long flags;
- struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
+ int qid = hctx->queue_num;
+ struct blkfront_info *info = hctx->queue->queuedata;
+ struct blkfront_ring_info *rinfo = NULL;
+ BUG_ON(info->nr_rings <= qid);
+ rinfo = &info->rinfo[qid];
blk_mq_start_request(qd->rq);
spin_lock_irqsave(&rinfo->ring_lock, flags);
if (RING_FULL(&rinfo->ring))
@@ -901,20 +908,9 @@ out_busy:
return BLK_MQ_RQ_QUEUE_BUSY;
}
-static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
- unsigned int index)
-{
- struct blkfront_info *info = (struct blkfront_info *)data;
-
- BUG_ON(info->nr_rings <= index);
- hctx->driver_data = &info->rinfo[index];
- return 0;
-}
-
static struct blk_mq_ops blkfront_mq_ops = {
.queue_rq = blkif_queue_rq,
.map_queue = blk_mq_map_queue,
- .init_hctx = blk_mq_init_hctx,
};
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
@@ -950,6 +946,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
return PTR_ERR(rq);
}
+ rq->queuedata = info;
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
if (info->feature_discard) {
@@ -998,7 +995,8 @@ static const char *flush_info(unsigned int feature_flush)
static void xlvbd_flush(struct blkfront_info *info)
{
- blk_queue_flush(info->rq, info->feature_flush);
+ blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH,
+ info->feature_flush & REQ_FUA);
pr_info("blkfront: %s: %s %s %s %s %s\n",
info->gd->disk_name, flush_info(info->feature_flush),
"persistent grants:", info->feature_persistent ?
@@ -2007,69 +2005,22 @@ static int blkif_recover(struct blkfront_info *info)
{
unsigned int i, r_index;
struct request *req, *n;
- struct blk_shadow *copy;
int rc;
struct bio *bio, *cloned_bio;
- struct bio_list bio_list, merge_bio;
unsigned int segs, offset;
int pending, size;
struct split_bio *split_bio;
- struct list_head requests;
blkfront_gather_backend_features(info);
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
blk_queue_max_segments(info->rq, segs);
- bio_list_init(&bio_list);
- INIT_LIST_HEAD(&requests);
for (r_index = 0; r_index < info->nr_rings; r_index++) {
- struct blkfront_ring_info *rinfo;
-
- rinfo = &info->rinfo[r_index];
- /* Stage 1: Make a safe copy of the shadow state. */
- copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
- GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
- if (!copy)
- return -ENOMEM;
-
- /* Stage 2: Set up free list. */
- memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
- for (i = 0; i < BLK_RING_SIZE(info); i++)
- rinfo->shadow[i].req.u.rw.id = i+1;
- rinfo->shadow_free = rinfo->ring.req_prod_pvt;
- rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
+ struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
rc = blkfront_setup_indirect(rinfo);
- if (rc) {
- kfree(copy);
+ if (rc)
return rc;
- }
-
- for (i = 0; i < BLK_RING_SIZE(info); i++) {
- /* Not in use? */
- if (!copy[i].request)
- continue;
-
- /*
- * Get the bios in the request so we can re-queue them.
- */
- if (copy[i].request->cmd_flags &
- (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
- /*
- * Flush operations don't contain bios, so
- * we need to requeue the whole request
- */
- list_add(&copy[i].request->queuelist, &requests);
- continue;
- }
- merge_bio.head = copy[i].request->bio;
- merge_bio.tail = copy[i].request->biotail;
- bio_list_merge(&bio_list, &merge_bio);
- copy[i].request->bio = NULL;
- blk_end_request_all(copy[i].request, 0);
- }
-
- kfree(copy);
}
xenbus_switch_state(info->xbdev, XenbusStateConnected);
@@ -2084,7 +2035,7 @@ static int blkif_recover(struct blkfront_info *info)
kick_pending_request_queues(rinfo);
}
- list_for_each_entry_safe(req, n, &requests, queuelist) {
+ list_for_each_entry_safe(req, n, &info->requests, queuelist) {
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
BUG_ON(req->nr_phys_segments > segs);
@@ -2092,7 +2043,7 @@ static int blkif_recover(struct blkfront_info *info)
}
blk_mq_kick_requeue_list(info->rq);
- while ((bio = bio_list_pop(&bio_list)) != NULL) {
+ while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
/* Traverse the list of pending bios and re-queue them */
if (bio_segments(bio) > segs) {
/*
@@ -2138,9 +2089,42 @@ static int blkfront_resume(struct xenbus_device *dev)
{
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
int err = 0;
+ unsigned int i, j;
dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
+ bio_list_init(&info->bio_list);
+ INIT_LIST_HEAD(&info->requests);
+ for (i = 0; i < info->nr_rings; i++) {
+ struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ struct bio_list merge_bio;
+ struct blk_shadow *shadow = rinfo->shadow;
+
+ for (j = 0; j < BLK_RING_SIZE(info); j++) {
+ /* Not in use? */
+ if (!shadow[j].request)
+ continue;
+
+ /*
+ * Get the bios in the request so we can re-queue them.
+ */
+ if (shadow[j].request->cmd_flags &
+ (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+ /*
+ * Flush operations don't contain bios, so
+ * we need to requeue the whole request
+ */
+ list_add(&shadow[j].request->queuelist, &info->requests);
+ continue;
+ }
+ merge_bio.head = shadow[j].request->bio;
+ merge_bio.tail = shadow[j].request->biotail;
+ bio_list_merge(&info->bio_list, &merge_bio);
+ shadow[j].request->bio = NULL;
+ blk_mq_end_request(shadow[j].request, 0);
+ }
+ }
+
blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
err = negotiate_mq(info);
@@ -2148,6 +2132,8 @@ static int blkfront_resume(struct xenbus_device *dev)
return err;
err = talk_to_blkback(dev, info);
+ if (!err)
+ blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
/*
* We have to wait for the backend to switch to
@@ -2484,10 +2470,23 @@ static void blkback_changed(struct xenbus_device *dev,
break;
case XenbusStateConnected:
- if (dev->state != XenbusStateInitialised) {
+ /*
+ * talk_to_blkback sets state to XenbusStateInitialised
+ * and blkfront_connect sets it to XenbusStateConnected
+ * (if connection went OK).
+ *
+ * If the backend (or toolstack) decides to poke at backend
+ * state (and re-trigger the watch by setting the state repeatedly
+ * to XenbusStateConnected (4)) we need to deal with this.
+ * This is allowed, as the backend uses it to tell the guest
+ * that the size of the disk has changed!
+ */
+ if ((dev->state != XenbusStateInitialised) &&
+ (dev->state != XenbusStateConnected)) {
if (talk_to_blkback(dev, info))
break;
}
+
blkfront_connect(info);
break;
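blkif_recover() and blkfront_resume() now split the migration work: resume walks the shadow rings and salvages in-flight I/O into info->requests and info->bio_list before blkif_free() tears the rings down, and recover resubmits it once the new rings are connected. The salvage idiom for a single request, reduced to a sketch (assumes the bio_list was initialised with bio_list_init(); the function name is hypothetical):

	static void example_salvage_bios(struct bio_list *pending,
					 struct request *rq)
	{
		struct bio_list merge;

		/* Detach the bio chain before completing the request so
		 * the bios can be resubmitted against the new ring later. */
		merge.head = rq->bio;
		merge.tail = rq->biotail;
		bio_list_merge(pending, &merge);
		rq->bio = NULL;
		blk_mq_end_request(rq, 0);
	}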
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 3ef42e563..b51a816d7 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
+#include <linux/cpu.h>
#include "zcomp.h"
#include "zcomp_lzo.h"
@@ -20,29 +21,6 @@
#include "zcomp_lz4.h"
#endif
-/*
- * single zcomp_strm backend
- */
-struct zcomp_strm_single {
- struct mutex strm_lock;
- struct zcomp_strm *zstrm;
-};
-
-/*
- * multi zcomp_strm backend
- */
-struct zcomp_strm_multi {
- /* protect strm list */
- spinlock_t strm_lock;
- /* max possible number of zstrm streams */
- int max_strm;
- /* number of available zstrm streams */
- int avail_strm;
- /* list of available strms */
- struct list_head idle_strm;
- wait_queue_head_t strm_wait;
-};
-
static struct zcomp_backend *backends[] = {
&zcomp_lzo,
#ifdef CONFIG_ZRAM_LZ4_COMPRESS
@@ -93,188 +71,6 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp, gfp_t flags)
return zstrm;
}
-/*
- * get idle zcomp_strm or wait until other process release
- * (zcomp_strm_release()) one for us
- */
-static struct zcomp_strm *zcomp_strm_multi_find(struct zcomp *comp)
-{
- struct zcomp_strm_multi *zs = comp->stream;
- struct zcomp_strm *zstrm;
-
- while (1) {
- spin_lock(&zs->strm_lock);
- if (!list_empty(&zs->idle_strm)) {
- zstrm = list_entry(zs->idle_strm.next,
- struct zcomp_strm, list);
- list_del(&zstrm->list);
- spin_unlock(&zs->strm_lock);
- return zstrm;
- }
- /* zstrm streams limit reached, wait for idle stream */
- if (zs->avail_strm >= zs->max_strm) {
- spin_unlock(&zs->strm_lock);
- wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
- continue;
- }
- /* allocate new zstrm stream */
- zs->avail_strm++;
- spin_unlock(&zs->strm_lock);
- /*
- * This function can be called in swapout/fs write path
- * so we can't use GFP_FS|IO. And it assumes we already
- * have at least one stream in zram initialization so we
- * don't do best effort to allocate more stream in here.
- * A default stream will work well without further multiple
- * streams. That's why we use NORETRY | NOWARN.
- */
- zstrm = zcomp_strm_alloc(comp, GFP_NOIO | __GFP_NORETRY |
- __GFP_NOWARN);
- if (!zstrm) {
- spin_lock(&zs->strm_lock);
- zs->avail_strm--;
- spin_unlock(&zs->strm_lock);
- wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
- continue;
- }
- break;
- }
- return zstrm;
-}
-
-/* add stream back to idle list and wake up waiter or free the stream */
-static void zcomp_strm_multi_release(struct zcomp *comp, struct zcomp_strm *zstrm)
-{
- struct zcomp_strm_multi *zs = comp->stream;
-
- spin_lock(&zs->strm_lock);
- if (zs->avail_strm <= zs->max_strm) {
- list_add(&zstrm->list, &zs->idle_strm);
- spin_unlock(&zs->strm_lock);
- wake_up(&zs->strm_wait);
- return;
- }
-
- zs->avail_strm--;
- spin_unlock(&zs->strm_lock);
- zcomp_strm_free(comp, zstrm);
-}
-
-/* change max_strm limit */
-static bool zcomp_strm_multi_set_max_streams(struct zcomp *comp, int num_strm)
-{
- struct zcomp_strm_multi *zs = comp->stream;
- struct zcomp_strm *zstrm;
-
- spin_lock(&zs->strm_lock);
- zs->max_strm = num_strm;
- /*
- * if user has lowered the limit and there are idle streams,
- * immediately free as much streams (and memory) as we can.
- */
- while (zs->avail_strm > num_strm && !list_empty(&zs->idle_strm)) {
- zstrm = list_entry(zs->idle_strm.next,
- struct zcomp_strm, list);
- list_del(&zstrm->list);
- zcomp_strm_free(comp, zstrm);
- zs->avail_strm--;
- }
- spin_unlock(&zs->strm_lock);
- return true;
-}
-
-static void zcomp_strm_multi_destroy(struct zcomp *comp)
-{
- struct zcomp_strm_multi *zs = comp->stream;
- struct zcomp_strm *zstrm;
-
- while (!list_empty(&zs->idle_strm)) {
- zstrm = list_entry(zs->idle_strm.next,
- struct zcomp_strm, list);
- list_del(&zstrm->list);
- zcomp_strm_free(comp, zstrm);
- }
- kfree(zs);
-}
-
-static int zcomp_strm_multi_create(struct zcomp *comp, int max_strm)
-{
- struct zcomp_strm *zstrm;
- struct zcomp_strm_multi *zs;
-
- comp->destroy = zcomp_strm_multi_destroy;
- comp->strm_find = zcomp_strm_multi_find;
- comp->strm_release = zcomp_strm_multi_release;
- comp->set_max_streams = zcomp_strm_multi_set_max_streams;
- zs = kmalloc(sizeof(struct zcomp_strm_multi), GFP_KERNEL);
- if (!zs)
- return -ENOMEM;
-
- comp->stream = zs;
- spin_lock_init(&zs->strm_lock);
- INIT_LIST_HEAD(&zs->idle_strm);
- init_waitqueue_head(&zs->strm_wait);
- zs->max_strm = max_strm;
- zs->avail_strm = 1;
-
- zstrm = zcomp_strm_alloc(comp, GFP_KERNEL);
- if (!zstrm) {
- kfree(zs);
- return -ENOMEM;
- }
- list_add(&zstrm->list, &zs->idle_strm);
- return 0;
-}
-
-static struct zcomp_strm *zcomp_strm_single_find(struct zcomp *comp)
-{
- struct zcomp_strm_single *zs = comp->stream;
- mutex_lock(&zs->strm_lock);
- return zs->zstrm;
-}
-
-static void zcomp_strm_single_release(struct zcomp *comp,
- struct zcomp_strm *zstrm)
-{
- struct zcomp_strm_single *zs = comp->stream;
- mutex_unlock(&zs->strm_lock);
-}
-
-static bool zcomp_strm_single_set_max_streams(struct zcomp *comp, int num_strm)
-{
- /* zcomp_strm_single support only max_comp_streams == 1 */
- return false;
-}
-
-static void zcomp_strm_single_destroy(struct zcomp *comp)
-{
- struct zcomp_strm_single *zs = comp->stream;
- zcomp_strm_free(comp, zs->zstrm);
- kfree(zs);
-}
-
-static int zcomp_strm_single_create(struct zcomp *comp)
-{
- struct zcomp_strm_single *zs;
-
- comp->destroy = zcomp_strm_single_destroy;
- comp->strm_find = zcomp_strm_single_find;
- comp->strm_release = zcomp_strm_single_release;
- comp->set_max_streams = zcomp_strm_single_set_max_streams;
- zs = kmalloc(sizeof(struct zcomp_strm_single), GFP_KERNEL);
- if (!zs)
- return -ENOMEM;
-
- comp->stream = zs;
- mutex_init(&zs->strm_lock);
- zs->zstrm = zcomp_strm_alloc(comp, GFP_KERNEL);
- if (!zs->zstrm) {
- kfree(zs);
- return -ENOMEM;
- }
- return 0;
-}
-
/* show available compressors */
ssize_t zcomp_available_show(const char *comp, char *buf)
{
@@ -299,19 +95,14 @@ bool zcomp_available_algorithm(const char *comp)
return find_backend(comp) != NULL;
}
-bool zcomp_set_max_streams(struct zcomp *comp, int num_strm)
-{
- return comp->set_max_streams(comp, num_strm);
-}
-
struct zcomp_strm *zcomp_strm_find(struct zcomp *comp)
{
- return comp->strm_find(comp);
+ return *get_cpu_ptr(comp->stream);
}
void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm)
{
- comp->strm_release(comp, zstrm);
+ put_cpu_ptr(comp->stream);
}
int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
@@ -327,9 +118,83 @@ int zcomp_decompress(struct zcomp *comp, const unsigned char *src,
return comp->backend->decompress(src, src_len, dst);
}
+static int __zcomp_cpu_notifier(struct zcomp *comp,
+ unsigned long action, unsigned long cpu)
+{
+ struct zcomp_strm *zstrm;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
+ break;
+ zstrm = zcomp_strm_alloc(comp, GFP_KERNEL);
+ if (IS_ERR_OR_NULL(zstrm)) {
+ pr_err("Can't allocate a compression stream\n");
+ return NOTIFY_BAD;
+ }
+ *per_cpu_ptr(comp->stream, cpu) = zstrm;
+ break;
+ case CPU_DEAD:
+ case CPU_UP_CANCELED:
+ zstrm = *per_cpu_ptr(comp->stream, cpu);
+ if (!IS_ERR_OR_NULL(zstrm))
+ zcomp_strm_free(comp, zstrm);
+ *per_cpu_ptr(comp->stream, cpu) = NULL;
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static int zcomp_cpu_notifier(struct notifier_block *nb,
+ unsigned long action, void *pcpu)
+{
+ unsigned long cpu = (unsigned long)pcpu;
+ struct zcomp *comp = container_of(nb, typeof(*comp), notifier);
+
+ return __zcomp_cpu_notifier(comp, action, cpu);
+}
+
+static int zcomp_init(struct zcomp *comp)
+{
+ unsigned long cpu;
+ int ret;
+
+ comp->notifier.notifier_call = zcomp_cpu_notifier;
+
+ comp->stream = alloc_percpu(struct zcomp_strm *);
+ if (!comp->stream)
+ return -ENOMEM;
+
+ cpu_notifier_register_begin();
+ for_each_online_cpu(cpu) {
+ ret = __zcomp_cpu_notifier(comp, CPU_UP_PREPARE, cpu);
+ if (ret == NOTIFY_BAD)
+ goto cleanup;
+ }
+ __register_cpu_notifier(&comp->notifier);
+ cpu_notifier_register_done();
+ return 0;
+
+cleanup:
+ for_each_online_cpu(cpu)
+ __zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu);
+ cpu_notifier_register_done();
+ return -ENOMEM;
+}
+
void zcomp_destroy(struct zcomp *comp)
{
- comp->destroy(comp);
+ unsigned long cpu;
+
+ cpu_notifier_register_begin();
+ for_each_online_cpu(cpu)
+ __zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu);
+ __unregister_cpu_notifier(&comp->notifier);
+ cpu_notifier_register_done();
+
+ free_percpu(comp->stream);
kfree(comp);
}
@@ -339,9 +204,9 @@ void zcomp_destroy(struct zcomp *comp)
* backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
* if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
* case of allocation error, or any other error potentially
- * returned by functions zcomp_strm_{multi,single}_create.
+ * returned by zcomp_init().
*/
-struct zcomp *zcomp_create(const char *compress, int max_strm)
+struct zcomp *zcomp_create(const char *compress)
{
struct zcomp *comp;
struct zcomp_backend *backend;
@@ -356,10 +221,7 @@ struct zcomp *zcomp_create(const char *compress, int max_strm)
return ERR_PTR(-ENOMEM);
comp->backend = backend;
- if (max_strm > 1)
- error = zcomp_strm_multi_create(comp, max_strm);
- else
- error = zcomp_strm_single_create(comp);
+ error = zcomp_init(comp);
if (error) {
kfree(comp);
return ERR_PTR(error);
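With the single- and multi-stream backends gone, zcomp keeps exactly one stream per CPU, and checkout is just a preemption-disabled per-cpu dereference; there is no lock and no wait queue left. The core of the new find/release pair, as a sketch (it mirrors the two one-liners in the diff above):

	static struct zcomp_strm *example_strm_find(struct zcomp *comp)
	{
		/* get_cpu_ptr() disables preemption and returns this CPU's
		 * stream slot; the caller must not sleep until release. */
		return *get_cpu_ptr(comp->stream);
	}

	static void example_strm_release(struct zcomp *comp)
	{
		/* Re-enables preemption; nothing to unlock or wake. */
		put_cpu_ptr(comp->stream);
	}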
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index b7d2a4bca..ffd88cb74 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -10,8 +10,6 @@
#ifndef _ZCOMP_H_
#define _ZCOMP_H_
-#include <linux/mutex.h>
-
struct zcomp_strm {
/* compression/decompression buffer */
void *buffer;
@@ -21,8 +19,6 @@ struct zcomp_strm {
* working memory)
*/
void *private;
- /* used in multi stream backend, protected by backend strm_lock */
- struct list_head list;
};
/* static compression backend */
@@ -41,19 +37,15 @@ struct zcomp_backend {
/* dynamic per-device compression frontend */
struct zcomp {
- void *stream;
+ struct zcomp_strm * __percpu *stream;
struct zcomp_backend *backend;
-
- struct zcomp_strm *(*strm_find)(struct zcomp *comp);
- void (*strm_release)(struct zcomp *comp, struct zcomp_strm *zstrm);
- bool (*set_max_streams)(struct zcomp *comp, int num_strm);
- void (*destroy)(struct zcomp *comp);
+ struct notifier_block notifier;
};
ssize_t zcomp_available_show(const char *comp, char *buf);
bool zcomp_available_algorithm(const char *comp);
-struct zcomp *zcomp_create(const char *comp, int max_strm);
+struct zcomp *zcomp_create(const char *comp);
void zcomp_destroy(struct zcomp *comp);
struct zcomp_strm *zcomp_strm_find(struct zcomp *comp);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 370c2f760..8fcad8b76 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -304,46 +304,25 @@ static ssize_t mem_used_max_store(struct device *dev,
return len;
}
+/*
+ * We switched to per-cpu streams and this attr is not needed anymore.
+ * However, we will keep it around for some time, because:
+ * a) we may revert per-cpu streams in the future
+ * b) it's visible to user space and we need to follow our 2 years
+ * retirement rule; but we already have a number of 'soon to be
+ * altered' attrs, so max_comp_streams need to wait for the next
+ * layoff cycle.
+ */
static ssize_t max_comp_streams_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int val;
- struct zram *zram = dev_to_zram(dev);
-
- down_read(&zram->init_lock);
- val = zram->max_comp_streams;
- up_read(&zram->init_lock);
-
- return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}
static ssize_t max_comp_streams_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- int num;
- struct zram *zram = dev_to_zram(dev);
- int ret;
-
- ret = kstrtoint(buf, 0, &num);
- if (ret < 0)
- return ret;
- if (num < 1)
- return -EINVAL;
-
- down_write(&zram->init_lock);
- if (init_done(zram)) {
- if (!zcomp_set_max_streams(zram->comp, num)) {
- pr_info("Cannot change max compression streams\n");
- ret = -EINVAL;
- goto out;
- }
- }
-
- zram->max_comp_streams = num;
- ret = len;
-out:
- up_write(&zram->init_lock);
- return ret;
+ return len;
}
static ssize_t comp_algorithm_show(struct device *dev,
@@ -456,8 +435,26 @@ static ssize_t mm_stat_show(struct device *dev,
return ret;
}
+static ssize_t debug_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int version = 1;
+ struct zram *zram = dev_to_zram(dev);
+ ssize_t ret;
+
+ down_read(&zram->init_lock);
+ ret = scnprintf(buf, PAGE_SIZE,
+ "version: %d\n%8llu\n",
+ version,
+ (u64)atomic64_read(&zram->stats.writestall));
+ up_read(&zram->init_lock);
+
+ return ret;
+}
+
static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
+static DEVICE_ATTR_RO(debug_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
@@ -514,7 +511,7 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
goto out_error;
}
- meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
+ meta->mem_pool = zs_create_pool(pool_name);
if (!meta->mem_pool) {
pr_err("Error creating memory pool\n");
goto out_error;
@@ -650,7 +647,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
{
int ret = 0;
size_t clen;
- unsigned long handle;
+ unsigned long handle = 0;
struct page *page;
unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
struct zram_meta *meta = zram->meta;
@@ -673,9 +670,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
goto out;
}
- zstrm = zcomp_strm_find(zram->comp);
+compress_again:
user_mem = kmap_atomic(page);
-
if (is_partial_io(bvec)) {
memcpy(uncmem + offset, user_mem + bvec->bv_offset,
bvec->bv_len);
@@ -699,6 +695,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
goto out;
}
+ zstrm = zcomp_strm_find(zram->comp);
ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
if (!is_partial_io(bvec)) {
kunmap_atomic(user_mem);
@@ -710,6 +707,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
pr_err("Compression failed! err=%d\n", ret);
goto out;
}
+
src = zstrm->buffer;
if (unlikely(clen > max_zpage_size)) {
clen = PAGE_SIZE;
@@ -717,8 +715,35 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
src = uncmem;
}
- handle = zs_malloc(meta->mem_pool, clen);
+ /*
+ * handle allocation has 2 paths:
+ * a) fast path is executed with preemption disabled (for
+ * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
+ * since we can't sleep;
+ * b) slow path enables preemption and attempts to allocate
+ * the page with __GFP_DIRECT_RECLAIM bit set. we have to
+ * put per-cpu compression stream and, thus, to re-do
+ * the compression once handle is allocated.
+ *
+ * if we have a 'non-null' handle here then we are coming
+ * from the slow path and handle has already been allocated.
+ */
+ if (!handle)
+ handle = zs_malloc(meta->mem_pool, clen,
+ __GFP_KSWAPD_RECLAIM |
+ __GFP_NOWARN |
+ __GFP_HIGHMEM);
if (!handle) {
+ zcomp_strm_release(zram->comp, zstrm);
+ zstrm = NULL;
+
+ atomic64_inc(&zram->stats.writestall);
+
+ handle = zs_malloc(meta->mem_pool, clen,
+ GFP_NOIO | __GFP_HIGHMEM);
+ if (handle)
+ goto compress_again;
+
pr_err("Error allocating memory for compressed page: %u, size=%zu\n",
index, clen);
ret = -ENOMEM;
@@ -1009,7 +1034,6 @@ static void zram_reset_device(struct zram *zram)
/* Reset stats */
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
- zram->max_comp_streams = 1;
set_capacity(zram->disk, 0);
part_stat_set_all(&zram->disk->part0, 0);
@@ -1038,7 +1062,7 @@ static ssize_t disksize_store(struct device *dev,
if (!meta)
return -ENOMEM;
- comp = zcomp_create(zram->compressor, zram->max_comp_streams);
+ comp = zcomp_create(zram->compressor);
if (IS_ERR(comp)) {
pr_err("Cannot initialise %s compressing backend\n",
zram->compressor);
@@ -1177,6 +1201,7 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_comp_algorithm.attr,
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
+ &dev_attr_debug_stat.attr,
NULL,
};
@@ -1273,7 +1298,6 @@ static int zram_add(void)
}
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
zram->meta = NULL;
- zram->max_comp_streams = 1;
pr_info("Added device: %s\n", zram->disk->disk_name);
return device_id;
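The compress_again loop above is the price of per-cpu streams: with preemption off, the fast path must allocate without direct reclaim, and when that fails the stream has to be released so the slow path can sleep in zs_malloc(), after which the page is recompressed under a (possibly different) stream. The retry shape, stripped to a sketch (zram locals assumed from the surrounding function):

	unsigned long handle = 0;
	...
	compress_again:
		zstrm = zcomp_strm_find(zram->comp);	/* preemption off */
		/* ... compress the page into zstrm->buffer, length clen ... */
		if (!handle)	/* fast path: no direct reclaim allowed */
			handle = zs_malloc(meta->mem_pool, clen,
					   __GFP_KSWAPD_RECLAIM |
					   __GFP_NOWARN | __GFP_HIGHMEM);
		if (!handle) {
			zcomp_strm_release(zram->comp, zstrm);	/* may sleep now */
			atomic64_inc(&zram->stats.writestall);
			handle = zs_malloc(meta->mem_pool, clen,
					   GFP_NOIO | __GFP_HIGHMEM);
			if (handle)
				goto compress_again;	/* redo compression */
			ret = -ENOMEM;
		}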
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 8e9233968..3f5bf66a2 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -85,6 +85,7 @@ struct zram_stats {
atomic64_t zero_pages; /* no. of zero filled pages */
atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */
+ atomic64_t writestall; /* no. of write slow paths */
};
struct zram_meta {
@@ -102,7 +103,6 @@ struct zram {
* the number of pages zram can consume for storing compressed data
*/
unsigned long limit_pages;
- int max_comp_streams;
struct zram_stats stats;
atomic_t refcount; /* refcount for zram_meta */
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a520fe6f1..1fac1e8c7 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -122,6 +122,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3432) },
{ USB_DEVICE(0x13d3, 0x3472) },
{ USB_DEVICE(0x13d3, 0x3474) },
+ { USB_DEVICE(0x13d3, 0x3487) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
@@ -188,6 +189,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
@@ -206,7 +208,8 @@ static int ath3k_load_firmware(struct usb_device *udev,
const struct firmware *firmware)
{
u8 *send_buf;
- int err, pipe, len, size, sent = 0;
+ int len = 0;
+ int err, pipe, size, sent = 0;
int count = firmware->size;
BT_DBG("udev %p", udev);
@@ -302,7 +305,8 @@ static int ath3k_load_fwfile(struct usb_device *udev,
const struct firmware *firmware)
{
u8 *send_buf;
- int err, pipe, len, size, count, sent = 0;
+ int len = 0;
+ int err, pipe, size, count, sent = 0;
int ret;
count = firmware->size;
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 05904732e..f742384b5 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -23,6 +23,17 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <net/bluetooth/bluetooth.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
#define BTM_HEADER_LEN 4
#define BTM_UPLD_SIZE 2312
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index f25a825a6..7ad8d61c0 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -510,34 +510,39 @@ static int btmrvl_download_cal_data(struct btmrvl_private *priv,
static int btmrvl_check_device_tree(struct btmrvl_private *priv)
{
struct device_node *dt_node;
+ struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
u8 cal_data[BT_CAL_HDR_LEN + BT_CAL_DATA_SIZE];
- int ret;
- u32 val;
+ int ret = 0;
+ u16 gpio, gap;
+
+ if (card->plt_of_node) {
+ dt_node = card->plt_of_node;
+ ret = of_property_read_u16(dt_node, "marvell,wakeup-pin",
+ &gpio);
+ if (ret)
+ gpio = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
+
+ ret = of_property_read_u16(dt_node, "marvell,wakeup-gap-ms",
+ &gap);
+ if (ret)
+ gap = (u8)(priv->btmrvl_dev.gpio_gap & 0x00ff);
- for_each_compatible_node(dt_node, NULL, "btmrvl,cfgdata") {
- ret = of_property_read_u32(dt_node, "btmrvl,gpio-gap", &val);
- if (!ret)
- priv->btmrvl_dev.gpio_gap = val;
+ priv->btmrvl_dev.gpio_gap = (gpio << 8) + gap;
- ret = of_property_read_u8_array(dt_node, "btmrvl,cal-data",
+ ret = of_property_read_u8_array(dt_node, "marvell,cal-data",
cal_data + BT_CAL_HDR_LEN,
BT_CAL_DATA_SIZE);
- if (ret) {
- of_node_put(dt_node);
+ if (ret)
return ret;
- }
BT_DBG("Use cal data from device tree");
ret = btmrvl_download_cal_data(priv, cal_data,
BT_CAL_DATA_SIZE);
- if (ret) {
+ if (ret)
BT_ERR("Fail to download calibrate data");
- of_node_put(dt_node);
- return ret;
- }
}
- return 0;
+ return ret;
}
static int btmrvl_setup(struct hci_dev *hdev)
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index d6588947c..e569ff003 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -52,6 +52,68 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
{"EXTLAST", NULL, 0, 0xFE},
};
+static const struct of_device_id btmrvl_sdio_of_match_table[] = {
+ { .compatible = "marvell,sd8897-bt" },
+ { .compatible = "marvell,sd8997-bt" },
+ { }
+};
+
+static irqreturn_t btmrvl_wake_irq_bt(int irq, void *priv)
+{
+ struct btmrvl_plt_wake_cfg *cfg = priv;
+
+ if (cfg->irq_bt >= 0) {
+ pr_info("%s: wake by bt", __func__);
+ cfg->wake_by_bt = true;
+ disable_irq_nosync(irq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* This function parses the device tree node using the mmc subnode
+ * devicetree API. The device node is saved in card->plt_of_node.
+ * If the device tree node exists and includes an interrupts attribute,
+ * this function requests the platform-specific wakeup interrupt.
+ */
+static int btmrvl_sdio_probe_of(struct device *dev,
+ struct btmrvl_sdio_card *card)
+{
+ struct btmrvl_plt_wake_cfg *cfg;
+ int ret;
+
+ if (!dev->of_node ||
+ !of_match_node(btmrvl_sdio_of_match_table, dev->of_node)) {
+ pr_err("sdio platform data not available");
+ return -1;
+ }
+
+ card->plt_of_node = dev->of_node;
+
+ card->plt_wake_cfg = devm_kzalloc(dev, sizeof(*card->plt_wake_cfg),
+ GFP_KERNEL);
+ cfg = card->plt_wake_cfg;
+ if (cfg && card->plt_of_node) {
+ cfg->irq_bt = irq_of_parse_and_map(card->plt_of_node, 0);
+ if (!cfg->irq_bt) {
+ dev_err(dev, "fail to parse irq_bt from device tree");
+ } else {
+ ret = devm_request_irq(dev, cfg->irq_bt,
+ btmrvl_wake_irq_bt,
+ IRQF_TRIGGER_LOW,
+ "bt_wake", cfg);
+ if (ret) {
+ dev_err(dev,
+ "Failed to request irq_bt %d (%d)\n",
+ cfg->irq_bt, ret);
+ }
+ disable_irq(cfg->irq_bt);
+ }
+ }
+
+ return 0;
+}
+
/* The btmrvl_sdio_remove() callback function is called
* when user removes this module from kernel space or ejects
* the card from the slot. The driver handles these 2 cases
@@ -1464,6 +1526,9 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
btmrvl_sdio_enable_host_int(card);
+ /* Device tree node parsing and platform-specific configuration */
+ btmrvl_sdio_probe_of(&func->dev, card);
+
priv = btmrvl_add_card(card);
if (!priv) {
BT_ERR("Initializing card failed!");
@@ -1544,6 +1609,13 @@ static int btmrvl_sdio_suspend(struct device *dev)
return 0;
}
+ /* Enable platform specific wakeup interrupt */
+ if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
+ card->plt_wake_cfg->wake_by_bt = false;
+ enable_irq(card->plt_wake_cfg->irq_bt);
+ enable_irq_wake(card->plt_wake_cfg->irq_bt);
+ }
+
priv = card->priv;
priv->adapter->is_suspending = true;
hcidev = priv->btmrvl_dev.hcidev;
@@ -1606,6 +1678,13 @@ static int btmrvl_sdio_resume(struct device *dev)
BT_DBG("%s: SDIO resume", hcidev->name);
hci_resume_dev(hcidev);
+ /* Disable platform specific wakeup interrupt */
+ if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) {
+ disable_irq_wake(card->plt_wake_cfg->irq_bt);
+ if (!card->plt_wake_cfg->wake_by_bt)
+ disable_irq(card->plt_wake_cfg->irq_bt);
+ }
+
return 0;
}
diff --git a/drivers/bluetooth/btmrvl_sdio.h b/drivers/bluetooth/btmrvl_sdio.h
index 1a3bd064c..3a522d23e 100644
--- a/drivers/bluetooth/btmrvl_sdio.h
+++ b/drivers/bluetooth/btmrvl_sdio.h
@@ -62,6 +62,10 @@
#define FIRMWARE_READY 0xfedc
+struct btmrvl_plt_wake_cfg {
+ int irq_bt;
+ bool wake_by_bt;
+};
struct btmrvl_sdio_card_reg {
u8 cfg;
@@ -97,6 +101,8 @@ struct btmrvl_sdio_card {
u16 sd_blksz_fw_dl;
u8 rx_unit;
struct btmrvl_private *priv;
+ struct device_node *plt_of_node;
+ struct btmrvl_plt_wake_cfg *plt_wake_cfg;
};
struct btmrvl_sdio_device {
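The plt_wake_cfg additions arm the out-of-band wake interrupt only across suspend: suspend enables the IRQ and marks it wake-capable, the handler records wake_by_bt and disables itself with disable_irq_nosync(), and resume disarms the wake source, re-disabling the IRQ only if it never fired. The arm/disarm pairing as a sketch (cfg fields as declared above):

	/* Suspend: arm the BT wakeup interrupt. */
	cfg->wake_by_bt = false;
	enable_irq(cfg->irq_bt);
	enable_irq_wake(cfg->irq_bt);

	/* Resume: disarm. If the IRQ fired, its handler already
	 * disabled it, so don't disable it twice. */
	disable_irq_wake(cfg->irq_bt);
	if (!cfg->wake_by_bt)
		disable_irq(cfg->irq_bt);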
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 154b9ecc9..bc83c2bb5 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -236,6 +236,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -2001,12 +2002,13 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
return -EINVAL;
}
- /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
- * supported by this firmware loading method. This check has been
- * put in place to ensure correct forward compatibility options
- * when newer hardware variants come along.
+ /* At the moment the iBT 3.0 hardware variants 0x0b (LnP/SfP)
+ * and 0x0c (WsP) are supported by this firmware loading method.
+ *
+ * This check has been put in place to ensure correct forward
+ * compatibility options when newer hardware variants come along.
*/
- if (ver.hw_variant != 0x0b) {
+ if (ver.hw_variant != 0x0b && ver.hw_variant != 0x0c) {
BT_ERR("%s: Unsupported Intel hardware variant (%u)",
hdev->name, ver.hw_variant);
return -EINVAL;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 923ec3375..f639417ca 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -825,6 +825,7 @@ static const struct acpi_device_id bcm_acpi_match[] = {
{ "BCM2E64", 0 },
{ "BCM2E65", 0 },
{ "BCM2E67", 0 },
+ { "BCM2E71", 0 },
{ "BCM2E7B", 0 },
{ "BCM2E7C", 0 },
{ },
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 064f2fefa..d7d23ceba 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -102,13 +102,12 @@ static const u16 crc_table[] = {
/* Initialise the crc calculator */
#define BCSP_CRC_INIT(x) x = 0xffff
-/*
- Update crc with next data byte
-
- Implementation note
- The data byte is treated as two nibbles. The crc is generated
- in reverse, i.e., bits are fed into the register from the top.
-*/
+/* Update crc with next data byte
+ *
+ * Implementation note
+ * The data byte is treated as two nibbles. The crc is generated
+ * in reverse, i.e., bits are fed into the register from the top.
+ */
static void bcsp_crc_update(u16 *crc, u8 d)
{
u16 reg = *crc;
@@ -223,9 +222,10 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
}
/* Max len of packet: (original len +4(bcsp hdr) +2(crc))*2
- (because bytes 0xc0 and 0xdb are escaped, worst case is
- when the packet is all made of 0xc0 and 0xdb :) )
- + 2 (0xc0 delimiters at start and end). */
+ * (because bytes 0xc0 and 0xdb are escaped, worst case is
+ * when the packet is all made of 0xc0 and 0xdb :) )
+ * + 2 (0xc0 delimiters at start and end).
+ */
nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
if (!nskb)
@@ -285,7 +285,7 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
struct bcsp_struct *bcsp = hu->priv;
unsigned long flags;
struct sk_buff *skb;
-
+
/* First of all, check for unreliable messages in the queue,
since they have priority */
@@ -305,8 +305,9 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
}
/* Now, try to send a reliable pkt. We can only send a
- reliable packet if the number of packets sent but not yet ack'ed
- is < than the winsize */
+ * reliable packet if the number of packets sent but not yet ack'ed
+ * is < than the winsize
+ */
spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING);
@@ -332,12 +333,14 @@ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
spin_unlock_irqrestore(&bcsp->unack.lock, flags);
/* We could not send a reliable packet, either because there are
- none or because there are too many unack'ed pkts. Did we receive
- any packets we have not acknowledged yet ? */
+ * none or because there are too many unack'ed pkts. Did we receive
+ * any packets we have not acknowledged yet ?
+ */
if (bcsp->txack_req) {
/* if so, craft an empty ACK pkt and send it on BCSP unreliable
- channel 0 */
+ * channel 0
+ */
struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, NULL, 0, BCSP_ACK_PKT);
return nskb;
}
@@ -399,8 +402,9 @@ static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
}
/* Handle BCSP link-establishment packets. When we
- detect a "sync" packet, symptom that the BT module has reset,
- we do nothing :) (yet) */
+ * detect a "sync" packet, symptom that the BT module has reset,
+ * we do nothing :) (yet)
+ */
static void bcsp_handle_le_pkt(struct hci_uart *hu)
{
struct bcsp_struct *bcsp = hu->priv;
@@ -462,7 +466,7 @@ static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char
case 0xdd:
memcpy(skb_put(bcsp->rx_skb, 1), &db, 1);
if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
- bcsp->rx_state != BCSP_W4_CRC)
+ bcsp->rx_state != BCSP_W4_CRC)
bcsp_crc_update(&bcsp->message_crc, 0xdb);
bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC;
bcsp->rx_count--;
@@ -534,7 +538,7 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
} else {
BT_ERR("Packet for unknown channel (%u %s)",
bcsp->rx_skb->data[1] & 0x0f,
- bcsp->rx_skb->data[0] & 0x80 ?
+ bcsp->rx_skb->data[0] & 0x80 ?
"reliable" : "unreliable");
kfree_skb(bcsp->rx_skb);
}
@@ -562,7 +566,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
struct bcsp_struct *bcsp = hu->priv;
const unsigned char *ptr;
- BT_DBG("hu %p count %d rx_state %d rx_count %ld",
+ BT_DBG("hu %p count %d rx_state %d rx_count %ld",
hu, count, bcsp->rx_state, bcsp->rx_count);
ptr = data;
@@ -591,7 +595,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
continue;
}
if (bcsp->rx_skb->data[0] & 0x80 /* reliable pkt */
- && (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) {
+ && (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) {
BT_ERR("Out-of-order packet arrived, got %u expected %u",
bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack);
@@ -601,7 +605,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
continue;
}
bcsp->rx_state = BCSP_W4_DATA;
- bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) +
+ bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) +
(bcsp->rx_skb->data[2] << 4); /* May be 0 */
continue;
@@ -615,7 +619,7 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
case BCSP_W4_CRC:
if (bitrev16(bcsp->message_crc) != bscp_get_crc(bcsp)) {
- BT_ERR ("Checksum failed: computed %04x received %04x",
+ BT_ERR("Checksum failed: computed %04x received %04x",
bitrev16(bcsp->message_crc),
bscp_get_crc(bcsp));
@@ -653,8 +657,9 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
BCSP_CRC_INIT(bcsp->message_crc);
/* Do not increment ptr or decrement count
- * Allocate packet. Max len of a BCSP pkt=
- * 0xFFF (payload) +4 (header) +2 (crc) */
+ * Allocate packet. Max len of a BCSP pkt=
+ * 0xFFF (payload) +4 (header) +2 (crc)
+ */
bcsp->rx_skb = bt_skb_alloc(0x1005, GFP_ATOMIC);
if (!bcsp->rx_skb) {
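The 0x1005 constant is exactly the sum in the comment; an illustrative compile-time check (not part of the patch):

    BUILD_BUG_ON(0xfff + 4 + 2 != 0x1005);  /* max payload + header + CRC */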
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index fefff3498..4124269aa 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -1210,8 +1210,7 @@ static int intel_probe(struct platform_device *pdev)
idev->pdev = pdev;
- idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_LOW);
+ idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(idev->reset)) {
dev_err(&pdev->dev, "Unable to retrieve gpio\n");
return PTR_ERR(idev->reset);
@@ -1223,8 +1222,7 @@ static int intel_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n");
- host_wake = devm_gpiod_get_optional(&pdev->dev, "host-wake",
- GPIOD_IN);
+ host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN);
if (IS_ERR(host_wake)) {
dev_err(&pdev->dev, "Unable to retrieve IRQ\n");
goto no_irq;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index c00168a5b..49b3e1e2d 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -227,7 +227,7 @@ static int hci_uart_flush(struct hci_dev *hdev)
tty_ldisc_flush(tty);
tty_driver_flush_buffer(tty);
- if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
hu->proto->flush(hu);
return 0;
@@ -492,7 +492,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
cancel_work_sync(&hu->write_work);
- if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
+ if (test_and_clear_bit(HCI_UART_PROTO_READY, &hu->flags)) {
if (hdev) {
if (test_bit(HCI_UART_REGISTERED, &hu->flags))
hci_unregister_dev(hdev);
@@ -500,6 +500,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
}
hu->proto->close(hu);
}
+ clear_bit(HCI_UART_PROTO_SET, &hu->flags);
kfree(hu);
}
@@ -526,7 +527,7 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
if (tty != hu->tty)
return;
- if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags))
hci_uart_tx_wakeup(hu);
}
@@ -550,7 +551,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
if (!hu || tty != hu->tty)
return;
- if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
return;
/* It does not need a lock here as it is already protected by a mutex in
@@ -638,9 +639,11 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id)
return err;
hu->proto = p;
+ set_bit(HCI_UART_PROTO_READY, &hu->flags);
err = hci_uart_register_dev(hu);
if (err) {
+ clear_bit(HCI_UART_PROTO_READY, &hu->flags);
p->close(hu);
return err;
}
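The new flag separates "a protocol was chosen" (HCI_UART_PROTO_SET) from "the protocol may be used" (HCI_UART_PROTO_READY): READY must be set before hci_uart_register_dev() can generate traffic, and cleared again if registration fails. A sketch of the consumer side (illustrative shape, not verbatim driver code):

    /* Data paths gate on READY, never on SET: a selected but not yet
     * registered protocol must not be fed data. */
    if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
        return;
    hu->proto->recv(hu, data, count);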
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 4814ff08f..839bad1d8 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -95,6 +95,7 @@ struct hci_uart {
/* HCI_UART proto flag bits */
#define HCI_UART_PROTO_SET 0
#define HCI_UART_REGISTERED 1
+#define HCI_UART_PROTO_READY 2
/* TX states */
#define HCI_UART_SENDING 1
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index d4a3a3133..c5a7de9bc 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -48,7 +48,7 @@ config ARM_CCI5xx_PMU
If unsure, say Y
config ARM_CCN
- bool "ARM CCN driver support"
+ tristate "ARM CCN driver support"
depends on ARM || ARM64
depends on PERF_EVENTS
help
@@ -58,6 +58,7 @@ config ARM_CCN
config BRCMSTB_GISB_ARB
bool "Broadcom STB GISB bus arbiter"
depends on ARM || MIPS
+ default ARCH_BRCMSTB || BMIPS_GENERIC
help
Driver for the Broadcom Set Top Box System-on-a-chip internal bus
arbiter. This driver provides timeout and target abort error handling
@@ -110,7 +111,7 @@ config OMAP_OCP2SCP
config SIMPLE_PM_BUS
bool "Simple Power-Managed Bus Driver"
depends on OF && PM
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
help
Driver for transparent busses that don't need a real driver, but
where the bus controller is part of a PM domain, or under the control
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index 7082c7268..acc3eb542 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -1189,7 +1189,7 @@ static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb,
perf_pmu_migrate_context(&dt->pmu, cpu, target);
cpumask_set_cpu(target, &dt->cpu);
if (ccn->irq)
- WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0);
+ WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
default:
break;
}
@@ -1278,7 +1278,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
/* Also make sure that the overflow interrupt is handled by this CPU */
if (ccn->irq) {
- err = irq_set_affinity(ccn->irq, &ccn->dt.cpu);
+ err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu);
if (err) {
dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
goto error_set_affinity;
@@ -1306,7 +1306,8 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
{
int i;
- irq_set_affinity(ccn->irq, cpu_possible_mask);
+ if (ccn->irq)
+ irq_set_affinity_hint(ccn->irq, NULL);
unregister_cpu_notifier(&ccn->dt.cpu_nb);
for (i = 0; i < ccn->num_xps; i++)
writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
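irq_set_affinity_hint() publishes a preferred CPU mask (consumed by the core and by irqbalance) rather than forcing the affinity, and the hint must be dropped by passing NULL before the interrupt is freed. The pairing, sketched with illustrative names:

    err = irq_set_affinity_hint(irq, cpumask_of(cpu));
    /* ... interrupt in use ... */
    irq_set_affinity_hint(irq, NULL);   /* clear the hint before free_irq() */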
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index f364fa4d2..72fe0a5a8 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -30,6 +30,10 @@
#include <asm/signal.h>
#endif
+#ifdef CONFIG_MIPS
+#include <asm/traps.h>
+#endif
+
#define ARB_ERR_CAP_CLEAR (1 << 0)
#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12)
#define ARB_ERR_CAP_STATUS_TEA (1 << 11)
@@ -238,6 +242,29 @@ static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
}
#endif
+#ifdef CONFIG_MIPS
+static int brcmstb_bus_error_handler(struct pt_regs *regs, int is_fixup)
+{
+ int ret = 0;
+ struct brcmstb_gisb_arb_device *gdev;
+ u32 cap_status;
+
+ list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next) {
+ cap_status = gisb_read(gdev, ARB_ERR_CAP_STATUS);
+
+ /* Invalid captured address, bail out */
+ if (!(cap_status & ARB_ERR_CAP_STATUS_VALID)) {
+ is_fixup = 1;
+ goto out;
+ }
+
+ ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
+ }
+out:
+ return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
+}
+#endif
+
static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id)
{
brcmstb_gisb_arb_decode_addr(dev_id, "timeout");
@@ -355,6 +382,9 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
"imprecise external abort");
#endif
+#ifdef CONFIG_MIPS
+ board_be_handler = brcmstb_bus_error_handler;
+#endif
dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
gdev->base, timeout_irq, tea_irq);
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index 1c543effe..cad49bc38 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -599,8 +599,8 @@ BUILD_PERDEV_HELPER(cpu_up) /* int mips_cdmm_cpu_up_helper(...) */
* mips_cdmm_bus_down() - Tear down the CDMM bus.
* @data: Pointer to unsigned int CPU number.
*
- * This work_on_cpu callback function is executed on a given CPU to call the
- * CDMM driver cpu_down callback for all devices on that CPU.
+ * This function is executed on the hotplugged CPU and calls the CDMM
+ * driver cpu_down callback for all devices on that CPU.
*/
static long mips_cdmm_bus_down(void *data)
{
@@ -630,7 +630,9 @@ static long mips_cdmm_bus_down(void *data)
* CDMM devices on that CPU, or to call the CDMM driver cpu_up callback for all
* devices already discovered on that CPU.
*
- * It is used during initialisation and when CPUs are brought online.
+ * It is used as work_on_cpu callback function during
+ * initialisation. When CPUs are brought online the function is
+ * invoked directly on the hotplugged CPU.
*/
static long mips_cdmm_bus_up(void *data)
{
@@ -677,10 +679,10 @@ static int mips_cdmm_cpu_notify(struct notifier_block *nb,
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
- work_on_cpu(cpu, mips_cdmm_bus_up, &cpu);
+ mips_cdmm_bus_up(&cpu);
break;
case CPU_DOWN_PREPARE:
- work_on_cpu(cpu, mips_cdmm_bus_down, &cpu);
+ mips_cdmm_bus_down(&cpu);
break;
default:
return NOTIFY_DONE;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3ec0766ed..601f64fcc 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -279,8 +279,7 @@ if RTC_LIB=n
config RTC
tristate "Enhanced Real Time Clock Support (legacy PC RTC driver)"
- depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \
- && !ARM && !SUPERH && !S390 && !AVR32 && !BLACKFIN && !UML
+ depends on ALPHA || (MIPS && MACH_LOONGSON64) || MN10300
---help---
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -585,7 +584,6 @@ config TELCLOCK
config DEVPORT
bool
- depends on !M68K
depends on ISA || PCI
default y
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 67ee8b08a..ac51149e9 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -268,19 +268,6 @@ config HW_RANDOM_NOMADIK
If unsure, say Y.
-config HW_RANDOM_PPC4XX
- tristate "PowerPC 4xx generic true random number generator support"
- depends on PPC && 4xx
- default HW_RANDOM
- ---help---
- This driver provides the kernel-side support for the TRNG hardware
- found in the security function of some PowerPC 4xx SoCs.
-
- To compile this driver as a module, choose M here: the
- module will be called ppc4xx-rng.
-
- If unsure, say N.
-
config HW_RANDOM_PSERIES
tristate "pSeries HW Random Number Generator support"
depends on PPC64 && IBMVIO
@@ -309,7 +296,8 @@ config HW_RANDOM_POWERNV
config HW_RANDOM_EXYNOS
tristate "EXYNOS HW random number generator support"
- depends on ARCH_EXYNOS
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ depends on HAS_IOMEM
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
@@ -333,6 +321,19 @@ config HW_RANDOM_TPM
If unsure, say Y.
+config HW_RANDOM_HISI
+ tristate "Hisilicon Random Number Generator support"
+ depends on HW_RANDOM && ARCH_HISI
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Hisilicon Hip04 and Hip05 SoC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hisi-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_MSM
tristate "Qualcomm SoCs Random Number Generator support"
depends on HW_RANDOM && ARCH_QCOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index f5a6fa769..63022b49f 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -22,10 +22,10 @@ obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
-obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
+obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o
obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index b98a141ea..ed44561ea 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -2,7 +2,7 @@
* exynos-rng.c - Random Number Generator driver for the exynos
*
* Copyright (C) 2012 Samsung Electronics
- * Jonghwa Lee <jonghwa3.lee@smasung.com>
+ * Jonghwa Lee <jonghwa3.lee@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -77,7 +77,8 @@ static int exynos_init(struct hwrng *rng)
pm_runtime_get_sync(exynos_rng->dev);
ret = exynos_rng_configure(exynos_rng);
- pm_runtime_put_noidle(exynos_rng->dev);
+ pm_runtime_mark_last_busy(exynos_rng->dev);
+ pm_runtime_put_autosuspend(exynos_rng->dev);
return ret;
}
@@ -118,6 +119,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
{
struct exynos_rng *exynos_rng;
struct resource *res;
+ int ret;
exynos_rng = devm_kzalloc(&pdev->dev, sizeof(struct exynos_rng),
GFP_KERNEL);
@@ -145,7 +147,21 @@ static int exynos_rng_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- return devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
+ ret = devm_hwrng_register(&pdev->dev, &exynos_rng->rng);
+ if (ret) {
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ }
+
+ return ret;
+}
+
+static int exynos_rng_remove(struct platform_device *pdev)
+{
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
}
static int __maybe_unused exynos_rng_runtime_suspend(struct device *dev)
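The init path above now uses the standard runtime-PM autosuspend hand-off instead of dropping the usage count with put_noidle; the idiom, sketched:

    pm_runtime_mark_last_busy(dev);     /* record recent activity */
    pm_runtime_put_autosuspend(dev);    /* suspend only once the timer expires */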
@@ -205,6 +221,7 @@ static struct platform_driver exynos_rng_driver = {
.of_match_table = exynos_rng_dt_match,
},
.probe = exynos_rng_probe,
+ .remove = exynos_rng_remove,
};
module_platform_driver(exynos_rng_driver);
diff --git a/drivers/char/hw_random/hisi-rng.c b/drivers/char/hw_random/hisi-rng.c
new file mode 100644
index 000000000..40d96572c
--- /dev/null
+++ b/drivers/char/hw_random/hisi-rng.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2016 HiSilicon Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+
+#define RNG_SEED 0x0
+#define RNG_CTRL 0x4
+ #define RNG_SEED_SEL BIT(2)
+ #define RNG_RING_EN BIT(1)
+ #define RNG_EN BIT(0)
+#define RNG_RAN_NUM 0x10
+#define RNG_PHY_SEED 0x14
+
+#define to_hisi_rng(p) container_of(p, struct hisi_rng, rng)
+
+static int seed_sel;
+module_param(seed_sel, int, S_IRUGO);
+MODULE_PARM_DESC(seed_sel, "Auto-reload seed source: 0 = LFSR (default); 1 = ring oscillator.");
+
+struct hisi_rng {
+ void __iomem *base;
+ struct hwrng rng;
+};
+
+static int hisi_rng_init(struct hwrng *rng)
+{
+ struct hisi_rng *hrng = to_hisi_rng(rng);
+ int val = RNG_EN;
+ u32 seed;
+
+ /* get a random number as initial seed */
+ get_random_bytes(&seed, sizeof(seed));
+
+ writel_relaxed(seed, hrng->base + RNG_SEED);
+
+	/*
+	 * The seed is reloaded periodically; there are two choices
+	 * of seed: by default the value comes from the LFSR, or the
+	 * seed generated by the ring oscillator is used instead.
+	 */
+ if (seed_sel == 1)
+ val |= RNG_RING_EN | RNG_SEED_SEL;
+
+ writel_relaxed(val, hrng->base + RNG_CTRL);
+ return 0;
+}
+
+static void hisi_rng_cleanup(struct hwrng *rng)
+{
+ struct hisi_rng *hrng = to_hisi_rng(rng);
+
+ writel_relaxed(0, hrng->base + RNG_CTRL);
+}
+
+static int hisi_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct hisi_rng *hrng = to_hisi_rng(rng);
+ u32 *data = buf;
+
+ *data = readl_relaxed(hrng->base + RNG_RAN_NUM);
+ return 4;
+}
+
+static int hisi_rng_probe(struct platform_device *pdev)
+{
+ struct hisi_rng *rng;
+ struct resource *res;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rng);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rng->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rng->base))
+ return PTR_ERR(rng->base);
+
+ rng->rng.name = pdev->name;
+ rng->rng.init = hisi_rng_init;
+ rng->rng.cleanup = hisi_rng_cleanup;
+ rng->rng.read = hisi_rng_read;
+
+ ret = devm_hwrng_register(&pdev->dev, &rng->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register hwrng\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id hisi_rng_dt_ids[] = {
+ { .compatible = "hisilicon,hip04-rng" },
+ { .compatible = "hisilicon,hip05-rng" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hisi_rng_dt_ids);
+
+static struct platform_driver hisi_rng_driver = {
+ .probe = hisi_rng_probe,
+ .driver = {
+ .name = "hisi-rng",
+ .of_match_table = of_match_ptr(hisi_rng_dt_ids),
+ },
+};
+
+module_platform_driver(hisi_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kefeng Wang <wangkefeng.wang@huawei>");
+MODULE_DESCRIPTION("Hisilicon random number generator driver");
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
deleted file mode 100644
index c0db4387d..000000000
--- a/drivers/char/hw_random/ppc4xx-rng.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Generic PowerPC 44x RNG driver
- *
- * Copyright 2011 IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; version 2 of the License.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/hw_random.h>
-#include <linux/delay.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <asm/io.h>
-
-#define PPC4XX_TRNG_DEV_CTRL 0x60080
-
-#define PPC4XX_TRNGE 0x00020000
-#define PPC4XX_TRNG_CTRL 0x0008
-#define PPC4XX_TRNG_CTRL_DALM 0x20
-#define PPC4XX_TRNG_STAT 0x0004
-#define PPC4XX_TRNG_STAT_B 0x1
-#define PPC4XX_TRNG_DATA 0x0000
-
-#define MODULE_NAME "ppc4xx_rng"
-
-static int ppc4xx_rng_data_present(struct hwrng *rng, int wait)
-{
- void __iomem *rng_regs = (void __iomem *) rng->priv;
- int busy, i, present = 0;
-
- for (i = 0; i < 20; i++) {
- busy = (in_le32(rng_regs + PPC4XX_TRNG_STAT) & PPC4XX_TRNG_STAT_B);
- if (!busy || !wait) {
- present = 1;
- break;
- }
- udelay(10);
- }
- return present;
-}
-
-static int ppc4xx_rng_data_read(struct hwrng *rng, u32 *data)
-{
- void __iomem *rng_regs = (void __iomem *) rng->priv;
- *data = in_le32(rng_regs + PPC4XX_TRNG_DATA);
- return 4;
-}
-
-static int ppc4xx_rng_enable(int enable)
-{
- struct device_node *ctrl;
- void __iomem *ctrl_reg;
- int err = 0;
- u32 val;
-
- /* Find the main crypto device node and map it to turn the TRNG on */
- ctrl = of_find_compatible_node(NULL, NULL, "amcc,ppc4xx-crypto");
- if (!ctrl)
- return -ENODEV;
-
- ctrl_reg = of_iomap(ctrl, 0);
- if (!ctrl_reg) {
- err = -ENODEV;
- goto out;
- }
-
- val = in_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL);
-
- if (enable)
- val |= PPC4XX_TRNGE;
- else
- val = val & ~PPC4XX_TRNGE;
-
- out_le32(ctrl_reg + PPC4XX_TRNG_DEV_CTRL, val);
- iounmap(ctrl_reg);
-
-out:
- of_node_put(ctrl);
-
- return err;
-}
-
-static struct hwrng ppc4xx_rng = {
- .name = MODULE_NAME,
- .data_present = ppc4xx_rng_data_present,
- .data_read = ppc4xx_rng_data_read,
-};
-
-static int ppc4xx_rng_probe(struct platform_device *dev)
-{
- void __iomem *rng_regs;
- int err = 0;
-
- rng_regs = of_iomap(dev->dev.of_node, 0);
- if (!rng_regs)
- return -ENODEV;
-
- err = ppc4xx_rng_enable(1);
- if (err)
- return err;
-
- out_le32(rng_regs + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM);
- ppc4xx_rng.priv = (unsigned long) rng_regs;
-
- err = hwrng_register(&ppc4xx_rng);
-
- return err;
-}
-
-static int ppc4xx_rng_remove(struct platform_device *dev)
-{
- void __iomem *rng_regs = (void __iomem *) ppc4xx_rng.priv;
-
- hwrng_unregister(&ppc4xx_rng);
- ppc4xx_rng_enable(0);
- iounmap(rng_regs);
-
- return 0;
-}
-
-static const struct of_device_id ppc4xx_rng_match[] = {
- { .compatible = "ppc4xx-rng", },
- { .compatible = "amcc,ppc460ex-rng", },
- { .compatible = "amcc,ppc440epx-rng", },
- {},
-};
-MODULE_DEVICE_TABLE(of, ppc4xx_rng_match);
-
-static struct platform_driver ppc4xx_rng_driver = {
- .driver = {
- .name = MODULE_NAME,
- .of_match_table = ppc4xx_rng_match,
- },
- .probe = ppc4xx_rng_probe,
- .remove = ppc4xx_rng_remove,
-};
-
-module_platform_driver(ppc4xx_rng_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Josh Boyer <jwboyer@linux.vnet.ibm.com>");
-MODULE_DESCRIPTION("HW RNG driver for PPC 4xx processors");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 1e25b5205..7b1c412b4 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -104,7 +104,7 @@ enum si_intf_state {
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
enum si_type {
- SI_KCS, SI_SMIC, SI_BT
+ SI_KCS, SI_SMIC, SI_BT
};
static const char * const si_to_str[] = { "kcs", "smic", "bt" };
@@ -410,7 +410,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
rv = SI_SM_CALL_WITHOUT_DELAY;
}
- out:
+out:
return rv;
}
@@ -539,7 +539,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
static void handle_flags(struct smi_info *smi_info)
{
- retry:
+retry:
if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
/* Watchdog pre-timeout */
smi_inc_stat(smi_info, watchdog_pretimeouts);
@@ -831,7 +831,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
{
enum si_sm_result si_sm_result;
- restart:
+restart:
/*
* There used to be a loop here that waited a little while
* (around 25us) before giving up. That turned out to be
@@ -944,7 +944,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
smi_info->timer_running = false;
}
- out:
+out:
return si_sm_result;
}
@@ -1190,7 +1190,7 @@ static void smi_timeout(unsigned long data)
timeout = jiffies + SI_TIMEOUT_JIFFIES;
}
- do_mod_timer:
+do_mod_timer:
if (smi_result != SI_SM_IDLE)
smi_mod_timer(smi_info, timeout);
else
@@ -1576,10 +1576,9 @@ static int port_setup(struct smi_info *info)
if (request_region(addr + idx * info->io.regspacing,
info->io.regsize, DEVICE_NAME) == NULL) {
/* Undo allocations */
- while (idx--) {
+ while (idx--)
release_region(addr + idx * info->io.regspacing,
info->io.regsize);
- }
return -EIO;
}
}
@@ -1638,25 +1637,28 @@ static void mem_outq(const struct si_sm_io *io, unsigned int offset,
}
#endif
-static void mem_cleanup(struct smi_info *info)
+static void mem_region_cleanup(struct smi_info *info, int num)
{
unsigned long addr = info->io.addr_data;
- int mapsize;
+ int idx;
+ for (idx = 0; idx < num; idx++)
+ release_mem_region(addr + idx * info->io.regspacing,
+ info->io.regsize);
+}
+
+static void mem_cleanup(struct smi_info *info)
+{
if (info->io.addr) {
iounmap(info->io.addr);
-
- mapsize = ((info->io_size * info->io.regspacing)
- - (info->io.regspacing - info->io.regsize));
-
- release_mem_region(addr, mapsize);
+ mem_region_cleanup(info, info->io_size);
}
}
static int mem_setup(struct smi_info *info)
{
unsigned long addr = info->io.addr_data;
- int mapsize;
+ int mapsize, idx;
if (!addr)
return -ENODEV;
@@ -1693,6 +1695,21 @@ static int mem_setup(struct smi_info *info)
}
/*
+ * Some BIOSes reserve disjoint memory regions in their ACPI
+ * tables. This causes problems when trying to request the
+ * entire region. Therefore we must request each register
+ * separately.
+ */
+ for (idx = 0; idx < info->io_size; idx++) {
+ if (request_mem_region(addr + idx * info->io.regspacing,
+ info->io.regsize, DEVICE_NAME) == NULL) {
+ /* Undo allocations */
+ mem_region_cleanup(info, idx);
+ return -EIO;
+ }
+ }
+
+ /*
* Calculate the total amount of memory to claim. This is an
* unusual looking calculation, but it avoids claiming any
* more memory than it has to. It will claim everything
@@ -1701,13 +1718,9 @@ static int mem_setup(struct smi_info *info)
*/
mapsize = ((info->io_size * info->io.regspacing)
- (info->io.regspacing - info->io.regsize));
-
- if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
- return -EIO;
-
info->io.addr = ioremap(addr, mapsize);
if (info->io.addr == NULL) {
- release_mem_region(addr, mapsize);
+ mem_region_cleanup(info, info->io_size);
return -EIO;
}
return 0;
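The mapsize expression is easiest to see with numbers. With hypothetical values io_size = 3, regspacing = 4 and regsize = 1:

    /* mapsize = io_size * regspacing - (regspacing - regsize)
     *         = 3 * 4 - (4 - 1) = 9
     * i.e. it spans the registers at +0, +4 and +8 up to the last
     * register's final byte, without claiming the pad bytes after it. */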
@@ -1975,7 +1988,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
}
}
rv = len;
- out:
+out:
kfree(str);
return rv;
}
@@ -2945,7 +2958,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
/* Check and record info from the get device id, in case we need it. */
rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
- out:
+out:
kfree(resp);
return rv;
}
@@ -3192,7 +3205,7 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
else
smi_info->supports_event_msg_buff = true;
- out:
+out:
kfree(resp);
return rv;
}
@@ -3718,10 +3731,10 @@ static int try_smi_init(struct smi_info *new_smi)
return 0;
- out_err_stop_timer:
+out_err_stop_timer:
wait_for_timer_and_thread(new_smi);
- out_err:
+out_err:
new_smi->interrupt_disabled = true;
if (new_smi->intf) {
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 8b3be8b92..097c86898 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1870,7 +1870,7 @@ static int try_init_spmi(struct SPMITable *spmi)
return -EIO;
}
- myaddr = spmi->addr.address >> 1;
+ myaddr = spmi->addr.address & 0x7f;
return new_ssif_client(myaddr, NULL, 0, 0, SI_SPMI);
}
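The arithmetic behind the fix, for a hypothetical BMC at 7-bit address 0x20 with the SPMI table field carrying the address in its low bits (the layout this patch presumes):

    /* field = 0x20
     * field & 0x7f == 0x20   correct 7-bit address
     * field >> 1   == 0x10   what the old code produced */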
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 22c27652e..d28922df0 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -1101,7 +1101,7 @@ static void dcd_change(MGSLPC_INFO *info, struct tty_struct *tty)
wake_up_interruptible(&info->status_event_wait_q);
wake_up_interruptible(&info->event_wait_q);
- if (info->port.flags & ASYNC_CHECK_CD) {
+ if (tty_port_check_carrier(&info->port)) {
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s CD now %s...", info->device_name,
(info->serial_signals & SerialSignal_DCD) ? "on" : "off");
@@ -1272,7 +1272,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):startup(%s)\n", __FILE__, __LINE__, info->device_name);
- if (info->port.flags & ASYNC_INITIALIZED)
+ if (tty_port_initialized(&info->port))
return 0;
if (!info->tx_buf) {
@@ -1311,7 +1311,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
if (tty)
clear_bit(TTY_IO_ERROR, &tty->flags);
- info->port.flags |= ASYNC_INITIALIZED;
+ tty_port_set_initialized(&info->port, 1);
return 0;
}
@@ -1322,7 +1322,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
{
unsigned long flags;
- if (!(info->port.flags & ASYNC_INITIALIZED))
+ if (!tty_port_initialized(&info->port))
return;
if (debug_level >= DEBUG_LEVEL_INFO)
@@ -1361,7 +1361,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
if (tty)
set_bit(TTY_IO_ERROR, &tty->flags);
- info->port.flags &= ~ASYNC_INITIALIZED;
+ tty_port_set_initialized(&info->port, 0);
}
static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
@@ -1466,15 +1466,8 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
}
info->timeout += HZ/50; /* Add .02 seconds of slop */
- if (cflag & CRTSCTS)
- info->port.flags |= ASYNC_CTS_FLOW;
- else
- info->port.flags &= ~ASYNC_CTS_FLOW;
-
- if (cflag & CLOCAL)
- info->port.flags &= ~ASYNC_CHECK_CD;
- else
- info->port.flags |= ASYNC_CHECK_CD;
+ tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
+ tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
/* process tty input control flags */
@@ -2246,7 +2239,7 @@ static int mgslpc_ioctl(struct tty_struct *tty,
if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
(cmd != TIOCMIWAIT)) {
- if (tty->flags & (1 << TTY_IO_ERROR))
+ if (tty_io_error(tty))
return -EIO;
}
@@ -2316,7 +2309,7 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
info->serial_signals |= SerialSignal_DTR;
- if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
+ if (!C_CRTSCTS(tty) || !tty_throttled(tty))
info->serial_signals |= SerialSignal_RTS;
spin_lock_irqsave(&info->lock, flags);
set_signals(info);
@@ -2345,7 +2338,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
if (tty_port_close_start(port, tty, filp) == 0)
goto cleanup;
- if (port->flags & ASYNC_INITIALIZED)
+ if (tty_port_initialized(port))
mgslpc_wait_until_sent(tty, info->timeout);
mgslpc_flush_buffer(tty);
@@ -2378,7 +2371,7 @@ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout)
if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent"))
return;
- if (!(info->port.flags & ASYNC_INITIALIZED))
+ if (!tty_port_initialized(&info->port))
goto exit;
orig_jiffies = jiffies;
@@ -3969,7 +3962,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
dev_kfree_skb(skb);
/* save start time for transmit timeout detection */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* start hardware transmitter if necessary */
spin_lock_irqsave(&info->lock, flags);
@@ -4032,7 +4025,7 @@ static int hdlcdev_open(struct net_device *dev)
tty_kref_put(tty);
/* enable network layer transmit */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_start_queue(dev);
/* inform generic HDLC layer of current DCD status */
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b583e5336..87ab9f6b4 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -260,6 +260,7 @@
#include <linux/irq.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
+#include <linux/uuid.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
@@ -722,15 +723,18 @@ retry:
}
}
-static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
+ if (nbits < 0)
+ return -EINVAL;
+
/* Cap the value to avoid overflows */
nbits = min(nbits, nbits_max);
- nbits = max(nbits, -nbits_max);
credit_entropy_bits(r, nbits);
+ return 0;
}
/*********************************************************************
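From userspace, the effect of the credit_entropy_bits_safe() change is that a negative count now fails the ioctl instead of being clamped. A hypothetical caller (requires CAP_SYS_ADMIN; names are illustrative):

    #include <linux/random.h>
    #include <sys/ioctl.h>

    /* Credit 'bits' of entropy via a /dev/random file descriptor.
     * With this patch, bits < 0 fails with errno == EINVAL. */
    static int credit_entropy(int fd, int bits)
    {
        return ioctl(fd, RNDADDTOENTCNT, &bits);
    }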
@@ -1542,8 +1546,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- credit_entropy_bits_safe(&input_pool, ent_count);
- return 0;
+ return credit_entropy_bits_safe(&input_pool, ent_count);
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1557,8 +1560,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
size);
if (retval < 0)
return retval;
- credit_entropy_bits_safe(&input_pool, ent_count);
- return 0;
+ return credit_entropy_bits_safe(&input_pool, ent_count);
case RNDZAPENTCNT:
case RNDCLEARPOOL:
/*
@@ -1621,26 +1623,6 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
return urandom_read(NULL, buf, count, NULL);
}
-/***************************************************************
- * Random UUID interface
- *
- * Used here for a Boot ID, but can be useful for other kernel
- * drivers.
- ***************************************************************/
-
-/*
- * Generate random UUID
- */
-void generate_random_uuid(unsigned char uuid_out[16])
-{
- get_random_bytes(uuid_out, 16);
- /* Set UUID version to 4 --- truly random generation */
- uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40;
- /* Set the UUID variant to DCE */
- uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
-}
-EXPORT_SYMBOL(generate_random_uuid);
-
/********************************************************************
*
* Sysctl interface
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index 781865084..78a492f5a 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -81,7 +81,6 @@ static int xilly_map_single_of(struct xilly_endpoint *ep,
{
dma_addr_t addr;
struct xilly_mapping *this;
- int rc;
this = kzalloc(sizeof(*this), GFP_KERNEL);
if (!this)
@@ -101,15 +100,7 @@ static int xilly_map_single_of(struct xilly_endpoint *ep,
*ret_dma_handle = addr;
- rc = devm_add_action(ep->dev, xilly_of_unmap, this);
-
- if (rc) {
- dma_unmap_single(ep->dev, addr, size, direction);
- kfree(this);
- return rc;
- }
-
- return 0;
+ return devm_add_action_or_reset(ep->dev, xilly_of_unmap, this);
}
static struct xilly_endpoint_hardware of_hw = {
diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c
index 941830021..dff2d1538 100644
--- a/drivers/char/xillybus/xillybus_pcie.c
+++ b/drivers/char/xillybus/xillybus_pcie.c
@@ -98,7 +98,6 @@ static int xilly_map_single_pci(struct xilly_endpoint *ep,
int pci_direction;
dma_addr_t addr;
struct xilly_mapping *this;
- int rc;
this = kzalloc(sizeof(*this), GFP_KERNEL);
if (!this)
@@ -120,14 +119,7 @@ static int xilly_map_single_pci(struct xilly_endpoint *ep,
*ret_dma_handle = addr;
- rc = devm_add_action(ep->dev, xilly_pci_unmap, this);
- if (rc) {
- pci_unmap_single(ep->pdev, addr, size, pci_direction);
- kfree(this);
- return rc;
- }
-
- return 0;
+ return devm_add_action_or_reset(ep->dev, xilly_pci_unmap, this);
}
static struct xilly_endpoint_hardware pci_hw = {
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 16f7d3342..98efbfcdb 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -175,6 +175,7 @@ config COMMON_CLK_KEYSTONE
config COMMON_CLK_NXP
def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX)
select REGMAP_MMIO if ARCH_LPC32XX
+ select MFD_SYSCON if ARCH_LPC18XX
---help---
Support for clock providers on NXP platforms.
@@ -197,10 +198,20 @@ config COMMON_CLK_PXA
---help---
Support for the Marvell PXA SoC.
+config COMMON_CLK_PIC32
+ def_bool COMMON_CLK && MACH_PIC32
+
+config COMMON_CLK_OXNAS
+ bool "Clock driver for the OXNAS SoC Family"
+ select MFD_SYSCON
+ ---help---
+ Support for the OXNAS SoC Family clocks.
+
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
source "drivers/clk/mvebu/Kconfig"
source "drivers/clk/qcom/Kconfig"
+source "drivers/clk/renesas/Kconfig"
source "drivers/clk/samsung/Kconfig"
source "drivers/clk/tegra/Kconfig"
source "drivers/clk/ti/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 46869d696..dcc5e698f 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_ARCH_MB86S7X) += clk-mb86s7x.o
obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
+obj-$(CONFIG_COMMON_CLK_OXNAS) += clk-oxnas.o
obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o
obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o
obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o
@@ -51,6 +52,7 @@ obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o
obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o
obj-$(CONFIG_COMMON_CLK_AT91) += at91/
+obj-$(CONFIG_ARCH_ARTPEC) += axis/
obj-y += bcm/
obj-$(CONFIG_ARCH_BERLIN) += berlin/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
@@ -58,10 +60,11 @@ obj-$(CONFIG_ARCH_MXC) += imx/
obj-$(CONFIG_MACH_INGENIC) += ingenic/
obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
+obj-$(CONFIG_MACH_PIC32) += microchip/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_ARCH_MMP) += mmp/
endif
-obj-$(CONFIG_PLAT_ORION) += mvebu/
+obj-y += mvebu/
obj-$(CONFIG_ARCH_MESON) += meson/
obj-$(CONFIG_ARCH_MXS) += mxs/
obj-$(CONFIG_MACH_PISTACHIO) += pistachio/
@@ -84,3 +87,4 @@ obj-$(CONFIG_X86) += x86/
obj-$(CONFIG_ARCH_ZX) += zte/
obj-$(CONFIG_ARCH_ZYNQ) += zynq/
obj-$(CONFIG_H8300) += h8300/
+obj-$(CONFIG_ARC_PLAT_AXS10X) += axs10x/
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 10f846cc8..25d590664 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -99,7 +99,7 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
struct clk_programmable *prog = to_clk_programmable(hw);
const struct clk_programmable_layout *layout = prog->layout;
unsigned int mask = layout->css_mask;
- unsigned int pckr = 0;
+ unsigned int pckr = index;
if (layout->have_slck_mck)
mask |= AT91_PMC_CSSMCK_MCK;
diff --git a/drivers/clk/axis/Makefile b/drivers/clk/axis/Makefile
new file mode 100644
index 000000000..628c9d3b9
--- /dev/null
+++ b/drivers/clk/axis/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MACH_ARTPEC6) += clk-artpec6.o
diff --git a/drivers/clk/axis/clk-artpec6.c b/drivers/clk/axis/clk-artpec6.c
new file mode 100644
index 000000000..ffc988b09
--- /dev/null
+++ b/drivers/clk/axis/clk-artpec6.c
@@ -0,0 +1,242 @@
+/*
+ * ARTPEC-6 clock initialization
+ *
+ * Copyright 2015-2016 Axis Communications AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <dt-bindings/clock/axis,artpec6-clkctrl.h>
+
+#define NUM_I2S_CLOCKS 2
+
+struct artpec6_clkctrl_drvdata {
+ struct clk *clk_table[ARTPEC6_CLK_NUMCLOCKS];
+ void __iomem *syscon_base;
+ struct clk_onecell_data clk_data;
+ spinlock_t i2scfg_lock;
+};
+
+static struct artpec6_clkctrl_drvdata *clkdata;
+
+static const char *const i2s_clk_names[NUM_I2S_CLOCKS] = {
+ "i2s0",
+ "i2s1",
+};
+
+static const int i2s_clk_indexes[NUM_I2S_CLOCKS] = {
+ ARTPEC6_CLK_I2S0_CLK,
+ ARTPEC6_CLK_I2S1_CLK,
+};
+
+static void of_artpec6_clkctrl_setup(struct device_node *np)
+{
+ int i;
+ const char *sys_refclk_name;
+ u32 pll_mode, pll_m, pll_n;
+ struct clk **clks;
+
+ /* Mandatory parent clock. */
+ i = of_property_match_string(np, "clock-names", "sys_refclk");
+ if (i < 0)
+ return;
+
+ sys_refclk_name = of_clk_get_parent_name(np, i);
+
+ clkdata = kzalloc(sizeof(*clkdata), GFP_KERNEL);
+ if (!clkdata)
+ return;
+
+ clks = clkdata->clk_table;
+
+ for (i = 0; i < ARTPEC6_CLK_NUMCLOCKS; ++i)
+ clks[i] = ERR_PTR(-EPROBE_DEFER);
+
+ clkdata->syscon_base = of_iomap(np, 0);
+ BUG_ON(clkdata->syscon_base == NULL);
+
+ /* Read PLL1 factors configured by boot strap pins. */
+ pll_mode = (readl(clkdata->syscon_base) >> 6) & 3;
+ switch (pll_mode) {
+ case 0: /* DDR3-2133 mode */
+ pll_m = 4;
+ pll_n = 85;
+ break;
+ case 1: /* DDR3-1866 mode */
+ pll_m = 6;
+ pll_n = 112;
+ break;
+ case 2: /* DDR3-1600 mode */
+ pll_m = 4;
+ pll_n = 64;
+ break;
+ case 3: /* DDR3-1333 mode */
+ pll_m = 8;
+ pll_n = 106;
+ break;
+ }
+
+ clks[ARTPEC6_CLK_CPU] =
+ clk_register_fixed_factor(NULL, "cpu", sys_refclk_name, 0, pll_n,
+ pll_m);
+ clks[ARTPEC6_CLK_CPU_PERIPH] =
+ clk_register_fixed_factor(NULL, "cpu_periph", "cpu", 0, 1, 2);
+
+ /* EPROBE_DEFER on the apb_clock is not handled in amba devices. */
+ clks[ARTPEC6_CLK_UART_PCLK] =
+ clk_register_fixed_factor(NULL, "uart_pclk", "cpu", 0, 1, 8);
+ clks[ARTPEC6_CLK_UART_REFCLK] =
+ clk_register_fixed_rate(NULL, "uart_ref", sys_refclk_name, 0,
+ 50000000);
+
+ clks[ARTPEC6_CLK_SPI_PCLK] =
+ clk_register_fixed_factor(NULL, "spi_pclk", "cpu", 0, 1, 8);
+ clks[ARTPEC6_CLK_SPI_SSPCLK] =
+ clk_register_fixed_rate(NULL, "spi_sspclk", sys_refclk_name, 0,
+ 50000000);
+
+ clks[ARTPEC6_CLK_DBG_PCLK] =
+ clk_register_fixed_factor(NULL, "dbg_pclk", "cpu", 0, 1, 8);
+
+ clkdata->clk_data.clks = clkdata->clk_table;
+ clkdata->clk_data.clk_num = ARTPEC6_CLK_NUMCLOCKS;
+
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clkdata->clk_data);
+}
+
+CLK_OF_DECLARE(artpec6_clkctrl, "axis,artpec6-clkctrl",
+ of_artpec6_clkctrl_setup);
+
+static int artpec6_clkctrl_probe(struct platform_device *pdev)
+{
+ int propidx;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct clk **clks = clkdata->clk_table;
+ const char *sys_refclk_name;
+ const char *i2s_refclk_name = NULL;
+ const char *frac_clk_name[2] = { NULL, NULL };
+ const char *i2s_mux_parents[2];
+ u32 muxreg;
+ int i;
+ int err = 0;
+
+ /* Mandatory parent clock. */
+ propidx = of_property_match_string(np, "clock-names", "sys_refclk");
+ if (propidx < 0)
+ return -EINVAL;
+
+ sys_refclk_name = of_clk_get_parent_name(np, propidx);
+
+ /* Find clock names of optional parent clocks. */
+ propidx = of_property_match_string(np, "clock-names", "i2s_refclk");
+ if (propidx >= 0)
+ i2s_refclk_name = of_clk_get_parent_name(np, propidx);
+
+ propidx = of_property_match_string(np, "clock-names", "frac_clk0");
+ if (propidx >= 0)
+ frac_clk_name[0] = of_clk_get_parent_name(np, propidx);
+ propidx = of_property_match_string(np, "clock-names", "frac_clk1");
+ if (propidx >= 0)
+ frac_clk_name[1] = of_clk_get_parent_name(np, propidx);
+
+ spin_lock_init(&clkdata->i2scfg_lock);
+
+ clks[ARTPEC6_CLK_NAND_CLKA] =
+ clk_register_fixed_factor(dev, "nand_clka", "cpu", 0, 1, 8);
+ clks[ARTPEC6_CLK_NAND_CLKB] =
+ clk_register_fixed_rate(dev, "nand_clkb", sys_refclk_name, 0,
+ 100000000);
+ clks[ARTPEC6_CLK_ETH_ACLK] =
+ clk_register_fixed_factor(dev, "eth_aclk", "cpu", 0, 1, 4);
+ clks[ARTPEC6_CLK_DMA_ACLK] =
+ clk_register_fixed_factor(dev, "dma_aclk", "cpu", 0, 1, 4);
+ clks[ARTPEC6_CLK_PTP_REF] =
+ clk_register_fixed_rate(dev, "ptp_ref", sys_refclk_name, 0,
+ 100000000);
+ clks[ARTPEC6_CLK_SD_PCLK] =
+ clk_register_fixed_rate(dev, "sd_pclk", sys_refclk_name, 0,
+ 100000000);
+ clks[ARTPEC6_CLK_SD_IMCLK] =
+ clk_register_fixed_rate(dev, "sd_imclk", sys_refclk_name, 0,
+ 100000000);
+ clks[ARTPEC6_CLK_I2S_HST] =
+ clk_register_fixed_factor(dev, "i2s_hst", "cpu", 0, 1, 8);
+
+ for (i = 0; i < NUM_I2S_CLOCKS; ++i) {
+ if (i2s_refclk_name && frac_clk_name[i]) {
+ i2s_mux_parents[0] = frac_clk_name[i];
+ i2s_mux_parents[1] = i2s_refclk_name;
+
+ clks[i2s_clk_indexes[i]] =
+ clk_register_mux(dev, i2s_clk_names[i],
+ i2s_mux_parents, 2,
+ CLK_SET_RATE_NO_REPARENT |
+ CLK_SET_RATE_PARENT,
+ clkdata->syscon_base + 0x14, i, 1,
+ 0, &clkdata->i2scfg_lock);
+ } else if (frac_clk_name[i]) {
+ /* Lock the mux for internal clock reference. */
+ muxreg = readl(clkdata->syscon_base + 0x14);
+ muxreg &= ~BIT(i);
+ writel(muxreg, clkdata->syscon_base + 0x14);
+ clks[i2s_clk_indexes[i]] =
+ clk_register_fixed_factor(dev, i2s_clk_names[i],
+ frac_clk_name[i], 0, 1,
+ 1);
+ } else if (i2s_refclk_name) {
+ /* Lock the mux for external clock reference. */
+ muxreg = readl(clkdata->syscon_base + 0x14);
+ muxreg |= BIT(i);
+ writel(muxreg, clkdata->syscon_base + 0x14);
+ clks[i2s_clk_indexes[i]] =
+ clk_register_fixed_factor(dev, i2s_clk_names[i],
+ i2s_refclk_name, 0, 1, 1);
+ }
+ }
+
+ clks[ARTPEC6_CLK_I2C] =
+ clk_register_fixed_rate(dev, "i2c", sys_refclk_name, 0, 100000000);
+
+ clks[ARTPEC6_CLK_SYS_TIMER] =
+ clk_register_fixed_rate(dev, "timer", sys_refclk_name, 0,
+ 100000000);
+ clks[ARTPEC6_CLK_FRACDIV_IN] =
+ clk_register_fixed_rate(dev, "fracdiv_in", sys_refclk_name, 0,
+ 600000000);
+
+ for (i = 0; i < ARTPEC6_CLK_NUMCLOCKS; ++i) {
+ if (IS_ERR(clks[i]) && PTR_ERR(clks[i]) != -EPROBE_DEFER) {
+ dev_err(dev,
+ "Failed to register clock at index %d err=%ld\n",
+ i, PTR_ERR(clks[i]));
+ err = PTR_ERR(clks[i]);
+ }
+ }
+
+ return err;
+}
+
+static const struct of_device_id artpec_clkctrl_of_match[] = {
+ { .compatible = "axis,artpec6-clkctrl" },
+ {}
+};
+
+static struct platform_driver artpec6_clkctrl_driver = {
+ .probe = artpec6_clkctrl_probe,
+ .driver = {
+ .name = "artpec6_clkctrl",
+ .of_match_table = artpec_clkctrl_of_match,
+ },
+};
+
+builtin_platform_driver(artpec6_clkctrl_driver);
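The fixed-factor and fixed-rate registrations above encode a small derived clock tree; the relationships, read straight off the code:

    /* cpu        = sys_refclk * pll_n / pll_m   (factors from boot straps)
     * cpu_periph = cpu / 2
     * uart_pclk  = spi_pclk = dbg_pclk = cpu / 8
     * e.g. pll_mode 0 (DDR3-2133): cpu = sys_refclk * 85 / 4 */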
diff --git a/drivers/clk/axs10x/Makefile b/drivers/clk/axs10x/Makefile
new file mode 100644
index 000000000..01996b871
--- /dev/null
+++ b/drivers/clk/axs10x/Makefile
@@ -0,0 +1 @@
+obj-y += i2s_pll_clock.o
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
new file mode 100644
index 000000000..411310d29
--- /dev/null
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -0,0 +1,228 @@
+/*
+ * Synopsys AXS10X SDP I2S PLL clock driver
+ *
+ * Copyright (C) 2016 Synopsys
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+/* PLL registers addresses */
+#define PLL_IDIV_REG 0x0
+#define PLL_FBDIV_REG 0x4
+#define PLL_ODIV0_REG 0x8
+#define PLL_ODIV1_REG 0xC
+
+struct i2s_pll_cfg {
+ unsigned int rate;
+ unsigned int idiv;
+ unsigned int fbdiv;
+ unsigned int odiv0;
+ unsigned int odiv1;
+};
+
+static const struct i2s_pll_cfg i2s_pll_cfg_27m[] = {
+	/* 27 MHz */
+ { 1024000, 0x104, 0x451, 0x10E38, 0x2000 },
+ { 1411200, 0x104, 0x596, 0x10D35, 0x2000 },
+ { 1536000, 0x208, 0xA28, 0x10B2C, 0x2000 },
+ { 2048000, 0x82, 0x451, 0x10E38, 0x2000 },
+ { 2822400, 0x82, 0x596, 0x10D35, 0x2000 },
+ { 3072000, 0x104, 0xA28, 0x10B2C, 0x2000 },
+ { 2116800, 0x82, 0x3CF, 0x10C30, 0x2000 },
+ { 2304000, 0x104, 0x79E, 0x10B2C, 0x2000 },
+ { 0, 0, 0, 0, 0 },
+};
+
+static const struct i2s_pll_cfg i2s_pll_cfg_28m[] = {
+	/* 28.224 MHz */
+ { 1024000, 0x82, 0x105, 0x107DF, 0x2000 },
+ { 1411200, 0x28A, 0x1, 0x10001, 0x2000 },
+ { 1536000, 0xA28, 0x187, 0x10042, 0x2000 },
+ { 2048000, 0x41, 0x105, 0x107DF, 0x2000 },
+ { 2822400, 0x145, 0x1, 0x10001, 0x2000 },
+ { 3072000, 0x514, 0x187, 0x10042, 0x2000 },
+ { 2116800, 0x514, 0x42, 0x10001, 0x2000 },
+ { 2304000, 0x619, 0x82, 0x10001, 0x2000 },
+ { 0, 0, 0, 0, 0 },
+};
+
+struct i2s_pll_clk {
+ void __iomem *base;
+ struct clk_hw hw;
+ struct device *dev;
+};
+
+static inline void i2s_pll_write(struct i2s_pll_clk *clk, unsigned int reg,
+ unsigned int val)
+{
+ writel_relaxed(val, clk->base + reg);
+}
+
+static inline unsigned int i2s_pll_read(struct i2s_pll_clk *clk,
+ unsigned int reg)
+{
+ return readl_relaxed(clk->base + reg);
+}
+
+static inline struct i2s_pll_clk *to_i2s_pll_clk(struct clk_hw *hw)
+{
+ return container_of(hw, struct i2s_pll_clk, hw);
+}
+
+static inline unsigned int i2s_pll_get_value(unsigned int val)
+{
+ return (val & 0x3F) + ((val >> 6) & 0x3F);
+}
+
+static const struct i2s_pll_cfg *i2s_pll_get_cfg(unsigned long prate)
+{
+ switch (prate) {
+ case 27000000:
+ return i2s_pll_cfg_27m;
+ case 28224000:
+ return i2s_pll_cfg_28m;
+ default:
+ return NULL;
+ }
+}
+
+static unsigned long i2s_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct i2s_pll_clk *clk = to_i2s_pll_clk(hw);
+ unsigned int idiv, fbdiv, odiv;
+
+ idiv = i2s_pll_get_value(i2s_pll_read(clk, PLL_IDIV_REG));
+ fbdiv = i2s_pll_get_value(i2s_pll_read(clk, PLL_FBDIV_REG));
+ odiv = i2s_pll_get_value(i2s_pll_read(clk, PLL_ODIV0_REG));
+
+ return ((parent_rate / idiv) * fbdiv) / odiv;
+}
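+
+/*
+ * Worked example (first 27 MHz table entry, nominal 1024000 Hz): each raw
+ * register value encodes its divider as the sum of its two 6-bit halves
+ * (see i2s_pll_get_value() above), so 0x104 -> 4 + 4 = 8,
+ * 0x451 -> 17 + 17 = 34 and 0x10E38 -> 56 + 56 = 112, giving
+ * rate = ((27000000 / 8) * 34) / 112 = 1024553 Hz, i.e. the nominal rate
+ * up to integer-division error. (Illustrative editorial note.)
+ */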
+
+static long i2s_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct i2s_pll_clk *clk = to_i2s_pll_clk(hw);
+ const struct i2s_pll_cfg *pll_cfg = i2s_pll_get_cfg(*prate);
+ int i;
+
+ if (!pll_cfg) {
+ dev_err(clk->dev, "invalid parent rate=%ld\n", *prate);
+ return -EINVAL;
+ }
+
+ for (i = 0; pll_cfg[i].rate != 0; i++)
+ if (pll_cfg[i].rate == rate)
+ return rate;
+
+ return -EINVAL;
+}
+
+static int i2s_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct i2s_pll_clk *clk = to_i2s_pll_clk(hw);
+ const struct i2s_pll_cfg *pll_cfg = i2s_pll_get_cfg(parent_rate);
+ int i;
+
+ if (!pll_cfg) {
+ dev_err(clk->dev, "invalid parent rate=%ld\n", parent_rate);
+ return -EINVAL;
+ }
+
+ for (i = 0; pll_cfg[i].rate != 0; i++) {
+ if (pll_cfg[i].rate == rate) {
+ i2s_pll_write(clk, PLL_IDIV_REG, pll_cfg[i].idiv);
+ i2s_pll_write(clk, PLL_FBDIV_REG, pll_cfg[i].fbdiv);
+ i2s_pll_write(clk, PLL_ODIV0_REG, pll_cfg[i].odiv0);
+ i2s_pll_write(clk, PLL_ODIV1_REG, pll_cfg[i].odiv1);
+ return 0;
+ }
+ }
+
+ dev_err(clk->dev, "invalid rate=%ld, parent_rate=%ld\n", rate,
+ parent_rate);
+ return -EINVAL;
+}
+
+static const struct clk_ops i2s_pll_ops = {
+ .recalc_rate = i2s_pll_recalc_rate,
+ .round_rate = i2s_pll_round_rate,
+ .set_rate = i2s_pll_set_rate,
+};
+
+static int i2s_pll_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ const char *clk_name;
+ const char *parent_name;
+ struct clk *clk;
+ struct i2s_pll_clk *pll_clk;
+ struct clk_init_data init;
+ struct resource *mem;
+
+ pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
+ if (!pll_clk)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pll_clk->base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(pll_clk->base))
+ return PTR_ERR(pll_clk->base);
+
+ clk_name = node->name;
+ init.name = clk_name;
+ init.ops = &i2s_pll_ops;
+ parent_name = of_clk_get_parent_name(node, 0);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ pll_clk->hw.init = &init;
+ pll_clk->dev = dev;
+
+ clk = devm_clk_register(dev, &pll_clk->hw);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to register %s clock (%ld)\n",
+ clk_name, PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+
+ return of_clk_add_provider(node, of_clk_src_simple_get, clk);
+}
+
+static int i2s_pll_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static const struct of_device_id i2s_pll_clk_id[] = {
+ { .compatible = "snps,axs10x-i2s-pll-clock", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, i2s_pll_clk_id);
+
+static struct platform_driver i2s_pll_clk_driver = {
+ .driver = {
+ .name = "axs10x-i2s-pll-clock",
+ .of_match_table = i2s_pll_clk_id,
+ },
+ .probe = i2s_pll_clk_probe,
+ .remove = i2s_pll_clk_remove,
+};
+module_platform_driver(i2s_pll_clk_driver);
+
+MODULE_AUTHOR("Jose Abreu <joabreu@synopsys.com>");
+MODULE_DESCRIPTION("Synopsys AXS10X SDP I2S PLL Clock Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 1f79f48d5..7a7970865 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -12,9 +12,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/**
@@ -40,6 +37,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/bcm2835.h>
+#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -119,6 +117,8 @@
#define CM_SDCCTL 0x1a8
#define CM_SDCDIV 0x1ac
#define CM_ARMCTL 0x1b0
+#define CM_AVEOCTL 0x1b8
+#define CM_AVEODIV 0x1bc
#define CM_EMMCCTL 0x1c0
#define CM_EMMCDIV 0x1c4
@@ -299,11 +299,11 @@
struct bcm2835_cprman {
struct device *dev;
void __iomem *regs;
- spinlock_t regs_lock;
+ spinlock_t regs_lock; /* spinlock for all clocks */
const char *osc_name;
struct clk_onecell_data onecell;
- struct clk *clks[BCM2835_CLOCK_COUNT];
+ struct clk *clks[];
};
static inline void cprman_write(struct bcm2835_cprman *cprman, u32 reg, u32 val)
@@ -316,6 +316,27 @@ static inline u32 cprman_read(struct bcm2835_cprman *cprman, u32 reg)
return readl(cprman->regs + reg);
}
+static int bcm2835_debugfs_regset(struct bcm2835_cprman *cprman, u32 base,
+ struct debugfs_reg32 *regs, size_t nregs,
+ struct dentry *dentry)
+{
+ struct dentry *regdump;
+ struct debugfs_regset32 *regset;
+
+ regset = devm_kzalloc(cprman->dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ return -ENOMEM;
+
+ regset->regs = regs;
+ regset->nregs = nregs;
+ regset->base = cprman->regs + base;
+
+ regdump = debugfs_create_regset32("regdump", S_IRUGO, dentry,
+ regset);
+
+ return regdump ? 0 : -ENOMEM;
+}
+
/*
* These are fixed clocks. They're probably not all root clocks and it may
* be possible to turn them on and off but until this is mapped out better
@@ -379,132 +400,27 @@ struct bcm2835_pll_ana_bits {
static const struct bcm2835_pll_ana_bits bcm2835_ana_default = {
.mask0 = 0,
.set0 = 0,
- .mask1 = ~(A2W_PLL_KI_MASK | A2W_PLL_KP_MASK),
+ .mask1 = (u32)~(A2W_PLL_KI_MASK | A2W_PLL_KP_MASK),
.set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT),
- .mask3 = ~A2W_PLL_KA_MASK,
+ .mask3 = (u32)~A2W_PLL_KA_MASK,
.set3 = (2 << A2W_PLL_KA_SHIFT),
.fb_prediv_mask = BIT(14),
};
static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = {
- .mask0 = ~(A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK),
+ .mask0 = (u32)~(A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK),
.set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT),
- .mask1 = ~(A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK),
+ .mask1 = (u32)~(A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK),
.set1 = (6 << A2W_PLLH_KP_SHIFT),
.mask3 = 0,
.set3 = 0,
.fb_prediv_mask = BIT(11),
};
-/*
- * PLLA is the auxiliary PLL, used to drive the CCP2 (Compact Camera
- * Port 2) transmitter clock.
- *
- * It is in the PX LDO power domain, which is on when the AUDIO domain
- * is on.
- */
-static const struct bcm2835_pll_data bcm2835_plla_data = {
- .name = "plla",
- .cm_ctrl_reg = CM_PLLA,
- .a2w_ctrl_reg = A2W_PLLA_CTRL,
- .frac_reg = A2W_PLLA_FRAC,
- .ana_reg_base = A2W_PLLA_ANA0,
- .reference_enable_mask = A2W_XOSC_CTRL_PLLA_ENABLE,
- .lock_mask = CM_LOCK_FLOCKA,
-
- .ana = &bcm2835_ana_default,
-
- .min_rate = 600000000u,
- .max_rate = 2400000000u,
- .max_fb_rate = BCM2835_MAX_FB_RATE,
-};
-
-/* PLLB is used for the ARM's clock. */
-static const struct bcm2835_pll_data bcm2835_pllb_data = {
- .name = "pllb",
- .cm_ctrl_reg = CM_PLLB,
- .a2w_ctrl_reg = A2W_PLLB_CTRL,
- .frac_reg = A2W_PLLB_FRAC,
- .ana_reg_base = A2W_PLLB_ANA0,
- .reference_enable_mask = A2W_XOSC_CTRL_PLLB_ENABLE,
- .lock_mask = CM_LOCK_FLOCKB,
-
- .ana = &bcm2835_ana_default,
-
- .min_rate = 600000000u,
- .max_rate = 3000000000u,
- .max_fb_rate = BCM2835_MAX_FB_RATE,
-};
-
-/*
- * PLLC is the core PLL, used to drive the core VPU clock.
- *
- * It is in the PX LDO power domain, which is on when the AUDIO domain
- * is on.
-*/
-static const struct bcm2835_pll_data bcm2835_pllc_data = {
- .name = "pllc",
- .cm_ctrl_reg = CM_PLLC,
- .a2w_ctrl_reg = A2W_PLLC_CTRL,
- .frac_reg = A2W_PLLC_FRAC,
- .ana_reg_base = A2W_PLLC_ANA0,
- .reference_enable_mask = A2W_XOSC_CTRL_PLLC_ENABLE,
- .lock_mask = CM_LOCK_FLOCKC,
-
- .ana = &bcm2835_ana_default,
-
- .min_rate = 600000000u,
- .max_rate = 3000000000u,
- .max_fb_rate = BCM2835_MAX_FB_RATE,
-};
-
-/*
- * PLLD is the display PLL, used to drive DSI display panels.
- *
- * It is in the PX LDO power domain, which is on when the AUDIO domain
- * is on.
- */
-static const struct bcm2835_pll_data bcm2835_plld_data = {
- .name = "plld",
- .cm_ctrl_reg = CM_PLLD,
- .a2w_ctrl_reg = A2W_PLLD_CTRL,
- .frac_reg = A2W_PLLD_FRAC,
- .ana_reg_base = A2W_PLLD_ANA0,
- .reference_enable_mask = A2W_XOSC_CTRL_DDR_ENABLE,
- .lock_mask = CM_LOCK_FLOCKD,
-
- .ana = &bcm2835_ana_default,
-
- .min_rate = 600000000u,
- .max_rate = 2400000000u,
- .max_fb_rate = BCM2835_MAX_FB_RATE,
-};
-
-/*
- * PLLH is used to supply the pixel clock or the AUX clock for the TV
- * encoder.
- *
- * It is in the HDMI power domain.
- */
-static const struct bcm2835_pll_data bcm2835_pllh_data = {
- "pllh",
- .cm_ctrl_reg = CM_PLLH,
- .a2w_ctrl_reg = A2W_PLLH_CTRL,
- .frac_reg = A2W_PLLH_FRAC,
- .ana_reg_base = A2W_PLLH_ANA0,
- .reference_enable_mask = A2W_XOSC_CTRL_PLLC_ENABLE,
- .lock_mask = CM_LOCK_FLOCKH,
-
- .ana = &bcm2835_ana_pllh,
-
- .min_rate = 600000000u,
- .max_rate = 3000000000u,
- .max_fb_rate = BCM2835_MAX_FB_RATE,
-};
-
struct bcm2835_pll_divider_data {
const char *name;
- const struct bcm2835_pll_data *source_pll;
+ const char *source_pll;
+
u32 cm_reg;
u32 a2w_reg;
@@ -513,124 +429,6 @@ struct bcm2835_pll_divider_data {
u32 fixed_divider;
};
-static const struct bcm2835_pll_divider_data bcm2835_plla_core_data = {
- .name = "plla_core",
- .source_pll = &bcm2835_plla_data,
- .cm_reg = CM_PLLA,
- .a2w_reg = A2W_PLLA_CORE,
- .load_mask = CM_PLLA_LOADCORE,
- .hold_mask = CM_PLLA_HOLDCORE,
- .fixed_divider = 1,
-};
-
-static const struct bcm2835_pll_divider_data bcm2835_plla_per_data = {
- .name = "plla_per",
- .source_pll = &bcm2835_plla_data,
- .cm_reg = CM_PLLA,
- .a2w_reg = A2W_PLLA_PER,
- .load_mask = CM_PLLA_LOADPER,
- .hold_mask = CM_PLLA_HOLDPER,
- .fixed_divider = 1,
-};
-
-static const struct bcm2835_pll_divider_data bcm2835_pllb_arm_data = {
- .name = "pllb_arm",
- .source_pll = &bcm2835_pllb_data,
- .cm_reg = CM_PLLB,
- .a2w_reg = A2W_PLLB_ARM,
- .load_mask = CM_PLLB_LOADARM,
- .hold_mask = CM_PLLB_HOLDARM,
- .fixed_divider = 1,
-};
-
-static const struct bcm2835_pll_divider_data bcm2835_pllc_core0_data = {
- .name = "pllc_core0",
- .source_pll = &bcm2835_pllc_data,
- .cm_reg = CM_PLLC,
- .a2w_reg = A2W_PLLC_CORE0,
- .load_mask = CM_PLLC_LOADCORE0,
- .hold_mask = CM_PLLC_HOLDCORE0,
- .fixed_divider = 1,
-};
-
-static const struct bcm2835_pll_divider_data bcm2835_pllc_core1_data = {
- .name = "pllc_core1", .source_pll = &bcm2835_pllc_data,
- .cm_reg = CM_PLLC, A2W_PLLC_CORE1,
- .load_mask = CM_PLLC_LOADCORE1,
- .hold_mask = CM_PLLC_HOLDCORE1,
- .fixed_divider = 1,
-};
-
-static const struct bcm2835_pll_divider_data bcm2835_pllc_core2_data = {
- .name = "pllc_core2",
- .source_pll = &bcm2835_pllc_data,
- .cm_reg = CM_PLLC,
- .a2w_reg = A2W_PLLC_CORE2,
- .load_mask = CM_PLLC_LOADCORE2,
- .hold_mask = CM_PLLC_HOLDCORE2,
- .fixed_divider = 1,
-};
-
-static const struct bcm2835_pll_divider_data bcm2835_pllc_per_data = {
- .name = "pllc_per",
- .source_pll = &bcm2835_pllc_data,
- .cm_reg = CM_PLLC,
- .a2w_reg = A2W_PLLC_PER,
- .load_mask = CM_PLLC_LOADPER,
- .hold_mask = CM_PLLC_HOLDPER,
- .fixed_divider = 1,
-};
-
-static const struct bcm2835_pll_divider_data bcm2835_plld_core_data = {
- .name = "plld_core",
- .source_pll = &bcm2835_plld_data,
- .cm_reg = CM_PLLD,
- .a2w_reg = A2W_PLLD_CORE,
- .load_mask = CM_PLLD_LOADCORE,
- .hold_mask = CM_PLLD_HOLDCORE,
- .fixed_divider = 1,
-};
-
-static const struct bcm2835_pll_divider_data bcm2835_plld_per_data = {
- .name = "plld_per",
- .source_pll = &bcm2835_plld_data,
- .cm_reg = CM_PLLD,
- .a2w_reg = A2W_PLLD_PER,
- .load_mask = CM_PLLD_LOADPER,
- .hold_mask = CM_PLLD_HOLDPER,
- .fixed_divider = 1,
-};
-
-static const struct bcm2835_pll_divider_data bcm2835_pllh_rcal_data = {
- .name = "pllh_rcal",
- .source_pll = &bcm2835_pllh_data,
- .cm_reg = CM_PLLH,
- .a2w_reg = A2W_PLLH_RCAL,
- .load_mask = CM_PLLH_LOADRCAL,
- .hold_mask = 0,
- .fixed_divider = 10,
-};
-
-static const struct bcm2835_pll_divider_data bcm2835_pllh_aux_data = {
- .name = "pllh_aux",
- .source_pll = &bcm2835_pllh_data,
- .cm_reg = CM_PLLH,
- .a2w_reg = A2W_PLLH_AUX,
- .load_mask = CM_PLLH_LOADAUX,
- .hold_mask = 0,
- .fixed_divider = 10,
-};
-
-static const struct bcm2835_pll_divider_data bcm2835_pllh_pix_data = {
- .name = "pllh_pix",
- .source_pll = &bcm2835_pllh_data,
- .cm_reg = CM_PLLH,
- .a2w_reg = A2W_PLLH_PIX,
- .load_mask = CM_PLLH_LOADPIX,
- .hold_mask = 0,
- .fixed_divider = 10,
-};
-
struct bcm2835_clock_data {
const char *name;
@@ -649,186 +447,11 @@ struct bcm2835_clock_data {
bool is_mash_clock;
};
-static const char *const bcm2835_clock_per_parents[] = {
- "gnd",
- "xosc",
- "testdebug0",
- "testdebug1",
- "plla_per",
- "pllc_per",
- "plld_per",
- "pllh_aux",
-};
-
-static const char *const bcm2835_clock_vpu_parents[] = {
- "gnd",
- "xosc",
- "testdebug0",
- "testdebug1",
- "plla_core",
- "pllc_core0",
- "plld_core",
- "pllh_aux",
- "pllc_core1",
- "pllc_core2",
-};
-
-static const char *const bcm2835_clock_osc_parents[] = {
- "gnd",
- "xosc",
- "testdebug0",
- "testdebug1"
-};
-
-/*
- * Used for a 1Mhz clock for the system clocksource, and also used by
- * the watchdog timer and the camera pulse generator.
- */
-static const struct bcm2835_clock_data bcm2835_clock_timer_data = {
- .name = "timer",
- .num_mux_parents = ARRAY_SIZE(bcm2835_clock_osc_parents),
- .parents = bcm2835_clock_osc_parents,
- .ctl_reg = CM_TIMERCTL,
- .div_reg = CM_TIMERDIV,
- .int_bits = 6,
- .frac_bits = 12,
-};
-
-/* One Time Programmable Memory clock. Maximum 10Mhz. */
-static const struct bcm2835_clock_data bcm2835_clock_otp_data = {
- .name = "otp",
- .num_mux_parents = ARRAY_SIZE(bcm2835_clock_osc_parents),
- .parents = bcm2835_clock_osc_parents,
- .ctl_reg = CM_OTPCTL,
- .div_reg = CM_OTPDIV,
- .int_bits = 4,
- .frac_bits = 0,
-};
-
-/*
- * VPU clock. This doesn't have an enable bit, since it drives the
- * bus for everything else, and is special so it doesn't need to be
- * gated for rate changes. It is also known as "clk_audio" in various
- * hardware documentation.
- */
-static const struct bcm2835_clock_data bcm2835_clock_vpu_data = {
- .name = "vpu",
- .num_mux_parents = ARRAY_SIZE(bcm2835_clock_vpu_parents),
- .parents = bcm2835_clock_vpu_parents,
- .ctl_reg = CM_VPUCTL,
- .div_reg = CM_VPUDIV,
- .int_bits = 12,
- .frac_bits = 8,
- .is_vpu_clock = true,
-};
-
-static const struct bcm2835_clock_data bcm2835_clock_v3d_data = {
- .name = "v3d",
- .num_mux_parents = ARRAY_SIZE(bcm2835_clock_vpu_parents),
- .parents = bcm2835_clock_vpu_parents,
- .ctl_reg = CM_V3DCTL,
- .div_reg = CM_V3DDIV,
- .int_bits = 4,
- .frac_bits = 8,
-};
-
-static const struct bcm2835_clock_data bcm2835_clock_isp_data = {
- .name = "isp",
- .num_mux_parents = ARRAY_SIZE(bcm2835_clock_vpu_parents),
- .parents = bcm2835_clock_vpu_parents,
- .ctl_reg = CM_ISPCTL,
- .div_reg = CM_ISPDIV,
- .int_bits = 4,
- .frac_bits = 8,
-};
-
-static const struct bcm2835_clock_data bcm2835_clock_h264_data = {
- .name = "h264",
- .num_mux_parents = ARRAY_SIZE(bcm2835_clock_vpu_parents),
- .parents = bcm2835_clock_vpu_parents,
- .ctl_reg = CM_H264CTL,
- .div_reg = CM_H264DIV,
- .int_bits = 4,
- .frac_bits = 8,
-};
-
-/* TV encoder clock. Only operating frequency is 108Mhz. */
-static const struct bcm2835_clock_data bcm2835_clock_vec_data = {
- .name = "vec",
- .num_mux_parents = ARRAY_SIZE(bcm2835_clock_per_parents),
- .parents = bcm2835_clock_per_parents,
- .ctl_reg = CM_VECCTL,
- .div_reg = CM_VECDIV,
- .int_bits = 4,
- .frac_bits = 0,
-};
-
-static const struct bcm2835_clock_data bcm2835_clock_uart_data = {
- .name = "uart",
- .num_mux_parents = ARRAY_SIZE(bcm2835_clock_per_parents),
- .parents = bcm2835_clock_per_parents,
- .ctl_reg = CM_UARTCTL,
- .div_reg = CM_UARTDIV,
- .int_bits = 10,
- .frac_bits = 12,
-};
-
-/* HDMI state machine */
-static const struct bcm2835_clock_data bcm2835_clock_hsm_data = {
- .name = "hsm",
- .num_mux_parents = ARRAY_SIZE(bcm2835_clock_per_parents),
- .parents = bcm2835_clock_per_parents,
- .ctl_reg = CM_HSMCTL,
- .div_reg = CM_HSMDIV,
- .int_bits = 4,
- .frac_bits = 8,
-};
-
-/*
- * Secondary SDRAM clock. Used for low-voltage modes when the PLL in
- * the SDRAM controller can't be used.
- */
-static const struct bcm2835_clock_data bcm2835_clock_sdram_data = {
- .name = "sdram",
- .num_mux_parents = ARRAY_SIZE(bcm2835_clock_vpu_parents),
- .parents = bcm2835_clock_vpu_parents,
- .ctl_reg = CM_SDCCTL,
- .div_reg = CM_SDCDIV,
- .int_bits = 6,
- .frac_bits = 0,
-};
-
-/* Clock for the temperature sensor. Generally run at 2Mhz, max 5Mhz. */
-static const struct bcm2835_clock_data bcm2835_clock_tsens_data = {
- .name = "tsens",
- .num_mux_parents = ARRAY_SIZE(bcm2835_clock_osc_parents),
- .parents = bcm2835_clock_osc_parents,
- .ctl_reg = CM_TSENSCTL,
- .div_reg = CM_TSENSDIV,
- .int_bits = 5,
- .frac_bits = 0,
-};
-
-/* Arasan EMMC clock */
-static const struct bcm2835_clock_data bcm2835_clock_emmc_data = {
- .name = "emmc",
- .num_mux_parents = ARRAY_SIZE(bcm2835_clock_per_parents),
- .parents = bcm2835_clock_per_parents,
- .ctl_reg = CM_EMMCCTL,
- .div_reg = CM_EMMCDIV,
- .int_bits = 4,
- .frac_bits = 8,
-};
+struct bcm2835_gate_data {
+ const char *name;
+ const char *parent;
-static const struct bcm2835_clock_data bcm2835_clock_pwm_data = {
- .name = "pwm",
- .num_mux_parents = ARRAY_SIZE(bcm2835_clock_per_parents),
- .parents = bcm2835_clock_per_parents,
- .ctl_reg = CM_PWMCTL,
- .div_reg = CM_PWMDIV,
- .int_bits = 12,
- .frac_bits = 12,
- .is_mash_clock = true,
+ u32 ctl_reg;
};
struct bcm2835_pll {
@@ -1044,6 +667,36 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
return 0;
}
+static int bcm2835_pll_debug_init(struct clk_hw *hw,
+ struct dentry *dentry)
+{
+ struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw);
+ struct bcm2835_cprman *cprman = pll->cprman;
+ const struct bcm2835_pll_data *data = pll->data;
+ struct debugfs_reg32 *regs;
+
+ regs = devm_kzalloc(cprman->dev, 7 * sizeof(*regs), GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
+
+ regs[0].name = "cm_ctrl";
+ regs[0].offset = data->cm_ctrl_reg;
+ regs[1].name = "a2w_ctrl";
+ regs[1].offset = data->a2w_ctrl_reg;
+ regs[2].name = "frac";
+ regs[2].offset = data->frac_reg;
+ regs[3].name = "ana0";
+ regs[3].offset = data->ana_reg_base + 0 * 4;
+ regs[4].name = "ana1";
+ regs[4].offset = data->ana_reg_base + 1 * 4;
+ regs[5].name = "ana2";
+ regs[5].offset = data->ana_reg_base + 2 * 4;
+ regs[6].name = "ana3";
+ regs[6].offset = data->ana_reg_base + 3 * 4;
+
+ return bcm2835_debugfs_regset(cprman, 0, regs, 7, dentry);
+}
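+
+/*
+ * Illustrative note (editor's annotation, not part of the upstream
+ * patch): the ->debug_init hook above is called by the common clock
+ * framework when it creates this clock's debugfs directory, so every
+ * PLL gains a dump of its cm_ctrl, a2w_ctrl, frac and ana0-ana3
+ * registers. Assuming bcm2835_debugfs_regset() (defined earlier in
+ * this file) wraps debugfs_create_regset32(), the dump appears as a
+ * read-only file under /sys/kernel/debug/clk/<pll-name>/.
+ */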
+
static const struct clk_ops bcm2835_pll_clk_ops = {
.is_prepared = bcm2835_pll_is_on,
.prepare = bcm2835_pll_on,
@@ -1051,6 +704,7 @@ static const struct clk_ops bcm2835_pll_clk_ops = {
.recalc_rate = bcm2835_pll_get_rate,
.set_rate = bcm2835_pll_set_rate,
.round_rate = bcm2835_pll_round_rate,
+ .debug_init = bcm2835_pll_debug_init,
};
struct bcm2835_pll_divider {
@@ -1142,6 +796,26 @@ static int bcm2835_pll_divider_set_rate(struct clk_hw *hw,
return 0;
}
+static int bcm2835_pll_divider_debug_init(struct clk_hw *hw,
+ struct dentry *dentry)
+{
+ struct bcm2835_pll_divider *divider = bcm2835_pll_divider_from_hw(hw);
+ struct bcm2835_cprman *cprman = divider->cprman;
+ const struct bcm2835_pll_divider_data *data = divider->data;
+ struct debugfs_reg32 *regs;
+
+ regs = devm_kzalloc(cprman->dev, 7 * sizeof(*regs), GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
+
+ regs[0].name = "cm";
+ regs[0].offset = data->cm_reg;
+ regs[1].name = "a2w";
+ regs[1].offset = data->a2w_reg;
+
+ return bcm2835_debugfs_regset(cprman, 0, regs, 2, dentry);
+}
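+
+/*
+ * Editor's annotation: only two debugfs_reg32 entries are filled in
+ * and passed on here, yet the allocation above reserves room for
+ * seven -- apparently carried over from the PLL variant. It is
+ * harmless (devm_kzalloc() zeroes the slack and frees it on unbind),
+ * but sizing the array with "2 * sizeof(*regs)" would match what is
+ * actually used.
+ */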
+
static const struct clk_ops bcm2835_pll_divider_clk_ops = {
.is_prepared = bcm2835_pll_divider_is_on,
.prepare = bcm2835_pll_divider_on,
@@ -1149,6 +823,7 @@ static const struct clk_ops bcm2835_pll_divider_clk_ops = {
.recalc_rate = bcm2835_pll_divider_get_rate,
.set_rate = bcm2835_pll_divider_set_rate,
.round_rate = bcm2835_pll_divider_round_rate,
+ .debug_init = bcm2835_pll_divider_debug_init,
};
/*
@@ -1332,7 +1007,7 @@ static int bcm2835_clock_set_rate(struct clk_hw *hw,
}
static int bcm2835_clock_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
+ struct clk_rate_request *req)
{
struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
struct clk_hw *parent, *best_parent = NULL;
@@ -1390,6 +1065,30 @@ static u8 bcm2835_clock_get_parent(struct clk_hw *hw)
return (src & CM_SRC_MASK) >> CM_SRC_SHIFT;
}
+static struct debugfs_reg32 bcm2835_debugfs_clock_reg32[] = {
+ {
+ .name = "ctl",
+ .offset = 0,
+ },
+ {
+ .name = "div",
+ .offset = 4,
+ },
+};
+
+static int bcm2835_clock_debug_init(struct clk_hw *hw,
+ struct dentry *dentry)
+{
+ struct bcm2835_clock *clock = bcm2835_clock_from_hw(hw);
+ struct bcm2835_cprman *cprman = clock->cprman;
+ const struct bcm2835_clock_data *data = clock->data;
+
+ return bcm2835_debugfs_regset(
+ cprman, data->ctl_reg,
+ bcm2835_debugfs_clock_reg32,
+ ARRAY_SIZE(bcm2835_debugfs_clock_reg32),
+ dentry);
+}
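+
+/*
+ * Editor's annotation: unlike the PLL hooks, this per-clock hook can
+ * share one static debugfs_reg32 table for all clocks, because the
+ * table stores offsets 0 and 4 relative to data->ctl_reg (passed as
+ * the base argument), relying on each CM_*DIV register sitting 4
+ * bytes after its CM_*CTL register.
+ */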
static const struct clk_ops bcm2835_clock_clk_ops = {
.is_prepared = bcm2835_clock_is_on,
@@ -1400,6 +1099,7 @@ static const struct clk_ops bcm2835_clock_clk_ops = {
.determine_rate = bcm2835_clock_determine_rate,
.set_parent = bcm2835_clock_set_parent,
.get_parent = bcm2835_clock_get_parent,
+ .debug_init = bcm2835_clock_debug_init,
};
static int bcm2835_vpu_clock_is_on(struct clk_hw *hw)
@@ -1418,6 +1118,7 @@ static const struct clk_ops bcm2835_vpu_clock_clk_ops = {
.determine_rate = bcm2835_clock_determine_rate,
.set_parent = bcm2835_clock_set_parent,
.get_parent = bcm2835_clock_get_parent,
+ .debug_init = bcm2835_clock_debug_init,
};
static struct clk *bcm2835_register_pll(struct bcm2835_cprman *cprman,
@@ -1466,7 +1167,7 @@ bcm2835_register_pll_divider(struct bcm2835_cprman *cprman,
memset(&init, 0, sizeof(init));
- init.parent_names = &data->source_pll->name;
+ init.parent_names = &data->source_pll;
init.num_parents = 1;
init.name = divider_name;
init.ops = &bcm2835_pll_divider_clk_ops;
@@ -1549,14 +1250,559 @@ static struct clk *bcm2835_register_clock(struct bcm2835_cprman *cprman,
return devm_clk_register(cprman->dev, &clock->hw);
}
+static struct clk *bcm2835_register_gate(struct bcm2835_cprman *cprman,
+ const struct bcm2835_gate_data *data)
+{
+ return clk_register_gate(cprman->dev, data->name, data->parent,
+ CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+ cprman->regs + data->ctl_reg,
+ CM_GATE_BIT, 0, &cprman->regs_lock);
+}
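+
+/*
+ * Editor's annotation: CLK_IGNORE_UNUSED keeps the framework's
+ * late-boot "disable unused clocks" pass away from this gate, and
+ * CLK_SET_RATE_GATE marks it as a clock whose rate may only change
+ * while it is gated. The shared regs_lock serialises the
+ * read-modify-write of CM_GATE_BIT against other CM register
+ * accesses.
+ */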
+
+typedef struct clk *(*bcm2835_clk_register)(struct bcm2835_cprman *cprman,
+ const void *data);
+struct bcm2835_clk_desc {
+ bcm2835_clk_register clk_register;
+ const void *data;
+};
+
+/* assignment helper macros for different clock types */
+#define _REGISTER(f, ...) { .clk_register = (bcm2835_clk_register)f, \
+ .data = __VA_ARGS__ }
+#define REGISTER_PLL(...) _REGISTER(&bcm2835_register_pll, \
+ &(struct bcm2835_pll_data) \
+ {__VA_ARGS__})
+#define REGISTER_PLL_DIV(...) _REGISTER(&bcm2835_register_pll_divider, \
+ &(struct bcm2835_pll_divider_data) \
+ {__VA_ARGS__})
+#define REGISTER_CLK(...) _REGISTER(&bcm2835_register_clock, \
+ &(struct bcm2835_clock_data) \
+ {__VA_ARGS__})
+#define REGISTER_GATE(...) _REGISTER(&bcm2835_register_gate, \
+ &(struct bcm2835_gate_data) \
+ {__VA_ARGS__})
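+
+/*
+ * Illustrative expansion (editor's annotation): each helper pairs a
+ * registration callback with an anonymous file-scope C99 compound
+ * literal, so e.g.
+ *
+ *	REGISTER_PLL_DIV(.name = "plla_core", .fixed_divider = 1)
+ *
+ * becomes
+ *
+ *	{ .clk_register = (bcm2835_clk_register)&bcm2835_register_pll_divider,
+ *	  .data = &(struct bcm2835_pll_divider_data){
+ *		.name = "plla_core", .fixed_divider = 1 } }
+ *
+ * Compound literals at file scope have static storage duration, so
+ * the stored .data pointer stays valid for the life of the driver;
+ * the cast to bcm2835_clk_register erases the specific data-pointer
+ * type so all entries fit one descriptor type.
+ */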
+
+/* parent mux arrays plus helper macros */
+
+/* main oscillator parent mux */
+static const char *const bcm2835_clock_osc_parents[] = {
+ "gnd",
+ "xosc",
+ "testdebug0",
+ "testdebug1"
+};
+
+#define REGISTER_OSC_CLK(...) REGISTER_CLK( \
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_osc_parents), \
+ .parents = bcm2835_clock_osc_parents, \
+ __VA_ARGS__)
+
+/* main peripheral parent mux */
+static const char *const bcm2835_clock_per_parents[] = {
+ "gnd",
+ "xosc",
+ "testdebug0",
+ "testdebug1",
+ "plla_per",
+ "pllc_per",
+ "plld_per",
+ "pllh_aux",
+};
+
+#define REGISTER_PER_CLK(...) REGISTER_CLK( \
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_per_parents), \
+ .parents = bcm2835_clock_per_parents, \
+ __VA_ARGS__)
+
+/* main vpu parent mux */
+static const char *const bcm2835_clock_vpu_parents[] = {
+ "gnd",
+ "xosc",
+ "testdebug0",
+ "testdebug1",
+ "plla_core",
+ "pllc_core0",
+ "plld_core",
+ "pllh_aux",
+ "pllc_core1",
+ "pllc_core2",
+};
+
+#define REGISTER_VPU_CLK(...) REGISTER_CLK( \
+ .num_mux_parents = ARRAY_SIZE(bcm2835_clock_vpu_parents), \
+ .parents = bcm2835_clock_vpu_parents, \
+ __VA_ARGS__)
+
+/*
+ * the real definition of all the pll, pll_dividers and clocks
+ * these make use of the above REGISTER_* macros
+ */
+static const struct bcm2835_clk_desc clk_desc_array[] = {
+ /* the PLL + PLL dividers */
+
+ /*
+ * PLLA is the auxiliary PLL, used to drive the CCP2
+ * (Compact Camera Port 2) transmitter clock.
+ *
+ * It is in the PX LDO power domain, which is on when the
+ * AUDIO domain is on.
+ */
+ [BCM2835_PLLA] = REGISTER_PLL(
+ .name = "plla",
+ .cm_ctrl_reg = CM_PLLA,
+ .a2w_ctrl_reg = A2W_PLLA_CTRL,
+ .frac_reg = A2W_PLLA_FRAC,
+ .ana_reg_base = A2W_PLLA_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_PLLA_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKA,
+
+ .ana = &bcm2835_ana_default,
+
+ .min_rate = 600000000u,
+ .max_rate = 2400000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE),
+ [BCM2835_PLLA_CORE] = REGISTER_PLL_DIV(
+ .name = "plla_core",
+ .source_pll = "plla",
+ .cm_reg = CM_PLLA,
+ .a2w_reg = A2W_PLLA_CORE,
+ .load_mask = CM_PLLA_LOADCORE,
+ .hold_mask = CM_PLLA_HOLDCORE,
+ .fixed_divider = 1),
+ [BCM2835_PLLA_PER] = REGISTER_PLL_DIV(
+ .name = "plla_per",
+ .source_pll = "plla",
+ .cm_reg = CM_PLLA,
+ .a2w_reg = A2W_PLLA_PER,
+ .load_mask = CM_PLLA_LOADPER,
+ .hold_mask = CM_PLLA_HOLDPER,
+ .fixed_divider = 1),
+ [BCM2835_PLLA_DSI0] = REGISTER_PLL_DIV(
+ .name = "plla_dsi0",
+ .source_pll = "plla",
+ .cm_reg = CM_PLLA,
+ .a2w_reg = A2W_PLLA_DSI0,
+ .load_mask = CM_PLLA_LOADDSI0,
+ .hold_mask = CM_PLLA_HOLDDSI0,
+ .fixed_divider = 1),
+ [BCM2835_PLLA_CCP2] = REGISTER_PLL_DIV(
+ .name = "plla_ccp2",
+ .source_pll = "plla",
+ .cm_reg = CM_PLLA,
+ .a2w_reg = A2W_PLLA_CCP2,
+ .load_mask = CM_PLLA_LOADCCP2,
+ .hold_mask = CM_PLLA_HOLDCCP2,
+ .fixed_divider = 1),
+
+ /* PLLB is used for the ARM's clock. */
+ [BCM2835_PLLB] = REGISTER_PLL(
+ .name = "pllb",
+ .cm_ctrl_reg = CM_PLLB,
+ .a2w_ctrl_reg = A2W_PLLB_CTRL,
+ .frac_reg = A2W_PLLB_FRAC,
+ .ana_reg_base = A2W_PLLB_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_PLLB_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKB,
+
+ .ana = &bcm2835_ana_default,
+
+ .min_rate = 600000000u,
+ .max_rate = 3000000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE),
+ [BCM2835_PLLB_ARM] = REGISTER_PLL_DIV(
+ .name = "pllb_arm",
+ .source_pll = "pllb",
+ .cm_reg = CM_PLLB,
+ .a2w_reg = A2W_PLLB_ARM,
+ .load_mask = CM_PLLB_LOADARM,
+ .hold_mask = CM_PLLB_HOLDARM,
+ .fixed_divider = 1),
+
+ /*
+ * PLLC is the core PLL, used to drive the core VPU clock.
+ *
+ * It is in the PX LDO power domain, which is on when the
+ * AUDIO domain is on.
+ */
+ [BCM2835_PLLC] = REGISTER_PLL(
+ .name = "pllc",
+ .cm_ctrl_reg = CM_PLLC,
+ .a2w_ctrl_reg = A2W_PLLC_CTRL,
+ .frac_reg = A2W_PLLC_FRAC,
+ .ana_reg_base = A2W_PLLC_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_PLLC_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKC,
+
+ .ana = &bcm2835_ana_default,
+
+ .min_rate = 600000000u,
+ .max_rate = 3000000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE),
+ [BCM2835_PLLC_CORE0] = REGISTER_PLL_DIV(
+ .name = "pllc_core0",
+ .source_pll = "pllc",
+ .cm_reg = CM_PLLC,
+ .a2w_reg = A2W_PLLC_CORE0,
+ .load_mask = CM_PLLC_LOADCORE0,
+ .hold_mask = CM_PLLC_HOLDCORE0,
+ .fixed_divider = 1),
+ [BCM2835_PLLC_CORE1] = REGISTER_PLL_DIV(
+ .name = "pllc_core1",
+ .source_pll = "pllc",
+ .cm_reg = CM_PLLC,
+ .a2w_reg = A2W_PLLC_CORE1,
+ .load_mask = CM_PLLC_LOADCORE1,
+ .hold_mask = CM_PLLC_HOLDCORE1,
+ .fixed_divider = 1),
+ [BCM2835_PLLC_CORE2] = REGISTER_PLL_DIV(
+ .name = "pllc_core2",
+ .source_pll = "pllc",
+ .cm_reg = CM_PLLC,
+ .a2w_reg = A2W_PLLC_CORE2,
+ .load_mask = CM_PLLC_LOADCORE2,
+ .hold_mask = CM_PLLC_HOLDCORE2,
+ .fixed_divider = 1),
+ [BCM2835_PLLC_PER] = REGISTER_PLL_DIV(
+ .name = "pllc_per",
+ .source_pll = "pllc",
+ .cm_reg = CM_PLLC,
+ .a2w_reg = A2W_PLLC_PER,
+ .load_mask = CM_PLLC_LOADPER,
+ .hold_mask = CM_PLLC_HOLDPER,
+ .fixed_divider = 1),
+
+ /*
+ * PLLD is the display PLL, used to drive DSI display panels.
+ *
+ * It is in the PX LDO power domain, which is on when the
+ * AUDIO domain is on.
+ */
+ [BCM2835_PLLD] = REGISTER_PLL(
+ .name = "plld",
+ .cm_ctrl_reg = CM_PLLD,
+ .a2w_ctrl_reg = A2W_PLLD_CTRL,
+ .frac_reg = A2W_PLLD_FRAC,
+ .ana_reg_base = A2W_PLLD_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_DDR_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKD,
+
+ .ana = &bcm2835_ana_default,
+
+ .min_rate = 600000000u,
+ .max_rate = 2400000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE),
+ [BCM2835_PLLD_CORE] = REGISTER_PLL_DIV(
+ .name = "plld_core",
+ .source_pll = "plld",
+ .cm_reg = CM_PLLD,
+ .a2w_reg = A2W_PLLD_CORE,
+ .load_mask = CM_PLLD_LOADCORE,
+ .hold_mask = CM_PLLD_HOLDCORE,
+ .fixed_divider = 1),
+ [BCM2835_PLLD_PER] = REGISTER_PLL_DIV(
+ .name = "plld_per",
+ .source_pll = "plld",
+ .cm_reg = CM_PLLD,
+ .a2w_reg = A2W_PLLD_PER,
+ .load_mask = CM_PLLD_LOADPER,
+ .hold_mask = CM_PLLD_HOLDPER,
+ .fixed_divider = 1),
+ [BCM2835_PLLD_DSI0] = REGISTER_PLL_DIV(
+ .name = "plld_dsi0",
+ .source_pll = "plld",
+ .cm_reg = CM_PLLD,
+ .a2w_reg = A2W_PLLD_DSI0,
+ .load_mask = CM_PLLD_LOADDSI0,
+ .hold_mask = CM_PLLD_HOLDDSI0,
+ .fixed_divider = 1),
+ [BCM2835_PLLD_DSI1] = REGISTER_PLL_DIV(
+ .name = "plld_dsi1",
+ .source_pll = "plld",
+ .cm_reg = CM_PLLD,
+ .a2w_reg = A2W_PLLD_DSI1,
+ .load_mask = CM_PLLD_LOADDSI1,
+ .hold_mask = CM_PLLD_HOLDDSI1,
+ .fixed_divider = 1),
+
+ /*
+ * PLLH is used to supply the pixel clock or the AUX clock for the
+ * TV encoder.
+ *
+ * It is in the HDMI power domain.
+ */
+ [BCM2835_PLLH] = REGISTER_PLL(
+ "pllh",
+ .cm_ctrl_reg = CM_PLLH,
+ .a2w_ctrl_reg = A2W_PLLH_CTRL,
+ .frac_reg = A2W_PLLH_FRAC,
+ .ana_reg_base = A2W_PLLH_ANA0,
+ .reference_enable_mask = A2W_XOSC_CTRL_PLLC_ENABLE,
+ .lock_mask = CM_LOCK_FLOCKH,
+
+ .ana = &bcm2835_ana_pllh,
+
+ .min_rate = 600000000u,
+ .max_rate = 3000000000u,
+ .max_fb_rate = BCM2835_MAX_FB_RATE),
+ [BCM2835_PLLH_RCAL] = REGISTER_PLL_DIV(
+ .name = "pllh_rcal",
+ .source_pll = "pllh",
+ .cm_reg = CM_PLLH,
+ .a2w_reg = A2W_PLLH_RCAL,
+ .load_mask = CM_PLLH_LOADRCAL,
+ .hold_mask = 0,
+ .fixed_divider = 10),
+ [BCM2835_PLLH_AUX] = REGISTER_PLL_DIV(
+ .name = "pllh_aux",
+ .source_pll = "pllh",
+ .cm_reg = CM_PLLH,
+ .a2w_reg = A2W_PLLH_AUX,
+ .load_mask = CM_PLLH_LOADAUX,
+ .hold_mask = 0,
+ .fixed_divider = 10),
+ [BCM2835_PLLH_PIX] = REGISTER_PLL_DIV(
+ .name = "pllh_pix",
+ .source_pll = "pllh",
+ .cm_reg = CM_PLLH,
+ .a2w_reg = A2W_PLLH_PIX,
+ .load_mask = CM_PLLH_LOADPIX,
+ .hold_mask = 0,
+ .fixed_divider = 10),
+
+ /* the clocks */
+
+ /* clocks with oscillator parent mux */
+
+ /* One Time Programmable Memory clock. Maximum 10Mhz. */
+ [BCM2835_CLOCK_OTP] = REGISTER_OSC_CLK(
+ .name = "otp",
+ .ctl_reg = CM_OTPCTL,
+ .div_reg = CM_OTPDIV,
+ .int_bits = 4,
+ .frac_bits = 0),
+ /*
+ * Used for a 1Mhz clock for the system clocksource, and also used
+ * by the watchdog timer and the camera pulse generator.
+ */
+ [BCM2835_CLOCK_TIMER] = REGISTER_OSC_CLK(
+ .name = "timer",
+ .ctl_reg = CM_TIMERCTL,
+ .div_reg = CM_TIMERDIV,
+ .int_bits = 6,
+ .frac_bits = 12),
+ /*
+ * Clock for the temperature sensor.
+ * Generally run at 2Mhz, max 5Mhz.
+ */
+ [BCM2835_CLOCK_TSENS] = REGISTER_OSC_CLK(
+ .name = "tsens",
+ .ctl_reg = CM_TSENSCTL,
+ .div_reg = CM_TSENSDIV,
+ .int_bits = 5,
+ .frac_bits = 0),
+ [BCM2835_CLOCK_TEC] = REGISTER_OSC_CLK(
+ .name = "tec",
+ .ctl_reg = CM_TECCTL,
+ .div_reg = CM_TECDIV,
+ .int_bits = 6,
+ .frac_bits = 0),
+
+ /* clocks with vpu parent mux */
+ [BCM2835_CLOCK_H264] = REGISTER_VPU_CLK(
+ .name = "h264",
+ .ctl_reg = CM_H264CTL,
+ .div_reg = CM_H264DIV,
+ .int_bits = 4,
+ .frac_bits = 8),
+ [BCM2835_CLOCK_ISP] = REGISTER_VPU_CLK(
+ .name = "isp",
+ .ctl_reg = CM_ISPCTL,
+ .div_reg = CM_ISPDIV,
+ .int_bits = 4,
+ .frac_bits = 8),
+
+ /*
+ * Secondary SDRAM clock. Used for low-voltage modes when the PLL
+ * in the SDRAM controller can't be used.
+ */
+ [BCM2835_CLOCK_SDRAM] = REGISTER_VPU_CLK(
+ .name = "sdram",
+ .ctl_reg = CM_SDCCTL,
+ .div_reg = CM_SDCDIV,
+ .int_bits = 6,
+ .frac_bits = 0),
+ [BCM2835_CLOCK_V3D] = REGISTER_VPU_CLK(
+ .name = "v3d",
+ .ctl_reg = CM_V3DCTL,
+ .div_reg = CM_V3DDIV,
+ .int_bits = 4,
+ .frac_bits = 8),
+ /*
+ * VPU clock. This doesn't have an enable bit, since it drives
+ * the bus for everything else, and is special so it doesn't need
+ * to be gated for rate changes. It is also known as "clk_audio"
+ * in various hardware documentation.
+ */
+ [BCM2835_CLOCK_VPU] = REGISTER_VPU_CLK(
+ .name = "vpu",
+ .ctl_reg = CM_VPUCTL,
+ .div_reg = CM_VPUDIV,
+ .int_bits = 12,
+ .frac_bits = 8,
+ .is_vpu_clock = true),
+
+ /* clocks with per parent mux */
+ [BCM2835_CLOCK_AVEO] = REGISTER_PER_CLK(
+ .name = "aveo",
+ .ctl_reg = CM_AVEOCTL,
+ .div_reg = CM_AVEODIV,
+ .int_bits = 4,
+ .frac_bits = 0),
+ [BCM2835_CLOCK_CAM0] = REGISTER_PER_CLK(
+ .name = "cam0",
+ .ctl_reg = CM_CAM0CTL,
+ .div_reg = CM_CAM0DIV,
+ .int_bits = 4,
+ .frac_bits = 8),
+ [BCM2835_CLOCK_CAM1] = REGISTER_PER_CLK(
+ .name = "cam1",
+ .ctl_reg = CM_CAM1CTL,
+ .div_reg = CM_CAM1DIV,
+ .int_bits = 4,
+ .frac_bits = 8),
+ [BCM2835_CLOCK_DFT] = REGISTER_PER_CLK(
+ .name = "dft",
+ .ctl_reg = CM_DFTCTL,
+ .div_reg = CM_DFTDIV,
+ .int_bits = 5,
+ .frac_bits = 0),
+ [BCM2835_CLOCK_DPI] = REGISTER_PER_CLK(
+ .name = "dpi",
+ .ctl_reg = CM_DPICTL,
+ .div_reg = CM_DPIDIV,
+ .int_bits = 4,
+ .frac_bits = 8),
+
+ /* Arasan EMMC clock */
+ [BCM2835_CLOCK_EMMC] = REGISTER_PER_CLK(
+ .name = "emmc",
+ .ctl_reg = CM_EMMCCTL,
+ .div_reg = CM_EMMCDIV,
+ .int_bits = 4,
+ .frac_bits = 8),
+
+ /* General purpose (GPIO) clocks */
+ [BCM2835_CLOCK_GP0] = REGISTER_PER_CLK(
+ .name = "gp0",
+ .ctl_reg = CM_GP0CTL,
+ .div_reg = CM_GP0DIV,
+ .int_bits = 12,
+ .frac_bits = 12,
+ .is_mash_clock = true),
+ [BCM2835_CLOCK_GP1] = REGISTER_PER_CLK(
+ .name = "gp1",
+ .ctl_reg = CM_GP1CTL,
+ .div_reg = CM_GP1DIV,
+ .int_bits = 12,
+ .frac_bits = 12,
+ .is_mash_clock = true),
+ [BCM2835_CLOCK_GP2] = REGISTER_PER_CLK(
+ .name = "gp2",
+ .ctl_reg = CM_GP2CTL,
+ .div_reg = CM_GP2DIV,
+ .int_bits = 12,
+ .frac_bits = 12),
+
+ /* HDMI state machine */
+ [BCM2835_CLOCK_HSM] = REGISTER_PER_CLK(
+ .name = "hsm",
+ .ctl_reg = CM_HSMCTL,
+ .div_reg = CM_HSMDIV,
+ .int_bits = 4,
+ .frac_bits = 8),
+ [BCM2835_CLOCK_PCM] = REGISTER_PER_CLK(
+ .name = "pcm",
+ .ctl_reg = CM_PCMCTL,
+ .div_reg = CM_PCMDIV,
+ .int_bits = 12,
+ .frac_bits = 12,
+ .is_mash_clock = true),
+ [BCM2835_CLOCK_PWM] = REGISTER_PER_CLK(
+ .name = "pwm",
+ .ctl_reg = CM_PWMCTL,
+ .div_reg = CM_PWMDIV,
+ .int_bits = 12,
+ .frac_bits = 12,
+ .is_mash_clock = true),
+ [BCM2835_CLOCK_SLIM] = REGISTER_PER_CLK(
+ .name = "slim",
+ .ctl_reg = CM_SLIMCTL,
+ .div_reg = CM_SLIMDIV,
+ .int_bits = 12,
+ .frac_bits = 12,
+ .is_mash_clock = true),
+ [BCM2835_CLOCK_SMI] = REGISTER_PER_CLK(
+ .name = "smi",
+ .ctl_reg = CM_SMICTL,
+ .div_reg = CM_SMIDIV,
+ .int_bits = 4,
+ .frac_bits = 8),
+ [BCM2835_CLOCK_UART] = REGISTER_PER_CLK(
+ .name = "uart",
+ .ctl_reg = CM_UARTCTL,
+ .div_reg = CM_UARTDIV,
+ .int_bits = 10,
+ .frac_bits = 12),
+
+ /* TV encoder clock. Only operating frequency is 108Mhz. */
+ [BCM2835_CLOCK_VEC] = REGISTER_PER_CLK(
+ .name = "vec",
+ .ctl_reg = CM_VECCTL,
+ .div_reg = CM_VECDIV,
+ .int_bits = 4,
+ .frac_bits = 0),
+
+ /* dsi clocks */
+ [BCM2835_CLOCK_DSI0E] = REGISTER_PER_CLK(
+ .name = "dsi0e",
+ .ctl_reg = CM_DSI0ECTL,
+ .div_reg = CM_DSI0EDIV,
+ .int_bits = 4,
+ .frac_bits = 8),
+ [BCM2835_CLOCK_DSI1E] = REGISTER_PER_CLK(
+ .name = "dsi1e",
+ .ctl_reg = CM_DSI1ECTL,
+ .div_reg = CM_DSI1EDIV,
+ .int_bits = 4,
+ .frac_bits = 8),
+
+ /* the gates */
+
+ /*
+ * CM_PERIICTL (and CM_PERIACTL, CM_SYSCTL and CM_VPUCTL if
+ * you have the debug bit set in the power manager, which we
+ * don't bother exposing) are individual gates off of the
+ * non-stop vpu clock.
+ */
+ [BCM2835_CLOCK_PERI_IMAGE] = REGISTER_GATE(
+ .name = "peri_image",
+ .parent = "vpu",
+ .ctl_reg = CM_PERIICTL),
+};
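+
+/*
+ * Editor's annotation: because the array is indexed by the BCM2835_*
+ * binding constants, any identifier without an entry is left as a
+ * zero-filled hole ({ NULL, NULL }); the probe loop below skips such
+ * holes by checking both clk_register and data before calling.
+ */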
+
static int bcm2835_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct clk **clks;
struct bcm2835_cprman *cprman;
struct resource *res;
+ const struct bcm2835_clk_desc *desc;
+ const size_t asize = ARRAY_SIZE(clk_desc_array);
+ size_t i;
- cprman = devm_kzalloc(dev, sizeof(*cprman), GFP_KERNEL);
+ cprman = devm_kzalloc(dev,
+ sizeof(*cprman) + asize * sizeof(*clks),
+ GFP_KERNEL);
if (!cprman)
return -ENOMEM;
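/*
 * Editor's annotation: the clks[] storage is now carved out of the
 * same allocation as the cprman struct (sizeof(*cprman) plus one
 * slot per descriptor), assuming clks is declared as a trailing
 * flexible array member of struct bcm2835_cprman earlier in the
 * file.
 */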
@@ -1573,80 +1819,15 @@ static int bcm2835_clk_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cprman);
- cprman->onecell.clk_num = BCM2835_CLOCK_COUNT;
+ cprman->onecell.clk_num = asize;
cprman->onecell.clks = cprman->clks;
clks = cprman->clks;
- clks[BCM2835_PLLA] = bcm2835_register_pll(cprman, &bcm2835_plla_data);
- clks[BCM2835_PLLB] = bcm2835_register_pll(cprman, &bcm2835_pllb_data);
- clks[BCM2835_PLLC] = bcm2835_register_pll(cprman, &bcm2835_pllc_data);
- clks[BCM2835_PLLD] = bcm2835_register_pll(cprman, &bcm2835_plld_data);
- clks[BCM2835_PLLH] = bcm2835_register_pll(cprman, &bcm2835_pllh_data);
-
- clks[BCM2835_PLLA_CORE] =
- bcm2835_register_pll_divider(cprman, &bcm2835_plla_core_data);
- clks[BCM2835_PLLA_PER] =
- bcm2835_register_pll_divider(cprman, &bcm2835_plla_per_data);
- clks[BCM2835_PLLC_CORE0] =
- bcm2835_register_pll_divider(cprman, &bcm2835_pllc_core0_data);
- clks[BCM2835_PLLC_CORE1] =
- bcm2835_register_pll_divider(cprman, &bcm2835_pllc_core1_data);
- clks[BCM2835_PLLC_CORE2] =
- bcm2835_register_pll_divider(cprman, &bcm2835_pllc_core2_data);
- clks[BCM2835_PLLC_PER] =
- bcm2835_register_pll_divider(cprman, &bcm2835_pllc_per_data);
- clks[BCM2835_PLLD_CORE] =
- bcm2835_register_pll_divider(cprman, &bcm2835_plld_core_data);
- clks[BCM2835_PLLD_PER] =
- bcm2835_register_pll_divider(cprman, &bcm2835_plld_per_data);
- clks[BCM2835_PLLH_RCAL] =
- bcm2835_register_pll_divider(cprman, &bcm2835_pllh_rcal_data);
- clks[BCM2835_PLLH_AUX] =
- bcm2835_register_pll_divider(cprman, &bcm2835_pllh_aux_data);
- clks[BCM2835_PLLH_PIX] =
- bcm2835_register_pll_divider(cprman, &bcm2835_pllh_pix_data);
-
- clks[BCM2835_CLOCK_TIMER] =
- bcm2835_register_clock(cprman, &bcm2835_clock_timer_data);
- clks[BCM2835_CLOCK_OTP] =
- bcm2835_register_clock(cprman, &bcm2835_clock_otp_data);
- clks[BCM2835_CLOCK_TSENS] =
- bcm2835_register_clock(cprman, &bcm2835_clock_tsens_data);
- clks[BCM2835_CLOCK_VPU] =
- bcm2835_register_clock(cprman, &bcm2835_clock_vpu_data);
- clks[BCM2835_CLOCK_V3D] =
- bcm2835_register_clock(cprman, &bcm2835_clock_v3d_data);
- clks[BCM2835_CLOCK_ISP] =
- bcm2835_register_clock(cprman, &bcm2835_clock_isp_data);
- clks[BCM2835_CLOCK_H264] =
- bcm2835_register_clock(cprman, &bcm2835_clock_h264_data);
- clks[BCM2835_CLOCK_V3D] =
- bcm2835_register_clock(cprman, &bcm2835_clock_v3d_data);
- clks[BCM2835_CLOCK_SDRAM] =
- bcm2835_register_clock(cprman, &bcm2835_clock_sdram_data);
- clks[BCM2835_CLOCK_UART] =
- bcm2835_register_clock(cprman, &bcm2835_clock_uart_data);
- clks[BCM2835_CLOCK_VEC] =
- bcm2835_register_clock(cprman, &bcm2835_clock_vec_data);
- clks[BCM2835_CLOCK_HSM] =
- bcm2835_register_clock(cprman, &bcm2835_clock_hsm_data);
- clks[BCM2835_CLOCK_EMMC] =
- bcm2835_register_clock(cprman, &bcm2835_clock_emmc_data);
-
- /*
- * CM_PERIICTL (and CM_PERIACTL, CM_SYSCTL and CM_VPUCTL if
- * you have the debug bit set in the power manager, which we
- * don't bother exposing) are individual gates off of the
- * non-stop vpu clock.
- */
- clks[BCM2835_CLOCK_PERI_IMAGE] =
- clk_register_gate(dev, "peri_image", "vpu",
- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
- cprman->regs + CM_PERIICTL, CM_GATE_BIT,
- 0, &cprman->regs_lock);
-
- clks[BCM2835_CLOCK_PWM] =
- bcm2835_register_clock(cprman, &bcm2835_clock_pwm_data);
+ for (i = 0; i < asize; i++) {
+ desc = &clk_desc_array[i];
+ if (desc->clk_register && desc->data)
+ clks[i] = desc->clk_register(cprman, desc->data);
+ }
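+
+ /*
+ * Editor's annotation: this single table walk replaces the long
+ * hand-written list of registration calls removed above, and the
+ * onecell provider's clk_num is likewise sized from
+ * ARRAY_SIZE(clk_desc_array) rather than the fixed
+ * BCM2835_CLOCK_COUNT.
+ */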
return of_clk_add_provider(dev->of_node, of_clk_src_onecell_get,
&cprman->onecell);
diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c
index deaa7f962..526b0b0e9 100644
--- a/drivers/clk/bcm/clk-kona-setup.c
+++ b/drivers/clk/bcm/clk-kona-setup.c
@@ -577,7 +577,8 @@ static u32 *parent_process(const char *clocks[],
* selector is not required, but we allocate space for the
* array anyway to keep things simple.
*/
- parent_names = kmalloc(parent_count * sizeof(parent_names), GFP_KERNEL);
+ parent_names = kmalloc_array(parent_count, sizeof(*parent_names),
+ GFP_KERNEL);
if (!parent_names) {
pr_err("%s: error allocating %u parent names\n", __func__,
parent_count);
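/*
 * Editor's annotation: the old expression sized the buffer with
 * sizeof(parent_names) -- the size of the pointer variable itself --
 * rather than sizeof(*parent_names), the size of one element. Both
 * happen to be pointer-sized, so the allocation was correct by
 * accident; the switch to kmalloc_array() makes the element size
 * explicit and adds multiplication-overflow checking.
 */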
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index ff4ef4f1d..1f60b0241 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -107,16 +107,15 @@ static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
writel(tmp, base + CLPS711X_SYSCON1);
clps711x_clk->clks[CLPS711X_CLK_DUMMY] =
- clk_register_fixed_rate(NULL, "dummy", NULL, CLK_IS_ROOT, 0);
+ clk_register_fixed_rate(NULL, "dummy", NULL, 0, 0);
clps711x_clk->clks[CLPS711X_CLK_CPU] =
- clk_register_fixed_rate(NULL, "cpu", NULL, CLK_IS_ROOT, f_cpu);
+ clk_register_fixed_rate(NULL, "cpu", NULL, 0, f_cpu);
clps711x_clk->clks[CLPS711X_CLK_BUS] =
- clk_register_fixed_rate(NULL, "bus", NULL, CLK_IS_ROOT, f_bus);
+ clk_register_fixed_rate(NULL, "bus", NULL, 0, f_bus);
clps711x_clk->clks[CLPS711X_CLK_PLL] =
- clk_register_fixed_rate(NULL, "pll", NULL, CLK_IS_ROOT, f_pll);
+ clk_register_fixed_rate(NULL, "pll", NULL, 0, f_pll);
clps711x_clk->clks[CLPS711X_CLK_TIMERREF] =
- clk_register_fixed_rate(NULL, "timer_ref", NULL, CLK_IS_ROOT,
- f_tim);
+ clk_register_fixed_rate(NULL, "timer_ref", NULL, 0, f_tim);
clps711x_clk->clks[CLPS711X_CLK_TIMER1] =
clk_register_divider_table(NULL, "timer1", "timer_ref", 0,
base + CLPS711X_SYSCON1, 5, 1, 0,
@@ -126,10 +125,9 @@ static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
base + CLPS711X_SYSCON1, 7, 1, 0,
timer_div_table, &clps711x_clk->lock);
clps711x_clk->clks[CLPS711X_CLK_PWM] =
- clk_register_fixed_rate(NULL, "pwm", NULL, CLK_IS_ROOT, f_pwm);
+ clk_register_fixed_rate(NULL, "pwm", NULL, 0, f_pwm);
clps711x_clk->clks[CLPS711X_CLK_SPIREF] =
- clk_register_fixed_rate(NULL, "spi_ref", NULL, CLK_IS_ROOT,
- f_spi);
+ clk_register_fixed_rate(NULL, "spi_ref", NULL, 0, f_spi);
clps711x_clk->clks[CLPS711X_CLK_SPI] =
clk_register_divider_table(NULL, "spi", "spi_ref", 0,
base + CLPS711X_SYSCON1, 16, 2, 0,
@@ -137,8 +135,7 @@ static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
clps711x_clk->clks[CLPS711X_CLK_UART] =
clk_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10);
clps711x_clk->clks[CLPS711X_CLK_TICK] =
- clk_register_fixed_rate(NULL, "tick", NULL, CLK_IS_ROOT, 64);
-
+ clk_register_fixed_rate(NULL, "tick", NULL, 0, 64);
for (i = 0; i < CLPS711X_CLK_MAX; i++)
if (IS_ERR(clps711x_clk->clks[i]))
pr_err("clk %i: register failed with %ld\n",
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 1f903e1f8..00269de2f 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -151,6 +151,33 @@ static int clk_composite_set_rate(struct clk_hw *hw, unsigned long rate,
return rate_ops->set_rate(rate_hw, rate, parent_rate);
}
+static int clk_composite_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u8 index)
+{
+ struct clk_composite *composite = to_clk_composite(hw);
+ const struct clk_ops *rate_ops = composite->rate_ops;
+ const struct clk_ops *mux_ops = composite->mux_ops;
+ struct clk_hw *rate_hw = composite->rate_hw;
+ struct clk_hw *mux_hw = composite->mux_hw;
+ unsigned long temp_rate;
+
+ __clk_hw_set_clk(rate_hw, hw);
+ __clk_hw_set_clk(mux_hw, hw);
+
+ temp_rate = rate_ops->recalc_rate(rate_hw, parent_rate);
+ if (temp_rate > rate) {
+ rate_ops->set_rate(rate_hw, rate, parent_rate);
+ mux_ops->set_parent(mux_hw, index);
+ } else {
+ mux_ops->set_parent(mux_hw, index);
+ rate_ops->set_rate(rate_hw, rate, parent_rate);
+ }
+
+ return 0;
+}
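+
+/*
+ * Editor's annotation: recalc_rate() here predicts what the current
+ * divider setting would produce under the *new* parent. If that
+ * would overshoot the requested rate, the divider is programmed down
+ * before the mux switches; otherwise the mux switches first. Either
+ * order keeps the output from transiently running faster than the
+ * rate the caller asked for.
+ */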
+
static int clk_composite_is_enabled(struct clk_hw *hw)
{
struct clk_composite *composite = to_clk_composite(hw);
@@ -184,17 +211,18 @@ static void clk_composite_disable(struct clk_hw *hw)
gate_ops->disable(gate_hw);
}
-struct clk *clk_register_composite(struct device *dev, const char *name,
+struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
const char * const *parent_names, int num_parents,
struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags)
{
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_init_data init;
struct clk_composite *composite;
struct clk_ops *clk_composite_ops;
+ int ret;
composite = kzalloc(sizeof(*composite), GFP_KERNEL);
if (!composite)
@@ -204,12 +232,13 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
init.flags = flags | CLK_IS_BASIC;
init.parent_names = parent_names;
init.num_parents = num_parents;
+ hw = &composite->hw;
clk_composite_ops = &composite->ops;
if (mux_hw && mux_ops) {
if (!mux_ops->get_parent) {
- clk = ERR_PTR(-EINVAL);
+ hw = ERR_PTR(-EINVAL);
goto err;
}
@@ -224,7 +253,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
if (rate_hw && rate_ops) {
if (!rate_ops->recalc_rate) {
- clk = ERR_PTR(-EINVAL);
+ hw = ERR_PTR(-EINVAL);
goto err;
}
clk_composite_ops->recalc_rate = clk_composite_recalc_rate;
@@ -250,10 +279,16 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
composite->rate_ops = rate_ops;
}
+ if (mux_hw && mux_ops && rate_hw && rate_ops) {
+ if (mux_ops->set_parent && rate_ops->set_rate)
+ clk_composite_ops->set_rate_and_parent =
+ clk_composite_set_rate_and_parent;
+ }
+
if (gate_hw && gate_ops) {
if (!gate_ops->is_enabled || !gate_ops->enable ||
!gate_ops->disable) {
- clk = ERR_PTR(-EINVAL);
+ hw = ERR_PTR(-EINVAL);
goto err;
}
@@ -267,22 +302,56 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
init.ops = clk_composite_ops;
composite->hw.init = &init;
- clk = clk_register(dev, &composite->hw);
- if (IS_ERR(clk))
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ hw = ERR_PTR(ret);
goto err;
+ }
if (composite->mux_hw)
- composite->mux_hw->clk = clk;
+ composite->mux_hw->clk = hw->clk;
if (composite->rate_hw)
- composite->rate_hw->clk = clk;
+ composite->rate_hw->clk = hw->clk;
if (composite->gate_hw)
- composite->gate_hw->clk = clk;
+ composite->gate_hw->clk = hw->clk;
- return clk;
+ return hw;
err:
kfree(composite);
- return clk;
+ return hw;
+}
+
+struct clk *clk_register_composite(struct device *dev, const char *name,
+ const char * const *parent_names, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags)
+{
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_composite(dev, name, parent_names, num_parents,
+ mux_hw, mux_ops, rate_hw, rate_ops, gate_hw, gate_ops,
+ flags);
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+ return hw->clk;
+}
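+
+/*
+ * Editor's annotation: this is the provider-API migration pattern
+ * used throughout this series -- the real work moves into a
+ * clk_hw_register_*() function returning a struct clk_hw (or an
+ * ERR_PTR), while the legacy struct clk entry point becomes a thin
+ * wrapper that forwards the call and returns hw->clk, with
+ * ERR_CAST() translating failures.
+ */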
+
+void clk_unregister_composite(struct clk *clk)
+{
+ struct clk_composite *composite;
+ struct clk_hw *hw;
+
+ hw = __clk_get_hw(clk);
+ if (!hw)
+ return;
+
+ composite = to_clk_composite(hw);
+
+ clk_unregister(clk);
+ kfree(composite);
}
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 00e035b51..a0f55bc1a 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -426,15 +426,16 @@ const struct clk_ops clk_divider_ro_ops = {
};
EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
-static struct clk *_register_divider(struct device *dev, const char *name,
+static struct clk_hw *_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, const struct clk_div_table *table,
spinlock_t *lock)
{
struct clk_divider *div;
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
if (width + shift > 16) {
@@ -467,12 +468,14 @@ static struct clk *_register_divider(struct device *dev, const char *name,
div->table = table;
/* register the clock */
- clk = clk_register(dev, &div->hw);
-
- if (IS_ERR(clk))
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
kfree(div);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
}
/**
@@ -492,12 +495,39 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, spinlock_t *lock)
{
- return _register_divider(dev, name, parent_name, flags, reg, shift,
+ struct clk_hw *hw;
+
+ hw = _register_divider(dev, name, parent_name, flags, reg, shift,
width, clk_divider_flags, NULL, lock);
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+ return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_divider);
/**
+ * clk_hw_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, spinlock_t *lock)
+{
+ return _register_divider(dev, name, parent_name, flags, reg, shift,
+ width, clk_divider_flags, NULL, lock);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_divider);
+
+/**
* clk_register_divider_table - register a table based divider clock with
* the clock framework
* @dev: device registering this clock
@@ -517,11 +547,41 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
u8 clk_divider_flags, const struct clk_div_table *table,
spinlock_t *lock)
{
- return _register_divider(dev, name, parent_name, flags, reg, shift,
+ struct clk_hw *hw;
+
+ hw = _register_divider(dev, name, parent_name, flags, reg, shift,
width, clk_divider_flags, table, lock);
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+ return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_divider_table);
+/**
+ * clk_hw_register_divider_table - register a table based divider clock with
+ * the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+struct clk_hw *clk_hw_register_divider_table(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ return _register_divider(dev, name, parent_name, flags, reg, shift,
+ width, clk_divider_flags, table, lock);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_divider_table);
+
void clk_unregister_divider(struct clk *clk)
{
struct clk_divider *div;
@@ -537,3 +597,18 @@ void clk_unregister_divider(struct clk *clk)
kfree(div);
}
EXPORT_SYMBOL_GPL(clk_unregister_divider);
+
+/**
+ * clk_hw_unregister_divider - unregister a clk divider
+ * @hw: hardware-specific clock data to unregister
+ */
+void clk_hw_unregister_divider(struct clk_hw *hw)
+{
+ struct clk_divider *div;
+
+ div = to_clk_divider(hw);
+
+ clk_hw_unregister(hw);
+ kfree(div);
+}
+EXPORT_SYMBOL_GPL(clk_hw_unregister_divider);
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index 053448e24..75cd6c792 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -68,13 +68,14 @@ const struct clk_ops clk_fixed_factor_ops = {
};
EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
-struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
+struct clk_hw *clk_hw_register_fixed_factor(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
unsigned int mult, unsigned int div)
{
struct clk_fixed_factor *fix;
struct clk_init_data init;
- struct clk *clk;
+ struct clk_hw *hw;
+ int ret;
fix = kmalloc(sizeof(*fix), GFP_KERNEL);
if (!fix)
@@ -91,12 +92,28 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
init.parent_names = &parent_name;
init.num_parents = 1;
- clk = clk_register(dev, &fix->hw);
-
- if (IS_ERR(clk))
+ hw = &fix->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
kfree(fix);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_fixed_factor);
+
+struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned int mult, unsigned int div)
+{
+ struct clk_hw *hw;
- return clk;
+ hw = clk_hw_register_fixed_factor(dev, name, parent_name, flags, mult,
+ div);
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+ return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_fixed_factor);
@@ -113,6 +130,17 @@ void clk_unregister_fixed_factor(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_unregister_fixed_factor);
+void clk_hw_unregister_fixed_factor(struct clk_hw *hw)
+{
+ struct clk_fixed_factor *fix;
+
+ fix = to_clk_fixed_factor(hw);
+
+ clk_hw_unregister(hw);
+ kfree(fix);
+}
+EXPORT_SYMBOL_GPL(clk_hw_unregister_fixed_factor);
+
#ifdef CONFIG_OF
/**
* of_fixed_factor_clk_setup() - Setup function for simple fixed factor clock
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index cd9dc925b..8e4453eb5 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -45,8 +45,8 @@ const struct clk_ops clk_fixed_rate_ops = {
EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
/**
- * clk_register_fixed_rate_with_accuracy - register fixed-rate clock with the
- * clock framework
+ * clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with
+ * the clock framework
* @dev: device that is registering this clock
* @name: name of this clock
* @parent_name: name of clock's parent
@@ -54,13 +54,14 @@ EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
* @fixed_rate: non-adjustable clock rate
* @fixed_accuracy: non-adjustable clock accuracy
*/
-struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
+struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
unsigned long fixed_rate, unsigned long fixed_accuracy)
{
struct clk_fixed_rate *fixed;
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
/* allocate fixed-rate clock */
fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
@@ -79,22 +80,49 @@ struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
fixed->hw.init = &init;
/* register the clock */
- clk = clk_register(dev, &fixed->hw);
- if (IS_ERR(clk))
+ hw = &fixed->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
kfree(fixed);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_fixed_rate_with_accuracy);
+
+struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate, unsigned long fixed_accuracy)
+{
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name,
+ flags, fixed_rate, fixed_accuracy);
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+ return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_fixed_rate_with_accuracy);
/**
- * clk_register_fixed_rate - register fixed-rate clock with the clock framework
+ * clk_hw_register_fixed_rate - register fixed-rate clock with the clock
+ * framework
* @dev: device that is registering this clock
* @name: name of this clock
* @parent_name: name of clock's parent
* @flags: framework-specific flags
* @fixed_rate: non-adjustable clock rate
*/
+struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ unsigned long fixed_rate)
+{
+ return clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name,
+ flags, fixed_rate, 0);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_fixed_rate);
+
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate)
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index 1abcd76b4..aab904618 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -116,14 +116,15 @@ const struct clk_ops clk_fractional_divider_ops = {
};
EXPORT_SYMBOL_GPL(clk_fractional_divider_ops);
-struct clk *clk_register_fractional_divider(struct device *dev,
+struct clk_hw *clk_hw_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
u8 clk_divider_flags, spinlock_t *lock)
{
struct clk_fractional_divider *fd;
struct clk_init_data init;
- struct clk *clk;
+ struct clk_hw *hw;
+ int ret;
fd = kzalloc(sizeof(*fd), GFP_KERNEL);
if (!fd)
@@ -146,10 +147,39 @@ struct clk *clk_register_fractional_divider(struct device *dev,
fd->lock = lock;
fd->hw.init = &init;
- clk = clk_register(dev, &fd->hw);
- if (IS_ERR(clk))
+ hw = &fd->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
kfree(fd);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_fractional_divider);
- return clk;
+struct clk *clk_register_fractional_divider(struct device *dev,
+ const char *name, const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
+ u8 clk_divider_flags, spinlock_t *lock)
+{
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_fractional_divider(dev, name, parent_name, flags,
+ reg, mshift, mwidth, nshift, nwidth, clk_divider_flags,
+ lock);
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+ return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_fractional_divider);
+
+void clk_hw_unregister_fractional_divider(struct clk_hw *hw)
+{
+ struct clk_fractional_divider *fd;
+
+ fd = to_clk_fd(hw);
+
+ clk_hw_unregister(hw);
+ kfree(fd);
+}
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index d0d8ec8e1..4e691e354 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -110,7 +110,7 @@ const struct clk_ops clk_gate_ops = {
EXPORT_SYMBOL_GPL(clk_gate_ops);
/**
- * clk_register_gate - register a gate clock with the clock framework
+ * clk_hw_register_gate - register a gate clock with the clock framework
* @dev: device that is registering this clock
* @name: name of this clock
* @parent_name: name of this clock's parent
@@ -120,14 +120,15 @@ EXPORT_SYMBOL_GPL(clk_gate_ops);
* @clk_gate_flags: gate-specific flags for this clock
* @lock: shared register lock for this clock
*/
-struct clk *clk_register_gate(struct device *dev, const char *name,
+struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock)
{
struct clk_gate *gate;
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_init_data init;
+ int ret;
if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
if (bit_idx > 15) {
@@ -154,12 +155,29 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
gate->lock = lock;
gate->hw.init = &init;
- clk = clk_register(dev, &gate->hw);
-
- if (IS_ERR(clk))
+ hw = &gate->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
kfree(gate);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_gate);
+
+struct clk *clk_register_gate(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock)
+{
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_gate(dev, name, parent_name, flags, reg,
+ bit_idx, clk_gate_flags, lock);
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+ return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_gate);
@@ -178,3 +196,14 @@ void clk_unregister_gate(struct clk *clk)
kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_unregister_gate);
+
+void clk_hw_unregister_gate(struct clk_hw *hw)
+{
+ struct clk_gate *gate;
+
+ gate = to_clk_gate(hw);
+
+ clk_hw_unregister(hw);
+ kfree(gate);
+}
+EXPORT_SYMBOL_GPL(clk_hw_unregister_gate);
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 08f65acc5..86b245746 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -94,13 +94,13 @@ const struct clk_ops clk_gpio_mux_ops = {
};
EXPORT_SYMBOL_GPL(clk_gpio_mux_ops);
-static struct clk *clk_register_gpio(struct device *dev, const char *name,
+static struct clk_hw *clk_register_gpio(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents, unsigned gpio,
bool active_low, unsigned long flags,
const struct clk_ops *clk_gpio_ops)
{
struct clk_gpio *clk_gpio;
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_init_data init = {};
unsigned long gpio_flags;
int err;
@@ -141,24 +141,26 @@ static struct clk *clk_register_gpio(struct device *dev, const char *name,
clk_gpio->gpiod = gpio_to_desc(gpio);
clk_gpio->hw.init = &init;
+ hw = &clk_gpio->hw;
if (dev)
- clk = devm_clk_register(dev, &clk_gpio->hw);
+ err = devm_clk_hw_register(dev, hw);
else
- clk = clk_register(NULL, &clk_gpio->hw);
+ err = clk_hw_register(NULL, hw);
- if (!IS_ERR(clk))
- return clk;
+ if (!err)
+ return hw;
if (!dev) {
gpiod_put(clk_gpio->gpiod);
kfree(clk_gpio);
}
- return clk;
+ return ERR_PTR(err);
}
/**
- * clk_register_gpio_gate - register a gpio clock gate with the clock framework
+ * clk_hw_register_gpio_gate - register a gpio clock gate with the clock
+ * framework
* @dev: device that is registering this clock
* @name: name of this clock
* @parent_name: name of this clock's parent
@@ -166,7 +168,7 @@ static struct clk *clk_register_gpio(struct device *dev, const char *name,
* @active_low: true if gpio should be set to 0 to enable clock
* @flags: clock flags
*/
-struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
+struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
const char *parent_name, unsigned gpio, bool active_low,
unsigned long flags)
{
@@ -175,10 +177,24 @@ struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
(parent_name ? 1 : 0), gpio, active_low, flags,
&clk_gpio_gate_ops);
}
+EXPORT_SYMBOL_GPL(clk_hw_register_gpio_gate);
+
+struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
+ const char *parent_name, unsigned gpio, bool active_low,
+ unsigned long flags)
+{
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_gpio_gate(dev, name, parent_name, gpio, active_low,
+ flags);
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+ return hw->clk;
+}
EXPORT_SYMBOL_GPL(clk_register_gpio_gate);
/**
- * clk_register_gpio_mux - register a gpio clock mux with the clock framework
+ * clk_hw_register_gpio_mux - register a gpio clock mux with the clock framework
* @dev: device that is registering this clock
* @name: name of this clock
* @parent_names: names of this clock's parents
@@ -187,7 +203,7 @@ EXPORT_SYMBOL_GPL(clk_register_gpio_gate);
* @active_low: true if gpio should be set to 0 to enable clock
* @flags: clock flags
*/
-struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
+struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents, unsigned gpio,
bool active_low, unsigned long flags)
{
@@ -199,6 +215,20 @@ struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
return clk_register_gpio(dev, name, parent_names, num_parents,
gpio, active_low, flags, &clk_gpio_mux_ops);
}
+EXPORT_SYMBOL_GPL(clk_hw_register_gpio_mux);
+
+struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
+ const char * const *parent_names, u8 num_parents, unsigned gpio,
+ bool active_low, unsigned long flags)
+{
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_gpio_mux(dev, name, parent_names, num_parents,
+ gpio, active_low, flags);
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+ return hw->clk;
+}
EXPORT_SYMBOL_GPL(clk_register_gpio_mux);
static int gpio_clk_driver_probe(struct platform_device *pdev)
diff --git a/drivers/clk/clk-ls1x.c b/drivers/clk/clk-ls1x.c
index d4c61985f..509783138 100644
--- a/drivers/clk/clk-ls1x.c
+++ b/drivers/clk/clk-ls1x.c
@@ -88,8 +88,7 @@ void __init ls1x_clk_init(void)
{
struct clk *clk;
- clk = clk_register_fixed_rate(NULL, "osc_33m_clk", NULL, CLK_IS_ROOT,
- OSC);
+ clk = clk_register_fixed_rate(NULL, "osc_33m_clk", NULL, 0, OSC);
clk_register_clkdev(clk, "osc_33m_clk", NULL);
/* clock derived from 33 MHz OSC clk */
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 252188fd8..16a3d5717 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -113,16 +113,17 @@ const struct clk_ops clk_mux_ro_ops = {
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
-struct clk *clk_register_mux_table(struct device *dev, const char *name,
+struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags,
void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, u32 *table, spinlock_t *lock)
{
struct clk_mux *mux;
- struct clk *clk;
+ struct clk_hw *hw;
struct clk_init_data init;
u8 width = 0;
+ int ret;
if (clk_mux_flags & CLK_MUX_HIWORD_MASK) {
width = fls(mask) - ffs(mask) + 1;
@@ -157,12 +158,31 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
mux->table = table;
mux->hw.init = &init;
- clk = clk_register(dev, &mux->hw);
-
- if (IS_ERR(clk))
+ hw = &mux->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
kfree(mux);
+ hw = ERR_PTR(ret);
+ }
- return clk;
+ return hw;
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_mux_table);
+
+struct clk *clk_register_mux_table(struct device *dev, const char *name,
+ const char * const *parent_names, u8 num_parents,
+ unsigned long flags,
+ void __iomem *reg, u8 shift, u32 mask,
+ u8 clk_mux_flags, u32 *table, spinlock_t *lock)
+{
+ struct clk_hw *hw;
+
+ hw = clk_hw_register_mux_table(dev, name, parent_names, num_parents,
+ flags, reg, shift, mask, clk_mux_flags,
+ table, lock);
+ if (IS_ERR(hw))
+ return ERR_CAST(hw);
+ return hw->clk;
}
EXPORT_SYMBOL_GPL(clk_register_mux_table);
@@ -180,6 +200,20 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
}
EXPORT_SYMBOL_GPL(clk_register_mux);
+struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
+ const char * const *parent_names, u8 num_parents,
+ unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_mux_flags, spinlock_t *lock)
+{
+ u32 mask = BIT(width) - 1;
+
+ return clk_hw_register_mux_table(dev, name, parent_names, num_parents,
+ flags, reg, shift, mask, clk_mux_flags,
+ NULL, lock);
+}
+EXPORT_SYMBOL_GPL(clk_hw_register_mux);
+
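For reference, a hedged sketch of the new mux helper (the register layout and
parent names are illustrative): a 2-bit field selects among four parents,
which is equivalent to clk_hw_register_mux_table() with mask = BIT(2) - 1
and no translation table:

static const char * const sel_parents[] = { "osc", "pll_a", "pll_b", "aux" };
static DEFINE_SPINLOCK(sel_lock);

static struct clk_hw *register_sel_mux(struct device *dev, void __iomem *reg)
{
	/* 2-bit mux field at bits [5:4] of *reg */
	return clk_hw_register_mux(dev, "sel_mux", sel_parents,
				   ARRAY_SIZE(sel_parents), 0,
				   reg, 4, 2, 0, &sel_lock);
}
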
void clk_unregister_mux(struct clk *clk)
{
struct clk_mux *mux;
@@ -195,3 +229,14 @@ void clk_unregister_mux(struct clk *clk)
kfree(mux);
}
EXPORT_SYMBOL_GPL(clk_unregister_mux);
+
+void clk_hw_unregister_mux(struct clk_hw *hw)
+{
+ struct clk_mux *mux;
+
+ mux = to_clk_mux(hw);
+
+ clk_hw_unregister(hw);
+ kfree(mux);
+}
+EXPORT_SYMBOL_GPL(clk_hw_unregister_mux);
diff --git a/drivers/clk/clk-nspire.c b/drivers/clk/clk-nspire.c
index a378db7b2..64f196a90 100644
--- a/drivers/clk/clk-nspire.c
+++ b/drivers/clk/clk-nspire.c
@@ -125,8 +125,7 @@ static void __init nspire_clk_setup(struct device_node *node,
of_property_read_string(node, "clock-output-names", &clk_name);
- clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT,
- info.base_clock);
+ clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, info.base_clock);
if (!IS_ERR(clk))
of_clk_add_provider(node, of_clk_src_simple_get, clk);
else
diff --git a/drivers/clk/clk-oxnas.c b/drivers/clk/clk-oxnas.c
new file mode 100644
index 000000000..79bcb2e42
--- /dev/null
+++ b/drivers/clk/clk-oxnas.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2010 Broadcom
+ * Copyright (C) 2012 Stephen Warren
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/stringify.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+/* Standard regmap gate clocks */
+struct clk_oxnas {
+ struct clk_hw hw;
+ signed char bit;
+ struct regmap *regmap;
+};
+
+/* Regmap offsets */
+#define CLK_STAT_REGOFFSET 0x24
+#define CLK_SET_REGOFFSET 0x2c
+#define CLK_CLR_REGOFFSET 0x30
+
+static inline struct clk_oxnas *to_clk_oxnas(struct clk_hw *hw)
+{
+ return container_of(hw, struct clk_oxnas, hw);
+}
+
+static int oxnas_clk_is_enabled(struct clk_hw *hw)
+{
+ struct clk_oxnas *std = to_clk_oxnas(hw);
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(std->regmap, CLK_STAT_REGOFFSET, &val);
+ if (ret < 0)
+ return ret;
+
+ return val & BIT(std->bit);
+}
+
+static int oxnas_clk_enable(struct clk_hw *hw)
+{
+ struct clk_oxnas *std = to_clk_oxnas(hw);
+
+ regmap_write(std->regmap, CLK_SET_REGOFFSET, BIT(std->bit));
+
+ return 0;
+}
+
+static void oxnas_clk_disable(struct clk_hw *hw)
+{
+ struct clk_oxnas *std = to_clk_oxnas(hw);
+
+ regmap_write(std->regmap, CLK_CLR_REGOFFSET, BIT(std->bit));
+}
+
+static const struct clk_ops oxnas_clk_ops = {
+ .enable = oxnas_clk_enable,
+ .disable = oxnas_clk_disable,
+ .is_enabled = oxnas_clk_is_enabled,
+};
+
+static const char *const oxnas_clk_parents[] = {
+ "oscillator",
+};
+
+static const char *const eth_parents[] = {
+ "gmacclk",
+};
+
+#define DECLARE_STD_CLKP(__clk, __parent) \
+static const struct clk_init_data clk_##__clk##_init = { \
+ .name = __stringify(__clk), \
+ .ops = &oxnas_clk_ops, \
+ .parent_names = __parent, \
+ .num_parents = ARRAY_SIZE(__parent), \
+}
+
+#define DECLARE_STD_CLK(__clk) DECLARE_STD_CLKP(__clk, oxnas_clk_parents)
+
+/* Hardware Bit - Clock association */
+struct clk_oxnas_init_data {
+ unsigned long bit;
+ const struct clk_init_data *clk_init;
+};
+
+/* Clk init data declaration */
+DECLARE_STD_CLK(leon);
+DECLARE_STD_CLK(dma_sgdma);
+DECLARE_STD_CLK(cipher);
+DECLARE_STD_CLK(sata);
+DECLARE_STD_CLK(audio);
+DECLARE_STD_CLK(usbmph);
+DECLARE_STD_CLKP(etha, eth_parents);
+DECLARE_STD_CLK(pciea);
+DECLARE_STD_CLK(nand);
+
+/* Table index is the clock index */
+static const struct clk_oxnas_init_data clk_oxnas_init[] = {
+ [0] = {0, &clk_leon_init},
+ [1] = {1, &clk_dma_sgdma_init},
+ [2] = {2, &clk_cipher_init},
+	/* Skip bit 3 (DDR clock); it must not be touched */
+ [3] = {4, &clk_sata_init},
+ [4] = {5, &clk_audio_init},
+ [5] = {6, &clk_usbmph_init},
+ [6] = {7, &clk_etha_init},
+ [7] = {8, &clk_pciea_init},
+ [8] = {9, &clk_nand_init},
+};
+
+struct clk_oxnas_data {
+ struct clk_oxnas clk_oxnas[ARRAY_SIZE(clk_oxnas_init)];
+ struct clk_onecell_data onecell_data[ARRAY_SIZE(clk_oxnas_init)];
+ struct clk *clks[ARRAY_SIZE(clk_oxnas_init)];
+};
+
+static int oxnas_stdclk_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct clk_oxnas_data *clk_oxnas;
+ struct regmap *regmap;
+ int i;
+
+ clk_oxnas = devm_kzalloc(&pdev->dev, sizeof(*clk_oxnas), GFP_KERNEL);
+ if (!clk_oxnas)
+ return -ENOMEM;
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap)) {
+		dev_err(&pdev->dev, "failed to get parent regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(clk_oxnas_init); i++) {
+ struct clk_oxnas *_clk;
+
+ _clk = &clk_oxnas->clk_oxnas[i];
+ _clk->bit = clk_oxnas_init[i].bit;
+ _clk->hw.init = clk_oxnas_init[i].clk_init;
+ _clk->regmap = regmap;
+
+ clk_oxnas->clks[i] =
+ devm_clk_register(&pdev->dev, &_clk->hw);
+ if (WARN_ON(IS_ERR(clk_oxnas->clks[i])))
+ return PTR_ERR(clk_oxnas->clks[i]);
+ }
+
+ clk_oxnas->onecell_data->clks = clk_oxnas->clks;
+ clk_oxnas->onecell_data->clk_num = ARRAY_SIZE(clk_oxnas_init);
+
+ return of_clk_add_provider(np, of_clk_src_onecell_get,
+ clk_oxnas->onecell_data);
+}
+
+static int oxnas_stdclk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id oxnas_stdclk_dt_ids[] = {
+ { .compatible = "oxsemi,ox810se-stdclk" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, oxnas_stdclk_dt_ids);
+
+static struct platform_driver oxnas_stdclk_driver = {
+ .probe = oxnas_stdclk_probe,
+ .remove = oxnas_stdclk_remove,
+ .driver = {
+ .name = "oxnas-stdclk",
+ .of_match_table = oxnas_stdclk_dt_ids,
+ },
+};
+
+module_platform_driver(oxnas_stdclk_driver);
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index 9c0b8e6b1..8328863cb 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -132,7 +132,7 @@ static const struct palmas_clks_of_match_data palmas_of_clk32kg = {
.init = {
.name = "clk32kg",
.ops = &palmas_clks_ops,
- .flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED,
+ .flags = CLK_IGNORE_UNUSED,
},
.desc = {
.clk_name = "clk32kg",
@@ -148,7 +148,7 @@ static const struct palmas_clks_of_match_data palmas_of_clk32kgaudio = {
.init = {
.name = "clk32kgaudio",
.ops = &palmas_clks_ops,
- .flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED,
+ .flags = CLK_IGNORE_UNUSED,
},
.desc = {
.clk_name = "clk32kgaudio",
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index 883045814..1630a1f08 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -59,6 +59,7 @@ static int clk_pwm_probe(struct platform_device *pdev)
struct clk_init_data init;
struct clk_pwm *clk_pwm;
struct pwm_device *pwm;
+ struct pwm_args pargs;
const char *clk_name;
struct clk *clk;
int ret;
@@ -71,22 +72,28 @@ static int clk_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm))
return PTR_ERR(pwm);
- if (!pwm->period) {
+ pwm_get_args(pwm, &pargs);
+ if (!pargs.period) {
dev_err(&pdev->dev, "invalid PWM period\n");
return -EINVAL;
}
if (of_property_read_u32(node, "clock-frequency", &clk_pwm->fixed_rate))
- clk_pwm->fixed_rate = NSEC_PER_SEC / pwm->period;
+ clk_pwm->fixed_rate = NSEC_PER_SEC / pargs.period;
- if (pwm->period != NSEC_PER_SEC / clk_pwm->fixed_rate &&
- pwm->period != DIV_ROUND_UP(NSEC_PER_SEC, clk_pwm->fixed_rate)) {
+ if (pargs.period != NSEC_PER_SEC / clk_pwm->fixed_rate &&
+ pargs.period != DIV_ROUND_UP(NSEC_PER_SEC, clk_pwm->fixed_rate)) {
dev_err(&pdev->dev,
"clock-frequency does not match PWM period\n");
return -EINVAL;
}
- ret = pwm_config(pwm, (pwm->period + 1) >> 1, pwm->period);
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to the
+ * atomic PWM API.
+ */
+ pwm_apply_args(pwm);
+ ret = pwm_config(pwm, (pargs.period + 1) >> 1, pargs.period);
if (ret < 0)
return ret;
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 7bc1c4527..58566a179 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -869,14 +869,15 @@ static void __init core_mux_init(struct device_node *np)
}
}
-static struct clk *sysclk_from_fixed(struct device_node *node, const char *name)
+static struct clk __init
+*sysclk_from_fixed(struct device_node *node, const char *name)
{
u32 rate;
if (of_property_read_u32(node, "clock-frequency", &rate))
return ERR_PTR(-ENODEV);
- return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
+ return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
static struct clk *sysclk_from_parent(const char *name)
diff --git a/drivers/clk/clk-rk808.c b/drivers/clk/clk-rk808.c
index 0fee2f4ca..743830397 100644
--- a/drivers/clk/clk-rk808.c
+++ b/drivers/clk/clk-rk808.c
@@ -106,7 +106,6 @@ static int rk808_clkout_probe(struct platform_device *pdev)
if (!clk_table)
return -ENOMEM;
- init.flags = CLK_IS_ROOT;
init.parent_names = NULL;
init.num_parents = 0;
init.name = "rk808-clkout1";
diff --git a/drivers/clk/clk-tango4.c b/drivers/clk/clk-tango4.c
index 004ab7dfc..eef75e305 100644
--- a/drivers/clk/clk-tango4.c
+++ b/drivers/clk/clk-tango4.c
@@ -4,17 +4,19 @@
#include <linux/init.h>
#include <linux/io.h>
-static struct clk *out[2];
-static struct clk_onecell_data clk_data = { out, 2 };
+#define CLK_COUNT 4 /* cpu_clk, sys_clk, usb_clk, sdio_clk */
+static struct clk *clks[CLK_COUNT];
+static struct clk_onecell_data clk_data = { clks, CLK_COUNT };
-#define SYSCLK_CTRL 0x20
-#define CPUCLK_CTRL 0x24
-#define LEGACY_DIV 0x3c
+#define SYSCLK_DIV 0x20
+#define CPUCLK_DIV 0x24
+#define DIV_BYPASS BIT(23)
-#define PLL_N(val) (((val) >> 0) & 0x7f)
-#define PLL_K(val) (((val) >> 13) & 0x7)
-#define PLL_M(val) (((val) >> 16) & 0x7)
-#define DIV_INDEX(val) (((val) >> 8) & 0xf)
+/*** CLKGEN_PLL ***/
+#define extract_pll_n(val) ((val >> 0) & ((1u << 7) - 1))
+#define extract_pll_k(val) ((val >> 13) & ((1u << 3) - 1))
+#define extract_pll_m(val) ((val >> 16) & ((1u << 3) - 1))
+#define extract_pll_isel(val) ((val >> 24) & ((1u << 3) - 1))
static void __init make_pll(int idx, const char *parent, void __iomem *base)
{
@@ -22,40 +24,61 @@ static void __init make_pll(int idx, const char *parent, void __iomem *base)
u32 val, mul, div;
sprintf(name, "pll%d", idx);
- val = readl_relaxed(base + idx*8);
- mul = PLL_N(val) + 1;
- div = (PLL_M(val) + 1) << PLL_K(val);
+ val = readl(base + idx * 8);
+ mul = extract_pll_n(val) + 1;
+ div = (extract_pll_m(val) + 1) << extract_pll_k(val);
clk_register_fixed_factor(NULL, name, parent, 0, mul, div);
+ if (extract_pll_isel(val) != 1)
+ panic("%s: input not set to XTAL_IN\n", name);
}
-static int __init get_div(void __iomem *base)
+static void __init make_cd(int idx, void __iomem *base)
{
- u8 sysclk_tab[16] = { 2, 4, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4 };
- int idx = DIV_INDEX(readl_relaxed(base + LEGACY_DIV));
+ char name[8];
+ u32 val, mul, div;
- return sysclk_tab[idx];
+ sprintf(name, "cd%d", idx);
+ val = readl(base + idx * 8);
+ mul = 1 << 27;
+ div = (2 << 27) + val;
+ clk_register_fixed_factor(NULL, name, "pll2", 0, mul, div);
+ if (val > 0xf0000000)
+ panic("%s: unsupported divider %x\n", name, val);
}
static void __init tango4_clkgen_setup(struct device_node *np)
{
- int div, ret;
+ struct clk **pp = clk_data.clks;
void __iomem *base = of_iomap(np, 0);
const char *parent = of_clk_get_parent_name(np, 0);
if (!base)
- panic("%s: invalid address\n", np->full_name);
+ panic("%s: invalid address\n", np->name);
+
+ if (readl(base + CPUCLK_DIV) & DIV_BYPASS)
+ panic("%s: unsupported cpuclk setup\n", np->name);
+
+ if (readl(base + SYSCLK_DIV) & DIV_BYPASS)
+ panic("%s: unsupported sysclk setup\n", np->name);
+
+ writel(0x100, base + CPUCLK_DIV); /* disable frequency ramping */
make_pll(0, parent, base);
make_pll(1, parent, base);
+ make_pll(2, parent, base);
+ make_cd(2, base + 0x80);
+ make_cd(6, base + 0x80);
- out[0] = clk_register_divider(NULL, "cpuclk", "pll0", 0,
- base + CPUCLK_CTRL, 8, 8, CLK_DIVIDER_ONE_BASED, NULL);
+ pp[0] = clk_register_divider(NULL, "cpu_clk", "pll0", 0,
+ base + CPUCLK_DIV, 8, 8, CLK_DIVIDER_ONE_BASED, NULL);
+ pp[1] = clk_register_fixed_factor(NULL, "sys_clk", "pll1", 0, 1, 4);
+ pp[2] = clk_register_fixed_factor(NULL, "usb_clk", "cd2", 0, 1, 2);
+ pp[3] = clk_register_fixed_factor(NULL, "sdio_clk", "cd6", 0, 1, 2);
- div = readl_relaxed(base + SYSCLK_CTRL) & BIT(23) ? get_div(base) : 4;
- out[1] = clk_register_fixed_factor(NULL, "sysclk", "pll1", 0, 1, div);
+ if (IS_ERR(pp[0]) || IS_ERR(pp[1]) || IS_ERR(pp[2]) || IS_ERR(pp[3]))
+ panic("%s: clk registration failed\n", np->name);
- ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
- if (IS_ERR(out[0]) || IS_ERR(out[1]) || ret < 0)
- panic("%s: clk registration failed\n", np->full_name);
+ if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data))
+ panic("%s: clk provider registration failed\n", np->name);
}
CLK_OF_DECLARE(tango4_clkgen, "sigma,tango4-clkgen", tango4_clkgen_setup);
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
index 8e5ed649a..697c66757 100644
--- a/drivers/clk/clk-twl6040.c
+++ b/drivers/clk/clk-twl6040.c
@@ -74,7 +74,6 @@ static const struct clk_ops twl6040_mcpdm_ops = {
static struct clk_init_data wm831x_clkout_init = {
.name = "mcpdm_fclk",
.ops = &twl6040_mcpdm_ops,
- .flags = CLK_IS_ROOT,
};
static int twl6040_clk_probe(struct platform_device *pdev)
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index 43f9d1525..88def4b27 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -58,7 +58,6 @@ static const struct clk_ops wm831x_xtal_ops = {
static struct clk_init_data wm831x_xtal_init = {
.name = "xtal",
.ops = &wm831x_xtal_ops,
- .flags = CLK_IS_ROOT,
};
static const unsigned long wm831x_fll_auto_rates[] = {
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index d73450b60..343313250 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -198,7 +198,7 @@ static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_ty
of_property_read_string(np, "clock-output-names", &clk_name);
clk = xgene_register_clk_pll(NULL,
clk_name, of_clk_get_parent_name(np, 0),
- CLK_IS_ROOT, reg, 0, pll_type, &clk_lock,
+ 0, reg, 0, pll_type, &clk_lock,
version);
if (!IS_ERR(clk)) {
of_clk_add_provider(np, of_clk_src_simple_get, clk);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index fb74dc1f7..d584004f7 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -574,6 +574,9 @@ static void clk_core_unprepare(struct clk_core *core)
if (WARN_ON(core->prepare_count == 0))
return;
+ if (WARN_ON(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL))
+ return;
+
if (--core->prepare_count > 0)
return;
@@ -679,6 +682,9 @@ static void clk_core_disable(struct clk_core *core)
if (WARN_ON(core->enable_count == 0))
return;
+ if (WARN_ON(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL))
+ return;
+
if (--core->enable_count > 0)
return;
@@ -2397,6 +2403,16 @@ static int __clk_core_init(struct clk_core *core)
if (core->ops->init)
core->ops->init(core->hw);
+ if (core->flags & CLK_IS_CRITICAL) {
+ unsigned long flags;
+
+ clk_core_prepare(core);
+
+ flags = clk_enable_lock();
+ clk_core_enable(core);
+ clk_enable_unlock(flags);
+ }
+
kref_init(&core->ref);
out:
clk_prepare_unlock();
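
The effect of the new flag, as a provider-side sketch (the ops and clock
name are hypothetical): a clock marked CLK_IS_CRITICAL is prepared and
enabled once at registration, so its counts never fall to zero and the new
WARN_ON checks in clk_core_unprepare()/clk_core_disable() guard the last
reference:

static const struct clk_ops my_ddr_ops;		/* hypothetical ops */

static const struct clk_init_data ddr_init = {
	.name	= "ddr",
	.ops	= &my_ddr_ops,
	.flags	= CLK_IS_CRITICAL,	/* core keeps it prepared + enabled */
};
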
@@ -2536,6 +2552,22 @@ fail_out:
}
EXPORT_SYMBOL_GPL(clk_register);
+/**
+ * clk_hw_register - register a clk_hw and return an error code
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * clk_hw_register is the primary interface for populating the clock tree with
+ * new clock nodes. It returns 0 on success or a negative error code on
+ * failure. Drivers must check the return value after calling
+ * clk_hw_register().
+ */
+int clk_hw_register(struct device *dev, struct clk_hw *hw)
+{
+ return PTR_ERR_OR_ZERO(clk_register(dev, hw));
+}
+EXPORT_SYMBOL_GPL(clk_hw_register);
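
A hedged sketch of the intended call pattern (the wrapper struct is an
assumption): embed a clk_hw in driver data, point .init at a clk_init_data,
and check the integer return:

struct my_gate {			/* hypothetical wrapper */
	struct clk_hw hw;
	void __iomem *reg;
};

static int my_gate_register(struct device *dev, struct my_gate *gate,
			    const struct clk_init_data *init)
{
	gate->hw.init = init;
	return clk_hw_register(dev, &gate->hw);	/* 0 or -errno */
}
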
+
/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
@@ -2637,11 +2669,26 @@ unlock:
}
EXPORT_SYMBOL_GPL(clk_unregister);
+/**
+ * clk_hw_unregister - unregister a currently registered clk_hw
+ * @hw: hardware-specific clock data to unregister
+ */
+void clk_hw_unregister(struct clk_hw *hw)
+{
+ clk_unregister(hw->clk);
+}
+EXPORT_SYMBOL_GPL(clk_hw_unregister);
+
static void devm_clk_release(struct device *dev, void *res)
{
clk_unregister(*(struct clk **)res);
}
+static void devm_clk_hw_release(struct device *dev, void *res)
+{
+ clk_hw_unregister(*(struct clk_hw **)res);
+}
+
/**
* devm_clk_register - resource managed clk_register()
* @dev: device that is registering this clock
@@ -2672,6 +2719,36 @@ struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(devm_clk_register);
+/**
+ * devm_clk_hw_register - resource managed clk_hw_register()
+ * @dev: device that is registering this clock
+ * @hw: link to hardware-specific clock data
+ *
+ * Managed clk_hw_register(). Clocks registered by this function are
+ * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
+ * for more information.
+ */
+int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
+{
+ struct clk_hw **hwp;
+ int ret;
+
+ hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
+ if (!hwp)
+ return -ENOMEM;
+
+ ret = clk_hw_register(dev, hw);
+ if (!ret) {
+ *hwp = hw;
+ devres_add(dev, hwp);
+ } else {
+ devres_free(hwp);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_clk_hw_register);
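
In a probe path the managed form removes the error-path bookkeeping; a
minimal sketch (the helper is hypothetical):

static int my_clk_probe(struct platform_device *pdev)
{
	struct clk_hw *hw = my_get_hw(pdev);	/* hypothetical helper */

	/* unregistered automatically on driver detach */
	return devm_clk_hw_register(&pdev->dev, hw);
}
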
+
static int devm_clk_match(struct device *dev, void *res, void *data)
{
struct clk *c = res;
@@ -2680,6 +2757,15 @@ static int devm_clk_match(struct device *dev, void *res, void *data)
return c == data;
}
+static int devm_clk_hw_match(struct device *dev, void *res, void *data)
+{
+ struct clk_hw *hw = res;
+
+ if (WARN_ON(!hw))
+ return 0;
+ return hw == data;
+}
+
/**
* devm_clk_unregister - resource managed clk_unregister()
* @clk: clock to unregister
@@ -2694,6 +2780,22 @@ void devm_clk_unregister(struct device *dev, struct clk *clk)
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);
+/**
+ * devm_clk_hw_unregister - resource managed clk_hw_unregister()
+ * @dev: device that is unregistering the hardware-specific clock data
+ * @hw: link to hardware-specific clock data
+ *
+ * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
+ * this function will not need to be called and the resource management
+ * code will ensure that the resource is freed.
+ */
+void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
+{
+ WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
+ hw));
+}
+EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
+
/*
* clkdev helpers
*/
@@ -2855,6 +2957,7 @@ struct of_clk_provider {
struct device_node *node;
struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
+ struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
void *data;
};
@@ -2871,6 +2974,12 @@ struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
+struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
+{
+ return data;
+}
+EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
+
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
struct clk_onecell_data *clk_data = data;
@@ -2885,6 +2994,21 @@ struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
+struct clk_hw *
+of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct clk_hw_onecell_data *hw_data = data;
+ unsigned int idx = clkspec->args[0];
+
+ if (idx >= hw_data->num) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return hw_data->hws[idx];
+}
+EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
+
/**
* of_clk_add_provider() - Register a clock provider for a node
* @np: Device node pointer associated with clock provider
@@ -2921,6 +3045,41 @@ int of_clk_add_provider(struct device_node *np,
EXPORT_SYMBOL_GPL(of_clk_add_provider);
/**
+ * of_clk_add_hw_provider() - Register a clock provider for a node
+ * @np: Device node pointer associated with clock provider
+ * @get: callback for decoding clk_hw
+ * @data: context pointer for @get callback.
+ */
+int of_clk_add_hw_provider(struct device_node *np,
+ struct clk_hw *(*get)(struct of_phandle_args *clkspec,
+ void *data),
+ void *data)
+{
+ struct of_clk_provider *cp;
+ int ret;
+
+ cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+
+ cp->node = of_node_get(np);
+ cp->data = data;
+ cp->get_hw = get;
+
+ mutex_lock(&of_clk_mutex);
+ list_add(&cp->link, &of_clk_providers);
+ mutex_unlock(&of_clk_mutex);
+ pr_debug("Added clk_hw provider from %s\n", np->full_name);
+
+ ret = of_clk_set_defaults(np, true);
+ if (ret < 0)
+ of_clk_del_provider(np);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
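
A hedged sketch tying this to of_clk_hw_onecell_get() above (the helper and
its arguments are illustrative): fill a clk_hw_onecell_data and hand it to
the new provider hook:

static int register_hw_provider(struct device_node *np, struct clk_hw **hws,
				unsigned int count)
{
	struct clk_hw_onecell_data *data;
	unsigned int i;

	data = kzalloc(sizeof(*data) + count * sizeof(*data->hws), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->num = count;
	for (i = 0; i < count; i++)
		data->hws[i] = hws[i];

	return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, data);
}
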
+
+/**
* of_clk_del_provider() - Remove a previously registered clock provider
* @np: Device node pointer associated with clock provider
*/
@@ -2941,11 +3100,32 @@ void of_clk_del_provider(struct device_node *np)
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);
+static struct clk_hw *
+__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
+ struct of_phandle_args *clkspec)
+{
+ struct clk *clk;
+ struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
+
+ if (provider->get_hw) {
+ hw = provider->get_hw(clkspec, provider->data);
+ } else if (provider->get) {
+ clk = provider->get(clkspec, provider->data);
+ if (!IS_ERR(clk))
+ hw = __clk_get_hw(clk);
+ else
+ hw = ERR_CAST(clk);
+ }
+
+ return hw;
+}
+
struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
const char *dev_id, const char *con_id)
{
struct of_clk_provider *provider;
struct clk *clk = ERR_PTR(-EPROBE_DEFER);
+ struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
if (!clkspec)
return ERR_PTR(-EINVAL);
@@ -2954,10 +3134,9 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
mutex_lock(&of_clk_mutex);
list_for_each_entry(provider, &of_clk_providers, link) {
if (provider->node == clkspec->np)
- clk = provider->get(clkspec, provider->data);
- if (!IS_ERR(clk)) {
- clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
- con_id);
+ hw = __of_clk_get_hw_from_provider(provider, clkspec);
+ if (!IS_ERR(hw)) {
+ clk = __clk_create_clk(hw, dev_id, con_id);
if (!IS_ERR(clk) && !__clk_get(clk)) {
__clk_free_clk(clk);
@@ -3127,6 +3306,41 @@ static int parent_ready(struct device_node *np)
}
/**
+ * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
+ * @np: Device node pointer associated with clock provider
+ * @index: clock index
+ * @flags: pointer to clk_core->flags
+ *
+ * Detects if the clock-critical property exists and, if so, sets the
+ * corresponding CLK_IS_CRITICAL flag.
+ *
+ * Do not use this function. It exists only for legacy Device Tree
+ * bindings, such as the outdated one-clock-per-node style.
+ * Those bindings typically put all clock data into .dts and the Linux
+ * driver has no clock data, thus making it impossible to set this flag
+ * correctly from the driver. Only those drivers may call
+ * of_clk_detect_critical from their setup functions.
+ *
+ * Return: error code or zero on success
+ */
+int of_clk_detect_critical(struct device_node *np,
+ int index, unsigned long *flags)
+{
+ struct property *prop;
+ const __be32 *cur;
+ uint32_t idx;
+
+ if (!np || !flags)
+ return -EINVAL;
+
+ of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
+ if (index == idx)
+ *flags |= CLK_IS_CRITICAL;
+
+ return 0;
+}
+
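A hedged sketch of the only sanctioned call site (the node layout and names
are illustrative): a legacy one-clock-per-node setup function folds the DT
property into the flags it passes at registration:

/* DT (illustrative):  clock-critical = <0>;  marks output 0 as critical */
static void __init legacy_clk_setup(struct device_node *np)
{
	unsigned long flags = 0;
	struct clk *clk;

	of_clk_detect_critical(np, 0, &flags);	/* may OR in CLK_IS_CRITICAL */
	clk = clk_register_fixed_factor(NULL, "legacy_clk",
					of_clk_get_parent_name(np, 0),
					flags, 1, 2);
	if (!IS_ERR(clk))
		of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
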
+/**
* of_clk_init() - Scan and init clock providers from the DT
* @matches: array of compatible values and init functions for providers.
*
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index eb20b9411..89cc700fb 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -301,6 +301,20 @@ clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
}
EXPORT_SYMBOL(clkdev_alloc);
+struct clk_lookup *
+clkdev_hw_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt, ...)
+{
+ struct clk_lookup *cl;
+ va_list ap;
+
+ va_start(ap, dev_fmt);
+ cl = vclkdev_alloc(hw, con_id, dev_fmt, ap);
+ va_end(ap);
+
+ return cl;
+}
+EXPORT_SYMBOL(clkdev_hw_alloc);
+
/**
* clkdev_create - allocate and add a clkdev lookup structure
* @clk: struct clk to associate with all clk_lookups
@@ -324,6 +338,29 @@ struct clk_lookup *clkdev_create(struct clk *clk, const char *con_id,
}
EXPORT_SYMBOL_GPL(clkdev_create);
+/**
+ * clkdev_hw_create - allocate and add a clkdev lookup structure
+ * @hw: struct clk_hw to associate with all clk_lookups
+ * @con_id: connection ID string on device
+ * @dev_fmt: format string describing device name
+ *
+ * Returns a clk_lookup structure, which can be later unregistered and
+ * freed.
+ */
+struct clk_lookup *clkdev_hw_create(struct clk_hw *hw, const char *con_id,
+ const char *dev_fmt, ...)
+{
+ struct clk_lookup *cl;
+ va_list ap;
+
+ va_start(ap, dev_fmt);
+ cl = vclkdev_create(hw, con_id, dev_fmt, ap);
+ va_end(ap);
+
+ return cl;
+}
+EXPORT_SYMBOL_GPL(clkdev_hw_create);
+
int clk_add_alias(const char *alias, const char *alias_dev_name,
const char *con_id, struct device *dev)
{
@@ -404,28 +441,28 @@ int clk_register_clkdev(struct clk *clk, const char *con_id,
EXPORT_SYMBOL(clk_register_clkdev);
/**
- * clk_register_clkdevs - register a set of clk_lookup for a struct clk
- * @clk: struct clk to associate with all clk_lookups
- * @cl: array of clk_lookup structures with con_id and dev_id pre-initialized
- * @num: number of clk_lookup structures to register
+ * clk_hw_register_clkdev - register one clock lookup for a struct clk_hw
+ * @hw: struct clk_hw to associate with all clk_lookups
+ * @con_id: connection ID string on device
+ * @dev_id: format string describing device name
*
- * To make things easier for mass registration, we detect error clks
- * from a previous clk_register() call, and return the error code for
- * those. This is to permit this function to be called immediately
- * after clk_register().
+ * con_id or dev_id may be NULL as a wildcard, just as in the rest of
+ * clkdev.
*/
-int clk_register_clkdevs(struct clk *clk, struct clk_lookup *cl, size_t num)
+int clk_hw_register_clkdev(struct clk_hw *hw, const char *con_id,
+ const char *dev_id)
{
- unsigned i;
-
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ struct clk_lookup *cl;
- for (i = 0; i < num; i++, cl++) {
- cl->clk_hw = __clk_get_hw(clk);
- __clkdev_add(cl);
- }
+ /*
+ * Since dev_id can be NULL, and NULL is handled specially, we must
+ * pass it as either a NULL format string, or with "%s".
+ */
+ if (dev_id)
+ cl = __clk_register_clkdev(hw, con_id, "%s", dev_id);
+ else
+ cl = __clk_register_clkdev(hw, con_id, NULL);
- return 0;
+ return cl ? 0 : -ENOMEM;
}
-EXPORT_SYMBOL(clk_register_clkdevs);
+EXPORT_SYMBOL(clk_hw_register_clkdev);
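
A short usage sketch (the device and connection IDs are illustrative); a
NULL dev_id would instead act as a wildcard matching any device:

static int add_uart_lookup(struct clk_hw *hw)
{
	/* matched by clk_get(dev, "baudclk") on the named device */
	return clk_hw_register_clkdev(hw, "baudclk", "ff000000.serial");
}
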
diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig
index e43485448..3f537a04c 100644
--- a/drivers/clk/hisilicon/Kconfig
+++ b/drivers/clk/hisilicon/Kconfig
@@ -1,3 +1,11 @@
+config COMMON_CLK_HI3519
+ tristate "Hi3519 Clock Driver"
+ depends on ARCH_HISI || COMPILE_TEST
+ select RESET_HISI
+ default ARCH_HISI
+ help
	  Build the clock driver for Hi3519.
+
config COMMON_CLK_HI6220
bool "Hi6220 Clock Driver"
depends on ARCH_HISI || COMPILE_TEST
@@ -5,6 +13,13 @@ config COMMON_CLK_HI6220
help
Build the Hisilicon Hi6220 clock driver based on the common clock framework.
+config RESET_HISI
+ bool "HiSilicon Reset Controller Driver"
+ depends on ARCH_HISI || COMPILE_TEST
+ select RESET_CONTROLLER
+ help
	  Build the reset controller driver for HiSilicon device chipsets.
+
config STUB_CLK_HI6220
bool "Hi6220 Stub Clock Driver"
depends on COMMON_CLK_HI6220 && MAILBOX
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index 74dba3159..e169ec7da 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -7,5 +7,7 @@ obj-y += clk.o clkgate-separated.o clkdivider-hi6220.o
obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o
obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o
obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o
+obj-$(CONFIG_COMMON_CLK_HI3519) += clk-hi3519.o
obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o
+obj-$(CONFIG_RESET_HISI) += reset.o
obj-$(CONFIG_STUB_CLK_HI6220) += clk-hi6220-stub.o
diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c
new file mode 100644
index 000000000..715c7301a
--- /dev/null
+++ b/drivers/clk/hisilicon/clk-hi3519.c
@@ -0,0 +1,131 @@
+/*
+ * Hi3519 Clock Driver
+ *
+ * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <dt-bindings/clock/hi3519-clock.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "clk.h"
+#include "reset.h"
+
+#define HI3519_INNER_CLK_OFFSET 64
+#define HI3519_FIXED_24M 65
+#define HI3519_FIXED_50M 66
+#define HI3519_FIXED_75M 67
+#define HI3519_FIXED_125M 68
+#define HI3519_FIXED_150M 69
+#define HI3519_FIXED_200M 70
+#define HI3519_FIXED_250M 71
+#define HI3519_FIXED_300M 72
+#define HI3519_FIXED_400M 73
+#define HI3519_FMC_MUX 74
+
+#define HI3519_NR_CLKS 128
+
+static const struct hisi_fixed_rate_clock hi3519_fixed_rate_clks[] = {
+ { HI3519_FIXED_24M, "24m", NULL, 0, 24000000, },
+ { HI3519_FIXED_50M, "50m", NULL, 0, 50000000, },
+ { HI3519_FIXED_75M, "75m", NULL, 0, 75000000, },
+ { HI3519_FIXED_125M, "125m", NULL, 0, 125000000, },
+ { HI3519_FIXED_150M, "150m", NULL, 0, 150000000, },
+ { HI3519_FIXED_200M, "200m", NULL, 0, 200000000, },
+ { HI3519_FIXED_250M, "250m", NULL, 0, 250000000, },
+ { HI3519_FIXED_300M, "300m", NULL, 0, 300000000, },
+ { HI3519_FIXED_400M, "400m", NULL, 0, 400000000, },
+};
+
+static const char *const fmc_mux_p[] = {
+ "24m", "75m", "125m", "150m", "200m", "250m", "300m", "400m", };
+static u32 fmc_mux_table[] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+static const struct hisi_mux_clock hi3519_mux_clks[] = {
+ { HI3519_FMC_MUX, "fmc_mux", fmc_mux_p, ARRAY_SIZE(fmc_mux_p),
+ CLK_SET_RATE_PARENT, 0xc0, 2, 3, 0, fmc_mux_table, },
+};
+
+static const struct hisi_gate_clock hi3519_gate_clks[] = {
+ { HI3519_FMC_CLK, "clk_fmc", "fmc_mux",
+ CLK_SET_RATE_PARENT, 0xc0, 1, 0, },
+ { HI3519_UART0_CLK, "clk_uart0", "24m",
+ CLK_SET_RATE_PARENT, 0xe4, 20, 0, },
+ { HI3519_UART1_CLK, "clk_uart1", "24m",
+ CLK_SET_RATE_PARENT, 0xe4, 21, 0, },
+ { HI3519_UART2_CLK, "clk_uart2", "24m",
+ CLK_SET_RATE_PARENT, 0xe4, 22, 0, },
+ { HI3519_UART3_CLK, "clk_uart3", "24m",
+ CLK_SET_RATE_PARENT, 0xe4, 23, 0, },
+ { HI3519_UART4_CLK, "clk_uart4", "24m",
+ CLK_SET_RATE_PARENT, 0xe4, 24, 0, },
+ { HI3519_SPI0_CLK, "clk_spi0", "50m",
+ CLK_SET_RATE_PARENT, 0xe4, 16, 0, },
+ { HI3519_SPI1_CLK, "clk_spi1", "50m",
+ CLK_SET_RATE_PARENT, 0xe4, 17, 0, },
+ { HI3519_SPI2_CLK, "clk_spi2", "50m",
+ CLK_SET_RATE_PARENT, 0xe4, 18, 0, },
+};
+
+static int hi3519_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct hisi_clock_data *clk_data;
+ struct hisi_reset_controller *rstc;
+
+ rstc = hisi_reset_init(np);
+ if (!rstc)
+ return -ENOMEM;
+
+ clk_data = hisi_clk_init(np, HI3519_NR_CLKS);
+ if (!clk_data) {
+ hisi_reset_exit(rstc);
+ return -ENODEV;
+ }
+
+ hisi_clk_register_fixed_rate(hi3519_fixed_rate_clks,
+ ARRAY_SIZE(hi3519_fixed_rate_clks),
+ clk_data);
+ hisi_clk_register_mux(hi3519_mux_clks, ARRAY_SIZE(hi3519_mux_clks),
+ clk_data);
+ hisi_clk_register_gate(hi3519_gate_clks,
+ ARRAY_SIZE(hi3519_gate_clks), clk_data);
+
+ return 0;
+}
+
+static const struct of_device_id hi3519_clk_match_table[] = {
+ { .compatible = "hisilicon,hi3519-crg" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hi3519_clk_match_table);
+
+static struct platform_driver hi3519_clk_driver = {
+ .probe = hi3519_clk_probe,
+ .driver = {
+ .name = "hi3519-clk",
+ .of_match_table = hi3519_clk_match_table,
+ },
+};
+
+static int __init hi3519_clk_init(void)
+{
+ return platform_driver_register(&hi3519_clk_driver);
+}
+core_initcall(hi3519_clk_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon Hi3519 Clock Driver");
diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c
index 9f8e76676..9b15adbfc 100644
--- a/drivers/clk/hisilicon/clk.c
+++ b/drivers/clk/hisilicon/clk.c
@@ -37,7 +37,7 @@
static DEFINE_SPINLOCK(hisi_clk_lock);
-struct hisi_clock_data __init *hisi_clk_init(struct device_node *np,
+struct hisi_clock_data *hisi_clk_init(struct device_node *np,
int nr_clks)
{
struct hisi_clock_data *clk_data;
@@ -71,8 +71,9 @@ err_data:
err:
return NULL;
}
+EXPORT_SYMBOL_GPL(hisi_clk_init);
-void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *clks,
+void hisi_clk_register_fixed_rate(const struct hisi_fixed_rate_clock *clks,
int nums, struct hisi_clock_data *data)
{
struct clk *clk;
@@ -91,8 +92,9 @@ void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *clks,
data->clk_data.clks[clks[i].id] = clk;
}
}
+EXPORT_SYMBOL_GPL(hisi_clk_register_fixed_rate);
-void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *clks,
+void hisi_clk_register_fixed_factor(const struct hisi_fixed_factor_clock *clks,
int nums,
struct hisi_clock_data *data)
{
@@ -112,8 +114,9 @@ void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *clks,
data->clk_data.clks[clks[i].id] = clk;
}
}
+EXPORT_SYMBOL_GPL(hisi_clk_register_fixed_factor);
-void __init hisi_clk_register_mux(struct hisi_mux_clock *clks,
+void hisi_clk_register_mux(const struct hisi_mux_clock *clks,
int nums, struct hisi_clock_data *data)
{
struct clk *clk;
@@ -141,8 +144,9 @@ void __init hisi_clk_register_mux(struct hisi_mux_clock *clks,
data->clk_data.clks[clks[i].id] = clk;
}
}
+EXPORT_SYMBOL_GPL(hisi_clk_register_mux);
-void __init hisi_clk_register_divider(struct hisi_divider_clock *clks,
+void hisi_clk_register_divider(const struct hisi_divider_clock *clks,
int nums, struct hisi_clock_data *data)
{
struct clk *clk;
@@ -170,8 +174,9 @@ void __init hisi_clk_register_divider(struct hisi_divider_clock *clks,
data->clk_data.clks[clks[i].id] = clk;
}
}
+EXPORT_SYMBOL_GPL(hisi_clk_register_divider);
-void __init hisi_clk_register_gate(struct hisi_gate_clock *clks,
+void hisi_clk_register_gate(const struct hisi_gate_clock *clks,
int nums, struct hisi_clock_data *data)
{
struct clk *clk;
@@ -198,8 +203,9 @@ void __init hisi_clk_register_gate(struct hisi_gate_clock *clks,
data->clk_data.clks[clks[i].id] = clk;
}
}
+EXPORT_SYMBOL_GPL(hisi_clk_register_gate);
-void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *clks,
+void hisi_clk_register_gate_sep(const struct hisi_gate_clock *clks,
int nums, struct hisi_clock_data *data)
{
struct clk *clk;
@@ -226,8 +232,9 @@ void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *clks,
data->clk_data.clks[clks[i].id] = clk;
}
}
+EXPORT_SYMBOL_GPL(hisi_clk_register_gate_sep);
-void __init hi6220_clk_register_divider(struct hi6220_divider_clock *clks,
+void __init hi6220_clk_register_divider(const struct hi6220_divider_clock *clks,
int nums, struct hisi_clock_data *data)
{
struct clk *clk;
diff --git a/drivers/clk/hisilicon/clk.h b/drivers/clk/hisilicon/clk.h
index b56fbc1c5..20d64afe4 100644
--- a/drivers/clk/hisilicon/clk.h
+++ b/drivers/clk/hisilicon/clk.h
@@ -111,18 +111,18 @@ struct clk *hi6220_register_clkdiv(struct device *dev, const char *name,
u8 shift, u8 width, u32 mask_bit, spinlock_t *lock);
struct hisi_clock_data *hisi_clk_init(struct device_node *, int);
-void hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *,
+void hisi_clk_register_fixed_rate(const struct hisi_fixed_rate_clock *,
int, struct hisi_clock_data *);
-void hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *,
+void hisi_clk_register_fixed_factor(const struct hisi_fixed_factor_clock *,
int, struct hisi_clock_data *);
-void hisi_clk_register_mux(struct hisi_mux_clock *, int,
+void hisi_clk_register_mux(const struct hisi_mux_clock *, int,
struct hisi_clock_data *);
-void hisi_clk_register_divider(struct hisi_divider_clock *,
+void hisi_clk_register_divider(const struct hisi_divider_clock *,
int, struct hisi_clock_data *);
-void hisi_clk_register_gate(struct hisi_gate_clock *,
+void hisi_clk_register_gate(const struct hisi_gate_clock *,
int, struct hisi_clock_data *);
-void hisi_clk_register_gate_sep(struct hisi_gate_clock *,
+void hisi_clk_register_gate_sep(const struct hisi_gate_clock *,
int, struct hisi_clock_data *);
-void hi6220_clk_register_divider(struct hi6220_divider_clock *,
+void hi6220_clk_register_divider(const struct hi6220_divider_clock *,
int, struct hisi_clock_data *);
#endif /* __HISI_CLK_H */
diff --git a/drivers/clk/hisilicon/reset.c b/drivers/clk/hisilicon/reset.c
new file mode 100644
index 000000000..6aa49c220
--- /dev/null
+++ b/drivers/clk/hisilicon/reset.c
@@ -0,0 +1,134 @@
+/*
+ * Hisilicon Reset Controller Driver
+ *
+ * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include "reset.h"
+
+#define HISI_RESET_BIT_MASK 0x1f
+#define HISI_RESET_OFFSET_SHIFT 8
+#define HISI_RESET_OFFSET_MASK 0xffff00
+
+struct hisi_reset_controller {
+ spinlock_t lock;
+ void __iomem *membase;
+ struct reset_controller_dev rcdev;
+};
+
+#define to_hisi_reset_controller(rcdev) \
+ container_of(rcdev, struct hisi_reset_controller, rcdev)
+
+static int hisi_reset_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ u32 offset;
+ u8 bit;
+
+ offset = (reset_spec->args[0] << HISI_RESET_OFFSET_SHIFT)
+ & HISI_RESET_OFFSET_MASK;
+ bit = reset_spec->args[1] & HISI_RESET_BIT_MASK;
+
+ return (offset | bit);
+}
+
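A worked example of the id encoding (the cell values are illustrative): a
consumer cell pair <0xcc 3> packs the register offset and bit into one
reset id, which assert/deassert below unpack again:

/* resets = <&crg 0xcc 3>;  ->  id = (0xcc << 8) | 3 = 0xcc03 */
static u32 hisi_reset_id_example(void)
{
	u32 offset = 0xcc;
	u8 bit = 3;

	return ((offset << HISI_RESET_OFFSET_SHIFT) & HISI_RESET_OFFSET_MASK) |
	       (bit & HISI_RESET_BIT_MASK);
}
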
+static int hisi_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct hisi_reset_controller *rstc = to_hisi_reset_controller(rcdev);
+ unsigned long flags;
+ u32 offset, reg;
+ u8 bit;
+
+ offset = (id & HISI_RESET_OFFSET_MASK) >> HISI_RESET_OFFSET_SHIFT;
+ bit = id & HISI_RESET_BIT_MASK;
+
+ spin_lock_irqsave(&rstc->lock, flags);
+
+ reg = readl(rstc->membase + offset);
+ writel(reg | BIT(bit), rstc->membase + offset);
+
+ spin_unlock_irqrestore(&rstc->lock, flags);
+
+ return 0;
+}
+
+static int hisi_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct hisi_reset_controller *rstc = to_hisi_reset_controller(rcdev);
+ unsigned long flags;
+ u32 offset, reg;
+ u8 bit;
+
+ offset = (id & HISI_RESET_OFFSET_MASK) >> HISI_RESET_OFFSET_SHIFT;
+ bit = id & HISI_RESET_BIT_MASK;
+
+ spin_lock_irqsave(&rstc->lock, flags);
+
+ reg = readl(rstc->membase + offset);
+ writel(reg & ~BIT(bit), rstc->membase + offset);
+
+ spin_unlock_irqrestore(&rstc->lock, flags);
+
+ return 0;
+}
+
+static const struct reset_control_ops hisi_reset_ops = {
+ .assert = hisi_reset_assert,
+ .deassert = hisi_reset_deassert,
+};
+
+struct hisi_reset_controller *hisi_reset_init(struct device_node *np)
+{
+ struct hisi_reset_controller *rstc;
+
+ rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
+ if (!rstc)
+ return NULL;
+
+ rstc->membase = of_iomap(np, 0);
+ if (!rstc->membase) {
+ kfree(rstc);
+ return NULL;
+ }
+
+ spin_lock_init(&rstc->lock);
+
+ rstc->rcdev.owner = THIS_MODULE;
+ rstc->rcdev.ops = &hisi_reset_ops;
+ rstc->rcdev.of_node = np;
+ rstc->rcdev.of_reset_n_cells = 2;
+ rstc->rcdev.of_xlate = hisi_reset_of_xlate;
+ reset_controller_register(&rstc->rcdev);
+
+ return rstc;
+}
+EXPORT_SYMBOL_GPL(hisi_reset_init);
+
+void hisi_reset_exit(struct hisi_reset_controller *rstc)
+{
+ reset_controller_unregister(&rstc->rcdev);
+ iounmap(rstc->membase);
+ kfree(rstc);
+}
+EXPORT_SYMBOL_GPL(hisi_reset_exit);
diff --git a/drivers/clk/hisilicon/reset.h b/drivers/clk/hisilicon/reset.h
new file mode 100644
index 000000000..677d773ed
--- /dev/null
+++ b/drivers/clk/hisilicon/reset.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2015 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HISI_RESET_H
+#define __HISI_RESET_H
+
+struct device_node;
+struct hisi_reset_controller;
+
+#ifdef CONFIG_RESET_CONTROLLER
+struct hisi_reset_controller *hisi_reset_init(struct device_node *np);
+void hisi_reset_exit(struct hisi_reset_controller *rstc);
+#else
+static inline struct hisi_reset_controller *hisi_reset_init(struct device_node *np)
+{
+	return NULL;
+}
+static inline void hisi_reset_exit(struct hisi_reset_controller *rstc)
+{}
+#endif
+
+#endif /* __HISI_RESET_H */
diff --git a/drivers/clk/imx/clk-gate2.c b/drivers/clk/imx/clk-gate2.c
index 8935bff99..db44a198a 100644
--- a/drivers/clk/imx/clk-gate2.c
+++ b/drivers/clk/imx/clk-gate2.c
@@ -31,6 +31,7 @@ struct clk_gate2 {
struct clk_hw hw;
void __iomem *reg;
u8 bit_idx;
+ u8 cgr_val;
u8 flags;
spinlock_t *lock;
unsigned int *share_count;
@@ -50,7 +51,8 @@ static int clk_gate2_enable(struct clk_hw *hw)
goto out;
reg = readl(gate->reg);
- reg |= 3 << gate->bit_idx;
+ reg &= ~(3 << gate->bit_idx);
+ reg |= gate->cgr_val << gate->bit_idx;
writel(reg, gate->reg);
out:
@@ -125,7 +127,7 @@ static struct clk_ops clk_gate2_ops = {
struct clk *clk_register_gate2(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 bit_idx,
+ void __iomem *reg, u8 bit_idx, u8 cgr_val,
u8 clk_gate2_flags, spinlock_t *lock,
unsigned int *share_count)
{
@@ -140,6 +142,7 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
/* struct clk_gate2 assignments */
gate->reg = reg;
gate->bit_idx = bit_idx;
+ gate->cgr_val = cgr_val;
gate->flags = clk_gate2_flags;
gate->lock = lock;
gate->share_count = share_count;
diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
index fea125eb4..97e742a8b 100644
--- a/drivers/clk/imx/clk-imx6sx.c
+++ b/drivers/clk/imx/clk-imx6sx.c
@@ -134,6 +134,8 @@ static u32 share_count_esai;
static u32 share_count_ssi1;
static u32 share_count_ssi2;
static u32 share_count_ssi3;
+static u32 share_count_sai1;
+static u32 share_count_sai2;
static struct clk ** const uart_clks[] __initconst = {
&clks[IMX6SX_CLK_UART_IPG],
@@ -469,10 +471,10 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clks[IMX6SX_CLK_SSI3] = imx_clk_gate2_shared("ssi3", "ssi3_podf", base + 0x7c, 22, &share_count_ssi3);
clks[IMX6SX_CLK_UART_IPG] = imx_clk_gate2("uart_ipg", "ipg", base + 0x7c, 24);
clks[IMX6SX_CLK_UART_SERIAL] = imx_clk_gate2("uart_serial", "uart_podf", base + 0x7c, 26);
- clks[IMX6SX_CLK_SAI1_IPG] = imx_clk_gate2("sai1_ipg", "ipg", base + 0x7c, 28);
- clks[IMX6SX_CLK_SAI2_IPG] = imx_clk_gate2("sai2_ipg", "ipg", base + 0x7c, 30);
- clks[IMX6SX_CLK_SAI1] = imx_clk_gate2("sai1", "ssi1_podf", base + 0x7c, 28);
- clks[IMX6SX_CLK_SAI2] = imx_clk_gate2("sai2", "ssi2_podf", base + 0x7c, 30);
+ clks[IMX6SX_CLK_SAI1_IPG] = imx_clk_gate2_shared("sai1_ipg", "ipg", base + 0x7c, 28, &share_count_sai1);
+ clks[IMX6SX_CLK_SAI2_IPG] = imx_clk_gate2_shared("sai2_ipg", "ipg", base + 0x7c, 30, &share_count_sai2);
+ clks[IMX6SX_CLK_SAI1] = imx_clk_gate2_shared("sai1", "ssi1_podf", base + 0x7c, 28, &share_count_sai1);
+ clks[IMX6SX_CLK_SAI2] = imx_clk_gate2_shared("sai2", "ssi2_podf", base + 0x7c, 30, &share_count_sai2);
/* CCGR6 */
clks[IMX6SX_CLK_USBOH3] = imx_clk_gate2("usboh3", "ipg", base + 0x80, 0);
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index fbb6a8c86..522996800 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -56,7 +56,7 @@ static const char *nand_usdhc_bus_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
"pll_sys_pfd2_135m_clk", "pll_sys_pfd6_clk", "pll_enet_250m_clk",
"pll_audio_main_clk", };
-static const char *ahb_channel_sel[] = { "osc", "pll_sys_pfd2_135m_clk",
+static const char *ahb_channel_sel[] = { "osc", "pll_sys_pfd2_270m_clk",
"pll_dram_533m_clk", "pll_sys_pfd0_392m_clk",
"pll_enet_125m_clk", "pll_usb_main_clk", "pll_audio_main_clk",
"pll_video_main_clk", };
@@ -342,7 +342,7 @@ static const char *clko1_sel[] = { "osc", "pll_sys_main_clk",
static const char *clko2_sel[] = { "osc", "pll_sys_main_240m_clk",
"pll_sys_pfd0_392m_clk", "pll_sys_pfd1_166m_clk", "pll_sys_pfd4_clk",
- "pll_audio_main_clk", "pll_video_main_clk", "osc_32k_clk", };
+ "pll_audio_main_clk", "pll_video_main_clk", "ckil", };
static const char *lvds1_sel[] = { "pll_arm_main_clk",
"pll_sys_main_clk", "pll_sys_pfd0_392m_clk", "pll_sys_pfd1_332m_clk",
@@ -382,6 +382,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
clks[IMX7D_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
clks[IMX7D_OSC_24M_CLK] = of_clk_get_by_name(ccm_node, "osc");
+ clks[IMX7D_CKIL] = of_clk_get_by_name(ccm_node, "ckil");
np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop");
base = of_iomap(np, 0);
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index c05c43d56..4826b3c9e 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -44,6 +44,7 @@ struct clk_pllv3 {
u32 powerdown;
u32 div_mask;
u32 div_shift;
+ unsigned long ref_clock;
};
#define to_clk_pllv3(_hw) container_of(_hw, struct clk_pllv3, hw)
@@ -286,7 +287,9 @@ static const struct clk_ops clk_pllv3_av_ops = {
static unsigned long clk_pllv3_enet_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- return 500000000;
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+
+ return pll->ref_clock;
}
static const struct clk_ops clk_pllv3_enet_ops = {
@@ -326,7 +329,11 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
break;
case IMX_PLLV3_ENET_IMX7:
pll->powerdown = IMX7_ENET_PLL_POWER;
+ pll->ref_clock = 1000000000;
+ ops = &clk_pllv3_enet_ops;
+ break;
case IMX_PLLV3_ENET:
+ pll->ref_clock = 500000000;
ops = &clk_pllv3_enet_ops;
break;
default:
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index 0a94d9661..3a1f24475 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -10,6 +10,7 @@
#include <linux/of_address.h>
#include <linux/clk.h>
+#include <linux/syscore_ops.h>
#include <dt-bindings/clock/vf610-clock.h>
#include "clk.h"
@@ -40,6 +41,7 @@
#define CCM_CCGR9 (ccm_base + 0x64)
#define CCM_CCGR10 (ccm_base + 0x68)
#define CCM_CCGR11 (ccm_base + 0x6c)
+#define CCM_CCGRx(x) (CCM_CCGR0 + (x) * 4)
#define CCM_CMEOR0 (ccm_base + 0x70)
#define CCM_CMEOR1 (ccm_base + 0x74)
#define CCM_CMEOR2 (ccm_base + 0x78)
@@ -115,10 +117,19 @@ static struct clk_div_table pll4_audio_div_table[] = {
static struct clk *clk[VF610_CLK_END];
static struct clk_onecell_data clk_data;
+static u32 cscmr1;
+static u32 cscmr2;
+static u32 cscdr1;
+static u32 cscdr2;
+static u32 cscdr3;
+static u32 ccgr[12];
+
static unsigned int const clks_init_on[] __initconst = {
VF610_CLK_SYS_BUS,
VF610_CLK_DDR_SEL,
VF610_CLK_DAP,
+ VF610_CLK_DDRMC,
+ VF610_CLK_WKPU,
};
static struct clk * __init vf610_get_fixed_clock(
@@ -132,6 +143,43 @@ static struct clk * __init vf610_get_fixed_clock(
return clk;
};
+static int vf610_clk_suspend(void)
+{
+ int i;
+
+ cscmr1 = readl_relaxed(CCM_CSCMR1);
+ cscmr2 = readl_relaxed(CCM_CSCMR2);
+
+ cscdr1 = readl_relaxed(CCM_CSCDR1);
+ cscdr2 = readl_relaxed(CCM_CSCDR2);
+ cscdr3 = readl_relaxed(CCM_CSCDR3);
+
+ for (i = 0; i < 12; i++)
+ ccgr[i] = readl_relaxed(CCM_CCGRx(i));
+
+ return 0;
+}
+
+static void vf610_clk_resume(void)
+{
+ int i;
+
+ writel_relaxed(cscmr1, CCM_CSCMR1);
+ writel_relaxed(cscmr2, CCM_CSCMR2);
+
+ writel_relaxed(cscdr1, CCM_CSCDR1);
+ writel_relaxed(cscdr2, CCM_CSCDR2);
+ writel_relaxed(cscdr3, CCM_CSCDR3);
+
+ for (i = 0; i < 12; i++)
+ writel_relaxed(ccgr[i], CCM_CCGRx(i));
+}
+
+static struct syscore_ops vf610_clk_syscore_ops = {
+ .suspend = vf610_clk_suspend,
+ .resume = vf610_clk_resume,
+};
+
static void __init vf610_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
@@ -233,6 +281,9 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
clk[VF610_CLK_PLL4_MAIN_DIV] = clk_register_divider_table(NULL, "pll4_audio_div", "pll4_audio", 0, CCM_CACRR, 6, 3, 0, pll4_audio_div_table, &imx_ccm_lock);
clk[VF610_CLK_PLL6_MAIN_DIV] = imx_clk_divider("pll6_video_div", "pll6_video", CCM_CACRR, 21, 1);
+ clk[VF610_CLK_DDRMC] = imx_clk_gate2_cgr("ddrmc", "ddr_sel", CCM_CCGR6, CCM_CCGRx_CGn(14), 0x2);
+ clk[VF610_CLK_WKPU] = imx_clk_gate2_cgr("wkpu", "ipg_bus", CCM_CCGR4, CCM_CCGRx_CGn(10), 0x2);
+
clk[VF610_CLK_USBPHY0] = imx_clk_gate("usbphy0", "pll3_usb_otg", PLL3_CTRL, 6);
clk[VF610_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll7_usb_host", PLL7_CTRL, 6);
@@ -321,11 +372,14 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
clk[VF610_CLK_DCU0_SEL] = imx_clk_mux("dcu0_sel", CCM_CSCMR1, 28, 1, dcu_sels, 2);
clk[VF610_CLK_DCU0_EN] = imx_clk_gate("dcu0_en", "dcu0_sel", CCM_CSCDR3, 19);
clk[VF610_CLK_DCU0_DIV] = imx_clk_divider("dcu0_div", "dcu0_en", CCM_CSCDR3, 16, 3);
- clk[VF610_CLK_DCU0] = imx_clk_gate2("dcu0", "dcu0_div", CCM_CCGR3, CCM_CCGRx_CGn(8));
+ clk[VF610_CLK_DCU0] = imx_clk_gate2("dcu0", "ipg_bus", CCM_CCGR3, CCM_CCGRx_CGn(8));
clk[VF610_CLK_DCU1_SEL] = imx_clk_mux("dcu1_sel", CCM_CSCMR1, 29, 1, dcu_sels, 2);
clk[VF610_CLK_DCU1_EN] = imx_clk_gate("dcu1_en", "dcu1_sel", CCM_CSCDR3, 23);
clk[VF610_CLK_DCU1_DIV] = imx_clk_divider("dcu1_div", "dcu1_en", CCM_CSCDR3, 20, 3);
- clk[VF610_CLK_DCU1] = imx_clk_gate2("dcu1", "dcu1_div", CCM_CCGR9, CCM_CCGRx_CGn(8));
+ clk[VF610_CLK_DCU1] = imx_clk_gate2("dcu1", "ipg_bus", CCM_CCGR9, CCM_CCGRx_CGn(8));
+
+ clk[VF610_CLK_TCON0] = imx_clk_gate2("tcon0", "platform_bus", CCM_CCGR1, CCM_CCGRx_CGn(13));
+ clk[VF610_CLK_TCON1] = imx_clk_gate2("tcon1", "platform_bus", CCM_CCGR7, CCM_CCGRx_CGn(13));
clk[VF610_CLK_ESAI_SEL] = imx_clk_mux("esai_sel", CCM_CSCMR1, 20, 2, esai_sels, 4);
clk[VF610_CLK_ESAI_EN] = imx_clk_gate("esai_en", "esai_sel", CCM_CSCDR2, 30);
@@ -409,6 +463,8 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clk[clks_init_on[i]]);
+ register_syscore_ops(&vf610_clk_syscore_ops);
+
/* Add the clocks to provider list */
clk_data.clks = clk;
clk_data.clk_num = ARRAY_SIZE(clk);
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index d942f5748..508d0fad8 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -41,7 +41,7 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
struct clk *clk_register_gate2(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 bit_idx,
+ void __iomem *reg, u8 bit_idx, u8 cgr_val,
u8 clk_gate_flags, spinlock_t *lock,
unsigned int *share_count);
@@ -55,7 +55,7 @@ static inline struct clk *imx_clk_gate2(const char *name, const char *parent,
void __iomem *reg, u8 shift)
{
return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0, &imx_ccm_lock, NULL);
+ shift, 0x3, 0, &imx_ccm_lock, NULL);
}
static inline struct clk *imx_clk_gate2_shared(const char *name,
@@ -63,7 +63,14 @@ static inline struct clk *imx_clk_gate2_shared(const char *name,
unsigned int *share_count)
{
return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0, &imx_ccm_lock, share_count);
+ shift, 0x3, 0, &imx_ccm_lock, share_count);
+}
+
+static inline struct clk *imx_clk_gate2_cgr(const char *name, const char *parent,
+ void __iomem *reg, u8 shift, u8 cgr_val)
+{
+ return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
+ shift, cgr_val, 0, &imx_ccm_lock, NULL);
}
struct clk *imx_clk_pfd(const char *name, const char *parent_name,
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 7cfb7b2a2..e8248f918 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -325,6 +325,7 @@ ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
div = (div_reg >> clk_info->div.shift) &
GENMASK(clk_info->div.bits - 1, 0);
div += 1;
+ div *= clk_info->div.div;
rate /= div;
}
@@ -345,6 +346,14 @@ ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info,
div = min_t(unsigned, div, 1 << clk_info->div.bits);
div = max_t(unsigned, div, 1);
+ /*
+ * If the divider value itself must be divided before being written to
+ * the divider register, we must ensure we don't have any bits set that
+ * would be lost as a result of doing so.
+ */
+ div /= clk_info->div.div;
+ div *= clk_info->div.div;
+
return div;
}
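
A worked example of the new constant (values illustrative): with .div = 4
the effective divider is always a multiple of 4, so recalc, calc and
set_rate round-trip consistently:

static unsigned int ingenic_div_example(void)
{
	const unsigned int field = 2, constant = 4;	/* .div = 4 */

	/*
	 * recalc: effective divider = (field + 1) * constant = 12;
	 * calc_div: a requested divider of 14 becomes 14 / 4 * 4 = 12;
	 * set_rate then writes back 12 / 4 - 1 = 2.
	 */
	return (field + 1) * constant;
}
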
@@ -395,7 +404,7 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
/* update the divide */
mask = GENMASK(clk_info->div.bits - 1, 0);
reg &= ~(mask << clk_info->div.shift);
- reg |= (div - 1) << clk_info->div.shift;
+ reg |= ((div / clk_info->div.div) - 1) << clk_info->div.shift;
/* clear the stop bit */
if (clk_info->div.stop_bit != -1)
diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h
index 99347e2b9..09700b2c5 100644
--- a/drivers/clk/ingenic/cgu.h
+++ b/drivers/clk/ingenic/cgu.h
@@ -76,8 +76,11 @@ struct ingenic_cgu_mux_info {
/**
* struct ingenic_cgu_div_info - information about a divider
* @reg: offset of the divider control register within the CGU
- * @shift: number of bits to shift the divide value by (ie. the index of
+ * @shift: number of bits to left-shift the divide value by (i.e. the index of
* the lowest bit of the divide value within its control register)
+ * @div: constant by which the value written to the register is
+ * multiplied (i.e. the effective divider value is the register
+ * value multiplied by this constant)
* @bits: the size of the divide value in bits
* @ce_bit: the index of the change enable bit within reg, or -1 if there
* isn't one
@@ -87,6 +90,7 @@ struct ingenic_cgu_mux_info {
struct ingenic_cgu_div_info {
unsigned reg;
u8 shift;
+ u8 div;
u8 bits;
s8 ce_bit;
s8 busy_bit;
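
Read together with the recalc_rate/set_rate changes above, the new field means the effective divider is (register value + 1) multiplied by @div. A small standalone sketch with assumed numbers:

#include <stdio.h>

/* Effective Ingenic divider: the register stores (divider / div_const) - 1. */
static unsigned long ingenic_effective_rate(unsigned long parent_rate,
					    unsigned int reg_field,
					    unsigned int div_const)
{
	return parent_rate / ((reg_field + 1UL) * div_const);
}

int main(void)
{
	/* e.g. the jz4780 MSC clocks use div_const = 2:
	 * a register value of 3 divides a 48 MHz parent by 8 */
	printf("%lu\n", ingenic_effective_rate(48000000UL, 3, 2));
	return 0;
}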
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index 305a26c2a..510fe7e0c 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -90,51 +90,51 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
[JZ4740_CLK_PLL_HALF] = {
"pll half", CGU_CLK_DIV,
.parents = { JZ4740_CLK_PLL, -1, -1, -1 },
- .div = { CGU_REG_CPCCR, 21, 1, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 21, 1, 1, -1, -1, -1 },
},
[JZ4740_CLK_CCLK] = {
"cclk", CGU_CLK_DIV,
.parents = { JZ4740_CLK_PLL, -1, -1, -1 },
- .div = { CGU_REG_CPCCR, 0, 4, 22, -1, -1 },
+ .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 },
},
[JZ4740_CLK_HCLK] = {
"hclk", CGU_CLK_DIV,
.parents = { JZ4740_CLK_PLL, -1, -1, -1 },
- .div = { CGU_REG_CPCCR, 4, 4, 22, -1, -1 },
+ .div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 },
},
[JZ4740_CLK_PCLK] = {
"pclk", CGU_CLK_DIV,
.parents = { JZ4740_CLK_PLL, -1, -1, -1 },
- .div = { CGU_REG_CPCCR, 8, 4, 22, -1, -1 },
+ .div = { CGU_REG_CPCCR, 8, 1, 4, 22, -1, -1 },
},
[JZ4740_CLK_MCLK] = {
"mclk", CGU_CLK_DIV,
.parents = { JZ4740_CLK_PLL, -1, -1, -1 },
- .div = { CGU_REG_CPCCR, 12, 4, 22, -1, -1 },
+ .div = { CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1 },
},
[JZ4740_CLK_LCD] = {
"lcd", CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4740_CLK_PLL_HALF, -1, -1, -1 },
- .div = { CGU_REG_CPCCR, 16, 5, 22, -1, -1 },
+ .div = { CGU_REG_CPCCR, 16, 1, 5, 22, -1, -1 },
.gate = { CGU_REG_CLKGR, 10 },
},
[JZ4740_CLK_LCD_PCLK] = {
"lcd_pclk", CGU_CLK_DIV,
.parents = { JZ4740_CLK_PLL_HALF, -1, -1, -1 },
- .div = { CGU_REG_LPCDR, 0, 11, -1, -1, -1 },
+ .div = { CGU_REG_LPCDR, 0, 1, 11, -1, -1, -1 },
},
[JZ4740_CLK_I2S] = {
"i2s", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4740_CLK_EXT, JZ4740_CLK_PLL_HALF, -1, -1 },
.mux = { CGU_REG_CPCCR, 31, 1 },
- .div = { CGU_REG_I2SCDR, 0, 8, -1, -1, -1 },
+ .div = { CGU_REG_I2SCDR, 0, 1, 8, -1, -1, -1 },
.gate = { CGU_REG_CLKGR, 6 },
},
@@ -142,21 +142,21 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
"spi", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4740_CLK_EXT, JZ4740_CLK_PLL, -1, -1 },
.mux = { CGU_REG_SSICDR, 31, 1 },
- .div = { CGU_REG_SSICDR, 0, 4, -1, -1, -1 },
+ .div = { CGU_REG_SSICDR, 0, 1, 4, -1, -1, -1 },
.gate = { CGU_REG_CLKGR, 4 },
},
[JZ4740_CLK_MMC] = {
"mmc", CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4740_CLK_PLL_HALF, -1, -1, -1 },
- .div = { CGU_REG_MSCCDR, 0, 5, -1, -1, -1 },
+ .div = { CGU_REG_MSCCDR, 0, 1, 5, -1, -1, -1 },
.gate = { CGU_REG_CLKGR, 7 },
},
[JZ4740_CLK_UHC] = {
"uhc", CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4740_CLK_PLL_HALF, -1, -1, -1 },
- .div = { CGU_REG_UHCCDR, 0, 4, -1, -1, -1 },
+ .div = { CGU_REG_UHCCDR, 0, 1, 4, -1, -1, -1 },
.gate = { CGU_REG_CLKGR, 14 },
},
@@ -164,7 +164,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
"udc", CGU_CLK_MUX | CGU_CLK_DIV,
.parents = { JZ4740_CLK_EXT, JZ4740_CLK_PLL_HALF, -1, -1 },
.mux = { CGU_REG_CPCCR, 29, 1 },
- .div = { CGU_REG_CPCCR, 23, 6, -1, -1, -1 },
+ .div = { CGU_REG_CPCCR, 23, 1, 6, -1, -1, -1 },
.gate = { CGU_REG_SCR, 6 },
},
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index 431f96230..b35d6d9dd 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -296,13 +296,13 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
[JZ4780_CLK_CPU] = {
"cpu", CGU_CLK_DIV,
.parents = { JZ4780_CLK_CPUMUX, -1, -1, -1 },
- .div = { CGU_REG_CLOCKCONTROL, 0, 4, 22, -1, -1 },
+ .div = { CGU_REG_CLOCKCONTROL, 0, 1, 4, 22, -1, -1 },
},
[JZ4780_CLK_L2CACHE] = {
"l2cache", CGU_CLK_DIV,
.parents = { JZ4780_CLK_CPUMUX, -1, -1, -1 },
- .div = { CGU_REG_CLOCKCONTROL, 4, 4, -1, -1, -1 },
+ .div = { CGU_REG_CLOCKCONTROL, 4, 1, 4, -1, -1, -1 },
},
[JZ4780_CLK_AHB0] = {
@@ -310,7 +310,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
.parents = { -1, JZ4780_CLK_SCLKA, JZ4780_CLK_MPLL,
JZ4780_CLK_EPLL },
.mux = { CGU_REG_CLOCKCONTROL, 26, 2 },
- .div = { CGU_REG_CLOCKCONTROL, 8, 4, 21, -1, -1 },
+ .div = { CGU_REG_CLOCKCONTROL, 8, 1, 4, 21, -1, -1 },
},
[JZ4780_CLK_AHB2PMUX] = {
@@ -323,20 +323,20 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
[JZ4780_CLK_AHB2] = {
"ahb2", CGU_CLK_DIV,
.parents = { JZ4780_CLK_AHB2PMUX, -1, -1, -1 },
- .div = { CGU_REG_CLOCKCONTROL, 12, 4, 20, -1, -1 },
+ .div = { CGU_REG_CLOCKCONTROL, 12, 1, 4, 20, -1, -1 },
},
[JZ4780_CLK_PCLK] = {
"pclk", CGU_CLK_DIV,
.parents = { JZ4780_CLK_AHB2PMUX, -1, -1, -1 },
- .div = { CGU_REG_CLOCKCONTROL, 16, 4, 20, -1, -1 },
+ .div = { CGU_REG_CLOCKCONTROL, 16, 1, 4, 20, -1, -1 },
},
[JZ4780_CLK_DDR] = {
"ddr", CGU_CLK_MUX | CGU_CLK_DIV,
.parents = { -1, JZ4780_CLK_SCLKA, JZ4780_CLK_MPLL, -1 },
.mux = { CGU_REG_DDRCDR, 30, 2 },
- .div = { CGU_REG_DDRCDR, 0, 4, 29, 28, 27 },
+ .div = { CGU_REG_DDRCDR, 0, 1, 4, 29, 28, 27 },
},
[JZ4780_CLK_VPU] = {
@@ -344,7 +344,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
.parents = { JZ4780_CLK_SCLKA, JZ4780_CLK_MPLL,
JZ4780_CLK_EPLL, -1 },
.mux = { CGU_REG_VPUCDR, 30, 2 },
- .div = { CGU_REG_VPUCDR, 0, 4, 29, 28, 27 },
+ .div = { CGU_REG_VPUCDR, 0, 1, 4, 29, 28, 27 },
.gate = { CGU_REG_CLKGR1, 2 },
},
@@ -352,7 +352,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
"i2s_pll", CGU_CLK_MUX | CGU_CLK_DIV,
.parents = { JZ4780_CLK_SCLKA, JZ4780_CLK_EPLL, -1, -1 },
.mux = { CGU_REG_I2SCDR, 30, 1 },
- .div = { CGU_REG_I2SCDR, 0, 8, 29, 28, 27 },
+ .div = { CGU_REG_I2SCDR, 0, 1, 8, 29, 28, 27 },
},
[JZ4780_CLK_I2S] = {
@@ -366,7 +366,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
.parents = { JZ4780_CLK_SCLKA, JZ4780_CLK_MPLL,
JZ4780_CLK_VPLL, -1 },
.mux = { CGU_REG_LP0CDR, 30, 2 },
- .div = { CGU_REG_LP0CDR, 0, 8, 28, 27, 26 },
+ .div = { CGU_REG_LP0CDR, 0, 1, 8, 28, 27, 26 },
},
[JZ4780_CLK_LCD1PIXCLK] = {
@@ -374,7 +374,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
.parents = { JZ4780_CLK_SCLKA, JZ4780_CLK_MPLL,
JZ4780_CLK_VPLL, -1 },
.mux = { CGU_REG_LP1CDR, 30, 2 },
- .div = { CGU_REG_LP1CDR, 0, 8, 28, 27, 26 },
+ .div = { CGU_REG_LP1CDR, 0, 1, 8, 28, 27, 26 },
},
[JZ4780_CLK_MSCMUX] = {
@@ -386,21 +386,21 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
[JZ4780_CLK_MSC0] = {
"msc0", CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4780_CLK_MSCMUX, -1, -1, -1 },
- .div = { CGU_REG_MSC0CDR, 0, 8, 29, 28, 27 },
+ .div = { CGU_REG_MSC0CDR, 0, 2, 8, 29, 28, 27 },
.gate = { CGU_REG_CLKGR0, 3 },
},
[JZ4780_CLK_MSC1] = {
"msc1", CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4780_CLK_MSCMUX, -1, -1, -1 },
- .div = { CGU_REG_MSC1CDR, 0, 8, 29, 28, 27 },
+ .div = { CGU_REG_MSC1CDR, 0, 2, 8, 29, 28, 27 },
.gate = { CGU_REG_CLKGR0, 11 },
},
[JZ4780_CLK_MSC2] = {
"msc2", CGU_CLK_DIV | CGU_CLK_GATE,
.parents = { JZ4780_CLK_MSCMUX, -1, -1, -1 },
- .div = { CGU_REG_MSC2CDR, 0, 8, 29, 28, 27 },
+ .div = { CGU_REG_MSC2CDR, 0, 2, 8, 29, 28, 27 },
.gate = { CGU_REG_CLKGR0, 12 },
},
@@ -409,7 +409,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
.parents = { JZ4780_CLK_SCLKA, JZ4780_CLK_MPLL,
JZ4780_CLK_EPLL, JZ4780_CLK_OTGPHY },
.mux = { CGU_REG_UHCCDR, 30, 2 },
- .div = { CGU_REG_UHCCDR, 0, 8, 29, 28, 27 },
+ .div = { CGU_REG_UHCCDR, 0, 1, 8, 29, 28, 27 },
.gate = { CGU_REG_CLKGR0, 24 },
},
@@ -417,7 +417,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
"ssi_pll", CGU_CLK_MUX | CGU_CLK_DIV,
.parents = { JZ4780_CLK_SCLKA, JZ4780_CLK_MPLL, -1, -1 },
.mux = { CGU_REG_SSICDR, 30, 1 },
- .div = { CGU_REG_SSICDR, 0, 8, 29, 28, 27 },
+ .div = { CGU_REG_SSICDR, 0, 1, 8, 29, 28, 27 },
},
[JZ4780_CLK_SSI] = {
@@ -430,7 +430,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
"cim_mclk", CGU_CLK_MUX | CGU_CLK_DIV,
.parents = { JZ4780_CLK_SCLKA, JZ4780_CLK_MPLL, -1, -1 },
.mux = { CGU_REG_CIMCDR, 31, 1 },
- .div = { CGU_REG_CIMCDR, 0, 8, 30, 29, 28 },
+ .div = { CGU_REG_CIMCDR, 0, 1, 8, 30, 29, 28 },
},
[JZ4780_CLK_PCMPLL] = {
@@ -438,7 +438,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
.parents = { JZ4780_CLK_SCLKA, JZ4780_CLK_MPLL,
JZ4780_CLK_EPLL, JZ4780_CLK_VPLL },
.mux = { CGU_REG_PCMCDR, 29, 2 },
- .div = { CGU_REG_PCMCDR, 0, 8, 28, 27, 26 },
+ .div = { CGU_REG_PCMCDR, 0, 1, 8, 28, 27, 26 },
},
[JZ4780_CLK_PCM] = {
@@ -453,7 +453,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
.parents = { -1, JZ4780_CLK_SCLKA, JZ4780_CLK_MPLL,
JZ4780_CLK_EPLL },
.mux = { CGU_REG_GPUCDR, 30, 2 },
- .div = { CGU_REG_GPUCDR, 0, 4, 29, 28, 27 },
+ .div = { CGU_REG_GPUCDR, 0, 1, 4, 29, 28, 27 },
.gate = { CGU_REG_CLKGR1, 4 },
},
@@ -462,7 +462,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
.parents = { JZ4780_CLK_SCLKA, JZ4780_CLK_MPLL,
JZ4780_CLK_VPLL, -1 },
.mux = { CGU_REG_HDMICDR, 30, 2 },
- .div = { CGU_REG_HDMICDR, 0, 8, 29, 28, 26 },
+ .div = { CGU_REG_HDMICDR, 0, 1, 8, 29, 28, 26 },
.gate = { CGU_REG_CLKGR1, 9 },
},
@@ -471,7 +471,7 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = {
.parents = { -1, JZ4780_CLK_SCLKA, JZ4780_CLK_MPLL,
JZ4780_CLK_EPLL },
.mux = { CGU_REG_BCHCDR, 30, 2 },
- .div = { CGU_REG_BCHCDR, 0, 4, 29, 28, 27 },
+ .div = { CGU_REG_BCHCDR, 0, 1, 4, 29, 28, 27 },
.gate = { CGU_REG_CLKGR0, 1 },
},
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 227e35640..10c986018 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -61,7 +61,6 @@ static const struct mtk_fixed_factor top_divs[] __initconst = {
FACTOR(CLK_TOP_CLKRTC_INT, "clkrtc_int", "clk26m", 1, 793),
FACTOR(CLK_TOP_FPC, "fpc_ck", "clk26m", 1, 1),
- FACTOR(CLK_TOP_HDMITX_DIG_CTS, "hdmitx_dig_cts", "tvdpll_445p5m", 1, 3),
FACTOR(CLK_TOP_HDMITXPLL_D2, "hdmitxpll_d2", "hdmitx_dig_cts", 1, 2),
FACTOR(CLK_TOP_HDMITXPLL_D3, "hdmitxpll_d3", "hdmitx_dig_cts", 1, 3),
@@ -558,7 +557,11 @@ static const struct mtk_composite top_muxes[] __initconst = {
MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel", atb_parents, 0x0090, 16, 2, 23),
MUX_GATE(CLK_TOP_VENC_LT_SEL, "venclt_sel", venc_lt_parents, 0x0090, 24, 4, 31),
/* CLK_CFG_6 */
- MUX_GATE(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, 0x00a0, 0, 3, 7),
+ /*
+ * The dpi0_sel clock should not propagate rate changes to its parent
+ * clock, so the dpi driver has full control over the PLL and divider.
+ */
+ MUX_GATE_FLAGS(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, 0x00a0, 0, 3, 7, 0),
MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel", irda_parents, 0x00a0, 8, 2, 15),
MUX_GATE(CLK_TOP_CCI400_SEL, "cci400_sel", cci400_parents, 0x00a0, 16, 3, 23),
MUX_GATE(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents, 0x00a0, 24, 2, 31),
@@ -1091,6 +1094,11 @@ static void __init mtk_apmixedsys_init(struct device_node *node)
clk_data->clks[cku->id] = clk;
}
+ clk = clk_register_divider(NULL, "hdmi_ref", "tvdpll_594m", 0,
+ base + 0x40, 16, 3, CLK_DIVIDER_POWER_OF_TWO,
+ NULL);
+ clk_data->clks[CLK_APMIXED_HDMI_REF] = clk;
+
r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
if (r)
pr_err("%s(): could not register clock provider: %d\n",
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 32d2e455e..9f24fcfa3 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -83,7 +83,11 @@ struct mtk_composite {
signed char num_parents;
};
-#define MUX_GATE(_id, _name, _parents, _reg, _shift, _width, _gate) { \
+/*
+ * Where propagating rate changes to the parent clock is undesirable,
+ * this macro allows the clock flags to be specified manually.
+ */
+#define MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, _gate, _flags) { \
.id = _id, \
.name = _name, \
.mux_reg = _reg, \
@@ -94,9 +98,16 @@ struct mtk_composite {
.divider_shift = -1, \
.parent_names = _parents, \
.num_parents = ARRAY_SIZE(_parents), \
- .flags = CLK_SET_RATE_PARENT, \
+ .flags = _flags, \
}
+/*
+ * Unless specified otherwise, all MUX_GATE clocks propagate rate
+ * changes to their parent clock by default.
+ */
+#define MUX_GATE(_id, _name, _parents, _reg, _shift, _width, _gate) \
+ MUX_GATE_FLAGS(_id, _name, _parents, _reg, _shift, _width, _gate, CLK_SET_RATE_PARENT)
+
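For illustration only, two hypothetical table entries (the identifiers, register offset, shift, width and gate bit are invented) showing how the two macros differ:

/* Hypothetical entries, not part of this patch: */
static const struct mtk_composite example_muxes[] __initconst = {
	/* default behaviour: rate changes propagate to the selected parent */
	MUX_GATE(CLK_TOP_FOO_SEL, "foo_sel", foo_parents, 0x00b0, 0, 2, 7),
	/* flags forced to 0: the consumer driver keeps PLL/divider control */
	MUX_GATE_FLAGS(CLK_TOP_BAR_SEL, "bar_sel", bar_parents,
		       0x00b0, 8, 2, 15, 0),
};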
#define MUX(_id, _name, _parents, _reg, _shift, _width) { \
.id = _id, \
.name = _name, \
diff --git a/drivers/clk/meson/meson8b-clkc.c b/drivers/clk/meson/meson8b-clkc.c
index 61f6d55c4..4d057b3e2 100644
--- a/drivers/clk/meson/meson8b-clkc.c
+++ b/drivers/clk/meson/meson8b-clkc.c
@@ -141,11 +141,11 @@ static const struct composite_conf mali_conf __initconst = {
};
static const struct clk_conf meson8b_xtal_conf __initconst =
- FIXED_RATE_P(MESON8B_REG_CTL0_ADDR, CLKID_XTAL, "xtal",
- CLK_IS_ROOT, PARM(0x00, 4, 7));
+ FIXED_RATE_P(MESON8B_REG_CTL0_ADDR, CLKID_XTAL, "xtal", 0,
+ PARM(0x00, 4, 7));
static const struct clk_conf meson8b_clk_confs[] __initconst = {
- FIXED_RATE(CLKID_ZERO, "zero", CLK_IS_ROOT, 0),
+ FIXED_RATE(CLKID_ZERO, "zero", 0, 0),
PLL(MESON8B_REG_PLL_FIXED, CLKID_PLL_FIXED, "fixed_pll",
p_xtal, 0, &pll_confs),
PLL(MESON8B_REG_PLL_VID, CLKID_PLL_VID, "vid_pll",
diff --git a/drivers/clk/microchip/Makefile b/drivers/clk/microchip/Makefile
new file mode 100644
index 000000000..2152f4181
--- /dev/null
+++ b/drivers/clk/microchip/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_COMMON_CLK_PIC32) += clk-core.o
+obj-$(CONFIG_PIC32MZDA) += clk-pic32mzda.o
diff --git a/drivers/clk/microchip/clk-core.c b/drivers/clk/microchip/clk-core.c
new file mode 100644
index 000000000..ca85cea17
--- /dev/null
+++ b/drivers/clk/microchip/clk-core.c
@@ -0,0 +1,1031 @@
+/*
+ * Purna Chandra Mandal, <purna.mandal@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <asm/mach-pic32/pic32.h>
+#include <asm/traps.h>
+
+#include "clk-core.h"
+
+/* OSCCON Reg fields */
+#define OSC_CUR_MASK 0x07
+#define OSC_CUR_SHIFT 12
+#define OSC_NEW_MASK 0x07
+#define OSC_NEW_SHIFT 8
+#define OSC_SWEN BIT(0)
+
+/* SPLLCON Reg fields */
+#define PLL_RANGE_MASK 0x07
+#define PLL_RANGE_SHIFT 0
+#define PLL_ICLK_MASK 0x01
+#define PLL_ICLK_SHIFT 7
+#define PLL_IDIV_MASK 0x07
+#define PLL_IDIV_SHIFT 8
+#define PLL_ODIV_MASK 0x07
+#define PLL_ODIV_SHIFT 24
+#define PLL_MULT_MASK 0x7F
+#define PLL_MULT_SHIFT 16
+#define PLL_MULT_MAX 128
+#define PLL_ODIV_MIN 1
+#define PLL_ODIV_MAX 5
+
+/* Peripheral Bus Clock Reg Fields */
+#define PB_DIV_MASK 0x7f
+#define PB_DIV_SHIFT 0
+#define PB_DIV_READY BIT(11)
+#define PB_DIV_ENABLE BIT(15)
+#define PB_DIV_MAX 128
+#define PB_DIV_MIN 0
+
+/* Reference Oscillator Control Reg fields */
+#define REFO_SEL_MASK 0x0f
+#define REFO_SEL_SHIFT 0
+#define REFO_ACTIVE BIT(8)
+#define REFO_DIVSW_EN BIT(9)
+#define REFO_OE BIT(12)
+#define REFO_ON BIT(15)
+#define REFO_DIV_SHIFT 16
+#define REFO_DIV_MASK 0x7fff
+
+/* Reference Oscillator Trim Register Fields */
+#define REFO_TRIM_REG 0x10
+#define REFO_TRIM_MASK 0x1ff
+#define REFO_TRIM_SHIFT 23
+#define REFO_TRIM_MAX 511
+
+/* Mux Slew Control Register fields */
+#define SLEW_BUSY BIT(0)
+#define SLEW_DOWNEN BIT(1)
+#define SLEW_UPEN BIT(2)
+#define SLEW_DIV 0x07
+#define SLEW_DIV_SHIFT 8
+#define SLEW_SYSDIV 0x0f
+#define SLEW_SYSDIV_SHIFT 20
+
+/* Clock Poll Timeout */
+#define LOCK_TIMEOUT_US USEC_PER_MSEC
+
+/* SoC specific clock needed during SPLL clock rate switch */
+static struct clk_hw *pic32_sclk_hw;
+
+/* add instruction pipeline delay while the CPU clock is in transition. */
+#define cpu_nop5() \
+do { \
+ __asm__ __volatile__("nop"); \
+ __asm__ __volatile__("nop"); \
+ __asm__ __volatile__("nop"); \
+ __asm__ __volatile__("nop"); \
+ __asm__ __volatile__("nop"); \
+} while (0)
+
+/* Peripheral bus clocks */
+struct pic32_periph_clk {
+ struct clk_hw hw;
+ void __iomem *ctrl_reg;
+ struct pic32_clk_common *core;
+};
+
+#define clkhw_to_pbclk(_hw) container_of(_hw, struct pic32_periph_clk, hw)
+
+static int pbclk_is_enabled(struct clk_hw *hw)
+{
+ struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
+
+ return readl(pb->ctrl_reg) & PB_DIV_ENABLE;
+}
+
+static int pbclk_enable(struct clk_hw *hw)
+{
+ struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
+
+ writel(PB_DIV_ENABLE, PIC32_SET(pb->ctrl_reg));
+ return 0;
+}
+
+static void pbclk_disable(struct clk_hw *hw)
+{
+ struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
+
+ writel(PB_DIV_ENABLE, PIC32_CLR(pb->ctrl_reg));
+}
+
+static unsigned long calc_best_divided_rate(unsigned long rate,
+ unsigned long parent_rate,
+ u32 divider_max,
+ u32 divider_min)
+{
+ unsigned long divided_rate, divided_rate_down, best_rate;
+ unsigned long div, div_up;
+
+ /* eq. clk_rate = parent_rate / divider.
+ *
+ * Find the divider that produces the divided rate closest to the
+ * target rate.
+ */
+ div = parent_rate / rate;
+ div = clamp_val(div, divider_min, divider_max);
+ div_up = clamp_val(div + 1, divider_min, divider_max);
+
+ divided_rate = parent_rate / div;
+ divided_rate_down = parent_rate / div_up;
+ if (abs(rate - divided_rate_down) < abs(rate - divided_rate))
+ best_rate = divided_rate_down;
+ else
+ best_rate = divided_rate;
+
+ return best_rate;
+}
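
A standalone re-derivation of the choice above (parent and target rates assumed): with a 100 MHz parent and a 7 MHz target, the candidate dividers are 14 (~7.14 MHz) and 15 (~6.67 MHz), and 14 is closer:

#include <stdio.h>
#include <stdlib.h>

static unsigned long best_divided_rate(unsigned long rate,
				       unsigned long parent,
				       unsigned long div_max,
				       unsigned long div_min)
{
	unsigned long div = parent / rate, div_up;

	if (div < div_min)
		div = div_min;
	if (div > div_max)
		div = div_max;
	div_up = (div + 1 > div_max) ? div_max : div + 1;

	if (labs((long)(rate - parent / div_up)) <
	    labs((long)(rate - parent / div)))
		return parent / div_up;
	return parent / div;
}

int main(void)
{
	/* prints 7142857: divider 14 wins over 15 */
	printf("%lu\n", best_divided_rate(7000000, 100000000, 128, 1));
	return 0;
}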
+
+static inline u32 pbclk_read_pbdiv(struct pic32_periph_clk *pb)
+{
+ return ((readl(pb->ctrl_reg) >> PB_DIV_SHIFT) & PB_DIV_MASK) + 1;
+}
+
+static unsigned long pbclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
+
+ return parent_rate / pbclk_read_pbdiv(pb);
+}
+
+static long pbclk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return calc_best_divided_rate(rate, *parent_rate,
+ PB_DIV_MAX, PB_DIV_MIN);
+}
+
+static int pbclk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct pic32_periph_clk *pb = clkhw_to_pbclk(hw);
+ unsigned long flags;
+ u32 v, div;
+ int err;
+
+ /* check & wait for DIV_READY */
+ err = readl_poll_timeout(pb->ctrl_reg, v, v & PB_DIV_READY,
+ 1, LOCK_TIMEOUT_US);
+ if (err)
+ return err;
+
+ /* calculate clkdiv and best rate */
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+
+ spin_lock_irqsave(&pb->core->reg_lock, flags);
+
+ /* apply new div */
+ v = readl(pb->ctrl_reg);
+ v &= ~PB_DIV_MASK;
+ v |= (div - 1);
+
+ pic32_syskey_unlock();
+
+ writel(v, pb->ctrl_reg);
+
+ spin_unlock_irqrestore(&pb->core->reg_lock, flags);
+
+ /* wait again, for pbdivready */
+ err = readl_poll_timeout_atomic(pb->ctrl_reg, v, v & PB_DIV_READY,
+ 1, LOCK_TIMEOUT_US);
+ if (err)
+ return err;
+
+ /* confirm that new div is applied correctly */
+ return (pbclk_read_pbdiv(pb) == div) ? 0 : -EBUSY;
+}
+
+const struct clk_ops pic32_pbclk_ops = {
+ .enable = pbclk_enable,
+ .disable = pbclk_disable,
+ .is_enabled = pbclk_is_enabled,
+ .recalc_rate = pbclk_recalc_rate,
+ .round_rate = pbclk_round_rate,
+ .set_rate = pbclk_set_rate,
+};
+
+struct clk *pic32_periph_clk_register(const struct pic32_periph_clk_data *desc,
+ struct pic32_clk_common *core)
+{
+ struct pic32_periph_clk *pbclk;
+ struct clk *clk;
+
+ pbclk = devm_kzalloc(core->dev, sizeof(*pbclk), GFP_KERNEL);
+ if (!pbclk)
+ return ERR_PTR(-ENOMEM);
+
+ pbclk->hw.init = &desc->init_data;
+ pbclk->core = core;
+ pbclk->ctrl_reg = desc->ctrl_reg + core->iobase;
+
+ clk = devm_clk_register(core->dev, &pbclk->hw);
+ if (IS_ERR(clk)) {
+ dev_err(core->dev, "%s: clk_register() failed\n", __func__);
+ devm_kfree(core->dev, pbclk);
+ }
+
+ return clk;
+}
+
+/* Reference oscillator operations */
+struct pic32_ref_osc {
+ struct clk_hw hw;
+ void __iomem *ctrl_reg;
+ const u32 *parent_map;
+ struct pic32_clk_common *core;
+};
+
+#define clkhw_to_refosc(_hw) container_of(_hw, struct pic32_ref_osc, hw)
+
+static int roclk_is_enabled(struct clk_hw *hw)
+{
+ struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
+
+ return readl(refo->ctrl_reg) & REFO_ON;
+}
+
+static int roclk_enable(struct clk_hw *hw)
+{
+ struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
+
+ writel(REFO_ON | REFO_OE, PIC32_SET(refo->ctrl_reg));
+ return 0;
+}
+
+static void roclk_disable(struct clk_hw *hw)
+{
+ struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
+
+ writel(REFO_ON | REFO_OE, PIC32_CLR(refo->ctrl_reg));
+}
+
+static void roclk_init(struct clk_hw *hw)
+{
+ /* initialize clock in disabled state */
+ roclk_disable(hw);
+}
+
+static u8 roclk_get_parent(struct clk_hw *hw)
+{
+ struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
+ u32 v, i;
+
+ v = (readl(refo->ctrl_reg) >> REFO_SEL_SHIFT) & REFO_SEL_MASK;
+
+ if (!refo->parent_map)
+ return v;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++)
+ if (refo->parent_map[i] == v)
+ return i;
+
+ return -EINVAL;
+}
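
The parent_map indirection above translates a hardware select value back into a clock-framework parent index. A tiny standalone sketch using the PIC32MZDA REFO map that appears later in this patch:

#include <stdio.h>

static int hw_sel_to_index(const unsigned int *map, unsigned int n,
			   unsigned int sel)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (map[i] == sel)
			return i;
	return -1;	/* select value not exposed as a parent */
}

int main(void)
{
	/* REFO map from clk-pic32mzda.c below: hardware value 6 is skipped */
	const unsigned int parent_map[] = { 0, 1, 2, 3, 4, 5, 7, 8, 9 };

	/* hardware select 7 maps to parent index 6 ("sys_pll") */
	printf("%d\n", hw_sel_to_index(parent_map, 9, 7));
	return 0;
}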
+
+static unsigned long roclk_calc_rate(unsigned long parent_rate,
+ u32 rodiv, u32 rotrim)
+{
+ u64 rate64;
+
+ /* fout = fin / [2 * {div + (trim / 512)}]
+ * = fin * 512 / [1024 * div + 2 * trim]
+ * = fin * 256 / (512 * div + trim)
+ * = (fin << 8) / ((div << 9) + trim)
+ */
+ if (rotrim) {
+ rodiv = (rodiv << 9) + rotrim;
+ rate64 = parent_rate;
+ rate64 <<= 8;
+ do_div(rate64, rodiv);
+ } else if (rodiv) {
+ rate64 = parent_rate / (rodiv << 1);
+ } else {
+ rate64 = parent_rate;
+ }
+ return rate64;
+}
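
A quick numeric check of the formula in the comment (input values assumed): with fin = 24 MHz, rodiv = 3 and rotrim = 256, fout = (fin << 8) / ((3 << 9) + 256) = 24 MHz / 7, about 3.43 MHz:

#include <stdio.h>
#include <stdint.h>

/* fout = fin / [2 * (rodiv + rotrim / 512)], in the integer form above */
static uint64_t refo_rate(uint64_t fin, uint32_t rodiv, uint32_t rotrim)
{
	if (rotrim)
		return (fin << 8) / (((uint64_t)rodiv << 9) + rotrim);
	if (rodiv)
		return fin / (rodiv << 1);
	return fin;
}

int main(void)
{
	/* prints 3428571, i.e. 24 MHz / (2 * 3.5) */
	printf("%llu\n", (unsigned long long)refo_rate(24000000, 3, 256));
	return 0;
}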
+
+static void roclk_calc_div_trim(unsigned long rate,
+ unsigned long parent_rate,
+ u32 *rodiv_p, u32 *rotrim_p)
+{
+ u32 div, rotrim, rodiv;
+ u64 frac;
+
+ /* Find an integer approximation of the floating-point arithmetic.
+ * fout = fin / [2 * {rodiv + (rotrim / 512)}] ... (1)
+ * i.e. fout = fin / (2 * DIV),
+ * where DIV = rodiv + (rotrim / 512)
+ *
+ * Since the kernel does not perform floating-point arithmetic,
+ * (rotrim / 512) truncates to zero, leaving DIV equal to rodiv.
+ *
+ * i.e. fout = (fin * 256) / [(512 * rodiv) + rotrim] ... from (1)
+ * i.e. rotrim = ((fin * 256) / fout) - (512 * rodiv)
+ */
+ if (parent_rate <= rate) {
+ div = 0;
+ frac = 0;
+ rodiv = 0;
+ rotrim = 0;
+ } else {
+ div = parent_rate / (rate << 1);
+ frac = parent_rate;
+ frac <<= 8;
+ do_div(frac, rate);
+ frac -= (u64)(div << 9);
+
+ rodiv = (div > REFO_DIV_MASK) ? REFO_DIV_MASK : div;
+ rotrim = (frac >= REFO_TRIM_MAX) ? REFO_TRIM_MAX : frac;
+ }
+
+ if (rodiv_p)
+ *rodiv_p = rodiv;
+
+ if (rotrim_p)
+ *rotrim_p = rotrim;
+}
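
And the inverse, round-tripping the example above (values assumed, clamping to the register widths omitted): a target of 3428571 Hz from a 24 MHz parent recovers rodiv = 3 and rotrim = 256:

#include <stdio.h>
#include <stdint.h>

static void calc_div_trim(uint64_t fin, uint64_t fout,
			  uint32_t *rodiv, uint32_t *rotrim)
{
	uint64_t frac;

	if (fin <= fout) {
		*rodiv = 0;
		*rotrim = 0;
		return;
	}
	*rodiv = fin / (fout << 1);
	frac = (fin << 8) / fout - ((uint64_t)*rodiv << 9);
	*rotrim = (frac > 511) ? 511 : (uint32_t)frac;
}

int main(void)
{
	uint32_t rodiv, rotrim;

	calc_div_trim(24000000, 3428571, &rodiv, &rotrim);
	printf("rodiv=%u rotrim=%u\n", rodiv, rotrim);
	return 0;
}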
+
+static unsigned long roclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
+ u32 v, rodiv, rotrim;
+
+ /* get rodiv */
+ v = readl(refo->ctrl_reg);
+ rodiv = (v >> REFO_DIV_SHIFT) & REFO_DIV_MASK;
+
+ /* get trim */
+ v = readl(refo->ctrl_reg + REFO_TRIM_REG);
+ rotrim = (v >> REFO_TRIM_SHIFT) & REFO_TRIM_MASK;
+
+ return roclk_calc_rate(parent_rate, rodiv, rotrim);
+}
+
+static long roclk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ u32 rotrim, rodiv;
+
+ /* calculate dividers for new rate */
+ roclk_calc_div_trim(rate, *parent_rate, &rodiv, &rotrim);
+
+ /* calculate new rate (rounding) based on new rodiv & rotrim */
+ return roclk_calc_rate(*parent_rate, rodiv, rotrim);
+}
+
+static int roclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *parent_clk, *best_parent_clk = NULL;
+ unsigned int i, delta, best_delta = -1;
+ unsigned long parent_rate, best_parent_rate = 0;
+ unsigned long best = 0, nearest_rate;
+
+ /* find a parent which can generate the nearest clock rate >= rate */
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ /* get parent */
+ parent_clk = clk_hw_get_parent_by_index(hw, i);
+ if (!parent_clk)
+ continue;
+
+ /* skip if parent runs slower than target rate */
+ parent_rate = clk_hw_get_rate(parent_clk);
+ if (req->rate > parent_rate)
+ continue;
+
+ nearest_rate = roclk_round_rate(hw, req->rate, &parent_rate);
+ delta = abs(nearest_rate - req->rate);
+ if ((nearest_rate >= req->rate) && (delta < best_delta)) {
+ best_parent_clk = parent_clk;
+ best_parent_rate = parent_rate;
+ best = nearest_rate;
+ best_delta = delta;
+
+ if (delta == 0)
+ break;
+ }
+ }
+
+ /* if no match found, retain old rate */
+ if (!best_parent_clk) {
+ pr_err("%s:%s, no parent found for rate %lu.\n",
+ __func__, clk_hw_get_name(hw), req->rate);
+ return clk_hw_get_rate(hw);
+ }
+
+ pr_debug("%s,rate %lu, best_parent(%s, %lu), best %lu, delta %d\n",
+ clk_hw_get_name(hw), req->rate,
+ clk_hw_get_name(best_parent_clk), best_parent_rate,
+ best, best_delta);
+
+ if (req->best_parent_rate)
+ req->best_parent_rate = best_parent_rate;
+
+ if (req->best_parent_hw)
+ req->best_parent_hw = best_parent_clk;
+
+ return best;
+}
+
+static int roclk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
+ unsigned long flags;
+ u32 v;
+ int err;
+
+ if (refo->parent_map)
+ index = refo->parent_map[index];
+
+ /* wait until ACTIVE bit is zero or timeout */
+ err = readl_poll_timeout(refo->ctrl_reg, v, !(v & REFO_ACTIVE),
+ 1, LOCK_TIMEOUT_US);
+ if (err) {
+ pr_err("%s: poll failed, clk active\n", clk_hw_get_name(hw));
+ return err;
+ }
+
+ spin_lock_irqsave(&refo->core->reg_lock, flags);
+
+ pic32_syskey_unlock();
+
+ /* calculate & apply new */
+ v = readl(refo->ctrl_reg);
+ v &= ~(REFO_SEL_MASK << REFO_SEL_SHIFT);
+ v |= index << REFO_SEL_SHIFT;
+
+ writel(v, refo->ctrl_reg);
+
+ spin_unlock_irqrestore(&refo->core->reg_lock, flags);
+
+ return 0;
+}
+
+static int roclk_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u8 index)
+{
+ struct pic32_ref_osc *refo = clkhw_to_refosc(hw);
+ unsigned long flags;
+ u32 trim, rodiv, v;
+ int err;
+
+ /* calculate new rodiv & rotrim for new rate */
+ roclk_calc_div_trim(rate, parent_rate, &rodiv, &trim);
+
+ pr_debug("parent_rate = %lu, rate = %lu, div = %d, trim = %d\n",
+ parent_rate, rate, rodiv, trim);
+
+ /* wait until no source change or divider switch is in progress */
+ err = readl_poll_timeout(refo->ctrl_reg, v,
+ !(v & (REFO_ACTIVE | REFO_DIVSW_EN)),
+ 1, LOCK_TIMEOUT_US);
+ if (err) {
+ pr_err("%s: poll timed out, clock is still active\n", __func__);
+ return err;
+ }
+
+ spin_lock_irqsave(&refo->core->reg_lock, flags);
+ v = readl(refo->ctrl_reg);
+
+ pic32_syskey_unlock();
+
+ /* apply parent, if required */
+ if (refo->parent_map)
+ index = refo->parent_map[index];
+
+ v &= ~(REFO_SEL_MASK << REFO_SEL_SHIFT);
+ v |= index << REFO_SEL_SHIFT;
+
+ /* apply RODIV */
+ v &= ~(REFO_DIV_MASK << REFO_DIV_SHIFT);
+ v |= rodiv << REFO_DIV_SHIFT;
+ writel(v, refo->ctrl_reg);
+
+ /* apply ROTRIM */
+ v = readl(refo->ctrl_reg + REFO_TRIM_REG);
+ v &= ~(REFO_TRIM_MASK << REFO_TRIM_SHIFT);
+ v |= trim << REFO_TRIM_SHIFT;
+ writel(v, refo->ctrl_reg + REFO_TRIM_REG);
+
+ /* enable & activate divider switching */
+ writel(REFO_ON | REFO_DIVSW_EN, PIC32_SET(refo->ctrl_reg));
+
+ /* wait until the divider switch (DIVSW_EN) completes */
+ err = readl_poll_timeout_atomic(refo->ctrl_reg, v, !(v & REFO_DIVSW_EN),
+ 1, LOCK_TIMEOUT_US);
+ /* leave the clk gated as it was */
+ writel(REFO_ON, PIC32_CLR(refo->ctrl_reg));
+
+ spin_unlock_irqrestore(&refo->core->reg_lock, flags);
+
+ return err;
+}
+
+static int roclk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u8 index = roclk_get_parent(hw);
+
+ return roclk_set_rate_and_parent(hw, rate, parent_rate, index);
+}
+
+const struct clk_ops pic32_roclk_ops = {
+ .enable = roclk_enable,
+ .disable = roclk_disable,
+ .is_enabled = roclk_is_enabled,
+ .get_parent = roclk_get_parent,
+ .set_parent = roclk_set_parent,
+ .determine_rate = roclk_determine_rate,
+ .recalc_rate = roclk_recalc_rate,
+ .set_rate_and_parent = roclk_set_rate_and_parent,
+ .set_rate = roclk_set_rate,
+ .init = roclk_init,
+};
+
+struct clk *pic32_refo_clk_register(const struct pic32_ref_osc_data *data,
+ struct pic32_clk_common *core)
+{
+ struct pic32_ref_osc *refo;
+ struct clk *clk;
+
+ refo = devm_kzalloc(core->dev, sizeof(*refo), GFP_KERNEL);
+ if (!refo)
+ return ERR_PTR(-ENOMEM);
+
+ refo->core = core;
+ refo->hw.init = &data->init_data;
+ refo->ctrl_reg = data->ctrl_reg + core->iobase;
+ refo->parent_map = data->parent_map;
+
+ clk = devm_clk_register(core->dev, &refo->hw);
+ if (IS_ERR(clk))
+ dev_err(core->dev, "%s: clk_register() failed\n", __func__);
+
+ return clk;
+}
+
+struct pic32_sys_pll {
+ struct clk_hw hw;
+ void __iomem *ctrl_reg;
+ void __iomem *status_reg;
+ u32 lock_mask;
+ u32 idiv; /* PLL iclk divider, treated as fixed */
+ struct pic32_clk_common *core;
+};
+
+#define clkhw_to_spll(_hw) container_of(_hw, struct pic32_sys_pll, hw)
+
+static inline u32 spll_odiv_to_divider(u32 odiv)
+{
+ odiv = clamp_val(odiv, PLL_ODIV_MIN, PLL_ODIV_MAX);
+
+ return 1 << odiv;
+}
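
The ODIV field encodes a power-of-two post-divider, so after clamping to [1, 5] the divider spans 2 to 32. A short standalone check (sample values assumed):

#include <stdio.h>

static unsigned int odiv_to_divider(unsigned int odiv)
{
	if (odiv < 1)
		odiv = 1;
	if (odiv > 5)
		odiv = 5;
	return 1U << odiv;
}

int main(void)
{
	printf("%u %u\n", odiv_to_divider(0), odiv_to_divider(7)); /* 2 32 */
	return 0;
}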
+
+static unsigned long spll_calc_mult_div(struct pic32_sys_pll *pll,
+ unsigned long rate,
+ unsigned long parent_rate,
+ u32 *mult_p, u32 *odiv_p)
+{
+ u32 mul, div, best_mul = 1, best_div = 1;
+ unsigned long new_rate, best_rate = rate;
+ unsigned int best_delta = -1, delta, match_found = 0;
+ u64 rate64;
+
+ parent_rate /= pll->idiv;
+
+ for (mul = 1; mul <= PLL_MULT_MAX; mul++) {
+ for (div = PLL_ODIV_MIN; div <= PLL_ODIV_MAX; div++) {
+ rate64 = parent_rate;
+ rate64 *= mul;
+ do_div(rate64, 1 << div);
+ new_rate = rate64;
+ delta = abs(rate - new_rate);
+ if ((new_rate >= rate) && (delta < best_delta)) {
+ best_delta = delta;
+ best_rate = new_rate;
+ best_mul = mul;
+ best_div = div;
+ match_found = 1;
+ }
+ }
+ }
+
+ if (!match_found) {
+ pr_warn("spll: no match found\n");
+ return 0;
+ }
+
+ pr_debug("rate %lu, par_rate %lu/mult %u, div %u, best_rate %lu\n",
+ rate, parent_rate, best_mul, best_div, best_rate);
+
+ if (mult_p)
+ *mult_p = best_mul - 1;
+
+ if (odiv_p)
+ *odiv_p = best_div;
+
+ return best_rate;
+}
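
The search space is small (128 multipliers by 5 output dividers), so the brute force above is cheap. A standalone sketch with assumed parent and target rates (note the driver stores best_mul - 1 in the register; this prints the raw multiplier):

#include <stdio.h>
#include <stdint.h>

/* mult in [1, 128] and odiv in [1, 5], per the constants above */
int main(void)
{
	const uint64_t parent = 24000000, target = 200000000;
	uint64_t best_rate = 0, best_delta = UINT64_MAX, rate;
	unsigned int mul, div, best_mul = 1, best_div = 1;

	for (mul = 1; mul <= 128; mul++) {
		for (div = 1; div <= 5; div++) {
			rate = (parent * mul) >> div;
			if (rate < target || rate - target >= best_delta)
				continue;
			best_delta = rate - target;
			best_rate = rate;
			best_mul = mul;
			best_div = div;
		}
	}
	/* prints mult=67 odiv=3 rate=201000000 */
	printf("mult=%u odiv=%u rate=%llu\n", best_mul, best_div,
	       (unsigned long long)best_rate);
	return 0;
}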
+
+static unsigned long spll_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct pic32_sys_pll *pll = clkhw_to_spll(hw);
+ unsigned long pll_in_rate;
+ u32 mult, odiv, div, v;
+ u64 rate64;
+
+ v = readl(pll->ctrl_reg);
+ odiv = ((v >> PLL_ODIV_SHIFT) & PLL_ODIV_MASK);
+ mult = ((v >> PLL_MULT_SHIFT) & PLL_MULT_MASK) + 1;
+ div = spll_odiv_to_divider(odiv);
+
+ /* pll_in_rate = parent_rate / idiv
+ * pll_out_rate = pll_in_rate * mult / div;
+ */
+ pll_in_rate = parent_rate / pll->idiv;
+ rate64 = pll_in_rate;
+ rate64 *= mult;
+ do_div(rate64, div);
+
+ return rate64;
+}
+
+static long spll_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct pic32_sys_pll *pll = clkhw_to_spll(hw);
+
+ return spll_calc_mult_div(pll, rate, *parent_rate, NULL, NULL);
+}
+
+static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct pic32_sys_pll *pll = clkhw_to_spll(hw);
+ unsigned long ret, flags;
+ u32 mult, odiv, v;
+ int err;
+
+ ret = spll_calc_mult_div(pll, rate, parent_rate, &mult, &odiv);
+ if (!ret)
+ return -EINVAL;
+
+ /*
+ * We can't change the SPLL counters while it is in active use
+ * by SYSCLK, so check before applying new counters/rate.
+ */
+
+ /* Is spll_clk the active parent of sys_clk? */
+ if (unlikely(clk_hw_get_parent(pic32_sclk_hw) == hw)) {
+ pr_err("%s: failed, clk in-use\n", __func__);
+ return -EBUSY;
+ }
+
+ spin_lock_irqsave(&pll->core->reg_lock, flags);
+
+ /* apply new multiplier & divisor */
+ v = readl(pll->ctrl_reg);
+ v &= ~(PLL_MULT_MASK << PLL_MULT_SHIFT);
+ v &= ~(PLL_ODIV_MASK << PLL_ODIV_SHIFT);
+ v |= (mult << PLL_MULT_SHIFT) | (odiv << PLL_ODIV_SHIFT);
+
+ /* sys unlock before write */
+ pic32_syskey_unlock();
+
+ writel(v, pll->ctrl_reg);
+ cpu_relax();
+
+ /* insert a few nops (5-stage pipeline) to ensure the CPU does not hang */
+ cpu_nop5();
+ cpu_nop5();
+
+ /* Wait until PLL is locked (maximum 100 usecs). */
+ err = readl_poll_timeout_atomic(pll->status_reg, v,
+ v & pll->lock_mask, 1, 100);
+ spin_unlock_irqrestore(&pll->core->reg_lock, flags);
+
+ return err;
+}
+
+/* SPLL clock operation */
+const struct clk_ops pic32_spll_ops = {
+ .recalc_rate = spll_clk_recalc_rate,
+ .round_rate = spll_clk_round_rate,
+ .set_rate = spll_clk_set_rate,
+};
+
+struct clk *pic32_spll_clk_register(const struct pic32_sys_pll_data *data,
+ struct pic32_clk_common *core)
+{
+ struct pic32_sys_pll *spll;
+ struct clk *clk;
+
+ spll = devm_kzalloc(core->dev, sizeof(*spll), GFP_KERNEL);
+ if (!spll)
+ return ERR_PTR(-ENOMEM);
+
+ spll->core = core;
+ spll->hw.init = &data->init_data;
+ spll->ctrl_reg = data->ctrl_reg + core->iobase;
+ spll->status_reg = data->status_reg + core->iobase;
+ spll->lock_mask = data->lock_mask;
+
+ /* cache PLL idiv; the PLL driver treats it as a constant. */
+ spll->idiv = (readl(spll->ctrl_reg) >> PLL_IDIV_SHIFT) & PLL_IDIV_MASK;
+ spll->idiv += 1;
+
+ clk = devm_clk_register(core->dev, &spll->hw);
+ if (IS_ERR(clk))
+ dev_err(core->dev, "sys_pll: clk_register() failed\n");
+
+ return clk;
+}
+
+/* System mux clock(aka SCLK) */
+
+struct pic32_sys_clk {
+ struct clk_hw hw;
+ void __iomem *mux_reg;
+ void __iomem *slew_reg;
+ u32 slew_div;
+ const u32 *parent_map;
+ struct pic32_clk_common *core;
+};
+
+#define clkhw_to_sys_clk(_hw) container_of(_hw, struct pic32_sys_clk, hw)
+
+static unsigned long sclk_get_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
+ u32 div;
+
+ div = (readl(sclk->slew_reg) >> SLEW_SYSDIV_SHIFT) & SLEW_SYSDIV;
+ div += 1; /* sys-div to divider */
+
+ return parent_rate / div;
+}
+
+static long sclk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return calc_best_divided_rate(rate, *parent_rate, SLEW_SYSDIV, 1);
+}
+
+static int sclk_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
+ unsigned long flags;
+ u32 v, div;
+ int err;
+
+ div = parent_rate / rate;
+
+ spin_lock_irqsave(&sclk->core->reg_lock, flags);
+
+ /* apply new div */
+ v = readl(sclk->slew_reg);
+ v &= ~(SLEW_SYSDIV << SLEW_SYSDIV_SHIFT);
+ v |= (div - 1) << SLEW_SYSDIV_SHIFT;
+
+ pic32_syskey_unlock();
+
+ writel(v, sclk->slew_reg);
+
+ /* wait until BUSY is cleared */
+ err = readl_poll_timeout_atomic(sclk->slew_reg, v,
+ !(v & SLEW_BUSY), 1, LOCK_TIMEOUT_US);
+
+ spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
+
+ return err;
+}
+
+static u8 sclk_get_parent(struct clk_hw *hw)
+{
+ struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
+ u32 i, v;
+
+ v = (readl(sclk->mux_reg) >> OSC_CUR_SHIFT) & OSC_CUR_MASK;
+
+ if (!sclk->parent_map)
+ return v;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++)
+ if (sclk->parent_map[i] == v)
+ return i;
+ return -EINVAL;
+}
+
+static int sclk_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
+ unsigned long flags;
+ u32 nosc, cosc, v;
+ int err;
+
+ spin_lock_irqsave(&sclk->core->reg_lock, flags);
+
+ /* find new_osc */
+ nosc = sclk->parent_map ? sclk->parent_map[index] : index;
+
+ /* set new parent */
+ v = readl(sclk->mux_reg);
+ v &= ~(OSC_NEW_MASK << OSC_NEW_SHIFT);
+ v |= nosc << OSC_NEW_SHIFT;
+
+ pic32_syskey_unlock();
+
+ writel(v, sclk->mux_reg);
+
+ /* initiate switch */
+ writel(OSC_SWEN, PIC32_SET(sclk->mux_reg));
+ cpu_relax();
+
+ /* add nops to flush the pipeline (cpu_clk is in flux) */
+ cpu_nop5();
+
+ /* wait for SWEN bit to clear */
+ err = readl_poll_timeout_atomic(sclk->slew_reg, v,
+ !(v & OSC_SWEN), 1, LOCK_TIMEOUT_US);
+
+ spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
+
+ /*
+ * The SCLK clock-switching logic might reject a switch request if
+ * prerequisites are not met (e.g. the new clock source is absent or
+ * unstable).
+ * So confirm the switch before claiming success.
+ */
+ cosc = (readl(sclk->mux_reg) >> OSC_CUR_SHIFT) & OSC_CUR_MASK;
+ if (cosc != nosc) {
+ pr_err("%s: err, failed to set_parent() to %d, current %d\n",
+ clk_hw_get_name(hw), nosc, cosc);
+ err = -EBUSY;
+ }
+
+ return err;
+}
+
+static void sclk_init(struct clk_hw *hw)
+{
+ struct pic32_sys_clk *sclk = clkhw_to_sys_clk(hw);
+ unsigned long flags;
+ u32 v;
+
+ /* Maintain reference to this clk, required in spll_clk_set_rate() */
+ pic32_sclk_hw = hw;
+
+ /* apply slew divider on both up and down scaling */
+ if (sclk->slew_div) {
+ spin_lock_irqsave(&sclk->core->reg_lock, flags);
+ v = readl(sclk->slew_reg);
+ v &= ~(SLEW_DIV << SLEW_DIV_SHIFT);
+ v |= sclk->slew_div << SLEW_DIV_SHIFT;
+ v |= SLEW_DOWNEN | SLEW_UPEN;
+ writel(v, sclk->slew_reg);
+ spin_unlock_irqrestore(&sclk->core->reg_lock, flags);
+ }
+}
+
+/* sclk with post-divider */
+const struct clk_ops pic32_sclk_ops = {
+ .get_parent = sclk_get_parent,
+ .set_parent = sclk_set_parent,
+ .round_rate = sclk_round_rate,
+ .set_rate = sclk_set_rate,
+ .recalc_rate = sclk_get_rate,
+ .init = sclk_init,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+/* sclk with no slew and no post-divider */
+const struct clk_ops pic32_sclk_no_div_ops = {
+ .get_parent = sclk_get_parent,
+ .set_parent = sclk_set_parent,
+ .init = sclk_init,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+struct clk *pic32_sys_clk_register(const struct pic32_sys_clk_data *data,
+ struct pic32_clk_common *core)
+{
+ struct pic32_sys_clk *sclk;
+ struct clk *clk;
+
+ sclk = devm_kzalloc(core->dev, sizeof(*sclk), GFP_KERNEL);
+ if (!sclk)
+ return ERR_PTR(-ENOMEM);
+
+ sclk->core = core;
+ sclk->hw.init = &data->init_data;
+ sclk->mux_reg = data->mux_reg + core->iobase;
+ sclk->slew_reg = data->slew_reg + core->iobase;
+ sclk->slew_div = data->slew_div;
+ sclk->parent_map = data->parent_map;
+
+ clk = devm_clk_register(core->dev, &sclk->hw);
+ if (IS_ERR(clk))
+ dev_err(core->dev, "%s: clk register failed\n", __func__);
+
+ return clk;
+}
+
+/* secondary oscillator */
+struct pic32_sec_osc {
+ struct clk_hw hw;
+ void __iomem *enable_reg;
+ void __iomem *status_reg;
+ u32 enable_mask;
+ u32 status_mask;
+ unsigned long fixed_rate;
+ struct pic32_clk_common *core;
+};
+
+#define clkhw_to_sosc(_hw) container_of(_hw, struct pic32_sec_osc, hw)
+static int sosc_clk_enable(struct clk_hw *hw)
+{
+ struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
+ u32 v;
+
+ /* enable SOSC */
+ pic32_syskey_unlock();
+ writel(sosc->enable_mask, PIC32_SET(sosc->enable_reg));
+
+ /* wait till warm-up period expires or ready-status is updated */
+ return readl_poll_timeout_atomic(sosc->status_reg, v,
+ v & sosc->status_mask, 1, 100);
+}
+
+static void sosc_clk_disable(struct clk_hw *hw)
+{
+ struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
+
+ pic32_syskey_unlock();
+ writel(sosc->enable_mask, PIC32_CLR(sosc->enable_reg));
+}
+
+static int sosc_clk_is_enabled(struct clk_hw *hw)
+{
+ struct pic32_sec_osc *sosc = clkhw_to_sosc(hw);
+ u32 enabled, ready;
+
+ /* check enabled and ready status */
+ enabled = readl(sosc->enable_reg) & sosc->enable_mask;
+ ready = readl(sosc->status_reg) & sosc->status_mask;
+
+ return enabled && ready;
+}
+
+static unsigned long sosc_clk_calc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ return clkhw_to_sosc(hw)->fixed_rate;
+}
+
+const struct clk_ops pic32_sosc_ops = {
+ .enable = sosc_clk_enable,
+ .disable = sosc_clk_disable,
+ .is_enabled = sosc_clk_is_enabled,
+ .recalc_rate = sosc_clk_calc_rate,
+};
+
+struct clk *pic32_sosc_clk_register(const struct pic32_sec_osc_data *data,
+ struct pic32_clk_common *core)
+{
+ struct pic32_sec_osc *sosc;
+
+ sosc = devm_kzalloc(core->dev, sizeof(*sosc), GFP_KERNEL);
+ if (!sosc)
+ return ERR_PTR(-ENOMEM);
+
+ sosc->core = core;
+ sosc->hw.init = &data->init_data;
+ sosc->fixed_rate = data->fixed_rate;
+ sosc->enable_mask = data->enable_mask;
+ sosc->status_mask = data->status_mask;
+ sosc->enable_reg = data->enable_reg + core->iobase;
+ sosc->status_reg = data->status_reg + core->iobase;
+
+ return devm_clk_register(core->dev, &sosc->hw);
+}
diff --git a/drivers/clk/microchip/clk-core.h b/drivers/clk/microchip/clk-core.h
new file mode 100644
index 000000000..856664277
--- /dev/null
+++ b/drivers/clk/microchip/clk-core.h
@@ -0,0 +1,84 @@
+/*
+ * Purna Chandra Mandal, <purna.mandal@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef __MICROCHIP_CLK_PIC32_H_
+#define __MICROCHIP_CLK_PIC32_H_
+
+#include <linux/clk-provider.h>
+
+/* PIC32 clock data */
+struct pic32_clk_common {
+ struct device *dev;
+ void __iomem *iobase;
+ spinlock_t reg_lock; /* clock lock */
+};
+
+/* System PLL clock */
+struct pic32_sys_pll_data {
+ struct clk_init_data init_data;
+ const u32 ctrl_reg;
+ const u32 status_reg;
+ const u32 lock_mask;
+};
+
+/* System clock */
+struct pic32_sys_clk_data {
+ struct clk_init_data init_data;
+ const u32 mux_reg;
+ const u32 slew_reg;
+ const u32 *parent_map;
+ const u32 slew_div;
+};
+
+/* Reference Oscillator clock */
+struct pic32_ref_osc_data {
+ struct clk_init_data init_data;
+ const u32 ctrl_reg;
+ const u32 *parent_map;
+};
+
+/* Peripheral Bus clock */
+struct pic32_periph_clk_data {
+ struct clk_init_data init_data;
+ const u32 ctrl_reg;
+};
+
+/* External Secondary Oscillator clock */
+struct pic32_sec_osc_data {
+ struct clk_init_data init_data;
+ const u32 enable_reg;
+ const u32 status_reg;
+ const u32 enable_mask;
+ const u32 status_mask;
+ const unsigned long fixed_rate;
+};
+
+extern const struct clk_ops pic32_pbclk_ops;
+extern const struct clk_ops pic32_sclk_ops;
+extern const struct clk_ops pic32_sclk_no_div_ops;
+extern const struct clk_ops pic32_spll_ops;
+extern const struct clk_ops pic32_roclk_ops;
+extern const struct clk_ops pic32_sosc_ops;
+
+struct clk *pic32_periph_clk_register(const struct pic32_periph_clk_data *data,
+ struct pic32_clk_common *core);
+struct clk *pic32_refo_clk_register(const struct pic32_ref_osc_data *data,
+ struct pic32_clk_common *core);
+struct clk *pic32_sys_clk_register(const struct pic32_sys_clk_data *data,
+ struct pic32_clk_common *core);
+struct clk *pic32_spll_clk_register(const struct pic32_sys_pll_data *data,
+ struct pic32_clk_common *core);
+struct clk *pic32_sosc_clk_register(const struct pic32_sec_osc_data *data,
+ struct pic32_clk_common *core);
+
+#endif /* __MICROCHIP_CLK_PIC32_H_*/
diff --git a/drivers/clk/microchip/clk-pic32mzda.c b/drivers/clk/microchip/clk-pic32mzda.c
new file mode 100644
index 000000000..51f543804
--- /dev/null
+++ b/drivers/clk/microchip/clk-pic32mzda.c
@@ -0,0 +1,275 @@
+/*
+ * Purna Chandra Mandal, <purna.mandal@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#include <dt-bindings/clock/microchip,pic32-clock.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <asm/traps.h>
+
+#include "clk-core.h"
+
+/* FRC Postscaler */
+#define OSC_FRCDIV_MASK 0x07
+#define OSC_FRCDIV_SHIFT 24
+
+/* SPLL fields */
+#define PLL_ICLK_MASK 0x01
+#define PLL_ICLK_SHIFT 7
+
+#define DECLARE_PERIPHERAL_CLOCK(__clk_name, __reg, __flags) \
+ { \
+ .ctrl_reg = (__reg), \
+ .init_data = { \
+ .name = (__clk_name), \
+ .parent_names = (const char *[]) { \
+ "sys_clk" \
+ }, \
+ .num_parents = 1, \
+ .ops = &pic32_pbclk_ops, \
+ .flags = (__flags), \
+ }, \
+ }
+
+#define DECLARE_REFO_CLOCK(__clkid, __reg) \
+ { \
+ .ctrl_reg = (__reg), \
+ .init_data = { \
+ .name = "refo" #__clkid "_clk", \
+ .parent_names = (const char *[]) { \
+ "sys_clk", "pb1_clk", "posc_clk", \
+ "frc_clk", "lprc_clk", "sosc_clk", \
+ "sys_pll", "refi" #__clkid "_clk", \
+ "bfrc_clk", \
+ }, \
+ .num_parents = 9, \
+ .flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE,\
+ .ops = &pic32_roclk_ops, \
+ }, \
+ .parent_map = (const u32[]) { \
+ 0, 1, 2, 3, 4, 5, 7, 8, 9 \
+ }, \
+ }
+
+static const struct pic32_ref_osc_data ref_clks[] = {
+ DECLARE_REFO_CLOCK(1, 0x80),
+ DECLARE_REFO_CLOCK(2, 0xa0),
+ DECLARE_REFO_CLOCK(3, 0xc0),
+ DECLARE_REFO_CLOCK(4, 0xe0),
+ DECLARE_REFO_CLOCK(5, 0x100),
+};
+
+static const struct pic32_periph_clk_data periph_clocks[] = {
+ DECLARE_PERIPHERAL_CLOCK("pb1_clk", 0x140, 0),
+ DECLARE_PERIPHERAL_CLOCK("pb2_clk", 0x150, CLK_IGNORE_UNUSED),
+ DECLARE_PERIPHERAL_CLOCK("pb3_clk", 0x160, 0),
+ DECLARE_PERIPHERAL_CLOCK("pb4_clk", 0x170, 0),
+ DECLARE_PERIPHERAL_CLOCK("pb5_clk", 0x180, 0),
+ DECLARE_PERIPHERAL_CLOCK("pb6_clk", 0x190, 0),
+ DECLARE_PERIPHERAL_CLOCK("cpu_clk", 0x1a0, CLK_IGNORE_UNUSED),
+};
+
+static const struct pic32_sys_clk_data sys_mux_clk = {
+ .slew_reg = 0x1c0,
+ .slew_div = 2, /* step of div_4 -> div_2 -> no_div */
+ .init_data = {
+ .name = "sys_clk",
+ .parent_names = (const char *[]) {
+ "frcdiv_clk", "sys_pll", "posc_clk",
+ "sosc_clk", "lprc_clk", "frcdiv_clk",
+ },
+ .num_parents = 6,
+ .ops = &pic32_sclk_ops,
+ },
+ .parent_map = (const u32[]) {
+ 0, 1, 2, 4, 5, 7,
+ },
+};
+
+static const struct pic32_sys_pll_data sys_pll = {
+ .ctrl_reg = 0x020,
+ .status_reg = 0x1d0,
+ .lock_mask = BIT(7),
+ .init_data = {
+ .name = "sys_pll",
+ .parent_names = (const char *[]) {
+ "spll_mux_clk"
+ },
+ .num_parents = 1,
+ .ops = &pic32_spll_ops,
+ },
+};
+
+static const struct pic32_sec_osc_data sosc_clk = {
+ .status_reg = 0x1d0,
+ .enable_mask = BIT(1),
+ .status_mask = BIT(4),
+ .init_data = {
+ .name = "sosc_clk",
+ .parent_names = NULL,
+ .ops = &pic32_sosc_ops,
+ },
+};
+
+static int pic32mzda_critical_clks[] = {
+ PB2CLK, PB7CLK
+};
+
+/* PIC32MZDA clock data */
+struct pic32mzda_clk_data {
+ struct clk *clks[MAXCLKS];
+ struct pic32_clk_common core;
+ struct clk_onecell_data onecell_data;
+ struct notifier_block failsafe_notifier;
+};
+
+static int pic32_fscm_nmi(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct pic32mzda_clk_data *cd;
+
+ cd = container_of(nb, struct pic32mzda_clk_data, failsafe_notifier);
+
+ /* SYSCLK is now running from BFRCCLK. Report clock failure. */
+ if (readl(cd->core.iobase) & BIT(2))
+ pr_alert("pic32-clk: FSCM detected clk failure.\n");
+
+ /* TODO: detect the reason for the failure and recover accordingly */
+
+ return NOTIFY_OK;
+}
+
+static int pic32mzda_clk_probe(struct platform_device *pdev)
+{
+ const char *const pll_mux_parents[] = {"posc_clk", "frc_clk"};
+ struct device_node *np = pdev->dev.of_node;
+ struct pic32mzda_clk_data *cd;
+ struct pic32_clk_common *core;
+ struct clk *pll_mux_clk, *clk;
+ struct clk **clks;
+ int nr_clks, i, ret;
+
+ cd = devm_kzalloc(&pdev->dev, sizeof(*cd), GFP_KERNEL);
+ if (!cd)
+ return -ENOMEM;
+
+ core = &cd->core;
+ core->iobase = of_io_request_and_map(np, 0, of_node_full_name(np));
+ if (IS_ERR(core->iobase)) {
+ dev_err(&pdev->dev, "pic32-clk: failed to map registers\n");
+ return PTR_ERR(core->iobase);
+ }
+
+ spin_lock_init(&core->reg_lock);
+ core->dev = &pdev->dev;
+ clks = &cd->clks[0];
+
+ /* register fixed rate clocks */
+ clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL,
+ 0, 24000000);
+ clks[FRCCLK] = clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL,
+ 0, 8000000);
+ clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL,
+ 0, 8000000);
+ clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL,
+ 0, 32000);
+ clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL,
+ 0, 24000000);
+ /* fixed rate (optional) clock */
+ if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) {
+ pr_info("pic32-clk: dt requests SOSC.\n");
+ clks[SOSCCLK] = pic32_sosc_clk_register(&sosc_clk, core);
+ }
+ /* divider clock */
+ clks[FRCDIVCLK] = clk_register_divider(&pdev->dev, "frcdiv_clk",
+ "frc_clk", 0,
+ core->iobase,
+ OSC_FRCDIV_SHIFT,
+ OSC_FRCDIV_MASK,
+ CLK_DIVIDER_POWER_OF_TWO,
+ &core->reg_lock);
+ /* PLL ICLK mux */
+ pll_mux_clk = clk_register_mux(&pdev->dev, "spll_mux_clk",
+ pll_mux_parents, 2, 0,
+ core->iobase + 0x020,
+ PLL_ICLK_SHIFT, 1, 0, &core->reg_lock);
+ if (IS_ERR(pll_mux_clk))
+ pr_err("spll_mux_clk: clk register failed\n");
+
+ /* PLL */
+ clks[PLLCLK] = pic32_spll_clk_register(&sys_pll, core);
+ /* SYSTEM clock */
+ clks[SCLK] = pic32_sys_clk_register(&sys_mux_clk, core);
+ /* Peripheral bus clocks */
+ for (nr_clks = PB1CLK, i = 0; nr_clks <= PB7CLK; i++, nr_clks++)
+ clks[nr_clks] = pic32_periph_clk_register(&periph_clocks[i],
+ core);
+ /* Reference oscillator clock */
+ for (nr_clks = REF1CLK, i = 0; nr_clks <= REF5CLK; i++, nr_clks++)
+ clks[nr_clks] = pic32_refo_clk_register(&ref_clks[i], core);
+
+ /* register clkdev */
+ for (i = 0; i < MAXCLKS; i++) {
+ if (IS_ERR(clks[i]))
+ continue;
+ clk_register_clkdev(clks[i], NULL, __clk_get_name(clks[i]));
+ }
+
+ /* register clock provider */
+ cd->onecell_data.clks = clks;
+ cd->onecell_data.clk_num = MAXCLKS;
+ ret = of_clk_add_provider(np, of_clk_src_onecell_get,
+ &cd->onecell_data);
+ if (ret)
+ return ret;
+
+ /* force enable critical clocks */
+ for (i = 0; i < ARRAY_SIZE(pic32mzda_critical_clks); i++) {
+ clk = clks[pic32mzda_critical_clks[i]];
+ if (clk_prepare_enable(clk))
+ dev_err(&pdev->dev, "clk_prepare_enable(%s) failed\n",
+ __clk_get_name(clk));
+ }
+
+ /* register NMI for failsafe clock monitor */
+ cd->failsafe_notifier.notifier_call = pic32_fscm_nmi;
+ return register_nmi_notifier(&cd->failsafe_notifier);
+}
+
+static const struct of_device_id pic32mzda_clk_match_table[] = {
+ { .compatible = "microchip,pic32mzda-clk", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pic32mzda_clk_match_table);
+
+static struct platform_driver pic32mzda_clk_driver = {
+ .probe = pic32mzda_clk_probe,
+ .driver = {
+ .name = "clk-pic32mzda",
+ .of_match_table = pic32mzda_clk_match_table,
+ },
+};
+
+static int __init microchip_pic32mzda_clk_init(void)
+{
+ return platform_driver_register(&pic32mzda_clk_driver);
+}
+core_initcall(microchip_pic32mzda_clk_init);
+
+MODULE_DESCRIPTION("Microchip PIC32MZDA Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:clk-pic32mzda");
diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c
index 38931dbd1..383f6a4f6 100644
--- a/drivers/clk/mmp/clk-mmp2.c
+++ b/drivers/clk/mmp/clk-mmp2.c
@@ -99,23 +99,19 @@ void __init mmp2_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
return;
}
- clk = clk_register_fixed_rate(NULL, "clk32", NULL, CLK_IS_ROOT, 3200);
+ clk = clk_register_fixed_rate(NULL, "clk32", NULL, 0, 3200);
clk_register_clkdev(clk, "clk32", NULL);
- vctcxo = clk_register_fixed_rate(NULL, "vctcxo", NULL, CLK_IS_ROOT,
- 26000000);
+ vctcxo = clk_register_fixed_rate(NULL, "vctcxo", NULL, 0, 26000000);
clk_register_clkdev(vctcxo, "vctcxo", NULL);
- clk = clk_register_fixed_rate(NULL, "pll1", NULL, CLK_IS_ROOT,
- 800000000);
+ clk = clk_register_fixed_rate(NULL, "pll1", NULL, 0, 800000000);
clk_register_clkdev(clk, "pll1", NULL);
- clk = clk_register_fixed_rate(NULL, "usb_pll", NULL, CLK_IS_ROOT,
- 480000000);
+ clk = clk_register_fixed_rate(NULL, "usb_pll", NULL, 0, 480000000);
clk_register_clkdev(clk, "usb_pll", NULL);
- clk = clk_register_fixed_rate(NULL, "pll2", NULL, CLK_IS_ROOT,
- 960000000);
+ clk = clk_register_fixed_rate(NULL, "pll2", NULL, 0, 960000000);
clk_register_clkdev(clk, "pll2", NULL);
clk = clk_register_fixed_factor(NULL, "pll1_2", "pll1",
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
index 251533d87..3a51fff1b 100644
--- a/drivers/clk/mmp/clk-of-mmp2.c
+++ b/drivers/clk/mmp/clk-of-mmp2.c
@@ -63,11 +63,11 @@ struct mmp2_clk_unit {
};
static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
- {MMP2_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
- {MMP2_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
- {MMP2_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 800000000},
- {MMP2_CLK_PLL2, "pll2", NULL, CLK_IS_ROOT, 960000000},
- {MMP2_CLK_USB_PLL, "usb_pll", NULL, CLK_IS_ROOT, 480000000},
+ {MMP2_CLK_CLK32, "clk32", NULL, 0, 32768},
+ {MMP2_CLK_VCTCXO, "vctcxo", NULL, 0, 26000000},
+ {MMP2_CLK_PLL1, "pll1", NULL, 0, 800000000},
+ {MMP2_CLK_PLL2, "pll2", NULL, 0, 960000000},
+ {MMP2_CLK_USB_PLL, "usb_pll", NULL, 0, 480000000},
};
static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
index 64eaf4141..87f2317b2 100644
--- a/drivers/clk/mmp/clk-of-pxa168.c
+++ b/drivers/clk/mmp/clk-of-pxa168.c
@@ -56,10 +56,10 @@ struct pxa168_clk_unit {
};
static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
- {PXA168_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
- {PXA168_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
- {PXA168_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000},
- {PXA168_CLK_USB_PLL, "usb_pll", NULL, CLK_IS_ROOT, 480000000},
+ {PXA168_CLK_CLK32, "clk32", NULL, 0, 32768},
+ {PXA168_CLK_VCTCXO, "vctcxo", NULL, 0, 26000000},
+ {PXA168_CLK_PLL1, "pll1", NULL, 0, 624000000},
+ {PXA168_CLK_USB_PLL, "usb_pll", NULL, 0, 480000000},
};
static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
diff --git a/drivers/clk/mmp/clk-of-pxa1928.c b/drivers/clk/mmp/clk-of-pxa1928.c
index 433a5ae1e..e478ff44e 100644
--- a/drivers/clk/mmp/clk-of-pxa1928.c
+++ b/drivers/clk/mmp/clk-of-pxa1928.c
@@ -34,12 +34,12 @@ struct pxa1928_clk_unit {
};
static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
- {0, "clk32", NULL, CLK_IS_ROOT, 32768},
- {0, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
- {0, "pll1_624", NULL, CLK_IS_ROOT, 624000000},
- {0, "pll5p", NULL, CLK_IS_ROOT, 832000000},
- {0, "pll5", NULL, CLK_IS_ROOT, 1248000000},
- {0, "usb_pll", NULL, CLK_IS_ROOT, 480000000},
+ {0, "clk32", NULL, 0, 32768},
+ {0, "vctcxo", NULL, 0, 26000000},
+ {0, "pll1_624", NULL, 0, 624000000},
+ {0, "pll5p", NULL, 0, 832000000},
+ {0, "pll5", NULL, 0, 1248000000},
+ {0, "usb_pll", NULL, 0, 480000000},
};
static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
index 13d617332..e22a67f76 100644
--- a/drivers/clk/mmp/clk-of-pxa910.c
+++ b/drivers/clk/mmp/clk-of-pxa910.c
@@ -56,10 +56,10 @@ struct pxa910_clk_unit {
};
static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
- {PXA910_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
- {PXA910_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
- {PXA910_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000},
- {PXA910_CLK_USB_PLL, "usb_pll", NULL, CLK_IS_ROOT, 480000000},
+ {PXA910_CLK_CLK32, "clk32", NULL, 0, 32768},
+ {PXA910_CLK_VCTCXO, "vctcxo", NULL, 0, 26000000},
+ {PXA910_CLK_PLL1, "pll1", NULL, 0, 624000000},
+ {PXA910_CLK_USB_PLL, "usb_pll", NULL, 0, 480000000},
};
static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c
index 0dd83fb95..a9ef92095 100644
--- a/drivers/clk/mmp/clk-pxa168.c
+++ b/drivers/clk/mmp/clk-pxa168.c
@@ -92,15 +92,13 @@ void __init pxa168_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
return;
}
- clk = clk_register_fixed_rate(NULL, "clk32", NULL, CLK_IS_ROOT, 3200);
+ clk = clk_register_fixed_rate(NULL, "clk32", NULL, 0, 3200);
clk_register_clkdev(clk, "clk32", NULL);
- clk = clk_register_fixed_rate(NULL, "vctcxo", NULL, CLK_IS_ROOT,
- 26000000);
+ clk = clk_register_fixed_rate(NULL, "vctcxo", NULL, 0, 26000000);
clk_register_clkdev(clk, "vctcxo", NULL);
- clk = clk_register_fixed_rate(NULL, "pll1", NULL, CLK_IS_ROOT,
- 624000000);
+ clk = clk_register_fixed_rate(NULL, "pll1", NULL, 0, 624000000);
clk_register_clkdev(clk, "pll1", NULL);
clk = clk_register_fixed_factor(NULL, "pll1_2", "pll1",
diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c
index e1d2ce22c..a520cf770 100644
--- a/drivers/clk/mmp/clk-pxa910.c
+++ b/drivers/clk/mmp/clk-pxa910.c
@@ -97,15 +97,13 @@ void __init pxa910_clk_init(phys_addr_t mpmu_phys, phys_addr_t apmu_phys,
return;
}
- clk = clk_register_fixed_rate(NULL, "clk32", NULL, CLK_IS_ROOT, 3200);
+ clk = clk_register_fixed_rate(NULL, "clk32", NULL, 0, 3200);
clk_register_clkdev(clk, "clk32", NULL);
- clk = clk_register_fixed_rate(NULL, "vctcxo", NULL, CLK_IS_ROOT,
- 26000000);
+ clk = clk_register_fixed_rate(NULL, "vctcxo", NULL, 0, 26000000);
clk_register_clkdev(clk, "vctcxo", NULL);
- clk = clk_register_fixed_rate(NULL, "pll1", NULL, CLK_IS_ROOT,
- 624000000);
+ clk = clk_register_fixed_rate(NULL, "pll1", NULL, 0, 624000000);
clk_register_clkdev(clk, "pll1", NULL);
clk = clk_register_fixed_factor(NULL, "pll1_2", "pll1",
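
All of the mmp hunks above make the same substitution: CLK_IS_ROOT was a no-op hint that the clk framework deprecated and removed, since a root clock is already identified by having no parent. A minimal sketch of the resulting idiom (illustrative rate, not taken from the patch):

    /* A fixed-rate root clock: a NULL parent name and flags of 0 suffice. */
    struct clk *clk;

    clk = clk_register_fixed_rate(NULL, "vctcxo", NULL, 0, 26000000);
    if (IS_ERR(clk))
            pr_err("failed to register vctcxo: %ld\n", PTR_ERR(clk));
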
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
index eaee8f099..3165da77d 100644
--- a/drivers/clk/mvebu/Kconfig
+++ b/drivers/clk/mvebu/Kconfig
@@ -29,6 +29,12 @@ config ARMADA_XP_CLK
select MVEBU_CLK_COMMON
select MVEBU_CLK_CPU
+config ARMADA_AP806_SYSCON
+ bool
+
+config ARMADA_CP110_SYSCON
+ bool
+
config DOVE_CLK
bool
select MVEBU_CLK_COMMON
diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile
index 886611548..7172ef656 100644
--- a/drivers/clk/mvebu/Makefile
+++ b/drivers/clk/mvebu/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_ARMADA_375_CLK) += armada-375.o
obj-$(CONFIG_ARMADA_38X_CLK) += armada-38x.o
obj-$(CONFIG_ARMADA_39X_CLK) += armada-39x.o
obj-$(CONFIG_ARMADA_XP_CLK) += armada-xp.o
+obj-$(CONFIG_ARMADA_AP806_SYSCON) += ap806-system-controller.o
+obj-$(CONFIG_ARMADA_CP110_SYSCON) += cp110-system-controller.o
obj-$(CONFIG_DOVE_CLK) += dove.o dove-divider.o
obj-$(CONFIG_KIRKWOOD_CLK) += kirkwood.o
obj-$(CONFIG_ORION_CLK) += orion.o
diff --git a/drivers/clk/mvebu/ap806-system-controller.c b/drivers/clk/mvebu/ap806-system-controller.c
new file mode 100644
index 000000000..02023baf8
--- /dev/null
+++ b/drivers/clk/mvebu/ap806-system-controller.c
@@ -0,0 +1,168 @@
+/*
+ * Marvell Armada AP806 System Controller
+ *
+ * Copyright (C) 2016 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "ap806-system-controller: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define AP806_SAR_REG 0x400
+#define AP806_SAR_CLKFREQ_MODE_MASK 0x1f
+
+#define AP806_CLK_NUM 4
+
+static struct clk *ap806_clks[AP806_CLK_NUM];
+
+static struct clk_onecell_data ap806_clk_data = {
+ .clks = ap806_clks,
+ .clk_num = AP806_CLK_NUM,
+};
+
+static int ap806_syscon_clk_probe(struct platform_device *pdev)
+{
+ unsigned int freq_mode, cpuclk_freq;
+ const char *name, *fixedclk_name;
+ struct device_node *np = pdev->dev.of_node;
+ struct regmap *regmap;
+ u32 reg;
+ int ret;
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "cannot get regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ ret = regmap_read(regmap, AP806_SAR_REG, &reg);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot read from regmap\n");
+ return ret;
+ }
+
+ freq_mode = reg & AP806_SAR_CLKFREQ_MODE_MASK;
+ switch (freq_mode) {
+ case 0x0 ... 0x5:
+ cpuclk_freq = 2000;
+ break;
+ case 0x6 ... 0xB:
+ cpuclk_freq = 1800;
+ break;
+ case 0xC ... 0x11:
+ cpuclk_freq = 1600;
+ break;
+ case 0x12 ... 0x16:
+ cpuclk_freq = 1400;
+ break;
+ case 0x17 ... 0x19:
+ cpuclk_freq = 1300;
+ break;
+ default:
+ dev_err(&pdev->dev, "invalid SAR value\n");
+ return -EINVAL;
+ }
+
+ /* Convert to hertz */
+ cpuclk_freq *= 1000 * 1000;
+
+ /* CPU clocks depend on the Sample At Reset configuration */
+ of_property_read_string_index(np, "clock-output-names",
+ 0, &name);
+ ap806_clks[0] = clk_register_fixed_rate(&pdev->dev, name, NULL,
+ 0, cpuclk_freq);
+ if (IS_ERR(ap806_clks[0])) {
+ ret = PTR_ERR(ap806_clks[0]);
+ goto fail0;
+ }
+
+ of_property_read_string_index(np, "clock-output-names",
+ 1, &name);
+ ap806_clks[1] = clk_register_fixed_rate(&pdev->dev, name, NULL, 0,
+ cpuclk_freq);
+ if (IS_ERR(ap806_clks[1])) {
+ ret = PTR_ERR(ap806_clks[1]);
+ goto fail1;
+ }
+
+ /* Fixed clock is always 1200 MHz */
+ of_property_read_string_index(np, "clock-output-names",
+ 2, &fixedclk_name);
+ ap806_clks[2] = clk_register_fixed_rate(&pdev->dev, fixedclk_name, NULL,
+ 0, 1200 * 1000 * 1000);
+ if (IS_ERR(ap806_clks[2])) {
+ ret = PTR_ERR(ap806_clks[2]);
+ goto fail2;
+ }
+
+ /* MSS Clock is fixed clock divided by 6 */
+ of_property_read_string_index(np, "clock-output-names",
+ 3, &name);
+ ap806_clks[3] = clk_register_fixed_factor(NULL, name, fixedclk_name,
+ 0, 1, 6);
+ if (IS_ERR(ap806_clks[3])) {
+ ret = PTR_ERR(ap806_clks[3]);
+ goto fail3;
+ }
+
+ ret = of_clk_add_provider(np, of_clk_src_onecell_get, &ap806_clk_data);
+ if (ret)
+ goto fail_clk_add;
+
+ return 0;
+
+fail_clk_add:
+ clk_unregister_fixed_factor(ap806_clks[3]);
+fail3:
+ clk_unregister_fixed_rate(ap806_clks[2]);
+fail2:
+ clk_unregister_fixed_rate(ap806_clks[1]);
+fail1:
+ clk_unregister_fixed_rate(ap806_clks[0]);
+fail0:
+ return ret;
+}
+
+static int ap806_syscon_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ clk_unregister_fixed_factor(ap806_clks[3]);
+ clk_unregister_fixed_rate(ap806_clks[2]);
+ clk_unregister_fixed_rate(ap806_clks[1]);
+ clk_unregister_fixed_rate(ap806_clks[0]);
+
+ return 0;
+}
+
+static const struct of_device_id ap806_syscon_of_match[] = {
+ { .compatible = "marvell,ap806-system-controller", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ap806_syscon_of_match);
+
+static struct platform_driver ap806_syscon_driver = {
+ .probe = ap806_syscon_clk_probe,
+ .remove = ap806_syscon_clk_remove,
+ .driver = {
+ .name = "marvell-ap806-system-controller",
+ .of_match_table = ap806_syscon_of_match,
+ },
+};
+
+module_platform_driver(ap806_syscon_driver);
+
+MODULE_DESCRIPTION("Marvell AP806 System Controller driver");
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_LICENSE("GPL");
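
A worked example of the Sample-At-Reset decode in ap806_syscon_clk_probe() above, with a hypothetical SAR value: a mode field of 0x13 falls in the 0x12...0x16 range, so the driver reports 1400 MHz.

    unsigned int freq_mode = 0x13;   /* reg & AP806_SAR_CLKFREQ_MODE_MASK (hypothetical) */
    unsigned int cpuclk_freq = 1400; /* MHz, from the 0x12 ... 0x16 case */

    cpuclk_freq *= 1000 * 1000;      /* 1400000000 Hz */
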
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
new file mode 100644
index 000000000..7fa42d6b2
--- /dev/null
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -0,0 +1,406 @@
+/*
+ * Marvell Armada CP110 System Controller
+ *
+ * Copyright (C) 2016 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/*
+ * CP110 has 5 core clocks:
+ *
+ * - APLL (1 GHz)
+ * - PPv2 core (1/3 APLL)
+ * - EIP (1/2 APLL)
+ * - Core (1/2 EIP)
+ *
+ * - NAND clock, which is either:
+ * - Equal to the core clock
+ * - 2/5 APLL
+ *
+ * CP110 has 32 gatable clocks, for the various peripherals in the
+ * IP. They have fairly complicated parent/child relationships.
+ */
+
+#define pr_fmt(fmt) "cp110-system-controller: " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define CP110_PM_CLOCK_GATING_REG 0x220
+#define CP110_NAND_FLASH_CLK_CTRL_REG 0x700
+#define NF_CLOCK_SEL_400_MASK BIT(0)
+
+enum {
+ CP110_CLK_TYPE_CORE,
+ CP110_CLK_TYPE_GATABLE,
+};
+
+#define CP110_MAX_CORE_CLOCKS 5
+#define CP110_MAX_GATABLE_CLOCKS 32
+
+#define CP110_CLK_NUM \
+ (CP110_MAX_CORE_CLOCKS + CP110_MAX_GATABLE_CLOCKS)
+
+#define CP110_CORE_APLL 0
+#define CP110_CORE_PPV2 1
+#define CP110_CORE_EIP 2
+#define CP110_CORE_CORE 3
+#define CP110_CORE_NAND 4
+
+/* A number of gatable clocks need special handling */
+#define CP110_GATE_AUDIO 0
+#define CP110_GATE_COMM_UNIT 1
+#define CP110_GATE_NAND 2
+#define CP110_GATE_PPV2 3
+#define CP110_GATE_SDIO 4
+#define CP110_GATE_XOR1 7
+#define CP110_GATE_XOR0 8
+#define CP110_GATE_PCIE_X1_0 11
+#define CP110_GATE_PCIE_X1_1 12
+#define CP110_GATE_PCIE_X4 13
+#define CP110_GATE_PCIE_XOR 14
+#define CP110_GATE_SATA 15
+#define CP110_GATE_SATA_USB 16
+#define CP110_GATE_MAIN 17
+#define CP110_GATE_SDMMC 18
+#define CP110_GATE_SLOW_IO 21
+#define CP110_GATE_USB3H0 22
+#define CP110_GATE_USB3H1 23
+#define CP110_GATE_USB3DEV 24
+#define CP110_GATE_EIP150 25
+#define CP110_GATE_EIP197 26
+
+static struct clk *cp110_clks[CP110_CLK_NUM];
+
+static struct clk_onecell_data cp110_clk_data = {
+ .clks = cp110_clks,
+ .clk_num = CP110_CLK_NUM,
+};
+
+struct cp110_gate_clk {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u8 bit_idx;
+};
+
+#define to_cp110_gate_clk(clk) container_of(clk, struct cp110_gate_clk, hw)
+
+static int cp110_gate_enable(struct clk_hw *hw)
+{
+ struct cp110_gate_clk *gate = to_cp110_gate_clk(hw);
+
+ regmap_update_bits(gate->regmap, CP110_PM_CLOCK_GATING_REG,
+ BIT(gate->bit_idx), BIT(gate->bit_idx));
+
+ return 0;
+}
+
+static void cp110_gate_disable(struct clk_hw *hw)
+{
+ struct cp110_gate_clk *gate = to_cp110_gate_clk(hw);
+
+ regmap_update_bits(gate->regmap, CP110_PM_CLOCK_GATING_REG,
+ BIT(gate->bit_idx), 0);
+}
+
+static int cp110_gate_is_enabled(struct clk_hw *hw)
+{
+ struct cp110_gate_clk *gate = to_cp110_gate_clk(hw);
+ u32 val;
+
+ regmap_read(gate->regmap, CP110_PM_CLOCK_GATING_REG, &val);
+
+ return val & BIT(gate->bit_idx);
+}
+
+static const struct clk_ops cp110_gate_ops = {
+ .enable = cp110_gate_enable,
+ .disable = cp110_gate_disable,
+ .is_enabled = cp110_gate_is_enabled,
+};
+
+static struct clk *cp110_register_gate(const char *name,
+ const char *parent_name,
+ struct regmap *regmap, u8 bit_idx)
+{
+ struct cp110_gate_clk *gate;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &cp110_gate_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ gate->regmap = regmap;
+ gate->bit_idx = bit_idx;
+ gate->hw.init = &init;
+
+ clk = clk_register(NULL, &gate->hw);
+ if (IS_ERR(clk))
+ kfree(gate);
+
+ return clk;
+}
+
+static void cp110_unregister_gate(struct clk *clk)
+{
+ struct clk_hw *hw;
+
+ hw = __clk_get_hw(clk);
+ if (!hw)
+ return;
+
+ clk_unregister(clk);
+ kfree(to_cp110_gate_clk(hw));
+}
+
+static struct clk *cp110_of_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct clk_onecell_data *clk_data = data;
+ unsigned int type = clkspec->args[0];
+ unsigned int idx = clkspec->args[1];
+
+ if (type == CP110_CLK_TYPE_CORE) {
+ if (idx >= CP110_MAX_CORE_CLOCKS)
+ return ERR_PTR(-EINVAL);
+ return clk_data->clks[idx];
+ } else if (type == CP110_CLK_TYPE_GATABLE) {
+ if (idx >= CP110_MAX_GATABLE_CLOCKS)
+ return ERR_PTR(-EINVAL);
+ return clk_data->clks[CP110_MAX_CORE_CLOCKS + idx];
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int cp110_syscon_clk_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ struct device_node *np = pdev->dev.of_node;
+ const char *ppv2_name, *apll_name, *core_name, *eip_name, *nand_name;
+ struct clk *clk;
+ u32 nand_clk_ctrl;
+ int i, ret;
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = regmap_read(regmap, CP110_NAND_FLASH_CLK_CTRL_REG,
+ &nand_clk_ctrl);
+ if (ret)
+ return ret;
+
+ /* Register the APLL, which is the root of the clk tree */
+ of_property_read_string_index(np, "core-clock-output-names",
+ CP110_CORE_APLL, &apll_name);
+ clk = clk_register_fixed_rate(NULL, apll_name, NULL, 0,
+ 1000 * 1000 * 1000);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto fail0;
+ }
+
+ cp110_clks[CP110_CORE_APLL] = clk;
+
+ /* PPv2 is APLL/3 */
+ of_property_read_string_index(np, "core-clock-output-names",
+ CP110_CORE_PPV2, &ppv2_name);
+ clk = clk_register_fixed_factor(NULL, ppv2_name, apll_name, 0, 1, 3);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto fail1;
+ }
+
+ cp110_clks[CP110_CORE_PPV2] = clk;
+
+ /* EIP clock is APLL/2 */
+ of_property_read_string_index(np, "core-clock-output-names",
+ CP110_CORE_EIP, &eip_name);
+ clk = clk_register_fixed_factor(NULL, eip_name, apll_name, 0, 1, 2);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto fail2;
+ }
+
+ cp110_clks[CP110_CORE_EIP] = clk;
+
+ /* Core clock is EIP/2 */
+ of_property_read_string_index(np, "core-clock-output-names",
+ CP110_CORE_CORE, &core_name);
+ clk = clk_register_fixed_factor(NULL, core_name, eip_name, 0, 1, 2);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto fail3;
+ }
+
+ cp110_clks[CP110_CORE_CORE] = clk;
+
+ /* NAND can be either APLL/2.5 or core clock */
+ of_property_read_string_index(np, "core-clock-output-names",
+ CP110_CORE_NAND, &nand_name);
+ if (nand_clk_ctrl & NF_CLOCK_SEL_400_MASK)
+ clk = clk_register_fixed_factor(NULL, nand_name,
+ apll_name, 0, 2, 5);
+ else
+ clk = clk_register_fixed_factor(NULL, nand_name,
+ core_name, 0, 1, 1);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto fail4;
+ }
+
+ cp110_clks[CP110_CORE_NAND] = clk;
+
+ for (i = 0; i < CP110_MAX_GATABLE_CLOCKS; i++) {
+ const char *parent, *name;
+ int ret;
+
+ ret = of_property_read_string_index(np,
+ "gate-clock-output-names",
+ i, &name);
+ /* Reached the end of the list? */
+ if (ret < 0)
+ break;
+
+ if (!strcmp(name, "none"))
+ continue;
+
+ switch (i) {
+ case CP110_GATE_AUDIO:
+ case CP110_GATE_COMM_UNIT:
+ case CP110_GATE_EIP150:
+ case CP110_GATE_EIP197:
+ case CP110_GATE_SLOW_IO:
+ of_property_read_string_index(np,
+ "gate-clock-output-names",
+ CP110_GATE_MAIN, &parent);
+ break;
+ case CP110_GATE_NAND:
+ parent = nand_name;
+ break;
+ case CP110_GATE_PPV2:
+ parent = ppv2_name;
+ break;
+ case CP110_GATE_SDIO:
+ of_property_read_string_index(np,
+ "gate-clock-output-names",
+ CP110_GATE_SDMMC, &parent);
+ break;
+ case CP110_GATE_XOR1:
+ case CP110_GATE_XOR0:
+ case CP110_GATE_PCIE_X1_0:
+ case CP110_GATE_PCIE_X1_1:
+ case CP110_GATE_PCIE_X4:
+ of_property_read_string_index(np,
+ "gate-clock-output-names",
+ CP110_GATE_PCIE_XOR, &parent);
+ break;
+ case CP110_GATE_SATA:
+ case CP110_GATE_USB3H0:
+ case CP110_GATE_USB3H1:
+ case CP110_GATE_USB3DEV:
+ of_property_read_string_index(np,
+ "gate-clock-output-names",
+ CP110_GATE_SATA_USB, &parent);
+ break;
+ default:
+ parent = core_name;
+ break;
+ }
+
+ clk = cp110_register_gate(name, parent, regmap, i);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto fail_gate;
+ }
+
+ cp110_clks[CP110_MAX_CORE_CLOCKS + i] = clk;
+ }
+
+ ret = of_clk_add_provider(np, cp110_of_clk_get, &cp110_clk_data);
+ if (ret)
+ goto fail_clk_add;
+
+ return 0;
+
+fail_clk_add:
+fail_gate:
+ for (i = 0; i < CP110_MAX_GATABLE_CLOCKS; i++) {
+ clk = cp110_clks[CP110_MAX_CORE_CLOCKS + i];
+
+ if (clk)
+ cp110_unregister_gate(clk);
+ }
+
+ clk_unregister_fixed_factor(cp110_clks[CP110_CORE_NAND]);
+fail4:
+ clk_unregister_fixed_factor(cp110_clks[CP110_CORE_CORE]);
+fail3:
+ clk_unregister_fixed_factor(cp110_clks[CP110_CORE_EIP]);
+fail2:
+ clk_unregister_fixed_factor(cp110_clks[CP110_CORE_PPV2]);
+fail1:
+ clk_unregister_fixed_rate(cp110_clks[CP110_CORE_APLL]);
+fail0:
+ return ret;
+}
+
+static int cp110_syscon_clk_remove(struct platform_device *pdev)
+{
+ int i;
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ for (i = 0; i < CP110_MAX_GATABLE_CLOCKS; i++) {
+ struct clk *clk = cp110_clks[CP110_MAX_CORE_CLOCKS + i];
+
+ if (clk)
+ cp110_unregister_gate(clk);
+ }
+
+ clk_unregister_fixed_factor(cp110_clks[CP110_CORE_NAND]);
+ clk_unregister_fixed_factor(cp110_clks[CP110_CORE_CORE]);
+ clk_unregister_fixed_factor(cp110_clks[CP110_CORE_EIP]);
+ clk_unregister_fixed_factor(cp110_clks[CP110_CORE_PPV2]);
+ clk_unregister_fixed_rate(cp110_clks[CP110_CORE_APLL]);
+
+ return 0;
+}
+
+static const struct of_device_id cp110_syscon_of_match[] = {
+ { .compatible = "marvell,cp110-system-controller0", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cp110_syscon_of_match);
+
+static struct platform_driver cp110_syscon_driver = {
+ .probe = cp110_syscon_clk_probe,
+ .remove = cp110_syscon_clk_remove,
+ .driver = {
+ .name = "marvell-cp110-system-controller0",
+ .of_match_table = cp110_syscon_of_match,
+ },
+};
+
+module_platform_driver(cp110_syscon_driver);
+
+MODULE_DESCRIPTION("Marvell CP110 System Controller 0 driver");
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_LICENSE("GPL");
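
Consumers address these clocks with a two-cell specifier, <type index>, which cp110_of_clk_get() above flattens into a single array. A sketch of the mapping for a hypothetical gatable-clock index:

    /* Type CP110_CLK_TYPE_GATABLE, index 18 (the SDMMC gate above),
     * maps to the slot after the five core clocks:
     */
    struct clk *clk = cp110_clks[CP110_MAX_CORE_CLOCKS + 18];   /* slot 23 */
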
diff --git a/drivers/clk/nxp/clk-lpc18xx-creg.c b/drivers/clk/nxp/clk-lpc18xx-creg.c
index d44b61afa..9e35749da 100644
--- a/drivers/clk/nxp/clk-lpc18xx-creg.c
+++ b/drivers/clk/nxp/clk-lpc18xx-creg.c
@@ -147,6 +147,7 @@ static struct clk *clk_register_creg_clk(struct device *dev,
init.name = creg_clk->name;
init.parent_names = parent_name;
init.num_parents = 1;
+ init.flags = 0;
creg_clk->reg = syscon;
creg_clk->hw.init = &init;
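
The one-line addition above matters because init is a stack variable: any clk_init_data field left unassigned holds stack garbage by the time clk_register() reads it. A sketch of an idiom that avoids this whole class of bug (hypothetical name, same function shape assumed):

    struct clk_init_data init = { };        /* zero everything up front */

    init.name = "creg-clk";                 /* illustrative */
    init.num_parents = 1;
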
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 6df7ff36b..847dd9dad 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -1279,21 +1279,6 @@ static struct clk_branch mmss_misc_cxo_clk = {
},
};
-static struct clk_branch mmss_mmagic_axi_clk = {
- .halt_reg = 0x506c,
- .clkr = {
- .enable_reg = 0x506c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_mmagic_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch mmss_mmagic_maxi_clk = {
.halt_reg = 0x5074,
.clkr = {
@@ -1579,21 +1564,6 @@ static struct clk_branch smmu_video_axi_clk = {
},
};
-static struct clk_branch mmagic_bimc_axi_clk = {
- .halt_reg = 0x5294,
- .clkr = {
- .enable_reg = 0x5294,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmagic_bimc_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch mmagic_bimc_noc_cfg_ahb_clk = {
.halt_reg = 0x5298,
.clkr = {
@@ -3121,7 +3091,6 @@ static struct clk_regmap *mmcc_msm8996_clocks[] = {
[MMSS_MMAGIC_CFG_AHB_CLK] = &mmss_mmagic_cfg_ahb_clk.clkr,
[MMSS_MISC_AHB_CLK] = &mmss_misc_ahb_clk.clkr,
[MMSS_MISC_CXO_CLK] = &mmss_misc_cxo_clk.clkr,
- [MMSS_MMAGIC_AXI_CLK] = &mmss_mmagic_axi_clk.clkr,
[MMSS_MMAGIC_MAXI_CLK] = &mmss_mmagic_maxi_clk.clkr,
[MMAGIC_CAMSS_AXI_CLK] = &mmagic_camss_axi_clk.clkr,
[MMAGIC_CAMSS_NOC_CFG_AHB_CLK] = &mmagic_camss_noc_cfg_ahb_clk.clkr,
@@ -3141,7 +3110,6 @@ static struct clk_regmap *mmcc_msm8996_clocks[] = {
[MMAGIC_VIDEO_NOC_CFG_AHB_CLK] = &mmagic_video_noc_cfg_ahb_clk.clkr,
[SMMU_VIDEO_AHB_CLK] = &smmu_video_ahb_clk.clkr,
[SMMU_VIDEO_AXI_CLK] = &smmu_video_axi_clk.clkr,
- [MMAGIC_BIMC_AXI_CLK] = &mmagic_bimc_axi_clk.clkr,
[MMAGIC_BIMC_NOC_CFG_AHB_CLK] = &mmagic_bimc_noc_cfg_ahb_clk.clkr,
[GPU_GX_GFX3D_CLK] = &gpu_gx_gfx3d_clk.clkr,
[GPU_GX_RBBMTIMER_CLK] = &gpu_gx_rbbmtimer_clk.clkr,
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
new file mode 100644
index 000000000..2115ce410
--- /dev/null
+++ b/drivers/clk/renesas/Kconfig
@@ -0,0 +1,16 @@
+config CLK_RENESAS_CPG_MSSR
+ bool
+ default y if ARCH_R8A7795
+
+config CLK_RENESAS_CPG_MSTP
+ bool
+ default y if ARCH_R7S72100
+ default y if ARCH_R8A73A4
+ default y if ARCH_R8A7740
+ default y if ARCH_R8A7778
+ default y if ARCH_R8A7779
+ default y if ARCH_R8A7790
+ default y if ARCH_R8A7791
+ default y if ARCH_R8A7793
+ default y if ARCH_R8A7794
+ default y if ARCH_SH73A0
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 7e2579b30..ead8bb843 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -1,13 +1,15 @@
obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o
-obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o clk-mstp.o
-obj-$(CONFIG_ARCH_R8A73A4) += clk-r8a73a4.o clk-mstp.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7740) += clk-r8a7740.o clk-mstp.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7778) += clk-r8a7778.o clk-mstp.o
-obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o clk-mstp.o
-obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o clk-mstp.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o clk-mstp.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7793) += clk-rcar-gen2.o clk-mstp.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7794) += clk-rcar-gen2.o clk-mstp.o clk-div6.o
-obj-$(CONFIG_ARCH_R8A7795) += renesas-cpg-mssr.o \
- r8a7795-cpg-mssr.o clk-div6.o
-obj-$(CONFIG_ARCH_SH73A0) += clk-sh73a0.o clk-mstp.o clk-div6.o
+obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o
+obj-$(CONFIG_ARCH_R8A73A4) += clk-r8a73a4.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7740) += clk-r8a7740.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7778) += clk-r8a7778.o
+obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o
+obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7793) += clk-rcar-gen2.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7794) += clk-rcar-gen2.o clk-div6.o
+obj-$(CONFIG_ARCH_R8A7795) += r8a7795-cpg-mssr.o
+obj-$(CONFIG_ARCH_SH73A0) += clk-sh73a0.o clk-div6.o
+
+obj-$(CONFIG_CLK_RENESAS_CPG_MSSR) += renesas-cpg-mssr.o clk-div6.o
+obj-$(CONFIG_CLK_RENESAS_CPG_MSTP) += clk-mstp.o
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 3d44e183a..5093a2506 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -243,9 +243,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np)
}
CLK_OF_DECLARE(cpg_mstp_clks, "renesas,cpg-mstp-clocks", cpg_mstp_clocks_init);
-
-#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
-int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev)
+int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
struct device_node *np = dev->of_node;
struct of_phandle_args clkspec;
@@ -297,7 +295,7 @@ fail_put:
return error;
}
-void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev)
+void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
if (!list_empty(&dev->power.subsys_data->clock_list))
pm_clk_destroy(dev);
@@ -318,12 +316,10 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
return;
pd->name = np->name;
-
pd->flags = GENPD_FLAG_PM_CLK;
- pm_genpd_init(pd, &simple_qos_governor, false);
pd->attach_dev = cpg_mstp_attach_dev;
pd->detach_dev = cpg_mstp_detach_dev;
+ pm_genpd_init(pd, &pm_domain_always_on_gov, false);
of_genpd_add_provider_simple(np, pd);
}
-#endif /* !CONFIG_PM_GENERIC_DOMAINS_OF */
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index b2198aef5..ca5519c58 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -13,6 +13,7 @@
*/
#include <linux/bug.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -26,6 +27,7 @@
#include "renesas-cpg-mssr.h"
+#define CPG_RCKCR 0x240
enum clk_ids {
/* Core Clock Outputs exported to DT */
@@ -50,6 +52,7 @@ enum clk_ids {
CLK_S3,
CLK_SDSRC,
CLK_SSPSRC,
+ CLK_RINT,
/* Module Clocks */
MOD_CLK_BASE
@@ -63,8 +66,12 @@ enum r8a7795_clk_types {
CLK_TYPE_GEN3_PLL3,
CLK_TYPE_GEN3_PLL4,
CLK_TYPE_GEN3_SD,
+ CLK_TYPE_GEN3_R,
};
+#define DEF_GEN3_SD(_name, _id, _parent, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset)
+
static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
@@ -102,10 +109,10 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A7795_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A7795_CLK_S3D4, CLK_S3, 4, 1),
- DEF_SD("sd0", R8A7795_CLK_SD0, CLK_PLL1_DIV2, 0x0074),
- DEF_SD("sd1", R8A7795_CLK_SD1, CLK_PLL1_DIV2, 0x0078),
- DEF_SD("sd2", R8A7795_CLK_SD2, CLK_PLL1_DIV2, 0x0268),
- DEF_SD("sd3", R8A7795_CLK_SD3, CLK_PLL1_DIV2, 0x026c),
+ DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, CLK_PLL1_DIV2, 0x0074),
+ DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, CLK_PLL1_DIV2, 0x0078),
+ DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, CLK_PLL1_DIV2, 0x0268),
+ DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_PLL1_DIV2, 0x026c),
DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1),
@@ -113,6 +120,12 @@ static const struct cpg_core_clk r8a7795_core_clks[] __initconst = {
DEF_DIV6P1("mso", R8A7795_CLK_MSO, CLK_PLL1_DIV4, 0x014),
DEF_DIV6P1("hdmi", R8A7795_CLK_HDMI, CLK_PLL1_DIV2, 0x250),
DEF_DIV6P1("canfd", R8A7795_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
+ DEF_DIV6P1("csi0", R8A7795_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
+
+ DEF_DIV6_RO("osc", R8A7795_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8),
+ DEF_DIV6_RO("r_int", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
+
+ DEF_BASE("r", R8A7795_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT),
};
static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
@@ -139,6 +152,7 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("usb3-if0", 328, R8A7795_CLK_S3D1),
DEF_MOD("usb-dmac0", 330, R8A7795_CLK_S3D1),
DEF_MOD("usb-dmac1", 331, R8A7795_CLK_S3D1),
+ DEF_MOD("rwdt0", 402, R8A7795_CLK_R),
DEF_MOD("intc-ex", 407, R8A7795_CLK_CP),
DEF_MOD("intc-ap", 408, R8A7795_CLK_S3D1),
DEF_MOD("audmac0", 502, R8A7795_CLK_S3D4),
@@ -148,6 +162,7 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("hscif2", 518, R8A7795_CLK_S3D1),
DEF_MOD("hscif1", 519, R8A7795_CLK_S3D1),
DEF_MOD("hscif0", 520, R8A7795_CLK_S3D1),
+ DEF_MOD("pwm", 523, R8A7795_CLK_S3D4),
DEF_MOD("fcpvd3", 600, R8A7795_CLK_S2D1),
DEF_MOD("fcpvd2", 601, R8A7795_CLK_S2D1),
DEF_MOD("fcpvd1", 602, R8A7795_CLK_S2D1),
@@ -176,6 +191,10 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("ehci1", 702, R8A7795_CLK_S3D4),
DEF_MOD("ehci0", 703, R8A7795_CLK_S3D4),
DEF_MOD("hsusb", 704, R8A7795_CLK_S3D4),
+ DEF_MOD("csi21", 713, R8A7795_CLK_CSI0),
+ DEF_MOD("csi20", 714, R8A7795_CLK_CSI0),
+ DEF_MOD("csi41", 715, R8A7795_CLK_CSI0),
+ DEF_MOD("csi40", 716, R8A7795_CLK_CSI0),
DEF_MOD("du3", 721, R8A7795_CLK_S2D1),
DEF_MOD("du2", 722, R8A7795_CLK_S2D1),
DEF_MOD("du1", 723, R8A7795_CLK_S2D1),
@@ -183,6 +202,14 @@ static const struct mssr_mod_clk r8a7795_mod_clks[] __initconst = {
DEF_MOD("lvds", 727, R8A7795_CLK_S2D1),
DEF_MOD("hdmi1", 728, R8A7795_CLK_HDMI),
DEF_MOD("hdmi0", 729, R8A7795_CLK_HDMI),
+ DEF_MOD("vin7", 804, R8A7795_CLK_S2D1),
+ DEF_MOD("vin6", 805, R8A7795_CLK_S2D1),
+ DEF_MOD("vin5", 806, R8A7795_CLK_S2D1),
+ DEF_MOD("vin4", 807, R8A7795_CLK_S2D1),
+ DEF_MOD("vin3", 808, R8A7795_CLK_S2D1),
+ DEF_MOD("vin2", 809, R8A7795_CLK_S2D1),
+ DEF_MOD("vin1", 810, R8A7795_CLK_S2D1),
+ DEF_MOD("vin0", 811, R8A7795_CLK_S2D1),
DEF_MOD("etheravb", 812, R8A7795_CLK_S3D2),
DEF_MOD("sata0", 815, R8A7795_CLK_S3D2),
DEF_MOD("gpio7", 905, R8A7795_CLK_CP),
@@ -578,6 +605,18 @@ struct clk * __init r8a7795_cpg_clk_register(struct device *dev,
case CLK_TYPE_GEN3_SD:
return cpg_sd_clk_register(core, base, __clk_get_name(parent));
+ case CLK_TYPE_GEN3_R:
+ /* RINT is the default; switch to EXTALR only if it is populated */
+ value = readl(base + CPG_RCKCR) & 0x3f;
+
+ if (clk_get_rate(clks[CLK_EXTALR])) {
+ parent = clks[CLK_EXTALR];
+ value |= BIT(15);
+ }
+
+ writel(value, base + CPG_RCKCR);
+ break;
+
default:
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 58e24b326..210cd744a 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/clk/renesas.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
@@ -253,7 +254,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
{
struct clk *clk = NULL, *parent;
struct device *dev = priv->dev;
- unsigned int id = core->id;
+ unsigned int id = core->id, div = core->div;
const char *parent_name;
WARN_DEBUG(id >= priv->num_core_clks);
@@ -266,6 +267,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
case CLK_TYPE_FF:
case CLK_TYPE_DIV6P1:
+ case CLK_TYPE_DIV6_RO:
WARN_DEBUG(core->parent >= priv->num_core_clks);
parent = priv->clks[core->parent];
if (IS_ERR(parent)) {
@@ -274,13 +276,18 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
}
parent_name = __clk_get_name(parent);
- if (core->type == CLK_TYPE_FF) {
- clk = clk_register_fixed_factor(NULL, core->name,
- parent_name, 0,
- core->mult, core->div);
- } else {
+
+ if (core->type == CLK_TYPE_DIV6_RO)
+ /* Multiply by the DIV6 register value */
+ div *= (readl(priv->base + core->offset) & 0x3f) + 1;
+
+ if (core->type == CLK_TYPE_DIV6P1) {
clk = cpg_div6_register(core->name, 1, &parent_name,
priv->base + core->offset);
+ } else {
+ clk = clk_register_fixed_factor(NULL, core->name,
+ parent_name, 0,
+ core->mult, div);
}
break;
@@ -375,8 +382,6 @@ fail:
kfree(clock);
}
-
-#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
struct cpg_mssr_clk_domain {
struct generic_pm_domain genpd;
struct device_node *np;
@@ -384,6 +389,8 @@ struct cpg_mssr_clk_domain {
unsigned int core_pm_clks[0];
};
+static struct cpg_mssr_clk_domain *cpg_mssr_clk_domain;
+
static bool cpg_mssr_is_pm_clk(const struct of_phandle_args *clkspec,
struct cpg_mssr_clk_domain *pd)
{
@@ -407,17 +414,20 @@ static bool cpg_mssr_is_pm_clk(const struct of_phandle_args *clkspec,
}
}
-static int cpg_mssr_attach_dev(struct generic_pm_domain *genpd,
- struct device *dev)
+int cpg_mssr_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
- struct cpg_mssr_clk_domain *pd =
- container_of(genpd, struct cpg_mssr_clk_domain, genpd);
+ struct cpg_mssr_clk_domain *pd = cpg_mssr_clk_domain;
struct device_node *np = dev->of_node;
struct of_phandle_args clkspec;
struct clk *clk;
int i = 0;
int error;
+ if (!pd) {
+ dev_dbg(dev, "CPG/MSSR clock domain not yet available\n");
+ return -EPROBE_DEFER;
+ }
+
while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
&clkspec)) {
if (cpg_mssr_is_pm_clk(&clkspec, pd))
@@ -457,8 +467,7 @@ fail_put:
return error;
}
-static void cpg_mssr_detach_dev(struct generic_pm_domain *genpd,
- struct device *dev)
+void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
if (!list_empty(&dev->power.subsys_data->clock_list))
pm_clk_destroy(dev);
@@ -484,22 +493,14 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
genpd = &pd->genpd;
genpd->name = np->name;
genpd->flags = GENPD_FLAG_PM_CLK;
- pm_genpd_init(genpd, &simple_qos_governor, false);
genpd->attach_dev = cpg_mssr_attach_dev;
genpd->detach_dev = cpg_mssr_detach_dev;
+ pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
+ cpg_mssr_clk_domain = pd;
of_genpd_add_provider_simple(np, genpd);
return 0;
}
-#else
-static inline int cpg_mssr_add_clk_domain(struct device *dev,
- const unsigned int *core_pm_clks,
- unsigned int num_core_pm_clks)
-{
- return 0;
-}
-#endif /* !CONFIG_PM_GENERIC_DOMAINS_OF */
-
static const struct of_device_id cpg_mssr_match[] = {
#ifdef CONFIG_ARCH_R8A7795
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 952b69572..0d1e3e811 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -37,6 +37,7 @@ enum clk_types {
CLK_TYPE_IN, /* External Clock Input */
CLK_TYPE_FF, /* Fixed Factor Clock */
CLK_TYPE_DIV6P1, /* DIV6 Clock with 1 parent clock */
+ CLK_TYPE_DIV6_RO, /* DIV6 Clock read only with extra divisor */
/* Custom definitions start here */
CLK_TYPE_CUSTOM,
@@ -53,9 +54,8 @@ enum clk_types {
DEF_BASE(_name, _id, CLK_TYPE_FF, _parent, .div = _div, .mult = _mult)
#define DEF_DIV6P1(_name, _id, _parent, _offset) \
DEF_BASE(_name, _id, CLK_TYPE_DIV6P1, _parent, .offset = _offset)
-#define DEF_SD(_name, _id, _parent, _offset) \
- DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset)
-
+#define DEF_DIV6_RO(_name, _id, _parent, _offset, _div) \
+ DEF_BASE(_name, _id, CLK_TYPE_DIV6_RO, _parent, .offset = _offset, .div = _div, .mult = 1)
/*
* Definitions of Module Clocks
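
The new read-only DIV6 type combines the static .div from DEF_DIV6_RO() with the live register, as the renesas-cpg-mssr.c hunk above shows: the effective divisor is the static one multiplied by the 6-bit register field plus one. A condensed sketch of that arithmetic, with names as used in the hunk:

    unsigned int div = core->div;   /* e.g. 32 for "r_int" */

    /* Effective divisor = static .div * (6-bit register field + 1). */
    div *= (readl(priv->base + core->offset) & 0x3f) + 1;
    /* The result is registered as a fixed-factor clock: parent / div. */
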
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 80b9a379b..f47a2fa96 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -15,3 +15,4 @@ obj-y += clk-rk3188.o
obj-y += clk-rk3228.o
obj-y += clk-rk3288.o
obj-y += clk-rk3368.o
+obj-y += clk-rk3399.o
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 4e73ed5ca..05b3d73bf 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -158,12 +158,16 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
writel(HIWORD_UPDATE(alt_div, reg_data->div_core_mask,
reg_data->div_core_shift) |
- HIWORD_UPDATE(1, 1, reg_data->mux_core_shift),
+ HIWORD_UPDATE(reg_data->mux_core_alt,
+ reg_data->mux_core_mask,
+ reg_data->mux_core_shift),
cpuclk->reg_base + reg_data->core_reg);
} else {
/* select alternate parent */
- writel(HIWORD_UPDATE(1, 1, reg_data->mux_core_shift),
- cpuclk->reg_base + reg_data->core_reg);
+ writel(HIWORD_UPDATE(reg_data->mux_core_alt,
+ reg_data->mux_core_mask,
+ reg_data->mux_core_shift),
+ cpuclk->reg_base + reg_data->core_reg);
}
spin_unlock_irqrestore(cpuclk->lock, flags);
@@ -198,7 +202,9 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
writel(HIWORD_UPDATE(0, reg_data->div_core_mask,
reg_data->div_core_shift) |
- HIWORD_UPDATE(0, 1, reg_data->mux_core_shift),
+ HIWORD_UPDATE(reg_data->mux_core_main,
+ reg_data->mux_core_mask,
+ reg_data->mux_core_shift),
cpuclk->reg_base + reg_data->core_reg);
if (ndata->old_rate > ndata->new_rate)
@@ -252,7 +258,7 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.parent_names = &parent_names[0];
+ init.parent_names = &parent_names[reg_data->mux_core_main];
init.num_parents = 1;
init.ops = &rockchip_cpuclk_ops;
@@ -270,10 +276,10 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
cpuclk->clk_nb.notifier_call = rockchip_cpuclk_notifier_cb;
cpuclk->hw.init = &init;
- cpuclk->alt_parent = __clk_lookup(parent_names[1]);
+ cpuclk->alt_parent = __clk_lookup(parent_names[reg_data->mux_core_alt]);
if (!cpuclk->alt_parent) {
- pr_err("%s: could not lookup alternate parent\n",
- __func__);
+ pr_err("%s: could not lookup alternate parent: (%d)\n",
+ __func__, reg_data->mux_core_alt);
ret = -EINVAL;
goto free_cpuclk;
}
@@ -285,10 +291,11 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
goto free_cpuclk;
}
- clk = __clk_lookup(parent_names[0]);
+ clk = __clk_lookup(parent_names[reg_data->mux_core_main]);
if (!clk) {
- pr_err("%s: could not lookup parent clock %s\n",
- __func__, parent_names[0]);
+ pr_err("%s: could not lookup parent clock: (%d) %s\n",
+ __func__, reg_data->mux_core_main,
+ parent_names[reg_data->mux_core_main]);
ret = -EINVAL;
goto free_alt_parent;
}
@@ -314,9 +321,9 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
}
cclk = clk_register(NULL, &cpuclk->hw);
- if (IS_ERR(clk)) {
+ if (IS_ERR(cclk)) {
pr_err("%s: could not register cpuclk %s\n", __func__, name);
- ret = PTR_ERR(clk);
+ ret = PTR_ERR(cclk);
goto free_rate_table;
}
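
The HIWORD_UPDATE() calls above rely on the Rockchip CRU register convention: the upper 16 bits of a write act as a write-enable mask for the lower 16, so a field can be changed without a read-modify-write. The macro (as defined in the driver's clk.h) and a worked value:

    #define HIWORD_UPDATE(val, mask, shift) \
                    ((val) << (shift) | (mask) << ((shift) + 16))

    /* Select alternate parent 1 at mux_core_shift 7, mask 0x1: */
    u32 val = HIWORD_UPDATE(1, 0x1, 7);     /* 0x00800080 */
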
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index e0dc7e834..077fcdc79 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -41,8 +41,6 @@ static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
#define ROCKCHIP_MMC_DEGREE_MASK 0x3
#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
-#define ROCKCHIP_MMC_INIT_STATE_RESET 0x1
-#define ROCKCHIP_MMC_INIT_STATE_SHIFT 1
#define PSECS_PER_SEC 1000000000000LL
@@ -123,7 +121,8 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
raw_value |= nineties;
- writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift), mmc_clock->reg);
+ writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift),
+ mmc_clock->reg);
pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
clk_hw_get_name(hw), degrees, delay_num,
@@ -153,6 +152,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
return ERR_PTR(-ENOMEM);
init.name = name;
+ init.flags = 0;
init.num_parents = num_parents;
init.parent_names = parent_names;
init.ops = &rockchip_mmc_clk_ops;
@@ -161,15 +161,6 @@ struct clk *rockchip_clk_register_mmc(const char *name,
mmc_clock->reg = reg;
mmc_clock->shift = shift;
- /*
- * Assert init_state to soft reset the CLKGEN
- * for mmc tuning phase and degree
- */
- if (mmc_clock->shift == ROCKCHIP_MMC_INIT_STATE_SHIFT)
- writel(HIWORD_UPDATE(ROCKCHIP_MMC_INIT_STATE_RESET,
- ROCKCHIP_MMC_INIT_STATE_RESET,
- mmc_clock->shift), mmc_clock->reg);
-
clk = clk_register(NULL, &mmc_clock->hw);
if (IS_ERR(clk))
kfree(mmc_clock);
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 5de797e34..db81e4541 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -46,6 +46,8 @@ struct rockchip_clk_pll {
const struct rockchip_pll_rate_table *rate_table;
unsigned int rate_count;
spinlock_t *lock;
+
+ struct rockchip_clk_provider *ctx;
};
#define to_rockchip_clk_pll(_hw) container_of(_hw, struct rockchip_clk_pll, hw)
@@ -90,15 +92,10 @@ static long rockchip_pll_round_rate(struct clk_hw *hw,
*/
static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
{
- struct regmap *grf = rockchip_clk_get_grf();
+ struct regmap *grf = pll->ctx->grf;
unsigned int val;
int delay = 24000000, ret;
- if (IS_ERR(grf)) {
- pr_err("%s: grf regmap not available\n", __func__);
- return PTR_ERR(grf);
- }
-
while (delay > 0) {
ret = regmap_read(grf, pll->lock_offset, &val);
if (ret) {
@@ -234,7 +231,7 @@ static int rockchip_rk3036_pll_set_params(struct rockchip_clk_pll *pll,
/* wait for the pll to lock */
ret = rockchip_pll_wait_lock(pll);
if (ret) {
- pr_warn("%s: pll update unsucessful, trying to restore old params\n",
+ pr_warn("%s: pll update unsuccessful, trying to restore old params\n",
__func__);
rockchip_rk3036_pll_set_params(pll, &cur);
}
@@ -250,17 +247,9 @@ static int rockchip_rk3036_pll_set_rate(struct clk_hw *hw, unsigned long drate,
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
const struct rockchip_pll_rate_table *rate;
- unsigned long old_rate = rockchip_rk3036_pll_recalc_rate(hw, prate);
- struct regmap *grf = rockchip_clk_get_grf();
- if (IS_ERR(grf)) {
- pr_debug("%s: grf regmap not available, aborting rate change\n",
- __func__);
- return PTR_ERR(grf);
- }
-
- pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
- __func__, __clk_get_name(hw->clk), old_rate, drate, prate);
+ pr_debug("%s: changing %s to %lu with a parent rate of %lu\n",
+ __func__, __clk_get_name(hw->clk), drate, prate);
/* Get required rate settings from table */
rate = rockchip_get_pll_settings(pll, drate);
@@ -473,7 +462,7 @@ static int rockchip_rk3066_pll_set_params(struct rockchip_clk_pll *pll,
/* wait for the pll to lock */
ret = rockchip_pll_wait_lock(pll);
if (ret) {
- pr_warn("%s: pll update unsucessful, trying to restore old params\n",
+ pr_warn("%s: pll update unsuccessful, trying to restore old params\n",
__func__);
rockchip_rk3066_pll_set_params(pll, &cur);
}
@@ -489,17 +478,9 @@ static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
const struct rockchip_pll_rate_table *rate;
- unsigned long old_rate = rockchip_rk3066_pll_recalc_rate(hw, prate);
- struct regmap *grf = rockchip_clk_get_grf();
- if (IS_ERR(grf)) {
- pr_debug("%s: grf regmap not available, aborting rate change\n",
- __func__);
- return PTR_ERR(grf);
- }
-
- pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
- __func__, clk_hw_get_name(hw), old_rate, drate, prate);
+ pr_debug("%s: changing %s to %lu with a parent rate of %lu\n",
+ __func__, clk_hw_get_name(hw), drate, prate);
/* Get required rate settings from table */
rate = rockchip_get_pll_settings(pll, drate);
@@ -563,11 +544,6 @@ static void rockchip_rk3066_pll_init(struct clk_hw *hw)
rate->no, cur.no, rate->nf, cur.nf, rate->nb, cur.nb);
if (rate->nr != cur.nr || rate->no != cur.no || rate->nf != cur.nf
|| rate->nb != cur.nb) {
- struct regmap *grf = rockchip_clk_get_grf();
-
- if (IS_ERR(grf))
- return;
-
pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
__func__, clk_hw_get_name(hw));
rockchip_rk3066_pll_set_params(pll, rate);
@@ -591,16 +567,277 @@ static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
.init = rockchip_rk3066_pll_init,
};
+/*
+ * PLL used in RK3399
+ */
+
+#define RK3399_PLLCON(i) ((i) * 0x4)
+#define RK3399_PLLCON0_FBDIV_MASK 0xfff
+#define RK3399_PLLCON0_FBDIV_SHIFT 0
+#define RK3399_PLLCON1_REFDIV_MASK 0x3f
+#define RK3399_PLLCON1_REFDIV_SHIFT 0
+#define RK3399_PLLCON1_POSTDIV1_MASK 0x7
+#define RK3399_PLLCON1_POSTDIV1_SHIFT 8
+#define RK3399_PLLCON1_POSTDIV2_MASK 0x7
+#define RK3399_PLLCON1_POSTDIV2_SHIFT 12
+#define RK3399_PLLCON2_FRAC_MASK 0xffffff
+#define RK3399_PLLCON2_FRAC_SHIFT 0
+#define RK3399_PLLCON2_LOCK_STATUS BIT(31)
+#define RK3399_PLLCON3_PWRDOWN BIT(0)
+#define RK3399_PLLCON3_DSMPD_MASK 0x1
+#define RK3399_PLLCON3_DSMPD_SHIFT 3
+
+static int rockchip_rk3399_pll_wait_lock(struct rockchip_clk_pll *pll)
+{
+ u32 pllcon;
+ int delay = 24000000;
+
+ /* poll the lock status in the rk3399 xPLLCON2 register */
+ while (delay > 0) {
+ pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(2));
+ if (pllcon & RK3399_PLLCON2_LOCK_STATUS)
+ return 0;
+
+ delay--;
+ }
+
+ pr_err("%s: timeout waiting for pll to lock\n", __func__);
+ return -ETIMEDOUT;
+}
+
+static void rockchip_rk3399_pll_get_params(struct rockchip_clk_pll *pll,
+ struct rockchip_pll_rate_table *rate)
+{
+ u32 pllcon;
+
+ pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(0));
+ rate->fbdiv = ((pllcon >> RK3399_PLLCON0_FBDIV_SHIFT)
+ & RK3399_PLLCON0_FBDIV_MASK);
+
+ pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(1));
+ rate->refdiv = ((pllcon >> RK3399_PLLCON1_REFDIV_SHIFT)
+ & RK3399_PLLCON1_REFDIV_MASK);
+ rate->postdiv1 = ((pllcon >> RK3399_PLLCON1_POSTDIV1_SHIFT)
+ & RK3399_PLLCON1_POSTDIV1_MASK);
+ rate->postdiv2 = ((pllcon >> RK3399_PLLCON1_POSTDIV2_SHIFT)
+ & RK3399_PLLCON1_POSTDIV2_MASK);
+
+ pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(2));
+ rate->frac = ((pllcon >> RK3399_PLLCON2_FRAC_SHIFT)
+ & RK3399_PLLCON2_FRAC_MASK);
+
+ pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(3));
+ rate->dsmpd = ((pllcon >> RK3399_PLLCON3_DSMPD_SHIFT)
+ & RK3399_PLLCON3_DSMPD_MASK);
+}
+
+static unsigned long rockchip_rk3399_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ struct rockchip_pll_rate_table cur;
+ u64 rate64 = prate;
+
+ rockchip_rk3399_pll_get_params(pll, &cur);
+
+ rate64 *= cur.fbdiv;
+ do_div(rate64, cur.refdiv);
+
+ if (cur.dsmpd == 0) {
+ /* fractional mode */
+ u64 frac_rate64 = prate * cur.frac;
+
+ do_div(frac_rate64, cur.refdiv);
+ rate64 += frac_rate64 >> 24;
+ }
+
+ do_div(rate64, cur.postdiv1);
+ do_div(rate64, cur.postdiv2);
+
+ return (unsigned long)rate64;
+}
+
+static int rockchip_rk3399_pll_set_params(struct rockchip_clk_pll *pll,
+ const struct rockchip_pll_rate_table *rate)
+{
+ const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
+ struct clk_mux *pll_mux = &pll->pll_mux;
+ struct rockchip_pll_rate_table cur;
+ u32 pllcon;
+ int rate_change_remuxed = 0;
+ int cur_parent;
+ int ret;
+
+ pr_debug("%s: rate settings for %lu fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n",
+ __func__, rate->rate, rate->fbdiv, rate->postdiv1, rate->refdiv,
+ rate->postdiv2, rate->dsmpd, rate->frac);
+
+ rockchip_rk3399_pll_get_params(pll, &cur);
+ cur.rate = 0;
+
+ cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
+ if (cur_parent == PLL_MODE_NORM) {
+ pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
+ rate_change_remuxed = 1;
+ }
+
+ /* update pll values */
+ writel_relaxed(HIWORD_UPDATE(rate->fbdiv, RK3399_PLLCON0_FBDIV_MASK,
+ RK3399_PLLCON0_FBDIV_SHIFT),
+ pll->reg_base + RK3399_PLLCON(0));
+
+ writel_relaxed(HIWORD_UPDATE(rate->refdiv, RK3399_PLLCON1_REFDIV_MASK,
+ RK3399_PLLCON1_REFDIV_SHIFT) |
+ HIWORD_UPDATE(rate->postdiv1, RK3399_PLLCON1_POSTDIV1_MASK,
+ RK3399_PLLCON1_POSTDIV1_SHIFT) |
+ HIWORD_UPDATE(rate->postdiv2, RK3399_PLLCON1_POSTDIV2_MASK,
+ RK3399_PLLCON1_POSTDIV2_SHIFT),
+ pll->reg_base + RK3399_PLLCON(1));
+
+ /* xPLL CON2 is not HIWORD_MASK */
+ pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(2));
+ pllcon &= ~(RK3399_PLLCON2_FRAC_MASK << RK3399_PLLCON2_FRAC_SHIFT);
+ pllcon |= rate->frac << RK3399_PLLCON2_FRAC_SHIFT;
+ writel_relaxed(pllcon, pll->reg_base + RK3399_PLLCON(2));
+
+ writel_relaxed(HIWORD_UPDATE(rate->dsmpd, RK3399_PLLCON3_DSMPD_MASK,
+ RK3399_PLLCON3_DSMPD_SHIFT),
+ pll->reg_base + RK3399_PLLCON(3));
+
+ /* wait for the pll to lock */
+ ret = rockchip_rk3399_pll_wait_lock(pll);
+ if (ret) {
+ pr_warn("%s: pll update unsuccessful, trying to restore old params\n",
+ __func__);
+ rockchip_rk3399_pll_set_params(pll, &cur);
+ }
+
+ if (rate_change_remuxed)
+ pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);
+
+ return ret;
+}
+
+static int rockchip_rk3399_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ const struct rockchip_pll_rate_table *rate;
+
+ pr_debug("%s: changing %s to %lu with a parent rate of %lu\n",
+ __func__, __clk_get_name(hw->clk), drate, prate);
+
+ /* Get required rate settings from table */
+ rate = rockchip_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, __clk_get_name(hw->clk));
+ return -EINVAL;
+ }
+
+ return rockchip_rk3399_pll_set_params(pll, rate);
+}
+
+static int rockchip_rk3399_pll_enable(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+ writel(HIWORD_UPDATE(0, RK3399_PLLCON3_PWRDOWN, 0),
+ pll->reg_base + RK3399_PLLCON(3));
+
+ return 0;
+}
+
+static void rockchip_rk3399_pll_disable(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+ writel(HIWORD_UPDATE(RK3399_PLLCON3_PWRDOWN,
+ RK3399_PLLCON3_PWRDOWN, 0),
+ pll->reg_base + RK3399_PLLCON(3));
+}
+
+static int rockchip_rk3399_pll_is_enabled(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ u32 pllcon = readl(pll->reg_base + RK3399_PLLCON(3));
+
+ return !(pllcon & RK3399_PLLCON3_PWRDOWN);
+}
+
+static void rockchip_rk3399_pll_init(struct clk_hw *hw)
+{
+ struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+ const struct rockchip_pll_rate_table *rate;
+ struct rockchip_pll_rate_table cur;
+ unsigned long drate;
+
+ if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
+ return;
+
+ drate = clk_hw_get_rate(hw);
+ rate = rockchip_get_pll_settings(pll, drate);
+
+ /* when there is no rate setting for the current rate, rely on clk_set_rate */
+ if (!rate)
+ return;
+
+ rockchip_rk3399_pll_get_params(pll, &cur);
+
+ pr_debug("%s: pll %s@%lu: Hz\n", __func__, __clk_get_name(hw->clk),
+ drate);
+ pr_debug("old - fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n",
+ cur.fbdiv, cur.postdiv1, cur.refdiv, cur.postdiv2,
+ cur.dsmpd, cur.frac);
+ pr_debug("new - fbdiv: %d, postdiv1: %d, refdiv: %d, postdiv2: %d, dsmpd: %d, frac: %d\n",
+ rate->fbdiv, rate->postdiv1, rate->refdiv, rate->postdiv2,
+ rate->dsmpd, rate->frac);
+
+ if (rate->fbdiv != cur.fbdiv || rate->postdiv1 != cur.postdiv1 ||
+ rate->refdiv != cur.refdiv || rate->postdiv2 != cur.postdiv2 ||
+ rate->dsmpd != cur.dsmpd || rate->frac != cur.frac) {
+ struct clk *parent = clk_get_parent(hw->clk);
+
+ if (!parent) {
+ pr_warn("%s: parent of %s not available\n",
+ __func__, __clk_get_name(hw->clk));
+ return;
+ }
+
+ pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
+ __func__, __clk_get_name(hw->clk));
+ rockchip_rk3399_pll_set_params(pll, rate);
+ }
+}
+
+static const struct clk_ops rockchip_rk3399_pll_clk_norate_ops = {
+ .recalc_rate = rockchip_rk3399_pll_recalc_rate,
+ .enable = rockchip_rk3399_pll_enable,
+ .disable = rockchip_rk3399_pll_disable,
+ .is_enabled = rockchip_rk3399_pll_is_enabled,
+};
+
+static const struct clk_ops rockchip_rk3399_pll_clk_ops = {
+ .recalc_rate = rockchip_rk3399_pll_recalc_rate,
+ .round_rate = rockchip_pll_round_rate,
+ .set_rate = rockchip_rk3399_pll_set_rate,
+ .enable = rockchip_rk3399_pll_enable,
+ .disable = rockchip_rk3399_pll_disable,
+ .is_enabled = rockchip_rk3399_pll_is_enabled,
+ .init = rockchip_rk3399_pll_init,
+};
+
/*
* Common registering of pll clocks
*/
-struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
+struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
+ enum rockchip_pll_type pll_type,
const char *name, const char *const *parent_names,
- u8 num_parents, void __iomem *base, int con_offset,
- int grf_lock_offset, int lock_shift, int mode_offset,
- int mode_shift, struct rockchip_pll_rate_table *rate_table,
- u8 clk_pll_flags, spinlock_t *lock)
+ u8 num_parents, int con_offset, int grf_lock_offset,
+ int lock_shift, int mode_offset, int mode_shift,
+ struct rockchip_pll_rate_table *rate_table,
+ u8 clk_pll_flags)
{
const char *pll_parents[3];
struct clk_init_data init;
@@ -624,14 +861,16 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
/* create the mux on top of the real pll */
pll->pll_mux_ops = &clk_mux_ops;
pll_mux = &pll->pll_mux;
- pll_mux->reg = base + mode_offset;
+ pll_mux->reg = ctx->reg_base + mode_offset;
pll_mux->shift = mode_shift;
pll_mux->mask = PLL_MODE_MASK;
pll_mux->flags = 0;
- pll_mux->lock = lock;
+ pll_mux->lock = &ctx->lock;
pll_mux->hw.init = &init;
- if (pll_type == pll_rk3036 || pll_type == pll_rk3066)
+ if (pll_type == pll_rk3036 ||
+ pll_type == pll_rk3066 ||
+ pll_type == pll_rk3399)
pll_mux->flags |= CLK_MUX_HIWORD_MASK;
/* the actual muxing is xin24m, pll-output, xin32k */
@@ -677,17 +916,23 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
switch (pll_type) {
case pll_rk3036:
- if (!pll->rate_table)
+ if (!pll->rate_table || IS_ERR(ctx->grf))
init.ops = &rockchip_rk3036_pll_clk_norate_ops;
else
init.ops = &rockchip_rk3036_pll_clk_ops;
break;
case pll_rk3066:
- if (!pll->rate_table)
+ if (!pll->rate_table || IS_ERR(ctx->grf))
init.ops = &rockchip_rk3066_pll_clk_norate_ops;
else
init.ops = &rockchip_rk3066_pll_clk_ops;
break;
+ case pll_rk3399:
+ if (!pll->rate_table)
+ init.ops = &rockchip_rk3399_pll_clk_norate_ops;
+ else
+ init.ops = &rockchip_rk3399_pll_clk_ops;
+ break;
default:
pr_warn("%s: Unknown pll type for pll clk %s\n",
__func__, name);
@@ -695,11 +940,12 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
pll->hw.init = &init;
pll->type = pll_type;
- pll->reg_base = base + con_offset;
+ pll->reg_base = ctx->reg_base + con_offset;
pll->lock_offset = grf_lock_offset;
pll->lock_shift = lock_shift;
pll->flags = clk_pll_flags;
- pll->lock = lock;
+ pll->lock = &ctx->lock;
+ pll->ctx = ctx;
pll_clk = clk_register(NULL, &pll->hw);
if (IS_ERR(pll_clk)) {
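
For the RK3399 ops added above, the integer-mode recalculation (dsmpd == 1, so no fractional term) reduces to rate = prate * fbdiv / refdiv / postdiv1 / postdiv2. Worked numbers, illustrative rather than taken from a rate table:

    u64 rate64 = 24000000ULL;       /* prate: 24 MHz crystal */

    rate64 *= 100;                  /* fbdiv */
    do_div(rate64, 1);              /* refdiv */
    do_div(rate64, 2);              /* postdiv1 */
    do_div(rate64, 1);              /* postdiv2 -> 1200000000 Hz */
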
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index 7cdb2d61f..924f560dc 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -113,7 +113,10 @@ static const struct rockchip_cpuclk_reg_data rk3036_cpuclk_data = {
.core_reg = RK2928_CLKSEL_CON(0),
.div_core_shift = 0,
.div_core_mask = 0x1f,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
.mux_core_shift = 7,
+ .mux_core_mask = 0x1,
};
PNAME(mux_pll_p) = { "xin24m", "xin24m" };
@@ -437,6 +440,7 @@ static const char *const rk3036_critical_clocks[] __initconst = {
static void __init rk3036_clk_init(struct device_node *np)
{
+ struct rockchip_clk_provider *ctx;
void __iomem *reg_base;
struct clk *clk;
@@ -446,22 +450,27 @@ static void __init rk3036_clk_init(struct device_node *np)
return;
}
- rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
if (IS_ERR(clk))
pr_warn("%s: could not register clock usb480m: %ld\n",
__func__, PTR_ERR(clk));
- rockchip_clk_register_plls(rk3036_pll_clks,
+ rockchip_clk_register_plls(ctx, rk3036_pll_clks,
ARRAY_SIZE(rk3036_pll_clks),
RK3036_GRF_SOC_STATUS0);
- rockchip_clk_register_branches(rk3036_clk_branches,
+ rockchip_clk_register_branches(ctx, rk3036_clk_branches,
ARRAY_SIZE(rk3036_clk_branches));
rockchip_clk_protect_critical(rk3036_critical_clocks,
ARRAY_SIZE(rk3036_critical_clocks));
- rockchip_clk_register_armclk(ARMCLK, "armclk",
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
&rk3036_cpuclk_data, rk3036_cpuclk_rates,
ARRAY_SIZE(rk3036_cpuclk_rates));
@@ -469,6 +478,8 @@ static void __init rk3036_clk_init(struct device_node *np)
rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0),
ROCKCHIP_SOFTRST_HIWORD_MASK);
- rockchip_register_restart_notifier(RK2928_GLB_SRST_FST, NULL);
+ rockchip_register_restart_notifier(ctx, RK2928_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
}
CLK_OF_DECLARE(rk3036_cru, "rockchip,rk3036-cru", rk3036_clk_init);
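
The rk3036 conversion above (and the rk3188/rk3228 ones that follow) all take the same shape: every helper now receives the rockchip_clk_provider context explicitly, and the OF provider is only exposed once all clocks exist. A condensed sketch, with placeholder array names and signatures as used in the hunks:

    ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
    rockchip_clk_register_plls(ctx, plls, nr_plls, grf_soc_status);
    rockchip_clk_register_branches(ctx, branches, nr_branches);
    rockchip_clk_of_add_provider(np, ctx);  /* last: consumers may now look up clocks */
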
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 40bab3901..d0e722a0e 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -155,7 +155,10 @@ static const struct rockchip_cpuclk_reg_data rk3066_cpuclk_data = {
.core_reg = RK2928_CLKSEL_CON(0),
.div_core_shift = 0,
.div_core_mask = 0x1f,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
.mux_core_shift = 8,
+ .mux_core_mask = 0x1,
};
#define RK3188_DIV_ACLK_CORE_MASK 0x7
@@ -191,7 +194,10 @@ static const struct rockchip_cpuclk_reg_data rk3188_cpuclk_data = {
.core_reg = RK2928_CLKSEL_CON(0),
.div_core_shift = 9,
.div_core_mask = 0x1f,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
.mux_core_shift = 8,
+ .mux_core_mask = 0x1,
};
PNAME(mux_pll_p) = { "xin24m", "xin32k" };
@@ -753,57 +759,75 @@ static const char *const rk3188_critical_clocks[] __initconst = {
"hclk_cpubus"
};
-static void __init rk3188_common_clk_init(struct device_node *np)
+static struct rockchip_clk_provider *__init rk3188_common_clk_init(struct device_node *np)
{
+ struct rockchip_clk_provider *ctx;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
if (!reg_base) {
pr_err("%s: could not map cru region\n", __func__);
- return;
+ return ERR_PTR(-ENOMEM);
}
- rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return ERR_PTR(-ENOMEM);
+ }
- rockchip_clk_register_branches(common_clk_branches,
+ rockchip_clk_register_branches(ctx, common_clk_branches,
ARRAY_SIZE(common_clk_branches));
rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0),
ROCKCHIP_SOFTRST_HIWORD_MASK);
- rockchip_register_restart_notifier(RK2928_GLB_SRST_FST, NULL);
+ rockchip_register_restart_notifier(ctx, RK2928_GLB_SRST_FST, NULL);
+
+ return ctx;
}
static void __init rk3066a_clk_init(struct device_node *np)
{
- rk3188_common_clk_init(np);
- rockchip_clk_register_plls(rk3066_pll_clks,
+ struct rockchip_clk_provider *ctx;
+
+ ctx = rk3188_common_clk_init(np);
+ if (IS_ERR(ctx))
+ return;
+
+ rockchip_clk_register_plls(ctx, rk3066_pll_clks,
ARRAY_SIZE(rk3066_pll_clks),
RK3066_GRF_SOC_STATUS);
- rockchip_clk_register_branches(rk3066a_clk_branches,
+ rockchip_clk_register_branches(ctx, rk3066a_clk_branches,
ARRAY_SIZE(rk3066a_clk_branches));
- rockchip_clk_register_armclk(ARMCLK, "armclk",
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
&rk3066_cpuclk_data, rk3066_cpuclk_rates,
ARRAY_SIZE(rk3066_cpuclk_rates));
rockchip_clk_protect_critical(rk3188_critical_clocks,
ARRAY_SIZE(rk3188_critical_clocks));
+ rockchip_clk_of_add_provider(np, ctx);
}
CLK_OF_DECLARE(rk3066a_cru, "rockchip,rk3066a-cru", rk3066a_clk_init);
static void __init rk3188a_clk_init(struct device_node *np)
{
+ struct rockchip_clk_provider *ctx;
struct clk *clk1, *clk2;
unsigned long rate;
int ret;
- rk3188_common_clk_init(np);
- rockchip_clk_register_plls(rk3188_pll_clks,
+ ctx = rk3188_common_clk_init(np);
+ if (IS_ERR(ctx))
+ return;
+
+ rockchip_clk_register_plls(ctx, rk3188_pll_clks,
ARRAY_SIZE(rk3188_pll_clks),
RK3188_GRF_SOC_STATUS);
- rockchip_clk_register_branches(rk3188_clk_branches,
+ rockchip_clk_register_branches(ctx, rk3188_clk_branches,
ARRAY_SIZE(rk3188_clk_branches));
- rockchip_clk_register_armclk(ARMCLK, "armclk",
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
&rk3188_cpuclk_data, rk3188_cpuclk_rates,
ARRAY_SIZE(rk3188_cpuclk_rates));
@@ -827,6 +851,7 @@ static void __init rk3188a_clk_init(struct device_node *np)
rockchip_clk_protect_critical(rk3188_critical_clocks,
ARRAY_SIZE(rk3188_critical_clocks));
+ rockchip_clk_of_add_provider(np, ctx);
}
CLK_OF_DECLARE(rk3188a_cru, "rockchip,rk3188a-cru", rk3188a_clk_init);
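
The three fields added to rockchip_cpuclk_reg_data across these files (mux_core_alt, mux_core_main, mux_core_mask) make the core mux's parent indices explicit instead of assuming a fixed 0/1 pair. The consumer sits in clk-cpu.c, which is outside this excerpt, so the following is only an assumed sketch of how such fields are typically applied during an armclk rate change, reusing the HIWORD_UPDATE helper that appears later in this patch:

/* Assumed call site (not part of this diff): park the core on the
 * alternate, stable parent, relock the cluster PLL, switch back. */
writel(HIWORD_UPDATE(reg_data->mux_core_alt,
		     reg_data->mux_core_mask,
		     reg_data->mux_core_shift),
       reg_base + reg_data->core_reg);

/* ... reprogram the main parent PLL to the new rate ... */

writel(HIWORD_UPDATE(reg_data->mux_core_main,
		     reg_data->mux_core_mask,
		     reg_data->mux_core_shift),
       reg_base + reg_data->core_reg);
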
diff --git a/drivers/clk/rockchip/clk-rk3228.c b/drivers/clk/rockchip/clk-rk3228.c
index 7702d2855..016bdb0b7 100644
--- a/drivers/clk/rockchip/clk-rk3228.c
+++ b/drivers/clk/rockchip/clk-rk3228.c
@@ -111,7 +111,10 @@ static const struct rockchip_cpuclk_reg_data rk3228_cpuclk_data = {
.core_reg = RK2928_CLKSEL_CON(0),
.div_core_shift = 0,
.div_core_mask = 0x1f,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
.mux_core_shift = 6,
+ .mux_core_mask = 0x1,
};
PNAME(mux_pll_p) = { "clk_24m", "xin24m" };
@@ -625,6 +628,7 @@ static const char *const rk3228_critical_clocks[] __initconst = {
static void __init rk3228_clk_init(struct device_node *np)
{
+ struct rockchip_clk_provider *ctx;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -633,17 +637,22 @@ static void __init rk3228_clk_init(struct device_node *np)
return;
}
- rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
- rockchip_clk_register_plls(rk3228_pll_clks,
+ rockchip_clk_register_plls(ctx, rk3228_pll_clks,
ARRAY_SIZE(rk3228_pll_clks),
RK3228_GRF_SOC_STATUS0);
- rockchip_clk_register_branches(rk3228_clk_branches,
+ rockchip_clk_register_branches(ctx, rk3228_clk_branches,
ARRAY_SIZE(rk3228_clk_branches));
rockchip_clk_protect_critical(rk3228_critical_clocks,
ARRAY_SIZE(rk3228_critical_clocks));
- rockchip_clk_register_armclk(ARMCLK, "armclk",
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
&rk3228_cpuclk_data, rk3228_cpuclk_rates,
ARRAY_SIZE(rk3228_cpuclk_rates));
@@ -651,6 +660,8 @@ static void __init rk3228_clk_init(struct device_node *np)
rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0),
ROCKCHIP_SOFTRST_HIWORD_MASK);
- rockchip_register_restart_notifier(RK3228_GLB_SRST_FST, NULL);
+ rockchip_register_restart_notifier(ctx, RK3228_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
}
CLK_OF_DECLARE(rk3228_cru, "rockchip,rk3228-cru", rk3228_clk_init);
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 3cb72163a..39af05a58 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -165,7 +165,10 @@ static const struct rockchip_cpuclk_reg_data rk3288_cpuclk_data = {
.core_reg = RK3288_CLKSEL_CON(0),
.div_core_shift = 8,
.div_core_mask = 0x1f,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
.mux_core_shift = 15,
+ .mux_core_mask = 0x1,
};
PNAME(mux_pll_p) = { "xin24m", "xin32k" };
@@ -878,6 +881,7 @@ static struct syscore_ops rk3288_clk_syscore_ops = {
static void __init rk3288_clk_init(struct device_node *np)
{
+ struct rockchip_clk_provider *ctx;
struct clk *clk;
rk3288_cru_base = of_iomap(np, 0);
@@ -886,7 +890,12 @@ static void __init rk3288_clk_init(struct device_node *np)
return;
}
- rockchip_clk_init(np, rk3288_cru_base, CLK_NR_CLKS);
+ ctx = rockchip_clk_init(np, rk3288_cru_base, CLK_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(rk3288_cru_base);
+ return;
+ }
/* Watchdog pclk is controlled by RK3288_SGRF_SOC_CON0[1]. */
clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
@@ -894,17 +903,17 @@ static void __init rk3288_clk_init(struct device_node *np)
pr_warn("%s: could not register clock pclk_wdt: %ld\n",
__func__, PTR_ERR(clk));
else
- rockchip_clk_add_lookup(clk, PCLK_WDT);
+ rockchip_clk_add_lookup(ctx, clk, PCLK_WDT);
- rockchip_clk_register_plls(rk3288_pll_clks,
+ rockchip_clk_register_plls(ctx, rk3288_pll_clks,
ARRAY_SIZE(rk3288_pll_clks),
RK3288_GRF_SOC_STATUS1);
- rockchip_clk_register_branches(rk3288_clk_branches,
+ rockchip_clk_register_branches(ctx, rk3288_clk_branches,
ARRAY_SIZE(rk3288_clk_branches));
rockchip_clk_protect_critical(rk3288_critical_clocks,
ARRAY_SIZE(rk3288_critical_clocks));
- rockchip_clk_register_armclk(ARMCLK, "armclk",
+ rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
&rk3288_cpuclk_data, rk3288_cpuclk_rates,
ARRAY_SIZE(rk3288_cpuclk_rates));
@@ -913,8 +922,10 @@ static void __init rk3288_clk_init(struct device_node *np)
rk3288_cru_base + RK3288_SOFTRST_CON(0),
ROCKCHIP_SOFTRST_HIWORD_MASK);
- rockchip_register_restart_notifier(RK3288_GLB_SRST_FST,
+ rockchip_register_restart_notifier(ctx, RK3288_GLB_SRST_FST,
rk3288_clk_shutdown);
register_syscore_ops(&rk3288_clk_syscore_ops);
+
+ rockchip_clk_of_add_provider(np, ctx);
}
CLK_OF_DECLARE(rk3288_cru, "rockchip,rk3288-cru", rk3288_clk_init);
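
Clocks created outside the branch tables now have to be attached to the provider by hand; rk3288 above and rk3368 below both do it for the watchdog pclk. The pattern, as it appears in these hunks:

clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
if (IS_ERR(clk))
	pr_warn("%s: could not register clock pclk_wdt: %ld\n",
		__func__, PTR_ERR(clk));
else
	rockchip_clk_add_lookup(ctx, clk, PCLK_WDT);

rockchip_clk_add_lookup() files the clk under its dt-binding ID inside the ctx, so consumers can still resolve PCLK_WDT once rockchip_clk_of_add_provider() has published the provider.
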
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index a2bb12200..6cb474c59 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -165,14 +165,20 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
.core_reg = RK3368_CLKSEL_CON(0),
.div_core_shift = 0,
.div_core_mask = 0x1f,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
.mux_core_shift = 7,
+ .mux_core_mask = 0x1,
};
static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
.core_reg = RK3368_CLKSEL_CON(2),
.div_core_shift = 0,
+ .mux_core_alt = 1,
+ .mux_core_main = 0,
.div_core_mask = 0x1f,
.mux_core_shift = 7,
+ .mux_core_mask = 0x1,
};
#define RK3368_DIV_ACLKM_MASK 0x1f
@@ -856,6 +862,7 @@ static const char *const rk3368_critical_clocks[] __initconst = {
static void __init rk3368_clk_init(struct device_node *np)
{
+ struct rockchip_clk_provider *ctx;
void __iomem *reg_base;
struct clk *clk;
@@ -865,7 +872,12 @@ static void __init rk3368_clk_init(struct device_node *np)
return;
}
- rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
/* Watchdog pclk is controlled by sgrf_soc_con3[7]. */
clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
@@ -873,22 +885,22 @@ static void __init rk3368_clk_init(struct device_node *np)
pr_warn("%s: could not register clock pclk_wdt: %ld\n",
__func__, PTR_ERR(clk));
else
- rockchip_clk_add_lookup(clk, PCLK_WDT);
+ rockchip_clk_add_lookup(ctx, clk, PCLK_WDT);
- rockchip_clk_register_plls(rk3368_pll_clks,
+ rockchip_clk_register_plls(ctx, rk3368_pll_clks,
ARRAY_SIZE(rk3368_pll_clks),
RK3368_GRF_SOC_STATUS0);
- rockchip_clk_register_branches(rk3368_clk_branches,
+ rockchip_clk_register_branches(ctx, rk3368_clk_branches,
ARRAY_SIZE(rk3368_clk_branches));
rockchip_clk_protect_critical(rk3368_critical_clocks,
ARRAY_SIZE(rk3368_critical_clocks));
- rockchip_clk_register_armclk(ARMCLKB, "armclkb",
+ rockchip_clk_register_armclk(ctx, ARMCLKB, "armclkb",
mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p),
&rk3368_cpuclkb_data, rk3368_cpuclkb_rates,
ARRAY_SIZE(rk3368_cpuclkb_rates));
- rockchip_clk_register_armclk(ARMCLKL, "armclkl",
+ rockchip_clk_register_armclk(ctx, ARMCLKL, "armclkl",
mux_armclkl_p, ARRAY_SIZE(mux_armclkl_p),
&rk3368_cpuclkl_data, rk3368_cpuclkl_rates,
ARRAY_SIZE(rk3368_cpuclkl_rates));
@@ -896,6 +908,8 @@ static void __init rk3368_clk_init(struct device_node *np)
rockchip_register_softrst(np, 15, reg_base + RK3368_SOFTRST_CON(0),
ROCKCHIP_SOFTRST_HIWORD_MASK);
- rockchip_register_restart_notifier(RK3368_GLB_SRST_FST, NULL);
+ rockchip_register_restart_notifier(ctx, RK3368_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
}
CLK_OF_DECLARE(rk3368_cru, "rockchip,rk3368-cru", rk3368_clk_init);
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
new file mode 100644
index 000000000..8059a8d3e
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -0,0 +1,1577 @@
+/*
+ * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
+ * Author: Xing Zheng <zhengxing@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <dt-bindings/clock/rk3399-cru.h>
+#include "clk.h"
+
+enum rk3399_plls {
+ lpll, bpll, dpll, cpll, gpll, npll, vpll,
+};
+
+enum rk3399_pmu_plls {
+ ppll,
+};
+
+static struct rockchip_pll_rate_table rk3399_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(2208000000, 1, 92, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2184000000, 1, 91, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2160000000, 1, 90, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2136000000, 1, 89, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2112000000, 1, 88, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2088000000, 1, 87, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2064000000, 1, 86, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2040000000, 1, 85, 1, 1, 1, 0),
+ RK3036_PLL_RATE(2016000000, 1, 84, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1992000000, 1, 83, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1968000000, 1, 82, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1944000000, 1, 81, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1920000000, 1, 80, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1896000000, 1, 79, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1872000000, 1, 78, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1848000000, 1, 77, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1824000000, 1, 76, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1800000000, 1, 75, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1776000000, 1, 74, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1752000000, 1, 73, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1728000000, 1, 72, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1704000000, 1, 71, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1680000000, 1, 70, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1656000000, 1, 69, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1632000000, 1, 68, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1584000000, 1, 66, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1560000000, 1, 65, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1536000000, 1, 64, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1488000000, 1, 62, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1464000000, 1, 61, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1440000000, 1, 60, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1392000000, 1, 58, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1368000000, 1, 57, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1344000000, 1, 56, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1320000000, 1, 55, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1272000000, 1, 53, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1248000000, 1, 52, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 2, 99, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1104000000, 1, 46, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1100000000, 12, 550, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 6, 500, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 984000000, 1, 82, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 960000000, 1, 80, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 936000000, 1, 78, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 900000000, 4, 300, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 888000000, 1, 74, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 864000000, 1, 72, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 840000000, 1, 70, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 800000000, 6, 400, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 700000000, 6, 350, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 696000000, 1, 58, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 676000000, 3, 169, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 600000000, 1, 75, 3, 1, 1, 0),
+ RK3036_PLL_RATE( 594000000, 1, 99, 4, 1, 1, 0),
+ RK3036_PLL_RATE( 504000000, 1, 63, 3, 1, 1, 0),
+ RK3036_PLL_RATE( 500000000, 6, 250, 2, 1, 1, 0),
+ RK3036_PLL_RATE( 408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE( 312000000, 1, 52, 2, 2, 1, 0),
+ RK3036_PLL_RATE( 297000000, 1, 99, 4, 2, 1, 0),
+ RK3036_PLL_RATE( 216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE( 148500000, 1, 99, 4, 4, 1, 0),
+ RK3036_PLL_RATE( 96000000, 1, 64, 4, 4, 1, 0),
+ RK3036_PLL_RATE( 74250000, 2, 99, 4, 4, 1, 0),
+ RK3036_PLL_RATE( 54000000, 1, 54, 6, 4, 1, 0),
+ RK3036_PLL_RATE( 27000000, 1, 27, 6, 4, 1, 0),
+ { /* sentinel */ },
+};
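+
+With dsmpd = 1 and frac = 0 throughout, every row follows the integer-mode relation of the rk3036-type PLL: rate = 24 MHz / refdiv * fbdiv / (postdiv1 * postdiv2). Two spot checks against the table:
+
+/* RK3036_PLL_RATE(1188000000, 2, 99, 1, 1, 1, 0):
+ *	24000000 / 2 * 99 / (1 * 1) = 1188000000 Hz
+ * RK3036_PLL_RATE(  74250000, 2, 99, 4, 4, 1, 0):
+ *	24000000 / 2 * 99 / (4 * 4) =   74250000 Hz
+ */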
+
+/* CRU parents */
+PNAME(mux_pll_p) = { "xin24m", "xin32k" };
+
+PNAME(mux_armclkl_p) = { "clk_core_l_lpll_src",
+ "clk_core_l_bpll_src",
+ "clk_core_l_dpll_src",
+ "clk_core_l_gpll_src" };
+PNAME(mux_armclkb_p) = { "clk_core_b_lpll_src",
+ "clk_core_b_bpll_src",
+ "clk_core_b_dpll_src",
+ "clk_core_b_gpll_src" };
+PNAME(mux_aclk_cci_p) = { "cpll_aclk_cci_src",
+ "gpll_aclk_cci_src",
+ "npll_aclk_cci_src",
+ "vpll_aclk_cci_src" };
+PNAME(mux_cci_trace_p) = { "cpll_cci_trace",
+ "gpll_cci_trace" };
+PNAME(mux_cs_p) = { "cpll_cs", "gpll_cs",
+ "npll_cs"};
+PNAME(mux_aclk_perihp_p) = { "cpll_aclk_perihp_src",
+ "gpll_aclk_perihp_src" };
+
+PNAME(mux_pll_src_cpll_gpll_p) = { "cpll", "gpll" };
+PNAME(mux_pll_src_cpll_gpll_npll_p) = { "cpll", "gpll", "npll" };
+PNAME(mux_pll_src_cpll_gpll_ppll_p) = { "cpll", "gpll", "ppll" };
+PNAME(mux_pll_src_cpll_gpll_upll_p) = { "cpll", "gpll", "upll" };
+PNAME(mux_pll_src_npll_cpll_gpll_p) = { "npll", "cpll", "gpll" };
+PNAME(mux_pll_src_cpll_gpll_npll_ppll_p) = { "cpll", "gpll", "npll",
+ "ppll" };
+PNAME(mux_pll_src_cpll_gpll_npll_24m_p) = { "cpll", "gpll", "npll",
+ "xin24m" };
+PNAME(mux_pll_src_cpll_gpll_npll_usbphy480m_p) = { "cpll", "gpll", "npll",
+ "clk_usbphy_480m" };
+PNAME(mux_pll_src_ppll_cpll_gpll_npll_p) = { "ppll", "cpll", "gpll",
+ "npll", "upll" };
+PNAME(mux_pll_src_cpll_gpll_npll_upll_24m_p) = { "cpll", "gpll", "npll",
+ "upll", "xin24m" };
+PNAME(mux_pll_src_cpll_gpll_npll_ppll_upll_24m_p) = { "cpll", "gpll", "npll",
+ "ppll", "upll", "xin24m" };
+
+PNAME(mux_pll_src_vpll_cpll_gpll_p) = { "vpll", "cpll", "gpll" };
+PNAME(mux_pll_src_vpll_cpll_gpll_npll_p) = { "vpll", "cpll", "gpll",
+ "npll" };
+PNAME(mux_pll_src_vpll_cpll_gpll_24m_p) = { "vpll", "cpll", "gpll",
+ "xin24m" };
+
+PNAME(mux_dclk_vop0_p) = { "dclk_vop0_div",
+ "dclk_vop0_frac" };
+PNAME(mux_dclk_vop1_p) = { "dclk_vop1_div",
+ "dclk_vop1_frac" };
+
+PNAME(mux_clk_cif_p) = { "clk_cifout_src", "xin24m" };
+
+PNAME(mux_pll_src_24m_usbphy480m_p) = { "xin24m", "clk_usbphy_480m" };
+PNAME(mux_pll_src_24m_pciephy_p) = { "xin24m", "clk_pciephy_ref100m" };
+PNAME(mux_pll_src_24m_32k_cpll_gpll_p) = { "xin24m", "xin32k",
+ "cpll", "gpll" };
+PNAME(mux_pciecore_cru_phy_p) = { "clk_pcie_core_cru",
+ "clk_pcie_core_phy" };
+
+PNAME(mux_aclk_emmc_p) = { "cpll_aclk_emmc_src",
+ "gpll_aclk_emmc_src" };
+
+PNAME(mux_aclk_perilp0_p) = { "cpll_aclk_perilp0_src",
+ "gpll_aclk_perilp0_src" };
+
+PNAME(mux_fclk_cm0s_p) = { "cpll_fclk_cm0s_src",
+ "gpll_fclk_cm0s_src" };
+
+PNAME(mux_hclk_perilp1_p) = { "cpll_hclk_perilp1_src",
+ "gpll_hclk_perilp1_src" };
+
+PNAME(mux_clk_testout1_p) = { "clk_testout1_pll_src", "xin24m" };
+PNAME(mux_clk_testout2_p) = { "clk_testout2_pll_src", "xin24m" };
+
+PNAME(mux_usbphy_480m_p) = { "clk_usbphy0_480m_src",
+ "clk_usbphy1_480m_src" };
+PNAME(mux_aclk_gmac_p) = { "cpll_aclk_gmac_src",
+ "gpll_aclk_gmac_src" };
+PNAME(mux_rmii_p) = { "clk_gmac", "clkin_gmac" };
+PNAME(mux_spdif_p) = { "clk_spdif_div", "clk_spdif_frac",
+ "clkin_i2s", "xin12m" };
+PNAME(mux_i2s0_p) = { "clk_i2s0_div", "clk_i2s0_frac",
+ "clkin_i2s", "xin12m" };
+PNAME(mux_i2s1_p) = { "clk_i2s1_div", "clk_i2s1_frac",
+ "clkin_i2s", "xin12m" };
+PNAME(mux_i2s2_p) = { "clk_i2s2_div", "clk_i2s2_frac",
+ "clkin_i2s", "xin12m" };
+PNAME(mux_i2sch_p) = { "clk_i2s0", "clk_i2s1",
+ "clk_i2s2" };
+PNAME(mux_i2sout_p) = { "clk_i2sout_src", "xin12m" };
+
+PNAME(mux_uart0_p) = { "clk_uart0_div", "clk_uart0_frac", "xin24m" };
+PNAME(mux_uart1_p) = { "clk_uart1_div", "clk_uart1_frac", "xin24m" };
+PNAME(mux_uart2_p) = { "clk_uart2_div", "clk_uart2_frac", "xin24m" };
+PNAME(mux_uart3_p) = { "clk_uart3_div", "clk_uart3_frac", "xin24m" };
+
+/* PMU CRU parents */
+PNAME(mux_ppll_24m_p) = { "ppll", "xin24m" };
+PNAME(mux_24m_ppll_p) = { "xin24m", "ppll" };
+PNAME(mux_fclk_cm0s_pmu_ppll_p) = { "fclk_cm0s_pmu_ppll_src", "xin24m" };
+PNAME(mux_wifi_pmu_p) = { "clk_wifi_div", "clk_wifi_frac" };
+PNAME(mux_uart4_pmu_p) = { "clk_uart4_div", "clk_uart4_frac",
+ "xin24m" };
+PNAME(mux_clk_testout2_2io_p) = { "clk_testout2", "clk_32k_suspend_pmu" };
+
+static struct rockchip_pll_clock rk3399_pll_clks[] __initdata = {
+ [lpll] = PLL(pll_rk3399, PLL_APLLL, "lpll", mux_pll_p, 0, RK3399_PLL_CON(0),
+ RK3399_PLL_CON(3), 8, 31, 0, rk3399_pll_rates),
+ [bpll] = PLL(pll_rk3399, PLL_APLLB, "bpll", mux_pll_p, 0, RK3399_PLL_CON(8),
+ RK3399_PLL_CON(11), 8, 31, 0, rk3399_pll_rates),
+ [dpll] = PLL(pll_rk3399, PLL_DPLL, "dpll", mux_pll_p, 0, RK3399_PLL_CON(16),
+ RK3399_PLL_CON(19), 8, 31, 0, NULL),
+ [cpll] = PLL(pll_rk3399, PLL_CPLL, "cpll", mux_pll_p, 0, RK3399_PLL_CON(24),
+ RK3399_PLL_CON(27), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rk3399_pll_rates),
+ [gpll] = PLL(pll_rk3399, PLL_GPLL, "gpll", mux_pll_p, 0, RK3399_PLL_CON(32),
+ RK3399_PLL_CON(35), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rk3399_pll_rates),
+ [npll] = PLL(pll_rk3399, PLL_NPLL, "npll", mux_pll_p, 0, RK3399_PLL_CON(40),
+ RK3399_PLL_CON(43), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rk3399_pll_rates),
+ [vpll] = PLL(pll_rk3399, PLL_VPLL, "vpll", mux_pll_p, 0, RK3399_PLL_CON(48),
+ RK3399_PLL_CON(51), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rk3399_pll_rates),
+};
+
+static struct rockchip_pll_clock rk3399_pmu_pll_clks[] __initdata = {
+ [ppll] = PLL(pll_rk3399, PLL_PPLL, "ppll", mux_pll_p, 0, RK3399_PMU_PLL_CON(0),
+ RK3399_PMU_PLL_CON(3), 8, 31, ROCKCHIP_PLL_SYNC_RATE, rk3399_pll_rates),
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+#define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK
+
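+All four flag sets pick the hiword-masked variants: Rockchip CRU registers keep a write-enable mask in their upper 16 bits, so a field can be written without a read-modify-write cycle (and hence without locking). For example, setting a 2-bit mux field at shift 6 to the value 1:
+
+/* Low half-word carries the value, high half-word the write enable;
+ * bits whose enable is clear keep their previous contents. */
+writel(HIWORD_UPDATE(1, 0x3, 6), reg);	/* == (0x3 << 22) | (1 << 6) */
+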
+static struct rockchip_clk_branch rk3399_spdif_fracmux __initdata =
+ MUX(0, "clk_spdif_mux", mux_spdif_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(32), 13, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3399_i2s0_fracmux __initdata =
+ MUX(0, "clk_i2s0_mux", mux_i2s0_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(28), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3399_i2s1_fracmux __initdata =
+ MUX(0, "clk_i2s1_mux", mux_i2s1_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(29), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3399_i2s2_fracmux __initdata =
+ MUX(0, "clk_i2s2_mux", mux_i2s2_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(30), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3399_uart0_fracmux __initdata =
+ MUX(SCLK_UART0, "clk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(33), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3399_uart1_fracmux __initdata =
+ MUX(SCLK_UART1, "clk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(34), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3399_uart2_fracmux __initdata =
+ MUX(SCLK_UART2, "clk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(35), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3399_uart3_fracmux __initdata =
+ MUX(SCLK_UART3, "clk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(36), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3399_uart4_pmu_fracmux __initdata =
+ MUX(SCLK_UART4_PMU, "clk_uart4_pmu", mux_uart4_pmu_p, CLK_SET_RATE_PARENT,
+ RK3399_PMU_CLKSEL_CON(5), 8, 2, MFLAGS);
+
+static struct rockchip_clk_branch rk3399_dclk_vop0_fracmux __initdata =
+ MUX(DCLK_VOP0, "dclk_vop0", mux_dclk_vop0_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(49), 11, 1, MFLAGS);
+
+static struct rockchip_clk_branch rk3399_dclk_vop1_fracmux __initdata =
+ MUX(DCLK_VOP1, "dclk_vop1", mux_dclk_vop1_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(50), 11, 1, MFLAGS);
+
+static struct rockchip_clk_branch rk3399_pmuclk_wifi_fracmux __initdata =
+ MUX(SCLK_WIFI_PMU, "clk_wifi_pmu", mux_wifi_pmu_p, CLK_SET_RATE_PARENT,
+ RK3399_PMU_CLKSEL_CON(1), 14, 1, MFLAGS);
+
+static const struct rockchip_cpuclk_reg_data rk3399_cpuclkl_data = {
+ .core_reg = RK3399_CLKSEL_CON(0),
+ .div_core_shift = 0,
+ .div_core_mask = 0x1f,
+ .mux_core_alt = 3,
+ .mux_core_main = 0,
+ .mux_core_shift = 6,
+ .mux_core_mask = 0x3,
+};
+
+static const struct rockchip_cpuclk_reg_data rk3399_cpuclkb_data = {
+ .core_reg = RK3399_CLKSEL_CON(2),
+ .div_core_shift = 0,
+ .div_core_mask = 0x1f,
+ .mux_core_alt = 3,
+ .mux_core_main = 1,
+ .mux_core_shift = 6,
+ .mux_core_mask = 0x3,
+};
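+
+Read against the mux_armclkl_p/mux_armclkb_p lists above, the indices decode as:
+
+/* cpuclkl: mux_core_main = 0 -> clk_core_l_lpll_src,
+ *          mux_core_alt  = 3 -> clk_core_l_gpll_src
+ * cpuclkb: mux_core_main = 1 -> clk_core_b_bpll_src,
+ *          mux_core_alt  = 3 -> clk_core_b_gpll_src
+ * The 2-bit mux_core_mask matches the four-entry parent lists; gpll
+ * presumably serves as the stable parent while the cluster PLL relocks.
+ */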
+
+#define RK3399_DIV_ACLKM_MASK 0x1f
+#define RK3399_DIV_ACLKM_SHIFT 8
+#define RK3399_DIV_ATCLK_MASK 0x1f
+#define RK3399_DIV_ATCLK_SHIFT 0
+#define RK3399_DIV_PCLK_DBG_MASK 0x1f
+#define RK3399_DIV_PCLK_DBG_SHIFT 8
+
+#define RK3399_CLKSEL0(_offs, _aclkm) \
+ { \
+ .reg = RK3399_CLKSEL_CON(0 + _offs), \
+ .val = HIWORD_UPDATE(_aclkm, RK3399_DIV_ACLKM_MASK, \
+ RK3399_DIV_ACLKM_SHIFT), \
+ }
+#define RK3399_CLKSEL1(_offs, _atclk, _pdbg) \
+ { \
+ .reg = RK3399_CLKSEL_CON(1 + _offs), \
+ .val = HIWORD_UPDATE(_atclk, RK3399_DIV_ATCLK_MASK, \
+ RK3399_DIV_ATCLK_SHIFT) | \
+ HIWORD_UPDATE(_pdbg, RK3399_DIV_PCLK_DBG_MASK, \
+ RK3399_DIV_PCLK_DBG_SHIFT), \
+ }
+
+/* cluster_l: aclkm in clksel0, rest in clksel1 */
+#define RK3399_CPUCLKL_RATE(_prate, _aclkm, _atclk, _pdbg) \
+ { \
+ .prate = _prate##U, \
+ .divs = { \
+ RK3399_CLKSEL0(0, _aclkm), \
+ RK3399_CLKSEL1(0, _atclk, _pdbg), \
+ }, \
+ }
+
+/* cluster_b: aclkm in clksel2, rest in clksel3 */
+#define RK3399_CPUCLKB_RATE(_prate, _aclkm, _atclk, _pdbg) \
+ { \
+ .prate = _prate##U, \
+ .divs = { \
+ RK3399_CLKSEL0(2, _aclkm), \
+ RK3399_CLKSEL1(2, _atclk, _pdbg), \
+ }, \
+ }
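+
+Expanding one little-cluster entry makes the register layout concrete (shifts and masks per the defines above):
+
+/* RK3399_CPUCLKL_RATE(600000000, 1, 3, 3) expands to:
+ *   .prate   = 600000000U,
+ *   .divs[0] = { .reg = RK3399_CLKSEL_CON(0),
+ *                .val = HIWORD_UPDATE(1, 0x1f, 8) }     aclkm, bits [12:8]
+ *   .divs[1] = { .reg = RK3399_CLKSEL_CON(1),
+ *                .val = HIWORD_UPDATE(3, 0x1f, 0) |     atclk, bits [4:0]
+ *                       HIWORD_UPDATE(3, 0x1f, 8) }     pclk_dbg, bits [12:8]
+ * The big-cluster macro is identical but targets CLKSEL_CON(2) and (3).
+ */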
+
+static struct rockchip_cpuclk_rate_table rk3399_cpuclkl_rates[] __initdata = {
+ RK3399_CPUCLKL_RATE(1800000000, 1, 8, 8),
+ RK3399_CPUCLKL_RATE(1704000000, 1, 8, 8),
+ RK3399_CPUCLKL_RATE(1608000000, 1, 7, 7),
+ RK3399_CPUCLKL_RATE(1512000000, 1, 7, 7),
+ RK3399_CPUCLKL_RATE(1488000000, 1, 6, 6),
+ RK3399_CPUCLKL_RATE(1416000000, 1, 6, 6),
+ RK3399_CPUCLKL_RATE(1200000000, 1, 5, 5),
+ RK3399_CPUCLKL_RATE(1008000000, 1, 5, 5),
+ RK3399_CPUCLKL_RATE( 816000000, 1, 4, 4),
+ RK3399_CPUCLKL_RATE( 696000000, 1, 3, 3),
+ RK3399_CPUCLKL_RATE( 600000000, 1, 3, 3),
+ RK3399_CPUCLKL_RATE( 408000000, 1, 2, 2),
+ RK3399_CPUCLKL_RATE( 312000000, 1, 1, 1),
+ RK3399_CPUCLKL_RATE( 216000000, 1, 1, 1),
+ RK3399_CPUCLKL_RATE( 96000000, 1, 1, 1),
+};
+
+static struct rockchip_cpuclk_rate_table rk3399_cpuclkb_rates[] __initdata = {
+ RK3399_CPUCLKB_RATE(2208000000, 1, 11, 11),
+ RK3399_CPUCLKB_RATE(2184000000, 1, 11, 11),
+ RK3399_CPUCLKB_RATE(2088000000, 1, 10, 10),
+ RK3399_CPUCLKB_RATE(2040000000, 1, 10, 10),
+ RK3399_CPUCLKB_RATE(1992000000, 1, 9, 9),
+ RK3399_CPUCLKB_RATE(1896000000, 1, 9, 9),
+ RK3399_CPUCLKB_RATE(1800000000, 1, 8, 8),
+ RK3399_CPUCLKB_RATE(1704000000, 1, 8, 8),
+ RK3399_CPUCLKB_RATE(1608000000, 1, 7, 7),
+ RK3399_CPUCLKB_RATE(1512000000, 1, 7, 7),
+ RK3399_CPUCLKB_RATE(1488000000, 1, 6, 6),
+ RK3399_CPUCLKB_RATE(1416000000, 1, 6, 6),
+ RK3399_CPUCLKB_RATE(1200000000, 1, 5, 5),
+ RK3399_CPUCLKB_RATE(1008000000, 1, 5, 5),
+ RK3399_CPUCLKB_RATE( 816000000, 1, 4, 4),
+ RK3399_CPUCLKB_RATE( 696000000, 1, 3, 3),
+ RK3399_CPUCLKB_RATE( 600000000, 1, 3, 3),
+ RK3399_CPUCLKB_RATE( 408000000, 1, 2, 2),
+ RK3399_CPUCLKB_RATE( 312000000, 1, 1, 1),
+ RK3399_CPUCLKB_RATE( 216000000, 1, 1, 1),
+ RK3399_CPUCLKB_RATE( 96000000, 1, 1, 1),
+};
+
+static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
+ /*
+ * CRU Clock-Architecture
+ */
+
+ /* usbphy */
+ GATE(SCLK_USB2PHY0_REF, "clk_usb2phy0_ref", "xin24m", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(6), 5, GFLAGS),
+ GATE(SCLK_USB2PHY1_REF, "clk_usb2phy1_ref", "xin24m", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(6), 6, GFLAGS),
+
+ GATE(0, "clk_usbphy0_480m_src", "clk_usbphy0_480m", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(13), 12, GFLAGS),
+ GATE(0, "clk_usbphy1_480m_src", "clk_usbphy1_480m", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(13), 12, GFLAGS),
+ MUX(0, "clk_usbphy_480m", mux_usbphy_480m_p, CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(14), 6, 1, MFLAGS),
+
+ MUX(0, "upll", mux_pll_src_24m_usbphy480m_p, 0,
+ RK3399_CLKSEL_CON(14), 15, 1, MFLAGS),
+
+ COMPOSITE_NODIV(SCLK_HSICPHY, "clk_hsicphy", mux_pll_src_cpll_gpll_npll_usbphy480m_p, 0,
+ RK3399_CLKSEL_CON(19), 0, 2, MFLAGS,
+ RK3399_CLKGATE_CON(6), 4, GFLAGS),
+
+ COMPOSITE(ACLK_USB3, "aclk_usb3", mux_pll_src_cpll_gpll_npll_p, 0,
+ RK3399_CLKSEL_CON(39), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(12), 0, GFLAGS),
+ GATE(ACLK_USB3_NOC, "aclk_usb3_noc", "aclk_usb3", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(30), 0, GFLAGS),
+ GATE(ACLK_USB3OTG0, "aclk_usb3otg0", "aclk_usb3", 0,
+ RK3399_CLKGATE_CON(30), 1, GFLAGS),
+ GATE(ACLK_USB3OTG1, "aclk_usb3otg1", "aclk_usb3", 0,
+ RK3399_CLKGATE_CON(30), 2, GFLAGS),
+ GATE(ACLK_USB3_RKSOC_AXI_PERF, "aclk_usb3_rksoc_axi_perf", "aclk_usb3", 0,
+ RK3399_CLKGATE_CON(30), 3, GFLAGS),
+ GATE(ACLK_USB3_GRF, "aclk_usb3_grf", "aclk_usb3", 0,
+ RK3399_CLKGATE_CON(30), 4, GFLAGS),
+
+ GATE(SCLK_USB3OTG0_REF, "clk_usb3otg0_ref", "xin24m", 0,
+ RK3399_CLKGATE_CON(12), 1, GFLAGS),
+ GATE(SCLK_USB3OTG1_REF, "clk_usb3otg1_ref", "xin24m", 0,
+ RK3399_CLKGATE_CON(12), 2, GFLAGS),
+
+ COMPOSITE(SCLK_USB3OTG0_SUSPEND, "clk_usb3otg0_suspend", mux_pll_p, 0,
+ RK3399_CLKSEL_CON(40), 15, 1, MFLAGS, 0, 10, DFLAGS,
+ RK3399_CLKGATE_CON(12), 3, GFLAGS),
+
+ COMPOSITE(SCLK_USB3OTG1_SUSPEND, "clk_usb3otg1_suspend", mux_pll_p, 0,
+ RK3399_CLKSEL_CON(41), 15, 1, MFLAGS, 0, 10, DFLAGS,
+ RK3399_CLKGATE_CON(12), 4, GFLAGS),
+
+ COMPOSITE(SCLK_UPHY0_TCPDPHY_REF, "clk_uphy0_tcpdphy_ref", mux_pll_p, 0,
+ RK3399_CLKSEL_CON(64), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(13), 4, GFLAGS),
+
+ COMPOSITE(SCLK_UPHY0_TCPDCORE, "clk_uphy0_tcpdcore", mux_pll_src_24m_32k_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(64), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(13), 5, GFLAGS),
+
+ COMPOSITE(SCLK_UPHY1_TCPDPHY_REF, "clk_uphy1_tcpdphy_ref", mux_pll_p, 0,
+ RK3399_CLKSEL_CON(65), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(13), 6, GFLAGS),
+
+ COMPOSITE(SCLK_UPHY1_TCPDCORE, "clk_uphy1_tcpdcore", mux_pll_src_24m_32k_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(65), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(13), 7, GFLAGS),
+
+ /* little core */
+ GATE(0, "clk_core_l_lpll_src", "lpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(0), 0, GFLAGS),
+ GATE(0, "clk_core_l_bpll_src", "bpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(0, "clk_core_l_dpll_src", "dpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(0, "clk_core_l_gpll_src", "gpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(0), 3, GFLAGS),
+
+ COMPOSITE_NOMUX(0, "aclkm_core_l", "armclkl", CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(0), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3399_CLKGATE_CON(0), 4, GFLAGS),
+ COMPOSITE_NOMUX(0, "atclk_core_l", "armclkl", CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(1), 0, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3399_CLKGATE_CON(0), 5, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclk_dbg_core_l", "armclkl", CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(1), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3399_CLKGATE_CON(0), 6, GFLAGS),
+
+ GATE(ACLK_CORE_ADB400_CORE_L_2_CCI500, "aclk_core_adb400_core_l_2_cci500", "aclkm_core_l", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(14), 12, GFLAGS),
+ GATE(ACLK_PERF_CORE_L, "aclk_perf_core_l", "aclkm_core_l", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(14), 13, GFLAGS),
+
+ GATE(0, "clk_dbg_pd_core_l", "armclkl", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(14), 9, GFLAGS),
+ GATE(ACLK_GIC_ADB400_GIC_2_CORE_L, "aclk_core_adb400_gic_2_core_l", "armclkl", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(14), 10, GFLAGS),
+ GATE(ACLK_GIC_ADB400_CORE_L_2_GIC, "aclk_core_adb400_core_l_2_gic", "armclkl", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(14), 11, GFLAGS),
+ GATE(SCLK_PVTM_CORE_L, "clk_pvtm_core_l", "xin24m", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(0), 7, GFLAGS),
+
+ /* big core */
+ GATE(0, "clk_core_b_lpll_src", "lpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(1), 0, GFLAGS),
+ GATE(0, "clk_core_b_bpll_src", "bpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(0, "clk_core_b_dpll_src", "dpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(1), 2, GFLAGS),
+ GATE(0, "clk_core_b_gpll_src", "gpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(1), 3, GFLAGS),
+
+ COMPOSITE_NOMUX(0, "aclkm_core_b", "armclkb", CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(2), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3399_CLKGATE_CON(1), 4, GFLAGS),
+ COMPOSITE_NOMUX(0, "atclk_core_b", "armclkb", CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(3), 0, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3399_CLKGATE_CON(1), 5, GFLAGS),
+ COMPOSITE_NOMUX(0, "pclk_dbg_core_b", "armclkb", CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(3), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3399_CLKGATE_CON(1), 6, GFLAGS),
+
+ GATE(ACLK_CORE_ADB400_CORE_B_2_CCI500, "aclk_core_adb400_core_b_2_cci500", "aclkm_core_b", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(14), 5, GFLAGS),
+ GATE(ACLK_PERF_CORE_B, "aclk_perf_core_b", "aclkm_core_b", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(14), 6, GFLAGS),
+
+ GATE(0, "clk_dbg_pd_core_b", "armclkb", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(14), 1, GFLAGS),
+ GATE(ACLK_GIC_ADB400_GIC_2_CORE_B, "aclk_core_adb400_gic_2_core_b", "armclkb", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(14), 3, GFLAGS),
+ GATE(ACLK_GIC_ADB400_CORE_B_2_GIC, "aclk_core_adb400_core_b_2_gic", "armclkb", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(14), 4, GFLAGS),
+
+ DIV(0, "pclken_dbg_core_b", "pclk_dbg_core_b", CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(3), 13, 2, DFLAGS | CLK_DIVIDER_READ_ONLY),
+
+ GATE(0, "pclk_dbg_cxcs_pd_core_b", "pclk_dbg_core_b", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(14), 2, GFLAGS),
+
+ GATE(SCLK_PVTM_CORE_B, "clk_pvtm_core_b", "xin24m", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(1), 7, GFLAGS),
+
+ /* gmac */
+ GATE(0, "cpll_aclk_gmac_src", "cpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(6), 9, GFLAGS),
+ GATE(0, "gpll_aclk_gmac_src", "gpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(6), 8, GFLAGS),
+ COMPOSITE(0, "aclk_gmac_pre", mux_aclk_gmac_p, 0,
+ RK3399_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(6), 10, GFLAGS),
+
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_gmac_pre", 0,
+ RK3399_CLKGATE_CON(32), 0, GFLAGS),
+ GATE(ACLK_GMAC_NOC, "aclk_gmac_noc", "aclk_gmac_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(32), 1, GFLAGS),
+ GATE(ACLK_PERF_GMAC, "aclk_perf_gmac", "aclk_gmac_pre", 0,
+ RK3399_CLKGATE_CON(32), 4, GFLAGS),
+
+ COMPOSITE_NOMUX(0, "pclk_gmac_pre", "aclk_gmac_pre", 0,
+ RK3399_CLKSEL_CON(19), 8, 3, DFLAGS,
+ RK3399_CLKGATE_CON(6), 11, GFLAGS),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_gmac_pre", 0,
+ RK3399_CLKGATE_CON(32), 2, GFLAGS),
+ GATE(PCLK_GMAC_NOC, "pclk_gmac_noc", "pclk_gmac_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(32), 3, GFLAGS),
+
+ COMPOSITE(SCLK_MAC, "clk_gmac", mux_pll_src_cpll_gpll_npll_p, 0,
+ RK3399_CLKSEL_CON(20), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(5), 5, GFLAGS),
+
+ MUX(SCLK_RMII_SRC, "clk_rmii_src", mux_rmii_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(19), 4, 1, MFLAGS),
+ GATE(SCLK_MACREF_OUT, "clk_mac_refout", "clk_rmii_src", 0,
+ RK3399_CLKGATE_CON(5), 6, GFLAGS),
+ GATE(SCLK_MACREF, "clk_mac_ref", "clk_rmii_src", 0,
+ RK3399_CLKGATE_CON(5), 7, GFLAGS),
+ GATE(SCLK_MAC_RX, "clk_rmii_rx", "clk_rmii_src", 0,
+ RK3399_CLKGATE_CON(5), 8, GFLAGS),
+ GATE(SCLK_MAC_TX, "clk_rmii_tx", "clk_rmii_src", 0,
+ RK3399_CLKGATE_CON(5), 9, GFLAGS),
+
+ /* spdif */
+ COMPOSITE(0, "clk_spdif_div", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(32), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(8), 13, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_spdif_frac", "clk_spdif_div", CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(99), 0,
+ RK3399_CLKGATE_CON(8), 14, GFLAGS,
+ &rk3399_spdif_fracmux),
+ GATE(SCLK_SPDIF_8CH, "clk_spdif", "clk_spdif_mux", CLK_SET_RATE_PARENT,
+ RK3399_CLKGATE_CON(8), 15, GFLAGS),
+
+ COMPOSITE(SCLK_SPDIF_REC_DPTX, "clk_spdif_rec_dptx", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(32), 15, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(10), 6, GFLAGS),
+ /* i2s */
+ COMPOSITE(0, "clk_i2s0_div", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(28), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(8), 3, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s0_frac", "clk_i2s0_div", CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(96), 0,
+ RK3399_CLKGATE_CON(8), 4, GFLAGS,
+ &rk3399_i2s0_fracmux),
+ GATE(SCLK_I2S0_8CH, "clk_i2s0", "clk_i2s0_mux", CLK_SET_RATE_PARENT,
+ RK3399_CLKGATE_CON(8), 5, GFLAGS),
+
+ COMPOSITE(0, "clk_i2s1_div", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(29), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(8), 6, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s1_frac", "clk_i2s1_div", CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(97), 0,
+ RK3399_CLKGATE_CON(8), 7, GFLAGS,
+ &rk3399_i2s1_fracmux),
+ GATE(SCLK_I2S1_8CH, "clk_i2s1", "clk_i2s1_mux", CLK_SET_RATE_PARENT,
+ RK3399_CLKGATE_CON(8), 8, GFLAGS),
+
+ COMPOSITE(0, "clk_i2s2_div", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(30), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(8), 9, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_i2s2_frac", "clk_i2s2_div", CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(98), 0,
+ RK3399_CLKGATE_CON(8), 10, GFLAGS,
+ &rk3399_i2s2_fracmux),
+ GATE(SCLK_I2S2_8CH, "clk_i2s2", "clk_i2s2_mux", CLK_SET_RATE_PARENT,
+ RK3399_CLKGATE_CON(8), 11, GFLAGS),
+
+ MUX(0, "clk_i2sout_src", mux_i2sch_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(31), 0, 2, MFLAGS),
+ COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "clk_i2sout", mux_i2sout_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(30), 8, 2, MFLAGS,
+ RK3399_CLKGATE_CON(8), 12, GFLAGS),
+
+ /* uart */
+ MUX(0, "clk_uart0_src", mux_pll_src_cpll_gpll_upll_p, 0,
+ RK3399_CLKSEL_CON(33), 12, 2, MFLAGS),
+ COMPOSITE_NOMUX(0, "clk_uart0_div", "clk_uart0_src", 0,
+ RK3399_CLKSEL_CON(33), 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(9), 0, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart0_frac", "clk_uart0_div", CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(100), 0,
+ RK3399_CLKGATE_CON(9), 1, GFLAGS,
+ &rk3399_uart0_fracmux),
+
+ MUX(0, "clk_uart_src", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(33), 15, 1, MFLAGS),
+ COMPOSITE_NOMUX(0, "clk_uart1_div", "clk_uart_src", 0,
+ RK3399_CLKSEL_CON(34), 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(9), 2, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart1_frac", "clk_uart1_div", CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(101), 0,
+ RK3399_CLKGATE_CON(9), 3, GFLAGS,
+ &rk3399_uart1_fracmux),
+
+ COMPOSITE_NOMUX(0, "clk_uart2_div", "clk_uart_src", 0,
+ RK3399_CLKSEL_CON(35), 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(9), 4, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart2_frac", "clk_uart2_div", CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(102), 0,
+ RK3399_CLKGATE_CON(9), 5, GFLAGS,
+ &rk3399_uart2_fracmux),
+
+ COMPOSITE_NOMUX(0, "clk_uart3_div", "clk_uart_src", 0,
+ RK3399_CLKSEL_CON(36), 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(9), 6, GFLAGS),
+ COMPOSITE_FRACMUX(0, "clk_uart3_frac", "clk_uart3_div", CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(103), 0,
+ RK3399_CLKGATE_CON(9), 7, GFLAGS,
+ &rk3399_uart3_fracmux),
+
+ COMPOSITE(0, "pclk_ddr", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(6), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(3), 4, GFLAGS),
+
+ GATE(PCLK_CENTER_MAIN_NOC, "pclk_center_main_noc", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(18), 10, GFLAGS),
+ GATE(PCLK_DDR_MON, "pclk_ddr_mon", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(18), 12, GFLAGS),
+ GATE(PCLK_CIC, "pclk_cic", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(18), 15, GFLAGS),
+ GATE(PCLK_DDR_SGRF, "pclk_ddr_sgrf", "pclk_ddr", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(19), 2, GFLAGS),
+
+ GATE(SCLK_PVTM_DDR, "clk_pvtm_ddr", "xin24m", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(4), 11, GFLAGS),
+ GATE(SCLK_DFIMON0_TIMER, "clk_dfimon0_timer", "xin24m", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(3), 5, GFLAGS),
+ GATE(SCLK_DFIMON1_TIMER, "clk_dfimon1_timer", "xin24m", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(3), 6, GFLAGS),
+
+ /* cci */
+ GATE(0, "cpll_aclk_cci_src", "cpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(2), 0, GFLAGS),
+ GATE(0, "gpll_aclk_cci_src", "gpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(2), 1, GFLAGS),
+ GATE(0, "npll_aclk_cci_src", "npll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(2), 2, GFLAGS),
+ GATE(0, "vpll_aclk_cci_src", "vpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(2), 3, GFLAGS),
+
+ COMPOSITE(0, "aclk_cci_pre", mux_aclk_cci_p, CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(5), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(2), 4, GFLAGS),
+
+ GATE(ACLK_ADB400M_PD_CORE_L, "aclk_adb400m_pd_core_l", "aclk_cci_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(15), 0, GFLAGS),
+ GATE(ACLK_ADB400M_PD_CORE_B, "aclk_adb400m_pd_core_b", "aclk_cci_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(15), 1, GFLAGS),
+ GATE(ACLK_CCI, "aclk_cci", "aclk_cci_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(15), 2, GFLAGS),
+ GATE(ACLK_CCI_NOC0, "aclk_cci_noc0", "aclk_cci_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(15), 3, GFLAGS),
+ GATE(ACLK_CCI_NOC1, "aclk_cci_noc1", "aclk_cci_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(15), 4, GFLAGS),
+ GATE(ACLK_CCI_GRF, "aclk_cci_grf", "aclk_cci_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(15), 7, GFLAGS),
+
+ GATE(0, "cpll_cci_trace", "cpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(2), 5, GFLAGS),
+ GATE(0, "gpll_cci_trace", "gpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(2), 6, GFLAGS),
+ COMPOSITE(SCLK_CCI_TRACE, "clk_cci_trace", mux_cci_trace_p, CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(5), 15, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(2), 7, GFLAGS),
+
+ GATE(0, "cpll_cs", "cpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(2), 8, GFLAGS),
+ GATE(0, "gpll_cs", "gpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(2), 9, GFLAGS),
+ GATE(0, "npll_cs", "npll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(2), 10, GFLAGS),
+ COMPOSITE_NOGATE(0, "clk_cs", mux_cs_p, CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(4), 6, 2, MFLAGS, 0, 5, DFLAGS),
+ GATE(0, "clk_dbg_cxcs", "clk_cs", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(15), 5, GFLAGS),
+ GATE(0, "clk_dbg_noc", "clk_cs", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(15), 6, GFLAGS),
+
+ /* vcodec */
+ COMPOSITE(0, "aclk_vcodec_pre", mux_pll_src_cpll_gpll_npll_ppll_p, 0,
+ RK3399_CLKSEL_CON(7), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(4), 0, GFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_vcodec_pre", "aclk_vcodec_pre", 0,
+ RK3399_CLKSEL_CON(7), 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(4), 1, GFLAGS),
+ GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 0,
+ RK3399_CLKGATE_CON(17), 2, GFLAGS),
+ GATE(0, "hclk_vcodec_noc", "hclk_vcodec_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(17), 3, GFLAGS),
+
+ GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
+ RK3399_CLKGATE_CON(17), 0, GFLAGS),
+ GATE(0, "aclk_vcodec_noc", "aclk_vcodec_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(17), 1, GFLAGS),
+
+ /* vdu */
+ COMPOSITE(SCLK_VDU_CORE, "clk_vdu_core", mux_pll_src_cpll_gpll_npll_p, 0,
+ RK3399_CLKSEL_CON(9), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(4), 4, GFLAGS),
+ COMPOSITE(SCLK_VDU_CA, "clk_vdu_ca", mux_pll_src_cpll_gpll_npll_p, 0,
+ RK3399_CLKSEL_CON(9), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(4), 5, GFLAGS),
+
+ COMPOSITE(0, "aclk_vdu_pre", mux_pll_src_cpll_gpll_npll_ppll_p, 0,
+ RK3399_CLKSEL_CON(8), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(4), 2, GFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_vdu_pre", "aclk_vdu_pre", 0,
+ RK3399_CLKSEL_CON(8), 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(4), 3, GFLAGS),
+ GATE(HCLK_VDU, "hclk_vdu", "hclk_vdu_pre", 0,
+ RK3399_CLKGATE_CON(17), 10, GFLAGS),
+ GATE(HCLK_VDU_NOC, "hclk_vdu_noc", "hclk_vdu_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(17), 11, GFLAGS),
+
+ GATE(ACLK_VDU, "aclk_vdu", "aclk_vdu_pre", 0,
+ RK3399_CLKGATE_CON(17), 8, GFLAGS),
+ GATE(ACLK_VDU_NOC, "aclk_vdu_noc", "aclk_vdu_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(17), 9, GFLAGS),
+
+ /* iep */
+ COMPOSITE(0, "aclk_iep_pre", mux_pll_src_cpll_gpll_npll_ppll_p, 0,
+ RK3399_CLKSEL_CON(10), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(4), 6, GFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_iep_pre", "aclk_iep_pre", 0,
+ RK3399_CLKSEL_CON(10), 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(4), 7, GFLAGS),
+ GATE(HCLK_IEP, "hclk_iep", "hclk_iep_pre", 0,
+ RK3399_CLKGATE_CON(16), 2, GFLAGS),
+ GATE(HCLK_IEP_NOC, "hclk_iep_noc", "hclk_iep_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(16), 3, GFLAGS),
+
+ GATE(ACLK_IEP, "aclk_iep", "aclk_iep_pre", 0,
+ RK3399_CLKGATE_CON(16), 0, GFLAGS),
+ GATE(ACLK_IEP_NOC, "aclk_iep_noc", "aclk_iep_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(16), 1, GFLAGS),
+
+ /* rga */
+ COMPOSITE(SCLK_RGA_CORE, "clk_rga_core", mux_pll_src_cpll_gpll_npll_ppll_p, 0,
+ RK3399_CLKSEL_CON(12), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(4), 10, GFLAGS),
+
+ COMPOSITE(0, "aclk_rga_pre", mux_pll_src_cpll_gpll_npll_ppll_p, 0,
+ RK3399_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(4), 8, GFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_rga_pre", "aclk_rga_pre", 0,
+ RK3399_CLKSEL_CON(11), 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(4), 9, GFLAGS),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_rga_pre", 0,
+ RK3399_CLKGATE_CON(16), 10, GFLAGS),
+ GATE(HCLK_RGA_NOC, "hclk_rga_noc", "hclk_rga_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(16), 11, GFLAGS),
+
+ GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0,
+ RK3399_CLKGATE_CON(16), 8, GFLAGS),
+ GATE(ACLK_RGA_NOC, "aclk_rga_noc", "aclk_rga_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(16), 9, GFLAGS),
+
+ /* center */
+ COMPOSITE(0, "aclk_center", mux_pll_src_cpll_gpll_npll_p, CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(12), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(3), 7, GFLAGS),
+ GATE(ACLK_CENTER_MAIN_NOC, "aclk_center_main_noc", "aclk_center", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(19), 0, GFLAGS),
+ GATE(ACLK_CENTER_PERI_NOC, "aclk_center_peri_noc", "aclk_center", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(19), 1, GFLAGS),
+
+ /* gpu */
+ COMPOSITE(0, "aclk_gpu_pre", mux_pll_src_ppll_cpll_gpll_npll_p, CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(13), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(13), 0, GFLAGS),
+ GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_pre", 0,
+ RK3399_CLKGATE_CON(30), 8, GFLAGS),
+ GATE(ACLK_PERF_GPU, "aclk_perf_gpu", "aclk_gpu_pre", 0,
+ RK3399_CLKGATE_CON(30), 10, GFLAGS),
+ GATE(ACLK_GPU_GRF, "aclk_gpu_grf", "aclk_gpu_pre", 0,
+ RK3399_CLKGATE_CON(30), 11, GFLAGS),
+ GATE(SCLK_PVTM_GPU, "aclk_pvtm_gpu", "xin24m", 0,
+ RK3399_CLKGATE_CON(13), 1, GFLAGS),
+
+ /* perihp */
+ GATE(0, "cpll_aclk_perihp_src", "cpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(5), 0, GFLAGS),
+ GATE(0, "gpll_aclk_perihp_src", "gpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(5), 1, GFLAGS),
+ COMPOSITE(ACLK_PERIHP, "aclk_perihp", mux_aclk_perihp_p, CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(14), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(5), 2, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_PERIHP, "hclk_perihp", "aclk_perihp", CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(14), 8, 2, DFLAGS,
+ RK3399_CLKGATE_CON(5), 3, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_PERIHP, "pclk_perihp", "aclk_perihp", CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(14), 12, 2, DFLAGS,
+ RK3399_CLKGATE_CON(5), 4, GFLAGS),
+
+ GATE(ACLK_PERF_PCIE, "aclk_perf_pcie", "aclk_perihp", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(20), 2, GFLAGS),
+ GATE(ACLK_PCIE, "aclk_pcie", "aclk_perihp", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(20), 10, GFLAGS),
+ GATE(0, "aclk_perihp_noc", "aclk_perihp", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(20), 12, GFLAGS),
+
+ GATE(HCLK_HOST0, "hclk_host0", "hclk_perihp", 0,
+ RK3399_CLKGATE_CON(20), 5, GFLAGS),
+ GATE(HCLK_HOST0_ARB, "hclk_host0_arb", "hclk_perihp", 0,
+ RK3399_CLKGATE_CON(20), 6, GFLAGS),
+ GATE(HCLK_HOST1, "hclk_host1", "hclk_perihp", 0,
+ RK3399_CLKGATE_CON(20), 7, GFLAGS),
+ GATE(HCLK_HOST1_ARB, "hclk_host1_arb", "hclk_perihp", 0,
+ RK3399_CLKGATE_CON(20), 8, GFLAGS),
+ GATE(HCLK_HSIC, "hclk_hsic", "hclk_perihp", 0,
+ RK3399_CLKGATE_CON(20), 9, GFLAGS),
+ GATE(0, "hclk_perihp_noc", "hclk_perihp", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(20), 13, GFLAGS),
+ GATE(0, "hclk_ahb1tom", "hclk_perihp", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(20), 15, GFLAGS),
+
+ GATE(PCLK_PERIHP_GRF, "pclk_perihp_grf", "pclk_perihp", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(20), 4, GFLAGS),
+ GATE(PCLK_PCIE, "pclk_pcie", "pclk_perihp", 0,
+ RK3399_CLKGATE_CON(20), 11, GFLAGS),
+ GATE(0, "pclk_perihp_noc", "pclk_perihp", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(20), 14, GFLAGS),
+ GATE(PCLK_HSICPHY, "pclk_hsicphy", "pclk_perihp", 0,
+ RK3399_CLKGATE_CON(31), 8, GFLAGS),
+
+ /* sdio & sdmmc */
+ COMPOSITE(0, "hclk_sd", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(13), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(12), 13, GFLAGS),
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_sd", 0,
+ RK3399_CLKGATE_CON(33), 8, GFLAGS),
+ GATE(0, "hclk_sdmmc_noc", "hclk_sd", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(33), 9, GFLAGS),
+
+ COMPOSITE(SCLK_SDIO, "clk_sdio", mux_pll_src_cpll_gpll_npll_ppll_upll_24m_p, 0,
+ RK3399_CLKSEL_CON(15), 8, 3, MFLAGS, 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(6), 0, GFLAGS),
+
+ COMPOSITE(SCLK_SDMMC, "clk_sdmmc", mux_pll_src_cpll_gpll_npll_ppll_upll_24m_p, 0,
+ RK3399_CLKSEL_CON(16), 8, 3, MFLAGS, 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(6), 1, GFLAGS),
+
+ MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc", RK3399_SDMMC_CON0, 1),
+ MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc", RK3399_SDMMC_CON1, 1),
+
+ MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio", RK3399_SDIO_CON0, 1),
+ MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio", RK3399_SDIO_CON1, 1),
+
+ /* pcie */
+ COMPOSITE(SCLK_PCIE_PM, "clk_pcie_pm", mux_pll_src_cpll_gpll_npll_24m_p, 0,
+ RK3399_CLKSEL_CON(17), 8, 3, MFLAGS, 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(6), 2, GFLAGS),
+
+ COMPOSITE_NOMUX(SCLK_PCIEPHY_REF100M, "clk_pciephy_ref100m", "npll", 0,
+ RK3399_CLKSEL_CON(18), 11, 5, DFLAGS,
+ RK3399_CLKGATE_CON(12), 6, GFLAGS),
+ MUX(SCLK_PCIEPHY_REF, "clk_pciephy_ref", mux_pll_src_24m_pciephy_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(18), 10, 1, MFLAGS),
+
+ COMPOSITE(0, "clk_pcie_core_cru", mux_pll_src_cpll_gpll_npll_p, 0,
+ RK3399_CLKSEL_CON(18), 8, 2, MFLAGS, 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(6), 3, GFLAGS),
+ MUX(SCLK_PCIE_CORE, "clk_pcie_core", mux_pciecore_cru_phy_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(18), 7, 1, MFLAGS),
+
+ /* emmc */
+ COMPOSITE(SCLK_EMMC, "clk_emmc", mux_pll_src_cpll_gpll_npll_upll_24m_p, 0,
+ RK3399_CLKSEL_CON(22), 8, 3, MFLAGS, 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(6), 14, GFLAGS),
+
+ GATE(0, "cpll_aclk_emmc_src", "cpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(6), 12, GFLAGS),
+ GATE(0, "gpll_aclk_emmc_src", "gpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(6), 13, GFLAGS),
+ COMPOSITE_NOGATE(ACLK_EMMC, "aclk_emmc", mux_aclk_emmc_p, CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(21), 7, 1, MFLAGS, 0, 5, DFLAGS),
+ GATE(ACLK_EMMC_CORE, "aclk_emmccore", "aclk_emmc", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(32), 8, GFLAGS),
+ GATE(ACLK_EMMC_NOC, "aclk_emmc_noc", "aclk_emmc", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(32), 9, GFLAGS),
+ GATE(ACLK_EMMC_GRF, "aclk_emmcgrf", "aclk_emmc", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(32), 10, GFLAGS),
+
+ /* perilp0 */
+ GATE(0, "cpll_aclk_perilp0_src", "cpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(7), 1, GFLAGS),
+ GATE(0, "gpll_aclk_perilp0_src", "gpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(7), 0, GFLAGS),
+ COMPOSITE(ACLK_PERILP0, "aclk_perilp0", mux_aclk_perilp0_p, CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(23), 7, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(7), 2, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_PERILP0, "hclk_perilp0", "aclk_perilp0", CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(23), 8, 2, DFLAGS,
+ RK3399_CLKGATE_CON(7), 3, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_PERILP0, "pclk_perilp0", "aclk_perilp0", 0,
+ RK3399_CLKSEL_CON(23), 12, 3, DFLAGS,
+ RK3399_CLKGATE_CON(7), 4, GFLAGS),
+
+ /* aclk_perilp0 gates */
+ GATE(ACLK_INTMEM, "aclk_intmem", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 0, GFLAGS),
+ GATE(ACLK_TZMA, "aclk_tzma", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 1, GFLAGS),
+ GATE(SCLK_INTMEM0, "clk_intmem0", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 2, GFLAGS),
+ GATE(SCLK_INTMEM1, "clk_intmem1", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 3, GFLAGS),
+ GATE(SCLK_INTMEM2, "clk_intmem2", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 4, GFLAGS),
+ GATE(SCLK_INTMEM3, "clk_intmem3", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 5, GFLAGS),
+ GATE(SCLK_INTMEM4, "clk_intmem4", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 6, GFLAGS),
+ GATE(SCLK_INTMEM5, "clk_intmem5", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 7, GFLAGS),
+ GATE(ACLK_DCF, "aclk_dcf", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 8, GFLAGS),
+ GATE(ACLK_DMAC0_PERILP, "aclk_dmac0_perilp", "aclk_perilp0", 0, RK3399_CLKGATE_CON(25), 5, GFLAGS),
+ GATE(ACLK_DMAC1_PERILP, "aclk_dmac1_perilp", "aclk_perilp0", 0, RK3399_CLKGATE_CON(25), 6, GFLAGS),
+ GATE(ACLK_PERILP0_NOC, "aclk_perilp0_noc", "aclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(25), 7, GFLAGS),
+
+ /* hclk_perilp0 gates */
+ GATE(HCLK_ROM, "hclk_rom", "hclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(24), 4, GFLAGS),
+ GATE(HCLK_M_CRYPTO0, "hclk_m_crypto0", "hclk_perilp0", 0, RK3399_CLKGATE_CON(24), 5, GFLAGS),
+ GATE(HCLK_S_CRYPTO0, "hclk_s_crypto0", "hclk_perilp0", 0, RK3399_CLKGATE_CON(24), 6, GFLAGS),
+ GATE(HCLK_M_CRYPTO1, "hclk_m_crypto1", "hclk_perilp0", 0, RK3399_CLKGATE_CON(24), 14, GFLAGS),
+ GATE(HCLK_S_CRYPTO1, "hclk_s_crypto1", "hclk_perilp0", 0, RK3399_CLKGATE_CON(24), 15, GFLAGS),
+ GATE(HCLK_PERILP0_NOC, "hclk_perilp0_noc", "hclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(25), 8, GFLAGS),
+
+ /* pclk_perilp0 gates */
+ GATE(PCLK_DCF, "pclk_dcf", "pclk_perilp0", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(23), 9, GFLAGS),
+
+ /* crypto */
+ COMPOSITE(SCLK_CRYPTO0, "clk_crypto0", mux_pll_src_cpll_gpll_ppll_p, 0,
+ RK3399_CLKSEL_CON(24), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(7), 7, GFLAGS),
+
+ COMPOSITE(SCLK_CRYPTO1, "clk_crypto1", mux_pll_src_cpll_gpll_ppll_p, 0,
+ RK3399_CLKSEL_CON(26), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(7), 8, GFLAGS),
+
+ /* cm0s_perilp */
+ GATE(0, "cpll_fclk_cm0s_src", "cpll", 0,
+ RK3399_CLKGATE_CON(7), 6, GFLAGS),
+ GATE(0, "gpll_fclk_cm0s_src", "gpll", 0,
+ RK3399_CLKGATE_CON(7), 5, GFLAGS),
+ COMPOSITE(FCLK_CM0S, "fclk_cm0s", mux_fclk_cm0s_p, 0,
+ RK3399_CLKSEL_CON(24), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(7), 9, GFLAGS),
+
+ /* fclk_cm0s gates */
+ GATE(SCLK_M0_PERILP, "sclk_m0_perilp", "fclk_cm0s", 0, RK3399_CLKGATE_CON(24), 8, GFLAGS),
+ GATE(HCLK_M0_PERILP, "hclk_m0_perilp", "fclk_cm0s", 0, RK3399_CLKGATE_CON(24), 9, GFLAGS),
+ GATE(DCLK_M0_PERILP, "dclk_m0_perilp", "fclk_cm0s", 0, RK3399_CLKGATE_CON(24), 10, GFLAGS),
+ GATE(SCLK_M0_PERILP_DEC, "clk_m0_perilp_dec", "fclk_cm0s", 0, RK3399_CLKGATE_CON(24), 11, GFLAGS),
+ GATE(HCLK_M0_PERILP_NOC, "hclk_m0_perilp_noc", "fclk_cm0s", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(25), 11, GFLAGS),
+
+ /* perilp1 */
+ GATE(0, "cpll_hclk_perilp1_src", "cpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(8), 1, GFLAGS),
+ GATE(0, "gpll_hclk_perilp1_src", "gpll", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(8), 0, GFLAGS),
+ COMPOSITE_NOGATE(HCLK_PERILP1, "hclk_perilp1", mux_hclk_perilp1_p, CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(25), 7, 1, MFLAGS, 0, 5, DFLAGS),
+ COMPOSITE_NOMUX(PCLK_PERILP1, "pclk_perilp1", "hclk_perilp1", CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(25), 8, 3, DFLAGS,
+ RK3399_CLKGATE_CON(8), 2, GFLAGS),
+
+ /* hclk_perilp1 gates */
+ GATE(0, "hclk_perilp1_noc", "hclk_perilp1", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(25), 9, GFLAGS),
+ GATE(0, "hclk_sdio_noc", "hclk_perilp1", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(25), 12, GFLAGS),
+ GATE(HCLK_I2S0_8CH, "hclk_i2s0", "hclk_perilp1", 0, RK3399_CLKGATE_CON(34), 0, GFLAGS),
+ GATE(HCLK_I2S1_8CH, "hclk_i2s1", "hclk_perilp1", 0, RK3399_CLKGATE_CON(34), 1, GFLAGS),
+ GATE(HCLK_I2S2_8CH, "hclk_i2s2", "hclk_perilp1", 0, RK3399_CLKGATE_CON(34), 2, GFLAGS),
+ GATE(HCLK_SPDIF, "hclk_spdif", "hclk_perilp1", 0, RK3399_CLKGATE_CON(34), 3, GFLAGS),
+ GATE(HCLK_SDIO, "hclk_sdio", "hclk_perilp1", 0, RK3399_CLKGATE_CON(34), 4, GFLAGS),
+ GATE(PCLK_SPI5, "pclk_spi5", "hclk_perilp1", 0, RK3399_CLKGATE_CON(34), 5, GFLAGS),
+ GATE(0, "hclk_sdioaudio_noc", "hclk_perilp1", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(34), 6, GFLAGS),
+
+ /* pclk_perilp1 gates */
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 0, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 1, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 2, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 3, GFLAGS),
+ GATE(PCLK_I2C7, "pclk_rki2c7", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 5, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_rki2c1", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 6, GFLAGS),
+ GATE(PCLK_I2C5, "pclk_rki2c5", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 7, GFLAGS),
+ GATE(PCLK_I2C6, "pclk_rki2c6", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 8, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_rki2c2", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 9, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_rki2c3", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 10, GFLAGS),
+ GATE(PCLK_MAILBOX0, "pclk_mailbox0", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 11, GFLAGS),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 12, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 13, GFLAGS),
+ GATE(PCLK_EFUSE1024NS, "pclk_efuse1024ns", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 14, GFLAGS),
+ GATE(PCLK_EFUSE1024S, "pclk_efuse1024s", "pclk_perilp1", 0, RK3399_CLKGATE_CON(22), 15, GFLAGS),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_perilp1", 0, RK3399_CLKGATE_CON(23), 10, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_perilp1", 0, RK3399_CLKGATE_CON(23), 11, GFLAGS),
+ GATE(PCLK_SPI2, "pclk_spi2", "pclk_perilp1", 0, RK3399_CLKGATE_CON(23), 12, GFLAGS),
+ GATE(PCLK_SPI4, "pclk_spi4", "pclk_perilp1", 0, RK3399_CLKGATE_CON(23), 13, GFLAGS),
+ GATE(PCLK_PERIHP_GRF, "pclk_perilp_sgrf", "pclk_perilp1", 0, RK3399_CLKGATE_CON(24), 13, GFLAGS),
+ GATE(0, "pclk_perilp1_noc", "pclk_perilp1", 0, RK3399_CLKGATE_CON(25), 10, GFLAGS),
+
+ /* saradc */
+ COMPOSITE_NOMUX(SCLK_SARADC, "clk_saradc", "xin24m", 0,
+ RK3399_CLKSEL_CON(26), 8, 8, DFLAGS,
+ RK3399_CLKGATE_CON(9), 11, GFLAGS),
+
+ /* tsadc */
+ COMPOSITE(SCLK_TSADC, "clk_tsadc", mux_pll_p, 0,
+ RK3399_CLKSEL_CON(27), 15, 1, MFLAGS, 0, 10, DFLAGS,
+ RK3399_CLKGATE_CON(9), 10, GFLAGS),
+
+ /* cif_testout */
+ MUX(0, "clk_testout1_pll_src", mux_pll_src_cpll_gpll_npll_p, 0,
+ RK3399_CLKSEL_CON(38), 6, 2, MFLAGS),
+ COMPOSITE(0, "clk_testout1", mux_clk_testout1_p, 0,
+ RK3399_CLKSEL_CON(38), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(13), 14, GFLAGS),
+
+ MUX(0, "clk_testout2_pll_src", mux_pll_src_cpll_gpll_npll_p, 0,
+ RK3399_CLKSEL_CON(38), 14, 2, MFLAGS),
+ COMPOSITE(0, "clk_testout2", mux_clk_testout2_p, 0,
+ RK3399_CLKSEL_CON(38), 13, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(13), 15, GFLAGS),
+
+ /* vio */
+ COMPOSITE(ACLK_VIO, "aclk_vio", mux_pll_src_cpll_gpll_ppll_p, CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(42), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(11), 10, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_VIO, "pclk_vio", "aclk_vio", 0,
+ RK3399_CLKSEL_CON(43), 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(11), 1, GFLAGS),
+
+ GATE(ACLK_VIO_NOC, "aclk_vio_noc", "aclk_vio", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(29), 0, GFLAGS),
+
+ GATE(PCLK_MIPI_DSI0, "pclk_mipi_dsi0", "pclk_vio", 0,
+ RK3399_CLKGATE_CON(29), 1, GFLAGS),
+ GATE(PCLK_MIPI_DSI1, "pclk_mipi_dsi1", "pclk_vio", 0,
+ RK3399_CLKGATE_CON(29), 2, GFLAGS),
+ GATE(PCLK_VIO_GRF, "pclk_vio_grf", "pclk_vio", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(29), 12, GFLAGS),
+
+ /* hdcp */
+ COMPOSITE(ACLK_HDCP, "aclk_hdcp", mux_pll_src_cpll_gpll_ppll_p, 0,
+ RK3399_CLKSEL_CON(42), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(11), 12, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_HDCP, "hclk_hdcp", "aclk_hdcp", 0,
+ RK3399_CLKSEL_CON(43), 5, 5, DFLAGS,
+ RK3399_CLKGATE_CON(11), 3, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_HDCP, "pclk_hdcp", "aclk_hdcp", 0,
+ RK3399_CLKSEL_CON(43), 10, 5, DFLAGS,
+ RK3399_CLKGATE_CON(11), 10, GFLAGS),
+
+ GATE(ACLK_HDCP_NOC, "aclk_hdcp_noc", "aclk_hdcp", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(29), 4, GFLAGS),
+ GATE(ACLK_HDCP22, "aclk_hdcp22", "aclk_hdcp", 0,
+ RK3399_CLKGATE_CON(29), 10, GFLAGS),
+
+ GATE(HCLK_HDCP_NOC, "hclk_hdcp_noc", "hclk_hdcp", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(29), 5, GFLAGS),
+ GATE(HCLK_HDCP22, "hclk_hdcp22", "hclk_hdcp", 0,
+ RK3399_CLKGATE_CON(29), 9, GFLAGS),
+
+ GATE(PCLK_HDCP_NOC, "pclk_hdcp_noc", "pclk_hdcp", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(29), 3, GFLAGS),
+ GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "pclk_hdcp", 0,
+ RK3399_CLKGATE_CON(29), 6, GFLAGS),
+ GATE(PCLK_DP_CTRL, "pclk_dp_ctrl", "pclk_hdcp", 0,
+ RK3399_CLKGATE_CON(29), 7, GFLAGS),
+ GATE(PCLK_HDCP22, "pclk_hdcp22", "pclk_hdcp", 0,
+ RK3399_CLKGATE_CON(29), 8, GFLAGS),
+ GATE(PCLK_GASKET, "pclk_gasket", "pclk_hdcp", 0,
+ RK3399_CLKGATE_CON(29), 11, GFLAGS),
+
+ /* edp */
+ COMPOSITE(SCLK_DP_CORE, "clk_dp_core", mux_pll_src_npll_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(46), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(11), 8, GFLAGS),
+
+ COMPOSITE(PCLK_EDP, "pclk_edp", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(44), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(11), 11, GFLAGS),
+ GATE(PCLK_EDP_NOC, "pclk_edp_noc", "pclk_edp", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(32), 12, GFLAGS),
+ GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "pclk_edp", 0,
+ RK3399_CLKGATE_CON(32), 13, GFLAGS),
+
+ /* hdmi */
+ GATE(SCLK_HDMI_SFR, "clk_hdmi_sfr", "xin24m", 0,
+ RK3399_CLKGATE_CON(11), 6, GFLAGS),
+
+ COMPOSITE(SCLK_HDMI_CEC, "clk_hdmi_cec", mux_pll_p, 0,
+ RK3399_CLKSEL_CON(45), 15, 1, MFLAGS, 0, 10, DFLAGS,
+ RK3399_CLKGATE_CON(11), 7, GFLAGS),
+
+ /* vop0 */
+ COMPOSITE(ACLK_VOP0_PRE, "aclk_vop0_pre", mux_pll_src_vpll_cpll_gpll_npll_p, 0,
+ RK3399_CLKSEL_CON(47), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(10), 8, GFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_vop0_pre", "aclk_vop0_pre", 0,
+ RK3399_CLKSEL_CON(47), 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(10), 9, GFLAGS),
+
+ GATE(ACLK_VOP0, "aclk_vop0", "aclk_vop0_pre", 0,
+ RK3399_CLKGATE_CON(28), 3, GFLAGS),
+ GATE(ACLK_VOP0_NOC, "aclk_vop0_noc", "aclk_vop0_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(28), 1, GFLAGS),
+
+ GATE(HCLK_VOP0, "hclk_vop0", "hclk_vop0_pre", 0,
+ RK3399_CLKGATE_CON(28), 2, GFLAGS),
+ GATE(HCLK_VOP0_NOC, "hclk_vop0_noc", "hclk_vop0_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(28), 0, GFLAGS),
+
+ COMPOSITE(DCLK_VOP0_DIV, "dclk_vop0_div", mux_pll_src_vpll_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(49), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3399_CLKGATE_CON(10), 12, GFLAGS),
+
+ COMPOSITE_FRACMUX_NOGATE(0, "dclk_vop0_frac", "dclk_vop0_div", CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(106), 0,
+ &rk3399_dclk_vop0_fracmux),
+
+ COMPOSITE(SCLK_VOP0_PWM, "clk_vop0_pwm", mux_pll_src_vpll_cpll_gpll_24m_p, 0,
+ RK3399_CLKSEL_CON(51), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(10), 14, GFLAGS),
+
+ /* vop1 */
+ COMPOSITE(ACLK_VOP1_PRE, "aclk_vop1_pre", mux_pll_src_vpll_cpll_gpll_npll_p, 0,
+ RK3399_CLKSEL_CON(48), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(10), 10, GFLAGS),
+ COMPOSITE_NOMUX(0, "hclk_vop1_pre", "aclk_vop1_pre", 0,
+ RK3399_CLKSEL_CON(48), 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(10), 11, GFLAGS),
+
+ GATE(ACLK_VOP1, "aclk_vop1", "aclk_vop1_pre", 0,
+ RK3399_CLKGATE_CON(28), 7, GFLAGS),
+ GATE(ACLK_VOP1_NOC, "aclk_vop1_noc", "aclk_vop1_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(28), 5, GFLAGS),
+
+ GATE(HCLK_VOP1, "hclk_vop1", "hclk_vop1_pre", 0,
+ RK3399_CLKGATE_CON(28), 6, GFLAGS),
+ GATE(HCLK_VOP1_NOC, "hclk_vop1_noc", "hclk_vop1_pre", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(28), 4, GFLAGS),
+
+ COMPOSITE(DCLK_VOP1_DIV, "dclk_vop1_div", mux_pll_src_vpll_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(50), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RK3399_CLKGATE_CON(10), 13, GFLAGS),
+
+ COMPOSITE_FRACMUX_NOGATE(0, "dclk_vop1_frac", "dclk_vop1_div", CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(107), 0,
+ &rk3399_dclk_vop1_fracmux),
+
+ COMPOSITE(SCLK_VOP1_PWM, "clk_vop1_pwm", mux_pll_src_vpll_cpll_gpll_24m_p, CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(52), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(10), 15, GFLAGS),
+
+ /* isp */
+ COMPOSITE(ACLK_ISP0, "aclk_isp0", mux_pll_src_cpll_gpll_ppll_p, 0,
+ RK3399_CLKSEL_CON(53), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(12), 8, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_ISP0, "hclk_isp0", "aclk_isp0", 0,
+ RK3399_CLKSEL_CON(53), 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(12), 9, GFLAGS),
+
+ GATE(ACLK_ISP0_NOC, "aclk_isp0_noc", "aclk_isp0", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(27), 1, GFLAGS),
+ GATE(ACLK_ISP0_WRAPPER, "aclk_isp0_wrapper", "aclk_isp0", 0,
+ RK3399_CLKGATE_CON(27), 5, GFLAGS),
+ GATE(HCLK_ISP1_WRAPPER, "hclk_isp1_wrapper", "aclk_isp0", 0,
+ RK3399_CLKGATE_CON(27), 7, GFLAGS),
+
+ GATE(HCLK_ISP0_NOC, "hclk_isp0_noc", "hclk_isp0", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(27), 0, GFLAGS),
+ GATE(HCLK_ISP0_WRAPPER, "hclk_isp0_wrapper", "hclk_isp0", 0,
+ RK3399_CLKGATE_CON(27), 4, GFLAGS),
+
+ COMPOSITE(SCLK_ISP0, "clk_isp0", mux_pll_src_cpll_gpll_npll_p, 0,
+ RK3399_CLKSEL_CON(55), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(11), 4, GFLAGS),
+
+ COMPOSITE(ACLK_ISP1, "aclk_isp1", mux_pll_src_cpll_gpll_ppll_p, 0,
+ RK3399_CLKSEL_CON(54), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3399_CLKGATE_CON(12), 10, GFLAGS),
+ COMPOSITE_NOMUX(HCLK_ISP1, "hclk_isp1", "aclk_isp1", 0,
+ RK3399_CLKSEL_CON(54), 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(12), 11, GFLAGS),
+
+ GATE(ACLK_ISP1_NOC, "aclk_isp1_noc", "aclk_isp1", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(27), 3, GFLAGS),
+
+ GATE(HCLK_ISP1_NOC, "hclk_isp1_noc", "hclk_isp1", CLK_IGNORE_UNUSED,
+ RK3399_CLKGATE_CON(27), 2, GFLAGS),
+ GATE(ACLK_ISP1_WRAPPER, "aclk_isp1_wrapper", "hclk_isp1", 0,
+ RK3399_CLKGATE_CON(27), 8, GFLAGS),
+
+ COMPOSITE(SCLK_ISP1, "clk_isp1", mux_pll_src_cpll_gpll_npll_p, 0,
+ RK3399_CLKSEL_CON(55), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(11), 5, GFLAGS),
+
+ /*
+	 * pclkin_cifinv is selected by default via the GRF_SOC_CON20[9] (GSC20_9)
+	 * setting, so we ignore the mux and model the clock nodes as follows:
+ *
+ * pclkin_cifinv --|-------\
+ * |GSC20_9|-- pclkin_cifmux -- |G27_6| -- pclkin_isp1_wrapper
+ * pclkin_cif --|-------/
+ */
+ GATE(PCLK_ISP1_WRAPPER, "pclkin_isp1_wrapper", "pclkin_cif", 0,
+ RK3399_CLKGATE_CON(27), 6, GFLAGS),
+
+ /* cif */
+ COMPOSITE_NODIV(0, "clk_cifout_src", mux_pll_src_cpll_gpll_npll_p, 0,
+ RK3399_CLKSEL_CON(56), 6, 2, MFLAGS,
+ RK3399_CLKGATE_CON(10), 7, GFLAGS),
+
+ COMPOSITE_NOGATE(SCLK_CIF_OUT, "clk_cifout", mux_clk_cif_p, 0,
+ RK3399_CLKSEL_CON(56), 5, 1, MFLAGS, 0, 5, DFLAGS),
+
+ /* gic */
+ COMPOSITE(ACLK_GIC_PRE, "aclk_gic_pre", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
+ RK3399_CLKSEL_CON(56), 15, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3399_CLKGATE_CON(12), 12, GFLAGS),
+
+ GATE(ACLK_GIC, "aclk_gic", "aclk_gic_pre", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(33), 0, GFLAGS),
+ GATE(ACLK_GIC_NOC, "aclk_gic_noc", "aclk_gic_pre", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(33), 1, GFLAGS),
+ GATE(ACLK_GIC_ADB400_CORE_L_2_GIC, "aclk_gic_adb400_core_l_2_gic", "aclk_gic_pre", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(33), 2, GFLAGS),
+ GATE(ACLK_GIC_ADB400_CORE_B_2_GIC, "aclk_gic_adb400_core_b_2_gic", "aclk_gic_pre", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(33), 3, GFLAGS),
+ GATE(ACLK_GIC_ADB400_GIC_2_CORE_L, "aclk_gic_adb400_gic_2_core_l", "aclk_gic_pre", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(33), 4, GFLAGS),
+ GATE(ACLK_GIC_ADB400_GIC_2_CORE_B, "aclk_gic_adb400_gic_2_core_b", "aclk_gic_pre", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(33), 5, GFLAGS),
+
+ /* alive */
+ /* pclk_alive_gpll_src is controlled by PMUGRF_SOC_CON0[6] */
+ DIV(PCLK_ALIVE, "pclk_alive", "gpll", 0,
+ RK3399_CLKSEL_CON(57), 0, 5, DFLAGS),
+
+ GATE(PCLK_USBPHY_MUX_G, "pclk_usbphy_mux_g", "pclk_alive", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(21), 4, GFLAGS),
+ GATE(PCLK_UPHY0_TCPHY_G, "pclk_uphy0_tcphy_g", "pclk_alive", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(21), 5, GFLAGS),
+ GATE(PCLK_UPHY0_TCPD_G, "pclk_uphy0_tcpd_g", "pclk_alive", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(21), 6, GFLAGS),
+ GATE(PCLK_UPHY1_TCPHY_G, "pclk_uphy1_tcphy_g", "pclk_alive", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(21), 8, GFLAGS),
+ GATE(PCLK_UPHY1_TCPD_G, "pclk_uphy1_tcpd_g", "pclk_alive", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(21), 9, GFLAGS),
+
+ GATE(PCLK_GRF, "pclk_grf", "pclk_alive", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(31), 1, GFLAGS),
+ GATE(PCLK_INTR_ARB, "pclk_intr_arb", "pclk_alive", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(31), 2, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_alive", 0, RK3399_CLKGATE_CON(31), 3, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_alive", 0, RK3399_CLKGATE_CON(31), 4, GFLAGS),
+ GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_alive", 0, RK3399_CLKGATE_CON(31), 5, GFLAGS),
+ GATE(PCLK_TIMER0, "pclk_timer0", "pclk_alive", 0, RK3399_CLKGATE_CON(31), 6, GFLAGS),
+ GATE(PCLK_TIMER1, "pclk_timer1", "pclk_alive", 0, RK3399_CLKGATE_CON(31), 7, GFLAGS),
+ GATE(PCLK_PMU_INTR_ARB, "pclk_pmu_intr_arb", "pclk_alive", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(31), 9, GFLAGS),
+ GATE(PCLK_SGRF, "pclk_sgrf", "pclk_alive", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(31), 10, GFLAGS),
+
+ GATE(SCLK_MIPIDPHY_REF, "clk_mipidphy_ref", "xin24m", 0, RK3399_CLKGATE_CON(11), 14, GFLAGS),
+ GATE(SCLK_DPHY_PLL, "clk_dphy_pll", "clk_mipidphy_ref", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(21), 0, GFLAGS),
+
+ GATE(SCLK_MIPIDPHY_CFG, "clk_mipidphy_cfg", "xin24m", 0, RK3399_CLKGATE_CON(11), 15, GFLAGS),
+ GATE(SCLK_DPHY_TX0_CFG, "clk_dphy_tx0_cfg", "clk_mipidphy_cfg", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(21), 1, GFLAGS),
+ GATE(SCLK_DPHY_TX1RX1_CFG, "clk_dphy_tx1rx1_cfg", "clk_mipidphy_cfg", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(21), 2, GFLAGS),
+ GATE(SCLK_DPHY_RX0_CFG, "clk_dphy_rx0_cfg", "clk_mipidphy_cfg", CLK_IGNORE_UNUSED, RK3399_CLKGATE_CON(21), 3, GFLAGS),
+
+ /* testout */
+ MUX(0, "clk_test_pre", mux_pll_src_cpll_gpll_p, CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(58), 7, 1, MFLAGS),
+ COMPOSITE_FRAC(0, "clk_test_frac", "clk_test_pre", CLK_SET_RATE_PARENT,
+ RK3399_CLKSEL_CON(105), 0,
+ RK3399_CLKGATE_CON(13), 9, GFLAGS),
+
+ DIV(0, "clk_test_24m", "xin24m", 0,
+ RK3399_CLKSEL_CON(57), 6, 10, DFLAGS),
+
+ /* spi */
+ COMPOSITE(SCLK_SPI0, "clk_spi0", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(59), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(9), 12, GFLAGS),
+
+ COMPOSITE(SCLK_SPI1, "clk_spi1", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(59), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3399_CLKGATE_CON(9), 13, GFLAGS),
+
+ COMPOSITE(SCLK_SPI2, "clk_spi2", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(60), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(9), 14, GFLAGS),
+
+ COMPOSITE(SCLK_SPI4, "clk_spi4", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(60), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3399_CLKGATE_CON(9), 15, GFLAGS),
+
+ COMPOSITE(SCLK_SPI5, "clk_spi5", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(58), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3399_CLKGATE_CON(13), 13, GFLAGS),
+
+ /* i2c */
+ COMPOSITE(SCLK_I2C1, "clk_i2c1", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(61), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(10), 0, GFLAGS),
+
+ COMPOSITE(SCLK_I2C2, "clk_i2c2", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(62), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(10), 2, GFLAGS),
+
+ COMPOSITE(SCLK_I2C3, "clk_i2c3", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(63), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3399_CLKGATE_CON(10), 4, GFLAGS),
+
+ COMPOSITE(SCLK_I2C5, "clk_i2c5", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(61), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3399_CLKGATE_CON(10), 1, GFLAGS),
+
+ COMPOSITE(SCLK_I2C6, "clk_i2c6", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(62), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3399_CLKGATE_CON(10), 3, GFLAGS),
+
+ COMPOSITE(SCLK_I2C7, "clk_i2c7", mux_pll_src_cpll_gpll_p, 0,
+ RK3399_CLKSEL_CON(63), 15, 1, MFLAGS, 8, 7, DFLAGS,
+ RK3399_CLKGATE_CON(10), 5, GFLAGS),
+
+ /* timer */
+ GATE(SCLK_TIMER00, "clk_timer00", "xin24m", 0, RK3399_CLKGATE_CON(26), 0, GFLAGS),
+ GATE(SCLK_TIMER01, "clk_timer01", "xin24m", 0, RK3399_CLKGATE_CON(26), 1, GFLAGS),
+ GATE(SCLK_TIMER02, "clk_timer02", "xin24m", 0, RK3399_CLKGATE_CON(26), 2, GFLAGS),
+ GATE(SCLK_TIMER03, "clk_timer03", "xin24m", 0, RK3399_CLKGATE_CON(26), 3, GFLAGS),
+ GATE(SCLK_TIMER04, "clk_timer04", "xin24m", 0, RK3399_CLKGATE_CON(26), 4, GFLAGS),
+ GATE(SCLK_TIMER05, "clk_timer05", "xin24m", 0, RK3399_CLKGATE_CON(26), 5, GFLAGS),
+ GATE(SCLK_TIMER06, "clk_timer06", "xin24m", 0, RK3399_CLKGATE_CON(26), 6, GFLAGS),
+ GATE(SCLK_TIMER07, "clk_timer07", "xin24m", 0, RK3399_CLKGATE_CON(26), 7, GFLAGS),
+ GATE(SCLK_TIMER08, "clk_timer08", "xin24m", 0, RK3399_CLKGATE_CON(26), 8, GFLAGS),
+ GATE(SCLK_TIMER09, "clk_timer09", "xin24m", 0, RK3399_CLKGATE_CON(26), 9, GFLAGS),
+ GATE(SCLK_TIMER10, "clk_timer10", "xin24m", 0, RK3399_CLKGATE_CON(26), 10, GFLAGS),
+ GATE(SCLK_TIMER11, "clk_timer11", "xin24m", 0, RK3399_CLKGATE_CON(26), 11, GFLAGS),
+
+ /* clk_test */
+ /* clk_test_pre is controlled by CRU_MISC_CON[3] */
+ COMPOSITE_NOMUX(0, "clk_test", "clk_test_pre", CLK_IGNORE_UNUSED,
+			RK3399_CLKSEL_CON(58), 0, 5, DFLAGS,
+			RK3399_CLKGATE_CON(13), 11, GFLAGS),
+};
+
+static struct rockchip_clk_branch rk3399_clk_pmu_branches[] __initdata = {
+ /*
+ * PMU CRU Clock-Architecture
+ */
+
+ GATE(0, "fclk_cm0s_pmu_ppll_src", "ppll", 0,
+ RK3399_PMU_CLKGATE_CON(0), 1, GFLAGS),
+
+ COMPOSITE_NOGATE(FCLK_CM0S_SRC_PMU, "fclk_cm0s_src_pmu", mux_fclk_cm0s_pmu_ppll_p, 0,
+ RK3399_PMU_CLKSEL_CON(0), 15, 1, MFLAGS, 8, 5, DFLAGS),
+
+ COMPOSITE(SCLK_SPI3_PMU, "clk_spi3_pmu", mux_24m_ppll_p, 0,
+ RK3399_PMU_CLKSEL_CON(1), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3399_PMU_CLKGATE_CON(0), 2, GFLAGS),
+
+ COMPOSITE(0, "clk_wifi_div", mux_ppll_24m_p, CLK_IGNORE_UNUSED,
+ RK3399_PMU_CLKSEL_CON(1), 13, 1, MFLAGS, 8, 5, DFLAGS,
+ RK3399_PMU_CLKGATE_CON(0), 8, GFLAGS),
+
+ COMPOSITE_FRACMUX_NOGATE(0, "clk_wifi_frac", "clk_wifi_div", CLK_SET_RATE_PARENT,
+ RK3399_PMU_CLKSEL_CON(7), 0,
+ &rk3399_pmuclk_wifi_fracmux),
+
+ MUX(0, "clk_timer_src_pmu", mux_pll_p, CLK_IGNORE_UNUSED,
+ RK3399_PMU_CLKSEL_CON(1), 15, 1, MFLAGS),
+
+ COMPOSITE_NOMUX(SCLK_I2C0_PMU, "clk_i2c0_pmu", "ppll", 0,
+ RK3399_PMU_CLKSEL_CON(2), 0, 7, DFLAGS,
+ RK3399_PMU_CLKGATE_CON(0), 9, GFLAGS),
+
+ COMPOSITE_NOMUX(SCLK_I2C4_PMU, "clk_i2c4_pmu", "ppll", 0,
+ RK3399_PMU_CLKSEL_CON(3), 0, 7, DFLAGS,
+ RK3399_PMU_CLKGATE_CON(0), 10, GFLAGS),
+
+ COMPOSITE_NOMUX(SCLK_I2C8_PMU, "clk_i2c8_pmu", "ppll", 0,
+ RK3399_PMU_CLKSEL_CON(2), 8, 7, DFLAGS,
+ RK3399_PMU_CLKGATE_CON(0), 11, GFLAGS),
+
+ DIV(0, "clk_32k_suspend_pmu", "xin24m", CLK_IGNORE_UNUSED,
+ RK3399_PMU_CLKSEL_CON(4), 0, 10, DFLAGS),
+ MUX(0, "clk_testout_2io", mux_clk_testout2_2io_p, CLK_IGNORE_UNUSED,
+ RK3399_PMU_CLKSEL_CON(4), 15, 1, MFLAGS),
+
+ COMPOSITE(0, "clk_uart4_div", mux_24m_ppll_p, 0,
+ RK3399_PMU_CLKSEL_CON(5), 10, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3399_PMU_CLKGATE_CON(0), 5, GFLAGS),
+
+ COMPOSITE_FRACMUX(0, "clk_uart4_frac", "clk_uart4_div", CLK_SET_RATE_PARENT,
+ RK3399_PMU_CLKSEL_CON(6), 0,
+ RK3399_PMU_CLKGATE_CON(0), 6, GFLAGS,
+ &rk3399_uart4_pmu_fracmux),
+
+ DIV(PCLK_SRC_PMU, "pclk_pmu_src", "ppll", CLK_IGNORE_UNUSED,
+ RK3399_PMU_CLKSEL_CON(0), 0, 5, DFLAGS),
+
+ /* pmu clock gates */
+ GATE(SCLK_TIMER12_PMU, "clk_timer0_pmu", "clk_timer_src_pmu", 0, RK3399_PMU_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(SCLK_TIMER13_PMU, "clk_timer1_pmu", "clk_timer_src_pmu", 0, RK3399_PMU_CLKGATE_CON(0), 4, GFLAGS),
+
+ GATE(SCLK_PVTM_PMU, "clk_pvtm_pmu", "xin24m", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(0), 7, GFLAGS),
+
+ GATE(PCLK_PMU, "pclk_pmu", "pclk_pmu_src", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(1), 0, GFLAGS),
+ GATE(PCLK_PMUGRF_PMU, "pclk_pmugrf_pmu", "pclk_pmu_src", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(PCLK_INTMEM1_PMU, "pclk_intmem1_pmu", "pclk_pmu_src", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_GPIO0_PMU, "pclk_gpio0_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 3, GFLAGS),
+ GATE(PCLK_GPIO1_PMU, "pclk_gpio1_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 4, GFLAGS),
+ GATE(PCLK_SGRF_PMU, "pclk_sgrf_pmu", "pclk_pmu_src", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(1), 5, GFLAGS),
+ GATE(PCLK_NOC_PMU, "pclk_noc_pmu", "pclk_pmu_src", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(1), 6, GFLAGS),
+ GATE(PCLK_I2C0_PMU, "pclk_i2c0_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 7, GFLAGS),
+ GATE(PCLK_I2C4_PMU, "pclk_i2c4_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 8, GFLAGS),
+ GATE(PCLK_I2C8_PMU, "pclk_i2c8_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 9, GFLAGS),
+ GATE(PCLK_RKPWM_PMU, "pclk_rkpwm_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 10, GFLAGS),
+ GATE(PCLK_SPI3_PMU, "pclk_spi3_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 11, GFLAGS),
+ GATE(PCLK_TIMER_PMU, "pclk_timer_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 12, GFLAGS),
+ GATE(PCLK_MAILBOX_PMU, "pclk_mailbox_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 13, GFLAGS),
+ GATE(PCLK_UART4_PMU, "pclk_uart4_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 14, GFLAGS),
+ GATE(PCLK_WDT_M0_PMU, "pclk_wdt_m0_pmu", "pclk_pmu_src", 0, RK3399_PMU_CLKGATE_CON(1), 15, GFLAGS),
+
+ GATE(FCLK_CM0S_PMU, "fclk_cm0s_pmu", "fclk_cm0s_src_pmu", 0, RK3399_PMU_CLKGATE_CON(2), 0, GFLAGS),
+ GATE(SCLK_CM0S_PMU, "sclk_cm0s_pmu", "fclk_cm0s_src_pmu", 0, RK3399_PMU_CLKGATE_CON(2), 1, GFLAGS),
+ GATE(HCLK_CM0S_PMU, "hclk_cm0s_pmu", "fclk_cm0s_src_pmu", 0, RK3399_PMU_CLKGATE_CON(2), 2, GFLAGS),
+ GATE(DCLK_CM0S_PMU, "dclk_cm0s_pmu", "fclk_cm0s_src_pmu", 0, RK3399_PMU_CLKGATE_CON(2), 3, GFLAGS),
+ GATE(HCLK_NOC_PMU, "hclk_noc_pmu", "fclk_cm0s_src_pmu", CLK_IGNORE_UNUSED, RK3399_PMU_CLKGATE_CON(2), 5, GFLAGS),
+};
+
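+/*
+ * Interconnect and bus clocks with no Linux consumer; passed to
+ * rockchip_clk_protect_critical() below so they are kept enabled.
+ */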
+static const char *const rk3399_cru_critical_clocks[] __initconst = {
+ "aclk_cci_pre",
+ "aclk_gic",
+ "aclk_gic_noc",
+	"pclk_perilp0",
+ "hclk_perilp0",
+ "hclk_perilp0_noc",
+ "pclk_perilp1",
+ "pclk_perilp1_noc",
+ "pclk_perihp",
+ "pclk_perihp_noc",
+ "hclk_perihp",
+ "aclk_perihp",
+ "aclk_perihp_noc",
+ "aclk_perilp0",
+ "aclk_perilp0_noc",
+ "hclk_perilp1",
+ "hclk_perilp1_noc",
+ "aclk_dmac0_perilp",
+ "gpll_hclk_perilp1_src",
+ "gpll_aclk_perilp0_src",
+ "gpll_aclk_perihp_src",
+};
+
+static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
+ "ppll",
+ "pclk_pmu_src",
+ "fclk_cm0s_src_pmu",
+ "clk_timer_src_pmu",
+};
+
+static void __init rk3399_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rk3399_pll_clks,
+ ARRAY_SIZE(rk3399_pll_clks), -1);
+
+ rockchip_clk_register_branches(ctx, rk3399_clk_branches,
+ ARRAY_SIZE(rk3399_clk_branches));
+
+ rockchip_clk_protect_critical(rk3399_cru_critical_clocks,
+ ARRAY_SIZE(rk3399_cru_critical_clocks));
+
+ rockchip_clk_register_armclk(ctx, ARMCLKL, "armclkl",
+ mux_armclkl_p, ARRAY_SIZE(mux_armclkl_p),
+ &rk3399_cpuclkl_data, rk3399_cpuclkl_rates,
+ ARRAY_SIZE(rk3399_cpuclkl_rates));
+
+ rockchip_clk_register_armclk(ctx, ARMCLKB, "armclkb",
+ mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p),
+ &rk3399_cpuclkb_data, rk3399_cpuclkb_rates,
+ ARRAY_SIZE(rk3399_cpuclkb_rates));
+
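+	/* 21 hiword-mask soft-reset registers, starting at SOFTRST_CON(0) */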
+ rockchip_register_softrst(np, 21, reg_base + RK3399_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_register_restart_notifier(ctx, RK3399_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE(rk3399_cru, "rockchip,rk3399-cru", rk3399_clk_init);
+
+static void __init rk3399_pmu_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru pmu region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, CLKPMU_NR_CLKS);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip pmu clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rk3399_pmu_pll_clks,
+ ARRAY_SIZE(rk3399_pmu_pll_clks), -1);
+
+ rockchip_clk_register_branches(ctx, rk3399_clk_pmu_branches,
+ ARRAY_SIZE(rk3399_clk_pmu_branches));
+
+ rockchip_clk_protect_critical(rk3399_pmucru_critical_clocks,
+ ARRAY_SIZE(rk3399_pmucru_critical_clocks));
+
+ rockchip_register_softrst(np, 2, reg_base + RK3399_PMU_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+ rockchip_clk_of_add_provider(np, ctx);
+}
+CLK_OF_DECLARE(rk3399_cru_pmu, "rockchip,rk3399-pmucru", rk3399_pmu_clk_init);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index ec06350c7..7ffd13499 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -2,6 +2,9 @@
* Copyright (c) 2014 MundoReader S.L.
* Author: Heiko Stuebner <heiko@sntech.de>
*
+ * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
+ * Author: Xing Zheng <zhengxing@rock-chips.com>
+ *
* based on
*
* samsung/clk.c
@@ -39,7 +42,8 @@
* sometimes without one of those components.
*/
static struct clk *rockchip_clk_register_branch(const char *name,
- const char *const *parent_names, u8 num_parents, void __iomem *base,
+ const char *const *parent_names, u8 num_parents,
+ void __iomem *base,
int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
u8 div_shift, u8 div_width, u8 div_flags,
struct clk_div_table *div_table, int gate_offset,
@@ -136,9 +140,11 @@ static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
__func__, event, ndata->old_rate, ndata->new_rate);
if (event == PRE_RATE_CHANGE) {
- frac->rate_change_idx = frac->mux_ops->get_parent(&frac_mux->hw);
+ frac->rate_change_idx =
+ frac->mux_ops->get_parent(&frac_mux->hw);
if (frac->rate_change_idx != frac->mux_frac_idx) {
- frac->mux_ops->set_parent(&frac_mux->hw, frac->mux_frac_idx);
+ frac->mux_ops->set_parent(&frac_mux->hw,
+ frac->mux_frac_idx);
frac->rate_change_remuxed = 1;
}
} else if (event == POST_RATE_CHANGE) {
@@ -149,7 +155,8 @@ static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
* reaches the mux itself.
*/
if (frac->rate_change_remuxed) {
- frac->mux_ops->set_parent(&frac_mux->hw, frac->rate_change_idx);
+ frac->mux_ops->set_parent(&frac_mux->hw,
+ frac->rate_change_idx);
frac->rate_change_remuxed = 0;
}
}
@@ -157,7 +164,8 @@ static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
return notifier_from_errno(ret);
}
-static struct clk *rockchip_clk_register_frac_branch(const char *name,
+static struct clk *rockchip_clk_register_frac_branch(
+ struct rockchip_clk_provider *ctx, const char *name,
const char *const *parent_names, u8 num_parents,
void __iomem *base, int muxdiv_offset, u8 div_flags,
int gate_offset, u8 gate_shift, u8 gate_flags,
@@ -250,7 +258,7 @@ static struct clk *rockchip_clk_register_frac_branch(const char *name,
if (IS_ERR(mux_clk))
return clk;
- rockchip_clk_add_lookup(mux_clk, child->id);
+ rockchip_clk_add_lookup(ctx, mux_clk, child->id);
/* notifier on the fraction divider to catch rate changes */
if (frac->mux_frac_idx >= 0) {
@@ -314,66 +322,82 @@ static struct clk *rockchip_clk_register_factor_branch(const char *name,
return clk;
}
-static DEFINE_SPINLOCK(clk_lock);
-static struct clk **clk_table;
-static void __iomem *reg_base;
-static struct clk_onecell_data clk_data;
-static struct device_node *cru_node;
-static struct regmap *grf;
-
-void __init rockchip_clk_init(struct device_node *np, void __iomem *base,
- unsigned long nr_clks)
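+/*
+ * Allocate and initialize a clock-provider context. SoC init code (see the
+ * rk3399 init above) typically follows this with rockchip_clk_register_plls(),
+ * rockchip_clk_register_branches(), rockchip_clk_protect_critical() and
+ * finally rockchip_clk_of_add_provider().
+ */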
+struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np,
+ void __iomem *base, unsigned long nr_clks)
{
- reg_base = base;
- cru_node = np;
- grf = ERR_PTR(-EPROBE_DEFER);
+ struct rockchip_clk_provider *ctx;
+ struct clk **clk_table;
+ int i;
+
+ ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
if (!clk_table)
- pr_err("%s: could not allocate clock lookup table\n", __func__);
+ goto err_free;
- clk_data.clks = clk_table;
- clk_data.clk_num = nr_clks;
- of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ for (i = 0; i < nr_clks; ++i)
+ clk_table[i] = ERR_PTR(-ENOENT);
+
+ ctx->reg_base = base;
+ ctx->clk_data.clks = clk_table;
+ ctx->clk_data.clk_num = nr_clks;
+ ctx->cru_node = np;
+ ctx->grf = ERR_PTR(-EPROBE_DEFER);
+ spin_lock_init(&ctx->lock);
+
+ ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
+ "rockchip,grf");
+
+ return ctx;
+
+err_free:
+ kfree(ctx);
+ return ERR_PTR(-ENOMEM);
}
-struct regmap *rockchip_clk_get_grf(void)
+void __init rockchip_clk_of_add_provider(struct device_node *np,
+ struct rockchip_clk_provider *ctx)
{
- if (IS_ERR(grf))
- grf = syscon_regmap_lookup_by_phandle(cru_node, "rockchip,grf");
- return grf;
+ if (of_clk_add_provider(np, of_clk_src_onecell_get,
+ &ctx->clk_data))
+ pr_err("%s: could not register clk provider\n", __func__);
}
-void rockchip_clk_add_lookup(struct clk *clk, unsigned int id)
+void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
+ struct clk *clk, unsigned int id)
{
- if (clk_table && id)
- clk_table[id] = clk;
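+	/* id 0 marks branches without a public DT clock id; skip those */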
+ if (ctx->clk_data.clks && id)
+ ctx->clk_data.clks[id] = clk;
}
-void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
+void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
+ struct rockchip_pll_clock *list,
unsigned int nr_pll, int grf_lock_offset)
{
struct clk *clk;
int idx;
for (idx = 0; idx < nr_pll; idx++, list++) {
- clk = rockchip_clk_register_pll(list->type, list->name,
+ clk = rockchip_clk_register_pll(ctx, list->type, list->name,
list->parent_names, list->num_parents,
- reg_base, list->con_offset, grf_lock_offset,
+ list->con_offset, grf_lock_offset,
list->lock_shift, list->mode_offset,
list->mode_shift, list->rate_table,
- list->pll_flags, &clk_lock);
+ list->pll_flags);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s\n", __func__,
list->name);
continue;
}
- rockchip_clk_add_lookup(clk, list->id);
+ rockchip_clk_add_lookup(ctx, clk, list->id);
}
}
void __init rockchip_clk_register_branches(
+ struct rockchip_clk_provider *ctx,
struct rockchip_clk_branch *list,
unsigned int nr_clk)
{
@@ -389,56 +413,59 @@ void __init rockchip_clk_register_branches(
case branch_mux:
clk = clk_register_mux(NULL, list->name,
list->parent_names, list->num_parents,
- flags, reg_base + list->muxdiv_offset,
+ flags, ctx->reg_base + list->muxdiv_offset,
list->mux_shift, list->mux_width,
- list->mux_flags, &clk_lock);
+ list->mux_flags, &ctx->lock);
break;
case branch_divider:
if (list->div_table)
clk = clk_register_divider_table(NULL,
list->name, list->parent_names[0],
- flags, reg_base + list->muxdiv_offset,
+ flags,
+ ctx->reg_base + list->muxdiv_offset,
list->div_shift, list->div_width,
list->div_flags, list->div_table,
- &clk_lock);
+ &ctx->lock);
else
clk = clk_register_divider(NULL, list->name,
list->parent_names[0], flags,
- reg_base + list->muxdiv_offset,
+ ctx->reg_base + list->muxdiv_offset,
list->div_shift, list->div_width,
- list->div_flags, &clk_lock);
+ list->div_flags, &ctx->lock);
break;
case branch_fraction_divider:
- clk = rockchip_clk_register_frac_branch(list->name,
+ clk = rockchip_clk_register_frac_branch(ctx, list->name,
list->parent_names, list->num_parents,
- reg_base, list->muxdiv_offset, list->div_flags,
+ ctx->reg_base, list->muxdiv_offset,
+ list->div_flags,
list->gate_offset, list->gate_shift,
list->gate_flags, flags, list->child,
- &clk_lock);
+ &ctx->lock);
break;
case branch_gate:
flags |= CLK_SET_RATE_PARENT;
clk = clk_register_gate(NULL, list->name,
list->parent_names[0], flags,
- reg_base + list->gate_offset,
- list->gate_shift, list->gate_flags, &clk_lock);
+ ctx->reg_base + list->gate_offset,
+ list->gate_shift, list->gate_flags, &ctx->lock);
break;
case branch_composite:
clk = rockchip_clk_register_branch(list->name,
list->parent_names, list->num_parents,
- reg_base, list->muxdiv_offset, list->mux_shift,
+ ctx->reg_base, list->muxdiv_offset,
+ list->mux_shift,
list->mux_width, list->mux_flags,
list->div_shift, list->div_width,
list->div_flags, list->div_table,
list->gate_offset, list->gate_shift,
- list->gate_flags, flags, &clk_lock);
+ list->gate_flags, flags, &ctx->lock);
break;
case branch_mmc:
clk = rockchip_clk_register_mmc(
list->name,
list->parent_names, list->num_parents,
- reg_base + list->muxdiv_offset,
+ ctx->reg_base + list->muxdiv_offset,
list->div_shift
);
break;
@@ -446,16 +473,16 @@ void __init rockchip_clk_register_branches(
clk = rockchip_clk_register_inverter(
list->name, list->parent_names,
list->num_parents,
- reg_base + list->muxdiv_offset,
- list->div_shift, list->div_flags, &clk_lock);
+ ctx->reg_base + list->muxdiv_offset,
+ list->div_shift, list->div_flags, &ctx->lock);
break;
case branch_factor:
clk = rockchip_clk_register_factor_branch(
list->name, list->parent_names,
- list->num_parents, reg_base,
+ list->num_parents, ctx->reg_base,
list->div_shift, list->div_width,
list->gate_offset, list->gate_shift,
- list->gate_flags, flags, &clk_lock);
+ list->gate_flags, flags, &ctx->lock);
break;
}
@@ -472,11 +499,12 @@ void __init rockchip_clk_register_branches(
continue;
}
- rockchip_clk_add_lookup(clk, list->id);
+ rockchip_clk_add_lookup(ctx, clk, list->id);
}
}
-void __init rockchip_clk_register_armclk(unsigned int lookup_id,
+void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
+ unsigned int lookup_id,
const char *name, const char *const *parent_names,
u8 num_parents,
const struct rockchip_cpuclk_reg_data *reg_data,
@@ -486,15 +514,15 @@ void __init rockchip_clk_register_armclk(unsigned int lookup_id,
struct clk *clk;
clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
- reg_data, rates, nrates, reg_base,
- &clk_lock);
+ reg_data, rates, nrates,
+ ctx->reg_base, &ctx->lock);
if (IS_ERR(clk)) {
pr_err("%s: failed to register clock %s: %ld\n",
__func__, name, PTR_ERR(clk));
return;
}
- rockchip_clk_add_lookup(clk, lookup_id);
+ rockchip_clk_add_lookup(ctx, clk, lookup_id);
}
void __init rockchip_clk_protect_critical(const char *const clocks[],
@@ -511,6 +539,7 @@ void __init rockchip_clk_protect_critical(const char *const clocks[],
}
}
+static void __iomem *rst_base;
static unsigned int reg_restart;
static void (*cb_restart)(void);
static int rockchip_restart_notify(struct notifier_block *this,
@@ -519,7 +548,7 @@ static int rockchip_restart_notify(struct notifier_block *this,
if (cb_restart)
cb_restart();
- writel(0xfdb9, reg_base + reg_restart);
+ writel(0xfdb9, rst_base + reg_restart);
return NOTIFY_DONE;
}
@@ -528,10 +557,14 @@ static struct notifier_block rockchip_restart_handler = {
.priority = 128,
};
-void __init rockchip_register_restart_notifier(unsigned int reg, void (*cb)(void))
+void __init
+rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
+ unsigned int reg,
+ void (*cb)(void))
{
int ret;
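+	/* the restart handler is global, so cache this provider's register base */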
+ rst_base = ctx->reg_base;
reg_restart = reg;
cb_restart = cb;
ret = register_restart_handler(&rockchip_restart_handler);
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 39c198bbc..2194ffa8c 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -27,13 +27,13 @@
#define CLK_ROCKCHIP_CLK_H
#include <linux/io.h>
+#include <linux/clk-provider.h>
struct clk;
#define HIWORD_UPDATE(val, mask, shift) \
((val) << (shift) | (mask) << ((shift) + 16))
-/* register positions shared by RK2928, RK3036, RK3066, RK3188 and RK3228 */
#define RK2928_PLL_CON(x) ((x) * 0x4)
#define RK2928_MODE_CON 0x40
#define RK2928_CLKSEL_CON(x) ((x) * 0x4 + 0x44)
@@ -92,9 +92,30 @@ struct clk;
#define RK3368_EMMC_CON0 0x418
#define RK3368_EMMC_CON1 0x41c
+#define RK3399_PLL_CON(x) RK2928_PLL_CON(x)
+#define RK3399_CLKSEL_CON(x) ((x) * 0x4 + 0x100)
+#define RK3399_CLKGATE_CON(x) ((x) * 0x4 + 0x300)
+#define RK3399_SOFTRST_CON(x) ((x) * 0x4 + 0x400)
+#define RK3399_GLB_SRST_FST 0x500
+#define RK3399_GLB_SRST_SND 0x504
+#define RK3399_GLB_CNT_TH 0x508
+#define RK3399_MISC_CON 0x50c
+#define RK3399_RST_CON 0x510
+#define RK3399_RST_ST 0x514
+#define RK3399_SDMMC_CON0 0x580
+#define RK3399_SDMMC_CON1 0x584
+#define RK3399_SDIO_CON0 0x588
+#define RK3399_SDIO_CON1 0x58c
+
+#define RK3399_PMU_PLL_CON(x) RK2928_PLL_CON(x)
+#define RK3399_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x80)
+#define RK3399_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x100)
+#define RK3399_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x110)
+
enum rockchip_pll_type {
pll_rk3036,
pll_rk3066,
+ pll_rk3399,
};
#define RK3036_PLL_RATE(_rate, _refdiv, _fbdiv, _postdiv1, \
@@ -127,13 +148,29 @@ enum rockchip_pll_type {
.nb = _nb, \
}
+/**
+ * struct rockchip_clk_provider - information about clock provider
+ * @reg_base: virtual address for the register base.
+ * @clk_data: holds clock related data like clk* and number of clocks.
+ * @cru_node: device-node of the clock-provider
+ * @grf: regmap of the general-register-files syscon
+ * @lock: maintains exclusion between callbacks for a given clock-provider.
+ */
+struct rockchip_clk_provider {
+ void __iomem *reg_base;
+ struct clk_onecell_data clk_data;
+ struct device_node *cru_node;
+ struct regmap *grf;
+ spinlock_t lock;
+};
+
struct rockchip_pll_rate_table {
unsigned long rate;
unsigned int nr;
unsigned int nf;
unsigned int no;
unsigned int nb;
- /* for RK3036 */
+ /* for RK3036/RK3399 */
unsigned int fbdiv;
unsigned int postdiv1;
unsigned int refdiv;
@@ -143,10 +180,11 @@ struct rockchip_pll_rate_table {
};
/**
- * struct rockchip_pll_clock: information about pll clock
+ * struct rockchip_pll_clock - information about pll clock
* @id: platform specific id of the clock.
* @name: name of this pll clock.
- * @parent_name: name of the parent clock.
+ * @parent_names: names of the parent clocks.
+ * @num_parents: number of parent clocks.
* @flags: optional flags for basic clock.
* @con_offset: offset of the register for configuring the PLL.
* @mode_offset: offset of the register for configuring the PLL-mode.
@@ -194,12 +232,13 @@ struct rockchip_pll_clock {
.rate_table = _rtable, \
}
-struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
+struct clk *rockchip_clk_register_pll(struct rockchip_clk_provider *ctx,
+ enum rockchip_pll_type pll_type,
const char *name, const char *const *parent_names,
- u8 num_parents, void __iomem *base, int con_offset,
- int grf_lock_offset, int lock_shift, int reg_mode,
- int mode_shift, struct rockchip_pll_rate_table *rate_table,
- u8 clk_pll_flags, spinlock_t *lock);
+ u8 num_parents, int con_offset, int grf_lock_offset,
+ int lock_shift, int mode_offset, int mode_shift,
+ struct rockchip_pll_rate_table *rate_table,
+ u8 clk_pll_flags);
struct rockchip_cpuclk_clksel {
int reg;
@@ -213,18 +252,23 @@ struct rockchip_cpuclk_rate_table {
};
/**
- * struct rockchip_cpuclk_reg_data: describes register offsets and masks of the cpuclock
+ * struct rockchip_cpuclk_reg_data - register offsets and masks of the cpuclock
* @core_reg: register offset of the core settings register
* @div_core_shift: core divider offset used to divide the pll value
* @div_core_mask: core divider mask
+ * @mux_core_alt: mux value to select alternate parent
+ * @mux_core_main: mux value to select main parent of core
* @mux_core_shift: offset of the core multiplexer
+ * @mux_core_mask: core multiplexer mask
*/
struct rockchip_cpuclk_reg_data {
int core_reg;
u8 div_core_shift;
u32 div_core_mask;
- int mux_core_reg;
+ u8 mux_core_alt;
+ u8 mux_core_main;
u8 mux_core_shift;
+ u32 mux_core_mask;
};
struct clk *rockchip_clk_register_cpuclk(const char *name,
@@ -428,6 +472,22 @@ struct rockchip_clk_branch {
.child = ch, \
}
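+/*
+ * Like COMPOSITE_FRACMUX, but for fractional dividers without their own
+ * gate (gate_offset = -1); used for dclk_vop0_frac/dclk_vop1_frac on rk3399.
+ */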
+#define COMPOSITE_FRACMUX_NOGATE(_id, cname, pname, f, mo, df, ch) \
+ { \
+ .id = _id, \
+ .branch_type = branch_fraction_divider, \
+ .name = cname, \
+ .parent_names = (const char *[]){ pname }, \
+ .num_parents = 1, \
+ .flags = f, \
+ .muxdiv_offset = mo, \
+ .div_shift = 16, \
+ .div_width = 16, \
+ .div_flags = df, \
+ .gate_offset = -1, \
+ .child = ch, \
+ }
+
#define MUX(_id, cname, pnames, f, o, s, w, mf) \
{ \
.id = _id, \
@@ -536,21 +596,27 @@ struct rockchip_clk_branch {
.gate_flags = gf, \
}
-void rockchip_clk_init(struct device_node *np, void __iomem *base,
- unsigned long nr_clks);
-struct regmap *rockchip_clk_get_grf(void);
-void rockchip_clk_add_lookup(struct clk *clk, unsigned int id);
-void rockchip_clk_register_branches(struct rockchip_clk_branch *clk_list,
+struct rockchip_clk_provider *rockchip_clk_init(struct device_node *np,
+ void __iomem *base, unsigned long nr_clks);
+void rockchip_clk_of_add_provider(struct device_node *np,
+ struct rockchip_clk_provider *ctx);
+void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
+ struct clk *clk, unsigned int id);
+void rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
unsigned int nr_clk);
-void rockchip_clk_register_plls(struct rockchip_pll_clock *pll_list,
+void rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
+ struct rockchip_pll_clock *pll_list,
unsigned int nr_pll, int grf_lock_offset);
-void rockchip_clk_register_armclk(unsigned int lookup_id, const char *name,
+void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
+ unsigned int lookup_id, const char *name,
const char *const *parent_names, u8 num_parents,
const struct rockchip_cpuclk_reg_data *reg_data,
const struct rockchip_cpuclk_rate_table *rates,
int nrates);
void rockchip_clk_protect_critical(const char *const clocks[], int nclocks);
-void rockchip_register_restart_notifier(unsigned int reg, void (*cb)(void));
+void rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
+ unsigned int reg, void (*cb)(void));
#define ROCKCHIP_SOFTRST_HIWORD_MASK BIT(0)
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index fdd41b17a..16575ee87 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -302,10 +302,12 @@ static struct samsung_mux_clock mux_clks[] __initdata = {
/* SRC_FSYS */
MUX(CLK_MOUT_TSADC, "mout_tsadc", group_sclk_p, SRC_FSYS, 28, 4),
+ MUX(CLK_MOUT_MMC2, "mout_mmc2", group_sclk_p, SRC_FSYS, 8, 4),
MUX(CLK_MOUT_MMC1, "mout_mmc1", group_sclk_p, SRC_FSYS, 4, 4),
MUX(CLK_MOUT_MMC0, "mout_mmc0", group_sclk_p, SRC_FSYS, 0, 4),
/* SRC_PERIL0 */
+ MUX(CLK_MOUT_UART2, "mout_uart2", group_sclk_p, SRC_PERIL0, 8, 4),
MUX(CLK_MOUT_UART1, "mout_uart1", group_sclk_p, SRC_PERIL0, 4, 4),
MUX(CLK_MOUT_UART0, "mout_uart0", group_sclk_p, SRC_PERIL0, 0, 4),
@@ -389,7 +391,13 @@ static struct samsung_div_clock div_clks[] __initdata = {
CLK_SET_RATE_PARENT, 0),
DIV(CLK_DIV_MMC0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
+ /* DIV_FSYS2 */
+ DIV_F(CLK_DIV_MMC2_PRE, "div_mmc2_pre", "div_mmc2", DIV_FSYS2, 8, 8,
+ CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DIV_MMC2, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4),
+
/* DIV_PERIL0 */
+ DIV(CLK_DIV_UART2, "div_uart2", "mout_uart2", DIV_PERIL0, 8, 4),
DIV(CLK_DIV_UART1, "div_uart1", "mout_uart1", DIV_PERIL0, 4, 4),
DIV(CLK_DIV_UART0, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4),
@@ -538,6 +546,8 @@ static struct samsung_gate_clock gate_clks[] __initdata = {
GATE_SCLK_FSYS, 9, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_EBI, "sclk_ebi", "div_ebi",
GATE_SCLK_FSYS, 6, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_MMC2, "sclk_mmc2", "div_mmc2_pre",
+ GATE_SCLK_FSYS, 2, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc1_pre",
GATE_SCLK_FSYS, 1, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc0_pre",
@@ -552,6 +562,9 @@ static struct samsung_gate_clock gate_clks[] __initdata = {
GATE_SCLK_PERIL, 7, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_SPI0, "sclk_spi0", "div_spi0_pre",
GATE_SCLK_PERIL, 6, CLK_SET_RATE_PARENT, 0),
+
+ GATE(CLK_SCLK_UART2, "sclk_uart2", "div_uart2",
+ GATE_SCLK_PERIL, 2, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_UART1, "sclk_uart1", "div_uart1",
GATE_SCLK_PERIL, 1, CLK_SET_RATE_PARENT, 0),
GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0",
@@ -630,6 +643,7 @@ static struct samsung_gate_clock gate_clks[] __initdata = {
GATE(CLK_USBOTG, "usbotg", "div_aclk_200", GATE_IP_FSYS, 13, 0, 0),
GATE(CLK_USBHOST, "usbhost", "div_aclk_200", GATE_IP_FSYS, 12, 0, 0),
GATE(CLK_SROMC, "sromc", "div_aclk_200", GATE_IP_FSYS, 11, 0, 0),
+ GATE(CLK_SDMMC2, "sdmmc2", "div_aclk_200", GATE_IP_FSYS, 7, 0, 0),
GATE(CLK_SDMMC1, "sdmmc1", "div_aclk_200", GATE_IP_FSYS, 6, 0, 0),
GATE(CLK_SDMMC0, "sdmmc0", "div_aclk_200", GATE_IP_FSYS, 5, 0, 0),
GATE(CLK_PDMA1, "pdma1", "div_aclk_200", GATE_IP_FSYS, 1, 0, 0),
@@ -649,6 +663,7 @@ static struct samsung_gate_clock gate_clks[] __initdata = {
GATE(CLK_I2C2, "i2c2", "div_aclk_100", GATE_IP_PERIL, 8, 0, 0),
GATE(CLK_I2C1, "i2c1", "div_aclk_100", GATE_IP_PERIL, 7, 0, 0),
GATE(CLK_I2C0, "i2c0", "div_aclk_100", GATE_IP_PERIL, 6, 0, 0),
+ GATE(CLK_UART2, "uart2", "div_aclk_100", GATE_IP_PERIL, 2, 0, 0),
GATE(CLK_UART1, "uart1", "div_aclk_100", GATE_IP_PERIL, 1, 0, 0),
GATE(CLK_UART0, "uart0", "div_aclk_100", GATE_IP_PERIL, 0, 0, 0),
};
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index be03ed0fc..92382cef9 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -554,8 +554,8 @@ static struct samsung_mux_clock exynos5800_mux_clks[] __initdata = {
};
static struct samsung_div_clock exynos5800_div_clks[] __initdata = {
- DIV(0, "dout_aclk400_wcore", "mout_aclk400_wcore", DIV_TOP0, 16, 3),
-
+ DIV(CLK_DOUT_ACLK400_WCORE, "dout_aclk400_wcore",
+ "mout_aclk400_wcore", DIV_TOP0, 16, 3),
DIV(0, "dout_aclk550_cam", "mout_aclk550_cam",
DIV_TOP8, 16, 3),
DIV(0, "dout_aclkfl1_550_cam", "mout_aclkfl1_550_cam",
@@ -607,8 +607,8 @@ static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = {
};
static struct samsung_div_clock exynos5420_div_clks[] __initdata = {
- DIV(0, "dout_aclk400_wcore", "mout_aclk400_wcore_bpll",
- DIV_TOP0, 16, 3),
+ DIV(CLK_DOUT_ACLK400_WCORE, "dout_aclk400_wcore",
+ "mout_aclk400_wcore_bpll", DIV_TOP0, 16, 3),
};
static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
@@ -785,31 +785,47 @@ static struct samsung_div_clock exynos5x_div_clks[] __initdata = {
DIV(0, "div_kfc", "mout_kfc", DIV_KFC0, 0, 3),
DIV(0, "sclk_kpll", "mout_kpll", DIV_KFC0, 24, 3),
- DIV(0, "dout_aclk400_isp", "mout_aclk400_isp", DIV_TOP0, 0, 3),
- DIV(0, "dout_aclk400_mscl", "mout_aclk400_mscl", DIV_TOP0, 4, 3),
- DIV(0, "dout_aclk200", "mout_aclk200", DIV_TOP0, 8, 3),
- DIV(0, "dout_aclk200_fsys2", "mout_aclk200_fsys2", DIV_TOP0, 12, 3),
- DIV(0, "dout_aclk100_noc", "mout_aclk100_noc", DIV_TOP0, 20, 3),
- DIV(0, "dout_pclk200_fsys", "mout_pclk200_fsys", DIV_TOP0, 24, 3),
- DIV(0, "dout_aclk200_fsys", "mout_aclk200_fsys", DIV_TOP0, 28, 3),
-
- DIV(0, "dout_aclk333_432_gscl", "mout_aclk333_432_gscl",
- DIV_TOP1, 0, 3),
- DIV(0, "dout_aclk333_432_isp", "mout_aclk333_432_isp",
- DIV_TOP1, 4, 3),
- DIV(0, "dout_aclk66", "mout_aclk66", DIV_TOP1, 8, 6),
- DIV(0, "dout_aclk333_432_isp0", "mout_aclk333_432_isp0",
- DIV_TOP1, 16, 3),
- DIV(0, "dout_aclk266", "mout_aclk266", DIV_TOP1, 20, 3),
- DIV(0, "dout_aclk166", "mout_aclk166", DIV_TOP1, 24, 3),
- DIV(0, "dout_aclk333", "mout_aclk333", DIV_TOP1, 28, 3),
-
- DIV(0, "dout_aclk333_g2d", "mout_aclk333_g2d", DIV_TOP2, 8, 3),
- DIV(0, "dout_aclk266_g2d", "mout_aclk266_g2d", DIV_TOP2, 12, 3),
- DIV(0, "dout_aclk_g3d", "mout_aclk_g3d", DIV_TOP2, 16, 3),
- DIV(0, "dout_aclk300_jpeg", "mout_aclk300_jpeg", DIV_TOP2, 20, 3),
- DIV(0, "dout_aclk300_disp1", "mout_aclk300_disp1", DIV_TOP2, 24, 3),
- DIV(0, "dout_aclk300_gscl", "mout_aclk300_gscl", DIV_TOP2, 28, 3),
+ DIV(CLK_DOUT_ACLK400_ISP, "dout_aclk400_isp", "mout_aclk400_isp",
+ DIV_TOP0, 0, 3),
+ DIV(CLK_DOUT_ACLK400_MSCL, "dout_aclk400_mscl", "mout_aclk400_mscl",
+ DIV_TOP0, 4, 3),
+ DIV(CLK_DOUT_ACLK200, "dout_aclk200", "mout_aclk200",
+ DIV_TOP0, 8, 3),
+ DIV(CLK_DOUT_ACLK200_FSYS2, "dout_aclk200_fsys2", "mout_aclk200_fsys2",
+ DIV_TOP0, 12, 3),
+ DIV(CLK_DOUT_ACLK100_NOC, "dout_aclk100_noc", "mout_aclk100_noc",
+ DIV_TOP0, 20, 3),
+ DIV(CLK_DOUT_PCLK200_FSYS, "dout_pclk200_fsys", "mout_pclk200_fsys",
+ DIV_TOP0, 24, 3),
+ DIV(CLK_DOUT_ACLK200_FSYS, "dout_aclk200_fsys", "mout_aclk200_fsys",
+ DIV_TOP0, 28, 3),
+ DIV(CLK_DOUT_ACLK333_432_GSCL, "dout_aclk333_432_gscl",
+ "mout_aclk333_432_gscl", DIV_TOP1, 0, 3),
+ DIV(CLK_DOUT_ACLK333_432_ISP, "dout_aclk333_432_isp",
+ "mout_aclk333_432_isp", DIV_TOP1, 4, 3),
+ DIV(CLK_DOUT_ACLK66, "dout_aclk66", "mout_aclk66",
+ DIV_TOP1, 8, 6),
+ DIV(CLK_DOUT_ACLK333_432_ISP0, "dout_aclk333_432_isp0",
+ "mout_aclk333_432_isp0", DIV_TOP1, 16, 3),
+ DIV(CLK_DOUT_ACLK266, "dout_aclk266", "mout_aclk266",
+ DIV_TOP1, 20, 3),
+ DIV(CLK_DOUT_ACLK166, "dout_aclk166", "mout_aclk166",
+ DIV_TOP1, 24, 3),
+ DIV(CLK_DOUT_ACLK333, "dout_aclk333", "mout_aclk333",
+ DIV_TOP1, 28, 3),
+
+ DIV(CLK_DOUT_ACLK333_G2D, "dout_aclk333_g2d", "mout_aclk333_g2d",
+ DIV_TOP2, 8, 3),
+ DIV(CLK_DOUT_ACLK266_G2D, "dout_aclk266_g2d", "mout_aclk266_g2d",
+ DIV_TOP2, 12, 3),
+ DIV(CLK_DOUT_ACLK_G3D, "dout_aclk_g3d", "mout_aclk_g3d", DIV_TOP2,
+ 16, 3),
+ DIV(CLK_DOUT_ACLK300_JPEG, "dout_aclk300_jpeg", "mout_aclk300_jpeg",
+ DIV_TOP2, 20, 3),
+ DIV(CLK_DOUT_ACLK300_DISP1, "dout_aclk300_disp1",
+ "mout_aclk300_disp1", DIV_TOP2, 24, 3),
+ DIV(CLK_DOUT_ACLK300_GSCL, "dout_aclk300_gscl", "mout_aclk300_gscl",
+ DIV_TOP2, 28, 3),
/* DISP1 Block */
DIV(0, "dout_fimd1", "mout_fimd1_final", DIV_DISP10, 0, 4),
@@ -817,7 +833,8 @@ static struct samsung_div_clock exynos5x_div_clks[] __initdata = {
DIV(0, "dout_dp1", "mout_dp1", DIV_DISP10, 24, 4),
DIV(CLK_DOUT_PIXEL, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4),
DIV(0, "dout_disp1_blk", "aclk200_disp1", DIV2_RATIO0, 16, 2),
- DIV(0, "dout_aclk400_disp1", "mout_aclk400_disp1", DIV_TOP2, 4, 3),
+ DIV(CLK_DOUT_ACLK400_DISP1, "dout_aclk400_disp1",
+ "mout_aclk400_disp1", DIV_TOP2, 4, 3),
/* Audio Block */
DIV(0, "dout_maudio0", "mout_maudio0", DIV_MAU, 20, 4),
diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
index c5eaa9d16..665fa681b 100644
--- a/drivers/clk/sirf/clk-atlas6.c
+++ b/drivers/clk/sirf/clk-atlas6.c
@@ -130,10 +130,9 @@ static void __init atlas6_clk_init(struct device_node *np)
panic("unable to map clkc registers\n");
/* These are always available (RTC and 26MHz OSC)*/
- atlas6_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL,
- CLK_IS_ROOT, 32768);
- atlas6_clks[osc] = clk_register_fixed_rate(NULL, "osc", NULL,
- CLK_IS_ROOT, 26000000);
+ atlas6_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL, 0, 32768);
+ atlas6_clks[osc] = clk_register_fixed_rate(NULL, "osc", NULL, 0,
+ 26000000);
for (i = pll1; i < maxclk; i++) {
atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]);
diff --git a/drivers/clk/sirf/clk-prima2.c b/drivers/clk/sirf/clk-prima2.c
index f92c40264..aac1c8ec1 100644
--- a/drivers/clk/sirf/clk-prima2.c
+++ b/drivers/clk/sirf/clk-prima2.c
@@ -129,10 +129,9 @@ static void __init prima2_clk_init(struct device_node *np)
panic("unable to map clkc registers\n");
/* These are always available (RTC and 26MHz OSC)*/
- prima2_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL,
- CLK_IS_ROOT, 32768);
- prima2_clks[osc] = clk_register_fixed_rate(NULL, "osc", NULL,
- CLK_IS_ROOT, 26000000);
+ prima2_clks[rtc] = clk_register_fixed_rate(NULL, "rtc", NULL, 0, 32768);
+ prima2_clks[osc] = clk_register_fixed_rate(NULL, "osc", NULL, 0,
+ 26000000);
for (i = pll1; i < maxclk; i++) {
prima2_clks[i] = clk_register(NULL, prima2_clk_hw_array[i]);
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index 3fd7901d4..39d2044a1 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -11,6 +11,9 @@ obj-y += clk-a10-ve.o
obj-y += clk-a20-gmac.o
obj-y += clk-mod0.o
obj-y += clk-simple-gates.o
+obj-y += clk-sun4i-display.o
+obj-y += clk-sun4i-pll3.o
+obj-y += clk-sun4i-tcon-ch1.o
obj-y += clk-sun8i-bus-gates.o
obj-y += clk-sun8i-mbus.o
obj-y += clk-sun9i-core.o
diff --git a/drivers/clk/sunxi/clk-a10-hosc.c b/drivers/clk/sunxi/clk-a10-hosc.c
index 6b598c6a0..dca532431 100644
--- a/drivers/clk/sunxi/clk-a10-hosc.c
+++ b/drivers/clk/sunxi/clk-a10-hosc.c
@@ -54,8 +54,7 @@ static void __init sun4i_osc_clk_setup(struct device_node *node)
NULL, 0,
NULL, NULL,
&fixed->hw, &clk_fixed_rate_ops,
- &gate->hw, &clk_gate_ops,
- CLK_IS_ROOT);
+ &gate->hw, &clk_gate_ops, 0);
if (IS_ERR(clk))
goto err_free_gate;
diff --git a/drivers/clk/sunxi/clk-a10-mod1.c b/drivers/clk/sunxi/clk-a10-mod1.c
index e9d870de1..e2819fa09 100644
--- a/drivers/clk/sunxi/clk-a10-mod1.c
+++ b/drivers/clk/sunxi/clk-a10-mod1.c
@@ -62,7 +62,7 @@ static void __init sun4i_mod1_clk_setup(struct device_node *node)
clk = clk_register_composite(NULL, clk_name, parents, i,
&mux->hw, &clk_mux_ops,
NULL, NULL,
- &gate->hw, &clk_gate_ops, 0);
+ &gate->hw, &clk_gate_ops, CLK_SET_RATE_PARENT);
if (IS_ERR(clk))
goto err_free_gate;
diff --git a/drivers/clk/sunxi/clk-sun4i-display.c b/drivers/clk/sunxi/clk-sun4i-display.c
new file mode 100644
index 000000000..9780fac6d
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun4i-display.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2015 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
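+/*
+ * Per-compatible description of a display clock: register layout plus how
+ * many parents and reset lines the variant exposes.
+ */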
+struct sun4i_a10_display_clk_data {
+ bool has_div;
+ u8 num_rst;
+ u8 parents;
+
+ u8 offset_en;
+ u8 offset_div;
+ u8 offset_mux;
+ u8 offset_rst;
+
+ u8 width_div;
+ u8 width_mux;
+
+ u32 flags;
+};
+
+struct reset_data {
+ void __iomem *reg;
+ spinlock_t *lock;
+ struct reset_controller_dev rcdev;
+ u8 offset;
+};
+
+static DEFINE_SPINLOCK(sun4i_a10_display_lock);
+
+static inline struct reset_data *rcdev_to_reset_data(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct reset_data, rcdev);
+}
+
+static int sun4i_a10_display_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct reset_data *data = rcdev_to_reset_data(rcdev);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(data->lock, flags);
+
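+	/* a cleared bit holds the line in reset, cf. sun4i_a10_display_status() */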
+ reg = readl(data->reg);
+ writel(reg & ~BIT(data->offset + id), data->reg);
+
+ spin_unlock_irqrestore(data->lock, flags);
+
+ return 0;
+}
+
+static int sun4i_a10_display_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct reset_data *data = rcdev_to_reset_data(rcdev);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(data->lock, flags);
+
+ reg = readl(data->reg);
+ writel(reg | BIT(data->offset + id), data->reg);
+
+ spin_unlock_irqrestore(data->lock, flags);
+
+ return 0;
+}
+
+static int sun4i_a10_display_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct reset_data *data = rcdev_to_reset_data(rcdev);
+
+ return !(readl(data->reg) & BIT(data->offset + id));
+}
+
+static const struct reset_control_ops sun4i_a10_display_reset_ops = {
+ .assert = sun4i_a10_display_assert,
+ .deassert = sun4i_a10_display_deassert,
+ .status = sun4i_a10_display_status,
+};
+
+static int sun4i_a10_display_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *spec)
+{
+ /* We only have a single reset signal */
+ return 0;
+}
+
+static void __init sun4i_a10_display_init(struct device_node *node,
+ const struct sun4i_a10_display_clk_data *data)
+{
+ const char *parents[4];
+ const char *clk_name = node->name;
+ struct reset_data *reset_data;
+ struct clk_divider *div = NULL;
+ struct clk_gate *gate;
+ struct resource res;
+ struct clk_mux *mux;
+ void __iomem *reg;
+ struct clk *clk;
+ int ret;
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg)) {
+ pr_err("%s: Could not map the clock registers\n", clk_name);
+ return;
+ }
+
+ ret = of_clk_parent_fill(node, parents, data->parents);
+ if (ret != data->parents) {
+ pr_err("%s: Could not retrieve the parents\n", clk_name);
+ goto unmap;
+ }
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ goto unmap;
+
+ mux->reg = reg;
+ mux->shift = data->offset_mux;
+ mux->mask = (1 << data->width_mux) - 1;
+ mux->lock = &sun4i_a10_display_lock;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto free_mux;
+
+ gate->reg = reg;
+ gate->bit_idx = data->offset_en;
+ gate->lock = &sun4i_a10_display_lock;
+
+ if (data->has_div) {
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ goto free_gate;
+
+ div->reg = reg;
+ div->shift = data->offset_div;
+ div->width = data->width_div;
+ div->lock = &sun4i_a10_display_lock;
+ }
+
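+	/* assemble mux + optional divider + gate into a single composite clock */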
+ clk = clk_register_composite(NULL, clk_name,
+ parents, data->parents,
+ &mux->hw, &clk_mux_ops,
+ data->has_div ? &div->hw : NULL,
+ data->has_div ? &clk_divider_ops : NULL,
+ &gate->hw, &clk_gate_ops,
+ data->flags);
+ if (IS_ERR(clk)) {
+ pr_err("%s: Couldn't register the clock\n", clk_name);
+ goto free_div;
+ }
+
+ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (ret) {
+ pr_err("%s: Couldn't register DT provider\n", clk_name);
+ goto free_clk;
+ }
+
+ if (!data->num_rst)
+ return;
+
+ reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
+ if (!reset_data)
+ goto free_of_clk;
+
+ reset_data->reg = reg;
+ reset_data->offset = data->offset_rst;
+ reset_data->lock = &sun4i_a10_display_lock;
+ reset_data->rcdev.nr_resets = data->num_rst;
+ reset_data->rcdev.ops = &sun4i_a10_display_reset_ops;
+ reset_data->rcdev.of_node = node;
+
+ if (data->num_rst == 1) {
+ reset_data->rcdev.of_reset_n_cells = 0;
+ reset_data->rcdev.of_xlate = &sun4i_a10_display_reset_xlate;
+ } else {
+ reset_data->rcdev.of_reset_n_cells = 1;
+ }
+
+ if (reset_controller_register(&reset_data->rcdev)) {
+ pr_err("%s: Couldn't register the reset controller\n",
+ clk_name);
+ goto free_reset;
+ }
+
+ return;
+
+free_reset:
+ kfree(reset_data);
+free_of_clk:
+ of_clk_del_provider(node);
+free_clk:
+ clk_unregister_composite(clk);
+free_div:
+ kfree(div);
+free_gate:
+ kfree(gate);
+free_mux:
+ kfree(mux);
+unmap:
+ iounmap(reg);
+ of_address_to_resource(node, 0, &res);
+ release_mem_region(res.start, resource_size(&res));
+}
+
+static const struct sun4i_a10_display_clk_data sun4i_a10_tcon_ch0_data __initconst = {
+ .num_rst = 2,
+ .parents = 4,
+ .offset_en = 31,
+ .offset_rst = 29,
+ .offset_mux = 24,
+ .width_mux = 2,
+ .flags = CLK_SET_RATE_PARENT,
+};
+
+static void __init sun4i_a10_tcon_ch0_setup(struct device_node *node)
+{
+ sun4i_a10_display_init(node, &sun4i_a10_tcon_ch0_data);
+}
+CLK_OF_DECLARE(sun4i_a10_tcon_ch0, "allwinner,sun4i-a10-tcon-ch0-clk",
+ sun4i_a10_tcon_ch0_setup);
+
+static const struct sun4i_a10_display_clk_data sun4i_a10_display_data __initconst = {
+ .has_div = true,
+ .num_rst = 1,
+ .parents = 3,
+ .offset_en = 31,
+ .offset_rst = 30,
+ .offset_mux = 24,
+ .offset_div = 0,
+ .width_mux = 2,
+ .width_div = 4,
+};
+
+static void __init sun4i_a10_display_setup(struct device_node *node)
+{
+ sun4i_a10_display_init(node, &sun4i_a10_display_data);
+}
+CLK_OF_DECLARE(sun4i_a10_display, "allwinner,sun4i-a10-display-clk",
+ sun4i_a10_display_setup);
diff --git a/drivers/clk/sunxi/clk-sun4i-pll3.c b/drivers/clk/sunxi/clk-sun4i-pll3.c
new file mode 100644
index 000000000..f66267e77
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun4i-pll3.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2015 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define SUN4I_A10_PLL3_GATE_BIT 31
+#define SUN4I_A10_PLL3_DIV_WIDTH 7
+#define SUN4I_A10_PLL3_DIV_SHIFT 0
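+
+/* Despite the "DIV" naming, the 7-bit field acts as a multiplier: rate = parent * N. */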
+
+static DEFINE_SPINLOCK(sun4i_a10_pll3_lock);
+
+static void __init sun4i_a10_pll3_setup(struct device_node *node)
+{
+ const char *clk_name = node->name, *parent;
+ struct clk_multiplier *mult;
+ struct clk_gate *gate;
+ struct resource res;
+ void __iomem *reg;
+ struct clk *clk;
+ int ret;
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+ parent = of_clk_get_parent_name(node, 0);
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg)) {
+ pr_err("%s: Could not map the clock registers\n", clk_name);
+ return;
+ }
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto err_unmap;
+
+ gate->reg = reg;
+ gate->bit_idx = SUN4I_A10_PLL3_GATE_BIT;
+ gate->lock = &sun4i_a10_pll3_lock;
+
+ mult = kzalloc(sizeof(*mult), GFP_KERNEL);
+ if (!mult)
+ goto err_free_gate;
+
+ mult->reg = reg;
+ mult->shift = SUN4I_A10_PLL3_DIV_SHIFT;
+ mult->width = SUN4I_A10_PLL3_DIV_WIDTH;
+ mult->lock = &sun4i_a10_pll3_lock;
+
+ clk = clk_register_composite(NULL, clk_name,
+ &parent, 1,
+ NULL, NULL,
+ &mult->hw, &clk_multiplier_ops,
+ &gate->hw, &clk_gate_ops,
+ 0);
+ if (IS_ERR(clk)) {
+ pr_err("%s: Couldn't register the clock\n", clk_name);
+ goto err_free_mult;
+ }
+
+ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (ret) {
+ pr_err("%s: Couldn't register DT provider\n",
+ clk_name);
+ goto err_clk_unregister;
+ }
+
+ return;
+
+err_clk_unregister:
+ clk_unregister_composite(clk);
+err_free_mult:
+ kfree(mult);
+err_free_gate:
+ kfree(gate);
+err_unmap:
+ iounmap(reg);
+ of_address_to_resource(node, 0, &res);
+ release_mem_region(res.start, resource_size(&res));
+}
+
+CLK_OF_DECLARE(sun4i_a10_pll3, "allwinner,sun4i-a10-pll3-clk",
+ sun4i_a10_pll3_setup);
diff --git a/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c b/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
new file mode 100644
index 000000000..b6d29d1be
--- /dev/null
+++ b/drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2015 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define TCON_CH1_SCLK2_PARENTS 4
+
+#define TCON_CH1_SCLK2_GATE_BIT BIT(31)
+#define TCON_CH1_SCLK2_MUX_MASK 3
+#define TCON_CH1_SCLK2_MUX_SHIFT 24
+#define TCON_CH1_SCLK2_DIV_MASK 0xf
+#define TCON_CH1_SCLK2_DIV_SHIFT 0
+
+#define TCON_CH1_SCLK1_GATE_BIT BIT(15)
+#define TCON_CH1_SCLK1_HALF_BIT BIT(11)
+
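+/*
+ * Both channel 1 clocks share one register: SCLK2 has the gate, mux
+ * and 4-bit divider; SCLK1 adds its own gate and a divide-by-two.
+ */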
+struct tcon_ch1_clk {
+ struct clk_hw hw;
+ spinlock_t lock;
+ void __iomem *reg;
+};
+
+#define hw_to_tclk(hw) container_of(hw, struct tcon_ch1_clk, hw)
+
+static void tcon_ch1_disable(struct clk_hw *hw)
+{
+ struct tcon_ch1_clk *tclk = hw_to_tclk(hw);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&tclk->lock, flags);
+ reg = readl(tclk->reg);
+ reg &= ~(TCON_CH1_SCLK2_GATE_BIT | TCON_CH1_SCLK1_GATE_BIT);
+ writel(reg, tclk->reg);
+ spin_unlock_irqrestore(&tclk->lock, flags);
+}
+
+static int tcon_ch1_enable(struct clk_hw *hw)
+{
+ struct tcon_ch1_clk *tclk = hw_to_tclk(hw);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&tclk->lock, flags);
+ reg = readl(tclk->reg);
+ reg |= TCON_CH1_SCLK2_GATE_BIT | TCON_CH1_SCLK1_GATE_BIT;
+ writel(reg, tclk->reg);
+ spin_unlock_irqrestore(&tclk->lock, flags);
+
+ return 0;
+}
+
+static int tcon_ch1_is_enabled(struct clk_hw *hw)
+{
+ struct tcon_ch1_clk *tclk = hw_to_tclk(hw);
+ u32 reg;
+
+ reg = readl(tclk->reg);
+ return reg & (TCON_CH1_SCLK2_GATE_BIT | TCON_CH1_SCLK1_GATE_BIT);
+}
+
+static u8 tcon_ch1_get_parent(struct clk_hw *hw)
+{
+ struct tcon_ch1_clk *tclk = hw_to_tclk(hw);
+ u32 reg;
+
+ reg = readl(tclk->reg) >> TCON_CH1_SCLK2_MUX_SHIFT;
+ reg &= TCON_CH1_SCLK2_MUX_MASK;
+
+ return reg;
+}
+
+static int tcon_ch1_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct tcon_ch1_clk *tclk = hw_to_tclk(hw);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&tclk->lock, flags);
+ reg = readl(tclk->reg);
+ reg &= ~(TCON_CH1_SCLK2_MUX_MASK << TCON_CH1_SCLK2_MUX_SHIFT);
+ reg |= index << TCON_CH1_SCLK2_MUX_SHIFT;
+ writel(reg, tclk->reg);
+ spin_unlock_irqrestore(&tclk->lock, flags);
+
+ return 0;
+}
+
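+/*
+ * Walk the 4-bit divider m (1..15) and the optional extra
+ * divide-by-two d, picking the rate parent_rate / m / d that comes
+ * closest to the target without exceeding it.
+ */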
+static unsigned long tcon_ch1_calc_divider(unsigned long rate,
+ unsigned long parent_rate,
+ u8 *div,
+ bool *half)
+{
+ unsigned long best_rate = 0;
+ u8 best_m = 0, m;
+ bool is_double = false;
+
+ for (m = 1; m < 16; m++) {
+ u8 d;
+
+ for (d = 1; d < 3; d++) {
+ unsigned long tmp_rate;
+
+ tmp_rate = parent_rate / m / d;
+
+ if (tmp_rate > rate)
+ continue;
+
+ if (!best_rate ||
+ (rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ best_m = m;
+ is_double = (d == 2);
+ }
+ }
+ }
+
+ if (div && half) {
+ *div = best_m;
+ *half = is_double;
+ }
+
+ return best_rate;
+}
+
+static int tcon_ch1_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ long best_rate = -EINVAL;
+ int i;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ unsigned long parent_rate;
+ unsigned long tmp_rate;
+ struct clk_hw *parent;
+
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ parent_rate = clk_hw_get_rate(parent);
+
+ tmp_rate = tcon_ch1_calc_divider(req->rate, parent_rate,
+ NULL, NULL);
+
+ if (best_rate < 0 ||
+ (req->rate - tmp_rate) < (req->rate - best_rate)) {
+ best_rate = tmp_rate;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+ }
+
+ if (best_rate < 0)
+ return best_rate;
+
+ req->rate = best_rate;
+ return 0;
+}
+
+static unsigned long tcon_ch1_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tcon_ch1_clk *tclk = hw_to_tclk(hw);
+ u32 reg;
+
+ reg = readl(tclk->reg);
+
+ parent_rate /= (reg & TCON_CH1_SCLK2_DIV_MASK) + 1;
+
+ if (reg & TCON_CH1_SCLK1_HALF_BIT)
+ parent_rate /= 2;
+
+ return parent_rate;
+}
+
+static int tcon_ch1_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tcon_ch1_clk *tclk = hw_to_tclk(hw);
+ unsigned long flags;
+ bool half;
+ u8 div_m;
+ u32 reg;
+
+ tcon_ch1_calc_divider(rate, parent_rate, &div_m, &half);
+
+ spin_lock_irqsave(&tclk->lock, flags);
+ reg = readl(tclk->reg);
+ reg &= ~(TCON_CH1_SCLK2_DIV_MASK | TCON_CH1_SCLK1_HALF_BIT);
+ reg |= (div_m - 1) & TCON_CH1_SCLK2_DIV_MASK;
+
+ if (half)
+ reg |= TCON_CH1_SCLK1_HALF_BIT;
+
+ writel(reg, tclk->reg);
+ spin_unlock_irqrestore(&tclk->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops tcon_ch1_ops = {
+ .disable = tcon_ch1_disable,
+ .enable = tcon_ch1_enable,
+ .is_enabled = tcon_ch1_is_enabled,
+
+ .get_parent = tcon_ch1_get_parent,
+ .set_parent = tcon_ch1_set_parent,
+
+ .determine_rate = tcon_ch1_determine_rate,
+ .recalc_rate = tcon_ch1_recalc_rate,
+ .set_rate = tcon_ch1_set_rate,
+};
+
+static void __init tcon_ch1_setup(struct device_node *node)
+{
+ const char *parents[TCON_CH1_SCLK2_PARENTS];
+ const char *clk_name = node->name;
+ struct clk_init_data init;
+ struct tcon_ch1_clk *tclk;
+ struct resource res;
+ struct clk *clk;
+ void __iomem *reg;
+ int ret;
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg)) {
+ pr_err("%s: Could not map the clock registers\n", clk_name);
+ return;
+ }
+
+ ret = of_clk_parent_fill(node, parents, TCON_CH1_SCLK2_PARENTS);
+ if (ret != TCON_CH1_SCLK2_PARENTS) {
+ pr_err("%s Could not retrieve the parents\n", clk_name);
+ goto err_unmap;
+ }
+
+ tclk = kzalloc(sizeof(*tclk), GFP_KERNEL);
+ if (!tclk)
+ goto err_unmap;
+
+ init.name = clk_name;
+ init.ops = &tcon_ch1_ops;
+ init.parent_names = parents;
+ init.num_parents = TCON_CH1_SCLK2_PARENTS;
+ init.flags = CLK_SET_RATE_PARENT;
+
+ tclk->reg = reg;
+ tclk->hw.init = &init;
+ spin_lock_init(&tclk->lock);
+
+ clk = clk_register(NULL, &tclk->hw);
+ if (IS_ERR(clk)) {
+ pr_err("%s: Couldn't register the clock\n", clk_name);
+ goto err_free_data;
+ }
+
+ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ if (ret) {
+ pr_err("%s: Couldn't register our clock provider\n", clk_name);
+ goto err_unregister_clk;
+ }
+
+ return;
+
+err_unregister_clk:
+ clk_unregister(clk);
+err_free_data:
+ kfree(tclk);
+err_unmap:
+ iounmap(reg);
+ of_address_to_resource(node, 0, &res);
+ release_mem_region(res.start, resource_size(&res));
+}
+
+CLK_OF_DECLARE(tcon_ch1, "allwinner,sun4i-a10-tcon-ch1-clk",
+ tcon_ch1_setup);
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index 028dd832a..716737388 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -106,7 +106,7 @@ static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* one clock/reset pair per word */
- count = DIV_ROUND_UP((r->end - r->start + 1), SUN9I_MMC_WIDTH);
+ count = DIV_ROUND_UP(resource_size(r), SUN9I_MMC_WIDTH);
data->membase = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(data->membase))
return PTR_ERR(data->membase);
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index 91de0a006..838b22aa8 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -523,21 +523,12 @@ static const struct factors_data sun4i_pll5_data __initconst = {
.enable = 31,
.table = &sun4i_pll5_config,
.getter = sun4i_get_pll5_factors,
- .name = "pll5",
-};
-
-static const struct factors_data sun4i_pll6_data __initconst = {
- .enable = 31,
- .table = &sun4i_pll5_config,
- .getter = sun4i_get_pll5_factors,
- .name = "pll6",
};
static const struct factors_data sun6i_a31_pll6_data __initconst = {
.enable = 31,
.table = &sun6i_a31_pll6_config,
.getter = sun6i_a31_get_pll6_factors,
- .name = "pll6x2",
};
static const struct factors_data sun5i_a13_ahb_data __initconst = {
@@ -933,7 +924,7 @@ static const struct divs_data pll5_divs_data __initconst = {
};
static const struct divs_data pll6_divs_data __initconst = {
- .factors = &sun4i_pll6_data,
+ .factors = &sun4i_pll5_data,
.ndivs = 4,
.div = {
{ .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */
@@ -975,6 +966,8 @@ static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node,
struct clk_gate *gate = NULL;
struct clk_fixed_factor *fix_factor;
struct clk_divider *divider;
+ struct factors_data factors = *data->factors;
+ char *derived_name = NULL;
void __iomem *reg;
int ndivs = SUNXI_DIVS_MAX_QTY, i = 0;
int flags, clkflags;
@@ -983,11 +976,37 @@ static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node,
if (data->ndivs)
ndivs = data->ndivs;
+ /* Try to find a name for base factor clock */
+ for (i = 0; i < ndivs; i++) {
+ if (data->div[i].self) {
+ of_property_read_string_index(node, "clock-output-names",
+ i, &factors.name);
+ break;
+ }
+ }
+ /* If we don't have a .self clk use the first output-name up to '_' */
+ if (factors.name == NULL) {
+ char *endp;
+
+ of_property_read_string_index(node, "clock-output-names",
+ 0, &clk_name);
+ endp = strchr(clk_name, '_');
+ if (endp) {
+ derived_name = kstrndup(clk_name, endp - clk_name,
+ GFP_KERNEL);
+ factors.name = derived_name;
+ } else {
+ factors.name = clk_name;
+ }
+ }
+
/* Set up factor clock that we will be dividing */
- pclk = sunxi_factors_clk_setup(node, data->factors);
+ pclk = sunxi_factors_clk_setup(node, &factors);
if (!pclk)
return NULL;
+
parent = __clk_get_name(pclk);
+ kfree(derived_name);
reg = of_iomap(node, 0);
if (!reg) {
@@ -1127,3 +1146,41 @@ static void __init sun6i_pll6_clk_setup(struct device_node *node)
}
CLK_OF_DECLARE(sun6i_pll6, "allwinner,sun6i-a31-pll6-clk",
sun6i_pll6_clk_setup);
+
+/*
+ * sun6i display
+ *
+ * rate = parent_rate / (m + 1);
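+ *
+ * For example, a 297 MHz parent and a 148.5 MHz target give m = 2,
+ * so the 4-bit register field is programmed with m - 1 = 1.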
+ */
+static void sun6i_display_factors(struct factors_request *req)
+{
+ u8 m;
+
+ if (req->rate > req->parent_rate)
+ req->rate = req->parent_rate;
+
+ m = DIV_ROUND_UP(req->parent_rate, req->rate);
+
+ req->rate = req->parent_rate / m;
+ req->m = m - 1;
+}
+
+static const struct clk_factors_config sun6i_display_config = {
+ .mshift = 0,
+ .mwidth = 4,
+};
+
+static const struct factors_data sun6i_display_data __initconst = {
+ .enable = 31,
+ .mux = 24,
+ .muxmask = BIT(2) | BIT(1) | BIT(0),
+ .table = &sun6i_display_config,
+ .getter = sun6i_display_factors,
+};
+
+static void __init sun6i_display_setup(struct device_node *node)
+{
+ sunxi_factors_clk_setup(node, &sun6i_display_data);
+}
+CLK_OF_DECLARE(sun6i_display, "allwinner,sun6i-a31-display-clk",
+ sun6i_display_setup);
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index 97984c503..33fd0938d 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -3,6 +3,7 @@ obj-y += clk-audio-sync.o
obj-y += clk-dfll.o
obj-y += clk-divider.o
obj-y += clk-periph.o
+obj-y += clk-periph-fixed.o
obj-y += clk-periph-gate.o
obj-y += clk-pll.o
obj-y += clk-pll-out.o
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 19bfa07e2..f01056253 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -55,6 +55,7 @@
#include <linux/seq_file.h>
#include "clk-dfll.h"
+#include "cvb.h"
/*
* DFLL control registers - access via dfll_{readl,writel}
@@ -442,8 +443,8 @@ static void dfll_tune_low(struct tegra_dfll *td)
{
td->tune_range = DFLL_TUNE_LOW;
- dfll_writel(td, td->soc->tune0_low, DFLL_TUNE0);
- dfll_writel(td, td->soc->tune1, DFLL_TUNE1);
+ dfll_writel(td, td->soc->cvb->cpu_dfll_data.tune0_low, DFLL_TUNE0);
+ dfll_writel(td, td->soc->cvb->cpu_dfll_data.tune1, DFLL_TUNE1);
dfll_wmb(td);
if (td->soc->set_clock_trimmers_low)
@@ -1449,7 +1450,7 @@ static int dfll_build_i2c_lut(struct tegra_dfll *td)
}
v_max = dev_pm_opp_get_voltage(opp);
- v = td->soc->min_millivolts * 1000;
+ v = td->soc->cvb->min_millivolts * 1000;
lut = find_vdd_map_entry_exact(td, v);
if (lut < 0)
goto out;
@@ -1461,7 +1462,7 @@ static int dfll_build_i2c_lut(struct tegra_dfll *td)
break;
v_opp = dev_pm_opp_get_voltage(opp);
- if (v_opp <= td->soc->min_millivolts * 1000)
+ if (v_opp <= td->soc->cvb->min_millivolts * 1000)
td->dvco_rate_min = dev_pm_opp_get_freq(opp);
for (;;) {
@@ -1490,7 +1491,7 @@ static int dfll_build_i2c_lut(struct tegra_dfll *td)
if (!td->dvco_rate_min)
dev_err(td->dev, "no opp above DFLL minimum voltage %d mV\n",
- td->soc->min_millivolts);
+ td->soc->cvb->min_millivolts);
else
ret = 0;
diff --git a/drivers/clk/tegra/clk-dfll.h b/drivers/clk/tegra/clk-dfll.h
index 2e4c0772a..ed2ad8882 100644
--- a/drivers/clk/tegra/clk-dfll.h
+++ b/drivers/clk/tegra/clk-dfll.h
@@ -24,22 +24,18 @@
/**
* struct tegra_dfll_soc_data - SoC-specific hooks/integration for the DFLL driver
- * @opp_dev: struct device * that holds the OPP table for the DFLL
- * @min_millivolts: minimum voltage (in mV) that the DFLL can operate
- * @tune0_low: DFLL tuning register 0 (low voltage range)
- * @tune0_high: DFLL tuning register 0 (high voltage range)
- * @tune1: DFLL tuning register 1
- * @assert_dvco_reset: fn ptr to place the DVCO in reset
- * @deassert_dvco_reset: fn ptr to release the DVCO reset
- * @set_clock_trimmers_high: fn ptr to tune clock trimmers for high voltage
- * @set_clock_trimmers_low: fn ptr to tune clock trimmers for low voltage
+ * @dev: struct device * that holds the OPP table for the DFLL
+ * @max_freq: maximum frequency supported on this SoC
+ * @cvb: CPU frequency table for this SoC
+ * @init_clock_trimmers: callback to initialize clock trimmers
+ * @set_clock_trimmers_high: callback to tune clock trimmers for high voltage
+ * @set_clock_trimmers_low: callback to tune clock trimmers for low voltage
*/
struct tegra_dfll_soc_data {
struct device *dev;
- unsigned int min_millivolts;
- u32 tune0_low;
- u32 tune0_high;
- u32 tune1;
+ unsigned long max_freq;
+ const struct cvb_table *cvb;
+
void (*init_clock_trimmers)(void);
void (*set_clock_trimmers_high)(void);
void (*set_clock_trimmers_low)(void);
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index 62ea38187..36c974916 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -71,6 +71,7 @@ enum clk_id {
tegra_clk_disp2_8,
tegra_clk_dp2,
tegra_clk_dpaux,
+ tegra_clk_dpaux1,
tegra_clk_dsialp,
tegra_clk_dsia_mux,
tegra_clk_dsiblp,
@@ -306,6 +307,7 @@ enum clk_id {
tegra_clk_xusb_ss_div2,
tegra_clk_xusb_ssp_src,
tegra_clk_sclk_mux,
+ tegra_clk_sor_safe,
tegra_clk_max,
};
diff --git a/drivers/clk/tegra/clk-periph-fixed.c b/drivers/clk/tegra/clk-periph-fixed.c
new file mode 100644
index 000000000..c57dfb037
--- /dev/null
+++ b/drivers/clk/tegra/clk-periph-fixed.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk-provider.h>
+
+#include "clk.h"
+
+static inline struct tegra_clk_periph_fixed *
+to_tegra_clk_periph_fixed(struct clk_hw *hw)
+{
+ return container_of(hw, struct tegra_clk_periph_fixed, hw);
+}
+
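+/*
+ * A fixed peripheral clock counts as enabled only when its enable bit
+ * is set and its reset bit is clear.
+ */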
+static int tegra_clk_periph_fixed_is_enabled(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_fixed *fixed = to_tegra_clk_periph_fixed(hw);
+ u32 mask = 1 << (fixed->num % 32), value;
+
+ value = readl(fixed->base + fixed->regs->enb_reg);
+ if (value & mask) {
+ value = readl(fixed->base + fixed->regs->rst_reg);
+ if ((value & mask) == 0)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int tegra_clk_periph_fixed_enable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_fixed *fixed = to_tegra_clk_periph_fixed(hw);
+ u32 mask = 1 << (fixed->num % 32);
+
+ writel(mask, fixed->base + fixed->regs->enb_set_reg);
+
+ return 0;
+}
+
+static void tegra_clk_periph_fixed_disable(struct clk_hw *hw)
+{
+ struct tegra_clk_periph_fixed *fixed = to_tegra_clk_periph_fixed(hw);
+ u32 mask = 1 << (fixed->num % 32);
+
+ writel(mask, fixed->base + fixed->regs->enb_clr_reg);
+}
+
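+/* rate = parent_rate * mul / div, computed in 64 bits to avoid overflow. */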
+static unsigned long
+tegra_clk_periph_fixed_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct tegra_clk_periph_fixed *fixed = to_tegra_clk_periph_fixed(hw);
+ unsigned long long rate;
+
+ rate = (unsigned long long)parent_rate * fixed->mul;
+ do_div(rate, fixed->div);
+
+ return (unsigned long)rate;
+}
+
+static const struct clk_ops tegra_clk_periph_fixed_ops = {
+ .is_enabled = tegra_clk_periph_fixed_is_enabled,
+ .enable = tegra_clk_periph_fixed_enable,
+ .disable = tegra_clk_periph_fixed_disable,
+ .recalc_rate = tegra_clk_periph_fixed_recalc_rate,
+};
+
+struct clk *tegra_clk_register_periph_fixed(const char *name,
+ const char *parent,
+ unsigned long flags,
+ void __iomem *base,
+ unsigned int mul,
+ unsigned int div,
+ unsigned int num)
+{
+ const struct tegra_clk_periph_regs *regs;
+ struct tegra_clk_periph_fixed *fixed;
+ struct clk_init_data init;
+ struct clk *clk;
+
+ regs = get_reg_bank(num);
+ if (!regs)
+ return ERR_PTR(-EINVAL);
+
+ fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = flags;
+ init.parent_names = parent ? &parent : NULL;
+ init.num_parents = parent ? 1 : 0;
+ init.ops = &tegra_clk_periph_fixed_ops;
+
+ fixed->base = base;
+ fixed->regs = regs;
+ fixed->mul = mul;
+ fixed->div = div;
+ fixed->num = num;
+
+ fixed->hw.init = &init;
+
+ clk = clk_register(NULL, &fixed->hw);
+ if (IS_ERR(clk))
+ kfree(fixed);
+
+ return clk;
+}
diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c
index d28d6e950..88127828b 100644
--- a/drivers/clk/tegra/clk-periph-gate.c
+++ b/drivers/clk/tegra/clk-periph-gate.c
@@ -134,7 +134,7 @@ struct clk *tegra_clk_register_periph_gate(const char *name,
struct tegra_clk_periph_gate *gate;
struct clk *clk;
struct clk_init_data init;
- struct tegra_clk_periph_regs *pregs;
+ const struct tegra_clk_periph_regs *pregs;
pregs = get_reg_bank(clk_num);
if (!pregs)
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index ec5b6113b..a17ca6d7f 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -145,7 +145,7 @@ static struct clk *_tegra_clk_register_periph(const char *name,
{
struct clk *clk;
struct clk_init_data init;
- struct tegra_clk_periph_regs *bank;
+ const struct tegra_clk_periph_regs *bank;
bool div = !(periph->gate.flags & TEGRA_PERIPH_NO_DIV);
if (periph->gate.flags & TEGRA_PERIPH_NO_DIV) {
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 6ac3f843e..4e194ecc8 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -2013,6 +2013,52 @@ struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name,
#endif
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+struct clk *tegra_clk_register_pllre_tegra210(const char *name,
+ const char *parent_name, void __iomem *clk_base,
+ void __iomem *pmc, unsigned long flags,
+ struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock, unsigned long parent_rate)
+{
+ u32 val;
+ struct tegra_clk_pll *pll;
+ struct clk *clk;
+
+ pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
+
+ if (pll_params->adjust_vco)
+ pll_params->vco_min = pll_params->adjust_vco(pll_params,
+ parent_rate);
+
+ pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
+ if (IS_ERR(pll))
+ return ERR_CAST(pll);
+
+ /* program minimum rate by default */
+
+ val = pll_readl_base(pll);
+ if (val & PLL_BASE_ENABLE)
+ WARN_ON(readl_relaxed(clk_base + pll_params->iddq_reg) &
+ BIT(pll_params->iddq_bit_idx));
+ else {
+ val = 0x4 << divm_shift(pll);
+ val |= 0x41 << divn_shift(pll);
+ pll_writel_base(val, pll);
+ }
+
+ /* disable lock override */
+
+ val = pll_readl_misc(pll);
+ val &= ~BIT(29);
+ pll_writel_misc(val, pll);
+
+ clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
+ &tegra_clk_pllre_ops);
+ if (IS_ERR(clk))
+ kfree(pll);
+
+ return clk;
+}
+
static int clk_plle_tegra210_enable(struct clk_hw *hw)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
diff --git a/drivers/clk/tegra/clk-tegra-fixed.c b/drivers/clk/tegra/clk-tegra-fixed.c
index d64ec7a1b..91c38f166 100644
--- a/drivers/clk/tegra/clk-tegra-fixed.c
+++ b/drivers/clk/tegra/clk-tegra-fixed.c
@@ -107,4 +107,3 @@ void __init tegra_fixed_clk_init(struct tegra_clk *tegra_clks)
*dt_clk = clk;
}
}
-
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index ea2b9cbf9..29d04c663 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -803,7 +803,7 @@ static struct tegra_periph_init_data gate_clks[] = {
GATE("hda2hdmi", "clk_m", 128, TEGRA_PERIPH_ON_APB, tegra_clk_hda2hdmi, 0),
GATE("bsea", "clk_m", 62, 0, tegra_clk_bsea, 0),
GATE("bsev", "clk_m", 63, 0, tegra_clk_bsev, 0),
- GATE("mipi-cal", "clk_m", 56, 0, tegra_clk_mipi_cal, 0),
+ GATE("mipi-cal", "clk72mhz", 56, 0, tegra_clk_mipi_cal, 0),
GATE("usbd", "clk_m", 22, 0, tegra_clk_usbd, 0),
GATE("usb2", "clk_m", 58, 0, tegra_clk_usb2, 0),
GATE("usb3", "clk_m", 59, 0, tegra_clk_usb3, 0),
@@ -821,7 +821,6 @@ static struct tegra_periph_init_data gate_clks[] = {
GATE("ispb", "clk_m", 3, 0, tegra_clk_ispb, 0),
GATE("vim2_clk", "clk_m", 11, 0, tegra_clk_vim2_clk, 0),
GATE("pcie", "clk_m", 70, 0, tegra_clk_pcie, 0),
- GATE("dpaux", "clk_m", 181, 0, tegra_clk_dpaux, 0),
GATE("gpu", "pll_ref", 184, 0, tegra_clk_gpu, 0),
GATE("pllg_ref", "pll_ref", 189, 0, tegra_clk_pll_g_ref, 0),
GATE("hsic_trk", "usb2_hsic_trk", 209, TEGRA_PERIPH_NO_RESET, tegra_clk_hsic_trk, 0),
@@ -877,7 +876,7 @@ static void __init periph_clk_init(void __iomem *clk_base,
struct clk **dt_clk;
for (i = 0; i < ARRAY_SIZE(periph_clks); i++) {
- struct tegra_clk_periph_regs *bank;
+ const struct tegra_clk_periph_regs *bank;
struct tegra_periph_init_data *data;
data = periph_clks + i;
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index df47ec316..b78054fac 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -743,7 +743,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
[tegra_clk_csi] = { .dt_id = TEGRA114_CLK_CSI, .present = true },
[tegra_clk_i2c2] = { .dt_id = TEGRA114_CLK_I2C2, .present = true },
[tegra_clk_uartc] = { .dt_id = TEGRA114_CLK_UARTC, .present = true },
- [tegra_clk_mipi_cal] = { .dt_id = TEGRA114_CLK_MIPI_CAL, .present = true },
[tegra_clk_emc] = { .dt_id = TEGRA114_CLK_EMC, .present = true },
[tegra_clk_usb2] = { .dt_id = TEGRA114_CLK_USB2, .present = true },
[tegra_clk_usb3] = { .dt_id = TEGRA114_CLK_USB3, .present = true },
@@ -1237,6 +1236,11 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base,
&emc_lock);
clks[TEGRA114_CLK_MC] = clk;
+ clk = tegra_clk_register_periph_gate("mipi-cal", "clk_m", 0, clk_base,
+ CLK_SET_RATE_PARENT, 56,
+ periph_clk_enb_refcnt);
+ clks[TEGRA114_CLK_MIPI_CAL] = clk;
+
for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
data = &tegra_periph_clk_list[i];
clk = tegra_clk_register_periph(data->name,
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index 61253330c..c205809ba 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -47,32 +47,32 @@ static const struct cvb_table tegra124_cpu_cvb_tables[] = {
},
.speedo_scale = 100,
.voltage_scale = 1000,
- .cvb_table = {
- {204000000UL, {1112619, -29295, 402} },
- {306000000UL, {1150460, -30585, 402} },
- {408000000UL, {1190122, -31865, 402} },
- {510000000UL, {1231606, -33155, 402} },
- {612000000UL, {1274912, -34435, 402} },
- {714000000UL, {1320040, -35725, 402} },
- {816000000UL, {1366990, -37005, 402} },
- {918000000UL, {1415762, -38295, 402} },
- {1020000000UL, {1466355, -39575, 402} },
- {1122000000UL, {1518771, -40865, 402} },
- {1224000000UL, {1573009, -42145, 402} },
- {1326000000UL, {1629068, -43435, 402} },
- {1428000000UL, {1686950, -44715, 402} },
- {1530000000UL, {1746653, -46005, 402} },
- {1632000000UL, {1808179, -47285, 402} },
- {1734000000UL, {1871526, -48575, 402} },
- {1836000000UL, {1936696, -49855, 402} },
- {1938000000UL, {2003687, -51145, 402} },
- {2014500000UL, {2054787, -52095, 402} },
- {2116500000UL, {2124957, -53385, 402} },
- {2218500000UL, {2196950, -54665, 402} },
- {2320500000UL, {2270765, -55955, 402} },
- {2422500000UL, {2346401, -57235, 402} },
- {2524500000UL, {2437299, -58535, 402} },
- {0, { 0, 0, 0} },
+ .entries = {
+ { 204000000UL, { 1112619, -29295, 402 } },
+ { 306000000UL, { 1150460, -30585, 402 } },
+ { 408000000UL, { 1190122, -31865, 402 } },
+ { 510000000UL, { 1231606, -33155, 402 } },
+ { 612000000UL, { 1274912, -34435, 402 } },
+ { 714000000UL, { 1320040, -35725, 402 } },
+ { 816000000UL, { 1366990, -37005, 402 } },
+ { 918000000UL, { 1415762, -38295, 402 } },
+ { 1020000000UL, { 1466355, -39575, 402 } },
+ { 1122000000UL, { 1518771, -40865, 402 } },
+ { 1224000000UL, { 1573009, -42145, 402 } },
+ { 1326000000UL, { 1629068, -43435, 402 } },
+ { 1428000000UL, { 1686950, -44715, 402 } },
+ { 1530000000UL, { 1746653, -46005, 402 } },
+ { 1632000000UL, { 1808179, -47285, 402 } },
+ { 1734000000UL, { 1871526, -48575, 402 } },
+ { 1836000000UL, { 1936696, -49855, 402 } },
+ { 1938000000UL, { 2003687, -51145, 402 } },
+ { 2014500000UL, { 2054787, -52095, 402 } },
+ { 2116500000UL, { 2124957, -53385, 402 } },
+ { 2218500000UL, { 2196950, -54665, 402 } },
+ { 2320500000UL, { 2270765, -55955, 402 } },
+ { 2422500000UL, { 2346401, -57235, 402 } },
+ { 2524500000UL, { 2437299, -58535, 402 } },
+ { 0UL, { 0, 0, 0 } },
},
.cpu_dfll_data = {
.tune0_low = 0x005020ff,
@@ -84,9 +84,8 @@ static const struct cvb_table tegra124_cpu_cvb_tables[] = {
static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
{
- int process_id, speedo_id, speedo_value;
+ int process_id, speedo_id, speedo_value, err;
struct tegra_dfll_soc_data *soc;
- const struct cvb_table *cvb;
process_id = tegra_sku_info.cpu_process_id;
speedo_id = tegra_sku_info.cpu_speedo_id;
@@ -108,23 +107,41 @@ static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
return -ENODEV;
}
- cvb = tegra_cvb_build_opp_table(tegra124_cpu_cvb_tables,
- ARRAY_SIZE(tegra124_cpu_cvb_tables),
- process_id, speedo_id, speedo_value,
- cpu_max_freq_table[speedo_id],
- soc->dev);
- if (IS_ERR(cvb)) {
- dev_err(&pdev->dev, "couldn't build OPP table: %ld\n",
- PTR_ERR(cvb));
- return PTR_ERR(cvb);
+ soc->max_freq = cpu_max_freq_table[speedo_id];
+
+ soc->cvb = tegra_cvb_add_opp_table(soc->dev, tegra124_cpu_cvb_tables,
+ ARRAY_SIZE(tegra124_cpu_cvb_tables),
+ process_id, speedo_id, speedo_value,
+ soc->max_freq);
+ if (IS_ERR(soc->cvb)) {
+ dev_err(&pdev->dev, "couldn't add OPP table: %ld\n",
+ PTR_ERR(soc->cvb));
+ return PTR_ERR(soc->cvb);
+ }
+
+ err = tegra_dfll_register(pdev, soc);
+ if (err < 0) {
+ tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
+ return err;
}
- soc->min_millivolts = cvb->min_millivolts;
- soc->tune0_low = cvb->cpu_dfll_data.tune0_low;
- soc->tune0_high = cvb->cpu_dfll_data.tune0_high;
- soc->tune1 = cvb->cpu_dfll_data.tune1;
+ platform_set_drvdata(pdev, soc);
+
+ return 0;
+}
+
+static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
+{
+ struct tegra_dfll_soc_data *soc = platform_get_drvdata(pdev);
+ int err;
+
+ err = tegra_dfll_unregister(pdev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to unregister DFLL: %d\n", err);
+
+ tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
- return tegra_dfll_register(pdev, soc);
+ return 0;
}
static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
@@ -140,7 +157,7 @@ static const struct dev_pm_ops tegra124_dfll_pm_ops = {
static struct platform_driver tegra124_dfll_fcpu_driver = {
.probe = tegra124_dfll_fcpu_probe,
- .remove = tegra_dfll_unregister,
+ .remove = tegra124_dfll_fcpu_remove,
.driver = {
.name = "tegra124-dfll",
.of_match_table = tegra124_dfll_fcpu_of_match,
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 162725829..f4fbbf16a 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1155,6 +1155,10 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
1, 2);
clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk;
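+ /*
+ * dpaux is a fixed 1/17 child of pll_p; with pll_p at its usual
+ * 408 MHz this yields the 24 MHz the DisplayPort AUX channel uses.
+ */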
+ clk = tegra_clk_register_periph_fixed("dpaux", "pll_p", 0, clk_base,
+ 1, 17, 181);
+ clks[TEGRA124_CLK_DPAUX] = clk;
+
clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0,
clk_base + PLLD_MISC, 30, 0, &pll_d_lock);
clks[TEGRA124_CLK_PLL_D_DSI_OUT] = clk;
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 7ad638376..837e5cbd6 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -623,7 +623,7 @@ static unsigned int tegra20_get_pll_ref_div(void)
case OSC_CTRL_PLL_REF_DIV_4:
return 4;
default:
- pr_err("Invalied pll ref divider %d\n", pll_ref_div);
+ pr_err("Invalid pll ref divider %d\n", pll_ref_div);
BUG();
}
return 0;
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 637041fd5..456cf586d 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -92,6 +92,7 @@
#define PLLE_AUX 0x48c
#define PLLRE_BASE 0x4c4
#define PLLRE_MISC0 0x4c8
+#define PLLRE_OUT1 0x4cc
#define PLLDP_BASE 0x590
#define PLLDP_MISC 0x594
@@ -175,6 +176,19 @@
#define UTMIP_PLL_CFG1_FORCE_PLL_ENABLE_POWERDOWN BIT(14)
#define UTMIP_PLL_CFG1_FORCE_PLL_ACTIVE_POWERDOWN BIT(12)
+#define SATA_PLL_CFG0 0x490
+#define SATA_PLL_CFG0_PADPLL_RESET_SWCTL BIT(0)
+#define SATA_PLL_CFG0_PADPLL_USE_LOCKDET BIT(2)
+#define SATA_PLL_CFG0_PADPLL_SLEEP_IDDQ BIT(13)
+#define SATA_PLL_CFG0_SEQ_ENABLE BIT(24)
+
+#define XUSBIO_PLL_CFG0 0x51c
+#define XUSBIO_PLL_CFG0_PADPLL_RESET_SWCTL BIT(0)
+#define XUSBIO_PLL_CFG0_CLK_ENABLE_SWCTL BIT(2)
+#define XUSBIO_PLL_CFG0_PADPLL_USE_LOCKDET BIT(6)
+#define XUSBIO_PLL_CFG0_PADPLL_SLEEP_IDDQ BIT(13)
+#define XUSBIO_PLL_CFG0_SEQ_ENABLE BIT(24)
+
#define UTMIPLL_HW_PWRDN_CFG0 0x52c
#define UTMIPLL_HW_PWRDN_CFG0_UTMIPLL_LOCK BIT(31)
#define UTMIPLL_HW_PWRDN_CFG0_SEQ_START_STATE BIT(25)
@@ -416,6 +430,51 @@ static const char *mux_pllmcp_clkm[] = {
#define PLLU_MISC0_WRITE_MASK 0xbfffffff
#define PLLU_MISC1_WRITE_MASK 0x00000007
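+/*
+ * The helpers below hand the XUSB IO and SATA pad PLLs over to the
+ * hardware power sequencer: software override controls are cleared,
+ * lock detection and IDDQ sleep are enabled, and a separate call
+ * starts the sequencer itself.
+ */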
+void tegra210_xusb_pll_hw_control_enable(void)
+{
+ u32 val;
+
+ val = readl_relaxed(clk_base + XUSBIO_PLL_CFG0);
+ val &= ~(XUSBIO_PLL_CFG0_CLK_ENABLE_SWCTL |
+ XUSBIO_PLL_CFG0_PADPLL_RESET_SWCTL);
+ val |= XUSBIO_PLL_CFG0_PADPLL_USE_LOCKDET |
+ XUSBIO_PLL_CFG0_PADPLL_SLEEP_IDDQ;
+ writel_relaxed(val, clk_base + XUSBIO_PLL_CFG0);
+}
+EXPORT_SYMBOL_GPL(tegra210_xusb_pll_hw_control_enable);
+
+void tegra210_xusb_pll_hw_sequence_start(void)
+{
+ u32 val;
+
+ val = readl_relaxed(clk_base + XUSBIO_PLL_CFG0);
+ val |= XUSBIO_PLL_CFG0_SEQ_ENABLE;
+ writel_relaxed(val, clk_base + XUSBIO_PLL_CFG0);
+}
+EXPORT_SYMBOL_GPL(tegra210_xusb_pll_hw_sequence_start);
+
+void tegra210_sata_pll_hw_control_enable(void)
+{
+ u32 val;
+
+ val = readl_relaxed(clk_base + SATA_PLL_CFG0);
+ val &= ~SATA_PLL_CFG0_PADPLL_RESET_SWCTL;
+ val |= SATA_PLL_CFG0_PADPLL_USE_LOCKDET |
+ SATA_PLL_CFG0_PADPLL_SLEEP_IDDQ;
+ writel_relaxed(val, clk_base + SATA_PLL_CFG0);
+}
+EXPORT_SYMBOL_GPL(tegra210_sata_pll_hw_control_enable);
+
+void tegra210_sata_pll_hw_sequence_start(void)
+{
+ u32 val;
+
+ val = readl_relaxed(clk_base + SATA_PLL_CFG0);
+ val |= SATA_PLL_CFG0_SEQ_ENABLE;
+ writel_relaxed(val, clk_base + SATA_PLL_CFG0);
+}
+EXPORT_SYMBOL_GPL(tegra210_sata_pll_hw_sequence_start);
+
static inline void _pll_misc_chk_default(void __iomem *base,
struct tegra_clk_pll_params *params,
u8 misc_num, u32 default_val, u32 mask)
@@ -1162,7 +1221,7 @@ static int tegra210_pll_fixed_mdiv_cfg(struct clk_hw *hw,
p = rate >= params->vco_min ? 1 : -EINVAL;
}
- if (IS_ERR_VALUE(p))
+ if (p < 0)
return -EINVAL;
cfg->m = tegra_pll_get_fixed_mdiv(hw, input_rate);
@@ -2092,6 +2151,7 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
[tegra_clk_clk72Mhz_8] = { .dt_id = TEGRA210_CLK_CLK72MHZ, .present = true },
[tegra_clk_vic03_8] = { .dt_id = TEGRA210_CLK_VIC03, .present = true },
[tegra_clk_dpaux] = { .dt_id = TEGRA210_CLK_DPAUX, .present = true },
+ [tegra_clk_dpaux1] = { .dt_id = TEGRA210_CLK_DPAUX1, .present = true },
[tegra_clk_sor0] = { .dt_id = TEGRA210_CLK_SOR0, .present = true },
[tegra_clk_sor0_lvds] = { .dt_id = TEGRA210_CLK_SOR0_LVDS, .present = true },
[tegra_clk_gpu] = { .dt_id = TEGRA210_CLK_GPU, .present = true },
@@ -2403,6 +2463,18 @@ static __init void tegra210_periph_clk_init(void __iomem *clk_base,
1, 2);
clks[TEGRA210_CLK_XUSB_SS_DIV2] = clk;
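+ /* dpaux, dpaux1 and sor_safe are all fixed 1/17 children of pll_p. */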
+ clk = tegra_clk_register_periph_fixed("dpaux", "pll_p", 0, clk_base,
+ 1, 17, 181);
+ clks[TEGRA210_CLK_DPAUX] = clk;
+
+ clk = tegra_clk_register_periph_fixed("dpaux1", "pll_p", 0, clk_base,
+ 1, 17, 207);
+ clks[TEGRA210_CLK_DPAUX1] = clk;
+
+ clk = tegra_clk_register_periph_fixed("sor_safe", "pll_p", 0, clk_base,
+ 1, 17, 222);
+ clks[TEGRA210_CLK_SOR_SAFE] = clk;
+
/* pll_d_dsi_out */
clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0,
clk_base + PLLD_MISC0, 21, 0, &pll_d_lock);
@@ -2582,8 +2654,10 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
clks[TEGRA210_CLK_PLL_D_OUT0] = clk;
/* PLLRE */
- clk = tegra_clk_register_pllre("pll_re_vco", "pll_ref", clk_base, pmc,
- 0, &pll_re_vco_params, &pll_re_lock, pll_ref_freq);
+ clk = tegra_clk_register_pllre_tegra210("pll_re_vco", "pll_ref",
+ clk_base, pmc, 0,
+ &pll_re_vco_params,
+ &pll_re_lock, pll_ref_freq);
clk_register_clkdev(clk, "pll_re_vco", NULL);
clks[TEGRA210_CLK_PLL_RE_VCO] = clk;
@@ -2593,6 +2667,15 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
clk_register_clkdev(clk, "pll_re_out", NULL);
clks[TEGRA210_CLK_PLL_RE_OUT] = clk;
+ clk = tegra_clk_register_divider("pll_re_out1_div", "pll_re_vco",
+ clk_base + PLLRE_OUT1, 0,
+ TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_re_out1", "pll_re_out1_div",
+ clk_base + PLLRE_OUT1, 1, 0,
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clks[TEGRA210_CLK_PLL_RE_OUT1] = clk;
+
/* PLLE */
clk = tegra_clk_register_plle_tegra210("pll_e", "pll_ref",
clk_base, 0, &pll_e_params, NULL);
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 0478565cf..9396f4930 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -339,11 +339,11 @@ static const struct pdiv_map pllu_p[] = {
};
static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
- { 12000000, 480000000, 960, 12, 1, 12 },
- { 13000000, 480000000, 960, 13, 1, 12 },
- { 16800000, 480000000, 400, 7, 1, 5 },
- { 19200000, 480000000, 200, 4, 1, 3 },
- { 26000000, 480000000, 960, 26, 1, 12 },
+ { 12000000, 480000000, 960, 12, 2, 12 },
+ { 13000000, 480000000, 960, 13, 2, 12 },
+ { 16800000, 480000000, 400, 7, 2, 5 },
+ { 19200000, 480000000, 200, 4, 2, 3 },
+ { 26000000, 480000000, 960, 26, 2, 12 },
{ 0, 0, 0, 0, 0, 0 },
};
@@ -1372,6 +1372,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_SBC4, TEGRA30_CLK_PLL_P, 100000000, 0 },
{ TEGRA30_CLK_SBC5, TEGRA30_CLK_PLL_P, 100000000, 0 },
{ TEGRA30_CLK_SBC6, TEGRA30_CLK_PLL_P, 100000000, 0 },
+ { TEGRA30_CLK_PLL_C, TEGRA30_CLK_CLK_MAX, 600000000, 0 },
{ TEGRA30_CLK_HOST1X, TEGRA30_CLK_PLL_C, 150000000, 0 },
{ TEGRA30_CLK_DISP1, TEGRA30_CLK_PLL_P, 600000000, 0 },
{ TEGRA30_CLK_DISP2, TEGRA30_CLK_PLL_P, 600000000, 0 },
@@ -1379,6 +1380,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_GR2D, TEGRA30_CLK_PLL_C, 300000000, 0 },
{ TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0 },
{ TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 },
+ { TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
/* must be the last entry */
{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index f60fe2e34..b2cdd9a23 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -84,7 +84,7 @@ static int (*special_reset_assert)(unsigned long);
static int (*special_reset_deassert)(unsigned long);
static unsigned int num_special_reset;
-static struct tegra_clk_periph_regs periph_regs[] = {
+static const struct tegra_clk_periph_regs periph_regs[] = {
[0] = {
.enb_reg = CLK_OUT_ENB_L,
.enb_set_reg = CLK_OUT_ENB_SET_L,
@@ -182,7 +182,7 @@ static int tegra_clk_rst_deassert(struct reset_controller_dev *rcdev,
return -EINVAL;
}
-struct tegra_clk_periph_regs *get_reg_bank(int clkid)
+const struct tegra_clk_periph_regs *get_reg_bank(int clkid)
{
int reg_bank = clkid / 32;
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 4dbcfaec5..9421f0310 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -386,6 +386,12 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
struct tegra_clk_pll_params *pll_params,
spinlock_t *lock, unsigned long parent_rate);
+struct clk *tegra_clk_register_pllre_tegra210(const char *name,
+ const char *parent_name, void __iomem *clk_base,
+ void __iomem *pmc, unsigned long flags,
+ struct tegra_clk_pll_params *pll_params,
+ spinlock_t *lock, unsigned long parent_rate);
+
struct clk *tegra_clk_register_plle_tegra114(const char *name,
const char *parent_name,
void __iomem *clk_base, unsigned long flags,
@@ -496,7 +502,7 @@ struct tegra_clk_periph_gate {
u8 flags;
int clk_num;
int *enable_refcnt;
- struct tegra_clk_periph_regs *regs;
+ const struct tegra_clk_periph_regs *regs;
};
#define to_clk_periph_gate(_hw) \
@@ -516,6 +522,23 @@ struct clk *tegra_clk_register_periph_gate(const char *name,
const char *parent_name, u8 gate_flags, void __iomem *clk_base,
unsigned long flags, int clk_num, int *enable_refcnt);
+struct tegra_clk_periph_fixed {
+ struct clk_hw hw;
+ void __iomem *base;
+ const struct tegra_clk_periph_regs *regs;
+ unsigned int mul;
+ unsigned int div;
+ unsigned int num;
+};
+
+struct clk *tegra_clk_register_periph_fixed(const char *name,
+ const char *parent,
+ unsigned long flags,
+ void __iomem *base,
+ unsigned int mul,
+ unsigned int div,
+ unsigned int num);
+
/**
* struct clk-periph - peripheral clock
*
@@ -716,7 +739,7 @@ void tegra_init_from_table(struct tegra_clk_init_table *tbl,
void tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
struct clk *clks[], int clk_max);
-struct tegra_clk_periph_regs *get_reg_bank(int clkid);
+const struct tegra_clk_periph_regs *get_reg_bank(int clkid);
struct clk **tegra_clk_init(void __iomem *clk_base, int num, int periph_banks);
struct clk **tegra_lookup_dt_id(int clk_id, struct tegra_clk *tegra_clk);
diff --git a/drivers/clk/tegra/cvb.c b/drivers/clk/tegra/cvb.c
index 69c74eec3..624115e82 100644
--- a/drivers/clk/tegra/cvb.c
+++ b/drivers/clk/tegra/cvb.c
@@ -61,29 +61,28 @@ static int round_voltage(int mv, const struct rail_alignment *align, int up)
return mv;
}
-static int build_opp_table(const struct cvb_table *d,
- int speedo_value,
- unsigned long max_freq,
- struct device *opp_dev)
+static int build_opp_table(struct device *dev, const struct cvb_table *table,
+ int speedo_value, unsigned long max_freq)
{
+ const struct rail_alignment *align = &table->alignment;
int i, ret, dfll_mv, min_mv, max_mv;
- const struct cvb_table_freq_entry *table = NULL;
- const struct rail_alignment *align = &d->alignment;
- min_mv = round_voltage(d->min_millivolts, align, UP);
- max_mv = round_voltage(d->max_millivolts, align, DOWN);
+ min_mv = round_voltage(table->min_millivolts, align, UP);
+ max_mv = round_voltage(table->max_millivolts, align, DOWN);
for (i = 0; i < MAX_DVFS_FREQS; i++) {
- table = &d->cvb_table[i];
- if (!table->freq || (table->freq > max_freq))
+ const struct cvb_table_freq_entry *entry = &table->entries[i];
+
+ if (!entry->freq || (entry->freq > max_freq))
break;
- dfll_mv = get_cvb_voltage(
- speedo_value, d->speedo_scale, &table->coefficients);
- dfll_mv = round_cvb_voltage(dfll_mv, d->voltage_scale, align);
+ dfll_mv = get_cvb_voltage(speedo_value, table->speedo_scale,
+ &entry->coefficients);
+ dfll_mv = round_cvb_voltage(dfll_mv, table->voltage_scale,
+ align);
dfll_mv = clamp(dfll_mv, min_mv, max_mv);
- ret = dev_pm_opp_add(opp_dev, table->freq, dfll_mv * 1000);
+ ret = dev_pm_opp_add(dev, entry->freq, dfll_mv * 1000);
if (ret)
return ret;
}
@@ -92,7 +91,7 @@ static int build_opp_table(const struct cvb_table *d,
}
/**
- * tegra_cvb_build_opp_table - build OPP table from Tegra CVB tables
+ * tegra_cvb_add_opp_table - build OPP table from Tegra CVB tables
* @cvb_tables: array of CVB tables
* @sz: size of the previously mentioned array
* @process_id: process id of the HW module
@@ -108,26 +107,42 @@ static int build_opp_table(const struct cvb_table *d,
* given @opp_dev. Returns a pointer to the struct cvb_table that matched
* or an ERR_PTR on failure.
*/
-const struct cvb_table *tegra_cvb_build_opp_table(
- const struct cvb_table *cvb_tables,
- size_t sz, int process_id,
- int speedo_id, int speedo_value,
- unsigned long max_rate,
- struct device *opp_dev)
+const struct cvb_table *
+tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *tables,
+ size_t count, int process_id, int speedo_id,
+ int speedo_value, unsigned long max_freq)
{
- int i, ret;
+ size_t i;
+ int ret;
- for (i = 0; i < sz; i++) {
- const struct cvb_table *d = &cvb_tables[i];
+ for (i = 0; i < count; i++) {
+ const struct cvb_table *table = &tables[i];
- if (d->speedo_id != -1 && d->speedo_id != speedo_id)
+ if (table->speedo_id != -1 && table->speedo_id != speedo_id)
continue;
- if (d->process_id != -1 && d->process_id != process_id)
+
+ if (table->process_id != -1 && table->process_id != process_id)
continue;
- ret = build_opp_table(d, speedo_value, max_rate, opp_dev);
- return ret ? ERR_PTR(ret) : d;
+ ret = build_opp_table(dev, table, speedo_value, max_freq);
+ return ret ? ERR_PTR(ret) : table;
}
return ERR_PTR(-EINVAL);
}
+
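+/*
+ * Remove exactly the OPPs that tegra_cvb_add_opp_table() created,
+ * walking the entries with the same termination conditions.
+ */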
+void tegra_cvb_remove_opp_table(struct device *dev,
+ const struct cvb_table *table,
+ unsigned long max_freq)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_DVFS_FREQS; i++) {
+ const struct cvb_table_freq_entry *entry = &table->entries[i];
+
+ if (!entry->freq || (entry->freq > max_freq))
+ break;
+
+ dev_pm_opp_remove(dev, entry->freq);
+ }
+}
diff --git a/drivers/clk/tegra/cvb.h b/drivers/clk/tegra/cvb.h
index f62cdc4f4..c1f077993 100644
--- a/drivers/clk/tegra/cvb.h
+++ b/drivers/clk/tegra/cvb.h
@@ -53,15 +53,16 @@ struct cvb_table {
int speedo_scale;
int voltage_scale;
- struct cvb_table_freq_entry cvb_table[MAX_DVFS_FREQS];
+ struct cvb_table_freq_entry entries[MAX_DVFS_FREQS];
struct cvb_cpu_dfll_data cpu_dfll_data;
};
-const struct cvb_table *tegra_cvb_build_opp_table(
- const struct cvb_table *cvb_tables,
- size_t sz, int process_id,
- int speedo_id, int speedo_value,
- unsigned long max_rate,
- struct device *opp_dev);
+const struct cvb_table *
+tegra_cvb_add_opp_table(struct device *dev, const struct cvb_table *cvb_tables,
+ size_t count, int process_id, int speedo_id,
+ int speedo_value, unsigned long max_freq);
+void tegra_cvb_remove_opp_table(struct device *dev,
+ const struct cvb_table *table,
+ unsigned long max_freq);
#endif
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 59ce2fa2c..294bc03ec 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -210,6 +210,7 @@ static struct ti_dt_clk omap54xx_clks[] = {
DT_CLK("usbhs_omap", "usbtll_fck", "dummy_ck"),
DT_CLK("omap_wdt", "ick", "dummy_ck"),
DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+ DT_CLK(NULL, "sys_clkin_ck", "sys_clkin"),
DT_CLK("4ae18000.timer", "timer_sys_ck", "sys_clkin"),
DT_CLK("48032000.timer", "timer_sys_ck", "sys_clkin"),
DT_CLK("48034000.timer", "timer_sys_ck", "sys_clkin"),
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index a911d7de3..bfa17d33e 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -223,7 +223,7 @@ static struct ti_dt_clk dra7xx_clks[] = {
DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "mcasp6_aux_gfclk_mux"),
DT_CLK(NULL, "mcasp7_ahclkx_mux", "mcasp7_ahclkx_mux"),
DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "mcasp7_aux_gfclk_mux"),
- DT_CLK(NULL, "mcasp8_ahclk_mux", "mcasp8_ahclk_mux"),
+ DT_CLK(NULL, "mcasp8_ahclkx_mux", "mcasp8_ahclkx_mux"),
DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "mcasp8_aux_gfclk_mux"),
DT_CLK(NULL, "mmc1_fclk_mux", "mmc1_fclk_mux"),
DT_CLK(NULL, "mmc1_fclk_div", "mmc1_fclk_div"),
@@ -289,6 +289,7 @@ static struct ti_dt_clk dra7xx_clks[] = {
DT_CLK("usbhs_omap", "usbtll_fck", "dummy_ck"),
DT_CLK("omap_wdt", "ick", "dummy_ck"),
DT_CLK(NULL, "timer_32k_ck", "sys_32k_ck"),
+ DT_CLK(NULL, "sys_clkin_ck", "timer_sys_clk_div"),
DT_CLK("4ae18000.timer", "timer_sys_ck", "timer_sys_clk_div"),
DT_CLK("48032000.timer", "timer_sys_ck", "timer_sys_clk_div"),
DT_CLK("48034000.timer", "timer_sys_ck", "timer_sys_clk_div"),
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index 2e14dfb58..c77333230 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -265,6 +265,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
/* Get configuration for the ATL instances */
snprintf(prop, sizeof(prop), "atl%u", i);
+ of_node_get(node);
cfg_node = of_find_node_by_name(node, prop);
if (cfg_node) {
ret = of_property_read_u32(cfg_node, "bws",
@@ -278,6 +279,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
atl_write(cinfo, DRA7_ATL_AWSMUX_REG(i),
cdesc->aws);
}
+ of_node_put(cfg_node);
}
cdesc->probed = true;
diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c
index 1ddc288fc..c6ae56380 100644
--- a/drivers/clk/ti/clkt_dflt.c
+++ b/drivers/clk/ti/clkt_dflt.c
@@ -222,7 +222,7 @@ int omap2_dflt_clk_enable(struct clk_hw *hw)
}
}
- if (unlikely(IS_ERR(clk->enable_reg))) {
+ if (IS_ERR(clk->enable_reg)) {
pr_err("%s: %s missing enable_reg\n", __func__,
clk_hw_get_name(hw));
ret = -EINVAL;
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
index 032c658a5..b919fdfe8 100644
--- a/drivers/clk/ti/clkt_dpll.c
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -301,6 +301,9 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
dd = clk->dpll_data;
+ if (dd->max_rate && target_rate > dd->max_rate)
+ target_rate = dd->max_rate;
+
ref_rate = clk_hw_get_rate(dd->clk_ref);
clk_name = clk_hw_get_name(hw);
pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n",
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 3bc9959f7..9fc8754a6 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -655,6 +655,7 @@ static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
.max_multiplier = 2047,
.max_divider = 128,
.min_divider = 1,
+ .max_rate = 1000000000,
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
@@ -674,6 +675,7 @@ static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
.max_divider = 256,
.min_divider = 2,
.flags = DPLL_J_TYPE,
+ .max_rate = 2000000000,
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
@@ -692,6 +694,7 @@ static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
.max_multiplier = 2047,
.max_divider = 128,
.min_divider = 1,
+ .max_rate = 2000000000,
.flags = DPLL_J_TYPE,
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
@@ -712,6 +715,7 @@ static void __init of_ti_am3_dpll_setup(struct device_node *node)
.max_multiplier = 2047,
.max_divider = 128,
.min_divider = 1,
+ .max_rate = 1000000000,
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
@@ -729,6 +733,7 @@ static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
.max_multiplier = 2047,
.max_divider = 128,
.min_divider = 1,
+ .max_rate = 1000000000,
.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
};
diff --git a/drivers/clk/zte/clk-zx296702.c b/drivers/clk/zte/clk-zx296702.c
index ebd20d852..76e967c19 100644
--- a/drivers/clk/zte/clk-zx296702.c
+++ b/drivers/clk/zte/clk-zx296702.c
@@ -234,8 +234,7 @@ static void __init zx296702_top_clocks_init(struct device_node *np)
WARN_ON(!topcrm_base);
clk[ZX296702_OSC] =
- clk_register_fixed_rate(NULL, "osc", NULL, CLK_IS_ROOT,
- 30000000);
+ clk_register_fixed_rate(NULL, "osc", NULL, 0, 30000000);
clk[ZX296702_PLL_A9] =
clk_register_zx_pll("pll_a9", "osc", 0, topcrm_base
+ 0x01c, pll_a9_config,
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index c346be650..47352d25c 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -181,11 +181,27 @@ config CLKSRC_TI_32K
This option enables support for Texas Instruments 32.768 Hz clocksource
available on many OMAP-like platforms.
+config CLKSRC_NPS
+ bool "NPS400 clocksource driver" if COMPILE_TEST
+ depends on !PHYS_ADDR_T_64BIT
+ select CLKSRC_MMIO
+ select CLKSRC_OF if OF
+ help
+ NPS400 clocksource support.
+ Provides a 64-bit counter with an update rate of up to 1000 MHz,
+ accessed through a pair of 32-bit memory-mapped registers.
+
config CLKSRC_STM32
bool "Clocksource for STM32 SoCs" if !ARCH_STM32
depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
select CLKSRC_MMIO
+config CLKSRC_MPS2
+ bool "Clocksource for MPS2 SoCs" if COMPILE_TEST
+ depends on GENERIC_SCHED_CLOCK
+ select CLKSRC_MMIO
+ select CLKSRC_OF
+
config ARM_ARCH_TIMER
bool
select CLKSRC_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index dc2b8997f..473974f95 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o
obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
obj-$(CONFIG_CLKSRC_LPC32XX) += time-lpc32xx.o
+obj-$(CONFIG_CLKSRC_MPS2) += mps2-timer.o
obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o
obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o
obj-$(CONFIG_MTK_TIMER) += mtk_timer.o
obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
+obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 5152b3898..4814446a0 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -468,11 +468,11 @@ static struct cyclecounter cyclecounter = {
.mask = CLOCKSOURCE_MASK(56),
};
-static struct timecounter timecounter;
+static struct arch_timer_kvm_info arch_timer_kvm_info;
-struct timecounter *arch_timer_get_timecounter(void)
+struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
{
- return &timecounter;
+ return &arch_timer_kvm_info;
}
static void __init arch_counter_register(unsigned type)
@@ -500,7 +500,8 @@ static void __init arch_counter_register(unsigned type)
clocksource_register_hz(&clocksource_counter, arch_timer_rate);
cyclecounter.mult = clocksource_counter.mult;
cyclecounter.shift = clocksource_counter.shift;
- timecounter_init(&timecounter, &cyclecounter, start_count);
+ timecounter_init(&arch_timer_kvm_info.timecounter,
+ &cyclecounter, start_count);
/* 56 bits minimum, so we assume worst case rollover */
sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
@@ -744,6 +745,8 @@ static void __init arch_timer_init(void)
arch_timer_register();
arch_timer_common_init();
+
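+ /* Expose the virtual timer PPI to KVM alongside the timecounter. */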
+ arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];
}
static void __init arch_timer_of_init(struct device_node *np)
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index 633452602..797505aa2 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -264,6 +264,7 @@ dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
dw_ced->ced.set_state_shutdown = apbt_shutdown;
dw_ced->ced.set_state_periodic = apbt_set_periodic;
dw_ced->ced.set_state_oneshot = apbt_set_oneshot;
+ dw_ced->ced.set_state_oneshot_stopped = apbt_shutdown;
dw_ced->ced.tick_resume = apbt_resume;
dw_ced->ced.set_next_event = apbt_next_event;
dw_ced->ced.irq = dw_ced->timer.irq;
diff --git a/drivers/clocksource/mps2-timer.c b/drivers/clocksource/mps2-timer.c
new file mode 100644
index 000000000..3d33a5e23
--- /dev/null
+++ b/drivers/clocksource/mps2-timer.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Vladimir Murzin <vladimir.murzin@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#define TIMER_CTRL 0x0
+#define TIMER_CTRL_ENABLE BIT(0)
+#define TIMER_CTRL_IE BIT(3)
+
+#define TIMER_VALUE 0x4
+#define TIMER_RELOAD 0x8
+#define TIMER_INT 0xc
+
+struct clockevent_mps2 {
+ void __iomem *reg;
+ u32 clock_count_per_tick;
+ struct clock_event_device clkevt;
+};
+
+static void __iomem *sched_clock_base;
+
+static u64 notrace mps2_sched_read(void)
+{
+ return ~readl_relaxed(sched_clock_base + TIMER_VALUE);
+}
+
+static inline struct clockevent_mps2 *to_mps2_clkevt(struct clock_event_device *c)
+{
+ return container_of(c, struct clockevent_mps2, clkevt);
+}
+
+static void clockevent_mps2_writel(u32 val, struct clock_event_device *c, u32 offset)
+{
+ writel_relaxed(val, to_mps2_clkevt(c)->reg + offset);
+}
+
+static int mps2_timer_shutdown(struct clock_event_device *ce)
+{
+ clockevent_mps2_writel(0, ce, TIMER_RELOAD);
+ clockevent_mps2_writel(0, ce, TIMER_CTRL);
+
+ return 0;
+}
+
+static int mps2_timer_set_next_event(unsigned long next, struct clock_event_device *ce)
+{
+ clockevent_mps2_writel(next, ce, TIMER_VALUE);
+ clockevent_mps2_writel(TIMER_CTRL_IE | TIMER_CTRL_ENABLE, ce, TIMER_CTRL);
+
+ return 0;
+}
+
+static int mps2_timer_set_periodic(struct clock_event_device *ce)
+{
+ u32 clock_count_per_tick = to_mps2_clkevt(ce)->clock_count_per_tick;
+
+ clockevent_mps2_writel(clock_count_per_tick, ce, TIMER_RELOAD);
+ clockevent_mps2_writel(clock_count_per_tick, ce, TIMER_VALUE);
+ clockevent_mps2_writel(TIMER_CTRL_IE | TIMER_CTRL_ENABLE, ce, TIMER_CTRL);
+
+ return 0;
+}
+
+static irqreturn_t mps2_timer_interrupt(int irq, void *dev_id)
+{
+ struct clockevent_mps2 *ce = dev_id;
+ u32 status = readl_relaxed(ce->reg + TIMER_INT);
+
+ if (!status) {
+ pr_warn("spurious interrupt\n");
+ return IRQ_NONE;
+ }
+
+ writel_relaxed(1, ce->reg + TIMER_INT);
+
+ ce->clkevt.event_handler(&ce->clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static int __init mps2_clockevent_init(struct device_node *np)
+{
+ void __iomem *base;
+ struct clk *clk = NULL;
+ struct clockevent_mps2 *ce;
+ u32 rate;
+ int irq, ret;
+ const char *name = "mps2-clkevt";
+
+ ret = of_property_read_u32(np, "clock-frequency", &rate);
+ if (ret) {
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("failed to get clock for clockevent: %d\n", ret);
+ goto out;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("failed to enable clock for clockevent: %d\n", ret);
+ goto out_clk_put;
+ }
+
+ rate = clk_get_rate(clk);
+ }
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -EADDRNOTAVAIL;
+ pr_err("failed to map register for clockevent: %d\n", ret);
+ goto out_clk_disable;
+ }
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ ret = -ENOENT;
+ pr_err("failed to get irq for clockevent: %d\n", ret);
+ goto out_iounmap;
+ }
+
+ ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+ if (!ce) {
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ ce->reg = base;
+ ce->clock_count_per_tick = DIV_ROUND_CLOSEST(rate, HZ);
+ ce->clkevt.irq = irq;
+ ce->clkevt.name = name;
+ ce->clkevt.rating = 200;
+ ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ ce->clkevt.cpumask = cpu_possible_mask;
+ ce->clkevt.set_state_shutdown = mps2_timer_shutdown;
+ ce->clkevt.set_state_periodic = mps2_timer_set_periodic;
+ ce->clkevt.set_state_oneshot = mps2_timer_shutdown;
+ ce->clkevt.set_next_event = mps2_timer_set_next_event;
+
+ /* Ensure timer is disabled */
+ writel_relaxed(0, base + TIMER_CTRL);
+
+ ret = request_irq(irq, mps2_timer_interrupt, IRQF_TIMER, name, ce);
+ if (ret) {
+ pr_err("failed to request irq for clockevent: %d\n", ret);
+ goto out_kfree;
+ }
+
+ clockevents_config_and_register(&ce->clkevt, rate, 0xf, 0xffffffff);
+
+ return 0;
+
+out_kfree:
+ kfree(ce);
+out_iounmap:
+ iounmap(base);
+out_clk_disable:
+ /* clk_{disable, unprepare, put}() can handle NULL as a parameter */
+ clk_disable_unprepare(clk);
+out_clk_put:
+ clk_put(clk);
+out:
+ return ret;
+}
+
+static int __init mps2_clocksource_init(struct device_node *np)
+{
+ void __iomem *base;
+ struct clk *clk = NULL;
+ u32 rate;
+ int ret;
+ const char *name = "mps2-clksrc";
+
+ ret = of_property_read_u32(np, "clock-frequency", &rate);
+ if (ret) {
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ pr_err("failed to get clock for clocksource: %d\n", ret);
+ goto out;
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("failed to enable clock for clocksource: %d\n", ret);
+ goto out_clk_put;
+ }
+
+ rate = clk_get_rate(clk);
+ }
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ ret = -EADDRNOTAVAIL;
+ pr_err("failed to map register for clocksource: %d\n", ret);
+ goto out_clk_disable;
+ }
+
+ /* Ensure timer is disabled */
+ writel_relaxed(0, base + TIMER_CTRL);
+
+ /* ... and set it up as free-running clocksource */
+ writel_relaxed(0xffffffff, base + TIMER_VALUE);
+ writel_relaxed(0xffffffff, base + TIMER_RELOAD);
+
+ writel_relaxed(TIMER_CTRL_ENABLE, base + TIMER_CTRL);
+
+ ret = clocksource_mmio_init(base + TIMER_VALUE, name,
+ rate, 200, 32,
+ clocksource_mmio_readl_down);
+ if (ret) {
+ pr_err("failed to init clocksource: %d\n", ret);
+ goto out_iounmap;
+ }
+
+ sched_clock_base = base;
+ sched_clock_register(mps2_sched_read, 32, rate);
+
+ return 0;
+
+out_iounmap:
+ iounmap(base);
+out_clk_disable:
+ /* clk_{disable, unprepare, put}() can handle NULL as a parameter */
+ clk_disable_unprepare(clk);
+out_clk_put:
+ clk_put(clk);
+out:
+ return ret;
+}
+
+static void __init mps2_timer_init(struct device_node *np)
+{
+ static int has_clocksource, has_clockevent;
+ int ret;
+
+ if (!has_clocksource) {
+ ret = mps2_clocksource_init(np);
+ if (!ret) {
+ has_clocksource = 1;
+ return;
+ }
+ }
+
+ if (!has_clockevent) {
+ ret = mps2_clockevent_init(np);
+ if (!ret) {
+ has_clockevent = 1;
+ return;
+ }
+ }
+}
+
+CLOCKSOURCE_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init);
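The bitwise NOT in mps2_sched_read() is what turns the hardware's down-counter into the monotonically increasing value sched_clock expects: as TIMER_VALUE decreases from 0xffffffff toward 0, ~value increases from 0 toward 0xffffffff. A standalone userspace sketch of that arithmetic (simulated register, names hypothetical):

#include <stdint.h>
#include <stdio.h>

static uint32_t timer_value = 0xffffffffu;	/* simulated TIMER_VALUE register */

static uint32_t read_down_counter(void)
{
	return timer_value;
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		/* bitwise NOT inverts the counting direction */
		printf("raw=%08x sched=%08x\n",
		       read_down_counter(),
		       (uint32_t)~read_down_counter());
		timer_value -= 0x1000;	/* hardware counts down */
	}
	return 0;
}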
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index d67bc3564..7e583f8ea 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -152,7 +152,7 @@ static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
}
static void
-mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
+__init mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
{
writel(TIMER_CTRL_CLEAR | TIMER_CTRL_DISABLE,
evt->gpt_base + TIMER_CTRL_REG(timer));
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 38333aba3..7b94ad2ab 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -258,17 +258,3 @@ static void __init tegra20_init_rtc(struct device_node *np)
register_persistent_clock(NULL, tegra_read_persistent_clock64);
}
CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
-
-#ifdef CONFIG_PM
-static u32 usec_config;
-
-void tegra_timer_suspend(void)
-{
- usec_config = timer_readl(TIMERUS_USEC_CFG);
-}
-
-void tegra_timer_resume(void)
-{
- timer_writel(usec_config, TIMERUS_USEC_CFG);
-}
-#endif
diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c
new file mode 100644
index 000000000..d46108920
--- /dev/null
+++ b/drivers/clocksource/timer-nps.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/cpu.h>
+#include <soc/nps/common.h>
+
+#define NPS_MSU_TICK_LOW 0xC8
+#define NPS_CLUSTER_OFFSET 8
+#define NPS_CLUSTER_NUM 16
+
+/* This array is per cluster of CPUs (each NPS400 cluster has 256 CPUs) */
+static void *nps_msu_reg_low_addr[NPS_CLUSTER_NUM] __read_mostly;
+
+static unsigned long nps_timer_rate;
+
+static cycle_t nps_clksrc_read(struct clocksource *clksrc)
+{
+ int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET;
+
+ return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
+}
+
+static void __init nps_setup_clocksource(struct device_node *node,
+ struct clk *clk)
+{
+ int ret, cluster;
+
+ for (cluster = 0; cluster < NPS_CLUSTER_NUM; cluster++)
+ nps_msu_reg_low_addr[cluster] =
+ nps_host_reg((cluster << NPS_CLUSTER_OFFSET),
+ NPS_MSU_BLKID, NPS_MSU_TICK_LOW);
+
+ ret = clk_prepare_enable(clk);
+ if (ret) {
+ pr_err("Couldn't enable parent clock\n");
+ return;
+ }
+
+ nps_timer_rate = clk_get_rate(clk);
+
+ ret = clocksource_mmio_init(nps_msu_reg_low_addr, "EZnps-tick",
+ nps_timer_rate, 301, 32, nps_clksrc_read);
+ if (ret) {
+ pr_err("Couldn't register clock source.\n");
+ clk_disable_unprepare(clk);
+ }
+}
+
+static void __init nps_timer_init(struct device_node *node)
+{
+ struct clk *clk;
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Can't get timer clock.\n");
+ return;
+ }
+
+ nps_setup_clocksource(node, clk);
+}
+
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
+ nps_timer_init);
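The cluster lookup in nps_clksrc_read() relies on the CPU id encoding: with NPS_CLUSTER_OFFSET of 8, the low 8 bits select the CPU within a cluster and the remaining bits select the cluster, which is where the 256-CPUs-per-cluster figure in the comment comes from. A compilable sketch of that decoding (constants copied from the driver, the rest illustrative):

#include <stdio.h>

#define NPS_CLUSTER_OFFSET	8
#define NPS_CLUSTER_NUM		16

int main(void)
{
	unsigned int cpu = 517;	/* example: cluster 2, local CPU 5 */
	unsigned int cluster = cpu >> NPS_CLUSTER_OFFSET;
	unsigned int local = cpu & ((1 << NPS_CLUSTER_OFFSET) - 1);

	printf("cpu %u -> cluster %u of %u, local id %u\n",
	       cpu, cluster, NPS_CLUSTER_NUM, local);
	return 0;
}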
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 15d06fcf0..b02f9c606 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -56,11 +56,21 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
-static inline void get_seq(__u32 *ts, int *cpu)
+static inline void send_msg(struct cn_msg *msg)
{
preempt_disable();
- *ts = __this_cpu_inc_return(proc_event_counts) - 1;
- *cpu = smp_processor_id();
+
+ msg->seq = __this_cpu_inc_return(proc_event_counts) - 1;
+ ((struct proc_event *)msg->data)->cpu = smp_processor_id();
+
+ /*
+ * Preemption remains disabled during send to ensure the messages are
+ * ordered according to their sequence numbers.
+ *
+ * If cn_netlink_send() fails, the data is not sent.
+ */
+ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT);
+
preempt_enable();
}
@@ -77,7 +87,6 @@ void proc_fork_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_FORK;
rcu_read_lock();
@@ -92,8 +101,7 @@ void proc_fork_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- /* If cn_netlink_send() failed, the data is not sent */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_exec_connector(struct task_struct *task)
@@ -108,7 +116,6 @@ void proc_exec_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_EXEC;
ev->event_data.exec.process_pid = task->pid;
@@ -118,7 +125,7 @@ void proc_exec_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_id_connector(struct task_struct *task, int which_id)
@@ -150,14 +157,13 @@ void proc_id_connector(struct task_struct *task, int which_id)
return;
}
rcu_read_unlock();
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_sid_connector(struct task_struct *task)
@@ -172,7 +178,6 @@ void proc_sid_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_SID;
ev->event_data.sid.process_pid = task->pid;
@@ -182,7 +187,7 @@ void proc_sid_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
@@ -197,7 +202,6 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_PTRACE;
ev->event_data.ptrace.process_pid = task->pid;
@@ -215,7 +219,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_comm_connector(struct task_struct *task)
@@ -230,7 +234,6 @@ void proc_comm_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_COMM;
ev->event_data.comm.process_pid = task->pid;
@@ -241,7 +244,7 @@ void proc_comm_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_coredump_connector(struct task_struct *task)
@@ -256,7 +259,6 @@ void proc_coredump_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_COREDUMP;
ev->event_data.coredump.process_pid = task->pid;
@@ -266,7 +268,7 @@ void proc_coredump_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
void proc_exit_connector(struct task_struct *task)
@@ -281,7 +283,6 @@ void proc_exit_connector(struct task_struct *task)
msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
memset(&ev->event_data, 0, sizeof(ev->event_data));
- get_seq(&msg->seq, &ev->cpu);
ev->timestamp_ns = ktime_get_ns();
ev->what = PROC_EVENT_EXIT;
ev->event_data.exit.process_pid = task->pid;
@@ -293,7 +294,7 @@ void proc_exit_connector(struct task_struct *task)
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
/*
@@ -325,7 +326,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
msg->ack = rcvd_ack + 1;
msg->len = sizeof(*ev);
msg->flags = 0; /* not used */
- cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_KERNEL);
+ send_msg(msg);
}
/**
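The point of folding get_seq() into send_msg() is that the sequence number is now allocated and the message emitted inside a single preemption-disabled region, so per-CPU listeners never observe sequence numbers out of order and can detect drops as gaps. A userspace analogue of the pattern, with a mutex standing in for preempt_disable() (all names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t seq_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int seq;

static void send_msg(const char *what)
{
	/* Assign the sequence number and emit the message atomically,
	 * mirroring the preempt_disable()/preempt_enable() pair above. */
	pthread_mutex_lock(&seq_lock);
	printf("seq=%u event=%s\n", seq++, what);
	pthread_mutex_unlock(&seq_lock);
}

int main(void)
{
	send_msg("fork");
	send_msg("exec");
	send_msg("exit");
	return 0;
}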
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a7f45853c..b7445b6ae 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -18,7 +18,11 @@ config CPU_FREQ
if CPU_FREQ
+config CPU_FREQ_GOV_ATTR_SET
+ bool
+
config CPU_FREQ_GOV_COMMON
+ select CPU_FREQ_GOV_ATTR_SET
select IRQ_WORK
bool
@@ -103,6 +107,17 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
Be aware that not all cpufreq drivers support the conservative
governor. If unsure have a look at the help section of the
driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
+ bool "schedutil"
+ depends on SMP
+ select CPU_FREQ_GOV_SCHEDUTIL
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the 'schedutil' CPUFreq governor by default. If unsure,
+ have a look at the help section of that governor. The fallback
+ governor will be 'performance'.
+
endchoice
config CPU_FREQ_GOV_PERFORMANCE
@@ -184,6 +199,26 @@ config CPU_FREQ_GOV_CONSERVATIVE
If in doubt, say N.
+config CPU_FREQ_GOV_SCHEDUTIL
+ tristate "'schedutil' cpufreq policy governor"
+ depends on CPU_FREQ && SMP
+ select CPU_FREQ_GOV_ATTR_SET
+ select IRQ_WORK
+ help
+ This governor makes decisions based on the utilization data provided
+ by the scheduler. It sets the CPU frequency to be proportional to
+ the utilization/capacity ratio coming from the scheduler. If the
+ utilization is frequency-invariant, the new frequency is also
+ proportional to the maximum available frequency. If that is not the
+ case, it is proportional to the current frequency of the CPU. The
+ frequency tipping point is at utilization/capacity equal to 80% in
+ both cases.
+
+ To compile this driver as a module, choose M here: the module will
+ be called cpufreq_schedutil.
+
+ If in doubt, say N.
+
comment "CPU frequency scaling drivers"
config CPUFREQ_DT
@@ -191,6 +226,7 @@ config CPUFREQ_DT
depends on HAVE_CLK && OF
# if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y:
depends on !CPU_THERMAL || THERMAL
+ select CPUFREQ_DT_PLATDEV
select PM_OPP
help
This adds a generic DT based cpufreq driver for frequency management.
@@ -199,6 +235,15 @@ config CPUFREQ_DT
If in doubt, say N.
+config CPUFREQ_DT_PLATDEV
+ bool
+ help
+ This adds a generic DT based cpufreq platdev driver for frequency
+ management. This creates a 'cpufreq-dt' platform device on the
+ supported platforms.
+
+ If in doubt, say N.
+
if X86
source "drivers/cpufreq/Kconfig.x86"
endif
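To make the schedutil help text above concrete: an 80% tipping point implies a selection rule of roughly next_freq = 1.25 * max_freq * util / max in the frequency-invariant case, so the requested frequency reaches max_freq exactly at 80% utilization. A small sketch of that arithmetic, derived from the help text rather than copied from the governor:

#include <stdio.h>

/* 1.25 * max_freq * util / max; the 1.25 factor is 1 / 0.8, i.e. the
 * inverse of the 80% tipping point. Illustrative only. */
static unsigned int next_freq(unsigned int max_freq, unsigned int util,
			      unsigned int max)
{
	return (max_freq + (max_freq >> 2)) * util / max;
}

int main(void)
{
	printf("%u\n", next_freq(2000000, 80, 100));	/* 2000000: hits max at 80% */
	printf("%u\n", next_freq(2000000, 40, 100));	/* 1000000: linear below it */
	return 0;
}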
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 14b1f9393..d89b8afe2 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -50,15 +50,6 @@ config ARM_HIGHBANK_CPUFREQ
If in doubt, say N.
-config ARM_HISI_ACPU_CPUFREQ
- tristate "Hisilicon ACPU CPUfreq driver"
- depends on ARCH_HISI && CPUFREQ_DT
- select PM_OPP
- help
- This enables the hisilicon ACPU CPUfreq driver.
-
- If in doubt, say N.
-
config ARM_IMX6Q_CPUFREQ
tristate "Freescale i.MX6 cpufreq support"
depends on ARCH_MXC
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index c59bdcb83..adbd1de1c 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -5,6 +5,7 @@
config X86_INTEL_PSTATE
bool "Intel P state control"
depends on X86
+ select ACPI_PROCESSOR if ACPI
help
This driver provides a P state for Intel core processors.
The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9e63fb1b0..0a9b6a093 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -11,8 +11,10 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
+obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
+obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
##################################################################################
# x86 drivers.
@@ -53,7 +55,6 @@ obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
-obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ) += hisi-acpu-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
@@ -78,6 +79,7 @@ obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
+obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
##################################################################################
@@ -100,7 +102,7 @@ obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o
obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
-obj-$(CONFIG_LOONGSON1_CPUFREQ) += ls1x-cpufreq.o
+obj-$(CONFIG_LOONGSON1_CPUFREQ) += loongson1-cpufreq.o
obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index fb5712141..32a15052f 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -25,6 +25,8 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -50,8 +52,6 @@ MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");
-#define PFX "acpi-cpufreq: "
-
enum {
UNDEFINED_CAPABLE = 0,
SYSTEM_INTEL_MSR_CAPABLE,
@@ -65,7 +65,6 @@ enum {
#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
struct acpi_cpufreq_data {
- struct cpufreq_frequency_table *freq_table;
unsigned int resume;
unsigned int cpu_feature;
unsigned int acpi_perf_cpu;
@@ -200,8 +199,9 @@ static int check_amd_hwpstate_cpu(unsigned int cpuid)
return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}
-static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
+static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
{
+ struct acpi_cpufreq_data *data = policy->driver_data;
struct acpi_processor_performance *perf;
int i;
@@ -209,13 +209,14 @@ static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
for (i = 0; i < perf->state_count; i++) {
if (value == perf->states[i].status)
- return data->freq_table[i].frequency;
+ return policy->freq_table[i].frequency;
}
return 0;
}
-static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
+static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
{
+ struct acpi_cpufreq_data *data = policy->driver_data;
struct cpufreq_frequency_table *pos;
struct acpi_processor_performance *perf;
@@ -226,20 +227,22 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
perf = to_perf_data(data);
- cpufreq_for_each_entry(pos, data->freq_table)
+ cpufreq_for_each_entry(pos, policy->freq_table)
if (msr == perf->states[pos->driver_data].status)
return pos->frequency;
- return data->freq_table[0].frequency;
+ return policy->freq_table[0].frequency;
}
-static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
+static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
{
+ struct acpi_cpufreq_data *data = policy->driver_data;
+
switch (data->cpu_feature) {
case SYSTEM_INTEL_MSR_CAPABLE:
case SYSTEM_AMD_MSR_CAPABLE:
- return extract_msr(val, data);
+ return extract_msr(policy, val);
case SYSTEM_IO_CAPABLE:
- return extract_io(val, data);
+ return extract_io(policy, val);
default:
return 0;
}
@@ -374,11 +377,11 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
return 0;
data = policy->driver_data;
- if (unlikely(!data || !data->freq_table))
+ if (unlikely(!data || !policy->freq_table))
return 0;
- cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
- freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
+ cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
+ freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
if (freq != cached_freq) {
/*
* The dreaded BIOS frequency change behind our back.
@@ -392,14 +395,15 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
return freq;
}
-static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
- struct acpi_cpufreq_data *data)
+static unsigned int check_freqs(struct cpufreq_policy *policy,
+ const struct cpumask *mask, unsigned int freq)
{
+ struct acpi_cpufreq_data *data = policy->driver_data;
unsigned int cur_freq;
unsigned int i;
for (i = 0; i < 100; i++) {
- cur_freq = extract_freq(get_cur_val(mask, data), data);
+ cur_freq = extract_freq(policy, get_cur_val(mask, data));
if (cur_freq == freq)
return 1;
udelay(10);
@@ -416,12 +420,12 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
unsigned int next_perf_state = 0; /* Index into perf table */
int result = 0;
- if (unlikely(data == NULL || data->freq_table == NULL)) {
+ if (unlikely(!data)) {
return -ENODEV;
}
perf = to_perf_data(data);
- next_perf_state = data->freq_table[index].driver_data;
+ next_perf_state = policy->freq_table[index].driver_data;
if (perf->state == next_perf_state) {
if (unlikely(data->resume)) {
pr_debug("Called after resume, resetting to P%d\n",
@@ -444,8 +448,8 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
drv_write(data, mask, perf->states[next_perf_state].control);
if (acpi_pstate_strict) {
- if (!check_freqs(mask, data->freq_table[index].frequency,
- data)) {
+ if (!check_freqs(policy, mask,
+ policy->freq_table[index].frequency)) {
pr_debug("acpi_cpufreq_target failed (%d)\n",
policy->cpu);
result = -EAGAIN;
@@ -458,6 +462,43 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
return result;
}
+static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ struct acpi_cpufreq_data *data = policy->driver_data;
+ struct acpi_processor_performance *perf;
+ struct cpufreq_frequency_table *entry;
+ unsigned int next_perf_state, next_freq, freq;
+
+ /*
+ * Find the lowest frequency at or above target_freq.
+ *
+ * The table is sorted in the reverse order with respect to the
+ * frequency and all of the entries are valid (see the initialization).
+ */
+ entry = policy->freq_table;
+ do {
+ entry++;
+ freq = entry->frequency;
+ } while (freq >= target_freq && freq != CPUFREQ_TABLE_END);
+ entry--;
+ next_freq = entry->frequency;
+ next_perf_state = entry->driver_data;
+
+ perf = to_perf_data(data);
+ if (perf->state == next_perf_state) {
+ if (unlikely(data->resume))
+ data->resume = 0;
+ else
+ return next_freq;
+ }
+
+ data->cpu_freq_write(&perf->control_register,
+ perf->states[next_perf_state].control);
+ perf->state = next_perf_state;
+ return next_freq;
+}
+
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
@@ -611,10 +652,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
if ((c->x86 == 15) &&
(c->x86_model == 6) &&
(c->x86_mask == 8)) {
- printk(KERN_INFO "acpi-cpufreq: Intel(R) "
- "Xeon(R) 7100 Errata AL30, processors may "
- "lock up on frequency changes: disabling "
- "acpi-cpufreq.\n");
+ pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
return -ENODEV;
}
}
@@ -631,6 +669,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
struct acpi_processor_performance *perf;
+ struct cpufreq_frequency_table *freq_table;
#ifdef CONFIG_SMP
static int blacklisted;
#endif
@@ -690,7 +729,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpumask_copy(data->freqdomain_cpus,
topology_sibling_cpumask(cpu));
policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
- pr_info_once(PFX "overriding BIOS provided _PSD data\n");
+ pr_info_once("overriding BIOS provided _PSD data\n");
}
#endif
@@ -742,9 +781,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_unreg;
}
- data->freq_table = kzalloc(sizeof(*data->freq_table) *
+ freq_table = kzalloc(sizeof(*freq_table) *
(perf->state_count+1), GFP_KERNEL);
- if (!data->freq_table) {
+ if (!freq_table) {
result = -ENOMEM;
goto err_unreg;
}
@@ -762,30 +801,29 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
policy->cpuinfo.transition_latency > 20 * 1000) {
policy->cpuinfo.transition_latency = 20 * 1000;
- printk_once(KERN_INFO
- "P-state transition latency capped at 20 uS\n");
+ pr_info_once("P-state transition latency capped at 20 uS\n");
}
/* table init */
for (i = 0; i < perf->state_count; i++) {
if (i > 0 && perf->states[i].core_frequency >=
- data->freq_table[valid_states-1].frequency / 1000)
+ freq_table[valid_states-1].frequency / 1000)
continue;
- data->freq_table[valid_states].driver_data = i;
- data->freq_table[valid_states].frequency =
+ freq_table[valid_states].driver_data = i;
+ freq_table[valid_states].frequency =
perf->states[i].core_frequency * 1000;
valid_states++;
}
- data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+ freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
perf->state = 0;
- result = cpufreq_table_validate_and_show(policy, data->freq_table);
+ result = cpufreq_table_validate_and_show(policy, freq_table);
if (result)
goto err_freqfree;
if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
- printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
+ pr_warn(FW_WARN "P-state 0 is not max freq\n");
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
@@ -821,10 +859,13 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
*/
data->resume = 1;
+ policy->fast_switch_possible = !acpi_pstate_strict &&
+ !(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);
+
return result;
err_freqfree:
- kfree(data->freq_table);
+ kfree(freq_table);
err_unreg:
acpi_processor_unregister_performance(cpu);
err_free_mask:
@@ -842,13 +883,12 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
pr_debug("acpi_cpufreq_cpu_exit\n");
- if (data) {
- policy->driver_data = NULL;
- acpi_processor_unregister_performance(data->acpi_perf_cpu);
- free_cpumask_var(data->freqdomain_cpus);
- kfree(data->freq_table);
- kfree(data);
- }
+ policy->fast_switch_possible = false;
+ policy->driver_data = NULL;
+ acpi_processor_unregister_performance(data->acpi_perf_cpu);
+ free_cpumask_var(data->freqdomain_cpus);
+ kfree(policy->freq_table);
+ kfree(data);
return 0;
}
@@ -876,6 +916,7 @@ static struct freq_attr *acpi_cpufreq_attr[] = {
static struct cpufreq_driver acpi_cpufreq_driver = {
.verify = cpufreq_generic_frequency_table_verify,
.target_index = acpi_cpufreq_target,
+ .fast_switch = acpi_cpufreq_fast_switch,
.bios_limit = acpi_processor_get_bios_limit,
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
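The table walk in acpi_cpufreq_fast_switch() above is worth spelling out: because the table is sorted by descending frequency and terminated by a sentinel, stepping forward until the entry drops below target_freq and then backing up one entry yields the lowest frequency still >= target (CPUFREQ_RELATION_L). A standalone sketch of the same walk (sentinel value and names illustrative):

#include <stdio.h>

#define TABLE_END	(~0u)	/* stands in for the kernel's end sentinel */

struct entry {
	unsigned int frequency;	/* kHz, sorted in descending order */
};

/* Same walk as acpi_cpufreq_fast_switch(): advance while entries are
 * still >= target, then back up one entry. */
static unsigned int pick_freq(const struct entry *table, unsigned int target)
{
	const struct entry *e = table;
	unsigned int freq;

	do {
		e++;
		freq = e->frequency;
	} while (freq >= target && freq != TABLE_END);
	e--;

	return e->frequency;
}

int main(void)
{
	const struct entry table[] = {
		{ 3000000 }, { 2400000 }, { 1800000 }, { 1200000 }, { TABLE_END },
	};

	printf("%u\n", pick_freq(table, 2000000));	/* -> 2400000 */
	printf("%u\n", pick_freq(table, 2400000));	/* -> 2400000 */
	return 0;
}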
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index c251247ae..418042201 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -298,7 +298,8 @@ static int merge_cluster_tables(void)
return 0;
}
-static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
+static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
@@ -308,11 +309,12 @@ static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
clk_put(clk[cluster]);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
if (arm_bL_ops->free_opp_table)
- arm_bL_ops->free_opp_table(cpu_dev);
+ arm_bL_ops->free_opp_table(cpumask);
dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
}
-static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
+static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
int i;
@@ -321,7 +323,7 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
return;
if (cluster < MAX_CLUSTERS)
- return _put_cluster_clk_and_freq_table(cpu_dev);
+ return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
for_each_present_cpu(i) {
struct device *cdev = get_cpu_device(i);
@@ -330,14 +332,15 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
return;
}
- _put_cluster_clk_and_freq_table(cdev);
+ _put_cluster_clk_and_freq_table(cdev, cpumask);
}
/* free virtual table */
kfree(freq_table[cluster]);
}
-static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
+static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
int ret;
@@ -345,7 +348,7 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
if (freq_table[cluster])
return 0;
- ret = arm_bL_ops->init_opp_table(cpu_dev);
+ ret = arm_bL_ops->init_opp_table(cpumask);
if (ret) {
dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
__func__, cpu_dev->id, ret);
@@ -374,14 +377,15 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
free_opp_table:
if (arm_bL_ops->free_opp_table)
- arm_bL_ops->free_opp_table(cpu_dev);
+ arm_bL_ops->free_opp_table(cpumask);
out:
dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
cluster);
return ret;
}
-static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
+static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
+ const struct cpumask *cpumask)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
int i, ret;
@@ -390,7 +394,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
return 0;
if (cluster < MAX_CLUSTERS) {
- ret = _get_cluster_clk_and_freq_table(cpu_dev);
+ ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
if (ret)
atomic_dec(&cluster_usage[cluster]);
return ret;
@@ -407,7 +411,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
return -ENODEV;
}
- ret = _get_cluster_clk_and_freq_table(cdev);
+ ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
if (ret)
goto put_clusters;
}
@@ -433,7 +437,7 @@ put_clusters:
return -ENODEV;
}
- _put_cluster_clk_and_freq_table(cdev);
+ _put_cluster_clk_and_freq_table(cdev, cpumask);
}
atomic_dec(&cluster_usage[cluster]);
@@ -455,18 +459,6 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
return -ENODEV;
}
- ret = get_cluster_clk_and_freq_table(cpu_dev);
- if (ret)
- return ret;
-
- ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
- if (ret) {
- dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
- policy->cpu, cur_cluster);
- put_cluster_clk_and_freq_table(cpu_dev);
- return ret;
- }
-
if (cur_cluster < MAX_CLUSTERS) {
int cpu;
@@ -479,6 +471,18 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
}
+ ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+ if (ret)
+ return ret;
+
+ ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
+ if (ret) {
+ dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
+ policy->cpu, cur_cluster);
+ put_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
+ return ret;
+ }
+
if (arm_bL_ops->get_transition_latency)
policy->cpuinfo.transition_latency =
arm_bL_ops->get_transition_latency(cpu_dev);
@@ -509,7 +513,7 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
return -ENODEV;
}
- put_cluster_clk_and_freq_table(cpu_dev);
+ put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
return 0;
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index b88889d93..184d7c3a1 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -30,11 +30,11 @@ struct cpufreq_arm_bL_ops {
* This must set opp table for cpu_dev in a similar way as done by
* dev_pm_opp_of_add_table().
*/
- int (*init_opp_table)(struct device *cpu_dev);
+ int (*init_opp_table)(const struct cpumask *cpumask);
/* Optional */
int (*get_transition_latency)(struct device *cpu_dev);
- void (*free_opp_table)(struct device *cpu_dev);
+ void (*free_opp_table)(const struct cpumask *cpumask);
};
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 16ddeefe9..39b3f51d9 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -43,23 +43,6 @@ static struct device_node *get_cpu_node_with_valid_op(int cpu)
return np;
}
-static int dt_init_opp_table(struct device *cpu_dev)
-{
- struct device_node *np;
- int ret;
-
- np = of_node_get(cpu_dev->of_node);
- if (!np) {
- pr_err("failed to find cpu%d node\n", cpu_dev->id);
- return -ENOENT;
- }
-
- ret = dev_pm_opp_of_add_table(cpu_dev);
- of_node_put(np);
-
- return ret;
-}
-
static int dt_get_transition_latency(struct device *cpu_dev)
{
struct device_node *np;
@@ -81,8 +64,8 @@ static int dt_get_transition_latency(struct device *cpu_dev)
static struct cpufreq_arm_bL_ops dt_bL_ops = {
.name = "dt-bl",
.get_transition_latency = dt_get_transition_latency,
- .init_opp_table = dt_init_opp_table,
- .free_opp_table = dev_pm_opp_of_remove_table,
+ .init_opp_table = dev_pm_opp_of_cpumask_add_table,
+ .free_opp_table = dev_pm_opp_of_cpumask_remove_table,
};
static int generic_bL_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 7c0bdfb1a..8882b8e2e 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -173,4 +173,25 @@ out:
return -ENODEV;
}
+static void __exit cppc_cpufreq_exit(void)
+{
+ struct cpudata *cpu;
+ int i;
+
+ cpufreq_unregister_driver(&cppc_cpufreq_driver);
+
+ for_each_possible_cpu(i) {
+ cpu = all_cpu_data[i];
+ free_cpumask_var(cpu->shared_cpu_map);
+ kfree(cpu);
+ }
+
+ kfree(all_cpu_data);
+}
+
+module_exit(cppc_cpufreq_exit);
+MODULE_AUTHOR("Ashwin Chaugule");
+MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
+MODULE_LICENSE("GPL");
+
late_initcall(cppc_cpufreq_init);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
new file mode 100644
index 000000000..0bb44d5b5
--- /dev/null
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 Linaro.
+ * Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static const struct of_device_id machines[] __initconst = {
+ { .compatible = "allwinner,sun4i-a10", },
+ { .compatible = "allwinner,sun5i-a10s", },
+ { .compatible = "allwinner,sun5i-a13", },
+ { .compatible = "allwinner,sun5i-r8", },
+ { .compatible = "allwinner,sun6i-a31", },
+ { .compatible = "allwinner,sun6i-a31s", },
+ { .compatible = "allwinner,sun7i-a20", },
+ { .compatible = "allwinner,sun8i-a23", },
+ { .compatible = "allwinner,sun8i-a33", },
+ { .compatible = "allwinner,sun8i-a83t", },
+ { .compatible = "allwinner,sun8i-h3", },
+
+ { .compatible = "hisilicon,hi6220", },
+
+ { .compatible = "fsl,imx27", },
+ { .compatible = "fsl,imx51", },
+ { .compatible = "fsl,imx53", },
+ { .compatible = "fsl,imx7d", },
+
+ { .compatible = "marvell,berlin", },
+
+ { .compatible = "samsung,exynos3250", },
+ { .compatible = "samsung,exynos4210", },
+ { .compatible = "samsung,exynos4212", },
+ { .compatible = "samsung,exynos4412", },
+ { .compatible = "samsung,exynos5250", },
+#ifndef CONFIG_BL_SWITCHER
+ { .compatible = "samsung,exynos5420", },
+ { .compatible = "samsung,exynos5800", },
+#endif
+
+ { .compatible = "renesas,emev2", },
+ { .compatible = "renesas,r7s72100", },
+ { .compatible = "renesas,r8a73a4", },
+ { .compatible = "renesas,r8a7740", },
+ { .compatible = "renesas,r8a7778", },
+ { .compatible = "renesas,r8a7779", },
+ { .compatible = "renesas,r8a7790", },
+ { .compatible = "renesas,r8a7791", },
+ { .compatible = "renesas,r8a7793", },
+ { .compatible = "renesas,r8a7794", },
+ { .compatible = "renesas,sh73a0", },
+
+ { .compatible = "rockchip,rk2928", },
+ { .compatible = "rockchip,rk3036", },
+ { .compatible = "rockchip,rk3066a", },
+ { .compatible = "rockchip,rk3066b", },
+ { .compatible = "rockchip,rk3188", },
+ { .compatible = "rockchip,rk3228", },
+ { .compatible = "rockchip,rk3288", },
+ { .compatible = "rockchip,rk3366", },
+ { .compatible = "rockchip,rk3368", },
+ { .compatible = "rockchip,rk3399", },
+
+ { .compatible = "sigma,tango4" },
+
+ { .compatible = "ti,omap2", },
+ { .compatible = "ti,omap3", },
+ { .compatible = "ti,omap4", },
+ { .compatible = "ti,omap5", },
+
+ { .compatible = "xlnx,zynq-7000", },
+};
+
+static int __init cpufreq_dt_platdev_init(void)
+{
+ struct device_node *np = of_find_node_by_path("/");
+ const struct of_device_id *match;
+
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(machines, np);
+ of_node_put(np);
+ if (!match)
+ return -ENODEV;
+
+ return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1,
+ NULL, 0));
+}
+device_initcall(cpufreq_dt_platdev_init);
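Before this file existed, each platform registered the 'cpufreq-dt' device from its own machine or SoC init code; the whitelist above centralizes that. A sketch of the per-platform boilerplate it replaces (hypothetical platform, same registration call as above):

#include <linux/platform_device.h>

static int __init my_soc_cpufreq_register(void)
{
	/* What SoC init code used to do before the central whitelist. */
	return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt",
							       -1, NULL, 0));
}
device_initcall(my_soc_cpufreq_register);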
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 5f8dbe640..3957de801 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -15,7 +15,6 @@
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
-#include <linux/cpufreq-dt.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
@@ -147,7 +146,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
struct clk *cpu_clk;
struct dev_pm_opp *suspend_opp;
unsigned int transition_latency;
- bool opp_v1 = false;
+ bool fallback = false;
const char *name;
int ret;
@@ -167,14 +166,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
/* Get OPP-sharing information from "operating-points-v2" bindings */
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
if (ret) {
+ if (ret != -ENOENT)
+ goto out_put_clk;
+
/*
* operating-points-v2 not supported, fallback to old method of
- * finding shared-OPPs for backward compatibility.
+ * finding shared-OPPs for backward compatibility if the
+ * platform hasn't set sharing CPUs.
*/
- if (ret == -ENOENT)
- opp_v1 = true;
- else
- goto out_put_clk;
+ if (dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus))
+ fallback = true;
}
/*
@@ -214,11 +215,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
- if (opp_v1) {
- struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
-
- if (!pd || !pd->independent_clocks)
- cpumask_setall(policy->cpus);
+ if (fallback) {
+ cpumask_setall(policy->cpus);
/*
* OPP tables are initialized only for policy->cpu, do it for
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index db69eeb50..5503d491b 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -7,6 +7,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -56,8 +58,6 @@ MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)");
MODULE_PARM_DESC(min_fsb,
"Minimum FSB to use, if not defined: current FSB - 50");
-#define PFX "cpufreq-nforce2: "
-
/**
* nforce2_calc_fsb - calculate FSB
* @pll: PLL value
@@ -174,13 +174,13 @@ static int nforce2_set_fsb(unsigned int fsb)
int pll = 0;
if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) {
- printk(KERN_ERR PFX "FSB %d is out of range!\n", fsb);
+ pr_err("FSB %d is out of range!\n", fsb);
return -EINVAL;
}
tfsb = nforce2_fsb_read(0);
if (!tfsb) {
- printk(KERN_ERR PFX "Error while reading the FSB\n");
+ pr_err("Error while reading the FSB\n");
return -EINVAL;
}
@@ -276,8 +276,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
/* local_irq_save(flags); */
if (nforce2_set_fsb(target_fsb) < 0)
- printk(KERN_ERR PFX "Changing FSB to %d failed\n",
- target_fsb);
+ pr_err("Changing FSB to %d failed\n", target_fsb);
else
pr_debug("Changed FSB successfully to %d\n",
target_fsb);
@@ -325,8 +324,7 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
/* FIX: Get FID from CPU */
if (!fid) {
if (!cpu_khz) {
- printk(KERN_WARNING PFX
- "cpu_khz not set, can't calculate multiplier!\n");
+ pr_warn("cpu_khz not set, can't calculate multiplier!\n");
return -ENODEV;
}
@@ -341,8 +339,8 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
}
}
- printk(KERN_INFO PFX "FSB currently at %i MHz, FID %d.%d\n", fsb,
- fid / 10, fid % 10);
+ pr_info("FSB currently at %i MHz, FID %d.%d\n",
+ fsb, fid / 10, fid % 10);
/* Set maximum FSB to FSB at boot time */
max_fsb = nforce2_fsb_read(1);
@@ -401,11 +399,9 @@ static int nforce2_detect_chipset(void)
if (nforce2_dev == NULL)
return -ENODEV;
- printk(KERN_INFO PFX "Detected nForce2 chipset revision %X\n",
- nforce2_dev->revision);
- printk(KERN_INFO PFX
- "FSB changing is maybe unstable and can lead to "
- "crashes and data loss.\n");
+ pr_info("Detected nForce2 chipset revision %X\n",
+ nforce2_dev->revision);
+ pr_info("FSB changing is maybe unstable and can lead to crashes and data loss\n");
return 0;
}
@@ -423,7 +419,7 @@ static int __init nforce2_init(void)
/* detect chipset */
if (nforce2_detect_chipset()) {
- printk(KERN_INFO PFX "No nForce2 chipset.\n");
+ pr_info("No nForce2 chipset\n");
return -ENODEV;
}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0a94a2caf..d41baadec 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -79,6 +79,16 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
+static inline void cpufreq_exit_governor(struct cpufreq_policy *policy)
+{
+ (void)cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+}
+
+static inline void cpufreq_stop_governor(struct cpufreq_policy *policy)
+{
+ (void)cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+}
+
/**
* Two notifier lists: the "policy" list is involved in the
* validation process for a new CPU frequency policy; the
@@ -430,6 +440,73 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
+/*
+ * Fast frequency switching status count. Positive means "enabled", negative
+ * means "disabled" and 0 means "not decided yet".
+ */
+static int cpufreq_fast_switch_count;
+static DEFINE_MUTEX(cpufreq_fast_switch_lock);
+
+static void cpufreq_list_transition_notifiers(void)
+{
+ struct notifier_block *nb;
+
+ pr_info("Registered transition notifiers:\n");
+
+ mutex_lock(&cpufreq_transition_notifier_list.mutex);
+
+ for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
+ pr_info("%pF\n", nb->notifier_call);
+
+ mutex_unlock(&cpufreq_transition_notifier_list.mutex);
+}
+
+/**
+ * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
+ * @policy: cpufreq policy to enable fast frequency switching for.
+ *
+ * Try to enable fast frequency switching for @policy.
+ *
+ * The attempt will fail if there is at least one transition notifier registered
+ * at this point, as fast frequency switching is quite fundamentally at odds
+ * with transition notifiers. Thus if successful, it will make registration of
+ * transition notifiers fail going forward.
+ */
+void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
+{
+ lockdep_assert_held(&policy->rwsem);
+
+ if (!policy->fast_switch_possible)
+ return;
+
+ mutex_lock(&cpufreq_fast_switch_lock);
+ if (cpufreq_fast_switch_count >= 0) {
+ cpufreq_fast_switch_count++;
+ policy->fast_switch_enabled = true;
+ } else {
+ pr_warn("CPU%u: Fast frequency switching not enabled\n",
+ policy->cpu);
+ cpufreq_list_transition_notifiers();
+ }
+ mutex_unlock(&cpufreq_fast_switch_lock);
+}
+EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
+
+/**
+ * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
+ * @policy: cpufreq policy to disable fast frequency switching for.
+ */
+void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
+{
+ mutex_lock(&cpufreq_fast_switch_lock);
+ if (policy->fast_switch_enabled) {
+ policy->fast_switch_enabled = false;
+ if (!WARN_ON(cpufreq_fast_switch_count <= 0))
+ cpufreq_fast_switch_count--;
+ }
+ mutex_unlock(&cpufreq_fast_switch_lock);
+}
+EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
/*********************************************************************
* SYSFS INTERFACE *
@@ -955,13 +1032,8 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
return 0;
down_write(&policy->rwsem);
- if (has_target()) {
- ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- if (ret) {
- pr_err("%s: Failed to stop governor\n", __func__);
- goto unlock;
- }
- }
+ if (has_target())
+ cpufreq_stop_governor(policy);
cpumask_set_cpu(cpu, policy->cpus);
@@ -970,8 +1042,6 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
-
-unlock:
up_write(&policy->rwsem);
return ret;
}
@@ -1249,26 +1319,24 @@ out_free_policy:
*/
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
+ struct cpufreq_policy *policy;
unsigned cpu = dev->id;
- int ret;
dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
- if (cpu_online(cpu)) {
- ret = cpufreq_online(cpu);
- } else {
- /*
- * A hotplug notifier will follow and we will handle it as CPU
- * online then. For now, just create the sysfs link, unless
- * there is no policy or the link is already present.
- */
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (cpu_online(cpu))
+ return cpufreq_online(cpu);
- ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
- ? add_cpu_dev_symlink(policy, cpu) : 0;
- }
+ /*
+ * A hotplug notifier will follow and we will handle it as CPU online
+ * then. For now, just create the sysfs link, unless there is no policy
+ * or the link is already present.
+ */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
+ return 0;
- return ret;
+ return add_cpu_dev_symlink(policy, cpu);
}
static void cpufreq_offline(unsigned int cpu)
@@ -1285,11 +1353,8 @@ static void cpufreq_offline(unsigned int cpu)
}
down_write(&policy->rwsem);
- if (has_target()) {
- ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- if (ret)
- pr_err("%s: Failed to stop governor\n", __func__);
- }
+ if (has_target())
+ cpufreq_stop_governor(policy);
cpumask_clear_cpu(cpu, policy->cpus);
@@ -1318,12 +1383,8 @@ static void cpufreq_offline(unsigned int cpu)
if (cpufreq_driver->stop_cpu)
cpufreq_driver->stop_cpu(policy);
- /* If cpu is last user of policy, free policy */
- if (has_target()) {
- ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
- if (ret)
- pr_err("%s: Failed to exit governor\n", __func__);
- }
+ if (has_target())
+ cpufreq_exit_governor(policy);
/*
* Perform the ->exit() even during light-weight tear-down,
@@ -1448,8 +1509,12 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
ret_freq = cpufreq_driver->get(policy->cpu);
- /* Updating inactive policies is invalid, so avoid doing that. */
- if (unlikely(policy_is_inactive(policy)))
+ /*
+ * Updating inactive policies is invalid, so avoid doing that. Also
+ * if fast frequency switching is used with the given policy, the check
+ * against policy->cur is pointless, so skip it in that case too.
+ */
+ if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
return ret_freq;
if (ret_freq && policy->cur &&
@@ -1553,7 +1618,6 @@ EXPORT_SYMBOL(cpufreq_generic_suspend);
void cpufreq_suspend(void)
{
struct cpufreq_policy *policy;
- int ret;
if (!cpufreq_driver)
return;
@@ -1566,14 +1630,8 @@ void cpufreq_suspend(void)
for_each_active_policy(policy) {
if (has_target()) {
down_write(&policy->rwsem);
- ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ cpufreq_stop_governor(policy);
up_write(&policy->rwsem);
-
- if (ret) {
- pr_err("%s: Failed to stop governor for policy: %p\n",
- __func__, policy);
- continue;
- }
}
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
@@ -1680,8 +1738,18 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
+ mutex_lock(&cpufreq_fast_switch_lock);
+
+ if (cpufreq_fast_switch_count > 0) {
+ mutex_unlock(&cpufreq_fast_switch_lock);
+ return -EBUSY;
+ }
ret = srcu_notifier_chain_register(
&cpufreq_transition_notifier_list, nb);
+ if (!ret)
+ cpufreq_fast_switch_count--;
+
+ mutex_unlock(&cpufreq_fast_switch_lock);
break;
case CPUFREQ_POLICY_NOTIFIER:
ret = blocking_notifier_chain_register(
@@ -1714,8 +1782,14 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
+ mutex_lock(&cpufreq_fast_switch_lock);
+
ret = srcu_notifier_chain_unregister(
&cpufreq_transition_notifier_list, nb);
+ if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
+ cpufreq_fast_switch_count++;
+
+ mutex_unlock(&cpufreq_fast_switch_lock);
break;
case CPUFREQ_POLICY_NOTIFIER:
ret = blocking_notifier_chain_unregister(
@@ -1734,6 +1808,37 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
* GOVERNORS *
*********************************************************************/
+/**
+ * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
+ * @policy: cpufreq policy to switch the frequency for.
+ * @target_freq: New frequency to set (may be approximate).
+ *
+ * Carry out a fast frequency switch without sleeping.
+ *
+ * The driver's ->fast_switch() callback invoked by this function must be
+ * suitable for being called from within RCU-sched read-side critical sections
+ * and it is expected to select the minimum available frequency greater than or
+ * equal to @target_freq (CPUFREQ_RELATION_L).
+ *
+ * This function must not be called if policy->fast_switch_enabled is unset.
+ *
+ * Governors calling this function must guarantee that it will never be invoked
+ * twice in parallel for the same policy and that it will never be called in
+ * parallel with either ->target() or ->target_index() for the same policy.
+ *
+ * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
+ * callback to indicate an error condition, the hardware configuration must be
+ * preserved.
+ */
+unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ target_freq = clamp_val(target_freq, policy->min, policy->max);
+
+ return cpufreq_driver->fast_switch(policy, target_freq);
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
+
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, int index)
@@ -1932,16 +2037,15 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
ret = policy->governor->governor(policy, event);
- if (!ret) {
- if (event == CPUFREQ_GOV_POLICY_INIT)
+ if (event == CPUFREQ_GOV_POLICY_INIT) {
+ if (ret)
+ module_put(policy->governor->owner);
+ else
policy->governor->initialized++;
- else if (event == CPUFREQ_GOV_POLICY_EXIT)
- policy->governor->initialized--;
- }
-
- if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
- ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
+ } else if (event == CPUFREQ_GOV_POLICY_EXIT) {
+ policy->governor->initialized--;
module_put(policy->governor->owner);
+ }
return ret;
}
@@ -2104,20 +2208,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
old_gov = policy->governor;
/* end old governor */
if (old_gov) {
- ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- if (ret) {
- /* This can happen due to race with other operations */
- pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
- __func__, old_gov->name, ret);
- return ret;
- }
-
- ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
- if (ret) {
- pr_err("%s: Failed to Exit Governor: %s (%d)\n",
- __func__, old_gov->name, ret);
- return ret;
- }
+ cpufreq_stop_governor(policy);
+ cpufreq_exit_governor(policy);
}
/* start new governor */
@@ -2129,7 +2221,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
pr_debug("cpufreq: governor change\n");
return 0;
}
- cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ cpufreq_exit_governor(policy);
}
/* new governor failed, so re-start old one */
@@ -2173,6 +2265,10 @@ int cpufreq_update_policy(unsigned int cpu)
* -> ask driver for current freq and notify governors about a change
*/
if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
+ if (cpufreq_suspended) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
new_policy.cur = cpufreq_update_current_freq(policy);
if (WARN_ON(!new_policy.cur)) {
ret = -EIO;
@@ -2197,16 +2293,13 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
cpufreq_online(cpu);
break;
case CPU_DOWN_PREPARE:
cpufreq_offline(cpu);
break;
-
- case CPU_DOWN_FAILED:
- cpufreq_online(cpu);
- break;
}
return NOTIFY_OK;
}
@@ -2381,10 +2474,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
register_hotcpu_notifier(&cpufreq_cpu_notifier);
pr_debug("driver %s up and running\n", driver_data->name);
-
-out:
- put_online_cpus();
- return ret;
+ goto out;
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
@@ -2394,7 +2484,9 @@ err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- goto out;
+out:
+ put_online_cpus();
+ return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 3b642c9d0..7306830c5 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -135,9 +135,10 @@ static struct notifier_block cs_cpufreq_notifier_block = {
/************************** sysfs interface ************************/
static struct dbs_governor cs_dbs_gov;
-static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
- const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -149,9 +150,10 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
return count;
}
-static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
@@ -164,9 +166,10 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
return count;
}
-static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
@@ -181,9 +184,10 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
return count;
}
-static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
- const char *buf, size_t count)
+static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
@@ -205,9 +209,10 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
return count;
}
-static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
+ size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 5f1147fa9..be498d56d 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -43,9 +43,10 @@ static DEFINE_MUTEX(gov_dbs_data_mutex);
* This must be called with dbs_data->mutex held, otherwise traversing
* policy_dbs_list isn't safe.
*/
-ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct policy_dbs_info *policy_dbs;
unsigned int rate;
int ret;
@@ -59,7 +60,7 @@ ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
* We are operating under dbs_data->mutex and so the list and its
* entries can't be freed concurrently.
*/
- list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+ list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
mutex_lock(&policy_dbs->timer_mutex);
/*
* On 32-bit architectures this may race with the
@@ -96,13 +97,13 @@ void gov_update_cpu_data(struct dbs_data *dbs_data)
{
struct policy_dbs_info *policy_dbs;
- list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+ list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
unsigned int j;
for_each_cpu(j, policy_dbs->policy->cpus) {
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
- j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall,
+ j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
dbs_data->io_is_busy);
if (dbs_data->ignore_nice_load)
j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
@@ -111,54 +112,6 @@ void gov_update_cpu_data(struct dbs_data *dbs_data)
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);
-static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
-{
- return container_of(kobj, struct dbs_data, kobj);
-}
-
-static inline struct governor_attr *to_gov_attr(struct attribute *attr)
-{
- return container_of(attr, struct governor_attr, attr);
-}
-
-static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct dbs_data *dbs_data = to_dbs_data(kobj);
- struct governor_attr *gattr = to_gov_attr(attr);
-
- return gattr->show(dbs_data, buf);
-}
-
-static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct dbs_data *dbs_data = to_dbs_data(kobj);
- struct governor_attr *gattr = to_gov_attr(attr);
- int ret = -EBUSY;
-
- mutex_lock(&dbs_data->mutex);
-
- if (dbs_data->usage_count)
- ret = gattr->store(dbs_data, buf, count);
-
- mutex_unlock(&dbs_data->mutex);
-
- return ret;
-}
-
-/*
- * Sysfs Ops for accessing governor attributes.
- *
- * All show/store invocations for governor specific sysfs attributes, will first
- * call the below show/store callbacks and the attribute specific callback will
- * be called from within it.
- */
-static const struct sysfs_ops governor_sysfs_ops = {
- .show = governor_show,
- .store = governor_store,
-};
-
unsigned int dbs_update(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
@@ -184,14 +137,14 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
/* Get Absolute Load */
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
- u64 cur_wall_time, cur_idle_time;
- unsigned int idle_time, wall_time;
+ u64 update_time, cur_idle_time;
+ unsigned int idle_time, time_elapsed;
unsigned int load;
- cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);
+ cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);
- wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
- j_cdbs->prev_cpu_wall = cur_wall_time;
+ time_elapsed = update_time - j_cdbs->prev_update_time;
+ j_cdbs->prev_update_time = update_time;
idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
j_cdbs->prev_cpu_idle = cur_idle_time;
@@ -203,47 +156,62 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
j_cdbs->prev_cpu_nice = cur_nice;
}
- if (unlikely(!wall_time || wall_time < idle_time))
- continue;
-
- /*
- * If the CPU had gone completely idle, and a task just woke up
- * on this CPU now, it would be unfair to calculate 'load' the
- * usual way for this elapsed time-window, because it will show
- * near-zero load, irrespective of how CPU intensive that task
- * actually is. This is undesirable for latency-sensitive bursty
- * workloads.
- *
- * To avoid this, we reuse the 'load' from the previous
- * time-window and give this task a chance to start with a
- * reasonably high CPU frequency. (However, we shouldn't over-do
- * this copy, lest we get stuck at a high load (high frequency)
- * for too long, even when the current system load has actually
- * dropped down. So we perform the copy only once, upon the
- * first wake-up from idle.)
- *
- * Detecting this situation is easy: the governor's utilization
- * update handler would not have run during CPU-idle periods.
- * Hence, an unusually large 'wall_time' (as compared to the
- * sampling rate) indicates this scenario.
- *
- * prev_load can be zero in two cases and we must recalculate it
- * for both cases:
- * - during long idle intervals
- * - explicitly set to zero
- */
- if (unlikely(wall_time > (2 * sampling_rate) &&
- j_cdbs->prev_load)) {
+ if (unlikely(!time_elapsed)) {
+ /*
+ * That can only happen when this function is called
+ * twice in a row with a very short interval between the
+ * calls, so the previous load value can be used then.
+ */
load = j_cdbs->prev_load;
-
+ } else if (unlikely(time_elapsed > 2 * sampling_rate &&
+ j_cdbs->prev_load)) {
/*
- * Perform a destructive copy, to ensure that we copy
- * the previous load only once, upon the first wake-up
- * from idle.
+ * If the CPU had gone completely idle and a task has
+ * just woken up on this CPU now, it would be unfair to
+ * calculate 'load' the usual way for this elapsed
+ * time-window, because it would show near-zero load,
+ * irrespective of how CPU intensive that task actually
+ * was. This is undesirable for latency-sensitive bursty
+ * workloads.
+ *
+ * To avoid this, reuse the 'load' from the previous
+ * time-window and give this task a chance to start with
+ * a reasonably high CPU frequency. However, that
+ * shouldn't be over-done, lest we get stuck at a high
+ * load (high frequency) for too long, even when the
+ * current system load has actually dropped down, so
+ * clear prev_load to guarantee that the load will be
+ * computed again next time.
+ *
+ * Detecting this situation is easy: the governor's
+ * utilization update handler would not have run during
+ * CPU-idle periods. Hence, an unusually large
+ * 'time_elapsed' (as compared to the sampling rate)
+ * indicates this scenario.
*/
+ load = j_cdbs->prev_load;
j_cdbs->prev_load = 0;
} else {
- load = 100 * (wall_time - idle_time) / wall_time;
+ if (time_elapsed >= idle_time) {
+ load = 100 * (time_elapsed - idle_time) / time_elapsed;
+ } else {
+ /*
+ * That can happen if idle_time is returned by
+ * get_cpu_idle_time_jiffy(). In that case
+ * idle_time is roughly equal to the difference
+ * between time_elapsed and "busy time" obtained
+ * from CPU statistics. Then, the "busy time"
+ * can end up being greater than time_elapsed
+ * (for example, if jiffies_64 and the CPU
+ * statistics are updated by different CPUs),
+ * so idle_time may in fact be negative. That
+ * means, though, that the CPU was busy all
+ * the time (on the rough average) during the
+ * last sampling interval and 100 can be
+ * returned as the load.
+ */
+ load = (int)idle_time < 0 ? 100 : 0;
+ }
j_cdbs->prev_load = load;
}
@@ -254,43 +222,6 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
}
EXPORT_SYMBOL_GPL(dbs_update);
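The rewritten load computation above now has four distinct outcomes. A condensed restatement, for illustration only, with the same branch structure as dbs_update() but simplified names:

    static unsigned int example_load(unsigned int time_elapsed,
                                     unsigned int idle_time,
                                     unsigned int sampling_rate,
                                     unsigned int *prev_load)
    {
        unsigned int load;

        if (!time_elapsed) {                    /* back-to-back invocations */
            load = *prev_load;
        } else if (time_elapsed > 2 * sampling_rate && *prev_load) {
            load = *prev_load;                  /* first wake-up from long idle */
            *prev_load = 0;                     /* reuse the old load only once */
        } else if (time_elapsed >= idle_time) {
            load = 100 * (time_elapsed - idle_time) / time_elapsed;
            *prev_load = load;
        } else {                                /* jiffy-granularity skew */
            load = (int)idle_time < 0 ? 100 : 0;
            *prev_load = load;
        }
        return load;
    }

For example, with a 10000 us sampling rate, time_elapsed = 10000 and idle_time = 2500 give load = 75; time_elapsed = 50000 after an idle stretch returns the previous load once and then clears it.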
-static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
- unsigned int delay_us)
-{
- struct cpufreq_policy *policy = policy_dbs->policy;
- int cpu;
-
- gov_update_sample_delay(policy_dbs, delay_us);
- policy_dbs->last_sample_time = 0;
-
- for_each_cpu(cpu, policy->cpus) {
- struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
-
- cpufreq_set_update_util_data(cpu, &cdbs->update_util);
- }
-}
-
-static inline void gov_clear_update_util(struct cpufreq_policy *policy)
-{
- int i;
-
- for_each_cpu(i, policy->cpus)
- cpufreq_set_update_util_data(i, NULL);
-
- synchronize_sched();
-}
-
-static void gov_cancel_work(struct cpufreq_policy *policy)
-{
- struct policy_dbs_info *policy_dbs = policy->governor_data;
-
- gov_clear_update_util(policy_dbs->policy);
- irq_work_sync(&policy_dbs->irq_work);
- cancel_work_sync(&policy_dbs->work);
- atomic_set(&policy_dbs->work_count, 0);
- policy_dbs->work_in_progress = false;
-}
-
static void dbs_work_handler(struct work_struct *work)
{
struct policy_dbs_info *policy_dbs;
@@ -378,6 +309,44 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
irq_work_queue(&policy_dbs->irq_work);
}
+static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
+ unsigned int delay_us)
+{
+ struct cpufreq_policy *policy = policy_dbs->policy;
+ int cpu;
+
+ gov_update_sample_delay(policy_dbs, delay_us);
+ policy_dbs->last_sample_time = 0;
+
+ for_each_cpu(cpu, policy->cpus) {
+ struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);
+
+ cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
+ dbs_update_util_handler);
+ }
+}
+
+static inline void gov_clear_update_util(struct cpufreq_policy *policy)
+{
+ int i;
+
+ for_each_cpu(i, policy->cpus)
+ cpufreq_remove_update_util_hook(i);
+
+ synchronize_sched();
+}
+
+static void gov_cancel_work(struct cpufreq_policy *policy)
+{
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+
+ gov_clear_update_util(policy_dbs->policy);
+ irq_work_sync(&policy_dbs->irq_work);
+ cancel_work_sync(&policy_dbs->work);
+ atomic_set(&policy_dbs->work_count, 0);
+ policy_dbs->work_in_progress = false;
+}
+
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
struct dbs_governor *gov)
{
@@ -400,7 +369,6 @@ static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *poli
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
j_cdbs->policy_dbs = policy_dbs;
- j_cdbs->update_util.func = dbs_update_util_handler;
}
return policy_dbs;
}
@@ -449,10 +417,7 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
policy_dbs->dbs_data = dbs_data;
policy->governor_data = policy_dbs;
- mutex_lock(&dbs_data->mutex);
- dbs_data->usage_count++;
- list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
- mutex_unlock(&dbs_data->mutex);
+ gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
goto out;
}
@@ -462,8 +427,7 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
goto free_policy_dbs_info;
}
- INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
- mutex_init(&dbs_data->mutex);
+ gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);
ret = gov->init(dbs_data, !policy->governor->initialized);
if (ret)
@@ -483,14 +447,11 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
if (!have_governor_per_policy())
gov->gdbs_data = dbs_data;
- policy->governor_data = policy_dbs;
-
policy_dbs->dbs_data = dbs_data;
- dbs_data->usage_count = 1;
- list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
+ policy->governor_data = policy_dbs;
gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
- ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
+ ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
get_governor_parent_kobj(policy),
"%s", gov->gov.name);
if (!ret)
@@ -519,29 +480,21 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy)
struct dbs_governor *gov = dbs_governor_of(policy);
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct dbs_data *dbs_data = policy_dbs->dbs_data;
- int count;
+ unsigned int count;
/* Protect gov->gdbs_data against concurrent updates. */
mutex_lock(&gov_dbs_data_mutex);
- mutex_lock(&dbs_data->mutex);
- list_del(&policy_dbs->list);
- count = --dbs_data->usage_count;
- mutex_unlock(&dbs_data->mutex);
+ count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);
- if (!count) {
- kobject_put(&dbs_data->kobj);
-
- policy->governor_data = NULL;
+ policy->governor_data = NULL;
+ if (!count) {
if (!have_governor_per_policy())
gov->gdbs_data = NULL;
gov->exit(dbs_data, policy->governor->initialized == 1);
- mutex_destroy(&dbs_data->mutex);
kfree(dbs_data);
- } else {
- policy->governor_data = NULL;
}
free_policy_dbs_info(policy_dbs, gov);
@@ -570,12 +523,12 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
- unsigned int prev_load;
- j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
-
- prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
- j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;
+ j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
+ /*
+ * Make the first invocation of dbs_update() compute the load.
+ */
+ j_cdbs->prev_load = 0;
if (ignore_nice)
j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 61ff82fe0..34eb214b6 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -24,20 +24,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
-/*
- * The polling frequency depends on the capability of the processor. Default
- * polling frequency is 1000 times the transition latency of the processor. The
- * governor will work on any processor with transition latency <= 10ms, using
- * appropriate sampling rate.
- *
- * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work. All times here are in us (micro seconds).
- */
-#define MIN_SAMPLING_RATE_RATIO (2)
-#define LATENCY_MULTIPLIER (1000)
-#define MIN_LATENCY_MULTIPLIER (20)
-#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
-
/* Ondemand Sampling types */
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
@@ -52,7 +38,7 @@ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
/* Governor demand based switching data (per-policy or global). */
struct dbs_data {
- int usage_count;
+ struct gov_attr_set attr_set;
void *tuners;
unsigned int min_sampling_rate;
unsigned int ignore_nice_load;
@@ -60,37 +46,27 @@ struct dbs_data {
unsigned int sampling_down_factor;
unsigned int up_threshold;
unsigned int io_is_busy;
-
- struct kobject kobj;
- struct list_head policy_dbs_list;
- /*
- * Protect concurrent updates to governor tunables from sysfs,
- * policy_dbs_list and usage_count.
- */
- struct mutex mutex;
};
-/* Governor's specific attributes */
-struct dbs_data;
-struct governor_attr {
- struct attribute attr;
- ssize_t (*show)(struct dbs_data *dbs_data, char *buf);
- ssize_t (*store)(struct dbs_data *dbs_data, const char *buf,
- size_t count);
-};
+static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set)
+{
+ return container_of(attr_set, struct dbs_data, attr_set);
+}
#define gov_show_one(_gov, file_name) \
static ssize_t show_##file_name \
-(struct dbs_data *dbs_data, char *buf) \
+(struct gov_attr_set *attr_set, char *buf) \
{ \
+ struct dbs_data *dbs_data = to_dbs_data(attr_set); \
struct _gov##_dbs_tuners *tuners = dbs_data->tuners; \
return sprintf(buf, "%u\n", tuners->file_name); \
}
#define gov_show_one_common(file_name) \
static ssize_t show_##file_name \
-(struct dbs_data *dbs_data, char *buf) \
+(struct gov_attr_set *attr_set, char *buf) \
{ \
+ struct dbs_data *dbs_data = to_dbs_data(attr_set); \
return sprintf(buf, "%u\n", dbs_data->file_name); \
}
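For reference, gov_show_one_common(sampling_rate) expands to roughly the following accessor (attribute name chosen for illustration):

    static ssize_t show_sampling_rate(struct gov_attr_set *attr_set, char *buf)
    {
        struct dbs_data *dbs_data = to_dbs_data(attr_set);
        return sprintf(buf, "%u\n", dbs_data->sampling_rate);
    }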
@@ -135,7 +111,7 @@ static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
/* Per cpu structures */
struct cpu_dbs_info {
u64 prev_cpu_idle;
- u64 prev_cpu_wall;
+ u64 prev_update_time;
u64 prev_cpu_nice;
/*
* Used to keep track of load in the previous interval. However, when
@@ -184,7 +160,7 @@ void od_register_powersave_bias_handler(unsigned int (*f)
(struct cpufreq_policy *, unsigned int, unsigned int),
unsigned int powersave_bias);
void od_unregister_powersave_bias_handler(void);
-ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
size_t count);
void gov_update_cpu_data(struct dbs_data *dbs_data);
#endif /* _CPUFREQ_GOVERNOR_H */
diff --git a/drivers/cpufreq/cpufreq_governor_attr_set.c b/drivers/cpufreq/cpufreq_governor_attr_set.c
new file mode 100644
index 000000000..52841f807
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_governor_attr_set.c
@@ -0,0 +1,84 @@
+/*
+ * Abstract code for CPUFreq governor tunable sysfs attributes.
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cpufreq_governor.h"
+
+static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
+{
+ return container_of(kobj, struct gov_attr_set, kobj);
+}
+
+static inline struct governor_attr *to_gov_attr(struct attribute *attr)
+{
+ return container_of(attr, struct governor_attr, attr);
+}
+
+static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct governor_attr *gattr = to_gov_attr(attr);
+
+ return gattr->show(to_gov_attr_set(kobj), buf);
+}
+
+static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gov_attr_set *attr_set = to_gov_attr_set(kobj);
+ struct governor_attr *gattr = to_gov_attr(attr);
+ int ret;
+
+ mutex_lock(&attr_set->update_lock);
+ ret = attr_set->usage_count ? gattr->store(attr_set, buf, count) : -EBUSY;
+ mutex_unlock(&attr_set->update_lock);
+ return ret;
+}
+
+const struct sysfs_ops governor_sysfs_ops = {
+ .show = governor_show,
+ .store = governor_store,
+};
+EXPORT_SYMBOL_GPL(governor_sysfs_ops);
+
+void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+ INIT_LIST_HEAD(&attr_set->policy_list);
+ mutex_init(&attr_set->update_lock);
+ attr_set->usage_count = 1;
+ list_add(list_node, &attr_set->policy_list);
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_init);
+
+void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+ mutex_lock(&attr_set->update_lock);
+ attr_set->usage_count++;
+ list_add(list_node, &attr_set->policy_list);
+ mutex_unlock(&attr_set->update_lock);
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_get);
+
+unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node)
+{
+ unsigned int count;
+
+ mutex_lock(&attr_set->update_lock);
+ list_del(list_node);
+ count = --attr_set->usage_count;
+ mutex_unlock(&attr_set->update_lock);
+ if (count)
+ return count;
+
+ kobject_put(&attr_set->kobj);
+ mutex_destroy(&attr_set->update_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gov_attr_set_put);
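Taken together, these helpers give the governors a small reference-counted lifecycle for shared tunables. A sketch of the intended call pattern, simplified from the cpufreq_governor.c changes above:

    /* First policy to use the tunables: */
    gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

    /* Each additional policy sharing them: */
    gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);

    /* Teardown: the shared data is freed only when the last user is gone. */
    if (!gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list))
        kfree(dbs_data);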
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index aa7ed7068..b596c8410 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -216,9 +216,10 @@ static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;
-static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
+ size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
@@ -233,9 +234,10 @@ static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
return count;
}
-static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
@@ -249,9 +251,10 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
return count;
}
-static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
- const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct policy_dbs_info *policy_dbs;
unsigned int input;
int ret;
@@ -263,7 +266,7 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
dbs_data->sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
- list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
+ list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
/*
* Doing this without locking might lead to using different
* rate_mult values in od_update() and od_dbs_timer().
@@ -276,9 +279,10 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
return count;
}
-static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
- const char *buf, size_t count)
+static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
@@ -300,9 +304,10 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
return count;
}
-static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_powersave_bias(struct gov_attr_set *attr_set,
+ const char *buf, size_t count)
{
+ struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
struct policy_dbs_info *policy_dbs;
unsigned int input;
@@ -317,7 +322,7 @@ static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
od_tuners->powersave_bias = input;
- list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list)
+ list_for_each_entry(policy_dbs, &attr_set->policy_list, list)
ondemand_powersave_bias_init(policy_dbs->policy);
return count;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 4d16f45ee..9f3dec9a3 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
static DEFINE_MUTEX(userspace_mutex);
@@ -31,6 +32,7 @@ static DEFINE_MUTEX(userspace_mutex);
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
int ret = -EINVAL;
+ unsigned int *setspeed = policy->governor_data;
pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
@@ -38,6 +40,8 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
if (!per_cpu(cpu_is_managed, policy->cpu))
goto err;
+ *setspeed = freq;
+
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
err:
mutex_unlock(&userspace_mutex);
@@ -49,19 +53,45 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
return sprintf(buf, "%u\n", policy->cur);
}
+static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
+{
+ unsigned int *setspeed;
+
+ setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
+ if (!setspeed)
+ return -ENOMEM;
+
+ policy->governor_data = setspeed;
+ return 0;
+}
+
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
unsigned int event)
{
+ unsigned int *setspeed = policy->governor_data;
unsigned int cpu = policy->cpu;
int rc = 0;
+ if (event == CPUFREQ_GOV_POLICY_INIT)
+ return cpufreq_userspace_policy_init(policy);
+
+ if (!setspeed)
+ return -EINVAL;
+
switch (event) {
+ case CPUFREQ_GOV_POLICY_EXIT:
+ mutex_lock(&userspace_mutex);
+ policy->governor_data = NULL;
+ kfree(setspeed);
+ mutex_unlock(&userspace_mutex);
+ break;
case CPUFREQ_GOV_START:
BUG_ON(!policy->cur);
pr_debug("started managing cpu %u\n", cpu);
mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, cpu) = 1;
+ *setspeed = policy->cur;
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_STOP:
@@ -69,20 +99,23 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, cpu) = 0;
+ *setspeed = 0;
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_LIMITS:
mutex_lock(&userspace_mutex);
- pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
- cpu, policy->min, policy->max,
- policy->cur);
+ pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
+ cpu, policy->min, policy->max, policy->cur, *setspeed);
- if (policy->max < policy->cur)
+ if (policy->max < *setspeed)
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
- else if (policy->min > policy->cur)
+ else if (policy->min > *setspeed)
__cpufreq_driver_target(policy, policy->min,
CPUFREQ_RELATION_L);
+ else
+ __cpufreq_driver_target(policy, *setspeed,
+ CPUFREQ_RELATION_L);
mutex_unlock(&userspace_mutex);
break;
}
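With the per-policy setspeed stored, the limits handler can restore the user's chosen speed once the constraints widen again. A hypothetical helper restating the decision, for illustration only:

    static unsigned int userspace_pick_freq(unsigned int setspeed,
                                            unsigned int min, unsigned int max)
    {
        if (max < setspeed)
            return max;      /* clamp down while the limits are narrow */
        if (min > setspeed)
            return min;      /* clamp up */
        return setspeed;     /* limits permit it: restore the user's choice */
    }

Previously the governor compared against policy->cur, so a speed set just before a temporary limit change was silently forgotten.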
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index 4085244c8..cdf097b29 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -6,6 +6,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
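The pr_fmt define has to appear before the first include so that the printk conversions further down pick it up; every pr_info()/pr_err() in the file then expands with the module name prepended, which is why the hand-written "eps: " prefixes can be dropped:

    pr_err("Timeout!\n");
    /* expands to printk(KERN_ERR "e_powersaver" ": " "Timeout!\n") */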
@@ -20,7 +22,7 @@
#include <asm/msr.h>
#include <asm/tsc.h>
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
#include <linux/acpi.h>
#include <acpi/processor.h>
#endif
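IS_ENABLED(CONFIG_ACPI_PROCESSOR) evaluates to 1 when the option is built in (=y) or modular (=m), so the single test replaces the old "defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE" pair; unlike #ifdef it can also be used in ordinary C expressions, e.g.:

    if (IS_ENABLED(CONFIG_ACPI_PROCESSOR))
        check_bios_limit();  /* hypothetical call, eliminated as dead code when =n */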
@@ -33,7 +35,7 @@
struct eps_cpu_data {
u32 fsb;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
u32 bios_limit;
#endif
struct cpufreq_frequency_table freq_table[];
@@ -46,7 +48,7 @@ static int freq_failsafe_off;
static int voltage_failsafe_off;
static int set_max_voltage;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
static int ignore_acpi_limit;
static struct acpi_processor_performance *eps_acpi_cpu_perf;
@@ -141,11 +143,9 @@ static int eps_set_state(struct eps_cpu_data *centaur,
/* Print voltage and multiplier */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
current_voltage = lo & 0xff;
- printk(KERN_INFO "eps: Current voltage = %dmV\n",
- current_voltage * 16 + 700);
+ pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700);
current_multiplier = (lo >> 8) & 0xff;
- printk(KERN_INFO "eps: Current multiplier = %d\n",
- current_multiplier);
+ pr_info("Current multiplier = %d\n", current_multiplier);
}
#endif
return 0;
@@ -166,7 +166,7 @@ static int eps_target(struct cpufreq_policy *policy, unsigned int index)
dest_state = centaur->freq_table[index].driver_data & 0xffff;
ret = eps_set_state(centaur, policy, dest_state);
if (ret)
- printk(KERN_ERR "eps: Timeout!\n");
+ pr_err("Timeout!\n");
return ret;
}
@@ -186,7 +186,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
int k, step, voltage;
int ret;
int states;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
unsigned int limit;
#endif
@@ -194,36 +194,36 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
/* Check brand */
- printk(KERN_INFO "eps: Detected VIA ");
+ pr_info("Detected VIA ");
switch (c->x86_model) {
case 10:
rdmsr(0x1153, lo, hi);
brand = (((lo >> 2) ^ lo) >> 18) & 3;
- printk(KERN_CONT "Model A ");
+ pr_cont("Model A ");
break;
case 13:
rdmsr(0x1154, lo, hi);
brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff;
- printk(KERN_CONT "Model D ");
+ pr_cont("Model D ");
break;
}
switch (brand) {
case EPS_BRAND_C7M:
- printk(KERN_CONT "C7-M\n");
+ pr_cont("C7-M\n");
break;
case EPS_BRAND_C7:
- printk(KERN_CONT "C7\n");
+ pr_cont("C7\n");
break;
case EPS_BRAND_EDEN:
- printk(KERN_CONT "Eden\n");
+ pr_cont("Eden\n");
break;
case EPS_BRAND_C7D:
- printk(KERN_CONT "C7-D\n");
+ pr_cont("C7-D\n");
break;
case EPS_BRAND_C3:
- printk(KERN_CONT "C3\n");
+ pr_cont("C3\n");
return -ENODEV;
break;
}
@@ -235,7 +235,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Can be locked at 0 */
rdmsrl(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
- printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
+ pr_info("Can't enable Enhanced PowerSaver\n");
return -ENODEV;
}
}
@@ -243,22 +243,19 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Print voltage and multiplier */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
current_voltage = lo & 0xff;
- printk(KERN_INFO "eps: Current voltage = %dmV\n",
- current_voltage * 16 + 700);
+ pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700);
current_multiplier = (lo >> 8) & 0xff;
- printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier);
+ pr_info("Current multiplier = %d\n", current_multiplier);
/* Print limits */
max_voltage = hi & 0xff;
- printk(KERN_INFO "eps: Highest voltage = %dmV\n",
- max_voltage * 16 + 700);
+ pr_info("Highest voltage = %dmV\n", max_voltage * 16 + 700);
max_multiplier = (hi >> 8) & 0xff;
- printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier);
+ pr_info("Highest multiplier = %d\n", max_multiplier);
min_voltage = (hi >> 16) & 0xff;
- printk(KERN_INFO "eps: Lowest voltage = %dmV\n",
- min_voltage * 16 + 700);
+ pr_info("Lowest voltage = %dmV\n", min_voltage * 16 + 700);
min_multiplier = (hi >> 24) & 0xff;
- printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier);
+ pr_info("Lowest multiplier = %d\n", min_multiplier);
/* Sanity checks */
if (current_multiplier == 0 || max_multiplier == 0
@@ -276,34 +273,30 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Check for systems using underclocked CPU */
if (!freq_failsafe_off && max_multiplier != current_multiplier) {
- printk(KERN_INFO "eps: Your processor is running at different "
- "frequency then its maximum. Aborting.\n");
- printk(KERN_INFO "eps: You can use freq_failsafe_off option "
- "to disable this check.\n");
+ pr_info("Your processor is running at different frequency then its maximum. Aborting.\n");
+ pr_info("You can use freq_failsafe_off option to disable this check.\n");
return -EINVAL;
}
if (!voltage_failsafe_off && max_voltage != current_voltage) {
- printk(KERN_INFO "eps: Your processor is running at different "
- "voltage then its maximum. Aborting.\n");
- printk(KERN_INFO "eps: You can use voltage_failsafe_off "
- "option to disable this check.\n");
+ pr_info("Your processor is running at different voltage then its maximum. Aborting.\n");
+ pr_info("You can use voltage_failsafe_off option to disable this check.\n");
return -EINVAL;
}
/* Calc FSB speed */
fsb = cpu_khz / current_multiplier;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
/* Check for ACPI processor speed limit */
if (!ignore_acpi_limit && !eps_acpi_init()) {
if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) {
- printk(KERN_INFO "eps: ACPI limit %u.%uGHz\n",
+ pr_info("ACPI limit %u.%uGHz\n",
limit/1000000,
(limit%1000000)/10000);
eps_acpi_exit(policy);
/* Check if max_multiplier is in BIOS limits */
if (limit && max_multiplier * fsb > limit) {
- printk(KERN_INFO "eps: Aborting.\n");
+ pr_info("Aborting\n");
return -EINVAL;
}
}
@@ -319,8 +312,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
v = (set_max_voltage - 700) / 16;
/* Check if voltage is within limits */
if (v >= min_voltage && v <= max_voltage) {
- printk(KERN_INFO "eps: Setting %dmV as maximum.\n",
- v * 16 + 700);
+ pr_info("Setting %dmV as maximum\n", v * 16 + 700);
max_voltage = v;
}
}
@@ -341,7 +333,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Copy basic values */
centaur->fsb = fsb;
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
centaur->bios_limit = limit;
#endif
@@ -426,7 +418,7 @@ module_param(freq_failsafe_off, int, 0644);
MODULE_PARM_DESC(freq_failsafe_off, "Disable current vs max frequency check");
module_param(voltage_failsafe_off, int, 0644);
MODULE_PARM_DESC(voltage_failsafe_off, "Disable current vs max voltage check");
-#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
+#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
module_param(ignore_acpi_limit, int, 0644);
MODULE_PARM_DESC(ignore_acpi_limit, "Don't check ACPI's processor speed limit");
#endif
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 1c06e786c..bfce11cba 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -16,6 +16,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -185,7 +187,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
static int __init elanfreq_setup(char *str)
{
max_freq = simple_strtoul(str, &str, 0);
- printk(KERN_WARNING "You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
+ pr_warn("You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
return 1;
}
__setup("elanfreq=", elanfreq_setup);
diff --git a/drivers/cpufreq/hisi-acpu-cpufreq.c b/drivers/cpufreq/hisi-acpu-cpufreq.c
deleted file mode 100644
index 026d5b222..000000000
--- a/drivers/cpufreq/hisi-acpu-cpufreq.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Hisilicon Platforms Using ACPU CPUFreq Support
- *
- * Copyright (c) 2015 Hisilicon Limited.
- * Copyright (c) 2015 Linaro Limited.
- *
- * Leo Yan <leo.yan@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-static int __init hisi_acpu_cpufreq_driver_init(void)
-{
- struct platform_device *pdev;
-
- if (!of_machine_is_compatible("hisilicon,hi6220"))
- return -ENODEV;
-
- pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
- return PTR_ERR_OR_ZERO(pdev);
-}
-module_init(hisi_acpu_cpufreq_driver_init);
-
-MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
-MODULE_DESCRIPTION("Hisilicon acpu cpufreq driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index 0202429f1..759612da4 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -8,6 +8,8 @@
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -118,8 +120,7 @@ processor_get_freq (
if (ret) {
set_cpus_allowed_ptr(current, &saved_mask);
- printk(KERN_WARNING "get performance failed with error %d\n",
- ret);
+ pr_warn("get performance failed with error %d\n", ret);
ret = 0;
goto migrate_end;
}
@@ -177,7 +178,7 @@ processor_set_freq (
ret = processor_set_pstate(value);
if (ret) {
- printk(KERN_WARNING "Transition failed with error %d\n", ret);
+ pr_warn("Transition failed with error %d\n", ret);
retval = -ENODEV;
goto migrate_end;
}
@@ -291,8 +292,7 @@ acpi_cpufreq_cpu_init (
/* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE);
- printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
- "activated.\n", cpu);
+ pr_info("CPU%u - ACPI performance management activated\n", cpu);
for (i = 0; i < data->acpi_data.state_count; i++)
pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index d63074ff6..970df8986 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -10,6 +10,8 @@
* of the License.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
@@ -39,10 +41,17 @@
#define ATOM_TURBO_RATIOS 0x66c
#define ATOM_TURBO_VIDS 0x66d
+#ifdef CONFIG_ACPI
+#include <acpi/processor.h>
+#endif
+
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
+#define EXT_BITS 6
+#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
+
static inline int32_t mul_fp(int32_t x, int32_t y)
{
return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
@@ -64,12 +73,22 @@ static inline int ceiling_fp(int32_t x)
return ret;
}
+static inline u64 mul_ext_fp(u64 x, u64 y)
+{
+ return (x * y) >> EXT_FRAC_BITS;
+}
+
+static inline u64 div_ext_fp(u64 x, u64 y)
+{
+ return div64_u64(x << EXT_FRAC_BITS, y);
+}
+
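These helpers extend the driver's 8-bit fixed point with 6 more fractional bits (14 in total) so that APERF/MPERF ratios keep their precision. Illustrative arithmetic only:

    u64 perf = div_ext_fp(2000, 1000);     /* APERF twice MPERF -> 2 << 14 */
    u64 khz  = mul_ext_fp(perf, 3000000);  /* 2.0 * 3000000 kHz = 6000000  */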
/**
* struct sample - Store performance sample
- * @core_pct_busy: Ratio of APERF/MPERF in percent, which is actual
+ * @core_avg_perf: Ratio of APERF/MPERF which is the actual average
* performance during last sample period
* @busy_scaled: Scaled busy value which is used to calculate next
- * P state. This can be different than core_pct_busy
+ * P state. This can be different than core_avg_perf
* to account for cpu idle period
* @aperf: Difference of actual performance frequency clock count
* read from APERF MSR between last and current sample
@@ -84,7 +103,7 @@ static inline int ceiling_fp(int32_t x)
* data for choosing next P State.
*/
struct sample {
- int32_t core_pct_busy;
+ int32_t core_avg_perf;
int32_t busy_scaled;
u64 aperf;
u64 mperf;
@@ -162,6 +181,7 @@ struct _pid {
* struct cpudata - Per CPU instance data storage
* @cpu: CPU number for this instance data
* @update_util: CPUFreq utility callback information
+ * @update_util_set: CPUFreq utility callback is set
* @pstate: Stores P state limits for this CPU
* @vid: Stores VID limits for this CPU
* @pid: Stores PID parameters for this CPU
@@ -172,6 +192,8 @@ struct _pid {
* @prev_cummulative_iowait: IO Wait time difference from last and
* current sample
* @sample: Storage for storing last Sample data
+ * @acpi_perf_data: Stores ACPI perf information read from _PSS
+ * @valid_pss_table: Set to true for valid ACPI _PSS entries found
*
* This structure stores per CPU instance data for all CPUs.
*/
@@ -179,6 +201,7 @@ struct cpudata {
int cpu;
struct update_util_data update_util;
+ bool update_util_set;
struct pstate_data pstate;
struct vid_data vid;
@@ -190,6 +213,10 @@ struct cpudata {
u64 prev_tsc;
u64 prev_cummulative_iowait;
struct sample sample;
+#ifdef CONFIG_ACPI
+ struct acpi_processor_performance acpi_perf_data;
+ bool valid_pss_table;
+#endif
};
static struct cpudata **all_cpu_data;
@@ -258,6 +285,9 @@ static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;
+#ifdef CONFIG_ACPI
+static bool acpi_ppc;
+#endif
/**
* struct perf_limits - Store user and policy limits
@@ -331,6 +361,106 @@ static struct perf_limits *limits = &performance_limits;
static struct perf_limits *limits = &powersave_limits;
#endif
+#ifdef CONFIG_ACPI
+
+static bool intel_pstate_get_ppc_enable_status(void)
+{
+ if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
+ acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
+ return true;
+
+ return acpi_ppc;
+}
+
+static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu;
+ int ret;
+ int i;
+
+ if (hwp_active)
+ return;
+
+ if (!intel_pstate_get_ppc_enable_status())
+ return;
+
+ cpu = all_cpu_data[policy->cpu];
+
+ ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
+ policy->cpu);
+ if (ret)
+ return;
+
+ /*
+ * Check if the control value in _PSS is for PERF_CTL MSR, which should
+ * guarantee that the states returned by it map to the states in our
+ * list directly.
+ */
+ if (cpu->acpi_perf_data.control_register.space_id !=
+ ACPI_ADR_SPACE_FIXED_HARDWARE)
+ goto err;
+
+ /*
+ * If there is only one entry in _PSS, simply ignore it and continue
+ * as usual without taking it into account.
+ */
+ if (cpu->acpi_perf_data.state_count < 2)
+ goto err;
+
+ pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
+ for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
+ pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
+ (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
+ (u32) cpu->acpi_perf_data.states[i].core_frequency,
+ (u32) cpu->acpi_perf_data.states[i].power,
+ (u32) cpu->acpi_perf_data.states[i].control);
+ }
+
+ /*
+ * The _PSS table doesn't contain the whole turbo frequency range;
+ * it only lists +1 MHz above the max non-turbo frequency, with a
+ * control value corresponding to the max turbo ratio. When cpufreq
+ * set_policy is then called with that max frequency, performance is
+ * reduced, because this driver uses the real max turbo frequency as
+ * the max frequency. So correct the frequency in the _PSS table to
+ * the max turbo frequency based on the turbo state, converting to
+ * MHz since _PSS frequencies are in MHz.
+ */
+ if (!limits->turbo_disabled)
+ cpu->acpi_perf_data.states[0].core_frequency =
+ policy->cpuinfo.max_freq / 1000;
+ cpu->valid_pss_table = true;
+ pr_debug("_PPC limits will be enforced\n");
+
+ return;
+
+ err:
+ cpu->valid_pss_table = false;
+ acpi_processor_unregister_performance(policy->cpu);
+}
+
+static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu;
+
+ cpu = all_cpu_data[policy->cpu];
+ if (!cpu->valid_pss_table)
+ return;
+
+ acpi_processor_unregister_performance(policy->cpu);
+}
+
+#else
+static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
+{
+}
+
+static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
+{
+}
+#endif
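The empty !CONFIG_ACPI stubs keep the call sites free of preprocessor guards:

    intel_pstate_init_acpi_perf_limits(policy);  /* no-op when ACPI is off */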
+
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
int deadband, int integral) {
pid->setpoint = int_tofp(setpoint);
@@ -341,17 +471,17 @@ static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
- pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
+ pid->p_gain = div_fp(percent, 100);
}
static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
- pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
+ pid->i_gain = div_fp(percent, 100);
}
static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
- pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
+ pid->d_gain = div_fp(percent, 100);
}
static signed int pid_calc(struct _pid *pid, int32_t busy)
@@ -537,7 +667,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
- turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
+ turbo_fp = div_fp(no_turbo, total);
turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
return sprintf(buf, "%u\n", turbo_pct);
}
@@ -579,7 +709,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
update_turbo_state();
if (limits->turbo_disabled) {
- pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
+ pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
return -EPERM;
}
@@ -608,8 +738,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits->max_perf_pct);
limits->max_perf_pct = max(limits->min_perf_pct,
limits->max_perf_pct);
- limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
- int_tofp(100));
+ limits->max_perf = div_fp(limits->max_perf_pct, 100);
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
@@ -633,8 +762,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
limits->min_perf_pct);
limits->min_perf_pct = min(limits->max_perf_pct,
limits->min_perf_pct);
- limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
- int_tofp(100));
+ limits->min_perf = div_fp(limits->min_perf_pct, 100);
if (hwp_active)
intel_pstate_hwp_set_online_cpus();
@@ -1024,15 +1152,11 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
intel_pstate_set_min_pstate(cpu);
}
-static inline void intel_pstate_calc_busy(struct cpudata *cpu)
+static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
- int64_t core_pct;
-
- core_pct = int_tofp(sample->aperf) * int_tofp(100);
- core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
- sample->core_pct_busy = (int32_t)core_pct;
+ sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}
static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
@@ -1075,9 +1199,14 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
- return fp_toint(mul_fp(cpu->sample.core_pct_busy,
- int_tofp(cpu->pstate.max_pstate_physical *
- cpu->pstate.scaling / 100)));
+ return mul_ext_fp(cpu->sample.core_avg_perf,
+ cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
+}
+
+static inline int32_t get_avg_pstate(struct cpudata *cpu)
+{
+ return mul_ext_fp(cpu->pstate.max_pstate_physical,
+ cpu->sample.core_avg_perf);
}
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
@@ -1112,49 +1241,43 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
cpu->sample.busy_scaled = cpu_load;
- return cpu->pstate.current_pstate - pid_calc(&cpu->pid, cpu_load);
+ return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load);
}
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
- int32_t core_busy, max_pstate, current_pstate, sample_ratio;
+ int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
u64 duration_ns;
/*
- * core_busy is the ratio of actual performance to max
- * max_pstate is the max non turbo pstate available
- * current_pstate was the pstate that was requested during
- * the last sample period.
- *
- * We normalize core_busy, which was our actual percent
- * performance to what we requested during the last sample
- * period. The result will be a percentage of busy at a
- * specified pstate.
+ * perf_scaled is the average performance during the last sampling
+ * period scaled by the ratio of the maximum P-state to the P-state
+ * requested last time (in percent). That measures the system's
+ * response to the previous P-state selection.
*/
- core_busy = cpu->sample.core_pct_busy;
- max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
- current_pstate = int_tofp(cpu->pstate.current_pstate);
- core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+ max_pstate = cpu->pstate.max_pstate_physical;
+ current_pstate = cpu->pstate.current_pstate;
+ perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
+ div_fp(100 * max_pstate, current_pstate));
/*
* Since our utilization update callback will not run unless we are
* in C0, check if the actual elapsed time is significantly greater (3x)
* than our sample interval. If it is, then we were idle for a long
- * enough period of time to adjust our busyness.
+ * enough period of time to adjust our performance metric.
*/
duration_ns = cpu->sample.time - cpu->last_sample_time;
if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
- sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns),
- int_tofp(duration_ns));
- core_busy = mul_fp(core_busy, sample_ratio);
+ sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
+ perf_scaled = mul_fp(perf_scaled, sample_ratio);
} else {
sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
if (sample_ratio < int_tofp(1))
- core_busy = 0;
+ perf_scaled = 0;
}
- cpu->sample.busy_scaled = core_busy;
- return cpu->pstate.current_pstate - pid_calc(&cpu->pid, core_busy);
+ cpu->sample.busy_scaled = perf_scaled;
+ return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}
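Plugging round numbers into the rewritten metric (illustrative values only): with max_pstate_physical and the last requested P-state both 24, an average APERF/MPERF ratio of 0.5 yields

    perf_scaled = mul_ext_fp(div_ext_fp(1, 2),       /* core_avg_perf = 0.5 */
                             div_fp(100 * 24, 24));  /* = int_tofp(100)     */
    /* -> int_tofp(50): the CPU delivered half its maximum performance. */

pid_calc() then steers current_pstate toward that 50% figure.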
static inline void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
@@ -1184,7 +1307,7 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
intel_pstate_update_pstate(cpu, target_pstate);
sample = &cpu->sample;
- trace_pstate_sample(fp_toint(sample->core_pct_busy),
+ trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
fp_toint(sample->busy_scaled),
from,
cpu->pstate.current_pstate,
@@ -1204,7 +1327,7 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
bool sample_taken = intel_pstate_sample(cpu, time);
if (sample_taken) {
- intel_pstate_calc_busy(cpu);
+ intel_pstate_calc_avg_perf(cpu);
if (!hwp_active)
intel_pstate_adjust_busy_pstate(cpu);
}
@@ -1266,37 +1389,41 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_busy_pid_reset(cpu);
- cpu->update_util.func = intel_pstate_update_util;
-
- pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
+ pr_debug("controlling: cpu %d\n", cpunum);
return 0;
}
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
- struct sample *sample;
- struct cpudata *cpu;
+ struct cpudata *cpu = all_cpu_data[cpu_num];
- cpu = all_cpu_data[cpu_num];
- if (!cpu)
- return 0;
- sample = &cpu->sample;
- return get_avg_frequency(cpu);
+ return cpu ? get_avg_frequency(cpu) : 0;
}
static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
struct cpudata *cpu = all_cpu_data[cpu_num];
+ if (cpu->update_util_set)
+ return;
+
/* Prevent intel_pstate_update_util() from using stale data. */
cpu->sample.time = 0;
- cpufreq_set_update_util_data(cpu_num, &cpu->update_util);
+ cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
+ intel_pstate_update_util);
+ cpu->update_util_set = true;
}
static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
- cpufreq_set_update_util_data(cpu, NULL);
+ struct cpudata *cpu_data = all_cpu_data[cpu];
+
+ if (!cpu_data->update_util_set)
+ return;
+
+ cpufreq_remove_update_util_hook(cpu);
+ cpu_data->update_util_set = false;
synchronize_sched();
}
@@ -1316,20 +1443,31 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits)
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
+ struct cpudata *cpu;
+
if (!policy->cpuinfo.max_freq)
return -ENODEV;
- intel_pstate_clear_update_util_hook(policy->cpu);
+ pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
+ policy->cpuinfo.max_freq, policy->max);
+
+ cpu = all_cpu_data[0];
+ if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
+ policy->max < policy->cpuinfo.max_freq &&
+ policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
+ pr_debug("policy->max > max non turbo frequency\n");
+ policy->max = policy->cpuinfo.max_freq;
+ }
if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
limits = &performance_limits;
if (policy->max >= policy->cpuinfo.max_freq) {
- pr_debug("intel_pstate: set performance\n");
+ pr_debug("set performance\n");
intel_pstate_set_performance_limits(limits);
goto out;
}
} else {
- pr_debug("intel_pstate: set powersave\n");
+ pr_debug("set powersave\n");
limits = &powersave_limits;
}
@@ -1348,15 +1486,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_sysfs_pct);
limits->max_perf_pct = max(limits->min_policy_pct,
limits->max_perf_pct);
- limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
/* Make sure min_perf_pct <= max_perf_pct */
limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
- limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
- int_tofp(100));
- limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
- int_tofp(100));
+ limits->min_perf = div_fp(limits->min_perf_pct, 100);
+ limits->max_perf = div_fp(limits->max_perf_pct, 100);
+ limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
out:
intel_pstate_set_update_util_hook(policy->cpu);
@@ -1382,7 +1518,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
int cpu_num = policy->cpu;
struct cpudata *cpu = all_cpu_data[cpu_num];
- pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
+ pr_debug("CPU %d exiting\n", cpu_num);
intel_pstate_clear_update_util_hook(cpu_num);
@@ -1413,14 +1549,25 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
- policy->cpuinfo.max_freq =
- cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ update_turbo_state();
+ policy->cpuinfo.max_freq = limits->turbo_disabled ?
+ cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+ policy->cpuinfo.max_freq *= cpu->pstate.scaling;
+
+ intel_pstate_init_acpi_perf_limits(policy);
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
cpumask_set_cpu(policy->cpu, policy->cpus);
return 0;
}
+static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
+{
+ intel_pstate_exit_perf_limits(policy);
+
+ return 0;
+}
+
static struct cpufreq_driver intel_pstate_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = intel_pstate_verify_policy,
@@ -1428,6 +1575,7 @@ static struct cpufreq_driver intel_pstate_driver = {
.resume = intel_pstate_hwp_set_policy,
.get = intel_pstate_get,
.init = intel_pstate_cpu_init,
+ .exit = intel_pstate_cpu_exit,
.stop_cpu = intel_pstate_stop_cpu,
.name = "intel_pstate",
};
@@ -1471,8 +1619,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
}
-#if IS_ENABLED(CONFIG_ACPI)
-#include <acpi/processor.h>
+#ifdef CONFIG_ACPI
static bool intel_pstate_no_acpi_pss(void)
{
@@ -1628,7 +1775,7 @@ hwp_cpu_matched:
if (intel_pstate_platform_pwr_mgmt_exists())
return -ENODEV;
- pr_info("Intel P-state driver initializing.\n");
+ pr_info("Intel P-state driver initializing\n");
all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
if (!all_cpu_data)
@@ -1645,7 +1792,7 @@ hwp_cpu_matched:
intel_pstate_sysfs_expose_params();
if (hwp_active)
- pr_info("intel_pstate: HWP enabled\n");
+ pr_info("HWP enabled\n");
return rc;
out:
@@ -1671,13 +1818,19 @@ static int __init intel_pstate_setup(char *str)
if (!strcmp(str, "disable"))
no_load = 1;
if (!strcmp(str, "no_hwp")) {
- pr_info("intel_pstate: HWP disabled\n");
+ pr_info("HWP disabled\n");
no_hwp = 1;
}
if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))
hwp_only = 1;
+
+#ifdef CONFIG_ACPI
+ if (!strcmp(str, "support_acpi_ppc"))
+ acpi_ppc = true;
+#endif
+
return 0;
}
early_param("intel_pstate", intel_pstate_setup);
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 0f6b229af..c46a12df4 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -21,6 +21,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -40,8 +42,6 @@
#include "longhaul.h"
-#define PFX "longhaul: "
-
#define TYPE_LONGHAUL_V1 1
#define TYPE_LONGHAUL_V2 2
#define TYPE_POWERSAVER 3
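
This PFX-to-pr_fmt conversion recurs throughout the series. As a simplified sketch of the mechanism (the real definitions live in <linux/printk.h>), pr_info() and friends paste pr_fmt() around the format string at compile time, so every message in the file picks up the module-name prefix automatically:

    /* Simplified sketch of <linux/printk.h> */
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt     /* per-file override */
    #define pr_info(fmt, ...) \
            printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

    /* So, inside longhaul.c: */
    pr_info("Voltage scaling enabled\n");
    /* prints: longhaul: Voltage scaling enabled */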
@@ -347,14 +347,13 @@ retry_loop:
freqs.new = calc_speed(longhaul_get_cpu_mult());
/* Check if requested frequency is set. */
if (unlikely(freqs.new != speed)) {
- printk(KERN_INFO PFX "Failed to set requested frequency!\n");
+ pr_info("Failed to set requested frequency!\n");
/* Revision ID = 1 but processor is expecting revision key
* equal to 0. Jumpers at the bottom of processor will change
* multiplier and FSB, but will not change bits in Longhaul
* MSR nor enable voltage scaling. */
if (!revid_errata) {
- printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" "
- "option.\n");
+ pr_info("Enabling \"Ignore Revision ID\" option\n");
revid_errata = 1;
msleep(200);
goto retry_loop;
@@ -364,11 +363,10 @@ retry_loop:
* but it doesn't change frequency. I tried poking various
* bits in northbridge registers, but without success. */
if (longhaul_flags & USE_ACPI_C3) {
- printk(KERN_INFO PFX "Disabling ACPI C3 support.\n");
+ pr_info("Disabling ACPI C3 support\n");
longhaul_flags &= ~USE_ACPI_C3;
if (revid_errata) {
- printk(KERN_INFO PFX "Disabling \"Ignore "
- "Revision ID\" option.\n");
+ pr_info("Disabling \"Ignore Revision ID\" option\n");
revid_errata = 0;
}
msleep(200);
@@ -379,7 +377,7 @@ retry_loop:
* RevID = 1. RevID errata will make things right. Just
* to be 100% sure. */
if (longhaul_version == TYPE_LONGHAUL_V2) {
- printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n");
+ pr_info("Switching to Longhaul ver. 1\n");
longhaul_version = TYPE_LONGHAUL_V1;
msleep(200);
goto retry_loop;
@@ -387,8 +385,7 @@ retry_loop:
}
if (!bm_timeout) {
- printk(KERN_INFO PFX "Warning: Timeout while waiting for "
- "idle PCI bus.\n");
+ pr_info("Warning: Timeout while waiting for idle PCI bus\n");
return -EBUSY;
}
@@ -433,12 +430,12 @@ static int longhaul_get_ranges(void)
/* Get current frequency */
mult = longhaul_get_cpu_mult();
if (mult == -1) {
- printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n");
+ pr_info("Invalid (reserved) multiplier!\n");
return -EINVAL;
}
fsb = guess_fsb(mult);
if (fsb == 0) {
- printk(KERN_INFO PFX "Invalid (reserved) FSB!\n");
+ pr_info("Invalid (reserved) FSB!\n");
return -EINVAL;
}
/* Get max multiplier - as we always did.
@@ -468,11 +465,11 @@ static int longhaul_get_ranges(void)
print_speed(highest_speed/1000));
if (lowest_speed == highest_speed) {
- printk(KERN_INFO PFX "highestspeed == lowest, aborting.\n");
+ pr_info("highestspeed == lowest, aborting\n");
return -EINVAL;
}
if (lowest_speed > highest_speed) {
- printk(KERN_INFO PFX "nonsense! lowest (%d > %d) !\n",
+ pr_info("nonsense! lowest (%d > %d) !\n",
lowest_speed, highest_speed);
return -EINVAL;
}
@@ -538,16 +535,16 @@ static void longhaul_setup_voltagescaling(void)
rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
if (!(longhaul.bits.RevisionID & 1)) {
- printk(KERN_INFO PFX "Voltage scaling not supported by CPU.\n");
+ pr_info("Voltage scaling not supported by CPU\n");
return;
}
if (!longhaul.bits.VRMRev) {
- printk(KERN_INFO PFX "VRM 8.5\n");
+ pr_info("VRM 8.5\n");
vrm_mV_table = &vrm85_mV[0];
mV_vrm_table = &mV_vrm85[0];
} else {
- printk(KERN_INFO PFX "Mobile VRM\n");
+ pr_info("Mobile VRM\n");
if (cpu_model < CPU_NEHEMIAH)
return;
vrm_mV_table = &mobilevrm_mV[0];
@@ -558,27 +555,21 @@ static void longhaul_setup_voltagescaling(void)
maxvid = vrm_mV_table[longhaul.bits.MaximumVID];
if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) {
- printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. "
- "Voltage scaling disabled.\n",
- minvid.mV/1000, minvid.mV%1000,
- maxvid.mV/1000, maxvid.mV%1000);
+ pr_info("Bogus values Min:%d.%03d Max:%d.%03d - Voltage scaling disabled\n",
+ minvid.mV/1000, minvid.mV%1000,
+ maxvid.mV/1000, maxvid.mV%1000);
return;
}
if (minvid.mV == maxvid.mV) {
- printk(KERN_INFO PFX "Claims to support voltage scaling but "
- "min & max are both %d.%03d. "
- "Voltage scaling disabled\n",
- maxvid.mV/1000, maxvid.mV%1000);
+ pr_info("Claims to support voltage scaling but min & max are both %d.%03d - Voltage scaling disabled\n",
+ maxvid.mV/1000, maxvid.mV%1000);
return;
}
/* How many voltage steps */
numvscales = maxvid.pos - minvid.pos + 1;
- printk(KERN_INFO PFX
- "Max VID=%d.%03d "
- "Min VID=%d.%03d, "
- "%d possible voltage scales\n",
+ pr_info("Max VID=%d.%03d Min VID=%d.%03d, %d possible voltage scales\n",
maxvid.mV/1000, maxvid.mV%1000,
minvid.mV/1000, minvid.mV%1000,
numvscales);
@@ -617,12 +608,12 @@ static void longhaul_setup_voltagescaling(void)
pos = minvid.pos;
freq_pos->driver_data |= mV_vrm_table[pos] << 8;
vid = vrm_mV_table[mV_vrm_table[pos]];
- printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n",
+ pr_info("f: %d kHz, index: %d, vid: %d mV\n",
speed, (int)(freq_pos - longhaul_table), vid.mV);
}
can_scale_voltage = 1;
- printk(KERN_INFO PFX "Voltage scaling enabled.\n");
+ pr_info("Voltage scaling enabled\n");
}
@@ -720,8 +711,7 @@ static int enable_arbiter_disable(void)
pci_write_config_byte(dev, reg, pci_cmd);
pci_read_config_byte(dev, reg, &pci_cmd);
if (!(pci_cmd & 1<<7)) {
- printk(KERN_ERR PFX
- "Can't enable access to port 0x22.\n");
+ pr_err("Can't enable access to port 0x22\n");
status = 0;
}
}
@@ -758,8 +748,7 @@ static int longhaul_setup_southbridge(void)
if (pci_cmd & 1 << 7) {
pci_read_config_dword(dev, 0x88, &acpi_regs_addr);
acpi_regs_addr &= 0xff00;
- printk(KERN_INFO PFX "ACPI I/O at 0x%x\n",
- acpi_regs_addr);
+ pr_info("ACPI I/O at 0x%x\n", acpi_regs_addr);
}
pci_dev_put(dev);
@@ -853,14 +842,14 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
longhaul_version = TYPE_LONGHAUL_V1;
}
- printk(KERN_INFO PFX "VIA %s CPU detected. ", cpuname);
+ pr_info("VIA %s CPU detected. ", cpuname);
switch (longhaul_version) {
case TYPE_LONGHAUL_V1:
case TYPE_LONGHAUL_V2:
- printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version);
+ pr_cont("Longhaul v%d supported\n", longhaul_version);
break;
case TYPE_POWERSAVER:
- printk(KERN_CONT "Powersaver supported.\n");
+ pr_cont("Powersaver supported\n");
break;
};
@@ -889,15 +878,14 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
if (!(longhaul_flags & USE_ACPI_C3
|| longhaul_flags & USE_NORTHBRIDGE)
&& ((pr == NULL) || !(pr->flags.bm_control))) {
- printk(KERN_ERR PFX
- "No ACPI support. Unsupported northbridge.\n");
+ pr_err("No ACPI support: Unsupported northbridge\n");
return -ENODEV;
}
if (longhaul_flags & USE_NORTHBRIDGE)
- printk(KERN_INFO PFX "Using northbridge support.\n");
+ pr_info("Using northbridge support\n");
if (longhaul_flags & USE_ACPI_C3)
- printk(KERN_INFO PFX "Using ACPI support.\n");
+ pr_info("Using ACPI support\n");
ret = longhaul_get_ranges();
if (ret != 0)
@@ -934,20 +922,18 @@ static int __init longhaul_init(void)
return -ENODEV;
if (!enable) {
- printk(KERN_ERR PFX "Option \"enable\" not set. Aborting.\n");
+ pr_err("Option \"enable\" not set - Aborting\n");
return -ENODEV;
}
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
- printk(KERN_ERR PFX "More than 1 CPU detected, "
- "longhaul disabled.\n");
+ pr_err("More than 1 CPU detected, longhaul disabled\n");
return -ENODEV;
}
#endif
#ifdef CONFIG_X86_IO_APIC
- if (cpu_has_apic) {
- printk(KERN_ERR PFX "APIC detected. Longhaul is currently "
- "broken in this configuration.\n");
+ if (boot_cpu_has(X86_FEATURE_APIC)) {
+ pr_err("APIC detected. Longhaul is currently broken in this configuration.\n");
return -ENODEV;
}
#endif
@@ -955,7 +941,7 @@ static int __init longhaul_init(void)
case 6 ... 9:
return cpufreq_register_driver(&longhaul_driver);
case 10:
- printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n");
+ pr_err("Use acpi-cpufreq driver for VIA C7\n");
default:
;
}
diff --git a/drivers/cpufreq/ls1x-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
index 262581b33..be89416e2 100644
--- a/drivers/cpufreq/ls1x-cpufreq.c
+++ b/drivers/cpufreq/loongson1-cpufreq.c
@@ -1,7 +1,7 @@
/*
* CPU Frequency Scaling for Loongson 1 SoC
*
- * Copyright (C) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
+ * Copyright (C) 2014-2016 Zhang, Keguang <keguang.zhang@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -20,7 +20,7 @@
#include <cpufreq.h>
#include <loongson1.h>
-static struct {
+struct ls1x_cpufreq {
struct device *dev;
struct clk *clk; /* CPU clk */
struct clk *mux_clk; /* MUX of CPU clk */
@@ -28,7 +28,9 @@ static struct {
struct clk *osc_clk; /* OSC clk */
unsigned int max_freq;
unsigned int min_freq;
-} ls1x_cpufreq;
+};
+
+static struct ls1x_cpufreq *cpufreq;
static int ls1x_cpufreq_notifier(struct notifier_block *nb,
unsigned long val, void *data)
@@ -46,6 +48,7 @@ static struct notifier_block ls1x_cpufreq_notifier_block = {
static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
unsigned int index)
{
+ struct device *cpu_dev = get_cpu_device(policy->cpu);
unsigned int old_freq, new_freq;
old_freq = policy->cur;
@@ -60,53 +63,49 @@ static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
* - Reparent CPU clk back to CPU DIV clk
*/
- dev_dbg(ls1x_cpufreq.dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
- clk_set_parent(policy->clk, ls1x_cpufreq.osc_clk);
+ clk_set_parent(policy->clk, cpufreq->osc_clk);
__raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
LS1X_CLK_PLL_DIV);
__raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
LS1X_CLK_PLL_DIV);
- clk_set_rate(ls1x_cpufreq.mux_clk, new_freq * 1000);
- clk_set_parent(policy->clk, ls1x_cpufreq.mux_clk);
+ clk_set_rate(cpufreq->mux_clk, new_freq * 1000);
+ clk_set_parent(policy->clk, cpufreq->mux_clk);
+ dev_dbg(cpu_dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
return 0;
}
static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
{
+ struct device *cpu_dev = get_cpu_device(policy->cpu);
struct cpufreq_frequency_table *freq_tbl;
unsigned int pll_freq, freq;
int steps, i, ret;
- pll_freq = clk_get_rate(ls1x_cpufreq.pll_clk) / 1000;
+ pll_freq = clk_get_rate(cpufreq->pll_clk) / 1000;
steps = 1 << DIV_CPU_WIDTH;
- freq_tbl = kzalloc(sizeof(*freq_tbl) * steps, GFP_KERNEL);
- if (!freq_tbl) {
- dev_err(ls1x_cpufreq.dev,
- "failed to alloc cpufreq_frequency_table\n");
- ret = -ENOMEM;
- goto out;
- }
+ freq_tbl = kcalloc(steps, sizeof(*freq_tbl), GFP_KERNEL);
+ if (!freq_tbl)
+ return -ENOMEM;
for (i = 0; i < (steps - 1); i++) {
freq = pll_freq / (i + 1);
- if ((freq < ls1x_cpufreq.min_freq) ||
- (freq > ls1x_cpufreq.max_freq))
+ if ((freq < cpufreq->min_freq) || (freq > cpufreq->max_freq))
freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
else
freq_tbl[i].frequency = freq;
- dev_dbg(ls1x_cpufreq.dev,
+ dev_dbg(cpu_dev,
"cpufreq table: index %d: frequency %d\n", i,
freq_tbl[i].frequency);
}
freq_tbl[i].frequency = CPUFREQ_TABLE_END;
- policy->clk = ls1x_cpufreq.clk;
+ policy->clk = cpufreq->clk;
ret = cpufreq_generic_init(policy, freq_tbl, 0);
if (ret)
kfree(freq_tbl);
-out:
+
return ret;
}
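
The kzalloc(sizeof(*freq_tbl) * steps, ...) to kcalloc(steps, sizeof(*freq_tbl), ...) switch above is not purely cosmetic: kcalloc() checks the multiplication for overflow before allocating. Roughly (a sketch, not the actual slab code):

    /* What kcalloc(n, size, flags) guards against, approximately */
    if (size != 0 && n > SIZE_MAX / size)
            return NULL;            /* n * size would wrap around */
    return kzalloc(n * size, flags);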
@@ -138,85 +137,86 @@ static int ls1x_cpufreq_remove(struct platform_device *pdev)
static int ls1x_cpufreq_probe(struct platform_device *pdev)
{
- struct plat_ls1x_cpufreq *pdata = pdev->dev.platform_data;
+ struct plat_ls1x_cpufreq *pdata = dev_get_platdata(&pdev->dev);
struct clk *clk;
int ret;
- if (!pdata || !pdata->clk_name || !pdata->osc_clk_name)
+ if (!pdata || !pdata->clk_name || !pdata->osc_clk_name) {
+ dev_err(&pdev->dev, "platform data missing\n");
return -EINVAL;
+ }
- ls1x_cpufreq.dev = &pdev->dev;
+ cpufreq =
+ devm_kzalloc(&pdev->dev, sizeof(struct ls1x_cpufreq), GFP_KERNEL);
+ if (!cpufreq)
+ return -ENOMEM;
+
+ cpufreq->dev = &pdev->dev;
clk = devm_clk_get(&pdev->dev, pdata->clk_name);
if (IS_ERR(clk)) {
- dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
+ dev_err(&pdev->dev, "unable to get %s clock\n",
pdata->clk_name);
- ret = PTR_ERR(clk);
- goto out;
+ return PTR_ERR(clk);
}
- ls1x_cpufreq.clk = clk;
+ cpufreq->clk = clk;
clk = clk_get_parent(clk);
if (IS_ERR(clk)) {
- dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
- __clk_get_name(ls1x_cpufreq.clk));
- ret = PTR_ERR(clk);
- goto out;
+ dev_err(&pdev->dev, "unable to get parent of %s clock\n",
+ __clk_get_name(cpufreq->clk));
+ return PTR_ERR(clk);
}
- ls1x_cpufreq.mux_clk = clk;
+ cpufreq->mux_clk = clk;
clk = clk_get_parent(clk);
if (IS_ERR(clk)) {
- dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
- __clk_get_name(ls1x_cpufreq.mux_clk));
- ret = PTR_ERR(clk);
- goto out;
+ dev_err(&pdev->dev, "unable to get parent of %s clock\n",
+ __clk_get_name(cpufreq->mux_clk));
+ return PTR_ERR(clk);
}
- ls1x_cpufreq.pll_clk = clk;
+ cpufreq->pll_clk = clk;
clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
if (IS_ERR(clk)) {
- dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
+ dev_err(&pdev->dev, "unable to get %s clock\n",
pdata->osc_clk_name);
- ret = PTR_ERR(clk);
- goto out;
+ return PTR_ERR(clk);
}
- ls1x_cpufreq.osc_clk = clk;
+ cpufreq->osc_clk = clk;
- ls1x_cpufreq.max_freq = pdata->max_freq;
- ls1x_cpufreq.min_freq = pdata->min_freq;
+ cpufreq->max_freq = pdata->max_freq;
+ cpufreq->min_freq = pdata->min_freq;
ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
if (ret) {
- dev_err(ls1x_cpufreq.dev,
- "failed to register cpufreq driver: %d\n", ret);
- goto out;
+ dev_err(&pdev->dev,
+ "failed to register CPUFreq driver: %d\n", ret);
+ return ret;
}
ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
- if (!ret)
- goto out;
-
- dev_err(ls1x_cpufreq.dev, "failed to register cpufreq notifier: %d\n",
- ret);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to register CPUFreq notifier: %d\n",ret);
+ cpufreq_unregister_driver(&ls1x_cpufreq_driver);
+ }
- cpufreq_unregister_driver(&ls1x_cpufreq_driver);
-out:
return ret;
}
static struct platform_driver ls1x_cpufreq_platdrv = {
- .driver = {
+ .probe = ls1x_cpufreq_probe,
+ .remove = ls1x_cpufreq_remove,
+ .driver = {
.name = "ls1x-cpufreq",
},
- .probe = ls1x_cpufreq_probe,
- .remove = ls1x_cpufreq_remove,
};
module_platform_driver(ls1x_cpufreq_platdrv);
MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
-MODULE_DESCRIPTION("Loongson 1 CPUFreq driver");
+MODULE_DESCRIPTION("Loongson1 CPUFreq driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index cd593c1f6..6bbdac106 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -10,6 +10,9 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/err.h>
@@ -76,7 +79,7 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpuclk = clk_get(NULL, "cpu_clk");
if (IS_ERR(cpuclk)) {
- printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
+ pr_err("couldn't get CPU clk\n");
return PTR_ERR(cpuclk);
}
@@ -163,7 +166,7 @@ static int __init cpufreq_init(void)
if (ret)
return ret;
- pr_info("cpufreq: Loongson-2F CPU frequency driver.\n");
+ pr_info("Loongson-2F CPU frequency driver\n");
cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index cc3408fc0..d9df89392 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -13,6 +13,8 @@
#undef DEBUG
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -174,7 +176,7 @@ static int __init maple_cpufreq_init(void)
/* Get first CPU node */
cpunode = of_cpu_device_node_get(0);
if (cpunode == NULL) {
- printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
+ pr_err("Can't find any CPU 0 node\n");
goto bail_noprops;
}
@@ -182,8 +184,7 @@ static int __init maple_cpufreq_init(void)
/* we actually don't care on which CPU to access PVR */
pvr_hi = PVR_VER(mfspr(SPRN_PVR));
if (pvr_hi != 0x3c && pvr_hi != 0x44) {
- printk(KERN_ERR "cpufreq: Unsupported CPU version (%x)\n",
- pvr_hi);
+ pr_err("Unsupported CPU version (%x)\n", pvr_hi);
goto bail_noprops;
}
@@ -222,8 +223,8 @@ static int __init maple_cpufreq_init(void)
maple_pmode_cur = -1;
maple_scom_switch_freq(maple_scom_query_freq());
- printk(KERN_INFO "Registering Maple CPU frequency driver\n");
- printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+ pr_info("Registering Maple CPU frequency driver\n");
+ pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
maple_cpu_freqs[1].frequency/1000,
maple_cpu_freqs[0].frequency/1000,
maple_cpu_freqs[maple_pmode_cur].frequency/1000);
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
index 2058e6d29..643f43179 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -59,11 +59,8 @@ static LIST_HEAD(dvfs_info_list);
static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
{
struct mtk_cpu_dvfs_info *info;
- struct list_head *list;
-
- list_for_each(list, &dvfs_info_list) {
- info = list_entry(list, struct mtk_cpu_dvfs_info, list_head);
+ list_for_each_entry(info, &dvfs_info_list, list_head) {
if (cpumask_test_cpu(cpu, &info->cpus))
return info;
}
@@ -310,17 +307,24 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
return 0;
}
+#define DYNAMIC_POWER "dynamic-power-coefficient"
+
static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info = policy->driver_data;
struct device_node *np = of_node_get(info->cpu_dev->of_node);
+ u32 capacitance = 0;
if (WARN_ON(!np))
return;
if (of_find_property(np, "#cooling-cells", NULL)) {
- info->cdev = of_cpufreq_cooling_register(np,
- policy->related_cpus);
+ of_property_read_u32(np, DYNAMIC_POWER, &capacitance);
+
+ info->cdev = of_cpufreq_power_cooling_register(np,
+ policy->related_cpus,
+ capacitance,
+ NULL);
if (IS_ERR(info->cdev)) {
dev_err(info->cpu_dev,
@@ -524,8 +528,7 @@ static struct cpufreq_driver mt8173_cpufreq_driver = {
static int mt8173_cpufreq_probe(struct platform_device *pdev)
{
- struct mtk_cpu_dvfs_info *info;
- struct list_head *list, *tmp;
+ struct mtk_cpu_dvfs_info *info, *tmp;
int cpu, ret;
for_each_possible_cpu(cpu) {
@@ -559,11 +562,9 @@ static int mt8173_cpufreq_probe(struct platform_device *pdev)
return 0;
release_dvfs_info_list:
- list_for_each_safe(list, tmp, &dvfs_info_list) {
- info = list_entry(list, struct mtk_cpu_dvfs_info, list_head);
-
+ list_for_each_entry_safe(info, tmp, &dvfs_info_list, list_head) {
mtk_cpu_dvfs_info_release(info);
- list_del(list);
+ list_del(&info->list_head);
}
return ret;
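
The list_for_each()+list_entry() to list_for_each_entry_safe() conversions above drop the raw struct list_head cursor; the _safe variant keeps a second cursor (tmp) one node ahead, so the current entry may be unlinked and freed inside the loop body. Roughly, the macro expands to:

    /* Approximate expansion of
     * list_for_each_entry_safe(info, tmp, &dvfs_info_list, list_head) */
    for (info = list_first_entry(&dvfs_info_list, typeof(*info), list_head),
         tmp = list_next_entry(info, list_head);
         &info->list_head != &dvfs_info_list;
         info = tmp, tmp = list_next_entry(tmp, list_head)) {
            /* safe to list_del(&info->list_head) and free info here */
    }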
diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c
new file mode 100644
index 000000000..e920889b9
--- /dev/null
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -0,0 +1,107 @@
+/*
+ * CPUFreq support for Armada 370/XP platforms.
+ *
+ * Copyright (C) 2012-2016 Marvell
+ *
+ * Yehuda Yitschak <yehuday@marvell.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) "mvebu-pmsu: " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/resource.h>
+
+static int __init armada_xp_pmsu_cpufreq_init(void)
+{
+ struct device_node *np;
+ struct resource res;
+ int ret, cpu;
+
+ if (!of_machine_is_compatible("marvell,armadaxp"))
+ return 0;
+
+ /*
+ * In order to have proper cpufreq handling, we need to ensure
+ * that the Device Tree description of the CPU clock includes
+ * the definition of the PMU DFS registers. If not, we do not
+ * register the clock notifier and the cpufreq driver. This
+ * piece of code is only for compatibility with old Device
+ * Trees.
+ */
+ np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
+ if (!np)
+ return 0;
+
+ ret = of_address_to_resource(np, 1, &res);
+ if (ret) {
+ pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
+ of_node_put(np);
+ return 0;
+ }
+
+ of_node_put(np);
+
+ /*
+ * For each CPU, this loop registers the operating points
+ * supported (which are the nominal CPU frequency and half of
+ * it), and registers the clock notifier that will take care
+ * of doing the PMSU part of a frequency transition.
+ */
+ for_each_possible_cpu(cpu) {
+ struct device *cpu_dev;
+ struct clk *clk;
+ int ret;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("Cannot get CPU %d\n", cpu);
+ continue;
+ }
+
+ clk = clk_get(cpu_dev, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Cannot get clock for CPU %d\n", cpu);
+ return PTR_ERR(clk);
+ }
+
+ /*
+ * In case of a failure of dev_pm_opp_add(), we don't
+ * bother with cleaning up the registered OPP (there's
+ * no function to do so), and simply cancel the
+ * registration of the cpufreq device.
+ */
+ ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
+ if (ret) {
+ clk_put(clk);
+ return ret;
+ }
+
+ ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
+ if (ret) {
+ clk_put(clk);
+ return ret;
+ }
+
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
+ cpumask_of(cpu_dev->id));
+ if (ret)
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+ __func__, ret);
+ }
+
+ platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+ return 0;
+}
+device_initcall(armada_xp_pmsu_cpufreq_init);
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index e3866e0d5..376e63ca9 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -13,6 +13,9 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -51,7 +54,7 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
freq = new_freq * 1000;
ret = clk_round_rate(policy->clk, freq);
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
dev_warn(mpu_dev,
"CPUfreq: Cannot find matching frequency for %lu\n",
freq);
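
The IS_ERR_VALUE(ret) to ret < 0 change above is a correctness cleanup, not style: clk_round_rate() returns a long carrying either a rate in Hz or a negative errno, and IS_ERR_VALUE() is meant for pointer-sized error encodings, so it behaves surprisingly once the result passes through other integer types. A plain sign test is the clear, reliable check, e.g.:

    long rate = clk_round_rate(policy->clk, freq);
    if (rate < 0)                   /* negative errno, e.g. -EINVAL */
            return rate;
    /* rate now holds the closest rate the clock can actually run at */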
@@ -163,13 +166,13 @@ static int omap_cpufreq_probe(struct platform_device *pdev)
{
mpu_dev = get_cpu_device(0);
if (!mpu_dev) {
- pr_warning("%s: unable to get the mpu device\n", __func__);
+ pr_warn("%s: unable to get the MPU device\n", __func__);
return -EINVAL;
}
mpu_reg = regulator_get(mpu_dev, "vcc");
if (IS_ERR(mpu_reg)) {
- pr_warning("%s: unable to get MPU regulator\n", __func__);
+ pr_warn("%s: unable to get MPU regulator\n", __func__);
mpu_reg = NULL;
} else {
/*
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 5dd95dab5..fd7781231 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -20,6 +20,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -35,8 +37,6 @@
#include "speedstep-lib.h"
-#define PFX "p4-clockmod: "
-
/*
 * Duty Cycle (3 bits); note DC_DISABLE is not specified in
 * Intel docs, I just use it to mean disable
@@ -124,11 +124,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
if (c->x86 == 0x06) {
if (cpu_has(c, X86_FEATURE_EST))
- printk_once(KERN_WARNING PFX "Warning: EST-capable "
- "CPU detected. The acpi-cpufreq module offers "
- "voltage scaling in addition to frequency "
- "scaling. You should use that instead of "
- "p4-clockmod, if possible.\n");
+ pr_warn_once("Warning: EST-capable CPU detected. The acpi-cpufreq module offers voltage scaling in addition to frequency scaling. You should use that instead of p4-clockmod, if possible.\n");
switch (c->x86_model) {
case 0x0E: /* Core */
case 0x0F: /* Core Duo */
@@ -152,11 +148,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
- printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
- "The speedstep-ich or acpi cpufreq modules offer "
- "voltage scaling in addition of frequency scaling. "
- "You should use either one instead of p4-clockmod, "
- "if possible.\n");
+ pr_warn("Warning: Pentium 4-M detected. The speedstep-ich or acpi cpufreq modules offer voltage scaling in addition of frequency scaling. You should use either one instead of p4-clockmod, if possible.\n");
return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
}
@@ -265,8 +257,7 @@ static int __init cpufreq_p4_init(void)
ret = cpufreq_register_driver(&p4clockmod_driver);
if (!ret)
- printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock "
- "Modulation available\n");
+ pr_info("P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");
return ret;
}
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 808a320e9..3f0ce2ae3 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -487,7 +487,7 @@ static int __init pcc_cpufreq_probe(void)
doorbell.space_id = reg_resource->space_id;
doorbell.bit_width = reg_resource->bit_width;
doorbell.bit_offset = reg_resource->bit_offset;
- doorbell.access_width = 64;
+ doorbell.access_width = 4;
doorbell.address = reg_resource->address;
pr_debug("probe: doorbell: space_id is %d, bit_width is %d, "
@@ -555,8 +555,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->min = policy->cpuinfo.min_freq =
ioread32(&pcch_hdr->minimum_frequency) * 1000;
- policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-
pr_debug("init: policy->max is %d, policy->min is %d\n",
policy->max, policy->min);
out:
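
The doorbell.access_width change from 64 to 4 above is an encoding fix rather than a typo: in an ACPI Generic Address Structure, the access-size field is an enumeration, not a width in bits. Per the ACPI spec the encoding is (sketch; the enum name here is hypothetical):

    enum gas_access_size {              /* ACPI GAS access_size field */
            GAS_ACCESS_UNDEFINED = 0,
            GAS_ACCESS_BYTE      = 1,   /*  8-bit                     */
            GAS_ACCESS_WORD      = 2,   /* 16-bit                     */
            GAS_ACCESS_DWORD     = 3,   /* 32-bit                     */
            GAS_ACCESS_QWORD     = 4,   /* 64-bit: intended here      */
    };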
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 1f49d97a7..ff44016ea 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -13,6 +13,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -298,7 +300,7 @@ static int pmu_set_cpu_speed(int low_speed)
_set_L3CR(save_l3cr);
/* Restore userland MMU context */
- switch_mmu_context(NULL, current->active_mm);
+ switch_mmu_context(NULL, current->active_mm, NULL);
#ifdef DEBUG_FREQ
printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
@@ -481,13 +483,13 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
freqs = of_get_property(cpunode, "bus-frequencies", &lenp);
lenp /= sizeof(u32);
if (freqs == NULL || lenp != 2) {
- printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
+ pr_err("bus-frequencies incorrect or missing\n");
return 1;
}
ratio = of_get_property(cpunode, "processor-to-bus-ratio*2",
NULL);
if (ratio == NULL) {
- printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
+ pr_err("processor-to-bus-ratio*2 missing\n");
return 1;
}
@@ -550,7 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
if (!voltage_gpio){
- printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
+ pr_err("missing cpu-vcore-select gpio\n");
return 1;
}
@@ -675,9 +677,9 @@ out:
pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
ppc_proc_freq = cur_freq * 1000ul;
- printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
- printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
- low_freq/1000, hi_freq/1000, cur_freq/1000);
+ pr_info("Registering PowerMac CPU frequency driver\n");
+ pr_info("Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
+ low_freq/1000, hi_freq/1000, cur_freq/1000);
return cpufreq_register_driver(&pmac_cpufreq_driver);
}
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 4ff868787..267e0894c 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -12,6 +12,8 @@
#undef DEBUG
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -138,7 +140,7 @@ static void g5_vdnap_switch_volt(int speed_mode)
usleep_range(1000, 1000);
}
if (done == 0)
- printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
+ pr_warn("Timeout in clock slewing !\n");
}
@@ -266,7 +268,7 @@ static int g5_pfunc_switch_freq(int speed_mode)
rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL);
if (rc)
- printk(KERN_WARNING "cpufreq: pfunc switch error %d\n", rc);
+ pr_warn("pfunc switch error %d\n", rc);
/* It's an irq GPIO so we should be able to just block here,
* I'll do that later after I've properly tested the IRQ code for
@@ -282,7 +284,7 @@ static int g5_pfunc_switch_freq(int speed_mode)
usleep_range(500, 500);
}
if (done == 0)
- printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
+ pr_warn("Timeout in clock slewing !\n");
/* If frequency is going down, last ramp the voltage */
if (speed_mode > g5_pmode_cur)
@@ -368,7 +370,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
}
pvr_hi = (*valp) >> 16;
if (pvr_hi != 0x3c && pvr_hi != 0x44) {
- printk(KERN_ERR "cpufreq: Unsupported CPU version\n");
+ pr_err("Unsupported CPU version\n");
goto bail_noprops;
}
@@ -403,8 +405,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
root = of_find_node_by_path("/");
if (root == NULL) {
- printk(KERN_ERR "cpufreq: Can't find root of "
- "device tree\n");
+ pr_err("Can't find root of device tree\n");
goto bail_noprops;
}
pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0");
@@ -412,8 +413,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
pmf_find_function(root, "slewing-done");
if (pfunc_set_vdnap0 == NULL ||
pfunc_vdnap0_complete == NULL) {
- printk(KERN_ERR "cpufreq: Can't find required "
- "platform function\n");
+ pr_err("Can't find required platform function\n");
goto bail_noprops;
}
@@ -453,10 +453,10 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
g5_pmode_cur = -1;
g5_switch_freq(g5_query_freq());
- printk(KERN_INFO "Registering G5 CPU frequency driver\n");
- printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n",
- freq_method, volt_method);
- printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+ pr_info("Registering G5 CPU frequency driver\n");
+ pr_info("Frequency method: %s, Voltage method: %s\n",
+ freq_method, volt_method);
+ pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
g5_cpu_freqs[1].frequency/1000,
g5_cpu_freqs[0].frequency/1000,
g5_cpu_freqs[g5_pmode_cur].frequency/1000);
@@ -493,7 +493,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
if (cpuid != NULL)
eeprom = of_get_property(cpuid, "cpuid", NULL);
if (eeprom == NULL) {
- printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n");
+ pr_err("Can't find cpuid EEPROM !\n");
rc = -ENODEV;
goto bail;
}
@@ -511,7 +511,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
break;
}
if (hwclock == NULL) {
- printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n");
+ pr_err("Can't find i2c clock chip !\n");
rc = -ENODEV;
goto bail;
}
@@ -539,7 +539,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
/* Check we have minimum requirements */
if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL ||
pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) {
- printk(KERN_ERR "cpufreq: Can't find platform functions !\n");
+ pr_err("Can't find platform functions !\n");
rc = -ENODEV;
goto bail;
}
@@ -567,7 +567,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
/* Get max frequency from device-tree */
valp = of_get_property(cpunode, "clock-frequency", NULL);
if (!valp) {
- printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n");
+ pr_err("Can't find CPU frequency !\n");
rc = -ENODEV;
goto bail;
}
@@ -583,8 +583,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
/* Check for machines with no useful settings */
if (il == ih) {
- printk(KERN_WARNING "cpufreq: No low frequency mode available"
- " on this model !\n");
+ pr_warn("No low frequency mode available on this model !\n");
rc = -ENODEV;
goto bail;
}
@@ -595,7 +594,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
/* Sanity check */
if (min_freq >= max_freq || min_freq < 1000) {
- printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n");
+ pr_err("Can't calculate low frequency !\n");
rc = -ENXIO;
goto bail;
}
@@ -619,10 +618,10 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
g5_pmode_cur = -1;
g5_switch_freq(g5_query_freq());
- printk(KERN_INFO "Registering G5 CPU frequency driver\n");
- printk(KERN_INFO "Frequency method: i2c/pfunc, "
- "Voltage method: %s\n", has_volt ? "i2c/pfunc" : "none");
- printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
+ pr_info("Registering G5 CPU frequency driver\n");
+ pr_info("Frequency method: i2c/pfunc, Voltage method: %s\n",
+ has_volt ? "i2c/pfunc" : "none");
+ pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
g5_cpu_freqs[1].frequency/1000,
g5_cpu_freqs[0].frequency/1000,
g5_cpu_freqs[g5_pmode_cur].frequency/1000);
@@ -654,7 +653,7 @@ static int __init g5_cpufreq_init(void)
/* Get first CPU node */
cpunode = of_cpu_device_node_get(0);
if (cpunode == NULL) {
- pr_err("cpufreq: Can't find any CPU node\n");
+ pr_err("Can't find any CPU node\n");
return -ENODEV;
}
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index e6f24b281..dedd2568e 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -8,6 +8,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -22,7 +24,6 @@
#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long
as it is unused */
-#define PFX "powernow-k6: "
static unsigned int busfreq; /* FSB, in 10 kHz */
static unsigned int max_multiplier;
@@ -141,7 +142,7 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
{
if (clock_ratio[best_i].driver_data > max_multiplier) {
- printk(KERN_ERR PFX "invalid target frequency\n");
+ pr_err("invalid target frequency\n");
return -EINVAL;
}
@@ -175,13 +176,14 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
max_multiplier = param_max_multiplier;
goto have_max_multiplier;
}
- printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
+ pr_err("invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
return -EINVAL;
}
if (!max_multiplier) {
- printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
- printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
+ pr_warn("unknown frequency %u, cannot determine current multiplier\n",
+ khz);
+ pr_warn("use module parameters max_multiplier and bus_frequency\n");
return -EOPNOTSUPP;
}
@@ -193,7 +195,7 @@ have_max_multiplier:
busfreq = param_busfreq / 10;
goto have_busfreq;
}
- printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
+ pr_err("invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
return -EINVAL;
}
@@ -275,7 +277,7 @@ static int __init powernow_k6_init(void)
return -ENODEV;
if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) {
- printk(KERN_INFO PFX "PowerNow IOPORT region already used.\n");
+ pr_info("PowerNow IOPORT region already used\n");
return -EIO;
}
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index c1ae19997..9f013ed42 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -13,6 +13,8 @@
* - We disable half multipliers if ACPI is used on A0 stepping CPUs.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -35,9 +37,6 @@
#include "powernow-k7.h"
-#define PFX "powernow: "
-
-
struct psb_s {
u8 signature[10];
u8 tableversion;
@@ -127,14 +126,13 @@ static int check_powernow(void)
maxei = cpuid_eax(0x80000000);
if (maxei < 0x80000007) { /* Any powernow info ? */
#ifdef MODULE
- printk(KERN_INFO PFX "No powernow capabilities detected\n");
+ pr_info("No powernow capabilities detected\n");
#endif
return 0;
}
if ((c->x86_model == 6) && (c->x86_mask == 0)) {
- printk(KERN_INFO PFX "K7 660[A0] core detected, "
- "enabling errata workarounds\n");
+ pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
have_a0 = 1;
}
@@ -144,22 +142,22 @@ static int check_powernow(void)
if (!(edx & (1 << 1 | 1 << 2)))
return 0;
- printk(KERN_INFO PFX "PowerNOW! Technology present. Can scale: ");
+ pr_info("PowerNOW! Technology present. Can scale: ");
if (edx & 1 << 1) {
- printk("frequency");
+ pr_cont("frequency");
can_scale_bus = 1;
}
if ((edx & (1 << 1 | 1 << 2)) == 0x6)
- printk(" and ");
+ pr_cont(" and ");
if (edx & 1 << 2) {
- printk("voltage");
+ pr_cont("voltage");
can_scale_vid = 1;
}
- printk(".\n");
+ pr_cont("\n");
return 1;
}
@@ -427,16 +425,14 @@ err1:
err05:
kfree(acpi_processor_perf);
err0:
- printk(KERN_WARNING PFX "ACPI perflib can not be used on "
- "this platform\n");
+ pr_warn("ACPI perflib can not be used on this platform\n");
acpi_processor_perf = NULL;
return retval;
}
#else
static int powernow_acpi_init(void)
{
- printk(KERN_INFO PFX "no support for ACPI processor found."
- " Please recompile your kernel with ACPI processor\n");
+ pr_info("no support for ACPI processor found - please recompile your kernel with ACPI processor\n");
return -EINVAL;
}
#endif
@@ -468,8 +464,7 @@ static int powernow_decode_bios(int maxfid, int startvid)
psb = (struct psb_s *) p;
pr_debug("Table version: 0x%x\n", psb->tableversion);
if (psb->tableversion != 0x12) {
- printk(KERN_INFO PFX "Sorry, only v1.2 tables"
- " supported right now\n");
+ pr_info("Sorry, only v1.2 tables supported right now\n");
return -ENODEV;
}
@@ -481,10 +476,8 @@ static int powernow_decode_bios(int maxfid, int startvid)
latency = psb->settlingtime;
if (latency < 100) {
- printk(KERN_INFO PFX "BIOS set settling time "
- "to %d microseconds. "
- "Should be at least 100. "
- "Correcting.\n", latency);
+ pr_info("BIOS set settling time to %d microseconds. Should be at least 100. Correcting.\n",
+ latency);
latency = 100;
}
pr_debug("Settling Time: %d microseconds.\n",
@@ -516,10 +509,9 @@ static int powernow_decode_bios(int maxfid, int startvid)
p += 2;
}
}
- printk(KERN_INFO PFX "No PST tables match this cpuid "
- "(0x%x)\n", etuple);
- printk(KERN_INFO PFX "This is indicative of a broken "
- "BIOS.\n");
+ pr_info("No PST tables match this cpuid (0x%x)\n",
+ etuple);
+ pr_info("This is indicative of a broken BIOS\n");
return -EINVAL;
}
@@ -552,7 +544,7 @@ static int fixup_sgtc(void)
sgtc = 100 * m * latency;
sgtc = sgtc / 3;
if (sgtc > 0xfffff) {
- printk(KERN_WARNING PFX "SGTC too large %d\n", sgtc);
+ pr_warn("SGTC too large %d\n", sgtc);
sgtc = 0xfffff;
}
return sgtc;
@@ -574,14 +566,10 @@ static unsigned int powernow_get(unsigned int cpu)
static int acer_cpufreq_pst(const struct dmi_system_id *d)
{
- printk(KERN_WARNING PFX
- "%s laptop with broken PST tables in BIOS detected.\n",
+ pr_warn("%s laptop with broken PST tables in BIOS detected\n",
d->ident);
- printk(KERN_WARNING PFX
- "You need to downgrade to 3A21 (09/09/2002), or try a newer "
- "BIOS than 3A71 (01/20/2003)\n");
- printk(KERN_WARNING PFX
- "cpufreq scaling has been disabled as a result of this.\n");
+ pr_warn("You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n");
+ pr_warn("cpufreq scaling has been disabled as a result of this\n");
return 0;
}
@@ -616,40 +604,38 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
if (!fsb) {
- printk(KERN_WARNING PFX "can not determine bus frequency\n");
+ pr_warn("can not determine bus frequency\n");
return -EINVAL;
}
pr_debug("FSB: %3dMHz\n", fsb/1000);
if (dmi_check_system(powernow_dmi_table) || acpi_force) {
- printk(KERN_INFO PFX "PSB/PST known to be broken. "
- "Trying ACPI instead\n");
+ pr_info("PSB/PST known to be broken - trying ACPI instead\n");
result = powernow_acpi_init();
} else {
result = powernow_decode_bios(fidvidstatus.bits.MFID,
fidvidstatus.bits.SVID);
if (result) {
- printk(KERN_INFO PFX "Trying ACPI perflib\n");
+ pr_info("Trying ACPI perflib\n");
maximum_speed = 0;
minimum_speed = -1;
latency = 0;
result = powernow_acpi_init();
if (result) {
- printk(KERN_INFO PFX
- "ACPI and legacy methods failed\n");
+ pr_info("ACPI and legacy methods failed\n");
}
} else {
/* SGTC use the bus clock as timer */
latency = fixup_sgtc();
- printk(KERN_INFO PFX "SGTC: %d\n", latency);
+ pr_info("SGTC: %d\n", latency);
}
}
if (result)
return result;
- printk(KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n",
- minimum_speed/1000, maximum_speed/1000);
+ pr_info("Minimum speed %d MHz - Maximum speed %d MHz\n",
+ minimum_speed/1000, maximum_speed/1000);
policy->cpuinfo.transition_latency =
cpufreq_scale(2000000UL, fsb, latency);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 39ac78c94..54c45368e 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -36,12 +36,56 @@
#include <asm/reg.h>
#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
#include <asm/opal.h>
+#include <linux/timer.h>
#define POWERNV_MAX_PSTATES 256
#define PMSR_PSAFE_ENABLE (1UL << 30)
#define PMSR_SPR_EM_DISABLE (1UL << 31)
#define PMSR_MAX(x) ((x >> 32) & 0xFF)
+#define MAX_RAMP_DOWN_TIME 5120
+/*
+ * On an idle system we want the global pstate to ramp down from the max value
+ * to the min over a span of ~5 secs. We also want it to ramp down slowly at
+ * first and then more rapidly later on.
+ *
+ * This gives a percentage rampdown for time elapsed in milliseconds.
+ * ramp_down_percentage = ((ms * ms) >> 18)
+ * ~= 3.8 * (sec * sec)
+ *
+ * At 0 ms ramp_down_percent = 0
+ * At 5120 ms ramp_down_percent = 100
+ */
+#define ramp_down_percent(time) ((time * time) >> 18)
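
Plugging numbers into ramp_down_percent() confirms the curve the comment describes (integer arithmetic, so small times round down):

    /* ramp_down_percent(time) = (time * time) >> 18, time in ms */
    (1000 * 1000) >> 18   /* =   3, ~3.8% after 1 s     */
    (2560 * 2560) >> 18   /* =  25, 25% at half time    */
    (5120 * 5120) >> 18   /* = 100, full ramp at 5.12 s */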
+
+/* Interval after which the timer is queued to bring down global pstate */
+#define GPSTATE_TIMER_INTERVAL 2000
+
+/**
+ * struct global_pstate_info - Per policy data structure to maintain history of
+ * global pstates
+ * @highest_lpstate: The local pstate from which we are ramping down
+ * @elapsed_time: Time in ms spent in ramping down from
+ * highest_lpstate
+ * @last_sampled_time: Time from boot in ms when global pstates were
+ * last set
+ * @last_lpstate,last_gpstate: Last set values for local and global pstates
+ * @timer:			Used for ramping down if the CPU goes idle for
+ *				a long time with the global pstate held high
+ * @gpstate_lock:		A spinlock to maintain synchronization between
+ *				routines called by the timer handler and
+ *				governor's target_index calls
+ */
+struct global_pstate_info {
+ int highest_lpstate;
+ unsigned int elapsed_time;
+ unsigned int last_sampled_time;
+ int last_lpstate;
+ int last_gpstate;
+ spinlock_t gpstate_lock;
+ struct timer_list timer;
+};
+
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
static bool rebooting, throttled, occ_reset;
@@ -94,6 +138,17 @@ static struct powernv_pstate_info {
int nr_pstates;
} powernv_pstate_info;
+static inline void reset_gpstates(struct cpufreq_policy *policy)
+{
+ struct global_pstate_info *gpstates = policy->driver_data;
+
+ gpstates->highest_lpstate = 0;
+ gpstates->elapsed_time = 0;
+ gpstates->last_sampled_time = 0;
+ gpstates->last_lpstate = 0;
+ gpstates->last_gpstate = 0;
+}
+
/*
* Initialize the freq table based on data obtained
* from the firmware passed via device-tree
@@ -285,6 +340,7 @@ static inline void set_pmspr(unsigned long sprn, unsigned long val)
struct powernv_smp_call_data {
unsigned int freq;
int pstate_id;
+ int gpstate_id;
};
/*
@@ -343,19 +399,21 @@ static unsigned int powernv_cpufreq_get(unsigned int cpu)
* (struct powernv_smp_call_data *) and the pstate_id which needs to be set
* on this CPU should be present in freq_data->pstate_id.
*/
-static void set_pstate(void *freq_data)
+static void set_pstate(void *data)
{
unsigned long val;
- unsigned long pstate_ul =
- ((struct powernv_smp_call_data *) freq_data)->pstate_id;
+ struct powernv_smp_call_data *freq_data = data;
+ unsigned long pstate_ul = freq_data->pstate_id;
+ unsigned long gpstate_ul = freq_data->gpstate_id;
val = get_pmspr(SPRN_PMCR);
val = val & 0x0000FFFFFFFFFFFFULL;
pstate_ul = pstate_ul & 0xFF;
+ gpstate_ul = gpstate_ul & 0xFF;
/* Set both global(bits 56..63) and local(bits 48..55) PStates */
- val = val | (pstate_ul << 56) | (pstate_ul << 48);
+ val = val | (gpstate_ul << 56) | (pstate_ul << 48);
pr_debug("Setting cpu %d pmcr to %016lX\n",
raw_smp_processor_id(), val);
@@ -424,6 +482,111 @@ next:
}
}
+/**
+ * calc_global_pstate - Calculate global pstate
+ * @elapsed_time: Elapsed time in milliseconds
+ * @local_pstate: New local pstate
+ * @highest_lpstate:	pstate from which it is ramping down
+ *
+ * Finds the appropriate global pstate based on the pstate from which it is
+ * ramping down and the time elapsed while ramping down. It follows a quadratic
+ * equation which ensures that the ramp-down to pmin completes within 5 seconds.
+ */
+static inline int calc_global_pstate(unsigned int elapsed_time,
+ int highest_lpstate, int local_pstate)
+{
+ int pstate_diff;
+
+ /*
+	 * Using ramp_down_percent we get the percentage of rampdown
+	 * expected at this point. The difference between highest_lpstate
+	 * and powernv_pstate_info.min gives the absolute number of pstates
+	 * that will eventually be dropped by the end of 5 seconds; scaling
+	 * it yields the number of pstates to drop now.
+ */
+ pstate_diff = ((int)ramp_down_percent(elapsed_time) *
+ (highest_lpstate - powernv_pstate_info.min)) / 100;
+
+ /* Ensure that global pstate is >= to local pstate */
+ if (highest_lpstate - pstate_diff < local_pstate)
+ return local_pstate;
+ else
+ return highest_lpstate - pstate_diff;
+}
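
A worked example with hypothetical values: with highest_lpstate = 50, powernv_pstate_info.min = 10 (a 40-pstate range) and 2560 ms elapsed, ramp_down_percent() is 25, so:

    /* hypothetical numbers, illustration only */
    pstate_diff = (25 * (50 - 10)) / 100;   /* = 10 pstates         */
    /* returned gpstate = max(local_pstate, 50 - 10), i.e. >= 40    */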
+
+static inline void queue_gpstate_timer(struct global_pstate_info *gpstates)
+{
+ unsigned int timer_interval;
+
+ /*
+	 * Set up the timer to fire after GPSTATE_TIMER_INTERVAL ms, but
+	 * if that would push the total ramp-down time past
+	 * MAX_RAMP_DOWN_TIME ms, shorten the interval so the timer fires
+	 * exactly at the MAX_RAMP_DOWN_TIME boundary.
+ */
+ if ((gpstates->elapsed_time + GPSTATE_TIMER_INTERVAL)
+ > MAX_RAMP_DOWN_TIME)
+ timer_interval = MAX_RAMP_DOWN_TIME - gpstates->elapsed_time;
+ else
+ timer_interval = GPSTATE_TIMER_INTERVAL;
+
+ mod_timer_pinned(&gpstates->timer, jiffies +
+ msecs_to_jiffies(timer_interval));
+}
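
With the constants above (2000 ms interval, 5120 ms cap), the clamp means a rampdown started at t = 0 is re-queued roughly as:

    /* elapsed_time when queueing  ->  interval chosen        */
    /*    0 ms  ->  2000 ms   (0    + 2000 <= 5120)           */
    /* 2000 ms  ->  2000 ms   (2000 + 2000 <= 5120)           */
    /* 4000 ms  ->  1120 ms   (clamped to 5120 - 4000)        */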
+
+/**
+ * gpstate_timer_handler - bring the global pstate down toward the local pstate
+ *
+ * @data: pointer to cpufreq_policy on which timer was queued
+ *
+ * This handler brings the global pstate down closer to the local pstate
+ * according to the quadratic equation, and queues a new timer if the two
+ * are still not equal.
+ */
+void gpstate_timer_handler(unsigned long data)
+{
+ struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
+ struct global_pstate_info *gpstates = policy->driver_data;
+ int gpstate_id;
+ unsigned int time_diff = jiffies_to_msecs(jiffies)
+ - gpstates->last_sampled_time;
+ struct powernv_smp_call_data freq_data;
+
+ if (!spin_trylock(&gpstates->gpstate_lock))
+ return;
+
+ gpstates->last_sampled_time += time_diff;
+ gpstates->elapsed_time += time_diff;
+ freq_data.pstate_id = gpstates->last_lpstate;
+
+ if ((gpstates->last_gpstate == freq_data.pstate_id) ||
+ (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME)) {
+ gpstate_id = freq_data.pstate_id;
+ reset_gpstates(policy);
+ gpstates->highest_lpstate = freq_data.pstate_id;
+ } else {
+ gpstate_id = calc_global_pstate(gpstates->elapsed_time,
+ gpstates->highest_lpstate,
+ freq_data.pstate_id);
+ }
+
+ /*
+	 * If the local pstate is equal to the global pstate, rampdown is over,
+	 * so the timer need not be queued.
+ */
+ if (gpstate_id != freq_data.pstate_id)
+ queue_gpstate_timer(gpstates);
+
+ freq_data.gpstate_id = gpstate_id;
+ gpstates->last_gpstate = freq_data.gpstate_id;
+ gpstates->last_lpstate = freq_data.pstate_id;
+
+ spin_unlock(&gpstates->gpstate_lock);
+
+ /* Timer may get migrated to a different cpu on cpu hot unplug */
+ smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
+}
+
/*
* powernv_cpufreq_target_index: Sets the frequency corresponding to
* the cpufreq table entry indexed by new_index on the cpus in the
@@ -433,6 +596,8 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
unsigned int new_index)
{
struct powernv_smp_call_data freq_data;
+ unsigned int cur_msec, gpstate_id;
+ struct global_pstate_info *gpstates = policy->driver_data;
if (unlikely(rebooting) && new_index != get_nominal_index())
return 0;
@@ -440,28 +605,81 @@ static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
if (!throttled)
powernv_cpufreq_throttle_check(NULL);
+ cur_msec = jiffies_to_msecs(get_jiffies_64());
+
+ spin_lock(&gpstates->gpstate_lock);
freq_data.pstate_id = powernv_freqs[new_index].driver_data;
+ if (!gpstates->last_sampled_time) {
+ gpstate_id = freq_data.pstate_id;
+ gpstates->highest_lpstate = freq_data.pstate_id;
+ goto gpstates_done;
+ }
+
+ if (gpstates->last_gpstate > freq_data.pstate_id) {
+ gpstates->elapsed_time += cur_msec -
+ gpstates->last_sampled_time;
+
+ /*
+		 * If it has been ramping down for more than MAX_RAMP_DOWN_TIME,
+		 * reset all global pstate related data and set it equal to the
+		 * local pstate to start fresh.
+ */
+ if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
+ reset_gpstates(policy);
+ gpstates->highest_lpstate = freq_data.pstate_id;
+ gpstate_id = freq_data.pstate_id;
+ } else {
+			/* Elapsed time is less than 5 seconds; continue to ramp down */
+ gpstate_id = calc_global_pstate(gpstates->elapsed_time,
+ gpstates->highest_lpstate,
+ freq_data.pstate_id);
+ }
+ } else {
+ reset_gpstates(policy);
+ gpstates->highest_lpstate = freq_data.pstate_id;
+ gpstate_id = freq_data.pstate_id;
+ }
+
+ /*
+	 * If the local pstate is equal to the global pstate, rampdown is over,
+	 * so the timer need not be queued.
+ */
+ if (gpstate_id != freq_data.pstate_id)
+ queue_gpstate_timer(gpstates);
+ else
+ del_timer_sync(&gpstates->timer);
+
+gpstates_done:
+ freq_data.gpstate_id = gpstate_id;
+ gpstates->last_sampled_time = cur_msec;
+ gpstates->last_gpstate = freq_data.gpstate_id;
+ gpstates->last_lpstate = freq_data.pstate_id;
+
+ spin_unlock(&gpstates->gpstate_lock);
+
/*
* Use smp_call_function to send IPI and execute the
* mtspr on target CPU. We could do that without IPI
* if current CPU is within policy->cpus (core)
*/
smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
-
return 0;
}
static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- int base, i;
+ int base, i, ret;
+ struct kernfs_node *kn;
+ struct global_pstate_info *gpstates;
base = cpu_first_thread_sibling(policy->cpu);
for (i = 0; i < threads_per_core; i++)
cpumask_set_cpu(base + i, policy->cpus);
- if (!policy->driver_data) {
+ kn = kernfs_find_and_get(policy->kobj.sd, throttle_attr_grp.name);
+ if (!kn) {
int ret;
ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
@@ -470,13 +688,37 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->cpu);
return ret;
}
- /*
- * policy->driver_data is used as a flag for one-time
- * creation of throttle sysfs files.
- */
- policy->driver_data = policy;
+ } else {
+ kernfs_put(kn);
}
- return cpufreq_table_validate_and_show(policy, powernv_freqs);
+
+ gpstates = kzalloc(sizeof(*gpstates), GFP_KERNEL);
+ if (!gpstates)
+ return -ENOMEM;
+
+ policy->driver_data = gpstates;
+
+ /* initialize timer */
+ init_timer_deferrable(&gpstates->timer);
+ gpstates->timer.data = (unsigned long)policy;
+ gpstates->timer.function = gpstate_timer_handler;
+ gpstates->timer.expires = jiffies +
+ msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
+ spin_lock_init(&gpstates->gpstate_lock);
+ ret = cpufreq_table_validate_and_show(policy, powernv_freqs);
+
+ if (ret < 0)
+ kfree(policy->driver_data);
+
+ return ret;
+}
+
+static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ /* timer is deleted in cpufreq_cpu_stop() */
+ kfree(policy->driver_data);
+
+ return 0;
}
static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
@@ -604,15 +846,19 @@ static struct notifier_block powernv_cpufreq_opal_nb = {
static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
struct powernv_smp_call_data freq_data;
+ struct global_pstate_info *gpstates = policy->driver_data;
freq_data.pstate_id = powernv_pstate_info.min;
+ freq_data.gpstate_id = powernv_pstate_info.min;
smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
+ del_timer_sync(&gpstates->timer);
}
static struct cpufreq_driver powernv_cpufreq_driver = {
.name = "powernv-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
.init = powernv_cpufreq_cpu_init,
+ .exit = powernv_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = powernv_cpufreq_target_index,
.get = powernv_cpufreq_get,
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.h b/drivers/cpufreq/ppc_cbe_cpufreq.h
index b4c00a5a6..3eace725c 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.h
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.h
@@ -17,7 +17,7 @@ int cbe_cpufreq_get_pmode(int cpu);
int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
-#if defined(CONFIG_CPU_FREQ_CBE_PMI) || defined(CONFIG_CPU_FREQ_CBE_PMI_MODULE)
+#if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
extern bool cbe_cpufreq_has_pmi;
#else
#define cbe_cpufreq_has_pmi (0)
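
IS_ENABLED() is the idiomatic replacement for the open-coded built-in-or-module test: it evaluates to 1 when the config option is =y or =m and to 0 otherwise, so the two forms are equivalent:

    /* old, open-coded form: */
    #if defined(CONFIG_CPU_FREQ_CBE_PMI) || defined(CONFIG_CPU_FREQ_CBE_PMI_MODULE)
    /* new form: */
    #if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)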
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index 7969f7690..7c4cd5c63 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -23,7 +23,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timer.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of_platform.h>
#include <asm/processor.h>
@@ -142,15 +142,4 @@ static int __init cbe_cpufreq_pmi_init(void)
return 0;
}
-
-static void __exit cbe_cpufreq_pmi_exit(void)
-{
- cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
- pmi_unregister_handler(&cbe_pmi_handler);
-}
-
-module_init(cbe_cpufreq_pmi_init);
-module_exit(cbe_cpufreq_pmi_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
+device_initcall(cbe_cpufreq_pmi_init);
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 46fee1539..ce345bf34 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -29,6 +29,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -186,8 +188,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
ret = regulator_set_voltage(vcc_core, vmin, vmax);
if (ret)
- pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n",
- vmin, vmax);
+ pr_err("Failed to set vcc_core in [%dmV..%dmV]\n", vmin, vmax);
return ret;
}
@@ -195,10 +196,10 @@ static void __init pxa_cpufreq_init_voltages(void)
{
vcc_core = regulator_get(NULL, "vcc_core");
if (IS_ERR(vcc_core)) {
- pr_info("cpufreq: Didn't find vcc_core regulator\n");
+ pr_info("Didn't find vcc_core regulator\n");
vcc_core = NULL;
} else {
- pr_info("cpufreq: Found vcc_core regulator\n");
+ pr_info("Found vcc_core regulator\n");
}
}
#else
@@ -233,9 +234,8 @@ static void pxa27x_guess_max_freq(void)
{
if (!pxa27x_maxfreq) {
pxa27x_maxfreq = 416000;
- printk(KERN_INFO "PXA CPU 27x max frequency not defined "
- "(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
- pxa27x_maxfreq);
+ pr_info("PXA CPU 27x max frequency not defined (pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
+ pxa27x_maxfreq);
} else {
pxa27x_maxfreq *= 1000;
}
@@ -408,7 +408,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
*/
if (cpu_is_pxa25x()) {
find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
- pr_info("PXA255 cpufreq using %s frequency table\n",
+ pr_info("using %s frequency table\n",
pxa255_turbo_table ? "turbo" : "run");
cpufreq_table_validate_and_show(policy, pxa255_freq_table);
@@ -417,7 +417,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
cpufreq_table_validate_and_show(policy, pxa27x_freq_table);
}
- printk(KERN_INFO "PXA CPU frequency change support initialized\n");
+ pr_info("frequency change support initialized\n");
return 0;
}
diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c
index b23e525a7..53d8c3fb1 100644
--- a/drivers/cpufreq/qoriq-cpufreq.c
+++ b/drivers/cpufreq/qoriq-cpufreq.c
@@ -301,10 +301,11 @@ err_np:
return -ENODEV;
}
-static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct cpu_data *data = policy->driver_data;
+ cpufreq_cooling_unregister(data->cdev);
kfree(data->pclk);
kfree(data->table);
kfree(data);
@@ -333,8 +334,8 @@ static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
cpud->cdev = of_cpufreq_cooling_register(np,
policy->related_cpus);
- if (IS_ERR(cpud->cdev)) {
- pr_err("Failed to register cooling device cpu%d: %ld\n",
+ if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) {
+ pr_err("cpu%d is not running as cooling device: %ld\n",
policy->cpu, PTR_ERR(cpud->cdev));
cpud->cdev = NULL;
@@ -348,7 +349,7 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
.name = "qoriq_cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
.init = qoriq_cpufreq_cpu_init,
- .exit = __exit_p(qoriq_cpufreq_cpu_exit),
+ .exit = qoriq_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = qoriq_cpufreq_target,
.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c
index eb262133f..b04b6f02b 100644
--- a/drivers/cpufreq/s3c2412-cpufreq.c
+++ b/drivers/cpufreq/s3c2412-cpufreq.c
@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -197,21 +199,20 @@ static int s3c2412_cpufreq_add(struct device *dev,
hclk = clk_get(NULL, "hclk");
if (IS_ERR(hclk)) {
- printk(KERN_ERR "%s: cannot find hclk clock\n", __func__);
+ pr_err("cannot find hclk clock\n");
return -ENOENT;
}
fclk = clk_get(NULL, "fclk");
if (IS_ERR(fclk)) {
- printk(KERN_ERR "%s: cannot find fclk clock\n", __func__);
+ pr_err("cannot find fclk clock\n");
goto err_fclk;
}
fclk_rate = clk_get_rate(fclk);
if (fclk_rate > 200000000) {
- printk(KERN_INFO
- "%s: fclk %ld MHz, assuming 266MHz capable part\n",
- __func__, fclk_rate / 1000000);
+ pr_info("fclk %ld MHz, assuming 266MHz capable part\n",
+ fclk_rate / 1000000);
s3c2412_cpufreq_info.max.fclk = 266000000;
s3c2412_cpufreq_info.max.hclk = 133000000;
s3c2412_cpufreq_info.max.pclk = 66000000;
@@ -219,13 +220,13 @@ static int s3c2412_cpufreq_add(struct device *dev,
armclk = clk_get(NULL, "armclk");
if (IS_ERR(armclk)) {
- printk(KERN_ERR "%s: cannot find arm clock\n", __func__);
+ pr_err("cannot find arm clock\n");
goto err_armclk;
}
xtal = clk_get(NULL, "xtal");
if (IS_ERR(xtal)) {
- printk(KERN_ERR "%s: cannot find xtal clock\n", __func__);
+ pr_err("cannot find xtal clock\n");
goto err_xtal;
}
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
index 0129f5c70..d0d75b65d 100644
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -66,7 +68,7 @@ static int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
__func__, fclk, armclk, hclk_max);
if (armclk > fclk) {
- printk(KERN_WARNING "%s: armclk > fclk\n", __func__);
+ pr_warn("%s: armclk > fclk\n", __func__);
armclk = fclk;
}
@@ -273,7 +275,7 @@ static int s3c2440_cpufreq_add(struct device *dev,
armclk = s3c_cpufreq_clk_get(NULL, "armclk");
if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) {
- printk(KERN_ERR "%s: failed to get clocks\n", __func__);
+ pr_err("%s: failed to get clocks\n", __func__);
return -ENOENT;
}
diff --git a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
index 9b7b4289d..4d976e8db 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/export.h>
#include <linux/interrupt.h>
@@ -178,7 +180,7 @@ static int __init s3c_freq_debugfs_init(void)
{
dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL);
if (IS_ERR(dbgfs_root)) {
- printk(KERN_ERR "%s: error creating debugfs root\n", __func__);
+ pr_err("%s: error creating debugfs root\n", __func__);
return PTR_ERR(dbgfs_root);
}
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 68ef8fd94..ae8eaed77 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -175,7 +177,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
cpu_new.freq.fclk = cpu_new.pll.frequency;
if (s3c_cpufreq_calcdivs(&cpu_new) < 0) {
- printk(KERN_ERR "no divisors for %d\n", target_freq);
+ pr_err("no divisors for %d\n", target_freq);
goto err_notpossible;
}
@@ -187,7 +189,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
if (cpu_new.freq.hclk != cpu_cur.freq.hclk) {
if (s3c_cpufreq_calcio(&cpu_new) < 0) {
- printk(KERN_ERR "%s: no IO timings\n", __func__);
+ pr_err("%s: no IO timings\n", __func__);
goto err_notpossible;
}
}
@@ -262,7 +264,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
return 0;
err_notpossible:
- printk(KERN_ERR "no compatible settings for %d\n", target_freq);
+ pr_err("no compatible settings for %d\n", target_freq);
return -EINVAL;
}
@@ -331,7 +333,7 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
&index);
if (ret < 0) {
- printk(KERN_ERR "%s: no PLL available\n", __func__);
+ pr_err("%s: no PLL available\n", __func__);
goto err_notpossible;
}
@@ -346,7 +348,7 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
return s3c_cpufreq_settarget(policy, target_freq, pll);
err_notpossible:
- printk(KERN_ERR "no compatible settings for %d\n", target_freq);
+ pr_err("no compatible settings for %d\n", target_freq);
return -EINVAL;
}
@@ -356,7 +358,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
clk = clk_get(dev, name);
if (IS_ERR(clk))
- printk(KERN_ERR "cpufreq: failed to get clock '%s'\n", name);
+ pr_err("failed to get clock '%s'\n", name);
return clk;
}
@@ -378,15 +380,16 @@ static int __init s3c_cpufreq_initclks(void)
if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) ||
IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) {
- printk(KERN_ERR "%s: could not get clock(s)\n", __func__);
+ pr_err("%s: could not get clock(s)\n", __func__);
return -ENOENT;
}
- printk(KERN_INFO "%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n", __func__,
- clk_get_rate(clk_fclk) / 1000,
- clk_get_rate(clk_hclk) / 1000,
- clk_get_rate(clk_pclk) / 1000,
- clk_get_rate(clk_arm) / 1000);
+ pr_info("%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n",
+ __func__,
+ clk_get_rate(clk_fclk) / 1000,
+ clk_get_rate(clk_hclk) / 1000,
+ clk_get_rate(clk_pclk) / 1000,
+ clk_get_rate(clk_arm) / 1000);
return 0;
}
@@ -424,7 +427,7 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll);
if (ret) {
- printk(KERN_ERR "%s: failed to reset pll/freq\n", __func__);
+ pr_err("%s: failed to reset pll/freq\n", __func__);
return ret;
}
@@ -449,13 +452,12 @@ static struct cpufreq_driver s3c24xx_driver = {
int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
{
if (!info || !info->name) {
- printk(KERN_ERR "%s: failed to pass valid information\n",
- __func__);
+ pr_err("%s: failed to pass valid information\n", __func__);
return -EINVAL;
}
- printk(KERN_INFO "S3C24XX CPU Frequency driver, %s cpu support\n",
- info->name);
+ pr_info("S3C24XX CPU Frequency driver, %s cpu support\n",
+ info->name);
/* check our driver info has valid data */
@@ -478,7 +480,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
struct s3c_cpufreq_board *ours;
if (!board) {
- printk(KERN_INFO "%s: no board data\n", __func__);
+ pr_info("%s: no board data\n", __func__);
return -EINVAL;
}
@@ -487,7 +489,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
ours = kzalloc(sizeof(*ours), GFP_KERNEL);
if (ours == NULL) {
- printk(KERN_ERR "%s: no memory\n", __func__);
+ pr_err("%s: no memory\n", __func__);
return -ENOMEM;
}
@@ -502,15 +504,15 @@ static int __init s3c_cpufreq_auto_io(void)
int ret;
if (!cpu_cur.info->get_iotiming) {
- printk(KERN_ERR "%s: get_iotiming undefined\n", __func__);
+ pr_err("%s: get_iotiming undefined\n", __func__);
return -ENOENT;
}
- printk(KERN_INFO "%s: working out IO settings\n", __func__);
+ pr_info("%s: working out IO settings\n", __func__);
ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming);
if (ret)
- printk(KERN_ERR "%s: failed to get timings\n", __func__);
+ pr_err("%s: failed to get timings\n", __func__);
return ret;
}
@@ -561,7 +563,7 @@ static void s3c_cpufreq_update_loctkime(void)
val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits;
val |= calc_locktime(rate, cpu_cur.info->locktime_m);
- printk(KERN_INFO "%s: new locktime is 0x%08x\n", __func__, val);
+ pr_info("%s: new locktime is 0x%08x\n", __func__, val);
__raw_writel(val, S3C2410_LOCKTIME);
}
@@ -580,7 +582,7 @@ static int s3c_cpufreq_build_freq(void)
ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL);
if (!ftab) {
- printk(KERN_ERR "%s: no memory for tables\n", __func__);
+ pr_err("%s: no memory for tables\n", __func__);
return -ENOMEM;
}
@@ -608,15 +610,14 @@ static int __init s3c_cpufreq_initcall(void)
if (cpu_cur.board->auto_io) {
ret = s3c_cpufreq_auto_io();
if (ret) {
- printk(KERN_ERR "%s: failed to get io timing\n",
+ pr_err("%s: failed to get io timing\n",
__func__);
goto out;
}
}
if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) {
- printk(KERN_ERR "%s: no IO support registered\n",
- __func__);
+ pr_err("%s: no IO support registered\n", __func__);
ret = -EINVAL;
goto out;
}
@@ -666,9 +667,9 @@ int s3c_plltab_register(struct cpufreq_frequency_table *plls,
vals += plls_no;
vals->frequency = CPUFREQ_TABLE_END;
- printk(KERN_INFO "cpufreq: %d PLL entries\n", plls_no);
+ pr_info("%d PLL entries\n", plls_no);
} else
- printk(KERN_ERR "cpufreq: no memory for PLL tables\n");
+ pr_err("no memory for PLL tables\n");
return vals ? 0 : -ENOMEM;
}
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index a145b319d..06d85917b 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -9,6 +9,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -205,7 +207,7 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
} else if (ch == DMC1) {
reg = (dmc_base[1] + 0x30);
} else {
- printk(KERN_ERR "Cannot find DMC port\n");
+ pr_err("Cannot find DMC port\n");
return;
}
@@ -534,7 +536,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
mem_type = check_mem_type(dmc_base[0]);
if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
- printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
+ pr_err("CPUFreq doesn't support this memory type\n");
ret = -EINVAL;
goto out_dmc1;
}
@@ -635,13 +637,13 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
arm_regulator = regulator_get(NULL, "vddarm");
if (IS_ERR(arm_regulator)) {
- pr_err("failed to get regulator vddarm");
+ pr_err("failed to get regulator vddarm\n");
return PTR_ERR(arm_regulator);
}
int_regulator = regulator_get(NULL, "vddint");
if (IS_ERR(int_regulator)) {
- pr_err("failed to get regulator vddint");
+ pr_err("failed to get regulator vddint\n");
regulator_put(arm_regulator);
return PTR_ERR(int_regulator);
}
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index ac84e4818..4225501a4 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -13,6 +13,8 @@
* 2005-03-30: - initial revision
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -30,8 +32,6 @@
static __u8 __iomem *cpuctl;
-#define PFX "sc520_freq: "
-
static struct cpufreq_frequency_table sc520_freq_table[] = {
{0, 0x01, 100000},
{0, 0x02, 133000},
@@ -44,8 +44,8 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
switch (clockspeed_reg & 0x03) {
default:
- printk(KERN_ERR PFX "error: cpuctl register has unexpected "
- "value %02x\n", clockspeed_reg);
+ pr_err("error: cpuctl register has unexpected value %02x\n",
+ clockspeed_reg);
case 0x01:
return 100000;
case 0x02:
@@ -112,7 +112,7 @@ static int __init sc520_freq_init(void)
cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
if (!cpuctl) {
- printk(KERN_ERR "sc520_freq: error: failed to remap memory\n");
+ pr_err("sc520_freq: error: failed to remap memory\n");
return -ENOMEM;
}
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index de5e89b2e..e8a7bf57b 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -18,6 +18,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -38,10 +39,20 @@ static struct scpi_dvfs_info *scpi_get_dvfs_info(struct device *cpu_dev)
return scpi_ops->dvfs_get_info(domain);
}
-static int scpi_opp_table_ops(struct device *cpu_dev, bool remove)
+static int scpi_get_transition_latency(struct device *cpu_dev)
{
- int idx, ret = 0;
+ struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
+
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+ return info->latency;
+}
+
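+/*
+ * Build the OPP table for the first CPU in @cpumask from the SCPI DVFS
+ * info and mark those OPPs as shared by every CPU in the mask.
+ */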
+static int scpi_init_opp_table(const struct cpumask *cpumask)
+{
+ int idx, ret;
struct scpi_opp *opp;
+ struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
if (IS_ERR(info))
@@ -51,11 +62,7 @@ static int scpi_opp_table_ops(struct device *cpu_dev, bool remove)
return -EIO;
for (opp = info->opps, idx = 0; idx < info->count; idx++, opp++) {
- if (remove)
- dev_pm_opp_remove(cpu_dev, opp->freq);
- else
- ret = dev_pm_opp_add(cpu_dev, opp->freq,
- opp->m_volt * 1000);
+ ret = dev_pm_opp_add(cpu_dev, opp->freq, opp->m_volt * 1000);
if (ret) {
dev_warn(cpu_dev, "failed to add opp %uHz %umV\n",
opp->freq, opp->m_volt);
@@ -64,33 +71,19 @@ static int scpi_opp_table_ops(struct device *cpu_dev, bool remove)
return ret;
}
}
- return ret;
-}
-static int scpi_get_transition_latency(struct device *cpu_dev)
-{
- struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
-
- if (IS_ERR(info))
- return PTR_ERR(info);
- return info->latency;
-}
-
-static int scpi_init_opp_table(struct device *cpu_dev)
-{
- return scpi_opp_table_ops(cpu_dev, false);
-}
-
-static void scpi_free_opp_table(struct device *cpu_dev)
-{
- scpi_opp_table_ops(cpu_dev, true);
+ ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask);
+ if (ret)
+ dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+ __func__, ret);
+ return ret;
}
static struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
.name = "scpi",
.get_transition_latency = scpi_get_transition_latency,
.init_opp_table = scpi_init_opp_table,
- .free_opp_table = scpi_free_opp_table,
+ .free_opp_table = dev_pm_opp_cpumask_remove_table,
};
static int scpi_cpufreq_probe(struct platform_device *pdev)
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 7d4a31571..41bc5397f 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -13,6 +13,8 @@
* Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -27,7 +29,6 @@
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
-#define PFX "speedstep-centrino: "
#define MAINTAINER "linux-pm@vger.kernel.org"
#define INTEL_MSR_RANGE (0xffff)
@@ -386,8 +387,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
/* check to see if it stuck */
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
- printk(KERN_INFO PFX
- "couldn't enable Enhanced SpeedStep\n");
+ pr_info("couldn't enable Enhanced SpeedStep\n");
return -ENODEV;
}
}
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 37555c6b8..b86953a3d 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -18,6 +18,8 @@
* SPEEDSTEP - DEFINITIONS *
*********************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -68,13 +70,13 @@ static int speedstep_find_register(void)
/* get PMBASE */
pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase);
if (!(pmbase & 0x01)) {
- printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
+ pr_err("could not find speedstep register\n");
return -ENODEV;
}
pmbase &= 0xFFFFFFFE;
if (!pmbase) {
- printk(KERN_ERR "speedstep-ich: could not find speedstep register\n");
+ pr_err("could not find speedstep register\n");
return -ENODEV;
}
@@ -136,7 +138,7 @@ static void speedstep_set_state(unsigned int state)
pr_debug("change to %u MHz succeeded\n",
speedstep_get_frequency(speedstep_processor) / 1000);
else
- printk(KERN_ERR "cpufreq: change failed - I/O error\n");
+ pr_err("change failed - I/O error\n");
return;
}
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 15d3214aa..1b8062182 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -8,6 +8,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -153,7 +155,7 @@ static unsigned int pentium_core_get_frequency(void)
fsb = 333333;
break;
default:
- printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value");
+ pr_err("PCORE - MSR_FSB_FREQ undefined value\n");
}
rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
@@ -453,11 +455,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
*/
if (*transition_latency > 10000000 ||
*transition_latency < 50000) {
- printk(KERN_WARNING PFX "frequency transition "
- "measured seems out of range (%u "
- "nSec), falling back to a safe one of"
- "%u nSec.\n",
- *transition_latency, 500000);
+ pr_warn("frequency transition measured seems out of range (%u nSec), falling back to a safe one of %u nSec\n",
+ *transition_latency, 500000);
*transition_latency = 500000;
}
}
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 819229e82..770a9ae19 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -12,6 +12,8 @@
* SPEEDSTEP - DEFINITIONS *
*********************************************************************/
+#define pr_fmt(fmt) "cpufreq: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -204,9 +206,8 @@ static void speedstep_set_state(unsigned int state)
(speedstep_freqs[new_state].frequency / 1000),
retry, result);
else
- printk(KERN_ERR "cpufreq: change to state %u "
- "failed with new_state %u and result %u\n",
- state, new_state, result);
+ pr_err("change to state %u failed with new_state %u and result %u\n",
+ state, new_state, result);
return;
}
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index 20bcceb58..435302542 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -14,7 +14,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
-#include <linux/cpufreq-dt.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -69,10 +68,6 @@ static void tegra124_cpu_switch_to_pllx(struct tegra124_cpufreq_priv *priv)
clk_set_parent(priv->cpu_clk, priv->pllx_clk);
}
-static struct cpufreq_dt_platform_data cpufreq_dt_pd = {
- .independent_clocks = false,
-};
-
static int tegra124_cpufreq_probe(struct platform_device *pdev)
{
struct tegra124_cpufreq_priv *priv;
@@ -129,8 +124,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
cpufreq_dt_devinfo.name = "cpufreq-dt";
cpufreq_dt_devinfo.parent = &pdev->dev;
- cpufreq_dt_devinfo.data = &cpufreq_dt_pd;
- cpufreq_dt_devinfo.size_data = sizeof(cpufreq_dt_pd);
priv->cpufreq_dt_pdev =
platform_device_register_full(&cpufreq_dt_devinfo);
diff --git a/drivers/cpufreq/vexpress-spc-cpufreq.c b/drivers/cpufreq/vexpress-spc-cpufreq.c
index 433e93fd4..87e5bdc5e 100644
--- a/drivers/cpufreq/vexpress-spc-cpufreq.c
+++ b/drivers/cpufreq/vexpress-spc-cpufreq.c
@@ -18,6 +18,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -26,8 +27,9 @@
#include "arm_big_little.h"
-static int ve_spc_init_opp_table(struct device *cpu_dev)
+static int ve_spc_init_opp_table(const struct cpumask *cpumask)
{
+ struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
/*
 * platform-specific SPC code must initialise the OPP table,
 * so just check that the OPP count is non-zero
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 0db185762..c73207abb 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
sched_idle_set_state(target_state);
trace_cpu_idle_rcuidle(index, dev->cpu);
- time_start = ktime_get();
+ time_start = ns_to_ktime(local_clock());
stop_critical_timings();
entered_state = target_state->enter(dev, drv, index);
start_critical_timings();
- time_end = ktime_get();
+ time_end = ns_to_ktime(local_clock());
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
/* The cpu is no longer idle or about to enter idle. */
@@ -217,7 +217,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();
- diff = ktime_to_us(ktime_sub(time_end, time_start));
+ diff = ktime_us_delta(time_end, time_start);
if (diff > INT_MAX)
diff = INT_MAX;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 477fffdb4..d77ba2f12 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -279,6 +279,14 @@ config CRYPTO_DEV_PPC4XX
help
This option allows you to have support for AMCC crypto acceleration.
+config HW_RANDOM_PPC4XX
+ bool "PowerPC 4xx generic true random number generator support"
+ depends on CRYPTO_DEV_PPC4XX && HW_RANDOM
+ default y
+ ---help---
+ This option provides the kernel-side support for the TRNG hardware
+ found in the security function of some PowerPC 4xx SoCs.
+
config CRYPTO_DEV_OMAP_SHAM
tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
depends on ARCH_OMAP2PLUS
@@ -302,15 +310,16 @@ config CRYPTO_DEV_OMAP_AES
want to use the OMAP module for AES algorithms.
config CRYPTO_DEV_OMAP_DES
- tristate "Support for OMAP DES3DES hw engine"
+ tristate "Support for OMAP DES/3DES hw engine"
depends on ARCH_OMAP2PLUS
select CRYPTO_DES
select CRYPTO_BLKCIPHER
+ select CRYPTO_ENGINE
help
OMAP processors have DES/3DES module accelerator. Select this if you
want to use the OMAP module for DES and 3DES algorithms. Currently
- the ECB and CBC modes of operation supported by the driver. Also
- accesses made on unaligned boundaries are also supported.
+ the ECB and CBC modes of operation are supported by the driver. Also
+ accesses made on unaligned boundaries are supported.
config CRYPTO_DEV_PICOXCELL
tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
@@ -340,9 +349,19 @@ config CRYPTO_DEV_SAHARA
This option enables support for the SAHARA HW crypto accelerator
found in some Freescale i.MX chips.
+config CRYPTO_DEV_MXC_SCC
+ tristate "Support for Freescale Security Controller (SCC)"
+ depends on ARCH_MXC && OF
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_DES
+ help
+ This option enables support for the Security Controller (SCC)
+ found in Freescale i.MX25 chips.
+
config CRYPTO_DEV_S5P
tristate "Support for Samsung S5PV210/Exynos crypto accelerator"
- depends on ARCH_S5PV210 || ARCH_EXYNOS
+ depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+ depends on HAS_IOMEM && HAS_DMA
select CRYPTO_AES
select CRYPTO_BLKCIPHER
help
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 713de9d11..3c6432dd0 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
+obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
diff --git a/drivers/crypto/amcc/Makefile b/drivers/crypto/amcc/Makefile
index 5c0c62b65..b95539928 100644
--- a/drivers/crypto/amcc/Makefile
+++ b/drivers/crypto/amcc/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
crypto4xx-y := crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
+crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 62134c8a2..dae1e3913 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -40,6 +40,7 @@
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
+#include "crypto4xx_trng.h"
#define PPC4XX_SEC_VERSION_STR "0.5"
@@ -1225,6 +1226,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
if (rc)
goto err_start_dev;
+ ppc4xx_trng_probe(core_dev);
return 0;
err_start_dev:
@@ -1252,6 +1254,8 @@ static int crypto4xx_remove(struct platform_device *ofdev)
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+ ppc4xx_trng_remove(core_dev);
+
free_irq(core_dev->irq, dev);
irq_dispose_mapping(core_dev->irq);
@@ -1272,7 +1276,7 @@ MODULE_DEVICE_TABLE(of, crypto4xx_match);
static struct platform_driver crypto4xx_driver = {
.driver = {
- .name = "crypto4xx",
+ .name = MODULE_NAME,
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
@@ -1284,4 +1288,3 @@ module_platform_driver(crypto4xx_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");
-
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index bac0bdeb4..ecfdcfe36 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -24,6 +24,8 @@
#include <crypto/internal/hash.h>
+#define MODULE_NAME "crypto4xx"
+
#define PPC460SX_SDR0_SRST 0x201
#define PPC405EX_SDR0_SRST 0x200
#define PPC460EX_SDR0_SRST 0x201
@@ -72,6 +74,7 @@ struct crypto4xx_device {
char *name;
u64 ce_phy_address;
void __iomem *ce_base;
+ void __iomem *trng_base;
void *pdr; /* base address of packet
descriptor ring */
@@ -106,6 +109,7 @@ struct crypto4xx_core_device {
struct device *device;
struct platform_device *ofdev;
struct crypto4xx_device *dev;
+ struct hwrng *trng;
u32 int_status;
u32 irq;
struct tasklet_struct tasklet;
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 5f5fbc071..46fe57c8f 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -125,6 +125,7 @@
#define PPC4XX_INTERRUPT_CLR 0x3ffff
#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
#define PPC4XX_DC_3DES_EN 1
+#define PPC4XX_TRNG_EN 0x00020000
#define PPC4XX_INT_DESCR_CNT 4
#define PPC4XX_INT_TIMEOUT_CNT 0
#define PPC4XX_INT_CFG 1
diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
new file mode 100644
index 000000000..677ca17fd
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_trng.c
@@ -0,0 +1,131 @@
+/*
+ * Generic PowerPC 44x RNG driver
+ *
+ * Copyright 2011 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/hw_random.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#include "crypto4xx_core.h"
+#include "crypto4xx_trng.h"
+#include "crypto4xx_reg_def.h"
+
+#define PPC4XX_TRNG_CTRL 0x0008
+#define PPC4XX_TRNG_CTRL_DALM 0x20
+#define PPC4XX_TRNG_STAT 0x0004
+#define PPC4XX_TRNG_STAT_B 0x1
+#define PPC4XX_TRNG_DATA 0x0000
+
+static int ppc4xx_trng_data_present(struct hwrng *rng, int wait)
+{
+ struct crypto4xx_device *dev = (void *)rng->priv;
+ int busy, i, present = 0;
+
+ for (i = 0; i < 20; i++) {
+ busy = (in_le32(dev->trng_base + PPC4XX_TRNG_STAT) &
+ PPC4XX_TRNG_STAT_B);
+ if (!busy || !wait) {
+ present = 1;
+ break;
+ }
+ udelay(10);
+ }
+ return present;
+}
+
+static int ppc4xx_trng_data_read(struct hwrng *rng, u32 *data)
+{
+ struct crypto4xx_device *dev = (void *)rng->priv;
+ *data = in_le32(dev->trng_base + PPC4XX_TRNG_DATA);
+ return 4;
+}
+
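+/* Set or clear the TRNG enable bit in the crypto core's control register. */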
+static void ppc4xx_trng_enable(struct crypto4xx_device *dev, bool enable)
+{
+ u32 device_ctrl;
+
+ device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+ if (enable)
+ device_ctrl |= PPC4XX_TRNG_EN;
+ else
+ device_ctrl &= ~PPC4XX_TRNG_EN;
+ writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+}
+
+static const struct of_device_id ppc4xx_trng_match[] = {
+ { .compatible = "ppc4xx-rng", },
+ { .compatible = "amcc,ppc460ex-rng", },
+ { .compatible = "amcc,ppc440epx-rng", },
+ {},
+};
+
+void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
+{
+ struct crypto4xx_device *dev = core_dev->dev;
+ struct device_node *trng = NULL;
+ struct hwrng *rng = NULL;
+ int err;
+
+ /* Find the TRNG device node and map it */
+ trng = of_find_matching_node(NULL, ppc4xx_trng_match);
+ if (!trng || !of_device_is_available(trng))
+ return;
+
+ dev->trng_base = of_iomap(trng, 0);
+ of_node_put(trng);
+ if (!dev->trng_base)
+ goto err_out;
+
+ rng = kzalloc(sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ goto err_out;
+
+ rng->name = MODULE_NAME;
+ rng->data_present = ppc4xx_trng_data_present;
+ rng->data_read = ppc4xx_trng_data_read;
+ rng->priv = (unsigned long) dev;
+ core_dev->trng = rng;
+ ppc4xx_trng_enable(dev, true);
+ out_le32(dev->trng_base + PPC4XX_TRNG_CTRL, PPC4XX_TRNG_CTRL_DALM);
+ err = devm_hwrng_register(core_dev->device, core_dev->trng);
+ if (err) {
+ ppc4xx_trng_enable(dev, false);
+ dev_err(core_dev->device, "failed to register hwrng (%d).\n",
+ err);
+ goto err_out;
+ }
+ return;
+
+err_out:
+	/* the trng node reference was already dropped after of_iomap() */
+	iounmap(dev->trng_base);
+ kfree(rng);
+ dev->trng_base = NULL;
+ core_dev->trng = NULL;
+}
+
+void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev)
+{
+ if (core_dev && core_dev->trng) {
+ struct crypto4xx_device *dev = core_dev->dev;
+
+ devm_hwrng_unregister(core_dev->device, core_dev->trng);
+ ppc4xx_trng_enable(dev, false);
+ iounmap(dev->trng_base);
+ kfree(core_dev->trng);
+ }
+}
+
+MODULE_ALIAS("ppc4xx_rng");
diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h
new file mode 100644
index 000000000..931d22531
--- /dev/null
+++ b/drivers/crypto/amcc/crypto4xx_trng.h
@@ -0,0 +1,34 @@
+/**
+ * AMCC SoC PPC4xx Crypto Driver
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. James Hsiao <jhsiao@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file declares the PPC4xx TRNG probe/remove hooks used by
+ * the crypto4xx driver core.
+ */
+
+#ifndef __CRYPTO4XX_TRNG_H__
+#define __CRYPTO4XX_TRNG_H__
+
+#ifdef CONFIG_HW_RANDOM_PPC4XX
+void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev);
+void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev);
+#else
+static inline void ppc4xx_trng_probe(
+	struct crypto4xx_core_device *core_dev __maybe_unused) { }
+static inline void ppc4xx_trng_remove(
+	struct crypto4xx_core_device *core_dev __maybe_unused) { }
+#endif
+
+#endif
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 44d30b45f..5ad5f3009 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -402,7 +402,7 @@ int caam_get_era(void)
ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
of_node_put(caam_node);
- return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop;
+ return ret ? -ENOTSUPP : prop;
}
EXPORT_SYMBOL(caam_get_era);
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 6e37845ab..2238f77aa 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -3,6 +3,8 @@ config CRYPTO_DEV_CCP_DD
depends on CRYPTO_DEV_CCP
default m
select HW_RANDOM
+ select DMA_ENGINE
+ select DMADEVICES
select CRYPTO_SHA1
select CRYPTO_SHA256
help
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index b750592cc..ee4d2741b 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -1,5 +1,9 @@
obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
-ccp-objs := ccp-dev.o ccp-ops.o ccp-dev-v3.o ccp-platform.o
+ccp-objs := ccp-dev.o \
+ ccp-ops.o \
+ ccp-dev-v3.o \
+ ccp-platform.o \
+ ccp-dmaengine.o
ccp-$(CONFIG_PCI) += ccp-pci.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 7d5eab491..d7a710347 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -406,6 +406,11 @@ static int ccp_init(struct ccp_device *ccp)
goto e_kthread;
}
+ /* Register the DMA engine support */
+ ret = ccp_dmaengine_register(ccp);
+ if (ret)
+ goto e_hwrng;
+
ccp_add_device(ccp);
/* Enable interrupts */
@@ -413,6 +418,9 @@ static int ccp_init(struct ccp_device *ccp)
return 0;
+e_hwrng:
+ hwrng_unregister(&ccp->hwrng);
+
e_kthread:
for (i = 0; i < ccp->cmd_q_count; i++)
if (ccp->cmd_q[i].kthread)
@@ -436,6 +444,9 @@ static void ccp_destroy(struct ccp_device *ccp)
/* Remove this device from the list of available units first */
ccp_del_device(ccp);
+ /* Unregister the DMA engine */
+ ccp_dmaengine_unregister(ccp);
+
/* Unregister the RNG */
hwrng_unregister(&ccp->hwrng);
@@ -515,7 +526,7 @@ static irqreturn_t ccp_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static struct ccp_actions ccp3_actions = {
+static const struct ccp_actions ccp3_actions = {
.perform_aes = ccp_perform_aes,
.perform_xts_aes = ccp_perform_xts_aes,
.perform_sha = ccp_perform_sha,
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index 4dbc18727..87b9f2bfa 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -16,7 +16,7 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
-#include <linux/rwlock_types.h>
+#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 7745d0be4..bd41ffcef 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -22,6 +22,9 @@
#include <linux/dmapool.h>
#include <linux/hw_random.h>
#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/dmaengine.h>
#define MAX_CCP_NAME_LEN 16
#define MAX_DMAPOOL_NAME_LEN 32
@@ -159,7 +162,7 @@ struct ccp_actions {
/* Structure to hold CCP version-specific values */
struct ccp_vdata {
unsigned int version;
- struct ccp_actions *perform;
+ const struct ccp_actions *perform;
};
extern struct ccp_vdata ccpv3;
@@ -167,6 +170,39 @@ extern struct ccp_vdata ccpv3;
struct ccp_device;
struct ccp_cmd;
+struct ccp_dma_cmd {
+ struct list_head entry;
+
+ struct ccp_cmd ccp_cmd;
+};
+
+struct ccp_dma_desc {
+ struct list_head entry;
+
+ struct ccp_device *ccp;
+
+ struct list_head pending;
+ struct list_head active;
+
+ enum dma_status status;
+ struct dma_async_tx_descriptor tx_desc;
+ size_t len;
+};
+
+struct ccp_dma_chan {
+ struct ccp_device *ccp;
+
+ spinlock_t lock;
+ struct list_head pending;
+ struct list_head active;
+ struct list_head complete;
+
+ struct tasklet_struct cleanup_tasklet;
+
+ enum dma_status status;
+ struct dma_chan dma_chan;
+};
+
struct ccp_cmd_queue {
struct ccp_device *ccp;
@@ -261,6 +297,14 @@ struct ccp_device {
unsigned int hwrng_retries;
/*
+ * Support for the CCP DMA capabilities
+ */
+ struct dma_device dma_dev;
+ struct ccp_dma_chan *ccp_dma_chan;
+ struct kmem_cache *dma_cmd_cache;
+ struct kmem_cache *dma_desc_cache;
+
+ /*
* A counter used to generate job-ids for cmds submitted to the CCP
*/
atomic_t current_id ____cacheline_aligned;
@@ -418,4 +462,7 @@ int ccp_cmd_queue_thread(void *data);
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
+int ccp_dmaengine_register(struct ccp_device *ccp);
+void ccp_dmaengine_unregister(struct ccp_device *ccp);
+
#endif
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
new file mode 100644
index 000000000..94f77b0f9
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -0,0 +1,727 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/dmaengine.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/ccp.h>
+
+#include "ccp-dev.h"
+#include "../../dma/dmaengine.h"
+
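+/* Derive the usable address width (in bits) from the device's DMA mask. */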
+#define CCP_DMA_WIDTH(_mask) \
+({ \
+ u64 mask = _mask + 1; \
+ (mask == 0) ? 64 : fls64(mask); \
+})
+
+static void ccp_free_cmd_resources(struct ccp_device *ccp,
+ struct list_head *list)
+{
+ struct ccp_dma_cmd *cmd, *ctmp;
+
+ list_for_each_entry_safe(cmd, ctmp, list, entry) {
+ list_del(&cmd->entry);
+ kmem_cache_free(ccp->dma_cmd_cache, cmd);
+ }
+}
+
+static void ccp_free_desc_resources(struct ccp_device *ccp,
+ struct list_head *list)
+{
+ struct ccp_dma_desc *desc, *dtmp;
+
+ list_for_each_entry_safe(desc, dtmp, list, entry) {
+ ccp_free_cmd_resources(ccp, &desc->active);
+ ccp_free_cmd_resources(ccp, &desc->pending);
+
+ list_del(&desc->entry);
+ kmem_cache_free(ccp->dma_desc_cache, desc);
+ }
+}
+
+static void ccp_free_chan_resources(struct dma_chan *dma_chan)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ unsigned long flags;
+
+ dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ ccp_free_desc_resources(chan->ccp, &chan->complete);
+ ccp_free_desc_resources(chan->ccp, &chan->active);
+ ccp_free_desc_resources(chan->ccp, &chan->pending);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
+ struct list_head *list)
+{
+ struct ccp_dma_desc *desc, *dtmp;
+
+ list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
+ if (!async_tx_test_ack(&desc->tx_desc))
+ continue;
+
+ dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);
+
+ ccp_free_cmd_resources(ccp, &desc->active);
+ ccp_free_cmd_resources(ccp, &desc->pending);
+
+ list_del(&desc->entry);
+ kmem_cache_free(ccp->dma_desc_cache, desc);
+ }
+}
+
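+/* Tasklet: free completed descriptors whose transactions have been acked. */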
+static void ccp_do_cleanup(unsigned long data)
+{
+ struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
+ unsigned long flags;
+
+ dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
+ dma_chan_name(&chan->dma_chan));
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ ccp_cleanup_desc_resources(chan->ccp, &chan->complete);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
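+/*
+ * Move the descriptor's next pending command to its active list and hand
+ * it to the CCP; a queued or backlogged submission counts as success.
+ */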
+static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
+{
+ struct ccp_dma_cmd *cmd;
+ int ret;
+
+ cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
+ list_move(&cmd->entry, &desc->active);
+
+ dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
+ desc->tx_desc.cookie, cmd);
+
+ ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
+ if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
+ return 0;
+
+ dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
+ ret, desc->tx_desc.cookie, cmd);
+
+ return ret;
+}
+
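+/* Free the command at the head of the descriptor's active list, if any. */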
+static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
+{
+ struct ccp_dma_cmd *cmd;
+
+ cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
+ entry);
+ if (!cmd)
+ return;
+
+ dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
+ __func__, desc->tx_desc.cookie, cmd);
+
+ list_del(&cmd->entry);
+ kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
+}
+
+static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
+ struct ccp_dma_desc *desc)
+{
+ /* Move current DMA descriptor to the complete list */
+ if (desc)
+ list_move(&desc->entry, &chan->complete);
+
+ /* Get the next DMA descriptor on the active list */
+ desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
+ entry);
+
+ return desc;
+}
+
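+/*
+ * Retire the just-finished command on @desc.  When a descriptor has no
+ * commands left (or hit an error), mark it complete, run its callback
+ * and move on; return the next descriptor that still has work pending.
+ */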
+static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
+ struct ccp_dma_desc *desc)
+{
+ struct dma_async_tx_descriptor *tx_desc;
+ unsigned long flags;
+
+ /* Loop over descriptors until one is found with commands */
+ do {
+ if (desc) {
+ /* Remove the DMA command from the list and free it */
+ ccp_free_active_cmd(desc);
+
+ if (!list_empty(&desc->pending)) {
+ /* No errors, keep going */
+ if (desc->status != DMA_ERROR)
+ return desc;
+
+ /* Error, free remaining commands and move on */
+ ccp_free_cmd_resources(desc->ccp,
+ &desc->pending);
+ }
+
+ tx_desc = &desc->tx_desc;
+ } else {
+ tx_desc = NULL;
+ }
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (desc) {
+ if (desc->status != DMA_ERROR)
+ desc->status = DMA_COMPLETE;
+
+ dev_dbg(desc->ccp->dev,
+ "%s - tx %d complete, status=%u\n", __func__,
+ desc->tx_desc.cookie, desc->status);
+
+ dma_cookie_complete(tx_desc);
+ }
+
+ desc = __ccp_next_dma_desc(chan, desc);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ if (tx_desc) {
+ if (tx_desc->callback &&
+ (tx_desc->flags & DMA_PREP_INTERRUPT))
+ tx_desc->callback(tx_desc->callback_param);
+
+ dma_run_dependencies(tx_desc);
+ }
+ } while (desc);
+
+ return NULL;
+}
+
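+/*
+ * Splice all pending descriptors onto the active list.  Return the first
+ * newly activated descriptor when the active list was empty (submission
+ * must be kicked), or NULL when work is already in flight.
+ */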
+static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
+{
+ struct ccp_dma_desc *desc;
+
+ if (list_empty(&chan->pending))
+ return NULL;
+
+ desc = list_empty(&chan->active)
+ ? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
+ : NULL;
+
+ list_splice_tail_init(&chan->pending, &chan->active);
+
+ return desc;
+}
+
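+/*
+ * Per-command completion callback: record any error on the owning
+ * descriptor, then keep issuing commands and descriptors until the
+ * channel runs dry, is paused or a submission fails.
+ */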
+static void ccp_cmd_callback(void *data, int err)
+{
+ struct ccp_dma_desc *desc = data;
+ struct ccp_dma_chan *chan;
+ int ret;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
+ dma_chan);
+
+ dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
+ __func__, desc->tx_desc.cookie, err);
+
+ if (err)
+ desc->status = DMA_ERROR;
+
+ while (true) {
+ /* Check for DMA descriptor completion */
+ desc = ccp_handle_active_desc(chan, desc);
+
+ /* Don't submit cmd if no descriptor or DMA is paused */
+ if (!desc || (chan->status == DMA_PAUSED))
+ break;
+
+ ret = ccp_issue_next_cmd(desc);
+ if (!ret)
+ break;
+
+ desc->status = DMA_ERROR;
+ }
+
+ tasklet_schedule(&chan->cleanup_tasklet);
+}
+
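+/* dmaengine tx_submit hook: assign a cookie and queue the descriptor. */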
+static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
+{
+ struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
+ tx_desc);
+ struct ccp_dma_chan *chan;
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ cookie = dma_cookie_assign(tx_desc);
+ list_add_tail(&desc->entry, &chan->pending);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
+ __func__, cookie);
+
+ return cookie;
+}
+
+static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
+{
+ struct ccp_dma_cmd *cmd;
+
+ cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
+ if (cmd)
+ memset(cmd, 0, sizeof(*cmd));
+
+ return cmd;
+}
+
+static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
+ unsigned long flags)
+{
+ struct ccp_dma_desc *desc;
+
+ desc = kmem_cache_alloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ memset(desc, 0, sizeof(*desc));
+
+ dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
+ desc->tx_desc.flags = flags;
+ desc->tx_desc.tx_submit = ccp_tx_submit;
+ desc->ccp = chan->ccp;
+ INIT_LIST_HEAD(&desc->pending);
+ INIT_LIST_HEAD(&desc->active);
+ desc->status = DMA_IN_PROGRESS;
+
+ return desc;
+}
+
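+/*
+ * Build a descriptor that copies @src_sg to @dst_sg: walk both lists and
+ * emit one no-op passthru CCP command per contiguous chunk, then place
+ * the descriptor on the channel's pending list.
+ */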
+static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
+ struct scatterlist *dst_sg,
+ unsigned int dst_nents,
+ struct scatterlist *src_sg,
+ unsigned int src_nents,
+ unsigned long flags)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ struct ccp_device *ccp = chan->ccp;
+ struct ccp_dma_desc *desc;
+ struct ccp_dma_cmd *cmd;
+ struct ccp_cmd *ccp_cmd;
+ struct ccp_passthru_nomap_engine *ccp_pt;
+ unsigned int src_offset, src_len;
+ unsigned int dst_offset, dst_len;
+ unsigned int len;
+ unsigned long sflags;
+ size_t total_len;
+
+ if (!dst_sg || !src_sg)
+ return NULL;
+
+ if (!dst_nents || !src_nents)
+ return NULL;
+
+ desc = ccp_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ total_len = 0;
+
+ src_len = sg_dma_len(src_sg);
+ src_offset = 0;
+
+ dst_len = sg_dma_len(dst_sg);
+ dst_offset = 0;
+
+ while (true) {
+ if (!src_len) {
+ src_nents--;
+ if (!src_nents)
+ break;
+
+ src_sg = sg_next(src_sg);
+ if (!src_sg)
+ break;
+
+ src_len = sg_dma_len(src_sg);
+ src_offset = 0;
+ continue;
+ }
+
+ if (!dst_len) {
+ dst_nents--;
+ if (!dst_nents)
+ break;
+
+ dst_sg = sg_next(dst_sg);
+ if (!dst_sg)
+ break;
+
+ dst_len = sg_dma_len(dst_sg);
+ dst_offset = 0;
+ continue;
+ }
+
+ len = min(dst_len, src_len);
+
+ cmd = ccp_alloc_dma_cmd(chan);
+ if (!cmd)
+ goto err;
+
+ ccp_cmd = &cmd->ccp_cmd;
+ ccp_pt = &ccp_cmd->u.passthru_nomap;
+ ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
+ ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
+ ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
+ ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
+ ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
+ ccp_pt->src_len = len;
+ ccp_pt->final = 1;
+ ccp_cmd->callback = ccp_cmd_callback;
+ ccp_cmd->data = desc;
+
+ list_add_tail(&cmd->entry, &desc->pending);
+
+ dev_dbg(ccp->dev,
+ "%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
+ cmd, &ccp_pt->src_dma,
+ &ccp_pt->dst_dma, ccp_pt->src_len);
+
+ total_len += len;
+
+ src_len -= len;
+ src_offset += len;
+
+ dst_len -= len;
+ dst_offset += len;
+ }
+
+ desc->len = total_len;
+
+ if (list_empty(&desc->pending))
+ goto err;
+
+ dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);
+
+ spin_lock_irqsave(&chan->lock, sflags);
+
+ list_add_tail(&desc->entry, &chan->pending);
+
+ spin_unlock_irqrestore(&chan->lock, sflags);
+
+ return desc;
+
+err:
+ ccp_free_cmd_resources(ccp, &desc->pending);
+ kmem_cache_free(ccp->dma_desc_cache, desc);
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
+ struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
+ unsigned long flags)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ struct ccp_dma_desc *desc;
+ struct scatterlist dst_sg, src_sg;
+
+ dev_dbg(chan->ccp->dev,
+ "%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
+ __func__, &src, &dst, len, flags);
+
+ sg_init_table(&dst_sg, 1);
+ sg_dma_address(&dst_sg) = dst;
+ sg_dma_len(&dst_sg) = len;
+
+ sg_init_table(&src_sg, 1);
+ sg_dma_address(&src_sg) = src;
+ sg_dma_len(&src_sg) = len;
+
+ desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->tx_desc;
+}
+
+static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
+ struct dma_chan *dma_chan, struct scatterlist *dst_sg,
+ unsigned int dst_nents, struct scatterlist *src_sg,
+ unsigned int src_nents, unsigned long flags)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ struct ccp_dma_desc *desc;
+
+ dev_dbg(chan->ccp->dev,
+ "%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
+ __func__, src_sg, src_nents, dst_sg, dst_nents, flags);
+
+ desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
+ flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->tx_desc;
+}
+
+static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
+ struct dma_chan *dma_chan, unsigned long flags)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ struct ccp_dma_desc *desc;
+
+ desc = ccp_alloc_dma_desc(chan, flags);
+ if (!desc)
+ return NULL;
+
+ return &desc->tx_desc;
+}
+
+static void ccp_issue_pending(struct dma_chan *dma_chan)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ struct ccp_dma_desc *desc;
+ unsigned long flags;
+
+ dev_dbg(chan->ccp->dev, "%s\n", __func__);
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ desc = __ccp_pending_to_active(chan);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* If there was nothing active, start processing */
+ if (desc)
+ ccp_cmd_callback(desc, 0);
+}
+
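+/*
+ * Report a cookie's status; when the core reports DMA_COMPLETE, prefer
+ * the per-descriptor status (which may be DMA_ERROR) if the descriptor
+ * is still on the complete list.
+ */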
+static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ struct ccp_dma_desc *desc;
+ enum dma_status ret;
+ unsigned long flags;
+
+ if (chan->status == DMA_PAUSED) {
+ ret = DMA_PAUSED;
+ goto out;
+ }
+
+ ret = dma_cookie_status(dma_chan, cookie, state);
+ if (ret == DMA_COMPLETE) {
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* Get status from complete chain, if still there */
+ list_for_each_entry(desc, &chan->complete, entry) {
+ if (desc->tx_desc.cookie != cookie)
+ continue;
+
+ ret = desc->status;
+ break;
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+
+out:
+ dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);
+
+ return ret;
+}
+
+static int ccp_pause(struct dma_chan *dma_chan)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+
+ chan->status = DMA_PAUSED;
+
+	/* TODO: Wait for active DMA to complete before returning? */
+
+ return 0;
+}
+
+static int ccp_resume(struct dma_chan *dma_chan)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ struct ccp_dma_desc *desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
+ entry);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ /* Indicate the channel is running again */
+ chan->status = DMA_IN_PROGRESS;
+
+ /* If there was something active, re-start */
+ if (desc)
+ ccp_cmd_callback(desc, 0);
+
+ return 0;
+}
+
+static int ccp_terminate_all(struct dma_chan *dma_chan)
+{
+ struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
+ dma_chan);
+ unsigned long flags;
+
+ dev_dbg(chan->ccp->dev, "%s\n", __func__);
+
+	/* TODO: Wait for active DMA to complete before continuing */
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+	/* TODO: Purge the complete list? */
+ ccp_free_desc_resources(chan->ccp, &chan->active);
+ ccp_free_desc_resources(chan->ccp, &chan->pending);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return 0;
+}
+
+int ccp_dmaengine_register(struct ccp_device *ccp)
+{
+ struct ccp_dma_chan *chan;
+ struct dma_device *dma_dev = &ccp->dma_dev;
+ struct dma_chan *dma_chan;
+ char *dma_cmd_cache_name;
+ char *dma_desc_cache_name;
+ unsigned int i;
+ int ret;
+
+ ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
+ sizeof(*(ccp->ccp_dma_chan)),
+ GFP_KERNEL);
+ if (!ccp->ccp_dma_chan)
+ return -ENOMEM;
+
+ dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
+ "%s-dmaengine-cmd-cache",
+ ccp->name);
+ if (!dma_cmd_cache_name)
+ return -ENOMEM;
+
+ ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
+ sizeof(struct ccp_dma_cmd),
+ sizeof(void *),
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!ccp->dma_cmd_cache)
+ return -ENOMEM;
+
+ dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
+ "%s-dmaengine-desc-cache",
+ ccp->name);
+	if (!dma_desc_cache_name)
+		return -ENOMEM;
+
+ ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
+ sizeof(struct ccp_dma_desc),
+ sizeof(void *),
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!ccp->dma_desc_cache) {
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
+ dma_dev->dev = ccp->dev;
+ dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
+ dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
+ dma_dev->directions = DMA_MEM_TO_MEM;
+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_SG, dma_dev->cap_mask);
+ dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ chan = ccp->ccp_dma_chan + i;
+ dma_chan = &chan->dma_chan;
+
+ chan->ccp = ccp;
+
+ spin_lock_init(&chan->lock);
+ INIT_LIST_HEAD(&chan->pending);
+ INIT_LIST_HEAD(&chan->active);
+ INIT_LIST_HEAD(&chan->complete);
+
+ tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
+ (unsigned long)chan);
+
+ dma_chan->device = dma_dev;
+ dma_cookie_init(dma_chan);
+
+ list_add_tail(&dma_chan->device_node, &dma_dev->channels);
+ }
+
+ dma_dev->device_free_chan_resources = ccp_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
+ dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
+ dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
+ dma_dev->device_issue_pending = ccp_issue_pending;
+ dma_dev->device_tx_status = ccp_tx_status;
+ dma_dev->device_pause = ccp_pause;
+ dma_dev->device_resume = ccp_resume;
+ dma_dev->device_terminate_all = ccp_terminate_all;
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret)
+ goto err_reg;
+
+ return 0;
+
+err_reg:
+ kmem_cache_destroy(ccp->dma_desc_cache);
+
+err_cache:
+ kmem_cache_destroy(ccp->dma_cmd_cache);
+
+ return ret;
+}
+
+void ccp_dmaengine_unregister(struct ccp_device *ccp)
+{
+ struct dma_device *dma_dev = &ccp->dma_dev;
+
+ dma_async_device_unregister(dma_dev);
+
+ kmem_cache_destroy(ccp->dma_desc_cache);
+ kmem_cache_destroy(ccp->dma_cmd_cache);
+}
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index eefdf595f..ffa289103 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1427,6 +1427,70 @@ e_mask:
return ret;
}
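+/*
+ * Passthru operation on caller-supplied DMA addresses: unlike
+ * ccp_run_passthru_cmd(), source and destination are already DMA
+ * mapped, so no scatterlist mapping is done here.
+ */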
+static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
+ struct ccp_cmd *cmd)
+{
+ struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
+ struct ccp_dm_workarea mask;
+ struct ccp_op op;
+ int ret;
+
+ if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
+ return -EINVAL;
+
+ if (!pt->src_dma || !pt->dst_dma)
+ return -EINVAL;
+
+ if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
+ if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
+ return -EINVAL;
+ if (!pt->mask)
+ return -EINVAL;
+ }
+
+ BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);
+
+ memset(&op, 0, sizeof(op));
+ op.cmd_q = cmd_q;
+ op.jobid = ccp_gen_jobid(cmd_q->ccp);
+
+ if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
+ /* Load the mask */
+ op.ksb_key = cmd_q->ksb_key;
+
+ mask.length = pt->mask_len;
+ mask.dma.address = pt->mask;
+ mask.dma.length = pt->mask_len;
+
+ ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ return ret;
+ }
+ }
+
+ /* Send data to the CCP Passthru engine */
+ op.eom = 1;
+ op.soc = 1;
+
+ op.src.type = CCP_MEMTYPE_SYSTEM;
+ op.src.u.dma.address = pt->src_dma;
+ op.src.u.dma.offset = 0;
+ op.src.u.dma.length = pt->src_len;
+
+ op.dst.type = CCP_MEMTYPE_SYSTEM;
+ op.dst.u.dma.address = pt->dst_dma;
+ op.dst.u.dma.offset = 0;
+ op.dst.u.dma.length = pt->src_len;
+
+ ret = cmd_q->ccp->vdata->perform->perform_passthru(&op);
+ if (ret)
+ cmd->engine_error = cmd_q->cmd_error;
+
+ return ret;
+}
+
static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
struct ccp_ecc_engine *ecc = &cmd->u.ecc;
@@ -1762,7 +1826,10 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ret = ccp_run_rsa_cmd(cmd_q, cmd);
break;
case CCP_ENGINE_PASSTHRU:
- ret = ccp_run_passthru_cmd(cmd_q, cmd);
+ if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP)
+ ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd);
+ else
+ ret = ccp_run_passthru_cmd(cmd_q, cmd);
break;
case CCP_ENGINE_ECC:
ret = ccp_run_ecc_cmd(cmd_q, cmd);
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 80239ae69..e8ef9fd24 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -475,18 +475,18 @@ static int mv_cesa_probe(struct platform_device *pdev)
engine->regs = cesa->regs + CESA_ENGINE_OFF(i);
if (dram && cesa->caps->has_tdma)
- mv_cesa_conf_mbus_windows(&cesa->engines[i], dram);
+ mv_cesa_conf_mbus_windows(engine, dram);
- writel(0, cesa->engines[i].regs + CESA_SA_INT_STATUS);
+ writel(0, engine->regs + CESA_SA_INT_STATUS);
writel(CESA_SA_CFG_STOP_DIG_ERR,
- cesa->engines[i].regs + CESA_SA_CFG);
+ engine->regs + CESA_SA_CFG);
writel(engine->sram_dma & CESA_SA_SRAM_MSK,
- cesa->engines[i].regs + CESA_SA_DESC_P0);
+ engine->regs + CESA_SA_DESC_P0);
ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
IRQF_ONESHOT,
dev_name(&pdev->dev),
- &cesa->engines[i]);
+ engine);
if (ret)
goto err_cleanup;
}
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 7ca2e0f9d..7a5058da9 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -768,8 +768,7 @@ static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
*len = creq->len;
memcpy(hash, creq->state, digsize);
memset(cache, 0, blocksize);
- if (creq->cache)
- memcpy(cache, creq->cache, creq->cache_ptr);
+ memcpy(cache, creq->cache, creq->cache_ptr);
return 0;
}
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 764279812..0ad8f1ecf 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -99,12 +99,11 @@ mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
struct mv_cesa_tdma_desc *new_tdma = NULL;
dma_addr_t dma_handle;
- new_tdma = dma_pool_alloc(cesa_dev->dma->tdma_desc_pool, flags,
- &dma_handle);
+ new_tdma = dma_pool_zalloc(cesa_dev->dma->tdma_desc_pool, flags,
+ &dma_handle);
if (!new_tdma)
return ERR_PTR(-ENOMEM);
- memset(new_tdma, 0, sizeof(*new_tdma));
new_tdma->cur_dma = dma_handle;
if (chain->last) {
chain->last->next_dma = cpu_to_le32(dma_handle);
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
new file mode 100644
index 000000000..ff383ef83
--- /dev/null
+++ b/drivers/crypto/mxc-scc.c
@@ -0,0 +1,765 @@
+/*
+ * Copyright (C) 2016 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
+ *
+ * The driver is based on information gathered from
+ * drivers/mxc/security/mxc_scc.c which can be found in
+ * the Freescale linux-2.6-imx.git in the imx_2.6.35_maintain branch.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+
+/* Secure Memory (SCM) registers */
+#define SCC_SCM_RED_START 0x0000
+#define SCC_SCM_BLACK_START 0x0004
+#define SCC_SCM_LENGTH 0x0008
+#define SCC_SCM_CTRL 0x000C
+#define SCC_SCM_STATUS 0x0010
+#define SCC_SCM_ERROR_STATUS 0x0014
+#define SCC_SCM_INTR_CTRL 0x0018
+#define SCC_SCM_CFG 0x001C
+#define SCC_SCM_INIT_VECTOR_0 0x0020
+#define SCC_SCM_INIT_VECTOR_1 0x0024
+#define SCC_SCM_RED_MEMORY 0x0400
+#define SCC_SCM_BLACK_MEMORY 0x0800
+
+/* Security Monitor (SMN) Registers */
+#define SCC_SMN_STATUS 0x1000
+#define SCC_SMN_COMMAND 0x1004
+#define SCC_SMN_SEQ_START 0x1008
+#define SCC_SMN_SEQ_END 0x100C
+#define SCC_SMN_SEQ_CHECK 0x1010
+#define SCC_SMN_BIT_COUNT 0x1014
+#define SCC_SMN_BITBANK_INC_SIZE 0x1018
+#define SCC_SMN_BITBANK_DECREMENT 0x101C
+#define SCC_SMN_COMPARE_SIZE 0x1020
+#define SCC_SMN_PLAINTEXT_CHECK 0x1024
+#define SCC_SMN_CIPHERTEXT_CHECK 0x1028
+#define SCC_SMN_TIMER_IV 0x102C
+#define SCC_SMN_TIMER_CONTROL 0x1030
+#define SCC_SMN_DEBUG_DETECT_STAT 0x1034
+#define SCC_SMN_TIMER 0x1038
+
+#define SCC_SCM_CTRL_START_CIPHER BIT(2)
+#define SCC_SCM_CTRL_CBC_MODE BIT(1)
+#define SCC_SCM_CTRL_DECRYPT_MODE BIT(0)
+
+#define SCC_SCM_STATUS_LEN_ERR BIT(12)
+#define SCC_SCM_STATUS_SMN_UNBLOCKED BIT(11)
+#define SCC_SCM_STATUS_CIPHERING_DONE BIT(10)
+#define SCC_SCM_STATUS_ZEROIZING_DONE BIT(9)
+#define SCC_SCM_STATUS_INTR_STATUS BIT(8)
+#define SCC_SCM_STATUS_SEC_KEY BIT(7)
+#define SCC_SCM_STATUS_INTERNAL_ERR BIT(6)
+#define SCC_SCM_STATUS_BAD_SEC_KEY BIT(5)
+#define SCC_SCM_STATUS_ZEROIZE_FAIL BIT(4)
+#define SCC_SCM_STATUS_SMN_BLOCKED BIT(3)
+#define SCC_SCM_STATUS_CIPHERING BIT(2)
+#define SCC_SCM_STATUS_ZEROIZING BIT(1)
+#define SCC_SCM_STATUS_BUSY BIT(0)
+
+#define SCC_SMN_STATUS_STATE_MASK 0x0000001F
+#define SCC_SMN_STATE_START 0x0
+/* The SMN is zeroizing its RAM during reset */
+#define SCC_SMN_STATE_ZEROIZE_RAM 0x5
+/* SMN has passed internal checks */
+#define SCC_SMN_STATE_HEALTH_CHECK 0x6
+/* Fatal Security Violation. SMN is locked, SCM is inoperative. */
+#define SCC_SMN_STATE_FAIL 0x9
+/* SCC is in secure state. SCM is using secret key. */
+#define SCC_SMN_STATE_SECURE 0xA
+/* SCC is not secure. SCM is using default key. */
+#define SCC_SMN_STATE_NON_SECURE 0xC
+
+#define SCC_SCM_INTR_CTRL_ZEROIZE_MEM BIT(2)
+#define SCC_SCM_INTR_CTRL_CLR_INTR BIT(1)
+#define SCC_SCM_INTR_CTRL_MASK_INTR BIT(0)
+
+/* Size, in blocks, of Black memory. */
+#define SCC_SCM_CFG_BLACK_SIZE_MASK 0x07fe0000
+#define SCC_SCM_CFG_BLACK_SIZE_SHIFT 17
+/* Size, in blocks, of Red memory. */
+#define SCC_SCM_CFG_RED_SIZE_MASK 0x0001ff80
+#define SCC_SCM_CFG_RED_SIZE_SHIFT 7
+/* Number of bytes per block. */
+#define SCC_SCM_CFG_BLOCK_SIZE_MASK 0x0000007f
+
+#define SCC_SMN_COMMAND_TAMPER_LOCK BIT(4)
+#define SCC_SMN_COMMAND_CLR_INTR BIT(3)
+#define SCC_SMN_COMMAND_CLR_BIT_BANK BIT(2)
+#define SCC_SMN_COMMAND_EN_INTR BIT(1)
+#define SCC_SMN_COMMAND_SET_SOFTWARE_ALARM BIT(0)
+
+#define SCC_KEY_SLOTS 20
+#define SCC_MAX_KEY_SIZE 32
+#define SCC_KEY_SLOT_SIZE 32
+
+#define SCC_CRC_CCITT_START 0xFFFF
+
+/*
+ * Offset into each RAM of the base of the area which is not
+ * used for Stored Keys.
+ */
+#define SCC_NON_RESERVED_OFFSET (SCC_KEY_SLOTS * SCC_KEY_SLOT_SIZE)
+
+/* Fixed padding for appending to plaintext to fill out a block */
+static char scc_block_padding[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0 };
+
+enum mxc_scc_state {
+ SCC_STATE_OK,
+ SCC_STATE_UNIMPLEMENTED,
+ SCC_STATE_FAILED
+};
+
+struct mxc_scc {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk;
+ bool hw_busy;
+ spinlock_t lock;
+ struct crypto_queue queue;
+ struct crypto_async_request *req;
+ int block_size_bytes;
+ int black_ram_size_blocks;
+ int memory_size_bytes;
+ int bytes_remaining;
+
+ void __iomem *red_memory;
+ void __iomem *black_memory;
+};
+
+struct mxc_scc_ctx {
+ struct mxc_scc *scc;
+ struct scatterlist *sg_src;
+ size_t src_nents;
+ struct scatterlist *sg_dst;
+ size_t dst_nents;
+ unsigned int offset;
+ unsigned int size;
+ unsigned int ctrl;
+};
+
+struct mxc_scc_crypto_tmpl {
+ struct mxc_scc *scc;
+ struct crypto_alg alg;
+};
+
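+/*
+ * Copy the result of a cipher run out of the SCC's red RAM (when
+ * decrypting) or black RAM (when encrypting) into the request's
+ * destination scatterlist.
+ */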
+static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
+ struct crypto_async_request *req)
+{
+ struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+ struct mxc_scc *scc = ctx->scc;
+ size_t len;
+ void __iomem *from;
+
+ if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
+ from = scc->red_memory;
+ else
+ from = scc->black_memory;
+
+ dev_dbg(scc->dev, "pcopy: from 0x%p %d bytes\n", from,
+ ctx->dst_nents * 8);
+ len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
+ from, ctx->size, ctx->offset);
+ if (!len) {
+ dev_err(scc->dev, "pcopy err from 0x%p (len=%d)\n", from, len);
+ return -EINVAL;
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "red memory@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ scc->red_memory, ctx->size, 1);
+ print_hex_dump(KERN_ERR,
+ "black memory@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ scc->black_memory, ctx->size, 1);
+#endif
+
+ ctx->offset += len;
+
+ if (ctx->offset < ablkreq->nbytes)
+ return -EINPROGRESS;
+
+ return 0;
+}
+
+static int mxc_scc_ablkcipher_req_init(struct ablkcipher_request *req,
+ struct mxc_scc_ctx *ctx)
+{
+ struct mxc_scc *scc = ctx->scc;
+ int nents;
+
+ nents = sg_nents_for_len(req->src, req->nbytes);
+ if (nents < 0) {
+ dev_err(scc->dev, "Invalid number of src SC");
+ return nents;
+ }
+ ctx->src_nents = nents;
+
+ nents = sg_nents_for_len(req->dst, req->nbytes);
+ if (nents < 0) {
+ dev_err(scc->dev, "Invalid number of dst SC");
+ return nents;
+ }
+ ctx->dst_nents = nents;
+
+ ctx->size = 0;
+ ctx->offset = 0;
+
+ return 0;
+}
+
+static int mxc_scc_ablkcipher_req_complete(struct crypto_async_request *req,
+ struct mxc_scc_ctx *ctx,
+ int result)
+{
+ struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+ struct mxc_scc *scc = ctx->scc;
+
+ scc->req = NULL;
+ scc->bytes_remaining = scc->memory_size_bytes;
+
+ if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE)
+ memcpy(ablkreq->info, scc->base + SCC_SCM_INIT_VECTOR_0,
+ scc->block_size_bytes);
+
+ req->complete(req, result);
+ scc->hw_busy = false;
+
+ return 0;
+}
+
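+/*
+ * Copy the next chunk of request data into SCC RAM (black RAM when
+ * decrypting, red RAM when encrypting) and append padding to a
+ * trailing partial block.
+ */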
+static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
+ struct ablkcipher_request *req)
+{
+ u8 padding_buffer[sizeof(u16) + sizeof(scc_block_padding)];
+ size_t len = min_t(size_t, req->nbytes - ctx->offset,
+ ctx->scc->bytes_remaining);
+ unsigned int padding_byte_count = 0;
+ struct mxc_scc *scc = ctx->scc;
+ void __iomem *to;
+
+ if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
+ to = scc->black_memory;
+ else
+ to = scc->red_memory;
+
+ if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE && req->info)
+ memcpy(scc->base + SCC_SCM_INIT_VECTOR_0, req->info,
+ scc->block_size_bytes);
+
+ len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
+ to, len, ctx->offset);
+ if (!len) {
+ dev_err(scc->dev, "pcopy err to 0x%p (len=%d)\n", to, len);
+ return -EINVAL;
+ }
+
+ ctx->size = len;
+
+#ifdef DEBUG
+ dev_dbg(scc->dev, "copied %d bytes to 0x%p\n", len, to);
+ print_hex_dump(KERN_ERR,
+ "init vector0@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ scc->base + SCC_SCM_INIT_VECTOR_0, scc->block_size_bytes,
+ 1);
+ print_hex_dump(KERN_ERR,
+ "red memory@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ scc->red_memory, ctx->size, 1);
+ print_hex_dump(KERN_ERR,
+ "black memory@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ scc->black_memory, ctx->size, 1);
+#endif
+
+ scc->bytes_remaining -= len;
+
+ padding_byte_count = len % scc->block_size_bytes;
+
+ if (padding_byte_count) {
+ memcpy(padding_buffer, scc_block_padding, padding_byte_count);
+ memcpy(to + len, padding_buffer, padding_byte_count);
+ ctx->size += padding_byte_count;
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "data to encrypt@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ to, ctx->size, 1);
+#endif
+
+ return 0;
+}
+
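+/*
+ * Load the next chunk into SCC RAM, program the block count and start
+ * the cipher run; completion is signalled by interrupt.
+ */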
+static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
+ struct crypto_async_request *req)
+{
+ struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
+ struct mxc_scc *scc = ctx->scc;
+ int err;
+
+ dev_dbg(scc->dev, "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
+ ablkreq->nbytes, ablkreq->src, ablkreq->dst);
+
+ writel(0, scc->base + SCC_SCM_ERROR_STATUS);
+
+ err = mxc_scc_put_data(ctx, ablkreq);
+ if (err) {
+ mxc_scc_ablkcipher_req_complete(req, ctx, err);
+ return;
+ }
+
+ dev_dbg(scc->dev, "Start encryption (0x%p/0x%p)\n",
+ (void *)readl(scc->base + SCC_SCM_RED_START),
+ (void *)readl(scc->base + SCC_SCM_BLACK_START));
+
+ /* clear interrupt control registers */
+ writel(SCC_SCM_INTR_CTRL_CLR_INTR,
+ scc->base + SCC_SCM_INTR_CTRL);
+
+ writel((ctx->size / ctx->scc->block_size_bytes) - 1,
+ scc->base + SCC_SCM_LENGTH);
+
+ dev_dbg(scc->dev, "Process %d block(s) in 0x%p\n",
+ ctx->size / ctx->scc->block_size_bytes,
+ (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE) ? scc->black_memory :
+ scc->red_memory);
+
+ writel(ctx->ctrl, scc->base + SCC_SCM_CTRL);
+}
+
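+/*
+ * Threaded interrupt handler: collect the finished chunk and either
+ * complete the request or dispatch its next chunk.
+ */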
+static irqreturn_t mxc_scc_int(int irq, void *priv)
+{
+ struct crypto_async_request *req;
+ struct mxc_scc_ctx *ctx;
+ struct mxc_scc *scc = priv;
+ int status;
+ int ret;
+
+ status = readl(scc->base + SCC_SCM_STATUS);
+
+ /* clear interrupt control registers */
+ writel(SCC_SCM_INTR_CTRL_CLR_INTR, scc->base + SCC_SCM_INTR_CTRL);
+
+ if (status & SCC_SCM_STATUS_BUSY)
+ return IRQ_NONE;
+
+ req = scc->req;
+ if (req) {
+ ctx = crypto_tfm_ctx(req->tfm);
+ ret = mxc_scc_get_data(ctx, req);
+ if (ret != -EINPROGRESS)
+ mxc_scc_ablkcipher_req_complete(req, ctx, ret);
+ else
+ mxc_scc_ablkcipher_next(ctx, req);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mxc_scc_cra_init(struct crypto_tfm *tfm)
+{
+ struct mxc_scc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct mxc_scc_crypto_tmpl *algt;
+
+ algt = container_of(alg, struct mxc_scc_crypto_tmpl, alg);
+
+ ctx->scc = algt->scc;
+ return 0;
+}
+
+static void mxc_scc_dequeue_req_unlocked(struct mxc_scc_ctx *ctx)
+{
+ struct crypto_async_request *req, *backlog;
+
+ if (ctx->scc->hw_busy)
+ return;
+
+ spin_lock_bh(&ctx->scc->lock);
+ backlog = crypto_get_backlog(&ctx->scc->queue);
+ req = crypto_dequeue_request(&ctx->scc->queue);
+ ctx->scc->req = req;
+ ctx->scc->hw_busy = true;
+ spin_unlock_bh(&ctx->scc->lock);
+
+ if (!req)
+ return;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ mxc_scc_ablkcipher_next(ctx, req);
+}
+
+static int mxc_scc_queue_req(struct mxc_scc_ctx *ctx,
+ struct crypto_async_request *req)
+{
+ int ret;
+
+ spin_lock_bh(&ctx->scc->lock);
+ ret = crypto_enqueue_request(&ctx->scc->queue, req);
+ spin_unlock_bh(&ctx->scc->lock);
+
+ if (ret != -EINPROGRESS)
+ return ret;
+
+ mxc_scc_dequeue_req_unlocked(ctx);
+
+ return -EINPROGRESS;
+}
+
+static int mxc_scc_des3_op(struct mxc_scc_ctx *ctx,
+ struct ablkcipher_request *req)
+{
+ int err;
+
+ err = mxc_scc_ablkcipher_req_init(req, ctx);
+ if (err)
+ return err;
+
+ return mxc_scc_queue_req(ctx, &req->base);
+}
+
+static int mxc_scc_ecb_des_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+ struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+
+ return mxc_scc_des3_op(ctx, req);
+}
+
+static int mxc_scc_ecb_des_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+ struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+ ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
+
+ return mxc_scc_des3_op(ctx, req);
+}
+
+static int mxc_scc_cbc_des_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+ struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+ ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
+
+ return mxc_scc_des3_op(ctx, req);
+}
+
+static int mxc_scc_cbc_des_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
+ struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+
+ ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
+ ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
+ ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;
+
+ return mxc_scc_des3_op(ctx, req);
+}
+
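+/*
+ * Point the red/black memory windows past the reserved key-slot area
+ * and reset the bytes-remaining accounting.
+ */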
+static void mxc_scc_hw_init(struct mxc_scc *scc)
+{
+ int offset;
+
+ offset = SCC_NON_RESERVED_OFFSET / scc->block_size_bytes;
+
+ /* Fill the RED_START register */
+ writel(offset, scc->base + SCC_SCM_RED_START);
+
+ /* Fill the BLACK_START register */
+ writel(offset, scc->base + SCC_SCM_BLACK_START);
+
+ scc->red_memory = scc->base + SCC_SCM_RED_MEMORY +
+ SCC_NON_RESERVED_OFFSET;
+
+ scc->black_memory = scc->base + SCC_SCM_BLACK_MEMORY +
+ SCC_NON_RESERVED_OFFSET;
+
+ scc->bytes_remaining = scc->memory_size_bytes;
+}
+
+static int mxc_scc_get_config(struct mxc_scc *scc)
+{
+ int config;
+
+ config = readl(scc->base + SCC_SCM_CFG);
+
+ scc->block_size_bytes = config & SCC_SCM_CFG_BLOCK_SIZE_MASK;
+
+ scc->black_ram_size_blocks = (config & SCC_SCM_CFG_BLACK_SIZE_MASK) >>
+ SCC_SCM_CFG_BLACK_SIZE_SHIFT;
+
+ scc->memory_size_bytes = (scc->block_size_bytes *
+ scc->black_ram_size_blocks) -
+ SCC_NON_RESERVED_OFFSET;
+
+ return 0;
+}
+
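+/*
+ * Read the Security Monitor state. A device still in health-check
+ * state is nudged towards the secure state by feeding the Algorithm
+ * Sequence Checker a trivial sequence.
+ */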
+static enum mxc_scc_state mxc_scc_get_state(struct mxc_scc *scc)
+{
+ enum mxc_scc_state state;
+ int status;
+
+ status = readl(scc->base + SCC_SMN_STATUS) &
+ SCC_SMN_STATUS_STATE_MASK;
+
+ /* If in Health Check state, try to bring it up to the secure state */
+ if (status == SCC_SMN_STATE_HEALTH_CHECK) {
+ /*
+ * Write a simple algorithm to the Algorithm Sequence
+ * Checker (ASC)
+ */
+ writel(0xaaaa, scc->base + SCC_SMN_SEQ_START);
+ writel(0x5555, scc->base + SCC_SMN_SEQ_END);
+ writel(0x5555, scc->base + SCC_SMN_SEQ_CHECK);
+
+ status = readl(scc->base + SCC_SMN_STATUS) &
+ SCC_SMN_STATUS_STATE_MASK;
+ }
+
+ switch (status) {
+ case SCC_SMN_STATE_NON_SECURE:
+ case SCC_SMN_STATE_SECURE:
+ state = SCC_STATE_OK;
+ break;
+ case SCC_SMN_STATE_FAIL:
+ state = SCC_STATE_FAILED;
+ break;
+ default:
+ state = SCC_STATE_UNIMPLEMENTED;
+ break;
+ }
+
+ return state;
+}
+
+static struct mxc_scc_crypto_tmpl scc_ecb_des = {
+ .alg = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ecb-des3-scc",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mxc_scc_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mxc_scc_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .encrypt = mxc_scc_ecb_des_encrypt,
+ .decrypt = mxc_scc_ecb_des_decrypt,
+ }
+ }
+};
+
+static struct mxc_scc_crypto_tmpl scc_cbc_des = {
+ .alg = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-des3-scc",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct mxc_scc_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = mxc_scc_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .encrypt = mxc_scc_cbc_des_encrypt,
+ .decrypt = mxc_scc_cbc_des_decrypt,
+ }
+ }
+};
+
+static struct mxc_scc_crypto_tmpl *scc_crypto_algs[] = {
+ &scc_ecb_des,
+ &scc_cbc_des,
+};
+
+static int mxc_scc_crypto_register(struct mxc_scc *scc)
+{
+ int i;
+ int err = 0;
+
+ for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++) {
+ scc_crypto_algs[i]->scc = scc;
+ err = crypto_register_alg(&scc_crypto_algs[i]->alg);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ while (--i >= 0)
+ crypto_unregister_alg(&scc_crypto_algs[i]->alg);
+
+ return err;
+}
+
+static void mxc_scc_crypto_unregister(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++)
+ crypto_unregister_alg(&scc_crypto_algs[i]->alg);
+}
+
+static int mxc_scc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct mxc_scc *scc;
+ enum mxc_scc_state state;
+ int irq;
+ int ret;
+ int i;
+
+ scc = devm_kzalloc(dev, sizeof(*scc), GFP_KERNEL);
+ if (!scc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ scc->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(scc->base))
+ return PTR_ERR(scc->base);
+
+ scc->clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(scc->clk)) {
+ dev_err(dev, "Could not get ipg clock\n");
+ return PTR_ERR(scc->clk);
+ }
+
+ clk_prepare_enable(scc->clk);
+
+ /* clear error status register */
+ writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);
+
+ /* clear interrupt control registers */
+ writel(SCC_SCM_INTR_CTRL_CLR_INTR |
+ SCC_SCM_INTR_CTRL_MASK_INTR,
+ scc->base + SCC_SCM_INTR_CTRL);
+
+ writel(SCC_SMN_COMMAND_CLR_INTR |
+ SCC_SMN_COMMAND_EN_INTR,
+ scc->base + SCC_SMN_COMMAND);
+
+ scc->dev = dev;
+ platform_set_drvdata(pdev, scc);
+
+ ret = mxc_scc_get_config(scc);
+ if (ret)
+ goto err_out;
+
+ state = mxc_scc_get_state(scc);
+
+ if (state != SCC_STATE_OK) {
+ dev_err(dev, "SCC in unusable state %d\n", state);
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ mxc_scc_hw_init(scc);
+
+ spin_lock_init(&scc->lock);
+ /* FIXME: calculate queue from RAM slots */
+ crypto_init_queue(&scc->queue, 50);
+
+ for (i = 0; i < 2; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ dev_err(dev, "failed to get irq resource\n");
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, mxc_scc_int,
+ IRQF_ONESHOT, dev_name(dev), scc);
+ if (ret)
+ goto err_out;
+ }
+
+ ret = mxc_scc_crypto_register(scc);
+ if (ret) {
+ dev_err(dev, "could not register algorithms");
+ goto err_out;
+ }
+
+ dev_info(dev, "registered successfully.\n");
+
+ return 0;
+
+err_out:
+ clk_disable_unprepare(scc->clk);
+
+ return ret;
+}
+
+static int mxc_scc_remove(struct platform_device *pdev)
+{
+ struct mxc_scc *scc = platform_get_drvdata(pdev);
+
+ mxc_scc_crypto_unregister();
+
+ clk_disable_unprepare(scc->clk);
+
+ return 0;
+}
+
+static const struct of_device_id mxc_scc_dt_ids[] = {
+ { .compatible = "fsl,imx25-scc", .data = NULL, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxc_scc_dt_ids);
+
+static struct platform_driver mxc_scc_driver = {
+ .probe = mxc_scc_probe,
+ .remove = mxc_scc_remove,
+ .driver = {
+ .name = "mxc-scc",
+ .of_match_table = mxc_scc_dt_ids,
+ },
+};
+
+module_platform_driver(mxc_scc_driver);
+MODULE_AUTHOR("Steffen Trumtrar <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Freescale i.MX25 SCC Crypto driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index b85a7a7db..c5aac25a5 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1598,7 +1598,7 @@ static void *new_queue(unsigned long q_type)
static void free_queue(void *p, unsigned long q_type)
{
- return kmem_cache_free(queue_cache[q_type - 1], p);
+ kmem_cache_free(queue_cache[q_type - 1], p);
}
static int queue_cache_init(void)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index d420ec751..ce174d3b8 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -26,7 +26,6 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -176,9 +175,7 @@ struct omap_aes_dev {
struct scatter_walk in_walk;
struct scatter_walk out_walk;
- int dma_in;
struct dma_chan *dma_lch_in;
- int dma_out;
struct dma_chan *dma_lch_out;
int in_sg_len;
int out_sg_len;
@@ -351,30 +348,21 @@ static void omap_aes_dma_out_callback(void *data)
static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
- int err = -ENOMEM;
- dma_cap_mask_t mask;
+ int err;
dd->dma_lch_out = NULL;
dd->dma_lch_in = NULL;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- dd->dma_lch_in = dma_request_slave_channel_compat(mask,
- omap_dma_filter_fn,
- &dd->dma_in,
- dd->dev, "rx");
- if (!dd->dma_lch_in) {
+ dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
+ if (IS_ERR(dd->dma_lch_in)) {
dev_err(dd->dev, "Unable to request in DMA channel\n");
- goto err_dma_in;
+ return PTR_ERR(dd->dma_lch_in);
}
- dd->dma_lch_out = dma_request_slave_channel_compat(mask,
- omap_dma_filter_fn,
- &dd->dma_out,
- dd->dev, "tx");
- if (!dd->dma_lch_out) {
+ dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_lch_out)) {
dev_err(dd->dev, "Unable to request out DMA channel\n");
+ err = PTR_ERR(dd->dma_lch_out);
goto err_dma_out;
}
@@ -382,14 +370,15 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd)
err_dma_out:
dma_release_channel(dd->dma_lch_in);
-err_dma_in:
- if (err)
- pr_err("error: %d\n", err);
+
return err;
}
static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
+ if (dd->pio_only)
+ return;
+
dma_release_channel(dd->dma_lch_out);
dma_release_channel(dd->dma_lch_in);
}
@@ -1080,9 +1069,6 @@ static int omap_aes_get_res_of(struct omap_aes_dev *dd,
goto err;
}
- dd->dma_out = -1; /* Dummy value that's unused */
- dd->dma_in = -1; /* Dummy value that's unused */
-
dd->pdata = match->data;
err:
@@ -1116,24 +1102,6 @@ static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
}
memcpy(res, r, sizeof(*res));
- /* Get the DMA out channel */
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!r) {
- dev_err(dev, "no DMA out resource info\n");
- err = -ENODEV;
- goto err;
- }
- dd->dma_out = r->start;
-
- /* Get the DMA in channel */
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!r) {
- dev_err(dev, "no DMA in resource info\n");
- err = -ENODEV;
- goto err;
- }
- dd->dma_in = r->start;
-
/* Only OMAP2/3 can be non-DT */
dd->pdata = &omap_aes_pdata_omap2;
@@ -1191,7 +1159,9 @@ static int omap_aes_probe(struct platform_device *pdev)
tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
err = omap_aes_dma_init(dd);
- if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
+ if (err == -EPROBE_DEFER) {
+ goto err_irq;
+ } else if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
dd->pio_only = 1;
irq = platform_get_irq(pdev, 0);
@@ -1248,8 +1218,8 @@ err_algs:
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
crypto_unregister_alg(
&dd->pdata->algs_info[i].algs_list[j]);
- if (!dd->pio_only)
- omap_aes_dma_cleanup(dd);
+
+ omap_aes_dma_cleanup(dd);
err_irq:
tasklet_kill(&dd->done_task);
pm_runtime_disable(dev);
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index dd7b93f2f..3eedb0311 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -29,7 +29,6 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -39,6 +38,7 @@
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/des.h>
+#include <crypto/algapi.h>
#define DST_MAXBURST 2
@@ -132,14 +132,10 @@ struct omap_des_dev {
unsigned long flags;
int err;
- /* spinlock used for queues */
- spinlock_t lock;
- struct crypto_queue queue;
-
struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
struct ablkcipher_request *req;
+ struct crypto_engine *engine;
/*
* total is used by PIO mode for book keeping so introduce
* variable total_save as need it to calc page_order
@@ -158,9 +154,7 @@ struct omap_des_dev {
struct scatter_walk in_walk;
struct scatter_walk out_walk;
- int dma_in;
struct dma_chan *dma_lch_in;
- int dma_out;
struct dma_chan *dma_lch_out;
int in_sg_len;
int out_sg_len;
@@ -340,30 +334,21 @@ static void omap_des_dma_out_callback(void *data)
static int omap_des_dma_init(struct omap_des_dev *dd)
{
- int err = -ENOMEM;
- dma_cap_mask_t mask;
+ int err;
dd->dma_lch_out = NULL;
dd->dma_lch_in = NULL;
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- dd->dma_lch_in = dma_request_slave_channel_compat(mask,
- omap_dma_filter_fn,
- &dd->dma_in,
- dd->dev, "rx");
- if (!dd->dma_lch_in) {
+ dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
+ if (IS_ERR(dd->dma_lch_in)) {
dev_err(dd->dev, "Unable to request in DMA channel\n");
- goto err_dma_in;
+ return PTR_ERR(dd->dma_lch_in);
}
- dd->dma_lch_out = dma_request_slave_channel_compat(mask,
- omap_dma_filter_fn,
- &dd->dma_out,
- dd->dev, "tx");
- if (!dd->dma_lch_out) {
+ dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
+ if (IS_ERR(dd->dma_lch_out)) {
dev_err(dd->dev, "Unable to request out DMA channel\n");
+ err = PTR_ERR(dd->dma_lch_out);
goto err_dma_out;
}
@@ -371,14 +356,15 @@ static int omap_des_dma_init(struct omap_des_dev *dd)
err_dma_out:
dma_release_channel(dd->dma_lch_in);
-err_dma_in:
- if (err)
- pr_err("error: %d\n", err);
+
return err;
}
static void omap_des_dma_cleanup(struct omap_des_dev *dd)
{
+ if (dd->pio_only)
+ return;
+
dma_release_channel(dd->dma_lch_out);
dma_release_channel(dd->dma_lch_in);
}
@@ -520,9 +506,7 @@ static void omap_des_finish_req(struct omap_des_dev *dd, int err)
pr_debug("err: %d\n", err);
pm_runtime_put(dd->dev);
- dd->flags &= ~FLAGS_BUSY;
-
- req->base.complete(&req->base, err);
+ crypto_finalize_request(dd->engine, req, err);
}
static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
@@ -585,34 +569,24 @@ static int omap_des_copy_sgs(struct omap_des_dev *dd)
}
static int omap_des_handle_queue(struct omap_des_dev *dd,
- struct ablkcipher_request *req)
+ struct ablkcipher_request *req)
{
- struct crypto_async_request *async_req, *backlog;
- struct omap_des_ctx *ctx;
- struct omap_des_reqctx *rctx;
- unsigned long flags;
- int err, ret = 0;
-
- spin_lock_irqsave(&dd->lock, flags);
if (req)
- ret = ablkcipher_enqueue_request(&dd->queue, req);
- if (dd->flags & FLAGS_BUSY) {
- spin_unlock_irqrestore(&dd->lock, flags);
- return ret;
- }
- backlog = crypto_get_backlog(&dd->queue);
- async_req = crypto_dequeue_request(&dd->queue);
- if (async_req)
- dd->flags |= FLAGS_BUSY;
- spin_unlock_irqrestore(&dd->lock, flags);
+ return crypto_transfer_request_to_engine(dd->engine, req);
- if (!async_req)
- return ret;
+ return 0;
+}
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+static int omap_des_prepare_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct omap_des_dev *dd = omap_des_find_dev(ctx);
+ struct omap_des_reqctx *rctx;
- req = ablkcipher_request_cast(async_req);
+ if (!dd)
+ return -ENODEV;
/* assign new request to device */
dd->req = req;
@@ -642,16 +616,20 @@ static int omap_des_handle_queue(struct omap_des_dev *dd,
dd->ctx = ctx;
ctx->dd = dd;
- err = omap_des_write_ctrl(dd);
- if (!err)
- err = omap_des_crypt_dma_start(dd);
- if (err) {
- /* des_task will not finish it, so do it here */
- omap_des_finish_req(dd, err);
- tasklet_schedule(&dd->queue_task);
- }
+ return omap_des_write_ctrl(dd);
+}
+
+static int omap_des_crypt_req(struct crypto_engine *engine,
+ struct ablkcipher_request *req)
+{
+ struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
+ crypto_ablkcipher_reqtfm(req));
+ struct omap_des_dev *dd = omap_des_find_dev(ctx);
+
+ if (!dd)
+ return -ENODEV;
- return ret; /* return ret, which is enqueue return value */
+ return omap_des_crypt_dma_start(dd);
}
static void omap_des_done_task(unsigned long data)
@@ -683,18 +661,10 @@ static void omap_des_done_task(unsigned long data)
}
omap_des_finish_req(dd, 0);
- omap_des_handle_queue(dd, NULL);
pr_debug("exit\n");
}
-static void omap_des_queue_task(unsigned long data)
-{
- struct omap_des_dev *dd = (struct omap_des_dev *)data;
-
- omap_des_handle_queue(dd, NULL);
-}
-
static int omap_des_crypt(struct ablkcipher_request *req, unsigned long mode)
{
struct omap_des_ctx *ctx = crypto_ablkcipher_ctx(
@@ -999,8 +969,6 @@ static int omap_des_get_of(struct omap_des_dev *dd,
return -EINVAL;
}
- dd->dma_out = -1; /* Dummy value that's unused */
- dd->dma_in = -1; /* Dummy value that's unused */
dd->pdata = match->data;
return 0;
@@ -1016,33 +984,10 @@ static int omap_des_get_of(struct omap_des_dev *dd,
static int omap_des_get_pdev(struct omap_des_dev *dd,
struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct resource *r;
- int err = 0;
-
- /* Get the DMA out channel */
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!r) {
- dev_err(dev, "no DMA out resource info\n");
- err = -ENODEV;
- goto err;
- }
- dd->dma_out = r->start;
-
- /* Get the DMA in channel */
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!r) {
- dev_err(dev, "no DMA in resource info\n");
- err = -ENODEV;
- goto err;
- }
- dd->dma_in = r->start;
-
/* non-DT devices get pdata from pdev */
dd->pdata = pdev->dev.platform_data;
-err:
- return err;
+ return 0;
}
static int omap_des_probe(struct platform_device *pdev)
@@ -1062,9 +1007,6 @@ static int omap_des_probe(struct platform_device *pdev)
dd->dev = dev;
platform_set_drvdata(pdev, dd);
- spin_lock_init(&dd->lock);
- crypto_init_queue(&dd->queue, OMAP_DES_QUEUE_LENGTH);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "no MEM resource info\n");
@@ -1103,10 +1045,11 @@ static int omap_des_probe(struct platform_device *pdev)
(reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
tasklet_init(&dd->done_task, omap_des_done_task, (unsigned long)dd);
- tasklet_init(&dd->queue_task, omap_des_queue_task, (unsigned long)dd);
err = omap_des_dma_init(dd);
- if (err && DES_REG_IRQ_STATUS(dd) && DES_REG_IRQ_ENABLE(dd)) {
+ if (err == -EPROBE_DEFER) {
+ goto err_irq;
+ } else if (err && DES_REG_IRQ_STATUS(dd) && DES_REG_IRQ_ENABLE(dd)) {
dd->pio_only = 1;
irq = platform_get_irq(pdev, 0);
@@ -1144,17 +1087,30 @@ static int omap_des_probe(struct platform_device *pdev)
}
}
+ /* Initialize des crypto engine */
+ dd->engine = crypto_engine_alloc_init(dev, 1);
+ if (!dd->engine)
+ goto err_algs;
+
+ dd->engine->prepare_request = omap_des_prepare_req;
+ dd->engine->crypt_one_request = omap_des_crypt_req;
+ err = crypto_engine_start(dd->engine);
+ if (err)
+ goto err_engine;
+
return 0;
+
+err_engine:
+ crypto_engine_exit(dd->engine);
err_algs:
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
crypto_unregister_alg(
&dd->pdata->algs_info[i].algs_list[j]);
- if (!dd->pio_only)
- omap_des_dma_cleanup(dd);
+
+ omap_des_dma_cleanup(dd);
err_irq:
tasklet_kill(&dd->done_task);
- tasklet_kill(&dd->queue_task);
err_get:
pm_runtime_disable(dev);
err_res:
@@ -1182,7 +1138,6 @@ static int omap_des_remove(struct platform_device *pdev)
&dd->pdata->algs_info[i].algs_list[j]);
tasklet_kill(&dd->done_task);
- tasklet_kill(&dd->queue_task);
omap_des_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
dd = NULL;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 48adb2a09..63464e86f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -29,7 +29,6 @@
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/omap-dma.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -219,7 +218,6 @@ struct omap_sham_dev {
int irq;
spinlock_t lock;
int err;
- unsigned int dma;
struct dma_chan *dma_lch;
struct tasklet_struct done_task;
u8 polling_mode;
@@ -1842,7 +1840,6 @@ static int omap_sham_get_res_of(struct omap_sham_dev *dd,
goto err;
}
- dd->dma = -1; /* Dummy value that's unused */
dd->pdata = match->data;
err:
@@ -1884,15 +1881,6 @@ static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
goto err;
}
- /* Get the DMA */
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!r) {
- dev_err(dev, "no DMA resource info\n");
- err = -ENODEV;
- goto err;
- }
- dd->dma = r->start;
-
/* Only OMAP2/3 can be non-DT */
dd->pdata = &omap_sham_pdata_omap2;
@@ -1946,9 +1934,12 @@ static int omap_sham_probe(struct platform_device *pdev)
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
- &dd->dma, dev, "rx");
- if (!dd->dma_lch) {
+ dd->dma_lch = dma_request_chan(dev, "rx");
+ if (IS_ERR(dd->dma_lch)) {
+ err = PTR_ERR(dd->dma_lch);
+ if (err == -EPROBE_DEFER)
+ goto data_err;
+
dd->polling_mode = 1;
dev_dbg(dev, "using polling mode instead of dma\n");
}
@@ -1995,7 +1986,7 @@ err_algs:
&dd->pdata->algs_info[i].algs_list[j]);
err_pm:
pm_runtime_disable(dev);
- if (dd->dma_lch)
+ if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
data_err:
dev_err(dev, "initialization failed.\n");
@@ -2021,7 +2012,7 @@ static int omap_sham_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task);
pm_runtime_disable(&pdev->dev);
- if (dd->dma_lch)
+ if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
return 0;
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index e13bd08dd..640c3fc87 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -300,9 +300,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- if (adf_dev_stop(accel_dev))
- dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
index 1af321c2c..d2d0ae445 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
@@ -109,29 +109,6 @@ static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}
-static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (adf_iov_putmsg(accel_dev, msg, 0)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Init event to PF\n");
- return -EFAULT;
- }
- return 0;
-}
-
-static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (adf_iov_putmsg(accel_dev, msg, 0))
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Shutdown event to PF\n");
-}
-
void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &c3xxxiov_class;
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 1ac4ae90e..949d77b79 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -238,6 +238,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
@@ -270,9 +272,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- if (adf_dev_stop(accel_dev))
- dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 512c56509..bc5cbc193 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -300,9 +300,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- if (adf_dev_stop(accel_dev))
- dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
index baf4b509c..38e4bc04f 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
@@ -109,29 +109,6 @@ static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}
-static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (adf_iov_putmsg(accel_dev, msg, 0)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Init event to PF\n");
- return -EFAULT;
- }
- return 0;
-}
-
-static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (adf_iov_putmsg(accel_dev, msg, 0))
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Shutdown event to PF\n");
-}
-
void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &c62xiov_class;
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index d2e4b928f..7540ce13b 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -238,6 +238,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
@@ -270,9 +272,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- if (adf_dev_stop(accel_dev))
- dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 92561c87f..5fc3dbb9a 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -10,7 +10,6 @@ clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
intel_qat-objs := adf_cfg.o \
adf_isr.o \
- adf_vf_isr.o \
adf_ctl_drv.o \
adf_dev_mgr.o \
adf_init.o \
@@ -28,4 +27,5 @@ intel_qat-objs := adf_cfg.o \
qat_hal.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
-intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o \
+ adf_vf2pf_msg.o adf_vf_isr.o
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
index eb557f69e..ce7c4626c 100644
--- a/drivers/crypto/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -61,7 +61,7 @@
#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
#define ADF_ADMINMSG_LEN 32
-static const u8 const_tab[1024] = {
+static const u8 const_tab[1024] __aligned(1024) = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
index 135751113..7632ed0f2 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_strings.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
@@ -57,10 +57,8 @@
#define ADF_RING_DC_SIZE "NumConcurrentRequests"
#define ADF_RING_ASYM_TX "RingAsymTx"
#define ADF_RING_SYM_TX "RingSymTx"
-#define ADF_RING_RND_TX "RingNrbgTx"
#define ADF_RING_ASYM_RX "RingAsymRx"
#define ADF_RING_SYM_RX "RingSymRx"
-#define ADF_RING_RND_RX "RingNrbgRx"
#define ADF_RING_DC_TX "RingTx"
#define ADF_RING_DC_RX "RingRx"
#define ADF_ETRMGR_BANK "Bank"
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 976b01e58..75faa39bc 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -67,7 +67,7 @@
#define ADF_STATUS_AE_INITIALISED 4
#define ADF_STATUS_AE_UCODE_LOADED 5
#define ADF_STATUS_AE_STARTED 6
-#define ADF_STATUS_ORPHAN_TH_RUNNING 7
+#define ADF_STATUS_PF_RUNNING 7
#define ADF_STATUS_IRQ_ALLOCATED 8
enum adf_dev_reset_mode {
@@ -103,7 +103,7 @@ int adf_service_unregister(struct service_hndl *service);
int adf_dev_init(struct adf_accel_dev *accel_dev);
int adf_dev_start(struct adf_accel_dev *accel_dev);
-int adf_dev_stop(struct adf_accel_dev *accel_dev);
+void adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
@@ -236,8 +236,13 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
uint32_t vf_mask);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+
+int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
int adf_init_pf_wq(void);
void adf_exit_pf_wq(void);
+int adf_init_vf_wq(void);
+void adf_exit_vf_wq(void);
#else
static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
{
@@ -256,6 +261,15 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
}
+static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+
static inline int adf_init_pf_wq(void)
{
return 0;
@@ -264,5 +278,15 @@ static inline int adf_init_pf_wq(void)
static inline void adf_exit_pf_wq(void)
{
}
+
+static inline int adf_init_vf_wq(void)
+{
+ return 0;
+}
+
+static inline void adf_exit_vf_wq(void)
+{
+}
+
#endif
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 3c3f94829..abc7a7f64 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -270,26 +270,33 @@ static int adf_ctl_is_device_in_use(int id)
return 0;
}
-static int adf_ctl_stop_devices(uint32_t id)
+static void adf_ctl_stop_devices(uint32_t id)
{
struct adf_accel_dev *accel_dev;
- int ret = 0;
- list_for_each_entry_reverse(accel_dev, adf_devmgr_get_head(), list) {
+ list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
if (!adf_dev_started(accel_dev))
continue;
- if (adf_dev_stop(accel_dev)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to stop qat_dev%d\n", id);
- ret = -EFAULT;
- } else {
- adf_dev_shutdown(accel_dev);
- }
+ /* First stop all VFs */
+ if (!accel_dev->is_vf)
+ continue;
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+ }
+ }
+
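+ /* Then stop the remaining (PF) devices */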
+ list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
+ if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+ if (!adf_dev_started(accel_dev))
+ continue;
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
}
}
- return ret;
}
static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
@@ -318,9 +325,8 @@ static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
ctl_data->device_id);
- ret = adf_ctl_stop_devices(ctl_data->device_id);
- if (ret)
- pr_err("QAT: failed to stop device.\n");
+ adf_ctl_stop_devices(ctl_data->device_id);
+
out:
kfree(ctl_data);
return ret;
@@ -465,12 +471,17 @@ static int __init adf_register_ctl_device_driver(void)
if (adf_init_pf_wq())
goto err_pf_wq;
+ if (adf_init_vf_wq())
+ goto err_vf_wq;
+
if (qat_crypto_register())
goto err_crypto_register;
return 0;
err_crypto_register:
+ adf_exit_vf_wq();
+err_vf_wq:
adf_exit_pf_wq();
err_pf_wq:
adf_exit_aer();
@@ -485,6 +496,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
{
adf_chr_drv_destroy();
adf_exit_aer();
+ adf_exit_vf_wq();
adf_exit_pf_wq();
qat_crypto_unregister();
adf_clean_vf_map(false);
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index ef5575e4a..888c6675e 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -236,9 +236,9 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
* is shutting down.
* To be used by QAT device specific drivers.
*
- * Return: 0 on success, error code otherwise.
+ * Return: void
*/
-int adf_dev_stop(struct adf_accel_dev *accel_dev)
+void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
struct list_head *list_itr;
@@ -246,9 +246,9 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
int ret;
if (!adf_dev_started(accel_dev) &&
- !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
- return 0;
- }
+ !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
+ return;
+
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
@@ -279,8 +279,6 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
else
clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
}
-
- return 0;
}
EXPORT_SYMBOL_GPL(adf_dev_stop);
@@ -329,6 +327,8 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
clear_bit(accel_dev->accel_id, &service->init_status);
}
+ hw_data->disable_iov(accel_dev);
+
if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
hw_data->free_irq(accel_dev);
clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
@@ -344,7 +344,6 @@ void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
if (hw_data->exit_admin_comms)
hw_data->exit_admin_comms(accel_dev);
- hw_data->disable_iov(accel_dev);
adf_cleanup_etr_data(accel_dev);
adf_dev_restore(accel_dev);
}
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index b81f79acc..06d49017a 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -302,7 +302,7 @@ static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
}
/**
- * adf_vf_isr_resource_free() - Free IRQ for acceleration device
+ * adf_isr_resource_free() - Free IRQ for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function frees interrupts for acceleration device.
@@ -317,7 +317,7 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
EXPORT_SYMBOL_GPL(adf_isr_resource_free);
/**
- * adf_vf_isr_resource_alloc() - Allocate IRQ for acceleration device
+ * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
* @accel_dev: Pointer to acceleration device.
*
* Function allocates interrupts for acceleration device.
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
index 38a0415e7..4a526e2f1 100644
--- a/drivers/crypto/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -249,13 +249,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
return -EBUSY;
}
- if (adf_dev_stop(accel_dev)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to stop qat_dev%d\n",
- accel_dev->accel_id);
- return -EFAULT;
- }
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
}
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
new file mode 100644
index 000000000..cd5f37dff
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -0,0 +1,92 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+ Copyright(c) 2015 Intel Corporation.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Contact Information:
+ qat-linux@intel.com
+
+ BSD LICENSE
+ Copyright(c) 2015 Intel Corporation.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+/**
+ * adf_vf2pf_init() - send init msg to PF
+ * @accel_dev: Pointer to acceleration VF device.
+ *
+ * Function sends an init message from the VF to the PF
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
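+ /*
+ * Compose the VF2PF message word: the origin field flags it as a
+ * system message and the type field selects the INIT event.
+ */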
+ u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+ (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
+
+ if (adf_iov_putmsg(accel_dev, msg, 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Init event to PF\n");
+ return -EFAULT;
+ }
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_init);
+
+/**
+ * adf_vf2pf_shutdown() - send shutdown msg to PF
+ * @accel_dev: Pointer to acceleration VF device.
+ *
+ * Function sends a shutdown message from the VF to the PF
+ *
+ * Return: void
+ */
+void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+ u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+ (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
+
+ if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
+ if (adf_iov_putmsg(accel_dev, msg, 0))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send Shutdown event to PF\n");
+}
+EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 09427b3d4..aa689cabe 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -51,6 +51,7 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
@@ -64,6 +65,13 @@
#define ADF_VINTSOU_BUN BIT(0)
#define ADF_VINTSOU_PF2VF BIT(1)
+static struct workqueue_struct *adf_vf_stop_wq;
+
+struct adf_vf_stop_data {
+ struct adf_accel_dev *accel_dev;
+ struct work_struct work;
+};
+
static int adf_enable_msi(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@@ -90,6 +98,20 @@ static void adf_disable_msi(struct adf_accel_dev *accel_dev)
pci_disable_msi(pdev);
}
+static void adf_dev_stop_async(struct work_struct *work)
+{
+ struct adf_vf_stop_data *stop_data =
+ container_of(work, struct adf_vf_stop_data, work);
+ struct adf_accel_dev *accel_dev = stop_data->accel_dev;
+
+ adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
+
+ /* Re-enable PF2VF interrupts */
+ adf_enable_pf2vf_interrupts(accel_dev);
+ kfree(stop_data);
+}
+
static void adf_pf2vf_bh_handler(void *data)
{
struct adf_accel_dev *accel_dev = data;
@@ -107,11 +129,29 @@ static void adf_pf2vf_bh_handler(void *data)
goto err;
switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
- case ADF_PF2VF_MSGTYPE_RESTARTING:
+ case ADF_PF2VF_MSGTYPE_RESTARTING: {
+ struct adf_vf_stop_data *stop_data;
+
dev_dbg(&GET_DEV(accel_dev),
"Restarting msg received from PF 0x%x\n", msg);
- adf_dev_stop(accel_dev);
- break;
+
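+ /*
+ * The PF is going down; clear PF_RUNNING so that a later
+ * adf_vf2pf_shutdown() will not try to message it.
+ */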
+ clear_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
+ stop_data = kzalloc(sizeof(*stop_data), GFP_ATOMIC);
+ if (!stop_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "Couldn't schedule stop for vf_%d\n",
+ accel_dev->accel_id);
+ return;
+ }
+ stop_data->accel_dev = accel_dev;
+ INIT_WORK(&stop_data->work, adf_dev_stop_async);
+ queue_work(adf_vf_stop_wq, &stop_data->work);
+ /* To ack, clear the PF2VFINT bit */
+ msg &= ~BIT(0);
+ ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
+ return;
+ }
case ADF_PF2VF_MSGTYPE_VERSION_RESP:
dev_dbg(&GET_DEV(accel_dev),
"Version resp received from PF 0x%x\n", msg);
@@ -278,3 +318,18 @@ err_out:
return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
+
+int __init adf_init_vf_wq(void)
+{
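+ /* A single module-wide queue, shared by every VF device. */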
+ adf_vf_stop_wq = create_workqueue("adf_vf_stop_wq");
+
+ return !adf_vf_stop_wq ? -EFAULT : 0;
+}
+
+void adf_exit_vf_wq(void)
+{
+ if (adf_vf_stop_wq)
+ destroy_workqueue(adf_vf_stop_wq);
+
+ adf_vf_stop_wq = NULL;
+}
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index e5c0727d4..05f49d4f9 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -593,7 +593,7 @@ int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
ret = -ENOMEM;
ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
- if (!ctx->n)
+ if (!ctx->d)
goto err;
memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
@@ -711,7 +711,7 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
}
qat_crypto_put_instance(ctx->inst);
ctx->n = NULL;
- ctx->d = NULL;
+ ctx->e = NULL;
ctx->d = NULL;
}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index bad6cf035..26ad09f56 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -302,9 +302,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- if (adf_dev_stop(accel_dev))
- dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_disable_aer(accel_dev);
adf_cleanup_accel(accel_dev);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
index dc04ab68d..a3b4dd809 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -109,29 +109,6 @@ static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}
-static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (adf_iov_putmsg(accel_dev, msg, 0)) {
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Init event to PF\n");
- return -EFAULT;
- }
- return 0;
-}
-
-static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
-{
- u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
- (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
-
- if (adf_iov_putmsg(accel_dev, msg, 0))
- dev_err(&GET_DEV(accel_dev),
- "Failed to send Shutdown event to PF\n");
-}
-
void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &dh895xcciov_class;
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index f8cc4bf0a..60df98632 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -238,6 +238,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_err_free_reg;
+ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
+
ret = adf_dev_init(accel_dev);
if (ret)
goto out_err_dev_shutdown;
@@ -270,9 +272,7 @@ static void adf_remove(struct pci_dev *pdev)
pr_err("QAT: Driver removal failed\n");
return;
}
- if (adf_dev_stop(accel_dev))
- dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
-
+ adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
adf_cleanup_accel(accel_dev);
adf_cleanup_pci_dev(accel_dev);
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 5f161a977..2b3a0cfe3 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -11,65 +11,64 @@
*
*/
-#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/crypto.h>
-#include <linux/interrupt.h>
-#include <crypto/algapi.h>
-#include <crypto/aes.h>
#include <crypto/ctr.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
#define _SBF(s, v) ((v) << (s))
-#define _BIT(b) _SBF(b, 1)
/* Feed control registers */
#define SSS_REG_FCINTSTAT 0x0000
-#define SSS_FCINTSTAT_BRDMAINT _BIT(3)
-#define SSS_FCINTSTAT_BTDMAINT _BIT(2)
-#define SSS_FCINTSTAT_HRDMAINT _BIT(1)
-#define SSS_FCINTSTAT_PKDMAINT _BIT(0)
+#define SSS_FCINTSTAT_BRDMAINT BIT(3)
+#define SSS_FCINTSTAT_BTDMAINT BIT(2)
+#define SSS_FCINTSTAT_HRDMAINT BIT(1)
+#define SSS_FCINTSTAT_PKDMAINT BIT(0)
#define SSS_REG_FCINTENSET 0x0004
-#define SSS_FCINTENSET_BRDMAINTENSET _BIT(3)
-#define SSS_FCINTENSET_BTDMAINTENSET _BIT(2)
-#define SSS_FCINTENSET_HRDMAINTENSET _BIT(1)
-#define SSS_FCINTENSET_PKDMAINTENSET _BIT(0)
+#define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
+#define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
+#define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
+#define SSS_FCINTENSET_PKDMAINTENSET BIT(0)
#define SSS_REG_FCINTENCLR 0x0008
-#define SSS_FCINTENCLR_BRDMAINTENCLR _BIT(3)
-#define SSS_FCINTENCLR_BTDMAINTENCLR _BIT(2)
-#define SSS_FCINTENCLR_HRDMAINTENCLR _BIT(1)
-#define SSS_FCINTENCLR_PKDMAINTENCLR _BIT(0)
+#define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
+#define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
+#define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
+#define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)
#define SSS_REG_FCINTPEND 0x000C
-#define SSS_FCINTPEND_BRDMAINTP _BIT(3)
-#define SSS_FCINTPEND_BTDMAINTP _BIT(2)
-#define SSS_FCINTPEND_HRDMAINTP _BIT(1)
-#define SSS_FCINTPEND_PKDMAINTP _BIT(0)
+#define SSS_FCINTPEND_BRDMAINTP BIT(3)
+#define SSS_FCINTPEND_BTDMAINTP BIT(2)
+#define SSS_FCINTPEND_HRDMAINTP BIT(1)
+#define SSS_FCINTPEND_PKDMAINTP BIT(0)
#define SSS_REG_FCFIFOSTAT 0x0010
-#define SSS_FCFIFOSTAT_BRFIFOFUL _BIT(7)
-#define SSS_FCFIFOSTAT_BRFIFOEMP _BIT(6)
-#define SSS_FCFIFOSTAT_BTFIFOFUL _BIT(5)
-#define SSS_FCFIFOSTAT_BTFIFOEMP _BIT(4)
-#define SSS_FCFIFOSTAT_HRFIFOFUL _BIT(3)
-#define SSS_FCFIFOSTAT_HRFIFOEMP _BIT(2)
-#define SSS_FCFIFOSTAT_PKFIFOFUL _BIT(1)
-#define SSS_FCFIFOSTAT_PKFIFOEMP _BIT(0)
+#define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
+#define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
+#define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
+#define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
+#define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
+#define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
+#define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
+#define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)
#define SSS_REG_FCFIFOCTRL 0x0014
-#define SSS_FCFIFOCTRL_DESSEL _BIT(2)
+#define SSS_FCFIFOCTRL_DESSEL BIT(2)
#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
@@ -77,52 +76,52 @@
#define SSS_REG_FCBRDMAS 0x0020
#define SSS_REG_FCBRDMAL 0x0024
#define SSS_REG_FCBRDMAC 0x0028
-#define SSS_FCBRDMAC_BYTESWAP _BIT(1)
-#define SSS_FCBRDMAC_FLUSH _BIT(0)
+#define SSS_FCBRDMAC_BYTESWAP BIT(1)
+#define SSS_FCBRDMAC_FLUSH BIT(0)
#define SSS_REG_FCBTDMAS 0x0030
#define SSS_REG_FCBTDMAL 0x0034
#define SSS_REG_FCBTDMAC 0x0038
-#define SSS_FCBTDMAC_BYTESWAP _BIT(1)
-#define SSS_FCBTDMAC_FLUSH _BIT(0)
+#define SSS_FCBTDMAC_BYTESWAP BIT(1)
+#define SSS_FCBTDMAC_FLUSH BIT(0)
#define SSS_REG_FCHRDMAS 0x0040
#define SSS_REG_FCHRDMAL 0x0044
#define SSS_REG_FCHRDMAC 0x0048
-#define SSS_FCHRDMAC_BYTESWAP _BIT(1)
-#define SSS_FCHRDMAC_FLUSH _BIT(0)
+#define SSS_FCHRDMAC_BYTESWAP BIT(1)
+#define SSS_FCHRDMAC_FLUSH BIT(0)
#define SSS_REG_FCPKDMAS 0x0050
#define SSS_REG_FCPKDMAL 0x0054
#define SSS_REG_FCPKDMAC 0x0058
-#define SSS_FCPKDMAC_BYTESWAP _BIT(3)
-#define SSS_FCPKDMAC_DESCEND _BIT(2)
-#define SSS_FCPKDMAC_TRANSMIT _BIT(1)
-#define SSS_FCPKDMAC_FLUSH _BIT(0)
+#define SSS_FCPKDMAC_BYTESWAP BIT(3)
+#define SSS_FCPKDMAC_DESCEND BIT(2)
+#define SSS_FCPKDMAC_TRANSMIT BIT(1)
+#define SSS_FCPKDMAC_FLUSH BIT(0)
#define SSS_REG_FCPKDMAO 0x005C
/* AES registers */
#define SSS_REG_AES_CONTROL 0x00
-#define SSS_AES_BYTESWAP_DI _BIT(11)
-#define SSS_AES_BYTESWAP_DO _BIT(10)
-#define SSS_AES_BYTESWAP_IV _BIT(9)
-#define SSS_AES_BYTESWAP_CNT _BIT(8)
-#define SSS_AES_BYTESWAP_KEY _BIT(7)
-#define SSS_AES_KEY_CHANGE_MODE _BIT(6)
+#define SSS_AES_BYTESWAP_DI BIT(11)
+#define SSS_AES_BYTESWAP_DO BIT(10)
+#define SSS_AES_BYTESWAP_IV BIT(9)
+#define SSS_AES_BYTESWAP_CNT BIT(8)
+#define SSS_AES_BYTESWAP_KEY BIT(7)
+#define SSS_AES_KEY_CHANGE_MODE BIT(6)
#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
-#define SSS_AES_FIFO_MODE _BIT(3)
+#define SSS_AES_FIFO_MODE BIT(3)
#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
-#define SSS_AES_MODE_DECRYPT _BIT(0)
+#define SSS_AES_MODE_DECRYPT BIT(0)
#define SSS_REG_AES_STATUS 0x04
-#define SSS_AES_BUSY _BIT(2)
-#define SSS_AES_INPUT_READY _BIT(1)
-#define SSS_AES_OUTPUT_READY _BIT(0)
+#define SSS_AES_BUSY BIT(2)
+#define SSS_AES_INPUT_READY BIT(1)
+#define SSS_AES_OUTPUT_READY BIT(0)
#define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2))
#define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2))
@@ -139,7 +138,7 @@
SSS_AES_REG(dev, reg))
/* HW engine modes */
-#define FLAGS_AES_DECRYPT _BIT(0)
+#define FLAGS_AES_DECRYPT BIT(0)
#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
#define FLAGS_AES_CBC _SBF(1, 0x01)
#define FLAGS_AES_CTR _SBF(1, 0x02)
@@ -149,7 +148,6 @@
/**
* struct samsung_aes_variant - platform specific SSS driver data
- * @has_hash_irq: true if SSS module uses hash interrupt, false otherwise
* @aes_offset: AES register offset from SSS module's base.
*
* Specifies platform specific configuration of SSS module.
@@ -157,7 +155,6 @@
* expansion of its usage.
*/
struct samsung_aes_variant {
- bool has_hash_irq;
unsigned int aes_offset;
};
@@ -178,7 +175,6 @@ struct s5p_aes_dev {
struct clk *clk;
void __iomem *ioaddr;
void __iomem *aes_ioaddr;
- int irq_hash;
int irq_fc;
struct ablkcipher_request *req;
@@ -186,6 +182,10 @@ struct s5p_aes_dev {
struct scatterlist *sg_src;
struct scatterlist *sg_dst;
+ /* In case of unaligned access: */
+ struct scatterlist *sg_src_cpy;
+ struct scatterlist *sg_dst_cpy;
+
struct tasklet_struct tasklet;
struct crypto_queue queue;
bool busy;
@@ -197,12 +197,10 @@ struct s5p_aes_dev {
static struct s5p_aes_dev *s5p_dev;
static const struct samsung_aes_variant s5p_aes_data = {
- .has_hash_irq = true,
.aes_offset = 0x4000,
};
static const struct samsung_aes_variant exynos_aes_data = {
- .has_hash_irq = false,
.aes_offset = 0x200,
};
@@ -245,8 +243,45 @@ static void s5p_set_dma_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}
+static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
+{
+ int len;
+
+ if (!*sg)
+ return;
+
+ len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+ free_pages((unsigned long)sg_virt(*sg), get_order(len));
+
+ kfree(*sg);
+ *sg = NULL;
+}
+
+static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
+ unsigned int nbytes, int out)
+{
+ struct scatter_walk walk;
+
+ if (!nbytes)
+ return;
+
+ scatterwalk_start(&walk, sg);
+ scatterwalk_copychunks(buf, &walk, nbytes, out);
+ scatterwalk_done(&walk, out, 0);
+}
+
static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
{
+ if (dev->sg_dst_cpy) {
+ dev_dbg(dev->dev,
+ "Copying %d bytes of output data back to original place\n",
+ dev->req->nbytes);
+ s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
+ dev->req->nbytes, 1);
+ }
+ s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
+ s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+
/* holding a lock outside */
dev->req->base.complete(&dev->req->base, err);
dev->busy = false;
@@ -262,15 +297,37 @@ static void s5p_unset_indata(struct s5p_aes_dev *dev)
dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}
+static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
+ struct scatterlist **dst)
+{
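+ /*
+ * Flatten the unaligned scatterlist into one contiguous buffer,
+ * padded up to AES_BLOCK_SIZE, and describe it with a single-entry
+ * scatterlist that satisfies the engine's alignment requirement.
+ */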
+ void *pages;
+ int len;
+
+ *dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
+ if (!*dst)
+ return -ENOMEM;
+
+ len = ALIGN(dev->req->nbytes, AES_BLOCK_SIZE);
+ pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
+ if (!pages) {
+ kfree(*dst);
+ *dst = NULL;
+ return -ENOMEM;
+ }
+
+ s5p_sg_copy_buf(pages, src, dev->req->nbytes, 0);
+
+ sg_init_table(*dst, 1);
+ sg_set_buf(*dst, pages, len);
+
+ return 0;
+}
+
static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
int err;
- if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
- err = -EINVAL;
- goto exit;
- }
- if (!sg_dma_len(sg)) {
+ if (!sg->length) {
err = -EINVAL;
goto exit;
}
@@ -284,7 +341,7 @@ static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
dev->sg_dst = sg;
err = 0;
- exit:
+exit:
return err;
}
@@ -292,11 +349,7 @@ static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
int err;
- if (!IS_ALIGNED(sg_dma_len(sg), AES_BLOCK_SIZE)) {
- err = -EINVAL;
- goto exit;
- }
- if (!sg_dma_len(sg)) {
+ if (!sg->length) {
err = -EINVAL;
goto exit;
}
@@ -310,47 +363,59 @@ static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
dev->sg_src = sg;
err = 0;
- exit:
+exit:
return err;
}
-static void s5p_aes_tx(struct s5p_aes_dev *dev)
+/*
+ * Returns true if new transmitting (output) data is ready and its
+ * address+length have to be written to device (by calling
+ * s5p_set_dma_outdata()). False otherwise.
+ */
+static bool s5p_aes_tx(struct s5p_aes_dev *dev)
{
int err = 0;
+ bool ret = false;
s5p_unset_outdata(dev);
if (!sg_is_last(dev->sg_dst)) {
err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
- if (err) {
+ if (err)
s5p_aes_complete(dev, err);
- return;
- }
-
- s5p_set_dma_outdata(dev, dev->sg_dst);
+ else
+ ret = true;
} else {
s5p_aes_complete(dev, err);
dev->busy = true;
tasklet_schedule(&dev->tasklet);
}
+
+ return ret;
}
-static void s5p_aes_rx(struct s5p_aes_dev *dev)
+/*
+ * Returns true if new receiving (input) data is ready and its
+ * address+length have to be written to device (by calling
+ * s5p_set_dma_indata()). False otherwise.
+ */
+static bool s5p_aes_rx(struct s5p_aes_dev *dev)
{
int err;
+ bool ret = false;
s5p_unset_indata(dev);
if (!sg_is_last(dev->sg_src)) {
err = s5p_set_indata(dev, sg_next(dev->sg_src));
- if (err) {
+ if (err)
s5p_aes_complete(dev, err);
- return;
- }
-
- s5p_set_dma_indata(dev, dev->sg_src);
+ else
+ ret = true;
}
+
+ return ret;
}
static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
@@ -359,18 +424,29 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
uint32_t status;
unsigned long flags;
+ bool set_dma_tx = false;
+ bool set_dma_rx = false;
spin_lock_irqsave(&dev->lock, flags);
- if (irq == dev->irq_fc) {
- status = SSS_READ(dev, FCINTSTAT);
- if (status & SSS_FCINTSTAT_BRDMAINT)
- s5p_aes_rx(dev);
- if (status & SSS_FCINTSTAT_BTDMAINT)
- s5p_aes_tx(dev);
-
- SSS_WRITE(dev, FCINTPEND, status);
- }
+ status = SSS_READ(dev, FCINTSTAT);
+ if (status & SSS_FCINTSTAT_BRDMAINT)
+ set_dma_rx = s5p_aes_rx(dev);
+ if (status & SSS_FCINTSTAT_BTDMAINT)
+ set_dma_tx = s5p_aes_tx(dev);
+
+ SSS_WRITE(dev, FCINTPEND, status);
+
+ /*
+ * Writing length of DMA block (either receiving or transmitting)
+ * will start the operation immediately, so this should be done
+ * at the end (even after clearing pending interrupts to not miss the
+ * interrupt).
+ */
+ if (set_dma_tx)
+ s5p_set_dma_outdata(dev, dev->sg_dst);
+ if (set_dma_rx)
+ s5p_set_dma_indata(dev, dev->sg_src);
spin_unlock_irqrestore(&dev->lock, flags);
@@ -395,6 +471,71 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
memcpy_toio(keystart, key, keylen);
}
+static bool s5p_is_sg_aligned(struct scatterlist *sg)
+{
+ while (sg) {
+ if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
+ return false;
+ sg = sg_next(sg);
+ }
+
+ return true;
+}
+
+static int s5p_set_indata_start(struct s5p_aes_dev *dev,
+ struct ablkcipher_request *req)
+{
+ struct scatterlist *sg;
+ int err;
+
+ dev->sg_src_cpy = NULL;
+ sg = req->src;
+ if (!s5p_is_sg_aligned(sg)) {
+ dev_dbg(dev->dev,
+ "At least one unaligned source scatter list, making a copy\n");
+ err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
+ if (err)
+ return err;
+
+ sg = dev->sg_src_cpy;
+ }
+
+ err = s5p_set_indata(dev, sg);
+ if (err) {
+ s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
+ return err;
+ }
+
+ return 0;
+}
+
+static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
+ struct ablkcipher_request *req)
+{
+ struct scatterlist *sg;
+ int err;
+
+ dev->sg_dst_cpy = NULL;
+ sg = req->dst;
+ if (!s5p_is_sg_aligned(sg)) {
+ dev_dbg(dev->dev,
+ "At least one unaligned dest scatter list, making a copy\n");
+ err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
+ if (err)
+ return err;
+
+ sg = dev->sg_dst_cpy;
+ }
+
+ err = s5p_set_outdata(dev, sg);
+ if (err) {
+ s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+ return err;
+ }
+
+ return 0;
+}
+
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
struct ablkcipher_request *req = dev->req;
@@ -431,19 +572,19 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
SSS_WRITE(dev, FCFIFOCTRL, 0x00);
- err = s5p_set_indata(dev, req->src);
+ err = s5p_set_indata_start(dev, req);
if (err)
goto indata_error;
- err = s5p_set_outdata(dev, req->dst);
+ err = s5p_set_outdata_start(dev, req);
if (err)
goto outdata_error;
SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
- s5p_set_dma_indata(dev, req->src);
- s5p_set_dma_outdata(dev, req->dst);
+ s5p_set_dma_indata(dev, dev->sg_src);
+ s5p_set_dma_outdata(dev, dev->sg_dst);
SSS_WRITE(dev, FCINTENSET,
SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);
@@ -452,10 +593,10 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
return;
- outdata_error:
+outdata_error:
s5p_unset_indata(dev);
- indata_error:
+indata_error:
s5p_aes_complete(dev, err);
spin_unlock_irqrestore(&dev->lock, flags);
}
@@ -506,7 +647,7 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
tasklet_schedule(&dev->tasklet);
- exit:
+exit:
return err;
}
@@ -671,21 +812,6 @@ static int s5p_aes_probe(struct platform_device *pdev)
goto err_irq;
}
- if (variant->has_hash_irq) {
- pdata->irq_hash = platform_get_irq(pdev, 1);
- if (pdata->irq_hash < 0) {
- err = pdata->irq_hash;
- dev_warn(dev, "hash interrupt is not available.\n");
- goto err_irq;
- }
- err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
- IRQF_SHARED, pdev->name, pdev);
- if (err < 0) {
- dev_warn(dev, "hash interrupt is not available.\n");
- goto err_irq;
- }
- }
-
pdata->busy = false;
pdata->variant = variant;
pdata->dev = dev;
@@ -705,7 +831,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
return 0;
- err_algs:
+err_algs:
dev_err(dev, "can't register '%s': %d\n", algs[i].cra_name, err);
for (j = 0; j < i; j++)
@@ -713,7 +839,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
tasklet_kill(&pdata->tasklet);
- err_irq:
+err_irq:
clk_disable_unprepare(pdata->clk);
s5p_dev = NULL;
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
index b9997335f..b18e67d0e 100644
--- a/drivers/crypto/vmx/ppc-xlate.pl
+++ b/drivers/crypto/vmx/ppc-xlate.pl
@@ -139,6 +139,26 @@ my $vmr = sub {
" vor $vx,$vy,$vy";
};
+# Some ABIs specify vrsave, special-purpose register #256, as reserved
+# for system use.
+my $no_vrsave = ($flavour =~ /linux-ppc64le/);
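+# Under that convention, writes to vrsave become a no-op and reads
+# report -1 ("all vector registers in use"), so the SPR is never touched.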
+my $mtspr = sub {
+ my ($f,$idx,$ra) = @_;
+ if ($idx == 256 && $no_vrsave) {
+ " or $ra,$ra,$ra";
+ } else {
+ " mtspr $idx,$ra";
+ }
+};
+my $mfspr = sub {
+ my ($f,$rd,$idx) = @_;
+ if ($idx == 256 && $no_vrsave) {
+ " li $rd,-1";
+ } else {
+ " mfspr $rd,$idx";
+ }
+};
+
# PowerISA 2.06 stuff
sub vsxmem_op {
my ($f, $vrt, $ra, $rb, $op) = @_;
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
new file mode 100644
index 000000000..cedab7572
--- /dev/null
+++ b/drivers/dax/Kconfig
@@ -0,0 +1,26 @@
+menuconfig DEV_DAX
+ tristate "DAX: direct access to differentiated memory"
+ default m if NVDIMM_DAX
+ depends on TRANSPARENT_HUGEPAGE
+ help
+ Support raw access to differentiated (persistence, bandwidth,
+ latency...) memory via an mmap(2) capable character
+ device. Platform firmware or a device driver may identify a
+ platform memory resource that is differentiated from the
+ baseline memory pool. Mappings of a /dev/daxX.Y device impose
+ restrictions that make the mapping behavior deterministic.
+
+if DEV_DAX
+
+config DEV_DAX_PMEM
+ tristate "PMEM DAX: direct access to persistent memory"
+ depends on NVDIMM_DAX
+ default DEV_DAX
+ help
+ Support raw access to persistent memory. Note that this
+ driver consumes memory ranges allocated and exported by the
+ libnvdimm sub-system.
+
+ Say Y if unsure.
+
+endif
diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile
new file mode 100644
index 000000000..27c54e384
--- /dev/null
+++ b/drivers/dax/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_DEV_DAX) += dax.o
+obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
+
+dax_pmem-y := pmem.o
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
new file mode 100644
index 000000000..b891a129b
--- /dev/null
+++ b/drivers/dax/dax.c
@@ -0,0 +1,575 @@
+/*
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/pagemap.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pfn_t.h>
+#include <linux/slab.h>
+#include <linux/dax.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+
+static int dax_major;
+static struct class *dax_class;
+static DEFINE_IDA(dax_minor_ida);
+
+/**
+ * struct dax_region - mapping infrastructure for dax devices
+ * @id: kernel-wide unique region for a memory range
+ * @base: linear address corresponding to @res
+ * @kref: to pin while other agents have a need to do lookups
+ * @dev: parent device backing this region
+ * @align: allocation and mapping alignment for child dax devices
+ * @res: physical address range of the region
+ * @pfn_flags: identify whether the pfns are paged back or not
+ */
+struct dax_region {
+ int id;
+ struct ida ida;
+ void *base;
+ struct kref kref;
+ struct device *dev;
+ unsigned int align;
+ struct resource res;
+ unsigned long pfn_flags;
+};
+
+/**
+ * struct dax_dev - subdivision of a dax region
+ * @region: parent region
+ * @dev: device backing the character device
+ * @kref: enable this data to be tracked in filp->private_data
+ * @alive: !alive + rcu grace period == no new mappings can be established
+ * @id: child id in the region
+ * @num_resources: number of physical address extents in this device
+ * @res: array of physical address ranges
+ */
+struct dax_dev {
+ struct dax_region *region;
+ struct device *dev;
+ struct kref kref;
+ bool alive;
+ int id;
+ int num_resources;
+ struct resource res[0];
+};
+
+static void dax_region_free(struct kref *kref)
+{
+ struct dax_region *dax_region;
+
+ dax_region = container_of(kref, struct dax_region, kref);
+ kfree(dax_region);
+}
+
+void dax_region_put(struct dax_region *dax_region)
+{
+ kref_put(&dax_region->kref, dax_region_free);
+}
+EXPORT_SYMBOL_GPL(dax_region_put);
+
+static void dax_dev_free(struct kref *kref)
+{
+ struct dax_dev *dax_dev;
+
+ dax_dev = container_of(kref, struct dax_dev, kref);
+ dax_region_put(dax_dev->region);
+ kfree(dax_dev);
+}
+
+static void dax_dev_put(struct dax_dev *dax_dev)
+{
+ kref_put(&dax_dev->kref, dax_dev_free);
+}
+
+struct dax_region *alloc_dax_region(struct device *parent, int region_id,
+ struct resource *res, unsigned int align, void *addr,
+ unsigned long pfn_flags)
+{
+ struct dax_region *dax_region;
+
+ dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
+
+ if (!dax_region)
+ return NULL;
+
+ memcpy(&dax_region->res, res, sizeof(*res));
+ dax_region->pfn_flags = pfn_flags;
+ kref_init(&dax_region->kref);
+ dax_region->id = region_id;
+ ida_init(&dax_region->ida);
+ dax_region->align = align;
+ dax_region->dev = parent;
+ dax_region->base = addr;
+
+ return dax_region;
+}
+EXPORT_SYMBOL_GPL(alloc_dax_region);
+
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dax_dev *dax_dev = dev_get_drvdata(dev);
+ unsigned long long size = 0;
+ int i;
+
+ for (i = 0; i < dax_dev->num_resources; i++)
+ size += resource_size(&dax_dev->res[i]);
+
+ return sprintf(buf, "%llu\n", size);
+}
+static DEVICE_ATTR_RO(size);
+
+static struct attribute *dax_device_attributes[] = {
+ &dev_attr_size.attr,
+ NULL,
+};
+
+static const struct attribute_group dax_device_attribute_group = {
+ .attrs = dax_device_attributes,
+};
+
+static const struct attribute_group *dax_attribute_groups[] = {
+ &dax_device_attribute_group,
+ NULL,
+};
+
+static void unregister_dax_dev(void *_dev)
+{
+ struct device *dev = _dev;
+ struct dax_dev *dax_dev = dev_get_drvdata(dev);
+ struct dax_region *dax_region = dax_dev->region;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /*
+ * Note, rcu is not protecting the liveness of dax_dev, rcu is
+ * ensuring that any fault handlers that might have seen
+ * dax_dev->alive == true, have completed. Any fault handlers
+ * that start after synchronize_rcu() has started will abort
+ * upon seeing dax_dev->alive == false.
+ */
+ dax_dev->alive = false;
+ synchronize_rcu();
+
+ get_device(dev);
+ device_unregister(dev);
+ ida_simple_remove(&dax_region->ida, dax_dev->id);
+ ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
+ put_device(dev);
+ dax_dev_put(dax_dev);
+}
+
+int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
+ int count)
+{
+ struct device *parent = dax_region->dev;
+ struct dax_dev *dax_dev;
+ struct device *dev;
+ int rc, minor;
+ dev_t dev_t;
+
+ dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
+ if (!dax_dev)
+ return -ENOMEM;
+ memcpy(dax_dev->res, res, sizeof(*res) * count);
+ dax_dev->num_resources = count;
+ kref_init(&dax_dev->kref);
+ dax_dev->alive = true;
+ dax_dev->region = dax_region;
+ kref_get(&dax_region->kref);
+
+ dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
+ if (dax_dev->id < 0) {
+ rc = dax_dev->id;
+ goto err_id;
+ }
+
+ minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
+ if (minor < 0) {
+ rc = minor;
+ goto err_minor;
+ }
+
+ dev_t = MKDEV(dax_major, minor);
+ dev = device_create_with_groups(dax_class, parent, dev_t, dax_dev,
+ dax_attribute_groups, "dax%d.%d", dax_region->id,
+ dax_dev->id);
+ if (IS_ERR(dev)) {
+ rc = PTR_ERR(dev);
+ goto err_create;
+ }
+ dax_dev->dev = dev;
+
+ rc = devm_add_action(dax_region->dev, unregister_dax_dev, dev);
+ if (rc) {
+ unregister_dax_dev(dev);
+ return rc;
+ }
+
+ return 0;
+
+ err_create:
+ ida_simple_remove(&dax_minor_ida, minor);
+ err_minor:
+ ida_simple_remove(&dax_region->ida, dax_dev->id);
+ err_id:
+ dax_dev_put(dax_dev);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(devm_create_dax_dev);
+
+/* return an unmapped area aligned to the dax region specified alignment */
+static unsigned long dax_dev_get_unmapped_area(struct file *filp,
+ unsigned long addr, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ unsigned long off, off_end, off_align, len_align, addr_align, align;
+ struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
+ struct dax_region *dax_region;
+
+ if (!dax_dev || addr)
+ goto out;
+
+ dax_region = dax_dev->region;
+ align = dax_region->align;
+ off = pgoff << PAGE_SHIFT;
+ off_end = off + len;
+ off_align = round_up(off, align);
+
+ if ((off_end <= off_align) || ((off_end - off_align) < align))
+ goto out;
+
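+ /*
+ * Request an extra @align bytes so the returned address can be
+ * nudged to match the file offset's alignment without running
+ * past the end of the mapping.
+ */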
+ len_align = len + align;
+ if ((off + len_align) < off)
+ goto out;
+
+ addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
+ pgoff, flags);
+ if (!IS_ERR_VALUE(addr_align)) {
+ addr_align += (off - addr_align) & (align - 1);
+ return addr_align;
+ }
+ out:
+ return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+}
+
+static int __match_devt(struct device *dev, const void *data)
+{
+ const dev_t *devt = data;
+
+ return dev->devt == *devt;
+}
+
+static struct device *dax_dev_find(dev_t dev_t)
+{
+ return class_find_device(dax_class, NULL, &dev_t, __match_devt);
+}
+
+static int dax_dev_open(struct inode *inode, struct file *filp)
+{
+ struct dax_dev *dax_dev = NULL;
+ struct device *dev;
+
+ dev = dax_dev_find(inode->i_rdev);
+ if (!dev)
+ return -ENXIO;
+
+ device_lock(dev);
+ dax_dev = dev_get_drvdata(dev);
+ if (dax_dev) {
+ dev_dbg(dev, "%s\n", __func__);
+ filp->private_data = dax_dev;
+ kref_get(&dax_dev->kref);
+ inode->i_flags = S_DAX;
+ }
+ device_unlock(dev);
+
+ if (!dax_dev) {
+ put_device(dev);
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static int dax_dev_release(struct inode *inode, struct file *filp)
+{
+ struct dax_dev *dax_dev = filp->private_data;
+ struct device *dev = dax_dev->dev;
+
+ dev_dbg(dax_dev->dev, "%s\n", __func__);
+ dax_dev_put(dax_dev);
+ put_device(dev);
+
+ return 0;
+}
+
+static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
+ const char *func)
+{
+ struct dax_region *dax_region = dax_dev->region;
+ struct device *dev = dax_dev->dev;
+ unsigned long mask;
+
+ if (!dax_dev->alive)
+ return -ENXIO;
+
+ /* prevent private / writable mappings from being established */
+ if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) {
+ dev_info(dev, "%s: %s: fail, attempted private mapping\n",
+ current->comm, func);
+ return -EINVAL;
+ }
+
+ mask = dax_region->align - 1;
+ if (vma->vm_start & mask || vma->vm_end & mask) {
+ dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
+ current->comm, func, vma->vm_start, vma->vm_end,
+ mask);
+ return -EINVAL;
+ }
+
+ if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
+ && (vma->vm_flags & VM_DONTCOPY) == 0) {
+ dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
+ current->comm, func);
+ return -EINVAL;
+ }
+
+ if (!vma_is_dax(vma)) {
+ dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
+ current->comm, func);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
+ unsigned long size)
+{
+ struct resource *res;
+ phys_addr_t phys;
+ int i;
+
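+ /*
+ * Treat @pgoff as a linear page offset into the concatenation of
+ * all resource extents; -1 means the range is out of bounds or
+ * straddles an extent boundary.
+ */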
+ for (i = 0; i < dax_dev->num_resources; i++) {
+ res = &dax_dev->res[i];
+ phys = pgoff * PAGE_SIZE + res->start;
+ if (phys >= res->start && phys <= res->end)
+ break;
+ pgoff -= PHYS_PFN(resource_size(res));
+ }
+
+ if (i < dax_dev->num_resources) {
+ res = &dax_dev->res[i];
+ if (phys + size - 1 <= res->end)
+ return phys;
+ }
+
+ return -1;
+}
+
+static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ unsigned long vaddr = (unsigned long) vmf->virtual_address;
+ struct device *dev = dax_dev->dev;
+ struct dax_region *dax_region;
+ int rc = VM_FAULT_SIGBUS;
+ phys_addr_t phys;
+ pfn_t pfn;
+
+ if (check_vma(dax_dev, vma, __func__))
+ return VM_FAULT_SIGBUS;
+
+ dax_region = dax_dev->region;
+ if (dax_region->align > PAGE_SIZE) {
+ dev_dbg(dev, "%s: alignment > fault size\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
+ if (phys == -1) {
+ dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+ vmf->pgoff);
+ return VM_FAULT_SIGBUS;
+ }
+
+ pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+
+ rc = vm_insert_mixed(vma, vaddr, pfn);
+
+ if (rc == -ENOMEM)
+ return VM_FAULT_OOM;
+ if (rc < 0 && rc != -EBUSY)
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+
+static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ int rc;
+ struct file *filp = vma->vm_file;
+ struct dax_dev *dax_dev = filp->private_data;
+
+ dev_dbg(dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
+ current->comm, (vmf->flags & FAULT_FLAG_WRITE)
+ ? "write" : "read", vma->vm_start, vma->vm_end);
+ rcu_read_lock();
+ rc = __dax_dev_fault(dax_dev, vma, vmf);
+ rcu_read_unlock();
+
+ return rc;
+}
+
+static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
+ struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd,
+ unsigned int flags)
+{
+ unsigned long pmd_addr = addr & PMD_MASK;
+ struct device *dev = dax_dev->dev;
+ struct dax_region *dax_region;
+ phys_addr_t phys;
+ pgoff_t pgoff;
+ pfn_t pfn;
+
+ if (check_vma(dax_dev, vma, __func__))
+ return VM_FAULT_SIGBUS;
+
+ dax_region = dax_dev->region;
+ if (dax_region->align > PMD_SIZE) {
+ dev_dbg(dev, "%s: alignment > fault size\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ /* dax pmd mappings require pfn_t_devmap() */
+ if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
+ dev_dbg(dev, "%s: alignment > fault size\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ pgoff = linear_page_index(vma, pmd_addr);
+ phys = pgoff_to_phys(dax_dev, pgoff, PAGE_SIZE);
+ if (phys == -1) {
+ dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+ pgoff);
+ return VM_FAULT_SIGBUS;
+ }
+
+ pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
+
+ return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
+ flags & FAULT_FLAG_WRITE);
+}
+
+static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmd, unsigned int flags)
+{
+ int rc;
+ struct file *filp = vma->vm_file;
+ struct dax_dev *dax_dev = filp->private_data;
+
+ dev_dbg(dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
+ current->comm, (flags & FAULT_FLAG_WRITE)
+ ? "write" : "read", vma->vm_start, vma->vm_end);
+
+ rcu_read_lock();
+ rc = __dax_dev_pmd_fault(dax_dev, vma, addr, pmd, flags);
+ rcu_read_unlock();
+
+ return rc;
+}
+
+static void dax_dev_vm_open(struct vm_area_struct *vma)
+{
+ struct file *filp = vma->vm_file;
+ struct dax_dev *dax_dev = filp->private_data;
+
+ dev_dbg(dax_dev->dev, "%s\n", __func__);
+ kref_get(&dax_dev->kref);
+}
+
+static void dax_dev_vm_close(struct vm_area_struct *vma)
+{
+ struct file *filp = vma->vm_file;
+ struct dax_dev *dax_dev = filp->private_data;
+
+ dev_dbg(dax_dev->dev, "%s\n", __func__);
+ dax_dev_put(dax_dev);
+}
+
+static const struct vm_operations_struct dax_dev_vm_ops = {
+ .fault = dax_dev_fault,
+ .pmd_fault = dax_dev_pmd_fault,
+ .open = dax_dev_vm_open,
+ .close = dax_dev_vm_close,
+};
+
+static int dax_dev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct dax_dev *dax_dev = filp->private_data;
+ int rc;
+
+ dev_dbg(dax_dev->dev, "%s\n", __func__);
+
+ rc = check_vma(dax_dev, vma, __func__);
+ if (rc)
+ return rc;
+
+ kref_get(&dax_dev->kref);
+ vma->vm_ops = &dax_dev_vm_ops;
+ vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
+ return 0;
+
+}
+
+static const struct file_operations dax_fops = {
+ .llseek = noop_llseek,
+ .owner = THIS_MODULE,
+ .open = dax_dev_open,
+ .release = dax_dev_release,
+ .get_unmapped_area = dax_dev_get_unmapped_area,
+ .mmap = dax_dev_mmap,
+};
+
+static int __init dax_init(void)
+{
+ int rc;
+
+ rc = register_chrdev(0, "dax", &dax_fops);
+ if (rc < 0)
+ return rc;
+ dax_major = rc;
+
+ dax_class = class_create(THIS_MODULE, "dax");
+ if (IS_ERR(dax_class)) {
+ unregister_chrdev(dax_major, "dax");
+ return PTR_ERR(dax_class);
+ }
+
+ return 0;
+}
+
+static void __exit dax_exit(void)
+{
+ class_destroy(dax_class);
+ unregister_chrdev(dax_major, "dax");
+ ida_destroy(&dax_minor_ida);
+}
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+subsys_initcall(dax_init);
+module_exit(dax_exit);
diff --git a/drivers/dax/dax.h b/drivers/dax/dax.h
new file mode 100644
index 000000000..d8b8f1f25
--- /dev/null
+++ b/drivers/dax/dax.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef __DAX_H__
+#define __DAX_H__
+struct device;
+struct resource;
+struct dax_region;
+void dax_region_put(struct dax_region *dax_region);
+struct dax_region *alloc_dax_region(struct device *parent,
+ int region_id, struct resource *res, unsigned int align,
+ void *addr, unsigned long flags);
+int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
+ int count);
+#endif /* __DAX_H__ */
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
new file mode 100644
index 000000000..55d510e36
--- /dev/null
+++ b/drivers/dax/pmem.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/percpu-refcount.h>
+#include <linux/memremap.h>
+#include <linux/module.h>
+#include <linux/pfn_t.h>
+#include "../nvdimm/pfn.h"
+#include "../nvdimm/nd.h"
+#include "dax.h"
+
+struct dax_pmem {
+ struct device *dev;
+ struct percpu_ref ref;
+ struct completion cmp;
+};
+
+struct dax_pmem *to_dax_pmem(struct percpu_ref *ref)
+{
+ return container_of(ref, struct dax_pmem, ref);
+}
+
+static void dax_pmem_percpu_release(struct percpu_ref *ref)
+{
+ struct dax_pmem *dax_pmem = to_dax_pmem(ref);
+
+ dev_dbg(dax_pmem->dev, "%s\n", __func__);
+ complete(&dax_pmem->cmp);
+}
+
+static void dax_pmem_percpu_exit(void *data)
+{
+ struct percpu_ref *ref = data;
+ struct dax_pmem *dax_pmem = to_dax_pmem(ref);
+
+ dev_dbg(dax_pmem->dev, "%s\n", __func__);
+ percpu_ref_exit(ref);
+ wait_for_completion(&dax_pmem->cmp);
+}
+
+static void dax_pmem_percpu_kill(void *data)
+{
+ struct percpu_ref *ref = data;
+ struct dax_pmem *dax_pmem = to_dax_pmem(ref);
+
+ dev_dbg(dax_pmem->dev, "%s\n", __func__);
+ percpu_ref_kill(ref);
+}
+
+static int dax_pmem_probe(struct device *dev)
+{
+ int rc;
+ void *addr;
+ struct resource res;
+ struct nd_pfn_sb *pfn_sb;
+ struct dax_pmem *dax_pmem;
+ struct nd_region *nd_region;
+ struct nd_namespace_io *nsio;
+ struct dax_region *dax_region;
+ struct nd_namespace_common *ndns;
+ struct nd_dax *nd_dax = to_nd_dax(dev);
+ struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
+ struct vmem_altmap __altmap, *altmap = NULL;
+
+ ndns = nvdimm_namespace_common_probe(dev);
+ if (IS_ERR(ndns))
+ return PTR_ERR(ndns);
+ nsio = to_nd_namespace_io(&ndns->dev);
+
+ /* parse the 'pfn' info block via ->rw_bytes */
+ devm_nsio_enable(dev, nsio);
+ altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap);
+ if (IS_ERR(altmap))
+ return PTR_ERR(altmap);
+ devm_nsio_disable(dev, nsio);
+
+ pfn_sb = nd_pfn->pfn_sb;
+
+ if (!devm_request_mem_region(dev, nsio->res.start,
+ resource_size(&nsio->res), dev_name(dev))) {
+ dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
+ return -EBUSY;
+ }
+
+ dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL);
+ if (!dax_pmem)
+ return -ENOMEM;
+
+ dax_pmem->dev = dev;
+ init_completion(&dax_pmem->cmp);
+ rc = percpu_ref_init(&dax_pmem->ref, dax_pmem_percpu_release, 0,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
+ if (rc) {
+ dax_pmem_percpu_exit(&dax_pmem->ref);
+ return rc;
+ }
+
+ addr = devm_memremap_pages(dev, &res, &dax_pmem->ref, altmap);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+
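+ /*
+ * Registered after devm_memremap_pages() so that, at teardown, the
+ * percpu_ref is killed before the page mapping is torn down (devm
+ * actions run in reverse order of registration).
+ */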
+ rc = devm_add_action(dev, dax_pmem_percpu_kill, &dax_pmem->ref);
+ if (rc) {
+ dax_pmem_percpu_kill(&dax_pmem->ref);
+ return rc;
+ }
+
+ nd_region = to_nd_region(dev->parent);
+ dax_region = alloc_dax_region(dev, nd_region->id, &res,
+ le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
+ if (!dax_region)
+ return -ENOMEM;
+
+ /* TODO: support for subdividing a dax region... */
+ rc = devm_create_dax_dev(dax_region, &res, 1);
+
+ /* child dax_dev instances now own the lifetime of the dax_region */
+ dax_region_put(dax_region);
+
+ return rc;
+}
+
+static struct nd_device_driver dax_pmem_driver = {
+ .probe = dax_pmem_probe,
+ .drv = {
+ .name = "dax_pmem",
+ },
+ .type = ND_DRIVER_DAX_PMEM,
+};
+
+static int __init dax_pmem_init(void)
+{
+ return nd_driver_register(&dax_pmem_driver);
+}
+module_init(dax_pmem_init);
+
+static void __exit dax_pmem_exit(void)
+{
+ driver_unregister(&dax_pmem_driver.drv);
+}
+module_exit(dax_pmem_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 4de78c552..78dac0e9d 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -64,30 +64,32 @@ config DEVFREQ_GOV_USERSPACE
Otherwise, the governor does not change the frequency
given at the initialization.
+config DEVFREQ_GOV_PASSIVE
+ tristate "Passive"
+ help
+ Sets the frequency based on the frequency of its parent devfreq
+ device. This governor does not change the frequency by itself
+ through sysfs entries. The passive governor recommends that the
+ devfreq device use the OPP table to get the frequency/voltage.
+
comment "DEVFREQ Drivers"
-config ARM_EXYNOS4_BUS_DEVFREQ
- bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
- depends on (CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM
+config ARM_EXYNOS_BUS_DEVFREQ
+ bool "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
+ depends on ARCH_EXYNOS
select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select DEVFREQ_GOV_PASSIVE
+ select DEVFREQ_EVENT_EXYNOS_PPMU
+ select PM_DEVFREQ_EVENT
select PM_OPP
help
- This adds the DEVFREQ driver for Exynos4210 memory bus (vdd_int)
- and Exynos4212/4412 memory interface and bus (vdd_mif + vdd_int).
- It reads PPMU counters of memory controllers and adjusts
- the operating frequencies and voltages with OPP support.
+ This adds the common DEVFREQ driver for the Exynos memory bus. An
+ Exynos SoC has one or more memory bus groups (e.g., the MIF and INT
+ blocks), and each group can contain many memory bus blocks. The
+ driver reads the PPMU counters of the memory controllers through a
+ DEVFREQ-event device and adjusts the operating frequencies and
+ voltages with OPP support.
This does not yet operate with optimal voltages.
-config ARM_EXYNOS5_BUS_DEVFREQ
- tristate "ARM Exynos5250 Bus DEVFREQ Driver"
- depends on SOC_EXYNOS5250
- select DEVFREQ_GOV_SIMPLE_ONDEMAND
- select PM_OPP
- help
- This adds the DEVFREQ driver for Exynos5250 bus interface (vdd_int).
- It reads PPMU counters of memory controllers and adjusts the
- operating frequencies and voltages with OPP support.
-
config ARM_TEGRA_DEVFREQ
tristate "Tegra DEVFREQ Driver"
depends on ARCH_TEGRA_124_SOC
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 5134f9ee9..09f11d9d4 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -4,10 +4,10 @@ obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
+obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
# DEVFREQ Drivers
-obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos/
-obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ) += exynos/
+obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra-devfreq.o
# DEVFREQ Event Drivers
diff --git a/drivers/devfreq/devfreq-event.c b/drivers/devfreq/devfreq-event.c
index 38bf144ca..39b048eda 100644
--- a/drivers/devfreq/devfreq-event.c
+++ b/drivers/devfreq/devfreq-event.c
@@ -235,6 +235,11 @@ struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
mutex_lock(&devfreq_event_list_lock);
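+ /* Prefer an of_node match; fall back to matching by name below. */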
list_for_each_entry(edev, &devfreq_event_list, node) {
+ if (edev->dev.parent && edev->dev.parent->of_node == node)
+ goto out;
+ }
+
+ list_for_each_entry(edev, &devfreq_event_list, node) {
if (!strcmp(edev->desc->name, node->name))
goto out;
}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 984c5e9e7..e92418fac 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
+#include <linux/of.h>
#include "governor.h"
static struct class *devfreq_class;
@@ -188,6 +189,29 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
return ERR_PTR(-ENODEV);
}
+static int devfreq_notify_transition(struct devfreq *devfreq,
+ struct devfreq_freqs *freqs, unsigned int state)
+{
+ if (!devfreq)
+ return -EINVAL;
+
+ switch (state) {
+ case DEVFREQ_PRECHANGE:
+ srcu_notifier_call_chain(&devfreq->transition_notifier_list,
+ DEVFREQ_PRECHANGE, freqs);
+ break;
+
+ case DEVFREQ_POSTCHANGE:
+ srcu_notifier_call_chain(&devfreq->transition_notifier_list,
+ DEVFREQ_POSTCHANGE, freqs);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* Load monitoring helper functions for governors use */
/**
@@ -199,7 +223,8 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
*/
int update_devfreq(struct devfreq *devfreq)
{
- unsigned long freq;
+ struct devfreq_freqs freqs;
+ unsigned long freq, cur_freq;
int err = 0;
u32 flags = 0;
@@ -233,9 +258,24 @@ int update_devfreq(struct devfreq *devfreq)
flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
}
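+ /*
+ * Announce the intended transition before calling ->target(); if
+ * the switch fails, report a POSTCHANGE back to the current
+ * frequency so listeners never see a transition that did not happen.
+ */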
+ if (devfreq->profile->get_cur_freq)
+ devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
+ else
+ cur_freq = devfreq->previous_freq;
+
+ freqs.old = cur_freq;
+ freqs.new = freq;
+ devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
+
err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
- if (err)
+ if (err) {
+ freqs.new = cur_freq;
+ devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
return err;
+ }
+
+ freqs.new = freq;
+ devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
if (devfreq->profile->freq_table)
if (devfreq_update_status(devfreq, freq))
@@ -515,6 +555,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->profile = profile;
strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
devfreq->previous_freq = profile->initial_freq;
+ devfreq->last_status.current_frequency = profile->initial_freq;
devfreq->data = data;
devfreq->nb.notifier_call = devfreq_notifier_call;
@@ -524,22 +565,23 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_lock(&devfreq->lock);
}
- devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
+ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
+ err = device_register(&devfreq->dev);
+ if (err) {
+ mutex_unlock(&devfreq->lock);
+ goto err_out;
+ }
+
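+ /*
+ * Allocate against the now-registered devfreq->dev so the buffers
+ * are released automatically when the devfreq device is destroyed.
+ */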
+ devfreq->trans_table = devm_kzalloc(&devfreq->dev, sizeof(unsigned int) *
devfreq->profile->max_state *
devfreq->profile->max_state,
GFP_KERNEL);
- devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
+ devfreq->time_in_state = devm_kzalloc(&devfreq->dev, sizeof(unsigned long) *
devfreq->profile->max_state,
GFP_KERNEL);
devfreq->last_stat_updated = jiffies;
- dev_set_name(&devfreq->dev, "%s", dev_name(dev));
- err = device_register(&devfreq->dev);
- if (err) {
- put_device(&devfreq->dev);
- mutex_unlock(&devfreq->lock);
- goto err_out;
- }
+ srcu_init_notifier_head(&devfreq->transition_notifier_list);
mutex_unlock(&devfreq->lock);
@@ -564,7 +606,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
err_init:
list_del(&devfreq->node);
device_unregister(&devfreq->dev);
- kfree(devfreq);
err_out:
return ERR_PTR(err);
}
@@ -582,7 +623,6 @@ int devfreq_remove_device(struct devfreq *devfreq)
return -EINVAL;
device_unregister(&devfreq->dev);
- put_device(&devfreq->dev);
return 0;
}
@@ -639,6 +679,49 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
}
EXPORT_SYMBOL(devm_devfreq_add_device);
+#ifdef CONFIG_OF
+/*
+ * devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
+ * @dev: instance of the given device
+ * @index: index into the list of devfreq devices
+ *
+ * Return: the instance of the devfreq device
+ */
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
+{
+ struct device_node *node;
+ struct devfreq *devfreq;
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ if (!dev->of_node)
+ return ERR_PTR(-EINVAL);
+
+ node = of_parse_phandle(dev->of_node, "devfreq", index);
+ if (!node)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&devfreq_list_lock);
+ list_for_each_entry(devfreq, &devfreq_list, node) {
+ if (devfreq->dev.parent
+ && devfreq->dev.parent->of_node == node) {
+ mutex_unlock(&devfreq_list_lock);
+ return devfreq;
+ }
+ }
+ mutex_unlock(&devfreq_list_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+#else
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
+
/**
* devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
* @dev: the device to add devfreq feature.
@@ -1266,6 +1349,129 @@ void devm_devfreq_unregister_opp_notifier(struct device *dev,
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
+/**
+ * devfreq_register_notifier() - Register a notifier with devfreq
+ * @devfreq: The devfreq object.
+ * @nb: The notifier block to register.
+ * @list: DEVFREQ_TRANSITION_NOTIFIER.
+ */
+int devfreq_register_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+ int ret = 0;
+
+ if (!devfreq)
+ return -EINVAL;
+
+ switch (list) {
+ case DEVFREQ_TRANSITION_NOTIFIER:
+ ret = srcu_notifier_chain_register(
+ &devfreq->transition_notifier_list, nb);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(devfreq_register_notifier);
+
+/**
+ * devfreq_unregister_notifier() - Unregister a notifier from devfreq
+ * @devfreq: The devfreq object.
+ * @nb: The notifier block to be unregistered.
+ * @list: DEVFREQ_TRANSITION_NOTIFIER.
+ */
+int devfreq_unregister_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+ int ret = 0;
+
+ if (!devfreq)
+ return -EINVAL;
+
+ switch (list) {
+ case DEVFREQ_TRANSITION_NOTIFIER:
+ ret = srcu_notifier_chain_unregister(
+ &devfreq->transition_notifier_list, nb);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(devfreq_unregister_notifier);
+
+struct devfreq_notifier_devres {
+ struct devfreq *devfreq;
+ struct notifier_block *nb;
+ unsigned int list;
+};
+
+static void devm_devfreq_notifier_release(struct device *dev, void *res)
+{
+ struct devfreq_notifier_devres *this = res;
+
+ devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
+}
+
+/**
+ * devm_devfreq_register_notifier() - Resource-managed devfreq_register_notifier()
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @devfreq: The devfreq object.
+ * @nb: The notifier block to register.
+ * @list: DEVFREQ_TRANSITION_NOTIFIER.
+ */
+int devm_devfreq_register_notifier(struct device *dev,
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+ struct devfreq_notifier_devres *ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = devfreq_register_notifier(devfreq, nb, list);
+ if (ret) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ ptr->devfreq = devfreq;
+ ptr->nb = nb;
+ ptr->list = list;
+ devres_add(dev, ptr);
+
+ return 0;
+}
+EXPORT_SYMBOL(devm_devfreq_register_notifier);
+
+/**
+ * devm_devfreq_unregister_notifier() - Resource-managed devfreq_unregister_notifier()
+ * @dev: The devfreq user device. (parent of devfreq)
+ * @devfreq: The devfreq object.
+ * @nb: The notifier block to be unregistered.
+ * @list: DEVFREQ_TRANSITION_NOTIFIER.
+ */
+void devm_devfreq_unregister_notifier(struct device *dev,
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
+{
+ WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
+ devm_devfreq_dev_match, devfreq));
+}
+EXPORT_SYMBOL(devm_devfreq_unregister_notifier);
+
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/event/Kconfig b/drivers/devfreq/event/Kconfig
index a11720aff..1e8b4f469 100644
--- a/drivers/devfreq/event/Kconfig
+++ b/drivers/devfreq/event/Kconfig
@@ -13,6 +13,14 @@ menuconfig PM_DEVFREQ_EVENT
if PM_DEVFREQ_EVENT
+config DEVFREQ_EVENT_EXYNOS_NOCP
+ bool "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
+ depends on ARCH_EXYNOS
+ select PM_OPP
+ help
+ This adds the devfreq-event driver for Exynos SoCs. It provides NoC
+ (Network on Chip) Probe counters to measure the bandwidth of the AXI buses.
+
config DEVFREQ_EVENT_EXYNOS_PPMU
bool "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
depends on ARCH_EXYNOS
diff --git a/drivers/devfreq/event/Makefile b/drivers/devfreq/event/Makefile
index be146ead7..3d6afd352 100644
--- a/drivers/devfreq/event/Makefile
+++ b/drivers/devfreq/event/Makefile
@@ -1,2 +1,4 @@
# Exynos DEVFREQ Event Drivers
+
+obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_NOCP) += exynos-nocp.o
obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_PPMU) += exynos-ppmu.o
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c
new file mode 100644
index 000000000..a5841403b
--- /dev/null
+++ b/drivers/devfreq/event/exynos-nocp.c
@@ -0,0 +1,301 @@
+/*
+ * exynos-nocp.c - EXYNOS NoC (Network On Chip) Probe support
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/devfreq-event.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "exynos-nocp.h"
+
+struct exynos_nocp {
+ struct devfreq_event_dev *edev;
+ struct devfreq_event_desc desc;
+
+ struct device *dev;
+
+ struct regmap *regmap;
+ struct clk *clk;
+};
+
+/*
+ * The devfreq-event ops for the NoC probe
+ */
+static int exynos_nocp_set_event(struct devfreq_event_dev *edev)
+{
+ struct exynos_nocp *nocp = devfreq_event_get_drvdata(edev);
+ int ret;
+
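+ /*
+ * Program the probe while it is disabled: stop counting, select the
+ * counter sources, arm the StatALARM, then re-enable counting.
+ */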
+ /* Disable NoC probe */
+ ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+ NOCP_MAIN_CTL_STATEN_MASK, 0);
+ if (ret < 0) {
+ dev_err(nocp->dev, "failed to disable the NoC probe device\n");
+ return ret;
+ }
+
+ /* Set a statistics dump period to 0 */
+ ret = regmap_write(nocp->regmap, NOCP_STAT_PERIOD, 0x0);
+ if (ret < 0)
+ goto out;
+
+ /* Set the IntEvent fields of *_SRC */
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_0_SRC,
+ NOCP_CNT_SRC_INTEVENT_MASK,
+ NOCP_CNT_SRC_INTEVENT_BYTE_MASK);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_1_SRC,
+ NOCP_CNT_SRC_INTEVENT_MASK,
+ NOCP_CNT_SRC_INTEVENT_CHAIN_MASK);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_2_SRC,
+ NOCP_CNT_SRC_INTEVENT_MASK,
+ NOCP_CNT_SRC_INTEVENT_CYCLE_MASK);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_3_SRC,
+ NOCP_CNT_SRC_INTEVENT_MASK,
+ NOCP_CNT_SRC_INTEVENT_CHAIN_MASK);
+ if (ret < 0)
+ goto out;
+
+
+ /* Set an alarm with a max/min value of 0 to generate StatALARM */
+ ret = regmap_write(nocp->regmap, NOCP_STAT_ALARM_MIN, 0x0);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_write(nocp->regmap, NOCP_STAT_ALARM_MAX, 0x0);
+ if (ret < 0)
+ goto out;
+
+ /* Set AlarmMode */
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_0_ALARM_MODE,
+ NOCP_CNT_ALARM_MODE_MASK,
+ NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_1_ALARM_MODE,
+ NOCP_CNT_ALARM_MODE_MASK,
+ NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_2_ALARM_MODE,
+ NOCP_CNT_ALARM_MODE_MASK,
+ NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_update_bits(nocp->regmap, NOCP_COUNTERS_3_ALARM_MODE,
+ NOCP_CNT_ALARM_MODE_MASK,
+ NOCP_CNT_ALARM_MODE_MIN_MAX_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* Enable the measurements by setting AlarmEn and StatEn */
+ ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+ NOCP_MAIN_CTL_STATEN_MASK | NOCP_MAIN_CTL_ALARMEN_MASK,
+ NOCP_MAIN_CTL_STATEN_MASK | NOCP_MAIN_CTL_ALARMEN_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* Set GlobalEN */
+ ret = regmap_update_bits(nocp->regmap, NOCP_CFG_CTL,
+ NOCP_CFG_CTL_GLOBALEN_MASK,
+ NOCP_CFG_CTL_GLOBALEN_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* Enable NoC probe */
+ ret = regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+ NOCP_MAIN_CTL_STATEN_MASK,
+ NOCP_MAIN_CTL_STATEN_MASK);
+ if (ret < 0)
+ goto out;
+
+ return 0;
+
+out:
+ /* Reset NoC probe */
+ if (regmap_update_bits(nocp->regmap, NOCP_MAIN_CTL,
+ NOCP_MAIN_CTL_STATEN_MASK, 0)) {
+ dev_err(nocp->dev, "Failed to reset NoC probe device\n");
+ }
+
+ return ret;
+}
+
+static int exynos_nocp_get_event(struct devfreq_event_dev *edev,
+ struct devfreq_event_data *edata)
+{
+ struct exynos_nocp *nocp = devfreq_event_get_drvdata(edev);
+ unsigned int counter[4];
+ int ret;
+
+ /* Read cycle count */
+ ret = regmap_read(nocp->regmap, NOCP_COUNTERS_0_VAL, &counter[0]);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_read(nocp->regmap, NOCP_COUNTERS_1_VAL, &counter[1]);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_read(nocp->regmap, NOCP_COUNTERS_2_VAL, &counter[2]);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_read(nocp->regmap, NOCP_COUNTERS_3_VAL, &counter[3]);
+ if (ret < 0)
+ goto out;
+
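+ /*
+ * Counters 1 and 3 are configured as chain events by
+ * exynos_nocp_set_event(), so they carry the upper 16 bits of
+ * counter 0 (byte count) and counter 2 (cycle count) respectively.
+ */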
+ edata->load_count = ((counter[1] << 16) | counter[0]);
+ edata->total_count = ((counter[3] << 16) | counter[2]);
+
+ dev_dbg(&edev->dev, "%s (event: %lu/%lu)\n", edev->desc->name,
+ edata->load_count, edata->total_count);
+
+ return 0;
+
+out:
+ edata->load_count = 0;
+ edata->total_count = 0;
+
+ dev_err(nocp->dev, "Failed to read the counter of NoC probe device\n");
+
+ return ret;
+}
+
+static const struct devfreq_event_ops exynos_nocp_ops = {
+ .set_event = exynos_nocp_set_event,
+ .get_event = exynos_nocp_get_event,
+};
+
+static const struct of_device_id exynos_nocp_id_match[] = {
+ { .compatible = "samsung,exynos5420-nocp", },
+ { /* sentinel */ },
+};
+
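+/* Not const: max_register is narrowed to the mapped region during probe */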
+static struct regmap_config exynos_nocp_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = NOCP_COUNTERS_3_VAL,
+};
+
+static int exynos_nocp_parse_dt(struct platform_device *pdev,
+ struct exynos_nocp *nocp)
+{
+ struct device *dev = nocp->dev;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ void __iomem *base;
+
+ if (!np) {
+ dev_err(dev, "failed to find devicetree node\n");
+ return -EINVAL;
+ }
+
+ nocp->clk = devm_clk_get(dev, "nocp");
+ if (IS_ERR(nocp->clk))
+ nocp->clk = NULL;
+
+ /* Map the memory-mapped I/O region that controls the NoC probe registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
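+ /* Restrict regmap accesses to the registers that were actually mapped */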
+ exynos_nocp_regmap_config.max_register = resource_size(res) - 4;
+
+ nocp->regmap = devm_regmap_init_mmio(dev, base,
+ &exynos_nocp_regmap_config);
+ if (IS_ERR(nocp->regmap)) {
+ dev_err(dev, "failed to initialize regmap\n");
+ return PTR_ERR(nocp->regmap);
+ }
+
+ return 0;
+}
+
+static int exynos_nocp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct exynos_nocp *nocp;
+ int ret;
+
+ nocp = devm_kzalloc(&pdev->dev, sizeof(*nocp), GFP_KERNEL);
+ if (!nocp)
+ return -ENOMEM;
+
+ nocp->dev = &pdev->dev;
+
+ /* Parse the device tree to get the required resources */
+ ret = exynos_nocp_parse_dt(pdev, nocp);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to parse devicetree for resource\n");
+ return ret;
+ }
+
+ /* Add devfreq-event device to measure the bandwidth of NoC */
+ nocp->desc.ops = &exynos_nocp_ops;
+ nocp->desc.driver_data = nocp;
+ nocp->desc.name = np->full_name;
+ nocp->edev = devm_devfreq_event_add_edev(&pdev->dev, &nocp->desc);
+ if (IS_ERR(nocp->edev)) {
+ dev_err(&pdev->dev,
+ "failed to add devfreq-event device\n");
+ return PTR_ERR(nocp->edev);
+ }
+ platform_set_drvdata(pdev, nocp);
+
+ clk_prepare_enable(nocp->clk);
+
+ pr_info("exynos-nocp: new NoC Probe device registered: %s\n",
+ dev_name(dev));
+
+ return 0;
+}
+
+static int exynos_nocp_remove(struct platform_device *pdev)
+{
+ struct exynos_nocp *nocp = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(nocp->clk);
+
+ return 0;
+}
+
+static struct platform_driver exynos_nocp_driver = {
+ .probe = exynos_nocp_probe,
+ .remove = exynos_nocp_remove,
+ .driver = {
+ .name = "exynos-nocp",
+ .of_match_table = exynos_nocp_id_match,
+ },
+};
+module_platform_driver(exynos_nocp_driver);
+
+MODULE_DESCRIPTION("Exynos NoC (Network on Chip) Probe driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/event/exynos-nocp.h b/drivers/devfreq/event/exynos-nocp.h
new file mode 100644
index 000000000..28564db0e
--- /dev/null
+++ b/drivers/devfreq/event/exynos-nocp.h
@@ -0,0 +1,78 @@
+/*
+ * exynos-nocp.h - EXYNOS NoC (Network on Chip) Probe header file
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS_NOCP_H__
+#define __EXYNOS_NOCP_H__
+
+enum nocp_reg {
+ NOCP_ID_REVISION_ID = 0x04,
+ NOCP_MAIN_CTL = 0x08,
+ NOCP_CFG_CTL = 0x0C,
+
+ NOCP_STAT_PERIOD = 0x24,
+ NOCP_STAT_GO = 0x28,
+ NOCP_STAT_ALARM_MIN = 0x2C,
+ NOCP_STAT_ALARM_MAX = 0x30,
+ NOCP_STAT_ALARM_STATUS = 0x34,
+ NOCP_STAT_ALARM_CLR = 0x38,
+
+ NOCP_COUNTERS_0_SRC = 0x138,
+ NOCP_COUNTERS_0_ALARM_MODE = 0x13C,
+ NOCP_COUNTERS_0_VAL = 0x140,
+
+ NOCP_COUNTERS_1_SRC = 0x14C,
+ NOCP_COUNTERS_1_ALARM_MODE = 0x150,
+ NOCP_COUNTERS_1_VAL = 0x154,
+
+ NOCP_COUNTERS_2_SRC = 0x160,
+ NOCP_COUNTERS_2_ALARM_MODE = 0x164,
+ NOCP_COUNTERS_2_VAL = 0x168,
+
+ NOCP_COUNTERS_3_SRC = 0x174,
+ NOCP_COUNTERS_3_ALARM_MODE = 0x178,
+ NOCP_COUNTERS_3_VAL = 0x17C,
+};
+
+/* NOCP_MAIN_CTL register */
+#define NOCP_MAIN_CTL_ERREN_MASK BIT(0)
+#define NOCP_MAIN_CTL_TRACEEN_MASK BIT(1)
+#define NOCP_MAIN_CTL_PAYLOADEN_MASK BIT(2)
+#define NOCP_MAIN_CTL_STATEN_MASK BIT(3)
+#define NOCP_MAIN_CTL_ALARMEN_MASK BIT(4)
+#define NOCP_MAIN_CTL_STATCONDDUMP_MASK BIT(5)
+#define NOCP_MAIN_CTL_INTRUSIVEMODE_MASK BIT(6)
+
+/* NOCP_CFG_CTL register */
+#define NOCP_CFG_CTL_GLOBALEN_MASK BIT(0)
+#define NOCP_CFG_CTL_ACTIVE_MASK BIT(1)
+
+/* NOCP_COUNTERS_x_SRC register */
+#define NOCP_CNT_SRC_INTEVENT_SHIFT 0
+#define NOCP_CNT_SRC_INTEVENT_MASK (0x1F << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_OFF_MASK (0x0 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_CYCLE_MASK (0x1 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_IDLE_MASK (0x2 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_XFER_MASK (0x3 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_BUSY_MASK (0x4 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_WAIT_MASK (0x5 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_PKT_MASK (0x6 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_BYTE_MASK (0x8 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+#define NOCP_CNT_SRC_INTEVENT_CHAIN_MASK (0x10 << NOCP_CNT_SRC_INTEVENT_SHIFT)
+
+/* NOCP_COUNTERS_x_ALARM_MODE register */
+#define NOCP_CNT_ALARM_MODE_SHIFT 0
+#define NOCP_CNT_ALARM_MODE_MASK (0x3 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_OFF_MASK (0x0 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MIN_MASK (0x1 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MAX_MASK (0x2 << NOCP_CNT_ALARM_MODE_SHIFT)
+#define NOCP_CNT_ALARM_MODE_MIN_MAX_MASK (0x3 << NOCP_CNT_ALARM_MODE_SHIFT)
+
+#endif /* __EXYNOS_NOCP_H__ */
diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
new file mode 100644
index 000000000..2363d0a18
--- /dev/null
+++ b/drivers/devfreq/exynos-bus.c
@@ -0,0 +1,570 @@
+/*
+ * Generic Exynos Bus frequency driver with DEVFREQ Framework
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Author : Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This driver support Exynos Bus frequency feature by using
+ * DEVFREQ framework and is based on drivers/devfreq/exynos/exynos4_bus.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq-event.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#define DEFAULT_SATURATION_RATIO 40
+#define DEFAULT_VOLTAGE_TOLERANCE 2
+
+struct exynos_bus {
+ struct device *dev;
+
+ struct devfreq *devfreq;
+ struct devfreq_event_dev **edev;
+ unsigned int edev_count;
+ struct mutex lock;
+
+ struct dev_pm_opp *curr_opp;
+
+ struct regulator *regulator;
+ struct clk *clk;
+ unsigned int voltage_tolerance;
+ unsigned int ratio;
+};
+
+/*
+ * Control the devfreq-event devices to get the current state of the bus
+ */
+#define exynos_bus_ops_edev(ops) \
+static int exynos_bus_##ops(struct exynos_bus *bus) \
+{ \
+ int i, ret; \
+ \
+ for (i = 0; i < bus->edev_count; i++) { \
+ if (!bus->edev[i]) \
+ continue; \
+ ret = devfreq_event_##ops(bus->edev[i]); \
+ if (ret < 0) \
+ return ret; \
+ } \
+ \
+ return 0; \
+}
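+/*
+ * Expands to exynos_bus_enable_edev(), exynos_bus_disable_edev() and
+ * exynos_bus_set_event(), each applying the matching devfreq_event_*()
+ * call to every devfreq-event device of the bus.
+ */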
+exynos_bus_ops_edev(enable_edev);
+exynos_bus_ops_edev(disable_edev);
+exynos_bus_ops_edev(set_event);
+
+static int exynos_bus_get_event(struct exynos_bus *bus,
+ struct devfreq_event_data *edata)
+{
+ struct devfreq_event_data event_data;
+ unsigned long load_count = 0, total_count = 0;
+ int i, ret = 0;
+
+ for (i = 0; i < bus->edev_count; i++) {
+ if (!bus->edev[i])
+ continue;
+
+ ret = devfreq_event_get_event(bus->edev[i], &event_data);
+ if (ret < 0)
+ return ret;
+
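+ /* Report the state of the busiest devfreq-event device */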
+ if (i == 0 || event_data.load_count > load_count) {
+ load_count = event_data.load_count;
+ total_count = event_data.total_count;
+ }
+ }
+
+ edata->load_count = load_count;
+ edata->total_count = total_count;
+
+ return ret;
+}
+
+/*
+ * Callbacks required by the devfreq simple-ondemand governor
+ */
+static int exynos_bus_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+ struct exynos_bus *bus = dev_get_drvdata(dev);
+ struct dev_pm_opp *new_opp;
+ unsigned long old_freq, new_freq, old_volt, new_volt, tol;
+ int ret = 0;
+
+ /* Get the new OPP instance for the requested bus clock rate */
+ rcu_read_lock();
+ new_opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(new_opp)) {
+ dev_err(dev, "failed to get recommended opp instance\n");
+ rcu_read_unlock();
+ return PTR_ERR(new_opp);
+ }
+
+ new_freq = dev_pm_opp_get_freq(new_opp);
+ new_volt = dev_pm_opp_get_voltage(new_opp);
+ old_freq = dev_pm_opp_get_freq(bus->curr_opp);
+ old_volt = dev_pm_opp_get_voltage(bus->curr_opp);
+ rcu_read_unlock();
+
+ if (old_freq == new_freq)
+ return 0;
+ tol = new_volt * bus->voltage_tolerance / 100;
+
+ /* Change voltage and frequency according to new OPP level */
+ mutex_lock(&bus->lock);
+
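+ /*
+ * When scaling up, raise the voltage before the clock; when scaling
+ * down, lower it only after the clock, so the bus never runs at a
+ * frequency that its supply voltage cannot sustain.
+ */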
+ if (old_freq < new_freq) {
+ ret = regulator_set_voltage_tol(bus->regulator, new_volt, tol);
+ if (ret < 0) {
+ dev_err(bus->dev, "failed to set voltage\n");
+ goto out;
+ }
+ }
+
+ ret = clk_set_rate(bus->clk, new_freq);
+ if (ret < 0) {
+ dev_err(dev, "failed to change clock of bus\n");
+ clk_set_rate(bus->clk, old_freq);
+ goto out;
+ }
+
+ if (old_freq > new_freq) {
+ ret = regulator_set_voltage_tol(bus->regulator, new_volt, tol);
+ if (ret < 0) {
+ dev_err(bus->dev, "failed to set voltage\n");
+ goto out;
+ }
+ }
+ bus->curr_opp = new_opp;
+
+ dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
+ old_freq/1000, new_freq/1000);
+out:
+ mutex_unlock(&bus->lock);
+
+ return ret;
+}
+
+static int exynos_bus_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct exynos_bus *bus = dev_get_drvdata(dev);
+ struct devfreq_event_data edata;
+ int ret;
+
+ rcu_read_lock();
+ stat->current_frequency = dev_pm_opp_get_freq(bus->curr_opp);
+ rcu_read_unlock();
+
+ ret = exynos_bus_get_event(bus, &edata);
+ if (ret < 0) {
+ stat->total_time = stat->busy_time = 0;
+ goto err;
+ }
+
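+ /*
+ * Scale the measured load by the saturation ratio so the governor
+ * sees utilization relative to the point where the bus saturates.
+ */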
+ stat->busy_time = (edata.load_count * 100) / bus->ratio;
+ stat->total_time = edata.total_count;
+
+ dev_dbg(dev, "Usage of devfreq-event : %lu/%lu\n", stat->busy_time,
+ stat->total_time);
+
+err:
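+ /* Re-arm the counters for the next sampling period, also on failure */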
+ ret = exynos_bus_set_event(bus);
+ if (ret < 0) {
+ dev_err(dev, "failed to set event to devfreq-event devices\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void exynos_bus_exit(struct device *dev)
+{
+ struct exynos_bus *bus = dev_get_drvdata(dev);
+ int ret;
+
+ ret = exynos_bus_disable_edev(bus);
+ if (ret < 0)
+ dev_warn(dev, "failed to disable the devfreq-event devices\n");
+
+ if (bus->regulator)
+ regulator_disable(bus->regulator);
+
+ dev_pm_opp_of_remove_table(dev);
+ clk_disable_unprepare(bus->clk);
+}
+
+/*
+ * Callback required by the devfreq passive governor
+ */
+static int exynos_bus_passive_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct exynos_bus *bus = dev_get_drvdata(dev);
+ struct dev_pm_opp *new_opp;
+ unsigned long old_freq, new_freq;
+ int ret = 0;
+
+ /* Get the new OPP instance for the requested bus clock rate */
+ rcu_read_lock();
+ new_opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(new_opp)) {
+ dev_err(dev, "failed to get recommended opp instance\n");
+ rcu_read_unlock();
+ return PTR_ERR(new_opp);
+ }
+
+ new_freq = dev_pm_opp_get_freq(new_opp);
+ old_freq = dev_pm_opp_get_freq(bus->curr_opp);
+ rcu_read_unlock();
+
+ if (old_freq == new_freq)
+ return 0;
+
+ /* Change the frequency according to new OPP level */
+ mutex_lock(&bus->lock);
+
+ ret = clk_set_rate(bus->clk, new_freq);
+ if (ret < 0) {
+ dev_err(dev, "failed to set the clock of bus\n");
+ goto out;
+ }
+
+ *freq = new_freq;
+ bus->curr_opp = new_opp;
+
+ dev_dbg(dev, "Set the frequency of bus (%lukHz -> %lukHz)\n",
+ old_freq/1000, new_freq/1000);
+out:
+ mutex_unlock(&bus->lock);
+
+ return ret;
+}
+
+static void exynos_bus_passive_exit(struct device *dev)
+{
+ struct exynos_bus *bus = dev_get_drvdata(dev);
+
+ dev_pm_opp_of_remove_table(dev);
+ clk_disable_unprepare(bus->clk);
+}
+
+static int exynos_bus_parent_parse_of(struct device_node *np,
+ struct exynos_bus *bus)
+{
+ struct device *dev = bus->dev;
+ int i, ret, count, size;
+
+ /* Get the regulator that supplies power to this bus */
+ bus->regulator = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(bus->regulator)) {
+ dev_err(dev, "failed to get VDD regulator\n");
+ return PTR_ERR(bus->regulator);
+ }
+
+ ret = regulator_enable(bus->regulator);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable VDD regulator\n");
+ return ret;
+ }
+
+ /*
+ * Get the devfreq-event devices used to measure the current
+ * utilization of the buses. This raw data is consumed by the devfreq
+ * simple-ondemand governor.
+ */
+ count = devfreq_event_get_edev_count(dev);
+ if (count < 0) {
+ dev_err(dev, "failed to get the count of devfreq-event dev\n");
+ ret = count;
+ goto err_regulator;
+ }
+ bus->edev_count = count;
+
+ size = sizeof(*bus->edev) * count;
+ bus->edev = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!bus->edev) {
+ ret = -ENOMEM;
+ goto err_regulator;
+ }
+
+ for (i = 0; i < count; i++) {
+ bus->edev[i] = devfreq_event_get_edev_by_phandle(dev, i);
+ if (IS_ERR(bus->edev[i])) {
+ ret = -EPROBE_DEFER;
+ goto err_regulator;
+ }
+ }
+
+ /*
+ * Optionally, get the saturation ratio for this Exynos SoC.
+ * When measuring the utilization of each AXI bus with devfreq-event
+ * devices, the measured busy cycles may be much lower than the total
+ * cycles of the bus over the sampling period. As a result, the devfreq
+ * simple-ondemand governor may decide not to change the current
+ * frequency because the utilization (= busy cycles / total cycles) is
+ * too low. This property is used to scale up the utilization when
+ * calculating busy_time in exynos_bus_get_dev_status().
+ */
+ if (of_property_read_u32(np, "exynos,saturation-ratio", &bus->ratio))
+ bus->ratio = DEFAULT_SATURATION_RATIO;
+
+ if (of_property_read_u32(np, "exynos,voltage-tolerance",
+ &bus->voltage_tolerance))
+ bus->voltage_tolerance = DEFAULT_VOLTAGE_TOLERANCE;
+
+ return 0;
+
+err_regulator:
+ regulator_disable(bus->regulator);
+
+ return ret;
+}
+
+static int exynos_bus_parse_of(struct device_node *np,
+ struct exynos_bus *bus)
+{
+ struct device *dev = bus->dev;
+ unsigned long rate;
+ int ret;
+
+ /* Get the source clock that drives this bus */
+ bus->clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(bus->clk)) {
+ dev_err(dev, "failed to get bus clock\n");
+ return PTR_ERR(bus->clk);
+ }
+
+ ret = clk_prepare_enable(bus->clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable bus clock\n");
+ return ret;
+ }
+
+ /* Get the freq and voltage from OPP table to scale the bus freq */
+ rcu_read_lock();
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get OPP table\n");
+ rcu_read_unlock();
+ goto err_clk;
+ }
+
+ rate = clk_get_rate(bus->clk);
+ bus->curr_opp = devfreq_recommended_opp(dev, &rate, 0);
+ if (IS_ERR(bus->curr_opp)) {
+ dev_err(dev, "failed to find dev_pm_opp\n");
+ rcu_read_unlock();
+ ret = PTR_ERR(bus->curr_opp);
+ goto err_opp;
+ }
+ rcu_read_unlock();
+
+ return 0;
+
+err_opp:
+ dev_pm_opp_of_remove_table(dev);
+err_clk:
+ clk_disable_unprepare(bus->clk);
+
+ return ret;
+}
+
+static int exynos_bus_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct devfreq_dev_profile *profile;
+ struct devfreq_simple_ondemand_data *ondemand_data;
+ struct devfreq_passive_data *passive_data;
+ struct devfreq *parent_devfreq;
+ struct exynos_bus *bus;
+ int ret, max_state;
+ unsigned long min_freq, max_freq;
+
+ if (!np) {
+ dev_err(dev, "failed to find devicetree node\n");
+ return -EINVAL;
+ }
+
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+ mutex_init(&bus->lock);
+ bus->dev = &pdev->dev;
+ platform_set_drvdata(pdev, bus);
+
+ /* Parse the device-tree to get the resource information */
+ ret = exynos_bus_parse_of(np, bus);
+ if (ret < 0)
+ goto err;
+
+ profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+ if (!profile) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
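+ /*
+ * A "devfreq" phandle selects the passive path: this bus follows its
+ * parent devfreq device instead of running simple-ondemand itself.
+ */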
+ if (of_parse_phandle(dev->of_node, "devfreq", 0))
+ goto passive;
+ else
+ ret = exynos_bus_parent_parse_of(np, bus);
+
+ if (ret < 0)
+ goto err;
+
+ /* Initialize the struct profile and governor data for parent device */
+ profile->polling_ms = 50;
+ profile->target = exynos_bus_target;
+ profile->get_dev_status = exynos_bus_get_dev_status;
+ profile->exit = exynos_bus_exit;
+
+ ondemand_data = devm_kzalloc(dev, sizeof(*ondemand_data), GFP_KERNEL);
+ if (!ondemand_data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ ondemand_data->upthreshold = 40;
+ ondemand_data->downdifferential = 5;
+
+ /* Add devfreq device to monitor and handle the exynos bus */
+ bus->devfreq = devm_devfreq_add_device(dev, profile, "simple_ondemand",
+ ondemand_data);
+ if (IS_ERR(bus->devfreq)) {
+ dev_err(dev, "failed to add devfreq device\n");
+ ret = PTR_ERR(bus->devfreq);
+ goto err;
+ }
+
+ /* Register opp_notifier to catch the change of OPP */
+ ret = devm_devfreq_register_opp_notifier(dev, bus->devfreq);
+ if (ret < 0) {
+ dev_err(dev, "failed to register opp notifier\n");
+ goto err;
+ }
+
+ /*
+ * Enable devfreq-event to get raw data which is used to determine
+ * current bus load.
+ */
+ ret = exynos_bus_enable_edev(bus);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable devfreq-event devices\n");
+ goto err;
+ }
+
+ ret = exynos_bus_set_event(bus);
+ if (ret < 0) {
+ dev_err(dev, "failed to set event to devfreq-event devices\n");
+ goto err;
+ }
+
+ goto out;
+passive:
+ /* Initialize the struct profile and governor data for passive device */
+ profile->target = exynos_bus_passive_target;
+ profile->exit = exynos_bus_passive_exit;
+
+ /* Get the instance of parent devfreq device */
+ parent_devfreq = devfreq_get_devfreq_by_phandle(dev, 0);
+ if (IS_ERR(parent_devfreq)) {
+ ret = -EPROBE_DEFER;
+ goto err;
+ }
+
+ passive_data = devm_kzalloc(dev, sizeof(*passive_data), GFP_KERNEL);
+ if (!passive_data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ passive_data->parent = parent_devfreq;
+
+ /* Add devfreq device for exynos bus with passive governor */
+ bus->devfreq = devm_devfreq_add_device(dev, profile, "passive",
+ passive_data);
+ if (IS_ERR(bus->devfreq)) {
+ dev_err(dev,
+ "failed to add devfreq dev with passive governor\n");
+ ret = -EPROBE_DEFER;
+ goto err;
+ }
+
+out:
+ max_state = bus->devfreq->profile->max_state;
+ min_freq = (bus->devfreq->profile->freq_table[0] / 1000);
+ max_freq = (bus->devfreq->profile->freq_table[max_state - 1] / 1000);
+ pr_info("exynos-bus: new bus device registered: %s (%6ld KHz ~ %6ld KHz)\n",
+ dev_name(dev), min_freq, max_freq);
+
+ return 0;
+
+err:
+ dev_pm_opp_of_remove_table(dev);
+ clk_disable_unprepare(bus->clk);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int exynos_bus_resume(struct device *dev)
+{
+ struct exynos_bus *bus = dev_get_drvdata(dev);
+ int ret;
+
+ ret = exynos_bus_enable_edev(bus);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable the devfreq-event devices\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int exynos_bus_suspend(struct device *dev)
+{
+ struct exynos_bus *bus = dev_get_drvdata(dev);
+ int ret;
+
+ ret = exynos_bus_disable_edev(bus);
+ if (ret < 0) {
+ dev_err(dev, "failed to disable the devfreq-event devices\n");
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops exynos_bus_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(exynos_bus_suspend, exynos_bus_resume)
+};
+
+static const struct of_device_id exynos_bus_of_match[] = {
+ { .compatible = "samsung,exynos-bus", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, exynos_bus_of_match);
+
+static struct platform_driver exynos_bus_platdrv = {
+ .probe = exynos_bus_probe,
+ .driver = {
+ .name = "exynos-bus",
+ .pm = &exynos_bus_pm,
+ .of_match_table = of_match_ptr(exynos_bus_of_match),
+ },
+};
+module_platform_driver(exynos_bus_platdrv);
+
+MODULE_DESCRIPTION("Generic Exynos Bus frequency driver");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/exynos/Makefile b/drivers/devfreq/exynos/Makefile
deleted file mode 100644
index 49bc9175f..000000000
--- a/drivers/devfreq/exynos/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# Exynos DEVFREQ Drivers
-obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos_ppmu.o exynos4_bus.o
-obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ) += exynos_ppmu.o exynos5_bus.o
diff --git a/drivers/devfreq/exynos/exynos4_bus.c b/drivers/devfreq/exynos/exynos4_bus.c
deleted file mode 100644
index da9509205..000000000
--- a/drivers/devfreq/exynos/exynos4_bus.c
+++ /dev/null
@@ -1,1055 +0,0 @@
-/* drivers/devfreq/exynos4210_memorybus.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * EXYNOS4 - Memory/Bus clock frequency scaling support in DEVFREQ framework
- * This version supports EXYNOS4210 only. This changes bus frequencies
- * and vddint voltages. Exynos4412/4212 should be able to be supported
- * with minor modifications.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/suspend.h>
-#include <linux/pm_opp.h>
-#include <linux/devfreq.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/module.h>
-
-#include <mach/map.h>
-
-#include "exynos_ppmu.h"
-#include "exynos4_bus.h"
-
-#define MAX_SAFEVOLT 1200000 /* 1.2V */
-
-enum exynos4_busf_type {
- TYPE_BUSF_EXYNOS4210,
- TYPE_BUSF_EXYNOS4x12,
-};
-
-/* Assume that the bus is saturated if the utilization is 40% */
-#define BUS_SATURATION_RATIO 40
-
-enum busclk_level_idx {
- LV_0 = 0,
- LV_1,
- LV_2,
- LV_3,
- LV_4,
- _LV_END
-};
-
-enum exynos_ppmu_idx {
- PPMU_DMC0,
- PPMU_DMC1,
- PPMU_END,
-};
-
-#define EX4210_LV_MAX LV_2
-#define EX4x12_LV_MAX LV_4
-#define EX4210_LV_NUM (LV_2 + 1)
-#define EX4x12_LV_NUM (LV_4 + 1)
-
-/**
- * struct busfreq_opp_info - opp information for bus
- * @rate: Frequency in hertz
- * @volt: Voltage in microvolts corresponding to this OPP
- */
-struct busfreq_opp_info {
- unsigned long rate;
- unsigned long volt;
-};
-
-struct busfreq_data {
- enum exynos4_busf_type type;
- struct device *dev;
- struct devfreq *devfreq;
- bool disabled;
- struct regulator *vdd_int;
- struct regulator *vdd_mif; /* Exynos4412/4212 only */
- struct busfreq_opp_info curr_oppinfo;
- struct busfreq_ppmu_data ppmu_data;
-
- struct notifier_block pm_notifier;
- struct mutex lock;
-
- /* Dividers calculated at boot/probe-time */
- unsigned int dmc_divtable[_LV_END]; /* DMC0 */
- unsigned int top_divtable[_LV_END];
-};
-
-/* 4210 controls clock of mif and voltage of int */
-static struct bus_opp_table exynos4210_busclk_table[] = {
- {LV_0, 400000, 1150000},
- {LV_1, 267000, 1050000},
- {LV_2, 133000, 1025000},
- {0, 0, 0},
-};
-
-/*
- * MIF is the main control knob clock for Exynos4x12 MIF/INT
- * clock and voltage of both mif/int are controlled.
- */
-static struct bus_opp_table exynos4x12_mifclk_table[] = {
- {LV_0, 400000, 1100000},
- {LV_1, 267000, 1000000},
- {LV_2, 160000, 950000},
- {LV_3, 133000, 950000},
- {LV_4, 100000, 950000},
- {0, 0, 0},
-};
-
-/*
- * INT is not the control knob of 4x12. LV_x is not meant to represent
- * the current performance. (MIF does)
- */
-static struct bus_opp_table exynos4x12_intclk_table[] = {
- {LV_0, 200000, 1000000},
- {LV_1, 160000, 950000},
- {LV_2, 133000, 925000},
- {LV_3, 100000, 900000},
- {0, 0, 0},
-};
-
-/* TODO: asv volt definitions are "__initdata"? */
-/* Some chips have different operating voltages */
-static unsigned int exynos4210_asv_volt[][EX4210_LV_NUM] = {
- {1150000, 1050000, 1050000},
- {1125000, 1025000, 1025000},
- {1100000, 1000000, 1000000},
- {1075000, 975000, 975000},
- {1050000, 950000, 950000},
-};
-
-static unsigned int exynos4x12_mif_step_50[][EX4x12_LV_NUM] = {
- /* 400 267 160 133 100 */
- {1050000, 950000, 900000, 900000, 900000}, /* ASV0 */
- {1050000, 950000, 900000, 900000, 900000}, /* ASV1 */
- {1050000, 950000, 900000, 900000, 900000}, /* ASV2 */
- {1050000, 900000, 900000, 900000, 900000}, /* ASV3 */
- {1050000, 900000, 900000, 900000, 850000}, /* ASV4 */
- {1050000, 900000, 900000, 850000, 850000}, /* ASV5 */
- {1050000, 900000, 850000, 850000, 850000}, /* ASV6 */
- {1050000, 900000, 850000, 850000, 850000}, /* ASV7 */
- {1050000, 900000, 850000, 850000, 850000}, /* ASV8 */
-};
-
-static unsigned int exynos4x12_int_volt[][EX4x12_LV_NUM] = {
- /* 200 160 133 100 */
- {1000000, 950000, 925000, 900000}, /* ASV0 */
- {975000, 925000, 925000, 900000}, /* ASV1 */
- {950000, 925000, 900000, 875000}, /* ASV2 */
- {950000, 900000, 900000, 875000}, /* ASV3 */
- {925000, 875000, 875000, 875000}, /* ASV4 */
- {900000, 850000, 850000, 850000}, /* ASV5 */
- {900000, 850000, 850000, 850000}, /* ASV6 */
- {900000, 850000, 850000, 850000}, /* ASV7 */
- {900000, 850000, 850000, 850000}, /* ASV8 */
-};
-
-/*** Clock Divider Data for Exynos4210 ***/
-static unsigned int exynos4210_clkdiv_dmc0[][8] = {
- /*
- * Clock divider value for following
- * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
- * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
- */
-
- /* DMC L0: 400MHz */
- { 3, 1, 1, 1, 1, 1, 3, 1 },
- /* DMC L1: 266.7MHz */
- { 4, 1, 1, 2, 1, 1, 3, 1 },
- /* DMC L2: 133MHz */
- { 5, 1, 1, 5, 1, 1, 3, 1 },
-};
-static unsigned int exynos4210_clkdiv_top[][5] = {
- /*
- * Clock divider value for following
- * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
- */
- /* ACLK200 L0: 200MHz */
- { 3, 7, 4, 5, 1 },
- /* ACLK200 L1: 160MHz */
- { 4, 7, 5, 6, 1 },
- /* ACLK200 L2: 133MHz */
- { 5, 7, 7, 7, 1 },
-};
-static unsigned int exynos4210_clkdiv_lr_bus[][2] = {
- /*
- * Clock divider value for following
- * { DIVGDL/R, DIVGPL/R }
- */
- /* ACLK_GDL/R L1: 200MHz */
- { 3, 1 },
- /* ACLK_GDL/R L2: 160MHz */
- { 4, 1 },
- /* ACLK_GDL/R L3: 133MHz */
- { 5, 1 },
-};
-
-/*** Clock Divider Data for Exynos4212/4412 ***/
-static unsigned int exynos4x12_clkdiv_dmc0[][6] = {
- /*
- * Clock divider value for following
- * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
- * DIVDMCP}
- */
-
- /* DMC L0: 400MHz */
- {3, 1, 1, 1, 1, 1},
- /* DMC L1: 266.7MHz */
- {4, 1, 1, 2, 1, 1},
- /* DMC L2: 160MHz */
- {5, 1, 1, 4, 1, 1},
- /* DMC L3: 133MHz */
- {5, 1, 1, 5, 1, 1},
- /* DMC L4: 100MHz */
- {7, 1, 1, 7, 1, 1},
-};
-static unsigned int exynos4x12_clkdiv_dmc1[][6] = {
- /*
- * Clock divider value for following
- * { G2DACP, DIVC2C, DIVC2C_ACLK }
- */
-
- /* DMC L0: 400MHz */
- {3, 1, 1},
- /* DMC L1: 266.7MHz */
- {4, 2, 1},
- /* DMC L2: 160MHz */
- {5, 4, 1},
- /* DMC L3: 133MHz */
- {5, 5, 1},
- /* DMC L4: 100MHz */
- {7, 7, 1},
-};
-static unsigned int exynos4x12_clkdiv_top[][5] = {
- /*
- * Clock divider value for following
- * { DIVACLK266_GPS, DIVACLK100, DIVACLK160,
- DIVACLK133, DIVONENAND }
- */
-
- /* ACLK_GDL/R L0: 200MHz */
- {2, 7, 4, 5, 1},
- /* ACLK_GDL/R L1: 200MHz */
- {2, 7, 4, 5, 1},
- /* ACLK_GDL/R L2: 160MHz */
- {4, 7, 5, 7, 1},
- /* ACLK_GDL/R L3: 133MHz */
- {4, 7, 5, 7, 1},
- /* ACLK_GDL/R L4: 100MHz */
- {7, 7, 7, 7, 1},
-};
-static unsigned int exynos4x12_clkdiv_lr_bus[][2] = {
- /*
- * Clock divider value for following
- * { DIVGDL/R, DIVGPL/R }
- */
-
- /* ACLK_GDL/R L0: 200MHz */
- {3, 1},
- /* ACLK_GDL/R L1: 200MHz */
- {3, 1},
- /* ACLK_GDL/R L2: 160MHz */
- {4, 1},
- /* ACLK_GDL/R L3: 133MHz */
- {5, 1},
- /* ACLK_GDL/R L4: 100MHz */
- {7, 1},
-};
-static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
- /*
- * Clock divider value for following
- * { DIVMFC, DIVJPEG, DIVFIMC0~3}
- */
-
- /* SCLK_MFC: 200MHz */
- {3, 3, 4},
- /* SCLK_MFC: 200MHz */
- {3, 3, 4},
- /* SCLK_MFC: 160MHz */
- {4, 4, 5},
- /* SCLK_MFC: 133MHz */
- {5, 5, 5},
- /* SCLK_MFC: 100MHz */
- {7, 7, 7},
-};
-
-
-static int exynos4210_set_busclk(struct busfreq_data *data,
- struct busfreq_opp_info *oppi)
-{
- unsigned int index;
- unsigned int tmp;
-
- for (index = LV_0; index < EX4210_LV_NUM; index++)
- if (oppi->rate == exynos4210_busclk_table[index].clk)
- break;
-
- if (index == EX4210_LV_NUM)
- return -EINVAL;
-
- /* Change Divider - DMC0 */
- tmp = data->dmc_divtable[index];
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_DMC0);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC0);
- } while (tmp & 0x11111111);
-
- /* Change Divider - TOP */
- tmp = data->top_divtable[index];
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_TOP);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_TOP);
- } while (tmp & 0x11111);
-
- /* Change Divider - LEFTBUS */
- tmp = __raw_readl(EXYNOS4_CLKDIV_LEFTBUS);
-
- tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
-
- tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
- EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
- (exynos4210_clkdiv_lr_bus[index][1] <<
- EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_LEFTBUS);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_LEFTBUS);
- } while (tmp & 0x11);
-
- /* Change Divider - RIGHTBUS */
- tmp = __raw_readl(EXYNOS4_CLKDIV_RIGHTBUS);
-
- tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
-
- tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
- EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
- (exynos4210_clkdiv_lr_bus[index][1] <<
- EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_RIGHTBUS);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_RIGHTBUS);
- } while (tmp & 0x11);
-
- return 0;
-}
-
-static int exynos4x12_set_busclk(struct busfreq_data *data,
- struct busfreq_opp_info *oppi)
-{
- unsigned int index;
- unsigned int tmp;
-
- for (index = LV_0; index < EX4x12_LV_NUM; index++)
- if (oppi->rate == exynos4x12_mifclk_table[index].clk)
- break;
-
- if (index == EX4x12_LV_NUM)
- return -EINVAL;
-
- /* Change Divider - DMC0 */
- tmp = data->dmc_divtable[index];
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_DMC0);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC0);
- } while (tmp & 0x11111111);
-
- /* Change Divider - DMC1 */
- tmp = __raw_readl(EXYNOS4_CLKDIV_DMC1);
-
- tmp &= ~(EXYNOS4_CLKDIV_DMC1_G2D_ACP_MASK |
- EXYNOS4_CLKDIV_DMC1_C2C_MASK |
- EXYNOS4_CLKDIV_DMC1_C2CACLK_MASK);
-
- tmp |= ((exynos4x12_clkdiv_dmc1[index][0] <<
- EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT) |
- (exynos4x12_clkdiv_dmc1[index][1] <<
- EXYNOS4_CLKDIV_DMC1_C2C_SHIFT) |
- (exynos4x12_clkdiv_dmc1[index][2] <<
- EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_DMC1);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_DMC1);
- } while (tmp & 0x111111);
-
- /* Change Divider - TOP */
- tmp = __raw_readl(EXYNOS4_CLKDIV_TOP);
-
- tmp &= ~(EXYNOS4_CLKDIV_TOP_ACLK266_GPS_MASK |
- EXYNOS4_CLKDIV_TOP_ACLK100_MASK |
- EXYNOS4_CLKDIV_TOP_ACLK160_MASK |
- EXYNOS4_CLKDIV_TOP_ACLK133_MASK |
- EXYNOS4_CLKDIV_TOP_ONENAND_MASK);
-
- tmp |= ((exynos4x12_clkdiv_top[index][0] <<
- EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT) |
- (exynos4x12_clkdiv_top[index][1] <<
- EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT) |
- (exynos4x12_clkdiv_top[index][2] <<
- EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT) |
- (exynos4x12_clkdiv_top[index][3] <<
- EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT) |
- (exynos4x12_clkdiv_top[index][4] <<
- EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_TOP);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_TOP);
- } while (tmp & 0x11111);
-
- /* Change Divider - LEFTBUS */
- tmp = __raw_readl(EXYNOS4_CLKDIV_LEFTBUS);
-
- tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
-
- tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
- EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
- (exynos4x12_clkdiv_lr_bus[index][1] <<
- EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_LEFTBUS);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_LEFTBUS);
- } while (tmp & 0x11);
-
- /* Change Divider - RIGHTBUS */
- tmp = __raw_readl(EXYNOS4_CLKDIV_RIGHTBUS);
-
- tmp &= ~(EXYNOS4_CLKDIV_BUS_GDLR_MASK | EXYNOS4_CLKDIV_BUS_GPLR_MASK);
-
- tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
- EXYNOS4_CLKDIV_BUS_GDLR_SHIFT) |
- (exynos4x12_clkdiv_lr_bus[index][1] <<
- EXYNOS4_CLKDIV_BUS_GPLR_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_RIGHTBUS);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_RIGHTBUS);
- } while (tmp & 0x11);
-
- /* Change Divider - MFC */
- tmp = __raw_readl(EXYNOS4_CLKDIV_MFC);
-
- tmp &= ~(EXYNOS4_CLKDIV_MFC_MASK);
-
- tmp |= ((exynos4x12_clkdiv_sclkip[index][0] <<
- EXYNOS4_CLKDIV_MFC_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_MFC);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_MFC);
- } while (tmp & 0x1);
-
- /* Change Divider - JPEG */
- tmp = __raw_readl(EXYNOS4_CLKDIV_CAM1);
-
- tmp &= ~(EXYNOS4_CLKDIV_CAM1_JPEG_MASK);
-
- tmp |= ((exynos4x12_clkdiv_sclkip[index][1] <<
- EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_CAM1);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_CAM1);
- } while (tmp & 0x1);
-
- /* Change Divider - FIMC0~3 */
- tmp = __raw_readl(EXYNOS4_CLKDIV_CAM);
-
- tmp &= ~(EXYNOS4_CLKDIV_CAM_FIMC0_MASK | EXYNOS4_CLKDIV_CAM_FIMC1_MASK |
- EXYNOS4_CLKDIV_CAM_FIMC2_MASK | EXYNOS4_CLKDIV_CAM_FIMC3_MASK);
-
- tmp |= ((exynos4x12_clkdiv_sclkip[index][2] <<
- EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT) |
- (exynos4x12_clkdiv_sclkip[index][2] <<
- EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT) |
- (exynos4x12_clkdiv_sclkip[index][2] <<
- EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT) |
- (exynos4x12_clkdiv_sclkip[index][2] <<
- EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT));
-
- __raw_writel(tmp, EXYNOS4_CLKDIV_CAM);
-
- do {
- tmp = __raw_readl(EXYNOS4_CLKDIV_STAT_CAM1);
- } while (tmp & 0x1111);
-
- return 0;
-}
-
-static int exynos4x12_get_intspec(unsigned long mifclk)
-{
- int i = 0;
-
- while (exynos4x12_intclk_table[i].clk) {
- if (exynos4x12_intclk_table[i].clk <= mifclk)
- return i;
- i++;
- }
-
- return -EINVAL;
-}
-
-static int exynos4_bus_setvolt(struct busfreq_data *data,
- struct busfreq_opp_info *oppi,
- struct busfreq_opp_info *oldoppi)
-{
- int err = 0, tmp;
- unsigned long volt = oppi->volt;
-
- switch (data->type) {
- case TYPE_BUSF_EXYNOS4210:
- /* OPP represents DMC clock + INT voltage */
- err = regulator_set_voltage(data->vdd_int, volt,
- MAX_SAFEVOLT);
- break;
- case TYPE_BUSF_EXYNOS4x12:
- /* OPP represents MIF clock + MIF voltage */
- err = regulator_set_voltage(data->vdd_mif, volt,
- MAX_SAFEVOLT);
- if (err)
- break;
-
- tmp = exynos4x12_get_intspec(oppi->rate);
- if (tmp < 0) {
- err = tmp;
- regulator_set_voltage(data->vdd_mif,
- oldoppi->volt,
- MAX_SAFEVOLT);
- break;
- }
- err = regulator_set_voltage(data->vdd_int,
- exynos4x12_intclk_table[tmp].volt,
- MAX_SAFEVOLT);
- /* Try to recover */
- if (err)
- regulator_set_voltage(data->vdd_mif,
- oldoppi->volt,
- MAX_SAFEVOLT);
- break;
- default:
- err = -EINVAL;
- }
-
- return err;
-}
-
-static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
- u32 flags)
-{
- int err = 0;
- struct platform_device *pdev = container_of(dev, struct platform_device,
- dev);
- struct busfreq_data *data = platform_get_drvdata(pdev);
- struct dev_pm_opp *opp;
- unsigned long freq;
- unsigned long old_freq = data->curr_oppinfo.rate;
- struct busfreq_opp_info new_oppinfo;
-
- rcu_read_lock();
- opp = devfreq_recommended_opp(dev, _freq, flags);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- return PTR_ERR(opp);
- }
- new_oppinfo.rate = dev_pm_opp_get_freq(opp);
- new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
- freq = new_oppinfo.rate;
-
- if (old_freq == freq)
- return 0;
-
- dev_dbg(dev, "targeting %lukHz %luuV\n", freq, new_oppinfo.volt);
-
- mutex_lock(&data->lock);
-
- if (data->disabled)
- goto out;
-
- if (old_freq < freq)
- err = exynos4_bus_setvolt(data, &new_oppinfo,
- &data->curr_oppinfo);
- if (err)
- goto out;
-
- if (old_freq != freq) {
- switch (data->type) {
- case TYPE_BUSF_EXYNOS4210:
- err = exynos4210_set_busclk(data, &new_oppinfo);
- break;
- case TYPE_BUSF_EXYNOS4x12:
- err = exynos4x12_set_busclk(data, &new_oppinfo);
- break;
- default:
- err = -EINVAL;
- }
- }
- if (err)
- goto out;
-
- if (old_freq > freq)
- err = exynos4_bus_setvolt(data, &new_oppinfo,
- &data->curr_oppinfo);
- if (err)
- goto out;
-
- data->curr_oppinfo = new_oppinfo;
-out:
- mutex_unlock(&data->lock);
- return err;
-}
-
-static int exynos4_bus_get_dev_status(struct device *dev,
- struct devfreq_dev_status *stat)
-{
- struct busfreq_data *data = dev_get_drvdata(dev);
- struct busfreq_ppmu_data *ppmu_data = &data->ppmu_data;
- int busier;
-
- exynos_read_ppmu(ppmu_data);
- busier = exynos_get_busier_ppmu(ppmu_data);
- stat->current_frequency = data->curr_oppinfo.rate;
-
- /* Number of cycles spent on memory access */
- stat->busy_time = ppmu_data->ppmu[busier].count[PPMU_PMNCNT3];
- stat->busy_time *= 100 / BUS_SATURATION_RATIO;
- stat->total_time = ppmu_data->ppmu[busier].ccnt;
-
- /* If the counters have overflown, retry */
- if (ppmu_data->ppmu[busier].ccnt_overflow ||
- ppmu_data->ppmu[busier].count_overflow[0])
- return -EAGAIN;
-
- return 0;
-}
-
-static struct devfreq_dev_profile exynos4_devfreq_profile = {
- .initial_freq = 400000,
- .polling_ms = 50,
- .target = exynos4_bus_target,
- .get_dev_status = exynos4_bus_get_dev_status,
-};
-
-static int exynos4210_init_tables(struct busfreq_data *data)
-{
- u32 tmp;
- int mgrp;
- int i, err = 0;
-
- tmp = __raw_readl(EXYNOS4_CLKDIV_DMC0);
- for (i = LV_0; i < EX4210_LV_NUM; i++) {
- tmp &= ~(EXYNOS4_CLKDIV_DMC0_ACP_MASK |
- EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK |
- EXYNOS4_CLKDIV_DMC0_DPHY_MASK |
- EXYNOS4_CLKDIV_DMC0_DMC_MASK |
- EXYNOS4_CLKDIV_DMC0_DMCD_MASK |
- EXYNOS4_CLKDIV_DMC0_DMCP_MASK |
- EXYNOS4_CLKDIV_DMC0_COPY2_MASK |
- EXYNOS4_CLKDIV_DMC0_CORETI_MASK);
-
- tmp |= ((exynos4210_clkdiv_dmc0[i][0] <<
- EXYNOS4_CLKDIV_DMC0_ACP_SHIFT) |
- (exynos4210_clkdiv_dmc0[i][1] <<
- EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT) |
- (exynos4210_clkdiv_dmc0[i][2] <<
- EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT) |
- (exynos4210_clkdiv_dmc0[i][3] <<
- EXYNOS4_CLKDIV_DMC0_DMC_SHIFT) |
- (exynos4210_clkdiv_dmc0[i][4] <<
- EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT) |
- (exynos4210_clkdiv_dmc0[i][5] <<
- EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT) |
- (exynos4210_clkdiv_dmc0[i][6] <<
- EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT) |
- (exynos4210_clkdiv_dmc0[i][7] <<
- EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT));
-
- data->dmc_divtable[i] = tmp;
- }
-
- tmp = __raw_readl(EXYNOS4_CLKDIV_TOP);
- for (i = LV_0; i < EX4210_LV_NUM; i++) {
- tmp &= ~(EXYNOS4_CLKDIV_TOP_ACLK200_MASK |
- EXYNOS4_CLKDIV_TOP_ACLK100_MASK |
- EXYNOS4_CLKDIV_TOP_ACLK160_MASK |
- EXYNOS4_CLKDIV_TOP_ACLK133_MASK |
- EXYNOS4_CLKDIV_TOP_ONENAND_MASK);
-
- tmp |= ((exynos4210_clkdiv_top[i][0] <<
- EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT) |
- (exynos4210_clkdiv_top[i][1] <<
- EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT) |
- (exynos4210_clkdiv_top[i][2] <<
- EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT) |
- (exynos4210_clkdiv_top[i][3] <<
- EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT) |
- (exynos4210_clkdiv_top[i][4] <<
- EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT));
-
- data->top_divtable[i] = tmp;
- }
-
- /*
- * TODO: init tmp based on busfreq_data
- * (device-tree or platform-data)
- */
- tmp = 0; /* Max voltages for the reliability of the unknown */
-
- pr_debug("ASV Group of Exynos4 is %d\n", tmp);
- /* Use merged grouping for voltage */
- switch (tmp) {
- case 0:
- mgrp = 0;
- break;
- case 1:
- case 2:
- mgrp = 1;
- break;
- case 3:
- case 4:
- mgrp = 2;
- break;
- case 5:
- case 6:
- mgrp = 3;
- break;
- case 7:
- mgrp = 4;
- break;
- default:
- pr_warn("Unknown ASV Group. Use max voltage.\n");
- mgrp = 0;
- }
-
- for (i = LV_0; i < EX4210_LV_NUM; i++)
- exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
-
- for (i = LV_0; i < EX4210_LV_NUM; i++) {
- err = dev_pm_opp_add(data->dev, exynos4210_busclk_table[i].clk,
- exynos4210_busclk_table[i].volt);
- if (err) {
- dev_err(data->dev, "Cannot add opp entries.\n");
- return err;
- }
- }
-
-
- return 0;
-}
-
-static int exynos4x12_init_tables(struct busfreq_data *data)
-{
- unsigned int i;
- unsigned int tmp;
- int ret;
-
- /* Enable pause function for DREX2 DVFS */
- tmp = __raw_readl(EXYNOS4_DMC_PAUSE_CTRL);
- tmp |= EXYNOS4_DMC_PAUSE_ENABLE;
- __raw_writel(tmp, EXYNOS4_DMC_PAUSE_CTRL);
-
- tmp = __raw_readl(EXYNOS4_CLKDIV_DMC0);
-
- for (i = 0; i < EX4x12_LV_NUM; i++) {
- tmp &= ~(EXYNOS4_CLKDIV_DMC0_ACP_MASK |
- EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK |
- EXYNOS4_CLKDIV_DMC0_DPHY_MASK |
- EXYNOS4_CLKDIV_DMC0_DMC_MASK |
- EXYNOS4_CLKDIV_DMC0_DMCD_MASK |
- EXYNOS4_CLKDIV_DMC0_DMCP_MASK);
-
- tmp |= ((exynos4x12_clkdiv_dmc0[i][0] <<
- EXYNOS4_CLKDIV_DMC0_ACP_SHIFT) |
- (exynos4x12_clkdiv_dmc0[i][1] <<
- EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT) |
- (exynos4x12_clkdiv_dmc0[i][2] <<
- EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT) |
- (exynos4x12_clkdiv_dmc0[i][3] <<
- EXYNOS4_CLKDIV_DMC0_DMC_SHIFT) |
- (exynos4x12_clkdiv_dmc0[i][4] <<
- EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT) |
- (exynos4x12_clkdiv_dmc0[i][5] <<
- EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT));
-
- data->dmc_divtable[i] = tmp;
- }
-
- tmp = 0; /* Max voltages for the reliability of the unknown */
-
- if (tmp > 8)
- tmp = 0;
- pr_debug("ASV Group of Exynos4x12 is %d\n", tmp);
-
- for (i = 0; i < EX4x12_LV_NUM; i++) {
- exynos4x12_mifclk_table[i].volt =
- exynos4x12_mif_step_50[tmp][i];
- exynos4x12_intclk_table[i].volt =
- exynos4x12_int_volt[tmp][i];
- }
-
- for (i = 0; i < EX4x12_LV_NUM; i++) {
- ret = dev_pm_opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
- exynos4x12_mifclk_table[i].volt);
- if (ret) {
- dev_err(data->dev, "Fail to add opp entries.\n");
- return ret;
- }
- }
-
- return 0;
-}
-
-static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct busfreq_data *data = container_of(this, struct busfreq_data,
- pm_notifier);
- struct dev_pm_opp *opp;
- struct busfreq_opp_info new_oppinfo;
- unsigned long maxfreq = ULONG_MAX;
- int err = 0;
-
- switch (event) {
- case PM_SUSPEND_PREPARE:
- /* Set Fastest and Deactivate DVFS */
- mutex_lock(&data->lock);
-
- data->disabled = true;
-
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- dev_err(data->dev, "%s: unable to find a min freq\n",
- __func__);
- mutex_unlock(&data->lock);
- return PTR_ERR(opp);
- }
- new_oppinfo.rate = dev_pm_opp_get_freq(opp);
- new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
-
- err = exynos4_bus_setvolt(data, &new_oppinfo,
- &data->curr_oppinfo);
- if (err)
- goto unlock;
-
- switch (data->type) {
- case TYPE_BUSF_EXYNOS4210:
- err = exynos4210_set_busclk(data, &new_oppinfo);
- break;
- case TYPE_BUSF_EXYNOS4x12:
- err = exynos4x12_set_busclk(data, &new_oppinfo);
- break;
- default:
- err = -EINVAL;
- }
- if (err)
- goto unlock;
-
- data->curr_oppinfo = new_oppinfo;
-unlock:
- mutex_unlock(&data->lock);
- if (err)
- return err;
- return NOTIFY_OK;
- case PM_POST_RESTORE:
- case PM_POST_SUSPEND:
- /* Reactivate */
- mutex_lock(&data->lock);
- data->disabled = false;
- mutex_unlock(&data->lock);
- return NOTIFY_OK;
- }
-
- return NOTIFY_DONE;
-}
-
-static int exynos4_busfreq_probe(struct platform_device *pdev)
-{
- struct busfreq_data *data;
- struct busfreq_ppmu_data *ppmu_data;
- struct dev_pm_opp *opp;
- struct device *dev = &pdev->dev;
- int err = 0;
-
- data = devm_kzalloc(&pdev->dev, sizeof(struct busfreq_data), GFP_KERNEL);
- if (data == NULL) {
- dev_err(dev, "Cannot allocate memory.\n");
- return -ENOMEM;
- }
-
- ppmu_data = &data->ppmu_data;
- ppmu_data->ppmu_end = PPMU_END;
- ppmu_data->ppmu = devm_kzalloc(dev,
- sizeof(struct exynos_ppmu) * PPMU_END,
- GFP_KERNEL);
- if (!ppmu_data->ppmu) {
- dev_err(dev, "Failed to allocate memory for exynos_ppmu\n");
- return -ENOMEM;
- }
-
- data->type = pdev->id_entry->driver_data;
- ppmu_data->ppmu[PPMU_DMC0].hw_base = S5P_VA_DMC0;
- ppmu_data->ppmu[PPMU_DMC1].hw_base = S5P_VA_DMC1;
- data->pm_notifier.notifier_call = exynos4_busfreq_pm_notifier_event;
- data->dev = dev;
- mutex_init(&data->lock);
-
- switch (data->type) {
- case TYPE_BUSF_EXYNOS4210:
- err = exynos4210_init_tables(data);
- break;
- case TYPE_BUSF_EXYNOS4x12:
- err = exynos4x12_init_tables(data);
- break;
- default:
- dev_err(dev, "Cannot determine the device id %d\n", data->type);
- err = -EINVAL;
- }
- if (err) {
- dev_err(dev, "Cannot initialize busfreq table %d\n",
- data->type);
- return err;
- }
-
- data->vdd_int = devm_regulator_get(dev, "vdd_int");
- if (IS_ERR(data->vdd_int)) {
- dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
- return PTR_ERR(data->vdd_int);
- }
- if (data->type == TYPE_BUSF_EXYNOS4x12) {
- data->vdd_mif = devm_regulator_get(dev, "vdd_mif");
- if (IS_ERR(data->vdd_mif)) {
- dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
- return PTR_ERR(data->vdd_mif);
- }
- }
-
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_floor(dev,
- &exynos4_devfreq_profile.initial_freq);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- dev_err(dev, "Invalid initial frequency %lu kHz.\n",
- exynos4_devfreq_profile.initial_freq);
- return PTR_ERR(opp);
- }
- data->curr_oppinfo.rate = dev_pm_opp_get_freq(opp);
- data->curr_oppinfo.volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
-
- platform_set_drvdata(pdev, data);
-
- data->devfreq = devm_devfreq_add_device(dev, &exynos4_devfreq_profile,
- "simple_ondemand", NULL);
- if (IS_ERR(data->devfreq))
- return PTR_ERR(data->devfreq);
-
- /*
- * Start PPMU (Performance Profiling Monitoring Unit) to check
- * utilization of each IP in the Exynos4 SoC.
- */
- busfreq_mon_reset(ppmu_data);
-
- /* Register opp_notifier for Exynos4 busfreq */
- err = devm_devfreq_register_opp_notifier(dev, data->devfreq);
- if (err < 0) {
- dev_err(dev, "Failed to register opp notifier\n");
- return err;
- }
-
- /* Register pm_notifier for Exynos4 busfreq */
- err = register_pm_notifier(&data->pm_notifier);
- if (err) {
- dev_err(dev, "Failed to setup pm notifier\n");
- return err;
- }
-
- return 0;
-}
-
-static int exynos4_busfreq_remove(struct platform_device *pdev)
-{
- struct busfreq_data *data = platform_get_drvdata(pdev);
-
- /* Unregister all of notifier chain */
- unregister_pm_notifier(&data->pm_notifier);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int exynos4_busfreq_resume(struct device *dev)
-{
- struct busfreq_data *data = dev_get_drvdata(dev);
- struct busfreq_ppmu_data *ppmu_data = &data->ppmu_data;
-
- busfreq_mon_reset(ppmu_data);
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(exynos4_busfreq_pm_ops, NULL, exynos4_busfreq_resume);
-
-static const struct platform_device_id exynos4_busfreq_id[] = {
- { "exynos4210-busfreq", TYPE_BUSF_EXYNOS4210 },
- { "exynos4412-busfreq", TYPE_BUSF_EXYNOS4x12 },
- { "exynos4212-busfreq", TYPE_BUSF_EXYNOS4x12 },
- { },
-};
-
-static struct platform_driver exynos4_busfreq_driver = {
- .probe = exynos4_busfreq_probe,
- .remove = exynos4_busfreq_remove,
- .id_table = exynos4_busfreq_id,
- .driver = {
- .name = "exynos4-busfreq",
- .pm = &exynos4_busfreq_pm_ops,
- },
-};
-
-static int __init exynos4_busfreq_init(void)
-{
- return platform_driver_register(&exynos4_busfreq_driver);
-}
-late_initcall(exynos4_busfreq_init);
-
-static void __exit exynos4_busfreq_exit(void)
-{
- platform_driver_unregister(&exynos4_busfreq_driver);
-}
-module_exit(exynos4_busfreq_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework");
-MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/devfreq/exynos/exynos4_bus.h b/drivers/devfreq/exynos/exynos4_bus.h
deleted file mode 100644
index 94c73c18d..000000000
--- a/drivers/devfreq/exynos/exynos4_bus.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * EXYNOS4 BUS header
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __DEVFREQ_EXYNOS4_BUS_H
-#define __DEVFREQ_EXYNOS4_BUS_H __FILE__
-
-#include <mach/map.h>
-
-#define EXYNOS4_CLKDIV_LEFTBUS (S5P_VA_CMU + 0x04500)
-#define EXYNOS4_CLKDIV_STAT_LEFTBUS (S5P_VA_CMU + 0x04600)
-
-#define EXYNOS4_CLKDIV_RIGHTBUS (S5P_VA_CMU + 0x08500)
-#define EXYNOS4_CLKDIV_STAT_RIGHTBUS (S5P_VA_CMU + 0x08600)
-
-#define EXYNOS4_CLKDIV_TOP (S5P_VA_CMU + 0x0C510)
-#define EXYNOS4_CLKDIV_CAM (S5P_VA_CMU + 0x0C520)
-#define EXYNOS4_CLKDIV_MFC (S5P_VA_CMU + 0x0C528)
-
-#define EXYNOS4_CLKDIV_STAT_TOP (S5P_VA_CMU + 0x0C610)
-#define EXYNOS4_CLKDIV_STAT_MFC (S5P_VA_CMU + 0x0C628)
-
-#define EXYNOS4210_CLKGATE_IP_IMAGE (S5P_VA_CMU + 0x0C930)
-#define EXYNOS4212_CLKGATE_IP_IMAGE (S5P_VA_CMU + 0x04930)
-
-#define EXYNOS4_CLKDIV_DMC0 (S5P_VA_CMU + 0x10500)
-#define EXYNOS4_CLKDIV_DMC1 (S5P_VA_CMU + 0x10504)
-#define EXYNOS4_CLKDIV_STAT_DMC0 (S5P_VA_CMU + 0x10600)
-#define EXYNOS4_CLKDIV_STAT_DMC1 (S5P_VA_CMU + 0x10604)
-
-#define EXYNOS4_DMC_PAUSE_CTRL (S5P_VA_CMU + 0x11094)
-#define EXYNOS4_DMC_PAUSE_ENABLE (1 << 0)
-
-#define EXYNOS4_CLKDIV_DMC0_ACP_SHIFT (0)
-#define EXYNOS4_CLKDIV_DMC0_ACP_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_ACP_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT (4)
-#define EXYNOS4_CLKDIV_DMC0_ACPPCLK_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_ACPPCLK_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT (8)
-#define EXYNOS4_CLKDIV_DMC0_DPHY_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DPHY_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_DMC_SHIFT (12)
-#define EXYNOS4_CLKDIV_DMC0_DMC_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DMC_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT (16)
-#define EXYNOS4_CLKDIV_DMC0_DMCD_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DMCD_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT (20)
-#define EXYNOS4_CLKDIV_DMC0_DMCP_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_DMCP_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT (24)
-#define EXYNOS4_CLKDIV_DMC0_COPY2_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_COPY2_SHIFT)
-#define EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT (28)
-#define EXYNOS4_CLKDIV_DMC0_CORETI_MASK (0x7 << EXYNOS4_CLKDIV_DMC0_CORETI_SHIFT)
-
-#define EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT (0)
-#define EXYNOS4_CLKDIV_DMC1_G2D_ACP_MASK (0xf << EXYNOS4_CLKDIV_DMC1_G2D_ACP_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_C2C_SHIFT (4)
-#define EXYNOS4_CLKDIV_DMC1_C2C_MASK (0x7 << EXYNOS4_CLKDIV_DMC1_C2C_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_PWI_SHIFT (8)
-#define EXYNOS4_CLKDIV_DMC1_PWI_MASK (0xf << EXYNOS4_CLKDIV_DMC1_PWI_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT (12)
-#define EXYNOS4_CLKDIV_DMC1_C2CACLK_MASK (0x7 << EXYNOS4_CLKDIV_DMC1_C2CACLK_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_DVSEM_SHIFT (16)
-#define EXYNOS4_CLKDIV_DMC1_DVSEM_MASK (0x7f << EXYNOS4_CLKDIV_DMC1_DVSEM_SHIFT)
-#define EXYNOS4_CLKDIV_DMC1_DPM_SHIFT (24)
-#define EXYNOS4_CLKDIV_DMC1_DPM_MASK (0x7f << EXYNOS4_CLKDIV_DMC1_DPM_SHIFT)
-
-#define EXYNOS4_CLKDIV_MFC_SHIFT (0)
-#define EXYNOS4_CLKDIV_MFC_MASK (0x7 << EXYNOS4_CLKDIV_MFC_SHIFT)
-
-#define EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT (0)
-#define EXYNOS4_CLKDIV_TOP_ACLK200_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK200_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT (4)
-#define EXYNOS4_CLKDIV_TOP_ACLK100_MASK (0xF << EXYNOS4_CLKDIV_TOP_ACLK100_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT (8)
-#define EXYNOS4_CLKDIV_TOP_ACLK160_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK160_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT (12)
-#define EXYNOS4_CLKDIV_TOP_ACLK133_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK133_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT (16)
-#define EXYNOS4_CLKDIV_TOP_ONENAND_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ONENAND_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT (20)
-#define EXYNOS4_CLKDIV_TOP_ACLK266_GPS_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK266_GPS_SHIFT)
-#define EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_SHIFT (24)
-#define EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_MASK (0x7 << EXYNOS4_CLKDIV_TOP_ACLK400_MCUISP_SHIFT)
-
-#define EXYNOS4_CLKDIV_BUS_GDLR_SHIFT (0)
-#define EXYNOS4_CLKDIV_BUS_GDLR_MASK (0x7 << EXYNOS4_CLKDIV_BUS_GDLR_SHIFT)
-#define EXYNOS4_CLKDIV_BUS_GPLR_SHIFT (4)
-#define EXYNOS4_CLKDIV_BUS_GPLR_MASK (0x7 << EXYNOS4_CLKDIV_BUS_GPLR_SHIFT)
-
-#define EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT (0)
-#define EXYNOS4_CLKDIV_CAM_FIMC0_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC0_SHIFT)
-#define EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT (4)
-#define EXYNOS4_CLKDIV_CAM_FIMC1_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC1_SHIFT)
-#define EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT (8)
-#define EXYNOS4_CLKDIV_CAM_FIMC2_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC2_SHIFT)
-#define EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT (12)
-#define EXYNOS4_CLKDIV_CAM_FIMC3_MASK (0xf << EXYNOS4_CLKDIV_CAM_FIMC3_SHIFT)
-
-#define EXYNOS4_CLKDIV_CAM1 (S5P_VA_CMU + 0x0C568)
-
-#define EXYNOS4_CLKDIV_STAT_CAM1 (S5P_VA_CMU + 0x0C668)
-
-#define EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT (0)
-#define EXYNOS4_CLKDIV_CAM1_JPEG_MASK (0xf << EXYNOS4_CLKDIV_CAM1_JPEG_SHIFT)
-
-#endif /* __DEVFREQ_EXYNOS4_BUS_H */
diff --git a/drivers/devfreq/exynos/exynos5_bus.c b/drivers/devfreq/exynos/exynos5_bus.c
deleted file mode 100644
index 297ea30d4..000000000
--- a/drivers/devfreq/exynos/exynos5_bus.c
+++ /dev/null
@@ -1,431 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * EXYNOS5 INT clock frequency scaling support using DEVFREQ framework
- * Based on work done by Jonghwan Choi <jhbird.choi@samsung.com>
- * Support for only EXYNOS5250 is present.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/devfreq.h>
-#include <linux/io.h>
-#include <linux/pm_opp.h>
-#include <linux/slab.h>
-#include <linux/suspend.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/pm_qos.h>
-#include <linux/regulator/consumer.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-
-#include "exynos_ppmu.h"
-
-#define MAX_SAFEVOLT 1100000 /* 1.10V */
-/* Assume that the bus is saturated if the utilization is 25% */
-#define INT_BUS_SATURATION_RATIO 25
-
-enum int_level_idx {
- LV_0,
- LV_1,
- LV_2,
- LV_3,
- LV_4,
- _LV_END
-};
-
-enum exynos_ppmu_list {
- PPMU_RIGHT,
- PPMU_END,
-};
-
-struct busfreq_data_int {
- struct device *dev;
- struct devfreq *devfreq;
- struct regulator *vdd_int;
- struct busfreq_ppmu_data ppmu_data;
- unsigned long curr_freq;
- bool disabled;
-
- struct notifier_block pm_notifier;
- struct mutex lock;
- struct pm_qos_request int_req;
- struct clk *int_clk;
-};
-
-struct int_bus_opp_table {
- unsigned int idx;
- unsigned long clk;
- unsigned long volt;
-};
-
-static struct int_bus_opp_table exynos5_int_opp_table[] = {
- {LV_0, 266000, 1025000},
- {LV_1, 200000, 1025000},
- {LV_2, 160000, 1025000},
- {LV_3, 133000, 1025000},
- {LV_4, 100000, 1025000},
- {0, 0, 0},
-};
-
-static int exynos5_int_setvolt(struct busfreq_data_int *data,
- unsigned long volt)
-{
- return regulator_set_voltage(data->vdd_int, volt, MAX_SAFEVOLT);
-}
-
-static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
- u32 flags)
-{
- int err = 0;
- struct platform_device *pdev = container_of(dev, struct platform_device,
- dev);
- struct busfreq_data_int *data = platform_get_drvdata(pdev);
- struct dev_pm_opp *opp;
- unsigned long old_freq, freq;
- unsigned long volt;
-
- rcu_read_lock();
- opp = devfreq_recommended_opp(dev, _freq, flags);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- dev_err(dev, "%s: Invalid OPP.\n", __func__);
- return PTR_ERR(opp);
- }
-
- freq = dev_pm_opp_get_freq(opp);
- volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
-
- old_freq = data->curr_freq;
-
- if (old_freq == freq)
- return 0;
-
- dev_dbg(dev, "targeting %lukHz %luuV\n", freq, volt);
-
- mutex_lock(&data->lock);
-
- if (data->disabled)
- goto out;
-
- if (freq > exynos5_int_opp_table[0].clk)
- pm_qos_update_request(&data->int_req, freq * 16 / 1000);
- else
- pm_qos_update_request(&data->int_req, -1);
-
- if (old_freq < freq)
- err = exynos5_int_setvolt(data, volt);
- if (err)
- goto out;
-
- err = clk_set_rate(data->int_clk, freq * 1000);
-
- if (err)
- goto out;
-
- if (old_freq > freq)
- err = exynos5_int_setvolt(data, volt);
- if (err)
- goto out;
-
- data->curr_freq = freq;
-out:
- mutex_unlock(&data->lock);
- return err;
-}
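
The deleted target callback above encodes the standard DVFS ordering rule: when raising the bus clock, the supply voltage must be raised first; when lowering it, the voltage is dropped only afterwards, so the logic is never clocked faster than its supply allows. A minimal standalone sketch of that ordering, where set_volt() and set_rate() are hypothetical stand-ins for the regulator_set_voltage() and clk_set_rate() calls:

#include <stdio.h>

static int set_volt(unsigned long uv)  { printf("volt -> %lu uV\n", uv); return 0; }
static int set_rate(unsigned long khz) { printf("rate -> %lu kHz\n", khz); return 0; }

static int dvfs_transition(unsigned long old_khz, unsigned long new_khz,
			   unsigned long new_uv)
{
	int err;

	if (old_khz < new_khz) {	/* speeding up: raise voltage first */
		err = set_volt(new_uv);
		if (err)
			return err;
	}
	err = set_rate(new_khz);
	if (err)
		return err;
	if (old_khz > new_khz)		/* slowing down: lower voltage last */
		err = set_volt(new_uv);
	return err;
}

int main(void)
{
	dvfs_transition(133000, 266000, 1025000);
	return 0;
}
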
-
-static int exynos5_int_get_dev_status(struct device *dev,
- struct devfreq_dev_status *stat)
-{
- struct platform_device *pdev = container_of(dev, struct platform_device,
- dev);
- struct busfreq_data_int *data = platform_get_drvdata(pdev);
- struct busfreq_ppmu_data *ppmu_data = &data->ppmu_data;
- int busier_dmc;
-
- exynos_read_ppmu(ppmu_data);
- busier_dmc = exynos_get_busier_ppmu(ppmu_data);
-
- stat->current_frequency = data->curr_freq;
-
- /* Number of cycles spent on memory access */
- stat->busy_time = ppmu_data->ppmu[busier_dmc].count[PPMU_PMNCNT3];
- stat->busy_time *= 100 / INT_BUS_SATURATION_RATIO;
- stat->total_time = ppmu_data->ppmu[busier_dmc].ccnt;
-
- return 0;
-}
-
-static struct devfreq_dev_profile exynos5_devfreq_int_profile = {
- .initial_freq = 160000,
- .polling_ms = 100,
- .target = exynos5_busfreq_int_target,
- .get_dev_status = exynos5_int_get_dev_status,
-};
-
-static int exynos5250_init_int_tables(struct busfreq_data_int *data)
-{
- int i, err = 0;
-
- for (i = LV_0; i < _LV_END; i++) {
- err = dev_pm_opp_add(data->dev, exynos5_int_opp_table[i].clk,
- exynos5_int_opp_table[i].volt);
- if (err) {
- dev_err(data->dev, "Cannot add opp entries.\n");
- return err;
- }
- }
-
- return 0;
-}
-
-static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct busfreq_data_int *data = container_of(this,
- struct busfreq_data_int, pm_notifier);
- struct dev_pm_opp *opp;
- unsigned long maxfreq = ULONG_MAX;
- unsigned long freq;
- unsigned long volt;
- int err = 0;
-
- switch (event) {
- case PM_SUSPEND_PREPARE:
- /* Set Fastest and Deactivate DVFS */
- mutex_lock(&data->lock);
-
- data->disabled = true;
-
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- err = PTR_ERR(opp);
- goto unlock;
- }
- freq = dev_pm_opp_get_freq(opp);
- volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
-
- err = exynos5_int_setvolt(data, volt);
- if (err)
- goto unlock;
-
- err = clk_set_rate(data->int_clk, freq * 1000);
-
- if (err)
- goto unlock;
-
- data->curr_freq = freq;
-unlock:
- mutex_unlock(&data->lock);
- if (err)
- return NOTIFY_BAD;
- return NOTIFY_OK;
- case PM_POST_RESTORE:
- case PM_POST_SUSPEND:
- /* Reactivate */
- mutex_lock(&data->lock);
- data->disabled = false;
- mutex_unlock(&data->lock);
- return NOTIFY_OK;
- }
-
- return NOTIFY_DONE;
-}
-
-static int exynos5_busfreq_int_probe(struct platform_device *pdev)
-{
- struct busfreq_data_int *data;
- struct busfreq_ppmu_data *ppmu_data;
- struct dev_pm_opp *opp;
- struct device *dev = &pdev->dev;
- struct device_node *np;
- unsigned long initial_freq;
- unsigned long initial_volt;
- int err = 0;
- int i;
-
- data = devm_kzalloc(&pdev->dev, sizeof(struct busfreq_data_int),
- GFP_KERNEL);
- if (data == NULL) {
- dev_err(dev, "Cannot allocate memory.\n");
- return -ENOMEM;
- }
-
- ppmu_data = &data->ppmu_data;
- ppmu_data->ppmu_end = PPMU_END;
- ppmu_data->ppmu = devm_kzalloc(dev,
- sizeof(struct exynos_ppmu) * PPMU_END,
- GFP_KERNEL);
- if (!ppmu_data->ppmu) {
- dev_err(dev, "Failed to allocate memory for exynos_ppmu\n");
- return -ENOMEM;
- }
-
- np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-ppmu");
- if (np == NULL) {
- pr_err("Unable to find PPMU node\n");
- return -ENOENT;
- }
-
- for (i = 0; i < ppmu_data->ppmu_end; i++) {
- /* map PPMU memory region */
- ppmu_data->ppmu[i].hw_base = of_iomap(np, i);
- if (ppmu_data->ppmu[i].hw_base == NULL) {
- dev_err(&pdev->dev, "failed to map memory region\n");
- return -ENOMEM;
- }
- }
- data->pm_notifier.notifier_call = exynos5_busfreq_int_pm_notifier_event;
- data->dev = dev;
- mutex_init(&data->lock);
-
- err = exynos5250_init_int_tables(data);
- if (err)
- return err;
-
- data->vdd_int = devm_regulator_get(dev, "vdd_int");
- if (IS_ERR(data->vdd_int)) {
- dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
- return PTR_ERR(data->vdd_int);
- }
-
- data->int_clk = devm_clk_get(dev, "int_clk");
- if (IS_ERR(data->int_clk)) {
- dev_err(dev, "Cannot get clock \"int_clk\"\n");
- return PTR_ERR(data->int_clk);
- }
-
- rcu_read_lock();
- opp = dev_pm_opp_find_freq_floor(dev,
- &exynos5_devfreq_int_profile.initial_freq);
- if (IS_ERR(opp)) {
- rcu_read_unlock();
- dev_err(dev, "Invalid initial frequency %lu kHz.\n",
- exynos5_devfreq_int_profile.initial_freq);
- return PTR_ERR(opp);
- }
- initial_freq = dev_pm_opp_get_freq(opp);
- initial_volt = dev_pm_opp_get_voltage(opp);
- rcu_read_unlock();
- data->curr_freq = initial_freq;
-
- err = clk_set_rate(data->int_clk, initial_freq * 1000);
- if (err) {
- dev_err(dev, "Failed to set initial frequency\n");
- return err;
- }
-
- err = exynos5_int_setvolt(data, initial_volt);
- if (err)
- return err;
-
- platform_set_drvdata(pdev, data);
-
- busfreq_mon_reset(ppmu_data);
-
- data->devfreq = devm_devfreq_add_device(dev, &exynos5_devfreq_int_profile,
- "simple_ondemand", NULL);
- if (IS_ERR(data->devfreq))
- return PTR_ERR(data->devfreq);
-
- err = devm_devfreq_register_opp_notifier(dev, data->devfreq);
- if (err < 0) {
- dev_err(dev, "Failed to register opp notifier\n");
- return err;
- }
-
- err = register_pm_notifier(&data->pm_notifier);
- if (err) {
- dev_err(dev, "Failed to setup pm notifier\n");
- return err;
- }
-
- /* TODO: Add a new QOS class for int/mif bus */
- pm_qos_add_request(&data->int_req, PM_QOS_NETWORK_THROUGHPUT, -1);
-
- return 0;
-}
-
-static int exynos5_busfreq_int_remove(struct platform_device *pdev)
-{
- struct busfreq_data_int *data = platform_get_drvdata(pdev);
-
- pm_qos_remove_request(&data->int_req);
- unregister_pm_notifier(&data->pm_notifier);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int exynos5_busfreq_int_resume(struct device *dev)
-{
- struct platform_device *pdev = container_of(dev, struct platform_device,
- dev);
- struct busfreq_data_int *data = platform_get_drvdata(pdev);
- struct busfreq_ppmu_data *ppmu_data = &data->ppmu_data;
-
- busfreq_mon_reset(ppmu_data);
- return 0;
-}
-static const struct dev_pm_ops exynos5_busfreq_int_pm = {
- .resume = exynos5_busfreq_int_resume,
-};
-#endif
-static SIMPLE_DEV_PM_OPS(exynos5_busfreq_int_pm_ops, NULL,
- exynos5_busfreq_int_resume);
-
-/* platform device pointer for exynos5 devfreq device. */
-static struct platform_device *exynos5_devfreq_pdev;
-
-static struct platform_driver exynos5_busfreq_int_driver = {
- .probe = exynos5_busfreq_int_probe,
- .remove = exynos5_busfreq_int_remove,
- .driver = {
- .name = "exynos5-bus-int",
- .pm = &exynos5_busfreq_int_pm_ops,
- },
-};
-
-static int __init exynos5_busfreq_int_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&exynos5_busfreq_int_driver);
- if (ret < 0)
- goto out;
-
- exynos5_devfreq_pdev =
- platform_device_register_simple("exynos5-bus-int", -1, NULL, 0);
- if (IS_ERR(exynos5_devfreq_pdev)) {
- ret = PTR_ERR(exynos5_devfreq_pdev);
- goto out1;
- }
-
- return 0;
-out1:
- platform_driver_unregister(&exynos5_busfreq_int_driver);
-out:
- return ret;
-}
-late_initcall(exynos5_busfreq_int_init);
-
-static void __exit exynos5_busfreq_int_exit(void)
-{
- platform_device_unregister(exynos5_devfreq_pdev);
- platform_driver_unregister(&exynos5_busfreq_int_driver);
-}
-module_exit(exynos5_busfreq_int_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("EXYNOS5 busfreq driver with devfreq framework");
diff --git a/drivers/devfreq/exynos/exynos_ppmu.c b/drivers/devfreq/exynos/exynos_ppmu.c
deleted file mode 100644
index 97b75e513..000000000
--- a/drivers/devfreq/exynos/exynos_ppmu.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * EXYNOS - PPMU support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/io.h>
-
-#include "exynos_ppmu.h"
-
-void exynos_ppmu_reset(void __iomem *ppmu_base)
-{
- __raw_writel(PPMU_CYCLE_RESET | PPMU_COUNTER_RESET, ppmu_base);
- __raw_writel(PPMU_ENABLE_CYCLE |
- PPMU_ENABLE_COUNT0 |
- PPMU_ENABLE_COUNT1 |
- PPMU_ENABLE_COUNT2 |
- PPMU_ENABLE_COUNT3,
- ppmu_base + PPMU_CNTENS);
-}
-
-void exynos_ppmu_setevent(void __iomem *ppmu_base, unsigned int ch,
- unsigned int evt)
-{
- __raw_writel(evt, ppmu_base + PPMU_BEVTSEL(ch));
-}
-
-void exynos_ppmu_start(void __iomem *ppmu_base)
-{
- __raw_writel(PPMU_ENABLE, ppmu_base);
-}
-
-void exynos_ppmu_stop(void __iomem *ppmu_base)
-{
- __raw_writel(PPMU_DISABLE, ppmu_base);
-}
-
-unsigned int exynos_ppmu_read(void __iomem *ppmu_base, unsigned int ch)
-{
- unsigned int total;
-
- if (ch == PPMU_PMNCNT3)
- total = ((__raw_readl(ppmu_base + PMCNT_OFFSET(ch)) << 8) |
- __raw_readl(ppmu_base + PMCNT_OFFSET(ch + 1)));
- else
- total = __raw_readl(ppmu_base + PMCNT_OFFSET(ch));
-
- return total;
-}
-
-void busfreq_mon_reset(struct busfreq_ppmu_data *ppmu_data)
-{
- unsigned int i;
-
- for (i = 0; i < ppmu_data->ppmu_end; i++) {
- void __iomem *ppmu_base = ppmu_data->ppmu[i].hw_base;
-
- /* Reset the performance and cycle counters */
- exynos_ppmu_reset(ppmu_base);
-
- /* Setup count registers to monitor read/write transactions */
- ppmu_data->ppmu[i].event[PPMU_PMNCNT3] = RDWR_DATA_COUNT;
- exynos_ppmu_setevent(ppmu_base, PPMU_PMNCNT3,
- ppmu_data->ppmu[i].event[PPMU_PMNCNT3]);
-
- exynos_ppmu_start(ppmu_base);
- }
-}
-EXPORT_SYMBOL(busfreq_mon_reset);
-
-void exynos_read_ppmu(struct busfreq_ppmu_data *ppmu_data)
-{
- int i, j;
-
- for (i = 0; i < ppmu_data->ppmu_end; i++) {
- void __iomem *ppmu_base = ppmu_data->ppmu[i].hw_base;
-
- exynos_ppmu_stop(ppmu_base);
-
- /* Update local data from PPMU */
- ppmu_data->ppmu[i].ccnt = __raw_readl(ppmu_base + PPMU_CCNT);
-
- for (j = PPMU_PMNCNT0; j < PPMU_PMNCNT_MAX; j++) {
- if (ppmu_data->ppmu[i].event[j] == 0)
- ppmu_data->ppmu[i].count[j] = 0;
- else
- ppmu_data->ppmu[i].count[j] =
- exynos_ppmu_read(ppmu_base, j);
- }
- }
-
- busfreq_mon_reset(ppmu_data);
-}
-EXPORT_SYMBOL(exynos_read_ppmu);
-
-int exynos_get_busier_ppmu(struct busfreq_ppmu_data *ppmu_data)
-{
- unsigned int count = 0;
- int i, j, busy = 0;
-
- for (i = 0; i < ppmu_data->ppmu_end; i++) {
- for (j = PPMU_PMNCNT0; j < PPMU_PMNCNT_MAX; j++) {
- if (ppmu_data->ppmu[i].count[j] > count) {
- count = ppmu_data->ppmu[i].count[j];
- busy = i;
- }
- }
- }
-
- return busy;
-}
-EXPORT_SYMBOL(exynos_get_busier_ppmu);
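
exynos_get_busier_ppmu() above is a plain maximum scan over all counters of all monitored PPMUs; the index of the unit holding the largest event count is treated as the busiest bus. A self-contained sketch of the same scan, with the kernel structures reduced to plain arrays (the names and values here are illustrative, not the driver's):

#include <stdio.h>

#define NUM_PPMU 2
#define NUM_CNT  4

/* count[i][j]: event counter j of PPMU i, as filled by a read pass */
static unsigned int count[NUM_PPMU][NUM_CNT] = {
	{ 10, 0, 3, 42 },
	{  7, 0, 9, 17 },
};

static int get_busier_ppmu(void)
{
	unsigned int max = 0;
	int i, j, busy = 0;

	for (i = 0; i < NUM_PPMU; i++)
		for (j = 0; j < NUM_CNT; j++)
			if (count[i][j] > max) {
				max = count[i][j];
				busy = i;
			}
	return busy;
}

int main(void)
{
	printf("busiest PPMU: %d\n", get_busier_ppmu()); /* prints 0 */
	return 0;
}
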
diff --git a/drivers/devfreq/exynos/exynos_ppmu.h b/drivers/devfreq/exynos/exynos_ppmu.h
deleted file mode 100644
index 71f17ba35..000000000
--- a/drivers/devfreq/exynos/exynos_ppmu.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * EXYNOS PPMU header
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __DEVFREQ_EXYNOS_PPMU_H
-#define __DEVFREQ_EXYNOS_PPMU_H __FILE__
-
-#include <linux/ktime.h>
-
-/* For PPMU Control */
-#define PPMU_ENABLE BIT(0)
-#define PPMU_DISABLE 0x0
-#define PPMU_CYCLE_RESET BIT(1)
-#define PPMU_COUNTER_RESET BIT(2)
-
-#define PPMU_ENABLE_COUNT0 BIT(0)
-#define PPMU_ENABLE_COUNT1 BIT(1)
-#define PPMU_ENABLE_COUNT2 BIT(2)
-#define PPMU_ENABLE_COUNT3 BIT(3)
-#define PPMU_ENABLE_CYCLE BIT(31)
-
-#define PPMU_CNTENS 0x10
-#define PPMU_FLAG 0x50
-#define PPMU_CCNT_OVERFLOW BIT(31)
-#define PPMU_CCNT 0x100
-
-#define PPMU_PMCNT0 0x110
-#define PPMU_PMCNT_OFFSET 0x10
-#define PMCNT_OFFSET(x) (PPMU_PMCNT0 + (PPMU_PMCNT_OFFSET * x))
-
-#define PPMU_BEVT0SEL 0x1000
-#define PPMU_BEVTSEL_OFFSET 0x100
-#define PPMU_BEVTSEL(x) (PPMU_BEVT0SEL + (ch * PPMU_BEVTSEL_OFFSET))
-
-/* For Event Selection */
-#define RD_DATA_COUNT 0x5
-#define WR_DATA_COUNT 0x6
-#define RDWR_DATA_COUNT 0x7
-
-enum ppmu_counter {
- PPMU_PMNCNT0,
- PPMU_PMCCNT1,
- PPMU_PMNCNT2,
- PPMU_PMNCNT3,
- PPMU_PMNCNT_MAX,
-};
-
-struct bus_opp_table {
- unsigned int idx;
- unsigned long clk;
- unsigned long volt;
-};
-
-struct exynos_ppmu {
- void __iomem *hw_base;
- unsigned int ccnt;
- unsigned int event[PPMU_PMNCNT_MAX];
- unsigned int count[PPMU_PMNCNT_MAX];
- unsigned long long ns;
- ktime_t reset_time;
- bool ccnt_overflow;
- bool count_overflow[PPMU_PMNCNT_MAX];
-};
-
-struct busfreq_ppmu_data {
- struct exynos_ppmu *ppmu;
- int ppmu_end;
-};
-
-void exynos_ppmu_reset(void __iomem *ppmu_base);
-void exynos_ppmu_setevent(void __iomem *ppmu_base, unsigned int ch,
- unsigned int evt);
-void exynos_ppmu_start(void __iomem *ppmu_base);
-void exynos_ppmu_stop(void __iomem *ppmu_base);
-unsigned int exynos_ppmu_read(void __iomem *ppmu_base, unsigned int ch);
-void busfreq_mon_reset(struct busfreq_ppmu_data *ppmu_data);
-void exynos_read_ppmu(struct busfreq_ppmu_data *ppmu_data);
-int exynos_get_busier_ppmu(struct busfreq_ppmu_data *ppmu_data);
-#endif /* __DEVFREQ_EXYNOS_PPMU_H */
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
new file mode 100644
index 000000000..9ef46e259
--- /dev/null
+++ b/drivers/devfreq/governor_passive.c
@@ -0,0 +1,205 @@
+/*
+ * linux/drivers/devfreq/governor_passive.c
+ *
+ * Copyright (C) 2016 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/devfreq.h>
+#include "governor.h"
+
+static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
+ unsigned long *freq)
+{
+ struct devfreq_passive_data *p_data
+ = (struct devfreq_passive_data *)devfreq->data;
+ struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
+ unsigned long child_freq = ULONG_MAX;
+ struct dev_pm_opp *opp;
+ int i, count, ret = 0;
+
+ /*
+	 * If the devfreq device using the passive governor provides its own
+	 * method to determine the next frequency, use the get_target_freq()
+	 * callback of struct devfreq_passive_data.
+ */
+ if (p_data->get_target_freq) {
+ ret = p_data->get_target_freq(devfreq, freq);
+ goto out;
+ }
+
+ /*
+	 * If both the parent and the passive devfreq device use OPP tables,
+	 * derive the next frequency from the OPP table.
+ */
+
+ /*
+	 * - the parent devfreq device uses any governor except passive.
+	 * - the passive devfreq device uses the passive governor.
+	 *
+	 * Each devfreq device has its own OPP table. After the governor of
+	 * the parent devfreq device decides a new frequency, the passive
+	 * governor looks up the index of that frequency in the parent's OPP
+	 * table and uses the same index to pick a suitable new frequency for
+	 * the passive devfreq device.
+ */
+ if (!devfreq->profile || !devfreq->profile->freq_table
+ || devfreq->profile->max_state <= 0)
+ return -EINVAL;
+
+ /*
+	 * The passive governor has to get the exact frequency from the OPP
+	 * list of the parent device, because at this point *freq is only a
+	 * temporary value proposed by the parent's governor (e.g. ondemand).
+ */
+ rcu_read_lock();
+ opp = devfreq_recommended_opp(parent_devfreq->dev.parent, freq, 0);
+ rcu_read_unlock();
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto out;
+ }
+
+ /*
+	 * Find the index, in the parent device's OPP table, of the frequency
+	 * decided by the parent's governor.
+ */
+ for (i = 0; i < parent_devfreq->profile->max_state; i++)
+ if (parent_devfreq->profile->freq_table[i] == *freq)
+ break;
+
+ if (i == parent_devfreq->profile->max_state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+	/* Get a suitable frequency using the index from the parent device. */
+ if (i < devfreq->profile->max_state) {
+ child_freq = devfreq->profile->freq_table[i];
+ } else {
+ count = devfreq->profile->max_state;
+ child_freq = devfreq->profile->freq_table[count - 1];
+ }
+
+	/* Return the suitable frequency for the passive device. */
+ *freq = child_freq;
+
+out:
+ return ret;
+}
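
The lookup above maps the parent's chosen frequency to an index in the parent's freq_table and reuses that index in the child's table, clamping to the child's last entry when the child has fewer states. A standalone sketch of that mapping (the tables and values are made up for illustration):

#include <stdio.h>

static unsigned long parent_tbl[] = { 100000, 200000, 400000, 800000 };
static unsigned long child_tbl[]  = { 100000, 134000, 200000 };

#define N(a) (sizeof(a) / sizeof((a)[0]))

/* Return the child frequency for the parent's chosen freq, or 0 on error. */
static unsigned long passive_map(unsigned long parent_freq)
{
	unsigned int i;

	for (i = 0; i < N(parent_tbl); i++)
		if (parent_tbl[i] == parent_freq)
			break;
	if (i == N(parent_tbl))
		return 0;			/* not in the parent's table */

	if (i >= N(child_tbl))
		i = N(child_tbl) - 1;		/* clamp to child's top state */
	return child_tbl[i];
}

int main(void)
{
	printf("%lu\n", passive_map(800000));	/* prints 200000 (clamped) */
	return 0;
}
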
+
+static int update_devfreq_passive(struct devfreq *devfreq, unsigned long freq)
+{
+ int ret;
+
+ if (!devfreq->governor)
+ return -EINVAL;
+
+ mutex_lock_nested(&devfreq->lock, SINGLE_DEPTH_NESTING);
+
+ ret = devfreq->governor->get_target_freq(devfreq, &freq);
+ if (ret < 0)
+ goto out;
+
+ ret = devfreq->profile->target(devfreq->dev.parent, &freq, 0);
+ if (ret < 0)
+ goto out;
+
+ devfreq->previous_freq = freq;
+
+out:
+ mutex_unlock(&devfreq->lock);
+
+ return 0;
+}
+
+static int devfreq_passive_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct devfreq_passive_data *data
+ = container_of(nb, struct devfreq_passive_data, nb);
+ struct devfreq *devfreq = (struct devfreq *)data->this;
+ struct devfreq *parent = (struct devfreq *)data->parent;
+ struct devfreq_freqs *freqs = (struct devfreq_freqs *)ptr;
+ unsigned long freq = freqs->new;
+
+ switch (event) {
+ case DEVFREQ_PRECHANGE:
+ if (parent->previous_freq > freq)
+ update_devfreq_passive(devfreq, freq);
+ break;
+ case DEVFREQ_POSTCHANGE:
+ if (parent->previous_freq < freq)
+ update_devfreq_passive(devfreq, freq);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
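
The notifier applies the child update on DEVFREQ_PRECHANGE only when the parent is about to slow down, and on DEVFREQ_POSTCHANGE only when it has just sped up, so the passive device never runs faster than its parent during the transition window. A tiny sketch of that decision; the event names mirror the devfreq transition notifier and the update function is a placeholder:

#include <stdio.h>

enum { PRECHANGE, POSTCHANGE };

static void update_child(unsigned long freq)
{
	printf("child follows parent to %lu\n", freq);
}

static void notify(int event, unsigned long old_freq, unsigned long new_freq)
{
	switch (event) {
	case PRECHANGE:			/* parent slowing down: child goes first */
		if (old_freq > new_freq)
			update_child(new_freq);
		break;
	case POSTCHANGE:		/* parent sped up: child follows after */
		if (old_freq < new_freq)
			update_child(new_freq);
		break;
	}
}

int main(void)
{
	notify(PRECHANGE, 400000, 200000);	/* fires */
	notify(POSTCHANGE, 200000, 400000);	/* fires */
	return 0;
}
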
+
+static int devfreq_passive_event_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ struct device *dev = devfreq->dev.parent;
+ struct devfreq_passive_data *p_data
+ = (struct devfreq_passive_data *)devfreq->data;
+ struct devfreq *parent = (struct devfreq *)p_data->parent;
+ struct notifier_block *nb = &p_data->nb;
+ int ret = 0;
+
+ if (!parent)
+ return -EPROBE_DEFER;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ if (!p_data->this)
+ p_data->this = devfreq;
+
+ nb->notifier_call = devfreq_passive_notifier_call;
+ ret = devm_devfreq_register_notifier(dev, parent, nb,
+ DEVFREQ_TRANSITION_NOTIFIER);
+ break;
+ case DEVFREQ_GOV_STOP:
+ devm_devfreq_unregister_notifier(dev, parent, nb,
+ DEVFREQ_TRANSITION_NOTIFIER);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static struct devfreq_governor devfreq_passive = {
+ .name = "passive",
+ .get_target_freq = devfreq_passive_get_target_freq,
+ .event_handler = devfreq_passive_event_handler,
+};
+
+static int __init devfreq_passive_init(void)
+{
+ return devfreq_add_governor(&devfreq_passive);
+}
+subsys_initcall(devfreq_passive_init);
+
+static void __exit devfreq_passive_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_passive);
+ if (ret)
+		pr_err("%s: failed to remove governor: %d\n", __func__, ret);
+}
+module_exit(devfreq_passive_exit);
+
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_DESCRIPTION("DEVFREQ Passive governor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
new file mode 100644
index 000000000..9824bc4ad
--- /dev/null
+++ b/drivers/dma-buf/Kconfig
@@ -0,0 +1,11 @@
+menu "DMABUF options"
+
+config SYNC_FILE
+ bool "sync_file support for fences"
+ default n
+ select ANON_INODES
+ select DMA_SHARED_BUFFER
+ ---help---
+	  This option enables the fence synchronization framework to export
+	  sync_files to userspace; a sync_file can represent one or more fences.
+endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 57a675f90..4a424eca7 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1 +1,2 @@
obj-y := dma-buf.o fence.o reservation.o seqno-fence.o
+obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 4a2c07ee6..6355ab38d 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -33,6 +33,7 @@
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
+#include <linux/mm.h>
#include <uapi/linux/dma-buf.h>
@@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
dmabuf = file->private_data;
/* check for overflowing the buffer's size */
- if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+ if (vma->vm_pgoff + vma_pages(vma) >
dmabuf->size >> PAGE_SHIFT)
return -EINVAL;
@@ -723,11 +724,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return -EINVAL;
/* check for offset overflow */
- if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
+ if (pgoff + vma_pages(vma) < pgoff)
return -EOVERFLOW;
/* check for overflowing the buffer's size */
- if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
+ if (pgoff + vma_pages(vma) >
dmabuf->size >> PAGE_SHIFT)
return -EINVAL;
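
Both hunks replace the open-coded (vm_end - vm_start) >> PAGE_SHIFT with the equivalent vma_pages() helper; the middle check in dma_buf_mmap() also guards against unsigned wrap-around using the classic "a + b < a" idiom. A standalone demonstration of that idiom (the page-count values are arbitrary):

#include <stdio.h>

/* Returns 1 if pgoff + pages overflows an unsigned long. */
static int pgoff_overflows(unsigned long pgoff, unsigned long pages)
{
	return pgoff + pages < pgoff;	/* wrap-around check */
}

int main(void)
{
	printf("%d\n", pgoff_overflows(10, 20));	/* 0: fits */
	printf("%d\n", pgoff_overflows(~0UL, 2));	/* 1: wraps */
	return 0;
}
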
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index c0bd5722c..9566a62ad 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -35,6 +35,17 @@
#include <linux/reservation.h>
#include <linux/export.h>
+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage shared and
+ * exclusive fences associated with a buffer. A reservation object
+ * can have attached one exclusive fence (normally associated with
+ * write operations) or N shared fences (read operations). The RCU
+ * mechanism is used to protect read access to fences from locked
+ * write-side updates.
+ */
+
DEFINE_WW_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
@@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class);
const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
-/*
- * Reserve space to add a shared fence to a reservation_object,
- * must be called with obj->lock held.
+
+/**
+ * reservation_object_reserve_shared - Reserve space to add a shared
+ * fence to a reservation_object.
+ * @obj: reservation object
+ *
+ * Should be called before reservation_object_add_shared_fence(). Must
+ * be called with obj->lock held.
+ *
+ * RETURNS
+ * Zero for success, or -errno
*/
int reservation_object_reserve_shared(struct reservation_object *obj)
{
@@ -180,7 +199,11 @@ done:
fence_put(old_fence);
}
-/*
+/**
+ * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
* Add a fence to a shared slot, obj->lock must be held, and
* reservation_object_reserve_shared_fence has been called.
*/
@@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj,
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
+/**
+ * reservation_object_add_excl_fence - Add an exclusive fence.
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
+ * Add a fence to the exclusive slot. The obj->lock must be held.
+ */
void reservation_object_add_excl_fence(struct reservation_object *obj,
struct fence *fence)
{
@@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
}
EXPORT_SYMBOL(reservation_object_add_excl_fence);
+/**
+ * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * fences without the update-side lock held
+ * @obj: the reservation object
+ * @pfence_excl: the returned exclusive fence (or NULL)
+ * @pshared_count: the number of shared fences returned
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by caller)
+ *
+ * RETURNS
+ * Zero or -errno
+ */
int reservation_object_get_fences_rcu(struct reservation_object *obj,
struct fence **pfence_excl,
unsigned *pshared_count,
@@ -319,6 +361,18 @@ unlock:
}
EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
+/**
+ * reservation_object_wait_timeout_rcu - Wait on a reservation object's
+ * shared and/or exclusive fences.
+ * @obj: the reservation object
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @intr: if true, do interruptible wait
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * RETURNS
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zero on success.
+ */
long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
bool wait_all, bool intr,
unsigned long timeout)
@@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence)
return ret;
}
+/**
+ * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * fences have been signaled.
+ * @obj: the reservation object
+ * @test_all: if true, test all fences, otherwise only test the exclusive
+ * fence
+ *
+ * RETURNS
+ * true if all fences signaled, else false
+ */
bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
bool test_all)
{
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
new file mode 100644
index 000000000..f08cf2d83
--- /dev/null
+++ b/drivers/dma-buf/sync_file.c
@@ -0,0 +1,395 @@
+/*
+ * drivers/dma-buf/sync_file.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/anon_inodes.h>
+#include <linux/sync_file.h>
+#include <uapi/linux/sync_file.h>
+
+static const struct file_operations sync_file_fops;
+
+static struct sync_file *sync_file_alloc(int size)
+{
+ struct sync_file *sync_file;
+
+ sync_file = kzalloc(size, GFP_KERNEL);
+ if (!sync_file)
+ return NULL;
+
+ sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops,
+ sync_file, 0);
+ if (IS_ERR(sync_file->file))
+ goto err;
+
+ kref_init(&sync_file->kref);
+
+ init_waitqueue_head(&sync_file->wq);
+
+ return sync_file;
+
+err:
+ kfree(sync_file);
+ return NULL;
+}
+
+static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
+{
+ struct sync_file_cb *check;
+ struct sync_file *sync_file;
+
+ check = container_of(cb, struct sync_file_cb, cb);
+ sync_file = check->sync_file;
+
+ if (atomic_dec_and_test(&sync_file->status))
+ wake_up_all(&sync_file->wq);
+}
+
+/**
+ * sync_file_create() - creates a sync file
+ * @fence: fence to add to the sync_fence
+ *
+ * Creates a sync_file containing @fence. Once this is called, the sync_file
+ * takes ownership of @fence. The sync_file can be released with
+ * fput(sync_file->file). Returns the sync_file or NULL in case of error.
+ */
+struct sync_file *sync_file_create(struct fence *fence)
+{
+ struct sync_file *sync_file;
+
+ sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]));
+ if (!sync_file)
+ return NULL;
+
+ sync_file->num_fences = 1;
+ atomic_set(&sync_file->status, 1);
+ snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%d-%d",
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence), fence->context,
+ fence->seqno);
+
+ sync_file->cbs[0].fence = fence;
+ sync_file->cbs[0].sync_file = sync_file;
+ if (fence_add_callback(fence, &sync_file->cbs[0].cb,
+ fence_check_cb_func))
+ atomic_dec(&sync_file->status);
+
+ return sync_file;
+}
+EXPORT_SYMBOL(sync_file_create);
+
+/**
+ * sync_file_fdget() - get a sync_file from an fd
+ * @fd: fd referencing a fence
+ *
+ * Ensures @fd references a valid sync_file, increments the refcount of the
+ * backing file. Returns the sync_file or NULL in case of error.
+ */
+static struct sync_file *sync_file_fdget(int fd)
+{
+ struct file *file = fget(fd);
+
+ if (!file)
+ return NULL;
+
+ if (file->f_op != &sync_file_fops)
+ goto err;
+
+ return file->private_data;
+
+err:
+ fput(file);
+ return NULL;
+}
+
+static void sync_file_add_pt(struct sync_file *sync_file, int *i,
+ struct fence *fence)
+{
+ sync_file->cbs[*i].fence = fence;
+ sync_file->cbs[*i].sync_file = sync_file;
+
+ if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
+ fence_check_cb_func)) {
+ fence_get(fence);
+ (*i)++;
+ }
+}
+
+/**
+ * sync_file_merge() - merge two sync_files
+ * @name: name of new fence
+ * @a: sync_file a
+ * @b: sync_file b
+ *
+ * Creates a new sync_file which contains copies of all the fences in both
+ * @a and @b. @a and @b remain valid, independent sync_files. Returns the
+ * new merged sync_file or NULL in case of error.
+ */
+static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
+ struct sync_file *b)
+{
+ int num_fences = a->num_fences + b->num_fences;
+ struct sync_file *sync_file;
+ int i, i_a, i_b;
+ unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
+
+ sync_file = sync_file_alloc(size);
+ if (!sync_file)
+ return NULL;
+
+ atomic_set(&sync_file->status, num_fences);
+
+ /*
+ * Assume sync_file a and b are both ordered and have no
+ * duplicates with the same context.
+ *
+ * If a sync_file can only be created with sync_file_merge
+ * and sync_file_create, this is a reasonable assumption.
+ */
+ for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
+ struct fence *pt_a = a->cbs[i_a].fence;
+ struct fence *pt_b = b->cbs[i_b].fence;
+
+ if (pt_a->context < pt_b->context) {
+ sync_file_add_pt(sync_file, &i, pt_a);
+
+ i_a++;
+ } else if (pt_a->context > pt_b->context) {
+ sync_file_add_pt(sync_file, &i, pt_b);
+
+ i_b++;
+ } else {
+ if (pt_a->seqno - pt_b->seqno <= INT_MAX)
+ sync_file_add_pt(sync_file, &i, pt_a);
+ else
+ sync_file_add_pt(sync_file, &i, pt_b);
+
+ i_a++;
+ i_b++;
+ }
+ }
+
+ for (; i_a < a->num_fences; i_a++)
+ sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
+
+ for (; i_b < b->num_fences; i_b++)
+ sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
+
+ if (num_fences > i)
+ atomic_sub(num_fences - i, &sync_file->status);
+ sync_file->num_fences = i;
+
+ strlcpy(sync_file->name, name, sizeof(sync_file->name));
+ return sync_file;
+}
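
sync_file_merge() above is a classic two-pointer merge of two arrays sorted by fence context: distinct contexts are interleaved in order, and when both inputs carry a fence from the same context only the later one (by seqno, compared modulo wrap-around) is kept. A self-contained sketch of the same merge over plain (context, seqno) pairs:

#include <stdio.h>
#include <limits.h>

struct pt { unsigned ctx, seqno; };

/* Merge a[] and b[] (each sorted by ctx, no duplicate ctx) into out[]. */
static int merge(const struct pt *a, int na, const struct pt *b, int nb,
		 struct pt *out)
{
	int i = 0, ia = 0, ib = 0;

	while (ia < na && ib < nb) {
		if (a[ia].ctx < b[ib].ctx)
			out[i++] = a[ia++];
		else if (a[ia].ctx > b[ib].ctx)
			out[i++] = b[ib++];
		else {
			/* same context: keep the later point, wrap-safe */
			if (a[ia].seqno - b[ib].seqno <= INT_MAX)
				out[i++] = a[ia];
			else
				out[i++] = b[ib];
			ia++;
			ib++;
		}
	}
	while (ia < na)
		out[i++] = a[ia++];
	while (ib < nb)
		out[i++] = b[ib++];
	return i;
}

int main(void)
{
	struct pt a[] = { {1, 5}, {3, 2} }, b[] = { {1, 7}, {2, 1} }, out[4];
	int k, n = merge(a, 2, b, 2, out);

	for (k = 0; k < n; k++)		/* prints (1,7) (2,1) (3,2) */
		printf("(%u,%u) ", out[k].ctx, out[k].seqno);
	printf("\n");
	return 0;
}
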
+
+static void sync_file_free(struct kref *kref)
+{
+ struct sync_file *sync_file = container_of(kref, struct sync_file,
+ kref);
+ int i;
+
+ for (i = 0; i < sync_file->num_fences; ++i) {
+ fence_remove_callback(sync_file->cbs[i].fence,
+ &sync_file->cbs[i].cb);
+ fence_put(sync_file->cbs[i].fence);
+ }
+
+ kfree(sync_file);
+}
+
+static int sync_file_release(struct inode *inode, struct file *file)
+{
+ struct sync_file *sync_file = file->private_data;
+
+ kref_put(&sync_file->kref, sync_file_free);
+ return 0;
+}
+
+static unsigned int sync_file_poll(struct file *file, poll_table *wait)
+{
+ struct sync_file *sync_file = file->private_data;
+ int status;
+
+ poll_wait(file, &sync_file->wq, wait);
+
+ status = atomic_read(&sync_file->status);
+
+ if (!status)
+ return POLLIN;
+ if (status < 0)
+ return POLLERR;
+ return 0;
+}
+
+static long sync_file_ioctl_merge(struct sync_file *sync_file,
+ unsigned long arg)
+{
+ int fd = get_unused_fd_flags(O_CLOEXEC);
+ int err;
+ struct sync_file *fence2, *fence3;
+ struct sync_merge_data data;
+
+ if (fd < 0)
+ return fd;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ err = -EFAULT;
+ goto err_put_fd;
+ }
+
+ if (data.flags || data.pad) {
+ err = -EINVAL;
+ goto err_put_fd;
+ }
+
+ fence2 = sync_file_fdget(data.fd2);
+ if (!fence2) {
+ err = -ENOENT;
+ goto err_put_fd;
+ }
+
+ data.name[sizeof(data.name) - 1] = '\0';
+ fence3 = sync_file_merge(data.name, sync_file, fence2);
+ if (!fence3) {
+ err = -ENOMEM;
+ goto err_put_fence2;
+ }
+
+ data.fence = fd;
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ err = -EFAULT;
+ goto err_put_fence3;
+ }
+
+ fd_install(fd, fence3->file);
+ fput(fence2->file);
+ return 0;
+
+err_put_fence3:
+ fput(fence3->file);
+
+err_put_fence2:
+ fput(fence2->file);
+
+err_put_fd:
+ put_unused_fd(fd);
+ return err;
+}
+
+static void sync_fill_fence_info(struct fence *fence,
+ struct sync_fence_info *info)
+{
+ strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
+ sizeof(info->obj_name));
+ strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
+ sizeof(info->driver_name));
+ if (fence_is_signaled(fence))
+ info->status = fence->status >= 0 ? 1 : fence->status;
+ else
+ info->status = 0;
+ info->timestamp_ns = ktime_to_ns(fence->timestamp);
+}
+
+static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
+ unsigned long arg)
+{
+ struct sync_file_info info;
+ struct sync_fence_info *fence_info = NULL;
+ __u32 size;
+ int ret, i;
+
+ if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+ return -EFAULT;
+
+ if (info.flags || info.pad)
+ return -EINVAL;
+
+ /*
+ * Passing num_fences = 0 means that userspace doesn't want to
+ * retrieve any sync_fence_info. If num_fences = 0 we skip filling
+ * sync_fence_info and return the actual number of fences on
+ * info->num_fences.
+ */
+ if (!info.num_fences)
+ goto no_fences;
+
+ if (info.num_fences < sync_file->num_fences)
+ return -EINVAL;
+
+ size = sync_file->num_fences * sizeof(*fence_info);
+ fence_info = kzalloc(size, GFP_KERNEL);
+ if (!fence_info)
+ return -ENOMEM;
+
+ for (i = 0; i < sync_file->num_fences; ++i)
+ sync_fill_fence_info(sync_file->cbs[i].fence, &fence_info[i]);
+
+ if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
+ size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+no_fences:
+ strlcpy(info.name, sync_file->name, sizeof(info.name));
+ info.status = atomic_read(&sync_file->status);
+ if (info.status >= 0)
+ info.status = !info.status;
+
+ info.num_fences = sync_file->num_fences;
+
+ if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+
+out:
+ kfree(fence_info);
+
+ return ret;
+}
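
The num_fences == 0 convention above gives userspace a two-step protocol: call SYNC_IOC_FILE_INFO once with num_fences = 0 to learn the fence count, allocate a buffer, then call again to fetch the per-fence details. A hedged userspace sketch of that pattern (error handling trimmed; assumes installed kernel headers providing this sync_file UAPI and a sync_file fd obtained from a driver):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

static void dump_fences(int fd)
{
	struct sync_file_info info = { 0 };
	struct sync_fence_info *fi;
	uint32_t i;

	/* First call: num_fences == 0 just reports the fence count. */
	if (ioctl(fd, SYNC_IOC_FILE_INFO, &info) < 0)
		return;

	fi = calloc(info.num_fences, sizeof(*fi));
	if (!fi)
		return;
	info.sync_fence_info = (uint64_t)(uintptr_t)fi;

	/* Second call: the kernel fills one sync_fence_info per fence. */
	if (ioctl(fd, SYNC_IOC_FILE_INFO, &info) == 0)
		for (i = 0; i < info.num_fences; i++)
			printf("%s/%s status=%d\n", fi[i].driver_name,
			       fi[i].obj_name, fi[i].status);
	free(fi);
}
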
+
+static long sync_file_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sync_file *sync_file = file->private_data;
+
+ switch (cmd) {
+ case SYNC_IOC_MERGE:
+ return sync_file_ioctl_merge(sync_file, arg);
+
+ case SYNC_IOC_FILE_INFO:
+ return sync_file_ioctl_fence_info(sync_file, arg);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations sync_file_fops = {
+ .release = sync_file_release,
+ .poll = sync_file_poll,
+ .unlocked_ioctl = sync_file_ioctl,
+ .compat_ioctl = sync_file_ioctl,
+};
+
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d96d87c56..8c98779a1 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -332,7 +332,7 @@ config MPC512X_DMA
config MV_XOR
bool "Marvell XOR engine support"
- depends on PLAT_ORION
+ depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
select DMA_ENGINE
select DMA_ENGINE_RAID
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
@@ -467,6 +467,20 @@ config TEGRA20_APB_DMA
This DMA controller transfers data from memory to peripheral fifo
or vice versa. It does not support memory to memory data transfer.
+config TEGRA210_ADMA
+ bool "NVIDIA Tegra210 ADMA support"
+ depends on ARCH_TEGRA_210_SOC
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select PM_CLK
+ help
+ Support for the NVIDIA Tegra210 ADMA controller driver. The
+ DMA controller has multiple DMA channels and is used to service
+ various audio clients in the Tegra210 audio processing engine
+ (APE). This DMA controller transfers data from memory to
+ peripheral and vice versa. It does not support memory to
+ memory data transfer.
+
config TIMB_DMA
tristate "Timberdale FPGA DMA support"
depends on MFD_TIMBERDALE
@@ -507,7 +521,7 @@ config XGENE_DMA
config XILINX_VDMA
tristate "Xilinx AXI VDMA Engine"
- depends on (ARCH_ZYNQ || MICROBLAZE)
+ depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
select DMA_ENGINE
help
Enable support for Xilinx AXI VDMA Soft IP.
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6084127c1..614f28b0b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_STM32_DMA) += stm32-dma.o
obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
+obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 9b42c0588..81db1c481 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -107,16 +107,20 @@ struct pl08x_driver_data;
/**
* struct vendor_data - vendor-specific config parameters for PL08x derivatives
* @channels: the number of channels available in this variant
+ * @signals: the number of request signals available from the hardware
* @dualmaster: whether this version supports dual AHB masters or not.
* @nomadik: whether the channels have Nomadik security extension bits
* that need to be checked for permission before use and some registers are
* missing
* @pl080s: whether this version is a PL080S, which has separate register and
* LLI word for transfer size.
+ * @max_transfer_size: the maximum single element transfer size for this
+ * PL08x variant.
*/
struct vendor_data {
u8 config_offset;
u8 channels;
+ u8 signals;
bool dualmaster;
bool nomadik;
bool pl080s;
@@ -235,7 +239,7 @@ struct pl08x_dma_chan {
struct virt_dma_chan vc;
struct pl08x_phy_chan *phychan;
const char *name;
- const struct pl08x_channel_data *cd;
+ struct pl08x_channel_data *cd;
struct dma_slave_config cfg;
struct pl08x_txd *at;
struct pl08x_driver_data *host;
@@ -1909,6 +1913,12 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
if (slave) {
chan->cd = &pl08x->pd->slave_channels[i];
+ /*
+ * Some implementations have muxed signals, whereas some
+ * use a mux in front of the signals and need dynamic
+ * assignment of signals.
+ */
+ chan->signal = i;
pl08x_dma_slave_init(chan);
} else {
chan->cd = &pl08x->pd->memcpy_channel;
@@ -2050,40 +2060,33 @@ static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
struct pl08x_driver_data *pl08x = ofdma->of_dma_data;
- struct pl08x_channel_data *data;
- struct pl08x_dma_chan *chan;
struct dma_chan *dma_chan;
+ struct pl08x_dma_chan *plchan;
if (!pl08x)
return NULL;
- if (dma_spec->args_count != 2)
+ if (dma_spec->args_count != 2) {
+ dev_err(&pl08x->adev->dev,
+ "DMA channel translation requires two cells\n");
return NULL;
+ }
dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]);
- if (dma_chan)
- return dma_get_slave_channel(dma_chan);
-
- chan = devm_kzalloc(pl08x->slave.dev, sizeof(*chan) + sizeof(*data),
- GFP_KERNEL);
- if (!chan)
+ if (!dma_chan) {
+ dev_err(&pl08x->adev->dev,
+ "DMA slave channel not found\n");
return NULL;
+ }
- data = (void *)&chan[1];
- data->bus_id = "(none)";
- data->periph_buses = dma_spec->args[1];
-
- chan->cd = data;
- chan->host = pl08x;
- chan->slave = true;
- chan->name = data->bus_id;
- chan->state = PL08X_CHAN_IDLE;
- chan->signal = dma_spec->args[0];
- chan->vc.desc_free = pl08x_desc_free;
-
- vchan_init(&chan->vc, &pl08x->slave);
+ plchan = to_pl08x_chan(dma_chan);
+ dev_dbg(&pl08x->adev->dev,
+ "translated channel for signal %d\n",
+ dma_spec->args[0]);
- return dma_get_slave_channel(&chan->vc.chan);
+ /* Augment channel data for applicable AHB buses */
+ plchan->cd->periph_buses = dma_spec->args[1];
+ return dma_get_slave_channel(dma_chan);
}
static int pl08x_of_probe(struct amba_device *adev,
@@ -2091,9 +2094,11 @@ static int pl08x_of_probe(struct amba_device *adev,
struct device_node *np)
{
struct pl08x_platform_data *pd;
+ struct pl08x_channel_data *chanp = NULL;
u32 cctl_memcpy = 0;
u32 val;
int ret;
+ int i;
pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL);
if (!pd)
@@ -2195,6 +2200,27 @@ static int pl08x_of_probe(struct amba_device *adev,
/* Use the buses that can access memory, obviously */
pd->memcpy_channel.periph_buses = pd->mem_buses;
+ /*
+	 * Allocate channel data for all possible slave channels (one
+	 * for each possible signal); a channel will then be allocated
+	 * for a device and have its AHB interfaces set up at
+	 * translation time.
+ */
+ chanp = devm_kcalloc(&adev->dev,
+ pl08x->vd->signals,
+ sizeof(struct pl08x_channel_data),
+ GFP_KERNEL);
+ if (!chanp)
+ return -ENOMEM;
+
+ pd->slave_channels = chanp;
+ for (i = 0; i < pl08x->vd->signals; i++) {
+ /* chanp->periph_buses will be assigned at translation */
+ chanp->bus_id = kasprintf(GFP_KERNEL, "slave%d", i);
+ chanp++;
+ }
+ pd->num_slave_channels = pl08x->vd->signals;
+
pl08x->pd = pd;
return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate,
@@ -2234,6 +2260,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
goto out_no_pl08x;
}
+ /* Assign useful pointers to the driver state */
+ pl08x->adev = adev;
+ pl08x->vd = vd;
+
/* Initialize memcpy engine */
dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
pl08x->memcpy.dev = &adev->dev;
@@ -2284,10 +2314,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
}
}
- /* Assign useful pointers to the driver state */
- pl08x->adev = adev;
- pl08x->vd = vd;
-
/* By default, AHB1 only. If dualmaster, from platform */
pl08x->lli_buses = PL08X_AHB1;
pl08x->mem_buses = PL08X_AHB1;
@@ -2438,6 +2464,7 @@ out_no_pl08x:
static struct vendor_data vendor_pl080 = {
.config_offset = PL080_CH_CONFIG,
.channels = 8,
+ .signals = 16,
.dualmaster = true,
.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
@@ -2445,6 +2472,7 @@ static struct vendor_data vendor_pl080 = {
static struct vendor_data vendor_nomadik = {
.config_offset = PL080_CH_CONFIG,
.channels = 8,
+ .signals = 32,
.dualmaster = true,
.nomadik = true,
.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
@@ -2453,6 +2481,7 @@ static struct vendor_data vendor_nomadik = {
static struct vendor_data vendor_pl080s = {
.config_offset = PL080S_CH_CONFIG,
.channels = 8,
+ .signals = 32,
.pl080s = true,
.max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
};
@@ -2460,6 +2489,7 @@ static struct vendor_data vendor_pl080s = {
static struct vendor_data vendor_pl081 = {
.config_offset = PL080_CH_CONFIG,
.channels = 2,
+ .signals = 16,
.dualmaster = false,
.max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
};
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 8e304b1be..75bd6621d 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -242,7 +242,7 @@ struct at_xdmac_lld {
u32 mbr_dus; /* Destination Microblock Stride Register */
};
-
+/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
struct at_xdmac_lld lld;
enum dma_transfer_direction direction;
@@ -253,7 +253,7 @@ struct at_xdmac_desc {
unsigned int xfer_size;
struct list_head descs_list;
struct list_head xfer_node;
-};
+} __aligned(sizeof(u64));
static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
@@ -1400,6 +1400,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
u32 cur_nda, check_nda, cur_ubc, mask, value;
u8 dwidth = 0;
unsigned long flags;
+ bool initd;
ret = dma_cookie_status(chan, cookie, txstate);
if (ret == DMA_COMPLETE)
@@ -1424,7 +1425,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
residue = desc->xfer_size;
/*
* Flush FIFO: only relevant when the transfer is source peripheral
- * synchronized.
+ * synchronized. Flush is needed before reading CUBC because data in
+	 * the FIFO are not reported by CUBC. Reporting a residue equal to the
+	 * transfer length while data sit in the FIFO can cause problems.
+	 * Use case: the Atmel USART has a timeout that fires when characters
+	 * have been received but none arrive for a while. On timeout, it
+	 * requests the residue. If the data are still in the DMA FIFO, we
+	 * would report a residue equal to the transfer length, i.e. that no
+	 * data were received. An application waiting for that data would
+	 * then hang, since no further USART timeout occurs without new
+	 * incoming data.
*/
mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
@@ -1435,34 +1445,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
}
/*
- * When processing the residue, we need to read two registers but we
- * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
- * we stand in the descriptor list and AT_XDMAC_CUBC is used
- * to know how many data are remaining for the current descriptor.
- * Since the dma channel is not paused to not loose data, between the
- * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
- * descriptor.
- * For that reason, after reading AT_XDMAC_CUBC, we check if we are
- * still using the same descriptor by reading a second time
- * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
- * read again AT_XDMAC_CUBC.
+	 * The easiest way to compute the residue would be to pause the DMA,
+	 * but doing so can lose data, since some devices don't have a FIFO
+	 * to buffer transfers while the channel is paused.
+ * We need to read several registers because:
+ * - DMA is running therefore a descriptor change is possible while
+ * reading these registers
+ * - When the block transfer is done, the value of the CUBC register
+ * is set to its initial value until the fetch of the next descriptor.
+ * This value will corrupt the residue calculation so we have to skip
+ * it.
+ *
+ * INITD -------- ------------
+ * |____________________|
+ * _______________________ _______________
+ * NDA @desc2 \/ @desc3
+ * _______________________/\_______________
+ * __________ ___________ _______________
+ * CUBC 0 \/ MAX desc1 \/ MAX desc2
+ * __________/\___________/\_______________
+ *
+ * Since descriptors are aligned on 64 bits, we can assume that
+ * the update of NDA and CUBC is atomic.
* Memory barriers are used to ensure the read order of the registers.
- * A max number of retries is set because unlikely it can never ends if
- * we are transferring a lot of data with small buffers.
+	 * A maximum number of retries is set because, although unlikely, the
+	 * loop could otherwise never end.
*/
- cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
- rmb();
- cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
- rmb();
check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-
- if (likely(cur_nda == check_nda))
- break;
-
- cur_nda = check_nda;
+ rmb();
+ initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
rmb();
cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ rmb();
+ cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ rmb();
+
+ if ((check_nda == cur_nda) && initd)
+ break;
}
if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
@@ -1471,6 +1490,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
}
/*
+ * Flush FIFO: only relevant when the transfer is source peripheral
+ * synchronized. Another flush is needed here because CUBC is updated
+ * when the controller sends the data write command. It can lead to
+ * report data that are not written in the memory or the device. The
+ * FIFO flush ensures that data are really written.
+ */
+ if ((desc->lld.mbr_cfg & mask) == value) {
+ at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+ while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+ cpu_relax();
+ }
+
+ /*
* Remove size of all microblocks already transferred and the current
* one. Then add the remaining size to transfer of the current
* microblock.
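
The retry loop above is a lock-free consistent-snapshot pattern: read the descriptor pointer, the INITD flag and the byte counter with read barriers in between, re-read the pointer, and only accept the sample when the pointer did not move and INITD shows the counter is valid. A simplified standalone sketch of the same pattern, using C11 atomics in place of the MMIO register reads (the INITD check is omitted for brevity):

#include <stdio.h>
#include <stdatomic.h>

#define MAX_RETRIES 5

/* Stand-ins for the CNDA/CUBC registers updated concurrently by hardware. */
static _Atomic unsigned long nda = 0x1000;
static _Atomic unsigned long ubc = 512;

/* Returns 0 and fills *out_ubc with a sample consistent with *out_nda. */
static int read_residue_snapshot(unsigned long *out_nda, unsigned long *out_ubc)
{
	int retry;

	for (retry = 0; retry < MAX_RETRIES; retry++) {
		unsigned long check = atomic_load(&nda);
		unsigned long count = atomic_load(&ubc);
		unsigned long cur   = atomic_load(&nda);

		if (check == cur) {	/* no descriptor switch in between */
			*out_nda = cur;
			*out_ubc = count;
			return 0;
		}
	}
	return -1;			/* gave up: descriptor kept moving */
}

int main(void)
{
	unsigned long d, c;

	if (!read_residue_snapshot(&d, &c))
		printf("desc=%#lx remaining=%lu\n", d, c);
	return 0;
}
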
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 996c4b00d..6149b27c3 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -46,6 +46,9 @@
#include "virt-dma.h"
+#define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14
+#define BCM2835_DMA_CHAN_NAME_SIZE 8
+
struct bcm2835_dmadev {
struct dma_device ddev;
spinlock_t lock;
@@ -73,7 +76,6 @@ struct bcm2835_chan {
struct list_head node;
struct dma_slave_config cfg;
- bool cyclic;
unsigned int dreq;
int ch;
@@ -82,6 +84,9 @@ struct bcm2835_chan {
void __iomem *chan_base;
int irq_number;
+ unsigned int irq_flags;
+
+ bool is_lite_channel;
};
struct bcm2835_desc {
@@ -89,47 +94,104 @@ struct bcm2835_desc {
struct virt_dma_desc vd;
enum dma_transfer_direction dir;
- struct bcm2835_cb_entry *cb_list;
-
unsigned int frames;
size_t size;
+
+ bool cyclic;
+
+ struct bcm2835_cb_entry cb_list[];
};
#define BCM2835_DMA_CS 0x00
#define BCM2835_DMA_ADDR 0x04
+#define BCM2835_DMA_TI 0x08
#define BCM2835_DMA_SOURCE_AD 0x0c
#define BCM2835_DMA_DEST_AD 0x10
-#define BCM2835_DMA_NEXTCB 0x1C
+#define BCM2835_DMA_LEN 0x14
+#define BCM2835_DMA_STRIDE 0x18
+#define BCM2835_DMA_NEXTCB 0x1c
+#define BCM2835_DMA_DEBUG 0x20
/* DMA CS Control and Status bits */
-#define BCM2835_DMA_ACTIVE BIT(0)
-#define BCM2835_DMA_INT BIT(2)
+#define BCM2835_DMA_ACTIVE BIT(0) /* activate the DMA */
+#define BCM2835_DMA_END BIT(1) /* current CB has ended */
+#define BCM2835_DMA_INT BIT(2) /* interrupt status */
+#define BCM2835_DMA_DREQ BIT(3) /* DREQ state */
#define BCM2835_DMA_ISPAUSED BIT(4) /* Pause requested or not active */
#define BCM2835_DMA_ISHELD BIT(5) /* Is held by DREQ flow control */
-#define BCM2835_DMA_ERR BIT(8)
+#define BCM2835_DMA_WAITING_FOR_WRITES BIT(6) /* waiting for last
+ * AXI-write to ack
+ */
+#define BCM2835_DMA_ERR BIT(8)
+#define BCM2835_DMA_PRIORITY(x) ((x & 15) << 16) /* AXI priority */
+#define BCM2835_DMA_PANIC_PRIORITY(x) ((x & 15) << 20) /* panic priority */
+/* current value of TI.BCM2835_DMA_WAIT_RESP */
+#define BCM2835_DMA_WAIT_FOR_WRITES BIT(28)
+#define BCM2835_DMA_DIS_DEBUG BIT(29) /* disable debug pause signal */
#define BCM2835_DMA_ABORT BIT(30) /* Stop current CB, go to next, WO */
#define BCM2835_DMA_RESET BIT(31) /* WO, self clearing */
+/* Transfer information bits - also bcm2835_cb.info field */
#define BCM2835_DMA_INT_EN BIT(0)
+#define BCM2835_DMA_TDMODE BIT(1) /* 2D-Mode */
+#define BCM2835_DMA_WAIT_RESP BIT(3) /* wait for AXI-write to be acked */
#define BCM2835_DMA_D_INC BIT(4)
-#define BCM2835_DMA_D_DREQ BIT(6)
+#define BCM2835_DMA_D_WIDTH BIT(5) /* 128bit writes if set */
+#define BCM2835_DMA_D_DREQ BIT(6) /* enable DREQ for destination */
+#define BCM2835_DMA_D_IGNORE BIT(7) /* ignore destination writes */
#define BCM2835_DMA_S_INC BIT(8)
-#define BCM2835_DMA_S_DREQ BIT(10)
-
-#define BCM2835_DMA_PER_MAP(x) ((x) << 16)
+#define BCM2835_DMA_S_WIDTH BIT(9) /* 128bit writes if set */
+#define BCM2835_DMA_S_DREQ BIT(10) /* enable SREQ for source */
+#define BCM2835_DMA_S_IGNORE BIT(11) /* ignore source reads - read 0 */
+#define BCM2835_DMA_BURST_LENGTH(x) ((x & 15) << 12)
+#define BCM2835_DMA_PER_MAP(x) ((x & 31) << 16) /* REQ source */
+#define BCM2835_DMA_WAIT(x) ((x & 31) << 21) /* add DMA-wait cycles */
+#define BCM2835_DMA_NO_WIDE_BURSTS BIT(26) /* no 2 beat write bursts */
+
+/* debug register bits */
+#define BCM2835_DMA_DEBUG_LAST_NOT_SET_ERR BIT(0)
+#define BCM2835_DMA_DEBUG_FIFO_ERR BIT(1)
+#define BCM2835_DMA_DEBUG_READ_ERR BIT(2)
+#define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_SHIFT 4
+#define BCM2835_DMA_DEBUG_OUTSTANDING_WRITES_BITS 4
+#define BCM2835_DMA_DEBUG_ID_SHIFT 16
+#define BCM2835_DMA_DEBUG_ID_BITS 9
+#define BCM2835_DMA_DEBUG_STATE_SHIFT 16
+#define BCM2835_DMA_DEBUG_STATE_BITS 9
+#define BCM2835_DMA_DEBUG_VERSION_SHIFT 25
+#define BCM2835_DMA_DEBUG_VERSION_BITS 3
+#define BCM2835_DMA_DEBUG_LITE BIT(28)
+
+/* shared registers for all dma channels */
+#define BCM2835_DMA_INT_STATUS 0xfe0
+#define BCM2835_DMA_ENABLE 0xff0
#define BCM2835_DMA_DATA_TYPE_S8 1
#define BCM2835_DMA_DATA_TYPE_S16 2
#define BCM2835_DMA_DATA_TYPE_S32 4
#define BCM2835_DMA_DATA_TYPE_S128 16
-#define BCM2835_DMA_BULK_MASK BIT(0)
-#define BCM2835_DMA_FIQ_MASK (BIT(2) | BIT(3))
-
/* Valid only for channels 0 - 14, 15 has its own base address */
#define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */
#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))
+/* the max dma length for different channels */
+#define MAX_DMA_LEN SZ_1G
+#define MAX_LITE_DMA_LEN (SZ_64K - 4)
+
+static inline size_t bcm2835_dma_max_frame_length(struct bcm2835_chan *c)
+{
+ /* lite and normal channels have different max frame lengths */
+ return c->is_lite_channel ? MAX_LITE_DMA_LEN : MAX_DMA_LEN;
+}
+
+/* how many frames of max_len size do we need to transfer len bytes */
+static inline size_t bcm2835_dma_frames_for_length(size_t len,
+ size_t max_len)
+{
+ return DIV_ROUND_UP(len, max_len);
+}
+
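For reference, the frame computation above is plain ceiling division against the per-channel maximum; a stand-alone sketch with illustrative byte counts (the 256 KiB figure is assumed, not taken from the patch):

	#include <stddef.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define MAX_LITE_DMA_LEN	(65536 - 4)	/* SZ_64K - 4 */

	static size_t frames_for(size_t len, size_t max_len)
	{
		return DIV_ROUND_UP(len, max_len);
	}

	/*
	 * 256 KiB on a lite channel: DIV_ROUND_UP(262144, 65532) == 5,
	 * i.e. four full 65532-byte control blocks plus a 16-byte tail.
	 */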
static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
{
return container_of(d, struct bcm2835_dmadev, ddev);
@@ -146,19 +208,209 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc(
return container_of(t, struct bcm2835_desc, vd.tx);
}
-static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
+static void bcm2835_dma_free_cb_chain(struct bcm2835_desc *desc)
{
- struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
- int i;
+ size_t i;
for (i = 0; i < desc->frames; i++)
dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
desc->cb_list[i].paddr);
- kfree(desc->cb_list);
kfree(desc);
}
+static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
+{
+ bcm2835_dma_free_cb_chain(
+ container_of(vd, struct bcm2835_desc, vd));
+}
+
+static void bcm2835_dma_create_cb_set_length(
+ struct bcm2835_chan *chan,
+ struct bcm2835_dma_cb *control_block,
+ size_t len,
+ size_t period_len,
+ size_t *total_len,
+ u32 finalextrainfo)
+{
+ size_t max_len = bcm2835_dma_max_frame_length(chan);
+
+ /* set the length taking lite-channel limitations into account */
+ control_block->length = min_t(u32, len, max_len);
+
+ /* finished if we have no period_len */
+ if (!period_len)
+ return;
+
+ /*
+ * a non-zero period_len means that we need to generate
+ * transfers that terminate at every multiple of
+ * period_len - this is typically used to set the
+ * interrupt flag in info, which is required during
+ * cyclic transfers
+ */
+
+ /* have we filled a full period yet? */
+ if (*total_len + control_block->length < period_len)
+ return;
+
+ /* calculate the length that remains to reach period_len */
+ control_block->length = period_len - *total_len;
+
+ /* reset total_len for the next period */
+ *total_len = 0;
+
+ /* add extrainfo bits in info */
+ control_block->info |= finalextrainfo;
+}
+
+static inline size_t bcm2835_dma_count_frames_for_sg(
+ struct bcm2835_chan *c,
+ struct scatterlist *sgl,
+ unsigned int sg_len)
+{
+ size_t frames = 0;
+ struct scatterlist *sgent;
+ unsigned int i;
+ size_t plength = bcm2835_dma_max_frame_length(c);
+
+ for_each_sg(sgl, sgent, sg_len, i)
+ frames += bcm2835_dma_frames_for_length(
+ sg_dma_len(sgent), plength);
+
+ return frames;
+}
+
+/**
+ * bcm2835_dma_create_cb_chain - create a chain of control blocks and fill them in
+ *
+ * @chan: the @dma_chan for which we run this
+ * @direction: the direction in which we transfer
+ * @cyclic: whether this is a cyclic transfer
+ * @info: the default info bits to apply per controlblock
+ * @frames: number of controlblocks to allocate
+ * @src: the src address to assign (if the S_INC bit is set
+ * in @info, then it gets incremented)
+ * @dst: the dst address to assign (if the D_INC bit is set
+ * in @info, then it gets incremented)
+ * @buf_len: the full buffer length (may also be 0)
+ * @period_len: the period length at which to apply @finalextrainfo
+ * in addition to the last transfer; this will
+ * also break some control blocks early
+ * @finalextrainfo: additional bits in last controlblock
+ * (or when period_len is reached in case of cyclic)
+ * @gfp: the GFP flag to use for allocation
+ */
+static struct bcm2835_desc *bcm2835_dma_create_cb_chain(
+ struct dma_chan *chan, enum dma_transfer_direction direction,
+ bool cyclic, u32 info, u32 finalextrainfo, size_t frames,
+ dma_addr_t src, dma_addr_t dst, size_t buf_len,
+ size_t period_len, gfp_t gfp)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+ size_t len = buf_len, total_len;
+ size_t frame;
+ struct bcm2835_desc *d;
+ struct bcm2835_cb_entry *cb_entry;
+ struct bcm2835_dma_cb *control_block;
+
+ if (!frames)
+ return NULL;
+
+ /* allocate and setup the descriptor. */
+ d = kzalloc(sizeof(*d) + frames * sizeof(struct bcm2835_cb_entry),
+ gfp);
+ if (!d)
+ return NULL;
+
+ d->c = c;
+ d->dir = direction;
+ d->cyclic = cyclic;
+
+ /*
+ * Iterate over all frames, create a control block
+ * for each frame and link them together.
+ */
+ for (frame = 0, total_len = 0; frame < frames; d->frames++, frame++) {
+ cb_entry = &d->cb_list[frame];
+ cb_entry->cb = dma_pool_alloc(c->cb_pool, gfp,
+ &cb_entry->paddr);
+ if (!cb_entry->cb)
+ goto error_cb;
+
+ /* fill in the control block */
+ control_block = cb_entry->cb;
+ control_block->info = info;
+ control_block->src = src;
+ control_block->dst = dst;
+ control_block->stride = 0;
+ control_block->next = 0;
+ /* set up length in control_block if requested */
+ if (buf_len) {
+ /* calculate length honoring period_length */
+ bcm2835_dma_create_cb_set_length(
+ c, control_block,
+ len, period_len, &total_len,
+ cyclic ? finalextrainfo : 0);
+
+ /* calculate new remaining length */
+ len -= control_block->length;
+ }
+
+ /* link this to the last controlblock */
+ if (frame)
+ d->cb_list[frame - 1].cb->next = cb_entry->paddr;
+
+ /* advance src and dst for the next frame */
+ if (src && (info & BCM2835_DMA_S_INC))
+ src += control_block->length;
+ if (dst && (info & BCM2835_DMA_D_INC))
+ dst += control_block->length;
+
+ /* Length of total transfer */
+ d->size += control_block->length;
+ }
+
+ /* the last frame requires extra flags */
+ d->cb_list[d->frames - 1].cb->info |= finalextrainfo;
+
+ /* detect a size mismatch */
+ if (buf_len && (d->size != buf_len))
+ goto error_cb;
+
+ return d;
+error_cb:
+ bcm2835_dma_free_cb_chain(d);
+
+ return NULL;
+}
+
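The descriptor allocation above folds the former separate cb_list into a C99 flexible array member, so the header and the control-block table come from one kzalloc() and are released with one kfree(). A userspace sketch of the pattern, with simplified types assumed for illustration:

	#include <stdlib.h>

	struct entry { void *cb; unsigned long paddr; };

	struct desc {
		size_t frames;
		struct entry cb_list[];	/* flexible array member, must be last */
	};

	static struct desc *desc_alloc(size_t frames)
	{
		/* one allocation covers the header and all entries */
		struct desc *d = calloc(1, sizeof(*d) + frames * sizeof(struct entry));

		if (d)
			d->frames = frames;
		return d;	/* released with a single free(d) */
	}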
+static void bcm2835_dma_fill_cb_chain_with_sg(
+ struct dma_chan *chan,
+ enum dma_transfer_direction direction,
+ struct bcm2835_cb_entry *cb,
+ struct scatterlist *sgl,
+ unsigned int sg_len)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+ size_t max_len = bcm2835_dma_max_frame_length(c);
+ unsigned int i, len;
+ dma_addr_t addr;
+ struct scatterlist *sgent;
+
+ for_each_sg(sgl, sgent, sg_len, i) {
+ for (addr = sg_dma_address(sgent), len = sg_dma_len(sgent);
+ len > 0;
+ addr += cb->cb->length, len -= cb->cb->length, cb++) {
+ if (direction == DMA_DEV_TO_MEM)
+ cb->cb->dst = addr;
+ else
+ cb->cb->src = addr;
+ cb->cb->length = min(len, max_len);
+ }
+ }
+}
+
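The inner loop above lets one scatterlist entry span several control blocks when its length exceeds the channel maximum. A stand-alone sketch of that splitting, with an assumed lite-channel limit and illustrative addresses:

	#include <stdio.h>
	#include <stddef.h>

	#define MAX_LITE_LEN	(65536u - 4u)	/* lite-channel CB limit */

	static void split_entry(unsigned long addr, size_t len)
	{
		while (len > 0) {
			size_t chunk = len < MAX_LITE_LEN ? len : MAX_LITE_LEN;

			printf("cb: addr=%#lx len=%zu\n", addr, chunk);
			addr += chunk;	/* next CB continues where this one ended */
			len -= chunk;
		}
	}

	/* split_entry(0x3f000000, 100000) emits two CBs: 65532 + 34468 */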
static int bcm2835_dma_abort(void __iomem *chan_base)
{
unsigned long cs;
@@ -218,6 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
struct bcm2835_desc *d;
unsigned long flags;
+ /* check the shared interrupt */
+ if (c->irq_flags & IRQF_SHARED) {
+ /* check if the interrupt is enabled */
+ flags = readl(c->chan_base + BCM2835_DMA_CS);
+ /* if not set then we are not the reason for the irq */
+ if (!(flags & BCM2835_DMA_INT))
+ return IRQ_NONE;
+ }
+
spin_lock_irqsave(&c->vc.lock, flags);
/* Acknowledge interrupt */
@@ -226,12 +487,18 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
d = c->desc;
if (d) {
- /* TODO Only works for cyclic DMA */
- vchan_cyclic_callback(&d->vd);
- }
+ if (d->cyclic) {
+ /* call the cyclic callback */
+ vchan_cyclic_callback(&d->vd);
- /* Keep the DMA engine running */
- writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+ /* Keep the DMA engine running */
+ writel(BCM2835_DMA_ACTIVE,
+ c->chan_base + BCM2835_DMA_CS);
+ } else {
+ vchan_cookie_complete(&c->desc->vd);
+ bcm2835_dma_start_desc(c);
+ }
+ }
spin_unlock_irqrestore(&c->vc.lock, flags);
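The early IRQ_NONE return implements the usual contract for IRQF_SHARED lines: a handler must disclaim interrupts its device did not raise so the other handlers on the line get their turn. A generic sketch of that contract (my_dev, STATUS and MY_IRQ_PENDING are hypothetical names, not from this driver):

	#include <linux/interrupt.h>
	#include <linux/io.h>

	#define STATUS		0x00	/* illustrative register offset */
	#define MY_IRQ_PENDING	BIT(0)	/* illustrative status bit */

	struct my_dev { void __iomem *base; };

	static irqreturn_t my_handler(int irq, void *data)
	{
		struct my_dev *dev = data;

		/* consult a device-private status bit before touching anything */
		if (!(readl(dev->base + STATUS) & MY_IRQ_PENDING))
			return IRQ_NONE;	/* another device on this line */

		/* ... acknowledge and handle ... */
		return IRQ_HANDLED;
	}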
@@ -252,8 +519,8 @@ static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
return -ENOMEM;
}
- return request_irq(c->irq_number,
- bcm2835_dma_callback, 0, "DMA IRQ", c);
+ return request_irq(c->irq_number, bcm2835_dma_callback,
+ c->irq_flags, "DMA IRQ", c);
}
static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
@@ -339,8 +606,6 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan)
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
unsigned long flags;
- c->cyclic = true; /* Nothing else is implemented */
-
spin_lock_irqsave(&c->vc.lock, flags);
if (vchan_issue_pending(&c->vc) && !c->desc)
bcm2835_dma_start_desc(c);
@@ -348,122 +613,160 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&c->vc.lock, flags);
}
-static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
- struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags)
+struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
- enum dma_slave_buswidth dev_width;
struct bcm2835_desc *d;
- dma_addr_t dev_addr;
- unsigned int es, sync_type;
- unsigned int frame;
- int i;
+ u32 info = BCM2835_DMA_D_INC | BCM2835_DMA_S_INC;
+ u32 extra = BCM2835_DMA_INT_EN | BCM2835_DMA_WAIT_RESP;
+ size_t max_len = bcm2835_dma_max_frame_length(c);
+ size_t frames;
+
+ /* if src, dst or len is not given return with an error */
+ if (!src || !dst || !len)
+ return NULL;
+
+ /* calculate number of frames */
+ frames = bcm2835_dma_frames_for_length(len, max_len);
+
+ /* allocate the CB chain - this also fills in the pointers */
+ d = bcm2835_dma_create_cb_chain(chan, DMA_MEM_TO_MEM, false,
+ info, extra, frames,
+ src, dst, len, 0, GFP_KERNEL);
+ if (!d)
+ return NULL;
+
+ return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
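With DMA_MEMCPY now advertised (see the probe changes below), ordinary dmaengine clients can drive this channel. A minimal client-side sketch using only generic dmaengine calls; the helper name and error handling are illustrative, and chan/dst/src are assumed to be set up and DMA-mapped already:

	#include <linux/dmaengine.h>

	static int demo_memcpy(struct dma_chan *chan, dma_addr_t dst,
			       dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;

		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					       DMA_PREP_INTERRUPT);
		if (!tx)
			return -ENOMEM;

		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		return 0;	/* completion via callback or dma_async_is_tx_complete() */
	}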
+static struct dma_async_tx_descriptor *bcm2835_dma_prep_slave_sg(
+ struct dma_chan *chan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+ struct bcm2835_desc *d;
+ dma_addr_t src = 0, dst = 0;
+ u32 info = BCM2835_DMA_WAIT_RESP;
+ u32 extra = BCM2835_DMA_INT_EN;
+ size_t frames;
- /* Grab configuration */
if (!is_slave_direction(direction)) {
- dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+ dev_err(chan->device->dev,
+ "%s: bad direction?\n", __func__);
return NULL;
}
+ if (c->dreq != 0)
+ info |= BCM2835_DMA_PER_MAP(c->dreq);
+
if (direction == DMA_DEV_TO_MEM) {
- dev_addr = c->cfg.src_addr;
- dev_width = c->cfg.src_addr_width;
- sync_type = BCM2835_DMA_S_DREQ;
+ if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
+ return NULL;
+ src = c->cfg.src_addr;
+ info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
} else {
- dev_addr = c->cfg.dst_addr;
- dev_width = c->cfg.dst_addr_width;
- sync_type = BCM2835_DMA_D_DREQ;
+ if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
+ return NULL;
+ dst = c->cfg.dst_addr;
+ info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
}
- /* Bus width translates to the element size (ES) */
- switch (dev_width) {
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- es = BCM2835_DMA_DATA_TYPE_S32;
- break;
- default:
- return NULL;
- }
+ /* count frames in sg list */
+ frames = bcm2835_dma_count_frames_for_sg(c, sgl, sg_len);
- /* Now allocate and setup the descriptor. */
- d = kzalloc(sizeof(*d), GFP_NOWAIT);
+ /* allocate the CB chain */
+ d = bcm2835_dma_create_cb_chain(chan, direction, false,
+ info, extra,
+ frames, src, dst, 0, 0,
+ GFP_KERNEL);
if (!d)
return NULL;
- d->c = c;
- d->dir = direction;
- d->frames = buf_len / period_len;
+ /* fill in frames with scatterlist pointers */
+ bcm2835_dma_fill_cb_chain_with_sg(chan, direction, d->cb_list,
+ sgl, sg_len);
- d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL);
- if (!d->cb_list) {
- kfree(d);
+ return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+ struct bcm2835_desc *d;
+ dma_addr_t src, dst;
+ u32 info = BCM2835_DMA_WAIT_RESP;
+ u32 extra = BCM2835_DMA_INT_EN;
+ size_t max_len = bcm2835_dma_max_frame_length(c);
+ size_t frames;
+
+ /* Grab configuration */
+ if (!is_slave_direction(direction)) {
+ dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
return NULL;
}
- /* Allocate memory for control blocks */
- for (i = 0; i < d->frames; i++) {
- struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
- cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC,
- &cb_entry->paddr);
- if (!cb_entry->cb)
- goto error_cb;
+ if (!buf_len) {
+ dev_err(chan->device->dev,
+ "%s: bad buffer length (= 0)\n", __func__);
+ return NULL;
}
/*
- * Iterate over all frames, create a control block
- * for each frame and link them together.
+ * warn if buf_len is not a multiple of period_len - this may lead
+ * to unexpected latencies for interrupts and thus audible clicks
*/
- for (frame = 0; frame < d->frames; frame++) {
- struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb;
-
- /* Setup adresses */
- if (d->dir == DMA_DEV_TO_MEM) {
- control_block->info = BCM2835_DMA_D_INC;
- control_block->src = dev_addr;
- control_block->dst = buf_addr + frame * period_len;
- } else {
- control_block->info = BCM2835_DMA_S_INC;
- control_block->src = buf_addr + frame * period_len;
- control_block->dst = dev_addr;
- }
+ if (buf_len % period_len)
+ dev_warn_once(chan->device->dev,
+ "%s: buffer_length (%zd) is not a multiple of period_len (%zd)\n",
+ __func__, buf_len, period_len);
- /* Enable interrupt */
- control_block->info |= BCM2835_DMA_INT_EN;
+ /* Setup DREQ channel */
+ if (c->dreq != 0)
+ info |= BCM2835_DMA_PER_MAP(c->dreq);
- /* Setup synchronization */
- if (sync_type != 0)
- control_block->info |= sync_type;
+ if (direction == DMA_DEV_TO_MEM) {
+ if (c->cfg.src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
+ return NULL;
+ src = c->cfg.src_addr;
+ dst = buf_addr;
+ info |= BCM2835_DMA_S_DREQ | BCM2835_DMA_D_INC;
+ } else {
+ if (c->cfg.dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)
+ return NULL;
+ dst = c->cfg.dst_addr;
+ src = buf_addr;
+ info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;
+ }
- /* Setup DREQ channel */
- if (c->dreq != 0)
- control_block->info |=
- BCM2835_DMA_PER_MAP(c->dreq);
+ /* calculate number of frames */
+ frames = /* number of periods */
+ DIV_ROUND_UP(buf_len, period_len) *
+ /* number of frames per period */
+ bcm2835_dma_frames_for_length(period_len, max_len);
- /* Length of a frame */
- control_block->length = period_len;
- d->size += control_block->length;
+ /*
+ * allocate the CB chain
+ * note that we need to use GFP_NOWAIT, as the ALSA i2s dmaengine
+ * implementation calls prep_dma_cyclic with interrupts disabled.
+ */
+ d = bcm2835_dma_create_cb_chain(chan, direction, true,
+ info, extra,
+ frames, src, dst, buf_len,
+ period_len, GFP_NOWAIT);
+ if (!d)
+ return NULL;
- /*
- * Next block is the next frame.
- * This DMA engine driver currently only supports cyclic DMA.
- * Therefore, wrap around at number of frames.
- */
- control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr;
- }
+ /* wrap around into a loop */
+ d->cb_list[d->frames - 1].cb->next = d->cb_list[0].paddr;
return vchan_tx_prep(&c->vc, &d->vd, flags);
-error_cb:
- i--;
- for (; i >= 0; i--) {
- struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
-
- dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr);
- }
-
- kfree(d->cb_list);
- kfree(d);
- return NULL;
}
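The frame count above multiplies the number of periods by the frames each period needs on this channel; a stand-alone sketch with illustrative sizes (a 64 KiB buffer of four 16 KiB periods on a lite channel):

	#include <stddef.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	static size_t cyclic_frames(size_t buf_len, size_t period_len,
				    size_t max_len)
	{
		size_t periods = DIV_ROUND_UP(buf_len, period_len);
		size_t per_period = DIV_ROUND_UP(period_len, max_len);

		return periods * per_period;
	}

	/*
	 * cyclic_frames(65536, 16384, 65532) == 4 * 1 == 4 control blocks;
	 * the last CB's next pointer is then looped back to CB 0.
	 */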
static int bcm2835_dma_slave_config(struct dma_chan *chan,
@@ -529,7 +832,8 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
return 0;
}
-static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
+static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
+ int irq, unsigned int irq_flags)
{
struct bcm2835_chan *c;
@@ -544,6 +848,12 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
c->ch = chan_id;
c->irq_number = irq;
+ c->irq_flags = irq_flags;
+
+ /* check in DEBUG register if this is a LITE channel */
+ if (readl(c->chan_base + BCM2835_DMA_DEBUG) &
+ BCM2835_DMA_DEBUG_LITE)
+ c->is_lite_channel = true;
return 0;
}
@@ -587,9 +897,11 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *base;
int rc;
- int i;
- int irq;
+ int i, j;
+ int irq[BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED + 1];
+ int irq_flags;
uint32_t chans_available;
+ char chan_name[BCM2835_DMA_CHAN_NAME_SIZE];
if (!pdev->dev.dma_mask)
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
@@ -615,16 +927,22 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+ dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+ dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
od->ddev.device_tx_status = bcm2835_dma_tx_status;
od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
+ od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg;
+ od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
od->ddev.device_config = bcm2835_dma_slave_config;
od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
+ BIT(DMA_MEM_TO_MEM);
+ od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
spin_lock_init(&od->lock);
@@ -640,22 +958,48 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
goto err_no_dma;
}
- /*
- * Do not use the FIQ and BULK channels,
- * because they are used by the GPU.
- */
- chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK);
+ /* get irqs for each channel that we support */
+ for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) {
+ /* skip masked out channels */
+ if (!(chans_available & (1 << i))) {
+ irq[i] = -1;
+ continue;
+ }
- for (i = 0; i < pdev->num_resources; i++) {
- irq = platform_get_irq(pdev, i);
- if (irq < 0)
- break;
+ /* get the named irq */
+ snprintf(chan_name, sizeof(chan_name), "dma%i", i);
+ irq[i] = platform_get_irq_byname(pdev, chan_name);
+ if (irq[i] >= 0)
+ continue;
- if (chans_available & (1 << i)) {
- rc = bcm2835_dma_chan_init(od, i, irq);
- if (rc)
- goto err_no_dma;
- }
+ /* legacy device tree case handling */
+ dev_warn_once(&pdev->dev,
+ "missing interrupt-names property in device tree - legacy interpretation is used\n");
+ /*
+ * for channels >= 11 use the
+ * 11th interrupt, which is shared
+ */
+ irq[i] = platform_get_irq(pdev, i < 11 ? i : 11);
+ }
+
+ /* get irqs for each channel */
+ for (i = 0; i <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; i++) {
+ /* skip channels without irq */
+ if (irq[i] < 0)
+ continue;
+
+ /* check if there are other channels that also use this irq */
+ irq_flags = 0;
+ for (j = 0; j <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; j++)
+ if ((i != j) && (irq[j] == irq[i])) {
+ irq_flags = IRQF_SHARED;
+ break;
+ }
+
+ /* initialize the channel */
+ rc = bcm2835_dma_chan_init(od, i, irq[i], irq_flags);
+ if (rc)
+ goto err_no_dma;
}
dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);
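The pairwise scan above marks a channel's interrupt as shared whenever any other channel ends up with the same line, which is exactly what the legacy fallback produces for channels 11 and up. A stand-alone sketch of the detection rule (array contents illustrative):

	#include <stdbool.h>

	/* true when channel i must register its irq with IRQF_SHARED */
	static bool irq_is_shared(const int *irq, int n, int i)
	{
		for (int j = 0; j < n; j++)
			if (j != i && irq[j] >= 0 && irq[j] == irq[i])
				return true;
		return false;
	}

	/*
	 * legacy DT: irq[11]..irq[14] all hold the same number, so those
	 * channels register shared; channels with unique lines keep flags 0.
	 */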
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 0cb259c59..8c9f45fd5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -289,7 +289,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
do {
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
- pr_err("%s: timeout!\n", __func__);
+ dev_err(chan->device->dev, "%s: timeout!\n", __func__);
return DMA_ERROR;
}
if (status != DMA_IN_PROGRESS)
@@ -482,7 +482,8 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
device = chan->device;
/* check if the channel supports slave transactions */
- if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
+ if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
+ test_bit(DMA_CYCLIC, device->cap_mask.bits)))
return -ENXIO;
/*
@@ -518,7 +519,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
struct dma_chan *chan;
if (mask && !__dma_device_satisfies_mask(dev, mask)) {
- pr_debug("%s: wrong capabilities\n", __func__);
+ dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
return NULL;
}
/* devices with multiple channels need special handling as we need to
@@ -533,12 +534,12 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
list_for_each_entry(chan, &dev->channels, device_node) {
if (chan->client_count) {
- pr_debug("%s: %s busy\n",
+ dev_dbg(dev->dev, "%s: %s busy\n",
__func__, dma_chan_name(chan));
continue;
}
if (fn && !fn(chan, fn_param)) {
- pr_debug("%s: %s filter said false\n",
+ dev_dbg(dev->dev, "%s: %s filter said false\n",
__func__, dma_chan_name(chan));
continue;
}
@@ -567,11 +568,12 @@ static struct dma_chan *find_candidate(struct dma_device *device,
if (err) {
if (err == -ENODEV) {
- pr_debug("%s: %s module removed\n", __func__,
- dma_chan_name(chan));
+ dev_dbg(device->dev, "%s: %s module removed\n",
+ __func__, dma_chan_name(chan));
list_del_rcu(&device->global_node);
} else
- pr_debug("%s: failed to get %s: (%d)\n",
+ dev_dbg(device->dev,
+ "%s: failed to get %s: (%d)\n",
__func__, dma_chan_name(chan), err);
if (--device->privatecnt == 0)
@@ -602,7 +604,8 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
device->privatecnt++;
err = dma_chan_get(chan);
if (err) {
- pr_debug("%s: failed to get %s: (%d)\n",
+ dev_dbg(chan->device->dev,
+ "%s: failed to get %s: (%d)\n",
__func__, dma_chan_name(chan), err);
chan = NULL;
if (--device->privatecnt == 0)
@@ -814,8 +817,9 @@ void dmaengine_get(void)
list_del_rcu(&device->global_node);
break;
} else if (err)
- pr_debug("%s: failed to get %s: (%d)\n",
- __func__, dma_chan_name(chan), err);
+ dev_dbg(chan->device->dev,
+ "%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
}
}
@@ -862,12 +866,12 @@ static bool device_has_all_tx_types(struct dma_device *device)
return false;
#endif
- #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
+ #if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
return false;
#endif
- #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
+ #if IS_ENABLED(CONFIG_ASYNC_XOR)
if (!dma_has_cap(DMA_XOR, device->cap_mask))
return false;
@@ -877,7 +881,7 @@ static bool device_has_all_tx_types(struct dma_device *device)
#endif
#endif
- #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
+ #if IS_ENABLED(CONFIG_ASYNC_PQ)
if (!dma_has_cap(DMA_PQ, device->cap_mask))
return false;
@@ -1222,8 +1226,9 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
while (tx->cookie == -EBUSY) {
if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
- pr_err("%s timeout waiting for descriptor submission\n",
- __func__);
+ dev_err(tx->chan->device->dev,
+ "%s timeout waiting for descriptor submission\n",
+ __func__);
return DMA_ERROR;
}
cpu_relax();
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 97199b3c2..edf053f73 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -45,22 +45,19 @@
DW_DMA_MSIZE_16; \
u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \
DW_DMA_MSIZE_16; \
+ u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ? \
+ _dwc->p_master : _dwc->m_master; \
+ u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ? \
+ _dwc->p_master : _dwc->m_master; \
\
(DWC_CTLL_DST_MSIZE(_dmsize) \
| DWC_CTLL_SRC_MSIZE(_smsize) \
| DWC_CTLL_LLP_D_EN \
| DWC_CTLL_LLP_S_EN \
- | DWC_CTLL_DMS(_dwc->dst_master) \
- | DWC_CTLL_SMS(_dwc->src_master)); \
+ | DWC_CTLL_DMS(_dms) \
+ | DWC_CTLL_SMS(_sms)); \
})
-/*
- * Number of descriptors to allocate for each channel. This should be
- * made configurable somehow; preferably, the clients (at least the
- * ones using slave transfers) should be able to give us a hint.
- */
-#define NR_DESCS_PER_CHANNEL 64
-
/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS \
BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
@@ -80,51 +77,65 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
return to_dw_desc(dwc->active_list.next);
}
-static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
+static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
- struct dw_desc *desc, *_desc;
- struct dw_desc *ret = NULL;
- unsigned int i = 0;
- unsigned long flags;
+ struct dw_desc *desc = txd_to_dw_desc(tx);
+ struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
+ dma_cookie_t cookie;
+ unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
- list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
- i++;
- if (async_tx_test_ack(&desc->txd)) {
- list_del(&desc->desc_node);
- ret = desc;
- break;
- }
- dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
- }
+ cookie = dma_cookie_assign(tx);
+
+ /*
+ * REVISIT: We should attempt to chain as many descriptors as
+ * possible, perhaps even appending to those already submitted
+ * for DMA. But this is hard to do in a race-free manner.
+ */
+
+ list_add_tail(&desc->desc_node, &dwc->queue);
spin_unlock_irqrestore(&dwc->lock, flags);
+ dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
+ __func__, desc->txd.cookie);
- dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
+ return cookie;
+}
- return ret;
+static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_desc *desc;
+ dma_addr_t phys;
+
+ desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
+ if (!desc)
+ return NULL;
+
+ dwc->descs_allocated++;
+ INIT_LIST_HEAD(&desc->tx_list);
+ dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
+ desc->txd.tx_submit = dwc_tx_submit;
+ desc->txd.flags = DMA_CTRL_ACK;
+ desc->txd.phys = phys;
+ return desc;
}
-/*
- * Move a descriptor, including any children, to the free list.
- * `desc' must not be on any lists.
- */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
- unsigned long flags;
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_desc *child, *_next;
- if (desc) {
- struct dw_desc *child;
+ if (unlikely(!desc))
+ return;
- spin_lock_irqsave(&dwc->lock, flags);
- list_for_each_entry(child, &desc->tx_list, desc_node)
- dev_vdbg(chan2dev(&dwc->chan),
- "moving child desc %p to freelist\n",
- child);
- list_splice_init(&desc->tx_list, &dwc->free_list);
- dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
- list_add(&desc->desc_node, &dwc->free_list);
- spin_unlock_irqrestore(&dwc->lock, flags);
+ list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
+ list_del(&child->desc_node);
+ dma_pool_free(dw->desc_pool, child, child->txd.phys);
+ dwc->descs_allocated--;
}
+
+ dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
+ dwc->descs_allocated--;
}
static void dwc_initialize(struct dw_dma_chan *dwc)
@@ -133,7 +144,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
u32 cfghi = DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
- if (dwc->initialized == true)
+ if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
return;
cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
@@ -146,26 +157,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
channel_set_bit(dw, MASK.XFER, dwc->mask);
channel_set_bit(dw, MASK.ERROR, dwc->mask);
- dwc->initialized = true;
+ set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
}
/*----------------------------------------------------------------------*/
-static inline unsigned int dwc_fast_ffs(unsigned long long v)
-{
- /*
- * We can be a lot more clever here, but this should take care
- * of the most common optimization.
- */
- if (!(v & 7))
- return 3;
- else if (!(v & 3))
- return 2;
- else if (!(v & 1))
- return 1;
- return 0;
-}
-
static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
dev_err(chan2dev(&dwc->chan),
@@ -197,12 +193,12 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
* Software emulation of LLP mode relies on interrupts to continue
* multi block transfer.
*/
- ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
+ ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;
- channel_writel(dwc, SAR, desc->lli.sar);
- channel_writel(dwc, DAR, desc->lli.dar);
+ channel_writel(dwc, SAR, lli_read(desc, sar));
+ channel_writel(dwc, DAR, lli_read(desc, dar));
channel_writel(dwc, CTL_LO, ctllo);
- channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
+ channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
channel_set_bit(dw, CH_EN, dwc->mask);
/* Move pointer to next descriptor */
@@ -213,6 +209,7 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ u8 lms = DWC_LLP_LMS(dwc->m_master);
unsigned long was_soft_llp;
/* ASSERT: channel is idle */
@@ -237,7 +234,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
dwc_initialize(dwc);
- dwc->residue = first->total_len;
+ first->residue = first->total_len;
dwc->tx_node_active = &first->tx_list;
/* Submit first block */
@@ -248,9 +245,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
dwc_initialize(dwc);
- channel_writel(dwc, LLP, first->txd.phys);
- channel_writel(dwc, CTL_LO,
- DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+ channel_writel(dwc, LLP, first->txd.phys | lms);
+ channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
channel_writel(dwc, CTL_HI, 0);
channel_set_bit(dw, CH_EN, dwc->mask);
}
@@ -293,11 +289,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
list_for_each_entry(child, &desc->tx_list, desc_node)
async_tx_ack(&child->txd);
async_tx_ack(&desc->txd);
-
- list_splice_init(&desc->tx_list, &dwc->free_list);
- list_move(&desc->desc_node, &dwc->free_list);
-
- dma_descriptor_unmap(txd);
+ dwc_desc_put(dwc, desc);
spin_unlock_irqrestore(&dwc->lock, flags);
if (callback)
@@ -368,11 +360,11 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
head = &desc->tx_list;
if (active != head) {
- /* Update desc to reflect last sent one */
- if (active != head->next)
- desc = to_dw_desc(active->prev);
-
- dwc->residue -= desc->len;
+ /* Update residue to reflect last sent descriptor */
+ if (active == head->next)
+ desc->residue -= desc->len;
+ else
+ desc->residue -= to_dw_desc(active->prev)->len;
child = to_dw_desc(active);
@@ -387,8 +379,6 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
}
- dwc->residue = 0;
-
spin_unlock_irqrestore(&dwc->lock, flags);
dwc_complete_all(dw, dwc);
@@ -396,7 +386,6 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
}
if (list_empty(&dwc->active_list)) {
- dwc->residue = 0;
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
@@ -411,31 +400,31 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
/* Initial residue value */
- dwc->residue = desc->total_len;
+ desc->residue = desc->total_len;
/* Check first descriptors addr */
- if (desc->txd.phys == llp) {
+ if (desc->txd.phys == DWC_LLP_LOC(llp)) {
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
/* Check first descriptors llp */
- if (desc->lli.llp == llp) {
+ if (lli_read(desc, llp) == llp) {
/* This one is currently in progress */
- dwc->residue -= dwc_get_sent(dwc);
+ desc->residue -= dwc_get_sent(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
- dwc->residue -= desc->len;
+ desc->residue -= desc->len;
list_for_each_entry(child, &desc->tx_list, desc_node) {
- if (child->lli.llp == llp) {
+ if (lli_read(child, llp) == llp) {
/* Currently in progress */
- dwc->residue -= dwc_get_sent(dwc);
+ desc->residue -= dwc_get_sent(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return;
}
- dwc->residue -= child->len;
+ desc->residue -= child->len;
}
/*
@@ -457,10 +446,14 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
spin_unlock_irqrestore(&dwc->lock, flags);
}
-static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
+static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
- lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
+ lli_read(desc, sar),
+ lli_read(desc, dar),
+ lli_read(desc, llp),
+ lli_read(desc, ctlhi),
+ lli_read(desc, ctllo));
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -496,9 +489,9 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
*/
dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
" cookie: %d\n", bad_desc->txd.cookie);
- dwc_dump_lli(dwc, &bad_desc->lli);
+ dwc_dump_lli(dwc, bad_desc);
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
- dwc_dump_lli(dwc, &child->lli);
+ dwc_dump_lli(dwc, child);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -549,7 +542,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
*/
if (unlikely(status_err & dwc->mask) ||
unlikely(status_xfer & dwc->mask)) {
- int i;
+ unsigned int i;
dev_err(chan2dev(&dwc->chan),
"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
@@ -571,7 +564,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
dma_writel(dw, CLEAR.XFER, dwc->mask);
for (i = 0; i < dwc->cdesc->periods; i++)
- dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+ dwc_dump_lli(dwc, dwc->cdesc->desc[i]);
spin_unlock_irqrestore(&dwc->lock, flags);
}
@@ -589,7 +582,7 @@ static void dw_dma_tasklet(unsigned long data)
u32 status_block;
u32 status_xfer;
u32 status_err;
- int i;
+ unsigned int i;
status_block = dma_readl(dw, RAW.BLOCK);
status_xfer = dma_readl(dw, RAW.XFER);
@@ -658,30 +651,6 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
/*----------------------------------------------------------------------*/
-static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
-{
- struct dw_desc *desc = txd_to_dw_desc(tx);
- struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
- dma_cookie_t cookie;
- unsigned long flags;
-
- spin_lock_irqsave(&dwc->lock, flags);
- cookie = dma_cookie_assign(tx);
-
- /*
- * REVISIT: We should attempt to chain as many descriptors as
- * possible, perhaps even appending to those already submitted
- * for DMA. But this is hard to do in a race-free manner.
- */
-
- dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
- list_add_tail(&desc->desc_node, &dwc->queue);
-
- spin_unlock_irqrestore(&dwc->lock, flags);
-
- return cookie;
-}
-
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
@@ -693,10 +662,12 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
struct dw_desc *prev;
size_t xfer_count;
size_t offset;
+ u8 m_master = dwc->m_master;
unsigned int src_width;
unsigned int dst_width;
- unsigned int data_width;
+ unsigned int data_width = dw->pdata->data_width[m_master];
u32 ctllo;
+ u8 lms = DWC_LLP_LMS(m_master);
dev_vdbg(chan2dev(chan),
"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
@@ -709,11 +680,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
dwc->direction = DMA_MEM_TO_MEM;
- data_width = min_t(unsigned int, dw->data_width[dwc->src_master],
- dw->data_width[dwc->dst_master]);
-
- src_width = dst_width = min_t(unsigned int, data_width,
- dwc_fast_ffs(src | dest | len));
+ src_width = dst_width = __ffs(data_width | src | dest | len);
ctllo = DWC_DEFAULT_CTLLO(chan)
| DWC_CTLL_DST_WIDTH(dst_width)
@@ -731,27 +698,27 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (!desc)
goto err_desc_get;
- desc->lli.sar = src + offset;
- desc->lli.dar = dest + offset;
- desc->lli.ctllo = ctllo;
- desc->lli.ctlhi = xfer_count;
+ lli_write(desc, sar, src + offset);
+ lli_write(desc, dar, dest + offset);
+ lli_write(desc, ctllo, ctllo);
+ lli_write(desc, ctlhi, xfer_count);
desc->len = xfer_count << src_width;
if (!first) {
first = desc;
} else {
- prev->lli.llp = desc->txd.phys;
- list_add_tail(&desc->desc_node,
- &first->tx_list);
+ lli_write(prev, llp, desc->txd.phys | lms);
+ list_add_tail(&desc->desc_node, &first->tx_list);
}
prev = desc;
}
if (flags & DMA_PREP_INTERRUPT)
/* Trigger interrupt after last block */
- prev->lli.ctllo |= DWC_CTLL_INT_EN;
+ lli_set(prev, ctllo, DWC_CTLL_INT_EN);
prev->lli.llp = 0;
+ lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
first->txd.flags = flags;
first->total_len = len;
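The single __ffs() above replaces the old min_t()/dwc_fast_ffs() pair: OR-ing the addresses and length yields the lowest common alignment bit, and OR-ing in data_width (now stored in bytes) caps the result at the bus width, because __ffs() returns the lowest set bit. A userspace sketch with ffs() standing in for the kernel's __ffs() (ffs() is 1-based, hence the -1):

	#include <strings.h>

	/* log2 of the widest usable transfer width in bytes */
	static unsigned int xfer_width(unsigned int data_width, /* bytes, e.g. 4 */
				       unsigned int src, unsigned int dst,
				       unsigned int len)
	{
		return ffs(data_width | src | dst | len) - 1;
	}

	/*
	 * xfer_width(4, 0x1000, 0x2000, 64) == 2 -> 4-byte transfers
	 * xfer_width(4, 0x1001, 0x2000, 64) == 0 -> byte transfers
	 */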
@@ -773,10 +740,12 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dw_desc *prev;
struct dw_desc *first;
u32 ctllo;
+ u8 m_master = dwc->m_master;
+ u8 lms = DWC_LLP_LMS(m_master);
dma_addr_t reg;
unsigned int reg_width;
unsigned int mem_width;
- unsigned int data_width;
+ unsigned int data_width = dw->pdata->data_width[m_master];
unsigned int i;
struct scatterlist *sg;
size_t total_len = 0;
@@ -802,8 +771,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
DWC_CTLL_FC(DW_DMA_FC_D_M2P);
- data_width = dw->data_width[dwc->src_master];
-
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
u32 len, dlen, mem;
@@ -811,17 +778,16 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
- mem_width = min_t(unsigned int,
- data_width, dwc_fast_ffs(mem | len));
+ mem_width = __ffs(data_width | mem | len);
slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
if (!desc)
goto err_desc_get;
- desc->lli.sar = mem;
- desc->lli.dar = reg;
- desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
+ lli_write(desc, sar, mem);
+ lli_write(desc, dar, reg);
+ lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
if ((len >> mem_width) > dwc->block_size) {
dlen = dwc->block_size << mem_width;
mem += dlen;
@@ -831,15 +797,14 @@ slave_sg_todev_fill_desc:
len = 0;
}
- desc->lli.ctlhi = dlen >> mem_width;
+ lli_write(desc, ctlhi, dlen >> mem_width);
desc->len = dlen;
if (!first) {
first = desc;
} else {
- prev->lli.llp = desc->txd.phys;
- list_add_tail(&desc->desc_node,
- &first->tx_list);
+ lli_write(prev, llp, desc->txd.phys | lms);
+ list_add_tail(&desc->desc_node, &first->tx_list);
}
prev = desc;
total_len += dlen;
@@ -859,8 +824,6 @@ slave_sg_todev_fill_desc:
ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
DWC_CTLL_FC(DW_DMA_FC_D_P2M);
- data_width = dw->data_width[dwc->dst_master];
-
for_each_sg(sgl, sg, sg_len, i) {
struct dw_desc *desc;
u32 len, dlen, mem;
@@ -868,17 +831,16 @@ slave_sg_todev_fill_desc:
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
- mem_width = min_t(unsigned int,
- data_width, dwc_fast_ffs(mem | len));
+ mem_width = __ffs(data_width | mem | len);
slave_sg_fromdev_fill_desc:
desc = dwc_desc_get(dwc);
if (!desc)
goto err_desc_get;
- desc->lli.sar = reg;
- desc->lli.dar = mem;
- desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
+ lli_write(desc, sar, reg);
+ lli_write(desc, dar, mem);
+ lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
if ((len >> reg_width) > dwc->block_size) {
dlen = dwc->block_size << reg_width;
mem += dlen;
@@ -887,15 +849,14 @@ slave_sg_fromdev_fill_desc:
dlen = len;
len = 0;
}
- desc->lli.ctlhi = dlen >> reg_width;
+ lli_write(desc, ctlhi, dlen >> reg_width);
desc->len = dlen;
if (!first) {
first = desc;
} else {
- prev->lli.llp = desc->txd.phys;
- list_add_tail(&desc->desc_node,
- &first->tx_list);
+ lli_write(prev, llp, desc->txd.phys | lms);
+ list_add_tail(&desc->desc_node, &first->tx_list);
}
prev = desc;
total_len += dlen;
@@ -910,9 +871,10 @@ slave_sg_fromdev_fill_desc:
if (flags & DMA_PREP_INTERRUPT)
/* Trigger interrupt after last block */
- prev->lli.ctllo |= DWC_CTLL_INT_EN;
+ lli_set(prev, ctllo, DWC_CTLL_INT_EN);
prev->lli.llp = 0;
+ lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
first->total_len = total_len;
return &first->txd;
@@ -937,8 +899,8 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
dwc->src_id = dws->src_id;
dwc->dst_id = dws->dst_id;
- dwc->src_master = dws->src_master;
- dwc->dst_master = dws->dst_master;
+ dwc->m_master = dws->m_master;
+ dwc->p_master = dws->p_master;
return true;
}
@@ -991,7 +953,7 @@ static int dwc_pause(struct dma_chan *chan)
while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
udelay(2);
- dwc->paused = true;
+ set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1004,7 +966,7 @@ static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
- dwc->paused = false;
+ clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}
static int dwc_resume(struct dma_chan *chan)
@@ -1012,12 +974,10 @@ static int dwc_resume(struct dma_chan *chan)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
unsigned long flags;
- if (!dwc->paused)
- return 0;
-
spin_lock_irqsave(&dwc->lock, flags);
- dwc_chan_resume(dwc);
+ if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
+ dwc_chan_resume(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1053,16 +1013,37 @@ static int dwc_terminate_all(struct dma_chan *chan)
return 0;
}
-static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
+static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
{
+ struct dw_desc *desc;
+
+ list_for_each_entry(desc, &dwc->active_list, desc_node)
+ if (desc->txd.cookie == c)
+ return desc;
+
+ return NULL;
+}
+
+static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
+{
+ struct dw_desc *desc;
unsigned long flags;
u32 residue;
spin_lock_irqsave(&dwc->lock, flags);
- residue = dwc->residue;
- if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
- residue -= dwc_get_sent(dwc);
+ desc = dwc_find_desc(dwc, cookie);
+ if (desc) {
+ if (desc == dwc_first_active(dwc)) {
+ residue = desc->residue;
+ if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
+ residue -= dwc_get_sent(dwc);
+ } else {
+ residue = desc->total_len;
+ }
+ } else {
+ residue = 0;
+ }
spin_unlock_irqrestore(&dwc->lock, flags);
return residue;
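Residue is now tracked per descriptor rather than per channel, so a cookie that is still queued reports its full length and an unknown cookie reports zero. A toy model of the rule for the active descriptor (numbers illustrative):

	#include <stdbool.h>

	static unsigned int residue(unsigned int total_len, unsigned int done_len,
				    unsigned int sent_in_flight, bool is_active)
	{
		if (!is_active)
			return total_len;	/* still queued: nothing sent yet */
		return total_len - done_len - sent_in_flight;
	}

	/*
	 * A 3 x 4096 byte descriptor with block 0 done and 1024 bytes of
	 * block 1 sent: residue(12288, 4096, 1024, true) == 7168.
	 */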
@@ -1083,10 +1064,12 @@ dwc_tx_status(struct dma_chan *chan,
dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret != DMA_COMPLETE)
- dma_set_residue(txstate, dwc_get_residue(dwc));
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ dma_set_residue(txstate, dwc_get_residue(dwc, cookie));
- if (dwc->paused && ret == DMA_IN_PROGRESS)
+ if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
return DMA_PAUSED;
return ret;
@@ -1107,7 +1090,7 @@ static void dwc_issue_pending(struct dma_chan *chan)
static void dw_dma_off(struct dw_dma *dw)
{
- int i;
+ unsigned int i;
dma_writel(dw, CFG, 0);
@@ -1121,7 +1104,7 @@ static void dw_dma_off(struct dw_dma *dw)
cpu_relax();
for (i = 0; i < dw->dma.chancnt; i++)
- dw->chan[i].initialized = false;
+ clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
}
static void dw_dma_on(struct dw_dma *dw)
@@ -1133,9 +1116,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
- struct dw_desc *desc;
- int i;
- unsigned long flags;
dev_vdbg(chan2dev(chan), "%s\n", __func__);
@@ -1166,48 +1146,13 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
dw_dma_on(dw);
dw->in_use |= dwc->mask;
- spin_lock_irqsave(&dwc->lock, flags);
- i = dwc->descs_allocated;
- while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
- dma_addr_t phys;
-
- spin_unlock_irqrestore(&dwc->lock, flags);
-
- desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
- if (!desc)
- goto err_desc_alloc;
-
- memset(desc, 0, sizeof(struct dw_desc));
-
- INIT_LIST_HEAD(&desc->tx_list);
- dma_async_tx_descriptor_init(&desc->txd, chan);
- desc->txd.tx_submit = dwc_tx_submit;
- desc->txd.flags = DMA_CTRL_ACK;
- desc->txd.phys = phys;
-
- dwc_desc_put(dwc, desc);
-
- spin_lock_irqsave(&dwc->lock, flags);
- i = ++dwc->descs_allocated;
- }
-
- spin_unlock_irqrestore(&dwc->lock, flags);
-
- dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
-
- return i;
-
-err_desc_alloc:
- dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
-
- return i;
+ return 0;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
- struct dw_desc *desc, *_desc;
unsigned long flags;
LIST_HEAD(list);
@@ -1220,17 +1165,15 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
spin_lock_irqsave(&dwc->lock, flags);
- list_splice_init(&dwc->free_list, &list);
- dwc->descs_allocated = 0;
/* Clear custom channel configuration */
dwc->src_id = 0;
dwc->dst_id = 0;
- dwc->src_master = 0;
- dwc->dst_master = 0;
+ dwc->m_master = 0;
+ dwc->p_master = 0;
- dwc->initialized = false;
+ clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
/* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1244,11 +1187,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
if (!dw->in_use)
dw_dma_off(dw);
- list_for_each_entry_safe(desc, _desc, &list, desc_node) {
- dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
- dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
- }
-
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
@@ -1326,6 +1264,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
struct dw_cyclic_desc *retval = NULL;
struct dw_desc *desc;
struct dw_desc *last = NULL;
+ u8 lms = DWC_LLP_LMS(dwc->m_master);
unsigned long was_cyclic;
unsigned int reg_width;
unsigned int periods;
@@ -1379,9 +1318,6 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
retval = ERR_PTR(-ENOMEM);
- if (periods > NR_DESCS_PER_CHANNEL)
- goto out_err;
-
cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
if (!cdesc)
goto out_err;
@@ -1397,50 +1333,50 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
switch (direction) {
case DMA_MEM_TO_DEV:
- desc->lli.dar = sconfig->dst_addr;
- desc->lli.sar = buf_addr + (period_len * i);
- desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
- | DWC_CTLL_DST_WIDTH(reg_width)
- | DWC_CTLL_SRC_WIDTH(reg_width)
- | DWC_CTLL_DST_FIX
- | DWC_CTLL_SRC_INC
- | DWC_CTLL_INT_EN);
-
- desc->lli.ctllo |= sconfig->device_fc ?
- DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
- DWC_CTLL_FC(DW_DMA_FC_D_M2P);
+ lli_write(desc, dar, sconfig->dst_addr);
+ lli_write(desc, sar, buf_addr + period_len * i);
+ lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
+ | DWC_CTLL_DST_WIDTH(reg_width)
+ | DWC_CTLL_SRC_WIDTH(reg_width)
+ | DWC_CTLL_DST_FIX
+ | DWC_CTLL_SRC_INC
+ | DWC_CTLL_INT_EN));
+
+ lli_set(desc, ctllo, sconfig->device_fc ?
+ DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
+ DWC_CTLL_FC(DW_DMA_FC_D_M2P));
break;
case DMA_DEV_TO_MEM:
- desc->lli.dar = buf_addr + (period_len * i);
- desc->lli.sar = sconfig->src_addr;
- desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
- | DWC_CTLL_SRC_WIDTH(reg_width)
- | DWC_CTLL_DST_WIDTH(reg_width)
- | DWC_CTLL_DST_INC
- | DWC_CTLL_SRC_FIX
- | DWC_CTLL_INT_EN);
-
- desc->lli.ctllo |= sconfig->device_fc ?
- DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
- DWC_CTLL_FC(DW_DMA_FC_D_P2M);
+ lli_write(desc, dar, buf_addr + period_len * i);
+ lli_write(desc, sar, sconfig->src_addr);
+ lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
+ | DWC_CTLL_SRC_WIDTH(reg_width)
+ | DWC_CTLL_DST_WIDTH(reg_width)
+ | DWC_CTLL_DST_INC
+ | DWC_CTLL_SRC_FIX
+ | DWC_CTLL_INT_EN));
+
+ lli_set(desc, ctllo, sconfig->device_fc ?
+ DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
+ DWC_CTLL_FC(DW_DMA_FC_D_P2M));
break;
default:
break;
}
- desc->lli.ctlhi = (period_len >> reg_width);
+ lli_write(desc, ctlhi, period_len >> reg_width);
cdesc->desc[i] = desc;
if (last)
- last->lli.llp = desc->txd.phys;
+ lli_write(last, llp, desc->txd.phys | lms);
last = desc;
}
/* Let's make a cyclic list */
- last->lli.llp = cdesc->desc[0]->txd.phys;
+ lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);
dev_dbg(chan2dev(&dwc->chan),
"cyclic prepared buf %pad len %zu period %zu periods %d\n",
@@ -1471,7 +1407,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
struct dw_cyclic_desc *cdesc = dwc->cdesc;
- int i;
+ unsigned int i;
unsigned long flags;
dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
@@ -1495,32 +1431,38 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
kfree(cdesc->desc);
kfree(cdesc);
+ dwc->cdesc = NULL;
+
clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);
/*----------------------------------------------------------------------*/
-int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+int dw_dma_probe(struct dw_dma_chip *chip)
{
+ struct dw_dma_platform_data *pdata;
struct dw_dma *dw;
bool autocfg = false;
unsigned int dw_params;
- unsigned int max_blk_size = 0;
+ unsigned int i;
int err;
- int i;
dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
if (!dw)
return -ENOMEM;
+ dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
+ if (!dw->pdata)
+ return -ENOMEM;
+
dw->regs = chip->regs;
chip->dw = dw;
pm_runtime_get_sync(chip->dev);
- if (!pdata) {
- dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
+ if (!chip->pdata) {
+ dw_params = dma_readl(dw, DW_PARAMS);
dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
autocfg = dw_params >> DW_PARAMS_EN & 1;
@@ -1529,29 +1471,31 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
goto err_pdata;
}
- pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- err = -ENOMEM;
- goto err_pdata;
- }
+ /* Reassign the platform data pointer */
+ pdata = dw->pdata;
/* Get hardware configuration parameters */
pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
for (i = 0; i < pdata->nr_masters; i++) {
pdata->data_width[i] =
- (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
+ 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
}
- max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
+ pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
/* Fill platform data with the default values */
pdata->is_private = true;
pdata->is_memcpy = true;
pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
- } else if (pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
+ } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
err = -EINVAL;
goto err_pdata;
+ } else {
+ memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));
+
+ /* Reassign the platform data pointer */
+ pdata = dw->pdata;
}
dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
@@ -1561,11 +1505,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
goto err_pdata;
}
- /* Get hardware configuration parameters */
- dw->nr_masters = pdata->nr_masters;
- for (i = 0; i < dw->nr_masters; i++)
- dw->data_width[i] = pdata->data_width[i];
-
/* Calculate all channel mask before DMA setup */
dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
@@ -1612,7 +1551,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
INIT_LIST_HEAD(&dwc->active_list);
INIT_LIST_HEAD(&dwc->queue);
- INIT_LIST_HEAD(&dwc->free_list);
channel_clear_bit(dw, CH_EN, dwc->mask);
@@ -1620,11 +1558,9 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Hardware configuration */
if (autocfg) {
- unsigned int dwc_params;
unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
- void __iomem *addr = chip->regs + r * sizeof(u32);
-
- dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
+ void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
+ unsigned int dwc_params = dma_readl_native(addr);
dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
dwc_params);
@@ -1635,16 +1571,15 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
* up to 0x0a for 4095.
*/
dwc->block_size =
- (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
+ (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
dwc->nollp =
(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
} else {
dwc->block_size = pdata->block_size;
/* Check if channel supports multi block transfer */
- channel_writel(dwc, LLP, 0xfffffffc);
- dwc->nollp =
- (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
+ channel_writel(dwc, LLP, DWC_LLP_LOC(0xffffffff));
+ dwc->nollp = DWC_LLP_LOC(channel_readl(dwc, LLP)) == 0;
channel_writel(dwc, LLP, 0);
}
}
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 358f9689a..0ae6c3b1d 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -17,8 +17,8 @@
static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
+ const struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
struct dw_dma_chip *chip;
- struct dw_dma_platform_data *pdata = (void *)pid->driver_data;
int ret;
ret = pcim_enable_device(pdev);
@@ -49,8 +49,9 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
chip->dev = &pdev->dev;
chip->regs = pcim_iomap_table(pdev)[0];
chip->irq = pdev->irq;
+ chip->pdata = pdata;
- ret = dw_dma_probe(chip, pdata);
+ ret = dw_dma_probe(chip);
if (ret)
return ret;
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index 26edbe3a2..5bda0eb9f 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -42,13 +42,13 @@ static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
slave.src_id = dma_spec->args[0];
slave.dst_id = dma_spec->args[0];
- slave.src_master = dma_spec->args[1];
- slave.dst_master = dma_spec->args[2];
+ slave.m_master = dma_spec->args[1];
+ slave.p_master = dma_spec->args[2];
if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
- slave.src_master >= dw->nr_masters ||
- slave.dst_master >= dw->nr_masters))
+ slave.m_master >= dw->pdata->nr_masters ||
+ slave.p_master >= dw->pdata->nr_masters))
return NULL;
dma_cap_zero(cap);
@@ -66,8 +66,8 @@ static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param)
.dma_dev = dma_spec->dev,
.src_id = dma_spec->slave_id,
.dst_id = dma_spec->slave_id,
- .src_master = 1,
- .dst_master = 0,
+ .m_master = 0,
+ .p_master = 1,
};
return dw_dma_filter(chan, &slave);
@@ -103,6 +103,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct dw_dma_platform_data *pdata;
u32 tmp, arr[DW_DMA_MAX_NR_MASTERS];
+ u32 nr_masters;
u32 nr_channels;
if (!np) {
@@ -110,6 +111,11 @@ dw_dma_parse_dt(struct platform_device *pdev)
return NULL;
}
+ if (of_property_read_u32(np, "dma-masters", &nr_masters))
+ return NULL;
+ if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
+ return NULL;
+
if (of_property_read_u32(np, "dma-channels", &nr_channels))
return NULL;
@@ -117,6 +123,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
if (!pdata)
return NULL;
+ pdata->nr_masters = nr_masters;
pdata->nr_channels = nr_channels;
if (of_property_read_bool(np, "is_private"))
@@ -131,17 +138,13 @@ dw_dma_parse_dt(struct platform_device *pdev)
if (!of_property_read_u32(np, "block_size", &tmp))
pdata->block_size = tmp;
- if (!of_property_read_u32(np, "dma-masters", &tmp)) {
- if (tmp > DW_DMA_MAX_NR_MASTERS)
- return NULL;
-
- pdata->nr_masters = tmp;
- }
-
- if (!of_property_read_u32_array(np, "data_width", arr,
- pdata->nr_masters))
- for (tmp = 0; tmp < pdata->nr_masters; tmp++)
+ if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
+ for (tmp = 0; tmp < nr_masters; tmp++)
pdata->data_width[tmp] = arr[tmp];
+ } else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
+ for (tmp = 0; tmp < nr_masters; tmp++)
+ pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
+ }
return pdata;
}
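
The two property names parsed above carry different encodings: the current "data-width" binding stores the master data width in bytes, while the legacy "data_width" stores log2 of that width, hence the BIT(arr[tmp] & 0x07) conversion. A small standalone sketch of the legacy decode (values hypothetical):

#include <stdio.h>

#define BIT(n)	(1u << (n))

int main(void)
{
	unsigned int legacy[] = { 0, 1, 2, 3 };	/* log2-encoded widths */
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("data_width = %u  ->  %u bytes\n",
		       legacy[i], BIT(legacy[i] & 0x07));
	return 0;
}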
@@ -158,7 +161,7 @@ static int dw_probe(struct platform_device *pdev)
struct dw_dma_chip *chip;
struct device *dev = &pdev->dev;
struct resource *mem;
- struct dw_dma_platform_data *pdata;
+ const struct dw_dma_platform_data *pdata;
int err;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
@@ -183,6 +186,7 @@ static int dw_probe(struct platform_device *pdev)
pdata = dw_dma_parse_dt(pdev);
chip->dev = dev;
+ chip->pdata = pdata;
chip->clk = devm_clk_get(chip->dev, "hclk");
if (IS_ERR(chip->clk))
@@ -193,7 +197,7 @@ static int dw_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
- err = dw_dma_probe(chip, pdata);
+ err = dw_dma_probe(chip);
if (err)
goto err_dw_dma_probe;
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 0a50c18d8..4b7bd7834 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -114,10 +114,6 @@ struct dw_dma_regs {
#define dma_writel_native writel
#endif
-/* To access the registers in early stage of probe */
-#define dma_read_byaddr(addr, name) \
- dma_readl_native((addr) + offsetof(struct dw_dma_regs, name))
-
/* Bitfields in DW_PARAMS */
#define DW_PARAMS_NR_CHAN 8 /* number of channels */
#define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */
@@ -143,6 +139,10 @@ enum dw_dma_msize {
DW_DMA_MSIZE_256,
};
+/* Bitfields in LLP */
+#define DWC_LLP_LMS(x) ((x) & 3) /* list master select */
+#define DWC_LLP_LOC(x) ((x) & ~3) /* next lli */
+
/* Bitfields in CTL_LO */
#define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? */
#define DWC_CTLL_DST_WIDTH(n) ((n)<<1) /* bytes per element */
@@ -216,6 +216,8 @@ enum dw_dma_msize {
enum dw_dmac_flags {
DW_DMA_IS_CYCLIC = 0,
DW_DMA_IS_SOFT_LLP = 1,
+ DW_DMA_IS_PAUSED = 2,
+ DW_DMA_IS_INITIALIZED = 3,
};
struct dw_dma_chan {
@@ -224,8 +226,6 @@ struct dw_dma_chan {
u8 mask;
u8 priority;
enum dma_transfer_direction direction;
- bool paused;
- bool initialized;
/* software emulation of the LLP transfers */
struct list_head *tx_node_active;
@@ -236,8 +236,6 @@ struct dw_dma_chan {
unsigned long flags;
struct list_head active_list;
struct list_head queue;
- struct list_head free_list;
- u32 residue;
struct dw_cyclic_desc *cdesc;
unsigned int descs_allocated;
@@ -249,8 +247,8 @@ struct dw_dma_chan {
/* custom slave configuration */
u8 src_id;
u8 dst_id;
- u8 src_master;
- u8 dst_master;
+ u8 m_master;
+ u8 p_master;
/* configuration passed via .device_config */
struct dma_slave_config dma_sconfig;
@@ -283,9 +281,8 @@ struct dw_dma {
u8 all_chan_mask;
u8 in_use;
- /* hardware configuration */
- unsigned char nr_masters;
- unsigned char data_width[DW_DMA_MAX_NR_MASTERS];
+ /* platform data */
+ struct dw_dma_platform_data *pdata;
};
static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
@@ -308,32 +305,51 @@ static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
return container_of(ddev, struct dw_dma, dma);
}
+#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
+typedef __be32 __dw32;
+#else
+typedef __le32 __dw32;
+#endif
+
/* LLI == Linked List Item; a.k.a. DMA block descriptor */
struct dw_lli {
/* values that are not changed by hardware */
- u32 sar;
- u32 dar;
- u32 llp; /* chain to next lli */
- u32 ctllo;
+ __dw32 sar;
+ __dw32 dar;
+ __dw32 llp; /* chain to next lli */
+ __dw32 ctllo;
/* values that may get written back: */
- u32 ctlhi;
+ __dw32 ctlhi;
/* sstat and dstat can snapshot peripheral register state.
* silicon config may discard either or both...
*/
- u32 sstat;
- u32 dstat;
+ __dw32 sstat;
+ __dw32 dstat;
};
struct dw_desc {
/* FIRST values the hardware uses */
struct dw_lli lli;
+#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
+#define lli_set(d, reg, v) ((d)->lli.reg |= cpu_to_be32(v))
+#define lli_clear(d, reg, v) ((d)->lli.reg &= ~cpu_to_be32(v))
+#define lli_read(d, reg) be32_to_cpu((d)->lli.reg)
+#define lli_write(d, reg, v) ((d)->lli.reg = cpu_to_be32(v))
+#else
+#define lli_set(d, reg, v) ((d)->lli.reg |= cpu_to_le32(v))
+#define lli_clear(d, reg, v) ((d)->lli.reg &= ~cpu_to_le32(v))
+#define lli_read(d, reg) le32_to_cpu((d)->lli.reg)
+#define lli_write(d, reg, v) ((d)->lli.reg = cpu_to_le32(v))
+#endif
+
/* THEN values for driver housekeeping */
struct list_head desc_node;
struct list_head tx_list;
struct dma_async_tx_descriptor txd;
size_t len;
size_t total_len;
+ u32 residue;
};
#define to_dw_desc(h) list_entry(h, struct dw_desc, desc_node)
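
The __dw32 typedef and the lli_* accessors above keep descriptor memory in the controller's byte order whatever the CPU endianness; only the cpu_to_*32/*32_to_cpu pair differs between the two builds. A flattened standalone sketch of the little-endian flavour (host assumed little-endian, so the conversions are identities):

#include <stdio.h>
#include <stdint.h>

struct dw_lli { uint32_t sar; };		/* one field suffices here */

#define cpu_to_le32(v)	(v)			/* identity on an LE host */
#define le32_to_cpu(v)	(v)

#define lli_write(d, reg, v)	((d)->reg = cpu_to_le32(v))
#define lli_read(d, reg)	le32_to_cpu((d)->reg)

int main(void)
{
	struct dw_lli lli;

	lli_write(&lli, sar, 0x12345678u);
	printf("sar = 0x%08x\n", lli_read(&lli, sar));
	return 0;
}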
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 04070baab..8181ed131 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1537,8 +1537,17 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
dev_vdbg(ecc->dev, "dma_ccerr_handler\n");
- if (!edma_error_pending(ecc))
+ if (!edma_error_pending(ecc)) {
+ /*
+ * The registers indicate no pending error event but the irq
+ * handler has been called.
+ * Ask eDMA to re-evaluate the error registers.
+ */
+ dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
+ __func__);
+ edma_write(ecc, EDMA_EEVAL, 1);
return IRQ_NONE;
+ }
while (1) {
/* Event missed register(s) */
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index aac85c30c..a8828ed63 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -462,13 +462,12 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
struct fsl_desc_sw *desc;
dma_addr_t pdesc;
- desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
+ desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
if (!desc) {
chan_dbg(chan, "out of memory for link descriptor\n");
return NULL;
}
- memset(desc, 0, sizeof(*desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
desc->async_tx.tx_submit = fsl_dma_tx_submit;
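
The same conversion recurs in mmp_pdma and ioat below: dma_pool_zalloc() is dma_pool_alloc() followed by a zeroing memset() of the block, folded into one call. The shape of the change (a fragment, not the driver's exact context):

	/* before */
	desc = dma_pool_alloc(pool, GFP_ATOMIC, &phys);
	if (desc)
		memset(desc, 0, sizeof(*desc));

	/* after */
	desc = dma_pool_zalloc(pool, GFP_ATOMIC, &phys);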
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index ee510515c..f8c5cd533 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -77,8 +77,8 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);
/* Set descriptors */
- count = (desc->nents - desc->active) % HSU_DMA_CHAN_NR_DESC;
- for (i = 0; i < count; i++) {
+ count = desc->nents - desc->active;
+ for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) {
hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);
@@ -160,7 +160,7 @@ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
return IRQ_NONE;
/* Timeout IRQ, need to wait some time, see Errata 2 */
- if (hsuc->direction == DMA_DEV_TO_MEM && (sr & HSU_CH_SR_DESCTO_ANY))
+ if (sr & HSU_CH_SR_DESCTO_ANY)
udelay(2);
sr &= ~HSU_CH_SR_DESCTO_ANY;
@@ -420,6 +420,8 @@ int hsu_dma_probe(struct hsu_dma_chip *chip)
hsu->dma.dev = chip->dev;
+ dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK);
+
ret = dma_async_device_register(&hsu->dma);
if (ret)
return ret;
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 6b070c22b..486b023b3 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -58,6 +58,10 @@
#define HSU_CH_DCR_CHEI BIT(23)
#define HSU_CH_DCR_CHTOI(x) BIT(24 + (x))
+/* Bits in HSU_CH_DxTSR */
+#define HSU_CH_DxTSR_MASK GENMASK(15, 0)
+#define HSU_CH_DxTSR_TSR(x) ((x) & HSU_CH_DxTSR_MASK)
+
struct hsu_dma_sg {
dma_addr_t addr;
unsigned int len;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index efdee1a69..d406056e8 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -690,12 +690,11 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
/* allocate a completion writeback area */
/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
ioat_chan->completion =
- dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
- GFP_KERNEL, &ioat_chan->completion_dma);
+ dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
+ GFP_KERNEL, &ioat_chan->completion_dma);
if (!ioat_chan->completion)
return -ENOMEM;
- memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
writel(((u64)ioat_chan->completion_dma) >> 32,
@@ -1074,6 +1073,7 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
struct ioatdma_chan *ioat_chan;
bool is_raid_device = false;
int err;
+ u16 val16;
dma = &ioat_dma->dma_dev;
dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
@@ -1173,6 +1173,17 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
if (dca)
ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);
+ /* disable relaxed ordering */
+ err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16);
+ if (err)
+ return err;
+
+ /* clear relaxed ordering enable */
+ val16 &= ~IOAT_DEVCTRL_ROE;
+ err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16);
+ if (err)
+ return err;
+
return 0;
}
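
The read-modify-write above targets what the generic PCIe layer calls the Device Control register: IOAT_DEVCTRL_OFFSET 0x8 corresponds to PCI_EXP_DEVCTL, and IOAT_DEVCTRL_ROE 0x10 matches PCI_EXP_DEVCTL_RELAX_EN. An equivalent sketch using the stock helper (a fragment, not what the driver actually does):

	/* clear Relaxed Ordering Enable in one call */
	err = pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
					 PCI_EXP_DEVCTL_RELAX_EN);
	if (err)
		return err;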
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 4994a3623..70534981a 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -26,6 +26,13 @@
#define IOAT_PCI_CHANERR_INT_OFFSET 0x180
#define IOAT_PCI_CHANERRMASK_INT_OFFSET 0x184
+/* PCIe config registers */
+
+/* EXPCAPID + N */
+#define IOAT_DEVCTRL_OFFSET 0x8
+/* relaxed ordering enable */
+#define IOAT_DEVCTRL_ROE 0x10
+
/* MMIO Device Registers */
#define IOAT_CHANCNT_OFFSET 0x00 /* 8-bit */
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index e39457f13..56f1fd68b 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -364,13 +364,12 @@ mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
struct mmp_pdma_desc_sw *desc;
dma_addr_t pdesc;
- desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
+ desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
if (!desc) {
dev_err(chan->dev, "out of memory for link descriptor\n");
return NULL;
}
- memset(desc, 0, sizeof(*desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
/* each desc has submit */
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index aae76fb39..ccadafa51 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -3,6 +3,7 @@
* Copyright (C) Semihalf 2009
* Copyright (C) Ilya Yanok, Emcraft Systems 2010
* Copyright (C) Alexander Popov, Promcontroller 2014
+ * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016
*
* Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
* (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -26,18 +27,19 @@
*/
/*
- * MPC512x and MPC8308 DMA driver. It supports
- * memory to memory data transfers (tested using dmatest module) and
- * data transfers between memory and peripheral I/O memory
- * by means of slave scatter/gather with these limitations:
- * - chunked transfers (described by s/g lists with more than one item)
- * are refused as long as proper support for scatter/gather is missing;
- * - transfers on MPC8308 always start from software as this SoC appears
- * not to have external request lines for peripheral flow control;
- * - only peripheral devices with 4-byte FIFO access register are supported;
- * - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently
- * source and destination addresses must be 4-byte aligned
- * and transfer size must be aligned on (4 * maxburst) boundary;
+ * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers
+ * (tested using dmatest module) and data transfers between memory and
+ * peripheral I/O memory by means of slave scatter/gather with these
+ * limitations:
+ * - chunked transfers (described by s/g lists with more than one item) are
+ * refused as long as proper support for scatter/gather is missing
+ * - transfers on MPC8308 always start from software as this SoC does not have
+ * external request lines for peripheral flow control
+ * - memory <-> I/O memory transfer chunks of 1, 2, 4, 16 (for
+ *   MPC512x), and 32 bytes are supported, and, consequently, source
+ *   addresses and destination addresses must be aligned accordingly;
+ *   furthermore, for MPC512x SoCs, the transfer size must be aligned on
+ *   a (chunk size * maxburst) boundary
*/
#include <linux/module.h>
@@ -213,8 +215,10 @@ struct mpc_dma_chan {
/* Settings for access to peripheral FIFO */
dma_addr_t src_per_paddr;
u32 src_tcd_nunits;
+ u8 swidth;
dma_addr_t dst_per_paddr;
u32 dst_tcd_nunits;
+ u8 dwidth;
/* Lock for this structure */
spinlock_t lock;
@@ -247,6 +251,7 @@ static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
+
return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}
@@ -254,9 +259,9 @@ static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
* Execute all queued DMA descriptors.
*
* Following requirements must be met while calling mpc_dma_execute():
- * a) mchan->lock is acquired,
- * b) mchan->active list is empty,
- * c) mchan->queued list contains at least one entry.
+ * a) mchan->lock is acquired,
+ * b) mchan->active list is empty,
+ * c) mchan->queued list contains at least one entry.
*/
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
@@ -446,20 +451,15 @@ static void mpc_dma_tasklet(unsigned long data)
if (es & MPC_DMA_DMAES_SAE)
dev_err(mdma->dma.dev, "- Source Address Error\n");
if (es & MPC_DMA_DMAES_SOE)
- dev_err(mdma->dma.dev, "- Source Offset"
- " Configuration Error\n");
+ dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
if (es & MPC_DMA_DMAES_DAE)
- dev_err(mdma->dma.dev, "- Destination Address"
- " Error\n");
+ dev_err(mdma->dma.dev, "- Destination Address Error\n");
if (es & MPC_DMA_DMAES_DOE)
- dev_err(mdma->dma.dev, "- Destination Offset"
- " Configuration Error\n");
+ dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
if (es & MPC_DMA_DMAES_NCE)
- dev_err(mdma->dma.dev, "- NBytes/Citter"
- " Configuration Error\n");
+ dev_err(mdma->dma.dev, "- NBytes/Citter Configuration Error\n");
if (es & MPC_DMA_DMAES_SGE)
- dev_err(mdma->dma.dev, "- Scatter/Gather"
- " Configuration Error\n");
+ dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
if (es & MPC_DMA_DMAES_SBE)
dev_err(mdma->dma.dev, "- Source Bus Error\n");
if (es & MPC_DMA_DMAES_DBE)
@@ -518,8 +518,8 @@ static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
if (!mdesc) {
- dev_notice(mdma->dma.dev, "Memory allocation error. "
- "Allocated only %u descriptors\n", i);
+ dev_notice(mdma->dma.dev,
+ "Memory allocation error. Allocated only %u descriptors\n", i);
break;
}
@@ -684,6 +684,15 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
return &mdesc->desc;
}
+inline u8 buswidth_to_dmatsize(u8 buswidth)
+{
+ u8 res;
+
+ for (res = 0; buswidth > 1; buswidth /= 2)
+ res++;
+ return res;
+}
+
static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction,
@@ -742,39 +751,54 @@ mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
memset(tcd, 0, sizeof(struct mpc_dma_tcd));
- if (!IS_ALIGNED(sg_dma_address(sg), 4))
- goto err_prep;
-
if (direction == DMA_DEV_TO_MEM) {
tcd->saddr = per_paddr;
tcd->daddr = sg_dma_address(sg);
+
+ if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth))
+ goto err_prep;
+
tcd->soff = 0;
- tcd->doff = 4;
+ tcd->doff = mchan->dwidth;
} else {
tcd->saddr = sg_dma_address(sg);
tcd->daddr = per_paddr;
- tcd->soff = 4;
+
+ if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth))
+ goto err_prep;
+
+ tcd->soff = mchan->swidth;
tcd->doff = 0;
}
- tcd->ssize = MPC_DMA_TSIZE_4;
- tcd->dsize = MPC_DMA_TSIZE_4;
+ tcd->ssize = buswidth_to_dmatsize(mchan->swidth);
+ tcd->dsize = buswidth_to_dmatsize(mchan->dwidth);
- len = sg_dma_len(sg);
- tcd->nbytes = tcd_nunits * 4;
- if (!IS_ALIGNED(len, tcd->nbytes))
- goto err_prep;
+ if (mdma->is_mpc8308) {
+ tcd->nbytes = sg_dma_len(sg);
+ if (!IS_ALIGNED(tcd->nbytes, mchan->swidth))
+ goto err_prep;
- iter = len / tcd->nbytes;
- if (iter >= 1 << 15) {
- /* len is too big */
- goto err_prep;
+ /* No major loops for MPC8308 */
+ tcd->biter = 1;
+ tcd->citer = 1;
+ } else {
+ len = sg_dma_len(sg);
+ tcd->nbytes = tcd_nunits * tcd->ssize;
+ if (!IS_ALIGNED(len, tcd->nbytes))
+ goto err_prep;
+
+ iter = len / tcd->nbytes;
+ if (iter >= 1 << 15) {
+ /* len is too big */
+ goto err_prep;
+ }
+ /* citer_linkch contains the high bits of iter */
+ tcd->biter = iter & 0x1ff;
+ tcd->biter_linkch = iter >> 9;
+ tcd->citer = tcd->biter;
+ tcd->citer_linkch = tcd->biter_linkch;
}
- /* citer_linkch contains the high bits of iter */
- tcd->biter = iter & 0x1ff;
- tcd->biter_linkch = iter >> 9;
- tcd->citer = tcd->biter;
- tcd->citer_linkch = tcd->biter_linkch;
tcd->e_sg = 0;
tcd->d_req = 1;
@@ -796,40 +820,62 @@ err_prep:
return NULL;
}
+inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
+{
+ switch (buswidth) {
+ case 16:
+ if (is_mpc8308)
+ return false;
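+ /* fall through: 16 bytes is valid only on MPC512x */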
+ case 1:
+ case 2:
+ case 4:
+ case 32:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static int mpc_dma_device_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
unsigned long flags;
/*
* Software constraints:
- * - only transfers between a peripheral device and
- * memory are supported;
- * - only peripheral devices with 4-byte FIFO access register
- * are supported;
- * - minimal transfer chunk is 4 bytes and consequently
- * source and destination addresses must be 4-byte aligned
- * and transfer size must be aligned on (4 * maxburst)
- * boundary;
- * - during the transfer RAM address is being incremented by
- * the size of minimal transfer chunk;
- * - peripheral port's address is constant during the transfer.
+ * - only transfers between a peripheral device and memory are
+ * supported
+ * - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes
+ *   are supported, and, consequently, source addresses and
+ *   destination addresses must be aligned accordingly; furthermore,
+ *   for MPC512x SoCs, the transfer size must be aligned on a (chunk
+ *   size * maxburst) boundary
+ * - during the transfer, the RAM address is incremented by the size
+ * of transfer chunk
+ * - the peripheral port's address is constant during the transfer.
*/
- if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
- cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
- !IS_ALIGNED(cfg->src_addr, 4) ||
- !IS_ALIGNED(cfg->dst_addr, 4)) {
+ if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) ||
+ !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) {
return -EINVAL;
}
+ if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) ||
+ !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308))
+ return -EINVAL;
+
spin_lock_irqsave(&mchan->lock, flags);
mchan->src_per_paddr = cfg->src_addr;
mchan->src_tcd_nunits = cfg->src_maxburst;
+ mchan->swidth = cfg->src_addr_width;
mchan->dst_per_paddr = cfg->dst_addr;
mchan->dst_tcd_nunits = cfg->dst_maxburst;
+ mchan->dwidth = cfg->dst_addr_width;
/* Apply defaults */
if (mchan->src_tcd_nunits == 0)
@@ -875,7 +921,6 @@ static int mpc_dma_probe(struct platform_device *op)
mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
if (!mdma) {
- dev_err(dev, "Memory exhausted!\n");
retval = -ENOMEM;
goto err;
}
@@ -999,7 +1044,8 @@ static int mpc_dma_probe(struct platform_device *op)
out_be32(&mdma->regs->dmaerrl, 0xFFFF);
} else {
out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
- MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
+ MPC_DMA_DMACR_ERGA |
+ MPC_DMA_DMACR_ERCA);
/* Disable hardware DMA requests */
out_be32(&mdma->regs->dmaerqh, 0);
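
buswidth_to_dmatsize() above is an integer log2, which is exactly the TSIZE field encoding the driver previously hard-coded as MPC_DMA_TSIZE_4 (= 2). A standalone check over the widths accepted by is_buswidth_valid():

#include <stdio.h>

static unsigned char buswidth_to_dmatsize(unsigned char buswidth)
{
	unsigned char res;

	for (res = 0; buswidth > 1; buswidth /= 2)
		res++;
	return res;
}

int main(void)
{
	unsigned char widths[] = { 1, 2, 4, 16, 32 };
	unsigned int i;

	/* expected mapping: 1->0, 2->1, 4->2, 16->4, 32->5 */
	for (i = 0; i < sizeof(widths); i++)
		printf("%u bytes -> TSIZE %u\n", (unsigned)widths[i],
		       (unsigned)buswidth_to_dmatsize(widths[i]));
	return 0;
}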
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 3922a5d56..d0446a759 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -31,6 +31,12 @@
#include "dmaengine.h"
#include "mv_xor.h"
+enum mv_xor_type {
+ XOR_ORION,
+ XOR_ARMADA_38X,
+ XOR_ARMADA_37XX,
+};
+
enum mv_xor_mode {
XOR_MODE_IN_REG,
XOR_MODE_IN_DESC,
@@ -477,7 +483,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
dev_dbg(mv_chan_to_devp(mv_chan),
- "%s src_cnt: %d len: %u dest %pad flags: %ld\n",
+ "%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
__func__, src_cnt, len, &dest, flags);
sw_desc = mv_chan_alloc_slot(mv_chan);
@@ -697,8 +703,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
goto free_resources;
}
- src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
- PAGE_SIZE, DMA_TO_DEVICE);
+ src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
+ (size_t)src & ~PAGE_MASK, PAGE_SIZE,
+ DMA_TO_DEVICE);
unmap->addr[0] = src_dma;
ret = dma_mapping_error(dma_chan->device->dev, src_dma);
@@ -708,8 +715,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
}
unmap->to_cnt = 1;
- dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
+ (size_t)dest & ~PAGE_MASK, PAGE_SIZE,
+ DMA_FROM_DEVICE);
unmap->addr[1] = dest_dma;
ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
@@ -933,7 +941,7 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
struct platform_device *pdev,
- int idx, dma_cap_mask_t cap_mask, int irq, int op_in_desc)
+ int idx, dma_cap_mask_t cap_mask, int irq)
{
int ret = 0;
struct mv_xor_chan *mv_chan;
@@ -945,7 +953,10 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
mv_chan->idx = idx;
mv_chan->irq = irq;
- mv_chan->op_in_desc = op_in_desc;
+ if (xordev->xor_type == XOR_ORION)
+ mv_chan->op_in_desc = XOR_MODE_IN_REG;
+ else
+ mv_chan->op_in_desc = XOR_MODE_IN_DESC;
dma_dev = &mv_chan->dmadev;
@@ -1085,6 +1096,33 @@ mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
+static void
+mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
+{
+ void __iomem *base = xordev->xor_high_base;
+ u32 win_enable = 0;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ writel(0, base + WINDOW_BASE(i));
+ writel(0, base + WINDOW_SIZE(i));
+ if (i < 4)
+ writel(0, base + WINDOW_REMAP_HIGH(i));
+ }
+ /*
+ * For Armada 3700, open a default 4GB MBus window. The DRAM-
+ * related configuration is done at the AXIS level.
+ */
+ writel(0xffff0000, base + WINDOW_SIZE(0));
+ win_enable |= 1;
+ win_enable |= 3 << 16;
+
+ writel(win_enable, base + WINDOW_BAR_ENABLE(0));
+ writel(win_enable, base + WINDOW_BAR_ENABLE(1));
+ writel(0, base + WINDOW_OVERRIDE_CTRL(0));
+ writel(0, base + WINDOW_OVERRIDE_CTRL(1));
+}
+
/*
* Since this XOR driver is basically used only for RAID5, we don't
* need to care about synchronizing ->suspend with DMA activity,
@@ -1129,6 +1167,11 @@ static int mv_xor_resume(struct platform_device *dev)
XOR_INTR_MASK(mv_chan));
}
+ if (xordev->xor_type == XOR_ARMADA_37XX) {
+ mv_xor_conf_mbus_windows_a3700(xordev);
+ return 0;
+ }
+
dram = mv_mbus_dram_info();
if (dram)
mv_xor_conf_mbus_windows(xordev, dram);
@@ -1137,8 +1180,9 @@ static int mv_xor_resume(struct platform_device *dev)
}
static const struct of_device_id mv_xor_dt_ids[] = {
- { .compatible = "marvell,orion-xor", .data = (void *)XOR_MODE_IN_REG },
- { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
+ { .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
+ { .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
+ { .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
{},
};
@@ -1152,7 +1196,6 @@ static int mv_xor_probe(struct platform_device *pdev)
struct resource *res;
unsigned int max_engines, max_channels;
int i, ret;
- int op_in_desc;
dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
@@ -1180,12 +1223,30 @@ static int mv_xor_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, xordev);
+
+ /*
+ * We need to know which type of XOR device we use before
+ * setting up. In the non-DT case it can only be the legacy one.
+ */
+ xordev->xor_type = XOR_ORION;
+ if (pdev->dev.of_node) {
+ const struct of_device_id *of_id =
+ of_match_device(mv_xor_dt_ids,
+ &pdev->dev);
+
+ xordev->xor_type = (uintptr_t)of_id->data;
+ }
+
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- dram = mv_mbus_dram_info();
- if (dram)
- mv_xor_conf_mbus_windows(xordev, dram);
+ if (xordev->xor_type == XOR_ARMADA_37XX) {
+ mv_xor_conf_mbus_windows_a3700(xordev);
+ } else {
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_xor_conf_mbus_windows(xordev, dram);
+ }
/* Not all platforms can gate the clock, so it is not
* an error if the clock does not exist.
@@ -1199,12 +1260,16 @@ static int mv_xor_probe(struct platform_device *pdev)
* order for async_tx to perform well. So we limit the number
* of engines and channels so that we take into account this
* constraint. Note that we also want to use channels from
- * separate engines when possible.
+ * separate engines when possible. For dual-CPU Armada 3700
+ * SoC with single XOR engine allow using its both channels.
*/
max_engines = num_present_cpus();
- max_channels = min_t(unsigned int,
- MV_XOR_MAX_CHANNELS,
- DIV_ROUND_UP(num_present_cpus(), 2));
+ if (xordev->xor_type == XOR_ARMADA_37XX)
+ max_channels = num_present_cpus();
+ else
+ max_channels = min_t(unsigned int,
+ MV_XOR_MAX_CHANNELS,
+ DIV_ROUND_UP(num_present_cpus(), 2));
if (mv_xor_engine_count >= max_engines)
return 0;
@@ -1212,15 +1277,11 @@ static int mv_xor_probe(struct platform_device *pdev)
if (pdev->dev.of_node) {
struct device_node *np;
int i = 0;
- const struct of_device_id *of_id =
- of_match_device(mv_xor_dt_ids,
- &pdev->dev);
for_each_child_of_node(pdev->dev.of_node, np) {
struct mv_xor_chan *chan;
dma_cap_mask_t cap_mask;
int irq;
- op_in_desc = (int)of_id->data;
if (i >= max_channels)
continue;
@@ -1237,7 +1298,7 @@ static int mv_xor_probe(struct platform_device *pdev)
}
chan = mv_xor_channel_add(xordev, pdev, i,
- cap_mask, irq, op_in_desc);
+ cap_mask, irq);
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
irq_dispose_mapping(irq);
@@ -1266,8 +1327,7 @@ static int mv_xor_probe(struct platform_device *pdev)
}
chan = mv_xor_channel_add(xordev, pdev, i,
- cd->cap_mask, irq,
- XOR_MODE_IN_REG);
+ cd->cap_mask, irq);
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
goto err_channel_add;
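
The 0xffff0000 written to WINDOW_SIZE(0) in mv_xor_conf_mbus_windows_a3700() above appears to follow the usual mvebu MBus convention (an assumption here, not stated in the patch): bits [31:16] hold size/64KiB - 1, so 0xffff selects the full 4GB window. A quick standalone check of that reading:

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0xffff0000u;		/* value written above */
	/* assumed encoding: bits [31:16] = size/64KiB - 1 */
	unsigned long long size = ((unsigned long long)(reg >> 16) + 1) << 16;

	printf("window size = %llu bytes (%llu GiB)\n", size, size >> 30);
	return 0;
}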
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index c19fe30e5..bf56e082e 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -85,6 +85,7 @@ struct mv_xor_device {
void __iomem *xor_high_base;
struct clk *clk;
struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS];
+ int xor_type;
};
/**
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 1e1f2986e..faae0bfe1 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -240,8 +240,9 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
struct of_phandle_args dma_spec;
struct of_dma *ofdma;
struct dma_chan *chan;
- int count, i;
+ int count, i, start;
int ret_no_channel = -ENODEV;
+ static atomic_t last_index;
if (!np || !name) {
pr_err("%s: not enough information provided\n", __func__);
@@ -259,8 +260,15 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
return ERR_PTR(-ENODEV);
}
+ /*
+ * approximate an average distribution across multiple
+ * entries with the same name
+ */
+ start = atomic_inc_return(&last_index);
for (i = 0; i < count; i++) {
- if (of_dma_match_channel(np, name, i, &dma_spec))
+ if (of_dma_match_channel(np, name,
+ (i + start) % count,
+ &dma_spec))
continue;
mutex_lock(&of_dma_lock);
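
The atomic last_index above shifts the starting probe point on every request, so equally-named DT entries are handed out approximately round-robin. A standalone sketch of the rotation:

#include <stdio.h>

int main(void)
{
	int count = 3, start, i;

	/* three successive requests, each with a larger start value */
	for (start = 1; start <= 3; start++) {
		printf("request %d probes entries:", start);
		for (i = 0; i < count; i++)
			printf(" %d", (i + start) % count);
		printf("\n");
	}
	return 0;
}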
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 77c1c4400..e756a30cc 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -117,6 +117,7 @@ struct pxad_chan {
/* protected by vc->lock */
struct pxad_phy *phy;
struct dma_pool *desc_pool; /* Descriptors pool */
+ dma_cookie_t bus_error;
};
struct pxad_device {
@@ -563,6 +564,7 @@ static void pxad_launch_chan(struct pxad_chan *chan,
return;
}
}
+ chan->bus_error = 0;
/*
* Program the descriptor's address into the DMA controller,
@@ -666,6 +668,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
struct virt_dma_desc *vd, *tmp;
unsigned int dcsr;
unsigned long flags;
+ dma_cookie_t last_started = 0;
BUG_ON(!chan);
@@ -678,6 +681,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
dev_dbg(&chan->vc.chan.dev->device,
"%s(): checking txd %p[%x]: completed=%d\n",
__func__, vd, vd->tx.cookie, is_desc_completed(vd));
+ last_started = vd->tx.cookie;
if (to_pxad_sw_desc(vd)->cyclic) {
vchan_cyclic_callback(vd);
break;
@@ -690,7 +694,12 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
}
}
- if (dcsr & PXA_DCSR_STOPSTATE) {
+ if (dcsr & PXA_DCSR_BUSERR) {
+ chan->bus_error = last_started;
+ phy_disable(phy);
+ }
+
+ if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
dev_dbg(&chan->vc.chan.dev->device,
"%s(): channel stopped, submitted_empty=%d issued_empty=%d",
__func__,
@@ -1249,6 +1258,9 @@ static enum dma_status pxad_tx_status(struct dma_chan *dchan,
struct pxad_chan *chan = to_pxad_chan(dchan);
enum dma_status ret;
+ if (cookie == chan->bus_error)
+ return DMA_ERROR;
+
ret = dma_cookie_status(dchan, cookie, txstate);
if (likely(txstate && (ret != DMA_ERROR)))
dma_set_residue(txstate, pxad_residue(chan, cookie));
@@ -1321,7 +1333,7 @@ static int pxad_init_phys(struct platform_device *op,
return 0;
}
-static const struct of_device_id const pxad_dt_ids[] = {
+static const struct of_device_id pxad_dt_ids[] = {
{ .compatible = "marvell,pdma-1.0", },
{}
};
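
The effect of the bus_error bookkeeping above is visible through the standard dmaengine status query: the cookie that was in flight when PXA_DCSR_BUSERR fired now reports DMA_ERROR. A client-side fragment (recover() is a hypothetical placeholder):

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_ERROR)
		recover();	/* hypothetical: transfer hit a bus error */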
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
index bfea69902..4bfc38b45 100644
--- a/drivers/dma/qcom/Makefile
+++ b/drivers/dma/qcom/Makefile
@@ -1,3 +1,5 @@
obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o
+obj-$(CONFIG_QCOM_HIDMA) += hdma.o
+hdma-objs := hidma_ll.o hidma.o hidma_dbg.o
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index d5e0a9c3a..969b48176 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -342,7 +342,7 @@ static const struct reg_offset_data bam_v1_7_reg_info[] = {
#define BAM_DESC_FIFO_SIZE SZ_32K
#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
-#define BAM_MAX_DATA_SIZE (SZ_32K - 8)
+#define BAM_FIFO_SIZE (SZ_32K - 8)
struct bam_chan {
struct virt_dma_chan vc;
@@ -387,6 +387,7 @@ struct bam_device {
/* execution environment ID, from DT */
u32 ee;
+ bool controlled_remotely;
const struct reg_offset_data *layout;
@@ -458,7 +459,7 @@ static void bam_chan_init_hw(struct bam_chan *bchan,
*/
writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
- writel_relaxed(BAM_DESC_FIFO_SIZE,
+ writel_relaxed(BAM_FIFO_SIZE,
bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));
/* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
@@ -604,7 +605,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
/* calculate number of required entries */
for_each_sg(sgl, sg, sg_len, i)
- num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE);
+ num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);
/* allocate enough room to accommodate the number of entries */
async_desc = kzalloc(sizeof(*async_desc) +
@@ -635,10 +636,10 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
desc->addr = cpu_to_le32(sg_dma_address(sg) +
curr_offset);
- if (remainder > BAM_MAX_DATA_SIZE) {
- desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE);
- remainder -= BAM_MAX_DATA_SIZE;
- curr_offset += BAM_MAX_DATA_SIZE;
+ if (remainder > BAM_FIFO_SIZE) {
+ desc->size = cpu_to_le16(BAM_FIFO_SIZE);
+ remainder -= BAM_FIFO_SIZE;
+ curr_offset += BAM_FIFO_SIZE;
} else {
desc->size = cpu_to_le16(remainder);
remainder = 0;
@@ -801,13 +802,17 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
if (srcs & P_IRQ)
tasklet_schedule(&bdev->task);
- if (srcs & BAM_IRQ)
+ if (srcs & BAM_IRQ) {
clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
- /* don't allow reorder of the various accesses to the BAM registers */
- mb();
+ /*
+ * don't allow reorder of the various accesses to the BAM
+ * registers
+ */
+ mb();
- writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
+ writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
+ }
return IRQ_HANDLED;
}
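
The *_relaxed() accessors used above are not ordered against each other, so the explicit mb() is what keeps the status read ahead of the acknowledging write. The bare shape of the pattern (a fragment, register names hypothetical):

	clr_mask = readl_relaxed(status_reg);	/* sample pending bits */
	mb();					/* read must complete first */
	writel_relaxed(clr_mask, clear_reg);	/* ack exactly what was seen */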
@@ -1038,6 +1043,9 @@ static int bam_init(struct bam_device *bdev)
val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
bdev->num_channels = val & BAM_NUM_PIPES_MASK;
+ if (bdev->controlled_remotely)
+ return 0;
+
/* s/w reset bam */
/* after reset all pipes are disabled and idle */
val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
@@ -1125,6 +1133,9 @@ static int bam_dma_probe(struct platform_device *pdev)
return ret;
}
+ bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
+ "qcom,controlled-remotely");
+
bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
if (IS_ERR(bdev->bamclk))
return PTR_ERR(bdev->bamclk);
@@ -1163,7 +1174,7 @@ static int bam_dma_probe(struct platform_device *pdev)
/* set max dma segment size */
bdev->common.dev = bdev->dev;
bdev->common.dev->dma_parms = &bdev->dma_parms;
- ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
+ ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
if (ret) {
dev_err(bdev->dev, "cannot set maximum segment size\n");
goto err_bam_channel_exit;
@@ -1234,6 +1245,9 @@ static int bam_dma_remove(struct platform_device *pdev)
bam_dma_terminate_all(&bdev->channels[i].vc.chan);
tasklet_kill(&bdev->channels[i].vc.task);
+ if (!bdev->channels[i].fifo_virt)
+ continue;
+
dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
bdev->channels[i].fifo_virt,
bdev->channels[i].fifo_phys);
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index cccc78efb..41b5c6dee 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -1,7 +1,7 @@
/*
* Qualcomm Technologies HIDMA DMA engine interface
*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -404,7 +404,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
spin_unlock_irqrestore(&mchan->lock, irqflags);
/* this suspends the existing transfer */
- rc = hidma_ll_pause(dmadev->lldev);
+ rc = hidma_ll_disable(dmadev->lldev);
if (rc) {
dev_err(dmadev->ddev.dev, "channel did not pause\n");
goto out;
@@ -427,7 +427,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
list_move(&mdesc->node, &mchan->free);
}
- rc = hidma_ll_resume(dmadev->lldev);
+ rc = hidma_ll_enable(dmadev->lldev);
out:
pm_runtime_mark_last_busy(dmadev->ddev.dev);
pm_runtime_put_autosuspend(dmadev->ddev.dev);
@@ -488,7 +488,7 @@ static int hidma_pause(struct dma_chan *chan)
dmadev = to_hidma_dev(mchan->chan.device);
if (!mchan->paused) {
pm_runtime_get_sync(dmadev->ddev.dev);
- if (hidma_ll_pause(dmadev->lldev))
+ if (hidma_ll_disable(dmadev->lldev))
dev_warn(dmadev->ddev.dev, "channel did not stop\n");
mchan->paused = true;
pm_runtime_mark_last_busy(dmadev->ddev.dev);
@@ -507,7 +507,7 @@ static int hidma_resume(struct dma_chan *chan)
dmadev = to_hidma_dev(mchan->chan.device);
if (mchan->paused) {
pm_runtime_get_sync(dmadev->ddev.dev);
- rc = hidma_ll_resume(dmadev->lldev);
+ rc = hidma_ll_enable(dmadev->lldev);
if (!rc)
mchan->paused = false;
else
@@ -530,6 +530,43 @@ static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
return hidma_ll_inthandler(chirq, lldev);
}
+static ssize_t hidma_show_values(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct hidma_dev *mdev = platform_get_drvdata(pdev);
+
+ buf[0] = 0;
+
+ if (strcmp(attr->attr.name, "chid") == 0)
+ sprintf(buf, "%d\n", mdev->chidx);
+
+ return strlen(buf);
+}
+
+static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
+ int mode)
+{
+ struct device_attribute *attrs;
+ char *name_copy;
+
+ attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
+ GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
+ if (!name_copy)
+ return -ENOMEM;
+
+ attrs->attr.name = name_copy;
+ attrs->attr.mode = mode;
+ attrs->show = hidma_show_values;
+ sysfs_attr_init(&attrs->attr);
+
+ return device_create_file(dev->ddev.dev, attrs);
+}
+
static int hidma_probe(struct platform_device *pdev)
{
struct hidma_dev *dmadev;
@@ -644,6 +681,8 @@ static int hidma_probe(struct platform_device *pdev)
dmadev->irq = chirq;
tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
+ hidma_debug_init(dmadev);
+ hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
platform_set_drvdata(pdev, dmadev);
pm_runtime_mark_last_busy(dmadev->ddev.dev);
@@ -651,6 +690,7 @@ static int hidma_probe(struct platform_device *pdev)
return 0;
uninit:
+ hidma_debug_uninit(dmadev);
hidma_ll_uninit(dmadev->lldev);
dmafree:
if (dmadev)
@@ -668,6 +708,7 @@ static int hidma_remove(struct platform_device *pdev)
pm_runtime_get_sync(dmadev->ddev.dev);
dma_async_device_unregister(&dmadev->ddev);
devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ hidma_debug_uninit(dmadev);
hidma_ll_uninit(dmadev->lldev);
hidma_free(dmadev);
@@ -689,7 +730,6 @@ static const struct of_device_id hidma_match[] = {
{.compatible = "qcom,hidma-1.0",},
{},
};
-
MODULE_DEVICE_TABLE(of, hidma_match);
static struct platform_driver hidma_driver = {
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index 231e306f6..db413a5ef 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -1,7 +1,7 @@
/*
* Qualcomm Technologies HIDMA data structures
*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,32 +20,29 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
-#define TRE_SIZE 32 /* each TRE is 32 bytes */
-#define TRE_CFG_IDX 0
-#define TRE_LEN_IDX 1
-#define TRE_SRC_LOW_IDX 2
-#define TRE_SRC_HI_IDX 3
-#define TRE_DEST_LOW_IDX 4
-#define TRE_DEST_HI_IDX 5
-
-struct hidma_tx_status {
- u8 err_info; /* error record in this transfer */
- u8 err_code; /* completion code */
-};
+#define HIDMA_TRE_SIZE 32 /* each TRE is 32 bytes */
+#define HIDMA_TRE_CFG_IDX 0
+#define HIDMA_TRE_LEN_IDX 1
+#define HIDMA_TRE_SRC_LOW_IDX 2
+#define HIDMA_TRE_SRC_HI_IDX 3
+#define HIDMA_TRE_DEST_LOW_IDX 4
+#define HIDMA_TRE_DEST_HI_IDX 5
struct hidma_tre {
atomic_t allocated; /* if this channel is allocated */
bool queued; /* flag whether this is pending */
u16 status; /* status */
- u32 chidx; /* index of the tre */
+ u32 idx; /* index of the tre */
u32 dma_sig; /* signature of the tre */
const char *dev_name; /* name of the device */
void (*callback)(void *data); /* requester callback */
void *data; /* Data associated with this channel*/
struct hidma_lldev *lldev; /* lldma device pointer */
- u32 tre_local[TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */
+ u32 tre_local[HIDMA_TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */
u32 tre_index; /* the offset where this was written*/
u32 int_flags; /* interrupt flags */
+ u8 err_info; /* error record in this transfer */
+ u8 err_code; /* completion code */
};
struct hidma_lldev {
@@ -61,22 +58,21 @@ struct hidma_lldev {
void __iomem *evca; /* Event Channel address */
struct hidma_tre
**pending_tre_list; /* Pointers to pending TREs */
- struct hidma_tx_status
- *tx_status_list; /* Pointers to pending TREs status*/
s32 pending_tre_count; /* Number of TREs pending */
void *tre_ring; /* TRE ring */
- dma_addr_t tre_ring_handle; /* TRE ring to be shared with HW */
+ dma_addr_t tre_dma; /* TRE ring to be shared with HW */
u32 tre_ring_size; /* Byte size of the ring */
u32 tre_processed_off; /* last processed TRE */
void *evre_ring; /* EVRE ring */
- dma_addr_t evre_ring_handle; /* EVRE ring to be shared with HW */
+ dma_addr_t evre_dma; /* EVRE ring to be shared with HW */
u32 evre_ring_size; /* Byte size of the ring */
u32 evre_processed_off; /* last processed EVRE */
u32 tre_write_offset; /* TRE write location */
struct tasklet_struct task; /* task delivering notifications */
+ struct tasklet_struct rst_task; /* task to reset HW */
DECLARE_KFIFO_PTR(handoff_fifo,
struct hidma_tre *); /* pending TREs FIFO */
};
@@ -145,8 +141,8 @@ enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
void hidma_ll_start(struct hidma_lldev *llhndl);
-int hidma_ll_pause(struct hidma_lldev *llhndl);
-int hidma_ll_resume(struct hidma_lldev *llhndl);
+int hidma_ll_disable(struct hidma_lldev *lldev);
+int hidma_ll_enable(struct hidma_lldev *llhndl);
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
int hidma_ll_setup(struct hidma_lldev *lldev);
@@ -157,4 +153,6 @@ int hidma_ll_uninit(struct hidma_lldev *llhndl);
irqreturn_t hidma_ll_inthandler(int irq, void *arg);
void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
u8 err_code);
+int hidma_debug_init(struct hidma_dev *dmadev);
+void hidma_debug_uninit(struct hidma_dev *dmadev);
#endif
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c
new file mode 100644
index 000000000..fa827e5ff
--- /dev/null
+++ b/drivers/dma/qcom/hidma_dbg.c
@@ -0,0 +1,217 @@
+/*
+ * Qualcomm Technologies HIDMA debug file
+ *
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/pm_runtime.h>
+
+#include "hidma.h"
+
+static void hidma_ll_chstats(struct seq_file *s, void *llhndl, u32 tre_ch)
+{
+ struct hidma_lldev *lldev = llhndl;
+ struct hidma_tre *tre;
+ u32 length;
+ dma_addr_t src_start;
+ dma_addr_t dest_start;
+ u32 *tre_local;
+
+ if (tre_ch >= lldev->nr_tres) {
+ dev_err(lldev->dev, "invalid TRE number in chstats:%d", tre_ch);
+ return;
+ }
+ tre = &lldev->trepool[tre_ch];
+ seq_printf(s, "------Channel %d -----\n", tre_ch);
+ seq_printf(s, "allocated=%d\n", atomic_read(&tre->allocated));
+ seq_printf(s, "queued = 0x%x\n", tre->queued);
+ seq_printf(s, "err_info = 0x%x\n", tre->err_info);
+ seq_printf(s, "err_code = 0x%x\n", tre->err_code);
+ seq_printf(s, "status = 0x%x\n", tre->status);
+ seq_printf(s, "idx = 0x%x\n", tre->idx);
+ seq_printf(s, "dma_sig = 0x%x\n", tre->dma_sig);
+ seq_printf(s, "dev_name=%s\n", tre->dev_name);
+ seq_printf(s, "callback=%p\n", tre->callback);
+ seq_printf(s, "data=%p\n", tre->data);
+ seq_printf(s, "tre_index = 0x%x\n", tre->tre_index);
+
+ tre_local = &tre->tre_local[0];
+ src_start = tre_local[HIDMA_TRE_SRC_LOW_IDX];
+ src_start = ((u64) (tre_local[HIDMA_TRE_SRC_HI_IDX]) << 32) + src_start;
+ dest_start = tre_local[HIDMA_TRE_DEST_LOW_IDX];
+ dest_start += ((u64) (tre_local[HIDMA_TRE_DEST_HI_IDX]) << 32);
+ length = tre_local[HIDMA_TRE_LEN_IDX];
+
+ seq_printf(s, "src=%pap\n", &src_start);
+ seq_printf(s, "dest=%pap\n", &dest_start);
+ seq_printf(s, "length = 0x%x\n", length);
+}
+
+static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
+{
+ struct hidma_lldev *lldev = llhndl;
+
+ seq_puts(s, "------Device -----\n");
+ seq_printf(s, "lldev init = 0x%x\n", lldev->initialized);
+ seq_printf(s, "trch_state = 0x%x\n", lldev->trch_state);
+ seq_printf(s, "evch_state = 0x%x\n", lldev->evch_state);
+ seq_printf(s, "chidx = 0x%x\n", lldev->chidx);
+ seq_printf(s, "nr_tres = 0x%x\n", lldev->nr_tres);
+ seq_printf(s, "trca=%p\n", lldev->trca);
+ seq_printf(s, "tre_ring=%p\n", lldev->tre_ring);
+ seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma);
+ seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size);
+ seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off);
+ seq_printf(s, "pending_tre_count=%d\n", lldev->pending_tre_count);
+ seq_printf(s, "evca=%p\n", lldev->evca);
+ seq_printf(s, "evre_ring=%p\n", lldev->evre_ring);
+ seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma);
+ seq_printf(s, "evre_ring_size = 0x%x\n", lldev->evre_ring_size);
+ seq_printf(s, "evre_processed_off = 0x%x\n", lldev->evre_processed_off);
+ seq_printf(s, "tre_write_offset = 0x%x\n", lldev->tre_write_offset);
+}
+
+/*
+ * hidma_chan_stats: display HIDMA channel statistics
+ *
+ * Display the statistics for the current HIDMA virtual channel device.
+ */
+static int hidma_chan_stats(struct seq_file *s, void *unused)
+{
+ struct hidma_chan *mchan = s->private;
+ struct hidma_desc *mdesc;
+ struct hidma_dev *dmadev = mchan->dmadev;
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ seq_printf(s, "paused=%u\n", mchan->paused);
+ seq_printf(s, "dma_sig=%u\n", mchan->dma_sig);
+ seq_puts(s, "prepared\n");
+ list_for_each_entry(mdesc, &mchan->prepared, node)
+ hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);
+
+ seq_puts(s, "active\n");
+ list_for_each_entry(mdesc, &mchan->active, node)
+ hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);
+
+ seq_puts(s, "completed\n");
+ list_for_each_entry(mdesc, &mchan->completed, node)
+ hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);
+
+ hidma_ll_devstats(s, mchan->dmadev->lldev);
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return 0;
+}
+
+/*
+ * hidma_dma_info: display HIDMA device info
+ *
+ * Display the info for the current HIDMA device.
+ */
+static int hidma_dma_info(struct seq_file *s, void *unused)
+{
+ struct hidma_dev *dmadev = s->private;
+ resource_size_t sz;
+
+ seq_printf(s, "nr_descriptors=%d\n", dmadev->nr_descriptors);
+ seq_printf(s, "dev_trca=%p\n", &dmadev->dev_trca);
+ seq_printf(s, "dev_trca_phys=%pa\n", &dmadev->trca_resource->start);
+ sz = resource_size(dmadev->trca_resource);
+ seq_printf(s, "dev_trca_size=%pa\n", &sz);
+ seq_printf(s, "dev_evca=%p\n", &dmadev->dev_evca);
+ seq_printf(s, "dev_evca_phys=%pa\n", &dmadev->evca_resource->start);
+ sz = resource_size(dmadev->evca_resource);
+ seq_printf(s, "dev_evca_size=%pa\n", &sz);
+ return 0;
+}
+
+static int hidma_chan_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hidma_chan_stats, inode->i_private);
+}
+
+static int hidma_dma_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hidma_dma_info, inode->i_private);
+}
+
+static const struct file_operations hidma_chan_fops = {
+ .open = hidma_chan_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations hidma_dma_fops = {
+ .open = hidma_dma_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void hidma_debug_uninit(struct hidma_dev *dmadev)
+{
+ debugfs_remove_recursive(dmadev->debugfs);
+ debugfs_remove_recursive(dmadev->stats);
+}
+
+int hidma_debug_init(struct hidma_dev *dmadev)
+{
+ int rc = 0;
+ int chidx = 0;
+ struct list_head *position = NULL;
+
+ dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL);
+ if (!dmadev->debugfs) {
+ rc = -ENODEV;
+ return rc;
+ }
+
+ /* walk through the virtual channel list */
+ list_for_each(position, &dmadev->ddev.channels) {
+ struct hidma_chan *chan;
+
+ chan = list_entry(position, struct hidma_chan,
+ chan.device_node);
+ sprintf(chan->dbg_name, "chan%d", chidx);
+ chan->debugfs = debugfs_create_dir(chan->dbg_name,
+ dmadev->debugfs);
+ if (!chan->debugfs) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ chan->stats = debugfs_create_file("stats", S_IRUGO,
+ chan->debugfs, chan,
+ &hidma_chan_fops);
+ if (!chan->stats) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ chidx++;
+ }
+
+ dmadev->stats = debugfs_create_file("stats", S_IRUGO,
+ dmadev->debugfs, dmadev,
+ &hidma_dma_fops);
+ if (!dmadev->stats) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+
+ return 0;
+cleanup:
+ hidma_debug_uninit(dmadev);
+ return rc;
+}
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
new file mode 100644
index 000000000..f39290015
--- /dev/null
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -0,0 +1,872 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine low level code
+ *
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/iopoll.h>
+#include <linux/kfifo.h>
+#include <linux/bitops.h>
+
+#include "hidma.h"
+
+#define HIDMA_EVRE_SIZE 16 /* each EVRE is 16 bytes */
+
+#define HIDMA_TRCA_CTRLSTS_REG 0x000
+#define HIDMA_TRCA_RING_LOW_REG 0x008
+#define HIDMA_TRCA_RING_HIGH_REG 0x00C
+#define HIDMA_TRCA_RING_LEN_REG 0x010
+#define HIDMA_TRCA_DOORBELL_REG 0x400
+
+#define HIDMA_EVCA_CTRLSTS_REG 0x000
+#define HIDMA_EVCA_INTCTRL_REG 0x004
+#define HIDMA_EVCA_RING_LOW_REG 0x008
+#define HIDMA_EVCA_RING_HIGH_REG 0x00C
+#define HIDMA_EVCA_RING_LEN_REG 0x010
+#define HIDMA_EVCA_WRITE_PTR_REG 0x020
+#define HIDMA_EVCA_DOORBELL_REG 0x400
+
+#define HIDMA_EVCA_IRQ_STAT_REG 0x100
+#define HIDMA_EVCA_IRQ_CLR_REG 0x108
+#define HIDMA_EVCA_IRQ_EN_REG 0x110
+
+#define HIDMA_EVRE_CFG_IDX 0
+
+#define HIDMA_EVRE_ERRINFO_BIT_POS 24
+#define HIDMA_EVRE_CODE_BIT_POS 28
+
+#define HIDMA_EVRE_ERRINFO_MASK GENMASK(3, 0)
+#define HIDMA_EVRE_CODE_MASK GENMASK(3, 0)
+
+#define HIDMA_CH_CONTROL_MASK GENMASK(7, 0)
+#define HIDMA_CH_STATE_MASK GENMASK(7, 0)
+#define HIDMA_CH_STATE_BIT_POS 0x8
+
+#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS 0
+#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS 1
+#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS 9
+#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS 10
+#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS 11
+#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS 14
+
+#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS) | \
+ BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \
+ BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \
+ BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \
+ BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS) | \
+ BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))
+
+#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size) \
+do { \
+ iter += size; \
+ if (iter >= ring_size) \
+ iter -= ring_size; \
+} while (0)
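+
+/*
+ * Worked example (sizes hypothetical): with a 3-entry TRE ring,
+ * ring_size = 3 * HIDMA_TRE_SIZE = 96. Advancing an iterator sitting
+ * at 64 by one TRE gives 96, which wraps back to 0. Subtraction
+ * suffices as the modulo because a single step never takes the
+ * iterator past ring_size + size.
+ */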
+
+#define HIDMA_CH_STATE(val) \
+ ((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)
+
+#define HIDMA_ERR_INT_MASK \
+ (BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS) | \
+ BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \
+ BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \
+ BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \
+ BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))
+
+enum ch_command {
+ HIDMA_CH_DISABLE = 0,
+ HIDMA_CH_ENABLE = 1,
+ HIDMA_CH_SUSPEND = 2,
+ HIDMA_CH_RESET = 9,
+};
+
+enum ch_state {
+ HIDMA_CH_DISABLED = 0,
+ HIDMA_CH_ENABLED = 1,
+ HIDMA_CH_RUNNING = 2,
+ HIDMA_CH_SUSPENDED = 3,
+ HIDMA_CH_STOPPED = 4,
+};
+
+enum tre_type {
+ HIDMA_TRE_MEMCPY = 3,
+};
+
+enum err_code {
+ HIDMA_EVRE_STATUS_COMPLETE = 1,
+ HIDMA_EVRE_STATUS_ERROR = 4,
+};
+
+static int hidma_is_chan_enabled(int state)
+{
+ switch (state) {
+ case HIDMA_CH_ENABLED:
+ case HIDMA_CH_RUNNING:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
+{
+ struct hidma_tre *tre;
+
+ if (tre_ch >= lldev->nr_tres) {
+ dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
+ return;
+ }
+
+ tre = &lldev->trepool[tre_ch];
+ if (atomic_read(&tre->allocated) != true) {
+ dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
+ return;
+ }
+
+ atomic_set(&tre->allocated, 0);
+}
+
+int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
+ void (*callback)(void *data), void *data, u32 *tre_ch)
+{
+ unsigned int i;
+ struct hidma_tre *tre;
+ u32 *tre_local;
+
+ if (!tre_ch || !lldev)
+ return -EINVAL;
+
+ /* need to have at least one empty spot in the queue */
+ for (i = 0; i < lldev->nr_tres - 1; i++) {
+ if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
+ break;
+ }
+
+ if (i == (lldev->nr_tres - 1))
+ return -ENOMEM;
+
+ tre = &lldev->trepool[i];
+ tre->dma_sig = sig;
+ tre->dev_name = dev_name;
+ tre->callback = callback;
+ tre->data = data;
+ tre->idx = i;
+ tre->status = 0;
+ tre->queued = 0;
+ tre->err_code = 0;
+ tre->err_info = 0;
+ tre->lldev = lldev;
+ tre_local = &tre->tre_local[0];
+ tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY;
+ tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8;
+ tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16); /* set IEOB */
+ *tre_ch = i;
+ if (callback)
+ callback(data);
+ return 0;
+}
+
+/*
+ * Multiple TREs may be queued and waiting in the pending queue.
+ */
+static void hidma_ll_tre_complete(unsigned long arg)
+{
+ struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
+ struct hidma_tre *tre;
+
+ while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
+ /* call the user if it has been read by the hardware */
+ if (tre->callback)
+ tre->callback(tre->data);
+ }
+}
+
+static int hidma_post_completed(struct hidma_lldev *lldev, int tre_iterator,
+ u8 err_info, u8 err_code)
+{
+ struct hidma_tre *tre;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lldev->lock, flags);
+ tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
+ if (!tre) {
+ spin_unlock_irqrestore(&lldev->lock, flags);
+ dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
+ tre_iterator / HIDMA_TRE_SIZE);
+ return -EINVAL;
+ }
+ lldev->pending_tre_list[tre->tre_index] = NULL;
+
+ /*
+ * Keep track of pending TREs that SW is expecting to receive
+ * from HW. We got one now. Decrement our counter.
+ */
+ lldev->pending_tre_count--;
+ if (lldev->pending_tre_count < 0) {
+ dev_warn(lldev->dev, "tre count mismatch on completion");
+ lldev->pending_tre_count = 0;
+ }
+
+ spin_unlock_irqrestore(&lldev->lock, flags);
+
+ tre->err_info = err_info;
+ tre->err_code = err_code;
+ tre->queued = 0;
+
+ kfifo_put(&lldev->handoff_fifo, tre);
+ tasklet_schedule(&lldev->task);
+
+ return 0;
+}
+
+/*
+ * Called to handle the interrupt for the channel.
+ * Returns the number of TRE/EVRE completions consumed on this run
+ * (a positive number when any were processed), or 0 if there was
+ * nothing to consume.
+ */
+static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
+{
+ u32 evre_ring_size = lldev->evre_ring_size;
+ u32 tre_ring_size = lldev->tre_ring_size;
+ u32 err_info, err_code, evre_write_off;
+ u32 tre_iterator, evre_iterator;
+ u32 num_completed = 0;
+
+ evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
+ tre_iterator = lldev->tre_processed_off;
+ evre_iterator = lldev->evre_processed_off;
+
+ if ((evre_write_off > evre_ring_size) ||
+ (evre_write_off % HIDMA_EVRE_SIZE)) {
+ dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
+ return 0;
+ }
+
+	/*
+	 * By the time control reaches here, the number of EVREs and TREs
+	 * may not match. Only consume the ones that hardware has reported.
+	 */
+ while ((evre_iterator != evre_write_off)) {
+ u32 *current_evre = lldev->evre_ring + evre_iterator;
+ u32 cfg;
+
+ cfg = current_evre[HIDMA_EVRE_CFG_IDX];
+ err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS;
+ err_info &= HIDMA_EVRE_ERRINFO_MASK;
+ err_code =
+ (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;
+
+ if (hidma_post_completed(lldev, tre_iterator, err_info,
+ err_code))
+ break;
+
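+		/* advance both iterators by one record, wrapping at ring size */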
+ HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
+ tre_ring_size);
+ HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
+ evre_ring_size);
+
+ /*
+ * Read the new event descriptor written by the HW.
+ * As we are processing the delivered events, other events
+ * get queued to the SW for processing.
+ */
+ evre_write_off =
+ readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
+ num_completed++;
+ }
+
+ if (num_completed) {
+ u32 evre_read_off = (lldev->evre_processed_off +
+ HIDMA_EVRE_SIZE * num_completed);
+ u32 tre_read_off = (lldev->tre_processed_off +
+ HIDMA_TRE_SIZE * num_completed);
+
+ evre_read_off = evre_read_off % evre_ring_size;
+ tre_read_off = tre_read_off % tre_ring_size;
+
+ writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);
+
+ /* record the last processed tre offset */
+ lldev->tre_processed_off = tre_read_off;
+ lldev->evre_processed_off = evre_read_off;
+ }
+
+ return num_completed;
+}
+
+void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
+ u8 err_code)
+{
+ u32 tre_iterator;
+ u32 tre_ring_size = lldev->tre_ring_size;
+ int num_completed = 0;
+ u32 tre_read_off;
+
+ tre_iterator = lldev->tre_processed_off;
+ while (lldev->pending_tre_count) {
+ if (hidma_post_completed(lldev, tre_iterator, err_info,
+ err_code))
+ break;
+ HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
+ tre_ring_size);
+ num_completed++;
+ }
+ tre_read_off = (lldev->tre_processed_off +
+ HIDMA_TRE_SIZE * num_completed);
+
+ tre_read_off = tre_read_off % tre_ring_size;
+
+ /* record the last processed tre offset */
+ lldev->tre_processed_off = tre_read_off;
+}
+
+static int hidma_ll_reset(struct hidma_lldev *lldev)
+{
+ u32 val;
+ int ret;
+
+ val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+ val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+ val |= HIDMA_CH_RESET << 16;
+ writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+
+	/*
+	 * Wait for the reset to take effect and the DMA logic to quiesce:
+	 * poll the channel state every 1 ms, up to a 10 ms timeout.
+	 */
+ ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
+ HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
+ 1000, 10000);
+ if (ret) {
+ dev_err(lldev->dev, "transfer channel did not reset\n");
+ return ret;
+ }
+
+ val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+ val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+ val |= HIDMA_CH_RESET << 16;
+ writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+
+	/*
+	 * Wait for the reset to take effect and the DMA logic to quiesce:
+	 * poll the channel state every 1 ms, up to a 10 ms timeout.
+	 */
+ ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
+ HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
+ 1000, 10000);
+ if (ret)
+ return ret;
+
+ lldev->trch_state = HIDMA_CH_DISABLED;
+ lldev->evch_state = HIDMA_CH_DISABLED;
+ return 0;
+}
+
+/*
+ * Abort all transactions and perform a reset.
+ */
+static void hidma_ll_abort(unsigned long arg)
+{
+ struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
+ u8 err_code = HIDMA_EVRE_STATUS_ERROR;
+ u8 err_info = 0xFF;
+ int rc;
+
+ hidma_cleanup_pending_tre(lldev, err_info, err_code);
+
+ /* reset the channel for recovery */
+ rc = hidma_ll_setup(lldev);
+ if (rc) {
+ dev_err(lldev->dev, "channel reinitialize failed after error\n");
+ return;
+ }
+ writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+}
+
+/*
+ * The interrupt handler for HIDMA will try to consume as many pending
+ * EVREs from the event queue as possible. Each EVRE has an associated
+ * TRE that holds the user interface parameters. The EVRE reports the
+ * result of the transaction. Hardware guarantees ordering between EVREs
+ * and TREs. We use the last processed offset to figure out which TRE is
+ * associated with which EVRE. If two TREs are consumed by HW, the EVREs
+ * are in order in the event ring.
+ *
+ * This handler makes a single pass to consume EVREs. Other EVREs may
+ * be delivered while it is working; it attempts to consume those
+ * incoming EVREs one more time before returning.
+ *
+ * For unprocessed EVREs, hardware will trigger another interrupt until
+ * all the interrupt bits are cleared.
+ *
+ * Hardware guarantees that by the time interrupt is observed, all data
+ * transactions in flight are delivered to their respective places and
+ * are visible to the CPU.
+ *
+ * On-demand paging for IOMMU is only supported for PCIe via PRI
+ * (Page Request Interface), not for HIDMA. All other hardware instances
+ * including HIDMA work on pinned DMA addresses.
+ *
+ * HIDMA is not aware of IOMMU presence since it follows the DMA API. All
+ * IOMMU latency will be built into the data movement time. By the time
+ * the interrupt happens, IOMMU lookups and data movement have already
+ * taken place.
+ *
+ * While the first read in a typical PCI endpoint ISR traditionally
+ * flushes all outstanding requests to the destination, that concept
+ * does not apply to this hardware.
+ */
+irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+{
+ struct hidma_lldev *lldev = arg;
+ u32 status;
+ u32 enable;
+ u32 cause;
+
+ /*
+ * Fine tuned for this HW...
+ *
+ * This ISR has been designed for this particular hardware. Relaxed
+ * read and write accessors are used for performance reasons due to
+ * interrupt delivery guarantees. Do not copy this code blindly and
+ * expect that to work.
+ */
+ status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+ enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+ cause = status & enable;
+
+ while (cause) {
+ if (cause & HIDMA_ERR_INT_MASK) {
+ dev_err(lldev->dev, "error 0x%x, resetting...\n",
+ cause);
+
+ /* Clear out pending interrupts */
+ writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+ tasklet_schedule(&lldev->rst_task);
+ goto out;
+ }
+
+ /*
+ * Try to consume as many EVREs as possible.
+ */
+ hidma_handle_tre_completion(lldev);
+
+ /* We consumed TREs or there are pending TREs or EVREs. */
+ writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+ /*
+ * Another interrupt might have arrived while we are
+ * processing this one. Read the new cause.
+ */
+ status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+ enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+ cause = status & enable;
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+int hidma_ll_enable(struct hidma_lldev *lldev)
+{
+ u32 val;
+ int ret;
+
+ val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+ val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+ val |= HIDMA_CH_ENABLE << 16;
+ writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+
+ ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
+ hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
+ 1000, 10000);
+ if (ret) {
+ dev_err(lldev->dev, "event channel did not get enabled\n");
+ return ret;
+ }
+
+ val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+ val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+ val |= HIDMA_CH_ENABLE << 16;
+ writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+
+ ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
+ hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
+ 1000, 10000);
+ if (ret) {
+ dev_err(lldev->dev, "transfer channel did not get enabled\n");
+ return ret;
+ }
+
+ lldev->trch_state = HIDMA_CH_ENABLED;
+ lldev->evch_state = HIDMA_CH_ENABLED;
+
+ return 0;
+}
+
+void hidma_ll_start(struct hidma_lldev *lldev)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&lldev->lock, irqflags);
+ writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
+ spin_unlock_irqrestore(&lldev->lock, irqflags);
+}
+
+bool hidma_ll_isenabled(struct hidma_lldev *lldev)
+{
+ u32 val;
+
+ val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+ lldev->trch_state = HIDMA_CH_STATE(val);
+ val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+ lldev->evch_state = HIDMA_CH_STATE(val);
+
+ /* both channels have to be enabled before calling this function */
+ if (hidma_is_chan_enabled(lldev->trch_state) &&
+ hidma_is_chan_enabled(lldev->evch_state))
+ return true;
+
+ return false;
+}
+
+void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
+{
+ struct hidma_tre *tre;
+ unsigned long flags;
+
+ tre = &lldev->trepool[tre_ch];
+
+ /* copy the TRE into its location in the TRE ring */
+ spin_lock_irqsave(&lldev->lock, flags);
+ tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
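+	/* e.g. with a 32-byte TRE, a write offset of 96 lands in slot 3 */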
+ lldev->pending_tre_list[tre->tre_index] = tre;
+ memcpy(lldev->tre_ring + lldev->tre_write_offset,
+ &tre->tre_local[0], HIDMA_TRE_SIZE);
+ tre->err_code = 0;
+ tre->err_info = 0;
+ tre->queued = 1;
+ lldev->pending_tre_count++;
+ lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
+ % lldev->tre_ring_size;
+ spin_unlock_irqrestore(&lldev->lock, flags);
+}
+
+/*
+ * Note that even though we stop this channel, a transaction already in
+ * flight will still complete and invoke its callback. This request only
+ * prevents further requests from being made.
+ */
+int hidma_ll_disable(struct hidma_lldev *lldev)
+{
+ u32 val;
+ int ret;
+
+ val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+ lldev->evch_state = HIDMA_CH_STATE(val);
+ val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+ lldev->trch_state = HIDMA_CH_STATE(val);
+
+ /* already suspended by this OS */
+ if ((lldev->trch_state == HIDMA_CH_SUSPENDED) ||
+ (lldev->evch_state == HIDMA_CH_SUSPENDED))
+ return 0;
+
+ /* already stopped by the manager */
+ if ((lldev->trch_state == HIDMA_CH_STOPPED) ||
+ (lldev->evch_state == HIDMA_CH_STOPPED))
+ return 0;
+
+ val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+ val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+ val |= HIDMA_CH_SUSPEND << 16;
+ writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+
+	/*
+	 * Wait for the suspend request to take effect:
+	 * poll the channel state every 1 ms, up to a 10 ms timeout.
+	 */
+ ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
+ HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
+ 1000, 10000);
+ if (ret)
+ return ret;
+
+ val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+ val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+ val |= HIDMA_CH_SUSPEND << 16;
+ writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+
+	/*
+	 * Wait for the suspend request to take effect:
+	 * poll the channel state every 1 ms, up to a 10 ms timeout.
+	 */
+ ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
+ HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
+ 1000, 10000);
+ if (ret)
+ return ret;
+
+ lldev->trch_state = HIDMA_CH_SUSPENDED;
+ lldev->evch_state = HIDMA_CH_SUSPENDED;
+ return 0;
+}
+
+void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
+ dma_addr_t src, dma_addr_t dest, u32 len,
+ u32 flags)
+{
+ struct hidma_tre *tre;
+ u32 *tre_local;
+
+ if (tre_ch >= lldev->nr_tres) {
+ dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
+ tre_ch);
+ return;
+ }
+
+ tre = &lldev->trepool[tre_ch];
+	if (!atomic_read(&tre->allocated)) {
+ dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
+ tre_ch);
+ return;
+ }
+
+ tre_local = &tre->tre_local[0];
+ tre_local[HIDMA_TRE_LEN_IDX] = len;
+ tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
+ tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
+ tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
+ tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
+ tre->int_flags = flags;
+}
+
+/*
+ * Called during initialization and after an error condition
+ * to restore hardware state.
+ */
+int hidma_ll_setup(struct hidma_lldev *lldev)
+{
+ int rc;
+ u64 addr;
+ u32 val;
+ u32 nr_tres = lldev->nr_tres;
+
+ lldev->pending_tre_count = 0;
+ lldev->tre_processed_off = 0;
+ lldev->evre_processed_off = 0;
+ lldev->tre_write_offset = 0;
+
+ /* disable interrupts */
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+ /* clear all pending interrupts */
+ val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+ writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+ rc = hidma_ll_reset(lldev);
+ if (rc)
+ return rc;
+
+ /*
+ * Clear all pending interrupts again.
+ * Otherwise, we observe reset complete interrupts.
+ */
+ val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+ writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+ /* disable interrupts again after reset */
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+ addr = lldev->tre_dma;
+ writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
+ writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
+ writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);
+
+ addr = lldev->evre_dma;
+ writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
+ writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
+ writel(HIDMA_EVRE_SIZE * nr_tres,
+ lldev->evca + HIDMA_EVCA_RING_LEN_REG);
+
+ /* support IRQ only for now */
+ val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
+ val &= ~0xF;
+ val |= 0x1;
+ writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);
+
+ /* clear all pending interrupts and enable them */
+ writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+ return hidma_ll_enable(lldev);
+}
+
+struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
+ void __iomem *trca, void __iomem *evca,
+ u8 chidx)
+{
+ u32 required_bytes;
+ struct hidma_lldev *lldev;
+ int rc;
+ size_t sz;
+
+ if (!trca || !evca || !dev || !nr_tres)
+ return NULL;
+
+ /* need at least four TREs */
+ if (nr_tres < 4)
+ return NULL;
+
+	/* reserve one extra TRE so the ring never fills completely */
+ nr_tres += 1;
+
+ lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
+ if (!lldev)
+ return NULL;
+
+ lldev->evca = evca;
+ lldev->trca = trca;
+ lldev->dev = dev;
+ sz = sizeof(struct hidma_tre);
+ lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
+ if (!lldev->trepool)
+ return NULL;
+
+ required_bytes = sizeof(lldev->pending_tre_list[0]);
+ lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
+ GFP_KERNEL);
+ if (!lldev->pending_tre_list)
+ return NULL;
+
+ sz = (HIDMA_TRE_SIZE + 1) * nr_tres;
+ lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
+ GFP_KERNEL);
+ if (!lldev->tre_ring)
+ return NULL;
+
+ memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres);
+ lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
+ lldev->nr_tres = nr_tres;
+
+ /* the TRE ring has to be TRE_SIZE aligned */
+ if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
+ u8 tre_ring_shift;
+
+ tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
+ tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;
+ lldev->tre_dma += tre_ring_shift;
+ lldev->tre_ring += tre_ring_shift;
+ }
+
+ sz = (HIDMA_EVRE_SIZE + 1) * nr_tres;
+ lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
+ GFP_KERNEL);
+ if (!lldev->evre_ring)
+ return NULL;
+
+ memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres);
+ lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;
+
+ /* the EVRE ring has to be EVRE_SIZE aligned */
+ if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
+ u8 evre_ring_shift;
+
+ evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
+ evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift;
+ lldev->evre_dma += evre_ring_shift;
+ lldev->evre_ring += evre_ring_shift;
+ }
+ lldev->nr_tres = nr_tres;
+ lldev->chidx = chidx;
+
+ sz = nr_tres * sizeof(struct hidma_tre *);
+ rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
+ if (rc)
+ return NULL;
+
+ rc = hidma_ll_setup(lldev);
+ if (rc)
+ return NULL;
+
+ spin_lock_init(&lldev->lock);
+ tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev);
+ tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
+ lldev->initialized = 1;
+ writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+ return lldev;
+}
+
+int hidma_ll_uninit(struct hidma_lldev *lldev)
+{
+ u32 required_bytes;
+ int rc = 0;
+ u32 val;
+
+ if (!lldev)
+ return -ENODEV;
+
+ if (!lldev->initialized)
+ return 0;
+
+ lldev->initialized = 0;
+
+ required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
+ tasklet_kill(&lldev->task);
+ memset(lldev->trepool, 0, required_bytes);
+ lldev->trepool = NULL;
+ lldev->pending_tre_count = 0;
+ lldev->tre_write_offset = 0;
+
+ rc = hidma_ll_reset(lldev);
+
+ /*
+ * Clear all pending interrupts again.
+ * Otherwise, we observe reset complete interrupts.
+ */
+ val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+ writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+ return rc;
+}
+
+enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
+{
+ enum dma_status ret = DMA_ERROR;
+ struct hidma_tre *tre;
+ unsigned long flags;
+ u8 err_code;
+
+ spin_lock_irqsave(&lldev->lock, flags);
+
+ tre = &lldev->trepool[tre_ch];
+ err_code = tre->err_code;
+
+ if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
+ ret = DMA_COMPLETE;
+ else if (err_code & HIDMA_EVRE_STATUS_ERROR)
+ ret = DMA_ERROR;
+ else
+ ret = DMA_IN_PROGRESS;
+ spin_unlock_irqrestore(&lldev->lock, flags);
+
+ return ret;
+}
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index ef491b893..c0e365321 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -1,7 +1,7 @@
/*
* Qualcomm Technologies HIDMA DMA engine Management interface
*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,13 +17,14 @@
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/property.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
#include "hidma_mgmt.h"
@@ -298,5 +299,109 @@ static struct platform_driver hidma_mgmt_driver = {
},
};
-module_platform_driver(hidma_mgmt_driver);
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+static int object_counter;
+
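+/*
+ * Sketch of the device-tree shape this parser walks; the names and
+ * values are placeholders, not a binding definition:
+ *
+ *	hidma-mgmt {
+ *		compatible = "...";
+ *
+ *		hidma-chan {
+ *			reg = <addr0 size0>, <addr1 size1>;
+ *			interrupts = <...>;
+ *		};
+ *	};
+ *
+ * Each available child becomes a platform device whose resources are
+ * the decoded reg regions plus one mapped event interrupt.
+ */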
+static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
+{
+ struct platform_device *pdev_parent = of_find_device_by_node(np);
+ struct platform_device_info pdevinfo;
+ struct of_phandle_args out_irq;
+ struct device_node *child;
+ struct resource *res;
+ const __be32 *cell;
+ int ret = 0, size, i, num;
+ u64 addr, addr_size;
+
+ for_each_available_child_of_node(np, child) {
+ struct resource *res_iter;
+ struct platform_device *new_pdev;
+
+ cell = of_get_property(child, "reg", &size);
+ if (!cell) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ size /= sizeof(*cell);
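+		/* one MEM resource per (addr, size) pair, plus one for the IRQ */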
+ num = size /
+ (of_n_addr_cells(child) + of_n_size_cells(child)) + 1;
+
+ /* allocate a resource array */
+ res = kcalloc(num, sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* read each reg value */
+ i = 0;
+ res_iter = res;
+ while (i < size) {
+ addr = of_read_number(&cell[i],
+ of_n_addr_cells(child));
+ i += of_n_addr_cells(child);
+
+ addr_size = of_read_number(&cell[i],
+ of_n_size_cells(child));
+ i += of_n_size_cells(child);
+
+ res_iter->start = addr;
+ res_iter->end = res_iter->start + addr_size - 1;
+ res_iter->flags = IORESOURCE_MEM;
+ res_iter++;
+ }
+
+ ret = of_irq_parse_one(child, 0, &out_irq);
+ if (ret)
+ goto out;
+
+ res_iter->start = irq_create_of_mapping(&out_irq);
+ res_iter->name = "hidma event irq";
+ res_iter->flags = IORESOURCE_IRQ;
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ pdevinfo.fwnode = &child->fwnode;
+ pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
+ pdevinfo.name = child->name;
+ pdevinfo.id = object_counter++;
+ pdevinfo.res = res;
+ pdevinfo.num_res = num;
+ pdevinfo.data = NULL;
+ pdevinfo.size_data = 0;
+ pdevinfo.dma_mask = DMA_BIT_MASK(64);
+ new_pdev = platform_device_register_full(&pdevinfo);
+		/* platform_device_register_full() returns ERR_PTR on failure */
+		if (IS_ERR(new_pdev)) {
+			ret = PTR_ERR(new_pdev);
+			goto out;
+		}
+ of_dma_configure(&new_pdev->dev, child);
+
+ kfree(res);
+ res = NULL;
+ }
+out:
+ kfree(res);
+
+ return ret;
+}
+#endif
+
+static int __init hidma_mgmt_init(void)
+{
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+ struct device_node *child;
+
+	/*
+	 * Probe each HIDMA instance described in the device tree.
+	 * for_each_matching_node() manages the node reference counts as it
+	 * iterates, so no explicit of_node_put() is needed here.
+	 */
+	for_each_matching_node(child, hidma_mgmt_match)
+		hidma_mgmt_of_populate_channels(child);
+#endif
+ platform_driver_register(&hidma_mgmt_driver);
+
+ return 0;
+}
+module_init(hidma_mgmt_init);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index e0df233dd..57aa227bf 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -461,25 +461,25 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
/* Source burst */
ret = convert_burst(sconfig->src_maxburst);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
ret = convert_burst(sconfig->dst_maxburst);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
/* Source bus width */
ret = convert_buswidth(sconfig->src_addr_width);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
@@ -518,25 +518,25 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
/* Source burst */
ret = convert_burst(sconfig->src_maxburst);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
ret = convert_burst(sconfig->dst_maxburst);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
/* Source bus width */
ret = convert_buswidth(sconfig->src_addr_width);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 2db12e493..5065ca43f 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -146,6 +146,8 @@ struct sun6i_vchan {
struct dma_slave_config cfg;
struct sun6i_pchan *phy;
u8 port;
+ u8 irq_type;
+ bool cyclic;
};
struct sun6i_dma_dev {
@@ -254,6 +256,30 @@ static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width)
return addr_width >> 1;
}
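+
+/*
+ * Bytes still owed by the transfer running on @pchan: the hardware byte
+ * count of the chunk in flight plus the lengths of the linked-list
+ * items that have not completed yet.
+ */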
+static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan)
+{
+ struct sun6i_desc *txd = pchan->desc;
+ struct sun6i_dma_lli *lli;
+ size_t bytes;
+ dma_addr_t pos;
+
+ pos = readl(pchan->base + DMA_CHAN_LLI_ADDR);
+ bytes = readl(pchan->base + DMA_CHAN_CUR_CNT);
+
+ if (pos == LLI_LAST_ITEM)
+ return bytes;
+
+ for (lli = txd->v_lli; lli; lli = lli->v_lli_next) {
+ if (lli->p_lli_next == pos) {
+ for (lli = lli->v_lli_next; lli; lli = lli->v_lli_next)
+ bytes += lli->len;
+ break;
+ }
+ }
+
+ return bytes;
+}
+
static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
struct sun6i_dma_lli *next,
dma_addr_t next_phy,
@@ -276,45 +302,6 @@ static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
return next;
}
-static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli,
- dma_addr_t src,
- dma_addr_t dst, u32 len,
- struct dma_slave_config *config)
-{
- u8 src_width, dst_width, src_burst, dst_burst;
-
- if (!config)
- return -EINVAL;
-
- src_burst = convert_burst(config->src_maxburst);
- if (src_burst)
- return src_burst;
-
- dst_burst = convert_burst(config->dst_maxburst);
- if (dst_burst)
- return dst_burst;
-
- src_width = convert_buswidth(config->src_addr_width);
- if (src_width)
- return src_width;
-
- dst_width = convert_buswidth(config->dst_addr_width);
- if (dst_width)
- return dst_width;
-
- lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
- DMA_CHAN_CFG_SRC_WIDTH(src_width) |
- DMA_CHAN_CFG_DST_BURST(dst_burst) |
- DMA_CHAN_CFG_DST_WIDTH(dst_width);
-
- lli->src = src;
- lli->dst = dst;
- lli->len = len;
- lli->para = NORMAL_WAIT;
-
- return 0;
-}
-
static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
struct sun6i_dma_lli *lli)
{
@@ -381,9 +368,13 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;
- irq_val = readl(sdev->base + DMA_IRQ_EN(irq_offset));
- irq_val |= DMA_IRQ_QUEUE << (irq_offset * DMA_IRQ_CHAN_WIDTH);
- writel(irq_val, sdev->base + DMA_IRQ_EN(irq_offset));
+ vchan->irq_type = vchan->cyclic ? DMA_IRQ_PKG : DMA_IRQ_QUEUE;
+
+ irq_val = readl(sdev->base + DMA_IRQ_EN(irq_reg));
+ irq_val &= ~((DMA_IRQ_HALF | DMA_IRQ_PKG | DMA_IRQ_QUEUE) <<
+ (irq_offset * DMA_IRQ_CHAN_WIDTH));
+ irq_val |= vchan->irq_type << (irq_offset * DMA_IRQ_CHAN_WIDTH);
+ writel(irq_val, sdev->base + DMA_IRQ_EN(irq_reg));
writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR);
writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE);
@@ -479,11 +470,12 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
writel(status, sdev->base + DMA_IRQ_STAT(i));
for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) {
- if (status & DMA_IRQ_QUEUE) {
- pchan = sdev->pchans + j;
- vchan = pchan->vchan;
-
- if (vchan) {
+ pchan = sdev->pchans + j;
+ vchan = pchan->vchan;
+ if (vchan && (status & vchan->irq_type)) {
+ if (vchan->cyclic) {
+ vchan_cyclic_callback(&pchan->desc->vd);
+ } else {
spin_lock(&vchan->vc.lock);
vchan_cookie_complete(&pchan->desc->vd);
pchan->done = pchan->desc;
@@ -502,6 +494,55 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
return ret;
}
+static int set_config(struct sun6i_dma_dev *sdev,
+ struct dma_slave_config *sconfig,
+ enum dma_transfer_direction direction,
+ u32 *p_cfg)
+{
+ s8 src_width, dst_width, src_burst, dst_burst;
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ src_burst = convert_burst(sconfig->src_maxburst ?
+ sconfig->src_maxburst : 8);
+ src_width = convert_buswidth(sconfig->src_addr_width !=
+ DMA_SLAVE_BUSWIDTH_UNDEFINED ?
+ sconfig->src_addr_width :
+ DMA_SLAVE_BUSWIDTH_4_BYTES);
+ dst_burst = convert_burst(sconfig->dst_maxburst);
+ dst_width = convert_buswidth(sconfig->dst_addr_width);
+ break;
+ case DMA_DEV_TO_MEM:
+ src_burst = convert_burst(sconfig->src_maxburst);
+ src_width = convert_buswidth(sconfig->src_addr_width);
+ dst_burst = convert_burst(sconfig->dst_maxburst ?
+ sconfig->dst_maxburst : 8);
+ dst_width = convert_buswidth(sconfig->dst_addr_width !=
+ DMA_SLAVE_BUSWIDTH_UNDEFINED ?
+ sconfig->dst_addr_width :
+ DMA_SLAVE_BUSWIDTH_4_BYTES);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (src_burst < 0)
+ return src_burst;
+ if (src_width < 0)
+ return src_width;
+ if (dst_burst < 0)
+ return dst_burst;
+ if (dst_width < 0)
+ return dst_width;
+
+ *p_cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
+ DMA_CHAN_CFG_SRC_WIDTH(src_width) |
+ DMA_CHAN_CFG_DST_BURST(dst_burst) |
+ DMA_CHAN_CFG_DST_WIDTH(dst_width);
+
+ return 0;
+}
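+
+/*
+ * For example (illustrative only): a DMA_MEM_TO_DEV transfer whose
+ * sconfig leaves the source side unset defaults to a burst of eight
+ * 4-byte transfers on the memory side, while the device-side burst and
+ * width must be supplied by the client.
+ */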
+
static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
@@ -569,13 +610,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
struct sun6i_desc *txd;
struct scatterlist *sg;
dma_addr_t p_lli;
+ u32 lli_cfg;
int i, ret;
if (!sgl)
return NULL;
- if (!is_slave_direction(dir)) {
- dev_err(chan2dev(chan), "Invalid DMA direction\n");
+ ret = set_config(sdev, sconfig, dir, &lli_cfg);
+ if (ret) {
+ dev_err(chan2dev(chan), "Invalid DMA configuration\n");
return NULL;
}
@@ -588,14 +631,14 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
if (!v_lli)
goto err_lli_free;
- if (dir == DMA_MEM_TO_DEV) {
- ret = sun6i_dma_cfg_lli(v_lli, sg_dma_address(sg),
- sconfig->dst_addr, sg_dma_len(sg),
- sconfig);
- if (ret)
- goto err_cur_lli_free;
+ v_lli->len = sg_dma_len(sg);
+ v_lli->para = NORMAL_WAIT;
- v_lli->cfg |= DMA_CHAN_CFG_DST_IO_MODE |
+ if (dir == DMA_MEM_TO_DEV) {
+ v_lli->src = sg_dma_address(sg);
+ v_lli->dst = sconfig->dst_addr;
+ v_lli->cfg = lli_cfg |
+ DMA_CHAN_CFG_DST_IO_MODE |
DMA_CHAN_CFG_SRC_LINEAR_MODE |
DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
DMA_CHAN_CFG_DST_DRQ(vchan->port);
@@ -607,13 +650,10 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
sg_dma_len(sg), flags);
} else {
- ret = sun6i_dma_cfg_lli(v_lli, sconfig->src_addr,
- sg_dma_address(sg), sg_dma_len(sg),
- sconfig);
- if (ret)
- goto err_cur_lli_free;
-
- v_lli->cfg |= DMA_CHAN_CFG_DST_LINEAR_MODE |
+ v_lli->src = sconfig->src_addr;
+ v_lli->dst = sg_dma_address(sg);
+ v_lli->cfg = lli_cfg |
+ DMA_CHAN_CFG_DST_LINEAR_MODE |
DMA_CHAN_CFG_SRC_IO_MODE |
DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
DMA_CHAN_CFG_SRC_DRQ(vchan->port);
@@ -634,8 +674,78 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
-err_cur_lli_free:
- dma_pool_free(sdev->pool, v_lli, p_lli);
+err_lli_free:
+ for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
+ dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
+ kfree(txd);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
+ struct dma_chan *chan,
+ dma_addr_t buf_addr,
+ size_t buf_len,
+ size_t period_len,
+ enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+ struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct sun6i_dma_lli *v_lli, *prev = NULL;
+ struct sun6i_desc *txd;
+ dma_addr_t p_lli;
+ u32 lli_cfg;
+ unsigned int i, periods = buf_len / period_len;
+ int ret;
+
+ ret = set_config(sdev, sconfig, dir, &lli_cfg);
+ if (ret) {
+ dev_err(chan2dev(chan), "Invalid DMA configuration\n");
+ return NULL;
+ }
+
+ txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+ if (!txd)
+ return NULL;
+
+ for (i = 0; i < periods; i++) {
+ v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+ if (!v_lli) {
+ dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
+ goto err_lli_free;
+ }
+
+ v_lli->len = period_len;
+ v_lli->para = NORMAL_WAIT;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ v_lli->src = buf_addr + period_len * i;
+ v_lli->dst = sconfig->dst_addr;
+ v_lli->cfg = lli_cfg |
+ DMA_CHAN_CFG_DST_IO_MODE |
+ DMA_CHAN_CFG_SRC_LINEAR_MODE |
+ DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
+ DMA_CHAN_CFG_DST_DRQ(vchan->port);
+ } else {
+ v_lli->src = sconfig->src_addr;
+ v_lli->dst = buf_addr + period_len * i;
+ v_lli->cfg = lli_cfg |
+ DMA_CHAN_CFG_DST_LINEAR_MODE |
+ DMA_CHAN_CFG_SRC_IO_MODE |
+ DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
+ DMA_CHAN_CFG_SRC_DRQ(vchan->port);
+ }
+
+ prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
+ }
+
+ prev->p_lli_next = txd->p_lli; /* cyclic list */
+
+ vchan->cyclic = true;
+
+ return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
err_lli_free:
for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
@@ -712,6 +822,16 @@ static int sun6i_dma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&vchan->vc.lock, flags);
+ if (vchan->cyclic) {
+ vchan->cyclic = false;
+ if (pchan && pchan->desc) {
+ struct virt_dma_desc *vd = &pchan->desc->vd;
+ struct virt_dma_chan *vc = &vchan->vc;
+
+ list_add_tail(&vd->node, &vc->desc_completed);
+ }
+ }
+
vchan_get_all_descriptors(&vchan->vc, &head);
if (pchan) {
@@ -759,7 +879,7 @@ static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
} else if (!pchan || !pchan->desc) {
bytes = 0;
} else {
- bytes = readl(pchan->base + DMA_CHAN_CUR_CNT);
+ bytes = sun6i_get_chan_size(pchan);
}
spin_unlock_irqrestore(&vchan->vc.lock, flags);
@@ -963,6 +1083,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);
+ dma_cap_set(DMA_CYCLIC, sdc->slave.cap_mask);
INIT_LIST_HEAD(&sdc->slave.channels);
sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources;
@@ -970,6 +1091,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
sdc->slave.device_issue_pending = sun6i_dma_issue_pending;
sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
+ sdc->slave.device_prep_dma_cyclic = sun6i_dma_prep_dma_cyclic;
sdc->slave.copy_align = DMAENGINE_ALIGN_4_BYTES;
sdc->slave.device_config = sun6i_dma_config;
sdc->slave.device_pause = sun6i_dma_pause;
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 3871f29e5..01e316f73 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -54,6 +54,7 @@
#define TEGRA_APBDMA_CSR_ONCE BIT(27)
#define TEGRA_APBDMA_CSR_FLOW BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16
+#define TEGRA_APBDMA_CSR_REQ_SEL_MASK 0x1F
#define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC
/* STATUS register */
@@ -114,6 +115,8 @@
/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000
+#define TEGRA_APBDMA_SLAVE_ID_INVALID (TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)
+
struct tegra_dma;
/*
@@ -353,8 +356,11 @@ static int tegra_dma_slave_config(struct dma_chan *dc,
}
memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
- if (!tdc->slave_id)
+ if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID) {
+ if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
+ return -EINVAL;
tdc->slave_id = sconfig->slave_id;
+ }
tdc->config_init = true;
return 0;
}
@@ -1236,7 +1242,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
}
pm_runtime_put(tdma->dev);
- tdc->slave_id = 0;
+ tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
}
static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
@@ -1246,6 +1252,11 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
struct dma_chan *chan;
struct tegra_dma_channel *tdc;
+ if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
+ dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
+ return NULL;
+ }
+
chan = dma_get_any_slave_channel(&tdma->dma_dev);
if (!chan)
return NULL;
@@ -1389,6 +1400,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
&tdma->dma_dev.channels);
tdc->tdma = tdma;
tdc->id = i;
+ tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
(unsigned long)tdc);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
new file mode 100644
index 000000000..c4b121c45
--- /dev/null
+++ b/drivers/dma/tegra210-adma.c
@@ -0,0 +1,840 @@
+/*
+ * ADMA driver for Nvidia's Tegra210 ADMA controller.
+ *
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include "virt-dma.h"
+
+#define ADMA_CH_CMD 0x00
+#define ADMA_CH_STATUS 0x0c
+#define ADMA_CH_STATUS_XFER_EN BIT(0)
+
+#define ADMA_CH_INT_STATUS 0x10
+#define ADMA_CH_INT_STATUS_XFER_DONE BIT(0)
+
+#define ADMA_CH_INT_CLEAR 0x1c
+#define ADMA_CH_CTRL 0x24
+#define ADMA_CH_CTRL_TX_REQ(val) (((val) & 0xf) << 28)
+#define ADMA_CH_CTRL_TX_REQ_MAX 10
+#define ADMA_CH_CTRL_RX_REQ(val) (((val) & 0xf) << 24)
+#define ADMA_CH_CTRL_RX_REQ_MAX 10
+#define ADMA_CH_CTRL_DIR(val) (((val) & 0xf) << 12)
+#define ADMA_CH_CTRL_DIR_AHUB2MEM 2
+#define ADMA_CH_CTRL_DIR_MEM2AHUB 4
+#define ADMA_CH_CTRL_MODE_CONTINUOUS (2 << 8)
+#define ADMA_CH_CTRL_FLOWCTRL_EN BIT(1)
+
+#define ADMA_CH_CONFIG 0x28
+#define ADMA_CH_CONFIG_SRC_BUF(val) (((val) & 0x7) << 28)
+#define ADMA_CH_CONFIG_TRG_BUF(val) (((val) & 0x7) << 24)
+#define ADMA_CH_CONFIG_BURST_SIZE(val) (((val) & 0x7) << 20)
+#define ADMA_CH_CONFIG_BURST_16 5
+#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf)
+#define ADMA_CH_CONFIG_MAX_BUFS 8
+
+#define ADMA_CH_FIFO_CTRL 0x2c
+#define ADMA_CH_FIFO_CTRL_OVRFW_THRES(val) (((val) & 0xf) << 24)
+#define ADMA_CH_FIFO_CTRL_STARV_THRES(val) (((val) & 0xf) << 16)
+#define ADMA_CH_FIFO_CTRL_TX_SIZE(val) (((val) & 0xf) << 8)
+#define ADMA_CH_FIFO_CTRL_RX_SIZE(val) ((val) & 0xf)
+
+#define ADMA_CH_LOWER_SRC_ADDR 0x34
+#define ADMA_CH_LOWER_TRG_ADDR 0x3c
+#define ADMA_CH_TC 0x44
+#define ADMA_CH_TC_COUNT_MASK 0x3ffffffc
+
+#define ADMA_CH_XFER_STATUS 0x54
+#define ADMA_CH_XFER_STATUS_COUNT_MASK 0xffff
+
+#define ADMA_GLOBAL_CMD 0xc00
+#define ADMA_GLOBAL_SOFT_RESET 0xc04
+#define ADMA_GLOBAL_INT_CLEAR 0xc20
+#define ADMA_GLOBAL_CTRL 0xc24
+
+#define ADMA_CH_REG_OFFSET(a)			((a) * 0x80)
+
+#define ADMA_CH_FIFO_CTRL_DEFAULT (ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \
+ ADMA_CH_FIFO_CTRL_STARV_THRES(1) | \
+ ADMA_CH_FIFO_CTRL_TX_SIZE(3) | \
+ ADMA_CH_FIFO_CTRL_RX_SIZE(3))
+struct tegra_adma;
+
+/*
+ * struct tegra_adma_chip_data - Tegra chip specific data
+ * @nr_channels: Number of DMA channels available.
+ */
+struct tegra_adma_chip_data {
+ int nr_channels;
+};
+
+/*
+ * struct tegra_adma_chan_regs - Tegra ADMA channel registers
+ */
+struct tegra_adma_chan_regs {
+ unsigned int ctrl;
+ unsigned int config;
+ unsigned int src_addr;
+ unsigned int trg_addr;
+ unsigned int fifo_ctrl;
+ unsigned int tc;
+};
+
+/*
+ * struct tegra_adma_desc - Tegra ADMA descriptor to manage transfer requests.
+ */
+struct tegra_adma_desc {
+ struct virt_dma_desc vd;
+ struct tegra_adma_chan_regs ch_regs;
+ size_t buf_len;
+ size_t period_len;
+ size_t num_periods;
+};
+
+/*
+ * struct tegra_adma_chan - Tegra ADMA channel information
+ */
+struct tegra_adma_chan {
+ struct virt_dma_chan vc;
+ struct tegra_adma_desc *desc;
+ struct tegra_adma *tdma;
+ int irq;
+ void __iomem *chan_addr;
+
+ /* Slave channel configuration info */
+ struct dma_slave_config sconfig;
+ enum dma_transfer_direction sreq_dir;
+ unsigned int sreq_index;
+ bool sreq_reserved;
+
+ /* Transfer count and position info */
+ unsigned int tx_buf_count;
+ unsigned int tx_buf_pos;
+};
+
+/*
+ * struct tegra_adma - Tegra ADMA controller information
+ */
+struct tegra_adma {
+ struct dma_device dma_dev;
+ struct device *dev;
+ void __iomem *base_addr;
+ unsigned int nr_channels;
+ unsigned long rx_requests_reserved;
+ unsigned long tx_requests_reserved;
+
+ /* Used to store global command register state when suspending */
+ unsigned int global_cmd;
+
+ /* Last member of the structure */
+ struct tegra_adma_chan channels[0];
+};
+
+static inline void tdma_write(struct tegra_adma *tdma, u32 reg, u32 val)
+{
+ writel(val, tdma->base_addr + reg);
+}
+
+static inline u32 tdma_read(struct tegra_adma *tdma, u32 reg)
+{
+ return readl(tdma->base_addr + reg);
+}
+
+static inline void tdma_ch_write(struct tegra_adma_chan *tdc, u32 reg, u32 val)
+{
+ writel(val, tdc->chan_addr + reg);
+}
+
+static inline u32 tdma_ch_read(struct tegra_adma_chan *tdc, u32 reg)
+{
+ return readl(tdc->chan_addr + reg);
+}
+
+static inline struct tegra_adma_chan *to_tegra_adma_chan(struct dma_chan *dc)
+{
+ return container_of(dc, struct tegra_adma_chan, vc.chan);
+}
+
+static inline struct tegra_adma_desc *to_tegra_adma_desc(
+ struct dma_async_tx_descriptor *td)
+{
+ return container_of(td, struct tegra_adma_desc, vd.tx);
+}
+
+static inline struct device *tdc2dev(struct tegra_adma_chan *tdc)
+{
+ return tdc->tdma->dev;
+}
+
+static void tegra_adma_desc_free(struct virt_dma_desc *vd)
+{
+ kfree(container_of(vd, struct tegra_adma_desc, vd));
+}
+
+static int tegra_adma_slave_config(struct dma_chan *dc,
+ struct dma_slave_config *sconfig)
+{
+ struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+
+ memcpy(&tdc->sconfig, sconfig, sizeof(*sconfig));
+
+ return 0;
+}
+
+static int tegra_adma_init(struct tegra_adma *tdma)
+{
+ u32 status;
+ int ret;
+
+ /* Clear any interrupts */
+ tdma_write(tdma, ADMA_GLOBAL_INT_CLEAR, 0x1);
+
+ /* Assert soft reset */
+ tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
+
+ /* Wait for reset to clear */
+ ret = readx_poll_timeout(readl,
+ tdma->base_addr + ADMA_GLOBAL_SOFT_RESET,
+ status, status == 0, 20, 10000);
+ if (ret)
+ return ret;
+
+ /* Enable global ADMA registers */
+ tdma_write(tdma, ADMA_GLOBAL_CMD, 1);
+
+ return 0;
+}
+
+static int tegra_adma_request_alloc(struct tegra_adma_chan *tdc,
+ enum dma_transfer_direction direction)
+{
+ struct tegra_adma *tdma = tdc->tdma;
+ unsigned int sreq_index = tdc->sreq_index;
+
+ if (tdc->sreq_reserved)
+ return tdc->sreq_dir == direction ? 0 : -EINVAL;
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ if (sreq_index > ADMA_CH_CTRL_TX_REQ_MAX) {
+ dev_err(tdma->dev, "invalid DMA request\n");
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(sreq_index, &tdma->tx_requests_reserved)) {
+ dev_err(tdma->dev, "DMA request reserved\n");
+ return -EINVAL;
+ }
+ break;
+
+ case DMA_DEV_TO_MEM:
+ if (sreq_index > ADMA_CH_CTRL_RX_REQ_MAX) {
+ dev_err(tdma->dev, "invalid DMA request\n");
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(sreq_index, &tdma->rx_requests_reserved)) {
+ dev_err(tdma->dev, "DMA request reserved\n");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
+ dma_chan_name(&tdc->vc.chan));
+ return -EINVAL;
+ }
+
+ tdc->sreq_dir = direction;
+ tdc->sreq_reserved = true;
+
+ return 0;
+}
+
+static void tegra_adma_request_free(struct tegra_adma_chan *tdc)
+{
+ struct tegra_adma *tdma = tdc->tdma;
+
+ if (!tdc->sreq_reserved)
+ return;
+
+ switch (tdc->sreq_dir) {
+ case DMA_MEM_TO_DEV:
+ clear_bit(tdc->sreq_index, &tdma->tx_requests_reserved);
+ break;
+
+ case DMA_DEV_TO_MEM:
+ clear_bit(tdc->sreq_index, &tdma->rx_requests_reserved);
+ break;
+
+ default:
+ dev_WARN(tdma->dev, "channel %s has invalid transfer type\n",
+ dma_chan_name(&tdc->vc.chan));
+ return;
+ }
+
+ tdc->sreq_reserved = false;
+}
+
+static u32 tegra_adma_irq_status(struct tegra_adma_chan *tdc)
+{
+ u32 status = tdma_ch_read(tdc, ADMA_CH_INT_STATUS);
+
+ return status & ADMA_CH_INT_STATUS_XFER_DONE;
+}
+
+static u32 tegra_adma_irq_clear(struct tegra_adma_chan *tdc)
+{
+ u32 status = tegra_adma_irq_status(tdc);
+
+ if (status)
+ tdma_ch_write(tdc, ADMA_CH_INT_CLEAR, status);
+
+ return status;
+}
+
+static void tegra_adma_stop(struct tegra_adma_chan *tdc)
+{
+ unsigned int status;
+
+ /* Disable ADMA */
+ tdma_ch_write(tdc, ADMA_CH_CMD, 0);
+
+ /* Clear interrupt status */
+ tegra_adma_irq_clear(tdc);
+
+ if (readx_poll_timeout_atomic(readl, tdc->chan_addr + ADMA_CH_STATUS,
+ status, !(status & ADMA_CH_STATUS_XFER_EN),
+ 20, 10000)) {
+ dev_err(tdc2dev(tdc), "unable to stop DMA channel\n");
+ return;
+ }
+
+ kfree(tdc->desc);
+ tdc->desc = NULL;
+}
+
+static void tegra_adma_start(struct tegra_adma_chan *tdc)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&tdc->vc);
+ struct tegra_adma_chan_regs *ch_regs;
+ struct tegra_adma_desc *desc;
+
+ if (!vd)
+ return;
+
+ list_del(&vd->node);
+
+ desc = to_tegra_adma_desc(&vd->tx);
+
+ if (!desc) {
+ dev_warn(tdc2dev(tdc), "unable to start DMA, no descriptor\n");
+ return;
+ }
+
+ ch_regs = &desc->ch_regs;
+
+ tdc->tx_buf_pos = 0;
+ tdc->tx_buf_count = 0;
+ tdma_ch_write(tdc, ADMA_CH_TC, ch_regs->tc);
+ tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_regs->src_addr);
+ tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_regs->trg_addr);
+ tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
+ tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_regs->config);
+
+ /* Start ADMA */
+ tdma_ch_write(tdc, ADMA_CH_CMD, 1);
+
+ tdc->desc = desc;
+}
+
+static unsigned int tegra_adma_get_residue(struct tegra_adma_chan *tdc)
+{
+ struct tegra_adma_desc *desc = tdc->desc;
+ unsigned int max = ADMA_CH_XFER_STATUS_COUNT_MASK + 1;
+ unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS);
+ unsigned int periods_remaining;
+
+ /*
+ * Handle wrap around of buffer count register
+ */
+ if (pos < tdc->tx_buf_pos)
+ tdc->tx_buf_count += pos + (max - tdc->tx_buf_pos);
+ else
+ tdc->tx_buf_count += pos - tdc->tx_buf_pos;
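+	/*
+	 * e.g. a previous position of 0xfffe and a new position of 0x0002
+	 * adds 0x0002 + (0x10000 - 0xfffe) = 4 completed transfers.
+	 */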
+
+ periods_remaining = tdc->tx_buf_count % desc->num_periods;
+ tdc->tx_buf_pos = pos;
+
+ return desc->buf_len - (periods_remaining * desc->period_len);
+}
+
+static irqreturn_t tegra_adma_isr(int irq, void *dev_id)
+{
+ struct tegra_adma_chan *tdc = dev_id;
+ unsigned long status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdc->vc.lock, flags);
+
+ status = tegra_adma_irq_clear(tdc);
+ if (status == 0 || !tdc->desc) {
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+ return IRQ_NONE;
+ }
+
+ vchan_cyclic_callback(&tdc->desc->vd);
+
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static void tegra_adma_issue_pending(struct dma_chan *dc)
+{
+ struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdc->vc.lock, flags);
+
+ if (vchan_issue_pending(&tdc->vc)) {
+ if (!tdc->desc)
+ tegra_adma_start(tdc);
+ }
+
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+}
+
+static int tegra_adma_terminate_all(struct dma_chan *dc)
+{
+ struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&tdc->vc.lock, flags);
+
+ if (tdc->desc)
+ tegra_adma_stop(tdc);
+
+ tegra_adma_request_free(tdc);
+ vchan_get_all_descriptors(&tdc->vc, &head);
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+ vchan_dma_desc_free_list(&tdc->vc, &head);
+
+ return 0;
+}
+
+static enum dma_status tegra_adma_tx_status(struct dma_chan *dc,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+ struct tegra_adma_desc *desc;
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+ unsigned int residual;
+
+ ret = dma_cookie_status(dc, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&tdc->vc.lock, flags);
+
+ vd = vchan_find_desc(&tdc->vc, cookie);
+ if (vd) {
+ desc = to_tegra_adma_desc(&vd->tx);
+ residual = desc->ch_regs.tc;
+ } else if (tdc->desc && tdc->desc->vd.tx.cookie == cookie) {
+ residual = tegra_adma_get_residue(tdc);
+ } else {
+ residual = 0;
+ }
+
+ spin_unlock_irqrestore(&tdc->vc.lock, flags);
+
+ dma_set_residue(txstate, residual);
+
+ return ret;
+}
+
+static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
+ struct tegra_adma_desc *desc,
+ dma_addr_t buf_addr,
+ enum dma_transfer_direction direction)
+{
+ struct tegra_adma_chan_regs *ch_regs = &desc->ch_regs;
+ unsigned int burst_size, adma_dir;
+
+ if (desc->num_periods > ADMA_CH_CONFIG_MAX_BUFS)
+ return -EINVAL;
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ adma_dir = ADMA_CH_CTRL_DIR_MEM2AHUB;
+ burst_size = fls(tdc->sconfig.dst_maxburst);
+ ch_regs->config = ADMA_CH_CONFIG_SRC_BUF(desc->num_periods - 1);
+ ch_regs->ctrl = ADMA_CH_CTRL_TX_REQ(tdc->sreq_index);
+ ch_regs->src_addr = buf_addr;
+ break;
+
+ case DMA_DEV_TO_MEM:
+ adma_dir = ADMA_CH_CTRL_DIR_AHUB2MEM;
+ burst_size = fls(tdc->sconfig.src_maxburst);
+ ch_regs->config = ADMA_CH_CONFIG_TRG_BUF(desc->num_periods - 1);
+ ch_regs->ctrl = ADMA_CH_CTRL_RX_REQ(tdc->sreq_index);
+ ch_regs->trg_addr = buf_addr;
+ break;
+
+ default:
+ dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
+ return -EINVAL;
+ }
+
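+	/*
+	 * fls() maps the burst word count to the register encoding, e.g. a
+	 * maxburst of 8 gives fls(8) = 4; zero or oversized values are
+	 * clamped to the 16-word maximum encoding below.
+	 */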
+ if (!burst_size || burst_size > ADMA_CH_CONFIG_BURST_16)
+ burst_size = ADMA_CH_CONFIG_BURST_16;
+
+ ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) |
+ ADMA_CH_CTRL_MODE_CONTINUOUS |
+ ADMA_CH_CTRL_FLOWCTRL_EN;
+ ch_regs->config |= ADMA_CH_CONFIG_BURST_SIZE(burst_size);
+ ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
+ ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT;
+ ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
+
+ return tegra_adma_request_alloc(tdc, direction);
+}
+
+static struct dma_async_tx_descriptor *tegra_adma_prep_dma_cyclic(
+ struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+ struct tegra_adma_desc *desc = NULL;
+
+ if (!buf_len || !period_len || period_len > ADMA_CH_TC_COUNT_MASK) {
+ dev_err(tdc2dev(tdc), "invalid buffer/period len\n");
+ return NULL;
+ }
+
+ if (buf_len % period_len) {
+ dev_err(tdc2dev(tdc), "buf_len not a multiple of period_len\n");
+ return NULL;
+ }
+
+ if (!IS_ALIGNED(buf_addr, 4)) {
+ dev_err(tdc2dev(tdc), "invalid buffer alignment\n");
+ return NULL;
+ }
+
+ desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+ if (!desc)
+ return NULL;
+
+ desc->buf_len = buf_len;
+ desc->period_len = period_len;
+ desc->num_periods = buf_len / period_len;
+
+ if (tegra_adma_set_xfer_params(tdc, desc, buf_addr, direction)) {
+ kfree(desc);
+ return NULL;
+ }
+
+ return vchan_tx_prep(&tdc->vc, &desc->vd, flags);
+}
+
+static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
+{
+ struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+ int ret;
+
+ ret = request_irq(tdc->irq, tegra_adma_isr, 0, dma_chan_name(dc), tdc);
+ if (ret) {
+ dev_err(tdc2dev(tdc), "failed to get interrupt for %s\n",
+ dma_chan_name(dc));
+ return ret;
+ }
+
+ ret = pm_runtime_get_sync(tdc2dev(tdc));
+ if (ret < 0) {
+ free_irq(tdc->irq, tdc);
+ return ret;
+ }
+
+ dma_cookie_init(&tdc->vc.chan);
+
+ return 0;
+}
+
+static void tegra_adma_free_chan_resources(struct dma_chan *dc)
+{
+ struct tegra_adma_chan *tdc = to_tegra_adma_chan(dc);
+
+ tegra_adma_terminate_all(dc);
+ vchan_free_chan_resources(&tdc->vc);
+ tasklet_kill(&tdc->vc.task);
+ free_irq(tdc->irq, tdc);
+ pm_runtime_put(tdc2dev(tdc));
+
+ tdc->sreq_index = 0;
+ tdc->sreq_dir = DMA_TRANS_NONE;
+}
+
+static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct tegra_adma *tdma = ofdma->of_dma_data;
+ struct tegra_adma_chan *tdc;
+ struct dma_chan *chan;
+ unsigned int sreq_index;
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ sreq_index = dma_spec->args[0];
+
+ if (sreq_index == 0) {
+ dev_err(tdma->dev, "DMA request must not be 0\n");
+ return NULL;
+ }
+
+ chan = dma_get_any_slave_channel(&tdma->dma_dev);
+ if (!chan)
+ return NULL;
+
+ tdc = to_tegra_adma_chan(chan);
+ tdc->sreq_index = sreq_index;
+
+ return chan;
+}
+
+static int tegra_adma_runtime_suspend(struct device *dev)
+{
+ struct tegra_adma *tdma = dev_get_drvdata(dev);
+
+ tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
+
+ return pm_clk_suspend(dev);
+}
+
+static int tegra_adma_runtime_resume(struct device *dev)
+{
+ struct tegra_adma *tdma = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_clk_resume(dev);
+ if (ret)
+ return ret;
+
+ tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+
+ return 0;
+}
+
+static const struct tegra_adma_chip_data tegra210_chip_data = {
+ .nr_channels = 22,
+};
+
+static const struct of_device_id tegra_adma_of_match[] = {
+ { .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_adma_of_match);
+
+static int tegra_adma_probe(struct platform_device *pdev)
+{
+ const struct tegra_adma_chip_data *cdata;
+ struct tegra_adma *tdma;
+ struct resource *res;
+ struct clk *clk;
+ int ret, i;
+
+ cdata = of_device_get_match_data(&pdev->dev);
+ if (!cdata) {
+ dev_err(&pdev->dev, "device match data not found\n");
+ return -ENODEV;
+ }
+
+ tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
+ sizeof(struct tegra_adma_chan), GFP_KERNEL);
+ if (!tdma)
+ return -ENOMEM;
+
+ tdma->dev = &pdev->dev;
+ tdma->nr_channels = cdata->nr_channels;
+ platform_set_drvdata(pdev, tdma);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tdma->base_addr))
+ return PTR_ERR(tdma->base_addr);
+
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ return ret;
+
+ clk = clk_get(&pdev->dev, "d_audio");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "ADMA clock not found\n");
+ ret = PTR_ERR(clk);
+ goto clk_destroy;
+ }
+
+ ret = pm_clk_add_clk(&pdev->dev, clk);
+ if (ret) {
+ clk_put(clk);
+ goto clk_destroy;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto rpm_disable;
+
+ ret = tegra_adma_init(tdma);
+ if (ret)
+ goto rpm_put;
+
+ INIT_LIST_HEAD(&tdma->dma_dev.channels);
+ for (i = 0; i < tdma->nr_channels; i++) {
+ struct tegra_adma_chan *tdc = &tdma->channels[i];
+
+ tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);
+
+ tdc->irq = of_irq_get(pdev->dev.of_node, i);
+ if (tdc->irq < 0) {
+ ret = tdc->irq;
+ goto irq_dispose;
+ }
+
+ vchan_init(&tdc->vc, &tdma->dma_dev);
+ tdc->vc.desc_free = tegra_adma_desc_free;
+ tdc->tdma = tdma;
+ }
+
+ dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
+
+ tdma->dma_dev.dev = &pdev->dev;
+ tdma->dma_dev.device_alloc_chan_resources =
+ tegra_adma_alloc_chan_resources;
+ tdma->dma_dev.device_free_chan_resources =
+ tegra_adma_free_chan_resources;
+ tdma->dma_dev.device_issue_pending = tegra_adma_issue_pending;
+ tdma->dma_dev.device_prep_dma_cyclic = tegra_adma_prep_dma_cyclic;
+ tdma->dma_dev.device_config = tegra_adma_slave_config;
+ tdma->dma_dev.device_tx_status = tegra_adma_tx_status;
+ tdma->dma_dev.device_terminate_all = tegra_adma_terminate_all;
+ tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+ ret = dma_async_device_register(&tdma->dma_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
+ goto irq_dispose;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ tegra_dma_of_xlate, tdma);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "ADMA OF registration failed %d\n", ret);
+ goto dma_remove;
+ }
+
+ pm_runtime_put(&pdev->dev);
+
+ dev_info(&pdev->dev, "Tegra210 ADMA driver registered %d channels\n",
+ tdma->nr_channels);
+
+ return 0;
+
+dma_remove:
+ dma_async_device_unregister(&tdma->dma_dev);
+irq_dispose:
+ while (--i >= 0)
+ irq_dispose_mapping(tdma->channels[i].irq);
+rpm_put:
+ pm_runtime_put_sync(&pdev->dev);
+rpm_disable:
+ pm_runtime_disable(&pdev->dev);
+clk_destroy:
+ pm_clk_destroy(&pdev->dev);
+
+ return ret;
+}
+
+static int tegra_adma_remove(struct platform_device *pdev)
+{
+ struct tegra_adma *tdma = platform_get_drvdata(pdev);
+ int i;
+
+ dma_async_device_unregister(&tdma->dma_dev);
+
+ for (i = 0; i < tdma->nr_channels; ++i)
+ irq_dispose_mapping(tdma->channels[i].irq);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_clk_destroy(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_adma_pm_suspend(struct device *dev)
+{
+ return pm_runtime_suspended(dev) == false;
+}
+#endif
+
+static const struct dev_pm_ops tegra_adma_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_adma_runtime_suspend,
+ tegra_adma_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_adma_pm_suspend, NULL)
+};
+
+static struct platform_driver tegra_admac_driver = {
+ .driver = {
+ .name = "tegra-adma",
+ .pm = &tegra_adma_dev_pm_ops,
+ .of_match_table = tegra_adma_of_match,
+ },
+ .probe = tegra_adma_probe,
+ .remove = tegra_adma_remove,
+};
+
+module_platform_driver(tegra_admac_driver);
+
+MODULE_ALIAS("platform:tegra210-adma");
+MODULE_DESCRIPTION("NVIDIA Tegra ADMA driver");
+MODULE_AUTHOR("Dara Ramesh <dramesh@nvidia.com>");
+MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index ef67f278e..df9118540 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -16,6 +16,15 @@
* video device (S2MM). Initialization, status, interrupt and management
* registers are accessed through an AXI4-Lite slave interface.
*
+ * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
+ * provides high-bandwidth one-dimensional direct memory access between memory
+ * and AXI4-Stream target peripherals. It supports one receive and one
+ * transmit channel, both of them optional at synthesis time.
+ *
+ * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
+ * Access (DMA) between a memory-mapped source address and a memory-mapped
+ * destination address.
+ *
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
@@ -35,116 +44,138 @@
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include "../dmaengine.h"
/* Register/Descriptor Offsets */
-#define XILINX_VDMA_MM2S_CTRL_OFFSET 0x0000
-#define XILINX_VDMA_S2MM_CTRL_OFFSET 0x0030
+#define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
+#define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
/* Control Registers */
-#define XILINX_VDMA_REG_DMACR 0x0000
-#define XILINX_VDMA_DMACR_DELAY_MAX 0xff
-#define XILINX_VDMA_DMACR_DELAY_SHIFT 24
-#define XILINX_VDMA_DMACR_FRAME_COUNT_MAX 0xff
-#define XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT 16
-#define XILINX_VDMA_DMACR_ERR_IRQ BIT(14)
-#define XILINX_VDMA_DMACR_DLY_CNT_IRQ BIT(13)
-#define XILINX_VDMA_DMACR_FRM_CNT_IRQ BIT(12)
-#define XILINX_VDMA_DMACR_MASTER_SHIFT 8
-#define XILINX_VDMA_DMACR_FSYNCSRC_SHIFT 5
-#define XILINX_VDMA_DMACR_FRAMECNT_EN BIT(4)
-#define XILINX_VDMA_DMACR_GENLOCK_EN BIT(3)
-#define XILINX_VDMA_DMACR_RESET BIT(2)
-#define XILINX_VDMA_DMACR_CIRC_EN BIT(1)
-#define XILINX_VDMA_DMACR_RUNSTOP BIT(0)
-#define XILINX_VDMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
-
-#define XILINX_VDMA_REG_DMASR 0x0004
-#define XILINX_VDMA_DMASR_EOL_LATE_ERR BIT(15)
-#define XILINX_VDMA_DMASR_ERR_IRQ BIT(14)
-#define XILINX_VDMA_DMASR_DLY_CNT_IRQ BIT(13)
-#define XILINX_VDMA_DMASR_FRM_CNT_IRQ BIT(12)
-#define XILINX_VDMA_DMASR_SOF_LATE_ERR BIT(11)
-#define XILINX_VDMA_DMASR_SG_DEC_ERR BIT(10)
-#define XILINX_VDMA_DMASR_SG_SLV_ERR BIT(9)
-#define XILINX_VDMA_DMASR_EOF_EARLY_ERR BIT(8)
-#define XILINX_VDMA_DMASR_SOF_EARLY_ERR BIT(7)
-#define XILINX_VDMA_DMASR_DMA_DEC_ERR BIT(6)
-#define XILINX_VDMA_DMASR_DMA_SLAVE_ERR BIT(5)
-#define XILINX_VDMA_DMASR_DMA_INT_ERR BIT(4)
-#define XILINX_VDMA_DMASR_IDLE BIT(1)
-#define XILINX_VDMA_DMASR_HALTED BIT(0)
-#define XILINX_VDMA_DMASR_DELAY_MASK GENMASK(31, 24)
-#define XILINX_VDMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
-
-#define XILINX_VDMA_REG_CURDESC 0x0008
-#define XILINX_VDMA_REG_TAILDESC 0x0010
-#define XILINX_VDMA_REG_REG_INDEX 0x0014
-#define XILINX_VDMA_REG_FRMSTORE 0x0018
-#define XILINX_VDMA_REG_THRESHOLD 0x001c
-#define XILINX_VDMA_REG_FRMPTR_STS 0x0024
-#define XILINX_VDMA_REG_PARK_PTR 0x0028
-#define XILINX_VDMA_PARK_PTR_WR_REF_SHIFT 8
-#define XILINX_VDMA_PARK_PTR_RD_REF_SHIFT 0
-#define XILINX_VDMA_REG_VDMA_VERSION 0x002c
+#define XILINX_DMA_REG_DMACR 0x0000
+#define XILINX_DMA_DMACR_DELAY_MAX 0xff
+#define XILINX_DMA_DMACR_DELAY_SHIFT 24
+#define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
+#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
+#define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
+#define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
+#define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
+#define XILINX_DMA_DMACR_MASTER_SHIFT 8
+#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
+#define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
+#define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
+#define XILINX_DMA_DMACR_RESET BIT(2)
+#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
+#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
+#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
+
+#define XILINX_DMA_REG_DMASR 0x0004
+#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
+#define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
+#define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
+#define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
+#define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
+#define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
+#define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
+#define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
+#define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
+#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
+#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
+#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
+#define XILINX_DMA_DMASR_IDLE BIT(1)
+#define XILINX_DMA_DMASR_HALTED BIT(0)
+#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
+#define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
+
+#define XILINX_DMA_REG_CURDESC 0x0008
+#define XILINX_DMA_REG_TAILDESC 0x0010
+#define XILINX_DMA_REG_REG_INDEX 0x0014
+#define XILINX_DMA_REG_FRMSTORE 0x0018
+#define XILINX_DMA_REG_THRESHOLD 0x001c
+#define XILINX_DMA_REG_FRMPTR_STS 0x0024
+#define XILINX_DMA_REG_PARK_PTR 0x0028
+#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
+#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
+#define XILINX_DMA_REG_VDMA_VERSION 0x002c
/* Register Direct Mode Registers */
-#define XILINX_VDMA_REG_VSIZE 0x0000
-#define XILINX_VDMA_REG_HSIZE 0x0004
+#define XILINX_DMA_REG_VSIZE 0x0000
+#define XILINX_DMA_REG_HSIZE 0x0004
-#define XILINX_VDMA_REG_FRMDLY_STRIDE 0x0008
-#define XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
-#define XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
+#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
+#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
+#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
+#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
/* HW specific definitions */
-#define XILINX_VDMA_MAX_CHANS_PER_DEVICE 0x2
-
-#define XILINX_VDMA_DMAXR_ALL_IRQ_MASK \
- (XILINX_VDMA_DMASR_FRM_CNT_IRQ | \
- XILINX_VDMA_DMASR_DLY_CNT_IRQ | \
- XILINX_VDMA_DMASR_ERR_IRQ)
-
-#define XILINX_VDMA_DMASR_ALL_ERR_MASK \
- (XILINX_VDMA_DMASR_EOL_LATE_ERR | \
- XILINX_VDMA_DMASR_SOF_LATE_ERR | \
- XILINX_VDMA_DMASR_SG_DEC_ERR | \
- XILINX_VDMA_DMASR_SG_SLV_ERR | \
- XILINX_VDMA_DMASR_EOF_EARLY_ERR | \
- XILINX_VDMA_DMASR_SOF_EARLY_ERR | \
- XILINX_VDMA_DMASR_DMA_DEC_ERR | \
- XILINX_VDMA_DMASR_DMA_SLAVE_ERR | \
- XILINX_VDMA_DMASR_DMA_INT_ERR)
+#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
+
+#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
+ (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
+ XILINX_DMA_DMASR_DLY_CNT_IRQ | \
+ XILINX_DMA_DMASR_ERR_IRQ)
+
+#define XILINX_DMA_DMASR_ALL_ERR_MASK \
+ (XILINX_DMA_DMASR_EOL_LATE_ERR | \
+ XILINX_DMA_DMASR_SOF_LATE_ERR | \
+ XILINX_DMA_DMASR_SG_DEC_ERR | \
+ XILINX_DMA_DMASR_SG_SLV_ERR | \
+ XILINX_DMA_DMASR_EOF_EARLY_ERR | \
+ XILINX_DMA_DMASR_SOF_EARLY_ERR | \
+ XILINX_DMA_DMASR_DMA_DEC_ERR | \
+ XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
+ XILINX_DMA_DMASR_DMA_INT_ERR)
/*
* Recoverable errors are DMA Internal error, SOF Early, EOF Early
* and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
* is enabled in the h/w system.
*/
-#define XILINX_VDMA_DMASR_ERR_RECOVER_MASK \
- (XILINX_VDMA_DMASR_SOF_LATE_ERR | \
- XILINX_VDMA_DMASR_EOF_EARLY_ERR | \
- XILINX_VDMA_DMASR_SOF_EARLY_ERR | \
- XILINX_VDMA_DMASR_DMA_INT_ERR)
+#define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
+ (XILINX_DMA_DMASR_SOF_LATE_ERR | \
+ XILINX_DMA_DMASR_EOF_EARLY_ERR | \
+ XILINX_DMA_DMASR_SOF_EARLY_ERR | \
+ XILINX_DMA_DMASR_DMA_INT_ERR)
/* Axi VDMA Flush on Fsync bits */
-#define XILINX_VDMA_FLUSH_S2MM 3
-#define XILINX_VDMA_FLUSH_MM2S 2
-#define XILINX_VDMA_FLUSH_BOTH 1
+#define XILINX_DMA_FLUSH_S2MM 3
+#define XILINX_DMA_FLUSH_MM2S 2
+#define XILINX_DMA_FLUSH_BOTH 1
/* Delay loop counter to prevent hardware failure */
-#define XILINX_VDMA_LOOP_COUNT 1000000
+#define XILINX_DMA_LOOP_COUNT 1000000
+
+/* AXI DMA Specific Registers/Offsets */
+#define XILINX_DMA_REG_SRCDSTADDR 0x18
+#define XILINX_DMA_REG_BTT 0x28
+
+/* AXI DMA Specific Masks/Bit fields */
+#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0)
+#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
+#define XILINX_DMA_CR_COALESCE_SHIFT 16
+#define XILINX_DMA_BD_SOP BIT(27)
+#define XILINX_DMA_BD_EOP BIT(26)
+#define XILINX_DMA_COALESCE_MAX 255
+#define XILINX_DMA_NUM_APP_WORDS 5
+
+/* AXI CDMA Specific Registers/Offsets */
+#define XILINX_CDMA_REG_SRCADDR 0x18
+#define XILINX_CDMA_REG_DSTADDR 0x20
+
+/* AXI CDMA Specific Masks */
+#define XILINX_CDMA_CR_SGMODE BIT(3)
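
A sketch of how the coalesce field macros above are used: a read-modify-write
of DMACR that clears bits 23:16 and inserts a new count (count is the desired
per-interrupt descriptor count; dma_ctrl_read/dma_ctrl_write are the helpers
introduced later in this patch):

	u32 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	reg &= ~XILINX_DMA_CR_COALESCE_MAX;		/* clear bits 23:16 */
	reg |= count << XILINX_DMA_CR_COALESCE_SHIFT;	/* insert new count */
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);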
/**
* struct xilinx_vdma_desc_hw - Hardware Descriptor
* @next_desc: Next Descriptor Pointer @0x00
* @pad1: Reserved @0x04
* @buf_addr: Buffer address @0x08
- * @pad2: Reserved @0x0C
+ * @buf_addr_msb: MSB of Buffer address @0x0C
* @vsize: Vertical Size @0x10
* @hsize: Horizontal Size @0x14
* @stride: Number of bytes between the first
@@ -154,13 +185,59 @@ struct xilinx_vdma_desc_hw {
u32 next_desc;
u32 pad1;
u32 buf_addr;
- u32 pad2;
+ u32 buf_addr_msb;
u32 vsize;
u32 hsize;
u32 stride;
} __aligned(64);
/**
+ * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @pad1: Reserved @0x04
+ * @buf_addr: Buffer address @0x08
+ * @pad2: Reserved @0x0C
+ * @pad3: Reserved @0x10
+ * @pad4: Reserved @0x14
+ * @control: Control field @0x18
+ * @status: Status field @0x1C
+ * @app: APP Fields @0x20 - 0x30
+ */
+struct xilinx_axidma_desc_hw {
+ u32 next_desc;
+ u32 pad1;
+ u32 buf_addr;
+ u32 pad2;
+ u32 pad3;
+ u32 pad4;
+ u32 control;
+ u32 status;
+ u32 app[XILINX_DMA_NUM_APP_WORDS];
+} __aligned(64);
+
+/**
+ * struct xilinx_cdma_desc_hw - Hardware Descriptor
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @pad1: Reserved @0x04
+ * @src_addr: Source address @0x08
+ * @pad2: Reserved @0x0C
+ * @dest_addr: Destination address @0x10
+ * @pad3: Reserved @0x14
+ * @control: Control field @0x18
+ * @status: Status field @0x1C
+ */
+struct xilinx_cdma_desc_hw {
+ u32 next_desc;
+ u32 pad1;
+ u32 src_addr;
+ u32 pad2;
+ u32 dest_addr;
+ u32 pad3;
+ u32 control;
+ u32 status;
+} __aligned(64);
+
+/**
* struct xilinx_vdma_tx_segment - Descriptor segment
* @hw: Hardware descriptor
* @node: Node in the descriptor segments list
@@ -173,19 +250,43 @@ struct xilinx_vdma_tx_segment {
} __aligned(64);
/**
- * struct xilinx_vdma_tx_descriptor - Per Transaction structure
+ * struct xilinx_axidma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_axidma_tx_segment {
+ struct xilinx_axidma_desc_hw hw;
+ struct list_head node;
+ dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_cdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_cdma_tx_segment {
+ struct xilinx_cdma_desc_hw hw;
+ struct list_head node;
+ dma_addr_t phys;
+} __aligned(64);
+
+/**
+ * struct xilinx_dma_tx_descriptor - Per Transaction structure
* @async_tx: Async transaction descriptor
* @segments: TX segments list
* @node: Node in the channel descriptors list
*/
-struct xilinx_vdma_tx_descriptor {
+struct xilinx_dma_tx_descriptor {
struct dma_async_tx_descriptor async_tx;
struct list_head segments;
struct list_head node;
};
/**
- * struct xilinx_vdma_chan - Driver specific VDMA channel structure
+ * struct xilinx_dma_chan - Driver specific DMA channel structure
* @xdev: Driver specific device structure
* @ctrl_offset: Control registers offset
* @desc_offset: TX descriptor registers offset
@@ -207,9 +308,14 @@ struct xilinx_vdma_tx_descriptor {
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
* @desc_pendingcount: Descriptor pending count
+ * @ext_addr: Indicates whether 64-bit addressing is supported by the DMA channel
+ * @desc_submitcount: Descriptor h/w submitted count
+ * @residue: Residue for AXI DMA
+ * @seg_v: Statically allocated segments base
+ * @start_transfer: Differentiates between the DMA IPs' transfer start routines
*/
-struct xilinx_vdma_chan {
- struct xilinx_vdma_device *xdev;
+struct xilinx_dma_chan {
+ struct xilinx_dma_device *xdev;
u32 ctrl_offset;
u32 desc_offset;
spinlock_t lock;
@@ -230,73 +336,122 @@ struct xilinx_vdma_chan {
struct xilinx_vdma_config config;
bool flush_on_fsync;
u32 desc_pendingcount;
+ bool ext_addr;
+ u32 desc_submitcount;
+ u32 residue;
+ struct xilinx_axidma_tx_segment *seg_v;
+ void (*start_transfer)(struct xilinx_dma_chan *chan);
+};
+
+struct xilinx_dma_config {
+ enum xdma_ip_type dmatype;
+ int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **tx_clk, struct clk **txs_clk,
+ struct clk **rx_clk, struct clk **rxs_clk);
};
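
For illustration, a per-IP match-data instance of the config struct above
(the instance and clk_init helper names here are hypothetical; the real
tables appear further down in this patch):

	static const struct xilinx_dma_config example_axidma_config = {
		.dmatype  = XDMA_TYPE_AXIDMA,
		.clk_init = example_axidma_clk_init,
	};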
/**
- * struct xilinx_vdma_device - VDMA device structure
+ * struct xilinx_dma_device - DMA device structure
* @regs: I/O mapped base address
* @dev: Device Structure
* @common: DMA device structure
- * @chan: Driver specific VDMA channel
+ * @chan: Driver specific DMA channel
* @has_sg: Specifies whether Scatter-Gather is present or not
* @flush_on_fsync: Flush on frame sync
+ * @ext_addr: Indicates whether 64-bit addressing is supported by the DMA device
+ * @pdev: Platform device structure pointer
+ * @dma_config: DMA config structure
+ * @axi_clk: DMA Axi4-lite interface clock
+ * @tx_clk: DMA mm2s clock
+ * @txs_clk: DMA mm2s stream clock
+ * @rx_clk: DMA s2mm clock
+ * @rxs_clk: DMA s2mm stream clock
*/
-struct xilinx_vdma_device {
+struct xilinx_dma_device {
void __iomem *regs;
struct device *dev;
struct dma_device common;
- struct xilinx_vdma_chan *chan[XILINX_VDMA_MAX_CHANS_PER_DEVICE];
+ struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
bool has_sg;
u32 flush_on_fsync;
+ bool ext_addr;
+ struct platform_device *pdev;
+ const struct xilinx_dma_config *dma_config;
+ struct clk *axi_clk;
+ struct clk *tx_clk;
+ struct clk *txs_clk;
+ struct clk *rx_clk;
+ struct clk *rxs_clk;
};
/* Macros */
#define to_xilinx_chan(chan) \
- container_of(chan, struct xilinx_vdma_chan, common)
-#define to_vdma_tx_descriptor(tx) \
- container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)
-#define xilinx_vdma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
+ container_of(chan, struct xilinx_dma_chan, common)
+#define to_dma_tx_descriptor(tx) \
+ container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
+#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
cond, delay_us, timeout_us)
/* IO accessors */
-static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
+static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
return ioread32(chan->xdev->regs + reg);
}
-static inline void vdma_write(struct xilinx_vdma_chan *chan, u32 reg, u32 value)
+static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
iowrite32(value, chan->xdev->regs + reg);
}
-static inline void vdma_desc_write(struct xilinx_vdma_chan *chan, u32 reg,
+static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
u32 value)
{
- vdma_write(chan, chan->desc_offset + reg, value);
+ dma_write(chan, chan->desc_offset + reg, value);
}
-static inline u32 vdma_ctrl_read(struct xilinx_vdma_chan *chan, u32 reg)
+static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
- return vdma_read(chan, chan->ctrl_offset + reg);
+ return dma_read(chan, chan->ctrl_offset + reg);
}
-static inline void vdma_ctrl_write(struct xilinx_vdma_chan *chan, u32 reg,
+static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
u32 value)
{
- vdma_write(chan, chan->ctrl_offset + reg, value);
+ dma_write(chan, chan->ctrl_offset + reg, value);
}
-static inline void vdma_ctrl_clr(struct xilinx_vdma_chan *chan, u32 reg,
+static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
u32 clr)
{
- vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) & ~clr);
+ dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}
-static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg,
+static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
u32 set)
{
- vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) | set);
+ dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
+}
+
+/**
+ * vdma_desc_write_64 - 64-bit descriptor write
+ * @chan: Driver specific VDMA channel
+ * @reg: Register to write
+ * @value_lsb: lower address of the descriptor.
+ * @value_msb: upper address of the descriptor.
+ *
+ * Since the vdma driver writes to register offsets that are not a
+ * multiple of 64 bits (e.g. 0x5c), the value is written as two separate
+ * 32-bit writes instead of a single 64-bit register write.
+ */
+static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
+ u32 value_lsb, u32 value_msb)
+{
+	/* Write the lsb 32 bits */
+ writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
+
+ /* Write the msb 32 bits */
+ writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}
/* -----------------------------------------------------------------------------
@@ -305,16 +460,59 @@ static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg,
/**
* xilinx_vdma_alloc_tx_segment - Allocate transaction segment
- * @chan: Driver specific VDMA channel
+ * @chan: Driver specific DMA channel
*
* Return: The allocated segment on success and NULL on failure.
*/
static struct xilinx_vdma_tx_segment *
-xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan)
+xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
struct xilinx_vdma_tx_segment *segment;
dma_addr_t phys;
+ segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
+ if (!segment)
+ return NULL;
+
+ segment->phys = phys;
+
+ return segment;
+}
+
+/**
+ * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_cdma_tx_segment *
+xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_cdma_tx_segment *segment;
+ dma_addr_t phys;
+
+ segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
+ if (!segment)
+ return NULL;
+
+ memset(segment, 0, sizeof(*segment));
+ segment->phys = phys;
+
+ return segment;
+}
+
+/**
+ * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_axidma_tx_segment *
+xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_axidma_tx_segment *segment;
+ dma_addr_t phys;
+
segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
if (!segment)
return NULL;
@@ -326,26 +524,48 @@ xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan)
}
/**
+ * xilinx_dma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
+ struct xilinx_axidma_tx_segment *segment)
+{
+ dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
+ * xilinx_cdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
+ struct xilinx_cdma_tx_segment *segment)
+{
+ dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
* xilinx_vdma_free_tx_segment - Free transaction segment
- * @chan: Driver specific VDMA channel
- * @segment: VDMA transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
*/
-static void xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan,
+static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
struct xilinx_vdma_tx_segment *segment)
{
dma_pool_free(chan->desc_pool, segment, segment->phys);
}
/**
- * xilinx_vdma_tx_descriptor - Allocate transaction descriptor
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_tx_descriptor - Allocate transaction descriptor
+ * @chan: Driver specific DMA channel
*
* Return: The allocated descriptor on success and NULL on failure.
*/
-static struct xilinx_vdma_tx_descriptor *
-xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
+static struct xilinx_dma_tx_descriptor *
+xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
- struct xilinx_vdma_tx_descriptor *desc;
+ struct xilinx_dma_tx_descriptor *desc;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
@@ -357,22 +577,38 @@ xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
}
/**
- * xilinx_vdma_free_tx_descriptor - Free transaction descriptor
- * @chan: Driver specific VDMA channel
- * @desc: VDMA transaction descriptor
+ * xilinx_dma_free_tx_descriptor - Free transaction descriptor
+ * @chan: Driver specific DMA channel
+ * @desc: DMA transaction descriptor
*/
static void
-xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan,
- struct xilinx_vdma_tx_descriptor *desc)
+xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
+ struct xilinx_dma_tx_descriptor *desc)
{
struct xilinx_vdma_tx_segment *segment, *next;
+ struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
+ struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
if (!desc)
return;
- list_for_each_entry_safe(segment, next, &desc->segments, node) {
- list_del(&segment->node);
- xilinx_vdma_free_tx_segment(chan, segment);
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ list_for_each_entry_safe(segment, next, &desc->segments, node) {
+ list_del(&segment->node);
+ xilinx_vdma_free_tx_segment(chan, segment);
+ }
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ list_for_each_entry_safe(cdma_segment, cdma_next,
+ &desc->segments, node) {
+ list_del(&cdma_segment->node);
+ xilinx_cdma_free_tx_segment(chan, cdma_segment);
+ }
+ } else {
+ list_for_each_entry_safe(axidma_segment, axidma_next,
+ &desc->segments, node) {
+ list_del(&axidma_segment->node);
+ xilinx_dma_free_tx_segment(chan, axidma_segment);
+ }
}
kfree(desc);
@@ -381,60 +617,62 @@ xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan,
/* Required functions */
/**
- * xilinx_vdma_free_desc_list - Free descriptors list
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_free_desc_list - Free descriptors list
+ * @chan: Driver specific DMA channel
* @list: List to parse and delete the descriptor
*/
-static void xilinx_vdma_free_desc_list(struct xilinx_vdma_chan *chan,
+static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
struct list_head *list)
{
- struct xilinx_vdma_tx_descriptor *desc, *next;
+ struct xilinx_dma_tx_descriptor *desc, *next;
list_for_each_entry_safe(desc, next, list, node) {
list_del(&desc->node);
- xilinx_vdma_free_tx_descriptor(chan, desc);
+ xilinx_dma_free_tx_descriptor(chan, desc);
}
}
/**
- * xilinx_vdma_free_descriptors - Free channel descriptors
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_free_descriptors - Free channel descriptors
+ * @chan: Driver specific DMA channel
*/
-static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
+static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
- xilinx_vdma_free_desc_list(chan, &chan->pending_list);
- xilinx_vdma_free_desc_list(chan, &chan->done_list);
- xilinx_vdma_free_desc_list(chan, &chan->active_list);
+ xilinx_dma_free_desc_list(chan, &chan->pending_list);
+ xilinx_dma_free_desc_list(chan, &chan->done_list);
+ xilinx_dma_free_desc_list(chan, &chan->active_list);
spin_unlock_irqrestore(&chan->lock, flags);
}
/**
- * xilinx_vdma_free_chan_resources - Free channel resources
+ * xilinx_dma_free_chan_resources - Free channel resources
* @dchan: DMA channel
*/
-static void xilinx_vdma_free_chan_resources(struct dma_chan *dchan)
+static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
- struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
dev_dbg(chan->dev, "Free all channel resources.\n");
- xilinx_vdma_free_descriptors(chan);
+ xilinx_dma_free_descriptors(chan);
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ xilinx_dma_free_tx_segment(chan, chan->seg_v);
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
}
/**
- * xilinx_vdma_chan_desc_cleanup - Clean channel descriptors
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
+ * @chan: Driver specific DMA channel
*/
-static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan)
+static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
- struct xilinx_vdma_tx_descriptor *desc, *next;
+ struct xilinx_dma_tx_descriptor *desc, *next;
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
@@ -457,32 +695,32 @@ static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan)
/* Run any dependencies, then free the descriptor */
dma_run_dependencies(&desc->async_tx);
- xilinx_vdma_free_tx_descriptor(chan, desc);
+ xilinx_dma_free_tx_descriptor(chan, desc);
}
spin_unlock_irqrestore(&chan->lock, flags);
}
/**
- * xilinx_vdma_do_tasklet - Schedule completion tasklet
- * @data: Pointer to the Xilinx VDMA channel structure
+ * xilinx_dma_do_tasklet - Schedule completion tasklet
+ * @data: Pointer to the Xilinx DMA channel structure
*/
-static void xilinx_vdma_do_tasklet(unsigned long data)
+static void xilinx_dma_do_tasklet(unsigned long data)
{
- struct xilinx_vdma_chan *chan = (struct xilinx_vdma_chan *)data;
+ struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
- xilinx_vdma_chan_desc_cleanup(chan);
+ xilinx_dma_chan_desc_cleanup(chan);
}
/**
- * xilinx_vdma_alloc_chan_resources - Allocate channel resources
+ * xilinx_dma_alloc_chan_resources - Allocate channel resources
* @dchan: DMA channel
*
* Return: '0' on success and failure value on error
*/
-static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan)
+static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
- struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
/* Has this channel already been allocated? */
if (chan->desc_pool)
@@ -492,10 +730,26 @@ static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan)
	 * We need the descriptor to be aligned to 64 bytes
	 * for meeting the Xilinx VDMA specification requirement.
*/
- chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
- chan->dev,
- sizeof(struct xilinx_vdma_tx_segment),
- __alignof__(struct xilinx_vdma_tx_segment), 0);
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
+ chan->dev,
+ sizeof(struct xilinx_axidma_tx_segment),
+ __alignof__(struct xilinx_axidma_tx_segment),
+ 0);
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
+ chan->dev,
+ sizeof(struct xilinx_cdma_tx_segment),
+ __alignof__(struct xilinx_cdma_tx_segment),
+ 0);
+ } else {
+ chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
+ chan->dev,
+ sizeof(struct xilinx_vdma_tx_segment),
+ __alignof__(struct xilinx_vdma_tx_segment),
+ 0);
+ }
+
if (!chan->desc_pool) {
dev_err(chan->dev,
"unable to allocate channel %d descriptor pool\n",
@@ -503,110 +757,160 @@ static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan)
return -ENOMEM;
}
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ /*
+	 * In the AXI DMA case, after submitting a pending_list, keep
+ * an extra segment allocated so that the "next descriptor"
+ * pointer on the tail descriptor always points to a
+ * valid descriptor, even when paused after reaching taildesc.
+ * This way, it is possible to issue additional
+ * transfers without halting and restarting the channel.
+ */
+ chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
+
dma_cookie_init(dchan);
+
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+		/* For AXI DMA, resetting one channel will reset the
+		 * other channel as well, so enable the interrupts here.
+ */
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+ XILINX_DMA_DMAXR_ALL_IRQ_MASK);
+ }
+
+ if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+ XILINX_CDMA_CR_SGMODE);
+
return 0;
}
/**
- * xilinx_vdma_tx_status - Get VDMA transaction status
+ * xilinx_dma_tx_status - Get DMA transaction status
* @dchan: DMA channel
* @cookie: Transaction identifier
* @txstate: Transaction state
*
* Return: DMA transaction status
*/
-static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan,
+static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- return dma_cookie_status(dchan, cookie, txstate);
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment;
+ struct xilinx_axidma_desc_hw *hw;
+ enum dma_status ret;
+ unsigned long flags;
+ u32 residue = 0;
+
+ ret = dma_cookie_status(dchan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ spin_lock_irqsave(&chan->lock, flags);
+
+ desc = list_last_entry(&chan->active_list,
+ struct xilinx_dma_tx_descriptor, node);
+ if (chan->has_sg) {
+ list_for_each_entry(segment, &desc->segments, node) {
+ hw = &segment->hw;
+ residue += (hw->control - hw->status) &
+ XILINX_DMA_MAX_TRANS_LEN;
+ }
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ chan->residue = residue;
+ dma_set_residue(txstate, chan->residue);
+ }
+
+ return ret;
}
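
From the client side, the residue computed above is retrieved through the
generic dmaengine API; a minimal sketch (chan and cookie are assumed to come
from an earlier prep/submit):

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status != DMA_COMPLETE)
		pr_info("bytes still pending: %u\n", state.residue);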
/**
- * xilinx_vdma_is_running - Check if VDMA channel is running
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_is_running - Check if DMA channel is running
+ * @chan: Driver specific DMA channel
*
* Return: '1' if running, '0' if not.
*/
-static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan)
+static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
{
- return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
- XILINX_VDMA_DMASR_HALTED) &&
- (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
- XILINX_VDMA_DMACR_RUNSTOP);
+ return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+ XILINX_DMA_DMASR_HALTED) &&
+ (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
+ XILINX_DMA_DMACR_RUNSTOP);
}
/**
- * xilinx_vdma_is_idle - Check if VDMA channel is idle
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_is_idle - Check if DMA channel is idle
+ * @chan: Driver specific DMA channel
*
* Return: '1' if idle, '0' if not.
*/
-static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
+static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
{
- return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
- XILINX_VDMA_DMASR_IDLE;
+ return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+ XILINX_DMA_DMASR_IDLE;
}
/**
- * xilinx_vdma_halt - Halt VDMA channel
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_halt - Halt DMA channel
+ * @chan: Driver specific DMA channel
*/
-static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
+static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
{
int err;
u32 val;
- vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
/* Wait for the hardware to halt */
- err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
- (val & XILINX_VDMA_DMASR_HALTED), 0,
- XILINX_VDMA_LOOP_COUNT);
+ err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+ (val & XILINX_DMA_DMASR_HALTED), 0,
+ XILINX_DMA_LOOP_COUNT);
if (err) {
dev_err(chan->dev, "Cannot stop channel %p: %x\n",
- chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
+ chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
chan->err = true;
}
-
- return;
}
/**
- * xilinx_vdma_start - Start VDMA channel
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_start - Start DMA channel
+ * @chan: Driver specific DMA channel
*/
-static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
+static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
int err;
u32 val;
- vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
/* Wait for the hardware to start */
- err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
- !(val & XILINX_VDMA_DMASR_HALTED), 0,
- XILINX_VDMA_LOOP_COUNT);
+ err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
+ !(val & XILINX_DMA_DMASR_HALTED), 0,
+ XILINX_DMA_LOOP_COUNT);
if (err) {
dev_err(chan->dev, "Cannot start channel %p: %x\n",
- chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
+ chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
chan->err = true;
}
-
- return;
}
/**
* xilinx_vdma_start_transfer - Starts VDMA transfer
* @chan: Driver specific channel struct pointer
*/
-static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
+static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
struct xilinx_vdma_config *config = &chan->config;
- struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
+ struct xilinx_dma_tx_descriptor *desc, *tail_desc;
u32 reg;
struct xilinx_vdma_tx_segment *tail_segment;
@@ -618,16 +922,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
return;
desc = list_first_entry(&chan->pending_list,
- struct xilinx_vdma_tx_descriptor, node);
+ struct xilinx_dma_tx_descriptor, node);
tail_desc = list_last_entry(&chan->pending_list,
- struct xilinx_vdma_tx_descriptor, node);
+ struct xilinx_dma_tx_descriptor, node);
tail_segment = list_last_entry(&tail_desc->segments,
struct xilinx_vdma_tx_segment, node);
/* If it is SG mode and hardware is busy, cannot submit */
- if (chan->has_sg && xilinx_vdma_is_running(chan) &&
- !xilinx_vdma_is_idle(chan)) {
+ if (chan->has_sg && xilinx_dma_is_running(chan) &&
+ !xilinx_dma_is_idle(chan)) {
dev_dbg(chan->dev, "DMA controller still busy\n");
return;
}
@@ -637,19 +941,19 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
* done, start new transfers
*/
if (chan->has_sg)
- vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC,
+ dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
desc->async_tx.phys);
/* Configure the hardware using info in the config structure */
- reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
+ reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
if (config->frm_cnt_en)
- reg |= XILINX_VDMA_DMACR_FRAMECNT_EN;
+ reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
else
- reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;
+ reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
	/* Configure channel to allow the given number of frame buffers */
- vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE,
+ dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
chan->desc_pendingcount);
/*
@@ -657,45 +961,53 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
* In direct register mode, if not parking, enable circular mode
*/
if (chan->has_sg || !config->park)
- reg |= XILINX_VDMA_DMACR_CIRC_EN;
+ reg |= XILINX_DMA_DMACR_CIRC_EN;
if (config->park)
- reg &= ~XILINX_VDMA_DMACR_CIRC_EN;
+ reg &= ~XILINX_DMA_DMACR_CIRC_EN;
- vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg);
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
if (config->park && (config->park_frm >= 0) &&
(config->park_frm < chan->num_frms)) {
if (chan->direction == DMA_MEM_TO_DEV)
- vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
+ dma_write(chan, XILINX_DMA_REG_PARK_PTR,
config->park_frm <<
- XILINX_VDMA_PARK_PTR_RD_REF_SHIFT);
+ XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
else
- vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
+ dma_write(chan, XILINX_DMA_REG_PARK_PTR,
config->park_frm <<
- XILINX_VDMA_PARK_PTR_WR_REF_SHIFT);
+ XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
}
/* Start the hardware */
- xilinx_vdma_start(chan);
+ xilinx_dma_start(chan);
if (chan->err)
return;
/* Start the transfer */
if (chan->has_sg) {
- vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC,
+ dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
tail_segment->phys);
} else {
struct xilinx_vdma_tx_segment *segment, *last = NULL;
int i = 0;
- list_for_each_entry(desc, &chan->pending_list, node) {
- segment = list_first_entry(&desc->segments,
- struct xilinx_vdma_tx_segment, node);
- vdma_desc_write(chan,
+ if (chan->desc_submitcount < chan->num_frms)
+ i = chan->desc_submitcount;
+
+ list_for_each_entry(segment, &desc->segments, node) {
+ if (chan->ext_addr)
+ vdma_desc_write_64(chan,
+ XILINX_VDMA_REG_START_ADDRESS_64(i++),
+ segment->hw.buf_addr,
+ segment->hw.buf_addr_msb);
+ else
+ vdma_desc_write(chan,
XILINX_VDMA_REG_START_ADDRESS(i++),
segment->hw.buf_addr);
+
last = segment;
}
@@ -703,10 +1015,164 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
return;
/* HW expects these parameters to be same for one transaction */
- vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
- vdma_desc_write(chan, XILINX_VDMA_REG_FRMDLY_STRIDE,
+ vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
+ vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
last->hw.stride);
- vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
+ vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+ }
+
+ if (!chan->has_sg) {
+ list_del(&desc->node);
+ list_add_tail(&desc->node, &chan->active_list);
+ chan->desc_submitcount++;
+ chan->desc_pendingcount--;
+ if (chan->desc_submitcount == chan->num_frms)
+ chan->desc_submitcount = 0;
+ } else {
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
+ }
+}
+
+/**
+ * xilinx_cdma_start_transfer - Starts cdma transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+ struct xilinx_cdma_tx_segment *tail_segment;
+ u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
+
+ if (chan->err)
+ return;
+
+ if (list_empty(&chan->pending_list))
+ return;
+
+ head_desc = list_first_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_cdma_tx_segment, node);
+
+ if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
+ ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
+ ctrl_reg |= chan->desc_pendingcount <<
+ XILINX_DMA_CR_COALESCE_SHIFT;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
+ }
+
+ if (chan->has_sg) {
+ dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+ head_desc->async_tx.phys);
+
+ /* Update tail ptr register which will start the transfer */
+ dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+ tail_segment->phys);
+ } else {
+ /* In simple mode */
+ struct xilinx_cdma_tx_segment *segment;
+ struct xilinx_cdma_desc_hw *hw;
+
+ segment = list_first_entry(&head_desc->segments,
+ struct xilinx_cdma_tx_segment,
+ node);
+
+ hw = &segment->hw;
+
+ dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
+ dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
+
+ /* Start the transfer */
+ dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
+ hw->control & XILINX_DMA_MAX_TRANS_LEN);
+ }
+
+ list_splice_tail_init(&chan->pending_list, &chan->active_list);
+ chan->desc_pendingcount = 0;
+}
+
+/**
+ * xilinx_dma_start_transfer - Starts DMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
+{
+ struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+ struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
+ u32 reg;
+
+ if (chan->err)
+ return;
+
+ if (list_empty(&chan->pending_list))
+ return;
+
+ /* If it is SG mode and hardware is busy, cannot submit */
+ if (chan->has_sg && xilinx_dma_is_running(chan) &&
+ !xilinx_dma_is_idle(chan)) {
+ dev_dbg(chan->dev, "DMA controller still busy\n");
+ return;
+ }
+
+ head_desc = list_first_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_desc = list_last_entry(&chan->pending_list,
+ struct xilinx_dma_tx_descriptor, node);
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+
+ old_head = list_first_entry(&head_desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ new_head = chan->seg_v;
+ /* Copy Buffer Descriptor fields. */
+ new_head->hw = old_head->hw;
+
+ /* Swap and save new reserve */
+ list_replace_init(&old_head->node, &new_head->node);
+ chan->seg_v = old_head;
+
+ tail_segment->hw.next_desc = chan->seg_v->phys;
+ head_desc->async_tx.phys = new_head->phys;
+
+ reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
+
+ if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
+ reg &= ~XILINX_DMA_CR_COALESCE_MAX;
+ reg |= chan->desc_pendingcount <<
+ XILINX_DMA_CR_COALESCE_SHIFT;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
+ }
+
+ if (chan->has_sg)
+ dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+ head_desc->async_tx.phys);
+
+ xilinx_dma_start(chan);
+
+ if (chan->err)
+ return;
+
+ /* Start the transfer */
+ if (chan->has_sg) {
+ dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+ tail_segment->phys);
+ } else {
+ struct xilinx_axidma_tx_segment *segment;
+ struct xilinx_axidma_desc_hw *hw;
+
+ segment = list_first_entry(&head_desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ hw = &segment->hw;
+
+ dma_ctrl_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
+
+ /* Start the transfer */
+ dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
+ hw->control & XILINX_DMA_MAX_TRANS_LEN);
}
list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -714,28 +1180,28 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
}
/**
- * xilinx_vdma_issue_pending - Issue pending transactions
+ * xilinx_dma_issue_pending - Issue pending transactions
* @dchan: DMA channel
*/
-static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
+static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
- struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags);
- xilinx_vdma_start_transfer(chan);
+ chan->start_transfer(chan);
spin_unlock_irqrestore(&chan->lock, flags);
}
/**
- * xilinx_vdma_complete_descriptor - Mark the active descriptor as complete
+ * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
* @chan : xilinx DMA channel
*
* CONTEXT: hardirq
*/
-static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
+static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
- struct xilinx_vdma_tx_descriptor *desc, *next;
+ struct xilinx_dma_tx_descriptor *desc, *next;
/* This function was invoked with lock held */
if (list_empty(&chan->active_list))
@@ -749,27 +1215,27 @@ static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
}
/**
- * xilinx_vdma_reset - Reset VDMA channel
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_reset - Reset DMA channel
+ * @chan: Driver specific DMA channel
*
* Return: '0' on success and failure value on error
*/
-static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
+static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
{
int err;
u32 tmp;
- vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
/* Wait for the hardware to finish reset */
- err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp,
- !(tmp & XILINX_VDMA_DMACR_RESET), 0,
- XILINX_VDMA_LOOP_COUNT);
+ err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
+ !(tmp & XILINX_DMA_DMACR_RESET), 0,
+ XILINX_DMA_LOOP_COUNT);
if (err) {
dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
- vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
- vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
+ dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
+ dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
return -ETIMEDOUT;
}
@@ -779,48 +1245,48 @@ static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
}
/**
- * xilinx_vdma_chan_reset - Reset VDMA channel and enable interrupts
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
+ * @chan: Driver specific DMA channel
*
* Return: '0' on success and failure value on error
*/
-static int xilinx_vdma_chan_reset(struct xilinx_vdma_chan *chan)
+static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
{
int err;
/* Reset VDMA */
- err = xilinx_vdma_reset(chan);
+ err = xilinx_dma_reset(chan);
if (err)
return err;
/* Enable interrupts */
- vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR,
- XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
+ dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+ XILINX_DMA_DMAXR_ALL_IRQ_MASK);
return 0;
}
/**
- * xilinx_vdma_irq_handler - VDMA Interrupt handler
+ * xilinx_dma_irq_handler - DMA Interrupt handler
* @irq: IRQ number
- * @data: Pointer to the Xilinx VDMA channel structure
+ * @data: Pointer to the Xilinx DMA channel structure
*
* Return: IRQ_HANDLED/IRQ_NONE
*/
-static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
+static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
{
- struct xilinx_vdma_chan *chan = data;
+ struct xilinx_dma_chan *chan = data;
u32 status;
/* Read the status and ack the interrupts. */
- status = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR);
- if (!(status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK))
+ status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
+ if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
return IRQ_NONE;
- vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
- status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
+ status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
- if (status & XILINX_VDMA_DMASR_ERR_IRQ) {
+ if (status & XILINX_DMA_DMASR_ERR_IRQ) {
/*
* An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
* error is recoverable, ignore it. Otherwise flag the error.
@@ -828,22 +1294,23 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
* Only recoverable errors can be cleared in the DMASR register,
* make sure not to write to other error bits to 1.
*/
- u32 errors = status & XILINX_VDMA_DMASR_ALL_ERR_MASK;
- vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
- errors & XILINX_VDMA_DMASR_ERR_RECOVER_MASK);
+ u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
+
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
+ errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
if (!chan->flush_on_fsync ||
- (errors & ~XILINX_VDMA_DMASR_ERR_RECOVER_MASK)) {
+ (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
dev_err(chan->dev,
"Channel %p has errors %x, cdr %x tdr %x\n",
chan, errors,
- vdma_ctrl_read(chan, XILINX_VDMA_REG_CURDESC),
- vdma_ctrl_read(chan, XILINX_VDMA_REG_TAILDESC));
+ dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
+ dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
chan->err = true;
}
}
- if (status & XILINX_VDMA_DMASR_DLY_CNT_IRQ) {
+ if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
/*
	 * The device takes too long to do the transfer when the user requires
	 * responsiveness.
@@ -851,10 +1318,10 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
dev_dbg(chan->dev, "Inter-packet latency too long\n");
}
- if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
+ if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
spin_lock(&chan->lock);
- xilinx_vdma_complete_descriptor(chan);
- xilinx_vdma_start_transfer(chan);
+ xilinx_dma_complete_descriptor(chan);
+ chan->start_transfer(chan);
spin_unlock(&chan->lock);
}
@@ -867,11 +1334,13 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
* @chan: Driver specific dma channel
* @desc: dma transaction descriptor
*/
-static void append_desc_queue(struct xilinx_vdma_chan *chan,
- struct xilinx_vdma_tx_descriptor *desc)
+static void append_desc_queue(struct xilinx_dma_chan *chan,
+ struct xilinx_dma_tx_descriptor *desc)
{
struct xilinx_vdma_tx_segment *tail_segment;
- struct xilinx_vdma_tx_descriptor *tail_desc;
+ struct xilinx_dma_tx_descriptor *tail_desc;
+ struct xilinx_axidma_tx_segment *axidma_tail_segment;
+ struct xilinx_cdma_tx_segment *cdma_tail_segment;
if (list_empty(&chan->pending_list))
goto append;
@@ -881,10 +1350,23 @@ static void append_desc_queue(struct xilinx_vdma_chan *chan,
* that already exists in memory.
*/
tail_desc = list_last_entry(&chan->pending_list,
- struct xilinx_vdma_tx_descriptor, node);
- tail_segment = list_last_entry(&tail_desc->segments,
- struct xilinx_vdma_tx_segment, node);
- tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+ struct xilinx_dma_tx_descriptor, node);
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_vdma_tx_segment,
+ node);
+ tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+ } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ cdma_tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_cdma_tx_segment,
+ node);
+ cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+ } else {
+ axidma_tail_segment = list_last_entry(&tail_desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+ }
/*
* Add the software descriptor and all children to the list
@@ -894,22 +1376,23 @@ append:
list_add_tail(&desc->node, &chan->pending_list);
chan->desc_pendingcount++;
- if (unlikely(chan->desc_pendingcount > chan->num_frms)) {
+ if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
+ && unlikely(chan->desc_pendingcount > chan->num_frms)) {
dev_dbg(chan->dev, "desc pendingcount is too high\n");
chan->desc_pendingcount = chan->num_frms;
}
}
/**
- * xilinx_vdma_tx_submit - Submit DMA transaction
+ * xilinx_dma_tx_submit - Submit DMA transaction
* @tx: Async transaction descriptor
*
* Return: cookie value on success and failure value on error
*/
-static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
+static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
- struct xilinx_vdma_tx_descriptor *desc = to_vdma_tx_descriptor(tx);
- struct xilinx_vdma_chan *chan = to_xilinx_chan(tx->chan);
+ struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
+ struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
dma_cookie_t cookie;
unsigned long flags;
int err;
@@ -919,7 +1402,7 @@ static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
* If reset fails, need to hard reset the system.
* Channel is no longer functional
*/
- err = xilinx_vdma_chan_reset(chan);
+ err = xilinx_dma_chan_reset(chan);
if (err < 0)
return err;
}
@@ -950,8 +1433,8 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
struct dma_interleaved_template *xt,
unsigned long flags)
{
- struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
- struct xilinx_vdma_tx_descriptor *desc;
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
struct xilinx_vdma_tx_segment *segment, *prev = NULL;
struct xilinx_vdma_desc_hw *hw;
@@ -965,12 +1448,12 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
return NULL;
/* Allocate a transaction descriptor. */
- desc = xilinx_vdma_alloc_tx_descriptor(chan);
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
if (!desc)
return NULL;
dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
- desc->async_tx.tx_submit = xilinx_vdma_tx_submit;
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
async_tx_ack(&desc->async_tx);
/* Allocate the link descriptor from DMA pool */
@@ -983,14 +1466,25 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
hw->vsize = xt->numf;
hw->hsize = xt->sgl[0].size;
hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
- XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT;
+ XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
hw->stride |= chan->config.frm_dly <<
- XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
-
- if (xt->dir != DMA_MEM_TO_DEV)
- hw->buf_addr = xt->dst_start;
- else
- hw->buf_addr = xt->src_start;
+ XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
+
+ if (xt->dir != DMA_MEM_TO_DEV) {
+ if (chan->ext_addr) {
+ hw->buf_addr = lower_32_bits(xt->dst_start);
+ hw->buf_addr_msb = upper_32_bits(xt->dst_start);
+ } else {
+ hw->buf_addr = xt->dst_start;
+ }
+ } else {
+ if (chan->ext_addr) {
+ hw->buf_addr = lower_32_bits(xt->src_start);
+ hw->buf_addr_msb = upper_32_bits(xt->src_start);
+ } else {
+ hw->buf_addr = xt->src_start;
+ }
+ }
/* Insert the segment into the descriptor segments list. */
list_add_tail(&segment->node, &desc->segments);
@@ -1005,29 +1499,194 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
return &desc->async_tx;
error:
- xilinx_vdma_free_tx_descriptor(chan, desc);
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
+/**
+ * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
+ * @dchan: DMA channel
+ * @dma_dst: destination address
+ * @dma_src: source address
+ * @len: transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_cdma_tx_segment *segment, *prev;
+ struct xilinx_cdma_desc_hw *hw;
+
+ if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+ return NULL;
+
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ /* Allocate the link descriptor from DMA pool */
+ segment = xilinx_cdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ hw = &segment->hw;
+ hw->control = len;
+ hw->src_addr = dma_src;
+ hw->dest_addr = dma_dst;
+
+ /* Fill the previous next descriptor with current */
+ prev = list_last_entry(&desc->segments,
+ struct xilinx_cdma_tx_segment, node);
+ prev->hw.next_desc = segment->phys;
+
+ /* Insert the segment into the descriptor segments list. */
+ list_add_tail(&segment->node, &desc->segments);
+
+ prev = segment;
+
+ /* Link the last hardware descriptor with the first. */
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_cdma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+ prev->hw.next_desc = segment->phys;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
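
A sketch of a client driving the memcpy path above through the generic
dmaengine API (chan, dst, src and len are assumed to be set up elsewhere):

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);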
+
+/**
+ * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
+ * @dchan: DMA channel
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @scatterlist
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ * @context: APP words of the descriptor
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
+ struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
+ u32 *app_w = (u32 *)context;
+ struct scatterlist *sg;
+ size_t copy;
+ size_t sg_used;
+ unsigned int i;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ /* Allocate a transaction descriptor. */
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ /* Build transactions using information in the scatter gather list */
+ for_each_sg(sgl, sg, sg_len, i) {
+ sg_used = 0;
+
+ /* Loop until the entire scatterlist entry is used */
+ while (sg_used < sg_dma_len(sg)) {
+ struct xilinx_axidma_desc_hw *hw;
+
+ /* Get a free segment */
+ segment = xilinx_axidma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the hw limit
+ */
+ copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+ XILINX_DMA_MAX_TRANS_LEN);
+ hw = &segment->hw;
+
+ /* Fill in the descriptor */
+ hw->buf_addr = sg_dma_address(sg) + sg_used;
+
+ hw->control = copy;
+
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ if (app_w)
+ memcpy(hw->app, app_w, sizeof(u32) *
+ XILINX_DMA_NUM_APP_WORDS);
+ }
+
+ if (prev)
+ prev->hw.next_desc = segment->phys;
+
+ prev = segment;
+ sg_used += copy;
+
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
+
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+ prev->hw.next_desc = segment->phys;
+
+ /* For DMA_MEM_TO_DEV, set SOP on the first and EOP on the last descriptor */
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ segment->hw.control |= XILINX_DMA_BD_SOP;
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ segment->hw.control |= XILINX_DMA_BD_EOP;
+ }
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
return NULL;
}
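
And a matching hedged sketch of the slave-sg path from a client's side; the "tx" channel name is an assumption from a typical devicetree binding, and a real driver must defer the unmap until the transfer's completion callback has run:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Sketch only: stream one pre-allocated buffer to the device (MM2S). */
static int example_axidma_send(struct device *dev, void *buf, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct scatterlist sg;
	struct dma_chan *chan;
	int ret = 0;

	chan = dma_request_chan(dev, "tx");	/* channel name is illustrative */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE)) {
		ret = -ENOMEM;
		goto out;
	}

	/* Lands in xilinx_dma_prep_slave_sg() above */
	tx = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		ret = -EIO;
		goto unmap;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* Wait for the completion callback here before unmapping. */
unmap:
	dma_unmap_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE);
out:
	dma_release_channel(chan);
	return ret;
}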
/**
- * xilinx_vdma_terminate_all - Halt the channel and free descriptors
- * @chan: Driver specific VDMA Channel pointer
+ * xilinx_dma_terminate_all - Halt the channel and free descriptors
+ * @chan: Driver specific DMA Channel pointer
*/
-static int xilinx_vdma_terminate_all(struct dma_chan *dchan)
+static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
- struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
/* Halt the DMA engine */
- xilinx_vdma_halt(chan);
+ xilinx_dma_halt(chan);
/* Remove and free all of the descriptors in the lists */
- xilinx_vdma_free_descriptors(chan);
+ xilinx_dma_free_descriptors(chan);
return 0;
}
/**
- * xilinx_vdma_channel_set_config - Configure VDMA channel
+ * xilinx_dma_channel_set_config - Configure VDMA channel
* Run-time configuration for Axi VDMA, supports:
* . halt the channel
* . configure interrupt coalescing and inter-packet delay threshold
@@ -1042,13 +1701,13 @@ static int xilinx_vdma_terminate_all(struct dma_chan *dchan)
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
struct xilinx_vdma_config *cfg)
{
- struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
u32 dmacr;
if (cfg->reset)
- return xilinx_vdma_chan_reset(chan);
+ return xilinx_dma_chan_reset(chan);
- dmacr = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
+ dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
chan->config.frm_dly = cfg->frm_dly;
chan->config.park = cfg->park;
@@ -1058,8 +1717,8 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
chan->config.master = cfg->master;
if (cfg->gen_lock && chan->genlock) {
- dmacr |= XILINX_VDMA_DMACR_GENLOCK_EN;
- dmacr |= cfg->master << XILINX_VDMA_DMACR_MASTER_SHIFT;
+ dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
+ dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
}
chan->config.frm_cnt_en = cfg->frm_cnt_en;
@@ -1071,21 +1730,21 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
chan->config.coalesc = cfg->coalesc;
chan->config.delay = cfg->delay;
- if (cfg->coalesc <= XILINX_VDMA_DMACR_FRAME_COUNT_MAX) {
- dmacr |= cfg->coalesc << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT;
+ if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
+ dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
chan->config.coalesc = cfg->coalesc;
}
- if (cfg->delay <= XILINX_VDMA_DMACR_DELAY_MAX) {
- dmacr |= cfg->delay << XILINX_VDMA_DMACR_DELAY_SHIFT;
+ if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
+ dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
chan->config.delay = cfg->delay;
}
/* FSync Source selection */
- dmacr &= ~XILINX_VDMA_DMACR_FSYNCSRC_MASK;
- dmacr |= cfg->ext_fsync << XILINX_VDMA_DMACR_FSYNCSRC_SHIFT;
+ dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
+ dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
- vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, dmacr);
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
return 0;
}
@@ -1096,14 +1755,14 @@ EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
*/
/**
- * xilinx_vdma_chan_remove - Per Channel remove function
- * @chan: Driver specific VDMA channel
+ * xilinx_dma_chan_remove - Per Channel remove function
+ * @chan: Driver specific DMA channel
*/
-static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan)
+static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
/* Disable all interrupts */
- vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR,
- XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
+ dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
+ XILINX_DMA_DMAXR_ALL_IRQ_MASK);
if (chan->irq > 0)
free_irq(chan->irq, chan);
@@ -1113,8 +1772,197 @@ static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan)
list_del(&chan->common.device_node);
}
+static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **tx_clk, struct clk **rx_clk,
+ struct clk **sg_clk, struct clk **tmp_clk)
+{
+ int err;
+
+ *tmp_clk = NULL;
+
+ *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+ if (IS_ERR(*axi_clk)) {
+ err = PTR_ERR(*axi_clk);
+ dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
+ return err;
+ }
+
+ *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+ if (IS_ERR(*tx_clk))
+ *tx_clk = NULL;
+
+ *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+ if (IS_ERR(*rx_clk))
+ *rx_clk = NULL;
+
+ *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
+ if (IS_ERR(*sg_clk))
+ *sg_clk = NULL;
+
+ err = clk_prepare_enable(*axi_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+ goto err_disable_axiclk;
+ }
+
+ err = clk_prepare_enable(*rx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
+ goto err_disable_txclk;
+ }
+
+ err = clk_prepare_enable(*sg_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err);
+ goto err_disable_rxclk;
+ }
+
+ return 0;
+
+err_disable_rxclk:
+ clk_disable_unprepare(*rx_clk);
+err_disable_txclk:
+ clk_disable_unprepare(*tx_clk);
+err_disable_axiclk:
+ clk_disable_unprepare(*axi_clk);
+
+ return err;
+}
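+
A note on the optional-clock pattern above: devm_clk_get() failures for the non-mandatory clocks are collapsed to NULL, which works because the clk API treats a NULL clock as a dummy — clk_prepare_enable(NULL) returns 0 and clk_disable_unprepare(NULL) is a no-op, which is also what lets xdma_disable_allclks() below disable all five clocks unconditionally. A minimal illustration, with the caveat that collapsing every error also swallows -EPROBE_DEFER (the clock name is illustrative):

/* Sketch: optional-clock handling, mirroring the pattern above. */
static int example_get_optional_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "maybe-missing");

	if (IS_ERR(clk))
		clk = NULL;		/* optional: also swallows -EPROBE_DEFER */
	*out = clk;
	return clk_prepare_enable(clk);	/* NULL clk: returns 0 */
}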
+
+static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **dev_clk, struct clk **tmp_clk,
+ struct clk **tmp1_clk, struct clk **tmp2_clk)
+{
+ int err;
+
+ *tmp_clk = NULL;
+ *tmp1_clk = NULL;
+ *tmp2_clk = NULL;
+
+ *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+ if (IS_ERR(*axi_clk)) {
+ err = PTR_ERR(*axi_clk);
+ dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err);
+ return err;
+ }
+
+ *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
+ if (IS_ERR(*dev_clk)) {
+ err = PTR_ERR(*dev_clk);
+ dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*axi_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*dev_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err);
+ goto err_disable_axiclk;
+ }
+
+ return 0;
+
+err_disable_axiclk:
+ clk_disable_unprepare(*axi_clk);
+
+ return err;
+}
+
+static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
+ struct clk **tx_clk, struct clk **txs_clk,
+ struct clk **rx_clk, struct clk **rxs_clk)
+{
+ int err;
+
+ *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
+ if (IS_ERR(*axi_clk)) {
+ err = PTR_ERR(*axi_clk);
+ dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
+ return err;
+ }
+
+ *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
+ if (IS_ERR(*tx_clk))
+ *tx_clk = NULL;
+
+ *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
+ if (IS_ERR(*txs_clk))
+ *txs_clk = NULL;
+
+ *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
+ if (IS_ERR(*rx_clk))
+ *rx_clk = NULL;
+
+ *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
+ if (IS_ERR(*rxs_clk))
+ *rxs_clk = NULL;
+
+ err = clk_prepare_enable(*axi_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(*tx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
+ goto err_disable_axiclk;
+ }
+
+ err = clk_prepare_enable(*txs_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
+ goto err_disable_txclk;
+ }
+
+ err = clk_prepare_enable(*rx_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
+ goto err_disable_txsclk;
+ }
+
+ err = clk_prepare_enable(*rxs_clk);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
+ goto err_disable_rxclk;
+ }
+
+ return 0;
+
+err_disable_rxclk:
+ clk_disable_unprepare(*rx_clk);
+err_disable_txsclk:
+ clk_disable_unprepare(*txs_clk);
+err_disable_txclk:
+ clk_disable_unprepare(*tx_clk);
+err_disable_axiclk:
+ clk_disable_unprepare(*axi_clk);
+
+ return err;
+}
+
+static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
+{
+ clk_disable_unprepare(xdev->rxs_clk);
+ clk_disable_unprepare(xdev->rx_clk);
+ clk_disable_unprepare(xdev->txs_clk);
+ clk_disable_unprepare(xdev->tx_clk);
+ clk_disable_unprepare(xdev->axi_clk);
+}
+
/**
- * xilinx_vdma_chan_probe - Per Channel Probing
+ * xilinx_dma_chan_probe - Per Channel Probing
* It gets channel features from the device tree entry and
* initializes special channel handling routines
*
@@ -1123,10 +1971,10 @@ static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan)
*
* Return: '0' on success and failure value on error
*/
-static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
+static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
struct device_node *node)
{
- struct xilinx_vdma_chan *chan;
+ struct xilinx_dma_chan *chan;
bool has_dre = false;
u32 value, width;
int err;
@@ -1140,6 +1988,7 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
chan->xdev = xdev;
chan->has_sg = xdev->has_sg;
chan->desc_pendingcount = 0x0;
+ chan->ext_addr = xdev->ext_addr;
spin_lock_init(&chan->lock);
INIT_LIST_HEAD(&chan->pending_list);
@@ -1169,23 +2018,27 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
chan->direction = DMA_MEM_TO_DEV;
chan->id = 0;
- chan->ctrl_offset = XILINX_VDMA_MM2S_CTRL_OFFSET;
- chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
+ chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
- if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH ||
- xdev->flush_on_fsync == XILINX_VDMA_FLUSH_MM2S)
- chan->flush_on_fsync = true;
+ if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
+ xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
+ chan->flush_on_fsync = true;
+ }
} else if (of_device_is_compatible(node,
"xlnx,axi-vdma-s2mm-channel")) {
chan->direction = DMA_DEV_TO_MEM;
chan->id = 1;
- chan->ctrl_offset = XILINX_VDMA_S2MM_CTRL_OFFSET;
- chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
+ chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
- if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH ||
- xdev->flush_on_fsync == XILINX_VDMA_FLUSH_S2MM)
- chan->flush_on_fsync = true;
+ if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
+ xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
+ chan->flush_on_fsync = true;
+ }
} else {
dev_err(xdev->dev, "Invalid channel compatible node\n");
return -EINVAL;
@@ -1193,15 +2046,22 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
/* Request the interrupt */
chan->irq = irq_of_parse_and_map(node, 0);
- err = request_irq(chan->irq, xilinx_vdma_irq_handler, IRQF_SHARED,
- "xilinx-vdma-controller", chan);
+ err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
+ "xilinx-dma-controller", chan);
if (err) {
dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
return err;
}
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+ chan->start_transfer = xilinx_dma_start_transfer;
+ else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
+ chan->start_transfer = xilinx_cdma_start_transfer;
+ else
+ chan->start_transfer = xilinx_vdma_start_transfer;
+
/* Initialize the tasklet */
- tasklet_init(&chan->tasklet, xilinx_vdma_do_tasklet,
+ tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
(unsigned long)chan);
/*
@@ -1214,7 +2074,7 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
xdev->chan[chan->id] = chan;
/* Reset the channel */
- err = xilinx_vdma_chan_reset(chan);
+ err = xilinx_dma_chan_reset(chan);
if (err < 0) {
dev_err(xdev->dev, "Reset channel failed\n");
return err;
@@ -1233,28 +2093,54 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
- struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
+ struct xilinx_dma_device *xdev = ofdma->of_dma_data;
int chan_id = dma_spec->args[0];
- if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
+ if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
return NULL;
return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
+static const struct xilinx_dma_config axidma_config = {
+ .dmatype = XDMA_TYPE_AXIDMA,
+ .clk_init = axidma_clk_init,
+};
+
+static const struct xilinx_dma_config axicdma_config = {
+ .dmatype = XDMA_TYPE_CDMA,
+ .clk_init = axicdma_clk_init,
+};
+
+static const struct xilinx_dma_config axivdma_config = {
+ .dmatype = XDMA_TYPE_VDMA,
+ .clk_init = axivdma_clk_init,
+};
+
+static const struct of_device_id xilinx_dma_of_ids[] = {
+ { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
+ { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
+ { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
+
/**
- * xilinx_vdma_probe - Driver probe function
+ * xilinx_dma_probe - Driver probe function
* @pdev: Pointer to the platform_device structure
*
* Return: '0' on success and failure value on error
*/
-static int xilinx_vdma_probe(struct platform_device *pdev)
+static int xilinx_dma_probe(struct platform_device *pdev)
{
+ int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
+ struct clk **, struct clk **, struct clk **)
+ = axivdma_clk_init;
struct device_node *node = pdev->dev.of_node;
- struct xilinx_vdma_device *xdev;
- struct device_node *child;
+ struct xilinx_dma_device *xdev;
+ struct device_node *child, *np = pdev->dev.of_node;
struct resource *io;
- u32 num_frames;
+ u32 num_frames, addr_width;
int i, err;
/* Allocate and initialize the DMA engine structure */
@@ -1263,6 +2149,20 @@ static int xilinx_vdma_probe(struct platform_device *pdev)
return -ENOMEM;
xdev->dev = &pdev->dev;
+ if (np) {
+ const struct of_device_id *match;
+
+ match = of_match_node(xilinx_dma_of_ids, np);
+ if (match && match->data) {
+ xdev->dma_config = match->data;
+ clk_init = xdev->dma_config->clk_init;
+ }
+ }
+
+ err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
+ &xdev->rx_clk, &xdev->rxs_clk);
+ if (err)
+ return err;
/* Request and map I/O memory */
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1273,46 +2173,77 @@ static int xilinx_vdma_probe(struct platform_device *pdev)
/* Retrieve the DMA engine properties from the device tree */
xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
- err = of_property_read_u32(node, "xlnx,num-fstores", &num_frames);
- if (err < 0) {
- dev_err(xdev->dev, "missing xlnx,num-fstores property\n");
- return err;
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ err = of_property_read_u32(node, "xlnx,num-fstores",
+ &num_frames);
+ if (err < 0) {
+ dev_err(xdev->dev,
+ "missing xlnx,num-fstores property\n");
+ return err;
+ }
+
+ err = of_property_read_u32(node, "xlnx,flush-fsync",
+ &xdev->flush_on_fsync);
+ if (err < 0)
+ dev_warn(xdev->dev,
+ "missing xlnx,flush-fsync property\n");
}
- err = of_property_read_u32(node, "xlnx,flush-fsync",
- &xdev->flush_on_fsync);
+ err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
if (err < 0)
- dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n");
+ dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
+
+ if (addr_width > 32)
+ xdev->ext_addr = true;
+ else
+ xdev->ext_addr = false;
+
+ /* Set the dma mask bits */
+ dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
/* Initialize the DMA engine */
xdev->common.dev = &pdev->dev;
INIT_LIST_HEAD(&xdev->common.channels);
- dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
- dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+ if (xdev->dma_config->dmatype != XDMA_TYPE_CDMA) {
+ dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+ }
xdev->common.device_alloc_chan_resources =
- xilinx_vdma_alloc_chan_resources;
+ xilinx_dma_alloc_chan_resources;
xdev->common.device_free_chan_resources =
- xilinx_vdma_free_chan_resources;
- xdev->common.device_prep_interleaved_dma =
+ xilinx_dma_free_chan_resources;
+ xdev->common.device_terminate_all = xilinx_dma_terminate_all;
+ xdev->common.device_tx_status = xilinx_dma_tx_status;
+ xdev->common.device_issue_pending = xilinx_dma_issue_pending;
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
+ /* Residue calculation is supported by only AXI DMA */
+ xdev->common.residue_granularity =
+ DMA_RESIDUE_GRANULARITY_SEGMENT;
+ } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+ dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+ xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+ } else {
+ xdev->common.device_prep_interleaved_dma =
xilinx_vdma_dma_prep_interleaved;
- xdev->common.device_terminate_all = xilinx_vdma_terminate_all;
- xdev->common.device_tx_status = xilinx_vdma_tx_status;
- xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
+ }
platform_set_drvdata(pdev, xdev);
/* Initialize the channels */
for_each_child_of_node(node, child) {
- err = xilinx_vdma_chan_probe(xdev, child);
+ err = xilinx_dma_chan_probe(xdev, child);
if (err < 0)
- goto error;
+ goto disable_clks;
}
- for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
- if (xdev->chan[i])
- xdev->chan[i]->num_frms = num_frames;
+ if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+ for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
+ if (xdev->chan[i])
+ xdev->chan[i]->num_frms = num_frames;
+ }
/* Register the DMA engine with the core */
dma_async_device_register(&xdev->common);
@@ -1329,49 +2260,47 @@ static int xilinx_vdma_probe(struct platform_device *pdev)
return 0;
+disable_clks:
+ xdma_disable_allclks(xdev);
error:
- for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
+ for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
if (xdev->chan[i])
- xilinx_vdma_chan_remove(xdev->chan[i]);
+ xilinx_dma_chan_remove(xdev->chan[i]);
return err;
}
/**
- * xilinx_vdma_remove - Driver remove function
+ * xilinx_dma_remove - Driver remove function
* @pdev: Pointer to the platform_device structure
*
* Return: Always '0'
*/
-static int xilinx_vdma_remove(struct platform_device *pdev)
+static int xilinx_dma_remove(struct platform_device *pdev)
{
- struct xilinx_vdma_device *xdev = platform_get_drvdata(pdev);
+ struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
int i;
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&xdev->common);
- for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
+ for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
if (xdev->chan[i])
- xilinx_vdma_chan_remove(xdev->chan[i]);
+ xilinx_dma_chan_remove(xdev->chan[i]);
+
+ xdma_disable_allclks(xdev);
return 0;
}
-static const struct of_device_id xilinx_vdma_of_ids[] = {
- { .compatible = "xlnx,axi-vdma-1.00.a",},
- {}
-};
-MODULE_DEVICE_TABLE(of, xilinx_vdma_of_ids);
-
static struct platform_driver xilinx_vdma_driver = {
.driver = {
.name = "xilinx-vdma",
- .of_match_table = xilinx_vdma_of_ids,
+ .of_match_table = xilinx_dma_of_ids,
},
- .probe = xilinx_vdma_probe,
- .remove = xilinx_vdma_remove,
+ .probe = xilinx_dma_probe,
+ .remove = xilinx_dma_remove,
};
module_platform_driver(xilinx_vdma_driver);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 37755e63c..6ca7474ba 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -378,12 +378,11 @@ config EDAC_ALTERA
config EDAC_ALTERA_L2C
bool "Altera L2 Cache ECC"
- depends on EDAC_ALTERA=y
- select CACHE_L2X0
+ depends on EDAC_ALTERA=y && CACHE_L2X0
help
Support for error detection and correction on the
Altera L2 cache Memory for Altera SoCs. This option
- requires L2 cache so it will force that selection.
+ requires L2 cache.
config EDAC_ALTERA_OCRAM
bool "Altera On-Chip RAM ECC"
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 63e420987..5b4d223d6 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -78,27 +79,6 @@ static const struct altr_sdram_prv_data a10_data = {
.ue_set_mask = A10_DIAGINT_TDERRA_MASK,
};
-/************************** EDAC Device Defines **************************/
-
-/* OCRAM ECC Management Group Defines */
-#define ALTR_MAN_GRP_OCRAM_ECC_OFFSET 0x04
-#define ALTR_OCR_ECC_EN BIT(0)
-#define ALTR_OCR_ECC_INJS BIT(1)
-#define ALTR_OCR_ECC_INJD BIT(2)
-#define ALTR_OCR_ECC_SERR BIT(3)
-#define ALTR_OCR_ECC_DERR BIT(4)
-
-/* L2 ECC Management Group Defines */
-#define ALTR_MAN_GRP_L2_ECC_OFFSET 0x00
-#define ALTR_L2_ECC_EN BIT(0)
-#define ALTR_L2_ECC_INJS BIT(1)
-#define ALTR_L2_ECC_INJD BIT(2)
-
-#define ALTR_UE_TRIGGER_CHAR 'U' /* Trigger for UE */
-#define ALTR_TRIGGER_READ_WRD_CNT 32 /* Line size x 4 */
-#define ALTR_TRIG_OCRAM_BYTE_SIZE 128 /* Line size x 4 */
-#define ALTR_TRIG_L2C_BYTE_SIZE 4096 /* Full Page */
-
/*********************** EDAC Memory Controller Functions ****************/
/* The SDRAM controller uses the EDAC Memory Controller framework. */
@@ -252,8 +232,8 @@ static unsigned long get_total_mem(void)
}
static const struct of_device_id altr_sdram_ctrl_of_match[] = {
- { .compatible = "altr,sdram-edac", .data = (void *)&c5_data},
- { .compatible = "altr,sdram-edac-a10", .data = (void *)&a10_data},
+ { .compatible = "altr,sdram-edac", .data = &c5_data},
+ { .compatible = "altr,sdram-edac-a10", .data = &a10_data},
{},
};
MODULE_DEVICE_TABLE(of, altr_sdram_ctrl_of_match);
@@ -570,28 +550,8 @@ module_platform_driver(altr_edac_driver);
const struct edac_device_prv_data ocramecc_data;
const struct edac_device_prv_data l2ecc_data;
-
-struct edac_device_prv_data {
- int (*setup)(struct platform_device *pdev, void __iomem *base);
- int ce_clear_mask;
- int ue_clear_mask;
- char dbgfs_name[20];
- void * (*alloc_mem)(size_t size, void **other);
- void (*free_mem)(void *p, size_t size, void *other);
- int ecc_enable_mask;
- int ce_set_mask;
- int ue_set_mask;
- int trig_alloc_sz;
-};
-
-struct altr_edac_device_dev {
- void __iomem *base;
- int sb_irq;
- int db_irq;
- const struct edac_device_prv_data *data;
- struct dentry *debugfs_dir;
- char *edac_dev_name;
-};
+const struct edac_device_prv_data a10_ocramecc_data;
+const struct edac_device_prv_data a10_l2ecc_data;
static irqreturn_t altr_edac_device_handler(int irq, void *dev_id)
{
@@ -665,8 +625,9 @@ static ssize_t altr_edac_device_trig(struct file *file,
if (ACCESS_ONCE(ptemp[i]))
result = -1;
/* Toggle Error bit (it is latched), leave ECC enabled */
- writel(error_mask, drvdata->base);
- writel(priv->ecc_enable_mask, drvdata->base);
+ writel(error_mask, (drvdata->base + priv->set_err_ofst));
+ writel(priv->ecc_enable_mask, (drvdata->base +
+ priv->set_err_ofst));
ptemp[i] = i;
}
/* Ensure it has been written out */
@@ -694,6 +655,16 @@ static const struct file_operations altr_edac_device_inject_fops = {
.llseek = generic_file_llseek,
};
+static ssize_t altr_edac_a10_device_trig(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos);
+
+static const struct file_operations altr_edac_a10_device_inject_fops = {
+ .open = simple_open,
+ .write = altr_edac_a10_device_trig,
+ .llseek = generic_file_llseek,
+};
+
static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci,
const struct edac_device_prv_data *priv)
{
@@ -708,17 +679,18 @@ static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci,
if (!edac_debugfs_create_file(priv->dbgfs_name, S_IWUSR,
drvdata->debugfs_dir, edac_dci,
- &altr_edac_device_inject_fops))
+ priv->inject_fops))
debugfs_remove_recursive(drvdata->debugfs_dir);
}
static const struct of_device_id altr_edac_device_of_match[] = {
#ifdef CONFIG_EDAC_ALTERA_L2C
- { .compatible = "altr,socfpga-l2-ecc", .data = (void *)&l2ecc_data },
+ { .compatible = "altr,socfpga-l2-ecc", .data = &l2ecc_data },
+ { .compatible = "altr,socfpga-a10-l2-ecc", .data = &a10_l2ecc_data },
#endif
#ifdef CONFIG_EDAC_ALTERA_OCRAM
- { .compatible = "altr,socfpga-ocram-ecc",
- .data = (void *)&ocramecc_data },
+ { .compatible = "altr,socfpga-ocram-ecc", .data = &ocramecc_data },
+ { .compatible = "altr,socfpga-a10-ocram-ecc", .data = &a10_ocramecc_data },
#endif
{},
};
@@ -789,7 +761,7 @@ static int altr_edac_device_probe(struct platform_device *pdev)
/* Check specific dependencies for the module */
if (drvdata->data->setup) {
- res = drvdata->data->setup(pdev, drvdata->base);
+ res = drvdata->data->setup(drvdata);
if (res)
goto fail1;
}
@@ -856,6 +828,25 @@ module_platform_driver(altr_edac_device_driver);
/*********************** OCRAM EDAC Device Functions *********************/
#ifdef CONFIG_EDAC_ALTERA_OCRAM
+/*
+ * Test for memory's ECC dependencies upon entry because platform-specific
+ * startup should have initialized the memory and enabled the ECC.
+ * Can't turn on ECC here because accessing uninitialized memory will
+ * cause CE/UE errors, possibly causing an ABORT.
+ */
+static int altr_check_ecc_deps(struct altr_edac_device_dev *device)
+{
+ void __iomem *base = device->base;
+ const struct edac_device_prv_data *prv = device->data;
+
+ if (readl(base + prv->ecc_en_ofst) & prv->ecc_enable_mask)
+ return 0;
+
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s: No ECC present or ECC disabled.\n",
+ device->edac_dev_name);
+ return -ENODEV;
+}
static void *ocram_alloc_mem(size_t size, void **other)
{
@@ -891,36 +882,53 @@ static void ocram_free_mem(void *p, size_t size, void *other)
gen_pool_free((struct gen_pool *)other, (u32)p, size);
}
-/*
- * altr_ocram_check_deps()
- * Test for OCRAM cache ECC dependencies upon entry because
- * platform specific startup should have initialized the
- * On-Chip RAM memory and enabled the ECC.
- * Can't turn on ECC here because accessing un-initialized
- * memory will cause CE/UE errors possibly causing an ABORT.
- */
-static int altr_ocram_check_deps(struct platform_device *pdev,
- void __iomem *base)
+static irqreturn_t altr_edac_a10_ecc_irq(struct altr_edac_device_dev *dci,
+ bool sberr)
{
- if (readl(base) & ALTR_OCR_ECC_EN)
- return 0;
+ void __iomem *base = dci->base;
- edac_printk(KERN_ERR, EDAC_DEVICE,
- "OCRAM: No ECC present or ECC disabled.\n");
- return -ENODEV;
+ if (sberr) {
+ writel(ALTR_A10_ECC_SERRPENA,
+ base + ALTR_A10_ECC_INTSTAT_OFST);
+ edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name);
+ } else {
+ writel(ALTR_A10_ECC_DERRPENA,
+ base + ALTR_A10_ECC_INTSTAT_OFST);
+ edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name);
+ panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
+ }
+ return IRQ_HANDLED;
}
const struct edac_device_prv_data ocramecc_data = {
- .setup = altr_ocram_check_deps,
+ .setup = altr_check_ecc_deps,
.ce_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_SERR),
.ue_clear_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_DERR),
.dbgfs_name = "altr_ocram_trigger",
.alloc_mem = ocram_alloc_mem,
.free_mem = ocram_free_mem,
.ecc_enable_mask = ALTR_OCR_ECC_EN,
+ .ecc_en_ofst = ALTR_OCR_ECC_REG_OFFSET,
.ce_set_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_INJS),
.ue_set_mask = (ALTR_OCR_ECC_EN | ALTR_OCR_ECC_INJD),
+ .set_err_ofst = ALTR_OCR_ECC_REG_OFFSET,
.trig_alloc_sz = ALTR_TRIG_OCRAM_BYTE_SIZE,
+ .inject_fops = &altr_edac_device_inject_fops,
+};
+
+const struct edac_device_prv_data a10_ocramecc_data = {
+ .setup = altr_check_ecc_deps,
+ .ce_clear_mask = ALTR_A10_ECC_SERRPENA,
+ .ue_clear_mask = ALTR_A10_ECC_DERRPENA,
+ .irq_status_mask = A10_SYSMGR_ECC_INTSTAT_OCRAM,
+ .dbgfs_name = "altr_ocram_trigger",
+ .ecc_enable_mask = ALTR_A10_OCRAM_ECC_EN_CTL,
+ .ecc_en_ofst = ALTR_A10_ECC_CTRL_OFST,
+ .ce_set_mask = ALTR_A10_ECC_TSERRA,
+ .ue_set_mask = ALTR_A10_ECC_TDERRA,
+ .set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
+ .ecc_irq_handler = altr_edac_a10_ecc_irq,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_OCRAM */
@@ -966,10 +974,13 @@ static void l2_free_mem(void *p, size_t size, void *other)
* Bail if ECC is not enabled.
* Note that L2 Cache Enable is forced at build time.
*/
-static int altr_l2_check_deps(struct platform_device *pdev,
- void __iomem *base)
+static int altr_l2_check_deps(struct altr_edac_device_dev *device)
{
- if (readl(base) & ALTR_L2_ECC_EN)
+ void __iomem *base = device->base;
+ const struct edac_device_prv_data *prv = device->data;
+
+ if ((readl(base) & prv->ecc_enable_mask) ==
+ prv->ecc_enable_mask)
return 0;
edac_printk(KERN_ERR, EDAC_DEVICE,
@@ -977,6 +988,24 @@ static int altr_l2_check_deps(struct platform_device *pdev,
return -ENODEV;
}
+static irqreturn_t altr_edac_a10_l2_irq(struct altr_edac_device_dev *dci,
+ bool sberr)
+{
+ if (sberr) {
+ regmap_write(dci->edac->ecc_mgr_map,
+ A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST,
+ A10_SYSGMR_MPU_CLEAR_L2_ECC_SB);
+ edac_device_handle_ce(dci->edac_dev, 0, 0, dci->edac_dev_name);
+ } else {
+ regmap_write(dci->edac->ecc_mgr_map,
+ A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST,
+ A10_SYSGMR_MPU_CLEAR_L2_ECC_MB);
+ edac_device_handle_ue(dci->edac_dev, 0, 0, dci->edac_dev_name);
+ panic("\nEDAC:ECC_DEVICE[Uncorrectable errors]\n");
+ }
+ return IRQ_HANDLED;
+}
+
const struct edac_device_prv_data l2ecc_data = {
.setup = altr_l2_check_deps,
.ce_clear_mask = 0,
@@ -987,11 +1016,252 @@ const struct edac_device_prv_data l2ecc_data = {
.ecc_enable_mask = ALTR_L2_ECC_EN,
.ce_set_mask = (ALTR_L2_ECC_EN | ALTR_L2_ECC_INJS),
.ue_set_mask = (ALTR_L2_ECC_EN | ALTR_L2_ECC_INJD),
+ .set_err_ofst = ALTR_L2_ECC_REG_OFFSET,
+ .trig_alloc_sz = ALTR_TRIG_L2C_BYTE_SIZE,
+ .inject_fops = &altr_edac_device_inject_fops,
+};
+
+const struct edac_device_prv_data a10_l2ecc_data = {
+ .setup = altr_l2_check_deps,
+ .ce_clear_mask = ALTR_A10_L2_ECC_SERR_CLR,
+ .ue_clear_mask = ALTR_A10_L2_ECC_MERR_CLR,
+ .irq_status_mask = A10_SYSMGR_ECC_INTSTAT_L2,
+ .dbgfs_name = "altr_l2_trigger",
+ .alloc_mem = l2_alloc_mem,
+ .free_mem = l2_free_mem,
+ .ecc_enable_mask = ALTR_A10_L2_ECC_EN_CTL,
+ .ce_set_mask = ALTR_A10_L2_ECC_CE_INJ_MASK,
+ .ue_set_mask = ALTR_A10_L2_ECC_UE_INJ_MASK,
+ .set_err_ofst = ALTR_A10_L2_ECC_INJ_OFST,
+ .ecc_irq_handler = altr_edac_a10_l2_irq,
.trig_alloc_sz = ALTR_TRIG_L2C_BYTE_SIZE,
+ .inject_fops = &altr_edac_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_L2C */
+/********************* Arria10 EDAC Device Functions *************************/
+
+/*
+ * The Arria10 EDAC Device Functions differ from the Cyclone5/Arria5
+ * because 2 IRQs are shared among all the ECC peripherals. The ECC
+ * manager manages the IRQs and the children.
+ * Based on xgene_edac.c peripheral code.
+ */
+
+static ssize_t altr_edac_a10_device_trig(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct edac_device_ctl_info *edac_dci = file->private_data;
+ struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
+ const struct edac_device_prv_data *priv = drvdata->data;
+ void __iomem *set_addr = (drvdata->base + priv->set_err_ofst);
+ unsigned long flags;
+ u8 trig_type;
+
+ if (!user_buf || get_user(trig_type, user_buf))
+ return -EFAULT;
+
+ local_irq_save(flags);
+ if (trig_type == ALTR_UE_TRIGGER_CHAR)
+ writel(priv->ue_set_mask, set_addr);
+ else
+ writel(priv->ce_set_mask, set_addr);
+ /* Ensure the interrupt test bits are set */
+ wmb();
+ local_irq_restore(flags);
+
+ return count;
+}
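+
From userspace, the trigger is exercised by writing a single byte to the debugfs node that altr_create_edacdev_dbgfs() creates; a sketch, where the exact path is an assumption that depends on the EDAC device name:

#include <fcntl.h>
#include <unistd.h>

/* Write 'U' (ALTR_UE_TRIGGER_CHAR) to inject an uncorrectable error;
 * any other byte injects a correctable one. Path is illustrative. */
int main(void)
{
	int fd = open("/sys/kernel/debug/edac/<device>/altr_ocram_trigger",
		      O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "U", 1) != 1) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}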
+
+static irqreturn_t altr_edac_a10_irq_handler(int irq, void *dev_id)
+{
+ irqreturn_t rc = IRQ_NONE;
+ struct altr_arria10_edac *edac = dev_id;
+ struct altr_edac_device_dev *dci;
+ int irq_status;
+ bool sberr = (irq == edac->sb_irq);
+ int sm_offset = sberr ? A10_SYSMGR_ECC_INTSTAT_SERR_OFST :
+ A10_SYSMGR_ECC_INTSTAT_DERR_OFST;
+
+ regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
+
+ if ((irq != edac->sb_irq) && (irq != edac->db_irq)) {
+ WARN_ON(1);
+ } else {
+ list_for_each_entry(dci, &edac->a10_ecc_devices, next) {
+ if (irq_status & dci->data->irq_status_mask)
+ rc = dci->data->ecc_irq_handler(dci, sberr);
+ }
+ }
+
+ return rc;
+}
+
+static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
+ struct device_node *np)
+{
+ struct edac_device_ctl_info *dci;
+ struct altr_edac_device_dev *altdev;
+ char *ecc_name = (char *)np->name;
+ struct resource res;
+ int edac_idx;
+ int rc = 0;
+ const struct edac_device_prv_data *prv;
+ /* Get matching node and check for valid result */
+ const struct of_device_id *pdev_id =
+ of_match_node(altr_edac_device_of_match, np);
+ if (IS_ERR_OR_NULL(pdev_id))
+ return -ENODEV;
+
+ /* Get driver specific data for this EDAC device */
+ prv = pdev_id->data;
+ if (IS_ERR_OR_NULL(prv))
+ return -ENODEV;
+
+ if (!devres_open_group(edac->dev, altr_edac_a10_device_add, GFP_KERNEL))
+ return -ENOMEM;
+
+ rc = of_address_to_resource(np, 0, &res);
+ if (rc < 0) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s: no resource address\n", ecc_name);
+ goto err_release_group;
+ }
+
+ edac_idx = edac_device_alloc_index();
+ dci = edac_device_alloc_ctl_info(sizeof(*altdev), ecc_name,
+ 1, ecc_name, 1, 0, NULL, 0,
+ edac_idx);
+
+ if (!dci) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s: Unable to allocate EDAC device\n", ecc_name);
+ rc = -ENOMEM;
+ goto err_release_group;
+ }
+
+ altdev = dci->pvt_info;
+ dci->dev = edac->dev;
+ altdev->edac_dev_name = ecc_name;
+ altdev->edac_idx = edac_idx;
+ altdev->edac = edac;
+ altdev->edac_dev = dci;
+ altdev->data = prv;
+ altdev->ddev = *edac->dev;
+ dci->dev = &altdev->ddev;
+ dci->ctl_name = "Altera ECC Manager";
+ dci->mod_name = ecc_name;
+ dci->dev_name = ecc_name;
+
+ altdev->base = devm_ioremap_resource(edac->dev, &res);
+ if (IS_ERR(altdev->base)) {
+ rc = PTR_ERR(altdev->base);
+ goto err_release_group1;
+ }
+
+ /* Check specific dependencies for the module */
+ if (altdev->data->setup) {
+ rc = altdev->data->setup(altdev);
+ if (rc)
+ goto err_release_group1;
+ }
+
+ rc = edac_device_add_device(dci);
+ if (rc) {
+ dev_err(edac->dev, "edac_device_add_device failed\n");
+ rc = -ENOMEM;
+ goto err_release_group1;
+ }
+
+ altr_create_edacdev_dbgfs(dci, prv);
+
+ list_add(&altdev->next, &edac->a10_ecc_devices);
+
+ devres_remove_group(edac->dev, altr_edac_a10_device_add);
+
+ return 0;
+
+err_release_group1:
+ edac_device_free_ctl_info(dci);
+err_release_group:
+ edac_printk(KERN_ALERT, EDAC_DEVICE, "%s: %d\n", __func__, __LINE__);
+ devres_release_group(edac->dev, NULL);
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "%s:Error setting up EDAC device: %d\n", ecc_name, rc);
+
+ return rc;
+}
+
+static int altr_edac_a10_probe(struct platform_device *pdev)
+{
+ struct altr_arria10_edac *edac;
+ struct device_node *child;
+ int rc;
+
+ edac = devm_kzalloc(&pdev->dev, sizeof(*edac), GFP_KERNEL);
+ if (!edac)
+ return -ENOMEM;
+
+ edac->dev = &pdev->dev;
+ platform_set_drvdata(pdev, edac);
+ INIT_LIST_HEAD(&edac->a10_ecc_devices);
+
+ edac->ecc_mgr_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "altr,sysmgr-syscon");
+ if (IS_ERR(edac->ecc_mgr_map)) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to get syscon altr,sysmgr-syscon\n");
+ return PTR_ERR(edac->ecc_mgr_map);
+ }
+
+ edac->sb_irq = platform_get_irq(pdev, 0);
+ rc = devm_request_irq(&pdev->dev, edac->sb_irq,
+ altr_edac_a10_irq_handler,
+ IRQF_SHARED, dev_name(&pdev->dev), edac);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "No SBERR IRQ resource\n");
+ return rc;
+ }
+
+ edac->db_irq = platform_get_irq(pdev, 1);
+ rc = devm_request_irq(&pdev->dev, edac->db_irq,
+ altr_edac_a10_irq_handler,
+ IRQF_SHARED, dev_name(&pdev->dev), edac);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n");
+ return rc;
+ }
+
+ for_each_child_of_node(pdev->dev.of_node, child) {
+ if (!of_device_is_available(child))
+ continue;
+ if (of_device_is_compatible(child, "altr,socfpga-a10-l2-ecc"))
+ altr_edac_a10_device_add(edac, child);
+ else if (of_device_is_compatible(child,
+ "altr,socfpga-a10-ocram-ecc"))
+ altr_edac_a10_device_add(edac, child);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id altr_edac_a10_of_match[] = {
+ { .compatible = "altr,socfpga-a10-ecc-manager" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altr_edac_a10_of_match);
+
+static struct platform_driver altr_edac_a10_driver = {
+ .probe = altr_edac_a10_probe,
+ .driver = {
+ .name = "socfpga_a10_ecc_manager",
+ .of_match_table = altr_edac_a10_of_match,
+ },
+};
+module_platform_driver(altr_edac_a10_driver);
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Thor Thayer");
MODULE_DESCRIPTION("EDAC Driver for Altera Memories");
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 953077d3e..42090f36b 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -195,4 +195,132 @@ struct altr_sdram_mc_data {
const struct altr_sdram_prv_data *data;
};
+/************************** EDAC Device Defines **************************/
+/***** General Device Trigger Defines *****/
+#define ALTR_UE_TRIGGER_CHAR 'U' /* Trigger for UE */
+#define ALTR_TRIGGER_READ_WRD_CNT 32 /* Line size x 4 */
+#define ALTR_TRIG_OCRAM_BYTE_SIZE 128 /* Line size x 4 */
+#define ALTR_TRIG_L2C_BYTE_SIZE 4096 /* Full Page */
+
+/******* Cyclone5 and Arria5 Defines *******/
+/* OCRAM ECC Management Group Defines */
+#define ALTR_MAN_GRP_OCRAM_ECC_OFFSET 0x04
+#define ALTR_OCR_ECC_REG_OFFSET 0x00
+#define ALTR_OCR_ECC_EN BIT(0)
+#define ALTR_OCR_ECC_INJS BIT(1)
+#define ALTR_OCR_ECC_INJD BIT(2)
+#define ALTR_OCR_ECC_SERR BIT(3)
+#define ALTR_OCR_ECC_DERR BIT(4)
+
+/* L2 ECC Management Group Defines */
+#define ALTR_MAN_GRP_L2_ECC_OFFSET 0x00
+#define ALTR_L2_ECC_REG_OFFSET 0x00
+#define ALTR_L2_ECC_EN BIT(0)
+#define ALTR_L2_ECC_INJS BIT(1)
+#define ALTR_L2_ECC_INJD BIT(2)
+
+/* Arria10 General ECC Block Module Defines */
+#define ALTR_A10_ECC_CTRL_OFST 0x08
+#define ALTR_A10_ECC_EN BIT(0)
+#define ALTR_A10_ECC_INITA BIT(16)
+#define ALTR_A10_ECC_INITB BIT(24)
+
+#define ALTR_A10_ECC_INITSTAT_OFST 0x0C
+#define ALTR_A10_ECC_INITCOMPLETEA BIT(0)
+#define ALTR_A10_ECC_INITCOMPLETEB BIT(8)
+
+#define ALTR_A10_ECC_ERRINTEN_OFST 0x10
+#define ALTR_A10_ECC_SERRINTEN BIT(0)
+
+#define ALTR_A10_ECC_INTSTAT_OFST 0x20
+#define ALTR_A10_ECC_SERRPENA BIT(0)
+#define ALTR_A10_ECC_DERRPENA BIT(8)
+#define ALTR_A10_ECC_ERRPENA_MASK (ALTR_A10_ECC_SERRPENA | \
+ ALTR_A10_ECC_DERRPENA)
+#define ALTR_A10_ECC_SERRPENB BIT(16)
+#define ALTR_A10_ECC_DERRPENB BIT(24)
+#define ALTR_A10_ECC_ERRPENB_MASK (ALTR_A10_ECC_SERRPENB | \
+ ALTR_A10_ECC_DERRPENB)
+
+#define ALTR_A10_ECC_INTTEST_OFST 0x24
+#define ALTR_A10_ECC_TSERRA BIT(0)
+#define ALTR_A10_ECC_TDERRA BIT(8)
+
+/* ECC Manager Defines */
+#define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94
+#define A10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
+#define A10_SYSMGR_ECC_INTMASK_OCRAM BIT(1)
+
+#define A10_SYSMGR_ECC_INTSTAT_SERR_OFST 0x9C
+#define A10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
+#define A10_SYSMGR_ECC_INTSTAT_L2 BIT(0)
+#define A10_SYSMGR_ECC_INTSTAT_OCRAM BIT(1)
+
+#define A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST 0xA8
+#define A10_SYSGMR_MPU_CLEAR_L2_ECC_SB BIT(15)
+#define A10_SYSGMR_MPU_CLEAR_L2_ECC_MB BIT(31)
+
+/* Arria 10 L2 ECC Management Group Defines */
+#define ALTR_A10_L2_ECC_CTL_OFST 0x0
+#define ALTR_A10_L2_ECC_EN_CTL BIT(0)
+
+#define ALTR_A10_L2_ECC_STATUS 0xFFD060A4
+#define ALTR_A10_L2_ECC_STAT_OFST 0xA4
+#define ALTR_A10_L2_ECC_SERR_PEND BIT(0)
+#define ALTR_A10_L2_ECC_MERR_PEND BIT(0)
+
+#define ALTR_A10_L2_ECC_CLR_OFST 0x4
+#define ALTR_A10_L2_ECC_SERR_CLR BIT(15)
+#define ALTR_A10_L2_ECC_MERR_CLR BIT(31)
+
+#define ALTR_A10_L2_ECC_INJ_OFST ALTR_A10_L2_ECC_CTL_OFST
+#define ALTR_A10_L2_ECC_CE_INJ_MASK 0x00000101
+#define ALTR_A10_L2_ECC_UE_INJ_MASK 0x00010101
+
+/* Arria 10 OCRAM ECC Management Group Defines */
+#define ALTR_A10_OCRAM_ECC_EN_CTL (BIT(1) | BIT(0))
+
+struct altr_edac_device_dev;
+
+struct edac_device_prv_data {
+ int (*setup)(struct altr_edac_device_dev *device);
+ int ce_clear_mask;
+ int ue_clear_mask;
+ int irq_status_mask;
+ char dbgfs_name[20];
+ void * (*alloc_mem)(size_t size, void **other);
+ void (*free_mem)(void *p, size_t size, void *other);
+ int ecc_enable_mask;
+ int ecc_en_ofst;
+ int ce_set_mask;
+ int ue_set_mask;
+ int set_err_ofst;
+ irqreturn_t (*ecc_irq_handler)(struct altr_edac_device_dev *dci,
+ bool sb);
+ int trig_alloc_sz;
+ const struct file_operations *inject_fops;
+};
+
+struct altr_edac_device_dev {
+ struct list_head next;
+ void __iomem *base;
+ int sb_irq;
+ int db_irq;
+ const struct edac_device_prv_data *data;
+ struct dentry *debugfs_dir;
+ char *edac_dev_name;
+ struct altr_arria10_edac *edac;
+ struct edac_device_ctl_info *edac_dev;
+ struct device ddev;
+ int edac_idx;
+};
+
+struct altr_arria10_edac {
+ struct device *dev;
+ struct regmap *ecc_mgr_map;
+ int sb_irq;
+ int db_irq;
+ struct list_head a10_ecc_devices;
+};
+
#endif /* #ifndef _ALTERA_EDAC_H */
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index d87a47547..46784eb2e 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -15,11 +15,6 @@ module_param(ecc_enable_override, int, 0644);
static struct msr __percpu *msrs;
-/*
- * count successfully initialized driver instances for setup_pci_device()
- */
-static atomic_t drv_instances = ATOMIC_INIT(0);
-
/* Per-node stuff */
static struct ecc_settings **ecc_stngs;
@@ -645,7 +640,7 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
input_addr =
dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
- edac_dbg(2, "SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
+ edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
(unsigned long)sys_addr, (unsigned long)input_addr);
return input_addr;
@@ -1918,7 +1913,7 @@ static struct amd64_family_type family_types[] = {
[K8_CPUS] = {
.ctl_name = "K8",
.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
- .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
+ .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
.ops = {
.early_channel_count = k8_early_channel_count,
.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
@@ -1928,7 +1923,7 @@ static struct amd64_family_type family_types[] = {
[F10_CPUS] = {
.ctl_name = "F10h",
.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
- .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
+ .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -1938,7 +1933,7 @@ static struct amd64_family_type family_types[] = {
[F15_CPUS] = {
.ctl_name = "F15h",
.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
- .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
+ .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -1948,7 +1943,7 @@ static struct amd64_family_type family_types[] = {
[F15_M30H_CPUS] = {
.ctl_name = "F15h_M30h",
.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
- .f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3,
+ .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -1958,7 +1953,7 @@ static struct amd64_family_type family_types[] = {
[F15_M60H_CPUS] = {
.ctl_name = "F15h_M60h",
.f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
- .f3_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F3,
+ .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -1968,7 +1963,7 @@ static struct amd64_family_type family_types[] = {
[F16_CPUS] = {
.ctl_name = "F16h",
.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
- .f3_id = PCI_DEVICE_ID_AMD_16H_NB_F3,
+ .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -1978,7 +1973,7 @@ static struct amd64_family_type family_types[] = {
[F16_M30H_CPUS] = {
.ctl_name = "F16h_M30h",
.f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
- .f3_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F3,
+ .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
.ops = {
.early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
@@ -2227,13 +2222,13 @@ static inline void decode_bus_error(int node_id, struct mce *m)
}
/*
- * Use pvt->F2 which contains the F2 CPU PCI device to get the related
- * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
+ * Use pvt->F3 which contains the F3 CPU PCI device to get the related
+ * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
*/
-static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
+static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f2_id)
{
/* Reserve the ADDRESS MAP Device */
- pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
+ pvt->F1 = pci_get_related_function(pvt->F3->vendor, f1_id, pvt->F3);
if (!pvt->F1) {
amd64_err("error address map device not found: "
"vendor %x device 0x%x (broken BIOS?)\n",
@@ -2241,15 +2236,15 @@ static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
return -ENODEV;
}
- /* Reserve the MISC Device */
- pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
- if (!pvt->F3) {
+ /* Reserve the DCT Device */
+ pvt->F2 = pci_get_related_function(pvt->F3->vendor, f2_id, pvt->F3);
+ if (!pvt->F2) {
pci_dev_put(pvt->F1);
pvt->F1 = NULL;
- amd64_err("error F3 device not found: "
+ amd64_err("error F2 device not found: "
"vendor %x device 0x%x (broken BIOS?)\n",
- PCI_VENDOR_ID_AMD, f3_id);
+ PCI_VENDOR_ID_AMD, f2_id);
return -ENODEV;
}
@@ -2263,7 +2258,7 @@ static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
pci_dev_put(pvt->F1);
- pci_dev_put(pvt->F3);
+ pci_dev_put(pvt->F2);
}
/*
@@ -2778,14 +2773,14 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
NULL
};
-static int init_one_instance(struct pci_dev *F2)
+static int init_one_instance(unsigned int nid)
{
- struct amd64_pvt *pvt = NULL;
+ struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct amd64_family_type *fam_type = NULL;
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
+ struct amd64_pvt *pvt = NULL;
int err = 0, ret;
- u16 nid = amd_pci_dev_to_node_id(F2);
ret = -ENOMEM;
pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
@@ -2793,7 +2788,7 @@ static int init_one_instance(struct pci_dev *F2)
goto err_ret;
pvt->mc_node_id = nid;
- pvt->F2 = F2;
+ pvt->F3 = F3;
ret = -EINVAL;
fam_type = per_family_init(pvt);
@@ -2801,7 +2796,7 @@ static int init_one_instance(struct pci_dev *F2)
goto err_free;
ret = -ENODEV;
- err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
+ err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f2_id);
if (err)
goto err_free;
@@ -2836,7 +2831,7 @@ static int init_one_instance(struct pci_dev *F2)
goto err_siblings;
mci->pvt_info = pvt;
- mci->pdev = &pvt->F2->dev;
+ mci->pdev = &pvt->F3->dev;
setup_mci_misc_attrs(mci, fam_type);
@@ -2855,8 +2850,6 @@ static int init_one_instance(struct pci_dev *F2)
amd_register_ecc_decoder(decode_bus_error);
- atomic_inc(&drv_instances);
-
return 0;
err_add_mc:
@@ -2872,19 +2865,11 @@ err_ret:
return ret;
}
-static int probe_one_instance(struct pci_dev *pdev,
- const struct pci_device_id *mc_type)
+static int probe_one_instance(unsigned int nid)
{
- u16 nid = amd_pci_dev_to_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s;
- int ret = 0;
-
- ret = pci_enable_device(pdev);
- if (ret < 0) {
- edac_dbg(0, "ret=%d\n", ret);
- return -EIO;
- }
+ int ret;
ret = -ENOMEM;
s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
@@ -2905,7 +2890,7 @@ static int probe_one_instance(struct pci_dev *pdev,
goto err_enable;
}
- ret = init_one_instance(pdev);
+ ret = init_one_instance(nid);
if (ret < 0) {
amd64_err("Error probing instance: %d\n", nid);
restore_ecc_error_reporting(s, nid, F3);
@@ -2921,19 +2906,18 @@ err_out:
return ret;
}
-static void remove_one_instance(struct pci_dev *pdev)
+static void remove_one_instance(unsigned int nid)
{
- struct mem_ctl_info *mci;
- struct amd64_pvt *pvt;
- u16 nid = amd_pci_dev_to_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s = ecc_stngs[nid];
+ struct mem_ctl_info *mci;
+ struct amd64_pvt *pvt;
- mci = find_mci_by_dev(&pdev->dev);
+ mci = find_mci_by_dev(&F3->dev);
WARN_ON(!mci);
/* Remove from EDAC CORE tracking list */
- mci = edac_mc_del_mc(&pdev->dev);
+ mci = edac_mc_del_mc(&F3->dev);
if (!mci)
return;
@@ -2957,31 +2941,6 @@ static void remove_one_instance(struct pci_dev *pdev)
edac_mc_free(mci);
}
-/*
- * This table is part of the interface for loading drivers for PCI devices. The
- * PCI core identifies what devices are on a system during boot, and then
- * inquiry this table to see if this driver is for a given device found.
- */
-static const struct pci_device_id amd64_pci_table[] = {
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F2) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F2) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F2) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F2) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F2) },
- {0, }
-};
-MODULE_DEVICE_TABLE(pci, amd64_pci_table);
-
-static struct pci_driver amd64_pci_driver = {
- .name = EDAC_MOD_STR,
- .probe = probe_one_instance,
- .remove = remove_one_instance,
- .id_table = amd64_pci_table,
- .driver.probe_type = PROBE_FORCE_SYNCHRONOUS,
-};
-
static void setup_pci_device(void)
{
struct mem_ctl_info *mci;
@@ -3005,8 +2964,7 @@ static void setup_pci_device(void)
static int __init amd64_edac_init(void)
{
int err = -ENODEV;
-
- printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
+ int i;
opstate_init();
@@ -3022,13 +2980,14 @@ static int __init amd64_edac_init(void)
if (!msrs)
goto err_free;
- err = pci_register_driver(&amd64_pci_driver);
- if (err)
- goto err_pci;
+ for (i = 0; i < amd_nb_num(); i++)
+ if (probe_one_instance(i)) {
+ /* unwind properly */
+ while (--i >= 0)
+ remove_one_instance(i);
- err = -ENODEV;
- if (!atomic_read(&drv_instances))
- goto err_no_instances;
+ goto err_pci;
+ }
setup_pci_device();
@@ -3036,10 +2995,9 @@ static int __init amd64_edac_init(void)
amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif
- return 0;
+ printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
-err_no_instances:
- pci_unregister_driver(&amd64_pci_driver);
+ return 0;
err_pci:
msrs_free(msrs);
@@ -3055,10 +3013,13 @@ err_ret:
static void __exit amd64_edac_exit(void)
{
+ int i;
+
if (pci_ctl)
edac_pci_release_generic_ctl(pci_ctl);
- pci_unregister_driver(&amd64_pci_driver);
+ for (i = 0; i < amd_nb_num(); i++)
+ remove_one_instance(i);
kfree(ecc_stngs);
ecc_stngs = NULL;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index c0f248f3a..c08870479 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -422,7 +422,7 @@ struct low_ops {
struct amd64_family_type {
const char *ctl_name;
- u16 f1_id, f3_id;
+ u16 f1_id, f2_id;
struct low_ops ops;
};
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index ff51b51d2..c3ee3ad98 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -924,7 +924,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci,
mci->ue_mc += count;
if (!enable_per_layer_report) {
- mci->ce_noinfo_count += count;
+ mci->ue_noinfo_count += count;
return;
}
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 26e65ab59..10c305b4a 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -998,11 +998,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
+ struct bus_type *bus = mci->bus;
const char *name = mci->bus->name;
edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
device_unregister(&mci->dev);
- bus_unregister(mci->bus);
+ bus_unregister(bus);
kfree(name);
}
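
The local bus variable above is a use-after-free guard: device_unregister() may drop the last reference to the mci device and free the enclosing mem_ctl_info, so mci->bus must not be dereferenced afterwards; name was already being cached for the same reason before the kfree().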
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 792bdae2b..8a68a5e94 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -271,16 +271,6 @@ struct i7core_pvt {
bool is_registered, enable_scrub;
- /* Fifo double buffers */
- struct mce mce_entry[MCE_LOG_LEN];
- struct mce mce_outentry[MCE_LOG_LEN];
-
- /* Fifo in/out counters */
- unsigned mce_in, mce_out;
-
- /* Count indicator to show errors not got */
- unsigned mce_overrun;
-
/* DCLK Frequency used for computing scrub rate */
int dclk_freq;
@@ -1792,56 +1782,15 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
* i7core_check_error Retrieve and process errors reported by the
* hardware. Called by the Core module.
*/
-static void i7core_check_error(struct mem_ctl_info *mci)
+static void i7core_check_error(struct mem_ctl_info *mci, struct mce *m)
{
struct i7core_pvt *pvt = mci->pvt_info;
- int i;
- unsigned count = 0;
- struct mce *m;
- /*
- * MCE first step: Copy all mce errors into a temporary buffer
- * We use a double buffering here, to reduce the risk of
- * losing an error.
- */
- smp_rmb();
- count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
- % MCE_LOG_LEN;
- if (!count)
- goto check_ce_error;
-
- m = pvt->mce_outentry;
- if (pvt->mce_in + count > MCE_LOG_LEN) {
- unsigned l = MCE_LOG_LEN - pvt->mce_in;
-
- memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
- smp_wmb();
- pvt->mce_in = 0;
- count -= l;
- m += l;
- }
- memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
- smp_wmb();
- pvt->mce_in += count;
-
- smp_rmb();
- if (pvt->mce_overrun) {
- i7core_printk(KERN_ERR, "Lost %d memory errors\n",
- pvt->mce_overrun);
- smp_wmb();
- pvt->mce_overrun = 0;
- }
-
- /*
- * MCE second step: parse errors and display
- */
- for (i = 0; i < count; i++)
- i7core_mce_output_error(mci, &pvt->mce_outentry[i]);
+ i7core_mce_output_error(mci, m);
/*
* Now, let's increment CE error counts
*/
-check_ce_error:
if (!pvt->is_registered)
i7core_udimm_check_mc_ecc_err(mci);
else
@@ -1849,12 +1798,8 @@ check_ce_error:
}
/*
- * i7core_mce_check_error Replicates mcelog routine to get errors
- * This routine simply queues mcelog errors, and
- * return. The error itself should be handled later
- * by i7core_check_error.
- * WARNING: As this routine should be called at NMI time, extra care should
- * be taken to avoid deadlocks, and to be as fast as possible.
+ * Check that logging is enabled and that this is the right type
+ * of error for us to handle.
*/
static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
void *data)
@@ -1882,21 +1827,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
if (mce->bank != 8)
return NOTIFY_DONE;
- smp_rmb();
- if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
- smp_wmb();
- pvt->mce_overrun++;
- return NOTIFY_DONE;
- }
-
- /* Copy memory error at the ringbuffer */
- memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
- smp_wmb();
- pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
-
- /* Handle fatal errors immediately */
- if (mce->mcgstatus & 1)
- i7core_check_error(mci);
+ i7core_check_error(mci, mce);
/* Advise mcelog that the errors were handled */
return NOTIFY_STOP;
@@ -2243,8 +2174,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
get_dimm_config(mci);
/* record ptr to the generic device */
mci->pdev = &i7core_dev->pdev[0]->dev;
- /* Set the function pointer to an actual operation function */
- mci->edac_check = i7core_check_error;
/* Enable scrubrate setting */
if (pvt->enable_scrub)
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 18d77ace4..1c88d9707 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -17,6 +17,7 @@
* 015c: Xeon E3-1200 v2/3rd Gen Core processor DRAM Controller
* 0c04: Xeon E3-1200 v3/4th Gen Core Processor DRAM Controller
* 0c08: Xeon E3-1200 v3 Processor DRAM Controller
+ * 1918: Xeon E3-1200 v5 Skylake Host Bridge/DRAM Registers
*
* Based on Intel specification:
* http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf
@@ -55,6 +56,7 @@
#define PCI_DEVICE_ID_INTEL_IE31200_HB_5 0x015c
#define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04
#define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x1918
#define IE31200_DIMMS 4
#define IE31200_RANKS 8
@@ -105,8 +107,11 @@
* 1 Multiple Bit Error Status (MERRSTS)
* 0 Correctable Error Status (CERRSTS)
*/
+
#define IE31200_C0ECCERRLOG 0x40c8
#define IE31200_C1ECCERRLOG 0x44c8
+#define IE31200_C0ECCERRLOG_SKL 0x4048
+#define IE31200_C1ECCERRLOG_SKL 0x4448
#define IE31200_ECCERRLOG_CE BIT(0)
#define IE31200_ECCERRLOG_UE BIT(1)
#define IE31200_ECCERRLOG_RANK_BITS GENMASK_ULL(28, 27)
@@ -123,17 +128,28 @@
#define IE31200_CAPID0_DDPCD BIT(6)
#define IE31200_CAPID0_ECC BIT(1)
-#define IE31200_MAD_DIMM_0_OFFSET 0x5004
-#define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0)
-#define IE31200_MAD_DIMM_A_RANK BIT(17)
-#define IE31200_MAD_DIMM_A_WIDTH BIT(19)
-
-#define IE31200_PAGES(n) (n << (28 - PAGE_SHIFT))
+#define IE31200_MAD_DIMM_0_OFFSET 0x5004
+#define IE31200_MAD_DIMM_0_OFFSET_SKL 0x500C
+#define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0)
+#define IE31200_MAD_DIMM_A_RANK BIT(17)
+#define IE31200_MAD_DIMM_A_RANK_SHIFT 17
+#define IE31200_MAD_DIMM_A_RANK_SKL BIT(10)
+#define IE31200_MAD_DIMM_A_RANK_SKL_SHIFT 10
+#define IE31200_MAD_DIMM_A_WIDTH BIT(19)
+#define IE31200_MAD_DIMM_A_WIDTH_SHIFT 19
+#define IE31200_MAD_DIMM_A_WIDTH_SKL GENMASK_ULL(9, 8)
+#define IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT 8
+
+/* Skylake reports 1GB increments, everything else is 256MB */
+#define IE31200_PAGES(n, skl) \
+ (n << (28 + (2 * skl) - PAGE_SHIFT))
static int nr_channels;
struct ie31200_priv {
void __iomem *window;
+ void __iomem *c0errlog;
+ void __iomem *c1errlog;
};
enum ie31200_chips {
@@ -157,9 +173,9 @@ static const struct ie31200_dev_info ie31200_devs[] = {
};
struct dimm_data {
- u8 size; /* in 256MB multiples */
+	u8 size;		/* in multiples of 256MB; 1GB on Skylake */
u8 dual_rank : 1,
- x16_width : 1; /* 0 means x8 width */
+ x16_width : 2; /* 0 means x8 width */
};
static int how_many_channels(struct pci_dev *pdev)
@@ -197,11 +213,10 @@ static bool ecc_capable(struct pci_dev *pdev)
return true;
}
-static int eccerrlog_row(int channel, u64 log)
+static int eccerrlog_row(u64 log)
{
- int rank = ((log & IE31200_ECCERRLOG_RANK_BITS) >>
- IE31200_ECCERRLOG_RANK_SHIFT);
- return rank | (channel * IE31200_RANKS_PER_CHANNEL);
+ return ((log & IE31200_ECCERRLOG_RANK_BITS) >>
+ IE31200_ECCERRLOG_RANK_SHIFT);
}
static void ie31200_clear_error_info(struct mem_ctl_info *mci)
@@ -219,7 +234,6 @@ static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
{
struct pci_dev *pdev;
struct ie31200_priv *priv = mci->pvt_info;
- void __iomem *window = priv->window;
pdev = to_pci_dev(mci->pdev);
@@ -232,9 +246,9 @@ static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
if (!(info->errsts & IE31200_ERRSTS_BITS))
return;
- info->eccerrlog[0] = lo_hi_readq(window + IE31200_C0ECCERRLOG);
+ info->eccerrlog[0] = lo_hi_readq(priv->c0errlog);
if (nr_channels == 2)
- info->eccerrlog[1] = lo_hi_readq(window + IE31200_C1ECCERRLOG);
+ info->eccerrlog[1] = lo_hi_readq(priv->c1errlog);
pci_read_config_word(pdev, IE31200_ERRSTS, &info->errsts2);
@@ -245,10 +259,10 @@ static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
* should be UE info.
*/
if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
- info->eccerrlog[0] = lo_hi_readq(window + IE31200_C0ECCERRLOG);
+ info->eccerrlog[0] = lo_hi_readq(priv->c0errlog);
if (nr_channels == 2)
info->eccerrlog[1] =
- lo_hi_readq(window + IE31200_C1ECCERRLOG);
+ lo_hi_readq(priv->c1errlog);
}
ie31200_clear_error_info(mci);
@@ -274,14 +288,14 @@ static void ie31200_process_error_info(struct mem_ctl_info *mci,
if (log & IE31200_ECCERRLOG_UE) {
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
0, 0, 0,
- eccerrlog_row(channel, log),
+ eccerrlog_row(log),
channel, -1,
"ie31200 UE", "");
} else if (log & IE31200_ECCERRLOG_CE) {
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
0, 0,
IE31200_ECCERRLOG_SYNDROME(log),
- eccerrlog_row(channel, log),
+ eccerrlog_row(log),
channel, -1,
"ie31200 CE", "");
}
@@ -326,6 +340,33 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
return window;
}
+static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
+ int chan)
+{
+ dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE;
+ dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0;
+ dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >>
+ (IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4)));
+}
+
+static void __populate_dimm_info(struct dimm_data *dd, u32 addr_decode,
+ int chan)
+{
+ dd->size = (addr_decode >> (chan << 3)) & IE31200_MAD_DIMM_SIZE;
+ dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK << chan)) ? 1 : 0;
+ dd->x16_width = (addr_decode & (IE31200_MAD_DIMM_A_WIDTH << chan)) ? 1 : 0;
+}
+
+static void populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int chan,
+ bool skl)
+{
+ if (skl)
+ __skl_populate_dimm_info(dd, addr_decode, chan);
+ else
+ __populate_dimm_info(dd, addr_decode, chan);
+}
+
+
static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
{
int i, j, ret;
@@ -334,7 +375,8 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
void __iomem *window;
struct ie31200_priv *priv;
- u32 addr_decode;
+ u32 addr_decode, mad_offset;
+ bool skl = (pdev->device == PCI_DEVICE_ID_INTEL_IE31200_HB_8);
edac_dbg(0, "MC:\n");
@@ -363,7 +405,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
edac_dbg(3, "MC: init mci\n");
mci->pdev = &pdev->dev;
- mci->mtype_cap = MEM_FLAG_DDR3;
+ if (skl)
+ mci->mtype_cap = MEM_FLAG_DDR4;
+ else
+ mci->mtype_cap = MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_SECDED;
mci->edac_cap = EDAC_FLAG_SECDED;
mci->mod_name = EDAC_MOD_STR;
@@ -374,19 +419,24 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
mci->ctl_page_to_phys = NULL;
priv = mci->pvt_info;
priv->window = window;
+ if (skl) {
+ priv->c0errlog = window + IE31200_C0ECCERRLOG_SKL;
+ priv->c1errlog = window + IE31200_C1ECCERRLOG_SKL;
+ mad_offset = IE31200_MAD_DIMM_0_OFFSET_SKL;
+ } else {
+ priv->c0errlog = window + IE31200_C0ECCERRLOG;
+ priv->c1errlog = window + IE31200_C1ECCERRLOG;
+ mad_offset = IE31200_MAD_DIMM_0_OFFSET;
+ }
/* populate DIMM info */
for (i = 0; i < IE31200_CHANNELS; i++) {
- addr_decode = readl(window + IE31200_MAD_DIMM_0_OFFSET +
+ addr_decode = readl(window + mad_offset +
(i * 4));
edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
- dimm_info[i][j].size = (addr_decode >> (j * 8)) &
- IE31200_MAD_DIMM_SIZE;
- dimm_info[i][j].dual_rank = (addr_decode &
- (IE31200_MAD_DIMM_A_RANK << j)) ? 1 : 0;
- dimm_info[i][j].x16_width = (addr_decode &
- (IE31200_MAD_DIMM_A_WIDTH << j)) ? 1 : 0;
+ populate_dimm_info(&dimm_info[i][j], addr_decode, j,
+ skl);
edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
dimm_info[i][j].size,
dimm_info[i][j].dual_rank,
@@ -405,7 +455,7 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
struct dimm_info *dimm;
unsigned long nr_pages;
- nr_pages = IE31200_PAGES(dimm_info[j][i].size);
+ nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl);
if (nr_pages == 0)
continue;
@@ -417,7 +467,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
dimm->nr_pages = nr_pages;
edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
dimm->grain = 8; /* just a guess */
- dimm->mtype = MEM_DDR3;
+ if (skl)
+ dimm->mtype = MEM_DDR4;
+ else
+ dimm->mtype = MEM_DDR3;
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = EDAC_UNKNOWN;
}
@@ -426,7 +479,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
dimm->nr_pages = nr_pages;
edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
dimm->grain = 8; /* same guess */
- dimm->mtype = MEM_DDR3;
+ if (skl)
+ dimm->mtype = MEM_DDR4;
+ else
+ dimm->mtype = MEM_DDR3;
dimm->dtype = DEV_UNKNOWN;
dimm->edac_mode = EDAC_UNKNOWN;
}
@@ -501,6 +557,9 @@ static const struct pci_device_id ie31200_pci_tbl[] = {
PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
IE31200},
{
+ PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ IE31200},
+ {
0,
} /* 0 terminated list. */
};
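/*
 * A worked example of the IE31200_PAGES(n, skl) change above, assuming
 * PAGE_SHIFT == 12 (4KiB pages); the EX_* names are local to this sketch.
 * C11 static_assert checks the arithmetic at compile time.
 */
#include <assert.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGES(n, skl) ((n) << (28 + (2 * (skl)) - EX_PAGE_SHIFT))

/* n == 4 pre-Skylake: four 256MiB steps = 1GiB = 262144 4KiB pages */
static_assert(EX_PAGES(4UL, 0) == 262144UL, "256MiB granularity");
/* n == 4 on Skylake: four 1GiB steps = 4GiB = 1048576 4KiB pages */
static_assert(EX_PAGES(4UL, 1) == 1048576UL, "1GiB granularity");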
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 49768c08a..9b6800a79 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -1052,7 +1052,6 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
struct mce *m = (struct mce *)data;
struct cpuinfo_x86 *c = &cpu_data(m->extcpu);
int ecc;
- u32 ebx = cpuid_ebx(0x80000007);
if (amd_filter_mce(m))
return NOTIFY_STOP;
@@ -1075,7 +1074,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
- if (!!(ebx & BIT(3))) {
+ if (boot_cpu_has(X86_FEATURE_SMCA)) {
u32 low, high;
u32 addr = MSR_AMD64_SMCA_MCx_CONFIG(m->bank);
@@ -1094,7 +1093,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
if (m->status & MCI_STATUS_ADDRV)
pr_emerg(HW_ERR "MC%d Error Address: 0x%016llx\n", m->bank, m->addr);
- if (!!(ebx & BIT(3))) {
+ if (boot_cpu_has(X86_FEATURE_SMCA)) {
decode_smca_errors(m);
goto err_code;
}
@@ -1149,7 +1148,6 @@ static struct notifier_block amd_mce_dec_nb = {
static int __init mce_amd_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
- u32 ebx;
if (c->x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
@@ -1205,9 +1203,8 @@ static int __init mce_amd_init(void)
break;
case 0x17:
- ebx = cpuid_ebx(0x80000007);
xec_mask = 0x3f;
- if (!(ebx & BIT(3))) {
+ if (!boot_cpu_has(X86_FEATURE_SMCA)) {
printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n");
goto err_out;
}
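/*
 * The equivalence behind the mce_amd.c hunks above, as a minimal sketch:
 * CPUID leaf 0x80000007 EBX bit 3 advertises Scalable MCA, and the kernel
 * caches it at boot as X86_FEATURE_SMCA, so testing the cached flag avoids
 * re-executing CPUID for every decoded error. smca_supported() is a
 * hypothetical wrapper, not part of the driver.
 */
#include <linux/types.h>
#include <asm/cpufeature.h>

static bool smca_supported(void)
{
	/* cached flag, filled from CPUID 0x80000007:EBX[3] at early boot */
	return boot_cpu_has(X86_FEATURE_SMCA);
}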
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index b274fa2ff..4fb2eb7c8 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -21,6 +21,8 @@
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <asm/cpu_device_id.h>
#include <asm/processor.h>
#include <asm/mce.h>
@@ -28,8 +30,6 @@
/* Static vars */
static LIST_HEAD(sbridge_edac_list);
-static DEFINE_MUTEX(sbridge_edac_lock);
-static int probed;
/*
* Alter this version for the module when modifications are made
@@ -329,6 +329,7 @@ struct pci_id_descr {
struct pci_id_table {
const struct pci_id_descr *descr;
int n_devs;
+ enum type type;
};
struct sbridge_dev {
@@ -367,16 +368,6 @@ struct sbridge_pvt {
bool is_mirrored, is_lockstep, is_close_pg;
bool is_chan_hash;
- /* Fifo double buffers */
- struct mce mce_entry[MCE_LOG_LEN];
- struct mce mce_outentry[MCE_LOG_LEN];
-
- /* Fifo in/out counters */
- unsigned mce_in, mce_out;
-
- /* Count indicator to show errors not got */
- unsigned mce_overrun;
-
/* Memory description */
u64 tolm, tohm;
struct knl_pvt knl;
@@ -407,9 +398,14 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = {
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
};
-#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
+#define PCI_ID_TABLE_ENTRY(A, T) { \
+ .descr = A, \
+ .n_devs = ARRAY_SIZE(A), \
+ .type = T \
+}
+
static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
{0,} /* 0 terminated list. */
};
@@ -476,7 +472,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = {
};
static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
{0,} /* 0 terminated list. */
};
@@ -549,7 +545,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = {
};
static const struct pci_id_table pci_dev_descr_haswell_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
{0,} /* 0 terminated list. */
};
@@ -593,7 +589,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = {
};
static const struct pci_id_table pci_dev_descr_knl_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_knl),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
{0,}
};
@@ -661,19 +657,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = {
};
static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell),
- {0,} /* 0 terminated list. */
-};
-
-/*
- * pci_device_id table for which devices we are looking for
- */
-static const struct pci_device_id sbridge_pci_tbl[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0)},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0)},
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
{0,} /* 0 terminated list. */
};
@@ -2394,22 +2378,19 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
* @num_mc: pointer to the memory controllers count, to be incremented in case
* of success.
* @table: model specific table
- * @allow_dups: allow for multiple devices to exist with the same device id
- * (as implemented, this isn't expected to work correctly in the
- * multi-socket case).
- * @multi_bus: don't assume devices on different buses belong to different
- * memory controllers.
*
* returns 0 in case of success or error code
*/
-static int sbridge_get_all_devices_full(u8 *num_mc,
- const struct pci_id_table *table,
- int allow_dups,
- int multi_bus)
+static int sbridge_get_all_devices(u8 *num_mc,
+ const struct pci_id_table *table)
{
int i, rc;
struct pci_dev *pdev = NULL;
+ int allow_dups = 0;
+ int multi_bus = 0;
+ if (table->type == KNIGHTS_LANDING)
+ allow_dups = multi_bus = 1;
while (table && table->descr) {
for (i = 0; i < table->n_devs; i++) {
if (!allow_dups || i == 0 ||
@@ -2436,11 +2417,6 @@ static int sbridge_get_all_devices_full(u8 *num_mc,
return 0;
}
-#define sbridge_get_all_devices(num_mc, table) \
- sbridge_get_all_devices_full(num_mc, table, 0, 0)
-#define sbridge_get_all_devices_knl(num_mc, table) \
- sbridge_get_all_devices_full(num_mc, table, 1, 1)
-
static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
struct sbridge_dev *sbridge_dev)
{
@@ -3100,63 +3076,8 @@ err_parsing:
}
/*
- * sbridge_check_error Retrieve and process errors reported by the
- * hardware. Called by the Core module.
- */
-static void sbridge_check_error(struct mem_ctl_info *mci)
-{
- struct sbridge_pvt *pvt = mci->pvt_info;
- int i;
- unsigned count = 0;
- struct mce *m;
-
- /*
- * MCE first step: Copy all mce errors into a temporary buffer
- * We use a double buffering here, to reduce the risk of
- * loosing an error.
- */
- smp_rmb();
- count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
- % MCE_LOG_LEN;
- if (!count)
- return;
-
- m = pvt->mce_outentry;
- if (pvt->mce_in + count > MCE_LOG_LEN) {
- unsigned l = MCE_LOG_LEN - pvt->mce_in;
-
- memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
- smp_wmb();
- pvt->mce_in = 0;
- count -= l;
- m += l;
- }
- memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
- smp_wmb();
- pvt->mce_in += count;
-
- smp_rmb();
- if (pvt->mce_overrun) {
- sbridge_printk(KERN_ERR, "Lost %d memory errors\n",
- pvt->mce_overrun);
- smp_wmb();
- pvt->mce_overrun = 0;
- }
-
- /*
- * MCE second step: parse errors and display
- */
- for (i = 0; i < count; i++)
- sbridge_mce_output_error(mci, &pvt->mce_outentry[i]);
-}
-
-/*
- * sbridge_mce_check_error Replicates mcelog routine to get errors
- * This routine simply queues mcelog errors, and
- * return. The error itself should be handled later
- * by sbridge_check_error.
- * WARNING: As this routine should be called at NMI time, extra care should
- * be taken to avoid deadlocks, and to be as fast as possible.
+ * Check that logging is enabled and that this is the right type
+ * of error for us to handle.
*/
static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
void *data)
@@ -3201,21 +3122,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
"%u APIC %x\n", mce->cpuvendor, mce->cpuid,
mce->time, mce->socketid, mce->apicid);
- smp_rmb();
- if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
- smp_wmb();
- pvt->mce_overrun++;
- return NOTIFY_DONE;
- }
-
- /* Copy memory error at the ringbuffer */
- memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
- smp_wmb();
- pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
-
- /* Handle fatal errors immediately */
- if (mce->mcgstatus & 1)
- sbridge_check_error(mci);
+ sbridge_mce_output_error(mci, mce);
 	/* Advise mcelog that the errors were handled */
return NOTIFY_STOP;
@@ -3301,9 +3208,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
- /* Set the function pointer to an actual operation function */
- mci->edac_check = sbridge_check_error;
-
pvt->info.type = type;
switch (type) {
case IVY_BRIDGE:
@@ -3451,62 +3355,40 @@ fail0:
return rc;
}
+#define ICPU(model, table) \
+ { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
+
+static const struct x86_cpu_id sbridge_cpuids[] = {
+ ICPU(0x2d, pci_dev_descr_sbridge_table), /* SANDY_BRIDGE */
+ ICPU(0x3e, pci_dev_descr_ibridge_table), /* IVY_BRIDGE */
+ ICPU(0x3f, pci_dev_descr_haswell_table), /* HASWELL */
+ ICPU(0x4f, pci_dev_descr_broadwell_table), /* BROADWELL */
+ ICPU(0x56, pci_dev_descr_broadwell_table), /* BROADWELL-DE */
+ ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */
+ { }
+};
+MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
+
/*
- * sbridge_probe Probe for ONE instance of device to see if it is
+ * sbridge_probe	Get all devices and register the memory controllers
* present.
* return:
* 0 for FOUND a device
* < 0 for error code
*/
-static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int sbridge_probe(const struct x86_cpu_id *id)
{
int rc = -ENODEV;
u8 mc, num_mc = 0;
struct sbridge_dev *sbridge_dev;
- enum type type = SANDY_BRIDGE;
+ struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
/* get the pci devices we want to reserve for our use */
- mutex_lock(&sbridge_edac_lock);
-
- /*
- * All memory controllers are allocated at the first pass.
- */
- if (unlikely(probed >= 1)) {
- mutex_unlock(&sbridge_edac_lock);
- return -ENODEV;
- }
- probed++;
+ rc = sbridge_get_all_devices(&num_mc, ptable);
- switch (pdev->device) {
- case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
- rc = sbridge_get_all_devices(&num_mc,
- pci_dev_descr_ibridge_table);
- type = IVY_BRIDGE;
- break;
- case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
- rc = sbridge_get_all_devices(&num_mc,
- pci_dev_descr_sbridge_table);
- type = SANDY_BRIDGE;
- break;
- case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
- rc = sbridge_get_all_devices(&num_mc,
- pci_dev_descr_haswell_table);
- type = HASWELL;
- break;
- case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
- rc = sbridge_get_all_devices(&num_mc,
- pci_dev_descr_broadwell_table);
- type = BROADWELL;
- break;
- case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
- rc = sbridge_get_all_devices_knl(&num_mc,
- pci_dev_descr_knl_table);
- type = KNIGHTS_LANDING;
- break;
- }
if (unlikely(rc < 0)) {
- edac_dbg(0, "couldn't get all devices for 0x%x\n", pdev->device);
+ edac_dbg(0, "couldn't get all devices\n");
goto fail0;
}
@@ -3517,14 +3399,13 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mc, mc + 1, num_mc);
sbridge_dev->mc = mc++;
- rc = sbridge_register_mci(sbridge_dev, type);
+ rc = sbridge_register_mci(sbridge_dev, ptable->type);
if (unlikely(rc < 0))
goto fail1;
}
sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
- mutex_unlock(&sbridge_edac_lock);
return 0;
fail1:
@@ -3533,74 +3414,47 @@ fail1:
sbridge_put_all_devices();
fail0:
- mutex_unlock(&sbridge_edac_lock);
return rc;
}
/*
- * sbridge_remove destructor for one instance of device
+ * sbridge_remove cleanup
*
*/
-static void sbridge_remove(struct pci_dev *pdev)
+static void sbridge_remove(void)
{
struct sbridge_dev *sbridge_dev;
edac_dbg(0, "\n");
- /*
- * we have a trouble here: pdev value for removal will be wrong, since
- * it will point to the X58 register used to detect that the machine
- * is a Nehalem or upper design. However, due to the way several PCI
- * devices are grouped together to provide MC functionality, we need
- * to use a different method for releasing the devices
- */
-
- mutex_lock(&sbridge_edac_lock);
-
- if (unlikely(!probed)) {
- mutex_unlock(&sbridge_edac_lock);
- return;
- }
-
list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
sbridge_unregister_mci(sbridge_dev);
/* Release PCI resources */
sbridge_put_all_devices();
-
- probed--;
-
- mutex_unlock(&sbridge_edac_lock);
}
-MODULE_DEVICE_TABLE(pci, sbridge_pci_tbl);
-
-/*
- * sbridge_driver pci_driver structure for this module
- *
- */
-static struct pci_driver sbridge_driver = {
- .name = "sbridge_edac",
- .probe = sbridge_probe,
- .remove = sbridge_remove,
- .id_table = sbridge_pci_tbl,
-};
-
/*
* sbridge_init Module entry function
* Try to initialize this module for its devices
*/
static int __init sbridge_init(void)
{
- int pci_rc;
+ const struct x86_cpu_id *id;
+ int rc;
edac_dbg(2, "\n");
+ id = x86_match_cpu(sbridge_cpuids);
+ if (!id)
+ return -ENODEV;
+
/* Ensure that the OPSTATE is set correctly for POLL or NMI */
opstate_init();
- pci_rc = pci_register_driver(&sbridge_driver);
- if (pci_rc >= 0) {
+ rc = sbridge_probe(id);
+
+ if (rc >= 0) {
mce_register_decode_chain(&sbridge_mce_dec);
if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
@@ -3608,9 +3462,9 @@ static int __init sbridge_init(void)
}
sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
- pci_rc);
+ rc);
- return pci_rc;
+ return rc;
}
/*
@@ -3620,7 +3474,7 @@ static int __init sbridge_init(void)
static void __exit sbridge_exit(void)
{
edac_dbg(2, "\n");
- pci_unregister_driver(&sbridge_driver);
+ sbridge_remove();
mce_unregister_decode_chain(&sbridge_mce_dec);
}
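/*
 * The generic shape of the CPU-model matching that replaces PCI-id probing
 * in sb_edac above: the per-model payload (for sb_edac, a pci_id_table)
 * travels in .driver_data and is recovered after x86_match_cpu(). All
 * my_* names below are hypothetical; this is a sketch, not driver code.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>

struct my_model_data {
	int flavor;
};

static const struct my_model_data my_sandy = { .flavor = 1 };
static const struct my_model_data my_ivy = { .flavor = 2 };

static const struct x86_cpu_id my_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, 0x2d, 0, (unsigned long)&my_sandy },
	{ X86_VENDOR_INTEL, 6, 0x3e, 0, (unsigned long)&my_ivy },
	{ }
};

static int my_probe(const struct my_model_data *d)
{
	return d->flavor ? 0 : -ENODEV;
}

static int __init my_init(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(my_cpuids);

	if (!id)	/* unsupported model: bail out like sbridge_init() */
		return -ENODEV;

	return my_probe((const struct my_model_data *)id->driver_data);
}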
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index f4ea80d60..309311b1f 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -1023,7 +1023,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
spin_unlock_irqrestore(&dev->lock, flags);
- dev->netdev->trans_start = jiffies;
+ netif_trans_update(dev->netdev);
out:
if (free)
fwnet_free_ptask(ptask);
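/*
 * netif_trans_update() is the 4.7 helper that replaces open-coded
 * "dev->trans_start = jiffies" writes; it is roughly:
 *
 *	static inline void netif_trans_update(struct net_device *dev)
 *	{
 *		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 *
 *		if (txq->trans_start != jiffies)
 *			txq->trans_start = jiffies;
 *	}
 *
 * i.e. the timestamp moved from the net_device into its queue 0.
 */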
diff --git a/drivers/firmware/broadcom/Kconfig b/drivers/firmware/broadcom/Kconfig
index 6bed11993..3c7e5b741 100644
--- a/drivers/firmware/broadcom/Kconfig
+++ b/drivers/firmware/broadcom/Kconfig
@@ -9,3 +9,14 @@ config BCM47XX_NVRAM
 	  This driver provides an easy way to get the value of a requested
 	  parameter. It simply reads the content of NVRAM and parses it. It
 	  doesn't control any hardware part itself.
+
+config BCM47XX_SPROM
+ bool "Broadcom SPROM driver"
+ depends on BCM47XX_NVRAM
+ help
+ Broadcom devices store configuration data in SPROM. Accessing it is
+ specific to the bus host type, e.g. PCI(e) devices have it mapped in
+ a PCI BAR.
+	  In the case of SoC devices, the SPROM content is stored on the flash
+	  used by the CFE bootloader firmware. This driver provides a method
+	  for the ssb and bcma drivers to read the SPROM on SoCs.
diff --git a/drivers/firmware/broadcom/Makefile b/drivers/firmware/broadcom/Makefile
index d0e683583..f93efc479 100644
--- a/drivers/firmware/broadcom/Makefile
+++ b/drivers/firmware/broadcom/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_BCM47XX_NVRAM) += bcm47xx_nvram.o
+obj-$(CONFIG_BCM47XX_SPROM) += bcm47xx_sprom.o
diff --git a/drivers/firmware/broadcom/bcm47xx_sprom.c b/drivers/firmware/broadcom/bcm47xx_sprom.c
new file mode 100644
index 000000000..b6eb875d4
--- /dev/null
+++ b/drivers/firmware/broadcom/bcm47xx_sprom.c
@@ -0,0 +1,737 @@
+/*
+ * Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
+ * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2006 Michael Buesch <m@bues.ch>
+ * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org>
+ * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/bcm47xx_nvram.h>
+#include <linux/bcma/bcma.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/ssb/ssb.h>
+
+static void create_key(const char *prefix, const char *postfix,
+ const char *name, char *buf, int len)
+{
+ if (prefix && postfix)
+ snprintf(buf, len, "%s%s%s", prefix, name, postfix);
+ else if (prefix)
+ snprintf(buf, len, "%s%s", prefix, name);
+ else if (postfix)
+ snprintf(buf, len, "%s%s", name, postfix);
+ else
+ snprintf(buf, len, "%s", name);
+}
+
+static int get_nvram_var(const char *prefix, const char *postfix,
+ const char *name, char *buf, int len, bool fallback)
+{
+ char key[40];
+ int err;
+
+ create_key(prefix, postfix, name, key, sizeof(key));
+
+ err = bcm47xx_nvram_getenv(key, buf, len);
+ if (fallback && err == -ENOENT && prefix) {
+ create_key(NULL, postfix, name, key, sizeof(key));
+ err = bcm47xx_nvram_getenv(key, buf, len);
+ }
+ return err;
+}
+
+#define NVRAM_READ_VAL(type) \
+static void nvram_read_ ## type(const char *prefix, \
+ const char *postfix, const char *name, \
+ type *val, type allset, bool fallback) \
+{ \
+ char buf[100]; \
+ int err; \
+ type var; \
+ \
+ err = get_nvram_var(prefix, postfix, name, buf, sizeof(buf), \
+ fallback); \
+ if (err < 0) \
+ return; \
+ err = kstrto ## type(strim(buf), 0, &var); \
+ if (err) { \
+ pr_warn("can not parse nvram name %s%s%s with value %s got %i\n", \
+ prefix, name, postfix, buf, err); \
+ return; \
+ } \
+ if (allset && var == allset) \
+ return; \
+ *val = var; \
+}
+
+NVRAM_READ_VAL(u8)
+NVRAM_READ_VAL(s8)
+NVRAM_READ_VAL(u16)
+NVRAM_READ_VAL(u32)
+
+#undef NVRAM_READ_VAL
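/*
 * For readability, NVRAM_READ_VAL(u16) above expands to roughly the
 * following; the other three instantiations differ only in the value type
 * and the kstrto*() flavor. Shown for reference, not compiled again.
 */
static void nvram_read_u16(const char *prefix, const char *postfix,
			   const char *name, u16 *val, u16 allset,
			   bool fallback)
{
	char buf[100];
	int err;
	u16 var;

	err = get_nvram_var(prefix, postfix, name, buf, sizeof(buf),
			    fallback);
	if (err < 0)
		return;
	err = kstrtou16(strim(buf), 0, &var);
	if (err) {
		pr_warn("can not parse nvram name %s%s%s with value %s got %i\n",
			prefix, name, postfix, buf, err);
		return;
	}
	if (allset && var == allset)	/* skip "not programmed" sentinel */
		return;
	*val = var;
}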
+
+static void nvram_read_u32_2(const char *prefix, const char *name,
+ u16 *val_lo, u16 *val_hi, bool fallback)
+{
+ char buf[100];
+ int err;
+ u32 val;
+
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+ if (err < 0)
+ return;
+ err = kstrtou32(strim(buf), 0, &val);
+ if (err) {
+ pr_warn("can not parse nvram name %s%s with value %s got %i\n",
+ prefix, name, buf, err);
+ return;
+ }
+ *val_lo = (val & 0x0000FFFFU);
+ *val_hi = (val & 0xFFFF0000U) >> 16;
+}
+
+static void nvram_read_leddc(const char *prefix, const char *name,
+ u8 *leddc_on_time, u8 *leddc_off_time,
+ bool fallback)
+{
+ char buf[100];
+ int err;
+ u32 val;
+
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+ if (err < 0)
+ return;
+ err = kstrtou32(strim(buf), 0, &val);
+ if (err) {
+ pr_warn("can not parse nvram name %s%s with value %s got %i\n",
+ prefix, name, buf, err);
+ return;
+ }
+
+ if (val == 0xffff || val == 0xffffffff)
+ return;
+
+ *leddc_on_time = val & 0xff;
+ *leddc_off_time = (val >> 16) & 0xff;
+}
+
+static void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])
+{
+ if (strchr(buf, ':'))
+ sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
+ &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
+ &macaddr[5]);
+ else if (strchr(buf, '-'))
+ sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0],
+ &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
+ &macaddr[5]);
+ else
+ pr_warn("Can not parse mac address: %s\n", buf);
+}
+
+static void nvram_read_macaddr(const char *prefix, const char *name,
+ u8 val[6], bool fallback)
+{
+ char buf[100];
+ int err;
+
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+ if (err < 0)
+ return;
+
+ bcm47xx_nvram_parse_macaddr(buf, val);
+}
+
+static void nvram_read_alpha2(const char *prefix, const char *name,
+ char val[2], bool fallback)
+{
+ char buf[10];
+ int err;
+
+ err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
+ if (err < 0)
+ return;
+ if (buf[0] == '0')
+ return;
+ if (strlen(buf) > 2) {
+ pr_warn("alpha2 is too long %s\n", buf);
+ return;
+ }
+ memcpy(val, buf, 2);
+}
+
+/* This is a one-function-only macro; it uses the local "sprom" variable! */
+#define ENTRY(_revmask, _type, _prefix, _name, _val, _allset, _fallback) \
+ if (_revmask & BIT(sprom->revision)) \
+ nvram_read_ ## _type(_prefix, NULL, _name, &sprom->_val, \
+ _allset, _fallback)
+/*
+ * Special version of the filling function that can be safely called for any
+ * SPROM revision. For every NVRAM-to-SPROM mapping it contains a bitmask of
+ * the revisions for which the mapping is valid.
+ * It obviously requires some hexadecimal/bitmask knowledge, but it allows
+ * writing cleaner code (easy revision handling).
+ * Note that while SPROM revision 0 was never used, we still keep BIT(0)
+ * reserved for it, just to keep numbering sane.
+ */
+static void bcm47xx_sprom_fill_auto(struct ssb_sprom *sprom,
+ const char *prefix, bool fallback)
+{
+ const char *pre = prefix;
+ bool fb = fallback;
+
+ /* Broadcom extracts it for rev 8+ but it was found on 2 and 4 too */
+ ENTRY(0xfffffffe, u16, pre, "devid", dev_id, 0, fallback);
+
+ ENTRY(0xfffffffe, u16, pre, "boardrev", board_rev, 0, true);
+ ENTRY(0xfffffffe, u32, pre, "boardflags", boardflags, 0, fb);
+ ENTRY(0xfffffff0, u32, pre, "boardflags2", boardflags2, 0, fb);
+ ENTRY(0xfffff800, u32, pre, "boardflags3", boardflags3, 0, fb);
+ ENTRY(0x00000002, u16, pre, "boardflags", boardflags_lo, 0, fb);
+ ENTRY(0xfffffffc, u16, pre, "boardtype", board_type, 0, true);
+ ENTRY(0xfffffffe, u16, pre, "boardnum", board_num, 0, fb);
+ ENTRY(0x00000002, u8, pre, "cc", country_code, 0, fb);
+ ENTRY(0xfffffff8, u8, pre, "regrev", regrev, 0, fb);
+
+ ENTRY(0xfffffffe, u8, pre, "ledbh0", gpio0, 0xff, fb);
+ ENTRY(0xfffffffe, u8, pre, "ledbh1", gpio1, 0xff, fb);
+ ENTRY(0xfffffffe, u8, pre, "ledbh2", gpio2, 0xff, fb);
+ ENTRY(0xfffffffe, u8, pre, "ledbh3", gpio3, 0xff, fb);
+
+ ENTRY(0x0000070e, u16, pre, "pa0b0", pa0b0, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa0b1", pa0b1, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa0b2", pa0b2, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa0itssit", itssi_bg, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa0maxpwr", maxpwr_bg, 0, fb);
+
+ ENTRY(0x0000070c, u8, pre, "opo", opo, 0, fb);
+ ENTRY(0xfffffffe, u8, pre, "aa2g", ant_available_bg, 0, fb);
+ ENTRY(0xfffffffe, u8, pre, "aa5g", ant_available_a, 0, fb);
+ ENTRY(0x000007fe, s8, pre, "ag0", antenna_gain.a0, 0, fb);
+ ENTRY(0x000007fe, s8, pre, "ag1", antenna_gain.a1, 0, fb);
+ ENTRY(0x000007f0, s8, pre, "ag2", antenna_gain.a2, 0, fb);
+ ENTRY(0x000007f0, s8, pre, "ag3", antenna_gain.a3, 0, fb);
+
+ ENTRY(0x0000070e, u16, pre, "pa1b0", pa1b0, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa1b1", pa1b1, 0, fb);
+ ENTRY(0x0000070e, u16, pre, "pa1b2", pa1b2, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1lob0", pa1lob0, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1lob1", pa1lob1, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1lob2", pa1lob2, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1hib0", pa1hib0, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1hib1", pa1hib1, 0, fb);
+ ENTRY(0x0000070c, u16, pre, "pa1hib2", pa1hib2, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa1itssit", itssi_a, 0, fb);
+ ENTRY(0x0000070e, u8, pre, "pa1maxpwr", maxpwr_a, 0, fb);
+ ENTRY(0x0000070c, u8, pre, "pa1lomaxpwr", maxpwr_al, 0, fb);
+ ENTRY(0x0000070c, u8, pre, "pa1himaxpwr", maxpwr_ah, 0, fb);
+
+ ENTRY(0x00000708, u8, pre, "bxa2g", bxa2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssisav2g", rssisav2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismc2g", rssismc2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismf2g", rssismf2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "bxa5g", bxa5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssisav5g", rssisav5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismc5g", rssismc5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "rssismf5g", rssismf5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri2g", tri2g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri5g", tri5g, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri5gl", tri5gl, 0, fb);
+ ENTRY(0x00000708, u8, pre, "tri5gh", tri5gh, 0, fb);
+ ENTRY(0x00000708, s8, pre, "rxpo2g", rxpo2g, 0, fb);
+ ENTRY(0x00000708, s8, pre, "rxpo5g", rxpo5g, 0, fb);
+ ENTRY(0xfffffff0, u8, pre, "txchain", txchain, 0xf, fb);
+ ENTRY(0xfffffff0, u8, pre, "rxchain", rxchain, 0xf, fb);
+ ENTRY(0xfffffff0, u8, pre, "antswitch", antswitch, 0xff, fb);
+ ENTRY(0x00000700, u8, pre, "tssipos2g", fem.ghz2.tssipos, 0, fb);
+ ENTRY(0x00000700, u8, pre, "extpagain2g", fem.ghz2.extpa_gain, 0, fb);
+ ENTRY(0x00000700, u8, pre, "pdetrange2g", fem.ghz2.pdet_range, 0, fb);
+ ENTRY(0x00000700, u8, pre, "triso2g", fem.ghz2.tr_iso, 0, fb);
+ ENTRY(0x00000700, u8, pre, "antswctl2g", fem.ghz2.antswlut, 0, fb);
+ ENTRY(0x00000700, u8, pre, "tssipos5g", fem.ghz5.tssipos, 0, fb);
+ ENTRY(0x00000700, u8, pre, "extpagain5g", fem.ghz5.extpa_gain, 0, fb);
+ ENTRY(0x00000700, u8, pre, "pdetrange5g", fem.ghz5.pdet_range, 0, fb);
+ ENTRY(0x00000700, u8, pre, "triso5g", fem.ghz5.tr_iso, 0, fb);
+ ENTRY(0x00000700, u8, pre, "antswctl5g", fem.ghz5.antswlut, 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga0", txpid2g[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga1", txpid2g[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga2", txpid2g[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid2ga3", txpid2g[3], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga0", txpid5g[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga1", txpid5g[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga2", txpid5g[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5ga3", txpid5g[3], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla0", txpid5gl[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla1", txpid5gl[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla2", txpid5gl[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gla3", txpid5gl[3], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha0", txpid5gh[0], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha1", txpid5gh[1], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha2", txpid5gh[2], 0, fb);
+ ENTRY(0x000000f0, u8, pre, "txpid5gha3", txpid5gh[3], 0, fb);
+
+ ENTRY(0xffffff00, u8, pre, "tempthresh", tempthresh, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempoffset", tempoffset, 0, fb);
+ ENTRY(0xffffff00, u16, pre, "rawtempsense", rawtempsense, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "measpower", measpower, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempsense_slope", tempsense_slope, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempcorrx", tempcorrx, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "tempsense_option", tempsense_option, 0, fb);
+ ENTRY(0x00000700, u8, pre, "freqoffset_corr", freqoffset_corr, 0, fb);
+ ENTRY(0x00000700, u8, pre, "iqcal_swp_dis", iqcal_swp_dis, 0, fb);
+ ENTRY(0x00000700, u8, pre, "hw_iqcal_en", hw_iqcal_en, 0, fb);
+ ENTRY(0x00000700, u8, pre, "elna2g", elna2g, 0, fb);
+ ENTRY(0x00000700, u8, pre, "elna5g", elna5g, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "phycal_tempdelta", phycal_tempdelta, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "temps_period", temps_period, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "temps_hysteresis", temps_hysteresis, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "measpower1", measpower1, 0, fb);
+ ENTRY(0xffffff00, u8, pre, "measpower2", measpower2, 0, fb);
+
+ ENTRY(0x000001f0, u16, pre, "cck2gpo", cck2gpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm2gpo", ofdm2gpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm5gpo", ofdm5gpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm5glpo", ofdm5glpo, 0, fb);
+ ENTRY(0x000001f0, u32, pre, "ofdm5ghpo", ofdm5ghpo, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo0", mcs2gpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo1", mcs2gpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo2", mcs2gpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo3", mcs2gpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo4", mcs2gpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo5", mcs2gpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo6", mcs2gpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs2gpo7", mcs2gpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo0", mcs5gpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo1", mcs5gpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo2", mcs5gpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo3", mcs5gpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo4", mcs5gpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo5", mcs5gpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo6", mcs5gpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5gpo7", mcs5gpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo0", mcs5glpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo1", mcs5glpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo2", mcs5glpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo3", mcs5glpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo4", mcs5glpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo5", mcs5glpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo6", mcs5glpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5glpo7", mcs5glpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo0", mcs5ghpo[0], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo1", mcs5ghpo[1], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo2", mcs5ghpo[2], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo3", mcs5ghpo[3], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo4", mcs5ghpo[4], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo5", mcs5ghpo[5], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo6", mcs5ghpo[6], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "mcs5ghpo7", mcs5ghpo[7], 0, fb);
+ ENTRY(0x000001f0, u16, pre, "cddpo", cddpo, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "stbcpo", stbcpo, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "bw40po", bw40po, 0, fb);
+ ENTRY(0x000001f0, u16, pre, "bwduppo", bwduppo, 0, fb);
+
+ ENTRY(0xfffffe00, u16, pre, "cckbw202gpo", cckbw202gpo, 0, fb);
+ ENTRY(0xfffffe00, u16, pre, "cckbw20ul2gpo", cckbw20ul2gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw202gpo", legofdmbw202gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul2gpo", legofdmbw20ul2gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw205glpo", legofdmbw205glpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul5glpo", legofdmbw20ul5glpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw205gmpo", legofdmbw205gmpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul5gmpo", legofdmbw20ul5gmpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw205ghpo", legofdmbw205ghpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "legofdmbw20ul5ghpo", legofdmbw20ul5ghpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw202gpo", mcsbw202gpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul2gpo", mcsbw20ul2gpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw402gpo", mcsbw402gpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw205glpo", mcsbw205glpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul5glpo", mcsbw20ul5glpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw405glpo", mcsbw405glpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw205gmpo", mcsbw205gmpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul5gmpo", mcsbw20ul5gmpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw405gmpo", mcsbw405gmpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw205ghpo", mcsbw205ghpo, 0, fb);
+ ENTRY(0x00000600, u32, pre, "mcsbw20ul5ghpo", mcsbw20ul5ghpo, 0, fb);
+ ENTRY(0xfffffe00, u32, pre, "mcsbw405ghpo", mcsbw405ghpo, 0, fb);
+ ENTRY(0x00000600, u16, pre, "mcs32po", mcs32po, 0, fb);
+ ENTRY(0x00000600, u16, pre, "legofdm40duppo", legofdm40duppo, 0, fb);
+ ENTRY(0x00000700, u8, pre, "pcieingress_war", pcieingress_war, 0, fb);
+
+ /* TODO: rev 11 support */
+ ENTRY(0x00000700, u8, pre, "rxgainerr2ga0", rxgainerr2ga[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr2ga1", rxgainerr2ga[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr2ga2", rxgainerr2ga[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gla0", rxgainerr5gla[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gla1", rxgainerr5gla[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gla2", rxgainerr5gla[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gma0", rxgainerr5gma[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gma1", rxgainerr5gma[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gma2", rxgainerr5gma[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gha0", rxgainerr5gha[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gha1", rxgainerr5gha[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gha2", rxgainerr5gha[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gua0", rxgainerr5gua[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gua1", rxgainerr5gua[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "rxgainerr5gua2", rxgainerr5gua[2], 0, fb);
+
+ ENTRY(0xfffffe00, u8, pre, "sar2g", sar2g, 0, fb);
+ ENTRY(0xfffffe00, u8, pre, "sar5g", sar5g, 0, fb);
+
+ /* TODO: rev 11 support */
+ ENTRY(0x00000700, u8, pre, "noiselvl2ga0", noiselvl2ga[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl2ga1", noiselvl2ga[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl2ga2", noiselvl2ga[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gla0", noiselvl5gla[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gla1", noiselvl5gla[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gla2", noiselvl5gla[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gma0", noiselvl5gma[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gma1", noiselvl5gma[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gma2", noiselvl5gma[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gha0", noiselvl5gha[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gha1", noiselvl5gha[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gha2", noiselvl5gha[2], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gua0", noiselvl5gua[0], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb);
+ ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb);
+}
+#undef ENTRY /* It's specific, uses a local variable, don't use it (again). */
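/*
 * For reference, a single ENTRY line from bcm47xx_sprom_fill_auto(), e.g.
 *
 *	ENTRY(0xfffffffe, u16, pre, "devid", dev_id, 0, fallback);
 *
 * expands to roughly:
 *
 *	if (0xfffffffe & BIT(sprom->revision))
 *		nvram_read_u16(pre, NULL, "devid", &sprom->dev_id,
 *			       0, fallback);
 *
 * i.e. "devid" is read for every SPROM revision except 0, the only bit
 * clear in the 0xfffffffe mask.
 */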
+
+static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
+ const char *prefix, bool fallback)
+{
+ char postfix[2];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
+ struct ssb_sprom_core_pwr_info *pwr_info;
+
+ pwr_info = &sprom->core_pwr_info[i];
+
+ snprintf(postfix, sizeof(postfix), "%i", i);
+ nvram_read_u8(prefix, postfix, "maxp2ga",
+ &pwr_info->maxpwr_2g, 0, fallback);
+ nvram_read_u8(prefix, postfix, "itt2ga",
+ &pwr_info->itssi_2g, 0, fallback);
+ nvram_read_u8(prefix, postfix, "itt5ga",
+ &pwr_info->itssi_5g, 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa2gw0a",
+ &pwr_info->pa_2g[0], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa2gw1a",
+ &pwr_info->pa_2g[1], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa2gw2a",
+ &pwr_info->pa_2g[2], 0, fallback);
+ nvram_read_u8(prefix, postfix, "maxp5ga",
+ &pwr_info->maxpwr_5g, 0, fallback);
+ nvram_read_u8(prefix, postfix, "maxp5gha",
+ &pwr_info->maxpwr_5gh, 0, fallback);
+ nvram_read_u8(prefix, postfix, "maxp5gla",
+ &pwr_info->maxpwr_5gl, 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5gw0a",
+ &pwr_info->pa_5g[0], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5gw1a",
+ &pwr_info->pa_5g[1], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5gw2a",
+ &pwr_info->pa_5g[2], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5glw0a",
+ &pwr_info->pa_5gl[0], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5glw1a",
+ &pwr_info->pa_5gl[1], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5glw2a",
+ &pwr_info->pa_5gl[2], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5ghw0a",
+ &pwr_info->pa_5gh[0], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5ghw1a",
+ &pwr_info->pa_5gh[1], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5ghw2a",
+ &pwr_info->pa_5gh[2], 0, fallback);
+ }
+}
+
+static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom,
+ const char *prefix, bool fallback)
+{
+ char postfix[2];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
+ struct ssb_sprom_core_pwr_info *pwr_info;
+
+ pwr_info = &sprom->core_pwr_info[i];
+
+ snprintf(postfix, sizeof(postfix), "%i", i);
+ nvram_read_u16(prefix, postfix, "pa2gw3a",
+ &pwr_info->pa_2g[3], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5gw3a",
+ &pwr_info->pa_5g[3], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5glw3a",
+ &pwr_info->pa_5gl[3], 0, fallback);
+ nvram_read_u16(prefix, postfix, "pa5ghw3a",
+ &pwr_info->pa_5gh[3], 0, fallback);
+ }
+}
+
+static bool bcm47xx_is_valid_mac(u8 *mac)
+{
+ return mac && !(mac[0] == 0x00 && mac[1] == 0x90 && mac[2] == 0x4c);
+}
+
+static int bcm47xx_increase_mac_addr(u8 *mac, u8 num)
+{
+ u8 *oui = mac + ETH_ALEN/2 - 1;
+ u8 *p = mac + ETH_ALEN - 1;
+
+ do {
+ (*p) += num;
+ if (*p > num)
+ break;
+ p--;
+ num = 1;
+ } while (p != oui);
+
+ if (p == oui) {
+ pr_err("unable to fetch mac address\n");
+ return -ENOENT;
+ }
+ return 0;
+}
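/*
 * A worked example of the carry logic in bcm47xx_increase_mac_addr(),
 * runnable stand-alone (kernel types swapped for stdint; same loop body).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t mac[6] = { 0x00, 0x90, 0x4c, 0xaa, 0xbb, 0xff };
	uint8_t *oui = mac + 2, *p = mac + 5, num = 2;

	do {
		*p += num;
		if (*p > num)		/* no wrap-around: done */
			break;
		p--;			/* wrapped: carry into next byte */
		num = 1;
	} while (p != oui);

	/* prints 00:90:4c:aa:bc:01 - 0xff + 2 wrapped and carried once */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}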
+
+static int mac_addr_used = 2;
+
+static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
+ const char *prefix, bool fallback)
+{
+ bool fb = fallback;
+
+ nvram_read_macaddr(prefix, "et0macaddr", sprom->et0mac, fallback);
+ nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0,
+ fallback);
+
+ nvram_read_macaddr(prefix, "et1macaddr", sprom->et1mac, fallback);
+ nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0,
+ fallback);
+ nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0,
+ fallback);
+
+ nvram_read_macaddr(prefix, "et2macaddr", sprom->et2mac, fb);
+ nvram_read_u8(prefix, NULL, "et2mdcport", &sprom->et2mdcport, 0, fb);
+ nvram_read_u8(prefix, NULL, "et2phyaddr", &sprom->et2phyaddr, 0, fb);
+
+ nvram_read_macaddr(prefix, "macaddr", sprom->il0mac, fallback);
+ nvram_read_macaddr(prefix, "il0macaddr", sprom->il0mac, fallback);
+
+ /* The address prefix 00:90:4C is used by Broadcom in their initial
+	 * configuration. When a mac address with the prefix 00:90:4C is used,
+	 * all devices from the same series share the same mac address.
+	 * To prevent mac address collisions we replace it with a mac address
+	 * derived from the et0macaddr base address.
+ */
+ if (!bcm47xx_is_valid_mac(sprom->il0mac)) {
+ u8 mac[6];
+
+ nvram_read_macaddr(NULL, "et0macaddr", mac, false);
+ if (bcm47xx_is_valid_mac(mac)) {
+ int err = bcm47xx_increase_mac_addr(mac, mac_addr_used);
+
+ if (!err) {
+ ether_addr_copy(sprom->il0mac, mac);
+ mac_addr_used++;
+ }
+ }
+ }
+}
+
+static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
+{
+ nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
+ &sprom->boardflags_hi, fallback);
+ nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
+ &sprom->boardflags2_hi, fallback);
+}
+
+void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
+ bool fallback)
+{
+ bcm47xx_fill_sprom_ethernet(sprom, prefix, fallback);
+ bcm47xx_fill_board_data(sprom, prefix, fallback);
+
+ nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0, fallback);
+
+ /* Entries requiring custom functions */
+ nvram_read_alpha2(prefix, "ccode", sprom->alpha2, fallback);
+ if (sprom->revision >= 3)
+ nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
+ &sprom->leddc_off_time, fallback);
+
+ switch (sprom->revision) {
+ case 4:
+ case 5:
+ bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
+ bcm47xx_fill_sprom_path_r45(sprom, prefix, fallback);
+ break;
+ case 8:
+ case 9:
+ bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
+ break;
+ }
+
+ bcm47xx_sprom_fill_auto(sprom, prefix, fallback);
+}
+
+#if IS_BUILTIN(CONFIG_SSB) && IS_ENABLED(CONFIG_SSB_SPROM)
+static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
+{
+ char prefix[10];
+
+ switch (bus->bustype) {
+ case SSB_BUSTYPE_SSB:
+ bcm47xx_fill_sprom(out, NULL, false);
+ return 0;
+ case SSB_BUSTYPE_PCI:
+ memset(out, 0, sizeof(struct ssb_sprom));
+ snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
+ bus->host_pci->bus->number + 1,
+ PCI_SLOT(bus->host_pci->devfn));
+ bcm47xx_fill_sprom(out, prefix, false);
+ return 0;
+ default:
+ pr_warn("Unable to fill SPROM for given bustype.\n");
+ return -EINVAL;
+ }
+}
+#endif
+
+#if IS_BUILTIN(CONFIG_BCMA)
+/*
+ * Having many NVRAM entries for PCI devices led to repeating prefixes like
+ * pci/1/1/ all the time and wasting flash space. So at some point Broadcom
+ * decided to introduce prefixes like 0: 1: 2: etc.
+ * If we find e.g. devpath0=pci/2/1 or devpath0=pci/2/1/ we should use 0:
+ * instead of pci/2/1/.
+ */
+static void bcm47xx_sprom_apply_prefix_alias(char *prefix, size_t prefix_size)
+{
+ size_t prefix_len = strlen(prefix);
+ size_t short_len = prefix_len - 1;
+ char nvram_var[10];
+ char buf[20];
+ int i;
+
+ /* Passed prefix has to end with a slash */
+ if (prefix_len <= 0 || prefix[prefix_len - 1] != '/')
+ return;
+
+ for (i = 0; i < 3; i++) {
+ if (snprintf(nvram_var, sizeof(nvram_var), "devpath%d", i) <= 0)
+ continue;
+ if (bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf)) < 0)
+ continue;
+ if (!strcmp(buf, prefix) ||
+ (short_len && strlen(buf) == short_len && !strncmp(buf, prefix, short_len))) {
+ snprintf(prefix, prefix_size, "%d:", i);
+ return;
+ }
+ }
+}
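/*
 * Example of the alias rewrite above: with "devpath0=pci/2/1" in NVRAM,
 *
 *	char prefix[10] = "pci/2/1/";
 *	bcm47xx_sprom_apply_prefix_alias(prefix, sizeof(prefix));
 *
 * leaves prefix == "0:", because the stored value matches the prefix
 * minus its trailing slash (the short_len comparison). Subsequent lookups
 * then read e.g. "0:boardflags" instead of "pci/2/1/boardflags".
 */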
+
+static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
+{
+ struct bcma_boardinfo *binfo = &bus->boardinfo;
+ struct bcma_device *core;
+ char buf[10];
+ char *prefix;
+ bool fallback = false;
+
+ switch (bus->hosttype) {
+ case BCMA_HOSTTYPE_PCI:
+ memset(out, 0, sizeof(struct ssb_sprom));
+ /* On BCM47XX all PCI buses share the same domain */
+ if (config_enabled(CONFIG_BCM47XX))
+ snprintf(buf, sizeof(buf), "pci/%u/%u/",
+ bus->host_pci->bus->number + 1,
+ PCI_SLOT(bus->host_pci->devfn));
+ else
+ snprintf(buf, sizeof(buf), "pci/%u/%u/",
+ pci_domain_nr(bus->host_pci->bus) + 1,
+ bus->host_pci->bus->number);
+ bcm47xx_sprom_apply_prefix_alias(buf, sizeof(buf));
+ prefix = buf;
+ break;
+ case BCMA_HOSTTYPE_SOC:
+ memset(out, 0, sizeof(struct ssb_sprom));
+ core = bcma_find_core(bus, BCMA_CORE_80211);
+ if (core) {
+ snprintf(buf, sizeof(buf), "sb/%u/",
+ core->core_index);
+ prefix = buf;
+ fallback = true;
+ } else {
+ prefix = NULL;
+ }
+ break;
+ default:
+ pr_warn("Unable to fill SPROM for given bustype.\n");
+ return -EINVAL;
+ }
+
+ nvram_read_u16(prefix, NULL, "boardvendor", &binfo->vendor, 0, true);
+ if (!binfo->vendor)
+ binfo->vendor = SSB_BOARDVENDOR_BCM;
+ nvram_read_u16(prefix, NULL, "boardtype", &binfo->type, 0, true);
+
+ bcm47xx_fill_sprom(out, prefix, fallback);
+
+ return 0;
+}
+#endif
+
+static unsigned int bcm47xx_sprom_registered;
+
+/*
+ * On bcm47xx we need to register the SPROM fallback handlers very early, so
+ * we can't use anything like a platform device / driver for this.
+ */
+int bcm47xx_sprom_register_fallbacks(void)
+{
+ if (bcm47xx_sprom_registered)
+ return 0;
+
+#if IS_BUILTIN(CONFIG_SSB) && IS_ENABLED(CONFIG_SSB_SPROM)
+ if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
+ pr_warn("Failed to register ssb SPROM handler\n");
+#endif
+
+#if IS_BUILTIN(CONFIG_BCMA)
+ if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
+ pr_warn("Failed to register bcma SPROM handler\n");
+#endif
+
+ bcm47xx_sprom_registered = 1;
+
+ return 0;
+}
+
+fs_initcall(bcm47xx_sprom_register_fallbacks);
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index e1670d533..6394152f6 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -87,6 +87,31 @@ config EFI_RUNTIME_WRAPPERS
config EFI_ARMSTUB
bool
+config EFI_BOOTLOADER_CONTROL
+ tristate "EFI Bootloader Control"
+ depends on EFI_VARS
+ default n
+ ---help---
+ This module installs a reboot hook, such that if reboot() is
+ invoked with a string argument NNN, "NNN" is copied to the
+ "LoaderEntryOneShot" EFI variable, to be read by the
+ bootloader. If the string matches one of the boot labels
+ defined in its configuration, the bootloader will boot once
+ to that label. The "LoaderEntryRebootReason" EFI variable is
+ set with the reboot reason: "reboot" or "shutdown". The
+ bootloader reads this reboot reason and takes particular
+ action according to its policy.
+
+config EFI_CAPSULE_LOADER
+ tristate "EFI capsule loader"
+ depends on EFI
+ help
+ This option exposes a loader interface "/dev/efi_capsule_loader" for
+ users to load EFI capsules. This driver requires working runtime
+ capsule support in the firmware, which many OEMs do not provide.
+
+ Most users should say N.
+
endmenu
config UEFI_CPER
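/*
 * An illustrative userspace use of the /dev/efi_capsule_loader interface
 * described above: the capsule image is written to the chardev and is
 * submitted to the firmware when the descriptor is closed. A sketch with
 * error handling trimmed; it assumes firmware with runtime capsule support.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t n;
	int in, out;

	if (argc < 2)
		return 1;

	in = open(argv[1], O_RDONLY);			/* capsule file */
	out = open("/dev/efi_capsule_loader", O_WRONLY);
	if (in < 0 || out < 0)
		return 1;

	while ((n = read(in, buf, sizeof(buf))) > 0)
		if (write(out, buf, n) != n)
			return 1;

	close(in);
	return close(out) ? 1 : 0;	/* capsule submitted on final close */
}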
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 62e654f25..a219640f8 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -9,7 +9,8 @@
#
KASAN_SANITIZE_runtime-wrappers.o := n
-obj-$(CONFIG_EFI) += efi.o vars.o reboot.o
+obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o
+obj-$(CONFIG_EFI) += capsule.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_ESRT) += esrt.o
obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
@@ -18,7 +19,9 @@ obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o
obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o
obj-$(CONFIG_EFI_STUB) += libstub/
obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o
+obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o
arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
obj-$(CONFIG_ARM64) += $(arm-obj-y)
+obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 8714f8c27..c49d50e68 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -11,17 +11,19 @@
*
*/
+#define pr_fmt(fmt) "efi: " fmt
+
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
#include <asm/efi.h>
-struct efi_memory_map memmap;
-
u64 efi_system_table;
static int __init is_normal_ram(efi_memory_desc_t *md)
@@ -40,7 +42,7 @@ static phys_addr_t efi_to_phys(unsigned long addr)
{
efi_memory_desc_t *md;
- for_each_efi_memory_desc(&memmap, md) {
+ for_each_efi_memory_desc(md) {
if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue;
if (md->virt_addr == 0)
@@ -53,6 +55,36 @@ static phys_addr_t efi_to_phys(unsigned long addr)
return addr;
}
+static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
+
+static __initdata efi_config_table_type_t arch_tables[] = {
+ {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, NULL, &screen_info_table},
+ {NULL_GUID, NULL, NULL}
+};
+
+static void __init init_screen_info(void)
+{
+ struct screen_info *si;
+
+ if (screen_info_table != EFI_INVALID_TABLE_ADDR) {
+ si = early_memremap_ro(screen_info_table, sizeof(*si));
+ if (!si) {
+ pr_err("Could not map screen_info config table\n");
+ return;
+ }
+ screen_info = *si;
+ early_memunmap(si, sizeof(*si));
+
+ /* dummycon on ARM needs non-zero values for columns/lines */
+ screen_info.orig_video_cols = 80;
+ screen_info.orig_video_lines = 25;
+ }
+
+ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+ memblock_is_map_memory(screen_info.lfb_base))
+ memblock_mark_nomap(screen_info.lfb_base, screen_info.lfb_size);
+}
+
static int __init uefi_init(void)
{
efi_char16_t *c16;
@@ -85,6 +117,8 @@ static int __init uefi_init(void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff);
+ efi.runtime_version = efi.systab->hdr.revision;
+
/* Show what we know for posterity */
c16 = early_memremap_ro(efi_to_phys(efi.systab->fw_vendor),
sizeof(vendor) * sizeof(efi_char16_t));
@@ -108,7 +142,8 @@ static int __init uefi_init(void)
goto out;
}
retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
- sizeof(efi_config_table_t), NULL);
+ sizeof(efi_config_table_t),
+ arch_tables);
early_memunmap(config_tables, table_size);
out:
@@ -139,20 +174,31 @@ static __init void reserve_regions(void)
{
efi_memory_desc_t *md;
u64 paddr, npages, size;
+ int resv;
if (efi_enabled(EFI_DBG))
pr_info("Processing EFI memory map:\n");
- for_each_efi_memory_desc(&memmap, md) {
+ /*
+ * Discard memblocks discovered so far: if there are any at this
+ * point, they originate from memory nodes in the DT, and UEFI
+ * uses its own memory map instead.
+ */
+ memblock_dump_all();
+ memblock_remove(0, (phys_addr_t)ULLONG_MAX);
+
+ for_each_efi_memory_desc(md) {
paddr = md->phys_addr;
npages = md->num_pages;
+ resv = is_reserve_region(md);
if (efi_enabled(EFI_DBG)) {
char buf[64];
- pr_info(" 0x%012llx-0x%012llx %s",
+ pr_info(" 0x%012llx-0x%012llx %s%s\n",
paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
- efi_md_typeattr_format(buf, sizeof(buf), md));
+ efi_md_typeattr_format(buf, sizeof(buf), md),
+ resv ? "*" : "");
}
memrange_efi_to_native(&paddr, &npages);
@@ -161,14 +207,9 @@ static __init void reserve_regions(void)
if (is_normal_ram(md))
early_init_dt_add_memory_arch(paddr, size);
- if (is_reserve_region(md)) {
+ if (resv)
memblock_mark_nomap(paddr, size);
- if (efi_enabled(EFI_DBG))
- pr_cont("*");
- }
- if (efi_enabled(EFI_DBG))
- pr_cont("\n");
}
set_bit(EFI_MEMMAP, &efi.flags);
@@ -184,9 +225,9 @@ void __init efi_init(void)
efi_system_table = params.system_table;
- memmap.phys_map = params.mmap;
- memmap.map = early_memremap_ro(params.mmap, params.mmap_size);
- if (memmap.map == NULL) {
+ efi.memmap.phys_map = params.mmap;
+ efi.memmap.map = early_memremap_ro(params.mmap, params.mmap_size);
+ if (efi.memmap.map == NULL) {
/*
* If we are booting via UEFI, the UEFI memory map is the only
* description of memory we have, so there is little point in
@@ -194,28 +235,37 @@ void __init efi_init(void)
*/
panic("Unable to map EFI memory map.\n");
}
- memmap.map_end = memmap.map + params.mmap_size;
- memmap.desc_size = params.desc_size;
- memmap.desc_version = params.desc_ver;
+ efi.memmap.map_end = efi.memmap.map + params.mmap_size;
+ efi.memmap.desc_size = params.desc_size;
+ efi.memmap.desc_version = params.desc_ver;
+
+ WARN(efi.memmap.desc_version != 1,
+ "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
+ efi.memmap.desc_version);
if (uefi_init() < 0)
return;
reserve_regions();
- early_memunmap(memmap.map, params.mmap_size);
+ efi_memattr_init();
+ early_memunmap(efi.memmap.map, params.mmap_size);
- if (IS_ENABLED(CONFIG_ARM)) {
- /*
- * ARM currently does not allow ioremap_cache() to be called on
- * memory regions that are covered by struct page. So remove the
- * UEFI memory map from the linear mapping.
- */
- memblock_mark_nomap(params.mmap & PAGE_MASK,
- PAGE_ALIGN(params.mmap_size +
- (params.mmap & ~PAGE_MASK)));
- } else {
- memblock_reserve(params.mmap & PAGE_MASK,
- PAGE_ALIGN(params.mmap_size +
- (params.mmap & ~PAGE_MASK)));
- }
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+
+ init_screen_info();
+}
+
+static int __init register_gop_device(void)
+{
+ void *pd;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ return 0;
+
+ pd = platform_device_register_data(NULL, "efi-framebuffer", 0,
+ &screen_info, sizeof(screen_info));
+ return PTR_ERR_OR_ZERO(pd);
}
+subsys_initcall(register_gop_device);
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 6ae21e41a..17ccf0a87 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -42,11 +42,13 @@ static struct mm_struct efi_mm = {
static bool __init efi_virtmap_init(void)
{
efi_memory_desc_t *md;
+ bool systab_found;
efi_mm.pgd = pgd_alloc(&efi_mm);
init_new_context(NULL, &efi_mm);
- for_each_efi_memory_desc(&memmap, md) {
+ systab_found = false;
+ for_each_efi_memory_desc(md) {
phys_addr_t phys = md->phys_addr;
int ret;
@@ -64,7 +66,25 @@ static bool __init efi_virtmap_init(void)
&phys, ret);
return false;
}
+ /*
+ * If this entry covers the address of the UEFI system table,
+ * calculate and record its virtual address.
+ */
+ if (efi_system_table >= phys &&
+ efi_system_table < phys + (md->num_pages * EFI_PAGE_SIZE)) {
+ efi.systab = (void *)(unsigned long)(efi_system_table -
+ phys + md->virt_addr);
+ systab_found = true;
+ }
+ }
+ if (!systab_found) {
+ pr_err("No virtual mapping found for the UEFI System Table\n");
+ return false;
}
+
+ if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
+ return false;
+
return true;
}
@@ -89,26 +109,17 @@ static int __init arm_enable_runtime_services(void)
pr_info("Remapping and enabling EFI services.\n");
- mapsize = memmap.map_end - memmap.map;
- memmap.map = (__force void *)ioremap_cache(memmap.phys_map,
- mapsize);
- if (!memmap.map) {
- pr_err("Failed to remap EFI memory map\n");
- return -ENOMEM;
- }
- memmap.map_end = memmap.map + mapsize;
- efi.memmap = &memmap;
+ mapsize = efi.memmap.map_end - efi.memmap.map;
- efi.systab = (__force void *)ioremap_cache(efi_system_table,
- sizeof(efi_system_table_t));
- if (!efi.systab) {
- pr_err("Failed to remap EFI System Table\n");
+ efi.memmap.map = memremap(efi.memmap.phys_map, mapsize, MEMREMAP_WB);
+ if (!efi.memmap.map) {
+ pr_err("Failed to remap EFI memory map\n");
return -ENOMEM;
}
- set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+ efi.memmap.map_end = efi.memmap.map + mapsize;
if (!efi_virtmap_init()) {
- pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
+ pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
return -ENOMEM;
}
@@ -116,8 +127,6 @@ static int __init arm_enable_runtime_services(void)
efi_native_runtime_setup();
set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
- efi.runtime_version = efi.systab->hdr.revision;
-
return 0;
}
early_initcall(arm_enable_runtime_services);
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
new file mode 100644
index 000000000..c99c24bc7
--- /dev/null
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -0,0 +1,343 @@
+/*
+ * EFI capsule loader driver.
+ *
+ * Copyright 2015 Intel Corporation
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/efi.h>
+
+#define NO_FURTHER_WRITE_ACTION -1
+
+struct capsule_info {
+ bool header_obtained;
+ int reset_type;
+ long index;
+ size_t count;
+ size_t total_size;
+ struct page **pages;
+ size_t page_bytes_remain;
+};
+
+/**
+ * efi_free_all_buff_pages - free all previously allocated buffer pages
+ * @cap_info: pointer to current instance of capsule_info structure
+ *
+ * In addition to freeing buffer pages, it flags NO_FURTHER_WRITE_ACTION
+ * to cease processing data in subsequent write(2) calls until close(2)
+ * is called.
+ **/
+static void efi_free_all_buff_pages(struct capsule_info *cap_info)
+{
+ while (cap_info->index > 0)
+ __free_page(cap_info->pages[--cap_info->index]);
+
+ cap_info->index = NO_FURTHER_WRITE_ACTION;
+}
+
+/**
+ * efi_capsule_setup_info - parse the EFI capsule header from the binary and
+ * set up the capsule_info structure
+ * @cap_info: pointer to current instance of capsule_info structure
+ * @kbuff: mapped kernel pointer into the first data page
+ * @hdr_bytes: number of bytes received so far for the EFI capsule header
+ **/
+static ssize_t efi_capsule_setup_info(struct capsule_info *cap_info,
+ void *kbuff, size_t hdr_bytes)
+{
+ efi_capsule_header_t *cap_hdr;
+ size_t pages_needed;
+ int ret;
+ void *temp_page;
+
+ /* Only start processing once a full EFI capsule header has arrived */
+ if (hdr_bytes < sizeof(efi_capsule_header_t))
+ return 0;
+
+ /* Rewind to the start of the capsule header within the first page */
+ cap_hdr = kbuff - cap_info->count;
+ pages_needed = ALIGN(cap_hdr->imagesize, PAGE_SIZE) >> PAGE_SHIFT;
+
+ if (pages_needed == 0) {
+ pr_err("%s: pages count invalid\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check whether this capsule is supported by the firmware */
+ ret = efi_capsule_supported(cap_hdr->guid, cap_hdr->flags,
+ cap_hdr->imagesize,
+ &cap_info->reset_type);
+ if (ret) {
+ pr_err("%s: efi_capsule_supported() failed\n",
+ __func__);
+ return ret;
+ }
+
+ cap_info->total_size = cap_hdr->imagesize;
+ temp_page = krealloc(cap_info->pages,
+ pages_needed * sizeof(void *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!temp_page) {
+ pr_debug("%s: krealloc() failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ cap_info->pages = temp_page;
+ cap_info->header_obtained = true;
+
+ return 0;
+}
+
+/**
+ * efi_capsule_submit_update - invoke the efi_capsule_update() API once the
+ * binary upload is done
+ * @cap_info: pointer to current instance of capsule_info structure
+ **/
+static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
+{
+ int ret;
+ void *cap_hdr_temp;
+
+ cap_hdr_temp = kmap(cap_info->pages[0]);
+ if (!cap_hdr_temp) {
+ pr_debug("%s: kmap() failed\n", __func__);
+ return -EFAULT;
+ }
+
+ ret = efi_capsule_update(cap_hdr_temp, cap_info->pages);
+ kunmap(cap_info->pages[0]);
+ if (ret) {
+ pr_err("%s: efi_capsule_update() failed\n", __func__);
+ return ret;
+ }
+
+ /* Indicate capsule binary uploading is done */
+ cap_info->index = NO_FURTHER_WRITE_ACTION;
+ pr_info("%s: Successfully upload capsule file with reboot type '%s'\n",
+ __func__, !cap_info->reset_type ? "RESET_COLD" :
+ cap_info->reset_type == 1 ? "RESET_WARM" :
+ "RESET_SHUTDOWN");
+ return 0;
+}
+
+/**
+ * efi_capsule_write - store the capsule binary and pass it to
+ * efi_capsule_update() API
+ * @file: file pointer
+ * @buff: buffer pointer
+ * @count: number of bytes in @buff
+ * @offp: not used
+ *
+ * Expectation:
+ * - A user space tool should start at the beginning of the capsule binary
+ * and pass the data in sequentially.
+ * - Users should close and re-open this file node in order to upload more
+ * capsules.
+ * - After an error is returned, the user should close the file and restart
+ * the operation for the next try; otherwise -EIO will be returned until
+ * the file is closed.
+ * - An EFI capsule header must be located at the beginning of the capsule
+ * binary file and passed in as the first block of data of the write
+ * operation.
+ **/
+static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
+ size_t count, loff_t *offp)
+{
+ int ret = 0;
+ struct capsule_info *cap_info = file->private_data;
+ struct page *page;
+ void *kbuff = NULL;
+ size_t write_byte;
+
+ if (count == 0)
+ return 0;
+
+ /* Return error while NO_FURTHER_WRITE_ACTION is flagged */
+ if (cap_info->index < 0)
+ return -EIO;
+
+ /* Only alloc a new page when previous page is full */
+ if (!cap_info->page_bytes_remain) {
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ pr_debug("%s: alloc_page() failed\n", __func__);
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ cap_info->pages[cap_info->index++] = page;
+ cap_info->page_bytes_remain = PAGE_SIZE;
+ }
+
+ page = cap_info->pages[cap_info->index - 1];
+
+ kbuff = kmap(page);
+ if (!kbuff) {
+ pr_debug("%s: kmap() failed\n", __func__);
+ ret = -EFAULT;
+ goto failed;
+ }
+ kbuff += PAGE_SIZE - cap_info->page_bytes_remain;
+
+ /* Copy capsule binary data from user space to kernel space buffer */
+ write_byte = min_t(size_t, count, cap_info->page_bytes_remain);
+ if (copy_from_user(kbuff, buff, write_byte)) {
+ pr_debug("%s: copy_from_user() failed\n", __func__);
+ ret = -EFAULT;
+ goto fail_unmap;
+ }
+ cap_info->page_bytes_remain -= write_byte;
+
+ /* Setup capsule binary info structure */
+ if (!cap_info->header_obtained) {
+ ret = efi_capsule_setup_info(cap_info, kbuff,
+ cap_info->count + write_byte);
+ if (ret)
+ goto fail_unmap;
+ }
+
+ cap_info->count += write_byte;
+ kunmap(page);
+
+ /* Submit the full binary to efi_capsule_update() API */
+ if (cap_info->header_obtained &&
+ cap_info->count >= cap_info->total_size) {
+ if (cap_info->count > cap_info->total_size) {
+ pr_err("%s: upload size exceeded header defined size\n",
+ __func__);
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ ret = efi_capsule_submit_update(cap_info);
+ if (ret)
+ goto failed;
+ }
+
+ return write_byte;
+
+fail_unmap:
+ kunmap(page);
+failed:
+ efi_free_all_buff_pages(cap_info);
+ return ret;
+}
+
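For illustration, a minimal user-space sketch of the protocol documented above; the file name is hypothetical and error handling is abbreviated. Note that the device may accept partial writes (it fills one page at a time), and an incomplete upload surfaces as -ECANCELED through close(2):

/* Hypothetical uploader sketch; illustrates the write protocol only. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int in = open("capsule.bin", O_RDONLY);		/* illustrative name */
	int out = open("/dev/efi_capsule_loader", O_WRONLY);

	if (in < 0 || out < 0)
		return 1;

	/* Start at the beginning of the binary; pass data in sequentially. */
	while ((n = read(in, buf, sizeof(buf))) > 0) {
		ssize_t off = 0;

		while (off < n) {	/* partial writes are normal here */
			ssize_t w = write(out, buf + off, n - off);

			if (w < 0)
				return 1;	/* close, retry from scratch */
			off += w;
		}
	}

	/* An incomplete upload is reported as -ECANCELED via close(). */
	return close(out) ? 1 : 0;
}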
+/**
+ * efi_capsule_flush - called by file close or file flush
+ * @file: file pointer
+ * @id: not used
+ *
+ * If the capsule upload is only partially complete, calling this function
+ * is treated as upload termination: the buffer pages accumulated so far
+ * are freed and -ECANCELED is returned.
+ **/
+static int efi_capsule_flush(struct file *file, fl_owner_t id)
+{
+ int ret = 0;
+ struct capsule_info *cap_info = file->private_data;
+
+ if (cap_info->index > 0) {
+ pr_err("%s: capsule upload not complete\n", __func__);
+ efi_free_all_buff_pages(cap_info);
+ ret = -ECANCELED;
+ }
+
+ return ret;
+}
+
+/**
+ * efi_capsule_release - called by file close
+ * @inode: not used
+ * @file: file pointer
+ *
+ * We will not free successfully submitted pages since the EFI firmware
+ * requires the capsule data to be maintained across system reboot.
+ **/
+static int efi_capsule_release(struct inode *inode, struct file *file)
+{
+ struct capsule_info *cap_info = file->private_data;
+
+ kfree(cap_info->pages);
+ kfree(file->private_data);
+ file->private_data = NULL;
+ return 0;
+}
+
+/**
+ * efi_capsule_open - called by file open
+ * @inode: not used
+ * @file: file pointer
+ *
+ * Allocates a fresh capsule_info structure for each open() call. This
+ * supports multiple concurrent opens, so one user does not need to wait
+ * for another to finish before uploading their capsule binary.
+ **/
+static int efi_capsule_open(struct inode *inode, struct file *file)
+{
+ struct capsule_info *cap_info;
+
+ cap_info = kzalloc(sizeof(*cap_info), GFP_KERNEL);
+ if (!cap_info)
+ return -ENOMEM;
+
+ cap_info->pages = kzalloc(sizeof(void *), GFP_KERNEL);
+ if (!cap_info->pages) {
+ kfree(cap_info);
+ return -ENOMEM;
+ }
+
+ file->private_data = cap_info;
+
+ return 0;
+}
+
+static const struct file_operations efi_capsule_fops = {
+ .owner = THIS_MODULE,
+ .open = efi_capsule_open,
+ .write = efi_capsule_write,
+ .flush = efi_capsule_flush,
+ .release = efi_capsule_release,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice efi_capsule_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "efi_capsule_loader",
+ .fops = &efi_capsule_fops,
+};
+
+static int __init efi_capsule_loader_init(void)
+{
+ int ret;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return -ENODEV;
+
+ ret = misc_register(&efi_capsule_misc);
+ if (ret)
+ pr_err("%s: Failed to register misc char file note\n",
+ __func__);
+
+ return ret;
+}
+module_init(efi_capsule_loader_init);
+
+static void __exit efi_capsule_loader_exit(void)
+{
+ misc_deregister(&efi_capsule_misc);
+}
+module_exit(efi_capsule_loader_exit);
+
+MODULE_DESCRIPTION("EFI capsule firmware binary loader");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
new file mode 100644
index 000000000..53b9fd229
--- /dev/null
+++ b/drivers/firmware/efi/capsule.c
@@ -0,0 +1,308 @@
+/*
+ * EFI capsule support.
+ *
+ * Copyright 2013 Intel Corporation; author Matt Fleming
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/highmem.h>
+#include <linux/efi.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+
+typedef struct {
+ u64 length;
+ u64 data;
+} efi_capsule_block_desc_t;
+
+static bool capsule_pending;
+static bool stop_capsules;
+static int efi_reset_type = -1;
+
+/*
+ * capsule_mutex serialises access to capsule_pending, efi_reset_type
+ * and stop_capsules.
+ */
+static DEFINE_MUTEX(capsule_mutex);
+
+/**
+ * efi_capsule_pending - has a capsule been passed to the firmware?
+ * @reset_type: store the type of EFI reset if capsule is pending
+ *
+ * To ensure that the registered capsule is processed correctly by the
+ * firmware we need to perform a specific type of reset. If a capsule is
+ * pending return the reset type in @reset_type.
+ *
+ * This function races with callers of efi_capsule_update(): for
+ * example, calling it while somebody else is in
+ * efi_capsule_update() but hasn't yet reached
+ * efi_capsule_update_locked() will miss the updates to
+ * capsule_pending and efi_reset_type made once
+ * efi_capsule_update_locked() completes.
+ *
+ * A non-racy use is from platform reboot code because we use
+ * system_state to ensure no capsules can be sent to the firmware once
+ * we're at SYSTEM_RESTART. See efi_capsule_update_locked().
+ */
+bool efi_capsule_pending(int *reset_type)
+{
+ if (!capsule_pending)
+ return false;
+
+ if (reset_type)
+ *reset_type = efi_reset_type;
+
+ return true;
+}
+
+/*
+ * Whitelist of EFI capsule flags that we support.
+ *
+ * We do not handle EFI_CAPSULE_INITIATE_RESET because that would
+ * require us to prepare the kernel for reboot. Refuse to load any
+ * capsules with that flag and any other flags that we do not know how
+ * to handle.
+ */
+#define EFI_CAPSULE_SUPPORTED_FLAG_MASK \
+ (EFI_CAPSULE_PERSIST_ACROSS_RESET | EFI_CAPSULE_POPULATE_SYSTEM_TABLE)
+
+/**
+ * efi_capsule_supported - does the firmware support the capsule?
+ * @guid: vendor guid of capsule
+ * @flags: capsule flags
+ * @size: size of capsule data
+ * @reset: the reset type required for this capsule
+ *
+ * Check whether a capsule with @flags is supported by the firmware
+ * and that @size doesn't exceed the maximum size for a capsule.
+ *
+ * No attempt is made to check @reset against the reset type required
+ * by any pending capsules because of the races involved.
+ */
+int efi_capsule_supported(efi_guid_t guid, u32 flags, size_t size, int *reset)
+{
+ efi_capsule_header_t capsule;
+ efi_capsule_header_t *cap_list[] = { &capsule };
+ efi_status_t status;
+ u64 max_size;
+
+ if (flags & ~EFI_CAPSULE_SUPPORTED_FLAG_MASK)
+ return -EINVAL;
+
+ capsule.headersize = capsule.imagesize = sizeof(capsule);
+ memcpy(&capsule.guid, &guid, sizeof(efi_guid_t));
+ capsule.flags = flags;
+
+ status = efi.query_capsule_caps(cap_list, 1, &max_size, reset);
+ if (status != EFI_SUCCESS)
+ return efi_status_to_err(status);
+
+ if (size > max_size)
+ return -ENOSPC;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(efi_capsule_supported);
+
+/*
+ * Every scatter gather list (block descriptor) page must end with a
+ * continuation pointer. The last continuation pointer of the last
+ * page must be zero to mark the end of the chain.
+ */
+#define SGLIST_PER_PAGE ((PAGE_SIZE / sizeof(efi_capsule_block_desc_t)) - 1)
+
+/*
+ * How many scatter gather list (block descriptor) pages do we need
+ * to map @count pages?
+ */
+static inline unsigned int sg_pages_num(unsigned int count)
+{
+ return DIV_ROUND_UP(count, SGLIST_PER_PAGE);
+}
+
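A worked example of this arithmetic, assuming 4 KiB pages, where sizeof(efi_capsule_block_desc_t) is 16 and SGLIST_PER_PAGE works out to 255; the helper below is illustrative only:

/* Sketch: descriptor-page arithmetic under the 4 KiB page assumption. */
static void __maybe_unused sg_pages_num_example(void)
{
	BUG_ON(sg_pages_num(1)   != 1);	/* 1 descriptor + continuation */
	BUG_ON(sg_pages_num(255) != 1);	/* fills one sg page exactly */
	BUG_ON(sg_pages_num(256) != 2);	/* 256th descriptor spills over */
}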
+/**
+ * efi_capsule_update_locked - pass a single capsule to the firmware
+ * @capsule: capsule to send to the firmware
+ * @sg_pages: array of scatter gather (block descriptor) pages
+ * @reset: the reset type required for @capsule
+ *
+ * This function must be called under capsule_mutex. It checks
+ * whether efi_reset_type conflicts with @reset, and atomically
+ * sets efi_reset_type and capsule_pending if the capsule was
+ * successfully sent to the firmware.
+ *
+ * We also check to see if the system is about to restart, and if so,
+ * abort. This avoids races between efi_capsule_update() and
+ * efi_capsule_pending().
+ */
+static int
+efi_capsule_update_locked(efi_capsule_header_t *capsule,
+ struct page **sg_pages, int reset)
+{
+ efi_physical_addr_t sglist_phys;
+ efi_status_t status;
+
+ lockdep_assert_held(&capsule_mutex);
+
+ /*
+ * If someone has already registered a capsule that requires a
+ * different reset type, we're out of luck and must abort.
+ */
+ if (efi_reset_type >= 0 && efi_reset_type != reset) {
+ pr_err("Conflicting capsule reset type %d (%d).\n",
+ reset, efi_reset_type);
+ return -EINVAL;
+ }
+
+ /*
+ * If the system is getting ready to restart it may have
+ * called efi_capsule_pending() to make decisions (such as
+ * whether to force an EFI reboot), and we're racing against
+ * that call. Abort in that case.
+ */
+ if (unlikely(stop_capsules)) {
+ pr_warn("Capsule update raced with reboot, aborting.\n");
+ return -EINVAL;
+ }
+
+ sglist_phys = page_to_phys(sg_pages[0]);
+
+ status = efi.update_capsule(&capsule, 1, sglist_phys);
+ if (status == EFI_SUCCESS) {
+ capsule_pending = true;
+ efi_reset_type = reset;
+ }
+
+ return efi_status_to_err(status);
+}
+
+/**
+ * efi_capsule_update - send a capsule to the firmware
+ * @capsule: capsule to send to firmware
+ * @pages: an array of capsule data pages
+ *
+ * Build a scatter gather list with EFI capsule block descriptors to
+ * map the capsule described by @capsule with its data in @pages and
+ * send it to the firmware via the UpdateCapsule() runtime service.
+ *
+ * @capsule must be a virtual mapping of the first page in @pages
+ * (@pages[0]) in the kernel address space. That is, an
+ * efi_capsule_header_t that describes the entire contents of the capsule
+ * must be at the start of the first data page.
+ *
+ * Even though this function will validate that the firmware supports
+ * the capsule guid, users will likely want to check that
+ * efi_capsule_supported() returns true before calling this function
+ * because it makes it easier to print helpful error messages.
+ *
+ * If the capsule is successfully submitted to the firmware, any
+ * subsequent calls to efi_capsule_pending() will return true. @pages
+ * must not be released or modified if this function returns
+ * successfully.
+ *
+ * Callers must be prepared for this function to fail, which can
+ * happen if we raced with system reboot or if there is already a
+ * pending capsule that has a reset type that conflicts with the one
+ * required by @capsule. Do NOT use efi_capsule_pending() to detect
+ * this conflict since that would be racy. Instead, submit the capsule
+ * to efi_capsule_update() and check the return value.
+ *
+ * Return 0 on success, a converted EFI status code on failure.
+ */
+int efi_capsule_update(efi_capsule_header_t *capsule, struct page **pages)
+{
+ u32 imagesize = capsule->imagesize;
+ efi_guid_t guid = capsule->guid;
+ unsigned int count, sg_count;
+ u32 flags = capsule->flags;
+ struct page **sg_pages;
+ int rv, reset_type;
+ int i, j;
+
+ rv = efi_capsule_supported(guid, flags, imagesize, &reset_type);
+ if (rv)
+ return rv;
+
+ count = DIV_ROUND_UP(imagesize, PAGE_SIZE);
+ sg_count = sg_pages_num(count);
+
+ sg_pages = kzalloc(sg_count * sizeof(*sg_pages), GFP_KERNEL);
+ if (!sg_pages)
+ return -ENOMEM;
+
+ for (i = 0; i < sg_count; i++) {
+ sg_pages[i] = alloc_page(GFP_KERNEL);
+ if (!sg_pages[i]) {
+ rv = -ENOMEM;
+ goto out;
+ }
+ }
+
+ for (i = 0; i < sg_count; i++) {
+ efi_capsule_block_desc_t *sglist;
+
+ sglist = kmap(sg_pages[i]);
+ if (!sglist) {
+ rv = -ENOMEM;
+ goto out;
+ }
+
+ for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
+ u64 sz = min_t(u64, imagesize, PAGE_SIZE);
+
+ sglist[j].length = sz;
+ sglist[j].data = page_to_phys(*pages++);
+
+ imagesize -= sz;
+ count--;
+ }
+
+ /* Continuation pointer */
+ sglist[j].length = 0;
+
+ if (i + 1 == sg_count)
+ sglist[j].data = 0;
+ else
+ sglist[j].data = page_to_phys(sg_pages[i + 1]);
+
+ kunmap(sg_pages[i]);
+ }
+
+ mutex_lock(&capsule_mutex);
+ rv = efi_capsule_update_locked(capsule, sg_pages, reset_type);
+ mutex_unlock(&capsule_mutex);
+
+out:
+ for (i = 0; rv && i < sg_count; i++) {
+ if (sg_pages[i])
+ __free_page(sg_pages[i]);
+ }
+
+ kfree(sg_pages);
+ return rv;
+}
+EXPORT_SYMBOL_GPL(efi_capsule_update);
+
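A minimal sketch of the calling convention spelled out above (example_send_capsule is hypothetical; capsule-loader.c is the in-tree caller):

/* Hypothetical caller: @hdr must map pages[0], as documented above. */
static int example_send_capsule(efi_capsule_header_t *hdr, struct page **pages)
{
	int reset_type, ret;

	/* Check support first so failures can be reported meaningfully. */
	ret = efi_capsule_supported(hdr->guid, hdr->flags, hdr->imagesize,
				    &reset_type);
	if (ret)
		return ret;

	/* On success, @pages must stay allocated until the reboot. */
	return efi_capsule_update(hdr, pages);
}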
+static int capsule_reboot_notify(struct notifier_block *nb, unsigned long event, void *cmd)
+{
+ mutex_lock(&capsule_mutex);
+ stop_capsules = true;
+ mutex_unlock(&capsule_mutex);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block capsule_reboot_nb = {
+ .notifier_call = capsule_reboot_notify,
+};
+
+static int __init capsule_reboot_register(void)
+{
+ return register_reboot_notifier(&capsule_reboot_nb);
+}
+core_initcall(capsule_reboot_register);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 3a69ed5ec..05509f3aa 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -43,6 +43,7 @@ struct efi __read_mostly efi = {
.config_table = EFI_INVALID_TABLE_ADDR,
.esrt = EFI_INVALID_TABLE_ADDR,
.properties_table = EFI_INVALID_TABLE_ADDR,
+ .mem_attr_table = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);
@@ -256,7 +257,7 @@ subsys_initcall(efisubsys_init);
*/
int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
- struct efi_memory_map *map = efi.memmap;
+ struct efi_memory_map *map = &efi.memmap;
phys_addr_t p, e;
if (!efi_enabled(EFI_MEMMAP)) {
@@ -338,6 +339,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
{EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
+ {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
{NULL_GUID, NULL, NULL},
};
@@ -351,8 +353,9 @@ static __init int match_config_table(efi_guid_t *guid,
for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
if (!efi_guidcmp(*guid, table_types[i].guid)) {
*(table_types[i].ptr) = table;
- pr_cont(" %s=0x%lx ",
- table_types[i].name, table);
+ if (table_types[i].name)
+ pr_cont(" %s=0x%lx ",
+ table_types[i].name, table);
return 1;
}
}
@@ -620,16 +623,12 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
*/
u64 __weak efi_mem_attributes(unsigned long phys_addr)
{
- struct efi_memory_map *map;
efi_memory_desc_t *md;
- void *p;
if (!efi_enabled(EFI_MEMMAP))
return 0;
- map = efi.memmap;
- for (p = map->map; p < map->map_end; p += map->desc_size) {
- md = p;
+ for_each_efi_memory_desc(md) {
if ((md->phys_addr <= phys_addr) &&
(phys_addr < (md->phys_addr +
(md->num_pages << EFI_PAGE_SHIFT))))
@@ -637,3 +636,36 @@ u64 __weak efi_mem_attributes(unsigned long phys_addr)
}
return 0;
}
+
+int efi_status_to_err(efi_status_t status)
+{
+ int err;
+
+ switch (status) {
+ case EFI_SUCCESS:
+ err = 0;
+ break;
+ case EFI_INVALID_PARAMETER:
+ err = -EINVAL;
+ break;
+ case EFI_OUT_OF_RESOURCES:
+ err = -ENOSPC;
+ break;
+ case EFI_DEVICE_ERROR:
+ err = -EIO;
+ break;
+ case EFI_WRITE_PROTECTED:
+ err = -EROFS;
+ break;
+ case EFI_SECURITY_VIOLATION:
+ err = -EACCES;
+ break;
+ case EFI_NOT_FOUND:
+ err = -ENOENT;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
new file mode 100644
index 000000000..8dd0c7085
--- /dev/null
+++ b/drivers/firmware/efi/efibc.c
@@ -0,0 +1,113 @@
+/*
+ * efibc: control EFI bootloaders which obey LoaderEntryOneShot var
+ * Copyright (c) 2013-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#define pr_fmt(fmt) "efibc: " fmt
+
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+static void efibc_str_to_str16(const char *str, efi_char16_t *str16)
+{
+ size_t i;
+
+ for (i = 0; i < strlen(str); i++)
+ str16[i] = str[i];
+
+ str16[i] = '\0';
+}
+
+static int efibc_set_variable(const char *name, const char *value)
+{
+ int ret;
+ efi_guid_t guid = LINUX_EFI_LOADER_ENTRY_GUID;
+ struct efivar_entry *entry;
+ size_t size = (strlen(value) + 1) * sizeof(efi_char16_t);
+
+ if (size > sizeof(entry->var.Data)) {
+ pr_err("value is too large");
+ return -EINVAL;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ pr_err("failed to allocate efivar entry");
+ return -ENOMEM;
+ }
+
+ efibc_str_to_str16(name, entry->var.VariableName);
+ efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data);
+ memcpy(&entry->var.VendorGuid, &guid, sizeof(guid));
+
+ ret = efivar_entry_set(entry,
+ EFI_VARIABLE_NON_VOLATILE
+ | EFI_VARIABLE_BOOTSERVICE_ACCESS
+ | EFI_VARIABLE_RUNTIME_ACCESS,
+ size, entry->var.Data, NULL);
+ if (ret)
+ pr_err("failed to set %s EFI variable: 0x%x\n",
+ name, ret);
+
+ kfree(entry);
+ return ret;
+}
+
+static int efibc_reboot_notifier_call(struct notifier_block *notifier,
+ unsigned long event, void *data)
+{
+ const char *reason = "shutdown";
+ int ret;
+
+ if (event == SYS_RESTART)
+ reason = "reboot";
+
+ ret = efibc_set_variable("LoaderEntryRebootReason", reason);
+ if (ret || !data)
+ return NOTIFY_DONE;
+
+ efibc_set_variable("LoaderEntryOneShot", (char *)data);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block efibc_reboot_notifier = {
+ .notifier_call = efibc_reboot_notifier_call,
+};
+
+static int __init efibc_init(void)
+{
+ int ret;
+
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return -ENODEV;
+
+ ret = register_reboot_notifier(&efibc_reboot_notifier);
+ if (ret)
+ pr_err("unable to register reboot notifier\n");
+
+ return ret;
+}
+module_init(efibc_init);
+
+static void __exit efibc_exit(void)
+{
+ unregister_reboot_notifier(&efibc_reboot_notifier);
+}
+module_exit(efibc_exit);
+
+MODULE_AUTHOR("Jeremy Compostella <jeremy.compostella@intel.com>");
+MODULE_AUTHOR("Matt Gumbel <matthew.k.gumbel@intel.com");
+MODULE_DESCRIPTION("EFI Bootloader Control");
+MODULE_LICENSE("GPL v2");
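For context, a sketch of how user space can exercise this notifier: the optional argument of reboot(2)'s RESTART2 command is what arrives in the data pointer above and gets copied into LoaderEntryOneShot. The label name is illustrative:

/* Hypothetical trigger; not part of this patch. */
#include <linux/reboot.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* efibc stores "bootloader" in LoaderEntryOneShot, then restarts. */
	return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		       LINUX_REBOOT_CMD_RESTART2, "bootloader");
}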
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 096adcbcb..116b244de 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -661,7 +661,7 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
return;
err = efivar_init(efivar_update_sysfs_entry, entry,
- true, false, &efivar_sysfs_list);
+ false, &efivar_sysfs_list);
if (!err)
break;
@@ -730,8 +730,7 @@ int efivars_sysfs_init(void)
return -ENOMEM;
}
- efivar_init(efivars_sysfs_callback, NULL, false,
- true, &efivar_sysfs_list);
+ efivar_init(efivars_sysfs_callback, NULL, true, &efivar_sysfs_list);
error = create_efivars_bin_attributes();
if (error) {
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index ed3a85495..48430aba1 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -57,7 +57,7 @@ static int __init cmp_fake_mem(const void *x1, const void *x2)
void __init efi_fake_memmap(void)
{
u64 start, end, m_start, m_end, m_attr;
- int new_nr_map = memmap.nr_map;
+ int new_nr_map = efi.memmap.nr_map;
efi_memory_desc_t *md;
phys_addr_t new_memmap_phy;
void *new_memmap;
@@ -68,8 +68,7 @@ void __init efi_fake_memmap(void)
return;
/* count up the number of EFI memory descriptor */
- for (old = memmap.map; old < memmap.map_end; old += memmap.desc_size) {
- md = old;
+ for_each_efi_memory_desc(md) {
start = md->phys_addr;
end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
@@ -95,25 +94,25 @@ void __init efi_fake_memmap(void)
}
/* allocate memory for new EFI memmap */
- new_memmap_phy = memblock_alloc(memmap.desc_size * new_nr_map,
+ new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
PAGE_SIZE);
if (!new_memmap_phy)
return;
/* create new EFI memmap */
new_memmap = early_memremap(new_memmap_phy,
- memmap.desc_size * new_nr_map);
+ efi.memmap.desc_size * new_nr_map);
if (!new_memmap) {
- memblock_free(new_memmap_phy, memmap.desc_size * new_nr_map);
+ memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map);
return;
}
- for (old = memmap.map, new = new_memmap;
- old < memmap.map_end;
- old += memmap.desc_size, new += memmap.desc_size) {
+ for (old = efi.memmap.map, new = new_memmap;
+ old < efi.memmap.map_end;
+ old += efi.memmap.desc_size, new += efi.memmap.desc_size) {
/* copy original EFI memory descriptor */
- memcpy(new, old, memmap.desc_size);
+ memcpy(new, old, efi.memmap.desc_size);
md = new;
start = md->phys_addr;
end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
@@ -134,8 +133,8 @@ void __init efi_fake_memmap(void)
md->num_pages = (m_end - md->phys_addr + 1) >>
EFI_PAGE_SHIFT;
/* latter part */
- new += memmap.desc_size;
- memcpy(new, old, memmap.desc_size);
+ new += efi.memmap.desc_size;
+ memcpy(new, old, efi.memmap.desc_size);
md = new;
md->phys_addr = m_end + 1;
md->num_pages = (end - md->phys_addr + 1) >>
@@ -147,16 +146,16 @@ void __init efi_fake_memmap(void)
md->num_pages = (m_start - md->phys_addr) >>
EFI_PAGE_SHIFT;
/* middle part */
- new += memmap.desc_size;
- memcpy(new, old, memmap.desc_size);
+ new += efi.memmap.desc_size;
+ memcpy(new, old, efi.memmap.desc_size);
md = new;
md->attribute |= m_attr;
md->phys_addr = m_start;
md->num_pages = (m_end - m_start + 1) >>
EFI_PAGE_SHIFT;
/* last part */
- new += memmap.desc_size;
- memcpy(new, old, memmap.desc_size);
+ new += efi.memmap.desc_size;
+ memcpy(new, old, efi.memmap.desc_size);
md = new;
md->phys_addr = m_end + 1;
md->num_pages = (end - m_end) >>
@@ -169,8 +168,8 @@ void __init efi_fake_memmap(void)
md->num_pages = (m_start - md->phys_addr) >>
EFI_PAGE_SHIFT;
/* latter part */
- new += memmap.desc_size;
- memcpy(new, old, memmap.desc_size);
+ new += efi.memmap.desc_size;
+ memcpy(new, old, efi.memmap.desc_size);
md = new;
md->phys_addr = m_start;
md->num_pages = (end - md->phys_addr + 1) >>
@@ -182,10 +181,10 @@ void __init efi_fake_memmap(void)
/* swap into new EFI memmap */
efi_unmap_memmap();
- memmap.map = new_memmap;
- memmap.phys_map = new_memmap_phy;
- memmap.nr_map = new_nr_map;
- memmap.map_end = memmap.map + memmap.nr_map * memmap.desc_size;
+ efi.memmap.map = new_memmap;
+ efi.memmap.phys_map = new_memmap_phy;
+ efi.memmap.nr_map = new_nr_map;
+ efi.memmap.map_end = efi.memmap.map + efi.memmap.nr_map * efi.memmap.desc_size;
set_bit(EFI_MEMMAP, &efi.flags);
/* print new EFI memmap */
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index da99bbb74..c06945160 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -28,7 +28,7 @@ OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
-lib-y := efi-stub-helper.o
+lib-y := efi-stub-helper.o gop.o
# include the stub's generic dependencies from lib/ when building for ARM/arm64
arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 414deb85c..993aa5675 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -20,27 +20,49 @@
bool __nokaslr;
-static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg)
+static int efi_get_secureboot(efi_system_table_t *sys_table_arg)
{
- static efi_guid_t const var_guid = EFI_GLOBAL_VARIABLE_GUID;
- static efi_char16_t const var_name[] = {
+ static efi_char16_t const sb_var_name[] = {
'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 };
+ static efi_char16_t const sm_var_name[] = {
+ 'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 };
+ efi_guid_t var_guid = EFI_GLOBAL_VARIABLE_GUID;
efi_get_variable_t *f_getvar = sys_table_arg->runtime->get_variable;
- unsigned long size = sizeof(u8);
- efi_status_t status;
u8 val;
+ unsigned long size = sizeof(val);
+ efi_status_t status;
- status = f_getvar((efi_char16_t *)var_name, (efi_guid_t *)&var_guid,
+ status = f_getvar((efi_char16_t *)sb_var_name, (efi_guid_t *)&var_guid,
NULL, &size, &val);
+ if (status != EFI_SUCCESS)
+ goto out_efi_err;
+
+ if (val == 0)
+ return 0;
+
+ status = f_getvar((efi_char16_t *)sm_var_name, (efi_guid_t *)&var_guid,
+ NULL, &size, &val);
+
+ if (status != EFI_SUCCESS)
+ goto out_efi_err;
+
+ if (val == 1)
+ return 0;
+
+ return 1;
+
+out_efi_err:
switch (status) {
- case EFI_SUCCESS:
- return val;
case EFI_NOT_FOUND:
return 0;
+ case EFI_DEVICE_ERROR:
+ return -EIO;
+ case EFI_SECURITY_VIOLATION:
+ return -EACCES;
default:
- return 1;
+ return -EINVAL;
}
}
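To summarise the control flow above as a decision table (assuming standard UEFI semantics for these variables):

/*
 * Illustrative summary of efi_get_secureboot():
 *
 *   SecureBoot   SetupMode    result
 *   ----------   ---------    ------
 *   0                -         0  (disabled)
 *   non-zero         1         0  (setup mode, not enforced)
 *   non-zero     not 1         1  (enabled)
 *
 * A variable that cannot be found yields 0 (disabled); other read
 * failures map to -EIO, -EACCES or -EINVAL via the switch above.
 */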
@@ -147,6 +169,25 @@ void efi_char16_printk(efi_system_table_t *sys_table_arg,
out->output_string(out, str);
}
+static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
+{
+ efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
+ efi_status_t status;
+ unsigned long size;
+ void **gop_handle = NULL;
+ struct screen_info *si = NULL;
+
+ size = 0;
+ status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+ &gop_proto, NULL, &size, gop_handle);
+ if (status == EFI_BUFFER_TOO_SMALL) {
+ si = alloc_screen_info(sys_table_arg);
+ if (!si)
+ return NULL;
+ efi_setup_gop(sys_table_arg, si, &gop_proto, size);
+ }
+ return si;
+}
/*
* This function handles the architecture-specific differences between arm and
@@ -185,6 +226,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID;
unsigned long reserve_addr = 0;
unsigned long reserve_size = 0;
+ int secure_boot = 0;
+ struct screen_info *si;
/* Check if we were booted by the EFI firmware */
if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
@@ -237,6 +280,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
__nokaslr = true;
}
+ si = setup_graphics(sys_table);
+
status = handle_kernel_image(sys_table, image_addr, &image_size,
&reserve_addr,
&reserve_size,
@@ -250,12 +295,21 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (status != EFI_SUCCESS)
pr_efi_err(sys_table, "Failed to parse EFI cmdline options\n");
+ secure_boot = efi_get_secureboot(sys_table);
+ if (secure_boot > 0)
+ pr_efi(sys_table, "UEFI Secure Boot is enabled.\n");
+
+ if (secure_boot < 0) {
+ pr_efi_err(sys_table,
+ "could not determine UEFI Secure Boot status.\n");
+ }
+
/*
* Unauthenticated device tree data is a security hazard, so
* ignore 'dtb=' unless UEFI Secure Boot is disabled.
*/
- if (efi_secureboot_enabled(sys_table)) {
- pr_efi(sys_table, "UEFI Secure Boot is enabled.\n");
+ if (secure_boot != 0 && strstr(cmdline_ptr, "dtb=")) {
+ pr_efi(sys_table, "Ignoring DTB from command line.\n");
} else {
status = handle_cmdline_files(sys_table, image, cmdline_ptr,
"dtb=",
@@ -309,6 +363,7 @@ fail_free_image:
efi_free(sys_table, image_size, *image_addr);
efi_free(sys_table, reserve_size, reserve_addr);
fail_free_cmdline:
+ free_screen_info(sys_table, si);
efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
fail:
return EFI_ERROR;
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 6f42be4d0..e1f0b28e1 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -26,6 +26,43 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
return EFI_SUCCESS;
}
+static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID;
+
+struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg)
+{
+ struct screen_info *si;
+ efi_status_t status;
+
+ /*
+ * Unlike on arm64, where we can directly fill out the screen_info
+ * structure from the stub, we need to allocate a buffer to hold
+ * its contents while we hand over to the kernel proper from the
+ * decompressor.
+ */
+ status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+ sizeof(*si), (void **)&si);
+
+ if (status != EFI_SUCCESS)
+ return NULL;
+
+ status = efi_call_early(install_configuration_table,
+ &screen_info_guid, si);
+ if (status == EFI_SUCCESS)
+ return si;
+
+ efi_call_early(free_pool, si);
+ return NULL;
+}
+
+void free_screen_info(efi_system_table_t *sys_table_arg, struct screen_info *si)
+{
+ if (!si)
+ return;
+
+ efi_call_early(install_configuration_table, &screen_info_guid, NULL);
+ efi_call_early(free_pool, si);
+}
+
efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
unsigned long *image_addr,
unsigned long *image_size,
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index a90f6459f..eae693eb3 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -81,15 +81,24 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
/*
+ * If CONFIG_DEBUG_ALIGN_RODATA is not set, produce a
+ * displacement in the interval [0, MIN_KIMG_ALIGN) that
+ * is a multiple of the minimal segment alignment (SZ_64K)
+ */
+ u32 mask = (MIN_KIMG_ALIGN - 1) & ~(SZ_64K - 1);
+ u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ?
+ (phys_seed >> 32) & mask : TEXT_OFFSET;
+
+ /*
* If KASLR is enabled, and we have some randomness available,
* locate the kernel at a randomized offset in physical memory.
*/
- *reserve_size = kernel_memsize + TEXT_OFFSET;
+ *reserve_size = kernel_memsize + offset;
status = efi_random_alloc(sys_table_arg, *reserve_size,
MIN_KIMG_ALIGN, reserve_addr,
- phys_seed);
+ (u32)phys_seed);
- *image_addr = *reserve_addr + TEXT_OFFSET;
+ *image_addr = *reserve_addr + offset;
} else {
/*
* Else, try a straight allocation at the preferred offset.
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 29ed2f9b2..3bd127f95 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -125,10 +125,12 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
map.map_end = map.map + map_size;
- for_each_efi_memory_desc(&map, md)
- if (md->attribute & EFI_MEMORY_WB)
+ for_each_efi_memory_desc_in_map(&map, md) {
+ if (md->attribute & EFI_MEMORY_WB) {
if (membase > md->phys_addr)
membase = md->phys_addr;
+ }
+ }
efi_call_early(free_pool, map.map);
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 6dba78aef..e58abfa95 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -24,7 +24,7 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
unsigned long map_size, unsigned long desc_size,
u32 desc_ver)
{
- int node, prev, num_rsv;
+ int node, num_rsv;
int status;
u32 fdt_val32;
u64 fdt_val64;
@@ -54,28 +54,6 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
goto fdt_set_fail;
/*
- * Delete any memory nodes present. We must delete nodes which
- * early_init_dt_scan_memory may try to use.
- */
- prev = 0;
- for (;;) {
- const char *type;
- int len;
-
- node = fdt_next_node(fdt, prev, NULL);
- if (node < 0)
- break;
-
- type = fdt_getprop(fdt, node, "device_type", &len);
- if (type && strncmp(type, "memory", len) == 0) {
- fdt_del_node(fdt, node);
- continue;
- }
-
- prev = node;
- }
-
- /*
* Delete all memory reserve map entries. When booting via UEFI,
* kernel will use the UEFI memory map to find reserved regions.
*/
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
new file mode 100644
index 000000000..932742e4c
--- /dev/null
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -0,0 +1,354 @@
+/* -----------------------------------------------------------------------
+ *
+ * Copyright 2011 Intel Corporation; author Matt Fleming
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
+ *
+ * ----------------------------------------------------------------------- */
+
+#include <linux/efi.h>
+#include <linux/screen_info.h>
+#include <asm/efi.h>
+#include <asm/setup.h>
+
+static void find_bits(unsigned long mask, u8 *pos, u8 *size)
+{
+ u8 first, len;
+
+ first = 0;
+ len = 0;
+
+ if (mask) {
+ while (!(mask & 0x1)) {
+ mask = mask >> 1;
+ first++;
+ }
+
+ while (mask & 0x1) {
+ mask = mask >> 1;
+ len++;
+ }
+ }
+
+ *pos = first;
+ *size = len;
+}
+
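For example, find_bits() decomposes a contiguous channel mask into its shift and width; the masks below are illustrative:

/* Illustrative only. */
u8 pos, size;

find_bits(0x00ff0000, &pos, &size);	/* pos == 16, size == 8 (XRGB8888 red) */
find_bits(0x0000001f, &pos, &size);	/* pos == 0,  size == 5 (RGB565 blue) */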
+static void
+setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
+ struct efi_pixel_bitmask pixel_info, int pixel_format)
+{
+ if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
+ si->lfb_depth = 32;
+ si->lfb_linelength = pixels_per_scan_line * 4;
+ si->red_size = 8;
+ si->red_pos = 0;
+ si->green_size = 8;
+ si->green_pos = 8;
+ si->blue_size = 8;
+ si->blue_pos = 16;
+ si->rsvd_size = 8;
+ si->rsvd_pos = 24;
+ } else if (pixel_format == PIXEL_BGR_RESERVED_8BIT_PER_COLOR) {
+ si->lfb_depth = 32;
+ si->lfb_linelength = pixels_per_scan_line * 4;
+ si->red_size = 8;
+ si->red_pos = 16;
+ si->green_size = 8;
+ si->green_pos = 8;
+ si->blue_size = 8;
+ si->blue_pos = 0;
+ si->rsvd_size = 8;
+ si->rsvd_pos = 24;
+ } else if (pixel_format == PIXEL_BIT_MASK) {
+ find_bits(pixel_info.red_mask, &si->red_pos, &si->red_size);
+ find_bits(pixel_info.green_mask, &si->green_pos,
+ &si->green_size);
+ find_bits(pixel_info.blue_mask, &si->blue_pos, &si->blue_size);
+ find_bits(pixel_info.reserved_mask, &si->rsvd_pos,
+ &si->rsvd_size);
+ si->lfb_depth = si->red_size + si->green_size +
+ si->blue_size + si->rsvd_size;
+ si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8;
+ } else {
+ si->lfb_depth = 4;
+ si->lfb_linelength = si->lfb_width / 2;
+ si->red_size = 0;
+ si->red_pos = 0;
+ si->green_size = 0;
+ si->green_pos = 0;
+ si->blue_size = 0;
+ si->blue_pos = 0;
+ si->rsvd_size = 0;
+ si->rsvd_pos = 0;
+ }
+}
+
+static efi_status_t
+__gop_query32(efi_system_table_t *sys_table_arg,
+ struct efi_graphics_output_protocol_32 *gop32,
+ struct efi_graphics_output_mode_info **info,
+ unsigned long *size, u64 *fb_base)
+{
+ struct efi_graphics_output_protocol_mode_32 *mode;
+ efi_graphics_output_protocol_query_mode query_mode;
+ efi_status_t status;
+ unsigned long m;
+
+ m = gop32->mode;
+ mode = (struct efi_graphics_output_protocol_mode_32 *)m;
+ query_mode = (void *)(unsigned long)gop32->query_mode;
+
+ status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size,
+ info);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *fb_base = mode->frame_buffer_base;
+ return status;
+}
+
+static efi_status_t
+setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
+ efi_guid_t *proto, unsigned long size, void **gop_handle)
+{
+ struct efi_graphics_output_protocol_32 *gop32, *first_gop;
+ unsigned long nr_gops;
+ u16 width, height;
+ u32 pixels_per_scan_line;
+ u32 ext_lfb_base;
+ u64 fb_base;
+ struct efi_pixel_bitmask pixel_info;
+ int pixel_format;
+ efi_status_t status = EFI_NOT_FOUND;
+ u32 *handles = (u32 *)(unsigned long)gop_handle;
+ int i;
+
+ first_gop = NULL;
+ gop32 = NULL;
+
+ nr_gops = size / sizeof(u32);
+ for (i = 0; i < nr_gops; i++) {
+ struct efi_graphics_output_mode_info *info = NULL;
+ efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+ bool conout_found = false;
+ void *dummy = NULL;
+ efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
+ u64 current_fb_base;
+
+ status = efi_call_early(handle_protocol, h,
+ proto, (void **)&gop32);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ status = efi_call_early(handle_protocol, h,
+ &conout_proto, &dummy);
+ if (status == EFI_SUCCESS)
+ conout_found = true;
+
+ status = __gop_query32(sys_table_arg, gop32, &info, &size,
+ &current_fb_base);
+ if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+ /*
+ * Systems that use the UEFI Console Splitter may
+ * provide multiple GOP devices, not all of which are
+ * backed by real hardware. The workaround is to search
+ * for a GOP implementing the ConOut protocol, and if
+ * one isn't found, to just fall back to the first GOP.
+ */
+ width = info->horizontal_resolution;
+ height = info->vertical_resolution;
+ pixel_format = info->pixel_format;
+ pixel_info = info->pixel_information;
+ pixels_per_scan_line = info->pixels_per_scan_line;
+ fb_base = current_fb_base;
+
+ /*
+ * Once we've found a GOP supporting ConOut,
+ * don't bother looking any further.
+ */
+ first_gop = gop32;
+ if (conout_found)
+ break;
+ }
+ }
+
+ /* Did we find any GOPs? */
+ if (!first_gop)
+ goto out;
+
+ /* EFI framebuffer */
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+ si->lfb_width = width;
+ si->lfb_height = height;
+ si->lfb_base = fb_base;
+
+ ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
+ if (ext_lfb_base) {
+ si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+ si->ext_lfb_base = ext_lfb_base;
+ }
+
+ si->pages = 1;
+
+ setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
+
+ si->lfb_size = si->lfb_linelength * si->lfb_height;
+
+ si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+out:
+ return status;
+}
+
+static efi_status_t
+__gop_query64(efi_system_table_t *sys_table_arg,
+ struct efi_graphics_output_protocol_64 *gop64,
+ struct efi_graphics_output_mode_info **info,
+ unsigned long *size, u64 *fb_base)
+{
+ struct efi_graphics_output_protocol_mode_64 *mode;
+ efi_graphics_output_protocol_query_mode query_mode;
+ efi_status_t status;
+ unsigned long m;
+
+ m = gop64->mode;
+ mode = (struct efi_graphics_output_protocol_mode_64 *)m;
+ query_mode = (void *)(unsigned long)gop64->query_mode;
+
+ status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size,
+ info);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ *fb_base = mode->frame_buffer_base;
+ return status;
+}
+
+static efi_status_t
+setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
+ efi_guid_t *proto, unsigned long size, void **gop_handle)
+{
+ struct efi_graphics_output_protocol_64 *gop64, *first_gop;
+ unsigned long nr_gops;
+ u16 width, height;
+ u32 pixels_per_scan_line;
+ u32 ext_lfb_base;
+ u64 fb_base;
+ struct efi_pixel_bitmask pixel_info;
+ int pixel_format;
+ efi_status_t status = EFI_NOT_FOUND;
+ u64 *handles = (u64 *)(unsigned long)gop_handle;
+ int i;
+
+ first_gop = NULL;
+ gop64 = NULL;
+
+ nr_gops = size / sizeof(u64);
+ for (i = 0; i < nr_gops; i++) {
+ struct efi_graphics_output_mode_info *info = NULL;
+ efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+ bool conout_found = false;
+ void *dummy = NULL;
+ efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
+ u64 current_fb_base;
+
+ status = efi_call_early(handle_protocol, h,
+ proto, (void **)&gop64);
+ if (status != EFI_SUCCESS)
+ continue;
+
+ status = efi_call_early(handle_protocol, h,
+ &conout_proto, &dummy);
+ if (status == EFI_SUCCESS)
+ conout_found = true;
+
+ status = __gop_query64(sys_table_arg, gop64, &info, &size,
+ &current_fb_base);
+ if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+ /*
+ * Systems that use the UEFI Console Splitter may
+ * provide multiple GOP devices, not all of which are
+ * backed by real hardware. The workaround is to search
+ * for a GOP implementing the ConOut protocol, and if
+ * one isn't found, to just fall back to the first GOP.
+ */
+ width = info->horizontal_resolution;
+ height = info->vertical_resolution;
+ pixel_format = info->pixel_format;
+ pixel_info = info->pixel_information;
+ pixels_per_scan_line = info->pixels_per_scan_line;
+ fb_base = current_fb_base;
+
+ /*
+ * Once we've found a GOP supporting ConOut,
+ * don't bother looking any further.
+ */
+ first_gop = gop64;
+ if (conout_found)
+ break;
+ }
+ }
+
+ /* Did we find any GOPs? */
+ if (!first_gop)
+ goto out;
+
+ /* EFI framebuffer */
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+ si->lfb_width = width;
+ si->lfb_height = height;
+ si->lfb_base = fb_base;
+
+ ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
+ if (ext_lfb_base) {
+ si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+ si->ext_lfb_base = ext_lfb_base;
+ }
+
+ si->pages = 1;
+
+ setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
+
+ si->lfb_size = si->lfb_linelength * si->lfb_height;
+
+ si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+out:
+ return status;
+}
+
+/*
+ * See if we have Graphics Output Protocol
+ */
+efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
+ struct screen_info *si, efi_guid_t *proto,
+ unsigned long size)
+{
+ efi_status_t status;
+ void **gop_handle = NULL;
+
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ size, (void **)&gop_handle);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_early(locate_handle,
+ EFI_LOCATE_BY_PROTOCOL,
+ proto, NULL, &size, gop_handle);
+ if (status != EFI_SUCCESS)
+ goto free_handle;
+
+ if (efi_is_64bit()) {
+ status = setup_gop64(sys_table_arg, si, proto, size,
+ gop_handle);
+ } else {
+ status = setup_gop32(sys_table_arg, si, proto, size,
+ gop_handle);
+ }
+
+free_handle:
+ efi_call_early(free_pool, gop_handle);
+ return status;
+}
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
new file mode 100644
index 000000000..236004b9a
--- /dev/null
+++ b/drivers/firmware/efi/memattr.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "efi: memattr: " fmt
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
+
+static int __initdata tbl_size;
+
+/*
+ * Reserve the memory associated with the Memory Attributes configuration
+ * table, if it exists.
+ */
+int __init efi_memattr_init(void)
+{
+ efi_memory_attributes_table_t *tbl;
+
+ if (efi.mem_attr_table == EFI_INVALID_TABLE_ADDR)
+ return 0;
+
+ tbl = early_memremap(efi.mem_attr_table, sizeof(*tbl));
+ if (!tbl) {
+ pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+ efi.mem_attr_table);
+ return -ENOMEM;
+ }
+
+ if (tbl->version > 1) {
+ pr_warn("Unexpected EFI Memory Attributes table version %d\n",
+ tbl->version);
+ goto unmap;
+ }
+
+ tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
+ memblock_reserve(efi.mem_attr_table, tbl_size);
+
+unmap:
+ early_memunmap(tbl, sizeof(*tbl));
+ return 0;
+}
+
+/*
+ * Returns a copy @out of the UEFI memory descriptor @in if it is covered
+ * entirely by a UEFI memory map entry with matching attributes. The virtual
+ * address of @out is set according to the matching entry that was found.
+ */
+static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
+{
+ u64 in_paddr = in->phys_addr;
+ u64 in_size = in->num_pages << EFI_PAGE_SHIFT;
+ efi_memory_desc_t *md;
+
+ *out = *in;
+
+ if (in->type != EFI_RUNTIME_SERVICES_CODE &&
+ in->type != EFI_RUNTIME_SERVICES_DATA) {
+ pr_warn("Entry type should be RuntimeServiceCode/Data\n");
+ return false;
+ }
+
+ if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
+ pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
+ return false;
+ }
+
+ if (PAGE_SIZE > EFI_PAGE_SIZE &&
+ (!PAGE_ALIGNED(in->phys_addr) ||
+ !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
+ /*
+ * Since arm64 may execute with page sizes of up to 64 KB, the
+ * UEFI spec mandates that RuntimeServices memory regions must
+ * be 64 KB aligned. We need to validate this here since we will
+ * not be able to tighten permissions on such regions without
+ * affecting adjacent regions.
+ */
+ pr_warn("Entry address region misaligned\n");
+ return false;
+ }
+
+ for_each_efi_memory_desc(md) {
+ u64 md_paddr = md->phys_addr;
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0) {
+ /* no virtual mapping has been installed by the stub */
+ break;
+ }
+
+ if (md_paddr > in_paddr || (in_paddr - md_paddr) >= md_size)
+ continue;
+
+ /*
+ * This entry covers the start of @in, check whether
+ * it covers the end as well.
+ */
+ if (md_paddr + md_size < in_paddr + in_size) {
+ pr_warn("Entry covers multiple EFI memory map regions\n");
+ return false;
+ }
+
+ if (md->type != in->type) {
+ pr_warn("Entry type deviates from EFI memory map region type\n");
+ return false;
+ }
+
+ out->virt_addr = in_paddr + (md->virt_addr - md_paddr);
+
+ return true;
+ }
+
+ pr_warn("No matching entry found in the EFI memory map\n");
+ return false;
+}
+
+/*
+ * To be called after the EFI page tables have been populated. If a memory
+ * attributes table is available, its contents will be used to update the
+ * mappings with tightened permissions as described by the table.
+ * This requires the UEFI memory map to have already been populated with
+ * virtual addresses.
+ */
+int __init efi_memattr_apply_permissions(struct mm_struct *mm,
+ efi_memattr_perm_setter fn)
+{
+ efi_memory_attributes_table_t *tbl;
+ int i, ret;
+
+ if (tbl_size <= sizeof(*tbl))
+ return 0;
+
+ /*
+	 * We need the EFI memory map to be set up so we can use it to
+	 * look up the virtual addresses of all entries in the EFI
+ * Memory Attributes table. If it isn't available, this
+ * function should not be called.
+ */
+ if (WARN_ON(!efi_enabled(EFI_MEMMAP)))
+ return 0;
+
+ tbl = memremap(efi.mem_attr_table, tbl_size, MEMREMAP_WB);
+ if (!tbl) {
+ pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+ efi.mem_attr_table);
+ return -ENOMEM;
+ }
+
+ if (efi_enabled(EFI_DBG))
+ pr_info("Processing EFI Memory Attributes table:\n");
+
+ for (i = ret = 0; ret == 0 && i < tbl->num_entries; i++) {
+ efi_memory_desc_t md;
+ unsigned long size;
+ bool valid;
+ char buf[64];
+
+ valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
+ &md);
+ size = md.num_pages << EFI_PAGE_SHIFT;
+ if (efi_enabled(EFI_DBG) || !valid)
+ pr_info("%s 0x%012llx-0x%012llx %s\n",
+ valid ? "" : "!", md.phys_addr,
+ md.phys_addr + size - 1,
+ efi_md_typeattr_format(buf, sizeof(buf), &md));
+
+ if (valid)
+ ret = fn(mm, &md);
+ }
+ memunmap(tbl);
+ return ret;
+}
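
The loop above strides through the table by tbl->desc_size rather than by
sizeof(efi_memory_desc_t), because firmware is permitted to use a larger
per-entry size than the kernel's structure. A minimal user-space sketch of
that stride arithmetic (demo types and names, not kernel code):

#include <stddef.h>
#include <string.h>

struct demo_desc {
	unsigned long long phys_addr;
	/* ... further fields, as in efi_memory_desc_t ... */
};

static void walk_packed_table(const void *entries, size_t num_entries,
			      size_t desc_size)
{
	size_t i;
	struct demo_desc md;

	for (i = 0; i < num_entries; i++) {
		/* desc_size >= sizeof(md); trailing bytes are ignored */
		memcpy(&md, (const char *)entries + i * desc_size,
		       sizeof(md));
		/* ... validate md and tighten permissions ... */
	}
}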
diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c
index 9c59d1c79..62ead9b9d 100644
--- a/drivers/firmware/efi/reboot.c
+++ b/drivers/firmware/efi/reboot.c
@@ -9,7 +9,8 @@ int efi_reboot_quirk_mode = -1;
void efi_reboot(enum reboot_mode reboot_mode, const char *__unused)
{
- int efi_mode;
+ const char *str[] = { "cold", "warm", "shutdown", "platform" };
+ int efi_mode, cap_reset_mode;
if (!efi_enabled(EFI_RUNTIME_SERVICES))
return;
@@ -30,6 +31,15 @@ void efi_reboot(enum reboot_mode reboot_mode, const char *__unused)
if (efi_reboot_quirk_mode != -1)
efi_mode = efi_reboot_quirk_mode;
+ if (efi_capsule_pending(&cap_reset_mode)) {
+ if (efi_mode != cap_reset_mode)
+ printk(KERN_CRIT "efi: %s reset requested but pending "
+ "capsule update requires %s reset... Performing "
+ "%s reset.\n", str[efi_mode], str[cap_reset_mode],
+ str[cap_reset_mode]);
+ efi_mode = cap_reset_mode;
+ }
+
efi.reset_system(efi_mode, EFI_SUCCESS, 0, NULL);
}
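
A brief contract sketch for the capsule check used above (simplified;
do_reset() is a stand-in, not a real kernel API): efi_capsule_pending()
returns true when a capsule update has been queued for the next reset and
writes the reset type the firmware requires into its argument.

void demo_reboot(int wanted_mode)
{
	int required_mode;

	if (efi_capsule_pending(&required_mode))
		wanted_mode = required_mode;	/* firmware wins */
	do_reset(wanted_mode);			/* hypothetical helper */
}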
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index de6953039..23bef6bb7 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -16,10 +16,70 @@
#include <linux/bug.h>
#include <linux/efi.h>
+#include <linux/irqflags.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/stringify.h>
#include <asm/efi.h>
+static void efi_call_virt_check_flags(unsigned long flags, const char *call)
+{
+ unsigned long cur_flags, mismatch;
+
+ local_save_flags(cur_flags);
+
+ mismatch = flags ^ cur_flags;
+ if (!WARN_ON_ONCE(mismatch & ARCH_EFI_IRQ_FLAGS_MASK))
+ return;
+
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_NOW_UNRELIABLE);
+ pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI %s\n",
+ flags, cur_flags, call);
+ local_irq_restore(flags);
+}
+
+/*
+ * Arch code can implement the following three template macros, avoiding
+ * repetition for the void/non-void return cases of {__,}efi_call_virt:
+ *
+ * * arch_efi_call_virt_setup
+ *
+ * Sets up the environment for the call (e.g. switching page tables,
+ * allowing kernel-mode use of floating point, if required).
+ *
+ * * arch_efi_call_virt
+ *
+ * Performs the call. The last expression in the macro must be the call
+ * itself, allowing the logic to be shared by the void and non-void
+ * cases.
+ *
+ * * arch_efi_call_virt_teardown
+ *
+ * Restores the usual kernel environment once the call has returned.
+ */
+
+#define efi_call_virt(f, args...) \
+({ \
+ efi_status_t __s; \
+ unsigned long flags; \
+ arch_efi_call_virt_setup(); \
+ local_save_flags(flags); \
+ __s = arch_efi_call_virt(f, args); \
+ efi_call_virt_check_flags(flags, __stringify(f)); \
+ arch_efi_call_virt_teardown(); \
+ __s; \
+})
+
+#define __efi_call_virt(f, args...) \
+({ \
+ unsigned long flags; \
+ arch_efi_call_virt_setup(); \
+ local_save_flags(flags); \
+ arch_efi_call_virt(f, args); \
+ efi_call_virt_check_flags(flags, __stringify(f)); \
+ arch_efi_call_virt_teardown(); \
+})
+
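As a concrete illustration of the template contract described above, an
architecture port might provide something like the following (the helper
functions are hypothetical; only the three macro names are part of the
real interface):

#define arch_efi_call_virt_setup()					\
({									\
	my_arch_load_efi_pgd();		/* hypothetical: EFI tables */	\
	my_arch_fpu_begin();		/* hypothetical: FP for FW */	\
})

#define arch_efi_call_virt(f, args...)					\
	efi.systab->runtime->f(args)	/* call is the last expression */

#define arch_efi_call_virt_teardown()					\
({									\
	my_arch_fpu_end();		/* hypothetical helper */	\
	my_arch_restore_kernel_pgd();	/* hypothetical helper */	\
})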
/*
* According to section 7.1 of the UEFI spec, Runtime Services are not fully
* reentrant, and there are particular combinations of calls that need to be
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 34b741940..d3b751383 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -329,39 +329,6 @@ check_var_size_nonblocking(u32 attributes, unsigned long size)
return fops->query_variable_store(attributes, size, true);
}
-static int efi_status_to_err(efi_status_t status)
-{
- int err;
-
- switch (status) {
- case EFI_SUCCESS:
- err = 0;
- break;
- case EFI_INVALID_PARAMETER:
- err = -EINVAL;
- break;
- case EFI_OUT_OF_RESOURCES:
- err = -ENOSPC;
- break;
- case EFI_DEVICE_ERROR:
- err = -EIO;
- break;
- case EFI_WRITE_PROTECTED:
- err = -EROFS;
- break;
- case EFI_SECURITY_VIOLATION:
- err = -EACCES;
- break;
- case EFI_NOT_FOUND:
- err = -ENOENT;
- break;
- default:
- err = -EINVAL;
- }
-
- return err;
-}
-
static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor,
struct list_head *head)
{
@@ -452,8 +419,7 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
* Returns 0 on success, or a kernel error code on failure.
*/
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
- void *data, bool atomic, bool duplicates,
- struct list_head *head)
+ void *data, bool duplicates, struct list_head *head)
{
const struct efivar_operations *ops = __efivars->ops;
unsigned long variable_name_size = 1024;
@@ -483,7 +449,7 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
&vendor_guid);
switch (status) {
case EFI_SUCCESS:
- if (!atomic)
+ if (duplicates)
spin_unlock_irq(&__efivars->lock);
variable_name_size = var_name_strnsize(variable_name,
@@ -498,21 +464,19 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
* and may end up looping here forever.
*/
if (duplicates &&
- variable_is_present(variable_name, &vendor_guid, head)) {
+ variable_is_present(variable_name, &vendor_guid,
+ head)) {
dup_variable_bug(variable_name, &vendor_guid,
variable_name_size);
- if (!atomic)
- spin_lock_irq(&__efivars->lock);
-
status = EFI_NOT_FOUND;
- break;
+ } else {
+ err = func(variable_name, vendor_guid,
+ variable_name_size, data);
+ if (err)
+ status = EFI_NOT_FOUND;
}
- err = func(variable_name, vendor_guid, variable_name_size, data);
- if (err)
- status = EFI_NOT_FOUND;
-
- if (!atomic)
+ if (duplicates)
spin_lock_irq(&__efivars->lock);
break;
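
With the atomic parameter gone, callers select behaviour solely via
duplicates; when it is true, the __efivars lock is dropped around the
callback, so the callback may sleep. A minimal caller sketch under that
assumption (count_var() is hypothetical; error handling elided; types come
from linux/efi.h):

static int count_var(efi_char16_t *name, efi_guid_t vendor,
		     unsigned long name_size, void *data)
{
	(*(unsigned int *)data)++;
	return 0;
}

static unsigned int demo_count_variables(struct list_head *head)
{
	unsigned int count = 0;

	efivar_init(count_var, &count, true, head);
	return count;
}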
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 81037e5fe..14042a64b 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -418,6 +418,31 @@ static ssize_t ibft_attr_show_target(void *data, int type, char *buf)
return str - buf;
}
+static ssize_t ibft_attr_show_acpitbl(void *data, int type, char *buf)
+{
+ struct ibft_kobject *entry = data;
+ char *str = buf;
+
+ switch (type) {
+ case ISCSI_BOOT_ACPITBL_SIGNATURE:
+ str += sprintf_string(str, ACPI_NAME_SIZE,
+ entry->header->header.signature);
+ break;
+ case ISCSI_BOOT_ACPITBL_OEM_ID:
+ str += sprintf_string(str, ACPI_OEM_ID_SIZE,
+ entry->header->header.oem_id);
+ break;
+ case ISCSI_BOOT_ACPITBL_OEM_TABLE_ID:
+ str += sprintf_string(str, ACPI_OEM_TABLE_ID_SIZE,
+ entry->header->header.oem_table_id);
+ break;
+ default:
+ break;
+ }
+
+ return str - buf;
+}
+
static int __init ibft_check_device(void)
{
int len;
@@ -576,6 +601,24 @@ static umode_t __init ibft_check_initiator_for(void *data, int type)
return rc;
}
+static umode_t __init ibft_check_acpitbl_for(void *data, int type)
+{
+
+ umode_t rc = 0;
+
+ switch (type) {
+ case ISCSI_BOOT_ACPITBL_SIGNATURE:
+ case ISCSI_BOOT_ACPITBL_OEM_ID:
+ case ISCSI_BOOT_ACPITBL_OEM_TABLE_ID:
+ rc = S_IRUGO;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
static void ibft_kobj_release(void *data)
{
kfree(data);
@@ -699,6 +742,8 @@ free_ibft_obj:
static int __init ibft_register_kobjects(struct acpi_table_ibft *header)
{
struct ibft_control *control = NULL;
+ struct iscsi_boot_kobj *boot_kobj;
+ struct ibft_kobject *ibft_kobj;
void *ptr, *end;
int rc = 0;
u16 offset;
@@ -726,6 +771,25 @@ static int __init ibft_register_kobjects(struct acpi_table_ibft *header)
break;
}
}
+ if (rc)
+ return rc;
+
+ ibft_kobj = kzalloc(sizeof(*ibft_kobj), GFP_KERNEL);
+ if (!ibft_kobj)
+ return -ENOMEM;
+
+ ibft_kobj->header = header;
+	ibft_kobj->hdr = NULL; /* for ibft_unregister */
+
+ boot_kobj = iscsi_boot_create_acpitbl(boot_kset, 0,
+ ibft_kobj,
+ ibft_attr_show_acpitbl,
+ ibft_check_acpitbl_for,
+ ibft_kobj_release);
+ if (!boot_kobj) {
+ kfree(ibft_kobj);
+ rc = -ENOMEM;
+ }
return rc;
}
@@ -738,7 +802,7 @@ static void ibft_unregister(void)
list_for_each_entry_safe(boot_kobj, tmp_kobj,
&boot_kset->kobj_list, list) {
ibft_kobj = boot_kobj->data;
- if (ibft_kobj->hdr->id == id_nic)
+ if (ibft_kobj->hdr && ibft_kobj->hdr->id == id_nic)
sysfs_remove_link(&boot_kobj->kobj, "device");
};
}
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index b5d05807e..03e045827 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -91,7 +91,7 @@ static inline bool psci_has_ext_power_state(void)
PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK;
}
-bool psci_power_state_loses_context(u32 state)
+static inline bool psci_power_state_loses_context(u32 state)
{
const u32 mask = psci_has_ext_power_state() ?
PSCI_1_0_EXT_POWER_STATE_TYPE_MASK :
@@ -100,7 +100,7 @@ bool psci_power_state_loses_context(u32 state)
return state & mask;
}
-bool psci_power_state_is_valid(u32 state)
+static inline bool psci_power_state_is_valid(u32 state)
{
const u32 valid_mask = psci_has_ext_power_state() ?
PSCI_1_0_EXT_POWER_STATE_MASK :
@@ -355,7 +355,7 @@ int psci_cpu_suspend_enter(unsigned long index)
/* ARM specific CPU idle operations */
#ifdef CONFIG_ARM
-static struct cpuidle_ops psci_cpuidle_ops __initdata = {
+static const struct cpuidle_ops psci_cpuidle_ops __initconst = {
.suspend = psci_cpu_suspend_enter,
.init = psci_dt_cpu_init_idle,
};
@@ -563,7 +563,7 @@ out_put_node:
return err;
}
-static const struct of_device_id const psci_of_match[] __initconst = {
+static const struct of_device_id psci_of_match[] __initconst = {
{ .compatible = "arm,psci", .data = psci_0_1_init},
{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
{ .compatible = "arm,psci-1.0", .data = psci_0_2_init},
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 1b95475b6..0e2011636 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -125,9 +125,7 @@ static void fw_cfg_io_cleanup(void)
# define FW_CFG_CTRL_OFF 0x00
# define FW_CFG_DATA_OFF 0x01
# else
-# warning "QEMU FW_CFG may not be available on this architecture!"
-# define FW_CFG_CTRL_OFF 0x00
-# define FW_CFG_DATA_OFF 0x01
+# error "QEMU FW_CFG not available on this architecture!"
# endif
#endif
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 5f3429f0b..d7860614f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -33,7 +33,7 @@ config ARCH_REQUIRE_GPIOLIB
menuconfig GPIOLIB
bool "GPIO Support"
- depends on ARCH_WANT_OPTIONAL_GPIOLIB || ARCH_REQUIRE_GPIOLIB
+ select ANON_INODES
help
This enables GPIO support through the generic GPIO library.
You only need to enable this, if you also want to enable
@@ -122,6 +122,7 @@ config GPIO_ALTERA
config GPIO_AMDPT
tristate "AMD Promontory GPIO support"
depends on ACPI
+ select GPIO_GENERIC
help
	  Driver for GPIO functionality on the Promontory IOHub.
	  Requires ACPI ASL code to enumerate as a platform device.
@@ -303,6 +304,7 @@ config GPIO_MPC8XXX
FSL_SOC_BOOKE || PPC_86xx || ARCH_LAYERSCAPE || ARM || \
COMPILE_TEST
select GPIO_GENERIC
+ select IRQ_DOMAIN
help
Say Y here if you're going to use hardware that connects to the
MPC512x/831x/834x/837x/8572/8610/QorIQ GPIOs.
@@ -399,6 +401,14 @@ config GPIO_TB10X
select GENERIC_IRQ_CHIP
select OF_GPIO
+config GPIO_TEGRA
+ bool "NVIDIA Tegra GPIO support"
+ default ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on OF
+ help
+ Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
+
config GPIO_TS4800
tristate "TS-4800 DIO blocks and compatibles"
depends on OF_GPIO
@@ -473,7 +483,7 @@ config GPIO_XILINX
config GPIO_XLP
tristate "Netlogic XLP GPIO support"
- depends on CPU_XLP && OF_GPIO
+ depends on OF_GPIO && (CPU_XLP || ARCH_VULCAN || COMPILE_TEST)
select GPIOLIB_IRQCHIP
help
This driver provides support for GPIO interface on Netlogic XLP MIPS64
@@ -510,6 +520,13 @@ config GPIO_ZX
help
Say yes here to support the GPIO device on ZTE ZX SoCs.
+config GPIO_LOONGSON1
+ tristate "Loongson1 GPIO support"
+ depends on MACH_LOONGSON32
+ select GPIO_GENERIC
+ help
+ Say Y or M here to support GPIO on Loongson1 SoCs.
+
endmenu
menu "Port-mapped I/O GPIO drivers"
@@ -517,30 +534,35 @@ menu "Port-mapped I/O GPIO drivers"
config GPIO_104_DIO_48E
tristate "ACCES 104-DIO-48E GPIO support"
+ depends on ISA_BUS_API
select GPIOLIB_IRQCHIP
help
- Enables GPIO support for the ACCES 104-DIO-48E family. The base port
- address for the device may be configured via the dio_48e_base module
- parameter. The interrupt line number for the device may be configured
- via the dio_48e_irq module parameter.
+ Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E,
+ 104-DIO-24E). The base port addresses for the devices may be
+ configured via the base module parameter. The interrupt line numbers
+ for the devices may be configured via the irq module parameter.
config GPIO_104_IDIO_16
tristate "ACCES 104-IDIO-16 GPIO support"
+ depends on ISA_BUS_API
select GPIOLIB_IRQCHIP
help
- Enables GPIO support for the ACCES 104-IDIO-16 family. The base port
- address for the device may be set via the idio_16_base module
- parameter. The interrupt line number for the device may be set via the
- idio_16_irq module parameter.
+ Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16,
+ 104-IDIO-16E, 104-IDO-16, 104-IDIO-8, 104-IDIO-8E, 104-IDO-8). The
+ base port addresses for the devices may be configured via the base
+ module parameter. The interrupt line numbers for the devices may be
+ configured via the irq module parameter.
config GPIO_104_IDI_48
tristate "ACCES 104-IDI-48 GPIO support"
+ depends on ISA_BUS_API
select GPIOLIB_IRQCHIP
help
- Enables GPIO support for the ACCES 104-IDI-48 family. The base port
- address for the device may be configured via the idi_48_base module
- parameter. The interrupt line number for the device may be configured
- via the idi_48_irq module parameter.
+ Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A,
+ 104-IDI-48AC, 104-IDI-48B, 104-IDI-48BC). The base port addresses for
+ the devices may be configured via the base module parameter. The
+ interrupt line numbers for the devices may be configured via the irq
+ module parameter.
config GPIO_F7188X
tristate "F71869, F71869A, F71882FG, F71889F and F81866 GPIO support"
@@ -557,7 +579,7 @@ config GPIO_IT87
Say yes here to support GPIO functionality of IT87xx Super I/O chips.
This driver is tested with ITE IT8728 and IT8732 Super I/O chips, and
- supports the IT8761E Super I/O chip as well.
+	  supports the IT8761E, IT8620E and IT8628E Super I/O chips as well.
To compile this driver as a module, choose M here: the module will
be called gpio_it87
@@ -609,12 +631,13 @@ config GPIO_TS5500
config GPIO_WS16C48
tristate "WinSystems WS16C48 GPIO support"
+ depends on ISA_BUS_API
select GPIOLIB_IRQCHIP
help
- Enables GPIO support for the WinSystems WS16C48. The base port address
- for the device may be configured via the ws16c48_base module
- parameter. The interrupt line number for the device may be configured
- via the ws16c48_irq module parameter.
+ Enables GPIO support for the WinSystems WS16C48. The base port
+ addresses for the devices may be configured via the base module
+ parameter. The interrupt line numbers for the devices may be
+ configured via the irq module parameter.
endmenu
@@ -1091,6 +1114,7 @@ menu "SPI or I2C GPIO expanders"
config GPIO_MCP23S08
tristate "Microchip MCP23xxx I/O expander"
+ select GPIOLIB_IRQCHIP
help
SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
I/O expanders.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 1e0b74f3b..991598ea3 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -12,6 +12,9 @@ obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
# Device drivers. Generally keep list sorted alphabetically
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
+# directly supported by gpio-generic
+gpio-generic-$(CONFIG_GPIO_GENERIC) += gpio-mmio.o
+
obj-$(CONFIG_GPIO_104_DIO_48E) += gpio-104-dio-48e.o
obj-$(CONFIG_GPIO_104_IDIO_16) += gpio-104-idio-16.o
obj-$(CONFIG_GPIO_104_IDI_48) += gpio-104-idi-48.o
@@ -95,7 +98,7 @@ obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o
obj-$(CONFIG_GPIO_SYSCON) += gpio-syscon.o
obj-$(CONFIG_GPIO_TB10X) += gpio-tb10x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
-obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
+obj-$(CONFIG_GPIO_TEGRA) += gpio-tegra.o
obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
obj-$(CONFIG_GPIO_PALMAS) += gpio-palmas.o
obj-$(CONFIG_GPIO_TPIC2810) += gpio-tpic2810.o
@@ -127,3 +130,4 @@ obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o
obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
obj-$(CONFIG_GPIO_ZX) += gpio-zx.o
+obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 448a90308..fcf776971 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -1,5 +1,5 @@
/*
- * GPIO driver for the ACCES 104-DIO-48E
+ * GPIO driver for the ACCES 104-DIO-48E series
* Copyright (C) 2016 William Breathitt Gray
*
* This program is free software; you can redistribute it and/or modify
@@ -10,6 +10,9 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
+ *
+ * This driver supports the following ACCES devices: 104-DIO-48E and
+ * 104-DIO-24E.
*/
#include <linux/bitops.h>
#include <linux/device.h>
@@ -19,18 +22,23 @@
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irqdesc.h>
+#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
#include <linux/spinlock.h>
-static unsigned dio_48e_base;
-module_param(dio_48e_base, uint, 0);
-MODULE_PARM_DESC(dio_48e_base, "ACCES 104-DIO-48E base address");
-static unsigned dio_48e_irq;
-module_param(dio_48e_irq, uint, 0);
-MODULE_PARM_DESC(dio_48e_irq, "ACCES 104-DIO-48E interrupt line number");
+#define DIO48E_EXTENT 16
+#define MAX_NUM_DIO48E max_num_isa_dev(DIO48E_EXTENT)
+
+static unsigned int base[MAX_NUM_DIO48E];
+static unsigned int num_dio48e;
+module_param_array(base, uint, &num_dio48e, 0);
+MODULE_PARM_DESC(base, "ACCES 104-DIO-48E base addresses");
+
+static unsigned int irq[MAX_NUM_DIO48E];
+module_param_array(irq, uint, NULL, 0);
+MODULE_PARM_DESC(irq, "ACCES 104-DIO-48E interrupt line numbers");
/**
* struct dio48e_gpio - GPIO device private data structure
@@ -67,7 +75,7 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
const unsigned io_port = offset / 8;
- const unsigned control_port = io_port / 2;
+ const unsigned int control_port = io_port / 3;
const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
unsigned long flags;
unsigned control;
@@ -107,7 +115,7 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
{
struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
const unsigned io_port = offset / 8;
- const unsigned control_port = io_port / 2;
+ const unsigned int control_port = io_port / 3;
const unsigned mask = BIT(offset % 8);
const unsigned control_addr = dio48egpio->base + 3 + control_port*4;
const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port;
@@ -294,23 +302,19 @@ static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init dio48e_probe(struct platform_device *pdev)
+static int dio48e_probe(struct device *dev, unsigned int id)
{
- struct device *dev = &pdev->dev;
struct dio48e_gpio *dio48egpio;
- const unsigned base = dio_48e_base;
- const unsigned extent = 16;
const char *const name = dev_name(dev);
int err;
- const unsigned irq = dio_48e_irq;
dio48egpio = devm_kzalloc(dev, sizeof(*dio48egpio), GFP_KERNEL);
if (!dio48egpio)
return -ENOMEM;
- if (!devm_request_region(dev, base, extent, name)) {
+ if (!devm_request_region(dev, base[id], DIO48E_EXTENT, name)) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
- base, base + extent);
+ base[id], base[id] + DIO48E_EXTENT);
return -EBUSY;
}
@@ -324,8 +328,8 @@ static int __init dio48e_probe(struct platform_device *pdev)
dio48egpio->chip.direction_output = dio48e_gpio_direction_output;
dio48egpio->chip.get = dio48e_gpio_get;
dio48egpio->chip.set = dio48e_gpio_set;
- dio48egpio->base = base;
- dio48egpio->irq = irq;
+ dio48egpio->base = base[id];
+ dio48egpio->irq = irq[id];
spin_lock_init(&dio48egpio->lock);
@@ -338,19 +342,19 @@ static int __init dio48e_probe(struct platform_device *pdev)
}
/* initialize all GPIO as output */
- outb(0x80, base + 3);
- outb(0x00, base);
- outb(0x00, base + 1);
- outb(0x00, base + 2);
- outb(0x00, base + 3);
- outb(0x80, base + 7);
- outb(0x00, base + 4);
- outb(0x00, base + 5);
- outb(0x00, base + 6);
- outb(0x00, base + 7);
+ outb(0x80, base[id] + 3);
+ outb(0x00, base[id]);
+ outb(0x00, base[id] + 1);
+ outb(0x00, base[id] + 2);
+ outb(0x00, base[id] + 3);
+ outb(0x80, base[id] + 7);
+ outb(0x00, base[id] + 4);
+ outb(0x00, base[id] + 5);
+ outb(0x00, base[id] + 6);
+ outb(0x00, base[id] + 7);
/* disable IRQ by default */
- inb(base + 0xB);
+ inb(base[id] + 0xB);
err = gpiochip_irqchip_add(&dio48egpio->chip, &dio48e_irqchip, 0,
handle_edge_irq, IRQ_TYPE_NONE);
@@ -359,7 +363,7 @@ static int __init dio48e_probe(struct platform_device *pdev)
goto err_gpiochip_remove;
}
- err = request_irq(irq, dio48e_irq_handler, 0, name, dio48egpio);
+ err = request_irq(irq[id], dio48e_irq_handler, 0, name, dio48egpio);
if (err) {
dev_err(dev, "IRQ handler registering failed (%d)\n", err);
goto err_gpiochip_remove;
@@ -372,9 +376,9 @@ err_gpiochip_remove:
return err;
}
-static int dio48e_remove(struct platform_device *pdev)
+static int dio48e_remove(struct device *dev, unsigned int id)
{
- struct dio48e_gpio *const dio48egpio = platform_get_drvdata(pdev);
+ struct dio48e_gpio *const dio48egpio = dev_get_drvdata(dev);
free_irq(dio48egpio->irq, dio48egpio);
gpiochip_remove(&dio48egpio->chip);
@@ -382,48 +386,14 @@ static int dio48e_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_device *dio48e_device;
-
-static struct platform_driver dio48e_driver = {
+static struct isa_driver dio48e_driver = {
+ .probe = dio48e_probe,
.driver = {
.name = "104-dio-48e"
},
.remove = dio48e_remove
};
-
-static void __exit dio48e_exit(void)
-{
- platform_device_unregister(dio48e_device);
- platform_driver_unregister(&dio48e_driver);
-}
-
-static int __init dio48e_init(void)
-{
- int err;
-
- dio48e_device = platform_device_alloc(dio48e_driver.driver.name, -1);
- if (!dio48e_device)
- return -ENOMEM;
-
- err = platform_device_add(dio48e_device);
- if (err)
- goto err_platform_device;
-
- err = platform_driver_probe(&dio48e_driver, dio48e_probe);
- if (err)
- goto err_platform_driver;
-
- return 0;
-
-err_platform_driver:
- platform_device_del(dio48e_device);
-err_platform_device:
- platform_device_put(dio48e_device);
- return err;
-}
-
-module_init(dio48e_init);
-module_exit(dio48e_exit);
+module_isa_driver(dio48e_driver, num_dio48e);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-DIO-48E GPIO driver");
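
The MAX_NUM_DIO48E bound above comes from max_num_isa_dev(), which limits
the parameter arrays to however many non-overlapping devices fit in the
0x000-0x3FF ISA port range. A functionally equivalent expression
(illustration only, not the kernel's exact definition), plus the resulting
usage:

#define DEMO_ISA_IO_SPACE	0x400
#define demo_max_num_isa_dev(io_extent) (DEMO_ISA_IO_SPACE / (io_extent))

/*
 * Loading two cards then looks like:
 *   modprobe gpio-104-dio-48e base=0x260,0x280 irq=5,7
 * The ISA bus core calls dio48e_probe() once per array element, with
 * id selecting base[id] and irq[id].
 */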
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index e37cd4cdd..2d2763ea1 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -10,6 +10,9 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
+ *
+ * This driver supports the following ACCES devices: 104-IDI-48A,
+ * 104-IDI-48AC, 104-IDI-48B, and 104-IDI-48BC.
*/
#include <linux/bitops.h>
#include <linux/device.h>
@@ -19,18 +22,23 @@
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irqdesc.h>
+#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
#include <linux/spinlock.h>
-static unsigned idi_48_base;
-module_param(idi_48_base, uint, 0);
-MODULE_PARM_DESC(idi_48_base, "ACCES 104-IDI-48 base address");
-static unsigned idi_48_irq;
-module_param(idi_48_irq, uint, 0);
-MODULE_PARM_DESC(idi_48_irq, "ACCES 104-IDI-48 interrupt line number");
+#define IDI_48_EXTENT 8
+#define MAX_NUM_IDI_48 max_num_isa_dev(IDI_48_EXTENT)
+
+static unsigned int base[MAX_NUM_IDI_48];
+static unsigned int num_idi_48;
+module_param_array(base, uint, &num_idi_48, 0);
+MODULE_PARM_DESC(base, "ACCES 104-IDI-48 base addresses");
+
+static unsigned int irq[MAX_NUM_IDI_48];
+module_param_array(irq, uint, NULL, 0);
+MODULE_PARM_DESC(irq, "ACCES 104-IDI-48 interrupt line numbers");
/**
* struct idi_48_gpio - GPIO device private data structure
@@ -211,23 +219,19 @@ static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init idi_48_probe(struct platform_device *pdev)
+static int idi_48_probe(struct device *dev, unsigned int id)
{
- struct device *dev = &pdev->dev;
struct idi_48_gpio *idi48gpio;
- const unsigned base = idi_48_base;
- const unsigned extent = 8;
const char *const name = dev_name(dev);
int err;
- const unsigned irq = idi_48_irq;
idi48gpio = devm_kzalloc(dev, sizeof(*idi48gpio), GFP_KERNEL);
if (!idi48gpio)
return -ENOMEM;
- if (!devm_request_region(dev, base, extent, name)) {
+ if (!devm_request_region(dev, base[id], IDI_48_EXTENT, name)) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
- base, base + extent);
+ base[id], base[id] + IDI_48_EXTENT);
return -EBUSY;
}
@@ -239,10 +243,11 @@ static int __init idi_48_probe(struct platform_device *pdev)
idi48gpio->chip.get_direction = idi_48_gpio_get_direction;
idi48gpio->chip.direction_input = idi_48_gpio_direction_input;
idi48gpio->chip.get = idi_48_gpio_get;
- idi48gpio->base = base;
- idi48gpio->irq = irq;
+ idi48gpio->base = base[id];
+ idi48gpio->irq = irq[id];
spin_lock_init(&idi48gpio->lock);
+ spin_lock_init(&idi48gpio->ack_lock);
dev_set_drvdata(dev, idi48gpio);
@@ -253,8 +258,8 @@ static int __init idi_48_probe(struct platform_device *pdev)
}
/* Disable IRQ by default */
- outb(0, base + 7);
- inb(base + 7);
+ outb(0, base[id] + 7);
+ inb(base[id] + 7);
err = gpiochip_irqchip_add(&idi48gpio->chip, &idi_48_irqchip, 0,
handle_edge_irq, IRQ_TYPE_NONE);
@@ -263,7 +268,7 @@ static int __init idi_48_probe(struct platform_device *pdev)
goto err_gpiochip_remove;
}
- err = request_irq(irq, idi_48_irq_handler, IRQF_SHARED, name,
+ err = request_irq(irq[id], idi_48_irq_handler, IRQF_SHARED, name,
idi48gpio);
if (err) {
dev_err(dev, "IRQ handler registering failed (%d)\n", err);
@@ -277,9 +282,9 @@ err_gpiochip_remove:
return err;
}
-static int idi_48_remove(struct platform_device *pdev)
+static int idi_48_remove(struct device *dev, unsigned int id)
{
- struct idi_48_gpio *const idi48gpio = platform_get_drvdata(pdev);
+ struct idi_48_gpio *const idi48gpio = dev_get_drvdata(dev);
free_irq(idi48gpio->irq, idi48gpio);
gpiochip_remove(&idi48gpio->chip);
@@ -287,48 +292,14 @@ static int idi_48_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_device *idi_48_device;
-
-static struct platform_driver idi_48_driver = {
+static struct isa_driver idi_48_driver = {
+ .probe = idi_48_probe,
.driver = {
.name = "104-idi-48"
},
.remove = idi_48_remove
};
-
-static void __exit idi_48_exit(void)
-{
- platform_device_unregister(idi_48_device);
- platform_driver_unregister(&idi_48_driver);
-}
-
-static int __init idi_48_init(void)
-{
- int err;
-
- idi_48_device = platform_device_alloc(idi_48_driver.driver.name, -1);
- if (!idi_48_device)
- return -ENOMEM;
-
- err = platform_device_add(idi_48_device);
- if (err)
- goto err_platform_device;
-
- err = platform_driver_probe(&idi_48_driver, idi_48_probe);
- if (err)
- goto err_platform_driver;
-
- return 0;
-
-err_platform_driver:
- platform_device_del(idi_48_device);
-err_platform_device:
- platform_device_put(idi_48_device);
- return err;
-}
-
-module_init(idi_48_init);
-module_exit(idi_48_exit);
+module_isa_driver(idi_48_driver, num_idi_48);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-IDI-48 GPIO driver");
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index ecc85fe93..6787b8fcf 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -10,6 +10,9 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
+ *
+ * This driver supports the following ACCES devices: 104-IDIO-16,
+ * 104-IDIO-16E, 104-IDO-16, 104-IDIO-8, 104-IDIO-8E, and 104-IDO-8.
*/
#include <linux/bitops.h>
#include <linux/device.h>
@@ -19,18 +22,23 @@
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irqdesc.h>
+#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
#include <linux/spinlock.h>
-static unsigned idio_16_base;
-module_param(idio_16_base, uint, 0);
-MODULE_PARM_DESC(idio_16_base, "ACCES 104-IDIO-16 base address");
-static unsigned idio_16_irq;
-module_param(idio_16_irq, uint, 0);
-MODULE_PARM_DESC(idio_16_irq, "ACCES 104-IDIO-16 interrupt line number");
+#define IDIO_16_EXTENT 8
+#define MAX_NUM_IDIO_16 max_num_isa_dev(IDIO_16_EXTENT)
+
+static unsigned int base[MAX_NUM_IDIO_16];
+static unsigned int num_idio_16;
+module_param_array(base, uint, &num_idio_16, 0);
+MODULE_PARM_DESC(base, "ACCES 104-IDIO-16 base addresses");
+
+static unsigned int irq[MAX_NUM_IDIO_16];
+module_param_array(irq, uint, NULL, 0);
+MODULE_PARM_DESC(irq, "ACCES 104-IDIO-16 interrupt line numbers");
/**
* struct idio_16_gpio - GPIO device private data structure
@@ -185,23 +193,19 @@ static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init idio_16_probe(struct platform_device *pdev)
+static int idio_16_probe(struct device *dev, unsigned int id)
{
- struct device *dev = &pdev->dev;
struct idio_16_gpio *idio16gpio;
- const unsigned base = idio_16_base;
- const unsigned extent = 8;
const char *const name = dev_name(dev);
int err;
- const unsigned irq = idio_16_irq;
idio16gpio = devm_kzalloc(dev, sizeof(*idio16gpio), GFP_KERNEL);
if (!idio16gpio)
return -ENOMEM;
- if (!devm_request_region(dev, base, extent, name)) {
+ if (!devm_request_region(dev, base[id], IDIO_16_EXTENT, name)) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
- base, base + extent);
+ base[id], base[id] + IDIO_16_EXTENT);
return -EBUSY;
}
@@ -215,8 +219,8 @@ static int __init idio_16_probe(struct platform_device *pdev)
idio16gpio->chip.direction_output = idio_16_gpio_direction_output;
idio16gpio->chip.get = idio_16_gpio_get;
idio16gpio->chip.set = idio_16_gpio_set;
- idio16gpio->base = base;
- idio16gpio->irq = irq;
+ idio16gpio->base = base[id];
+ idio16gpio->irq = irq[id];
idio16gpio->out_state = 0xFFFF;
spin_lock_init(&idio16gpio->lock);
@@ -230,8 +234,8 @@ static int __init idio_16_probe(struct platform_device *pdev)
}
/* Disable IRQ by default */
- outb(0, base + 2);
- outb(0, base + 1);
+ outb(0, base[id] + 2);
+ outb(0, base[id] + 1);
err = gpiochip_irqchip_add(&idio16gpio->chip, &idio_16_irqchip, 0,
handle_edge_irq, IRQ_TYPE_NONE);
@@ -240,7 +244,7 @@ static int __init idio_16_probe(struct platform_device *pdev)
goto err_gpiochip_remove;
}
- err = request_irq(irq, idio_16_irq_handler, 0, name, idio16gpio);
+ err = request_irq(irq[id], idio_16_irq_handler, 0, name, idio16gpio);
if (err) {
dev_err(dev, "IRQ handler registering failed (%d)\n", err);
goto err_gpiochip_remove;
@@ -253,9 +257,9 @@ err_gpiochip_remove:
return err;
}
-static int idio_16_remove(struct platform_device *pdev)
+static int idio_16_remove(struct device *dev, unsigned int id)
{
- struct idio_16_gpio *const idio16gpio = platform_get_drvdata(pdev);
+ struct idio_16_gpio *const idio16gpio = dev_get_drvdata(dev);
free_irq(idio16gpio->irq, idio16gpio);
gpiochip_remove(&idio16gpio->chip);
@@ -263,48 +267,15 @@ static int idio_16_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_device *idio_16_device;
-
-static struct platform_driver idio_16_driver = {
+static struct isa_driver idio_16_driver = {
+ .probe = idio_16_probe,
.driver = {
.name = "104-idio-16"
},
.remove = idio_16_remove
};
-static void __exit idio_16_exit(void)
-{
- platform_device_unregister(idio_16_device);
- platform_driver_unregister(&idio_16_driver);
-}
-
-static int __init idio_16_init(void)
-{
- int err;
-
- idio_16_device = platform_device_alloc(idio_16_driver.driver.name, -1);
- if (!idio_16_device)
- return -ENOMEM;
-
- err = platform_device_add(idio_16_device);
- if (err)
- goto err_platform_device;
-
- err = platform_driver_probe(&idio_16_driver, idio_16_probe);
- if (err)
- goto err_platform_driver;
-
- return 0;
-
-err_platform_driver:
- platform_device_del(idio_16_device);
-err_platform_device:
- platform_device_put(idio_16_device);
- return err;
-}
-
-module_init(idio_16_init);
-module_exit(idio_16_exit);
+module_isa_driver(idio_16_driver, num_idio_16);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("ACCES 104-IDIO-16 GPIO driver");
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index c81224ff2..80f9ddf13 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -75,6 +75,29 @@ static void gen_74x164_set_value(struct gpio_chip *gc,
mutex_unlock(&chip->lock);
}
+static void gen_74x164_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gen_74x164_chip *chip = gpiochip_get_data(gc);
+ unsigned int i, idx, shift;
+ u8 bank, bankmask;
+
+ mutex_lock(&chip->lock);
+ for (i = 0, bank = chip->registers - 1; i < chip->registers;
+ i++, bank--) {
+ idx = i / sizeof(*mask);
+ shift = i % sizeof(*mask) * BITS_PER_BYTE;
+ bankmask = mask[idx] >> shift;
+ if (!bankmask)
+ continue;
+
+ chip->buffer[bank] &= ~bankmask;
+ chip->buffer[bank] |= bankmask & (bits[idx] >> shift);
+ }
+ __gen_74x164_write_config(chip);
+ mutex_unlock(&chip->lock);
+}
+
static int gen_74x164_direction_output(struct gpio_chip *gc,
unsigned offset, int val)
{
@@ -114,6 +137,7 @@ static int gen_74x164_probe(struct spi_device *spi)
chip->gpio_chip.direction_output = gen_74x164_direction_output;
chip->gpio_chip.get = gen_74x164_get_value;
chip->gpio_chip.set = gen_74x164_set_value;
+ chip->gpio_chip.set_multiple = gen_74x164_set_multiple;
chip->gpio_chip.base = -1;
chip->registers = nregs;
@@ -153,6 +177,7 @@ static int gen_74x164_remove(struct spi_device *spi)
static const struct of_device_id gen_74x164_dt_ids[] = {
{ .compatible = "fairchild,74hc595" },
+ { .compatible = "nxp,74lvc594" },
{},
};
MODULE_DEVICE_TABLE(of, gen_74x164_dt_ids);
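
To see how gen_74x164_set_multiple() slices the bitmap, here is a small
stand-alone trace of the same arithmetic for a two-register chain (pure
demo program, not driver code):

#include <stdio.h>

#define BITS_PER_BYTE 8

int main(void)
{
	unsigned long mask = 0xff00;	/* touch GPIOs 8..15 only */
	unsigned long bits = 0x0f00;	/* 8..11 high, 12..15 low */
	unsigned char buffer[2] = { 0xaa, 0x55 }; /* buffer[1] = GPIOs 0..7 */
	unsigned int i, bank, shift;
	unsigned char bankmask;

	for (i = 0, bank = 1; i < 2; i++, bank--) {
		shift = i % sizeof(mask) * BITS_PER_BYTE;
		bankmask = mask >> shift;
		if (!bankmask)
			continue;	/* GPIOs 0..7 untouched */
		buffer[bank] = (buffer[bank] & ~bankmask) |
			       (bankmask & (bits >> shift));
	}
	printf("%02x %02x\n", buffer[0], buffer[1]);	/* prints: 0f 55 */
	return 0;
}

One spi_write() of the updated buffer then shifts the whole chain out in a
single transfer, which is the point of implementing set_multiple().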
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
index c2484046e..9b78dc837 100644
--- a/drivers/gpio/gpio-amdpt.c
+++ b/drivers/gpio/gpio-amdpt.c
@@ -28,7 +28,6 @@
struct pt_gpio_chip {
struct gpio_chip gc;
void __iomem *reg_base;
- spinlock_t lock;
};
static int pt_gpio_request(struct gpio_chip *gc, unsigned offset)
@@ -39,19 +38,19 @@ static int pt_gpio_request(struct gpio_chip *gc, unsigned offset)
dev_dbg(gc->parent, "pt_gpio_request offset=%x\n", offset);
- spin_lock_irqsave(&pt_gpio->lock, flags);
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
if (using_pins & BIT(offset)) {
dev_warn(gc->parent, "PT GPIO pin %x reconfigured\n",
offset);
- spin_unlock_irqrestore(&pt_gpio->lock, flags);
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return -EINVAL;
}
writel(using_pins | BIT(offset), pt_gpio->reg_base + PT_SYNC_REG);
- spin_unlock_irqrestore(&pt_gpio->lock, flags);
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
@@ -62,111 +61,17 @@ static void pt_gpio_free(struct gpio_chip *gc, unsigned offset)
unsigned long flags;
u32 using_pins;
- spin_lock_irqsave(&pt_gpio->lock, flags);
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
using_pins &= ~BIT(offset);
writel(using_pins, pt_gpio->reg_base + PT_SYNC_REG);
- spin_unlock_irqrestore(&pt_gpio->lock, flags);
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
dev_dbg(gc->parent, "pt_gpio_free offset=%x\n", offset);
}
-static void pt_gpio_set_value(struct gpio_chip *gc, unsigned offset, int value)
-{
- struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
- unsigned long flags;
- u32 data;
-
- dev_dbg(gc->parent, "pt_gpio_set_value offset=%x, value=%x\n",
- offset, value);
-
- spin_lock_irqsave(&pt_gpio->lock, flags);
-
- data = readl(pt_gpio->reg_base + PT_OUTPUTDATA_REG);
- data &= ~BIT(offset);
- if (value)
- data |= BIT(offset);
- writel(data, pt_gpio->reg_base + PT_OUTPUTDATA_REG);
-
- spin_unlock_irqrestore(&pt_gpio->lock, flags);
-}
-
-static int pt_gpio_get_value(struct gpio_chip *gc, unsigned offset)
-{
- struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
- unsigned long flags;
- u32 data;
-
- spin_lock_irqsave(&pt_gpio->lock, flags);
-
- data = readl(pt_gpio->reg_base + PT_DIRECTION_REG);
-
- /* configure as output */
- if (data & BIT(offset))
- data = readl(pt_gpio->reg_base + PT_OUTPUTDATA_REG);
- else /* configure as input */
- data = readl(pt_gpio->reg_base + PT_INPUTDATA_REG);
-
- spin_unlock_irqrestore(&pt_gpio->lock, flags);
-
- data >>= offset;
- data &= 1;
-
- dev_dbg(gc->parent, "pt_gpio_get_value offset=%x, value=%x\n",
- offset, data);
-
- return data;
-}
-
-static int pt_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
-{
- struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
- unsigned long flags;
- u32 data;
-
- dev_dbg(gc->parent, "pt_gpio_dirction_input offset=%x\n", offset);
-
- spin_lock_irqsave(&pt_gpio->lock, flags);
-
- data = readl(pt_gpio->reg_base + PT_DIRECTION_REG);
- data &= ~BIT(offset);
- writel(data, pt_gpio->reg_base + PT_DIRECTION_REG);
-
- spin_unlock_irqrestore(&pt_gpio->lock, flags);
-
- return 0;
-}
-
-static int pt_gpio_direction_output(struct gpio_chip *gc,
- unsigned offset, int value)
-{
- struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
- unsigned long flags;
- u32 data;
-
- dev_dbg(gc->parent, "pt_gpio_direction_output offset=%x, value=%x\n",
- offset, value);
-
- spin_lock_irqsave(&pt_gpio->lock, flags);
-
- data = readl(pt_gpio->reg_base + PT_OUTPUTDATA_REG);
- if (value)
- data |= BIT(offset);
- else
- data &= ~BIT(offset);
- writel(data, pt_gpio->reg_base + PT_OUTPUTDATA_REG);
-
- data = readl(pt_gpio->reg_base + PT_DIRECTION_REG);
- data |= BIT(offset);
- writel(data, pt_gpio->reg_base + PT_DIRECTION_REG);
-
- spin_unlock_irqrestore(&pt_gpio->lock, flags);
-
- return 0;
-}
-
static int pt_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -196,18 +101,19 @@ static int pt_gpio_probe(struct platform_device *pdev)
return PTR_ERR(pt_gpio->reg_base);
}
- spin_lock_init(&pt_gpio->lock);
+ ret = bgpio_init(&pt_gpio->gc, dev, 4,
+ pt_gpio->reg_base + PT_INPUTDATA_REG,
+ pt_gpio->reg_base + PT_OUTPUTDATA_REG, NULL,
+ pt_gpio->reg_base + PT_DIRECTION_REG, NULL,
+ BGPIOF_READ_OUTPUT_REG_SET);
+ if (ret) {
+ dev_err(&pdev->dev, "bgpio_init failed\n");
+ return ret;
+ }
- pt_gpio->gc.label = pdev->name;
pt_gpio->gc.owner = THIS_MODULE;
- pt_gpio->gc.parent = dev;
pt_gpio->gc.request = pt_gpio_request;
pt_gpio->gc.free = pt_gpio_free;
- pt_gpio->gc.direction_input = pt_gpio_direction_input;
- pt_gpio->gc.direction_output = pt_gpio_direction_output;
- pt_gpio->gc.get = pt_gpio_get_value;
- pt_gpio->gc.set = pt_gpio_set_value;
- pt_gpio->gc.base = -1;
pt_gpio->gc.ngpio = PT_TOTAL_GPIO;
#if defined(CONFIG_OF_GPIO)
pt_gpio->gc.of_node = pdev->dev.of_node;
@@ -239,6 +145,7 @@ static int pt_gpio_remove(struct platform_device *pdev)
static const struct acpi_device_id pt_gpio_acpi_match[] = {
{ "AMDF030", 0 },
+ { "AMDIF030", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, pt_gpio_acpi_match);
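
For reference, the bgpio_init() call introduced above maps the Promontory
registers onto the generic MMIO GPIO core roughly as follows (annotated
restatement of the arguments, not additional code):

/*
 * bgpio_init(&pt_gpio->gc, dev,
 *	      4,			   32-bit registers
 *	      base + PT_INPUTDATA_REG,	   dat: get() reads inputs here
 *	      base + PT_OUTPUTDATA_REG,	   set: set() writes outputs here
 *	      NULL,			   no separate clear register
 *	      base + PT_DIRECTION_REG,	   dirout: 1 = output
 *	      NULL,			   no separate input-dir register
 *	      BGPIOF_READ_OUTPUT_REG_SET)  get() on an output pin reads
 *					   the output latch, not the pad
 */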
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 3c5e83263..953e4b829 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -1,4 +1,7 @@
/*
+ * Broadcom Kona GPIO Driver
+ *
+ * Author: Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>
* Copyright (C) 2012-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
@@ -17,7 +20,7 @@
#include <linux/gpio.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
@@ -502,8 +505,6 @@ static struct of_device_id const bcm_kona_gpio_of_match[] = {
{}
};
-MODULE_DEVICE_TABLE(of, bcm_kona_gpio_of_match);
-
/*
* This lock class tells lockdep that GPIO irqs are in a different
* category than their parents, so it won't report false recursion.
@@ -659,9 +660,4 @@ static struct platform_driver bcm_kona_gpio_driver = {
},
.probe = bcm_kona_gpio_probe,
};
-
-module_platform_driver(bcm_kona_gpio_driver);
-
-MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom Kona GPIO Driver");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(bcm_kona_gpio_driver);
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 42d51c59e..e64891437 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -461,6 +461,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
bank->id = num_banks;
if (bank_width <= 0 || bank_width > MAX_GPIO_PER_BANK) {
dev_err(dev, "Invalid bank width %d\n", bank_width);
+ err = -EINVAL;
goto fail;
} else {
bank->width = bank_width;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 597de1ef4..34779bb37 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -7,6 +7,7 @@
*
* All enquiries to support@picochip.com
*/
+#include <linux/acpi.h>
#include <linux/gpio/driver.h>
/* FIXME: for gpio_get_value(), replace this with direct register read */
#include <linux/gpio.h>
@@ -22,10 +23,13 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/spinlock.h>
#include <linux/platform_data/gpio-dwapb.h>
#include <linux/slab.h>
+#include "gpiolib.h"
+
#define GPIO_SWPORTA_DR 0x00
#define GPIO_SWPORTA_DDR 0x04
#define GPIO_SWPORTB_DR 0x0c
@@ -290,14 +294,14 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
struct dwapb_port_property *pp)
{
struct gpio_chip *gc = &port->gc;
- struct device_node *node = pp->node;
+ struct fwnode_handle *fwnode = pp->fwnode;
struct irq_chip_generic *irq_gc = NULL;
unsigned int hwirq, ngpio = gc->ngpio;
struct irq_chip_type *ct;
int err, i;
- gpio->domain = irq_domain_add_linear(node, ngpio,
- &irq_generic_chip_ops, gpio);
+ gpio->domain = irq_domain_create_linear(fwnode, ngpio,
+ &irq_generic_chip_ops, gpio);
if (!gpio->domain)
return;
@@ -409,13 +413,13 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout,
NULL, false);
if (err) {
- dev_err(gpio->dev, "failed to init gpio chip for %s\n",
- pp->name);
+ dev_err(gpio->dev, "failed to init gpio chip for port%d\n",
+ port->idx);
return err;
}
#ifdef CONFIG_OF_GPIO
- port->gc.of_node = pp->node;
+ port->gc.of_node = to_of_node(pp->fwnode);
#endif
port->gc.ngpio = pp->ngpio;
port->gc.base = pp->gpio_base;
@@ -429,11 +433,15 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
err = gpiochip_add_data(&port->gc, port);
if (err)
- dev_err(gpio->dev, "failed to register gpiochip for %s\n",
- pp->name);
+ dev_err(gpio->dev, "failed to register gpiochip for port%d\n",
+ port->idx);
else
port->is_registered = true;
+ /* Add GPIO-signaled ACPI event support */
+ if (pp->irq)
+ acpi_gpiochip_request_interrupts(&port->gc);
+
return err;
}
@@ -447,19 +455,15 @@ static void dwapb_gpio_unregister(struct dwapb_gpio *gpio)
}
static struct dwapb_platform_data *
-dwapb_gpio_get_pdata_of(struct device *dev)
+dwapb_gpio_get_pdata(struct device *dev)
{
- struct device_node *node, *port_np;
+ struct fwnode_handle *fwnode;
struct dwapb_platform_data *pdata;
struct dwapb_port_property *pp;
int nports;
int i;
- node = dev->of_node;
- if (!IS_ENABLED(CONFIG_OF_GPIO) || !node)
- return ERR_PTR(-ENODEV);
-
- nports = of_get_child_count(node);
+ nports = device_get_child_node_count(dev);
if (nports == 0)
return ERR_PTR(-ENODEV);
@@ -474,21 +478,22 @@ dwapb_gpio_get_pdata_of(struct device *dev)
pdata->nports = nports;
i = 0;
- for_each_child_of_node(node, port_np) {
+ device_for_each_child_node(dev, fwnode) {
pp = &pdata->properties[i++];
- pp->node = port_np;
+ pp->fwnode = fwnode;
- if (of_property_read_u32(port_np, "reg", &pp->idx) ||
+ if (fwnode_property_read_u32(fwnode, "reg", &pp->idx) ||
pp->idx >= DWAPB_MAX_PORTS) {
- dev_err(dev, "missing/invalid port index for %s\n",
- port_np->full_name);
+ dev_err(dev,
+ "missing/invalid port index for port%d\n", i);
return ERR_PTR(-EINVAL);
}
- if (of_property_read_u32(port_np, "snps,nr-gpios",
+ if (fwnode_property_read_u32(fwnode, "snps,nr-gpios",
&pp->ngpio)) {
- dev_info(dev, "failed to get number of gpios for %s\n",
- port_np->full_name);
+ dev_info(dev,
+ "failed to get number of gpios for port%d\n",
+ i);
pp->ngpio = 32;
}
@@ -496,18 +501,19 @@ dwapb_gpio_get_pdata_of(struct device *dev)
* Only port A can provide interrupts in all configurations of
* the IP.
*/
- if (pp->idx == 0 &&
- of_property_read_bool(port_np, "interrupt-controller")) {
- pp->irq = irq_of_parse_and_map(port_np, 0);
- if (!pp->irq) {
- dev_warn(dev, "no irq for bank %s\n",
- port_np->full_name);
- }
+ if (dev->of_node && pp->idx == 0 &&
+ fwnode_property_read_bool(fwnode,
+ "interrupt-controller")) {
+ pp->irq = irq_of_parse_and_map(to_of_node(fwnode), 0);
+ if (!pp->irq)
+ dev_warn(dev, "no irq for port%d\n", pp->idx);
}
+ if (has_acpi_companion(dev) && pp->idx == 0)
+ pp->irq = platform_get_irq(to_platform_device(dev), 0);
+
pp->irq_shared = false;
pp->gpio_base = -1;
- pp->name = port_np->full_name;
}
return pdata;
@@ -523,7 +529,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
struct dwapb_platform_data *pdata = dev_get_platdata(dev);
if (!pdata) {
- pdata = dwapb_gpio_get_pdata_of(dev);
+ pdata = dwapb_gpio_get_pdata(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
}
@@ -580,6 +586,13 @@ static const struct of_device_id dwapb_of_match[] = {
};
MODULE_DEVICE_TABLE(of, dwapb_of_match);
+static const struct acpi_device_id dwapb_acpi_match[] = {
+ {"HISI0181", 0},
+ {"APMC0D07", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, dwapb_acpi_match);
+
#ifdef CONFIG_PM_SLEEP
static int dwapb_gpio_suspend(struct device *dev)
{
@@ -674,6 +687,7 @@ static struct platform_driver dwapb_gpio_driver = {
.name = "gpio-dwapb",
.pm = &dwapb_gpio_pm_ops,
.of_match_table = of_match_ptr(dwapb_of_match),
+ .acpi_match_table = ACPI_PTR(dwapb_acpi_match),
},
.probe = dwapb_gpio_probe,
.remove = dwapb_gpio_remove,
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index daac2d480..05aa538c3 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -15,7 +15,8 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/bitops.h>
#define DRVNAME "gpio-f7188x"
@@ -129,6 +130,9 @@ static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset);
static int f7188x_gpio_direction_out(struct gpio_chip *chip,
unsigned offset, int value);
static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value);
+static int f7188x_gpio_set_single_ended(struct gpio_chip *gc,
+ unsigned offset,
+ enum single_ended_mode mode);
#define F7188X_GPIO_BANK(_base, _ngpio, _regbase) \
{ \
@@ -139,6 +143,7 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value);
.get = f7188x_gpio_get, \
.direction_output = f7188x_gpio_direction_out, \
.set = f7188x_gpio_set, \
+ .set_single_ended = f7188x_gpio_set_single_ended, \
.base = _base, \
.ngpio = _ngpio, \
.can_sleep = true, \
@@ -217,7 +222,7 @@ static int f7188x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
superio_select(sio->addr, SIO_LD_GPIO);
dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
- dir &= ~(1 << offset);
+ dir &= ~BIT(offset);
superio_outb(sio->addr, gpio_dir(bank->regbase), dir);
superio_exit(sio->addr);
@@ -238,7 +243,7 @@ static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset)
superio_select(sio->addr, SIO_LD_GPIO);
dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
- dir = !!(dir & (1 << offset));
+ dir = !!(dir & BIT(offset));
if (dir)
data = superio_inb(sio->addr, gpio_data_out(bank->regbase));
else
@@ -246,7 +251,7 @@ static int f7188x_gpio_get(struct gpio_chip *chip, unsigned offset)
superio_exit(sio->addr);
- return !!(data & 1 << offset);
+ return !!(data & BIT(offset));
}
static int f7188x_gpio_direction_out(struct gpio_chip *chip,
@@ -264,13 +269,13 @@ static int f7188x_gpio_direction_out(struct gpio_chip *chip,
data_out = superio_inb(sio->addr, gpio_data_out(bank->regbase));
if (value)
- data_out |= (1 << offset);
+ data_out |= BIT(offset);
else
- data_out &= ~(1 << offset);
+ data_out &= ~BIT(offset);
superio_outb(sio->addr, gpio_data_out(bank->regbase), data_out);
dir = superio_inb(sio->addr, gpio_dir(bank->regbase));
- dir |= (1 << offset);
+ dir |= BIT(offset);
superio_outb(sio->addr, gpio_dir(bank->regbase), dir);
superio_exit(sio->addr);
@@ -292,14 +297,43 @@ static void f7188x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
data_out = superio_inb(sio->addr, gpio_data_out(bank->regbase));
if (value)
- data_out |= (1 << offset);
+ data_out |= BIT(offset);
else
- data_out &= ~(1 << offset);
+ data_out &= ~BIT(offset);
superio_outb(sio->addr, gpio_data_out(bank->regbase), data_out);
superio_exit(sio->addr);
}
+static int f7188x_gpio_set_single_ended(struct gpio_chip *chip,
+ unsigned offset,
+ enum single_ended_mode mode)
+{
+ int err;
+ struct f7188x_gpio_bank *bank = gpiochip_get_data(chip);
+ struct f7188x_sio *sio = bank->data->sio;
+ u8 data;
+
+ if (mode != LINE_MODE_OPEN_DRAIN &&
+ mode != LINE_MODE_PUSH_PULL)
+ return -ENOTSUPP;
+
+ err = superio_enter(sio->addr);
+ if (err)
+ return err;
+ superio_select(sio->addr, SIO_LD_GPIO);
+
+ data = superio_inb(sio->addr, gpio_out_mode(bank->regbase));
+ if (mode == LINE_MODE_OPEN_DRAIN)
+ data &= ~BIT(offset);
+ else
+ data |= BIT(offset);
+ superio_outb(sio->addr, gpio_out_mode(bank->regbase), data);
+
+ superio_exit(sio->addr);
+ return 0;
+}
+
/*
* Platform device and driver.
*/
diff --git a/drivers/gpio/gpio-it87.c b/drivers/gpio/gpio-it87.c
index b219c8241..63a962d18 100644
--- a/drivers/gpio/gpio-it87.c
+++ b/drivers/gpio/gpio-it87.c
@@ -34,6 +34,8 @@
/* Chip Id numbers */
#define NO_DEV_ID 0xffff
+#define IT8620_ID 0x8620
+#define IT8628_ID 0x8628
#define IT8728_ID 0x8728
#define IT8732_ID 0x8732
#define IT8761_ID 0x8761
@@ -302,6 +304,14 @@ static int __init it87_gpio_init(void)
it87_gpio->chip = it87_template_chip;
switch (chip_type) {
+ case IT8620_ID:
+ case IT8628_ID:
+ gpio_ba_reg = 0x62;
+ it87_gpio->io_size = 11;
+ it87_gpio->output_base = 0xc8;
+ it87_gpio->simple_size = 0;
+ it87_gpio->chip.ngpio = 64;
+ break;
case IT8728_ID:
case IT8732_ID:
gpio_ba_reg = 0x62;
diff --git a/drivers/gpio/gpio-loongson1.c b/drivers/gpio/gpio-loongson1.c
new file mode 100644
index 000000000..10c09bdd8
--- /dev/null
+++ b/drivers/gpio/gpio-loongson1.c
@@ -0,0 +1,102 @@
+/*
+ * GPIO Driver for Loongson 1 SoC
+ *
+ * Copyright (C) 2015-2016 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+
+/* Loongson 1 GPIO Register Definitions */
+#define GPIO_CFG 0x0
+#define GPIO_DIR 0x10
+#define GPIO_DATA 0x20
+#define GPIO_OUTPUT 0x30
+
+static void __iomem *gpio_reg_base;
+
+static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+ unsigned long pinmask = gc->pin2mask(gc, offset);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+ __raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) | pinmask,
+ gpio_reg_base + GPIO_CFG);
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+ return 0;
+}
+
+static void ls1x_gpio_free(struct gpio_chip *gc, unsigned int offset)
+{
+ unsigned long pinmask = gc->pin2mask(gc, offset);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gc->bgpio_lock, flags);
+ __raw_writel(__raw_readl(gpio_reg_base + GPIO_CFG) & ~pinmask,
+ gpio_reg_base + GPIO_CFG);
+ spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+}
+
+static int ls1x_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gpio_chip *gc;
+ struct resource *res;
+ int ret;
+
+ gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
+ if (!gc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "failed to get I/O memory\n");
+ return -EINVAL;
+ }
+
+ gpio_reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(gpio_reg_base))
+ return PTR_ERR(gpio_reg_base);
+
+ ret = bgpio_init(gc, dev, 4, gpio_reg_base + GPIO_DATA,
+ gpio_reg_base + GPIO_OUTPUT, NULL,
+ NULL, gpio_reg_base + GPIO_DIR, 0);
+ if (ret)
+ goto err;
+
+ gc->owner = THIS_MODULE;
+ gc->request = ls1x_gpio_request;
+ gc->free = ls1x_gpio_free;
+ gc->base = pdev->id * 32;
+
+ ret = devm_gpiochip_add_data(dev, gc, NULL);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, gc);
+ dev_info(dev, "Loongson1 GPIO driver registered\n");
+
+ return 0;
+err:
+ dev_err(dev, "failed to register GPIO device\n");
+ return ret;
+}
+
+static struct platform_driver ls1x_gpio_driver = {
+ .probe = ls1x_gpio_probe,
+ .driver = {
+ .name = "ls1x-gpio",
+ },
+};
+
+module_platform_driver(ls1x_gpio_driver);
+
+MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson1 GPIO driver");
+MODULE_LICENSE("GPL");
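
Since this driver has no OF match table, it binds by platform device name;
a hypothetical board file (the register address is made up for
illustration) would register the device like this, with pdev->id picking
the bank so that gc->base = id * 32 keeps global GPIO numbers stable:

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource demo_ls1x_gpio1_res[] = {
	DEFINE_RES_MEM(0x1fd010c4, 0x40),	/* assumed register block */
};

static struct platform_device demo_ls1x_gpio1_dev = {
	.name		= "ls1x-gpio",
	.id		= 1,			/* bank 1 -> GPIOs 32..63 */
	.resource	= demo_ls1x_gpio1_res,
	.num_resources	= ARRAY_SIZE(demo_ls1x_gpio1_res),
};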
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index d39014dae..fc5f19790 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -29,7 +29,6 @@
#include <mach/hardware.h>
#include <mach/platform.h>
-#include <mach/irqs.h>
#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000)
#define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004)
@@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)
static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
{
- return IRQ_LPC32XX_P0_P1_IRQ;
+ return -ENXIO;
}
-static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = {
- IRQ_LPC32XX_GPIO_00,
- IRQ_LPC32XX_GPIO_01,
- IRQ_LPC32XX_GPIO_02,
- IRQ_LPC32XX_GPIO_03,
- IRQ_LPC32XX_GPIO_04,
- IRQ_LPC32XX_GPIO_05,
-};
-
static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
{
- if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table))
- return lpc32xx_gpio_to_irq_gpio_p3_table[offset];
return -ENXIO;
}
-static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = {
- IRQ_LPC32XX_GPI_00,
- IRQ_LPC32XX_GPI_01,
- IRQ_LPC32XX_GPI_02,
- IRQ_LPC32XX_GPI_03,
- IRQ_LPC32XX_GPI_04,
- IRQ_LPC32XX_GPI_05,
- IRQ_LPC32XX_GPI_06,
- IRQ_LPC32XX_GPI_07,
- IRQ_LPC32XX_GPI_08,
- IRQ_LPC32XX_GPI_09,
- -ENXIO, /* 10 */
- -ENXIO, /* 11 */
- -ENXIO, /* 12 */
- -ENXIO, /* 13 */
- -ENXIO, /* 14 */
- -ENXIO, /* 15 */
- -ENXIO, /* 16 */
- -ENXIO, /* 17 */
- -ENXIO, /* 18 */
- IRQ_LPC32XX_GPI_19,
- -ENXIO, /* 20 */
- -ENXIO, /* 21 */
- -ENXIO, /* 22 */
- -ENXIO, /* 23 */
- -ENXIO, /* 24 */
- -ENXIO, /* 25 */
- -ENXIO, /* 26 */
- -ENXIO, /* 27 */
- IRQ_LPC32XX_GPI_28,
-};
-
static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
{
- if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table))
- return lpc32xx_gpio_to_irq_gpi_p3_table[offset];
return -ENXIO;
}
diff --git a/drivers/gpio/gpio-mb86s7x.c b/drivers/gpio/gpio-mb86s7x.c
index 7fffc1d6c..d55af50e7 100644
--- a/drivers/gpio/gpio-mb86s7x.c
+++ b/drivers/gpio/gpio-mb86s7x.c
@@ -17,7 +17,6 @@
#include <linux/io.h>
#include <linux/init.h>
#include <linux/clk.h>
-#include <linux/module.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/ioport.h>
@@ -185,8 +184,6 @@ static int mb86s70_gpio_probe(struct platform_device *pdev)
gchip->gc.parent = &pdev->dev;
gchip->gc.base = -1;
- platform_set_drvdata(pdev, gchip);
-
ret = gpiochip_add_data(&gchip->gc, gchip);
if (ret) {
dev_err(&pdev->dev, "couldn't register gpio driver\n");
@@ -210,7 +207,6 @@ static const struct of_device_id mb86s70_gpio_dt_ids[] = {
{ .compatible = "fujitsu,mb86s70-gpio" },
{ /* sentinel */ }
};
-MODULE_DEVICE_TABLE(of, mb86s70_gpio_dt_ids);
static struct platform_driver mb86s70_gpio_driver = {
.driver = {
@@ -225,8 +221,4 @@ static int __init mb86s70_gpio_init(void)
{
return platform_driver_register(&mb86s70_gpio_driver);
}
-module_init(mb86s70_gpio_init);
-
-MODULE_DESCRIPTION("MB86S7x GPIO Driver");
-MODULE_ALIAS("platform:mb86s70-gpio");
-MODULE_LICENSE("GPL");
+device_initcall(mb86s70_gpio_init);
diff --git a/drivers/gpio/gpio-mc9s08dz60.c b/drivers/gpio/gpio-mc9s08dz60.c
index 14f252f9e..2fcad5b9c 100644
--- a/drivers/gpio/gpio-mc9s08dz60.c
+++ b/drivers/gpio/gpio-mc9s08dz60.c
@@ -15,7 +15,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
@@ -111,8 +111,6 @@ static const struct i2c_device_id mc9s08dz60_id[] = {
{},
};
-MODULE_DEVICE_TABLE(i2c, mc9s08dz60_id);
-
static struct i2c_driver mc9s08dz60_i2c_driver = {
.driver = {
.name = "mc9s08dz60",
@@ -120,10 +118,4 @@ static struct i2c_driver mc9s08dz60_i2c_driver = {
.probe = mc9s08dz60_probe,
.id_table = mc9s08dz60_id,
};
-
-module_i2c_driver(mc9s08dz60_i2c_driver);
-
-MODULE_AUTHOR("Freescale Semiconductor, Inc. "
- "Wu Guoxing <b39297@freescale.com>");
-MODULE_DESCRIPTION("mc9s08dz60 gpio function on mx35 3ds board");
-MODULE_LICENSE("GPL v2");
+builtin_i2c_driver(mc9s08dz60_i2c_driver);
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 47e486910..ac22efc18 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -77,7 +77,6 @@ struct mcp23s08 {
/* lock protects the cached values */
struct mutex lock;
struct mutex irq_lock;
- struct irq_domain *irq_domain;
struct gpio_chip chip;
@@ -96,11 +95,6 @@ struct mcp23s08_driver_data {
struct mcp23s08 chip[];
};
-/* This lock class tells lockdep that GPIO irqs are in a different
- * category than their parents, so it won't report false recursion.
- */
-static struct lock_class_key gpio_lock_class;
-
/*----------------------------------------------------------------------*/
#if IS_ENABLED(CONFIG_I2C)
@@ -368,8 +362,9 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
for (i = 0; i < mcp->chip.ngpio; i++) {
if ((BIT(i) & mcp->cache[MCP_INTF]) &&
((BIT(i) & intcap & mcp->irq_rise) ||
- (mcp->irq_fall & ~intcap & BIT(i)))) {
- child_irq = irq_find_mapping(mcp->irq_domain, i);
+ (mcp->irq_fall & ~intcap & BIT(i)) ||
+ (BIT(i) & mcp->cache[MCP_INTCON]))) {
+ child_irq = irq_find_mapping(mcp->chip.irqdomain, i);
handle_nested_irq(child_irq);
}
}
@@ -377,16 +372,10 @@ static irqreturn_t mcp23s08_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static int mcp23s08_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct mcp23s08 *mcp = gpiochip_get_data(chip);
-
- return irq_find_mapping(mcp->irq_domain, offset);
-}
-
static void mcp23s08_irq_mask(struct irq_data *data)
{
- struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct mcp23s08 *mcp = gpiochip_get_data(gc);
unsigned int pos = data->hwirq;
mcp->cache[MCP_GPINTEN] &= ~BIT(pos);
@@ -394,7 +383,8 @@ static void mcp23s08_irq_mask(struct irq_data *data)
static void mcp23s08_irq_unmask(struct irq_data *data)
{
- struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct mcp23s08 *mcp = gpiochip_get_data(gc);
unsigned int pos = data->hwirq;
mcp->cache[MCP_GPINTEN] |= BIT(pos);
@@ -402,7 +392,8 @@ static void mcp23s08_irq_unmask(struct irq_data *data)
static int mcp23s08_irq_set_type(struct irq_data *data, unsigned int type)
{
- struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct mcp23s08 *mcp = gpiochip_get_data(gc);
unsigned int pos = data->hwirq;
int status = 0;
@@ -418,6 +409,12 @@ static int mcp23s08_irq_set_type(struct irq_data *data, unsigned int type)
mcp->cache[MCP_INTCON] &= ~BIT(pos);
mcp->irq_rise &= ~BIT(pos);
mcp->irq_fall |= BIT(pos);
+ } else if (type & IRQ_TYPE_LEVEL_HIGH) {
+ mcp->cache[MCP_INTCON] |= BIT(pos);
+ mcp->cache[MCP_DEFVAL] &= ~BIT(pos);
+ } else if (type & IRQ_TYPE_LEVEL_LOW) {
+ mcp->cache[MCP_INTCON] |= BIT(pos);
+ mcp->cache[MCP_DEFVAL] |= BIT(pos);
} else
return -EINVAL;
@@ -426,14 +423,16 @@ static int mcp23s08_irq_set_type(struct irq_data *data, unsigned int type)
static void mcp23s08_irq_bus_lock(struct irq_data *data)
{
- struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct mcp23s08 *mcp = gpiochip_get_data(gc);
mutex_lock(&mcp->irq_lock);
}
static void mcp23s08_irq_bus_unlock(struct irq_data *data)
{
- struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct mcp23s08 *mcp = gpiochip_get_data(gc);
mutex_lock(&mcp->lock);
mcp->ops->write(mcp, MCP_GPINTEN, mcp->cache[MCP_GPINTEN]);
@@ -443,27 +442,6 @@ static void mcp23s08_irq_bus_unlock(struct irq_data *data)
mutex_unlock(&mcp->irq_lock);
}
-static int mcp23s08_irq_reqres(struct irq_data *data)
-{
- struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
-
- if (gpiochip_lock_as_irq(&mcp->chip, data->hwirq)) {
- dev_err(mcp->chip.parent,
- "unable to lock HW IRQ %lu for IRQ usage\n",
- data->hwirq);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void mcp23s08_irq_relres(struct irq_data *data)
-{
- struct mcp23s08 *mcp = irq_data_get_irq_chip_data(data);
-
- gpiochip_unlock_as_irq(&mcp->chip, data->hwirq);
-}
-
static struct irq_chip mcp23s08_irq_chip = {
.name = "gpio-mcp23xxx",
.irq_mask = mcp23s08_irq_mask,
@@ -471,24 +449,16 @@ static struct irq_chip mcp23s08_irq_chip = {
.irq_set_type = mcp23s08_irq_set_type,
.irq_bus_lock = mcp23s08_irq_bus_lock,
.irq_bus_sync_unlock = mcp23s08_irq_bus_unlock,
- .irq_request_resources = mcp23s08_irq_reqres,
- .irq_release_resources = mcp23s08_irq_relres,
};
static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
{
struct gpio_chip *chip = &mcp->chip;
- int err, irq, j;
+ int err;
unsigned long irqflags = IRQF_ONESHOT | IRQF_SHARED;
mutex_init(&mcp->irq_lock);
- mcp->irq_domain = irq_domain_add_linear(chip->parent->of_node,
- chip->ngpio,
- &irq_domain_simple_ops, mcp);
- if (!mcp->irq_domain)
- return -ENODEV;
-
if (mcp->irq_active_high)
irqflags |= IRQF_TRIGGER_HIGH;
else
@@ -503,30 +473,23 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
return err;
}
- chip->to_irq = mcp23s08_gpio_to_irq;
-
- for (j = 0; j < mcp->chip.ngpio; j++) {
- irq = irq_create_mapping(mcp->irq_domain, j);
- irq_set_lockdep_class(irq, &gpio_lock_class);
- irq_set_chip_data(irq, mcp);
- irq_set_chip(irq, &mcp23s08_irq_chip);
- irq_set_nested_thread(irq, true);
- irq_set_noprobe(irq);
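+	/*
+	 * gpiochip_irqchip_add() installs the gpio_chip itself as the irq
+	 * chip data, which is why the handlers above recover the mcp23s08
+	 * via gpiochip_get_data().
+	 */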
+ err = gpiochip_irqchip_add(chip,
+ &mcp23s08_irq_chip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
+ if (err) {
+ dev_err(chip->parent,
+ "could not connect irqchip to gpiochip: %d\n", err);
+ return err;
}
- return 0;
-}
-static void mcp23s08_irq_teardown(struct mcp23s08 *mcp)
-{
- unsigned int irq, i;
+ gpiochip_set_chained_irqchip(chip,
+ &mcp23s08_irq_chip,
+ mcp->irq,
+ NULL);
- for (i = 0; i < mcp->chip.ngpio; i++) {
- irq = irq_find_mapping(mcp->irq_domain, i);
- if (irq > 0)
- irq_dispose_mapping(irq);
- }
-
- irq_domain_remove(mcp->irq_domain);
+ return 0;
}
/*----------------------------------------------------------------------*/
@@ -721,7 +684,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
if (mcp->irq && mcp->irq_controller) {
status = mcp23s08_irq_setup(mcp);
if (status) {
- mcp23s08_irq_teardown(mcp);
goto fail;
}
}
@@ -847,9 +809,6 @@ static int mcp230xx_remove(struct i2c_client *client)
{
struct mcp23s08 *mcp = i2c_get_clientdata(client);
- if (client->irq && mcp->irq_controller)
- mcp23s08_irq_teardown(mcp);
-
gpiochip_remove(&mcp->chip);
kfree(mcp);
@@ -1017,8 +976,6 @@ static int mcp23s08_remove(struct spi_device *spi)
if (!data->mcp[addr])
continue;
- if (spi->irq && data->mcp[addr]->irq_controller)
- mcp23s08_irq_teardown(data->mcp[addr]);
gpiochip_remove(&data->mcp[addr]->chip);
}
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index c5c9599a3..cc103aff4 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -35,7 +35,6 @@
struct men_z127_gpio {
struct gpio_chip gc;
void __iomem *reg_base;
- struct mcb_device *mdev;
struct resource *mem;
};
@@ -43,7 +42,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
unsigned debounce)
{
struct men_z127_gpio *priv = gpiochip_get_data(gc);
- struct device *dev = &priv->mdev->dev;
+ struct device *dev = gc->parent;
unsigned int rnd;
u32 db_en, db_cnt;
@@ -88,21 +87,25 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
return 0;
}
-static int men_z127_request(struct gpio_chip *gc, unsigned gpio_pin)
+static int men_z127_set_single_ended(struct gpio_chip *gc,
+ unsigned offset,
+ enum single_ended_mode mode)
{
struct men_z127_gpio *priv = gpiochip_get_data(gc);
u32 od_en;
- if (gpio_pin >= gc->ngpio)
- return -EINVAL;
+ if (mode != LINE_MODE_OPEN_DRAIN &&
+ mode != LINE_MODE_PUSH_PULL)
+ return -ENOTSUPP;
spin_lock(&gc->bgpio_lock);
od_en = readl(priv->reg_base + MEN_Z127_ODER);
- if (gpiochip_line_is_open_drain(gc, gpio_pin))
- od_en |= BIT(gpio_pin);
+ if (mode == LINE_MODE_OPEN_DRAIN)
+ od_en |= BIT(offset);
else
- od_en &= ~BIT(gpio_pin);
+ /* Implicitly LINE_MODE_PUSH_PULL */
+ od_en &= ~BIT(offset);
writel(od_en, priv->reg_base + MEN_Z127_ODER);
spin_unlock(&gc->bgpio_lock);
@@ -135,7 +138,6 @@ static int men_z127_probe(struct mcb_device *mdev,
goto err_release;
}
- men_z127_gpio->mdev = mdev;
mcb_set_drvdata(mdev, men_z127_gpio);
ret = bgpio_init(&men_z127_gpio->gc, &mdev->dev, 4,
@@ -148,7 +150,7 @@ static int men_z127_probe(struct mcb_device *mdev,
goto err_unmap;
men_z127_gpio->gc.set_debounce = men_z127_debounce;
- men_z127_gpio->gc.request = men_z127_request;
+ men_z127_gpio->gc.set_single_ended = men_z127_set_single_ended;
ret = gpiochip_add_data(&men_z127_gpio->gc, men_z127_gpio);
if (ret) {
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-mmio.c
index 54cddfa98..6c1cb3b8c 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -549,7 +549,7 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
}
EXPORT_SYMBOL_GPL(bgpio_init);
-#ifdef CONFIG_GPIO_GENERIC_PLATFORM
+#if IS_ENABLED(CONFIG_GPIO_GENERIC_PLATFORM)
static void __iomem *bgpio_map(struct platform_device *pdev,
const char *name,
diff --git a/drivers/gpio/gpio-moxart.c b/drivers/gpio/gpio-moxart.c
index f02d0b490..d58d38906 100644
--- a/drivers/gpio/gpio-moxart.c
+++ b/drivers/gpio/gpio-moxart.c
@@ -15,7 +15,6 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
-#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
@@ -82,8 +81,4 @@ static struct platform_driver moxart_gpio_driver = {
},
.probe = moxart_gpio_probe,
};
-module_platform_driver(moxart_gpio_driver);
-
-MODULE_DESCRIPTION("MOXART GPIO chip driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+builtin_platform_driver(moxart_gpio_driver);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 11c6582ef..cd5dc2732 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -34,7 +34,7 @@
*/
#include <linux/err.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/slab.h>
@@ -557,7 +557,6 @@ static const struct of_device_id mvebu_gpio_of_match[] = {
/* sentinel */
},
};
-MODULE_DEVICE_TABLE(of, mvebu_gpio_of_match);
static int mvebu_gpio_suspend(struct platform_device *pdev, pm_message_t state)
{
@@ -838,4 +837,4 @@ static struct platform_driver mvebu_gpio_driver = {
.suspend = mvebu_gpio_suspend,
.resume = mvebu_gpio_resume,
};
-module_platform_driver(mvebu_gpio_driver);
+builtin_platform_driver(mvebu_gpio_driver);
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index 47aead1ed..96a8a8cb2 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -83,6 +83,7 @@ static int octeon_gpio_probe(struct platform_device *pdev)
struct octeon_gpio *gpio;
struct gpio_chip *chip;
struct resource *res_mem;
+ void __iomem *reg_base;
int err = 0;
gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
@@ -91,21 +92,11 @@ static int octeon_gpio_probe(struct platform_device *pdev)
chip = &gpio->chip;
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res_mem == NULL) {
- dev_err(&pdev->dev, "found no memory resource\n");
- err = -ENXIO;
- goto out;
- }
- if (!devm_request_mem_region(&pdev->dev, res_mem->start,
- resource_size(res_mem),
- res_mem->name)) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- err = -ENXIO;
- goto out;
- }
- gpio->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start,
- resource_size(res_mem));
+ reg_base = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+ gpio->register_base = (u64)reg_base;
pdev->dev.platform_data = chip;
chip->label = "octeon-gpio";
chip->parent = &pdev->dev;
@@ -119,14 +110,13 @@ static int octeon_gpio_probe(struct platform_device *pdev)
chip->set = octeon_gpio_set;
err = devm_gpiochip_add_data(&pdev->dev, chip, gpio);
if (err)
- goto out;
+ return err;
dev_info(&pdev->dev, "OCTEON GPIO driver probed.\n");
-out:
- return err;
+ return 0;
}
-static struct of_device_id octeon_gpio_match[] = {
+static const struct of_device_id octeon_gpio_match[] = {
{
.compatible = "cavium,octeon-3860-gpio",
},
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 551dfa9d9..b98ede78c 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -611,51 +611,12 @@ static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
omap_disable_gpio_irqbank(bank, BIT(offset));
}
-/*
- * Note that ENAWAKEUP needs to be enabled in GPIO_SYSCONFIG register.
- * 1510 does not seem to have a wake-up register. If JTAG is connected
- * to the target, system will wake up always on GPIO events. While
- * system is running all registered GPIO interrupts need to have wake-up
- * enabled. When system is suspended, only selected GPIO interrupts need
- * to have wake-up enabled.
- */
-static int omap_set_gpio_wakeup(struct gpio_bank *bank, unsigned offset,
- int enable)
-{
- u32 gpio_bit = BIT(offset);
- unsigned long flags;
-
- if (bank->non_wakeup_gpios & gpio_bit) {
- dev_err(bank->chip.parent,
- "Unable to modify wakeup on non-wakeup GPIO%d\n",
- offset);
- return -EINVAL;
- }
-
- raw_spin_lock_irqsave(&bank->lock, flags);
- if (enable)
- bank->context.wake_en |= gpio_bit;
- else
- bank->context.wake_en &= ~gpio_bit;
-
- writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
- raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
-}
-
/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
struct gpio_bank *bank = omap_irq_data_get_bank(d);
- unsigned offset = d->hwirq;
- int ret;
- ret = omap_set_gpio_wakeup(bank, offset, enable);
- if (!ret)
- ret = irq_set_irq_wake(bank->irq, enable);
-
- return ret;
+ return irq_set_irq_wake(bank->irq, enable);
}
static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
@@ -1187,6 +1148,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
irqc->name = dev_name(&pdev->dev);
+ irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
bank->irq = platform_get_irq(pdev, 0);
if (bank->irq <= 0) {
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 6f27b3d94..e248707ca 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -20,7 +20,7 @@
#include <linux/gpio.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mfd/palmas.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -218,14 +218,3 @@ static int __init palmas_gpio_init(void)
return platform_driver_register(&palmas_gpio_driver);
}
subsys_initcall(palmas_gpio_init);
-
-static void __exit palmas_gpio_exit(void)
-{
- platform_driver_unregister(&palmas_gpio_driver);
-}
-module_exit(palmas_gpio_exit);
-
-MODULE_ALIAS("platform:palmas-gpio");
-MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_DESCRIPTION("GPIO driver for TI Palmas series PMICs");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index e66084c29..5e3be32eb 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -38,8 +38,13 @@
#define PCA957X_MSK 6
#define PCA957X_INTS 7
+#define PCAL953X_IN_LATCH 34
+#define PCAL953X_INT_MASK 37
+#define PCAL953X_INT_STAT 38
+
#define PCA_GPIO_MASK 0x00FF
#define PCA_INT 0x0100
+#define PCA_PCAL 0x0200
#define PCA953X_TYPE 0x1000
#define PCA957X_TYPE 0x2000
#define PCA_TYPE_MASK 0xF000
@@ -77,7 +82,7 @@ static const struct i2c_device_id pca953x_id[] = {
MODULE_DEVICE_TABLE(i2c, pca953x_id);
static const struct acpi_device_id pca953x_acpi_ids[] = {
- { "INT3491", 16 | PCA953X_TYPE | PCA_INT, },
+ { "INT3491", 16 | PCA953X_TYPE | PCA_INT | PCA_PCAL, },
{ }
};
MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids);
@@ -437,6 +442,18 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 new_irqs;
int level, i;
+ u8 invert_irq_mask[MAX_BANK];
+
+ if (chip->driver_data & PCA_PCAL) {
+ /* Enable latch on interrupt-enabled inputs */
+ pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
+
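+		/* PCAL INT_MASK is active-low (1 = masked), hence the inversion */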
+ for (i = 0; i < NBANK(chip); i++)
+ invert_irq_mask[i] = ~chip->irq_mask[i];
+
+ /* Unmask enabled interrupts */
+ pca953x_write_regs(chip, PCAL953X_INT_MASK, invert_irq_mask);
+ }
/* Look for any newly setup interrupt */
for (i = 0; i < NBANK(chip); i++) {
@@ -498,6 +515,29 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
u8 trigger[MAX_BANK];
int ret, i, offset = 0;
+ if (chip->driver_data & PCA_PCAL) {
+ /* Read the current interrupt status from the device */
+ ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, trigger);
+ if (ret)
+ return false;
+
+ /* Check latched inputs and clear interrupt status */
+ ret = pca953x_read_regs(chip, PCA953X_INPUT, cur_stat);
+ if (ret)
+ return false;
+
+ for (i = 0; i < NBANK(chip); i++) {
+ /* Apply filter for rising/falling edge selection */
+ pending[i] = (~cur_stat[i] & chip->irq_trig_fall[i]) |
+ (cur_stat[i] & chip->irq_trig_raise[i]);
+ pending[i] &= trigger[i];
+ if (pending[i])
+ pending_seen = true;
+ }
+
+ return pending_seen;
+ }
+
switch (chip->chip_type) {
case PCA953X_TYPE:
offset = PCA953X_INPUT;
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 5cb38212b..6e3c14306 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -1,6 +1,8 @@
/*
* Copyright (C) 2008, 2009 Provigent Ltd.
*
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -11,7 +13,7 @@
*/
#include <linux/spinlock.h>
#include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
@@ -59,15 +61,19 @@ struct pl061_gpio {
#endif
};
+static int pl061_get_direction(struct gpio_chip *gc, unsigned offset)
+{
+ struct pl061_gpio *chip = gpiochip_get_data(gc);
+
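+	/* GPIODIR bit set means output; gpiolib expects 1 for input */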
+ return !(readb(chip->base + GPIODIR) & BIT(offset));
+}
+
static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
{
struct pl061_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
unsigned char gpiodir;
- if (offset >= gc->ngpio)
- return -EINVAL;
-
spin_lock_irqsave(&chip->lock, flags);
gpiodir = readb(chip->base + GPIODIR);
gpiodir &= ~(BIT(offset));
@@ -84,9 +90,6 @@ static int pl061_direction_output(struct gpio_chip *gc, unsigned offset,
unsigned long flags;
unsigned char gpiodir;
- if (offset >= gc->ngpio)
- return -EINVAL;
-
spin_lock_irqsave(&chip->lock, flags);
writeb(!!value << offset, chip->base + (BIT(offset + 2)));
gpiodir = readb(chip->base + GPIODIR);
@@ -319,6 +322,7 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
chip->gc.free = gpiochip_generic_free;
}
+ chip->gc.get_direction = pl061_get_direction;
chip->gc.direction_input = pl061_direction_input;
chip->gc.direction_output = pl061_direction_output;
chip->gc.get = pl061_get_value;
@@ -429,8 +433,6 @@ static struct amba_id pl061_ids[] = {
{ 0, 0 },
};
-MODULE_DEVICE_TABLE(amba, pl061_ids);
-
static struct amba_driver pl061_gpio_driver = {
.drv = {
.name = "pl061_gpio",
@@ -446,8 +448,4 @@ static int __init pl061_gpio_init(void)
{
return amba_driver_register(&pl061_gpio_driver);
}
-module_init(pl061_gpio_init);
-
-MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
-MODULE_DESCRIPTION("PL061 GPIO driver");
-MODULE_LICENSE("GPL");
+device_initcall(pl061_gpio_init);
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index 1d6100fa3..3b4dc1a9a 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -23,7 +23,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/gpio.h>
@@ -152,14 +151,3 @@ static int __init rc5t583_gpio_init(void)
return platform_driver_register(&rc5t583_gpio_driver);
}
subsys_initcall(rc5t583_gpio_init);
-
-static void __exit rc5t583_gpio_exit(void)
-{
- platform_driver_unregister(&rc5t583_gpio_driver);
-}
-module_exit(rc5t583_gpio_exit);
-
-MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_DESCRIPTION("GPIO interface for RC5T583");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:rc5t583-gpio");
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 4d9a315cf..681c93fb9 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -284,6 +284,25 @@ static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
spin_unlock_irqrestore(&p->lock, flags);
}
+static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_rcar_priv *p = gpiochip_get_data(chip);
+ unsigned long flags;
+ u32 val, bankmask;
+
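+	/* Each bank serves at most 32 lines, so only word 0 of mask/bits applies */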
+ bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
+ if (!bankmask)
+ return;
+
+ spin_lock_irqsave(&p->lock, flags);
+ val = gpio_rcar_read(p, OUTDT);
+ val &= ~bankmask;
+ val |= (bankmask & bits[0]);
+ gpio_rcar_write(p, OUTDT, val);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
@@ -425,6 +444,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
gpio_chip->get = gpio_rcar_get;
gpio_chip->direction_output = gpio_rcar_direction_output;
gpio_chip->set = gpio_rcar_set;
+ gpio_chip->set_multiple = gpio_rcar_set_multiple;
gpio_chip->label = name;
gpio_chip->parent = dev;
gpio_chip->owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index e3cb6772f..7da9e6c45 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -3,6 +3,8 @@
*
* Copyright (c) 2010, 2011 Intel Corporation
*
+ * Author: Hans J. Koch <hjk@linutronix.de>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License 2 as published
* by the Free Software Foundation.
@@ -15,7 +17,6 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/of_irq.h>
@@ -257,34 +258,17 @@ done:
return ret;
}
-static void sdv_gpio_remove(struct pci_dev *pdev)
-{
- struct sdv_gpio_chip_data *sd = pci_get_drvdata(pdev);
-
- free_irq(pdev->irq, sd);
- irq_free_descs(sd->irq_base, SDV_NUM_PUB_GPIOS);
-
- gpiochip_remove(&sd->chip);
- pci_release_region(pdev, GPIO_BAR);
- iounmap(sd->gpio_pub_base);
- pci_disable_device(pdev);
- kfree(sd);
-}
-
static const struct pci_device_id sdv_gpio_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_SDV_GPIO) },
{ 0, },
};
static struct pci_driver sdv_gpio_driver = {
+ .driver = {
+ .suppress_bind_attrs = true,
+ },
.name = DRV_NAME,
.id_table = sdv_gpio_pci_ids,
.probe = sdv_gpio_probe,
- .remove = sdv_gpio_remove,
};
-
-module_pci_driver(sdv_gpio_driver);
-
-MODULE_AUTHOR("Hans J. Koch <hjk@linutronix.de>");
-MODULE_DESCRIPTION("GPIO interface for Intel Sodaville SoCs");
-MODULE_LICENSE("GPL v2");
+builtin_pci_driver(sdv_gpio_driver);
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 0d5b8c525..853ca23ca 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -20,7 +20,7 @@
*
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/gpio.h>
@@ -432,8 +432,4 @@ static struct platform_driver sta2x11_gpio_platform_driver = {
},
.probe = gsta_probe,
};
-
-module_platform_driver(sta2x11_gpio_platform_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("sta2x11_gpio GPIO driver");
+builtin_platform_driver(sta2x11_gpio_platform_driver);
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 5197edf1a..6f7af28b8 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -5,7 +5,6 @@
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -413,23 +412,13 @@ out_free:
return ret;
}
-static int stmpe_gpio_remove(struct platform_device *pdev)
-{
- struct stmpe_gpio *stmpe_gpio = platform_get_drvdata(pdev);
- struct stmpe *stmpe = stmpe_gpio->stmpe;
-
- gpiochip_remove(&stmpe_gpio->chip);
- stmpe_disable(stmpe, STMPE_BLOCK_GPIO);
- kfree(stmpe_gpio);
-
- return 0;
-}
-
static struct platform_driver stmpe_gpio_driver = {
- .driver.name = "stmpe-gpio",
- .driver.owner = THIS_MODULE,
+ .driver = {
+ .suppress_bind_attrs = true,
+ .name = "stmpe-gpio",
+ .owner = THIS_MODULE,
+ },
.probe = stmpe_gpio_probe,
- .remove = stmpe_gpio_remove,
};
static int __init stmpe_gpio_init(void)
@@ -437,13 +426,3 @@ static int __init stmpe_gpio_init(void)
return platform_driver_register(&stmpe_gpio_driver);
}
subsys_initcall(stmpe_gpio_init);
-
-static void __exit stmpe_gpio_exit(void)
-{
- platform_driver_unregister(&stmpe_gpio_driver);
-}
-module_exit(stmpe_gpio_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("STMPExxxx GPIO driver");
-MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
index d387eb524..a177ebd92 100644
--- a/drivers/gpio/gpio-sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
@@ -1,5 +1,9 @@
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
+ * Driver for Semtech SX150X I2C GPIO Expanders
+ *
+ * Author: Gregory Bean <gbean@codeaurora.org>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -19,10 +23,8 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/i2c/sx150x.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -82,6 +84,57 @@ struct sx150x_device_data {
} pri;
};
+/**
+ * struct sx150x_platform_data - config data for SX150x driver
+ * @gpio_base: The index number of the first GPIO assigned to this
+ * GPIO expander. The expander will create a block of
+ * consecutively numbered gpios beginning at the given base,
+ * with the size of the block depending on the model of the
+ * expander chip.
+ * @oscio_is_gpo: If set to true, the driver will configure OSCIO as a GPO
+ * instead of as an oscillator, increasing the size of the
+ * GP(I)O pool created by this expander by one. The
+ * output-only GPO pin will be added at the end of the block.
+ * @io_pullup_ena: A bit-mask which enables or disables the pull-up resistor
+ * for each IO line in the expander. Setting the bit at
+ * position n will enable the pull-up for the IO at
+ * the corresponding offset. For chips with fewer than
+ * 16 IO pins, high-end bits are ignored.
+ * @io_pulldn_ena: A bit-mask which enables or disables the pull-down
+ * resistor for each IO line in the expander. Setting the
+ * bit at position n will enable the pull-down for the IO at
+ * the corresponding offset. For chips with fewer than
+ * 16 IO pins, high-end bits are ignored.
+ * @io_polarity: A bit-mask which enables polarity inversion for each IO line
+ * in the expander. Setting the bit at position n inverts
+ * the polarity of that IO line, while clearing it results
+ * in normal polarity. For chips with fewer than 16 IO pins,
+ * high-end bits are ignored.
+ * @irq_summary: The 'summary IRQ' line to which the GPIO expander's INT line
+ * is connected, via which it reports interrupt events
+ * across all GPIO lines. This must be a real,
+ * pre-existing IRQ line.
+ * Setting this value < 0 disables the irq_chip functionality
+ * of the driver.
+ * @irq_base: The first 'virtual IRQ' line at which our block of GPIO-based
+ * IRQ lines will appear. Similarly to gpio_base, the expander
+ * will create a block of irqs beginning at this number.
+ * This value is ignored if irq_summary is < 0.
+ * @reset_during_probe: If set to true, the driver will trigger a full
+ * reset of the chip at the beginning of the probe
+ * in order to place it in a known state.
+ */
+struct sx150x_platform_data {
+ unsigned gpio_base;
+ bool oscio_is_gpo;
+ u16 io_pullup_ena;
+ u16 io_pulldn_ena;
+ u16 io_polarity;
+ int irq_summary;
+ unsigned irq_base;
+ bool reset_during_probe;
+};
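+/*
+ * Minimal board-file sketch (hypothetical values, for illustration only):
+ *
+ *	static struct sx150x_platform_data sx1502_pdata = {
+ *		.gpio_base	= 200,
+ *		.oscio_is_gpo	= false,
+ *		.io_pullup_ena	= 0x00ff,
+ *		.io_pulldn_ena	= 0x0000,
+ *		.io_polarity	= 0,
+ *		.irq_summary	= -1,
+ *	};
+ */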
+
struct sx150x_chip {
struct gpio_chip gpio_chip;
struct i2c_client *client;
@@ -354,6 +407,32 @@ static void sx150x_gpio_set(struct gpio_chip *gc, unsigned offset, int val)
mutex_unlock(&chip->lock);
}
+static int sx150x_gpio_set_single_ended(struct gpio_chip *gc,
+ unsigned offset,
+ enum single_ended_mode mode)
+{
+ struct sx150x_chip *chip = gpiochip_get_data(gc);
+
+	/* Open drain is only configurable on the SX150X 789 family */
+ if (chip->dev_cfg->model != SX150X_789)
+ return -ENOTSUPP;
+
+ if (mode == LINE_MODE_PUSH_PULL)
+ return sx150x_write_cfg(chip,
+ offset,
+ 1,
+ chip->dev_cfg->pri.x789.reg_drain,
+ 0);
+
+ if (mode == LINE_MODE_OPEN_DRAIN)
+ return sx150x_write_cfg(chip,
+ offset,
+ 1,
+ chip->dev_cfg->pri.x789.reg_drain,
+ 1);
+ return -ENOTSUPP;
+}
+
static int sx150x_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
{
struct sx150x_chip *chip = gpiochip_get_data(gc);
@@ -508,6 +587,7 @@ static void sx150x_init_chip(struct sx150x_chip *chip,
chip->gpio_chip.direction_output = sx150x_gpio_direction_output;
chip->gpio_chip.get = sx150x_gpio_get;
chip->gpio_chip.set = sx150x_gpio_set;
+ chip->gpio_chip.set_single_ended = sx150x_gpio_set_single_ended;
chip->gpio_chip.base = pdata->gpio_base;
chip->gpio_chip.can_sleep = true;
chip->gpio_chip.ngpio = chip->dev_cfg->ngpios;
@@ -597,12 +677,6 @@ static int sx150x_init_hw(struct sx150x_chip *chip,
if (chip->dev_cfg->model == SX150X_789) {
err = sx150x_init_io(chip,
- chip->dev_cfg->pri.x789.reg_drain,
- pdata->io_open_drain_ena);
- if (err < 0)
- return err;
-
- err = sx150x_init_io(chip,
chip->dev_cfg->pri.x789.reg_polarity,
pdata->io_polarity);
if (err < 0)
@@ -718,13 +792,3 @@ static int __init sx150x_init(void)
return i2c_add_driver(&sx150x_driver);
}
subsys_initcall(sx150x_init);
-
-static void __exit sx150x_exit(void)
-{
- return i2c_del_driver(&sx150x_driver);
-}
-module_exit(sx150x_exit);
-
-MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>");
-MODULE_DESCRIPTION("Driver for Semtech SX150X I2C GPIO Expanders");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 4f566e6b8..2e35ed3ab 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -6,14 +6,14 @@
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/of.h>
#include <linux/interrupt.h>
#include <linux/mfd/tc3589x.h>
+#include <linux/bitops.h>
/*
* These registers are modified under the irq bus lock and cached to avoid
@@ -39,7 +39,7 @@ static int tc3589x_gpio_get(struct gpio_chip *chip, unsigned offset)
struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
u8 reg = TC3589x_GPIODATA0 + (offset / 8) * 2;
- u8 mask = 1 << (offset % 8);
+ u8 mask = BIT(offset % 8);
int ret;
ret = tc3589x_reg_read(tc3589x, reg);
@@ -55,7 +55,7 @@ static void tc3589x_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
u8 reg = TC3589x_GPIODATA0 + (offset / 8) * 2;
unsigned pos = offset % 8;
- u8 data[] = {!!val << pos, 1 << pos};
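+	/* The second byte is a write mask: only the addressed bit changes */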
+ u8 data[] = {val ? BIT(pos) : 0, BIT(pos)};
tc3589x_block_write(tc3589x, reg, ARRAY_SIZE(data), data);
}
@@ -70,7 +70,7 @@ static int tc3589x_gpio_direction_output(struct gpio_chip *chip,
tc3589x_gpio_set(chip, offset, val);
- return tc3589x_set_bits(tc3589x, reg, 1 << pos, 1 << pos);
+ return tc3589x_set_bits(tc3589x, reg, BIT(pos), BIT(pos));
}
static int tc3589x_gpio_direction_input(struct gpio_chip *chip,
@@ -81,7 +81,47 @@ static int tc3589x_gpio_direction_input(struct gpio_chip *chip,
u8 reg = TC3589x_GPIODIR0 + offset / 8;
unsigned pos = offset % 8;
- return tc3589x_set_bits(tc3589x, reg, 1 << pos, 0);
+ return tc3589x_set_bits(tc3589x, reg, BIT(pos), 0);
+}
+
+static int tc3589x_gpio_single_ended(struct gpio_chip *chip,
+ unsigned offset,
+ enum single_ended_mode mode)
+{
+ struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
+ struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
+ /*
+	 * These registers alternate at every second address:
+ * ODM bit 0 = drive to GND or Hi-Z (open drain)
+ * ODM bit 1 = drive to VDD or Hi-Z (open source)
+ */
+ u8 odmreg = TC3589x_GPIOODM0 + (offset / 8) * 2;
+ u8 odereg = TC3589x_GPIOODE0 + (offset / 8) * 2;
+ unsigned pos = offset % 8;
+ int ret;
+
+	switch (mode) {
+ case LINE_MODE_OPEN_DRAIN:
+ /* Set open drain mode */
+ ret = tc3589x_set_bits(tc3589x, odmreg, BIT(pos), 0);
+ if (ret)
+ return ret;
+ /* Enable open drain/source mode */
+ return tc3589x_set_bits(tc3589x, odereg, BIT(pos), BIT(pos));
+ case LINE_MODE_OPEN_SOURCE:
+ /* Set open source mode */
+ ret = tc3589x_set_bits(tc3589x, odmreg, BIT(pos), BIT(pos));
+ if (ret)
+ return ret;
+ /* Enable open drain/source mode */
+ return tc3589x_set_bits(tc3589x, odereg, BIT(pos), BIT(pos));
+ case LINE_MODE_PUSH_PULL:
+ /* Disable open drain/source mode */
+ return tc3589x_set_bits(tc3589x, odereg, BIT(pos), 0);
+ default:
+ break;
+ }
+ return -ENOTSUPP;
}
static struct gpio_chip template_chip = {
@@ -91,6 +131,7 @@ static struct gpio_chip template_chip = {
.get = tc3589x_gpio_get,
.direction_output = tc3589x_gpio_direction_output,
.set = tc3589x_gpio_set,
+ .set_single_ended = tc3589x_gpio_single_ended,
.can_sleep = true,
};
@@ -100,7 +141,7 @@ static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(gc);
int offset = d->hwirq;
int regoffset = offset / 8;
- int mask = 1 << (offset % 8);
+ int mask = BIT(offset % 8);
if (type == IRQ_TYPE_EDGE_BOTH) {
tc3589x_gpio->regs[REG_IBE][regoffset] |= mask;
@@ -165,7 +206,7 @@ static void tc3589x_gpio_irq_mask(struct irq_data *d)
struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(gc);
int offset = d->hwirq;
int regoffset = offset / 8;
- int mask = 1 << (offset % 8);
+ int mask = BIT(offset % 8);
tc3589x_gpio->regs[REG_IE][regoffset] &= ~mask;
}
@@ -176,7 +217,7 @@ static void tc3589x_gpio_irq_unmask(struct irq_data *d)
struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(gc);
int offset = d->hwirq;
int regoffset = offset / 8;
- int mask = 1 << (offset % 8);
+ int mask = BIT(offset % 8);
tc3589x_gpio->regs[REG_IE][regoffset] |= mask;
}
@@ -311,13 +352,3 @@ static int __init tc3589x_gpio_init(void)
return platform_driver_register(&tc3589x_gpio_driver);
}
subsys_initcall(tc3589x_gpio_init);
-
-static void __exit tc3589x_gpio_exit(void)
-{
- platform_driver_unregister(&tc3589x_gpio_driver);
-}
-module_exit(tc3589x_gpio_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("TC3589x GPIO driver");
-MODULE_AUTHOR("Hanumath Prasad, Rabin Vincent");
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 790bb111b..661b0e34e 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -35,24 +35,27 @@
#define GPIO_PORT(x) (((x) >> 3) & 0x3)
#define GPIO_BIT(x) ((x) & 0x7)
-#define GPIO_REG(x) (GPIO_BANK(x) * tegra_gpio_bank_stride + \
+#define GPIO_REG(tgi, x) (GPIO_BANK(x) * tgi->soc->bank_stride + \
GPIO_PORT(x) * 4)
-#define GPIO_CNF(x) (GPIO_REG(x) + 0x00)
-#define GPIO_OE(x) (GPIO_REG(x) + 0x10)
-#define GPIO_OUT(x) (GPIO_REG(x) + 0X20)
-#define GPIO_IN(x) (GPIO_REG(x) + 0x30)
-#define GPIO_INT_STA(x) (GPIO_REG(x) + 0x40)
-#define GPIO_INT_ENB(x) (GPIO_REG(x) + 0x50)
-#define GPIO_INT_LVL(x) (GPIO_REG(x) + 0x60)
-#define GPIO_INT_CLR(x) (GPIO_REG(x) + 0x70)
-
-#define GPIO_MSK_CNF(x) (GPIO_REG(x) + tegra_gpio_upper_offset + 0x00)
-#define GPIO_MSK_OE(x) (GPIO_REG(x) + tegra_gpio_upper_offset + 0x10)
-#define GPIO_MSK_OUT(x) (GPIO_REG(x) + tegra_gpio_upper_offset + 0X20)
-#define GPIO_MSK_INT_STA(x) (GPIO_REG(x) + tegra_gpio_upper_offset + 0x40)
-#define GPIO_MSK_INT_ENB(x) (GPIO_REG(x) + tegra_gpio_upper_offset + 0x50)
-#define GPIO_MSK_INT_LVL(x) (GPIO_REG(x) + tegra_gpio_upper_offset + 0x60)
+#define GPIO_CNF(t, x) (GPIO_REG(t, x) + 0x00)
+#define GPIO_OE(t, x) (GPIO_REG(t, x) + 0x10)
+#define GPIO_OUT(t, x) (GPIO_REG(t, x) + 0X20)
+#define GPIO_IN(t, x) (GPIO_REG(t, x) + 0x30)
+#define GPIO_INT_STA(t, x) (GPIO_REG(t, x) + 0x40)
+#define GPIO_INT_ENB(t, x) (GPIO_REG(t, x) + 0x50)
+#define GPIO_INT_LVL(t, x) (GPIO_REG(t, x) + 0x60)
+#define GPIO_INT_CLR(t, x) (GPIO_REG(t, x) + 0x70)
+#define GPIO_DBC_CNT(t, x) (GPIO_REG(t, x) + 0xF0)
+
+#define GPIO_MSK_CNF(t, x) (GPIO_REG(t, x) + t->soc->upper_offset + 0x00)
+#define GPIO_MSK_OE(t, x) (GPIO_REG(t, x) + t->soc->upper_offset + 0x10)
+#define GPIO_MSK_OUT(t, x) (GPIO_REG(t, x) + t->soc->upper_offset + 0X20)
+#define GPIO_MSK_DBC_EN(t, x) (GPIO_REG(t, x) + t->soc->upper_offset + 0x30)
+#define GPIO_MSK_INT_STA(t, x) (GPIO_REG(t, x) + t->soc->upper_offset + 0x40)
+#define GPIO_MSK_INT_ENB(t, x) (GPIO_REG(t, x) + t->soc->upper_offset + 0x50)
+#define GPIO_MSK_INT_LVL(t, x) (GPIO_REG(t, x) + t->soc->upper_offset + 0x60)
#define GPIO_INT_LVL_MASK 0x010101
#define GPIO_INT_LVL_EDGE_RISING 0x000101
@@ -61,10 +64,13 @@
#define GPIO_INT_LVL_LEVEL_HIGH 0x000001
#define GPIO_INT_LVL_LEVEL_LOW 0x000000
+struct tegra_gpio_info;
+
struct tegra_gpio_bank {
int bank;
int irq;
spinlock_t lvl_lock[4];
+ spinlock_t dbc_lock[4]; /* Lock for updating debounce count register */
#ifdef CONFIG_PM_SLEEP
u32 cnf[4];
u32 out[4];
@@ -72,25 +78,38 @@ struct tegra_gpio_bank {
u32 int_enb[4];
u32 int_lvl[4];
u32 wake_enb[4];
+ u32 dbc_enb[4];
#endif
+ u32 dbc_cnt[4];
+ struct tegra_gpio_info *tgi;
+};
+
+struct tegra_gpio_soc_config {
+ bool debounce_supported;
+ u32 bank_stride;
+ u32 upper_offset;
};
-static struct device *dev;
-static struct irq_domain *irq_domain;
-static void __iomem *regs;
-static u32 tegra_gpio_bank_count;
-static u32 tegra_gpio_bank_stride;
-static u32 tegra_gpio_upper_offset;
-static struct tegra_gpio_bank *tegra_gpio_banks;
+struct tegra_gpio_info {
+ struct device *dev;
+ void __iomem *regs;
+ struct irq_domain *irq_domain;
+ struct tegra_gpio_bank *bank_info;
+ const struct tegra_gpio_soc_config *soc;
+ struct gpio_chip gc;
+ struct irq_chip ic;
+ u32 bank_count;
+};
-static inline void tegra_gpio_writel(u32 val, u32 reg)
+static inline void tegra_gpio_writel(struct tegra_gpio_info *tgi,
+ u32 val, u32 reg)
{
- __raw_writel(val, regs + reg);
+ __raw_writel(val, tgi->regs + reg);
}
-static inline u32 tegra_gpio_readl(u32 reg)
+static inline u32 tegra_gpio_readl(struct tegra_gpio_info *tgi, u32 reg)
{
- return __raw_readl(regs + reg);
+ return __raw_readl(tgi->regs + reg);
}
static int tegra_gpio_compose(int bank, int port, int bit)
@@ -98,24 +117,25 @@ static int tegra_gpio_compose(int bank, int port, int bit)
return (bank << 5) | ((port & 0x3) << 3) | (bit & 0x7);
}
-static void tegra_gpio_mask_write(u32 reg, int gpio, int value)
+static void tegra_gpio_mask_write(struct tegra_gpio_info *tgi, u32 reg,
+ int gpio, int value)
{
u32 val;
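+	/* Upper byte selects the bit to update, lower byte gives its value */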
val = 0x100 << GPIO_BIT(gpio);
if (value)
val |= 1 << GPIO_BIT(gpio);
- tegra_gpio_writel(val, reg);
+ tegra_gpio_writel(tgi, val, reg);
}
-static void tegra_gpio_enable(int gpio)
+static void tegra_gpio_enable(struct tegra_gpio_info *tgi, int gpio)
{
- tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 1);
+ tegra_gpio_mask_write(tgi, GPIO_MSK_CNF(tgi, gpio), gpio, 1);
}
-static void tegra_gpio_disable(int gpio)
+static void tegra_gpio_disable(struct tegra_gpio_info *tgi, int gpio)
{
- tegra_gpio_mask_write(GPIO_MSK_CNF(gpio), gpio, 0);
+ tegra_gpio_mask_write(tgi, GPIO_MSK_CNF(tgi, gpio), gpio, 0);
}
static int tegra_gpio_request(struct gpio_chip *chip, unsigned offset)
@@ -125,83 +145,138 @@ static int tegra_gpio_request(struct gpio_chip *chip, unsigned offset)
static void tegra_gpio_free(struct gpio_chip *chip, unsigned offset)
{
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+
pinctrl_free_gpio(offset);
- tegra_gpio_disable(offset);
+ tegra_gpio_disable(tgi, offset);
}
static void tegra_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- tegra_gpio_mask_write(GPIO_MSK_OUT(offset), offset, value);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+
+ tegra_gpio_mask_write(tgi, GPIO_MSK_OUT(tgi, offset), offset, value);
}
static int tegra_gpio_get(struct gpio_chip *chip, unsigned offset)
{
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+ int bval = BIT(GPIO_BIT(offset));
+
/* If gpio is in output mode then read from the out value */
- if ((tegra_gpio_readl(GPIO_OE(offset)) >> GPIO_BIT(offset)) & 1)
- return (tegra_gpio_readl(GPIO_OUT(offset)) >>
- GPIO_BIT(offset)) & 0x1;
+ if (tegra_gpio_readl(tgi, GPIO_OE(tgi, offset)) & bval)
+ return !!(tegra_gpio_readl(tgi, GPIO_OUT(tgi, offset)) & bval);
- return (tegra_gpio_readl(GPIO_IN(offset)) >> GPIO_BIT(offset)) & 0x1;
+ return !!(tegra_gpio_readl(tgi, GPIO_IN(tgi, offset)) & bval);
}
static int tegra_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
- tegra_gpio_mask_write(GPIO_MSK_OE(offset), offset, 0);
- tegra_gpio_enable(offset);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+
+ tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, offset), offset, 0);
+ tegra_gpio_enable(tgi, offset);
return 0;
}
static int tegra_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+
tegra_gpio_set(chip, offset, value);
- tegra_gpio_mask_write(GPIO_MSK_OE(offset), offset, 1);
- tegra_gpio_enable(offset);
+ tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, offset), offset, 1);
+ tegra_gpio_enable(tgi, offset);
return 0;
}
-static int tegra_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+static int tegra_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
- return irq_find_mapping(irq_domain, offset);
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+ u32 pin_mask = BIT(GPIO_BIT(offset));
+ u32 cnf, oe;
+
+ cnf = tegra_gpio_readl(tgi, GPIO_CNF(tgi, offset));
+ if (!(cnf & pin_mask))
+ return -EINVAL;
+
+ oe = tegra_gpio_readl(tgi, GPIO_OE(tgi, offset));
+
+ return (oe & pin_mask) ? GPIOF_DIR_OUT : GPIOF_DIR_IN;
}
-static struct gpio_chip tegra_gpio_chip = {
- .label = "tegra-gpio",
- .request = tegra_gpio_request,
- .free = tegra_gpio_free,
- .direction_input = tegra_gpio_direction_input,
- .get = tegra_gpio_get,
- .direction_output = tegra_gpio_direction_output,
- .set = tegra_gpio_set,
- .to_irq = tegra_gpio_to_irq,
- .base = 0,
-};
+static int tegra_gpio_set_debounce(struct gpio_chip *chip, unsigned int offset,
+ unsigned int debounce)
+{
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+ struct tegra_gpio_bank *bank = &tgi->bank_info[GPIO_BANK(offset)];
+ unsigned int debounce_ms = DIV_ROUND_UP(debounce, 1000);
+ unsigned long flags;
+ int port;
+
+ if (!debounce_ms) {
+ tegra_gpio_mask_write(tgi, GPIO_MSK_DBC_EN(tgi, offset),
+ offset, 0);
+ return 0;
+ }
+
+ debounce_ms = min(debounce_ms, 255U);
+ port = GPIO_PORT(offset);
+
+	/*
+	 * There is only one debounce count register per port, so program
+	 * the maximum of the current and requested debounce times.
+	 */
+ spin_lock_irqsave(&bank->dbc_lock[port], flags);
+ if (bank->dbc_cnt[port] < debounce_ms) {
+ tegra_gpio_writel(tgi, debounce_ms, GPIO_DBC_CNT(tgi, offset));
+ bank->dbc_cnt[port] = debounce_ms;
+ }
+ spin_unlock_irqrestore(&bank->dbc_lock[port], flags);
+
+ tegra_gpio_mask_write(tgi, GPIO_MSK_DBC_EN(tgi, offset), offset, 1);
+
+ return 0;
+}
+
+static int tegra_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
+
+ return irq_find_mapping(tgi->irq_domain, offset);
+}
static void tegra_gpio_irq_ack(struct irq_data *d)
{
+ struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = bank->tgi;
int gpio = d->hwirq;
- tegra_gpio_writel(1 << GPIO_BIT(gpio), GPIO_INT_CLR(gpio));
+ tegra_gpio_writel(tgi, 1 << GPIO_BIT(gpio), GPIO_INT_CLR(tgi, gpio));
}
static void tegra_gpio_irq_mask(struct irq_data *d)
{
+ struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = bank->tgi;
int gpio = d->hwirq;
- tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 0);
+ tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 0);
}
static void tegra_gpio_irq_unmask(struct irq_data *d)
{
+ struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = bank->tgi;
int gpio = d->hwirq;
- tegra_gpio_mask_write(GPIO_MSK_INT_ENB(gpio), gpio, 1);
+ tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 1);
}
static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
int gpio = d->hwirq;
struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = bank->tgi;
int port = GPIO_PORT(gpio);
int lvl_type;
int val;
@@ -233,23 +308,24 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- ret = gpiochip_lock_as_irq(&tegra_gpio_chip, gpio);
+ ret = gpiochip_lock_as_irq(&tgi->gc, gpio);
if (ret) {
- dev_err(dev, "unable to lock Tegra GPIO %d as IRQ\n", gpio);
+ dev_err(tgi->dev,
+ "unable to lock Tegra GPIO %d as IRQ\n", gpio);
return ret;
}
spin_lock_irqsave(&bank->lvl_lock[port], flags);
- val = tegra_gpio_readl(GPIO_INT_LVL(gpio));
+ val = tegra_gpio_readl(tgi, GPIO_INT_LVL(tgi, gpio));
val &= ~(GPIO_INT_LVL_MASK << GPIO_BIT(gpio));
val |= lvl_type << GPIO_BIT(gpio);
- tegra_gpio_writel(val, GPIO_INT_LVL(gpio));
+ tegra_gpio_writel(tgi, val, GPIO_INT_LVL(tgi, gpio));
spin_unlock_irqrestore(&bank->lvl_lock[port], flags);
- tegra_gpio_mask_write(GPIO_MSK_OE(gpio), gpio, 0);
- tegra_gpio_enable(gpio);
+ tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, gpio), gpio, 0);
+ tegra_gpio_enable(tgi, gpio);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
irq_set_handler_locked(d, handle_level_irq);
@@ -261,9 +337,11 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
static void tegra_gpio_irq_shutdown(struct irq_data *d)
{
+ struct tegra_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ struct tegra_gpio_info *tgi = bank->tgi;
int gpio = d->hwirq;
- gpiochip_unlock_as_irq(&tegra_gpio_chip, gpio);
+ gpiochip_unlock_as_irq(&tgi->gc, gpio);
}
static void tegra_gpio_irq_handler(struct irq_desc *desc)
@@ -271,19 +349,24 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
int port;
int pin;
int unmasked = 0;
+ int gpio;
+ u32 lvl;
+ unsigned long sta;
struct irq_chip *chip = irq_desc_get_chip(desc);
struct tegra_gpio_bank *bank = irq_desc_get_handler_data(desc);
+ struct tegra_gpio_info *tgi = bank->tgi;
chained_irq_enter(chip, desc);
for (port = 0; port < 4; port++) {
- int gpio = tegra_gpio_compose(bank->bank, port, 0);
- unsigned long sta = tegra_gpio_readl(GPIO_INT_STA(gpio)) &
- tegra_gpio_readl(GPIO_INT_ENB(gpio));
- u32 lvl = tegra_gpio_readl(GPIO_INT_LVL(gpio));
+ gpio = tegra_gpio_compose(bank->bank, port, 0);
+ sta = tegra_gpio_readl(tgi, GPIO_INT_STA(tgi, gpio)) &
+ tegra_gpio_readl(tgi, GPIO_INT_ENB(tgi, gpio));
+ lvl = tegra_gpio_readl(tgi, GPIO_INT_LVL(tgi, gpio));
for_each_set_bit(pin, &sta, 8) {
- tegra_gpio_writel(1 << pin, GPIO_INT_CLR(gpio));
+ tegra_gpio_writel(tgi, 1 << pin,
+ GPIO_INT_CLR(tgi, gpio));
/* if gpio is edge triggered, clear condition
* before executing the handler so that we don't
@@ -306,22 +389,37 @@ static void tegra_gpio_irq_handler(struct irq_desc *desc)
#ifdef CONFIG_PM_SLEEP
static int tegra_gpio_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct tegra_gpio_info *tgi = platform_get_drvdata(pdev);
unsigned long flags;
int b;
int p;
local_irq_save(flags);
- for (b = 0; b < tegra_gpio_bank_count; b++) {
- struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
+ for (b = 0; b < tgi->bank_count; b++) {
+ struct tegra_gpio_bank *bank = &tgi->bank_info[b];
for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
unsigned int gpio = (b<<5) | (p<<3);
- tegra_gpio_writel(bank->cnf[p], GPIO_CNF(gpio));
- tegra_gpio_writel(bank->out[p], GPIO_OUT(gpio));
- tegra_gpio_writel(bank->oe[p], GPIO_OE(gpio));
- tegra_gpio_writel(bank->int_lvl[p], GPIO_INT_LVL(gpio));
- tegra_gpio_writel(bank->int_enb[p], GPIO_INT_ENB(gpio));
+ tegra_gpio_writel(tgi, bank->cnf[p],
+ GPIO_CNF(tgi, gpio));
+
+ if (tgi->soc->debounce_supported) {
+ tegra_gpio_writel(tgi, bank->dbc_cnt[p],
+ GPIO_DBC_CNT(tgi, gpio));
+ tegra_gpio_writel(tgi, bank->dbc_enb[p],
+ GPIO_MSK_DBC_EN(tgi, gpio));
+ }
+
+ tegra_gpio_writel(tgi, bank->out[p],
+ GPIO_OUT(tgi, gpio));
+ tegra_gpio_writel(tgi, bank->oe[p],
+ GPIO_OE(tgi, gpio));
+ tegra_gpio_writel(tgi, bank->int_lvl[p],
+ GPIO_INT_LVL(tgi, gpio));
+ tegra_gpio_writel(tgi, bank->int_enb[p],
+ GPIO_INT_ENB(tgi, gpio));
}
}
@@ -331,25 +429,39 @@ static int tegra_gpio_resume(struct device *dev)
static int tegra_gpio_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct tegra_gpio_info *tgi = platform_get_drvdata(pdev);
unsigned long flags;
int b;
int p;
local_irq_save(flags);
- for (b = 0; b < tegra_gpio_bank_count; b++) {
- struct tegra_gpio_bank *bank = &tegra_gpio_banks[b];
+ for (b = 0; b < tgi->bank_count; b++) {
+ struct tegra_gpio_bank *bank = &tgi->bank_info[b];
for (p = 0; p < ARRAY_SIZE(bank->oe); p++) {
unsigned int gpio = (b<<5) | (p<<3);
- bank->cnf[p] = tegra_gpio_readl(GPIO_CNF(gpio));
- bank->out[p] = tegra_gpio_readl(GPIO_OUT(gpio));
- bank->oe[p] = tegra_gpio_readl(GPIO_OE(gpio));
- bank->int_enb[p] = tegra_gpio_readl(GPIO_INT_ENB(gpio));
- bank->int_lvl[p] = tegra_gpio_readl(GPIO_INT_LVL(gpio));
+ bank->cnf[p] = tegra_gpio_readl(tgi,
+ GPIO_CNF(tgi, gpio));
+ bank->out[p] = tegra_gpio_readl(tgi,
+ GPIO_OUT(tgi, gpio));
+ bank->oe[p] = tegra_gpio_readl(tgi,
+ GPIO_OE(tgi, gpio));
+ if (tgi->soc->debounce_supported) {
+ bank->dbc_enb[p] = tegra_gpio_readl(tgi,
+ GPIO_MSK_DBC_EN(tgi, gpio));
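+				/*
+				 * Fold the enable bits into the write-mask
+				 * byte so the MSK write on resume re-enables
+				 * exactly these lines.
+				 */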
+ bank->dbc_enb[p] = (bank->dbc_enb[p] << 8) |
+ bank->dbc_enb[p];
+ }
+
+ bank->int_enb[p] = tegra_gpio_readl(tgi,
+ GPIO_INT_ENB(tgi, gpio));
+ bank->int_lvl[p] = tegra_gpio_readl(tgi,
+ GPIO_INT_LVL(tgi, gpio));
/* Enable gpio irq for wake up source */
- tegra_gpio_writel(bank->wake_enb[p],
- GPIO_INT_ENB(gpio));
+ tegra_gpio_writel(tgi, bank->wake_enb[p],
+ GPIO_INT_ENB(tgi, gpio));
}
}
local_irq_restore(flags);
@@ -382,22 +494,23 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
static int dbg_gpio_show(struct seq_file *s, void *unused)
{
+ struct tegra_gpio_info *tgi = s->private;
int i;
int j;
- for (i = 0; i < tegra_gpio_bank_count; i++) {
+ for (i = 0; i < tgi->bank_count; i++) {
for (j = 0; j < 4; j++) {
int gpio = tegra_gpio_compose(i, j, 0);
seq_printf(s,
"%d:%d %02x %02x %02x %02x %02x %02x %06x\n",
i, j,
- tegra_gpio_readl(GPIO_CNF(gpio)),
- tegra_gpio_readl(GPIO_OE(gpio)),
- tegra_gpio_readl(GPIO_OUT(gpio)),
- tegra_gpio_readl(GPIO_IN(gpio)),
- tegra_gpio_readl(GPIO_INT_STA(gpio)),
- tegra_gpio_readl(GPIO_INT_ENB(gpio)),
- tegra_gpio_readl(GPIO_INT_LVL(gpio)));
+ tegra_gpio_readl(tgi, GPIO_CNF(tgi, gpio)),
+ tegra_gpio_readl(tgi, GPIO_OE(tgi, gpio)),
+ tegra_gpio_readl(tgi, GPIO_OUT(tgi, gpio)),
+ tegra_gpio_readl(tgi, GPIO_IN(tgi, gpio)),
+ tegra_gpio_readl(tgi, GPIO_INT_STA(tgi, gpio)),
+ tegra_gpio_readl(tgi, GPIO_INT_ENB(tgi, gpio)),
+ tegra_gpio_readl(tgi, GPIO_INT_LVL(tgi, gpio)));
}
}
return 0;
@@ -405,7 +518,7 @@ static int dbg_gpio_show(struct seq_file *s, void *unused)
static int dbg_gpio_open(struct inode *inode, struct file *file)
{
- return single_open(file, dbg_gpio_show, &inode->i_private);
+ return single_open(file, dbg_gpio_show, inode->i_private);
}
static const struct file_operations debug_fops = {
@@ -415,66 +528,34 @@ static const struct file_operations debug_fops = {
.release = single_release,
};
-static void tegra_gpio_debuginit(void)
+static void tegra_gpio_debuginit(struct tegra_gpio_info *tgi)
{
(void) debugfs_create_file("tegra_gpio", S_IRUGO,
- NULL, NULL, &debug_fops);
+ NULL, tgi, &debug_fops);
}
#else
-static inline void tegra_gpio_debuginit(void)
+static inline void tegra_gpio_debuginit(struct tegra_gpio_info *tgi)
{
}
#endif
-static struct irq_chip tegra_gpio_irq_chip = {
- .name = "GPIO",
- .irq_ack = tegra_gpio_irq_ack,
- .irq_mask = tegra_gpio_irq_mask,
- .irq_unmask = tegra_gpio_irq_unmask,
- .irq_set_type = tegra_gpio_irq_set_type,
- .irq_shutdown = tegra_gpio_irq_shutdown,
-#ifdef CONFIG_PM_SLEEP
- .irq_set_wake = tegra_gpio_irq_set_wake,
-#endif
-};
-
static const struct dev_pm_ops tegra_gpio_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
};
-struct tegra_gpio_soc_config {
- u32 bank_stride;
- u32 upper_offset;
-};
-
-static struct tegra_gpio_soc_config tegra20_gpio_config = {
- .bank_stride = 0x80,
- .upper_offset = 0x800,
-};
-
-static struct tegra_gpio_soc_config tegra30_gpio_config = {
- .bank_stride = 0x100,
- .upper_offset = 0x80,
-};
-
-static const struct of_device_id tegra_gpio_of_match[] = {
- { .compatible = "nvidia,tegra30-gpio", .data = &tegra30_gpio_config },
- { .compatible = "nvidia,tegra20-gpio", .data = &tegra20_gpio_config },
- { },
-};
-
-/* This lock class tells lockdep that GPIO irqs are in a different
- * category than their parents, so it won't report false recursion.
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different category
+ * than their parents, so it won't report false recursion.
*/
static struct lock_class_key gpio_lock_class;
static int tegra_gpio_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
- struct tegra_gpio_soc_config *config;
+ const struct tegra_gpio_soc_config *config;
+ struct tegra_gpio_info *tgi;
struct resource *res;
struct tegra_gpio_bank *bank;
int ret;
@@ -482,102 +563,153 @@ static int tegra_gpio_probe(struct platform_device *pdev)
int i;
int j;
- dev = &pdev->dev;
-
- match = of_match_device(tegra_gpio_of_match, &pdev->dev);
- if (!match) {
+ config = of_device_get_match_data(&pdev->dev);
+ if (!config) {
dev_err(&pdev->dev, "Error: No device match found\n");
return -ENODEV;
}
- config = (struct tegra_gpio_soc_config *)match->data;
- tegra_gpio_bank_stride = config->bank_stride;
- tegra_gpio_upper_offset = config->upper_offset;
+ tgi = devm_kzalloc(&pdev->dev, sizeof(*tgi), GFP_KERNEL);
+ if (!tgi)
+ return -ENODEV;
+
+ tgi->soc = config;
+ tgi->dev = &pdev->dev;
for (;;) {
- res = platform_get_resource(pdev, IORESOURCE_IRQ, tegra_gpio_bank_count);
+ res = platform_get_resource(pdev, IORESOURCE_IRQ,
+ tgi->bank_count);
if (!res)
break;
- tegra_gpio_bank_count++;
+ tgi->bank_count++;
}
- if (!tegra_gpio_bank_count) {
+ if (!tgi->bank_count) {
dev_err(&pdev->dev, "Missing IRQ resource\n");
return -ENODEV;
}
- tegra_gpio_chip.ngpio = tegra_gpio_bank_count * 32;
+ tgi->gc.label = "tegra-gpio";
+ tgi->gc.request = tegra_gpio_request;
+ tgi->gc.free = tegra_gpio_free;
+ tgi->gc.direction_input = tegra_gpio_direction_input;
+ tgi->gc.get = tegra_gpio_get;
+ tgi->gc.direction_output = tegra_gpio_direction_output;
+ tgi->gc.set = tegra_gpio_set;
+ tgi->gc.get_direction = tegra_gpio_get_direction;
+ tgi->gc.to_irq = tegra_gpio_to_irq;
+ tgi->gc.base = 0;
+ tgi->gc.ngpio = tgi->bank_count * 32;
+ tgi->gc.parent = &pdev->dev;
+ tgi->gc.of_node = pdev->dev.of_node;
+
+ tgi->ic.name = "GPIO";
+ tgi->ic.irq_ack = tegra_gpio_irq_ack;
+ tgi->ic.irq_mask = tegra_gpio_irq_mask;
+ tgi->ic.irq_unmask = tegra_gpio_irq_unmask;
+ tgi->ic.irq_set_type = tegra_gpio_irq_set_type;
+ tgi->ic.irq_shutdown = tegra_gpio_irq_shutdown;
+#ifdef CONFIG_PM_SLEEP
+ tgi->ic.irq_set_wake = tegra_gpio_irq_set_wake;
+#endif
+
+ platform_set_drvdata(pdev, tgi);
+
+ if (config->debounce_supported)
+ tgi->gc.set_debounce = tegra_gpio_set_debounce;
- tegra_gpio_banks = devm_kzalloc(&pdev->dev,
- tegra_gpio_bank_count * sizeof(*tegra_gpio_banks),
- GFP_KERNEL);
- if (!tegra_gpio_banks)
+ tgi->bank_info = devm_kzalloc(&pdev->dev, tgi->bank_count *
+ sizeof(*tgi->bank_info), GFP_KERNEL);
+ if (!tgi->bank_info)
return -ENODEV;
- irq_domain = irq_domain_add_linear(pdev->dev.of_node,
- tegra_gpio_chip.ngpio,
- &irq_domain_simple_ops, NULL);
- if (!irq_domain)
+ tgi->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
+ tgi->gc.ngpio,
+ &irq_domain_simple_ops, NULL);
+ if (!tgi->irq_domain)
return -ENODEV;
- for (i = 0; i < tegra_gpio_bank_count; i++) {
+ for (i = 0; i < tgi->bank_count; i++) {
res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
if (!res) {
dev_err(&pdev->dev, "Missing IRQ resource\n");
return -ENODEV;
}
- bank = &tegra_gpio_banks[i];
+ bank = &tgi->bank_info[i];
bank->bank = i;
bank->irq = res->start;
+ bank->tgi = tgi;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(regs))
- return PTR_ERR(regs);
+ tgi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tgi->regs))
+ return PTR_ERR(tgi->regs);
- for (i = 0; i < tegra_gpio_bank_count; i++) {
+ for (i = 0; i < tgi->bank_count; i++) {
for (j = 0; j < 4; j++) {
int gpio = tegra_gpio_compose(i, j, 0);
- tegra_gpio_writel(0x00, GPIO_INT_ENB(gpio));
+ tegra_gpio_writel(tgi, 0x00, GPIO_INT_ENB(tgi, gpio));
}
}
- tegra_gpio_chip.of_node = pdev->dev.of_node;
-
- ret = devm_gpiochip_add_data(&pdev->dev, &tegra_gpio_chip, NULL);
+ ret = devm_gpiochip_add_data(&pdev->dev, &tgi->gc, tgi);
if (ret < 0) {
- irq_domain_remove(irq_domain);
+ irq_domain_remove(tgi->irq_domain);
return ret;
}
- for (gpio = 0; gpio < tegra_gpio_chip.ngpio; gpio++) {
- int irq = irq_create_mapping(irq_domain, gpio);
+ for (gpio = 0; gpio < tgi->gc.ngpio; gpio++) {
+ int irq = irq_create_mapping(tgi->irq_domain, gpio);
/* No validity check; all Tegra GPIOs are valid IRQs */
- bank = &tegra_gpio_banks[GPIO_BANK(gpio)];
+ bank = &tgi->bank_info[GPIO_BANK(gpio)];
irq_set_lockdep_class(irq, &gpio_lock_class);
irq_set_chip_data(irq, bank);
- irq_set_chip_and_handler(irq, &tegra_gpio_irq_chip,
- handle_simple_irq);
+ irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
}
- for (i = 0; i < tegra_gpio_bank_count; i++) {
- bank = &tegra_gpio_banks[i];
+ for (i = 0; i < tgi->bank_count; i++) {
+ bank = &tgi->bank_info[i];
irq_set_chained_handler_and_data(bank->irq,
tegra_gpio_irq_handler, bank);
- for (j = 0; j < 4; j++)
+ for (j = 0; j < 4; j++) {
spin_lock_init(&bank->lvl_lock[j]);
+ spin_lock_init(&bank->dbc_lock[j]);
+ }
}
- tegra_gpio_debuginit();
+ tegra_gpio_debuginit(tgi);
return 0;
}
+static const struct tegra_gpio_soc_config tegra20_gpio_config = {
+ .bank_stride = 0x80,
+ .upper_offset = 0x800,
+};
+
+static const struct tegra_gpio_soc_config tegra30_gpio_config = {
+ .bank_stride = 0x100,
+ .upper_offset = 0x80,
+};
+
+static const struct tegra_gpio_soc_config tegra210_gpio_config = {
+ .debounce_supported = true,
+ .bank_stride = 0x100,
+ .upper_offset = 0x80,
+};
+
+static const struct of_device_id tegra_gpio_of_match[] = {
+ { .compatible = "nvidia,tegra210-gpio", .data = &tegra210_gpio_config },
+ { .compatible = "nvidia,tegra30-gpio", .data = &tegra30_gpio_config },
+ { .compatible = "nvidia,tegra20-gpio", .data = &tegra20_gpio_config },
+ { },
+};
+
static struct platform_driver tegra_gpio_driver = {
.driver = {
.name = "tegra-gpio",
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 85ed608c2..181f86ce0 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -1,5 +1,6 @@
/*
* Timberdale FPGA GPIO driver
+ * Author: Mocean Laboratories
* Copyright (c) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
@@ -20,7 +21,7 @@
* Timberdale FPGA GPIO
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
@@ -290,40 +291,14 @@ static int timbgpio_probe(struct platform_device *pdev)
return 0;
}
-static int timbgpio_remove(struct platform_device *pdev)
-{
- struct timbgpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct timbgpio *tgpio = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
- if (irq >= 0 && tgpio->irq_base > 0) {
- int i;
- for (i = 0; i < pdata->nr_pins; i++) {
- irq_set_chip(tgpio->irq_base + i, NULL);
- irq_set_chip_data(tgpio->irq_base + i, NULL);
- }
-
- irq_set_handler(irq, NULL);
- irq_set_handler_data(irq, NULL);
- }
-
- return 0;
-}
-
static struct platform_driver timbgpio_platform_driver = {
.driver = {
- .name = DRIVER_NAME,
+ .name = DRIVER_NAME,
+ .suppress_bind_attrs = true,
},
.probe = timbgpio_probe,
- .remove = timbgpio_remove,
};
/*--------------------------------------------------------------------------*/
-module_platform_driver(timbgpio_platform_driver);
-
-MODULE_DESCRIPTION("Timberdale GPIO driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mocean Laboratories");
-MODULE_ALIAS("platform:"DRIVER_NAME);
-
+builtin_platform_driver(timbgpio_platform_driver);
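
This timberdale change is the first of several identical conversions in the patch (tps6586x, tps65910, zevio, zx and xgene follow): a driver that is only ever built in loses its remove path and MODULE_* boilerplate, gains suppress_bind_attrs so it cannot be unbound via sysfs, and registers through builtin_platform_driver(). That macro expands to roughly the following (sketch, not the exact kernel macro text):

    static int __init timbgpio_platform_driver_init(void)
    {
    	return platform_driver_register(&timbgpio_platform_driver);
    }
    device_initcall(timbgpio_platform_driver_init);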
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
index 9f020aa4b..cace79c1b 100644
--- a/drivers/gpio/gpio-tpic2810.c
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -57,39 +57,34 @@ static int tpic2810_direction_output(struct gpio_chip *chip,
return 0;
}
-static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value)
+static void tpic2810_set_mask_bits(struct gpio_chip *chip, u8 mask, u8 bits)
{
struct tpic2810 *gpio = gpiochip_get_data(chip);
+ u8 buffer;
+ int err;
mutex_lock(&gpio->lock);
- if (value)
- gpio->buffer |= BIT(offset);
- else
- gpio->buffer &= ~BIT(offset);
+ buffer = gpio->buffer & ~mask;
+ buffer |= (mask & bits);
- i2c_smbus_write_byte_data(gpio->client, TPIC2810_WS_COMMAND,
- gpio->buffer);
+ err = i2c_smbus_write_byte_data(gpio->client, TPIC2810_WS_COMMAND,
+ buffer);
+ if (!err)
+ gpio->buffer = buffer;
mutex_unlock(&gpio->lock);
}
+static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ tpic2810_set_mask_bits(chip, BIT(offset), value ? BIT(offset) : 0);
+}
+
static void tpic2810_set_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
- struct tpic2810 *gpio = gpiochip_get_data(chip);
-
- mutex_lock(&gpio->lock);
-
- /* clear bits under mask */
- gpio->buffer &= ~(*mask);
- /* set bits under mask */
- gpio->buffer |= ((*mask) & (*bits));
-
- i2c_smbus_write_byte_data(gpio->client, TPIC2810_WS_COMMAND,
- gpio->buffer);
-
- mutex_unlock(&gpio->lock);
+ tpic2810_set_mask_bits(chip, *mask, *bits);
}
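
Both set() and set_multiple() now funnel through one read-modify-write helper that holds the mutex and, unlike the old code, commits the shadow buffer only after i2c_smbus_write_byte_data() succeeds, so a failed transfer no longer desynchronizes the cached state from the hardware. An illustrative call, driving bits 0 and 3 high and bit 1 low in a single transfer:

    /* mask selects bits 0, 1 and 3; bits drives 0 and 3 high, 1 low */
    tpic2810_set_mask_bits(chip, BIT(0) | BIT(1) | BIT(3), BIT(0) | BIT(3));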
static struct gpio_chip template_chip = {
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index 313c0e484..0eaeac8de 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -101,16 +101,6 @@ static int tps65218_gpio_request(struct gpio_chip *gc, unsigned offset)
break;
case 1:
- /* GP02 is push-pull by default, can be set as open drain. */
- if (gpiochip_line_is_open_drain(gc, offset)) {
- ret = tps65218_clear_bits(tps65218,
- TPS65218_REG_CONFIG1,
- TPS65218_CONFIG1_GPO2_BUF,
- TPS65218_PROTECT_L1);
- if (ret)
- return ret;
- }
-
/* Setup GPO2 */
ret = tps65218_clear_bits(tps65218, TPS65218_REG_CONFIG1,
TPS65218_CONFIG1_IO1_SEL,
@@ -148,6 +138,40 @@ static int tps65218_gpio_request(struct gpio_chip *gc, unsigned offset)
return 0;
}
+static int tps65218_gpio_set_single_ended(struct gpio_chip *gc,
+ unsigned offset,
+ enum single_ended_mode mode)
+{
+ struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
+ struct tps65218 *tps65218 = tps65218_gpio->tps65218;
+
+ switch (offset) {
+ case 0:
+ case 2:
+ /* GPO1 is hardwired to be open drain */
+ if (mode == LINE_MODE_OPEN_DRAIN)
+ return 0;
+ return -ENOTSUPP;
+ case 1:
+ /* GPO2 is push-pull by default, can be set as open drain. */
+ if (mode == LINE_MODE_OPEN_DRAIN)
+ return tps65218_clear_bits(tps65218,
+ TPS65218_REG_CONFIG1,
+ TPS65218_CONFIG1_GPO2_BUF,
+ TPS65218_PROTECT_L1);
+ if (mode == LINE_MODE_PUSH_PULL)
+ return tps65218_set_bits(tps65218,
+ TPS65218_REG_CONFIG1,
+ TPS65218_CONFIG1_GPO2_BUF,
+ TPS65218_CONFIG1_GPO2_BUF,
+ TPS65218_PROTECT_L1);
+ return -ENOTSUPP;
+ default:
+ break;
+ }
+ return -ENOTSUPP;
+}
+
static struct gpio_chip template_chip = {
.label = "gpio-tps65218",
.owner = THIS_MODULE,
@@ -156,6 +180,7 @@ static struct gpio_chip template_chip = {
.direction_input = tps65218_gpio_input,
.get = tps65218_gpio_get,
.set = tps65218_gpio_set,
+ .set_single_ended = tps65218_gpio_set_single_ended,
.can_sleep = true,
.ngpio = 3,
.base = -1,
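
The new set_single_ended() hook moves the open-drain decision out of request(): gpiolib asks for LINE_MODE_OPEN_DRAIN or LINE_MODE_PUSH_PULL, and -ENOTSUPP tells the core the pin cannot do it in hardware, so it should fall back to emulation (see the gpiolib.c hunk later in this patch). The general shape of such a callback, with demo_write_od_bit() standing in for whatever register access a real chip needs (hypothetical helper):

    static int demo_set_single_ended(struct gpio_chip *gc, unsigned int offset,
    				 enum single_ended_mode mode)
    {
    	switch (mode) {
    	case LINE_MODE_OPEN_DRAIN:
    		return demo_write_od_bit(gc, offset, true);
    	case LINE_MODE_PUSH_PULL:
    		return demo_write_od_bit(gc, offset, false);
    	default:
    		return -ENOTSUPP;
    	}
    }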
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index c88bdc8ee..6b15e68a3 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -24,7 +24,7 @@
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mfd/tps6586x.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -140,14 +140,3 @@ static int __init tps6586x_gpio_init(void)
return platform_driver_register(&tps6586x_gpio_driver);
}
subsys_initcall(tps6586x_gpio_init);
-
-static void __exit tps6586x_gpio_exit(void)
-{
- platform_driver_unregister(&tps6586x_gpio_driver);
-}
-module_exit(tps6586x_gpio_exit);
-
-MODULE_ALIAS("platform:tps6586x-gpio");
-MODULE_DESCRIPTION("GPIO interface for TPS6586X PMIC");
-MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index cdbd7c740..0ae6a5a54 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -4,7 +4,7 @@
* Copyright 2010 Texas Instruments Inc.
*
* Author: Graeme Gregory <gg@slimlogic.co.uk>
- * Author: Jorge Eduardo Candelaria jedu@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -14,7 +14,7 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
@@ -193,15 +193,3 @@ static int __init tps65910_gpio_init(void)
return platform_driver_register(&tps65910_gpio_driver);
}
subsys_initcall(tps65910_gpio_init);
-
-static void __exit tps65910_gpio_exit(void)
-{
- platform_driver_unregister(&tps65910_gpio_driver);
-}
-module_exit(tps65910_gpio_exit);
-
-MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
-MODULE_AUTHOR("Jorge Eduardo Candelaria jedu@slimlogic.co.uk>");
-MODULE_DESCRIPTION("GPIO interface for TPS65910/TPS6511 PMICs");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tps65910-gpio");
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 8cdb9f7ec..4e4501211 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -186,6 +186,28 @@ static int vx855gpio_direction_output(struct gpio_chip *gpio,
return 0;
}
+static int vx855gpio_set_single_ended(struct gpio_chip *gpio,
+ unsigned int nr,
+ enum single_ended_mode mode)
+{
+ /* The GPI cannot be single-ended */
+ if (nr < NR_VX855_GPI)
+ return -EINVAL;
+
+ /* The GPO's are push-pull */
+ if (nr < NR_VX855_GPInO) {
+ if (mode != LINE_MODE_PUSH_PULL)
+ return -ENOTSUPP;
+ return 0;
+ }
+
+ /* The GPIO's are open drain */
+ if (mode != LINE_MODE_OPEN_DRAIN)
+ return -ENOTSUPP;
+
+ return 0;
+}
+
static const char *vx855gpio_names[NR_VX855_GP] = {
"VX855_GPI0", "VX855_GPI1", "VX855_GPI2", "VX855_GPI3", "VX855_GPI4",
"VX855_GPI5", "VX855_GPI6", "VX855_GPI7", "VX855_GPI8", "VX855_GPI9",
@@ -209,6 +231,7 @@ static void vx855gpio_gpio_setup(struct vx855_gpio *vg)
c->direction_output = vx855gpio_direction_output;
c->get = vx855gpio_get;
c->set = vx855gpio_set;
+ c->set_single_ended = vx855gpio_set_single_ended;
c->dbg_show = NULL;
c->base = 0;
c->ngpio = NR_VX855_GP;
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index 18cb0f534..41ec78340 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -132,6 +132,28 @@ static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
return wm831x_set_bits(wm831x, reg, WM831X_GPN_FN_MASK, fn);
}
+static int wm831x_set_single_ended(struct gpio_chip *chip,
+ unsigned int offset,
+ enum single_ended_mode mode)
+{
+ struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip);
+ struct wm831x *wm831x = wm831x_gpio->wm831x;
+ int reg = WM831X_GPIO1_CONTROL + offset;
+
+ switch (mode) {
+ case LINE_MODE_OPEN_DRAIN:
+ return wm831x_set_bits(wm831x, reg,
+ WM831X_GPN_OD_MASK, WM831X_GPN_OD);
+ case LINE_MODE_PUSH_PULL:
+ return wm831x_set_bits(wm831x, reg,
+ WM831X_GPN_OD_MASK, 0);
+ default:
+ break;
+ }
+
+ return -ENOTSUPP;
+}
+
#ifdef CONFIG_DEBUG_FS
static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
@@ -216,7 +238,7 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
pull,
powerdomain,
reg & WM831X_GPN_POL ? "" : " inverted",
- reg & WM831X_GPN_OD ? "open-drain" : "CMOS",
+ reg & WM831X_GPN_OD ? "open-drain" : "push-pull",
tristated ? " tristated" : "",
reg);
}
@@ -234,6 +256,7 @@ static struct gpio_chip template_chip = {
.set = wm831x_gpio_set,
.to_irq = wm831x_gpio_to_irq,
.set_debounce = wm831x_gpio_set_debounce,
+ .set_single_ended = wm831x_set_single_ended,
.dbg_show = wm831x_gpio_dbg_show,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index b089df99a..744af388c 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -103,6 +103,28 @@ static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL, value);
}
+static int wm8994_gpio_set_single_ended(struct gpio_chip *chip,
+ unsigned int offset,
+ enum single_ended_mode mode)
+{
+ struct wm8994_gpio *wm8994_gpio = gpiochip_get_data(chip);
+ struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+
+ switch (mode) {
+ case LINE_MODE_OPEN_DRAIN:
+ return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
+ WM8994_GPN_OP_CFG_MASK,
+ WM8994_GPN_OP_CFG);
+ case LINE_MODE_PUSH_PULL:
+ return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
+ WM8994_GPN_OP_CFG_MASK, 0);
+ default:
+ break;
+ }
+
+ return -ENOTSUPP;
+}
+
static int wm8994_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct wm8994_gpio *wm8994_gpio = gpiochip_get_data(chip);
@@ -217,7 +239,7 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
if (reg & WM8994_GPN_OP_CFG)
seq_printf(s, "open drain ");
else
- seq_printf(s, "CMOS ");
+ seq_printf(s, "push-pull ");
seq_printf(s, "%s (%x)\n",
wm8994_gpio_fn(reg & WM8994_GPN_FN_MASK), reg);
@@ -235,6 +257,7 @@ static struct gpio_chip template_chip = {
.get = wm8994_gpio_get,
.direction_output = wm8994_gpio_direction_out,
.set = wm8994_gpio_set,
+ .set_single_ended = wm8994_gpio_set_single_ended,
.to_irq = wm8994_gpio_to_irq,
.dbg_show = wm8994_gpio_dbg_show,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index 51f41e8fd..eaa71d440 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -19,18 +19,23 @@
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irqdesc.h>
+#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
#include <linux/spinlock.h>
-static unsigned ws16c48_base;
-module_param(ws16c48_base, uint, 0);
-MODULE_PARM_DESC(ws16c48_base, "WinSystems WS16C48 base address");
-static unsigned ws16c48_irq;
-module_param(ws16c48_irq, uint, 0);
-MODULE_PARM_DESC(ws16c48_irq, "WinSystems WS16C48 interrupt line number");
+#define WS16C48_EXTENT 16
+#define MAX_NUM_WS16C48 max_num_isa_dev(WS16C48_EXTENT)
+
+static unsigned int base[MAX_NUM_WS16C48];
+static unsigned int num_ws16c48;
+module_param_array(base, uint, &num_ws16c48, 0);
+MODULE_PARM_DESC(base, "WinSystems WS16C48 base addresses");
+
+static unsigned int irq[MAX_NUM_WS16C48];
+module_param_array(irq, uint, NULL, 0);
+MODULE_PARM_DESC(irq, "WinSystems WS16C48 interrupt line numbers");
/**
* struct ws16c48_gpio - GPIO device private data structure
@@ -298,23 +303,19 @@ static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init ws16c48_probe(struct platform_device *pdev)
+static int ws16c48_probe(struct device *dev, unsigned int id)
{
- struct device *dev = &pdev->dev;
struct ws16c48_gpio *ws16c48gpio;
- const unsigned base = ws16c48_base;
- const unsigned extent = 16;
const char *const name = dev_name(dev);
int err;
- const unsigned irq = ws16c48_irq;
ws16c48gpio = devm_kzalloc(dev, sizeof(*ws16c48gpio), GFP_KERNEL);
if (!ws16c48gpio)
return -ENOMEM;
- if (!devm_request_region(dev, base, extent, name)) {
+ if (!devm_request_region(dev, base[id], WS16C48_EXTENT, name)) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
- base, base + extent);
+ base[id], base[id] + WS16C48_EXTENT);
return -EBUSY;
}
@@ -328,8 +329,8 @@ static int __init ws16c48_probe(struct platform_device *pdev)
ws16c48gpio->chip.direction_output = ws16c48_gpio_direction_output;
ws16c48gpio->chip.get = ws16c48_gpio_get;
ws16c48gpio->chip.set = ws16c48_gpio_set;
- ws16c48gpio->base = base;
- ws16c48gpio->irq = irq;
+ ws16c48gpio->base = base[id];
+ ws16c48gpio->irq = irq[id];
spin_lock_init(&ws16c48gpio->lock);
@@ -342,11 +343,11 @@ static int __init ws16c48_probe(struct platform_device *pdev)
}
/* Disable IRQ by default */
- outb(0x80, base + 7);
- outb(0, base + 8);
- outb(0, base + 9);
- outb(0, base + 10);
- outb(0xC0, base + 7);
+ outb(0x80, base[id] + 7);
+ outb(0, base[id] + 8);
+ outb(0, base[id] + 9);
+ outb(0, base[id] + 10);
+ outb(0xC0, base[id] + 7);
err = gpiochip_irqchip_add(&ws16c48gpio->chip, &ws16c48_irqchip, 0,
handle_edge_irq, IRQ_TYPE_NONE);
@@ -355,7 +356,7 @@ static int __init ws16c48_probe(struct platform_device *pdev)
goto err_gpiochip_remove;
}
- err = request_irq(irq, ws16c48_irq_handler, IRQF_SHARED, name,
+ err = request_irq(irq[id], ws16c48_irq_handler, IRQF_SHARED, name,
ws16c48gpio);
if (err) {
dev_err(dev, "IRQ handler registering failed (%d)\n", err);
@@ -369,9 +370,9 @@ err_gpiochip_remove:
return err;
}
-static int ws16c48_remove(struct platform_device *pdev)
+static int ws16c48_remove(struct device *dev, unsigned int id)
{
- struct ws16c48_gpio *const ws16c48gpio = platform_get_drvdata(pdev);
+ struct ws16c48_gpio *const ws16c48gpio = dev_get_drvdata(dev);
free_irq(ws16c48gpio->irq, ws16c48gpio);
gpiochip_remove(&ws16c48gpio->chip);
@@ -379,48 +380,15 @@ static int ws16c48_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_device *ws16c48_device;
-
-static struct platform_driver ws16c48_driver = {
+static struct isa_driver ws16c48_driver = {
+ .probe = ws16c48_probe,
.driver = {
.name = "ws16c48"
},
.remove = ws16c48_remove
};
-static void __exit ws16c48_exit(void)
-{
- platform_device_unregister(ws16c48_device);
- platform_driver_unregister(&ws16c48_driver);
-}
-
-static int __init ws16c48_init(void)
-{
- int err;
-
- ws16c48_device = platform_device_alloc(ws16c48_driver.driver.name, -1);
- if (!ws16c48_device)
- return -ENOMEM;
-
- err = platform_device_add(ws16c48_device);
- if (err)
- goto err_platform_device;
-
- err = platform_driver_probe(&ws16c48_driver, ws16c48_probe);
- if (err)
- goto err_platform_driver;
-
- return 0;
-
-err_platform_driver:
- platform_device_del(ws16c48_device);
-err_platform_device:
- platform_device_put(ws16c48_device);
- return err;
-}
-
-module_init(ws16c48_init);
-module_exit(ws16c48_exit);
+module_isa_driver(ws16c48_driver, num_ws16c48);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("WinSystems WS16C48 GPIO driver");
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index 31cbcb84c..033258634 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -216,23 +216,10 @@ static int xgene_gpio_sb_domain_alloc(struct irq_domain *domain,
&parent_fwspec);
}
-static void xgene_gpio_sb_domain_free(struct irq_domain *domain,
- unsigned int virq,
- unsigned int nr_irqs)
-{
- struct irq_data *d;
- unsigned int i;
-
- for (i = 0; i < nr_irqs; i++) {
- d = irq_domain_get_irq_data(domain, virq + i);
- irq_domain_reset_irq_data(d);
- }
-}
-
static const struct irq_domain_ops xgene_gpio_sb_domain_ops = {
.translate = xgene_gpio_sb_domain_translate,
.alloc = xgene_gpio_sb_domain_alloc,
- .free = xgene_gpio_sb_domain_free,
+ .free = irq_domain_free_irqs_common,
.activate = xgene_gpio_sb_domain_activate,
.deactivate = xgene_gpio_sb_domain_deactivate,
};
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 0dc916191..40a8881c2 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -17,7 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/module.h>
+#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -85,6 +85,17 @@ static void xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
spin_unlock_irqrestore(&chip->lock, flags);
}
+static int xgene_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct xgene_gpio *chip = gpiochip_get_data(gc);
+ unsigned long bank_offset, bit_offset;
+
+ bank_offset = GPIO_SET_DR_OFFSET + GPIO_BANK_OFFSET(offset);
+ bit_offset = GPIO_BIT_OFFSET(offset);
+
+ return !!(ioread32(chip->base + bank_offset) & BIT(bit_offset));
+}
+
static int xgene_gpio_dir_in(struct gpio_chip *gc, unsigned int offset)
{
struct xgene_gpio *chip = gpiochip_get_data(gc);
@@ -189,6 +200,7 @@ static int xgene_gpio_probe(struct platform_device *pdev)
spin_lock_init(&gpio->lock);
gpio->chip.parent = &pdev->dev;
+ gpio->chip.get_direction = xgene_gpio_get_direction;
gpio->chip.direction_input = xgene_gpio_dir_in;
gpio->chip.direction_output = xgene_gpio_dir_out;
gpio->chip.get = xgene_gpio_get;
@@ -216,19 +228,21 @@ static const struct of_device_id xgene_gpio_of_match[] = {
{ .compatible = "apm,xgene-gpio", },
{},
};
-MODULE_DEVICE_TABLE(of, xgene_gpio_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_gpio_acpi_match[] = {
+ { "APMC0D14", 0 },
+ { },
+};
+#endif
static struct platform_driver xgene_gpio_driver = {
.driver = {
.name = "xgene-gpio",
.of_match_table = xgene_gpio_of_match,
+ .acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match),
.pm = XGENE_GPIO_PM_OPS,
},
.probe = xgene_gpio_probe,
};
-
-module_platform_driver(xgene_gpio_driver);
-
-MODULE_AUTHOR("Feng Kan <fkan@apm.com>");
-MODULE_DESCRIPTION("APM X-Gene GPIO driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(xgene_gpio_driver);
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index aa5813d2d..1a33a19d9 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -85,7 +85,8 @@ enum {
XLP_GPIO_VARIANT_XLP316,
XLP_GPIO_VARIANT_XLP208,
XLP_GPIO_VARIANT_XLP980,
- XLP_GPIO_VARIANT_XLP532
+ XLP_GPIO_VARIANT_XLP532,
+ GPIO_VARIANT_VULCAN
};
struct xlp_gpio_priv {
@@ -285,6 +286,10 @@ static const struct of_device_id xlp_gpio_of_ids[] = {
.compatible = "netlogic,xlp532-gpio",
.data = (void *)XLP_GPIO_VARIANT_XLP532,
},
+ {
+ .compatible = "brcm,vulcan-gpio",
+ .data = (void *)GPIO_VARIANT_VULCAN,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, xlp_gpio_of_ids);
@@ -347,6 +352,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
break;
case XLP_GPIO_VARIANT_XLP980:
case XLP_GPIO_VARIANT_XLP532:
+ case GPIO_VARIANT_VULCAN:
priv->gpio_out_en = gpio_base + GPIO_9XX_OUTPUT_EN;
priv->gpio_paddrv = gpio_base + GPIO_9XX_PADDRV;
priv->gpio_intr_stat = gpio_base + GPIO_9XX_INT_STAT;
@@ -354,7 +360,12 @@ static int xlp_gpio_probe(struct platform_device *pdev)
priv->gpio_intr_pol = gpio_base + GPIO_9XX_INT_POL;
priv->gpio_intr_en = gpio_base + GPIO_9XX_INT_EN00;
- ngpio = (soc_type == XLP_GPIO_VARIANT_XLP980) ? 66 : 67;
+ if (soc_type == XLP_GPIO_VARIANT_XLP980)
+ ngpio = 66;
+ else if (soc_type == XLP_GPIO_VARIANT_XLP532)
+ ngpio = 67;
+ else
+ ngpio = 70;
break;
default:
dev_err(&pdev->dev, "Unknown Processor type!\n");
@@ -377,10 +388,14 @@ static int xlp_gpio_probe(struct platform_device *pdev)
gc->get = xlp_gpio_get;
spin_lock_init(&priv->lock);
- irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
+ /* XLP has fixed IRQ range for GPIO interrupts */
+ if (soc_type == GPIO_VARIANT_VULCAN)
+ irq_base = irq_alloc_descs(-1, 0, gc->ngpio, 0);
+ else
+ irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
if (irq_base < 0) {
dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
- return -ENODEV;
+ return irq_base;
}
err = gpiochip_add_data(gc, priv);
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index cda6d922b..e23ef7b94 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -10,7 +10,7 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/of_device.h>
@@ -203,32 +203,17 @@ static int zevio_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int zevio_gpio_remove(struct platform_device *pdev)
-{
- struct zevio_gpio *controller = platform_get_drvdata(pdev);
-
- of_mm_gpiochip_remove(&controller->chip);
-
- return 0;
-}
-
static const struct of_device_id zevio_gpio_of_match[] = {
{ .compatible = "lsi,zevio-gpio", },
{ },
};
-MODULE_DEVICE_TABLE(of, zevio_gpio_of_match);
-
static struct platform_driver zevio_gpio_driver = {
.driver = {
.name = "gpio-zevio",
.of_match_table = zevio_gpio_of_match,
+ .suppress_bind_attrs = true,
},
.probe = zevio_gpio_probe,
- .remove = zevio_gpio_remove,
};
-module_platform_driver(zevio_gpio_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Fabian Vogt <fabian@ritter-vogt.de>");
-MODULE_DESCRIPTION("LSI ZEVIO SoC GPIO driver");
+builtin_platform_driver(zevio_gpio_driver);
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index 47c79fa65..93de8be0d 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -1,4 +1,8 @@
/*
+ * ZTE ZX296702 GPIO driver
+ *
+ * Author: Jun Nie <jun.nie@linaro.org>
+ *
* Copyright (C) 2015 Linaro Ltd.
*
* This program is free software; you can redistribute it and/or modify
@@ -10,7 +14,7 @@
#include <linux/errno.h>
#include <linux/gpio/driver.h>
#include <linux/irqchip/chained_irq.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -282,7 +286,6 @@ static const struct of_device_id zx_gpio_match[] = {
},
{ },
};
-MODULE_DEVICE_TABLE(of, zx_gpio_match);
static struct platform_driver zx_gpio_driver = {
.probe = zx_gpio_probe,
@@ -291,9 +294,4 @@ static struct platform_driver zx_gpio_driver = {
.of_match_table = of_match_ptr(zx_gpio_match),
},
};
-
-module_platform_driver(zx_gpio_driver)
-
-MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
-MODULE_DESCRIPTION("ZTE ZX296702 GPIO driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(zx_gpio_driver)
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 42a4bb7cf..4aabddb38 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/io-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -196,21 +197,68 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
}
/**
+ * of_gpiochip_set_names() - set up the names of the lines
+ * @gc: GPIO chip whose lines should be named, if possible
+ */
+static void of_gpiochip_set_names(struct gpio_chip *gc)
+{
+ struct gpio_device *gdev = gc->gpiodev;
+ struct device_node *np = gc->of_node;
+ int i;
+ int nstrings;
+
+ nstrings = of_property_count_strings(np, "gpio-line-names");
+ if (nstrings <= 0)
+ /* Line names not present */
+ return;
+
+ /* This is normally not what you want */
+ if (gdev->ngpio != nstrings)
+ dev_info(&gdev->dev, "gpio-line-names specifies %d line "
+ "names but there are %d lines on the chip\n",
+ nstrings, gdev->ngpio);
+
+ /*
+ * Make sure to not index beyond the end of the number of descriptors
+ * of the GPIO device.
+ */
+ for (i = 0; i < gdev->ngpio; i++) {
+ const char *name;
+ int ret;
+
+ ret = of_property_read_string_index(np,
+ "gpio-line-names",
+ i,
+ &name);
+ if (ret) {
+ if (ret != -ENODATA)
+ dev_err(&gdev->dev,
+ "unable to name line %d: %d\n",
+ i, ret);
+ break;
+ }
+ gdev->descs[i].name = name;
+ }
+}
+
+/**
* of_gpiochip_scan_gpios - Scan gpio-controller for gpio definitions
* @chip: gpio chip to act on
*
* This is only used by of_gpiochip_add to request/set GPIO initial
* configuration.
+ * Returns a negative error code on failure, 0 on success.
*/
-static void of_gpiochip_scan_gpios(struct gpio_chip *chip)
+static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
{
struct gpio_desc *desc = NULL;
struct device_node *np;
const char *name;
enum gpio_lookup_flags lflags;
enum gpiod_flags dflags;
+ int ret;
- for_each_child_of_node(chip->of_node, np) {
+ for_each_available_child_of_node(chip->of_node, np) {
if (!of_property_read_bool(np, "gpio-hog"))
continue;
@@ -218,9 +266,12 @@ static void of_gpiochip_scan_gpios(struct gpio_chip *chip)
if (IS_ERR(desc))
continue;
- if (gpiod_hog(desc, name, lflags, dflags))
- continue;
+ ret = gpiod_hog(desc, name, lflags, dflags);
+ if (ret < 0)
+ return ret;
}
+
+ return 0;
}
/**
@@ -440,11 +491,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
if (status)
return status;
- of_node_get(chip->of_node);
+ /* If the chip defines names itself, these take precedence */
+ if (!chip->names)
+ of_gpiochip_set_names(chip);
- of_gpiochip_scan_gpios(chip);
+ of_node_get(chip->of_node);
- return 0;
+ return of_gpiochip_scan_gpios(chip);
}
void of_gpiochip_remove(struct gpio_chip *chip)
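
Two details of the gpiolib-of changes deserve emphasis: of_gpiochip_set_names() runs only when the driver left chip->names unset, so a chip naming its own lines wins over the gpio-line-names property, and of_gpiochip_scan_gpios() now propagates gpiod_hog() failures, which is why of_gpiochip_add() returns its result instead of an unconditional 0. A driver-side naming sketch (illustrative names):

    static const char * const demo_line_names[] = {
    	"reset", "led0", "led1", "unused",
    };

    static void demo_name_lines(struct gpio_chip *gc)
    {
    	/* takes precedence over any gpio-line-names DT property */
    	gc->names = demo_line_names;
    }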
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 996a73390..be74bd370 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -20,6 +20,7 @@
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
+#include <linux/compat.h>
#include <uapi/linux/gpio.h>
#include "gpiolib.h"
@@ -316,7 +317,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct gpio_device *gdev = filp->private_data;
struct gpio_chip *chip = gdev->chip;
- int __user *ip = (int __user *)arg;
+ void __user *ip = (void __user *)arg;
/* We fail any subsequent ioctl():s when the chip is gone */
if (!chip)
@@ -388,6 +389,14 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EINVAL;
}
+#ifdef CONFIG_COMPAT
+static long gpio_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
/**
* gpio_chrdev_open() - open the chardev for ioctl operations
* @inode: inode for this chardev
@@ -431,7 +440,9 @@ static const struct file_operations gpio_fileops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.unlocked_ioctl = gpio_ioctl,
- .compat_ioctl = gpio_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = gpio_ioctl_compat,
+#endif
};
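
The compat wrapper exists because a 64-bit kernel serving a 32-bit process must translate user pointers with compat_ptr(); a plain cast is not enough on s390. Since every GPIO ioctl argument is a pointer (hence the switch from int __user * to void __user *), one thin wrapper covers them all. The same shape works for any chardev in that situation, with demo_ioctl() standing in for the driver's unlocked_ioctl (sketch):

    #ifdef CONFIG_COMPAT
    static long demo_compat_ioctl(struct file *filp, unsigned int cmd,
    			      unsigned long arg)
    {
    	/* safe only because every cmd takes a pointer argument */
    	return demo_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
    }
    #endif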
static void gpiodevice_release(struct device *dev)
@@ -616,21 +627,38 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
goto err_free_label;
}
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
for (i = 0; i < chip->ngpio; i++) {
struct gpio_desc *desc = &gdev->descs[i];
desc->gdev = gdev;
-
- /* REVISIT: most hardware initializes GPIOs as inputs (often
- * with pullups enabled) so power usage is minimized. Linux
- * code should set the gpio direction first thing; but until
- * it does, and in case chip->get_direction is not set, we may
- * expose the wrong direction in sysfs.
+ /*
+ * REVISIT: most hardware initializes GPIOs as inputs
+ * (often with pullups enabled) so power usage is
+ * minimized. Linux code should set the gpio direction
+ * first thing; but until it does, and in case
+ * chip->get_direction is not set, we may expose the
+ * wrong direction in sysfs.
*/
- desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
- }
- spin_unlock_irqrestore(&gpio_lock, flags);
+ if (chip->get_direction) {
+ /*
+ * If we have .get_direction, set up the initial
+ * direction flag from the hardware.
+ */
+ int dir = chip->get_direction(chip, i);
+
+ if (!dir)
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ } else if (!chip->direction_input) {
+ /*
+ * If the chip lacks the .direction_input callback
+ * we logically assume all lines are outputs.
+ */
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ }
+ }
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
@@ -1552,8 +1580,8 @@ EXPORT_SYMBOL_GPL(gpiod_direction_input);
static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
- struct gpio_chip *chip;
- int status = -EINVAL;
+ struct gpio_chip *gc = desc->gdev->chip;
+ int ret;
/* GPIOs used for IRQs shall not be set as output */
if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) {
@@ -1563,28 +1591,50 @@ static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
return -EIO;
}
- /* Open drain pin should not be driven to 1 */
- if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- return gpiod_direction_input(desc);
-
- /* Open source pin should not be driven to 0 */
- if (!value && test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- return gpiod_direction_input(desc);
+ if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
+ /* First see if we can enable open drain in hardware */
+ if (gc->set_single_ended) {
+ ret = gc->set_single_ended(gc, gpio_chip_hwgpio(desc),
+ LINE_MODE_OPEN_DRAIN);
+ if (!ret)
+ goto set_output_value;
+ }
+ /* Emulate open drain by not actively driving the line high */
+ if (value)
+ return gpiod_direction_input(desc);
+ } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
+ if (gc->set_single_ended) {
+ ret = gc->set_single_ended(gc, gpio_chip_hwgpio(desc),
+ LINE_MODE_OPEN_SOURCE);
+ if (!ret)
+ goto set_output_value;
+ }
+ /* Emulate open source by not actively driving the line low */
+ if (!value)
+ return gpiod_direction_input(desc);
+ } else {
+ /* Make sure to disable open drain/source hardware, if any */
+ if (gc->set_single_ended)
+ gc->set_single_ended(gc,
+ gpio_chip_hwgpio(desc),
+ LINE_MODE_PUSH_PULL);
+ }
- chip = desc->gdev->chip;
- if (!chip->set || !chip->direction_output) {
+set_output_value:
+ if (!gc->set || !gc->direction_output) {
gpiod_warn(desc,
"%s: missing set() or direction_output() operations\n",
__func__);
return -EIO;
}
- status = chip->direction_output(chip, gpio_chip_hwgpio(desc), value);
- if (status == 0)
+ ret = gc->direction_output(gc, gpio_chip_hwgpio(desc), value);
+ if (!ret)
set_bit(FLAG_IS_OUT, &desc->flags);
trace_gpio_value(desc_to_gpio(desc), 0, value);
- trace_gpio_direction(desc_to_gpio(desc), 0, status);
- return status;
+ trace_gpio_direction(desc_to_gpio(desc), 0, ret);
+ return ret;
}
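
The rewritten output path tries hardware first and emulates second: for an open-drain line it offers LINE_MODE_OPEN_DRAIN to set_single_ended(), and only if the driver refuses does it emulate by never actively driving the line high (writing 1 becomes "switch to input", letting the pull-up do the work); open source mirrors that for the low level, and plain push-pull lines get any stale single-ended hardware mode cleared. From the consumer side this is transparent (illustrative snippet; "reset" is a made-up con_id):

    struct gpio_desc *reset;

    reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
    if (IS_ERR(reset))
    	return PTR_ERR(reset);

    /* with GPIO_OPEN_DRAIN set in DT this either enables hardware
     * open drain or silently falls back to the input-based emulation */
    gpiod_set_value(reset, 1);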
/**
@@ -1846,10 +1896,10 @@ static void gpio_chip_set_multiple(struct gpio_chip *chip,
}
}
-static void gpiod_set_array_value_priv(bool raw, bool can_sleep,
- unsigned int array_size,
- struct gpio_desc **desc_array,
- int *value_array)
+void gpiod_set_array_value_complex(bool raw, bool can_sleep,
+ unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array)
{
int i = 0;
@@ -1955,8 +2005,8 @@ void gpiod_set_raw_array_value(unsigned int array_size,
{
if (!desc_array)
return;
- gpiod_set_array_value_priv(true, false, array_size, desc_array,
- value_array);
+ gpiod_set_array_value_complex(true, false, array_size, desc_array,
+ value_array);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value);
@@ -1977,8 +2027,8 @@ void gpiod_set_array_value(unsigned int array_size,
{
if (!desc_array)
return;
- gpiod_set_array_value_priv(false, false, array_size, desc_array,
- value_array);
+ gpiod_set_array_value_complex(false, false, array_size, desc_array,
+ value_array);
}
EXPORT_SYMBOL_GPL(gpiod_set_array_value);
@@ -2003,8 +2053,8 @@ EXPORT_SYMBOL_GPL(gpiod_cansleep);
*/
int gpiod_to_irq(const struct gpio_desc *desc)
{
- struct gpio_chip *chip;
- int offset;
+ struct gpio_chip *chip;
+ int offset;
/*
* Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics
@@ -2016,7 +2066,16 @@ int gpiod_to_irq(const struct gpio_desc *desc)
chip = desc->gdev->chip;
offset = gpio_chip_hwgpio(desc);
- return chip->to_irq ? chip->to_irq(chip, offset) : -ENXIO;
+ if (chip->to_irq) {
+ int retirq = chip->to_irq(chip, offset);
+
+ /* Zero means NO_IRQ */
+ if (!retirq)
+ return -ENXIO;
+
+ return retirq;
+ }
+ return -ENXIO;
}
EXPORT_SYMBOL_GPL(gpiod_to_irq);
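
gpiod_to_irq() now folds the legacy convention of to_irq() callbacks returning 0 for "no interrupt" into -ENXIO, so callers only ever have to test for a negative value. Typical caller pattern (sketch; demo_handler and priv are placeholders):

    int irq, ret;

    irq = gpiod_to_irq(desc);
    if (irq < 0)
    	return irq; /* covers the former "returned 0" case too */

    ret = devm_request_irq(dev, irq, demo_handler, IRQF_TRIGGER_FALLING,
    		       "demo", priv);
    if (ret)
    	return ret;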
@@ -2030,17 +2089,30 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq);
*/
int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
- if (offset >= chip->ngpio)
- return -EINVAL;
+ struct gpio_desc *desc;
+
+ desc = gpiochip_get_desc(chip, offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ /* Flush direction if something changed behind our back */
+ if (chip->get_direction) {
+ int dir = chip->get_direction(chip, offset);
- if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) {
+ if (dir)
+ clear_bit(FLAG_IS_OUT, &desc->flags);
+ else
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ }
+
+ if (test_bit(FLAG_IS_OUT, &desc->flags)) {
chip_err(chip,
"%s: tried to flag a GPIO set as output for IRQ\n",
__func__);
return -EIO;
}
- set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+ set_bit(FLAG_USED_AS_IRQ, &desc->flags);
return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
@@ -2188,8 +2260,8 @@ void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
might_sleep_if(extra_checks);
if (!desc_array)
return;
- gpiod_set_array_value_priv(true, true, array_size, desc_array,
- value_array);
+ gpiod_set_array_value_complex(true, true, array_size, desc_array,
+ value_array);
}
EXPORT_SYMBOL_GPL(gpiod_set_raw_array_value_cansleep);
@@ -2211,8 +2283,8 @@ void gpiod_set_array_value_cansleep(unsigned int array_size,
might_sleep_if(extra_checks);
if (!desc_array)
return;
- gpiod_set_array_value_priv(false, true, array_size, desc_array,
- value_array);
+ gpiod_set_array_value_complex(false, true, array_size, desc_array,
+ value_array);
}
EXPORT_SYMBOL_GPL(gpiod_set_array_value_cansleep);
@@ -2726,15 +2798,16 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
local_desc = gpiochip_request_own_desc(chip, hwnum, name);
if (IS_ERR(local_desc)) {
- pr_err("requesting hog GPIO %s (chip %s, offset %d) failed\n",
- name, chip->label, hwnum);
- return PTR_ERR(local_desc);
+ status = PTR_ERR(local_desc);
+ pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n",
+ name, chip->label, hwnum, status);
+ return status;
}
status = gpiod_configure_flags(desc, name, lflags, dflags);
if (status < 0) {
- pr_err("setup of hog GPIO %s (chip %s, offset %d) failed\n",
- name, chip->label, hwnum);
+ pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n",
+ name, chip->label, hwnum, status);
gpiochip_free_own_desc(desc);
return status;
}
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index e30e5fdb1..2d9ea5e0c 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -141,6 +141,10 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
const char *list_name, int index, enum of_gpio_flags *flags);
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, u16 hwnum);
+void gpiod_set_array_value_complex(bool raw, bool can_sleep,
+ unsigned int array_size,
+ struct gpio_desc **desc_array,
+ int *value_array);
extern struct spinlock gpio_lock;
extern struct list_head gpio_devices;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f2a74d0b6..fc357319d 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -52,6 +52,7 @@ config DRM_KMS_FB_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_DEFERRED_IO
help
FBDEV helpers for KMS drivers.
@@ -252,6 +253,8 @@ source "drivers/gpu/drm/rcar-du/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"
+source "drivers/gpu/drm/sun4i/Kconfig"
+
source "drivers/gpu/drm/omapdrm/Kconfig"
source "drivers/gpu/drm/tilcdc/Kconfig"
@@ -281,3 +284,9 @@ source "drivers/gpu/drm/imx/Kconfig"
source "drivers/gpu/drm/vc4/Kconfig"
source "drivers/gpu/drm/etnaviv/Kconfig"
+
+source "drivers/gpu/drm/arc/Kconfig"
+
+source "drivers/gpu/drm/hisilicon/Kconfig"
+
+source "drivers/gpu/drm/mediatek/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 22228ef50..be43afb08 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -1,4 +1,4 @@
-#
+
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
@@ -65,6 +65,7 @@ obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc/
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-y += omapdrm/
+obj-$(CONFIG_DRM_SUN4I) += sun4i/
obj-y += tilcdc/
obj-$(CONFIG_DRM_QXL) += qxl/
obj-$(CONFIG_DRM_BOCHS) += bochs/
@@ -73,8 +74,11 @@ obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_STI) += sti/
obj-$(CONFIG_DRM_IMX) += imx/
+obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
+obj-$(CONFIG_DRM_ARCPGU) += arc/
+obj-y += hisilicon/
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index ca77ec101..e503e3d6d 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -2,6 +2,7 @@ menu "ACP (Audio CoProcessor) Configuration"
config DRM_AMD_ACP
bool "Enable AMD Audio CoProcessor IP support"
+ depends on DRM_AMDGPU
select MFD_CORE
select PM_GENERIC_DOMAINS if PM
help
diff --git a/drivers/gpu/drm/amd/acp/acp_hw.c b/drivers/gpu/drm/amd/acp/acp_hw.c
index 7af83f142..c7d7205c9 100644
--- a/drivers/gpu/drm/amd/acp/acp_hw.c
+++ b/drivers/gpu/drm/amd/acp/acp_hw.c
@@ -34,7 +34,7 @@
#define mmACP_AZALIA_I2S_SELECT 0x51d4
-int amd_acp_hw_init(void *cgs_device,
+int amd_acp_hw_init(struct cgs_device *cgs_device,
unsigned acp_version_major, unsigned acp_version_minor)
{
unsigned int acp_mode = ACP_MODE_I2S;
diff --git a/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
index bccf47b63..a72ddb2f6 100644
--- a/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
+++ b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
@@ -28,7 +28,7 @@
#include "cgs_linux.h"
#include "cgs_common.h"
-int amd_acp_hw_init(void *cgs_device,
+int amd_acp_hw_init(struct cgs_device *cgs_device,
unsigned acp_version_major, unsigned acp_version_minor);
#endif /* _ACP_GFX_IF_H */
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index b30fcfa4b..7335c0420 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -15,3 +15,13 @@ config DRM_AMDGPU_USERPTR
help
This option selects CONFIG_MMU_NOTIFIER if it isn't already
selected to enabled full userptr support.
+
+config DRM_AMDGPU_GART_DEBUGFS
+ bool "Allow GART access through debugfs"
+ depends on DRM_AMDGPU
+ depends on DEBUG_FS
+ default n
+ help
+ Selecting this option creates a debugfs file to inspect the mapped
+ pages. Uses more memory for housekeeping, enable only for debugging.
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 1bcbade47..e055d5be1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -283,7 +283,8 @@ struct amdgpu_ring_funcs {
int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
/* command emit functions */
void (*emit_ib)(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib);
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@@ -302,6 +303,8 @@ struct amdgpu_ring_funcs {
void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
/* pad the indirect buffer to the necessary number of dw */
void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+ unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
+ void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
};
/*
@@ -365,13 +368,6 @@ struct amdgpu_fence_driver {
#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
-struct amdgpu_user_fence {
- /* write-back bo */
- struct amdgpu_bo *bo;
- /* write-back address offset to bo start */
- uint32_t offset;
-};
-
int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
@@ -391,6 +387,14 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
/*
* TTM.
*/
+
+#define AMDGPU_TTM_LRU_SIZE 20
+
+struct amdgpu_mman_lru {
+ struct list_head *lru[TTM_NUM_MEM_TYPES];
+ struct list_head *swap_lru;
+};
+
struct amdgpu_mman {
struct ttm_bo_global_ref bo_global_ref;
struct drm_global_reference mem_global_ref;
@@ -408,6 +412,9 @@ struct amdgpu_mman {
struct amdgpu_ring *buffer_funcs_ring;
/* Scheduler entity for buffer moves */
struct amd_sched_entity entity;
+
+ /* custom LRU management */
+ struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
};
int amdgpu_copy_buffer(struct amdgpu_ring *ring,
@@ -494,9 +501,10 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg);
+struct drm_gem_object *
+amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gobj,
int flags);
@@ -586,11 +594,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner);
+bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
+int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
+ struct fence *fence);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
+int amdgpu_fence_slab_init(void);
+void amdgpu_fence_slab_fini(void);
/*
* GART structures, functions & helpers
@@ -609,8 +622,9 @@ struct amdgpu_gart {
unsigned num_gpu_pages;
unsigned num_cpu_pages;
unsigned table_size;
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
struct page **pages;
- dma_addr_t *pages_addr;
+#endif
bool ready;
const struct amdgpu_gart_funcs *gart_funcs;
};
@@ -709,6 +723,7 @@ struct amdgpu_flip_work {
unsigned shared_count;
struct fence **shared;
struct fence_cb cb;
+ bool async;
};
@@ -721,17 +736,7 @@ struct amdgpu_ib {
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
- struct amdgpu_user_fence *user;
- struct amdgpu_vm *vm;
- unsigned vm_id;
- uint64_t vm_pd_addr;
- struct amdgpu_ctx *ctx;
- uint32_t gds_base, gds_size;
- uint32_t gws_base, gws_size;
- uint32_t oa_base, oa_size;
uint32_t flags;
- /* resulting sequence number */
- uint64_t sequence;
};
enum amdgpu_ring_type {
@@ -742,22 +747,25 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_VCE
};
-extern struct amd_sched_backend_ops amdgpu_sched_ops;
+extern const struct amd_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
- struct amdgpu_job **job);
+ struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
struct amdgpu_job **job);
+
void amdgpu_job_free(struct amdgpu_job *job);
+void amdgpu_job_free_func(struct kref *refcount);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f);
+void amdgpu_job_timeout_func(struct work_struct *work);
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
- struct amd_gpu_scheduler sched;
+ struct amd_gpu_scheduler sched;
spinlock_t fence_lock;
struct amdgpu_bo *ring_obj;
@@ -785,9 +793,12 @@ struct amdgpu_ring {
unsigned wptr_offs;
unsigned next_rptr_offs;
unsigned fence_offs;
- struct amdgpu_ctx *current_ctx;
+ uint64_t current_ctx;
enum amdgpu_ring_type type;
char name[16];
+ unsigned cond_exe_offs;
+ u64 cond_exe_gpu_addr;
+ volatile u32 *cond_exe_cpu_addr;
};
/*
@@ -830,13 +841,6 @@ struct amdgpu_vm_pt {
uint64_t addr;
};
-struct amdgpu_vm_id {
- struct amdgpu_vm_manager_id *mgr_id;
- uint64_t pd_gpu_addr;
- /* last flushed PD/PT update */
- struct fence *flushed_updates;
-};
-
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root va;
@@ -862,19 +866,29 @@ struct amdgpu_vm {
struct amdgpu_vm_pt *page_tables;
/* for id and flush management per ring */
- struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];
+ struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
/* protecting freed */
spinlock_t freed_lock;
/* Scheduler entity for page table updates */
struct amd_sched_entity entity;
+
+ /* client id */
+ u64 client_id;
};
-struct amdgpu_vm_manager_id {
+struct amdgpu_vm_id {
struct list_head list;
- struct fence *active;
- atomic_long_t owner;
+ struct fence *first;
+ struct amdgpu_sync active;
+ struct fence *last_flush;
+ struct amdgpu_ring *last_user;
+ atomic64_t owner;
+
+ uint64_t pd_gpu_addr;
+ /* last flushed PD/PT update */
+ struct fence *flushed_updates;
uint32_t gds_base;
uint32_t gds_size;
@@ -889,7 +903,7 @@ struct amdgpu_vm_manager {
struct mutex lock;
unsigned num_ids;
struct list_head ids_lru;
- struct amdgpu_vm_manager_id ids[AMDGPU_NUM_VM];
+ struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
uint32_t max_pfn;
/* vram base address for page table entry */
@@ -901,6 +915,8 @@ struct amdgpu_vm_manager {
struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
unsigned vm_pte_num_rings;
atomic_t vm_pte_next_ring;
+ /* client id counter */
+ atomic64_t client_counter;
};
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
@@ -916,11 +932,11 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence,
unsigned *vm_id, uint64_t *vm_pd_addr);
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size);
+int amdgpu_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
@@ -1026,6 +1042,11 @@ void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
*/
#include "clearstate_defs.h"
+struct amdgpu_rlc_funcs {
+ void (*enter_safe_mode)(struct amdgpu_device *adev);
+ void (*exit_safe_mode)(struct amdgpu_device *adev);
+};
+
struct amdgpu_rlc {
/* for power gating */
struct amdgpu_bo *save_restore_obj;
@@ -1044,6 +1065,24 @@ struct amdgpu_rlc {
uint64_t cp_table_gpu_addr;
volatile uint32_t *cp_table_ptr;
u32 cp_table_size;
+
+ /* safe mode for updating CG/PG state */
+ bool in_safe_mode;
+ const struct amdgpu_rlc_funcs *funcs;
+
+ /* for firmware data */
+ u32 save_and_restore_offset;
+ u32 clear_state_descriptor_offset;
+ u32 avail_scratch_ram_locations;
+ u32 reg_restore_list_size;
+ u32 reg_list_format_start;
+ u32 reg_list_format_separate_start;
+ u32 starting_offsets_start;
+ u32 reg_list_format_size_bytes;
+ u32 reg_list_size_bytes;
+
+ u32 *register_list_format;
+ u32 *register_restore;
};
struct amdgpu_mec {
@@ -1097,6 +1136,12 @@ struct amdgpu_gca_config {
uint32_t macrotile_mode_array[16];
};
+struct amdgpu_cu_info {
+ uint32_t number; /* total active CU number */
+ uint32_t ao_cu_mask;
+ uint32_t bitmap[4][4];
+};
+
struct amdgpu_gfx {
struct mutex gpu_clock_mutex;
struct amdgpu_gca_config config;
@@ -1129,17 +1174,19 @@ struct amdgpu_gfx {
struct amdgpu_irq_src priv_reg_irq;
struct amdgpu_irq_src priv_inst_irq;
/* gfx status */
- uint32_t gfx_current_status;
+ uint32_t gfx_current_status;
/* ce ram size*/
- unsigned ce_ram_size;
+ unsigned ce_ram_size;
+ struct amdgpu_cu_info cu_info;
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f);
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
+ struct fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ib, struct fence *last_vm_update,
- struct fence **f);
+ struct amdgpu_job *job, struct fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
@@ -1164,7 +1211,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring);
struct amdgpu_cs_chunk {
uint32_t chunk_id;
uint32_t length_dw;
- uint32_t *kdata;
+ void *kdata;
};
struct amdgpu_cs_parser {
@@ -1195,13 +1242,25 @@ struct amdgpu_cs_parser {
struct amdgpu_job {
struct amd_sched_job base;
struct amdgpu_device *adev;
+ struct amdgpu_vm *vm;
struct amdgpu_ring *ring;
struct amdgpu_sync sync;
struct amdgpu_ib *ibs;
struct fence *fence; /* the hw fence */
uint32_t num_ibs;
void *owner;
- struct amdgpu_user_fence uf;
+ uint64_t ctx;
+ unsigned vm_id;
+ uint64_t vm_pd_addr;
+ uint32_t gds_base, gds_size;
+ uint32_t gws_base, gws_size;
+ uint32_t oa_base, oa_size;
+
+ /* user fence handling */
+ struct amdgpu_bo *uf_bo;
+ uint32_t uf_offset;
+ uint64_t uf_sequence;
+
};
#define to_amdgpu_job(sched_job) \
container_of((sched_job), struct amdgpu_job, base)
@@ -1582,10 +1641,12 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev);
/*
* UVD
*/
-#define AMDGPU_MAX_UVD_HANDLES 10
-#define AMDGPU_UVD_STACK_SIZE (1024*1024)
-#define AMDGPU_UVD_HEAP_SIZE (1024*1024)
-#define AMDGPU_UVD_FIRMWARE_OFFSET 256
+#define AMDGPU_DEFAULT_UVD_HANDLES 10
+#define AMDGPU_MAX_UVD_HANDLES 40
+#define AMDGPU_UVD_STACK_SIZE (200*1024)
+#define AMDGPU_UVD_HEAP_SIZE (256*1024)
+#define AMDGPU_UVD_SESSION_SIZE (50*1024)
+#define AMDGPU_UVD_FIRMWARE_OFFSET 256
struct amdgpu_uvd {
struct amdgpu_bo *vcpu_bo;
@@ -1593,6 +1654,7 @@ struct amdgpu_uvd {
uint64_t gpu_addr;
unsigned fw_version;
void *saved_bo;
+ unsigned max_handles;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
struct delayed_work idle_work;
@@ -1645,7 +1707,7 @@ struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
- int num_instances;
+ int num_instances;
};
/*
@@ -1691,12 +1753,12 @@ static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
* Debugfs
*/
struct amdgpu_debugfs {
- struct drm_info_list *files;
+ const struct drm_info_list *files;
unsigned num_files;
};
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- struct drm_info_list *files,
+ const struct drm_info_list *files,
unsigned nfiles);
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
@@ -1738,13 +1800,6 @@ struct amdgpu_allowed_register_entry {
bool grbm_indexed;
};
-struct amdgpu_cu_info {
- uint32_t number; /* total active CU number */
- uint32_t ao_cu_mask;
- uint32_t bitmap[4][4];
-};
-
-
/*
* ASIC specific functions.
*/
@@ -1762,10 +1817,11 @@ struct amdgpu_asic_funcs {
u32 (*get_xclk)(struct amdgpu_device *adev);
/* get the gpu clock counter */
uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
- int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
/* MM block clocks */
int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
+ /* query virtual capabilities */
+ u32 (*get_virtual_caps)(struct amdgpu_device *adev);
};
/*
@@ -1855,20 +1911,17 @@ struct amdgpu_atcs {
/*
* CGS
*/
-void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
-void amdgpu_cgs_destroy_device(void *cgs_device);
-
-
-/*
- * CGS
- */
-void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
-void amdgpu_cgs_destroy_device(void *cgs_device);
+struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
+void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
/* GPU virtualization */
+#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0)
+#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1)
struct amdgpu_virtualization {
bool supports_sr_iov;
+ bool is_virtual;
+ u32 caps;
};
/*
@@ -1904,16 +1957,15 @@ struct amdgpu_device {
int usec_timeout;
const struct amdgpu_asic_funcs *asic_funcs;
bool shutdown;
- bool suspend;
bool need_dma32;
bool accel_working;
- struct work_struct reset_work;
+ struct work_struct reset_work;
struct notifier_block acpi_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
- unsigned debugfs_count;
+ unsigned debugfs_count;
#if defined(CONFIG_DEBUG_FS)
- struct dentry *debugfs_regs;
+ struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
struct amdgpu_atif atif;
struct amdgpu_atcs atcs;
@@ -1926,7 +1978,6 @@ struct amdgpu_device {
/* BIOS */
uint8_t *bios;
bool is_atom_bios;
- uint16_t bios_header_start;
struct amdgpu_bo *stollen_vga_memory;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -2159,11 +2210,11 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
+#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
-#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
@@ -2175,7 +2226,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
+#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
@@ -2183,6 +2234,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
+#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
+#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
@@ -2196,7 +2249,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
-#define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base))
+#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
@@ -2339,7 +2392,7 @@ static inline void amdgpu_unregister_atpx_handler(void) {}
* KMS
*/
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
-extern int amdgpu_max_kms_ioctl;
+extern const int amdgpu_max_kms_ioctl;
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
int amdgpu_driver_unload_kms(struct drm_device *dev);
@@ -2398,5 +2451,4 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo);
#include "amdgpu_object.h"
-
#endif
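
Most of the header churn above extends amdgpu's ops-table idiom: per-ASIC behaviour sits behind a const struct of function pointers (amdgpu_rlc_funcs, amdgpu_asic_funcs) and thin macros such as amdgpu_asic_get_virtual_caps() forward to it, so new hooks land without touching call sites. A minimal user-space sketch of that idiom, with invented names (device_funcs, get_caps):

    #include <stdio.h>
    #include <stdint.h>

    struct device;                               /* forward declaration */

    struct device_funcs {
            uint32_t (*get_caps)(struct device *dev);  /* per-ASIC hook */
    };

    struct device {
            const struct device_funcs *funcs;    /* vtable, set at init */
            uint32_t caps;
    };

    /* thin forwarding macro, like amdgpu_asic_get_virtual_caps() */
    #define device_get_caps(dev) ((dev)->funcs->get_caps((dev)))

    static uint32_t fake_get_caps(struct device *dev)
    {
            return dev->caps;
    }

    int main(void)
    {
            static const struct device_funcs funcs = {
                    .get_caps = fake_get_caps,
            };
            struct device dev = { .funcs = &funcs, .caps = 0x3 };

            printf("caps = 0x%x\n", (unsigned)device_get_caps(&dev));
            return 0;
    }
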
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index b7b583c42..252edba16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -467,13 +467,6 @@ static int acp_soft_reset(void *handle)
return 0;
}
-static void acp_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "ACP STATUS\n");
-}
-
static int acp_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -487,6 +480,7 @@ static int acp_set_powergating_state(void *handle,
}
const struct amd_ip_funcs acp_ip_funcs = {
+ .name = "acp_ip",
.early_init = acp_early_init,
.late_init = NULL,
.sw_init = acp_sw_init,
@@ -498,7 +492,6 @@ const struct amd_ip_funcs acp_ip_funcs = {
.is_idle = acp_is_idle,
.wait_for_idle = acp_wait_for_idle,
.soft_reset = acp_soft_reset,
- .print_status = acp_print_status,
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
};
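
The .name member added to amd_ip_funcs here is what the amdgpu_device.c hunks further down use to report failures as "early_init of IP block <%s> failed" instead of a bare loop index. A self-contained sketch of that named-ops reporting (block layout invented for illustration):

    #include <stdio.h>

    struct ip_funcs {
            const char *name;                    /* new: block name */
            int (*early_init)(void *handle);
    };

    static int acp_early_init(void *handle)
    {
            (void)handle;
            return -1;                           /* force the error path */
    }

    static const struct ip_funcs blocks[] = {
            { .name = "acp_ip", .early_init = acp_early_init },
    };

    int main(void)
    {
            for (unsigned i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
                    int r = blocks[i].early_init(NULL);
                    if (r)
                            fprintf(stderr,
                                    "early_init of IP block <%s> failed %d\n",
                                    blocks[i].name, r);
            }
            return 0;
    }
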
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
index f6e32a639..8a396313c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
@@ -30,7 +30,7 @@
struct amdgpu_acp {
struct device *parent;
- void *cgs_device;
+ struct cgs_device *cgs_device;
struct amd_acp_private *private;
struct mfd_cell *acp_cell;
struct resource *acp_res;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 84b0ce39e..9df1bcb35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -234,16 +234,6 @@ amdgpu_atombios_get_hpd_info_from_gpio(struct amdgpu_device *adev,
return hpd;
}
-static bool amdgpu_atombios_apply_quirks(struct amdgpu_device *adev,
- uint32_t supported_device,
- int *connector_type,
- struct amdgpu_i2c_bus_rec *i2c_bus,
- uint16_t *line_mux,
- struct amdgpu_hpd *hpd)
-{
- return true;
-}
-
static const int object_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_DVII,
@@ -514,11 +504,6 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
conn_id = le16_to_cpu(path->usConnObjectId);
- if (!amdgpu_atombios_apply_quirks
- (adev, le16_to_cpu(path->usDeviceTag), &connector_type,
- &ddc_bus, &conn_id, &hpd))
- continue;
-
amdgpu_display_add_connector(adev,
conn_id,
le16_to_cpu(path->usDeviceTag),
@@ -699,6 +684,36 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
return ret;
}
+union gfx_info {
+ ATOM_GFX_INFO_V2_1 info;
+};
+
+int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, GFX_Info);
+ uint8_t frev, crev;
+ uint16_t data_offset;
+ int ret = -EINVAL;
+
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ union gfx_info *gfx_info = (union gfx_info *)
+ (mode_info->atom_context->bios + data_offset);
+
+ adev->gfx.config.max_shader_engines = gfx_info->info.max_shader_engines;
+ adev->gfx.config.max_tile_pipes = gfx_info->info.max_tile_pipes;
+ adev->gfx.config.max_cu_per_sh = gfx_info->info.max_cu_per_sh;
+ adev->gfx.config.max_sh_per_se = gfx_info->info.max_sh_per_se;
+ adev->gfx.config.max_backends_per_se = gfx_info->info.max_backends_per_se;
+ adev->gfx.config.max_texture_channel_caches =
+ gfx_info->info.max_texture_channel_caches;
+
+ ret = 0;
+ }
+ return ret;
+}
+
union igp_info {
struct _ATOM_INTEGRATED_SYSTEM_INFO info;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 9e1442053..8c2e69661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -144,6 +144,8 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
+int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev);
+
bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
struct amdgpu_atom_ss *ss,
int id, u32 clock);
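
amdgpu_atombios_get_gfx_info() above follows the usual ATOM access pattern: resolve a table's data offset via the master table, then overlay a versioned packed struct on the BIOS image. A hedged user-space sketch of the overlay step, with an invented layout standing in for ATOM_GFX_INFO_V2_1:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* invented packed layout standing in for ATOM_GFX_INFO_V2_1 */
    struct gfx_table {
            uint8_t  frev, crev;                 /* format / content rev */
            uint16_t max_shader_engines;
            uint16_t max_tile_pipes;
    } __attribute__((packed));

    int main(void)
    {
            uint8_t bios[64] = { 0 };
            uint16_t data_offset = 16;           /* as if from the header */
            struct gfx_table t = { 2, 1, 4, 8 };

            memcpy(bios + data_offset, &t, sizeof(t));

            /* overlay the struct on the image, as the driver does */
            const struct gfx_table *info =
                    (const struct gfx_table *)(bios + data_offset);

            if (info->frev == 2 && info->crev == 1)
                    printf("SEs=%u pipes=%u\n",
                           info->max_shader_engines, info->max_tile_pipes);
            return 0;
    }
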
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index cd639c362..33e47a43a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -141,7 +141,7 @@ out_cleanup:
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
{
int i;
- int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
+ static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
640 * 480 * 4,
720 * 480 * 4,
800 * 600 * 4,
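
Promoting the mode table to static const is a small but real win: the array moves to .rodata and is no longer rebuilt on the stack at every call. The same shape in isolation:

    #include <stdio.h>

    /* one copy in .rodata instead of a fresh stack copy per call */
    static const int common_modes[] = {
            640 * 480 * 4,
            720 * 480 * 4,
            800 * 600 * 4,
    };

    int main(void)
    {
            for (unsigned i = 0; i < sizeof(common_modes) / sizeof(common_modes[0]); i++)
                    printf("%d bytes\n", common_modes[i]);
            return 0;
    }
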
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 80add2237..99ca75baa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -349,7 +349,7 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
bool amdgpu_get_bios(struct amdgpu_device *adev)
{
bool r;
- uint16_t tmp;
+ uint16_t tmp, bios_header_start;
r = amdgpu_atrm_get_bios(adev);
if (r == false)
@@ -383,11 +383,11 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
goto free_bios;
}
- adev->bios_header_start = RBIOS16(0x48);
- if (!adev->bios_header_start) {
+ bios_header_start = RBIOS16(0x48);
+ if (!bios_header_start) {
goto free_bios;
}
- tmp = adev->bios_header_start + 4;
+ tmp = bios_header_start + 4;
if (!memcmp(adev->bios + tmp, "ATOM", 4) ||
!memcmp(adev->bios + tmp, "MOTA", 4)) {
adev->is_atom_bios = true;
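
With bios_header_start demoted to a local, the offset lives only as long as probing needs it. RBIOS16(0x48) itself is just a little-endian 16-bit read from the image; a standalone sketch of that helper (rd16le is an invented name, semantics assumed):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* little-endian 16-bit read from a BIOS image, like RBIOS16() */
    static uint16_t rd16le(const uint8_t *img, size_t off)
    {
            return (uint16_t)(img[off] | (img[off + 1] << 8));
    }

    int main(void)
    {
            uint8_t bios[0x50] = { 0 };

            bios[0x48] = 0x34;                   /* header start = 0x1234 */
            bios[0x49] = 0x12;

            uint16_t hdr = rd16le(bios, 0x48);
            printf("header at 0x%04x, signature at 0x%04x\n", hdr, hdr + 4);
            return 0;
    }
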
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index eacd810fc..823bf5e0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -106,7 +106,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
struct amdgpu_bo *bo;
struct mm_struct *usermm;
- gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
+ gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
if (!gobj) {
r = -ENOENT;
goto error_free;
@@ -263,7 +263,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
for (i = 0; i < args->in.bo_number; ++i) {
if (copy_from_user(&info[i], uptr, bytes))
goto error_free;
-
+
uptr += args->in.bo_info_size;
}
}
@@ -271,7 +271,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
switch (args->in.operation) {
case AMDGPU_BO_LIST_OP_CREATE:
r = amdgpu_bo_list_create(fpriv, &list, &handle);
- if (r)
+ if (r)
goto error_free;
r = amdgpu_bo_list_set(adev, filp, list, info,
@@ -281,7 +281,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
goto error_free;
break;
-
+
case AMDGPU_BO_LIST_OP_DESTROY:
amdgpu_bo_list_destroy(fpriv, handle);
handle = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index fa197c9af..6f9dcfddc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -42,7 +42,7 @@ struct amdgpu_cgs_device {
struct amdgpu_device *adev = \
((struct amdgpu_cgs_device *)cgs_device)->adev
-static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
+static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
uint64_t *mc_start, uint64_t *mc_size,
uint64_t *mem_size)
{
@@ -73,7 +73,7 @@ static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
return 0;
}
-static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
+static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
uint64_t size,
uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *kmem_handle, uint64_t *mcaddr)
@@ -102,7 +102,7 @@ static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
return ret;
}
-static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
+static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle)
{
struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
@@ -118,7 +118,7 @@ static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
return 0;
}
-static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
+static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
uint64_t min_offset, uint64_t max_offset,
@@ -208,7 +208,7 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
return ret;
}
-static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
+static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -225,7 +225,7 @@ static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
return 0;
}
-static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
+static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
uint64_t *mcaddr)
{
int r;
@@ -246,7 +246,7 @@ static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
return r;
}
-static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
+static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -258,7 +258,7 @@ static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
return r;
}
-static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
+static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
void **map)
{
int r;
@@ -271,7 +271,7 @@ static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
return r;
}
-static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
+static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
int r;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -283,20 +283,20 @@ static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
return r;
}
-static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
+static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
CGS_FUNC_ADEV;
return RREG32(offset);
}
-static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
+static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
uint32_t value)
{
CGS_FUNC_ADEV;
WREG32(offset, value);
}
-static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
+static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
enum cgs_ind_reg space,
unsigned index)
{
@@ -320,7 +320,7 @@ static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
return 0;
}
-static void amdgpu_cgs_write_ind_register(void *cgs_device,
+static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
enum cgs_ind_reg space,
unsigned index, uint32_t value)
{
@@ -343,7 +343,7 @@ static void amdgpu_cgs_write_ind_register(void *cgs_device,
WARN(1, "Invalid indirect register space");
}
-static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
+static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr)
{
CGS_FUNC_ADEV;
uint8_t val;
@@ -353,7 +353,7 @@ static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
return val;
}
-static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
+static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr)
{
CGS_FUNC_ADEV;
uint16_t val;
@@ -363,7 +363,7 @@ static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
return val;
}
-static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
+static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device,
unsigned addr)
{
CGS_FUNC_ADEV;
@@ -374,7 +374,7 @@ static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
return val;
}
-static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
+static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr,
uint8_t value)
{
CGS_FUNC_ADEV;
@@ -382,7 +382,7 @@ static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
WARN(ret, "pci_write_config_byte error");
}
-static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
+static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr,
uint16_t value)
{
CGS_FUNC_ADEV;
@@ -390,7 +390,7 @@ static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
WARN(ret, "pci_write_config_word error");
}
-static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
+static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr,
uint32_t value)
{
CGS_FUNC_ADEV;
@@ -399,7 +399,7 @@ static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
}
-static int amdgpu_cgs_get_pci_resource(void *cgs_device,
+static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
enum cgs_resource_type resource_type,
uint64_t size,
uint64_t offset,
@@ -433,7 +433,7 @@ static int amdgpu_cgs_get_pci_resource(void *cgs_device,
}
}
-static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
+static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
unsigned table, uint16_t *size,
uint8_t *frev, uint8_t *crev)
{
@@ -449,7 +449,7 @@ static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
return NULL;
}
-static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
+static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
uint8_t *frev, uint8_t *crev)
{
CGS_FUNC_ADEV;
@@ -462,7 +462,7 @@ static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
return -EINVAL;
}
-static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
+static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
void *args)
{
CGS_FUNC_ADEV;
@@ -471,33 +471,33 @@ static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
adev->mode_info.atom_context, table, args);
}
-static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
+static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request)
{
/* TODO */
return 0;
}
-static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
+static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request)
{
/* TODO */
return 0;
}
-static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
+static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request,
int active)
{
/* TODO */
return 0;
}
-static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
+static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request,
enum cgs_clock clock, unsigned freq)
{
/* TODO */
return 0;
}
-static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
+static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request,
enum cgs_engine engine, int powered)
{
/* TODO */
@@ -506,7 +506,7 @@ static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
-static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
+static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device,
enum cgs_clock clock,
struct cgs_clock_limits *limits)
{
@@ -514,7 +514,7 @@ static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
return 0;
}
-static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
+static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask,
const uint32_t *voltages)
{
DRM_ERROR("not implemented");
@@ -565,7 +565,7 @@ static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
.process = cgs_process_irq,
};
-static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
+static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id,
unsigned num_types,
cgs_irq_source_set_func_t set,
cgs_irq_handler_func_t handler,
@@ -600,19 +600,19 @@ static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
return ret;
}
-static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
+static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
CGS_FUNC_ADEV;
return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}
-static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
+static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
CGS_FUNC_ADEV;
return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}
-int amdgpu_cgs_set_clockgating_state(void *cgs_device,
+int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state)
{
@@ -633,7 +633,7 @@ int amdgpu_cgs_set_clockgating_state(void *cgs_device,
return r;
}
-int amdgpu_cgs_set_powergating_state(void *cgs_device,
+int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_powergating_state state)
{
@@ -655,7 +655,7 @@ int amdgpu_cgs_set_powergating_state(void *cgs_device,
}
-static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
+static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
CGS_FUNC_ADEV;
enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
@@ -681,9 +681,10 @@ static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
result = AMDGPU_UCODE_ID_CP_MEC1;
break;
case CGS_UCODE_ID_CP_MEC_JT2:
- if (adev->asic_type == CHIP_TONGA)
+ if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
+ || adev->asic_type == CHIP_POLARIS10)
result = AMDGPU_UCODE_ID_CP_MEC2;
- else if (adev->asic_type == CHIP_CARRIZO)
+ else
result = AMDGPU_UCODE_ID_CP_MEC1;
break;
case CGS_UCODE_ID_RLC_G:
@@ -695,13 +696,24 @@ static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
return result;
}
-static int amdgpu_cgs_get_firmware_info(void *cgs_device,
+static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
+{
+ CGS_FUNC_ADEV;
+ if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
+ release_firmware(adev->pm.fw);
+ return 0;
+ }
+ /* cannot release other firmware, because it was not created by cgs */
+ return -EINVAL;
+}
+
+static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
{
CGS_FUNC_ADEV;
- if (CGS_UCODE_ID_SMU != type) {
+ if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
uint64_t gpu_addr;
uint32_t data_size;
const struct gfx_firmware_header_v1_0 *header;
@@ -734,30 +746,44 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device,
const uint8_t *src;
const struct smc_firmware_header_v1_0 *hdr;
- switch (adev->asic_type) {
- case CHIP_TONGA:
- strcpy(fw_name, "/*(DEBLOBBED)*/");
- break;
- case CHIP_FIJI:
- strcpy(fw_name, "/*(DEBLOBBED)*/");
- break;
- default:
- DRM_ERROR("SMC firmware not supported\n");
- return -EINVAL;
- }
+ if (!adev->pm.fw) {
+ switch (adev->asic_type) {
+ case CHIP_TONGA:
+ strcpy(fw_name, "/*(DEBLOBBED)*/");
+ break;
+ case CHIP_FIJI:
+ strcpy(fw_name, "/*(DEBLOBBED)*/");
+ break;
+ case CHIP_POLARIS11:
+ if (type == CGS_UCODE_ID_SMU)
+ strcpy(fw_name, "/*(DEBLOBBED)*/");
+ else if (type == CGS_UCODE_ID_SMU_SK)
+ strcpy(fw_name, "/*(DEBLOBBED)*/");
+ break;
+ case CHIP_POLARIS10:
+ if (type == CGS_UCODE_ID_SMU)
+ strcpy(fw_name, "/*(DEBLOBBED)*/");
+ else if (type == CGS_UCODE_ID_SMU_SK)
+ strcpy(fw_name, "/*(DEBLOBBED)*/");
+ break;
+ default:
+ DRM_ERROR("SMC firmware not supported\n");
+ return -EINVAL;
+ }
- err = reject_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err) {
- DRM_ERROR("Failed to request firmware\n");
- return err;
- }
+ err = reject_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err) {
+ DRM_ERROR("Failed to request firmware\n");
+ return err;
+ }
- err = amdgpu_ucode_validate(adev->pm.fw);
- if (err) {
- DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- return err;
+ err = amdgpu_ucode_validate(adev->pm.fw);
+ if (err) {
+ DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+ return err;
+ }
}
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
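
Guarding the SMC firmware request with if (!adev->pm.fw) turns it into a load-once cache, paired with the new amdgpu_cgs_rel_firmware() release hook above. The generic lazy-load/release shape, where strdup()/free() merely stand in for request_firmware()/release_firmware():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *fw;                             /* cached blob, NULL at start */

    static int get_firmware(const char *name)
    {
            if (!fw) {                           /* load only once */
                    fw = strdup(name);           /* ~ request_firmware() */
                    if (!fw)
                            return -1;
            }
            return 0;
    }

    static void rel_firmware(void)
    {
            free(fw);                            /* ~ release_firmware() */
            fw = NULL;
    }

    int main(void)
    {
            if (!get_firmware("smc.bin"))
                    printf("loaded: %s\n", fw);
            rel_firmware();
            return 0;
    }
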
@@ -774,7 +800,7 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device,
return 0;
}
-static int amdgpu_cgs_query_system_info(void *cgs_device,
+static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info)
{
CGS_FUNC_ADEV;
@@ -801,6 +827,9 @@ static int amdgpu_cgs_query_system_info(void *cgs_device,
case CGS_SYSTEM_INFO_PG_FLAGS:
sys_info->value = adev->pg_flags;
break;
+ case CGS_SYSTEM_INFO_GFX_CU_INFO:
+ sys_info->value = adev->gfx.cu_info.number;
+ break;
default:
return -ENODEV;
}
@@ -808,7 +837,7 @@ static int amdgpu_cgs_query_system_info(void *cgs_device,
return 0;
}
-static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
+static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
struct cgs_display_info *info)
{
CGS_FUNC_ADEV;
@@ -851,7 +880,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
}
-static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
+static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
CGS_FUNC_ADEV;
@@ -867,7 +896,7 @@ static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled)
*/
#if defined(CONFIG_ACPI)
-static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
+static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
struct cgs_acpi_method_info *info)
{
CGS_FUNC_ADEV;
@@ -1030,14 +1059,14 @@ error:
return result;
}
#else
-static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
+static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
struct cgs_acpi_method_info *info)
{
return -EIO;
}
#endif
-int amdgpu_cgs_call_acpi_method(void *cgs_device,
+int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
uint32_t acpi_method,
uint32_t acpi_function,
void *pinput, void *poutput,
@@ -1107,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
amdgpu_cgs_pm_query_clock_limits,
amdgpu_cgs_set_camera_voltages,
amdgpu_cgs_get_firmware_info,
+ amdgpu_cgs_rel_firmware,
amdgpu_cgs_set_powergating_state,
amdgpu_cgs_set_clockgating_state,
amdgpu_cgs_get_active_displays_info,
@@ -1121,7 +1151,7 @@ static const struct cgs_os_ops amdgpu_cgs_os_ops = {
amdgpu_cgs_irq_put
};
-void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
+struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
struct amdgpu_cgs_device *cgs_device =
kmalloc(sizeof(*cgs_device), GFP_KERNEL);
@@ -1135,10 +1165,10 @@ void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
cgs_device->adev = adev;
- return cgs_device;
+ return (struct cgs_device *)cgs_device;
}
-void amdgpu_cgs_destroy_device(void *cgs_device)
+void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
kfree(cgs_device);
}
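
The sweep from void *cgs_device to struct cgs_device * restores type checking on every CGS entry point; the driver recovers its private wrapper by placing the base struct first and casting back, which is what the CGS_FUNC_ADEV macro relies on. A compact sketch of that embed-and-downcast pattern (names simplified):

    #include <stdio.h>

    struct cgs_device {                          /* opaque base handle */
            int placeholder;
    };

    struct amdgpu_cgs_device {                   /* wrapper; base comes first */
            struct cgs_device base;
            int adev_id;                         /* stands in for the adev ptr */
    };

    /* like CGS_FUNC_ADEV: recover the wrapper from the base pointer */
    #define CGS_ADEV_ID(cgs) (((struct amdgpu_cgs_device *)(cgs))->adev_id)

    int main(void)
    {
            struct amdgpu_cgs_device dev = { .adev_id = 42 };
            struct cgs_device *handle = &dev.base;

            printf("adev id = %d\n", CGS_ADEV_ID(handle));
            return 0;
    }
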
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 7ef2c1392..cb07da411 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -439,7 +439,7 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_display_mode *mode = NULL;
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
int i;
- struct mode_size {
+ static const struct mode_size {
int w;
int h;
} common_modes[17] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9392e50a7..9bc8f1d99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -24,7 +24,6 @@
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
-#include <linux/list_sort.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
@@ -88,44 +87,41 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
}
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
- struct amdgpu_user_fence *uf,
- struct drm_amdgpu_cs_chunk_fence *fence_data)
+ struct drm_amdgpu_cs_chunk_fence *data,
+ uint32_t *offset)
{
struct drm_gem_object *gobj;
- uint32_t handle;
- handle = fence_data->handle;
- gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
- fence_data->handle);
+ gobj = drm_gem_object_lookup(p->filp, data->handle);
if (gobj == NULL)
return -EINVAL;
- uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
- uf->offset = fence_data->offset;
-
- if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) {
- drm_gem_object_unreference_unlocked(gobj);
- return -EINVAL;
- }
-
- p->uf_entry.robj = amdgpu_bo_ref(uf->bo);
+ p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
p->uf_entry.tv.shared = true;
p->uf_entry.user_pages = NULL;
+ *offset = data->offset;
drm_gem_object_unreference_unlocked(gobj);
+
+ if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
+ amdgpu_bo_unref(&p->uf_entry.robj);
+ return -EINVAL;
+ }
+
return 0;
}
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
union drm_amdgpu_cs *cs = data;
uint64_t *chunk_array_user;
uint64_t *chunk_array;
- struct amdgpu_user_fence uf = {};
unsigned size, num_ibs = 0;
+ uint32_t uf_offset = 0;
int i;
int ret;
@@ -200,7 +196,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
goto free_partial_kdata;
}
- ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata);
+ ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
+ &uf_offset);
if (ret)
goto free_partial_kdata;
@@ -215,11 +212,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
}
- ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job);
+ ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
if (ret)
goto free_all_kdata;
- p->job->uf = uf;
+ if (p->uf_entry.robj) {
+ p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
+ p->job->uf_offset = uf_offset;
+ }
kfree(chunk_array);
return 0;
@@ -377,7 +377,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
- if (p->job->uf.bo)
+ if (p->uf_entry.robj)
list_add(&p->uf_entry.tv.head, &p->validated);
if (need_mmap_lock)
@@ -473,6 +473,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto error_validate;
if (p->bo_list) {
+ struct amdgpu_bo *gds = p->bo_list->gds_obj;
+ struct amdgpu_bo *gws = p->bo_list->gws_obj;
+ struct amdgpu_bo *oa = p->bo_list->oa_obj;
struct amdgpu_vm *vm = &fpriv->vm;
unsigned i;
@@ -481,6 +484,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
}
+
+ if (gds) {
+ p->job->gds_base = amdgpu_bo_gpu_offset(gds);
+ p->job->gds_size = amdgpu_bo_size(gds);
+ }
+ if (gws) {
+ p->job->gws_base = amdgpu_bo_gpu_offset(gws);
+ p->job->gws_size = amdgpu_bo_size(gws);
+ }
+ if (oa) {
+ p->job->oa_base = amdgpu_bo_gpu_offset(oa);
+ p->job->oa_size = amdgpu_bo_size(oa);
+ }
}
error_validate:
@@ -527,16 +543,6 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
return 0;
}
-static int cmp_size_smaller_first(void *priv, struct list_head *a,
- struct list_head *b)
-{
- struct amdgpu_bo_list_entry *la = list_entry(a, struct amdgpu_bo_list_entry, tv.head);
- struct amdgpu_bo_list_entry *lb = list_entry(b, struct amdgpu_bo_list_entry, tv.head);
-
- /* Sort A before B if A is smaller. */
- return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
-}
-
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
@@ -553,18 +559,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
if (!error) {
amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
- /* Sort the buffer list from the smallest to largest buffer,
- * which affects the order of buffers in the LRU list.
- * This assures that the smallest buffers are added first
- * to the LRU list, so they are likely to be later evicted
- * first, instead of large buffers whose eviction is more
- * expensive.
- *
- * This slightly lowers the number of bytes moved by TTM
- * per frame under memory pressure.
- */
- list_sort(NULL, &parser->validated, cmp_size_smaller_first);
-
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
parser->fence);
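
The dropped cmp_size_smaller_first() was a plain size comparator for list_sort(), and its comment explained the smallest-first LRU rationale. For reference, the same comparator shape in portable C via qsort(), using the overflow-safe sign idiom rather than subtraction (struct name invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct bo_entry { long num_pages; };         /* invented stand-in */

    /* smallest first; the sign idiom avoids subtraction overflow */
    static int cmp_size_smaller_first(const void *a, const void *b)
    {
            const struct bo_entry *la = a, *lb = b;

            return (la->num_pages > lb->num_pages) -
                   (la->num_pages < lb->num_pages);
    }

    int main(void)
    {
            struct bo_entry bos[] = { { 32 }, { 4 }, { 16 } };

            qsort(bos, 3, sizeof(bos[0]), cmp_size_smaller_first);
            for (int i = 0; i < 3; i++)
                    printf("%ld pages\n", bos[i].num_pages);
            return 0;
    }
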
@@ -763,41 +757,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags;
- ib->ctx = parser->ctx;
j++;
}
- /* add GDS resources to first IB */
- if (parser->bo_list) {
- struct amdgpu_bo *gds = parser->bo_list->gds_obj;
- struct amdgpu_bo *gws = parser->bo_list->gws_obj;
- struct amdgpu_bo *oa = parser->bo_list->oa_obj;
- struct amdgpu_ib *ib = &parser->job->ibs[0];
-
- if (gds) {
- ib->gds_base = amdgpu_bo_gpu_offset(gds);
- ib->gds_size = amdgpu_bo_size(gds);
- }
- if (gws) {
- ib->gws_base = amdgpu_bo_gpu_offset(gws);
- ib->gws_size = amdgpu_bo_size(gws);
- }
- if (oa) {
- ib->oa_base = amdgpu_bo_gpu_offset(oa);
- ib->oa_size = amdgpu_bo_size(oa);
- }
- }
- /* wrap the last IB with user fence */
- if (parser->job->uf.bo) {
- struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1];
-
- /* UVD & VCE fw doesn't support user fences */
- if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
- parser->job->ring->type == AMDGPU_RING_TYPE_VCE)
- return -EINVAL;
-
- ib->user = &parser->job->uf;
- }
+ /* UVD & VCE fw doesn't support user fences */
+ if (parser->job->uf_bo && (
+ parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
+ parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
+ return -EINVAL;
return 0;
}
@@ -862,28 +829,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_ring *ring = p->job->ring;
- struct amd_sched_fence *fence;
+ struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ struct fence *fence;
struct amdgpu_job *job;
+ int r;
job = p->job;
p->job = NULL;
- job->base.sched = &ring->sched;
- job->base.s_entity = &p->ctx->rings[ring->idx].entity;
- job->owner = p->filp;
-
- fence = amd_sched_fence_create(job->base.s_entity, p->filp);
- if (!fence) {
+ r = amd_sched_job_init(&job->base, &ring->sched,
+ entity, amdgpu_job_timeout_func,
+ amdgpu_job_free_func,
+ p->filp, &fence);
+ if (r) {
amdgpu_job_free(job);
- return -ENOMEM;
+ return r;
}
- job->base.s_fence = fence;
- p->fence = fence_get(&fence->base);
-
- cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring,
- &fence->base);
- job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
+ job->owner = p->filp;
+ job->ctx = entity->fence_context;
+ p->fence = fence_get(fence);
+ cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
+ job->uf_sequence = cs->out.handle;
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2139da773..6e920086a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -59,6 +59,8 @@ static const char *amdgpu_asic_name[] = {
"FIJI",
"CARRIZO",
"STONEY",
+ "POLARIS10",
+ "POLARIS11",
"LAST",
};
@@ -346,7 +348,7 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev)
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
- adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
+ adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
if (adev->doorbell.num_doorbells == 0)
return -EINVAL;
@@ -825,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
*/
static void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
- if (adev->mode_info.atom_context)
+ if (adev->mode_info.atom_context) {
kfree(adev->mode_info.atom_context->scratch);
+ kfree(adev->mode_info.atom_context->iio);
+ }
kfree(adev->mode_info.atom_context);
adev->mode_info.atom_context = NULL;
kfree(adev->mode_info.atom_card_info);
@@ -936,15 +940,11 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
}
if (amdgpu_gart_size != -1) {
- /* gtt size must be power of two and greater or equal to 32M */
+ /* gtt size must be greater than or equal to 32M */
if (amdgpu_gart_size < 32) {
dev_warn(adev->dev, "gart size (%d) too small\n",
amdgpu_gart_size);
amdgpu_gart_size = -1;
- } else if (!amdgpu_check_pot_argument(amdgpu_gart_size)) {
- dev_warn(adev->dev, "gart size (%d) must be a power of 2\n",
- amdgpu_gart_size);
- amdgpu_gart_size = -1;
}
}
@@ -1144,6 +1144,8 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
case CHIP_TOPAZ:
case CHIP_TONGA:
case CHIP_FIJI:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
case CHIP_CARRIZO:
case CHIP_STONEY:
if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
@@ -1196,7 +1198,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
if (r == -ENOENT) {
adev->ip_block_status[i].valid = false;
} else if (r) {
- DRM_ERROR("early_init %d failed %d\n", i, r);
+ DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
} else {
adev->ip_block_status[i].valid = true;
@@ -1219,7 +1221,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
continue;
r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
if (r) {
- DRM_ERROR("sw_init %d failed %d\n", i, r);
+ DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
}
adev->ip_block_status[i].sw = true;
@@ -1252,7 +1254,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
continue;
r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
if (r) {
- DRM_ERROR("hw_init %d failed %d\n", i, r);
+ DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
}
adev->ip_block_status[i].hw = true;
@@ -1272,13 +1274,13 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_GATE);
if (r) {
- DRM_ERROR("set_clockgating_state(gate) %d failed %d\n", i, r);
+ DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
}
if (adev->ip_blocks[i].funcs->late_init) {
r = adev->ip_blocks[i].funcs->late_init((void *)adev);
if (r) {
- DRM_ERROR("late_init %d failed %d\n", i, r);
+ DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
}
}
@@ -1302,13 +1304,13 @@ static int amdgpu_fini(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
}
r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("hw_fini %d failed %d\n", i, r);
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
}
adev->ip_block_status[i].hw = false;
}
@@ -1319,12 +1321,17 @@ static int amdgpu_fini(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("sw_fini %d failed %d\n", i, r);
+ DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
}
adev->ip_block_status[i].sw = false;
adev->ip_block_status[i].valid = false;
}
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (adev->ip_blocks[i].funcs->late_fini)
+ adev->ip_blocks[i].funcs->late_fini((void *)adev);
+ }
+
return 0;
}
@@ -1332,20 +1339,29 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
{
int i, r;
+ /* ungate SMC block first */
+ r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
+ AMD_CG_STATE_UNGATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
+ }
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_status[i].valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
+ if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_SMC) {
+ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+ }
}
/* XXX handle errors */
r = adev->ip_blocks[i].funcs->suspend(adev);
/* XXX handle errors */
if (r) {
- DRM_ERROR("suspend %d failed %d\n", i, r);
+ DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
}
}
@@ -1361,7 +1377,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
continue;
r = adev->ip_blocks[i].funcs->resume(adev);
if (r) {
- DRM_ERROR("resume %d failed %d\n", i, r);
+ DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r;
}
}
@@ -1369,6 +1385,15 @@ static int amdgpu_resume(struct amdgpu_device *adev)
return 0;
}
+static bool amdgpu_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+ return false;
+#endif
+}
+
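
amdgpu_device_is_virtual() keys off X86_FEATURE_HYPERVISOR, i.e. CPUID leaf 1, ECX bit 31, which hypervisors set for their guests. A minimal x86-only user-space equivalent using the compiler's cpuid helper:

    #include <stdio.h>
    #include <cpuid.h>                           /* GCC/Clang, x86 only */

    /* CPUID leaf 1, ECX bit 31: set when running under a hypervisor */
    static int is_virtual(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                    return 0;
            return (ecx >> 31) & 1;
    }

    int main(void)
    {
            printf("hypervisor: %s\n", is_virtual() ? "yes" : "no");
            return 0;
    }
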
/**
* amdgpu_device_init - initialize the driver
*
@@ -1503,9 +1528,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->virtualization.supports_sr_iov =
amdgpu_atombios_has_gpu_virtualization_table(adev);
+ /* Check if we are executing in a virtualized environment */
+ adev->virtualization.is_virtual = amdgpu_device_is_virtual();
+ adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+
/* Post card if necessary */
if (!amdgpu_card_posted(adev) ||
- adev->virtualization.supports_sr_iov) {
+ (adev->virtualization.is_virtual &&
+ !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
if (!adev->bios) {
dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
@@ -2007,7 +2037,7 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev)
* Debugfs
*/
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- struct drm_info_list *files,
+ const struct drm_info_list *files,
unsigned nfiles)
{
unsigned i;
@@ -2119,32 +2149,246 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
return result;
}
+static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_PCIE(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_PCIE(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_DIDT(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_DIDT(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_SMC(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_SMC(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
.write = amdgpu_debugfs_regs_write,
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_didt_read,
+ .write = amdgpu_debugfs_regs_didt_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_pcie_read,
+ .write = amdgpu_debugfs_regs_pcie_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_smc_read,
+ .write = amdgpu_debugfs_regs_smc_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations *debugfs_regs[] = {
+ &amdgpu_debugfs_regs_fops,
+ &amdgpu_debugfs_regs_didt_fops,
+ &amdgpu_debugfs_regs_pcie_fops,
+ &amdgpu_debugfs_regs_smc_fops,
+};
+
+static const char *debugfs_regs_names[] = {
+ "amdgpu_regs",
+ "amdgpu_regs_didt",
+ "amdgpu_regs_pcie",
+ "amdgpu_regs_smc",
+};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
struct drm_minor *minor = adev->ddev->primary;
struct dentry *ent, *root = minor->debugfs_root;
+ unsigned i, j;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ ent = debugfs_create_file(debugfs_regs_names[i],
+ S_IFREG | S_IRUGO, root,
+ adev, debugfs_regs[i]);
+ if (IS_ERR(ent)) {
+ for (j = 0; j < i; j++) {
+ debugfs_remove(adev->debugfs_regs[j]);
+ adev->debugfs_regs[j] = NULL;
+ }
+ return PTR_ERR(ent);
+ }
- ent = debugfs_create_file("amdgpu_regs", S_IFREG | S_IRUGO, root,
- adev, &amdgpu_debugfs_regs_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- i_size_write(ent->d_inode, adev->rmmio_size);
- adev->debugfs_regs = ent;
+ if (!i)
+ i_size_write(ent->d_inode, adev->rmmio_size);
+ adev->debugfs_regs[i] = ent;
+ }
return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
{
- debugfs_remove(adev->debugfs_regs);
- adev->debugfs_regs = NULL;
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ if (adev->debugfs_regs[i]) {
+ debugfs_remove(adev->debugfs_regs[i]);
+ adev->debugfs_regs[i] = NULL;
+ }
+ }
}
int amdgpu_debugfs_init(struct drm_minor *minor)
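
Switching from one dentry to an array turns debugfs registration into the standard table-driven create/unwind pattern: on the first failure, destroy exactly the entries created so far (note the unwind loop must index with j, as fixed above). A generic sketch with invented create()/destroy() stand-ins for debugfs_create_file()/debugfs_remove():

    #include <stdio.h>

    #define N 4

    static const char *names[N] = { "regs", "didt", "pcie", "smc" };
    static int handles[N];                       /* 0 = not created */

    static int create(const char *name) { (void)name; return 1; }
    static void destroy(int h)          { (void)h; }

    static int register_all(void)
    {
            for (int i = 0; i < N; i++) {
                    handles[i] = create(names[i]);
                    if (!handles[i]) {
                            /* unwind only what was created: index with j */
                            for (int j = 0; j < i; j++) {
                                    destroy(handles[j]);
                                    handles[j] = 0;
                            }
                            return -1;
                    }
            }
            return 0;
    }

    int main(void)
    {
            printf("register_all: %d\n", register_all());
            return 0;
    }
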
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 3fb405b3a..b0832da2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -131,12 +131,17 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
vblank->framedur_ns / 1000,
vblank->linedur_ns / 1000, stat, vpos, hpos);
- /* set the flip status */
+ /* Do the flip (mmio) */
+ adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
+
+ /* Set the flip status */
amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- /* Do the flip (mmio) */
- adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
+
+ DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
+ amdgpuCrtc->crtc_id, amdgpuCrtc, work);
+
}
/*
@@ -192,6 +197,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->adev = adev;
work->crtc_id = amdgpu_crtc->crtc_id;
+ work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
/* schedule unpin of the old buffer */
old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
@@ -252,6 +258,9 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
amdgpu_crtc->pflip_works = work;
+
+ DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
+ amdgpu_crtc->crtc_id, amdgpu_crtc, work);
/* update crtc fb */
crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
@@ -554,7 +563,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
struct amdgpu_framebuffer *amdgpu_fb;
int ret;
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
"can't create framebuffer\n", mode_cmd->handles[0]);
@@ -588,20 +597,20 @@ const struct drm_mode_config_funcs amdgpu_mode_funcs = {
.output_poll_changed = amdgpu_output_poll_changed
};
-static struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
+static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
{ { UNDERSCAN_OFF, "off" },
{ UNDERSCAN_ON, "on" },
{ UNDERSCAN_AUTO, "auto" },
};
-static struct drm_prop_enum_list amdgpu_audio_enum_list[] =
+static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
{ { AMDGPU_AUDIO_DISABLE, "off" },
{ AMDGPU_AUDIO_ENABLE, "on" },
{ AMDGPU_AUDIO_AUTO, "auto" },
};
/* XXX support different dither options? spatial, temporal, both, etc. */
-static struct drm_prop_enum_list amdgpu_dither_enum_list[] =
+static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
{ { AMDGPU_FMT_DITHER_DISABLE, "off" },
{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f1e17d600..f888c015f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -50,9 +50,11 @@
* KMS wrapper.
* - 3.0.0 - initial driver
* - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
+ * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
+ * at the end of IBs.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 1
+#define KMS_DRIVER_MINOR 2
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -166,7 +168,7 @@ module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
-static struct pci_device_id pciidlist[] = {
+static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
@@ -277,6 +279,28 @@ static struct pci_device_id pciidlist[] = {
{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
/* stoney */
{0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU},
+ /* Polaris11 */
+ {0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ /* Polaris10 */
+ {0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0, 0, 0}
};
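The new Polaris rows follow the table layout established above: vendor 0x1002, the device ID, wildcarded subsystem and class fields, and a driver_data word that packs the ASIC enum together with flag bits such as AMD_IS_APU. A user-space sketch of that packing; the bit positions and enum values below are illustrative assumptions, not the kernel's constants:

#include <stdio.h>

#define AMD_IS_MOBILITY (1u << 30)   /* assumed flag bit, for the demo only */
#define AMD_IS_APU      (1u << 31)   /* assumed flag bit, for the demo only */
#define FLAG_MASK       (AMD_IS_MOBILITY | AMD_IS_APU)

enum chip { CHIP_POLARIS10 = 1, CHIP_POLARIS11 = 2 };  /* illustrative */

static void decode(unsigned driver_data)
{
	printf("asic=%u mobility=%u apu=%u\n",
	       driver_data & ~FLAG_MASK,
	       !!(driver_data & AMD_IS_MOBILITY),
	       !!(driver_data & AMD_IS_APU));
}

int main(void)
{
	decode(CHIP_POLARIS10);               /* plain discrete entry */
	decode(CHIP_POLARIS11 | AMD_IS_APU);  /* hypothetical APU entry */
	return 0;
}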
@@ -514,7 +538,7 @@ static struct drm_driver kms_driver = {
.irq_uninstall = amdgpu_irq_uninstall,
.irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms,
- .gem_free_object = amdgpu_gem_object_free,
+ .gem_free_object_unlocked = amdgpu_gem_object_free,
.gem_open_object = amdgpu_gem_object_open,
.gem_close_object = amdgpu_gem_object_close,
.dumb_create = amdgpu_mode_dumb_create,
@@ -553,22 +577,22 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.driver.pm = &amdgpu_pm_ops,
};
+
+
static int __init amdgpu_init(void)
{
amdgpu_sync_init();
-#ifdef CONFIG_VGA_CONSOLE
+ amdgpu_fence_slab_init();
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
return -EINVAL;
}
-#endif
DRM_INFO("amdgpu kernel modesetting enabled.\n");
driver = &kms_driver;
pdriver = &amdgpu_kms_pci_driver;
driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
-
/* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
}
@@ -579,6 +603,7 @@ static void __exit amdgpu_exit(void)
drm_pci_exit(driver, pdriver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
+ amdgpu_fence_slab_fini();
}
module_init(amdgpu_init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d81f1f488..d1558768c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -55,8 +55,21 @@ struct amdgpu_fence {
};
static struct kmem_cache *amdgpu_fence_slab;
-static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
+int amdgpu_fence_slab_init(void)
+{
+ amdgpu_fence_slab = kmem_cache_create(
+ "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!amdgpu_fence_slab)
+ return -ENOMEM;
+ return 0;
+}
+
+void amdgpu_fence_slab_fini(void)
+{
+ kmem_cache_destroy(amdgpu_fence_slab);
+}
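These helpers move the fence slab's lifetime from per-device reference counting (removed further down in amdgpu_fence_driver_init/fini) to module scope, paired with the amdgpu_init()/amdgpu_exit() hunks above. A minimal kernel-style sketch of the pattern with a hypothetical my_obj cache; kmem_cache_destroy() accepts NULL, so teardown needs no guard:

#include <linux/module.h>
#include <linux/slab.h>

struct my_obj { int payload; };          /* hypothetical cached object */
static struct kmem_cache *my_slab;

static int __init my_init(void)
{
	my_slab = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
				    SLAB_HWCACHE_ALIGN, NULL);
	return my_slab ? 0 : -ENOMEM;
}

static void __exit my_exit(void)
{
	kmem_cache_destroy(my_slab);     /* NULL-safe */
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");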
/*
* Cast helper
*/
@@ -198,7 +211,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
/* There is always exactly one thread signaling this fence slot */
fence = rcu_dereference_protected(*ptr, 1);
- rcu_assign_pointer(*ptr, NULL);
+ RCU_INIT_POINTER(*ptr, NULL);
BUG_ON(!fence);
@@ -352,9 +365,9 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
(unsigned long)ring);
- ring->fence_drv.num_fences_mask = num_hw_submission - 1;
+ ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
spin_lock_init(&ring->fence_drv.lock);
- ring->fence_drv.fences = kcalloc(num_hw_submission, sizeof(void *),
+ ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
GFP_KERNEL);
if (!ring->fence_drv.fences)
return -ENOMEM;
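Doubling the slot count while keeping num_fences_mask = count - 1 preserves the power-of-two invariant, so a sequence number maps to its fence slot with a single AND even as it wraps. A user-space sketch of the indexing, assuming num_hw_submission is a power of two as the driver requires:

#include <stdio.h>

int main(void)
{
	unsigned num_hw_submission = 4;            /* must be a power of two */
	unsigned mask = num_hw_submission * 2 - 1; /* 0x7: twice the slots */
	unsigned seq;

	/* each sequence number lands in slot (seq & mask), wrapping freely */
	for (seq = 0; seq < 12; ++seq)
		printf("seq %2u -> slot %u\n", seq, seq & mask);
	return 0;
}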
@@ -396,13 +409,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
*/
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
- if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
- amdgpu_fence_slab = kmem_cache_create(
- "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!amdgpu_fence_slab)
- return -ENOMEM;
- }
if (amdgpu_debugfs_fence_init(adev))
dev_err(adev->dev, "fence debugfs file creation failed\n");
@@ -437,13 +443,10 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
amd_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
- fence_put(ring->fence_drv.fences[i]);
+ fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
ring->fence_drv.initialized = false;
}
-
- if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
- kmem_cache_destroy(amdgpu_fence_slab);
}
/**
@@ -639,7 +642,7 @@ static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
return 0;
}
-static struct drm_info_list amdgpu_debugfs_fence_list[] = {
+static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
{"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 7312d729d..921bce2df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -238,18 +238,17 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- if (adev->gart.pages[p]) {
- adev->gart.pages[p] = NULL;
- adev->gart.pages_addr[p] = adev->dummy_page.addr;
- page_base = adev->gart.pages_addr[p];
- if (!adev->gart.ptr)
- continue;
+#ifdef CONFIG_AMDGPU_GART_DEBUGFS
+ adev->gart.pages[p] = NULL;
+#endif
+ page_base = adev->dummy_page.addr;
+ if (!adev->gart.ptr)
+ continue;
- for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
- amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
- t, page_base, flags);
- page_base += AMDGPU_GPU_PAGE_SIZE;
- }
+ for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
+ amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
+ t, page_base, flags);
+ page_base += AMDGPU_GPU_PAGE_SIZE;
}
}
mb();
@@ -287,10 +286,11 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- adev->gart.pages_addr[p] = dma_addr[i];
+#ifdef CONFIG_AMDGPU_GART_DEBUGFS
adev->gart.pages[p] = pagelist[i];
+#endif
if (adev->gart.ptr) {
- page_base = adev->gart.pages_addr[p];
+ page_base = dma_addr[i];
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
@@ -312,11 +312,11 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
*/
int amdgpu_gart_init(struct amdgpu_device *adev)
{
- int r, i;
+ int r;
- if (adev->gart.pages) {
+ if (adev->dummy_page.page)
return 0;
- }
+
/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
if (PAGE_SIZE < AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("Page size is smaller than GPU page size!\n");
@@ -330,22 +330,16 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
+
+#ifdef CONFIG_AMDGPU_GART_DEBUGFS
/* Allocate pages table */
adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
if (adev->gart.pages == NULL) {
amdgpu_gart_fini(adev);
return -ENOMEM;
}
- adev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
- adev->gart.num_cpu_pages);
- if (adev->gart.pages_addr == NULL) {
- amdgpu_gart_fini(adev);
- return -ENOMEM;
- }
- /* set GART entry to point to the dummy page by default */
- for (i = 0; i < adev->gart.num_cpu_pages; i++) {
- adev->gart.pages_addr[i] = adev->dummy_page.addr;
- }
+#endif
+
return 0;
}
@@ -358,15 +352,14 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
*/
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
- if (adev->gart.pages && adev->gart.pages_addr && adev->gart.ready) {
+ if (adev->gart.ready) {
/* unbind pages */
amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
}
adev->gart.ready = false;
+#ifdef CONFIG_AMDGPU_GART_DEBUGFS
vfree(adev->gart.pages);
- vfree(adev->gart.pages_addr);
adev->gart.pages = NULL;
- adev->gart.pages_addr = NULL;
-
+#endif
amdgpu_dummy_page_fini(adev);
}
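With pages_addr[] gone, every unbound GART entry is simply pointed at the shared dummy page, and the pages[] shadow survives only for the debugfs build. The loop's index math splits a byte offset into GPU-page (t) and CPU-page (p) granularity; a user-space sketch of that split, assuming 4 KiB for both page sizes:

#include <stdio.h>

#define GPU_PAGE_SIZE 4096u   /* AMDGPU_GPU_PAGE_SIZE */
#define CPU_PAGE_SIZE 4096u   /* PAGE_SIZE on most builds, may be larger */

int main(void)
{
	unsigned offset = 12 * CPU_PAGE_SIZE;               /* example offset */
	unsigned t = offset / GPU_PAGE_SIZE;                /* GPU PTE index */
	unsigned p = t / (CPU_PAGE_SIZE / GPU_PAGE_SIZE);   /* CPU page index */

	printf("offset=%u -> gpu pte %u, cpu page %u\n", offset, t, p);
	return 0;
}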
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
index c3f4e8559..503d54098 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
@@ -43,7 +43,7 @@ struct amdgpu_ring;
struct amdgpu_bo;
struct amdgpu_gds_asic_info {
- uint32_t total_size;
+ uint32_t total_size;
uint32_t gfx_partition_size;
uint32_t cs_partition_size;
};
@@ -52,8 +52,8 @@ struct amdgpu_gds {
struct amdgpu_gds_asic_info mem;
struct amdgpu_gds_asic_info gws;
struct amdgpu_gds_asic_info oa;
- /* At present, GDS, GWS and OA resources for gfx (graphics)
- * is always pre-allocated and available for graphics operation.
+ /* At present, GDS, GWS and OA resources for gfx (graphics)
+	 * are always pre-allocated and available for graphics operation.
* Such resource is shared between all gfx clients.
* TODO: move this operation to user space
* */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index fa6a27bff..8fab64860 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -93,7 +93,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
struct drm_device *ddev = adev->ddev;
struct drm_file *file;
- mutex_lock(&ddev->struct_mutex);
+ mutex_lock(&ddev->filelist_mutex);
list_for_each_entry(file, &ddev->filelist, lhead) {
struct drm_gem_object *gobj;
@@ -103,13 +103,13 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
spin_lock(&file->table_lock);
idr_for_each_entry(&file->object_idr, gobj, handle) {
WARN_ONCE(1, "And also active allocations!\n");
- drm_gem_object_unreference(gobj);
+ drm_gem_object_unreference_unlocked(gobj);
}
idr_destroy(&file->object_idr);
spin_unlock(&file->table_lock);
}
- mutex_unlock(&ddev->struct_mutex);
+ mutex_unlock(&ddev->filelist_mutex);
}
/*
@@ -338,7 +338,7 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
struct drm_gem_object *gobj;
struct amdgpu_bo *robj;
- gobj = drm_gem_object_lookup(dev, filp, handle);
+ gobj = drm_gem_object_lookup(filp, handle);
if (gobj == NULL) {
return -ENOENT;
}
@@ -402,7 +402,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
int r = 0;
long ret;
- gobj = drm_gem_object_lookup(dev, filp, handle);
+ gobj = drm_gem_object_lookup(filp, handle);
if (gobj == NULL) {
return -ENOENT;
}
@@ -436,7 +436,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
int r = -1;
DRM_DEBUG("%d \n", args->handle);
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL)
return -ENOENT;
robj = gem_to_amdgpu_bo(gobj);
@@ -584,7 +584,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL)
return -ENOENT;
rbo = gem_to_amdgpu_bo(gobj);
@@ -646,7 +646,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct amdgpu_bo *robj;
int r;
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL) {
return -ENOENT;
}
@@ -769,7 +769,7 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
struct drm_file *file;
int r;
- r = mutex_lock_interruptible(&dev->struct_mutex);
+ r = mutex_lock_interruptible(&dev->filelist_mutex);
if (r)
return r;
@@ -793,11 +793,11 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
spin_unlock(&file->table_lock);
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->filelist_mutex);
return 0;
}
-static struct drm_info_list amdgpu_debugfs_gem_list[] = {
+static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 8443cea68..34e35423b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -74,9 +74,6 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
}
- ib->vm = vm;
- ib->vm_id = 0;
-
return 0;
}
@@ -89,7 +86,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*
* Free an IB (all asics).
*/
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f)
+void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
+ struct fence *f)
{
amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
}
@@ -117,28 +115,37 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fen
*/
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ibs, struct fence *last_vm_update,
- struct fence **f)
+ struct amdgpu_job *job, struct fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
- struct amdgpu_ctx *ctx, *old_ctx;
+ bool skip_preamble, need_ctx_switch;
+ unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
struct fence *hwf;
+ uint64_t ctx;
+
unsigned i;
int r = 0;
if (num_ibs == 0)
return -EINVAL;
- ctx = ibs->ctx;
- vm = ibs->vm;
+ /* ring tests don't use a job */
+ if (job) {
+ vm = job->vm;
+ ctx = job->ctx;
+ } else {
+ vm = NULL;
+ ctx = 0;
+ }
if (!ring->ready) {
dev_err(adev->dev, "couldn't schedule ib\n");
return -EINVAL;
}
- if (vm && !ibs->vm_id) {
+ if (vm && !job->vm_id) {
dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL;
}
@@ -149,58 +156,68 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return r;
}
+ if (ring->type == AMDGPU_RING_TYPE_SDMA && ring->funcs->init_cond_exec)
+ patch_offset = amdgpu_ring_init_cond_exec(ring);
+
if (vm) {
- /* do context switch */
- amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr,
- ib->gds_base, ib->gds_size,
- ib->gws_base, ib->gws_size,
- ib->oa_base, ib->oa_size);
-
- if (ring->funcs->emit_hdp_flush)
- amdgpu_ring_emit_hdp_flush(ring);
+ r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
+ job->gds_base, job->gds_size,
+ job->gws_base, job->gws_size,
+ job->oa_base, job->oa_size);
+ if (r) {
+ amdgpu_ring_undo(ring);
+ return r;
+ }
}
- old_ctx = ring->current_ctx;
+ if (ring->funcs->emit_hdp_flush)
+ amdgpu_ring_emit_hdp_flush(ring);
+
+ /* always set cond_exec_polling to CONTINUE */
+ *ring->cond_exe_cpu_addr = 1;
+
+ skip_preamble = ring->current_ctx == ctx;
+ need_ctx_switch = ring->current_ctx != ctx;
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
- if (ib->ctx != ctx || ib->vm != vm) {
- ring->current_ctx = old_ctx;
- if (ib->vm_id)
- amdgpu_vm_reset_id(adev, ib->vm_id);
- amdgpu_ring_undo(ring);
- return -EINVAL;
- }
- amdgpu_ring_emit_ib(ring, ib);
- ring->current_ctx = ctx;
- }
+ /* drop preamble IBs if we don't have a context switch */
+ if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
+ continue;
- if (vm) {
- if (ring->funcs->emit_hdp_invalidate)
- amdgpu_ring_emit_hdp_invalidate(ring);
+ amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
+ need_ctx_switch);
+ need_ctx_switch = false;
}
+ if (ring->funcs->emit_hdp_invalidate)
+ amdgpu_ring_emit_hdp_invalidate(ring);
+
r = amdgpu_fence_emit(ring, &hwf);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
- ring->current_ctx = old_ctx;
- if (ib->vm_id)
- amdgpu_vm_reset_id(adev, ib->vm_id);
+ if (job && job->vm_id)
+ amdgpu_vm_reset_id(adev, job->vm_id);
amdgpu_ring_undo(ring);
return r;
}
/* wrap the last IB with fence */
- if (ib->user) {
- uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
- addr += ib->user->offset;
- amdgpu_ring_emit_fence(ring, addr, ib->sequence,
+ if (job && job->uf_bo) {
+ uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo);
+
+ addr += job->uf_offset;
+ amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
AMDGPU_FENCE_FLAG_64BIT);
}
if (f)
*f = fence_get(hwf);
+ if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
+ amdgpu_ring_patch_cond_exec(ring, patch_offset);
+
+ ring->current_ctx = ctx;
amdgpu_ring_commit(ring);
return 0;
}
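The rewritten scheduler path takes VM and context from the job (ring tests pass no job at all) and drops preamble IBs whenever the ring is still on the submitting context, emitting a context switch only with the first IB after a change. A user-space sketch of just that filtering logic; IB_FLAG_PREAMBLE stands in for AMDGPU_IB_FLAG_PREAMBLE:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IB_FLAG_PREAMBLE 1u

struct ib { unsigned flags; const char *name; };

int main(void)
{
	uint64_t ring_ctx = 42, job_ctx = 42;  /* same context: no switch */
	struct ib ibs[] = { { IB_FLAG_PREAMBLE, "preamble" }, { 0, "payload" } };
	bool skip_preamble = ring_ctx == job_ctx;
	bool need_ctx_switch = ring_ctx != job_ctx;
	unsigned i;

	for (i = 0; i < 2; ++i) {
		if ((ibs[i].flags & IB_FLAG_PREAMBLE) && skip_preamble)
			continue;                  /* preamble IB dropped */
		printf("emit %s (ctx_switch=%d)\n", ibs[i].name, need_ctx_switch);
		need_ctx_switch = false;           /* only the first IB switches */
	}
	return 0;
}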
@@ -315,7 +332,7 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
}
-static struct drm_info_list amdgpu_debugfs_sa_list[] = {
+static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
{"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 762cfdb85..835a3fa8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -219,7 +219,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
if (r) {
return r;
}
- adev->ddev->vblank_disable_allowed = true;
/* enable msi */
adev->irq.msi_enabled = false;
@@ -498,7 +497,7 @@ static int amdgpu_irqdomain_map(struct irq_domain *d,
return 0;
}
-static struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
+static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
.map = amdgpu_irqdomain_map,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 9c9b19e2f..f0dafa514 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,8 +28,25 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
+static void amdgpu_job_free_handler(struct work_struct *ws)
+{
+ struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
+ amd_sched_job_put(&job->base);
+}
+
+void amdgpu_job_timeout_func(struct work_struct *work)
+{
+ struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
+ DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
+ job->base.sched->name,
+ (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
+ job->ring->fence_drv.sync_seq);
+
+ amd_sched_job_put(&job->base);
+}
+
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
- struct amdgpu_job **job)
+ struct amdgpu_job **job, struct amdgpu_vm *vm)
{
size_t size = sizeof(struct amdgpu_job);
@@ -43,8 +60,10 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
return -ENOMEM;
(*job)->adev = adev;
+ (*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs;
+ INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
amdgpu_sync_create(&(*job)->sync);
@@ -56,7 +75,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
{
int r;
- r = amdgpu_job_alloc(adev, 1, job);
+ r = amdgpu_job_alloc(adev, 1, job, NULL);
if (r)
return r;
@@ -78,8 +97,16 @@ void amdgpu_job_free(struct amdgpu_job *job)
amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
fence_put(job->fence);
- amdgpu_bo_unref(&job->uf.bo);
+ amdgpu_bo_unref(&job->uf_bo);
amdgpu_sync_free(&job->sync);
+
+ if (!job->base.use_sched)
+ kfree(job);
+}
+
+void amdgpu_job_free_func(struct kref *refcount)
+{
+ struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount);
kfree(job);
}
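Job lifetime is now reference-counted: amdgpu_job_free() kfree()s directly only when the job never reached the scheduler, while scheduled jobs are released through amdgpu_job_free_func() once the last amd_sched_job_put() drops the kref. A minimal kernel-style sketch of that release pattern with a hypothetical my_job type:

#include <linux/kref.h>
#include <linux/slab.h>

struct my_job {
	struct kref refcount;
	/* ... payload ... */
};

static void my_job_release(struct kref *kref)
{
	struct my_job *job = container_of(kref, struct my_job, refcount);

	kfree(job);
}

/* drop one reference; the final put frees the job */
static void my_job_put(struct my_job *job)
{
	kref_put(&job->refcount, my_job_release);
}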
@@ -87,16 +114,22 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
struct amd_sched_entity *entity, void *owner,
struct fence **f)
{
+ struct fence *fence;
+ int r;
job->ring = ring;
- job->base.sched = &ring->sched;
- job->base.s_entity = entity;
- job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
- if (!job->base.s_fence)
- return -ENOMEM;
- *f = fence_get(&job->base.s_fence->base);
+ if (!f)
+ return -EINVAL;
+
+ r = amd_sched_job_init(&job->base, &ring->sched,
+ entity, amdgpu_job_timeout_func,
+ amdgpu_job_free_func, owner, &fence);
+ if (r)
+ return r;
job->owner = owner;
+ job->ctx = entity->fence_context;
+ *f = fence_get(fence);
amd_sched_entity_push_job(&job->base);
return 0;
@@ -105,27 +138,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{
struct amdgpu_job *job = to_amdgpu_job(sched_job);
- struct amdgpu_vm *vm = job->ibs->vm;
+ struct amdgpu_vm *vm = job->vm;
struct fence *fence = amdgpu_sync_get_fence(&job->sync);
- if (fence == NULL && vm && !job->ibs->vm_id) {
+ if (fence == NULL && vm && !job->vm_id) {
struct amdgpu_ring *ring = job->ring;
- unsigned i, vm_id;
- uint64_t vm_pd_addr;
int r;
r = amdgpu_vm_grab_id(vm, ring, &job->sync,
&job->base.s_fence->base,
- &vm_id, &vm_pd_addr);
+ &job->vm_id, &job->vm_pd_addr);
if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);
- else {
- for (i = 0; i < job->num_ibs; ++i) {
- job->ibs[i].vm_id = vm_id;
- job->ibs[i].vm_pd_addr = vm_pd_addr;
- }
- }
fence = amdgpu_sync_get_fence(&job->sync);
}
@@ -153,7 +178,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
trace_amdgpu_sched_run_job(job);
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
- job->sync.last_vm_update, &fence);
+ job->sync.last_vm_update, job, &fence);
if (r) {
DRM_ERROR("Error scheduling IBs (%d)\n", r);
goto err;
@@ -165,7 +190,9 @@ err:
return fence;
}
-struct amd_sched_backend_ops amdgpu_sched_ops = {
+const struct amd_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
+ .begin_job = amd_sched_job_begin,
+ .finish_job = amd_sched_job_finish,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d78739d29..d851ea150 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -427,7 +427,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device dev_info = {};
- struct amdgpu_cu_info cu_info;
dev_info.device_id = dev->pdev->device;
dev_info.chip_rev = adev->rev_id;
@@ -462,11 +461,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
AMDGPU_GPU_PAGE_SIZE;
dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
- amdgpu_asic_get_cu_info(adev, &cu_info);
- dev_info.cu_active_number = cu_info.number;
- dev_info.cu_ao_mask = cu_info.ao_cu_mask;
+ dev_info.cu_active_number = adev->gfx.cu_info.number;
+ dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
dev_info.ce_ram_size = adev->gfx.ce_ram_size;
- memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
+ memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
+ sizeof(adev->gfx.cu_info.bitmap));
dev_info.vram_type = adev->mc.vram_type;
dev_info.vram_bit_width = adev->mc.vram_width;
dev_info.vce_harvest_config = adev->vce.harvest_config;
@@ -756,4 +755,4 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
-int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
+const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 9f4a45cd2..32fa7b791 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -232,7 +232,10 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
int r;
mutex_lock(&adev->mn_lock);
- down_write(&mm->mmap_sem);
+ if (down_write_killable(&mm->mmap_sem)) {
+ mutex_unlock(&adev->mn_lock);
+ return ERR_PTR(-EINTR);
+ }
hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
if (rmn->mm == mm)
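down_write_killable() behaves like down_write() but returns nonzero when a fatal signal arrives, so a task queued behind a long mmap_sem hold can still be killed; the caller must then unwind whatever it already holds, as the hunk does with mn_lock. A kernel-style sketch of the pattern with hypothetical locks:

#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>

static DEFINE_MUTEX(outer_lock);    /* hypothetical, plays the mn_lock role */
static DECLARE_RWSEM(inner_sem);    /* hypothetical, plays the mmap_sem role */

static void *grab_both(void)
{
	mutex_lock(&outer_lock);
	if (down_write_killable(&inner_sem)) {
		/* fatal signal: drop what we already hold, report -EINTR */
		mutex_unlock(&outer_lock);
		return ERR_PTR(-EINTR);
	}
	/* ... critical section ... */
	up_write(&inner_sem);
	mutex_unlock(&outer_lock);
	return NULL;
}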
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 81bd964d3..6b1d7d306 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -283,7 +283,7 @@ struct amdgpu_display_funcs {
u32 (*hpd_get_gpio_reg)(struct amdgpu_device *adev);
/* pageflipping */
void (*page_flip)(struct amdgpu_device *adev,
- int crtc_id, u64 crtc_base);
+ int crtc_id, u64 crtc_base, bool async);
int (*page_flip_get_scanoutpos)(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position);
/* display topology setup */
@@ -530,7 +530,7 @@ struct amdgpu_framebuffer {
((em) == ATOM_ENCODER_MODE_DP_MST))
/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */
-#define USE_REAL_VBLANKSTART (1 << 30)
+#define USE_REAL_VBLANKSTART (1 << 30)
#define GET_DISTANCE_TO_VBLANKSTART (1 << 31)
void amdgpu_link_encoder_connector(struct drm_device *dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index acc08018c..bdb01d932 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -71,7 +71,7 @@ static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
int r;
- r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
+ r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(bo->adev->dev, "%p reserve failed\n", bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index ff9597ce2..0e13d80d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -270,30 +270,28 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type state = 0;
- long idx;
+ unsigned long idx;
int ret;
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
- else {
- ret = kstrtol(buf, 0, &idx);
+ else if (adev->pp_enabled) {
+ struct pp_states_info data;
- if (ret) {
+ ret = kstrtoul(buf, 0, &idx);
+ if (ret || idx >= ARRAY_SIZE(data.states)) {
count = -EINVAL;
goto fail;
}
- if (adev->pp_enabled) {
- struct pp_states_info data;
- amdgpu_dpm_get_pp_num_states(adev, &data);
- state = data.states[idx];
- /* only set user selected power states */
- if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
- state != POWER_STATE_TYPE_DEFAULT) {
- amdgpu_dpm_dispatch_task(adev,
- AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
- adev->pp_force_state_enabled = true;
- }
+ amdgpu_dpm_get_pp_num_states(adev, &data);
+ state = data.states[idx];
+ /* only set user selected power states */
+ if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
+ state != POWER_STATE_TYPE_DEFAULT) {
+ amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+ adev->pp_force_state_enabled = true;
}
}
fail:
@@ -362,16 +360,23 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
+ uint32_t i, mask = 0;
+ char sub_str[2];
- ret = kstrtol(buf, 0, &level);
+ for (i = 0; i < strlen(buf) - 1; i++) {
+ sub_str[0] = *(buf + i);
+ sub_str[1] = '\0';
+ ret = kstrtol(sub_str, 0, &level);
- if (ret) {
- count = -EINVAL;
- goto fail;
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+ mask |= 1 << level;
}
if (adev->pp_enabled)
- amdgpu_dpm_force_clock_level(adev, PP_SCLK, level);
+ amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
fail:
return count;
}
@@ -399,16 +404,23 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
+ uint32_t i, mask = 0;
+ char sub_str[2];
- ret = kstrtol(buf, 0, &level);
+ for (i = 0; i < strlen(buf) - 1; i++) {
+ sub_str[0] = *(buf + i);
+ sub_str[1] = '\0';
+ ret = kstrtol(sub_str, 0, &level);
- if (ret) {
- count = -EINVAL;
- goto fail;
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+ mask |= 1 << level;
}
if (adev->pp_enabled)
- amdgpu_dpm_force_clock_level(adev, PP_MCLK, level);
+ amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
fail:
return count;
}
@@ -436,16 +448,23 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret;
long level;
+ uint32_t i, mask = 0;
+ char sub_str[2];
- ret = kstrtol(buf, 0, &level);
+ for (i = 0; i < strlen(buf) - 1; i++) {
+ sub_str[0] = *(buf + i);
+ sub_str[1] = '\0';
+ ret = kstrtol(sub_str, 0, &level);
- if (ret) {
- count = -EINVAL;
- goto fail;
+ if (ret) {
+ count = -EINVAL;
+ goto fail;
+ }
+ mask |= 1 << level;
}
if (adev->pp_enabled)
- amdgpu_dpm_force_clock_level(adev, PP_PCIE, level);
+ amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
fail:
return count;
}
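All three handlers (sclk, mclk, pcie) now parse a string of level digits into a bitmask so several DPM levels can be forced at once. This version walks one character at a time, so it handles inputs such as "024" but not multi-digit levels or separators. A user-space sketch of the conversion, with error handling elided:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *buf = "024\n";     /* sysfs writes include the newline */
	char sub_str[2] = { 0 };
	unsigned mask = 0;
	size_t i;

	for (i = 0; i + 1 < strlen(buf); i++) {  /* skip the trailing '\n' */
		sub_str[0] = buf[i];
		mask |= 1u << strtol(sub_str, NULL, 0);
	}
	printf("mask = 0x%x\n", mask);           /* 0x15: levels 0, 2 and 4 */
	return 0;
}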
@@ -1212,7 +1231,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
return 0;
}
-static struct drm_info_list amdgpu_pm_info_list[] = {
+static const struct drm_info_list amdgpu_pm_info_list[] = {
{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index e9c6ae6ed..82256558e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -99,6 +99,10 @@ static int amdgpu_pp_early_init(void *handle)
#ifdef CONFIG_DRM_AMD_POWERPLAY
switch (adev->asic_type) {
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ adev->pp_enabled = true;
+ break;
case CHIP_TONGA:
case CHIP_FIJI:
adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
@@ -179,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle)
if (ret)
return ret;
-#ifdef CONFIG_DRM_AMD_POWERPLAY
- if (adev->pp_enabled) {
- amdgpu_pm_sysfs_fini(adev);
- amd_powerplay_fini(adev->powerplay.pp_handle);
- }
-#endif
-
return ret;
}
@@ -219,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle)
return ret;
}
+static void amdgpu_pp_late_fini(void *handle)
+{
+#ifdef CONFIG_DRM_AMD_POWERPLAY
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->pp_enabled) {
+ amdgpu_pm_sysfs_fini(adev);
+ amd_powerplay_fini(adev->powerplay.pp_handle);
+ }
+
+ if (adev->powerplay.ip_funcs->late_fini)
+ adev->powerplay.ip_funcs->late_fini(
+ adev->powerplay.pp_handle);
+#endif
+}
+
static int amdgpu_pp_suspend(void *handle)
{
int ret = 0;
@@ -299,28 +312,20 @@ static int amdgpu_pp_soft_reset(void *handle)
return ret;
}
-static void amdgpu_pp_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->print_status)
- adev->powerplay.ip_funcs->print_status(
- adev->powerplay.pp_handle);
-}
-
const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
+ .name = "amdgpu_powerplay",
.early_init = amdgpu_pp_early_init,
.late_init = amdgpu_pp_late_init,
.sw_init = amdgpu_pp_sw_init,
.sw_fini = amdgpu_pp_sw_fini,
.hw_init = amdgpu_pp_hw_init,
.hw_fini = amdgpu_pp_hw_fini,
+ .late_fini = amdgpu_pp_late_fini,
.suspend = amdgpu_pp_suspend,
.resume = amdgpu_pp_resume,
.is_idle = amdgpu_pp_is_idle,
.wait_for_idle = amdgpu_pp_wait_for_idle,
.soft_reset = amdgpu_pp_soft_reset,
- .print_status = amdgpu_pp_print_status,
.set_clockgating_state = amdgpu_pp_set_clockgating_state,
.set_powergating_state = amdgpu_pp_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index be6388f73..7700dc22f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -57,9 +57,10 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
ttm_bo_kunmap(&bo->dma_buf_vmap);
}
-struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg)
+struct drm_gem_object *
+amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sg)
{
struct reservation_object *resv = attach->dmabuf->resv;
struct amdgpu_device *adev = dev->dev_private;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 972eed2ef..870f94942 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -46,7 +46,8 @@
* wptr. The GPU then starts fetching commands and executes
* them until the pointers are equal again.
*/
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
/**
* amdgpu_ring_alloc - allocate space on the ring buffer
@@ -215,18 +216,17 @@ int amdgpu_ring_restore(struct amdgpu_ring *ring,
*
* @adev: amdgpu_device pointer
* @ring: amdgpu_ring structure holding ring information
- * @ring_size: size of the ring
+ * @max_dw: maximum number of dwords per submission (used to size the ring)
* @nop: nop packet for this ring
*
* Initialize the driver information for the selected ring (all asics).
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned ring_size, u32 nop, u32 align_mask,
+ unsigned max_dw, u32 nop, u32 align_mask,
struct amdgpu_irq_src *irq_src, unsigned irq_type,
enum amdgpu_ring_type ring_type)
{
- u32 rb_bufsz;
int r;
if (ring->adev == NULL) {
@@ -265,8 +265,17 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
return r;
}
- ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
+ ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4;
ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
+
+ r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
+ return r;
+ }
+ ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
+ ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
+
spin_lock_init(&ring->fence_lock);
r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
if (r) {
@@ -274,10 +283,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
- /* Align ring size */
- rb_bufsz = order_base_2(ring_size / 8);
- ring_size = (1 << (rb_bufsz + 1)) * 4;
- ring->ring_size = ring_size;
+ ring->ring_size = roundup_pow_of_two(max_dw * 4 *
+ amdgpu_sched_hw_submission);
ring->align_mask = align_mask;
ring->nop = nop;
ring->type = ring_type;
@@ -310,8 +317,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
}
ring->ptr_mask = (ring->ring_size / 4) - 1;
- ring->max_dw = DIV_ROUND_UP(ring->ring_size / 4,
- amdgpu_sched_hw_submission);
+ ring->max_dw = max_dw;
if (amdgpu_debugfs_ring_init(adev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
@@ -337,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
ring->ring = NULL;
ring->ring_obj = NULL;
+ amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
amdgpu_wb_free(ring->adev, ring->fence_offs);
amdgpu_wb_free(ring->adev, ring->rptr_offs);
amdgpu_wb_free(ring->adev, ring->wptr_offs);
@@ -363,9 +370,8 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- int roffset = *(int*)node->info_ent->data;
+ int roffset = (unsigned long)node->info_ent->data;
struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset);
-
uint32_t rptr, wptr, rptr_next;
unsigned i;
@@ -408,46 +414,37 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
return 0;
}
-/* TODO: clean this up !*/
-static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
-static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
-static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
-static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
-static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
-static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
-static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
-static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);
-
-static struct drm_info_list amdgpu_debugfs_ring_info_list[] = {
- {"amdgpu_ring_gfx", amdgpu_debugfs_ring_info, 0, &amdgpu_gfx_index},
- {"amdgpu_ring_cp1", amdgpu_debugfs_ring_info, 0, &cayman_cp1_index},
- {"amdgpu_ring_cp2", amdgpu_debugfs_ring_info, 0, &cayman_cp2_index},
- {"amdgpu_ring_dma1", amdgpu_debugfs_ring_info, 0, &amdgpu_dma1_index},
- {"amdgpu_ring_dma2", amdgpu_debugfs_ring_info, 0, &amdgpu_dma2_index},
- {"amdgpu_ring_uvd", amdgpu_debugfs_ring_info, 0, &r600_uvd_index},
- {"amdgpu_ring_vce1", amdgpu_debugfs_ring_info, 0, &si_vce1_index},
- {"amdgpu_ring_vce2", amdgpu_debugfs_ring_info, 0, &si_vce2_index},
-};
+static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS];
+static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32];
#endif
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
+ unsigned offset = (uint8_t*)ring - (uint8_t*)adev;
unsigned i;
+ struct drm_info_list *info;
+ char *name;
+
for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
- struct drm_info_list *info = &amdgpu_debugfs_ring_info_list[i];
- int roffset = *(int*)amdgpu_debugfs_ring_info_list[i].data;
- struct amdgpu_ring *other = (void *)(((uint8_t*)adev) + roffset);
- unsigned r;
+ info = &amdgpu_debugfs_ring_info_list[i];
+ if (!info->data)
+ break;
+ }
- if (other != ring)
- continue;
+ if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list))
+ return -ENOSPC;
- r = amdgpu_debugfs_add_files(adev, info, 1);
- if (r)
- return r;
- }
+ name = &amdgpu_debugfs_ring_names[i][0];
+ sprintf(name, "amdgpu_ring_%s", ring->name);
+ info->name = name;
+ info->show = amdgpu_debugfs_ring_info;
+ info->driver_features = 0;
+ info->data = (void*)(uintptr_t)offset;
+
+ return amdgpu_debugfs_add_files(adev, info, 1);
#endif
return 0;
}
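Rather than a fixed table of offsetof() constants, each ring now registers its own debugfs entry and stashes its byte offset inside amdgpu_device directly in the info_ent->data pointer; the show callback adds the offset back onto the device base. A user-space sketch of that offset round-trip with a hypothetical struct layout:

#include <stdint.h>
#include <stdio.h>

struct ring { int id; };
struct dev  { int misc; struct ring rings[3]; };

int main(void)
{
	struct dev adev = { 0, { { 10 }, { 11 }, { 12 } } };
	struct ring *ring = &adev.rings[2];

	/* encode: byte offset of the ring within the device, kept in a void * */
	void *data = (void *)(uintptr_t)((uint8_t *)ring - (uint8_t *)&adev);

	/* decode: recover the ring from the device base plus the offset */
	struct ring *again = (struct ring *)((uint8_t *)&adev + (uintptr_t)data);

	printf("round-trip id = %d\n", again->id);   /* prints 12 */
	return 0;
}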
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 8bf84efaf..48618ee32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
return r;
}
r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+ memset(sa_manager->cpu_ptr, 0, sa_manager->size);
amdgpu_bo_unreserve(sa_manager->bo);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index c48b4fce5..34a92808b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -109,6 +109,29 @@ static void amdgpu_sync_keep_later(struct fence **keep, struct fence *fence)
}
/**
+ * amdgpu_sync_add_later - add the fence to the hash
+ *
+ * @sync: sync object to add the fence to
+ * @f: fence to add
+ *
+ * Tries to add the fence to an existing hash entry. Returns true when an entry
+ * was found, false otherwise.
+ */
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct fence *f)
+{
+ struct amdgpu_sync_entry *e;
+
+ hash_for_each_possible(sync->fences, e, node, f->context) {
+ if (unlikely(e->fence->context != f->context))
+ continue;
+
+ amdgpu_sync_keep_later(&e->fence, f);
+ return true;
+ }
+ return false;
+}
+
+/**
* amdgpu_sync_fence - remember to sync to this fence
*
* @sync: sync object to add fence to
@@ -127,13 +150,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
amdgpu_sync_keep_later(&sync->last_vm_update, f);
- hash_for_each_possible(sync->fences, e, node, f->context) {
- if (unlikely(e->fence->context != f->context))
- continue;
-
- amdgpu_sync_keep_later(&e->fence, f);
+ if (amdgpu_sync_add_later(sync, f))
return 0;
- }
e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
if (!e)
@@ -204,6 +222,81 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
return r;
}
+/**
+ * amdgpu_sync_is_idle - test if all fences are signaled
+ *
+ * @sync: the sync object
+ *
+ * Returns true if all fences in the sync object are signaled.
+ */
+bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
+{
+ struct amdgpu_sync_entry *e;
+ struct hlist_node *tmp;
+ int i;
+
+ hash_for_each_safe(sync->fences, i, tmp, e, node) {
+ struct fence *f = e->fence;
+
+ if (fence_is_signaled(f)) {
+ hash_del(&e->node);
+ fence_put(f);
+ kmem_cache_free(amdgpu_sync_slab, e);
+ continue;
+ }
+
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * amdgpu_sync_cycle_fences - move fences from one sync object into another
+ *
+ * @dst: the destination sync object
+ * @src: the source sync object
+ * @fence: fence to add to source
+ *
+ * Removes all fences from source, moves them into destination, and adds
+ * @fence as the new entry in source.
+ */
+int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
+ struct fence *fence)
+{
+ struct amdgpu_sync_entry *e, *newone;
+ struct hlist_node *tmp;
+ int i;
+
+ /* Allocate the new entry before moving the old ones */
+ newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
+ if (!newone)
+ return -ENOMEM;
+
+ hash_for_each_safe(src->fences, i, tmp, e, node) {
+ struct fence *f = e->fence;
+
+ hash_del(&e->node);
+ if (fence_is_signaled(f)) {
+ fence_put(f);
+ kmem_cache_free(amdgpu_sync_slab, e);
+ continue;
+ }
+
+ if (amdgpu_sync_add_later(dst, f)) {
+ kmem_cache_free(amdgpu_sync_slab, e);
+ continue;
+ }
+
+ hash_add(dst->fences, &e->node, f->context);
+ }
+
+ hash_add(src->fences, &newone->node, fence->context);
+ newone->fence = fence_get(fence);
+
+ return 0;
+}
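An amdgpu_sync object keeps at most one fence per fence context, always the most recent; cycle_fences drains the source into the destination under that rule and seeds the source with the new fence, allocating the replacement entry first so the move cannot fail halfway through. A toy user-space model of the per-context keep-the-later rule:

#include <stdio.h>

#define MAX_CTX 4

static unsigned latest[MAX_CTX];   /* one slot per context: newest seqno */

static void sync_fence(unsigned ctx, unsigned seqno)
{
	if (seqno > latest[ctx])   /* mirrors amdgpu_sync_keep_later() */
		latest[ctx] = seqno;
}

int main(void)
{
	unsigned c;

	sync_fence(1, 5);
	sync_fence(1, 3);   /* older fence on the same context: ignored */
	sync_fence(2, 7);
	for (c = 0; c < MAX_CTX; ++c)
		if (latest[c])
			printf("ctx %u -> seq %u\n", c, latest[c]);
	return 0;
}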
+
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 11af4492b..3b9053af4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -911,6 +911,52 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
return flags;
}
+static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
+{
+ struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
+ struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
+
+ for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+ if (&tbo->lru == lru->lru[j])
+ lru->lru[j] = tbo->lru.prev;
+
+ if (&tbo->swap == lru->swap_lru)
+ lru->swap_lru = tbo->swap.prev;
+ }
+}
+
+static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
+{
+ struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
+ unsigned log2_size = min(ilog2(tbo->num_pages),
+ AMDGPU_TTM_LRU_SIZE - 1);
+
+ return &adev->mman.log2_size[log2_size];
+}
+
+static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
+{
+ struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
+ struct list_head *res = lru->lru[tbo->mem.mem_type];
+
+ lru->lru[tbo->mem.mem_type] = &tbo->lru;
+
+ return res;
+}
+
+static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
+{
+ struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
+ struct list_head *res = lru->swap_lru;
+
+ lru->swap_lru = &tbo->swap;
+
+ return res;
+}
+
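These hooks give every power-of-two size class its own LRU insertion point, so large BOs cluster behind small ones and are evicted last; the class is just ilog2(num_pages) clamped to the table. A user-space sketch of the bucketing, with LRU_SIZE standing in for AMDGPU_TTM_LRU_SIZE:

#include <stdio.h>

#define LRU_SIZE 8u

static unsigned ilog2u(unsigned long v)
{
	unsigned r = 0;

	while (v >>= 1)
		++r;
	return r;
}

static unsigned bucket(unsigned long num_pages)
{
	unsigned b = ilog2u(num_pages);

	return b < LRU_SIZE - 1 ? b : LRU_SIZE - 1;   /* min(), as the hook does */
}

int main(void)
{
	unsigned long sizes[] = { 1, 16, 256, 1ul << 20 };
	unsigned i;

	for (i = 0; i < 4; ++i)
		printf("%8lu pages -> bucket %u\n", sizes[i], bucket(sizes[i]));
	return 0;
}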
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
@@ -924,10 +970,14 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_free = &amdgpu_ttm_io_mem_free,
+ .lru_removal = &amdgpu_ttm_lru_removal,
+ .lru_tail = &amdgpu_ttm_lru_tail,
+ .swap_lru_tail = &amdgpu_ttm_swap_lru_tail,
};
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
+ unsigned i, j;
int r;
r = amdgpu_ttm_global_init(adev);
@@ -945,6 +995,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
}
+
+ for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
+ struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
+
+ for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
+ lru->lru[j] = &adev->mman.bdev.man[j].lru;
+ lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
+ }
+
adev->mman.initialized = true;
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
adev->mc.real_vram_size >> PAGE_SHIFT);
@@ -1167,7 +1226,7 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;
-static struct drm_info_list amdgpu_ttm_debugfs_list[] = {
+static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
@@ -1218,6 +1277,8 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
.llseek = default_llseek
};
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -1265,6 +1326,8 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
#endif
+#endif
+
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
@@ -1280,6 +1343,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
i_size_write(ent->d_inode, adev->mc.mc_vram_size);
adev->mman.vram = ent;
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
adev, &amdgpu_ttm_gtt_fops);
if (IS_ERR(ent))
@@ -1287,6 +1351,7 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
i_size_write(ent->d_inode, adev->mc.gtt_size);
adev->mman.gtt = ent;
+#endif
count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
#ifdef CONFIG_SWIOTLB
@@ -1308,7 +1373,10 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
debugfs_remove(adev->mman.vram);
adev->mman.vram = NULL;
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
debugfs_remove(adev->mman.gtt);
adev->mman.gtt = NULL;
#endif
+
+#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index cdb963cc2..3959055eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -41,19 +41,23 @@
/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS 1000
+/* Polaris10/11 firmware version */
+#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE "/*(DEBLOBBED)*/"
-#define FIRMWARE_KABINI "/*(DEBLOBBED)*/"
-#define FIRMWARE_KAVERI "/*(DEBLOBBED)*/"
-#define FIRMWARE_HAWAII "/*(DEBLOBBED)*/"
+#define FIRMWARE_KABINI "/*(DEBLOBBED)*/"
+#define FIRMWARE_KAVERI "/*(DEBLOBBED)*/"
+#define FIRMWARE_HAWAII "/*(DEBLOBBED)*/"
#define FIRMWARE_MULLINS "/*(DEBLOBBED)*/"
#endif
#define FIRMWARE_TONGA "/*(DEBLOBBED)*/"
#define FIRMWARE_CARRIZO "/*(DEBLOBBED)*/"
#define FIRMWARE_FIJI "/*(DEBLOBBED)*/"
#define FIRMWARE_STONEY "/*(DEBLOBBED)*/"
+#define FIRMWARE_POLARIS10 "/*(DEBLOBBED)*/"
+#define FIRMWARE_POLARIS11 "/*(DEBLOBBED)*/"
/**
* amdgpu_uvd_cs_ctx - Command submission parser context
@@ -124,6 +128,12 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
case CHIP_STONEY:
fw_name = FIRMWARE_STONEY;
break;
+ case CHIP_POLARIS10:
+ fw_name = FIRMWARE_POLARIS10;
+ break;
+ case CHIP_POLARIS11:
+ fw_name = FIRMWARE_POLARIS11;
+ break;
default:
return -EINVAL;
}
@@ -144,6 +154,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return r;
}
+ /* Set the default UVD handles that the firmware can handle */
+ adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;
+
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
@@ -151,11 +164,28 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
version_major, version_minor, family_id);
+ /*
+ * Limit the number of UVD handles depending on microcode major
+ * and minor versions. The first firmware version with support for
+ * 40 UVD instances is 1.80, so all subsequent versions have the
+ * same support.
+ */
+ if ((version_major > 0x01) ||
+ ((version_major == 0x01) && (version_minor >= 0x50)))
+ adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
(family_id << 8));
+ if ((adev->asic_type == CHIP_POLARIS10 ||
+ adev->asic_type == CHIP_POLARIS11) &&
+ (adev->uvd.fw_version < FW_1_66_16))
+ DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
+ version_major, version_minor);
+
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
- + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
+ + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+ + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
@@ -198,7 +228,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return r;
}
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
atomic_set(&adev->uvd.handles[i], 0);
adev->uvd.filp[i] = NULL;
}
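The handle limit and the Polaris minimum are both derived from the packed firmware version, (major << 24) | (minor << 16) | (family << 8); per the comment above, 40-handle support begins with firmware 1.80 (minor 0x50). A user-space sketch of the packing and comparison:

#include <stdio.h>

static unsigned pack(unsigned major, unsigned minor, unsigned family)
{
	return (major << 24) | (minor << 16) | (family << 8);
}

int main(void)
{
	unsigned min_polaris = pack(1, 66, 16);   /* FW_1_66_16 */
	unsigned fw          = pack(1, 87, 16);   /* example firmware build */

	printf("fw 0x%08x vs minimum 0x%08x -> too old: %s\n",
	       fw, min_polaris, fw < min_polaris ? "yes" : "no");
	return 0;
}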
@@ -214,19 +244,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int r;
- if (adev->uvd.vcpu_bo == NULL)
- return 0;
+ kfree(adev->uvd.saved_bo);
amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
- r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
- if (!r) {
- amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
- amdgpu_bo_unpin(adev->uvd.vcpu_bo);
- amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
- }
+ if (adev->uvd.vcpu_bo) {
+ r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
+ if (!r) {
+ amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
+ amdgpu_bo_unpin(adev->uvd.vcpu_bo);
+ amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
+ }
- amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+ amdgpu_bo_unref(&adev->uvd.vcpu_bo);
+ }
amdgpu_ring_fini(&adev->uvd.ring);
@@ -244,7 +275,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (adev->uvd.vcpu_bo == NULL)
return 0;
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+ for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.handles[i]))
break;
@@ -301,7 +332,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
struct amdgpu_ring *ring = &adev->uvd.ring;
int i, r;
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&adev->uvd.handles[i]);
if (handle != 0 && adev->uvd.filp[i] == filp) {
struct fence *fence;
@@ -383,7 +414,8 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
*
* Peek into the decode message and calculate the necessary buffer sizes.
*/
-static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
+static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
+ unsigned buf_sizes[])
{
unsigned stream_type = msg[4];
unsigned width = msg[6];
@@ -405,7 +437,6 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
switch (stream_type) {
case 0: /* H264 */
- case 7: /* H264 Perf */
switch(level) {
case 30:
num_dpb_buffer = 8100 / fs_in_mb;
@@ -483,6 +514,54 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
break;
+ case 7: /* H264 Perf */
+ switch(level) {
+ case 30:
+ num_dpb_buffer = 8100 / fs_in_mb;
+ break;
+ case 31:
+ num_dpb_buffer = 18000 / fs_in_mb;
+ break;
+ case 32:
+ num_dpb_buffer = 20480 / fs_in_mb;
+ break;
+ case 41:
+ num_dpb_buffer = 32768 / fs_in_mb;
+ break;
+ case 42:
+ num_dpb_buffer = 34816 / fs_in_mb;
+ break;
+ case 50:
+ num_dpb_buffer = 110400 / fs_in_mb;
+ break;
+ case 51:
+ num_dpb_buffer = 184320 / fs_in_mb;
+ break;
+ default:
+ num_dpb_buffer = 184320 / fs_in_mb;
+ break;
+ }
+ num_dpb_buffer++;
+ if (num_dpb_buffer > 17)
+ num_dpb_buffer = 17;
+
+ /* reference picture buffer */
+ min_dpb_size = image_size * num_dpb_buffer;
+
+ if (adev->asic_type < CHIP_POLARIS10) {
+ /* macroblock context buffer */
+ min_dpb_size +=
+ width_in_mb * height_in_mb * num_dpb_buffer * 192;
+
+ /* IT surface buffer */
+ min_dpb_size += width_in_mb * height_in_mb * 32;
+ } else {
+ /* macroblock context buffer */
+ min_ctx_size =
+ width_in_mb * height_in_mb * num_dpb_buffer * 192;
+ }
+ break;
+
case 16: /* H265 */
image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
image_size = ALIGN(image_size, 256);
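The new H264 Perf case reuses the plain H264 DPB table, but on Polaris the macroblock context buffer is reported separately via min_ctx_size instead of being folded into min_dpb_size. A user-space walk-through of the sizing for a 1920x1088 level-4.1 stream, following the formulas in the patch (the image_size computation comes from earlier in the same function):

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned width = 1920, height = 1088;
	unsigned width_in_mb  = width / 16;
	unsigned height_in_mb = ALIGN_UP(height / 16, 2);
	unsigned fs_in_mb     = width_in_mb * height_in_mb;

	unsigned image_size = width * height;
	image_size += image_size / 2;              /* NV12: luma + half chroma */
	image_size = ALIGN_UP(image_size, 1024);

	unsigned num_dpb = 32768 / fs_in_mb + 1;   /* level 4.1 table entry */
	if (num_dpb > 17)
		num_dpb = 17;

	/* reference picture buffer, plus Polaris' separate context buffer */
	unsigned min_dpb_size = image_size * num_dpb;
	unsigned min_ctx_size = width_in_mb * height_in_mb * num_dpb * 192;

	printf("dpb buffers %u, min_dpb %u, polaris min_ctx %u\n",
	       num_dpb, min_dpb_size, min_ctx_size);
	return 0;
}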
@@ -561,7 +640,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
amdgpu_bo_kunmap(bo);
/* try to alloc a new handle */
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
if (atomic_read(&adev->uvd.handles[i]) == handle) {
DRM_ERROR("Handle 0x%x already in use!\n", handle);
return -EINVAL;
@@ -578,13 +657,13 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
case 1:
/* it's a decode msg, calc buffer sizes */
- r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
+ r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
amdgpu_bo_kunmap(bo);
if (r)
return r;
/* validate the handle */
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+ for (i = 0; i < adev->uvd.max_handles; ++i) {
if (atomic_read(&adev->uvd.handles[i]) == handle) {
if (adev->uvd.filp[i] != ctx->parser->filp) {
DRM_ERROR("UVD handle collision detected!\n");
@@ -599,7 +678,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
case 2:
/* it's a destroy msg, free the handle */
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+ for (i = 0; i < adev->uvd.max_handles; ++i)
atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
amdgpu_bo_kunmap(bo);
return 0;
@@ -879,7 +958,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
job->fence = f;
if (r)
goto err_free;
@@ -1011,13 +1090,17 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+ for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.handles[i]))
++handles;
if (fences == 0 && handles == 0) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, false);
+ /* workaround for the UVD clock remaining high even
+ * when UVD DPM is disabled on Polaris10 */
+ if (adev->asic_type == CHIP_POLARIS10)
+ amdgpu_asic_set_uvd_clocks(adev, 0, 0);
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index e933cb785..0a08cf930 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -41,15 +41,17 @@
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE "/*(DEBLOBBED)*/"
-#define FIRMWARE_KABINI "/*(DEBLOBBED)*/"
-#define FIRMWARE_KAVERI "/*(DEBLOBBED)*/"
-#define FIRMWARE_HAWAII "/*(DEBLOBBED)*/"
+#define FIRMWARE_KABINI "/*(DEBLOBBED)*/"
+#define FIRMWARE_KAVERI "/*(DEBLOBBED)*/"
+#define FIRMWARE_HAWAII "/*(DEBLOBBED)*/"
#define FIRMWARE_MULLINS "/*(DEBLOBBED)*/"
#endif
#define FIRMWARE_TONGA "/*(DEBLOBBED)*/"
#define FIRMWARE_CARRIZO "/*(DEBLOBBED)*/"
#define FIRMWARE_FIJI "/*(DEBLOBBED)*/"
#define FIRMWARE_STONEY "/*(DEBLOBBED)*/"
+#define FIRMWARE_POLARIS10 "/*(DEBLOBBED)*/"
+#define FIRMWARE_POLARIS11 "/*(DEBLOBBED)*/"
#ifdef CONFIG_DRM_AMDGPU_CIK
/*(DEBLOBBED)*/
@@ -106,6 +108,12 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
case CHIP_STONEY:
fw_name = FIRMWARE_STONEY;
break;
+ case CHIP_POLARIS10:
+ fw_name = FIRMWARE_POLARIS10;
+ break;
+ case CHIP_POLARIS11:
+ fw_name = FIRMWARE_POLARIS11;
+ break;
default:
return -EINVAL;
@@ -419,7 +427,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
job->fence = f;
if (r)
goto err;
@@ -481,7 +489,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
job->fence = f;
if (r)
goto err;
@@ -745,7 +753,8 @@ out:
* @ib: the IB to execute
*
*/
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
{
amdgpu_ring_write(ring, VCE_CMD_IB);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index ef99d2370..f40cf761c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -34,7 +34,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags);
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b6c011b83..9f36ed30b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -53,6 +53,18 @@
/* Special value meaning that no flush is necessary */
#define AMDGPU_VM_NO_FLUSH (~0ll)
+/* Local structure. Encapsulates some VM table update parameters to reduce
+ * the number of function parameters
+ */
+struct amdgpu_vm_update_params {
+ /* address where to copy page table entries from */
+ uint64_t src;
+ /* DMA addresses to use for mapping */
+ dma_addr_t *pages_addr;
+ /* indirect buffer to fill with commands */
+ struct amdgpu_ib *ib;
+};
+
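For orientation, here is how the three fields are consumed further down in amdgpu_vm_update_pages, distilled into one sketch (params is local shorthand, not a name used in the patch):

/* Sketch of the dispatch the struct feeds:
 *  - src set:        DMA-copy PTEs out of a GART shadow table
 *  - pages_addr set: write PTEs built from per-page DMA addresses
 *  - neither:        write a few PTEs directly, or set them in bulk
 */
if (params->src)
	amdgpu_vm_copy_pte(adev, params->ib, pe,
			   params->src + (addr >> 12) * 8, count);
else if (params->pages_addr)
	amdgpu_vm_write_pte(adev, params->ib, params->pages_addr,
			    pe, addr, count, incr, flags);
else if (count < 3)
	amdgpu_vm_write_pte(adev, params->ib, NULL, pe, addr,
			    count, incr, flags);
else
	amdgpu_vm_set_pte_pde(adev, params->ib, pe, addr,
			      count, incr, flags);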
/**
* amdgpu_vm_num_pde - return the number of page directory entries
*
@@ -166,74 +178,109 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
{
uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_vm_id *id = &vm->ids[ring->idx];
struct fence *updates = sync->last_vm_update;
+ struct amdgpu_vm_id *id;
+ unsigned i = ring->idx;
int r;
mutex_lock(&adev->vm_manager.lock);
- /* check if the id is still valid */
- if (id->mgr_id) {
- struct fence *flushed = id->flushed_updates;
- bool is_later;
- long owner;
+ /* Check if we can use a VMID already assigned to this VM */
+ do {
+ struct fence *flushed;
- if (!flushed)
- is_later = true;
- else if (!updates)
- is_later = false;
- else
- is_later = fence_is_later(updates, flushed);
+ id = vm->ids[i++];
+ if (i == AMDGPU_MAX_RINGS)
+ i = 0;
+
+ /* Check all the prerequisites to using this VMID */
+ if (!id)
+ continue;
+
+ if (atomic64_read(&id->owner) != vm->client_id)
+ continue;
- owner = atomic_long_read(&id->mgr_id->owner);
- if (!is_later && owner == (long)id &&
- pd_addr == id->pd_gpu_addr) {
+ if (pd_addr != id->pd_gpu_addr)
+ continue;
+
+ if (id->last_user != ring &&
+ (!id->last_flush || !fence_is_signaled(id->last_flush)))
+ continue;
+
+ flushed = id->flushed_updates;
+ if (updates && (!flushed || fence_is_later(updates, flushed)))
+ continue;
+		/* Good, we can use this VMID */
+ if (id->last_user == ring) {
r = amdgpu_sync_fence(ring->adev, sync,
- id->mgr_id->active);
- if (r) {
- mutex_unlock(&adev->vm_manager.lock);
- return r;
- }
+ id->first);
+ if (r)
+ goto error;
+ }
+
+ /* And remember this submission as user of the VMID */
+ r = amdgpu_sync_fence(ring->adev, &id->active, fence);
+ if (r)
+ goto error;
+
+ list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+ vm->ids[ring->idx] = id;
+
+ *vm_id = id - adev->vm_manager.ids;
+ *vm_pd_addr = AMDGPU_VM_NO_FLUSH;
+ trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+
+ mutex_unlock(&adev->vm_manager.lock);
+ return 0;
- fence_put(id->mgr_id->active);
- id->mgr_id->active = fence_get(fence);
+ } while (i != ring->idx);
- list_move_tail(&id->mgr_id->list,
- &adev->vm_manager.ids_lru);
+ id = list_first_entry(&adev->vm_manager.ids_lru,
+ struct amdgpu_vm_id,
+ list);
- *vm_id = id->mgr_id - adev->vm_manager.ids;
- *vm_pd_addr = AMDGPU_VM_NO_FLUSH;
- trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
- *vm_pd_addr);
+ if (!amdgpu_sync_is_idle(&id->active)) {
+ struct list_head *head = &adev->vm_manager.ids_lru;
+ struct amdgpu_vm_id *tmp;
- mutex_unlock(&adev->vm_manager.lock);
- return 0;
+ list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru,
+ list) {
+ if (amdgpu_sync_is_idle(&id->active)) {
+ list_move(&id->list, head);
+ head = &id->list;
+ }
}
+ id = list_first_entry(&adev->vm_manager.ids_lru,
+ struct amdgpu_vm_id,
+ list);
}
- id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
- struct amdgpu_vm_manager_id,
- list);
+ r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
+ if (r)
+ goto error;
- r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
- if (!r) {
- fence_put(id->mgr_id->active);
- id->mgr_id->active = fence_get(fence);
+ fence_put(id->first);
+ id->first = fence_get(fence);
- fence_put(id->flushed_updates);
- id->flushed_updates = fence_get(updates);
+ fence_put(id->last_flush);
+ id->last_flush = NULL;
- id->pd_gpu_addr = pd_addr;
+ fence_put(id->flushed_updates);
+ id->flushed_updates = fence_get(updates);
- list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
- atomic_long_set(&id->mgr_id->owner, (long)id);
+ id->pd_gpu_addr = pd_addr;
- *vm_id = id->mgr_id - adev->vm_manager.ids;
- *vm_pd_addr = pd_addr;
- trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
- }
+ list_move_tail(&id->list, &adev->vm_manager.ids_lru);
+ id->last_user = ring;
+ atomic64_set(&id->owner, vm->client_id);
+ vm->ids[ring->idx] = id;
+
+ *vm_id = id - adev->vm_manager.ids;
+ *vm_pd_addr = pd_addr;
+ trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
+error:
mutex_unlock(&adev->vm_manager.lock);
return r;
}
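The reuse loop above reduces to four independent checks per candidate ID; restated as a predicate for readability (a sketch built only from names in the hunk; the helper itself is hypothetical and does not exist in the patch):

/* Hypothetical helper: true iff a previously used VMID can be reused
 * without a flush. Mirrors the continue-chain in the loop above. */
static bool amdgpu_vm_id_reusable(struct amdgpu_vm_id *id,
				  struct amdgpu_vm *vm,
				  struct amdgpu_ring *ring,
				  uint64_t pd_addr, struct fence *updates)
{
	if (atomic64_read(&id->owner) != vm->client_id)
		return false;	/* ID was grabbed by another client */
	if (id->pd_gpu_addr != pd_addr)
		return false;	/* points at a different page directory */
	if (id->last_user != ring &&
	    (!id->last_flush || !fence_is_signaled(id->last_flush)))
		return false;	/* last flush not yet visible to this ring */
	if (updates && (!id->flushed_updates ||
			fence_is_later(updates, id->flushed_updates)))
		return false;	/* page tables changed since the last flush */
	return true;
}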
@@ -247,43 +294,62 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
*
* Emit a VM flush when it is necessary.
*/
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr,
- uint32_t gds_base, uint32_t gds_size,
- uint32_t gws_base, uint32_t gws_size,
- uint32_t oa_base, uint32_t oa_size)
+int amdgpu_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr,
+ uint32_t gds_base, uint32_t gds_size,
+ uint32_t gws_base, uint32_t gws_size,
+ uint32_t oa_base, uint32_t oa_size)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
+ struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
bool gds_switch_needed = ring->funcs->emit_gds_switch && (
- mgr_id->gds_base != gds_base ||
- mgr_id->gds_size != gds_size ||
- mgr_id->gws_base != gws_base ||
- mgr_id->gws_size != gws_size ||
- mgr_id->oa_base != oa_base ||
- mgr_id->oa_size != oa_size);
+ id->gds_base != gds_base ||
+ id->gds_size != gds_size ||
+ id->gws_base != gws_base ||
+ id->gws_size != gws_size ||
+ id->oa_base != oa_base ||
+ id->oa_size != oa_size);
+ int r;
if (ring->funcs->emit_pipeline_sync && (
- pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
+ pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
+ ring->type == AMDGPU_RING_TYPE_COMPUTE))
amdgpu_ring_emit_pipeline_sync(ring);
- if (pd_addr != AMDGPU_VM_NO_FLUSH) {
+ if (ring->funcs->emit_vm_flush &&
+ pd_addr != AMDGPU_VM_NO_FLUSH) {
+ struct fence *fence;
+
trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);
+
+ mutex_lock(&adev->vm_manager.lock);
+ if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) {
+ r = amdgpu_fence_emit(ring, &fence);
+ if (r) {
+ mutex_unlock(&adev->vm_manager.lock);
+ return r;
+ }
+ fence_put(id->last_flush);
+ id->last_flush = fence;
+ }
+ mutex_unlock(&adev->vm_manager.lock);
}
if (gds_switch_needed) {
- mgr_id->gds_base = gds_base;
- mgr_id->gds_size = gds_size;
- mgr_id->gws_base = gws_base;
- mgr_id->gws_size = gws_size;
- mgr_id->oa_base = oa_base;
- mgr_id->oa_size = oa_size;
+ id->gds_base = gds_base;
+ id->gds_size = gds_size;
+ id->gws_base = gws_base;
+ id->gws_size = gws_size;
+ id->oa_base = oa_base;
+ id->oa_size = oa_size;
amdgpu_ring_emit_gds_switch(ring, vm_id,
gds_base, gds_size,
gws_base, gws_size,
oa_base, oa_size);
}
+
+ return 0;
}
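Since the flush path can now fail when emitting its bookkeeping fence, the function returns int. Condensed, the new fence tracking looks like this (sketch with locking omitted, names taken from the hunk):

/* After emitting the VM flush, and only while this ring still owns the
 * VMID mapping, remember the fence that marks the flush so that
 * amdgpu_vm_grab_id() can later verify the flush has been observed. */
if (id->pd_gpu_addr == pd_addr && id->last_user == ring) {
	r = amdgpu_fence_emit(ring, &fence);
	if (r)
		return r;
	fence_put(id->last_flush);
	id->last_flush = fence;
}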
/**
@@ -296,14 +362,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring,
*/
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id)
{
- struct amdgpu_vm_manager_id *mgr_id = &adev->vm_manager.ids[vm_id];
-
- mgr_id->gds_base = 0;
- mgr_id->gds_size = 0;
- mgr_id->gws_base = 0;
- mgr_id->gws_size = 0;
- mgr_id->oa_base = 0;
- mgr_id->oa_size = 0;
+ struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
+
+ id->gds_base = 0;
+ id->gds_size = 0;
+ id->gws_base = 0;
+ id->gws_size = 0;
+ id->oa_base = 0;
+ id->oa_size = 0;
}
/**
@@ -335,9 +401,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* amdgpu_vm_update_pages - helper to call the right asic function
*
* @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: GTT hw access flags
- * @ib: indirect buffer to fill with commands
+ * @vm_update_params: see amdgpu_vm_update_params definition
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -348,30 +412,29 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* to setup the page table using the DMA.
*/
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
- struct amdgpu_gart *gtt,
- uint32_t gtt_flags,
- struct amdgpu_ib *ib,
+ struct amdgpu_vm_update_params
+ *vm_update_params,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
uint32_t flags)
{
trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
- if ((gtt == &adev->gart) && (flags == gtt_flags)) {
- uint64_t src = gtt->table_addr + (addr >> 12) * 8;
- amdgpu_vm_copy_pte(adev, ib, pe, src, count);
+ if (vm_update_params->src) {
+ amdgpu_vm_copy_pte(adev, vm_update_params->ib,
+ pe, (vm_update_params->src + (addr >> 12) * 8), count);
- } else if (gtt) {
- dma_addr_t *pages_addr = gtt->pages_addr;
- amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
- count, incr, flags);
+ } else if (vm_update_params->pages_addr) {
+ amdgpu_vm_write_pte(adev, vm_update_params->ib,
+ vm_update_params->pages_addr,
+ pe, addr, count, incr, flags);
} else if (count < 3) {
- amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
+ amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
count, incr, flags);
} else {
- amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
+ amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
count, incr, flags);
}
}
@@ -391,10 +454,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
struct fence *fence = NULL;
struct amdgpu_job *job;
+ struct amdgpu_vm_update_params vm_update_params;
unsigned entries;
uint64_t addr;
int r;
+ memset(&vm_update_params, 0, sizeof(vm_update_params));
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -412,7 +477,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error;
- amdgpu_vm_update_pages(adev, NULL, 0, &job->ibs[0], addr, 0, entries,
+ vm_update_params.ib = &job->ibs[0];
+ amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
0, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
@@ -485,11 +551,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
- struct amdgpu_ib *ib;
+ struct amdgpu_vm_update_params vm_update_params;
struct fence *fence = NULL;
int r;
+ memset(&vm_update_params, 0, sizeof(vm_update_params));
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
/* padding, etc. */
@@ -502,7 +569,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
if (r)
return r;
- ib = &job->ibs[0];
+ vm_update_params.ib = &job->ibs[0];
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -522,7 +589,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
((last_pt + incr * count) != pt)) {
if (count) {
- amdgpu_vm_update_pages(adev, NULL, 0, ib,
+ amdgpu_vm_update_pages(adev, &vm_update_params,
last_pde, last_pt,
count, incr,
AMDGPU_PTE_VALID);
@@ -537,14 +604,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
}
if (count)
- amdgpu_vm_update_pages(adev, NULL, 0, ib, last_pde, last_pt,
- count, incr, AMDGPU_PTE_VALID);
+ amdgpu_vm_update_pages(adev, &vm_update_params,
+ last_pde, last_pt,
+ count, incr, AMDGPU_PTE_VALID);
- if (ib->length_dw != 0) {
- amdgpu_ring_pad_ib(ring, ib);
+ if (vm_update_params.ib->length_dw != 0) {
+ amdgpu_ring_pad_ib(ring, vm_update_params.ib);
amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
- WARN_ON(ib->length_dw > ndw);
+ WARN_ON(vm_update_params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &fence);
if (r)
@@ -570,18 +638,15 @@ error_free:
* amdgpu_vm_frag_ptes - add fragment information to PTEs
*
* @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: GTT hw mapping flags
- * @ib: IB for the update
+ * @vm_update_params: see amdgpu_vm_update_params definition
* @pe_start: first PTE to handle
* @pe_end: last PTE to handle
* @addr: addr those PTEs should point to
* @flags: hw mapping flags
*/
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
- struct amdgpu_gart *gtt,
- uint32_t gtt_flags,
- struct amdgpu_ib *ib,
+ struct amdgpu_vm_update_params
+ *vm_update_params,
uint64_t pe_start, uint64_t pe_end,
uint64_t addr, uint32_t flags)
{
@@ -618,10 +683,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
return;
/* system pages are not physically contiguous */
- if (gtt || !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
+ if (vm_update_params->src || vm_update_params->pages_addr ||
+ !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
count = (pe_end - pe_start) / 8;
- amdgpu_vm_update_pages(adev, gtt, gtt_flags, ib, pe_start,
+ amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
addr, count, AMDGPU_GPU_PAGE_SIZE,
flags);
return;
@@ -630,21 +696,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
/* handle the 4K area at the beginning */
if (pe_start != frag_start) {
count = (frag_start - pe_start) / 8;
- amdgpu_vm_update_pages(adev, NULL, 0, ib, pe_start, addr,
+ amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
count, AMDGPU_GPU_PAGE_SIZE, flags);
addr += AMDGPU_GPU_PAGE_SIZE * count;
}
/* handle the area in the middle */
count = (frag_end - frag_start) / 8;
- amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_start, addr, count,
+ amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
/* handle the 4K area at the end */
if (frag_end != pe_end) {
addr += AMDGPU_GPU_PAGE_SIZE * count;
count = (pe_end - frag_end) / 8;
- amdgpu_vm_update_pages(adev, NULL, 0, ib, frag_end, addr,
+ amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
count, AMDGPU_GPU_PAGE_SIZE, flags);
}
}
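A worked example of the head/middle/tail split performed above, with invented numbers and the assumption that frag_start/frag_end are pe_start rounded up and pe_end rounded down to the fragment alignment:

/* pe_start = 0x1008, pe_end = 0x2010, fragment alignment = 0x80 bytes:
 *   frag_start = 0x1080, frag_end = 0x2000
 *   head   [0x1008, 0x1080): (0x1080 - 0x1008) / 8 =  15 plain PTEs
 *   middle [0x1080, 0x2000): (0x2000 - 0x1080) / 8 = 496 PTEs + frag_flags
 *   tail   [0x2000, 0x2010): (0x2010 - 0x2000) / 8 =   2 plain PTEs
 * addr advances by count * AMDGPU_GPU_PAGE_SIZE between the regions. */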
@@ -653,8 +719,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
* @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: GTT hw mapping flags
+ * @vm_update_params: see amdgpu_vm_update_params definition
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
@@ -664,10 +729,9 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
* Update the page tables in the range @start - @end.
*/
static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
- struct amdgpu_gart *gtt,
- uint32_t gtt_flags,
+ struct amdgpu_vm_update_params
+ *vm_update_params,
struct amdgpu_vm *vm,
- struct amdgpu_ib *ib,
uint64_t start, uint64_t end,
uint64_t dst, uint32_t flags)
{
@@ -693,7 +757,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
if (last_pe_end != pe_start) {
- amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
+ amdgpu_vm_frag_ptes(adev, vm_update_params,
last_pe_start, last_pe_end,
last_dst, flags);
@@ -708,17 +772,16 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
dst += nptes * AMDGPU_GPU_PAGE_SIZE;
}
- amdgpu_vm_frag_ptes(adev, gtt, gtt_flags, ib,
- last_pe_start, last_pe_end,
- last_dst, flags);
+ amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start,
+ last_pe_end, last_dst, flags);
}
/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
- * @gtt_flags: flags as they are used for GTT
+ * @src: address where to copy page table entries from
+ * @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @start: start of mapped range
* @last: last mapped entry
@@ -730,8 +793,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
- struct amdgpu_gart *gtt,
- uint32_t gtt_flags,
+ uint64_t src,
+ dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
uint32_t flags, uint64_t addr,
@@ -741,11 +804,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned nptes, ncmds, ndw;
struct amdgpu_job *job;
- struct amdgpu_ib *ib;
+ struct amdgpu_vm_update_params vm_update_params;
struct fence *f = NULL;
int r;
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ memset(&vm_update_params, 0, sizeof(vm_update_params));
+ vm_update_params.src = src;
+ vm_update_params.pages_addr = pages_addr;
/* sync to everything on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
@@ -762,11 +828,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* padding, etc. */
ndw = 64;
- if ((gtt == &adev->gart) && (flags == gtt_flags)) {
+ if (vm_update_params.src) {
/* only copy commands needed */
ndw += ncmds * 7;
- } else if (gtt) {
+ } else if (vm_update_params.pages_addr) {
/* header for write data commands */
ndw += ncmds * 4;
@@ -785,7 +851,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
return r;
- ib = &job->ibs[0];
+ vm_update_params.ib = &job->ibs[0];
r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
owner);
@@ -796,11 +862,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_vm_update_ptes(adev, gtt, gtt_flags, vm, ib, start, last + 1,
- addr, flags);
+ amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
+ last + 1, addr, flags);
- amdgpu_ring_pad_ib(ring, ib);
- WARN_ON(ib->length_dw > ndw);
+ amdgpu_ring_pad_ib(ring, vm_update_params.ib);
+ WARN_ON(vm_update_params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &f);
if (r)
@@ -823,11 +889,12 @@ error_free:
* amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
*
* @adev: amdgpu_device pointer
- * @gtt: GART instance to use for mapping
+ * @gtt_flags: flags as they are used for GTT
+ * @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
* @addr: addr to set the area to
- * @gtt_flags: flags as they are used for GTT
+ * @flags: HW flags for the mapping
* @fence: optional resulting fence
*
* Split the mapping into smaller chunks so that each update fits
@@ -835,16 +902,16 @@ error_free:
* Returns 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
- struct amdgpu_gart *gtt,
uint32_t gtt_flags,
+ dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
- uint64_t addr, struct fence **fence)
+ uint32_t flags, uint64_t addr,
+ struct fence **fence)
{
const uint64_t max_size = 64ULL * 1024ULL * 1024ULL / AMDGPU_GPU_PAGE_SIZE;
- uint64_t start = mapping->it.start;
- uint32_t flags = gtt_flags;
+ uint64_t src = 0, start = mapping->it.start;
int r;
/* normally, bo_va->flags only contains READABLE and WRITEABLE bits that go here
@@ -857,10 +924,15 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_update(mapping);
+ if (pages_addr) {
+ if (flags == gtt_flags)
+ src = adev->gart.table_addr + (addr >> 12) * 8;
+ addr = 0;
+ }
addr += mapping->offset;
- if (!gtt || ((gtt == &adev->gart) && (flags == gtt_flags)))
- return amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+ if (!pages_addr || src)
+ return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
start, mapping->it.last,
flags, addr, fence);
@@ -868,7 +940,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
uint64_t last;
last = min((uint64_t)mapping->it.last, start + max_size - 1);
- r = amdgpu_vm_bo_update_mapping(adev, gtt, gtt_flags, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm,
start, last, flags, addr,
fence);
if (r)
@@ -899,16 +971,20 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
{
struct amdgpu_vm *vm = bo_va->vm;
struct amdgpu_bo_va_mapping *mapping;
- struct amdgpu_gart *gtt = NULL;
- uint32_t flags;
+ dma_addr_t *pages_addr = NULL;
+ uint32_t gtt_flags, flags;
uint64_t addr;
int r;
if (mem) {
+ struct ttm_dma_tt *ttm;
+
addr = (u64)mem->start << PAGE_SHIFT;
switch (mem->mem_type) {
case TTM_PL_TT:
- gtt = &bo_va->bo->adev->gart;
+ ttm = container_of(bo_va->bo->tbo.ttm, struct
+ ttm_dma_tt, ttm);
+ pages_addr = ttm->dma_address;
break;
case TTM_PL_VRAM:
@@ -923,6 +999,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
}
flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
+ gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
spin_lock(&vm->status_lock);
if (!list_empty(&bo_va->vm_status))
@@ -930,7 +1007,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_unlock(&vm->status_lock);
list_for_each_entry(mapping, &bo_va->invalids, list) {
- r = amdgpu_vm_bo_split_mapping(adev, gtt, flags, vm, mapping, addr,
+ r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm,
+ mapping, flags, addr,
&bo_va->last_pt_update);
if (r)
return r;
@@ -976,8 +1054,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
- r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, vm, mapping,
- 0, NULL);
+ r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping,
+ 0, 0, NULL);
kfree(mapping);
if (r)
return r;
@@ -1320,11 +1398,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct amd_sched_rq *rq;
int i, r;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- vm->ids[i].mgr_id = NULL;
- vm->ids[i].flushed_updates = NULL;
- }
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+ vm->ids[i] = NULL;
vm->va = RB_ROOT;
+ vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->cleared);
@@ -1416,15 +1493,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);
-
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_vm_id *id = &vm->ids[i];
-
- if (id->mgr_id)
- atomic_long_cmpxchg(&id->mgr_id->owner,
- (long)id, 0);
- fence_put(id->flushed_updates);
- }
}
/**
@@ -1443,11 +1511,13 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
/* skip over VMID 0, since it is the system VM */
for (i = 1; i < adev->vm_manager.num_ids; ++i) {
amdgpu_vm_reset_id(adev, i);
+ amdgpu_sync_create(&adev->vm_manager.ids[i].active);
list_add_tail(&adev->vm_manager.ids[i].list,
&adev->vm_manager.ids_lru);
}
atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
+ atomic64_set(&adev->vm_manager.client_counter, 0);
}
/**
@@ -1461,6 +1531,11 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
unsigned i;
- for (i = 0; i < AMDGPU_NUM_VM; ++i)
- fence_put(adev->vm_manager.ids[i].active);
+ for (i = 0; i < AMDGPU_NUM_VM; ++i) {
+ struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
+
+ fence_put(adev->vm_manager.ids[i].first);
+ amdgpu_sync_free(&adev->vm_manager.ids[i].active);
+ fence_put(id->flushed_updates);
+ }
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index fece8f45d..49daf6d72 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -92,7 +92,7 @@
#define ATOM_WS_AND_MASK 0x45
#define ATOM_WS_FB_WINDOW 0x46
#define ATOM_WS_ATTRIBUTES 0x47
-#define ATOM_WS_REGPTR 0x48
+#define ATOM_WS_REGPTR 0x48
#define ATOM_IIO_NOP 0
#define ATOM_IIO_START 1
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 49aa35016..49a39b1a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -461,13 +461,14 @@ union set_pixel_clock {
PIXEL_CLOCK_PARAMETERS_V3 v3;
PIXEL_CLOCK_PARAMETERS_V5 v5;
PIXEL_CLOCK_PARAMETERS_V6 v6;
+ PIXEL_CLOCK_PARAMETERS_V7 v7;
};
/* on DCE5, make sure the voltage is high enough to support the
* required disp clk.
*/
void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
- u32 dispclk)
+ u32 dispclk)
{
u8 frev, crev;
int index;
@@ -510,6 +511,49 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}
+union set_dce_clock {
+ SET_DCE_CLOCK_PS_ALLOCATION_V1_1 v1_1;
+ SET_DCE_CLOCK_PS_ALLOCATION_V2_1 v2_1;
+};
+
+u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev,
+ u32 freq, u8 clk_type, u8 clk_src)
+{
+ u8 frev, crev;
+ int index;
+ union set_dce_clock args;
+ u32 ret_freq = 0;
+
+ memset(&args, 0, sizeof(args));
+
+ index = GetIndexIntoMasterTable(COMMAND, SetDCEClock);
+ if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev,
+ &crev))
+ return 0;
+
+ switch (frev) {
+ case 2:
+ switch (crev) {
+ case 1:
+ args.v2_1.asParam.ulDCEClkFreq = cpu_to_le32(freq); /* 10kHz units */
+ args.v2_1.asParam.ucDCEClkType = clk_type;
+ args.v2_1.asParam.ucDCEClkSrc = clk_src;
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ ret_freq = le32_to_cpu(args.v2_1.asParam.ulDCEClkFreq) * 10;
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return 0;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return 0;
+ }
+
+ return ret_freq;
+}
+
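A hedged usage sketch for the new helper. freq is in 10 kHz units and, per the v2_1 path above, the return value comes back in kHz; the clock-type and clock-source constants below are assumptions from ATOM naming conventions, not taken from this patch:

/* Hypothetical caller: ask the SetDCEClock table for a 600 MHz display
 * clock and check what was actually programmed (0 means failure). */
u32 khz = amdgpu_atombios_crtc_set_dce_clock(adev, 60000,
					     DCE_CLOCK_TYPE_DISPCLK, /* assumed */
					     ATOM_GCK_DFS);          /* assumed */
if (khz == 0)
	DRM_ERROR("SetDCEClock did not accept the request\n");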
static bool is_pixel_clock_source_from_pll(u32 encoder_mode, int pll_id)
{
if (ENCODER_MODE_IS_DP(encoder_mode)) {
@@ -523,18 +567,18 @@ static bool is_pixel_clock_source_from_pll(u32 encoder_mode, int pll_id)
}
void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
- u32 crtc_id,
- int pll_id,
- u32 encoder_mode,
- u32 encoder_id,
- u32 clock,
- u32 ref_div,
- u32 fb_div,
- u32 frac_fb_div,
- u32 post_div,
- int bpc,
- bool ss_enabled,
- struct amdgpu_atom_ss *ss)
+ u32 crtc_id,
+ int pll_id,
+ u32 encoder_mode,
+ u32 encoder_id,
+ u32 clock,
+ u32 ref_div,
+ u32 fb_div,
+ u32 frac_fb_div,
+ u32 post_div,
+ int bpc,
+ bool ss_enabled,
+ struct amdgpu_atom_ss *ss)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
@@ -652,6 +696,34 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v6.ucEncoderMode = encoder_mode;
args.v6.ucPpll = pll_id;
break;
+ case 7:
+ args.v7.ulPixelClock = cpu_to_le32(clock * 10); /* 100 hz units */
+ args.v7.ucMiscInfo = 0;
+ if ((encoder_mode == ATOM_ENCODER_MODE_DVI) &&
+ (clock > 165000))
+ args.v7.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
+ args.v7.ucCRTC = crtc_id;
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
+ switch (bpc) {
+ case 8:
+ default:
+ args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
+ break;
+ case 10:
+ args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4;
+ break;
+ case 12:
+ args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2;
+ break;
+ case 16:
+ args.v7.ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1;
+ break;
+ }
+ }
+ args.v7.ucTransmitterID = encoder_id;
+ args.v7.ucEncoderMode = encoder_mode;
+ args.v7.ucPpll = pll_id;
+ break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
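The deep-colour ratios selected in the v7 case above scale the HDMI TMDS clock relative to the pixel clock; worked out for a 297 MHz pixel clock (standard HDMI arithmetic, not from this patch):

 8 bpc: RATIO_DIS -> 1:1, TMDS = 297.00 MHz
10 bpc: RATIO_5_4 -> 297.00 MHz * 5/4 = 371.25 MHz
12 bpc: RATIO_3_2 -> 297.00 MHz * 3/2 = 445.50 MHz
16 bpc: RATIO_2_1 -> 297.00 MHz * 2   = 594.00 MHz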
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h
index c67083335..0eeda8e3b 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.h
@@ -37,6 +37,8 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
struct drm_display_mode *mode);
void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
u32 dispclk);
+u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev,
+ u32 freq, u8 clk_type, u8 clk_src);
void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
u32 crtc_id,
int pll_id,
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 1cd6de575..48b6bd671 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -567,6 +567,7 @@ union dig_encoder_control {
DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
+ DIG_ENCODER_CONTROL_PARAMETERS_V5 v5;
};
void
@@ -694,6 +695,47 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
else
args.v4.ucHPD_ID = hpd_id + 1;
break;
+ case 5:
+ switch (action) {
+ case ATOM_ENCODER_CMD_SETUP_PANEL_MODE:
+ args.v5.asDPPanelModeParam.ucAction = action;
+ args.v5.asDPPanelModeParam.ucPanelMode = panel_mode;
+ args.v5.asDPPanelModeParam.ucDigId = dig->dig_encoder;
+ break;
+ case ATOM_ENCODER_CMD_STREAM_SETUP:
+ args.v5.asStreamParam.ucAction = action;
+ args.v5.asStreamParam.ucDigId = dig->dig_encoder;
+ args.v5.asStreamParam.ucDigMode =
+ amdgpu_atombios_encoder_get_encoder_mode(encoder);
+ if (ENCODER_MODE_IS_DP(args.v5.asStreamParam.ucDigMode))
+ args.v5.asStreamParam.ucLaneNum = dp_lane_count;
+ else if (amdgpu_dig_monitor_is_duallink(encoder,
+ amdgpu_encoder->pixel_clock))
+ args.v5.asStreamParam.ucLaneNum = 8;
+ else
+ args.v5.asStreamParam.ucLaneNum = 4;
+ args.v5.asStreamParam.ulPixelClock =
+ cpu_to_le32(amdgpu_encoder->pixel_clock / 10);
+ args.v5.asStreamParam.ucBitPerColor =
+ amdgpu_atombios_encoder_get_bpc(encoder);
+ args.v5.asStreamParam.ucLinkRateIn270Mhz = dp_clock / 27000;
+ break;
+ case ATOM_ENCODER_CMD_DP_LINK_TRAINING_START:
+ case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1:
+ case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2:
+ case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3:
+ case ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN4:
+ case ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE:
+ case ATOM_ENCODER_CMD_DP_VIDEO_OFF:
+ case ATOM_ENCODER_CMD_DP_VIDEO_ON:
+ args.v5.asCmdParam.ucAction = action;
+ args.v5.asCmdParam.ucDigId = dig->dig_encoder;
+ break;
+ default:
+ DRM_ERROR("Unsupported action 0x%x\n", action);
+ break;
+ }
+ break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
@@ -714,11 +756,12 @@ union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6 v6;
};
void
amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int action,
- uint8_t lane_num, uint8_t lane_set)
+ uint8_t lane_num, uint8_t lane_set)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_device *adev = dev->dev_private;
@@ -1070,6 +1113,54 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
args.v5.ucDigEncoderSel = 1 << dig_encoder;
args.v5.ucDPLaneSet = lane_set;
break;
+ case 6:
+ args.v6.ucAction = action;
+ if (is_dp)
+ args.v6.ulSymClock = cpu_to_le32(dp_clock / 10);
+ else
+ args.v6.ulSymClock = cpu_to_le32(amdgpu_encoder->pixel_clock / 10);
+
+ switch (amdgpu_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig->linkb)
+ args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYB;
+ else
+ args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig->linkb)
+ args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYD;
+ else
+ args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->linkb)
+ args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYF;
+ else
+ args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ args.v6.ucPhyId = ATOM_PHY_ID_UNIPHYG;
+ break;
+ }
+ if (is_dp)
+ args.v6.ucLaneNum = dp_lane_count;
+ else if (amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock))
+ args.v6.ucLaneNum = 8;
+ else
+ args.v6.ucLaneNum = 4;
+ args.v6.ucConnObjId = connector_object_id;
+ if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH)
+ args.v6.ucDPLaneSet = lane_set;
+ else
+ args.v6.ucDigMode = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+
+ if (hpd_id == AMDGPU_HPD_NONE)
+ args.v6.ucHPDSel = 0;
+ else
+ args.v6.ucHPDSel = hpd_id + 1;
+ args.v6.ucDigEncoderSel = 1 << dig_encoder;
+ break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index 13cdb01e9..bc56c8a18 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -156,3 +156,18 @@ u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev, u8 slave_addr, u8 line_number, u8 offset, u8 data)
+{
+ PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
+
+ args.ucRegIndex = offset;
+ args.lpI2CDataOut = data;
+ args.ucFlag = 1;
+ args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
+ args.ucTransBytes = 1;
+ args.ucSlaveAddr = slave_addr;
+ args.ucLineNumber = line_number;
+
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
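A call sketch for the new single-byte transaction helper; every value below is a placeholder chosen for illustration, not taken from this patch:

/* Hypothetical: write 0x01 into register 0x40 of a slave at bus address
 * 0xA0 on BIOS I2C line 0, one byte at TARGET_HW_I2C_CLOCK speed. */
amdgpu_atombios_i2c_channel_trans(adev, 0xA0, 0, 0x40, 0x01);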
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h
index d6128d9de..251aaf41f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.h
@@ -27,5 +27,7 @@
int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num);
u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap);
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev,
+ u8 slave_addr, u8 line_number, u8 offset, u8 data);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 285ea5ea2..040dd26ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -2548,19 +2548,17 @@ static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
return 0;
}
-static u8 ci_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
- u32 sclk, u32 min_sclk_in_sr)
+static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
{
u32 i;
u32 tmp;
- u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
- min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
+ u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
if (sclk < min)
return 0;
for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
- tmp = sclk / (1 << i);
+ tmp = sclk >> i;
if (tmp >= min || i == 0)
break;
}
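A worked run of the divider search, with invented clocks and the assumption that CISLAND_MAX_DEEPSLEEP_DIVIDER_ID is 5:

/* sclk = 80000, min = 15000:
 *   i=5: 80000 >> 5 =  2500 < 15000, continue
 *   i=4: 80000 >> 4 =  5000 < 15000, continue
 *   i=3: 80000 >> 3 = 10000 < 15000, continue
 *   i=2: 80000 >> 2 = 20000 >= 15000 -> break, divider id 2
 * i.e. the deepest power-of-two division keeping sclk at or above min. */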
@@ -3357,8 +3355,7 @@ static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
graphic_level->PowerThrottle = 0;
if (pi->caps_sclk_ds)
- graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(adev,
- engine_clock,
+ graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
CISLAND_MINIMUM_ENGINE_CLOCK);
graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
@@ -6223,6 +6220,9 @@ static int ci_dpm_sw_fini(void *handle)
ci_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
@@ -6308,215 +6308,6 @@ static int ci_dpm_wait_for_idle(void *handle)
return 0;
}
-static void ci_dpm_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "CIK DPM registers\n");
- dev_info(adev->dev, " BIOS_SCRATCH_4=0x%08X\n",
- RREG32(mmBIOS_SCRATCH_4));
- dev_info(adev->dev, " MC_ARB_DRAM_TIMING=0x%08X\n",
- RREG32(mmMC_ARB_DRAM_TIMING));
- dev_info(adev->dev, " MC_ARB_DRAM_TIMING2=0x%08X\n",
- RREG32(mmMC_ARB_DRAM_TIMING2));
- dev_info(adev->dev, " MC_ARB_BURST_TIME=0x%08X\n",
- RREG32(mmMC_ARB_BURST_TIME));
- dev_info(adev->dev, " MC_ARB_DRAM_TIMING_1=0x%08X\n",
- RREG32(mmMC_ARB_DRAM_TIMING_1));
- dev_info(adev->dev, " MC_ARB_DRAM_TIMING2_1=0x%08X\n",
- RREG32(mmMC_ARB_DRAM_TIMING2_1));
- dev_info(adev->dev, " MC_CG_CONFIG=0x%08X\n",
- RREG32(mmMC_CG_CONFIG));
- dev_info(adev->dev, " MC_ARB_CG=0x%08X\n",
- RREG32(mmMC_ARB_CG));
- dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n",
- RREG32_DIDT(ixDIDT_SQ_CTRL0));
- dev_info(adev->dev, " DIDT_DB_CTRL0=0x%08X\n",
- RREG32_DIDT(ixDIDT_DB_CTRL0));
- dev_info(adev->dev, " DIDT_TD_CTRL0=0x%08X\n",
- RREG32_DIDT(ixDIDT_TD_CTRL0));
- dev_info(adev->dev, " DIDT_TCP_CTRL0=0x%08X\n",
- RREG32_DIDT(ixDIDT_TCP_CTRL0));
- dev_info(adev->dev, " CG_THERMAL_INT=0x%08X\n",
- RREG32_SMC(ixCG_THERMAL_INT));
- dev_info(adev->dev, " CG_THERMAL_CTRL=0x%08X\n",
- RREG32_SMC(ixCG_THERMAL_CTRL));
- dev_info(adev->dev, " GENERAL_PWRMGT=0x%08X\n",
- RREG32_SMC(ixGENERAL_PWRMGT));
- dev_info(adev->dev, " MC_SEQ_CNTL_3=0x%08X\n",
- RREG32(mmMC_SEQ_CNTL_3));
- dev_info(adev->dev, " LCAC_MC0_CNTL=0x%08X\n",
- RREG32_SMC(ixLCAC_MC0_CNTL));
- dev_info(adev->dev, " LCAC_MC1_CNTL=0x%08X\n",
- RREG32_SMC(ixLCAC_MC1_CNTL));
- dev_info(adev->dev, " LCAC_CPL_CNTL=0x%08X\n",
- RREG32_SMC(ixLCAC_CPL_CNTL));
- dev_info(adev->dev, " SCLK_PWRMGT_CNTL=0x%08X\n",
- RREG32_SMC(ixSCLK_PWRMGT_CNTL));
- dev_info(adev->dev, " BIF_LNCNT_RESET=0x%08X\n",
- RREG32(mmBIF_LNCNT_RESET));
- dev_info(adev->dev, " FIRMWARE_FLAGS=0x%08X\n",
- RREG32_SMC(ixFIRMWARE_FLAGS));
- dev_info(adev->dev, " CG_SPLL_FUNC_CNTL=0x%08X\n",
- RREG32_SMC(ixCG_SPLL_FUNC_CNTL));
- dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_2=0x%08X\n",
- RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2));
- dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_3=0x%08X\n",
- RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3));
- dev_info(adev->dev, " CG_SPLL_FUNC_CNTL_4=0x%08X\n",
- RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4));
- dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM=0x%08X\n",
- RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM));
- dev_info(adev->dev, " CG_SPLL_SPREAD_SPECTRUM_2=0x%08X\n",
- RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2));
- dev_info(adev->dev, " DLL_CNTL=0x%08X\n",
- RREG32(mmDLL_CNTL));
- dev_info(adev->dev, " MCLK_PWRMGT_CNTL=0x%08X\n",
- RREG32(mmMCLK_PWRMGT_CNTL));
- dev_info(adev->dev, " MPLL_AD_FUNC_CNTL=0x%08X\n",
- RREG32(mmMPLL_AD_FUNC_CNTL));
- dev_info(adev->dev, " MPLL_DQ_FUNC_CNTL=0x%08X\n",
- RREG32(mmMPLL_DQ_FUNC_CNTL));
- dev_info(adev->dev, " MPLL_FUNC_CNTL=0x%08X\n",
- RREG32(mmMPLL_FUNC_CNTL));
- dev_info(adev->dev, " MPLL_FUNC_CNTL_1=0x%08X\n",
- RREG32(mmMPLL_FUNC_CNTL_1));
- dev_info(adev->dev, " MPLL_FUNC_CNTL_2=0x%08X\n",
- RREG32(mmMPLL_FUNC_CNTL_2));
- dev_info(adev->dev, " MPLL_SS1=0x%08X\n",
- RREG32(mmMPLL_SS1));
- dev_info(adev->dev, " MPLL_SS2=0x%08X\n",
- RREG32(mmMPLL_SS2));
- dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL=0x%08X\n",
- RREG32_SMC(ixCG_DISPLAY_GAP_CNTL));
- dev_info(adev->dev, " CG_DISPLAY_GAP_CNTL2=0x%08X\n",
- RREG32_SMC(ixCG_DISPLAY_GAP_CNTL2));
- dev_info(adev->dev, " CG_STATIC_SCREEN_PARAMETER=0x%08X\n",
- RREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER));
- dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_0=0x%08X\n",
- RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0));
- dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_1=0x%08X\n",
- RREG32_SMC(ixCG_FREQ_TRAN_VOTING_1));
- dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_2=0x%08X\n",
- RREG32_SMC(ixCG_FREQ_TRAN_VOTING_2));
- dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_3=0x%08X\n",
- RREG32_SMC(ixCG_FREQ_TRAN_VOTING_3));
- dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_4=0x%08X\n",
- RREG32_SMC(ixCG_FREQ_TRAN_VOTING_4));
- dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_5=0x%08X\n",
- RREG32_SMC(ixCG_FREQ_TRAN_VOTING_5));
- dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_6=0x%08X\n",
- RREG32_SMC(ixCG_FREQ_TRAN_VOTING_6));
- dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_7=0x%08X\n",
- RREG32_SMC(ixCG_FREQ_TRAN_VOTING_7));
- dev_info(adev->dev, " RCU_UC_EVENTS=0x%08X\n",
- RREG32_SMC(ixRCU_UC_EVENTS));
- dev_info(adev->dev, " DPM_TABLE_475=0x%08X\n",
- RREG32_SMC(ixDPM_TABLE_475));
- dev_info(adev->dev, " MC_SEQ_RAS_TIMING_LP=0x%08X\n",
- RREG32(mmMC_SEQ_RAS_TIMING_LP));
- dev_info(adev->dev, " MC_SEQ_RAS_TIMING=0x%08X\n",
- RREG32(mmMC_SEQ_RAS_TIMING));
- dev_info(adev->dev, " MC_SEQ_CAS_TIMING_LP=0x%08X\n",
- RREG32(mmMC_SEQ_CAS_TIMING_LP));
- dev_info(adev->dev, " MC_SEQ_CAS_TIMING=0x%08X\n",
- RREG32(mmMC_SEQ_CAS_TIMING));
- dev_info(adev->dev, " MC_SEQ_DLL_STBY_LP=0x%08X\n",
- RREG32(mmMC_SEQ_DLL_STBY_LP));
- dev_info(adev->dev, " MC_SEQ_DLL_STBY=0x%08X\n",
- RREG32(mmMC_SEQ_DLL_STBY));
- dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0_LP=0x%08X\n",
- RREG32(mmMC_SEQ_G5PDX_CMD0_LP));
- dev_info(adev->dev, " MC_SEQ_G5PDX_CMD0=0x%08X\n",
- RREG32(mmMC_SEQ_G5PDX_CMD0));
- dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1_LP=0x%08X\n",
- RREG32(mmMC_SEQ_G5PDX_CMD1_LP));
- dev_info(adev->dev, " MC_SEQ_G5PDX_CMD1=0x%08X\n",
- RREG32(mmMC_SEQ_G5PDX_CMD1));
- dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL_LP=0x%08X\n",
- RREG32(mmMC_SEQ_G5PDX_CTRL_LP));
- dev_info(adev->dev, " MC_SEQ_G5PDX_CTRL=0x%08X\n",
- RREG32(mmMC_SEQ_G5PDX_CTRL));
- dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD_LP=0x%08X\n",
- RREG32(mmMC_SEQ_PMG_DVS_CMD_LP));
- dev_info(adev->dev, " MC_SEQ_PMG_DVS_CMD=0x%08X\n",
- RREG32(mmMC_SEQ_PMG_DVS_CMD));
- dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL_LP=0x%08X\n",
- RREG32(mmMC_SEQ_PMG_DVS_CTL_LP));
- dev_info(adev->dev, " MC_SEQ_PMG_DVS_CTL=0x%08X\n",
- RREG32(mmMC_SEQ_PMG_DVS_CTL));
- dev_info(adev->dev, " MC_SEQ_MISC_TIMING_LP=0x%08X\n",
- RREG32(mmMC_SEQ_MISC_TIMING_LP));
- dev_info(adev->dev, " MC_SEQ_MISC_TIMING=0x%08X\n",
- RREG32(mmMC_SEQ_MISC_TIMING));
- dev_info(adev->dev, " MC_SEQ_MISC_TIMING2_LP=0x%08X\n",
- RREG32(mmMC_SEQ_MISC_TIMING2_LP));
- dev_info(adev->dev, " MC_SEQ_MISC_TIMING2=0x%08X\n",
- RREG32(mmMC_SEQ_MISC_TIMING2));
- dev_info(adev->dev, " MC_SEQ_PMG_CMD_EMRS_LP=0x%08X\n",
- RREG32(mmMC_SEQ_PMG_CMD_EMRS_LP));
- dev_info(adev->dev, " MC_PMG_CMD_EMRS=0x%08X\n",
- RREG32(mmMC_PMG_CMD_EMRS));
- dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS_LP=0x%08X\n",
- RREG32(mmMC_SEQ_PMG_CMD_MRS_LP));
- dev_info(adev->dev, " MC_PMG_CMD_MRS=0x%08X\n",
- RREG32(mmMC_PMG_CMD_MRS));
- dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS1_LP=0x%08X\n",
- RREG32(mmMC_SEQ_PMG_CMD_MRS1_LP));
- dev_info(adev->dev, " MC_PMG_CMD_MRS1=0x%08X\n",
- RREG32(mmMC_PMG_CMD_MRS1));
- dev_info(adev->dev, " MC_SEQ_WR_CTL_D0_LP=0x%08X\n",
- RREG32(mmMC_SEQ_WR_CTL_D0_LP));
- dev_info(adev->dev, " MC_SEQ_WR_CTL_D0=0x%08X\n",
- RREG32(mmMC_SEQ_WR_CTL_D0));
- dev_info(adev->dev, " MC_SEQ_WR_CTL_D1_LP=0x%08X\n",
- RREG32(mmMC_SEQ_WR_CTL_D1_LP));
- dev_info(adev->dev, " MC_SEQ_WR_CTL_D1=0x%08X\n",
- RREG32(mmMC_SEQ_WR_CTL_D1));
- dev_info(adev->dev, " MC_SEQ_RD_CTL_D0_LP=0x%08X\n",
- RREG32(mmMC_SEQ_RD_CTL_D0_LP));
- dev_info(adev->dev, " MC_SEQ_RD_CTL_D0=0x%08X\n",
- RREG32(mmMC_SEQ_RD_CTL_D0));
- dev_info(adev->dev, " MC_SEQ_RD_CTL_D1_LP=0x%08X\n",
- RREG32(mmMC_SEQ_RD_CTL_D1_LP));
- dev_info(adev->dev, " MC_SEQ_RD_CTL_D1=0x%08X\n",
- RREG32(mmMC_SEQ_RD_CTL_D1));
- dev_info(adev->dev, " MC_SEQ_PMG_TIMING_LP=0x%08X\n",
- RREG32(mmMC_SEQ_PMG_TIMING_LP));
- dev_info(adev->dev, " MC_SEQ_PMG_TIMING=0x%08X\n",
- RREG32(mmMC_SEQ_PMG_TIMING));
- dev_info(adev->dev, " MC_SEQ_PMG_CMD_MRS2_LP=0x%08X\n",
- RREG32(mmMC_SEQ_PMG_CMD_MRS2_LP));
- dev_info(adev->dev, " MC_PMG_CMD_MRS2=0x%08X\n",
- RREG32(mmMC_PMG_CMD_MRS2));
- dev_info(adev->dev, " MC_SEQ_WR_CTL_2_LP=0x%08X\n",
- RREG32(mmMC_SEQ_WR_CTL_2_LP));
- dev_info(adev->dev, " MC_SEQ_WR_CTL_2=0x%08X\n",
- RREG32(mmMC_SEQ_WR_CTL_2));
- dev_info(adev->dev, " PCIE_LC_SPEED_CNTL=0x%08X\n",
- RREG32_PCIE(ixPCIE_LC_SPEED_CNTL));
- dev_info(adev->dev, " PCIE_LC_LINK_WIDTH_CNTL=0x%08X\n",
- RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL));
- dev_info(adev->dev, " SMC_IND_INDEX_0=0x%08X\n",
- RREG32(mmSMC_IND_INDEX_0));
- dev_info(adev->dev, " SMC_IND_DATA_0=0x%08X\n",
- RREG32(mmSMC_IND_DATA_0));
- dev_info(adev->dev, " SMC_IND_ACCESS_CNTL=0x%08X\n",
- RREG32(mmSMC_IND_ACCESS_CNTL));
- dev_info(adev->dev, " SMC_RESP_0=0x%08X\n",
- RREG32(mmSMC_RESP_0));
- dev_info(adev->dev, " SMC_MESSAGE_0=0x%08X\n",
- RREG32(mmSMC_MESSAGE_0));
- dev_info(adev->dev, " SMC_SYSCON_RESET_CNTL=0x%08X\n",
- RREG32_SMC(ixSMC_SYSCON_RESET_CNTL));
- dev_info(adev->dev, " SMC_SYSCON_CLOCK_CNTL_0=0x%08X\n",
- RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0));
- dev_info(adev->dev, " SMC_SYSCON_MISC_CNTL=0x%08X\n",
- RREG32_SMC(ixSMC_SYSCON_MISC_CNTL));
- dev_info(adev->dev, " SMC_PC_C=0x%08X\n",
- RREG32_SMC(ixSMC_PC_C));
-}
-
static int ci_dpm_soft_reset(void *handle)
{
return 0;
@@ -6571,7 +6362,7 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
}
static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
+ struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
bool queue_thermal = false;
@@ -6613,6 +6404,7 @@ static int ci_dpm_set_powergating_state(void *handle,
}
const struct amd_ip_funcs ci_dpm_ip_funcs = {
+ .name = "ci_dpm",
.early_init = ci_dpm_early_init,
.late_init = ci_dpm_late_init,
.sw_init = ci_dpm_sw_init,
@@ -6624,7 +6416,6 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
.is_idle = ci_dpm_is_idle,
.wait_for_idle = ci_dpm_wait_for_idle,
.soft_reset = ci_dpm_soft_reset,
- .print_status = ci_dpm_print_status,
.set_clockgating_state = ci_dpm_set_clockgating_state,
.set_powergating_state = ci_dpm_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index bddc9ba11..910431808 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -962,7 +962,13 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
+static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
+{
+ /* CIK does not support SR-IOV */
+ return 0;
+}
+
+static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS, false},
{mmGB_ADDR_CONFIG, false},
{mmMC_ARB_RAMCFG, false},
@@ -2007,7 +2013,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.get_xclk = &cik_get_xclk,
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
- .get_cu_info = &gfx_v7_0_get_cu_info,
+ .get_virtual_caps = &cik_get_virtual_caps,
/* these should be moved to their own ip modules */
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
@@ -2214,11 +2220,6 @@ static int cik_common_wait_for_idle(void *handle)
return 0;
}
-static void cik_common_print_status(void *handle)
-{
-
-}
-
static int cik_common_soft_reset(void *handle)
{
/* XXX hard reset?? */
@@ -2238,6 +2239,7 @@ static int cik_common_set_powergating_state(void *handle,
}
const struct amd_ip_funcs cik_common_ip_funcs = {
+ .name = "cik_common",
.early_init = cik_common_early_init,
.late_init = NULL,
.sw_init = cik_common_sw_init,
@@ -2249,7 +2251,6 @@ const struct amd_ip_funcs cik_common_ip_funcs = {
.is_idle = cik_common_is_idle,
.wait_for_idle = cik_common_wait_for_idle,
.soft_reset = cik_common_soft_reset,
- .print_status = cik_common_print_status,
.set_clockgating_state = cik_common_set_clockgating_state,
.set_powergating_state = cik_common_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 30c9b3bee..be3d6f79a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -103,7 +103,6 @@ static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int cik_ih_irq_init(struct amdgpu_device *adev)
{
- int ret = 0;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
u64 wptr_off;
@@ -156,7 +155,7 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
/* enable irqs */
cik_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
@@ -243,7 +242,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
/* wptr/rptr are in bytes! */
u32 ring_index = adev->irq.ih.rptr >> 2;
uint32_t dw[4];
-
+
dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
@@ -372,35 +371,6 @@ static int cik_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static void cik_ih_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "CIK IH registers\n");
- dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(mmSRBM_STATUS));
- dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
- RREG32(mmSRBM_STATUS2));
- dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n",
- RREG32(mmINTERRUPT_CNTL));
- dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n",
- RREG32(mmINTERRUPT_CNTL2));
- dev_info(adev->dev, " IH_CNTL=0x%08X\n",
- RREG32(mmIH_CNTL));
- dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n",
- RREG32(mmIH_RB_CNTL));
- dev_info(adev->dev, " IH_RB_BASE=0x%08X\n",
- RREG32(mmIH_RB_BASE));
- dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n",
- RREG32(mmIH_RB_WPTR_ADDR_LO));
- dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n",
- RREG32(mmIH_RB_WPTR_ADDR_HI));
- dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n",
- RREG32(mmIH_RB_RPTR));
- dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n",
- RREG32(mmIH_RB_WPTR));
-}
-
static int cik_ih_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -412,8 +382,6 @@ static int cik_ih_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_IH_MASK;
if (srbm_soft_reset) {
- cik_ih_print_status((void *)adev);
-
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -428,8 +396,6 @@ static int cik_ih_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
-
- cik_ih_print_status((void *)adev);
}
return 0;
@@ -448,6 +414,7 @@ static int cik_ih_set_powergating_state(void *handle,
}
const struct amd_ip_funcs cik_ih_ip_funcs = {
+ .name = "cik_ih",
.early_init = cik_ih_early_init,
.late_init = NULL,
.sw_init = cik_ih_sw_init,
@@ -459,7 +426,6 @@ const struct amd_ip_funcs cik_ih_ip_funcs = {
.is_idle = cik_ih_is_idle,
.wait_for_idle = cik_ih_wait_for_idle,
.soft_reset = cik_ih_soft_reset,
- .print_status = cik_ih_print_status,
.set_clockgating_state = cik_ih_set_clockgating_state,
.set_powergating_state = cik_ih_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 263ecd5e4..c56485e4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -57,6 +57,16 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+
+static void cik_sdma_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
+
/*
* sDMA - System DMA
* Starting with CIK, the GPU has new asynchronous
@@ -201,9 +211,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (CIK).
*/
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib)
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
{
- u32 extra_bits = ib->vm_id & 0xf;
+ u32 extra_bits = vm_id & 0xf;
u32 next_rptr = ring->wptr + 5;
while ((next_rptr & 7) != 4)
@@ -409,6 +420,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -436,7 +449,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+
+ cik_sdma_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
@@ -519,8 +537,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
if (r)
return r;
- /* unhalt the MEs */
- cik_sdma_enable(adev, true);
+	/* halt the engine before programming */
+ cik_sdma_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = cik_sdma_gfx_resume(adev);
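The bring-up order is inverted: the engine is now halted while the ring registers are programmed and re-enabled only afterwards, presumably so nothing executes from a half-programmed ring buffer (an inference; the patch states no rationale). Condensed, using only function names from these hunks:

/* new order: load microcode (elided above), halt, program, unhalt, test */
cik_sdma_enable(adev, false);	/* halt before touching ring registers */
r = cik_sdma_gfx_resume(adev);	/* programs rings, re-enables the engine,
				 * then ring-tests every instance */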
@@ -634,7 +652,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[3] = 1;
ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5;
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
if (r)
goto err1;
@@ -967,7 +985,7 @@ static int cik_sdma_sw_init(void *handle)
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
sprintf(ring->name, "sdma%d", i);
- r = amdgpu_ring_init(adev, ring, 256 * 1024,
+ r = amdgpu_ring_init(adev, ring, 1024,
SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
@@ -988,6 +1006,7 @@ static int cik_sdma_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ cik_sdma_free_microcode(adev);
return 0;
}
@@ -1055,57 +1074,6 @@ static int cik_sdma_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static void cik_sdma_print_status(void *handle)
-{
- int i, j;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "CIK SDMA registers\n");
- dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
- RREG32(mmSRBM_STATUS2));
- for (i = 0; i < adev->sdma.num_instances; i++) {
- dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
- i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_ME_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n",
- i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
- mutex_lock(&adev->srbm_mutex);
- for (j = 0; j < 16; j++) {
- cik_srbm_select(adev, 0, 0, 0, j);
- dev_info(adev->dev, " VM %d:\n", j);
- dev_info(adev->dev, " SDMA0_GFX_VIRTUAL_ADDR=0x%08X\n",
- RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA0_GFX_APE1_CNTL=0x%08X\n",
- RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
- }
- cik_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- }
-}
-
static int cik_sdma_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
@@ -1128,8 +1096,6 @@ static int cik_sdma_soft_reset(void *handle)
}
if (srbm_soft_reset) {
- cik_sdma_print_status((void *)adev);
-
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -1144,8 +1110,6 @@ static int cik_sdma_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
-
- cik_sdma_print_status((void *)adev);
}
return 0;
@@ -1269,6 +1233,7 @@ static int cik_sdma_set_powergating_state(void *handle,
}
const struct amd_ip_funcs cik_sdma_ip_funcs = {
+ .name = "cik_sdma",
.early_init = cik_sdma_early_init,
.late_init = NULL,
.sw_init = cik_sdma_sw_init,
@@ -1280,7 +1245,6 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
.is_idle = cik_sdma_is_idle,
.wait_for_idle = cik_sdma_wait_for_idle,
.soft_reset = cik_sdma_soft_reset,
- .print_status = cik_sdma_print_status,
.set_clockgating_state = cik_sdma_set_clockgating_state,
.set_powergating_state = cik_sdma_set_powergating_state,
};
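Editor's note: a pattern repeated for every IP block in this commit is visible above — the per-block print_status register dump is dropped from struct amd_ip_funcs and a .name string is added, so generic code can label a block itself. A minimal sketch of that shape, assuming a deliberately trimmed-down struct (the real one in drivers/gpu/drm/amd/include/amd_shared.h has many more hooks; ip_funcs_sketch and do_soft_reset are hypothetical names):

/* Sketch only: trimmed-down view of the amd_ip_funcs change. */
struct ip_funcs_sketch {
	const char *name;                 /* new in this patch */
	int (*soft_reset)(void *handle);  /* print_status hook is gone */
};

/* Hypothetical consumer: with a name string, common code can label
 * its messages instead of each block dumping its own registers. */
static int do_soft_reset(const struct ip_funcs_sketch *f, void *handle)
{
	pr_info("soft reset: %s\n", f->name);
	return f->soft_reset ? f->soft_reset(handle) : 0;
}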
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 60d449320..c4f6f00d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -190,8 +190,8 @@
# define MACRO_TILE_ASPECT(x) ((x) << 4)
# define NUM_BANKS(x) ((x) << 6)
-#define MSG_ENTER_RLC_SAFE_MODE 1
-#define MSG_EXIT_RLC_SAFE_MODE 0
+#define MSG_ENTER_RLC_SAFE_MODE 1
+#define MSG_EXIT_RLC_SAFE_MODE 0
/*
* PM4
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index e7ef2261f..933e425a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1579,7 +1579,6 @@ static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev)
static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
{
- int ret = 0;
struct cz_power_info *pi = cz_get_pi(adev);
if (pi->caps_sclk_ds) {
@@ -1588,20 +1587,19 @@ static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
CZ_MIN_DEEP_SLEEP_SCLK);
}
- return ret;
+ return 0;
}
/* ?? without dal support, is this still needed in setpowerstate list*/
static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev)
{
- int ret = 0;
struct cz_power_info *pi = cz_get_pi(adev);
cz_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SetWatermarkFrequency,
pi->sclk_dpm.soft_max_clk);
- return ret;
+ return 0;
}
static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev)
@@ -1636,7 +1634,6 @@ static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev,
static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
{
- int ret = 0;
struct cz_power_info *pi = cz_get_pi(adev);
struct cz_ps *ps = &pi->requested_ps;
@@ -1647,21 +1644,19 @@ static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
cz_dpm_nbdpm_lm_pstate_enable(adev, true);
}
- return ret;
+ return 0;
}
/* with dpm enabled */
static int cz_dpm_set_power_state(struct amdgpu_device *adev)
{
- int ret = 0;
-
cz_dpm_update_sclk_limit(adev);
cz_dpm_set_deep_sleep_sclk_threshold(adev);
cz_dpm_set_watermark_threshold(adev);
cz_dpm_enable_nbdpm(adev);
cz_dpm_update_low_memory_pstate(adev);
- return ret;
+ return 0;
}
static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
@@ -2230,6 +2225,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
}
const struct amd_ip_funcs cz_dpm_ip_funcs = {
+ .name = "cz_dpm",
.early_init = cz_dpm_early_init,
.late_init = cz_dpm_late_init,
.sw_init = cz_dpm_sw_init,
@@ -2241,7 +2237,6 @@ const struct amd_ip_funcs cz_dpm_ip_funcs = {
.is_idle = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
- .print_status = NULL,
.set_clockgating_state = cz_dpm_set_clockgating_state,
.set_powergating_state = cz_dpm_set_powergating_state,
};
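Editor's note: the cz_dpm hunks above all make the same janitorial fix — a local `int ret = 0;` that is never reassigned is removed and the constant is returned directly. A self-contained before/after sketch of the cleanup (do_work() is a hypothetical stand-in for the real SMC call):

static void do_work(void) { }	/* hypothetical stand-in */

static int helper_before(void)	/* dead store: ret never changes */
{
	int ret = 0;
	do_work();
	return ret;
}

static int helper_after(void)	/* what the patch converts it to */
{
	do_work();
	return 0;
}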
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index c79638f8e..3d23a70b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -103,7 +103,6 @@ static void cz_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int cz_ih_irq_init(struct amdgpu_device *adev)
{
- int ret = 0;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
u64 wptr_off;
@@ -157,7 +156,7 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
cz_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
@@ -222,7 +221,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
/* wptr/rptr are in bytes! */
u32 ring_index = adev->irq.ih.rptr >> 2;
uint32_t dw[4];
-
+
dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
@@ -351,35 +350,6 @@ static int cz_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static void cz_ih_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "CZ IH registers\n");
- dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(mmSRBM_STATUS));
- dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
- RREG32(mmSRBM_STATUS2));
- dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n",
- RREG32(mmINTERRUPT_CNTL));
- dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n",
- RREG32(mmINTERRUPT_CNTL2));
- dev_info(adev->dev, " IH_CNTL=0x%08X\n",
- RREG32(mmIH_CNTL));
- dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n",
- RREG32(mmIH_RB_CNTL));
- dev_info(adev->dev, " IH_RB_BASE=0x%08X\n",
- RREG32(mmIH_RB_BASE));
- dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n",
- RREG32(mmIH_RB_WPTR_ADDR_LO));
- dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n",
- RREG32(mmIH_RB_WPTR_ADDR_HI));
- dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n",
- RREG32(mmIH_RB_RPTR));
- dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n",
- RREG32(mmIH_RB_WPTR));
-}
-
static int cz_ih_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
@@ -391,8 +361,6 @@ static int cz_ih_soft_reset(void *handle)
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
- cz_ih_print_status((void *)adev);
-
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -407,8 +375,6 @@ static int cz_ih_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
-
- cz_ih_print_status((void *)adev);
}
return 0;
@@ -429,6 +395,7 @@ static int cz_ih_set_powergating_state(void *handle,
}
const struct amd_ip_funcs cz_ih_ip_funcs = {
+ .name = "cz_ih",
.early_init = cz_ih_early_init,
.late_init = NULL,
.sw_init = cz_ih_sw_init,
@@ -440,7 +407,6 @@ const struct amd_ip_funcs cz_ih_ip_funcs = {
.is_idle = cz_ih_is_idle,
.wait_for_idle = cz_ih_wait_for_idle,
.soft_reset = cz_ih_soft_reset,
- .print_status = cz_ih_print_status,
.set_clockgating_state = cz_ih_set_clockgating_state,
.set_powergating_state = cz_ih_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h b/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h
index 924d355b4..026342fcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h
@@ -77,7 +77,7 @@ struct cz_smu_private_data {
uint8_t driver_buffer_length;
uint8_t scratch_buffer_length;
uint16_t toc_entry_used_count;
- uint16_t toc_entry_initialize_index;
+ uint16_t toc_entry_initialize_index;
uint16_t toc_entry_power_profiling_index;
uint16_t toc_entry_aram;
uint16_t toc_entry_ih_register_restore_task_index;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 6de2ce535..8227344d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -284,10 +284,16 @@ static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
* surface base address.
*/
static void dce_v10_0_page_flip(struct amdgpu_device *adev,
- int crtc_id, u64 crtc_base)
+ int crtc_id, u64 crtc_base, bool async)
{
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+ u32 tmp;
+ /* flip at hsync for async, default is vsync */
+ tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
+ GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
+ WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* update the primary scanout address */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
@@ -2211,6 +2217,14 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
dce_v10_0_vga_enable(crtc, false);
+ /* Make sure surface address is updated at vertical blank rather than
+ * horizontal blank
+ */
+ tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
+ GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
+ WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location));
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2261,13 +2275,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* pageflip setup */
- /* make sure flip is at vb rather than hb */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
/* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
@@ -2587,7 +2594,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
return -EINVAL;
}
- obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
return -ENOENT;
@@ -2992,6 +2999,8 @@ static int dce_v10_0_sw_init(void *handle)
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+ adev->ddev->mode_config.async_page_flip = true;
+
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;
@@ -3130,14 +3139,6 @@ static int dce_v10_0_wait_for_idle(void *handle)
return 0;
}
-static void dce_v10_0_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "DCE 10.x registers\n");
- /* XXX todo */
-}
-
static int dce_v10_0_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0, tmp;
@@ -3147,8 +3148,6 @@ static int dce_v10_0_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
if (srbm_soft_reset) {
- dce_v10_0_print_status((void *)adev);
-
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -3163,7 +3162,6 @@ static int dce_v10_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
- dce_v10_0_print_status((void *)adev);
}
return 0;
}
@@ -3370,7 +3368,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
/* wake up userspace */
if (works->event)
- drm_send_vblank_event(adev->ddev, crtc_id, works->event);
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
@@ -3501,6 +3499,7 @@ static int dce_v10_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs dce_v10_0_ip_funcs = {
+ .name = "dce_v10_0",
.early_init = dce_v10_0_early_init,
.late_init = NULL,
.sw_init = dce_v10_0_sw_init,
@@ -3512,7 +3511,6 @@ const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.is_idle = dce_v10_0_is_idle,
.wait_for_idle = dce_v10_0_wait_for_idle,
.soft_reset = dce_v10_0_soft_reset,
- .print_status = dce_v10_0_print_status,
.set_clockgating_state = dce_v10_0_set_clockgating_state,
.set_powergating_state = dce_v10_0_set_powergating_state,
};
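Editor's note: dce_v8_0, dce_v10_0 and dce_v11_0 all gain an `async` parameter on their page_flip hook and move the GRPH_FLIP_CONTROL programming out of do_set_base. The core is a read-modify-write that selects hsync (async) or vsync latching before the new scanout address is written. A sketch of that sequence, using the register and field names from the hunks above and the driver's existing RREG32/WREG32/REG_SET_FIELD accessors (page_flip_sketch is a hypothetical name):

static void page_flip_sketch(struct amdgpu_device *adev,
			     u32 crtc_offset, u64 crtc_base, bool async)
{
	u32 tmp = RREG32(mmGRPH_FLIP_CONTROL + crtc_offset);

	/* async flips latch at hsync; the default latches at vsync */
	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
	WREG32(mmGRPH_FLIP_CONTROL + crtc_offset, tmp);

	/* then update the primary scanout address as before */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offset,
	       lower_32_bits(crtc_base));
}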
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index e9ccc6b78..af26ec0bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -132,6 +132,22 @@ static const u32 stoney_golden_settings_a11[] =
mmFBC_MISC, 0x1f311fff, 0x14302000,
};
+static const u32 polaris11_golden_settings_a11[] =
+{
+ mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
+ mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+ mmFBC_DEBUG1, 0xffffffff, 0x00000008,
+ mmFBC_MISC, 0x9f313fff, 0x14302008,
+ mmHDMI_CONTROL, 0x313f031f, 0x00000011,
+};
+
+static const u32 polaris10_golden_settings_a11[] =
+{
+ mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
+ mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
+ mmFBC_MISC, 0x9f313fff, 0x14302008,
+ mmHDMI_CONTROL, 0x313f031f, 0x00000011,
+};
static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
@@ -149,6 +165,16 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
stoney_golden_settings_a11,
(const u32)ARRAY_SIZE(stoney_golden_settings_a11));
break;
+ case CHIP_POLARIS11:
+ amdgpu_program_register_sequence(adev,
+ polaris11_golden_settings_a11,
+ (const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
+ break;
+ case CHIP_POLARIS10:
+ amdgpu_program_register_sequence(adev,
+ polaris10_golden_settings_a11,
+ (const u32)ARRAY_SIZE(polaris10_golden_settings_a11));
+ break;
default:
break;
}
@@ -276,10 +302,17 @@ static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
* surface base address.
*/
static void dce_v11_0_page_flip(struct amdgpu_device *adev,
- int crtc_id, u64 crtc_base)
+ int crtc_id, u64 crtc_base, bool async)
{
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+ u32 tmp;
+ /* flip at hsync for async, default is vsync */
+ /* use UPDATE_IMMEDIATE_EN instead for async? */
+ tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
+ GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
+ WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* update the scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
@@ -565,35 +598,14 @@ static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
CRTC_CONTROL, CRTC_MASTER_EN);
if (crtc_enabled) {
-#if 0
- u32 frame_count;
- int j;
-
+#if 1
save->crtc_enabled[i] = true;
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
- amdgpu_display_vblank_wait(adev, i);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ /*it is correct only for RGB ; black is 0*/
+ WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
- WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
}
#else
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
@@ -614,54 +626,20 @@ static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
- u32 tmp, frame_count;
- int i, j;
+ u32 tmp;
+ int i;
/* update crtc base addresses */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)adev->mc.vram_start);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
if (save->crtc_enabled[i]) {
- tmp = RREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
- tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
- WREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
- WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
- }
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
- break;
- udelay(1);
- }
tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
}
}
@@ -1624,6 +1602,7 @@ static const u32 pin_offsets[] =
AUD4_REGISTER_OFFSET,
AUD5_REGISTER_OFFSET,
AUD6_REGISTER_OFFSET,
+ AUD7_REGISTER_OFFSET,
};
static int dce_v11_0_audio_init(struct amdgpu_device *adev)
@@ -1635,7 +1614,20 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
adev->mode_info.audio.enabled = true;
- adev->mode_info.audio.num_pins = 7;
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ adev->mode_info.audio.num_pins = 7;
+ break;
+ case CHIP_POLARIS10:
+ adev->mode_info.audio.num_pins = 8;
+ break;
+ case CHIP_POLARIS11:
+ adev->mode_info.audio.num_pins = 6;
+ break;
+ default:
+ return -EINVAL;
+ }
for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
adev->mode_info.audio.pin[i].channels = -1;
@@ -2201,6 +2193,14 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
dce_v11_0_vga_enable(crtc, false);
+ /* Make sure surface address is updated at vertical blank rather than
+ * horizontal blank
+ */
+ tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
+ GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
+ WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location));
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2251,13 +2251,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* pageflip setup */
- /* make sure flip is at vb rather than hb */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
/* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
@@ -2427,6 +2420,40 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
u32 pll_in_use;
int pll;
+ if ((adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_POLARIS11)) {
+ struct amdgpu_encoder *amdgpu_encoder =
+ to_amdgpu_encoder(amdgpu_crtc->encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+
+ if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
+ return ATOM_DP_DTO;
+
+ switch (amdgpu_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig->linkb)
+ return ATOM_COMBOPHY_PLL1;
+ else
+ return ATOM_COMBOPHY_PLL0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig->linkb)
+ return ATOM_COMBOPHY_PLL3;
+ else
+ return ATOM_COMBOPHY_PLL2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig->linkb)
+ return ATOM_COMBOPHY_PLL5;
+ else
+ return ATOM_COMBOPHY_PLL4;
+ break;
+ default:
+ DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
+ return ATOM_PPLL_INVALID;
+ }
+ }
+
if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
if (adev->clock.dp_extclk)
/* skip PPLL programming if using ext clock */
@@ -2578,7 +2605,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
return -EINVAL;
}
- obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
return -ENOENT;
@@ -2782,7 +2809,17 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
case ATOM_PPLL2:
/* disable the ppll */
amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+ 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
+ break;
+ case ATOM_COMBOPHY_PLL0:
+ case ATOM_COMBOPHY_PLL1:
+ case ATOM_COMBOPHY_PLL2:
+ case ATOM_COMBOPHY_PLL3:
+ case ATOM_COMBOPHY_PLL4:
+ case ATOM_COMBOPHY_PLL5:
+ /* disable the ppll */
+ amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
+ 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
default:
break;
@@ -2800,11 +2837,28 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct amdgpu_device *adev = dev->dev_private;
if (!amdgpu_crtc->adjusted_clock)
return -EINVAL;
- amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
+ if ((adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_POLARIS11)) {
+ struct amdgpu_encoder *amdgpu_encoder =
+ to_amdgpu_encoder(amdgpu_crtc->encoder);
+ int encoder_mode =
+ amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
+
+ /* SetPixelClock calculates the plls and ss values now */
+ amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
+ amdgpu_crtc->pll_id,
+ encoder_mode, amdgpu_encoder->encoder_id,
+ adjusted_mode->clock, 0, 0, 0, 0,
+ amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
+ } else {
+ amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
+ }
amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
@@ -2955,6 +3009,16 @@ static int dce_v11_0_early_init(void *handle)
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
break;
+ case CHIP_POLARIS10:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_POLARIS11:
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -2987,6 +3051,8 @@ static int dce_v11_0_sw_init(void *handle)
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+ adev->ddev->mode_config.async_page_flip = true;
+
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;
@@ -3057,7 +3123,15 @@ static int dce_v11_0_hw_init(void *handle)
/* init dig PHYs, disp eng pll */
amdgpu_atombios_crtc_powergate_init(adev);
amdgpu_atombios_encoder_init_dig(adev);
- amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+ if ((adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_POLARIS11)) {
+ amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
+ DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
+ amdgpu_atombios_crtc_set_dce_clock(adev, 0,
+ DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
+ } else {
+ amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
+ }
/* initialize hpd */
dce_v11_0_hpd_init(adev);
@@ -3126,14 +3200,6 @@ static int dce_v11_0_wait_for_idle(void *handle)
return 0;
}
-static void dce_v11_0_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "DCE 10.x registers\n");
- /* XXX todo */
-}
-
static int dce_v11_0_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0, tmp;
@@ -3143,8 +3209,6 @@ static int dce_v11_0_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
if (srbm_soft_reset) {
- dce_v11_0_print_status((void *)adev);
-
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -3159,7 +3223,6 @@ static int dce_v11_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
- dce_v11_0_print_status((void *)adev);
}
return 0;
}
@@ -3366,7 +3429,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
/* wake up userspace */
if(works->event)
- drm_send_vblank_event(adev->ddev, crtc_id, works->event);
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
@@ -3497,6 +3560,7 @@ static int dce_v11_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs dce_v11_0_ip_funcs = {
+ .name = "dce_v11_0",
.early_init = dce_v11_0_early_init,
.late_init = NULL,
.sw_init = dce_v11_0_sw_init,
@@ -3508,7 +3572,6 @@ const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.is_idle = dce_v11_0_is_idle,
.wait_for_idle = dce_v11_0_wait_for_idle,
.soft_reset = dce_v11_0_soft_reset,
- .print_status = dce_v11_0_print_status,
.set_clockgating_state = dce_v11_0_set_clockgating_state,
.set_powergating_state = dce_v11_0_set_powergating_state,
};
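Editor's note: on Polaris, dce_v11_0_pick_pll above stops scanning for a free PPLL and instead maps each UNIPHY instance to a dedicated COMBOPHY PLL pair, with link B taking the odd member, while DP encoders use the DTO. A compact equivalent of that switch, same constants as in the hunk (pick_combophy_pll_sketch is a hypothetical name):

static u32 pick_combophy_pll_sketch(u32 encoder_id, bool linkb)
{
	switch (encoder_id) {
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
		return linkb ? ATOM_COMBOPHY_PLL1 : ATOM_COMBOPHY_PLL0;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
		return linkb ? ATOM_COMBOPHY_PLL3 : ATOM_COMBOPHY_PLL2;
	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
		return linkb ? ATOM_COMBOPHY_PLL5 : ATOM_COMBOPHY_PLL4;
	default:
		return ATOM_PPLL_INVALID;
	}
}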
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index e56b55d8c..3fb65e41a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -233,10 +233,13 @@ static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
* surface base address.
*/
static void dce_v8_0_page_flip(struct amdgpu_device *adev,
- int crtc_id, u64 crtc_base)
+ int crtc_id, u64 crtc_base, bool async)
{
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+ /* flip at hsync for async, default is vsync */
+ WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
+ GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
/* update the primary scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
@@ -1999,7 +2002,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
uint32_t fb_format, fb_pitch_pixels;
u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
u32 pipe_config;
- u32 tmp, viewport_w, viewport_h;
+ u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
@@ -2135,6 +2138,11 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
dce_v8_0_vga_enable(crtc, false);
+ /* Make sure surface address is updated at vertical blank rather than
+ * horizontal blank
+ */
+ WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
+
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location));
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2182,12 +2190,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* pageflip setup */
- /* make sure flip is at vb rather than hb */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp &= ~GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK;
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
/* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
@@ -2499,7 +2501,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
return -EINVAL;
}
- obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
return -ENOENT;
@@ -2902,6 +2904,8 @@ static int dce_v8_0_sw_init(void *handle)
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+ adev->ddev->mode_config.async_page_flip = true;
+
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;
@@ -3038,14 +3042,6 @@ static int dce_v8_0_wait_for_idle(void *handle)
return 0;
}
-static void dce_v8_0_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "DCE 8.x registers\n");
- /* XXX todo */
-}
-
static int dce_v8_0_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0, tmp;
@@ -3055,8 +3051,6 @@ static int dce_v8_0_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
if (srbm_soft_reset) {
- dce_v8_0_print_status((void *)adev);
-
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -3071,7 +3065,6 @@ static int dce_v8_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
- dce_v8_0_print_status((void *)adev);
}
return 0;
}
@@ -3379,7 +3372,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
/* wake up userspace */
if (works->event)
- drm_send_vblank_event(adev->ddev, crtc_id, works->event);
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
@@ -3431,6 +3424,7 @@ static int dce_v8_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs dce_v8_0_ip_funcs = {
+ .name = "dce_v8_0",
.early_init = dce_v8_0_early_init,
.late_init = NULL,
.sw_init = dce_v8_0_sw_init,
@@ -3442,7 +3436,6 @@ const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.is_idle = dce_v8_0_is_idle,
.wait_for_idle = dce_v8_0_wait_for_idle,
.soft_reset = dce_v8_0_soft_reset,
- .print_status = dce_v8_0_print_status,
.set_clockgating_state = dce_v8_0_set_clockgating_state,
.set_powergating_state = dce_v8_0_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
index ae8d0b55d..07ed7dd92 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
@@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle)
static int fiji_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
@@ -143,6 +148,7 @@ static int fiji_dpm_set_powergating_state(void *handle,
}
const struct amd_ip_funcs fiji_dpm_ip_funcs = {
+ .name = "fiji_dpm",
.early_init = fiji_dpm_early_init,
.late_init = NULL,
.sw_init = fiji_dpm_sw_init,
@@ -154,7 +160,6 @@ const struct amd_ip_funcs fiji_dpm_ip_funcs = {
.is_idle = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
- .print_status = NULL,
.set_clockgating_state = fiji_dpm_set_clockgating_state,
.set_powergating_state = fiji_dpm_set_powergating_state,
};
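Editor's note: fiji_dpm_sw_fini above and the new *_free_microcode helpers later in this commit share one teardown idiom — release the firmware reference taken at init and NULL the pointer, so a repeated fini is harmless (release_firmware() accepts NULL). A minimal sketch (free_fw_sketch is a hypothetical name):

static void free_fw_sketch(struct amdgpu_device *adev)
{
	release_firmware(adev->pm.fw);	/* drop the ref taken at init */
	adev->pm.fw = NULL;		/* make a double fini safe */
}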
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 699cda831..507160a4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -53,7 +53,6 @@
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
-int gfx_v7_0_get_cu_info(struct amdgpu_device *, struct amdgpu_cu_info *);
/*(DEBLOBBED)*/
@@ -853,6 +852,7 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
+static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
/*
* Core functions
@@ -962,6 +962,22 @@ out:
return err;
}
+static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
+{
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.mec_fw);
+ adev->gfx.mec_fw = NULL;
+ release_firmware(adev->gfx.mec2_fw);
+ adev->gfx.mec2_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+}
+
/**
* gfx_v7_0_tiling_mode_table_init - init the hw tiling table
*
@@ -1689,6 +1705,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
gfx_v7_0_tiling_mode_table_init(adev);
gfx_v7_0_setup_rb(adev);
+ gfx_v7_0_get_cu_info(adev);
/* set HW defaults for 3D engine */
WREG32(mmCP_MEQ_THRESHOLDS,
@@ -2000,17 +2017,13 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
* on the gfx ring for execution by the GPU.
*/
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib)
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
{
- bool need_ctx_switch = ring->current_ctx != ib->ctx;
u32 header, control = 0;
u32 next_rptr = ring->wptr + 5;
- /* drop the CE preamble IB for the same context */
- if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
- return;
-
- if (need_ctx_switch)
+ if (ctx_switch)
next_rptr += 2;
next_rptr += 4;
@@ -2021,7 +2034,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
- if (need_ctx_switch) {
+ if (ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
@@ -2031,7 +2044,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw | (ib->vm_id << 24);
+ control |= ib->length_dw | (vm_id << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -2044,7 +2057,8 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib)
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
u32 next_rptr = ring->wptr + 5;
@@ -2059,7 +2073,7 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw | (ib->vm_id << 24);
+ control |= ib->length_dw | (vm_id << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -2107,7 +2121,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
if (r)
goto err2;
@@ -3024,6 +3038,19 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+ WAIT_REG_MEM_FUNCTION(3) | /* equal */
+ WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq);
+ amdgpu_ring_write(ring, 0xffffffff);
+ amdgpu_ring_write(ring, 4); /* poll interval */
+
if (usepfp) {
/* sync CE with ME to prevent CE from fetching CEIB before the context switch is done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
@@ -3051,18 +3078,6 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
- uint32_t seq = ring->fence_drv.sync_seq;
- uint64_t addr = ring->fence_drv.gpu_addr;
-
- amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
- WAIT_REG_MEM_FUNCTION(3) | /* equal */
- WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
- amdgpu_ring_write(ring, addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
- amdgpu_ring_write(ring, seq);
- amdgpu_ring_write(ring, 0xffffffff);
- amdgpu_ring_write(ring, 4); /* poll interval */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3840,18 +3855,13 @@ static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
{
- uint32_t tmp, active_cu_number;
- struct amdgpu_cu_info cu_info;
-
- gfx_v7_0_get_cu_info(adev, &cu_info);
- tmp = cu_info.ao_cu_mask;
- active_cu_number = cu_info.number;
+ u32 tmp;
- WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, tmp);
+ WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
tmp = RREG32(mmRLC_MAX_PG_CU);
tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
- tmp |= (active_cu_number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
+ tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
WREG32(mmRLC_MAX_PG_CU, tmp);
}
@@ -4385,7 +4395,7 @@ static int gfx_v7_0_sw_init(void *handle)
ring = &adev->gfx.gfx_ring[i];
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
- r = amdgpu_ring_init(adev, ring, 1024 * 1024,
+ r = amdgpu_ring_init(adev, ring, 1024,
PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
&adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
AMDGPU_RING_TYPE_GFX);
@@ -4409,10 +4419,10 @@ static int gfx_v7_0_sw_init(void *handle)
ring->me = 1; /* first MEC */
ring->pipe = i / 8;
ring->queue = i % 8;
- sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
+ sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024 * 1024,
+ r = amdgpu_ring_init(adev, ring, 1024,
PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
&adev->gfx.eop_irq, irq_type,
AMDGPU_RING_TYPE_COMPUTE);
@@ -4466,6 +4476,7 @@ static int gfx_v7_0_sw_fini(void *handle)
gfx_v7_0_cp_compute_fini(adev);
gfx_v7_0_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
+ gfx_v7_0_free_microcode(adev);
return 0;
}
@@ -4543,256 +4554,6 @@ static int gfx_v7_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static void gfx_v7_0_print_status(void *handle)
-{
- int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "GFX 7.x registers\n");
- dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(mmGRBM_STATUS));
- dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(mmGRBM_STATUS2));
- dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE0));
- dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE1));
- dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE2));
- dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE3));
- dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
- dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
- RREG32(mmCP_STALLED_STAT1));
- dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
- RREG32(mmCP_STALLED_STAT2));
- dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
- RREG32(mmCP_STALLED_STAT3));
- dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
- RREG32(mmCP_CPF_BUSY_STAT));
- dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
- RREG32(mmCP_CPF_STALLED_STAT1));
- dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
- dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
- dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
- RREG32(mmCP_CPC_STALLED_STAT1));
- dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
-
- for (i = 0; i < 32; i++) {
- dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n",
- i, RREG32(mmGB_TILE_MODE0 + (i * 4)));
- }
- for (i = 0; i < 16; i++) {
- dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n",
- i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4)));
- }
- for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
- dev_info(adev->dev, " se: %d\n", i);
- gfx_v7_0_select_se_sh(adev, i, 0xffffffff);
- dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n",
- RREG32(mmPA_SC_RASTER_CONFIG));
- dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n",
- RREG32(mmPA_SC_RASTER_CONFIG_1));
- }
- gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
-
- dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n",
- RREG32(mmGB_ADDR_CONFIG));
- dev_info(adev->dev, " HDP_ADDR_CONFIG=0x%08X\n",
- RREG32(mmHDP_ADDR_CONFIG));
- dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n",
- RREG32(mmDMIF_ADDR_CALC));
-
- dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n",
- RREG32(mmCP_MEQ_THRESHOLDS));
- dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n",
- RREG32(mmSX_DEBUG_1));
- dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n",
- RREG32(mmTA_CNTL_AUX));
- dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n",
- RREG32(mmSPI_CONFIG_CNTL));
- dev_info(adev->dev, " SQ_CONFIG=0x%08X\n",
- RREG32(mmSQ_CONFIG));
- dev_info(adev->dev, " DB_DEBUG=0x%08X\n",
- RREG32(mmDB_DEBUG));
- dev_info(adev->dev, " DB_DEBUG2=0x%08X\n",
- RREG32(mmDB_DEBUG2));
- dev_info(adev->dev, " DB_DEBUG3=0x%08X\n",
- RREG32(mmDB_DEBUG3));
- dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n",
- RREG32(mmCB_HW_CONTROL));
- dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n",
- RREG32(mmSPI_CONFIG_CNTL_1));
- dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n",
- RREG32(mmPA_SC_FIFO_SIZE));
- dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n",
- RREG32(mmVGT_NUM_INSTANCES));
- dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n",
- RREG32(mmCP_PERFMON_CNTL));
- dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
- RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS));
- dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n",
- RREG32(mmVGT_CACHE_INVALIDATION));
- dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n",
- RREG32(mmVGT_GS_VERTEX_REUSE));
- dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
- RREG32(mmPA_SC_LINE_STIPPLE_STATE));
- dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n",
- RREG32(mmPA_CL_ENHANCE));
- dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n",
- RREG32(mmPA_SC_ENHANCE));
-
- dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n",
- RREG32(mmCP_ME_CNTL));
- dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n",
- RREG32(mmCP_MAX_CONTEXT));
- dev_info(adev->dev, " CP_ENDIAN_SWAP=0x%08X\n",
- RREG32(mmCP_ENDIAN_SWAP));
- dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n",
- RREG32(mmCP_DEVICE_ID));
-
- dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n",
- RREG32(mmCP_SEM_WAIT_TIMER));
- if (adev->asic_type != CHIP_HAWAII)
- dev_info(adev->dev, " CP_SEM_INCOMPLETE_TIMER_CNTL=0x%08X\n",
- RREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL));
-
- dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n",
- RREG32(mmCP_RB_WPTR_DELAY));
- dev_info(adev->dev, " CP_RB_VMID=0x%08X\n",
- RREG32(mmCP_RB_VMID));
- dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
- RREG32(mmCP_RB0_CNTL));
- dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n",
- RREG32(mmCP_RB0_WPTR));
- dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n",
- RREG32(mmCP_RB0_RPTR_ADDR));
- dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n",
- RREG32(mmCP_RB0_RPTR_ADDR_HI));
- dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
- RREG32(mmCP_RB0_CNTL));
- dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n",
- RREG32(mmCP_RB0_BASE));
- dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n",
- RREG32(mmCP_RB0_BASE_HI));
- dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n",
- RREG32(mmCP_MEC_CNTL));
- dev_info(adev->dev, " CP_CPF_DEBUG=0x%08X\n",
- RREG32(mmCP_CPF_DEBUG));
-
- dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n",
- RREG32(mmSCRATCH_ADDR));
- dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n",
- RREG32(mmSCRATCH_UMSK));
-
- /* init the pipes */
- mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) {
- int me = (i < 4) ? 1 : 2;
- int pipe = (i < 4) ? i : (i - 4);
- int queue;
-
- dev_info(adev->dev, " me: %d, pipe: %d\n", me, pipe);
- cik_srbm_select(adev, me, pipe, 0, 0);
- dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR=0x%08X\n",
- RREG32(mmCP_HPD_EOP_BASE_ADDR));
- dev_info(adev->dev, " CP_HPD_EOP_BASE_ADDR_HI=0x%08X\n",
- RREG32(mmCP_HPD_EOP_BASE_ADDR_HI));
- dev_info(adev->dev, " CP_HPD_EOP_VMID=0x%08X\n",
- RREG32(mmCP_HPD_EOP_VMID));
- dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n",
- RREG32(mmCP_HPD_EOP_CONTROL));
-
- for (queue = 0; queue < 8; queue++) {
- cik_srbm_select(adev, me, pipe, queue, 0);
- dev_info(adev->dev, " queue: %d\n", queue);
- dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
- RREG32(mmCP_PQ_WPTR_POLL_CNTL));
- dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n",
- RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
- dev_info(adev->dev, " CP_HQD_ACTIVE=0x%08X\n",
- RREG32(mmCP_HQD_ACTIVE));
- dev_info(adev->dev, " CP_HQD_DEQUEUE_REQUEST=0x%08X\n",
- RREG32(mmCP_HQD_DEQUEUE_REQUEST));
- dev_info(adev->dev, " CP_HQD_PQ_RPTR=0x%08X\n",
- RREG32(mmCP_HQD_PQ_RPTR));
- dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n",
- RREG32(mmCP_HQD_PQ_WPTR));
- dev_info(adev->dev, " CP_HQD_PQ_BASE=0x%08X\n",
- RREG32(mmCP_HQD_PQ_BASE));
- dev_info(adev->dev, " CP_HQD_PQ_BASE_HI=0x%08X\n",
- RREG32(mmCP_HQD_PQ_BASE_HI));
- dev_info(adev->dev, " CP_HQD_PQ_CONTROL=0x%08X\n",
- RREG32(mmCP_HQD_PQ_CONTROL));
- dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR=0x%08X\n",
- RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR));
- dev_info(adev->dev, " CP_HQD_PQ_WPTR_POLL_ADDR_HI=0x%08X\n",
- RREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI));
- dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR=0x%08X\n",
- RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR));
- dev_info(adev->dev, " CP_HQD_PQ_RPTR_REPORT_ADDR_HI=0x%08X\n",
- RREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI));
- dev_info(adev->dev, " CP_HQD_PQ_DOORBELL_CONTROL=0x%08X\n",
- RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL));
- dev_info(adev->dev, " CP_HQD_PQ_WPTR=0x%08X\n",
- RREG32(mmCP_HQD_PQ_WPTR));
- dev_info(adev->dev, " CP_HQD_VMID=0x%08X\n",
- RREG32(mmCP_HQD_VMID));
- dev_info(adev->dev, " CP_MQD_BASE_ADDR=0x%08X\n",
- RREG32(mmCP_MQD_BASE_ADDR));
- dev_info(adev->dev, " CP_MQD_BASE_ADDR_HI=0x%08X\n",
- RREG32(mmCP_MQD_BASE_ADDR_HI));
- dev_info(adev->dev, " CP_MQD_CONTROL=0x%08X\n",
- RREG32(mmCP_MQD_CONTROL));
- }
- }
- cik_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-
- dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n",
- RREG32(mmCP_INT_CNTL_RING0));
- dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
- RREG32(mmRLC_LB_CNTL));
- dev_info(adev->dev, " RLC_CNTL=0x%08X\n",
- RREG32(mmRLC_CNTL));
- dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n",
- RREG32(mmRLC_CGCG_CGLS_CTRL));
- dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n",
- RREG32(mmRLC_LB_CNTR_INIT));
- dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n",
- RREG32(mmRLC_LB_CNTR_MAX));
- dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n",
- RREG32(mmRLC_LB_INIT_CU_MASK));
- dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n",
- RREG32(mmRLC_LB_PARAMS));
- dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
- RREG32(mmRLC_LB_CNTL));
- dev_info(adev->dev, " RLC_MC_CNTL=0x%08X\n",
- RREG32(mmRLC_MC_CNTL));
- dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n",
- RREG32(mmRLC_UCODE_CNTL));
-
- if (adev->asic_type == CHIP_BONAIRE)
- dev_info(adev->dev, " RLC_DRIVER_CPDMA_STATUS=0x%08X\n",
- RREG32(mmRLC_DRIVER_CPDMA_STATUS));
-
- mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < 16; i++) {
- cik_srbm_select(adev, 0, 0, 0, i);
- dev_info(adev->dev, " VM %d:\n", i);
- dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n",
- RREG32(mmSH_MEM_CONFIG));
- dev_info(adev->dev, " SH_MEM_APE1_BASE=0x%08X\n",
- RREG32(mmSH_MEM_APE1_BASE));
- dev_info(adev->dev, " SH_MEM_APE1_LIMIT=0x%08X\n",
- RREG32(mmSH_MEM_APE1_LIMIT));
- dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n",
- RREG32(mmSH_MEM_BASES));
- }
- cik_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-}
-
static int gfx_v7_0_soft_reset(void *handle)
{
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@@ -4826,7 +4587,6 @@ static int gfx_v7_0_soft_reset(void *handle)
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
if (grbm_soft_reset || srbm_soft_reset) {
- gfx_v7_0_print_status((void *)adev);
/* disable CG/PG */
gfx_v7_0_fini_pg(adev);
gfx_v7_0_update_cg(adev, false);
@@ -4869,7 +4629,6 @@ static int gfx_v7_0_soft_reset(void *handle)
}
/* Wait a little for things to settle down */
udelay(50);
- gfx_v7_0_print_status((void *)adev);
}
return 0;
}
@@ -5121,6 +4880,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
+ .name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
.late_init = gfx_v7_0_late_init,
.sw_init = gfx_v7_0_sw_init,
@@ -5132,7 +4892,6 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.is_idle = gfx_v7_0_is_idle,
.wait_for_idle = gfx_v7_0_wait_for_idle,
.soft_reset = gfx_v7_0_soft_reset,
- .print_status = gfx_v7_0_print_status,
.set_clockgating_state = gfx_v7_0_set_clockgating_state,
.set_powergating_state = gfx_v7_0_set_powergating_state,
};
@@ -5239,14 +4998,11 @@ static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
}
-int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
- struct amdgpu_cu_info *cu_info)
+static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
{
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
-
- if (!adev || !cu_info)
- return -EINVAL;
+ struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
memset(cu_info, 0, sizeof(*cu_info));
@@ -5277,6 +5033,4 @@ int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
-
- return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
index c04bfbabf..e747aa935 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
@@ -32,6 +32,5 @@ void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev);
void gfx_v7_0_rlc_stop(struct amdgpu_device *adev);
uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev);
void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
#endif
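Editor's note: the gfx_v7_0 hunks above convert gfx_v7_0_get_cu_info() from an exported `int (adev, cu_info *)` getter into a static void that fills the cached adev->gfx.cu_info once during gpu_init; later users, such as the RLC always-on CU mask setup, read the cache instead of recomputing. A sketch of the consumer side under that assumption (init_ao_mask_sketch is a hypothetical name; the register write mirrors the hunk above):

static void init_ao_mask_sketch(struct amdgpu_device *adev)
{
	/* cached values replace the old on-stack amdgpu_cu_info copy */
	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
}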
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 667d04755..1127b2b62 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -27,6 +27,8 @@
#include "vi.h"
#include "vid.h"
#include "amdgpu_ucode.h"
+#include "amdgpu_atombios.h"
+#include "atombios_i2c.h"
#include "clearstate_vi.h"
#include "gmc/gmc_8_2_d.h"
@@ -46,11 +48,14 @@
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
+#include "smu/smu_7_1_3_d.h"
+
#define GFX8_NUM_GFX_RINGS 1
#define GFX8_NUM_COMPUTE_RINGS 8
#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
+#define POLARIS11_GB_ADDR_CONFIG_GOLDEN 0x22011002
#define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003
#define ARRAY_MODE(x) ((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
@@ -84,6 +89,8 @@ enum {
BPM_REG_FGCG_MAX
};
+#define RLC_FormatDirectRegListLength 14
+
/*(DEBLOBBED)*/
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
@@ -216,6 +223,69 @@ static const u32 tonga_mgcg_cgcg_init[] =
mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};
+static const u32 golden_settings_polaris11_a11[] =
+{
+ mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+ mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
+ mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+ mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+ mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
+ mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
+ mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
+ mmSQ_CONFIG, 0x07f80000, 0x07180000,
+ mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+ mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+ mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
+ mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+ mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
+ mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
+};
+
+static const u32 polaris11_golden_common_all[] =
+{
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
+ mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+ mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+};
+
+static const u32 golden_settings_polaris10_a11[] =
+{
+ mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
+ mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
+ mmCB_HW_CONTROL_2, 0, 0x0f000000,
+ mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
+ mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+ mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+ mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
+ mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002a,
+ mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
+ mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
+ mmSQ_CONFIG, 0x07f80000, 0x07180000,
+ mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+ mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+ mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
+ mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+ mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
+};
+
+static const u32 polaris10_golden_common_all[] =
+{
+ mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+ mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
+ mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
+ mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
+ mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+ mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+ mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
+ mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+};
+
static const u32 fiji_golden_common_all[] =
{
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
@@ -496,7 +566,7 @@ static const u32 stoney_golden_settings_a11[] =
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
- mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+ mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
@@ -527,6 +597,9 @@ static const u32 stoney_mgcg_cgcg_init[] =
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
+static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
+static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
+static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
@@ -565,6 +638,27 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
tonga_golden_common_all,
(const u32)ARRAY_SIZE(tonga_golden_common_all));
break;
+ case CHIP_POLARIS11:
+ amdgpu_program_register_sequence(adev,
+ golden_settings_polaris11_a11,
+ (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
+ amdgpu_program_register_sequence(adev,
+ polaris11_golden_common_all,
+ (const u32)ARRAY_SIZE(polaris11_golden_common_all));
+ break;
+ case CHIP_POLARIS10:
+ amdgpu_program_register_sequence(adev,
+ golden_settings_polaris10_a11,
+ (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
+ amdgpu_program_register_sequence(adev,
+ polaris10_golden_common_all,
+ (const u32)ARRAY_SIZE(polaris10_golden_common_all));
+ WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
+ if (adev->pdev->revision == 0xc7) {
+ amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
+ amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
+ }
+ break;
case CHIP_CARRIZO:
amdgpu_program_register_sequence(adev,
cz_mgcg_cgcg_init,
@@ -675,7 +769,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
if (r)
goto err2;
@@ -708,6 +802,26 @@ err1:
return r;
}
+
+static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) {
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+ release_firmware(adev->gfx.mec_fw);
+ adev->gfx.mec_fw = NULL;
+ if ((adev->asic_type != CHIP_STONEY) &&
+ (adev->asic_type != CHIP_TOPAZ))
+ release_firmware(adev->gfx.mec2_fw);
+ adev->gfx.mec2_fw = NULL;
+
+ kfree(adev->gfx.rlc.register_list_format);
+}
+
static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
@@ -716,6 +830,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
const struct gfx_firmware_header_v1_0 *cp_hdr;
+ const struct rlc_firmware_header_v2_0 *rlc_hdr;
+ unsigned int *tmp = NULL, i;
DRM_DEBUG("\n");
@@ -732,6 +848,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_FIJI:
chip_name = "fiji";
break;
+ case CHIP_POLARIS11:
+ chip_name = "polaris11";
+ break;
+ case CHIP_POLARIS10:
+ chip_name = "polaris10";
+ break;
case CHIP_STONEY:
chip_name = "stoney";
break;
@@ -777,9 +899,49 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
- adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+
+ adev->gfx.rlc.save_and_restore_offset =
+ le32_to_cpu(rlc_hdr->save_and_restore_offset);
+ adev->gfx.rlc.clear_state_descriptor_offset =
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
+ adev->gfx.rlc.avail_scratch_ram_locations =
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
+ adev->gfx.rlc.reg_restore_list_size =
+ le32_to_cpu(rlc_hdr->reg_restore_list_size);
+ adev->gfx.rlc.reg_list_format_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_start);
+ adev->gfx.rlc.reg_list_format_separate_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
+ adev->gfx.rlc.starting_offsets_start =
+ le32_to_cpu(rlc_hdr->starting_offsets_start);
+ adev->gfx.rlc.reg_list_format_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
+ adev->gfx.rlc.reg_list_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_size_bytes);
+
+ adev->gfx.rlc.register_list_format =
+ kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
+ adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
+
+ if (!adev->gfx.rlc.register_list_format) {
+ err = -ENOMEM;
+ goto out;
+ }
+
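+ /* The v2 RLC header is followed by two packed little-endian
+  * arrays: the register-list format (indirect descriptors) and
+  * the restore values.  Both are byte-swapped into one CPU-order
+  * allocation; register_restore simply points at the tail of
+  * register_list_format.
+  */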
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+ for (i = 0; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
+
+ adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+ for (i = 0; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
err = reject_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
@@ -880,6 +1042,153 @@ out:
return err;
}
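+/*
+ * Write the clear-state buffer image: a PM4 preamble, the
+ * SECT_CONTEXT extents from the rlc cs_data table, a per-ASIC
+ * PA_SC_RASTER_CONFIG pair and the terminating CLEAR_STATE packet.
+ * The RLC fetches this buffer through the CSIB registers programmed
+ * in gfx_v8_0_init_csb().
+ */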
+static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
+ volatile u32 *buffer)
+{
+ u32 count = 0, i;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (adev->gfx.rlc.cs_data == NULL)
+ return;
+ if (buffer == NULL)
+ return;
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] =
+ cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index -
+ PACKET3_SET_CONTEXT_REG_START);
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
+ } else {
+ return;
+ }
+ }
+ }
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
+ PACKET3_SET_CONTEXT_REG_START);
+ switch (adev->asic_type) {
+ case CHIP_TONGA:
+ case CHIP_POLARIS10:
+ buffer[count++] = cpu_to_le32(0x16000012);
+ buffer[count++] = cpu_to_le32(0x0000002A);
+ break;
+ case CHIP_POLARIS11:
+ buffer[count++] = cpu_to_le32(0x16000012);
+ buffer[count++] = cpu_to_le32(0x00000000);
+ break;
+ case CHIP_FIJI:
+ buffer[count++] = cpu_to_le32(0x3a00161a);
+ buffer[count++] = cpu_to_le32(0x0000002e);
+ break;
+ case CHIP_TOPAZ:
+ case CHIP_CARRIZO:
+ buffer[count++] = cpu_to_le32(0x00000002);
+ buffer[count++] = cpu_to_le32(0x00000000);
+ break;
+ case CHIP_STONEY:
+ buffer[count++] = cpu_to_le32(0x00000000);
+ buffer[count++] = cpu_to_le32(0x00000000);
+ break;
+ default:
+ buffer[count++] = cpu_to_le32(0x00000000);
+ buffer[count++] = cpu_to_le32(0x00000000);
+ break;
+ }
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
+}
+
+static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* clear state block */
+ if (adev->gfx.rlc.clear_state_obj) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+ amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+ adev->gfx.rlc.clear_state_obj = NULL;
+ }
+}
+
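+/*
+ * Allocate, pin and map the clear-state BO in CPU-visible VRAM, fill
+ * it via gfx_v8_0_get_csb_buffer(), then unmap and unreserve.  Every
+ * failure path unwinds through gfx_v8_0_rlc_fini() so sw_init can
+ * bail out cleanly.
+ */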
+static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
+{
+ volatile u32 *dst_ptr;
+ u32 dws;
+ const struct cs_section_def *cs_data;
+ int r;
+
+ adev->gfx.rlc.cs_data = vi_cs_data;
+
+ cs_data = adev->gfx.rlc.cs_data;
+
+ if (cs_data) {
+ /* clear state block */
+ adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
+
+ if (adev->gfx.rlc.clear_state_obj == NULL) {
+ r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ NULL, NULL,
+ &adev->gfx.rlc.clear_state_obj);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+ gfx_v8_0_rlc_fini(adev);
+ return r;
+ }
+ }
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (unlikely(r != 0)) {
+ gfx_v8_0_rlc_fini(adev);
+ return r;
+ }
+ r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_gpu_addr);
+ if (r) {
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+ gfx_v8_0_rlc_fini(adev);
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+ gfx_v8_0_rlc_fini(adev);
+ return r;
+ }
+ /* set up the cs buffer */
+ dst_ptr = adev->gfx.rlc.cs_ptr;
+ gfx_v8_0_get_csb_buffer(adev, dst_ptr);
+ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ }
+
+ return 0;
+}
+
static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
int r;
@@ -1231,7 +1540,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
/* schedule the ib on the ring */
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
if (r) {
DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
goto fail;
@@ -1265,12 +1574,13 @@ fail:
return r;
}
-static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
+static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
u32 tmp;
+ int ret;
switch (adev->asic_type) {
case CHIP_TOPAZ:
@@ -1307,6 +1617,34 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
+ case CHIP_POLARIS11:
+ ret = amdgpu_atombios_get_gfx_info(adev);
+ if (ret)
+ return ret;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 32;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
+ break;
+ case CHIP_POLARIS10:
+ ret = amdgpu_atombios_get_gfx_info(adev);
+ if (ret)
+ return ret;
+ adev->gfx.config.max_gprs = 256;
+ adev->gfx.config.max_gs_threads = 32;
+ adev->gfx.config.max_hw_contexts = 8;
+
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
+ break;
case CHIP_TONGA:
adev->gfx.config.max_shader_engines = 4;
adev->gfx.config.max_tile_pipes = 8;
@@ -1489,6 +1827,8 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
break;
}
adev->gfx.config.gb_addr_config = gb_addr_config;
+
+ return 0;
}
static int gfx_v8_0_sw_init(void *handle)
@@ -1522,6 +1862,12 @@ static int gfx_v8_0_sw_init(void *handle)
return r;
}
+ r = gfx_v8_0_rlc_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init rlc BOs!\n");
+ return r;
+ }
+
r = gfx_v8_0_mec_init(adev);
if (r) {
DRM_ERROR("Failed to init MEC BOs!\n");
@@ -1539,7 +1885,7 @@ static int gfx_v8_0_sw_init(void *handle)
ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
}
- r = amdgpu_ring_init(adev, ring, 1024 * 1024,
+ r = amdgpu_ring_init(adev, ring, 1024,
PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
&adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP,
AMDGPU_RING_TYPE_GFX);
@@ -1563,10 +1909,10 @@ static int gfx_v8_0_sw_init(void *handle)
ring->me = 1; /* first MEC */
ring->pipe = i / 8;
ring->queue = i % 8;
- sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue);
+ sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024 * 1024,
+ r = amdgpu_ring_init(adev, ring, 1024,
PACKET3(PACKET3_NOP, 0x3FFF), 0xf,
&adev->gfx.eop_irq, irq_type,
AMDGPU_RING_TYPE_COMPUTE);
@@ -1598,7 +1944,9 @@ static int gfx_v8_0_sw_init(void *handle)
adev->gfx.ce_ram_size = 0x8000;
- gfx_v8_0_gpu_early_init(adev);
+ r = gfx_v8_0_gpu_early_init(adev);
+ if (r)
+ return r;
return 0;
}
@@ -1619,6 +1967,10 @@ static int gfx_v8_0_sw_fini(void *handle)
gfx_v8_0_mec_fini(adev);
+ gfx_v8_0_rlc_fini(adev);
+
+ gfx_v8_0_free_microcode(adev);
+
return 0;
}
@@ -2188,6 +2540,410 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
break;
+ case CHIP_POLARIS11:
+ modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16));
+ modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+
+ mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+
+ mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
+
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ if (reg_offset != 7)
+ WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
+
+ break;
+ case CHIP_POLARIS10:
+ modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+ modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
+ modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+ modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+ modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+ modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+ PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+ MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+ SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+
+ mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK));
+
+ mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_8_BANK));
+
+ mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+
+ mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+ NUM_BANKS(ADDR_SURF_4_BANK));
+
+ for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
+ WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
+
+ for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
+ if (reg_offset != 7)
+ WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
+
+ break;
case CHIP_STONEY:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P2) |
@@ -2664,6 +3420,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
gfx_v8_0_tiling_mode_table_init(adev);
gfx_v8_0_setup_rb(adev);
+ gfx_v8_0_get_cu_info(adev);
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -2757,6 +3514,188 @@ static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
WREG32(mmCP_INT_CNTL_RING0, tmp);
}
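+/*
+ * Point the RLC clear-state indirect buffer (CSIB) registers at the
+ * buffer built in gfx_v8_0_rlc_init(); the low two bits of the GPU
+ * address are masked off in the LO register.
+ */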
+static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
+{
+ /* csib */
+ WREG32(mmRLC_CSIB_ADDR_HI,
+ adev->gfx.rlc.clear_state_gpu_addr >> 32);
+ WREG32(mmRLC_CSIB_ADDR_LO,
+ adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
+ WREG32(mmRLC_CSIB_LENGTH,
+ adev->gfx.rlc.clear_state_size);
+}
+
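+/*
+ * Walk the indirect register list from the RLC firmware.  Entries are
+ * variable length and 0xFFFFFFFF-terminated; this pass records the
+ * offset where each entry starts, collapses the index words into a
+ * table of unique indices, and rewrites every index word to its slot
+ * in that table.
+ */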
+static void gfx_v8_0_parse_ind_reg_list(int *register_list_format,
+ int ind_offset,
+ int list_size,
+ int *unique_indices,
+ int *indices_count,
+ int max_indices,
+ int *ind_start_offsets,
+ int *offset_count,
+ int max_offset)
+{
+ int indices;
+ bool new_entry = true;
+
+ for (; ind_offset < list_size; ind_offset++) {
+
+ if (new_entry) {
+ new_entry = false;
+ ind_start_offsets[*offset_count] = ind_offset;
+ *offset_count = *offset_count + 1;
+ BUG_ON(*offset_count >= max_offset);
+ }
+
+ if (register_list_format[ind_offset] == 0xFFFFFFFF) {
+ new_entry = true;
+ continue;
+ }
+
+ ind_offset += 2;
+
+ /* look for the matching index */
+ for (indices = 0;
+ indices < *indices_count;
+ indices++) {
+ if (unique_indices[indices] ==
+ register_list_format[ind_offset])
+ break;
+ }
+
+ if (indices >= *indices_count) {
+ unique_indices[*indices_count] =
+ register_list_format[ind_offset];
+ indices = *indices_count;
+ *indices_count = *indices_count + 1;
+ BUG_ON(*indices_count >= max_indices);
+ }
+
+ register_list_format[ind_offset] = indices;
+ }
+}
+
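+/*
+ * Load the RLC save/restore machinery: stream the direct restore list
+ * into SRM ARAM, the re-indexed format list into GPM scratch, publish
+ * the per-entry starting offsets, and program the unique index
+ * address/data register pairs.
+ */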
+static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
+{
+ int i, temp, data;
+ int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0};
+ int indices_count = 0;
+ int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ int offset_count = 0;
+
+ int list_size;
+ unsigned int *register_list_format =
+ kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
+ if (register_list_format == NULL)
+ return -ENOMEM;
+ memcpy(register_list_format, adev->gfx.rlc.register_list_format,
+ adev->gfx.rlc.reg_list_format_size_bytes);
+
+ gfx_v8_0_parse_ind_reg_list(register_list_format,
+ RLC_FormatDirectRegListLength,
+ adev->gfx.rlc.reg_list_format_size_bytes >> 2,
+ unique_indices,
+ &indices_count,
+ sizeof(unique_indices) / sizeof(int),
+ indirect_start_offsets,
+ &offset_count,
+ sizeof(indirect_start_offsets)/sizeof(int));
+
+ /* save and restore list */
+ temp = RREG32(mmRLC_SRM_CNTL);
+ temp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
+ WREG32(mmRLC_SRM_CNTL, temp);
+
+ WREG32(mmRLC_SRM_ARAM_ADDR, 0);
+ for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
+ WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);
+
+ /* indirect list */
+ WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
+ for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
+ WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]);
+
+ list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
+ list_size = list_size >> 1;
+ WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
+ WREG32(mmRLC_GPM_SCRATCH_DATA, list_size);
+
+ /* start of the starting-offsets block */
+ WREG32(mmRLC_GPM_SCRATCH_ADDR,
+ adev->gfx.rlc.starting_offsets_start);
+ for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
+ WREG32(mmRLC_GPM_SCRATCH_DATA,
+ indirect_start_offsets[i]);
+
+ /* unique indices */
+ temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
+ data = mmRLC_SRM_INDEX_CNTL_DATA_0;
+ for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
+ amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false);
+ amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false);
+ }
+ kfree(register_list_format);
+
+ return 0;
+}
+
+static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ data = RREG32(mmRLC_SRM_CNTL);
+ data |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
+ WREG32(mmRLC_SRM_CNTL, data);
+}
+
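+/*
+ * Polaris11-only PG bring-up: stretch the ring-buffer idle poll
+ * interval, seed the RLC power-up/down, command-propagate and
+ * mem-sleep delays plus the SERDES command delay, and raise the GRBM
+ * register-save idle threshold used by automatic power gating.
+ */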
+static void polaris11_init_power_gating(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG)) {
+ data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
+ data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
+ data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
+ WREG32(mmCP_RB_WPTR_POLL_CNTL, data);
+
+ data = 0;
+ data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
+ data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
+ data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
+ data |= (0x10 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
+ WREG32(mmRLC_PG_DELAY, data);
+
+ data = RREG32(mmRLC_PG_DELAY_2);
+ data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
+ data |= (0x3 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
+ WREG32(mmRLC_PG_DELAY_2, data);
+
+ data = RREG32(mmRLC_AUTO_PG_CTRL);
+ data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
+ data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
+ WREG32(mmRLC_AUTO_PG_CTRL, data);
+ }
+}
+
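+/*
+ * Common PG init: if any gfx/CP/GDS/RLC-SMU power-gating feature is
+ * enabled, the CSB and save/restore list must be loaded before the
+ * save/restore machine is switched on; Polaris11 then layers its
+ * extra delays and thresholds on top.
+ */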
+static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
+{
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS)) {
+ gfx_v8_0_init_csb(adev);
+ gfx_v8_0_init_save_restore_list(adev);
+ gfx_v8_0_enable_save_restore_machine(adev);
+
+ if (adev->asic_type == CHIP_POLARIS11)
+ polaris11_init_power_gating(adev);
+ }
+}
+
void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
{
u32 tmp = RREG32(mmRLC_CNTL);
@@ -2827,12 +3766,17 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
/* disable CG */
WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
+ if (adev->asic_type == CHIP_POLARIS11 ||
+ adev->asic_type == CHIP_POLARIS10)
+ WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
/* disable PG */
WREG32(mmRLC_PG_CNTL, 0);
gfx_v8_0_rlc_reset(adev);
+ gfx_v8_0_init_pg(adev);
+
if (!adev->pp_enabled) {
if (!adev->firmware.smu_load) {
/* legacy rlc firmware loading */
@@ -3004,18 +3948,27 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
switch (adev->asic_type) {
case CHIP_TONGA:
+ case CHIP_POLARIS10:
amdgpu_ring_write(ring, 0x16000012);
amdgpu_ring_write(ring, 0x0000002A);
break;
+ case CHIP_POLARIS11:
+ amdgpu_ring_write(ring, 0x16000012);
+ amdgpu_ring_write(ring, 0x00000000);
+ break;
case CHIP_FIJI:
amdgpu_ring_write(ring, 0x3a00161a);
amdgpu_ring_write(ring, 0x0000002e);
break;
- case CHIP_TOPAZ:
case CHIP_CARRIZO:
amdgpu_ring_write(ring, 0x00000002);
amdgpu_ring_write(ring, 0x00000000);
break;
+ case CHIP_TOPAZ:
+ amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ?
+ 0x00000000 : 0x00000002);
+ amdgpu_ring_write(ring, 0x00000000);
+ break;
case CHIP_STONEY:
amdgpu_ring_write(ring, 0x00000000);
amdgpu_ring_write(ring, 0x00000000);
@@ -3091,6 +4044,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_OFFSET, ring->doorbell_index);
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
+ DOORBELL_HIT, 0);
+ tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_EN, 1);
} else {
tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
@@ -3648,7 +4603,9 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
if (use_doorbell) {
if ((adev->asic_type == CHIP_CARRIZO) ||
(adev->asic_type == CHIP_FIJI) ||
- (adev->asic_type == CHIP_STONEY)) {
+ (adev->asic_type == CHIP_STONEY) ||
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS10)) {
WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
AMDGPU_DOORBELL_KIQ << 2);
WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -3682,7 +4639,9 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
WREG32(mmCP_HQD_PERSISTENT_STATE, tmp);
mqd->cp_hqd_persistent_state = tmp;
- if (adev->asic_type == CHIP_STONEY) {
+ if (adev->asic_type == CHIP_STONEY ||
+ adev->asic_type == CHIP_POLARIS11 ||
+ adev->asic_type == CHIP_POLARIS10) {
tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1);
WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp);
@@ -3814,6 +4773,9 @@ static int gfx_v8_0_hw_fini(void *handle)
gfx_v8_0_rlc_stop(adev);
gfx_v8_0_cp_compute_fini(adev);
+ amdgpu_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE);
+
return 0;
}
@@ -3858,185 +4820,6 @@ static int gfx_v8_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static void gfx_v8_0_print_status(void *handle)
-{
- int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "GFX 8.x registers\n");
- dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(mmGRBM_STATUS));
- dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(mmGRBM_STATUS2));
- dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE0));
- dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE1));
- dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE2));
- dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
- RREG32(mmGRBM_STATUS_SE3));
- dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
- dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
- RREG32(mmCP_STALLED_STAT1));
- dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
- RREG32(mmCP_STALLED_STAT2));
- dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
- RREG32(mmCP_STALLED_STAT3));
- dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
- RREG32(mmCP_CPF_BUSY_STAT));
- dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
- RREG32(mmCP_CPF_STALLED_STAT1));
- dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
- dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
- dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
- RREG32(mmCP_CPC_STALLED_STAT1));
- dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
-
- for (i = 0; i < 32; i++) {
- dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n",
- i, RREG32(mmGB_TILE_MODE0 + (i * 4)));
- }
- for (i = 0; i < 16; i++) {
- dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n",
- i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4)));
- }
- for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
- dev_info(adev->dev, " se: %d\n", i);
- gfx_v8_0_select_se_sh(adev, i, 0xffffffff);
- dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n",
- RREG32(mmPA_SC_RASTER_CONFIG));
- dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n",
- RREG32(mmPA_SC_RASTER_CONFIG_1));
- }
- gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
-
- dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n",
- RREG32(mmGB_ADDR_CONFIG));
- dev_info(adev->dev, " HDP_ADDR_CONFIG=0x%08X\n",
- RREG32(mmHDP_ADDR_CONFIG));
- dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n",
- RREG32(mmDMIF_ADDR_CALC));
-
- dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n",
- RREG32(mmCP_MEQ_THRESHOLDS));
- dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n",
- RREG32(mmSX_DEBUG_1));
- dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n",
- RREG32(mmTA_CNTL_AUX));
- dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n",
- RREG32(mmSPI_CONFIG_CNTL));
- dev_info(adev->dev, " SQ_CONFIG=0x%08X\n",
- RREG32(mmSQ_CONFIG));
- dev_info(adev->dev, " DB_DEBUG=0x%08X\n",
- RREG32(mmDB_DEBUG));
- dev_info(adev->dev, " DB_DEBUG2=0x%08X\n",
- RREG32(mmDB_DEBUG2));
- dev_info(adev->dev, " DB_DEBUG3=0x%08X\n",
- RREG32(mmDB_DEBUG3));
- dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n",
- RREG32(mmCB_HW_CONTROL));
- dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n",
- RREG32(mmSPI_CONFIG_CNTL_1));
- dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n",
- RREG32(mmPA_SC_FIFO_SIZE));
- dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n",
- RREG32(mmVGT_NUM_INSTANCES));
- dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n",
- RREG32(mmCP_PERFMON_CNTL));
- dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
- RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS));
- dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n",
- RREG32(mmVGT_CACHE_INVALIDATION));
- dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n",
- RREG32(mmVGT_GS_VERTEX_REUSE));
- dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
- RREG32(mmPA_SC_LINE_STIPPLE_STATE));
- dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n",
- RREG32(mmPA_CL_ENHANCE));
- dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n",
- RREG32(mmPA_SC_ENHANCE));
-
- dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n",
- RREG32(mmCP_ME_CNTL));
- dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n",
- RREG32(mmCP_MAX_CONTEXT));
- dev_info(adev->dev, " CP_ENDIAN_SWAP=0x%08X\n",
- RREG32(mmCP_ENDIAN_SWAP));
- dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n",
- RREG32(mmCP_DEVICE_ID));
-
- dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n",
- RREG32(mmCP_SEM_WAIT_TIMER));
-
- dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n",
- RREG32(mmCP_RB_WPTR_DELAY));
- dev_info(adev->dev, " CP_RB_VMID=0x%08X\n",
- RREG32(mmCP_RB_VMID));
- dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
- RREG32(mmCP_RB0_CNTL));
- dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n",
- RREG32(mmCP_RB0_WPTR));
- dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n",
- RREG32(mmCP_RB0_RPTR_ADDR));
- dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n",
- RREG32(mmCP_RB0_RPTR_ADDR_HI));
- dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
- RREG32(mmCP_RB0_CNTL));
- dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n",
- RREG32(mmCP_RB0_BASE));
- dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n",
- RREG32(mmCP_RB0_BASE_HI));
- dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n",
- RREG32(mmCP_MEC_CNTL));
- dev_info(adev->dev, " CP_CPF_DEBUG=0x%08X\n",
- RREG32(mmCP_CPF_DEBUG));
-
- dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n",
- RREG32(mmSCRATCH_ADDR));
- dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n",
- RREG32(mmSCRATCH_UMSK));
-
- dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n",
- RREG32(mmCP_INT_CNTL_RING0));
- dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
- RREG32(mmRLC_LB_CNTL));
- dev_info(adev->dev, " RLC_CNTL=0x%08X\n",
- RREG32(mmRLC_CNTL));
- dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n",
- RREG32(mmRLC_CGCG_CGLS_CTRL));
- dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n",
- RREG32(mmRLC_LB_CNTR_INIT));
- dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n",
- RREG32(mmRLC_LB_CNTR_MAX));
- dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n",
- RREG32(mmRLC_LB_INIT_CU_MASK));
- dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n",
- RREG32(mmRLC_LB_PARAMS));
- dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
- RREG32(mmRLC_LB_CNTL));
- dev_info(adev->dev, " RLC_MC_CNTL=0x%08X\n",
- RREG32(mmRLC_MC_CNTL));
- dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n",
- RREG32(mmRLC_UCODE_CNTL));
-
- mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < 16; i++) {
- vi_srbm_select(adev, 0, 0, 0, i);
- dev_info(adev->dev, " VM %d:\n", i);
- dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n",
- RREG32(mmSH_MEM_CONFIG));
- dev_info(adev->dev, " SH_MEM_APE1_BASE=0x%08X\n",
- RREG32(mmSH_MEM_APE1_BASE));
- dev_info(adev->dev, " SH_MEM_APE1_LIMIT=0x%08X\n",
- RREG32(mmSH_MEM_APE1_LIMIT));
- dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n",
- RREG32(mmSH_MEM_BASES));
- }
- vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-}
-
static int gfx_v8_0_soft_reset(void *handle)
{
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@@ -4077,7 +4860,6 @@ static int gfx_v8_0_soft_reset(void *handle)
SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
if (grbm_soft_reset || srbm_soft_reset) {
- gfx_v8_0_print_status((void *)adev);
/* stop the rlc */
gfx_v8_0_rlc_stop(adev);
@@ -4137,7 +4919,6 @@ static int gfx_v8_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
- gfx_v8_0_print_status((void *)adev);
}
return 0;
}
@@ -4219,6 +5000,7 @@ static int gfx_v8_0_early_init(void *handle)
gfx_v8_0_set_ring_funcs(adev);
gfx_v8_0_set_irq_funcs(adev);
gfx_v8_0_set_gds_init(adev);
+ gfx_v8_0_set_rlc_funcs(adev);
return 0;
}
@@ -4241,17 +5023,109 @@ static int gfx_v8_0_late_init(void *handle)
if (r)
return r;
+ amdgpu_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_GATE);
+
return 0;
}
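+/*
+ * The three Polaris11 MGPG helpers below share one read-modify-write
+ * pattern on RLC_PG_CNTL and only touch the register when the bit
+ * actually changes.  The static variant first routes the request to
+ * the SMU through powerplay; the quick variant uses the raw 0x100000
+ * bit, for which no mask macro is used here.
+ */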
+static void polaris11_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, temp;
+
+ /* Send msg to SMU via Powerplay */
+ amdgpu_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_SMC,
+ enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+
+ if (enable) {
+ /* Enable static MGPG */
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
+
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
+ } else {
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
+
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
+ }
+}
+
+static void polaris11_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, temp;
+
+ if (enable) {
+ /* Enable dynamic MGPG */
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
+
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
+ } else {
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
+
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
+ }
+}
+
+static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t data, temp;
+
+ if (enable) {
+ /* Enable quick PG */
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ data |= 0x100000;
+
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
+ } else {
+ temp = data = RREG32(mmRLC_PG_CNTL);
+ data &= ~0x100000;
+
+ if (temp != data)
+ WREG32(mmRLC_PG_CNTL, data);
+ }
+}
+
static int gfx_v8_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
+ return 0;
+
+ switch (adev->asic_type) {
+ case CHIP_POLARIS11:
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG)
+ polaris11_enable_gfx_static_mg_power_gating(adev,
+ state == AMD_PG_STATE_GATE ? true : false);
+ else if (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG)
+ polaris11_enable_gfx_dynamic_mg_power_gating(adev,
+ state == AMD_PG_STATE_GATE ? true : false);
+ else
+ polaris11_enable_gfx_quick_mg_power_gating(adev,
+ state == AMD_PG_STATE_GATE ? true : false);
+ break;
+ default:
+ break;
+ }
+
return 0;
}
-static void fiji_send_serdes_cmd(struct amdgpu_device *adev,
- uint32_t reg_addr, uint32_t cmd)
+static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
+ uint32_t reg_addr, uint32_t cmd)
{
uint32_t data;
@@ -4261,7 +5135,8 @@ static void fiji_send_serdes_cmd(struct amdgpu_device *adev,
WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = RREG32(mmRLC_SERDES_WR_CTRL);
- data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
+ if (adev->asic_type == CHIP_STONEY)
+ data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
@@ -4269,42 +5144,218 @@ static void fiji_send_serdes_cmd(struct amdgpu_device *adev,
RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
RLC_SERDES_WR_CTRL__POWER_UP_MASK |
RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
- RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
- RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
+ else
+ data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
+ RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
+ RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
+ RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
+ RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
+ RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
+ RLC_SERDES_WR_CTRL__POWER_UP_MASK |
+ RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
+ RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
+ RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
+ RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
- (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
- (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
- (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));
+ (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
+ (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
+ (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));
WREG32(mmRLC_SERDES_WR_CTRL, data);
}
-static void fiji_update_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
+#define MSG_ENTER_RLC_SAFE_MODE 1
+#define MSG_EXIT_RLC_SAFE_MODE 0
+
+#define RLC_GPR_REG2__REQ_MASK 0x00000001
+#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
+#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
+
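+/*
+ * Carrizo/Stoney RLC safe-mode handshake over RLC_GPR_REG2: raise REQ
+ * together with the enter/exit message, then poll until the RLC
+ * reports gfx clock and power status (on entry) and clears REQ to
+ * acknowledge.  The iceland variants below speak the same protocol
+ * through RLC_SAFE_MODE.
+ */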
+static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev)
+{
+ u32 data = 0;
+ unsigned i;
+
+ data = RREG32(mmRLC_CNTL);
+ if ((data & RLC_CNTL__RLC_ENABLE_F32_MASK) == 0)
+ return;
+
+ if ((adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) ||
+ (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG))) {
+ data |= RLC_GPR_REG2__REQ_MASK;
+ data &= ~RLC_GPR_REG2__MESSAGE_MASK;
+ data |= (MSG_ENTER_RLC_SAFE_MODE << RLC_GPR_REG2__MESSAGE__SHIFT);
+ WREG32(mmRLC_GPR_REG2, data);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(mmRLC_GPM_STAT) &
+ (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+ RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
+ (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+ RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
+ break;
+ udelay(1);
+ }
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+ break;
+ udelay(1);
+ }
+ adev->gfx.rlc.in_safe_mode = true;
+ }
+}
+
+static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev)
+{
+ u32 data;
+ unsigned i;
+
+ data = RREG32(mmRLC_CNTL);
+ if ((data & RLC_CNTL__RLC_ENABLE_F32_MASK) == 0)
+ return;
+
+ if ((adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) ||
+ (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG))) {
+ data |= RLC_GPR_REG2__REQ_MASK;
+ data &= ~RLC_GPR_REG2__MESSAGE_MASK;
+ data |= (MSG_EXIT_RLC_SAFE_MODE << RLC_GPR_REG2__MESSAGE__SHIFT);
+ WREG32(mmRLC_GPR_REG2, data);
+ adev->gfx.rlc.in_safe_mode = false;
+ }
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(mmRLC_GPR_REG2) & RLC_GPR_REG2__REQ_MASK) == 0)
+ break;
+ udelay(1);
+ }
+}
+
+static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
+{
+ u32 data;
+ unsigned i;
+
+ data = RREG32(mmRLC_CNTL);
+ if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
+ return;
+
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
+ data |= RLC_SAFE_MODE__CMD_MASK;
+ data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+ data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+ WREG32(mmRLC_SAFE_MODE, data);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(mmRLC_GPM_STAT) &
+ (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+ RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
+ (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+ RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
+ break;
+ udelay(1);
+ }
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+ break;
+ udelay(1);
+ }
+ adev->gfx.rlc.in_safe_mode = true;
+ }
+}
+
+static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
+{
+ u32 data = 0;
+ unsigned i;
+
+ data = RREG32(mmRLC_CNTL);
+ if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
+ return;
+
+ if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
+ if (adev->gfx.rlc.in_safe_mode) {
+ data |= RLC_SAFE_MODE__CMD_MASK;
+ data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+ WREG32(mmRLC_SAFE_MODE, data);
+ adev->gfx.rlc.in_safe_mode = false;
+ }
+ }
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(mmRLC_SAFE_MODE) & RLC_SAFE_MODE__CMD_MASK) == 0)
+ break;
+ udelay(1);
+ }
+}
+
+static void gfx_v8_0_nop_enter_rlc_safe_mode(struct amdgpu_device *adev)
+{
+ adev->gfx.rlc.in_safe_mode = true;
+}
+
+static void gfx_v8_0_nop_exit_rlc_safe_mode(struct amdgpu_device *adev)
+{
+ adev->gfx.rlc.in_safe_mode = false;
+}
+
+static const struct amdgpu_rlc_funcs cz_rlc_funcs = {
+ .enter_safe_mode = cz_enter_rlc_safe_mode,
+ .exit_safe_mode = cz_exit_rlc_safe_mode
+};
+
+static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
+ .enter_safe_mode = iceland_enter_rlc_safe_mode,
+ .exit_safe_mode = iceland_exit_rlc_safe_mode
+};
+
+static const struct amdgpu_rlc_funcs gfx_v8_0_nop_rlc_funcs = {
+ .enter_safe_mode = gfx_v8_0_nop_enter_rlc_safe_mode,
+ .exit_safe_mode = gfx_v8_0_nop_exit_rlc_safe_mode
+};
+
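+/*
+ * Clock-gating updates are bracketed by the per-ASIC
+ * enter/exit_safe_mode hooks so the RLC is parked while MGCG/CGCG
+ * state changes; each feature write is additionally gated on its
+ * AMD_CG_SUPPORT_GFX_* flag.
+ */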
+static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t temp, data;
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
/* It is disabled by HW by default */
- if (enable) {
- /* 1 - RLC memory Light sleep */
- temp = data = RREG32(mmRLC_MEM_SLP_CNTL);
- data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
- if (temp != data)
- WREG32(mmRLC_MEM_SLP_CNTL, data);
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
+ /* 1 - RLC memory Light sleep */
+ temp = data = RREG32(mmRLC_MEM_SLP_CNTL);
+ data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
+ if (temp != data)
+ WREG32(mmRLC_MEM_SLP_CNTL, data);
+ }
- /* 2 - CP memory Light sleep */
- temp = data = RREG32(mmCP_MEM_SLP_CNTL);
- data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
- if (temp != data)
- WREG32(mmCP_MEM_SLP_CNTL, data);
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
+ /* 2 - CP memory Light sleep */
+ temp = data = RREG32(mmCP_MEM_SLP_CNTL);
+ data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+ if (temp != data)
+ WREG32(mmCP_MEM_SLP_CNTL, data);
+ }
+ }
/* 3 - RLC_CGTT_MGCG_OVERRIDE */
temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
- data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
- RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
- RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
- RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
+ if (adev->flags & AMD_IS_APU)
+ data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK);
+ else
+ data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
+ RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
if (temp != data)
WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);
@@ -4313,19 +5364,23 @@ static void fiji_update_medium_grain_clock_gating(struct amdgpu_device *adev,
gfx_v8_0_wait_for_rlc_serdes(adev);
/* 5 - clear mgcg override */
- fiji_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
-
- /* 6 - Enable CGTS(Tree Shade) MGCG /MGLS */
- temp = data = RREG32(mmCGTS_SM_CTRL_REG);
- data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
- data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
- data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
- data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
- data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
- data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
- data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
- if (temp != data)
- WREG32(mmCGTS_SM_CTRL_REG, data);
+ gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
+
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
+ /* 6 - Enable CGTS(Tree Shade) MGCG/MGLS */
+ temp = data = RREG32(mmCGTS_SM_CTRL_REG);
+ data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
+ data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
+ data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
+ data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
+ if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
+ (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
+ data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
+ data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
+ data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
+ if (temp != data)
+ WREG32(mmCGTS_SM_CTRL_REG, data);
+ }
udelay(50);
/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
@@ -4365,23 +5420,27 @@ static void fiji_update_medium_grain_clock_gating(struct amdgpu_device *adev,
gfx_v8_0_wait_for_rlc_serdes(adev);
/* 6 - set mgcg override */
- fiji_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);
+ gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);
udelay(50);
/* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
}
+
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
-static void fiji_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
+static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t temp, temp1, data, data1;
temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
- if (enable) {
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
/* 1 enable cntx_empty_int_enable/cntx_busy_int_enable/
* Cmp_busy/GFX_Idle interrupts
*/
@@ -4396,25 +5455,29 @@ static void fiji_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
gfx_v8_0_wait_for_rlc_serdes(adev);
/* 3 - clear cgcg override */
- fiji_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
+ gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);
/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
/* 4 - write cmd to set CGLS */
- fiji_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
+ gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);
/* 5 - enable cgcg */
data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
- /* enable cgls*/
- data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
+ /* enable cgls */
+ data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
- temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
- data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;
+ temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
+ data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;
- if (temp1 != data1)
- WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
+ if (temp1 != data1)
+ WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
+ } else {
+ data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
+ }
if (temp != data)
WREG32(mmRLC_CGCG_CGLS_CTRL, data);
@@ -4439,36 +5502,38 @@ static void fiji_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
gfx_v8_0_wait_for_rlc_serdes(adev);
/* write cmd to Set CGCG Override */
- fiji_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
+ gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);
/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
gfx_v8_0_wait_for_rlc_serdes(adev);
/* write cmd to Clear CGLS */
- fiji_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);
+ gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);
/* disable cgcg, cgls should be disabled too. */
data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
- RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+ RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
if (temp != data)
WREG32(mmRLC_CGCG_CGLS_CTRL, data);
}
+
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
}
-static int fiji_update_gfx_clock_gating(struct amdgpu_device *adev,
- bool enable)
+static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
+ bool enable)
{
if (enable) {
/* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
* === MGCG + MGLS + TS(CG/LS) ===
*/
- fiji_update_medium_grain_clock_gating(adev, enable);
- fiji_update_coarse_grain_clock_gating(adev, enable);
+ gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
+ gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
} else {
/* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
* === CGCG + CGLS ===
*/
- fiji_update_coarse_grain_clock_gating(adev, enable);
- fiji_update_medium_grain_clock_gating(adev, enable);
+ gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
+ gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
}
return 0;
}
@@ -4480,8 +5545,10 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_FIJI:
- fiji_update_gfx_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ gfx_v8_0_update_gfx_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
break;
default:
break;
@@ -4571,17 +5638,13 @@ static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
}
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib)
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
{
- bool need_ctx_switch = ring->current_ctx != ib->ctx;
u32 header, control = 0;
u32 next_rptr = ring->wptr + 5;
- /* drop the CE preamble IB for the same context */
- if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
- return;
-
- if (need_ctx_switch)
+ if (ctx_switch)
next_rptr += 2;
next_rptr += 4;
@@ -4592,7 +5655,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
- if (need_ctx_switch) {
+ if (ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}
@@ -4602,7 +5665,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw | (ib->vm_id << 24);
+ control |= ib->length_dw | (vm_id << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -4615,7 +5678,8 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib)
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
{
u32 header, control = 0;
u32 next_rptr = ring->wptr + 5;
@@ -4631,7 +5695,7 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw | (ib->vm_id << 24);
+ control |= ib->length_dw | (vm_id << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -4653,6 +5717,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
+ EOP_TC_WB_ACTION_EN |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
EVENT_INDEX(5)));
amdgpu_ring_write(ring, addr & 0xfffffffc);
@@ -4991,6 +6056,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
}
const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
+ .name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
.late_init = gfx_v8_0_late_init,
.sw_init = gfx_v8_0_sw_init,
@@ -5002,7 +6068,6 @@ const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.is_idle = gfx_v8_0_is_idle,
.wait_for_idle = gfx_v8_0_wait_for_idle,
.soft_reset = gfx_v8_0_soft_reset,
- .print_status = gfx_v8_0_print_status,
.set_clockgating_state = gfx_v8_0_set_clockgating_state,
.set_powergating_state = gfx_v8_0_set_powergating_state,
};
@@ -5081,6 +6146,22 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
}
+static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ case CHIP_STONEY:
+ adev->gfx.rlc.funcs = &iceland_rlc_funcs;
+ break;
+ case CHIP_CARRIZO:
+ adev->gfx.rlc.funcs = &cz_rlc_funcs;
+ break;
+ default:
+ adev->gfx.rlc.funcs = &gfx_v8_0_nop_rlc_funcs;
+ break;
+ }
+}
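The safe-mode hooks used by the clock-gating paths above are plain function pointers in the table this switch installs. A sketch of the assumed shape of such a table (field names inferred from the calls above, not copied from the header), including a no-op fallback variant:

struct example_rlc_funcs {
        void (*enter_safe_mode)(struct amdgpu_device *adev);
        void (*exit_safe_mode)(struct amdgpu_device *adev);
};

static void example_rlc_nop(struct amdgpu_device *adev)
{
        /* nothing to do on ASICs without a safe-mode handshake */
}

static const struct example_rlc_funcs example_nop_rlc_funcs = {
        .enter_safe_mode = example_rlc_nop,
        .exit_safe_mode = example_rlc_nop,
};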
+
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
{
/* init asic gds info */
@@ -5124,14 +6205,11 @@ static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
return (~data) & mask;
}
-int gfx_v8_0_get_cu_info(struct amdgpu_device *adev,
- struct amdgpu_cu_info *cu_info)
+static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
{
int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
-
- if (!adev || !cu_info)
- return -EINVAL;
+ struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
memset(cu_info, 0, sizeof(*cu_info));
@@ -5162,6 +6240,4 @@ int gfx_v8_0_get_cu_info(struct amdgpu_device *adev,
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
-
- return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index 021e05193..16a49f53a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -28,6 +28,5 @@ extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev);
void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 4ebaf9c97..233f38cc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1115,114 +1115,6 @@ static int gmc_v7_0_wait_for_idle(void *handle)
}
-static void gmc_v7_0_print_status(void *handle)
-{
- int i, j;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "GMC 8.x registers\n");
- dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(mmSRBM_STATUS));
- dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
- RREG32(mmSRBM_STATUS2));
-
- dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
- dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
- dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
- RREG32(mmMC_VM_MX_L1_TLB_CNTL));
- dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n",
- RREG32(mmVM_L2_CNTL));
- dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n",
- RREG32(mmVM_L2_CNTL2));
- dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n",
- RREG32(mmVM_L2_CNTL3));
- dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
- RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
- dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
- RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
- dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
- RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
- dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n",
- RREG32(mmVM_CONTEXT0_CNTL2));
- dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n",
- RREG32(mmVM_CONTEXT0_CNTL));
- dev_info(adev->dev, " 0x15D4=0x%08X\n",
- RREG32(0x575));
- dev_info(adev->dev, " 0x15D8=0x%08X\n",
- RREG32(0x576));
- dev_info(adev->dev, " 0x15DC=0x%08X\n",
- RREG32(0x577));
- dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
- RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
- dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
- RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
- dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
- RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
- dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n",
- RREG32(mmVM_CONTEXT1_CNTL2));
- dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n",
- RREG32(mmVM_CONTEXT1_CNTL));
- for (i = 0; i < 16; i++) {
- if (i < 8)
- dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
- i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
- else
- dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
- i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
- }
- dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
- RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
- dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
- RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
- dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
- RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
- dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n",
- RREG32(mmMC_VM_FB_LOCATION));
- dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n",
- RREG32(mmMC_VM_AGP_BASE));
- dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n",
- RREG32(mmMC_VM_AGP_TOP));
- dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n",
- RREG32(mmMC_VM_AGP_BOT));
-
- if (adev->asic_type == CHIP_KAVERI) {
- dev_info(adev->dev, " CHUB_CONTROL=0x%08X\n",
- RREG32(mmCHUB_CONTROL));
- }
-
- dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
- RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
- dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n",
- RREG32(mmHDP_NONSURFACE_BASE));
- dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n",
- RREG32(mmHDP_NONSURFACE_INFO));
- dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n",
- RREG32(mmHDP_NONSURFACE_SIZE));
- dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n",
- RREG32(mmHDP_MISC_CNTL));
- dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n",
- RREG32(mmHDP_HOST_PATH_CNTL));
-
- for (i = 0, j = 0; i < 32; i++, j += 0x6) {
- dev_info(adev->dev, " %d:\n", i);
- dev_info(adev->dev, " 0x%04X=0x%08X\n",
- 0xb05 + j, RREG32(0xb05 + j));
- dev_info(adev->dev, " 0x%04X=0x%08X\n",
- 0xb06 + j, RREG32(0xb06 + j));
- dev_info(adev->dev, " 0x%04X=0x%08X\n",
- 0xb07 + j, RREG32(0xb07 + j));
- dev_info(adev->dev, " 0x%04X=0x%08X\n",
- 0xb08 + j, RREG32(0xb08 + j));
- dev_info(adev->dev, " 0x%04X=0x%08X\n",
- 0xb09 + j, RREG32(0xb09 + j));
- }
-
- dev_info(adev->dev, " BIF_FB_EN=0x%08X\n",
- RREG32(mmBIF_FB_EN));
-}
-
static int gmc_v7_0_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1242,8 +1134,6 @@ static int gmc_v7_0_soft_reset(void *handle)
}
if (srbm_soft_reset) {
- gmc_v7_0_print_status((void *)adev);
-
gmc_v7_0_mc_stop(adev, &save);
if (gmc_v7_0_wait_for_idle(adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
@@ -1267,8 +1157,6 @@ static int gmc_v7_0_soft_reset(void *handle)
gmc_v7_0_mc_resume(adev, &save);
udelay(50);
-
- gmc_v7_0_print_status((void *)adev);
}
return 0;
@@ -1371,6 +1259,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
+ .name = "gmc_v7_0",
.early_init = gmc_v7_0_early_init,
.late_init = gmc_v7_0_late_init,
.sw_init = gmc_v7_0_sw_init,
@@ -1382,7 +1271,6 @@ const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.is_idle = gmc_v7_0_is_idle,
.wait_for_idle = gmc_v7_0_wait_for_idle,
.soft_reset = gmc_v7_0_soft_reset,
- .print_status = gmc_v7_0_print_status,
.set_clockgating_state = gmc_v7_0_set_clockgating_state,
.set_powergating_state = gmc_v7_0_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 20ed14a84..a7b6de8a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -73,6 +73,23 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
+static const u32 golden_settings_polaris11_a11[] =
+{
+ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+};
+
+static const u32 golden_settings_polaris10_a11[] =
+{
+ mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
+ mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+ mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+};
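These golden-register tables are flat arrays of (offset, and_mask, or_mask) triples fed to amdgpu_program_register_sequence(). Roughly, and as a sketch rather than the driver's exact implementation: the masked bits are cleared, the or-value is applied, and an all-ones mask acts as a plain overwrite:

static void example_program_sequence(struct amdgpu_device *adev,
                                     const u32 *regs, u32 array_size)
{
        u32 i, tmp;

        for (i = 0; i + 2 < array_size; i += 3) {
                u32 reg = regs[i + 0];
                u32 and_mask = regs[i + 1];
                u32 or_mask = regs[i + 2];

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;          /* full overwrite */
                } else {
                        tmp = RREG32(reg);
                        tmp &= ~and_mask;       /* clear the affected bits */
                        tmp |= or_mask;         /* apply the golden value */
                }
                WREG32(reg, tmp);
        }
}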
+
static const u32 cz_mgcg_cgcg_init[] =
{
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
@@ -103,6 +120,16 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_tonga_a11,
(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
break;
+ case CHIP_POLARIS11:
+ amdgpu_program_register_sequence(adev,
+ golden_settings_polaris11_a11,
+ (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
+ break;
+ case CHIP_POLARIS10:
+ amdgpu_program_register_sequence(adev,
+ golden_settings_polaris10_a11,
+ (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
+ break;
case CHIP_CARRIZO:
amdgpu_program_register_sequence(adev,
cz_mgcg_cgcg_init,
@@ -209,6 +236,12 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_TONGA:
chip_name = "tonga";
break;
+ case CHIP_POLARIS11:
+ chip_name = "polaris11";
+ break;
+ case CHIP_POLARIS10:
+ chip_name = "polaris10";
+ break;
case CHIP_FIJI:
case CHIP_CARRIZO:
case CHIP_STONEY:
@@ -1085,111 +1118,6 @@ static int gmc_v8_0_wait_for_idle(void *handle)
}
-static void gmc_v8_0_print_status(void *handle)
-{
- int i, j;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "GMC 8.x registers\n");
- dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(mmSRBM_STATUS));
- dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
- RREG32(mmSRBM_STATUS2));
-
- dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
- dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
- dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
- RREG32(mmMC_VM_MX_L1_TLB_CNTL));
- dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n",
- RREG32(mmVM_L2_CNTL));
- dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n",
- RREG32(mmVM_L2_CNTL2));
- dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n",
- RREG32(mmVM_L2_CNTL3));
- dev_info(adev->dev, " VM_L2_CNTL4=0x%08X\n",
- RREG32(mmVM_L2_CNTL4));
- dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
- RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
- dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
- RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
- dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
- RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
- dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n",
- RREG32(mmVM_CONTEXT0_CNTL2));
- dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n",
- RREG32(mmVM_CONTEXT0_CNTL));
- dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n",
- RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR));
- dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n",
- RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR));
- dev_info(adev->dev, " mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n",
- RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET));
- dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
- RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
- dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
- RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
- dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
- RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
- dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n",
- RREG32(mmVM_CONTEXT1_CNTL2));
- dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n",
- RREG32(mmVM_CONTEXT1_CNTL));
- for (i = 0; i < 16; i++) {
- if (i < 8)
- dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
- i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
- else
- dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
- i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
- }
- dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
- RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
- dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
- RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
- dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
- RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
- dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n",
- RREG32(mmMC_VM_FB_LOCATION));
- dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n",
- RREG32(mmMC_VM_AGP_BASE));
- dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n",
- RREG32(mmMC_VM_AGP_TOP));
- dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n",
- RREG32(mmMC_VM_AGP_BOT));
-
- dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
- RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
- dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n",
- RREG32(mmHDP_NONSURFACE_BASE));
- dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n",
- RREG32(mmHDP_NONSURFACE_INFO));
- dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n",
- RREG32(mmHDP_NONSURFACE_SIZE));
- dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n",
- RREG32(mmHDP_MISC_CNTL));
- dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n",
- RREG32(mmHDP_HOST_PATH_CNTL));
-
- for (i = 0, j = 0; i < 32; i++, j += 0x6) {
- dev_info(adev->dev, " %d:\n", i);
- dev_info(adev->dev, " 0x%04X=0x%08X\n",
- 0xb05 + j, RREG32(0xb05 + j));
- dev_info(adev->dev, " 0x%04X=0x%08X\n",
- 0xb06 + j, RREG32(0xb06 + j));
- dev_info(adev->dev, " 0x%04X=0x%08X\n",
- 0xb07 + j, RREG32(0xb07 + j));
- dev_info(adev->dev, " 0x%04X=0x%08X\n",
- 0xb08 + j, RREG32(0xb08 + j));
- dev_info(adev->dev, " 0x%04X=0x%08X\n",
- 0xb09 + j, RREG32(0xb09 + j));
- }
-
- dev_info(adev->dev, " BIF_FB_EN=0x%08X\n",
- RREG32(mmBIF_FB_EN));
-}
-
static int gmc_v8_0_soft_reset(void *handle)
{
struct amdgpu_mode_mc_save save;
@@ -1209,8 +1137,6 @@ static int gmc_v8_0_soft_reset(void *handle)
}
if (srbm_soft_reset) {
- gmc_v8_0_print_status((void *)adev);
-
gmc_v8_0_mc_stop(adev, &save);
if (gmc_v8_0_wait_for_idle(adev)) {
dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
@@ -1234,8 +1160,6 @@ static int gmc_v8_0_soft_reset(void *handle)
gmc_v8_0_mc_resume(adev, &save);
udelay(50);
-
- gmc_v8_0_print_status((void *)adev);
}
return 0;
@@ -1313,11 +1237,11 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
}
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
+ bool enable)
{
uint32_t data;
- if (enable) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
data = RREG32(mmMC_HUB_MISC_HUB_CG);
data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
WREG32(mmMC_HUB_MISC_HUB_CG, data);
@@ -1393,11 +1317,11 @@ static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
}
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
- bool enable)
+ bool enable)
{
uint32_t data;
- if (enable) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
data = RREG32(mmMC_HUB_MISC_HUB_CG);
data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_HUB_MISC_HUB_CG, data);
@@ -1497,6 +1421,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
+ .name = "gmc_v8_0",
.early_init = gmc_v8_0_early_init,
.late_init = gmc_v8_0_late_init,
.sw_init = gmc_v8_0_sw_init,
@@ -1508,7 +1433,6 @@ const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.is_idle = gmc_v8_0_is_idle,
.wait_for_idle = gmc_v8_0_wait_for_idle,
.soft_reset = gmc_v8_0_soft_reset,
- .print_status = gmc_v8_0_print_status,
.set_clockgating_state = gmc_v8_0_set_clockgating_state,
.set_powergating_state = gmc_v8_0_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
index 5731b3648..571e37566 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
@@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle)
static int iceland_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
@@ -157,6 +162,7 @@ static int iceland_dpm_set_powergating_state(void *handle,
}
const struct amd_ip_funcs iceland_dpm_ip_funcs = {
+ .name = "iceland_dpm",
.early_init = iceland_dpm_early_init,
.late_init = NULL,
.sw_init = iceland_dpm_sw_init,
@@ -168,7 +174,6 @@ const struct amd_ip_funcs iceland_dpm_ip_funcs = {
.is_idle = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
- .print_status = NULL,
.set_clockgating_state = iceland_dpm_set_clockgating_state,
.set_powergating_state = iceland_dpm_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 679e7394a..3b8906ce3 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -103,7 +103,6 @@ static void iceland_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int iceland_ih_irq_init(struct amdgpu_device *adev)
{
- int ret = 0;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
u64 wptr_off;
@@ -157,7 +156,7 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
iceland_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
@@ -351,35 +350,6 @@ static int iceland_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static void iceland_ih_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "ICELAND IH registers\n");
- dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(mmSRBM_STATUS));
- dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
- RREG32(mmSRBM_STATUS2));
- dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n",
- RREG32(mmINTERRUPT_CNTL));
- dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n",
- RREG32(mmINTERRUPT_CNTL2));
- dev_info(adev->dev, " IH_CNTL=0x%08X\n",
- RREG32(mmIH_CNTL));
- dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n",
- RREG32(mmIH_RB_CNTL));
- dev_info(adev->dev, " IH_RB_BASE=0x%08X\n",
- RREG32(mmIH_RB_BASE));
- dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n",
- RREG32(mmIH_RB_WPTR_ADDR_LO));
- dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n",
- RREG32(mmIH_RB_WPTR_ADDR_HI));
- dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n",
- RREG32(mmIH_RB_RPTR));
- dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n",
- RREG32(mmIH_RB_WPTR));
-}
-
static int iceland_ih_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
@@ -391,8 +361,6 @@ static int iceland_ih_soft_reset(void *handle)
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
- iceland_ih_print_status((void *)adev);
-
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -407,8 +375,6 @@ static int iceland_ih_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
-
- iceland_ih_print_status((void *)adev);
}
return 0;
@@ -427,6 +393,7 @@ static int iceland_ih_set_powergating_state(void *handle,
}
const struct amd_ip_funcs iceland_ih_ip_funcs = {
+ .name = "iceland_ih",
.early_init = iceland_ih_early_init,
.late_init = NULL,
.sw_init = iceland_ih_sw_init,
@@ -438,7 +405,6 @@ const struct amd_ip_funcs iceland_ih_ip_funcs = {
.is_idle = iceland_ih_is_idle,
.wait_for_idle = iceland_ih_wait_for_idle,
.soft_reset = iceland_ih_soft_reset,
- .print_status = iceland_ih_print_status,
.set_clockgating_state = iceland_ih_set_clockgating_state,
.set_powergating_state = iceland_ih_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 654d76723..a789a863d 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -135,11 +135,6 @@ static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
#endif
}
-static u32 sumo_get_sleep_divider_from_id(u32 id)
-{
- return 1 << id;
-}
-
static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
ATOM_AVAILABLE_SCLK_LIST *table)
@@ -2176,8 +2171,7 @@ static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
struct kv_power_info *pi = kv_get_pi(adev);
u32 i;
u32 temp;
- u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
- min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
+ u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);
if (sclk < min)
return 0;
@@ -2186,7 +2180,7 @@ static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
return 0;
for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
- temp = sclk / sumo_get_sleep_divider_from_id(i);
+ temp = sclk >> i;
if (temp >= min)
break;
}
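The replacement sclk >> i is the old sclk / (1 << i) with the helper inlined: sleep divider id i divides the engine clock by 2^i, and the loop picks the largest divider that keeps the clock at or above the minimum. For example, with sclk = 800 and min = 200 the loop settles on i = 2, since 800 >> 2 = 200. A condensed sketch:

static u8 example_sleep_divider_id(u32 sclk, u32 min)
{
        u8 i;

        for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--)
                if ((sclk >> i) >= min) /* sclk / 2^i stays above the floor */
                        break;
        return i;
}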
@@ -2258,7 +2252,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
if (pi->caps_stable_p_state) {
stable_p_state_sclk = (max_limits->sclk * 75) / 100;
- for (i = table->count - 1; i >= 0; i++) {
+ for (i = table->count - 1; i >= 0; i--) {
if (stable_p_state_sclk >= table->entries[i].clk) {
stable_p_state_sclk = table->entries[i].clk;
break;
@@ -3147,62 +3141,6 @@ static int kv_dpm_wait_for_idle(void *handle)
return 0;
}
-static void kv_dpm_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "KV/KB DPM registers\n");
- dev_info(adev->dev, " DIDT_SQ_CTRL0=0x%08X\n",
- RREG32_DIDT(ixDIDT_SQ_CTRL0));
- dev_info(adev->dev, " DIDT_DB_CTRL0=0x%08X\n",
- RREG32_DIDT(ixDIDT_DB_CTRL0));
- dev_info(adev->dev, " DIDT_TD_CTRL0=0x%08X\n",
- RREG32_DIDT(ixDIDT_TD_CTRL0));
- dev_info(adev->dev, " DIDT_TCP_CTRL0=0x%08X\n",
- RREG32_DIDT(ixDIDT_TCP_CTRL0));
- dev_info(adev->dev, " LCAC_SX0_OVR_SEL=0x%08X\n",
- RREG32_SMC(ixLCAC_SX0_OVR_SEL));
- dev_info(adev->dev, " LCAC_SX0_OVR_VAL=0x%08X\n",
- RREG32_SMC(ixLCAC_SX0_OVR_VAL));
- dev_info(adev->dev, " LCAC_MC0_OVR_SEL=0x%08X\n",
- RREG32_SMC(ixLCAC_MC0_OVR_SEL));
- dev_info(adev->dev, " LCAC_MC0_OVR_VAL=0x%08X\n",
- RREG32_SMC(ixLCAC_MC0_OVR_VAL));
- dev_info(adev->dev, " LCAC_MC1_OVR_SEL=0x%08X\n",
- RREG32_SMC(ixLCAC_MC1_OVR_SEL));
- dev_info(adev->dev, " LCAC_MC1_OVR_VAL=0x%08X\n",
- RREG32_SMC(ixLCAC_MC1_OVR_VAL));
- dev_info(adev->dev, " LCAC_MC2_OVR_SEL=0x%08X\n",
- RREG32_SMC(ixLCAC_MC2_OVR_SEL));
- dev_info(adev->dev, " LCAC_MC2_OVR_VAL=0x%08X\n",
- RREG32_SMC(ixLCAC_MC2_OVR_VAL));
- dev_info(adev->dev, " LCAC_MC3_OVR_SEL=0x%08X\n",
- RREG32_SMC(ixLCAC_MC3_OVR_SEL));
- dev_info(adev->dev, " LCAC_MC3_OVR_VAL=0x%08X\n",
- RREG32_SMC(ixLCAC_MC3_OVR_VAL));
- dev_info(adev->dev, " LCAC_CPL_OVR_SEL=0x%08X\n",
- RREG32_SMC(ixLCAC_CPL_OVR_SEL));
- dev_info(adev->dev, " LCAC_CPL_OVR_VAL=0x%08X\n",
- RREG32_SMC(ixLCAC_CPL_OVR_VAL));
- dev_info(adev->dev, " CG_FREQ_TRAN_VOTING_0=0x%08X\n",
- RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0));
- dev_info(adev->dev, " GENERAL_PWRMGT=0x%08X\n",
- RREG32_SMC(ixGENERAL_PWRMGT));
- dev_info(adev->dev, " SCLK_PWRMGT_CNTL=0x%08X\n",
- RREG32_SMC(ixSCLK_PWRMGT_CNTL));
- dev_info(adev->dev, " SMC_MESSAGE_0=0x%08X\n",
- RREG32(mmSMC_MESSAGE_0));
- dev_info(adev->dev, " SMC_RESP_0=0x%08X\n",
- RREG32(mmSMC_RESP_0));
- dev_info(adev->dev, " SMC_MSG_ARG_0=0x%08X\n",
- RREG32(mmSMC_MSG_ARG_0));
- dev_info(adev->dev, " SMC_IND_INDEX_0=0x%08X\n",
- RREG32(mmSMC_IND_INDEX_0));
- dev_info(adev->dev, " SMC_IND_DATA_0=0x%08X\n",
- RREG32(mmSMC_IND_DATA_0));
- dev_info(adev->dev, " SMC_IND_ACCESS_CNTL=0x%08X\n",
- RREG32(mmSMC_IND_ACCESS_CNTL));
-}
static int kv_dpm_soft_reset(void *handle)
{
@@ -3300,6 +3238,7 @@ static int kv_dpm_set_powergating_state(void *handle,
}
const struct amd_ip_funcs kv_dpm_ip_funcs = {
+ .name = "kv_dpm",
.early_init = kv_dpm_early_init,
.late_init = kv_dpm_late_init,
.sw_init = kv_dpm_sw_init,
@@ -3311,7 +3250,6 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
.is_idle = kv_dpm_is_idle,
.wait_for_idle = kv_dpm_wait_for_idle,
.soft_reset = kv_dpm_soft_reset,
- .print_status = kv_dpm_print_status,
.set_clockgating_state = kv_dpm_set_clockgating_state,
.set_powergating_state = kv_dpm_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 47d143269..36d97195f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -104,6 +104,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
}
}
+static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
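The new *_free_microcode() helpers pair each request_firmware() done at init with a release_firmware() at teardown and NULL the pointer afterwards; release_firmware() accepts NULL, so a repeated call is harmless. A minimal sketch of the pairing for a single instance, with "example/fw.bin" as a placeholder firmware name:

static int example_init_microcode(struct amdgpu_device *adev)
{
        /* loads and pins the blob until it is explicitly released */
        return request_firmware(&adev->sdma.instance[0].fw,
                                "example/fw.bin", adev->dev);
}

static void example_free_microcode(struct amdgpu_device *adev)
{
        release_firmware(adev->sdma.instance[0].fw);    /* NULL-safe */
        adev->sdma.instance[0].fw = NULL;               /* guard against reuse */
}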
+
/**
* sdma_v2_4_init_microcode - load ucode images from disk
*
@@ -241,9 +250,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VI).
*/
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib)
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
{
- u32 vmid = ib->vm_id & 0xf;
+ u32 vmid = vm_id & 0xf;
u32 next_rptr = ring->wptr + 5;
while ((next_rptr & 7) != 2)
@@ -459,6 +469,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -487,7 +499,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+ sdma_v2_4_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
@@ -578,8 +594,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
return -EINVAL;
}
- /* unhalt the MEs */
- sdma_v2_4_enable(adev, true);
+ /* halt the engine before programming it */
+ sdma_v2_4_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v2_4_gfx_resume(adev);
@@ -700,7 +716,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
if (r)
goto err1;
@@ -989,7 +1005,7 @@ static int sdma_v2_4_sw_init(void *handle)
ring->ring_obj = NULL;
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
- r = amdgpu_ring_init(adev, ring, 256 * 1024,
+ r = amdgpu_ring_init(adev, ring, 1024,
SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
@@ -1010,6 +1026,7 @@ static int sdma_v2_4_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ sdma_v2_4_free_microcode(adev);
return 0;
}
@@ -1079,55 +1096,6 @@ static int sdma_v2_4_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static void sdma_v2_4_print_status(void *handle)
-{
- int i, j;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "VI SDMA registers\n");
- dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
- RREG32(mmSRBM_STATUS2));
- for (i = 0; i < adev->sdma.num_instances; i++) {
- dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
- i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n",
- i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
- mutex_lock(&adev->srbm_mutex);
- for (j = 0; j < 16; j++) {
- vi_srbm_select(adev, 0, 0, 0, j);
- dev_info(adev->dev, " VM %d:\n", j);
- dev_info(adev->dev, " SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_APE1_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
- }
- vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- }
-}
-
static int sdma_v2_4_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
@@ -1150,8 +1118,6 @@ static int sdma_v2_4_soft_reset(void *handle)
}
if (srbm_soft_reset) {
- sdma_v2_4_print_status((void *)adev);
-
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -1166,8 +1132,6 @@ static int sdma_v2_4_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
-
- sdma_v2_4_print_status((void *)adev);
}
return 0;
@@ -1282,6 +1246,7 @@ static int sdma_v2_4_set_powergating_state(void *handle,
}
const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
+ .name = "sdma_v2_4",
.early_init = sdma_v2_4_early_init,
.late_init = NULL,
.sw_init = sdma_v2_4_sw_init,
@@ -1293,7 +1258,6 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.is_idle = sdma_v2_4_is_idle,
.wait_for_idle = sdma_v2_4_wait_for_idle,
.soft_reset = sdma_v2_4_soft_reset,
- .print_status = sdma_v2_4_print_status,
.set_clockgating_state = sdma_v2_4_set_clockgating_state,
.set_powergating_state = sdma_v2_4_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 44f059dbc..95c44942e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -51,6 +51,7 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);
/*(DEBLOBBED)*/
+
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
SDMA0_REGISTER_OFFSET,
@@ -95,6 +96,34 @@ static const u32 fiji_mgcg_cgcg_init[] =
mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
+static const u32 golden_settings_polaris11_a11[] =
+{
+ mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+ mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
+ mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+ mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
+ mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+};
+
+static const u32 golden_settings_polaris10_a11[] =
+{
+ mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+ mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
+ mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+ mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
+ mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
+ mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
+};
+
static const u32 cz_golden_settings_a11[] =
{
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
@@ -166,6 +195,16 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_tonga_a11,
(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
break;
+ case CHIP_POLARIS11:
+ amdgpu_program_register_sequence(adev,
+ golden_settings_polaris11_a11,
+ (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
+ break;
+ case CHIP_POLARIS10:
+ amdgpu_program_register_sequence(adev,
+ golden_settings_polaris10_a11,
+ (const u32)ARRAY_SIZE(golden_settings_polaris10_a11));
+ break;
case CHIP_CARRIZO:
amdgpu_program_register_sequence(adev,
cz_mgcg_cgcg_init,
@@ -187,6 +226,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
}
}
+static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
+{
+ int i;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+}
+
/**
* sdma_v3_0_init_microcode - load ucode images from disk
*
@@ -214,6 +262,12 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
case CHIP_FIJI:
chip_name = "fiji";
break;
+ case CHIP_POLARIS11:
+ chip_name = "polaris11";
+ break;
+ case CHIP_POLARIS10:
+ chip_name = "polaris10";
+ break;
case CHIP_CARRIZO:
chip_name = "carrizo";
break;
@@ -347,9 +401,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VI).
*/
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib)
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
{
- u32 vmid = ib->vm_id & 0xf;
+ u32 vmid = vm_id & 0xf;
u32 next_rptr = ring->wptr + 5;
while ((next_rptr & 7) != 2)
@@ -446,6 +501,31 @@ static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
+unsigned init_cond_exec(struct amdgpu_ring *ring)
+{
+ unsigned ret;
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
+ amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
+ amdgpu_ring_write(ring, 1);
+ ret = ring->wptr; /* this is the offset we need to patch later */
+ amdgpu_ring_write(ring, 0x55aa55aa); /* insert a dummy value here and patch it later */
+ return ret;
+}
+
+void patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
+{
+ unsigned cur;
+ BUG_ON(ring->ring[offset] != 0x55aa55aa);
+
+ cur = ring->wptr - 1;
+ if (likely(cur > offset))
+ ring->ring[offset] = cur - offset;
+ else
+ ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
+}
+
+
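init_cond_exec() emits a COND_EXE packet followed by a dummy dword (0x55aa55aa) and returns its ring offset; patch_cond_exec() later overwrites that dummy with the real number of dwords the packet may skip, taking ring wrap-around into account. A sketch of how the two halves are meant to be used together, with emit_skippable_packets() as a placeholder for the patchable region:

static void example_cond_exec_usage(struct amdgpu_ring *ring)
{
        unsigned offset;

        offset = init_cond_exec(ring);  /* reserve the count dword */
        emit_skippable_packets(ring);   /* packets the engine may skip */
        patch_cond_exec(ring, offset);  /* back-patch the real length */
}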
/**
* sdma_v3_0_gfx_stop - stop the gfx async dma engines
*
@@ -591,6 +671,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
+ WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
@@ -630,7 +712,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true;
+ }
+ /* unhalt the MEs */
+ sdma_v3_0_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v3_0_ctx_switch_enable(adev, true);
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring);
if (r) {
ring->ready = false;
@@ -723,10 +813,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
}
}
- /* unhalt the MEs */
- sdma_v3_0_enable(adev, true);
- /* enable sdma ring preemption */
- sdma_v3_0_ctx_switch_enable(adev, true);
+ /* disable the sdma engine before programming it */
+ sdma_v3_0_ctx_switch_enable(adev, false);
+ sdma_v3_0_enable(adev, false);
/* start the gfx rings and rlc compute queues */
r = sdma_v3_0_gfx_resume(adev);
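The bring-up order is inverted here: ring preemption and the engine are switched off before any ring registers are programmed, and sdma_v3_0_gfx_resume() only unhalts the MEs and re-enables preemption once every ring is configured (see the matching hunk above). A condensed sketch of the resulting sequence:

static int example_sdma_start(struct amdgpu_device *adev)
{
        /* quiesce the engine before touching ring registers */
        sdma_v3_0_ctx_switch_enable(adev, false);
        sdma_v3_0_enable(adev, false);

        /* program all rings; gfx_resume unhalts and runs the ring tests */
        return sdma_v3_0_gfx_resume(adev);
}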
@@ -847,7 +936,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8;
- r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
if (r)
goto err1;
@@ -1145,7 +1234,7 @@ static int sdma_v3_0_sw_init(void *handle)
AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
sprintf(ring->name, "sdma%d", i);
- r = amdgpu_ring_init(adev, ring, 256 * 1024,
+ r = amdgpu_ring_init(adev, ring, 1024,
SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
&adev->sdma.trap_irq,
(i == 0) ?
@@ -1166,6 +1255,7 @@ static int sdma_v3_0_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ sdma_v3_0_free_microcode(adev);
return 0;
}
@@ -1236,57 +1326,6 @@ static int sdma_v3_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static void sdma_v3_0_print_status(void *handle)
-{
- int i, j;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "VI SDMA registers\n");
- dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
- RREG32(mmSRBM_STATUS2));
- for (i = 0; i < adev->sdma.num_instances; i++) {
- dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n",
- i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_DOORBELL=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_TILING_CONFIG=0x%08X\n",
- i, RREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i]));
- mutex_lock(&adev->srbm_mutex);
- for (j = 0; j < 16; j++) {
- vi_srbm_select(adev, 0, 0, 0, j);
- dev_info(adev->dev, " VM %d:\n", j);
- dev_info(adev->dev, " SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
- dev_info(adev->dev, " SDMA%d_GFX_APE1_CNTL=0x%08X\n",
- i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
- }
- vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
- }
-}
-
static int sdma_v3_0_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
@@ -1309,8 +1348,6 @@ static int sdma_v3_0_soft_reset(void *handle)
}
if (srbm_soft_reset) {
- sdma_v3_0_print_status((void *)adev);
-
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -1325,8 +1362,6 @@ static int sdma_v3_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
-
- sdma_v3_0_print_status((void *)adev);
}
return 0;
@@ -1427,40 +1462,31 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
-static void fiji_update_sdma_medium_grain_clock_gating(
+static void sdma_v3_0_update_sdma_medium_grain_clock_gating(
struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
+ int i;
- if (enable) {
- temp = data = RREG32(mmSDMA0_CLK_CTRL);
- data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
- SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
- if (data != temp)
- WREG32(mmSDMA0_CLK_CTRL, data);
-
- temp = data = RREG32(mmSDMA1_CLK_CTRL);
- data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
-
- if (data != temp)
- WREG32(mmSDMA1_CLK_CTRL, data);
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
+ data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
+ SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
+ if (data != temp)
+ WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
+ }
} else {
- temp = data = RREG32(mmSDMA0_CLK_CTRL);
- data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
+ data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
@@ -1469,54 +1495,35 @@ static void fiji_update_sdma_medium_grain_clock_gating(
SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;
- if (data != temp)
- WREG32(mmSDMA0_CLK_CTRL, data);
-
- temp = data = RREG32(mmSDMA1_CLK_CTRL);
- data |= SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
- SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK;
-
- if (data != temp)
- WREG32(mmSDMA1_CLK_CTRL, data);
+ if (data != temp)
+ WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
+ }
}
}
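Since the SDMA1 register block sits at a fixed offset from SDMA0, one loop over sdma_offsets[] now replaces the duplicated SDMA0/SDMA1 code paths. A minimal sketch of the per-instance read-modify-write, with the mask left as a parameter:

static void example_per_instance_rmw(struct amdgpu_device *adev, u32 mask)
{
        u32 temp, data;
        int i;

        for (i = 0; i < adev->sdma.num_instances; i++) {
                temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
                data &= ~mask;          /* same edit for each engine */
                if (data != temp)
                        WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
        }
}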
-static void fiji_update_sdma_medium_grain_light_sleep(
+static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
+ int i;
- if (enable) {
- temp = data = RREG32(mmSDMA0_POWER_CNTL);
- data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
-
- if (temp != data)
- WREG32(mmSDMA0_POWER_CNTL, data);
-
- temp = data = RREG32(mmSDMA1_POWER_CNTL);
- data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
+ data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
- if (temp != data)
- WREG32(mmSDMA1_POWER_CNTL, data);
+ if (temp != data)
+ WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
+ }
} else {
- temp = data = RREG32(mmSDMA0_POWER_CNTL);
- data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
-
- if (temp != data)
- WREG32(mmSDMA0_POWER_CNTL, data);
-
- temp = data = RREG32(mmSDMA1_POWER_CNTL);
- data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
+ data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
- if (temp != data)
- WREG32(mmSDMA1_POWER_CNTL, data);
+ if (temp != data)
+ WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
+ }
}
}
@@ -1527,9 +1534,11 @@ static int sdma_v3_0_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_FIJI:
- fiji_update_sdma_medium_grain_clock_gating(adev,
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ sdma_v3_0_update_sdma_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- fiji_update_sdma_medium_grain_light_sleep(adev,
+ sdma_v3_0_update_sdma_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
default:
@@ -1545,6 +1554,7 @@ static int sdma_v3_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
+ .name = "sdma_v3_0",
.early_init = sdma_v3_0_early_init,
.late_init = NULL,
.sw_init = sdma_v3_0_sw_init,
@@ -1556,7 +1566,6 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.is_idle = sdma_v3_0_is_idle,
.wait_for_idle = sdma_v3_0_wait_for_idle,
.soft_reset = sdma_v3_0_soft_reset,
- .print_status = sdma_v3_0_print_status,
.set_clockgating_state = sdma_v3_0_set_clockgating_state,
.set_powergating_state = sdma_v3_0_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h
index c24a81eeb..880152c0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h
@@ -44,6 +44,7 @@
#define UCODE_ID_IH_REG_RESTORE 11
#define UCODE_ID_VBIOS 12
#define UCODE_ID_MISC_METADATA 13
+#define UCODE_ID_SMU_SK 14
#define UCODE_ID_RLC_SCRATCH 32
#define UCODE_ID_RLC_SRM_ARAM 33
#define UCODE_ID_RLC_SRM_DRAM 34
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
index 4dc71926d..fc5c33a6e 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
@@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle)
static int tonga_dpm_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+
return 0;
}
@@ -143,6 +148,7 @@ static int tonga_dpm_set_powergating_state(void *handle,
}
const struct amd_ip_funcs tonga_dpm_ip_funcs = {
+ .name = "tonga_dpm",
.early_init = tonga_dpm_early_init,
.late_init = NULL,
.sw_init = tonga_dpm_sw_init,
@@ -154,7 +160,6 @@ const struct amd_ip_funcs tonga_dpm_ip_funcs = {
.is_idle = NULL,
.wait_for_idle = NULL,
.soft_reset = NULL,
- .print_status = NULL,
.set_clockgating_state = tonga_dpm_set_clockgating_state,
.set_powergating_state = tonga_dpm_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 0f14199cf..c92055805 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -99,7 +99,6 @@ static void tonga_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int tonga_ih_irq_init(struct amdgpu_device *adev)
{
- int ret = 0;
int rb_bufsz;
u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr;
u64 wptr_off;
@@ -165,7 +164,7 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
tonga_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
@@ -374,35 +373,6 @@ static int tonga_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static void tonga_ih_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "TONGA IH registers\n");
- dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
- RREG32(mmSRBM_STATUS));
- dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
- RREG32(mmSRBM_STATUS2));
- dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n",
- RREG32(mmINTERRUPT_CNTL));
- dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n",
- RREG32(mmINTERRUPT_CNTL2));
- dev_info(adev->dev, " IH_CNTL=0x%08X\n",
- RREG32(mmIH_CNTL));
- dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n",
- RREG32(mmIH_RB_CNTL));
- dev_info(adev->dev, " IH_RB_BASE=0x%08X\n",
- RREG32(mmIH_RB_BASE));
- dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n",
- RREG32(mmIH_RB_WPTR_ADDR_LO));
- dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n",
- RREG32(mmIH_RB_WPTR_ADDR_HI));
- dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n",
- RREG32(mmIH_RB_RPTR));
- dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n",
- RREG32(mmIH_RB_WPTR));
-}
-
static int tonga_ih_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
@@ -414,8 +384,6 @@ static int tonga_ih_soft_reset(void *handle)
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
- tonga_ih_print_status(adev);
-
tmp = RREG32(mmSRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
@@ -430,8 +398,6 @@ static int tonga_ih_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
-
- tonga_ih_print_status(adev);
}
return 0;
@@ -450,6 +416,7 @@ static int tonga_ih_set_powergating_state(void *handle,
}
const struct amd_ip_funcs tonga_ih_ip_funcs = {
+ .name = "tonga_ih",
.early_init = tonga_ih_early_init,
.late_init = NULL,
.sw_init = tonga_ih_sw_init,
@@ -461,7 +428,6 @@ const struct amd_ip_funcs tonga_ih_ip_funcs = {
.is_idle = tonga_ih_is_idle,
.wait_for_idle = tonga_ih_wait_for_idle,
.soft_reset = tonga_ih_soft_reset,
- .print_status = tonga_ih_print_status,
.set_clockgating_state = tonga_ih_set_clockgating_state,
.set_powergating_state = tonga_ih_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index cb4637531..f07551476 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -114,7 +114,7 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@@ -489,7 +489,8 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib)
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
amdgpu_ring_write(ring, ib->gpu_addr);
@@ -559,12 +560,13 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
addr += size;
- size = AMDGPU_UVD_STACK_SIZE >> 3;
+ size = AMDGPU_UVD_HEAP_SIZE >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
addr += size;
- size = AMDGPU_UVD_HEAP_SIZE >> 3;
+ size = (AMDGPU_UVD_STACK_SIZE +
+ (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
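The three VCPU cache windows are carved sequentially out of one buffer, so only the order and the size formulas change: the heap now follows the firmware, and the stack plus the per-handle session state comes last. A sketch of the layout arithmetic, assuming (from the shifts above) that the registers take sizes in 8-byte units:

static void example_uvd_cache_layout(u32 fw_bytes, u32 max_handles)
{
        u32 addr, size;

        addr = 0;
        size = fw_bytes >> 3;                   /* window 0: firmware */
        addr += size;
        size = AMDGPU_UVD_HEAP_SIZE >> 3;       /* window 1: heap */
        addr += size;
        size = (AMDGPU_UVD_STACK_SIZE +
                AMDGPU_UVD_SESSION_SIZE * max_handles) >> 3;
        /* window 2: stack + sessions; each addr/size pair would be
         * written to the matching OFFSETn/SIZEn register */
}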
@@ -679,117 +681,6 @@ static int uvd_v4_2_soft_reset(void *handle)
return uvd_v4_2_start(adev);
}
-static void uvd_v4_2_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- dev_info(adev->dev, "UVD 4.2 registers\n");
- dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n",
- RREG32(mmUVD_SEMA_ADDR_LOW));
- dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n",
- RREG32(mmUVD_SEMA_ADDR_HIGH));
- dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n",
- RREG32(mmUVD_SEMA_CMD));
- dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n",
- RREG32(mmUVD_GPCOM_VCPU_CMD));
- dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n",
- RREG32(mmUVD_GPCOM_VCPU_DATA0));
- dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n",
- RREG32(mmUVD_GPCOM_VCPU_DATA1));
- dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n",
- RREG32(mmUVD_ENGINE_CNTL));
- dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n",
- RREG32(mmUVD_SEMA_CNTL));
- dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n",
- RREG32(mmUVD_LMI_EXT40_ADDR));
- dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n",
- RREG32(mmUVD_CTX_INDEX));
- dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n",
- RREG32(mmUVD_CTX_DATA));
- dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n",
- RREG32(mmUVD_CGC_GATE));
- dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n",
- RREG32(mmUVD_CGC_CTRL));
- dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n",
- RREG32(mmUVD_LMI_CTRL2));
- dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n",
- RREG32(mmUVD_MASTINT_EN));
- dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n",
- RREG32(mmUVD_LMI_ADDR_EXT));
- dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n",
- RREG32(mmUVD_LMI_CTRL));
- dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n",
- RREG32(mmUVD_LMI_SWAP_CNTL));
- dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n",
- RREG32(mmUVD_MP_SWAP_CNTL));
- dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUXA0));
- dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUXA1));
- dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUXB0));
- dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUXB1));
- dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUX));
- dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n",
- RREG32(mmUVD_MPC_SET_ALU));
- dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_OFFSET0));
- dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_SIZE0));
- dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_OFFSET1));
- dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_SIZE1));
- dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_OFFSET2));
- dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_SIZE2));
- dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n",
- RREG32(mmUVD_VCPU_CNTL));
- dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n",
- RREG32(mmUVD_SOFT_RESET));
- dev_info(adev->dev, " UVD_RBC_IB_BASE=0x%08X\n",
- RREG32(mmUVD_RBC_IB_BASE));
- dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n",
- RREG32(mmUVD_RBC_IB_SIZE));
- dev_info(adev->dev, " UVD_RBC_RB_BASE=0x%08X\n",
- RREG32(mmUVD_RBC_RB_BASE));
- dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n",
- RREG32(mmUVD_RBC_RB_RPTR));
- dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n",
- RREG32(mmUVD_RBC_RB_WPTR));
- dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
- RREG32(mmUVD_RBC_RB_WPTR_CNTL));
- dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n",
- RREG32(mmUVD_RBC_RB_CNTL));
- dev_info(adev->dev, " UVD_STATUS=0x%08X\n",
- RREG32(mmUVD_STATUS));
- dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
- RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
- dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
- RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
- dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
- RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
- dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
- RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
- dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n",
- RREG32(mmUVD_CONTEXT_ID));
- dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
-
-}
-
static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -849,6 +740,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
}
const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
+ .name = "uvd_v4_2",
.early_init = uvd_v4_2_early_init,
.late_init = NULL,
.sw_init = uvd_v4_2_sw_init,
@@ -860,7 +752,6 @@ const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.is_idle = uvd_v4_2_is_idle,
.wait_for_idle = uvd_v4_2_wait_for_idle,
.soft_reset = uvd_v4_2_soft_reset,
- .print_status = uvd_v4_2_print_status,
.set_clockgating_state = uvd_v4_2_set_clockgating_state,
.set_powergating_state = uvd_v4_2_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 16476d80f..e0a76a883 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -31,6 +31,7 @@
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "vi.h"
static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -110,7 +111,7 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@@ -271,12 +272,13 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
offset += size;
- size = AMDGPU_UVD_STACK_SIZE;
+ size = AMDGPU_UVD_HEAP_SIZE;
WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
offset += size;
- size = AMDGPU_UVD_HEAP_SIZE;
+ size = AMDGPU_UVD_STACK_SIZE +
+ (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
@@ -537,7 +539,8 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib)
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -622,120 +625,6 @@ static int uvd_v5_0_soft_reset(void *handle)
return uvd_v5_0_start(adev);
}
-static void uvd_v5_0_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- dev_info(adev->dev, "UVD 5.0 registers\n");
- dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n",
- RREG32(mmUVD_SEMA_ADDR_LOW));
- dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n",
- RREG32(mmUVD_SEMA_ADDR_HIGH));
- dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n",
- RREG32(mmUVD_SEMA_CMD));
- dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n",
- RREG32(mmUVD_GPCOM_VCPU_CMD));
- dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n",
- RREG32(mmUVD_GPCOM_VCPU_DATA0));
- dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n",
- RREG32(mmUVD_GPCOM_VCPU_DATA1));
- dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n",
- RREG32(mmUVD_ENGINE_CNTL));
- dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n",
- RREG32(mmUVD_SEMA_CNTL));
- dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n",
- RREG32(mmUVD_LMI_EXT40_ADDR));
- dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n",
- RREG32(mmUVD_CTX_INDEX));
- dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n",
- RREG32(mmUVD_CTX_DATA));
- dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n",
- RREG32(mmUVD_CGC_GATE));
- dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n",
- RREG32(mmUVD_CGC_CTRL));
- dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n",
- RREG32(mmUVD_LMI_CTRL2));
- dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n",
- RREG32(mmUVD_MASTINT_EN));
- dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n",
- RREG32(mmUVD_LMI_ADDR_EXT));
- dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n",
- RREG32(mmUVD_LMI_CTRL));
- dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n",
- RREG32(mmUVD_LMI_SWAP_CNTL));
- dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n",
- RREG32(mmUVD_MP_SWAP_CNTL));
- dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUXA0));
- dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUXA1));
- dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUXB0));
- dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUXB1));
- dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUX));
- dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n",
- RREG32(mmUVD_MPC_SET_ALU));
- dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_OFFSET0));
- dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_SIZE0));
- dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_OFFSET1));
- dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_SIZE1));
- dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_OFFSET2));
- dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_SIZE2));
- dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n",
- RREG32(mmUVD_VCPU_CNTL));
- dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n",
- RREG32(mmUVD_SOFT_RESET));
- dev_info(adev->dev, " UVD_LMI_RBC_IB_64BIT_BAR_LOW=0x%08X\n",
- RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW));
- dev_info(adev->dev, " UVD_LMI_RBC_IB_64BIT_BAR_HIGH=0x%08X\n",
- RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH));
- dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n",
- RREG32(mmUVD_RBC_IB_SIZE));
- dev_info(adev->dev, " UVD_LMI_RBC_RB_64BIT_BAR_LOW=0x%08X\n",
- RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW));
- dev_info(adev->dev, " UVD_LMI_RBC_RB_64BIT_BAR_HIGH=0x%08X\n",
- RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH));
- dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n",
- RREG32(mmUVD_RBC_RB_RPTR));
- dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n",
- RREG32(mmUVD_RBC_RB_WPTR));
- dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
- RREG32(mmUVD_RBC_RB_WPTR_CNTL));
- dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n",
- RREG32(mmUVD_RBC_RB_CNTL));
- dev_info(adev->dev, " UVD_STATUS=0x%08X\n",
- RREG32(mmUVD_STATUS));
- dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
- RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
- dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
- RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
- dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
- RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
- dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
- RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
- dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n",
- RREG32(mmUVD_CONTEXT_ID));
- dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
-}
-
static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -754,14 +643,128 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
+{
+ uint32_t data, data1, data2, suvd_flags;
+
+ data = RREG32(mmUVD_CGC_CTRL);
+ data1 = RREG32(mmUVD_SUVD_CGC_GATE);
+ data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
+
+ data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
+ UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
+
+ suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
+ UVD_SUVD_CGC_GATE__SIT_MASK |
+ UVD_SUVD_CGC_GATE__SMP_MASK |
+ UVD_SUVD_CGC_GATE__SCM_MASK |
+ UVD_SUVD_CGC_GATE__SDB_MASK;
+
+ data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
+ (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
+ (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
+
+ data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
+ UVD_CGC_CTRL__SYS_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_MODE_MASK |
+ UVD_CGC_CTRL__MPEG2_MODE_MASK |
+ UVD_CGC_CTRL__REGS_MODE_MASK |
+ UVD_CGC_CTRL__RBC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_MC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
+ UVD_CGC_CTRL__IDCT_MODE_MASK |
+ UVD_CGC_CTRL__MPRD_MODE_MASK |
+ UVD_CGC_CTRL__MPC_MODE_MASK |
+ UVD_CGC_CTRL__LBSI_MODE_MASK |
+ UVD_CGC_CTRL__LRBBM_MODE_MASK |
+ UVD_CGC_CTRL__WCB_MODE_MASK |
+ UVD_CGC_CTRL__VCPU_MODE_MASK |
+ UVD_CGC_CTRL__JPEG_MODE_MASK |
+ UVD_CGC_CTRL__SCPU_MODE_MASK);
+ data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
+ UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
+ UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
+ UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
+ UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
+ data1 |= suvd_flags;
+
+ WREG32(mmUVD_CGC_CTRL, data);
+ WREG32(mmUVD_CGC_GATE, 0);
+ WREG32(mmUVD_SUVD_CGC_GATE, data1);
+ WREG32(mmUVD_SUVD_CGC_CTRL, data2);
+}
+
+#if 0
+static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
+{
+ uint32_t data, data1, cgc_flags, suvd_flags;
+
+ data = RREG32(mmUVD_CGC_GATE);
+ data1 = RREG32(mmUVD_SUVD_CGC_GATE);
+
+ cgc_flags = UVD_CGC_GATE__SYS_MASK |
+ UVD_CGC_GATE__UDEC_MASK |
+ UVD_CGC_GATE__MPEG2_MASK |
+ UVD_CGC_GATE__RBC_MASK |
+ UVD_CGC_GATE__LMI_MC_MASK |
+ UVD_CGC_GATE__IDCT_MASK |
+ UVD_CGC_GATE__MPRD_MASK |
+ UVD_CGC_GATE__MPC_MASK |
+ UVD_CGC_GATE__LBSI_MASK |
+ UVD_CGC_GATE__LRBBM_MASK |
+ UVD_CGC_GATE__UDEC_RE_MASK |
+ UVD_CGC_GATE__UDEC_CM_MASK |
+ UVD_CGC_GATE__UDEC_IT_MASK |
+ UVD_CGC_GATE__UDEC_DB_MASK |
+ UVD_CGC_GATE__UDEC_MP_MASK |
+ UVD_CGC_GATE__WCB_MASK |
+ UVD_CGC_GATE__VCPU_MASK |
+ UVD_CGC_GATE__SCPU_MASK;
+
+ suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
+ UVD_SUVD_CGC_GATE__SIT_MASK |
+ UVD_SUVD_CGC_GATE__SMP_MASK |
+ UVD_SUVD_CGC_GATE__SCM_MASK |
+ UVD_SUVD_CGC_GATE__SDB_MASK;
+
+ data |= cgc_flags;
+ data1 |= suvd_flags;
+
+ WREG32(mmUVD_CGC_GATE, data);
+ WREG32(mmUVD_SUVD_CGC_GATE, data1);
+}
+#endif
+
static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ static int curstate = -1;
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
+ if (curstate == state)
+ return 0;
+
+ curstate = state;
+ if (enable) {
+ /* disable HW gating and enable SW gating */
+ uvd_v5_0_set_sw_clock_gating(adev);
+ } else {
+ /* wait for STATUS to clear */
+ if (uvd_v5_0_wait_for_idle(handle))
+ return -EBUSY;
+
+ /* enable HW gates because UVD is idle */
+/* uvd_v5_0_set_hw_clock_gating(adev); */
+ }
+
return 0;
}
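Both the uvd_v5_0 and uvd_v6_0 clockgating handlers now cache the last requested state in a function-local static and bail out early on repeated requests. The pattern in isolation, with a hypothetical name; note that a function-local static is shared across all devices, which only holds up while a single UVD instance exists:

/* Idempotent state-transition guard, as used by the UVD handlers
 * above; sketch only.  The static is per-function, not per-device. */
static int set_cg_state_once_sketch(void *handle,
				    enum amd_clockgating_state state)
{
	static int curstate = -1;

	if (curstate == state)
		return 0;		/* nothing to do */
	curstate = state;
	/* ... perform the actual gating/ungating transition ... */
	return 0;
}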
@@ -789,6 +792,7 @@ static int uvd_v5_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
+ .name = "uvd_v5_0",
.early_init = uvd_v5_0_early_init,
.late_init = NULL,
.sw_init = uvd_v5_0_sw_init,
@@ -800,7 +804,6 @@ const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.is_idle = uvd_v5_0_is_idle,
.wait_for_idle = uvd_v5_0_wait_for_idle,
.soft_reset = uvd_v5_0_soft_reset,
- .print_status = uvd_v5_0_print_status,
.set_clockgating_state = uvd_v5_0_set_clockgating_state,
.set_powergating_state = uvd_v5_0_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d49379145..c9929d665 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -31,11 +31,15 @@
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "vi.h"
static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
+static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
/**
* uvd_v6_0_ring_get_rptr - get read pointer
@@ -110,7 +114,7 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, CP_PACKET2, 0xf,
&adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);
return r;
@@ -270,20 +274,24 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
offset += size;
- size = AMDGPU_UVD_STACK_SIZE;
+ size = AMDGPU_UVD_HEAP_SIZE;
WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
offset += size;
- size = AMDGPU_UVD_HEAP_SIZE;
+ size = AMDGPU_UVD_STACK_SIZE +
+ (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+
+ WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}
+#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
bool enable)
{
@@ -360,157 +368,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
WREG32(mmUVD_CGC_GATE, data);
WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
-
-static void tonga_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
- bool enable)
-{
- u32 data, data1;
-
- data = RREG32(mmUVD_CGC_GATE);
- data1 = RREG32(mmUVD_SUVD_CGC_GATE);
- if (enable) {
- data |= UVD_CGC_GATE__SYS_MASK |
- UVD_CGC_GATE__UDEC_MASK |
- UVD_CGC_GATE__MPEG2_MASK |
- UVD_CGC_GATE__RBC_MASK |
- UVD_CGC_GATE__LMI_MC_MASK |
- UVD_CGC_GATE__IDCT_MASK |
- UVD_CGC_GATE__MPRD_MASK |
- UVD_CGC_GATE__MPC_MASK |
- UVD_CGC_GATE__LBSI_MASK |
- UVD_CGC_GATE__LRBBM_MASK |
- UVD_CGC_GATE__UDEC_RE_MASK |
- UVD_CGC_GATE__UDEC_CM_MASK |
- UVD_CGC_GATE__UDEC_IT_MASK |
- UVD_CGC_GATE__UDEC_DB_MASK |
- UVD_CGC_GATE__UDEC_MP_MASK |
- UVD_CGC_GATE__WCB_MASK |
- UVD_CGC_GATE__VCPU_MASK |
- UVD_CGC_GATE__SCPU_MASK;
- data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
- UVD_SUVD_CGC_GATE__SIT_MASK |
- UVD_SUVD_CGC_GATE__SMP_MASK |
- UVD_SUVD_CGC_GATE__SCM_MASK |
- UVD_SUVD_CGC_GATE__SDB_MASK;
- } else {
- data &= ~(UVD_CGC_GATE__SYS_MASK |
- UVD_CGC_GATE__UDEC_MASK |
- UVD_CGC_GATE__MPEG2_MASK |
- UVD_CGC_GATE__RBC_MASK |
- UVD_CGC_GATE__LMI_MC_MASK |
- UVD_CGC_GATE__LMI_UMC_MASK |
- UVD_CGC_GATE__IDCT_MASK |
- UVD_CGC_GATE__MPRD_MASK |
- UVD_CGC_GATE__MPC_MASK |
- UVD_CGC_GATE__LBSI_MASK |
- UVD_CGC_GATE__LRBBM_MASK |
- UVD_CGC_GATE__UDEC_RE_MASK |
- UVD_CGC_GATE__UDEC_CM_MASK |
- UVD_CGC_GATE__UDEC_IT_MASK |
- UVD_CGC_GATE__UDEC_DB_MASK |
- UVD_CGC_GATE__UDEC_MP_MASK |
- UVD_CGC_GATE__WCB_MASK |
- UVD_CGC_GATE__VCPU_MASK |
- UVD_CGC_GATE__SCPU_MASK);
- data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
- UVD_SUVD_CGC_GATE__SIT_MASK |
- UVD_SUVD_CGC_GATE__SMP_MASK |
- UVD_SUVD_CGC_GATE__SCM_MASK |
- UVD_SUVD_CGC_GATE__SDB_MASK);
- }
- WREG32(mmUVD_CGC_GATE, data);
- WREG32(mmUVD_SUVD_CGC_GATE, data1);
-}
-
-static void uvd_v6_0_set_uvd_dynamic_clock_mode(struct amdgpu_device *adev,
- bool swmode)
-{
- u32 data, data1 = 0, data2;
-
- /* Always un-gate UVD REGS bit */
- data = RREG32(mmUVD_CGC_GATE);
- data &= ~(UVD_CGC_GATE__REGS_MASK);
- WREG32(mmUVD_CGC_GATE, data);
-
- data = RREG32(mmUVD_CGC_CTRL);
- data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
- UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
- data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
- 1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER) |
- 4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY);
-
- data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
- if (swmode) {
- data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
- UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
- UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
- UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
- UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
- UVD_CGC_CTRL__SYS_MODE_MASK |
- UVD_CGC_CTRL__UDEC_MODE_MASK |
- UVD_CGC_CTRL__MPEG2_MODE_MASK |
- UVD_CGC_CTRL__REGS_MODE_MASK |
- UVD_CGC_CTRL__RBC_MODE_MASK |
- UVD_CGC_CTRL__LMI_MC_MODE_MASK |
- UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
- UVD_CGC_CTRL__IDCT_MODE_MASK |
- UVD_CGC_CTRL__MPRD_MODE_MASK |
- UVD_CGC_CTRL__MPC_MODE_MASK |
- UVD_CGC_CTRL__LBSI_MODE_MASK |
- UVD_CGC_CTRL__LRBBM_MODE_MASK |
- UVD_CGC_CTRL__WCB_MODE_MASK |
- UVD_CGC_CTRL__VCPU_MODE_MASK |
- UVD_CGC_CTRL__JPEG_MODE_MASK |
- UVD_CGC_CTRL__SCPU_MODE_MASK);
- data1 |= UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
- UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK;
- data1 &= ~UVD_CGC_CTRL2__GATER_DIV_ID_MASK;
- data1 |= 7 << REG_FIELD_SHIFT(UVD_CGC_CTRL2, GATER_DIV_ID);
- data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
- } else {
- data |= UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
- UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
- UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
- UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
- UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
- UVD_CGC_CTRL__SYS_MODE_MASK |
- UVD_CGC_CTRL__UDEC_MODE_MASK |
- UVD_CGC_CTRL__MPEG2_MODE_MASK |
- UVD_CGC_CTRL__REGS_MODE_MASK |
- UVD_CGC_CTRL__RBC_MODE_MASK |
- UVD_CGC_CTRL__LMI_MC_MODE_MASK |
- UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
- UVD_CGC_CTRL__IDCT_MODE_MASK |
- UVD_CGC_CTRL__MPRD_MODE_MASK |
- UVD_CGC_CTRL__MPC_MODE_MASK |
- UVD_CGC_CTRL__LBSI_MODE_MASK |
- UVD_CGC_CTRL__LRBBM_MODE_MASK |
- UVD_CGC_CTRL__WCB_MODE_MASK |
- UVD_CGC_CTRL__VCPU_MODE_MASK |
- UVD_CGC_CTRL__SCPU_MODE_MASK;
- data2 |= UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
- UVD_SUVD_CGC_CTRL__SDB_MODE_MASK;
- }
- WREG32(mmUVD_CGC_CTRL, data);
- WREG32(mmUVD_SUVD_CGC_CTRL, data2);
-
- data = RREG32_UVD_CTX(ixUVD_CGC_CTRL2);
- data &= ~(REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) |
- REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) |
- REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID));
- data1 &= (REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) |
- REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) |
- REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID));
- data |= data1;
- WREG32_UVD_CTX(ixUVD_CGC_CTRL2, data);
-}
+#endif
/**
* uvd_v6_0_start - start UVD block
@@ -538,11 +396,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
/* Set dynamic clock gating in S/W control mode */
if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
- if (adev->flags & AMD_IS_APU)
- cz_set_uvd_clock_gating_branches(adev, false);
- else
- tonga_set_uvd_clock_gating_branches(adev, false);
- uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
+ uvd_v6_0_set_sw_clock_gating(adev);
} else {
/* disable clock gating */
uint32_t data = RREG32(mmUVD_CGC_CTRL);
@@ -777,7 +631,8 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib)
+ struct amdgpu_ib *ib,
+ unsigned vm_id, bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -854,112 +709,6 @@ static int uvd_v6_0_soft_reset(void *handle)
return uvd_v6_0_start(adev);
}
-static void uvd_v6_0_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- dev_info(adev->dev, "UVD 6.0 registers\n");
- dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n",
- RREG32(mmUVD_SEMA_ADDR_LOW));
- dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n",
- RREG32(mmUVD_SEMA_ADDR_HIGH));
- dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n",
- RREG32(mmUVD_SEMA_CMD));
- dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n",
- RREG32(mmUVD_GPCOM_VCPU_CMD));
- dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n",
- RREG32(mmUVD_GPCOM_VCPU_DATA0));
- dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n",
- RREG32(mmUVD_GPCOM_VCPU_DATA1));
- dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n",
- RREG32(mmUVD_ENGINE_CNTL));
- dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n",
- RREG32(mmUVD_SEMA_CNTL));
- dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n",
- RREG32(mmUVD_LMI_EXT40_ADDR));
- dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n",
- RREG32(mmUVD_CTX_INDEX));
- dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n",
- RREG32(mmUVD_CTX_DATA));
- dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n",
- RREG32(mmUVD_CGC_GATE));
- dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n",
- RREG32(mmUVD_CGC_CTRL));
- dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n",
- RREG32(mmUVD_LMI_CTRL2));
- dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n",
- RREG32(mmUVD_MASTINT_EN));
- dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n",
- RREG32(mmUVD_LMI_ADDR_EXT));
- dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n",
- RREG32(mmUVD_LMI_CTRL));
- dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n",
- RREG32(mmUVD_LMI_SWAP_CNTL));
- dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n",
- RREG32(mmUVD_MP_SWAP_CNTL));
- dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUXA0));
- dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUXA1));
- dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUXB0));
- dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUXB1));
- dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n",
- RREG32(mmUVD_MPC_SET_MUX));
- dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n",
- RREG32(mmUVD_MPC_SET_ALU));
- dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_OFFSET0));
- dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_SIZE0));
- dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_OFFSET1));
- dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_SIZE1));
- dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_OFFSET2));
- dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n",
- RREG32(mmUVD_VCPU_CACHE_SIZE2));
- dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n",
- RREG32(mmUVD_VCPU_CNTL));
- dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n",
- RREG32(mmUVD_SOFT_RESET));
- dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n",
- RREG32(mmUVD_RBC_IB_SIZE));
- dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n",
- RREG32(mmUVD_RBC_RB_RPTR));
- dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n",
- RREG32(mmUVD_RBC_RB_WPTR));
- dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
- RREG32(mmUVD_RBC_RB_WPTR_CNTL));
- dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n",
- RREG32(mmUVD_RBC_RB_CNTL));
- dev_info(adev->dev, " UVD_STATUS=0x%08X\n",
- RREG32(mmUVD_STATUS));
- dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
- RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
- dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
- RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
- dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
- RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
- dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
- RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
- dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n",
- RREG32(mmUVD_CONTEXT_ID));
- dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
- dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
- RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
-}
-
static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -978,25 +727,146 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
+{
+ uint32_t data, data1, data2, suvd_flags;
+
+ data = RREG32(mmUVD_CGC_CTRL);
+ data1 = RREG32(mmUVD_SUVD_CGC_GATE);
+ data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
+
+ data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
+ UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
+
+ suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
+ UVD_SUVD_CGC_GATE__SIT_MASK |
+ UVD_SUVD_CGC_GATE__SMP_MASK |
+ UVD_SUVD_CGC_GATE__SCM_MASK |
+ UVD_SUVD_CGC_GATE__SDB_MASK;
+
+ data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
+ (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
+ (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));
+
+ data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
+ UVD_CGC_CTRL__SYS_MODE_MASK |
+ UVD_CGC_CTRL__UDEC_MODE_MASK |
+ UVD_CGC_CTRL__MPEG2_MODE_MASK |
+ UVD_CGC_CTRL__REGS_MODE_MASK |
+ UVD_CGC_CTRL__RBC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_MC_MODE_MASK |
+ UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
+ UVD_CGC_CTRL__IDCT_MODE_MASK |
+ UVD_CGC_CTRL__MPRD_MODE_MASK |
+ UVD_CGC_CTRL__MPC_MODE_MASK |
+ UVD_CGC_CTRL__LBSI_MODE_MASK |
+ UVD_CGC_CTRL__LRBBM_MODE_MASK |
+ UVD_CGC_CTRL__WCB_MODE_MASK |
+ UVD_CGC_CTRL__VCPU_MODE_MASK |
+ UVD_CGC_CTRL__JPEG_MODE_MASK |
+ UVD_CGC_CTRL__SCPU_MODE_MASK |
+ UVD_CGC_CTRL__JPEG2_MODE_MASK);
+ data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
+ UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
+ UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
+ UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
+ UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
+ data1 |= suvd_flags;
+
+ WREG32(mmUVD_CGC_CTRL, data);
+ WREG32(mmUVD_CGC_GATE, 0);
+ WREG32(mmUVD_SUVD_CGC_GATE, data1);
+ WREG32(mmUVD_SUVD_CGC_CTRL, data2);
+}
+
+#if 0
+static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
+{
+ uint32_t data, data1, cgc_flags, suvd_flags;
+
+ data = RREG32(mmUVD_CGC_GATE);
+ data1 = RREG32(mmUVD_SUVD_CGC_GATE);
+
+ cgc_flags = UVD_CGC_GATE__SYS_MASK |
+ UVD_CGC_GATE__UDEC_MASK |
+ UVD_CGC_GATE__MPEG2_MASK |
+ UVD_CGC_GATE__RBC_MASK |
+ UVD_CGC_GATE__LMI_MC_MASK |
+ UVD_CGC_GATE__IDCT_MASK |
+ UVD_CGC_GATE__MPRD_MASK |
+ UVD_CGC_GATE__MPC_MASK |
+ UVD_CGC_GATE__LBSI_MASK |
+ UVD_CGC_GATE__LRBBM_MASK |
+ UVD_CGC_GATE__UDEC_RE_MASK |
+ UVD_CGC_GATE__UDEC_CM_MASK |
+ UVD_CGC_GATE__UDEC_IT_MASK |
+ UVD_CGC_GATE__UDEC_DB_MASK |
+ UVD_CGC_GATE__UDEC_MP_MASK |
+ UVD_CGC_GATE__WCB_MASK |
+ UVD_CGC_GATE__VCPU_MASK |
+ UVD_CGC_GATE__SCPU_MASK |
+ UVD_CGC_GATE__JPEG_MASK |
+ UVD_CGC_GATE__JPEG2_MASK;
+
+ suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
+ UVD_SUVD_CGC_GATE__SIT_MASK |
+ UVD_SUVD_CGC_GATE__SMP_MASK |
+ UVD_SUVD_CGC_GATE__SCM_MASK |
+ UVD_SUVD_CGC_GATE__SDB_MASK;
+
+ data |= cgc_flags;
+ data1 |= suvd_flags;
+
+ WREG32(mmUVD_CGC_GATE, data);
+ WREG32(mmUVD_SUVD_CGC_GATE, data1);
+}
+#endif
+
+static void uvd_v6_set_bypass_mode(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
+
+ if (enable)
+ tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+ GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+ else
+ tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
+ GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
+
+ WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
+}
+
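uvd_v6_set_bypass_mode itself is a straight read-modify-write of an SMC-indexed register through RREG32_SMC/WREG32_SMC. The same shape as a generic hedged helper, plus how the call above would map onto it:

/* Generic set/clear of mask bits in an SMC-indexed register;
 * hypothetical helper, shown only to factor out the pattern. */
static void smc_update_bits_sketch(struct amdgpu_device *adev,
				   u32 reg, u32 mask, bool set)
{
	u32 tmp = RREG32_SMC(reg);

	if (set)
		tmp |= mask;
	else
		tmp &= ~mask;
	WREG32_SMC(reg, tmp);
}

/* Equivalent of uvd_v6_set_bypass_mode(adev, enable):
 * smc_update_bits_sketch(adev, ixGCK_DFS_BYPASS_CNTL,
 *			  GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
 *			  GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK, enable); */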
static int uvd_v6_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ static int curstate = -1;
+
+ if (adev->asic_type == CHIP_FIJI)
+ uvd_v6_set_bypass_mode(adev, enable);
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
+ if (curstate == state)
+ return 0;
+
+ curstate = state;
if (enable) {
- if (adev->flags & AMD_IS_APU)
- cz_set_uvd_clock_gating_branches(adev, enable);
- else
- tonga_set_uvd_clock_gating_branches(adev, enable);
- uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
+ /* disable HW gating and enable SW gating */
+ uvd_v6_0_set_sw_clock_gating(adev);
} else {
- uint32_t data = RREG32(mmUVD_CGC_CTRL);
- data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
- WREG32(mmUVD_CGC_CTRL, data);
+ /* wait for STATUS to clear */
+ if (uvd_v6_0_wait_for_idle(handle))
+ return -EBUSY;
+
+ /* enable HW gates because UVD is idle */
+/* uvd_v6_0_set_hw_clock_gating(adev); */
}
return 0;
@@ -1026,6 +896,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
+ .name = "uvd_v6_0",
.early_init = uvd_v6_0_early_init,
.late_init = NULL,
.sw_init = uvd_v6_0_sw_init,
@@ -1037,7 +908,6 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.is_idle = uvd_v6_0_is_idle,
.wait_for_idle = uvd_v6_0_wait_for_idle,
.soft_reset = uvd_v6_0_soft_reset,
- .print_status = uvd_v6_0_print_status,
.set_clockgating_state = uvd_v6_0_set_clockgating_state,
.set_powergating_state = uvd_v6_0_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index c7e885bcf..45d92aceb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -44,7 +44,7 @@
static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
-
+static int vce_v2_0_wait_for_idle(void *handle);
/**
* vce_v2_0_ring_get_rptr - get read pointer
*
@@ -201,14 +201,14 @@ static int vce_v2_0_sw_init(void *handle)
ring = &adev->vce.ring[0];
sprintf(ring->name, "vce0");
- r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
&adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
if (r)
return r;
ring = &adev->vce.ring[1];
sprintf(ring->name, "vce1");
- r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
&adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
if (r)
return r;
@@ -240,7 +240,8 @@ static int vce_v2_0_hw_init(void *handle)
r = vce_v2_0_start(adev);
if (r)
- return r;
+/* this error means the VCPU is not in a running state, so just skip the ring test rather than stopping driver initialization */
+ return 0;
ring = &adev->vce.ring[0];
ring->ready = true;
@@ -318,7 +319,7 @@ static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
- } else {
+ } else {
tmp = RREG32(mmVCE_CLOCK_GATING_B);
tmp |= 0xe7;
tmp &= ~0xe70000;
@@ -339,6 +340,21 @@ static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
u32 orig, tmp;
+ if (gated) {
+ if (vce_v2_0_wait_for_idle(adev)) {
+ DRM_INFO("VCE is busy, Can't set clock gateing");
+ return;
+ }
+ WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+ WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(100);
+ WREG32(mmVCE_STATUS, 0);
+ } else {
+ WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+ WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(100);
+ }
+
tmp = RREG32(mmVCE_CLOCK_GATING_B);
tmp &= ~0x00060006;
if (gated) {
@@ -362,6 +378,7 @@ static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
if (gated)
WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
+ WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
}
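The new gating path drives the VCE engine through WREG32_P, a masked write: bits covered by the mask keep their current value and the remaining bits come from the supplied value. Its effective semantics, sketched as a plain function:

/* Open-coded equivalent of the WREG32_P masked write used above;
 * sketch only.  Bits set in 'mask' are preserved, the remaining
 * bits are taken from 'val'. */
static void wreg32_p_sketch(struct amdgpu_device *adev,
			    u32 reg, u32 val, u32 mask)
{
	u32 tmp = RREG32(reg);

	tmp &= mask;		/* keep the masked-in bits */
	tmp |= (val & ~mask);	/* merge in the new bits */
	WREG32(reg, tmp);
}

So WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK) clears only the CLK_EN bit and leaves the rest of the register untouched.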
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
@@ -478,75 +495,6 @@ static int vce_v2_0_soft_reset(void *handle)
return vce_v2_0_start(adev);
}
-static void vce_v2_0_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "VCE 2.0 registers\n");
- dev_info(adev->dev, " VCE_STATUS=0x%08X\n",
- RREG32(mmVCE_STATUS));
- dev_info(adev->dev, " VCE_VCPU_CNTL=0x%08X\n",
- RREG32(mmVCE_VCPU_CNTL));
- dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
- RREG32(mmVCE_VCPU_CACHE_OFFSET0));
- dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE0=0x%08X\n",
- RREG32(mmVCE_VCPU_CACHE_SIZE0));
- dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
- RREG32(mmVCE_VCPU_CACHE_OFFSET1));
- dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE1=0x%08X\n",
- RREG32(mmVCE_VCPU_CACHE_SIZE1));
- dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
- RREG32(mmVCE_VCPU_CACHE_OFFSET2));
- dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE2=0x%08X\n",
- RREG32(mmVCE_VCPU_CACHE_SIZE2));
- dev_info(adev->dev, " VCE_SOFT_RESET=0x%08X\n",
- RREG32(mmVCE_SOFT_RESET));
- dev_info(adev->dev, " VCE_RB_BASE_LO2=0x%08X\n",
- RREG32(mmVCE_RB_BASE_LO2));
- dev_info(adev->dev, " VCE_RB_BASE_HI2=0x%08X\n",
- RREG32(mmVCE_RB_BASE_HI2));
- dev_info(adev->dev, " VCE_RB_SIZE2=0x%08X\n",
- RREG32(mmVCE_RB_SIZE2));
- dev_info(adev->dev, " VCE_RB_RPTR2=0x%08X\n",
- RREG32(mmVCE_RB_RPTR2));
- dev_info(adev->dev, " VCE_RB_WPTR2=0x%08X\n",
- RREG32(mmVCE_RB_WPTR2));
- dev_info(adev->dev, " VCE_RB_BASE_LO=0x%08X\n",
- RREG32(mmVCE_RB_BASE_LO));
- dev_info(adev->dev, " VCE_RB_BASE_HI=0x%08X\n",
- RREG32(mmVCE_RB_BASE_HI));
- dev_info(adev->dev, " VCE_RB_SIZE=0x%08X\n",
- RREG32(mmVCE_RB_SIZE));
- dev_info(adev->dev, " VCE_RB_RPTR=0x%08X\n",
- RREG32(mmVCE_RB_RPTR));
- dev_info(adev->dev, " VCE_RB_WPTR=0x%08X\n",
- RREG32(mmVCE_RB_WPTR));
- dev_info(adev->dev, " VCE_CLOCK_GATING_A=0x%08X\n",
- RREG32(mmVCE_CLOCK_GATING_A));
- dev_info(adev->dev, " VCE_CLOCK_GATING_B=0x%08X\n",
- RREG32(mmVCE_CLOCK_GATING_B));
- dev_info(adev->dev, " VCE_CGTT_CLK_OVERRIDE=0x%08X\n",
- RREG32(mmVCE_CGTT_CLK_OVERRIDE));
- dev_info(adev->dev, " VCE_UENC_CLOCK_GATING=0x%08X\n",
- RREG32(mmVCE_UENC_CLOCK_GATING));
- dev_info(adev->dev, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
- RREG32(mmVCE_UENC_REG_CLOCK_GATING));
- dev_info(adev->dev, " VCE_SYS_INT_EN=0x%08X\n",
- RREG32(mmVCE_SYS_INT_EN));
- dev_info(adev->dev, " VCE_LMI_CTRL2=0x%08X\n",
- RREG32(mmVCE_LMI_CTRL2));
- dev_info(adev->dev, " VCE_LMI_CTRL=0x%08X\n",
- RREG32(mmVCE_LMI_CTRL));
- dev_info(adev->dev, " VCE_LMI_VM_CTRL=0x%08X\n",
- RREG32(mmVCE_LMI_VM_CTRL));
- dev_info(adev->dev, " VCE_LMI_SWAP_CNTL=0x%08X\n",
- RREG32(mmVCE_LMI_SWAP_CNTL));
- dev_info(adev->dev, " VCE_LMI_SWAP_CNTL1=0x%08X\n",
- RREG32(mmVCE_LMI_SWAP_CNTL1));
- dev_info(adev->dev, " VCE_LMI_CACHE_CTRL=0x%08X\n",
- RREG32(mmVCE_LMI_CACHE_CTRL));
-}
-
static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -619,6 +567,7 @@ static int vce_v2_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs vce_v2_0_ip_funcs = {
+ .name = "vce_v2_0",
.early_init = vce_v2_0_early_init,
.late_init = NULL,
.sw_init = vce_v2_0_sw_init,
@@ -630,7 +579,6 @@ const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.is_idle = vce_v2_0_is_idle,
.wait_for_idle = vce_v2_0_wait_for_idle,
.soft_reset = vce_v2_0_soft_reset,
- .print_status = vce_v2_0_print_status,
.set_clockgating_state = vce_v2_0_set_clockgating_state,
.set_powergating_state = vce_v2_0_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index ce468ee5d..30e8099e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -40,9 +40,9 @@
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
-#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
-#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
-#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
+#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
#define VCE_V3_0_FW_SIZE (384 * 1024)
#define VCE_V3_0_STACK_SIZE (64 * 1024)
@@ -315,9 +315,11 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
u32 tmp;
- /* Fiji, Stoney are single pipe */
+ /* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
if ((adev->asic_type == CHIP_FIJI) ||
- (adev->asic_type == CHIP_STONEY))
+ (adev->asic_type == CHIP_STONEY) ||
+ (adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_POLARIS11))
return AMDGPU_VCE_HARVEST_VCE1;
/* Tonga and CZ are dual or single pipe */
@@ -381,14 +383,14 @@ static int vce_v3_0_sw_init(void *handle)
ring = &adev->vce.ring[0];
sprintf(ring->name, "vce0");
- r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
&adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
if (r)
return r;
ring = &adev->vce.ring[1];
sprintf(ring->name, "vce1");
- r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
+ r = amdgpu_ring_init(adev, ring, 512, VCE_CMD_NO_OP, 0xf,
&adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
if (r)
return r;
@@ -564,73 +566,6 @@ static int vce_v3_0_soft_reset(void *handle)
return vce_v3_0_start(adev);
}
-static void vce_v3_0_print_status(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "VCE 3.0 registers\n");
- dev_info(adev->dev, " VCE_STATUS=0x%08X\n",
- RREG32(mmVCE_STATUS));
- dev_info(adev->dev, " VCE_VCPU_CNTL=0x%08X\n",
- RREG32(mmVCE_VCPU_CNTL));
- dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
- RREG32(mmVCE_VCPU_CACHE_OFFSET0));
- dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE0=0x%08X\n",
- RREG32(mmVCE_VCPU_CACHE_SIZE0));
- dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
- RREG32(mmVCE_VCPU_CACHE_OFFSET1));
- dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE1=0x%08X\n",
- RREG32(mmVCE_VCPU_CACHE_SIZE1));
- dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
- RREG32(mmVCE_VCPU_CACHE_OFFSET2));
- dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE2=0x%08X\n",
- RREG32(mmVCE_VCPU_CACHE_SIZE2));
- dev_info(adev->dev, " VCE_SOFT_RESET=0x%08X\n",
- RREG32(mmVCE_SOFT_RESET));
- dev_info(adev->dev, " VCE_RB_BASE_LO2=0x%08X\n",
- RREG32(mmVCE_RB_BASE_LO2));
- dev_info(adev->dev, " VCE_RB_BASE_HI2=0x%08X\n",
- RREG32(mmVCE_RB_BASE_HI2));
- dev_info(adev->dev, " VCE_RB_SIZE2=0x%08X\n",
- RREG32(mmVCE_RB_SIZE2));
- dev_info(adev->dev, " VCE_RB_RPTR2=0x%08X\n",
- RREG32(mmVCE_RB_RPTR2));
- dev_info(adev->dev, " VCE_RB_WPTR2=0x%08X\n",
- RREG32(mmVCE_RB_WPTR2));
- dev_info(adev->dev, " VCE_RB_BASE_LO=0x%08X\n",
- RREG32(mmVCE_RB_BASE_LO));
- dev_info(adev->dev, " VCE_RB_BASE_HI=0x%08X\n",
- RREG32(mmVCE_RB_BASE_HI));
- dev_info(adev->dev, " VCE_RB_SIZE=0x%08X\n",
- RREG32(mmVCE_RB_SIZE));
- dev_info(adev->dev, " VCE_RB_RPTR=0x%08X\n",
- RREG32(mmVCE_RB_RPTR));
- dev_info(adev->dev, " VCE_RB_WPTR=0x%08X\n",
- RREG32(mmVCE_RB_WPTR));
- dev_info(adev->dev, " VCE_CLOCK_GATING_A=0x%08X\n",
- RREG32(mmVCE_CLOCK_GATING_A));
- dev_info(adev->dev, " VCE_CLOCK_GATING_B=0x%08X\n",
- RREG32(mmVCE_CLOCK_GATING_B));
- dev_info(adev->dev, " VCE_UENC_CLOCK_GATING=0x%08X\n",
- RREG32(mmVCE_UENC_CLOCK_GATING));
- dev_info(adev->dev, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
- RREG32(mmVCE_UENC_REG_CLOCK_GATING));
- dev_info(adev->dev, " VCE_SYS_INT_EN=0x%08X\n",
- RREG32(mmVCE_SYS_INT_EN));
- dev_info(adev->dev, " VCE_LMI_CTRL2=0x%08X\n",
- RREG32(mmVCE_LMI_CTRL2));
- dev_info(adev->dev, " VCE_LMI_CTRL=0x%08X\n",
- RREG32(mmVCE_LMI_CTRL));
- dev_info(adev->dev, " VCE_LMI_VM_CTRL=0x%08X\n",
- RREG32(mmVCE_LMI_VM_CTRL));
- dev_info(adev->dev, " VCE_LMI_SWAP_CNTL=0x%08X\n",
- RREG32(mmVCE_LMI_SWAP_CNTL));
- dev_info(adev->dev, " VCE_LMI_SWAP_CNTL1=0x%08X\n",
- RREG32(mmVCE_LMI_SWAP_CNTL1));
- dev_info(adev->dev, " VCE_LMI_CACHE_CTRL=0x%08X\n",
- RREG32(mmVCE_LMI_CACHE_CTRL));
-}
-
static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned type,
@@ -739,6 +674,7 @@ static int vce_v3_0_set_powergating_state(void *handle,
}
const struct amd_ip_funcs vce_v3_0_ip_funcs = {
+ .name = "vce_v3_0",
.early_init = vce_v3_0_early_init,
.late_init = NULL,
.sw_init = vce_v3_0_sw_init,
@@ -750,7 +686,6 @@ const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.is_idle = vce_v3_0_is_idle,
.wait_for_idle = vce_v3_0_wait_for_idle,
.soft_reset = vce_v3_0_soft_reset,
- .print_status = vce_v3_0_print_status,
.set_clockgating_state = vce_v3_0_set_clockgating_state,
.set_powergating_state = vce_v3_0_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 1c120efa2..d8fca2e11 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -78,6 +78,8 @@
#include "amdgpu_acp.h"
#endif
+/*(DEBLOBBED)*/
+
/*
* Indirect registers accessor
*/
@@ -276,6 +278,8 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
stoney_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
default:
break;
}
@@ -414,11 +418,25 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
+static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+{
+ u32 caps = 0;
+ u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+
+ if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
+ caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
+
+ if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
+ caps |= AMDGPU_VIRT_CAPS_IS_VF;
+
+ return caps;
+}
+
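vi_get_virtual_caps reduces BIF_IOV_FUNC_IDENTIFIER to two capability bits via REG_GET_FIELD. A hedged sketch of a consumer, with a hypothetical helper name:

/* Illustrative consumer of the virtualization caps; not driver code. */
static bool vi_is_virtual_function_sketch(struct amdgpu_device *adev)
{
	u32 caps = vi_get_virtual_caps(adev);

	/* the VF bit is only meaningful once SR-IOV is enabled */
	return (caps & AMDGPU_VIRT_CAPS_SRIOV_EN) &&
	       (caps & AMDGPU_VIRT_CAPS_IS_VF);
}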
+static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
{mmGB_MACROTILE_MODE7, true},
};
-static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
+static const struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
{mmGB_TILE_MODE7, true},
{mmGB_TILE_MODE12, true},
{mmGB_TILE_MODE17, true},
@@ -426,7 +444,7 @@ static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
{mmGB_MACROTILE_MODE7, true},
};
-static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
+static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
{mmGRBM_STATUS, false},
{mmGRBM_STATUS2, false},
{mmGRBM_STATUS_SE0, false},
@@ -525,8 +543,8 @@ static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value)
{
- struct amdgpu_allowed_register_entry *asic_register_table = NULL;
- struct amdgpu_allowed_register_entry *asic_register_entry;
+ const struct amdgpu_allowed_register_entry *asic_register_table = NULL;
+ const struct amdgpu_allowed_register_entry *asic_register_entry;
uint32_t size, i;
*value = 0;
@@ -537,6 +555,8 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
break;
case CHIP_FIJI:
case CHIP_TONGA:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
case CHIP_CARRIZO:
case CHIP_STONEY:
asic_register_table = cz_allowed_read_registers;
@@ -907,6 +927,74 @@ static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
},
};
+static const struct amdgpu_ip_block_version polaris11_ip_blocks[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &tonga_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 7,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &amdgpu_pp_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 2,
+ .rev = 0,
+ .funcs = &dce_v11_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 3,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 4,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+};
+
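The Polaris table keeps the same "ORDER MATTERS!" contract as the other vi tables: the core walks the array front to back when bringing blocks up. Roughly, and with far less bookkeeping than the real core does:

/* Sketch of the ordered walk over an IP block table; the real core
 * also tracks per-block status, suspend/resume, and late init. */
static int ip_blocks_early_init_sketch(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		r = adev->ip_blocks[i].funcs->early_init(adev);
		if (r)
			return r;	/* stop at the first failure, in order */
	}
	return 0;
}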
static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -999,6 +1087,11 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_blocks = tonga_ip_blocks;
adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ adev->ip_blocks = polaris11_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks);
+ break;
case CHIP_CARRIZO:
case CHIP_STONEY:
adev->ip_blocks = cz_ip_blocks;
@@ -1036,7 +1129,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.get_xclk = &vi_get_xclk,
.set_uvd_clocks = &vi_set_uvd_clocks,
.set_vce_clocks = &vi_set_vce_clocks,
- .get_cu_info = &gfx_v8_0_get_cu_info,
+ .get_virtual_caps = &vi_get_virtual_caps,
/* these should be moved to their own ip modules */
.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
.wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
@@ -1076,18 +1169,68 @@ static int vi_common_early_init(void *handle)
adev->external_rev_id = 0x1;
break;
case CHIP_FIJI:
- adev->cg_flags = 0;
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CGTS_LS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_ROM_MGCG |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_MC_LS;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
case CHIP_TONGA:
- adev->cg_flags = 0;
+ adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
+ case CHIP_POLARIS11:
+ adev->cg_flags = 0;
+ adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 0x5A;
+ break;
+ case CHIP_POLARIS10:
+ adev->cg_flags = 0;
+ adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 0x50;
+ break;
case CHIP_CARRIZO:
+ adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_RLC_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CGTS_LS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS;
+ adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 0x1;
+ break;
case CHIP_STONEY:
- adev->cg_flags = 0;
+ adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_HDP_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x1;
break;
@@ -1164,24 +1307,19 @@ static int vi_common_wait_for_idle(void *handle)
return 0;
}
-static void vi_common_print_status(void *handle)
-{
- return;
-}
-
static int vi_common_soft_reset(void *handle)
{
return 0;
}
-static void fiji_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
- bool enable)
+static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t temp, data;
temp = data = RREG32_PCIE(ixPCIE_CNTL2);
- if (enable)
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
PCIE_CNTL2__MST_MEM_LS_EN_MASK |
PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
@@ -1194,14 +1332,14 @@ static void fiji_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
WREG32_PCIE(ixPCIE_CNTL2, data);
}
-static void fiji_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
+static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t temp, data;
temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
- if (enable)
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
else
data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
@@ -1210,14 +1348,14 @@ static void fiji_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev
WREG32(mmHDP_HOST_PATH_CNTL, data);
}
-static void fiji_update_hdp_light_sleep(struct amdgpu_device *adev,
- bool enable)
+static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t temp, data;
temp = data = RREG32(mmHDP_MEM_POWER_LS);
- if (enable)
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
else
data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
@@ -1226,14 +1364,14 @@ static void fiji_update_hdp_light_sleep(struct amdgpu_device *adev,
WREG32(mmHDP_MEM_POWER_LS, data);
}
-static void fiji_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
+static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t temp, data;
temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
- if (enable)
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
else
@@ -1245,19 +1383,28 @@ static void fiji_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev
}
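All four renamed vi_update_* helpers share one guard: the enable path is taken only when the matching AMD_CG_SUPPORT_* bit is set in adev->cg_flags, so ASICs that leave the bit clear keep the feature off even when gating is requested globally. The shared shape, sketched with hypothetical parameters:

/* Common pattern of the vi_update_* clockgating helpers; sketch only. */
static void vi_update_feature_sketch(struct amdgpu_device *adev,
				     bool enable, u32 support_flag,
				     u32 reg, u32 enable_mask)
{
	u32 temp, data;

	temp = data = RREG32(reg);

	if (enable && (adev->cg_flags & support_flag))
		data |= enable_mask;
	else
		data &= ~enable_mask;

	if (temp != data)	/* skip the write when nothing changed */
		WREG32(reg, data);
}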
static int vi_common_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
+ enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->asic_type) {
case CHIP_FIJI:
- fiji_update_bif_medium_grain_light_sleep(adev,
+ vi_update_bif_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
- fiji_update_hdp_medium_grain_clock_gating(adev,
+ vi_update_hdp_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ vi_update_hdp_light_sleep(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ vi_update_rom_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE ? true : false);
+ break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ vi_update_bif_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
- fiji_update_hdp_light_sleep(adev,
+ vi_update_hdp_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- fiji_update_rom_medium_grain_clock_gating(adev,
+ vi_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
default:
@@ -1273,6 +1420,7 @@ static int vi_common_set_powergating_state(void *handle,
}
const struct amd_ip_funcs vi_common_ip_funcs = {
+ .name = "vi_common",
.early_init = vi_common_early_init,
.late_init = NULL,
.sw_init = vi_common_sw_init,
@@ -1284,7 +1432,6 @@ const struct amd_ip_funcs vi_common_ip_funcs = {
.is_idle = vi_common_is_idle,
.wait_for_idle = vi_common_wait_for_idle,
.soft_reset = vi_common_soft_reset,
- .print_status = vi_common_print_status,
.set_clockgating_state = vi_common_set_clockgating_state,
.set_powergating_state = vi_common_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index ace49976f..062ee1676 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -54,7 +54,8 @@
#define AUD3_REGISTER_OFFSET (0x17b4 - 0x17a8)
#define AUD4_REGISTER_OFFSET (0x17b8 - 0x17a8)
#define AUD5_REGISTER_OFFSET (0x17bc - 0x17a8)
-#define AUD6_REGISTER_OFFSET (0x17c4 - 0x17a8)
+#define AUD6_REGISTER_OFFSET (0x17c0 - 0x17a8)
+#define AUD7_REGISTER_OFFSET (0x17c4 - 0x17a8)
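The corrected table makes the audio instance offsets a clean 4-byte stride from the AUD0 base (0x17a8), which is exactly what the old AUD6 value broke. As a check:

/* Hypothetical helper: each AUDn register sits 4 bytes past the
 * previous one, relative to the AUD0 base at 0x17a8. */
static inline u32 aud_register_offset_sketch(unsigned int instance)
{
	return instance * 4;	/* AUD6: 6 * 4 = 0x18 = 0x17c0 - 0x17a8 */
}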
/* hpd instance offsets */
#define HPD0_REGISTER_OFFSET (0x1898 - 0x1898)
@@ -365,7 +366,7 @@
#define VCE_CMD_IB 0x00000002
#define VCE_CMD_FENCE 0x00000003
#define VCE_CMD_TRAP 0x00000004
-#define VCE_CMD_IB_AUTO 0x00000005
+#define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 07ac724e3..ee3e04e10 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -109,7 +109,7 @@ static int kfd_open(struct inode *inode, struct file *filep)
is_32bit_user_mode = in_compat_syscall();
- if (is_32bit_user_mode == true) {
+ if (is_32bit_user_mode) {
dev_warn(kfd_device,
"Process %d (32-bit) failed to open /dev/kfd\n"
"32-bit processes are not supported by amdkfd\n",
@@ -131,12 +131,11 @@ static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
void *data)
{
struct kfd_ioctl_get_version_args *args = data;
- int err = 0;
args->major_version = KFD_IOCTL_MAJOR_VERSION;
args->minor_version = KFD_IOCTL_MINOR_VERSION;
- return err;
+ return 0;
}
static int set_queue_properties_from_user(struct queue_properties *q_properties,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 4bb7f4223..f49c55119 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -216,7 +216,7 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
}
}
- if (set == false)
+ if (!set)
return -EBUSY;
pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
@@ -354,7 +354,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
return -ENOMEM;
}
- if (q->properties.is_active == true)
+ if (q->properties.is_active)
prev_active = true;
/*
@@ -363,9 +363,9 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
* and modify counter accordingly
*/
retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
- if ((q->properties.is_active == true) && (prev_active == false))
+ if ((q->properties.is_active) && (!prev_active))
dqm->queue_count++;
- else if ((q->properties.is_active == false) && (prev_active == true))
+ else if ((!q->properties.is_active) && (prev_active))
dqm->queue_count--;
if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
@@ -954,7 +954,7 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm,
if (lock)
mutex_lock(&dqm->lock);
- if (dqm->active_runlist == false)
+ if (!dqm->active_runlist)
goto out;
pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index b6e28dcae..a6a4b2b1c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -177,9 +177,9 @@ static bool allocate_event_notification_slot(struct file *devkfd,
bool ret;
ret = allocate_free_slot(p, page, signal_slot_index);
- if (ret == false) {
+ if (!ret) {
ret = allocate_signal_page(devkfd, p);
- if (ret == true)
+ if (ret)
ret = allocate_free_slot(p, page, signal_slot_index);
}
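
The amdkfd hunks in this series are one mechanical cleanup: kernel style tests a bool directly rather than comparing it against true or false. A representative before/after with hypothetical names (ret, done, finish):

/* Before: redundant comparisons against bool literals. */
if (ret == false && done == true)
        finish();

/* After: identical truth table, idiomatic kernel style. */
if (!ret && done)
        finish();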
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 8fa894100..9beae87aa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -300,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
break;
}
- if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
+ if (!kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE)) {
pr_err("amdkfd: failed to init kernel queue\n");
kfree(kq);
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 90f391434..ca8c09326 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -98,7 +98,7 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
int retval;
BUG_ON(!pm);
- BUG_ON(pm->allocated == true);
+ BUG_ON(pm->allocated);
BUG_ON(is_over_subscription == NULL);
pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
@@ -292,7 +292,7 @@ static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
q->properties.doorbell_off;
packet->mes_map_queues_ordinals[0].bitfields3.is_static =
- (use_static == true) ? 1 : 0;
+ (use_static) ? 1 : 0;
packet->mes_map_queues_ordinals[0].mqd_addr_lo =
lower_32_bits(q->gart_mqd_addr);
@@ -357,7 +357,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
alloc_size_bytes);
list_for_each_entry(kq, &qpd->priv_queue_list, list) {
- if (kq->queue->properties.is_active != true)
+ if (!kq->queue->properties.is_active)
continue;
pr_debug("kfd: static_queue, mapping kernel q %d, is debug status %d\n",
@@ -383,7 +383,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
}
list_for_each_entry(q, &qpd->queues_list, list) {
- if (q->properties.is_active != true)
+ if (!q->properties.is_active)
continue;
pr_debug("kfd: static_queue, mapping user queue %d, is debug status %d\n",
@@ -531,7 +531,7 @@ fail_create_runlist:
fail_acquire_packet_buffer:
mutex_unlock(&pm->lock);
fail_create_runlist_ib:
- if (pm->allocated == true)
+ if (pm->allocated)
pm_release_ib(pm);
return retval;
}
@@ -647,7 +647,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
default:
BUG();
break;
- };
+ }
pm->priv_queue->ops.submit_packet(pm->priv_queue);
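
The last hunk above also drops a stray "};" closing a switch. In C, the extra semicolon after the block's closing brace is a separate null statement: legal, but noise that static checkers warn about. A minimal illustration ('type' is hypothetical):

switch (type) {
default:
        break;
};  /* <- this ';' is an empty statement; the fix writes '}' alone */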
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 74909e72a..884c96f50 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -666,7 +666,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.simd_count);
if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
- pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
+ pr_info_once("kfd: mem_banks_count truncated from %d to %d\n",
dev->node_props.mem_banks_count,
dev->mem_bank_count);
sysfs_show_32bit_prop(buffer, "mem_banks_count",
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 04e409066..afce1edbe 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -48,6 +48,8 @@ enum amd_asic_type {
CHIP_FIJI,
CHIP_CARRIZO,
CHIP_STONEY,
+ CHIP_POLARIS10,
+ CHIP_POLARIS11,
CHIP_LAST,
};
@@ -104,6 +106,7 @@ enum amd_powergating_state {
#define AMD_CG_SUPPORT_VCE_MGCG (1 << 14)
#define AMD_CG_SUPPORT_HDP_LS (1 << 15)
#define AMD_CG_SUPPORT_HDP_MGCG (1 << 16)
+#define AMD_CG_SUPPORT_ROM_MGCG (1 << 17)
/* PG flags */
#define AMD_PG_SUPPORT_GFX_PG (1 << 0)
@@ -140,6 +143,8 @@ enum amd_pm_state_type {
};
struct amd_ip_funcs {
+ /* Name of IP block */
+ char *name;
/* sets up early driver state (pre sw_init), does not configure hw - Optional */
int (*early_init)(void *handle);
/* sets up late driver/hw state (post hw_init) - Optional */
@@ -152,6 +157,7 @@ struct amd_ip_funcs {
int (*hw_init)(void *handle);
/* tears down the hw state */
int (*hw_fini)(void *handle);
+ void (*late_fini)(void *handle);
/* handles IP specific hw/sw changes for suspend */
int (*suspend)(void *handle);
/* handles IP specific hw/sw changes for resume */
@@ -162,8 +168,6 @@ struct amd_ip_funcs {
int (*wait_for_idle)(void *handle);
/* soft reset the IP block */
int (*soft_reset)(void *handle);
- /* dump the IP block status registers */
- void (*print_status)(void *handle);
/* enable/disable cg for the IP block */
int (*set_clockgating_state)(void *handle,
enum amd_clockgating_state state);
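
struct amd_ip_funcs is the per-IP-block callback table that instances like vi_common_ip_funcs earlier in this diff fill in; this hunk adds a .name used to identify the block in messages and an optional .late_fini teardown hook, while removing .print_status. A minimal sketch of a block filling the table, with the hypothetical foo_* handlers standing in for real callbacks:

static int foo_early_init(void *handle)
{
        /* 'handle' is the struct amdgpu_device the core passes in. */
        return 0;
}

static void foo_late_fini(void *handle)
{
        /* New optional hook: undo anything set up in late_init. */
}

const struct amd_ip_funcs foo_ip_funcs = {
        .name = "foo",                 /* new field added by this hunk */
        .early_init = foo_early_init,
        .late_fini = foo_late_fini,
        /* hooks left unset stay NULL; optional callbacks are skipped */
};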
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h
new file mode 100755
index 000000000..09a7df175
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_d.h
@@ -0,0 +1,10075 @@
+/*
+ * DCE_11_2 Register documentation
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DCE_11_2_D_H
+#define DCE_11_2_D_H
+
+#define mmPIPE0_PG_CONFIG 0x2c0
+#define mmPIPE0_PG_ENABLE 0x2c1
+#define mmPIPE0_PG_STATUS 0x2c2
+#define mmPIPE1_PG_CONFIG 0x2c3
+#define mmPIPE1_PG_ENABLE 0x2c4
+#define mmPIPE1_PG_STATUS 0x2c5
+#define mmPIPE2_PG_CONFIG 0x2c6
+#define mmPIPE2_PG_ENABLE 0x2c7
+#define mmPIPE2_PG_STATUS 0x2c8
+#define mmPIPE3_PG_CONFIG 0x2c9
+#define mmPIPE3_PG_ENABLE 0x2ca
+#define mmPIPE3_PG_STATUS 0x2cb
+#define mmPIPE4_PG_CONFIG 0x2cc
+#define mmPIPE4_PG_ENABLE 0x2cd
+#define mmPIPE4_PG_STATUS 0x2ce
+#define mmPIPE5_PG_CONFIG 0x2cf
+#define mmPIPE5_PG_ENABLE 0x2d0
+#define mmPIPE5_PG_STATUS 0x2d1
+#define mmDCPG_INTERRUPT_STATUS 0x2de
+#define mmDCPG_INTERRUPT_CONTROL 0x2df
+#define mmDCPG_INTERRUPT_CONTROL2 0x2e0
+#define mmDC_IP_REQUEST_CNTL 0x2d2
+#define mmDC_PGFSM_CONFIG_REG 0x2d3
+#define mmDC_PGFSM_WRITE_REG 0x2d4
+#define mmDC_PGCNTL_STATUS_REG 0x2d5
+#define mmDCPG_TEST_DEBUG_INDEX 0x2d6
+#define mmDCPG_TEST_DEBUG_DATA 0x2d7
+#define mmBL1_PWM_AMBIENT_LIGHT_LEVEL 0x1628
+#define mmBL1_PWM_USER_LEVEL 0x1629
+#define mmBL1_PWM_TARGET_ABM_LEVEL 0x162a
+#define mmBL1_PWM_CURRENT_ABM_LEVEL 0x162b
+#define mmBL1_PWM_FINAL_DUTY_CYCLE 0x162c
+#define mmBL1_PWM_MINIMUM_DUTY_CYCLE 0x162d
+#define mmBL1_PWM_ABM_CNTL 0x162e
+#define mmBL1_PWM_BL_UPDATE_SAMPLE_RATE 0x162f
+#define mmBL1_PWM_GRP2_REG_LOCK 0x1630
+#define mmDC_ABM1_CNTL 0x1638
+#define mmDC_ABM1_IPCSC_COEFF_SEL 0x1639
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_0 0x163a
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_1 0x163b
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_2 0x163c
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_3 0x163d
+#define mmDC_ABM1_ACE_OFFSET_SLOPE_4 0x163e
+#define mmDC_ABM1_ACE_THRES_12 0x163f
+#define mmDC_ABM1_ACE_THRES_34 0x1640
+#define mmDC_ABM1_ACE_CNTL_MISC 0x1641
+#define mmDC_ABM1_DEBUG_MISC 0x1649
+#define mmDC_ABM1_HGLS_REG_READ_PROGRESS 0x164a
+#define mmDC_ABM1_HG_MISC_CTRL 0x164b
+#define mmDC_ABM1_LS_SUM_OF_LUMA 0x164c
+#define mmDC_ABM1_LS_MIN_MAX_LUMA 0x164d
+#define mmDC_ABM1_LS_FILTERED_MIN_MAX_LUMA 0x164e
+#define mmDC_ABM1_LS_PIXEL_COUNT 0x164f
+#define mmDC_ABM1_LS_OVR_SCAN_BIN 0x1650
+#define mmDC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES 0x1651
+#define mmDC_ABM1_LS_MIN_PIXEL_VALUE_COUNT 0x1652
+#define mmDC_ABM1_LS_MAX_PIXEL_VALUE_COUNT 0x1653
+#define mmDC_ABM1_HG_SAMPLE_RATE 0x1654
+#define mmDC_ABM1_LS_SAMPLE_RATE 0x1655
+#define mmDC_ABM1_HG_BIN_1_32_SHIFT_FLAG 0x1656
+#define mmDC_ABM1_HG_BIN_1_8_SHIFT_INDEX 0x1657
+#define mmDC_ABM1_HG_BIN_9_16_SHIFT_INDEX 0x1658
+#define mmDC_ABM1_HG_BIN_17_24_SHIFT_INDEX 0x1659
+#define mmDC_ABM1_HG_BIN_25_32_SHIFT_INDEX 0x165a
+#define mmDC_ABM1_HG_RESULT_1 0x165b
+#define mmDC_ABM1_HG_RESULT_2 0x165c
+#define mmDC_ABM1_HG_RESULT_3 0x165d
+#define mmDC_ABM1_HG_RESULT_4 0x165e
+#define mmDC_ABM1_HG_RESULT_5 0x165f
+#define mmDC_ABM1_HG_RESULT_6 0x1660
+#define mmDC_ABM1_HG_RESULT_7 0x1661
+#define mmDC_ABM1_HG_RESULT_8 0x1662
+#define mmDC_ABM1_HG_RESULT_9 0x1663
+#define mmDC_ABM1_HG_RESULT_10 0x1664
+#define mmDC_ABM1_HG_RESULT_11 0x1665
+#define mmDC_ABM1_HG_RESULT_12 0x1666
+#define mmDC_ABM1_HG_RESULT_13 0x1667
+#define mmDC_ABM1_HG_RESULT_14 0x1668
+#define mmDC_ABM1_HG_RESULT_15 0x1669
+#define mmDC_ABM1_HG_RESULT_16 0x166a
+#define mmDC_ABM1_HG_RESULT_17 0x166b
+#define mmDC_ABM1_HG_RESULT_18 0x166c
+#define mmDC_ABM1_HG_RESULT_19 0x166d
+#define mmDC_ABM1_HG_RESULT_20 0x166e
+#define mmDC_ABM1_HG_RESULT_21 0x166f
+#define mmDC_ABM1_HG_RESULT_22 0x1670
+#define mmDC_ABM1_HG_RESULT_23 0x1671
+#define mmDC_ABM1_HG_RESULT_24 0x1672
+#define mmDC_ABM1_OVERSCAN_PIXEL_VALUE 0x169b
+#define mmDC_ABM1_BL_MASTER_LOCK 0x169c
+#define mmABM_TEST_DEBUG_INDEX 0x169e
+#define mmABM_TEST_DEBUG_DATA 0x169f
+#define mmCRTC_H_BLANK_EARLY_NUM 0x1b7d
+#define mmCRTC0_CRTC_H_BLANK_EARLY_NUM 0x1b7d
+#define mmCRTC1_CRTC_H_BLANK_EARLY_NUM 0x1d7d
+#define mmCRTC2_CRTC_H_BLANK_EARLY_NUM 0x1f7d
+#define mmCRTC3_CRTC_H_BLANK_EARLY_NUM 0x417d
+#define mmCRTC4_CRTC_H_BLANK_EARLY_NUM 0x437d
+#define mmCRTC5_CRTC_H_BLANK_EARLY_NUM 0x457d
+#define mmCRTC_H_TOTAL 0x1b80
+#define mmCRTC0_CRTC_H_TOTAL 0x1b80
+#define mmCRTC1_CRTC_H_TOTAL 0x1d80
+#define mmCRTC2_CRTC_H_TOTAL 0x1f80
+#define mmCRTC3_CRTC_H_TOTAL 0x4180
+#define mmCRTC4_CRTC_H_TOTAL 0x4380
+#define mmCRTC5_CRTC_H_TOTAL 0x4580
+#define mmCRTC_H_BLANK_START_END 0x1b81
+#define mmCRTC0_CRTC_H_BLANK_START_END 0x1b81
+#define mmCRTC1_CRTC_H_BLANK_START_END 0x1d81
+#define mmCRTC2_CRTC_H_BLANK_START_END 0x1f81
+#define mmCRTC3_CRTC_H_BLANK_START_END 0x4181
+#define mmCRTC4_CRTC_H_BLANK_START_END 0x4381
+#define mmCRTC5_CRTC_H_BLANK_START_END 0x4581
+#define mmCRTC_H_SYNC_A 0x1b82
+#define mmCRTC0_CRTC_H_SYNC_A 0x1b82
+#define mmCRTC1_CRTC_H_SYNC_A 0x1d82
+#define mmCRTC2_CRTC_H_SYNC_A 0x1f82
+#define mmCRTC3_CRTC_H_SYNC_A 0x4182
+#define mmCRTC4_CRTC_H_SYNC_A 0x4382
+#define mmCRTC5_CRTC_H_SYNC_A 0x4582
+#define mmCRTC_H_SYNC_A_CNTL 0x1b83
+#define mmCRTC0_CRTC_H_SYNC_A_CNTL 0x1b83
+#define mmCRTC1_CRTC_H_SYNC_A_CNTL 0x1d83
+#define mmCRTC2_CRTC_H_SYNC_A_CNTL 0x1f83
+#define mmCRTC3_CRTC_H_SYNC_A_CNTL 0x4183
+#define mmCRTC4_CRTC_H_SYNC_A_CNTL 0x4383
+#define mmCRTC5_CRTC_H_SYNC_A_CNTL 0x4583
+#define mmCRTC_H_SYNC_B 0x1b84
+#define mmCRTC0_CRTC_H_SYNC_B 0x1b84
+#define mmCRTC1_CRTC_H_SYNC_B 0x1d84
+#define mmCRTC2_CRTC_H_SYNC_B 0x1f84
+#define mmCRTC3_CRTC_H_SYNC_B 0x4184
+#define mmCRTC4_CRTC_H_SYNC_B 0x4384
+#define mmCRTC5_CRTC_H_SYNC_B 0x4584
+#define mmCRTC_H_SYNC_B_CNTL 0x1b85
+#define mmCRTC0_CRTC_H_SYNC_B_CNTL 0x1b85
+#define mmCRTC1_CRTC_H_SYNC_B_CNTL 0x1d85
+#define mmCRTC2_CRTC_H_SYNC_B_CNTL 0x1f85
+#define mmCRTC3_CRTC_H_SYNC_B_CNTL 0x4185
+#define mmCRTC4_CRTC_H_SYNC_B_CNTL 0x4385
+#define mmCRTC5_CRTC_H_SYNC_B_CNTL 0x4585
+#define mmCRTC_VBI_END 0x1b86
+#define mmCRTC0_CRTC_VBI_END 0x1b86
+#define mmCRTC1_CRTC_VBI_END 0x1d86
+#define mmCRTC2_CRTC_VBI_END 0x1f86
+#define mmCRTC3_CRTC_VBI_END 0x4186
+#define mmCRTC4_CRTC_VBI_END 0x4386
+#define mmCRTC5_CRTC_VBI_END 0x4586
+#define mmCRTC_V_TOTAL 0x1b87
+#define mmCRTC0_CRTC_V_TOTAL 0x1b87
+#define mmCRTC1_CRTC_V_TOTAL 0x1d87
+#define mmCRTC2_CRTC_V_TOTAL 0x1f87
+#define mmCRTC3_CRTC_V_TOTAL 0x4187
+#define mmCRTC4_CRTC_V_TOTAL 0x4387
+#define mmCRTC5_CRTC_V_TOTAL 0x4587
+#define mmCRTC_V_TOTAL_MIN 0x1b88
+#define mmCRTC0_CRTC_V_TOTAL_MIN 0x1b88
+#define mmCRTC1_CRTC_V_TOTAL_MIN 0x1d88
+#define mmCRTC2_CRTC_V_TOTAL_MIN 0x1f88
+#define mmCRTC3_CRTC_V_TOTAL_MIN 0x4188
+#define mmCRTC4_CRTC_V_TOTAL_MIN 0x4388
+#define mmCRTC5_CRTC_V_TOTAL_MIN 0x4588
+#define mmCRTC_V_TOTAL_MAX 0x1b89
+#define mmCRTC0_CRTC_V_TOTAL_MAX 0x1b89
+#define mmCRTC1_CRTC_V_TOTAL_MAX 0x1d89
+#define mmCRTC2_CRTC_V_TOTAL_MAX 0x1f89
+#define mmCRTC3_CRTC_V_TOTAL_MAX 0x4189
+#define mmCRTC4_CRTC_V_TOTAL_MAX 0x4389
+#define mmCRTC5_CRTC_V_TOTAL_MAX 0x4589
+#define mmCRTC_V_TOTAL_CONTROL 0x1b8a
+#define mmCRTC0_CRTC_V_TOTAL_CONTROL 0x1b8a
+#define mmCRTC1_CRTC_V_TOTAL_CONTROL 0x1d8a
+#define mmCRTC2_CRTC_V_TOTAL_CONTROL 0x1f8a
+#define mmCRTC3_CRTC_V_TOTAL_CONTROL 0x418a
+#define mmCRTC4_CRTC_V_TOTAL_CONTROL 0x438a
+#define mmCRTC5_CRTC_V_TOTAL_CONTROL 0x458a
+#define mmCRTC_V_TOTAL_INT_STATUS 0x1b8b
+#define mmCRTC0_CRTC_V_TOTAL_INT_STATUS 0x1b8b
+#define mmCRTC1_CRTC_V_TOTAL_INT_STATUS 0x1d8b
+#define mmCRTC2_CRTC_V_TOTAL_INT_STATUS 0x1f8b
+#define mmCRTC3_CRTC_V_TOTAL_INT_STATUS 0x418b
+#define mmCRTC4_CRTC_V_TOTAL_INT_STATUS 0x438b
+#define mmCRTC5_CRTC_V_TOTAL_INT_STATUS 0x458b
+#define mmCRTC_VSYNC_NOM_INT_STATUS 0x1b8c
+#define mmCRTC0_CRTC_VSYNC_NOM_INT_STATUS 0x1b8c
+#define mmCRTC1_CRTC_VSYNC_NOM_INT_STATUS 0x1d8c
+#define mmCRTC2_CRTC_VSYNC_NOM_INT_STATUS 0x1f8c
+#define mmCRTC3_CRTC_VSYNC_NOM_INT_STATUS 0x418c
+#define mmCRTC4_CRTC_VSYNC_NOM_INT_STATUS 0x438c
+#define mmCRTC5_CRTC_VSYNC_NOM_INT_STATUS 0x458c
+#define mmCRTC_V_BLANK_START_END 0x1b8d
+#define mmCRTC0_CRTC_V_BLANK_START_END 0x1b8d
+#define mmCRTC1_CRTC_V_BLANK_START_END 0x1d8d
+#define mmCRTC2_CRTC_V_BLANK_START_END 0x1f8d
+#define mmCRTC3_CRTC_V_BLANK_START_END 0x418d
+#define mmCRTC4_CRTC_V_BLANK_START_END 0x438d
+#define mmCRTC5_CRTC_V_BLANK_START_END 0x458d
+#define mmCRTC_V_SYNC_A 0x1b8e
+#define mmCRTC0_CRTC_V_SYNC_A 0x1b8e
+#define mmCRTC1_CRTC_V_SYNC_A 0x1d8e
+#define mmCRTC2_CRTC_V_SYNC_A 0x1f8e
+#define mmCRTC3_CRTC_V_SYNC_A 0x418e
+#define mmCRTC4_CRTC_V_SYNC_A 0x438e
+#define mmCRTC5_CRTC_V_SYNC_A 0x458e
+#define mmCRTC_V_SYNC_A_CNTL 0x1b8f
+#define mmCRTC0_CRTC_V_SYNC_A_CNTL 0x1b8f
+#define mmCRTC1_CRTC_V_SYNC_A_CNTL 0x1d8f
+#define mmCRTC2_CRTC_V_SYNC_A_CNTL 0x1f8f
+#define mmCRTC3_CRTC_V_SYNC_A_CNTL 0x418f
+#define mmCRTC4_CRTC_V_SYNC_A_CNTL 0x438f
+#define mmCRTC5_CRTC_V_SYNC_A_CNTL 0x458f
+#define mmCRTC_V_SYNC_B 0x1b90
+#define mmCRTC0_CRTC_V_SYNC_B 0x1b90
+#define mmCRTC1_CRTC_V_SYNC_B 0x1d90
+#define mmCRTC2_CRTC_V_SYNC_B 0x1f90
+#define mmCRTC3_CRTC_V_SYNC_B 0x4190
+#define mmCRTC4_CRTC_V_SYNC_B 0x4390
+#define mmCRTC5_CRTC_V_SYNC_B 0x4590
+#define mmCRTC_V_SYNC_B_CNTL 0x1b91
+#define mmCRTC0_CRTC_V_SYNC_B_CNTL 0x1b91
+#define mmCRTC1_CRTC_V_SYNC_B_CNTL 0x1d91
+#define mmCRTC2_CRTC_V_SYNC_B_CNTL 0x1f91
+#define mmCRTC3_CRTC_V_SYNC_B_CNTL 0x4191
+#define mmCRTC4_CRTC_V_SYNC_B_CNTL 0x4391
+#define mmCRTC5_CRTC_V_SYNC_B_CNTL 0x4591
+#define mmCRTC_DTMTEST_CNTL 0x1b92
+#define mmCRTC0_CRTC_DTMTEST_CNTL 0x1b92
+#define mmCRTC1_CRTC_DTMTEST_CNTL 0x1d92
+#define mmCRTC2_CRTC_DTMTEST_CNTL 0x1f92
+#define mmCRTC3_CRTC_DTMTEST_CNTL 0x4192
+#define mmCRTC4_CRTC_DTMTEST_CNTL 0x4392
+#define mmCRTC5_CRTC_DTMTEST_CNTL 0x4592
+#define mmCRTC_DTMTEST_STATUS_POSITION 0x1b93
+#define mmCRTC0_CRTC_DTMTEST_STATUS_POSITION 0x1b93
+#define mmCRTC1_CRTC_DTMTEST_STATUS_POSITION 0x1d93
+#define mmCRTC2_CRTC_DTMTEST_STATUS_POSITION 0x1f93
+#define mmCRTC3_CRTC_DTMTEST_STATUS_POSITION 0x4193
+#define mmCRTC4_CRTC_DTMTEST_STATUS_POSITION 0x4393
+#define mmCRTC5_CRTC_DTMTEST_STATUS_POSITION 0x4593
+#define mmCRTC_TRIGA_CNTL 0x1b94
+#define mmCRTC0_CRTC_TRIGA_CNTL 0x1b94
+#define mmCRTC1_CRTC_TRIGA_CNTL 0x1d94
+#define mmCRTC2_CRTC_TRIGA_CNTL 0x1f94
+#define mmCRTC3_CRTC_TRIGA_CNTL 0x4194
+#define mmCRTC4_CRTC_TRIGA_CNTL 0x4394
+#define mmCRTC5_CRTC_TRIGA_CNTL 0x4594
+#define mmCRTC_TRIGA_MANUAL_TRIG 0x1b95
+#define mmCRTC0_CRTC_TRIGA_MANUAL_TRIG 0x1b95
+#define mmCRTC1_CRTC_TRIGA_MANUAL_TRIG 0x1d95
+#define mmCRTC2_CRTC_TRIGA_MANUAL_TRIG 0x1f95
+#define mmCRTC3_CRTC_TRIGA_MANUAL_TRIG 0x4195
+#define mmCRTC4_CRTC_TRIGA_MANUAL_TRIG 0x4395
+#define mmCRTC5_CRTC_TRIGA_MANUAL_TRIG 0x4595
+#define mmCRTC_TRIGB_CNTL 0x1b96
+#define mmCRTC0_CRTC_TRIGB_CNTL 0x1b96
+#define mmCRTC1_CRTC_TRIGB_CNTL 0x1d96
+#define mmCRTC2_CRTC_TRIGB_CNTL 0x1f96
+#define mmCRTC3_CRTC_TRIGB_CNTL 0x4196
+#define mmCRTC4_CRTC_TRIGB_CNTL 0x4396
+#define mmCRTC5_CRTC_TRIGB_CNTL 0x4596
+#define mmCRTC_TRIGB_MANUAL_TRIG 0x1b97
+#define mmCRTC0_CRTC_TRIGB_MANUAL_TRIG 0x1b97
+#define mmCRTC1_CRTC_TRIGB_MANUAL_TRIG 0x1d97
+#define mmCRTC2_CRTC_TRIGB_MANUAL_TRIG 0x1f97
+#define mmCRTC3_CRTC_TRIGB_MANUAL_TRIG 0x4197
+#define mmCRTC4_CRTC_TRIGB_MANUAL_TRIG 0x4397
+#define mmCRTC5_CRTC_TRIGB_MANUAL_TRIG 0x4597
+#define mmCRTC_FORCE_COUNT_NOW_CNTL 0x1b98
+#define mmCRTC0_CRTC_FORCE_COUNT_NOW_CNTL 0x1b98
+#define mmCRTC1_CRTC_FORCE_COUNT_NOW_CNTL 0x1d98
+#define mmCRTC2_CRTC_FORCE_COUNT_NOW_CNTL 0x1f98
+#define mmCRTC3_CRTC_FORCE_COUNT_NOW_CNTL 0x4198
+#define mmCRTC4_CRTC_FORCE_COUNT_NOW_CNTL 0x4398
+#define mmCRTC5_CRTC_FORCE_COUNT_NOW_CNTL 0x4598
+#define mmCRTC_FLOW_CONTROL 0x1b99
+#define mmCRTC0_CRTC_FLOW_CONTROL 0x1b99
+#define mmCRTC1_CRTC_FLOW_CONTROL 0x1d99
+#define mmCRTC2_CRTC_FLOW_CONTROL 0x1f99
+#define mmCRTC3_CRTC_FLOW_CONTROL 0x4199
+#define mmCRTC4_CRTC_FLOW_CONTROL 0x4399
+#define mmCRTC5_CRTC_FLOW_CONTROL 0x4599
+#define mmCRTC_STEREO_FORCE_NEXT_EYE 0x1b9a
+#define mmCRTC0_CRTC_STEREO_FORCE_NEXT_EYE 0x1b9a
+#define mmCRTC1_CRTC_STEREO_FORCE_NEXT_EYE 0x1d9a
+#define mmCRTC2_CRTC_STEREO_FORCE_NEXT_EYE 0x1f9a
+#define mmCRTC3_CRTC_STEREO_FORCE_NEXT_EYE 0x419a
+#define mmCRTC4_CRTC_STEREO_FORCE_NEXT_EYE 0x439a
+#define mmCRTC5_CRTC_STEREO_FORCE_NEXT_EYE 0x459a
+#define mmCRTC_AVSYNC_COUNTER 0x1b9b
+#define mmCRTC0_CRTC_AVSYNC_COUNTER 0x1b9b
+#define mmCRTC1_CRTC_AVSYNC_COUNTER 0x1d9b
+#define mmCRTC2_CRTC_AVSYNC_COUNTER 0x1f9b
+#define mmCRTC3_CRTC_AVSYNC_COUNTER 0x419b
+#define mmCRTC4_CRTC_AVSYNC_COUNTER 0x439b
+#define mmCRTC5_CRTC_AVSYNC_COUNTER 0x459b
+#define mmCRTC_CONTROL 0x1b9c
+#define mmCRTC0_CRTC_CONTROL 0x1b9c
+#define mmCRTC1_CRTC_CONTROL 0x1d9c
+#define mmCRTC2_CRTC_CONTROL 0x1f9c
+#define mmCRTC3_CRTC_CONTROL 0x419c
+#define mmCRTC4_CRTC_CONTROL 0x439c
+#define mmCRTC5_CRTC_CONTROL 0x459c
+#define mmCRTC_BLANK_CONTROL 0x1b9d
+#define mmCRTC0_CRTC_BLANK_CONTROL 0x1b9d
+#define mmCRTC1_CRTC_BLANK_CONTROL 0x1d9d
+#define mmCRTC2_CRTC_BLANK_CONTROL 0x1f9d
+#define mmCRTC3_CRTC_BLANK_CONTROL 0x419d
+#define mmCRTC4_CRTC_BLANK_CONTROL 0x439d
+#define mmCRTC5_CRTC_BLANK_CONTROL 0x459d
+#define mmCRTC_INTERLACE_CONTROL 0x1b9e
+#define mmCRTC0_CRTC_INTERLACE_CONTROL 0x1b9e
+#define mmCRTC1_CRTC_INTERLACE_CONTROL 0x1d9e
+#define mmCRTC2_CRTC_INTERLACE_CONTROL 0x1f9e
+#define mmCRTC3_CRTC_INTERLACE_CONTROL 0x419e
+#define mmCRTC4_CRTC_INTERLACE_CONTROL 0x439e
+#define mmCRTC5_CRTC_INTERLACE_CONTROL 0x459e
+#define mmCRTC_INTERLACE_STATUS 0x1b9f
+#define mmCRTC0_CRTC_INTERLACE_STATUS 0x1b9f
+#define mmCRTC1_CRTC_INTERLACE_STATUS 0x1d9f
+#define mmCRTC2_CRTC_INTERLACE_STATUS 0x1f9f
+#define mmCRTC3_CRTC_INTERLACE_STATUS 0x419f
+#define mmCRTC4_CRTC_INTERLACE_STATUS 0x439f
+#define mmCRTC5_CRTC_INTERLACE_STATUS 0x459f
+#define mmCRTC_FIELD_INDICATION_CONTROL 0x1ba0
+#define mmCRTC0_CRTC_FIELD_INDICATION_CONTROL 0x1ba0
+#define mmCRTC1_CRTC_FIELD_INDICATION_CONTROL 0x1da0
+#define mmCRTC2_CRTC_FIELD_INDICATION_CONTROL 0x1fa0
+#define mmCRTC3_CRTC_FIELD_INDICATION_CONTROL 0x41a0
+#define mmCRTC4_CRTC_FIELD_INDICATION_CONTROL 0x43a0
+#define mmCRTC5_CRTC_FIELD_INDICATION_CONTROL 0x45a0
+#define mmCRTC_PIXEL_DATA_READBACK0 0x1ba1
+#define mmCRTC0_CRTC_PIXEL_DATA_READBACK0 0x1ba1
+#define mmCRTC1_CRTC_PIXEL_DATA_READBACK0 0x1da1
+#define mmCRTC2_CRTC_PIXEL_DATA_READBACK0 0x1fa1
+#define mmCRTC3_CRTC_PIXEL_DATA_READBACK0 0x41a1
+#define mmCRTC4_CRTC_PIXEL_DATA_READBACK0 0x43a1
+#define mmCRTC5_CRTC_PIXEL_DATA_READBACK0 0x45a1
+#define mmCRTC_PIXEL_DATA_READBACK1 0x1ba2
+#define mmCRTC0_CRTC_PIXEL_DATA_READBACK1 0x1ba2
+#define mmCRTC1_CRTC_PIXEL_DATA_READBACK1 0x1da2
+#define mmCRTC2_CRTC_PIXEL_DATA_READBACK1 0x1fa2
+#define mmCRTC3_CRTC_PIXEL_DATA_READBACK1 0x41a2
+#define mmCRTC4_CRTC_PIXEL_DATA_READBACK1 0x43a2
+#define mmCRTC5_CRTC_PIXEL_DATA_READBACK1 0x45a2
+#define mmCRTC_STATUS 0x1ba3
+#define mmCRTC0_CRTC_STATUS 0x1ba3
+#define mmCRTC1_CRTC_STATUS 0x1da3
+#define mmCRTC2_CRTC_STATUS 0x1fa3
+#define mmCRTC3_CRTC_STATUS 0x41a3
+#define mmCRTC4_CRTC_STATUS 0x43a3
+#define mmCRTC5_CRTC_STATUS 0x45a3
+#define mmCRTC_STATUS_POSITION 0x1ba4
+#define mmCRTC0_CRTC_STATUS_POSITION 0x1ba4
+#define mmCRTC1_CRTC_STATUS_POSITION 0x1da4
+#define mmCRTC2_CRTC_STATUS_POSITION 0x1fa4
+#define mmCRTC3_CRTC_STATUS_POSITION 0x41a4
+#define mmCRTC4_CRTC_STATUS_POSITION 0x43a4
+#define mmCRTC5_CRTC_STATUS_POSITION 0x45a4
+#define mmCRTC_NOM_VERT_POSITION 0x1ba5
+#define mmCRTC0_CRTC_NOM_VERT_POSITION 0x1ba5
+#define mmCRTC1_CRTC_NOM_VERT_POSITION 0x1da5
+#define mmCRTC2_CRTC_NOM_VERT_POSITION 0x1fa5
+#define mmCRTC3_CRTC_NOM_VERT_POSITION 0x41a5
+#define mmCRTC4_CRTC_NOM_VERT_POSITION 0x43a5
+#define mmCRTC5_CRTC_NOM_VERT_POSITION 0x45a5
+#define mmCRTC_STATUS_FRAME_COUNT 0x1ba6
+#define mmCRTC0_CRTC_STATUS_FRAME_COUNT 0x1ba6
+#define mmCRTC1_CRTC_STATUS_FRAME_COUNT 0x1da6
+#define mmCRTC2_CRTC_STATUS_FRAME_COUNT 0x1fa6
+#define mmCRTC3_CRTC_STATUS_FRAME_COUNT 0x41a6
+#define mmCRTC4_CRTC_STATUS_FRAME_COUNT 0x43a6
+#define mmCRTC5_CRTC_STATUS_FRAME_COUNT 0x45a6
+#define mmCRTC_STATUS_VF_COUNT 0x1ba7
+#define mmCRTC0_CRTC_STATUS_VF_COUNT 0x1ba7
+#define mmCRTC1_CRTC_STATUS_VF_COUNT 0x1da7
+#define mmCRTC2_CRTC_STATUS_VF_COUNT 0x1fa7
+#define mmCRTC3_CRTC_STATUS_VF_COUNT 0x41a7
+#define mmCRTC4_CRTC_STATUS_VF_COUNT 0x43a7
+#define mmCRTC5_CRTC_STATUS_VF_COUNT 0x45a7
+#define mmCRTC_STATUS_HV_COUNT 0x1ba8
+#define mmCRTC0_CRTC_STATUS_HV_COUNT 0x1ba8
+#define mmCRTC1_CRTC_STATUS_HV_COUNT 0x1da8
+#define mmCRTC2_CRTC_STATUS_HV_COUNT 0x1fa8
+#define mmCRTC3_CRTC_STATUS_HV_COUNT 0x41a8
+#define mmCRTC4_CRTC_STATUS_HV_COUNT 0x43a8
+#define mmCRTC5_CRTC_STATUS_HV_COUNT 0x45a8
+#define mmCRTC_COUNT_CONTROL 0x1ba9
+#define mmCRTC0_CRTC_COUNT_CONTROL 0x1ba9
+#define mmCRTC1_CRTC_COUNT_CONTROL 0x1da9
+#define mmCRTC2_CRTC_COUNT_CONTROL 0x1fa9
+#define mmCRTC3_CRTC_COUNT_CONTROL 0x41a9
+#define mmCRTC4_CRTC_COUNT_CONTROL 0x43a9
+#define mmCRTC5_CRTC_COUNT_CONTROL 0x45a9
+#define mmCRTC_COUNT_RESET 0x1baa
+#define mmCRTC0_CRTC_COUNT_RESET 0x1baa
+#define mmCRTC1_CRTC_COUNT_RESET 0x1daa
+#define mmCRTC2_CRTC_COUNT_RESET 0x1faa
+#define mmCRTC3_CRTC_COUNT_RESET 0x41aa
+#define mmCRTC4_CRTC_COUNT_RESET 0x43aa
+#define mmCRTC5_CRTC_COUNT_RESET 0x45aa
+#define mmCRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1bab
+#define mmCRTC0_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1bab
+#define mmCRTC1_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1dab
+#define mmCRTC2_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1fab
+#define mmCRTC3_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x41ab
+#define mmCRTC4_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x43ab
+#define mmCRTC5_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE 0x45ab
+#define mmCRTC_VERT_SYNC_CONTROL 0x1bac
+#define mmCRTC0_CRTC_VERT_SYNC_CONTROL 0x1bac
+#define mmCRTC1_CRTC_VERT_SYNC_CONTROL 0x1dac
+#define mmCRTC2_CRTC_VERT_SYNC_CONTROL 0x1fac
+#define mmCRTC3_CRTC_VERT_SYNC_CONTROL 0x41ac
+#define mmCRTC4_CRTC_VERT_SYNC_CONTROL 0x43ac
+#define mmCRTC5_CRTC_VERT_SYNC_CONTROL 0x45ac
+#define mmCRTC_STEREO_STATUS 0x1bad
+#define mmCRTC0_CRTC_STEREO_STATUS 0x1bad
+#define mmCRTC1_CRTC_STEREO_STATUS 0x1dad
+#define mmCRTC2_CRTC_STEREO_STATUS 0x1fad
+#define mmCRTC3_CRTC_STEREO_STATUS 0x41ad
+#define mmCRTC4_CRTC_STEREO_STATUS 0x43ad
+#define mmCRTC5_CRTC_STEREO_STATUS 0x45ad
+#define mmCRTC_STEREO_CONTROL 0x1bae
+#define mmCRTC0_CRTC_STEREO_CONTROL 0x1bae
+#define mmCRTC1_CRTC_STEREO_CONTROL 0x1dae
+#define mmCRTC2_CRTC_STEREO_CONTROL 0x1fae
+#define mmCRTC3_CRTC_STEREO_CONTROL 0x41ae
+#define mmCRTC4_CRTC_STEREO_CONTROL 0x43ae
+#define mmCRTC5_CRTC_STEREO_CONTROL 0x45ae
+#define mmCRTC_SNAPSHOT_STATUS 0x1baf
+#define mmCRTC0_CRTC_SNAPSHOT_STATUS 0x1baf
+#define mmCRTC1_CRTC_SNAPSHOT_STATUS 0x1daf
+#define mmCRTC2_CRTC_SNAPSHOT_STATUS 0x1faf
+#define mmCRTC3_CRTC_SNAPSHOT_STATUS 0x41af
+#define mmCRTC4_CRTC_SNAPSHOT_STATUS 0x43af
+#define mmCRTC5_CRTC_SNAPSHOT_STATUS 0x45af
+#define mmCRTC_SNAPSHOT_CONTROL 0x1bb0
+#define mmCRTC0_CRTC_SNAPSHOT_CONTROL 0x1bb0
+#define mmCRTC1_CRTC_SNAPSHOT_CONTROL 0x1db0
+#define mmCRTC2_CRTC_SNAPSHOT_CONTROL 0x1fb0
+#define mmCRTC3_CRTC_SNAPSHOT_CONTROL 0x41b0
+#define mmCRTC4_CRTC_SNAPSHOT_CONTROL 0x43b0
+#define mmCRTC5_CRTC_SNAPSHOT_CONTROL 0x45b0
+#define mmCRTC_SNAPSHOT_POSITION 0x1bb1
+#define mmCRTC0_CRTC_SNAPSHOT_POSITION 0x1bb1
+#define mmCRTC1_CRTC_SNAPSHOT_POSITION 0x1db1
+#define mmCRTC2_CRTC_SNAPSHOT_POSITION 0x1fb1
+#define mmCRTC3_CRTC_SNAPSHOT_POSITION 0x41b1
+#define mmCRTC4_CRTC_SNAPSHOT_POSITION 0x43b1
+#define mmCRTC5_CRTC_SNAPSHOT_POSITION 0x45b1
+#define mmCRTC_SNAPSHOT_FRAME 0x1bb2
+#define mmCRTC0_CRTC_SNAPSHOT_FRAME 0x1bb2
+#define mmCRTC1_CRTC_SNAPSHOT_FRAME 0x1db2
+#define mmCRTC2_CRTC_SNAPSHOT_FRAME 0x1fb2
+#define mmCRTC3_CRTC_SNAPSHOT_FRAME 0x41b2
+#define mmCRTC4_CRTC_SNAPSHOT_FRAME 0x43b2
+#define mmCRTC5_CRTC_SNAPSHOT_FRAME 0x45b2
+#define mmCRTC_START_LINE_CONTROL 0x1bb3
+#define mmCRTC0_CRTC_START_LINE_CONTROL 0x1bb3
+#define mmCRTC1_CRTC_START_LINE_CONTROL 0x1db3
+#define mmCRTC2_CRTC_START_LINE_CONTROL 0x1fb3
+#define mmCRTC3_CRTC_START_LINE_CONTROL 0x41b3
+#define mmCRTC4_CRTC_START_LINE_CONTROL 0x43b3
+#define mmCRTC5_CRTC_START_LINE_CONTROL 0x45b3
+#define mmCRTC_INTERRUPT_CONTROL 0x1bb4
+#define mmCRTC0_CRTC_INTERRUPT_CONTROL 0x1bb4
+#define mmCRTC1_CRTC_INTERRUPT_CONTROL 0x1db4
+#define mmCRTC2_CRTC_INTERRUPT_CONTROL 0x1fb4
+#define mmCRTC3_CRTC_INTERRUPT_CONTROL 0x41b4
+#define mmCRTC4_CRTC_INTERRUPT_CONTROL 0x43b4
+#define mmCRTC5_CRTC_INTERRUPT_CONTROL 0x45b4
+#define mmCRTC_UPDATE_LOCK 0x1bb5
+#define mmCRTC0_CRTC_UPDATE_LOCK 0x1bb5
+#define mmCRTC1_CRTC_UPDATE_LOCK 0x1db5
+#define mmCRTC2_CRTC_UPDATE_LOCK 0x1fb5
+#define mmCRTC3_CRTC_UPDATE_LOCK 0x41b5
+#define mmCRTC4_CRTC_UPDATE_LOCK 0x43b5
+#define mmCRTC5_CRTC_UPDATE_LOCK 0x45b5
+#define mmCRTC_DOUBLE_BUFFER_CONTROL 0x1bb6
+#define mmCRTC0_CRTC_DOUBLE_BUFFER_CONTROL 0x1bb6
+#define mmCRTC1_CRTC_DOUBLE_BUFFER_CONTROL 0x1db6
+#define mmCRTC2_CRTC_DOUBLE_BUFFER_CONTROL 0x1fb6
+#define mmCRTC3_CRTC_DOUBLE_BUFFER_CONTROL 0x41b6
+#define mmCRTC4_CRTC_DOUBLE_BUFFER_CONTROL 0x43b6
+#define mmCRTC5_CRTC_DOUBLE_BUFFER_CONTROL 0x45b6
+#define mmCRTC_VGA_PARAMETER_CAPTURE_MODE 0x1bb7
+#define mmCRTC0_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x1bb7
+#define mmCRTC1_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x1db7
+#define mmCRTC2_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x1fb7
+#define mmCRTC3_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x41b7
+#define mmCRTC4_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x43b7
+#define mmCRTC5_CRTC_VGA_PARAMETER_CAPTURE_MODE 0x45b7
+#define mmCRTC_TEST_PATTERN_CONTROL 0x1bba
+#define mmCRTC0_CRTC_TEST_PATTERN_CONTROL 0x1bba
+#define mmCRTC1_CRTC_TEST_PATTERN_CONTROL 0x1dba
+#define mmCRTC2_CRTC_TEST_PATTERN_CONTROL 0x1fba
+#define mmCRTC3_CRTC_TEST_PATTERN_CONTROL 0x41ba
+#define mmCRTC4_CRTC_TEST_PATTERN_CONTROL 0x43ba
+#define mmCRTC5_CRTC_TEST_PATTERN_CONTROL 0x45ba
+#define mmCRTC_TEST_PATTERN_PARAMETERS 0x1bbb
+#define mmCRTC0_CRTC_TEST_PATTERN_PARAMETERS 0x1bbb
+#define mmCRTC1_CRTC_TEST_PATTERN_PARAMETERS 0x1dbb
+#define mmCRTC2_CRTC_TEST_PATTERN_PARAMETERS 0x1fbb
+#define mmCRTC3_CRTC_TEST_PATTERN_PARAMETERS 0x41bb
+#define mmCRTC4_CRTC_TEST_PATTERN_PARAMETERS 0x43bb
+#define mmCRTC5_CRTC_TEST_PATTERN_PARAMETERS 0x45bb
+#define mmCRTC_TEST_PATTERN_COLOR 0x1bbc
+#define mmCRTC0_CRTC_TEST_PATTERN_COLOR 0x1bbc
+#define mmCRTC1_CRTC_TEST_PATTERN_COLOR 0x1dbc
+#define mmCRTC2_CRTC_TEST_PATTERN_COLOR 0x1fbc
+#define mmCRTC3_CRTC_TEST_PATTERN_COLOR 0x41bc
+#define mmCRTC4_CRTC_TEST_PATTERN_COLOR 0x43bc
+#define mmCRTC5_CRTC_TEST_PATTERN_COLOR 0x45bc
+#define mmCRTC_MASTER_UPDATE_LOCK 0x1bbd
+#define mmCRTC0_CRTC_MASTER_UPDATE_LOCK 0x1bbd
+#define mmCRTC1_CRTC_MASTER_UPDATE_LOCK 0x1dbd
+#define mmCRTC2_CRTC_MASTER_UPDATE_LOCK 0x1fbd
+#define mmCRTC3_CRTC_MASTER_UPDATE_LOCK 0x41bd
+#define mmCRTC4_CRTC_MASTER_UPDATE_LOCK 0x43bd
+#define mmCRTC5_CRTC_MASTER_UPDATE_LOCK 0x45bd
+#define mmCRTC_MASTER_UPDATE_MODE 0x1bbe
+#define mmCRTC0_CRTC_MASTER_UPDATE_MODE 0x1bbe
+#define mmCRTC1_CRTC_MASTER_UPDATE_MODE 0x1dbe
+#define mmCRTC2_CRTC_MASTER_UPDATE_MODE 0x1fbe
+#define mmCRTC3_CRTC_MASTER_UPDATE_MODE 0x41be
+#define mmCRTC4_CRTC_MASTER_UPDATE_MODE 0x43be
+#define mmCRTC5_CRTC_MASTER_UPDATE_MODE 0x45be
+#define mmCRTC_MVP_INBAND_CNTL_INSERT 0x1bbf
+#define mmCRTC0_CRTC_MVP_INBAND_CNTL_INSERT 0x1bbf
+#define mmCRTC1_CRTC_MVP_INBAND_CNTL_INSERT 0x1dbf
+#define mmCRTC2_CRTC_MVP_INBAND_CNTL_INSERT 0x1fbf
+#define mmCRTC3_CRTC_MVP_INBAND_CNTL_INSERT 0x41bf
+#define mmCRTC4_CRTC_MVP_INBAND_CNTL_INSERT 0x43bf
+#define mmCRTC5_CRTC_MVP_INBAND_CNTL_INSERT 0x45bf
+#define mmCRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x1bc0
+#define mmCRTC0_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x1bc0
+#define mmCRTC1_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x1dc0
+#define mmCRTC2_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x1fc0
+#define mmCRTC3_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x41c0
+#define mmCRTC4_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x43c0
+#define mmCRTC5_CRTC_MVP_INBAND_CNTL_INSERT_TIMER 0x45c0
+#define mmCRTC_MVP_STATUS 0x1bc1
+#define mmCRTC0_CRTC_MVP_STATUS 0x1bc1
+#define mmCRTC1_CRTC_MVP_STATUS 0x1dc1
+#define mmCRTC2_CRTC_MVP_STATUS 0x1fc1
+#define mmCRTC3_CRTC_MVP_STATUS 0x41c1
+#define mmCRTC4_CRTC_MVP_STATUS 0x43c1
+#define mmCRTC5_CRTC_MVP_STATUS 0x45c1
+#define mmCRTC_MASTER_EN 0x1bc2
+#define mmCRTC0_CRTC_MASTER_EN 0x1bc2
+#define mmCRTC1_CRTC_MASTER_EN 0x1dc2
+#define mmCRTC2_CRTC_MASTER_EN 0x1fc2
+#define mmCRTC3_CRTC_MASTER_EN 0x41c2
+#define mmCRTC4_CRTC_MASTER_EN 0x43c2
+#define mmCRTC5_CRTC_MASTER_EN 0x45c2
+#define mmCRTC_ALLOW_STOP_OFF_V_CNT 0x1bc3
+#define mmCRTC0_CRTC_ALLOW_STOP_OFF_V_CNT 0x1bc3
+#define mmCRTC1_CRTC_ALLOW_STOP_OFF_V_CNT 0x1dc3
+#define mmCRTC2_CRTC_ALLOW_STOP_OFF_V_CNT 0x1fc3
+#define mmCRTC3_CRTC_ALLOW_STOP_OFF_V_CNT 0x41c3
+#define mmCRTC4_CRTC_ALLOW_STOP_OFF_V_CNT 0x43c3
+#define mmCRTC5_CRTC_ALLOW_STOP_OFF_V_CNT 0x45c3
+#define mmCRTC_V_UPDATE_INT_STATUS 0x1bc4
+#define mmCRTC0_CRTC_V_UPDATE_INT_STATUS 0x1bc4
+#define mmCRTC1_CRTC_V_UPDATE_INT_STATUS 0x1dc4
+#define mmCRTC2_CRTC_V_UPDATE_INT_STATUS 0x1fc4
+#define mmCRTC3_CRTC_V_UPDATE_INT_STATUS 0x41c4
+#define mmCRTC4_CRTC_V_UPDATE_INT_STATUS 0x43c4
+#define mmCRTC5_CRTC_V_UPDATE_INT_STATUS 0x45c4
+#define mmCRTC_OVERSCAN_COLOR 0x1bc8
+#define mmCRTC0_CRTC_OVERSCAN_COLOR 0x1bc8
+#define mmCRTC1_CRTC_OVERSCAN_COLOR 0x1dc8
+#define mmCRTC2_CRTC_OVERSCAN_COLOR 0x1fc8
+#define mmCRTC3_CRTC_OVERSCAN_COLOR 0x41c8
+#define mmCRTC4_CRTC_OVERSCAN_COLOR 0x43c8
+#define mmCRTC5_CRTC_OVERSCAN_COLOR 0x45c8
+#define mmCRTC_OVERSCAN_COLOR_EXT 0x1bc9
+#define mmCRTC0_CRTC_OVERSCAN_COLOR_EXT 0x1bc9
+#define mmCRTC1_CRTC_OVERSCAN_COLOR_EXT 0x1dc9
+#define mmCRTC2_CRTC_OVERSCAN_COLOR_EXT 0x1fc9
+#define mmCRTC3_CRTC_OVERSCAN_COLOR_EXT 0x41c9
+#define mmCRTC4_CRTC_OVERSCAN_COLOR_EXT 0x43c9
+#define mmCRTC5_CRTC_OVERSCAN_COLOR_EXT 0x45c9
+#define mmCRTC_BLANK_DATA_COLOR 0x1bca
+#define mmCRTC0_CRTC_BLANK_DATA_COLOR 0x1bca
+#define mmCRTC1_CRTC_BLANK_DATA_COLOR 0x1dca
+#define mmCRTC2_CRTC_BLANK_DATA_COLOR 0x1fca
+#define mmCRTC3_CRTC_BLANK_DATA_COLOR 0x41ca
+#define mmCRTC4_CRTC_BLANK_DATA_COLOR 0x43ca
+#define mmCRTC5_CRTC_BLANK_DATA_COLOR 0x45ca
+#define mmCRTC_BLANK_DATA_COLOR_EXT 0x1bcb
+#define mmCRTC0_CRTC_BLANK_DATA_COLOR_EXT 0x1bcb
+#define mmCRTC1_CRTC_BLANK_DATA_COLOR_EXT 0x1dcb
+#define mmCRTC2_CRTC_BLANK_DATA_COLOR_EXT 0x1fcb
+#define mmCRTC3_CRTC_BLANK_DATA_COLOR_EXT 0x41cb
+#define mmCRTC4_CRTC_BLANK_DATA_COLOR_EXT 0x43cb
+#define mmCRTC5_CRTC_BLANK_DATA_COLOR_EXT 0x45cb
+#define mmCRTC_BLACK_COLOR 0x1bcc
+#define mmCRTC0_CRTC_BLACK_COLOR 0x1bcc
+#define mmCRTC1_CRTC_BLACK_COLOR 0x1dcc
+#define mmCRTC2_CRTC_BLACK_COLOR 0x1fcc
+#define mmCRTC3_CRTC_BLACK_COLOR 0x41cc
+#define mmCRTC4_CRTC_BLACK_COLOR 0x43cc
+#define mmCRTC5_CRTC_BLACK_COLOR 0x45cc
+#define mmCRTC_BLACK_COLOR_EXT 0x1bcd
+#define mmCRTC0_CRTC_BLACK_COLOR_EXT 0x1bcd
+#define mmCRTC1_CRTC_BLACK_COLOR_EXT 0x1dcd
+#define mmCRTC2_CRTC_BLACK_COLOR_EXT 0x1fcd
+#define mmCRTC3_CRTC_BLACK_COLOR_EXT 0x41cd
+#define mmCRTC4_CRTC_BLACK_COLOR_EXT 0x43cd
+#define mmCRTC5_CRTC_BLACK_COLOR_EXT 0x45cd
+#define mmCRTC_VERTICAL_INTERRUPT0_POSITION 0x1bce
+#define mmCRTC0_CRTC_VERTICAL_INTERRUPT0_POSITION 0x1bce
+#define mmCRTC1_CRTC_VERTICAL_INTERRUPT0_POSITION 0x1dce
+#define mmCRTC2_CRTC_VERTICAL_INTERRUPT0_POSITION 0x1fce
+#define mmCRTC3_CRTC_VERTICAL_INTERRUPT0_POSITION 0x41ce
+#define mmCRTC4_CRTC_VERTICAL_INTERRUPT0_POSITION 0x43ce
+#define mmCRTC5_CRTC_VERTICAL_INTERRUPT0_POSITION 0x45ce
+#define mmCRTC_VERTICAL_INTERRUPT0_CONTROL 0x1bcf
+#define mmCRTC0_CRTC_VERTICAL_INTERRUPT0_CONTROL 0x1bcf
+#define mmCRTC1_CRTC_VERTICAL_INTERRUPT0_CONTROL 0x1dcf
+#define mmCRTC2_CRTC_VERTICAL_INTERRUPT0_CONTROL 0x1fcf
+#define mmCRTC3_CRTC_VERTICAL_INTERRUPT0_CONTROL 0x41cf
+#define mmCRTC4_CRTC_VERTICAL_INTERRUPT0_CONTROL 0x43cf
+#define mmCRTC5_CRTC_VERTICAL_INTERRUPT0_CONTROL 0x45cf
+#define mmCRTC_VERTICAL_INTERRUPT1_POSITION 0x1bd0
+#define mmCRTC0_CRTC_VERTICAL_INTERRUPT1_POSITION 0x1bd0
+#define mmCRTC1_CRTC_VERTICAL_INTERRUPT1_POSITION 0x1dd0
+#define mmCRTC2_CRTC_VERTICAL_INTERRUPT1_POSITION 0x1fd0
+#define mmCRTC3_CRTC_VERTICAL_INTERRUPT1_POSITION 0x41d0
+#define mmCRTC4_CRTC_VERTICAL_INTERRUPT1_POSITION 0x43d0
+#define mmCRTC5_CRTC_VERTICAL_INTERRUPT1_POSITION 0x45d0
+#define mmCRTC_VERTICAL_INTERRUPT1_CONTROL 0x1bd1
+#define mmCRTC0_CRTC_VERTICAL_INTERRUPT1_CONTROL 0x1bd1
+#define mmCRTC1_CRTC_VERTICAL_INTERRUPT1_CONTROL 0x1dd1
+#define mmCRTC2_CRTC_VERTICAL_INTERRUPT1_CONTROL 0x1fd1
+#define mmCRTC3_CRTC_VERTICAL_INTERRUPT1_CONTROL 0x41d1
+#define mmCRTC4_CRTC_VERTICAL_INTERRUPT1_CONTROL 0x43d1
+#define mmCRTC5_CRTC_VERTICAL_INTERRUPT1_CONTROL 0x45d1
+#define mmCRTC_VERTICAL_INTERRUPT2_POSITION 0x1bd2
+#define mmCRTC0_CRTC_VERTICAL_INTERRUPT2_POSITION 0x1bd2
+#define mmCRTC1_CRTC_VERTICAL_INTERRUPT2_POSITION 0x1dd2
+#define mmCRTC2_CRTC_VERTICAL_INTERRUPT2_POSITION 0x1fd2
+#define mmCRTC3_CRTC_VERTICAL_INTERRUPT2_POSITION 0x41d2
+#define mmCRTC4_CRTC_VERTICAL_INTERRUPT2_POSITION 0x43d2
+#define mmCRTC5_CRTC_VERTICAL_INTERRUPT2_POSITION 0x45d2
+#define mmCRTC_VERTICAL_INTERRUPT2_CONTROL 0x1bd3
+#define mmCRTC0_CRTC_VERTICAL_INTERRUPT2_CONTROL 0x1bd3
+#define mmCRTC1_CRTC_VERTICAL_INTERRUPT2_CONTROL 0x1dd3
+#define mmCRTC2_CRTC_VERTICAL_INTERRUPT2_CONTROL 0x1fd3
+#define mmCRTC3_CRTC_VERTICAL_INTERRUPT2_CONTROL 0x41d3
+#define mmCRTC4_CRTC_VERTICAL_INTERRUPT2_CONTROL 0x43d3
+#define mmCRTC5_CRTC_VERTICAL_INTERRUPT2_CONTROL 0x45d3
+#define mmCRTC_CRC_CNTL 0x1bd4
+#define mmCRTC0_CRTC_CRC_CNTL 0x1bd4
+#define mmCRTC1_CRTC_CRC_CNTL 0x1dd4
+#define mmCRTC2_CRTC_CRC_CNTL 0x1fd4
+#define mmCRTC3_CRTC_CRC_CNTL 0x41d4
+#define mmCRTC4_CRTC_CRC_CNTL 0x43d4
+#define mmCRTC5_CRTC_CRC_CNTL 0x45d4
+#define mmCRTC_CRC0_WINDOWA_X_CONTROL 0x1bd5
+#define mmCRTC0_CRTC_CRC0_WINDOWA_X_CONTROL 0x1bd5
+#define mmCRTC1_CRTC_CRC0_WINDOWA_X_CONTROL 0x1dd5
+#define mmCRTC2_CRTC_CRC0_WINDOWA_X_CONTROL 0x1fd5
+#define mmCRTC3_CRTC_CRC0_WINDOWA_X_CONTROL 0x41d5
+#define mmCRTC4_CRTC_CRC0_WINDOWA_X_CONTROL 0x43d5
+#define mmCRTC5_CRTC_CRC0_WINDOWA_X_CONTROL 0x45d5
+#define mmCRTC_CRC0_WINDOWA_Y_CONTROL 0x1bd6
+#define mmCRTC0_CRTC_CRC0_WINDOWA_Y_CONTROL 0x1bd6
+#define mmCRTC1_CRTC_CRC0_WINDOWA_Y_CONTROL 0x1dd6
+#define mmCRTC2_CRTC_CRC0_WINDOWA_Y_CONTROL 0x1fd6
+#define mmCRTC3_CRTC_CRC0_WINDOWA_Y_CONTROL 0x41d6
+#define mmCRTC4_CRTC_CRC0_WINDOWA_Y_CONTROL 0x43d6
+#define mmCRTC5_CRTC_CRC0_WINDOWA_Y_CONTROL 0x45d6
+#define mmCRTC_CRC0_WINDOWB_X_CONTROL 0x1bd7
+#define mmCRTC0_CRTC_CRC0_WINDOWB_X_CONTROL 0x1bd7
+#define mmCRTC1_CRTC_CRC0_WINDOWB_X_CONTROL 0x1dd7
+#define mmCRTC2_CRTC_CRC0_WINDOWB_X_CONTROL 0x1fd7
+#define mmCRTC3_CRTC_CRC0_WINDOWB_X_CONTROL 0x41d7
+#define mmCRTC4_CRTC_CRC0_WINDOWB_X_CONTROL 0x43d7
+#define mmCRTC5_CRTC_CRC0_WINDOWB_X_CONTROL 0x45d7
+#define mmCRTC_CRC0_WINDOWB_Y_CONTROL 0x1bd8
+#define mmCRTC0_CRTC_CRC0_WINDOWB_Y_CONTROL 0x1bd8
+#define mmCRTC1_CRTC_CRC0_WINDOWB_Y_CONTROL 0x1dd8
+#define mmCRTC2_CRTC_CRC0_WINDOWB_Y_CONTROL 0x1fd8
+#define mmCRTC3_CRTC_CRC0_WINDOWB_Y_CONTROL 0x41d8
+#define mmCRTC4_CRTC_CRC0_WINDOWB_Y_CONTROL 0x43d8
+#define mmCRTC5_CRTC_CRC0_WINDOWB_Y_CONTROL 0x45d8
+#define mmCRTC_CRC0_DATA_RG 0x1bd9
+#define mmCRTC0_CRTC_CRC0_DATA_RG 0x1bd9
+#define mmCRTC1_CRTC_CRC0_DATA_RG 0x1dd9
+#define mmCRTC2_CRTC_CRC0_DATA_RG 0x1fd9
+#define mmCRTC3_CRTC_CRC0_DATA_RG 0x41d9
+#define mmCRTC4_CRTC_CRC0_DATA_RG 0x43d9
+#define mmCRTC5_CRTC_CRC0_DATA_RG 0x45d9
+#define mmCRTC_CRC0_DATA_B 0x1bda
+#define mmCRTC0_CRTC_CRC0_DATA_B 0x1bda
+#define mmCRTC1_CRTC_CRC0_DATA_B 0x1dda
+#define mmCRTC2_CRTC_CRC0_DATA_B 0x1fda
+#define mmCRTC3_CRTC_CRC0_DATA_B 0x41da
+#define mmCRTC4_CRTC_CRC0_DATA_B 0x43da
+#define mmCRTC5_CRTC_CRC0_DATA_B 0x45da
+#define mmCRTC_CRC1_WINDOWA_X_CONTROL 0x1bdb
+#define mmCRTC0_CRTC_CRC1_WINDOWA_X_CONTROL 0x1bdb
+#define mmCRTC1_CRTC_CRC1_WINDOWA_X_CONTROL 0x1ddb
+#define mmCRTC2_CRTC_CRC1_WINDOWA_X_CONTROL 0x1fdb
+#define mmCRTC3_CRTC_CRC1_WINDOWA_X_CONTROL 0x41db
+#define mmCRTC4_CRTC_CRC1_WINDOWA_X_CONTROL 0x43db
+#define mmCRTC5_CRTC_CRC1_WINDOWA_X_CONTROL 0x45db
+#define mmCRTC_CRC1_WINDOWA_Y_CONTROL 0x1bdc
+#define mmCRTC0_CRTC_CRC1_WINDOWA_Y_CONTROL 0x1bdc
+#define mmCRTC1_CRTC_CRC1_WINDOWA_Y_CONTROL 0x1ddc
+#define mmCRTC2_CRTC_CRC1_WINDOWA_Y_CONTROL 0x1fdc
+#define mmCRTC3_CRTC_CRC1_WINDOWA_Y_CONTROL 0x41dc
+#define mmCRTC4_CRTC_CRC1_WINDOWA_Y_CONTROL 0x43dc
+#define mmCRTC5_CRTC_CRC1_WINDOWA_Y_CONTROL 0x45dc
+#define mmCRTC_CRC1_WINDOWB_X_CONTROL 0x1bdd
+#define mmCRTC0_CRTC_CRC1_WINDOWB_X_CONTROL 0x1bdd
+#define mmCRTC1_CRTC_CRC1_WINDOWB_X_CONTROL 0x1ddd
+#define mmCRTC2_CRTC_CRC1_WINDOWB_X_CONTROL 0x1fdd
+#define mmCRTC3_CRTC_CRC1_WINDOWB_X_CONTROL 0x41dd
+#define mmCRTC4_CRTC_CRC1_WINDOWB_X_CONTROL 0x43dd
+#define mmCRTC5_CRTC_CRC1_WINDOWB_X_CONTROL 0x45dd
+#define mmCRTC_CRC1_WINDOWB_Y_CONTROL 0x1bde
+#define mmCRTC0_CRTC_CRC1_WINDOWB_Y_CONTROL 0x1bde
+#define mmCRTC1_CRTC_CRC1_WINDOWB_Y_CONTROL 0x1dde
+#define mmCRTC2_CRTC_CRC1_WINDOWB_Y_CONTROL 0x1fde
+#define mmCRTC3_CRTC_CRC1_WINDOWB_Y_CONTROL 0x41de
+#define mmCRTC4_CRTC_CRC1_WINDOWB_Y_CONTROL 0x43de
+#define mmCRTC5_CRTC_CRC1_WINDOWB_Y_CONTROL 0x45de
+#define mmCRTC_CRC1_DATA_RG 0x1bdf
+#define mmCRTC0_CRTC_CRC1_DATA_RG 0x1bdf
+#define mmCRTC1_CRTC_CRC1_DATA_RG 0x1ddf
+#define mmCRTC2_CRTC_CRC1_DATA_RG 0x1fdf
+#define mmCRTC3_CRTC_CRC1_DATA_RG 0x41df
+#define mmCRTC4_CRTC_CRC1_DATA_RG 0x43df
+#define mmCRTC5_CRTC_CRC1_DATA_RG 0x45df
+#define mmCRTC_CRC1_DATA_B 0x1be0
+#define mmCRTC0_CRTC_CRC1_DATA_B 0x1be0
+#define mmCRTC1_CRTC_CRC1_DATA_B 0x1de0
+#define mmCRTC2_CRTC_CRC1_DATA_B 0x1fe0
+#define mmCRTC3_CRTC_CRC1_DATA_B 0x41e0
+#define mmCRTC4_CRTC_CRC1_DATA_B 0x43e0
+#define mmCRTC5_CRTC_CRC1_DATA_B 0x45e0
+#define mmCRTC_EXT_TIMING_SYNC_CONTROL 0x1be1
+#define mmCRTC0_CRTC_EXT_TIMING_SYNC_CONTROL 0x1be1
+#define mmCRTC1_CRTC_EXT_TIMING_SYNC_CONTROL 0x1de1
+#define mmCRTC2_CRTC_EXT_TIMING_SYNC_CONTROL 0x1fe1
+#define mmCRTC3_CRTC_EXT_TIMING_SYNC_CONTROL 0x41e1
+#define mmCRTC4_CRTC_EXT_TIMING_SYNC_CONTROL 0x43e1
+#define mmCRTC5_CRTC_EXT_TIMING_SYNC_CONTROL 0x45e1
+#define mmCRTC_EXT_TIMING_SYNC_WINDOW_START 0x1be2
+#define mmCRTC0_CRTC_EXT_TIMING_SYNC_WINDOW_START 0x1be2
+#define mmCRTC1_CRTC_EXT_TIMING_SYNC_WINDOW_START 0x1de2
+#define mmCRTC2_CRTC_EXT_TIMING_SYNC_WINDOW_START 0x1fe2
+#define mmCRTC3_CRTC_EXT_TIMING_SYNC_WINDOW_START 0x41e2
+#define mmCRTC4_CRTC_EXT_TIMING_SYNC_WINDOW_START 0x43e2
+#define mmCRTC5_CRTC_EXT_TIMING_SYNC_WINDOW_START 0x45e2
+#define mmCRTC_EXT_TIMING_SYNC_WINDOW_END 0x1be3
+#define mmCRTC0_CRTC_EXT_TIMING_SYNC_WINDOW_END 0x1be3
+#define mmCRTC1_CRTC_EXT_TIMING_SYNC_WINDOW_END 0x1de3
+#define mmCRTC2_CRTC_EXT_TIMING_SYNC_WINDOW_END 0x1fe3
+#define mmCRTC3_CRTC_EXT_TIMING_SYNC_WINDOW_END 0x41e3
+#define mmCRTC4_CRTC_EXT_TIMING_SYNC_WINDOW_END 0x43e3
+#define mmCRTC5_CRTC_EXT_TIMING_SYNC_WINDOW_END 0x45e3
+#define mmCRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL 0x1be4
+#define mmCRTC0_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL 0x1be4
+#define mmCRTC1_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL 0x1de4
+#define mmCRTC2_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL 0x1fe4
+#define mmCRTC3_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL 0x41e4
+#define mmCRTC4_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL 0x43e4
+#define mmCRTC5_CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL 0x45e4
+#define mmCRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL 0x1be5
+#define mmCRTC0_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL 0x1be5
+#define mmCRTC1_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL 0x1de5
+#define mmCRTC2_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL 0x1fe5
+#define mmCRTC3_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL 0x41e5
+#define mmCRTC4_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL 0x43e5
+#define mmCRTC5_CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL 0x45e5
+#define mmCRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL 0x1be6
+#define mmCRTC0_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL 0x1be6
+#define mmCRTC1_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL 0x1de6
+#define mmCRTC2_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL 0x1fe6
+#define mmCRTC3_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL 0x41e6
+#define mmCRTC4_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL 0x43e6
+#define mmCRTC5_CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL 0x45e6
+#define mmCRTC_STATIC_SCREEN_CONTROL 0x1be7
+#define mmCRTC0_CRTC_STATIC_SCREEN_CONTROL 0x1be7
+#define mmCRTC1_CRTC_STATIC_SCREEN_CONTROL 0x1de7
+#define mmCRTC2_CRTC_STATIC_SCREEN_CONTROL 0x1fe7
+#define mmCRTC3_CRTC_STATIC_SCREEN_CONTROL 0x41e7
+#define mmCRTC4_CRTC_STATIC_SCREEN_CONTROL 0x43e7
+#define mmCRTC5_CRTC_STATIC_SCREEN_CONTROL 0x45e7
+#define mmCRTC_3D_STRUCTURE_CONTROL 0x1b78
+#define mmCRTC0_CRTC_3D_STRUCTURE_CONTROL 0x1b78
+#define mmCRTC1_CRTC_3D_STRUCTURE_CONTROL 0x1d78
+#define mmCRTC2_CRTC_3D_STRUCTURE_CONTROL 0x1f78
+#define mmCRTC3_CRTC_3D_STRUCTURE_CONTROL 0x4178
+#define mmCRTC4_CRTC_3D_STRUCTURE_CONTROL 0x4378
+#define mmCRTC5_CRTC_3D_STRUCTURE_CONTROL 0x4578
+#define mmCRTC_GSL_VSYNC_GAP 0x1b79
+#define mmCRTC0_CRTC_GSL_VSYNC_GAP 0x1b79
+#define mmCRTC1_CRTC_GSL_VSYNC_GAP 0x1d79
+#define mmCRTC2_CRTC_GSL_VSYNC_GAP 0x1f79
+#define mmCRTC3_CRTC_GSL_VSYNC_GAP 0x4179
+#define mmCRTC4_CRTC_GSL_VSYNC_GAP 0x4379
+#define mmCRTC5_CRTC_GSL_VSYNC_GAP 0x4579
+#define mmCRTC_GSL_WINDOW 0x1b7a
+#define mmCRTC0_CRTC_GSL_WINDOW 0x1b7a
+#define mmCRTC1_CRTC_GSL_WINDOW 0x1d7a
+#define mmCRTC2_CRTC_GSL_WINDOW 0x1f7a
+#define mmCRTC3_CRTC_GSL_WINDOW 0x417a
+#define mmCRTC4_CRTC_GSL_WINDOW 0x437a
+#define mmCRTC5_CRTC_GSL_WINDOW 0x457a
+#define mmCRTC_GSL_CONTROL 0x1b7b
+#define mmCRTC0_CRTC_GSL_CONTROL 0x1b7b
+#define mmCRTC1_CRTC_GSL_CONTROL 0x1d7b
+#define mmCRTC2_CRTC_GSL_CONTROL 0x1f7b
+#define mmCRTC3_CRTC_GSL_CONTROL 0x417b
+#define mmCRTC4_CRTC_GSL_CONTROL 0x437b
+#define mmCRTC5_CRTC_GSL_CONTROL 0x457b
+#define mmCRTC_TEST_DEBUG_INDEX 0x1bc6
+#define mmCRTC0_CRTC_TEST_DEBUG_INDEX 0x1bc6
+#define mmCRTC1_CRTC_TEST_DEBUG_INDEX 0x1dc6
+#define mmCRTC2_CRTC_TEST_DEBUG_INDEX 0x1fc6
+#define mmCRTC3_CRTC_TEST_DEBUG_INDEX 0x41c6
+#define mmCRTC4_CRTC_TEST_DEBUG_INDEX 0x43c6
+#define mmCRTC5_CRTC_TEST_DEBUG_INDEX 0x45c6
+#define mmCRTC_TEST_DEBUG_DATA 0x1bc7
+#define mmCRTC0_CRTC_TEST_DEBUG_DATA 0x1bc7
+#define mmCRTC1_CRTC_TEST_DEBUG_DATA 0x1dc7
+#define mmCRTC2_CRTC_TEST_DEBUG_DATA 0x1fc7
+#define mmCRTC3_CRTC_TEST_DEBUG_DATA 0x41c7
+#define mmCRTC4_CRTC_TEST_DEBUG_DATA 0x43c7
+#define mmCRTC5_CRTC_TEST_DEBUG_DATA 0x45c7
+#define mmDAC_ENABLE 0x16aa
+#define mmDAC_SOURCE_SELECT 0x16ab
+#define mmDAC_CRC_EN 0x16ac
+#define mmDAC_CRC_CONTROL 0x16ad
+#define mmDAC_CRC_SIG_RGB_MASK 0x16ae
+#define mmDAC_CRC_SIG_CONTROL_MASK 0x16af
+#define mmDAC_CRC_SIG_RGB 0x16b0
+#define mmDAC_CRC_SIG_CONTROL 0x16b1
+#define mmDAC_SYNC_TRISTATE_CONTROL 0x16b2
+#define mmDAC_STEREOSYNC_SELECT 0x16b3
+#define mmDAC_AUTODETECT_CONTROL 0x16b4
+#define mmDAC_AUTODETECT_CONTROL2 0x16b5
+#define mmDAC_AUTODETECT_CONTROL3 0x16b6
+#define mmDAC_AUTODETECT_STATUS 0x16b7
+#define mmDAC_AUTODETECT_INT_CONTROL 0x16b8
+#define mmDAC_FORCE_OUTPUT_CNTL 0x16b9
+#define mmDAC_FORCE_DATA 0x16ba
+#define mmDAC_POWERDOWN 0x16bb
+#define mmDAC_CONTROL 0x16bc
+#define mmDAC_COMPARATOR_ENABLE 0x16bd
+#define mmDAC_COMPARATOR_OUTPUT 0x16be
+#define mmDAC_PWR_CNTL 0x16bf
+#define mmDAC_DFT_CONFIG 0x16c0
+#define mmDAC_FIFO_STATUS 0x16c1
+#define mmDAC_TEST_DEBUG_INDEX 0x16c2
+#define mmDAC_TEST_DEBUG_DATA 0x16c3
+#define mmPERFCOUNTER_CNTL 0x170
+#define mmDC_PERFMON0_PERFCOUNTER_CNTL 0x170
+#define mmDC_PERFMON1_PERFCOUNTER_CNTL 0x358
+#define mmDC_PERFMON2_PERFCOUNTER_CNTL 0x364
+#define mmDC_PERFMON3_PERFCOUNTER_CNTL 0x18c8
+#define mmDC_PERFMON4_PERFCOUNTER_CNTL 0x1b24
+#define mmDC_PERFMON5_PERFCOUNTER_CNTL 0x1d24
+#define mmDC_PERFMON6_PERFCOUNTER_CNTL 0x1f24
+#define mmDC_PERFMON7_PERFCOUNTER_CNTL 0x4124
+#define mmDC_PERFMON8_PERFCOUNTER_CNTL 0x4324
+#define mmDC_PERFMON9_PERFCOUNTER_CNTL 0x4524
+#define mmDC_PERFMON10_PERFCOUNTER_CNTL 0x4724
+#define mmDC_PERFMON11_PERFCOUNTER_CNTL 0x59a0
+#define mmDC_PERFMON12_PERFCOUNTER_CNTL 0x5f68
+#define mmDC_PERFMON13_PERFCOUNTER_CNTL 0x9924
+#define mmPERFCOUNTER_STATE 0x171
+#define mmDC_PERFMON0_PERFCOUNTER_STATE 0x171
+#define mmDC_PERFMON1_PERFCOUNTER_STATE 0x359
+#define mmDC_PERFMON2_PERFCOUNTER_STATE 0x365
+#define mmDC_PERFMON3_PERFCOUNTER_STATE 0x18c9
+#define mmDC_PERFMON4_PERFCOUNTER_STATE 0x1b25
+#define mmDC_PERFMON5_PERFCOUNTER_STATE 0x1d25
+#define mmDC_PERFMON6_PERFCOUNTER_STATE 0x1f25
+#define mmDC_PERFMON7_PERFCOUNTER_STATE 0x4125
+#define mmDC_PERFMON8_PERFCOUNTER_STATE 0x4325
+#define mmDC_PERFMON9_PERFCOUNTER_STATE 0x4525
+#define mmDC_PERFMON10_PERFCOUNTER_STATE 0x4725
+#define mmDC_PERFMON11_PERFCOUNTER_STATE 0x59a1
+#define mmDC_PERFMON12_PERFCOUNTER_STATE 0x5f69
+#define mmDC_PERFMON13_PERFCOUNTER_STATE 0x9925
+#define mmPERFMON_CNTL 0x173
+#define mmDC_PERFMON0_PERFMON_CNTL 0x173
+#define mmDC_PERFMON1_PERFMON_CNTL 0x35b
+#define mmDC_PERFMON2_PERFMON_CNTL 0x367
+#define mmDC_PERFMON3_PERFMON_CNTL 0x18cb
+#define mmDC_PERFMON4_PERFMON_CNTL 0x1b27
+#define mmDC_PERFMON5_PERFMON_CNTL 0x1d27
+#define mmDC_PERFMON6_PERFMON_CNTL 0x1f27
+#define mmDC_PERFMON7_PERFMON_CNTL 0x4127
+#define mmDC_PERFMON8_PERFMON_CNTL 0x4327
+#define mmDC_PERFMON9_PERFMON_CNTL 0x4527
+#define mmDC_PERFMON10_PERFMON_CNTL 0x4727
+#define mmDC_PERFMON11_PERFMON_CNTL 0x59a3
+#define mmDC_PERFMON12_PERFMON_CNTL 0x5f6b
+#define mmDC_PERFMON13_PERFMON_CNTL 0x9927
+#define mmPERFMON_CNTL2 0x17a
+#define mmDC_PERFMON0_PERFMON_CNTL2 0x17a
+#define mmDC_PERFMON1_PERFMON_CNTL2 0x362
+#define mmDC_PERFMON2_PERFMON_CNTL2 0x36e
+#define mmDC_PERFMON3_PERFMON_CNTL2 0x18d2
+#define mmDC_PERFMON4_PERFMON_CNTL2 0x1b2e
+#define mmDC_PERFMON5_PERFMON_CNTL2 0x1d2e
+#define mmDC_PERFMON6_PERFMON_CNTL2 0x1f2e
+#define mmDC_PERFMON7_PERFMON_CNTL2 0x412e
+#define mmDC_PERFMON8_PERFMON_CNTL2 0x432e
+#define mmDC_PERFMON9_PERFMON_CNTL2 0x452e
+#define mmDC_PERFMON10_PERFMON_CNTL2 0x472e
+#define mmDC_PERFMON11_PERFMON_CNTL2 0x59aa
+#define mmDC_PERFMON12_PERFMON_CNTL2 0x5f72
+#define mmDC_PERFMON13_PERFMON_CNTL2 0x992e
+#define mmPERFMON_CVALUE_INT_MISC 0x172
+#define mmDC_PERFMON0_PERFMON_CVALUE_INT_MISC 0x172
+#define mmDC_PERFMON1_PERFMON_CVALUE_INT_MISC 0x35a
+#define mmDC_PERFMON2_PERFMON_CVALUE_INT_MISC 0x366
+#define mmDC_PERFMON3_PERFMON_CVALUE_INT_MISC 0x18ca
+#define mmDC_PERFMON4_PERFMON_CVALUE_INT_MISC 0x1b26
+#define mmDC_PERFMON5_PERFMON_CVALUE_INT_MISC 0x1d26
+#define mmDC_PERFMON6_PERFMON_CVALUE_INT_MISC 0x1f26
+#define mmDC_PERFMON7_PERFMON_CVALUE_INT_MISC 0x4126
+#define mmDC_PERFMON8_PERFMON_CVALUE_INT_MISC 0x4326
+#define mmDC_PERFMON9_PERFMON_CVALUE_INT_MISC 0x4526
+#define mmDC_PERFMON10_PERFMON_CVALUE_INT_MISC 0x4726
+#define mmDC_PERFMON11_PERFMON_CVALUE_INT_MISC 0x59a2
+#define mmDC_PERFMON12_PERFMON_CVALUE_INT_MISC 0x5f6a
+#define mmDC_PERFMON13_PERFMON_CVALUE_INT_MISC 0x9926
+#define mmPERFMON_CVALUE_LOW 0x174
+#define mmDC_PERFMON0_PERFMON_CVALUE_LOW 0x174
+#define mmDC_PERFMON1_PERFMON_CVALUE_LOW 0x35c
+#define mmDC_PERFMON2_PERFMON_CVALUE_LOW 0x368
+#define mmDC_PERFMON3_PERFMON_CVALUE_LOW 0x18cc
+#define mmDC_PERFMON4_PERFMON_CVALUE_LOW 0x1b28
+#define mmDC_PERFMON5_PERFMON_CVALUE_LOW 0x1d28
+#define mmDC_PERFMON6_PERFMON_CVALUE_LOW 0x1f28
+#define mmDC_PERFMON7_PERFMON_CVALUE_LOW 0x4128
+#define mmDC_PERFMON8_PERFMON_CVALUE_LOW 0x4328
+#define mmDC_PERFMON9_PERFMON_CVALUE_LOW 0x4528
+#define mmDC_PERFMON10_PERFMON_CVALUE_LOW 0x4728
+#define mmDC_PERFMON11_PERFMON_CVALUE_LOW 0x59a4
+#define mmDC_PERFMON12_PERFMON_CVALUE_LOW 0x5f6c
+#define mmDC_PERFMON13_PERFMON_CVALUE_LOW 0x9928
+#define mmPERFMON_HI 0x175
+#define mmDC_PERFMON0_PERFMON_HI 0x175
+#define mmDC_PERFMON1_PERFMON_HI 0x35d
+#define mmDC_PERFMON2_PERFMON_HI 0x369
+#define mmDC_PERFMON3_PERFMON_HI 0x18cd
+#define mmDC_PERFMON4_PERFMON_HI 0x1b29
+#define mmDC_PERFMON5_PERFMON_HI 0x1d29
+#define mmDC_PERFMON6_PERFMON_HI 0x1f29
+#define mmDC_PERFMON7_PERFMON_HI 0x4129
+#define mmDC_PERFMON8_PERFMON_HI 0x4329
+#define mmDC_PERFMON9_PERFMON_HI 0x4529
+#define mmDC_PERFMON10_PERFMON_HI 0x4729
+#define mmDC_PERFMON11_PERFMON_HI 0x59a5
+#define mmDC_PERFMON12_PERFMON_HI 0x5f6d
+#define mmDC_PERFMON13_PERFMON_HI 0x9929
+#define mmPERFMON_LOW 0x176
+#define mmDC_PERFMON0_PERFMON_LOW 0x176
+#define mmDC_PERFMON1_PERFMON_LOW 0x35e
+#define mmDC_PERFMON2_PERFMON_LOW 0x36a
+#define mmDC_PERFMON3_PERFMON_LOW 0x18ce
+#define mmDC_PERFMON4_PERFMON_LOW 0x1b2a
+#define mmDC_PERFMON5_PERFMON_LOW 0x1d2a
+#define mmDC_PERFMON6_PERFMON_LOW 0x1f2a
+#define mmDC_PERFMON7_PERFMON_LOW 0x412a
+#define mmDC_PERFMON8_PERFMON_LOW 0x432a
+#define mmDC_PERFMON9_PERFMON_LOW 0x452a
+#define mmDC_PERFMON10_PERFMON_LOW 0x472a
+#define mmDC_PERFMON11_PERFMON_LOW 0x59a6
+#define mmDC_PERFMON12_PERFMON_LOW 0x5f6e
+#define mmDC_PERFMON13_PERFMON_LOW 0x992a
+#define mmPERFMON_TEST_DEBUG_INDEX 0x177
+#define mmDC_PERFMON0_PERFMON_TEST_DEBUG_INDEX 0x177
+#define mmDC_PERFMON1_PERFMON_TEST_DEBUG_INDEX 0x35f
+#define mmDC_PERFMON2_PERFMON_TEST_DEBUG_INDEX 0x36b
+#define mmDC_PERFMON3_PERFMON_TEST_DEBUG_INDEX 0x18cf
+#define mmDC_PERFMON4_PERFMON_TEST_DEBUG_INDEX 0x1b2b
+#define mmDC_PERFMON5_PERFMON_TEST_DEBUG_INDEX 0x1d2b
+#define mmDC_PERFMON6_PERFMON_TEST_DEBUG_INDEX 0x1f2b
+#define mmDC_PERFMON7_PERFMON_TEST_DEBUG_INDEX 0x412b
+#define mmDC_PERFMON8_PERFMON_TEST_DEBUG_INDEX 0x432b
+#define mmDC_PERFMON9_PERFMON_TEST_DEBUG_INDEX 0x452b
+#define mmDC_PERFMON10_PERFMON_TEST_DEBUG_INDEX 0x472b
+#define mmDC_PERFMON11_PERFMON_TEST_DEBUG_INDEX 0x59a7
+#define mmDC_PERFMON12_PERFMON_TEST_DEBUG_INDEX 0x5f6f
+#define mmDC_PERFMON13_PERFMON_TEST_DEBUG_INDEX 0x992b
+#define mmPERFMON_TEST_DEBUG_DATA 0x178
+#define mmDC_PERFMON0_PERFMON_TEST_DEBUG_DATA 0x178
+#define mmDC_PERFMON1_PERFMON_TEST_DEBUG_DATA 0x360
+#define mmDC_PERFMON2_PERFMON_TEST_DEBUG_DATA 0x36c
+#define mmDC_PERFMON3_PERFMON_TEST_DEBUG_DATA 0x18d0
+#define mmDC_PERFMON4_PERFMON_TEST_DEBUG_DATA 0x1b2c
+#define mmDC_PERFMON5_PERFMON_TEST_DEBUG_DATA 0x1d2c
+#define mmDC_PERFMON6_PERFMON_TEST_DEBUG_DATA 0x1f2c
+#define mmDC_PERFMON7_PERFMON_TEST_DEBUG_DATA 0x412c
+#define mmDC_PERFMON8_PERFMON_TEST_DEBUG_DATA 0x432c
+#define mmDC_PERFMON9_PERFMON_TEST_DEBUG_DATA 0x452c
+#define mmDC_PERFMON10_PERFMON_TEST_DEBUG_DATA 0x472c
+#define mmDC_PERFMON11_PERFMON_TEST_DEBUG_DATA 0x59a8
+#define mmDC_PERFMON12_PERFMON_TEST_DEBUG_DATA 0x5f70
+#define mmDC_PERFMON13_PERFMON_TEST_DEBUG_DATA 0x992c
+#define mmREFCLK_CNTL 0x109
+#define mmDCCG_CBUS_ANTIGLITCH_RESETB 0x15c
+#define mmDCCG_CBUS_SPARE 0x15d
+#define mmDCCG_CBUS_WRCMD_DELAY 0x110
+#define mmDPREFCLK_CNTL 0x118
+#define mmDCE_VERSION 0x11e
+#define mmAVSYNC_COUNTER_WRITE 0x12a
+#define mmAVSYNC_COUNTER_CONTROL 0x12b
+#define mmAVSYNC_COUNTER_READ 0x12f
+#define mmDCCG_GTC_CNTL 0x120
+#define mmDCCG_GTC_DTO_INCR 0x121
+#define mmDCCG_GTC_DTO_MODULO 0x122
+#define mmDCCG_GTC_CURRENT 0x123
+#define mmDCCG_DS_DTO_INCR 0x113
+#define mmDCCG_DS_DTO_MODULO 0x114
+#define mmDCCG_DS_CNTL 0x115
+#define mmDCCG_DS_HW_CAL_INTERVAL 0x116
+#define mmDCCG_DS_DEBUG_CNTL 0x112
+#define mmDMCU_SMU_INTERRUPT_CNTL 0x12c
+#define mmSMU_CONTROL 0x12d
+#define mmSMU_INTERRUPT_CONTROL 0x12e
+#define mmDAC_CLK_ENABLE 0x128
+#define mmDVO_CLK_ENABLE 0x129
+#define mmDCCG_GATE_DISABLE_CNTL 0x134
+#define mmDCCG_GATE_DISABLE_CNTL2 0x13c
+#define mmDISPCLK_CGTT_BLK_CTRL_REG 0x135
+#define mmSCLK_CGTT_BLK_CTRL_REG 0x136
+#define mmDPREFCLK_CGTT_BLK_CTRL_REG 0x108
+#define mmREFCLK_CGTT_BLK_CTRL_REG 0x10b
+#define mmSYMCLK_CGTT_BLK_CTRL_REG 0x13d
+#define mmDCCG_CAC_STATUS 0x137
+#define mmPIXCLK0_RESYNC_CNTL 0x13a
+#define mmPHYPLLA_PIXCLK_RESYNC_CNTL 0x100
+#define mmPHYPLLB_PIXCLK_RESYNC_CNTL 0x101
+#define mmPHYPLLC_PIXCLK_RESYNC_CNTL 0x102
+#define mmPHYPLLD_PIXCLK_RESYNC_CNTL 0x103
+#define mmPHYPLLE_PIXCLK_RESYNC_CNTL 0x10c
+#define mmPHYPLLF_PIXCLK_RESYNC_CNTL 0x13e
+#define mmMICROSECOND_TIME_BASE_DIV 0x13b
+#define mmDCCG_DISP_CNTL_REG 0x13f
+#define mmMILLISECOND_TIME_BASE_DIV 0x130
+#define mmDISPCLK_FREQ_CHANGE_CNTL 0x131
+#define mmDC_MEM_GLOBAL_PWR_REQ_CNTL 0x132
+#define mmDCCG_PERFMON_CNTL 0x133
+#define mmDCCG_PERFMON_CNTL2 0x10e
+#define mmCRTC0_PIXEL_RATE_CNTL 0x140
+#define mmDP_DTO0_PHASE 0x141
+#define mmDP_DTO0_MODULO 0x142
+#define mmCRTC0_PHYPLL_PIXEL_RATE_CNTL 0x143
+#define mmCRTC1_PIXEL_RATE_CNTL 0x144
+#define mmDP_DTO1_PHASE 0x145
+#define mmDP_DTO1_MODULO 0x146
+#define mmCRTC1_PHYPLL_PIXEL_RATE_CNTL 0x147
+#define mmCRTC2_PIXEL_RATE_CNTL 0x148
+#define mmDP_DTO2_PHASE 0x149
+#define mmDP_DTO2_MODULO 0x14a
+#define mmCRTC2_PHYPLL_PIXEL_RATE_CNTL 0x14b
+#define mmCRTC3_PIXEL_RATE_CNTL 0x14c
+#define mmDP_DTO3_PHASE 0x14d
+#define mmDP_DTO3_MODULO 0x14e
+#define mmCRTC3_PHYPLL_PIXEL_RATE_CNTL 0x14f
+#define mmCRTC4_PIXEL_RATE_CNTL 0x150
+#define mmDP_DTO4_PHASE 0x151
+#define mmDP_DTO4_MODULO 0x152
+#define mmCRTC4_PHYPLL_PIXEL_RATE_CNTL 0x153
+#define mmCRTC5_PIXEL_RATE_CNTL 0x154
+#define mmDP_DTO5_PHASE 0x155
+#define mmDP_DTO5_MODULO 0x156
+#define mmCRTC5_PHYPLL_PIXEL_RATE_CNTL 0x157
+#define mmDCCG_SOFT_RESET 0x15f
+#define mmSYMCLKA_CLOCK_ENABLE 0x160
+#define mmSYMCLKB_CLOCK_ENABLE 0x161
+#define mmSYMCLKC_CLOCK_ENABLE 0x162
+#define mmSYMCLKD_CLOCK_ENABLE 0x163
+#define mmSYMCLKE_CLOCK_ENABLE 0x164
+#define mmSYMCLKF_CLOCK_ENABLE 0x165
+#define mmDPDBG_CLK_FORCE_CONTROL 0x10d
+#define mmDCCG_AUDIO_DTO_SOURCE 0x16b
+#define mmDCCG_AUDIO_DTO0_PHASE 0x16c
+#define mmDCCG_AUDIO_DTO0_MODULE 0x16d
+#define mmDCCG_AUDIO_DTO1_PHASE 0x16e
+#define mmDCCG_AUDIO_DTO1_MODULE 0x16f
+#define mmDCCG_TEST_DEBUG_INDEX 0x17c
+#define mmDCCG_TEST_DEBUG_DATA 0x17d
+#define mmDCCG_TEST_CLK_SEL 0x17e
+#define mmCPLL_MACRO_CNTL_RESERVED0 0x5fd0
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED0 0x5fd0
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED0 0x5fdc
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED0 0x5fe8
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED0 0x5ff4
+#define mmCPLL_MACRO_CNTL_RESERVED1 0x5fd1
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED1 0x5fd1
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED1 0x5fdd
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED1 0x5fe9
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED1 0x5ff5
+#define mmCPLL_MACRO_CNTL_RESERVED2 0x5fd2
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED2 0x5fd2
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED2 0x5fde
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED2 0x5fea
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED2 0x5ff6
+#define mmCPLL_MACRO_CNTL_RESERVED3 0x5fd3
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED3 0x5fd3
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED3 0x5fdf
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED3 0x5feb
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED3 0x5ff7
+#define mmCPLL_MACRO_CNTL_RESERVED4 0x5fd4
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED4 0x5fd4
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED4 0x5fe0
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED4 0x5fec
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED4 0x5ff8
+#define mmCPLL_MACRO_CNTL_RESERVED5 0x5fd5
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED5 0x5fd5
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED5 0x5fe1
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED5 0x5fed
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED5 0x5ff9
+#define mmCPLL_MACRO_CNTL_RESERVED6 0x5fd6
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED6 0x5fd6
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED6 0x5fe2
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED6 0x5fee
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED6 0x5ffa
+#define mmCPLL_MACRO_CNTL_RESERVED7 0x5fd7
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED7 0x5fd7
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED7 0x5fe3
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED7 0x5fef
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED7 0x5ffb
+#define mmCPLL_MACRO_CNTL_RESERVED8 0x5fd8
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED8 0x5fd8
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED8 0x5fe4
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED8 0x5ff0
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED8 0x5ffc
+#define mmCPLL_MACRO_CNTL_RESERVED9 0x5fd9
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED9 0x5fd9
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED9 0x5fe5
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED9 0x5ff1
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED9 0x5ffd
+#define mmCPLL_MACRO_CNTL_RESERVED10 0x5fda
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED10 0x5fda
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED10 0x5fe6
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED10 0x5ff2
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED10 0x5ffe
+#define mmCPLL_MACRO_CNTL_RESERVED11 0x5fdb
+#define mmDCCG_CPLL0_CPLL_MACRO_CNTL_RESERVED11 0x5fdb
+#define mmDCCG_CPLL1_CPLL_MACRO_CNTL_RESERVED11 0x5fe7
+#define mmDCCG_CPLL2_CPLL_MACRO_CNTL_RESERVED11 0x5ff3
+#define mmDCCG_CPLL3_CPLL_MACRO_CNTL_RESERVED11 0x5fff
+#define mmPLL_MACRO_CNTL_RESERVED0 0x1700
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED0 0x1700
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED0 0x172a
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED0 0x1754
+#define mmPLL_MACRO_CNTL_RESERVED1 0x1701
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED1 0x1701
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED1 0x172b
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED1 0x1755
+#define mmPLL_MACRO_CNTL_RESERVED2 0x1702
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED2 0x1702
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED2 0x172c
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED2 0x1756
+#define mmPLL_MACRO_CNTL_RESERVED3 0x1703
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED3 0x1703
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED3 0x172d
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED3 0x1757
+#define mmPLL_MACRO_CNTL_RESERVED4 0x1704
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED4 0x1704
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED4 0x172e
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED4 0x1758
+#define mmPLL_MACRO_CNTL_RESERVED5 0x1705
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED5 0x1705
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED5 0x172f
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED5 0x1759
+#define mmPLL_MACRO_CNTL_RESERVED6 0x1706
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED6 0x1706
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED6 0x1730
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED6 0x175a
+#define mmPLL_MACRO_CNTL_RESERVED7 0x1707
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED7 0x1707
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED7 0x1731
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED7 0x175b
+#define mmPLL_MACRO_CNTL_RESERVED8 0x1708
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED8 0x1708
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED8 0x1732
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED8 0x175c
+#define mmPLL_MACRO_CNTL_RESERVED9 0x1709
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED9 0x1709
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED9 0x1733
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED9 0x175d
+#define mmPLL_MACRO_CNTL_RESERVED10 0x170a
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED10 0x170a
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED10 0x1734
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED10 0x175e
+#define mmPLL_MACRO_CNTL_RESERVED11 0x170b
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED11 0x170b
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED11 0x1735
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED11 0x175f
+#define mmPLL_MACRO_CNTL_RESERVED12 0x170c
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED12 0x170c
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED12 0x1736
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED12 0x1760
+#define mmPLL_MACRO_CNTL_RESERVED13 0x170d
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED13 0x170d
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED13 0x1737
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED13 0x1761
+#define mmPLL_MACRO_CNTL_RESERVED14 0x170e
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED14 0x170e
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED14 0x1738
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED14 0x1762
+#define mmPLL_MACRO_CNTL_RESERVED15 0x170f
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED15 0x170f
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED15 0x1739
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED15 0x1763
+#define mmPLL_MACRO_CNTL_RESERVED16 0x1710
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED16 0x1710
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED16 0x173a
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED16 0x1764
+#define mmPLL_MACRO_CNTL_RESERVED17 0x1711
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED17 0x1711
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED17 0x173b
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED17 0x1765
+#define mmPLL_MACRO_CNTL_RESERVED18 0x1712
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED18 0x1712
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED18 0x173c
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED18 0x1766
+#define mmPLL_MACRO_CNTL_RESERVED19 0x1713
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED19 0x1713
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED19 0x173d
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED19 0x1767
+#define mmPLL_MACRO_CNTL_RESERVED20 0x1714
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED20 0x1714
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED20 0x173e
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED20 0x1768
+#define mmPLL_MACRO_CNTL_RESERVED21 0x1715
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED21 0x1715
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED21 0x173f
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED21 0x1769
+#define mmPLL_MACRO_CNTL_RESERVED22 0x1716
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED22 0x1716
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED22 0x1740
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED22 0x176a
+#define mmPLL_MACRO_CNTL_RESERVED23 0x1717
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED23 0x1717
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED23 0x1741
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED23 0x176b
+#define mmPLL_MACRO_CNTL_RESERVED24 0x1718
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED24 0x1718
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED24 0x1742
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED24 0x176c
+#define mmPLL_MACRO_CNTL_RESERVED25 0x1719
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED25 0x1719
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED25 0x1743
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED25 0x176d
+#define mmPLL_MACRO_CNTL_RESERVED26 0x171a
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED26 0x171a
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED26 0x1744
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED26 0x176e
+#define mmPLL_MACRO_CNTL_RESERVED27 0x171b
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED27 0x171b
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED27 0x1745
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED27 0x176f
+#define mmPLL_MACRO_CNTL_RESERVED28 0x171c
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED28 0x171c
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED28 0x1746
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED28 0x1770
+#define mmPLL_MACRO_CNTL_RESERVED29 0x171d
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED29 0x171d
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED29 0x1747
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED29 0x1771
+#define mmPLL_MACRO_CNTL_RESERVED30 0x171e
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED30 0x171e
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED30 0x1748
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED30 0x1772
+#define mmPLL_MACRO_CNTL_RESERVED31 0x171f
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED31 0x171f
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED31 0x1749
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED31 0x1773
+#define mmPLL_MACRO_CNTL_RESERVED32 0x1720
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED32 0x1720
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED32 0x174a
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED32 0x1774
+#define mmPLL_MACRO_CNTL_RESERVED33 0x1721
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED33 0x1721
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED33 0x174b
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED33 0x1775
+#define mmPLL_MACRO_CNTL_RESERVED34 0x1722
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED34 0x1722
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED34 0x174c
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED34 0x1776
+#define mmPLL_MACRO_CNTL_RESERVED35 0x1723
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED35 0x1723
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED35 0x174d
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED35 0x1777
+#define mmPLL_MACRO_CNTL_RESERVED36 0x1724
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED36 0x1724
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED36 0x174e
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED36 0x1778
+#define mmPLL_MACRO_CNTL_RESERVED37 0x1725
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED37 0x1725
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED37 0x174f
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED37 0x1779
+#define mmPLL_MACRO_CNTL_RESERVED38 0x1726
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED38 0x1726
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED38 0x1750
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED38 0x177a
+#define mmPLL_MACRO_CNTL_RESERVED39 0x1727
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED39 0x1727
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED39 0x1751
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED39 0x177b
+#define mmPLL_MACRO_CNTL_RESERVED40 0x1728
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED40 0x1728
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED40 0x1752
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED40 0x177c
+#define mmPLL_MACRO_CNTL_RESERVED41 0x1729
+#define mmDCCG_PLL0_PLL_MACRO_CNTL_RESERVED41 0x1729
+#define mmDCCG_PLL1_PLL_MACRO_CNTL_RESERVED41 0x1753
+#define mmDCCG_PLL2_PLL_MACRO_CNTL_RESERVED41 0x177d
+#define mmDENTIST_DISPCLK_CNTL 0x124
+#define mmDCDEBUG_BUS_CLK1_SEL 0x16c4
+#define mmDCDEBUG_BUS_CLK2_SEL 0x16c5
+#define mmDCDEBUG_BUS_CLK3_SEL 0x16c6
+#define mmDCDEBUG_BUS_CLK4_SEL 0x16c7
+#define mmDCDEBUG_BUS_CLK5_SEL 0x16c8
+#define mmDCDEBUG_OUT_PIN_OVERRIDE 0x16c9
+#define mmDCDEBUG_OUT_CNTL 0x16ca
+#define mmDCDEBUG_OUT_DATA 0x16cb
+#define mmDMIF_CONTROL 0x2f6
+#define mmDMIF_STATUS 0x2f7
+#define mmDMIFV_STATUS 0x2f5
+#define mmDMIF_HW_DEBUG 0x2f8
+#define mmDMIF_ARBITRATION_CONTROL 0x2f9
+#define mmPIPE0_ARBITRATION_CONTROL3 0x2fa
+#define mmPIPE1_ARBITRATION_CONTROL3 0x2fb
+#define mmPIPE2_ARBITRATION_CONTROL3 0x2fc
+#define mmPIPE3_ARBITRATION_CONTROL3 0x2fd
+#define mmPIPE4_ARBITRATION_CONTROL3 0x2fe
+#define mmPIPE5_ARBITRATION_CONTROL3 0x2ff
+#define mmPIPE6_ARBITRATION_CONTROL3 0x32a
+#define mmPIPE7_ARBITRATION_CONTROL3 0x32b
+#define mmDMIF_P_VMID 0x300
+#define mmDMIF_URG_OVERRIDE 0x329
+#define mmDMIF_TEST_DEBUG_INDEX 0x301
+#define mmDMIF_TEST_DEBUG_DATA 0x302
+#define ixDMIF_DEBUG02_CORE0 0x2
+#define ixDMIF_DEBUG02_CORE1 0xa
+#define mmDMIF_ADDR_CALC 0x303
+#define mmDMIF_STATUS2 0x304
+#define mmPIPE0_MAX_REQUESTS 0x305
+#define mmPIPE1_MAX_REQUESTS 0x306
+#define mmPIPE2_MAX_REQUESTS 0x307
+#define mmPIPE3_MAX_REQUESTS 0x308
+#define mmPIPE4_MAX_REQUESTS 0x309
+#define mmPIPE5_MAX_REQUESTS 0x30a
+#define mmPIPE6_MAX_REQUESTS 0x32c
+#define mmPIPE7_MAX_REQUESTS 0x32d
+#define mmDVMM_REG_RD_STATUS 0x32e
+#define mmDVMM_REG_RD_DATA 0x32f
+#define mmDVMM_PTE_REQ 0x330
+#define mmDVMM_CNTL 0x331
+#define mmDVMM_FAULT_STATUS 0x332
+#define mmDVMM_FAULT_ADDR 0x333
+#define mmLOW_POWER_TILING_CONTROL 0x30b
+#define mmMCIF_CONTROL 0x30c
+#define mmMCIF_WRITE_COMBINE_CONTROL 0x30d
+#define mmMCIF_TEST_DEBUG_INDEX 0x30e
+#define mmMCIF_TEST_DEBUG_DATA 0x30f
+#define ixIDDCCIF02_DBG_DCCIF_C 0x9
+#define ixIDDCCIF04_DBG_DCCIF_E 0xb
+#define ixIDDCCIF05_DBG_DCCIF_F 0xc
+#define mmMCIF_VMID 0x310
+#define mmMCIF_MEM_CONTROL 0x311
+#define mmCC_DC_PIPE_DIS 0x312
+#define mmMC_DC_INTERFACE_NACK_STATUS 0x313
+#define mmRBBMIF_TIMEOUT 0x314
+#define mmRBBMIF_STATUS 0x315
+#define mmRBBMIF_TIMEOUT_DIS 0x316
+#define mmRBBMIF_STATUS_FLAG 0x327
+#define mmDCI_MEM_PWR_STATUS 0x317
+#define mmDCI_MEM_PWR_STATUS2 0x318
+#define mmDCI_MEM_PWR_STATUS3 0x33d
+#define mmDCI_CLK_CNTL 0x319
+#define mmDCI_CLK_RAMP_CNTL 0x31a
+#define mmDCI_MEM_PWR_CNTL 0x31b
+#define mmDCI_MEM_PWR_CNTL2 0x31c
+#define mmDCI_MEM_PWR_CNTL3 0x31d
+#define mmDCI_MEM_PWR_CNTL4 0x33b
+#define mmDVMM_PTE_PGMEM_CONTROL 0x335
+#define mmDVMM_PTE_PGMEM_STATE 0x336
+#define mmDCI_SOFT_RESET 0x328
+#define mmDCI_MISC 0x33c
+#define mmDCI_TEST_DEBUG_INDEX 0x31e
+#define mmDCI_TEST_DEBUG_DATA 0x31f
+#define mmDCI_DEBUG_CONFIG 0x320
+#define mmPIPE0_DMIF_BUFFER_CONTROL 0x321
+#define mmPIPE1_DMIF_BUFFER_CONTROL 0x322
+#define mmPIPE2_DMIF_BUFFER_CONTROL 0x323
+#define mmPIPE3_DMIF_BUFFER_CONTROL 0x324
+#define mmPIPE4_DMIF_BUFFER_CONTROL 0x325
+#define mmPIPE5_DMIF_BUFFER_CONTROL 0x326
+#define mmDC_GENERICA 0x4800
+#define mmDC_GENERICB 0x4801
+#define mmDC_PAD_EXTERN_SIG 0x4802
+#define mmDC_REF_CLK_CNTL 0x4803
+#define mmDC_GPIO_DEBUG 0x4804
+#define mmUNIPHYA_LINK_CNTL 0x4805
+#define mmUNIPHYB_LINK_CNTL 0x4807
+#define mmUNIPHYC_LINK_CNTL 0x4809
+#define mmUNIPHYD_LINK_CNTL 0x480b
+#define mmUNIPHYE_LINK_CNTL 0x480d
+#define mmUNIPHYF_LINK_CNTL 0x480f
+#define mmUNIPHYG_LINK_CNTL 0x4811
+#define mmUNIPHYA_CHANNEL_XBAR_CNTL 0x4806
+#define mmUNIPHYB_CHANNEL_XBAR_CNTL 0x4808
+#define mmUNIPHYC_CHANNEL_XBAR_CNTL 0x480a
+#define mmUNIPHYD_CHANNEL_XBAR_CNTL 0x480c
+#define mmUNIPHYE_CHANNEL_XBAR_CNTL 0x480e
+#define mmUNIPHYF_CHANNEL_XBAR_CNTL 0x4810
+#define mmUNIPHYG_CHANNEL_XBAR_CNTL 0x4812
+#define mmUNIPHYLPA_LINK_CNTL 0x4847
+#define mmUNIPHYLPB_LINK_CNTL 0x4848
+#define mmUNIPHYLPA_CHANNEL_XBAR_CNTL 0x4849
+#define mmUNIPHYLPB_CHANNEL_XBAR_CNTL 0x484a
+#define mmUNIPHY_IMPCAL_LINKA 0x4838
+#define mmUNIPHY_IMPCAL_LINKB 0x4839
+#define mmUNIPHY_IMPCAL_LINKC 0x483f
+#define mmUNIPHY_IMPCAL_LINKD 0x4840
+#define mmUNIPHY_IMPCAL_LINKE 0x4843
+#define mmUNIPHY_IMPCAL_LINKF 0x4844
+#define mmUNIPHY_IMPCAL_PERIOD 0x483a
+#define mmAUXP_IMPCAL 0x483b
+#define mmAUXN_IMPCAL 0x483c
+#define mmDCIO_IMPCAL_CNTL 0x483d
+#define mmUNIPHY_IMPCAL_PSW_AB 0x483e
+#define mmDCIO_IMPCAL_CNTL_CD 0x4841
+#define mmUNIPHY_IMPCAL_PSW_CD 0x4842
+#define mmDCIO_IMPCAL_CNTL_EF 0x4845
+#define mmUNIPHY_IMPCAL_PSW_EF 0x4846
+#define mmDCIO_WRCMD_DELAY 0x4816
+#define mmDC_PINSTRAPS 0x4818
+#define mmDC_DVODATA_CONFIG 0x481a
+#define mmLVTMA_PWRSEQ_CNTL 0x481b
+#define mmLVTMA_PWRSEQ_STATE 0x481c
+#define mmLVTMA_PWRSEQ_REF_DIV 0x481d
+#define mmLVTMA_PWRSEQ_DELAY1 0x481e
+#define mmLVTMA_PWRSEQ_DELAY2 0x481f
+#define mmBL_PWM_CNTL 0x4820
+#define mmBL_PWM_CNTL2 0x4821
+#define mmBL_PWM_PERIOD_CNTL 0x4822
+#define mmBL_PWM_GRP1_REG_LOCK 0x4823
+#define mmDCIO_GSL_GENLK_PAD_CNTL 0x4824
+#define mmDCIO_GSL_SWAPLOCK_PAD_CNTL 0x4825
+#define mmDCIO_GSL0_CNTL 0x4826
+#define mmDCIO_GSL1_CNTL 0x4827
+#define mmDCIO_GSL2_CNTL 0x4828
+#define mmDC_GPU_TIMER_START_POSITION_V_UPDATE 0x4829
+#define mmDC_GPU_TIMER_START_POSITION_P_FLIP 0x482a
+#define mmDC_GPU_TIMER_READ 0x482b
+#define mmDC_GPU_TIMER_READ_CNTL 0x482c
+#define mmDCIO_CLOCK_CNTL 0x482d
+#define mmDCIO_DEBUG 0x482f
+#define mmDCO_DCFE_EXT_VSYNC_CNTL 0x4830
+#define mmDBG_OUT_CNTL 0x4834
+#define mmDCIO_DEBUG_CONFIG 0x4835
+#define mmDCIO_SOFT_RESET 0x4836
+#define mmDCIO_DPHY_SEL 0x4837
+#define mmDCIO_DPCS_TX_INTERRUPT 0x484b
+#define mmDCIO_DPCS_RX_INTERRUPT 0x484c
+#define mmDCIO_SEMAPHORE0 0x484d
+#define mmDCIO_SEMAPHORE1 0x484e
+#define mmDCIO_SEMAPHORE2 0x484f
+#define mmDCIO_SEMAPHORE3 0x4850
+#define mmDCIO_SEMAPHORE4 0x4851
+#define mmDCIO_SEMAPHORE5 0x4852
+#define mmDCIO_SEMAPHORE6 0x4853
+#define mmDCIO_SEMAPHORE7 0x4854
+#define mmDCIO_TEST_DEBUG_INDEX 0x4831
+#define mmDCIO_TEST_DEBUG_DATA 0x4832
+#define ixDCIO_DEBUG1 0x1
+#define ixDCIO_DEBUG2 0x2
+#define ixDCIO_DEBUG3 0x3
+#define ixDCIO_DEBUG4 0x4
+#define ixDCIO_DEBUG5 0x5
+#define ixDCIO_DEBUG6 0x6
+#define ixDCIO_DEBUG7 0x7
+#define ixDCIO_DEBUG8 0x8
+#define ixDCIO_DEBUG9 0x9
+#define ixDCIO_DEBUGA 0xa
+#define ixDCIO_DEBUGB 0xb
+#define ixDCIO_DEBUGC 0xc
+#define ixDCIO_DEBUGD 0xd
+#define ixDCIO_DEBUGE 0xe
+#define ixDCIO_DEBUGF 0xf
+#define ixDCIO_DEBUG10 0x10
+#define ixDCIO_DEBUG11 0x11
+#define ixDCIO_DEBUG12 0x12
+#define ixDCIO_DEBUG13 0x13
+#define ixDCIO_DEBUG14 0x14
+#define ixDCIO_DEBUG15 0x15
+#define ixDCIO_DEBUG16 0x16
+#define ixDCIO_DEBUG17 0x17
+#define ixDCIO_DEBUG18 0x18
+#define ixDCIO_DEBUG19 0x19
+#define ixDCIO_DEBUG1A 0x1a
+#define ixDCIO_DEBUG1B 0x1b
+#define ixDCIO_DEBUG1C 0x1c
+#define ixDCIO_DEBUG1D 0x1d
+#define ixDCIO_DEBUG1E 0x1e
+#define ixDCIO_DEBUG1F 0x1f
+#define ixDCIO_DEBUG20 0x20
+#define ixDCIO_DEBUG21 0x21
+#define ixDCIO_DEBUG22 0x22
+#define ixDCIO_DEBUG23 0x23
+#define ixDCIO_DEBUG24 0x24
+#define ixDCIO_DEBUG25 0x25
+#define ixDCIO_DEBUG26 0x26
+#define ixDCIO_DEBUG27 0x27
+#define ixDCIO_DEBUG28 0x28
+#define ixDCIO_DEBUG_ID 0x0
+#define mmDC_GPIO_GENERIC_MASK 0x4860
+#define mmDC_GPIO_GENERIC_A 0x4861
+#define mmDC_GPIO_GENERIC_EN 0x4862
+#define mmDC_GPIO_GENERIC_Y 0x4863
+#define mmDC_GPIO_DDC1_MASK 0x4868
+#define mmDC_GPIO_DDC1_A 0x4869
+#define mmDC_GPIO_DDC1_EN 0x486a
+#define mmDC_GPIO_DDC1_Y 0x486b
+#define mmDC_GPIO_DDC2_MASK 0x486c
+#define mmDC_GPIO_DDC2_A 0x486d
+#define mmDC_GPIO_DDC2_EN 0x486e
+#define mmDC_GPIO_DDC2_Y 0x486f
+#define mmDC_GPIO_DDC3_MASK 0x4870
+#define mmDC_GPIO_DDC3_A 0x4871
+#define mmDC_GPIO_DDC3_EN 0x4872
+#define mmDC_GPIO_DDC3_Y 0x4873
+#define mmDC_GPIO_DDC4_MASK 0x4874
+#define mmDC_GPIO_DDC4_A 0x4875
+#define mmDC_GPIO_DDC4_EN 0x4876
+#define mmDC_GPIO_DDC4_Y 0x4877
+#define mmDC_GPIO_DDC5_MASK 0x4878
+#define mmDC_GPIO_DDC5_A 0x4879
+#define mmDC_GPIO_DDC5_EN 0x487a
+#define mmDC_GPIO_DDC5_Y 0x487b
+#define mmDC_GPIO_DDC6_MASK 0x487c
+#define mmDC_GPIO_DDC6_A 0x487d
+#define mmDC_GPIO_DDC6_EN 0x487e
+#define mmDC_GPIO_DDC6_Y 0x487f
+#define mmDC_GPIO_DDCVGA_MASK 0x4880
+#define mmDC_GPIO_DDCVGA_A 0x4881
+#define mmDC_GPIO_DDCVGA_EN 0x4882
+#define mmDC_GPIO_DDCVGA_Y 0x4883
+#define mmDC_GPIO_SYNCA_MASK 0x4884
+#define mmDC_GPIO_SYNCA_A 0x4885
+#define mmDC_GPIO_SYNCA_EN 0x4886
+#define mmDC_GPIO_SYNCA_Y 0x4887
+#define mmDC_GPIO_GENLK_MASK 0x4888
+#define mmDC_GPIO_GENLK_A 0x4889
+#define mmDC_GPIO_GENLK_EN 0x488a
+#define mmDC_GPIO_GENLK_Y 0x488b
+#define mmDC_GPIO_HPD_MASK 0x488c
+#define mmDC_GPIO_HPD_A 0x488d
+#define mmDC_GPIO_HPD_EN 0x488e
+#define mmDC_GPIO_HPD_Y 0x488f
+#define mmDC_GPIO_PWRSEQ_MASK 0x4890
+#define mmDC_GPIO_PWRSEQ_A 0x4891
+#define mmDC_GPIO_PWRSEQ_EN 0x4892
+#define mmDC_GPIO_PWRSEQ_Y 0x4893
+#define mmDC_GPIO_PAD_STRENGTH_1 0x4894
+#define mmDC_GPIO_PAD_STRENGTH_2 0x4895
+#define mmPHY_AUX_CNTL 0x4897
+#define mmDC_GPIO_I2CPAD_A 0x4899
+#define mmDC_GPIO_I2CPAD_EN 0x489a
+#define mmDC_GPIO_I2CPAD_Y 0x489b
+#define mmDC_GPIO_I2CPAD_STRENGTH 0x489c
+#define mmDVO_VREF_CONTROL 0x489e
+#define mmDVO_SKEW_ADJUST 0x489f
+#define mmDC_GPIO_RECEIVER_EN0 0x48a0
+#define mmDC_GPIO_RECEIVER_EN1 0x48a1
+#define mmDC_GPIO_I2S_SPDIF_MASK 0x48a8
+#define mmDC_GPIO_I2S_SPDIF_A 0x48a9
+#define mmDC_GPIO_I2S_SPDIF_EN 0x48aa
+#define mmDC_GPIO_I2S_SPDIF_Y 0x48ab
+#define mmDC_GPIO_I2S_SPDIF_STRENGTH 0x48ac
+#define mmDC_GPIO_TX12_EN 0x48ad
+#define mmDC_GPIO_AUX_CTRL_0 0x48ae
+#define mmDC_GPIO_AUX_CTRL_1 0x48af
+#define mmDC_GPIO_AUX_CTRL_2 0x48b0
+#define mmDC_GPIO_HPD_CTRL_0 0x48b1
+#define mmDC_GPIO_HPD_CTRL_1 0x48b2
+#define mmDAC_MACRO_CNTL_RESERVED0 0x48b8
+#define mmDAC_MACRO_CNTL_RESERVED1 0x48b9
+#define mmDAC_MACRO_CNTL_RESERVED2 0x48ba
+#define mmDAC_MACRO_CNTL_RESERVED3 0x48bb
+#define mmUNIPHY_MACRO_CNTL_RESERVED0 0x48c0
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED0 0x48c0
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED0 0x4960
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED0 0x9a00
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED0 0x9aa0
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED0 0x9b40
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED0 0x9be0
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED0 0x9c80
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED0 0x9d20
+#define mmUNIPHY_MACRO_CNTL_RESERVED1 0x48c1
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED1 0x48c1
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED1 0x4961
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED1 0x9a01
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED1 0x9aa1
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED1 0x9b41
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED1 0x9be1
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED1 0x9c81
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED1 0x9d21
+#define mmUNIPHY_MACRO_CNTL_RESERVED2 0x48c2
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED2 0x48c2
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED2 0x4962
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED2 0x9a02
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED2 0x9aa2
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED2 0x9b42
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED2 0x9be2
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED2 0x9c82
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED2 0x9d22
+#define mmUNIPHY_MACRO_CNTL_RESERVED3 0x48c3
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED3 0x48c3
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED3 0x4963
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED3 0x9a03
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED3 0x9aa3
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED3 0x9b43
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED3 0x9be3
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED3 0x9c83
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED3 0x9d23
+#define mmUNIPHY_MACRO_CNTL_RESERVED4 0x48c4
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED4 0x48c4
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED4 0x4964
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED4 0x9a04
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED4 0x9aa4
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED4 0x9b44
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED4 0x9be4
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED4 0x9c84
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED4 0x9d24
+#define mmUNIPHY_MACRO_CNTL_RESERVED5 0x48c5
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED5 0x48c5
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED5 0x4965
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED5 0x9a05
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED5 0x9aa5
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED5 0x9b45
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED5 0x9be5
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED5 0x9c85
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED5 0x9d25
+#define mmUNIPHY_MACRO_CNTL_RESERVED6 0x48c6
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED6 0x48c6
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED6 0x4966
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED6 0x9a06
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED6 0x9aa6
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED6 0x9b46
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED6 0x9be6
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED6 0x9c86
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED6 0x9d26
+#define mmUNIPHY_MACRO_CNTL_RESERVED7 0x48c7
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED7 0x48c7
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED7 0x4967
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED7 0x9a07
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED7 0x9aa7
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED7 0x9b47
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED7 0x9be7
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED7 0x9c87
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED7 0x9d27
+#define mmUNIPHY_MACRO_CNTL_RESERVED8 0x48c8
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED8 0x48c8
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED8 0x4968
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED8 0x9a08
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED8 0x9aa8
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED8 0x9b48
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED8 0x9be8
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED8 0x9c88
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED8 0x9d28
+#define mmUNIPHY_MACRO_CNTL_RESERVED9 0x48c9
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED9 0x48c9
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED9 0x4969
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED9 0x9a09
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED9 0x9aa9
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED9 0x9b49
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED9 0x9be9
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED9 0x9c89
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED9 0x9d29
+#define mmUNIPHY_MACRO_CNTL_RESERVED10 0x48ca
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED10 0x48ca
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED10 0x496a
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED10 0x9a0a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED10 0x9aaa
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED10 0x9b4a
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED10 0x9bea
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED10 0x9c8a
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED10 0x9d2a
+#define mmUNIPHY_MACRO_CNTL_RESERVED11 0x48cb
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED11 0x48cb
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED11 0x496b
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED11 0x9a0b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED11 0x9aab
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED11 0x9b4b
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED11 0x9beb
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED11 0x9c8b
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED11 0x9d2b
+#define mmUNIPHY_MACRO_CNTL_RESERVED12 0x48cc
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED12 0x48cc
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED12 0x496c
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED12 0x9a0c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED12 0x9aac
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED12 0x9b4c
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED12 0x9bec
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED12 0x9c8c
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED12 0x9d2c
+#define mmUNIPHY_MACRO_CNTL_RESERVED13 0x48cd
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED13 0x48cd
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED13 0x496d
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED13 0x9a0d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED13 0x9aad
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED13 0x9b4d
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED13 0x9bed
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED13 0x9c8d
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED13 0x9d2d
+#define mmUNIPHY_MACRO_CNTL_RESERVED14 0x48ce
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED14 0x48ce
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED14 0x496e
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED14 0x9a0e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED14 0x9aae
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED14 0x9b4e
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED14 0x9bee
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED14 0x9c8e
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED14 0x9d2e
+#define mmUNIPHY_MACRO_CNTL_RESERVED15 0x48cf
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED15 0x48cf
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED15 0x496f
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED15 0x9a0f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED15 0x9aaf
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED15 0x9b4f
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED15 0x9bef
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED15 0x9c8f
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED15 0x9d2f
+#define mmUNIPHY_MACRO_CNTL_RESERVED16 0x48d0
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED16 0x48d0
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED16 0x4970
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED16 0x9a10
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED16 0x9ab0
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED16 0x9b50
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED16 0x9bf0
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED16 0x9c90
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED16 0x9d30
+#define mmUNIPHY_MACRO_CNTL_RESERVED17 0x48d1
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED17 0x48d1
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED17 0x4971
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED17 0x9a11
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED17 0x9ab1
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED17 0x9b51
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED17 0x9bf1
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED17 0x9c91
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED17 0x9d31
+#define mmUNIPHY_MACRO_CNTL_RESERVED18 0x48d2
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED18 0x48d2
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED18 0x4972
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED18 0x9a12
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED18 0x9ab2
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED18 0x9b52
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED18 0x9bf2
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED18 0x9c92
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED18 0x9d32
+#define mmUNIPHY_MACRO_CNTL_RESERVED19 0x48d3
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED19 0x48d3
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED19 0x4973
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED19 0x9a13
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED19 0x9ab3
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED19 0x9b53
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED19 0x9bf3
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED19 0x9c93
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED19 0x9d33
+#define mmUNIPHY_MACRO_CNTL_RESERVED20 0x48d4
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED20 0x48d4
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED20 0x4974
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED20 0x9a14
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED20 0x9ab4
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED20 0x9b54
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED20 0x9bf4
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED20 0x9c94
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED20 0x9d34
+#define mmUNIPHY_MACRO_CNTL_RESERVED21 0x48d5
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED21 0x48d5
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED21 0x4975
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED21 0x9a15
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED21 0x9ab5
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED21 0x9b55
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED21 0x9bf5
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED21 0x9c95
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED21 0x9d35
+#define mmUNIPHY_MACRO_CNTL_RESERVED22 0x48d6
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED22 0x48d6
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED22 0x4976
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED22 0x9a16
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED22 0x9ab6
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED22 0x9b56
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED22 0x9bf6
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED22 0x9c96
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED22 0x9d36
+#define mmUNIPHY_MACRO_CNTL_RESERVED23 0x48d7
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED23 0x48d7
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED23 0x4977
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED23 0x9a17
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED23 0x9ab7
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED23 0x9b57
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED23 0x9bf7
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED23 0x9c97
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED23 0x9d37
+#define mmUNIPHY_MACRO_CNTL_RESERVED24 0x48d8
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED24 0x48d8
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED24 0x4978
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED24 0x9a18
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED24 0x9ab8
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED24 0x9b58
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED24 0x9bf8
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED24 0x9c98
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED24 0x9d38
+#define mmUNIPHY_MACRO_CNTL_RESERVED25 0x48d9
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED25 0x48d9
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED25 0x4979
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED25 0x9a19
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED25 0x9ab9
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED25 0x9b59
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED25 0x9bf9
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED25 0x9c99
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED25 0x9d39
+#define mmUNIPHY_MACRO_CNTL_RESERVED26 0x48da
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED26 0x48da
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED26 0x497a
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED26 0x9a1a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED26 0x9aba
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED26 0x9b5a
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED26 0x9bfa
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED26 0x9c9a
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED26 0x9d3a
+#define mmUNIPHY_MACRO_CNTL_RESERVED27 0x48db
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED27 0x48db
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED27 0x497b
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED27 0x9a1b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED27 0x9abb
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED27 0x9b5b
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED27 0x9bfb
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED27 0x9c9b
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED27 0x9d3b
+#define mmUNIPHY_MACRO_CNTL_RESERVED28 0x48dc
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED28 0x48dc
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED28 0x497c
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED28 0x9a1c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED28 0x9abc
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED28 0x9b5c
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED28 0x9bfc
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED28 0x9c9c
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED28 0x9d3c
+#define mmUNIPHY_MACRO_CNTL_RESERVED29 0x48dd
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED29 0x48dd
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED29 0x497d
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED29 0x9a1d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED29 0x9abd
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED29 0x9b5d
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED29 0x9bfd
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED29 0x9c9d
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED29 0x9d3d
+#define mmUNIPHY_MACRO_CNTL_RESERVED30 0x48de
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED30 0x48de
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED30 0x497e
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED30 0x9a1e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED30 0x9abe
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED30 0x9b5e
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED30 0x9bfe
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED30 0x9c9e
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED30 0x9d3e
+#define mmUNIPHY_MACRO_CNTL_RESERVED31 0x48df
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED31 0x48df
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED31 0x497f
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED31 0x9a1f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED31 0x9abf
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED31 0x9b5f
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED31 0x9bff
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED31 0x9c9f
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED31 0x9d3f
+#define mmUNIPHY_MACRO_CNTL_RESERVED32 0x48e0
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED32 0x48e0
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED32 0x4980
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED32 0x9a20
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED32 0x9ac0
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED32 0x9b60
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED32 0x9c00
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED32 0x9ca0
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED32 0x9d40
+#define mmUNIPHY_MACRO_CNTL_RESERVED33 0x48e1
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED33 0x48e1
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED33 0x4981
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED33 0x9a21
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED33 0x9ac1
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED33 0x9b61
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED33 0x9c01
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED33 0x9ca1
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED33 0x9d41
+#define mmUNIPHY_MACRO_CNTL_RESERVED34 0x48e2
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED34 0x48e2
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED34 0x4982
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED34 0x9a22
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED34 0x9ac2
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED34 0x9b62
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED34 0x9c02
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED34 0x9ca2
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED34 0x9d42
+#define mmUNIPHY_MACRO_CNTL_RESERVED35 0x48e3
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED35 0x48e3
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED35 0x4983
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED35 0x9a23
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED35 0x9ac3
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED35 0x9b63
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED35 0x9c03
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED35 0x9ca3
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED35 0x9d43
+#define mmUNIPHY_MACRO_CNTL_RESERVED36 0x48e4
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED36 0x48e4
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED36 0x4984
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED36 0x9a24
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED36 0x9ac4
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED36 0x9b64
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED36 0x9c04
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED36 0x9ca4
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED36 0x9d44
+#define mmUNIPHY_MACRO_CNTL_RESERVED37 0x48e5
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED37 0x48e5
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED37 0x4985
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED37 0x9a25
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED37 0x9ac5
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED37 0x9b65
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED37 0x9c05
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED37 0x9ca5
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED37 0x9d45
+#define mmUNIPHY_MACRO_CNTL_RESERVED38 0x48e6
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED38 0x48e6
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED38 0x4986
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED38 0x9a26
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED38 0x9ac6
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED38 0x9b66
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED38 0x9c06
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED38 0x9ca6
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED38 0x9d46
+#define mmUNIPHY_MACRO_CNTL_RESERVED39 0x48e7
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED39 0x48e7
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED39 0x4987
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED39 0x9a27
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED39 0x9ac7
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED39 0x9b67
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED39 0x9c07
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED39 0x9ca7
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED39 0x9d47
+#define mmUNIPHY_MACRO_CNTL_RESERVED40 0x48e8
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED40 0x48e8
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED40 0x4988
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED40 0x9a28
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED40 0x9ac8
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED40 0x9b68
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED40 0x9c08
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED40 0x9ca8
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED40 0x9d48
+#define mmUNIPHY_MACRO_CNTL_RESERVED41 0x48e9
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED41 0x48e9
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED41 0x4989
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED41 0x9a29
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED41 0x9ac9
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED41 0x9b69
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED41 0x9c09
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED41 0x9ca9
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED41 0x9d49
+#define mmUNIPHY_MACRO_CNTL_RESERVED42 0x48ea
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED42 0x48ea
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED42 0x498a
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED42 0x9a2a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED42 0x9aca
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED42 0x9b6a
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED42 0x9c0a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED42 0x9caa
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED42 0x9d4a
+#define mmUNIPHY_MACRO_CNTL_RESERVED43 0x48eb
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED43 0x48eb
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED43 0x498b
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED43 0x9a2b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED43 0x9acb
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED43 0x9b6b
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED43 0x9c0b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED43 0x9cab
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED43 0x9d4b
+#define mmUNIPHY_MACRO_CNTL_RESERVED44 0x48ec
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED44 0x48ec
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED44 0x498c
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED44 0x9a2c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED44 0x9acc
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED44 0x9b6c
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED44 0x9c0c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED44 0x9cac
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED44 0x9d4c
+#define mmUNIPHY_MACRO_CNTL_RESERVED45 0x48ed
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED45 0x48ed
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED45 0x498d
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED45 0x9a2d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED45 0x9acd
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED45 0x9b6d
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED45 0x9c0d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED45 0x9cad
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED45 0x9d4d
+#define mmUNIPHY_MACRO_CNTL_RESERVED46 0x48ee
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED46 0x48ee
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED46 0x498e
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED46 0x9a2e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED46 0x9ace
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED46 0x9b6e
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED46 0x9c0e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED46 0x9cae
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED46 0x9d4e
+#define mmUNIPHY_MACRO_CNTL_RESERVED47 0x48ef
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED47 0x48ef
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED47 0x498f
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED47 0x9a2f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED47 0x9acf
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED47 0x9b6f
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED47 0x9c0f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED47 0x9caf
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED47 0x9d4f
+#define mmUNIPHY_MACRO_CNTL_RESERVED48 0x48f0
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED48 0x48f0
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED48 0x4990
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED48 0x9a30
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED48 0x9ad0
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED48 0x9b70
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED48 0x9c10
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED48 0x9cb0
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED48 0x9d50
+#define mmUNIPHY_MACRO_CNTL_RESERVED49 0x48f1
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED49 0x48f1
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED49 0x4991
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED49 0x9a31
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED49 0x9ad1
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED49 0x9b71
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED49 0x9c11
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED49 0x9cb1
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED49 0x9d51
+#define mmUNIPHY_MACRO_CNTL_RESERVED50 0x48f2
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED50 0x48f2
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED50 0x4992
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED50 0x9a32
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED50 0x9ad2
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED50 0x9b72
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED50 0x9c12
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED50 0x9cb2
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED50 0x9d52
+#define mmUNIPHY_MACRO_CNTL_RESERVED51 0x48f3
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED51 0x48f3
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED51 0x4993
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED51 0x9a33
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED51 0x9ad3
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED51 0x9b73
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED51 0x9c13
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED51 0x9cb3
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED51 0x9d53
+#define mmUNIPHY_MACRO_CNTL_RESERVED52 0x48f4
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED52 0x48f4
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED52 0x4994
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED52 0x9a34
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED52 0x9ad4
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED52 0x9b74
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED52 0x9c14
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED52 0x9cb4
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED52 0x9d54
+#define mmUNIPHY_MACRO_CNTL_RESERVED53 0x48f5
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED53 0x48f5
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED53 0x4995
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED53 0x9a35
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED53 0x9ad5
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED53 0x9b75
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED53 0x9c15
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED53 0x9cb5
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED53 0x9d55
+#define mmUNIPHY_MACRO_CNTL_RESERVED54 0x48f6
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED54 0x48f6
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED54 0x4996
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED54 0x9a36
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED54 0x9ad6
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED54 0x9b76
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED54 0x9c16
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED54 0x9cb6
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED54 0x9d56
+#define mmUNIPHY_MACRO_CNTL_RESERVED55 0x48f7
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED55 0x48f7
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED55 0x4997
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED55 0x9a37
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED55 0x9ad7
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED55 0x9b77
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED55 0x9c17
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED55 0x9cb7
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED55 0x9d57
+#define mmUNIPHY_MACRO_CNTL_RESERVED56 0x48f8
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED56 0x48f8
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED56 0x4998
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED56 0x9a38
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED56 0x9ad8
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED56 0x9b78
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED56 0x9c18
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED56 0x9cb8
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED56 0x9d58
+#define mmUNIPHY_MACRO_CNTL_RESERVED57 0x48f9
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED57 0x48f9
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED57 0x4999
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED57 0x9a39
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED57 0x9ad9
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED57 0x9b79
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED57 0x9c19
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED57 0x9cb9
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED57 0x9d59
+#define mmUNIPHY_MACRO_CNTL_RESERVED58 0x48fa
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED58 0x48fa
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED58 0x499a
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED58 0x9a3a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED58 0x9ada
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED58 0x9b7a
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED58 0x9c1a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED58 0x9cba
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED58 0x9d5a
+#define mmUNIPHY_MACRO_CNTL_RESERVED59 0x48fb
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED59 0x48fb
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED59 0x499b
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED59 0x9a3b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED59 0x9adb
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED59 0x9b7b
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED59 0x9c1b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED59 0x9cbb
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED59 0x9d5b
+#define mmUNIPHY_MACRO_CNTL_RESERVED60 0x48fc
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED60 0x48fc
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED60 0x499c
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED60 0x9a3c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED60 0x9adc
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED60 0x9b7c
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED60 0x9c1c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED60 0x9cbc
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED60 0x9d5c
+#define mmUNIPHY_MACRO_CNTL_RESERVED61 0x48fd
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED61 0x48fd
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED61 0x499d
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED61 0x9a3d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED61 0x9add
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED61 0x9b7d
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED61 0x9c1d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED61 0x9cbd
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED61 0x9d5d
+#define mmUNIPHY_MACRO_CNTL_RESERVED62 0x48fe
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED62 0x48fe
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED62 0x499e
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED62 0x9a3e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED62 0x9ade
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED62 0x9b7e
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED62 0x9c1e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED62 0x9cbe
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED62 0x9d5e
+#define mmUNIPHY_MACRO_CNTL_RESERVED63 0x48ff
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED63 0x48ff
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED63 0x499f
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED63 0x9a3f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED63 0x9adf
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED63 0x9b7f
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED63 0x9c1f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED63 0x9cbf
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED63 0x9d5f
+#define mmUNIPHY_MACRO_CNTL_RESERVED64 0x4900
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED64 0x4900
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED64 0x49a0
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED64 0x9a40
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED64 0x9ae0
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED64 0x9b80
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED64 0x9c20
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED64 0x9cc0
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED64 0x9d60
+#define mmUNIPHY_MACRO_CNTL_RESERVED65 0x4901
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED65 0x4901
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED65 0x49a1
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED65 0x9a41
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED65 0x9ae1
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED65 0x9b81
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED65 0x9c21
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED65 0x9cc1
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED65 0x9d61
+#define mmUNIPHY_MACRO_CNTL_RESERVED66 0x4902
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED66 0x4902
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED66 0x49a2
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED66 0x9a42
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED66 0x9ae2
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED66 0x9b82
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED66 0x9c22
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED66 0x9cc2
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED66 0x9d62
+#define mmUNIPHY_MACRO_CNTL_RESERVED67 0x4903
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED67 0x4903
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED67 0x49a3
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED67 0x9a43
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED67 0x9ae3
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED67 0x9b83
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED67 0x9c23
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED67 0x9cc3
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED67 0x9d63
+#define mmUNIPHY_MACRO_CNTL_RESERVED68 0x4904
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED68 0x4904
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED68 0x49a4
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED68 0x9a44
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED68 0x9ae4
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED68 0x9b84
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED68 0x9c24
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED68 0x9cc4
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED68 0x9d64
+#define mmUNIPHY_MACRO_CNTL_RESERVED69 0x4905
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED69 0x4905
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED69 0x49a5
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED69 0x9a45
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED69 0x9ae5
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED69 0x9b85
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED69 0x9c25
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED69 0x9cc5
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED69 0x9d65
+#define mmUNIPHY_MACRO_CNTL_RESERVED70 0x4906
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED70 0x4906
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED70 0x49a6
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED70 0x9a46
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED70 0x9ae6
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED70 0x9b86
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED70 0x9c26
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED70 0x9cc6
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED70 0x9d66
+#define mmUNIPHY_MACRO_CNTL_RESERVED71 0x4907
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED71 0x4907
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED71 0x49a7
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED71 0x9a47
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED71 0x9ae7
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED71 0x9b87
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED71 0x9c27
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED71 0x9cc7
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED71 0x9d67
+#define mmUNIPHY_MACRO_CNTL_RESERVED72 0x4908
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED72 0x4908
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED72 0x49a8
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED72 0x9a48
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED72 0x9ae8
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED72 0x9b88
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED72 0x9c28
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED72 0x9cc8
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED72 0x9d68
+#define mmUNIPHY_MACRO_CNTL_RESERVED73 0x4909
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED73 0x4909
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED73 0x49a9
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED73 0x9a49
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED73 0x9ae9
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED73 0x9b89
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED73 0x9c29
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED73 0x9cc9
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED73 0x9d69
+#define mmUNIPHY_MACRO_CNTL_RESERVED74 0x490a
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED74 0x490a
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED74 0x49aa
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED74 0x9a4a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED74 0x9aea
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED74 0x9b8a
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED74 0x9c2a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED74 0x9cca
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED74 0x9d6a
+#define mmUNIPHY_MACRO_CNTL_RESERVED75 0x490b
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED75 0x490b
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED75 0x49ab
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED75 0x9a4b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED75 0x9aeb
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED75 0x9b8b
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED75 0x9c2b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED75 0x9ccb
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED75 0x9d6b
+#define mmUNIPHY_MACRO_CNTL_RESERVED76 0x490c
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED76 0x490c
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED76 0x49ac
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED76 0x9a4c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED76 0x9aec
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED76 0x9b8c
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED76 0x9c2c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED76 0x9ccc
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED76 0x9d6c
+#define mmUNIPHY_MACRO_CNTL_RESERVED77 0x490d
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED77 0x490d
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED77 0x49ad
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED77 0x9a4d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED77 0x9aed
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED77 0x9b8d
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED77 0x9c2d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED77 0x9ccd
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED77 0x9d6d
+#define mmUNIPHY_MACRO_CNTL_RESERVED78 0x490e
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED78 0x490e
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED78 0x49ae
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED78 0x9a4e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED78 0x9aee
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED78 0x9b8e
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED78 0x9c2e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED78 0x9cce
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED78 0x9d6e
+#define mmUNIPHY_MACRO_CNTL_RESERVED79 0x490f
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED79 0x490f
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED79 0x49af
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED79 0x9a4f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED79 0x9aef
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED79 0x9b8f
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED79 0x9c2f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED79 0x9ccf
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED79 0x9d6f
+#define mmUNIPHY_MACRO_CNTL_RESERVED80 0x4910
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED80 0x4910
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED80 0x49b0
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED80 0x9a50
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED80 0x9af0
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED80 0x9b90
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED80 0x9c30
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED80 0x9cd0
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED80 0x9d70
+#define mmUNIPHY_MACRO_CNTL_RESERVED81 0x4911
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED81 0x4911
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED81 0x49b1
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED81 0x9a51
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED81 0x9af1
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED81 0x9b91
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED81 0x9c31
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED81 0x9cd1
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED81 0x9d71
+#define mmUNIPHY_MACRO_CNTL_RESERVED82 0x4912
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED82 0x4912
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED82 0x49b2
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED82 0x9a52
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED82 0x9af2
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED82 0x9b92
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED82 0x9c32
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED82 0x9cd2
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED82 0x9d72
+#define mmUNIPHY_MACRO_CNTL_RESERVED83 0x4913
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED83 0x4913
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED83 0x49b3
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED83 0x9a53
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED83 0x9af3
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED83 0x9b93
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED83 0x9c33
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED83 0x9cd3
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED83 0x9d73
+#define mmUNIPHY_MACRO_CNTL_RESERVED84 0x4914
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED84 0x4914
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED84 0x49b4
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED84 0x9a54
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED84 0x9af4
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED84 0x9b94
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED84 0x9c34
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED84 0x9cd4
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED84 0x9d74
+#define mmUNIPHY_MACRO_CNTL_RESERVED85 0x4915
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED85 0x4915
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED85 0x49b5
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED85 0x9a55
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED85 0x9af5
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED85 0x9b95
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED85 0x9c35
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED85 0x9cd5
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED85 0x9d75
+#define mmUNIPHY_MACRO_CNTL_RESERVED86 0x4916
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED86 0x4916
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED86 0x49b6
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED86 0x9a56
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED86 0x9af6
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED86 0x9b96
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED86 0x9c36
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED86 0x9cd6
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED86 0x9d76
+#define mmUNIPHY_MACRO_CNTL_RESERVED87 0x4917
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED87 0x4917
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED87 0x49b7
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED87 0x9a57
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED87 0x9af7
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED87 0x9b97
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED87 0x9c37
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED87 0x9cd7
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED87 0x9d77
+#define mmUNIPHY_MACRO_CNTL_RESERVED88 0x4918
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED88 0x4918
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED88 0x49b8
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED88 0x9a58
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED88 0x9af8
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED88 0x9b98
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED88 0x9c38
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED88 0x9cd8
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED88 0x9d78
+#define mmUNIPHY_MACRO_CNTL_RESERVED89 0x4919
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED89 0x4919
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED89 0x49b9
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED89 0x9a59
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED89 0x9af9
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED89 0x9b99
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED89 0x9c39
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED89 0x9cd9
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED89 0x9d79
+#define mmUNIPHY_MACRO_CNTL_RESERVED90 0x491a
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED90 0x491a
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED90 0x49ba
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED90 0x9a5a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED90 0x9afa
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED90 0x9b9a
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED90 0x9c3a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED90 0x9cda
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED90 0x9d7a
+#define mmUNIPHY_MACRO_CNTL_RESERVED91 0x491b
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED91 0x491b
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED91 0x49bb
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED91 0x9a5b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED91 0x9afb
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED91 0x9b9b
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED91 0x9c3b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED91 0x9cdb
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED91 0x9d7b
+#define mmUNIPHY_MACRO_CNTL_RESERVED92 0x491c
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED92 0x491c
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED92 0x49bc
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED92 0x9a5c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED92 0x9afc
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED92 0x9b9c
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED92 0x9c3c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED92 0x9cdc
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED92 0x9d7c
+#define mmUNIPHY_MACRO_CNTL_RESERVED93 0x491d
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED93 0x491d
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED93 0x49bd
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED93 0x9a5d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED93 0x9afd
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED93 0x9b9d
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED93 0x9c3d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED93 0x9cdd
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED93 0x9d7d
+#define mmUNIPHY_MACRO_CNTL_RESERVED94 0x491e
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED94 0x491e
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED94 0x49be
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED94 0x9a5e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED94 0x9afe
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED94 0x9b9e
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED94 0x9c3e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED94 0x9cde
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED94 0x9d7e
+#define mmUNIPHY_MACRO_CNTL_RESERVED95 0x491f
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED95 0x491f
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED95 0x49bf
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED95 0x9a5f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED95 0x9aff
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED95 0x9b9f
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED95 0x9c3f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED95 0x9cdf
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED95 0x9d7f
+#define mmUNIPHY_MACRO_CNTL_RESERVED96 0x4920
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED96 0x4920
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED96 0x49c0
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED96 0x9a60
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED96 0x9b00
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED96 0x9ba0
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED96 0x9c40
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED96 0x9ce0
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED96 0x9d80
+#define mmUNIPHY_MACRO_CNTL_RESERVED97 0x4921
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED97 0x4921
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED97 0x49c1
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED97 0x9a61
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED97 0x9b01
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED97 0x9ba1
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED97 0x9c41
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED97 0x9ce1
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED97 0x9d81
+#define mmUNIPHY_MACRO_CNTL_RESERVED98 0x4922
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED98 0x4922
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED98 0x49c2
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED98 0x9a62
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED98 0x9b02
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED98 0x9ba2
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED98 0x9c42
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED98 0x9ce2
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED98 0x9d82
+#define mmUNIPHY_MACRO_CNTL_RESERVED99 0x4923
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED99 0x4923
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED99 0x49c3
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED99 0x9a63
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED99 0x9b03
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED99 0x9ba3
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED99 0x9c43
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED99 0x9ce3
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED99 0x9d83
+#define mmUNIPHY_MACRO_CNTL_RESERVED100 0x4924
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED100 0x4924
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED100 0x49c4
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED100 0x9a64
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED100 0x9b04
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED100 0x9ba4
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED100 0x9c44
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED100 0x9ce4
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED100 0x9d84
+#define mmUNIPHY_MACRO_CNTL_RESERVED101 0x4925
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED101 0x4925
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED101 0x49c5
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED101 0x9a65
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED101 0x9b05
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED101 0x9ba5
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED101 0x9c45
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED101 0x9ce5
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED101 0x9d85
+#define mmUNIPHY_MACRO_CNTL_RESERVED102 0x4926
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED102 0x4926
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED102 0x49c6
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED102 0x9a66
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED102 0x9b06
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED102 0x9ba6
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED102 0x9c46
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED102 0x9ce6
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED102 0x9d86
+#define mmUNIPHY_MACRO_CNTL_RESERVED103 0x4927
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED103 0x4927
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED103 0x49c7
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED103 0x9a67
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED103 0x9b07
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED103 0x9ba7
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED103 0x9c47
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED103 0x9ce7
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED103 0x9d87
+#define mmUNIPHY_MACRO_CNTL_RESERVED104 0x4928
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED104 0x4928
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED104 0x49c8
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED104 0x9a68
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED104 0x9b08
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED104 0x9ba8
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED104 0x9c48
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED104 0x9ce8
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED104 0x9d88
+#define mmUNIPHY_MACRO_CNTL_RESERVED105 0x4929
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED105 0x4929
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED105 0x49c9
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED105 0x9a69
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED105 0x9b09
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED105 0x9ba9
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED105 0x9c49
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED105 0x9ce9
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED105 0x9d89
+#define mmUNIPHY_MACRO_CNTL_RESERVED106 0x492a
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED106 0x492a
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED106 0x49ca
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED106 0x9a6a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED106 0x9b0a
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED106 0x9baa
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED106 0x9c4a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED106 0x9cea
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED106 0x9d8a
+#define mmUNIPHY_MACRO_CNTL_RESERVED107 0x492b
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED107 0x492b
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED107 0x49cb
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED107 0x9a6b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED107 0x9b0b
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED107 0x9bab
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED107 0x9c4b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED107 0x9ceb
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED107 0x9d8b
+#define mmUNIPHY_MACRO_CNTL_RESERVED108 0x492c
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED108 0x492c
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED108 0x49cc
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED108 0x9a6c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED108 0x9b0c
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED108 0x9bac
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED108 0x9c4c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED108 0x9cec
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED108 0x9d8c
+#define mmUNIPHY_MACRO_CNTL_RESERVED109 0x492d
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED109 0x492d
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED109 0x49cd
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED109 0x9a6d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED109 0x9b0d
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED109 0x9bad
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED109 0x9c4d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED109 0x9ced
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED109 0x9d8d
+#define mmUNIPHY_MACRO_CNTL_RESERVED110 0x492e
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED110 0x492e
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED110 0x49ce
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED110 0x9a6e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED110 0x9b0e
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED110 0x9bae
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED110 0x9c4e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED110 0x9cee
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED110 0x9d8e
+#define mmUNIPHY_MACRO_CNTL_RESERVED111 0x492f
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED111 0x492f
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED111 0x49cf
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED111 0x9a6f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED111 0x9b0f
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED111 0x9baf
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED111 0x9c4f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED111 0x9cef
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED111 0x9d8f
+#define mmUNIPHY_MACRO_CNTL_RESERVED112 0x4930
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED112 0x4930
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED112 0x49d0
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED112 0x9a70
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED112 0x9b10
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED112 0x9bb0
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED112 0x9c50
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED112 0x9cf0
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED112 0x9d90
+#define mmUNIPHY_MACRO_CNTL_RESERVED113 0x4931
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED113 0x4931
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED113 0x49d1
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED113 0x9a71
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED113 0x9b11
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED113 0x9bb1
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED113 0x9c51
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED113 0x9cf1
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED113 0x9d91
+#define mmUNIPHY_MACRO_CNTL_RESERVED114 0x4932
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED114 0x4932
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED114 0x49d2
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED114 0x9a72
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED114 0x9b12
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED114 0x9bb2
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED114 0x9c52
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED114 0x9cf2
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED114 0x9d92
+#define mmUNIPHY_MACRO_CNTL_RESERVED115 0x4933
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED115 0x4933
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED115 0x49d3
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED115 0x9a73
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED115 0x9b13
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED115 0x9bb3
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED115 0x9c53
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED115 0x9cf3
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED115 0x9d93
+#define mmUNIPHY_MACRO_CNTL_RESERVED116 0x4934
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED116 0x4934
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED116 0x49d4
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED116 0x9a74
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED116 0x9b14
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED116 0x9bb4
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED116 0x9c54
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED116 0x9cf4
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED116 0x9d94
+#define mmUNIPHY_MACRO_CNTL_RESERVED117 0x4935
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED117 0x4935
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED117 0x49d5
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED117 0x9a75
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED117 0x9b15
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED117 0x9bb5
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED117 0x9c55
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED117 0x9cf5
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED117 0x9d95
+#define mmUNIPHY_MACRO_CNTL_RESERVED118 0x4936
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED118 0x4936
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED118 0x49d6
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED118 0x9a76
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED118 0x9b16
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED118 0x9bb6
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED118 0x9c56
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED118 0x9cf6
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED118 0x9d96
+#define mmUNIPHY_MACRO_CNTL_RESERVED119 0x4937
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED119 0x4937
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED119 0x49d7
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED119 0x9a77
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED119 0x9b17
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED119 0x9bb7
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED119 0x9c57
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED119 0x9cf7
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED119 0x9d97
+#define mmUNIPHY_MACRO_CNTL_RESERVED120 0x4938
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED120 0x4938
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED120 0x49d8
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED120 0x9a78
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED120 0x9b18
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED120 0x9bb8
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED120 0x9c58
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED120 0x9cf8
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED120 0x9d98
+#define mmUNIPHY_MACRO_CNTL_RESERVED121 0x4939
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED121 0x4939
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED121 0x49d9
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED121 0x9a79
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED121 0x9b19
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED121 0x9bb9
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED121 0x9c59
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED121 0x9cf9
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED121 0x9d99
+#define mmUNIPHY_MACRO_CNTL_RESERVED122 0x493a
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED122 0x493a
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED122 0x49da
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED122 0x9a7a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED122 0x9b1a
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED122 0x9bba
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED122 0x9c5a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED122 0x9cfa
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED122 0x9d9a
+#define mmUNIPHY_MACRO_CNTL_RESERVED123 0x493b
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED123 0x493b
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED123 0x49db
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED123 0x9a7b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED123 0x9b1b
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED123 0x9bbb
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED123 0x9c5b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED123 0x9cfb
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED123 0x9d9b
+#define mmUNIPHY_MACRO_CNTL_RESERVED124 0x493c
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED124 0x493c
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED124 0x49dc
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED124 0x9a7c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED124 0x9b1c
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED124 0x9bbc
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED124 0x9c5c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED124 0x9cfc
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED124 0x9d9c
+#define mmUNIPHY_MACRO_CNTL_RESERVED125 0x493d
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED125 0x493d
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED125 0x49dd
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED125 0x9a7d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED125 0x9b1d
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED125 0x9bbd
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED125 0x9c5d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED125 0x9cfd
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED125 0x9d9d
+#define mmUNIPHY_MACRO_CNTL_RESERVED126 0x493e
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED126 0x493e
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED126 0x49de
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED126 0x9a7e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED126 0x9b1e
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED126 0x9bbe
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED126 0x9c5e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED126 0x9cfe
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED126 0x9d9e
+#define mmUNIPHY_MACRO_CNTL_RESERVED127 0x493f
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED127 0x493f
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED127 0x49df
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED127 0x9a7f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED127 0x9b1f
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED127 0x9bbf
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED127 0x9c5f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED127 0x9cff
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED127 0x9d9f
+#define mmUNIPHY_MACRO_CNTL_RESERVED128 0x4940
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED128 0x4940
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED128 0x49e0
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED128 0x9a80
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED128 0x9b20
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED128 0x9bc0
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED128 0x9c60
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED128 0x9d00
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED128 0x9da0
+#define mmUNIPHY_MACRO_CNTL_RESERVED129 0x4941
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED129 0x4941
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED129 0x49e1
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED129 0x9a81
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED129 0x9b21
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED129 0x9bc1
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED129 0x9c61
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED129 0x9d01
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED129 0x9da1
+#define mmUNIPHY_MACRO_CNTL_RESERVED130 0x4942
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED130 0x4942
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED130 0x49e2
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED130 0x9a82
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED130 0x9b22
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED130 0x9bc2
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED130 0x9c62
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED130 0x9d02
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED130 0x9da2
+#define mmUNIPHY_MACRO_CNTL_RESERVED131 0x4943
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED131 0x4943
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED131 0x49e3
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED131 0x9a83
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED131 0x9b23
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED131 0x9bc3
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED131 0x9c63
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED131 0x9d03
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED131 0x9da3
+#define mmUNIPHY_MACRO_CNTL_RESERVED132 0x4944
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED132 0x4944
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED132 0x49e4
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED132 0x9a84
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED132 0x9b24
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED132 0x9bc4
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED132 0x9c64
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED132 0x9d04
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED132 0x9da4
+#define mmUNIPHY_MACRO_CNTL_RESERVED133 0x4945
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED133 0x4945
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED133 0x49e5
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED133 0x9a85
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED133 0x9b25
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED133 0x9bc5
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED133 0x9c65
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED133 0x9d05
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED133 0x9da5
+#define mmUNIPHY_MACRO_CNTL_RESERVED134 0x4946
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED134 0x4946
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED134 0x49e6
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED134 0x9a86
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED134 0x9b26
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED134 0x9bc6
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED134 0x9c66
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED134 0x9d06
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED134 0x9da6
+#define mmUNIPHY_MACRO_CNTL_RESERVED135 0x4947
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED135 0x4947
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED135 0x49e7
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED135 0x9a87
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED135 0x9b27
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED135 0x9bc7
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED135 0x9c67
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED135 0x9d07
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED135 0x9da7
+#define mmUNIPHY_MACRO_CNTL_RESERVED136 0x4948
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED136 0x4948
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED136 0x49e8
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED136 0x9a88
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED136 0x9b28
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED136 0x9bc8
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED136 0x9c68
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED136 0x9d08
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED136 0x9da8
+#define mmUNIPHY_MACRO_CNTL_RESERVED137 0x4949
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED137 0x4949
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED137 0x49e9
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED137 0x9a89
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED137 0x9b29
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED137 0x9bc9
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED137 0x9c69
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED137 0x9d09
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED137 0x9da9
+#define mmUNIPHY_MACRO_CNTL_RESERVED138 0x494a
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED138 0x494a
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED138 0x49ea
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED138 0x9a8a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED138 0x9b2a
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED138 0x9bca
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED138 0x9c6a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED138 0x9d0a
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED138 0x9daa
+#define mmUNIPHY_MACRO_CNTL_RESERVED139 0x494b
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED139 0x494b
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED139 0x49eb
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED139 0x9a8b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED139 0x9b2b
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED139 0x9bcb
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED139 0x9c6b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED139 0x9d0b
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED139 0x9dab
+#define mmUNIPHY_MACRO_CNTL_RESERVED140 0x494c
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED140 0x494c
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED140 0x49ec
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED140 0x9a8c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED140 0x9b2c
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED140 0x9bcc
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED140 0x9c6c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED140 0x9d0c
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED140 0x9dac
+#define mmUNIPHY_MACRO_CNTL_RESERVED141 0x494d
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED141 0x494d
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED141 0x49ed
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED141 0x9a8d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED141 0x9b2d
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED141 0x9bcd
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED141 0x9c6d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED141 0x9d0d
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED141 0x9dad
+#define mmUNIPHY_MACRO_CNTL_RESERVED142 0x494e
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED142 0x494e
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED142 0x49ee
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED142 0x9a8e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED142 0x9b2e
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED142 0x9bce
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED142 0x9c6e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED142 0x9d0e
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED142 0x9dae
+#define mmUNIPHY_MACRO_CNTL_RESERVED143 0x494f
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED143 0x494f
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED143 0x49ef
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED143 0x9a8f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED143 0x9b2f
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED143 0x9bcf
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED143 0x9c6f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED143 0x9d0f
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED143 0x9daf
+#define mmUNIPHY_MACRO_CNTL_RESERVED144 0x4950
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED144 0x4950
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED144 0x49f0
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED144 0x9a90
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED144 0x9b30
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED144 0x9bd0
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED144 0x9c70
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED144 0x9d10
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED144 0x9db0
+#define mmUNIPHY_MACRO_CNTL_RESERVED145 0x4951
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED145 0x4951
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED145 0x49f1
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED145 0x9a91
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED145 0x9b31
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED145 0x9bd1
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED145 0x9c71
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED145 0x9d11
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED145 0x9db1
+#define mmUNIPHY_MACRO_CNTL_RESERVED146 0x4952
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED146 0x4952
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED146 0x49f2
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED146 0x9a92
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED146 0x9b32
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED146 0x9bd2
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED146 0x9c72
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED146 0x9d12
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED146 0x9db2
+#define mmUNIPHY_MACRO_CNTL_RESERVED147 0x4953
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED147 0x4953
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED147 0x49f3
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED147 0x9a93
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED147 0x9b33
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED147 0x9bd3
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED147 0x9c73
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED147 0x9d13
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED147 0x9db3
+#define mmUNIPHY_MACRO_CNTL_RESERVED148 0x4954
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED148 0x4954
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED148 0x49f4
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED148 0x9a94
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED148 0x9b34
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED148 0x9bd4
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED148 0x9c74
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED148 0x9d14
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED148 0x9db4
+#define mmUNIPHY_MACRO_CNTL_RESERVED149 0x4955
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED149 0x4955
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED149 0x49f5
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED149 0x9a95
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED149 0x9b35
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED149 0x9bd5
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED149 0x9c75
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED149 0x9d15
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED149 0x9db5
+#define mmUNIPHY_MACRO_CNTL_RESERVED150 0x4956
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED150 0x4956
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED150 0x49f6
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED150 0x9a96
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED150 0x9b36
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED150 0x9bd6
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED150 0x9c76
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED150 0x9d16
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED150 0x9db6
+#define mmUNIPHY_MACRO_CNTL_RESERVED151 0x4957
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED151 0x4957
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED151 0x49f7
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED151 0x9a97
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED151 0x9b37
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED151 0x9bd7
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED151 0x9c77
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED151 0x9d17
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED151 0x9db7
+#define mmUNIPHY_MACRO_CNTL_RESERVED152 0x4958
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED152 0x4958
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED152 0x49f8
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED152 0x9a98
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED152 0x9b38
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED152 0x9bd8
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED152 0x9c78
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED152 0x9d18
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED152 0x9db8
+#define mmUNIPHY_MACRO_CNTL_RESERVED153 0x4959
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED153 0x4959
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED153 0x49f9
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED153 0x9a99
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED153 0x9b39
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED153 0x9bd9
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED153 0x9c79
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED153 0x9d19
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED153 0x9db9
+#define mmUNIPHY_MACRO_CNTL_RESERVED154 0x495a
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED154 0x495a
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED154 0x49fa
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED154 0x9a9a
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED154 0x9b3a
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED154 0x9bda
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED154 0x9c7a
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED154 0x9d1a
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED154 0x9dba
+#define mmUNIPHY_MACRO_CNTL_RESERVED155 0x495b
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED155 0x495b
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED155 0x49fb
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED155 0x9a9b
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED155 0x9b3b
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED155 0x9bdb
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED155 0x9c7b
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED155 0x9d1b
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED155 0x9dbb
+#define mmUNIPHY_MACRO_CNTL_RESERVED156 0x495c
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED156 0x495c
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED156 0x49fc
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED156 0x9a9c
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED156 0x9b3c
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED156 0x9bdc
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED156 0x9c7c
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED156 0x9d1c
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED156 0x9dbc
+#define mmUNIPHY_MACRO_CNTL_RESERVED157 0x495d
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED157 0x495d
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED157 0x49fd
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED157 0x9a9d
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED157 0x9b3d
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED157 0x9bdd
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED157 0x9c7d
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED157 0x9d1d
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED157 0x9dbd
+#define mmUNIPHY_MACRO_CNTL_RESERVED158 0x495e
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED158 0x495e
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED158 0x49fe
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED158 0x9a9e
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED158 0x9b3e
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED158 0x9bde
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED158 0x9c7e
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED158 0x9d1e
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED158 0x9dbe
+#define mmUNIPHY_MACRO_CNTL_RESERVED159 0x495f
+#define mmDCIO_UNIPHY0_UNIPHY_MACRO_CNTL_RESERVED159 0x495f
+#define mmDCIO_UNIPHY1_UNIPHY_MACRO_CNTL_RESERVED159 0x49ff
+#define mmDCIO_UNIPHY2_UNIPHY_MACRO_CNTL_RESERVED159 0x9a9f
+#define mmDCIO_UNIPHY3_UNIPHY_MACRO_CNTL_RESERVED159 0x9b3f
+#define mmDCIO_UNIPHY4_UNIPHY_MACRO_CNTL_RESERVED159 0x9bdf
+#define mmDCIO_UNIPHY5_UNIPHY_MACRO_CNTL_RESERVED159 0x9c7f
+#define mmDCIO_UNIPHY6_UNIPHY_MACRO_CNTL_RESERVED159 0x9d1f
+#define mmDCIO_UNIPHY7_UNIPHY_MACRO_CNTL_RESERVED159 0x9dbf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED0 0x5a84
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED1 0x5a85
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED2 0x5a86
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED3 0x5a87
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED4 0x5a88
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED5 0x5a89
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED6 0x5a8a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED7 0x5a8b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED8 0x5a8c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED9 0x5a8d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED10 0x5a8e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED11 0x5a8f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED12 0x5a90
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED13 0x5a91
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED14 0x5a92
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED15 0x5a93
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED16 0x5a94
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED17 0x5a95
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED18 0x5a96
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED19 0x5a97
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED20 0x5a98
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED21 0x5a99
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED22 0x5a9a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED23 0x5a9b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED24 0x5a9c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED25 0x5a9d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED26 0x5a9e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED27 0x5a9f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED28 0x5aa0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED29 0x5aa1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED30 0x5aa2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED31 0x5aa3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED32 0x5aa4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED33 0x5aa5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED34 0x5aa6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED35 0x5aa7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED36 0x5aa8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED37 0x5aa9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED38 0x5aaa
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED39 0x5aab
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED40 0x5aac
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED41 0x5aad
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED42 0x5aae
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED43 0x5aaf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED44 0x5ab0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED45 0x5ab1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED46 0x5ab2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED47 0x5ab3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED48 0x5ab4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED49 0x5ab5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED50 0x5ab6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED51 0x5ab7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED52 0x5ab8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED53 0x5ab9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED54 0x5aba
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED55 0x5abb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED56 0x5abc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED57 0x5abd
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED58 0x5abe
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED59 0x5abf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED60 0x5ac0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED61 0x5ac1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED62 0x5ac2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED63 0x5ac3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED64 0x5ac4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED65 0x5ac5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED66 0x5ac6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED67 0x5ac7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED68 0x5ac8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED69 0x5ac9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED70 0x5aca
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED71 0x5acb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED72 0x5acc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED73 0x5acd
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED74 0x5ace
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED75 0x5acf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED76 0x5ad0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED77 0x5ad1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED78 0x5ad2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED79 0x5ad3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED80 0x5ad4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED81 0x5ad5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED82 0x5ad6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED83 0x5ad7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED84 0x5ad8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED85 0x5ad9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED86 0x5ada
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED87 0x5adb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED88 0x5adc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED89 0x5add
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED90 0x5ade
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED91 0x5adf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED92 0x5ae0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED93 0x5ae1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED94 0x5ae2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED95 0x5ae3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED96 0x5ae4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED97 0x5ae5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED98 0x5ae6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED99 0x5ae7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED100 0x5ae8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED101 0x5ae9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED102 0x5aea
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED103 0x5aeb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED104 0x5aec
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED105 0x5aed
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED106 0x5aee
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED107 0x5aef
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED108 0x5af0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED109 0x5af1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED110 0x5af2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED111 0x5af3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED112 0x5af4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED113 0x5af5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED114 0x5af6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED115 0x5af7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED116 0x5af8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED117 0x5af9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED118 0x5afa
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED119 0x5afb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED120 0x5afc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED121 0x5afd
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED122 0x5afe
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED123 0x5aff
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED124 0x5b00
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED125 0x5b01
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED126 0x5b02
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED127 0x5b03
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED128 0x5b04
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED129 0x5b05
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED130 0x5b06
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED131 0x5b07
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED132 0x5b08
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED133 0x5b09
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED134 0x5b0a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED135 0x5b0b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED136 0x5b0c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED137 0x5b0d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED138 0x5b0e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED139 0x5b0f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED140 0x5b10
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED141 0x5b11
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED142 0x5b12
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED143 0x5b13
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED144 0x5b14
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED145 0x5b15
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED146 0x5b16
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED147 0x5b17
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED148 0x5b18
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED149 0x5b19
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED150 0x5b1a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED151 0x5b1b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED152 0x5b1c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED153 0x5b1d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED154 0x5b1e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED155 0x5b1f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED156 0x5b20
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED157 0x5b21
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED158 0x5b22
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED159 0x5b23
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED160 0x5b24
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED161 0x5b25
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED162 0x5b26
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED163 0x5b27
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED164 0x5b28
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED165 0x5b29
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED166 0x5b2a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED167 0x5b2b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED168 0x5b2c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED169 0x5b2d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED170 0x5b2e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED171 0x5b2f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED172 0x5b30
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED173 0x5b31
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED174 0x5b32
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED175 0x5b33
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED176 0x5b34
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED177 0x5b35
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED178 0x5b36
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED179 0x5b37
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED180 0x5b38
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED181 0x5b39
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED182 0x5b3a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED183 0x5b3b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED184 0x5b3c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED185 0x5b3d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED186 0x5b3e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED187 0x5b3f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED188 0x5b40
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED189 0x5b41
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED190 0x5b42
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED191 0x5b43
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED192 0x5b44
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED193 0x5b45
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED194 0x5b46
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED195 0x5b47
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED196 0x5b48
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED197 0x5b49
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED198 0x5b4a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED199 0x5b4b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED200 0x5b4c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED201 0x5b4d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED202 0x5b4e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED203 0x5b4f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED204 0x5b50
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED205 0x5b51
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED206 0x5b52
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED207 0x5b53
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED208 0x5b54
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED209 0x5b55
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED210 0x5b56
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED211 0x5b57
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED212 0x5b58
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED213 0x5b59
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED214 0x5b5a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED215 0x5b5b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED216 0x5b5c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED217 0x5b5d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED218 0x5b5e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED219 0x5b5f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED220 0x5b60
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED221 0x5b61
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED222 0x5b62
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED223 0x5b63
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED224 0x5b64
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED225 0x5b65
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED226 0x5b66
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED227 0x5b67
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED228 0x5b68
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED229 0x5b69
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED230 0x5b6a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED231 0x5b6b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED232 0x5b6c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED233 0x5b6d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED234 0x5b6e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED235 0x5b6f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED236 0x5b70
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED237 0x5b71
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED238 0x5b72
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED239 0x5b73
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED240 0x5b74
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED241 0x5b75
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED242 0x5b76
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED243 0x5b77
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED244 0x5b78
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED245 0x5b79
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED246 0x5b7a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED247 0x5b7b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED248 0x5b7c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED249 0x5b7d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED250 0x5b7e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED251 0x5b7f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED252 0x5b80
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED253 0x5b81
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED254 0x5b82
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED255 0x5b83
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED256 0x5b84
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED257 0x5b85
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED258 0x5b86
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED259 0x5b87
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED260 0x5b88
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED261 0x5b89
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED262 0x5b8a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED263 0x5b8b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED264 0x5b8c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED265 0x5b8d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED266 0x5b8e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED267 0x5b8f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED268 0x5b90
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED269 0x5b91
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED270 0x5b92
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED271 0x5b93
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED272 0x5b94
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED273 0x5b95
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED274 0x5b96
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED275 0x5b97
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED276 0x5b98
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED277 0x5b99
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED278 0x5b9a
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED279 0x5b9b
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED280 0x5b9c
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED281 0x5b9d
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED282 0x5b9e
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED283 0x5b9f
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED284 0x5ba0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED285 0x5ba1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED286 0x5ba2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED287 0x5ba3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED288 0x5ba4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED289 0x5ba5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED290 0x5ba6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED291 0x5ba7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED292 0x5ba8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED293 0x5ba9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED294 0x5baa
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED295 0x5bab
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED296 0x5bac
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED297 0x5bad
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED298 0x5bae
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED299 0x5baf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED300 0x5bb0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED301 0x5bb1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED302 0x5bb2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED303 0x5bb3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED304 0x5bb4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED305 0x5bb5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED306 0x5bb6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED307 0x5bb7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED308 0x5bb8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED309 0x5bb9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED310 0x5bba
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED311 0x5bbb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED312 0x5bbc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED313 0x5bbd
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED314 0x5bbe
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED315 0x5bbf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED316 0x5bc0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED317 0x5bc1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED318 0x5bc2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED319 0x5bc3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED320 0x5bc4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED321 0x5bc5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED322 0x5bc6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED323 0x5bc7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED324 0x5bc8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED325 0x5bc9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED326 0x5bca
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED327 0x5bcb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED328 0x5bcc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED329 0x5bcd
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED330 0x5bce
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED331 0x5bcf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED332 0x5bd0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED333 0x5bd1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED334 0x5bd2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED335 0x5bd3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED336 0x5bd4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED337 0x5bd5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED338 0x5bd6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED339 0x5bd7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED340 0x5bd8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED341 0x5bd9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED342 0x5bda
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED343 0x5bdb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED344 0x5bdc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED345 0x5bdd
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED346 0x5bde
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED347 0x5bdf
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED348 0x5be0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED349 0x5be1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED350 0x5be2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED351 0x5be3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED352 0x5be4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED353 0x5be5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED354 0x5be6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED355 0x5be7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED356 0x5be8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED357 0x5be9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED358 0x5bea
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED359 0x5beb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED360 0x5bec
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED361 0x5bed
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED362 0x5bee
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED363 0x5bef
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED364 0x5bf0
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED365 0x5bf1
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED366 0x5bf2
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED367 0x5bf3
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED368 0x5bf4
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED369 0x5bf5
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED370 0x5bf6
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED371 0x5bf7
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED372 0x5bf8
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED373 0x5bf9
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED374 0x5bfa
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED375 0x5bfb
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED376 0x5bfc
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED377 0x5bfd
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED378 0x5bfe
+#define mmDCRX_PHY_MACRO_CNTL_RESERVED379 0x5bff
+#define mmDPHY_MACRO_CNTL_RESERVED0 0x5d98
+#define mmDPHY_MACRO_CNTL_RESERVED1 0x5d99
+#define mmDPHY_MACRO_CNTL_RESERVED2 0x5d9a
+#define mmDPHY_MACRO_CNTL_RESERVED3 0x5d9b
+#define mmDPHY_MACRO_CNTL_RESERVED4 0x5d9c
+#define mmDPHY_MACRO_CNTL_RESERVED5 0x5d9d
+#define mmDPHY_MACRO_CNTL_RESERVED6 0x5d9e
+#define mmDPHY_MACRO_CNTL_RESERVED7 0x5d9f
+#define mmDPHY_MACRO_CNTL_RESERVED8 0x5da0
+#define mmDPHY_MACRO_CNTL_RESERVED9 0x5da1
+#define mmDPHY_MACRO_CNTL_RESERVED10 0x5da2
+#define mmDPHY_MACRO_CNTL_RESERVED11 0x5da3
+#define mmDPHY_MACRO_CNTL_RESERVED12 0x5da4
+#define mmDPHY_MACRO_CNTL_RESERVED13 0x5da5
+#define mmDPHY_MACRO_CNTL_RESERVED14 0x5da6
+#define mmDPHY_MACRO_CNTL_RESERVED15 0x5da7
+#define mmDPHY_MACRO_CNTL_RESERVED16 0x5da8
+#define mmDPHY_MACRO_CNTL_RESERVED17 0x5da9
+#define mmDPHY_MACRO_CNTL_RESERVED18 0x5daa
+#define mmDPHY_MACRO_CNTL_RESERVED19 0x5dab
+#define mmDPHY_MACRO_CNTL_RESERVED20 0x5dac
+#define mmDPHY_MACRO_CNTL_RESERVED21 0x5dad
+#define mmDPHY_MACRO_CNTL_RESERVED22 0x5dae
+#define mmDPHY_MACRO_CNTL_RESERVED23 0x5daf
+#define mmDPHY_MACRO_CNTL_RESERVED24 0x5db0
+#define mmDPHY_MACRO_CNTL_RESERVED25 0x5db1
+#define mmDPHY_MACRO_CNTL_RESERVED26 0x5db2
+#define mmDPHY_MACRO_CNTL_RESERVED27 0x5db3
+#define mmDPHY_MACRO_CNTL_RESERVED28 0x5db4
+#define mmDPHY_MACRO_CNTL_RESERVED29 0x5db5
+#define mmDPHY_MACRO_CNTL_RESERVED30 0x5db6
+#define mmDPHY_MACRO_CNTL_RESERVED31 0x5db7
+#define mmDPHY_MACRO_CNTL_RESERVED32 0x5db8
+#define mmDPHY_MACRO_CNTL_RESERVED33 0x5db9
+#define mmDPHY_MACRO_CNTL_RESERVED34 0x5dba
+#define mmDPHY_MACRO_CNTL_RESERVED35 0x5dbb
+#define mmDPHY_MACRO_CNTL_RESERVED36 0x5dbc
+#define mmDPHY_MACRO_CNTL_RESERVED37 0x5dbd
+#define mmDPHY_MACRO_CNTL_RESERVED38 0x5dbe
+#define mmDPHY_MACRO_CNTL_RESERVED39 0x5dbf
+#define mmDPHY_MACRO_CNTL_RESERVED40 0x5dc0
+#define mmDPHY_MACRO_CNTL_RESERVED41 0x5dc1
+#define mmDPHY_MACRO_CNTL_RESERVED42 0x5dc2
+#define mmDPHY_MACRO_CNTL_RESERVED43 0x5dc3
+#define mmDPHY_MACRO_CNTL_RESERVED44 0x5dc4
+#define mmDPHY_MACRO_CNTL_RESERVED45 0x5dc5
+#define mmDPHY_MACRO_CNTL_RESERVED46 0x5dc6
+#define mmDPHY_MACRO_CNTL_RESERVED47 0x5dc7
+#define mmDPHY_MACRO_CNTL_RESERVED48 0x5dc8
+#define mmDPHY_MACRO_CNTL_RESERVED49 0x5dc9
+#define mmDPHY_MACRO_CNTL_RESERVED50 0x5dca
+#define mmDPHY_MACRO_CNTL_RESERVED51 0x5dcb
+#define mmDPHY_MACRO_CNTL_RESERVED52 0x5dcc
+#define mmDPHY_MACRO_CNTL_RESERVED53 0x5dcd
+#define mmDPHY_MACRO_CNTL_RESERVED54 0x5dce
+#define mmDPHY_MACRO_CNTL_RESERVED55 0x5dcf
+#define mmDPHY_MACRO_CNTL_RESERVED56 0x5dd0
+#define mmDPHY_MACRO_CNTL_RESERVED57 0x5dd1
+#define mmDPHY_MACRO_CNTL_RESERVED58 0x5dd2
+#define mmDPHY_MACRO_CNTL_RESERVED59 0x5dd3
+#define mmDPHY_MACRO_CNTL_RESERVED60 0x5dd4
+#define mmDPHY_MACRO_CNTL_RESERVED61 0x5dd5
+#define mmDPHY_MACRO_CNTL_RESERVED62 0x5dd6
+#define mmDPHY_MACRO_CNTL_RESERVED63 0x5dd7
+#define mmGRPH_ENABLE 0x1a00
+#define mmDCP0_GRPH_ENABLE 0x1a00
+#define mmDCP1_GRPH_ENABLE 0x1c00
+#define mmDCP2_GRPH_ENABLE 0x1e00
+#define mmDCP3_GRPH_ENABLE 0x4000
+#define mmDCP4_GRPH_ENABLE 0x4200
+#define mmDCP5_GRPH_ENABLE 0x4400
+#define mmGRPH_CONTROL 0x1a01
+#define mmDCP0_GRPH_CONTROL 0x1a01
+#define mmDCP1_GRPH_CONTROL 0x1c01
+#define mmDCP2_GRPH_CONTROL 0x1e01
+#define mmDCP3_GRPH_CONTROL 0x4001
+#define mmDCP4_GRPH_CONTROL 0x4201
+#define mmDCP5_GRPH_CONTROL 0x4401
+#define mmGRPH_LUT_10BIT_BYPASS 0x1a02
+#define mmDCP0_GRPH_LUT_10BIT_BYPASS 0x1a02
+#define mmDCP1_GRPH_LUT_10BIT_BYPASS 0x1c02
+#define mmDCP2_GRPH_LUT_10BIT_BYPASS 0x1e02
+#define mmDCP3_GRPH_LUT_10BIT_BYPASS 0x4002
+#define mmDCP4_GRPH_LUT_10BIT_BYPASS 0x4202
+#define mmDCP5_GRPH_LUT_10BIT_BYPASS 0x4402
+#define mmGRPH_SWAP_CNTL 0x1a03
+#define mmDCP0_GRPH_SWAP_CNTL 0x1a03
+#define mmDCP1_GRPH_SWAP_CNTL 0x1c03
+#define mmDCP2_GRPH_SWAP_CNTL 0x1e03
+#define mmDCP3_GRPH_SWAP_CNTL 0x4003
+#define mmDCP4_GRPH_SWAP_CNTL 0x4203
+#define mmDCP5_GRPH_SWAP_CNTL 0x4403
+#define mmGRPH_PRIMARY_SURFACE_ADDRESS 0x1a04
+#define mmDCP0_GRPH_PRIMARY_SURFACE_ADDRESS 0x1a04
+#define mmDCP1_GRPH_PRIMARY_SURFACE_ADDRESS 0x1c04
+#define mmDCP2_GRPH_PRIMARY_SURFACE_ADDRESS 0x1e04
+#define mmDCP3_GRPH_PRIMARY_SURFACE_ADDRESS 0x4004
+#define mmDCP4_GRPH_PRIMARY_SURFACE_ADDRESS 0x4204
+#define mmDCP5_GRPH_PRIMARY_SURFACE_ADDRESS 0x4404
+#define mmGRPH_SECONDARY_SURFACE_ADDRESS 0x1a05
+#define mmDCP0_GRPH_SECONDARY_SURFACE_ADDRESS 0x1a05
+#define mmDCP1_GRPH_SECONDARY_SURFACE_ADDRESS 0x1c05
+#define mmDCP2_GRPH_SECONDARY_SURFACE_ADDRESS 0x1e05
+#define mmDCP3_GRPH_SECONDARY_SURFACE_ADDRESS 0x4005
+#define mmDCP4_GRPH_SECONDARY_SURFACE_ADDRESS 0x4205
+#define mmDCP5_GRPH_SECONDARY_SURFACE_ADDRESS 0x4405
+#define mmGRPH_PITCH 0x1a06
+#define mmDCP0_GRPH_PITCH 0x1a06
+#define mmDCP1_GRPH_PITCH 0x1c06
+#define mmDCP2_GRPH_PITCH 0x1e06
+#define mmDCP3_GRPH_PITCH 0x4006
+#define mmDCP4_GRPH_PITCH 0x4206
+#define mmDCP5_GRPH_PITCH 0x4406
+#define mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
+#define mmDCP0_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1a07
+#define mmDCP1_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1c07
+#define mmDCP2_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x1e07
+#define mmDCP3_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x4007
+#define mmDCP4_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x4207
+#define mmDCP5_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x4407
+#define mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
+#define mmDCP0_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1a08
+#define mmDCP1_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1c08
+#define mmDCP2_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x1e08
+#define mmDCP3_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x4008
+#define mmDCP4_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x4208
+#define mmDCP5_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x4408
+#define mmGRPH_SURFACE_OFFSET_X 0x1a09
+#define mmDCP0_GRPH_SURFACE_OFFSET_X 0x1a09
+#define mmDCP1_GRPH_SURFACE_OFFSET_X 0x1c09
+#define mmDCP2_GRPH_SURFACE_OFFSET_X 0x1e09
+#define mmDCP3_GRPH_SURFACE_OFFSET_X 0x4009
+#define mmDCP4_GRPH_SURFACE_OFFSET_X 0x4209
+#define mmDCP5_GRPH_SURFACE_OFFSET_X 0x4409
+#define mmGRPH_SURFACE_OFFSET_Y 0x1a0a
+#define mmDCP0_GRPH_SURFACE_OFFSET_Y 0x1a0a
+#define mmDCP1_GRPH_SURFACE_OFFSET_Y 0x1c0a
+#define mmDCP2_GRPH_SURFACE_OFFSET_Y 0x1e0a
+#define mmDCP3_GRPH_SURFACE_OFFSET_Y 0x400a
+#define mmDCP4_GRPH_SURFACE_OFFSET_Y 0x420a
+#define mmDCP5_GRPH_SURFACE_OFFSET_Y 0x440a
+#define mmGRPH_X_START 0x1a0b
+#define mmDCP0_GRPH_X_START 0x1a0b
+#define mmDCP1_GRPH_X_START 0x1c0b
+#define mmDCP2_GRPH_X_START 0x1e0b
+#define mmDCP3_GRPH_X_START 0x400b
+#define mmDCP4_GRPH_X_START 0x420b
+#define mmDCP5_GRPH_X_START 0x440b
+#define mmGRPH_Y_START 0x1a0c
+#define mmDCP0_GRPH_Y_START 0x1a0c
+#define mmDCP1_GRPH_Y_START 0x1c0c
+#define mmDCP2_GRPH_Y_START 0x1e0c
+#define mmDCP3_GRPH_Y_START 0x400c
+#define mmDCP4_GRPH_Y_START 0x420c
+#define mmDCP5_GRPH_Y_START 0x440c
+#define mmGRPH_X_END 0x1a0d
+#define mmDCP0_GRPH_X_END 0x1a0d
+#define mmDCP1_GRPH_X_END 0x1c0d
+#define mmDCP2_GRPH_X_END 0x1e0d
+#define mmDCP3_GRPH_X_END 0x400d
+#define mmDCP4_GRPH_X_END 0x420d
+#define mmDCP5_GRPH_X_END 0x440d
+#define mmGRPH_Y_END 0x1a0e
+#define mmDCP0_GRPH_Y_END 0x1a0e
+#define mmDCP1_GRPH_Y_END 0x1c0e
+#define mmDCP2_GRPH_Y_END 0x1e0e
+#define mmDCP3_GRPH_Y_END 0x400e
+#define mmDCP4_GRPH_Y_END 0x420e
+#define mmDCP5_GRPH_Y_END 0x440e
+#define mmINPUT_GAMMA_CONTROL 0x1a10
+#define mmDCP0_INPUT_GAMMA_CONTROL 0x1a10
+#define mmDCP1_INPUT_GAMMA_CONTROL 0x1c10
+#define mmDCP2_INPUT_GAMMA_CONTROL 0x1e10
+#define mmDCP3_INPUT_GAMMA_CONTROL 0x4010
+#define mmDCP4_INPUT_GAMMA_CONTROL 0x4210
+#define mmDCP5_INPUT_GAMMA_CONTROL 0x4410
+#define mmGRPH_UPDATE 0x1a11
+#define mmDCP0_GRPH_UPDATE 0x1a11
+#define mmDCP1_GRPH_UPDATE 0x1c11
+#define mmDCP2_GRPH_UPDATE 0x1e11
+#define mmDCP3_GRPH_UPDATE 0x4011
+#define mmDCP4_GRPH_UPDATE 0x4211
+#define mmDCP5_GRPH_UPDATE 0x4411
+#define mmGRPH_FLIP_CONTROL 0x1a12
+#define mmDCP0_GRPH_FLIP_CONTROL 0x1a12
+#define mmDCP1_GRPH_FLIP_CONTROL 0x1c12
+#define mmDCP2_GRPH_FLIP_CONTROL 0x1e12
+#define mmDCP3_GRPH_FLIP_CONTROL 0x4012
+#define mmDCP4_GRPH_FLIP_CONTROL 0x4212
+#define mmDCP5_GRPH_FLIP_CONTROL 0x4412
+#define mmGRPH_SURFACE_ADDRESS_INUSE 0x1a13
+#define mmDCP0_GRPH_SURFACE_ADDRESS_INUSE 0x1a13
+#define mmDCP1_GRPH_SURFACE_ADDRESS_INUSE 0x1c13
+#define mmDCP2_GRPH_SURFACE_ADDRESS_INUSE 0x1e13
+#define mmDCP3_GRPH_SURFACE_ADDRESS_INUSE 0x4013
+#define mmDCP4_GRPH_SURFACE_ADDRESS_INUSE 0x4213
+#define mmDCP5_GRPH_SURFACE_ADDRESS_INUSE 0x4413
+#define mmGRPH_DFQ_CONTROL 0x1a14
+#define mmDCP0_GRPH_DFQ_CONTROL 0x1a14
+#define mmDCP1_GRPH_DFQ_CONTROL 0x1c14
+#define mmDCP2_GRPH_DFQ_CONTROL 0x1e14
+#define mmDCP3_GRPH_DFQ_CONTROL 0x4014
+#define mmDCP4_GRPH_DFQ_CONTROL 0x4214
+#define mmDCP5_GRPH_DFQ_CONTROL 0x4414
+#define mmGRPH_DFQ_STATUS 0x1a15
+#define mmDCP0_GRPH_DFQ_STATUS 0x1a15
+#define mmDCP1_GRPH_DFQ_STATUS 0x1c15
+#define mmDCP2_GRPH_DFQ_STATUS 0x1e15
+#define mmDCP3_GRPH_DFQ_STATUS 0x4015
+#define mmDCP4_GRPH_DFQ_STATUS 0x4215
+#define mmDCP5_GRPH_DFQ_STATUS 0x4415
+#define mmGRPH_INTERRUPT_STATUS 0x1a16
+#define mmDCP0_GRPH_INTERRUPT_STATUS 0x1a16
+#define mmDCP1_GRPH_INTERRUPT_STATUS 0x1c16
+#define mmDCP2_GRPH_INTERRUPT_STATUS 0x1e16
+#define mmDCP3_GRPH_INTERRUPT_STATUS 0x4016
+#define mmDCP4_GRPH_INTERRUPT_STATUS 0x4216
+#define mmDCP5_GRPH_INTERRUPT_STATUS 0x4416
+#define mmGRPH_INTERRUPT_CONTROL 0x1a17
+#define mmDCP0_GRPH_INTERRUPT_CONTROL 0x1a17
+#define mmDCP1_GRPH_INTERRUPT_CONTROL 0x1c17
+#define mmDCP2_GRPH_INTERRUPT_CONTROL 0x1e17
+#define mmDCP3_GRPH_INTERRUPT_CONTROL 0x4017
+#define mmDCP4_GRPH_INTERRUPT_CONTROL 0x4217
+#define mmDCP5_GRPH_INTERRUPT_CONTROL 0x4417
+#define mmGRPH_SURFACE_ADDRESS_HIGH_INUSE 0x1a18
+#define mmDCP0_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x1a18
+#define mmDCP1_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x1c18
+#define mmDCP2_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x1e18
+#define mmDCP3_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x4018
+#define mmDCP4_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x4218
+#define mmDCP5_GRPH_SURFACE_ADDRESS_HIGH_INUSE 0x4418
+#define mmGRPH_COMPRESS_SURFACE_ADDRESS 0x1a19
+#define mmDCP0_GRPH_COMPRESS_SURFACE_ADDRESS 0x1a19
+#define mmDCP1_GRPH_COMPRESS_SURFACE_ADDRESS 0x1c19
+#define mmDCP2_GRPH_COMPRESS_SURFACE_ADDRESS 0x1e19
+#define mmDCP3_GRPH_COMPRESS_SURFACE_ADDRESS 0x4019
+#define mmDCP4_GRPH_COMPRESS_SURFACE_ADDRESS 0x4219
+#define mmDCP5_GRPH_COMPRESS_SURFACE_ADDRESS 0x4419
+#define mmGRPH_COMPRESS_PITCH 0x1a1a
+#define mmDCP0_GRPH_COMPRESS_PITCH 0x1a1a
+#define mmDCP1_GRPH_COMPRESS_PITCH 0x1c1a
+#define mmDCP2_GRPH_COMPRESS_PITCH 0x1e1a
+#define mmDCP3_GRPH_COMPRESS_PITCH 0x401a
+#define mmDCP4_GRPH_COMPRESS_PITCH 0x421a
+#define mmDCP5_GRPH_COMPRESS_PITCH 0x441a
+#define mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x1a1b
+#define mmDCP0_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x1a1b
+#define mmDCP1_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x1c1b
+#define mmDCP2_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x1e1b
+#define mmDCP3_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x401b
+#define mmDCP4_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x421b
+#define mmDCP5_GRPH_COMPRESS_SURFACE_ADDRESS_HIGH 0x441b
+#define mmGRPH_PIPE_OUTSTANDING_REQUEST_LIMIT 0x1a1c
+#define mmDCP0_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT 0x1a1c
+#define mmDCP1_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT 0x1c1c
+#define mmDCP2_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT 0x1e1c
+#define mmDCP3_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT 0x401c
+#define mmDCP4_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT 0x421c
+#define mmDCP5_GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT 0x441c
+#define mmPRESCALE_GRPH_CONTROL 0x1a2d
+#define mmDCP0_PRESCALE_GRPH_CONTROL 0x1a2d
+#define mmDCP1_PRESCALE_GRPH_CONTROL 0x1c2d
+#define mmDCP2_PRESCALE_GRPH_CONTROL 0x1e2d
+#define mmDCP3_PRESCALE_GRPH_CONTROL 0x402d
+#define mmDCP4_PRESCALE_GRPH_CONTROL 0x422d
+#define mmDCP5_PRESCALE_GRPH_CONTROL 0x442d
+#define mmPRESCALE_VALUES_GRPH_R 0x1a2e
+#define mmDCP0_PRESCALE_VALUES_GRPH_R 0x1a2e
+#define mmDCP1_PRESCALE_VALUES_GRPH_R 0x1c2e
+#define mmDCP2_PRESCALE_VALUES_GRPH_R 0x1e2e
+#define mmDCP3_PRESCALE_VALUES_GRPH_R 0x402e
+#define mmDCP4_PRESCALE_VALUES_GRPH_R 0x422e
+#define mmDCP5_PRESCALE_VALUES_GRPH_R 0x442e
+#define mmPRESCALE_VALUES_GRPH_G 0x1a2f
+#define mmDCP0_PRESCALE_VALUES_GRPH_G 0x1a2f
+#define mmDCP1_PRESCALE_VALUES_GRPH_G 0x1c2f
+#define mmDCP2_PRESCALE_VALUES_GRPH_G 0x1e2f
+#define mmDCP3_PRESCALE_VALUES_GRPH_G 0x402f
+#define mmDCP4_PRESCALE_VALUES_GRPH_G 0x422f
+#define mmDCP5_PRESCALE_VALUES_GRPH_G 0x442f
+#define mmPRESCALE_VALUES_GRPH_B 0x1a30
+#define mmDCP0_PRESCALE_VALUES_GRPH_B 0x1a30
+#define mmDCP1_PRESCALE_VALUES_GRPH_B 0x1c30
+#define mmDCP2_PRESCALE_VALUES_GRPH_B 0x1e30
+#define mmDCP3_PRESCALE_VALUES_GRPH_B 0x4030
+#define mmDCP4_PRESCALE_VALUES_GRPH_B 0x4230
+#define mmDCP5_PRESCALE_VALUES_GRPH_B 0x4430
+#define mmINPUT_CSC_CONTROL 0x1a35
+#define mmDCP0_INPUT_CSC_CONTROL 0x1a35
+#define mmDCP1_INPUT_CSC_CONTROL 0x1c35
+#define mmDCP2_INPUT_CSC_CONTROL 0x1e35
+#define mmDCP3_INPUT_CSC_CONTROL 0x4035
+#define mmDCP4_INPUT_CSC_CONTROL 0x4235
+#define mmDCP5_INPUT_CSC_CONTROL 0x4435
+#define mmINPUT_CSC_C11_C12 0x1a36
+#define mmDCP0_INPUT_CSC_C11_C12 0x1a36
+#define mmDCP1_INPUT_CSC_C11_C12 0x1c36
+#define mmDCP2_INPUT_CSC_C11_C12 0x1e36
+#define mmDCP3_INPUT_CSC_C11_C12 0x4036
+#define mmDCP4_INPUT_CSC_C11_C12 0x4236
+#define mmDCP5_INPUT_CSC_C11_C12 0x4436
+#define mmINPUT_CSC_C13_C14 0x1a37
+#define mmDCP0_INPUT_CSC_C13_C14 0x1a37
+#define mmDCP1_INPUT_CSC_C13_C14 0x1c37
+#define mmDCP2_INPUT_CSC_C13_C14 0x1e37
+#define mmDCP3_INPUT_CSC_C13_C14 0x4037
+#define mmDCP4_INPUT_CSC_C13_C14 0x4237
+#define mmDCP5_INPUT_CSC_C13_C14 0x4437
+#define mmINPUT_CSC_C21_C22 0x1a38
+#define mmDCP0_INPUT_CSC_C21_C22 0x1a38
+#define mmDCP1_INPUT_CSC_C21_C22 0x1c38
+#define mmDCP2_INPUT_CSC_C21_C22 0x1e38
+#define mmDCP3_INPUT_CSC_C21_C22 0x4038
+#define mmDCP4_INPUT_CSC_C21_C22 0x4238
+#define mmDCP5_INPUT_CSC_C21_C22 0x4438
+#define mmINPUT_CSC_C23_C24 0x1a39
+#define mmDCP0_INPUT_CSC_C23_C24 0x1a39
+#define mmDCP1_INPUT_CSC_C23_C24 0x1c39
+#define mmDCP2_INPUT_CSC_C23_C24 0x1e39
+#define mmDCP3_INPUT_CSC_C23_C24 0x4039
+#define mmDCP4_INPUT_CSC_C23_C24 0x4239
+#define mmDCP5_INPUT_CSC_C23_C24 0x4439
+#define mmINPUT_CSC_C31_C32 0x1a3a
+#define mmDCP0_INPUT_CSC_C31_C32 0x1a3a
+#define mmDCP1_INPUT_CSC_C31_C32 0x1c3a
+#define mmDCP2_INPUT_CSC_C31_C32 0x1e3a
+#define mmDCP3_INPUT_CSC_C31_C32 0x403a
+#define mmDCP4_INPUT_CSC_C31_C32 0x423a
+#define mmDCP5_INPUT_CSC_C31_C32 0x443a
+#define mmINPUT_CSC_C33_C34 0x1a3b
+#define mmDCP0_INPUT_CSC_C33_C34 0x1a3b
+#define mmDCP1_INPUT_CSC_C33_C34 0x1c3b
+#define mmDCP2_INPUT_CSC_C33_C34 0x1e3b
+#define mmDCP3_INPUT_CSC_C33_C34 0x403b
+#define mmDCP4_INPUT_CSC_C33_C34 0x423b
+#define mmDCP5_INPUT_CSC_C33_C34 0x443b
+#define mmOUTPUT_CSC_CONTROL 0x1a3c
+#define mmDCP0_OUTPUT_CSC_CONTROL 0x1a3c
+#define mmDCP1_OUTPUT_CSC_CONTROL 0x1c3c
+#define mmDCP2_OUTPUT_CSC_CONTROL 0x1e3c
+#define mmDCP3_OUTPUT_CSC_CONTROL 0x403c
+#define mmDCP4_OUTPUT_CSC_CONTROL 0x423c
+#define mmDCP5_OUTPUT_CSC_CONTROL 0x443c
+#define mmOUTPUT_CSC_C11_C12 0x1a3d
+#define mmDCP0_OUTPUT_CSC_C11_C12 0x1a3d
+#define mmDCP1_OUTPUT_CSC_C11_C12 0x1c3d
+#define mmDCP2_OUTPUT_CSC_C11_C12 0x1e3d
+#define mmDCP3_OUTPUT_CSC_C11_C12 0x403d
+#define mmDCP4_OUTPUT_CSC_C11_C12 0x423d
+#define mmDCP5_OUTPUT_CSC_C11_C12 0x443d
+#define mmOUTPUT_CSC_C13_C14 0x1a3e
+#define mmDCP0_OUTPUT_CSC_C13_C14 0x1a3e
+#define mmDCP1_OUTPUT_CSC_C13_C14 0x1c3e
+#define mmDCP2_OUTPUT_CSC_C13_C14 0x1e3e
+#define mmDCP3_OUTPUT_CSC_C13_C14 0x403e
+#define mmDCP4_OUTPUT_CSC_C13_C14 0x423e
+#define mmDCP5_OUTPUT_CSC_C13_C14 0x443e
+#define mmOUTPUT_CSC_C21_C22 0x1a3f
+#define mmDCP0_OUTPUT_CSC_C21_C22 0x1a3f
+#define mmDCP1_OUTPUT_CSC_C21_C22 0x1c3f
+#define mmDCP2_OUTPUT_CSC_C21_C22 0x1e3f
+#define mmDCP3_OUTPUT_CSC_C21_C22 0x403f
+#define mmDCP4_OUTPUT_CSC_C21_C22 0x423f
+#define mmDCP5_OUTPUT_CSC_C21_C22 0x443f
+#define mmOUTPUT_CSC_C23_C24 0x1a40
+#define mmDCP0_OUTPUT_CSC_C23_C24 0x1a40
+#define mmDCP1_OUTPUT_CSC_C23_C24 0x1c40
+#define mmDCP2_OUTPUT_CSC_C23_C24 0x1e40
+#define mmDCP3_OUTPUT_CSC_C23_C24 0x4040
+#define mmDCP4_OUTPUT_CSC_C23_C24 0x4240
+#define mmDCP5_OUTPUT_CSC_C23_C24 0x4440
+#define mmOUTPUT_CSC_C31_C32 0x1a41
+#define mmDCP0_OUTPUT_CSC_C31_C32 0x1a41
+#define mmDCP1_OUTPUT_CSC_C31_C32 0x1c41
+#define mmDCP2_OUTPUT_CSC_C31_C32 0x1e41
+#define mmDCP3_OUTPUT_CSC_C31_C32 0x4041
+#define mmDCP4_OUTPUT_CSC_C31_C32 0x4241
+#define mmDCP5_OUTPUT_CSC_C31_C32 0x4441
+#define mmOUTPUT_CSC_C33_C34 0x1a42
+#define mmDCP0_OUTPUT_CSC_C33_C34 0x1a42
+#define mmDCP1_OUTPUT_CSC_C33_C34 0x1c42
+#define mmDCP2_OUTPUT_CSC_C33_C34 0x1e42
+#define mmDCP3_OUTPUT_CSC_C33_C34 0x4042
+#define mmDCP4_OUTPUT_CSC_C33_C34 0x4242
+#define mmDCP5_OUTPUT_CSC_C33_C34 0x4442
+#define mmCOMM_MATRIXA_TRANS_C11_C12 0x1a43
+#define mmDCP0_COMM_MATRIXA_TRANS_C11_C12 0x1a43
+#define mmDCP1_COMM_MATRIXA_TRANS_C11_C12 0x1c43
+#define mmDCP2_COMM_MATRIXA_TRANS_C11_C12 0x1e43
+#define mmDCP3_COMM_MATRIXA_TRANS_C11_C12 0x4043
+#define mmDCP4_COMM_MATRIXA_TRANS_C11_C12 0x4243
+#define mmDCP5_COMM_MATRIXA_TRANS_C11_C12 0x4443
+#define mmCOMM_MATRIXA_TRANS_C13_C14 0x1a44
+#define mmDCP0_COMM_MATRIXA_TRANS_C13_C14 0x1a44
+#define mmDCP1_COMM_MATRIXA_TRANS_C13_C14 0x1c44
+#define mmDCP2_COMM_MATRIXA_TRANS_C13_C14 0x1e44
+#define mmDCP3_COMM_MATRIXA_TRANS_C13_C14 0x4044
+#define mmDCP4_COMM_MATRIXA_TRANS_C13_C14 0x4244
+#define mmDCP5_COMM_MATRIXA_TRANS_C13_C14 0x4444
+#define mmCOMM_MATRIXA_TRANS_C21_C22 0x1a45
+#define mmDCP0_COMM_MATRIXA_TRANS_C21_C22 0x1a45
+#define mmDCP1_COMM_MATRIXA_TRANS_C21_C22 0x1c45
+#define mmDCP2_COMM_MATRIXA_TRANS_C21_C22 0x1e45
+#define mmDCP3_COMM_MATRIXA_TRANS_C21_C22 0x4045
+#define mmDCP4_COMM_MATRIXA_TRANS_C21_C22 0x4245
+#define mmDCP5_COMM_MATRIXA_TRANS_C21_C22 0x4445
+#define mmCOMM_MATRIXA_TRANS_C23_C24 0x1a46
+#define mmDCP0_COMM_MATRIXA_TRANS_C23_C24 0x1a46
+#define mmDCP1_COMM_MATRIXA_TRANS_C23_C24 0x1c46
+#define mmDCP2_COMM_MATRIXA_TRANS_C23_C24 0x1e46
+#define mmDCP3_COMM_MATRIXA_TRANS_C23_C24 0x4046
+#define mmDCP4_COMM_MATRIXA_TRANS_C23_C24 0x4246
+#define mmDCP5_COMM_MATRIXA_TRANS_C23_C24 0x4446
+#define mmCOMM_MATRIXA_TRANS_C31_C32 0x1a47
+#define mmDCP0_COMM_MATRIXA_TRANS_C31_C32 0x1a47
+#define mmDCP1_COMM_MATRIXA_TRANS_C31_C32 0x1c47
+#define mmDCP2_COMM_MATRIXA_TRANS_C31_C32 0x1e47
+#define mmDCP3_COMM_MATRIXA_TRANS_C31_C32 0x4047
+#define mmDCP4_COMM_MATRIXA_TRANS_C31_C32 0x4247
+#define mmDCP5_COMM_MATRIXA_TRANS_C31_C32 0x4447
+#define mmCOMM_MATRIXA_TRANS_C33_C34 0x1a48
+#define mmDCP0_COMM_MATRIXA_TRANS_C33_C34 0x1a48
+#define mmDCP1_COMM_MATRIXA_TRANS_C33_C34 0x1c48
+#define mmDCP2_COMM_MATRIXA_TRANS_C33_C34 0x1e48
+#define mmDCP3_COMM_MATRIXA_TRANS_C33_C34 0x4048
+#define mmDCP4_COMM_MATRIXA_TRANS_C33_C34 0x4248
+#define mmDCP5_COMM_MATRIXA_TRANS_C33_C34 0x4448
+#define mmCOMM_MATRIXB_TRANS_C11_C12 0x1a49
+#define mmDCP0_COMM_MATRIXB_TRANS_C11_C12 0x1a49
+#define mmDCP1_COMM_MATRIXB_TRANS_C11_C12 0x1c49
+#define mmDCP2_COMM_MATRIXB_TRANS_C11_C12 0x1e49
+#define mmDCP3_COMM_MATRIXB_TRANS_C11_C12 0x4049
+#define mmDCP4_COMM_MATRIXB_TRANS_C11_C12 0x4249
+#define mmDCP5_COMM_MATRIXB_TRANS_C11_C12 0x4449
+#define mmCOMM_MATRIXB_TRANS_C13_C14 0x1a4a
+#define mmDCP0_COMM_MATRIXB_TRANS_C13_C14 0x1a4a
+#define mmDCP1_COMM_MATRIXB_TRANS_C13_C14 0x1c4a
+#define mmDCP2_COMM_MATRIXB_TRANS_C13_C14 0x1e4a
+#define mmDCP3_COMM_MATRIXB_TRANS_C13_C14 0x404a
+#define mmDCP4_COMM_MATRIXB_TRANS_C13_C14 0x424a
+#define mmDCP5_COMM_MATRIXB_TRANS_C13_C14 0x444a
+#define mmCOMM_MATRIXB_TRANS_C21_C22 0x1a4b
+#define mmDCP0_COMM_MATRIXB_TRANS_C21_C22 0x1a4b
+#define mmDCP1_COMM_MATRIXB_TRANS_C21_C22 0x1c4b
+#define mmDCP2_COMM_MATRIXB_TRANS_C21_C22 0x1e4b
+#define mmDCP3_COMM_MATRIXB_TRANS_C21_C22 0x404b
+#define mmDCP4_COMM_MATRIXB_TRANS_C21_C22 0x424b
+#define mmDCP5_COMM_MATRIXB_TRANS_C21_C22 0x444b
+#define mmCOMM_MATRIXB_TRANS_C23_C24 0x1a4c
+#define mmDCP0_COMM_MATRIXB_TRANS_C23_C24 0x1a4c
+#define mmDCP1_COMM_MATRIXB_TRANS_C23_C24 0x1c4c
+#define mmDCP2_COMM_MATRIXB_TRANS_C23_C24 0x1e4c
+#define mmDCP3_COMM_MATRIXB_TRANS_C23_C24 0x404c
+#define mmDCP4_COMM_MATRIXB_TRANS_C23_C24 0x424c
+#define mmDCP5_COMM_MATRIXB_TRANS_C23_C24 0x444c
+#define mmCOMM_MATRIXB_TRANS_C31_C32 0x1a4d
+#define mmDCP0_COMM_MATRIXB_TRANS_C31_C32 0x1a4d
+#define mmDCP1_COMM_MATRIXB_TRANS_C31_C32 0x1c4d
+#define mmDCP2_COMM_MATRIXB_TRANS_C31_C32 0x1e4d
+#define mmDCP3_COMM_MATRIXB_TRANS_C31_C32 0x404d
+#define mmDCP4_COMM_MATRIXB_TRANS_C31_C32 0x424d
+#define mmDCP5_COMM_MATRIXB_TRANS_C31_C32 0x444d
+#define mmCOMM_MATRIXB_TRANS_C33_C34 0x1a4e
+#define mmDCP0_COMM_MATRIXB_TRANS_C33_C34 0x1a4e
+#define mmDCP1_COMM_MATRIXB_TRANS_C33_C34 0x1c4e
+#define mmDCP2_COMM_MATRIXB_TRANS_C33_C34 0x1e4e
+#define mmDCP3_COMM_MATRIXB_TRANS_C33_C34 0x404e
+#define mmDCP4_COMM_MATRIXB_TRANS_C33_C34 0x424e
+#define mmDCP5_COMM_MATRIXB_TRANS_C33_C34 0x444e
+#define mmDENORM_CONTROL 0x1a50
+#define mmDCP0_DENORM_CONTROL 0x1a50
+#define mmDCP1_DENORM_CONTROL 0x1c50
+#define mmDCP2_DENORM_CONTROL 0x1e50
+#define mmDCP3_DENORM_CONTROL 0x4050
+#define mmDCP4_DENORM_CONTROL 0x4250
+#define mmDCP5_DENORM_CONTROL 0x4450
+#define mmOUT_ROUND_CONTROL 0x1a51
+#define mmDCP0_OUT_ROUND_CONTROL 0x1a51
+#define mmDCP1_OUT_ROUND_CONTROL 0x1c51
+#define mmDCP2_OUT_ROUND_CONTROL 0x1e51
+#define mmDCP3_OUT_ROUND_CONTROL 0x4051
+#define mmDCP4_OUT_ROUND_CONTROL 0x4251
+#define mmDCP5_OUT_ROUND_CONTROL 0x4451
+#define mmOUT_CLAMP_CONTROL_R_CR 0x1a52
+#define mmDCP0_OUT_CLAMP_CONTROL_R_CR 0x1a52
+#define mmDCP1_OUT_CLAMP_CONTROL_R_CR 0x1c52
+#define mmDCP2_OUT_CLAMP_CONTROL_R_CR 0x1e52
+#define mmDCP3_OUT_CLAMP_CONTROL_R_CR 0x4052
+#define mmDCP4_OUT_CLAMP_CONTROL_R_CR 0x4252
+#define mmDCP5_OUT_CLAMP_CONTROL_R_CR 0x4452
+#define mmOUT_CLAMP_CONTROL_G_Y 0x1a9c
+#define mmDCP0_OUT_CLAMP_CONTROL_G_Y 0x1a9c
+#define mmDCP1_OUT_CLAMP_CONTROL_G_Y 0x1c9c
+#define mmDCP2_OUT_CLAMP_CONTROL_G_Y 0x1e9c
+#define mmDCP3_OUT_CLAMP_CONTROL_G_Y 0x409c
+#define mmDCP4_OUT_CLAMP_CONTROL_G_Y 0x429c
+#define mmDCP5_OUT_CLAMP_CONTROL_G_Y 0x449c
+#define mmOUT_CLAMP_CONTROL_B_CB 0x1a9d
+#define mmDCP0_OUT_CLAMP_CONTROL_B_CB 0x1a9d
+#define mmDCP1_OUT_CLAMP_CONTROL_B_CB 0x1c9d
+#define mmDCP2_OUT_CLAMP_CONTROL_B_CB 0x1e9d
+#define mmDCP3_OUT_CLAMP_CONTROL_B_CB 0x409d
+#define mmDCP4_OUT_CLAMP_CONTROL_B_CB 0x429d
+#define mmDCP5_OUT_CLAMP_CONTROL_B_CB 0x449d
+#define mmKEY_CONTROL 0x1a53
+#define mmDCP0_KEY_CONTROL 0x1a53
+#define mmDCP1_KEY_CONTROL 0x1c53
+#define mmDCP2_KEY_CONTROL 0x1e53
+#define mmDCP3_KEY_CONTROL 0x4053
+#define mmDCP4_KEY_CONTROL 0x4253
+#define mmDCP5_KEY_CONTROL 0x4453
+#define mmKEY_RANGE_ALPHA 0x1a54
+#define mmDCP0_KEY_RANGE_ALPHA 0x1a54
+#define mmDCP1_KEY_RANGE_ALPHA 0x1c54
+#define mmDCP2_KEY_RANGE_ALPHA 0x1e54
+#define mmDCP3_KEY_RANGE_ALPHA 0x4054
+#define mmDCP4_KEY_RANGE_ALPHA 0x4254
+#define mmDCP5_KEY_RANGE_ALPHA 0x4454
+#define mmKEY_RANGE_RED 0x1a55
+#define mmDCP0_KEY_RANGE_RED 0x1a55
+#define mmDCP1_KEY_RANGE_RED 0x1c55
+#define mmDCP2_KEY_RANGE_RED 0x1e55
+#define mmDCP3_KEY_RANGE_RED 0x4055
+#define mmDCP4_KEY_RANGE_RED 0x4255
+#define mmDCP5_KEY_RANGE_RED 0x4455
+#define mmKEY_RANGE_GREEN 0x1a56
+#define mmDCP0_KEY_RANGE_GREEN 0x1a56
+#define mmDCP1_KEY_RANGE_GREEN 0x1c56
+#define mmDCP2_KEY_RANGE_GREEN 0x1e56
+#define mmDCP3_KEY_RANGE_GREEN 0x4056
+#define mmDCP4_KEY_RANGE_GREEN 0x4256
+#define mmDCP5_KEY_RANGE_GREEN 0x4456
+#define mmKEY_RANGE_BLUE 0x1a57
+#define mmDCP0_KEY_RANGE_BLUE 0x1a57
+#define mmDCP1_KEY_RANGE_BLUE 0x1c57
+#define mmDCP2_KEY_RANGE_BLUE 0x1e57
+#define mmDCP3_KEY_RANGE_BLUE 0x4057
+#define mmDCP4_KEY_RANGE_BLUE 0x4257
+#define mmDCP5_KEY_RANGE_BLUE 0x4457
+#define mmDEGAMMA_CONTROL 0x1a58
+#define mmDCP0_DEGAMMA_CONTROL 0x1a58
+#define mmDCP1_DEGAMMA_CONTROL 0x1c58
+#define mmDCP2_DEGAMMA_CONTROL 0x1e58
+#define mmDCP3_DEGAMMA_CONTROL 0x4058
+#define mmDCP4_DEGAMMA_CONTROL 0x4258
+#define mmDCP5_DEGAMMA_CONTROL 0x4458
+#define mmGAMUT_REMAP_CONTROL 0x1a59
+#define mmDCP0_GAMUT_REMAP_CONTROL 0x1a59
+#define mmDCP1_GAMUT_REMAP_CONTROL 0x1c59
+#define mmDCP2_GAMUT_REMAP_CONTROL 0x1e59
+#define mmDCP3_GAMUT_REMAP_CONTROL 0x4059
+#define mmDCP4_GAMUT_REMAP_CONTROL 0x4259
+#define mmDCP5_GAMUT_REMAP_CONTROL 0x4459
+#define mmGAMUT_REMAP_C11_C12 0x1a5a
+#define mmDCP0_GAMUT_REMAP_C11_C12 0x1a5a
+#define mmDCP1_GAMUT_REMAP_C11_C12 0x1c5a
+#define mmDCP2_GAMUT_REMAP_C11_C12 0x1e5a
+#define mmDCP3_GAMUT_REMAP_C11_C12 0x405a
+#define mmDCP4_GAMUT_REMAP_C11_C12 0x425a
+#define mmDCP5_GAMUT_REMAP_C11_C12 0x445a
+#define mmGAMUT_REMAP_C13_C14 0x1a5b
+#define mmDCP0_GAMUT_REMAP_C13_C14 0x1a5b
+#define mmDCP1_GAMUT_REMAP_C13_C14 0x1c5b
+#define mmDCP2_GAMUT_REMAP_C13_C14 0x1e5b
+#define mmDCP3_GAMUT_REMAP_C13_C14 0x405b
+#define mmDCP4_GAMUT_REMAP_C13_C14 0x425b
+#define mmDCP5_GAMUT_REMAP_C13_C14 0x445b
+#define mmGAMUT_REMAP_C21_C22 0x1a5c
+#define mmDCP0_GAMUT_REMAP_C21_C22 0x1a5c
+#define mmDCP1_GAMUT_REMAP_C21_C22 0x1c5c
+#define mmDCP2_GAMUT_REMAP_C21_C22 0x1e5c
+#define mmDCP3_GAMUT_REMAP_C21_C22 0x405c
+#define mmDCP4_GAMUT_REMAP_C21_C22 0x425c
+#define mmDCP5_GAMUT_REMAP_C21_C22 0x445c
+#define mmGAMUT_REMAP_C23_C24 0x1a5d
+#define mmDCP0_GAMUT_REMAP_C23_C24 0x1a5d
+#define mmDCP1_GAMUT_REMAP_C23_C24 0x1c5d
+#define mmDCP2_GAMUT_REMAP_C23_C24 0x1e5d
+#define mmDCP3_GAMUT_REMAP_C23_C24 0x405d
+#define mmDCP4_GAMUT_REMAP_C23_C24 0x425d
+#define mmDCP5_GAMUT_REMAP_C23_C24 0x445d
+#define mmGAMUT_REMAP_C31_C32 0x1a5e
+#define mmDCP0_GAMUT_REMAP_C31_C32 0x1a5e
+#define mmDCP1_GAMUT_REMAP_C31_C32 0x1c5e
+#define mmDCP2_GAMUT_REMAP_C31_C32 0x1e5e
+#define mmDCP3_GAMUT_REMAP_C31_C32 0x405e
+#define mmDCP4_GAMUT_REMAP_C31_C32 0x425e
+#define mmDCP5_GAMUT_REMAP_C31_C32 0x445e
+#define mmGAMUT_REMAP_C33_C34 0x1a5f
+#define mmDCP0_GAMUT_REMAP_C33_C34 0x1a5f
+#define mmDCP1_GAMUT_REMAP_C33_C34 0x1c5f
+#define mmDCP2_GAMUT_REMAP_C33_C34 0x1e5f
+#define mmDCP3_GAMUT_REMAP_C33_C34 0x405f
+#define mmDCP4_GAMUT_REMAP_C33_C34 0x425f
+#define mmDCP5_GAMUT_REMAP_C33_C34 0x445f
+#define mmDCP_SPATIAL_DITHER_CNTL 0x1a60
+#define mmDCP0_DCP_SPATIAL_DITHER_CNTL 0x1a60
+#define mmDCP1_DCP_SPATIAL_DITHER_CNTL 0x1c60
+#define mmDCP2_DCP_SPATIAL_DITHER_CNTL 0x1e60
+#define mmDCP3_DCP_SPATIAL_DITHER_CNTL 0x4060
+#define mmDCP4_DCP_SPATIAL_DITHER_CNTL 0x4260
+#define mmDCP5_DCP_SPATIAL_DITHER_CNTL 0x4460
+#define mmDCP_RANDOM_SEEDS 0x1a61
+#define mmDCP0_DCP_RANDOM_SEEDS 0x1a61
+#define mmDCP1_DCP_RANDOM_SEEDS 0x1c61
+#define mmDCP2_DCP_RANDOM_SEEDS 0x1e61
+#define mmDCP3_DCP_RANDOM_SEEDS 0x4061
+#define mmDCP4_DCP_RANDOM_SEEDS 0x4261
+#define mmDCP5_DCP_RANDOM_SEEDS 0x4461
+#define mmDCP_FP_CONVERTED_FIELD 0x1a65
+#define mmDCP0_DCP_FP_CONVERTED_FIELD 0x1a65
+#define mmDCP1_DCP_FP_CONVERTED_FIELD 0x1c65
+#define mmDCP2_DCP_FP_CONVERTED_FIELD 0x1e65
+#define mmDCP3_DCP_FP_CONVERTED_FIELD 0x4065
+#define mmDCP4_DCP_FP_CONVERTED_FIELD 0x4265
+#define mmDCP5_DCP_FP_CONVERTED_FIELD 0x4465
+#define mmCUR_CONTROL 0x1a66
+#define mmDCP0_CUR_CONTROL 0x1a66
+#define mmDCP1_CUR_CONTROL 0x1c66
+#define mmDCP2_CUR_CONTROL 0x1e66
+#define mmDCP3_CUR_CONTROL 0x4066
+#define mmDCP4_CUR_CONTROL 0x4266
+#define mmDCP5_CUR_CONTROL 0x4466
+#define mmCUR_SURFACE_ADDRESS 0x1a67
+#define mmDCP0_CUR_SURFACE_ADDRESS 0x1a67
+#define mmDCP1_CUR_SURFACE_ADDRESS 0x1c67
+#define mmDCP2_CUR_SURFACE_ADDRESS 0x1e67
+#define mmDCP3_CUR_SURFACE_ADDRESS 0x4067
+#define mmDCP4_CUR_SURFACE_ADDRESS 0x4267
+#define mmDCP5_CUR_SURFACE_ADDRESS 0x4467
+#define mmCUR_SIZE 0x1a68
+#define mmDCP0_CUR_SIZE 0x1a68
+#define mmDCP1_CUR_SIZE 0x1c68
+#define mmDCP2_CUR_SIZE 0x1e68
+#define mmDCP3_CUR_SIZE 0x4068
+#define mmDCP4_CUR_SIZE 0x4268
+#define mmDCP5_CUR_SIZE 0x4468
+#define mmCUR_SURFACE_ADDRESS_HIGH 0x1a69
+#define mmDCP0_CUR_SURFACE_ADDRESS_HIGH 0x1a69
+#define mmDCP1_CUR_SURFACE_ADDRESS_HIGH 0x1c69
+#define mmDCP2_CUR_SURFACE_ADDRESS_HIGH 0x1e69
+#define mmDCP3_CUR_SURFACE_ADDRESS_HIGH 0x4069
+#define mmDCP4_CUR_SURFACE_ADDRESS_HIGH 0x4269
+#define mmDCP5_CUR_SURFACE_ADDRESS_HIGH 0x4469
+#define mmCUR_POSITION 0x1a6a
+#define mmDCP0_CUR_POSITION 0x1a6a
+#define mmDCP1_CUR_POSITION 0x1c6a
+#define mmDCP2_CUR_POSITION 0x1e6a
+#define mmDCP3_CUR_POSITION 0x406a
+#define mmDCP4_CUR_POSITION 0x426a
+#define mmDCP5_CUR_POSITION 0x446a
+#define mmCUR_HOT_SPOT 0x1a6b
+#define mmDCP0_CUR_HOT_SPOT 0x1a6b
+#define mmDCP1_CUR_HOT_SPOT 0x1c6b
+#define mmDCP2_CUR_HOT_SPOT 0x1e6b
+#define mmDCP3_CUR_HOT_SPOT 0x406b
+#define mmDCP4_CUR_HOT_SPOT 0x426b
+#define mmDCP5_CUR_HOT_SPOT 0x446b
+#define mmCUR_COLOR1 0x1a6c
+#define mmDCP0_CUR_COLOR1 0x1a6c
+#define mmDCP1_CUR_COLOR1 0x1c6c
+#define mmDCP2_CUR_COLOR1 0x1e6c
+#define mmDCP3_CUR_COLOR1 0x406c
+#define mmDCP4_CUR_COLOR1 0x426c
+#define mmDCP5_CUR_COLOR1 0x446c
+#define mmCUR_COLOR2 0x1a6d
+#define mmDCP0_CUR_COLOR2 0x1a6d
+#define mmDCP1_CUR_COLOR2 0x1c6d
+#define mmDCP2_CUR_COLOR2 0x1e6d
+#define mmDCP3_CUR_COLOR2 0x406d
+#define mmDCP4_CUR_COLOR2 0x426d
+#define mmDCP5_CUR_COLOR2 0x446d
+#define mmCUR_UPDATE 0x1a6e
+#define mmDCP0_CUR_UPDATE 0x1a6e
+#define mmDCP1_CUR_UPDATE 0x1c6e
+#define mmDCP2_CUR_UPDATE 0x1e6e
+#define mmDCP3_CUR_UPDATE 0x406e
+#define mmDCP4_CUR_UPDATE 0x426e
+#define mmDCP5_CUR_UPDATE 0x446e
+#define mmCUR_REQUEST_FILTER_CNTL 0x1a99
+#define mmDCP0_CUR_REQUEST_FILTER_CNTL 0x1a99
+#define mmDCP1_CUR_REQUEST_FILTER_CNTL 0x1c99
+#define mmDCP2_CUR_REQUEST_FILTER_CNTL 0x1e99
+#define mmDCP3_CUR_REQUEST_FILTER_CNTL 0x4099
+#define mmDCP4_CUR_REQUEST_FILTER_CNTL 0x4299
+#define mmDCP5_CUR_REQUEST_FILTER_CNTL 0x4499
+#define mmCUR_STEREO_CONTROL 0x1a9a
+#define mmDCP0_CUR_STEREO_CONTROL 0x1a9a
+#define mmDCP1_CUR_STEREO_CONTROL 0x1c9a
+#define mmDCP2_CUR_STEREO_CONTROL 0x1e9a
+#define mmDCP3_CUR_STEREO_CONTROL 0x409a
+#define mmDCP4_CUR_STEREO_CONTROL 0x429a
+#define mmDCP5_CUR_STEREO_CONTROL 0x449a
+#define mmDC_LUT_RW_MODE 0x1a78
+#define mmDCP0_DC_LUT_RW_MODE 0x1a78
+#define mmDCP1_DC_LUT_RW_MODE 0x1c78
+#define mmDCP2_DC_LUT_RW_MODE 0x1e78
+#define mmDCP3_DC_LUT_RW_MODE 0x4078
+#define mmDCP4_DC_LUT_RW_MODE 0x4278
+#define mmDCP5_DC_LUT_RW_MODE 0x4478
+#define mmDC_LUT_RW_INDEX 0x1a79
+#define mmDCP0_DC_LUT_RW_INDEX 0x1a79
+#define mmDCP1_DC_LUT_RW_INDEX 0x1c79
+#define mmDCP2_DC_LUT_RW_INDEX 0x1e79
+#define mmDCP3_DC_LUT_RW_INDEX 0x4079
+#define mmDCP4_DC_LUT_RW_INDEX 0x4279
+#define mmDCP5_DC_LUT_RW_INDEX 0x4479
+#define mmDC_LUT_SEQ_COLOR 0x1a7a
+#define mmDCP0_DC_LUT_SEQ_COLOR 0x1a7a
+#define mmDCP1_DC_LUT_SEQ_COLOR 0x1c7a
+#define mmDCP2_DC_LUT_SEQ_COLOR 0x1e7a
+#define mmDCP3_DC_LUT_SEQ_COLOR 0x407a
+#define mmDCP4_DC_LUT_SEQ_COLOR 0x427a
+#define mmDCP5_DC_LUT_SEQ_COLOR 0x447a
+#define mmDC_LUT_PWL_DATA 0x1a7b
+#define mmDCP0_DC_LUT_PWL_DATA 0x1a7b
+#define mmDCP1_DC_LUT_PWL_DATA 0x1c7b
+#define mmDCP2_DC_LUT_PWL_DATA 0x1e7b
+#define mmDCP3_DC_LUT_PWL_DATA 0x407b
+#define mmDCP4_DC_LUT_PWL_DATA 0x427b
+#define mmDCP5_DC_LUT_PWL_DATA 0x447b
+#define mmDC_LUT_30_COLOR 0x1a7c
+#define mmDCP0_DC_LUT_30_COLOR 0x1a7c
+#define mmDCP1_DC_LUT_30_COLOR 0x1c7c
+#define mmDCP2_DC_LUT_30_COLOR 0x1e7c
+#define mmDCP3_DC_LUT_30_COLOR 0x407c
+#define mmDCP4_DC_LUT_30_COLOR 0x427c
+#define mmDCP5_DC_LUT_30_COLOR 0x447c
+#define mmDC_LUT_VGA_ACCESS_ENABLE 0x1a7d
+#define mmDCP0_DC_LUT_VGA_ACCESS_ENABLE 0x1a7d
+#define mmDCP1_DC_LUT_VGA_ACCESS_ENABLE 0x1c7d
+#define mmDCP2_DC_LUT_VGA_ACCESS_ENABLE 0x1e7d
+#define mmDCP3_DC_LUT_VGA_ACCESS_ENABLE 0x407d
+#define mmDCP4_DC_LUT_VGA_ACCESS_ENABLE 0x427d
+#define mmDCP5_DC_LUT_VGA_ACCESS_ENABLE 0x447d
+#define mmDC_LUT_WRITE_EN_MASK 0x1a7e
+#define mmDCP0_DC_LUT_WRITE_EN_MASK 0x1a7e
+#define mmDCP1_DC_LUT_WRITE_EN_MASK 0x1c7e
+#define mmDCP2_DC_LUT_WRITE_EN_MASK 0x1e7e
+#define mmDCP3_DC_LUT_WRITE_EN_MASK 0x407e
+#define mmDCP4_DC_LUT_WRITE_EN_MASK 0x427e
+#define mmDCP5_DC_LUT_WRITE_EN_MASK 0x447e
+#define mmDC_LUT_AUTOFILL 0x1a7f
+#define mmDCP0_DC_LUT_AUTOFILL 0x1a7f
+#define mmDCP1_DC_LUT_AUTOFILL 0x1c7f
+#define mmDCP2_DC_LUT_AUTOFILL 0x1e7f
+#define mmDCP3_DC_LUT_AUTOFILL 0x407f
+#define mmDCP4_DC_LUT_AUTOFILL 0x427f
+#define mmDCP5_DC_LUT_AUTOFILL 0x447f
+#define mmDC_LUT_CONTROL 0x1a80
+#define mmDCP0_DC_LUT_CONTROL 0x1a80
+#define mmDCP1_DC_LUT_CONTROL 0x1c80
+#define mmDCP2_DC_LUT_CONTROL 0x1e80
+#define mmDCP3_DC_LUT_CONTROL 0x4080
+#define mmDCP4_DC_LUT_CONTROL 0x4280
+#define mmDCP5_DC_LUT_CONTROL 0x4480
+#define mmDC_LUT_BLACK_OFFSET_BLUE 0x1a81
+#define mmDCP0_DC_LUT_BLACK_OFFSET_BLUE 0x1a81
+#define mmDCP1_DC_LUT_BLACK_OFFSET_BLUE 0x1c81
+#define mmDCP2_DC_LUT_BLACK_OFFSET_BLUE 0x1e81
+#define mmDCP3_DC_LUT_BLACK_OFFSET_BLUE 0x4081
+#define mmDCP4_DC_LUT_BLACK_OFFSET_BLUE 0x4281
+#define mmDCP5_DC_LUT_BLACK_OFFSET_BLUE 0x4481
+#define mmDC_LUT_BLACK_OFFSET_GREEN 0x1a82
+#define mmDCP0_DC_LUT_BLACK_OFFSET_GREEN 0x1a82
+#define mmDCP1_DC_LUT_BLACK_OFFSET_GREEN 0x1c82
+#define mmDCP2_DC_LUT_BLACK_OFFSET_GREEN 0x1e82
+#define mmDCP3_DC_LUT_BLACK_OFFSET_GREEN 0x4082
+#define mmDCP4_DC_LUT_BLACK_OFFSET_GREEN 0x4282
+#define mmDCP5_DC_LUT_BLACK_OFFSET_GREEN 0x4482
+#define mmDC_LUT_BLACK_OFFSET_RED 0x1a83
+#define mmDCP0_DC_LUT_BLACK_OFFSET_RED 0x1a83
+#define mmDCP1_DC_LUT_BLACK_OFFSET_RED 0x1c83
+#define mmDCP2_DC_LUT_BLACK_OFFSET_RED 0x1e83
+#define mmDCP3_DC_LUT_BLACK_OFFSET_RED 0x4083
+#define mmDCP4_DC_LUT_BLACK_OFFSET_RED 0x4283
+#define mmDCP5_DC_LUT_BLACK_OFFSET_RED 0x4483
+#define mmDC_LUT_WHITE_OFFSET_BLUE 0x1a84
+#define mmDCP0_DC_LUT_WHITE_OFFSET_BLUE 0x1a84
+#define mmDCP1_DC_LUT_WHITE_OFFSET_BLUE 0x1c84
+#define mmDCP2_DC_LUT_WHITE_OFFSET_BLUE 0x1e84
+#define mmDCP3_DC_LUT_WHITE_OFFSET_BLUE 0x4084
+#define mmDCP4_DC_LUT_WHITE_OFFSET_BLUE 0x4284
+#define mmDCP5_DC_LUT_WHITE_OFFSET_BLUE 0x4484
+#define mmDC_LUT_WHITE_OFFSET_GREEN 0x1a85
+#define mmDCP0_DC_LUT_WHITE_OFFSET_GREEN 0x1a85
+#define mmDCP1_DC_LUT_WHITE_OFFSET_GREEN 0x1c85
+#define mmDCP2_DC_LUT_WHITE_OFFSET_GREEN 0x1e85
+#define mmDCP3_DC_LUT_WHITE_OFFSET_GREEN 0x4085
+#define mmDCP4_DC_LUT_WHITE_OFFSET_GREEN 0x4285
+#define mmDCP5_DC_LUT_WHITE_OFFSET_GREEN 0x4485
+#define mmDC_LUT_WHITE_OFFSET_RED 0x1a86
+#define mmDCP0_DC_LUT_WHITE_OFFSET_RED 0x1a86
+#define mmDCP1_DC_LUT_WHITE_OFFSET_RED 0x1c86
+#define mmDCP2_DC_LUT_WHITE_OFFSET_RED 0x1e86
+#define mmDCP3_DC_LUT_WHITE_OFFSET_RED 0x4086
+#define mmDCP4_DC_LUT_WHITE_OFFSET_RED 0x4286
+#define mmDCP5_DC_LUT_WHITE_OFFSET_RED 0x4486
+#define mmDCP_CRC_CONTROL 0x1a87
+#define mmDCP0_DCP_CRC_CONTROL 0x1a87
+#define mmDCP1_DCP_CRC_CONTROL 0x1c87
+#define mmDCP2_DCP_CRC_CONTROL 0x1e87
+#define mmDCP3_DCP_CRC_CONTROL 0x4087
+#define mmDCP4_DCP_CRC_CONTROL 0x4287
+#define mmDCP5_DCP_CRC_CONTROL 0x4487
+#define mmDCP_CRC_MASK 0x1a88
+#define mmDCP0_DCP_CRC_MASK 0x1a88
+#define mmDCP1_DCP_CRC_MASK 0x1c88
+#define mmDCP2_DCP_CRC_MASK 0x1e88
+#define mmDCP3_DCP_CRC_MASK 0x4088
+#define mmDCP4_DCP_CRC_MASK 0x4288
+#define mmDCP5_DCP_CRC_MASK 0x4488
+#define mmDCP_CRC_CURRENT 0x1a89
+#define mmDCP0_DCP_CRC_CURRENT 0x1a89
+#define mmDCP1_DCP_CRC_CURRENT 0x1c89
+#define mmDCP2_DCP_CRC_CURRENT 0x1e89
+#define mmDCP3_DCP_CRC_CURRENT 0x4089
+#define mmDCP4_DCP_CRC_CURRENT 0x4289
+#define mmDCP5_DCP_CRC_CURRENT 0x4489
+#define mmDVMM_PTE_CONTROL 0x1a8a
+#define mmDCP0_DVMM_PTE_CONTROL 0x1a8a
+#define mmDCP1_DVMM_PTE_CONTROL 0x1c8a
+#define mmDCP2_DVMM_PTE_CONTROL 0x1e8a
+#define mmDCP3_DVMM_PTE_CONTROL 0x408a
+#define mmDCP4_DVMM_PTE_CONTROL 0x428a
+#define mmDCP5_DVMM_PTE_CONTROL 0x448a
+#define mmDCP_CRC_LAST 0x1a8b
+#define mmDCP0_DCP_CRC_LAST 0x1a8b
+#define mmDCP1_DCP_CRC_LAST 0x1c8b
+#define mmDCP2_DCP_CRC_LAST 0x1e8b
+#define mmDCP3_DCP_CRC_LAST 0x408b
+#define mmDCP4_DCP_CRC_LAST 0x428b
+#define mmDCP5_DCP_CRC_LAST 0x448b
+#define mmDCP_DEBUG 0x1a8d
+#define mmDCP0_DCP_DEBUG 0x1a8d
+#define mmDCP1_DCP_DEBUG 0x1c8d
+#define mmDCP2_DCP_DEBUG 0x1e8d
+#define mmDCP3_DCP_DEBUG 0x408d
+#define mmDCP4_DCP_DEBUG 0x428d
+#define mmDCP5_DCP_DEBUG 0x448d
+#define mmGRPH_FLIP_RATE_CNTL 0x1a8e
+#define mmDCP0_GRPH_FLIP_RATE_CNTL 0x1a8e
+#define mmDCP1_GRPH_FLIP_RATE_CNTL 0x1c8e
+#define mmDCP2_GRPH_FLIP_RATE_CNTL 0x1e8e
+#define mmDCP3_GRPH_FLIP_RATE_CNTL 0x408e
+#define mmDCP4_GRPH_FLIP_RATE_CNTL 0x428e
+#define mmDCP5_GRPH_FLIP_RATE_CNTL 0x448e
+#define mmDCP_GSL_CONTROL 0x1a90
+#define mmDCP0_DCP_GSL_CONTROL 0x1a90
+#define mmDCP1_DCP_GSL_CONTROL 0x1c90
+#define mmDCP2_DCP_GSL_CONTROL 0x1e90
+#define mmDCP3_DCP_GSL_CONTROL 0x4090
+#define mmDCP4_DCP_GSL_CONTROL 0x4290
+#define mmDCP5_DCP_GSL_CONTROL 0x4490
+#define mmDCP_LB_DATA_GAP_BETWEEN_CHUNK 0x1a91
+#define mmDCP0_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x1a91
+#define mmDCP1_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x1c91
+#define mmDCP2_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x1e91
+#define mmDCP3_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x4091
+#define mmDCP4_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x4291
+#define mmDCP5_DCP_LB_DATA_GAP_BETWEEN_CHUNK 0x4491
+#define mmDCP_DEBUG_SG 0x1a92
+#define mmDCP0_DCP_DEBUG_SG 0x1a92
+#define mmDCP1_DCP_DEBUG_SG 0x1c92
+#define mmDCP2_DCP_DEBUG_SG 0x1e92
+#define mmDCP3_DCP_DEBUG_SG 0x4092
+#define mmDCP4_DCP_DEBUG_SG 0x4292
+#define mmDCP5_DCP_DEBUG_SG 0x4492
+#define mmDCP_DEBUG_SG2 0x1a94
+#define mmDCP0_DCP_DEBUG_SG2 0x1a94
+#define mmDCP1_DCP_DEBUG_SG2 0x1c94
+#define mmDCP2_DCP_DEBUG_SG2 0x1e94
+#define mmDCP3_DCP_DEBUG_SG2 0x4094
+#define mmDCP4_DCP_DEBUG_SG2 0x4294
+#define mmDCP5_DCP_DEBUG_SG2 0x4494
+#define mmDCP_DVMM_DEBUG 0x1a93
+#define mmDCP0_DCP_DVMM_DEBUG 0x1a93
+#define mmDCP1_DCP_DVMM_DEBUG 0x1c93
+#define mmDCP2_DCP_DVMM_DEBUG 0x1e93
+#define mmDCP3_DCP_DVMM_DEBUG 0x4093
+#define mmDCP4_DCP_DVMM_DEBUG 0x4293
+#define mmDCP5_DCP_DVMM_DEBUG 0x4493
+#define mmDCP_TEST_DEBUG_INDEX 0x1a95
+#define mmDCP0_DCP_TEST_DEBUG_INDEX 0x1a95
+#define mmDCP1_DCP_TEST_DEBUG_INDEX 0x1c95
+#define mmDCP2_DCP_TEST_DEBUG_INDEX 0x1e95
+#define mmDCP3_DCP_TEST_DEBUG_INDEX 0x4095
+#define mmDCP4_DCP_TEST_DEBUG_INDEX 0x4295
+#define mmDCP5_DCP_TEST_DEBUG_INDEX 0x4495
+#define mmDCP_TEST_DEBUG_DATA 0x1a96
+#define mmDCP0_DCP_TEST_DEBUG_DATA 0x1a96
+#define mmDCP1_DCP_TEST_DEBUG_DATA 0x1c96
+#define mmDCP2_DCP_TEST_DEBUG_DATA 0x1e96
+#define mmDCP3_DCP_TEST_DEBUG_DATA 0x4096
+#define mmDCP4_DCP_TEST_DEBUG_DATA 0x4296
+#define mmDCP5_DCP_TEST_DEBUG_DATA 0x4496
+#define mmGRPH_STEREOSYNC_FLIP 0x1a97
+#define mmDCP0_GRPH_STEREOSYNC_FLIP 0x1a97
+#define mmDCP1_GRPH_STEREOSYNC_FLIP 0x1c97
+#define mmDCP2_GRPH_STEREOSYNC_FLIP 0x1e97
+#define mmDCP3_GRPH_STEREOSYNC_FLIP 0x4097
+#define mmDCP4_GRPH_STEREOSYNC_FLIP 0x4297
+#define mmDCP5_GRPH_STEREOSYNC_FLIP 0x4497
+#define mmDCP_DEBUG2 0x1a98
+#define mmDCP0_DCP_DEBUG2 0x1a98
+#define mmDCP1_DCP_DEBUG2 0x1c98
+#define mmDCP2_DCP_DEBUG2 0x1e98
+#define mmDCP3_DCP_DEBUG2 0x4098
+#define mmDCP4_DCP_DEBUG2 0x4298
+#define mmDCP5_DCP_DEBUG2 0x4498
+#define mmHW_ROTATION 0x1a9e
+#define mmDCP0_HW_ROTATION 0x1a9e
+#define mmDCP1_HW_ROTATION 0x1c9e
+#define mmDCP2_HW_ROTATION 0x1e9e
+#define mmDCP3_HW_ROTATION 0x409e
+#define mmDCP4_HW_ROTATION 0x429e
+#define mmDCP5_HW_ROTATION 0x449e
+#define mmGRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL 0x1a9f
+#define mmDCP0_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL 0x1a9f
+#define mmDCP1_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL 0x1c9f
+#define mmDCP2_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL 0x1e9f
+#define mmDCP3_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL 0x409f
+#define mmDCP4_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL 0x429f
+#define mmDCP5_GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL 0x449f
+#define mmREGAMMA_CONTROL 0x1aa0
+#define mmDCP0_REGAMMA_CONTROL 0x1aa0
+#define mmDCP1_REGAMMA_CONTROL 0x1ca0
+#define mmDCP2_REGAMMA_CONTROL 0x1ea0
+#define mmDCP3_REGAMMA_CONTROL 0x40a0
+#define mmDCP4_REGAMMA_CONTROL 0x42a0
+#define mmDCP5_REGAMMA_CONTROL 0x44a0
+#define mmREGAMMA_LUT_INDEX 0x1aa1
+#define mmDCP0_REGAMMA_LUT_INDEX 0x1aa1
+#define mmDCP1_REGAMMA_LUT_INDEX 0x1ca1
+#define mmDCP2_REGAMMA_LUT_INDEX 0x1ea1
+#define mmDCP3_REGAMMA_LUT_INDEX 0x40a1
+#define mmDCP4_REGAMMA_LUT_INDEX 0x42a1
+#define mmDCP5_REGAMMA_LUT_INDEX 0x44a1
+#define mmREGAMMA_LUT_DATA 0x1aa2
+#define mmDCP0_REGAMMA_LUT_DATA 0x1aa2
+#define mmDCP1_REGAMMA_LUT_DATA 0x1ca2
+#define mmDCP2_REGAMMA_LUT_DATA 0x1ea2
+#define mmDCP3_REGAMMA_LUT_DATA 0x40a2
+#define mmDCP4_REGAMMA_LUT_DATA 0x42a2
+#define mmDCP5_REGAMMA_LUT_DATA 0x44a2
+#define mmREGAMMA_LUT_WRITE_EN_MASK 0x1aa3
+#define mmDCP0_REGAMMA_LUT_WRITE_EN_MASK 0x1aa3
+#define mmDCP1_REGAMMA_LUT_WRITE_EN_MASK 0x1ca3
+#define mmDCP2_REGAMMA_LUT_WRITE_EN_MASK 0x1ea3
+#define mmDCP3_REGAMMA_LUT_WRITE_EN_MASK 0x40a3
+#define mmDCP4_REGAMMA_LUT_WRITE_EN_MASK 0x42a3
+#define mmDCP5_REGAMMA_LUT_WRITE_EN_MASK 0x44a3
+#define mmREGAMMA_CNTLA_START_CNTL 0x1aa4
+#define mmDCP0_REGAMMA_CNTLA_START_CNTL 0x1aa4
+#define mmDCP1_REGAMMA_CNTLA_START_CNTL 0x1ca4
+#define mmDCP2_REGAMMA_CNTLA_START_CNTL 0x1ea4
+#define mmDCP3_REGAMMA_CNTLA_START_CNTL 0x40a4
+#define mmDCP4_REGAMMA_CNTLA_START_CNTL 0x42a4
+#define mmDCP5_REGAMMA_CNTLA_START_CNTL 0x44a4
+#define mmREGAMMA_CNTLA_SLOPE_CNTL 0x1aa5
+#define mmDCP0_REGAMMA_CNTLA_SLOPE_CNTL 0x1aa5
+#define mmDCP1_REGAMMA_CNTLA_SLOPE_CNTL 0x1ca5
+#define mmDCP2_REGAMMA_CNTLA_SLOPE_CNTL 0x1ea5
+#define mmDCP3_REGAMMA_CNTLA_SLOPE_CNTL 0x40a5
+#define mmDCP4_REGAMMA_CNTLA_SLOPE_CNTL 0x42a5
+#define mmDCP5_REGAMMA_CNTLA_SLOPE_CNTL 0x44a5
+#define mmREGAMMA_CNTLA_END_CNTL1 0x1aa6
+#define mmDCP0_REGAMMA_CNTLA_END_CNTL1 0x1aa6
+#define mmDCP1_REGAMMA_CNTLA_END_CNTL1 0x1ca6
+#define mmDCP2_REGAMMA_CNTLA_END_CNTL1 0x1ea6
+#define mmDCP3_REGAMMA_CNTLA_END_CNTL1 0x40a6
+#define mmDCP4_REGAMMA_CNTLA_END_CNTL1 0x42a6
+#define mmDCP5_REGAMMA_CNTLA_END_CNTL1 0x44a6
+#define mmREGAMMA_CNTLA_END_CNTL2 0x1aa7
+#define mmDCP0_REGAMMA_CNTLA_END_CNTL2 0x1aa7
+#define mmDCP1_REGAMMA_CNTLA_END_CNTL2 0x1ca7
+#define mmDCP2_REGAMMA_CNTLA_END_CNTL2 0x1ea7
+#define mmDCP3_REGAMMA_CNTLA_END_CNTL2 0x40a7
+#define mmDCP4_REGAMMA_CNTLA_END_CNTL2 0x42a7
+#define mmDCP5_REGAMMA_CNTLA_END_CNTL2 0x44a7
+#define mmREGAMMA_CNTLA_REGION_0_1 0x1aa8
+#define mmDCP0_REGAMMA_CNTLA_REGION_0_1 0x1aa8
+#define mmDCP1_REGAMMA_CNTLA_REGION_0_1 0x1ca8
+#define mmDCP2_REGAMMA_CNTLA_REGION_0_1 0x1ea8
+#define mmDCP3_REGAMMA_CNTLA_REGION_0_1 0x40a8
+#define mmDCP4_REGAMMA_CNTLA_REGION_0_1 0x42a8
+#define mmDCP5_REGAMMA_CNTLA_REGION_0_1 0x44a8
+#define mmREGAMMA_CNTLA_REGION_2_3 0x1aa9
+#define mmDCP0_REGAMMA_CNTLA_REGION_2_3 0x1aa9
+#define mmDCP1_REGAMMA_CNTLA_REGION_2_3 0x1ca9
+#define mmDCP2_REGAMMA_CNTLA_REGION_2_3 0x1ea9
+#define mmDCP3_REGAMMA_CNTLA_REGION_2_3 0x40a9
+#define mmDCP4_REGAMMA_CNTLA_REGION_2_3 0x42a9
+#define mmDCP5_REGAMMA_CNTLA_REGION_2_3 0x44a9
+#define mmREGAMMA_CNTLA_REGION_4_5 0x1aaa
+#define mmDCP0_REGAMMA_CNTLA_REGION_4_5 0x1aaa
+#define mmDCP1_REGAMMA_CNTLA_REGION_4_5 0x1caa
+#define mmDCP2_REGAMMA_CNTLA_REGION_4_5 0x1eaa
+#define mmDCP3_REGAMMA_CNTLA_REGION_4_5 0x40aa
+#define mmDCP4_REGAMMA_CNTLA_REGION_4_5 0x42aa
+#define mmDCP5_REGAMMA_CNTLA_REGION_4_5 0x44aa
+#define mmREGAMMA_CNTLA_REGION_6_7 0x1aab
+#define mmDCP0_REGAMMA_CNTLA_REGION_6_7 0x1aab
+#define mmDCP1_REGAMMA_CNTLA_REGION_6_7 0x1cab
+#define mmDCP2_REGAMMA_CNTLA_REGION_6_7 0x1eab
+#define mmDCP3_REGAMMA_CNTLA_REGION_6_7 0x40ab
+#define mmDCP4_REGAMMA_CNTLA_REGION_6_7 0x42ab
+#define mmDCP5_REGAMMA_CNTLA_REGION_6_7 0x44ab
+#define mmREGAMMA_CNTLA_REGION_8_9 0x1aac
+#define mmDCP0_REGAMMA_CNTLA_REGION_8_9 0x1aac
+#define mmDCP1_REGAMMA_CNTLA_REGION_8_9 0x1cac
+#define mmDCP2_REGAMMA_CNTLA_REGION_8_9 0x1eac
+#define mmDCP3_REGAMMA_CNTLA_REGION_8_9 0x40ac
+#define mmDCP4_REGAMMA_CNTLA_REGION_8_9 0x42ac
+#define mmDCP5_REGAMMA_CNTLA_REGION_8_9 0x44ac
+#define mmREGAMMA_CNTLA_REGION_10_11 0x1aad
+#define mmDCP0_REGAMMA_CNTLA_REGION_10_11 0x1aad
+#define mmDCP1_REGAMMA_CNTLA_REGION_10_11 0x1cad
+#define mmDCP2_REGAMMA_CNTLA_REGION_10_11 0x1ead
+#define mmDCP3_REGAMMA_CNTLA_REGION_10_11 0x40ad
+#define mmDCP4_REGAMMA_CNTLA_REGION_10_11 0x42ad
+#define mmDCP5_REGAMMA_CNTLA_REGION_10_11 0x44ad
+#define mmREGAMMA_CNTLA_REGION_12_13 0x1aae
+#define mmDCP0_REGAMMA_CNTLA_REGION_12_13 0x1aae
+#define mmDCP1_REGAMMA_CNTLA_REGION_12_13 0x1cae
+#define mmDCP2_REGAMMA_CNTLA_REGION_12_13 0x1eae
+#define mmDCP3_REGAMMA_CNTLA_REGION_12_13 0x40ae
+#define mmDCP4_REGAMMA_CNTLA_REGION_12_13 0x42ae
+#define mmDCP5_REGAMMA_CNTLA_REGION_12_13 0x44ae
+#define mmREGAMMA_CNTLA_REGION_14_15 0x1aaf
+#define mmDCP0_REGAMMA_CNTLA_REGION_14_15 0x1aaf
+#define mmDCP1_REGAMMA_CNTLA_REGION_14_15 0x1caf
+#define mmDCP2_REGAMMA_CNTLA_REGION_14_15 0x1eaf
+#define mmDCP3_REGAMMA_CNTLA_REGION_14_15 0x40af
+#define mmDCP4_REGAMMA_CNTLA_REGION_14_15 0x42af
+#define mmDCP5_REGAMMA_CNTLA_REGION_14_15 0x44af
+#define mmREGAMMA_CNTLB_START_CNTL 0x1ab0
+#define mmDCP0_REGAMMA_CNTLB_START_CNTL 0x1ab0
+#define mmDCP1_REGAMMA_CNTLB_START_CNTL 0x1cb0
+#define mmDCP2_REGAMMA_CNTLB_START_CNTL 0x1eb0
+#define mmDCP3_REGAMMA_CNTLB_START_CNTL 0x40b0
+#define mmDCP4_REGAMMA_CNTLB_START_CNTL 0x42b0
+#define mmDCP5_REGAMMA_CNTLB_START_CNTL 0x44b0
+#define mmREGAMMA_CNTLB_SLOPE_CNTL 0x1ab1
+#define mmDCP0_REGAMMA_CNTLB_SLOPE_CNTL 0x1ab1
+#define mmDCP1_REGAMMA_CNTLB_SLOPE_CNTL 0x1cb1
+#define mmDCP2_REGAMMA_CNTLB_SLOPE_CNTL 0x1eb1
+#define mmDCP3_REGAMMA_CNTLB_SLOPE_CNTL 0x40b1
+#define mmDCP4_REGAMMA_CNTLB_SLOPE_CNTL 0x42b1
+#define mmDCP5_REGAMMA_CNTLB_SLOPE_CNTL 0x44b1
+#define mmREGAMMA_CNTLB_END_CNTL1 0x1ab2
+#define mmDCP0_REGAMMA_CNTLB_END_CNTL1 0x1ab2
+#define mmDCP1_REGAMMA_CNTLB_END_CNTL1 0x1cb2
+#define mmDCP2_REGAMMA_CNTLB_END_CNTL1 0x1eb2
+#define mmDCP3_REGAMMA_CNTLB_END_CNTL1 0x40b2
+#define mmDCP4_REGAMMA_CNTLB_END_CNTL1 0x42b2
+#define mmDCP5_REGAMMA_CNTLB_END_CNTL1 0x44b2
+#define mmREGAMMA_CNTLB_END_CNTL2 0x1ab3
+#define mmDCP0_REGAMMA_CNTLB_END_CNTL2 0x1ab3
+#define mmDCP1_REGAMMA_CNTLB_END_CNTL2 0x1cb3
+#define mmDCP2_REGAMMA_CNTLB_END_CNTL2 0x1eb3
+#define mmDCP3_REGAMMA_CNTLB_END_CNTL2 0x40b3
+#define mmDCP4_REGAMMA_CNTLB_END_CNTL2 0x42b3
+#define mmDCP5_REGAMMA_CNTLB_END_CNTL2 0x44b3
+#define mmREGAMMA_CNTLB_REGION_0_1 0x1ab4
+#define mmDCP0_REGAMMA_CNTLB_REGION_0_1 0x1ab4
+#define mmDCP1_REGAMMA_CNTLB_REGION_0_1 0x1cb4
+#define mmDCP2_REGAMMA_CNTLB_REGION_0_1 0x1eb4
+#define mmDCP3_REGAMMA_CNTLB_REGION_0_1 0x40b4
+#define mmDCP4_REGAMMA_CNTLB_REGION_0_1 0x42b4
+#define mmDCP5_REGAMMA_CNTLB_REGION_0_1 0x44b4
+#define mmREGAMMA_CNTLB_REGION_2_3 0x1ab5
+#define mmDCP0_REGAMMA_CNTLB_REGION_2_3 0x1ab5
+#define mmDCP1_REGAMMA_CNTLB_REGION_2_3 0x1cb5
+#define mmDCP2_REGAMMA_CNTLB_REGION_2_3 0x1eb5
+#define mmDCP3_REGAMMA_CNTLB_REGION_2_3 0x40b5
+#define mmDCP4_REGAMMA_CNTLB_REGION_2_3 0x42b5
+#define mmDCP5_REGAMMA_CNTLB_REGION_2_3 0x44b5
+#define mmREGAMMA_CNTLB_REGION_4_5 0x1ab6
+#define mmDCP0_REGAMMA_CNTLB_REGION_4_5 0x1ab6
+#define mmDCP1_REGAMMA_CNTLB_REGION_4_5 0x1cb6
+#define mmDCP2_REGAMMA_CNTLB_REGION_4_5 0x1eb6
+#define mmDCP3_REGAMMA_CNTLB_REGION_4_5 0x40b6
+#define mmDCP4_REGAMMA_CNTLB_REGION_4_5 0x42b6
+#define mmDCP5_REGAMMA_CNTLB_REGION_4_5 0x44b6
+#define mmREGAMMA_CNTLB_REGION_6_7 0x1ab7
+#define mmDCP0_REGAMMA_CNTLB_REGION_6_7 0x1ab7
+#define mmDCP1_REGAMMA_CNTLB_REGION_6_7 0x1cb7
+#define mmDCP2_REGAMMA_CNTLB_REGION_6_7 0x1eb7
+#define mmDCP3_REGAMMA_CNTLB_REGION_6_7 0x40b7
+#define mmDCP4_REGAMMA_CNTLB_REGION_6_7 0x42b7
+#define mmDCP5_REGAMMA_CNTLB_REGION_6_7 0x44b7
+#define mmREGAMMA_CNTLB_REGION_8_9 0x1ab8
+#define mmDCP0_REGAMMA_CNTLB_REGION_8_9 0x1ab8
+#define mmDCP1_REGAMMA_CNTLB_REGION_8_9 0x1cb8
+#define mmDCP2_REGAMMA_CNTLB_REGION_8_9 0x1eb8
+#define mmDCP3_REGAMMA_CNTLB_REGION_8_9 0x40b8
+#define mmDCP4_REGAMMA_CNTLB_REGION_8_9 0x42b8
+#define mmDCP5_REGAMMA_CNTLB_REGION_8_9 0x44b8
+#define mmREGAMMA_CNTLB_REGION_10_11 0x1ab9
+#define mmDCP0_REGAMMA_CNTLB_REGION_10_11 0x1ab9
+#define mmDCP1_REGAMMA_CNTLB_REGION_10_11 0x1cb9
+#define mmDCP2_REGAMMA_CNTLB_REGION_10_11 0x1eb9
+#define mmDCP3_REGAMMA_CNTLB_REGION_10_11 0x40b9
+#define mmDCP4_REGAMMA_CNTLB_REGION_10_11 0x42b9
+#define mmDCP5_REGAMMA_CNTLB_REGION_10_11 0x44b9
+#define mmREGAMMA_CNTLB_REGION_12_13 0x1aba
+#define mmDCP0_REGAMMA_CNTLB_REGION_12_13 0x1aba
+#define mmDCP1_REGAMMA_CNTLB_REGION_12_13 0x1cba
+#define mmDCP2_REGAMMA_CNTLB_REGION_12_13 0x1eba
+#define mmDCP3_REGAMMA_CNTLB_REGION_12_13 0x40ba
+#define mmDCP4_REGAMMA_CNTLB_REGION_12_13 0x42ba
+#define mmDCP5_REGAMMA_CNTLB_REGION_12_13 0x44ba
+#define mmREGAMMA_CNTLB_REGION_14_15 0x1abb
+#define mmDCP0_REGAMMA_CNTLB_REGION_14_15 0x1abb
+#define mmDCP1_REGAMMA_CNTLB_REGION_14_15 0x1cbb
+#define mmDCP2_REGAMMA_CNTLB_REGION_14_15 0x1ebb
+#define mmDCP3_REGAMMA_CNTLB_REGION_14_15 0x40bb
+#define mmDCP4_REGAMMA_CNTLB_REGION_14_15 0x42bb
+#define mmDCP5_REGAMMA_CNTLB_REGION_14_15 0x44bb
+#define mmALPHA_CONTROL 0x1abc
+#define mmDCP0_ALPHA_CONTROL 0x1abc
+#define mmDCP1_ALPHA_CONTROL 0x1cbc
+#define mmDCP2_ALPHA_CONTROL 0x1ebc
+#define mmDCP3_ALPHA_CONTROL 0x40bc
+#define mmDCP4_ALPHA_CONTROL 0x42bc
+#define mmDCP5_ALPHA_CONTROL 0x44bc
+#define mmGRPH_XDMA_RECOVERY_SURFACE_ADDRESS 0x1abd
+#define mmDCP0_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS 0x1abd
+#define mmDCP1_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS 0x1cbd
+#define mmDCP2_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS 0x1ebd
+#define mmDCP3_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS 0x40bd
+#define mmDCP4_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS 0x42bd
+#define mmDCP5_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS 0x44bd
+#define mmGRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH 0x1abe
+#define mmDCP0_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH 0x1abe
+#define mmDCP1_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH 0x1cbe
+#define mmDCP2_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH 0x1ebe
+#define mmDCP3_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH 0x40be
+#define mmDCP4_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH 0x42be
+#define mmDCP5_GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH 0x44be
+#define mmGRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS 0x1abf
+#define mmDCP0_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS 0x1abf
+#define mmDCP1_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS 0x1cbf
+#define mmDCP2_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS 0x1ebf
+#define mmDCP3_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS 0x40bf
+#define mmDCP4_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS 0x42bf
+#define mmDCP5_GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS 0x44bf
+#define mmGRPH_SURFACE_COUNTER_CONTROL 0x1a0f
+#define mmDCP0_GRPH_SURFACE_COUNTER_CONTROL 0x1a0f
+#define mmDCP1_GRPH_SURFACE_COUNTER_CONTROL 0x1c0f
+#define mmDCP2_GRPH_SURFACE_COUNTER_CONTROL 0x1e0f
+#define mmDCP3_GRPH_SURFACE_COUNTER_CONTROL 0x400f
+#define mmDCP4_GRPH_SURFACE_COUNTER_CONTROL 0x420f
+#define mmDCP5_GRPH_SURFACE_COUNTER_CONTROL 0x440f
+#define mmGRPH_SURFACE_COUNTER_OUTPUT 0x1a1d
+#define mmDCP0_GRPH_SURFACE_COUNTER_OUTPUT 0x1a1d
+#define mmDCP1_GRPH_SURFACE_COUNTER_OUTPUT 0x1c1d
+#define mmDCP2_GRPH_SURFACE_COUNTER_OUTPUT 0x1e1d
+#define mmDCP3_GRPH_SURFACE_COUNTER_OUTPUT 0x401d
+#define mmDCP4_GRPH_SURFACE_COUNTER_OUTPUT 0x421d
+#define mmDCP5_GRPH_SURFACE_COUNTER_OUTPUT 0x441d
+#define mmDIG_FE_CNTL 0x4a00
+#define mmDIG0_DIG_FE_CNTL 0x4a00
+#define mmDIG1_DIG_FE_CNTL 0x4b00
+#define mmDIG2_DIG_FE_CNTL 0x4c00
+#define mmDIG3_DIG_FE_CNTL 0x4d00
+#define mmDIG4_DIG_FE_CNTL 0x4e00
+#define mmDIG5_DIG_FE_CNTL 0x4f00
+#define mmDIG6_DIG_FE_CNTL 0x5400
+#define mmDIG7_DIG_FE_CNTL 0x5600
+#define mmDIG8_DIG_FE_CNTL 0x5700
+#define mmDIG_OUTPUT_CRC_CNTL 0x4a01
+#define mmDIG0_DIG_OUTPUT_CRC_CNTL 0x4a01
+#define mmDIG1_DIG_OUTPUT_CRC_CNTL 0x4b01
+#define mmDIG2_DIG_OUTPUT_CRC_CNTL 0x4c01
+#define mmDIG3_DIG_OUTPUT_CRC_CNTL 0x4d01
+#define mmDIG4_DIG_OUTPUT_CRC_CNTL 0x4e01
+#define mmDIG5_DIG_OUTPUT_CRC_CNTL 0x4f01
+#define mmDIG6_DIG_OUTPUT_CRC_CNTL 0x5401
+#define mmDIG7_DIG_OUTPUT_CRC_CNTL 0x5601
+#define mmDIG8_DIG_OUTPUT_CRC_CNTL 0x5701
+#define mmDIG_OUTPUT_CRC_RESULT 0x4a02
+#define mmDIG0_DIG_OUTPUT_CRC_RESULT 0x4a02
+#define mmDIG1_DIG_OUTPUT_CRC_RESULT 0x4b02
+#define mmDIG2_DIG_OUTPUT_CRC_RESULT 0x4c02
+#define mmDIG3_DIG_OUTPUT_CRC_RESULT 0x4d02
+#define mmDIG4_DIG_OUTPUT_CRC_RESULT 0x4e02
+#define mmDIG5_DIG_OUTPUT_CRC_RESULT 0x4f02
+#define mmDIG6_DIG_OUTPUT_CRC_RESULT 0x5402
+#define mmDIG7_DIG_OUTPUT_CRC_RESULT 0x5602
+#define mmDIG8_DIG_OUTPUT_CRC_RESULT 0x5702
+#define mmDIG_CLOCK_PATTERN 0x4a03
+#define mmDIG0_DIG_CLOCK_PATTERN 0x4a03
+#define mmDIG1_DIG_CLOCK_PATTERN 0x4b03
+#define mmDIG2_DIG_CLOCK_PATTERN 0x4c03
+#define mmDIG3_DIG_CLOCK_PATTERN 0x4d03
+#define mmDIG4_DIG_CLOCK_PATTERN 0x4e03
+#define mmDIG5_DIG_CLOCK_PATTERN 0x4f03
+#define mmDIG6_DIG_CLOCK_PATTERN 0x5403
+#define mmDIG7_DIG_CLOCK_PATTERN 0x5603
+#define mmDIG8_DIG_CLOCK_PATTERN 0x5703
+#define mmDIG_TEST_PATTERN 0x4a04
+#define mmDIG0_DIG_TEST_PATTERN 0x4a04
+#define mmDIG1_DIG_TEST_PATTERN 0x4b04
+#define mmDIG2_DIG_TEST_PATTERN 0x4c04
+#define mmDIG3_DIG_TEST_PATTERN 0x4d04
+#define mmDIG4_DIG_TEST_PATTERN 0x4e04
+#define mmDIG5_DIG_TEST_PATTERN 0x4f04
+#define mmDIG6_DIG_TEST_PATTERN 0x5404
+#define mmDIG7_DIG_TEST_PATTERN 0x5604
+#define mmDIG8_DIG_TEST_PATTERN 0x5704
+#define mmDIG_RANDOM_PATTERN_SEED 0x4a05
+#define mmDIG0_DIG_RANDOM_PATTERN_SEED 0x4a05
+#define mmDIG1_DIG_RANDOM_PATTERN_SEED 0x4b05
+#define mmDIG2_DIG_RANDOM_PATTERN_SEED 0x4c05
+#define mmDIG3_DIG_RANDOM_PATTERN_SEED 0x4d05
+#define mmDIG4_DIG_RANDOM_PATTERN_SEED 0x4e05
+#define mmDIG5_DIG_RANDOM_PATTERN_SEED 0x4f05
+#define mmDIG6_DIG_RANDOM_PATTERN_SEED 0x5405
+#define mmDIG7_DIG_RANDOM_PATTERN_SEED 0x5605
+#define mmDIG8_DIG_RANDOM_PATTERN_SEED 0x5705
+#define mmDIG_FIFO_STATUS 0x4a06
+#define mmDIG0_DIG_FIFO_STATUS 0x4a06
+#define mmDIG1_DIG_FIFO_STATUS 0x4b06
+#define mmDIG2_DIG_FIFO_STATUS 0x4c06
+#define mmDIG3_DIG_FIFO_STATUS 0x4d06
+#define mmDIG4_DIG_FIFO_STATUS 0x4e06
+#define mmDIG5_DIG_FIFO_STATUS 0x4f06
+#define mmDIG6_DIG_FIFO_STATUS 0x5406
+#define mmDIG7_DIG_FIFO_STATUS 0x5606
+#define mmDIG8_DIG_FIFO_STATUS 0x5706
+#define mmDIG_DISPCLK_SWITCH_CNTL 0x4a07
+#define mmDIG0_DIG_DISPCLK_SWITCH_CNTL 0x4a07
+#define mmDIG1_DIG_DISPCLK_SWITCH_CNTL 0x4b07
+#define mmDIG2_DIG_DISPCLK_SWITCH_CNTL 0x4c07
+#define mmDIG3_DIG_DISPCLK_SWITCH_CNTL 0x4d07
+#define mmDIG4_DIG_DISPCLK_SWITCH_CNTL 0x4e07
+#define mmDIG5_DIG_DISPCLK_SWITCH_CNTL 0x4f07
+#define mmDIG6_DIG_DISPCLK_SWITCH_CNTL 0x5407
+#define mmDIG7_DIG_DISPCLK_SWITCH_CNTL 0x5607
+#define mmDIG8_DIG_DISPCLK_SWITCH_CNTL 0x5707
+#define mmDIG_DISPCLK_SWITCH_STATUS 0x4a08
+#define mmDIG0_DIG_DISPCLK_SWITCH_STATUS 0x4a08
+#define mmDIG1_DIG_DISPCLK_SWITCH_STATUS 0x4b08
+#define mmDIG2_DIG_DISPCLK_SWITCH_STATUS 0x4c08
+#define mmDIG3_DIG_DISPCLK_SWITCH_STATUS 0x4d08
+#define mmDIG4_DIG_DISPCLK_SWITCH_STATUS 0x4e08
+#define mmDIG5_DIG_DISPCLK_SWITCH_STATUS 0x4f08
+#define mmDIG6_DIG_DISPCLK_SWITCH_STATUS 0x5408
+#define mmDIG7_DIG_DISPCLK_SWITCH_STATUS 0x5608
+#define mmDIG8_DIG_DISPCLK_SWITCH_STATUS 0x5708
+#define mmHDMI_CONTROL 0x4a09
+#define mmDIG0_HDMI_CONTROL 0x4a09
+#define mmDIG1_HDMI_CONTROL 0x4b09
+#define mmDIG2_HDMI_CONTROL 0x4c09
+#define mmDIG3_HDMI_CONTROL 0x4d09
+#define mmDIG4_HDMI_CONTROL 0x4e09
+#define mmDIG5_HDMI_CONTROL 0x4f09
+#define mmDIG6_HDMI_CONTROL 0x5409
+#define mmDIG7_HDMI_CONTROL 0x5609
+#define mmDIG8_HDMI_CONTROL 0x5709
+#define mmHDMI_STATUS 0x4a0a
+#define mmDIG0_HDMI_STATUS 0x4a0a
+#define mmDIG1_HDMI_STATUS 0x4b0a
+#define mmDIG2_HDMI_STATUS 0x4c0a
+#define mmDIG3_HDMI_STATUS 0x4d0a
+#define mmDIG4_HDMI_STATUS 0x4e0a
+#define mmDIG5_HDMI_STATUS 0x4f0a
+#define mmDIG6_HDMI_STATUS 0x540a
+#define mmDIG7_HDMI_STATUS 0x560a
+#define mmDIG8_HDMI_STATUS 0x570a
+#define mmHDMI_AUDIO_PACKET_CONTROL 0x4a0b
+#define mmDIG0_HDMI_AUDIO_PACKET_CONTROL 0x4a0b
+#define mmDIG1_HDMI_AUDIO_PACKET_CONTROL 0x4b0b
+#define mmDIG2_HDMI_AUDIO_PACKET_CONTROL 0x4c0b
+#define mmDIG3_HDMI_AUDIO_PACKET_CONTROL 0x4d0b
+#define mmDIG4_HDMI_AUDIO_PACKET_CONTROL 0x4e0b
+#define mmDIG5_HDMI_AUDIO_PACKET_CONTROL 0x4f0b
+#define mmDIG6_HDMI_AUDIO_PACKET_CONTROL 0x540b
+#define mmDIG7_HDMI_AUDIO_PACKET_CONTROL 0x560b
+#define mmDIG8_HDMI_AUDIO_PACKET_CONTROL 0x570b
+#define mmHDMI_ACR_PACKET_CONTROL 0x4a0c
+#define mmDIG0_HDMI_ACR_PACKET_CONTROL 0x4a0c
+#define mmDIG1_HDMI_ACR_PACKET_CONTROL 0x4b0c
+#define mmDIG2_HDMI_ACR_PACKET_CONTROL 0x4c0c
+#define mmDIG3_HDMI_ACR_PACKET_CONTROL 0x4d0c
+#define mmDIG4_HDMI_ACR_PACKET_CONTROL 0x4e0c
+#define mmDIG5_HDMI_ACR_PACKET_CONTROL 0x4f0c
+#define mmDIG6_HDMI_ACR_PACKET_CONTROL 0x540c
+#define mmDIG7_HDMI_ACR_PACKET_CONTROL 0x560c
+#define mmDIG8_HDMI_ACR_PACKET_CONTROL 0x570c
+#define mmHDMI_VBI_PACKET_CONTROL 0x4a0d
+#define mmDIG0_HDMI_VBI_PACKET_CONTROL 0x4a0d
+#define mmDIG1_HDMI_VBI_PACKET_CONTROL 0x4b0d
+#define mmDIG2_HDMI_VBI_PACKET_CONTROL 0x4c0d
+#define mmDIG3_HDMI_VBI_PACKET_CONTROL 0x4d0d
+#define mmDIG4_HDMI_VBI_PACKET_CONTROL 0x4e0d
+#define mmDIG5_HDMI_VBI_PACKET_CONTROL 0x4f0d
+#define mmDIG6_HDMI_VBI_PACKET_CONTROL 0x540d
+#define mmDIG7_HDMI_VBI_PACKET_CONTROL 0x560d
+#define mmDIG8_HDMI_VBI_PACKET_CONTROL 0x570d
+#define mmHDMI_INFOFRAME_CONTROL0 0x4a0e
+#define mmDIG0_HDMI_INFOFRAME_CONTROL0 0x4a0e
+#define mmDIG1_HDMI_INFOFRAME_CONTROL0 0x4b0e
+#define mmDIG2_HDMI_INFOFRAME_CONTROL0 0x4c0e
+#define mmDIG3_HDMI_INFOFRAME_CONTROL0 0x4d0e
+#define mmDIG4_HDMI_INFOFRAME_CONTROL0 0x4e0e
+#define mmDIG5_HDMI_INFOFRAME_CONTROL0 0x4f0e
+#define mmDIG6_HDMI_INFOFRAME_CONTROL0 0x540e
+#define mmDIG7_HDMI_INFOFRAME_CONTROL0 0x560e
+#define mmDIG8_HDMI_INFOFRAME_CONTROL0 0x570e
+#define mmHDMI_INFOFRAME_CONTROL1 0x4a0f
+#define mmDIG0_HDMI_INFOFRAME_CONTROL1 0x4a0f
+#define mmDIG1_HDMI_INFOFRAME_CONTROL1 0x4b0f
+#define mmDIG2_HDMI_INFOFRAME_CONTROL1 0x4c0f
+#define mmDIG3_HDMI_INFOFRAME_CONTROL1 0x4d0f
+#define mmDIG4_HDMI_INFOFRAME_CONTROL1 0x4e0f
+#define mmDIG5_HDMI_INFOFRAME_CONTROL1 0x4f0f
+#define mmDIG6_HDMI_INFOFRAME_CONTROL1 0x540f
+#define mmDIG7_HDMI_INFOFRAME_CONTROL1 0x560f
+#define mmDIG8_HDMI_INFOFRAME_CONTROL1 0x570f
+#define mmHDMI_GENERIC_PACKET_CONTROL0 0x4a10
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL0 0x4a10
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL0 0x4b10
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL0 0x4c10
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL0 0x4d10
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL0 0x4e10
+#define mmDIG5_HDMI_GENERIC_PACKET_CONTROL0 0x4f10
+#define mmDIG6_HDMI_GENERIC_PACKET_CONTROL0 0x5410
+#define mmDIG7_HDMI_GENERIC_PACKET_CONTROL0 0x5610
+#define mmDIG8_HDMI_GENERIC_PACKET_CONTROL0 0x5710
+#define mmAFMT_INTERRUPT_STATUS 0x4a11
+#define mmDIG0_AFMT_INTERRUPT_STATUS 0x4a11
+#define mmDIG1_AFMT_INTERRUPT_STATUS 0x4b11
+#define mmDIG2_AFMT_INTERRUPT_STATUS 0x4c11
+#define mmDIG3_AFMT_INTERRUPT_STATUS 0x4d11
+#define mmDIG4_AFMT_INTERRUPT_STATUS 0x4e11
+#define mmDIG5_AFMT_INTERRUPT_STATUS 0x4f11
+#define mmDIG6_AFMT_INTERRUPT_STATUS 0x5411
+#define mmDIG7_AFMT_INTERRUPT_STATUS 0x5611
+#define mmDIG8_AFMT_INTERRUPT_STATUS 0x5711
+#define mmHDMI_GC 0x4a13
+#define mmDIG0_HDMI_GC 0x4a13
+#define mmDIG1_HDMI_GC 0x4b13
+#define mmDIG2_HDMI_GC 0x4c13
+#define mmDIG3_HDMI_GC 0x4d13
+#define mmDIG4_HDMI_GC 0x4e13
+#define mmDIG5_HDMI_GC 0x4f13
+#define mmDIG6_HDMI_GC 0x5413
+#define mmDIG7_HDMI_GC 0x5613
+#define mmDIG8_HDMI_GC 0x5713
+#define mmAFMT_AUDIO_PACKET_CONTROL2 0x4a14
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL2 0x4a14
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL2 0x4b14
+#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL2 0x4c14
+#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL2 0x4d14
+#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL2 0x4e14
+#define mmDIG5_AFMT_AUDIO_PACKET_CONTROL2 0x4f14
+#define mmDIG6_AFMT_AUDIO_PACKET_CONTROL2 0x5414
+#define mmDIG7_AFMT_AUDIO_PACKET_CONTROL2 0x5614
+#define mmDIG8_AFMT_AUDIO_PACKET_CONTROL2 0x5714
+#define mmAFMT_ISRC1_0 0x4a15
+#define mmDIG0_AFMT_ISRC1_0 0x4a15
+#define mmDIG1_AFMT_ISRC1_0 0x4b15
+#define mmDIG2_AFMT_ISRC1_0 0x4c15
+#define mmDIG3_AFMT_ISRC1_0 0x4d15
+#define mmDIG4_AFMT_ISRC1_0 0x4e15
+#define mmDIG5_AFMT_ISRC1_0 0x4f15
+#define mmDIG6_AFMT_ISRC1_0 0x5415
+#define mmDIG7_AFMT_ISRC1_0 0x5615
+#define mmDIG8_AFMT_ISRC1_0 0x5715
+#define mmAFMT_ISRC1_1 0x4a16
+#define mmDIG0_AFMT_ISRC1_1 0x4a16
+#define mmDIG1_AFMT_ISRC1_1 0x4b16
+#define mmDIG2_AFMT_ISRC1_1 0x4c16
+#define mmDIG3_AFMT_ISRC1_1 0x4d16
+#define mmDIG4_AFMT_ISRC1_1 0x4e16
+#define mmDIG5_AFMT_ISRC1_1 0x4f16
+#define mmDIG6_AFMT_ISRC1_1 0x5416
+#define mmDIG7_AFMT_ISRC1_1 0x5616
+#define mmDIG8_AFMT_ISRC1_1 0x5716
+#define mmAFMT_ISRC1_2 0x4a17
+#define mmDIG0_AFMT_ISRC1_2 0x4a17
+#define mmDIG1_AFMT_ISRC1_2 0x4b17
+#define mmDIG2_AFMT_ISRC1_2 0x4c17
+#define mmDIG3_AFMT_ISRC1_2 0x4d17
+#define mmDIG4_AFMT_ISRC1_2 0x4e17
+#define mmDIG5_AFMT_ISRC1_2 0x4f17
+#define mmDIG6_AFMT_ISRC1_2 0x5417
+#define mmDIG7_AFMT_ISRC1_2 0x5617
+#define mmDIG8_AFMT_ISRC1_2 0x5717
+#define mmAFMT_ISRC1_3 0x4a18
+#define mmDIG0_AFMT_ISRC1_3 0x4a18
+#define mmDIG1_AFMT_ISRC1_3 0x4b18
+#define mmDIG2_AFMT_ISRC1_3 0x4c18
+#define mmDIG3_AFMT_ISRC1_3 0x4d18
+#define mmDIG4_AFMT_ISRC1_3 0x4e18
+#define mmDIG5_AFMT_ISRC1_3 0x4f18
+#define mmDIG6_AFMT_ISRC1_3 0x5418
+#define mmDIG7_AFMT_ISRC1_3 0x5618
+#define mmDIG8_AFMT_ISRC1_3 0x5718
+#define mmAFMT_ISRC1_4 0x4a19
+#define mmDIG0_AFMT_ISRC1_4 0x4a19
+#define mmDIG1_AFMT_ISRC1_4 0x4b19
+#define mmDIG2_AFMT_ISRC1_4 0x4c19
+#define mmDIG3_AFMT_ISRC1_4 0x4d19
+#define mmDIG4_AFMT_ISRC1_4 0x4e19
+#define mmDIG5_AFMT_ISRC1_4 0x4f19
+#define mmDIG6_AFMT_ISRC1_4 0x5419
+#define mmDIG7_AFMT_ISRC1_4 0x5619
+#define mmDIG8_AFMT_ISRC1_4 0x5719
+#define mmAFMT_ISRC2_0 0x4a1a
+#define mmDIG0_AFMT_ISRC2_0 0x4a1a
+#define mmDIG1_AFMT_ISRC2_0 0x4b1a
+#define mmDIG2_AFMT_ISRC2_0 0x4c1a
+#define mmDIG3_AFMT_ISRC2_0 0x4d1a
+#define mmDIG4_AFMT_ISRC2_0 0x4e1a
+#define mmDIG5_AFMT_ISRC2_0 0x4f1a
+#define mmDIG6_AFMT_ISRC2_0 0x541a
+#define mmDIG7_AFMT_ISRC2_0 0x561a
+#define mmDIG8_AFMT_ISRC2_0 0x571a
+#define mmAFMT_ISRC2_1 0x4a1b
+#define mmDIG0_AFMT_ISRC2_1 0x4a1b
+#define mmDIG1_AFMT_ISRC2_1 0x4b1b
+#define mmDIG2_AFMT_ISRC2_1 0x4c1b
+#define mmDIG3_AFMT_ISRC2_1 0x4d1b
+#define mmDIG4_AFMT_ISRC2_1 0x4e1b
+#define mmDIG5_AFMT_ISRC2_1 0x4f1b
+#define mmDIG6_AFMT_ISRC2_1 0x541b
+#define mmDIG7_AFMT_ISRC2_1 0x561b
+#define mmDIG8_AFMT_ISRC2_1 0x571b
+#define mmAFMT_ISRC2_2 0x4a1c
+#define mmDIG0_AFMT_ISRC2_2 0x4a1c
+#define mmDIG1_AFMT_ISRC2_2 0x4b1c
+#define mmDIG2_AFMT_ISRC2_2 0x4c1c
+#define mmDIG3_AFMT_ISRC2_2 0x4d1c
+#define mmDIG4_AFMT_ISRC2_2 0x4e1c
+#define mmDIG5_AFMT_ISRC2_2 0x4f1c
+#define mmDIG6_AFMT_ISRC2_2 0x541c
+#define mmDIG7_AFMT_ISRC2_2 0x561c
+#define mmDIG8_AFMT_ISRC2_2 0x571c
+#define mmAFMT_ISRC2_3 0x4a1d
+#define mmDIG0_AFMT_ISRC2_3 0x4a1d
+#define mmDIG1_AFMT_ISRC2_3 0x4b1d
+#define mmDIG2_AFMT_ISRC2_3 0x4c1d
+#define mmDIG3_AFMT_ISRC2_3 0x4d1d
+#define mmDIG4_AFMT_ISRC2_3 0x4e1d
+#define mmDIG5_AFMT_ISRC2_3 0x4f1d
+#define mmDIG6_AFMT_ISRC2_3 0x541d
+#define mmDIG7_AFMT_ISRC2_3 0x561d
+#define mmDIG8_AFMT_ISRC2_3 0x571d
+#define mmAFMT_AVI_INFO0 0x4a1e
+#define mmDIG0_AFMT_AVI_INFO0 0x4a1e
+#define mmDIG1_AFMT_AVI_INFO0 0x4b1e
+#define mmDIG2_AFMT_AVI_INFO0 0x4c1e
+#define mmDIG3_AFMT_AVI_INFO0 0x4d1e
+#define mmDIG4_AFMT_AVI_INFO0 0x4e1e
+#define mmDIG5_AFMT_AVI_INFO0 0x4f1e
+#define mmDIG6_AFMT_AVI_INFO0 0x541e
+#define mmDIG7_AFMT_AVI_INFO0 0x561e
+#define mmDIG8_AFMT_AVI_INFO0 0x571e
+#define mmAFMT_AVI_INFO1 0x4a1f
+#define mmDIG0_AFMT_AVI_INFO1 0x4a1f
+#define mmDIG1_AFMT_AVI_INFO1 0x4b1f
+#define mmDIG2_AFMT_AVI_INFO1 0x4c1f
+#define mmDIG3_AFMT_AVI_INFO1 0x4d1f
+#define mmDIG4_AFMT_AVI_INFO1 0x4e1f
+#define mmDIG5_AFMT_AVI_INFO1 0x4f1f
+#define mmDIG6_AFMT_AVI_INFO1 0x541f
+#define mmDIG7_AFMT_AVI_INFO1 0x561f
+#define mmDIG8_AFMT_AVI_INFO1 0x571f
+#define mmAFMT_AVI_INFO2 0x4a20
+#define mmDIG0_AFMT_AVI_INFO2 0x4a20
+#define mmDIG1_AFMT_AVI_INFO2 0x4b20
+#define mmDIG2_AFMT_AVI_INFO2 0x4c20
+#define mmDIG3_AFMT_AVI_INFO2 0x4d20
+#define mmDIG4_AFMT_AVI_INFO2 0x4e20
+#define mmDIG5_AFMT_AVI_INFO2 0x4f20
+#define mmDIG6_AFMT_AVI_INFO2 0x5420
+#define mmDIG7_AFMT_AVI_INFO2 0x5620
+#define mmDIG8_AFMT_AVI_INFO2 0x5720
+#define mmAFMT_AVI_INFO3 0x4a21
+#define mmDIG0_AFMT_AVI_INFO3 0x4a21
+#define mmDIG1_AFMT_AVI_INFO3 0x4b21
+#define mmDIG2_AFMT_AVI_INFO3 0x4c21
+#define mmDIG3_AFMT_AVI_INFO3 0x4d21
+#define mmDIG4_AFMT_AVI_INFO3 0x4e21
+#define mmDIG5_AFMT_AVI_INFO3 0x4f21
+#define mmDIG6_AFMT_AVI_INFO3 0x5421
+#define mmDIG7_AFMT_AVI_INFO3 0x5621
+#define mmDIG8_AFMT_AVI_INFO3 0x5721
+#define mmAFMT_MPEG_INFO0 0x4a22
+#define mmDIG0_AFMT_MPEG_INFO0 0x4a22
+#define mmDIG1_AFMT_MPEG_INFO0 0x4b22
+#define mmDIG2_AFMT_MPEG_INFO0 0x4c22
+#define mmDIG3_AFMT_MPEG_INFO0 0x4d22
+#define mmDIG4_AFMT_MPEG_INFO0 0x4e22
+#define mmDIG5_AFMT_MPEG_INFO0 0x4f22
+#define mmDIG6_AFMT_MPEG_INFO0 0x5422
+#define mmDIG7_AFMT_MPEG_INFO0 0x5622
+#define mmDIG8_AFMT_MPEG_INFO0 0x5722
+#define mmAFMT_MPEG_INFO1 0x4a23
+#define mmDIG0_AFMT_MPEG_INFO1 0x4a23
+#define mmDIG1_AFMT_MPEG_INFO1 0x4b23
+#define mmDIG2_AFMT_MPEG_INFO1 0x4c23
+#define mmDIG3_AFMT_MPEG_INFO1 0x4d23
+#define mmDIG4_AFMT_MPEG_INFO1 0x4e23
+#define mmDIG5_AFMT_MPEG_INFO1 0x4f23
+#define mmDIG6_AFMT_MPEG_INFO1 0x5423
+#define mmDIG7_AFMT_MPEG_INFO1 0x5623
+#define mmDIG8_AFMT_MPEG_INFO1 0x5723
+#define mmAFMT_GENERIC_HDR 0x4a24
+#define mmDIG0_AFMT_GENERIC_HDR 0x4a24
+#define mmDIG1_AFMT_GENERIC_HDR 0x4b24
+#define mmDIG2_AFMT_GENERIC_HDR 0x4c24
+#define mmDIG3_AFMT_GENERIC_HDR 0x4d24
+#define mmDIG4_AFMT_GENERIC_HDR 0x4e24
+#define mmDIG5_AFMT_GENERIC_HDR 0x4f24
+#define mmDIG6_AFMT_GENERIC_HDR 0x5424
+#define mmDIG7_AFMT_GENERIC_HDR 0x5624
+#define mmDIG8_AFMT_GENERIC_HDR 0x5724
+#define mmAFMT_GENERIC_0 0x4a25
+#define mmDIG0_AFMT_GENERIC_0 0x4a25
+#define mmDIG1_AFMT_GENERIC_0 0x4b25
+#define mmDIG2_AFMT_GENERIC_0 0x4c25
+#define mmDIG3_AFMT_GENERIC_0 0x4d25
+#define mmDIG4_AFMT_GENERIC_0 0x4e25
+#define mmDIG5_AFMT_GENERIC_0 0x4f25
+#define mmDIG6_AFMT_GENERIC_0 0x5425
+#define mmDIG7_AFMT_GENERIC_0 0x5625
+#define mmDIG8_AFMT_GENERIC_0 0x5725
+#define mmAFMT_GENERIC_1 0x4a26
+#define mmDIG0_AFMT_GENERIC_1 0x4a26
+#define mmDIG1_AFMT_GENERIC_1 0x4b26
+#define mmDIG2_AFMT_GENERIC_1 0x4c26
+#define mmDIG3_AFMT_GENERIC_1 0x4d26
+#define mmDIG4_AFMT_GENERIC_1 0x4e26
+#define mmDIG5_AFMT_GENERIC_1 0x4f26
+#define mmDIG6_AFMT_GENERIC_1 0x5426
+#define mmDIG7_AFMT_GENERIC_1 0x5626
+#define mmDIG8_AFMT_GENERIC_1 0x5726
+#define mmAFMT_GENERIC_2 0x4a27
+#define mmDIG0_AFMT_GENERIC_2 0x4a27
+#define mmDIG1_AFMT_GENERIC_2 0x4b27
+#define mmDIG2_AFMT_GENERIC_2 0x4c27
+#define mmDIG3_AFMT_GENERIC_2 0x4d27
+#define mmDIG4_AFMT_GENERIC_2 0x4e27
+#define mmDIG5_AFMT_GENERIC_2 0x4f27
+#define mmDIG6_AFMT_GENERIC_2 0x5427
+#define mmDIG7_AFMT_GENERIC_2 0x5627
+#define mmDIG8_AFMT_GENERIC_2 0x5727
+#define mmAFMT_GENERIC_3 0x4a28
+#define mmDIG0_AFMT_GENERIC_3 0x4a28
+#define mmDIG1_AFMT_GENERIC_3 0x4b28
+#define mmDIG2_AFMT_GENERIC_3 0x4c28
+#define mmDIG3_AFMT_GENERIC_3 0x4d28
+#define mmDIG4_AFMT_GENERIC_3 0x4e28
+#define mmDIG5_AFMT_GENERIC_3 0x4f28
+#define mmDIG6_AFMT_GENERIC_3 0x5428
+#define mmDIG7_AFMT_GENERIC_3 0x5628
+#define mmDIG8_AFMT_GENERIC_3 0x5728
+#define mmAFMT_GENERIC_4 0x4a29
+#define mmDIG0_AFMT_GENERIC_4 0x4a29
+#define mmDIG1_AFMT_GENERIC_4 0x4b29
+#define mmDIG2_AFMT_GENERIC_4 0x4c29
+#define mmDIG3_AFMT_GENERIC_4 0x4d29
+#define mmDIG4_AFMT_GENERIC_4 0x4e29
+#define mmDIG5_AFMT_GENERIC_4 0x4f29
+#define mmDIG6_AFMT_GENERIC_4 0x5429
+#define mmDIG7_AFMT_GENERIC_4 0x5629
+#define mmDIG8_AFMT_GENERIC_4 0x5729
+#define mmAFMT_GENERIC_5 0x4a2a
+#define mmDIG0_AFMT_GENERIC_5 0x4a2a
+#define mmDIG1_AFMT_GENERIC_5 0x4b2a
+#define mmDIG2_AFMT_GENERIC_5 0x4c2a
+#define mmDIG3_AFMT_GENERIC_5 0x4d2a
+#define mmDIG4_AFMT_GENERIC_5 0x4e2a
+#define mmDIG5_AFMT_GENERIC_5 0x4f2a
+#define mmDIG6_AFMT_GENERIC_5 0x542a
+#define mmDIG7_AFMT_GENERIC_5 0x562a
+#define mmDIG8_AFMT_GENERIC_5 0x572a
+#define mmAFMT_GENERIC_6 0x4a2b
+#define mmDIG0_AFMT_GENERIC_6 0x4a2b
+#define mmDIG1_AFMT_GENERIC_6 0x4b2b
+#define mmDIG2_AFMT_GENERIC_6 0x4c2b
+#define mmDIG3_AFMT_GENERIC_6 0x4d2b
+#define mmDIG4_AFMT_GENERIC_6 0x4e2b
+#define mmDIG5_AFMT_GENERIC_6 0x4f2b
+#define mmDIG6_AFMT_GENERIC_6 0x542b
+#define mmDIG7_AFMT_GENERIC_6 0x562b
+#define mmDIG8_AFMT_GENERIC_6 0x572b
+#define mmAFMT_GENERIC_7 0x4a2c
+#define mmDIG0_AFMT_GENERIC_7 0x4a2c
+#define mmDIG1_AFMT_GENERIC_7 0x4b2c
+#define mmDIG2_AFMT_GENERIC_7 0x4c2c
+#define mmDIG3_AFMT_GENERIC_7 0x4d2c
+#define mmDIG4_AFMT_GENERIC_7 0x4e2c
+#define mmDIG5_AFMT_GENERIC_7 0x4f2c
+#define mmDIG6_AFMT_GENERIC_7 0x542c
+#define mmDIG7_AFMT_GENERIC_7 0x562c
+#define mmDIG8_AFMT_GENERIC_7 0x572c
+#define mmHDMI_GENERIC_PACKET_CONTROL1 0x4a2d
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL1 0x4a2d
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL1 0x4b2d
+#define mmDIG2_HDMI_GENERIC_PACKET_CONTROL1 0x4c2d
+#define mmDIG3_HDMI_GENERIC_PACKET_CONTROL1 0x4d2d
+#define mmDIG4_HDMI_GENERIC_PACKET_CONTROL1 0x4e2d
+#define mmDIG5_HDMI_GENERIC_PACKET_CONTROL1 0x4f2d
+#define mmDIG6_HDMI_GENERIC_PACKET_CONTROL1 0x542d
+#define mmDIG7_HDMI_GENERIC_PACKET_CONTROL1 0x562d
+#define mmDIG8_HDMI_GENERIC_PACKET_CONTROL1 0x572d
+#define mmHDMI_ACR_32_0 0x4a2e
+#define mmDIG0_HDMI_ACR_32_0 0x4a2e
+#define mmDIG1_HDMI_ACR_32_0 0x4b2e
+#define mmDIG2_HDMI_ACR_32_0 0x4c2e
+#define mmDIG3_HDMI_ACR_32_0 0x4d2e
+#define mmDIG4_HDMI_ACR_32_0 0x4e2e
+#define mmDIG5_HDMI_ACR_32_0 0x4f2e
+#define mmDIG6_HDMI_ACR_32_0 0x542e
+#define mmDIG7_HDMI_ACR_32_0 0x562e
+#define mmDIG8_HDMI_ACR_32_0 0x572e
+#define mmHDMI_ACR_32_1 0x4a2f
+#define mmDIG0_HDMI_ACR_32_1 0x4a2f
+#define mmDIG1_HDMI_ACR_32_1 0x4b2f
+#define mmDIG2_HDMI_ACR_32_1 0x4c2f
+#define mmDIG3_HDMI_ACR_32_1 0x4d2f
+#define mmDIG4_HDMI_ACR_32_1 0x4e2f
+#define mmDIG5_HDMI_ACR_32_1 0x4f2f
+#define mmDIG6_HDMI_ACR_32_1 0x542f
+#define mmDIG7_HDMI_ACR_32_1 0x562f
+#define mmDIG8_HDMI_ACR_32_1 0x572f
+#define mmHDMI_ACR_44_0 0x4a30
+#define mmDIG0_HDMI_ACR_44_0 0x4a30
+#define mmDIG1_HDMI_ACR_44_0 0x4b30
+#define mmDIG2_HDMI_ACR_44_0 0x4c30
+#define mmDIG3_HDMI_ACR_44_0 0x4d30
+#define mmDIG4_HDMI_ACR_44_0 0x4e30
+#define mmDIG5_HDMI_ACR_44_0 0x4f30
+#define mmDIG6_HDMI_ACR_44_0 0x5430
+#define mmDIG7_HDMI_ACR_44_0 0x5630
+#define mmDIG8_HDMI_ACR_44_0 0x5730
+#define mmHDMI_ACR_44_1 0x4a31
+#define mmDIG0_HDMI_ACR_44_1 0x4a31
+#define mmDIG1_HDMI_ACR_44_1 0x4b31
+#define mmDIG2_HDMI_ACR_44_1 0x4c31
+#define mmDIG3_HDMI_ACR_44_1 0x4d31
+#define mmDIG4_HDMI_ACR_44_1 0x4e31
+#define mmDIG5_HDMI_ACR_44_1 0x4f31
+#define mmDIG6_HDMI_ACR_44_1 0x5431
+#define mmDIG7_HDMI_ACR_44_1 0x5631
+#define mmDIG8_HDMI_ACR_44_1 0x5731
+#define mmHDMI_ACR_48_0 0x4a32
+#define mmDIG0_HDMI_ACR_48_0 0x4a32
+#define mmDIG1_HDMI_ACR_48_0 0x4b32
+#define mmDIG2_HDMI_ACR_48_0 0x4c32
+#define mmDIG3_HDMI_ACR_48_0 0x4d32
+#define mmDIG4_HDMI_ACR_48_0 0x4e32
+#define mmDIG5_HDMI_ACR_48_0 0x4f32
+#define mmDIG6_HDMI_ACR_48_0 0x5432
+#define mmDIG7_HDMI_ACR_48_0 0x5632
+#define mmDIG8_HDMI_ACR_48_0 0x5732
+#define mmHDMI_ACR_48_1 0x4a33
+#define mmDIG0_HDMI_ACR_48_1 0x4a33
+#define mmDIG1_HDMI_ACR_48_1 0x4b33
+#define mmDIG2_HDMI_ACR_48_1 0x4c33
+#define mmDIG3_HDMI_ACR_48_1 0x4d33
+#define mmDIG4_HDMI_ACR_48_1 0x4e33
+#define mmDIG5_HDMI_ACR_48_1 0x4f33
+#define mmDIG6_HDMI_ACR_48_1 0x5433
+#define mmDIG7_HDMI_ACR_48_1 0x5633
+#define mmDIG8_HDMI_ACR_48_1 0x5733
+#define mmHDMI_ACR_STATUS_0 0x4a34
+#define mmDIG0_HDMI_ACR_STATUS_0 0x4a34
+#define mmDIG1_HDMI_ACR_STATUS_0 0x4b34
+#define mmDIG2_HDMI_ACR_STATUS_0 0x4c34
+#define mmDIG3_HDMI_ACR_STATUS_0 0x4d34
+#define mmDIG4_HDMI_ACR_STATUS_0 0x4e34
+#define mmDIG5_HDMI_ACR_STATUS_0 0x4f34
+#define mmDIG6_HDMI_ACR_STATUS_0 0x5434
+#define mmDIG7_HDMI_ACR_STATUS_0 0x5634
+#define mmDIG8_HDMI_ACR_STATUS_0 0x5734
+#define mmHDMI_ACR_STATUS_1 0x4a35
+#define mmDIG0_HDMI_ACR_STATUS_1 0x4a35
+#define mmDIG1_HDMI_ACR_STATUS_1 0x4b35
+#define mmDIG2_HDMI_ACR_STATUS_1 0x4c35
+#define mmDIG3_HDMI_ACR_STATUS_1 0x4d35
+#define mmDIG4_HDMI_ACR_STATUS_1 0x4e35
+#define mmDIG5_HDMI_ACR_STATUS_1 0x4f35
+#define mmDIG6_HDMI_ACR_STATUS_1 0x5435
+#define mmDIG7_HDMI_ACR_STATUS_1 0x5635
+#define mmDIG8_HDMI_ACR_STATUS_1 0x5735
+#define mmAFMT_AUDIO_INFO0 0x4a36
+#define mmDIG0_AFMT_AUDIO_INFO0 0x4a36
+#define mmDIG1_AFMT_AUDIO_INFO0 0x4b36
+#define mmDIG2_AFMT_AUDIO_INFO0 0x4c36
+#define mmDIG3_AFMT_AUDIO_INFO0 0x4d36
+#define mmDIG4_AFMT_AUDIO_INFO0 0x4e36
+#define mmDIG5_AFMT_AUDIO_INFO0 0x4f36
+#define mmDIG6_AFMT_AUDIO_INFO0 0x5436
+#define mmDIG7_AFMT_AUDIO_INFO0 0x5636
+#define mmDIG8_AFMT_AUDIO_INFO0 0x5736
+#define mmAFMT_AUDIO_INFO1 0x4a37
+#define mmDIG0_AFMT_AUDIO_INFO1 0x4a37
+#define mmDIG1_AFMT_AUDIO_INFO1 0x4b37
+#define mmDIG2_AFMT_AUDIO_INFO1 0x4c37
+#define mmDIG3_AFMT_AUDIO_INFO1 0x4d37
+#define mmDIG4_AFMT_AUDIO_INFO1 0x4e37
+#define mmDIG5_AFMT_AUDIO_INFO1 0x4f37
+#define mmDIG6_AFMT_AUDIO_INFO1 0x5437
+#define mmDIG7_AFMT_AUDIO_INFO1 0x5637
+#define mmDIG8_AFMT_AUDIO_INFO1 0x5737
+#define mmAFMT_60958_0 0x4a38
+#define mmDIG0_AFMT_60958_0 0x4a38
+#define mmDIG1_AFMT_60958_0 0x4b38
+#define mmDIG2_AFMT_60958_0 0x4c38
+#define mmDIG3_AFMT_60958_0 0x4d38
+#define mmDIG4_AFMT_60958_0 0x4e38
+#define mmDIG5_AFMT_60958_0 0x4f38
+#define mmDIG6_AFMT_60958_0 0x5438
+#define mmDIG7_AFMT_60958_0 0x5638
+#define mmDIG8_AFMT_60958_0 0x5738
+#define mmAFMT_60958_1 0x4a39
+#define mmDIG0_AFMT_60958_1 0x4a39
+#define mmDIG1_AFMT_60958_1 0x4b39
+#define mmDIG2_AFMT_60958_1 0x4c39
+#define mmDIG3_AFMT_60958_1 0x4d39
+#define mmDIG4_AFMT_60958_1 0x4e39
+#define mmDIG5_AFMT_60958_1 0x4f39
+#define mmDIG6_AFMT_60958_1 0x5439
+#define mmDIG7_AFMT_60958_1 0x5639
+#define mmDIG8_AFMT_60958_1 0x5739
+#define mmAFMT_AUDIO_CRC_CONTROL 0x4a3a
+#define mmDIG0_AFMT_AUDIO_CRC_CONTROL 0x4a3a
+#define mmDIG1_AFMT_AUDIO_CRC_CONTROL 0x4b3a
+#define mmDIG2_AFMT_AUDIO_CRC_CONTROL 0x4c3a
+#define mmDIG3_AFMT_AUDIO_CRC_CONTROL 0x4d3a
+#define mmDIG4_AFMT_AUDIO_CRC_CONTROL 0x4e3a
+#define mmDIG5_AFMT_AUDIO_CRC_CONTROL 0x4f3a
+#define mmDIG6_AFMT_AUDIO_CRC_CONTROL 0x543a
+#define mmDIG7_AFMT_AUDIO_CRC_CONTROL 0x563a
+#define mmDIG8_AFMT_AUDIO_CRC_CONTROL 0x573a
+#define mmAFMT_RAMP_CONTROL0 0x4a3b
+#define mmDIG0_AFMT_RAMP_CONTROL0 0x4a3b
+#define mmDIG1_AFMT_RAMP_CONTROL0 0x4b3b
+#define mmDIG2_AFMT_RAMP_CONTROL0 0x4c3b
+#define mmDIG3_AFMT_RAMP_CONTROL0 0x4d3b
+#define mmDIG4_AFMT_RAMP_CONTROL0 0x4e3b
+#define mmDIG5_AFMT_RAMP_CONTROL0 0x4f3b
+#define mmDIG6_AFMT_RAMP_CONTROL0 0x543b
+#define mmDIG7_AFMT_RAMP_CONTROL0 0x563b
+#define mmDIG8_AFMT_RAMP_CONTROL0 0x573b
+#define mmAFMT_RAMP_CONTROL1 0x4a3c
+#define mmDIG0_AFMT_RAMP_CONTROL1 0x4a3c
+#define mmDIG1_AFMT_RAMP_CONTROL1 0x4b3c
+#define mmDIG2_AFMT_RAMP_CONTROL1 0x4c3c
+#define mmDIG3_AFMT_RAMP_CONTROL1 0x4d3c
+#define mmDIG4_AFMT_RAMP_CONTROL1 0x4e3c
+#define mmDIG5_AFMT_RAMP_CONTROL1 0x4f3c
+#define mmDIG6_AFMT_RAMP_CONTROL1 0x543c
+#define mmDIG7_AFMT_RAMP_CONTROL1 0x563c
+#define mmDIG8_AFMT_RAMP_CONTROL1 0x573c
+#define mmAFMT_RAMP_CONTROL2 0x4a3d
+#define mmDIG0_AFMT_RAMP_CONTROL2 0x4a3d
+#define mmDIG1_AFMT_RAMP_CONTROL2 0x4b3d
+#define mmDIG2_AFMT_RAMP_CONTROL2 0x4c3d
+#define mmDIG3_AFMT_RAMP_CONTROL2 0x4d3d
+#define mmDIG4_AFMT_RAMP_CONTROL2 0x4e3d
+#define mmDIG5_AFMT_RAMP_CONTROL2 0x4f3d
+#define mmDIG6_AFMT_RAMP_CONTROL2 0x543d
+#define mmDIG7_AFMT_RAMP_CONTROL2 0x563d
+#define mmDIG8_AFMT_RAMP_CONTROL2 0x573d
+#define mmAFMT_RAMP_CONTROL3 0x4a3e
+#define mmDIG0_AFMT_RAMP_CONTROL3 0x4a3e
+#define mmDIG1_AFMT_RAMP_CONTROL3 0x4b3e
+#define mmDIG2_AFMT_RAMP_CONTROL3 0x4c3e
+#define mmDIG3_AFMT_RAMP_CONTROL3 0x4d3e
+#define mmDIG4_AFMT_RAMP_CONTROL3 0x4e3e
+#define mmDIG5_AFMT_RAMP_CONTROL3 0x4f3e
+#define mmDIG6_AFMT_RAMP_CONTROL3 0x543e
+#define mmDIG7_AFMT_RAMP_CONTROL3 0x563e
+#define mmDIG8_AFMT_RAMP_CONTROL3 0x573e
+#define mmAFMT_60958_2 0x4a3f
+#define mmDIG0_AFMT_60958_2 0x4a3f
+#define mmDIG1_AFMT_60958_2 0x4b3f
+#define mmDIG2_AFMT_60958_2 0x4c3f
+#define mmDIG3_AFMT_60958_2 0x4d3f
+#define mmDIG4_AFMT_60958_2 0x4e3f
+#define mmDIG5_AFMT_60958_2 0x4f3f
+#define mmDIG6_AFMT_60958_2 0x543f
+#define mmDIG7_AFMT_60958_2 0x563f
+#define mmDIG8_AFMT_60958_2 0x573f
+#define mmAFMT_AUDIO_CRC_RESULT 0x4a40
+#define mmDIG0_AFMT_AUDIO_CRC_RESULT 0x4a40
+#define mmDIG1_AFMT_AUDIO_CRC_RESULT 0x4b40
+#define mmDIG2_AFMT_AUDIO_CRC_RESULT 0x4c40
+#define mmDIG3_AFMT_AUDIO_CRC_RESULT 0x4d40
+#define mmDIG4_AFMT_AUDIO_CRC_RESULT 0x4e40
+#define mmDIG5_AFMT_AUDIO_CRC_RESULT 0x4f40
+#define mmDIG6_AFMT_AUDIO_CRC_RESULT 0x5440
+#define mmDIG7_AFMT_AUDIO_CRC_RESULT 0x5640
+#define mmDIG8_AFMT_AUDIO_CRC_RESULT 0x5740
+#define mmAFMT_STATUS 0x4a41
+#define mmDIG0_AFMT_STATUS 0x4a41
+#define mmDIG1_AFMT_STATUS 0x4b41
+#define mmDIG2_AFMT_STATUS 0x4c41
+#define mmDIG3_AFMT_STATUS 0x4d41
+#define mmDIG4_AFMT_STATUS 0x4e41
+#define mmDIG5_AFMT_STATUS 0x4f41
+#define mmDIG6_AFMT_STATUS 0x5441
+#define mmDIG7_AFMT_STATUS 0x5641
+#define mmDIG8_AFMT_STATUS 0x5741
+#define mmAFMT_AUDIO_PACKET_CONTROL 0x4a42
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL 0x4a42
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL 0x4b42
+#define mmDIG2_AFMT_AUDIO_PACKET_CONTROL 0x4c42
+#define mmDIG3_AFMT_AUDIO_PACKET_CONTROL 0x4d42
+#define mmDIG4_AFMT_AUDIO_PACKET_CONTROL 0x4e42
+#define mmDIG5_AFMT_AUDIO_PACKET_CONTROL 0x4f42
+#define mmDIG6_AFMT_AUDIO_PACKET_CONTROL 0x5442
+#define mmDIG7_AFMT_AUDIO_PACKET_CONTROL 0x5642
+#define mmDIG8_AFMT_AUDIO_PACKET_CONTROL 0x5742
+#define mmAFMT_VBI_PACKET_CONTROL 0x4a43
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL 0x4a43
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL 0x4b43
+#define mmDIG2_AFMT_VBI_PACKET_CONTROL 0x4c43
+#define mmDIG3_AFMT_VBI_PACKET_CONTROL 0x4d43
+#define mmDIG4_AFMT_VBI_PACKET_CONTROL 0x4e43
+#define mmDIG5_AFMT_VBI_PACKET_CONTROL 0x4f43
+#define mmDIG6_AFMT_VBI_PACKET_CONTROL 0x5443
+#define mmDIG7_AFMT_VBI_PACKET_CONTROL 0x5643
+#define mmDIG8_AFMT_VBI_PACKET_CONTROL 0x5743
+#define mmAFMT_INFOFRAME_CONTROL0 0x4a44
+#define mmDIG0_AFMT_INFOFRAME_CONTROL0 0x4a44
+#define mmDIG1_AFMT_INFOFRAME_CONTROL0 0x4b44
+#define mmDIG2_AFMT_INFOFRAME_CONTROL0 0x4c44
+#define mmDIG3_AFMT_INFOFRAME_CONTROL0 0x4d44
+#define mmDIG4_AFMT_INFOFRAME_CONTROL0 0x4e44
+#define mmDIG5_AFMT_INFOFRAME_CONTROL0 0x4f44
+#define mmDIG6_AFMT_INFOFRAME_CONTROL0 0x5444
+#define mmDIG7_AFMT_INFOFRAME_CONTROL0 0x5644
+#define mmDIG8_AFMT_INFOFRAME_CONTROL0 0x5744
+#define mmAFMT_AUDIO_SRC_CONTROL 0x4a45
+#define mmDIG0_AFMT_AUDIO_SRC_CONTROL 0x4a45
+#define mmDIG1_AFMT_AUDIO_SRC_CONTROL 0x4b45
+#define mmDIG2_AFMT_AUDIO_SRC_CONTROL 0x4c45
+#define mmDIG3_AFMT_AUDIO_SRC_CONTROL 0x4d45
+#define mmDIG4_AFMT_AUDIO_SRC_CONTROL 0x4e45
+#define mmDIG5_AFMT_AUDIO_SRC_CONTROL 0x4f45
+#define mmDIG6_AFMT_AUDIO_SRC_CONTROL 0x5445
+#define mmDIG7_AFMT_AUDIO_SRC_CONTROL 0x5645
+#define mmDIG8_AFMT_AUDIO_SRC_CONTROL 0x5745
+#define mmAFMT_AUDIO_DBG_DTO_CNTL 0x4a46
+#define mmDIG0_AFMT_AUDIO_DBG_DTO_CNTL 0x4a46
+#define mmDIG1_AFMT_AUDIO_DBG_DTO_CNTL 0x4b46
+#define mmDIG2_AFMT_AUDIO_DBG_DTO_CNTL 0x4c46
+#define mmDIG3_AFMT_AUDIO_DBG_DTO_CNTL 0x4d46
+#define mmDIG4_AFMT_AUDIO_DBG_DTO_CNTL 0x4e46
+#define mmDIG5_AFMT_AUDIO_DBG_DTO_CNTL 0x4f46
+#define mmDIG6_AFMT_AUDIO_DBG_DTO_CNTL 0x5446
+#define mmDIG7_AFMT_AUDIO_DBG_DTO_CNTL 0x5646
+#define mmDIG8_AFMT_AUDIO_DBG_DTO_CNTL 0x5746
+#define mmAFMT_CNTL 0x4a7e
+#define mmDIG0_AFMT_CNTL 0x4a7e
+#define mmDIG1_AFMT_CNTL 0x4b7e
+#define mmDIG2_AFMT_CNTL 0x4c7e
+#define mmDIG3_AFMT_CNTL 0x4d7e
+#define mmDIG4_AFMT_CNTL 0x4e7e
+#define mmDIG5_AFMT_CNTL 0x4f7e
+#define mmDIG6_AFMT_CNTL 0x547e
+#define mmDIG7_AFMT_CNTL 0x567e
+#define mmDIG8_AFMT_CNTL 0x577e
+#define mmDIG_BE_CNTL 0x4a47
+#define mmDIG0_DIG_BE_CNTL 0x4a47
+#define mmDIG1_DIG_BE_CNTL 0x4b47
+#define mmDIG2_DIG_BE_CNTL 0x4c47
+#define mmDIG3_DIG_BE_CNTL 0x4d47
+#define mmDIG4_DIG_BE_CNTL 0x4e47
+#define mmDIG5_DIG_BE_CNTL 0x4f47
+#define mmDIG6_DIG_BE_CNTL 0x5447
+#define mmDIG7_DIG_BE_CNTL 0x5647
+#define mmDIG8_DIG_BE_CNTL 0x5747
+#define mmDIG_BE_EN_CNTL 0x4a48
+#define mmDIG0_DIG_BE_EN_CNTL 0x4a48
+#define mmDIG1_DIG_BE_EN_CNTL 0x4b48
+#define mmDIG2_DIG_BE_EN_CNTL 0x4c48
+#define mmDIG3_DIG_BE_EN_CNTL 0x4d48
+#define mmDIG4_DIG_BE_EN_CNTL 0x4e48
+#define mmDIG5_DIG_BE_EN_CNTL 0x4f48
+#define mmDIG6_DIG_BE_EN_CNTL 0x5448
+#define mmDIG7_DIG_BE_EN_CNTL 0x5648
+#define mmDIG8_DIG_BE_EN_CNTL 0x5748
+#define mmTMDS_CNTL 0x4a6b
+#define mmDIG0_TMDS_CNTL 0x4a6b
+#define mmDIG1_TMDS_CNTL 0x4b6b
+#define mmDIG2_TMDS_CNTL 0x4c6b
+#define mmDIG3_TMDS_CNTL 0x4d6b
+#define mmDIG4_TMDS_CNTL 0x4e6b
+#define mmDIG5_TMDS_CNTL 0x4f6b
+#define mmDIG6_TMDS_CNTL 0x546b
+#define mmDIG7_TMDS_CNTL 0x566b
+#define mmDIG8_TMDS_CNTL 0x576b
+#define mmTMDS_CONTROL_CHAR 0x4a6c
+#define mmDIG0_TMDS_CONTROL_CHAR 0x4a6c
+#define mmDIG1_TMDS_CONTROL_CHAR 0x4b6c
+#define mmDIG2_TMDS_CONTROL_CHAR 0x4c6c
+#define mmDIG3_TMDS_CONTROL_CHAR 0x4d6c
+#define mmDIG4_TMDS_CONTROL_CHAR 0x4e6c
+#define mmDIG5_TMDS_CONTROL_CHAR 0x4f6c
+#define mmDIG6_TMDS_CONTROL_CHAR 0x546c
+#define mmDIG7_TMDS_CONTROL_CHAR 0x566c
+#define mmDIG8_TMDS_CONTROL_CHAR 0x576c
+#define mmTMDS_CONTROL0_FEEDBACK 0x4a6d
+#define mmDIG0_TMDS_CONTROL0_FEEDBACK 0x4a6d
+#define mmDIG1_TMDS_CONTROL0_FEEDBACK 0x4b6d
+#define mmDIG2_TMDS_CONTROL0_FEEDBACK 0x4c6d
+#define mmDIG3_TMDS_CONTROL0_FEEDBACK 0x4d6d
+#define mmDIG4_TMDS_CONTROL0_FEEDBACK 0x4e6d
+#define mmDIG5_TMDS_CONTROL0_FEEDBACK 0x4f6d
+#define mmDIG6_TMDS_CONTROL0_FEEDBACK 0x546d
+#define mmDIG7_TMDS_CONTROL0_FEEDBACK 0x566d
+#define mmDIG8_TMDS_CONTROL0_FEEDBACK 0x576d
+#define mmTMDS_STEREOSYNC_CTL_SEL 0x4a6e
+#define mmDIG0_TMDS_STEREOSYNC_CTL_SEL 0x4a6e
+#define mmDIG1_TMDS_STEREOSYNC_CTL_SEL 0x4b6e
+#define mmDIG2_TMDS_STEREOSYNC_CTL_SEL 0x4c6e
+#define mmDIG3_TMDS_STEREOSYNC_CTL_SEL 0x4d6e
+#define mmDIG4_TMDS_STEREOSYNC_CTL_SEL 0x4e6e
+#define mmDIG5_TMDS_STEREOSYNC_CTL_SEL 0x4f6e
+#define mmDIG6_TMDS_STEREOSYNC_CTL_SEL 0x546e
+#define mmDIG7_TMDS_STEREOSYNC_CTL_SEL 0x566e
+#define mmDIG8_TMDS_STEREOSYNC_CTL_SEL 0x576e
+#define mmTMDS_SYNC_CHAR_PATTERN_0_1 0x4a6f
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_0_1 0x4a6f
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_0_1 0x4b6f
+#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_0_1 0x4c6f
+#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_0_1 0x4d6f
+#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_0_1 0x4e6f
+#define mmDIG5_TMDS_SYNC_CHAR_PATTERN_0_1 0x4f6f
+#define mmDIG6_TMDS_SYNC_CHAR_PATTERN_0_1 0x546f
+#define mmDIG7_TMDS_SYNC_CHAR_PATTERN_0_1 0x566f
+#define mmDIG8_TMDS_SYNC_CHAR_PATTERN_0_1 0x576f
+#define mmTMDS_SYNC_CHAR_PATTERN_2_3 0x4a70
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_2_3 0x4a70
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_2_3 0x4b70
+#define mmDIG2_TMDS_SYNC_CHAR_PATTERN_2_3 0x4c70
+#define mmDIG3_TMDS_SYNC_CHAR_PATTERN_2_3 0x4d70
+#define mmDIG4_TMDS_SYNC_CHAR_PATTERN_2_3 0x4e70
+#define mmDIG5_TMDS_SYNC_CHAR_PATTERN_2_3 0x4f70
+#define mmDIG6_TMDS_SYNC_CHAR_PATTERN_2_3 0x5470
+#define mmDIG7_TMDS_SYNC_CHAR_PATTERN_2_3 0x5670
+#define mmDIG8_TMDS_SYNC_CHAR_PATTERN_2_3 0x5770
+#define mmTMDS_DEBUG 0x4a71
+#define mmDIG0_TMDS_DEBUG 0x4a71
+#define mmDIG1_TMDS_DEBUG 0x4b71
+#define mmDIG2_TMDS_DEBUG 0x4c71
+#define mmDIG3_TMDS_DEBUG 0x4d71
+#define mmDIG4_TMDS_DEBUG 0x4e71
+#define mmDIG5_TMDS_DEBUG 0x4f71
+#define mmDIG6_TMDS_DEBUG 0x5471
+#define mmDIG7_TMDS_DEBUG 0x5671
+#define mmDIG8_TMDS_DEBUG 0x5771
+#define mmTMDS_CTL_BITS 0x4a72
+#define mmDIG0_TMDS_CTL_BITS 0x4a72
+#define mmDIG1_TMDS_CTL_BITS 0x4b72
+#define mmDIG2_TMDS_CTL_BITS 0x4c72
+#define mmDIG3_TMDS_CTL_BITS 0x4d72
+#define mmDIG4_TMDS_CTL_BITS 0x4e72
+#define mmDIG5_TMDS_CTL_BITS 0x4f72
+#define mmDIG6_TMDS_CTL_BITS 0x5472
+#define mmDIG7_TMDS_CTL_BITS 0x5672
+#define mmDIG8_TMDS_CTL_BITS 0x5772
+#define mmTMDS_DCBALANCER_CONTROL 0x4a73
+#define mmDIG0_TMDS_DCBALANCER_CONTROL 0x4a73
+#define mmDIG1_TMDS_DCBALANCER_CONTROL 0x4b73
+#define mmDIG2_TMDS_DCBALANCER_CONTROL 0x4c73
+#define mmDIG3_TMDS_DCBALANCER_CONTROL 0x4d73
+#define mmDIG4_TMDS_DCBALANCER_CONTROL 0x4e73
+#define mmDIG5_TMDS_DCBALANCER_CONTROL 0x4f73
+#define mmDIG6_TMDS_DCBALANCER_CONTROL 0x5473
+#define mmDIG7_TMDS_DCBALANCER_CONTROL 0x5673
+#define mmDIG8_TMDS_DCBALANCER_CONTROL 0x5773
+#define mmTMDS_CTL0_1_GEN_CNTL 0x4a75
+#define mmDIG0_TMDS_CTL0_1_GEN_CNTL 0x4a75
+#define mmDIG1_TMDS_CTL0_1_GEN_CNTL 0x4b75
+#define mmDIG2_TMDS_CTL0_1_GEN_CNTL 0x4c75
+#define mmDIG3_TMDS_CTL0_1_GEN_CNTL 0x4d75
+#define mmDIG4_TMDS_CTL0_1_GEN_CNTL 0x4e75
+#define mmDIG5_TMDS_CTL0_1_GEN_CNTL 0x4f75
+#define mmDIG6_TMDS_CTL0_1_GEN_CNTL 0x5475
+#define mmDIG7_TMDS_CTL0_1_GEN_CNTL 0x5675
+#define mmDIG8_TMDS_CTL0_1_GEN_CNTL 0x5775
+#define mmTMDS_CTL2_3_GEN_CNTL 0x4a76
+#define mmDIG0_TMDS_CTL2_3_GEN_CNTL 0x4a76
+#define mmDIG1_TMDS_CTL2_3_GEN_CNTL 0x4b76
+#define mmDIG2_TMDS_CTL2_3_GEN_CNTL 0x4c76
+#define mmDIG3_TMDS_CTL2_3_GEN_CNTL 0x4d76
+#define mmDIG4_TMDS_CTL2_3_GEN_CNTL 0x4e76
+#define mmDIG5_TMDS_CTL2_3_GEN_CNTL 0x4f76
+#define mmDIG6_TMDS_CTL2_3_GEN_CNTL 0x5476
+#define mmDIG7_TMDS_CTL2_3_GEN_CNTL 0x5676
+#define mmDIG8_TMDS_CTL2_3_GEN_CNTL 0x5776
+#define mmDIG_VERSION 0x4a78
+#define mmDIG0_DIG_VERSION 0x4a78
+#define mmDIG1_DIG_VERSION 0x4b78
+#define mmDIG2_DIG_VERSION 0x4c78
+#define mmDIG3_DIG_VERSION 0x4d78
+#define mmDIG4_DIG_VERSION 0x4e78
+#define mmDIG5_DIG_VERSION 0x4f78
+#define mmDIG6_DIG_VERSION 0x5478
+#define mmDIG7_DIG_VERSION 0x5678
+#define mmDIG8_DIG_VERSION 0x5778
+#define mmDIG_LANE_ENABLE 0x4a79
+#define mmDIG0_DIG_LANE_ENABLE 0x4a79
+#define mmDIG1_DIG_LANE_ENABLE 0x4b79
+#define mmDIG2_DIG_LANE_ENABLE 0x4c79
+#define mmDIG3_DIG_LANE_ENABLE 0x4d79
+#define mmDIG4_DIG_LANE_ENABLE 0x4e79
+#define mmDIG5_DIG_LANE_ENABLE 0x4f79
+#define mmDIG6_DIG_LANE_ENABLE 0x5479
+#define mmDIG7_DIG_LANE_ENABLE 0x5679
+#define mmDIG8_DIG_LANE_ENABLE 0x5779
+#define mmDIG_TEST_DEBUG_INDEX 0x4a7a
+#define mmDIG0_DIG_TEST_DEBUG_INDEX 0x4a7a
+#define mmDIG1_DIG_TEST_DEBUG_INDEX 0x4b7a
+#define mmDIG2_DIG_TEST_DEBUG_INDEX 0x4c7a
+#define mmDIG3_DIG_TEST_DEBUG_INDEX 0x4d7a
+#define mmDIG4_DIG_TEST_DEBUG_INDEX 0x4e7a
+#define mmDIG5_DIG_TEST_DEBUG_INDEX 0x4f7a
+#define mmDIG6_DIG_TEST_DEBUG_INDEX 0x547a
+#define mmDIG7_DIG_TEST_DEBUG_INDEX 0x567a
+#define mmDIG8_DIG_TEST_DEBUG_INDEX 0x577a
+#define mmDIG_TEST_DEBUG_DATA 0x4a7b
+#define mmDIG0_DIG_TEST_DEBUG_DATA 0x4a7b
+#define mmDIG1_DIG_TEST_DEBUG_DATA 0x4b7b
+#define mmDIG2_DIG_TEST_DEBUG_DATA 0x4c7b
+#define mmDIG3_DIG_TEST_DEBUG_DATA 0x4d7b
+#define mmDIG4_DIG_TEST_DEBUG_DATA 0x4e7b
+#define mmDIG5_DIG_TEST_DEBUG_DATA 0x4f7b
+#define mmDIG6_DIG_TEST_DEBUG_DATA 0x547b
+#define mmDIG7_DIG_TEST_DEBUG_DATA 0x567b
+#define mmDIG8_DIG_TEST_DEBUG_DATA 0x577b
+#define mmDIG_FE_TEST_DEBUG_INDEX 0x4a7c
+#define mmDIG0_DIG_FE_TEST_DEBUG_INDEX 0x4a7c
+#define mmDIG1_DIG_FE_TEST_DEBUG_INDEX 0x4b7c
+#define mmDIG2_DIG_FE_TEST_DEBUG_INDEX 0x4c7c
+#define mmDIG3_DIG_FE_TEST_DEBUG_INDEX 0x4d7c
+#define mmDIG4_DIG_FE_TEST_DEBUG_INDEX 0x4e7c
+#define mmDIG5_DIG_FE_TEST_DEBUG_INDEX 0x4f7c
+#define mmDIG6_DIG_FE_TEST_DEBUG_INDEX 0x547c
+#define mmDIG7_DIG_FE_TEST_DEBUG_INDEX 0x567c
+#define mmDIG8_DIG_FE_TEST_DEBUG_INDEX 0x577c
+#define mmDIG_FE_TEST_DEBUG_DATA 0x4a7d
+#define mmDIG0_DIG_FE_TEST_DEBUG_DATA 0x4a7d
+#define mmDIG1_DIG_FE_TEST_DEBUG_DATA 0x4b7d
+#define mmDIG2_DIG_FE_TEST_DEBUG_DATA 0x4c7d
+#define mmDIG3_DIG_FE_TEST_DEBUG_DATA 0x4d7d
+#define mmDIG4_DIG_FE_TEST_DEBUG_DATA 0x4e7d
+#define mmDIG5_DIG_FE_TEST_DEBUG_DATA 0x4f7d
+#define mmDIG6_DIG_FE_TEST_DEBUG_DATA 0x547d
+#define mmDIG7_DIG_FE_TEST_DEBUG_DATA 0x567d
+#define mmDIG8_DIG_FE_TEST_DEBUG_DATA 0x577d
+#define mmDMCU_CTRL 0x1600
+#define mmDMCU_STATUS 0x1601
+#define mmDMCU_PC_START_ADDR 0x1602
+#define mmDMCU_FW_START_ADDR 0x1603
+#define mmDMCU_FW_END_ADDR 0x1604
+#define mmDMCU_FW_ISR_START_ADDR 0x1605
+#define mmDMCU_FW_CS_HI 0x1606
+#define mmDMCU_FW_CS_LO 0x1607
+#define mmDMCU_RAM_ACCESS_CTRL 0x1608
+#define mmDMCU_ERAM_WR_CTRL 0x1609
+#define mmDMCU_ERAM_WR_DATA 0x160a
+#define mmDMCU_ERAM_RD_CTRL 0x160b
+#define mmDMCU_ERAM_RD_DATA 0x160c
+#define mmDMCU_IRAM_WR_CTRL 0x160d
+#define mmDMCU_IRAM_WR_DATA 0x160e
+#define mmDMCU_IRAM_RD_CTRL 0x160f
+#define mmDMCU_IRAM_RD_DATA 0x1610
+#define mmDMCU_EVENT_TRIGGER 0x1611
+#define mmDMCU_UC_INTERNAL_INT_STATUS 0x1612
+#define mmDMCU_SS_INTERRUPT_CNTL_STATUS 0x1613
+#define mmDMCU_INTERRUPT_STATUS 0x1614
+#define mmDMCU_INTERRUPT_STATUS_1 0x1633
+#define mmDMCU_INTERRUPT_TO_HOST_EN_MASK 0x1615
+#define mmDMCU_INTERRUPT_TO_UC_EN_MASK 0x1616
+#define mmDMCU_INTERRUPT_TO_UC_EN_MASK_1 0x1631
+#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL 0x1617
+#define mmDMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1 0x1632
+#define mmDC_DMCU_SCRATCH 0x1618
+#define mmDMCU_INT_CNT 0x1619
+#define mmDMCU_FW_CHECKSUM_SMPL_BYTE_POS 0x161a
+#define mmDMCU_UC_CLK_GATING_CNTL 0x161b
+#define mmMASTER_COMM_DATA_REG1 0x161c
+#define mmMASTER_COMM_DATA_REG2 0x161d
+#define mmMASTER_COMM_DATA_REG3 0x161e
+#define mmMASTER_COMM_CMD_REG 0x161f
+#define mmMASTER_COMM_CNTL_REG 0x1620
+#define mmSLAVE_COMM_DATA_REG1 0x1621
+#define mmSLAVE_COMM_DATA_REG2 0x1622
+#define mmSLAVE_COMM_DATA_REG3 0x1623
+#define mmSLAVE_COMM_CMD_REG 0x1624
+#define mmSLAVE_COMM_CNTL_REG 0x1625
+#define mmDMCU_TEST_DEBUG_INDEX 0x1626
+#define mmDMCU_TEST_DEBUG_DATA 0x1627
+#define mmDMCU_PERFMON_INTERRUPT_STATUS1 0x1644
+#define mmDMCU_PERFMON_INTERRUPT_STATUS2 0x1645
+#define mmDMCU_PERFMON_INTERRUPT_STATUS3 0x1646
+#define mmDMCU_PERFMON_INTERRUPT_STATUS4 0x1647
+#define mmDMCU_PERFMON_INTERRUPT_STATUS5 0x1642
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1 0x1674
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2 0x1675
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3 0x1676
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4 0x1677
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5 0x1643
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1 0x1678
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2 0x1679
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3 0x167a
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4 0x167b
+#define mmDMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5 0x1673
+#define mmDMCU_DPRX_INTERRUPT_STATUS1 0x1634
+#define mmDMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1 0x1635
+#define mmDMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1 0x1636
+#define mmDP_LINK_CNTL 0x4aa0
+#define mmDP0_DP_LINK_CNTL 0x4aa0
+#define mmDP1_DP_LINK_CNTL 0x4ba0
+#define mmDP2_DP_LINK_CNTL 0x4ca0
+#define mmDP3_DP_LINK_CNTL 0x4da0
+#define mmDP4_DP_LINK_CNTL 0x4ea0
+#define mmDP5_DP_LINK_CNTL 0x4fa0
+#define mmDP6_DP_LINK_CNTL 0x54a0
+#define mmDP7_DP_LINK_CNTL 0x56a0
+#define mmDP8_DP_LINK_CNTL 0x57a0
+#define mmDP_PIXEL_FORMAT 0x4aa1
+#define mmDP0_DP_PIXEL_FORMAT 0x4aa1
+#define mmDP1_DP_PIXEL_FORMAT 0x4ba1
+#define mmDP2_DP_PIXEL_FORMAT 0x4ca1
+#define mmDP3_DP_PIXEL_FORMAT 0x4da1
+#define mmDP4_DP_PIXEL_FORMAT 0x4ea1
+#define mmDP5_DP_PIXEL_FORMAT 0x4fa1
+#define mmDP6_DP_PIXEL_FORMAT 0x54a1
+#define mmDP7_DP_PIXEL_FORMAT 0x56a1
+#define mmDP8_DP_PIXEL_FORMAT 0x57a1
+#define mmDP_MSA_COLORIMETRY 0x4aa2
+#define mmDP0_DP_MSA_COLORIMETRY 0x4aa2
+#define mmDP1_DP_MSA_COLORIMETRY 0x4ba2
+#define mmDP2_DP_MSA_COLORIMETRY 0x4ca2
+#define mmDP3_DP_MSA_COLORIMETRY 0x4da2
+#define mmDP4_DP_MSA_COLORIMETRY 0x4ea2
+#define mmDP5_DP_MSA_COLORIMETRY 0x4fa2
+#define mmDP6_DP_MSA_COLORIMETRY 0x54a2
+#define mmDP7_DP_MSA_COLORIMETRY 0x56a2
+#define mmDP8_DP_MSA_COLORIMETRY 0x57a2
+#define mmDP_CONFIG 0x4aa3
+#define mmDP0_DP_CONFIG 0x4aa3
+#define mmDP1_DP_CONFIG 0x4ba3
+#define mmDP2_DP_CONFIG 0x4ca3
+#define mmDP3_DP_CONFIG 0x4da3
+#define mmDP4_DP_CONFIG 0x4ea3
+#define mmDP5_DP_CONFIG 0x4fa3
+#define mmDP6_DP_CONFIG 0x54a3
+#define mmDP7_DP_CONFIG 0x56a3
+#define mmDP8_DP_CONFIG 0x57a3
+#define mmDP_VID_STREAM_CNTL 0x4aa4
+#define mmDP0_DP_VID_STREAM_CNTL 0x4aa4
+#define mmDP1_DP_VID_STREAM_CNTL 0x4ba4
+#define mmDP2_DP_VID_STREAM_CNTL 0x4ca4
+#define mmDP3_DP_VID_STREAM_CNTL 0x4da4
+#define mmDP4_DP_VID_STREAM_CNTL 0x4ea4
+#define mmDP5_DP_VID_STREAM_CNTL 0x4fa4
+#define mmDP6_DP_VID_STREAM_CNTL 0x54a4
+#define mmDP7_DP_VID_STREAM_CNTL 0x56a4
+#define mmDP8_DP_VID_STREAM_CNTL 0x57a4
+#define mmDP_STEER_FIFO 0x4aa5
+#define mmDP0_DP_STEER_FIFO 0x4aa5
+#define mmDP1_DP_STEER_FIFO 0x4ba5
+#define mmDP2_DP_STEER_FIFO 0x4ca5
+#define mmDP3_DP_STEER_FIFO 0x4da5
+#define mmDP4_DP_STEER_FIFO 0x4ea5
+#define mmDP5_DP_STEER_FIFO 0x4fa5
+#define mmDP6_DP_STEER_FIFO 0x54a5
+#define mmDP7_DP_STEER_FIFO 0x56a5
+#define mmDP8_DP_STEER_FIFO 0x57a5
+#define mmDP_MSA_MISC 0x4aa6
+#define mmDP0_DP_MSA_MISC 0x4aa6
+#define mmDP1_DP_MSA_MISC 0x4ba6
+#define mmDP2_DP_MSA_MISC 0x4ca6
+#define mmDP3_DP_MSA_MISC 0x4da6
+#define mmDP4_DP_MSA_MISC 0x4ea6
+#define mmDP5_DP_MSA_MISC 0x4fa6
+#define mmDP6_DP_MSA_MISC 0x54a6
+#define mmDP7_DP_MSA_MISC 0x56a6
+#define mmDP8_DP_MSA_MISC 0x57a6
+#define mmDP_VID_TIMING 0x4aa8
+#define mmDP0_DP_VID_TIMING 0x4aa8
+#define mmDP1_DP_VID_TIMING 0x4ba8
+#define mmDP2_DP_VID_TIMING 0x4ca8
+#define mmDP3_DP_VID_TIMING 0x4da8
+#define mmDP4_DP_VID_TIMING 0x4ea8
+#define mmDP5_DP_VID_TIMING 0x4fa8
+#define mmDP6_DP_VID_TIMING 0x54a8
+#define mmDP7_DP_VID_TIMING 0x56a8
+#define mmDP8_DP_VID_TIMING 0x57a8
+#define mmDP_VID_N 0x4aa9
+#define mmDP0_DP_VID_N 0x4aa9
+#define mmDP1_DP_VID_N 0x4ba9
+#define mmDP2_DP_VID_N 0x4ca9
+#define mmDP3_DP_VID_N 0x4da9
+#define mmDP4_DP_VID_N 0x4ea9
+#define mmDP5_DP_VID_N 0x4fa9
+#define mmDP6_DP_VID_N 0x54a9
+#define mmDP7_DP_VID_N 0x56a9
+#define mmDP8_DP_VID_N 0x57a9
+#define mmDP_VID_M 0x4aaa
+#define mmDP0_DP_VID_M 0x4aaa
+#define mmDP1_DP_VID_M 0x4baa
+#define mmDP2_DP_VID_M 0x4caa
+#define mmDP3_DP_VID_M 0x4daa
+#define mmDP4_DP_VID_M 0x4eaa
+#define mmDP5_DP_VID_M 0x4faa
+#define mmDP6_DP_VID_M 0x54aa
+#define mmDP7_DP_VID_M 0x56aa
+#define mmDP8_DP_VID_M 0x57aa
+#define mmDP_LINK_FRAMING_CNTL 0x4aab
+#define mmDP0_DP_LINK_FRAMING_CNTL 0x4aab
+#define mmDP1_DP_LINK_FRAMING_CNTL 0x4bab
+#define mmDP2_DP_LINK_FRAMING_CNTL 0x4cab
+#define mmDP3_DP_LINK_FRAMING_CNTL 0x4dab
+#define mmDP4_DP_LINK_FRAMING_CNTL 0x4eab
+#define mmDP5_DP_LINK_FRAMING_CNTL 0x4fab
+#define mmDP6_DP_LINK_FRAMING_CNTL 0x54ab
+#define mmDP7_DP_LINK_FRAMING_CNTL 0x56ab
+#define mmDP8_DP_LINK_FRAMING_CNTL 0x57ab
+#define mmDP_HBR2_EYE_PATTERN 0x4aac
+#define mmDP0_DP_HBR2_EYE_PATTERN 0x4aac
+#define mmDP1_DP_HBR2_EYE_PATTERN 0x4bac
+#define mmDP2_DP_HBR2_EYE_PATTERN 0x4cac
+#define mmDP3_DP_HBR2_EYE_PATTERN 0x4dac
+#define mmDP4_DP_HBR2_EYE_PATTERN 0x4eac
+#define mmDP5_DP_HBR2_EYE_PATTERN 0x4fac
+#define mmDP6_DP_HBR2_EYE_PATTERN 0x54ac
+#define mmDP7_DP_HBR2_EYE_PATTERN 0x56ac
+#define mmDP8_DP_HBR2_EYE_PATTERN 0x57ac
+#define mmDP_VID_MSA_VBID 0x4aad
+#define mmDP0_DP_VID_MSA_VBID 0x4aad
+#define mmDP1_DP_VID_MSA_VBID 0x4bad
+#define mmDP2_DP_VID_MSA_VBID 0x4cad
+#define mmDP3_DP_VID_MSA_VBID 0x4dad
+#define mmDP4_DP_VID_MSA_VBID 0x4ead
+#define mmDP5_DP_VID_MSA_VBID 0x4fad
+#define mmDP6_DP_VID_MSA_VBID 0x54ad
+#define mmDP7_DP_VID_MSA_VBID 0x56ad
+#define mmDP8_DP_VID_MSA_VBID 0x57ad
+#define mmDP_VID_INTERRUPT_CNTL 0x4aae
+#define mmDP0_DP_VID_INTERRUPT_CNTL 0x4aae
+#define mmDP1_DP_VID_INTERRUPT_CNTL 0x4bae
+#define mmDP2_DP_VID_INTERRUPT_CNTL 0x4cae
+#define mmDP3_DP_VID_INTERRUPT_CNTL 0x4dae
+#define mmDP4_DP_VID_INTERRUPT_CNTL 0x4eae
+#define mmDP5_DP_VID_INTERRUPT_CNTL 0x4fae
+#define mmDP6_DP_VID_INTERRUPT_CNTL 0x54ae
+#define mmDP7_DP_VID_INTERRUPT_CNTL 0x56ae
+#define mmDP8_DP_VID_INTERRUPT_CNTL 0x57ae
+#define mmDP_DPHY_CNTL 0x4aaf
+#define mmDP0_DP_DPHY_CNTL 0x4aaf
+#define mmDP1_DP_DPHY_CNTL 0x4baf
+#define mmDP2_DP_DPHY_CNTL 0x4caf
+#define mmDP3_DP_DPHY_CNTL 0x4daf
+#define mmDP4_DP_DPHY_CNTL 0x4eaf
+#define mmDP5_DP_DPHY_CNTL 0x4faf
+#define mmDP6_DP_DPHY_CNTL 0x54af
+#define mmDP7_DP_DPHY_CNTL 0x56af
+#define mmDP8_DP_DPHY_CNTL 0x57af
+#define mmDP_DPHY_TRAINING_PATTERN_SEL 0x4ab0
+#define mmDP0_DP_DPHY_TRAINING_PATTERN_SEL 0x4ab0
+#define mmDP1_DP_DPHY_TRAINING_PATTERN_SEL 0x4bb0
+#define mmDP2_DP_DPHY_TRAINING_PATTERN_SEL 0x4cb0
+#define mmDP3_DP_DPHY_TRAINING_PATTERN_SEL 0x4db0
+#define mmDP4_DP_DPHY_TRAINING_PATTERN_SEL 0x4eb0
+#define mmDP5_DP_DPHY_TRAINING_PATTERN_SEL 0x4fb0
+#define mmDP6_DP_DPHY_TRAINING_PATTERN_SEL 0x54b0
+#define mmDP7_DP_DPHY_TRAINING_PATTERN_SEL 0x56b0
+#define mmDP8_DP_DPHY_TRAINING_PATTERN_SEL 0x57b0
+#define mmDP_DPHY_SYM0 0x4ab1
+#define mmDP0_DP_DPHY_SYM0 0x4ab1
+#define mmDP1_DP_DPHY_SYM0 0x4bb1
+#define mmDP2_DP_DPHY_SYM0 0x4cb1
+#define mmDP3_DP_DPHY_SYM0 0x4db1
+#define mmDP4_DP_DPHY_SYM0 0x4eb1
+#define mmDP5_DP_DPHY_SYM0 0x4fb1
+#define mmDP6_DP_DPHY_SYM0 0x54b1
+#define mmDP7_DP_DPHY_SYM0 0x56b1
+#define mmDP8_DP_DPHY_SYM0 0x57b1
+#define mmDP_DPHY_SYM1 0x4ab2
+#define mmDP0_DP_DPHY_SYM1 0x4ab2
+#define mmDP1_DP_DPHY_SYM1 0x4bb2
+#define mmDP2_DP_DPHY_SYM1 0x4cb2
+#define mmDP3_DP_DPHY_SYM1 0x4db2
+#define mmDP4_DP_DPHY_SYM1 0x4eb2
+#define mmDP5_DP_DPHY_SYM1 0x4fb2
+#define mmDP6_DP_DPHY_SYM1 0x54b2
+#define mmDP7_DP_DPHY_SYM1 0x56b2
+#define mmDP8_DP_DPHY_SYM1 0x57b2
+#define mmDP_DPHY_SYM2 0x4ab3
+#define mmDP0_DP_DPHY_SYM2 0x4ab3
+#define mmDP1_DP_DPHY_SYM2 0x4bb3
+#define mmDP2_DP_DPHY_SYM2 0x4cb3
+#define mmDP3_DP_DPHY_SYM2 0x4db3
+#define mmDP4_DP_DPHY_SYM2 0x4eb3
+#define mmDP5_DP_DPHY_SYM2 0x4fb3
+#define mmDP6_DP_DPHY_SYM2 0x54b3
+#define mmDP7_DP_DPHY_SYM2 0x56b3
+#define mmDP8_DP_DPHY_SYM2 0x57b3
+#define mmDP_DPHY_8B10B_CNTL 0x4ab4
+#define mmDP0_DP_DPHY_8B10B_CNTL 0x4ab4
+#define mmDP1_DP_DPHY_8B10B_CNTL 0x4bb4
+#define mmDP2_DP_DPHY_8B10B_CNTL 0x4cb4
+#define mmDP3_DP_DPHY_8B10B_CNTL 0x4db4
+#define mmDP4_DP_DPHY_8B10B_CNTL 0x4eb4
+#define mmDP5_DP_DPHY_8B10B_CNTL 0x4fb4
+#define mmDP6_DP_DPHY_8B10B_CNTL 0x54b4
+#define mmDP7_DP_DPHY_8B10B_CNTL 0x56b4
+#define mmDP8_DP_DPHY_8B10B_CNTL 0x57b4
+#define mmDP_DPHY_PRBS_CNTL 0x4ab5
+#define mmDP0_DP_DPHY_PRBS_CNTL 0x4ab5
+#define mmDP1_DP_DPHY_PRBS_CNTL 0x4bb5
+#define mmDP2_DP_DPHY_PRBS_CNTL 0x4cb5
+#define mmDP3_DP_DPHY_PRBS_CNTL 0x4db5
+#define mmDP4_DP_DPHY_PRBS_CNTL 0x4eb5
+#define mmDP5_DP_DPHY_PRBS_CNTL 0x4fb5
+#define mmDP6_DP_DPHY_PRBS_CNTL 0x54b5
+#define mmDP7_DP_DPHY_PRBS_CNTL 0x56b5
+#define mmDP8_DP_DPHY_PRBS_CNTL 0x57b5
+#define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4adc
+#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4adc
+#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4bdc
+#define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4cdc
+#define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4ddc
+#define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4edc
+#define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4fdc
+#define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54dc
+#define mmDP7_DP_DPHY_BS_SR_SWAP_CNTL 0x56dc
+#define mmDP8_DP_DPHY_BS_SR_SWAP_CNTL 0x57dc
+#define mmDP_DPHY_CRC_EN 0x4ab7
+#define mmDP0_DP_DPHY_CRC_EN 0x4ab7
+#define mmDP1_DP_DPHY_CRC_EN 0x4bb7
+#define mmDP2_DP_DPHY_CRC_EN 0x4cb7
+#define mmDP3_DP_DPHY_CRC_EN 0x4db7
+#define mmDP4_DP_DPHY_CRC_EN 0x4eb7
+#define mmDP5_DP_DPHY_CRC_EN 0x4fb7
+#define mmDP6_DP_DPHY_CRC_EN 0x54b7
+#define mmDP7_DP_DPHY_CRC_EN 0x56b7
+#define mmDP8_DP_DPHY_CRC_EN 0x57b7
+#define mmDP_DPHY_CRC_CNTL 0x4ab8
+#define mmDP0_DP_DPHY_CRC_CNTL 0x4ab8
+#define mmDP1_DP_DPHY_CRC_CNTL 0x4bb8
+#define mmDP2_DP_DPHY_CRC_CNTL 0x4cb8
+#define mmDP3_DP_DPHY_CRC_CNTL 0x4db8
+#define mmDP4_DP_DPHY_CRC_CNTL 0x4eb8
+#define mmDP5_DP_DPHY_CRC_CNTL 0x4fb8
+#define mmDP6_DP_DPHY_CRC_CNTL 0x54b8
+#define mmDP7_DP_DPHY_CRC_CNTL 0x56b8
+#define mmDP8_DP_DPHY_CRC_CNTL 0x57b8
+#define mmDP_DPHY_CRC_RESULT 0x4ab9
+#define mmDP0_DP_DPHY_CRC_RESULT 0x4ab9
+#define mmDP1_DP_DPHY_CRC_RESULT 0x4bb9
+#define mmDP2_DP_DPHY_CRC_RESULT 0x4cb9
+#define mmDP3_DP_DPHY_CRC_RESULT 0x4db9
+#define mmDP4_DP_DPHY_CRC_RESULT 0x4eb9
+#define mmDP5_DP_DPHY_CRC_RESULT 0x4fb9
+#define mmDP6_DP_DPHY_CRC_RESULT 0x54b9
+#define mmDP7_DP_DPHY_CRC_RESULT 0x56b9
+#define mmDP8_DP_DPHY_CRC_RESULT 0x57b9
+#define mmDP_DPHY_CRC_MST_CNTL 0x4aba
+#define mmDP0_DP_DPHY_CRC_MST_CNTL 0x4aba
+#define mmDP1_DP_DPHY_CRC_MST_CNTL 0x4bba
+#define mmDP2_DP_DPHY_CRC_MST_CNTL 0x4cba
+#define mmDP3_DP_DPHY_CRC_MST_CNTL 0x4dba
+#define mmDP4_DP_DPHY_CRC_MST_CNTL 0x4eba
+#define mmDP5_DP_DPHY_CRC_MST_CNTL 0x4fba
+#define mmDP6_DP_DPHY_CRC_MST_CNTL 0x54ba
+#define mmDP7_DP_DPHY_CRC_MST_CNTL 0x56ba
+#define mmDP8_DP_DPHY_CRC_MST_CNTL 0x57ba
+#define mmDP_DPHY_CRC_MST_STATUS 0x4abb
+#define mmDP0_DP_DPHY_CRC_MST_STATUS 0x4abb
+#define mmDP1_DP_DPHY_CRC_MST_STATUS 0x4bbb
+#define mmDP2_DP_DPHY_CRC_MST_STATUS 0x4cbb
+#define mmDP3_DP_DPHY_CRC_MST_STATUS 0x4dbb
+#define mmDP4_DP_DPHY_CRC_MST_STATUS 0x4ebb
+#define mmDP5_DP_DPHY_CRC_MST_STATUS 0x4fbb
+#define mmDP6_DP_DPHY_CRC_MST_STATUS 0x54bb
+#define mmDP7_DP_DPHY_CRC_MST_STATUS 0x56bb
+#define mmDP8_DP_DPHY_CRC_MST_STATUS 0x57bb
+#define mmDP_DPHY_FAST_TRAINING 0x4abc
+#define mmDP0_DP_DPHY_FAST_TRAINING 0x4abc
+#define mmDP1_DP_DPHY_FAST_TRAINING 0x4bbc
+#define mmDP2_DP_DPHY_FAST_TRAINING 0x4cbc
+#define mmDP3_DP_DPHY_FAST_TRAINING 0x4dbc
+#define mmDP4_DP_DPHY_FAST_TRAINING 0x4ebc
+#define mmDP5_DP_DPHY_FAST_TRAINING 0x4fbc
+#define mmDP6_DP_DPHY_FAST_TRAINING 0x54bc
+#define mmDP7_DP_DPHY_FAST_TRAINING 0x56bc
+#define mmDP8_DP_DPHY_FAST_TRAINING 0x57bc
+#define mmDP_DPHY_FAST_TRAINING_STATUS 0x4abd
+#define mmDP0_DP_DPHY_FAST_TRAINING_STATUS 0x4abd
+#define mmDP1_DP_DPHY_FAST_TRAINING_STATUS 0x4bbd
+#define mmDP2_DP_DPHY_FAST_TRAINING_STATUS 0x4cbd
+#define mmDP3_DP_DPHY_FAST_TRAINING_STATUS 0x4dbd
+#define mmDP4_DP_DPHY_FAST_TRAINING_STATUS 0x4ebd
+#define mmDP5_DP_DPHY_FAST_TRAINING_STATUS 0x4fbd
+#define mmDP6_DP_DPHY_FAST_TRAINING_STATUS 0x54bd
+#define mmDP7_DP_DPHY_FAST_TRAINING_STATUS 0x56bd
+#define mmDP8_DP_DPHY_FAST_TRAINING_STATUS 0x57bd
+#define mmDP_DPHY_HBR2_PATTERN_CONTROL 0x4add
+#define mmDP0_DP_DPHY_HBR2_PATTERN_CONTROL 0x4add
+#define mmDP1_DP_DPHY_HBR2_PATTERN_CONTROL 0x4bdd
+#define mmDP2_DP_DPHY_HBR2_PATTERN_CONTROL 0x4cdd
+#define mmDP3_DP_DPHY_HBR2_PATTERN_CONTROL 0x4ddd
+#define mmDP4_DP_DPHY_HBR2_PATTERN_CONTROL 0x4edd
+#define mmDP5_DP_DPHY_HBR2_PATTERN_CONTROL 0x4fdd
+#define mmDP6_DP_DPHY_HBR2_PATTERN_CONTROL 0x54dd
+#define mmDP7_DP_DPHY_HBR2_PATTERN_CONTROL 0x56dd
+#define mmDP8_DP_DPHY_HBR2_PATTERN_CONTROL 0x57dd
+#define mmDP_MSA_V_TIMING_OVERRIDE1 0x4abe
+#define mmDP0_DP_MSA_V_TIMING_OVERRIDE1 0x4abe
+#define mmDP1_DP_MSA_V_TIMING_OVERRIDE1 0x4bbe
+#define mmDP2_DP_MSA_V_TIMING_OVERRIDE1 0x4cbe
+#define mmDP3_DP_MSA_V_TIMING_OVERRIDE1 0x4dbe
+#define mmDP4_DP_MSA_V_TIMING_OVERRIDE1 0x4ebe
+#define mmDP5_DP_MSA_V_TIMING_OVERRIDE1 0x4fbe
+#define mmDP6_DP_MSA_V_TIMING_OVERRIDE1 0x54be
+#define mmDP7_DP_MSA_V_TIMING_OVERRIDE1 0x56be
+#define mmDP8_DP_MSA_V_TIMING_OVERRIDE1 0x57be
+#define mmDP_MSA_V_TIMING_OVERRIDE2 0x4abf
+#define mmDP0_DP_MSA_V_TIMING_OVERRIDE2 0x4abf
+#define mmDP1_DP_MSA_V_TIMING_OVERRIDE2 0x4bbf
+#define mmDP2_DP_MSA_V_TIMING_OVERRIDE2 0x4cbf
+#define mmDP3_DP_MSA_V_TIMING_OVERRIDE2 0x4dbf
+#define mmDP4_DP_MSA_V_TIMING_OVERRIDE2 0x4ebf
+#define mmDP5_DP_MSA_V_TIMING_OVERRIDE2 0x4fbf
+#define mmDP6_DP_MSA_V_TIMING_OVERRIDE2 0x54bf
+#define mmDP7_DP_MSA_V_TIMING_OVERRIDE2 0x56bf
+#define mmDP8_DP_MSA_V_TIMING_OVERRIDE2 0x57bf
+#define mmDP_SEC_CNTL 0x4ac3
+#define mmDP0_DP_SEC_CNTL 0x4ac3
+#define mmDP1_DP_SEC_CNTL 0x4bc3
+#define mmDP2_DP_SEC_CNTL 0x4cc3
+#define mmDP3_DP_SEC_CNTL 0x4dc3
+#define mmDP4_DP_SEC_CNTL 0x4ec3
+#define mmDP5_DP_SEC_CNTL 0x4fc3
+#define mmDP6_DP_SEC_CNTL 0x54c3
+#define mmDP7_DP_SEC_CNTL 0x56c3
+#define mmDP8_DP_SEC_CNTL 0x57c3
+#define mmDP_SEC_CNTL1 0x4ac4
+#define mmDP0_DP_SEC_CNTL1 0x4ac4
+#define mmDP1_DP_SEC_CNTL1 0x4bc4
+#define mmDP2_DP_SEC_CNTL1 0x4cc4
+#define mmDP3_DP_SEC_CNTL1 0x4dc4
+#define mmDP4_DP_SEC_CNTL1 0x4ec4
+#define mmDP5_DP_SEC_CNTL1 0x4fc4
+#define mmDP6_DP_SEC_CNTL1 0x54c4
+#define mmDP7_DP_SEC_CNTL1 0x56c4
+#define mmDP8_DP_SEC_CNTL1 0x57c4
+#define mmDP_SEC_FRAMING1 0x4ac5
+#define mmDP0_DP_SEC_FRAMING1 0x4ac5
+#define mmDP1_DP_SEC_FRAMING1 0x4bc5
+#define mmDP2_DP_SEC_FRAMING1 0x4cc5
+#define mmDP3_DP_SEC_FRAMING1 0x4dc5
+#define mmDP4_DP_SEC_FRAMING1 0x4ec5
+#define mmDP5_DP_SEC_FRAMING1 0x4fc5
+#define mmDP6_DP_SEC_FRAMING1 0x54c5
+#define mmDP7_DP_SEC_FRAMING1 0x56c5
+#define mmDP8_DP_SEC_FRAMING1 0x57c5
+#define mmDP_SEC_FRAMING2 0x4ac6
+#define mmDP0_DP_SEC_FRAMING2 0x4ac6
+#define mmDP1_DP_SEC_FRAMING2 0x4bc6
+#define mmDP2_DP_SEC_FRAMING2 0x4cc6
+#define mmDP3_DP_SEC_FRAMING2 0x4dc6
+#define mmDP4_DP_SEC_FRAMING2 0x4ec6
+#define mmDP5_DP_SEC_FRAMING2 0x4fc6
+#define mmDP6_DP_SEC_FRAMING2 0x54c6
+#define mmDP7_DP_SEC_FRAMING2 0x56c6
+#define mmDP8_DP_SEC_FRAMING2 0x57c6
+#define mmDP_SEC_FRAMING3 0x4ac7
+#define mmDP0_DP_SEC_FRAMING3 0x4ac7
+#define mmDP1_DP_SEC_FRAMING3 0x4bc7
+#define mmDP2_DP_SEC_FRAMING3 0x4cc7
+#define mmDP3_DP_SEC_FRAMING3 0x4dc7
+#define mmDP4_DP_SEC_FRAMING3 0x4ec7
+#define mmDP5_DP_SEC_FRAMING3 0x4fc7
+#define mmDP6_DP_SEC_FRAMING3 0x54c7
+#define mmDP7_DP_SEC_FRAMING3 0x56c7
+#define mmDP8_DP_SEC_FRAMING3 0x57c7
+#define mmDP_SEC_FRAMING4 0x4ac8
+#define mmDP0_DP_SEC_FRAMING4 0x4ac8
+#define mmDP1_DP_SEC_FRAMING4 0x4bc8
+#define mmDP2_DP_SEC_FRAMING4 0x4cc8
+#define mmDP3_DP_SEC_FRAMING4 0x4dc8
+#define mmDP4_DP_SEC_FRAMING4 0x4ec8
+#define mmDP5_DP_SEC_FRAMING4 0x4fc8
+#define mmDP6_DP_SEC_FRAMING4 0x54c8
+#define mmDP7_DP_SEC_FRAMING4 0x56c8
+#define mmDP8_DP_SEC_FRAMING4 0x57c8
+#define mmDP_SEC_AUD_N 0x4ac9
+#define mmDP0_DP_SEC_AUD_N 0x4ac9
+#define mmDP1_DP_SEC_AUD_N 0x4bc9
+#define mmDP2_DP_SEC_AUD_N 0x4cc9
+#define mmDP3_DP_SEC_AUD_N 0x4dc9
+#define mmDP4_DP_SEC_AUD_N 0x4ec9
+#define mmDP5_DP_SEC_AUD_N 0x4fc9
+#define mmDP6_DP_SEC_AUD_N 0x54c9
+#define mmDP7_DP_SEC_AUD_N 0x56c9
+#define mmDP8_DP_SEC_AUD_N 0x57c9
+#define mmDP_SEC_AUD_N_READBACK 0x4aca
+#define mmDP0_DP_SEC_AUD_N_READBACK 0x4aca
+#define mmDP1_DP_SEC_AUD_N_READBACK 0x4bca
+#define mmDP2_DP_SEC_AUD_N_READBACK 0x4cca
+#define mmDP3_DP_SEC_AUD_N_READBACK 0x4dca
+#define mmDP4_DP_SEC_AUD_N_READBACK 0x4eca
+#define mmDP5_DP_SEC_AUD_N_READBACK 0x4fca
+#define mmDP6_DP_SEC_AUD_N_READBACK 0x54ca
+#define mmDP7_DP_SEC_AUD_N_READBACK 0x56ca
+#define mmDP8_DP_SEC_AUD_N_READBACK 0x57ca
+#define mmDP_SEC_AUD_M 0x4acb
+#define mmDP0_DP_SEC_AUD_M 0x4acb
+#define mmDP1_DP_SEC_AUD_M 0x4bcb
+#define mmDP2_DP_SEC_AUD_M 0x4ccb
+#define mmDP3_DP_SEC_AUD_M 0x4dcb
+#define mmDP4_DP_SEC_AUD_M 0x4ecb
+#define mmDP5_DP_SEC_AUD_M 0x4fcb
+#define mmDP6_DP_SEC_AUD_M 0x54cb
+#define mmDP7_DP_SEC_AUD_M 0x56cb
+#define mmDP8_DP_SEC_AUD_M 0x57cb
+#define mmDP_SEC_AUD_M_READBACK 0x4acc
+#define mmDP0_DP_SEC_AUD_M_READBACK 0x4acc
+#define mmDP1_DP_SEC_AUD_M_READBACK 0x4bcc
+#define mmDP2_DP_SEC_AUD_M_READBACK 0x4ccc
+#define mmDP3_DP_SEC_AUD_M_READBACK 0x4dcc
+#define mmDP4_DP_SEC_AUD_M_READBACK 0x4ecc
+#define mmDP5_DP_SEC_AUD_M_READBACK 0x4fcc
+#define mmDP6_DP_SEC_AUD_M_READBACK 0x54cc
+#define mmDP7_DP_SEC_AUD_M_READBACK 0x56cc
+#define mmDP8_DP_SEC_AUD_M_READBACK 0x57cc
+#define mmDP_SEC_TIMESTAMP 0x4acd
+#define mmDP0_DP_SEC_TIMESTAMP 0x4acd
+#define mmDP1_DP_SEC_TIMESTAMP 0x4bcd
+#define mmDP2_DP_SEC_TIMESTAMP 0x4ccd
+#define mmDP3_DP_SEC_TIMESTAMP 0x4dcd
+#define mmDP4_DP_SEC_TIMESTAMP 0x4ecd
+#define mmDP5_DP_SEC_TIMESTAMP 0x4fcd
+#define mmDP6_DP_SEC_TIMESTAMP 0x54cd
+#define mmDP7_DP_SEC_TIMESTAMP 0x56cd
+#define mmDP8_DP_SEC_TIMESTAMP 0x57cd
+#define mmDP_SEC_PACKET_CNTL 0x4ace
+#define mmDP0_DP_SEC_PACKET_CNTL 0x4ace
+#define mmDP1_DP_SEC_PACKET_CNTL 0x4bce
+#define mmDP2_DP_SEC_PACKET_CNTL 0x4cce
+#define mmDP3_DP_SEC_PACKET_CNTL 0x4dce
+#define mmDP4_DP_SEC_PACKET_CNTL 0x4ece
+#define mmDP5_DP_SEC_PACKET_CNTL 0x4fce
+#define mmDP6_DP_SEC_PACKET_CNTL 0x54ce
+#define mmDP7_DP_SEC_PACKET_CNTL 0x56ce
+#define mmDP8_DP_SEC_PACKET_CNTL 0x57ce
+#define mmDP_MSE_RATE_CNTL 0x4acf
+#define mmDP0_DP_MSE_RATE_CNTL 0x4acf
+#define mmDP1_DP_MSE_RATE_CNTL 0x4bcf
+#define mmDP2_DP_MSE_RATE_CNTL 0x4ccf
+#define mmDP3_DP_MSE_RATE_CNTL 0x4dcf
+#define mmDP4_DP_MSE_RATE_CNTL 0x4ecf
+#define mmDP5_DP_MSE_RATE_CNTL 0x4fcf
+#define mmDP6_DP_MSE_RATE_CNTL 0x54cf
+#define mmDP7_DP_MSE_RATE_CNTL 0x56cf
+#define mmDP8_DP_MSE_RATE_CNTL 0x57cf
+#define mmDP_MSE_RATE_UPDATE 0x4ad1
+#define mmDP0_DP_MSE_RATE_UPDATE 0x4ad1
+#define mmDP1_DP_MSE_RATE_UPDATE 0x4bd1
+#define mmDP2_DP_MSE_RATE_UPDATE 0x4cd1
+#define mmDP3_DP_MSE_RATE_UPDATE 0x4dd1
+#define mmDP4_DP_MSE_RATE_UPDATE 0x4ed1
+#define mmDP5_DP_MSE_RATE_UPDATE 0x4fd1
+#define mmDP6_DP_MSE_RATE_UPDATE 0x54d1
+#define mmDP7_DP_MSE_RATE_UPDATE 0x56d1
+#define mmDP8_DP_MSE_RATE_UPDATE 0x57d1
+#define mmDP_MSE_SAT0 0x4ad2
+#define mmDP0_DP_MSE_SAT0 0x4ad2
+#define mmDP1_DP_MSE_SAT0 0x4bd2
+#define mmDP2_DP_MSE_SAT0 0x4cd2
+#define mmDP3_DP_MSE_SAT0 0x4dd2
+#define mmDP4_DP_MSE_SAT0 0x4ed2
+#define mmDP5_DP_MSE_SAT0 0x4fd2
+#define mmDP6_DP_MSE_SAT0 0x54d2
+#define mmDP7_DP_MSE_SAT0 0x56d2
+#define mmDP8_DP_MSE_SAT0 0x57d2
+#define mmDP_MSE_SAT1 0x4ad3
+#define mmDP0_DP_MSE_SAT1 0x4ad3
+#define mmDP1_DP_MSE_SAT1 0x4bd3
+#define mmDP2_DP_MSE_SAT1 0x4cd3
+#define mmDP3_DP_MSE_SAT1 0x4dd3
+#define mmDP4_DP_MSE_SAT1 0x4ed3
+#define mmDP5_DP_MSE_SAT1 0x4fd3
+#define mmDP6_DP_MSE_SAT1 0x54d3
+#define mmDP7_DP_MSE_SAT1 0x56d3
+#define mmDP8_DP_MSE_SAT1 0x57d3
+#define mmDP_MSE_SAT2 0x4ad4
+#define mmDP0_DP_MSE_SAT2 0x4ad4
+#define mmDP1_DP_MSE_SAT2 0x4bd4
+#define mmDP2_DP_MSE_SAT2 0x4cd4
+#define mmDP3_DP_MSE_SAT2 0x4dd4
+#define mmDP4_DP_MSE_SAT2 0x4ed4
+#define mmDP5_DP_MSE_SAT2 0x4fd4
+#define mmDP6_DP_MSE_SAT2 0x54d4
+#define mmDP7_DP_MSE_SAT2 0x56d4
+#define mmDP8_DP_MSE_SAT2 0x57d4
+#define mmDP_MSE_SAT_UPDATE 0x4ad5
+#define mmDP0_DP_MSE_SAT_UPDATE 0x4ad5
+#define mmDP1_DP_MSE_SAT_UPDATE 0x4bd5
+#define mmDP2_DP_MSE_SAT_UPDATE 0x4cd5
+#define mmDP3_DP_MSE_SAT_UPDATE 0x4dd5
+#define mmDP4_DP_MSE_SAT_UPDATE 0x4ed5
+#define mmDP5_DP_MSE_SAT_UPDATE 0x4fd5
+#define mmDP6_DP_MSE_SAT_UPDATE 0x54d5
+#define mmDP7_DP_MSE_SAT_UPDATE 0x56d5
+#define mmDP8_DP_MSE_SAT_UPDATE 0x57d5
+#define mmDP_MSE_LINK_TIMING 0x4ad6
+#define mmDP0_DP_MSE_LINK_TIMING 0x4ad6
+#define mmDP1_DP_MSE_LINK_TIMING 0x4bd6
+#define mmDP2_DP_MSE_LINK_TIMING 0x4cd6
+#define mmDP3_DP_MSE_LINK_TIMING 0x4dd6
+#define mmDP4_DP_MSE_LINK_TIMING 0x4ed6
+#define mmDP5_DP_MSE_LINK_TIMING 0x4fd6
+#define mmDP6_DP_MSE_LINK_TIMING 0x54d6
+#define mmDP7_DP_MSE_LINK_TIMING 0x56d6
+#define mmDP8_DP_MSE_LINK_TIMING 0x57d6
+#define mmDP_MSE_MISC_CNTL 0x4ad7
+#define mmDP0_DP_MSE_MISC_CNTL 0x4ad7
+#define mmDP1_DP_MSE_MISC_CNTL 0x4bd7
+#define mmDP2_DP_MSE_MISC_CNTL 0x4cd7
+#define mmDP3_DP_MSE_MISC_CNTL 0x4dd7
+#define mmDP4_DP_MSE_MISC_CNTL 0x4ed7
+#define mmDP5_DP_MSE_MISC_CNTL 0x4fd7
+#define mmDP6_DP_MSE_MISC_CNTL 0x54d7
+#define mmDP7_DP_MSE_MISC_CNTL 0x56d7
+#define mmDP8_DP_MSE_MISC_CNTL 0x57d7
+#define mmDP_MSE_SAT0_STATUS 0x4adf
+#define mmDP0_DP_MSE_SAT0_STATUS 0x4adf
+#define mmDP1_DP_MSE_SAT0_STATUS 0x4bdf
+#define mmDP2_DP_MSE_SAT0_STATUS 0x4cdf
+#define mmDP3_DP_MSE_SAT0_STATUS 0x4ddf
+#define mmDP4_DP_MSE_SAT0_STATUS 0x4edf
+#define mmDP5_DP_MSE_SAT0_STATUS 0x4fdf
+#define mmDP6_DP_MSE_SAT0_STATUS 0x54df
+#define mmDP7_DP_MSE_SAT0_STATUS 0x56df
+#define mmDP8_DP_MSE_SAT0_STATUS 0x57df
+#define mmDP_MSE_SAT1_STATUS 0x4ae0
+#define mmDP0_DP_MSE_SAT1_STATUS 0x4ae0
+#define mmDP1_DP_MSE_SAT1_STATUS 0x4be0
+#define mmDP2_DP_MSE_SAT1_STATUS 0x4ce0
+#define mmDP3_DP_MSE_SAT1_STATUS 0x4de0
+#define mmDP4_DP_MSE_SAT1_STATUS 0x4ee0
+#define mmDP5_DP_MSE_SAT1_STATUS 0x4fe0
+#define mmDP6_DP_MSE_SAT1_STATUS 0x54e0
+#define mmDP7_DP_MSE_SAT1_STATUS 0x56e0
+#define mmDP8_DP_MSE_SAT1_STATUS 0x57e0
+#define mmDP_MSE_SAT2_STATUS 0x4ae1
+#define mmDP0_DP_MSE_SAT2_STATUS 0x4ae1
+#define mmDP1_DP_MSE_SAT2_STATUS 0x4be1
+#define mmDP2_DP_MSE_SAT2_STATUS 0x4ce1
+#define mmDP3_DP_MSE_SAT2_STATUS 0x4de1
+#define mmDP4_DP_MSE_SAT2_STATUS 0x4ee1
+#define mmDP5_DP_MSE_SAT2_STATUS 0x4fe1
+#define mmDP6_DP_MSE_SAT2_STATUS 0x54e1
+#define mmDP7_DP_MSE_SAT2_STATUS 0x56e1
+#define mmDP8_DP_MSE_SAT2_STATUS 0x57e1
+#define mmDP_TEST_DEBUG_INDEX 0x4ad8
+#define mmDP0_DP_TEST_DEBUG_INDEX 0x4ad8
+#define mmDP1_DP_TEST_DEBUG_INDEX 0x4bd8
+#define mmDP2_DP_TEST_DEBUG_INDEX 0x4cd8
+#define mmDP3_DP_TEST_DEBUG_INDEX 0x4dd8
+#define mmDP4_DP_TEST_DEBUG_INDEX 0x4ed8
+#define mmDP5_DP_TEST_DEBUG_INDEX 0x4fd8
+#define mmDP6_DP_TEST_DEBUG_INDEX 0x54d8
+#define mmDP7_DP_TEST_DEBUG_INDEX 0x56d8
+#define mmDP8_DP_TEST_DEBUG_INDEX 0x57d8
+#define mmDP_TEST_DEBUG_DATA 0x4ad9
+#define mmDP0_DP_TEST_DEBUG_DATA 0x4ad9
+#define mmDP1_DP_TEST_DEBUG_DATA 0x4bd9
+#define mmDP2_DP_TEST_DEBUG_DATA 0x4cd9
+#define mmDP3_DP_TEST_DEBUG_DATA 0x4dd9
+#define mmDP4_DP_TEST_DEBUG_DATA 0x4ed9
+#define mmDP5_DP_TEST_DEBUG_DATA 0x4fd9
+#define mmDP6_DP_TEST_DEBUG_DATA 0x54d9
+#define mmDP7_DP_TEST_DEBUG_DATA 0x56d9
+#define mmDP8_DP_TEST_DEBUG_DATA 0x57d9
+#define mmDP_FE_TEST_DEBUG_INDEX 0x4ada
+#define mmDP0_DP_FE_TEST_DEBUG_INDEX 0x4ada
+#define mmDP1_DP_FE_TEST_DEBUG_INDEX 0x4bda
+#define mmDP2_DP_FE_TEST_DEBUG_INDEX 0x4cda
+#define mmDP3_DP_FE_TEST_DEBUG_INDEX 0x4dda
+#define mmDP4_DP_FE_TEST_DEBUG_INDEX 0x4eda
+#define mmDP5_DP_FE_TEST_DEBUG_INDEX 0x4fda
+#define mmDP6_DP_FE_TEST_DEBUG_INDEX 0x54da
+#define mmDP7_DP_FE_TEST_DEBUG_INDEX 0x56da
+#define mmDP8_DP_FE_TEST_DEBUG_INDEX 0x57da
+#define mmDP_FE_TEST_DEBUG_DATA 0x4adb
+#define mmDP0_DP_FE_TEST_DEBUG_DATA 0x4adb
+#define mmDP1_DP_FE_TEST_DEBUG_DATA 0x4bdb
+#define mmDP2_DP_FE_TEST_DEBUG_DATA 0x4cdb
+#define mmDP3_DP_FE_TEST_DEBUG_DATA 0x4ddb
+#define mmDP4_DP_FE_TEST_DEBUG_DATA 0x4edb
+#define mmDP5_DP_FE_TEST_DEBUG_DATA 0x4fdb
+#define mmDP6_DP_FE_TEST_DEBUG_DATA 0x54db
+#define mmDP7_DP_FE_TEST_DEBUG_DATA 0x56db
+#define mmDP8_DP_FE_TEST_DEBUG_DATA 0x57db
+#define mmAUX_CONTROL 0x5c00
+#define mmDP_AUX0_AUX_CONTROL 0x5c00
+#define mmDP_AUX1_AUX_CONTROL 0x5c1c
+#define mmDP_AUX2_AUX_CONTROL 0x5c38
+#define mmDP_AUX3_AUX_CONTROL 0x5c54
+#define mmDP_AUX4_AUX_CONTROL 0x5c70
+#define mmDP_AUX5_AUX_CONTROL 0x5c8c
+#define mmAUX_SW_CONTROL 0x5c01
+#define mmDP_AUX0_AUX_SW_CONTROL 0x5c01
+#define mmDP_AUX1_AUX_SW_CONTROL 0x5c1d
+#define mmDP_AUX2_AUX_SW_CONTROL 0x5c39
+#define mmDP_AUX3_AUX_SW_CONTROL 0x5c55
+#define mmDP_AUX4_AUX_SW_CONTROL 0x5c71
+#define mmDP_AUX5_AUX_SW_CONTROL 0x5c8d
+#define mmAUX_ARB_CONTROL 0x5c02
+#define mmDP_AUX0_AUX_ARB_CONTROL 0x5c02
+#define mmDP_AUX1_AUX_ARB_CONTROL 0x5c1e
+#define mmDP_AUX2_AUX_ARB_CONTROL 0x5c3a
+#define mmDP_AUX3_AUX_ARB_CONTROL 0x5c56
+#define mmDP_AUX4_AUX_ARB_CONTROL 0x5c72
+#define mmDP_AUX5_AUX_ARB_CONTROL 0x5c8e
+#define mmAUX_INTERRUPT_CONTROL 0x5c03
+#define mmDP_AUX0_AUX_INTERRUPT_CONTROL 0x5c03
+#define mmDP_AUX1_AUX_INTERRUPT_CONTROL 0x5c1f
+#define mmDP_AUX2_AUX_INTERRUPT_CONTROL 0x5c3b
+#define mmDP_AUX3_AUX_INTERRUPT_CONTROL 0x5c57
+#define mmDP_AUX4_AUX_INTERRUPT_CONTROL 0x5c73
+#define mmDP_AUX5_AUX_INTERRUPT_CONTROL 0x5c8f
+#define mmAUX_SW_STATUS 0x5c04
+#define mmDP_AUX0_AUX_SW_STATUS 0x5c04
+#define mmDP_AUX1_AUX_SW_STATUS 0x5c20
+#define mmDP_AUX2_AUX_SW_STATUS 0x5c3c
+#define mmDP_AUX3_AUX_SW_STATUS 0x5c58
+#define mmDP_AUX4_AUX_SW_STATUS 0x5c74
+#define mmDP_AUX5_AUX_SW_STATUS 0x5c90
+#define mmAUX_LS_STATUS 0x5c05
+#define mmDP_AUX0_AUX_LS_STATUS 0x5c05
+#define mmDP_AUX1_AUX_LS_STATUS 0x5c21
+#define mmDP_AUX2_AUX_LS_STATUS 0x5c3d
+#define mmDP_AUX3_AUX_LS_STATUS 0x5c59
+#define mmDP_AUX4_AUX_LS_STATUS 0x5c75
+#define mmDP_AUX5_AUX_LS_STATUS 0x5c91
+#define mmAUX_SW_DATA 0x5c06
+#define mmDP_AUX0_AUX_SW_DATA 0x5c06
+#define mmDP_AUX1_AUX_SW_DATA 0x5c22
+#define mmDP_AUX2_AUX_SW_DATA 0x5c3e
+#define mmDP_AUX3_AUX_SW_DATA 0x5c5a
+#define mmDP_AUX4_AUX_SW_DATA 0x5c76
+#define mmDP_AUX5_AUX_SW_DATA 0x5c92
+#define mmAUX_LS_DATA 0x5c07
+#define mmDP_AUX0_AUX_LS_DATA 0x5c07
+#define mmDP_AUX1_AUX_LS_DATA 0x5c23
+#define mmDP_AUX2_AUX_LS_DATA 0x5c3f
+#define mmDP_AUX3_AUX_LS_DATA 0x5c5b
+#define mmDP_AUX4_AUX_LS_DATA 0x5c77
+#define mmDP_AUX5_AUX_LS_DATA 0x5c93
+#define mmAUX_DPHY_TX_REF_CONTROL 0x5c08
+#define mmDP_AUX0_AUX_DPHY_TX_REF_CONTROL 0x5c08
+#define mmDP_AUX1_AUX_DPHY_TX_REF_CONTROL 0x5c24
+#define mmDP_AUX2_AUX_DPHY_TX_REF_CONTROL 0x5c40
+#define mmDP_AUX3_AUX_DPHY_TX_REF_CONTROL 0x5c5c
+#define mmDP_AUX4_AUX_DPHY_TX_REF_CONTROL 0x5c78
+#define mmDP_AUX5_AUX_DPHY_TX_REF_CONTROL 0x5c94
+#define mmAUX_DPHY_TX_CONTROL 0x5c09
+#define mmDP_AUX0_AUX_DPHY_TX_CONTROL 0x5c09
+#define mmDP_AUX1_AUX_DPHY_TX_CONTROL 0x5c25
+#define mmDP_AUX2_AUX_DPHY_TX_CONTROL 0x5c41
+#define mmDP_AUX3_AUX_DPHY_TX_CONTROL 0x5c5d
+#define mmDP_AUX4_AUX_DPHY_TX_CONTROL 0x5c79
+#define mmDP_AUX5_AUX_DPHY_TX_CONTROL 0x5c95
+#define mmAUX_DPHY_RX_CONTROL0 0x5c0a
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL0 0x5c0a
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL0 0x5c26
+#define mmDP_AUX2_AUX_DPHY_RX_CONTROL0 0x5c42
+#define mmDP_AUX3_AUX_DPHY_RX_CONTROL0 0x5c5e
+#define mmDP_AUX4_AUX_DPHY_RX_CONTROL0 0x5c7a
+#define mmDP_AUX5_AUX_DPHY_RX_CONTROL0 0x5c96
+#define mmAUX_DPHY_RX_CONTROL1 0x5c0b
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL1 0x5c0b
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL1 0x5c27
+#define mmDP_AUX2_AUX_DPHY_RX_CONTROL1 0x5c43
+#define mmDP_AUX3_AUX_DPHY_RX_CONTROL1 0x5c5f
+#define mmDP_AUX4_AUX_DPHY_RX_CONTROL1 0x5c7b
+#define mmDP_AUX5_AUX_DPHY_RX_CONTROL1 0x5c97
+#define mmAUX_DPHY_TX_STATUS 0x5c0c
+#define mmDP_AUX0_AUX_DPHY_TX_STATUS 0x5c0c
+#define mmDP_AUX1_AUX_DPHY_TX_STATUS 0x5c28
+#define mmDP_AUX2_AUX_DPHY_TX_STATUS 0x5c44
+#define mmDP_AUX3_AUX_DPHY_TX_STATUS 0x5c60
+#define mmDP_AUX4_AUX_DPHY_TX_STATUS 0x5c7c
+#define mmDP_AUX5_AUX_DPHY_TX_STATUS 0x5c98
+#define mmAUX_DPHY_RX_STATUS 0x5c0d
+#define mmDP_AUX0_AUX_DPHY_RX_STATUS 0x5c0d
+#define mmDP_AUX1_AUX_DPHY_RX_STATUS 0x5c29
+#define mmDP_AUX2_AUX_DPHY_RX_STATUS 0x5c45
+#define mmDP_AUX3_AUX_DPHY_RX_STATUS 0x5c61
+#define mmDP_AUX4_AUX_DPHY_RX_STATUS 0x5c7d
+#define mmDP_AUX5_AUX_DPHY_RX_STATUS 0x5c99
+#define mmAUX_GTC_SYNC_ERROR_CONTROL 0x5c0f
+#define mmDP_AUX0_AUX_GTC_SYNC_ERROR_CONTROL 0x5c0f
+#define mmDP_AUX1_AUX_GTC_SYNC_ERROR_CONTROL 0x5c2b
+#define mmDP_AUX2_AUX_GTC_SYNC_ERROR_CONTROL 0x5c47
+#define mmDP_AUX3_AUX_GTC_SYNC_ERROR_CONTROL 0x5c63
+#define mmDP_AUX4_AUX_GTC_SYNC_ERROR_CONTROL 0x5c7f
+#define mmDP_AUX5_AUX_GTC_SYNC_ERROR_CONTROL 0x5c9b
+#define mmAUX_GTC_SYNC_CONTROLLER_STATUS 0x5c10
+#define mmDP_AUX0_AUX_GTC_SYNC_CONTROLLER_STATUS 0x5c10
+#define mmDP_AUX1_AUX_GTC_SYNC_CONTROLLER_STATUS 0x5c2c
+#define mmDP_AUX2_AUX_GTC_SYNC_CONTROLLER_STATUS 0x5c48
+#define mmDP_AUX3_AUX_GTC_SYNC_CONTROLLER_STATUS 0x5c64
+#define mmDP_AUX4_AUX_GTC_SYNC_CONTROLLER_STATUS 0x5c80
+#define mmDP_AUX5_AUX_GTC_SYNC_CONTROLLER_STATUS 0x5c9c
+#define mmAUX_GTC_SYNC_STATUS 0x5c11
+#define mmDP_AUX0_AUX_GTC_SYNC_STATUS 0x5c11
+#define mmDP_AUX1_AUX_GTC_SYNC_STATUS 0x5c2d
+#define mmDP_AUX2_AUX_GTC_SYNC_STATUS 0x5c49
+#define mmDP_AUX3_AUX_GTC_SYNC_STATUS 0x5c65
+#define mmDP_AUX4_AUX_GTC_SYNC_STATUS 0x5c81
+#define mmDP_AUX5_AUX_GTC_SYNC_STATUS 0x5c9d
+#define mmAUX_TEST_DEBUG_INDEX 0x5c14
+#define mmDP_AUX0_AUX_TEST_DEBUG_INDEX 0x5c14
+#define mmDP_AUX1_AUX_TEST_DEBUG_INDEX 0x5c30
+#define mmDP_AUX2_AUX_TEST_DEBUG_INDEX 0x5c4c
+#define mmDP_AUX3_AUX_TEST_DEBUG_INDEX 0x5c68
+#define mmDP_AUX4_AUX_TEST_DEBUG_INDEX 0x5c84
+#define mmDP_AUX5_AUX_TEST_DEBUG_INDEX 0x5ca0
+#define mmAUX_TEST_DEBUG_DATA 0x5c15
+#define mmDP_AUX0_AUX_TEST_DEBUG_DATA 0x5c15
+#define mmDP_AUX1_AUX_TEST_DEBUG_DATA 0x5c31
+#define mmDP_AUX2_AUX_TEST_DEBUG_DATA 0x5c4d
+#define mmDP_AUX3_AUX_TEST_DEBUG_DATA 0x5c69
+#define mmDP_AUX4_AUX_TEST_DEBUG_DATA 0x5c85
+#define mmDP_AUX5_AUX_TEST_DEBUG_DATA 0x5ca1
+#define ixDP_AUX_DEBUG_A 0x10
+#define ixDP_AUX_DEBUG_B 0x11
+#define ixDP_AUX_DEBUG_C 0x12
+#define ixDP_AUX_DEBUG_D 0x13
+#define ixDP_AUX_DEBUG_E 0x14
+#define ixDP_AUX_DEBUG_F 0x15
+#define ixDP_AUX_DEBUG_G 0x16
+#define ixDP_AUX_DEBUG_H 0x17
+#define ixDP_AUX_DEBUG_I 0x18
+#define ixDP_AUX_DEBUG_J 0x19
+#define ixDP_AUX_DEBUG_K 0x1a
+#define ixDP_AUX_DEBUG_L 0x1b
+#define ixDP_AUX_DEBUG_M 0x1c
+#define ixDP_AUX_DEBUG_N 0x1d
+#define ixDP_AUX_DEBUG_O 0x1e
+#define ixDP_AUX_DEBUG_P 0x1f
+#define ixDP_AUX_DEBUG_Q 0x20
+#define mmDVO_ENABLE 0x16a0
+#define mmDVO_SOURCE_SELECT 0x16a1
+#define mmDVO_OUTPUT 0x16a2
+#define mmDVO_CONTROL 0x16a3
+#define mmDVO_CRC_EN 0x16a4
+#define mmDVO_CRC2_SIG_MASK 0x16a5
+#define mmDVO_CRC2_SIG_RESULT 0x16a6
+#define mmDVO_FIFO_ERROR_STATUS 0x16a7
+#define mmDVO_TEST_DEBUG_INDEX 0x16a8
+#define mmDVO_TEST_DEBUG_DATA 0x16a9
+#define mmFBC_CNTL 0x280
+#define mmFBC_IDLE_FORCE_CLEAR_MASK 0x282
+#define mmFBC_START_STOP_DELAY 0x283
+#define mmFBC_COMP_CNTL 0x284
+#define mmFBC_COMP_MODE 0x285
+#define mmFBC_DEBUG0 0x286
+#define mmFBC_DEBUG1 0x287
+#define mmFBC_DEBUG2 0x288
+#define mmFBC_IND_LUT0 0x289
+#define mmFBC_IND_LUT1 0x28a
+#define mmFBC_IND_LUT2 0x28b
+#define mmFBC_IND_LUT3 0x28c
+#define mmFBC_IND_LUT4 0x28d
+#define mmFBC_IND_LUT5 0x28e
+#define mmFBC_IND_LUT6 0x28f
+#define mmFBC_IND_LUT7 0x290
+#define mmFBC_IND_LUT8 0x291
+#define mmFBC_IND_LUT9 0x292
+#define mmFBC_IND_LUT10 0x293
+#define mmFBC_IND_LUT11 0x294
+#define mmFBC_IND_LUT12 0x295
+#define mmFBC_IND_LUT13 0x296
+#define mmFBC_IND_LUT14 0x297
+#define mmFBC_IND_LUT15 0x298
+#define mmFBC_CSM_REGION_OFFSET_01 0x299
+#define mmFBC_CSM_REGION_OFFSET_23 0x29a
+#define mmFBC_CLIENT_REGION_MASK 0x29b
+#define mmFBC_DEBUG_COMP 0x29c
+#define mmFBC_DEBUG_CSR 0x29d
+#define mmFBC_DEBUG_CSR_RDATA 0x29e
+#define mmFBC_DEBUG_CSR_WDATA 0x29f
+#define mmFBC_DEBUG_CSR_RDATA_HI 0x2a0
+#define mmFBC_DEBUG_CSR_WDATA_HI 0x2a1
+#define mmFBC_MISC 0x2a2
+#define mmFBC_STATUS 0x2a3
+#define mmFBC_ALPHA_CNTL 0x2a6
+#define mmFBC_ALPHA_RGB_OVERRIDE 0x2a7
+#define mmFBC_TEST_DEBUG_INDEX 0x2a4
+#define mmFBC_TEST_DEBUG_DATA 0x2a5
+#define mmFMT_CLAMP_COMPONENT_R 0x1be8
+#define mmFMT0_FMT_CLAMP_COMPONENT_R 0x1be8
+#define mmFMT1_FMT_CLAMP_COMPONENT_R 0x1de8
+#define mmFMT2_FMT_CLAMP_COMPONENT_R 0x1fe8
+#define mmFMT3_FMT_CLAMP_COMPONENT_R 0x41e8
+#define mmFMT4_FMT_CLAMP_COMPONENT_R 0x43e8
+#define mmFMT5_FMT_CLAMP_COMPONENT_R 0x45e8
+#define mmFMT_CLAMP_COMPONENT_G 0x1be9
+#define mmFMT0_FMT_CLAMP_COMPONENT_G 0x1be9
+#define mmFMT1_FMT_CLAMP_COMPONENT_G 0x1de9
+#define mmFMT2_FMT_CLAMP_COMPONENT_G 0x1fe9
+#define mmFMT3_FMT_CLAMP_COMPONENT_G 0x41e9
+#define mmFMT4_FMT_CLAMP_COMPONENT_G 0x43e9
+#define mmFMT5_FMT_CLAMP_COMPONENT_G 0x45e9
+#define mmFMT_CLAMP_COMPONENT_B 0x1bea
+#define mmFMT0_FMT_CLAMP_COMPONENT_B 0x1bea
+#define mmFMT1_FMT_CLAMP_COMPONENT_B 0x1dea
+#define mmFMT2_FMT_CLAMP_COMPONENT_B 0x1fea
+#define mmFMT3_FMT_CLAMP_COMPONENT_B 0x41ea
+#define mmFMT4_FMT_CLAMP_COMPONENT_B 0x43ea
+#define mmFMT5_FMT_CLAMP_COMPONENT_B 0x45ea
+#define mmFMT_DYNAMIC_EXP_CNTL 0x1bed
+#define mmFMT0_FMT_DYNAMIC_EXP_CNTL 0x1bed
+#define mmFMT1_FMT_DYNAMIC_EXP_CNTL 0x1ded
+#define mmFMT2_FMT_DYNAMIC_EXP_CNTL 0x1fed
+#define mmFMT3_FMT_DYNAMIC_EXP_CNTL 0x41ed
+#define mmFMT4_FMT_DYNAMIC_EXP_CNTL 0x43ed
+#define mmFMT5_FMT_DYNAMIC_EXP_CNTL 0x45ed
+#define mmFMT_CONTROL 0x1bee
+#define mmFMT0_FMT_CONTROL 0x1bee
+#define mmFMT1_FMT_CONTROL 0x1dee
+#define mmFMT2_FMT_CONTROL 0x1fee
+#define mmFMT3_FMT_CONTROL 0x41ee
+#define mmFMT4_FMT_CONTROL 0x43ee
+#define mmFMT5_FMT_CONTROL 0x45ee
+#define mmFMT_BIT_DEPTH_CONTROL 0x1bf2
+#define mmFMT0_FMT_BIT_DEPTH_CONTROL 0x1bf2
+#define mmFMT1_FMT_BIT_DEPTH_CONTROL 0x1df2
+#define mmFMT2_FMT_BIT_DEPTH_CONTROL 0x1ff2
+#define mmFMT3_FMT_BIT_DEPTH_CONTROL 0x41f2
+#define mmFMT4_FMT_BIT_DEPTH_CONTROL 0x43f2
+#define mmFMT5_FMT_BIT_DEPTH_CONTROL 0x45f2
+#define mmFMT_DITHER_RAND_R_SEED 0x1bf3
+#define mmFMT0_FMT_DITHER_RAND_R_SEED 0x1bf3
+#define mmFMT1_FMT_DITHER_RAND_R_SEED 0x1df3
+#define mmFMT2_FMT_DITHER_RAND_R_SEED 0x1ff3
+#define mmFMT3_FMT_DITHER_RAND_R_SEED 0x41f3
+#define mmFMT4_FMT_DITHER_RAND_R_SEED 0x43f3
+#define mmFMT5_FMT_DITHER_RAND_R_SEED 0x45f3
+#define mmFMT_DITHER_RAND_G_SEED 0x1bf4
+#define mmFMT0_FMT_DITHER_RAND_G_SEED 0x1bf4
+#define mmFMT1_FMT_DITHER_RAND_G_SEED 0x1df4
+#define mmFMT2_FMT_DITHER_RAND_G_SEED 0x1ff4
+#define mmFMT3_FMT_DITHER_RAND_G_SEED 0x41f4
+#define mmFMT4_FMT_DITHER_RAND_G_SEED 0x43f4
+#define mmFMT5_FMT_DITHER_RAND_G_SEED 0x45f4
+#define mmFMT_DITHER_RAND_B_SEED 0x1bf5
+#define mmFMT0_FMT_DITHER_RAND_B_SEED 0x1bf5
+#define mmFMT1_FMT_DITHER_RAND_B_SEED 0x1df5
+#define mmFMT2_FMT_DITHER_RAND_B_SEED 0x1ff5
+#define mmFMT3_FMT_DITHER_RAND_B_SEED 0x41f5
+#define mmFMT4_FMT_DITHER_RAND_B_SEED 0x43f5
+#define mmFMT5_FMT_DITHER_RAND_B_SEED 0x45f5
+#define mmFMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x1bf6
+#define mmFMT0_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x1bf6
+#define mmFMT1_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x1df6
+#define mmFMT2_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x1ff6
+#define mmFMT3_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x41f6
+#define mmFMT4_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x43f6
+#define mmFMT5_FMT_TEMPORAL_DITHER_PATTERN_CONTROL 0x45f6
+#define mmFMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x1bf7
+#define mmFMT0_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x1bf7
+#define mmFMT1_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x1df7
+#define mmFMT2_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x1ff7
+#define mmFMT3_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x41f7
+#define mmFMT4_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x43f7
+#define mmFMT5_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX 0x45f7
+#define mmFMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x1bf8
+#define mmFMT0_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x1bf8
+#define mmFMT1_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x1df8
+#define mmFMT2_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x1ff8
+#define mmFMT3_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x41f8
+#define mmFMT4_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x43f8
+#define mmFMT5_FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX 0x45f8
+#define mmFMT_CLAMP_CNTL 0x1bf9
+#define mmFMT0_FMT_CLAMP_CNTL 0x1bf9
+#define mmFMT1_FMT_CLAMP_CNTL 0x1df9
+#define mmFMT2_FMT_CLAMP_CNTL 0x1ff9
+#define mmFMT3_FMT_CLAMP_CNTL 0x41f9
+#define mmFMT4_FMT_CLAMP_CNTL 0x43f9
+#define mmFMT5_FMT_CLAMP_CNTL 0x45f9
+#define mmFMT_CRC_CNTL 0x1bfa
+#define mmFMT0_FMT_CRC_CNTL 0x1bfa
+#define mmFMT1_FMT_CRC_CNTL 0x1dfa
+#define mmFMT2_FMT_CRC_CNTL 0x1ffa
+#define mmFMT3_FMT_CRC_CNTL 0x41fa
+#define mmFMT4_FMT_CRC_CNTL 0x43fa
+#define mmFMT5_FMT_CRC_CNTL 0x45fa
+#define mmFMT_CRC_SIG_RED_GREEN_MASK 0x1bfb
+#define mmFMT0_FMT_CRC_SIG_RED_GREEN_MASK 0x1bfb
+#define mmFMT1_FMT_CRC_SIG_RED_GREEN_MASK 0x1dfb
+#define mmFMT2_FMT_CRC_SIG_RED_GREEN_MASK 0x1ffb
+#define mmFMT3_FMT_CRC_SIG_RED_GREEN_MASK 0x41fb
+#define mmFMT4_FMT_CRC_SIG_RED_GREEN_MASK 0x43fb
+#define mmFMT5_FMT_CRC_SIG_RED_GREEN_MASK 0x45fb
+#define mmFMT_CRC_SIG_BLUE_CONTROL_MASK 0x1bfc
+#define mmFMT0_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x1bfc
+#define mmFMT1_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x1dfc
+#define mmFMT2_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x1ffc
+#define mmFMT3_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x41fc
+#define mmFMT4_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x43fc
+#define mmFMT5_FMT_CRC_SIG_BLUE_CONTROL_MASK 0x45fc
+#define mmFMT_CRC_SIG_RED_GREEN 0x1bfd
+#define mmFMT0_FMT_CRC_SIG_RED_GREEN 0x1bfd
+#define mmFMT1_FMT_CRC_SIG_RED_GREEN 0x1dfd
+#define mmFMT2_FMT_CRC_SIG_RED_GREEN 0x1ffd
+#define mmFMT3_FMT_CRC_SIG_RED_GREEN 0x41fd
+#define mmFMT4_FMT_CRC_SIG_RED_GREEN 0x43fd
+#define mmFMT5_FMT_CRC_SIG_RED_GREEN 0x45fd
+#define mmFMT_CRC_SIG_BLUE_CONTROL 0x1bfe
+#define mmFMT0_FMT_CRC_SIG_BLUE_CONTROL 0x1bfe
+#define mmFMT1_FMT_CRC_SIG_BLUE_CONTROL 0x1dfe
+#define mmFMT2_FMT_CRC_SIG_BLUE_CONTROL 0x1ffe
+#define mmFMT3_FMT_CRC_SIG_BLUE_CONTROL 0x41fe
+#define mmFMT4_FMT_CRC_SIG_BLUE_CONTROL 0x43fe
+#define mmFMT5_FMT_CRC_SIG_BLUE_CONTROL 0x45fe
+#define mmFMT_DEBUG_CNTL 0x1bff
+#define mmFMT0_FMT_DEBUG_CNTL 0x1bff
+#define mmFMT1_FMT_DEBUG_CNTL 0x1dff
+#define mmFMT2_FMT_DEBUG_CNTL 0x1fff
+#define mmFMT3_FMT_DEBUG_CNTL 0x41ff
+#define mmFMT4_FMT_DEBUG_CNTL 0x43ff
+#define mmFMT5_FMT_DEBUG_CNTL 0x45ff
+#define mmFMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1bf0
+#define mmFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1bf0
+#define mmFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1df0
+#define mmFMT2_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1ff0
+#define mmFMT3_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x41f0
+#define mmFMT4_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x43f0
+#define mmFMT5_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x45f0
+#define mmFMT_420_HBLANK_EARLY_START 0x1bf1
+#define mmFMT0_FMT_420_HBLANK_EARLY_START 0x1bf1
+#define mmFMT1_FMT_420_HBLANK_EARLY_START 0x1df1
+#define mmFMT2_FMT_420_HBLANK_EARLY_START 0x1ff1
+#define mmFMT3_FMT_420_HBLANK_EARLY_START 0x41f1
+#define mmFMT4_FMT_420_HBLANK_EARLY_START 0x43f1
+#define mmFMT5_FMT_420_HBLANK_EARLY_START 0x45f1
+#define mmFMT_TEST_DEBUG_INDEX 0x1beb
+#define mmFMT0_FMT_TEST_DEBUG_INDEX 0x1beb
+#define mmFMT1_FMT_TEST_DEBUG_INDEX 0x1deb
+#define mmFMT2_FMT_TEST_DEBUG_INDEX 0x1feb
+#define mmFMT3_FMT_TEST_DEBUG_INDEX 0x41eb
+#define mmFMT4_FMT_TEST_DEBUG_INDEX 0x43eb
+#define mmFMT5_FMT_TEST_DEBUG_INDEX 0x45eb
+#define mmFMT_TEST_DEBUG_DATA 0x1bec
+#define mmFMT0_FMT_TEST_DEBUG_DATA 0x1bec
+#define mmFMT1_FMT_TEST_DEBUG_DATA 0x1dec
+#define mmFMT2_FMT_TEST_DEBUG_DATA 0x1fec
+#define mmFMT3_FMT_TEST_DEBUG_DATA 0x41ec
+#define mmFMT4_FMT_TEST_DEBUG_DATA 0x43ec
+#define mmFMT5_FMT_TEST_DEBUG_DATA 0x45ec
+#define ixFMT_DEBUG0 0x1
+#define ixFMT_DEBUG1 0x2
+#define ixFMT_DEBUG2 0x3
+#define ixFMT_DEBUG3 0x4
+#define ixFMT_DEBUG_ID 0x0
+#define mmLB_DATA_FORMAT 0x1ac0
+#define mmLB0_LB_DATA_FORMAT 0x1ac0
+#define mmLB1_LB_DATA_FORMAT 0x1cc0
+#define mmLB2_LB_DATA_FORMAT 0x1ec0
+#define mmLB3_LB_DATA_FORMAT 0x40c0
+#define mmLB4_LB_DATA_FORMAT 0x42c0
+#define mmLB5_LB_DATA_FORMAT 0x44c0
+#define mmLB_MEMORY_CTRL 0x1ac1
+#define mmLB0_LB_MEMORY_CTRL 0x1ac1
+#define mmLB1_LB_MEMORY_CTRL 0x1cc1
+#define mmLB2_LB_MEMORY_CTRL 0x1ec1
+#define mmLB3_LB_MEMORY_CTRL 0x40c1
+#define mmLB4_LB_MEMORY_CTRL 0x42c1
+#define mmLB5_LB_MEMORY_CTRL 0x44c1
+#define mmLB_MEMORY_SIZE_STATUS 0x1ac2
+#define mmLB0_LB_MEMORY_SIZE_STATUS 0x1ac2
+#define mmLB1_LB_MEMORY_SIZE_STATUS 0x1cc2
+#define mmLB2_LB_MEMORY_SIZE_STATUS 0x1ec2
+#define mmLB3_LB_MEMORY_SIZE_STATUS 0x40c2
+#define mmLB4_LB_MEMORY_SIZE_STATUS 0x42c2
+#define mmLB5_LB_MEMORY_SIZE_STATUS 0x44c2
+#define mmLB_DESKTOP_HEIGHT 0x1ac3
+#define mmLB0_LB_DESKTOP_HEIGHT 0x1ac3
+#define mmLB1_LB_DESKTOP_HEIGHT 0x1cc3
+#define mmLB2_LB_DESKTOP_HEIGHT 0x1ec3
+#define mmLB3_LB_DESKTOP_HEIGHT 0x40c3
+#define mmLB4_LB_DESKTOP_HEIGHT 0x42c3
+#define mmLB5_LB_DESKTOP_HEIGHT 0x44c3
+#define mmLB_VLINE_START_END 0x1ac4
+#define mmLB0_LB_VLINE_START_END 0x1ac4
+#define mmLB1_LB_VLINE_START_END 0x1cc4
+#define mmLB2_LB_VLINE_START_END 0x1ec4
+#define mmLB3_LB_VLINE_START_END 0x40c4
+#define mmLB4_LB_VLINE_START_END 0x42c4
+#define mmLB5_LB_VLINE_START_END 0x44c4
+#define mmLB_VLINE2_START_END 0x1ac5
+#define mmLB0_LB_VLINE2_START_END 0x1ac5
+#define mmLB1_LB_VLINE2_START_END 0x1cc5
+#define mmLB2_LB_VLINE2_START_END 0x1ec5
+#define mmLB3_LB_VLINE2_START_END 0x40c5
+#define mmLB4_LB_VLINE2_START_END 0x42c5
+#define mmLB5_LB_VLINE2_START_END 0x44c5
+#define mmLB_V_COUNTER 0x1ac6
+#define mmLB0_LB_V_COUNTER 0x1ac6
+#define mmLB1_LB_V_COUNTER 0x1cc6
+#define mmLB2_LB_V_COUNTER 0x1ec6
+#define mmLB3_LB_V_COUNTER 0x40c6
+#define mmLB4_LB_V_COUNTER 0x42c6
+#define mmLB5_LB_V_COUNTER 0x44c6
+#define mmLB_SNAPSHOT_V_COUNTER 0x1ac7
+#define mmLB0_LB_SNAPSHOT_V_COUNTER 0x1ac7
+#define mmLB1_LB_SNAPSHOT_V_COUNTER 0x1cc7
+#define mmLB2_LB_SNAPSHOT_V_COUNTER 0x1ec7
+#define mmLB3_LB_SNAPSHOT_V_COUNTER 0x40c7
+#define mmLB4_LB_SNAPSHOT_V_COUNTER 0x42c7
+#define mmLB5_LB_SNAPSHOT_V_COUNTER 0x44c7
+#define mmLB_INTERRUPT_MASK 0x1ac8
+#define mmLB0_LB_INTERRUPT_MASK 0x1ac8
+#define mmLB1_LB_INTERRUPT_MASK 0x1cc8
+#define mmLB2_LB_INTERRUPT_MASK 0x1ec8
+#define mmLB3_LB_INTERRUPT_MASK 0x40c8
+#define mmLB4_LB_INTERRUPT_MASK 0x42c8
+#define mmLB5_LB_INTERRUPT_MASK 0x44c8
+#define mmLB_VLINE_STATUS 0x1ac9
+#define mmLB0_LB_VLINE_STATUS 0x1ac9
+#define mmLB1_LB_VLINE_STATUS 0x1cc9
+#define mmLB2_LB_VLINE_STATUS 0x1ec9
+#define mmLB3_LB_VLINE_STATUS 0x40c9
+#define mmLB4_LB_VLINE_STATUS 0x42c9
+#define mmLB5_LB_VLINE_STATUS 0x44c9
+#define mmLB_VLINE2_STATUS 0x1aca
+#define mmLB0_LB_VLINE2_STATUS 0x1aca
+#define mmLB1_LB_VLINE2_STATUS 0x1cca
+#define mmLB2_LB_VLINE2_STATUS 0x1eca
+#define mmLB3_LB_VLINE2_STATUS 0x40ca
+#define mmLB4_LB_VLINE2_STATUS 0x42ca
+#define mmLB5_LB_VLINE2_STATUS 0x44ca
+#define mmLB_VBLANK_STATUS 0x1acb
+#define mmLB0_LB_VBLANK_STATUS 0x1acb
+#define mmLB1_LB_VBLANK_STATUS 0x1ccb
+#define mmLB2_LB_VBLANK_STATUS 0x1ecb
+#define mmLB3_LB_VBLANK_STATUS 0x40cb
+#define mmLB4_LB_VBLANK_STATUS 0x42cb
+#define mmLB5_LB_VBLANK_STATUS 0x44cb
+#define mmLB_SYNC_RESET_SEL 0x1acc
+#define mmLB0_LB_SYNC_RESET_SEL 0x1acc
+#define mmLB1_LB_SYNC_RESET_SEL 0x1ccc
+#define mmLB2_LB_SYNC_RESET_SEL 0x1ecc
+#define mmLB3_LB_SYNC_RESET_SEL 0x40cc
+#define mmLB4_LB_SYNC_RESET_SEL 0x42cc
+#define mmLB5_LB_SYNC_RESET_SEL 0x44cc
+#define mmLB_BLACK_KEYER_R_CR 0x1acd
+#define mmLB0_LB_BLACK_KEYER_R_CR 0x1acd
+#define mmLB1_LB_BLACK_KEYER_R_CR 0x1ccd
+#define mmLB2_LB_BLACK_KEYER_R_CR 0x1ecd
+#define mmLB3_LB_BLACK_KEYER_R_CR 0x40cd
+#define mmLB4_LB_BLACK_KEYER_R_CR 0x42cd
+#define mmLB5_LB_BLACK_KEYER_R_CR 0x44cd
+#define mmLB_BLACK_KEYER_G_Y 0x1ace
+#define mmLB0_LB_BLACK_KEYER_G_Y 0x1ace
+#define mmLB1_LB_BLACK_KEYER_G_Y 0x1cce
+#define mmLB2_LB_BLACK_KEYER_G_Y 0x1ece
+#define mmLB3_LB_BLACK_KEYER_G_Y 0x40ce
+#define mmLB4_LB_BLACK_KEYER_G_Y 0x42ce
+#define mmLB5_LB_BLACK_KEYER_G_Y 0x44ce
+#define mmLB_BLACK_KEYER_B_CB 0x1acf
+#define mmLB0_LB_BLACK_KEYER_B_CB 0x1acf
+#define mmLB1_LB_BLACK_KEYER_B_CB 0x1ccf
+#define mmLB2_LB_BLACK_KEYER_B_CB 0x1ecf
+#define mmLB3_LB_BLACK_KEYER_B_CB 0x40cf
+#define mmLB4_LB_BLACK_KEYER_B_CB 0x42cf
+#define mmLB5_LB_BLACK_KEYER_B_CB 0x44cf
+#define mmLB_KEYER_COLOR_CTRL 0x1ad0
+#define mmLB0_LB_KEYER_COLOR_CTRL 0x1ad0
+#define mmLB1_LB_KEYER_COLOR_CTRL 0x1cd0
+#define mmLB2_LB_KEYER_COLOR_CTRL 0x1ed0
+#define mmLB3_LB_KEYER_COLOR_CTRL 0x40d0
+#define mmLB4_LB_KEYER_COLOR_CTRL 0x42d0
+#define mmLB5_LB_KEYER_COLOR_CTRL 0x44d0
+#define mmLB_KEYER_COLOR_R_CR 0x1ad1
+#define mmLB0_LB_KEYER_COLOR_R_CR 0x1ad1
+#define mmLB1_LB_KEYER_COLOR_R_CR 0x1cd1
+#define mmLB2_LB_KEYER_COLOR_R_CR 0x1ed1
+#define mmLB3_LB_KEYER_COLOR_R_CR 0x40d1
+#define mmLB4_LB_KEYER_COLOR_R_CR 0x42d1
+#define mmLB5_LB_KEYER_COLOR_R_CR 0x44d1
+#define mmLB_KEYER_COLOR_G_Y 0x1ad2
+#define mmLB0_LB_KEYER_COLOR_G_Y 0x1ad2
+#define mmLB1_LB_KEYER_COLOR_G_Y 0x1cd2
+#define mmLB2_LB_KEYER_COLOR_G_Y 0x1ed2
+#define mmLB3_LB_KEYER_COLOR_G_Y 0x40d2
+#define mmLB4_LB_KEYER_COLOR_G_Y 0x42d2
+#define mmLB5_LB_KEYER_COLOR_G_Y 0x44d2
+#define mmLB_KEYER_COLOR_B_CB 0x1ad3
+#define mmLB0_LB_KEYER_COLOR_B_CB 0x1ad3
+#define mmLB1_LB_KEYER_COLOR_B_CB 0x1cd3
+#define mmLB2_LB_KEYER_COLOR_B_CB 0x1ed3
+#define mmLB3_LB_KEYER_COLOR_B_CB 0x40d3
+#define mmLB4_LB_KEYER_COLOR_B_CB 0x42d3
+#define mmLB5_LB_KEYER_COLOR_B_CB 0x44d3
+#define mmLB_KEYER_COLOR_REP_R_CR 0x1ad4
+#define mmLB0_LB_KEYER_COLOR_REP_R_CR 0x1ad4
+#define mmLB1_LB_KEYER_COLOR_REP_R_CR 0x1cd4
+#define mmLB2_LB_KEYER_COLOR_REP_R_CR 0x1ed4
+#define mmLB3_LB_KEYER_COLOR_REP_R_CR 0x40d4
+#define mmLB4_LB_KEYER_COLOR_REP_R_CR 0x42d4
+#define mmLB5_LB_KEYER_COLOR_REP_R_CR 0x44d4
+#define mmLB_KEYER_COLOR_REP_G_Y 0x1ad5
+#define mmLB0_LB_KEYER_COLOR_REP_G_Y 0x1ad5
+#define mmLB1_LB_KEYER_COLOR_REP_G_Y 0x1cd5
+#define mmLB2_LB_KEYER_COLOR_REP_G_Y 0x1ed5
+#define mmLB3_LB_KEYER_COLOR_REP_G_Y 0x40d5
+#define mmLB4_LB_KEYER_COLOR_REP_G_Y 0x42d5
+#define mmLB5_LB_KEYER_COLOR_REP_G_Y 0x44d5
+#define mmLB_KEYER_COLOR_REP_B_CB 0x1ad6
+#define mmLB0_LB_KEYER_COLOR_REP_B_CB 0x1ad6
+#define mmLB1_LB_KEYER_COLOR_REP_B_CB 0x1cd6
+#define mmLB2_LB_KEYER_COLOR_REP_B_CB 0x1ed6
+#define mmLB3_LB_KEYER_COLOR_REP_B_CB 0x40d6
+#define mmLB4_LB_KEYER_COLOR_REP_B_CB 0x42d6
+#define mmLB5_LB_KEYER_COLOR_REP_B_CB 0x44d6
+#define mmLB_BUFFER_LEVEL_STATUS 0x1ad7
+#define mmLB0_LB_BUFFER_LEVEL_STATUS 0x1ad7
+#define mmLB1_LB_BUFFER_LEVEL_STATUS 0x1cd7
+#define mmLB2_LB_BUFFER_LEVEL_STATUS 0x1ed7
+#define mmLB3_LB_BUFFER_LEVEL_STATUS 0x40d7
+#define mmLB4_LB_BUFFER_LEVEL_STATUS 0x42d7
+#define mmLB5_LB_BUFFER_LEVEL_STATUS 0x44d7
+#define mmLB_BUFFER_URGENCY_CTRL 0x1ad8
+#define mmLB0_LB_BUFFER_URGENCY_CTRL 0x1ad8
+#define mmLB1_LB_BUFFER_URGENCY_CTRL 0x1cd8
+#define mmLB2_LB_BUFFER_URGENCY_CTRL 0x1ed8
+#define mmLB3_LB_BUFFER_URGENCY_CTRL 0x40d8
+#define mmLB4_LB_BUFFER_URGENCY_CTRL 0x42d8
+#define mmLB5_LB_BUFFER_URGENCY_CTRL 0x44d8
+#define mmLB_BUFFER_URGENCY_STATUS 0x1ad9
+#define mmLB0_LB_BUFFER_URGENCY_STATUS 0x1ad9
+#define mmLB1_LB_BUFFER_URGENCY_STATUS 0x1cd9
+#define mmLB2_LB_BUFFER_URGENCY_STATUS 0x1ed9
+#define mmLB3_LB_BUFFER_URGENCY_STATUS 0x40d9
+#define mmLB4_LB_BUFFER_URGENCY_STATUS 0x42d9
+#define mmLB5_LB_BUFFER_URGENCY_STATUS 0x44d9
+#define mmLB_BUFFER_STATUS 0x1ada
+#define mmLB0_LB_BUFFER_STATUS 0x1ada
+#define mmLB1_LB_BUFFER_STATUS 0x1cda
+#define mmLB2_LB_BUFFER_STATUS 0x1eda
+#define mmLB3_LB_BUFFER_STATUS 0x40da
+#define mmLB4_LB_BUFFER_STATUS 0x42da
+#define mmLB5_LB_BUFFER_STATUS 0x44da
+#define mmLB_NO_OUTSTANDING_REQ_STATUS 0x1adc
+#define mmLB0_LB_NO_OUTSTANDING_REQ_STATUS 0x1adc
+#define mmLB1_LB_NO_OUTSTANDING_REQ_STATUS 0x1cdc
+#define mmLB2_LB_NO_OUTSTANDING_REQ_STATUS 0x1edc
+#define mmLB3_LB_NO_OUTSTANDING_REQ_STATUS 0x40dc
+#define mmLB4_LB_NO_OUTSTANDING_REQ_STATUS 0x42dc
+#define mmLB5_LB_NO_OUTSTANDING_REQ_STATUS 0x44dc
+#define mmMVP_AFR_FLIP_MODE 0x1ae0
+#define mmLB0_MVP_AFR_FLIP_MODE 0x1ae0
+#define mmLB1_MVP_AFR_FLIP_MODE 0x1ce0
+#define mmLB2_MVP_AFR_FLIP_MODE 0x1ee0
+#define mmLB3_MVP_AFR_FLIP_MODE 0x40e0
+#define mmLB4_MVP_AFR_FLIP_MODE 0x42e0
+#define mmLB5_MVP_AFR_FLIP_MODE 0x44e0
+#define mmMVP_AFR_FLIP_FIFO_CNTL 0x1ae1
+#define mmLB0_MVP_AFR_FLIP_FIFO_CNTL 0x1ae1
+#define mmLB1_MVP_AFR_FLIP_FIFO_CNTL 0x1ce1
+#define mmLB2_MVP_AFR_FLIP_FIFO_CNTL 0x1ee1
+#define mmLB3_MVP_AFR_FLIP_FIFO_CNTL 0x40e1
+#define mmLB4_MVP_AFR_FLIP_FIFO_CNTL 0x42e1
+#define mmLB5_MVP_AFR_FLIP_FIFO_CNTL 0x44e1
+#define mmMVP_FLIP_LINE_NUM_INSERT 0x1ae2
+#define mmLB0_MVP_FLIP_LINE_NUM_INSERT 0x1ae2
+#define mmLB1_MVP_FLIP_LINE_NUM_INSERT 0x1ce2
+#define mmLB2_MVP_FLIP_LINE_NUM_INSERT 0x1ee2
+#define mmLB3_MVP_FLIP_LINE_NUM_INSERT 0x40e2
+#define mmLB4_MVP_FLIP_LINE_NUM_INSERT 0x42e2
+#define mmLB5_MVP_FLIP_LINE_NUM_INSERT 0x44e2
+#define mmDC_MVP_LB_CONTROL 0x1ae3
+#define mmLB0_DC_MVP_LB_CONTROL 0x1ae3
+#define mmLB1_DC_MVP_LB_CONTROL 0x1ce3
+#define mmLB2_DC_MVP_LB_CONTROL 0x1ee3
+#define mmLB3_DC_MVP_LB_CONTROL 0x40e3
+#define mmLB4_DC_MVP_LB_CONTROL 0x42e3
+#define mmLB5_DC_MVP_LB_CONTROL 0x44e3
+#define mmLB_DEBUG 0x1ae4
+#define mmLB0_LB_DEBUG 0x1ae4
+#define mmLB1_LB_DEBUG 0x1ce4
+#define mmLB2_LB_DEBUG 0x1ee4
+#define mmLB3_LB_DEBUG 0x40e4
+#define mmLB4_LB_DEBUG 0x42e4
+#define mmLB5_LB_DEBUG 0x44e4
+#define mmLB_DEBUG2 0x1ae5
+#define mmLB0_LB_DEBUG2 0x1ae5
+#define mmLB1_LB_DEBUG2 0x1ce5
+#define mmLB2_LB_DEBUG2 0x1ee5
+#define mmLB3_LB_DEBUG2 0x40e5
+#define mmLB4_LB_DEBUG2 0x42e5
+#define mmLB5_LB_DEBUG2 0x44e5
+#define mmLB_DEBUG3 0x1ae6
+#define mmLB0_LB_DEBUG3 0x1ae6
+#define mmLB1_LB_DEBUG3 0x1ce6
+#define mmLB2_LB_DEBUG3 0x1ee6
+#define mmLB3_LB_DEBUG3 0x40e6
+#define mmLB4_LB_DEBUG3 0x42e6
+#define mmLB5_LB_DEBUG3 0x44e6
+#define mmLB_TEST_DEBUG_INDEX 0x1afe
+#define mmLB0_LB_TEST_DEBUG_INDEX 0x1afe
+#define mmLB1_LB_TEST_DEBUG_INDEX 0x1cfe
+#define mmLB2_LB_TEST_DEBUG_INDEX 0x1efe
+#define mmLB3_LB_TEST_DEBUG_INDEX 0x40fe
+#define mmLB4_LB_TEST_DEBUG_INDEX 0x42fe
+#define mmLB5_LB_TEST_DEBUG_INDEX 0x44fe
+#define mmLB_TEST_DEBUG_DATA 0x1aff
+#define mmLB0_LB_TEST_DEBUG_DATA 0x1aff
+#define mmLB1_LB_TEST_DEBUG_DATA 0x1cff
+#define mmLB2_LB_TEST_DEBUG_DATA 0x1eff
+#define mmLB3_LB_TEST_DEBUG_DATA 0x40ff
+#define mmLB4_LB_TEST_DEBUG_DATA 0x42ff
+#define mmLB5_LB_TEST_DEBUG_DATA 0x44ff
+#define mmLBV_DATA_FORMAT 0x463c
+#define mmLBV0_LBV_DATA_FORMAT 0x463c
+#define mmLBV1_LBV_DATA_FORMAT 0x983c
+#define mmLBV_MEMORY_CTRL 0x463d
+#define mmLBV0_LBV_MEMORY_CTRL 0x463d
+#define mmLBV1_LBV_MEMORY_CTRL 0x983d
+#define mmLBV_MEMORY_SIZE_STATUS 0x463e
+#define mmLBV0_LBV_MEMORY_SIZE_STATUS 0x463e
+#define mmLBV1_LBV_MEMORY_SIZE_STATUS 0x983e
+#define mmLBV_DESKTOP_HEIGHT 0x463f
+#define mmLBV0_LBV_DESKTOP_HEIGHT 0x463f
+#define mmLBV1_LBV_DESKTOP_HEIGHT 0x983f
+#define mmLBV_VLINE_START_END 0x4640
+#define mmLBV0_LBV_VLINE_START_END 0x4640
+#define mmLBV1_LBV_VLINE_START_END 0x9840
+#define mmLBV_VLINE2_START_END 0x4641
+#define mmLBV0_LBV_VLINE2_START_END 0x4641
+#define mmLBV1_LBV_VLINE2_START_END 0x9841
+#define mmLBV_V_COUNTER 0x4642
+#define mmLBV0_LBV_V_COUNTER 0x4642
+#define mmLBV1_LBV_V_COUNTER 0x9842
+#define mmLBV_SNAPSHOT_V_COUNTER 0x4643
+#define mmLBV0_LBV_SNAPSHOT_V_COUNTER 0x4643
+#define mmLBV1_LBV_SNAPSHOT_V_COUNTER 0x9843
+#define mmLBV_V_COUNTER_CHROMA 0x4644
+#define mmLBV0_LBV_V_COUNTER_CHROMA 0x4644
+#define mmLBV1_LBV_V_COUNTER_CHROMA 0x9844
+#define mmLBV_SNAPSHOT_V_COUNTER_CHROMA 0x4645
+#define mmLBV0_LBV_SNAPSHOT_V_COUNTER_CHROMA 0x4645
+#define mmLBV1_LBV_SNAPSHOT_V_COUNTER_CHROMA 0x9845
+#define mmLBV_INTERRUPT_MASK 0x4646
+#define mmLBV0_LBV_INTERRUPT_MASK 0x4646
+#define mmLBV1_LBV_INTERRUPT_MASK 0x9846
+#define mmLBV_VLINE_STATUS 0x4647
+#define mmLBV0_LBV_VLINE_STATUS 0x4647
+#define mmLBV1_LBV_VLINE_STATUS 0x9847
+#define mmLBV_VLINE2_STATUS 0x4648
+#define mmLBV0_LBV_VLINE2_STATUS 0x4648
+#define mmLBV1_LBV_VLINE2_STATUS 0x9848
+#define mmLBV_VBLANK_STATUS 0x4649
+#define mmLBV0_LBV_VBLANK_STATUS 0x4649
+#define mmLBV1_LBV_VBLANK_STATUS 0x9849
+#define mmLBV_SYNC_RESET_SEL 0x464a
+#define mmLBV0_LBV_SYNC_RESET_SEL 0x464a
+#define mmLBV1_LBV_SYNC_RESET_SEL 0x984a
+#define mmLBV_BLACK_KEYER_R_CR 0x464b
+#define mmLBV0_LBV_BLACK_KEYER_R_CR 0x464b
+#define mmLBV1_LBV_BLACK_KEYER_R_CR 0x984b
+#define mmLBV_BLACK_KEYER_G_Y 0x464c
+#define mmLBV0_LBV_BLACK_KEYER_G_Y 0x464c
+#define mmLBV1_LBV_BLACK_KEYER_G_Y 0x984c
+#define mmLBV_BLACK_KEYER_B_CB 0x464d
+#define mmLBV0_LBV_BLACK_KEYER_B_CB 0x464d
+#define mmLBV1_LBV_BLACK_KEYER_B_CB 0x984d
+#define mmLBV_KEYER_COLOR_CTRL 0x464e
+#define mmLBV0_LBV_KEYER_COLOR_CTRL 0x464e
+#define mmLBV1_LBV_KEYER_COLOR_CTRL 0x984e
+#define mmLBV_KEYER_COLOR_R_CR 0x464f
+#define mmLBV0_LBV_KEYER_COLOR_R_CR 0x464f
+#define mmLBV1_LBV_KEYER_COLOR_R_CR 0x984f
+#define mmLBV_KEYER_COLOR_G_Y 0x4650
+#define mmLBV0_LBV_KEYER_COLOR_G_Y 0x4650
+#define mmLBV1_LBV_KEYER_COLOR_G_Y 0x9850
+#define mmLBV_KEYER_COLOR_B_CB 0x4651
+#define mmLBV0_LBV_KEYER_COLOR_B_CB 0x4651
+#define mmLBV1_LBV_KEYER_COLOR_B_CB 0x9851
+#define mmLBV_KEYER_COLOR_REP_R_CR 0x4652
+#define mmLBV0_LBV_KEYER_COLOR_REP_R_CR 0x4652
+#define mmLBV1_LBV_KEYER_COLOR_REP_R_CR 0x9852
+#define mmLBV_KEYER_COLOR_REP_G_Y 0x4653
+#define mmLBV0_LBV_KEYER_COLOR_REP_G_Y 0x4653
+#define mmLBV1_LBV_KEYER_COLOR_REP_G_Y 0x9853
+#define mmLBV_KEYER_COLOR_REP_B_CB 0x4654
+#define mmLBV0_LBV_KEYER_COLOR_REP_B_CB 0x4654
+#define mmLBV1_LBV_KEYER_COLOR_REP_B_CB 0x9854
+#define mmLBV_BUFFER_LEVEL_STATUS 0x4655
+#define mmLBV0_LBV_BUFFER_LEVEL_STATUS 0x4655
+#define mmLBV1_LBV_BUFFER_LEVEL_STATUS 0x9855
+#define mmLBV_BUFFER_URGENCY_CTRL 0x4656
+#define mmLBV0_LBV_BUFFER_URGENCY_CTRL 0x4656
+#define mmLBV1_LBV_BUFFER_URGENCY_CTRL 0x9856
+#define mmLBV_BUFFER_URGENCY_STATUS 0x4657
+#define mmLBV0_LBV_BUFFER_URGENCY_STATUS 0x4657
+#define mmLBV1_LBV_BUFFER_URGENCY_STATUS 0x9857
+#define mmLBV_BUFFER_STATUS 0x4658
+#define mmLBV0_LBV_BUFFER_STATUS 0x4658
+#define mmLBV1_LBV_BUFFER_STATUS 0x9858
+#define mmLBV_NO_OUTSTANDING_REQ_STATUS 0x4659
+#define mmLBV0_LBV_NO_OUTSTANDING_REQ_STATUS 0x4659
+#define mmLBV1_LBV_NO_OUTSTANDING_REQ_STATUS 0x9859
+#define mmLBV_DEBUG 0x465a
+#define mmLBV0_LBV_DEBUG 0x465a
+#define mmLBV1_LBV_DEBUG 0x985a
+#define mmLBV_DEBUG2 0x465b
+#define mmLBV0_LBV_DEBUG2 0x465b
+#define mmLBV1_LBV_DEBUG2 0x985b
+#define mmLBV_DEBUG3 0x465c
+#define mmLBV0_LBV_DEBUG3 0x465c
+#define mmLBV1_LBV_DEBUG3 0x985c
+#define mmLBV_TEST_DEBUG_INDEX 0x4666
+#define mmLBV0_LBV_TEST_DEBUG_INDEX 0x4666
+#define mmLBV1_LBV_TEST_DEBUG_INDEX 0x9866
+#define mmLBV_TEST_DEBUG_DATA 0x4667
+#define mmLBV0_LBV_TEST_DEBUG_DATA 0x4667
+#define mmLBV1_LBV_TEST_DEBUG_DATA 0x9867
+#define mmMVP_CONTROL1 0x2ac
+#define mmMVP_CONTROL2 0x2ad
+#define mmMVP_FIFO_CONTROL 0x2ae
+#define mmMVP_FIFO_STATUS 0x2af
+#define mmMVP_SLAVE_STATUS 0x2b0
+#define mmMVP_INBAND_CNTL_CAP 0x2b1
+#define mmMVP_BLACK_KEYER 0x2b2
+#define mmMVP_CRC_CNTL 0x2b3
+#define mmMVP_CRC_RESULT_BLUE_GREEN 0x2b4
+#define mmMVP_CRC_RESULT_RED 0x2b5
+#define mmMVP_CONTROL3 0x2b6
+#define mmMVP_RECEIVE_CNT_CNTL1 0x2b7
+#define mmMVP_RECEIVE_CNT_CNTL2 0x2b8
+#define mmMVP_DEBUG 0x2bb
+#define mmMVP_TEST_DEBUG_INDEX 0x2b9
+#define mmMVP_TEST_DEBUG_DATA 0x2ba
+#define ixMVP_DEBUG_12 0xc
+#define ixMVP_DEBUG_13 0xd
+#define ixMVP_DEBUG_14 0xe
+#define ixMVP_DEBUG_15 0xf
+#define ixMVP_DEBUG_16 0x10
+#define ixMVP_DEBUG_17 0x11
+#define mmSCL_COEF_RAM_SELECT 0x1b40
+#define mmSCL0_SCL_COEF_RAM_SELECT 0x1b40
+#define mmSCL1_SCL_COEF_RAM_SELECT 0x1d40
+#define mmSCL2_SCL_COEF_RAM_SELECT 0x1f40
+#define mmSCL3_SCL_COEF_RAM_SELECT 0x4140
+#define mmSCL4_SCL_COEF_RAM_SELECT 0x4340
+#define mmSCL5_SCL_COEF_RAM_SELECT 0x4540
+#define mmSCL_COEF_RAM_TAP_DATA 0x1b41
+#define mmSCL0_SCL_COEF_RAM_TAP_DATA 0x1b41
+#define mmSCL1_SCL_COEF_RAM_TAP_DATA 0x1d41
+#define mmSCL2_SCL_COEF_RAM_TAP_DATA 0x1f41
+#define mmSCL3_SCL_COEF_RAM_TAP_DATA 0x4141
+#define mmSCL4_SCL_COEF_RAM_TAP_DATA 0x4341
+#define mmSCL5_SCL_COEF_RAM_TAP_DATA 0x4541
+#define mmSCL_MODE 0x1b42
+#define mmSCL0_SCL_MODE 0x1b42
+#define mmSCL1_SCL_MODE 0x1d42
+#define mmSCL2_SCL_MODE 0x1f42
+#define mmSCL3_SCL_MODE 0x4142
+#define mmSCL4_SCL_MODE 0x4342
+#define mmSCL5_SCL_MODE 0x4542
+#define mmSCL_TAP_CONTROL 0x1b43
+#define mmSCL0_SCL_TAP_CONTROL 0x1b43
+#define mmSCL1_SCL_TAP_CONTROL 0x1d43
+#define mmSCL2_SCL_TAP_CONTROL 0x1f43
+#define mmSCL3_SCL_TAP_CONTROL 0x4143
+#define mmSCL4_SCL_TAP_CONTROL 0x4343
+#define mmSCL5_SCL_TAP_CONTROL 0x4543
+#define mmSCL_CONTROL 0x1b44
+#define mmSCL0_SCL_CONTROL 0x1b44
+#define mmSCL1_SCL_CONTROL 0x1d44
+#define mmSCL2_SCL_CONTROL 0x1f44
+#define mmSCL3_SCL_CONTROL 0x4144
+#define mmSCL4_SCL_CONTROL 0x4344
+#define mmSCL5_SCL_CONTROL 0x4544
+#define mmSCL_BYPASS_CONTROL 0x1b45
+#define mmSCL0_SCL_BYPASS_CONTROL 0x1b45
+#define mmSCL1_SCL_BYPASS_CONTROL 0x1d45
+#define mmSCL2_SCL_BYPASS_CONTROL 0x1f45
+#define mmSCL3_SCL_BYPASS_CONTROL 0x4145
+#define mmSCL4_SCL_BYPASS_CONTROL 0x4345
+#define mmSCL5_SCL_BYPASS_CONTROL 0x4545
+#define mmSCL_MANUAL_REPLICATE_CONTROL 0x1b46
+#define mmSCL0_SCL_MANUAL_REPLICATE_CONTROL 0x1b46
+#define mmSCL1_SCL_MANUAL_REPLICATE_CONTROL 0x1d46
+#define mmSCL2_SCL_MANUAL_REPLICATE_CONTROL 0x1f46
+#define mmSCL3_SCL_MANUAL_REPLICATE_CONTROL 0x4146
+#define mmSCL4_SCL_MANUAL_REPLICATE_CONTROL 0x4346
+#define mmSCL5_SCL_MANUAL_REPLICATE_CONTROL 0x4546
+#define mmSCL_AUTOMATIC_MODE_CONTROL 0x1b47
+#define mmSCL0_SCL_AUTOMATIC_MODE_CONTROL 0x1b47
+#define mmSCL1_SCL_AUTOMATIC_MODE_CONTROL 0x1d47
+#define mmSCL2_SCL_AUTOMATIC_MODE_CONTROL 0x1f47
+#define mmSCL3_SCL_AUTOMATIC_MODE_CONTROL 0x4147
+#define mmSCL4_SCL_AUTOMATIC_MODE_CONTROL 0x4347
+#define mmSCL5_SCL_AUTOMATIC_MODE_CONTROL 0x4547
+#define mmSCL_HORZ_FILTER_CONTROL 0x1b48
+#define mmSCL0_SCL_HORZ_FILTER_CONTROL 0x1b48
+#define mmSCL1_SCL_HORZ_FILTER_CONTROL 0x1d48
+#define mmSCL2_SCL_HORZ_FILTER_CONTROL 0x1f48
+#define mmSCL3_SCL_HORZ_FILTER_CONTROL 0x4148
+#define mmSCL4_SCL_HORZ_FILTER_CONTROL 0x4348
+#define mmSCL5_SCL_HORZ_FILTER_CONTROL 0x4548
+#define mmSCL_HORZ_FILTER_SCALE_RATIO 0x1b49
+#define mmSCL0_SCL_HORZ_FILTER_SCALE_RATIO 0x1b49
+#define mmSCL1_SCL_HORZ_FILTER_SCALE_RATIO 0x1d49
+#define mmSCL2_SCL_HORZ_FILTER_SCALE_RATIO 0x1f49
+#define mmSCL3_SCL_HORZ_FILTER_SCALE_RATIO 0x4149
+#define mmSCL4_SCL_HORZ_FILTER_SCALE_RATIO 0x4349
+#define mmSCL5_SCL_HORZ_FILTER_SCALE_RATIO 0x4549
+#define mmSCL_HORZ_FILTER_INIT 0x1b4a
+#define mmSCL0_SCL_HORZ_FILTER_INIT 0x1b4a
+#define mmSCL1_SCL_HORZ_FILTER_INIT 0x1d4a
+#define mmSCL2_SCL_HORZ_FILTER_INIT 0x1f4a
+#define mmSCL3_SCL_HORZ_FILTER_INIT 0x414a
+#define mmSCL4_SCL_HORZ_FILTER_INIT 0x434a
+#define mmSCL5_SCL_HORZ_FILTER_INIT 0x454a
+#define mmSCL_VERT_FILTER_CONTROL 0x1b4b
+#define mmSCL0_SCL_VERT_FILTER_CONTROL 0x1b4b
+#define mmSCL1_SCL_VERT_FILTER_CONTROL 0x1d4b
+#define mmSCL2_SCL_VERT_FILTER_CONTROL 0x1f4b
+#define mmSCL3_SCL_VERT_FILTER_CONTROL 0x414b
+#define mmSCL4_SCL_VERT_FILTER_CONTROL 0x434b
+#define mmSCL5_SCL_VERT_FILTER_CONTROL 0x454b
+#define mmSCL_VERT_FILTER_SCALE_RATIO 0x1b4c
+#define mmSCL0_SCL_VERT_FILTER_SCALE_RATIO 0x1b4c
+#define mmSCL1_SCL_VERT_FILTER_SCALE_RATIO 0x1d4c
+#define mmSCL2_SCL_VERT_FILTER_SCALE_RATIO 0x1f4c
+#define mmSCL3_SCL_VERT_FILTER_SCALE_RATIO 0x414c
+#define mmSCL4_SCL_VERT_FILTER_SCALE_RATIO 0x434c
+#define mmSCL5_SCL_VERT_FILTER_SCALE_RATIO 0x454c
+#define mmSCL_VERT_FILTER_INIT 0x1b4d
+#define mmSCL0_SCL_VERT_FILTER_INIT 0x1b4d
+#define mmSCL1_SCL_VERT_FILTER_INIT 0x1d4d
+#define mmSCL2_SCL_VERT_FILTER_INIT 0x1f4d
+#define mmSCL3_SCL_VERT_FILTER_INIT 0x414d
+#define mmSCL4_SCL_VERT_FILTER_INIT 0x434d
+#define mmSCL5_SCL_VERT_FILTER_INIT 0x454d
+#define mmSCL_VERT_FILTER_INIT_BOT 0x1b4e
+#define mmSCL0_SCL_VERT_FILTER_INIT_BOT 0x1b4e
+#define mmSCL1_SCL_VERT_FILTER_INIT_BOT 0x1d4e
+#define mmSCL2_SCL_VERT_FILTER_INIT_BOT 0x1f4e
+#define mmSCL3_SCL_VERT_FILTER_INIT_BOT 0x414e
+#define mmSCL4_SCL_VERT_FILTER_INIT_BOT 0x434e
+#define mmSCL5_SCL_VERT_FILTER_INIT_BOT 0x454e
+#define mmSCL_ROUND_OFFSET 0x1b4f
+#define mmSCL0_SCL_ROUND_OFFSET 0x1b4f
+#define mmSCL1_SCL_ROUND_OFFSET 0x1d4f
+#define mmSCL2_SCL_ROUND_OFFSET 0x1f4f
+#define mmSCL3_SCL_ROUND_OFFSET 0x414f
+#define mmSCL4_SCL_ROUND_OFFSET 0x434f
+#define mmSCL5_SCL_ROUND_OFFSET 0x454f
+#define mmSCL_UPDATE 0x1b51
+#define mmSCL0_SCL_UPDATE 0x1b51
+#define mmSCL1_SCL_UPDATE 0x1d51
+#define mmSCL2_SCL_UPDATE 0x1f51
+#define mmSCL3_SCL_UPDATE 0x4151
+#define mmSCL4_SCL_UPDATE 0x4351
+#define mmSCL5_SCL_UPDATE 0x4551
+#define mmSCL_F_SHARP_CONTROL 0x1b53
+#define mmSCL0_SCL_F_SHARP_CONTROL 0x1b53
+#define mmSCL1_SCL_F_SHARP_CONTROL 0x1d53
+#define mmSCL2_SCL_F_SHARP_CONTROL 0x1f53
+#define mmSCL3_SCL_F_SHARP_CONTROL 0x4153
+#define mmSCL4_SCL_F_SHARP_CONTROL 0x4353
+#define mmSCL5_SCL_F_SHARP_CONTROL 0x4553
+#define mmSCL_ALU_CONTROL 0x1b54
+#define mmSCL0_SCL_ALU_CONTROL 0x1b54
+#define mmSCL1_SCL_ALU_CONTROL 0x1d54
+#define mmSCL2_SCL_ALU_CONTROL 0x1f54
+#define mmSCL3_SCL_ALU_CONTROL 0x4154
+#define mmSCL4_SCL_ALU_CONTROL 0x4354
+#define mmSCL5_SCL_ALU_CONTROL 0x4554
+#define mmSCL_COEF_RAM_CONFLICT_STATUS 0x1b55
+#define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS 0x1b55
+#define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS 0x1d55
+#define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS 0x1f55
+#define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS 0x4155
+#define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS 0x4355
+#define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS 0x4555
+#define mmVIEWPORT_START_SECONDARY 0x1b5b
+#define mmSCL0_VIEWPORT_START_SECONDARY 0x1b5b
+#define mmSCL1_VIEWPORT_START_SECONDARY 0x1d5b
+#define mmSCL2_VIEWPORT_START_SECONDARY 0x1f5b
+#define mmSCL3_VIEWPORT_START_SECONDARY 0x415b
+#define mmSCL4_VIEWPORT_START_SECONDARY 0x435b
+#define mmSCL5_VIEWPORT_START_SECONDARY 0x455b
+#define mmVIEWPORT_START 0x1b5c
+#define mmSCL0_VIEWPORT_START 0x1b5c
+#define mmSCL1_VIEWPORT_START 0x1d5c
+#define mmSCL2_VIEWPORT_START 0x1f5c
+#define mmSCL3_VIEWPORT_START 0x415c
+#define mmSCL4_VIEWPORT_START 0x435c
+#define mmSCL5_VIEWPORT_START 0x455c
+#define mmVIEWPORT_SIZE 0x1b5d
+#define mmSCL0_VIEWPORT_SIZE 0x1b5d
+#define mmSCL1_VIEWPORT_SIZE 0x1d5d
+#define mmSCL2_VIEWPORT_SIZE 0x1f5d
+#define mmSCL3_VIEWPORT_SIZE 0x415d
+#define mmSCL4_VIEWPORT_SIZE 0x435d
+#define mmSCL5_VIEWPORT_SIZE 0x455d
+#define mmEXT_OVERSCAN_LEFT_RIGHT 0x1b5e
+#define mmSCL0_EXT_OVERSCAN_LEFT_RIGHT 0x1b5e
+#define mmSCL1_EXT_OVERSCAN_LEFT_RIGHT 0x1d5e
+#define mmSCL2_EXT_OVERSCAN_LEFT_RIGHT 0x1f5e
+#define mmSCL3_EXT_OVERSCAN_LEFT_RIGHT 0x415e
+#define mmSCL4_EXT_OVERSCAN_LEFT_RIGHT 0x435e
+#define mmSCL5_EXT_OVERSCAN_LEFT_RIGHT 0x455e
+#define mmEXT_OVERSCAN_TOP_BOTTOM 0x1b5f
+#define mmSCL0_EXT_OVERSCAN_TOP_BOTTOM 0x1b5f
+#define mmSCL1_EXT_OVERSCAN_TOP_BOTTOM 0x1d5f
+#define mmSCL2_EXT_OVERSCAN_TOP_BOTTOM 0x1f5f
+#define mmSCL3_EXT_OVERSCAN_TOP_BOTTOM 0x415f
+#define mmSCL4_EXT_OVERSCAN_TOP_BOTTOM 0x435f
+#define mmSCL5_EXT_OVERSCAN_TOP_BOTTOM 0x455f
+#define mmSCL_MODE_CHANGE_DET1 0x1b60
+#define mmSCL0_SCL_MODE_CHANGE_DET1 0x1b60
+#define mmSCL1_SCL_MODE_CHANGE_DET1 0x1d60
+#define mmSCL2_SCL_MODE_CHANGE_DET1 0x1f60
+#define mmSCL3_SCL_MODE_CHANGE_DET1 0x4160
+#define mmSCL4_SCL_MODE_CHANGE_DET1 0x4360
+#define mmSCL5_SCL_MODE_CHANGE_DET1 0x4560
+#define mmSCL_MODE_CHANGE_DET2 0x1b61
+#define mmSCL0_SCL_MODE_CHANGE_DET2 0x1b61
+#define mmSCL1_SCL_MODE_CHANGE_DET2 0x1d61
+#define mmSCL2_SCL_MODE_CHANGE_DET2 0x1f61
+#define mmSCL3_SCL_MODE_CHANGE_DET2 0x4161
+#define mmSCL4_SCL_MODE_CHANGE_DET2 0x4361
+#define mmSCL5_SCL_MODE_CHANGE_DET2 0x4561
+#define mmSCL_MODE_CHANGE_DET3 0x1b62
+#define mmSCL0_SCL_MODE_CHANGE_DET3 0x1b62
+#define mmSCL1_SCL_MODE_CHANGE_DET3 0x1d62
+#define mmSCL2_SCL_MODE_CHANGE_DET3 0x1f62
+#define mmSCL3_SCL_MODE_CHANGE_DET3 0x4162
+#define mmSCL4_SCL_MODE_CHANGE_DET3 0x4362
+#define mmSCL5_SCL_MODE_CHANGE_DET3 0x4562
+#define mmSCL_MODE_CHANGE_MASK 0x1b63
+#define mmSCL0_SCL_MODE_CHANGE_MASK 0x1b63
+#define mmSCL1_SCL_MODE_CHANGE_MASK 0x1d63
+#define mmSCL2_SCL_MODE_CHANGE_MASK 0x1f63
+#define mmSCL3_SCL_MODE_CHANGE_MASK 0x4163
+#define mmSCL4_SCL_MODE_CHANGE_MASK 0x4363
+#define mmSCL5_SCL_MODE_CHANGE_MASK 0x4563
+#define mmSCL_DEBUG2 0x1b69
+#define mmSCL0_SCL_DEBUG2 0x1b69
+#define mmSCL1_SCL_DEBUG2 0x1d69
+#define mmSCL2_SCL_DEBUG2 0x1f69
+#define mmSCL3_SCL_DEBUG2 0x4169
+#define mmSCL4_SCL_DEBUG2 0x4369
+#define mmSCL5_SCL_DEBUG2 0x4569
+#define mmSCL_DEBUG 0x1b6a
+#define mmSCL0_SCL_DEBUG 0x1b6a
+#define mmSCL1_SCL_DEBUG 0x1d6a
+#define mmSCL2_SCL_DEBUG 0x1f6a
+#define mmSCL3_SCL_DEBUG 0x416a
+#define mmSCL4_SCL_DEBUG 0x436a
+#define mmSCL5_SCL_DEBUG 0x456a
+#define mmSCL_TEST_DEBUG_INDEX 0x1b6b
+#define mmSCL0_SCL_TEST_DEBUG_INDEX 0x1b6b
+#define mmSCL1_SCL_TEST_DEBUG_INDEX 0x1d6b
+#define mmSCL2_SCL_TEST_DEBUG_INDEX 0x1f6b
+#define mmSCL3_SCL_TEST_DEBUG_INDEX 0x416b
+#define mmSCL4_SCL_TEST_DEBUG_INDEX 0x436b
+#define mmSCL5_SCL_TEST_DEBUG_INDEX 0x456b
+#define mmSCL_TEST_DEBUG_DATA 0x1b6c
+#define mmSCL0_SCL_TEST_DEBUG_DATA 0x1b6c
+#define mmSCL1_SCL_TEST_DEBUG_DATA 0x1d6c
+#define mmSCL2_SCL_TEST_DEBUG_DATA 0x1f6c
+#define mmSCL3_SCL_TEST_DEBUG_DATA 0x416c
+#define mmSCL4_SCL_TEST_DEBUG_DATA 0x436c
+#define mmSCL5_SCL_TEST_DEBUG_DATA 0x456c
+#define mmSCLV_COEF_RAM_SELECT 0x4670
+#define mmSCLV0_SCLV_COEF_RAM_SELECT 0x4670
+#define mmSCLV1_SCLV_COEF_RAM_SELECT 0x9870
+#define mmSCLV_COEF_RAM_TAP_DATA 0x4671
+#define mmSCLV0_SCLV_COEF_RAM_TAP_DATA 0x4671
+#define mmSCLV1_SCLV_COEF_RAM_TAP_DATA 0x9871
+#define mmSCLV_MODE 0x4672
+#define mmSCLV0_SCLV_MODE 0x4672
+#define mmSCLV1_SCLV_MODE 0x9872
+#define mmSCLV_TAP_CONTROL 0x4673
+#define mmSCLV0_SCLV_TAP_CONTROL 0x4673
+#define mmSCLV1_SCLV_TAP_CONTROL 0x9873
+#define mmSCLV_CONTROL 0x4674
+#define mmSCLV0_SCLV_CONTROL 0x4674
+#define mmSCLV1_SCLV_CONTROL 0x9874
+#define mmSCLV_MANUAL_REPLICATE_CONTROL 0x4675
+#define mmSCLV0_SCLV_MANUAL_REPLICATE_CONTROL 0x4675
+#define mmSCLV1_SCLV_MANUAL_REPLICATE_CONTROL 0x9875
+#define mmSCLV_AUTOMATIC_MODE_CONTROL 0x4676
+#define mmSCLV0_SCLV_AUTOMATIC_MODE_CONTROL 0x4676
+#define mmSCLV1_SCLV_AUTOMATIC_MODE_CONTROL 0x9876
+#define mmSCLV_HORZ_FILTER_CONTROL 0x4677
+#define mmSCLV0_SCLV_HORZ_FILTER_CONTROL 0x4677
+#define mmSCLV1_SCLV_HORZ_FILTER_CONTROL 0x9877
+#define mmSCLV_HORZ_FILTER_SCALE_RATIO 0x4678
+#define mmSCLV0_SCLV_HORZ_FILTER_SCALE_RATIO 0x4678
+#define mmSCLV1_SCLV_HORZ_FILTER_SCALE_RATIO 0x9878
+#define mmSCLV_HORZ_FILTER_INIT 0x4679
+#define mmSCLV0_SCLV_HORZ_FILTER_INIT 0x4679
+#define mmSCLV1_SCLV_HORZ_FILTER_INIT 0x9879
+#define mmSCLV_HORZ_FILTER_SCALE_RATIO_C 0x467a
+#define mmSCLV0_SCLV_HORZ_FILTER_SCALE_RATIO_C 0x467a
+#define mmSCLV1_SCLV_HORZ_FILTER_SCALE_RATIO_C 0x987a
+#define mmSCLV_HORZ_FILTER_INIT_C 0x467b
+#define mmSCLV0_SCLV_HORZ_FILTER_INIT_C 0x467b
+#define mmSCLV1_SCLV_HORZ_FILTER_INIT_C 0x987b
+#define mmSCLV_VERT_FILTER_CONTROL 0x467c
+#define mmSCLV0_SCLV_VERT_FILTER_CONTROL 0x467c
+#define mmSCLV1_SCLV_VERT_FILTER_CONTROL 0x987c
+#define mmSCLV_VERT_FILTER_SCALE_RATIO 0x467d
+#define mmSCLV0_SCLV_VERT_FILTER_SCALE_RATIO 0x467d
+#define mmSCLV1_SCLV_VERT_FILTER_SCALE_RATIO 0x987d
+#define mmSCLV_VERT_FILTER_INIT 0x467e
+#define mmSCLV0_SCLV_VERT_FILTER_INIT 0x467e
+#define mmSCLV1_SCLV_VERT_FILTER_INIT 0x987e
+#define mmSCLV_VERT_FILTER_INIT_BOT 0x467f
+#define mmSCLV0_SCLV_VERT_FILTER_INIT_BOT 0x467f
+#define mmSCLV1_SCLV_VERT_FILTER_INIT_BOT 0x987f
+#define mmSCLV_VERT_FILTER_SCALE_RATIO_C 0x4680
+#define mmSCLV0_SCLV_VERT_FILTER_SCALE_RATIO_C 0x4680
+#define mmSCLV1_SCLV_VERT_FILTER_SCALE_RATIO_C 0x9880
+#define mmSCLV_VERT_FILTER_INIT_C 0x4681
+#define mmSCLV0_SCLV_VERT_FILTER_INIT_C 0x4681
+#define mmSCLV1_SCLV_VERT_FILTER_INIT_C 0x9881
+#define mmSCLV_VERT_FILTER_INIT_BOT_C 0x4682
+#define mmSCLV0_SCLV_VERT_FILTER_INIT_BOT_C 0x4682
+#define mmSCLV1_SCLV_VERT_FILTER_INIT_BOT_C 0x9882
+#define mmSCLV_ROUND_OFFSET 0x4683
+#define mmSCLV0_SCLV_ROUND_OFFSET 0x4683
+#define mmSCLV1_SCLV_ROUND_OFFSET 0x9883
+#define mmSCLV_UPDATE 0x4684
+#define mmSCLV0_SCLV_UPDATE 0x4684
+#define mmSCLV1_SCLV_UPDATE 0x9884
+#define mmSCLV_ALU_CONTROL 0x4685
+#define mmSCLV0_SCLV_ALU_CONTROL 0x4685
+#define mmSCLV1_SCLV_ALU_CONTROL 0x9885
+#define mmSCLV_VIEWPORT_START 0x4686
+#define mmSCLV0_SCLV_VIEWPORT_START 0x4686
+#define mmSCLV1_SCLV_VIEWPORT_START 0x9886
+#define mmSCLV_VIEWPORT_START_SECONDARY 0x4687
+#define mmSCLV0_SCLV_VIEWPORT_START_SECONDARY 0x4687
+#define mmSCLV1_SCLV_VIEWPORT_START_SECONDARY 0x9887
+#define mmSCLV_VIEWPORT_SIZE 0x4688
+#define mmSCLV0_SCLV_VIEWPORT_SIZE 0x4688
+#define mmSCLV1_SCLV_VIEWPORT_SIZE 0x9888
+#define mmSCLV_VIEWPORT_START_C 0x4689
+#define mmSCLV0_SCLV_VIEWPORT_START_C 0x4689
+#define mmSCLV1_SCLV_VIEWPORT_START_C 0x9889
+#define mmSCLV_VIEWPORT_START_SECONDARY_C 0x468a
+#define mmSCLV0_SCLV_VIEWPORT_START_SECONDARY_C 0x468a
+#define mmSCLV1_SCLV_VIEWPORT_START_SECONDARY_C 0x988a
+#define mmSCLV_VIEWPORT_SIZE_C 0x468b
+#define mmSCLV0_SCLV_VIEWPORT_SIZE_C 0x468b
+#define mmSCLV1_SCLV_VIEWPORT_SIZE_C 0x988b
+#define mmSCLV_EXT_OVERSCAN_LEFT_RIGHT 0x468c
+#define mmSCLV0_SCLV_EXT_OVERSCAN_LEFT_RIGHT 0x468c
+#define mmSCLV1_SCLV_EXT_OVERSCAN_LEFT_RIGHT 0x988c
+#define mmSCLV_EXT_OVERSCAN_TOP_BOTTOM 0x468d
+#define mmSCLV0_SCLV_EXT_OVERSCAN_TOP_BOTTOM 0x468d
+#define mmSCLV1_SCLV_EXT_OVERSCAN_TOP_BOTTOM 0x988d
+#define mmSCLV_MODE_CHANGE_DET1 0x468e
+#define mmSCLV0_SCLV_MODE_CHANGE_DET1 0x468e
+#define mmSCLV1_SCLV_MODE_CHANGE_DET1 0x988e
+#define mmSCLV_MODE_CHANGE_DET2 0x468f
+#define mmSCLV0_SCLV_MODE_CHANGE_DET2 0x468f
+#define mmSCLV1_SCLV_MODE_CHANGE_DET2 0x988f
+#define mmSCLV_MODE_CHANGE_DET3 0x4690
+#define mmSCLV0_SCLV_MODE_CHANGE_DET3 0x4690
+#define mmSCLV1_SCLV_MODE_CHANGE_DET3 0x9890
+#define mmSCLV_MODE_CHANGE_MASK 0x4691
+#define mmSCLV0_SCLV_MODE_CHANGE_MASK 0x4691
+#define mmSCLV1_SCLV_MODE_CHANGE_MASK 0x9891
+#define mmSCLV_HORZ_FILTER_INIT_BOT 0x4692
+#define mmSCLV0_SCLV_HORZ_FILTER_INIT_BOT 0x4692
+#define mmSCLV1_SCLV_HORZ_FILTER_INIT_BOT 0x9892
+#define mmSCLV_HORZ_FILTER_INIT_BOT_C 0x4693
+#define mmSCLV0_SCLV_HORZ_FILTER_INIT_BOT_C 0x4693
+#define mmSCLV1_SCLV_HORZ_FILTER_INIT_BOT_C 0x9893
+#define mmSCLV_DEBUG2 0x4694
+#define mmSCLV0_SCLV_DEBUG2 0x4694
+#define mmSCLV1_SCLV_DEBUG2 0x9894
+#define mmSCLV_DEBUG 0x4695
+#define mmSCLV0_SCLV_DEBUG 0x4695
+#define mmSCLV1_SCLV_DEBUG 0x9895
+#define mmSCLV_TEST_DEBUG_INDEX 0x4696
+#define mmSCLV0_SCLV_TEST_DEBUG_INDEX 0x4696
+#define mmSCLV1_SCLV_TEST_DEBUG_INDEX 0x9896
+#define mmSCLV_TEST_DEBUG_DATA 0x4697
+#define mmSCLV0_SCLV_TEST_DEBUG_DATA 0x4697
+#define mmSCLV1_SCLV_TEST_DEBUG_DATA 0x9897
+#define mmCOL_MAN_UPDATE 0x46a4
+#define mmCOL_MAN0_COL_MAN_UPDATE 0x46a4
+#define mmCOL_MAN1_COL_MAN_UPDATE 0x98a4
+#define mmCOL_MAN_INPUT_CSC_CONTROL 0x46a5
+#define mmCOL_MAN0_COL_MAN_INPUT_CSC_CONTROL 0x46a5
+#define mmCOL_MAN1_COL_MAN_INPUT_CSC_CONTROL 0x98a5
+#define mmINPUT_CSC_C11_C12_A 0x46a6
+#define mmCOL_MAN0_INPUT_CSC_C11_C12_A 0x46a6
+#define mmCOL_MAN1_INPUT_CSC_C11_C12_A 0x98a6
+#define mmINPUT_CSC_C13_C14_A 0x46a7
+#define mmCOL_MAN0_INPUT_CSC_C13_C14_A 0x46a7
+#define mmCOL_MAN1_INPUT_CSC_C13_C14_A 0x98a7
+#define mmINPUT_CSC_C21_C22_A 0x46a8
+#define mmCOL_MAN0_INPUT_CSC_C21_C22_A 0x46a8
+#define mmCOL_MAN1_INPUT_CSC_C21_C22_A 0x98a8
+#define mmINPUT_CSC_C23_C24_A 0x46a9
+#define mmCOL_MAN0_INPUT_CSC_C23_C24_A 0x46a9
+#define mmCOL_MAN1_INPUT_CSC_C23_C24_A 0x98a9
+#define mmINPUT_CSC_C31_C32_A 0x46aa
+#define mmCOL_MAN0_INPUT_CSC_C31_C32_A 0x46aa
+#define mmCOL_MAN1_INPUT_CSC_C31_C32_A 0x98aa
+#define mmINPUT_CSC_C33_C34_A 0x46ab
+#define mmCOL_MAN0_INPUT_CSC_C33_C34_A 0x46ab
+#define mmCOL_MAN1_INPUT_CSC_C33_C34_A 0x98ab
+#define mmINPUT_CSC_C11_C12_B 0x46ac
+#define mmCOL_MAN0_INPUT_CSC_C11_C12_B 0x46ac
+#define mmCOL_MAN1_INPUT_CSC_C11_C12_B 0x98ac
+#define mmINPUT_CSC_C13_C14_B 0x46ad
+#define mmCOL_MAN0_INPUT_CSC_C13_C14_B 0x46ad
+#define mmCOL_MAN1_INPUT_CSC_C13_C14_B 0x98ad
+#define mmINPUT_CSC_C21_C22_B 0x46ae
+#define mmCOL_MAN0_INPUT_CSC_C21_C22_B 0x46ae
+#define mmCOL_MAN1_INPUT_CSC_C21_C22_B 0x98ae
+#define mmINPUT_CSC_C23_C24_B 0x46af
+#define mmCOL_MAN0_INPUT_CSC_C23_C24_B 0x46af
+#define mmCOL_MAN1_INPUT_CSC_C23_C24_B 0x98af
+#define mmINPUT_CSC_C31_C32_B 0x46b0
+#define mmCOL_MAN0_INPUT_CSC_C31_C32_B 0x46b0
+#define mmCOL_MAN1_INPUT_CSC_C31_C32_B 0x98b0
+#define mmINPUT_CSC_C33_C34_B 0x46b1
+#define mmCOL_MAN0_INPUT_CSC_C33_C34_B 0x46b1
+#define mmCOL_MAN1_INPUT_CSC_C33_C34_B 0x98b1
+#define mmPRESCALE_CONTROL 0x46b2
+#define mmCOL_MAN0_PRESCALE_CONTROL 0x46b2
+#define mmCOL_MAN1_PRESCALE_CONTROL 0x98b2
+#define mmPRESCALE_VALUES_R 0x46b3
+#define mmCOL_MAN0_PRESCALE_VALUES_R 0x46b3
+#define mmCOL_MAN1_PRESCALE_VALUES_R 0x98b3
+#define mmPRESCALE_VALUES_G 0x46b4
+#define mmCOL_MAN0_PRESCALE_VALUES_G 0x46b4
+#define mmCOL_MAN1_PRESCALE_VALUES_G 0x98b4
+#define mmPRESCALE_VALUES_B 0x46b5
+#define mmCOL_MAN0_PRESCALE_VALUES_B 0x46b5
+#define mmCOL_MAN1_PRESCALE_VALUES_B 0x98b5
+#define mmCOL_MAN_OUTPUT_CSC_CONTROL 0x46b6
+#define mmCOL_MAN0_COL_MAN_OUTPUT_CSC_CONTROL 0x46b6
+#define mmCOL_MAN1_COL_MAN_OUTPUT_CSC_CONTROL 0x98b6
+#define mmOUTPUT_CSC_C11_C12_A 0x46b7
+#define mmCOL_MAN0_OUTPUT_CSC_C11_C12_A 0x46b7
+#define mmCOL_MAN1_OUTPUT_CSC_C11_C12_A 0x98b7
+#define mmOUTPUT_CSC_C13_C14_A 0x46b8
+#define mmCOL_MAN0_OUTPUT_CSC_C13_C14_A 0x46b8
+#define mmCOL_MAN1_OUTPUT_CSC_C13_C14_A 0x98b8
+#define mmOUTPUT_CSC_C21_C22_A 0x46b9
+#define mmCOL_MAN0_OUTPUT_CSC_C21_C22_A 0x46b9
+#define mmCOL_MAN1_OUTPUT_CSC_C21_C22_A 0x98b9
+#define mmOUTPUT_CSC_C23_C24_A 0x46ba
+#define mmCOL_MAN0_OUTPUT_CSC_C23_C24_A 0x46ba
+#define mmCOL_MAN1_OUTPUT_CSC_C23_C24_A 0x98ba
+#define mmOUTPUT_CSC_C31_C32_A 0x46bb
+#define mmCOL_MAN0_OUTPUT_CSC_C31_C32_A 0x46bb
+#define mmCOL_MAN1_OUTPUT_CSC_C31_C32_A 0x98bb
+#define mmOUTPUT_CSC_C33_C34_A 0x46bc
+#define mmCOL_MAN0_OUTPUT_CSC_C33_C34_A 0x46bc
+#define mmCOL_MAN1_OUTPUT_CSC_C33_C34_A 0x98bc
+#define mmOUTPUT_CSC_C11_C12_B 0x46bd
+#define mmCOL_MAN0_OUTPUT_CSC_C11_C12_B 0x46bd
+#define mmCOL_MAN1_OUTPUT_CSC_C11_C12_B 0x98bd
+#define mmOUTPUT_CSC_C13_C14_B 0x46be
+#define mmCOL_MAN0_OUTPUT_CSC_C13_C14_B 0x46be
+#define mmCOL_MAN1_OUTPUT_CSC_C13_C14_B 0x98be
+#define mmOUTPUT_CSC_C21_C22_B 0x46bf
+#define mmCOL_MAN0_OUTPUT_CSC_C21_C22_B 0x46bf
+#define mmCOL_MAN1_OUTPUT_CSC_C21_C22_B 0x98bf
+#define mmOUTPUT_CSC_C23_C24_B 0x46c0
+#define mmCOL_MAN0_OUTPUT_CSC_C23_C24_B 0x46c0
+#define mmCOL_MAN1_OUTPUT_CSC_C23_C24_B 0x98c0
+#define mmOUTPUT_CSC_C31_C32_B 0x46c1
+#define mmCOL_MAN0_OUTPUT_CSC_C31_C32_B 0x46c1
+#define mmCOL_MAN1_OUTPUT_CSC_C31_C32_B 0x98c1
+#define mmOUTPUT_CSC_C33_C34_B 0x46c2
+#define mmCOL_MAN0_OUTPUT_CSC_C33_C34_B 0x46c2
+#define mmCOL_MAN1_OUTPUT_CSC_C33_C34_B 0x98c2
+#define mmDENORM_CLAMP_CONTROL 0x46c3
+#define mmCOL_MAN0_DENORM_CLAMP_CONTROL 0x46c3
+#define mmCOL_MAN1_DENORM_CLAMP_CONTROL 0x98c3
+#define mmDENORM_CLAMP_RANGE_R_CR 0x46c4
+#define mmCOL_MAN0_DENORM_CLAMP_RANGE_R_CR 0x46c4
+#define mmCOL_MAN1_DENORM_CLAMP_RANGE_R_CR 0x98c4
+#define mmDENORM_CLAMP_RANGE_G_Y 0x46c5
+#define mmCOL_MAN0_DENORM_CLAMP_RANGE_G_Y 0x46c5
+#define mmCOL_MAN1_DENORM_CLAMP_RANGE_G_Y 0x98c5
+#define mmDENORM_CLAMP_RANGE_B_CB 0x46c6
+#define mmCOL_MAN0_DENORM_CLAMP_RANGE_B_CB 0x46c6
+#define mmCOL_MAN1_DENORM_CLAMP_RANGE_B_CB 0x98c6
+#define mmCOL_MAN_FP_CONVERTED_FIELD 0x46c7
+#define mmCOL_MAN0_COL_MAN_FP_CONVERTED_FIELD 0x46c7
+#define mmCOL_MAN1_COL_MAN_FP_CONVERTED_FIELD 0x98c7
+#define mmGAMMA_CORR_CONTROL 0x46c8
+#define mmCOL_MAN0_GAMMA_CORR_CONTROL 0x46c8
+#define mmCOL_MAN1_GAMMA_CORR_CONTROL 0x98c8
+#define mmGAMMA_CORR_LUT_INDEX 0x46c9
+#define mmCOL_MAN0_GAMMA_CORR_LUT_INDEX 0x46c9
+#define mmCOL_MAN1_GAMMA_CORR_LUT_INDEX 0x98c9
+#define mmGAMMA_CORR_LUT_DATA 0x46ca
+#define mmCOL_MAN0_GAMMA_CORR_LUT_DATA 0x46ca
+#define mmCOL_MAN1_GAMMA_CORR_LUT_DATA 0x98ca
+#define mmGAMMA_CORR_LUT_WRITE_EN_MASK 0x46cb
+#define mmCOL_MAN0_GAMMA_CORR_LUT_WRITE_EN_MASK 0x46cb
+#define mmCOL_MAN1_GAMMA_CORR_LUT_WRITE_EN_MASK 0x98cb
+#define mmGAMMA_CORR_CNTLA_START_CNTL 0x46cc
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_START_CNTL 0x46cc
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_START_CNTL 0x98cc
+#define mmGAMMA_CORR_CNTLA_SLOPE_CNTL 0x46cd
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_SLOPE_CNTL 0x46cd
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_SLOPE_CNTL 0x98cd
+#define mmGAMMA_CORR_CNTLA_END_CNTL1 0x46ce
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_END_CNTL1 0x46ce
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_END_CNTL1 0x98ce
+#define mmGAMMA_CORR_CNTLA_END_CNTL2 0x46cf
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_END_CNTL2 0x46cf
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_END_CNTL2 0x98cf
+#define mmGAMMA_CORR_CNTLA_REGION_0_1 0x46d0
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_0_1 0x46d0
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_0_1 0x98d0
+#define mmGAMMA_CORR_CNTLA_REGION_2_3 0x46d1
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_2_3 0x46d1
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_2_3 0x98d1
+#define mmGAMMA_CORR_CNTLA_REGION_4_5 0x46d2
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_4_5 0x46d2
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_4_5 0x98d2
+#define mmGAMMA_CORR_CNTLA_REGION_6_7 0x46d3
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_6_7 0x46d3
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_6_7 0x98d3
+#define mmGAMMA_CORR_CNTLA_REGION_8_9 0x46d4
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_8_9 0x46d4
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_8_9 0x98d4
+#define mmGAMMA_CORR_CNTLA_REGION_10_11 0x46d5
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_10_11 0x46d5
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_10_11 0x98d5
+#define mmGAMMA_CORR_CNTLA_REGION_12_13 0x46d6
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_12_13 0x46d6
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_12_13 0x98d6
+#define mmGAMMA_CORR_CNTLA_REGION_14_15 0x46d7
+#define mmCOL_MAN0_GAMMA_CORR_CNTLA_REGION_14_15 0x46d7
+#define mmCOL_MAN1_GAMMA_CORR_CNTLA_REGION_14_15 0x98d7
+#define mmGAMMA_CORR_CNTLB_START_CNTL 0x46d8
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_START_CNTL 0x46d8
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_START_CNTL 0x98d8
+#define mmGAMMA_CORR_CNTLB_SLOPE_CNTL 0x46d9
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_SLOPE_CNTL 0x46d9
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_SLOPE_CNTL 0x98d9
+#define mmGAMMA_CORR_CNTLB_END_CNTL1 0x46da
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_END_CNTL1 0x46da
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_END_CNTL1 0x98da
+#define mmGAMMA_CORR_CNTLB_END_CNTL2 0x46db
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_END_CNTL2 0x46db
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_END_CNTL2 0x98db
+#define mmGAMMA_CORR_CNTLB_REGION_0_1 0x46dc
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_0_1 0x46dc
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_0_1 0x98dc
+#define mmGAMMA_CORR_CNTLB_REGION_2_3 0x46dd
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_2_3 0x46dd
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_2_3 0x98dd
+#define mmGAMMA_CORR_CNTLB_REGION_4_5 0x46de
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_4_5 0x46de
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_4_5 0x98de
+#define mmGAMMA_CORR_CNTLB_REGION_6_7 0x46df
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_6_7 0x46df
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_6_7 0x98df
+#define mmGAMMA_CORR_CNTLB_REGION_8_9 0x46e0
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_8_9 0x46e0
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_8_9 0x98e0
+#define mmGAMMA_CORR_CNTLB_REGION_10_11 0x46e1
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_10_11 0x46e1
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_10_11 0x98e1
+#define mmGAMMA_CORR_CNTLB_REGION_12_13 0x46e2
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_12_13 0x46e2
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_12_13 0x98e2
+#define mmGAMMA_CORR_CNTLB_REGION_14_15 0x46e3
+#define mmCOL_MAN0_GAMMA_CORR_CNTLB_REGION_14_15 0x46e3
+#define mmCOL_MAN1_GAMMA_CORR_CNTLB_REGION_14_15 0x98e3
+#define mmPACK_FIFO_ERROR 0x46e4
+#define mmCOL_MAN0_PACK_FIFO_ERROR 0x46e4
+#define mmCOL_MAN1_PACK_FIFO_ERROR 0x98e4
+#define mmOUTPUT_FIFO_ERROR 0x46e5
+#define mmCOL_MAN0_OUTPUT_FIFO_ERROR 0x46e5
+#define mmCOL_MAN1_OUTPUT_FIFO_ERROR 0x98e5
+#define mmINPUT_GAMMA_LUT_AUTOFILL 0x46e6
+#define mmCOL_MAN0_INPUT_GAMMA_LUT_AUTOFILL 0x46e6
+#define mmCOL_MAN1_INPUT_GAMMA_LUT_AUTOFILL 0x98e6
+#define mmINPUT_GAMMA_LUT_RW_INDEX 0x46e7
+#define mmCOL_MAN0_INPUT_GAMMA_LUT_RW_INDEX 0x46e7
+#define mmCOL_MAN1_INPUT_GAMMA_LUT_RW_INDEX 0x98e7
+#define mmINPUT_GAMMA_LUT_SEQ_COLOR 0x46e8
+#define mmCOL_MAN0_INPUT_GAMMA_LUT_SEQ_COLOR 0x46e8
+#define mmCOL_MAN1_INPUT_GAMMA_LUT_SEQ_COLOR 0x98e8
+#define mmINPUT_GAMMA_LUT_PWL_DATA 0x46e9
+#define mmCOL_MAN0_INPUT_GAMMA_LUT_PWL_DATA 0x46e9
+#define mmCOL_MAN1_INPUT_GAMMA_LUT_PWL_DATA 0x98e9
+#define mmINPUT_GAMMA_LUT_30_COLOR 0x46ea
+#define mmCOL_MAN0_INPUT_GAMMA_LUT_30_COLOR 0x46ea
+#define mmCOL_MAN1_INPUT_GAMMA_LUT_30_COLOR 0x98ea
+#define mmCOL_MAN_INPUT_GAMMA_CONTROL1 0x46eb
+#define mmCOL_MAN0_COL_MAN_INPUT_GAMMA_CONTROL1 0x46eb
+#define mmCOL_MAN1_COL_MAN_INPUT_GAMMA_CONTROL1 0x98eb
+#define mmCOL_MAN_INPUT_GAMMA_CONTROL2 0x46ec
+#define mmCOL_MAN0_COL_MAN_INPUT_GAMMA_CONTROL2 0x46ec
+#define mmCOL_MAN1_COL_MAN_INPUT_GAMMA_CONTROL2 0x98ec
+#define mmINPUT_GAMMA_BW_OFFSETS_B 0x46ed
+#define mmCOL_MAN0_INPUT_GAMMA_BW_OFFSETS_B 0x46ed
+#define mmCOL_MAN1_INPUT_GAMMA_BW_OFFSETS_B 0x98ed
+#define mmINPUT_GAMMA_BW_OFFSETS_G 0x46ee
+#define mmCOL_MAN0_INPUT_GAMMA_BW_OFFSETS_G 0x46ee
+#define mmCOL_MAN1_INPUT_GAMMA_BW_OFFSETS_G 0x98ee
+#define mmINPUT_GAMMA_BW_OFFSETS_R 0x46ef
+#define mmCOL_MAN0_INPUT_GAMMA_BW_OFFSETS_R 0x46ef
+#define mmCOL_MAN1_INPUT_GAMMA_BW_OFFSETS_R 0x98ef
+#define mmCOL_MAN_DEBUG_CONTROL 0x46f0
+#define mmCOL_MAN0_COL_MAN_DEBUG_CONTROL 0x46f0
+#define mmCOL_MAN1_COL_MAN_DEBUG_CONTROL 0x98f0
+#define mmCOL_MAN_TEST_DEBUG_INDEX 0x46f1
+#define mmCOL_MAN0_COL_MAN_TEST_DEBUG_INDEX 0x46f1
+#define mmCOL_MAN1_COL_MAN_TEST_DEBUG_INDEX 0x98f1
+#define mmCOL_MAN_TEST_DEBUG_DATA 0x46f3
+#define mmCOL_MAN0_COL_MAN_TEST_DEBUG_DATA 0x46f3
+#define mmCOL_MAN1_COL_MAN_TEST_DEBUG_DATA 0x98f3
+#define mmUNP_GRPH_ENABLE 0x4600
+#define mmUNP0_UNP_GRPH_ENABLE 0x4600
+#define mmUNP1_UNP_GRPH_ENABLE 0x9800
+#define mmUNP_GRPH_CONTROL 0x4601
+#define mmUNP0_UNP_GRPH_CONTROL 0x4601
+#define mmUNP1_UNP_GRPH_CONTROL 0x9801
+#define mmUNP_GRPH_CONTROL_C 0x4602
+#define mmUNP0_UNP_GRPH_CONTROL_C 0x4602
+#define mmUNP1_UNP_GRPH_CONTROL_C 0x9802
+#define mmUNP_GRPH_CONTROL_EXP 0x4603
+#define mmUNP0_UNP_GRPH_CONTROL_EXP 0x4603
+#define mmUNP1_UNP_GRPH_CONTROL_EXP 0x9803
+#define mmUNP_GRPH_SWAP_CNTL 0x4605
+#define mmUNP0_UNP_GRPH_SWAP_CNTL 0x4605
+#define mmUNP1_UNP_GRPH_SWAP_CNTL 0x9805
+#define mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_L 0x4606
+#define mmUNP0_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L 0x4606
+#define mmUNP1_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L 0x9806
+#define mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_C 0x4607
+#define mmUNP0_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C 0x4607
+#define mmUNP1_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C 0x9807
+#define mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L 0x4608
+#define mmUNP0_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L 0x4608
+#define mmUNP1_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L 0x9808
+#define mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x4609
+#define mmUNP0_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x4609
+#define mmUNP1_UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x9809
+#define mmUNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L 0x460a
+#define mmUNP0_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L 0x460a
+#define mmUNP1_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L 0x980a
+#define mmUNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C 0x460b
+#define mmUNP0_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C 0x460b
+#define mmUNP1_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C 0x980b
+#define mmUNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L 0x460c
+#define mmUNP0_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L 0x460c
+#define mmUNP1_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L 0x980c
+#define mmUNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C 0x460d
+#define mmUNP0_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C 0x460d
+#define mmUNP1_UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C 0x980d
+#define mmUNP_GRPH_SECONDARY_SURFACE_ADDRESS_L 0x460e
+#define mmUNP0_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_L 0x460e
+#define mmUNP1_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_L 0x980e
+#define mmUNP_GRPH_SECONDARY_SURFACE_ADDRESS_C 0x460f
+#define mmUNP0_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_C 0x460f
+#define mmUNP1_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_C 0x980f
+#define mmUNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L 0x4610
+#define mmUNP0_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L 0x4610
+#define mmUNP1_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L 0x9810
+#define mmUNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x4611
+#define mmUNP0_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x4611
+#define mmUNP1_UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x9811
+#define mmUNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L 0x4612
+#define mmUNP0_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L 0x4612
+#define mmUNP1_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L 0x9812
+#define mmUNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C 0x4613
+#define mmUNP0_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C 0x4613
+#define mmUNP1_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C 0x9813
+#define mmUNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L 0x4614
+#define mmUNP0_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L 0x4614
+#define mmUNP1_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L 0x9814
+#define mmUNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C 0x4615
+#define mmUNP0_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C 0x4615
+#define mmUNP1_UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C 0x9815
+#define mmUNP_GRPH_PITCH_L 0x4616
+#define mmUNP0_UNP_GRPH_PITCH_L 0x4616
+#define mmUNP1_UNP_GRPH_PITCH_L 0x9816
+#define mmUNP_GRPH_PITCH_C 0x4617
+#define mmUNP0_UNP_GRPH_PITCH_C 0x4617
+#define mmUNP1_UNP_GRPH_PITCH_C 0x9817
+#define mmUNP_GRPH_SURFACE_OFFSET_X_L 0x4618
+#define mmUNP0_UNP_GRPH_SURFACE_OFFSET_X_L 0x4618
+#define mmUNP1_UNP_GRPH_SURFACE_OFFSET_X_L 0x9818
+#define mmUNP_GRPH_SURFACE_OFFSET_X_C 0x4619
+#define mmUNP0_UNP_GRPH_SURFACE_OFFSET_X_C 0x4619
+#define mmUNP1_UNP_GRPH_SURFACE_OFFSET_X_C 0x9819
+#define mmUNP_GRPH_SURFACE_OFFSET_Y_L 0x461a
+#define mmUNP0_UNP_GRPH_SURFACE_OFFSET_Y_L 0x461a
+#define mmUNP1_UNP_GRPH_SURFACE_OFFSET_Y_L 0x981a
+#define mmUNP_GRPH_SURFACE_OFFSET_Y_C 0x461b
+#define mmUNP0_UNP_GRPH_SURFACE_OFFSET_Y_C 0x461b
+#define mmUNP1_UNP_GRPH_SURFACE_OFFSET_Y_C 0x981b
+#define mmUNP_GRPH_X_START_L 0x461c
+#define mmUNP0_UNP_GRPH_X_START_L 0x461c
+#define mmUNP1_UNP_GRPH_X_START_L 0x981c
+#define mmUNP_GRPH_X_START_C 0x461d
+#define mmUNP0_UNP_GRPH_X_START_C 0x461d
+#define mmUNP1_UNP_GRPH_X_START_C 0x981d
+#define mmUNP_GRPH_Y_START_L 0x461e
+#define mmUNP0_UNP_GRPH_Y_START_L 0x461e
+#define mmUNP1_UNP_GRPH_Y_START_L 0x981e
+#define mmUNP_GRPH_Y_START_C 0x461f
+#define mmUNP0_UNP_GRPH_Y_START_C 0x461f
+#define mmUNP1_UNP_GRPH_Y_START_C 0x981f
+#define mmUNP_GRPH_X_END_L 0x4620
+#define mmUNP0_UNP_GRPH_X_END_L 0x4620
+#define mmUNP1_UNP_GRPH_X_END_L 0x9820
+#define mmUNP_GRPH_X_END_C 0x4621
+#define mmUNP0_UNP_GRPH_X_END_C 0x4621
+#define mmUNP1_UNP_GRPH_X_END_C 0x9821
+#define mmUNP_GRPH_Y_END_L 0x4622
+#define mmUNP0_UNP_GRPH_Y_END_L 0x4622
+#define mmUNP1_UNP_GRPH_Y_END_L 0x9822
+#define mmUNP_GRPH_Y_END_C 0x4623
+#define mmUNP0_UNP_GRPH_Y_END_C 0x4623
+#define mmUNP1_UNP_GRPH_Y_END_C 0x9823
+#define mmUNP_GRPH_UPDATE 0x4624
+#define mmUNP0_UNP_GRPH_UPDATE 0x4624
+#define mmUNP1_UNP_GRPH_UPDATE 0x9824
+#define mmUNP_PIPE_OUTSTANDING_REQUEST_LIMIT 0x463a
+#define mmUNP0_UNP_PIPE_OUTSTANDING_REQUEST_LIMIT 0x463a
+#define mmUNP1_UNP_PIPE_OUTSTANDING_REQUEST_LIMIT 0x983a
+#define mmUNP_GRPH_SURFACE_ADDRESS_INUSE_L 0x4625
+#define mmUNP0_UNP_GRPH_SURFACE_ADDRESS_INUSE_L 0x4625
+#define mmUNP1_UNP_GRPH_SURFACE_ADDRESS_INUSE_L 0x9825
+#define mmUNP_GRPH_SURFACE_ADDRESS_INUSE_C 0x4626
+#define mmUNP0_UNP_GRPH_SURFACE_ADDRESS_INUSE_C 0x4626
+#define mmUNP1_UNP_GRPH_SURFACE_ADDRESS_INUSE_C 0x9826
+#define mmUNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_L 0x4627
+#define mmUNP0_UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_L 0x4627
+#define mmUNP1_UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_L 0x9827
+#define mmUNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_C 0x4628
+#define mmUNP0_UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_C 0x4628
+#define mmUNP1_UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_C 0x9828
+#define mmUNP_DVMM_PTE_CONTROL 0x4629
+#define mmUNP_GRPH_INTERRUPT_STATUS 0x462b
+#define mmUNP0_UNP_GRPH_INTERRUPT_STATUS 0x462b
+#define mmUNP1_UNP_GRPH_INTERRUPT_STATUS 0x982b
+#define mmUNP_GRPH_INTERRUPT_CONTROL 0x462c
+#define mmUNP0_UNP_GRPH_INTERRUPT_CONTROL 0x462c
+#define mmUNP1_UNP_GRPH_INTERRUPT_CONTROL 0x982c
+#define mmUNP_GRPH_STEREOSYNC_FLIP 0x462e
+#define mmUNP0_UNP_GRPH_STEREOSYNC_FLIP 0x462e
+#define mmUNP1_UNP_GRPH_STEREOSYNC_FLIP 0x982e
+#define mmUNP_FLIP_CONTROL 0x462f
+#define mmUNP0_UNP_FLIP_CONTROL 0x462f
+#define mmUNP1_UNP_FLIP_CONTROL 0x982f
+#define mmUNP_CRC_CONTROL 0x4630
+#define mmUNP0_UNP_CRC_CONTROL 0x4630
+#define mmUNP1_UNP_CRC_CONTROL 0x9830
+#define mmUNP_CRC_MASK 0x4631
+#define mmUNP0_UNP_CRC_MASK 0x4631
+#define mmUNP1_UNP_CRC_MASK 0x9831
+#define mmUNP_CRC_CURRENT 0x4632
+#define mmUNP0_UNP_CRC_CURRENT 0x4632
+#define mmUNP1_UNP_CRC_CURRENT 0x9832
+#define mmUNP_CRC_LAST 0x4633
+#define mmUNP0_UNP_CRC_LAST 0x4633
+#define mmUNP1_UNP_CRC_LAST 0x9833
+#define mmUNP_LB_DATA_GAP_BETWEEN_CHUNK 0x4634
+#define mmUNP0_UNP_LB_DATA_GAP_BETWEEN_CHUNK 0x4634
+#define mmUNP1_UNP_LB_DATA_GAP_BETWEEN_CHUNK 0x9834
+#define mmUNP_HW_ROTATION 0x4635
+#define mmUNP0_UNP_HW_ROTATION 0x4635
+#define mmUNP1_UNP_HW_ROTATION 0x9835
+#define mmUNP_DEBUG 0x4636
+#define mmUNP0_UNP_DEBUG 0x4636
+#define mmUNP1_UNP_DEBUG 0x9836
+#define mmUNP_DEBUG2 0x4637
+#define mmUNP0_UNP_DEBUG2 0x4637
+#define mmUNP1_UNP_DEBUG2 0x9837
+#define mmUNP_DVMM_DEBUG 0x463b
+#define mmUNP0_UNP_DVMM_DEBUG 0x463b
+#define mmUNP1_UNP_DVMM_DEBUG 0x983b
+#define mmUNP_TEST_DEBUG_INDEX 0x4638
+#define mmUNP0_UNP_TEST_DEBUG_INDEX 0x4638
+#define mmUNP1_UNP_TEST_DEBUG_INDEX 0x9838
+#define mmUNP_TEST_DEBUG_DATA 0x4639
+#define mmUNP0_UNP_TEST_DEBUG_DATA 0x4639
+#define mmUNP1_UNP_TEST_DEBUG_DATA 0x9839
+#define mmGENMO_WT 0xf0
+#define mmGENMO_RD 0xf3
+#define mmGENENB 0xf0
+#define mmGENFC_WT 0xee
+#define mmVGA0_GENFC_WT 0xee
+#define mmVGA1_GENFC_WT 0xf6
+#define mmGENFC_RD 0xf2
+#define mmGENS0 0xf0
+#define mmGENS1 0xee
+#define mmVGA0_GENS1 0xee
+#define mmVGA1_GENS1 0xf6
+#define mmDAC_DATA 0xf2
+#define mmDAC_MASK 0xf1
+#define mmDAC_R_INDEX 0xf1
+#define mmDAC_W_INDEX 0xf2
+#define mmSEQ8_IDX 0xf1
+#define mmSEQ8_DATA 0xf1
+#define ixSEQ00 0x0
+#define ixSEQ01 0x1
+#define ixSEQ02 0x2
+#define ixSEQ03 0x3
+#define ixSEQ04 0x4
+#define mmCRTC8_IDX 0xed
+#define mmVGA0_CRTC8_IDX 0xed
+#define mmVGA1_CRTC8_IDX 0xf5
+#define mmCRTC8_DATA 0xed
+#define mmVGA0_CRTC8_DATA 0xed
+#define mmVGA1_CRTC8_DATA 0xf5
+#define ixCRT00 0x0
+#define ixCRT01 0x1
+#define ixCRT02 0x2
+#define ixCRT03 0x3
+#define ixCRT04 0x4
+#define ixCRT05 0x5
+#define ixCRT06 0x6
+#define ixCRT07 0x7
+#define ixCRT08 0x8
+#define ixCRT09 0x9
+#define ixCRT0A 0xa
+#define ixCRT0B 0xb
+#define ixCRT0C 0xc
+#define ixCRT0D 0xd
+#define ixCRT0E 0xe
+#define ixCRT0F 0xf
+#define ixCRT10 0x10
+#define ixCRT11 0x11
+#define ixCRT12 0x12
+#define ixCRT13 0x13
+#define ixCRT14 0x14
+#define ixCRT15 0x15
+#define ixCRT16 0x16
+#define ixCRT17 0x17
+#define ixCRT18 0x18
+#define ixCRT1E 0x1e
+#define ixCRT1F 0x1f
+#define ixCRT22 0x22
+#define mmGRPH8_IDX 0xf3
+#define mmGRPH8_DATA 0xf3
+#define ixGRA00 0x0
+#define ixGRA01 0x1
+#define ixGRA02 0x2
+#define ixGRA03 0x3
+#define ixGRA04 0x4
+#define ixGRA05 0x5
+#define ixGRA06 0x6
+#define ixGRA07 0x7
+#define ixGRA08 0x8
+#define mmATTRX 0xf0
+#define mmATTRDW 0xf0
+#define mmATTRDR 0xf0
+#define ixATTR00 0x0
+#define ixATTR01 0x1
+#define ixATTR02 0x2
+#define ixATTR03 0x3
+#define ixATTR04 0x4
+#define ixATTR05 0x5
+#define ixATTR06 0x6
+#define ixATTR07 0x7
+#define ixATTR08 0x8
+#define ixATTR09 0x9
+#define ixATTR0A 0xa
+#define ixATTR0B 0xb
+#define ixATTR0C 0xc
+#define ixATTR0D 0xd
+#define ixATTR0E 0xe
+#define ixATTR0F 0xf
+#define ixATTR10 0x10
+#define ixATTR11 0x11
+#define ixATTR12 0x12
+#define ixATTR13 0x13
+#define ixATTR14 0x14
+#define mmVGA_RENDER_CONTROL 0xc0
+#define mmVGA_SOURCE_SELECT 0xfc
+#define mmVGA_SEQUENCER_RESET_CONTROL 0xc1
+#define mmVGA_MODE_CONTROL 0xc2
+#define mmVGA_SURFACE_PITCH_SELECT 0xc3
+#define mmVGA_MEMORY_BASE_ADDRESS 0xc4
+#define mmVGA_MEMORY_BASE_ADDRESS_HIGH 0xc9
+#define mmVGA_DISPBUF1_SURFACE_ADDR 0xc6
+#define mmVGA_DISPBUF2_SURFACE_ADDR 0xc8
+#define mmVGA_HDP_CONTROL 0xca
+#define mmVGA_CACHE_CONTROL 0xcb
+#define mmD1VGA_CONTROL 0xcc
+#define mmD2VGA_CONTROL 0xce
+#define mmD3VGA_CONTROL 0xf8
+#define mmD4VGA_CONTROL 0xf9
+#define mmD5VGA_CONTROL 0xfa
+#define mmD6VGA_CONTROL 0xfb
+#define mmVGA_HW_DEBUG 0xcf
+#define mmVGA_STATUS 0xd0
+#define mmVGA_INTERRUPT_CONTROL 0xd1
+#define mmVGA_STATUS_CLEAR 0xd2
+#define mmVGA_INTERRUPT_STATUS 0xd3
+#define mmVGA_MAIN_CONTROL 0xd4
+#define mmVGA_TEST_CONTROL 0xd5
+#define mmVGA_DEBUG_READBACK_INDEX 0xd6
+#define mmVGA_DEBUG_READBACK_DATA 0xd7
+#define mmVGA_MEM_WRITE_PAGE_ADDR 0x12
+#define mmVGA_MEM_READ_PAGE_ADDR 0x13
+#define mmVGA_TEST_DEBUG_INDEX 0xc5
+#define mmVGA_TEST_DEBUG_DATA 0xc7
+#define ixVGADCC_DBG_DCCIF_C 0x7e
+#define mmBPHYC_DAC_MACRO_CNTL 0x48b9
+#define mmBPHYC_DAC_AUTO_CALIB_CONTROL 0x48ba
+#define mmDPG_PIPE_ARBITRATION_CONTROL1 0x1b30
+#define mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1 0x1b30
+#define mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL1 0x1d30
+#define mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL1 0x1f30
+#define mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL1 0x4130
+#define mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL1 0x4330
+#define mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL1 0x4530
+#define mmDPG_PIPE_ARBITRATION_CONTROL2 0x1b31
+#define mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL2 0x1b31
+#define mmDMIF_PG1_DPG_PIPE_ARBITRATION_CONTROL2 0x1d31
+#define mmDMIF_PG2_DPG_PIPE_ARBITRATION_CONTROL2 0x1f31
+#define mmDMIF_PG3_DPG_PIPE_ARBITRATION_CONTROL2 0x4131
+#define mmDMIF_PG4_DPG_PIPE_ARBITRATION_CONTROL2 0x4331
+#define mmDMIF_PG5_DPG_PIPE_ARBITRATION_CONTROL2 0x4531
+#define mmDPG_WATERMARK_MASK_CONTROL 0x1b32
+#define mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL 0x1b32
+#define mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL 0x1d32
+#define mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL 0x1f32
+#define mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL 0x4132
+#define mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL 0x4332
+#define mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL 0x4532
+#define mmDPG_PIPE_URGENCY_CONTROL 0x1b33
+#define mmDMIF_PG0_DPG_PIPE_URGENCY_CONTROL 0x1b33
+#define mmDMIF_PG1_DPG_PIPE_URGENCY_CONTROL 0x1d33
+#define mmDMIF_PG2_DPG_PIPE_URGENCY_CONTROL 0x1f33
+#define mmDMIF_PG3_DPG_PIPE_URGENCY_CONTROL 0x4133
+#define mmDMIF_PG4_DPG_PIPE_URGENCY_CONTROL 0x4333
+#define mmDMIF_PG5_DPG_PIPE_URGENCY_CONTROL 0x4533
+#define mmDPG_PIPE_DPM_CONTROL 0x1b34
+#define mmDMIF_PG0_DPG_PIPE_DPM_CONTROL 0x1b34
+#define mmDMIF_PG1_DPG_PIPE_DPM_CONTROL 0x1d34
+#define mmDMIF_PG2_DPG_PIPE_DPM_CONTROL 0x1f34
+#define mmDMIF_PG3_DPG_PIPE_DPM_CONTROL 0x4134
+#define mmDMIF_PG4_DPG_PIPE_DPM_CONTROL 0x4334
+#define mmDMIF_PG5_DPG_PIPE_DPM_CONTROL 0x4534
+#define mmDPG_PIPE_STUTTER_CONTROL 0x1b35
+#define mmDMIF_PG0_DPG_PIPE_STUTTER_CONTROL 0x1b35
+#define mmDMIF_PG1_DPG_PIPE_STUTTER_CONTROL 0x1d35
+#define mmDMIF_PG2_DPG_PIPE_STUTTER_CONTROL 0x1f35
+#define mmDMIF_PG3_DPG_PIPE_STUTTER_CONTROL 0x4135
+#define mmDMIF_PG4_DPG_PIPE_STUTTER_CONTROL 0x4335
+#define mmDMIF_PG5_DPG_PIPE_STUTTER_CONTROL 0x4535
+#define mmDPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x1b36
+#define mmDMIF_PG0_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x1b36
+#define mmDMIF_PG1_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x1d36
+#define mmDMIF_PG2_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x1f36
+#define mmDMIF_PG3_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4136
+#define mmDMIF_PG4_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4336
+#define mmDMIF_PG5_DPG_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4536
+#define mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x1b37
+#define mmDMIF_PG0_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x1b37
+#define mmDMIF_PG1_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x1d37
+#define mmDMIF_PG2_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x1f37
+#define mmDMIF_PG3_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x4137
+#define mmDMIF_PG4_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x4337
+#define mmDMIF_PG5_DPG_PIPE_STUTTER_CONTROL_NONLPTCH 0x4537
+#define mmDPG_REPEATER_PROGRAM 0x1b3a
+#define mmDMIF_PG0_DPG_REPEATER_PROGRAM 0x1b3a
+#define mmDMIF_PG1_DPG_REPEATER_PROGRAM 0x1d3a
+#define mmDMIF_PG2_DPG_REPEATER_PROGRAM 0x1f3a
+#define mmDMIF_PG3_DPG_REPEATER_PROGRAM 0x413a
+#define mmDMIF_PG4_DPG_REPEATER_PROGRAM 0x433a
+#define mmDMIF_PG5_DPG_REPEATER_PROGRAM 0x453a
+#define mmDPG_HW_DEBUG_A 0x1b3b
+#define mmDMIF_PG0_DPG_HW_DEBUG_A 0x1b3b
+#define mmDMIF_PG1_DPG_HW_DEBUG_A 0x1d3b
+#define mmDMIF_PG2_DPG_HW_DEBUG_A 0x1f3b
+#define mmDMIF_PG3_DPG_HW_DEBUG_A 0x413b
+#define mmDMIF_PG4_DPG_HW_DEBUG_A 0x433b
+#define mmDMIF_PG5_DPG_HW_DEBUG_A 0x453b
+#define mmDPG_HW_DEBUG_B 0x1b3c
+#define mmDMIF_PG0_DPG_HW_DEBUG_B 0x1b3c
+#define mmDMIF_PG1_DPG_HW_DEBUG_B 0x1d3c
+#define mmDMIF_PG2_DPG_HW_DEBUG_B 0x1f3c
+#define mmDMIF_PG3_DPG_HW_DEBUG_B 0x413c
+#define mmDMIF_PG4_DPG_HW_DEBUG_B 0x433c
+#define mmDMIF_PG5_DPG_HW_DEBUG_B 0x453c
+#define mmDPG_HW_DEBUG_11 0x1b3d
+#define mmDMIF_PG0_DPG_HW_DEBUG_11 0x1b3d
+#define mmDMIF_PG1_DPG_HW_DEBUG_11 0x1d3d
+#define mmDMIF_PG2_DPG_HW_DEBUG_11 0x1f3d
+#define mmDMIF_PG3_DPG_HW_DEBUG_11 0x413d
+#define mmDMIF_PG4_DPG_HW_DEBUG_11 0x433d
+#define mmDMIF_PG5_DPG_HW_DEBUG_11 0x453d
+#define mmDPG_CHK_PRE_PROC_CNTL 0x1b3e
+#define mmDMIF_PG0_DPG_CHK_PRE_PROC_CNTL 0x1b3e
+#define mmDMIF_PG1_DPG_CHK_PRE_PROC_CNTL 0x1d3e
+#define mmDMIF_PG2_DPG_CHK_PRE_PROC_CNTL 0x1f3e
+#define mmDMIF_PG3_DPG_CHK_PRE_PROC_CNTL 0x413e
+#define mmDMIF_PG4_DPG_CHK_PRE_PROC_CNTL 0x433e
+#define mmDMIF_PG5_DPG_CHK_PRE_PROC_CNTL 0x453e
+#define mmDPG_DVMM_STATUS 0x1b3f
+#define mmDMIF_PG0_DPG_DVMM_STATUS 0x1b3f
+#define mmDMIF_PG1_DPG_DVMM_STATUS 0x1d3f
+#define mmDMIF_PG2_DPG_DVMM_STATUS 0x1f3f
+#define mmDMIF_PG3_DPG_DVMM_STATUS 0x413f
+#define mmDMIF_PG4_DPG_DVMM_STATUS 0x433f
+#define mmDMIF_PG5_DPG_DVMM_STATUS 0x453f
+#define mmDPG_TEST_DEBUG_INDEX 0x1b38
+#define mmDMIF_PG0_DPG_TEST_DEBUG_INDEX 0x1b38
+#define mmDMIF_PG1_DPG_TEST_DEBUG_INDEX 0x1d38
+#define mmDMIF_PG2_DPG_TEST_DEBUG_INDEX 0x1f38
+#define mmDMIF_PG3_DPG_TEST_DEBUG_INDEX 0x4138
+#define mmDMIF_PG4_DPG_TEST_DEBUG_INDEX 0x4338
+#define mmDMIF_PG5_DPG_TEST_DEBUG_INDEX 0x4538
+#define mmDPG_TEST_DEBUG_DATA 0x1b39
+#define mmDMIF_PG0_DPG_TEST_DEBUG_DATA 0x1b39
+#define mmDMIF_PG1_DPG_TEST_DEBUG_DATA 0x1d39
+#define mmDMIF_PG2_DPG_TEST_DEBUG_DATA 0x1f39
+#define mmDMIF_PG3_DPG_TEST_DEBUG_DATA 0x4139
+#define mmDMIF_PG4_DPG_TEST_DEBUG_DATA 0x4339
+#define mmDMIF_PG5_DPG_TEST_DEBUG_DATA 0x4539
+#define mmDPGV0_PIPE_ARBITRATION_CONTROL1 0x4730
+#define mmDMIFV_PG0_DPGV0_PIPE_ARBITRATION_CONTROL1 0x4730
+#define mmDMIFV_PG1_DPGV0_PIPE_ARBITRATION_CONTROL1 0x9930
+#define mmDPGV1_PIPE_ARBITRATION_CONTROL1 0x473d
+#define mmDMIFV_PG0_DPGV1_PIPE_ARBITRATION_CONTROL1 0x473d
+#define mmDMIFV_PG1_DPGV1_PIPE_ARBITRATION_CONTROL1 0x993d
+#define mmDPGV0_PIPE_ARBITRATION_CONTROL2 0x4731
+#define mmDMIFV_PG0_DPGV0_PIPE_ARBITRATION_CONTROL2 0x4731
+#define mmDMIFV_PG1_DPGV0_PIPE_ARBITRATION_CONTROL2 0x9931
+#define mmDPGV1_PIPE_ARBITRATION_CONTROL2 0x473e
+#define mmDMIFV_PG0_DPGV1_PIPE_ARBITRATION_CONTROL2 0x473e
+#define mmDMIFV_PG1_DPGV1_PIPE_ARBITRATION_CONTROL2 0x993e
+#define mmDPGV0_WATERMARK_MASK_CONTROL 0x4732
+#define mmDMIFV_PG0_DPGV0_WATERMARK_MASK_CONTROL 0x4732
+#define mmDMIFV_PG1_DPGV0_WATERMARK_MASK_CONTROL 0x9932
+#define mmDPGV1_WATERMARK_MASK_CONTROL 0x473f
+#define mmDMIFV_PG0_DPGV1_WATERMARK_MASK_CONTROL 0x473f
+#define mmDMIFV_PG1_DPGV1_WATERMARK_MASK_CONTROL 0x993f
+#define mmDPGV0_PIPE_URGENCY_CONTROL 0x4733
+#define mmDMIFV_PG0_DPGV0_PIPE_URGENCY_CONTROL 0x4733
+#define mmDMIFV_PG1_DPGV0_PIPE_URGENCY_CONTROL 0x9933
+#define mmDPGV1_PIPE_URGENCY_CONTROL 0x4740
+#define mmDMIFV_PG0_DPGV1_PIPE_URGENCY_CONTROL 0x4740
+#define mmDMIFV_PG1_DPGV1_PIPE_URGENCY_CONTROL 0x9940
+#define mmDPGV0_PIPE_DPM_CONTROL 0x4734
+#define mmDMIFV_PG0_DPGV0_PIPE_DPM_CONTROL 0x4734
+#define mmDMIFV_PG1_DPGV0_PIPE_DPM_CONTROL 0x9934
+#define mmDPGV1_PIPE_DPM_CONTROL 0x4741
+#define mmDMIFV_PG0_DPGV1_PIPE_DPM_CONTROL 0x4741
+#define mmDMIFV_PG1_DPGV1_PIPE_DPM_CONTROL 0x9941
+#define mmDPGV0_PIPE_STUTTER_CONTROL 0x4735
+#define mmDMIFV_PG0_DPGV0_PIPE_STUTTER_CONTROL 0x4735
+#define mmDMIFV_PG1_DPGV0_PIPE_STUTTER_CONTROL 0x9935
+#define mmDPGV1_PIPE_STUTTER_CONTROL 0x4742
+#define mmDMIFV_PG0_DPGV1_PIPE_STUTTER_CONTROL 0x4742
+#define mmDMIFV_PG1_DPGV1_PIPE_STUTTER_CONTROL 0x9942
+#define mmDPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4736
+#define mmDMIFV_PG0_DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4736
+#define mmDMIFV_PG1_DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL 0x9936
+#define mmDPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4743
+#define mmDMIFV_PG0_DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL 0x4743
+#define mmDMIFV_PG1_DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL 0x9943
+#define mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH 0x4737
+#define mmDMIFV_PG0_DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH 0x4737
+#define mmDMIFV_PG1_DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH 0x9937
+#define mmDPGV1_PIPE_STUTTER_CONTROL_NONLPTCH 0x4744
+#define mmDMIFV_PG0_DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH 0x4744
+#define mmDMIFV_PG1_DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH 0x9944
+#define mmDPGV0_REPEATER_PROGRAM 0x4738
+#define mmDMIFV_PG0_DPGV0_REPEATER_PROGRAM 0x4738
+#define mmDMIFV_PG1_DPGV0_REPEATER_PROGRAM 0x9938
+#define mmDPGV1_REPEATER_PROGRAM 0x4745
+#define mmDMIFV_PG0_DPGV1_REPEATER_PROGRAM 0x4745
+#define mmDMIFV_PG1_DPGV1_REPEATER_PROGRAM 0x9945
+#define mmDPGV0_HW_DEBUG_A 0x4739
+#define mmDMIFV_PG0_DPGV0_HW_DEBUG_A 0x4739
+#define mmDMIFV_PG1_DPGV0_HW_DEBUG_A 0x9939
+#define mmDPGV1_HW_DEBUG_A 0x4746
+#define mmDMIFV_PG0_DPGV1_HW_DEBUG_A 0x4746
+#define mmDMIFV_PG1_DPGV1_HW_DEBUG_A 0x9946
+#define mmDPGV0_HW_DEBUG_B 0x473a
+#define mmDMIFV_PG0_DPGV0_HW_DEBUG_B 0x473a
+#define mmDMIFV_PG1_DPGV0_HW_DEBUG_B 0x993a
+#define mmDPGV1_HW_DEBUG_B 0x4747
+#define mmDMIFV_PG0_DPGV1_HW_DEBUG_B 0x4747
+#define mmDMIFV_PG1_DPGV1_HW_DEBUG_B 0x9947
+#define mmDPGV0_HW_DEBUG_11 0x473b
+#define mmDMIFV_PG0_DPGV0_HW_DEBUG_11 0x473b
+#define mmDMIFV_PG1_DPGV0_HW_DEBUG_11 0x993b
+#define mmDPGV1_HW_DEBUG_11 0x4748
+#define mmDMIFV_PG0_DPGV1_HW_DEBUG_11 0x4748
+#define mmDMIFV_PG1_DPGV1_HW_DEBUG_11 0x9948
+#define mmDPGV0_CHK_PRE_PROC_CNTL 0x473c
+#define mmDMIFV_PG0_DPGV0_CHK_PRE_PROC_CNTL 0x473c
+#define mmDMIFV_PG1_DPGV0_CHK_PRE_PROC_CNTL 0x993c
+#define mmDPGV1_CHK_PRE_PROC_CNTL 0x4749
+#define mmDMIFV_PG0_DPGV1_CHK_PRE_PROC_CNTL 0x4749
+#define mmDMIFV_PG1_DPGV1_CHK_PRE_PROC_CNTL 0x9949
+#define mmDPGV_TEST_DEBUG_INDEX 0x474e
+#define mmDMIFV_PG0_DPGV_TEST_DEBUG_INDEX 0x474e
+#define mmDMIFV_PG1_DPGV_TEST_DEBUG_INDEX 0x994e
+#define mmDPGV_TEST_DEBUG_DATA 0x474f
+#define mmDMIFV_PG0_DPGV_TEST_DEBUG_DATA 0x474f
+#define mmDMIFV_PG1_DPGV_TEST_DEBUG_DATA 0x994f
+#define mmAZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x18
+#define mmAZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x18
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0xf00
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID 0xf02
+#define ixAZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT 0xf04
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT 0x1f04
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x1f05
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x1f0a
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x1f0b
+#define ixAZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x1f0f
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE 0x1705
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESET 0x17ff
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x1720
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2 0x1721
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3 0x1722
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4 0x1723
+#define ixAZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x1770
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x1828
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID 0x1829
+#define mmAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL 0x182a
+#define mmAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL 0x182b
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x182c
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x182d
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x182e
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x182f
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE 0x1830
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET 0x1831
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x1832
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x1833
+#define mmCC_RCU_DC_AUDIO_PORT_CONNECTIVITY 0x1834
+#define mmCC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY 0x1835
+#define mmAZALIA_F0_CODEC_DEBUG 0x1836
+#define mmAZALIA_F0_GTC_GROUP_OFFSET0 0x1837
+#define mmAZALIA_F0_GTC_GROUP_OFFSET1 0x1838
+#define mmAZALIA_F0_GTC_GROUP_OFFSET2 0x1839
+#define mmAZALIA_F0_GTC_GROUP_OFFSET3 0x183a
+#define mmAZALIA_F0_GTC_GROUP_OFFSET4 0x183b
+#define mmAZALIA_F0_GTC_GROUP_OFFSET5 0x183c
+#define mmAZALIA_F0_GTC_GROUP_OFFSET6 0x183d
+#define mmGLOBAL_CAPABILITIES 0x0
+#define mmMINOR_VERSION 0x0
+#define mmMAJOR_VERSION 0x0
+#define mmOUTPUT_PAYLOAD_CAPABILITY 0x1
+#define mmINPUT_PAYLOAD_CAPABILITY 0x1
+#define mmGLOBAL_CONTROL 0x2
+#define mmWAKE_ENABLE 0x3
+#define mmSTATE_CHANGE_STATUS 0x3
+#define mmGLOBAL_STATUS 0x4
+#define mmOUTPUT_STREAM_PAYLOAD_CAPABILITY 0x6
+#define mmINPUT_STREAM_PAYLOAD_CAPABILITY 0x6
+#define mmINTERRUPT_CONTROL 0x8
+#define mmINTERRUPT_STATUS 0x9
+#define mmWALL_CLOCK_COUNTER 0xc
+#define mmSTREAM_SYNCHRONIZATION 0xe
+#define mmCORB_LOWER_BASE_ADDRESS 0x10
+#define mmCORB_UPPER_BASE_ADDRESS 0x11
+#define mmCORB_WRITE_POINTER 0x12
+#define mmCORB_READ_POINTER 0x12
+#define mmCORB_CONTROL 0x13
+#define mmCORB_STATUS 0x13
+#define mmCORB_SIZE 0x13
+#define mmRIRB_LOWER_BASE_ADDRESS 0x14
+#define mmRIRB_UPPER_BASE_ADDRESS 0x15
+#define mmRIRB_WRITE_POINTER 0x16
+#define mmRESPONSE_INTERRUPT_COUNT 0x16
+#define mmRIRB_CONTROL 0x17
+#define mmRIRB_STATUS 0x17
+#define mmRIRB_SIZE 0x17
+#define mmIMMEDIATE_COMMAND_OUTPUT_INTERFACE 0x18
+#define mmIMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x18
+#define mmIMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x18
+#define mmIMMEDIATE_RESPONSE_INPUT_INTERFACE 0x19
+#define mmIMMEDIATE_COMMAND_STATUS 0x1a
+#define mmDMA_POSITION_LOWER_BASE_ADDRESS 0x1c
+#define mmDMA_POSITION_UPPER_BASE_ADDRESS 0x1d
+#define mmWALL_CLOCK_COUNTER_ALIAS 0x80c
+#define mmOUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS 0x20
+#define mmOUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER 0x21
+#define mmOUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH 0x22
+#define mmOUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX 0x23
+#define mmOUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE 0x24
+#define mmOUTPUT_STREAM_DESCRIPTOR_FORMAT 0x24
+#define mmOUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS 0x26
+#define mmOUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS 0x27
+#define mmOUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS 0x821
+#define mmAZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX 0x18
+#define mmAZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA 0x18
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x2f09
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x2f0a
+#define ixAZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x2f0b
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x2200
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x2706
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x270d
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2 0x270e
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3 0x273e
+#define ixAZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL 0x2724
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x2770
+#define ixAZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x2771
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x3f09
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES 0x3f0c
+#define ixAZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH 0x3f0e
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY 0x3702
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x3707
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x3708
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x3709
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x371c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2 0x371d
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3 0x371e
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4 0x371f
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION 0x3770
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION 0x3771
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO 0x3772
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR 0x3776
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA 0x3776
+#define ixAUDIO_DESCRIPTOR0 0x1
+#define ixAUDIO_DESCRIPTOR1 0x2
+#define ixAUDIO_DESCRIPTOR2 0x3
+#define ixAUDIO_DESCRIPTOR3 0x4
+#define ixAUDIO_DESCRIPTOR4 0x5
+#define ixAUDIO_DESCRIPTOR5 0x6
+#define ixAUDIO_DESCRIPTOR6 0x7
+#define ixAUDIO_DESCRIPTOR7 0x8
+#define ixAUDIO_DESCRIPTOR8 0x9
+#define ixAUDIO_DESCRIPTOR9 0xa
+#define ixAUDIO_DESCRIPTOR10 0xb
+#define ixAUDIO_DESCRIPTOR11 0xc
+#define ixAUDIO_DESCRIPTOR12 0xd
+#define ixAUDIO_DESCRIPTOR13 0xe
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE 0x3777
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE 0x3778
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE 0x3779
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE 0x377a
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC 0x377b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_HBR 0x377c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX 0x3780
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA 0x3781
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID 0x0
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID 0x1
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN 0x2
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID0 0x3
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_PORTID1 0x4
+#define ixSINK_DESCRIPTION0 0x5
+#define ixSINK_DESCRIPTION1 0x6
+#define ixSINK_DESCRIPTION2 0x7
+#define ixSINK_DESCRIPTION3 0x8
+#define ixSINK_DESCRIPTION4 0x9
+#define ixSINK_DESCRIPTION5 0xa
+#define ixSINK_DESCRIPTION6 0xb
+#define ixSINK_DESCRIPTION7 0xc
+#define ixSINK_DESCRIPTION8 0xd
+#define ixSINK_DESCRIPTION9 0xe
+#define ixSINK_DESCRIPTION10 0xf
+#define ixSINK_DESCRIPTION11 0x10
+#define ixSINK_DESCRIPTION12 0x11
+#define ixSINK_DESCRIPTION13 0x12
+#define ixSINK_DESCRIPTION14 0x13
+#define ixSINK_DESCRIPTION15 0x14
+#define ixSINK_DESCRIPTION16 0x15
+#define ixSINK_DESCRIPTION17 0x16
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE 0x3785
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE 0x3786
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE 0x3787
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE 0x3788
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x3789
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x378a
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x378b
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x378c
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x378d
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x378e
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x378f
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x3790
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x3791
+#define ixAZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x3792
+#define ixAZALIA_F2_CODEC_PIN_ASSOCIATION_INFO 0x3793
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x3797
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x3798
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB 0x3799
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x379a
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE 0x379b
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x379c
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x379d
+#define ixAZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x379e
+#define mmAZALIA_CONTROLLER_CLOCK_GATING 0x17e4
+#define mmAZALIA_AUDIO_DTO 0x17e5
+#define mmAZALIA_AUDIO_DTO_CONTROL 0x17e6
+#define mmAZALIA_SCLK_CONTROL 0x17e7
+#define mmAZALIA_UNDERFLOW_FILLER_SAMPLE 0x17e8
+#define mmAZALIA_DATA_DMA_CONTROL 0x17e9
+#define mmAZALIA_BDL_DMA_CONTROL 0x17ea
+#define mmAZALIA_RIRB_AND_DP_CONTROL 0x17eb
+#define mmAZALIA_CORB_DMA_CONTROL 0x17ec
+#define mmAZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER 0x17f3
+#define mmAZALIA_CYCLIC_BUFFER_SYNC 0x17f4
+#define mmAZALIA_GLOBAL_CAPABILITIES 0x17f5
+#define mmAZALIA_OUTPUT_PAYLOAD_CAPABILITY 0x17f6
+#define mmAZALIA_OUTPUT_STREAM_ARBITER_CONTROL 0x17f7
+#define mmAZALIA_INPUT_PAYLOAD_CAPABILITY 0x17f8
+#define mmAZALIA_CONTROLLER_DEBUG 0x17f9
+#define mmAZALIA_MEM_PWR_CTRL 0x1810
+#define mmAZALIA_MEM_PWR_STATUS 0x1811
+#define mmDCI_PG_DEBUG_CONFIG 0x1812
+#define mmAZALIA_INPUT_CRC0_CONTROL0 0x17fb
+#define mmAZALIA_INPUT_CRC0_CONTROL1 0x17fc
+#define mmAZALIA_INPUT_CRC0_CONTROL2 0x17fd
+#define mmAZALIA_INPUT_CRC0_CONTROL3 0x17fe
+#define mmAZALIA_INPUT_CRC0_RESULT 0x17ff
+#define ixAZALIA_INPUT_CRC0_CHANNEL0 0x0
+#define ixAZALIA_INPUT_CRC0_CHANNEL1 0x1
+#define ixAZALIA_INPUT_CRC0_CHANNEL2 0x2
+#define ixAZALIA_INPUT_CRC0_CHANNEL3 0x3
+#define ixAZALIA_INPUT_CRC0_CHANNEL4 0x4
+#define ixAZALIA_INPUT_CRC0_CHANNEL5 0x5
+#define ixAZALIA_INPUT_CRC0_CHANNEL6 0x6
+#define ixAZALIA_INPUT_CRC0_CHANNEL7 0x7
+#define mmAZALIA_INPUT_CRC1_CONTROL0 0x1800
+#define mmAZALIA_INPUT_CRC1_CONTROL1 0x1801
+#define mmAZALIA_INPUT_CRC1_CONTROL2 0x1802
+#define mmAZALIA_INPUT_CRC1_CONTROL3 0x1803
+#define mmAZALIA_INPUT_CRC1_RESULT 0x1804
+#define ixAZALIA_INPUT_CRC1_CHANNEL0 0x0
+#define ixAZALIA_INPUT_CRC1_CHANNEL1 0x1
+#define ixAZALIA_INPUT_CRC1_CHANNEL2 0x2
+#define ixAZALIA_INPUT_CRC1_CHANNEL3 0x3
+#define ixAZALIA_INPUT_CRC1_CHANNEL4 0x4
+#define ixAZALIA_INPUT_CRC1_CHANNEL5 0x5
+#define ixAZALIA_INPUT_CRC1_CHANNEL6 0x6
+#define ixAZALIA_INPUT_CRC1_CHANNEL7 0x7
+#define mmAZALIA_CRC0_CONTROL0 0x1805
+#define mmAZALIA_CRC0_CONTROL1 0x1806
+#define mmAZALIA_CRC0_CONTROL2 0x1807
+#define mmAZALIA_CRC0_CONTROL3 0x1808
+#define mmAZALIA_CRC0_RESULT 0x1809
+#define ixAZALIA_CRC0_CHANNEL0 0x0
+#define ixAZALIA_CRC0_CHANNEL1 0x1
+#define ixAZALIA_CRC0_CHANNEL2 0x2
+#define ixAZALIA_CRC0_CHANNEL3 0x3
+#define ixAZALIA_CRC0_CHANNEL4 0x4
+#define ixAZALIA_CRC0_CHANNEL5 0x5
+#define ixAZALIA_CRC0_CHANNEL6 0x6
+#define ixAZALIA_CRC0_CHANNEL7 0x7
+#define mmAZALIA_CRC1_CONTROL0 0x180a
+#define mmAZALIA_CRC1_CONTROL1 0x180b
+#define mmAZALIA_CRC1_CONTROL2 0x180c
+#define mmAZALIA_CRC1_CONTROL3 0x180d
+#define mmAZALIA_CRC1_RESULT 0x180e
+#define ixAZALIA_CRC1_CHANNEL0 0x0
+#define ixAZALIA_CRC1_CHANNEL1 0x1
+#define ixAZALIA_CRC1_CHANNEL2 0x2
+#define ixAZALIA_CRC1_CHANNEL3 0x3
+#define ixAZALIA_CRC1_CHANNEL4 0x4
+#define ixAZALIA_CRC1_CHANNEL5 0x5
+#define ixAZALIA_CRC1_CHANNEL6 0x6
+#define ixAZALIA_CRC1_CHANNEL7 0x7
+#define mmAZ_TEST_DEBUG_INDEX 0x181f
+#define mmAZ_TEST_DEBUG_DATA 0x1820
+#define mmAZALIA_STREAM_INDEX 0x1780
+#define mmAZF0STREAM0_AZALIA_STREAM_INDEX 0x1780
+#define mmAZF0STREAM1_AZALIA_STREAM_INDEX 0x1782
+#define mmAZF0STREAM2_AZALIA_STREAM_INDEX 0x1784
+#define mmAZF0STREAM3_AZALIA_STREAM_INDEX 0x1786
+#define mmAZF0STREAM4_AZALIA_STREAM_INDEX 0x1788
+#define mmAZF0STREAM5_AZALIA_STREAM_INDEX 0x178a
+#define mmAZF0STREAM6_AZALIA_STREAM_INDEX 0x178c
+#define mmAZF0STREAM7_AZALIA_STREAM_INDEX 0x178e
+#define mmAZF0STREAM8_AZALIA_STREAM_INDEX 0x59c0
+#define mmAZF0STREAM9_AZALIA_STREAM_INDEX 0x59c2
+#define mmAZF0STREAM10_AZALIA_STREAM_INDEX 0x59c4
+#define mmAZF0STREAM11_AZALIA_STREAM_INDEX 0x59c6
+#define mmAZF0STREAM12_AZALIA_STREAM_INDEX 0x59c8
+#define mmAZF0STREAM13_AZALIA_STREAM_INDEX 0x59ca
+#define mmAZF0STREAM14_AZALIA_STREAM_INDEX 0x59cc
+#define mmAZF0STREAM15_AZALIA_STREAM_INDEX 0x59ce
+#define mmAZALIA_STREAM_DATA 0x1781
+#define mmAZF0STREAM0_AZALIA_STREAM_DATA 0x1781
+#define mmAZF0STREAM1_AZALIA_STREAM_DATA 0x1783
+#define mmAZF0STREAM2_AZALIA_STREAM_DATA 0x1785
+#define mmAZF0STREAM3_AZALIA_STREAM_DATA 0x1787
+#define mmAZF0STREAM4_AZALIA_STREAM_DATA 0x1789
+#define mmAZF0STREAM5_AZALIA_STREAM_DATA 0x178b
+#define mmAZF0STREAM6_AZALIA_STREAM_DATA 0x178d
+#define mmAZF0STREAM7_AZALIA_STREAM_DATA 0x178f
+#define mmAZF0STREAM8_AZALIA_STREAM_DATA 0x59c1
+#define mmAZF0STREAM9_AZALIA_STREAM_DATA 0x59c3
+#define mmAZF0STREAM10_AZALIA_STREAM_DATA 0x59c5
+#define mmAZF0STREAM11_AZALIA_STREAM_DATA 0x59c7
+#define mmAZF0STREAM12_AZALIA_STREAM_DATA 0x59c9
+#define mmAZF0STREAM13_AZALIA_STREAM_DATA 0x59cb
+#define mmAZF0STREAM14_AZALIA_STREAM_DATA 0x59cd
+#define mmAZF0STREAM15_AZALIA_STREAM_DATA 0x59cf
+#define ixAZALIA_FIFO_SIZE_CONTROL 0x0
+#define ixAZALIA_LATENCY_COUNTER_CONTROL 0x1
+#define ixAZALIA_WORSTCASE_LATENCY_COUNT 0x2
+#define ixAZALIA_CUMULATIVE_LATENCY_COUNT 0x3
+#define ixAZALIA_CUMULATIVE_REQUEST_COUNT 0x4
+#define ixAZALIA_STREAM_DEBUG 0x5
+#define mmAZALIA_F0_CODEC_ENDPOINT_INDEX 0x17a8
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x17a8
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x17ac
+#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x17b0
+#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x17b4
+#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x17b8
+#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x17bc
+#define mmAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x17c0
+#define mmAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x17c4
+#define mmAZALIA_F0_CODEC_ENDPOINT_DATA 0x17a9
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA 0x17a9
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA 0x17ad
+#define mmAZF0ENDPOINT2_AZALIA_F0_CODEC_ENDPOINT_DATA 0x17b1
+#define mmAZF0ENDPOINT3_AZALIA_F0_CODEC_ENDPOINT_DATA 0x17b5
+#define mmAZF0ENDPOINT4_AZALIA_F0_CODEC_ENDPOINT_DATA 0x17b9
+#define mmAZF0ENDPOINT5_AZALIA_F0_CODEC_ENDPOINT_DATA 0x17bd
+#define mmAZF0ENDPOINT6_AZALIA_F0_CODEC_ENDPOINT_DATA 0x17c1
+#define mmAZF0ENDPOINT7_AZALIA_F0_CODEC_ENDPOINT_DATA 0x17c5
+#define ixAZALIA_F0_CODEC_CONVERTER_PIN_DEBUG 0x0
+#define ixAZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x1
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x2
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x3
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x4
+#define ixAZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x5
+#define ixAZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x6
+#define ixAZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x7
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x8
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x9
+#define ixAZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_OFFSET_DEBUG 0xa
+#define ixAZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0xc
+#define ixAZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0xd
+#define ixAZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0xe
+#define ixAZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x20
+#define ixAZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x21
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x22
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x23
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x24
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x25
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2a
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2b
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2c
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2d
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2e
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2f
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x36
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x57
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x58
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x55
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x59
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x5a
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x5b
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x5c
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x5d
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x5e
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x5f
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x60
+#define ixAZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x61
+#define ixAZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x62
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x63
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x64
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x65
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x66
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x67
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x68
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x69
+#define ixAZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x6a
+#define ixAZALIA_F0_AUDIO_ENABLE_STATUS 0x6b
+#define ixAZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x6c
+#define ixAZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x6d
+#define ixAZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x6e
+#define mmAZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x59d4
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x59d4
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x59d8
+#define mmAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x59dc
+#define mmAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x59e0
+#define mmAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x59e4
+#define mmAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x59e8
+#define mmAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x59ec
+#define mmAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x59f0
+#define mmAZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x59d5
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x59d5
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x59d9
+#define mmAZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x59dd
+#define mmAZF0INPUTENDPOINT3_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x59e1
+#define mmAZF0INPUTENDPOINT4_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x59e5
+#define mmAZF0INPUTENDPOINT5_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x59e9
+#define mmAZF0INPUTENDPOINT6_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x59ed
+#define mmAZF0INPUTENDPOINT7_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x59f1
+#define ixAZALIA_F0_CODEC_INPUT_CONVERTER_PIN_DEBUG 0x0
+#define ixAZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x1
+#define ixAZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x2
+#define ixAZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x3
+#define ixAZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x4
+#define ixAZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x5
+#define ixAZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x6
+#define ixAZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x20
+#define ixAZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x21
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x22
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x23
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x24
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x36
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x37
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x38
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x53
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x54
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x55
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x67
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x68
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x64
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x65
+#define ixAZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x66
+#define mmAZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX 0x18
+#define mmAZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA 0x18
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x6f09
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x6f0a
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x6f0b
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x6200
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x6706
+#define ixAZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x670d
+#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x7f09
+#define ixAZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x7f0c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x7707
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x7708
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE 0x7709
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x771c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2 0x771d
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3 0x771e
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4 0x771f
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE 0x7777
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE 0x7785
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE 0x7778
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE 0x7786
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR 0x777c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE 0x7779
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE 0x7787
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE 0x777a
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE 0x7788
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x7771
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x779b
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x779c
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L 0x779d
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H 0x779e
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x7798
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB 0x7799
+#define ixAZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x779a
+#define mmBLND_CONTROL 0x1b6d
+#define mmBLND0_BLND_CONTROL 0x1b6d
+#define mmBLND1_BLND_CONTROL 0x1d6d
+#define mmBLND2_BLND_CONTROL 0x1f6d
+#define mmBLND3_BLND_CONTROL 0x416d
+#define mmBLND4_BLND_CONTROL 0x436d
+#define mmBLND5_BLND_CONTROL 0x456d
+#define mmBLND_SM_CONTROL2 0x1b6e
+#define mmBLND0_BLND_SM_CONTROL2 0x1b6e
+#define mmBLND1_BLND_SM_CONTROL2 0x1d6e
+#define mmBLND2_BLND_SM_CONTROL2 0x1f6e
+#define mmBLND3_BLND_SM_CONTROL2 0x416e
+#define mmBLND4_BLND_SM_CONTROL2 0x436e
+#define mmBLND5_BLND_SM_CONTROL2 0x456e
+#define mmBLND_CONTROL2 0x1b6f
+#define mmBLND0_BLND_CONTROL2 0x1b6f
+#define mmBLND1_BLND_CONTROL2 0x1d6f
+#define mmBLND2_BLND_CONTROL2 0x1f6f
+#define mmBLND3_BLND_CONTROL2 0x416f
+#define mmBLND4_BLND_CONTROL2 0x436f
+#define mmBLND5_BLND_CONTROL2 0x456f
+#define mmBLND_UPDATE 0x1b70
+#define mmBLND0_BLND_UPDATE 0x1b70
+#define mmBLND1_BLND_UPDATE 0x1d70
+#define mmBLND2_BLND_UPDATE 0x1f70
+#define mmBLND3_BLND_UPDATE 0x4170
+#define mmBLND4_BLND_UPDATE 0x4370
+#define mmBLND5_BLND_UPDATE 0x4570
+#define mmBLND_UNDERFLOW_INTERRUPT 0x1b71
+#define mmBLND0_BLND_UNDERFLOW_INTERRUPT 0x1b71
+#define mmBLND1_BLND_UNDERFLOW_INTERRUPT 0x1d71
+#define mmBLND2_BLND_UNDERFLOW_INTERRUPT 0x1f71
+#define mmBLND3_BLND_UNDERFLOW_INTERRUPT 0x4171
+#define mmBLND4_BLND_UNDERFLOW_INTERRUPT 0x4371
+#define mmBLND5_BLND_UNDERFLOW_INTERRUPT 0x4571
+#define mmBLND_V_UPDATE_LOCK 0x1b73
+#define mmBLND0_BLND_V_UPDATE_LOCK 0x1b73
+#define mmBLND1_BLND_V_UPDATE_LOCK 0x1d73
+#define mmBLND2_BLND_V_UPDATE_LOCK 0x1f73
+#define mmBLND3_BLND_V_UPDATE_LOCK 0x4173
+#define mmBLND4_BLND_V_UPDATE_LOCK 0x4373
+#define mmBLND5_BLND_V_UPDATE_LOCK 0x4573
+#define mmBLND_REG_UPDATE_STATUS 0x1b77
+#define mmBLND0_BLND_REG_UPDATE_STATUS 0x1b77
+#define mmBLND1_BLND_REG_UPDATE_STATUS 0x1d77
+#define mmBLND2_BLND_REG_UPDATE_STATUS 0x1f77
+#define mmBLND3_BLND_REG_UPDATE_STATUS 0x4177
+#define mmBLND4_BLND_REG_UPDATE_STATUS 0x4377
+#define mmBLND5_BLND_REG_UPDATE_STATUS 0x4577
+#define mmBLND_DEBUG 0x1b74
+#define mmBLND0_BLND_DEBUG 0x1b74
+#define mmBLND1_BLND_DEBUG 0x1d74
+#define mmBLND2_BLND_DEBUG 0x1f74
+#define mmBLND3_BLND_DEBUG 0x4174
+#define mmBLND4_BLND_DEBUG 0x4374
+#define mmBLND5_BLND_DEBUG 0x4574
+#define mmBLND_TEST_DEBUG_INDEX 0x1b75
+#define mmBLND0_BLND_TEST_DEBUG_INDEX 0x1b75
+#define mmBLND1_BLND_TEST_DEBUG_INDEX 0x1d75
+#define mmBLND2_BLND_TEST_DEBUG_INDEX 0x1f75
+#define mmBLND3_BLND_TEST_DEBUG_INDEX 0x4175
+#define mmBLND4_BLND_TEST_DEBUG_INDEX 0x4375
+#define mmBLND5_BLND_TEST_DEBUG_INDEX 0x4575
+#define mmBLND_TEST_DEBUG_DATA 0x1b76
+#define mmBLND0_BLND_TEST_DEBUG_DATA 0x1b76
+#define mmBLND1_BLND_TEST_DEBUG_DATA 0x1d76
+#define mmBLND2_BLND_TEST_DEBUG_DATA 0x1f76
+#define mmBLND3_BLND_TEST_DEBUG_DATA 0x4176
+#define mmBLND4_BLND_TEST_DEBUG_DATA 0x4376
+#define mmBLND5_BLND_TEST_DEBUG_DATA 0x4576
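
The BLND block above spells each register out six times because pipes 0-2 and 3-5 sit in two separate aperture ranges (0x1b6d/0x1d6d/0x1f6d versus 0x416d/0x436d/0x456d, stride 0x200 within each range), so no single stride derives all six. A hedged sketch that computes the pipe-N offset from the pipe-0 offset with a small delta table; blnd_reg() is illustrative only. The same deltas reappear for the DCFE instances further down (0x1b00, 0x1d00, 0x1f00, 0x4100, ...):

#include <stdint.h>

/* Per-pipe deltas read off the list above: +0x200 within each aperture,
 * with a +0x2600 jump from pipe 2 to pipe 3. */
static const uint32_t blnd_pipe_delta[6] = {
	0x0000, 0x0200, 0x0400, 0x2600, 0x2800, 0x2a00,
};

static inline uint32_t blnd_reg(uint32_t pipe0_reg, unsigned int pipe)
{
	return pipe0_reg + blnd_pipe_delta[pipe];
}

/* e.g. blnd_reg(mmBLND_CONTROL, 4) == 0x436d == mmBLND4_BLND_CONTROL */
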
+#define mmWB_ENABLE 0x5e18
+#define mmWB_EC_CONFIG 0x5e19
+#define mmCNV_MODE 0x5e1a
+#define mmCNV_WINDOW_START 0x5e1b
+#define mmCNV_WINDOW_SIZE 0x5e1c
+#define mmCNV_UPDATE 0x5e1d
+#define mmCNV_SOURCE_SIZE 0x5e1e
+#define mmCNV_CSC_CONTROL 0x5e1f
+#define mmCNV_CSC_C11_C12 0x5e20
+#define mmCNV_CSC_C13_C14 0x5e21
+#define mmCNV_CSC_C21_C22 0x5e22
+#define mmCNV_CSC_C23_C24 0x5e23
+#define mmCNV_CSC_C31_C32 0x5e24
+#define mmCNV_CSC_C33_C34 0x5e25
+#define mmCNV_CSC_ROUND_OFFSET_R 0x5e26
+#define mmCNV_CSC_ROUND_OFFSET_G 0x5e27
+#define mmCNV_CSC_ROUND_OFFSET_B 0x5e28
+#define mmCNV_CSC_CLAMP_R 0x5e29
+#define mmCNV_CSC_CLAMP_G 0x5e2a
+#define mmCNV_CSC_CLAMP_B 0x5e2b
+#define mmCNV_TEST_CNTL 0x5e2c
+#define mmCNV_TEST_CRC_RED 0x5e2d
+#define mmCNV_TEST_CRC_GREEN 0x5e2e
+#define mmCNV_TEST_CRC_BLUE 0x5e2f
+#define mmWB_DEBUG_CTRL 0x5e30
+#define mmWB_DBG_MODE 0x5e31
+#define mmWB_HW_DEBUG 0x5e32
+#define mmCNV_INPUT_SELECT 0x5e33
+#define mmWB_SOFT_RESET 0x5e36
+#define mmWB_WARM_UP_MODE_CTL1 0x5e37
+#define mmWB_WARM_UP_MODE_CTL2 0x5e38
+#define mmCNV_TEST_DEBUG_INDEX 0x5e34
+#define mmCNV_TEST_DEBUG_DATA 0x5e35
+#define mmDCFE_CLOCK_CONTROL 0x1b00
+#define mmDCFE0_DCFE_CLOCK_CONTROL 0x1b00
+#define mmDCFE1_DCFE_CLOCK_CONTROL 0x1d00
+#define mmDCFE2_DCFE_CLOCK_CONTROL 0x1f00
+#define mmDCFE3_DCFE_CLOCK_CONTROL 0x4100
+#define mmDCFE4_DCFE_CLOCK_CONTROL 0x4300
+#define mmDCFE5_DCFE_CLOCK_CONTROL 0x4500
+#define mmDCFE_SOFT_RESET 0x1b01
+#define mmDCFE0_DCFE_SOFT_RESET 0x1b01
+#define mmDCFE1_DCFE_SOFT_RESET 0x1d01
+#define mmDCFE2_DCFE_SOFT_RESET 0x1f01
+#define mmDCFE3_DCFE_SOFT_RESET 0x4101
+#define mmDCFE4_DCFE_SOFT_RESET 0x4301
+#define mmDCFE5_DCFE_SOFT_RESET 0x4501
+#define mmDCFE_DBG_CONFIG 0x1b02
+#define mmDCFE0_DCFE_DBG_CONFIG 0x1b02
+#define mmDCFE1_DCFE_DBG_CONFIG 0x1d02
+#define mmDCFE2_DCFE_DBG_CONFIG 0x1f02
+#define mmDCFE3_DCFE_DBG_CONFIG 0x4102
+#define mmDCFE4_DCFE_DBG_CONFIG 0x4302
+#define mmDCFE5_DCFE_DBG_CONFIG 0x4502
+#define mmDCFE_MEM_PWR_CTRL 0x1b03
+#define mmDCFE0_DCFE_MEM_PWR_CTRL 0x1b03
+#define mmDCFE1_DCFE_MEM_PWR_CTRL 0x1d03
+#define mmDCFE2_DCFE_MEM_PWR_CTRL 0x1f03
+#define mmDCFE3_DCFE_MEM_PWR_CTRL 0x4103
+#define mmDCFE4_DCFE_MEM_PWR_CTRL 0x4303
+#define mmDCFE5_DCFE_MEM_PWR_CTRL 0x4503
+#define mmDCFE_MEM_PWR_CTRL2 0x1b04
+#define mmDCFE0_DCFE_MEM_PWR_CTRL2 0x1b04
+#define mmDCFE1_DCFE_MEM_PWR_CTRL2 0x1d04
+#define mmDCFE2_DCFE_MEM_PWR_CTRL2 0x1f04
+#define mmDCFE3_DCFE_MEM_PWR_CTRL2 0x4104
+#define mmDCFE4_DCFE_MEM_PWR_CTRL2 0x4304
+#define mmDCFE5_DCFE_MEM_PWR_CTRL2 0x4504
+#define mmDCFE_MEM_PWR_STATUS 0x1b05
+#define mmDCFE0_DCFE_MEM_PWR_STATUS 0x1b05
+#define mmDCFE1_DCFE_MEM_PWR_STATUS 0x1d05
+#define mmDCFE2_DCFE_MEM_PWR_STATUS 0x1f05
+#define mmDCFE3_DCFE_MEM_PWR_STATUS 0x4105
+#define mmDCFE4_DCFE_MEM_PWR_STATUS 0x4305
+#define mmDCFE5_DCFE_MEM_PWR_STATUS 0x4505
+#define mmDCFE_MISC 0x1b06
+#define mmDCFE0_DCFE_MISC 0x1b06
+#define mmDCFE1_DCFE_MISC 0x1d06
+#define mmDCFE2_DCFE_MISC 0x1f06
+#define mmDCFE3_DCFE_MISC 0x4106
+#define mmDCFE4_DCFE_MISC 0x4306
+#define mmDCFE5_DCFE_MISC 0x4506
+#define mmDCFE_FLUSH 0x1b07
+#define mmDCFE0_DCFE_FLUSH 0x1b07
+#define mmDCFE1_DCFE_FLUSH 0x1d07
+#define mmDCFE2_DCFE_FLUSH 0x1f07
+#define mmDCFE3_DCFE_FLUSH 0x4107
+#define mmDCFE4_DCFE_FLUSH 0x4307
+#define mmDCFE5_DCFE_FLUSH 0x4507
+#define mmDCFEV_CLOCK_CONTROL 0x46f4
+#define mmDCFEV0_DCFEV_CLOCK_CONTROL 0x46f4
+#define mmDCFEV1_DCFEV_CLOCK_CONTROL 0x98f4
+#define mmDCFEV_SOFT_RESET 0x46f5
+#define mmDCFEV0_DCFEV_SOFT_RESET 0x46f5
+#define mmDCFEV1_DCFEV_SOFT_RESET 0x98f5
+#define mmDCFEV_DMIFV_CLOCK_CONTROL 0x46f6
+#define mmDCFEV0_DCFEV_DMIFV_CLOCK_CONTROL 0x46f6
+#define mmDCFEV1_DCFEV_DMIFV_CLOCK_CONTROL 0x98f6
+#define mmDCFEV_DBG_CONFIG 0x46f7
+#define mmDCFEV0_DCFEV_DBG_CONFIG 0x46f7
+#define mmDCFEV1_DCFEV_DBG_CONFIG 0x98f7
+#define mmDCFEV_DMIFV_MEM_PWR_CTRL 0x46f8
+#define mmDCFEV0_DCFEV_DMIFV_MEM_PWR_CTRL 0x46f8
+#define mmDCFEV1_DCFEV_DMIFV_MEM_PWR_CTRL 0x98f8
+#define mmDCFEV_DMIFV_MEM_PWR_STATUS 0x46f9
+#define mmDCFEV0_DCFEV_DMIFV_MEM_PWR_STATUS 0x46f9
+#define mmDCFEV1_DCFEV_DMIFV_MEM_PWR_STATUS 0x98f9
+#define mmDCFEV_MEM_PWR_CTRL 0x46fa
+#define mmDCFEV0_DCFEV_MEM_PWR_CTRL 0x46fa
+#define mmDCFEV1_DCFEV_MEM_PWR_CTRL 0x98fa
+#define mmDCFEV_MEM_PWR_CTRL2 0x46fb
+#define mmDCFEV0_DCFEV_MEM_PWR_CTRL2 0x46fb
+#define mmDCFEV1_DCFEV_MEM_PWR_CTRL2 0x98fb
+#define mmDCFEV_MEM_PWR_STATUS 0x46fc
+#define mmDCFEV0_DCFEV_MEM_PWR_STATUS 0x46fc
+#define mmDCFEV1_DCFEV_MEM_PWR_STATUS 0x98fc
+#define mmDCFEV_L_FLUSH 0x46ff
+#define mmDCFEV0_DCFEV_L_FLUSH 0x46ff
+#define mmDCFEV1_DCFEV_L_FLUSH 0x98ff
+#define mmDCFEV_C_FLUSH 0x4700
+#define mmDCFEV0_DCFEV_C_FLUSH 0x4700
+#define mmDCFEV1_DCFEV_C_FLUSH 0x9900
+#define mmDCFEV_DMIFV_DEBUG 0x46fd
+#define mmDCFEV0_DCFEV_DMIFV_DEBUG 0x46fd
+#define mmDCFEV1_DCFEV_DMIFV_DEBUG 0x98fd
+#define mmDCFEV_MISC 0x46fe
+#define mmDCFEV0_DCFEV_MISC 0x46fe
+#define mmDCFEV1_DCFEV_MISC 0x98fe
+#define mmDC_HPD_INT_STATUS 0x1898
+#define mmHPD0_DC_HPD_INT_STATUS 0x1898
+#define mmHPD1_DC_HPD_INT_STATUS 0x18a0
+#define mmHPD2_DC_HPD_INT_STATUS 0x18a8
+#define mmHPD3_DC_HPD_INT_STATUS 0x18b0
+#define mmHPD4_DC_HPD_INT_STATUS 0x18b8
+#define mmHPD5_DC_HPD_INT_STATUS 0x18c0
+#define mmDC_HPD_INT_CONTROL 0x1899
+#define mmHPD0_DC_HPD_INT_CONTROL 0x1899
+#define mmHPD1_DC_HPD_INT_CONTROL 0x18a1
+#define mmHPD2_DC_HPD_INT_CONTROL 0x18a9
+#define mmHPD3_DC_HPD_INT_CONTROL 0x18b1
+#define mmHPD4_DC_HPD_INT_CONTROL 0x18b9
+#define mmHPD5_DC_HPD_INT_CONTROL 0x18c1
+#define mmDC_HPD_CONTROL 0x189a
+#define mmHPD0_DC_HPD_CONTROL 0x189a
+#define mmHPD1_DC_HPD_CONTROL 0x18a2
+#define mmHPD2_DC_HPD_CONTROL 0x18aa
+#define mmHPD3_DC_HPD_CONTROL 0x18b2
+#define mmHPD4_DC_HPD_CONTROL 0x18ba
+#define mmHPD5_DC_HPD_CONTROL 0x18c2
+#define mmDC_HPD_FAST_TRAIN_CNTL 0x189b
+#define mmHPD0_DC_HPD_FAST_TRAIN_CNTL 0x189b
+#define mmHPD1_DC_HPD_FAST_TRAIN_CNTL 0x18a3
+#define mmHPD2_DC_HPD_FAST_TRAIN_CNTL 0x18ab
+#define mmHPD3_DC_HPD_FAST_TRAIN_CNTL 0x18b3
+#define mmHPD4_DC_HPD_FAST_TRAIN_CNTL 0x18bb
+#define mmHPD5_DC_HPD_FAST_TRAIN_CNTL 0x18c3
+#define mmDC_HPD_TOGGLE_FILT_CNTL 0x189c
+#define mmHPD0_DC_HPD_TOGGLE_FILT_CNTL 0x189c
+#define mmHPD1_DC_HPD_TOGGLE_FILT_CNTL 0x18a4
+#define mmHPD2_DC_HPD_TOGGLE_FILT_CNTL 0x18ac
+#define mmHPD3_DC_HPD_TOGGLE_FILT_CNTL 0x18b4
+#define mmHPD4_DC_HPD_TOGGLE_FILT_CNTL 0x18bc
+#define mmHPD5_DC_HPD_TOGGLE_FILT_CNTL 0x18c4
+#define mmDCO_SCRATCH0 0x184e
+#define mmDCO_SCRATCH1 0x184f
+#define mmDCO_SCRATCH2 0x1850
+#define mmDCO_SCRATCH3 0x1851
+#define mmDCO_SCRATCH4 0x1852
+#define mmDCO_SCRATCH5 0x1853
+#define mmDCO_SCRATCH6 0x1854
+#define mmDCO_SCRATCH7 0x1855
+#define mmDCE_VCE_CONTROL 0x1856
+#define mmDISP_INTERRUPT_STATUS 0x1857
+#define mmDISP_INTERRUPT_STATUS_CONTINUE 0x1858
+#define mmDISP_INTERRUPT_STATUS_CONTINUE2 0x1859
+#define mmDISP_INTERRUPT_STATUS_CONTINUE3 0x185a
+#define mmDISP_INTERRUPT_STATUS_CONTINUE4 0x185b
+#define mmDISP_INTERRUPT_STATUS_CONTINUE5 0x185c
+#define mmDISP_INTERRUPT_STATUS_CONTINUE6 0x185d
+#define mmDISP_INTERRUPT_STATUS_CONTINUE7 0x185e
+#define mmDISP_INTERRUPT_STATUS_CONTINUE8 0x185f
+#define mmDISP_INTERRUPT_STATUS_CONTINUE9 0x1860
+#define mmDISP_INTERRUPT_STATUS_CONTINUE10 0x1875
+#define mmDCO_MEM_PWR_STATUS 0x1861
+#define mmDCO_MEM_PWR_STATUS1 0x1874
+#define mmDCO_MEM_PWR_CTRL 0x1862
+#define mmDCO_MEM_PWR_CTRL2 0x1863
+#define mmFMT_MEMORY0_CONTROL 0x1888
+#define mmFMT_MEMORY1_CONTROL 0x1889
+#define mmFMT_MEMORY2_CONTROL 0x188a
+#define mmFMT_MEMORY3_CONTROL 0x188b
+#define mmFMT_MEMORY4_CONTROL 0x188c
+#define mmFMT_MEMORY5_CONTROL 0x188d
+#define mmDCO_CLK_CNTL 0x1864
+#define mmDCO_CLK_CNTL2 0x1876
+#define mmDCO_CLK_CNTL3 0x1877
+#define mmDPDBG_CNTL 0x1866
+#define mmDPDBG_INTERRUPT 0x1867
+#define mmDCO_POWER_MANAGEMENT_CNTL 0x1868
+#define mmDCO_SOFT_RESET 0x1871
+#define mmDIG_SOFT_RESET 0x1872
+#define mmDIG_SOFT_RESET_2 0x186a
+#define mmDCO_STEREOSYNC_SEL 0x186e
+#define mmDCO_HDMI_RXSTATUS_TIMER_CONTROL 0x1883
+#define mmDCO_PSP_INTERRUPT_STATUS 0x1884
+#define mmDCO_PSP_INTERRUPT_CLEAR 0x1885
+#define mmDCO_GENERIC_INTERRUPT_MESSAGE 0x1886
+#define mmDCO_GENERIC_INTERRUPT_CLEAR 0x1887
+#define mmDCO_TEST_DEBUG_INDEX 0x186f
+#define mmDCO_TEST_DEBUG_DATA 0x1870
+#define mmDC_I2C_CONTROL 0x16d4
+#define mmDC_I2C_ARBITRATION 0x16d5
+#define mmDC_I2C_INTERRUPT_CONTROL 0x16d6
+#define mmDC_I2C_SW_STATUS 0x16d7
+#define mmDC_I2C_DDC1_HW_STATUS 0x16d8
+#define mmDC_I2C_DDC2_HW_STATUS 0x16d9
+#define mmDC_I2C_DDC3_HW_STATUS 0x16da
+#define mmDC_I2C_DDC4_HW_STATUS 0x16db
+#define mmDC_I2C_DDC5_HW_STATUS 0x16dc
+#define mmDC_I2C_DDC6_HW_STATUS 0x16dd
+#define mmDC_I2C_DDC1_SPEED 0x16de
+#define mmDC_I2C_DDC1_SETUP 0x16df
+#define mmDC_I2C_DDC2_SPEED 0x16e0
+#define mmDC_I2C_DDC2_SETUP 0x16e1
+#define mmDC_I2C_DDC3_SPEED 0x16e2
+#define mmDC_I2C_DDC3_SETUP 0x16e3
+#define mmDC_I2C_DDC4_SPEED 0x16e4
+#define mmDC_I2C_DDC4_SETUP 0x16e5
+#define mmDC_I2C_DDC5_SPEED 0x16e6
+#define mmDC_I2C_DDC5_SETUP 0x16e7
+#define mmDC_I2C_DDC6_SPEED 0x16e8
+#define mmDC_I2C_DDC6_SETUP 0x16e9
+#define mmDC_I2C_TRANSACTION0 0x16ea
+#define mmDC_I2C_TRANSACTION1 0x16eb
+#define mmDC_I2C_TRANSACTION2 0x16ec
+#define mmDC_I2C_TRANSACTION3 0x16ed
+#define mmDC_I2C_DATA 0x16ee
+#define mmDC_I2C_DDCVGA_HW_STATUS 0x16ef
+#define mmDC_I2C_DDCVGA_SPEED 0x16f0
+#define mmDC_I2C_DDCVGA_SETUP 0x16f1
+#define mmDC_I2C_EDID_DETECT_CTRL 0x16f2
+#define mmDC_I2C_READ_REQUEST_INTERRUPT 0x16f3
+#define mmGENERIC_I2C_CONTROL 0x16f4
+#define mmGENERIC_I2C_INTERRUPT_CONTROL 0x16f5
+#define mmGENERIC_I2C_STATUS 0x16f6
+#define mmGENERIC_I2C_SPEED 0x16f7
+#define mmGENERIC_I2C_SETUP 0x16f8
+#define mmGENERIC_I2C_TRANSACTION 0x16f9
+#define mmGENERIC_I2C_DATA 0x16fa
+#define mmGENERIC_I2C_PIN_SELECTION 0x16fb
+#define mmGENERIC_I2C_PIN_DEBUG 0x16fc
+#define mmBLNDV_CONTROL 0x476d
+#define mmBLNDV0_BLNDV_CONTROL 0x476d
+#define mmBLNDV1_BLNDV_CONTROL 0x996d
+#define mmBLNDV_SM_CONTROL2 0x476e
+#define mmBLNDV0_BLNDV_SM_CONTROL2 0x476e
+#define mmBLNDV1_BLNDV_SM_CONTROL2 0x996e
+#define mmBLNDV_CONTROL2 0x476f
+#define mmBLNDV0_BLNDV_CONTROL2 0x476f
+#define mmBLNDV1_BLNDV_CONTROL2 0x996f
+#define mmBLNDV_UPDATE 0x4770
+#define mmBLNDV0_BLNDV_UPDATE 0x4770
+#define mmBLNDV1_BLNDV_UPDATE 0x9970
+#define mmBLNDV_UNDERFLOW_INTERRUPT 0x4771
+#define mmBLNDV0_BLNDV_UNDERFLOW_INTERRUPT 0x4771
+#define mmBLNDV1_BLNDV_UNDERFLOW_INTERRUPT 0x9971
+#define mmBLNDV_V_UPDATE_LOCK 0x4773
+#define mmBLNDV0_BLNDV_V_UPDATE_LOCK 0x4773
+#define mmBLNDV1_BLNDV_V_UPDATE_LOCK 0x9973
+#define mmBLNDV_REG_UPDATE_STATUS 0x4777
+#define mmBLNDV0_BLNDV_REG_UPDATE_STATUS 0x4777
+#define mmBLNDV1_BLNDV_REG_UPDATE_STATUS 0x9977
+#define mmBLNDV_DEBUG 0x4774
+#define mmBLNDV0_BLNDV_DEBUG 0x4774
+#define mmBLNDV1_BLNDV_DEBUG 0x9974
+#define mmBLNDV_TEST_DEBUG_INDEX 0x4775
+#define mmBLNDV0_BLNDV_TEST_DEBUG_INDEX 0x4775
+#define mmBLNDV1_BLNDV_TEST_DEBUG_INDEX 0x9975
+#define mmBLNDV_TEST_DEBUG_DATA 0x4776
+#define mmBLNDV0_BLNDV_TEST_DEBUG_DATA 0x4776
+#define mmBLNDV1_BLNDV_TEST_DEBUG_DATA 0x9976
+#define mmCRTCV_H_TOTAL 0x4780
+#define mmCRTCV0_CRTCV_H_TOTAL 0x4780
+#define mmCRTCV1_CRTCV_H_TOTAL 0x9980
+#define mmCRTCV_H_BLANK_START_END 0x4781
+#define mmCRTCV0_CRTCV_H_BLANK_START_END 0x4781
+#define mmCRTCV1_CRTCV_H_BLANK_START_END 0x9981
+#define mmCRTCV_H_SYNC_A 0x4782
+#define mmCRTCV0_CRTCV_H_SYNC_A 0x4782
+#define mmCRTCV1_CRTCV_H_SYNC_A 0x9982
+#define mmCRTCV_V_TOTAL 0x4787
+#define mmCRTCV0_CRTCV_V_TOTAL 0x4787
+#define mmCRTCV1_CRTCV_V_TOTAL 0x9987
+#define mmCRTCV_V_BLANK_START_END 0x478d
+#define mmCRTCV0_CRTCV_V_BLANK_START_END 0x478d
+#define mmCRTCV1_CRTCV_V_BLANK_START_END 0x998d
+#define mmCRTCV_V_SYNC_A 0x478e
+#define mmCRTCV0_CRTCV_V_SYNC_A 0x478e
+#define mmCRTCV1_CRTCV_V_SYNC_A 0x998e
+#define mmCRTCV_CONTROL 0x479c
+#define mmCRTCV0_CRTCV_CONTROL 0x479c
+#define mmCRTCV1_CRTCV_CONTROL 0x999c
+#define mmCRTCV_START_LINE_CONTROL 0x47b3
+#define mmCRTCV0_CRTCV_START_LINE_CONTROL 0x47b3
+#define mmCRTCV1_CRTCV_START_LINE_CONTROL 0x99b3
+#define mmCRTCV_OVERSCAN_COLOR 0x47c8
+#define mmCRTCV0_CRTCV_OVERSCAN_COLOR 0x47c8
+#define mmCRTCV1_CRTCV_OVERSCAN_COLOR 0x99c8
+#define mmCRTCV_OVERSCAN_COLOR_EXT 0x47c9
+#define mmCRTCV0_CRTCV_OVERSCAN_COLOR_EXT 0x47c9
+#define mmCRTCV1_CRTCV_OVERSCAN_COLOR_EXT 0x99c9
+#define mmCRTCV_BLACK_COLOR 0x47cc
+#define mmCRTCV0_CRTCV_BLACK_COLOR 0x47cc
+#define mmCRTCV1_CRTCV_BLACK_COLOR 0x99cc
+#define mmCRTCV_BLACK_COLOR_EXT 0x47cd
+#define mmCRTCV0_CRTCV_BLACK_COLOR_EXT 0x47cd
+#define mmCRTCV1_CRTCV_BLACK_COLOR_EXT 0x99cd
+#define mmCRTCV_CRC_CNTL 0x47d4
+#define mmCRTCV0_CRTCV_CRC_CNTL 0x47d4
+#define mmCRTCV1_CRTCV_CRC_CNTL 0x99d4
+#define mmCRTCV_CRC0_WINDOWA_X_CONTROL 0x47d5
+#define mmCRTCV0_CRTCV_CRC0_WINDOWA_X_CONTROL 0x47d5
+#define mmCRTCV1_CRTCV_CRC0_WINDOWA_X_CONTROL 0x99d5
+#define mmCRTCV_CRC0_WINDOWA_Y_CONTROL 0x47d6
+#define mmCRTCV0_CRTCV_CRC0_WINDOWA_Y_CONTROL 0x47d6
+#define mmCRTCV1_CRTCV_CRC0_WINDOWA_Y_CONTROL 0x99d6
+#define mmCRTCV_CRC0_WINDOWB_X_CONTROL 0x47d7
+#define mmCRTCV0_CRTCV_CRC0_WINDOWB_X_CONTROL 0x47d7
+#define mmCRTCV1_CRTCV_CRC0_WINDOWB_X_CONTROL 0x99d7
+#define mmCRTCV_CRC0_WINDOWB_Y_CONTROL 0x47d8
+#define mmCRTCV0_CRTCV_CRC0_WINDOWB_Y_CONTROL 0x47d8
+#define mmCRTCV1_CRTCV_CRC0_WINDOWB_Y_CONTROL 0x99d8
+#define mmCRTCV_CRC0_DATA_RG 0x47d9
+#define mmCRTCV0_CRTCV_CRC0_DATA_RG 0x47d9
+#define mmCRTCV1_CRTCV_CRC0_DATA_RG 0x99d9
+#define mmCRTCV_CRC0_DATA_B 0x47da
+#define mmCRTCV0_CRTCV_CRC0_DATA_B 0x47da
+#define mmCRTCV1_CRTCV_CRC0_DATA_B 0x99da
+#define mmCRTCV_CRC1_WINDOWA_X_CONTROL 0x47db
+#define mmCRTCV0_CRTCV_CRC1_WINDOWA_X_CONTROL 0x47db
+#define mmCRTCV1_CRTCV_CRC1_WINDOWA_X_CONTROL 0x99db
+#define mmCRTCV_CRC1_WINDOWA_Y_CONTROL 0x47dc
+#define mmCRTCV0_CRTCV_CRC1_WINDOWA_Y_CONTROL 0x47dc
+#define mmCRTCV1_CRTCV_CRC1_WINDOWA_Y_CONTROL 0x99dc
+#define mmCRTCV_CRC1_WINDOWB_X_CONTROL 0x47dd
+#define mmCRTCV0_CRTCV_CRC1_WINDOWB_X_CONTROL 0x47dd
+#define mmCRTCV1_CRTCV_CRC1_WINDOWB_X_CONTROL 0x99dd
+#define mmCRTCV_CRC1_WINDOWB_Y_CONTROL 0x47de
+#define mmCRTCV0_CRTCV_CRC1_WINDOWB_Y_CONTROL 0x47de
+#define mmCRTCV1_CRTCV_CRC1_WINDOWB_Y_CONTROL 0x99de
+#define mmCRTCV_CRC1_DATA_RG 0x47df
+#define mmCRTCV0_CRTCV_CRC1_DATA_RG 0x47df
+#define mmCRTCV1_CRTCV_CRC1_DATA_RG 0x99df
+#define mmCRTCV_CRC1_DATA_B 0x47e0
+#define mmCRTCV0_CRTCV_CRC1_DATA_B 0x47e0
+#define mmCRTCV1_CRTCV_CRC1_DATA_B 0x99e0
+#define mmCRTCV_TEST_DEBUG_INDEX 0x47c6
+#define mmCRTCV0_CRTCV_TEST_DEBUG_INDEX 0x47c6
+#define mmCRTCV1_CRTCV_TEST_DEBUG_INDEX 0x99c6
+#define mmCRTCV_TEST_DEBUG_DATA 0x47c7
+#define mmCRTCV0_CRTCV_TEST_DEBUG_DATA 0x47c7
+#define mmCRTCV1_CRTCV_TEST_DEBUG_DATA 0x99c7
+#define mmXDMA_MC_PCIE_CLIENT_CONFIG 0x3e0
+#define mmXDMA_LOCAL_SURFACE_TILING1 0x3e1
+#define mmXDMA_LOCAL_SURFACE_TILING2 0x3e2
+#define mmXDMA_INTERRUPT 0x3e3
+#define mmXDMA_CLOCK_GATING_CNTL 0x3e4
+#define mmXDMA_MEM_POWER_CNTL 0x3e6
+#define mmXDMA_IF_BIF_STATUS 0x3e7
+#define mmXDMA_PERF_MEAS_STATUS 0x3e8
+#define mmXDMA_IF_STATUS 0x3e9
+#define mmXDMA_TEST_DEBUG_INDEX 0x3ea
+#define mmXDMA_TEST_DEBUG_DATA 0x3eb
+#define mmXDMA_RBBMIF_RDWR_CNTL 0x3f8
+#define mmXDMA_PG_CONTROL 0x3f9
+#define mmXDMA_PG_WDATA 0x3fa
+#define mmXDMA_PG_STATUS 0x3fb
+#define mmXDMA_AON_TEST_DEBUG_INDEX 0x3fc
+#define mmXDMA_AON_TEST_DEBUG_DATA 0x3fd
+#define mmXDMA_MSTR_CNTL 0x3ec
+#define mmXDMA_MSTR_STATUS 0x3ed
+#define mmXDMA_MSTR_MEM_CLIENT_CONFIG 0x3ee
+#define mmXDMA_MSTR_LOCAL_SURFACE_BASE_ADDR 0x3ef
+#define mmXDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH 0x3f0
+#define mmXDMA_MSTR_LOCAL_SURFACE_PITCH 0x3f1
+#define mmXDMA_MSTR_CMD_URGENT_CNTL 0x3f2
+#define mmXDMA_MSTR_MEM_URGENT_CNTL 0x3f3
+#define mmXDMA_MSTR_PCIE_NACK_STATUS 0x3f5
+#define mmXDMA_MSTR_MEM_NACK_STATUS 0x3f6
+#define mmXDMA_MSTR_VSYNC_GSL_CHECK 0x3f7
+#define mmXDMA_MSTR_PIPE_CNTL 0x400
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_PIPE_CNTL 0x400
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_PIPE_CNTL 0x410
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_PIPE_CNTL 0x420
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_PIPE_CNTL 0x430
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_PIPE_CNTL 0x440
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_PIPE_CNTL 0x450
+#define mmXDMA_MSTR_READ_COMMAND 0x401
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_READ_COMMAND 0x401
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_READ_COMMAND 0x411
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_READ_COMMAND 0x421
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_READ_COMMAND 0x431
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_READ_COMMAND 0x441
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_READ_COMMAND 0x451
+#define mmXDMA_MSTR_CHANNEL_DIM 0x402
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_CHANNEL_DIM 0x402
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_CHANNEL_DIM 0x412
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_CHANNEL_DIM 0x422
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_CHANNEL_DIM 0x432
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_CHANNEL_DIM 0x442
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_CHANNEL_DIM 0x452
+#define mmXDMA_MSTR_HEIGHT 0x403
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_HEIGHT 0x403
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_HEIGHT 0x413
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_HEIGHT 0x423
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_HEIGHT 0x433
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_HEIGHT 0x443
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_HEIGHT 0x453
+#define mmXDMA_MSTR_REMOTE_SURFACE_BASE 0x404
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_REMOTE_SURFACE_BASE 0x404
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_REMOTE_SURFACE_BASE 0x414
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_REMOTE_SURFACE_BASE 0x424
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_REMOTE_SURFACE_BASE 0x434
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_REMOTE_SURFACE_BASE 0x444
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_REMOTE_SURFACE_BASE 0x454
+#define mmXDMA_MSTR_REMOTE_SURFACE_BASE_HIGH 0x405
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH 0x405
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH 0x415
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH 0x425
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH 0x435
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH 0x445
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH 0x455
+#define mmXDMA_MSTR_REMOTE_GPU_ADDRESS 0x406
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_REMOTE_GPU_ADDRESS 0x406
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_REMOTE_GPU_ADDRESS 0x416
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_REMOTE_GPU_ADDRESS 0x426
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_REMOTE_GPU_ADDRESS 0x436
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_REMOTE_GPU_ADDRESS 0x446
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_REMOTE_GPU_ADDRESS 0x456
+#define mmXDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH 0x407
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH 0x407
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH 0x417
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH 0x427
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH 0x437
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH 0x447
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH 0x457
+#define mmXDMA_MSTR_CACHE_BASE_ADDR 0x408
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_CACHE_BASE_ADDR 0x408
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_CACHE_BASE_ADDR 0x418
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_CACHE_BASE_ADDR 0x428
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_CACHE_BASE_ADDR 0x438
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_CACHE_BASE_ADDR 0x448
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_CACHE_BASE_ADDR 0x458
+#define mmXDMA_MSTR_CACHE_BASE_ADDR_HIGH 0x409
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_CACHE_BASE_ADDR_HIGH 0x409
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_CACHE_BASE_ADDR_HIGH 0x419
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_CACHE_BASE_ADDR_HIGH 0x429
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_CACHE_BASE_ADDR_HIGH 0x439
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_CACHE_BASE_ADDR_HIGH 0x449
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_CACHE_BASE_ADDR_HIGH 0x459
+#define mmXDMA_MSTR_CACHE 0x40a
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_CACHE 0x40a
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_CACHE 0x41a
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_CACHE 0x42a
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_CACHE 0x43a
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_CACHE 0x44a
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_CACHE 0x45a
+#define mmXDMA_MSTR_CHANNEL_START 0x40b
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_CHANNEL_START 0x40b
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_CHANNEL_START 0x41b
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_CHANNEL_START 0x42b
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_CHANNEL_START 0x43b
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_CHANNEL_START 0x44b
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_CHANNEL_START 0x45b
+#define mmXDMA_MSTR_PERFMEAS_STATUS 0x40e
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_PERFMEAS_STATUS 0x40e
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_PERFMEAS_STATUS 0x41e
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_PERFMEAS_STATUS 0x42e
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_PERFMEAS_STATUS 0x43e
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_PERFMEAS_STATUS 0x44e
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_PERFMEAS_STATUS 0x45e
+#define mmXDMA_MSTR_PERFMEAS_CNTL 0x40f
+#define mmXDMA_MSTR_PIPE0_XDMA_MSTR_PERFMEAS_CNTL 0x40f
+#define mmXDMA_MSTR_PIPE1_XDMA_MSTR_PERFMEAS_CNTL 0x41f
+#define mmXDMA_MSTR_PIPE2_XDMA_MSTR_PERFMEAS_CNTL 0x42f
+#define mmXDMA_MSTR_PIPE3_XDMA_MSTR_PERFMEAS_CNTL 0x43f
+#define mmXDMA_MSTR_PIPE4_XDMA_MSTR_PERFMEAS_CNTL 0x44f
+#define mmXDMA_MSTR_PIPE5_XDMA_MSTR_PERFMEAS_CNTL 0x45f
+#define mmXDMA_SLV_CNTL 0x460
+#define mmXDMA_SLV_MEM_CLIENT_CONFIG 0x461
+#define mmXDMA_SLV_SLS_PITCH 0x462
+#define mmXDMA_SLV_READ_URGENT_CNTL 0x463
+#define mmXDMA_SLV_WRITE_URGENT_CNTL 0x464
+#define mmXDMA_SLV_WB_RATE_CNTL 0x465
+#define mmXDMA_SLV_READ_LATENCY_MINMAX 0x466
+#define mmXDMA_SLV_READ_LATENCY_AVE 0x467
+#define mmXDMA_SLV_PCIE_NACK_STATUS 0x468
+#define mmXDMA_SLV_MEM_NACK_STATUS 0x469
+#define mmXDMA_SLV_RDRET_BUF_STATUS 0x46a
+#define mmXDMA_SLV_READ_LATENCY_TIMER 0x46b
+#define mmXDMA_SLV_FLIP_PENDING 0x46c
+#define mmXDMA_SLV_CHANNEL_CNTL 0x470
+#define mmXDMA_SLV_CHANNEL0_XDMA_SLV_CHANNEL_CNTL 0x470
+#define mmXDMA_SLV_CHANNEL1_XDMA_SLV_CHANNEL_CNTL 0x478
+#define mmXDMA_SLV_CHANNEL2_XDMA_SLV_CHANNEL_CNTL 0x480
+#define mmXDMA_SLV_CHANNEL3_XDMA_SLV_CHANNEL_CNTL 0x488
+#define mmXDMA_SLV_CHANNEL4_XDMA_SLV_CHANNEL_CNTL 0x490
+#define mmXDMA_SLV_CHANNEL5_XDMA_SLV_CHANNEL_CNTL 0x498
+#define mmXDMA_SLV_REMOTE_GPU_ADDRESS 0x471
+#define mmXDMA_SLV_CHANNEL0_XDMA_SLV_REMOTE_GPU_ADDRESS 0x471
+#define mmXDMA_SLV_CHANNEL1_XDMA_SLV_REMOTE_GPU_ADDRESS 0x479
+#define mmXDMA_SLV_CHANNEL2_XDMA_SLV_REMOTE_GPU_ADDRESS 0x481
+#define mmXDMA_SLV_CHANNEL3_XDMA_SLV_REMOTE_GPU_ADDRESS 0x489
+#define mmXDMA_SLV_CHANNEL4_XDMA_SLV_REMOTE_GPU_ADDRESS 0x491
+#define mmXDMA_SLV_CHANNEL5_XDMA_SLV_REMOTE_GPU_ADDRESS 0x499
+#define mmXDMA_SLV_REMOTE_GPU_ADDRESS_HIGH 0x472
+#define mmXDMA_SLV_CHANNEL0_XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH 0x472
+#define mmXDMA_SLV_CHANNEL1_XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH 0x47a
+#define mmXDMA_SLV_CHANNEL2_XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH 0x482
+#define mmXDMA_SLV_CHANNEL3_XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH 0x48a
+#define mmXDMA_SLV_CHANNEL4_XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH 0x492
+#define mmXDMA_SLV_CHANNEL5_XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH 0x49a
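
The XDMA instanced offsets above are fully regular: the per-pipe master registers step by 0x10 per pipe (0x400, 0x410, ..., 0x450) and the per-channel slave registers by 0x8 per channel (0x470, 0x478, ..., 0x498). A hedged sketch of that arithmetic; the helper names are illustrative, not taken from the header:

#include <stdint.h>

#define XDMA_MSTR_PIPE_STRIDE   0x10 /* 0x400, 0x410, ..., 0x450 above */
#define XDMA_SLV_CHANNEL_STRIDE 0x08 /* 0x470, 0x478, ..., 0x498 above */

static inline uint32_t xdma_mstr_pipe_reg(uint32_t pipe0_reg, unsigned int pipe)
{
	return pipe0_reg + pipe * XDMA_MSTR_PIPE_STRIDE;
}

static inline uint32_t xdma_slv_chan_reg(uint32_t chan0_reg, unsigned int chan)
{
	return chan0_reg + chan * XDMA_SLV_CHANNEL_STRIDE;
}
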
+#define mmCMD_BUS_TX_CONTROL_LANE0 0x48e0
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE0 0x48e0
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE0 0x4980
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE0 0x9a20
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE0 0x9ac0
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_TX_CONTROL_LANE0 0x9b60
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_TX_CONTROL_LANE0 0x9c00
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_TX_CONTROL_LANE0 0x9ca0
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_TX_CONTROL_LANE0 0x9d40
+#define mmCMD_BUS_TX_CONTROL_LANE1 0x48f0
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE1 0x48f0
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE1 0x4990
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE1 0x9a30
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE1 0x9ad0
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_TX_CONTROL_LANE1 0x9b70
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_TX_CONTROL_LANE1 0x9c10
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_TX_CONTROL_LANE1 0x9cb0
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_TX_CONTROL_LANE1 0x9d50
+#define mmCMD_BUS_TX_CONTROL_LANE2 0x4900
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE2 0x4900
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE2 0x49a0
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE2 0x9a40
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE2 0x9ae0
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_TX_CONTROL_LANE2 0x9b80
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_TX_CONTROL_LANE2 0x9c20
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_TX_CONTROL_LANE2 0x9cc0
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_TX_CONTROL_LANE2 0x9d60
+#define mmCMD_BUS_TX_CONTROL_LANE3 0x4910
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_TX_CONTROL_LANE3 0x4910
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_TX_CONTROL_LANE3 0x49b0
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_TX_CONTROL_LANE3 0x9a50
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_TX_CONTROL_LANE3 0x9af0
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_TX_CONTROL_LANE3 0x9b90
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_TX_CONTROL_LANE3 0x9c30
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_TX_CONTROL_LANE3 0x9cd0
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_TX_CONTROL_LANE3 0x9d70
+#define mmMARGIN_DEEMPH_LANE0 0x48e1
+#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE0 0x48e1
+#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE0 0x4981
+#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE0 0x9a21
+#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE0 0x9ac1
+#define mmDC_COMBOPHYTXREGS4_MARGIN_DEEMPH_LANE0 0x9b61
+#define mmDC_COMBOPHYTXREGS5_MARGIN_DEEMPH_LANE0 0x9c01
+#define mmDC_COMBOPHYTXREGS6_MARGIN_DEEMPH_LANE0 0x9ca1
+#define mmDC_COMBOPHYTXREGS7_MARGIN_DEEMPH_LANE0 0x9d41
+#define mmMARGIN_DEEMPH_LANE1 0x48f1
+#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE1 0x48f1
+#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE1 0x4991
+#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE1 0x9a31
+#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE1 0x9ad1
+#define mmDC_COMBOPHYTXREGS4_MARGIN_DEEMPH_LANE1 0x9b71
+#define mmDC_COMBOPHYTXREGS5_MARGIN_DEEMPH_LANE1 0x9c11
+#define mmDC_COMBOPHYTXREGS6_MARGIN_DEEMPH_LANE1 0x9cb1
+#define mmDC_COMBOPHYTXREGS7_MARGIN_DEEMPH_LANE1 0x9d51
+#define mmMARGIN_DEEMPH_LANE2 0x4901
+#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE2 0x4901
+#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE2 0x49a1
+#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE2 0x9a41
+#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE2 0x9ae1
+#define mmDC_COMBOPHYTXREGS4_MARGIN_DEEMPH_LANE2 0x9b81
+#define mmDC_COMBOPHYTXREGS5_MARGIN_DEEMPH_LANE2 0x9c21
+#define mmDC_COMBOPHYTXREGS6_MARGIN_DEEMPH_LANE2 0x9cc1
+#define mmDC_COMBOPHYTXREGS7_MARGIN_DEEMPH_LANE2 0x9d61
+#define mmMARGIN_DEEMPH_LANE3 0x4911
+#define mmDC_COMBOPHYTXREGS0_MARGIN_DEEMPH_LANE3 0x4911
+#define mmDC_COMBOPHYTXREGS1_MARGIN_DEEMPH_LANE3 0x49b1
+#define mmDC_COMBOPHYTXREGS2_MARGIN_DEEMPH_LANE3 0x9a51
+#define mmDC_COMBOPHYTXREGS3_MARGIN_DEEMPH_LANE3 0x9af1
+#define mmDC_COMBOPHYTXREGS4_MARGIN_DEEMPH_LANE3 0x9b91
+#define mmDC_COMBOPHYTXREGS5_MARGIN_DEEMPH_LANE3 0x9c31
+#define mmDC_COMBOPHYTXREGS6_MARGIN_DEEMPH_LANE3 0x9cd1
+#define mmDC_COMBOPHYTXREGS7_MARGIN_DEEMPH_LANE3 0x9d71
+#define mmCMD_BUS_GLOBAL_FOR_TX_LANE0 0x48e2
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE0 0x48e2
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE0 0x4982
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE0 0x9a22
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE0 0x9ac2
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_GLOBAL_FOR_TX_LANE0 0x9b62
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_GLOBAL_FOR_TX_LANE0 0x9c02
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_GLOBAL_FOR_TX_LANE0 0x9ca2
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_GLOBAL_FOR_TX_LANE0 0x9d42
+#define mmCMD_BUS_GLOBAL_FOR_TX_LANE1 0x48f2
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE1 0x48f2
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE1 0x4992
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE1 0x9a32
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE1 0x9ad2
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_GLOBAL_FOR_TX_LANE1 0x9b72
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_GLOBAL_FOR_TX_LANE1 0x9c12
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_GLOBAL_FOR_TX_LANE1 0x9cb2
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_GLOBAL_FOR_TX_LANE1 0x9d52
+#define mmCMD_BUS_GLOBAL_FOR_TX_LANE2 0x4902
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE2 0x4902
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE2 0x49a2
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE2 0x9a42
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE2 0x9ae2
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_GLOBAL_FOR_TX_LANE2 0x9b82
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_GLOBAL_FOR_TX_LANE2 0x9c22
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_GLOBAL_FOR_TX_LANE2 0x9cc2
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_GLOBAL_FOR_TX_LANE2 0x9d62
+#define mmCMD_BUS_GLOBAL_FOR_TX_LANE3 0x4912
+#define mmDC_COMBOPHYTXREGS0_CMD_BUS_GLOBAL_FOR_TX_LANE3 0x4912
+#define mmDC_COMBOPHYTXREGS1_CMD_BUS_GLOBAL_FOR_TX_LANE3 0x49b2
+#define mmDC_COMBOPHYTXREGS2_CMD_BUS_GLOBAL_FOR_TX_LANE3 0x9a52
+#define mmDC_COMBOPHYTXREGS3_CMD_BUS_GLOBAL_FOR_TX_LANE3 0x9af2
+#define mmDC_COMBOPHYTXREGS4_CMD_BUS_GLOBAL_FOR_TX_LANE3 0x9b92
+#define mmDC_COMBOPHYTXREGS5_CMD_BUS_GLOBAL_FOR_TX_LANE3 0x9c32
+#define mmDC_COMBOPHYTXREGS6_CMD_BUS_GLOBAL_FOR_TX_LANE3 0x9cd2
+#define mmDC_COMBOPHYTXREGS7_CMD_BUS_GLOBAL_FOR_TX_LANE3 0x9d72
+#define mmTX_DISP_RFU0_LANE0 0x48e3
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE0 0x48e3
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE0 0x4983
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE0 0x9a23
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE0 0x9ac3
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU0_LANE0 0x9b63
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU0_LANE0 0x9c03
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU0_LANE0 0x9ca3
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU0_LANE0 0x9d43
+#define mmTX_DISP_RFU0_LANE1 0x48f3
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE1 0x48f3
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE1 0x4993
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE1 0x9a33
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE1 0x9ad3
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU0_LANE1 0x9b73
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU0_LANE1 0x9c13
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU0_LANE1 0x9cb3
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU0_LANE1 0x9d53
+#define mmTX_DISP_RFU0_LANE2 0x4903
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE2 0x4903
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE2 0x49a3
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE2 0x9a43
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE2 0x9ae3
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU0_LANE2 0x9b83
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU0_LANE2 0x9c23
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU0_LANE2 0x9cc3
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU0_LANE2 0x9d63
+#define mmTX_DISP_RFU0_LANE3 0x4913
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU0_LANE3 0x4913
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU0_LANE3 0x49b3
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU0_LANE3 0x9a53
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU0_LANE3 0x9af3
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU0_LANE3 0x9b93
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU0_LANE3 0x9c33
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU0_LANE3 0x9cd3
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU0_LANE3 0x9d73
+#define mmTX_DISP_RFU1_LANE0 0x48e4
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE0 0x48e4
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE0 0x4984
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE0 0x9a24
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE0 0x9ac4
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU1_LANE0 0x9b64
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU1_LANE0 0x9c04
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU1_LANE0 0x9ca4
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU1_LANE0 0x9d44
+#define mmTX_DISP_RFU1_LANE1 0x48f4
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE1 0x48f4
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE1 0x4994
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE1 0x9a34
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE1 0x9ad4
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU1_LANE1 0x9b74
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU1_LANE1 0x9c14
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU1_LANE1 0x9cb4
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU1_LANE1 0x9d54
+#define mmTX_DISP_RFU1_LANE2 0x4904
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE2 0x4904
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE2 0x49a4
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE2 0x9a44
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE2 0x9ae4
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU1_LANE2 0x9b84
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU1_LANE2 0x9c24
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU1_LANE2 0x9cc4
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU1_LANE2 0x9d64
+#define mmTX_DISP_RFU1_LANE3 0x4914
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU1_LANE3 0x4914
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU1_LANE3 0x49b4
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU1_LANE3 0x9a54
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU1_LANE3 0x9af4
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU1_LANE3 0x9b94
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU1_LANE3 0x9c34
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU1_LANE3 0x9cd4
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU1_LANE3 0x9d74
+#define mmTX_DISP_RFU2_LANE0 0x48e5
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE0 0x48e5
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE0 0x4985
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE0 0x9a25
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE0 0x9ac5
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU2_LANE0 0x9b65
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU2_LANE0 0x9c05
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU2_LANE0 0x9ca5
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU2_LANE0 0x9d45
+#define mmTX_DISP_RFU2_LANE1 0x48f5
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE1 0x48f5
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE1 0x4995
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE1 0x9a35
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE1 0x9ad5
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU2_LANE1 0x9b75
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU2_LANE1 0x9c15
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU2_LANE1 0x9cb5
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU2_LANE1 0x9d55
+#define mmTX_DISP_RFU2_LANE2 0x4905
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE2 0x4905
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE2 0x49a5
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE2 0x9a45
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE2 0x9ae5
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU2_LANE2 0x9b85
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU2_LANE2 0x9c25
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU2_LANE2 0x9cc5
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU2_LANE2 0x9d65
+#define mmTX_DISP_RFU2_LANE3 0x4915
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU2_LANE3 0x4915
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU2_LANE3 0x49b5
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU2_LANE3 0x9a55
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU2_LANE3 0x9af5
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU2_LANE3 0x9b95
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU2_LANE3 0x9c35
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU2_LANE3 0x9cd5
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU2_LANE3 0x9d75
+#define mmTX_DISP_RFU3_LANE0 0x48e6
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE0 0x48e6
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE0 0x4986
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE0 0x9a26
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE0 0x9ac6
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU3_LANE0 0x9b66
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU3_LANE0 0x9c06
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU3_LANE0 0x9ca6
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU3_LANE0 0x9d46
+#define mmTX_DISP_RFU3_LANE1 0x48f6
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE1 0x48f6
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE1 0x4996
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE1 0x9a36
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE1 0x9ad6
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU3_LANE1 0x9b76
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU3_LANE1 0x9c16
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU3_LANE1 0x9cb6
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU3_LANE1 0x9d56
+#define mmTX_DISP_RFU3_LANE2 0x4906
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE2 0x4906
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE2 0x49a6
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE2 0x9a46
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE2 0x9ae6
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU3_LANE2 0x9b86
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU3_LANE2 0x9c26
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU3_LANE2 0x9cc6
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU3_LANE2 0x9d66
+#define mmTX_DISP_RFU3_LANE3 0x4916
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU3_LANE3 0x4916
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU3_LANE3 0x49b6
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU3_LANE3 0x9a56
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU3_LANE3 0x9af6
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU3_LANE3 0x9b96
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU3_LANE3 0x9c36
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU3_LANE3 0x9cd6
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU3_LANE3 0x9d76
+#define mmTX_DISP_RFU4_LANE0 0x48e7
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE0 0x48e7
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE0 0x4987
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE0 0x9a27
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE0 0x9ac7
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU4_LANE0 0x9b67
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU4_LANE0 0x9c07
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU4_LANE0 0x9ca7
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU4_LANE0 0x9d47
+#define mmTX_DISP_RFU4_LANE1 0x48f7
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE1 0x48f7
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE1 0x4997
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE1 0x9a37
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE1 0x9ad7
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU4_LANE1 0x9b77
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU4_LANE1 0x9c17
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU4_LANE1 0x9cb7
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU4_LANE1 0x9d57
+#define mmTX_DISP_RFU4_LANE2 0x4907
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE2 0x4907
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE2 0x49a7
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE2 0x9a47
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE2 0x9ae7
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU4_LANE2 0x9b87
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU4_LANE2 0x9c27
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU4_LANE2 0x9cc7
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU4_LANE2 0x9d67
+#define mmTX_DISP_RFU4_LANE3 0x4917
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU4_LANE3 0x4917
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU4_LANE3 0x49b7
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU4_LANE3 0x9a57
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU4_LANE3 0x9af7
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU4_LANE3 0x9b97
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU4_LANE3 0x9c37
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU4_LANE3 0x9cd7
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU4_LANE3 0x9d77
+#define mmTX_DISP_RFU5_LANE0 0x48e8
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE0 0x48e8
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE0 0x4988
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE0 0x9a28
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE0 0x9ac8
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU5_LANE0 0x9b68
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU5_LANE0 0x9c08
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU5_LANE0 0x9ca8
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU5_LANE0 0x9d48
+#define mmTX_DISP_RFU5_LANE1 0x48f8
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE1 0x48f8
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE1 0x4998
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE1 0x9a38
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE1 0x9ad8
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU5_LANE1 0x9b78
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU5_LANE1 0x9c18
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU5_LANE1 0x9cb8
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU5_LANE1 0x9d58
+#define mmTX_DISP_RFU5_LANE2 0x4908
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE2 0x4908
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE2 0x49a8
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE2 0x9a48
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE2 0x9ae8
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU5_LANE2 0x9b88
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU5_LANE2 0x9c28
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU5_LANE2 0x9cc8
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU5_LANE2 0x9d68
+#define mmTX_DISP_RFU5_LANE3 0x4918
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU5_LANE3 0x4918
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU5_LANE3 0x49b8
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU5_LANE3 0x9a58
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU5_LANE3 0x9af8
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU5_LANE3 0x9b98
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU5_LANE3 0x9c38
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU5_LANE3 0x9cd8
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU5_LANE3 0x9d78
+#define mmTX_DISP_RFU6_LANE0 0x48e9
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE0 0x48e9
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE0 0x4989
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE0 0x9a29
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE0 0x9ac9
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU6_LANE0 0x9b69
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU6_LANE0 0x9c09
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU6_LANE0 0x9ca9
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU6_LANE0 0x9d49
+#define mmTX_DISP_RFU6_LANE1 0x48f9
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE1 0x48f9
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE1 0x4999
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE1 0x9a39
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE1 0x9ad9
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU6_LANE1 0x9b79
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU6_LANE1 0x9c19
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU6_LANE1 0x9cb9
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU6_LANE1 0x9d59
+#define mmTX_DISP_RFU6_LANE2 0x4909
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE2 0x4909
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE2 0x49a9
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE2 0x9a49
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE2 0x9ae9
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU6_LANE2 0x9b89
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU6_LANE2 0x9c29
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU6_LANE2 0x9cc9
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU6_LANE2 0x9d69
+#define mmTX_DISP_RFU6_LANE3 0x4919
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU6_LANE3 0x4919
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU6_LANE3 0x49b9
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU6_LANE3 0x9a59
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU6_LANE3 0x9af9
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU6_LANE3 0x9b99
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU6_LANE3 0x9c39
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU6_LANE3 0x9cd9
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU6_LANE3 0x9d79
+#define mmTX_DISP_RFU7_LANE0 0x48ea
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE0 0x48ea
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE0 0x498a
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE0 0x9a2a
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE0 0x9aca
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU7_LANE0 0x9b6a
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU7_LANE0 0x9c0a
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU7_LANE0 0x9caa
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU7_LANE0 0x9d4a
+#define mmTX_DISP_RFU7_LANE1 0x48fa
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE1 0x48fa
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE1 0x499a
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE1 0x9a3a
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE1 0x9ada
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU7_LANE1 0x9b7a
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU7_LANE1 0x9c1a
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU7_LANE1 0x9cba
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU7_LANE1 0x9d5a
+#define mmTX_DISP_RFU7_LANE2 0x490a
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE2 0x490a
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE2 0x49aa
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE2 0x9a4a
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE2 0x9aea
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU7_LANE2 0x9b8a
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU7_LANE2 0x9c2a
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU7_LANE2 0x9cca
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU7_LANE2 0x9d6a
+#define mmTX_DISP_RFU7_LANE3 0x491a
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU7_LANE3 0x491a
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU7_LANE3 0x49ba
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU7_LANE3 0x9a5a
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU7_LANE3 0x9afa
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU7_LANE3 0x9b9a
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU7_LANE3 0x9c3a
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU7_LANE3 0x9cda
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU7_LANE3 0x9d7a
+#define mmTX_DISP_RFU8_LANE0 0x48eb
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE0 0x48eb
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE0 0x498b
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE0 0x9a2b
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE0 0x9acb
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU8_LANE0 0x9b6b
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU8_LANE0 0x9c0b
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU8_LANE0 0x9cab
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU8_LANE0 0x9d4b
+#define mmTX_DISP_RFU8_LANE1 0x48fb
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE1 0x48fb
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE1 0x499b
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE1 0x9a3b
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE1 0x9adb
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU8_LANE1 0x9b7b
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU8_LANE1 0x9c1b
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU8_LANE1 0x9cbb
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU8_LANE1 0x9d5b
+#define mmTX_DISP_RFU8_LANE2 0x490b
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE2 0x490b
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE2 0x49ab
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE2 0x9a4b
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE2 0x9aeb
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU8_LANE2 0x9b8b
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU8_LANE2 0x9c2b
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU8_LANE2 0x9ccb
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU8_LANE2 0x9d6b
+#define mmTX_DISP_RFU8_LANE3 0x491b
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU8_LANE3 0x491b
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU8_LANE3 0x49bb
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU8_LANE3 0x9a5b
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU8_LANE3 0x9afb
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU8_LANE3 0x9b9b
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU8_LANE3 0x9c3b
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU8_LANE3 0x9cdb
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU8_LANE3 0x9d7b
+#define mmTX_DISP_RFU9_LANE0 0x48ec
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE0 0x48ec
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE0 0x498c
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE0 0x9a2c
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE0 0x9acc
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU9_LANE0 0x9b6c
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU9_LANE0 0x9c0c
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU9_LANE0 0x9cac
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU9_LANE0 0x9d4c
+#define mmTX_DISP_RFU9_LANE1 0x48fc
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE1 0x48fc
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE1 0x499c
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE1 0x9a3c
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE1 0x9adc
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU9_LANE1 0x9b7c
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU9_LANE1 0x9c1c
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU9_LANE1 0x9cbc
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU9_LANE1 0x9d5c
+#define mmTX_DISP_RFU9_LANE2 0x490c
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE2 0x490c
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE2 0x49ac
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE2 0x9a4c
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE2 0x9aec
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU9_LANE2 0x9b8c
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU9_LANE2 0x9c2c
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU9_LANE2 0x9ccc
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU9_LANE2 0x9d6c
+#define mmTX_DISP_RFU9_LANE3 0x491c
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU9_LANE3 0x491c
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU9_LANE3 0x49bc
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU9_LANE3 0x9a5c
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU9_LANE3 0x9afc
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU9_LANE3 0x9b9c
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU9_LANE3 0x9c3c
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU9_LANE3 0x9cdc
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU9_LANE3 0x9d7c
+#define mmTX_DISP_RFU10_LANE0 0x48ed
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE0 0x48ed
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE0 0x498d
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE0 0x9a2d
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE0 0x9acd
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU10_LANE0 0x9b6d
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU10_LANE0 0x9c0d
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU10_LANE0 0x9cad
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU10_LANE0 0x9d4d
+#define mmTX_DISP_RFU10_LANE1 0x48fd
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE1 0x48fd
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE1 0x499d
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE1 0x9a3d
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE1 0x9add
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU10_LANE1 0x9b7d
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU10_LANE1 0x9c1d
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU10_LANE1 0x9cbd
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU10_LANE1 0x9d5d
+#define mmTX_DISP_RFU10_LANE2 0x490d
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE2 0x490d
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE2 0x49ad
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE2 0x9a4d
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE2 0x9aed
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU10_LANE2 0x9b8d
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU10_LANE2 0x9c2d
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU10_LANE2 0x9ccd
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU10_LANE2 0x9d6d
+#define mmTX_DISP_RFU10_LANE3 0x491d
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU10_LANE3 0x491d
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU10_LANE3 0x49bd
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU10_LANE3 0x9a5d
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU10_LANE3 0x9afd
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU10_LANE3 0x9b9d
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU10_LANE3 0x9c3d
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU10_LANE3 0x9cdd
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU10_LANE3 0x9d7d
+#define mmTX_DISP_RFU11_LANE0 0x48ee
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE0 0x48ee
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE0 0x498e
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE0 0x9a2e
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE0 0x9ace
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU11_LANE0 0x9b6e
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU11_LANE0 0x9c0e
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU11_LANE0 0x9cae
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU11_LANE0 0x9d4e
+#define mmTX_DISP_RFU11_LANE1 0x48fe
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE1 0x48fe
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE1 0x499e
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE1 0x9a3e
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE1 0x9ade
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU11_LANE1 0x9b7e
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU11_LANE1 0x9c1e
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU11_LANE1 0x9cbe
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU11_LANE1 0x9d5e
+#define mmTX_DISP_RFU11_LANE2 0x490e
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE2 0x490e
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE2 0x49ae
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE2 0x9a4e
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE2 0x9aee
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU11_LANE2 0x9b8e
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU11_LANE2 0x9c2e
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU11_LANE2 0x9cce
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU11_LANE2 0x9d6e
+#define mmTX_DISP_RFU11_LANE3 0x491e
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU11_LANE3 0x491e
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU11_LANE3 0x49be
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU11_LANE3 0x9a5e
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU11_LANE3 0x9afe
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU11_LANE3 0x9b9e
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU11_LANE3 0x9c3e
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU11_LANE3 0x9cde
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU11_LANE3 0x9d7e
+#define mmTX_DISP_RFU12_LANE0 0x48ef
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE0 0x48ef
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE0 0x498f
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE0 0x9a2f
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE0 0x9acf
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU12_LANE0 0x9b6f
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU12_LANE0 0x9c0f
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU12_LANE0 0x9caf
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU12_LANE0 0x9d4f
+#define mmTX_DISP_RFU12_LANE1 0x48ff
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE1 0x48ff
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE1 0x499f
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE1 0x9a3f
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE1 0x9adf
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU12_LANE1 0x9b7f
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU12_LANE1 0x9c1f
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU12_LANE1 0x9cbf
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU12_LANE1 0x9d5f
+#define mmTX_DISP_RFU12_LANE2 0x490f
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE2 0x490f
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE2 0x49af
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE2 0x9a4f
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE2 0x9aef
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU12_LANE2 0x9b8f
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU12_LANE2 0x9c2f
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU12_LANE2 0x9ccf
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU12_LANE2 0x9d6f
+#define mmTX_DISP_RFU12_LANE3 0x491f
+#define mmDC_COMBOPHYTXREGS0_TX_DISP_RFU12_LANE3 0x491f
+#define mmDC_COMBOPHYTXREGS1_TX_DISP_RFU12_LANE3 0x49bf
+#define mmDC_COMBOPHYTXREGS2_TX_DISP_RFU12_LANE3 0x9a5f
+#define mmDC_COMBOPHYTXREGS3_TX_DISP_RFU12_LANE3 0x9aff
+#define mmDC_COMBOPHYTXREGS4_TX_DISP_RFU12_LANE3 0x9b9f
+#define mmDC_COMBOPHYTXREGS5_TX_DISP_RFU12_LANE3 0x9c3f
+#define mmDC_COMBOPHYTXREGS6_TX_DISP_RFU12_LANE3 0x9cdf
+#define mmDC_COMBOPHYTXREGS7_TX_DISP_RFU12_LANE3 0x9d7f
+#define mmCOMMON_MAR_DEEMPH_NOM 0x48c3
+#define mmDC_COMBOPHYCMREGS0_COMMON_MAR_DEEMPH_NOM 0x48c3
+#define mmDC_COMBOPHYCMREGS1_COMMON_MAR_DEEMPH_NOM 0x4963
+#define mmDC_COMBOPHYCMREGS2_COMMON_MAR_DEEMPH_NOM 0x9a03
+#define mmDC_COMBOPHYCMREGS3_COMMON_MAR_DEEMPH_NOM 0x9aa3
+#define mmDC_COMBOPHYCMREGS4_COMMON_MAR_DEEMPH_NOM 0x9b43
+#define mmDC_COMBOPHYCMREGS5_COMMON_MAR_DEEMPH_NOM 0x9be3
+#define mmDC_COMBOPHYCMREGS6_COMMON_MAR_DEEMPH_NOM 0x9c83
+#define mmDC_COMBOPHYCMREGS7_COMMON_MAR_DEEMPH_NOM 0x9d23
+#define mmCOMMON_LANE_PWRMGMT 0x48c4
+#define mmDC_COMBOPHYCMREGS0_COMMON_LANE_PWRMGMT 0x48c4
+#define mmDC_COMBOPHYCMREGS1_COMMON_LANE_PWRMGMT 0x4964
+#define mmDC_COMBOPHYCMREGS2_COMMON_LANE_PWRMGMT 0x9a04
+#define mmDC_COMBOPHYCMREGS3_COMMON_LANE_PWRMGMT 0x9aa4
+#define mmDC_COMBOPHYCMREGS4_COMMON_LANE_PWRMGMT 0x9b44
+#define mmDC_COMBOPHYCMREGS5_COMMON_LANE_PWRMGMT 0x9be4
+#define mmDC_COMBOPHYCMREGS6_COMMON_LANE_PWRMGMT 0x9c84
+#define mmDC_COMBOPHYCMREGS7_COMMON_LANE_PWRMGMT 0x9d24
+#define mmCOMMON_TXCNTRL 0x48c5
+#define mmDC_COMBOPHYCMREGS0_COMMON_TXCNTRL 0x48c5
+#define mmDC_COMBOPHYCMREGS1_COMMON_TXCNTRL 0x4965
+#define mmDC_COMBOPHYCMREGS2_COMMON_TXCNTRL 0x9a05
+#define mmDC_COMBOPHYCMREGS3_COMMON_TXCNTRL 0x9aa5
+#define mmDC_COMBOPHYCMREGS4_COMMON_TXCNTRL 0x9b45
+#define mmDC_COMBOPHYCMREGS5_COMMON_TXCNTRL 0x9be5
+#define mmDC_COMBOPHYCMREGS6_COMMON_TXCNTRL 0x9c85
+#define mmDC_COMBOPHYCMREGS7_COMMON_TXCNTRL 0x9d25
+#define mmCOMMON_TMDP 0x48c6
+#define mmDC_COMBOPHYCMREGS0_COMMON_TMDP 0x48c6
+#define mmDC_COMBOPHYCMREGS1_COMMON_TMDP 0x4966
+#define mmDC_COMBOPHYCMREGS2_COMMON_TMDP 0x9a06
+#define mmDC_COMBOPHYCMREGS3_COMMON_TMDP 0x9aa6
+#define mmDC_COMBOPHYCMREGS4_COMMON_TMDP 0x9b46
+#define mmDC_COMBOPHYCMREGS5_COMMON_TMDP 0x9be6
+#define mmDC_COMBOPHYCMREGS6_COMMON_TMDP 0x9c86
+#define mmDC_COMBOPHYCMREGS7_COMMON_TMDP 0x9d26
+#define mmCOMMON_LANE_RESETS 0x48c7
+#define mmDC_COMBOPHYCMREGS0_COMMON_LANE_RESETS 0x48c7
+#define mmDC_COMBOPHYCMREGS1_COMMON_LANE_RESETS 0x4967
+#define mmDC_COMBOPHYCMREGS2_COMMON_LANE_RESETS 0x9a07
+#define mmDC_COMBOPHYCMREGS3_COMMON_LANE_RESETS 0x9aa7
+#define mmDC_COMBOPHYCMREGS4_COMMON_LANE_RESETS 0x9b47
+#define mmDC_COMBOPHYCMREGS5_COMMON_LANE_RESETS 0x9be7
+#define mmDC_COMBOPHYCMREGS6_COMMON_LANE_RESETS 0x9c87
+#define mmDC_COMBOPHYCMREGS7_COMMON_LANE_RESETS 0x9d27
+#define mmCOMMON_ZCALCODE_CTRL 0x48c8
+#define mmDC_COMBOPHYCMREGS0_COMMON_ZCALCODE_CTRL 0x48c8
+#define mmDC_COMBOPHYCMREGS1_COMMON_ZCALCODE_CTRL 0x4968
+#define mmDC_COMBOPHYCMREGS2_COMMON_ZCALCODE_CTRL 0x9a08
+#define mmDC_COMBOPHYCMREGS3_COMMON_ZCALCODE_CTRL 0x9aa8
+#define mmDC_COMBOPHYCMREGS4_COMMON_ZCALCODE_CTRL 0x9b48
+#define mmDC_COMBOPHYCMREGS5_COMMON_ZCALCODE_CTRL 0x9be8
+#define mmDC_COMBOPHYCMREGS6_COMMON_ZCALCODE_CTRL 0x9c88
+#define mmDC_COMBOPHYCMREGS7_COMMON_ZCALCODE_CTRL 0x9d28
+#define mmCOMMON_DISP_RFU1 0x48c9
+#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU1 0x48c9
+#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU1 0x4969
+#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU1 0x9a09
+#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU1 0x9aa9
+#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU1 0x9b49
+#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU1 0x9be9
+#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU1 0x9c89
+#define mmDC_COMBOPHYCMREGS7_COMMON_DISP_RFU1 0x9d29
+#define mmCOMMON_DISP_RFU2 0x48ca
+#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU2 0x48ca
+#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU2 0x496a
+#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU2 0x9a0a
+#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU2 0x9aaa
+#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU2 0x9b4a
+#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU2 0x9bea
+#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU2 0x9c8a
+#define mmDC_COMBOPHYCMREGS7_COMMON_DISP_RFU2 0x9d2a
+#define mmCOMMON_DISP_RFU3 0x48cb
+#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU3 0x48cb
+#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU3 0x496b
+#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU3 0x9a0b
+#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU3 0x9aab
+#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU3 0x9b4b
+#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU3 0x9beb
+#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU3 0x9c8b
+#define mmDC_COMBOPHYCMREGS7_COMMON_DISP_RFU3 0x9d2b
+#define mmCOMMON_DISP_RFU4 0x48cc
+#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU4 0x48cc
+#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU4 0x496c
+#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU4 0x9a0c
+#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU4 0x9aac
+#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU4 0x9b4c
+#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU4 0x9bec
+#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU4 0x9c8c
+#define mmDC_COMBOPHYCMREGS7_COMMON_DISP_RFU4 0x9d2c
+#define mmCOMMON_DISP_RFU5 0x48cd
+#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU5 0x48cd
+#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU5 0x496d
+#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU5 0x9a0d
+#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU5 0x9aad
+#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU5 0x9b4d
+#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU5 0x9bed
+#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU5 0x9c8d
+#define mmDC_COMBOPHYCMREGS7_COMMON_DISP_RFU5 0x9d2d
+#define mmCOMMON_DISP_RFU6 0x48ce
+#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU6 0x48ce
+#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU6 0x496e
+#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU6 0x9a0e
+#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU6 0x9aae
+#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU6 0x9b4e
+#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU6 0x9bee
+#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU6 0x9c8e
+#define mmDC_COMBOPHYCMREGS7_COMMON_DISP_RFU6 0x9d2e
+#define mmCOMMON_DISP_RFU7 0x48cf
+#define mmDC_COMBOPHYCMREGS0_COMMON_DISP_RFU7 0x48cf
+#define mmDC_COMBOPHYCMREGS1_COMMON_DISP_RFU7 0x496f
+#define mmDC_COMBOPHYCMREGS2_COMMON_DISP_RFU7 0x9a0f
+#define mmDC_COMBOPHYCMREGS3_COMMON_DISP_RFU7 0x9aaf
+#define mmDC_COMBOPHYCMREGS4_COMMON_DISP_RFU7 0x9b4f
+#define mmDC_COMBOPHYCMREGS5_COMMON_DISP_RFU7 0x9bef
+#define mmDC_COMBOPHYCMREGS6_COMMON_DISP_RFU7 0x9c8f
+#define mmDC_COMBOPHYCMREGS7_COMMON_DISP_RFU7 0x9d2f
+#define mmFREQ_CTRL0 0x4920
+#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL0 0x4920
+#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL0 0x49c0
+#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL0 0x9a60
+#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL0 0x9b00
+#define mmDC_COMBOPHYPLLREGS4_FREQ_CTRL0 0x9ba0
+#define mmDC_COMBOPHYPLLREGS5_FREQ_CTRL0 0x9c40
+#define mmDC_COMBOPHYPLLREGS6_FREQ_CTRL0 0x9ce0
+#define mmDC_COMBOPHYPLLREGS7_FREQ_CTRL0 0x9d80
+#define mmFREQ_CTRL1 0x4921
+#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL1 0x4921
+#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL1 0x49c1
+#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL1 0x9a61
+#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL1 0x9b01
+#define mmDC_COMBOPHYPLLREGS4_FREQ_CTRL1 0x9ba1
+#define mmDC_COMBOPHYPLLREGS5_FREQ_CTRL1 0x9c41
+#define mmDC_COMBOPHYPLLREGS6_FREQ_CTRL1 0x9ce1
+#define mmDC_COMBOPHYPLLREGS7_FREQ_CTRL1 0x9d81
+#define mmFREQ_CTRL2 0x4922
+#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL2 0x4922
+#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL2 0x49c2
+#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL2 0x9a62
+#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL2 0x9b02
+#define mmDC_COMBOPHYPLLREGS4_FREQ_CTRL2 0x9ba2
+#define mmDC_COMBOPHYPLLREGS5_FREQ_CTRL2 0x9c42
+#define mmDC_COMBOPHYPLLREGS6_FREQ_CTRL2 0x9ce2
+#define mmDC_COMBOPHYPLLREGS7_FREQ_CTRL2 0x9d82
+#define mmFREQ_CTRL3 0x4923
+#define mmDC_COMBOPHYPLLREGS0_FREQ_CTRL3 0x4923
+#define mmDC_COMBOPHYPLLREGS1_FREQ_CTRL3 0x49c3
+#define mmDC_COMBOPHYPLLREGS2_FREQ_CTRL3 0x9a63
+#define mmDC_COMBOPHYPLLREGS3_FREQ_CTRL3 0x9b03
+#define mmDC_COMBOPHYPLLREGS4_FREQ_CTRL3 0x9ba3
+#define mmDC_COMBOPHYPLLREGS5_FREQ_CTRL3 0x9c43
+#define mmDC_COMBOPHYPLLREGS6_FREQ_CTRL3 0x9ce3
+#define mmDC_COMBOPHYPLLREGS7_FREQ_CTRL3 0x9d83
+#define mmBW_CTRL_COARSE 0x4924
+#define mmDC_COMBOPHYPLLREGS0_BW_CTRL_COARSE 0x4924
+#define mmDC_COMBOPHYPLLREGS1_BW_CTRL_COARSE 0x49c4
+#define mmDC_COMBOPHYPLLREGS2_BW_CTRL_COARSE 0x9a64
+#define mmDC_COMBOPHYPLLREGS3_BW_CTRL_COARSE 0x9b04
+#define mmDC_COMBOPHYPLLREGS4_BW_CTRL_COARSE 0x9ba4
+#define mmDC_COMBOPHYPLLREGS5_BW_CTRL_COARSE 0x9c44
+#define mmDC_COMBOPHYPLLREGS6_BW_CTRL_COARSE 0x9ce4
+#define mmDC_COMBOPHYPLLREGS7_BW_CTRL_COARSE 0x9d84
+#define mmBW_CTRL_FINE 0x4925
+#define mmDC_COMBOPHYPLLREGS0_BW_CTRL_FINE 0x4925
+#define mmDC_COMBOPHYPLLREGS1_BW_CTRL_FINE 0x49c5
+#define mmDC_COMBOPHYPLLREGS2_BW_CTRL_FINE 0x9a65
+#define mmDC_COMBOPHYPLLREGS3_BW_CTRL_FINE 0x9b05
+#define mmDC_COMBOPHYPLLREGS4_BW_CTRL_FINE 0x9ba5
+#define mmDC_COMBOPHYPLLREGS5_BW_CTRL_FINE 0x9c45
+#define mmDC_COMBOPHYPLLREGS6_BW_CTRL_FINE 0x9ce5
+#define mmDC_COMBOPHYPLLREGS7_BW_CTRL_FINE 0x9d85
+#define mmCAL_CTRL 0x4926
+#define mmDC_COMBOPHYPLLREGS0_CAL_CTRL 0x4926
+#define mmDC_COMBOPHYPLLREGS1_CAL_CTRL 0x49c6
+#define mmDC_COMBOPHYPLLREGS2_CAL_CTRL 0x9a66
+#define mmDC_COMBOPHYPLLREGS3_CAL_CTRL 0x9b06
+#define mmDC_COMBOPHYPLLREGS4_CAL_CTRL 0x9ba6
+#define mmDC_COMBOPHYPLLREGS5_CAL_CTRL 0x9c46
+#define mmDC_COMBOPHYPLLREGS6_CAL_CTRL 0x9ce6
+#define mmDC_COMBOPHYPLLREGS7_CAL_CTRL 0x9d86
+#define mmLOOP_CTRL 0x4927
+#define mmDC_COMBOPHYPLLREGS0_LOOP_CTRL 0x4927
+#define mmDC_COMBOPHYPLLREGS1_LOOP_CTRL 0x49c7
+#define mmDC_COMBOPHYPLLREGS2_LOOP_CTRL 0x9a67
+#define mmDC_COMBOPHYPLLREGS3_LOOP_CTRL 0x9b07
+#define mmDC_COMBOPHYPLLREGS4_LOOP_CTRL 0x9ba7
+#define mmDC_COMBOPHYPLLREGS5_LOOP_CTRL 0x9c47
+#define mmDC_COMBOPHYPLLREGS6_LOOP_CTRL 0x9ce7
+#define mmDC_COMBOPHYPLLREGS7_LOOP_CTRL 0x9d87
+#define mmDEBUG0 0x4928
+#define mmDC_COMBOPHYPLLREGS0_DEBUG0 0x4928
+#define mmDC_COMBOPHYPLLREGS1_DEBUG0 0x49c8
+#define mmDC_COMBOPHYPLLREGS2_DEBUG0 0x9a68
+#define mmDC_COMBOPHYPLLREGS3_DEBUG0 0x9b08
+#define mmDC_COMBOPHYPLLREGS4_DEBUG0 0x9ba8
+#define mmDC_COMBOPHYPLLREGS5_DEBUG0 0x9c48
+#define mmDC_COMBOPHYPLLREGS6_DEBUG0 0x9ce8
+#define mmDC_COMBOPHYPLLREGS7_DEBUG0 0x9d88
+#define mmVREG_CFG 0x4929
+#define mmDC_COMBOPHYPLLREGS0_VREG_CFG 0x4929
+#define mmDC_COMBOPHYPLLREGS1_VREG_CFG 0x49c9
+#define mmDC_COMBOPHYPLLREGS2_VREG_CFG 0x9a69
+#define mmDC_COMBOPHYPLLREGS3_VREG_CFG 0x9b09
+#define mmDC_COMBOPHYPLLREGS4_VREG_CFG 0x9ba9
+#define mmDC_COMBOPHYPLLREGS5_VREG_CFG 0x9c49
+#define mmDC_COMBOPHYPLLREGS6_VREG_CFG 0x9ce9
+#define mmDC_COMBOPHYPLLREGS7_VREG_CFG 0x9d89
+#define mmOBSERVE0 0x492a
+#define mmDC_COMBOPHYPLLREGS0_OBSERVE0 0x492a
+#define mmDC_COMBOPHYPLLREGS1_OBSERVE0 0x49ca
+#define mmDC_COMBOPHYPLLREGS2_OBSERVE0 0x9a6a
+#define mmDC_COMBOPHYPLLREGS3_OBSERVE0 0x9b0a
+#define mmDC_COMBOPHYPLLREGS4_OBSERVE0 0x9baa
+#define mmDC_COMBOPHYPLLREGS5_OBSERVE0 0x9c4a
+#define mmDC_COMBOPHYPLLREGS6_OBSERVE0 0x9cea
+#define mmDC_COMBOPHYPLLREGS7_OBSERVE0 0x9d8a
+#define mmOBSERVE1 0x492b
+#define mmDC_COMBOPHYPLLREGS0_OBSERVE1 0x492b
+#define mmDC_COMBOPHYPLLREGS1_OBSERVE1 0x49cb
+#define mmDC_COMBOPHYPLLREGS2_OBSERVE1 0x9a6b
+#define mmDC_COMBOPHYPLLREGS3_OBSERVE1 0x9b0b
+#define mmDC_COMBOPHYPLLREGS4_OBSERVE1 0x9bab
+#define mmDC_COMBOPHYPLLREGS5_OBSERVE1 0x9c4b
+#define mmDC_COMBOPHYPLLREGS6_OBSERVE1 0x9ceb
+#define mmDC_COMBOPHYPLLREGS7_OBSERVE1 0x9d8b
+#define mmDFT_OUT 0x492c
+#define mmDC_COMBOPHYPLLREGS0_DFT_OUT 0x492c
+#define mmDC_COMBOPHYPLLREGS1_DFT_OUT 0x49cc
+#define mmDC_COMBOPHYPLLREGS2_DFT_OUT 0x9a6c
+#define mmDC_COMBOPHYPLLREGS3_DFT_OUT 0x9b0c
+#define mmDC_COMBOPHYPLLREGS4_DFT_OUT 0x9bac
+#define mmDC_COMBOPHYPLLREGS5_DFT_OUT 0x9c4c
+#define mmDC_COMBOPHYPLLREGS6_DFT_OUT 0x9cec
+#define mmDC_COMBOPHYPLLREGS7_DFT_OUT 0x9d8c
+#define mmPLL_WRAP_CNTRL1 0x495e
+#define mmDC_COMBOPHYPLLREGS0_PLL_WRAP_CNTRL1 0x495e
+#define mmDC_COMBOPHYPLLREGS1_PLL_WRAP_CNTRL1 0x49fe
+#define mmDC_COMBOPHYPLLREGS2_PLL_WRAP_CNTRL1 0x9a9e
+#define mmDC_COMBOPHYPLLREGS3_PLL_WRAP_CNTRL1 0x9b3e
+#define mmDC_COMBOPHYPLLREGS4_PLL_WRAP_CNTRL1 0x9bde
+#define mmDC_COMBOPHYPLLREGS5_PLL_WRAP_CNTRL1 0x9c7e
+#define mmDC_COMBOPHYPLLREGS6_PLL_WRAP_CNTRL1 0x9d1e
+#define mmDC_COMBOPHYPLLREGS7_PLL_WRAP_CNTRL1 0x9dbe
+#define mmPLL_WRAP_CNTRL 0x495f
+#define mmDC_COMBOPHYPLLREGS0_PLL_WRAP_CNTRL 0x495f
+#define mmDC_COMBOPHYPLLREGS1_PLL_WRAP_CNTRL 0x49ff
+#define mmDC_COMBOPHYPLLREGS2_PLL_WRAP_CNTRL 0x9a9f
+#define mmDC_COMBOPHYPLLREGS3_PLL_WRAP_CNTRL 0x9b3f
+#define mmDC_COMBOPHYPLLREGS4_PLL_WRAP_CNTRL 0x9bdf
+#define mmDC_COMBOPHYPLLREGS5_PLL_WRAP_CNTRL 0x9c7f
+#define mmDC_COMBOPHYPLLREGS6_PLL_WRAP_CNTRL 0x9d1f
+#define mmDC_COMBOPHYPLLREGS7_PLL_WRAP_CNTRL 0x9dbf
+#define mmPPLL_VREG_CFG 0x1700
+#define mmDC_DISPLAYPLLREGS0_PPLL_VREG_CFG 0x1700
+#define mmDC_DISPLAYPLLREGS1_PPLL_VREG_CFG 0x172a
+#define mmDC_DISPLAYPLLREGS2_PPLL_VREG_CFG 0x1754
+#define mmPPLL_MODE_CNTL 0x1701
+#define mmDC_DISPLAYPLLREGS0_PPLL_MODE_CNTL 0x1701
+#define mmDC_DISPLAYPLLREGS1_PPLL_MODE_CNTL 0x172b
+#define mmDC_DISPLAYPLLREGS2_PPLL_MODE_CNTL 0x1755
+#define mmPPLL_FREQ_CTRL0 0x1702
+#define mmDC_DISPLAYPLLREGS0_PPLL_FREQ_CTRL0 0x1702
+#define mmDC_DISPLAYPLLREGS1_PPLL_FREQ_CTRL0 0x172c
+#define mmDC_DISPLAYPLLREGS2_PPLL_FREQ_CTRL0 0x1756
+#define mmPPLL_FREQ_CTRL1 0x1703
+#define mmDC_DISPLAYPLLREGS0_PPLL_FREQ_CTRL1 0x1703
+#define mmDC_DISPLAYPLLREGS1_PPLL_FREQ_CTRL1 0x172d
+#define mmDC_DISPLAYPLLREGS2_PPLL_FREQ_CTRL1 0x1757
+#define mmPPLL_FREQ_CTRL2 0x1704
+#define mmDC_DISPLAYPLLREGS0_PPLL_FREQ_CTRL2 0x1704
+#define mmDC_DISPLAYPLLREGS1_PPLL_FREQ_CTRL2 0x172e
+#define mmDC_DISPLAYPLLREGS2_PPLL_FREQ_CTRL2 0x1758
+#define mmPPLL_FREQ_CTRL3 0x1705
+#define mmDC_DISPLAYPLLREGS0_PPLL_FREQ_CTRL3 0x1705
+#define mmDC_DISPLAYPLLREGS1_PPLL_FREQ_CTRL3 0x172f
+#define mmDC_DISPLAYPLLREGS2_PPLL_FREQ_CTRL3 0x1759
+#define mmPPLL_BW_CTRL_COARSE 0x1706
+#define mmDC_DISPLAYPLLREGS0_PPLL_BW_CTRL_COARSE 0x1706
+#define mmDC_DISPLAYPLLREGS1_PPLL_BW_CTRL_COARSE 0x1730
+#define mmDC_DISPLAYPLLREGS2_PPLL_BW_CTRL_COARSE 0x175a
+#define mmPPLL_BW_CTRL_FINE 0x1708
+#define mmDC_DISPLAYPLLREGS0_PPLL_BW_CTRL_FINE 0x1708
+#define mmDC_DISPLAYPLLREGS1_PPLL_BW_CTRL_FINE 0x1732
+#define mmDC_DISPLAYPLLREGS2_PPLL_BW_CTRL_FINE 0x175c
+#define mmPPLL_CAL_CTRL 0x1709
+#define mmDC_DISPLAYPLLREGS0_PPLL_CAL_CTRL 0x1709
+#define mmDC_DISPLAYPLLREGS1_PPLL_CAL_CTRL 0x1733
+#define mmDC_DISPLAYPLLREGS2_PPLL_CAL_CTRL 0x175d
+#define mmPPLL_LOOP_CTRL 0x170a
+#define mmDC_DISPLAYPLLREGS0_PPLL_LOOP_CTRL 0x170a
+#define mmDC_DISPLAYPLLREGS1_PPLL_LOOP_CTRL 0x1734
+#define mmDC_DISPLAYPLLREGS2_PPLL_LOOP_CTRL 0x175e
+#define mmPPLL_REFCLK_CNTL 0x1718
+#define mmDC_DISPLAYPLLREGS0_PPLL_REFCLK_CNTL 0x1718
+#define mmDC_DISPLAYPLLREGS1_PPLL_REFCLK_CNTL 0x1742
+#define mmDC_DISPLAYPLLREGS2_PPLL_REFCLK_CNTL 0x176c
+#define mmPPLL_CLKOUT_CNTL 0x1719
+#define mmDC_DISPLAYPLLREGS0_PPLL_CLKOUT_CNTL 0x1719
+#define mmDC_DISPLAYPLLREGS1_PPLL_CLKOUT_CNTL 0x1743
+#define mmDC_DISPLAYPLLREGS2_PPLL_CLKOUT_CNTL 0x176d
+#define mmPPLL_DFT_CNTL 0x171a
+#define mmDC_DISPLAYPLLREGS0_PPLL_DFT_CNTL 0x171a
+#define mmDC_DISPLAYPLLREGS1_PPLL_DFT_CNTL 0x1744
+#define mmDC_DISPLAYPLLREGS2_PPLL_DFT_CNTL 0x176e
+#define mmPPLL_ANALOG_CNTL 0x171b
+#define mmDC_DISPLAYPLLREGS0_PPLL_ANALOG_CNTL 0x171b
+#define mmDC_DISPLAYPLLREGS1_PPLL_ANALOG_CNTL 0x1745
+#define mmDC_DISPLAYPLLREGS2_PPLL_ANALOG_CNTL 0x176f
+#define mmPPLL_POSTDIV 0x171c
+#define mmDC_DISPLAYPLLREGS0_PPLL_POSTDIV 0x171c
+#define mmDC_DISPLAYPLLREGS1_PPLL_POSTDIV 0x1746
+#define mmDC_DISPLAYPLLREGS2_PPLL_POSTDIV 0x1770
+#define mmPPLL_DEBUG0 0x1720
+#define mmDC_DISPLAYPLLREGS0_PPLL_DEBUG0 0x1720
+#define mmDC_DISPLAYPLLREGS1_PPLL_DEBUG0 0x174a
+#define mmDC_DISPLAYPLLREGS2_PPLL_DEBUG0 0x1774
+#define mmPPLL_OBSERVE0 0x1721
+#define mmDC_DISPLAYPLLREGS0_PPLL_OBSERVE0 0x1721
+#define mmDC_DISPLAYPLLREGS1_PPLL_OBSERVE0 0x174b
+#define mmDC_DISPLAYPLLREGS2_PPLL_OBSERVE0 0x1775
+#define mmPPLL_OBSERVE1 0x1722
+#define mmDC_DISPLAYPLLREGS0_PPLL_OBSERVE1 0x1722
+#define mmDC_DISPLAYPLLREGS1_PPLL_OBSERVE1 0x174c
+#define mmDC_DISPLAYPLLREGS2_PPLL_OBSERVE1 0x1776
+#define mmPPLL_UPDATE_CNTL 0x1724
+#define mmDC_DISPLAYPLLREGS0_PPLL_UPDATE_CNTL 0x1724
+#define mmDC_DISPLAYPLLREGS1_PPLL_UPDATE_CNTL 0x174e
+#define mmDC_DISPLAYPLLREGS2_PPLL_UPDATE_CNTL 0x1778
+#define mmPPLL_OBSERVE0_OUT 0x1725
+#define mmDC_DISPLAYPLLREGS0_PPLL_OBSERVE0_OUT 0x1725
+#define mmDC_DISPLAYPLLREGS1_PPLL_OBSERVE0_OUT 0x174f
+#define mmDC_DISPLAYPLLREGS2_PPLL_OBSERVE0_OUT 0x1779
+#define mmPPLL_STATUS_DEBUG1 0x1726
+#define mmDC_DISPLAYPLLREGS0_PPLL_STATUS_DEBUG1 0x1726
+#define mmDC_DISPLAYPLLREGS1_PPLL_STATUS_DEBUG1 0x1750
+#define mmDC_DISPLAYPLLREGS2_PPLL_STATUS_DEBUG1 0x177a
+#define mmPPLL_DEBUG_MUX_CNTL 0x1727
+#define mmDC_DISPLAYPLLREGS0_PPLL_DEBUG_MUX_CNTL 0x1727
+#define mmDC_DISPLAYPLLREGS1_PPLL_DEBUG_MUX_CNTL 0x1751
+#define mmDC_DISPLAYPLLREGS2_PPLL_DEBUG_MUX_CNTL 0x177b
+#define mmPPLL_DIV_UPDATE_DEBUG 0x1728
+#define mmDC_DISPLAYPLLREGS0_PPLL_DIV_UPDATE_DEBUG 0x1728
+#define mmDC_DISPLAYPLLREGS1_PPLL_DIV_UPDATE_DEBUG 0x1752
+#define mmDC_DISPLAYPLLREGS2_PPLL_DIV_UPDATE_DEBUG 0x177c
+#define mmPPLL_STATUS_DEBUG0 0x1729
+#define mmDC_DISPLAYPLLREGS0_PPLL_STATUS_DEBUG0 0x1729
+#define mmDC_DISPLAYPLLREGS1_PPLL_STATUS_DEBUG0 0x1753
+#define mmDC_DISPLAYPLLREGS2_PPLL_STATUS_DEBUG0 0x177d
+#define mmCOMP_EN_CTL 0x9dc0
+#define mmDPCSTX_PHY_CNTL 0x48d0
+#define mmDPCSTX0_DPCSTX_PHY_CNTL 0x48d0
+#define mmDPCSTX1_DPCSTX_PHY_CNTL 0x4970
+#define mmDPCSTX2_DPCSTX_PHY_CNTL 0x9a10
+#define mmDPCSTX3_DPCSTX_PHY_CNTL 0x9ab0
+#define mmDPCSTX4_DPCSTX_PHY_CNTL 0x9b50
+#define mmDPCSTX5_DPCSTX_PHY_CNTL 0x9bf0
+#define mmDPCSTX6_DPCSTX_PHY_CNTL 0x9c90
+#define mmDPCSTX7_DPCSTX_PHY_CNTL 0x9d30
+#define mmDPCSTX_TX_CLOCK_CNTL 0x48d1
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL 0x48d1
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL 0x4971
+#define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL 0x9a11
+#define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL 0x9ab1
+#define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL 0x9b51
+#define mmDPCSTX5_DPCSTX_TX_CLOCK_CNTL 0x9bf1
+#define mmDPCSTX6_DPCSTX_TX_CLOCK_CNTL 0x9c91
+#define mmDPCSTX7_DPCSTX_TX_CLOCK_CNTL 0x9d31
+#define mmDPCSTX_TX_CNTL 0x48d3
+#define mmDPCSTX0_DPCSTX_TX_CNTL 0x48d3
+#define mmDPCSTX1_DPCSTX_TX_CNTL 0x4973
+#define mmDPCSTX2_DPCSTX_TX_CNTL 0x9a13
+#define mmDPCSTX3_DPCSTX_TX_CNTL 0x9ab3
+#define mmDPCSTX4_DPCSTX_TX_CNTL 0x9b53
+#define mmDPCSTX5_DPCSTX_TX_CNTL 0x9bf3
+#define mmDPCSTX6_DPCSTX_TX_CNTL 0x9c93
+#define mmDPCSTX7_DPCSTX_TX_CNTL 0x9d33
+#define mmDPCSTX_CBUS_CNTL 0x48d5
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL 0x48d5
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL 0x4975
+#define mmDPCSTX2_DPCSTX_CBUS_CNTL 0x9a15
+#define mmDPCSTX3_DPCSTX_CBUS_CNTL 0x9ab5
+#define mmDPCSTX4_DPCSTX_CBUS_CNTL 0x9b55
+#define mmDPCSTX5_DPCSTX_CBUS_CNTL 0x9bf5
+#define mmDPCSTX6_DPCSTX_CBUS_CNTL 0x9c95
+#define mmDPCSTX7_DPCSTX_CBUS_CNTL 0x9d35
+#define mmDPCSTX_REG_ERROR_STATUS 0x48d6
+#define mmDPCSTX0_DPCSTX_REG_ERROR_STATUS 0x48d6
+#define mmDPCSTX1_DPCSTX_REG_ERROR_STATUS 0x4976
+#define mmDPCSTX2_DPCSTX_REG_ERROR_STATUS 0x9a16
+#define mmDPCSTX3_DPCSTX_REG_ERROR_STATUS 0x9ab6
+#define mmDPCSTX4_DPCSTX_REG_ERROR_STATUS 0x9b56
+#define mmDPCSTX5_DPCSTX_REG_ERROR_STATUS 0x9bf6
+#define mmDPCSTX6_DPCSTX_REG_ERROR_STATUS 0x9c96
+#define mmDPCSTX7_DPCSTX_REG_ERROR_STATUS 0x9d36
+#define mmDPCSTX_TX_ERROR_STATUS 0x48d7
+#define mmDPCSTX0_DPCSTX_TX_ERROR_STATUS 0x48d7
+#define mmDPCSTX1_DPCSTX_TX_ERROR_STATUS 0x4977
+#define mmDPCSTX2_DPCSTX_TX_ERROR_STATUS 0x9a17
+#define mmDPCSTX3_DPCSTX_TX_ERROR_STATUS 0x9ab7
+#define mmDPCSTX4_DPCSTX_TX_ERROR_STATUS 0x9b57
+#define mmDPCSTX5_DPCSTX_TX_ERROR_STATUS 0x9bf7
+#define mmDPCSTX6_DPCSTX_TX_ERROR_STATUS 0x9c97
+#define mmDPCSTX7_DPCSTX_TX_ERROR_STATUS 0x9d37
+#define mmDPCSTX_PLL_UPDATE_ADDR 0x48d8
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR 0x48d8
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR 0x4978
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR 0x9a18
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR 0x9ab8
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR 0x9b58
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR 0x9bf8
+#define mmDPCSTX6_DPCSTX_PLL_UPDATE_ADDR 0x9c98
+#define mmDPCSTX7_DPCSTX_PLL_UPDATE_ADDR 0x9d38
+#define mmDPCSTX_PLL_UPDATE_DATA 0x48d9
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA 0x48d9
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA 0x4979
+#define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA 0x9a19
+#define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA 0x9ab9
+#define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA 0x9b59
+#define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA 0x9bf9
+#define mmDPCSTX6_DPCSTX_PLL_UPDATE_DATA 0x9c99
+#define mmDPCSTX7_DPCSTX_PLL_UPDATE_DATA 0x9d39
+#define mmDPCSTX_INDEX_MODE_ADDR 0x48da
+#define mmDPCSTX0_DPCSTX_INDEX_MODE_ADDR 0x48da
+#define mmDPCSTX1_DPCSTX_INDEX_MODE_ADDR 0x497a
+#define mmDPCSTX2_DPCSTX_INDEX_MODE_ADDR 0x9a1a
+#define mmDPCSTX3_DPCSTX_INDEX_MODE_ADDR 0x9aba
+#define mmDPCSTX4_DPCSTX_INDEX_MODE_ADDR 0x9b5a
+#define mmDPCSTX5_DPCSTX_INDEX_MODE_ADDR 0x9bfa
+#define mmDPCSTX6_DPCSTX_INDEX_MODE_ADDR 0x9c9a
+#define mmDPCSTX7_DPCSTX_INDEX_MODE_ADDR 0x9d3a
+#define mmDPCSTX_INDEX_MODE_DATA 0x48db
+#define mmDPCSTX0_DPCSTX_INDEX_MODE_DATA 0x48db
+#define mmDPCSTX1_DPCSTX_INDEX_MODE_DATA 0x497b
+#define mmDPCSTX2_DPCSTX_INDEX_MODE_DATA 0x9a1b
+#define mmDPCSTX3_DPCSTX_INDEX_MODE_DATA 0x9abb
+#define mmDPCSTX4_DPCSTX_INDEX_MODE_DATA 0x9b5b
+#define mmDPCSTX5_DPCSTX_INDEX_MODE_DATA 0x9bfb
+#define mmDPCSTX6_DPCSTX_INDEX_MODE_DATA 0x9c9b
+#define mmDPCSTX7_DPCSTX_INDEX_MODE_DATA 0x9d3b
+#define mmDPCSTX_DEBUG_CONFIG 0x48dc
+#define mmDPCSTX0_DPCSTX_DEBUG_CONFIG 0x48dc
+#define mmDPCSTX1_DPCSTX_DEBUG_CONFIG 0x497c
+#define mmDPCSTX2_DPCSTX_DEBUG_CONFIG 0x9a1c
+#define mmDPCSTX3_DPCSTX_DEBUG_CONFIG 0x9abc
+#define mmDPCSTX4_DPCSTX_DEBUG_CONFIG 0x9b5c
+#define mmDPCSTX5_DPCSTX_DEBUG_CONFIG 0x9bfc
+#define mmDPCSTX6_DPCSTX_DEBUG_CONFIG 0x9c9c
+#define mmDPCSTX7_DPCSTX_DEBUG_CONFIG 0x9d3c
+#define mmDPCSTX_TEST_DEBUG_DATA 0x48dd
+#define mmDPCSTX0_DPCSTX_TEST_DEBUG_DATA 0x48dd
+#define mmDPCSTX1_DPCSTX_TEST_DEBUG_DATA 0x497d
+#define mmDPCSTX2_DPCSTX_TEST_DEBUG_DATA 0x9a1d
+#define mmDPCSTX3_DPCSTX_TEST_DEBUG_DATA 0x9abd
+#define mmDPCSTX4_DPCSTX_TEST_DEBUG_DATA 0x9b5d
+#define mmDPCSTX5_DPCSTX_TEST_DEBUG_DATA 0x9bfd
+#define mmDPCSTX6_DPCSTX_TEST_DEBUG_DATA 0x9c9d
+#define mmDPCSTX7_DPCSTX_TEST_DEBUG_DATA 0x9d3d
+
+#endif /* DCE_11_2_D_H */
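/*
 * Illustrative sketch (standalone, not part of the generated header above):
 * the per-instance mm* offsets in this file follow a fixed dword stride of
 * 0xa0 per block instance, split across two apertures -- instances 0-1 in
 * the low range and instances 2-7 in the high range.  The base and expected
 * constants below are copied from the mmDPCSTXn_DPCSTX_PHY_CNTL defines in
 * this header; the helper and the check program are assumptions added for
 * demonstration only.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DPCSTX_INSTANCE_STRIDE 0xa0u /* dword stride between block instances */

/* Dword offset of DPCSTX_PHY_CNTL for instance n (0..7). */
static uint32_t dpcstx_phy_cntl(unsigned int n)
{
	return n < 2 ? 0x48d0u + n * DPCSTX_INSTANCE_STRIDE
		     : 0x9a10u + (n - 2) * DPCSTX_INSTANCE_STRIDE;
}

int main(void)
{
	/* Expected values, copied from the mmDPCSTXn_DPCSTX_PHY_CNTL defines. */
	static const uint32_t expected[8] = {
		0x48d0, 0x4970, 0x9a10, 0x9ab0, 0x9b50, 0x9bf0, 0x9c90, 0x9d30,
	};
	unsigned int n;

	for (n = 0; n < 8; n++) {
		assert(dpcstx_phy_cntl(n) == expected[n]);
		printf("mmDPCSTX%u_DPCSTX_PHY_CNTL = 0x%04x\n",
		       n, dpcstx_phy_cntl(n));
	}
	return 0;
}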
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_enum.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_enum.h
new file mode 100644
index 000000000..b2ea4202d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_enum.h
@@ -0,0 +1,6813 @@
+/*
+ * DCE_11_2 Register documentation
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DCE_11_2_ENUM_H
+#define DCE_11_2_ENUM_H
+
+typedef enum CRTC_CONTROL_CRTC_START_POINT_CNTL {
+ CRTC_CONTROL_CRTC_START_POINT_CNTL_NORMAL = 0x0,
+ CRTC_CONTROL_CRTC_START_POINT_CNTL_DP = 0x1,
+} CRTC_CONTROL_CRTC_START_POINT_CNTL;
+typedef enum CRTC_CONTROL_CRTC_FIELD_NUMBER_CNTL {
+ CRTC_CONTROL_CRTC_FIELD_NUMBER_CNTL_NORMAL = 0x0,
+ CRTC_CONTROL_CRTC_FIELD_NUMBER_CNTL_DP = 0x1,
+} CRTC_CONTROL_CRTC_FIELD_NUMBER_CNTL;
+typedef enum CRTC_CONTROL_CRTC_DISABLE_POINT_CNTL {
+ CRTC_CONTROL_CRTC_DISABLE_POINT_CNTL_DISABLE = 0x0,
+ CRTC_CONTROL_CRTC_DISABLE_POINT_CNTL_DISABLE_CURRENT= 0x1,
+ CRTC_CONTROL_CRTC_DISABLE_POINT_CNTL_RESERVED = 0x2,
+ CRTC_CONTROL_CRTC_DISABLE_POINT_CNTL_DISABLE_FIRST= 0x3,
+} CRTC_CONTROL_CRTC_DISABLE_POINT_CNTL;
+typedef enum CRTC_CONTROL_CRTC_FIELD_NUMBER_POLARITY {
+ CRTC_CONTROL_CRTC_FIELD_NUMBER_POLARITY_FALSE = 0x0,
+ CRTC_CONTROL_CRTC_FIELD_NUMBER_POLARITY_TRUE = 0x1,
+} CRTC_CONTROL_CRTC_FIELD_NUMBER_POLARITY;
+typedef enum CRTC_CONTROL_CRTC_DISP_READ_REQUEST_DISABLE {
+ CRTC_CONTROL_CRTC_DISP_READ_REQUEST_DISABLE_FALSE= 0x0,
+ CRTC_CONTROL_CRTC_DISP_READ_REQUEST_DISABLE_TRUE = 0x1,
+} CRTC_CONTROL_CRTC_DISP_READ_REQUEST_DISABLE;
+typedef enum CRTC_CONTROL_CRTC_SOF_PULL_EN {
+ CRTC_CONTROL_CRTC_SOF_PULL_EN_FALSE = 0x0,
+ CRTC_CONTROL_CRTC_SOF_PULL_EN_TRUE = 0x1,
+} CRTC_CONTROL_CRTC_SOF_PULL_EN;
+typedef enum CRTC_H_SYNC_B_CNTL_CRTC_H_SYNC_B_POL {
+ CRTC_H_SYNC_B_CNTL_CRTC_H_SYNC_B_POL_FALSE = 0x0,
+ CRTC_H_SYNC_B_CNTL_CRTC_H_SYNC_B_POL_TRUE = 0x1,
+} CRTC_H_SYNC_B_CNTL_CRTC_H_SYNC_B_POL;
+typedef enum CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MAX_SEL {
+ CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MAX_SEL_FALSE = 0x0,
+ CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MAX_SEL_TRUE = 0x1,
+} CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MAX_SEL;
+typedef enum CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MIN_SEL {
+ CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MIN_SEL_FALSE = 0x0,
+ CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MIN_SEL_TRUE = 0x1,
+} CRTC_V_TOTAL_CONTROL_CRTC_V_TOTAL_MIN_SEL;
+typedef enum CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_EN {
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_EN_FALSE= 0x0,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_EN_TRUE= 0x1,
+} CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_EN;
+typedef enum CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_TO_MASTER_VSYNC {
+ CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_TO_MASTER_VSYNC_DISABLE= 0x0,
+ CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_TO_MASTER_VSYNC_ENABLE= 0x1,
+} CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_TO_MASTER_VSYNC;
+typedef enum CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_ON_EVENT {
+ CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_ON_EVENT_DISABLE= 0x0,
+ CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_ON_EVENT_ENABLE= 0x1,
+} CRTC_V_TOTAL_CONTROL_CRTC_FORCE_LOCK_ON_EVENT;
+typedef enum CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK {
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_FRAME_START= 0x0,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_CRTC_TRIG_A= 0x1,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_CRTC_TRIG_B= 0x2,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_CURSOR_CHANGE= 0x3,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_OTHER_CLIENT= 0x4,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_MC_DC_REGION0= 0x5,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_MC_DC_REGION1= 0x6,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_MC_DC_REGION2= 0x7,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_MC_DC_REGION3= 0x8,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_GRAPHIC_UPDATE_PENDING= 0x9,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_RESERVED2= 0xa,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_INVALID= 0xb,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_DOUBLE_BUFFER= 0xc,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_D1CRTC_VERT_COUNT_NOM= 0xd,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_D1CRTC_VERT_COUNT= 0xe,
+ CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK_RESERVED= 0xf,
+} CRTC_V_TOTAL_CONTROL_CRTC_SET_V_TOTAL_MIN_MASK;
+typedef enum CRTC_V_TOTAL_INT_STATUS_CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK {
+ CRTC_V_TOTAL_INT_STATUS_CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK_FALSE= 0x0,
+ CRTC_V_TOTAL_INT_STATUS_CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK_TRUE= 0x1,
+} CRTC_V_TOTAL_INT_STATUS_CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK;
+typedef enum CRTC_VSYNC_NOM_INT_STATUS_CRTC_VSYNC_NOM_INT_CLEAR {
+ CRTC_VSYNC_NOM_INT_STATUS_CRTC_VSYNC_NOM_INT_CLEAR_FALSE= 0x0,
+ CRTC_VSYNC_NOM_INT_STATUS_CRTC_VSYNC_NOM_INT_CLEAR_TRUE= 0x1,
+} CRTC_VSYNC_NOM_INT_STATUS_CRTC_VSYNC_NOM_INT_CLEAR;
+typedef enum CRTC_V_SYNC_B_CNTL_CRTC_V_SYNC_B_POL {
+ CRTC_V_SYNC_B_CNTL_CRTC_V_SYNC_B_POL_FALSE = 0x0,
+ CRTC_V_SYNC_B_CNTL_CRTC_V_SYNC_B_POL_TRUE = 0x1,
+} CRTC_V_SYNC_B_CNTL_CRTC_V_SYNC_B_POL;
+typedef enum CRTC_DTMTEST_CNTL_CRTC_DTMTEST_CRTC_EN {
+ CRTC_DTMTEST_CNTL_CRTC_DTMTEST_CRTC_EN_FALSE = 0x0,
+ CRTC_DTMTEST_CNTL_CRTC_DTMTEST_CRTC_EN_TRUE = 0x1,
+} CRTC_DTMTEST_CNTL_CRTC_DTMTEST_CRTC_EN;
+typedef enum CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT {
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_VSYNCA_OTHER= 0x1,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_HSYNCA_OTHER= 0x2,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_GENERICF= 0x5,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_GENERICE= 0x6,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_VSYNCA = 0x7,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_HSYNCA = 0x8,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_VSYNCB = 0x9,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_HSYNCB = 0xa,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_HPD1 = 0xb,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_HPD2 = 0xc,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_GENERICD= 0xd,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_GENERICC= 0xe,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_IGSL0 = 0x10,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_IGSL1 = 0x11,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_IGSL2 = 0x12,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_IBLON = 0x13,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_GENERICA= 0x14,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_GENERICB= 0x15,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_IGSL_ALLOW= 0x16,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT_MANUAL_FLOW= 0x17,
+} CRTC_TRIGA_CNTL_CRTC_TRIGA_SOURCE_SELECT;
+typedef enum CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT {
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT_INTERLACE= 0x1,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT_GENERICA= 0x2,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT_GENERICB= 0x3,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT_HSYNCA= 0x4,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT_HSYNCB= 0x5,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT_VIDEO = 0x6,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT_GENERICC= 0x7,
+} CRTC_TRIGA_CNTL_CRTC_TRIGA_POLARITY_SELECT;
+typedef enum CRTC_TRIGA_CNTL_CRTC_TRIGA_RESYNC_BYPASS_EN {
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_RESYNC_BYPASS_EN_FALSE= 0x0,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_RESYNC_BYPASS_EN_TRUE = 0x1,
+} CRTC_TRIGA_CNTL_CRTC_TRIGA_RESYNC_BYPASS_EN;
+typedef enum CRTC_TRIGA_CNTL_CRTC_TRIGA_CLEAR {
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_CLEAR_FALSE = 0x0,
+ CRTC_TRIGA_CNTL_CRTC_TRIGA_CLEAR_TRUE = 0x1,
+} CRTC_TRIGA_CNTL_CRTC_TRIGA_CLEAR;
+typedef enum CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT {
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_VSYNCA_OTHER= 0x1,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_HSYNCA_OTHER= 0x2,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_GENERICF= 0x5,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_GENERICE= 0x6,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_VSYNCA = 0x7,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_HSYNCA = 0x8,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_VSYNCB = 0x9,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_HSYNCB = 0xa,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_HPD1 = 0xb,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_HPD2 = 0xc,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_GENERICD= 0xd,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_GENERICC= 0xe,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_IGSL0 = 0x10,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_IGSL1 = 0x11,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_IGSL2 = 0x12,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_IBLON = 0x13,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_GENERICA= 0x14,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_GENERICB= 0x15,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_IGSL_ALLOW= 0x16,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT_MANUAL_FLOW= 0x17,
+} CRTC_TRIGB_CNTL_CRTC_TRIGB_SOURCE_SELECT;
+typedef enum CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT {
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT_INTERLACE= 0x1,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT_GENERICA= 0x2,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT_GENERICB= 0x3,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT_HSYNCA= 0x4,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT_HSYNCB= 0x5,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT_VIDEO = 0x6,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT_GENERICC= 0x7,
+} CRTC_TRIGB_CNTL_CRTC_TRIGB_POLARITY_SELECT;
+typedef enum CRTC_TRIGB_CNTL_CRTC_TRIGB_RESYNC_BYPASS_EN {
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_RESYNC_BYPASS_EN_FALSE= 0x0,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_RESYNC_BYPASS_EN_TRUE = 0x1,
+} CRTC_TRIGB_CNTL_CRTC_TRIGB_RESYNC_BYPASS_EN;
+typedef enum CRTC_TRIGB_CNTL_CRTC_TRIGB_CLEAR {
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_CLEAR_FALSE = 0x0,
+ CRTC_TRIGB_CNTL_CRTC_TRIGB_CLEAR_TRUE = 0x1,
+} CRTC_TRIGB_CNTL_CRTC_TRIGB_CLEAR;
+typedef enum CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_MODE {
+ CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_MODE_DISABLE= 0x0,
+ CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_MODE_HCOUNT= 0x1,
+ CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_MODE_HCOUNT_VCOUNT= 0x2,
+ CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_MODE_RESERVED= 0x3,
+} CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_MODE;
+typedef enum CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CHECK {
+ CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CHECK_FALSE= 0x0,
+ CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CHECK_TRUE= 0x1,
+} CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CHECK;
+typedef enum CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_TRIG_SEL {
+ CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_TRIG_SEL_FALSE= 0x0,
+ CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_TRIG_SEL_TRUE= 0x1,
+} CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_TRIG_SEL;
+typedef enum CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CLEAR {
+ CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CLEAR_FALSE= 0x0,
+ CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CLEAR_TRUE= 0x1,
+} CRTC_FORCE_COUNT_NOW_CNTL_CRTC_FORCE_COUNT_NOW_CLEAR;
+typedef enum CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT {
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_LOGIC0= 0x0,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_GENERICF= 0x1,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_GENERICE= 0x2,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_HPD1= 0x3,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_HPD2= 0x4,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_DDC1DATA= 0x5,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_DDC1CLK= 0x6,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_DDC2DATA= 0x7,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_DDC2CLK= 0x8,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_DVOCLK= 0x9,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_MANUAL= 0xa,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_LOGIC1= 0xb,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_GENERICB= 0xc,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_GENERICA= 0xd,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_GENERICD= 0xe,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT_GENERICC= 0xf,
+} CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_SOURCE_SELECT;
+typedef enum CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_POLARITY {
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_POLARITY_FALSE= 0x0,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_POLARITY_TRUE= 0x1,
+} CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_POLARITY;
+typedef enum CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_GRANULARITY {
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_GRANULARITY_FALSE= 0x0,
+ CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_GRANULARITY_TRUE= 0x1,
+} CRTC_FLOW_CONTROL_CRTC_FLOW_CONTROL_GRANULARITY;
+typedef enum CRTC_STEREO_FORCE_NEXT_EYE_CRTC_STEREO_FORCE_NEXT_EYE {
+ CRTC_STEREO_FORCE_NEXT_EYE_CRTC_STEREO_FORCE_NEXT_EYE_NO= 0x0,
+ CRTC_STEREO_FORCE_NEXT_EYE_CRTC_STEREO_FORCE_NEXT_EYE_RIGHT= 0x1,
+ CRTC_STEREO_FORCE_NEXT_EYE_CRTC_STEREO_FORCE_NEXT_EYE_LEFT= 0x2,
+ CRTC_STEREO_FORCE_NEXT_EYE_CRTC_STEREO_FORCE_NEXT_EYE_RESERVED= 0x3,
+} CRTC_STEREO_FORCE_NEXT_EYE_CRTC_STEREO_FORCE_NEXT_EYE;
+typedef enum CRTC_CONTROL_CRTC_MASTER_EN {
+ CRTC_CONTROL_CRTC_MASTER_EN_FALSE = 0x0,
+ CRTC_CONTROL_CRTC_MASTER_EN_TRUE = 0x1,
+} CRTC_CONTROL_CRTC_MASTER_EN;
+typedef enum CRTC_BLANK_CONTROL_CRTC_BLANK_DATA_EN {
+ CRTC_BLANK_CONTROL_CRTC_BLANK_DATA_EN_FALSE = 0x0,
+ CRTC_BLANK_CONTROL_CRTC_BLANK_DATA_EN_TRUE = 0x1,
+} CRTC_BLANK_CONTROL_CRTC_BLANK_DATA_EN;
+typedef enum CRTC_BLANK_CONTROL_CRTC_BLANK_DE_MODE {
+ CRTC_BLANK_CONTROL_CRTC_BLANK_DE_MODE_FALSE = 0x0,
+ CRTC_BLANK_CONTROL_CRTC_BLANK_DE_MODE_TRUE = 0x1,
+} CRTC_BLANK_CONTROL_CRTC_BLANK_DE_MODE;
+typedef enum CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_ENABLE {
+ CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_ENABLE_FALSE= 0x0,
+ CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_ENABLE_TRUE= 0x1,
+} CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_ENABLE;
+typedef enum CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_FORCE_NEXT_FIELD {
+ CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_FORCE_NEXT_FIELD_NOT= 0x0,
+ CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_FORCE_NEXT_FIELD_ODD= 0x1,
+ CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_FORCE_NEXT_FIELD_EVEN= 0x2,
+ CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_FORCE_NEXT_FIELD_NOT2= 0x3,
+} CRTC_INTERLACE_CONTROL_CRTC_INTERLACE_FORCE_NEXT_FIELD;
+typedef enum CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_INDICATION_OUTPUT_POLARITY {
+ CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_INDICATION_OUTPUT_POLARITY_FALSE= 0x0,
+ CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_INDICATION_OUTPUT_POLARITY_TRUE= 0x1,
+} CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_INDICATION_OUTPUT_POLARITY;
+typedef enum CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_ALIGNMENT {
+ CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_ALIGNMENT_FALSE= 0x0,
+ CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_ALIGNMENT_TRUE= 0x1,
+} CRTC_FIELD_INDICATION_CONTROL_CRTC_FIELD_ALIGNMENT;
+typedef enum CRTC_COUNT_CONTROL_CRTC_HORZ_COUNT_BY2_EN {
+ CRTC_COUNT_CONTROL_CRTC_HORZ_COUNT_BY2_EN_FALSE = 0x0,
+ CRTC_COUNT_CONTROL_CRTC_HORZ_COUNT_BY2_EN_TRUE = 0x1,
+} CRTC_COUNT_CONTROL_CRTC_HORZ_COUNT_BY2_EN;
+typedef enum CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE {
+ CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_FALSE= 0x0,
+ CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_TRUE= 0x1,
+} CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE;
+typedef enum CRTC_VERT_SYNC_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR {
+ CRTC_VERT_SYNC_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR_FALSE= 0x0,
+ CRTC_VERT_SYNC_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR_TRUE= 0x1,
+} CRTC_VERT_SYNC_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR;
+typedef enum CRTC_VERT_SYNC_CONTROL_CRTC_AUTO_FORCE_VSYNC_MODE {
+ CRTC_VERT_SYNC_CONTROL_CRTC_AUTO_FORCE_VSYNC_MODE_DISABLE= 0x0,
+ CRTC_VERT_SYNC_CONTROL_CRTC_AUTO_FORCE_VSYNC_MODE_TRIGGERA= 0x1,
+ CRTC_VERT_SYNC_CONTROL_CRTC_AUTO_FORCE_VSYNC_MODE_TRIGGERB= 0x2,
+ CRTC_VERT_SYNC_CONTROL_CRTC_AUTO_FORCE_VSYNC_MODE_RESERVED= 0x3,
+} CRTC_VERT_SYNC_CONTROL_CRTC_AUTO_FORCE_VSYNC_MODE;
+typedef enum CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_OUTPUT_POLARITY {
+ CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_OUTPUT_POLARITY_FALSE= 0x0,
+ CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_OUTPUT_POLARITY_TRUE= 0x1,
+} CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_OUTPUT_POLARITY;
+typedef enum CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_SELECT_POLARITY {
+ CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_SELECT_POLARITY_FALSE= 0x0,
+ CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_SELECT_POLARITY_TRUE= 0x1,
+} CRTC_STEREO_CONTROL_CRTC_STEREO_SYNC_SELECT_POLARITY;
+typedef enum CRTC_STEREO_CONTROL_CRTC_STEREO_EYE_FLAG_POLARITY {
+ CRTC_STEREO_CONTROL_CRTC_STEREO_EYE_FLAG_POLARITY_FALSE= 0x0,
+ CRTC_STEREO_CONTROL_CRTC_STEREO_EYE_FLAG_POLARITY_TRUE= 0x1,
+} CRTC_STEREO_CONTROL_CRTC_STEREO_EYE_FLAG_POLARITY;
+typedef enum CRTC_STEREO_CONTROL_CRTC_STEREO_EN {
+ CRTC_STEREO_CONTROL_CRTC_STEREO_EN_FALSE = 0x0,
+ CRTC_STEREO_CONTROL_CRTC_STEREO_EN_TRUE = 0x1,
+} CRTC_STEREO_CONTROL_CRTC_STEREO_EN;
+typedef enum CRTC_SNAPSHOT_STATUS_CRTC_SNAPSHOT_CLEAR {
+ CRTC_SNAPSHOT_STATUS_CRTC_SNAPSHOT_CLEAR_FALSE = 0x0,
+ CRTC_SNAPSHOT_STATUS_CRTC_SNAPSHOT_CLEAR_TRUE = 0x1,
+} CRTC_SNAPSHOT_STATUS_CRTC_SNAPSHOT_CLEAR;
+typedef enum CRTC_SNAPSHOT_CONTROL_CRTC_AUTO_SNAPSHOT_TRIG_SEL {
+ CRTC_SNAPSHOT_CONTROL_CRTC_AUTO_SNAPSHOT_TRIG_SEL_DISABLE= 0x0,
+ CRTC_SNAPSHOT_CONTROL_CRTC_AUTO_SNAPSHOT_TRIG_SEL_TRIGGERA= 0x1,
+ CRTC_SNAPSHOT_CONTROL_CRTC_AUTO_SNAPSHOT_TRIG_SEL_TRIGGERB= 0x2,
+ CRTC_SNAPSHOT_CONTROL_CRTC_AUTO_SNAPSHOT_TRIG_SEL_RESERVED= 0x3,
+} CRTC_SNAPSHOT_CONTROL_CRTC_AUTO_SNAPSHOT_TRIG_SEL;
+typedef enum CRTC_START_LINE_CONTROL_CRTC_PROGRESSIVE_START_LINE_EARLY {
+ CRTC_START_LINE_CONTROL_CRTC_PROGRESSIVE_START_LINE_EARLY_FALSE= 0x0,
+ CRTC_START_LINE_CONTROL_CRTC_PROGRESSIVE_START_LINE_EARLY_TRUE= 0x1,
+} CRTC_START_LINE_CONTROL_CRTC_PROGRESSIVE_START_LINE_EARLY;
+typedef enum CRTC_START_LINE_CONTROL_CRTC_INTERLACE_START_LINE_EARLY {
+ CRTC_START_LINE_CONTROL_CRTC_INTERLACE_START_LINE_EARLY_FALSE= 0x0,
+ CRTC_START_LINE_CONTROL_CRTC_INTERLACE_START_LINE_EARLY_TRUE= 0x1,
+} CRTC_START_LINE_CONTROL_CRTC_INTERLACE_START_LINE_EARLY;
+typedef enum CRTC_START_LINE_CONTROL_CRTC_LEGACY_REQUESTOR_EN {
+ CRTC_START_LINE_CONTROL_CRTC_LEGACY_REQUESTOR_EN_FALSE= 0x0,
+ CRTC_START_LINE_CONTROL_CRTC_LEGACY_REQUESTOR_EN_TRUE= 0x1,
+} CRTC_START_LINE_CONTROL_CRTC_LEGACY_REQUESTOR_EN;
+typedef enum CRTC_START_LINE_CONTROL_CRTC_PREFETCH_EN {
+ CRTC_START_LINE_CONTROL_CRTC_PREFETCH_EN_FALSE = 0x0,
+ CRTC_START_LINE_CONTROL_CRTC_PREFETCH_EN_TRUE = 0x1,
+} CRTC_START_LINE_CONTROL_CRTC_PREFETCH_EN;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_MSK {
+ CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_MSK_FALSE= 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_MSK_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_TYPE {
+ CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_TYPE_FALSE= 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_TYPE_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_SNAPSHOT_INT_TYPE;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_MSK {
+ CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_MSK_FALSE= 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_MSK_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_TYPE {
+ CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_TYPE_FALSE= 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_TYPE_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_V_UPDATE_INT_TYPE;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_MSK {
+ CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_MSK_FALSE= 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_MSK_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_TYPE {
+ CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_TYPE_FALSE= 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_TYPE_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_FORCE_COUNT_NOW_INT_TYPE;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK {
+ CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK_FALSE= 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE {
+ CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE_FALSE= 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_MSK {
+ CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_MSK_FALSE = 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_MSK_TRUE = 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_TYPE {
+ CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_TYPE_FALSE = 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_TYPE_TRUE = 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_TRIGA_INT_TYPE;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_MSK {
+ CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_MSK_FALSE = 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_MSK_TRUE = 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_TYPE {
+ CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_TYPE_FALSE = 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_TYPE_TRUE = 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_TRIGB_INT_TYPE;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_MSK {
+ CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_MSK_FALSE= 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_MSK_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_TYPE {
+ CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_TYPE_FALSE= 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_TYPE_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_VSYNC_NOM_INT_TYPE;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_MSK {
+ CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_MSK_FALSE= 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_MSK_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_MSK;
+typedef enum CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_TYPE {
+ CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_TYPE_FALSE= 0x0,
+ CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_TYPE_TRUE= 0x1,
+} CRTC_INTERRUPT_CONTROL_CRTC_GSL_VSYNC_GAP_INT_TYPE;
+typedef enum CRTC_UPDATE_LOCK_CRTC_UPDATE_LOCK {
+ CRTC_UPDATE_LOCK_CRTC_UPDATE_LOCK_FALSE = 0x0,
+ CRTC_UPDATE_LOCK_CRTC_UPDATE_LOCK_TRUE = 0x1,
+} CRTC_UPDATE_LOCK_CRTC_UPDATE_LOCK;
+typedef enum CRTC_DOUBLE_BUFFER_CONTROL_CRTC_UPDATE_INSTANTLY {
+ CRTC_DOUBLE_BUFFER_CONTROL_CRTC_UPDATE_INSTANTLY_FALSE= 0x0,
+ CRTC_DOUBLE_BUFFER_CONTROL_CRTC_UPDATE_INSTANTLY_TRUE= 0x1,
+} CRTC_DOUBLE_BUFFER_CONTROL_CRTC_UPDATE_INSTANTLY;
+typedef enum CRTC_DOUBLE_BUFFER_CONTROL_CRTC_BLANK_DATA_DOUBLE_BUFFER_EN {
+ CRTC_DOUBLE_BUFFER_CONTROL_CRTC_BLANK_DATA_DOUBLE_BUFFER_EN_FALSE= 0x0,
+ CRTC_DOUBLE_BUFFER_CONTROL_CRTC_BLANK_DATA_DOUBLE_BUFFER_EN_TRUE= 0x1,
+} CRTC_DOUBLE_BUFFER_CONTROL_CRTC_BLANK_DATA_DOUBLE_BUFFER_EN;
+typedef enum CRTC_VGA_PARAMETER_CAPTURE_MODE_CRTC_VGA_PARAMETER_CAPTURE_MODE {
+ CRTC_VGA_PARAMETER_CAPTURE_MODE_CRTC_VGA_PARAMETER_CAPTURE_MODE_FALSE= 0x0,
+ CRTC_VGA_PARAMETER_CAPTURE_MODE_CRTC_VGA_PARAMETER_CAPTURE_MODE_TRUE= 0x1,
+} CRTC_VGA_PARAMETER_CAPTURE_MODE_CRTC_VGA_PARAMETER_CAPTURE_MODE;
+typedef enum CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_EN {
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_EN_FALSE= 0x0,
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_EN_TRUE= 0x1,
+} CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_EN;
+typedef enum CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE {
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_RGB= 0x0,
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_YCBCR601= 0x1,
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_YCBCR709= 0x2,
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_VBARS= 0x3,
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_HBARS= 0x4,
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_SRRGB= 0x5,
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_DRRGB= 0x6,
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_XRBIAS= 0x7,
+} CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE;
+typedef enum CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_DYNAMIC_RANGE {
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_DYNAMIC_RANGE_FALSE= 0x0,
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_DYNAMIC_RANGE_TRUE= 0x1,
+} CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_DYNAMIC_RANGE;
+typedef enum CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_COLOR_FORMAT {
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_COLOR_FORMAT_6BPC= 0x0,
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_COLOR_FORMAT_8BPC= 0x1,
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_COLOR_FORMAT_10BPC= 0x2,
+ CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_COLOR_FORMAT_RESERVED= 0x3,
+} CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_COLOR_FORMAT;
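/*
 * Illustrative sketch (standalone, not part of the generated header): enum
 * values such as CRTC_TEST_PATTERN_CONTROL_CRTC_TEST_PATTERN_MODE_VBARS
 * (0x3 above) are programmed into register bit-fields.  The real per-field
 * shift and mask macros live in a companion sh_mask header that is not part
 * of this hunk; the SHIFT/MASK constants here are hypothetical stand-ins
 * used only to show the usual read-modify-write pattern.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field position -- illustration only, not the real layout. */
#define TEST_PATTERN_MODE__SHIFT 8u
#define TEST_PATTERN_MODE_MASK (0x7u << TEST_PATTERN_MODE__SHIFT)

/* Replace one bit-field of a register word, leaving the other bits intact. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	/* 0x3 == ..._TEST_PATTERN_MODE_VBARS in the enum above. */
	reg = set_field(reg, TEST_PATTERN_MODE_MASK,
			TEST_PATTERN_MODE__SHIFT, 0x3);
	printf("reg = 0x%08x\n", reg); /* 0x00000300 with these stand-ins */
	return 0;
}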
+typedef enum MASTER_UPDATE_LOCK_MASTER_UPDATE_LOCK {
+ MASTER_UPDATE_LOCK_MASTER_UPDATE_LOCK_FALSE = 0x0,
+ MASTER_UPDATE_LOCK_MASTER_UPDATE_LOCK_TRUE = 0x1,
+} MASTER_UPDATE_LOCK_MASTER_UPDATE_LOCK;
+typedef enum MASTER_UPDATE_LOCK_GSL_CONTROL_MASTER_UPDATE_LOCK {
+ MASTER_UPDATE_LOCK_GSL_CONTROL_MASTER_UPDATE_LOCK_FALSE= 0x0,
+ MASTER_UPDATE_LOCK_GSL_CONTROL_MASTER_UPDATE_LOCK_TRUE= 0x1,
+} MASTER_UPDATE_LOCK_GSL_CONTROL_MASTER_UPDATE_LOCK;
+typedef enum MASTER_UPDATE_LOCK_UNDERFLOW_UPDATE_LOCK {
+ MASTER_UPDATE_LOCK_UNDERFLOW_UPDATE_LOCK_FALSE = 0x0,
+ MASTER_UPDATE_LOCK_UNDERFLOW_UPDATE_LOCK_TRUE = 0x1,
+} MASTER_UPDATE_LOCK_UNDERFLOW_UPDATE_LOCK;
+typedef enum MASTER_UPDATE_MODE_MASTER_UPDATE_MODE {
+ MASTER_UPDATE_MODE_MASTER_UPDATE_MODE_BETWEEN = 0x0,
+ MASTER_UPDATE_MODE_MASTER_UPDATE_MODE_HSYNCA = 0x1,
+ MASTER_UPDATE_MODE_MASTER_UPDATE_MODE_VSYNCA = 0x2,
+ MASTER_UPDATE_MODE_MASTER_UPDATE_MODE_BEFORE = 0x3,
+} MASTER_UPDATE_MODE_MASTER_UPDATE_MODE;
+typedef enum MASTER_UPDATE_MODE_MASTER_UPDATE_INTERLACED_MODE {
+ MASTER_UPDATE_MODE_MASTER_UPDATE_INTERLACED_MODE_BOTH= 0x0,
+ MASTER_UPDATE_MODE_MASTER_UPDATE_INTERLACED_MODE_EVEN= 0x1,
+ MASTER_UPDATE_MODE_MASTER_UPDATE_INTERLACED_MODE_ODD= 0x2,
+ MASTER_UPDATE_MODE_MASTER_UPDATE_INTERLACED_MODE_RESERVED= 0x3,
+} MASTER_UPDATE_MODE_MASTER_UPDATE_INTERLACED_MODE;
+typedef enum CRTC_MVP_INBAND_CNTL_INSERT_CRTC_MVP_INBAND_OUT_MODE {
+ CRTC_MVP_INBAND_CNTL_INSERT_CRTC_MVP_INBAND_OUT_MODE_DISABLE= 0x0,
+ CRTC_MVP_INBAND_CNTL_INSERT_CRTC_MVP_INBAND_OUT_MODE_DEBUG= 0x1,
+ CRTC_MVP_INBAND_CNTL_INSERT_CRTC_MVP_INBAND_OUT_MODE_NORMAL= 0x2,
+} CRTC_MVP_INBAND_CNTL_INSERT_CRTC_MVP_INBAND_OUT_MODE;
+typedef enum CRTC_MVP_STATUS_CRTC_FLIP_NOW_CLEAR {
+ CRTC_MVP_STATUS_CRTC_FLIP_NOW_CLEAR_FALSE = 0x0,
+ CRTC_MVP_STATUS_CRTC_FLIP_NOW_CLEAR_TRUE = 0x1,
+} CRTC_MVP_STATUS_CRTC_FLIP_NOW_CLEAR;
+typedef enum CRTC_MVP_STATUS_CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR {
+ CRTC_MVP_STATUS_CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR_FALSE= 0x0,
+ CRTC_MVP_STATUS_CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR_TRUE= 0x1,
+} CRTC_MVP_STATUS_CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR;
+typedef enum CRTC_V_UPDATE_INT_STATUS_CRTC_V_UPDATE_INT_CLEAR {
+ CRTC_V_UPDATE_INT_STATUS_CRTC_V_UPDATE_INT_CLEAR_FALSE= 0x0,
+ CRTC_V_UPDATE_INT_STATUS_CRTC_V_UPDATE_INT_CLEAR_TRUE= 0x1,
+} CRTC_V_UPDATE_INT_STATUS_CRTC_V_UPDATE_INT_CLEAR;
+typedef enum CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_OUTPUT_POLARITY {
+ CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_FALSE= 0x0,
+ CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_OUTPUT_POLARITY;
+typedef enum CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_ENABLE {
+ CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_FALSE= 0x0,
+ CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_ENABLE;
+typedef enum CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_CLEAR {
+ CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_CLEAR_FALSE= 0x0,
+ CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_CLEAR_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_CLEAR;
+typedef enum CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_TYPE {
+ CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_TYPE_FALSE= 0x0,
+ CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_TYPE_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT0_CONTROL_CRTC_VERTICAL_INTERRUPT0_INT_TYPE;
+typedef enum CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_CLEAR {
+ CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_CLEAR_CLEAR_FALSE= 0x0,
+ CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_CLEAR_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_CLEAR;
+typedef enum CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_ENABLE {
+ CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_ENABLE_FALSE= 0x0,
+ CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_ENABLE_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_ENABLE;
+typedef enum CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_TYPE {
+ CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_TYPE_FALSE= 0x0,
+ CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_TYPE_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT1_CONTROL_CRTC_VERTICAL_INTERRUPT1_INT_TYPE;
+typedef enum CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_CLEAR {
+ CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_CLEAR_CLEAR_FALSE= 0x0,
+ CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_CLEAR_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_CLEAR;
+typedef enum CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_ENABLE {
+ CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_ENABLE_FALSE= 0x0,
+ CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_ENABLE_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_ENABLE;
+typedef enum CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_TYPE {
+ CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_TYPE_FALSE= 0x0,
+ CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_TYPE_TRUE= 0x1,
+} CRTC_VERTICAL_INTERRUPT2_CONTROL_CRTC_VERTICAL_INTERRUPT2_INT_TYPE;
+typedef enum CRTC_CRC_CNTL_CRTC_CRC_EN {
+ CRTC_CRC_CNTL_CRTC_CRC_EN_FALSE = 0x0,
+ CRTC_CRC_CNTL_CRTC_CRC_EN_TRUE = 0x1,
+} CRTC_CRC_CNTL_CRTC_CRC_EN;
+typedef enum CRTC_CRC_CNTL_CRTC_CRC_CONT_EN {
+ CRTC_CRC_CNTL_CRTC_CRC_CONT_EN_FALSE = 0x0,
+ CRTC_CRC_CNTL_CRTC_CRC_CONT_EN_TRUE = 0x1,
+} CRTC_CRC_CNTL_CRTC_CRC_CONT_EN;
+typedef enum CRTC_CRC_CNTL_CRTC_CRC_STEREO_MODE {
+ CRTC_CRC_CNTL_CRTC_CRC_STEREO_MODE_LEFT = 0x0,
+ CRTC_CRC_CNTL_CRTC_CRC_STEREO_MODE_RIGHT = 0x1,
+ CRTC_CRC_CNTL_CRTC_CRC_STEREO_MODE_BOTH_EYES = 0x2,
+ CRTC_CRC_CNTL_CRTC_CRC_STEREO_MODE_BOTH_FIELDS = 0x3,
+} CRTC_CRC_CNTL_CRTC_CRC_STEREO_MODE;
+typedef enum CRTC_CRC_CNTL_CRTC_CRC_INTERLACE_MODE {
+ CRTC_CRC_CNTL_CRTC_CRC_INTERLACE_MODE_TOP = 0x0,
+ CRTC_CRC_CNTL_CRTC_CRC_INTERLACE_MODE_BOTTOM = 0x1,
+ CRTC_CRC_CNTL_CRTC_CRC_INTERLACE_MODE_BOTH_BOTTOM= 0x2,
+ CRTC_CRC_CNTL_CRTC_CRC_INTERLACE_MODE_BOTH_FIELD = 0x3,
+} CRTC_CRC_CNTL_CRTC_CRC_INTERLACE_MODE;
+typedef enum CRTC_CRC_CNTL_CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS {
+ CRTC_CRC_CNTL_CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS_FALSE= 0x0,
+ CRTC_CRC_CNTL_CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS_TRUE= 0x1,
+} CRTC_CRC_CNTL_CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS;
+typedef enum CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT {
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_UAB = 0x0,
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_UA_B = 0x1,
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_U_AB = 0x2,
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_U_A_B = 0x3,
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_IAB = 0x4,
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_IA_B = 0x5,
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_I_AB = 0x6,
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT_I_A_B = 0x7,
+} CRTC_CRC_CNTL_CRTC_CRTC_CRC0_SELECT;
+typedef enum CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT {
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_UAB = 0x0,
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_UA_B = 0x1,
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_U_AB = 0x2,
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_U_A_B = 0x3,
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_IAB = 0x4,
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_IA_B = 0x5,
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_I_AB = 0x6,
+ CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT_I_A_B = 0x7,
+} CRTC_CRC_CNTL_CRTC_CRTC_CRC1_SELECT;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_ENABLE {
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_ENABLE_DISABLE= 0x0,
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_ENABLE_ONESHOT= 0x1,
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_ENABLE_CONTINUOUS= 0x2,
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_ENABLE_RESERVED= 0x3,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_ENABLE;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HCOUNT_MODE_ENABLE {
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HCOUNT_MODE_ENABLE_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HCOUNT_MODE_ENABLE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HCOUNT_MODE_ENABLE;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_ENABLE {
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_ENABLE_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_ENABLE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_ENABLE;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW {
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW_1pixel= 0x0,
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW_2pixel= 0x1,
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW_3pixel= 0x2,
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW_4pixel= 0x3,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_ENABLE {
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_ENABLE_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_ENABLE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_ENABLE;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_UPDATE {
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_UPDATE_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_UPDATE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_WINDOW_UPDATE;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_VSYNC_POLARITY {
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_VSYNC_POLARITY_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_VSYNC_POLARITY_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_VSYNC_POLARITY;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HSYNC_POLARITY {
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HSYNC_POLARITY_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HSYNC_POLARITY_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_HSYNC_POLARITY;
+typedef enum CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_INTERLACE_MODE {
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_INTERLACE_MODE_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_INTERLACE_MODE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_CONTROL_CRTC_EXT_TIMING_SYNC_INTERLACE_MODE;
+typedef enum CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_ENABLE {
+ CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_ENABLE_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_ENABLE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_ENABLE;
+typedef enum CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_CLEAR {
+ CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_CLEAR_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_CLEAR_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_CLEAR;
+typedef enum CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_TYPE {
+ CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_TYPE_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_TYPE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_INT_TYPE;
+typedef enum CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT {
+ CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_1FRAME= 0x0,
+ CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_2FRAME= 0x1,
+ CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_4FRAME= 0x2,
+ CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_8FRAME= 0x3,
+ CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_16FRAME= 0x4,
+ CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_32FRAME= 0x5,
+ CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_64FRAME= 0x6,
+ CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_128FRAME= 0x7,
+} CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT;
+typedef enum CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_ENABLE {
+ CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_ENABLE_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_ENABLE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_ENABLE;
+typedef enum CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_CLEAR {
+ CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_CLEAR_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_CLEAR_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_CLEAR;
+typedef enum CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_TYPE {
+ CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_TYPE_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_TYPE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_INT_TYPE;
+typedef enum CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_ENABLE {
+ CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_ENABLE_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_ENABLE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_ENABLE;
+typedef enum CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_CLEAR {
+ CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_CLEAR_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_CLEAR_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_CLEAR;
+typedef enum CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_TYPE {
+ CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_TYPE_FALSE= 0x0,
+ CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_TYPE_TRUE= 0x1,
+} CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL_CRTC_EXT_TIMING_SYNC_SIGNAL_INT_TYPE;
+typedef enum CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_ENABLE {
+ CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_ENABLE_FALSE= 0x0,
+ CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_ENABLE_TRUE= 0x1,
+} CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_ENABLE;
+typedef enum CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_CLEAR {
+ CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_CLEAR_FALSE= 0x0,
+ CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_CLEAR_TRUE= 0x1,
+} CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_CLEAR;
+typedef enum CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_TYPE {
+ CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_TYPE_FALSE= 0x0,
+ CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_TYPE_TRUE= 0x1,
+} CRTC_STATIC_SCREEN_CONTROL_CRTC_CPU_SS_INT_TYPE;
+typedef enum CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE {
+ CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE_FALSE= 0x0,
+ CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE_TRUE= 0x1,
+} CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE;
+typedef enum CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE_VALUE {
+ CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE_VALUE_OFF= 0x0,
+ CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE_VALUE_ON= 0x1,
+} CRTC_STATIC_SCREEN_CONTROL_CRTC_STATIC_SCREEN_OVERRIDE_VALUE;
+typedef enum CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN {
+ CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN_FALSE= 0x0,
+ CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN_TRUE= 0x1,
+} CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN;
+typedef enum CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN_DB {
+ CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN_DB_FALSE= 0x0,
+ CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN_DB_TRUE= 0x1,
+} CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_EN_DB;
+typedef enum CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_V_UPDATE_MODE {
+ CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_V_UPDATE_MODE_BLOCK_BOTH= 0x0,
+ CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_V_UPDATE_MODE_BLOCK_INTERLACE= 0x1,
+ CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_V_UPDATE_MODE_BLOCK_PROGRESSIVE= 0x2,
+ CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_V_UPDATE_MODE_RESERVED= 0x3,
+} CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_V_UPDATE_MODE;
+typedef enum CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_STEREO_SEL_OVR {
+ CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_STEREO_SEL_OVR_FALSE= 0x0,
+ CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_STEREO_SEL_OVR_TRUE= 0x1,
+} CRTC_3D_STRUCTURE_CONTROL_CRTC_3D_STRUCTURE_STEREO_SEL_OVR;
+typedef enum CRTC_V_SYNC_A_POL {
+ CRTC_V_SYNC_A_POL_HIGH = 0x0,
+ CRTC_V_SYNC_A_POL_LOW = 0x1,
+} CRTC_V_SYNC_A_POL;
+typedef enum CRTC_H_SYNC_A_POL {
+ CRTC_H_SYNC_A_POL_HIGH = 0x0,
+ CRTC_H_SYNC_A_POL_LOW = 0x1,
+} CRTC_H_SYNC_A_POL;
+typedef enum CRTC_HORZ_REPETITION_COUNT {
+ CRTC_HORZ_REPETITION_COUNT_0 = 0x0,
+ CRTC_HORZ_REPETITION_COUNT_1 = 0x1,
+ CRTC_HORZ_REPETITION_COUNT_2 = 0x2,
+ CRTC_HORZ_REPETITION_COUNT_3 = 0x3,
+ CRTC_HORZ_REPETITION_COUNT_4 = 0x4,
+ CRTC_HORZ_REPETITION_COUNT_5 = 0x5,
+ CRTC_HORZ_REPETITION_COUNT_6 = 0x6,
+ CRTC_HORZ_REPETITION_COUNT_7 = 0x7,
+ CRTC_HORZ_REPETITION_COUNT_8 = 0x8,
+ CRTC_HORZ_REPETITION_COUNT_9 = 0x9,
+ CRTC_HORZ_REPETITION_COUNT_10 = 0xa,
+ CRTC_HORZ_REPETITION_COUNT_11 = 0xb,
+ CRTC_HORZ_REPETITION_COUNT_12 = 0xc,
+ CRTC_HORZ_REPETITION_COUNT_13 = 0xd,
+ CRTC_HORZ_REPETITION_COUNT_14 = 0xe,
+ CRTC_HORZ_REPETITION_COUNT_15 = 0xf,
+} CRTC_HORZ_REPETITION_COUNT;
+typedef enum PERFCOUNTER_CVALUE_SEL {
+ PERFCOUNTER_CVALUE_SEL_47_0 = 0x0,
+ PERFCOUNTER_CVALUE_SEL_15_0 = 0x1,
+ PERFCOUNTER_CVALUE_SEL_31_16 = 0x2,
+ PERFCOUNTER_CVALUE_SEL_47_32 = 0x3,
+ PERFCOUNTER_CVALUE_SEL_11_0 = 0x4,
+ PERFCOUNTER_CVALUE_SEL_23_12 = 0x5,
+ PERFCOUNTER_CVALUE_SEL_35_24 = 0x6,
+ PERFCOUNTER_CVALUE_SEL_47_36 = 0x7,
+} PERFCOUNTER_CVALUE_SEL;
+typedef enum PERFCOUNTER_INC_MODE {
+ PERFCOUNTER_INC_MODE_MULTI_BIT = 0x0,
+ PERFCOUNTER_INC_MODE_BOTH_EDGE = 0x1,
+ PERFCOUNTER_INC_MODE_LSB = 0x2,
+ PERFCOUNTER_INC_MODE_POS_EDGE = 0x3,
+} PERFCOUNTER_INC_MODE;
+typedef enum PERFCOUNTER_HW_CNTL_SEL {
+ PERFCOUNTER_HW_CNTL_SEL_RUNEN = 0x0,
+ PERFCOUNTER_HW_CNTL_SEL_CNTOFF = 0x1,
+} PERFCOUNTER_HW_CNTL_SEL;
+typedef enum PERFCOUNTER_RUNEN_MODE {
+ PERFCOUNTER_RUNEN_MODE_LEVEL = 0x0,
+ PERFCOUNTER_RUNEN_MODE_EDGE = 0x1,
+} PERFCOUNTER_RUNEN_MODE;
+typedef enum PERFCOUNTER_CNTOFF_START_DIS {
+ PERFCOUNTER_CNTOFF_START_ENABLE = 0x0,
+ PERFCOUNTER_CNTOFF_START_DISABLE = 0x1,
+} PERFCOUNTER_CNTOFF_START_DIS;
+typedef enum PERFCOUNTER_RESTART_EN {
+ PERFCOUNTER_RESTART_DISABLE = 0x0,
+ PERFCOUNTER_RESTART_ENABLE = 0x1,
+} PERFCOUNTER_RESTART_EN;
+typedef enum PERFCOUNTER_INT_EN {
+ PERFCOUNTER_INT_DISABLE = 0x0,
+ PERFCOUNTER_INT_ENABLE = 0x1,
+} PERFCOUNTER_INT_EN;
+typedef enum PERFCOUNTER_OFF_MASK {
+ PERFCOUNTER_OFF_MASK_DISABLE = 0x0,
+ PERFCOUNTER_OFF_MASK_ENABLE = 0x1,
+} PERFCOUNTER_OFF_MASK;
+typedef enum PERFCOUNTER_ACTIVE {
+ PERFCOUNTER_IS_IDLE = 0x0,
+ PERFCOUNTER_IS_ACTIVE = 0x1,
+} PERFCOUNTER_ACTIVE;
+typedef enum PERFCOUNTER_INT_TYPE {
+ PERFCOUNTER_INT_TYPE_LEVEL = 0x0,
+ PERFCOUNTER_INT_TYPE_PULSE = 0x1,
+} PERFCOUNTER_INT_TYPE;
+typedef enum PERFCOUNTER_COUNTED_VALUE_TYPE {
+ PERFCOUNTER_COUNTED_VALUE_TYPE_ACC = 0x0,
+ PERFCOUNTER_COUNTED_VALUE_TYPE_MAX = 0x1,
+} PERFCOUNTER_COUNTED_VALUE_TYPE;
+typedef enum PERFCOUNTER_CNTL_SEL {
+ PERFCOUNTER_CNTL_SEL_0 = 0x0,
+ PERFCOUNTER_CNTL_SEL_1 = 0x1,
+ PERFCOUNTER_CNTL_SEL_2 = 0x2,
+ PERFCOUNTER_CNTL_SEL_3 = 0x3,
+ PERFCOUNTER_CNTL_SEL_4 = 0x4,
+ PERFCOUNTER_CNTL_SEL_5 = 0x5,
+ PERFCOUNTER_CNTL_SEL_6 = 0x6,
+ PERFCOUNTER_CNTL_SEL_7 = 0x7,
+} PERFCOUNTER_CNTL_SEL;
+typedef enum PERFCOUNTER_CNT0_STATE {
+ PERFCOUNTER_CNT0_STATE_RESET = 0x0,
+ PERFCOUNTER_CNT0_STATE_START = 0x1,
+ PERFCOUNTER_CNT0_STATE_FREEZE = 0x2,
+ PERFCOUNTER_CNT0_STATE_HW = 0x3,
+} PERFCOUNTER_CNT0_STATE;
+typedef enum PERFCOUNTER_STATE_SEL0 {
+ PERFCOUNTER_STATE_SEL0_GLOBAL = 0x0,
+ PERFCOUNTER_STATE_SEL0_LOCAL = 0x1,
+} PERFCOUNTER_STATE_SEL0;
+typedef enum PERFCOUNTER_CNT1_STATE {
+ PERFCOUNTER_CNT1_STATE_RESET = 0x0,
+ PERFCOUNTER_CNT1_STATE_START = 0x1,
+ PERFCOUNTER_CNT1_STATE_FREEZE = 0x2,
+ PERFCOUNTER_CNT1_STATE_HW = 0x3,
+} PERFCOUNTER_CNT1_STATE;
+typedef enum PERFCOUNTER_STATE_SEL1 {
+ PERFCOUNTER_STATE_SEL1_GLOBAL = 0x0,
+ PERFCOUNTER_STATE_SEL1_LOCAL = 0x1,
+} PERFCOUNTER_STATE_SEL1;
+typedef enum PERFCOUNTER_CNT2_STATE {
+ PERFCOUNTER_CNT2_STATE_RESET = 0x0,
+ PERFCOUNTER_CNT2_STATE_START = 0x1,
+ PERFCOUNTER_CNT2_STATE_FREEZE = 0x2,
+ PERFCOUNTER_CNT2_STATE_HW = 0x3,
+} PERFCOUNTER_CNT2_STATE;
+typedef enum PERFCOUNTER_STATE_SEL2 {
+ PERFCOUNTER_STATE_SEL2_GLOBAL = 0x0,
+ PERFCOUNTER_STATE_SEL2_LOCAL = 0x1,
+} PERFCOUNTER_STATE_SEL2;
+typedef enum PERFCOUNTER_CNT3_STATE {
+ PERFCOUNTER_CNT3_STATE_RESET = 0x0,
+ PERFCOUNTER_CNT3_STATE_START = 0x1,
+ PERFCOUNTER_CNT3_STATE_FREEZE = 0x2,
+ PERFCOUNTER_CNT3_STATE_HW = 0x3,
+} PERFCOUNTER_CNT3_STATE;
+typedef enum PERFCOUNTER_STATE_SEL3 {
+ PERFCOUNTER_STATE_SEL3_GLOBAL = 0x0,
+ PERFCOUNTER_STATE_SEL3_LOCAL = 0x1,
+} PERFCOUNTER_STATE_SEL3;
+typedef enum PERFCOUNTER_CNT4_STATE {
+ PERFCOUNTER_CNT4_STATE_RESET = 0x0,
+ PERFCOUNTER_CNT4_STATE_START = 0x1,
+ PERFCOUNTER_CNT4_STATE_FREEZE = 0x2,
+ PERFCOUNTER_CNT4_STATE_HW = 0x3,
+} PERFCOUNTER_CNT4_STATE;
+typedef enum PERFCOUNTER_STATE_SEL4 {
+ PERFCOUNTER_STATE_SEL4_GLOBAL = 0x0,
+ PERFCOUNTER_STATE_SEL4_LOCAL = 0x1,
+} PERFCOUNTER_STATE_SEL4;
+typedef enum PERFCOUNTER_CNT5_STATE {
+ PERFCOUNTER_CNT5_STATE_RESET = 0x0,
+ PERFCOUNTER_CNT5_STATE_START = 0x1,
+ PERFCOUNTER_CNT5_STATE_FREEZE = 0x2,
+ PERFCOUNTER_CNT5_STATE_HW = 0x3,
+} PERFCOUNTER_CNT5_STATE;
+typedef enum PERFCOUNTER_STATE_SEL5 {
+ PERFCOUNTER_STATE_SEL5_GLOBAL = 0x0,
+ PERFCOUNTER_STATE_SEL5_LOCAL = 0x1,
+} PERFCOUNTER_STATE_SEL5;
+typedef enum PERFCOUNTER_CNT6_STATE {
+ PERFCOUNTER_CNT6_STATE_RESET = 0x0,
+ PERFCOUNTER_CNT6_STATE_START = 0x1,
+ PERFCOUNTER_CNT6_STATE_FREEZE = 0x2,
+ PERFCOUNTER_CNT6_STATE_HW = 0x3,
+} PERFCOUNTER_CNT6_STATE;
+typedef enum PERFCOUNTER_STATE_SEL6 {
+ PERFCOUNTER_STATE_SEL6_GLOBAL = 0x0,
+ PERFCOUNTER_STATE_SEL6_LOCAL = 0x1,
+} PERFCOUNTER_STATE_SEL6;
+typedef enum PERFCOUNTER_CNT7_STATE {
+ PERFCOUNTER_CNT7_STATE_RESET = 0x0,
+ PERFCOUNTER_CNT7_STATE_START = 0x1,
+ PERFCOUNTER_CNT7_STATE_FREEZE = 0x2,
+ PERFCOUNTER_CNT7_STATE_HW = 0x3,
+} PERFCOUNTER_CNT7_STATE;
+typedef enum PERFCOUNTER_STATE_SEL7 {
+ PERFCOUNTER_STATE_SEL7_GLOBAL = 0x0,
+ PERFCOUNTER_STATE_SEL7_LOCAL = 0x1,
+} PERFCOUNTER_STATE_SEL7;
+typedef enum PERFMON_STATE {
+ PERFMON_STATE_RESET = 0x0,
+ PERFMON_STATE_START = 0x1,
+ PERFMON_STATE_FREEZE = 0x2,
+ PERFMON_STATE_HW = 0x3,
+} PERFMON_STATE;
+typedef enum PERFMON_CNTOFF_AND_OR {
+ PERFMON_CNTOFF_OR = 0x0,
+ PERFMON_CNTOFF_AND = 0x1,
+} PERFMON_CNTOFF_AND_OR;
+typedef enum PERFMON_CNTOFF_INT_EN {
+ PERFMON_CNTOFF_INT_DISABLE = 0x0,
+ PERFMON_CNTOFF_INT_ENABLE = 0x1,
+} PERFMON_CNTOFF_INT_EN;
+typedef enum PERFMON_CNTOFF_INT_TYPE {
+ PERFMON_CNTOFF_INT_TYPE_LEVEL = 0x0,
+ PERFMON_CNTOFF_INT_TYPE_PULSE = 0x1,
+} PERFMON_CNTOFF_INT_TYPE;
+typedef enum ENABLE {
+ DISABLE_THE_FEATURE = 0x0,
+ ENABLE_THE_FEATURE = 0x1,
+} ENABLE;
+typedef enum ENABLE_CLOCK {
+ DISABLE_THE_CLOCK = 0x0,
+ ENABLE_THE_CLOCK = 0x1,
+} ENABLE_CLOCK;
+typedef enum FORCE_VBI {
+ FORCE_VBI_LOW = 0x0,
+ FORCE_VBI_HIGH = 0x1,
+} FORCE_VBI;
+typedef enum OVERRIDE_CGTT_SCLK {
+ OVERRIDE_CGTT_SCLK_NOOP = 0x0,
+ SET_OVERRIDE_CGTT_SCLK = 0x1,
+} OVERRIDE_CGTT_SCLK;
+typedef enum CLEAR_SMU_INTR {
+ SMU_INTR_STATUS_NOOP = 0x0,
+ SMU_INTR_STATUS_CLEAR = 0x1,
+} CLEAR_SMU_INTR;
+typedef enum STATIC_SCREEN_SMU_INTR {
+ STATIC_SCREEN_SMU_INTR_NOOP = 0x0,
+ SET_STATIC_SCREEN_SMU_INTR = 0x1,
+} STATIC_SCREEN_SMU_INTR;
+typedef enum JITTER_REMOVE_DISABLE {
+ ENABLE_JITTER_REMOVAL = 0x0,
+ DISABLE_JITTER_REMOVAL = 0x1,
+} JITTER_REMOVE_DISABLE;
+typedef enum DISABLE_CLOCK_GATING {
+ CLOCK_GATING_ENABLED = 0x0,
+ CLOCK_GATING_DISABLED = 0x1,
+} DISABLE_CLOCK_GATING;
+typedef enum DISABLE_CLOCK_GATING_IN_DCO {
+ CLOCK_GATING_ENABLED_IN_DCO = 0x0,
+ CLOCK_GATING_DISABLED_IN_DCO = 0x1,
+} DISABLE_CLOCK_GATING_IN_DCO;
+typedef enum DCCG_DEEP_COLOR_CNTL {
+ DCCG_DEEP_COLOR_DTO_DISABLE = 0x0,
+ DCCG_DEEP_COLOR_DTO_5_4_RATIO = 0x1,
+ DCCG_DEEP_COLOR_DTO_3_2_RATIO = 0x2,
+ DCCG_DEEP_COLOR_DTO_2_1_RATIO = 0x3,
+} DCCG_DEEP_COLOR_CNTL;
+typedef enum REFCLK_CLOCK_EN {
+ REFCLK_CLOCK_EN_PCIE_REFCLK = 0x0,
+ REFCLK_CLOCK_EN_ALLOW_SRC = 0x1,
+} REFCLK_CLOCK_EN;
+typedef enum REFCLK_SRC_SEL {
+ REFCLK_SRC_SEL_XTALIN = 0x0,
+ REFCLK_SRC_SEL_DISPPLL = 0x1,
+} REFCLK_SRC_SEL;
+typedef enum DPREFCLK_SRC_SEL {
+ DPREFCLK_SRC_SEL_CK = 0x0,
+ DPREFCLK_SRC_SEL_P0PLL = 0x1,
+ DPREFCLK_SRC_SEL_P1PLL = 0x2,
+ DPREFCLK_SRC_SEL_P2PLL = 0x3,
+ DPREFCLK_SRC_SEL_P3PLL = 0x4,
+} DPREFCLK_SRC_SEL;
+typedef enum XTAL_REF_SEL {
+ XTAL_REF_SEL_1X = 0x0,
+ XTAL_REF_SEL_2X = 0x1,
+} XTAL_REF_SEL;
+typedef enum XTAL_REF_CLOCK_SOURCE_SEL {
+ XTAL_REF_CLOCK_SOURCE_SEL_XTALIN = 0x0,
+ XTAL_REF_CLOCK_SOURCE_SEL_PPLL = 0x1,
+} XTAL_REF_CLOCK_SOURCE_SEL;
+typedef enum MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL {
+ MICROSECOND_TIME_BASE_CLOCK_IS_XTALIN = 0x0,
+ MICROSECOND_TIME_BASE_CLOCK_IS_PPLL_REFCLK = 0x1,
+} MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL;
+typedef enum ALLOW_SR_ON_TRANS_REQ {
+ ALLOW_SR_ON_TRANS_REQ_ENABLE = 0x0,
+ ALLOW_SR_ON_TRANS_REQ_DISABLE = 0x1,
+} ALLOW_SR_ON_TRANS_REQ;
+typedef enum MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL {
+ MILLISECOND_TIME_BASE_CLOCK_IS_XTALIN = 0x0,
+ MILLISECOND_TIME_BASE_CLOCK_IS_PPLL_REFCLK = 0x1,
+} MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL;
+typedef enum PIPE_PIXEL_RATE_SOURCE {
+ PIPE_PIXEL_RATE_SOURCE_P0PLL = 0x0,
+ PIPE_PIXEL_RATE_SOURCE_P1PLL = 0x1,
+ PIPE_PIXEL_RATE_SOURCE_P2PLL = 0x2,
+} PIPE_PIXEL_RATE_SOURCE;
+typedef enum PIPE_PHYPLL_PIXEL_RATE_SOURCE {
+ PIPE_PHYPLL_PIXEL_RATE_SOURCE_UNIPHYA = 0x0,
+ PIPE_PHYPLL_PIXEL_RATE_SOURCE_UNIPHYB = 0x1,
+ PIPE_PHYPLL_PIXEL_RATE_SOURCE_UNIPHYC = 0x2,
+ PIPE_PHYPLL_PIXEL_RATE_SOURCE_UNIPHYD = 0x3,
+ PIPE_PHYPLL_PIXEL_RATE_SOURCE_UNIPHYE = 0x4,
+ PIPE_PHYPLL_PIXEL_RATE_SOURCE_UNIPHYF = 0x5,
+ PIPE_PHYPLL_PIXEL_RATE_SOURCE_UNIPHYG = 0x6,
+} PIPE_PHYPLL_PIXEL_RATE_SOURCE;
+typedef enum PIPE_PIXEL_RATE_PLL_SOURCE {
+ PIPE_PIXEL_RATE_PLL_SOURCE_PHYPLL = 0x0,
+ PIPE_PIXEL_RATE_PLL_SOURCE_DISPPLL = 0x1,
+} PIPE_PIXEL_RATE_PLL_SOURCE;
+typedef enum DP_DTO_DS_DISABLE {
+ DP_DTO_DESPREAD_DISABLE = 0x0,
+ DP_DTO_DESPREAD_ENABLE = 0x1,
+} DP_DTO_DS_DISABLE;
+typedef enum CRTC_ADD_PIXEL {
+ CRTC_ADD_PIXEL_NOOP = 0x0,
+ CRTC_ADD_PIXEL_FORCE = 0x1,
+} CRTC_ADD_PIXEL;
+typedef enum CRTC_DROP_PIXEL {
+ CRTC_DROP_PIXEL_NOOP = 0x0,
+ CRTC_DROP_PIXEL_FORCE = 0x1,
+} CRTC_DROP_PIXEL;
+typedef enum SYMCLK_FE_FORCE_EN {
+ SYMCLK_FE_FORCE_EN_DISABLE = 0x0,
+ SYMCLK_FE_FORCE_EN_ENABLE = 0x1,
+} SYMCLK_FE_FORCE_EN;
+typedef enum SYMCLK_FE_FORCE_SRC {
+ SYMCLK_FE_FORCE_SRC_UNIPHYA = 0x0,
+ SYMCLK_FE_FORCE_SRC_UNIPHYB = 0x1,
+ SYMCLK_FE_FORCE_SRC_UNIPHYC = 0x2,
+ SYMCLK_FE_FORCE_SRC_UNIPHYD = 0x3,
+ SYMCLK_FE_FORCE_SRC_UNIPHYE = 0x4,
+ SYMCLK_FE_FORCE_SRC_UNIPHYF = 0x5,
+ SYMCLK_FE_FORCE_SRC_UNIPHYG = 0x6,
+} SYMCLK_FE_FORCE_SRC;
+typedef enum DPDBG_CLK_FORCE_EN {
+ DPDBG_CLK_FORCE_EN_DISABLE = 0x0,
+ DPDBG_CLK_FORCE_EN_ENABLE = 0x1,
+} DPDBG_CLK_FORCE_EN;
+typedef enum DVOACLK_COARSE_SKEW_CNTL {
+ DVOACLK_COARSE_SKEW_CNTL_NO_ADJUSTMENT = 0x0,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_1_STEP = 0x1,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_2_STEPS = 0x2,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_3_STEPS = 0x3,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_4_STEPS = 0x4,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_5_STEPS = 0x5,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_6_STEPS = 0x6,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_7_STEPS = 0x7,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_8_STEPS = 0x8,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_9_STEPS = 0x9,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_10_STEPS = 0xa,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_11_STEPS = 0xb,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_12_STEPS = 0xc,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_13_STEPS = 0xd,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_14_STEPS = 0xe,
+ DVOACLK_COARSE_SKEW_CNTL_DELAY_15_STEPS = 0xf,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_1_STEP = 0x10,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_2_STEPS = 0x11,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_3_STEPS = 0x12,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_4_STEPS = 0x13,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_5_STEPS = 0x14,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_6_STEPS = 0x15,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_7_STEPS = 0x16,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_8_STEPS = 0x17,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_9_STEPS = 0x18,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_10_STEPS = 0x19,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_11_STEPS = 0x1a,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_12_STEPS = 0x1b,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_13_STEPS = 0x1c,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_14_STEPS = 0x1d,
+ DVOACLK_COARSE_SKEW_CNTL_EARLY_15_STEPS = 0x1e,
+} DVOACLK_COARSE_SKEW_CNTL;
+typedef enum DVOACLK_FINE_SKEW_CNTL {
+ DVOACLK_FINE_SKEW_CNTL_NO_ADJUSTMENT = 0x0,
+ DVOACLK_FINE_SKEW_CNTL_DELAY_1_STEP = 0x1,
+ DVOACLK_FINE_SKEW_CNTL_DELAY_2_STEPS = 0x2,
+ DVOACLK_FINE_SKEW_CNTL_DELAY_3_STEPS = 0x3,
+ DVOACLK_FINE_SKEW_CNTL_EARLY_1_STEP = 0x4,
+ DVOACLK_FINE_SKEW_CNTL_EARLY_2_STEPS = 0x5,
+ DVOACLK_FINE_SKEW_CNTL_EARLY_3_STEPS = 0x6,
+ DVOACLK_FINE_SKEW_CNTL_EARLY_4_STEPS = 0x7,
+} DVOACLK_FINE_SKEW_CNTL;
+typedef enum DVOACLKD_IN_PHASE {
+ DVOACLKD_IN_OPPOSITE_PHASE_WITH_PCLK_DVO = 0x0,
+ DVOACLKD_IN_PHASE_WITH_PCLK_DVO = 0x1,
+} DVOACLKD_IN_PHASE;
+typedef enum DVOACLKC_IN_PHASE {
+ DVOACLKC_IN_OPPOSITE_PHASE_WITH_PCLK_DVO = 0x0,
+ DVOACLKC_IN_PHASE_WITH_PCLK_DVO = 0x1,
+} DVOACLKC_IN_PHASE;
+typedef enum DVOACLKC_MVP_IN_PHASE {
+ DVOACLKC_MVP_IN_OPPOSITE_PHASE_WITH_PCLK_DVO = 0x0,
+ DVOACLKC_MVP_IN_PHASE_WITH_PCLK_DVO = 0x1,
+} DVOACLKC_MVP_IN_PHASE;
+typedef enum DVOACLKC_MVP_SKEW_PHASE_OVERRIDE {
+ DVOACLKC_MVP_SKEW_PHASE_OVERRIDE_DISABLE = 0x0,
+ DVOACLKC_MVP_SKEW_PHASE_OVERRIDE_ENABLE = 0x1,
+} DVOACLKC_MVP_SKEW_PHASE_OVERRIDE;
+typedef enum MVP_CLK_SRC_SEL {
+ MVP_CLK_SRC_SEL_RSRV = 0x0,
+ MVP_CLK_SRC_SEL_IO_1 = 0x1,
+ MVP_CLK_SRC_SEL_IO_2 = 0x2,
+ MVP_CLK_SRC_SEL_REFCLK = 0x3,
+} MVP_CLK_SRC_SEL;
+typedef enum DCCG_AUDIO_DTO0_SOURCE_SEL {
+ DCCG_AUDIO_DTO0_SOURCE_SEL_CRTC0 = 0x0,
+ DCCG_AUDIO_DTO0_SOURCE_SEL_CRTC1 = 0x1,
+ DCCG_AUDIO_DTO0_SOURCE_SEL_CRTC2 = 0x2,
+ DCCG_AUDIO_DTO0_SOURCE_SEL_CRTC3 = 0x3,
+ DCCG_AUDIO_DTO0_SOURCE_SEL_CRTC4 = 0x4,
+ DCCG_AUDIO_DTO0_SOURCE_SEL_CRTC5 = 0x5,
+ DCCG_AUDIO_DTO0_SOURCE_SEL_RESERVED = 0x6,
+} DCCG_AUDIO_DTO0_SOURCE_SEL;
+typedef enum DCCG_AUDIO_DTO_SEL {
+ DCCG_AUDIO_DTO_SEL_AUDIO_DTO0 = 0x0,
+ DCCG_AUDIO_DTO_SEL_AUDIO_DTO1 = 0x1,
+ DCCG_AUDIO_DTO_SEL_NO_AUDIO_DTO = 0x2,
+} DCCG_AUDIO_DTO_SEL;
+typedef enum DCCG_AUDIO_DTO2_SOURCE_SEL {
+ DCCG_AUDIO_DTO2_SOURCE_SEL_AMCLK0 = 0x0,
+ DCCG_AUDIO_DTO2_SOURCE_SEL_AMCLK1 = 0x1,
+} DCCG_AUDIO_DTO2_SOURCE_SEL;
+typedef enum DCCG_AUDIO_DTO_USE_512FBR_DTO {
+ DCCG_AUDIO_DTO_USE_128FBR_FOR_DP = 0x0,
+ DCCG_AUDIO_DTO_USE_512FBR_FOR_DP = 0x1,
+} DCCG_AUDIO_DTO_USE_512FBR_DTO;
+typedef enum DCCG_DBG_EN {
+ DCCG_DBG_EN_DISABLE = 0x0,
+ DCCG_DBG_EN_ENABLE = 0x1,
+} DCCG_DBG_EN;
+typedef enum DCCG_DBG_BLOCK_SEL {
+ DCCG_DBG_BLOCK_SEL_DCCG = 0x0,
+ DCCG_DBG_BLOCK_SEL_PMON = 0x1,
+ DCCG_DBG_BLOCK_SEL_PMON2 = 0x2,
+} DCCG_DBG_BLOCK_SEL;
+typedef enum DCCG_DBG_CLOCK_SEL {
+ DCCG_DBG_CLOCK_SEL_DISPCLK = 0x0,
+ DCCG_DBG_CLOCK_SEL_SCLK = 0x1,
+ DCCG_DBG_CLOCK_SEL_MVPCLK = 0x2,
+ DCCG_DBG_CLOCK_SEL_DVOCLK = 0x3,
+ DCCG_DBG_CLOCK_SEL_DACCLK = 0x4,
+ DCCG_DBG_CLOCK_SEL_REFCLK = 0x5,
+ DCCG_DBG_CLOCK_SEL_SYMCLKA = 0x6,
+ DCCG_DBG_CLOCK_SEL_SYMCLKB = 0x7,
+ DCCG_DBG_CLOCK_SEL_SYMCLKC = 0x8,
+ DCCG_DBG_CLOCK_SEL_SYMCLKD = 0x9,
+ DCCG_DBG_CLOCK_SEL_SYMCLKE = 0xa,
+ DCCG_DBG_CLOCK_SEL_SYMCLKG = 0xb,
+ DCCG_DBG_CLOCK_SEL_SYMCLKF = 0xc,
+ DCCG_DBG_CLOCK_SEL_RSRV = 0xd,
+ DCCG_DBG_CLOCK_SEL_AOMCLK0 = 0xe,
+ DCCG_DBG_CLOCK_SEL_AOMCLK1 = 0xf,
+ DCCG_DBG_CLOCK_SEL_AOMCLK2 = 0x10,
+ DCCG_DBG_CLOCK_SEL_DPREFCLK = 0x11,
+ DCCG_DBG_CLOCK_SEL_UNB_DB_CLK = 0x12,
+ DCCG_DBG_CLOCK_SEL_DSICLK = 0x13,
+ DCCG_DBG_CLOCK_SEL_BYTECLK = 0x14,
+ DCCG_DBG_CLOCK_SEL_ESCCLK = 0x15,
+ DCCG_DBG_CLOCK_SEL_SYMCLKLPA = 0x16,
+ DCCG_DBG_CLOCK_SEL_SYMCLKLPB = 0x17,
+} DCCG_DBG_CLOCK_SEL;
+typedef enum DCCG_DBG_OUT_BLOCK_SEL {
+ DCCG_DBG_OUT_BLOCK_SEL_DCCG = 0x0,
+ DCCG_DBG_OUT_BLOCK_SEL_DCO = 0x1,
+ DCCG_DBG_OUT_BLOCK_SEL_DCIO = 0x2,
+ DCCG_DBG_OUT_BLOCK_SEL_DSI = 0x3,
+} DCCG_DBG_OUT_BLOCK_SEL;
+typedef enum DISPCLK_FREQ_RAMP_DONE {
+ DISPCLK_FREQ_RAMP_IN_PROGRESS = 0x0,
+ DISPCLK_FREQ_RAMP_COMPLETED = 0x1,
+} DISPCLK_FREQ_RAMP_DONE;
+typedef enum DCCG_FIFO_ERRDET_RESET {
+ DCCG_FIFO_ERRDET_RESET_NOOP = 0x0,
+ DCCG_FIFO_ERRDET_RESET_FORCE = 0x1,
+} DCCG_FIFO_ERRDET_RESET;
+typedef enum DCCG_FIFO_ERRDET_STATE {
+ DCCG_FIFO_ERRDET_STATE_DETECTION = 0x0,
+ DCCG_FIFO_ERRDET_STATE_CALIBRATION = 0x1,
+} DCCG_FIFO_ERRDET_STATE;
+typedef enum DCCG_FIFO_ERRDET_OVR_EN {
+ DCCG_FIFO_ERRDET_OVR_DISABLE = 0x0,
+ DCCG_FIFO_ERRDET_OVR_ENABLE = 0x1,
+} DCCG_FIFO_ERRDET_OVR_EN;
+typedef enum DISPCLK_CHG_FWD_CORR_DISABLE {
+ DISPCLK_CHG_FWD_CORR_ENABLE_AT_BEGINNING = 0x0,
+ DISPCLK_CHG_FWD_CORR_DISABLE_AT_BEGINNING = 0x1,
+} DISPCLK_CHG_FWD_CORR_DISABLE;
+typedef enum DC_MEM_GLOBAL_PWR_REQ_DIS {
+ DC_MEM_GLOBAL_PWR_REQ_ENABLE = 0x0,
+ DC_MEM_GLOBAL_PWR_REQ_DISABLE = 0x1,
+} DC_MEM_GLOBAL_PWR_REQ_DIS;
+typedef enum DCCG_PERF_RUN {
+ DCCG_PERF_RUN_NOOP = 0x0,
+ DCCG_PERF_RUN_START = 0x1,
+} DCCG_PERF_RUN;
+typedef enum DCCG_PERF_MODE_VSYNC {
+ DCCG_PERF_MODE_VSYNC_NOOP = 0x0,
+ DCCG_PERF_MODE_VSYNC_START = 0x1,
+} DCCG_PERF_MODE_VSYNC;
+typedef enum DCCG_PERF_MODE_HSYNC {
+ DCCG_PERF_MODE_HSYNC_NOOP = 0x0,
+ DCCG_PERF_MODE_HSYNC_START = 0x1,
+} DCCG_PERF_MODE_HSYNC;
+typedef enum DCCG_PERF_CRTC_SELECT {
+ DCCG_PERF_SEL_CRTC0 = 0x0,
+ DCCG_PERF_SEL_CRTC1 = 0x1,
+ DCCG_PERF_SEL_CRTC2 = 0x2,
+ DCCG_PERF_SEL_CRTC3 = 0x3,
+ DCCG_PERF_SEL_CRTC4 = 0x4,
+ DCCG_PERF_SEL_CRTC5 = 0x5,
+} DCCG_PERF_CRTC_SELECT;
+typedef enum CLOCK_BRANCH_SOFT_RESET {
+ CLOCK_BRANCH_SOFT_RESET_NOOP = 0x0,
+ CLOCK_BRANCH_SOFT_RESET_FORCE = 0x1,
+} CLOCK_BRANCH_SOFT_RESET;
+typedef enum PLL_CFG_IF_SOFT_RESET {
+ PLL_CFG_IF_SOFT_RESET_NOOP = 0x0,
+ PLL_CFG_IF_SOFT_RESET_FORCE = 0x1,
+} PLL_CFG_IF_SOFT_RESET;
+typedef enum DVO_ENABLE_RST {
+ DVO_ENABLE_RST_DISABLE = 0x0,
+ DVO_ENABLE_RST_ENABLE = 0x1,
+} DVO_ENABLE_RST;
+typedef enum LptNumBanks {
+ LPT_NUM_BANKS_2BANK = 0x0,
+ LPT_NUM_BANKS_4BANK = 0x1,
+ LPT_NUM_BANKS_8BANK = 0x2,
+ LPT_NUM_BANKS_16BANK = 0x3,
+ LPT_NUM_BANKS_32BANK = 0x4,
+} LptNumBanks;
+typedef enum DCIO_DC_GENERICA_SEL {
+ DCIO_GENERICA_SEL_DACA_STEREOSYNC = 0x0,
+ DCIO_GENERICA_SEL_STEREOSYNC = 0x1,
+ DCIO_GENERICA_SEL_DACA_PIXCLK = 0x2,
+ DCIO_GENERICA_SEL_DACB_PIXCLK = 0x3,
+ DCIO_GENERICA_SEL_DVOA_CTL3 = 0x4,
+ DCIO_GENERICA_SEL_P1_PLLCLK = 0x5,
+ DCIO_GENERICA_SEL_P2_PLLCLK = 0x6,
+ DCIO_GENERICA_SEL_DVOA_STEREOSYNC = 0x7,
+ DCIO_GENERICA_SEL_DACA_FIELD_NUMBER = 0x8,
+ DCIO_GENERICA_SEL_DACB_FIELD_NUMBER = 0x9,
+ DCIO_GENERICA_SEL_GENERICA_DCCG = 0xa,
+ DCIO_GENERICA_SEL_SYNCEN = 0xb,
+ DCIO_GENERICA_SEL_GENERICA_SCG = 0xc,
+ DCIO_GENERICA_SEL_RESERVED_VALUE13 = 0xd,
+ DCIO_GENERICA_SEL_RESERVED_VALUE14 = 0xe,
+ DCIO_GENERICA_SEL_RESERVED_VALUE15 = 0xf,
+ DCIO_GENERICA_SEL_GENERICA_DPRX = 0x10,
+ DCIO_GENERICA_SEL_GENERICB_DPRX = 0x11,
+} DCIO_DC_GENERICA_SEL;
+typedef enum DCIO_DC_GENERIC_UNIPHY_REFDIV_CLK_SEL {
+ DCIO_UNIPHYA_TEST_REFDIV_CLK = 0x0,
+ DCIO_UNIPHYB_TEST_REFDIV_CLK = 0x1,
+ DCIO_UNIPHYC_TEST_REFDIV_CLK = 0x2,
+ DCIO_UNIPHYD_TEST_REFDIV_CLK = 0x3,
+ DCIO_UNIPHYE_TEST_REFDIV_CLK = 0x4,
+ DCIO_UNIPHYF_TEST_REFDIV_CLK = 0x5,
+ DCIO_UNIPHYG_TEST_REFDIV_CLK = 0x6,
+ DCIO_UNIPHYLPA_TEST_REFDIV_CLK = 0x7,
+ DCIO_UNIPHYLPB_TEST_REFDIV_CLK = 0x8,
+} DCIO_DC_GENERIC_UNIPHY_REFDIV_CLK_SEL;
+typedef enum DCIO_DC_GENERIC_UNIPHY_FBDIV_CLK_SEL {
+ DCIO_UNIPHYA_FBDIV_CLK = 0x0,
+ DCIO_UNIPHYB_FBDIV_CLK = 0x1,
+ DCIO_UNIPHYC_FBDIV_CLK = 0x2,
+ DCIO_UNIPHYD_FBDIV_CLK = 0x3,
+ DCIO_UNIPHYE_FBDIV_CLK = 0x4,
+ DCIO_UNIPHYF_FBDIV_CLK = 0x5,
+ DCIO_UNIPHYG_FBDIV_CLK = 0x6,
+ DCIO_UNIPHYLPA_FBDIV_CLK = 0x7,
+ DCIO_UNIPHYLPB_FBDIV_CLK = 0x8,
+} DCIO_DC_GENERIC_UNIPHY_FBDIV_CLK_SEL;
+typedef enum DCIO_DC_GENERIC_UNIPHY_FBDIV_SSC_CLK_SEL {
+ DCIO_UNIPHYA_FBDIV_SSC_CLK = 0x0,
+ DCIO_UNIPHYB_FBDIV_SSC_CLK = 0x1,
+ DCIO_UNIPHYC_FBDIV_SSC_CLK = 0x2,
+ DCIO_UNIPHYD_FBDIV_SSC_CLK = 0x3,
+ DCIO_UNIPHYE_FBDIV_SSC_CLK = 0x4,
+ DCIO_UNIPHYF_FBDIV_SSC_CLK = 0x5,
+ DCIO_UNIPHYG_FBDIV_SSC_CLK = 0x6,
+ DCIO_UNIPHYLPA_FBDIV_SSC_CLK = 0x7,
+ DCIO_UNIPHYLPB_FBDIV_SSC_CLK = 0x8,
+} DCIO_DC_GENERIC_UNIPHY_FBDIV_SSC_CLK_SEL;
+typedef enum DCIO_DC_GENERIC_UNIPHY_FBDIV_CLK_DIV2_SEL {
+ DCIO_UNIPHYA_TEST_FBDIV_CLK_DIV2 = 0x0,
+ DCIO_UNIPHYB_TEST_FBDIV_CLK_DIV2 = 0x1,
+ DCIO_UNIPHYC_TEST_FBDIV_CLK_DIV2 = 0x2,
+ DCIO_UNIPHYD_TEST_FBDIV_CLK_DIV2 = 0x3,
+ DCIO_UNIPHYE_TEST_FBDIV_CLK_DIV2 = 0x4,
+ DCIO_UNIPHYF_TEST_FBDIV_CLK_DIV2 = 0x5,
+ DCIO_UNIPHYG_TEST_FBDIV_CLK_DIV2 = 0x6,
+ DCIO_UNIPHYLPA_TEST_FBDIV_CLK_DIV2 = 0x7,
+ DCIO_UNIPHYLPB_TEST_FBDIV_CLK_DIV2 = 0x8,
+} DCIO_DC_GENERIC_UNIPHY_FBDIV_CLK_DIV2_SEL;
+typedef enum DCIO_DC_GENERICB_SEL {
+ DCIO_GENERICB_SEL_DACA_STEREOSYNC = 0x0,
+ DCIO_GENERICB_SEL_STEREOSYNC = 0x1,
+ DCIO_GENERICB_SEL_DACA_PIXCLK = 0x2,
+ DCIO_GENERICB_SEL_DACB_PIXCLK = 0x3,
+ DCIO_GENERICB_SEL_DVOA_CTL3 = 0x4,
+ DCIO_GENERICB_SEL_P1_PLLCLK = 0x5,
+ DCIO_GENERICB_SEL_P2_PLLCLK = 0x6,
+ DCIO_GENERICB_SEL_DVOA_STEREOSYNC = 0x7,
+ DCIO_GENERICB_SEL_DACA_FIELD_NUMBER = 0x8,
+ DCIO_GENERICB_SEL_DACB_FIELD_NUMBER = 0x9,
+ DCIO_GENERICB_SEL_GENERICB_DCCG = 0xa,
+ DCIO_GENERICB_SEL_SYNCEN = 0xb,
+ DCIO_GENERICB_SEL_GENERICA_SCG = 0xc,
+ DCIO_GENERICB_SEL_RESERVED_VALUE13 = 0xd,
+ DCIO_GENERICB_SEL_RESERVED_VALUE14 = 0xe,
+ DCIO_GENERICB_SEL_RESERVED_VALUE15 = 0xf,
+} DCIO_DC_GENERICB_SEL;
+typedef enum DCIO_DC_PAD_EXTERN_SIG_SEL {
+ DCIO_DC_PAD_EXTERN_SIG_SEL_MVP = 0x0,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_VSYNCA = 0x1,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_GENLK_CLK = 0x2,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_GENLK_VSYNC = 0x3,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_GENERICA = 0x4,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_GENERICB = 0x5,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_GENERICC = 0x6,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_HPD1 = 0x7,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_HPD2 = 0x8,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_DDC1CLK = 0x9,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_DDC1DATA = 0xa,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_DDC2CLK = 0xb,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_DDC2DATA = 0xc,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_VHAD1 = 0xd,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_VHAD0 = 0xe,
+ DCIO_DC_PAD_EXTERN_SIG_SEL_VPHCTL = 0xf,
+} DCIO_DC_PAD_EXTERN_SIG_SEL;
+typedef enum DCIO_DC_PAD_EXTERN_SIG_MVP_PIXEL_SRC_STATUS {
+ DCIO_MVP_PIXEL_SRC_STATUS_HSYNCA = 0x0,
+ DCIO_MVP_PIXEL_SRC_STATUS_HSYNCA_DUPLICATE = 0x1,
+ DCIO_MVP_PIXEL_SRC_STATUS_CRTC = 0x2,
+ DCIO_MVP_PIXEL_SRC_STATUS_LB = 0x3,
+} DCIO_DC_PAD_EXTERN_SIG_MVP_PIXEL_SRC_STATUS;
+typedef enum DCIO_DC_REF_CLK_CNTL_HSYNCA_OUTPUT_SEL {
+ DCIO_HSYNCA_OUTPUT_SEL_DISABLE = 0x0,
+ DCIO_HSYNCA_OUTPUT_SEL_PPLL1 = 0x1,
+ DCIO_HSYNCA_OUTPUT_SEL_PPLL2 = 0x2,
+ DCIO_HSYNCA_OUTPUT_SEL_RESERVED = 0x3,
+} DCIO_DC_REF_CLK_CNTL_HSYNCA_OUTPUT_SEL;
+typedef enum DCIO_DC_REF_CLK_CNTL_GENLK_CLK_OUTPUT_SEL {
+ DCIO_GENLK_CLK_OUTPUT_SEL_DISABLE = 0x0,
+ DCIO_GENLK_CLK_OUTPUT_SEL_PPLL1 = 0x1,
+ DCIO_GENLK_CLK_OUTPUT_SEL_PPLL2 = 0x2,
+ DCIO_GENLK_CLK_OUTPUT_SEL_RESERVED_VALUE3 = 0x3,
+} DCIO_DC_REF_CLK_CNTL_GENLK_CLK_OUTPUT_SEL;
+typedef enum DCIO_DC_GPIO_VIP_DEBUG {
+ DCIO_DC_GPIO_VIP_DEBUG_NORMAL = 0x0,
+ DCIO_DC_GPIO_VIP_DEBUG_CG_BIG = 0x1,
+} DCIO_DC_GPIO_VIP_DEBUG;
+typedef enum DCIO_DC_GPIO_MACRO_DEBUG {
+ DCIO_DC_GPIO_MACRO_DEBUG_NORMAL = 0x0,
+ DCIO_DC_GPIO_MACRO_DEBUG_CHIP_BIF = 0x1,
+ DCIO_DC_GPIO_MACRO_DEBUG_RESERVED_VALUE2 = 0x2,
+ DCIO_DC_GPIO_MACRO_DEBUG_RESERVED_VALUE3 = 0x3,
+} DCIO_DC_GPIO_MACRO_DEBUG;
+typedef enum DCIO_DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL {
+ DCIO_DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL_NORMAL = 0x0,
+ DCIO_DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL_SWAP = 0x1,
+} DCIO_DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL;
+typedef enum DCIO_DC_GPIO_DEBUG_BUS_FLOP_EN {
+ DCIO_DC_GPIO_DEBUG_BUS_FLOP_EN_BYPASS = 0x0,
+ DCIO_DC_GPIO_DEBUG_BUS_FLOP_EN_ENABLE = 0x1,
+} DCIO_DC_GPIO_DEBUG_BUS_FLOP_EN;
+typedef enum DCIO_DC_GPIO_DEBUG_DPRX_LOOPBACK_ENABLE {
+ DCIO_DPRX_LOOPBACK_ENABLE_NORMAL = 0x0,
+ DCIO_DPRX_LOOPBACK_ENABLE_LOOP = 0x1,
+} DCIO_DC_GPIO_DEBUG_DPRX_LOOPBACK_ENABLE;
+typedef enum DCIO_UNIPHY_LINK_CNTL_MINIMUM_PIXVLD_LOW_DURATION {
+ DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_3_CLOCKS = 0x0,
+ DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_7_CLOCKS = 0x1,
+ DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_11_CLOCKS= 0x2,
+ DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_15_CLOCKS= 0x3,
+ DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_19_CLOCKS= 0x4,
+ DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_23_CLOCKS= 0x5,
+ DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_27_CLOCKS= 0x6,
+ DCIO_UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_31_CLOCKS= 0x7,
+} DCIO_UNIPHY_LINK_CNTL_MINIMUM_PIXVLD_LOW_DURATION;
+typedef enum DCIO_UNIPHY_LINK_CNTL_CHANNEL_INVERT {
+ DCIO_UNIPHY_CHANNEL_NO_INVERSION = 0x0,
+ DCIO_UNIPHY_CHANNEL_INVERTED = 0x1,
+} DCIO_UNIPHY_LINK_CNTL_CHANNEL_INVERT;
+typedef enum DCIO_UNIPHY_LINK_CNTL_ENABLE_HPD_MASK {
+ DCIO_UNIPHY_LINK_ENABLE_HPD_MASK_DISALLOW = 0x0,
+ DCIO_UNIPHY_LINK_ENABLE_HPD_MASK_ALLOW = 0x1,
+ DCIO_UNIPHY_LINK_ENABLE_HPD_MASK_ALLOW_DEBOUNCED = 0x2,
+ DCIO_UNIPHY_LINK_ENABLE_HPD_MASK_ALLOW_TOGGLE_FILTERED= 0x3,
+} DCIO_UNIPHY_LINK_CNTL_ENABLE_HPD_MASK;
+typedef enum DCIO_UNIPHY_CHANNEL_XBAR_SOURCE {
+ DCIO_UNIPHY_CHANNEL_XBAR_SOURCE_CH0 = 0x0,
+ DCIO_UNIPHY_CHANNEL_XBAR_SOURCE_CH1 = 0x1,
+ DCIO_UNIPHY_CHANNEL_XBAR_SOURCE_CH2 = 0x2,
+ DCIO_UNIPHY_CHANNEL_XBAR_SOURCE_CH3 = 0x3,
+} DCIO_UNIPHY_CHANNEL_XBAR_SOURCE;
+typedef enum DCIO_DC_DVODATA_CONFIG_VIP_MUX_EN {
+ DCIO_VIP_MUX_EN_DVO = 0x0,
+ DCIO_VIP_MUX_EN_VIP = 0x1,
+} DCIO_DC_DVODATA_CONFIG_VIP_MUX_EN;
+typedef enum DCIO_DC_DVODATA_CONFIG_VIP_ALTER_MAPPING_EN {
+ DCIO_VIP_ALTER_MAPPING_EN_DEFAULT = 0x0,
+ DCIO_VIP_ALTER_MAPPING_EN_ALTERNATIVE = 0x1,
+} DCIO_DC_DVODATA_CONFIG_VIP_ALTER_MAPPING_EN;
+typedef enum DCIO_DC_DVODATA_CONFIG_DVO_ALTER_MAPPING_EN {
+ DCIO_DVO_ALTER_MAPPING_EN_DEFAULT = 0x0,
+ DCIO_DVO_ALTER_MAPPING_EN_ALTERNATIVE = 0x1,
+} DCIO_DC_DVODATA_CONFIG_DVO_ALTER_MAPPING_EN;
+typedef enum DCIO_LVTMA_PWRSEQ_CNTL_DISABLE_SYNCEN_CONTROL_OF_TX_EN {
+ DCIO_LVTMA_PWRSEQ_DISABLE_SYNCEN_CONTROL_OF_TX_ENABLE= 0x0,
+ DCIO_LVTMA_PWRSEQ_DISABLE_SYNCEN_CONTROL_OF_TX_DISABLE= 0x1,
+} DCIO_LVTMA_PWRSEQ_CNTL_DISABLE_SYNCEN_CONTROL_OF_TX_EN;
+typedef enum DCIO_LVTMA_PWRSEQ_CNTL_TARGET_STATE {
+ DCIO_LVTMA_PWRSEQ_TARGET_STATE_LCD_OFF = 0x0,
+ DCIO_LVTMA_PWRSEQ_TARGET_STATE_LCD_ON = 0x1,
+} DCIO_LVTMA_PWRSEQ_CNTL_TARGET_STATE;
+typedef enum DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_SYNCEN_POL {
+ DCIO_LVTMA_SYNCEN_POL_NON_INVERT = 0x0,
+ DCIO_LVTMA_SYNCEN_POL_INVERT = 0x1,
+} DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_SYNCEN_POL;
+typedef enum DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_DIGON {
+ DCIO_LVTMA_DIGON_OFF = 0x0,
+ DCIO_LVTMA_DIGON_ON = 0x1,
+} DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_DIGON;
+typedef enum DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_DIGON_POL {
+ DCIO_LVTMA_DIGON_POL_NON_INVERT = 0x0,
+ DCIO_LVTMA_DIGON_POL_INVERT = 0x1,
+} DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_DIGON_POL;
+typedef enum DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_BLON {
+ DCIO_LVTMA_BLON_OFF = 0x0,
+ DCIO_LVTMA_BLON_ON = 0x1,
+} DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_BLON;
+typedef enum DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_BLON_POL {
+ DCIO_LVTMA_BLON_POL_NON_INVERT = 0x0,
+ DCIO_LVTMA_BLON_POL_INVERT = 0x1,
+} DCIO_LVTMA_PWRSEQ_CNTL_LVTMA_BLON_POL;
+typedef enum DCIO_LVTMA_PWRSEQ_DELAY2_LVTMA_VARY_BL_OVERRIDE_EN {
+ DCIO_LVTMA_VARY_BL_OVERRIDE_EN_BLON = 0x0,
+ DCIO_LVTMA_VARY_BL_OVERRIDE_EN_SEPARATE = 0x1,
+} DCIO_LVTMA_PWRSEQ_DELAY2_LVTMA_VARY_BL_OVERRIDE_EN;
+typedef enum DCIO_BL_PWM_CNTL_BL_PWM_FRACTIONAL_EN {
+ DCIO_BL_PWM_FRACTIONAL_DISABLE = 0x0,
+ DCIO_BL_PWM_FRACTIONAL_ENABLE = 0x1,
+} DCIO_BL_PWM_CNTL_BL_PWM_FRACTIONAL_EN;
+typedef enum DCIO_BL_PWM_CNTL_BL_PWM_EN {
+ DCIO_BL_PWM_DISABLE = 0x0,
+ DCIO_BL_PWM_ENABLE = 0x1,
+} DCIO_BL_PWM_CNTL_BL_PWM_EN;
+typedef enum DCIO_BL_PWM_CNTL2_DBG_BL_PWM_INPUT_REFCLK_SELECT {
+ DCIO_DBG_BL_PWM_INPUT_REFCLK_SELECT_NORMAL = 0x0,
+ DCIO_DBG_BL_PWM_INPUT_REFCLK_SELECT_DEBUG1 = 0x1,
+ DCIO_DBG_BL_PWM_INPUT_REFCLK_SELECT_DEBUG2 = 0x2,
+ DCIO_DBG_BL_PWM_INPUT_REFCLK_SELECT_DEBUG3 = 0x3,
+} DCIO_BL_PWM_CNTL2_DBG_BL_PWM_INPUT_REFCLK_SELECT;
+typedef enum DCIO_BL_PWM_CNTL2_BL_PWM_OVERRIDE_BL_OUT_ENABLE {
+ DCIO_BL_PWM_OVERRIDE_BL_OUT_DISABLE = 0x0,
+ DCIO_BL_PWM_OVERRIDE_BL_OUT_ENABLE = 0x1,
+} DCIO_BL_PWM_CNTL2_BL_PWM_OVERRIDE_BL_OUT_ENABLE;
+typedef enum DCIO_BL_PWM_CNTL2_BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN {
+ DCIO_BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN_NORMAL = 0x0,
+ DCIO_BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN_PWM = 0x1,
+} DCIO_BL_PWM_CNTL2_BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN;
+typedef enum DCIO_BL_PWM_GRP1_REG_LOCK {
+ DCIO_BL_PWM_GRP1_REG_LOCK_DISABLE = 0x0,
+ DCIO_BL_PWM_GRP1_REG_LOCK_ENABLE = 0x1,
+} DCIO_BL_PWM_GRP1_REG_LOCK;
+typedef enum DCIO_BL_PWM_GRP1_UPDATE_AT_FRAME_START {
+ DCIO_BL_PWM_GRP1_UPDATE_AT_FRAME_START_DISABLE = 0x0,
+ DCIO_BL_PWM_GRP1_UPDATE_AT_FRAME_START_ENABLE = 0x1,
+} DCIO_BL_PWM_GRP1_UPDATE_AT_FRAME_START;
+typedef enum DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL {
+ DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL_CONTROLLER1= 0x0,
+ DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL_CONTROLLER2= 0x1,
+ DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL_CONTROLLER3= 0x2,
+ DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL_CONTROLLER4= 0x3,
+ DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL_CONTROLLER5= 0x4,
+ DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL_CONTROLLER6= 0x5,
+} DCIO_BL_PWM_GRP1_FRAME_START_DISP_SEL;
+typedef enum DCIO_BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN {
+ DCIO_BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_BL_PWM = 0x0,
+ DCIO_BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_BL1_PWM= 0x1,
+} DCIO_BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN;
+typedef enum DCIO_BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN {
+ DCIO_BL_PWM_GRP1_IGNORE_MASTER_LOCK_ENABLE = 0x0,
+ DCIO_BL_PWM_GRP1_IGNORE_MASTER_LOCK_DISABLE = 0x1,
+} DCIO_BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN;
+typedef enum DCIO_GSL_SEL {
+ DCIO_GSL_SEL_GROUP_0 = 0x0,
+ DCIO_GSL_SEL_GROUP_1 = 0x1,
+ DCIO_GSL_SEL_GROUP_2 = 0x2,
+} DCIO_GSL_SEL;
+typedef enum DCIO_GENLK_CLK_GSL_MASK {
+ DCIO_GENLK_CLK_GSL_MASK_NO = 0x0,
+ DCIO_GENLK_CLK_GSL_MASK_TIMING = 0x1,
+ DCIO_GENLK_CLK_GSL_MASK_STEREO = 0x2,
+} DCIO_GENLK_CLK_GSL_MASK;
+typedef enum DCIO_GENLK_VSYNC_GSL_MASK {
+ DCIO_GENLK_VSYNC_GSL_MASK_NO = 0x0,
+ DCIO_GENLK_VSYNC_GSL_MASK_TIMING = 0x1,
+ DCIO_GENLK_VSYNC_GSL_MASK_STEREO = 0x2,
+} DCIO_GENLK_VSYNC_GSL_MASK;
+typedef enum DCIO_SWAPLOCK_A_GSL_MASK {
+ DCIO_SWAPLOCK_A_GSL_MASK_NO = 0x0,
+ DCIO_SWAPLOCK_A_GSL_MASK_TIMING = 0x1,
+ DCIO_SWAPLOCK_A_GSL_MASK_STEREO = 0x2,
+} DCIO_SWAPLOCK_A_GSL_MASK;
+typedef enum DCIO_SWAPLOCK_B_GSL_MASK {
+ DCIO_SWAPLOCK_B_GSL_MASK_NO = 0x0,
+ DCIO_SWAPLOCK_B_GSL_MASK_TIMING = 0x1,
+ DCIO_SWAPLOCK_B_GSL_MASK_STEREO = 0x2,
+} DCIO_SWAPLOCK_B_GSL_MASK;
+typedef enum DCIO_GSL_VSYNC_SEL {
+ DCIO_GSL_VSYNC_SEL_PIPE0 = 0x0,
+ DCIO_GSL_VSYNC_SEL_PIPE1 = 0x1,
+ DCIO_GSL_VSYNC_SEL_PIPE2 = 0x2,
+ DCIO_GSL_VSYNC_SEL_PIPE3 = 0x3,
+ DCIO_GSL_VSYNC_SEL_PIPE4 = 0x4,
+ DCIO_GSL_VSYNC_SEL_PIPE5 = 0x5,
+} DCIO_GSL_VSYNC_SEL;
+typedef enum DCIO_GSL0_TIMING_SYNC_SEL {
+ DCIO_GSL0_TIMING_SYNC_SEL_PIPE = 0x0,
+ DCIO_GSL0_TIMING_SYNC_SEL_GENCLK_VSYNC = 0x1,
+ DCIO_GSL0_TIMING_SYNC_SEL_GENCLK_CLK = 0x2,
+ DCIO_GSL0_TIMING_SYNC_SEL_SWAPLOCK_A = 0x3,
+ DCIO_GSL0_TIMING_SYNC_SEL_SWAPLOCK_B = 0x4,
+} DCIO_GSL0_TIMING_SYNC_SEL;
+typedef enum DCIO_GSL0_GLOBAL_UNLOCK_SEL {
+ DCIO_GSL0_GLOBAL_UNLOCK_SEL_INVERSION = 0x0,
+ DCIO_GSL0_GLOBAL_UNLOCK_SEL_GENCLK_VSYNC = 0x1,
+ DCIO_GSL0_GLOBAL_UNLOCK_SEL_GENLK_CLK = 0x2,
+ DCIO_GSL0_GLOBAL_UNLOCK_SEL_SWAPLOCK_A = 0x3,
+ DCIO_GSL0_GLOBAL_UNLOCK_SEL_SWAPLOCK_B = 0x4,
+} DCIO_GSL0_GLOBAL_UNLOCK_SEL;
+typedef enum DCIO_GSL1_TIMING_SYNC_SEL {
+ DCIO_GSL1_TIMING_SYNC_SEL_PIPE = 0x0,
+ DCIO_GSL1_TIMING_SYNC_SEL_GENCLK_VSYNC = 0x1,
+ DCIO_GSL1_TIMING_SYNC_SEL_GENCLK_CLK = 0x2,
+ DCIO_GSL1_TIMING_SYNC_SEL_SWAPLOCK_A = 0x3,
+ DCIO_GSL1_TIMING_SYNC_SEL_SWAPLOCK_B = 0x4,
+} DCIO_GSL1_TIMING_SYNC_SEL;
+typedef enum DCIO_GSL1_GLOBAL_UNLOCK_SEL {
+ DCIO_GSL1_GLOBAL_UNLOCK_SEL_INVERSION = 0x0,
+ DCIO_GSL1_GLOBAL_UNLOCK_SEL_GENCLK_VSYNC = 0x1,
+ DCIO_GSL1_GLOBAL_UNLOCK_SEL_GENLK_CLK = 0x2,
+ DCIO_GSL1_GLOBAL_UNLOCK_SEL_SWAPLOCK_A = 0x3,
+ DCIO_GSL1_GLOBAL_UNLOCK_SEL_SWAPLOCK_B = 0x4,
+} DCIO_GSL1_GLOBAL_UNLOCK_SEL;
+typedef enum DCIO_GSL2_TIMING_SYNC_SEL {
+ DCIO_GSL2_TIMING_SYNC_SEL_PIPE = 0x0,
+ DCIO_GSL2_TIMING_SYNC_SEL_GENCLK_VSYNC = 0x1,
+ DCIO_GSL2_TIMING_SYNC_SEL_GENCLK_CLK = 0x2,
+ DCIO_GSL2_TIMING_SYNC_SEL_SWAPLOCK_A = 0x3,
+ DCIO_GSL2_TIMING_SYNC_SEL_SWAPLOCK_B = 0x4,
+} DCIO_GSL2_TIMING_SYNC_SEL;
+typedef enum DCIO_GSL2_GLOBAL_UNLOCK_SEL {
+ DCIO_GSL2_GLOBAL_UNLOCK_SEL_INVERSION = 0x0,
+ DCIO_GSL2_GLOBAL_UNLOCK_SEL_GENCLK_VSYNC = 0x1,
+ DCIO_GSL2_GLOBAL_UNLOCK_SEL_GENLK_CLK = 0x2,
+ DCIO_GSL2_GLOBAL_UNLOCK_SEL_SWAPLOCK_A = 0x3,
+ DCIO_GSL2_GLOBAL_UNLOCK_SEL_SWAPLOCK_B = 0x4,
+} DCIO_GSL2_GLOBAL_UNLOCK_SEL;
+typedef enum DCIO_DC_GPU_TIMER_START_POSITION {
+ DCIO_GPU_TIMER_START_0_END_27 = 0x0,
+ DCIO_GPU_TIMER_START_1_END_28 = 0x1,
+ DCIO_GPU_TIMER_START_2_END_29 = 0x2,
+ DCIO_GPU_TIMER_START_3_END_30 = 0x3,
+ DCIO_GPU_TIMER_START_4_END_31 = 0x4,
+ DCIO_GPU_TIMER_START_6_END_33 = 0x5,
+ DCIO_GPU_TIMER_START_8_END_35 = 0x6,
+ DCIO_GPU_TIMER_START_10_END_37 = 0x7,
+} DCIO_DC_GPU_TIMER_START_POSITION;
+typedef enum DCIO_CLOCK_CNTL_DCIO_TEST_CLK_SEL {
+ DCIO_TEST_CLK_SEL_DISPCLK = 0x0,
+ DCIO_TEST_CLK_SEL_GATED_DISPCLK = 0x1,
+ DCIO_TEST_CLK_SEL_SCLK = 0x2,
+} DCIO_CLOCK_CNTL_DCIO_TEST_CLK_SEL;
+typedef enum DCIO_CLOCK_CNTL_DISPCLK_R_DCIO_GATE_DIS {
+ DCIO_DISPCLK_R_DCIO_GATE_DISABLE = 0x0,
+ DCIO_DISPCLK_R_DCIO_GATE_ENABLE = 0x1,
+} DCIO_CLOCK_CNTL_DISPCLK_R_DCIO_GATE_DIS;
+typedef enum DCIO_DCO_DCFE_EXT_VSYNC_MUX {
+ DCIO_EXT_VSYNC_MUX_SWAPLOCKB = 0x0,
+ DCIO_EXT_VSYNC_MUX_CRTC0 = 0x1,
+ DCIO_EXT_VSYNC_MUX_CRTC1 = 0x2,
+ DCIO_EXT_VSYNC_MUX_CRTC2 = 0x3,
+ DCIO_EXT_VSYNC_MUX_CRTC3 = 0x4,
+ DCIO_EXT_VSYNC_MUX_CRTC4 = 0x5,
+ DCIO_EXT_VSYNC_MUX_CRTC5 = 0x6,
+ DCIO_EXT_VSYNC_MUX_GENERICB = 0x7,
+} DCIO_DCO_DCFE_EXT_VSYNC_MUX;
+typedef enum DCIO_DCO_EXT_VSYNC_MASK {
+ DCIO_EXT_VSYNC_MASK_NONE = 0x0,
+ DCIO_EXT_VSYNC_MASK_PIPE0 = 0x1,
+ DCIO_EXT_VSYNC_MASK_PIPE1 = 0x2,
+ DCIO_EXT_VSYNC_MASK_PIPE2 = 0x3,
+ DCIO_EXT_VSYNC_MASK_PIPE3 = 0x4,
+ DCIO_EXT_VSYNC_MASK_PIPE4 = 0x5,
+ DCIO_EXT_VSYNC_MASK_PIPE5 = 0x6,
+ DCIO_EXT_VSYNC_MASK_NONE_DUPLICATE = 0x7,
+} DCIO_DCO_EXT_VSYNC_MASK;
+typedef enum DCIO_DBG_OUT_PIN_SEL {
+ DCIO_DBG_OUT_PIN_SEL_LOW_12BIT = 0x0,
+ DCIO_DBG_OUT_PIN_SEL_HIGH_12BIT = 0x1,
+} DCIO_DBG_OUT_PIN_SEL;
+typedef enum DCIO_DBG_OUT_12BIT_SEL {
+ DCIO_DBG_OUT_12BIT_SEL_LOW_12BIT = 0x0,
+ DCIO_DBG_OUT_12BIT_SEL_MID_12BIT = 0x1,
+ DCIO_DBG_OUT_12BIT_SEL_HIGH_12BIT = 0x2,
+ DCIO_DBG_OUT_12BIT_SEL_OVERRIDE = 0x3,
+} DCIO_DBG_OUT_12BIT_SEL;
+typedef enum DCIO_DSYNC_SOFT_RESET {
+ DCIO_DSYNC_SOFT_RESET_DEASSERT = 0x0,
+ DCIO_DSYNC_SOFT_RESET_ASSERT = 0x1,
+} DCIO_DSYNC_SOFT_RESET;
+typedef enum DCIO_DACA_SOFT_RESET {
+ DCIO_DACA_SOFT_RESET_DEASSERT = 0x0,
+ DCIO_DACA_SOFT_RESET_ASSERT = 0x1,
+} DCIO_DACA_SOFT_RESET;
+typedef enum DCIO_DCRXPHY_SOFT_RESET {
+ DCIO_DCRXPHY_SOFT_RESET_DEASSERT = 0x0,
+ DCIO_DCRXPHY_SOFT_RESET_ASSERT = 0x1,
+} DCIO_DCRXPHY_SOFT_RESET;
+typedef enum DCIO_DPHY_LANE_SEL {
+ DCIO_DPHY_LANE_SEL_LANE0 = 0x0,
+ DCIO_DPHY_LANE_SEL_LANE1 = 0x1,
+ DCIO_DPHY_LANE_SEL_LANE2 = 0x2,
+ DCIO_DPHY_LANE_SEL_LANE3 = 0x3,
+} DCIO_DPHY_LANE_SEL;
+typedef enum DCIO_DPCS_INTERRUPT_TYPE {
+ DCIO_DPCS_INTERRUPT_TYPE_LEVEL_BASED = 0x0,
+ DCIO_DPCS_INTERRUPT_TYPE_PULSE_BASED = 0x1,
+} DCIO_DPCS_INTERRUPT_TYPE;
+typedef enum DCIO_DPCS_INTERRUPT_MASK {
+ DCIO_DPCS_INTERRUPT_DISABLE = 0x0,
+ DCIO_DPCS_INTERRUPT_ENABLE = 0x1,
+} DCIO_DPCS_INTERRUPT_MASK;
+typedef enum DCIO_DC_GPU_TIMER_READ_SELECT {
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D1_V_UPDATE = 0x0,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D1_V_UPDATE = 0x1,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D2_V_UPDATE = 0x2,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D2_V_UPDATE = 0x3,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D3_V_UPDATE = 0x4,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D3_V_UPDATE = 0x5,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D4_V_UPDATE = 0x6,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D4_V_UPDATE = 0x7,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D5_V_UPDATE = 0x8,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D5_V_UPDATE = 0x9,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D6_V_UPDATE = 0xa,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D6_V_UPDATE = 0xb,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D1_P_FLIP = 0xc,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D1_P_FLIP = 0xd,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D2_P_FLIP = 0xe,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D2_P_FLIP = 0xf,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D3_P_FLIP = 0x10,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D3_P_FLIP = 0x11,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D4_P_FLIP = 0x12,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D4_P_FLIP = 0x13,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D5_P_FLIP = 0x14,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D5_P_FLIP = 0x15,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D6_P_FLIP = 0x16,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D6_P_FLIP = 0x17,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D1_VSYNC_NOM = 0x18,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D1_VSYNC_NOM = 0x19,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D2_VSYNC_NOM = 0x1a,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D2_VSYNC_NOM = 0x1b,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D3_VSYNC_NOM = 0x1c,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D3_VSYNC_NOM = 0x1d,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D4_VSYNC_NOM = 0x1e,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D4_VSYNC_NOM = 0x1f,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D5_VSYNC_NOM = 0x20,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D5_VSYNC_NOM = 0x21,
+ DCIO_GPU_TIMER_READ_SELECT_LOWER_D6_VSYNC_NOM = 0x22,
+ DCIO_GPU_TIMER_READ_SELECT_UPPER_D6_VSYNC_NOM = 0x23,
+} DCIO_DC_GPU_TIMER_READ_SELECT;
+typedef enum DCIO_IMPCAL_STEP_DELAY {
+ DCIO_IMPCAL_STEP_DELAY_1us = 0x0,
+ DCIO_IMPCAL_STEP_DELAY_2us = 0x1,
+ DCIO_IMPCAL_STEP_DELAY_3us = 0x2,
+ DCIO_IMPCAL_STEP_DELAY_4us = 0x3,
+ DCIO_IMPCAL_STEP_DELAY_5us = 0x4,
+ DCIO_IMPCAL_STEP_DELAY_6us = 0x5,
+ DCIO_IMPCAL_STEP_DELAY_7us = 0x6,
+ DCIO_IMPCAL_STEP_DELAY_8us = 0x7,
+ DCIO_IMPCAL_STEP_DELAY_9us = 0x8,
+ DCIO_IMPCAL_STEP_DELAY_10us = 0x9,
+ DCIO_IMPCAL_STEP_DELAY_11us = 0xa,
+ DCIO_IMPCAL_STEP_DELAY_12us = 0xb,
+ DCIO_IMPCAL_STEP_DELAY_13us = 0xc,
+ DCIO_IMPCAL_STEP_DELAY_14us = 0xd,
+ DCIO_IMPCAL_STEP_DELAY_15us = 0xe,
+ DCIO_IMPCAL_STEP_DELAY_16us = 0xf,
+} DCIO_IMPCAL_STEP_DELAY;
+typedef enum DCIO_UNIPHY_IMPCAL_SEL {
+ DCIO_UNIPHY_IMPCAL_SEL_TEMPERATURE = 0x0,
+ DCIO_UNIPHY_IMPCAL_SEL_BINARY = 0x1,
+} DCIO_UNIPHY_IMPCAL_SEL;
+typedef enum DCIO_DBG_CLOCK_SEL {
+ DCIO_DBG_CLOCK_SEL_DISPCLK = 0x0,
+ DCIO_DBG_CLOCK_SEL_SYMCLKA = 0x1,
+ DCIO_DBG_CLOCK_SEL_SYMCLKB = 0x2,
+ DCIO_DBG_CLOCK_SEL_SYMCLKC = 0x3,
+ DCIO_DBG_CLOCK_SEL_SYMCLKD = 0x4,
+ DCIO_DBG_CLOCK_SEL_SYMCLKE = 0x5,
+ DCIO_DBG_CLOCK_SEL_SYMCLKF = 0x6,
+ DCIO_DBG_CLOCK_SEL_REFCLK = 0xb,
+} DCIO_DBG_CLOCK_SEL;
+typedef enum DCIOCHIP_HPD_SEL {
+ DCIOCHIP_HPD_SEL_ASYNC = 0x0,
+ DCIOCHIP_HPD_SEL_CLOCKED = 0x1,
+} DCIOCHIP_HPD_SEL;
+typedef enum DCIOCHIP_PAD_MODE {
+ DCIOCHIP_PAD_MODE_DDC = 0x0,
+ DCIOCHIP_PAD_MODE_DP = 0x1,
+} DCIOCHIP_PAD_MODE;
+typedef enum DCIOCHIP_AUXSLAVE_PAD_MODE {
+ DCIOCHIP_AUXSLAVE_PAD_MODE_I2C = 0x0,
+ DCIOCHIP_AUXSLAVE_PAD_MODE_AUX = 0x1,
+} DCIOCHIP_AUXSLAVE_PAD_MODE;
+typedef enum DCIOCHIP_INVERT {
+ DCIOCHIP_POL_NON_INVERT = 0x0,
+ DCIOCHIP_POL_INVERT = 0x1,
+} DCIOCHIP_INVERT;
+typedef enum DCIOCHIP_PD_EN {
+ DCIOCHIP_PD_EN_NOTALLOW = 0x0,
+ DCIOCHIP_PD_EN_ALLOW = 0x1,
+} DCIOCHIP_PD_EN;
+typedef enum DCIOCHIP_GPIO_MASK_EN {
+ DCIOCHIP_GPIO_MASK_EN_HARDWARE = 0x0,
+ DCIOCHIP_GPIO_MASK_EN_SOFTWARE = 0x1,
+} DCIOCHIP_GPIO_MASK_EN;
+typedef enum DCIOCHIP_MASK {
+ DCIOCHIP_MASK_DISABLE = 0x0,
+ DCIOCHIP_MASK_ENABLE = 0x1,
+} DCIOCHIP_MASK;
+typedef enum DCIOCHIP_GPIO_I2C_MASK {
+ DCIOCHIP_GPIO_I2C_MASK_DISABLE = 0x0,
+ DCIOCHIP_GPIO_I2C_MASK_ENABLE = 0x1,
+} DCIOCHIP_GPIO_I2C_MASK;
+typedef enum DCIOCHIP_GPIO_I2C_DRIVE {
+ DCIOCHIP_GPIO_I2C_DRIVE_LOW = 0x0,
+ DCIOCHIP_GPIO_I2C_DRIVE_HIGH = 0x1,
+} DCIOCHIP_GPIO_I2C_DRIVE;
+typedef enum DCIOCHIP_GPIO_I2C_EN {
+ DCIOCHIP_GPIO_I2C_DISABLE = 0x0,
+ DCIOCHIP_GPIO_I2C_ENABLE = 0x1,
+} DCIOCHIP_GPIO_I2C_EN;
+typedef enum DCIOCHIP_MASK_4BIT {
+ DCIOCHIP_MASK_4BIT_DISABLE = 0x0,
+ DCIOCHIP_MASK_4BIT_ENABLE = 0xf,
+} DCIOCHIP_MASK_4BIT;
+typedef enum DCIOCHIP_ENABLE_4BIT {
+ DCIOCHIP_4BIT_DISABLE = 0x0,
+ DCIOCHIP_4BIT_ENABLE = 0xf,
+} DCIOCHIP_ENABLE_4BIT;
+typedef enum DCIOCHIP_MASK_5BIT {
+ DCIOCHIP_MASK_5BIT_DISABLE = 0x0,
+ DCIOCHIP_MASK_5BIT_ENABLE = 0x1f,
+} DCIOCHIP_MASK_5BIT;
+typedef enum DCIOCHIP_ENABLE_5BIT {
+ DCIOCHIP_5BIT_DISABLE = 0x0,
+ DCIOCHIP_5BIT_ENABLE = 0x1f,
+} DCIOCHIP_ENABLE_5BIT;
+typedef enum DCIOCHIP_MASK_2BIT {
+ DCIOCHIP_MASK_2BIT_DISABLE = 0x0,
+ DCIOCHIP_MASK_2BIT_ENABLE = 0x3,
+} DCIOCHIP_MASK_2BIT;
+typedef enum DCIOCHIP_ENABLE_2BIT {
+ DCIOCHIP_2BIT_DISABLE = 0x0,
+ DCIOCHIP_2BIT_ENABLE = 0x3,
+} DCIOCHIP_ENABLE_2BIT;
+typedef enum DCIOCHIP_REF_27_SRC_SEL {
+ DCIOCHIP_REF_27_SRC_SEL_XTAL_DIVIDER = 0x0,
+ DCIOCHIP_REF_27_SRC_SEL_DISP_CLKIN2_DIVIDER = 0x1,
+ DCIOCHIP_REF_27_SRC_SEL_XTAL_BYPASS = 0x2,
+ DCIOCHIP_REF_27_SRC_SEL_DISP_CLKIN2_BYPASS = 0x3,
+} DCIOCHIP_REF_27_SRC_SEL;
+typedef enum DCIOCHIP_DVO_VREFPON {
+ DCIOCHIP_DVO_VREFPON_DISABLE = 0x0,
+ DCIOCHIP_DVO_VREFPON_ENABLE = 0x1,
+} DCIOCHIP_DVO_VREFPON;
+typedef enum DCIOCHIP_DVO_VREFSEL {
+ DCIOCHIP_DVO_VREFSEL_ONCHIP = 0x0,
+ DCIOCHIP_DVO_VREFSEL_EXTERNAL = 0x1,
+} DCIOCHIP_DVO_VREFSEL;
+typedef enum DCIOCHIP_SPDIF1_IMODE {
+ DCIOCHIP_SPDIF1_IMODE_OE_A = 0x0,
+ DCIOCHIP_SPDIF1_IMODE_TSTE_TSTO = 0x1,
+} DCIOCHIP_SPDIF1_IMODE;
+typedef enum DCIOCHIP_AUX_FALLSLEWSEL {
+ DCIOCHIP_AUX_FALLSLEWSEL_LOW = 0x0,
+ DCIOCHIP_AUX_FALLSLEWSEL_HIGH0 = 0x1,
+ DCIOCHIP_AUX_FALLSLEWSEL_HIGH1 = 0x2,
+ DCIOCHIP_AUX_FALLSLEWSEL_ULTRAHIGH = 0x3,
+} DCIOCHIP_AUX_FALLSLEWSEL;
+typedef enum DCIOCHIP_AUX_SPIKESEL {
+ DCIOCHIP_AUX_SPIKESEL_50NS = 0x0,
+ DCIOCHIP_AUX_SPIKESEL_10NS = 0x1,
+} DCIOCHIP_AUX_SPIKESEL;
+typedef enum DCIOCHIP_AUX_CSEL0P9 {
+ DCIOCHIP_AUX_CSEL_DEC1P0 = 0x0,
+ DCIOCHIP_AUX_CSEL_DEC0P9 = 0x1,
+} DCIOCHIP_AUX_CSEL0P9;
+typedef enum DCIOCHIP_AUX_CSEL1P1 {
+ DCIOCHIP_AUX_CSEL_INC1P0 = 0x0,
+ DCIOCHIP_AUX_CSEL_INC1P1 = 0x1,
+} DCIOCHIP_AUX_CSEL1P1;
+typedef enum DCIOCHIP_AUX_RSEL0P9 {
+ DCIOCHIP_AUX_RSEL_DEC1P0 = 0x0,
+ DCIOCHIP_AUX_RSEL_DEC0P9 = 0x1,
+} DCIOCHIP_AUX_RSEL0P9;
+typedef enum DCIOCHIP_AUX_RSEL1P1 {
+ DCIOCHIP_AUX_RSEL_INC1P0 = 0x0,
+ DCIOCHIP_AUX_RSEL_INC1P1 = 0x1,
+} DCIOCHIP_AUX_RSEL1P1;
+typedef enum DCP_GRPH_ENABLE {
+ DCP_GRPH_ENABLE_FALSE = 0x0,
+ DCP_GRPH_ENABLE_TRUE = 0x1,
+} DCP_GRPH_ENABLE;
+typedef enum DCP_GRPH_KEYER_ALPHA_SEL {
+ DCP_GRPH_KEYER_ALPHA_SEL_FALSE = 0x0,
+ DCP_GRPH_KEYER_ALPHA_SEL_TRUE = 0x1,
+} DCP_GRPH_KEYER_ALPHA_SEL;
+typedef enum DCP_GRPH_DEPTH {
+ DCP_GRPH_DEPTH_8BPP = 0x0,
+ DCP_GRPH_DEPTH_16BPP = 0x1,
+ DCP_GRPH_DEPTH_32BPP = 0x2,
+ DCP_GRPH_DEPTH_64BPP = 0x3,
+} DCP_GRPH_DEPTH;
+typedef enum DCP_GRPH_NUM_BANKS {
+ DCP_GRPH_NUM_BANKS_2BANK = 0x0,
+ DCP_GRPH_NUM_BANKS_4BANK = 0x1,
+ DCP_GRPH_NUM_BANKS_8BANK = 0x2,
+ DCP_GRPH_NUM_BANKS_16BANK = 0x3,
+} DCP_GRPH_NUM_BANKS;
+typedef enum DCP_GRPH_BANK_WIDTH {
+ DCP_GRPH_BANK_WIDTH_1 = 0x0,
+ DCP_GRPH_BANK_WIDTH_2 = 0x1,
+ DCP_GRPH_BANK_WIDTH_4 = 0x2,
+ DCP_GRPH_BANK_WIDTH_8 = 0x3,
+} DCP_GRPH_BANK_WIDTH;
+typedef enum DCP_GRPH_FORMAT {
+ DCP_GRPH_FORMAT_8BPP = 0x0,
+ DCP_GRPH_FORMAT_16BPP = 0x1,
+ DCP_GRPH_FORMAT_32BPP = 0x2,
+ DCP_GRPH_FORMAT_64BPP = 0x3,
+} DCP_GRPH_FORMAT;
+typedef enum DCP_GRPH_BANK_HEIGHT {
+ DCP_GRPH_BANK_HEIGHT_1 = 0x0,
+ DCP_GRPH_BANK_HEIGHT_2 = 0x1,
+ DCP_GRPH_BANK_HEIGHT_4 = 0x2,
+ DCP_GRPH_BANK_HEIGHT_8 = 0x3,
+} DCP_GRPH_BANK_HEIGHT;
+typedef enum DCP_GRPH_TILE_SPLIT {
+ DCP_GRPH_TILE_SPLIT_64B = 0x0,
+ DCP_GRPH_TILE_SPLIT_128B = 0x1,
+ DCP_GRPH_TILE_SPLIT_256B = 0x2,
+ DCP_GRPH_TILE_SPLIT_512B = 0x3,
+ DCP_GRPH_TILE_SPLIT_1B = 0x4,
+ DCP_GRPH_TILE_SPLIT_2B = 0x5,
+ DCP_GRPH_TILE_SPLIT_4B = 0x6,
+} DCP_GRPH_TILE_SPLIT;
+typedef enum DCP_GRPH_ADDRESS_TRANSLATION_ENABLE {
+ DCP_GRPH_ADDRESS_TRANSLATION_ENABLE_FALSE = 0x0,
+ DCP_GRPH_ADDRESS_TRANSLATION_ENABLE_TRUE = 0x1,
+} DCP_GRPH_ADDRESS_TRANSLATION_ENABLE;
+typedef enum DCP_GRPH_PRIVILEGED_ACCESS_ENABLE {
+ DCP_GRPH_PRIVILEGED_ACCESS_ENABLE_FALSE = 0x0,
+ DCP_GRPH_PRIVILEGED_ACCESS_ENABLE_TRUE = 0x1,
+} DCP_GRPH_PRIVILEGED_ACCESS_ENABLE;
+typedef enum DCP_GRPH_MACRO_TILE_ASPECT {
+ DCP_GRPH_MACRO_TILE_ASPECT_1 = 0x0,
+ DCP_GRPH_MACRO_TILE_ASPECT_2 = 0x1,
+ DCP_GRPH_MACRO_TILE_ASPECT_4 = 0x2,
+ DCP_GRPH_MACRO_TILE_ASPECT_8 = 0x3,
+} DCP_GRPH_MACRO_TILE_ASPECT;
+typedef enum DCP_GRPH_ARRAY_MODE {
+ DCP_GRPH_ARRAY_MODE_0 = 0x0,
+ DCP_GRPH_ARRAY_MODE_1 = 0x1,
+ DCP_GRPH_ARRAY_MODE_2 = 0x2,
+ DCP_GRPH_ARRAY_MODE_3 = 0x3,
+ DCP_GRPH_ARRAY_MODE_4 = 0x4,
+ DCP_GRPH_ARRAY_MODE_7 = 0x7,
+ DCP_GRPH_ARRAY_MODE_12 = 0xc,
+ DCP_GRPH_ARRAY_MODE_13 = 0xd,
+} DCP_GRPH_ARRAY_MODE;
+typedef enum DCP_GRPH_MICRO_TILE_MODE {
+ DCP_GRPH_MICRO_TILE_MODE_0 = 0x0,
+ DCP_GRPH_MICRO_TILE_MODE_1 = 0x1,
+ DCP_GRPH_MICRO_TILE_MODE_2 = 0x2,
+ DCP_GRPH_MICRO_TILE_MODE_3 = 0x3,
+} DCP_GRPH_MICRO_TILE_MODE;
+typedef enum DCP_GRPH_COLOR_EXPANSION_MODE {
+ DCP_GRPH_COLOR_EXPANSION_MODE_DEXP = 0x0,
+ DCP_GRPH_COLOR_EXPANSION_MODE_ZEXP = 0x1,
+} DCP_GRPH_COLOR_EXPANSION_MODE;
+typedef enum DCP_GRPH_LUT_10BIT_BYPASS_EN {
+ DCP_GRPH_LUT_10BIT_BYPASS_EN_FALSE = 0x0,
+ DCP_GRPH_LUT_10BIT_BYPASS_EN_TRUE = 0x1,
+} DCP_GRPH_LUT_10BIT_BYPASS_EN;
+typedef enum DCP_GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN {
+ DCP_GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN_FALSE = 0x0,
+ DCP_GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN_TRUE = 0x1,
+} DCP_GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN;
+typedef enum DCP_GRPH_ENDIAN_SWAP {
+ DCP_GRPH_ENDIAN_SWAP_NONE = 0x0,
+ DCP_GRPH_ENDIAN_SWAP_8IN16 = 0x1,
+ DCP_GRPH_ENDIAN_SWAP_8IN32 = 0x2,
+ DCP_GRPH_ENDIAN_SWAP_8IN64 = 0x3,
+} DCP_GRPH_ENDIAN_SWAP;
+typedef enum DCP_GRPH_RED_CROSSBAR {
+ DCP_GRPH_RED_CROSSBAR_FROM_R = 0x0,
+ DCP_GRPH_RED_CROSSBAR_FROM_G = 0x1,
+ DCP_GRPH_RED_CROSSBAR_FROM_B = 0x2,
+ DCP_GRPH_RED_CROSSBAR_FROM_A = 0x3,
+} DCP_GRPH_RED_CROSSBAR;
+typedef enum DCP_GRPH_GREEN_CROSSBAR {
+ DCP_GRPH_GREEN_CROSSBAR_FROM_G = 0x0,
+ DCP_GRPH_GREEN_CROSSBAR_FROM_B = 0x1,
+ DCP_GRPH_GREEN_CROSSBAR_FROM_A = 0x2,
+ DCP_GRPH_GREEN_CROSSBAR_FROM_R = 0x3,
+} DCP_GRPH_GREEN_CROSSBAR;
+typedef enum DCP_GRPH_BLUE_CROSSBAR {
+ DCP_GRPH_BLUE_CROSSBAR_FROM_B = 0x0,
+ DCP_GRPH_BLUE_CROSSBAR_FROM_A = 0x1,
+ DCP_GRPH_BLUE_CROSSBAR_FROM_R = 0x2,
+ DCP_GRPH_BLUE_CROSSBAR_FROM_G = 0x3,
+} DCP_GRPH_BLUE_CROSSBAR;
+typedef enum DCP_GRPH_ALPHA_CROSSBAR {
+ DCP_GRPH_ALPHA_CROSSBAR_FROM_A = 0x0,
+ DCP_GRPH_ALPHA_CROSSBAR_FROM_R = 0x1,
+ DCP_GRPH_ALPHA_CROSSBAR_FROM_G = 0x2,
+ DCP_GRPH_ALPHA_CROSSBAR_FROM_B = 0x3,
+} DCP_GRPH_ALPHA_CROSSBAR;
+typedef enum DCP_GRPH_PRIMARY_DFQ_ENABLE {
+ DCP_GRPH_PRIMARY_DFQ_ENABLE_FALSE = 0x0,
+ DCP_GRPH_PRIMARY_DFQ_ENABLE_TRUE = 0x1,
+} DCP_GRPH_PRIMARY_DFQ_ENABLE;
+typedef enum DCP_GRPH_SECONDARY_DFQ_ENABLE {
+ DCP_GRPH_SECONDARY_DFQ_ENABLE_FALSE = 0x0,
+ DCP_GRPH_SECONDARY_DFQ_ENABLE_TRUE = 0x1,
+} DCP_GRPH_SECONDARY_DFQ_ENABLE;
+typedef enum DCP_GRPH_INPUT_GAMMA_MODE {
+ DCP_GRPH_INPUT_GAMMA_MODE_LUT = 0x0,
+ DCP_GRPH_INPUT_GAMMA_MODE_BYPASS = 0x1,
+} DCP_GRPH_INPUT_GAMMA_MODE;
+typedef enum DCP_GRPH_MODE_UPDATE_PENDING {
+ DCP_GRPH_MODE_UPDATE_PENDING_FALSE = 0x0,
+ DCP_GRPH_MODE_UPDATE_PENDING_TRUE = 0x1,
+} DCP_GRPH_MODE_UPDATE_PENDING;
+typedef enum DCP_GRPH_MODE_UPDATE_TAKEN {
+ DCP_GRPH_MODE_UPDATE_TAKEN_FALSE = 0x0,
+ DCP_GRPH_MODE_UPDATE_TAKEN_TRUE = 0x1,
+} DCP_GRPH_MODE_UPDATE_TAKEN;
+typedef enum DCP_GRPH_SURFACE_UPDATE_PENDING {
+ DCP_GRPH_SURFACE_UPDATE_PENDING_FALSE = 0x0,
+ DCP_GRPH_SURFACE_UPDATE_PENDING_TRUE = 0x1,
+} DCP_GRPH_SURFACE_UPDATE_PENDING;
+typedef enum DCP_GRPH_SURFACE_UPDATE_TAKEN {
+ DCP_GRPH_SURFACE_UPDATE_TAKEN_FALSE = 0x0,
+ DCP_GRPH_SURFACE_UPDATE_TAKEN_TRUE = 0x1,
+} DCP_GRPH_SURFACE_UPDATE_TAKEN;
+typedef enum DCP_GRPH_SURFACE_XDMA_PENDING_ENABLE {
+ DCP_GRPH_SURFACE_XDMA_PENDING_ENABLE_FALSE = 0x0,
+ DCP_GRPH_SURFACE_XDMA_PENDING_ENABLE_TRUE = 0x1,
+} DCP_GRPH_SURFACE_XDMA_PENDING_ENABLE;
+typedef enum DCP_GRPH_UPDATE_LOCK {
+ DCP_GRPH_UPDATE_LOCK_FALSE = 0x0,
+ DCP_GRPH_UPDATE_LOCK_TRUE = 0x1,
+} DCP_GRPH_UPDATE_LOCK;
+typedef enum DCP_GRPH_SURFACE_IGNORE_UPDATE_LOCK {
+ DCP_GRPH_SURFACE_IGNORE_UPDATE_LOCK_FALSE = 0x0,
+ DCP_GRPH_SURFACE_IGNORE_UPDATE_LOCK_TRUE = 0x1,
+} DCP_GRPH_SURFACE_IGNORE_UPDATE_LOCK;
+typedef enum DCP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE {
+ DCP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE_FALSE = 0x0,
+ DCP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE_TRUE = 0x1,
+} DCP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE;
+typedef enum DCP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE {
+ DCP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE_FALSE = 0x0,
+ DCP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE_TRUE = 0x1,
+} DCP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE;
+typedef enum DCP_GRPH_SURFACE_UPDATE_H_RETRACE_EN {
+ DCP_GRPH_SURFACE_UPDATE_H_RETRACE_EN_FALSE = 0x0,
+ DCP_GRPH_SURFACE_UPDATE_H_RETRACE_EN_TRUE = 0x1,
+} DCP_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+typedef enum DCP_GRPH_XDMA_SUPER_AA_EN {
+ DCP_GRPH_XDMA_SUPER_AA_EN_FALSE = 0x0,
+ DCP_GRPH_XDMA_SUPER_AA_EN_TRUE = 0x1,
+} DCP_GRPH_XDMA_SUPER_AA_EN;
+typedef enum DCP_GRPH_DFQ_RESET {
+ DCP_GRPH_DFQ_RESET_FALSE = 0x0,
+ DCP_GRPH_DFQ_RESET_TRUE = 0x1,
+} DCP_GRPH_DFQ_RESET;
+typedef enum DCP_GRPH_DFQ_SIZE {
+ DCP_GRPH_DFQ_SIZE_DEEP1 = 0x0,
+ DCP_GRPH_DFQ_SIZE_DEEP2 = 0x1,
+ DCP_GRPH_DFQ_SIZE_DEEP3 = 0x2,
+ DCP_GRPH_DFQ_SIZE_DEEP4 = 0x3,
+ DCP_GRPH_DFQ_SIZE_DEEP5 = 0x4,
+ DCP_GRPH_DFQ_SIZE_DEEP6 = 0x5,
+ DCP_GRPH_DFQ_SIZE_DEEP7 = 0x6,
+ DCP_GRPH_DFQ_SIZE_DEEP8 = 0x7,
+} DCP_GRPH_DFQ_SIZE;
+typedef enum DCP_GRPH_DFQ_MIN_FREE_ENTRIES {
+ DCP_GRPH_DFQ_MIN_FREE_ENTRIES_1 = 0x0,
+ DCP_GRPH_DFQ_MIN_FREE_ENTRIES_2 = 0x1,
+ DCP_GRPH_DFQ_MIN_FREE_ENTRIES_3 = 0x2,
+ DCP_GRPH_DFQ_MIN_FREE_ENTRIES_4 = 0x3,
+ DCP_GRPH_DFQ_MIN_FREE_ENTRIES_5 = 0x4,
+ DCP_GRPH_DFQ_MIN_FREE_ENTRIES_6 = 0x5,
+ DCP_GRPH_DFQ_MIN_FREE_ENTRIES_7 = 0x6,
+ DCP_GRPH_DFQ_MIN_FREE_ENTRIES_8 = 0x7,
+} DCP_GRPH_DFQ_MIN_FREE_ENTRIES;
+typedef enum DCP_GRPH_DFQ_RESET_ACK {
+ DCP_GRPH_DFQ_RESET_ACK_FALSE = 0x0,
+ DCP_GRPH_DFQ_RESET_ACK_TRUE = 0x1,
+} DCP_GRPH_DFQ_RESET_ACK;
+typedef enum DCP_GRPH_PFLIP_INT_CLEAR {
+ DCP_GRPH_PFLIP_INT_CLEAR_FALSE = 0x0,
+ DCP_GRPH_PFLIP_INT_CLEAR_TRUE = 0x1,
+} DCP_GRPH_PFLIP_INT_CLEAR;
+typedef enum DCP_GRPH_PFLIP_INT_MASK {
+ DCP_GRPH_PFLIP_INT_MASK_FALSE = 0x0,
+ DCP_GRPH_PFLIP_INT_MASK_TRUE = 0x1,
+} DCP_GRPH_PFLIP_INT_MASK;
+typedef enum DCP_GRPH_PFLIP_INT_TYPE {
+ DCP_GRPH_PFLIP_INT_TYPE_LEGACY_LEVEL = 0x0,
+ DCP_GRPH_PFLIP_INT_TYPE_PULSE = 0x1,
+} DCP_GRPH_PFLIP_INT_TYPE;
+typedef enum DCP_GRPH_PRESCALE_SELECT {
+ DCP_GRPH_PRESCALE_SELECT_FIXED = 0x0,
+ DCP_GRPH_PRESCALE_SELECT_FLOATING = 0x1,
+} DCP_GRPH_PRESCALE_SELECT;
+typedef enum DCP_GRPH_PRESCALE_R_SIGN {
+ DCP_GRPH_PRESCALE_R_SIGN_UNSIGNED = 0x0,
+ DCP_GRPH_PRESCALE_R_SIGN_SIGNED = 0x1,
+} DCP_GRPH_PRESCALE_R_SIGN;
+typedef enum DCP_GRPH_PRESCALE_G_SIGN {
+ DCP_GRPH_PRESCALE_G_SIGN_UNSIGNED = 0x0,
+ DCP_GRPH_PRESCALE_G_SIGN_SIGNED = 0x1,
+} DCP_GRPH_PRESCALE_G_SIGN;
+typedef enum DCP_GRPH_PRESCALE_B_SIGN {
+ DCP_GRPH_PRESCALE_B_SIGN_UNSIGNED = 0x0,
+ DCP_GRPH_PRESCALE_B_SIGN_SIGNED = 0x1,
+} DCP_GRPH_PRESCALE_B_SIGN;
+typedef enum DCP_GRPH_PRESCALE_BYPASS {
+ DCP_GRPH_PRESCALE_BYPASS_FALSE = 0x0,
+ DCP_GRPH_PRESCALE_BYPASS_TRUE = 0x1,
+} DCP_GRPH_PRESCALE_BYPASS;
+typedef enum DCP_INPUT_CSC_GRPH_MODE {
+ DCP_INPUT_CSC_GRPH_MODE_BYPASS = 0x0,
+ DCP_INPUT_CSC_GRPH_MODE_INPUT_CSC_COEF = 0x1,
+ DCP_INPUT_CSC_GRPH_MODE_SHARED_COEF = 0x2,
+ DCP_INPUT_CSC_GRPH_MODE_RESERVED = 0x3,
+} DCP_INPUT_CSC_GRPH_MODE;
+typedef enum DCP_OUTPUT_CSC_GRPH_MODE {
+ DCP_OUTPUT_CSC_GRPH_MODE_BYPASS = 0x0,
+ DCP_OUTPUT_CSC_GRPH_MODE_RGB = 0x1,
+ DCP_OUTPUT_CSC_GRPH_MODE_YCBCR601 = 0x2,
+ DCP_OUTPUT_CSC_GRPH_MODE_YCBCR709 = 0x3,
+ DCP_OUTPUT_CSC_GRPH_MODE_OUTPUT_CSC_COEF = 0x4,
+ DCP_OUTPUT_CSC_GRPH_MODE_SHARED_COEF = 0x5,
+ DCP_OUTPUT_CSC_GRPH_MODE_RESERVED0 = 0x6,
+ DCP_OUTPUT_CSC_GRPH_MODE_RESERVED1 = 0x7,
+} DCP_OUTPUT_CSC_GRPH_MODE;
+typedef enum DCP_DENORM_MODE {
+ DCP_DENORM_MODE_UNITY = 0x0,
+ DCP_DENORM_MODE_6BIT = 0x1,
+ DCP_DENORM_MODE_8BIT = 0x2,
+ DCP_DENORM_MODE_10BIT = 0x3,
+ DCP_DENORM_MODE_11BIT = 0x4,
+ DCP_DENORM_MODE_12BIT = 0x5,
+ DCP_DENORM_MODE_RESERVED0 = 0x6,
+ DCP_DENORM_MODE_RESERVED1 = 0x7,
+} DCP_DENORM_MODE;
+typedef enum DCP_DENORM_14BIT_OUT {
+ DCP_DENORM_14BIT_OUT_FALSE = 0x0,
+ DCP_DENORM_14BIT_OUT_TRUE = 0x1,
+} DCP_DENORM_14BIT_OUT;
+typedef enum DCP_OUT_ROUND_TRUNC_MODE {
+ DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_12 = 0x0,
+ DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_11 = 0x1,
+ DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_10 = 0x2,
+ DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_9 = 0x3,
+ DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_8 = 0x4,
+ DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_RESERVED = 0x5,
+ DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_14 = 0x6,
+ DCP_OUT_ROUND_TRUNC_MODE_TRUNCATE_13 = 0x7,
+ DCP_OUT_ROUND_TRUNC_MODE_ROUND_12 = 0x8,
+ DCP_OUT_ROUND_TRUNC_MODE_ROUND_11 = 0x9,
+ DCP_OUT_ROUND_TRUNC_MODE_ROUND_10 = 0xa,
+ DCP_OUT_ROUND_TRUNC_MODE_ROUND_9 = 0xb,
+ DCP_OUT_ROUND_TRUNC_MODE_ROUND_8 = 0xc,
+ DCP_OUT_ROUND_TRUNC_MODE_ROUND_RESERVED = 0xd,
+ DCP_OUT_ROUND_TRUNC_MODE_ROUND_14 = 0xe,
+ DCP_OUT_ROUND_TRUNC_MODE_ROUND_13 = 0xf,
+} DCP_OUT_ROUND_TRUNC_MODE;
+typedef enum DCP_KEY_MODE {
+ DCP_KEY_MODE_ALPHA0 = 0x0,
+ DCP_KEY_MODE_ALPHA1 = 0x1,
+ DCP_KEY_MODE_IN_RANGE_ALPHA1 = 0x2,
+ DCP_KEY_MODE_IN_RANGE_ALPHA0 = 0x3,
+} DCP_KEY_MODE;
+typedef enum DCP_GRPH_DEGAMMA_MODE {
+ DCP_GRPH_DEGAMMA_MODE_BYPASS = 0x0,
+ DCP_GRPH_DEGAMMA_MODE_ROMA = 0x1,
+ DCP_GRPH_DEGAMMA_MODE_ROMB = 0x2,
+ DCP_GRPH_DEGAMMA_MODE_RESERVED = 0x3,
+} DCP_GRPH_DEGAMMA_MODE;
+typedef enum DCP_CURSOR2_DEGAMMA_MODE {
+ DCP_CURSOR2_DEGAMMA_MODE_BYPASS = 0x0,
+ DCP_CURSOR2_DEGAMMA_MODE_ROMA = 0x1,
+ DCP_CURSOR2_DEGAMMA_MODE_ROMB = 0x2,
+ DCP_CURSOR2_DEGAMMA_MODE_RESERVED = 0x3,
+} DCP_CURSOR2_DEGAMMA_MODE;
+typedef enum DCP_CURSOR_DEGAMMA_MODE {
+ DCP_CURSOR_DEGAMMA_MODE_BYPASS = 0x0,
+ DCP_CURSOR_DEGAMMA_MODE_ROMA = 0x1,
+ DCP_CURSOR_DEGAMMA_MODE_ROMB = 0x2,
+ DCP_CURSOR_DEGAMMA_MODE_RESERVED = 0x3,
+} DCP_CURSOR_DEGAMMA_MODE;
+typedef enum DCP_GRPH_GAMUT_REMAP_MODE {
+ DCP_GRPH_GAMUT_REMAP_MODE_BYPASS = 0x0,
+ DCP_GRPH_GAMUT_REMAP_MODE_ROMA = 0x1,
+ DCP_GRPH_GAMUT_REMAP_MODE_ROMB = 0x2,
+ DCP_GRPH_GAMUT_REMAP_MODE_RESERVED = 0x3,
+} DCP_GRPH_GAMUT_REMAP_MODE;
+typedef enum DCP_SPATIAL_DITHER_EN {
+ DCP_SPATIAL_DITHER_EN_FALSE = 0x0,
+ DCP_SPATIAL_DITHER_EN_TRUE = 0x1,
+} DCP_SPATIAL_DITHER_EN;
+typedef enum DCP_SPATIAL_DITHER_MODE {
+ DCP_SPATIAL_DITHER_MODE_BYPASS = 0x0,
+ DCP_SPATIAL_DITHER_MODE_ROMA = 0x1,
+ DCP_SPATIAL_DITHER_MODE_ROMB = 0x2,
+ DCP_SPATIAL_DITHER_MODE_RESERVED = 0x3,
+} DCP_SPATIAL_DITHER_MODE;
+typedef enum DCP_SPATIAL_DITHER_DEPTH {
+ DCP_SPATIAL_DITHER_DEPTH_30BPP = 0x0,
+ DCP_SPATIAL_DITHER_DEPTH_24BPP = 0x1,
+ DCP_SPATIAL_DITHER_DEPTH_36BPP = 0x2,
+ DCP_SPATIAL_DITHER_DEPTH_UNDEFINED = 0x3,
+} DCP_SPATIAL_DITHER_DEPTH;
+typedef enum DCP_FRAME_RANDOM_ENABLE {
+ DCP_FRAME_RANDOM_ENABLE_FALSE = 0x0,
+ DCP_FRAME_RANDOM_ENABLE_TRUE = 0x1,
+} DCP_FRAME_RANDOM_ENABLE;
+typedef enum DCP_RGB_RANDOM_ENABLE {
+ DCP_RGB_RANDOM_ENABLE_FALSE = 0x0,
+ DCP_RGB_RANDOM_ENABLE_TRUE = 0x1,
+} DCP_RGB_RANDOM_ENABLE;
+typedef enum DCP_HIGHPASS_RANDOM_ENABLE {
+ DCP_HIGHPASS_RANDOM_ENABLE_FALSE = 0x0,
+ DCP_HIGHPASS_RANDOM_ENABLE_TRUE = 0x1,
+} DCP_HIGHPASS_RANDOM_ENABLE;
+typedef enum DCP_CURSOR_EN {
+ DCP_CURSOR_EN_FALSE = 0x0,
+ DCP_CURSOR_EN_TRUE = 0x1,
+} DCP_CURSOR_EN;
+typedef enum DCP_CUR_INV_TRANS_CLAMP {
+ DCP_CUR_INV_TRANS_CLAMP_FALSE = 0x0,
+ DCP_CUR_INV_TRANS_CLAMP_TRUE = 0x1,
+} DCP_CUR_INV_TRANS_CLAMP;
+typedef enum DCP_CURSOR_MODE {
+ DCP_CURSOR_MODE_MONO_2BPP = 0x0,
+ DCP_CURSOR_MODE_24BPP_1BIT = 0x1,
+ DCP_CURSOR_MODE_24BPP_8BIT_PREMULTI = 0x2,
+ DCP_CURSOR_MODE_24BPP_8BIT_UNPREMULTI = 0x3,
+} DCP_CURSOR_MODE;
+typedef enum DCP_CURSOR_2X_MAGNIFY {
+ DCP_CURSOR_2X_MAGNIFY_FALSE = 0x0,
+ DCP_CURSOR_2X_MAGNIFY_TRUE = 0x1,
+} DCP_CURSOR_2X_MAGNIFY;
+typedef enum DCP_CURSOR_FORCE_MC_ON {
+ DCP_CURSOR_FORCE_MC_ON_FALSE = 0x0,
+ DCP_CURSOR_FORCE_MC_ON_TRUE = 0x1,
+} DCP_CURSOR_FORCE_MC_ON;
+typedef enum DCP_CURSOR_URGENT_CONTROL {
+ DCP_CURSOR_URGENT_CONTROL_MODE_0 = 0x0,
+ DCP_CURSOR_URGENT_CONTROL_MODE_1 = 0x1,
+ DCP_CURSOR_URGENT_CONTROL_MODE_2 = 0x2,
+ DCP_CURSOR_URGENT_CONTROL_MODE_3 = 0x3,
+ DCP_CURSOR_URGENT_CONTROL_MODE_4 = 0x4,
+} DCP_CURSOR_URGENT_CONTROL;
+typedef enum DCP_CURSOR_UPDATE_PENDING {
+ DCP_CURSOR_UPDATE_PENDING_FALSE = 0x0,
+ DCP_CURSOR_UPDATE_PENDING_TRUE = 0x1,
+} DCP_CURSOR_UPDATE_PENDING;
+typedef enum DCP_CURSOR_UPDATE_TAKEN {
+ DCP_CURSOR_UPDATE_TAKEN_FALSE = 0x0,
+ DCP_CURSOR_UPDATE_TAKEN_TRUE = 0x1,
+} DCP_CURSOR_UPDATE_TAKEN;
+typedef enum DCP_CURSOR_UPDATE_LOCK {
+ DCP_CURSOR_UPDATE_LOCK_FALSE = 0x0,
+ DCP_CURSOR_UPDATE_LOCK_TRUE = 0x1,
+} DCP_CURSOR_UPDATE_LOCK;
+typedef enum DCP_CURSOR_DISABLE_MULTIPLE_UPDATE {
+ DCP_CURSOR_DISABLE_MULTIPLE_UPDATE_FALSE = 0x0,
+ DCP_CURSOR_DISABLE_MULTIPLE_UPDATE_TRUE = 0x1,
+} DCP_CURSOR_DISABLE_MULTIPLE_UPDATE;
+typedef enum DCP_CURSOR_UPDATE_STEREO_MODE {
+ DCP_CURSOR_UPDATE_STEREO_MODE_BOTH = 0x0,
+ DCP_CURSOR_UPDATE_STEREO_MODE_SECONDARY_ONLY = 0x1,
+ DCP_CURSOR_UPDATE_STEREO_MODE_UNDEFINED = 0x2,
+ DCP_CURSOR_UPDATE_STEREO_MODE_PRIMARY_ONLY = 0x3,
+} DCP_CURSOR_UPDATE_STEREO_MODE;
+typedef enum DCP_CURSOR2_EN {
+ DCP_CURSOR2_EN_FALSE = 0x0,
+ DCP_CURSOR2_EN_TRUE = 0x1,
+} DCP_CURSOR2_EN;
+typedef enum DCP_CUR2_INV_TRANS_CLAMP {
+ DCP_CUR2_INV_TRANS_CLAMP_FALSE = 0x0,
+ DCP_CUR2_INV_TRANS_CLAMP_TRUE = 0x1,
+} DCP_CUR2_INV_TRANS_CLAMP;
+typedef enum DCP_CURSOR2_MODE {
+ DCP_CURSOR2_MODE_MONO_2BPP = 0x0,
+ DCP_CURSOR2_MODE_24BPP_1BIT = 0x1,
+ DCP_CURSOR2_MODE_24BPP_8BIT_PREMULTI = 0x2,
+ DCP_CURSOR2_MODE_24BPP_8BIT_UNPREMULTI = 0x3,
+} DCP_CURSOR2_MODE;
+typedef enum DCP_CURSOR2_2X_MAGNIFY {
+ DCP_CURSOR2_2X_MAGNIFY_FALSE = 0x0,
+ DCP_CURSOR2_2X_MAGNIFY_TRUE = 0x1,
+} DCP_CURSOR2_2X_MAGNIFY;
+typedef enum DCP_CURSOR2_FORCE_MC_ON {
+ DCP_CURSOR2_FORCE_MC_ON_FALSE = 0x0,
+ DCP_CURSOR2_FORCE_MC_ON_TRUE = 0x1,
+} DCP_CURSOR2_FORCE_MC_ON;
+typedef enum DCP_CURSOR2_URGENT_CONTROL {
+ DCP_CURSOR2_URGENT_CONTROL_MODE_0 = 0x0,
+ DCP_CURSOR2_URGENT_CONTROL_MODE_1 = 0x1,
+ DCP_CURSOR2_URGENT_CONTROL_MODE_2 = 0x2,
+ DCP_CURSOR2_URGENT_CONTROL_MODE_3 = 0x3,
+ DCP_CURSOR2_URGENT_CONTROL_MODE_4 = 0x4,
+} DCP_CURSOR2_URGENT_CONTROL;
+typedef enum DCP_CURSOR2_UPDATE_PENDING {
+ DCP_CURSOR2_UPDATE_PENDING_FALSE = 0x0,
+ DCP_CURSOR2_UPDATE_PENDING_TRUE = 0x1,
+} DCP_CURSOR2_UPDATE_PENDING;
+typedef enum DCP_CURSOR2_UPDATE_TAKEN {
+ DCP_CURSOR2_UPDATE_TAKEN_FALSE = 0x0,
+ DCP_CURSOR2_UPDATE_TAKEN_TRUE = 0x1,
+} DCP_CURSOR2_UPDATE_TAKEN;
+typedef enum DCP_CURSOR2_UPDATE_LOCK {
+ DCP_CURSOR2_UPDATE_LOCK_FALSE = 0x0,
+ DCP_CURSOR2_UPDATE_LOCK_TRUE = 0x1,
+} DCP_CURSOR2_UPDATE_LOCK;
+typedef enum DCP_CURSOR2_DISABLE_MULTIPLE_UPDATE {
+ DCP_CURSOR2_DISABLE_MULTIPLE_UPDATE_FALSE = 0x0,
+ DCP_CURSOR2_DISABLE_MULTIPLE_UPDATE_TRUE = 0x1,
+} DCP_CURSOR2_DISABLE_MULTIPLE_UPDATE;
+typedef enum DCP_CURSOR2_UPDATE_STEREO_MODE {
+ DCP_CURSOR2_UPDATE_STEREO_MODE_BOTH = 0x0,
+ DCP_CURSOR2_UPDATE_STEREO_MODE_SECONDARY_ONLY = 0x1,
+ DCP_CURSOR2_UPDATE_STEREO_MODE_UNDEFINED = 0x2,
+ DCP_CURSOR2_UPDATE_STEREO_MODE_PRIMARY_ONLY = 0x3,
+} DCP_CURSOR2_UPDATE_STEREO_MODE;
+typedef enum DCP_CUR_REQUEST_FILTER_DIS {
+ DCP_CUR_REQUEST_FILTER_DIS_FALSE = 0x0,
+ DCP_CUR_REQUEST_FILTER_DIS_TRUE = 0x1,
+} DCP_CUR_REQUEST_FILTER_DIS;
+typedef enum DCP_CURSOR_STEREO_EN {
+ DCP_CURSOR_STEREO_EN_FALSE = 0x0,
+ DCP_CURSOR_STEREO_EN_TRUE = 0x1,
+} DCP_CURSOR_STEREO_EN;
+typedef enum DCP_CURSOR_STEREO_OFFSET_YNX {
+ DCP_CURSOR_STEREO_OFFSET_YNX_X_POSITION = 0x0,
+ DCP_CURSOR_STEREO_OFFSET_YNX_Y_POSITION = 0x1,
+} DCP_CURSOR_STEREO_OFFSET_YNX;
+typedef enum DCP_CURSOR2_STEREO_EN {
+ DCP_CURSOR2_STEREO_EN_FALSE = 0x0,
+ DCP_CURSOR2_STEREO_EN_TRUE = 0x1,
+} DCP_CURSOR2_STEREO_EN;
+typedef enum DCP_CURSOR2_STEREO_OFFSET_YNX {
+ DCP_CURSOR2_STEREO_OFFSET_YNX_X_POSITION = 0x0,
+ DCP_CURSOR2_STEREO_OFFSET_YNX_Y_POSITION = 0x1,
+} DCP_CURSOR2_STEREO_OFFSET_YNX;
+typedef enum DCP_DC_LUT_RW_MODE {
+ DCP_DC_LUT_RW_MODE_256_ENTRY = 0x0,
+ DCP_DC_LUT_RW_MODE_PWL = 0x1,
+} DCP_DC_LUT_RW_MODE;
+typedef enum DCP_DC_LUT_VGA_ACCESS_ENABLE {
+ DCP_DC_LUT_VGA_ACCESS_ENABLE_FALSE = 0x0,
+ DCP_DC_LUT_VGA_ACCESS_ENABLE_TRUE = 0x1,
+} DCP_DC_LUT_VGA_ACCESS_ENABLE;
+typedef enum DCP_DC_LUT_AUTOFILL {
+ DCP_DC_LUT_AUTOFILL_FALSE = 0x0,
+ DCP_DC_LUT_AUTOFILL_TRUE = 0x1,
+} DCP_DC_LUT_AUTOFILL;
+typedef enum DCP_DC_LUT_AUTOFILL_DONE {
+ DCP_DC_LUT_AUTOFILL_DONE_FALSE = 0x0,
+ DCP_DC_LUT_AUTOFILL_DONE_TRUE = 0x1,
+} DCP_DC_LUT_AUTOFILL_DONE;
+typedef enum DCP_DC_LUT_INC_B {
+ DCP_DC_LUT_INC_B_NA = 0x0,
+ DCP_DC_LUT_INC_B_2 = 0x1,
+ DCP_DC_LUT_INC_B_4 = 0x2,
+ DCP_DC_LUT_INC_B_8 = 0x3,
+ DCP_DC_LUT_INC_B_16 = 0x4,
+ DCP_DC_LUT_INC_B_32 = 0x5,
+ DCP_DC_LUT_INC_B_64 = 0x6,
+ DCP_DC_LUT_INC_B_128 = 0x7,
+ DCP_DC_LUT_INC_B_256 = 0x8,
+ DCP_DC_LUT_INC_B_512 = 0x9,
+} DCP_DC_LUT_INC_B;
+typedef enum DCP_DC_LUT_DATA_B_SIGNED_EN {
+ DCP_DC_LUT_DATA_B_SIGNED_EN_FALSE = 0x0,
+ DCP_DC_LUT_DATA_B_SIGNED_EN_TRUE = 0x1,
+} DCP_DC_LUT_DATA_B_SIGNED_EN;
+typedef enum DCP_DC_LUT_DATA_B_FLOAT_POINT_EN {
+ DCP_DC_LUT_DATA_B_FLOAT_POINT_EN_FALSE = 0x0,
+ DCP_DC_LUT_DATA_B_FLOAT_POINT_EN_TRUE = 0x1,
+} DCP_DC_LUT_DATA_B_FLOAT_POINT_EN;
+typedef enum DCP_DC_LUT_DATA_B_FORMAT {
+ DCP_DC_LUT_DATA_B_FORMAT_U0P10 = 0x0,
+ DCP_DC_LUT_DATA_B_FORMAT_S1P10 = 0x1,
+ DCP_DC_LUT_DATA_B_FORMAT_U1P11 = 0x2,
+ DCP_DC_LUT_DATA_B_FORMAT_U0P12 = 0x3,
+} DCP_DC_LUT_DATA_B_FORMAT;
+typedef enum DCP_DC_LUT_INC_G {
+ DCP_DC_LUT_INC_G_NA = 0x0,
+ DCP_DC_LUT_INC_G_2 = 0x1,
+ DCP_DC_LUT_INC_G_4 = 0x2,
+ DCP_DC_LUT_INC_G_8 = 0x3,
+ DCP_DC_LUT_INC_G_16 = 0x4,
+ DCP_DC_LUT_INC_G_32 = 0x5,
+ DCP_DC_LUT_INC_G_64 = 0x6,
+ DCP_DC_LUT_INC_G_128 = 0x7,
+ DCP_DC_LUT_INC_G_256 = 0x8,
+ DCP_DC_LUT_INC_G_512 = 0x9,
+} DCP_DC_LUT_INC_G;
+typedef enum DCP_DC_LUT_DATA_G_SIGNED_EN {
+ DCP_DC_LUT_DATA_G_SIGNED_EN_FALSE = 0x0,
+ DCP_DC_LUT_DATA_G_SIGNED_EN_TRUE = 0x1,
+} DCP_DC_LUT_DATA_G_SIGNED_EN;
+typedef enum DCP_DC_LUT_DATA_G_FLOAT_POINT_EN {
+ DCP_DC_LUT_DATA_G_FLOAT_POINT_EN_FALSE = 0x0,
+ DCP_DC_LUT_DATA_G_FLOAT_POINT_EN_TRUE = 0x1,
+} DCP_DC_LUT_DATA_G_FLOAT_POINT_EN;
+typedef enum DCP_DC_LUT_DATA_G_FORMAT {
+ DCP_DC_LUT_DATA_G_FORMAT_U0P10 = 0x0,
+ DCP_DC_LUT_DATA_G_FORMAT_S1P10 = 0x1,
+ DCP_DC_LUT_DATA_G_FORMAT_U1P11 = 0x2,
+ DCP_DC_LUT_DATA_G_FORMAT_U0P12 = 0x3,
+} DCP_DC_LUT_DATA_G_FORMAT;
+typedef enum DCP_DC_LUT_INC_R {
+ DCP_DC_LUT_INC_R_NA = 0x0,
+ DCP_DC_LUT_INC_R_2 = 0x1,
+ DCP_DC_LUT_INC_R_4 = 0x2,
+ DCP_DC_LUT_INC_R_8 = 0x3,
+ DCP_DC_LUT_INC_R_16 = 0x4,
+ DCP_DC_LUT_INC_R_32 = 0x5,
+ DCP_DC_LUT_INC_R_64 = 0x6,
+ DCP_DC_LUT_INC_R_128 = 0x7,
+ DCP_DC_LUT_INC_R_256 = 0x8,
+ DCP_DC_LUT_INC_R_512 = 0x9,
+} DCP_DC_LUT_INC_R;
+typedef enum DCP_DC_LUT_DATA_R_SIGNED_EN {
+ DCP_DC_LUT_DATA_R_SIGNED_EN_FALSE = 0x0,
+ DCP_DC_LUT_DATA_R_SIGNED_EN_TRUE = 0x1,
+} DCP_DC_LUT_DATA_R_SIGNED_EN;
+typedef enum DCP_DC_LUT_DATA_R_FLOAT_POINT_EN {
+ DCP_DC_LUT_DATA_R_FLOAT_POINT_EN_FALSE = 0x0,
+ DCP_DC_LUT_DATA_R_FLOAT_POINT_EN_TRUE = 0x1,
+} DCP_DC_LUT_DATA_R_FLOAT_POINT_EN;
+typedef enum DCP_DC_LUT_DATA_R_FORMAT {
+ DCP_DC_LUT_DATA_R_FORMAT_U0P10 = 0x0,
+ DCP_DC_LUT_DATA_R_FORMAT_S1P10 = 0x1,
+ DCP_DC_LUT_DATA_R_FORMAT_U1P11 = 0x2,
+ DCP_DC_LUT_DATA_R_FORMAT_U0P12 = 0x3,
+} DCP_DC_LUT_DATA_R_FORMAT;
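+/*
+ * The U0P10/S1P10/U1P11/U0P12 names in the LUT data format enums above
+ * read as fixed-point formats in the usual UiPf/SiPf notation: U0P10 is
+ * unsigned with 0 integer and 10 fractional bits, S1P10 adds a sign
+ * bit, and so on. This is an assumption drawn from the naming
+ * convention; the encoding is not spelled out in this patch.
+ */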
+typedef enum DCP_CRC_ENABLE {
+ DCP_CRC_ENABLE_FALSE = 0x0,
+ DCP_CRC_ENABLE_TRUE = 0x1,
+} DCP_CRC_ENABLE;
+typedef enum DCP_CRC_SOURCE_SEL {
+ DCP_CRC_SOURCE_SEL_OUTPUT_PIX = 0x0,
+ DCP_CRC_SOURCE_SEL_INPUT_L32 = 0x1,
+ DCP_CRC_SOURCE_SEL_INPUT_H32 = 0x2,
+ DCP_CRC_SOURCE_SEL_OUTPUT_CNTL = 0x4,
+} DCP_CRC_SOURCE_SEL;
+typedef enum DCP_CRC_LINE_SEL {
+ DCP_CRC_LINE_SEL_RESERVED = 0x0,
+ DCP_CRC_LINE_SEL_EVEN = 0x1,
+ DCP_CRC_LINE_SEL_ODD = 0x2,
+ DCP_CRC_LINE_SEL_BOTH = 0x3,
+} DCP_CRC_LINE_SEL;
+typedef enum DCP_GRPH_FLIP_RATE {
+ DCP_GRPH_FLIP_RATE_1FRAME = 0x0,
+ DCP_GRPH_FLIP_RATE_2FRAME = 0x1,
+ DCP_GRPH_FLIP_RATE_3FRAME = 0x2,
+ DCP_GRPH_FLIP_RATE_4FRAME = 0x3,
+ DCP_GRPH_FLIP_RATE_5FRAME = 0x4,
+ DCP_GRPH_FLIP_RATE_6FRAME = 0x5,
+ DCP_GRPH_FLIP_RATE_7FRAME = 0x6,
+ DCP_GRPH_FLIP_RATE_8FRAME = 0x7,
+} DCP_GRPH_FLIP_RATE;
+typedef enum DCP_GRPH_FLIP_RATE_ENABLE {
+ DCP_GRPH_FLIP_RATE_ENABLE_FALSE = 0x0,
+ DCP_GRPH_FLIP_RATE_ENABLE_TRUE = 0x1,
+} DCP_GRPH_FLIP_RATE_ENABLE;
+typedef enum DCP_GSL0_EN {
+ DCP_GSL0_EN_FALSE = 0x0,
+ DCP_GSL0_EN_TRUE = 0x1,
+} DCP_GSL0_EN;
+typedef enum DCP_GSL1_EN {
+ DCP_GSL1_EN_FALSE = 0x0,
+ DCP_GSL1_EN_TRUE = 0x1,
+} DCP_GSL1_EN;
+typedef enum DCP_GSL2_EN {
+ DCP_GSL2_EN_FALSE = 0x0,
+ DCP_GSL2_EN_TRUE = 0x1,
+} DCP_GSL2_EN;
+typedef enum DCP_GSL_MASTER_EN {
+ DCP_GSL_MASTER_EN_FALSE = 0x0,
+ DCP_GSL_MASTER_EN_TRUE = 0x1,
+} DCP_GSL_MASTER_EN;
+typedef enum DCP_GSL_XDMA_GROUP {
+ DCP_GSL_XDMA_GROUP_VSYNC = 0x0,
+ DCP_GSL_XDMA_GROUP_HSYNC0 = 0x1,
+ DCP_GSL_XDMA_GROUP_HSYNC1 = 0x2,
+ DCP_GSL_XDMA_GROUP_HSYNC2 = 0x3,
+} DCP_GSL_XDMA_GROUP;
+typedef enum DCP_GSL_XDMA_GROUP_UNDERFLOW_EN {
+ DCP_GSL_XDMA_GROUP_UNDERFLOW_EN_FALSE = 0x0,
+ DCP_GSL_XDMA_GROUP_UNDERFLOW_EN_TRUE = 0x1,
+} DCP_GSL_XDMA_GROUP_UNDERFLOW_EN;
+typedef enum DCP_GSL_SYNC_SOURCE {
+ DCP_GSL_SYNC_SOURCE_FLIP = 0x0,
+ DCP_GSL_SYNC_SOURCE_PHASE0 = 0x1,
+ DCP_GSL_SYNC_SOURCE_RESET = 0x2,
+ DCP_GSL_SYNC_SOURCE_PHASE1 = 0x3,
+} DCP_GSL_SYNC_SOURCE;
+typedef enum DCP_GSL_DELAY_SURFACE_UPDATE_PENDING {
+ DCP_GSL_DELAY_SURFACE_UPDATE_PENDING_FALSE = 0x0,
+ DCP_GSL_DELAY_SURFACE_UPDATE_PENDING_TRUE = 0x1,
+} DCP_GSL_DELAY_SURFACE_UPDATE_PENDING;
+typedef enum DCP_TEST_DEBUG_WRITE_EN {
+ DCP_TEST_DEBUG_WRITE_EN_FALSE = 0x0,
+ DCP_TEST_DEBUG_WRITE_EN_TRUE = 0x1,
+} DCP_TEST_DEBUG_WRITE_EN;
+typedef enum DCP_GRPH_STEREOSYNC_FLIP_EN {
+ DCP_GRPH_STEREOSYNC_FLIP_EN_FALSE = 0x0,
+ DCP_GRPH_STEREOSYNC_FLIP_EN_TRUE = 0x1,
+} DCP_GRPH_STEREOSYNC_FLIP_EN;
+typedef enum DCP_GRPH_STEREOSYNC_FLIP_MODE {
+ DCP_GRPH_STEREOSYNC_FLIP_MODE_FLIP = 0x0,
+ DCP_GRPH_STEREOSYNC_FLIP_MODE_PHASE0 = 0x1,
+ DCP_GRPH_STEREOSYNC_FLIP_MODE_RESET = 0x2,
+ DCP_GRPH_STEREOSYNC_FLIP_MODE_PHASE1 = 0x3,
+} DCP_GRPH_STEREOSYNC_FLIP_MODE;
+typedef enum DCP_GRPH_STEREOSYNC_SELECT_DISABLE {
+ DCP_GRPH_STEREOSYNC_SELECT_DISABLE_FALSE = 0x0,
+ DCP_GRPH_STEREOSYNC_SELECT_DISABLE_TRUE = 0x1,
+} DCP_GRPH_STEREOSYNC_SELECT_DISABLE;
+typedef enum DCP_GRPH_ROTATION_ANGLE {
+ DCP_GRPH_ROTATION_ANGLE_0 = 0x0,
+ DCP_GRPH_ROTATION_ANGLE_90 = 0x1,
+ DCP_GRPH_ROTATION_ANGLE_180 = 0x2,
+ DCP_GRPH_ROTATION_ANGLE_270 = 0x3,
+} DCP_GRPH_ROTATION_ANGLE;
+typedef enum DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_EN {
+ DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_EN_FALSE = 0x0,
+ DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_EN_TRUE = 0x1,
+} DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_EN;
+typedef enum DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_MODE {
+ DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_MODE_RELY_NUM = 0x0,
+ DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_MODE_RELY_ENABLE= 0x1,
+} DCP_GRPH_XDMA_CACHE_UNDERFLOW_CNT_MODE;
+typedef enum DCP_GRPH_REGAMMA_MODE {
+ DCP_GRPH_REGAMMA_MODE_BYPASS = 0x0,
+ DCP_GRPH_REGAMMA_MODE_SRGB = 0x1,
+ DCP_GRPH_REGAMMA_MODE_XVYCC = 0x2,
+ DCP_GRPH_REGAMMA_MODE_PROGA = 0x3,
+ DCP_GRPH_REGAMMA_MODE_PROGB = 0x4,
+} DCP_GRPH_REGAMMA_MODE;
+typedef enum DCP_ALPHA_ROUND_TRUNC_MODE {
+ DCP_ALPHA_ROUND_TRUNC_MODE_ROUND = 0x0,
+ DCP_ALPHA_ROUND_TRUNC_MODE_TRUNC = 0x1,
+} DCP_ALPHA_ROUND_TRUNC_MODE;
+typedef enum DCP_CURSOR_ALPHA_BLND_ENA {
+ DCP_CURSOR_ALPHA_BLND_ENA_FALSE = 0x0,
+ DCP_CURSOR_ALPHA_BLND_ENA_TRUE = 0x1,
+} DCP_CURSOR_ALPHA_BLND_ENA;
+typedef enum DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_MASK {
+ DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_MASK_FALSE = 0x0,
+ DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_MASK_TRUE = 0x1,
+} DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_MASK;
+typedef enum DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_ACK {
+ DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_ACK_FALSE = 0x0,
+ DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_ACK_TRUE = 0x1,
+} DCP_GRPH_XDMA_CACHE_UNDERFLOW_FRAME_ACK;
+typedef enum DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_MASK {
+ DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_MASK_FALSE = 0x0,
+ DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_MASK_TRUE = 0x1,
+} DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_MASK;
+typedef enum DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_ACK {
+ DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_ACK_FALSE = 0x0,
+ DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_ACK_TRUE = 0x1,
+} DCP_GRPH_XDMA_CACHE_UNDERFLOW_INT_ACK;
+typedef enum DCP_GRPH_SURFACE_COUNTER_EN {
+ DCP_GRPH_SURFACE_COUNTER_EN_DISABLE = 0x0,
+ DCP_GRPH_SURFACE_COUNTER_EN_ENABLE = 0x1,
+} DCP_GRPH_SURFACE_COUNTER_EN;
+typedef enum DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT {
+ DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_0 = 0x0,
+ DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_1 = 0x1,
+ DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_2 = 0x2,
+ DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_3 = 0x3,
+ DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_4 = 0x4,
+ DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_5 = 0x5,
+ DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_6 = 0x6,
+ DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_7 = 0x7,
+ DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_8 = 0x8,
+ DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_9 = 0x9,
+ DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_10 = 0xa,
+ DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT_11 = 0xb,
+} DCP_GRPH_SURFACE_COUNTER_EVENT_SELECT;
+typedef enum DCP_GRPH_SURFACE_COUNTER_ERR_WRAP_OCCURED {
+ DCP_GRPH_SURFACE_COUNTER_ERR_WRAP_OCCURED_NO = 0x0,
+ DCP_GRPH_SURFACE_COUNTER_ERR_WRAP_OCCURED_YES = 0x1,
+} DCP_GRPH_SURFACE_COUNTER_ERR_WRAP_OCCURED;
+typedef enum HDMI_KEEPOUT_MODE {
+ HDMI_KEEPOUT_0_650PIX_AFTER_VSYNC = 0x0,
+ HDMI_KEEPOUT_509_650PIX_AFTER_VSYNC = 0x1,
+} HDMI_KEEPOUT_MODE;
+typedef enum HDMI_CLOCK_CHANNEL_RATE {
+ HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE = 0x0,
+ HDMI_CLOCK_CHANNEL_FREQ_QUARTER_TO_CHAR_RATE = 0x1,
+} HDMI_CLOCK_CHANNEL_RATE;
+typedef enum HDMI_NO_EXTRA_NULL_PACKET_FILLED {
+ HDMI_EXTRA_NULL_PACKET_FILLED_ENABLE = 0x0,
+ HDMI_EXTRA_NULL_PACKET_FILLED_DISABLE = 0x1,
+} HDMI_NO_EXTRA_NULL_PACKET_FILLED;
+typedef enum HDMI_PACKET_GEN_VERSION {
+ HDMI_PACKET_GEN_VERSION_OLD = 0x0,
+ HDMI_PACKET_GEN_VERSION_NEW = 0x1,
+} HDMI_PACKET_GEN_VERSION;
+typedef enum HDMI_ERROR_ACK {
+ HDMI_ERROR_ACK_INT = 0x0,
+ HDMI_ERROR_NOT_ACK = 0x1,
+} HDMI_ERROR_ACK;
+typedef enum HDMI_ERROR_MASK {
+ HDMI_ERROR_MASK_INT = 0x0,
+ HDMI_ERROR_NOT_MASK = 0x1,
+} HDMI_ERROR_MASK;
+typedef enum HDMI_DEEP_COLOR_DEPTH {
+ HDMI_DEEP_COLOR_DEPTH_24BPP = 0x0,
+ HDMI_DEEP_COLOR_DEPTH_30BPP = 0x1,
+ HDMI_DEEP_COLOR_DEPTH_36BPP = 0x2,
+ HDMI_DEEP_COLOR_DEPTH_RESERVED = 0x3,
+} HDMI_DEEP_COLOR_DEPTH;
+typedef enum HDMI_AUDIO_DELAY_EN {
+ HDMI_AUDIO_DELAY_DISABLE = 0x0,
+ HDMI_AUDIO_DELAY_58CLK = 0x1,
+ HDMI_AUDIO_DELAY_56CLK = 0x2,
+ HDMI_AUDIO_DELAY_RESERVED = 0x3,
+} HDMI_AUDIO_DELAY_EN;
+typedef enum HDMI_AUDIO_SEND_MAX_PACKETS {
+ HDMI_NOT_SEND_MAX_AUDIO_PACKETS = 0x0,
+ HDMI_SEND_MAX_AUDIO_PACKETS = 0x1,
+} HDMI_AUDIO_SEND_MAX_PACKETS;
+typedef enum HDMI_ACR_SEND {
+ HDMI_ACR_NOT_SEND = 0x0,
+ HDMI_ACR_PKT_SEND = 0x1,
+} HDMI_ACR_SEND;
+typedef enum HDMI_ACR_CONT {
+ HDMI_ACR_CONT_DISABLE = 0x0,
+ HDMI_ACR_CONT_ENABLE = 0x1,
+} HDMI_ACR_CONT;
+typedef enum HDMI_ACR_SELECT {
+ HDMI_ACR_SELECT_HW = 0x0,
+ HDMI_ACR_SELECT_32K = 0x1,
+ HDMI_ACR_SELECT_44K = 0x2,
+ HDMI_ACR_SELECT_48K = 0x3,
+} HDMI_ACR_SELECT;
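+/*
+ * ACR here is the HDMI Audio Clock Regeneration mechanism, in which the
+ * sink reconstructs the audio clock from transmitted N/CTS pairs. The
+ * 32K/44K/48K selections above appear to pick the base audio sample
+ * rate used for that derivation, with the HW setting leaving the choice
+ * to hardware. This reading follows the HDMI ACR mechanism and the
+ * field names; it is not stated in this patch.
+ */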
+typedef enum HDMI_ACR_SOURCE {
+ HDMI_ACR_SOURCE_HW = 0x0,
+ HDMI_ACR_SOURCE_SW = 0x1,
+} HDMI_ACR_SOURCE;
+typedef enum HDMI_ACR_N_MULTIPLE {
+ HDMI_ACR_0_MULTIPLE_RESERVED = 0x0,
+ HDMI_ACR_1_MULTIPLE = 0x1,
+ HDMI_ACR_2_MULTIPLE = 0x2,
+ HDMI_ACR_3_MULTIPLE_RESERVED = 0x3,
+ HDMI_ACR_4_MULTIPLE = 0x4,
+ HDMI_ACR_5_MULTIPLE_RESERVED = 0x5,
+ HDMI_ACR_6_MULTIPLE_RESERVED = 0x6,
+ HDMI_ACR_7_MULTIPLE_RESERVED = 0x7,
+} HDMI_ACR_N_MULTIPLE;
+typedef enum HDMI_ACR_AUDIO_PRIORITY {
+ HDMI_ACR_PKT_HIGH_PRIORITY_THAN_AUDIO_SAMPLE = 0x0,
+ HDMI_AUDIO_SAMPLE_HIGH_PRIORITY_THAN_ACR_PKT = 0x1,
+} HDMI_ACR_AUDIO_PRIORITY;
+typedef enum HDMI_NULL_SEND {
+ HDMI_NULL_NOT_SEND = 0x0,
+ HDMI_NULL_PKT_SEND = 0x1,
+} HDMI_NULL_SEND;
+typedef enum HDMI_GC_SEND {
+ HDMI_GC_NOT_SEND = 0x0,
+ HDMI_GC_PKT_SEND = 0x1,
+} HDMI_GC_SEND;
+typedef enum HDMI_GC_CONT {
+ HDMI_GC_CONT_DISABLE = 0x0,
+ HDMI_GC_CONT_ENABLE = 0x1,
+} HDMI_GC_CONT;
+typedef enum HDMI_ISRC_SEND {
+ HDMI_ISRC_NOT_SEND = 0x0,
+ HDMI_ISRC_PKT_SEND = 0x1,
+} HDMI_ISRC_SEND;
+typedef enum HDMI_ISRC_CONT {
+ HDMI_ISRC_CONT_DISABLE = 0x0,
+ HDMI_ISRC_CONT_ENABLE = 0x1,
+} HDMI_ISRC_CONT;
+typedef enum HDMI_AVI_INFO_SEND {
+ HDMI_AVI_INFO_NOT_SEND = 0x0,
+ HDMI_AVI_INFO_PKT_SEND = 0x1,
+} HDMI_AVI_INFO_SEND;
+typedef enum HDMI_AVI_INFO_CONT {
+ HDMI_AVI_INFO_CONT_DISABLE = 0x0,
+ HDMI_AVI_INFO_CONT_ENABLE = 0x1,
+} HDMI_AVI_INFO_CONT;
+typedef enum HDMI_AUDIO_INFO_SEND {
+ HDMI_AUDIO_INFO_NOT_SEND = 0x0,
+ HDMI_AUDIO_INFO_PKT_SEND = 0x1,
+} HDMI_AUDIO_INFO_SEND;
+typedef enum HDMI_AUDIO_INFO_CONT {
+ HDMI_AUDIO_INFO_CONT_DISABLE = 0x0,
+ HDMI_AUDIO_INFO_CONT_ENABLE = 0x1,
+} HDMI_AUDIO_INFO_CONT;
+typedef enum HDMI_MPEG_INFO_SEND {
+ HDMI_MPEG_INFO_NOT_SEND = 0x0,
+ HDMI_MPEG_INFO_PKT_SEND = 0x1,
+} HDMI_MPEG_INFO_SEND;
+typedef enum HDMI_MPEG_INFO_CONT {
+ HDMI_MPEG_INFO_CONT_DISABLE = 0x0,
+ HDMI_MPEG_INFO_CONT_ENABLE = 0x1,
+} HDMI_MPEG_INFO_CONT;
+typedef enum HDMI_GENERIC0_SEND {
+ HDMI_GENERIC0_NOT_SEND = 0x0,
+ HDMI_GENERIC0_PKT_SEND = 0x1,
+} HDMI_GENERIC0_SEND;
+typedef enum HDMI_GENERIC0_CONT {
+ HDMI_GENERIC0_CONT_DISABLE = 0x0,
+ HDMI_GENERIC0_CONT_ENABLE = 0x1,
+} HDMI_GENERIC0_CONT;
+typedef enum HDMI_GENERIC1_SEND {
+ HDMI_GENERIC1_NOT_SEND = 0x0,
+ HDMI_GENERIC1_PKT_SEND = 0x1,
+} HDMI_GENERIC1_SEND;
+typedef enum HDMI_GENERIC1_CONT {
+ HDMI_GENERIC1_CONT_DISABLE = 0x0,
+ HDMI_GENERIC1_CONT_ENABLE = 0x1,
+} HDMI_GENERIC1_CONT;
+typedef enum HDMI_GC_AVMUTE_CONT {
+ HDMI_GC_AVMUTE_CONT_DISABLE = 0x0,
+ HDMI_GC_AVMUTE_CONT_ENABLE = 0x1,
+} HDMI_GC_AVMUTE_CONT;
+typedef enum HDMI_PACKING_PHASE_OVERRIDE {
+ HDMI_PACKING_PHASE_SET_BY_HW = 0x0,
+ HDMI_PACKING_PHASE_SET_BY_SW = 0x1,
+} HDMI_PACKING_PHASE_OVERRIDE;
+typedef enum HDMI_GENERIC2_SEND {
+ HDMI_GENERIC2_NOT_SEND = 0x0,
+ HDMI_GENERIC2_PKT_SEND = 0x1,
+} HDMI_GENERIC2_SEND;
+typedef enum HDMI_GENERIC2_CONT {
+ HDMI_GENERIC2_CONT_DISABLE = 0x0,
+ HDMI_GENERIC2_CONT_ENABLE = 0x1,
+} HDMI_GENERIC2_CONT;
+typedef enum HDMI_GENERIC3_SEND {
+ HDMI_GENERIC3_NOT_SEND = 0x0,
+ HDMI_GENERIC3_PKT_SEND = 0x1,
+} HDMI_GENERIC3_SEND;
+typedef enum HDMI_GENERIC3_CONT {
+ HDMI_GENERIC3_CONT_DISABLE = 0x0,
+ HDMI_GENERIC3_CONT_ENABLE = 0x1,
+} HDMI_GENERIC3_CONT;
+typedef enum TMDS_PIXEL_ENCODING {
+ TMDS_PIXEL_ENCODING_444_OR_420 = 0x0,
+ TMDS_PIXEL_ENCODING_422 = 0x1,
+} TMDS_PIXEL_ENCODING;
+typedef enum TMDS_COLOR_FORMAT {
+ TMDS_COLOR_FORMAT__24BPP__TWIN30BPP_MSB__DUAL48BPP= 0x0,
+ TMDS_COLOR_FORMAT_TWIN30BPP_LSB = 0x1,
+ TMDS_COLOR_FORMAT_DUAL30BPP = 0x2,
+ TMDS_COLOR_FORMAT_RESERVED = 0x3,
+} TMDS_COLOR_FORMAT;
+typedef enum TMDS_STEREOSYNC_CTL_SEL_REG {
+ TMDS_STEREOSYNC_CTL0 = 0x0,
+ TMDS_STEREOSYNC_CTL1 = 0x1,
+ TMDS_STEREOSYNC_CTL2 = 0x2,
+ TMDS_STEREOSYNC_CTL3 = 0x3,
+} TMDS_STEREOSYNC_CTL_SEL_REG;
+typedef enum TMDS_CTL0_DATA_SEL {
+ TMDS_CTL0_DATA_SEL0_RESERVED = 0x0,
+ TMDS_CTL0_DATA_SEL1_DISPLAY_ENABLE = 0x1,
+ TMDS_CTL0_DATA_SEL2_VSYNC = 0x2,
+ TMDS_CTL0_DATA_SEL3_RESERVED = 0x3,
+ TMDS_CTL0_DATA_SEL4_HSYNC = 0x4,
+ TMDS_CTL0_DATA_SEL5_SEL7_RESERVED = 0x5,
+ TMDS_CTL0_DATA_SEL8_RANDOM_DATA = 0x6,
+ TMDS_CTL0_DATA_SEL9_SEL15_RANDOM_DATA = 0x7,
+} TMDS_CTL0_DATA_SEL;
+typedef enum TMDS_CTL0_DATA_INVERT {
+ TMDS_CTL0_DATA_NORMAL = 0x0,
+ TMDS_CTL0_DATA_INVERT_EN = 0x1,
+} TMDS_CTL0_DATA_INVERT;
+typedef enum TMDS_CTL0_DATA_MODULATION {
+ TMDS_CTL0_DATA_MODULATION_DISABLE = 0x0,
+ TMDS_CTL0_DATA_MODULATION_BIT0 = 0x1,
+ TMDS_CTL0_DATA_MODULATION_BIT1 = 0x2,
+ TMDS_CTL0_DATA_MODULATION_BIT2 = 0x3,
+} TMDS_CTL0_DATA_MODULATION;
+typedef enum TMDS_CTL0_PATTERN_OUT_EN {
+ TMDS_CTL0_PATTERN_OUT_DISABLE = 0x0,
+ TMDS_CTL0_PATTERN_OUT_ENABLE = 0x1,
+} TMDS_CTL0_PATTERN_OUT_EN;
+typedef enum TMDS_CTL1_DATA_SEL {
+ TMDS_CTL1_DATA_SEL0_RESERVED = 0x0,
+ TMDS_CTL1_DATA_SEL1_DISPLAY_ENABLE = 0x1,
+ TMDS_CTL1_DATA_SEL2_VSYNC = 0x2,
+ TMDS_CTL1_DATA_SEL3_RESERVED = 0x3,
+ TMDS_CTL1_DATA_SEL4_HSYNC = 0x4,
+ TMDS_CTL1_DATA_SEL5_SEL7_RESERVED = 0x5,
+ TMDS_CTL1_DATA_SEL8_BLANK_TIME = 0x6,
+ TMDS_CTL1_DATA_SEL9_SEL15_RESERVED = 0x7,
+} TMDS_CTL1_DATA_SEL;
+typedef enum TMDS_CTL1_DATA_INVERT {
+ TMDS_CTL1_DATA_NORMAL = 0x0,
+ TMDS_CTL1_DATA_INVERT_EN = 0x1,
+} TMDS_CTL1_DATA_INVERT;
+typedef enum TMDS_CTL1_DATA_MODULATION {
+ TMDS_CTL1_DATA_MODULATION_DISABLE = 0x0,
+ TMDS_CTL1_DATA_MODULATION_BIT0 = 0x1,
+ TMDS_CTL1_DATA_MODULATION_BIT1 = 0x2,
+ TMDS_CTL1_DATA_MODULATION_BIT2 = 0x3,
+} TMDS_CTL1_DATA_MODULATION;
+typedef enum TMDS_CTL1_PATTERN_OUT_EN {
+ TMDS_CTL1_PATTERN_OUT_DISABLE = 0x0,
+ TMDS_CTL1_PATTERN_OUT_ENABLE = 0x1,
+} TMDS_CTL1_PATTERN_OUT_EN;
+typedef enum TMDS_CTL2_DATA_SEL {
+ TMDS_CTL2_DATA_SEL0_RESERVED = 0x0,
+ TMDS_CTL2_DATA_SEL1_DISPLAY_ENABLE = 0x1,
+ TMDS_CTL2_DATA_SEL2_VSYNC = 0x2,
+ TMDS_CTL2_DATA_SEL3_RESERVED = 0x3,
+ TMDS_CTL2_DATA_SEL4_HSYNC = 0x4,
+ TMDS_CTL2_DATA_SEL5_SEL7_RESERVED = 0x5,
+ TMDS_CTL2_DATA_SEL8_BLANK_TIME = 0x6,
+ TMDS_CTL2_DATA_SEL9_SEL15_RESERVED = 0x7,
+} TMDS_CTL2_DATA_SEL;
+typedef enum TMDS_CTL2_DATA_INVERT {
+ TMDS_CTL2_DATA_NORMAL = 0x0,
+ TMDS_CTL2_DATA_INVERT_EN = 0x1,
+} TMDS_CTL2_DATA_INVERT;
+typedef enum TMDS_CTL2_DATA_MODULATION {
+ TMDS_CTL2_DATA_MODULATION_DISABLE = 0x0,
+ TMDS_CTL2_DATA_MODULATION_BIT0 = 0x1,
+ TMDS_CTL2_DATA_MODULATION_BIT1 = 0x2,
+ TMDS_CTL2_DATA_MODULATION_BIT2 = 0x3,
+} TMDS_CTL2_DATA_MODULATION;
+typedef enum TMDS_CTL2_PATTERN_OUT_EN {
+ TMDS_CTL2_PATTERN_OUT_DISABLE = 0x0,
+ TMDS_CTL2_PATTERN_OUT_ENABLE = 0x1,
+} TMDS_CTL2_PATTERN_OUT_EN;
+typedef enum TMDS_CTL3_DATA_INVERT {
+ TMDS_CTL3_DATA_NORMAL = 0x0,
+ TMDS_CTL3_DATA_INVERT_EN = 0x1,
+} TMDS_CTL3_DATA_INVERT;
+typedef enum TMDS_CTL3_DATA_MODULATION {
+ TMDS_CTL3_DATA_MODULATION_DISABLE = 0x0,
+ TMDS_CTL3_DATA_MODULATION_BIT0 = 0x1,
+ TMDS_CTL3_DATA_MODULATION_BIT1 = 0x2,
+ TMDS_CTL3_DATA_MODULATION_BIT2 = 0x3,
+} TMDS_CTL3_DATA_MODULATION;
+typedef enum TMDS_CTL3_PATTERN_OUT_EN {
+ TMDS_CTL3_PATTERN_OUT_DISABLE = 0x0,
+ TMDS_CTL3_PATTERN_OUT_ENABLE = 0x1,
+} TMDS_CTL3_PATTERN_OUT_EN;
+typedef enum TMDS_CTL3_DATA_SEL {
+ TMDS_CTL3_DATA_SEL0_RESERVED = 0x0,
+ TMDS_CTL3_DATA_SEL1_DISPLAY_ENABLE = 0x1,
+ TMDS_CTL3_DATA_SEL2_VSYNC = 0x2,
+ TMDS_CTL3_DATA_SEL3_RESERVED = 0x3,
+ TMDS_CTL3_DATA_SEL4_HSYNC = 0x4,
+ TMDS_CTL3_DATA_SEL5_SEL7_RESERVED = 0x5,
+ TMDS_CTL3_DATA_SEL8_BLANK_TIME = 0x6,
+ TMDS_CTL3_DATA_SEL9_SEL15_RESERVED = 0x7,
+} TMDS_CTL3_DATA_SEL;
+typedef enum DIG_FE_CNTL_SOURCE_SELECT {
+ DIG_FE_SOURCE_FROM_FMT0 = 0x0,
+ DIG_FE_SOURCE_FROM_FMT1 = 0x1,
+ DIG_FE_SOURCE_FROM_FMT2 = 0x2,
+ DIG_FE_SOURCE_FROM_FMT3 = 0x3,
+ DIG_FE_SOURCE_FROM_FMT4 = 0x4,
+ DIG_FE_SOURCE_FROM_FMT5 = 0x5,
+} DIG_FE_CNTL_SOURCE_SELECT;
+typedef enum DIG_FE_CNTL_STEREOSYNC_SELECT {
+ DIG_FE_STEREOSYNC_FROM_FMT0 = 0x0,
+ DIG_FE_STEREOSYNC_FROM_FMT1 = 0x1,
+ DIG_FE_STEREOSYNC_FROM_FMT2 = 0x2,
+ DIG_FE_STEREOSYNC_FROM_FMT3 = 0x3,
+ DIG_FE_STEREOSYNC_FROM_FMT4 = 0x4,
+ DIG_FE_STEREOSYNC_FROM_FMT5 = 0x5,
+} DIG_FE_CNTL_STEREOSYNC_SELECT;
+typedef enum DIG_FIFO_READ_CLOCK_SRC {
+ DIG_FIFO_READ_CLOCK_SRC_FROM_DCCG = 0x0,
+ DIG_FIFO_READ_CLOCK_SRC_FROM_DISPLAY_PIPE = 0x1,
+} DIG_FIFO_READ_CLOCK_SRC;
+typedef enum DIG_OUTPUT_CRC_CNTL_LINK_SEL {
+ DIG_OUTPUT_CRC_ON_LINK0 = 0x0,
+ DIG_OUTPUT_CRC_ON_LINK1 = 0x1,
+} DIG_OUTPUT_CRC_CNTL_LINK_SEL;
+typedef enum DIG_OUTPUT_CRC_DATA_SEL {
+ DIG_OUTPUT_CRC_FOR_FULLFRAME = 0x0,
+ DIG_OUTPUT_CRC_FOR_ACTIVEONLY = 0x1,
+ DIG_OUTPUT_CRC_FOR_VBI = 0x2,
+ DIG_OUTPUT_CRC_FOR_AUDIO = 0x3,
+} DIG_OUTPUT_CRC_DATA_SEL;
+typedef enum DIG_TEST_PATTERN_TEST_PATTERN_OUT_EN {
+ DIG_IN_NORMAL_OPERATION = 0x0,
+ DIG_IN_DEBUG_MODE = 0x1,
+} DIG_TEST_PATTERN_TEST_PATTERN_OUT_EN;
+typedef enum DIG_TEST_PATTERN_HALF_CLOCK_PATTERN_SEL {
+ DIG_10BIT_TEST_PATTERN = 0x0,
+ DIG_ALTERNATING_TEST_PATTERN = 0x1,
+} DIG_TEST_PATTERN_HALF_CLOCK_PATTERN_SEL;
+typedef enum DIG_TEST_PATTERN_RANDOM_PATTERN_OUT_EN {
+ DIG_TEST_PATTERN_NORMAL = 0x0,
+ DIG_TEST_PATTERN_RANDOM = 0x1,
+} DIG_TEST_PATTERN_RANDOM_PATTERN_OUT_EN;
+typedef enum DIG_TEST_PATTERN_RANDOM_PATTERN_RESET {
+ DIG_RANDOM_PATTERN_ENABLED = 0x0,
+ DIG_RANDOM_PATTERN_RESETED = 0x1,
+} DIG_TEST_PATTERN_RANDOM_PATTERN_RESET;
+typedef enum DIG_TEST_PATTERN_EXTERNAL_RESET_EN {
+ DIG_TEST_PATTERN_EXTERNAL_RESET_ENABLE = 0x0,
+ DIG_TEST_PATTERN_EXTERNAL_RESET_BY_EXT_SIG = 0x1,
+} DIG_TEST_PATTERN_EXTERNAL_RESET_EN;
+typedef enum DIG_RANDOM_PATTERN_SEED_RAN_PAT {
+ DIG_RANDOM_PATTERN_SEED_RAN_PAT_ALL_PIXELS = 0x0,
+ DIG_RANDOM_PATTERN_SEED_RAN_PAT_DE_HIGH = 0x1,
+} DIG_RANDOM_PATTERN_SEED_RAN_PAT;
+typedef enum DIG_FIFO_STATUS_USE_OVERWRITE_LEVEL {
+ DIG_FIFO_USE_OVERWRITE_LEVEL = 0x0,
+ DIG_FIFO_USE_CAL_AVERAGE_LEVEL = 0x1,
+} DIG_FIFO_STATUS_USE_OVERWRITE_LEVEL;
+typedef enum DIG_FIFO_ERROR_ACK {
+ DIG_FIFO_ERROR_ACK_INT = 0x0,
+ DIG_FIFO_ERROR_NOT_ACK = 0x1,
+} DIG_FIFO_ERROR_ACK;
+typedef enum DIG_FIFO_STATUS_FORCE_RECAL_AVERAGE {
+ DIG_FIFO_NOT_FORCE_RECAL_AVERAGE = 0x0,
+ DIG_FIFO_FORCE_RECAL_AVERAGE_LEVEL = 0x1,
+} DIG_FIFO_STATUS_FORCE_RECAL_AVERAGE;
+typedef enum DIG_FIFO_STATUS_FORCE_RECOMP_MINMAX {
+ DIG_FIFO_NOT_FORCE_RECOMP_MINMAX = 0x0,
+ DIG_FIFO_FORCE_RECOMP_MINMAX = 0x1,
+} DIG_FIFO_STATUS_FORCE_RECOMP_MINMAX;
+typedef enum DIG_DISPCLK_SWITCH_CNTL_SWITCH_POINT {
+ DIG_DISPCLK_SWITCH_AT_EARLY_VBLANK = 0x0,
+ DIG_DISPCLK_SWITCH_AT_FIRST_HSYNC = 0x1,
+} DIG_DISPCLK_SWITCH_CNTL_SWITCH_POINT;
+typedef enum DIG_DISPCLK_SWITCH_ALLOWED_INT_ACK {
+ DIG_DISPCLK_SWITCH_ALLOWED_ACK_INT = 0x0,
+ DIG_DISPCLK_SWITCH_ALLOWED_INT_NOT_ACK = 0x1,
+} DIG_DISPCLK_SWITCH_ALLOWED_INT_ACK;
+typedef enum DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK {
+ DIG_DISPCLK_SWITCH_ALLOWED_MASK_INT = 0x0,
+ DIG_DISPCLK_SWITCH_ALLOWED_INT_UNMASK = 0x1,
+} DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK;
+typedef enum AFMT_INTERRUPT_STATUS_CHG_MASK {
+ AFMT_INTERRUPT_DISABLE = 0x0,
+ AFMT_INTERRUPT_ENABLE = 0x1,
+} AFMT_INTERRUPT_STATUS_CHG_MASK;
+typedef enum HDMI_GC_AVMUTE {
+ HDMI_GC_AVMUTE_SET = 0x0,
+ HDMI_GC_AVMUTE_UNSET = 0x1,
+} HDMI_GC_AVMUTE;
+typedef enum HDMI_DEFAULT_PAHSE {
+ HDMI_DEFAULT_PHASE_IS_0 = 0x0,
+ HDMI_DEFAULT_PHASE_IS_1 = 0x1,
+} HDMI_DEFAULT_PAHSE;
+typedef enum AFMT_AUDIO_PACKET_CONTROL2_AUDIO_LAYOUT_OVRD {
+ AFMT_AUDIO_LAYOUT_DETERMINED_BY_AZ_AUDIO_CHANNEL_STATUS= 0x0,
+ AFMT_AUDIO_LAYOUT_OVRD_BY_REGISTER = 0x1,
+} AFMT_AUDIO_PACKET_CONTROL2_AUDIO_LAYOUT_OVRD;
+typedef enum AUDIO_LAYOUT_SELECT {
+ AUDIO_LAYOUT_0 = 0x0,
+ AUDIO_LAYOUT_1 = 0x1,
+} AUDIO_LAYOUT_SELECT;
+typedef enum AFMT_AUDIO_CRC_CONTROL_CONT {
+ AFMT_AUDIO_CRC_ONESHOT = 0x0,
+ AFMT_AUDIO_CRC_AUTO_RESTART = 0x1,
+} AFMT_AUDIO_CRC_CONTROL_CONT;
+typedef enum AFMT_AUDIO_CRC_CONTROL_SOURCE {
+ AFMT_AUDIO_CRC_SOURCE_FROM_FIFO_INPUT = 0x0,
+ AFMT_AUDIO_CRC_SOURCE_FROM_FIFO_OUTPUT = 0x1,
+} AFMT_AUDIO_CRC_CONTROL_SOURCE;
+typedef enum AFMT_AUDIO_CRC_CONTROL_CH_SEL {
+ AFMT_AUDIO_CRC_CH0_SIG = 0x0,
+ AFMT_AUDIO_CRC_CH1_SIG = 0x1,
+ AFMT_AUDIO_CRC_CH2_SIG = 0x2,
+ AFMT_AUDIO_CRC_CH3_SIG = 0x3,
+ AFMT_AUDIO_CRC_CH4_SIG = 0x4,
+ AFMT_AUDIO_CRC_CH5_SIG = 0x5,
+ AFMT_AUDIO_CRC_CH6_SIG = 0x6,
+ AFMT_AUDIO_CRC_CH7_SIG = 0x7,
+ AFMT_AUDIO_CRC_RESERVED = 0x8,
+ AFMT_AUDIO_CRC_AUDIO_SAMPLE_COUNT = 0x9,
+} AFMT_AUDIO_CRC_CONTROL_CH_SEL;
+typedef enum AFMT_RAMP_CONTROL0_SIGN {
+ AFMT_RAMP_SIGNED = 0x0,
+ AFMT_RAMP_UNSIGNED = 0x1,
+} AFMT_RAMP_CONTROL0_SIGN;
+typedef enum AFMT_AUDIO_PACKET_CONTROL_AUDIO_SAMPLE_SEND {
+ AFMT_AUDIO_PACKET_SENT_DISABLED = 0x0,
+ AFMT_AUDIO_PACKET_SENT_ENABLED = 0x1,
+} AFMT_AUDIO_PACKET_CONTROL_AUDIO_SAMPLE_SEND;
+typedef enum AFMT_AUDIO_PACKET_CONTROL_RESET_FIFO_WHEN_AUDIO_DIS {
+ AFMT_NOT_RESET_AUDIO_FIFO_WHEN_AUDIO_DISABLED_RESERVED= 0x0,
+ AFMT_RESET_AUDIO_FIFO_WHEN_AUDIO_DISABLED = 0x1,
+} AFMT_AUDIO_PACKET_CONTROL_RESET_FIFO_WHEN_AUDIO_DIS;
+typedef enum AFMT_INFOFRAME_CONTROL0_AUDIO_INFO_SOURCE {
+ AFMT_INFOFRAME_SOURCE_FROM_AZALIA_BLOCK = 0x0,
+ AFMT_INFOFRAME_SOURCE_FROM_AFMT_REGISTERS = 0x1,
+} AFMT_INFOFRAME_CONTROL0_AUDIO_INFO_SOURCE;
+typedef enum AFMT_AUDIO_SRC_CONTROL_SELECT {
+ AFMT_AUDIO_SRC_FROM_AZ_STREAM0 = 0x0,
+ AFMT_AUDIO_SRC_FROM_AZ_STREAM1 = 0x1,
+ AFMT_AUDIO_SRC_FROM_AZ_STREAM2 = 0x2,
+ AFMT_AUDIO_SRC_FROM_AZ_STREAM3 = 0x3,
+ AFMT_AUDIO_SRC_FROM_AZ_STREAM4 = 0x4,
+ AFMT_AUDIO_SRC_FROM_AZ_STREAM5 = 0x5,
+ AFMT_AUDIO_SRC_RESERVED = 0x6,
+} AFMT_AUDIO_SRC_CONTROL_SELECT;
+typedef enum DIG_BE_CNTL_MODE {
+ DIG_BE_DP_SST_MODE = 0x0,
+ DIG_BE_RESERVED1 = 0x1,
+ DIG_BE_TMDS_DVI_MODE = 0x2,
+ DIG_BE_TMDS_HDMI_MODE = 0x3,
+ DIG_BE_SDVO_RESERVED = 0x4,
+ DIG_BE_DP_MST_MODE = 0x5,
+ DIG_BE_RESERVED2 = 0x6,
+ DIG_BE_RESERVED3 = 0x7,
+} DIG_BE_CNTL_MODE;
+typedef enum DIG_BE_CNTL_HPD_SELECT {
+ DIG_BE_CNTL_HPD1 = 0x0,
+ DIG_BE_CNTL_HPD2 = 0x1,
+ DIG_BE_CNTL_HPD3 = 0x2,
+ DIG_BE_CNTL_HPD4 = 0x3,
+ DIG_BE_CNTL_HPD5 = 0x4,
+ DIG_BE_CNTL_HPD6 = 0x5,
+} DIG_BE_CNTL_HPD_SELECT;
+typedef enum LVTMA_RANDOM_PATTERN_SEED_RAN_PAT {
+ LVTMA_RANDOM_PATTERN_SEED_ALL_PIXELS = 0x0,
+ LVTMA_RANDOM_PATTERN_SEED_ONLY_DE_HIGH = 0x1,
+} LVTMA_RANDOM_PATTERN_SEED_RAN_PAT;
+typedef enum TMDS_SYNC_PHASE {
+ TMDS_NOT_SYNC_PHASE_ON_FRAME_START = 0x0,
+ TMDS_SYNC_PHASE_ON_FRAME_START = 0x1,
+} TMDS_SYNC_PHASE;
+typedef enum TMDS_DATA_SYNCHRONIZATION_DSINTSEL {
+ TMDS_DATA_SYNCHRONIZATION_DSINTSEL_PCLK_TMDS = 0x0,
+ TMDS_DATA_SYNCHRONIZATION_DSINTSEL_TMDS_PLL = 0x1,
+} TMDS_DATA_SYNCHRONIZATION_DSINTSEL;
+typedef enum TMDS_TRANSMITTER_ENABLE_HPD_MASK {
+ TMDS_TRANSMITTER_HPD_MASK_NOT_OVERRIDE = 0x0,
+ TMDS_TRANSMITTER_HPD_MASK_OVERRIDE = 0x1,
+} TMDS_TRANSMITTER_ENABLE_HPD_MASK;
+typedef enum TMDS_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK {
+ TMDS_TRANSMITTER_LNKCEN_HPD_MASK_NOT_OVERRIDE = 0x0,
+ TMDS_TRANSMITTER_LNKCEN_HPD_MASK_OVERRIDE = 0x1,
+} TMDS_TRANSMITTER_ENABLE_LNKCEN_HPD_MASK;
+typedef enum TMDS_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK {
+ TMDS_TRANSMITTER_LNKDEN_HPD_MASK_NOT_OVERRIDE = 0x0,
+ TMDS_TRANSMITTER_LNKDEN_HPD_MASK_OVERRIDE = 0x1,
+} TMDS_TRANSMITTER_ENABLE_LNKDEN_HPD_MASK;
+typedef enum TMDS_TRANSMITTER_CONTROL_PLL_ENABLE_HPD_MASK {
+ TMDS_TRANSMITTER_HPD_NOT_OVERRIDE_PLL_ENABLE = 0x0,
+ TMDS_TRANSMITTER_HPD_OVERRIDE_PLL_ENABLE_ON_DISCON= 0x1,
+ TMDS_TRANSMITTER_HPD_OVERRIDE_PLL_ENABLE_ON_CON = 0x2,
+ TMDS_TRANSMITTER_HPD_OVERRIDE_PLL_ENABLE = 0x3,
+} TMDS_TRANSMITTER_CONTROL_PLL_ENABLE_HPD_MASK;
+typedef enum TMDS_TRANSMITTER_CONTROL_IDSCKSELA {
+ TMDS_TRANSMITTER_IDSCKSELA_USE_IPIXCLK = 0x0,
+ TMDS_TRANSMITTER_IDSCKSELA_USE_IDCLK = 0x1,
+} TMDS_TRANSMITTER_CONTROL_IDSCKSELA;
+typedef enum TMDS_TRANSMITTER_CONTROL_IDSCKSELB {
+ TMDS_TRANSMITTER_IDSCKSELB_USE_IPIXCLK = 0x0,
+ TMDS_TRANSMITTER_IDSCKSELB_USE_IDCLK = 0x1,
+} TMDS_TRANSMITTER_CONTROL_IDSCKSELB;
+typedef enum TMDS_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN {
+ TMDS_TRANSMITTER_PLL_PWRUP_SEQ_DISABLE = 0x0,
+ TMDS_TRANSMITTER_PLL_PWRUP_SEQ_ENABLE = 0x1,
+} TMDS_TRANSMITTER_CONTROL_PLL_PWRUP_SEQ_EN;
+typedef enum TMDS_TRANSMITTER_CONTROL_PLL_RESET_HPD_MASK {
+ TMDS_TRANSMITTER_PLL_NOT_RST_ON_HPD = 0x0,
+ TMDS_TRANSMITTER_PLL_RST_ON_HPD = 0x1,
+} TMDS_TRANSMITTER_CONTROL_PLL_RESET_HPD_MASK;
+typedef enum TMDS_TRANSMITTER_CONTROL_TMCLK_FROM_PADS {
+ TMDS_TRANSMITTER_TMCLK_FROM_TMDS_TMCLK = 0x0,
+ TMDS_TRANSMITTER_TMCLK_FROM_PADS = 0x1,
+} TMDS_TRANSMITTER_CONTROL_TMCLK_FROM_PADS;
+typedef enum TMDS_TRANSMITTER_CONTROL_TDCLK_FROM_PADS {
+ TMDS_TRANSMITTER_TDCLK_FROM_TMDS_TDCLK = 0x0,
+ TMDS_TRANSMITTER_TDCLK_FROM_PADS = 0x1,
+} TMDS_TRANSMITTER_CONTROL_TDCLK_FROM_PADS;
+typedef enum TMDS_TRANSMITTER_CONTROL_PLLSEL_OVERWRITE_EN {
+ TMDS_TRANSMITTER_PLLSEL_BY_HW = 0x0,
+ TMDS_TRANSMITTER_PLLSEL_OVERWRITE_BY_SW = 0x1,
+} TMDS_TRANSMITTER_CONTROL_PLLSEL_OVERWRITE_EN;
+typedef enum TMDS_TRANSMITTER_CONTROL_BYPASS_PLLA {
+ TMDS_TRANSMITTER_BYPASS_PLLA_COHERENT = 0x0,
+ TMDS_TRANSMITTER_BYPASS_PLLA_INCOHERENT = 0x1,
+} TMDS_TRANSMITTER_CONTROL_BYPASS_PLLA;
+typedef enum TMDS_TRANSMITTER_CONTROL_BYPASS_PLLB {
+ TMDS_TRANSMITTER_BYPASS_PLLB_COHERENT = 0x0,
+ TMDS_TRANSMITTER_BYPASS_PLLB_INCOHERENT = 0x1,
+} TMDS_TRANSMITTER_CONTROL_BYPASS_PLLB;
+typedef enum TMDS_REG_TEST_OUTPUTA_CNTLA {
+ TMDS_REG_TEST_OUTPUTA_CNTLA_OTDATA0 = 0x0,
+ TMDS_REG_TEST_OUTPUTA_CNTLA_OTDATA1 = 0x1,
+ TMDS_REG_TEST_OUTPUTA_CNTLA_OTDATA2 = 0x2,
+ TMDS_REG_TEST_OUTPUTA_CNTLA_NA = 0x3,
+} TMDS_REG_TEST_OUTPUTA_CNTLA;
+typedef enum TMDS_REG_TEST_OUTPUTB_CNTLB {
+ TMDS_REG_TEST_OUTPUTB_CNTLB_OTDATB0 = 0x0,
+ TMDS_REG_TEST_OUTPUTB_CNTLB_OTDATB1 = 0x1,
+ TMDS_REG_TEST_OUTPUTB_CNTLB_OTDATB2 = 0x2,
+ TMDS_REG_TEST_OUTPUTB_CNTLB_NA = 0x3,
+} TMDS_REG_TEST_OUTPUTB_CNTLB;
+typedef enum DP_LINK_TRAINING_COMPLETE {
+ DP_LINK_TRAINING_NOT_COMPLETE = 0x0,
+ DP_LINK_TRAINING_ALREADY_COMPLETE = 0x1,
+} DP_LINK_TRAINING_COMPLETE;
+typedef enum DP_EMBEDDED_PANEL_MODE {
+ DP_EXTERNAL_PANEL = 0x0,
+ DP_EMBEDDED_PANEL = 0x1,
+} DP_EMBEDDED_PANEL_MODE;
+typedef enum DP_PIXEL_ENCODING {
+ DP_PIXEL_ENCODING_RGB444 = 0x0,
+ DP_PIXEL_ENCODING_YCBCR422 = 0x1,
+ DP_PIXEL_ENCODING_YCBCR444 = 0x2,
+ DP_PIXEL_ENCODING_RGB_WIDE_GAMUT = 0x3,
+ DP_PIXEL_ENCODING_Y_ONLY = 0x4,
+ DP_PIXEL_ENCODING_YCBCR420 = 0x5,
+ DP_PIXEL_ENCODING_RESERVED = 0x6,
+} DP_PIXEL_ENCODING;
+typedef enum DP_DYN_RANGE {
+ DP_DYN_VESA_RANGE = 0x0,
+ DP_DYN_CEA_RANGE = 0x1,
+} DP_DYN_RANGE;
+typedef enum DP_YCBCR_RANGE {
+ DP_YCBCR_RANGE_BT601_5 = 0x0,
+ DP_YCBCR_RANGE_BT709_5 = 0x1,
+} DP_YCBCR_RANGE;
+typedef enum DP_COMPONENT_DEPTH {
+ DP_COMPONENT_DEPTH_6BPC = 0x0,
+ DP_COMPONENT_DEPTH_8BPC = 0x1,
+ DP_COMPONENT_DEPTH_10BPC = 0x2,
+ DP_COMPONENT_DEPTH_12BPC = 0x3,
+ DP_COMPONENT_DEPTH_16BPC = 0x4,
+ DP_COMPONENT_DEPTH_RESERVED = 0x5,
+} DP_COMPONENT_DEPTH;
+typedef enum DP_MSA_MISC0_OVERRIDE_ENABLE {
+ MSA_MISC0_OVERRIDE_DISABLE = 0x0,
+ MSA_MISC0_OVERRIDE_ENABLE = 0x1,
+} DP_MSA_MISC0_OVERRIDE_ENABLE;
+typedef enum DP_MSA_MISC1_BIT7_OVERRIDE_ENABLE {
+ MSA_MISC1_BIT7_OVERRIDE_DISABLE = 0x0,
+ MSA_MISC1_BIT7_OVERRIDE_ENABLE = 0x1,
+} DP_MSA_MISC1_BIT7_OVERRIDE_ENABLE;
+typedef enum DP_UDI_LANES {
+ DP_UDI_1_LANE = 0x0,
+ DP_UDI_2_LANES = 0x1,
+ DP_UDI_LANES_RESERVED = 0x2,
+ DP_UDI_4_LANES = 0x3,
+} DP_UDI_LANES;
+typedef enum DP_VID_STREAM_DIS_DEFER {
+ DP_VID_STREAM_DIS_NO_DEFER = 0x0,
+ DP_VID_STREAM_DIS_DEFER_TO_HBLANK = 0x1,
+ DP_VID_STREAM_DIS_DEFER_TO_VBLANK = 0x2,
+} DP_VID_STREAM_DIS_DEFER;
+typedef enum DP_STEER_OVERFLOW_ACK {
+ DP_STEER_OVERFLOW_ACK_NO_EFFECT = 0x0,
+ DP_STEER_OVERFLOW_ACK_CLR_INTERRUPT = 0x1,
+} DP_STEER_OVERFLOW_ACK;
+typedef enum DP_STEER_OVERFLOW_MASK {
+ DP_STEER_OVERFLOW_MASKED = 0x0,
+ DP_STEER_OVERFLOW_UNMASK = 0x1,
+} DP_STEER_OVERFLOW_MASK;
+typedef enum DP_TU_OVERFLOW_ACK {
+ DP_TU_OVERFLOW_ACK_NO_EFFECT = 0x0,
+ DP_TU_OVERFLOW_ACK_CLR_INTERRUPT = 0x1,
+} DP_TU_OVERFLOW_ACK;
+typedef enum DP_VID_TIMING_MODE {
+ DP_VID_TIMING_MODE_ASYNC = 0x0,
+ DP_VID_TIMING_MODE_SYNC = 0x1,
+} DP_VID_TIMING_MODE;
+typedef enum DP_VID_M_N_DOUBLE_BUFFER_MODE {
+ DP_VID_M_N_DOUBLE_BUFFER_AFTER_VID_M_UPDATE = 0x0,
+ DP_VID_M_N_DOUBLE_BUFFER_AT_FRAME_START = 0x1,
+} DP_VID_M_N_DOUBLE_BUFFER_MODE;
+typedef enum DP_VID_M_N_GEN_EN {
+ DP_VID_M_N_PROGRAMMED_VIA_REG = 0x0,
+ DP_VID_M_N_CALC_AUTO = 0x1,
+} DP_VID_M_N_GEN_EN;
+typedef enum DP_VID_M_DOUBLE_VALUE_EN {
+ DP_VID_M_INPUT_PIXEL_RATE = 0x0,
+ DP_VID_M_DOUBLE_INPUT_PIXEL_RATE = 0x1,
+} DP_VID_M_DOUBLE_VALUE_EN;
+typedef enum DP_VID_ENHANCED_FRAME_MODE {
+ VID_NORMAL_FRAME_MODE = 0x0,
+ VID_ENHANCED_MODE = 0x1,
+} DP_VID_ENHANCED_FRAME_MODE;
+typedef enum DP_VID_MSA_TOP_FIELD_MODE {
+ DP_TOP_FIELD_ONLY = 0x0,
+ DP_TOP_PLUS_BOTTOM_FIELD = 0x1,
+} DP_VID_MSA_TOP_FIELD_MODE;
+typedef enum DP_VID_VBID_FIELD_POL {
+ DP_VID_VBID_FIELD_POL_NORMAL = 0x0,
+ DP_VID_VBID_FIELD_POL_INV = 0x1,
+} DP_VID_VBID_FIELD_POL;
+typedef enum DP_VID_STREAM_DISABLE_ACK {
+ ID_STREAM_DISABLE_NO_ACK = 0x0,
+ ID_STREAM_DISABLE_ACKED = 0x1,
+} DP_VID_STREAM_DISABLE_ACK;
+typedef enum DP_VID_STREAM_DISABLE_MASK {
+ VID_STREAM_DISABLE_MASKED = 0x0,
+ VID_STREAM_DISABLE_UNMASK = 0x1,
+} DP_VID_STREAM_DISABLE_MASK;
+typedef enum DPHY_ATEST_SEL_LANE0 {
+ DPHY_ATEST_LANE0_PRBS_PATTERN = 0x0,
+ DPHY_ATEST_LANE0_REG_PATTERN = 0x1,
+} DPHY_ATEST_SEL_LANE0;
+typedef enum DPHY_ATEST_SEL_LANE1 {
+ DPHY_ATEST_LANE1_PRBS_PATTERN = 0x0,
+ DPHY_ATEST_LANE1_REG_PATTERN = 0x1,
+} DPHY_ATEST_SEL_LANE1;
+typedef enum DPHY_ATEST_SEL_LANE2 {
+ DPHY_ATEST_LANE2_PRBS_PATTERN = 0x0,
+ DPHY_ATEST_LANE2_REG_PATTERN = 0x1,
+} DPHY_ATEST_SEL_LANE2;
+typedef enum DPHY_ATEST_SEL_LANE3 {
+ DPHY_ATEST_LANE3_PRBS_PATTERN = 0x0,
+ DPHY_ATEST_LANE3_REG_PATTERN = 0x1,
+} DPHY_ATEST_SEL_LANE3;
+typedef enum DPHY_BYPASS {
+ DPHY_8B10B_OUTPUT = 0x0,
+ DPHY_DBG_OUTPUT = 0x1,
+} DPHY_BYPASS;
+typedef enum DPHY_SKEW_BYPASS {
+ DPHY_WITH_SKEW = 0x0,
+ DPHY_NO_SKEW = 0x1,
+} DPHY_SKEW_BYPASS;
+typedef enum DPHY_TRAINING_PATTERN_SEL {
+ DPHY_TRAINING_PATTERN_1 = 0x0,
+ DPHY_TRAINING_PATTERN_2 = 0x1,
+ DPHY_TRAINING_PATTERN_3 = 0x2,
+ DPHY_TRAINING_PATTERN_4 = 0x3,
+} DPHY_TRAINING_PATTERN_SEL;
+typedef enum DPHY_8B10B_RESET {
+ DPHY_8B10B_NOT_RESET = 0x0,
+ DPHY_8B10B_RESETET = 0x1,
+} DPHY_8B10B_RESET;
+typedef enum DP_DPHY_8B10B_EXT_DISP {
+ DP_DPHY_8B10B_EXT_DISP_ZERO = 0x0,
+ DP_DPHY_8B10B_EXT_DISP_ONE = 0x1,
+} DP_DPHY_8B10B_EXT_DISP;
+typedef enum DPHY_8B10B_CUR_DISP {
+ DPHY_8B10B_CUR_DISP_ZERO = 0x0,
+ DPHY_8B10B_CUR_DISP_ONE = 0x1,
+} DPHY_8B10B_CUR_DISP;
+typedef enum DPHY_PRBS_EN {
+ DPHY_PRBS_DISABLE = 0x0,
+ DPHY_PRBS_ENABLE = 0x1,
+} DPHY_PRBS_EN;
+typedef enum DPHY_PRBS_SEL {
+ DPHY_PRBS7_SELECTED = 0x0,
+ DPHY_PRBS23_SELECTED = 0x1,
+ DPHY_PRBS11_SELECTED = 0x2,
+} DPHY_PRBS_SEL;
+typedef enum DPHY_LOAD_BS_COUNT_START {
+ DPHY_LOAD_BS_COUNT_STARTED = 0x0,
+ DPHY_LOAD_BS_COUNT_NOT_STARTED = 0x1,
+} DPHY_LOAD_BS_COUNT_START;
+typedef enum DPHY_CRC_EN {
+ DPHY_CRC_DISABLED = 0x0,
+ DPHY_CRC_ENABLED = 0x1,
+} DPHY_CRC_EN;
+typedef enum DPHY_CRC_CONT_EN {
+ DPHY_CRC_ONE_SHOT = 0x0,
+ DPHY_CRC_CONTINUOUS = 0x1,
+} DPHY_CRC_CONT_EN;
+typedef enum DPHY_CRC_FIELD {
+ DPHY_CRC_START_FROM_TOP_FIELD = 0x0,
+ DPHY_CRC_START_FROM_BOTTOM_FIELD = 0x1,
+} DPHY_CRC_FIELD;
+typedef enum DPHY_CRC_SEL {
+ DPHY_CRC_LANE0_SELECTED = 0x0,
+ DPHY_CRC_LANE1_SELECTED = 0x1,
+ DPHY_CRC_LANE2_SELECTED = 0x2,
+ DPHY_CRC_LANE3_SELECTED = 0x3,
+} DPHY_CRC_SEL;
+typedef enum DPHY_RX_FAST_TRAINING_CAPABLE {
+ DPHY_FAST_TRAINING_NOT_CAPABLE_0 = 0x0,
+ DPHY_FAST_TRAINING_CAPABLE = 0x1,
+} DPHY_RX_FAST_TRAINING_CAPABLE;
+typedef enum DP_SEC_COLLISION_ACK {
+ DP_SEC_COLLISION_ACK_NO_EFFECT = 0x0,
+ DP_SEC_COLLISION_ACK_CLR_FLAG = 0x1,
+} DP_SEC_COLLISION_ACK;
+typedef enum DP_SEC_AUDIO_MUTE {
+ DP_SEC_AUDIO_MUTE_HW_CTRL = 0x0,
+ DP_SEC_AUDIO_MUTE_SW_CTRL = 0x1,
+} DP_SEC_AUDIO_MUTE;
+typedef enum DP_SEC_TIMESTAMP_MODE {
+ DP_SEC_TIMESTAMP_PROGRAMMABLE_MODE = 0x0,
+ DP_SEC_TIMESTAMP_AUTO_CALC_MODE = 0x1,
+} DP_SEC_TIMESTAMP_MODE;
+typedef enum DP_SEC_ASP_PRIORITY {
+ DP_SEC_ASP_LOW_PRIORITY = 0x0,
+ DP_SEC_ASP_HIGH_PRIORITY = 0x1,
+} DP_SEC_ASP_PRIORITY;
+typedef enum DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE {
+ DP_SEC_ASP_CHANNEL_COUNT_FROM_AZ = 0x0,
+ DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_ENABLED = 0x1,
+} DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE;
+typedef enum DP_MSE_SAT_UPDATE_ACT {
+ DP_MSE_SAT_UPDATE_NO_ACTION = 0x0,
+ DP_MSE_SAT_UPDATE_WITH_TRIGGER = 0x1,
+ DP_MSE_SAT_UPDATE_WITHOUT_TRIGGER = 0x2,
+} DP_MSE_SAT_UPDATE_ACT;
+typedef enum DP_MSE_LINK_LINE {
+ DP_MSE_LINK_LINE_32_MTP_LONG = 0x0,
+ DP_MSE_LINK_LINE_64_MTP_LONG = 0x1,
+ DP_MSE_LINK_LINE_128_MTP_LONG = 0x2,
+ DP_MSE_LINK_LINE_256_MTP_LONG = 0x3,
+} DP_MSE_LINK_LINE;
+typedef enum DP_MSE_BLANK_CODE {
+ DP_MSE_BLANK_CODE_SF_FILLED = 0x0,
+ DP_MSE_BLANK_CODE_ZERO_FILLED = 0x1,
+} DP_MSE_BLANK_CODE;
+typedef enum DP_MSE_TIMESTAMP_MODE {
+ DP_MSE_TIMESTAMP_CALC_BASED_ON_LINK_RATE = 0x0,
+ DP_MSE_TIMESTAMP_CALC_BASED_ON_VC_RATE = 0x1,
+} DP_MSE_TIMESTAMP_MODE;
+typedef enum DP_MSE_ZERO_ENCODER {
+ DP_MSE_NOT_ZERO_FE_ENCODER = 0x0,
+ DP_MSE_ZERO_FE_ENCODER = 0x1,
+} DP_MSE_ZERO_ENCODER;
+typedef enum DP_MSE_OUTPUT_DPDBG_DATA {
+ DP_MSE_OUTPUT_DPDBG_DATA_DIS = 0x0,
+ DP_MSE_OUTPUT_DPDBG_DATA_EN = 0x1,
+} DP_MSE_OUTPUT_DPDBG_DATA;
+typedef enum DP_DPHY_HBR2_PATTERN_CONTROL_MODE {
+ DP_DPHY_HBR2_PASS_THROUGH = 0x0,
+ DP_DPHY_HBR2_PATTERN_1 = 0x1,
+ DP_DPHY_HBR2_PATTERN_2_NEG = 0x2,
+ DP_DPHY_HBR2_PATTERN_3 = 0x3,
+ DP_DPHY_HBR2_PATTERN_2_POS = 0x6,
+} DP_DPHY_HBR2_PATTERN_CONTROL_MODE;
+typedef enum DPHY_CRC_MST_PHASE_ERROR_ACK {
+ DPHY_CRC_MST_PHASE_ERROR_NO_ACK = 0x0,
+ DPHY_CRC_MST_PHASE_ERROR_ACKED = 0x1,
+} DPHY_CRC_MST_PHASE_ERROR_ACK;
+typedef enum DPHY_SW_FAST_TRAINING_START {
+ DPHY_SW_FAST_TRAINING_NOT_STARTED = 0x0,
+ DPHY_SW_FAST_TRAINING_STARTED = 0x1,
+} DPHY_SW_FAST_TRAINING_START;
+typedef enum DP_DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN {
+ DP_DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_DISABLED= 0x0,
+ DP_DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_ENABLED = 0x1,
+} DP_DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN;
+typedef enum DP_DPHY_FAST_TRAINING_COMPLETE_MASK {
+ DP_DPHY_FAST_TRAINING_COMPLETE_MASKED = 0x0,
+ DP_DPHY_FAST_TRAINING_COMPLETE_NOT_MASKED = 0x1,
+} DP_DPHY_FAST_TRAINING_COMPLETE_MASK;
+typedef enum DP_DPHY_FAST_TRAINING_COMPLETE_ACK {
+ DP_DPHY_FAST_TRAINING_COMPLETE_NOT_ACKED = 0x0,
+ DP_DPHY_FAST_TRAINING_COMPLETE_ACKED = 0x1,
+} DP_DPHY_FAST_TRAINING_COMPLETE_ACK;
+typedef enum DP_MSA_V_TIMING_OVERRIDE_EN {
+ MSA_V_TIMING_OVERRIDE_DISABLED = 0x0,
+ MSA_V_TIMING_OVERRIDE_ENABLED = 0x1,
+} DP_MSA_V_TIMING_OVERRIDE_EN;
+typedef enum DP_SEC_GSP0_PRIORITY {
+ SEC_GSP0_PRIORITY_LOW = 0x0,
+ SEC_GSP0_PRIORITY_HIGH = 0x1,
+} DP_SEC_GSP0_PRIORITY;
+typedef enum DP_SEC_GSP0_SEND {
+ NOT_SENT = 0x0,
+ FORCE_SENT = 0x1,
+} DP_SEC_GSP0_SEND;
+typedef enum DP_AUX_CONTROL_HPD_SEL {
+ DP_AUX_CONTROL_HPD1_SELECTED = 0x0,
+ DP_AUX_CONTROL_HPD2_SELECTED = 0x1,
+ DP_AUX_CONTROL_HPD3_SELECTED = 0x2,
+ DP_AUX_CONTROL_HPD4_SELECTED = 0x3,
+ DP_AUX_CONTROL_HPD5_SELECTED = 0x4,
+ DP_AUX_CONTROL_HPD6_SELECTED = 0x5,
+} DP_AUX_CONTROL_HPD_SEL;
+typedef enum DP_AUX_CONTROL_TEST_MODE {
+ DP_AUX_CONTROL_TEST_MODE_DISABLE = 0x0,
+ DP_AUX_CONTROL_TEST_MODE_ENABLE = 0x1,
+} DP_AUX_CONTROL_TEST_MODE;
+typedef enum DP_AUX_SW_CONTROL_SW_GO {
+ DP_AUX_SW_CONTROL_SW__NOT_GO = 0x0,
+ DP_AUX_SW_CONTROL_SW__GO = 0x1,
+} DP_AUX_SW_CONTROL_SW_GO;
+typedef enum DP_AUX_SW_CONTROL_LS_READ_TRIG {
+ DP_AUX_SW_CONTROL_LS_READ__NOT_TRIG = 0x0,
+ DP_AUX_SW_CONTROL_LS_READ__TRIG = 0x1,
+} DP_AUX_SW_CONTROL_LS_READ_TRIG;
+typedef enum DP_AUX_ARB_CONTROL_ARB_PRIORITY {
+ DP_AUX_ARB_CONTROL_ARB_PRIORITY__GTC_LS_SW = 0x0,
+ DP_AUX_ARB_CONTROL_ARB_PRIORITY__LS_GTC_SW = 0x1,
+ DP_AUX_ARB_CONTROL_ARB_PRIORITY__SW_LS_GTC = 0x2,
+ DP_AUX_ARB_CONTROL_ARB_PRIORITY__SW_GTC_LS = 0x3,
+} DP_AUX_ARB_CONTROL_ARB_PRIORITY;
+typedef enum DP_AUX_ARB_CONTROL_USE_AUX_REG_REQ {
+ DP_AUX_ARB_CONTROL__NOT_USE_AUX_REG_REQ = 0x0,
+ DP_AUX_ARB_CONTROL__USE_AUX_REG_REQ = 0x1,
+} DP_AUX_ARB_CONTROL_USE_AUX_REG_REQ;
+typedef enum DP_AUX_ARB_CONTROL_DONE_USING_AUX_REG {
+ DP_AUX_ARB_CONTROL__DONE_NOT_USING_AUX_REG = 0x0,
+ DP_AUX_ARB_CONTROL__DONE_USING_AUX_REG = 0x1,
+} DP_AUX_ARB_CONTROL_DONE_USING_AUX_REG;
+typedef enum DP_AUX_INT_ACK {
+ DP_AUX_INT__NOT_ACK = 0x0,
+ DP_AUX_INT__ACK = 0x1,
+} DP_AUX_INT_ACK;
+typedef enum DP_AUX_LS_UPDATE_ACK {
+ DP_AUX_INT_LS_UPDATE_NOT_ACK = 0x0,
+ DP_AUX_INT_LS_UPDATE_ACK = 0x1,
+} DP_AUX_LS_UPDATE_ACK;
+typedef enum DP_AUX_DPHY_TX_REF_CONTROL_TX_REF_SEL {
+ DP_AUX_DPHY_TX_REF_CONTROL_TX_REF_SEL__DIVIDED_SYM_CLK= 0x0,
+ DP_AUX_DPHY_TX_REF_CONTROL_TX_REF_SEL__FROM_DCCG_MICROSECOND_REF= 0x1,
+} DP_AUX_DPHY_TX_REF_CONTROL_TX_REF_SEL;
+typedef enum DP_AUX_DPHY_TX_REF_CONTROL_TX_RATE {
+ DP_AUX_DPHY_TX_REF_CONTROL_TX_RATE__1MHZ = 0x0,
+ DP_AUX_DPHY_TX_REF_CONTROL_TX_RATE__2MHZ = 0x1,
+ DP_AUX_DPHY_TX_REF_CONTROL_TX_RATE__4MHZ = 0x2,
+ DP_AUX_DPHY_TX_REF_CONTROL_TX_RATE__8MHZ = 0x3,
+} DP_AUX_DPHY_TX_REF_CONTROL_TX_RATE;
+typedef enum DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN {
+ DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__0US = 0x0,
+ DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__8US = 0x1,
+ DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__16US = 0x2,
+ DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__24US = 0x3,
+ DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__32US = 0x4,
+ DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__40US = 0x5,
+ DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__48US = 0x6,
+ DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN__56US = 0x7,
+} DP_AUX_DPHY_TX_CONTROL_PRECHARGE_LEN;
+typedef enum DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY {
+ DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY__0 = 0x0,
+ DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY__16US= 0x1,
+ DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY__32US= 0x2,
+ DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY__64US= 0x3,
+ DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY__128US= 0x4,
+ DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY__256US= 0x5,
+} DP_AUX_DPHY_TX_CONTROL_MODE_DET_CHECK_DELAY;
+typedef enum DP_AUX_DPHY_RX_CONTROL_START_WINDOW {
+ DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO2_PERIOD = 0x0,
+ DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO4_PERIOD = 0x1,
+ DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO8_PERIOD = 0x2,
+ DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO16_PERIOD= 0x3,
+ DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO32_PERIOD= 0x4,
+ DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO64_PERIOD= 0x5,
+ DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO128_PERIOD= 0x6,
+ DP_AUX_DPHY_RX_CONTROL_START_WINDOW__1TO256_PERIOD= 0x7,
+} DP_AUX_DPHY_RX_CONTROL_START_WINDOW;
+typedef enum DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW {
+ DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO2_PERIOD= 0x0,
+ DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO4_PERIOD= 0x1,
+ DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO8_PERIOD= 0x2,
+ DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO16_PERIOD= 0x3,
+ DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO32_PERIOD= 0x4,
+ DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO64_PERIOD= 0x5,
+ DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO128_PERIOD= 0x6,
+ DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW__1TO256_PERIOD= 0x7,
+} DP_AUX_DPHY_RX_CONTROL_RECEIVE_WINDOW;
+typedef enum DP_AUX_DPHY_RX_CONTROL_HALF_SYM_DETECT_LEN {
+ DP_AUX_DPHY_RX_CONTROL_HALF_SYM_DETECT_LEN__6_EDGES= 0x0,
+ DP_AUX_DPHY_RX_CONTROL_HALF_SYM_DETECT_LEN__10_EDGES= 0x1,
+ DP_AUX_DPHY_RX_CONTROL_HALF_SYM_DETECT_LEN__18_EDGES= 0x2,
+ DP_AUX_DPHY_RX_CONTROL_HALF_SYM_DETECT_LEN__RESERVED= 0x3,
+} DP_AUX_DPHY_RX_CONTROL_HALF_SYM_DETECT_LEN;
+typedef enum DP_AUX_DPHY_RX_CONTROL_ALLOW_BELOW_THRESHOLD_PHASE_DETECT {
+ DP_AUX_DPHY_RX_CONTROL__NOT_ALLOW_BELOW_THRESHOLD_PHASE_DETECT= 0x0,
+ DP_AUX_DPHY_RX_CONTROL__ALLOW_BELOW_THRESHOLD_PHASE_DETECT= 0x1,
+} DP_AUX_DPHY_RX_CONTROL_ALLOW_BELOW_THRESHOLD_PHASE_DETECT;
+typedef enum DP_AUX_DPHY_RX_CONTROL_ALLOW_BELOW_THRESHOLD_START {
+ DP_AUX_DPHY_RX_CONTROL__NOT_ALLOW_BELOW_THRESHOLD_START= 0x0,
+ DP_AUX_DPHY_RX_CONTROL__ALLOW_BELOW_THRESHOLD_START= 0x1,
+} DP_AUX_DPHY_RX_CONTROL_ALLOW_BELOW_THRESHOLD_START;
+typedef enum DP_AUX_DPHY_RX_CONTROL_ALLOW_BELOW_THRESHOLD_STOP {
+ DP_AUX_DPHY_RX_CONTROL__NOT_ALLOW_BELOW_THRESHOLD_STOP= 0x0,
+ DP_AUX_DPHY_RX_CONTROL__ALLOW_BELOW_THRESHOLD_STOP= 0x1,
+} DP_AUX_DPHY_RX_CONTROL_ALLOW_BELOW_THRESHOLD_STOP;
+typedef enum DP_AUX_DPHY_RX_CONTROL_PHASE_DETECT_LEN {
+ DP_AUX_DPHY_RX_CONTROL_PHASE_DETECT_LEN__2_HALF_SYMBOLS= 0x0,
+ DP_AUX_DPHY_RX_CONTROL_PHASE_DETECT_LEN__4_HALF_SYMBOLS= 0x1,
+ DP_AUX_DPHY_RX_CONTROL_PHASE_DETECT_LEN__6_HALF_SYMBOLS= 0x2,
+ DP_AUX_DPHY_RX_CONTROL_PHASE_DETECT_LEN__8_HALF_SYMBOLS= 0x3,
+} DP_AUX_DPHY_RX_CONTROL_PHASE_DETECT_LEN;
+typedef enum DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN {
+ DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_450US = 0x0,
+ DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_500US = 0x1,
+ DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_550US = 0x2,
+ DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_600US = 0x3,
+ DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_650US = 0x4,
+ DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_700US = 0x5,
+ DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_750US = 0x6,
+ DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN_800US = 0x7,
+} DP_AUX_DPHY_RX_CONTROL_TIMEOUT_LEN;
+typedef enum DP_AUX_DPHY_RX_DETECTION_THRESHOLD {
+ DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 = 0x0,
+ DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 = 0x1,
+ DP_AUX_DPHY_RX_DETECTION_THRESHOLD__7to8 = 0x2,
+ DP_AUX_DPHY_RX_DETECTION_THRESHOLD__15to16 = 0x3,
+ DP_AUX_DPHY_RX_DETECTION_THRESHOLD__31to32 = 0x4,
+ DP_AUX_DPHY_RX_DETECTION_THRESHOLD__63to64 = 0x5,
+ DP_AUX_DPHY_RX_DETECTION_THRESHOLD__127to128 = 0x6,
+ DP_AUX_DPHY_RX_DETECTION_THRESHOLD__255to256 = 0x7,
+} DP_AUX_DPHY_RX_DETECTION_THRESHOLD;
+typedef enum DP_AUX_GTC_SYNC_CONTROL_GTC_SYNC_BLOCK_REQ {
+ DP_AUX_GTC_SYNC_CONTROL_GTC_SYNC_ALLOW_REQ_FROM_OTHER_AUX= 0x0,
+ DP_AUX_GTC_SYNC_CONTROL_GTC_SYNC_BLOCK_REQ_FROM_OTHER_AUX= 0x1,
+} DP_AUX_GTC_SYNC_CONTROL_GTC_SYNC_BLOCK_REQ;
+typedef enum DP_AUX_GTC_SYNC_CONTROL_INTERVAL_RESET_WINDOW {
+ DP_AUX_GTC_SYNC_CONTROL_INTERVAL_RESET_WINDOW__300US= 0x0,
+ DP_AUX_GTC_SYNC_CONTROL_INTERVAL_RESET_WINDOW__400US= 0x1,
+ DP_AUX_GTC_SYNC_CONTROL_INTERVAL_RESET_WINDOW__500US= 0x2,
+ DP_AUX_GTC_SYNC_CONTROL_INTERVAL_RESET_WINDOW__600US= 0x3,
+} DP_AUX_GTC_SYNC_CONTROL_INTERVAL_RESET_WINDOW;
+typedef enum DP_AUX_GTC_SYNC_CONTROL_OFFSET_CALC_MAX_ATTEMPT {
+ DP_AUX_GTC_SYNC_CONTROL_OFFSET_CALC_MAX_ATTEMPT__4_ATTAMPS= 0x0,
+ DP_AUX_GTC_SYNC_CONTROL_OFFSET_CALC_MAX_ATTEMPT__8_ATTAMPS= 0x1,
+ DP_AUX_GTC_SYNC_CONTROL_OFFSET_CALC_MAX_ATTEMPT__16_ATTAMPS= 0x2,
+ DP_AUX_GTC_SYNC_CONTROL_OFFSET_CALC_MAX_ATTEMPT__RESERVED= 0x3,
+} DP_AUX_GTC_SYNC_CONTROL_OFFSET_CALC_MAX_ATTEMPT;
+typedef enum DP_AUX_GTC_SYNC_ERROR_CONTROL_LOCK_ACQ_TIMEOUT_LEN {
+ DP_AUX_GTC_SYNC_ERROR_CONTROL_LOCK_ACQ_TIMEOUT_LEN__0= 0x0,
+ DP_AUX_GTC_SYNC_ERROR_CONTROL_LOCK_ACQ_TIMEOUT_LEN__64= 0x1,
+ DP_AUX_GTC_SYNC_ERROR_CONTROL_LOCK_ACQ_TIMEOUT_LEN__128= 0x2,
+ DP_AUX_GTC_SYNC_ERROR_CONTROL_LOCK_ACQ_TIMEOUT_LEN__256= 0x3,
+} DP_AUX_GTC_SYNC_ERROR_CONTROL_LOCK_ACQ_TIMEOUT_LEN;
+typedef enum DP_AUX_ERR_OCCURRED_ACK {
+ DP_AUX_ERR_OCCURRED__NOT_ACK = 0x0,
+ DP_AUX_ERR_OCCURRED__ACK = 0x1,
+} DP_AUX_ERR_OCCURRED_ACK;
+typedef enum DP_AUX_POTENTIAL_ERR_REACHED_ACK {
+ DP_AUX_POTENTIAL_ERR_REACHED__NOT_ACK = 0x0,
+ DP_AUX_POTENTIAL_ERR_REACHED__ACK = 0x1,
+} DP_AUX_POTENTIAL_ERR_REACHED_ACK;
+typedef enum DP_AUX_DEFINITE_ERR_REACHED_ACK {
+ ALPHA_DP_AUX_DEFINITE_ERR_REACHED_NOT_ACK = 0x0,
+ ALPHA_DP_AUX_DEFINITE_ERR_REACHED_ACK = 0x1,
+} DP_AUX_DEFINITE_ERR_REACHED_ACK;
+typedef enum DP_AUX_RESET {
+ DP_AUX_RESET_DEASSERTED = 0x0,
+ DP_AUX_RESET_ASSERTED = 0x1,
+} DP_AUX_RESET;
+typedef enum DP_AUX_RESET_DONE {
+ DP_AUX_RESET_SEQUENCE_NOT_DONE = 0x0,
+ DP_AUX_RESET_SEQUENCE_DONE = 0x1,
+} DP_AUX_RESET_DONE;
+typedef enum FBC_IDLE_MASK_MASK_BITS {
+ FBC_IDLE_MASK_DISP_REG_UPDATE = 0x0,
+ FBC_IDLE_MASK_RESERVED1 = 0x1,
+ FBC_IDLE_MASK_FBC_GRPH_COMP_EN = 0x2,
+ FBC_IDLE_MASK_FBC_MIN_COMPRESSION = 0x3,
+ FBC_IDLE_MASK_FBC_ALPHA_COMP_EN = 0x4,
+ FBC_IDLE_MASK_FBC_ZERO_ALPHA_CHUNK_SKIP_EN = 0x5,
+ FBC_IDLE_MASK_FBC_FORCE_COPY_TO_COMP_BUF = 0x6,
+ FBC_IDLE_MASK_RESERVED7 = 0x7,
+ FBC_IDLE_MASK_RESERVED8 = 0x8,
+ FBC_IDLE_MASK_RESERVED9 = 0x9,
+ FBC_IDLE_MASK_RESERVED10 = 0xa,
+ FBC_IDLE_MASK_RESERVED11 = 0xb,
+ FBC_IDLE_MASK_RESERVED12 = 0xc,
+ FBC_IDLE_MASK_RESERVED13 = 0xd,
+ FBC_IDLE_MASK_RESERVED14 = 0xe,
+ FBC_IDLE_MASK_RESERVED15 = 0xf,
+ FBC_IDLE_MASK_RESERVED16 = 0x10,
+ FBC_IDLE_MASK_RESERVED17 = 0x11,
+ FBC_IDLE_MASK_RESERVED18 = 0x12,
+ FBC_IDLE_MASK_RESERVED19 = 0x13,
+ FBC_IDLE_MASK_RESERVED20 = 0x14,
+ FBC_IDLE_MASK_RESERVED21 = 0x15,
+ FBC_IDLE_MASK_RESERVED22 = 0x16,
+ FBC_IDLE_MASK_RESERVED23 = 0x17,
+ FBC_IDLE_MASK_MC_HIT_REGION_0 = 0x18,
+ FBC_IDLE_MASK_MC_HIT_REGION_1 = 0x19,
+ FBC_IDLE_MASK_MC_HIT_REGION_2 = 0x1a,
+ FBC_IDLE_MASK_MC_HIT_REGION_3 = 0x1b,
+ FBC_IDLE_MASK_MC_WRITE = 0x1c,
+ FBC_IDLE_MASK_CG_STATIC_SCREEN = 0x1d,
+ FBC_IDLE_MASK_RESERVED30 = 0x1e,
+ FBC_IDLE_MASK_RESERVED31 = 0x1f,
+} FBC_IDLE_MASK_MASK_BITS;
+typedef enum FMT_CONTROL_PIXEL_ENCODING {
+ FMT_CONTROL_PIXEL_ENCODING_RGB444_OR_YCBCR444 = 0x0,
+ FMT_CONTROL_PIXEL_ENCODING_YCBCR422 = 0x1,
+ FMT_CONTROL_PIXEL_ENCODING_YCBCR420 = 0x2,
+ FMT_CONTROL_PIXEL_ENCODING_RESERVED = 0x3,
+} FMT_CONTROL_PIXEL_ENCODING;
+typedef enum FMT_CONTROL_SUBSAMPLING_MODE {
+ FMT_CONTROL_SUBSAMPLING_MODE_DROP = 0x0,
+ FMT_CONTROL_SUBSAMPLING_MODE_AVERAGE = 0x1,
+ FMT_CONTROL_SUBSAMPLING_MODE_3_TAP = 0x2,
+ FMT_CONTROL_SUBSAMPLING_MODE_RESERVED = 0x3,
+} FMT_CONTROL_SUBSAMPLING_MODE;
+typedef enum FMT_CONTROL_SUBSAMPLING_ORDER {
+ FMT_CONTROL_SUBSAMPLING_ORDER_CB_BEFORE_CR = 0x0,
+ FMT_CONTROL_SUBSAMPLING_ORDER_CR_BEFORE_CB = 0x1,
+} FMT_CONTROL_SUBSAMPLING_ORDER;
+typedef enum FMT_CONTROL_CBCR_BIT_REDUCTION_BYPASS {
+ FMT_CONTROL_CBCR_BIT_REDUCTION_BYPASS_DISABLE = 0x0,
+ FMT_CONTROL_CBCR_BIT_REDUCTION_BYPASS_ENABLE = 0x1,
+} FMT_CONTROL_CBCR_BIT_REDUCTION_BYPASS;
+typedef enum FMT_BIT_DEPTH_CONTROL_TRUNCATE_MODE {
+ FMT_BIT_DEPTH_CONTROL_TRUNCATE_MODE_TRUNCATION = 0x0,
+ FMT_BIT_DEPTH_CONTROL_TRUNCATE_MODE_ROUNDING = 0x1,
+} FMT_BIT_DEPTH_CONTROL_TRUNCATE_MODE;
+typedef enum FMT_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH {
+ FMT_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH_18BPP = 0x0,
+ FMT_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH_24BPP = 0x1,
+ FMT_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH_30BPP = 0x2,
+} FMT_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH;
+typedef enum FMT_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH {
+ FMT_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH_18BPP = 0x0,
+ FMT_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH_24BPP = 0x1,
+ FMT_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH_30BPP = 0x2,
+} FMT_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH;
+typedef enum FMT_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH {
+ FMT_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH_18BPP= 0x0,
+ FMT_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH_24BPP= 0x1,
+ FMT_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH_30BPP= 0x2,
+} FMT_BIT_DEPTH_CONTROL_TEMPORAL_DITHER_DEPTH;
+typedef enum FMT_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL {
+ FMT_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL_GREY_LEVEL2 = 0x0,
+ FMT_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL_GREY_LEVEL4 = 0x1,
+} FMT_BIT_DEPTH_CONTROL_TEMPORAL_LEVEL;
+typedef enum FMT_BIT_DEPTH_CONTROL_25FRC_SEL {
+ FMT_BIT_DEPTH_CONTROL_25FRC_SEL_Ei = 0x0,
+ FMT_BIT_DEPTH_CONTROL_25FRC_SEL_Fi = 0x1,
+ FMT_BIT_DEPTH_CONTROL_25FRC_SEL_Gi = 0x2,
+ FMT_BIT_DEPTH_CONTROL_25FRC_SEL_RESERVED = 0x3,
+} FMT_BIT_DEPTH_CONTROL_25FRC_SEL;
+typedef enum FMT_BIT_DEPTH_CONTROL_50FRC_SEL {
+ FMT_BIT_DEPTH_CONTROL_50FRC_SEL_A = 0x0,
+ FMT_BIT_DEPTH_CONTROL_50FRC_SEL_B = 0x1,
+ FMT_BIT_DEPTH_CONTROL_50FRC_SEL_C = 0x2,
+ FMT_BIT_DEPTH_CONTROL_50FRC_SEL_D = 0x3,
+} FMT_BIT_DEPTH_CONTROL_50FRC_SEL;
+typedef enum FMT_BIT_DEPTH_CONTROL_75FRC_SEL {
+ FMT_BIT_DEPTH_CONTROL_75FRC_SEL_E = 0x0,
+ FMT_BIT_DEPTH_CONTROL_75FRC_SEL_F = 0x1,
+ FMT_BIT_DEPTH_CONTROL_75FRC_SEL_G = 0x2,
+ FMT_BIT_DEPTH_CONTROL_75FRC_SEL_RESERVED = 0x3,
+} FMT_BIT_DEPTH_CONTROL_75FRC_SEL;
+typedef enum FMT_TEMPORAL_DITHER_PATTERN_CONTROL_SELECT {
+ FMT_TEMPORAL_DITHER_PATTERN_CONTROL_SELECT_LEGACY_HARDCODED_PATTERN= 0x0,
+ FMT_TEMPORAL_DITHER_PATTERN_CONTROL_SELECT_PROGRAMMABLE_PATTERN= 0x1,
+} FMT_TEMPORAL_DITHER_PATTERN_CONTROL_SELECT;
+typedef enum FMT_TEMPORAL_DITHER_PATTERN_CONTROL_RGB1_BGR0 {
+ FMT_TEMPORAL_DITHER_PATTERN_CONTROL_RGB1_BGR0_BGR= 0x0,
+ FMT_TEMPORAL_DITHER_PATTERN_CONTROL_RGB1_BGR0_RGB= 0x1,
+} FMT_TEMPORAL_DITHER_PATTERN_CONTROL_RGB1_BGR0;
+typedef enum FMT_CLAMP_CNTL_COLOR_FORMAT {
+ FMT_CLAMP_CNTL_COLOR_FORMAT_6BPC = 0x0,
+ FMT_CLAMP_CNTL_COLOR_FORMAT_8BPC = 0x1,
+ FMT_CLAMP_CNTL_COLOR_FORMAT_10BPC = 0x2,
+ FMT_CLAMP_CNTL_COLOR_FORMAT_12BPC = 0x3,
+ FMT_CLAMP_CNTL_COLOR_FORMAT_RESERVED1 = 0x4,
+ FMT_CLAMP_CNTL_COLOR_FORMAT_RESERVED2 = 0x5,
+ FMT_CLAMP_CNTL_COLOR_FORMAT_RESERVED3 = 0x6,
+ FMT_CLAMP_CNTL_COLOR_FORMAT_PROGRAMMABLE = 0x7,
+} FMT_CLAMP_CNTL_COLOR_FORMAT;
+typedef enum FMT_CRC_CNTL_CONT_EN {
+ FMT_CRC_CNTL_CONT_EN_ONE_SHOT = 0x0,
+ FMT_CRC_CNTL_CONT_EN_CONT = 0x1,
+} FMT_CRC_CNTL_CONT_EN;
+typedef enum FMT_CRC_CNTL_INCLUDE_OVERSCAN {
+ FMT_CRC_CNTL_INCLUDE_OVERSCAN_NOT_INCLUDE = 0x0,
+ FMT_CRC_CNTL_INCLUDE_OVERSCAN_INCLUDE = 0x1,
+} FMT_CRC_CNTL_INCLUDE_OVERSCAN;
+typedef enum FMT_CRC_CNTL_ONLY_BLANKB {
+ FMT_CRC_CNTL_ONLY_BLANKB_ENTIRE_FIELD = 0x0,
+ FMT_CRC_CNTL_ONLY_BLANKB_NON_BLANK = 0x1,
+} FMT_CRC_CNTL_ONLY_BLANKB;
+typedef enum FMT_CRC_CNTL_PSR_MODE_ENABLE {
+ FMT_CRC_CNTL_PSR_MODE_ENABLE_NORMAL = 0x0,
+ FMT_CRC_CNTL_PSR_MODE_ENABLE_EDP_PSR_CRC = 0x1,
+} FMT_CRC_CNTL_PSR_MODE_ENABLE;
+typedef enum FMT_CRC_CNTL_INTERLACE_MODE {
+ FMT_CRC_CNTL_INTERLACE_MODE_TOP = 0x0,
+ FMT_CRC_CNTL_INTERLACE_MODE_BOTTOM = 0x1,
+ FMT_CRC_CNTL_INTERLACE_MODE_BOTH_BOTTOM = 0x2,
+ FMT_CRC_CNTL_INTERLACE_MODE_BOTH_EACH = 0x3,
+} FMT_CRC_CNTL_INTERLACE_MODE;
+typedef enum FMT_CRC_CNTL_EVEN_ODD_PIX_ENABLE {
+ FMT_CRC_CNTL_EVEN_ODD_PIX_ENABLE_ALL = 0x0,
+ FMT_CRC_CNTL_EVEN_ODD_PIX_ENABLE_ODD_EVEN = 0x1,
+} FMT_CRC_CNTL_EVEN_ODD_PIX_ENABLE;
+typedef enum FMT_CRC_CNTL_EVEN_ODD_PIX_SELECT {
+ FMT_CRC_CNTL_EVEN_ODD_PIX_SELECT_EVEN = 0x0,
+ FMT_CRC_CNTL_EVEN_ODD_PIX_SELECT_ODD = 0x1,
+} FMT_CRC_CNTL_EVEN_ODD_PIX_SELECT;
+typedef enum FMT_DEBUG_CNTL_COLOR_SELECT {
+ FMT_DEBUG_CNTL_COLOR_SELECT_BLUE = 0x0,
+ FMT_DEBUG_CNTL_COLOR_SELECT_GREEN = 0x1,
+ FMT_DEBUG_CNTL_COLOR_SELECT_RED1 = 0x2,
+ FMT_DEBUG_CNTL_COLOR_SELECT_RED2 = 0x3,
+} FMT_DEBUG_CNTL_COLOR_SELECT;
+typedef enum FMT_SPATIAL_DITHER_MODE {
+ FMT_SPATIAL_DITHER_MODE_0 = 0x0,
+ FMT_SPATIAL_DITHER_MODE_1 = 0x1,
+ FMT_SPATIAL_DITHER_MODE_2 = 0x2,
+ FMT_SPATIAL_DITHER_MODE_3 = 0x3,
+} FMT_SPATIAL_DITHER_MODE;
+typedef enum FMT_STEREOSYNC_OVR_POL {
+ FMT_STEREOSYNC_OVR_POL_INVERTED = 0x0,
+ FMT_STEREOSYNC_OVR_POL_NOT_INVERTED = 0x1,
+} FMT_STEREOSYNC_OVR_POL;
+typedef enum FMT_DYNAMIC_EXP_MODE {
+ FMT_DYNAMIC_EXP_MODE_10to12 = 0x0,
+ FMT_DYNAMIC_EXP_MODE_8to12 = 0x1,
+} FMT_DYNAMIC_EXP_MODE;
+typedef enum LB_DATA_FORMAT_PIXEL_DEPTH {
+ LB_DATA_FORMAT_PIXEL_DEPTH_30BPP = 0x0,
+ LB_DATA_FORMAT_PIXEL_DEPTH_24BPP = 0x1,
+ LB_DATA_FORMAT_PIXEL_DEPTH_18BPP = 0x2,
+ LB_DATA_FORMAT_PIXEL_DEPTH_36BPP = 0x3,
+} LB_DATA_FORMAT_PIXEL_DEPTH;
+typedef enum LB_DATA_FORMAT_PIXEL_EXPAN_MODE {
+ LB_DATA_FORMAT_PIXEL_EXPAN_MODE_ZERO_PIXEL_EXPANSION= 0x0,
+ LB_DATA_FORMAT_PIXEL_EXPAN_MODE_DYNAMIC_PIXEL_EXPANSION= 0x1,
+} LB_DATA_FORMAT_PIXEL_EXPAN_MODE;
+typedef enum LB_DATA_FORMAT_PIXEL_REDUCE_MODE {
+ LB_DATA_FORMAT_PIXEL_REDUCE_MODE_TRUNCATION = 0x0,
+ LB_DATA_FORMAT_PIXEL_REDUCE_MODE_ROUNDING = 0x1,
+} LB_DATA_FORMAT_PIXEL_REDUCE_MODE;
+typedef enum LB_DATA_FORMAT_DYNAMIC_PIXEL_DEPTH {
+ LB_DATA_FORMAT_DYNAMIC_PIXEL_DEPTH_36BPP = 0x0,
+ LB_DATA_FORMAT_DYNAMIC_PIXEL_DEPTH_30BPP = 0x1,
+} LB_DATA_FORMAT_DYNAMIC_PIXEL_DEPTH;
+typedef enum LB_DATA_FORMAT_INTERLEAVE_EN {
+ LB_DATA_FORMAT_INTERLEAVE_DISABLE = 0x0,
+ LB_DATA_FORMAT_INTERLEAVE_ENABLE = 0x1,
+} LB_DATA_FORMAT_INTERLEAVE_EN;
+typedef enum LB_DATA_FORMAT_PREFILL_EN {
+ LB_DATA_FORMAT_PREFILL_DISABLE = 0x0,
+ LB_DATA_FORMAT_PREFILL_ENABLE = 0x1,
+} LB_DATA_FORMAT_PREFILL_EN;
+typedef enum LB_DATA_FORMAT_REQUEST_MODE {
+ LB_DATA_FORMAT_REQUEST_MODE_NORMAL = 0x0,
+ LB_DATA_FORMAT_REQUEST_MODE_START_OF_LINE = 0x1,
+} LB_DATA_FORMAT_REQUEST_MODE;
+typedef enum LB_DATA_FORMAT_ALPHA_EN {
+ LB_DATA_FORMAT_ALPHA_DISABLE = 0x0,
+ LB_DATA_FORMAT_ALPHA_ENABLE = 0x1,
+} LB_DATA_FORMAT_ALPHA_EN;
+typedef enum LB_VLINE_START_END_VLINE_INV {
+ LB_VLINE_START_END_VLINE_NORMAL = 0x0,
+ LB_VLINE_START_END_VLINE_INVERSE = 0x1,
+} LB_VLINE_START_END_VLINE_INV;
+typedef enum LB_VLINE2_START_END_VLINE2_INV {
+ LB_VLINE2_START_END_VLINE2_NORMAL = 0x0,
+ LB_VLINE2_START_END_VLINE2_INVERSE = 0x1,
+} LB_VLINE2_START_END_VLINE2_INV;
+typedef enum LB_INTERRUPT_MASK_VBLANK_INTERRUPT_MASK {
+ LB_INTERRUPT_MASK_VBLANK_INTERRUPT_DISABLE = 0x0,
+ LB_INTERRUPT_MASK_VBLANK_INTERRUPT_ENABLE = 0x1,
+} LB_INTERRUPT_MASK_VBLANK_INTERRUPT_MASK;
+typedef enum LB_INTERRUPT_MASK_VLINE_INTERRUPT_MASK {
+ LB_INTERRUPT_MASK_VLINE_INTERRUPT_DISABLE = 0x0,
+ LB_INTERRUPT_MASK_VLINE_INTERRUPT_ENABLE = 0x1,
+} LB_INTERRUPT_MASK_VLINE_INTERRUPT_MASK;
+typedef enum LB_INTERRUPT_MASK_VLINE2_INTERRUPT_MASK {
+ LB_INTERRUPT_MASK_VLINE2_INTERRUPT_DISABLE = 0x0,
+ LB_INTERRUPT_MASK_VLINE2_INTERRUPT_ENABLE = 0x1,
+} LB_INTERRUPT_MASK_VLINE2_INTERRUPT_MASK;
+typedef enum LB_VLINE_STATUS_VLINE_ACK {
+ LB_VLINE_STATUS_VLINE_NORMAL = 0x0,
+ LB_VLINE_STATUS_VLINE_CLEAR = 0x1,
+} LB_VLINE_STATUS_VLINE_ACK;
+typedef enum LB_VLINE_STATUS_VLINE_INTERRUPT_TYPE {
+ LB_VLINE_STATUS_VLINE_INTERRUPT_TYPE_LEVEL_BASED = 0x0,
+ LB_VLINE_STATUS_VLINE_INTERRUPT_TYPE_PULSE_BASED = 0x1,
+} LB_VLINE_STATUS_VLINE_INTERRUPT_TYPE;
+typedef enum LB_VLINE2_STATUS_VLINE2_ACK {
+ LB_VLINE2_STATUS_VLINE2_NORMAL = 0x0,
+ LB_VLINE2_STATUS_VLINE2_CLEAR = 0x1,
+} LB_VLINE2_STATUS_VLINE2_ACK;
+typedef enum LB_VLINE2_STATUS_VLINE2_INTERRUPT_TYPE {
+ LB_VLINE2_STATUS_VLINE2_INTERRUPT_TYPE_LEVEL_BASED= 0x0,
+ LB_VLINE2_STATUS_VLINE2_INTERRUPT_TYPE_PULSE_BASED= 0x1,
+} LB_VLINE2_STATUS_VLINE2_INTERRUPT_TYPE;
+typedef enum LB_VBLANK_STATUS_VBLANK_ACK {
+ LB_VBLANK_STATUS_VBLANK_NORMAL = 0x0,
+ LB_VBLANK_STATUS_VBLANK_CLEAR = 0x1,
+} LB_VBLANK_STATUS_VBLANK_ACK;
+typedef enum LB_VBLANK_STATUS_VBLANK_INTERRUPT_TYPE {
+ LB_VBLANK_STATUS_VBLANK_INTERRUPT_TYPE_LEVEL_BASED= 0x0,
+ LB_VBLANK_STATUS_VBLANK_INTERRUPT_TYPE_PULSE_BASED= 0x1,
+} LB_VBLANK_STATUS_VBLANK_INTERRUPT_TYPE;
+typedef enum LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL {
+ LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL_DISABLE = 0x0,
+ LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL_FROM_VSYNC_VBLANK= 0x1,
+ LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL_FROM_POWERDOWN_RESET= 0x2,
+ LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL_FROM_VSYNC_VBLANK_POWERDOWN_RESET= 0x3,
+} LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL;
+typedef enum LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL2 {
+ LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL2_USE_VBLANK = 0x0,
+ LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL2_USE_VSYNC = 0x1,
+} LB_SYNC_RESET_SEL_LB_SYNC_RESET_SEL2;
+typedef enum LB_SYNC_RESET_SEL_LB_SYNC_DURATION {
+ LB_SYNC_RESET_SEL_LB_SYNC_DURATION_16_CLOCKS = 0x0,
+ LB_SYNC_RESET_SEL_LB_SYNC_DURATION_32_CLOCKS = 0x1,
+ LB_SYNC_RESET_SEL_LB_SYNC_DURATION_64_CLOCKS = 0x2,
+ LB_SYNC_RESET_SEL_LB_SYNC_DURATION_128_CLOCKS = 0x3,
+} LB_SYNC_RESET_SEL_LB_SYNC_DURATION;
+typedef enum LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_EN {
+ LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_DISABLE = 0x0,
+ LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_ENABLE = 0x1,
+} LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_EN;
+typedef enum LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_REP_EN {
+ LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_REPLACEMENT_DISABLE= 0x0,
+ LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_REPLACEMENT_ENABLE= 0x1,
+} LB_KEYER_COLOR_CTRL_LB_KEYER_COLOR_REP_EN;
+typedef enum LB_BUFFER_STATUS_LB_BUFFER_EMPTY_ACK {
+ LB_BUFFER_STATUS_LB_BUFFER_EMPTY_NORMAL = 0x0,
+ LB_BUFFER_STATUS_LB_BUFFER_EMPTY_RESET = 0x1,
+} LB_BUFFER_STATUS_LB_BUFFER_EMPTY_ACK;
+typedef enum LB_BUFFER_STATUS_LB_BUFFER_FULL_ACK {
+ LB_BUFFER_STATUS_LB_BUFFER_FULL_NORMAL = 0x0,
+ LB_BUFFER_STATUS_LB_BUFFER_FULL_RESET = 0x1,
+} LB_BUFFER_STATUS_LB_BUFFER_FULL_ACK;
+typedef enum LB_MVP_AFR_FLIP_MODE_MVP_AFR_FLIP_MODE {
+ LB_MVP_AFR_FLIP_MODE_MVP_AFR_FLIP_MODE_REAL_FLIP = 0x2,
+ LB_MVP_AFR_FLIP_MODE_MVP_AFR_FLIP_MODE_DUMMY_FLIP= 0x3,
+} LB_MVP_AFR_FLIP_MODE_MVP_AFR_FLIP_MODE;
+typedef enum LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_RESET {
+ LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_NORMAL= 0x0,
+ LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_RESET_ACTIVE= 0x1,
+} LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_RESET;
+typedef enum LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_RESET_ACK {
+ LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_RESET_ACK_NOT_USED0= 0x0,
+ LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_RESET_ACK_NOT_USED1= 0x1,
+} LB_MVP_AFR_FLIP_FIFO_CNTL_MVP_AFR_FLIP_FIFO_RESET_ACK;
+typedef enum LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_LINE_NUM_INSERT_MODE {
+ LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_LINE_NUM_INSERT_MODE_NO_INSERT= 0x0,
+ LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_LINE_NUM_INSERT_MODE_DEBUG= 0x1,
+ LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_LINE_NUM_INSERT_MODE_HSYNC_MODE= 0x2,
+} LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_LINE_NUM_INSERT_MODE;
+typedef enum LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_AUTO_ENABLE {
+ LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_AUTO_DISABLE= 0x0,
+ LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_AUTO_EN = 0x1,
+} LB_MVP_FLIP_LINE_NUM_INSERT_MVP_FLIP_AUTO_ENABLE;
+typedef enum LB_DC_MVP_LB_CONTROL_MVP_SWAP_LOCK_IN_MODE {
+ ALPHA_LB_DC_MVP_LB_CONTROL_MVP_SWAP_LOCK_IN_MODE_MASTER= 0x1,
+ ALPHA_LB_DC_MVP_LB_CONTROL_MVP_SWAP_LOCK_IN_MODE_SLAVE= 0x2,
+} LB_DC_MVP_LB_CONTROL_MVP_SWAP_LOCK_IN_MODE;
+typedef enum LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_SEL {
+ LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_SEL_NOT_USED0= 0x0,
+ LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_SEL_NOT_USED1= 0x1,
+} LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_SEL;
+typedef enum LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_FORCE_ONE {
+ LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_NO_FORCE_ONE= 0x0,
+ LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_FORCE_TO_ONE= 0x1,
+} LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_FORCE_ONE;
+typedef enum LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_FORCE_ZERO {
+ LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_NO_FORCE_ZERO= 0x0,
+ LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_FORCE_TO_ZERO= 0x1,
+} LB_DC_MVP_LB_CONTROL_DC_MVP_SWAP_LOCK_OUT_FORCE_ZERO;
+typedef enum LB_TEST_DEBUG_INDEX_LB_TEST_DEBUG_WRITE_EN {
+ LB_TEST_DEBUG_INDEX_LB_TEST_DEBUG_WRITE_EN_NOT_USED0= 0x0,
+ LB_TEST_DEBUG_INDEX_LB_TEST_DEBUG_WRITE_EN_NOT_USED1= 0x1,
+} LB_TEST_DEBUG_INDEX_LB_TEST_DEBUG_WRITE_EN;
+typedef enum LBV_PIXEL_DEPTH {
+ PIXEL_DEPTH_30BPP = 0x0,
+ PIXEL_DEPTH_24BPP = 0x1,
+ PIXEL_DEPTH_18BPP = 0x2,
+ PIXEL_DEPTH_38BPP = 0x3,
+} LBV_PIXEL_DEPTH;
+typedef enum LBV_PIXEL_EXPAN_MODE {
+ PIXEL_EXPAN_MODE_ZERO_EXP = 0x0,
+ PIXEL_EXPAN_MODE_DYN_EXP = 0x1,
+} LBV_PIXEL_EXPAN_MODE;
+typedef enum LBV_INTERLEAVE_EN {
+ INTERLEAVE_DIS = 0x0,
+ INTERLEAVE_EN = 0x1,
+} LBV_INTERLEAVE_EN;
+typedef enum LBV_PIXEL_REDUCE_MODE {
+ PIXEL_REDUCE_MODE_TRUNCATION = 0x0,
+ PIXEL_REDUCE_MODE_ROUNDING = 0x1,
+} LBV_PIXEL_REDUCE_MODE;
+typedef enum LBV_DYNAMIC_PIXEL_DEPTH {
+ DYNAMIC_PIXEL_DEPTH_36BPP = 0x0,
+ DYNAMIC_PIXEL_DEPTH_30BPP = 0x1,
+} LBV_DYNAMIC_PIXEL_DEPTH;
+typedef enum LBV_DITHER_EN {
+ DITHER_DIS = 0x0,
+ DITHER_EN = 0x1,
+} LBV_DITHER_EN;
+typedef enum LBV_DOWNSCALE_PREFETCH_EN {
+ DOWNSCALE_PREFETCH_DIS = 0x0,
+ DOWNSCALE_PREFETCH_EN = 0x1,
+} LBV_DOWNSCALE_PREFETCH_EN;
+typedef enum LBV_MEMORY_CONFIG {
+ MEMORY_CONFIG_0 = 0x0,
+ MEMORY_CONFIG_1 = 0x1,
+ MEMORY_CONFIG_2 = 0x2,
+ MEMORY_CONFIG_3 = 0x3,
+} LBV_MEMORY_CONFIG;
+typedef enum LBV_SYNC_RESET_SEL2 {
+ SYNC_RESET_SEL2_VBLANK = 0x0,
+ SYNC_RESET_SEL2_VSYNC = 0x1,
+} LBV_SYNC_RESET_SEL2;
+typedef enum LBV_SYNC_DURATION {
+ SYNC_DURATION_16 = 0x0,
+ SYNC_DURATION_32 = 0x1,
+ SYNC_DURATION_64 = 0x2,
+ SYNC_DURATION_128 = 0x3,
+} LBV_SYNC_DURATION;
+typedef enum SCL_C_RAM_TAP_PAIR_IDX {
+ SCL_C_RAM_TAP_PAIR_ID0 = 0x0,
+ SCL_C_RAM_TAP_PAIR_ID1 = 0x1,
+ SCL_C_RAM_TAP_PAIR_ID2 = 0x2,
+ SCL_C_RAM_TAP_PAIR_ID3 = 0x3,
+ SCL_C_RAM_TAP_PAIR_ID4 = 0x4,
+} SCL_C_RAM_TAP_PAIR_IDX;
+typedef enum SCL_C_RAM_PHASE {
+ SCL_C_RAM_PHASE_0 = 0x0,
+ SCL_C_RAM_PHASE_1 = 0x1,
+ SCL_C_RAM_PHASE_2 = 0x2,
+ SCL_C_RAM_PHASE_3 = 0x3,
+ SCL_C_RAM_PHASE_4 = 0x4,
+ SCL_C_RAM_PHASE_5 = 0x5,
+ SCL_C_RAM_PHASE_6 = 0x6,
+ SCL_C_RAM_PHASE_7 = 0x7,
+ SCL_C_RAM_PHASE_8 = 0x8,
+} SCL_C_RAM_PHASE;
+typedef enum SCL_C_RAM_FILTER_TYPE {
+ SCL_C_RAM_FILTER_TYPE_VERT_LUMA_RGB_LUT = 0x0,
+ SCL_C_RAM_FILTER_TYPE_VERT_CHROMA_LUT = 0x1,
+ SCL_C_RAM_FILTER_TYPE_HORI_LUMA_RGB_LUT = 0x2,
+ SCL_C_RAM_FILTER_TYPE_HORI_CHROMA_LUT = 0x3,
+} SCL_C_RAM_FILTER_TYPE;
+typedef enum SCL_MODE_SEL {
+ SCL_MODE_RGB_BYPASS = 0x0,
+ SCL_MODE_RGB_SCALING = 0x1,
+ SCL_MODE_YCBCR_SCALING = 0x2,
+ SCL_MODE_YCBCR_BYPASS = 0x3,
+} SCL_MODE_SEL;
+typedef enum SCL_PSCL_EN {
+ SCL_PSCL_DISABLE = 0x0,
+ SCL_PSCL_ENANBLE = 0x1,
+} SCL_PSCL_EN;
+typedef enum SCL_V_NUM_OF_TAPS {
+ SCL_V_NUM_OF_TAPS_1 = 0x0,
+ SCL_V_NUM_OF_TAPS_2 = 0x1,
+ SCL_V_NUM_OF_TAPS_3 = 0x2,
+ SCL_V_NUM_OF_TAPS_4 = 0x3,
+ SCL_V_NUM_OF_TAPS_5 = 0x4,
+ SCL_V_NUM_OF_TAPS_6 = 0x5,
+} SCL_V_NUM_OF_TAPS;
+typedef enum SCL_H_NUM_OF_TAPS {
+ SCL_H_NUM_OF_TAPS_1 = 0x0,
+ SCL_H_NUM_OF_TAPS_2 = 0x1,
+ SCL_H_NUM_OF_TAPS_4 = 0x3,
+ SCL_H_NUM_OF_TAPS_6 = 0x5,
+ SCL_H_NUM_OF_TAPS_8 = 0x7,
+ SCL_H_NUM_OF_TAPS_10 = 0x9,
+} SCL_H_NUM_OF_TAPS;
+typedef enum SCL_BOUNDARY_MODE {
+ SCL_BOUNDARY_MODE_BLACK = 0x0,
+ SCL_BOUNDARY_MODE_EDGE = 0x1,
+} SCL_BOUNDARY_MODE;
+typedef enum SCL_EARLY_EOL_MOD {
+ SCL_EARLY_EOL_MODE_CRTC = 0x0,
+ SCL_EARLY_EOL_MODE_INTERNAL = 0x1,
+} SCL_EARLY_EOL_MOD;
+typedef enum SCL_BYPASS_MODE {
+ SCL_BYPASS_MODE_MC_MR = 0x0,
+ SCL_BYPASS_MODE_AC_NR = 0x1,
+ SCL_BYPASS_MODE_AC_AR = 0x2,
+ SCL_BYPASS_MODE_RESERVED = 0x3,
+} SCL_BYPASS_MODE;
+typedef enum SCL_V_MANUAL_REPLICATE_FACTOR {
+ SCL_V_MANUAL_REPLICATE_FACTOR_1 = 0x0,
+ SCL_V_MANUAL_REPLICATE_FACTOR_2 = 0x1,
+ SCL_V_MANUAL_REPLICATE_FACTOR_3 = 0x2,
+ SCL_V_MANUAL_REPLICATE_FACTOR_4 = 0x3,
+ SCL_V_MANUAL_REPLICATE_FACTOR_5 = 0x4,
+ SCL_V_MANUAL_REPLICATE_FACTOR_6 = 0x5,
+ SCL_V_MANUAL_REPLICATE_FACTOR_7 = 0x6,
+ SCL_V_MANUAL_REPLICATE_FACTOR_8 = 0x7,
+ SCL_V_MANUAL_REPLICATE_FACTOR_9 = 0x8,
+ SCL_V_MANUAL_REPLICATE_FACTOR_10 = 0x9,
+ SCL_V_MANUAL_REPLICATE_FACTOR_11 = 0xa,
+ SCL_V_MANUAL_REPLICATE_FACTOR_12 = 0xb,
+ SCL_V_MANUAL_REPLICATE_FACTOR_13 = 0xc,
+ SCL_V_MANUAL_REPLICATE_FACTOR_14 = 0xd,
+ SCL_V_MANUAL_REPLICATE_FACTOR_15 = 0xe,
+ SCL_V_MANUAL_REPLICATE_FACTOR_16 = 0xf,
+} SCL_V_MANUAL_REPLICATE_FACTOR;
+typedef enum SCL_H_MANUAL_REPLICATE_FACTOR {
+ SCL_H_MANUAL_REPLICATE_FACTOR_1 = 0x0,
+ SCL_H_MANUAL_REPLICATE_FACTOR_2 = 0x1,
+ SCL_H_MANUAL_REPLICATE_FACTOR_3 = 0x2,
+ SCL_H_MANUAL_REPLICATE_FACTOR_4 = 0x3,
+ SCL_H_MANUAL_REPLICATE_FACTOR_5 = 0x4,
+ SCL_H_MANUAL_REPLICATE_FACTOR_6 = 0x5,
+ SCL_H_MANUAL_REPLICATE_FACTOR_7 = 0x6,
+ SCL_H_MANUAL_REPLICATE_FACTOR_8 = 0x7,
+ SCL_H_MANUAL_REPLICATE_FACTOR_9 = 0x8,
+ SCL_H_MANUAL_REPLICATE_FACTOR_10 = 0x9,
+ SCL_H_MANUAL_REPLICATE_FACTOR_11 = 0xa,
+ SCL_H_MANUAL_REPLICATE_FACTOR_12 = 0xb,
+ SCL_H_MANUAL_REPLICATE_FACTOR_13 = 0xc,
+ SCL_H_MANUAL_REPLICATE_FACTOR_14 = 0xd,
+ SCL_H_MANUAL_REPLICATE_FACTOR_15 = 0xe,
+ SCL_H_MANUAL_REPLICATE_FACTOR_16 = 0xf,
+} SCL_H_MANUAL_REPLICATE_FACTOR;
+typedef enum SCL_V_CALC_AUTO_RATIO_EN {
+ SCL_V_CALC_AUTO_RATIO_DISABLE = 0x0,
+ SCL_V_CALC_AUTO_RATIO_ENABLE = 0x1,
+} SCL_V_CALC_AUTO_RATIO_EN;
+typedef enum SCL_H_CALC_AUTO_RATIO_EN {
+ SCL_H_CALC_AUTO_RATIO_DISABLE = 0x0,
+ SCL_H_CALC_AUTO_RATIO_ENABLE = 0x1,
+} SCL_H_CALC_AUTO_RATIO_EN;
+typedef enum SCL_H_FILTER_PICK_NEAREST {
+ SCL_H_FILTER_PICK_NEAREST_DISABLE = 0x0,
+ SCL_H_FILTER_PICK_NEAREST_ENABLE = 0x1,
+} SCL_H_FILTER_PICK_NEAREST;
+typedef enum SCL_H_2TAP_HARDCODE_COEF_EN {
+ SCL_H_2TAP_HARDCODE_COEF_DISABLE = 0x0,
+ SCL_H_2TAP_HARDCODE_COEF_ENABLE = 0x1,
+} SCL_H_2TAP_HARDCODE_COEF_EN;
+typedef enum SCL_V_FILTER_PICK_NEAREST {
+ SCL_V_FILTER_PICK_NEAREST_DISABLE = 0x0,
+ SCL_V_FILTER_PICK_NEAREST_ENABLE = 0x1,
+} SCL_V_FILTER_PICK_NEAREST;
+typedef enum SCL_V_2TAP_HARDCODE_COEF_EN {
+ SCL_V_2TAP_HARDCODE_COEF_DISABLE = 0x0,
+ SCL_V_2TAP_HARDCODE_COEF_ENABLE = 0x1,
+} SCL_V_2TAP_HARDCODE_COEF_EN;
+typedef enum SCL_UPDATE_TAKEN {
+ SCL_UPDATE_TAKEN_NO = 0x0,
+ SCL_UPDATE_TAKEN_YES = 0x1,
+} SCL_UPDATE_TAKEN;
+typedef enum SCL_UPDATE_LOCK {
+ SCL_UPDATE_UNLOCKED = 0x0,
+ SCL_UPDATE_LOCKED = 0x1,
+} SCL_UPDATE_LOCK;
+typedef enum SCL_COEF_UPDATE_COMPLETE {
+ SCL_COEF_UPDATE_NOT_COMPLETED = 0x0,
+ SCL_COEF_UPDATE_COMPLETED = 0x1,
+} SCL_COEF_UPDATE_COMPLETE;
+typedef enum SCL_HF_SHARP_SCALE_FACTOR {
+ SCL_HF_SHARP_SCALE_FACTOR_0 = 0x0,
+ SCL_HF_SHARP_SCALE_FACTOR_1 = 0x1,
+ SCL_HF_SHARP_SCALE_FACTOR_2 = 0x2,
+ SCL_HF_SHARP_SCALE_FACTOR_3 = 0x3,
+ SCL_HF_SHARP_SCALE_FACTOR_4 = 0x4,
+ SCL_HF_SHARP_SCALE_FACTOR_5 = 0x5,
+ SCL_HF_SHARP_SCALE_FACTOR_6 = 0x6,
+ SCL_HF_SHARP_SCALE_FACTOR_7 = 0x7,
+} SCL_HF_SHARP_SCALE_FACTOR;
+typedef enum SCL_HF_SHARP_EN {
+ SCL_HF_SHARP_DISABLE = 0x0,
+ SCL_HF_SHARP_ENABLE = 0x1,
+} SCL_HF_SHARP_EN;
+typedef enum SCL_VF_SHARP_SCALE_FACTOR {
+ SCL_VF_SHARP_SCALE_FACTOR_0 = 0x0,
+ SCL_VF_SHARP_SCALE_FACTOR_1 = 0x1,
+ SCL_VF_SHARP_SCALE_FACTOR_2 = 0x2,
+ SCL_VF_SHARP_SCALE_FACTOR_3 = 0x3,
+ SCL_VF_SHARP_SCALE_FACTOR_4 = 0x4,
+ SCL_VF_SHARP_SCALE_FACTOR_5 = 0x5,
+ SCL_VF_SHARP_SCALE_FACTOR_6 = 0x6,
+ SCL_VF_SHARP_SCALE_FACTOR_7 = 0x7,
+} SCL_VF_SHARP_SCALE_FACTOR;
+typedef enum SCL_VF_SHARP_EN {
+ SCL_VF_SHARP_DISABLE = 0x0,
+ SCL_VF_SHARP_ENABLE = 0x1,
+} SCL_VF_SHARP_EN;
+typedef enum SCL_ALU_DISABLE {
+ SCL_ALU_ENABLED = 0x0,
+ SCL_ALU_DISABLED = 0x1,
+} SCL_ALU_DISABLE;
+typedef enum SCL_HOST_CONFLICT_MASK {
+ SCL_HOST_CONFLICT_DISABLE_INTERRUPT = 0x0,
+ SCL_HOST_CONFLICT_ENABLE_INTERRUPT = 0x1,
+} SCL_HOST_CONFLICT_MASK;
+typedef enum SCL_SCL_MODE_CHANGE_MASK {
+ SCL_MODE_CHANGE_DISABLE_INTERRUPT = 0x0,
+ SCL_MODE_CHANGE_ENABLE_INTERRUPT = 0x1,
+} SCL_SCL_MODE_CHANGE_MASK;
+typedef enum SCLV_MODE_SEL {
+ SCLV_MODE_RGB_BYPASS = 0x0,
+ SCLV_MODE_RGB_SCALING = 0x1,
+ SCLV_MODE_YCBCR_SCALING = 0x2,
+ SCLV_MODE_YCBCR_BYPASS = 0x3,
+} SCLV_MODE_SEL;
+typedef enum SCLV_INTERLACE_SOURCE {
+ INTERLACE_SOURCE_PROGRESSIVE = 0x0,
+ INTERLACE_SOURCE_INTERLEAVE = 0x1,
+ INTERLACE_SOURCE_STACK = 0x2,
+} SCLV_INTERLACE_SOURCE;
+typedef enum SCLV_UPDATE_LOCK {
+ UPDATE_UNLOCKED = 0x0,
+ UPDATE_LOCKED = 0x1,
+} SCLV_UPDATE_LOCK;
+typedef enum SCLV_COEF_UPDATE_COMPLETE {
+ COEF_UPDATE_NOT_COMPLETE = 0x0,
+ COEF_UPDATE_COMPLETE = 0x1,
+} SCLV_COEF_UPDATE_COMPLETE;
+typedef enum COL_MAN_UPDATE_LOCK {
+ COL_MAN_UPDATE_UNLOCKED = 0x0,
+ COL_MAN_UPDATE_LOCKED = 0x1,
+} COL_MAN_UPDATE_LOCK;
+typedef enum COL_MAN_DISABLE_MULTIPLE_UPDATE {
+ COL_MAN_MULTIPLE_UPDATE = 0x0,
+ COL_MAN_MULTIPLE_UPDAT_EDISABLE = 0x1,
+} COL_MAN_DISABLE_MULTIPLE_UPDATE;
+typedef enum COL_MAN_INPUTCSC_MODE {
+ INPUTCSC_MODE_BYPASS = 0x0,
+ INPUTCSC_MODE_A = 0x1,
+ INPUTCSC_MODE_B = 0x2,
+ INPUTCSC_MODE_UNITY = 0x3,
+} COL_MAN_INPUTCSC_MODE;
+typedef enum COL_MAN_INPUTCSC_TYPE {
+ INPUTCSC_TYPE_12_0 = 0x0,
+ INPUTCSC_TYPE_10_2 = 0x1,
+ INPUTCSC_TYPE_8_4 = 0x2,
+} COL_MAN_INPUTCSC_TYPE;
+typedef enum COL_MAN_INPUTCSC_CONVERT {
+ INPUTCSC_ROUND = 0x0,
+ INPUTCSC_TRUNCATE = 0x1,
+} COL_MAN_INPUTCSC_CONVERT;
+typedef enum COL_MAN_PRESCALE_MODE {
+ PRESCALE_MODE_BYPASS = 0x0,
+ PRESCALE_MODE_PROGRAM = 0x1,
+ PRESCALE_MODE_UNITY = 0x2,
+} COL_MAN_PRESCALE_MODE;
+typedef enum COL_MAN_INPUT_GAMMA_MODE {
+ INGAMMA_MODE_BYPASS = 0x0,
+ INGAMMA_MODE_FIX = 0x1,
+ INGAMMA_MODE_FLOAT = 0x2,
+} COL_MAN_INPUT_GAMMA_MODE;
+typedef enum COL_MAN_OUTPUT_CSC_MODE {
+ COL_MAN_OUTPUT_CSC_BYPASS = 0x0,
+ COL_MAN_OUTPUT_CSC_RGB = 0x1,
+ COL_MAN_OUTPUT_CSC_YCrCb601 = 0x2,
+ COL_MAN_OUTPUT_CSC_YCrCb709 = 0x3,
+ COL_MAN_OUTPUT_CSC_A = 0x4,
+ COL_MAN_OUTPUT_CSC_B = 0x5,
+ COL_MAN_OUTPUT_CSC_UNITY = 0x6,
+} COL_MAN_OUTPUT_CSC_MODE;
+typedef enum COL_MAN_DENORM_CLAMP_CONTROL {
+ DENORM_CLAMP_MODE_UNITY = 0x0,
+ DENORM_CLAMP_MODE_8 = 0x1,
+ DENORM_CLAMP_MODE_10 = 0x2,
+ DENORM_CLAMP_MODE_12 = 0x3,
+} COL_MAN_DENORM_CLAMP_CONTROL;
+typedef enum COL_MAN_GAMMA_CORR_CONTROL {
+ GAMMA_CORR_MODE_BYPASS = 0x0,
+ GAMMA_CORR_MODE_A = 0x1,
+ GAMMA_CORR_MODE_B = 0x2,
+} COL_MAN_GAMMA_CORR_CONTROL;
+typedef enum COL_MAN_GLOBAL_PASSTHROUGH_ENABLE {
+ CM_GLOBAL_PASSTHROUGH_DISBALE = 0x0,
+ CM_GLOBAL_PASSTHROUGH_ENABLE = 0x1,
+} COL_MAN_GLOBAL_PASSTHROUGH_ENABLE;
+typedef enum UNP_GRPH_EN {
+ UNP_GRPH_DISABLED = 0x0,
+ UNP_GRPH_ENABLED = 0x1,
+} UNP_GRPH_EN;
+typedef enum UNP_GRPH_DEPTH {
+ UNP_GRPH_8BPP = 0x0,
+ UNP_GRPH_16BPP = 0x1,
+ UNP_GRPH_32BPP = 0x2,
+} UNP_GRPH_DEPTH;
+typedef enum UNP_GRPH_NUM_BANKS {
+ UNP_GRPH_ADDR_SURF_2_BANK = 0x0,
+ UNP_GRPH_ADDR_SURF_4_BANK = 0x1,
+ UNP_GRPH_ADDR_SURF_8_BANK = 0x2,
+ UNP_GRPH_ADDR_SURF_16_BANK = 0x3,
+} UNP_GRPH_NUM_BANKS;
+typedef enum UNP_GRPH_BANK_WIDTH {
+ UNP_GRPH_ADDR_SURF_BANK_WIDTH_1 = 0x0,
+ UNP_GRPH_ADDR_SURF_BANK_WIDTH_2 = 0x1,
+ UNP_GRPH_ADDR_SURF_BANK_WIDTH_4 = 0x2,
+ UNP_GRPH_ADDR_SURF_BANK_WIDTH_8 = 0x3,
+} UNP_GRPH_BANK_WIDTH;
+typedef enum UNP_GRPH_BANK_HEIGHT {
+ UNP_GRPH_ADDR_SURF_BANK_HEIGHT_1 = 0x0,
+ UNP_GRPH_ADDR_SURF_BANK_HEIGHT_2 = 0x1,
+ UNP_GRPH_ADDR_SURF_BANK_HEIGHT_4 = 0x2,
+ UNP_GRPH_ADDR_SURF_BANK_HEIGHT_8 = 0x3,
+} UNP_GRPH_BANK_HEIGHT;
+typedef enum UNP_GRPH_TILE_SPLIT {
+ UNP_ADDR_SURF_TILE_SPLIT_64B = 0x0,
+ UNP_ADDR_SURF_TILE_SPLIT_128B = 0x1,
+ UNP_ADDR_SURF_TILE_SPLIT_256B = 0x2,
+ UNP_ADDR_SURF_TILE_SPLIT_512B = 0x3,
+ UNP_ADDR_SURF_TILE_SPLIT_1KB = 0x4,
+ UNP_ADDR_SURF_TILE_SPLIT_2KB = 0x5,
+ UNP_ADDR_SURF_TILE_SPLIT_4KB = 0x6,
+} UNP_GRPH_TILE_SPLIT;
+typedef enum UNP_GRPH_ADDRESS_TRANSLATION_ENABLE {
+ UNP_GRPH_ADDRESS_TRANSLATION_ENABLE0 = 0x0,
+ UNP_GRPH_ADDRESS_TRANSLATION_ENABLE1 = 0x1,
+} UNP_GRPH_ADDRESS_TRANSLATION_ENABLE;
+typedef enum UNP_GRPH_PRIVILEGED_ACCESS_ENABLE {
+ UNP_GRPH_PRIVILEGED_ACCESS_DIS = 0x0,
+ UNP_GRPH_PRIVILEGED_ACCESS_EN = 0x1,
+} UNP_GRPH_PRIVILEGED_ACCESS_ENABLE;
+typedef enum UNP_GRPH_MACRO_TILE_ASPECT {
+ UNP_ADDR_SURF_MACRO_ASPECT_1 = 0x0,
+ UNP_ADDR_SURF_MACRO_ASPECT_2 = 0x1,
+ UNP_ADDR_SURF_MACRO_ASPECT_4 = 0x2,
+ UNP_ADDR_SURF_MACRO_ASPECT_8 = 0x3,
+} UNP_GRPH_MACRO_TILE_ASPECT;
+typedef enum UNP_GRPH_COLOR_EXPANSION_MODE {
+ UNP_GRPH_DYNAMIC_EXPANSION = 0x0,
+ UNP_GRPH_ZERO_EXPANSION = 0x1,
+} UNP_GRPH_COLOR_EXPANSION_MODE;
+typedef enum UNP_VIDEO_FORMAT {
+ UNP_VIDEO_FORMAT0 = 0x0,
+ UNP_VIDEO_FORMAT1 = 0x1,
+ UNP_VIDEO_FORMAT_YUV420_YCbCr = 0x2,
+ UNP_VIDEO_FORMAT_YUV420_YCrCb = 0x3,
+ UNP_VIDEO_FORMAT_YUV422_YCb = 0x4,
+ UNP_VIDEO_FORMAT_YUV422_YCr = 0x5,
+ UNP_VIDEO_FORMAT_YUV422_CbY = 0x6,
+ UNP_VIDEO_FORMAT_YUV422_CrY = 0x7,
+} UNP_VIDEO_FORMAT;
+typedef enum UNP_GRPH_ENDIAN_SWAP {
+ UNP_GRPH_ENDIAN_SWAP_NONE = 0x0,
+ UNP_GRPH_ENDIAN_SWAP_8IN16 = 0x1,
+ UNP_GRPH_ENDIAN_SWAP_8IN32 = 0x2,
+ UNP_GRPH_ENDIAN_SWAP_8IN43 = 0x3,
+} UNP_GRPH_ENDIAN_SWAP;
+typedef enum UNP_GRPH_RED_CROSSBAR {
+ UNP_GRPH_RED_CROSSBAR_R_Cr = 0x0,
+ UNP_GRPH_RED_CROSSBAR_G_Y = 0x1,
+ UNP_GRPH_RED_CROSSBAR_B_Cb = 0x2,
+ UNP_GRPH_RED_CROSSBAR_A = 0x3,
+} UNP_GRPH_RED_CROSSBAR;
+typedef enum UNP_GRPH_GREEN_CROSSBAR {
+ UNP_UNP_GRPH_GREEN_CROSSBAR_GY_AND_Y = 0x0,
+ UNP_UNP_GRPH_GREEN_CROSSBAR_B_Cb_AND_C = 0x1,
+ UNP_UNP_GRPH_GREEN_CROSSBAR_A = 0x2,
+ UNP_UNP_GRPH_GREEN_CROSSBAR_R_Cr = 0x3,
+} UNP_GRPH_GREEN_CROSSBAR;
+typedef enum UNP_GRPH_BLUE_CROSSBAR {
+ UNP_GRPH_BLUE_CROSSBAR_B_Cb_AND_C = 0x0,
+ UNP_GRPH_BLUE_CROSSBAR_A = 0x1,
+ UNP_GRPH_BLUE_CROSSBAR_R_Cr = 0x2,
+ UNP_GRPH_BLUE_CROSSBAR_GY_AND_Y = 0x3,
+} UNP_GRPH_BLUE_CROSSBAR;
+typedef enum UNP_GRPH_MODE_UPDATE_LOCKG {
+ UNP_GRPH_UPDATE_LOCK_0 = 0x0,
+ UNP_GRPH_UPDATE_LOCK_1 = 0x1,
+} UNP_GRPH_MODE_UPDATE_LOCKG;
+typedef enum UNP_GRPH_SURFACE_IGNORE_UPDATE_LOCK {
+ UNP_GRPH_SURFACE_IGNORE_UPDATE_LOCK_0 = 0x0,
+ UNP_GRPH_SURFACE_IGNORE_UPDATE_LOCK_1 = 0x1,
+} UNP_GRPH_SURFACE_IGNORE_UPDATE_LOCK;
+typedef enum UNP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE {
+ UNP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE_0 = 0x0,
+ UNP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE_1 = 0x1,
+} UNP_GRPH_MODE_DISABLE_MULTIPLE_UPDATE;
+typedef enum UNP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE {
+ UNP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE_0 = 0x0,
+ UNP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE_1 = 0x1,
+} UNP_GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE;
+typedef enum UNP_GRPH_STEREOSYNC_FLIP_EN {
+ UNP_GRPH_STEREOSYNC_FLIP_DISABLE = 0x0,
+ UNP_GRPH_STEREOSYNC_FLIP_ENABLE = 0x1,
+} UNP_GRPH_STEREOSYNC_FLIP_EN;
+typedef enum UNP_GRPH_STEREOSYNC_FLIP_MODE {
+ UNP_GRPH_STEREOSYNC_FLIP_MODE_0 = 0x0,
+ UNP_GRPH_STEREOSYNC_FLIP_MODE_1 = 0x1,
+ UNP_GRPH_STEREOSYNC_FLIP_MODE_2 = 0x2,
+ UNP_GRPH_STEREOSYNC_FLIP_MODE_3 = 0x3,
+} UNP_GRPH_STEREOSYNC_FLIP_MODE;
+typedef enum UNP_GRPH_STACK_INTERLACE_FLIP_EN {
+ UNP_GRPH_STACK_INTERLACE_FLIP_DISABLE = 0x0,
+ UNP_GRPH_STACK_INTERLACE_FLIP_ENABLE = 0x1,
+} UNP_GRPH_STACK_INTERLACE_FLIP_EN;
+typedef enum UNP_GRPH_STACK_INTERLACE_FLIP_MODE {
+ UNP_GRPH_STACK_INTERLACE_FLIP_MODE_0 = 0x0,
+ UNP_GRPH_STACK_INTERLACE_FLIP_MODE_1 = 0x1,
+ UNP_GRPH_STACK_INTERLACE_FLIP_MODE_2 = 0x2,
+ UNP_GRPH_STACK_INTERLACE_FLIP_MODE_3 = 0x3,
+} UNP_GRPH_STACK_INTERLACE_FLIP_MODE;
+typedef enum UNP_GRPH_STEREOSYNC_SELECT_DISABLE {
+ UNP_GRPH_STEREOSYNC_SELECT_EN = 0x0,
+ UNP_GRPH_STEREOSYNC_SELECT_DIS = 0x1,
+} UNP_GRPH_STEREOSYNC_SELECT_DISABLE;
+typedef enum UNP_CRC_SOURCE_SEL {
+ UNP_CRC_SOURCE_SEL_NP_TO_LBV = 0x0,
+ UNP_CRC_SOURCE_SEL_LOWER32 = 0x1,
+ UNP_CRC_SOURCE_SEL_RESERVED = 0x2,
+ UNP_CRC_SOURCE_SEL_LOWER16 = 0x3,
+ UNP_CRC_SOURCE_SEL_UNP_TO_LBV = 0x4,
+} UNP_CRC_SOURCE_SEL;
+typedef enum UNP_CRC_LINE_SEL {
+ UNP_CRC_LINE_SEL_RESERVED = 0x0,
+ UNP_CRC_LINE_SEL_EVEN_ONLY = 0x1,
+ UNP_CRC_LINE_SEL_ODD_ONLY = 0x2,
+ UNP_CRC_LINE_SEL_ODD_EVEN = 0x3,
+} UNP_CRC_LINE_SEL;
+typedef enum UNP_ROTATION_ANGLE {
+ UNP_ROTATION_ANGLE_0 = 0x0,
+ UNP_ROTATION_ANGLE_90 = 0x1,
+ UNP_ROTATION_ANGLE_180 = 0x2,
+ UNP_ROTATION_ANGLE_270 = 0x3,
+ UNP_ROTATION_ANGLE_0m = 0x4,
+ UNP_ROTATION_ANGLE_90m = 0x5,
+ UNP_ROTATION_ANGLE_180m = 0x6,
+ UNP_ROTATION_ANGLE_270m = 0x7,
+} UNP_ROTATION_ANGLE;
+typedef enum UNP_PIXEL_DROP {
+ UNP_PIXEL_NO_DROP = 0x0,
+ UNP_PIXEL_DROPPING = 0x1,
+} UNP_PIXEL_DROP;
+typedef enum UNP_BUFFER_MODE {
+ UNP_BUFFER_MODE_LUMA = 0x0,
+ UNP_BUFFER_MODE_LUMA_CHROMA = 0x1,
+} UNP_BUFFER_MODE;
+typedef enum WATERMARK_MASK_CONTROL {
+ WM_MASK_CONTROL_SET_A = 0x0,
+ WM_MASK_CONTROL_SET_B = 0x1,
+ WM_MASK_CONTROL_SET_C = 0x2,
+ WM_MASK_CONTROL_SET_D = 0x3,
+ WM_MASK_CONTROL_RESERVED1 = 0x4,
+ WM_MASK_CONTROL_RESERVED2 = 0x5,
+ WM_MASK_CONTROL_RESERVED3 = 0x6,
+ WM_MASK_CONTROL_ACTIVE_SET = 0x7,
+} WATERMARK_MASK_CONTROL;
+typedef enum AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET_CODEC_RESET {
+ AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET_CODEC_NOT_RESET= 0x0,
+ AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET_CODEC_DO_RESET= 0x1,
+} AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET_CODEC_RESET;
+typedef enum CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY {
+ CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_ALL= 0x0,
+ CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_6= 0x1,
+ CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_5= 0x2,
+ CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_4= 0x3,
+ CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_3= 0x4,
+ CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_2= 0x5,
+ CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_1= 0x6,
+ CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY_0= 0x7,
+} CC_RCU_DC_AUDIO_PORT_CONNECTIVITY_PORT_CONNECTIVITY;
+typedef enum CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY {
+ CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_ALL= 0x0,
+ CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_6= 0x1,
+ CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_5= 0x2,
+ CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_4= 0x3,
+ CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_3= 0x4,
+ CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_2= 0x5,
+ CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_1= 0x6,
+ CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY_0= 0x7,
+} CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY_INPUT_PORT_CONNECTIVITY;
+typedef enum GENERIC_AZ_CONTROLLER_REGISTER_ENABLE_CONTROL {
+ GENERIC_AZ_CONTROLLER_REGISTER_DISABLE = 0x0,
+ GENERIC_AZ_CONTROLLER_REGISTER_ENABLE = 0x1,
+} GENERIC_AZ_CONTROLLER_REGISTER_ENABLE_CONTROL;
+typedef enum GENERIC_AZ_CONTROLLER_REGISTER_ENABLE_CONTROL_RESERVED {
+ GENERIC_AZ_CONTROLLER_REGISTER_DISABLE_RESERVED = 0x0,
+ GENERIC_AZ_CONTROLLER_REGISTER_ENABLE_RESERVED = 0x1,
+} GENERIC_AZ_CONTROLLER_REGISTER_ENABLE_CONTROL_RESERVED;
+typedef enum GENERIC_AZ_CONTROLLER_REGISTER_STATUS {
+ GENERIC_AZ_CONTROLLER_REGISTER_STATUS_NOT_SET = 0x0,
+ GENERIC_AZ_CONTROLLER_REGISTER_STATUS_SET = 0x1,
+} GENERIC_AZ_CONTROLLER_REGISTER_STATUS;
+typedef enum GENERIC_AZ_CONTROLLER_REGISTER_STATUS_RESERVED {
+ GENERIC_AZ_CONTROLLER_REGISTER_STATUS_NOT_SET_RESERVED= 0x0,
+ GENERIC_AZ_CONTROLLER_REGISTER_STATUS_SET_RESERVED= 0x1,
+} GENERIC_AZ_CONTROLLER_REGISTER_STATUS_RESERVED;
+typedef enum AZ_GLOBAL_CAPABILITIES {
+ AZ_GLOBAL_CAPABILITIES_SIXTY_FOUR_BIT_ADDRESS_NOT_SUPPORTED= 0x0,
+ AZ_GLOBAL_CAPABILITIES_SIXTY_FOUR_BIT_ADDRESS_SUPPORTED= 0x1,
+} AZ_GLOBAL_CAPABILITIES;
+typedef enum GLOBAL_CONTROL_ACCEPT_UNSOLICITED_RESPONSE {
+ ACCEPT_UNSOLICITED_RESPONSE_NOT_ENABLE = 0x0,
+ ACCEPT_UNSOLICITED_RESPONSE_ENABLE = 0x1,
+} GLOBAL_CONTROL_ACCEPT_UNSOLICITED_RESPONSE;
+typedef enum GLOBAL_CONTROL_FLUSH_CONTROL {
+ FLUSH_CONTROL_FLUSH_NOT_STARTED = 0x0,
+ FLUSH_CONTROL_FLUSH_STARTED = 0x1,
+} GLOBAL_CONTROL_FLUSH_CONTROL;
+typedef enum GLOBAL_CONTROL_CONTROLLER_RESET {
+ CONTROLLER_RESET_AZ_CONTROLLER_IN_RESET = 0x0,
+ CONTROLLER_RESET_AZ_CONTROLLER_NOT_IN_RESET = 0x1,
+} GLOBAL_CONTROL_CONTROLLER_RESET;
+typedef enum AZ_STATE_CHANGE_STATUS {
+ AZ_STATE_CHANGE_STATUS_CODEC_NOT_PRESENT = 0x0,
+ AZ_STATE_CHANGE_STATUS_CODEC_PRESENT = 0x1,
+} AZ_STATE_CHANGE_STATUS;
+typedef enum GLOBAL_STATUS_FLUSH_STATUS {
+ GLOBAL_STATUS_FLUSH_STATUS_FLUSH_NOT_ENDED = 0x0,
+ GLOBAL_STATUS_FLUSH_STATUS_FLUSH_ENDED = 0x1,
+} GLOBAL_STATUS_FLUSH_STATUS;
+typedef enum STREAM_0_SYNCHRONIZATION {
+ STREAM_0_SYNCHRONIZATION_STEAM_NOT_STOPPED = 0x0,
+ STREAM_0_SYNCHRONIZATION_STEAM_STOPPED = 0x1,
+} STREAM_0_SYNCHRONIZATION;
+typedef enum STREAM_1_SYNCHRONIZATION {
+ STREAM_1_SYNCHRONIZATION_STEAM_NOT_STOPPED = 0x0,
+ STREAM_1_SYNCHRONIZATION_STEAM_STOPPED = 0x1,
+} STREAM_1_SYNCHRONIZATION;
+typedef enum STREAM_2_SYNCHRONIZATION {
+ STREAM_2_SYNCHRONIZATION_STEAM_NOT_STOPPED = 0x0,
+ STREAM_2_SYNCHRONIZATION_STEAM_STOPPED = 0x1,
+} STREAM_2_SYNCHRONIZATION;
+typedef enum STREAM_3_SYNCHRONIZATION {
+ STREAM_3_SYNCHRONIZATION_STEAM_NOT_STOPPED = 0x0,
+ STREAM_3_SYNCHRONIZATION_STEAM_STOPPED = 0x1,
+} STREAM_3_SYNCHRONIZATION;
+typedef enum STREAM_4_SYNCHRONIZATION {
+ STREAM_4_SYNCHRONIZATION_STEAM_NOT_STOPPED = 0x0,
+ STREAM_4_SYNCHRONIZATION_STEAM_STOPPED = 0x1,
+} STREAM_4_SYNCHRONIZATION;
+typedef enum STREAM_5_SYNCHRONIZATION {
+ STREAM_5_SYNCHRONIZATION_STEAM_NOT_STOPPED = 0x0,
+ STREAM_5_SYNCHRONIZATION_STEAM_STOPPED = 0x1,
+} STREAM_5_SYNCHRONIZATION;
+typedef enum STREAM_6_SYNCHRONIZATION {
+ STREAM_6_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+ STREAM_6_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_6_SYNCHRONIZATION;
+typedef enum STREAM_7_SYNCHRONIZATION {
+ STREAM_7_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+ STREAM_7_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_7_SYNCHRONIZATION;
+typedef enum STREAM_8_SYNCHRONIZATION {
+ STREAM_8_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+ STREAM_8_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_8_SYNCHRONIZATION;
+typedef enum STREAM_9_SYNCHRONIZATION {
+ STREAM_9_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+ STREAM_9_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_9_SYNCHRONIZATION;
+typedef enum STREAM_10_SYNCHRONIZATION {
+ STREAM_10_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+ STREAM_10_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_10_SYNCHRONIZATION;
+typedef enum STREAM_11_SYNCHRONIZATION {
+ STREAM_11_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+ STREAM_11_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_11_SYNCHRONIZATION;
+typedef enum STREAM_12_SYNCHRONIZATION {
+ STREAM_12_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+ STREAM_12_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_12_SYNCHRONIZATION;
+typedef enum STREAM_13_SYNCHRONIZATION {
+ STREAM_13_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+ STREAM_13_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_13_SYNCHRONIZATION;
+typedef enum STREAM_14_SYNCHRONIZATION {
+ STREAM_14_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+ STREAM_14_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_14_SYNCHRONIZATION;
+typedef enum STREAM_15_SYNCHRONIZATION {
+ STREAM_15_SYNCHRONIZATION_STEAM_NOT_STOPPED_RESERVED= 0x0,
+ STREAM_15_SYNCHRONIZATION_STEAM_STOPPED_RESERVED = 0x1,
+} STREAM_15_SYNCHRONIZATION;
+typedef enum CORB_READ_POINTER_RESET {
+ CORB_READ_POINTER_RESET_CORB_DMA_IS_NOT_RESET = 0x0,
+ CORB_READ_POINTER_RESET_CORB_DMA_IS_RESET = 0x1,
+} CORB_READ_POINTER_RESET;
+typedef enum AZ_CORB_SIZE {
+ AZ_CORB_SIZE_2ENTRIES_RESERVED = 0x0,
+ AZ_CORB_SIZE_16ENTRIES_RESERVED = 0x1,
+ AZ_CORB_SIZE_256ENTRIES = 0x2,
+ AZ_CORB_SIZE_RESERVED = 0x3,
+} AZ_CORB_SIZE;
+typedef enum AZ_RIRB_WRITE_POINTER_RESET {
+ AZ_RIRB_WRITE_POINTER_NOT_RESET = 0x0,
+ AZ_RIRB_WRITE_POINTER_DO_RESET = 0x1,
+} AZ_RIRB_WRITE_POINTER_RESET;
+typedef enum RIRB_CONTROL_RESPONSE_OVERRUN_INTERRUPT_CONTROL {
+ RIRB_CONTROL_RESPONSE_OVERRUN_INTERRUPT_CONTROL_INTERRUPT_DISABLED= 0x0,
+ RIRB_CONTROL_RESPONSE_OVERRUN_INTERRUPT_CONTROL_INTERRUPT_ENABLED= 0x1,
+} RIRB_CONTROL_RESPONSE_OVERRUN_INTERRUPT_CONTROL;
+typedef enum RIRB_CONTROL_RESPONSE_INTERRUPT_CONTROL {
+ RIRB_CONTROL_RESPONSE_INTERRUPT_CONTROL_INTERRUPT_DISABLED= 0x0,
+ RIRB_CONTROL_RESPONSE_INTERRUPT_CONTROL_INTERRUPT_ENABLED= 0x1,
+} RIRB_CONTROL_RESPONSE_INTERRUPT_CONTROL;
+typedef enum AZ_RIRB_SIZE {
+ AZ_RIRB_SIZE_2ENTRIES_RESERVED = 0x0,
+ AZ_RIRB_SIZE_16ENTRIES_RESERVED = 0x1,
+ AZ_RIRB_SIZE_256ENTRIES = 0x2,
+ AZ_RIRB_SIZE_UNDEFINED = 0x3,
+} AZ_RIRB_SIZE;
+typedef enum IMMEDIATE_COMMAND_STATUS_IMMEDIATE_RESULT_VALID {
+ IMMEDIATE_COMMAND_STATUS_IMMEDIATE_RESULT_VALID_NO_IMMEDIATE_RESPONSE_VALID= 0x0,
+ IMMEDIATE_COMMAND_STATUS_IMMEDIATE_RESULT_VALID_IMMEDIATE_RESPONSE_VALID= 0x1,
+} IMMEDIATE_COMMAND_STATUS_IMMEDIATE_RESULT_VALID;
+typedef enum IMMEDIATE_COMMAND_STATUS_IMMEDIATE_COMMAND_BUSY {
+ IMMEDIATE_COMMAND_STATUS_IMMEDIATE_COMMAND_NOT_BUSY= 0x0,
+ IMMEDIATE_COMMAND_STATUS_IMMEDIATE_COMMAND_IS_BUSY= 0x1,
+} IMMEDIATE_COMMAND_STATUS_IMMEDIATE_COMMAND_BUSY;
+typedef enum DMA_POSITION_LOWER_BASE_ADDRESS_BUFFER_ENABLE {
+ DMA_POSITION_LOWER_BASE_ADDRESS_BUFFER_ENABLE_DMA_DISABLE= 0x0,
+ DMA_POSITION_LOWER_BASE_ADDRESS_BUFFER_ENABLE_DMA_ENABLE= 0x1,
+} DMA_POSITION_LOWER_BASE_ADDRESS_BUFFER_ENABLE;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR {
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR_STATUS_NOT_SET= 0x0,
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR_STATUS_SET= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR {
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR_STATUS_NOT_SET= 0x0,
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR_STATUS_SET= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_BUFFER_COMPLETION_INTERRUPT_STATUS {
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_BUFFER_COMPLETION_INTERRUPT_STATUS_NOT_SET= 0x0,
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_BUFFER_COMPLETION_INTERRUPT_STATUS_SET= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_BUFFER_COMPLETION_INTERRUPT_STATUS;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_TRAFFIC_PRIORITY {
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_NO_TRAFFIC_PRIORITY= 0x0,
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_YES_TRAFFIC_PRIORITY= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_TRAFFIC_PRIORITY;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR_INTERRUPT_ENABLE {
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR_INTERRUPT_DISABLED= 0x0,
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR_INTERRUPT_ENABLED= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_DESCRIPTOR_ERROR_INTERRUPT_ENABLE;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR_INTERRUPT_ENABLE {
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR_INTERRUPT_DISABLED= 0x0,
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR_INTERRUPT_ENABLED= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_FIFO_ERROR_INTERRUPT_ENABLE;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_INTERRUPT_ON_COMPLETION_ENABLE {
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_INTERRUPT_ON_COMPLETION_ENABLE_INTERRUPT_DISABLED= 0x0,
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_INTERRUPT_ON_COMPLETION_ENABLE_INTERRUPT_ENABLED= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_INTERRUPT_ON_COMPLETION_ENABLE;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_RUN {
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_NOT_RUN= 0x0,
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_DO_RUN= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_RUN;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_RESET {
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_NOT_RESET= 0x0,
+ OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_IS_RESET= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS_STREAM_RESET;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_RATE {
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_RATE_48KHZ= 0x0,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_RATE_44P1KHZ= 0x1,
+} OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_RATE;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_MULTIPLE {
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_MULTIPLE_BY1= 0x0,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_MULTIPLE_BY2= 0x1,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_MULTIPLE_BY3_RESERVED= 0x2,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_MULTIPLE_BY4= 0x3,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_MULTIPLE_RESERVED= 0x4,
+} OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_MULTIPLE;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR {
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY1= 0x0,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY2_RESERVED= 0x1,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY3= 0x2,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY4_RESERVED= 0x3,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY5_RESERVED= 0x4,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY6_RESERVED= 0x5,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY7_RESERVED= 0x6,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR_BY8_RESERVED= 0x7,
+} OUTPUT_STREAM_DESCRIPTOR_FORMAT_SAMPLE_BASE_DIVISOR;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE {
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE_8_RESERVED= 0x0,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE_16= 0x1,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE_20= 0x2,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE_24= 0x3,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE_32_RESERVED= 0x4,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE_RESERVED= 0x5,
+} OUTPUT_STREAM_DESCRIPTOR_FORMAT_BITS_PER_SAMPLE;
+typedef enum OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS {
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_1= 0x0,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_2= 0x1,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_3= 0x2,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_4= 0x3,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_5= 0x4,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_6= 0x5,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_7= 0x6,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_8= 0x7,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_9_RESERVED= 0x8,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_10_RESERVED= 0x9,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_11_RESERVED= 0xa,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_12_RESERVED= 0xb,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_13_RESERVED= 0xc,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_14_RESERVED= 0xd,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_15_RESERVED= 0xe,
+ OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS_16_RESERVED= 0xf,
+} OUTPUT_STREAM_DESCRIPTOR_FORMAT_NUMBER_OF_CHANNELS;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE_PCM= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE_NOT_PCM= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE_48KHZ= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE_44P1KHZ= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY1= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY2= 0x1,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY3_RESERVED= 0x2,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY4= 0x3,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_RESERVED= 0x4,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY1= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY2_RESERVED= 0x1,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY3= 0x2,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY4_RESERVED= 0x3,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY5_RESERVED= 0x4,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY6_RESERVED= 0x5,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY7_RESERVED= 0x6,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY8_RESERVED= 0x7,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_8_RESERVED= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_16= 0x1,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_20= 0x2,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_24= 0x3,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_32_RESERVED= 0x4,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_RESERVED= 0x5,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_1= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_2= 0x1,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_3= 0x2,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_4= 0x3,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_5= 0x4,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_6= 0x5,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_7= 0x6,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_8= 0x7,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_RESERVED= 0x8,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_L {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_L_BIT7_NOT_SET= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_L_BIT7_IS_SET= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_L;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRO {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRO_BIT_A_NOT_SET= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRO_BIT_A_IS_SET= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRO;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_NON_AUDIO {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_NON_AUDIO_BIT_B_NOT_SET= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_NON_AUDIO_BIT_B_IS_SET= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_NON_AUDIO;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_COPY {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_COPY_BIT_C_IS_SET= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_COPY_BIT_C_NOT_SET= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_COPY;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRE {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRE_LSB_OF_D_NOT_SET= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRE_LSB_OF_D_IS_SET= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_PRE;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_VCFG {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_VALIDITY_CFG_NOT_ON= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_VALIDITY_CFG_ON= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_VCFG;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_V {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_V_BIT28_IS_ZERO= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_V_BIT28_IS_ONE= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_V;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN_DIGITAL_TRANSMISSION_DISABLED= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN_DIGITAL_TRANSMISSION_ENABLED= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN;
+typedef enum AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3_KEEPALIVE {
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3_KEEPALIVE_SILENT_STREAM_NOT_ENABLE= 0x0,
+ AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3_KEEPALIVE_SILENT_STREAM_ENABLE= 0x1,
+} AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3_KEEPALIVE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL_OUT_ENABLE {
+ AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL_OUT_ENABLE_PIN_SHUT_OFF= 0x0,
+ AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL_OUT_ENABLE_PIN_DRIVEN= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL_OUT_ENABLE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_ENABLE {
+ AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_DISABLED= 0x0,
+ AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_ENABLED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_ENABLE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO_DOWN_MIX_INHIBIT {
+ AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_NO_INFO_OR_PERMITTED= 0x0,
+ AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_FORBIDDEN = 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO_DOWN_MIX_INHIBIT;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE_MULTICHANNEL01_MUTE {
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE_MULTICHANNEL01_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE_MULTICHANNEL01_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE_MULTICHANNEL01_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE_MULTICHANNEL23_MUTE {
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE_MULTICHANNEL23_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE_MULTICHANNEL23_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE_MULTICHANNEL23_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE_MULTICHANNEL45_MUTE {
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE_MULTICHANNEL45_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE_MULTICHANNEL45_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE_MULTICHANNEL45_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE_MULTICHANNEL67_MUTE {
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE_MULTICHANNEL67_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE_MULTICHANNEL67_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE_MULTICHANNEL67_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_MUTE {
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_MUTE {
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_MUTE {
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_MUTE {
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_MUTED= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_MUTE;
+typedef enum AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_MODE {
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_PAIR_MODE= 0x0,
+ AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_SINGLE_MODE= 0x1,
+} AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_MODE;
+typedef enum AZ_LATENCY_COUNTER_CONTROL {
+ AZ_LATENCY_COUNTER_NO_RESET = 0x0,
+ AZ_LATENCY_COUNTER_RESET_DONE = 0x1,
+} AZ_LATENCY_COUNTER_CONTROL;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE {
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_OUTPUT_CONVERTER_RESERVED= 0x0,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_INPUT_CONVERTER_RESERVED= 0x1,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_MIXER_RESERVED= 0x2,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_SELECTOR_RESERVED= 0x3,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_PIN_RESERVED= 0x4,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_POWER_WIDGET_RESERVED= 0x5,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VOLUME_KNOB_RESERVED= 0x6,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_BEEP_GENERATOR_RESERVED= 0x7,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_RESERVED_RESERVED= 0x8,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VENDOR_DEFINED_RESERVED= 0x9,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP {
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_LR_SWAP_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_LR_SWAP_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL {
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_POWER_CONTROL_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_POWER_CONTROL_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL {
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_IS_ANALOG= 0x0,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_IS_DIGITAL= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST {
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_CONNECTION_LIST= 0x0,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_CONNECTION_LIST= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY {
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_UNSOLICITED_RESPONSE_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_UNSOLICITED_RESPONSE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET {
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_NO_PROCESSING_CAPABILITIES= 0x0,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_HAVE_PROCESSING_CAPABILITIES= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE {
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_SUPPORT_STRIPING= 0x0,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_SUPPORT_STRIPING= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_FORMAT_OVERRIDE {
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_FORMAT_OVERRIDE= 0x0,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_SUPPORT_FORMAT_OVERRIDE= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_FORMAT_OVERRIDE;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE {
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_AMPLIFIER_PARAMETER= 0x0,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_AMPLIFIER_PARAMETER_OVERRIDE= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT {
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_OUTPUT_AMPLIFIER= 0x0,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_OUTPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT {
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_INPUT_AMPLIFIER= 0x0,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_INPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES {
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES_MONOPHONIC= 0x0,
+ AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES_STEREO= 0x1,
+} AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE {
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_OUTPUT_CONVERTER_RESERVED= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_INPUT_CONVERTER_RESERVED= 0x1,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_MIXER_RESERVED= 0x2,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_SELECTOR_RESERVED= 0x3,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_PIN_RESERVED= 0x4,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_POWER_WIDGET_RESERVED= 0x5,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VOLUME_KNOB_RESERVED= 0x6,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_BEEP_GENERATOR_RESERVED= 0x7,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_RESERVED_RESERVED= 0x8,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VENDOR_DEFINED_RESERVED= 0x9,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP {
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_LR_SWAP_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_LR_SWAP_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL {
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_POWER_CONTROL_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_POWER_CONTROL_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL {
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_IS_ANALOG= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_IS_DIGITAL= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST {
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_CONNECTION_LIST= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_CONNECTION_LIST= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY {
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_UNSOLICITED_RESPONSE_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_UNSOLICITED_RESPONSE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET {
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_NO_PROCESSING_CAPABILITIES= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_HAVE_PROCESSING_CAPABILITIES= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE {
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_SUPPORT_STRIPING= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_SUPPORT_STRIPING= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE {
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_AMPLIFIER_PARAMETER= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_AMPLIFIER_PARAMETER_OVERRIDE= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT {
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_OUTPUT_AMPLIFIER= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_OUTPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT {
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_INPUT_AMPLIFIER_PRESENT= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_INPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_EAPD_CAPABLE {
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_NO_EAPD_PIN= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HAVE_EAPD_PIN= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_EAPD_CAPABLE;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_BALANCED_I_O_PINS {
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_I_O_PINS_ARE_NOT_BALANCED= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_I_O_PINS_ARE_BALANCED= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_BALANCED_I_O_PINS;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_INPUT_CAPABLE {
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_NO_INPUT_PIN= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HAVE_INPUT_PIN= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_INPUT_CAPABLE;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_OUTPUT_CAPABLE {
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_NO_OUTPUT_PIN= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HAVE_OUTPUT_PIN= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_OUTPUT_CAPABLE;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HEADPHONE_DRIVE_CAPABLE {
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_NO_HEADPHONE_DRIVE_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HAVE_HEADPHONE_DRIVE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HEADPHONE_DRIVE_CAPABLE;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_JACK_DETECTION_CAPABILITY {
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_NO_JACK_DETECTION_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HAVE_JACK_DETECTION_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_JACK_DETECTION_CAPABILITY;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_TRIGGER_REQUIRED {
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_NO_TRIGGER_REQUIRED_FOR_IMPEDANCE_MEASUREMENT= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_TRIGGER_REQUIRED_FOR_IMPEDANCE_MEASUREMENT= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_TRIGGER_REQUIRED;
+typedef enum AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_IMPEDANCE_SENSE_CAPABLE {
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_NO_IMPEDANCE_SENSE_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_HAVE_IMPEDANCE_SENSE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES_IMPEDANCE_SENSE_CAPABLE;
+typedef enum AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_MODE {
+ AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_PAIR_MODE= 0x0,
+ AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_SINGLE_MODE= 0x1,
+} AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE_MULTICHANNEL_MODE;
+typedef enum AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_HBR_CAPABLE {
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_NO_HBR_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_HAVE_HBR_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR_HBR_CAPABLE;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE {
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_OUTPUT_CONVERTER_RESERVED= 0x0,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_INPUT_CONVERTER_RESERVED= 0x1,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_MIXER_RESERVED= 0x2,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_SELECTOR_RESERVED= 0x3,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_PIN_RESERVED= 0x4,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_POWER_WIDGET_RESERVED= 0x5,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VOLUME_KNOB_RESERVED= 0x6,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_BEEP_GENERATOR_RESERVED= 0x7,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_RESERVED= 0x8,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VENDOR_DEFINED_RESERVED= 0x9,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP {
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_LR_SWAP_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_LR_SWAP_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL {
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_POWER_CONTROL_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_POWER_CONTROL_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL {
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CODEC_CONVERTER0_IS_ANALOG= 0x0,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CODEC_CONVERTER0_IS_DIGITAL= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST {
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_CONNECTION_LIST= 0x0,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_CONNECTION_LIST= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY {
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_UNSOLICITED_RESPONSE_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_UNSOLICITED_RESPONSE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET {
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_CODEC_CONVERTER0_HAVE_NO_PROCESSING_CAPABILITIES= 0x0,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_CODEC_CONVERTER0_HAVE_PROCESSING_CAPABILITIES= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE {
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NOT_SUPPORT_STRIPING= 0x0,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_SUPPORT_STRIPING= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_FORMAT_OVERRIDE {
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_FORMAT_OVERRIDE= 0x0,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_FORMAT_OVERRIDE= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_FORMAT_OVERRIDE;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE {
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_AMPLIFIER_PARAMETER= 0x0,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_AMPLIFIER_PARAMETER= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT {
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_OUTPUT_AMPLIFIER= 0x0,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_OUTPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT {
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_INPUT_AMPLIFIER= 0x0,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_INPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES {
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES_MONOPHONIC= 0x0,
+ AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES_STEREO= 0x1,
+} AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AUDIO_CHANNEL_CAPABILITIES;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_OUTPUT_CONVERTER_RESERVED= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_INPUT_CONVERTER_RESERVED= 0x1,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_MIXER_RESERVED= 0x2,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_SELECTOR_RESERVED= 0x3,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_PIN_RESERVED= 0x4,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_POWER_WIDGET_RESERVED= 0x5,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VOLUME_KNOB_RESERVED= 0x6,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_BEEP_GENERATOR_RESERVED= 0x7,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_RESERVED= 0x8,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE_VENDOR_DEFINED_RESERVED= 0x9,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_TYPE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_LR_SWAP= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_LR_SWAP= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_LR_SWAP;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_POWER_CONTROL_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_POWER_CONTROL_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_POWER_CONTROL;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_IS_ANALOG= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_IS_DIGITAL= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_DIGITAL;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_CONNECTION_LIST= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_CONNECTION_LIST= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_CONNECTION_LIST;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_UNSOLICITED_RESPONSE_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_UNSOLICITED_RESPONSE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_UNSOLICITED_RESPONSE_CAPABILITY;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_NO_PROCESSING_CAPABILITIES= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET_HAVE_PROCESSING_CAPABILITIES= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_PROCESSING_WIDGET;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_SUPPORT_STRIPING= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_SUPPORT_STRIPING= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_STRIPE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_AMPLIFIER_PARAMETER= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_AMPLIFIER_PARAMETER_OVERRIDE= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_AMPLIFIER_PARAMETER_OVERRIDE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_OUTPUT_AMPLIFIER= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_OUTPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_OUTPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_NO_INPUT_AMPLIFIER= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_HAVE_INPUT_AMPLIFIER= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES_INPUT_AMPLIFIER_PRESENT;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DP {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DP_NOT_ENABLED= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DP_ENABLED= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_DP;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_EAPD_CAPABLE {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_EAPD_CAPABLE_NO_EAPD_PIN= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_EAPD_CAPABLE_HAVE_EAPD_PIN= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_EAPD_CAPABLE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HDMI {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HDMI_NOT_ENABLED= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HDMI_ENABLED= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HDMI;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_BALANCED_I_O_PINS {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_I_O_PINS_NOT_BALANCED= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_I_O_PINS_ARE_BALANCED= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_BALANCED_I_O_PINS;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_INPUT_CAPABLE {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_NO_INPUT_PIN= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HAVE_INPUT_PIN= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_INPUT_CAPABLE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_OUTPUT_CAPABLE {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_NO_OUTPUT_PIN= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HAVE_OUTPUT_PIN= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_OUTPUT_CAPABLE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HEADPHONE_DRIVE_CAPABLE {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_NO_HEADPHONE_DRIVE_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HAVE_HEADPHONE_DRIVE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HEADPHONE_DRIVE_CAPABLE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_JACK_DETECTION_CAPABILITY {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_NO_JACK_PRESENCE_DETECTION_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HAVE_JACK_PRESENCE_DETECTION_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_JACK_DETECTION_CAPABILITY;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_TRIGGER_REQUIRED {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_NO_TRIGGER_REQUIRED_FOR_IMPEDANCE_MEASUREMENT= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_TRIGGER_REQUIRED_FOR_IMPEDANCE_MEASUREMENT= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_TRIGGER_REQUIRED;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_IMPEDANCE_SENSE_CAPABLE {
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_NO_IMPEDANCE_SENSE_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_HAVE_IMPEDANCE_SENSE_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES_IMPEDANCE_SENSE_CAPABLE;
+typedef enum AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_HBR_CAPABLE {
+ AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_NO_HBR_CAPABILITY= 0x0,
+ AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_HAVE_HBR_CAPABILITY= 0x1,
+} AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR_HBR_CAPABLE;
+typedef enum AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE {
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE_PCM= 0x0,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE_NOT_PCM= 0x1,
+} AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_STREAM_TYPE;
+typedef enum AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE {
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE_48KHZ= 0x0,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE_44P1KHZ= 0x1,
+} AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_RATE;
+typedef enum AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE {
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY1= 0x0,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY2= 0x1,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY3_RESERVED= 0x2,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_BY4= 0x3,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE_RESERVED= 0x4,
+} AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_MULTIPLE;
+typedef enum AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR {
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY1= 0x0,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY2_RESERVED= 0x1,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY3= 0x2,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY4_RESERVED= 0x3,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY5_RESERVED= 0x4,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY6_RESERVED= 0x5,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY7_RESERVED= 0x6,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR_BY8_RESERVED= 0x7,
+} AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_SAMPLE_BASE_DIVISOR;
+typedef enum AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE {
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_8_RESERVED= 0x0,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_16= 0x1,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_20= 0x2,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_24= 0x3,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_32_RESERVED= 0x4,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE_RESERVED= 0x5,
+} AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_BITS_PER_SAMPLE;
+typedef enum AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS {
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_1= 0x0,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_2= 0x1,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_3= 0x2,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_4= 0x3,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_5= 0x4,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_6= 0x5,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_7= 0x6,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_8= 0x7,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS_RESERVED= 0x8,
+} AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT_NUMBER_OF_CHANNELS;
+typedef enum AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN {
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN_DIGITAL_TRANSMISSION_DISABLED= 0x0,
+ AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN_DIGITAL_TRANSMISSION_ENABLED= 0x1,
+} AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER_DIGEN;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_IN_ENABLE {
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_IN_ENABLE_PIN_SHUT_OFF= 0x0,
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_IN_ENABLE_PIN_DRIVEN= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL_IN_ENABLE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_ENABLE {
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_DISABLED= 0x0,
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_ENABLED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_ENABLE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE_MULTICHANNEL0_MUTE {
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE_MULTICHANNEL0_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE_MULTICHANNEL0_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE_MULTICHANNEL0_MUTE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_MUTE {
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE_MULTICHANNEL1_MUTE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE_MULTICHANNEL2_MUTE {
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE_MULTICHANNEL2_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE_MULTICHANNEL2_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE_MULTICHANNEL2_MUTE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_MUTE {
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE_MULTICHANNEL3_MUTE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE_MULTICHANNEL4_MUTE {
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE_MULTICHANNEL4_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE_MULTICHANNEL4_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE_MULTICHANNEL4_MUTE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_MUTE {
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE_MULTICHANNEL5_MUTE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE_MULTICHANNEL6_MUTE {
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE_MULTICHANNEL6_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE_MULTICHANNEL6_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE_MULTICHANNEL6_MUTE;
+typedef enum AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_MUTE {
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_NOT_MUTED= 0x0,
+ AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_MUTED= 0x1,
+} AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE_MULTICHANNEL7_MUTE;
+typedef enum BLND_CONTROL_BLND_MODE {
+ BLND_CONTROL_BLND_MODE_CURRENT_PIPE_ONLY = 0x0,
+ BLND_CONTROL_BLND_MODE_OTHER_PIPE_ONLY = 0x1,
+ BLND_CONTROL_BLND_MODE_ALPHA_BLENDING_MODE = 0x2,
+ BLND_CONTROL_BLND_MODE_OTHER_STEREO_TYPE = 0x3,
+} BLND_CONTROL_BLND_MODE;
+typedef enum BLND_CONTROL_BLND_STEREO_TYPE {
+ BLND_CONTROL_BLND_STEREO_TYPE_NON_SINGLE_PIPE_STEREO= 0x0,
+ BLND_CONTROL_BLND_STEREO_TYPE_SIDE_BY_SIDE_SINGLE_PIPE_STEREO= 0x1,
+ BLND_CONTROL_BLND_STEREO_TYPE_TOP_BOTTOM_SINGLE_PIPE_STEREO= 0x2,
+ BLND_CONTROL_BLND_STEREO_TYPE_UNUSED = 0x3,
+} BLND_CONTROL_BLND_STEREO_TYPE;
+typedef enum BLND_CONTROL_BLND_STEREO_POLARITY {
+ BLND_CONTROL_BLND_STEREO_POLARITY_LOW = 0x0,
+ BLND_CONTROL_BLND_STEREO_POLARITY_HIGH = 0x1,
+} BLND_CONTROL_BLND_STEREO_POLARITY;
+typedef enum BLND_CONTROL_BLND_FEEDTHROUGH_EN {
+ BLND_CONTROL_BLND_FEEDTHROUGH_EN_FALSE = 0x0,
+ BLND_CONTROL_BLND_FEEDTHROUGH_EN_TRUE = 0x1,
+} BLND_CONTROL_BLND_FEEDTHROUGH_EN;
+typedef enum BLND_CONTROL_BLND_ALPHA_MODE {
+ BLND_CONTROL_BLND_ALPHA_MODE_CURRENT_PIXEL_ALPHA = 0x0,
+ BLND_CONTROL_BLND_ALPHA_MODE_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN= 0x1,
+ BLND_CONTROL_BLND_ALPHA_MODE_GLOBAL_ALPHA_ONLY = 0x2,
+ BLND_CONTROL_BLND_ALPHA_MODE_UNUSED = 0x3,
+} BLND_CONTROL_BLND_ALPHA_MODE;
+typedef enum BLND_CONTROL_BLND_ACTIVE_OVERLAP_ONLY {
+ BLND_CONTROL_BLND_ACTIVE_OVERLAY_ONLY_FALSE = 0x0,
+ BLND_CONTROL_BLND_ACTIVE_OVERLAY_ONLY_TRUE = 0x1,
+} BLND_CONTROL_BLND_ACTIVE_OVERLAP_ONLY;
+typedef enum BLND_CONTROL_BLND_MULTIPLIED_MODE {
+ BLND_CONTROL_BLND_MULTIPLIED_MODE_FALSE = 0x0,
+ BLND_CONTROL_BLND_MULTIPLIED_MODE_TRUE = 0x1,
+} BLND_CONTROL_BLND_MULTIPLIED_MODE;
+typedef enum BLND_SM_CONTROL2_SM_MODE {
+ BLND_SM_CONTROL2_SM_MODE_SINGLE_PLANE = 0x0,
+ BLND_SM_CONTROL2_SM_MODE_ROW_SUBSAMPLING = 0x2,
+ BLND_SM_CONTROL2_SM_MODE_COLUMN_SUBSAMPLING = 0x4,
+ BLND_SM_CONTROL2_SM_MODE_CHECKERBOARD_SUBSAMPLING= 0x6,
+} BLND_SM_CONTROL2_SM_MODE;
+typedef enum BLND_SM_CONTROL2_SM_FRAME_ALTERNATE {
+ BLND_SM_CONTROL2_SM_FRAME_ALTERNATE_FALSE = 0x0,
+ BLND_SM_CONTROL2_SM_FRAME_ALTERNATE_TRUE = 0x1,
+} BLND_SM_CONTROL2_SM_FRAME_ALTERNATE;
+typedef enum BLND_SM_CONTROL2_SM_FIELD_ALTERNATE {
+ BLND_SM_CONTROL2_SM_FIELD_ALTERNATE_FALSE = 0x0,
+ BLND_SM_CONTROL2_SM_FIELD_ALTERNATE_TRUE = 0x1,
+} BLND_SM_CONTROL2_SM_FIELD_ALTERNATE;
+typedef enum BLND_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL {
+ BLND_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_NO_FORCE= 0x0,
+ BLND_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_RESERVED= 0x1,
+ BLND_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_FORCE_LOW= 0x2,
+ BLND_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_FORCE_HIGH= 0x3,
+} BLND_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL;
+typedef enum BLND_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL {
+ BLND_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_NO_FORCE = 0x0,
+ BLND_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_RESERVED = 0x1,
+ BLND_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_FORCE_LOW = 0x2,
+ BLND_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_FORCE_HIGH= 0x3,
+} BLND_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL;
+typedef enum BLND_CONTROL2_PTI_ENABLE {
+ BLND_CONTROL2_PTI_ENABLE_FALSE = 0x0,
+ BLND_CONTROL2_PTI_ENABLE_TRUE = 0x1,
+} BLND_CONTROL2_PTI_ENABLE;
+typedef enum BLND_CONTROL2_BLND_SUPERAA_DEGAMMA_EN {
+ BLND_CONTROL2_BLND_SUPERAA_DEGAMMA_EN_FALSE = 0x0,
+ BLND_CONTROL2_BLND_SUPERAA_DEGAMMA_EN_TRUE = 0x1,
+} BLND_CONTROL2_BLND_SUPERAA_DEGAMMA_EN;
+typedef enum BLND_CONTROL2_BLND_SUPERAA_REGAMMA_EN {
+ BLND_CONTROL2_BLND_SUPERAA_REGAMMA_EN_FALSE = 0x0,
+ BLND_CONTROL2_BLND_SUPERAA_REGAMMA_EN_TRUE = 0x1,
+} BLND_CONTROL2_BLND_SUPERAA_REGAMMA_EN;
+typedef enum BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK {
+ BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK_FALSE= 0x0,
+ BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK_TRUE= 0x1,
+} BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK;
+typedef enum BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK {
+ BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK_FALSE= 0x0,
+ BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK_TRUE= 0x1,
+} BLND_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK;
+typedef enum BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK {
+ BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK_FALSE= 0x0,
+ BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK_TRUE= 0x1,
+} BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK;
+typedef enum BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK {
+ BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK_FALSE= 0x0,
+ BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK_TRUE= 0x1,
+} BLND_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK;
+typedef enum BLND_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK {
+ BLND_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK_FALSE= 0x0,
+ BLND_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK_TRUE= 0x1,
+} BLND_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK;
+typedef enum BLND_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK {
+ BLND_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK_FALSE= 0x0,
+ BLND_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK_TRUE= 0x1,
+} BLND_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK;
+typedef enum BLND_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK {
+ BLND_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK_FALSE = 0x0,
+ BLND_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK_TRUE = 0x1,
+} BLND_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK;
+typedef enum BLND_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK {
+ BLND_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK_FALSE = 0x0,
+ BLND_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK_TRUE = 0x1,
+} BLND_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK;
+typedef enum BLND_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE {
+ BLND_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE_FALSE = 0x0,
+ BLND_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE_TRUE = 0x1,
+} BLND_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE;
+typedef enum BLND_DEBUG_BLND_CNV_MUX_SELECT {
+ BLND_DEBUG_BLND_CNV_MUX_SELECT_LOW = 0x0,
+ BLND_DEBUG_BLND_CNV_MUX_SELECT_HIGH = 0x1,
+} BLND_DEBUG_BLND_CNV_MUX_SELECT;
+typedef enum BLND_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN {
+ BLND_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN_FALSE= 0x0,
+ BLND_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN_TRUE= 0x1,
+} BLND_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN;
+typedef enum SurfaceEndian {
+ ENDIAN_NONE = 0x0,
+ ENDIAN_8IN16 = 0x1,
+ ENDIAN_8IN32 = 0x2,
+ ENDIAN_8IN64 = 0x3,
+} SurfaceEndian;
+typedef enum ArrayMode {
+ ARRAY_LINEAR_GENERAL = 0x0,
+ ARRAY_LINEAR_ALIGNED = 0x1,
+ ARRAY_1D_TILED_THIN1 = 0x2,
+ ARRAY_1D_TILED_THICK = 0x3,
+ ARRAY_2D_TILED_THIN1 = 0x4,
+ ARRAY_PRT_TILED_THIN1 = 0x5,
+ ARRAY_PRT_2D_TILED_THIN1 = 0x6,
+ ARRAY_2D_TILED_THICK = 0x7,
+ ARRAY_2D_TILED_XTHICK = 0x8,
+ ARRAY_PRT_TILED_THICK = 0x9,
+ ARRAY_PRT_2D_TILED_THICK = 0xa,
+ ARRAY_PRT_3D_TILED_THIN1 = 0xb,
+ ARRAY_3D_TILED_THIN1 = 0xc,
+ ARRAY_3D_TILED_THICK = 0xd,
+ ARRAY_3D_TILED_XTHICK = 0xe,
+ ARRAY_PRT_3D_TILED_THICK = 0xf,
+} ArrayMode;
+typedef enum PipeTiling {
+ CONFIG_1_PIPE = 0x0,
+ CONFIG_2_PIPE = 0x1,
+ CONFIG_4_PIPE = 0x2,
+ CONFIG_8_PIPE = 0x3,
+} PipeTiling;
+typedef enum BankTiling {
+ CONFIG_4_BANK = 0x0,
+ CONFIG_8_BANK = 0x1,
+} BankTiling;
+typedef enum GroupInterleave {
+ CONFIG_256B_GROUP = 0x0,
+ CONFIG_512B_GROUP = 0x1,
+} GroupInterleave;
+typedef enum RowTiling {
+ CONFIG_1KB_ROW = 0x0,
+ CONFIG_2KB_ROW = 0x1,
+ CONFIG_4KB_ROW = 0x2,
+ CONFIG_8KB_ROW = 0x3,
+ CONFIG_1KB_ROW_OPT = 0x4,
+ CONFIG_2KB_ROW_OPT = 0x5,
+ CONFIG_4KB_ROW_OPT = 0x6,
+ CONFIG_8KB_ROW_OPT = 0x7,
+} RowTiling;
+typedef enum BankSwapBytes {
+ CONFIG_128B_SWAPS = 0x0,
+ CONFIG_256B_SWAPS = 0x1,
+ CONFIG_512B_SWAPS = 0x2,
+ CONFIG_1KB_SWAPS = 0x3,
+} BankSwapBytes;
+typedef enum SampleSplitBytes {
+ CONFIG_1KB_SPLIT = 0x0,
+ CONFIG_2KB_SPLIT = 0x1,
+ CONFIG_4KB_SPLIT = 0x2,
+ CONFIG_8KB_SPLIT = 0x3,
+} SampleSplitBytes;
+typedef enum NumPipes {
+ ADDR_CONFIG_1_PIPE = 0x0,
+ ADDR_CONFIG_2_PIPE = 0x1,
+ ADDR_CONFIG_4_PIPE = 0x2,
+ ADDR_CONFIG_8_PIPE = 0x3,
+} NumPipes;
+typedef enum PipeInterleaveSize {
+ ADDR_CONFIG_PIPE_INTERLEAVE_256B = 0x0,
+ ADDR_CONFIG_PIPE_INTERLEAVE_512B = 0x1,
+} PipeInterleaveSize;
+typedef enum BankInterleaveSize {
+ ADDR_CONFIG_BANK_INTERLEAVE_1 = 0x0,
+ ADDR_CONFIG_BANK_INTERLEAVE_2 = 0x1,
+ ADDR_CONFIG_BANK_INTERLEAVE_4 = 0x2,
+ ADDR_CONFIG_BANK_INTERLEAVE_8 = 0x3,
+} BankInterleaveSize;
+typedef enum NumShaderEngines {
+ ADDR_CONFIG_1_SHADER_ENGINE = 0x0,
+ ADDR_CONFIG_2_SHADER_ENGINE = 0x1,
+} NumShaderEngines;
+typedef enum ShaderEngineTileSize {
+ ADDR_CONFIG_SE_TILE_16 = 0x0,
+ ADDR_CONFIG_SE_TILE_32 = 0x1,
+} ShaderEngineTileSize;
+typedef enum NumGPUs {
+ ADDR_CONFIG_1_GPU = 0x0,
+ ADDR_CONFIG_2_GPU = 0x1,
+ ADDR_CONFIG_4_GPU = 0x2,
+} NumGPUs;
+typedef enum MultiGPUTileSize {
+ ADDR_CONFIG_GPU_TILE_16 = 0x0,
+ ADDR_CONFIG_GPU_TILE_32 = 0x1,
+ ADDR_CONFIG_GPU_TILE_64 = 0x2,
+ ADDR_CONFIG_GPU_TILE_128 = 0x3,
+} MultiGPUTileSize;
+typedef enum RowSize {
+ ADDR_CONFIG_1KB_ROW = 0x0,
+ ADDR_CONFIG_2KB_ROW = 0x1,
+ ADDR_CONFIG_4KB_ROW = 0x2,
+} RowSize;
+typedef enum NumLowerPipes {
+ ADDR_CONFIG_1_LOWER_PIPES = 0x0,
+ ADDR_CONFIG_2_LOWER_PIPES = 0x1,
+} NumLowerPipes;
+typedef enum DebugBlockId {
+ DBG_CLIENT_BLKID_RESERVED = 0x0,
+ DBG_CLIENT_BLKID_dbg = 0x1,
+ DBG_CLIENT_BLKID_scf2 = 0x2,
+ DBG_CLIENT_BLKID_mcd5 = 0x3,
+ DBG_CLIENT_BLKID_vmc = 0x4,
+ DBG_CLIENT_BLKID_sx30 = 0x5,
+ DBG_CLIENT_BLKID_mcd2 = 0x6,
+ DBG_CLIENT_BLKID_bci1 = 0x7,
+ DBG_CLIENT_BLKID_xdma_dbg_client_wrapper = 0x8,
+ DBG_CLIENT_BLKID_mcc0 = 0x9,
+ DBG_CLIENT_BLKID_uvdf_2 = 0xa,
+ DBG_CLIENT_BLKID_uvdf_3 = 0xb,
+ DBG_CLIENT_BLKID_uvdt_0 = 0xc,
+ DBG_CLIENT_BLKID_uvdi_0 = 0xd,
+ DBG_CLIENT_BLKID_bci0 = 0xe,
+ DBG_CLIENT_BLKID_vceb0_1 = 0xf,
+ DBG_CLIENT_BLKID_cb100 = 0x10,
+ DBG_CLIENT_BLKID_cb001 = 0x11,
+ DBG_CLIENT_BLKID_mcd4 = 0x12,
+ DBG_CLIENT_BLKID_tmonw00 = 0x13,
+ DBG_CLIENT_BLKID_cb101 = 0x14,
+ DBG_CLIENT_BLKID_sx10 = 0x15,
+ DBG_CLIENT_BLKID_cb301 = 0x16,
+ DBG_CLIENT_BLKID_tmonw01 = 0x17,
+ DBG_CLIENT_BLKID_vcea0_0 = 0x18,
+ DBG_CLIENT_BLKID_vcea0_1 = 0x19,
+ DBG_CLIENT_BLKID_vcea0_2 = 0x1a,
+ DBG_CLIENT_BLKID_vcea0_3 = 0x1b,
+ DBG_CLIENT_BLKID_scf1 = 0x1c,
+ DBG_CLIENT_BLKID_sx20 = 0x1d,
+ DBG_CLIENT_BLKID_spim1 = 0x1e,
+ DBG_CLIENT_BLKID_pa10 = 0x1f,
+ DBG_CLIENT_BLKID_pa00 = 0x20,
+ DBG_CLIENT_BLKID_gmcon = 0x21,
+ DBG_CLIENT_BLKID_mcb = 0x22,
+ DBG_CLIENT_BLKID_vgt0 = 0x23,
+ DBG_CLIENT_BLKID_pc0 = 0x24,
+ DBG_CLIENT_BLKID_bci2 = 0x25,
+ DBG_CLIENT_BLKID_uvdb_0 = 0x26,
+ DBG_CLIENT_BLKID_spim3 = 0x27,
+ DBG_CLIENT_BLKID_cpc_0 = 0x28,
+ DBG_CLIENT_BLKID_cpc_1 = 0x29,
+ DBG_CLIENT_BLKID_uvdm_0 = 0x2a,
+ DBG_CLIENT_BLKID_uvdm_1 = 0x2b,
+ DBG_CLIENT_BLKID_uvdm_2 = 0x2c,
+ DBG_CLIENT_BLKID_uvdm_3 = 0x2d,
+ DBG_CLIENT_BLKID_cb000 = 0x2e,
+ DBG_CLIENT_BLKID_spim0 = 0x2f,
+ DBG_CLIENT_BLKID_mcc2 = 0x30,
+ DBG_CLIENT_BLKID_ds0 = 0x31,
+ DBG_CLIENT_BLKID_srbm = 0x32,
+ DBG_CLIENT_BLKID_ih = 0x33,
+ DBG_CLIENT_BLKID_sem = 0x34,
+ DBG_CLIENT_BLKID_sdma_0 = 0x35,
+ DBG_CLIENT_BLKID_sdma_1 = 0x36,
+ DBG_CLIENT_BLKID_hdp = 0x37,
+ DBG_CLIENT_BLKID_cb200 = 0x38,
+ DBG_CLIENT_BLKID_scf3 = 0x39,
+ DBG_CLIENT_BLKID_vceb1_0 = 0x3a,
+ DBG_CLIENT_BLKID_vcea1_0 = 0x3b,
+ DBG_CLIENT_BLKID_vcea1_1 = 0x3c,
+ DBG_CLIENT_BLKID_vcea1_2 = 0x3d,
+ DBG_CLIENT_BLKID_vcea1_3 = 0x3e,
+ DBG_CLIENT_BLKID_bci3 = 0x3f,
+ DBG_CLIENT_BLKID_mcd0 = 0x40,
+ DBG_CLIENT_BLKID_pa11 = 0x41,
+ DBG_CLIENT_BLKID_pa01 = 0x42,
+ DBG_CLIENT_BLKID_cb201 = 0x43,
+ DBG_CLIENT_BLKID_spim2 = 0x44,
+ DBG_CLIENT_BLKID_vgt2 = 0x45,
+ DBG_CLIENT_BLKID_pc2 = 0x46,
+ DBG_CLIENT_BLKID_smu_0 = 0x47,
+ DBG_CLIENT_BLKID_smu_1 = 0x48,
+ DBG_CLIENT_BLKID_smu_2 = 0x49,
+ DBG_CLIENT_BLKID_cb1 = 0x4a,
+ DBG_CLIENT_BLKID_ia0 = 0x4b,
+ DBG_CLIENT_BLKID_wd = 0x4c,
+ DBG_CLIENT_BLKID_ia1 = 0x4d,
+ DBG_CLIENT_BLKID_vcec1_0 = 0x4e,
+ DBG_CLIENT_BLKID_scf0 = 0x4f,
+ DBG_CLIENT_BLKID_vgt1 = 0x50,
+ DBG_CLIENT_BLKID_pc1 = 0x51,
+ DBG_CLIENT_BLKID_cb0 = 0x52,
+ DBG_CLIENT_BLKID_gdc_one_0 = 0x53,
+ DBG_CLIENT_BLKID_gdc_one_1 = 0x54,
+ DBG_CLIENT_BLKID_gdc_one_2 = 0x55,
+ DBG_CLIENT_BLKID_gdc_one_3 = 0x56,
+ DBG_CLIENT_BLKID_gdc_one_4 = 0x57,
+ DBG_CLIENT_BLKID_gdc_one_5 = 0x58,
+ DBG_CLIENT_BLKID_gdc_one_6 = 0x59,
+ DBG_CLIENT_BLKID_gdc_one_7 = 0x5a,
+ DBG_CLIENT_BLKID_gdc_one_8 = 0x5b,
+ DBG_CLIENT_BLKID_gdc_one_9 = 0x5c,
+ DBG_CLIENT_BLKID_gdc_one_10 = 0x5d,
+ DBG_CLIENT_BLKID_gdc_one_11 = 0x5e,
+ DBG_CLIENT_BLKID_gdc_one_12 = 0x5f,
+ DBG_CLIENT_BLKID_gdc_one_13 = 0x60,
+ DBG_CLIENT_BLKID_gdc_one_14 = 0x61,
+ DBG_CLIENT_BLKID_gdc_one_15 = 0x62,
+ DBG_CLIENT_BLKID_gdc_one_16 = 0x63,
+ DBG_CLIENT_BLKID_gdc_one_17 = 0x64,
+ DBG_CLIENT_BLKID_gdc_one_18 = 0x65,
+ DBG_CLIENT_BLKID_gdc_one_19 = 0x66,
+ DBG_CLIENT_BLKID_gdc_one_20 = 0x67,
+ DBG_CLIENT_BLKID_gdc_one_21 = 0x68,
+ DBG_CLIENT_BLKID_gdc_one_22 = 0x69,
+ DBG_CLIENT_BLKID_gdc_one_23 = 0x6a,
+ DBG_CLIENT_BLKID_gdc_one_24 = 0x6b,
+ DBG_CLIENT_BLKID_gdc_one_25 = 0x6c,
+ DBG_CLIENT_BLKID_gdc_one_26 = 0x6d,
+ DBG_CLIENT_BLKID_gdc_one_27 = 0x6e,
+ DBG_CLIENT_BLKID_gdc_one_28 = 0x6f,
+ DBG_CLIENT_BLKID_gdc_one_29 = 0x70,
+ DBG_CLIENT_BLKID_gdc_one_30 = 0x71,
+ DBG_CLIENT_BLKID_gdc_one_31 = 0x72,
+ DBG_CLIENT_BLKID_gdc_one_32 = 0x73,
+ DBG_CLIENT_BLKID_gdc_one_33 = 0x74,
+ DBG_CLIENT_BLKID_gdc_one_34 = 0x75,
+ DBG_CLIENT_BLKID_gdc_one_35 = 0x76,
+ DBG_CLIENT_BLKID_vceb0_0 = 0x77,
+ DBG_CLIENT_BLKID_vgt3 = 0x78,
+ DBG_CLIENT_BLKID_pc3 = 0x79,
+ DBG_CLIENT_BLKID_mcd3 = 0x7a,
+ DBG_CLIENT_BLKID_uvdu_0 = 0x7b,
+ DBG_CLIENT_BLKID_uvdu_1 = 0x7c,
+ DBG_CLIENT_BLKID_uvdu_2 = 0x7d,
+ DBG_CLIENT_BLKID_uvdu_3 = 0x7e,
+ DBG_CLIENT_BLKID_uvdu_4 = 0x7f,
+ DBG_CLIENT_BLKID_uvdu_5 = 0x80,
+ DBG_CLIENT_BLKID_uvdu_6 = 0x81,
+ DBG_CLIENT_BLKID_cb300 = 0x82,
+ DBG_CLIENT_BLKID_mcd1 = 0x83,
+ DBG_CLIENT_BLKID_sx00 = 0x84,
+ DBG_CLIENT_BLKID_uvdf_0 = 0x85,
+ DBG_CLIENT_BLKID_uvdf_1 = 0x86,
+ DBG_CLIENT_BLKID_mcc3 = 0x87,
+ DBG_CLIENT_BLKID_cpg_0 = 0x88,
+ DBG_CLIENT_BLKID_cpg_1 = 0x89,
+ DBG_CLIENT_BLKID_gck = 0x8a,
+ DBG_CLIENT_BLKID_mcc1 = 0x8b,
+ DBG_CLIENT_BLKID_cpf_0 = 0x8c,
+ DBG_CLIENT_BLKID_cpf_1 = 0x8d,
+ DBG_CLIENT_BLKID_rlc = 0x8e,
+ DBG_CLIENT_BLKID_grbm = 0x8f,
+ DBG_CLIENT_BLKID_sammsp = 0x90,
+ DBG_CLIENT_BLKID_dci_pg = 0x91,
+ DBG_CLIENT_BLKID_dci_0 = 0x92,
+ DBG_CLIENT_BLKID_dccg0_0 = 0x93,
+ DBG_CLIENT_BLKID_dccg0_1 = 0x94,
+ DBG_CLIENT_BLKID_dccg0_2 = 0x95,
+ DBG_CLIENT_BLKID_dccg0_3 = 0x96,
+ DBG_CLIENT_BLKID_dccg0_4 = 0x97,
+ DBG_CLIENT_BLKID_dccg0_5 = 0x98,
+ DBG_CLIENT_BLKID_dccg0_6 = 0x99,
+ DBG_CLIENT_BLKID_dccg0_7 = 0x9a,
+ DBG_CLIENT_BLKID_dccg0_8 = 0x9b,
+ DBG_CLIENT_BLKID_dcfe01_0 = 0x9c,
+ DBG_CLIENT_BLKID_dcfe02_0 = 0x9d,
+ DBG_CLIENT_BLKID_dcfe03_0 = 0x9e,
+ DBG_CLIENT_BLKID_dcfe04_0 = 0x9f,
+ DBG_CLIENT_BLKID_dcfe05_0 = 0xa0,
+ DBG_CLIENT_BLKID_dcfe06_0 = 0xa1,
+ DBG_CLIENT_BLKID_uvde_0 = 0xa2,
+ DBG_CLIENT_BLKID_RESERVED_LAST = 0xa3,
+} DebugBlockId;
+typedef enum DebugBlockId_OLD {
+ DBG_BLOCK_ID_RESERVED = 0x0,
+ DBG_BLOCK_ID_DBG = 0x1,
+ DBG_BLOCK_ID_VMC = 0x2,
+ DBG_BLOCK_ID_PDMA = 0x3,
+ DBG_BLOCK_ID_CG = 0x4,
+ DBG_BLOCK_ID_SRBM = 0x5,
+ DBG_BLOCK_ID_GRBM = 0x6,
+ DBG_BLOCK_ID_RLC = 0x7,
+ DBG_BLOCK_ID_CSC = 0x8,
+ DBG_BLOCK_ID_SEM = 0x9,
+ DBG_BLOCK_ID_IH = 0xa,
+ DBG_BLOCK_ID_SC = 0xb,
+ DBG_BLOCK_ID_SQ = 0xc,
+ DBG_BLOCK_ID_AVP = 0xd,
+ DBG_BLOCK_ID_GMCON = 0xe,
+ DBG_BLOCK_ID_SMU = 0xf,
+ DBG_BLOCK_ID_DMA0 = 0x10,
+ DBG_BLOCK_ID_DMA1 = 0x11,
+ DBG_BLOCK_ID_SPIM = 0x12,
+ DBG_BLOCK_ID_GDS = 0x13,
+ DBG_BLOCK_ID_SPIS = 0x14,
+ DBG_BLOCK_ID_UNUSED0 = 0x15,
+ DBG_BLOCK_ID_PA0 = 0x16,
+ DBG_BLOCK_ID_PA1 = 0x17,
+ DBG_BLOCK_ID_CP0 = 0x18,
+ DBG_BLOCK_ID_CP1 = 0x19,
+ DBG_BLOCK_ID_CP2 = 0x1a,
+ DBG_BLOCK_ID_UNUSED1 = 0x1b,
+ DBG_BLOCK_ID_UVDU = 0x1c,
+ DBG_BLOCK_ID_UVDM = 0x1d,
+ DBG_BLOCK_ID_VCE = 0x1e,
+ DBG_BLOCK_ID_UNUSED2 = 0x1f,
+ DBG_BLOCK_ID_VGT0 = 0x20,
+ DBG_BLOCK_ID_VGT1 = 0x21,
+ DBG_BLOCK_ID_IA = 0x22,
+ DBG_BLOCK_ID_UNUSED3 = 0x23,
+ DBG_BLOCK_ID_SCT0 = 0x24,
+ DBG_BLOCK_ID_SCT1 = 0x25,
+ DBG_BLOCK_ID_SPM0 = 0x26,
+ DBG_BLOCK_ID_SPM1 = 0x27,
+ DBG_BLOCK_ID_TCAA = 0x28,
+ DBG_BLOCK_ID_TCAB = 0x29,
+ DBG_BLOCK_ID_TCCA = 0x2a,
+ DBG_BLOCK_ID_TCCB = 0x2b,
+ DBG_BLOCK_ID_MCC0 = 0x2c,
+ DBG_BLOCK_ID_MCC1 = 0x2d,
+ DBG_BLOCK_ID_MCC2 = 0x2e,
+ DBG_BLOCK_ID_MCC3 = 0x2f,
+ DBG_BLOCK_ID_SX0 = 0x30,
+ DBG_BLOCK_ID_SX1 = 0x31,
+ DBG_BLOCK_ID_SX2 = 0x32,
+ DBG_BLOCK_ID_SX3 = 0x33,
+ DBG_BLOCK_ID_UNUSED4 = 0x34,
+ DBG_BLOCK_ID_UNUSED5 = 0x35,
+ DBG_BLOCK_ID_UNUSED6 = 0x36,
+ DBG_BLOCK_ID_UNUSED7 = 0x37,
+ DBG_BLOCK_ID_PC0 = 0x38,
+ DBG_BLOCK_ID_PC1 = 0x39,
+ DBG_BLOCK_ID_UNUSED8 = 0x3a,
+ DBG_BLOCK_ID_UNUSED9 = 0x3b,
+ DBG_BLOCK_ID_UNUSED10 = 0x3c,
+ DBG_BLOCK_ID_UNUSED11 = 0x3d,
+ DBG_BLOCK_ID_MCB = 0x3e,
+ DBG_BLOCK_ID_UNUSED12 = 0x3f,
+ DBG_BLOCK_ID_SCB0 = 0x40,
+ DBG_BLOCK_ID_SCB1 = 0x41,
+ DBG_BLOCK_ID_UNUSED13 = 0x42,
+ DBG_BLOCK_ID_UNUSED14 = 0x43,
+ DBG_BLOCK_ID_SCF0 = 0x44,
+ DBG_BLOCK_ID_SCF1 = 0x45,
+ DBG_BLOCK_ID_UNUSED15 = 0x46,
+ DBG_BLOCK_ID_UNUSED16 = 0x47,
+ DBG_BLOCK_ID_BCI0 = 0x48,
+ DBG_BLOCK_ID_BCI1 = 0x49,
+ DBG_BLOCK_ID_BCI2 = 0x4a,
+ DBG_BLOCK_ID_BCI3 = 0x4b,
+ DBG_BLOCK_ID_UNUSED17 = 0x4c,
+ DBG_BLOCK_ID_UNUSED18 = 0x4d,
+ DBG_BLOCK_ID_UNUSED19 = 0x4e,
+ DBG_BLOCK_ID_UNUSED20 = 0x4f,
+ DBG_BLOCK_ID_CB00 = 0x50,
+ DBG_BLOCK_ID_CB01 = 0x51,
+ DBG_BLOCK_ID_CB02 = 0x52,
+ DBG_BLOCK_ID_CB03 = 0x53,
+ DBG_BLOCK_ID_CB04 = 0x54,
+ DBG_BLOCK_ID_UNUSED21 = 0x55,
+ DBG_BLOCK_ID_UNUSED22 = 0x56,
+ DBG_BLOCK_ID_UNUSED23 = 0x57,
+ DBG_BLOCK_ID_CB10 = 0x58,
+ DBG_BLOCK_ID_CB11 = 0x59,
+ DBG_BLOCK_ID_CB12 = 0x5a,
+ DBG_BLOCK_ID_CB13 = 0x5b,
+ DBG_BLOCK_ID_CB14 = 0x5c,
+ DBG_BLOCK_ID_UNUSED24 = 0x5d,
+ DBG_BLOCK_ID_UNUSED25 = 0x5e,
+ DBG_BLOCK_ID_UNUSED26 = 0x5f,
+ DBG_BLOCK_ID_TCP0 = 0x60,
+ DBG_BLOCK_ID_TCP1 = 0x61,
+ DBG_BLOCK_ID_TCP2 = 0x62,
+ DBG_BLOCK_ID_TCP3 = 0x63,
+ DBG_BLOCK_ID_TCP4 = 0x64,
+ DBG_BLOCK_ID_TCP5 = 0x65,
+ DBG_BLOCK_ID_TCP6 = 0x66,
+ DBG_BLOCK_ID_TCP7 = 0x67,
+ DBG_BLOCK_ID_TCP8 = 0x68,
+ DBG_BLOCK_ID_TCP9 = 0x69,
+ DBG_BLOCK_ID_TCP10 = 0x6a,
+ DBG_BLOCK_ID_TCP11 = 0x6b,
+ DBG_BLOCK_ID_TCP12 = 0x6c,
+ DBG_BLOCK_ID_TCP13 = 0x6d,
+ DBG_BLOCK_ID_TCP14 = 0x6e,
+ DBG_BLOCK_ID_TCP15 = 0x6f,
+ DBG_BLOCK_ID_TCP16 = 0x70,
+ DBG_BLOCK_ID_TCP17 = 0x71,
+ DBG_BLOCK_ID_TCP18 = 0x72,
+ DBG_BLOCK_ID_TCP19 = 0x73,
+ DBG_BLOCK_ID_TCP20 = 0x74,
+ DBG_BLOCK_ID_TCP21 = 0x75,
+ DBG_BLOCK_ID_TCP22 = 0x76,
+ DBG_BLOCK_ID_TCP23 = 0x77,
+ DBG_BLOCK_ID_TCP_RESERVED0 = 0x78,
+ DBG_BLOCK_ID_TCP_RESERVED1 = 0x79,
+ DBG_BLOCK_ID_TCP_RESERVED2 = 0x7a,
+ DBG_BLOCK_ID_TCP_RESERVED3 = 0x7b,
+ DBG_BLOCK_ID_TCP_RESERVED4 = 0x7c,
+ DBG_BLOCK_ID_TCP_RESERVED5 = 0x7d,
+ DBG_BLOCK_ID_TCP_RESERVED6 = 0x7e,
+ DBG_BLOCK_ID_TCP_RESERVED7 = 0x7f,
+ DBG_BLOCK_ID_DB00 = 0x80,
+ DBG_BLOCK_ID_DB01 = 0x81,
+ DBG_BLOCK_ID_DB02 = 0x82,
+ DBG_BLOCK_ID_DB03 = 0x83,
+ DBG_BLOCK_ID_DB04 = 0x84,
+ DBG_BLOCK_ID_UNUSED27 = 0x85,
+ DBG_BLOCK_ID_UNUSED28 = 0x86,
+ DBG_BLOCK_ID_UNUSED29 = 0x87,
+ DBG_BLOCK_ID_DB10 = 0x88,
+ DBG_BLOCK_ID_DB11 = 0x89,
+ DBG_BLOCK_ID_DB12 = 0x8a,
+ DBG_BLOCK_ID_DB13 = 0x8b,
+ DBG_BLOCK_ID_DB14 = 0x8c,
+ DBG_BLOCK_ID_UNUSED30 = 0x8d,
+ DBG_BLOCK_ID_UNUSED31 = 0x8e,
+ DBG_BLOCK_ID_UNUSED32 = 0x8f,
+ DBG_BLOCK_ID_TCC0 = 0x90,
+ DBG_BLOCK_ID_TCC1 = 0x91,
+ DBG_BLOCK_ID_TCC2 = 0x92,
+ DBG_BLOCK_ID_TCC3 = 0x93,
+ DBG_BLOCK_ID_TCC4 = 0x94,
+ DBG_BLOCK_ID_TCC5 = 0x95,
+ DBG_BLOCK_ID_TCC6 = 0x96,
+ DBG_BLOCK_ID_TCC7 = 0x97,
+ DBG_BLOCK_ID_SPS00 = 0x98,
+ DBG_BLOCK_ID_SPS01 = 0x99,
+ DBG_BLOCK_ID_SPS02 = 0x9a,
+ DBG_BLOCK_ID_SPS10 = 0x9b,
+ DBG_BLOCK_ID_SPS11 = 0x9c,
+ DBG_BLOCK_ID_SPS12 = 0x9d,
+ DBG_BLOCK_ID_UNUSED33 = 0x9e,
+ DBG_BLOCK_ID_UNUSED34 = 0x9f,
+ DBG_BLOCK_ID_TA00 = 0xa0,
+ DBG_BLOCK_ID_TA01 = 0xa1,
+ DBG_BLOCK_ID_TA02 = 0xa2,
+ DBG_BLOCK_ID_TA03 = 0xa3,
+ DBG_BLOCK_ID_TA04 = 0xa4,
+ DBG_BLOCK_ID_TA05 = 0xa5,
+ DBG_BLOCK_ID_TA06 = 0xa6,
+ DBG_BLOCK_ID_TA07 = 0xa7,
+ DBG_BLOCK_ID_TA08 = 0xa8,
+ DBG_BLOCK_ID_TA09 = 0xa9,
+ DBG_BLOCK_ID_TA0A = 0xaa,
+ DBG_BLOCK_ID_TA0B = 0xab,
+ DBG_BLOCK_ID_UNUSED35 = 0xac,
+ DBG_BLOCK_ID_UNUSED36 = 0xad,
+ DBG_BLOCK_ID_UNUSED37 = 0xae,
+ DBG_BLOCK_ID_UNUSED38 = 0xaf,
+ DBG_BLOCK_ID_TA10 = 0xb0,
+ DBG_BLOCK_ID_TA11 = 0xb1,
+ DBG_BLOCK_ID_TA12 = 0xb2,
+ DBG_BLOCK_ID_TA13 = 0xb3,
+ DBG_BLOCK_ID_TA14 = 0xb4,
+ DBG_BLOCK_ID_TA15 = 0xb5,
+ DBG_BLOCK_ID_TA16 = 0xb6,
+ DBG_BLOCK_ID_TA17 = 0xb7,
+ DBG_BLOCK_ID_TA18 = 0xb8,
+ DBG_BLOCK_ID_TA19 = 0xb9,
+ DBG_BLOCK_ID_TA1A = 0xba,
+ DBG_BLOCK_ID_TA1B = 0xbb,
+ DBG_BLOCK_ID_UNUSED39 = 0xbc,
+ DBG_BLOCK_ID_UNUSED40 = 0xbd,
+ DBG_BLOCK_ID_UNUSED41 = 0xbe,
+ DBG_BLOCK_ID_UNUSED42 = 0xbf,
+ DBG_BLOCK_ID_TD00 = 0xc0,
+ DBG_BLOCK_ID_TD01 = 0xc1,
+ DBG_BLOCK_ID_TD02 = 0xc2,
+ DBG_BLOCK_ID_TD03 = 0xc3,
+ DBG_BLOCK_ID_TD04 = 0xc4,
+ DBG_BLOCK_ID_TD05 = 0xc5,
+ DBG_BLOCK_ID_TD06 = 0xc6,
+ DBG_BLOCK_ID_TD07 = 0xc7,
+ DBG_BLOCK_ID_TD08 = 0xc8,
+ DBG_BLOCK_ID_TD09 = 0xc9,
+ DBG_BLOCK_ID_TD0A = 0xca,
+ DBG_BLOCK_ID_TD0B = 0xcb,
+ DBG_BLOCK_ID_UNUSED43 = 0xcc,
+ DBG_BLOCK_ID_UNUSED44 = 0xcd,
+ DBG_BLOCK_ID_UNUSED45 = 0xce,
+ DBG_BLOCK_ID_UNUSED46 = 0xcf,
+ DBG_BLOCK_ID_TD10 = 0xd0,
+ DBG_BLOCK_ID_TD11 = 0xd1,
+ DBG_BLOCK_ID_TD12 = 0xd2,
+ DBG_BLOCK_ID_TD13 = 0xd3,
+ DBG_BLOCK_ID_TD14 = 0xd4,
+ DBG_BLOCK_ID_TD15 = 0xd5,
+ DBG_BLOCK_ID_TD16 = 0xd6,
+ DBG_BLOCK_ID_TD17 = 0xd7,
+ DBG_BLOCK_ID_TD18 = 0xd8,
+ DBG_BLOCK_ID_TD19 = 0xd9,
+ DBG_BLOCK_ID_TD1A = 0xda,
+ DBG_BLOCK_ID_TD1B = 0xdb,
+ DBG_BLOCK_ID_UNUSED47 = 0xdc,
+ DBG_BLOCK_ID_UNUSED48 = 0xdd,
+ DBG_BLOCK_ID_UNUSED49 = 0xde,
+ DBG_BLOCK_ID_UNUSED50 = 0xdf,
+ DBG_BLOCK_ID_MCD0 = 0xe0,
+ DBG_BLOCK_ID_MCD1 = 0xe1,
+ DBG_BLOCK_ID_MCD2 = 0xe2,
+ DBG_BLOCK_ID_MCD3 = 0xe3,
+ DBG_BLOCK_ID_MCD4 = 0xe4,
+ DBG_BLOCK_ID_MCD5 = 0xe5,
+ DBG_BLOCK_ID_UNUSED51 = 0xe6,
+ DBG_BLOCK_ID_UNUSED52 = 0xe7,
+} DebugBlockId_OLD;
+typedef enum DebugBlockId_BY2 {
+ DBG_BLOCK_ID_RESERVED_BY2 = 0x0,
+ DBG_BLOCK_ID_VMC_BY2 = 0x1,
+ DBG_BLOCK_ID_CG_BY2 = 0x2,
+ DBG_BLOCK_ID_GRBM_BY2 = 0x3,
+ DBG_BLOCK_ID_CSC_BY2 = 0x4,
+ DBG_BLOCK_ID_IH_BY2 = 0x5,
+ DBG_BLOCK_ID_SQ_BY2 = 0x6,
+ DBG_BLOCK_ID_GMCON_BY2 = 0x7,
+ DBG_BLOCK_ID_DMA0_BY2 = 0x8,
+ DBG_BLOCK_ID_SPIM_BY2 = 0x9,
+ DBG_BLOCK_ID_SPIS_BY2 = 0xa,
+ DBG_BLOCK_ID_PA0_BY2 = 0xb,
+ DBG_BLOCK_ID_CP0_BY2 = 0xc,
+ DBG_BLOCK_ID_CP2_BY2 = 0xd,
+ DBG_BLOCK_ID_UVDU_BY2 = 0xe,
+ DBG_BLOCK_ID_VCE_BY2 = 0xf,
+ DBG_BLOCK_ID_VGT0_BY2 = 0x10,
+ DBG_BLOCK_ID_IA_BY2 = 0x11,
+ DBG_BLOCK_ID_SCT0_BY2 = 0x12,
+ DBG_BLOCK_ID_SPM0_BY2 = 0x13,
+ DBG_BLOCK_ID_TCAA_BY2 = 0x14,
+ DBG_BLOCK_ID_TCCA_BY2 = 0x15,
+ DBG_BLOCK_ID_MCC0_BY2 = 0x16,
+ DBG_BLOCK_ID_MCC2_BY2 = 0x17,
+ DBG_BLOCK_ID_SX0_BY2 = 0x18,
+ DBG_BLOCK_ID_SX2_BY2 = 0x19,
+ DBG_BLOCK_ID_UNUSED4_BY2 = 0x1a,
+ DBG_BLOCK_ID_UNUSED6_BY2 = 0x1b,
+ DBG_BLOCK_ID_PC0_BY2 = 0x1c,
+ DBG_BLOCK_ID_UNUSED8_BY2 = 0x1d,
+ DBG_BLOCK_ID_UNUSED10_BY2 = 0x1e,
+ DBG_BLOCK_ID_MCB_BY2 = 0x1f,
+ DBG_BLOCK_ID_SCB0_BY2 = 0x20,
+ DBG_BLOCK_ID_UNUSED13_BY2 = 0x21,
+ DBG_BLOCK_ID_SCF0_BY2 = 0x22,
+ DBG_BLOCK_ID_UNUSED15_BY2 = 0x23,
+ DBG_BLOCK_ID_BCI0_BY2 = 0x24,
+ DBG_BLOCK_ID_BCI2_BY2 = 0x25,
+ DBG_BLOCK_ID_UNUSED17_BY2 = 0x26,
+ DBG_BLOCK_ID_UNUSED19_BY2 = 0x27,
+ DBG_BLOCK_ID_CB00_BY2 = 0x28,
+ DBG_BLOCK_ID_CB02_BY2 = 0x29,
+ DBG_BLOCK_ID_CB04_BY2 = 0x2a,
+ DBG_BLOCK_ID_UNUSED22_BY2 = 0x2b,
+ DBG_BLOCK_ID_CB10_BY2 = 0x2c,
+ DBG_BLOCK_ID_CB12_BY2 = 0x2d,
+ DBG_BLOCK_ID_CB14_BY2 = 0x2e,
+ DBG_BLOCK_ID_UNUSED25_BY2 = 0x2f,
+ DBG_BLOCK_ID_TCP0_BY2 = 0x30,
+ DBG_BLOCK_ID_TCP2_BY2 = 0x31,
+ DBG_BLOCK_ID_TCP4_BY2 = 0x32,
+ DBG_BLOCK_ID_TCP6_BY2 = 0x33,
+ DBG_BLOCK_ID_TCP8_BY2 = 0x34,
+ DBG_BLOCK_ID_TCP10_BY2 = 0x35,
+ DBG_BLOCK_ID_TCP12_BY2 = 0x36,
+ DBG_BLOCK_ID_TCP14_BY2 = 0x37,
+ DBG_BLOCK_ID_TCP16_BY2 = 0x38,
+ DBG_BLOCK_ID_TCP18_BY2 = 0x39,
+ DBG_BLOCK_ID_TCP20_BY2 = 0x3a,
+ DBG_BLOCK_ID_TCP22_BY2 = 0x3b,
+ DBG_BLOCK_ID_TCP_RESERVED0_BY2 = 0x3c,
+ DBG_BLOCK_ID_TCP_RESERVED2_BY2 = 0x3d,
+ DBG_BLOCK_ID_TCP_RESERVED4_BY2 = 0x3e,
+ DBG_BLOCK_ID_TCP_RESERVED6_BY2 = 0x3f,
+ DBG_BLOCK_ID_DB00_BY2 = 0x40,
+ DBG_BLOCK_ID_DB02_BY2 = 0x41,
+ DBG_BLOCK_ID_DB04_BY2 = 0x42,
+ DBG_BLOCK_ID_UNUSED28_BY2 = 0x43,
+ DBG_BLOCK_ID_DB10_BY2 = 0x44,
+ DBG_BLOCK_ID_DB12_BY2 = 0x45,
+ DBG_BLOCK_ID_DB14_BY2 = 0x46,
+ DBG_BLOCK_ID_UNUSED31_BY2 = 0x47,
+ DBG_BLOCK_ID_TCC0_BY2 = 0x48,
+ DBG_BLOCK_ID_TCC2_BY2 = 0x49,
+ DBG_BLOCK_ID_TCC4_BY2 = 0x4a,
+ DBG_BLOCK_ID_TCC6_BY2 = 0x4b,
+ DBG_BLOCK_ID_SPS00_BY2 = 0x4c,
+ DBG_BLOCK_ID_SPS02_BY2 = 0x4d,
+ DBG_BLOCK_ID_SPS11_BY2 = 0x4e,
+ DBG_BLOCK_ID_UNUSED33_BY2 = 0x4f,
+ DBG_BLOCK_ID_TA00_BY2 = 0x50,
+ DBG_BLOCK_ID_TA02_BY2 = 0x51,
+ DBG_BLOCK_ID_TA04_BY2 = 0x52,
+ DBG_BLOCK_ID_TA06_BY2 = 0x53,
+ DBG_BLOCK_ID_TA08_BY2 = 0x54,
+ DBG_BLOCK_ID_TA0A_BY2 = 0x55,
+ DBG_BLOCK_ID_UNUSED35_BY2 = 0x56,
+ DBG_BLOCK_ID_UNUSED37_BY2 = 0x57,
+ DBG_BLOCK_ID_TA10_BY2 = 0x58,
+ DBG_BLOCK_ID_TA12_BY2 = 0x59,
+ DBG_BLOCK_ID_TA14_BY2 = 0x5a,
+ DBG_BLOCK_ID_TA16_BY2 = 0x5b,
+ DBG_BLOCK_ID_TA18_BY2 = 0x5c,
+ DBG_BLOCK_ID_TA1A_BY2 = 0x5d,
+ DBG_BLOCK_ID_UNUSED39_BY2 = 0x5e,
+ DBG_BLOCK_ID_UNUSED41_BY2 = 0x5f,
+ DBG_BLOCK_ID_TD00_BY2 = 0x60,
+ DBG_BLOCK_ID_TD02_BY2 = 0x61,
+ DBG_BLOCK_ID_TD04_BY2 = 0x62,
+ DBG_BLOCK_ID_TD06_BY2 = 0x63,
+ DBG_BLOCK_ID_TD08_BY2 = 0x64,
+ DBG_BLOCK_ID_TD0A_BY2 = 0x65,
+ DBG_BLOCK_ID_UNUSED43_BY2 = 0x66,
+ DBG_BLOCK_ID_UNUSED45_BY2 = 0x67,
+ DBG_BLOCK_ID_TD10_BY2 = 0x68,
+ DBG_BLOCK_ID_TD12_BY2 = 0x69,
+ DBG_BLOCK_ID_TD14_BY2 = 0x6a,
+ DBG_BLOCK_ID_TD16_BY2 = 0x6b,
+ DBG_BLOCK_ID_TD18_BY2 = 0x6c,
+ DBG_BLOCK_ID_TD1A_BY2 = 0x6d,
+ DBG_BLOCK_ID_UNUSED47_BY2 = 0x6e,
+ DBG_BLOCK_ID_UNUSED49_BY2 = 0x6f,
+ DBG_BLOCK_ID_MCD0_BY2 = 0x70,
+ DBG_BLOCK_ID_MCD2_BY2 = 0x71,
+ DBG_BLOCK_ID_MCD4_BY2 = 0x72,
+ DBG_BLOCK_ID_UNUSED51_BY2 = 0x73,
+} DebugBlockId_BY2;
+typedef enum DebugBlockId_BY4 {
+ DBG_BLOCK_ID_RESERVED_BY4 = 0x0,
+ DBG_BLOCK_ID_CG_BY4 = 0x1,
+ DBG_BLOCK_ID_CSC_BY4 = 0x2,
+ DBG_BLOCK_ID_SQ_BY4 = 0x3,
+ DBG_BLOCK_ID_DMA0_BY4 = 0x4,
+ DBG_BLOCK_ID_SPIS_BY4 = 0x5,
+ DBG_BLOCK_ID_CP0_BY4 = 0x6,
+ DBG_BLOCK_ID_UVDU_BY4 = 0x7,
+ DBG_BLOCK_ID_VGT0_BY4 = 0x8,
+ DBG_BLOCK_ID_SCT0_BY4 = 0x9,
+ DBG_BLOCK_ID_TCAA_BY4 = 0xa,
+ DBG_BLOCK_ID_MCC0_BY4 = 0xb,
+ DBG_BLOCK_ID_SX0_BY4 = 0xc,
+ DBG_BLOCK_ID_UNUSED4_BY4 = 0xd,
+ DBG_BLOCK_ID_PC0_BY4 = 0xe,
+ DBG_BLOCK_ID_UNUSED10_BY4 = 0xf,
+ DBG_BLOCK_ID_SCB0_BY4 = 0x10,
+ DBG_BLOCK_ID_SCF0_BY4 = 0x11,
+ DBG_BLOCK_ID_BCI0_BY4 = 0x12,
+ DBG_BLOCK_ID_UNUSED17_BY4 = 0x13,
+ DBG_BLOCK_ID_CB00_BY4 = 0x14,
+ DBG_BLOCK_ID_CB04_BY4 = 0x15,
+ DBG_BLOCK_ID_CB10_BY4 = 0x16,
+ DBG_BLOCK_ID_CB14_BY4 = 0x17,
+ DBG_BLOCK_ID_TCP0_BY4 = 0x18,
+ DBG_BLOCK_ID_TCP4_BY4 = 0x19,
+ DBG_BLOCK_ID_TCP8_BY4 = 0x1a,
+ DBG_BLOCK_ID_TCP12_BY4 = 0x1b,
+ DBG_BLOCK_ID_TCP16_BY4 = 0x1c,
+ DBG_BLOCK_ID_TCP20_BY4 = 0x1d,
+ DBG_BLOCK_ID_TCP_RESERVED0_BY4 = 0x1e,
+ DBG_BLOCK_ID_TCP_RESERVED4_BY4 = 0x1f,
+ DBG_BLOCK_ID_DB_BY4 = 0x20,
+ DBG_BLOCK_ID_DB04_BY4 = 0x21,
+ DBG_BLOCK_ID_DB10_BY4 = 0x22,
+ DBG_BLOCK_ID_DB14_BY4 = 0x23,
+ DBG_BLOCK_ID_TCC0_BY4 = 0x24,
+ DBG_BLOCK_ID_TCC4_BY4 = 0x25,
+ DBG_BLOCK_ID_SPS00_BY4 = 0x26,
+ DBG_BLOCK_ID_SPS11_BY4 = 0x27,
+ DBG_BLOCK_ID_TA00_BY4 = 0x28,
+ DBG_BLOCK_ID_TA04_BY4 = 0x29,
+ DBG_BLOCK_ID_TA08_BY4 = 0x2a,
+ DBG_BLOCK_ID_UNUSED35_BY4 = 0x2b,
+ DBG_BLOCK_ID_TA10_BY4 = 0x2c,
+ DBG_BLOCK_ID_TA14_BY4 = 0x2d,
+ DBG_BLOCK_ID_TA18_BY4 = 0x2e,
+ DBG_BLOCK_ID_UNUSED39_BY4 = 0x2f,
+ DBG_BLOCK_ID_TD00_BY4 = 0x30,
+ DBG_BLOCK_ID_TD04_BY4 = 0x31,
+ DBG_BLOCK_ID_TD08_BY4 = 0x32,
+ DBG_BLOCK_ID_UNUSED43_BY4 = 0x33,
+ DBG_BLOCK_ID_TD10_BY4 = 0x34,
+ DBG_BLOCK_ID_TD14_BY4 = 0x35,
+ DBG_BLOCK_ID_TD18_BY4 = 0x36,
+ DBG_BLOCK_ID_UNUSED47_BY4 = 0x37,
+ DBG_BLOCK_ID_MCD0_BY4 = 0x38,
+ DBG_BLOCK_ID_MCD4_BY4 = 0x39,
+} DebugBlockId_BY4;
+typedef enum DebugBlockId_BY8 {
+ DBG_BLOCK_ID_RESERVED_BY8 = 0x0,
+ DBG_BLOCK_ID_CSC_BY8 = 0x1,
+ DBG_BLOCK_ID_DMA0_BY8 = 0x2,
+ DBG_BLOCK_ID_CP0_BY8 = 0x3,
+ DBG_BLOCK_ID_VGT0_BY8 = 0x4,
+ DBG_BLOCK_ID_TCAA_BY8 = 0x5,
+ DBG_BLOCK_ID_SX0_BY8 = 0x6,
+ DBG_BLOCK_ID_PC0_BY8 = 0x7,
+ DBG_BLOCK_ID_SCB0_BY8 = 0x8,
+ DBG_BLOCK_ID_BCI0_BY8 = 0x9,
+ DBG_BLOCK_ID_CB00_BY8 = 0xa,
+ DBG_BLOCK_ID_CB10_BY8 = 0xb,
+ DBG_BLOCK_ID_TCP0_BY8 = 0xc,
+ DBG_BLOCK_ID_TCP8_BY8 = 0xd,
+ DBG_BLOCK_ID_TCP16_BY8 = 0xe,
+ DBG_BLOCK_ID_TCP_RESERVED0_BY8 = 0xf,
+ DBG_BLOCK_ID_DB00_BY8 = 0x10,
+ DBG_BLOCK_ID_DB10_BY8 = 0x11,
+ DBG_BLOCK_ID_TCC0_BY8 = 0x12,
+ DBG_BLOCK_ID_SPS00_BY8 = 0x13,
+ DBG_BLOCK_ID_TA00_BY8 = 0x14,
+ DBG_BLOCK_ID_TA08_BY8 = 0x15,
+ DBG_BLOCK_ID_TA10_BY8 = 0x16,
+ DBG_BLOCK_ID_TA18_BY8 = 0x17,
+ DBG_BLOCK_ID_TD00_BY8 = 0x18,
+ DBG_BLOCK_ID_TD08_BY8 = 0x19,
+ DBG_BLOCK_ID_TD10_BY8 = 0x1a,
+ DBG_BLOCK_ID_TD18_BY8 = 0x1b,
+ DBG_BLOCK_ID_MCD0_BY8 = 0x1c,
+} DebugBlockId_BY8;
+typedef enum DebugBlockId_BY16 {
+ DBG_BLOCK_ID_RESERVED_BY16 = 0x0,
+ DBG_BLOCK_ID_DMA0_BY16 = 0x1,
+ DBG_BLOCK_ID_VGT0_BY16 = 0x2,
+ DBG_BLOCK_ID_SX0_BY16 = 0x3,
+ DBG_BLOCK_ID_SCB0_BY16 = 0x4,
+ DBG_BLOCK_ID_CB00_BY16 = 0x5,
+ DBG_BLOCK_ID_TCP0_BY16 = 0x6,
+ DBG_BLOCK_ID_TCP16_BY16 = 0x7,
+ DBG_BLOCK_ID_DB00_BY16 = 0x8,
+ DBG_BLOCK_ID_TCC0_BY16 = 0x9,
+ DBG_BLOCK_ID_TA00_BY16 = 0xa,
+ DBG_BLOCK_ID_TA10_BY16 = 0xb,
+ DBG_BLOCK_ID_TD00_BY16 = 0xc,
+ DBG_BLOCK_ID_TD10_BY16 = 0xd,
+ DBG_BLOCK_ID_MCD0_BY16 = 0xe,
+} DebugBlockId_BY16;
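+/* The _BY2/_BY4/_BY8/_BY16 enums above appear to be coarser views of the
+ * same debug-block space: each value matches the corresponding
+ * DebugBlockId_OLD value shifted right by 1, 2, 3 or 4 bits (e.g.
+ * DBG_BLOCK_ID_TCP0 = 0x60, _BY2 = 0x30, _BY4 = 0x18, _BY8 = 0xc,
+ * _BY16 = 0x6; likewise DBG_BLOCK_ID_MCD0 = 0xe0 -> 0x70 -> 0x38 -> 0x1c
+ * -> 0xe). A minimal sketch of that relation, assuming the shift encoding
+ * holds for every entry (the helper name is illustrative, not from this
+ * header):
+ *
+ *   static inline unsigned int dbg_block_id_by_n(unsigned int id,
+ *                                                unsigned int log2_n)
+ *   {
+ *           return id >> log2_n;  // dbg_block_id_by_n(0x60, 2) == 0x18
+ *   }
+ */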
+typedef enum ColorTransform {
+ DCC_CT_AUTO = 0x0,
+ DCC_CT_NONE = 0x1,
+ ABGR_TO_A_BG_G_RB = 0x2,
+ BGRA_TO_BG_G_RB_A = 0x3,
+} ColorTransform;
+typedef enum CompareRef {
+ REF_NEVER = 0x0,
+ REF_LESS = 0x1,
+ REF_EQUAL = 0x2,
+ REF_LEQUAL = 0x3,
+ REF_GREATER = 0x4,
+ REF_NOTEQUAL = 0x5,
+ REF_GEQUAL = 0x6,
+ REF_ALWAYS = 0x7,
+} CompareRef;
+typedef enum ReadSize {
+ READ_256_BITS = 0x0,
+ READ_512_BITS = 0x1,
+} ReadSize;
+typedef enum DepthFormat {
+ DEPTH_INVALID = 0x0,
+ DEPTH_16 = 0x1,
+ DEPTH_X8_24 = 0x2,
+ DEPTH_8_24 = 0x3,
+ DEPTH_X8_24_FLOAT = 0x4,
+ DEPTH_8_24_FLOAT = 0x5,
+ DEPTH_32_FLOAT = 0x6,
+ DEPTH_X24_8_32_FLOAT = 0x7,
+} DepthFormat;
+typedef enum ZFormat {
+ Z_INVALID = 0x0,
+ Z_16 = 0x1,
+ Z_24 = 0x2,
+ Z_32_FLOAT = 0x3,
+} ZFormat;
+typedef enum StencilFormat {
+ STENCIL_INVALID = 0x0,
+ STENCIL_8 = 0x1,
+} StencilFormat;
+typedef enum CmaskMode {
+ CMASK_CLEAR_NONE = 0x0,
+ CMASK_CLEAR_ONE = 0x1,
+ CMASK_CLEAR_ALL = 0x2,
+ CMASK_ANY_EXPANDED = 0x3,
+ CMASK_ALPHA0_FRAG1 = 0x4,
+ CMASK_ALPHA0_FRAG2 = 0x5,
+ CMASK_ALPHA0_FRAG4 = 0x6,
+ CMASK_ALPHA0_FRAGS = 0x7,
+ CMASK_ALPHA1_FRAG1 = 0x8,
+ CMASK_ALPHA1_FRAG2 = 0x9,
+ CMASK_ALPHA1_FRAG4 = 0xa,
+ CMASK_ALPHA1_FRAGS = 0xb,
+ CMASK_ALPHAX_FRAG1 = 0xc,
+ CMASK_ALPHAX_FRAG2 = 0xd,
+ CMASK_ALPHAX_FRAG4 = 0xe,
+ CMASK_ALPHAX_FRAGS = 0xf,
+} CmaskMode;
+typedef enum QuadExportFormat {
+ EXPORT_UNUSED = 0x0,
+ EXPORT_32_R = 0x1,
+ EXPORT_32_GR = 0x2,
+ EXPORT_32_AR = 0x3,
+ EXPORT_FP16_ABGR = 0x4,
+ EXPORT_UNSIGNED16_ABGR = 0x5,
+ EXPORT_SIGNED16_ABGR = 0x6,
+ EXPORT_32_ABGR = 0x7,
+} QuadExportFormat;
+typedef enum QuadExportFormatOld {
+ EXPORT_4P_32BPC_ABGR = 0x0,
+ EXPORT_4P_16BPC_ABGR = 0x1,
+ EXPORT_4P_32BPC_GR = 0x2,
+ EXPORT_4P_32BPC_AR = 0x3,
+ EXPORT_2P_32BPC_ABGR = 0x4,
+ EXPORT_8P_32BPC_R = 0x5,
+} QuadExportFormatOld;
+typedef enum ColorFormat {
+ COLOR_INVALID = 0x0,
+ COLOR_8 = 0x1,
+ COLOR_16 = 0x2,
+ COLOR_8_8 = 0x3,
+ COLOR_32 = 0x4,
+ COLOR_16_16 = 0x5,
+ COLOR_10_11_11 = 0x6,
+ COLOR_11_11_10 = 0x7,
+ COLOR_10_10_10_2 = 0x8,
+ COLOR_2_10_10_10 = 0x9,
+ COLOR_8_8_8_8 = 0xa,
+ COLOR_32_32 = 0xb,
+ COLOR_16_16_16_16 = 0xc,
+ COLOR_RESERVED_13 = 0xd,
+ COLOR_32_32_32_32 = 0xe,
+ COLOR_RESERVED_15 = 0xf,
+ COLOR_5_6_5 = 0x10,
+ COLOR_1_5_5_5 = 0x11,
+ COLOR_5_5_5_1 = 0x12,
+ COLOR_4_4_4_4 = 0x13,
+ COLOR_8_24 = 0x14,
+ COLOR_24_8 = 0x15,
+ COLOR_X24_8_32_FLOAT = 0x16,
+ COLOR_RESERVED_23 = 0x17,
+} ColorFormat;
+typedef enum SurfaceFormat {
+ FMT_INVALID = 0x0,
+ FMT_8 = 0x1,
+ FMT_16 = 0x2,
+ FMT_8_8 = 0x3,
+ FMT_32 = 0x4,
+ FMT_16_16 = 0x5,
+ FMT_10_11_11 = 0x6,
+ FMT_11_11_10 = 0x7,
+ FMT_10_10_10_2 = 0x8,
+ FMT_2_10_10_10 = 0x9,
+ FMT_8_8_8_8 = 0xa,
+ FMT_32_32 = 0xb,
+ FMT_16_16_16_16 = 0xc,
+ FMT_32_32_32 = 0xd,
+ FMT_32_32_32_32 = 0xe,
+ FMT_RESERVED_4 = 0xf,
+ FMT_5_6_5 = 0x10,
+ FMT_1_5_5_5 = 0x11,
+ FMT_5_5_5_1 = 0x12,
+ FMT_4_4_4_4 = 0x13,
+ FMT_8_24 = 0x14,
+ FMT_24_8 = 0x15,
+ FMT_X24_8_32_FLOAT = 0x16,
+ FMT_RESERVED_33 = 0x17,
+ FMT_11_11_10_FLOAT = 0x18,
+ FMT_16_FLOAT = 0x19,
+ FMT_32_FLOAT = 0x1a,
+ FMT_16_16_FLOAT = 0x1b,
+ FMT_8_24_FLOAT = 0x1c,
+ FMT_24_8_FLOAT = 0x1d,
+ FMT_32_32_FLOAT = 0x1e,
+ FMT_10_11_11_FLOAT = 0x1f,
+ FMT_16_16_16_16_FLOAT = 0x20,
+ FMT_3_3_2 = 0x21,
+ FMT_6_5_5 = 0x22,
+ FMT_32_32_32_32_FLOAT = 0x23,
+ FMT_RESERVED_36 = 0x24,
+ FMT_1 = 0x25,
+ FMT_1_REVERSED = 0x26,
+ FMT_GB_GR = 0x27,
+ FMT_BG_RG = 0x28,
+ FMT_32_AS_8 = 0x29,
+ FMT_32_AS_8_8 = 0x2a,
+ FMT_5_9_9_9_SHAREDEXP = 0x2b,
+ FMT_8_8_8 = 0x2c,
+ FMT_16_16_16 = 0x2d,
+ FMT_16_16_16_FLOAT = 0x2e,
+ FMT_4_4 = 0x2f,
+ FMT_32_32_32_FLOAT = 0x30,
+ FMT_BC1 = 0x31,
+ FMT_BC2 = 0x32,
+ FMT_BC3 = 0x33,
+ FMT_BC4 = 0x34,
+ FMT_BC5 = 0x35,
+ FMT_BC6 = 0x36,
+ FMT_BC7 = 0x37,
+ FMT_32_AS_32_32_32_32 = 0x38,
+ FMT_APC3 = 0x39,
+ FMT_APC4 = 0x3a,
+ FMT_APC5 = 0x3b,
+ FMT_APC6 = 0x3c,
+ FMT_APC7 = 0x3d,
+ FMT_CTX1 = 0x3e,
+ FMT_RESERVED_63 = 0x3f,
+} SurfaceFormat;
+typedef enum BUF_DATA_FORMAT {
+ BUF_DATA_FORMAT_INVALID = 0x0,
+ BUF_DATA_FORMAT_8 = 0x1,
+ BUF_DATA_FORMAT_16 = 0x2,
+ BUF_DATA_FORMAT_8_8 = 0x3,
+ BUF_DATA_FORMAT_32 = 0x4,
+ BUF_DATA_FORMAT_16_16 = 0x5,
+ BUF_DATA_FORMAT_10_11_11 = 0x6,
+ BUF_DATA_FORMAT_11_11_10 = 0x7,
+ BUF_DATA_FORMAT_10_10_10_2 = 0x8,
+ BUF_DATA_FORMAT_2_10_10_10 = 0x9,
+ BUF_DATA_FORMAT_8_8_8_8 = 0xa,
+ BUF_DATA_FORMAT_32_32 = 0xb,
+ BUF_DATA_FORMAT_16_16_16_16 = 0xc,
+ BUF_DATA_FORMAT_32_32_32 = 0xd,
+ BUF_DATA_FORMAT_32_32_32_32 = 0xe,
+ BUF_DATA_FORMAT_RESERVED_15 = 0xf,
+} BUF_DATA_FORMAT;
+typedef enum IMG_DATA_FORMAT {
+ IMG_DATA_FORMAT_INVALID = 0x0,
+ IMG_DATA_FORMAT_8 = 0x1,
+ IMG_DATA_FORMAT_16 = 0x2,
+ IMG_DATA_FORMAT_8_8 = 0x3,
+ IMG_DATA_FORMAT_32 = 0x4,
+ IMG_DATA_FORMAT_16_16 = 0x5,
+ IMG_DATA_FORMAT_10_11_11 = 0x6,
+ IMG_DATA_FORMAT_11_11_10 = 0x7,
+ IMG_DATA_FORMAT_10_10_10_2 = 0x8,
+ IMG_DATA_FORMAT_2_10_10_10 = 0x9,
+ IMG_DATA_FORMAT_8_8_8_8 = 0xa,
+ IMG_DATA_FORMAT_32_32 = 0xb,
+ IMG_DATA_FORMAT_16_16_16_16 = 0xc,
+ IMG_DATA_FORMAT_32_32_32 = 0xd,
+ IMG_DATA_FORMAT_32_32_32_32 = 0xe,
+ IMG_DATA_FORMAT_RESERVED_15 = 0xf,
+ IMG_DATA_FORMAT_5_6_5 = 0x10,
+ IMG_DATA_FORMAT_1_5_5_5 = 0x11,
+ IMG_DATA_FORMAT_5_5_5_1 = 0x12,
+ IMG_DATA_FORMAT_4_4_4_4 = 0x13,
+ IMG_DATA_FORMAT_8_24 = 0x14,
+ IMG_DATA_FORMAT_24_8 = 0x15,
+ IMG_DATA_FORMAT_X24_8_32 = 0x16,
+ IMG_DATA_FORMAT_RESERVED_23 = 0x17,
+ IMG_DATA_FORMAT_RESERVED_24 = 0x18,
+ IMG_DATA_FORMAT_RESERVED_25 = 0x19,
+ IMG_DATA_FORMAT_RESERVED_26 = 0x1a,
+ IMG_DATA_FORMAT_RESERVED_27 = 0x1b,
+ IMG_DATA_FORMAT_RESERVED_28 = 0x1c,
+ IMG_DATA_FORMAT_RESERVED_29 = 0x1d,
+ IMG_DATA_FORMAT_RESERVED_30 = 0x1e,
+ IMG_DATA_FORMAT_RESERVED_31 = 0x1f,
+ IMG_DATA_FORMAT_GB_GR = 0x20,
+ IMG_DATA_FORMAT_BG_RG = 0x21,
+ IMG_DATA_FORMAT_5_9_9_9 = 0x22,
+ IMG_DATA_FORMAT_BC1 = 0x23,
+ IMG_DATA_FORMAT_BC2 = 0x24,
+ IMG_DATA_FORMAT_BC3 = 0x25,
+ IMG_DATA_FORMAT_BC4 = 0x26,
+ IMG_DATA_FORMAT_BC5 = 0x27,
+ IMG_DATA_FORMAT_BC6 = 0x28,
+ IMG_DATA_FORMAT_BC7 = 0x29,
+ IMG_DATA_FORMAT_RESERVED_42 = 0x2a,
+ IMG_DATA_FORMAT_RESERVED_43 = 0x2b,
+ IMG_DATA_FORMAT_FMASK8_S2_F1 = 0x2c,
+ IMG_DATA_FORMAT_FMASK8_S4_F1 = 0x2d,
+ IMG_DATA_FORMAT_FMASK8_S8_F1 = 0x2e,
+ IMG_DATA_FORMAT_FMASK8_S2_F2 = 0x2f,
+ IMG_DATA_FORMAT_FMASK8_S4_F2 = 0x30,
+ IMG_DATA_FORMAT_FMASK8_S4_F4 = 0x31,
+ IMG_DATA_FORMAT_FMASK16_S16_F1 = 0x32,
+ IMG_DATA_FORMAT_FMASK16_S8_F2 = 0x33,
+ IMG_DATA_FORMAT_FMASK32_S16_F2 = 0x34,
+ IMG_DATA_FORMAT_FMASK32_S8_F4 = 0x35,
+ IMG_DATA_FORMAT_FMASK32_S8_F8 = 0x36,
+ IMG_DATA_FORMAT_FMASK64_S16_F4 = 0x37,
+ IMG_DATA_FORMAT_FMASK64_S16_F8 = 0x38,
+ IMG_DATA_FORMAT_4_4 = 0x39,
+ IMG_DATA_FORMAT_6_5_5 = 0x3a,
+ IMG_DATA_FORMAT_1 = 0x3b,
+ IMG_DATA_FORMAT_1_REVERSED = 0x3c,
+ IMG_DATA_FORMAT_32_AS_8 = 0x3d,
+ IMG_DATA_FORMAT_32_AS_8_8 = 0x3e,
+ IMG_DATA_FORMAT_32_AS_32_32_32_32 = 0x3f,
+} IMG_DATA_FORMAT;
+typedef enum BUF_NUM_FORMAT {
+ BUF_NUM_FORMAT_UNORM = 0x0,
+ BUF_NUM_FORMAT_SNORM = 0x1,
+ BUF_NUM_FORMAT_USCALED = 0x2,
+ BUF_NUM_FORMAT_SSCALED = 0x3,
+ BUF_NUM_FORMAT_UINT = 0x4,
+ BUF_NUM_FORMAT_SINT = 0x5,
+ BUF_NUM_FORMAT_RESERVED_6 = 0x6,
+ BUF_NUM_FORMAT_FLOAT = 0x7,
+} BUF_NUM_FORMAT;
+typedef enum IMG_NUM_FORMAT {
+ IMG_NUM_FORMAT_UNORM = 0x0,
+ IMG_NUM_FORMAT_SNORM = 0x1,
+ IMG_NUM_FORMAT_USCALED = 0x2,
+ IMG_NUM_FORMAT_SSCALED = 0x3,
+ IMG_NUM_FORMAT_UINT = 0x4,
+ IMG_NUM_FORMAT_SINT = 0x5,
+ IMG_NUM_FORMAT_RESERVED_6 = 0x6,
+ IMG_NUM_FORMAT_FLOAT = 0x7,
+ IMG_NUM_FORMAT_RESERVED_8 = 0x8,
+ IMG_NUM_FORMAT_SRGB = 0x9,
+ IMG_NUM_FORMAT_RESERVED_10 = 0xa,
+ IMG_NUM_FORMAT_RESERVED_11 = 0xb,
+ IMG_NUM_FORMAT_RESERVED_12 = 0xc,
+ IMG_NUM_FORMAT_RESERVED_13 = 0xd,
+ IMG_NUM_FORMAT_RESERVED_14 = 0xe,
+ IMG_NUM_FORMAT_RESERVED_15 = 0xf,
+} IMG_NUM_FORMAT;
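+/* The *_DATA_FORMAT enums describe the per-channel bit layout while the
+ * *_NUM_FORMAT enums describe how those bits are interpreted (UNORM, SINT,
+ * FLOAT, SRGB, ...), so a complete buffer or image format is the pair of
+ * the two. A hedged sketch of pairing them (the struct is illustrative,
+ * not taken from this header):
+ *
+ *   struct buf_fmt {
+ *           BUF_DATA_FORMAT dfmt;  // bit layout, e.g. BUF_DATA_FORMAT_32_32_32_32
+ *           BUF_NUM_FORMAT  nfmt;  // interpretation, e.g. BUF_NUM_FORMAT_FLOAT
+ *   };
+ *
+ *   // e.g. a four-channel 32-bit float (RGBA32F) buffer format:
+ *   static const struct buf_fmt rgba32f = {
+ *           BUF_DATA_FORMAT_32_32_32_32,
+ *           BUF_NUM_FORMAT_FLOAT,
+ *   };
+ */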
+typedef enum TileType {
+ ARRAY_COLOR_TILE = 0x0,
+ ARRAY_DEPTH_TILE = 0x1,
+} TileType;
+typedef enum NonDispTilingOrder {
+ ADDR_SURF_MICRO_TILING_DISPLAY = 0x0,
+ ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1,
+} NonDispTilingOrder;
+typedef enum MicroTileMode {
+ ADDR_SURF_DISPLAY_MICRO_TILING = 0x0,
+ ADDR_SURF_THIN_MICRO_TILING = 0x1,
+ ADDR_SURF_DEPTH_MICRO_TILING = 0x2,
+ ADDR_SURF_ROTATED_MICRO_TILING = 0x3,
+ ADDR_SURF_THICK_MICRO_TILING = 0x4,
+} MicroTileMode;
+typedef enum TileSplit {
+ ADDR_SURF_TILE_SPLIT_64B = 0x0,
+ ADDR_SURF_TILE_SPLIT_128B = 0x1,
+ ADDR_SURF_TILE_SPLIT_256B = 0x2,
+ ADDR_SURF_TILE_SPLIT_512B = 0x3,
+ ADDR_SURF_TILE_SPLIT_1KB = 0x4,
+ ADDR_SURF_TILE_SPLIT_2KB = 0x5,
+ ADDR_SURF_TILE_SPLIT_4KB = 0x6,
+} TileSplit;
+typedef enum SampleSplit {
+ ADDR_SURF_SAMPLE_SPLIT_1 = 0x0,
+ ADDR_SURF_SAMPLE_SPLIT_2 = 0x1,
+ ADDR_SURF_SAMPLE_SPLIT_4 = 0x2,
+ ADDR_SURF_SAMPLE_SPLIT_8 = 0x3,
+} SampleSplit;
+typedef enum PipeConfig {
+ ADDR_SURF_P2 = 0x0,
+ ADDR_SURF_P2_RESERVED0 = 0x1,
+ ADDR_SURF_P2_RESERVED1 = 0x2,
+ ADDR_SURF_P2_RESERVED2 = 0x3,
+ ADDR_SURF_P4_8x16 = 0x4,
+ ADDR_SURF_P4_16x16 = 0x5,
+ ADDR_SURF_P4_16x32 = 0x6,
+ ADDR_SURF_P4_32x32 = 0x7,
+ ADDR_SURF_P8_16x16_8x16 = 0x8,
+ ADDR_SURF_P8_16x32_8x16 = 0x9,
+ ADDR_SURF_P8_32x32_8x16 = 0xa,
+ ADDR_SURF_P8_16x32_16x16 = 0xb,
+ ADDR_SURF_P8_32x32_16x16 = 0xc,
+ ADDR_SURF_P8_32x32_16x32 = 0xd,
+ ADDR_SURF_P8_32x64_32x32 = 0xe,
+ ADDR_SURF_P8_RESERVED0 = 0xf,
+ ADDR_SURF_P16_32x32_8x16 = 0x10,
+ ADDR_SURF_P16_32x32_16x16 = 0x11,
+} PipeConfig;
+typedef enum NumBanks {
+ ADDR_SURF_2_BANK = 0x0,
+ ADDR_SURF_4_BANK = 0x1,
+ ADDR_SURF_8_BANK = 0x2,
+ ADDR_SURF_16_BANK = 0x3,
+} NumBanks;
+typedef enum BankWidth {
+ ADDR_SURF_BANK_WIDTH_1 = 0x0,
+ ADDR_SURF_BANK_WIDTH_2 = 0x1,
+ ADDR_SURF_BANK_WIDTH_4 = 0x2,
+ ADDR_SURF_BANK_WIDTH_8 = 0x3,
+} BankWidth;
+typedef enum BankHeight {
+ ADDR_SURF_BANK_HEIGHT_1 = 0x0,
+ ADDR_SURF_BANK_HEIGHT_2 = 0x1,
+ ADDR_SURF_BANK_HEIGHT_4 = 0x2,
+ ADDR_SURF_BANK_HEIGHT_8 = 0x3,
+} BankHeight;
+typedef enum BankWidthHeight {
+ ADDR_SURF_BANK_WH_1 = 0x0,
+ ADDR_SURF_BANK_WH_2 = 0x1,
+ ADDR_SURF_BANK_WH_4 = 0x2,
+ ADDR_SURF_BANK_WH_8 = 0x3,
+} BankWidthHeight;
+typedef enum MacroTileAspect {
+ ADDR_SURF_MACRO_ASPECT_1 = 0x0,
+ ADDR_SURF_MACRO_ASPECT_2 = 0x1,
+ ADDR_SURF_MACRO_ASPECT_4 = 0x2,
+ ADDR_SURF_MACRO_ASPECT_8 = 0x3,
+} MacroTileAspect;
+typedef enum GATCL1RequestType {
+ GATCL1_TYPE_NORMAL = 0x0,
+ GATCL1_TYPE_SHOOTDOWN = 0x1,
+ GATCL1_TYPE_BYPASS = 0x2,
+} GATCL1RequestType;
+typedef enum TCC_CACHE_POLICIES {
+ TCC_CACHE_POLICY_LRU = 0x0,
+ TCC_CACHE_POLICY_STREAM = 0x1,
+} TCC_CACHE_POLICIES;
+typedef enum MTYPE {
+ MTYPE_NC_NV = 0x0,
+ MTYPE_NC = 0x1,
+ MTYPE_CC = 0x2,
+ MTYPE_UC = 0x3,
+} MTYPE;
+typedef enum PERFMON_COUNTER_MODE {
+ PERFMON_COUNTER_MODE_ACCUM = 0x0,
+ PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1,
+ PERFMON_COUNTER_MODE_MAX = 0x2,
+ PERFMON_COUNTER_MODE_DIRTY = 0x3,
+ PERFMON_COUNTER_MODE_SAMPLE = 0x4,
+ PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT = 0x5,
+ PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT = 0x6,
+ PERFMON_COUNTER_MODE_CYCLES_GE_HI = 0x7,
+ PERFMON_COUNTER_MODE_CYCLES_EQ_HI = 0x8,
+ PERFMON_COUNTER_MODE_INACTIVE_CYCLES = 0x9,
+ PERFMON_COUNTER_MODE_RESERVED = 0xf,
+} PERFMON_COUNTER_MODE;
+typedef enum PERFMON_SPM_MODE {
+ PERFMON_SPM_MODE_OFF = 0x0,
+ PERFMON_SPM_MODE_16BIT_CLAMP = 0x1,
+ PERFMON_SPM_MODE_16BIT_NO_CLAMP = 0x2,
+ PERFMON_SPM_MODE_32BIT_CLAMP = 0x3,
+ PERFMON_SPM_MODE_32BIT_NO_CLAMP = 0x4,
+ PERFMON_SPM_MODE_RESERVED_5 = 0x5,
+ PERFMON_SPM_MODE_RESERVED_6 = 0x6,
+ PERFMON_SPM_MODE_RESERVED_7 = 0x7,
+ PERFMON_SPM_MODE_TEST_MODE_0 = 0x8,
+ PERFMON_SPM_MODE_TEST_MODE_1 = 0x9,
+ PERFMON_SPM_MODE_TEST_MODE_2 = 0xa,
+} PERFMON_SPM_MODE;
+typedef enum SurfaceTiling {
+ ARRAY_LINEAR = 0x0,
+ ARRAY_TILED = 0x1,
+} SurfaceTiling;
+typedef enum SurfaceArray {
+ ARRAY_1D = 0x0,
+ ARRAY_2D = 0x1,
+ ARRAY_3D = 0x2,
+ ARRAY_3D_SLICE = 0x3,
+} SurfaceArray;
+typedef enum ColorArray {
+ ARRAY_2D_ALT_COLOR = 0x0,
+ ARRAY_2D_COLOR = 0x1,
+ ARRAY_3D_SLICE_COLOR = 0x3,
+} ColorArray;
+typedef enum DepthArray {
+ ARRAY_2D_ALT_DEPTH = 0x0,
+ ARRAY_2D_DEPTH = 0x1,
+} DepthArray;
+typedef enum ENUM_NUM_SIMD_PER_CU {
+ NUM_SIMD_PER_CU = 0x4,
+} ENUM_NUM_SIMD_PER_CU;
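+/* i.e. a compute unit contains four SIMD units; this enum just names that
+ * constant. */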
+typedef enum MEM_PWR_FORCE_CTRL {
+ NO_FORCE_REQUEST = 0x0,
+ FORCE_LIGHT_SLEEP_REQUEST = 0x1,
+ FORCE_DEEP_SLEEP_REQUEST = 0x2,
+ FORCE_SHUT_DOWN_REQUEST = 0x3,
+} MEM_PWR_FORCE_CTRL;
+typedef enum MEM_PWR_FORCE_CTRL2 {
+ NO_FORCE_REQ = 0x0,
+ FORCE_LIGHT_SLEEP_REQ = 0x1,
+} MEM_PWR_FORCE_CTRL2;
+typedef enum MEM_PWR_DIS_CTRL {
+ ENABLE_MEM_PWR_CTRL = 0x0,
+ DISABLE_MEM_PWR_CTRL = 0x1,
+} MEM_PWR_DIS_CTRL;
+typedef enum MEM_PWR_SEL_CTRL {
+ DYNAMIC_SHUT_DOWN_ENABLE = 0x0,
+ DYNAMIC_DEEP_SLEEP_ENABLE = 0x1,
+ DYNAMIC_LIGHT_SLEEP_ENABLE = 0x2,
+} MEM_PWR_SEL_CTRL;
+typedef enum MEM_PWR_SEL_CTRL2 {
+ DYNAMIC_DEEP_SLEEP_EN = 0x0,
+ DYNAMIC_LIGHT_SLEEP_EN = 0x1,
+} MEM_PWR_SEL_CTRL2;
+typedef enum HPD_INT_CONTROL_ACK {
+ HPD_INT_CONTROL_ACK_0 = 0x0,
+ HPD_INT_CONTROL_ACK_1 = 0x1,
+} HPD_INT_CONTROL_ACK;
+typedef enum HPD_INT_CONTROL_POLARITY {
+ HPD_INT_CONTROL_GEN_INT_ON_DISCON = 0x0,
+ HPD_INT_CONTROL_GEN_INT_ON_CON = 0x1,
+} HPD_INT_CONTROL_POLARITY;
+typedef enum HPD_INT_CONTROL_RX_INT_ACK {
+ HPD_INT_CONTROL_RX_INT_ACK_0 = 0x0,
+ HPD_INT_CONTROL_RX_INT_ACK_1 = 0x1,
+} HPD_INT_CONTROL_RX_INT_ACK;
+typedef enum DPDBG_EN {
+ DPDBG_DISABLE = 0x0,
+ DPDBG_ENABLE = 0x1,
+} DPDBG_EN;
+typedef enum DPDBG_INPUT_EN {
+ DPDBG_INPUT_DISABLE = 0x0,
+ DPDBG_INPUT_ENABLE = 0x1,
+} DPDBG_INPUT_EN;
+typedef enum DPDBG_ERROR_DETECTION_MODE {
+ DPDBG_ERROR_DETECTION_MODE_CSC = 0x0,
+ DPDBG_ERROR_DETECTION_MODE_RS_ENCODING = 0x1,
+} DPDBG_ERROR_DETECTION_MODE;
+typedef enum DPDBG_FIFO_OVERFLOW_INTERRUPT_MASK {
+ DPDBG_FIFO_OVERFLOW_INT_DISABLE = 0x0,
+ DPDBG_FIFO_OVERFLOW_INT_ENABLE = 0x1,
+} DPDBG_FIFO_OVERFLOW_INTERRUPT_MASK;
+typedef enum DPDBG_FIFO_OVERFLOW_INTERRUPT_TYPE {
+ DPDBG_FIFO_OVERFLOW_INT_LEVEL_BASED = 0x0,
+ DPDBG_FIFO_OVERFLOW_INT_PULSE_BASED = 0x1,
+} DPDBG_FIFO_OVERFLOW_INTERRUPT_TYPE;
+typedef enum DPDBG_FIFO_OVERFLOW_INTERRUPT_ACK {
+ DPDBG_FIFO_OVERFLOW_INT_NO_ACK = 0x0,
+ DPDBG_FIFO_OVERFLOW_INT_CLEAR = 0x1,
+} DPDBG_FIFO_OVERFLOW_INTERRUPT_ACK;
+typedef enum PM_ASSERT_RESET {
+ PM_ASSERT_RESET_0 = 0x0,
+ PM_ASSERT_RESET_1 = 0x1,
+} PM_ASSERT_RESET;
+typedef enum DAC_MUX_SELECT {
+ DAC_MUX_SELECT_DACA = 0x0,
+ DAC_MUX_SELECT_DACB = 0x1,
+} DAC_MUX_SELECT;
+typedef enum TMDS_DVO_MUX_SELECT {
+ TMDS_DVO_MUX_SELECT_B = 0x0,
+ TMDS_DVO_MUX_SELECT_G = 0x1,
+ TMDS_DVO_MUX_SELECT_R = 0x2,
+ TMDS_DVO_MUX_SELECT_RESERVED = 0x3,
+} TMDS_DVO_MUX_SELECT;
+typedef enum DACA_SOFT_RESET {
+ DACA_SOFT_RESET_0 = 0x0,
+ DACA_SOFT_RESET_1 = 0x1,
+} DACA_SOFT_RESET;
+typedef enum I2S0_SPDIF0_SOFT_RESET {
+ I2S0_SPDIF0_SOFT_RESET_0 = 0x0,
+ I2S0_SPDIF0_SOFT_RESET_1 = 0x1,
+} I2S0_SPDIF0_SOFT_RESET;
+typedef enum I2S1_SOFT_RESET {
+ I2S1_SOFT_RESET_0 = 0x0,
+ I2S1_SOFT_RESET_1 = 0x1,
+} I2S1_SOFT_RESET;
+typedef enum SPDIF1_SOFT_RESET {
+ SPDIF1_SOFT_RESET_0 = 0x0,
+ SPDIF1_SOFT_RESET_1 = 0x1,
+} SPDIF1_SOFT_RESET;
+typedef enum DB_CLK_SOFT_RESET {
+ DB_CLK_SOFT_RESET_0 = 0x0,
+ DB_CLK_SOFT_RESET_1 = 0x1,
+} DB_CLK_SOFT_RESET;
+typedef enum FMT0_SOFT_RESET {
+ FMT0_SOFT_RESET_0 = 0x0,
+ FMT0_SOFT_RESET_1 = 0x1,
+} FMT0_SOFT_RESET;
+typedef enum FMT1_SOFT_RESET {
+ FMT1_SOFT_RESET_0 = 0x0,
+ FMT1_SOFT_RESET_1 = 0x1,
+} FMT1_SOFT_RESET;
+typedef enum FMT2_SOFT_RESET {
+ FMT2_SOFT_RESET_0 = 0x0,
+ FMT2_SOFT_RESET_1 = 0x1,
+} FMT2_SOFT_RESET;
+typedef enum FMT3_SOFT_RESET {
+ FMT3_SOFT_RESET_0 = 0x0,
+ FMT3_SOFT_RESET_1 = 0x1,
+} FMT3_SOFT_RESET;
+typedef enum FMT4_SOFT_RESET {
+ FMT4_SOFT_RESET_0 = 0x0,
+ FMT4_SOFT_RESET_1 = 0x1,
+} FMT4_SOFT_RESET;
+typedef enum FMT5_SOFT_RESET {
+ FMT5_SOFT_RESET_0 = 0x0,
+ FMT5_SOFT_RESET_1 = 0x1,
+} FMT5_SOFT_RESET;
+typedef enum MVP_SOFT_RESET {
+ MVP_SOFT_RESET_0 = 0x0,
+ MVP_SOFT_RESET_1 = 0x1,
+} MVP_SOFT_RESET;
+typedef enum ABM_SOFT_RESET {
+ ABM_SOFT_RESET_0 = 0x0,
+ ABM_SOFT_RESET_1 = 0x1,
+} ABM_SOFT_RESET;
+typedef enum DVO_SOFT_RESET {
+ DVO_SOFT_RESET_0 = 0x0,
+ DVO_SOFT_RESET_1 = 0x1,
+} DVO_SOFT_RESET;
+typedef enum DIGA_FE_SOFT_RESET {
+ DIGA_FE_SOFT_RESET_0 = 0x0,
+ DIGA_FE_SOFT_RESET_1 = 0x1,
+} DIGA_FE_SOFT_RESET;
+typedef enum DIGA_BE_SOFT_RESET {
+ DIGA_BE_SOFT_RESET_0 = 0x0,
+ DIGA_BE_SOFT_RESET_1 = 0x1,
+} DIGA_BE_SOFT_RESET;
+typedef enum DIGB_FE_SOFT_RESET {
+ DIGB_FE_SOFT_RESET_0 = 0x0,
+ DIGB_FE_SOFT_RESET_1 = 0x1,
+} DIGB_FE_SOFT_RESET;
+typedef enum DIGB_BE_SOFT_RESET {
+ DIGB_BE_SOFT_RESET_0 = 0x0,
+ DIGB_BE_SOFT_RESET_1 = 0x1,
+} DIGB_BE_SOFT_RESET;
+typedef enum DIGC_FE_SOFT_RESET {
+ DIGC_FE_SOFT_RESET_0 = 0x0,
+ DIGC_FE_SOFT_RESET_1 = 0x1,
+} DIGC_FE_SOFT_RESET;
+typedef enum DIGC_BE_SOFT_RESET {
+ DIGC_BE_SOFT_RESET_0 = 0x0,
+ DIGC_BE_SOFT_RESET_1 = 0x1,
+} DIGC_BE_SOFT_RESET;
+typedef enum DIGD_FE_SOFT_RESET {
+ DIGD_FE_SOFT_RESET_0 = 0x0,
+ DIGD_FE_SOFT_RESET_1 = 0x1,
+} DIGD_FE_SOFT_RESET;
+typedef enum DIGD_BE_SOFT_RESET {
+ DIGD_BE_SOFT_RESET_0 = 0x0,
+ DIGD_BE_SOFT_RESET_1 = 0x1,
+} DIGD_BE_SOFT_RESET;
+typedef enum DIGE_FE_SOFT_RESET {
+ DIGE_FE_SOFT_RESET_0 = 0x0,
+ DIGE_FE_SOFT_RESET_1 = 0x1,
+} DIGE_FE_SOFT_RESET;
+typedef enum DIGE_BE_SOFT_RESET {
+ DIGE_BE_SOFT_RESET_0 = 0x0,
+ DIGE_BE_SOFT_RESET_1 = 0x1,
+} DIGE_BE_SOFT_RESET;
+typedef enum DIGF_FE_SOFT_RESET {
+ DIGF_FE_SOFT_RESET_0 = 0x0,
+ DIGF_FE_SOFT_RESET_1 = 0x1,
+} DIGF_FE_SOFT_RESET;
+typedef enum DIGF_BE_SOFT_RESET {
+ DIGF_BE_SOFT_RESET_0 = 0x0,
+ DIGF_BE_SOFT_RESET_1 = 0x1,
+} DIGF_BE_SOFT_RESET;
+typedef enum DIGG_FE_SOFT_RESET {
+ DIGG_FE_SOFT_RESET_0 = 0x0,
+ DIGG_FE_SOFT_RESET_1 = 0x1,
+} DIGG_FE_SOFT_RESET;
+typedef enum DIGG_BE_SOFT_RESET {
+ DIGG_BE_SOFT_RESET_0 = 0x0,
+ DIGG_BE_SOFT_RESET_1 = 0x1,
+} DIGG_BE_SOFT_RESET;
+typedef enum DPDBG_SOFT_RESET {
+ DPDBG_SOFT_RESET_0 = 0x0,
+ DPDBG_SOFT_RESET_1 = 0x1,
+} DPDBG_SOFT_RESET;
+typedef enum DIGLPA_FE_SOFT_RESET {
+ DIGLPA_FE_SOFT_RESET_0 = 0x0,
+ DIGLPA_FE_SOFT_RESET_1 = 0x1,
+} DIGLPA_FE_SOFT_RESET;
+typedef enum DIGLPA_BE_SOFT_RESET {
+ DIGLPA_BE_SOFT_RESET_0 = 0x0,
+ DIGLPA_BE_SOFT_RESET_1 = 0x1,
+} DIGLPA_BE_SOFT_RESET;
+typedef enum DIGLPB_FE_SOFT_RESET {
+ DIGLPB_FE_SOFT_RESET_0 = 0x0,
+ DIGLPB_FE_SOFT_RESET_1 = 0x1,
+} DIGLPB_FE_SOFT_RESET;
+typedef enum DIGLPB_BE_SOFT_RESET {
+ DIGLPB_BE_SOFT_RESET_0 = 0x0,
+ DIGLPB_BE_SOFT_RESET_1 = 0x1,
+} DIGLPB_BE_SOFT_RESET;
+typedef enum GENERICA_STEREOSYNC_SEL {
+ GENERICA_STEREOSYNC_SEL_D1 = 0x0,
+ GENERICA_STEREOSYNC_SEL_D2 = 0x1,
+ GENERICA_STEREOSYNC_SEL_D3 = 0x2,
+ GENERICA_STEREOSYNC_SEL_D4 = 0x3,
+ GENERICA_STEREOSYNC_SEL_D5 = 0x4,
+ GENERICA_STEREOSYNC_SEL_D6 = 0x5,
+ GENERICA_STEREOSYNC_SEL_RESERVED = 0x6,
+} GENERICA_STEREOSYNC_SEL;
+typedef enum GENERICB_STEREOSYNC_SEL {
+ GENERICB_STEREOSYNC_SEL_D1 = 0x0,
+ GENERICB_STEREOSYNC_SEL_D2 = 0x1,
+ GENERICB_STEREOSYNC_SEL_D3 = 0x2,
+ GENERICB_STEREOSYNC_SEL_D4 = 0x3,
+ GENERICB_STEREOSYNC_SEL_D5 = 0x4,
+ GENERICB_STEREOSYNC_SEL_D6 = 0x5,
+ GENERICB_STEREOSYNC_SEL_RESERVED = 0x6,
+} GENERICB_STEREOSYNC_SEL;
+typedef enum DCO_DBG_BLOCK_SEL {
+ DCO_DBG_BLOCK_SEL_DCO = 0x0,
+ DCO_DBG_BLOCK_SEL_ABM = 0x1,
+ DCO_DBG_BLOCK_SEL_DVO = 0x2,
+ DCO_DBG_BLOCK_SEL_DAC = 0x3,
+ DCO_DBG_BLOCK_SEL_MVP = 0x4,
+ DCO_DBG_BLOCK_SEL_FMT0 = 0x5,
+ DCO_DBG_BLOCK_SEL_FMT1 = 0x6,
+ DCO_DBG_BLOCK_SEL_FMT2 = 0x7,
+ DCO_DBG_BLOCK_SEL_FMT3 = 0x8,
+ DCO_DBG_BLOCK_SEL_FMT4 = 0x9,
+ DCO_DBG_BLOCK_SEL_FMT5 = 0xa,
+ DCO_DBG_BLOCK_SEL_DIGFE_A = 0xb,
+ DCO_DBG_BLOCK_SEL_DIGFE_B = 0xc,
+ DCO_DBG_BLOCK_SEL_DIGFE_C = 0xd,
+ DCO_DBG_BLOCK_SEL_DIGFE_D = 0xe,
+ DCO_DBG_BLOCK_SEL_DIGFE_E = 0xf,
+ DCO_DBG_BLOCK_SEL_DIGFE_F = 0x10,
+ DCO_DBG_BLOCK_SEL_DIGFE_G = 0x11,
+ DCO_DBG_BLOCK_SEL_DIGA = 0x12,
+ DCO_DBG_BLOCK_SEL_DIGB = 0x13,
+ DCO_DBG_BLOCK_SEL_DIGC = 0x14,
+ DCO_DBG_BLOCK_SEL_DIGD = 0x15,
+ DCO_DBG_BLOCK_SEL_DIGE = 0x16,
+ DCO_DBG_BLOCK_SEL_DIGF = 0x17,
+ DCO_DBG_BLOCK_SEL_DIGG = 0x18,
+ DCO_DBG_BLOCK_SEL_DPFE_A = 0x19,
+ DCO_DBG_BLOCK_SEL_DPFE_B = 0x1a,
+ DCO_DBG_BLOCK_SEL_DPFE_C = 0x1b,
+ DCO_DBG_BLOCK_SEL_DPFE_D = 0x1c,
+ DCO_DBG_BLOCK_SEL_DPFE_E = 0x1d,
+ DCO_DBG_BLOCK_SEL_DPFE_F = 0x1e,
+ DCO_DBG_BLOCK_SEL_DPFE_G = 0x1f,
+ DCO_DBG_BLOCK_SEL_DPA = 0x20,
+ DCO_DBG_BLOCK_SEL_DPB = 0x21,
+ DCO_DBG_BLOCK_SEL_DPC = 0x22,
+ DCO_DBG_BLOCK_SEL_DPD = 0x23,
+ DCO_DBG_BLOCK_SEL_DPE = 0x24,
+ DCO_DBG_BLOCK_SEL_DPF = 0x25,
+ DCO_DBG_BLOCK_SEL_DPG = 0x26,
+ DCO_DBG_BLOCK_SEL_AUX0 = 0x27,
+ DCO_DBG_BLOCK_SEL_AUX1 = 0x28,
+ DCO_DBG_BLOCK_SEL_AUX2 = 0x29,
+ DCO_DBG_BLOCK_SEL_AUX3 = 0x2a,
+ DCO_DBG_BLOCK_SEL_AUX4 = 0x2b,
+ DCO_DBG_BLOCK_SEL_AUX5 = 0x2c,
+ DCO_DBG_BLOCK_SEL_PERFMON_DCO = 0x2d,
+ DCO_DBG_BLOCK_SEL_AUDIO_OUT = 0x2e,
+ DCO_DBG_BLOCK_SEL_DIGLPFEA = 0x2f,
+ DCO_DBG_BLOCK_SEL_DIGLPFEB = 0x30,
+ DCO_DBG_BLOCK_SEL_DIGLPA = 0x31,
+ DCO_DBG_BLOCK_SEL_DIGLPB = 0x32,
+ DCO_DBG_BLOCK_SEL_DPLPFEA = 0x33,
+ DCO_DBG_BLOCK_SEL_DPLPFEB = 0x34,
+ DCO_DBG_BLOCK_SEL_DPLPA = 0x35,
+ DCO_DBG_BLOCK_SEL_DPLPB = 0x36,
+} DCO_DBG_BLOCK_SEL;
+typedef enum DCO_DBG_CLOCK_SEL {
+ DCO_DBG_CLOCK_SEL_DISPCLK = 0x0,
+ DCO_DBG_CLOCK_SEL_SCLK = 0x1,
+ DCO_DBG_CLOCK_SEL_MVPCLK = 0x2,
+ DCO_DBG_CLOCK_SEL_DVOCLK = 0x3,
+ DCO_DBG_CLOCK_SEL_DACCLK = 0x4,
+ DCO_DBG_CLOCK_SEL_REFCLK = 0x5,
+ DCO_DBG_CLOCK_SEL_SYMCLKA = 0x6,
+ DCO_DBG_CLOCK_SEL_SYMCLKB = 0x7,
+ DCO_DBG_CLOCK_SEL_SYMCLKC = 0x8,
+ DCO_DBG_CLOCK_SEL_SYMCLKD = 0x9,
+ DCO_DBG_CLOCK_SEL_SYMCLKE = 0xa,
+ DCO_DBG_CLOCK_SEL_SYMCLKF = 0xb,
+ DCO_DBG_CLOCK_SEL_SYMCLKG = 0xc,
+ DCO_DBG_CLOCK_SEL_RESERVED = 0xd,
+ DCO_DBG_CLOCK_SEL_AM0CLK = 0xe,
+ DCO_DBG_CLOCK_SEL_AM1CLK = 0xf,
+ DCO_DBG_CLOCK_SEL_AM2CLK = 0x10,
+ DCO_DBG_CLOCK_SEL_SYMCLKLPA = 0x11,
+ DCO_DBG_CLOCK_SEL_SYMCLKLPB = 0x12,
+} DCO_DBG_CLOCK_SEL;
+typedef enum DCO_HDMI_RXSTATUS_TIMER_CONTROL_DCO_HDMI_RXSTATUS_TIMER_TYPE {
+ DCO_HDMI_RXSTATUS_TIMER_TYPE_LEVEL = 0x0,
+ DCO_HDMI_RXSTATUS_TIMER_TYPE_PULSE = 0x1,
+} DCO_HDMI_RXSTATUS_TIMER_CONTROL_DCO_HDMI_RXSTATUS_TIMER_TYPE;
+typedef enum FMT420_MEMORY_SOURCE_SEL {
+ FMT420_MEMORY_SOURCE_SEL_FMT0 = 0x0,
+ FMT420_MEMORY_SOURCE_SEL_FMT1 = 0x1,
+ FMT420_MEMORY_SOURCE_SEL_FMT2 = 0x2,
+ FMT420_MEMORY_SOURCE_SEL_FMT3 = 0x3,
+ FMT420_MEMORY_SOURCE_SEL_FMT4 = 0x4,
+ FMT420_MEMORY_SOURCE_SEL_FMT5 = 0x5,
+ FMT420_MEMORY_SOURCE_SEL_FMT_RESERVED = 0x6,
+} FMT420_MEMORY_SOURCE_SEL;
+typedef enum DOUT_I2C_CONTROL_GO {
+ DOUT_I2C_CONTROL_STOP_TRANSFER = 0x0,
+ DOUT_I2C_CONTROL_START_TRANSFER = 0x1,
+} DOUT_I2C_CONTROL_GO;
+typedef enum DOUT_I2C_CONTROL_SOFT_RESET {
+ DOUT_I2C_CONTROL_NOT_RESET_I2C_CONTROLLER = 0x0,
+ DOUT_I2C_CONTROL_RESET_I2C_CONTROLLER = 0x1,
+} DOUT_I2C_CONTROL_SOFT_RESET;
+typedef enum DOUT_I2C_CONTROL_SEND_RESET {
+ DOUT_I2C_CONTROL__NOT_SEND_RESET = 0x0,
+ DOUT_I2C_CONTROL__SEND_RESET = 0x1,
+} DOUT_I2C_CONTROL_SEND_RESET;
+typedef enum DOUT_I2C_CONTROL_SW_STATUS_RESET {
+ DOUT_I2C_CONTROL_NOT_RESET_SW_STATUS = 0x0,
+ DOUT_I2C_CONTROL_RESET_SW_STATUS = 0x1,
+} DOUT_I2C_CONTROL_SW_STATUS_RESET;
+typedef enum DOUT_I2C_CONTROL_DDC_SELECT {
+ DOUT_I2C_CONTROL_SELECT_DDC1 = 0x0,
+ DOUT_I2C_CONTROL_SELECT_DDC2 = 0x1,
+ DOUT_I2C_CONTROL_SELECT_DDC3 = 0x2,
+ DOUT_I2C_CONTROL_SELECT_DDC4 = 0x3,
+ DOUT_I2C_CONTROL_SELECT_DDC5 = 0x4,
+ DOUT_I2C_CONTROL_SELECT_DDC6 = 0x5,
+ DOUT_I2C_CONTROL_SELECT_DDCVGA = 0x6,
+} DOUT_I2C_CONTROL_DDC_SELECT;
+typedef enum DOUT_I2C_CONTROL_TRANSACTION_COUNT {
+ DOUT_I2C_CONTROL_TRANS0 = 0x0,
+ DOUT_I2C_CONTROL_TRANS0_TRANS1 = 0x1,
+ DOUT_I2C_CONTROL_TRANS0_TRANS1_TRANS2 = 0x2,
+ DOUT_I2C_CONTROL_TRANS0_TRANS1_TRANS2_TRANS3 = 0x3,
+} DOUT_I2C_CONTROL_TRANSACTION_COUNT;
+typedef enum DOUT_I2C_CONTROL_DBG_REF_SEL {
+ DOUT_I2C_CONTROL_NORMAL_DEBUG = 0x0,
+ DOUT_I2C_CONTROL_FAST_REFERENCE_DEBUG = 0x1,
+} DOUT_I2C_CONTROL_DBG_REF_SEL;
+typedef enum DOUT_I2C_ARBITRATION_SW_PRIORITY {
+ DOUT_I2C_ARBITRATION_SW_PRIORITY_NORMAL = 0x0,
+ DOUT_I2C_ARBITRATION_SW_PRIORITY_HIGH = 0x1,
+ DOUT_I2C_ARBITRATION_SW_PRIORITY_0_RESERVED = 0x2,
+ DOUT_I2C_ARBITRATION_SW_PRIORITY_1_RESERVED = 0x3,
+} DOUT_I2C_ARBITRATION_SW_PRIORITY;
+typedef enum DOUT_I2C_ARBITRATION_NO_QUEUED_SW_GO {
+ DOUT_I2C_ARBITRATION_SW_QUEUE_ENABLED = 0x0,
+ DOUT_I2C_ARBITRATION_SW_QUEUE_DISABLED = 0x1,
+} DOUT_I2C_ARBITRATION_NO_QUEUED_SW_GO;
+typedef enum DOUT_I2C_ARBITRATION_ABORT_XFER {
+ DOUT_I2C_ARBITRATION_NOT_ABORT_CURRENT_TRANSFER = 0x0,
+ DOUT_I2C_ARBITRATION_ABORT_CURRENT_TRANSFER = 0x1,
+} DOUT_I2C_ARBITRATION_ABORT_XFER;
+typedef enum DOUT_I2C_ARBITRATION_USE_I2C_REG_REQ {
+ DOUT_I2C_ARBITRATION__NOT_USE_I2C_REG_REQ = 0x0,
+ DOUT_I2C_ARBITRATION__USE_I2C_REG_REQ = 0x1,
+} DOUT_I2C_ARBITRATION_USE_I2C_REG_REQ;
+typedef enum DOUT_I2C_ARBITRATION_DONE_USING_I2C_REG {
+ DOUT_I2C_ARBITRATION_DONE__NOT_USING_I2C_REG = 0x0,
+ DOUT_I2C_ARBITRATION_DONE__USING_I2C_REG = 0x1,
+} DOUT_I2C_ARBITRATION_DONE_USING_I2C_REG;
+typedef enum DOUT_I2C_ACK {
+ DOUT_I2C_NO_ACK = 0x0,
+ DOUT_I2C_ACK_TO_CLEAN = 0x1,
+} DOUT_I2C_ACK;
+typedef enum DOUT_I2C_DDC_SPEED_THRESHOLD {
+ DOUT_I2C_DDC_SPEED_THRESHOLD_BIG_THAN_ZERO = 0x0,
+ DOUT_I2C_DDC_SPEED_THRESHOLD_QUATER_OF_TOTAL_SAMPLE= 0x1,
+ DOUT_I2C_DDC_SPEED_THRESHOLD_HALF_OF_TOTAL_SAMPLE= 0x2,
+ DOUT_I2C_DDC_SPEED_THRESHOLD_THREE_QUATERS_OF_TOTAL_SAMPLE= 0x3,
+} DOUT_I2C_DDC_SPEED_THRESHOLD;
+typedef enum DOUT_I2C_DDC_SETUP_DATA_DRIVE_EN {
+ DOUT_I2C_DDC_SETUP_DATA_DRIVE_BY_EXTERNAL_RESISTOR= 0x0,
+ DOUT_I2C_DDC_SETUP_I2C_PAD_DRIVE_SDA = 0x1,
+} DOUT_I2C_DDC_SETUP_DATA_DRIVE_EN;
+typedef enum DOUT_I2C_DDC_SETUP_DATA_DRIVE_SEL {
+ DOUT_I2C_DDC_SETUP_DATA_DRIVE_FOR_10MCLKS = 0x0,
+ DOUT_I2C_DDC_SETUP_DATA_DRIVE_FOR_20MCLKS = 0x1,
+} DOUT_I2C_DDC_SETUP_DATA_DRIVE_SEL;
+typedef enum DOUT_I2C_DDC_SETUP_EDID_DETECT_MODE {
+ DOUT_I2C_DDC_SETUP_EDID_DETECT_CONNECT = 0x0,
+ DOUT_I2C_DDC_SETUP_EDID_DETECT_DISCONNECT = 0x1,
+} DOUT_I2C_DDC_SETUP_EDID_DETECT_MODE;
+typedef enum DOUT_I2C_DDC_SETUP_CLK_DRIVE_EN {
+ DOUT_I2C_DDC_SETUP_CLK_DRIVE_BY_EXTERNAL_RESISTOR= 0x0,
+ DOUT_I2C_DDC_SETUP_I2C_PAD_DRIVE_SCL = 0x1,
+} DOUT_I2C_DDC_SETUP_CLK_DRIVE_EN;
+typedef enum DOUT_I2C_TRANSACTION_STOP_ON_NACK {
+ DOUT_I2C_TRANSACTION_STOP_CURRENT_TRANS = 0x0,
+ DOUT_I2C_TRANSACTION_STOP_ALL_TRANS = 0x1,
+} DOUT_I2C_TRANSACTION_STOP_ON_NACK;
+typedef enum DOUT_I2C_DATA_INDEX_WRITE {
+ DOUT_I2C_DATA__NOT_INDEX_WRITE = 0x0,
+ DOUT_I2C_DATA__INDEX_WRITE = 0x1,
+} DOUT_I2C_DATA_INDEX_WRITE;
+typedef enum DOUT_I2C_EDID_DETECT_CTRL_SEND_RESET {
+ DOUT_I2C_EDID_NOT_SEND_RESET_BEFORE_EDID_READ_TRACTION= 0x0,
+ DOUT_I2C_EDID_SEND_RESET_BEFORE_EDID_READ_TRACTION= 0x1,
+} DOUT_I2C_EDID_DETECT_CTRL_SEND_RESET;
+typedef enum DOUT_I2C_READ_REQUEST_INTERRUPT_TYPE {
+ DOUT_I2C_READ_REQUEST_INTERRUPT_TYPE__LEVEL = 0x0,
+ DOUT_I2C_READ_REQUEST_INTERRUPT_TYPE__PULSE = 0x1,
+} DOUT_I2C_READ_REQUEST_INTERRUPT_TYPE;
+typedef enum BLNDV_CONTROL_BLND_MODE {
+ BLNDV_CONTROL_BLND_MODE_CURRENT_PIPE_ONLY = 0x0,
+ BLNDV_CONTROL_BLND_MODE_OTHER_PIPE_ONLY = 0x1,
+ BLNDV_CONTROL_BLND_MODE_ALPHA_BLENDING_MODE = 0x2,
+ BLNDV_CONTROL_BLND_MODE_OTHER_STEREO_TYPE = 0x3,
+} BLNDV_CONTROL_BLND_MODE;
+typedef enum BLNDV_CONTROL_BLND_STEREO_TYPE {
+ BLNDV_CONTROL_BLND_STEREO_TYPE_NON_SINGLE_PIPE_STEREO= 0x0,
+ BLNDV_CONTROL_BLND_STEREO_TYPE_SIDE_BY_SIDE_SINGLE_PIPE_STEREO= 0x1,
+ BLNDV_CONTROL_BLND_STEREO_TYPE_TOP_BOTTOM_SINGLE_PIPE_STEREO= 0x2,
+ BLNDV_CONTROL_BLND_STEREO_TYPE_UNUSED = 0x3,
+} BLNDV_CONTROL_BLND_STEREO_TYPE;
+typedef enum BLNDV_CONTROL_BLND_STEREO_POLARITY {
+ BLNDV_CONTROL_BLND_STEREO_POLARITY_LOW = 0x0,
+ BLNDV_CONTROL_BLND_STEREO_POLARITY_HIGH = 0x1,
+} BLNDV_CONTROL_BLND_STEREO_POLARITY;
+typedef enum BLNDV_CONTROL_BLND_FEEDTHROUGH_EN {
+ BLNDV_CONTROL_BLND_FEEDTHROUGH_EN_FALSE = 0x0,
+ BLNDV_CONTROL_BLND_FEEDTHROUGH_EN_TRUE = 0x1,
+} BLNDV_CONTROL_BLND_FEEDTHROUGH_EN;
+typedef enum BLNDV_CONTROL_BLND_ALPHA_MODE {
+ BLNDV_CONTROL_BLND_ALPHA_MODE_CURRENT_PIXEL_ALPHA= 0x0,
+ BLNDV_CONTROL_BLND_ALPHA_MODE_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN= 0x1,
+ BLNDV_CONTROL_BLND_ALPHA_MODE_GLOBAL_ALPHA_ONLY = 0x2,
+ BLNDV_CONTROL_BLND_ALPHA_MODE_UNUSED = 0x3,
+} BLNDV_CONTROL_BLND_ALPHA_MODE;
+typedef enum BLNDV_CONTROL_BLND_ACTIVE_OVERLAP_ONLY {
+ BLNDV_CONTROL_BLND_ACTIVE_OVERLAP_ONLY_FALSE = 0x0,
+ BLNDV_CONTROL_BLND_ACTIVE_OVERLAP_ONLY_TRUE = 0x1,
+} BLNDV_CONTROL_BLND_ACTIVE_OVERLAP_ONLY;
+typedef enum BLNDV_CONTROL_BLND_MULTIPLIED_MODE {
+ BLNDV_CONTROL_BLND_MULTIPLIED_MODE_FALSE = 0x0,
+ BLNDV_CONTROL_BLND_MULTIPLIED_MODE_TRUE = 0x1,
+} BLNDV_CONTROL_BLND_MULTIPLIED_MODE;
+typedef enum BLNDV_SM_CONTROL2_SM_MODE {
+ BLNDV_SM_CONTROL2_SM_MODE_SINGLE_PLANE = 0x0,
+ BLNDV_SM_CONTROL2_SM_MODE_ROW_SUBSAMPLING = 0x2,
+ BLNDV_SM_CONTROL2_SM_MODE_COLUMN_SUBSAMPLING = 0x4,
+ BLNDV_SM_CONTROL2_SM_MODE_CHECKERBOARD_SUBSAMPLING= 0x6,
+} BLNDV_SM_CONTROL2_SM_MODE;
+typedef enum BLNDV_SM_CONTROL2_SM_FRAME_ALTERNATE {
+ BLNDV_SM_CONTROL2_SM_FRAME_ALTERNATE_FALSE = 0x0,
+ BLNDV_SM_CONTROL2_SM_FRAME_ALTERNATE_TRUE = 0x1,
+} BLNDV_SM_CONTROL2_SM_FRAME_ALTERNATE;
+typedef enum BLNDV_SM_CONTROL2_SM_FIELD_ALTERNATE {
+ BLNDV_SM_CONTROL2_SM_FIELD_ALTERNATE_FALSE = 0x0,
+ BLNDV_SM_CONTROL2_SM_FIELD_ALTERNATE_TRUE = 0x1,
+} BLNDV_SM_CONTROL2_SM_FIELD_ALTERNATE;
+typedef enum BLNDV_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL {
+ BLNDV_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_NO_FORCE= 0x0,
+ BLNDV_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_RESERVED= 0x1,
+ BLNDV_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_FORCE_LOW= 0x2,
+ BLNDV_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL_FORCE_HIGH= 0x3,
+} BLNDV_SM_CONTROL2_SM_FORCE_NEXT_FRAME_POL;
+typedef enum BLNDV_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL {
+ BLNDV_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_NO_FORCE = 0x0,
+ BLNDV_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_RESERVED = 0x1,
+ BLNDV_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_FORCE_LOW= 0x2,
+ BLNDV_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL_FORCE_HIGH= 0x3,
+} BLNDV_SM_CONTROL2_SM_FORCE_NEXT_TOP_POL;
+typedef enum BLNDV_CONTROL2_PTI_ENABLE {
+ BLNDV_CONTROL2_PTI_ENABLE_FALSE = 0x0,
+ BLNDV_CONTROL2_PTI_ENABLE_TRUE = 0x1,
+} BLNDV_CONTROL2_PTI_ENABLE;
+typedef enum BLNDV_CONTROL2_BLND_SUPERAA_DEGAMMA_EN {
+ BLNDV_CONTROL2_BLND_SUPERAA_DEGAMMA_EN_FALSE = 0x0,
+ BLNDV_CONTROL2_BLND_SUPERAA_DEGAMMA_EN_TRUE = 0x1,
+} BLNDV_CONTROL2_BLND_SUPERAA_DEGAMMA_EN;
+typedef enum BLNDV_CONTROL2_BLND_SUPERAA_REGAMMA_EN {
+ BLNDV_CONTROL2_BLND_SUPERAA_REGAMMA_EN_FALSE = 0x0,
+ BLNDV_CONTROL2_BLND_SUPERAA_REGAMMA_EN_TRUE = 0x1,
+} BLNDV_CONTROL2_BLND_SUPERAA_REGAMMA_EN;
+typedef enum BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK {
+ BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK_FALSE= 0x0,
+ BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK_TRUE= 0x1,
+} BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_ACK;
+typedef enum BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK {
+ BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK_FALSE= 0x0,
+ BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK_TRUE= 0x1,
+} BLNDV_UNDERFLOW_INTERRUPT_BLND_UNDERFLOW_INT_MASK;
+typedef enum BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK {
+ BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK_FALSE= 0x0,
+ BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK_TRUE= 0x1,
+} BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_V_UPDATE_LOCK;
+typedef enum BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK {
+ BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK_FALSE= 0x0,
+ BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK_TRUE= 0x1,
+} BLNDV_V_UPDATE_LOCK_BLND_DCP_GRPH_SURF_V_UPDATE_LOCK;
+typedef enum BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK {
+ BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK_FALSE= 0x0,
+ BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK_TRUE= 0x1,
+} BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR_V_UPDATE_LOCK;
+typedef enum BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK {
+ BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK_FALSE= 0x0,
+ BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK_TRUE= 0x1,
+} BLNDV_V_UPDATE_LOCK_BLND_DCP_CUR2_V_UPDATE_LOCK;
+typedef enum BLNDV_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK {
+ BLNDV_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK_FALSE = 0x0,
+ BLNDV_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK_TRUE = 0x1,
+} BLNDV_V_UPDATE_LOCK_BLND_SCL_V_UPDATE_LOCK;
+typedef enum BLNDV_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK {
+ BLNDV_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK_FALSE= 0x0,
+ BLNDV_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK_TRUE = 0x1,
+} BLNDV_V_UPDATE_LOCK_BLND_BLND_V_UPDATE_LOCK;
+typedef enum BLNDV_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE {
+ BLNDV_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE_FALSE= 0x0,
+ BLNDV_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE_TRUE = 0x1,
+} BLNDV_V_UPDATE_LOCK_BLND_V_UPDATE_LOCK_MODE;
+typedef enum BLNDV_DEBUG_BLND_CNV_MUX_SELECT {
+ BLNDV_DEBUG_BLND_CNV_MUX_SELECT_LOW = 0x0,
+ BLNDV_DEBUG_BLND_CNV_MUX_SELECT_HIGH = 0x1,
+} BLNDV_DEBUG_BLND_CNV_MUX_SELECT;
+typedef enum BLNDV_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN {
+ BLNDV_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN_FALSE= 0x0,
+ BLNDV_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN_TRUE= 0x1,
+} BLNDV_TEST_DEBUG_INDEX_BLND_TEST_DEBUG_WRITE_EN;
+typedef enum DPCSTX_DBG_CFGCLK_SEL {
+ DPCSTX_DBG_CFGCLK_SEL_DC_DPCS_INF = 0x0,
+ DPCSTX_DBG_CFGCLK_SEL_DPCS_BPHY_INF = 0x1,
+ DPCSTX_DBG_CFGCLK_SEL_CBUS_SLAVE = 0x2,
+ DPCSTX_DBG_CFGCLK_SEL_CBUS_MASTER = 0x3,
+} DPCSTX_DBG_CFGCLK_SEL;
+typedef enum DPCSTX_TX_SYMCLK_SEL {
+ DPCSTX_DBG_TX_SYMCLK_SEL_IN0 = 0x0,
+ DPCSTX_DBG_TX_SYMCLK_SEL_IN1 = 0x1,
+ DPCSTX_DBG_TX_SYMCLK_SEL_FIFO_WR = 0x2,
+} DPCSTX_TX_SYMCLK_SEL;
+typedef enum DPCSTX_TX_SYMCLK_DIV2_SEL {
+ DPCSTX_DBG_TX_SYMCLK_DIV2_SEL_OUT0 = 0x0,
+ DPCSTX_DBG_TX_SYMCLK_DIV2_SEL_OUT1 = 0x1,
+ DPCSTX_DBG_TX_SYMCLK_DIV2_SEL_OUT2 = 0x2,
+ DPCSTX_DBG_TX_SYMCLK_DIV2_SEL_OUT3 = 0x3,
+ DPCSTX_DBG_TX_SYMCLK_DIV2_SEL_FIFO_RD = 0x4,
+ DPCSTX_DBG_TX_SYMCLK_DIV2_SEL_INT = 0x5,
+} DPCSTX_TX_SYMCLK_DIV2_SEL;
+typedef enum DPCSTX_DBG_CLOCK_SEL {
+ DPCSTX_DBG_CLOCK_SEL_DC_CFGCLK = 0x0,
+ DPCSTX_DBG_CLOCK_SEL_PHY_CFGCLK = 0x1,
+ DPCSTX_DBG_CLOCK_SEL_TXSYMCLK = 0x2,
+} DPCSTX_DBG_CLOCK_SEL;
+typedef enum DPCSTX_DVI_LINK_MODE {
+ DPCSTX_DVI_LINK_MODE_NORMAL = 0x0,
+ DPCSTX_DVI_LINK_MODE_DUAL_LINK_MASTER = 0x1,
+ DPCSTX_DVI_LINK_MODE_DUAL_LINK_SLAVER = 0x2,
+} DPCSTX_DVI_LINK_MODE;
+
+#endif /* DCE_11_2_ENUM_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
new file mode 100755
index 000000000..1ddc4183a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h
@@ -0,0 +1,18687 @@
+/*
+ * DCE_11_2 Register documentation
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DCE_11_2_SH_MASK_H
+#define DCE_11_2_SH_MASK_H
+
+#define PIPE0_PG_CONFIG__PIPE0_POWER_FORCEON_MASK 0x1
+#define PIPE0_PG_CONFIG__PIPE0_POWER_FORCEON__SHIFT 0x0
+#define PIPE0_PG_ENABLE__PIPE0_POWER_GATE_MASK 0x1
+#define PIPE0_PG_ENABLE__PIPE0_POWER_GATE__SHIFT 0x0
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_READ_DATA_MASK 0xffffff
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_READ_DATA__SHIFT 0x0
+#define PIPE0_PG_STATUS__PIPE0_DEBUG_PWR_STATUS_MASK 0x3000000
+#define PIPE0_PG_STATUS__PIPE0_DEBUG_PWR_STATUS__SHIFT 0x18
+#define PIPE0_PG_STATUS__PIPE0_DESIRED_PWR_STATE_MASK 0x10000000
+#define PIPE0_PG_STATUS__PIPE0_DESIRED_PWR_STATE__SHIFT 0x1c
+#define PIPE0_PG_STATUS__PIPE0_REQUESTED_PWR_STATE_MASK 0x20000000
+#define PIPE0_PG_STATUS__PIPE0_REQUESTED_PWR_STATE__SHIFT 0x1d
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_PWR_STATUS_MASK 0xc0000000
+#define PIPE0_PG_STATUS__PIPE0_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define PIPE1_PG_CONFIG__PIPE1_POWER_FORCEON_MASK 0x1
+#define PIPE1_PG_CONFIG__PIPE1_POWER_FORCEON__SHIFT 0x0
+#define PIPE1_PG_ENABLE__PIPE1_POWER_GATE_MASK 0x1
+#define PIPE1_PG_ENABLE__PIPE1_POWER_GATE__SHIFT 0x0
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_READ_DATA_MASK 0xffffff
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_READ_DATA__SHIFT 0x0
+#define PIPE1_PG_STATUS__PIPE1_DEBUG_PWR_STATUS_MASK 0x3000000
+#define PIPE1_PG_STATUS__PIPE1_DEBUG_PWR_STATUS__SHIFT 0x18
+#define PIPE1_PG_STATUS__PIPE1_DESIRED_PWR_STATE_MASK 0x10000000
+#define PIPE1_PG_STATUS__PIPE1_DESIRED_PWR_STATE__SHIFT 0x1c
+#define PIPE1_PG_STATUS__PIPE1_REQUESTED_PWR_STATE_MASK 0x20000000
+#define PIPE1_PG_STATUS__PIPE1_REQUESTED_PWR_STATE__SHIFT 0x1d
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_PWR_STATUS_MASK 0xc0000000
+#define PIPE1_PG_STATUS__PIPE1_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define PIPE2_PG_CONFIG__PIPE2_POWER_FORCEON_MASK 0x1
+#define PIPE2_PG_CONFIG__PIPE2_POWER_FORCEON__SHIFT 0x0
+#define PIPE2_PG_ENABLE__PIPE2_POWER_GATE_MASK 0x1
+#define PIPE2_PG_ENABLE__PIPE2_POWER_GATE__SHIFT 0x0
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_READ_DATA_MASK 0xffffff
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_READ_DATA__SHIFT 0x0
+#define PIPE2_PG_STATUS__PIPE2_DEBUG_PWR_STATUS_MASK 0x3000000
+#define PIPE2_PG_STATUS__PIPE2_DEBUG_PWR_STATUS__SHIFT 0x18
+#define PIPE2_PG_STATUS__PIPE2_DESIRED_PWR_STATE_MASK 0x10000000
+#define PIPE2_PG_STATUS__PIPE2_DESIRED_PWR_STATE__SHIFT 0x1c
+#define PIPE2_PG_STATUS__PIPE2_REQUESTED_PWR_STATE_MASK 0x20000000
+#define PIPE2_PG_STATUS__PIPE2_REQUESTED_PWR_STATE__SHIFT 0x1d
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_PWR_STATUS_MASK 0xc0000000
+#define PIPE2_PG_STATUS__PIPE2_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define PIPE3_PG_CONFIG__PIPE3_POWER_FORCEON_MASK 0x1
+#define PIPE3_PG_CONFIG__PIPE3_POWER_FORCEON__SHIFT 0x0
+#define PIPE3_PG_ENABLE__PIPE3_POWER_GATE_MASK 0x1
+#define PIPE3_PG_ENABLE__PIPE3_POWER_GATE__SHIFT 0x0
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_READ_DATA_MASK 0xffffff
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_READ_DATA__SHIFT 0x0
+#define PIPE3_PG_STATUS__PIPE3_DEBUG_PWR_STATUS_MASK 0x3000000
+#define PIPE3_PG_STATUS__PIPE3_DEBUG_PWR_STATUS__SHIFT 0x18
+#define PIPE3_PG_STATUS__PIPE3_DESIRED_PWR_STATE_MASK 0x10000000
+#define PIPE3_PG_STATUS__PIPE3_DESIRED_PWR_STATE__SHIFT 0x1c
+#define PIPE3_PG_STATUS__PIPE3_REQUESTED_PWR_STATE_MASK 0x20000000
+#define PIPE3_PG_STATUS__PIPE3_REQUESTED_PWR_STATE__SHIFT 0x1d
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_PWR_STATUS_MASK 0xc0000000
+#define PIPE3_PG_STATUS__PIPE3_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define PIPE4_PG_CONFIG__PIPE4_POWER_FORCEON_MASK 0x1
+#define PIPE4_PG_CONFIG__PIPE4_POWER_FORCEON__SHIFT 0x0
+#define PIPE4_PG_ENABLE__PIPE4_POWER_GATE_MASK 0x1
+#define PIPE4_PG_ENABLE__PIPE4_POWER_GATE__SHIFT 0x0
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_READ_DATA_MASK 0xffffff
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_READ_DATA__SHIFT 0x0
+#define PIPE4_PG_STATUS__PIPE4_DEBUG_PWR_STATUS_MASK 0x3000000
+#define PIPE4_PG_STATUS__PIPE4_DEBUG_PWR_STATUS__SHIFT 0x18
+#define PIPE4_PG_STATUS__PIPE4_DESIRED_PWR_STATE_MASK 0x10000000
+#define PIPE4_PG_STATUS__PIPE4_DESIRED_PWR_STATE__SHIFT 0x1c
+#define PIPE4_PG_STATUS__PIPE4_REQUESTED_PWR_STATE_MASK 0x20000000
+#define PIPE4_PG_STATUS__PIPE4_REQUESTED_PWR_STATE__SHIFT 0x1d
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_PWR_STATUS_MASK 0xc0000000
+#define PIPE4_PG_STATUS__PIPE4_PGFSM_PWR_STATUS__SHIFT 0x1e
+#define PIPE5_PG_CONFIG__PIPE5_POWER_FORCEON_MASK 0x1
+#define PIPE5_PG_CONFIG__PIPE5_POWER_FORCEON__SHIFT 0x0
+#define PIPE5_PG_ENABLE__PIPE5_POWER_GATE_MASK 0x1
+#define PIPE5_PG_ENABLE__PIPE5_POWER_GATE__SHIFT 0x0
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_READ_DATA_MASK 0xffffff
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_READ_DATA__SHIFT 0x0
+#define PIPE5_PG_STATUS__PIPE5_DEBUG_PWR_STATUS_MASK 0x3000000
+#define PIPE5_PG_STATUS__PIPE5_DEBUG_PWR_STATUS__SHIFT 0x18
+#define PIPE5_PG_STATUS__PIPE5_DESIRED_PWR_STATE_MASK 0x10000000
+#define PIPE5_PG_STATUS__PIPE5_DESIRED_PWR_STATE__SHIFT 0x1c
+#define PIPE5_PG_STATUS__PIPE5_REQUESTED_PWR_STATE_MASK 0x20000000
+#define PIPE5_PG_STATUS__PIPE5_REQUESTED_PWR_STATE__SHIFT 0x1d
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_PWR_STATUS_MASK 0xc0000000
+#define PIPE5_PG_STATUS__PIPE5_PGFSM_PWR_STATUS__SHIFT 0x1e
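+/* Every field in this file follows the REG__FIELD_MASK / REG__FIELD__SHIFT
+ * naming pattern, so a field is read by masking the register value and
+ * shifting it down. A minimal sketch, assuming a hypothetical 32-bit
+ * register read helper and the register-offset macro from the companion
+ * dce_11_2_d.h header (read_reg32() and mmPIPE0_PG_STATUS are assumptions,
+ * not defined in this file):
+ *
+ *   u32 val = read_reg32(mmPIPE0_PG_STATUS);
+ *   u32 pwr = (val & PIPE0_PG_STATUS__PIPE0_PGFSM_PWR_STATUS_MASK) >>
+ *             PIPE0_PG_STATUS__PIPE0_PGFSM_PWR_STATUS__SHIFT;
+ */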
+#define DCPG_INTERRUPT_STATUS__DCFE0_POWER_UP_INT_OCCURRED_MASK 0x1
+#define DCPG_INTERRUPT_STATUS__DCFE0_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DCPG_INTERRUPT_STATUS__DCFE0_POWER_DOWN_INT_OCCURRED_MASK 0x2
+#define DCPG_INTERRUPT_STATUS__DCFE0_POWER_DOWN_INT_OCCURRED__SHIFT 0x1
+#define DCPG_INTERRUPT_STATUS__DCFE1_POWER_UP_INT_OCCURRED_MASK 0x4
+#define DCPG_INTERRUPT_STATUS__DCFE1_POWER_UP_INT_OCCURRED__SHIFT 0x2
+#define DCPG_INTERRUPT_STATUS__DCFE1_POWER_DOWN_INT_OCCURRED_MASK 0x8
+#define DCPG_INTERRUPT_STATUS__DCFE1_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DCPG_INTERRUPT_STATUS__DCFE2_POWER_UP_INT_OCCURRED_MASK 0x10
+#define DCPG_INTERRUPT_STATUS__DCFE2_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DCPG_INTERRUPT_STATUS__DCFE2_POWER_DOWN_INT_OCCURRED_MASK 0x20
+#define DCPG_INTERRUPT_STATUS__DCFE2_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DCPG_INTERRUPT_STATUS__DCFE3_POWER_UP_INT_OCCURRED_MASK 0x40
+#define DCPG_INTERRUPT_STATUS__DCFE3_POWER_UP_INT_OCCURRED__SHIFT 0x6
+#define DCPG_INTERRUPT_STATUS__DCFE3_POWER_DOWN_INT_OCCURRED_MASK 0x80
+#define DCPG_INTERRUPT_STATUS__DCFE3_POWER_DOWN_INT_OCCURRED__SHIFT 0x7
+#define DCPG_INTERRUPT_STATUS__DCFE4_POWER_UP_INT_OCCURRED_MASK 0x100
+#define DCPG_INTERRUPT_STATUS__DCFE4_POWER_UP_INT_OCCURRED__SHIFT 0x8
+#define DCPG_INTERRUPT_STATUS__DCFE4_POWER_DOWN_INT_OCCURRED_MASK 0x200
+#define DCPG_INTERRUPT_STATUS__DCFE4_POWER_DOWN_INT_OCCURRED__SHIFT 0x9
+#define DCPG_INTERRUPT_STATUS__DCFE5_POWER_UP_INT_OCCURRED_MASK 0x400
+#define DCPG_INTERRUPT_STATUS__DCFE5_POWER_UP_INT_OCCURRED__SHIFT 0xa
+#define DCPG_INTERRUPT_STATUS__DCFE5_POWER_DOWN_INT_OCCURRED_MASK 0x800
+#define DCPG_INTERRUPT_STATUS__DCFE5_POWER_DOWN_INT_OCCURRED__SHIFT 0xb
+#define DCPG_INTERRUPT_STATUS__DCFEV0_POWER_UP_INT_OCCURRED_MASK 0x1000
+#define DCPG_INTERRUPT_STATUS__DCFEV0_POWER_UP_INT_OCCURRED__SHIFT 0xc
+#define DCPG_INTERRUPT_STATUS__DCFEV0_POWER_DOWN_INT_OCCURRED_MASK 0x2000
+#define DCPG_INTERRUPT_STATUS__DCFEV0_POWER_DOWN_INT_OCCURRED__SHIFT 0xd
+#define DCPG_INTERRUPT_STATUS__DSI_POWER_UP_INT_OCCURRED_MASK 0x4000
+#define DCPG_INTERRUPT_STATUS__DSI_POWER_UP_INT_OCCURRED__SHIFT 0xe
+#define DCPG_INTERRUPT_STATUS__DSI_POWER_DOWN_INT_OCCURRED_MASK 0x8000
+#define DCPG_INTERRUPT_STATUS__DSI_POWER_DOWN_INT_OCCURRED__SHIFT 0xf
+#define DCPG_INTERRUPT_STATUS__DCFEV1_POWER_UP_INT_OCCURRED_MASK 0x10000
+#define DCPG_INTERRUPT_STATUS__DCFEV1_POWER_UP_INT_OCCURRED__SHIFT 0x10
+#define DCPG_INTERRUPT_STATUS__DCFEV1_POWER_DOWN_INT_OCCURRED_MASK 0x20000
+#define DCPG_INTERRUPT_STATUS__DCFEV1_POWER_DOWN_INT_OCCURRED__SHIFT 0x11
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_UP_INT_MASK_MASK 0x1
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_UP_INT_MASK__SHIFT 0x0
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_UP_INT_CLEAR_MASK 0x2
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_DOWN_INT_MASK_MASK 0x4
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_DOWN_INT_MASK__SHIFT 0x2
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_DOWN_INT_CLEAR_MASK 0x8
+#define DCPG_INTERRUPT_CONTROL__DCFE0_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_UP_INT_MASK_MASK 0x10
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_UP_INT_MASK__SHIFT 0x4
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_UP_INT_CLEAR_MASK 0x20
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_UP_INT_CLEAR__SHIFT 0x5
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_DOWN_INT_MASK_MASK 0x40
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_DOWN_INT_MASK__SHIFT 0x6
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_DOWN_INT_CLEAR_MASK 0x80
+#define DCPG_INTERRUPT_CONTROL__DCFE1_POWER_DOWN_INT_CLEAR__SHIFT 0x7
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_UP_INT_MASK_MASK 0x100
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_UP_INT_MASK__SHIFT 0x8
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_UP_INT_CLEAR_MASK 0x200
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_UP_INT_CLEAR__SHIFT 0x9
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_DOWN_INT_MASK_MASK 0x400
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_DOWN_INT_MASK__SHIFT 0xa
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_DOWN_INT_CLEAR_MASK 0x800
+#define DCPG_INTERRUPT_CONTROL__DCFE2_POWER_DOWN_INT_CLEAR__SHIFT 0xb
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_UP_INT_MASK_MASK 0x1000
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_UP_INT_MASK__SHIFT 0xc
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_UP_INT_CLEAR_MASK 0x2000
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_DOWN_INT_MASK_MASK 0x4000
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_DOWN_INT_MASK__SHIFT 0xe
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_DOWN_INT_CLEAR_MASK 0x8000
+#define DCPG_INTERRUPT_CONTROL__DCFE3_POWER_DOWN_INT_CLEAR__SHIFT 0xf
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_UP_INT_MASK_MASK 0x10000
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_UP_INT_MASK__SHIFT 0x10
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_UP_INT_CLEAR_MASK 0x20000
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_UP_INT_CLEAR__SHIFT 0x11
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_DOWN_INT_MASK_MASK 0x40000
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_DOWN_INT_MASK__SHIFT 0x12
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_DOWN_INT_CLEAR_MASK 0x80000
+#define DCPG_INTERRUPT_CONTROL__DCFE4_POWER_DOWN_INT_CLEAR__SHIFT 0x13
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_UP_INT_MASK_MASK 0x100000
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_UP_INT_MASK__SHIFT 0x14
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_UP_INT_CLEAR_MASK 0x200000
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_UP_INT_CLEAR__SHIFT 0x15
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_DOWN_INT_MASK_MASK 0x400000
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_DOWN_INT_MASK__SHIFT 0x16
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_DOWN_INT_CLEAR_MASK 0x800000
+#define DCPG_INTERRUPT_CONTROL__DCFE5_POWER_DOWN_INT_CLEAR__SHIFT 0x17
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_UP_INT_MASK_MASK 0x1000000
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_UP_INT_MASK__SHIFT 0x18
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_UP_INT_CLEAR_MASK 0x2000000
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_UP_INT_CLEAR__SHIFT 0x19
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_DOWN_INT_MASK_MASK 0x4000000
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_DOWN_INT_MASK__SHIFT 0x1a
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_DOWN_INT_CLEAR_MASK 0x8000000
+#define DCPG_INTERRUPT_CONTROL__DCFEV0_POWER_DOWN_INT_CLEAR__SHIFT 0x1b
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_UP_INT_MASK_MASK 0x10000000
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_UP_INT_MASK__SHIFT 0x1c
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_UP_INT_CLEAR_MASK 0x20000000
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_UP_INT_CLEAR__SHIFT 0x1d
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_DOWN_INT_MASK_MASK 0x40000000
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_DOWN_INT_MASK__SHIFT 0x1e
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_DOWN_INT_CLEAR_MASK 0x80000000
+#define DCPG_INTERRUPT_CONTROL__DSI_POWER_DOWN_INT_CLEAR__SHIFT 0x1f
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_UP_INT_MASK_MASK 0x1000000
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_UP_INT_MASK__SHIFT 0x18
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_UP_INT_CLEAR_MASK 0x2000000
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_UP_INT_CLEAR__SHIFT 0x19
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_DOWN_INT_MASK_MASK 0x4000000
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_DOWN_INT_MASK__SHIFT 0x1a
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_DOWN_INT_CLEAR_MASK 0x8000000
+#define DCPG_INTERRUPT_CONTROL2__DCFEV1_POWER_DOWN_INT_CLEAR__SHIFT 0x1b
+#define DC_IP_REQUEST_CNTL__IP_REQUEST_EN_MASK 0x1
+#define DC_IP_REQUEST_CNTL__IP_REQUEST_EN__SHIFT 0x0
+#define DC_PGFSM_CONFIG_REG__PGFSM_CONFIG_REG_MASK 0xffffffff
+#define DC_PGFSM_CONFIG_REG__PGFSM_CONFIG_REG__SHIFT 0x0
+#define DC_PGFSM_WRITE_REG__PGFSM_WRITE_REG_MASK 0xffffffff
+#define DC_PGFSM_WRITE_REG__PGFSM_WRITE_REG__SHIFT 0x0
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_BUSY_MASK 0x1
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_BUSY__SHIFT 0x0
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_FORCE_MASK 0x2
+#define DC_PGCNTL_STATUS_REG__SWREQ_RWOP_FORCE__SHIFT 0x1
+#define DC_PGCNTL_STATUS_REG__IPREQ_IGNORE_STATUS_MASK 0x4
+#define DC_PGCNTL_STATUS_REG__IPREQ_IGNORE_STATUS__SHIFT 0x2
+#define DC_PGCNTL_STATUS_REG__DCPG_ECO_DEBUG_MASK 0xffff0000
+#define DC_PGCNTL_STATUS_REG__DCPG_ECO_DEBUG__SHIFT 0x10
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_INDEX_MASK 0xff
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DCPG_TEST_DEBUG_INDEX__DCPG_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DCPG_TEST_DEBUG_DATA__DCPG_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DCPG_TEST_DEBUG_DATA__DCPG_TEST_DEBUG_DATA__SHIFT 0x0
+#define BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL_MASK 0x1ffff
+#define BL1_PWM_AMBIENT_LIGHT_LEVEL__BL1_PWM_AMBIENT_LIGHT_LEVEL__SHIFT 0x0
+#define BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL_MASK 0x1ffff
+#define BL1_PWM_USER_LEVEL__BL1_PWM_USER_LEVEL__SHIFT 0x0
+#define BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL_MASK 0x1ffff
+#define BL1_PWM_TARGET_ABM_LEVEL__BL1_PWM_TARGET_ABM_LEVEL__SHIFT 0x0
+#define BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL_MASK 0x1ffff
+#define BL1_PWM_CURRENT_ABM_LEVEL__BL1_PWM_CURRENT_ABM_LEVEL__SHIFT 0x0
+#define BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE_MASK 0x1ffff
+#define BL1_PWM_FINAL_DUTY_CYCLE__BL1_PWM_FINAL_DUTY_CYCLE__SHIFT 0x0
+#define BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE_MASK 0x1ffff
+#define BL1_PWM_MINIMUM_DUTY_CYCLE__BL1_PWM_MINIMUM_DUTY_CYCLE__SHIFT 0x0
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN_MASK 0x1
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_ABM_EN__SHIFT 0x0
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN_MASK 0x2
+#define BL1_PWM_ABM_CNTL__BL1_PWM_USE_AMBIENT_LEVEL_EN__SHIFT 0x1
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN_MASK 0x4
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_LEVEL_EN__SHIFT 0x2
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN_MASK 0x8
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_CALC_FINAL_DUTY_CYCLE_EN__SHIFT 0x3
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE_MASK 0xffff0000
+#define BL1_PWM_ABM_CNTL__BL1_PWM_AUTO_UPDATE_CURRENT_ABM_STEP_SIZE__SHIFT 0x10
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN_MASK 0x1
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x2
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT_MASK 0xff00
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0xff0000
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__BL1_PWM_BL_UPDATE_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000
+#define BL1_PWM_BL_UPDATE_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK_MASK 0x1
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_LOCK__SHIFT 0x0
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING_MASK 0x100
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_REG_UPDATE_PENDING__SHIFT 0x8
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START_MASK 0x10000
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL_MASK 0xe0000
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_FRAME_START_DISP_SEL__SHIFT 0x11
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN_MASK 0x1000000
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN_MASK 0x80000000
+#define BL1_PWM_GRP2_REG_LOCK__BL1_PWM_GRP2_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define DC_ABM1_CNTL__ABM1_EN_MASK 0x1
+#define DC_ABM1_CNTL__ABM1_EN__SHIFT 0x0
+#define DC_ABM1_CNTL__ABM1_SOURCE_SELECT_MASK 0x700
+#define DC_ABM1_CNTL__ABM1_SOURCE_SELECT__SHIFT 0x8
+#define DC_ABM1_CNTL__ABM1_BLANK_MODE_SUPPORT_ENABLE_MASK 0x80000000
+#define DC_ABM1_CNTL__ABM1_BLANK_MODE_SUPPORT_ENABLE__SHIFT 0x1f
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B_MASK 0xf
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_B__SHIFT 0x0
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G_MASK 0xf00
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_G__SHIFT 0x8
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R_MASK 0xf0000
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_IPCSC_COEFF_SEL_R__SHIFT 0x10
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK_MASK 0x80000000
+#define DC_ABM1_IPCSC_COEFF_SEL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0_MASK 0x7fff
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_SLOPE_0__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0_MASK 0x7ff0000
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_OFFSET_0__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK_MASK 0x80000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_0__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1_MASK 0x7fff
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_SLOPE_1__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1_MASK 0x7ff0000
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_OFFSET_1__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK_MASK 0x80000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_1__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2_MASK 0x7fff
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_SLOPE_2__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2_MASK 0x7ff0000
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_OFFSET_2__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK_MASK 0x80000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_2__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3_MASK 0x7fff
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_SLOPE_3__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3_MASK 0x7ff0000
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_OFFSET_3__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK_MASK 0x80000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_3__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4_MASK 0x7fff
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_SLOPE_4__SHIFT 0x0
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4_MASK 0x7ff0000
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_OFFSET_4__SHIFT 0x10
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK_MASK 0x80000000
+#define DC_ABM1_ACE_OFFSET_SLOPE_4__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1_MASK 0x3ff
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_1__SHIFT 0x0
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2_MASK 0x3ff0000
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_THRES_2__SHIFT 0x10
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK_MASK 0x80000000
+#define DC_ABM1_ACE_THRES_12__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3_MASK 0x3ff
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_3__SHIFT 0x0
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4_MASK 0x3ff0000
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_THRES_4__SHIFT 0x10
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN_MASK 0x10000000
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_IGNORE_MASTER_LOCK_EN__SHIFT 0x1c
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN_MASK 0x20000000
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_READBACK_DB_REG_VALUE_EN__SHIFT 0x1d
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING_MASK 0x40000000
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_DBUF_REG_UPDATE_PENDING__SHIFT 0x1e
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK_MASK 0x80000000
+#define DC_ABM1_ACE_THRES_34__ABM1_ACE_LOCK__SHIFT 0x1f
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_MASK 0x1
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME__SHIFT 0x0
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR_MASK 0x100
+#define DC_ABM1_ACE_CNTL_MISC__ABM1_ACE_REG_WR_MISSED_FRAME_CLEAR__SHIFT 0x8
+#define DC_ABM1_DEBUG_MISC__ABM1_HG_FORCE_INTERRUPT_MASK 0x1
+#define DC_ABM1_DEBUG_MISC__ABM1_HG_FORCE_INTERRUPT__SHIFT 0x0
+#define DC_ABM1_DEBUG_MISC__ABM1_LS_FORCE_INTERRUPT_MASK 0x100
+#define DC_ABM1_DEBUG_MISC__ABM1_LS_FORCE_INTERRUPT__SHIFT 0x8
+#define DC_ABM1_DEBUG_MISC__ABM1_BL_FORCE_INTERRUPT_MASK 0x10000
+#define DC_ABM1_DEBUG_MISC__ABM1_BL_FORCE_INTERRUPT__SHIFT 0x10
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS_MASK 0x1
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_IN_PROGRESS__SHIFT 0x0
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS_MASK 0x2
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_IN_PROGRESS__SHIFT 0x1
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS_MASK 0x4
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_IN_PROGRESS__SHIFT 0x2
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_MASK 0x100
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME__SHIFT 0x8
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_MASK 0x200
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME__SHIFT 0x9
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_MASK 0x400
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME__SHIFT 0xa
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR_MASK 0x10000
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_HG_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x10
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR_MASK 0x1000000
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_LS_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x18
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR_MASK 0x80000000
+#define DC_ABM1_HGLS_REG_READ_PROGRESS__ABM1_BL_REG_READ_MISSED_FRAME_CLEAR__SHIFT 0x1f
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL_MASK 0x3
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_NUM_OF_BINS_SEL__SHIFT 0x0
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL_MASK 0x100
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_VMAX_SEL__SHIFT 0x8
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL_MASK 0x1000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_FINE_MODE_BIN_SEL__SHIFT 0xc
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL_MASK 0x30000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HG_BIN_BITWIDTH_SIZE_SEL__SHIFT 0x10
+#define DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN_MASK 0x100000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_OVR_SCAN_PIXEL_PROCESS_EN__SHIFT 0x14
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN_MASK 0x800000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_READBACK_DB_REG_VALUE_EN__SHIFT 0x17
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL_MASK 0x7000000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_FRAME_START_DISP_SEL__SHIFT 0x18
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START_MASK 0x10000000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_AT_FRAME_START__SHIFT 0x1c
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN_MASK 0x20000000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_IGNORE_MASTER_LOCK_EN__SHIFT 0x1d
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING_MASK 0x40000000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_DBUF_HGLS_REG_UPDATE_PENDING__SHIFT 0x1e
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK_MASK 0x80000000
+#define DC_ABM1_HG_MISC_CTRL__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA_MASK 0xffffffff
+#define DC_ABM1_LS_SUM_OF_LUMA__ABM1_LS_SUM_OF_LUMA__SHIFT 0x0
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA_MASK 0x3ff
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MIN_LUMA__SHIFT 0x0
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA_MASK 0x3ff0000
+#define DC_ABM1_LS_MIN_MAX_LUMA__ABM1_LS_MAX_LUMA__SHIFT 0x10
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA_MASK 0x3ff
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MIN_LUMA__SHIFT 0x0
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA_MASK 0x3ff0000
+#define DC_ABM1_LS_FILTERED_MIN_MAX_LUMA__ABM1_LS_FILTERED_MAX_LUMA__SHIFT 0x10
+#define DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT_MASK 0xffffff
+#define DC_ABM1_LS_PIXEL_COUNT__ABM1_LS_PIXEL_COUNT__SHIFT 0x0
+#define DC_ABM1_LS_OVR_SCAN_BIN__ABM1_LS_OVR_SCAN_BIN_MASK 0xffffff
+#define DC_ABM1_LS_OVR_SCAN_BIN__ABM1_LS_OVR_SCAN_BIN__SHIFT 0x0
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES_MASK 0x3ff
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MIN_PIXEL_VALUE_THRES__SHIFT 0x0
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES_MASK 0x3ff0000
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_LS_MAX_PIXEL_VALUE_THRES__SHIFT 0x10
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK_MASK 0x80000000
+#define DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT_MASK 0xffffff
+#define DC_ABM1_LS_MIN_PIXEL_VALUE_COUNT__ABM1_LS_MIN_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT_MASK 0xffffff
+#define DC_ABM1_LS_MAX_PIXEL_VALUE_COUNT__ABM1_LS_MAX_PIXEL_VALUE_COUNT__SHIFT 0x0
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN_MASK 0x1
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x2
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT_MASK 0xff00
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0xff0000
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HG_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000
+#define DC_ABM1_HG_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN_MASK 0x1
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_COUNT_EN__SHIFT 0x0
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER_MASK 0x2
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_RESET_SAMPLE_RATE_FRAME_COUNTER__SHIFT 0x1
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT_MASK 0xff00
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_SAMPLE_RATE_FRAME_COUNT__SHIFT 0x8
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET_MASK 0xff0000
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_LS_INITIAL_SAMPLE_RATE_COUNT_VALUE_WHEN_RESET__SHIFT 0x10
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK_MASK 0x80000000
+#define DC_ABM1_LS_SAMPLE_RATE__ABM1_HGLS_REG_LOCK__SHIFT 0x1f
+#define DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG_MASK 0xffffffff
+#define DC_ABM1_HG_BIN_1_32_SHIFT_FLAG__ABM1_HG_BIN_1_32_SHIFT_FLAG__SHIFT 0x0
+#define DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX_MASK 0xffffffff
+#define DC_ABM1_HG_BIN_1_8_SHIFT_INDEX__ABM1_HG_BIN_1_8_SHIFT_INDEX__SHIFT 0x0
+#define DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX_MASK 0xffffffff
+#define DC_ABM1_HG_BIN_9_16_SHIFT_INDEX__ABM1_HG_BIN_9_16_SHIFT_INDEX__SHIFT 0x0
+#define DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX_MASK 0xffffffff
+#define DC_ABM1_HG_BIN_17_24_SHIFT_INDEX__ABM1_HG_BIN_17_24_SHIFT_INDEX__SHIFT 0x0
+#define DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX_MASK 0xffffffff
+#define DC_ABM1_HG_BIN_25_32_SHIFT_INDEX__ABM1_HG_BIN_25_32_SHIFT_INDEX__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_1__ABM1_HG_RESULT_1__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_2__ABM1_HG_RESULT_2__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_3__ABM1_HG_RESULT_3__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_4__ABM1_HG_RESULT_4__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_5__ABM1_HG_RESULT_5__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_6__ABM1_HG_RESULT_6__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_7__ABM1_HG_RESULT_7__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_8__ABM1_HG_RESULT_8__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_9__ABM1_HG_RESULT_9__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_10__ABM1_HG_RESULT_10__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_11__ABM1_HG_RESULT_11__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_12__ABM1_HG_RESULT_12__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_13__ABM1_HG_RESULT_13__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_14__ABM1_HG_RESULT_14__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_15__ABM1_HG_RESULT_15__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_16__ABM1_HG_RESULT_16__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_17__ABM1_HG_RESULT_17__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_18__ABM1_HG_RESULT_18__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_19__ABM1_HG_RESULT_19__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_20__ABM1_HG_RESULT_20__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_21__ABM1_HG_RESULT_21__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_22__ABM1_HG_RESULT_22__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_23__ABM1_HG_RESULT_23__SHIFT 0x0
+#define DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24_MASK 0xffffffff
+#define DC_ABM1_HG_RESULT_24__ABM1_HG_RESULT_24__SHIFT 0x0
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_R_PIXEL_VALUE_MASK 0x3ff
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_R_PIXEL_VALUE__SHIFT 0x0
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_G_PIXEL_VALUE_MASK 0xffc00
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_G_PIXEL_VALUE__SHIFT 0xa
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_B_PIXEL_VALUE_MASK 0x3ff00000
+#define DC_ABM1_OVERSCAN_PIXEL_VALUE__ABM1_OVERSCAN_B_PIXEL_VALUE__SHIFT 0x14
+#define DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK_MASK 0x80000000
+#define DC_ABM1_BL_MASTER_LOCK__ABM1_BL_MASTER_LOCK__SHIFT 0x1f
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_INDEX_MASK 0xff
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define ABM_TEST_DEBUG_INDEX__ABM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define ABM_TEST_DEBUG_DATA__ABM_TEST_DEBUG_DATA_MASK 0xffffffff
+#define ABM_TEST_DEBUG_DATA__ABM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM_MASK 0x3ff
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM__SHIFT 0x0
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM_DIS_MASK 0x10000
+#define CRTC_H_BLANK_EARLY_NUM__CRTC_H_BLANK_EARLY_NUM_DIS__SHIFT 0x10
+#define CRTC_H_TOTAL__CRTC_H_TOTAL_MASK 0x3fff
+#define CRTC_H_TOTAL__CRTC_H_TOTAL__SHIFT 0x0
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_START_MASK 0x3fff
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_START__SHIFT 0x0
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_END_MASK 0x3fff0000
+#define CRTC_H_BLANK_START_END__CRTC_H_BLANK_END__SHIFT 0x10
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_START_MASK 0x3fff
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_START__SHIFT 0x0
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_END_MASK 0x3fff0000
+#define CRTC_H_SYNC_A__CRTC_H_SYNC_A_END__SHIFT 0x10
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_POL_MASK 0x1
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_POL__SHIFT 0x0
+#define CRTC_H_SYNC_A_CNTL__CRTC_COMP_SYNC_A_EN_MASK 0x10000
+#define CRTC_H_SYNC_A_CNTL__CRTC_COMP_SYNC_A_EN__SHIFT 0x10
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_CUTOFF_MASK 0x20000
+#define CRTC_H_SYNC_A_CNTL__CRTC_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_START_MASK 0x3fff
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_START__SHIFT 0x0
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_END_MASK 0x3fff0000
+#define CRTC_H_SYNC_B__CRTC_H_SYNC_B_END__SHIFT 0x10
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_POL_MASK 0x1
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_POL__SHIFT 0x0
+#define CRTC_H_SYNC_B_CNTL__CRTC_COMP_SYNC_B_EN_MASK 0x10000
+#define CRTC_H_SYNC_B_CNTL__CRTC_COMP_SYNC_B_EN__SHIFT 0x10
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_CUTOFF_MASK 0x20000
+#define CRTC_H_SYNC_B_CNTL__CRTC_H_SYNC_B_CUTOFF__SHIFT 0x11
+#define CRTC_VBI_END__CRTC_VBI_V_END_MASK 0x3fff
+#define CRTC_VBI_END__CRTC_VBI_V_END__SHIFT 0x0
+#define CRTC_VBI_END__CRTC_VBI_H_END_MASK 0x3fff0000
+#define CRTC_VBI_END__CRTC_VBI_H_END__SHIFT 0x10
+#define CRTC_V_TOTAL__CRTC_V_TOTAL_MASK 0x3fff
+#define CRTC_V_TOTAL__CRTC_V_TOTAL__SHIFT 0x0
+#define CRTC_V_TOTAL_MIN__CRTC_V_TOTAL_MIN_MASK 0x3fff
+#define CRTC_V_TOTAL_MIN__CRTC_V_TOTAL_MIN__SHIFT 0x0
+#define CRTC_V_TOTAL_MAX__CRTC_V_TOTAL_MAX_MASK 0x3fff
+#define CRTC_V_TOTAL_MAX__CRTC_V_TOTAL_MAX__SHIFT 0x0
+#define CRTC_V_TOTAL_MAX__CRTC_ALLOW_VBLANK_EXTENSION_FOR_MC_TRAINING_MASK 0x10000
+#define CRTC_V_TOTAL_MAX__CRTC_ALLOW_VBLANK_EXTENSION_FOR_MC_TRAINING__SHIFT 0x10
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL_MASK 0x1
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL_MASK 0x10
+#define CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL__SHIFT 0x4
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT_MASK 0x100
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT__SHIFT 0x8
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC_MASK 0x1000
+#define CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC__SHIFT 0xc
+#define CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK_EN_MASK 0x8000
+#define CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK_EN__SHIFT 0xf
+#define CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK_MASK 0xffff0000
+#define CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_MASK 0x1
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED__SHIFT 0x0
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x10
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x4
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK_MASK 0x100
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK__SHIFT 0x8
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK_MASK 0x1000
+#define CRTC_V_TOTAL_INT_STATUS__CRTC_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK__SHIFT 0xc
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM_MASK 0x1
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM__SHIFT 0x0
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM_INT_CLEAR_MASK 0x10
+#define CRTC_VSYNC_NOM_INT_STATUS__CRTC_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK 0x3fff
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_START__SHIFT 0x0
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_END_MASK 0x3fff0000
+#define CRTC_V_BLANK_START_END__CRTC_V_BLANK_END__SHIFT 0x10
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_START_MASK 0x3fff
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_START__SHIFT 0x0
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_END_MASK 0x3fff0000
+#define CRTC_V_SYNC_A__CRTC_V_SYNC_A_END__SHIFT 0x10
+#define CRTC_V_SYNC_A_CNTL__CRTC_V_SYNC_A_POL_MASK 0x1
+#define CRTC_V_SYNC_A_CNTL__CRTC_V_SYNC_A_POL__SHIFT 0x0
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_START_MASK 0x3fff
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_START__SHIFT 0x0
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_END_MASK 0x3fff0000
+#define CRTC_V_SYNC_B__CRTC_V_SYNC_B_END__SHIFT 0x10
+#define CRTC_V_SYNC_B_CNTL__CRTC_V_SYNC_B_POL_MASK 0x1
+#define CRTC_V_SYNC_B_CNTL__CRTC_V_SYNC_B_POL__SHIFT 0x0
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CRTC_EN_MASK 0x1
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CRTC_EN__SHIFT 0x0
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CLK_DIV_MASK 0x1e
+#define CRTC_DTMTEST_CNTL__CRTC_DTMTEST_CLK_DIV__SHIFT 0x1
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_VERT_COUNT_MASK 0x3fff
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_VERT_COUNT__SHIFT 0x0
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_HORZ_COUNT_MASK 0x3fff0000
+#define CRTC_DTMTEST_STATUS_POSITION__CRTC_DTMTEST_HORZ_COUNT__SHIFT 0x10
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_SOURCE_SELECT_MASK 0x1f
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_SELECT_MASK 0xe0
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_SELECT__SHIFT 0x5
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RESYNC_BYPASS_EN_MASK 0x100
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RESYNC_BYPASS_EN__SHIFT 0x8
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_INPUT_STATUS_MASK 0x200
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_INPUT_STATUS__SHIFT 0x9
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_STATUS_MASK 0x400
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_POLARITY_STATUS__SHIFT 0xa
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_OCCURRED_MASK 0x800
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_OCCURRED__SHIFT 0xb
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x3000
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0xc
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x30000
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FREQUENCY_SELECT_MASK 0x300000
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_DELAY_MASK 0x1f000000
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_DELAY__SHIFT 0x18
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_CLEAR_MASK 0x80000000
+#define CRTC_TRIGA_CNTL__CRTC_TRIGA_CLEAR__SHIFT 0x1f
+#define CRTC_TRIGA_MANUAL_TRIG__CRTC_TRIGA_MANUAL_TRIG_MASK 0x1
+#define CRTC_TRIGA_MANUAL_TRIG__CRTC_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_SOURCE_SELECT_MASK 0x1f
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_SELECT_MASK 0xe0
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_SELECT__SHIFT 0x5
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RESYNC_BYPASS_EN_MASK 0x100
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RESYNC_BYPASS_EN__SHIFT 0x8
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_INPUT_STATUS_MASK 0x200
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_INPUT_STATUS__SHIFT 0x9
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_STATUS_MASK 0x400
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_STATUS__SHIFT 0xa
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_OCCURRED_MASK 0x800
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_OCCURRED__SHIFT 0xb
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x3000
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0xc
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x30000
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FREQUENCY_SELECT_MASK 0x300000
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_DELAY_MASK 0x1f000000
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_DELAY__SHIFT 0x18
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_CLEAR_MASK 0x80000000
+#define CRTC_TRIGB_CNTL__CRTC_TRIGB_CLEAR__SHIFT 0x1f
+#define CRTC_TRIGB_MANUAL_TRIG__CRTC_TRIGB_MANUAL_TRIG_MASK 0x1
+#define CRTC_TRIGB_MANUAL_TRIG__CRTC_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_MODE_MASK 0x3
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CHECK_MASK 0x10
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x100
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_OCCURRED_MASK 0x10000
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CLEAR_MASK 0x1000000
+#define CRTC_FORCE_COUNT_NOW_CNTL__CRTC_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_SOURCE_SELECT_MASK 0x1f
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_SOURCE_SELECT__SHIFT 0x0
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_POLARITY_MASK 0x100
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_POLARITY__SHIFT 0x8
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_GRANULARITY_MASK 0x10000
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_GRANULARITY__SHIFT 0x10
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_INPUT_STATUS_MASK 0x1000000
+#define CRTC_FLOW_CONTROL__CRTC_FLOW_CONTROL_INPUT_STATUS__SHIFT 0x18
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_STEREO_FORCE_NEXT_EYE_MASK 0x3
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_AVSYNC_FRAME_COUNTER_MASK 0xff00
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_AVSYNC_FRAME_COUNTER__SHIFT 0x8
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_AVSYNC_LINE_COUNTER_MASK 0x1fff0000
+#define CRTC_STEREO_FORCE_NEXT_EYE__CRTC_AVSYNC_LINE_COUNTER__SHIFT 0x10
+#define CRTC_AVSYNC_COUNTER__CRTC_AVSYNC_COUNTER_MASK 0xffffffff
+#define CRTC_AVSYNC_COUNTER__CRTC_AVSYNC_COUNTER__SHIFT 0x0
+#define CRTC_CONTROL__CRTC_MASTER_EN_MASK 0x1
+#define CRTC_CONTROL__CRTC_MASTER_EN__SHIFT 0x0
+#define CRTC_CONTROL__CRTC_SYNC_RESET_SEL_MASK 0x10
+#define CRTC_CONTROL__CRTC_SYNC_RESET_SEL__SHIFT 0x4
+#define CRTC_CONTROL__CRTC_DISABLE_POINT_CNTL_MASK 0x300
+#define CRTC_CONTROL__CRTC_DISABLE_POINT_CNTL__SHIFT 0x8
+#define CRTC_CONTROL__CRTC_START_POINT_CNTL_MASK 0x1000
+#define CRTC_CONTROL__CRTC_START_POINT_CNTL__SHIFT 0xc
+#define CRTC_CONTROL__CRTC_FIELD_NUMBER_CNTL_MASK 0x2000
+#define CRTC_CONTROL__CRTC_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define CRTC_CONTROL__CRTC_FIELD_NUMBER_POLARITY_MASK 0x4000
+#define CRTC_CONTROL__CRTC_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define CRTC_CONTROL__CRTC_CURRENT_MASTER_EN_STATE_MASK 0x10000
+#define CRTC_CONTROL__CRTC_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define CRTC_CONTROL__CRTC_HBLANK_EARLY_CONTROL_MASK 0x700000
+#define CRTC_CONTROL__CRTC_HBLANK_EARLY_CONTROL__SHIFT 0x14
+#define CRTC_CONTROL__CRTC_DISP_READ_REQUEST_DISABLE_MASK 0x1000000
+#define CRTC_CONTROL__CRTC_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define CRTC_CONTROL__CRTC_SOF_PULL_EN_MASK 0x20000000
+#define CRTC_CONTROL__CRTC_SOF_PULL_EN__SHIFT 0x1d
+#define CRTC_CONTROL__CRTC_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000
+#define CRTC_CONTROL__CRTC_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define CRTC_CONTROL__CRTC_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000
+#define CRTC_CONTROL__CRTC_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define CRTC_BLANK_CONTROL__CRTC_CURRENT_BLANK_STATE_MASK 0x1
+#define CRTC_BLANK_CONTROL__CRTC_CURRENT_BLANK_STATE__SHIFT 0x0
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK 0x100
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN__SHIFT 0x8
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DE_MODE_MASK 0x10000
+#define CRTC_BLANK_CONTROL__CRTC_BLANK_DE_MODE__SHIFT 0x10
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_ENABLE_MASK 0x1
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_ENABLE__SHIFT 0x0
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_FORCE_NEXT_FIELD_MASK 0x30000
+#define CRTC_INTERLACE_CONTROL__CRTC_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_CURRENT_FIELD_MASK 0x1
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_NEXT_FIELD_MASK 0x2
+#define CRTC_INTERLACE_STATUS__CRTC_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define CRTC_FIELD_INDICATION_CONTROL__CRTC_FIELD_INDICATION_OUTPUT_POLARITY_MASK 0x1
+#define CRTC_FIELD_INDICATION_CONTROL__CRTC_FIELD_INDICATION_OUTPUT_POLARITY__SHIFT 0x0
+#define CRTC_FIELD_INDICATION_CONTROL__CRTC_FIELD_ALIGNMENT_MASK 0x2
+#define CRTC_FIELD_INDICATION_CONTROL__CRTC_FIELD_ALIGNMENT__SHIFT 0x1
+#define CRTC_PIXEL_DATA_READBACK0__CRTC_PIXEL_DATA_BLUE_CB_MASK 0xfff
+#define CRTC_PIXEL_DATA_READBACK0__CRTC_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define CRTC_PIXEL_DATA_READBACK0__CRTC_PIXEL_DATA_GREEN_Y_MASK 0xfff0000
+#define CRTC_PIXEL_DATA_READBACK0__CRTC_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define CRTC_PIXEL_DATA_READBACK1__CRTC_PIXEL_DATA_RED_CR_MASK 0xfff
+#define CRTC_PIXEL_DATA_READBACK1__CRTC_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define CRTC_STATUS__CRTC_V_BLANK_MASK 0x1
+#define CRTC_STATUS__CRTC_V_BLANK__SHIFT 0x0
+#define CRTC_STATUS__CRTC_V_ACTIVE_DISP_MASK 0x2
+#define CRTC_STATUS__CRTC_V_ACTIVE_DISP__SHIFT 0x1
+#define CRTC_STATUS__CRTC_V_SYNC_A_MASK 0x4
+#define CRTC_STATUS__CRTC_V_SYNC_A__SHIFT 0x2
+#define CRTC_STATUS__CRTC_V_UPDATE_MASK 0x8
+#define CRTC_STATUS__CRTC_V_UPDATE__SHIFT 0x3
+#define CRTC_STATUS__CRTC_V_START_LINE_MASK 0x10
+#define CRTC_STATUS__CRTC_V_START_LINE__SHIFT 0x4
+#define CRTC_STATUS__CRTC_V_BLANK_3D_STRUCTURE_MASK 0x20
+#define CRTC_STATUS__CRTC_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define CRTC_STATUS__CRTC_H_BLANK_MASK 0x10000
+#define CRTC_STATUS__CRTC_H_BLANK__SHIFT 0x10
+#define CRTC_STATUS__CRTC_H_ACTIVE_DISP_MASK 0x20000
+#define CRTC_STATUS__CRTC_H_ACTIVE_DISP__SHIFT 0x11
+#define CRTC_STATUS__CRTC_H_SYNC_A_MASK 0x40000
+#define CRTC_STATUS__CRTC_H_SYNC_A__SHIFT 0x12
+#define CRTC_STATUS_POSITION__CRTC_VERT_COUNT_MASK 0x3fff
+#define CRTC_STATUS_POSITION__CRTC_VERT_COUNT__SHIFT 0x0
+#define CRTC_STATUS_POSITION__CRTC_HORZ_COUNT_MASK 0x3fff0000
+#define CRTC_STATUS_POSITION__CRTC_HORZ_COUNT__SHIFT 0x10
+#define CRTC_NOM_VERT_POSITION__CRTC_VERT_COUNT_NOM_MASK 0x3fff
+#define CRTC_NOM_VERT_POSITION__CRTC_VERT_COUNT_NOM__SHIFT 0x0
+#define CRTC_STATUS_FRAME_COUNT__CRTC_FRAME_COUNT_MASK 0xffffff
+#define CRTC_STATUS_FRAME_COUNT__CRTC_FRAME_COUNT__SHIFT 0x0
+#define CRTC_STATUS_VF_COUNT__CRTC_VF_COUNT_MASK 0x3fffffff
+#define CRTC_STATUS_VF_COUNT__CRTC_VF_COUNT__SHIFT 0x0
+#define CRTC_STATUS_HV_COUNT__CRTC_HV_COUNT_MASK 0x3fffffff
+#define CRTC_STATUS_HV_COUNT__CRTC_HV_COUNT__SHIFT 0x0
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_COUNT_BY2_EN_MASK 0x1
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_REPETITION_COUNT_MASK 0x1e
+#define CRTC_COUNT_CONTROL__CRTC_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define CRTC_COUNT_RESET__CRTC_RESET_FRAME_COUNT_MASK 0x1
+#define CRTC_COUNT_RESET__CRTC_RESET_FRAME_COUNT__SHIFT 0x0
+#define CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE__CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x1
+#define CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE__CRTC_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x1
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x100
+#define CRTC_VERT_SYNC_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define CRTC_VERT_SYNC_CONTROL__CRTC_AUTO_FORCE_VSYNC_MODE_MASK 0x30000
+#define CRTC_VERT_SYNC_CONTROL__CRTC_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define CRTC_STEREO_STATUS__CRTC_STEREO_CURRENT_EYE_MASK 0x1
+#define CRTC_STEREO_STATUS__CRTC_STEREO_CURRENT_EYE__SHIFT 0x0
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_OUTPUT_MASK 0x100
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_SELECT_MASK 0x10000
+#define CRTC_STEREO_STATUS__CRTC_STEREO_SYNC_SELECT__SHIFT 0x10
+#define CRTC_STEREO_STATUS__CRTC_STEREO_EYE_FLAG_MASK 0x100000
+#define CRTC_STEREO_STATUS__CRTC_STEREO_EYE_FLAG__SHIFT 0x14
+#define CRTC_STEREO_STATUS__CRTC_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x3000000
+#define CRTC_STEREO_STATUS__CRTC_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x3fff
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x8000
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_SELECT_POLARITY_MASK 0x10000
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_SYNC_SELECT_POLARITY__SHIFT 0x10
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_EYE_FLAG_POLARITY_MASK 0x20000
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define CRTC_STEREO_CONTROL__CRTC_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x40000
+#define CRTC_STEREO_CONTROL__CRTC_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define CRTC_STEREO_CONTROL__CRTC_DISABLE_FIELD_NUM_MASK 0x80000
+#define CRTC_STEREO_CONTROL__CRTC_DISABLE_FIELD_NUM__SHIFT 0x13
+#define CRTC_STEREO_CONTROL__CRTC_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x100000
+#define CRTC_STEREO_CONTROL__CRTC_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_EN_MASK 0x1000000
+#define CRTC_STEREO_CONTROL__CRTC_STEREO_EN__SHIFT 0x18
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_OCCURRED_MASK 0x1
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_CLEAR_MASK 0x2
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_CLEAR__SHIFT 0x1
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_MANUAL_TRIGGER_MASK 0x4
+#define CRTC_SNAPSHOT_STATUS__CRTC_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define CRTC_SNAPSHOT_CONTROL__CRTC_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x3
+#define CRTC_SNAPSHOT_CONTROL__CRTC_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_VERT_COUNT_MASK 0x3fff
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_HORZ_COUNT_MASK 0x3fff0000
+#define CRTC_SNAPSHOT_POSITION__CRTC_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define CRTC_SNAPSHOT_FRAME__CRTC_SNAPSHOT_FRAME_COUNT_MASK 0xffffff
+#define CRTC_SNAPSHOT_FRAME__CRTC_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define CRTC_START_LINE_CONTROL__CRTC_PROGRESSIVE_START_LINE_EARLY_MASK 0x1
+#define CRTC_START_LINE_CONTROL__CRTC_PROGRESSIVE_START_LINE_EARLY__SHIFT 0x0
+#define CRTC_START_LINE_CONTROL__CRTC_INTERLACE_START_LINE_EARLY_MASK 0x2
+#define CRTC_START_LINE_CONTROL__CRTC_INTERLACE_START_LINE_EARLY__SHIFT 0x1
+#define CRTC_START_LINE_CONTROL__CRTC_PREFETCH_EN_MASK 0x4
+#define CRTC_START_LINE_CONTROL__CRTC_PREFETCH_EN__SHIFT 0x2
+#define CRTC_START_LINE_CONTROL__CRTC_LEGACY_REQUESTOR_EN_MASK 0x100
+#define CRTC_START_LINE_CONTROL__CRTC_LEGACY_REQUESTOR_EN__SHIFT 0x8
+#define CRTC_START_LINE_CONTROL__CRTC_ADVANCED_START_LINE_POSITION_MASK 0xff000
+#define CRTC_START_LINE_CONTROL__CRTC_ADVANCED_START_LINE_POSITION__SHIFT 0xc
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_MSK_MASK 0x1
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_TYPE_MASK 0x2
+#define CRTC_INTERRUPT_CONTROL__CRTC_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK 0x10
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK__SHIFT 0x4
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_TYPE_MASK 0x20
+#define CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_TYPE__SHIFT 0x5
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_MSK_MASK 0x100
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_TYPE_MASK 0x200
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x10000
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x20000
+#define CRTC_INTERRUPT_CONTROL__CRTC_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_MSK_MASK 0x1000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_MSK__SHIFT 0x18
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_MSK_MASK 0x2000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_MSK__SHIFT 0x19
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_TYPE_MASK 0x4000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGA_INT_TYPE__SHIFT 0x1a
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_TYPE_MASK 0x8000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_TRIGB_INT_TYPE__SHIFT 0x1b
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_MSK_MASK 0x10000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_TYPE_MASK 0x20000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000
+#define CRTC_INTERRUPT_CONTROL__CRTC_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define CRTC_UPDATE_LOCK__CRTC_UPDATE_LOCK_MASK 0x1
+#define CRTC_UPDATE_LOCK__CRTC_UPDATE_LOCK__SHIFT 0x0
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_PENDING_MASK 0x1
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_PENDING__SHIFT 0x0
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_INSTANTLY_MASK 0x100
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_UPDATE_INSTANTLY__SHIFT 0x8
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x10000
+#define CRTC_DOUBLE_BUFFER_CONTROL__CRTC_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x10
+#define CRTC_VGA_PARAMETER_CAPTURE_MODE__CRTC_VGA_PARAMETER_CAPTURE_MODE_MASK 0x1
+#define CRTC_VGA_PARAMETER_CAPTURE_MODE__CRTC_VGA_PARAMETER_CAPTURE_MODE__SHIFT 0x0
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_EN_MASK 0x1
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_EN__SHIFT 0x0
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_MODE_MASK 0x700
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_MODE__SHIFT 0x8
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_DYNAMIC_RANGE_MASK 0x10000
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_DYNAMIC_RANGE__SHIFT 0x10
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_COLOR_FORMAT_MASK 0xff000000
+#define CRTC_TEST_PATTERN_CONTROL__CRTC_TEST_PATTERN_COLOR_FORMAT__SHIFT 0x18
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC0_MASK 0xf
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC0__SHIFT 0x0
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC1_MASK 0xf0
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_INC1__SHIFT 0x4
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_VRES_MASK 0xf00
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_VRES__SHIFT 0x8
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_HRES_MASK 0xf000
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_HRES__SHIFT 0xc
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_RAMP0_OFFSET_MASK 0xffff0000
+#define CRTC_TEST_PATTERN_PARAMETERS__CRTC_TEST_PATTERN_RAMP0_OFFSET__SHIFT 0x10
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_DATA_MASK 0xffff
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_DATA__SHIFT 0x0
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_MASK_MASK 0x3f0000
+#define CRTC_TEST_PATTERN_COLOR__CRTC_TEST_PATTERN_MASK__SHIFT 0x10
+#define CRTC_MASTER_UPDATE_LOCK__MASTER_UPDATE_LOCK_MASK 0x1
+#define CRTC_MASTER_UPDATE_LOCK__MASTER_UPDATE_LOCK__SHIFT 0x0
+#define CRTC_MASTER_UPDATE_LOCK__GSL_CONTROL_MASTER_UPDATE_LOCK_MASK 0x100
+#define CRTC_MASTER_UPDATE_LOCK__GSL_CONTROL_MASTER_UPDATE_LOCK__SHIFT 0x8
+#define CRTC_MASTER_UPDATE_LOCK__UNDERFLOW_UPDATE_LOCK_MASK 0x10000
+#define CRTC_MASTER_UPDATE_LOCK__UNDERFLOW_UPDATE_LOCK__SHIFT 0x10
+#define CRTC_MASTER_UPDATE_MODE__MASTER_UPDATE_MODE_MASK 0x7
+#define CRTC_MASTER_UPDATE_MODE__MASTER_UPDATE_MODE__SHIFT 0x0
+#define CRTC_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x30000
+#define CRTC_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x10
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_OUT_MODE_MASK 0x3
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_OUT_MODE__SHIFT 0x0
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_CNTL_CHAR_INSERT_MASK 0xffffff00
+#define CRTC_MVP_INBAND_CNTL_INSERT__CRTC_MVP_INBAND_CNTL_CHAR_INSERT__SHIFT 0x8
+#define CRTC_MVP_INBAND_CNTL_INSERT_TIMER__CRTC_MVP_INBAND_CNTL_CHAR_INSERT_TIMER_MASK 0xff
+#define CRTC_MVP_INBAND_CNTL_INSERT_TIMER__CRTC_MVP_INBAND_CNTL_CHAR_INSERT_TIMER__SHIFT 0x0
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_OCCURRED_MASK 0x1
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_OCCURRED__SHIFT 0x0
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_OCCURRED_MASK 0x10
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_OCCURRED__SHIFT 0x4
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_CLEAR_MASK 0x10000
+#define CRTC_MVP_STATUS__CRTC_FLIP_NOW_CLEAR__SHIFT 0x10
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR_MASK 0x100000
+#define CRTC_MVP_STATUS__CRTC_AFR_HSYNC_SWITCH_DONE_CLEAR__SHIFT 0x14
+#define CRTC_MASTER_EN__CRTC_MASTER_EN_MASK 0x1
+#define CRTC_MASTER_EN__CRTC_MASTER_EN__SHIFT 0x0
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_ALLOW_STOP_OFF_V_CNT_MASK 0xff
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_ALLOW_STOP_OFF_V_CNT__SHIFT 0x0
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_DISABLE_ALLOW_STOP_OFF_V_CNT_MASK 0x10000
+#define CRTC_ALLOW_STOP_OFF_V_CNT__CRTC_DISABLE_ALLOW_STOP_OFF_V_CNT__SHIFT 0x10
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_OCCURRED_MASK 0x1
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_OCCURRED__SHIFT 0x0
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK 0x100
+#define CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR__SHIFT 0x8
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_BLUE_MASK 0x3ff
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_BLUE__SHIFT 0x0
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_GREEN_MASK 0xffc00
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_GREEN__SHIFT 0xa
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_RED_MASK 0x3ff00000
+#define CRTC_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_RED__SHIFT 0x14
+#define CRTC_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_BLUE_EXT_MASK 0x3
+#define CRTC_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_BLUE_EXT__SHIFT 0x0
+#define CRTC_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_GREEN_EXT_MASK 0x300
+#define CRTC_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_GREEN_EXT__SHIFT 0x8
+#define CRTC_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_RED_EXT_MASK 0x30000
+#define CRTC_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_RED_EXT__SHIFT 0x10
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_BLUE_CB_MASK 0x3ff
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x0
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_GREEN_Y_MASK 0xffc00
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0xa
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_RED_CR_MASK 0x3ff00000
+#define CRTC_BLANK_DATA_COLOR__CRTC_BLANK_DATA_COLOR_RED_CR__SHIFT 0x14
+#define CRTC_BLANK_DATA_COLOR_EXT__CRTC_BLANK_DATA_COLOR_BLUE_CB_EXT_MASK 0x3
+#define CRTC_BLANK_DATA_COLOR_EXT__CRTC_BLANK_DATA_COLOR_BLUE_CB_EXT__SHIFT 0x0
+#define CRTC_BLANK_DATA_COLOR_EXT__CRTC_BLANK_DATA_COLOR_GREEN_Y_EXT_MASK 0x300
+#define CRTC_BLANK_DATA_COLOR_EXT__CRTC_BLANK_DATA_COLOR_GREEN_Y_EXT__SHIFT 0x8
+#define CRTC_BLANK_DATA_COLOR_EXT__CRTC_BLANK_DATA_COLOR_RED_CR_EXT_MASK 0x30000
+#define CRTC_BLANK_DATA_COLOR_EXT__CRTC_BLANK_DATA_COLOR_RED_CR_EXT__SHIFT 0x10
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_B_CB_MASK 0x3ff
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_B_CB__SHIFT 0x0
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_G_Y_MASK 0xffc00
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_G_Y__SHIFT 0xa
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_R_CR_MASK 0x3ff00000
+#define CRTC_BLACK_COLOR__CRTC_BLACK_COLOR_R_CR__SHIFT 0x14
+#define CRTC_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_B_CB_EXT_MASK 0x3
+#define CRTC_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define CRTC_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_G_Y_EXT_MASK 0x300
+#define CRTC_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define CRTC_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_R_CR_EXT_MASK 0x30000
+#define CRTC_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define CRTC_VERTICAL_INTERRUPT0_POSITION__CRTC_VERTICAL_INTERRUPT0_LINE_START_MASK 0x3fff
+#define CRTC_VERTICAL_INTERRUPT0_POSITION__CRTC_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define CRTC_VERTICAL_INTERRUPT0_POSITION__CRTC_VERTICAL_INTERRUPT0_LINE_END_MASK 0x3fff0000
+#define CRTC_VERTICAL_INTERRUPT0_POSITION__CRTC_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x10
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x100
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_STATUS_MASK 0x1000
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x10000
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK 0x100000
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x1000000
+#define CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define CRTC_VERTICAL_INTERRUPT1_POSITION__CRTC_VERTICAL_INTERRUPT1_LINE_START_MASK 0x3fff
+#define CRTC_VERTICAL_INTERRUPT1_POSITION__CRTC_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x100
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_STATUS_MASK 0x1000
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x10000
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_CLEAR_MASK 0x100000
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x1000000
+#define CRTC_VERTICAL_INTERRUPT1_CONTROL__CRTC_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define CRTC_VERTICAL_INTERRUPT2_POSITION__CRTC_VERTICAL_INTERRUPT2_LINE_START_MASK 0x3fff
+#define CRTC_VERTICAL_INTERRUPT2_POSITION__CRTC_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x100
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_STATUS_MASK 0x1000
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x10000
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_CLEAR_MASK 0x100000
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x1000000
+#define CRTC_VERTICAL_INTERRUPT2_CONTROL__CRTC_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define CRTC_CRC_CNTL__CRTC_CRC_EN_MASK 0x1
+#define CRTC_CRC_CNTL__CRTC_CRC_EN__SHIFT 0x0
+#define CRTC_CRC_CNTL__CRTC_CRC_CONT_EN_MASK 0x10
+#define CRTC_CRC_CNTL__CRTC_CRC_CONT_EN__SHIFT 0x4
+#define CRTC_CRC_CNTL__CRTC_CRC_STEREO_MODE_MASK 0x300
+#define CRTC_CRC_CNTL__CRTC_CRC_STEREO_MODE__SHIFT 0x8
+#define CRTC_CRC_CNTL__CRTC_CRC_INTERLACE_MODE_MASK 0x3000
+#define CRTC_CRC_CNTL__CRTC_CRC_INTERLACE_MODE__SHIFT 0xc
+#define CRTC_CRC_CNTL__CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x10000
+#define CRTC_CRC_CNTL__CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x10
+#define CRTC_CRC_CNTL__CRTC_CRC0_SELECT_MASK 0x700000
+#define CRTC_CRC_CNTL__CRTC_CRC0_SELECT__SHIFT 0x14
+#define CRTC_CRC_CNTL__CRTC_CRC1_SELECT_MASK 0x7000000
+#define CRTC_CRC_CNTL__CRTC_CRC1_SELECT__SHIFT 0x18
+#define CRTC_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_START_MASK 0x3fff
+#define CRTC_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define CRTC_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_END_MASK 0x3fff0000
+#define CRTC_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define CRTC_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_START_MASK 0x3fff
+#define CRTC_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define CRTC_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_END_MASK 0x3fff0000
+#define CRTC_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define CRTC_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_START_MASK 0x3fff
+#define CRTC_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define CRTC_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_END_MASK 0x3fff0000
+#define CRTC_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define CRTC_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_START_MASK 0x3fff
+#define CRTC_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define CRTC_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_END_MASK 0x3fff0000
+#define CRTC_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define CRTC_CRC0_DATA_RG__CRC0_R_CR_MASK 0xffff
+#define CRTC_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define CRTC_CRC0_DATA_RG__CRC0_G_Y_MASK 0xffff0000
+#define CRTC_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define CRTC_CRC0_DATA_B__CRC0_B_CB_MASK 0xffff
+#define CRTC_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define CRTC_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_START_MASK 0x3fff
+#define CRTC_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define CRTC_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_END_MASK 0x3fff0000
+#define CRTC_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define CRTC_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_START_MASK 0x3fff
+#define CRTC_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define CRTC_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_END_MASK 0x3fff0000
+#define CRTC_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define CRTC_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_START_MASK 0x3fff
+#define CRTC_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define CRTC_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_END_MASK 0x3fff0000
+#define CRTC_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define CRTC_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_START_MASK 0x3fff
+#define CRTC_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define CRTC_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_END_MASK 0x3fff0000
+#define CRTC_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define CRTC_CRC1_DATA_RG__CRC1_R_CR_MASK 0xffff
+#define CRTC_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define CRTC_CRC1_DATA_RG__CRC1_G_Y_MASK 0xffff0000
+#define CRTC_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define CRTC_CRC1_DATA_B__CRC1_B_CB_MASK 0xffff
+#define CRTC_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_ENABLE_MASK 0x3
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_ENABLE__SHIFT 0x0
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_HCOUNT_MODE_ENABLE_MASK 0x8
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_HCOUNT_MODE_ENABLE__SHIFT 0x3
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_ENABLE_MASK 0x10
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_ENABLE__SHIFT 0x4
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW_MASK 0x60
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_JITTER_FILTERING_WINDOW__SHIFT 0x5
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_WINDOW_ENABLE_MASK 0x100
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_WINDOW_ENABLE__SHIFT 0x8
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_WINDOW_UPDATE_MASK 0x200
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_WINDOW_UPDATE__SHIFT 0x9
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_VSYNC_POLARITY_MASK 0x1000
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_VSYNC_POLARITY__SHIFT 0xc
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_HSYNC_POLARITY_MASK 0x2000
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_HSYNC_POLARITY__SHIFT 0xd
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_INTERLACE_MODE_MASK 0x4000
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_INTERLACE_MODE__SHIFT 0xe
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_MASTER_FRAME_RATE_MASK 0x7000000
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_MASTER_FRAME_RATE__SHIFT 0x18
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_SLAVE_FRAME_RATE_MASK 0x70000000
+#define CRTC_EXT_TIMING_SYNC_CONTROL__CRTC_EXT_TIMING_SYNC_SLAVE_FRAME_RATE__SHIFT 0x1c
+#define CRTC_EXT_TIMING_SYNC_WINDOW_START__CRTC_EXT_TIMING_SYNC_WINDOW_START_X_MASK 0x3fff
+#define CRTC_EXT_TIMING_SYNC_WINDOW_START__CRTC_EXT_TIMING_SYNC_WINDOW_START_X__SHIFT 0x0
+#define CRTC_EXT_TIMING_SYNC_WINDOW_START__CRTC_EXT_TIMING_SYNC_WINDOW_START_Y_MASK 0x3fff0000
+#define CRTC_EXT_TIMING_SYNC_WINDOW_START__CRTC_EXT_TIMING_SYNC_WINDOW_START_Y__SHIFT 0x10
+#define CRTC_EXT_TIMING_SYNC_WINDOW_END__CRTC_EXT_TIMING_SYNC_WINDOW_END_X_MASK 0x3fff
+#define CRTC_EXT_TIMING_SYNC_WINDOW_END__CRTC_EXT_TIMING_SYNC_WINDOW_END_X__SHIFT 0x0
+#define CRTC_EXT_TIMING_SYNC_WINDOW_END__CRTC_EXT_TIMING_SYNC_WINDOW_END_Y_MASK 0x3fff0000
+#define CRTC_EXT_TIMING_SYNC_WINDOW_END__CRTC_EXT_TIMING_SYNC_WINDOW_END_Y__SHIFT 0x10
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_INT_ENABLE_MASK 0x1
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_INT_ENABLE__SHIFT 0x0
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_STATUS_MASK 0x10
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_STATUS__SHIFT 0x4
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_INT_STATUS_MASK 0x100
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_INT_STATUS__SHIFT 0x8
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_CLEAR_MASK 0x10000
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_CLEAR__SHIFT 0x10
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_INT_TYPE_MASK 0x100000
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_INT_TYPE__SHIFT 0x14
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT_MASK 0xe0000000
+#define CRTC_EXT_TIMING_SYNC_LOSS_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_LOSS_FRAME_COUNT__SHIFT 0x1d
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_INT_ENABLE_MASK 0x1
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_INT_ENABLE__SHIFT 0x0
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_STATUS_MASK 0x10
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_STATUS__SHIFT 0x4
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_INT_STATUS_MASK 0x100
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_INT_STATUS__SHIFT 0x8
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_CLEAR_MASK 0x10000
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_CLEAR__SHIFT 0x10
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_INT_TYPE_MASK 0x100000
+#define CRTC_EXT_TIMING_SYNC_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_INT_TYPE__SHIFT 0x14
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_INT_ENABLE_MASK 0x1
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_INT_ENABLE__SHIFT 0x0
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_STATUS_MASK 0x10
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_STATUS__SHIFT 0x4
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_INT_STATUS_MASK 0x100
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_INT_STATUS__SHIFT 0x8
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_CLEAR_MASK 0x10000
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_CLEAR__SHIFT 0x10
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_INT_TYPE_MASK 0x100000
+#define CRTC_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_CONTROL__CRTC_EXT_TIMING_SYNC_SIGNAL_INT_TYPE__SHIFT 0x14
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_EVENT_MASK_MASK 0xffff
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_FRAME_COUNT_MASK 0xff0000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_ENABLE_MASK 0x1000000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_SS_STATUS_MASK 0x2000000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_SS_STATUS__SHIFT 0x19
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_STATUS_MASK 0x4000000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_CLEAR_MASK 0x8000000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_TYPE_MASK 0x10000000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_OVERRIDE_MASK 0x40000000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000
+#define CRTC_STATIC_SCREEN_CONTROL__CRTC_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN_MASK 0x1
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN__SHIFT 0x0
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN_DB_MASK 0x10
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_EN_DB__SHIFT 0x4
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x300
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x1000
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET_MASK 0x10000
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x20000
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT_MASK 0xc0000
+#define CRTC_3D_STRUCTURE_CONTROL__CRTC_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_LIMIT_MASK 0xff
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_DELAY_MASK 0xff00
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x10000
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MODE_MASK 0x60000
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_CLEAR_MASK 0x80000
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_OCCURRED_MASK 0x100000
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x800000
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP_MASK 0xff000000
+#define CRTC_GSL_VSYNC_GAP__CRTC_GSL_VSYNC_GAP__SHIFT 0x18
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_START_MASK 0x3fff
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_START__SHIFT 0x0
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_END_MASK 0x3fff0000
+#define CRTC_GSL_WINDOW__CRTC_GSL_WINDOW_END__SHIFT 0x10
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_LINE_NUM_MASK 0x3fff
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_LINE_NUM__SHIFT 0x0
+#define CRTC_GSL_CONTROL__CRTC_GSL_FORCE_DELAY_MASK 0x1f0000
+#define CRTC_GSL_CONTROL__CRTC_GSL_FORCE_DELAY__SHIFT 0x10
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_ALL_FIELDS_MASK 0x10000000
+#define CRTC_GSL_CONTROL__CRTC_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_INDEX_MASK 0xff
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define CRTC_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CRTC_TEST_DEBUG_DATA__CRTC_TEST_DEBUG_DATA_MASK 0xffffffff
+#define CRTC_TEST_DEBUG_DATA__CRTC_TEST_DEBUG_DATA__SHIFT 0x0
+#define DAC_ENABLE__DAC_ENABLE_MASK 0x1
+#define DAC_ENABLE__DAC_ENABLE__SHIFT 0x0
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ENABLE_MASK 0x2
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ENABLE__SHIFT 0x1
+#define DAC_ENABLE__DAC_RESYNC_FIFO_POINTER_SKEW_MASK 0xc
+#define DAC_ENABLE__DAC_RESYNC_FIFO_POINTER_SKEW__SHIFT 0x2
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR_MASK 0x10
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR__SHIFT 0x4
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR_ACK_MASK 0x20
+#define DAC_ENABLE__DAC_RESYNC_FIFO_ERROR_ACK__SHIFT 0x5
+#define DAC_ENABLE__DAC_RESYNC_FIFO_TVOUT_SIM_MASK 0x100
+#define DAC_ENABLE__DAC_RESYNC_FIFO_TVOUT_SIM__SHIFT 0x8
+#define DAC_SOURCE_SELECT__DAC_SOURCE_SELECT_MASK 0x7
+#define DAC_SOURCE_SELECT__DAC_SOURCE_SELECT__SHIFT 0x0
+#define DAC_SOURCE_SELECT__DAC_TV_SELECT_MASK 0x8
+#define DAC_SOURCE_SELECT__DAC_TV_SELECT__SHIFT 0x3
+#define DAC_CRC_EN__DAC_CRC_EN_MASK 0x1
+#define DAC_CRC_EN__DAC_CRC_EN__SHIFT 0x0
+#define DAC_CRC_EN__DAC_CRC_CONT_EN_MASK 0x10000
+#define DAC_CRC_EN__DAC_CRC_CONT_EN__SHIFT 0x10
+#define DAC_CRC_CONTROL__DAC_CRC_FIELD_MASK 0x1
+#define DAC_CRC_CONTROL__DAC_CRC_FIELD__SHIFT 0x0
+#define DAC_CRC_CONTROL__DAC_CRC_ONLY_BLANKB_MASK 0x100
+#define DAC_CRC_CONTROL__DAC_CRC_ONLY_BLANKB__SHIFT 0x8
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_BLUE_MASK_MASK 0x3ff
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_GREEN_MASK_MASK 0xffc00
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_GREEN_MASK__SHIFT 0xa
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_RED_MASK_MASK 0x3ff00000
+#define DAC_CRC_SIG_RGB_MASK__DAC_CRC_SIG_RED_MASK__SHIFT 0x14
+#define DAC_CRC_SIG_CONTROL_MASK__DAC_CRC_SIG_CONTROL_MASK_MASK 0x3f
+#define DAC_CRC_SIG_CONTROL_MASK__DAC_CRC_SIG_CONTROL_MASK__SHIFT 0x0
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_BLUE_MASK 0x3ff
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_BLUE__SHIFT 0x0
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_GREEN_MASK 0xffc00
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_GREEN__SHIFT 0xa
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_RED_MASK 0x3ff00000
+#define DAC_CRC_SIG_RGB__DAC_CRC_SIG_RED__SHIFT 0x14
+#define DAC_CRC_SIG_CONTROL__DAC_CRC_SIG_CONTROL_MASK 0x3f
+#define DAC_CRC_SIG_CONTROL__DAC_CRC_SIG_CONTROL__SHIFT 0x0
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_HSYNCA_TRISTATE_MASK 0x1
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_HSYNCA_TRISTATE__SHIFT 0x0
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_VSYNCA_TRISTATE_MASK 0x100
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_VSYNCA_TRISTATE__SHIFT 0x8
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_SYNCA_TRISTATE_MASK 0x10000
+#define DAC_SYNC_TRISTATE_CONTROL__DAC_SYNCA_TRISTATE__SHIFT 0x10
+#define DAC_STEREOSYNC_SELECT__DAC_STEREOSYNC_SELECT_MASK 0x7
+#define DAC_STEREOSYNC_SELECT__DAC_STEREOSYNC_SELECT__SHIFT 0x0
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_MODE_MASK 0x3
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_MODE__SHIFT 0x0
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_FRAME_TIME_COUNTER_MASK 0xff00
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_FRAME_TIME_COUNTER__SHIFT 0x8
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_CHECK_MASK_MASK 0x70000
+#define DAC_AUTODETECT_CONTROL__DAC_AUTODETECT_CHECK_MASK__SHIFT 0x10
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_POWERUP_COUNTER_MASK 0xff
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_POWERUP_COUNTER__SHIFT 0x0
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_TESTMODE_MASK 0x100
+#define DAC_AUTODETECT_CONTROL2__DAC_AUTODETECT_TESTMODE__SHIFT 0x8
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_IN_DELAY_MASK 0xff
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_IN_DELAY__SHIFT 0x0
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_OUT_DELAY_MASK 0xff00
+#define DAC_AUTODETECT_CONTROL3__DAC_AUTODET_COMPARATOR_OUT_DELAY__SHIFT 0x8
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_STATUS_MASK 0x1
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_STATUS__SHIFT 0x0
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_CONNECT_MASK 0x10
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_CONNECT__SHIFT 0x4
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_RED_SENSE_MASK 0x300
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_RED_SENSE__SHIFT 0x8
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_GREEN_SENSE_MASK 0x30000
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_GREEN_SENSE__SHIFT 0x10
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_BLUE_SENSE_MASK 0x3000000
+#define DAC_AUTODETECT_STATUS__DAC_AUTODETECT_BLUE_SENSE__SHIFT 0x18
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_ACK_MASK 0x1
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_ACK__SHIFT 0x0
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_INT_ENABLE_MASK 0x10000
+#define DAC_AUTODETECT_INT_CONTROL__DAC_AUTODETECT_INT_ENABLE__SHIFT 0x10
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_EN_MASK 0x1
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_EN__SHIFT 0x0
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_SEL_MASK 0x700
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_SEL__SHIFT 0x8
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_ON_BLANKB_ONLY_MASK 0x1000000
+#define DAC_FORCE_OUTPUT_CNTL__DAC_FORCE_DATA_ON_BLANKB_ONLY__SHIFT 0x18
+#define DAC_FORCE_DATA__DAC_FORCE_DATA_MASK 0x3ff
+#define DAC_FORCE_DATA__DAC_FORCE_DATA__SHIFT 0x0
+#define DAC_POWERDOWN__DAC_POWERDOWN_MASK 0x1
+#define DAC_POWERDOWN__DAC_POWERDOWN__SHIFT 0x0
+#define DAC_POWERDOWN__DAC_POWERDOWN_BLUE_MASK 0x100
+#define DAC_POWERDOWN__DAC_POWERDOWN_BLUE__SHIFT 0x8
+#define DAC_POWERDOWN__DAC_POWERDOWN_GREEN_MASK 0x10000
+#define DAC_POWERDOWN__DAC_POWERDOWN_GREEN__SHIFT 0x10
+#define DAC_POWERDOWN__DAC_POWERDOWN_RED_MASK 0x1000000
+#define DAC_POWERDOWN__DAC_POWERDOWN_RED__SHIFT 0x18
+#define DAC_CONTROL__DAC_DFORCE_EN_MASK 0x1
+#define DAC_CONTROL__DAC_DFORCE_EN__SHIFT 0x0
+#define DAC_CONTROL__DAC_TV_ENABLE_MASK 0x100
+#define DAC_CONTROL__DAC_TV_ENABLE__SHIFT 0x8
+#define DAC_CONTROL__DAC_ZSCALE_SHIFT_MASK 0x10000
+#define DAC_CONTROL__DAC_ZSCALE_SHIFT__SHIFT 0x10
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_DDET_REF_EN_MASK 0x1
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_DDET_REF_EN__SHIFT 0x0
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_SDET_REF_EN_MASK 0x100
+#define DAC_COMPARATOR_ENABLE__DAC_COMP_SDET_REF_EN__SHIFT 0x8
+#define DAC_COMPARATOR_ENABLE__DAC_R_ASYNC_ENABLE_MASK 0x10000
+#define DAC_COMPARATOR_ENABLE__DAC_R_ASYNC_ENABLE__SHIFT 0x10
+#define DAC_COMPARATOR_ENABLE__DAC_G_ASYNC_ENABLE_MASK 0x20000
+#define DAC_COMPARATOR_ENABLE__DAC_G_ASYNC_ENABLE__SHIFT 0x11
+#define DAC_COMPARATOR_ENABLE__DAC_B_ASYNC_ENABLE_MASK 0x40000
+#define DAC_COMPARATOR_ENABLE__DAC_B_ASYNC_ENABLE__SHIFT 0x12
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_MASK 0x1
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT__SHIFT 0x0
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_BLUE_MASK 0x2
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_BLUE__SHIFT 0x1
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_GREEN_MASK 0x4
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_GREEN__SHIFT 0x2
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_RED_MASK 0x8
+#define DAC_COMPARATOR_OUTPUT__DAC_COMPARATOR_OUTPUT_RED__SHIFT 0x3
+#define DAC_PWR_CNTL__DAC_BG_MODE_MASK 0x3
+#define DAC_PWR_CNTL__DAC_BG_MODE__SHIFT 0x0
+#define DAC_PWR_CNTL__DAC_PWRCNTL_MASK 0x30000
+#define DAC_PWR_CNTL__DAC_PWRCNTL__SHIFT 0x10
+#define DAC_DFT_CONFIG__DAC_DFT_CONFIG_MASK 0xffffffff
+#define DAC_DFT_CONFIG__DAC_DFT_CONFIG__SHIFT 0x0
+#define DAC_FIFO_STATUS__DAC_FIFO_USE_OVERWRITE_LEVEL_MASK 0x2
+#define DAC_FIFO_STATUS__DAC_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DAC_FIFO_STATUS__DAC_FIFO_OVERWRITE_LEVEL_MASK 0xfc
+#define DAC_FIFO_STATUS__DAC_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DAC_FIFO_STATUS__DAC_FIFO_CAL_AVERAGE_LEVEL_MASK 0xfc00
+#define DAC_FIFO_STATUS__DAC_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DAC_FIFO_STATUS__DAC_FIFO_MAXIMUM_LEVEL_MASK 0xf0000
+#define DAC_FIFO_STATUS__DAC_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DAC_FIFO_STATUS__DAC_FIFO_MINIMUM_LEVEL_MASK 0x3c00000
+#define DAC_FIFO_STATUS__DAC_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DAC_FIFO_STATUS__DAC_FIFO_CALIBRATED_MASK 0x20000000
+#define DAC_FIFO_STATUS__DAC_FIFO_CALIBRATED__SHIFT 0x1d
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000
+#define DAC_FIFO_STATUS__DAC_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DAC_TEST_DEBUG_INDEX__DAC_TEST_DEBUG_INDEX_MASK 0xff
+#define DAC_TEST_DEBUG_INDEX__DAC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DAC_TEST_DEBUG_INDEX__DAC_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DAC_TEST_DEBUG_INDEX__DAC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DAC_TEST_DEBUG_DATA__DAC_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DAC_TEST_DEBUG_DATA__DAC_TEST_DEBUG_DATA__SHIFT 0x0
+#define PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL_MASK 0x1ff
+#define PERFCOUNTER_CNTL__PERFCOUNTER_EVENT_SEL__SHIFT 0x0
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL_MASK 0xe00
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CVALUE_SEL__SHIFT 0x9
+#define PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE_MASK 0x3000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_INC_MODE__SHIFT 0xc
+#define PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL_MASK 0x4000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_HW_CNTL_SEL__SHIFT 0xe
+#define PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE_MASK 0x8000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_RUNEN_MODE__SHIFT 0xf
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_SEL_MASK 0x1f0000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_SEL__SHIFT 0x10
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS_MASK 0x200000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CNTOFF_START_DIS__SHIFT 0x15
+#define PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN_MASK 0x400000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_RESTART_EN__SHIFT 0x16
+#define PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN_MASK 0x800000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_INT_EN__SHIFT 0x17
+#define PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK_MASK 0x1000000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_OFF_MASK__SHIFT 0x18
+#define PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE_MASK 0x2000000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_ACTIVE__SHIFT 0x19
+#define PERFCOUNTER_CNTL__PERFCOUNTER_INT_TYPE_MASK 0x4000000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_INT_TYPE__SHIFT 0x1a
+#define PERFCOUNTER_CNTL__PERFCOUNTER_COUNTED_VALUE_TYPE_MASK 0x8000000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_COUNTED_VALUE_TYPE__SHIFT 0x1b
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL_MASK 0xe0000000
+#define PERFCOUNTER_CNTL__PERFCOUNTER_CNTL_SEL__SHIFT 0x1d
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE_MASK 0x3
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT0_STATE__SHIFT 0x0
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0_MASK 0x4
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL0__SHIFT 0x2
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE_MASK 0x30
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT1_STATE__SHIFT 0x4
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1_MASK 0x40
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL1__SHIFT 0x6
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE_MASK 0x300
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT2_STATE__SHIFT 0x8
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2_MASK 0x400
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL2__SHIFT 0xa
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE_MASK 0x3000
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT3_STATE__SHIFT 0xc
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3_MASK 0x4000
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL3__SHIFT 0xe
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE_MASK 0x30000
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT4_STATE__SHIFT 0x10
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4_MASK 0x40000
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL4__SHIFT 0x12
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE_MASK 0x300000
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT5_STATE__SHIFT 0x14
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5_MASK 0x400000
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL5__SHIFT 0x16
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE_MASK 0x3000000
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT6_STATE__SHIFT 0x18
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6_MASK 0x4000000
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL6__SHIFT 0x1a
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE_MASK 0x30000000
+#define PERFCOUNTER_STATE__PERFCOUNTER_CNT7_STATE__SHIFT 0x1c
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7_MASK 0x40000000
+#define PERFCOUNTER_STATE__PERFCOUNTER_STATE_SEL7__SHIFT 0x1e
+#define PERFMON_CNTL__PERFMON_STATE_MASK 0x3
+#define PERFMON_CNTL__PERFMON_STATE__SHIFT 0x0
+#define PERFMON_CNTL__PERFMON_RUN_ENABLE_SEL_MASK 0xfc
+#define PERFMON_CNTL__PERFMON_RUN_ENABLE_SEL__SHIFT 0x2
+#define PERFMON_CNTL__PERFMON_RPT_COUNT_MASK 0xfffff00
+#define PERFMON_CNTL__PERFMON_RPT_COUNT__SHIFT 0x8
+#define PERFMON_CNTL__PERFMON_CNTOFF_AND_OR_MASK 0x10000000
+#define PERFMON_CNTL__PERFMON_CNTOFF_AND_OR__SHIFT 0x1c
+#define PERFMON_CNTL__PERFMON_CNTOFF_INT_EN_MASK 0x20000000
+#define PERFMON_CNTL__PERFMON_CNTOFF_INT_EN__SHIFT 0x1d
+#define PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS_MASK 0x40000000
+#define PERFMON_CNTL__PERFMON_CNTOFF_INT_STATUS__SHIFT 0x1e
+#define PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK_MASK 0x80000000
+#define PERFMON_CNTL__PERFMON_CNTOFF_INT_ACK__SHIFT 0x1f
+#define PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE_MASK 0x1
+#define PERFMON_CNTL2__PERFMON_CNTOFF_INT_TYPE__SHIFT 0x0
+#define PERFMON_CNTL2__PERFMON_CLK_ENABLE_MASK 0x2
+#define PERFMON_CNTL2__PERFMON_CLK_ENABLE__SHIFT 0x1
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS_MASK 0x1
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_STATUS__SHIFT 0x0
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS_MASK 0x2
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_STATUS__SHIFT 0x1
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS_MASK 0x4
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_STATUS__SHIFT 0x2
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS_MASK 0x8
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_STATUS__SHIFT 0x3
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS_MASK 0x10
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_STATUS__SHIFT 0x4
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS_MASK 0x20
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_STATUS__SHIFT 0x5
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS_MASK 0x40
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_STATUS__SHIFT 0x6
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS_MASK 0x80
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_STATUS__SHIFT 0x7
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK_MASK 0x100
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT0_ACK__SHIFT 0x8
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK_MASK 0x200
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT1_ACK__SHIFT 0x9
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK_MASK 0x400
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT2_ACK__SHIFT 0xa
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK_MASK 0x800
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT3_ACK__SHIFT 0xb
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK_MASK 0x1000
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT4_ACK__SHIFT 0xc
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK_MASK 0x2000
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT5_ACK__SHIFT 0xd
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK_MASK 0x4000
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT6_ACK__SHIFT 0xe
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK_MASK 0x8000
+#define PERFMON_CVALUE_INT_MISC__PERFCOUNTER_INT7_ACK__SHIFT 0xf
+#define PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI_MASK 0xffff0000
+#define PERFMON_CVALUE_INT_MISC__PERFMON_CVALUE_HI__SHIFT 0x10
+#define PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW_MASK 0xffffffff
+#define PERFMON_CVALUE_LOW__PERFMON_CVALUE_LOW__SHIFT 0x0
+#define PERFMON_HI__PERFMON_HI_MASK 0xffff
+#define PERFMON_HI__PERFMON_HI__SHIFT 0x0
+#define PERFMON_HI__PERFMON_READ_SEL_MASK 0xe0000000
+#define PERFMON_HI__PERFMON_READ_SEL__SHIFT 0x1d
+#define PERFMON_LOW__PERFMON_LOW_MASK 0xffffffff
+#define PERFMON_LOW__PERFMON_LOW__SHIFT 0x0
+#define PERFMON_TEST_DEBUG_INDEX__PERFMON_TEST_DEBUG_INDEX_MASK 0xff
+#define PERFMON_TEST_DEBUG_INDEX__PERFMON_TEST_DEBUG_INDEX__SHIFT 0x0
+#define PERFMON_TEST_DEBUG_INDEX__PERFMON_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define PERFMON_TEST_DEBUG_INDEX__PERFMON_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define PERFMON_TEST_DEBUG_DATA__PERFMON_TEST_DEBUG_DATA_MASK 0xffffffff
+#define PERFMON_TEST_DEBUG_DATA__PERFMON_TEST_DEBUG_DATA__SHIFT 0x0
+#define REFCLK_CNTL__REFCLK_CLOCK_EN_MASK 0x1
+#define REFCLK_CNTL__REFCLK_CLOCK_EN__SHIFT 0x0
+#define REFCLK_CNTL__REFCLK_SRC_SEL_MASK 0x2
+#define REFCLK_CNTL__REFCLK_SRC_SEL__SHIFT 0x1
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P0PLL_CBUS_ANTIGLITCH_RESETB_MASK 0x1
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P0PLL_CBUS_ANTIGLITCH_RESETB__SHIFT 0x0
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P1PLL_CBUS_ANTIGLITCH_RESETB_MASK 0x2
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P1PLL_CBUS_ANTIGLITCH_RESETB__SHIFT 0x1
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P2PLL_CBUS_ANTIGLITCH_RESETB_MASK 0x4
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P2PLL_CBUS_ANTIGLITCH_RESETB__SHIFT 0x2
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P3PLL_CBUS_ANTIGLITCH_RESETB_MASK 0x8
+#define DCCG_CBUS_ANTIGLITCH_RESETB__P3PLL_CBUS_ANTIGLITCH_RESETB__SHIFT 0x3
+#define DCCG_CBUS_SPARE__P0PLL_CBUS_SPARE_MASK 0xff
+#define DCCG_CBUS_SPARE__P0PLL_CBUS_SPARE__SHIFT 0x0
+#define DCCG_CBUS_SPARE__P1PLL_CBUS_SPARE_MASK 0xff00
+#define DCCG_CBUS_SPARE__P1PLL_CBUS_SPARE__SHIFT 0x8
+#define DCCG_CBUS_SPARE__P2PLL_CBUS_SPARE_MASK 0xff0000
+#define DCCG_CBUS_SPARE__P2PLL_CBUS_SPARE__SHIFT 0x10
+#define DCCG_CBUS_SPARE__P3PLL_CBUS_SPARE_MASK 0xff000000
+#define DCCG_CBUS_SPARE__P3PLL_CBUS_SPARE__SHIFT 0x18
+#define DCCG_CBUS_WRCMD_DELAY__CBUS_PLL_WRCMD_DELAY_MASK 0xf
+#define DCCG_CBUS_WRCMD_DELAY__CBUS_PLL_WRCMD_DELAY__SHIFT 0x0
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL_MASK 0x7
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL__SHIFT 0x0
+#define DPREFCLK_CNTL__UNB_DB_CLK_ENABLE_MASK 0x100
+#define DPREFCLK_CNTL__UNB_DB_CLK_ENABLE__SHIFT 0x8
+#define DCE_VERSION__MAJOR_VERSION_MASK 0xff
+#define DCE_VERSION__MAJOR_VERSION__SHIFT 0x0
+#define DCE_VERSION__MINOR_VERSION_MASK 0xff00
+#define DCE_VERSION__MINOR_VERSION__SHIFT 0x8
+#define AVSYNC_COUNTER_WRITE__AVSYNC_COUNTER_WRVALUE_MASK 0xffffffff
+#define AVSYNC_COUNTER_WRITE__AVSYNC_COUNTER_WRVALUE__SHIFT 0x0
+#define AVSYNC_COUNTER_CONTROL__AVSYNC_COUNTER_ENABLE_MASK 0x1
+#define AVSYNC_COUNTER_CONTROL__AVSYNC_COUNTER_ENABLE__SHIFT 0x0
+#define AVSYNC_COUNTER_READ__AVSYNC_COUNTER_RDVALUE_MASK 0xffffffff
+#define AVSYNC_COUNTER_READ__AVSYNC_COUNTER_RDVALUE__SHIFT 0x0
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE_MASK 0x1
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE__SHIFT 0x0
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR_MASK 0xffffffff
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR__SHIFT 0x0
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO_MASK 0xffffffff
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO__SHIFT 0x0
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT_MASK 0xffffffff
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT__SHIFT 0x0
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR_MASK 0xffffffff
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR__SHIFT 0x0
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO_MASK 0xffffffff
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO__SHIFT 0x0
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE_MASK 0x1
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE__SHIFT 0x0
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC_MASK 0x30
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC__SHIFT 0x4
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE_MASK 0x100
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE__SHIFT 0x8
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS_MASK 0x200
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS__SHIFT 0x9
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV_MASK 0x30000
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV__SHIFT 0x10
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS_MASK 0x1000000
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS__SHIFT 0x18
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL_MASK 0x2000000
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL__SHIFT 0x19
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL_MASK 0xffffffff
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL__SHIFT 0x0
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_ENABLE_MASK 0x1
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_ENABLE__SHIFT 0x0
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_TRIG_VALUE_MASK 0x1ff0
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_TRIG_VALUE__SHIFT 0x4
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_TRIG_OCCURRED_MASK 0x10000
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_TRIG_OCCURRED__SHIFT 0x10
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_TRIG_CLEAR_MASK 0x20000
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_DEBUG_COUNT_TRIG_CLEAR__SHIFT 0x11
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_JITTER_COUNT_ENABLE_MASK 0x100000
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_JITTER_COUNT_ENABLE__SHIFT 0x14
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_JITTER_COUNT_SRC_SEL_MASK 0x200000
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_JITTER_COUNT_SRC_SEL__SHIFT 0x15
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_JITTER_COUNT_MASK 0xff000000
+#define DCCG_DS_DEBUG_CNTL__DCCG_DS_JITTER_COUNT__SHIFT 0x18
+#define DMCU_SMU_INTERRUPT_CNTL__DMCU_SMU_STATIC_SCREEN_INT_MASK 0x1
+#define DMCU_SMU_INTERRUPT_CNTL__DMCU_SMU_STATIC_SCREEN_INT__SHIFT 0x0
+#define DMCU_SMU_INTERRUPT_CNTL__DMCU_SMU_STATIC_SCREEN_STATUS_MASK 0xffff0000
+#define DMCU_SMU_INTERRUPT_CNTL__DMCU_SMU_STATIC_SCREEN_STATUS__SHIFT 0x10
+#define SMU_CONTROL__DISPLAY0_FORCE_VBI_MASK 0x1
+#define SMU_CONTROL__DISPLAY0_FORCE_VBI__SHIFT 0x0
+#define SMU_CONTROL__DISPLAY1_FORCE_VBI_MASK 0x2
+#define SMU_CONTROL__DISPLAY1_FORCE_VBI__SHIFT 0x1
+#define SMU_CONTROL__DISPLAY2_FORCE_VBI_MASK 0x4
+#define SMU_CONTROL__DISPLAY2_FORCE_VBI__SHIFT 0x2
+#define SMU_CONTROL__DISPLAY3_FORCE_VBI_MASK 0x8
+#define SMU_CONTROL__DISPLAY3_FORCE_VBI__SHIFT 0x3
+#define SMU_CONTROL__DISPLAY4_FORCE_VBI_MASK 0x10
+#define SMU_CONTROL__DISPLAY4_FORCE_VBI__SHIFT 0x4
+#define SMU_CONTROL__DISPLAY5_FORCE_VBI_MASK 0x20
+#define SMU_CONTROL__DISPLAY5_FORCE_VBI__SHIFT 0x5
+#define SMU_CONTROL__DISPLAY_V0_FORCE_VBI_MASK 0x40
+#define SMU_CONTROL__DISPLAY_V0_FORCE_VBI__SHIFT 0x6
+#define SMU_CONTROL__DISPLAY_V1_FORCE_VBI_MASK 0x80
+#define SMU_CONTROL__DISPLAY_V1_FORCE_VBI__SHIFT 0x7
+#define SMU_CONTROL__MCIF_WB_FORCE_VBI_MASK 0x100
+#define SMU_CONTROL__MCIF_WB_FORCE_VBI__SHIFT 0x8
+#define SMU_CONTROL__SMU_DC_INT_CLEAR_MASK 0x10000
+#define SMU_CONTROL__SMU_DC_INT_CLEAR__SHIFT 0x10
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_ENABLE_MASK 0x1
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_ENABLE__SHIFT 0x0
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_STATUS_MASK 0x10
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_STATUS__SHIFT 0x4
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_EVENT_MASK 0xffff0000
+#define SMU_INTERRUPT_CONTROL__DC_SMU_INT_EVENT__SHIFT 0x10
+#define DAC_CLK_ENABLE__DACA_CLK_ENABLE_MASK 0x1
+#define DAC_CLK_ENABLE__DACA_CLK_ENABLE__SHIFT 0x0
+#define DAC_CLK_ENABLE__DACB_CLK_ENABLE_MASK 0x10
+#define DAC_CLK_ENABLE__DACB_CLK_ENABLE__SHIFT 0x4
+#define DVO_CLK_ENABLE__DVO_CLK_ENABLE_MASK 0x1
+#define DVO_CLK_ENABLE__DVO_CLK_ENABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE_MASK 0x1
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE_MASK 0x2
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL__SCLK_GATE_DISABLE_MASK 0x4
+#define DCCG_GATE_DISABLE_CNTL__SCLK_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE_MASK 0x8
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE_MASK 0x10
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL__DACBCLK_GATE_DISABLE_MASK 0x20
+#define DCCG_GATE_DISABLE_CNTL__DACBCLK_GATE_DISABLE__SHIFT 0x5
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE_MASK 0x40
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL__DPDBG_CLK_GATE_DISABLE_MASK 0x80
+#define DCCG_GATE_DISABLE_CNTL__DPDBG_CLK_GATE_DISABLE__SHIFT 0x7
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE_MASK 0x100
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE_MASK 0x20000
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE_MASK 0x40000
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE_MASK 0x80000
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL__AUDIO_DTO2_CLK_GATE_DISABLE_MASK 0x200000
+#define DCCG_GATE_DISABLE_CNTL__AUDIO_DTO2_CLK_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE_MASK 0x400000
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL__UNB_DB_CLK_GATE_DISABLE_MASK 0x800000
+#define DCCG_GATE_DISABLE_CNTL__UNB_DB_CLK_GATE_DISABLE__SHIFT 0x17
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE_MASK 0x4000000
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE_MASK 0x8000000
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE__SHIFT 0x1b
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE_MASK 0x10000000
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE__SHIFT 0x1c
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE_MASK 0x20000000
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE__SHIFT 0x1d
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE_MASK 0x40000000
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE__SHIFT 0x1e
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE_MASK 0x1
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE_MASK 0x2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE_MASK 0x4
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE_MASK 0x8
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE_MASK 0x10
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE_MASK 0x20
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE__SHIFT 0x5
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE_MASK 0x40
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPA_FE_GATE_DISABLE_MASK 0x100
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPA_FE_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPB_FE_GATE_DISABLE_MASK 0x200
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPB_FE_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE_MASK 0x10000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE__SHIFT 0x10
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE_MASK 0x20000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE_MASK 0x40000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE_MASK 0x80000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE_MASK 0x100000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE__SHIFT 0x14
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE_MASK 0x200000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE_MASK 0x400000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPA_GATE_DISABLE_MASK 0x1000000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPA_GATE_DISABLE__SHIFT 0x18
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPB_GATE_DISABLE_MASK 0x2000000
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKLPB_GATE_DISABLE__SHIFT 0x19
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY_MASK 0xf
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY_MASK 0xff0
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_ON_DELAY_MASK 0xf
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_OFF_DELAY_MASK 0xff0
+#define SCLK_CGTT_BLK_CTRL_REG__SCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SCLK_CGTT_BLK_CTRL_REG__CGTT_SCLK_OVERRIDE_MASK 0x1000
+#define SCLK_CGTT_BLK_CTRL_REG__CGTT_SCLK_OVERRIDE__SHIFT 0xc
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY_MASK 0xf
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY_MASK 0xff0
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_ON_DELAY_MASK 0xf
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_OFF_DELAY_MASK 0xff0
+#define REFCLK_CGTT_BLK_CTRL_REG__REFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY_MASK 0xf
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY_MASK 0xff0
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA_MASK 0xffffffff
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA__SHIFT 0x0
+#define PIXCLK0_RESYNC_CNTL__PIXCLK0_RESYNC_ENABLE_MASK 0x1
+#define PIXCLK0_RESYNC_CNTL__PIXCLK0_RESYNC_ENABLE__SHIFT 0x0
+#define PIXCLK0_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL0_MASK 0x30
+#define PIXCLK0_RESYNC_CNTL__DCCG_DEEP_COLOR_CNTL0__SHIFT 0x4
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE_MASK 0x1
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL_MASK 0x30
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE_MASK 0x100
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x200
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE_MASK 0x1
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL_MASK 0x30
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE_MASK 0x100
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x200
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_RESYNC_ENABLE_MASK 0x1
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DCCG_DEEP_COLOR_CNTL_MASK 0x30
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_ENABLE_MASK 0x100
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x200
+#define PHYPLLC_PIXCLK_RESYNC_CNTL__PHYPLLC_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_RESYNC_ENABLE_MASK 0x1
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DCCG_DEEP_COLOR_CNTL_MASK 0x30
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_ENABLE_MASK 0x100
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x200
+#define PHYPLLD_PIXCLK_RESYNC_CNTL__PHYPLLD_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_RESYNC_ENABLE_MASK 0x1
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DCCG_DEEP_COLOR_CNTL_MASK 0x30
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_ENABLE_MASK 0x100
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x200
+#define PHYPLLE_PIXCLK_RESYNC_CNTL__PHYPLLE_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x1
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x30
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x100
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x200
+#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV_MASK 0x7f
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV_MASK 0x7f00
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV__SHIFT 0x8
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL_MASK 0x10000
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL__SHIFT 0x10
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL_MASK 0x20000
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL__SHIFT 0x11
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x100000
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ_MASK 0x100
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ__SHIFT 0x8
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV_MASK 0x1ffff
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x100000
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY_MASK 0x3fff
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY__SHIFT 0x0
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE_MASK 0xf0000
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE__SHIFT 0x10
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE_MASK 0x100000
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE__SHIFT 0x14
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES_MASK 0xe000000
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES__SHIFT 0x19
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET_MASK 0x10000000
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET__SHIFT 0x1c
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE_MASK 0x20000000
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE__SHIFT 0x1d
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN_MASK 0x40000000
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN__SHIFT 0x1e
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE_MASK 0x80000000
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE__SHIFT 0x1f
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS_MASK 0x1
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS__SHIFT 0x0
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE_MASK 0x1
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE__SHIFT 0x0
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE_MASK 0x2
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE__SHIFT 0x1
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE_MASK 0x4
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE__SHIFT 0x2
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE_MASK 0x8
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE__SHIFT 0x3
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE_MASK 0x10
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE__SHIFT 0x4
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN_MASK 0x20
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN__SHIFT 0x5
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC_MASK 0x40
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC__SHIFT 0x6
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC_MASK 0x80
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC__SHIFT 0x7
+#define DCCG_PERFMON_CNTL__DCCG_PERF_CRTC_SEL_MASK 0x700
+#define DCCG_PERFMON_CNTL__DCCG_PERF_CRTC_SEL__SHIFT 0x8
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV_MASK 0xfffff800
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV__SHIFT 0xb
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DSICLK_ENABLE_MASK 0x1
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_DSICLK_ENABLE__SHIFT 0x0
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_REFCLK_ENABLE_MASK 0x2
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_REFCLK_ENABLE__SHIFT 0x1
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK1_ENABLE_MASK 0x4
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK1_ENABLE__SHIFT 0x2
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK2_ENABLE_MASK 0x8
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_PIXCLK2_ENABLE__SHIFT 0x3
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYC_PIXCLK_ENABLE_MASK 0x10
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYC_PIXCLK_ENABLE__SHIFT 0x4
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYD_PIXCLK_ENABLE_MASK 0x20
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYD_PIXCLK_ENABLE__SHIFT 0x5
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYE_PIXCLK_ENABLE_MASK 0x40
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYE_PIXCLK_ENABLE__SHIFT 0x6
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYF_PIXCLK_ENABLE_MASK 0x80
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYF_PIXCLK_ENABLE__SHIFT 0x7
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYG_PIXCLK_ENABLE_MASK 0x100
+#define DCCG_PERFMON_CNTL2__DCCG_PERF_UNIPHYG_PIXCLK_ENABLE__SHIFT 0x8
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_PIXEL_RATE_SOURCE_MASK 0x3
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE_MASK 0x10
+#define CRTC0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE__SHIFT 0x4
+#define CRTC0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE_MASK 0x20
+#define CRTC0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE__SHIFT 0x5
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_ADD_PIXEL_MASK 0x100
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_ADD_PIXEL__SHIFT 0x8
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DROP_PIXEL_MASK 0x200
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DROP_PIXEL__SHIFT 0x9
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_HALF_RATE_EN_MASK 0x800
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_FIFO_ERROR_MASK 0xc000
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_FIFO_ERROR__SHIFT 0xe
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_ERROR_COUNT_MASK 0xfff0000
+#define CRTC0_PIXEL_RATE_CNTL__CRTC0_DISPOUT_ERROR_COUNT__SHIFT 0x10
+#define DP_DTO0_PHASE__DP_DTO0_PHASE_MASK 0xffffffff
+#define DP_DTO0_PHASE__DP_DTO0_PHASE__SHIFT 0x0
+#define DP_DTO0_MODULO__DP_DTO0_MODULO_MASK 0xffffffff
+#define DP_DTO0_MODULO__DP_DTO0_MODULO__SHIFT 0x0
+#define CRTC0_PHYPLL_PIXEL_RATE_CNTL__CRTC0_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x7
+#define CRTC0_PHYPLL_PIXEL_RATE_CNTL__CRTC0_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC0_PHYPLL_PIXEL_RATE_CNTL__CRTC0_PIXEL_RATE_PLL_SOURCE_MASK 0x10
+#define CRTC0_PHYPLL_PIXEL_RATE_CNTL__CRTC0_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_PIXEL_RATE_SOURCE_MASK 0x3
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE_MASK 0x10
+#define CRTC1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE__SHIFT 0x4
+#define CRTC1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE_MASK 0x20
+#define CRTC1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE__SHIFT 0x5
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_ADD_PIXEL_MASK 0x100
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_ADD_PIXEL__SHIFT 0x8
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DROP_PIXEL_MASK 0x200
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DROP_PIXEL__SHIFT 0x9
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_HALF_RATE_EN_MASK 0x800
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_FIFO_ERROR_MASK 0xc000
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_FIFO_ERROR__SHIFT 0xe
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_ERROR_COUNT_MASK 0xfff0000
+#define CRTC1_PIXEL_RATE_CNTL__CRTC1_DISPOUT_ERROR_COUNT__SHIFT 0x10
+#define DP_DTO1_PHASE__DP_DTO1_PHASE_MASK 0xffffffff
+#define DP_DTO1_PHASE__DP_DTO1_PHASE__SHIFT 0x0
+#define DP_DTO1_MODULO__DP_DTO1_MODULO_MASK 0xffffffff
+#define DP_DTO1_MODULO__DP_DTO1_MODULO__SHIFT 0x0
+#define CRTC1_PHYPLL_PIXEL_RATE_CNTL__CRTC1_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x7
+#define CRTC1_PHYPLL_PIXEL_RATE_CNTL__CRTC1_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC1_PHYPLL_PIXEL_RATE_CNTL__CRTC1_PIXEL_RATE_PLL_SOURCE_MASK 0x10
+#define CRTC1_PHYPLL_PIXEL_RATE_CNTL__CRTC1_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_PIXEL_RATE_SOURCE_MASK 0x3
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE_MASK 0x10
+#define CRTC2_PIXEL_RATE_CNTL__DP_DTO2_ENABLE__SHIFT 0x4
+#define CRTC2_PIXEL_RATE_CNTL__DP_DTO2_DS_DISABLE_MASK 0x20
+#define CRTC2_PIXEL_RATE_CNTL__DP_DTO2_DS_DISABLE__SHIFT 0x5
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_ADD_PIXEL_MASK 0x100
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_ADD_PIXEL__SHIFT 0x8
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DROP_PIXEL_MASK 0x200
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DROP_PIXEL__SHIFT 0x9
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_HALF_RATE_EN_MASK 0x800
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_FIFO_ERROR_MASK 0xc000
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_FIFO_ERROR__SHIFT 0xe
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_ERROR_COUNT_MASK 0xfff0000
+#define CRTC2_PIXEL_RATE_CNTL__CRTC2_DISPOUT_ERROR_COUNT__SHIFT 0x10
+#define DP_DTO2_PHASE__DP_DTO2_PHASE_MASK 0xffffffff
+#define DP_DTO2_PHASE__DP_DTO2_PHASE__SHIFT 0x0
+#define DP_DTO2_MODULO__DP_DTO2_MODULO_MASK 0xffffffff
+#define DP_DTO2_MODULO__DP_DTO2_MODULO__SHIFT 0x0
+#define CRTC2_PHYPLL_PIXEL_RATE_CNTL__CRTC2_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x7
+#define CRTC2_PHYPLL_PIXEL_RATE_CNTL__CRTC2_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC2_PHYPLL_PIXEL_RATE_CNTL__CRTC2_PIXEL_RATE_PLL_SOURCE_MASK 0x10
+#define CRTC2_PHYPLL_PIXEL_RATE_CNTL__CRTC2_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_PIXEL_RATE_SOURCE_MASK 0x3
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE_MASK 0x10
+#define CRTC3_PIXEL_RATE_CNTL__DP_DTO3_ENABLE__SHIFT 0x4
+#define CRTC3_PIXEL_RATE_CNTL__DP_DTO3_DS_DISABLE_MASK 0x20
+#define CRTC3_PIXEL_RATE_CNTL__DP_DTO3_DS_DISABLE__SHIFT 0x5
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_ADD_PIXEL_MASK 0x100
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_ADD_PIXEL__SHIFT 0x8
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DROP_PIXEL_MASK 0x200
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DROP_PIXEL__SHIFT 0x9
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_HALF_RATE_EN_MASK 0x800
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_FIFO_ERROR_MASK 0xc000
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_FIFO_ERROR__SHIFT 0xe
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_ERROR_COUNT_MASK 0xfff0000
+#define CRTC3_PIXEL_RATE_CNTL__CRTC3_DISPOUT_ERROR_COUNT__SHIFT 0x10
+#define DP_DTO3_PHASE__DP_DTO3_PHASE_MASK 0xffffffff
+#define DP_DTO3_PHASE__DP_DTO3_PHASE__SHIFT 0x0
+#define DP_DTO3_MODULO__DP_DTO3_MODULO_MASK 0xffffffff
+#define DP_DTO3_MODULO__DP_DTO3_MODULO__SHIFT 0x0
+#define CRTC3_PHYPLL_PIXEL_RATE_CNTL__CRTC3_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x7
+#define CRTC3_PHYPLL_PIXEL_RATE_CNTL__CRTC3_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC3_PHYPLL_PIXEL_RATE_CNTL__CRTC3_PIXEL_RATE_PLL_SOURCE_MASK 0x10
+#define CRTC3_PHYPLL_PIXEL_RATE_CNTL__CRTC3_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_PIXEL_RATE_SOURCE_MASK 0x3
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC4_PIXEL_RATE_CNTL__DP_DTO4_ENABLE_MASK 0x10
+#define CRTC4_PIXEL_RATE_CNTL__DP_DTO4_ENABLE__SHIFT 0x4
+#define CRTC4_PIXEL_RATE_CNTL__DP_DTO4_DS_DISABLE_MASK 0x20
+#define CRTC4_PIXEL_RATE_CNTL__DP_DTO4_DS_DISABLE__SHIFT 0x5
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_ADD_PIXEL_MASK 0x100
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_ADD_PIXEL__SHIFT 0x8
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DROP_PIXEL_MASK 0x200
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DROP_PIXEL__SHIFT 0x9
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_HALF_RATE_EN_MASK 0x800
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_FIFO_ERROR_MASK 0xc000
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_FIFO_ERROR__SHIFT 0xe
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_ERROR_COUNT_MASK 0xfff0000
+#define CRTC4_PIXEL_RATE_CNTL__CRTC4_DISPOUT_ERROR_COUNT__SHIFT 0x10
+#define DP_DTO4_PHASE__DP_DTO4_PHASE_MASK 0xffffffff
+#define DP_DTO4_PHASE__DP_DTO4_PHASE__SHIFT 0x0
+#define DP_DTO4_MODULO__DP_DTO4_MODULO_MASK 0xffffffff
+#define DP_DTO4_MODULO__DP_DTO4_MODULO__SHIFT 0x0
+#define CRTC4_PHYPLL_PIXEL_RATE_CNTL__CRTC4_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x7
+#define CRTC4_PHYPLL_PIXEL_RATE_CNTL__CRTC4_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC4_PHYPLL_PIXEL_RATE_CNTL__CRTC4_PIXEL_RATE_PLL_SOURCE_MASK 0x10
+#define CRTC4_PHYPLL_PIXEL_RATE_CNTL__CRTC4_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_PIXEL_RATE_SOURCE_MASK 0x3
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC5_PIXEL_RATE_CNTL__DP_DTO5_ENABLE_MASK 0x10
+#define CRTC5_PIXEL_RATE_CNTL__DP_DTO5_ENABLE__SHIFT 0x4
+#define CRTC5_PIXEL_RATE_CNTL__DP_DTO5_DS_DISABLE_MASK 0x20
+#define CRTC5_PIXEL_RATE_CNTL__DP_DTO5_DS_DISABLE__SHIFT 0x5
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_ADD_PIXEL_MASK 0x100
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_ADD_PIXEL__SHIFT 0x8
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DROP_PIXEL_MASK 0x200
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DROP_PIXEL__SHIFT 0x9
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_HALF_RATE_EN_MASK 0x800
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_FIFO_ERROR_MASK 0xc000
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_FIFO_ERROR__SHIFT 0xe
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_ERROR_COUNT_MASK 0xfff0000
+#define CRTC5_PIXEL_RATE_CNTL__CRTC5_DISPOUT_ERROR_COUNT__SHIFT 0x10
+#define DP_DTO5_PHASE__DP_DTO5_PHASE_MASK 0xffffffff
+#define DP_DTO5_PHASE__DP_DTO5_PHASE__SHIFT 0x0
+#define DP_DTO5_MODULO__DP_DTO5_MODULO_MASK 0xffffffff
+#define DP_DTO5_MODULO__DP_DTO5_MODULO__SHIFT 0x0
+#define CRTC5_PHYPLL_PIXEL_RATE_CNTL__CRTC5_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x7
+#define CRTC5_PHYPLL_PIXEL_RATE_CNTL__CRTC5_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define CRTC5_PHYPLL_PIXEL_RATE_CNTL__CRTC5_PIXEL_RATE_PLL_SOURCE_MASK 0x10
+#define CRTC5_PHYPLL_PIXEL_RATE_CNTL__CRTC5_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET_MASK 0x1
+#define DCCG_SOFT_RESET__REFCLK_SOFT_RESET__SHIFT 0x0
+#define DCCG_SOFT_RESET__PCIE_REFCLK_SOFT_RESET_MASK 0x2
+#define DCCG_SOFT_RESET__PCIE_REFCLK_SOFT_RESET__SHIFT 0x1
+#define DCCG_SOFT_RESET__SOFT_RESET_DVO_MASK 0x4
+#define DCCG_SOFT_RESET__SOFT_RESET_DVO__SHIFT 0x2
+#define DCCG_SOFT_RESET__DVO_ENABLE_RST_MASK 0x8
+#define DCCG_SOFT_RESET__DVO_ENABLE_RST__SHIFT 0x3
+#define DCCG_SOFT_RESET__AUDIO_DTO2_CLK_SOFT_RESET_MASK 0x10
+#define DCCG_SOFT_RESET__AUDIO_DTO2_CLK_SOFT_RESET__SHIFT 0x4
+#define DCCG_SOFT_RESET__DPREFCLK_SOFT_RESET_MASK 0x100
+#define DCCG_SOFT_RESET__DPREFCLK_SOFT_RESET__SHIFT 0x8
+#define DCCG_SOFT_RESET__AMCLK0_SOFT_RESET_MASK 0x1000
+#define DCCG_SOFT_RESET__AMCLK0_SOFT_RESET__SHIFT 0xc
+#define DCCG_SOFT_RESET__AMCLK1_SOFT_RESET_MASK 0x2000
+#define DCCG_SOFT_RESET__AMCLK1_SOFT_RESET__SHIFT 0xd
+#define DCCG_SOFT_RESET__P0PLL_CFG_IF_SOFT_RESET_MASK 0x4000
+#define DCCG_SOFT_RESET__P0PLL_CFG_IF_SOFT_RESET__SHIFT 0xe
+#define DCCG_SOFT_RESET__P1PLL_CFG_IF_SOFT_RESET_MASK 0x8000
+#define DCCG_SOFT_RESET__P1PLL_CFG_IF_SOFT_RESET__SHIFT 0xf
+#define DCCG_SOFT_RESET__P2PLL_CFG_IF_SOFT_RESET_MASK 0x10000
+#define DCCG_SOFT_RESET__P2PLL_CFG_IF_SOFT_RESET__SHIFT 0x10
+#define DCCG_SOFT_RESET__A0PLL_CFG_IF_SOFT_RESET_MASK 0x20000
+#define DCCG_SOFT_RESET__A0PLL_CFG_IF_SOFT_RESET__SHIFT 0x11
+#define DCCG_SOFT_RESET__A1PLL_CFG_IF_SOFT_RESET_MASK 0x40000
+#define DCCG_SOFT_RESET__A1PLL_CFG_IF_SOFT_RESET__SHIFT 0x12
+#define DCCG_SOFT_RESET__C0PLL_CFG_IF_SOFT_RESET_MASK 0x80000
+#define DCCG_SOFT_RESET__C0PLL_CFG_IF_SOFT_RESET__SHIFT 0x13
+#define DCCG_SOFT_RESET__C1PLL_CFG_IF_SOFT_RESET_MASK 0x100000
+#define DCCG_SOFT_RESET__C1PLL_CFG_IF_SOFT_RESET__SHIFT 0x14
+#define DCCG_SOFT_RESET__C2PLL_CFG_IF_SOFT_RESET_MASK 0x200000
+#define DCCG_SOFT_RESET__C2PLL_CFG_IF_SOFT_RESET__SHIFT 0x15
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE_MASK 0x1
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN_MASK 0x10
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC_MASK 0x700
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE_MASK 0x1
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN_MASK 0x10
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC_MASK 0x700
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE_MASK 0x1
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_EN_MASK 0x10
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_SRC_MASK 0x700
+#define SYMCLKC_CLOCK_ENABLE__SYMCLKC_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE_MASK 0x1
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_EN_MASK 0x10
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_SRC_MASK 0x700
+#define SYMCLKD_CLOCK_ENABLE__SYMCLKD_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE_MASK 0x1
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_EN_MASK 0x10
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_SRC_MASK 0x700
+#define SYMCLKE_CLOCK_ENABLE__SYMCLKE_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_CLOCK_ENABLE_MASK 0x1
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_EN_MASK 0x10
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_SRC_MASK 0x700
+#define SYMCLKF_CLOCK_ENABLE__SYMCLKF_FE_FORCE_SRC__SHIFT 0x8
+#define DPDBG_CLK_FORCE_CONTROL__DPDBG_CLK_FORCE_EN_MASK 0x10
+#define DPDBG_CLK_FORCE_CONTROL__DPDBG_CLK_FORCE_EN__SHIFT 0x4
+#define DPDBG_CLK_FORCE_CONTROL__DPDBG_CLK_FORCE_SRC_MASK 0x700
+#define DPDBG_CLK_FORCE_CONTROL__DPDBG_CLK_FORCE_SRC__SHIFT 0x8
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL_MASK 0x7
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT 0x0
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL_MASK 0x30
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL__SHIFT 0x4
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_SOURCE_SEL_MASK 0x3000
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_SOURCE_SEL__SHIFT 0xc
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_CLOCK_EN_MASK 0x10000
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_CLOCK_EN__SHIFT 0x10
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO_MASK 0x100000
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO__SHIFT 0x14
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO_MASK 0x1000000
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO__SHIFT 0x18
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO_MASK 0x10000000
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO__SHIFT 0x1c
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE_MASK 0xffffffff
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE_MASK 0xffffffff
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE_MASK 0xffffffff
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE_MASK 0xffffffff
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE__SHIFT 0x0
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_INDEX_MASK 0xff
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DCCG_TEST_DEBUG_INDEX__DCCG_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DCCG_TEST_DEBUG_DATA__DCCG_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DCCG_TEST_DEBUG_DATA__DCCG_TEST_DEBUG_DATA__SHIFT 0x0
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_SEL_MASK 0x1ff
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_SEL__SHIFT 0x0
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_INV_MASK 0x1000
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICA_INV__SHIFT 0xc
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_SEL_MASK 0x1ff0000
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_SEL__SHIFT 0x10
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_INV_MASK 0x10000000
+#define DCCG_TEST_CLK_SEL__DCCG_TEST_CLK_GENERICB_INV__SHIFT 0x1c
+#define CPLL_MACRO_CNTL_RESERVED0__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED0__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED1__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED1__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED2__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED2__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED3__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED3__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED4__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED4__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED5__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED5__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED6__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED6__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED7__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED7__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED8__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED8__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED9__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED9__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED10__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED10__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define CPLL_MACRO_CNTL_RESERVED11__CPLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define CPLL_MACRO_CNTL_RESERVED11__CPLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED0__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED0__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED1__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED1__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED2__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED2__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED3__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED3__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED4__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED4__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED5__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED5__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED6__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED6__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED7__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED7__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED8__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED8__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED9__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED9__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED10__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED10__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED11__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED11__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED12__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED12__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED13__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED13__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED14__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED14__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED15__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED15__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED16__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED16__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED17__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED17__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED18__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED18__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED19__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED19__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED20__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED20__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED21__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED21__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED22__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED22__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED23__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED23__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED24__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED24__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED25__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED25__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED26__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED26__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED27__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED27__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED28__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED28__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED29__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED29__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED30__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED30__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED31__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED31__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED32__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED32__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED33__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED33__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED34__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED34__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED35__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED35__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED36__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED36__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED37__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED37__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED38__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED38__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED39__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED39__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED40__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED40__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define PLL_MACRO_CNTL_RESERVED41__PLL_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define PLL_MACRO_CNTL_RESERVED41__PLL_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x7f
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x7f00
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE_MASK 0x18000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE__SHIFT 0xf
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG_MASK 0x20000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG__SHIFT 0x11
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG_MASK 0x40000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG__SHIFT 0x12
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x80000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHG_DONE_MASK 0x100000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHG_DONE__SHIFT 0x14
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHGTOG_MASK 0x200000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_CHGTOG__SHIFT 0x15
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_DONETOG_MASK 0x400000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_DONETOG__SHIFT 0x16
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_WDIVIDER_MASK 0x7f000000
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPREFCLK_WDIVIDER__SHIFT 0x18
+#define DCDEBUG_BUS_CLK1_SEL__DCDEBUG_BUS_CLK1_SEL_MASK 0xffffffff
+#define DCDEBUG_BUS_CLK1_SEL__DCDEBUG_BUS_CLK1_SEL__SHIFT 0x0
+#define DCDEBUG_BUS_CLK2_SEL__DCDEBUG_BUS_CLK2_SEL_MASK 0xffffffff
+#define DCDEBUG_BUS_CLK2_SEL__DCDEBUG_BUS_CLK2_SEL__SHIFT 0x0
+#define DCDEBUG_BUS_CLK3_SEL__DCDEBUG_BUS_CLK3_SEL_MASK 0xffffffff
+#define DCDEBUG_BUS_CLK3_SEL__DCDEBUG_BUS_CLK3_SEL__SHIFT 0x0
+#define DCDEBUG_BUS_CLK4_SEL__DCDEBUG_BUS_CLK4_SEL_MASK 0xffffffff
+#define DCDEBUG_BUS_CLK4_SEL__DCDEBUG_BUS_CLK4_SEL__SHIFT 0x0
+#define DCDEBUG_BUS_CLK5_SEL__DCDEBUG_BUS_CLK5_SEL_MASK 0xffffffff
+#define DCDEBUG_BUS_CLK5_SEL__DCDEBUG_BUS_CLK5_SEL__SHIFT 0x0
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_PIN_SEL_MASK 0x1f
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_PIN_SEL__SHIFT 0x0
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_REGBIT_SEL_MASK 0x3e0
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_REGBIT_SEL__SHIFT 0x5
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_EN_MASK 0x1000
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE1_EN__SHIFT 0xc
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_PIN_SEL_MASK 0xf8000
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_PIN_SEL__SHIFT 0xf
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_REGBIT_SEL_MASK 0x1f00000
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_REGBIT_SEL__SHIFT 0x14
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_EN_MASK 0x10000000
+#define DCDEBUG_OUT_PIN_OVERRIDE__DCDEBUG_OUT_OVERRIDE2_EN__SHIFT 0x1c
+#define DCDEBUG_OUT_CNTL__DCDEBUG_BLOCK_SEL_MASK 0x1f
+#define DCDEBUG_OUT_CNTL__DCDEBUG_BLOCK_SEL__SHIFT 0x0
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_24BIT_SEL_MASK 0x800000
+#define DCDEBUG_OUT_CNTL__DCDEBUG_OUT_24BIT_SEL__SHIFT 0x17
+#define DCDEBUG_OUT_CNTL__DCDEBUG_CLK_SEL_MASK 0x1f000000
+#define DCDEBUG_OUT_CNTL__DCDEBUG_CLK_SEL__SHIFT 0x18
+#define DCDEBUG_OUT_DATA__DCDEBUG_OUT_DATA_MASK 0xffffffff
+#define DCDEBUG_OUT_DATA__DCDEBUG_OUT_DATA__SHIFT 0x0
+#define DMIF_CONTROL__DMIF_BUFF_SIZE_MASK 0x3
+#define DMIF_CONTROL__DMIF_BUFF_SIZE__SHIFT 0x0
+#define DMIF_CONTROL__DMIF_GROUP_REQUESTS_IN_CHUNK_MASK 0x4
+#define DMIF_CONTROL__DMIF_GROUP_REQUESTS_IN_CHUNK__SHIFT 0x2
+#define DMIF_CONTROL__DMIF_DISABLE_EARLY_RECEIVED_LEVEL_COUNT_MASK 0x10
+#define DMIF_CONTROL__DMIF_DISABLE_EARLY_RECEIVED_LEVEL_COUNT__SHIFT 0x4
+#define DMIF_CONTROL__DMIF_REQ_BURST_SIZE_MASK 0x700
+#define DMIF_CONTROL__DMIF_REQ_BURST_SIZE__SHIFT 0x8
+#define DMIF_CONTROL__DMIF_UNDERFLOW_RECOVERY_EN_MASK 0x800
+#define DMIF_CONTROL__DMIF_UNDERFLOW_RECOVERY_EN__SHIFT 0xb
+#define DMIF_CONTROL__DMIF_FORCE_TOTAL_REQ_BURST_SIZE_MASK 0x1f000
+#define DMIF_CONTROL__DMIF_FORCE_TOTAL_REQ_BURST_SIZE__SHIFT 0xc
+#define DMIF_CONTROL__DMIF_MAX_TOTAL_OUTSTANDING_CHUNK_REQUESTS_MASK 0x7e0000
+#define DMIF_CONTROL__DMIF_MAX_TOTAL_OUTSTANDING_CHUNK_REQUESTS__SHIFT 0x11
+#define DMIF_CONTROL__DMIF_DELAY_ARBITRATION_MASK 0x1f000000
+#define DMIF_CONTROL__DMIF_DELAY_ARBITRATION__SHIFT 0x18
+#define DMIF_CONTROL__DMIF_CHUNK_BUFF_MARGIN_MASK 0x60000000
+#define DMIF_CONTROL__DMIF_CHUNK_BUFF_MARGIN__SHIFT 0x1d
+#define DMIF_CONTROL__DMIF_PSTATE_URGENT_DISABLE_MASK 0x80000000
+#define DMIF_CONTROL__DMIF_PSTATE_URGENT_DISABLE__SHIFT 0x1f
+#define DMIF_STATUS__DMIF_MC_SEND_ON_IDLE_MASK 0x3f
+#define DMIF_STATUS__DMIF_MC_SEND_ON_IDLE__SHIFT 0x0
+#define DMIF_STATUS__DMIF_CLEAR_MC_SEND_ON_IDLE_MASK 0x3f00
+#define DMIF_STATUS__DMIF_CLEAR_MC_SEND_ON_IDLE__SHIFT 0x8
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_ENABLE_MASK 0x10000
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_ENABLE__SHIFT 0x10
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_URGENT_ONLY_MASK 0x20000
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_URGENT_ONLY__SHIFT 0x11
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_SOURCE_SELECT_MASK 0xf00000
+#define DMIF_STATUS__DMIF_MC_LATENCY_COUNTER_SOURCE_SELECT__SHIFT 0x14
+#define DMIF_STATUS__DMIF_PERFORMANCE_COUNTER_SOURCE_SELECT_MASK 0xf000000
+#define DMIF_STATUS__DMIF_PERFORMANCE_COUNTER_SOURCE_SELECT__SHIFT 0x18
+#define DMIF_STATUS__DMIF_UNDERFLOW_MASK 0x10000000
+#define DMIF_STATUS__DMIF_UNDERFLOW__SHIFT 0x1c
+#define DMIF_STATUS__DMIF_MC_LATENCY_TAP_POINT_MASK 0x60000000
+#define DMIF_STATUS__DMIF_MC_LATENCY_TAP_POINT__SHIFT 0x1d
+#define DMIF_STATUS__DMIF_MC_LATENCY_REQ_TYPE_MASK 0x80000000
+#define DMIF_STATUS__DMIF_MC_LATENCY_REQ_TYPE__SHIFT 0x1f
+#define DMIFV_STATUS__DMIFV_MC_SEND_ON_IDLE_MASK 0xf
+#define DMIFV_STATUS__DMIFV_MC_SEND_ON_IDLE__SHIFT 0x0
+#define DMIFV_STATUS__DMIFV_CLEAR_MC_SEND_ON_IDLE_MASK 0xf00
+#define DMIFV_STATUS__DMIFV_CLEAR_MC_SEND_ON_IDLE__SHIFT 0x8
+#define DMIF_HW_DEBUG__DMIF_HW_DEBUG_MASK 0xffffffff
+#define DMIF_HW_DEBUG__DMIF_HW_DEBUG__SHIFT 0x0
+#define DMIF_ARBITRATION_CONTROL__DMIF_ARBITRATION_REFERENCE_CLOCK_PERIOD_MASK 0xffff
+#define DMIF_ARBITRATION_CONTROL__DMIF_ARBITRATION_REFERENCE_CLOCK_PERIOD__SHIFT 0x0
+#define DMIF_ARBITRATION_CONTROL__PIPE_SWITCH_EFFICIENCY_WEIGHT_MASK 0xffff0000
+#define DMIF_ARBITRATION_CONTROL__PIPE_SWITCH_EFFICIENCY_WEIGHT__SHIFT 0x10
+#define PIPE0_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE0_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define PIPE1_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE1_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define PIPE2_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE2_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define PIPE3_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE3_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define PIPE4_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE4_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define PIPE5_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE5_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define PIPE6_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE6_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define PIPE7_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT_MASK 0xffff
+#define PIPE7_ARBITRATION_CONTROL3__EFFICIENCY_WEIGHT__SHIFT 0x0
+#define DMIF_P_VMID__P_VMID_PIPE0_MASK 0xf
+#define DMIF_P_VMID__P_VMID_PIPE0__SHIFT 0x0
+#define DMIF_P_VMID__P_VMID_PIPE1_MASK 0xf0
+#define DMIF_P_VMID__P_VMID_PIPE1__SHIFT 0x4
+#define DMIF_P_VMID__P_VMID_PIPE2_MASK 0xf00
+#define DMIF_P_VMID__P_VMID_PIPE2__SHIFT 0x8
+#define DMIF_P_VMID__P_VMID_PIPE3_MASK 0xf000
+#define DMIF_P_VMID__P_VMID_PIPE3__SHIFT 0xc
+#define DMIF_P_VMID__P_VMID_PIPE4_MASK 0xf0000
+#define DMIF_P_VMID__P_VMID_PIPE4__SHIFT 0x10
+#define DMIF_P_VMID__P_VMID_PIPE5_MASK 0xf00000
+#define DMIF_P_VMID__P_VMID_PIPE5__SHIFT 0x14
+#define DMIF_P_VMID__P_VMID_PIPE6_MASK 0xf000000
+#define DMIF_P_VMID__P_VMID_PIPE6__SHIFT 0x18
+#define DMIF_P_VMID__P_VMID_PIPE7_MASK 0xf0000000
+#define DMIF_P_VMID__P_VMID_PIPE7__SHIFT 0x1c
+#define DMIF_URG_OVERRIDE__DMIF_URG_OVERRIDE_EN_MASK 0x1
+#define DMIF_URG_OVERRIDE__DMIF_URG_OVERRIDE_EN__SHIFT 0x0
+#define DMIF_URG_OVERRIDE__DMIF_URG_OVERRIDE_LEVEL_MASK 0xf0
+#define DMIF_URG_OVERRIDE__DMIF_URG_OVERRIDE_LEVEL__SHIFT 0x4
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_INDEX_MASK 0xff
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DMIF_TEST_DEBUG_INDEX__DMIF_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DMIF_TEST_DEBUG_DATA__DMIF_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DMIF_TEST_DEBUG_DATA__DMIF_TEST_DEBUG_DATA__SHIFT 0x0
+#define DMIF_DEBUG02_CORE0__DB_DATA_MASK 0xffff
+#define DMIF_DEBUG02_CORE0__DB_DATA__SHIFT 0x0
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNT_EN_MASK 0x10000
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNT_EN__SHIFT 0x10
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNTER_MASK 0xffe0000
+#define DMIF_DEBUG02_CORE0__MC_RDRET_COUNTER__SHIFT 0x11
+#define DMIF_DEBUG02_CORE1__DB_DATA_MASK 0xffff
+#define DMIF_DEBUG02_CORE1__DB_DATA__SHIFT 0x0
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNT_EN_MASK 0x10000
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNT_EN__SHIFT 0x10
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNTER_MASK 0xffe0000
+#define DMIF_DEBUG02_CORE1__MC_RDRET_COUNTER__SHIFT 0x11
+#define DMIF_ADDR_CALC__ADDR_CONFIG_PIPE_INTERLEAVE_SIZE_MASK 0x70
+#define DMIF_ADDR_CALC__ADDR_CONFIG_PIPE_INTERLEAVE_SIZE__SHIFT 0x4
+#define DMIF_ADDR_CALC__ADDR_CONFIG_ROW_SIZE_MASK 0x30000000
+#define DMIF_ADDR_CALC__ADDR_CONFIG_ROW_SIZE__SHIFT 0x1c
+#define DMIF_STATUS2__DMIF_PIPE0_DISPCLK_STATUS_MASK 0x1
+#define DMIF_STATUS2__DMIF_PIPE0_DISPCLK_STATUS__SHIFT 0x0
+#define DMIF_STATUS2__DMIF_PIPE1_DISPCLK_STATUS_MASK 0x2
+#define DMIF_STATUS2__DMIF_PIPE1_DISPCLK_STATUS__SHIFT 0x1
+#define DMIF_STATUS2__DMIF_PIPE2_DISPCLK_STATUS_MASK 0x4
+#define DMIF_STATUS2__DMIF_PIPE2_DISPCLK_STATUS__SHIFT 0x2
+#define DMIF_STATUS2__DMIF_PIPE3_DISPCLK_STATUS_MASK 0x8
+#define DMIF_STATUS2__DMIF_PIPE3_DISPCLK_STATUS__SHIFT 0x3
+#define DMIF_STATUS2__DMIF_PIPE4_DISPCLK_STATUS_MASK 0x10
+#define DMIF_STATUS2__DMIF_PIPE4_DISPCLK_STATUS__SHIFT 0x4
+#define DMIF_STATUS2__DMIF_PIPE5_DISPCLK_STATUS_MASK 0x20
+#define DMIF_STATUS2__DMIF_PIPE5_DISPCLK_STATUS__SHIFT 0x5
+#define DMIF_STATUS2__DMIF_CHUNK_TRACKER_SCLK_STATUS_MASK 0x100
+#define DMIF_STATUS2__DMIF_CHUNK_TRACKER_SCLK_STATUS__SHIFT 0x8
+#define DMIF_STATUS2__DMIF_FBC_TRACKER_SCLK_STATUS_MASK 0x200
+#define DMIF_STATUS2__DMIF_FBC_TRACKER_SCLK_STATUS__SHIFT 0x9
+#define PIPE0_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE0_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define PIPE1_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE1_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define PIPE2_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE2_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define PIPE3_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE3_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define PIPE4_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE4_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define PIPE5_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE5_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define PIPE6_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE6_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define PIPE7_MAX_REQUESTS__MAX_REQUESTS_MASK 0x3ff
+#define PIPE7_MAX_REQUESTS__MAX_REQUESTS__SHIFT 0x0
+#define DVMM_REG_RD_STATUS__DVMM_REG_RD_STATUS_MASK 0x1
+#define DVMM_REG_RD_STATUS__DVMM_REG_RD_STATUS__SHIFT 0x0
+#define DVMM_REG_RD_DATA__DVMM_REG_RD_DATA_MASK 0xffffffff
+#define DVMM_REG_RD_DATA__DVMM_REG_RD_DATA__SHIFT 0x0
+#define DVMM_PTE_REQ__MAX_PTEREQ_TO_ISSUE_MASK 0xff
+#define DVMM_PTE_REQ__MAX_PTEREQ_TO_ISSUE__SHIFT 0x0
+#define DVMM_PTE_REQ__HFLIP_PTEREQ_PER_CHUNK_INT_MASK 0xff00
+#define DVMM_PTE_REQ__HFLIP_PTEREQ_PER_CHUNK_INT__SHIFT 0x8
+#define DVMM_PTE_REQ__HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER_MASK 0x3f0000
+#define DVMM_PTE_REQ__HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER__SHIFT 0x10
+#define DVMM_CNTL__PDE_CACHE_INVALIDATE_CNTL_MASK 0x3
+#define DVMM_CNTL__PDE_CACHE_INVALIDATE_CNTL__SHIFT 0x0
+#define DVMM_CNTL__DEBUG_SYSTEM_ACCESS_MODE_MASK 0x30
+#define DVMM_CNTL__DEBUG_SYSTEM_ACCESS_MODE__SHIFT 0x4
+#define DVMM_CNTL__FORCE_SYSTEM_ACCESS_MODE_MASK 0x80
+#define DVMM_CNTL__FORCE_SYSTEM_ACCESS_MODE__SHIFT 0x7
+#define DVMM_CNTL__DBG_DCE_VMID_MASK 0xf00
+#define DVMM_CNTL__DBG_DCE_VMID__SHIFT 0x8
+#define DVMM_CNTL__FORCE_DBG_DCE_VMID_MASK 0x8000
+#define DVMM_CNTL__FORCE_DBG_DCE_VMID__SHIFT 0xf
+#define DVMM_CNTL__OVERRIDE_SNOOP_MASK 0x20000
+#define DVMM_CNTL__OVERRIDE_SNOOP__SHIFT 0x11
+#define DVMM_CNTL__ENABLE_PDE_INVALIDATE_MASK 0x40000
+#define DVMM_CNTL__ENABLE_PDE_INVALIDATE__SHIFT 0x12
+#define DVMM_FAULT_STATUS__DVMM_FAULT_STATUS_MASK 0xffffffff
+#define DVMM_FAULT_STATUS__DVMM_FAULT_STATUS__SHIFT 0x0
+#define DVMM_FAULT_ADDR__DVMM_FAULT_ADDR_MASK 0xffffffff
+#define DVMM_FAULT_ADDR__DVMM_FAULT_ADDR__SHIFT 0x0
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ENABLE_MASK 0x1
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ENABLE__SHIFT 0x0
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_MODE_MASK 0x18
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_MODE__SHIFT 0x3
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_PIPES_MASK 0xe0
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_PIPES__SHIFT 0x5
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_BANKS_MASK 0x700
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_NUM_BANKS__SHIFT 0x8
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE_MASK 0x800
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE__SHIFT 0xb
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROW_SIZE_MASK 0x7000
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROW_SIZE__SHIFT 0xc
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROWS_PER_CHAN_MASK 0xfff0000
+#define LOW_POWER_TILING_CONTROL__LOW_POWER_TILING_ROWS_PER_CHAN__SHIFT 0x10
+#define MCIF_CONTROL__MCIF_BUFF_SIZE_MASK 0x3
+#define MCIF_CONTROL__MCIF_BUFF_SIZE__SHIFT 0x0
+#define MCIF_CONTROL__ADDRESS_TRANSLATION_ENABLE_MASK 0x10
+#define MCIF_CONTROL__ADDRESS_TRANSLATION_ENABLE__SHIFT 0x4
+#define MCIF_CONTROL__PRIVILEGED_ACCESS_ENABLE_MASK 0x100
+#define MCIF_CONTROL__PRIVILEGED_ACCESS_ENABLE__SHIFT 0x8
+#define MCIF_CONTROL__MCIF_SLOW_REQ_INTERVAL_MASK 0xf000
+#define MCIF_CONTROL__MCIF_SLOW_REQ_INTERVAL__SHIFT 0xc
+#define MCIF_CONTROL__LOW_READ_URG_LEVEL_MASK 0xff0000
+#define MCIF_CONTROL__LOW_READ_URG_LEVEL__SHIFT 0x10
+#define MCIF_CONTROL__MC_CLEAN_DEASSERT_LATENCY_MASK 0x3f000000
+#define MCIF_CONTROL__MC_CLEAN_DEASSERT_LATENCY__SHIFT 0x18
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_ENABLE_MASK 0x40000000
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_ENABLE__SHIFT 0x1e
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_URGENT_ONLY_MASK 0x80000000
+#define MCIF_CONTROL__MCIF_MC_LATENCY_COUNTER_URGENT_ONLY__SHIFT 0x1f
+#define MCIF_WRITE_COMBINE_CONTROL__MCIF_WRITE_COMBINE_TIMEOUT_MASK 0xff
+#define MCIF_WRITE_COMBINE_CONTROL__MCIF_WRITE_COMBINE_TIMEOUT__SHIFT 0x0
+#define MCIF_WRITE_COMBINE_CONTROL__VIP_WRITE_COMBINE_TIMEOUT_MASK 0xff00
+#define MCIF_WRITE_COMBINE_CONTROL__VIP_WRITE_COMBINE_TIMEOUT__SHIFT 0x8
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_INDEX_MASK 0xff
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define MCIF_TEST_DEBUG_INDEX__MCIF_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MCIF_TEST_DEBUG_DATA__MCIF_TEST_DEBUG_DATA_MASK 0xffffffff
+#define MCIF_TEST_DEBUG_DATA__MCIF_TEST_DEBUG_DATA__SHIFT 0x0
+#define IDDCCIF02_DBG_DCCIF_C__DBG_DCCIF_C_MASK 0xffffffff
+#define IDDCCIF02_DBG_DCCIF_C__DBG_DCCIF_C__SHIFT 0x0
+#define IDDCCIF04_DBG_DCCIF_E__DBG_DCCIF_E_MASK 0xffffffff
+#define IDDCCIF04_DBG_DCCIF_E__DBG_DCCIF_E__SHIFT 0x0
+#define IDDCCIF05_DBG_DCCIF_F__DBG_DCCIF_F_MASK 0xffffffff
+#define IDDCCIF05_DBG_DCCIF_F__DBG_DCCIF_F__SHIFT 0x0
+#define MCIF_VMID__MCIF_WR_VMID_MASK 0xf
+#define MCIF_VMID__MCIF_WR_VMID__SHIFT 0x0
+#define MCIF_VMID__VIP_WR_VMID_MASK 0xf0
+#define MCIF_VMID__VIP_WR_VMID__SHIFT 0x4
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE_DIS_MASK 0x1
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE_DIS__SHIFT 0x0
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE_MASK 0x30
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_MODE__SHIFT 0x4
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_SIZE_MASK 0xff00
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_SIZE__SHIFT 0x8
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_PIPE_MASK 0x70000
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_PIPE__SHIFT 0x10
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_TYPE_MASK 0x180000
+#define MCIF_MEM_CONTROL__MCIFMEM_CACHE_TYPE__SHIFT 0x13
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS_MASK 0x7e
+#define CC_DC_PIPE_DIS__DC_PIPE_DIS__SHIFT 0x1
+#define CC_DC_PIPE_DIS__DC_UNDERLAY_PIPE_DIS_MASK 0x3f0000
+#define CC_DC_PIPE_DIS__DC_UNDERLAY_PIPE_DIS__SHIFT 0x10
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_OCCURRED_MASK 0x1
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_OCCURRED__SHIFT 0x0
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_CLEAR_MASK 0x10
+#define MC_DC_INTERFACE_NACK_STATUS__DMIF_RDRET_NACK_CLEAR__SHIFT 0x4
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_OCCURRED_MASK 0x100
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_OCCURRED__SHIFT 0x8
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_CLEAR_MASK 0x1000
+#define MC_DC_INTERFACE_NACK_STATUS__VIP_WRRET_NACK_CLEAR__SHIFT 0xc
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_OCCURRED_MASK 0x10000
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_OCCURRED__SHIFT 0x10
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_CLEAR_MASK 0x100000
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_RDRET_NACK_CLEAR__SHIFT 0x14
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_OCCURRED_MASK 0x1000000
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_OCCURRED__SHIFT 0x18
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_CLEAR_MASK 0x10000000
+#define MC_DC_INTERFACE_NACK_STATUS__MCIF_WRRET_NACK_CLEAR__SHIFT 0x1c
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_DELAY_MASK 0xfffff
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_DELAY__SHIFT 0x0
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_TO_REQ_HOLD_MASK 0xfff00000
+#define RBBMIF_TIMEOUT__RBBMIF_TIMEOUT_TO_REQ_HOLD__SHIFT 0x14
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_CLIENTS_DEC_MASK 0xffff
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_CLIENTS_DEC__SHIFT 0x0
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_OP_MASK 0x10000000
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_OP__SHIFT 0x1c
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_RDWR_STATUS_MASK 0x20000000
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_RDWR_STATUS__SHIFT 0x1d
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_ACK_MASK 0x40000000
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_ACK__SHIFT 0x1e
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_MASK_MASK 0x80000000
+#define RBBMIF_STATUS__RBBMIF_TIMEOUT_MASK__SHIFT 0x1f
+#define RBBMIF_TIMEOUT_DIS__CLIENT0_TIMEOUT_DIS_MASK 0x1
+#define RBBMIF_TIMEOUT_DIS__CLIENT0_TIMEOUT_DIS__SHIFT 0x0
+#define RBBMIF_TIMEOUT_DIS__CLIENT1_TIMEOUT_DIS_MASK 0x2
+#define RBBMIF_TIMEOUT_DIS__CLIENT1_TIMEOUT_DIS__SHIFT 0x1
+#define RBBMIF_TIMEOUT_DIS__CLIENT2_TIMEOUT_DIS_MASK 0x4
+#define RBBMIF_TIMEOUT_DIS__CLIENT2_TIMEOUT_DIS__SHIFT 0x2
+#define RBBMIF_TIMEOUT_DIS__CLIENT3_TIMEOUT_DIS_MASK 0x8
+#define RBBMIF_TIMEOUT_DIS__CLIENT3_TIMEOUT_DIS__SHIFT 0x3
+#define RBBMIF_TIMEOUT_DIS__CLIENT4_TIMEOUT_DIS_MASK 0x10
+#define RBBMIF_TIMEOUT_DIS__CLIENT4_TIMEOUT_DIS__SHIFT 0x4
+#define RBBMIF_TIMEOUT_DIS__CLIENT5_TIMEOUT_DIS_MASK 0x20
+#define RBBMIF_TIMEOUT_DIS__CLIENT5_TIMEOUT_DIS__SHIFT 0x5
+#define RBBMIF_TIMEOUT_DIS__CLIENT6_TIMEOUT_DIS_MASK 0x40
+#define RBBMIF_TIMEOUT_DIS__CLIENT6_TIMEOUT_DIS__SHIFT 0x6
+#define RBBMIF_TIMEOUT_DIS__CLIENT7_TIMEOUT_DIS_MASK 0x80
+#define RBBMIF_TIMEOUT_DIS__CLIENT7_TIMEOUT_DIS__SHIFT 0x7
+#define RBBMIF_TIMEOUT_DIS__CLIENT8_TIMEOUT_DIS_MASK 0x100
+#define RBBMIF_TIMEOUT_DIS__CLIENT8_TIMEOUT_DIS__SHIFT 0x8
+#define RBBMIF_TIMEOUT_DIS__CLIENT9_TIMEOUT_DIS_MASK 0x200
+#define RBBMIF_TIMEOUT_DIS__CLIENT9_TIMEOUT_DIS__SHIFT 0x9
+#define RBBMIF_TIMEOUT_DIS__CLIENT10_TIMEOUT_DIS_MASK 0x400
+#define RBBMIF_TIMEOUT_DIS__CLIENT10_TIMEOUT_DIS__SHIFT 0xa
+#define RBBMIF_TIMEOUT_DIS__CLIENT11_TIMEOUT_DIS_MASK 0x800
+#define RBBMIF_TIMEOUT_DIS__CLIENT11_TIMEOUT_DIS__SHIFT 0xb
+#define RBBMIF_TIMEOUT_DIS__CLIENT12_TIMEOUT_DIS_MASK 0x1000
+#define RBBMIF_TIMEOUT_DIS__CLIENT12_TIMEOUT_DIS__SHIFT 0xc
+#define RBBMIF_TIMEOUT_DIS__CLIENT13_TIMEOUT_DIS_MASK 0x2000
+#define RBBMIF_TIMEOUT_DIS__CLIENT13_TIMEOUT_DIS__SHIFT 0xd
+#define RBBMIF_TIMEOUT_DIS__CLIENT14_TIMEOUT_DIS_MASK 0x4000
+#define RBBMIF_TIMEOUT_DIS__CLIENT14_TIMEOUT_DIS__SHIFT 0xe
+#define RBBMIF_TIMEOUT_DIS__CLIENT15_TIMEOUT_DIS_MASK 0x8000
+#define RBBMIF_TIMEOUT_DIS__CLIENT15_TIMEOUT_DIS__SHIFT 0xf
+#define RBBMIF_STATUS_FLAG__RBBMIF_STATE_MASK 0x3
+#define RBBMIF_STATUS_FLAG__RBBMIF_STATE__SHIFT 0x0
+#define RBBMIF_STATUS_FLAG__RBBMIF_READ_TIMEOUT_MASK 0x10
+#define RBBMIF_STATUS_FLAG__RBBMIF_READ_TIMEOUT__SHIFT 0x4
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_EMPTY_MASK 0x20
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_EMPTY__SHIFT 0x5
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_FULL_MASK 0x40
+#define RBBMIF_STATUS_FLAG__RBBMIF_FIFO_FULL__SHIFT 0x6
+#define DCI_MEM_PWR_STATUS__DMIF_RDREQ_MEM1_PWR_STATE_MASK 0x3
+#define DCI_MEM_PWR_STATUS__DMIF_RDREQ_MEM1_PWR_STATE__SHIFT 0x0
+#define DCI_MEM_PWR_STATUS__DMIF_RDREQ_MEM2_PWR_STATE_MASK 0xc
+#define DCI_MEM_PWR_STATUS__DMIF_RDREQ_MEM2_PWR_STATE__SHIFT 0x2
+#define DCI_MEM_PWR_STATUS__MCIF_RDREQ_MEM_PWR_STATE_MASK 0x10
+#define DCI_MEM_PWR_STATUS__MCIF_RDREQ_MEM_PWR_STATE__SHIFT 0x4
+#define DCI_MEM_PWR_STATUS__MCIF_WRREQ_MEM_PWR_STATE_MASK 0x40
+#define DCI_MEM_PWR_STATUS__MCIF_WRREQ_MEM_PWR_STATE__SHIFT 0x6
+#define DCI_MEM_PWR_STATUS__VGA_MEM_PWR_STATE_MASK 0x100
+#define DCI_MEM_PWR_STATUS__VGA_MEM_PWR_STATE__SHIFT 0x8
+#define DCI_MEM_PWR_STATUS__DMCU_ERAM_MEM_PWR_STATE_MASK 0x600
+#define DCI_MEM_PWR_STATUS__DMCU_ERAM_MEM_PWR_STATE__SHIFT 0x9
+#define DCI_MEM_PWR_STATUS__DMCU_IRAM_MEM_PWR_STATE_MASK 0x800
+#define DCI_MEM_PWR_STATUS__DMCU_IRAM_MEM_PWR_STATE__SHIFT 0xb
+#define DCI_MEM_PWR_STATUS__FBC_MEM_PWR_STATE_MASK 0x3000
+#define DCI_MEM_PWR_STATUS__FBC_MEM_PWR_STATE__SHIFT 0xc
+#define DCI_MEM_PWR_STATUS__MCIF_MEM_PWR_STATE_MASK 0xc000
+#define DCI_MEM_PWR_STATUS__MCIF_MEM_PWR_STATE__SHIFT 0xe
+#define DCI_MEM_PWR_STATUS__VIP_MEM_PWR_STATE_MASK 0x400000
+#define DCI_MEM_PWR_STATUS__VIP_MEM_PWR_STATE__SHIFT 0x16
+#define DCI_MEM_PWR_STATUS__DMIF0_ASYNC_MEM_PWR_STATE_MASK 0x3000000
+#define DCI_MEM_PWR_STATUS__DMIF0_ASYNC_MEM_PWR_STATE__SHIFT 0x18
+#define DCI_MEM_PWR_STATUS__DMIF0_DATA_MEM_PWR_STATE_MASK 0xc000000
+#define DCI_MEM_PWR_STATUS__DMIF0_DATA_MEM_PWR_STATE__SHIFT 0x1a
+#define DCI_MEM_PWR_STATUS__DMIF0_CHUNK_MEM_PWR_STATE_MASK 0x10000000
+#define DCI_MEM_PWR_STATUS__DMIF0_CHUNK_MEM_PWR_STATE__SHIFT 0x1c
+#define DCI_MEM_PWR_STATUS__DMIF_RDREQ_MEM3_PWR_STATE_MASK 0xc0000000
+#define DCI_MEM_PWR_STATUS__DMIF_RDREQ_MEM3_PWR_STATE__SHIFT 0x1e
+#define DCI_MEM_PWR_STATUS2__DMIF1_ASYNC_MEM_PWR_STATE_MASK 0x3
+#define DCI_MEM_PWR_STATUS2__DMIF1_ASYNC_MEM_PWR_STATE__SHIFT 0x0
+#define DCI_MEM_PWR_STATUS2__DMIF1_DATA_MEM_PWR_STATE_MASK 0xc
+#define DCI_MEM_PWR_STATUS2__DMIF1_DATA_MEM_PWR_STATE__SHIFT 0x2
+#define DCI_MEM_PWR_STATUS2__DMIF1_CHUNK_MEM_PWR_STATE_MASK 0x10
+#define DCI_MEM_PWR_STATUS2__DMIF1_CHUNK_MEM_PWR_STATE__SHIFT 0x4
+#define DCI_MEM_PWR_STATUS2__DMIF2_ASYNC_MEM_PWR_STATE_MASK 0x60
+#define DCI_MEM_PWR_STATUS2__DMIF2_ASYNC_MEM_PWR_STATE__SHIFT 0x5
+#define DCI_MEM_PWR_STATUS2__DMIF2_DATA_MEM_PWR_STATE_MASK 0x180
+#define DCI_MEM_PWR_STATUS2__DMIF2_DATA_MEM_PWR_STATE__SHIFT 0x7
+#define DCI_MEM_PWR_STATUS2__DMIF2_CHUNK_MEM_PWR_STATE_MASK 0x200
+#define DCI_MEM_PWR_STATUS2__DMIF2_CHUNK_MEM_PWR_STATE__SHIFT 0x9
+#define DCI_MEM_PWR_STATUS2__DMIF3_ASYNC_MEM_PWR_STATE_MASK 0xc00
+#define DCI_MEM_PWR_STATUS2__DMIF3_ASYNC_MEM_PWR_STATE__SHIFT 0xa
+#define DCI_MEM_PWR_STATUS2__DMIF3_DATA_MEM_PWR_STATE_MASK 0x3000
+#define DCI_MEM_PWR_STATUS2__DMIF3_DATA_MEM_PWR_STATE__SHIFT 0xc
+#define DCI_MEM_PWR_STATUS2__DMIF3_CHUNK_MEM_PWR_STATE_MASK 0x4000
+#define DCI_MEM_PWR_STATUS2__DMIF3_CHUNK_MEM_PWR_STATE__SHIFT 0xe
+#define DCI_MEM_PWR_STATUS2__DMIF4_ASYNC_MEM_PWR_STATE_MASK 0x18000
+#define DCI_MEM_PWR_STATUS2__DMIF4_ASYNC_MEM_PWR_STATE__SHIFT 0xf
+#define DCI_MEM_PWR_STATUS2__DMIF4_DATA_MEM_PWR_STATE_MASK 0x60000
+#define DCI_MEM_PWR_STATUS2__DMIF4_DATA_MEM_PWR_STATE__SHIFT 0x11
+#define DCI_MEM_PWR_STATUS2__DMIF4_CHUNK_MEM_PWR_STATE_MASK 0x80000
+#define DCI_MEM_PWR_STATUS2__DMIF4_CHUNK_MEM_PWR_STATE__SHIFT 0x13
+#define DCI_MEM_PWR_STATUS2__DMIF5_ASYNC_MEM_PWR_STATE_MASK 0x300000
+#define DCI_MEM_PWR_STATUS2__DMIF5_ASYNC_MEM_PWR_STATE__SHIFT 0x14
+#define DCI_MEM_PWR_STATUS2__DMIF5_DATA_MEM_PWR_STATE_MASK 0xc00000
+#define DCI_MEM_PWR_STATUS2__DMIF5_DATA_MEM_PWR_STATE__SHIFT 0x16
+#define DCI_MEM_PWR_STATUS2__DMIF5_CHUNK_MEM_PWR_STATE_MASK 0x1000000
+#define DCI_MEM_PWR_STATUS2__DMIF5_CHUNK_MEM_PWR_STATE__SHIFT 0x18
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_LUMA_MEM0_PWR_STATE_MASK 0x3
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_LUMA_MEM0_PWR_STATE__SHIFT 0x0
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_LUMA_MEM1_PWR_STATE_MASK 0xc
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_LUMA_MEM1_PWR_STATE__SHIFT 0x2
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_CHROMA_MEM0_PWR_STATE_MASK 0x30
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_CHROMA_MEM0_PWR_STATE__SHIFT 0x4
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_CHROMA_MEM1_PWR_STATE_MASK 0xc0
+#define DCI_MEM_PWR_STATUS3__MCIF_DWB_CHROMA_MEM1_PWR_STATE__SHIFT 0x6
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_LUMA_MEM0_PWR_STATE_MASK 0x300
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_LUMA_MEM0_PWR_STATE__SHIFT 0x8
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_LUMA_MEM1_PWR_STATE_MASK 0xc00
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_LUMA_MEM1_PWR_STATE__SHIFT 0xa
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_CHROMA_MEM0_PWR_STATE_MASK 0x3000
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_CHROMA_MEM0_PWR_STATE__SHIFT 0xc
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_CHROMA_MEM1_PWR_STATE_MASK 0xc000
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB0_CHROMA_MEM1_PWR_STATE__SHIFT 0xe
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_LUMA_MEM0_PWR_STATE_MASK 0x30000
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_LUMA_MEM0_PWR_STATE__SHIFT 0x10
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_LUMA_MEM1_PWR_STATE_MASK 0xc0000
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_LUMA_MEM1_PWR_STATE__SHIFT 0x12
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_CHROMA_MEM0_PWR_STATE_MASK 0x300000
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_CHROMA_MEM0_PWR_STATE__SHIFT 0x14
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_CHROMA_MEM1_PWR_STATE_MASK 0xc00000
+#define DCI_MEM_PWR_STATUS3__MCIF_CWB1_CHROMA_MEM1_PWR_STATE__SHIFT 0x16
+#define DCI_CLK_CNTL__DCI_TEST_CLK_SEL_MASK 0x1f
+#define DCI_CLK_CNTL__DCI_TEST_CLK_SEL__SHIFT 0x0
+#define DCI_CLK_CNTL__DISPCLK_R_DCI_GATE_DIS_MASK 0x20
+#define DCI_CLK_CNTL__DISPCLK_R_DCI_GATE_DIS__SHIFT 0x5
+#define DCI_CLK_CNTL__DISPCLK_M_GATE_DIS_MASK 0x40
+#define DCI_CLK_CNTL__DISPCLK_M_GATE_DIS__SHIFT 0x6
+#define DCI_CLK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS_MASK 0x80
+#define DCI_CLK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS__SHIFT 0x7
+#define DCI_CLK_CNTL__SCLK_R_AZ_GATE_DIS_MASK 0x100
+#define DCI_CLK_CNTL__SCLK_R_AZ_GATE_DIS__SHIFT 0x8
+#define DCI_CLK_CNTL__DISPCLK_G_FBC_GATE_DIS_MASK 0x200
+#define DCI_CLK_CNTL__DISPCLK_G_FBC_GATE_DIS__SHIFT 0x9
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV1_L_GATE_DIS_MASK 0x400
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV1_L_GATE_DIS__SHIFT 0xa
+#define DCI_CLK_CNTL__DISPCLK_G_VGA_GATE_DIS_MASK 0x800
+#define DCI_CLK_CNTL__DISPCLK_G_VGA_GATE_DIS__SHIFT 0xb
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV1_C_GATE_DIS_MASK 0x1000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV1_C_GATE_DIS__SHIFT 0xc
+#define DCI_CLK_CNTL__DISPCLK_G_VIP_GATE_DIS_MASK 0x2000
+#define DCI_CLK_CNTL__DISPCLK_G_VIP_GATE_DIS__SHIFT 0xd
+#define DCI_CLK_CNTL__VPCLK_POL_MASK 0x4000
+#define DCI_CLK_CNTL__VPCLK_POL__SHIFT 0xe
+#define DCI_CLK_CNTL__DISPCLK_G_DMCU_GATE_DIS_MASK 0x8000
+#define DCI_CLK_CNTL__DISPCLK_G_DMCU_GATE_DIS__SHIFT 0xf
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF0_GATE_DIS_MASK 0x10000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF0_GATE_DIS__SHIFT 0x10
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF1_GATE_DIS_MASK 0x20000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF1_GATE_DIS__SHIFT 0x11
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF2_GATE_DIS_MASK 0x40000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF2_GATE_DIS__SHIFT 0x12
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF3_GATE_DIS_MASK 0x80000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF3_GATE_DIS__SHIFT 0x13
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF4_GATE_DIS_MASK 0x100000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF4_GATE_DIS__SHIFT 0x14
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF5_GATE_DIS_MASK 0x200000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIF5_GATE_DIS__SHIFT 0x15
+#define DCI_CLK_CNTL__SCLK_G_DMIF_GATE_DIS_MASK 0x400000
+#define DCI_CLK_CNTL__SCLK_G_DMIF_GATE_DIS__SHIFT 0x16
+#define DCI_CLK_CNTL__SCLK_G_DMIFTRK_GATE_DIS_MASK 0x800000
+#define DCI_CLK_CNTL__SCLK_G_DMIFTRK_GATE_DIS__SHIFT 0x17
+#define DCI_CLK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS_MASK 0x1000000
+#define DCI_CLK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS__SHIFT 0x18
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV0_L_GATE_DIS_MASK 0x2000000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV0_L_GATE_DIS__SHIFT 0x19
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV0_C_GATE_DIS_MASK 0x4000000
+#define DCI_CLK_CNTL__DISPCLK_G_DMIFV0_C_GATE_DIS__SHIFT 0x1a
+#define DCI_CLK_CNTL__DCI_PG_TEST_CLK_SEL_MASK 0xf8000000
+#define DCI_CLK_CNTL__DCI_PG_TEST_CLK_SEL__SHIFT 0x1b
+#define DCI_CLK_RAMP_CNTL__DISPCLK_G_MCIF_DWB_GATE_DIS_MASK 0x1
+#define DCI_CLK_RAMP_CNTL__DISPCLK_G_MCIF_DWB_GATE_DIS__SHIFT 0x0
+#define DCI_CLK_RAMP_CNTL__SCLK_G_MCIF_DWB_GATE_DIS_MASK 0x2
+#define DCI_CLK_RAMP_CNTL__SCLK_G_MCIF_DWB_GATE_DIS__SHIFT 0x1
+#define DCI_CLK_RAMP_CNTL__DISPCLK_G_MCIF_CWB0_GATE_DIS_MASK 0x4
+#define DCI_CLK_RAMP_CNTL__DISPCLK_G_MCIF_CWB0_GATE_DIS__SHIFT 0x2
+#define DCI_CLK_RAMP_CNTL__SCLK_G_MCIF_CWB0_GATE_DIS_MASK 0x8
+#define DCI_CLK_RAMP_CNTL__SCLK_G_MCIF_CWB0_GATE_DIS__SHIFT 0x3
+#define DCI_CLK_RAMP_CNTL__DISPCLK_G_MCIF_CWB1_GATE_DIS_MASK 0x10
+#define DCI_CLK_RAMP_CNTL__DISPCLK_G_MCIF_CWB1_GATE_DIS__SHIFT 0x4
+#define DCI_CLK_RAMP_CNTL__SCLK_G_MCIF_CWB1_GATE_DIS_MASK 0x80000000
+#define DCI_CLK_RAMP_CNTL__SCLK_G_MCIF_CWB1_GATE_DIS__SHIFT 0x1f
+#define DCI_MEM_PWR_CNTL__DMIF_RDREQ_MEM_PWR_FORCE_MASK 0x3
+#define DCI_MEM_PWR_CNTL__DMIF_RDREQ_MEM_PWR_FORCE__SHIFT 0x0
+#define DCI_MEM_PWR_CNTL__DMIF_RDREQ_MEM_PWR_DIS_MASK 0x4
+#define DCI_MEM_PWR_CNTL__DMIF_RDREQ_MEM_PWR_DIS__SHIFT 0x2
+#define DCI_MEM_PWR_CNTL__MCIF_RDREQ_MEM_PWR_FORCE_MASK 0x8
+#define DCI_MEM_PWR_CNTL__MCIF_RDREQ_MEM_PWR_FORCE__SHIFT 0x3
+#define DCI_MEM_PWR_CNTL__MCIF_RDREQ_MEM_PWR_DIS_MASK 0x10
+#define DCI_MEM_PWR_CNTL__MCIF_RDREQ_MEM_PWR_DIS__SHIFT 0x4
+#define DCI_MEM_PWR_CNTL__MCIF_WRREQ_MEM_PWR_FORCE_MASK 0x20
+#define DCI_MEM_PWR_CNTL__MCIF_WRREQ_MEM_PWR_FORCE__SHIFT 0x5
+#define DCI_MEM_PWR_CNTL__MCIF_WRREQ_MEM_PWR_DIS_MASK 0x40
+#define DCI_MEM_PWR_CNTL__MCIF_WRREQ_MEM_PWR_DIS__SHIFT 0x6
+#define DCI_MEM_PWR_CNTL__VGA_MEM_PWR_FORCE_MASK 0x80
+#define DCI_MEM_PWR_CNTL__VGA_MEM_PWR_FORCE__SHIFT 0x7
+#define DCI_MEM_PWR_CNTL__VGA_MEM_PWR_DIS_MASK 0x100
+#define DCI_MEM_PWR_CNTL__VGA_MEM_PWR_DIS__SHIFT 0x8
+#define DCI_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_FORCE_MASK 0x600
+#define DCI_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_FORCE__SHIFT 0x9
+#define DCI_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_DIS_MASK 0x800
+#define DCI_MEM_PWR_CNTL__DMCU_ERAM_MEM_PWR_DIS__SHIFT 0xb
+#define DCI_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_FORCE_MASK 0x1000
+#define DCI_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_FORCE__SHIFT 0xc
+#define DCI_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_DIS_MASK 0x2000
+#define DCI_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_DIS__SHIFT 0xd
+#define DCI_MEM_PWR_CNTL__FBC_MEM_PWR_FORCE_MASK 0xc000
+#define DCI_MEM_PWR_CNTL__FBC_MEM_PWR_FORCE__SHIFT 0xe
+#define DCI_MEM_PWR_CNTL__FBC_MEM_PWR_DIS_MASK 0x10000
+#define DCI_MEM_PWR_CNTL__FBC_MEM_PWR_DIS__SHIFT 0x10
+#define DCI_MEM_PWR_CNTL__MCIF_MEM_PWR_FORCE_MASK 0x60000
+#define DCI_MEM_PWR_CNTL__MCIF_MEM_PWR_FORCE__SHIFT 0x11
+#define DCI_MEM_PWR_CNTL__MCIF_MEM_PWR_DIS_MASK 0x80000
+#define DCI_MEM_PWR_CNTL__MCIF_MEM_PWR_DIS__SHIFT 0x13
+#define DCI_MEM_PWR_CNTL__MCIF_DWB_MEM_PWR_FORCE_MASK 0x300000
+#define DCI_MEM_PWR_CNTL__MCIF_DWB_MEM_PWR_FORCE__SHIFT 0x14
+#define DCI_MEM_PWR_CNTL__MCIF_DWB_MEM_PWR_DIS_MASK 0x400000
+#define DCI_MEM_PWR_CNTL__MCIF_DWB_MEM_PWR_DIS__SHIFT 0x16
+#define DCI_MEM_PWR_CNTL__MCIF_CWB0_MEM_PWR_FORCE_MASK 0x1800000
+#define DCI_MEM_PWR_CNTL__MCIF_CWB0_MEM_PWR_FORCE__SHIFT 0x17
+#define DCI_MEM_PWR_CNTL__MCIF_CWB0_MEM_PWR_DIS_MASK 0x2000000
+#define DCI_MEM_PWR_CNTL__MCIF_CWB0_MEM_PWR_DIS__SHIFT 0x19
+#define DCI_MEM_PWR_CNTL__MCIF_CWB1_MEM_PWR_FORCE_MASK 0xc000000
+#define DCI_MEM_PWR_CNTL__MCIF_CWB1_MEM_PWR_FORCE__SHIFT 0x1a
+#define DCI_MEM_PWR_CNTL__MCIF_CWB1_MEM_PWR_DIS_MASK 0x10000000
+#define DCI_MEM_PWR_CNTL__MCIF_CWB1_MEM_PWR_DIS__SHIFT 0x1c
+#define DCI_MEM_PWR_CNTL__VIP_MEM_PWR_FORCE_MASK 0x20000000
+#define DCI_MEM_PWR_CNTL__VIP_MEM_PWR_FORCE__SHIFT 0x1d
+#define DCI_MEM_PWR_CNTL__VIP_MEM_PWR_DIS_MASK 0x40000000
+#define DCI_MEM_PWR_CNTL__VIP_MEM_PWR_DIS__SHIFT 0x1e
+#define DCI_MEM_PWR_CNTL2__DMIF0_ASYNC_MEM_PWR_FORCE_MASK 0x3
+#define DCI_MEM_PWR_CNTL2__DMIF0_ASYNC_MEM_PWR_FORCE__SHIFT 0x0
+#define DCI_MEM_PWR_CNTL2__DMIF0_ASYNC_MEM_PWR_DIS_MASK 0x4
+#define DCI_MEM_PWR_CNTL2__DMIF0_ASYNC_MEM_PWR_DIS__SHIFT 0x2
+#define DCI_MEM_PWR_CNTL2__DMIF0_DATA_MEM_PWR_FORCE_MASK 0x18
+#define DCI_MEM_PWR_CNTL2__DMIF0_DATA_MEM_PWR_FORCE__SHIFT 0x3
+#define DCI_MEM_PWR_CNTL2__DMIF0_DATA_MEM_PWR_DIS_MASK 0x20
+#define DCI_MEM_PWR_CNTL2__DMIF0_DATA_MEM_PWR_DIS__SHIFT 0x5
+#define DCI_MEM_PWR_CNTL2__DMIF0_CHUNK_MEM_PWR_FORCE_MASK 0x40
+#define DCI_MEM_PWR_CNTL2__DMIF0_CHUNK_MEM_PWR_FORCE__SHIFT 0x6
+#define DCI_MEM_PWR_CNTL2__DMIF0_CHUNK_MEM_PWR_DIS_MASK 0x80
+#define DCI_MEM_PWR_CNTL2__DMIF0_CHUNK_MEM_PWR_DIS__SHIFT 0x7
+#define DCI_MEM_PWR_CNTL2__DMIF1_ASYNC_MEM_PWR_FORCE_MASK 0x300
+#define DCI_MEM_PWR_CNTL2__DMIF1_ASYNC_MEM_PWR_FORCE__SHIFT 0x8
+#define DCI_MEM_PWR_CNTL2__DMIF1_ASYNC_MEM_PWR_DIS_MASK 0x400
+#define DCI_MEM_PWR_CNTL2__DMIF1_ASYNC_MEM_PWR_DIS__SHIFT 0xa
+#define DCI_MEM_PWR_CNTL2__DMIF1_DATA_MEM_PWR_FORCE_MASK 0x1800
+#define DCI_MEM_PWR_CNTL2__DMIF1_DATA_MEM_PWR_FORCE__SHIFT 0xb
+#define DCI_MEM_PWR_CNTL2__DMIF1_DATA_MEM_PWR_DIS_MASK 0x2000
+#define DCI_MEM_PWR_CNTL2__DMIF1_DATA_MEM_PWR_DIS__SHIFT 0xd
+#define DCI_MEM_PWR_CNTL2__DMIF1_CHUNK_MEM_PWR_FORCE_MASK 0x4000
+#define DCI_MEM_PWR_CNTL2__DMIF1_CHUNK_MEM_PWR_FORCE__SHIFT 0xe
+#define DCI_MEM_PWR_CNTL2__DMIF1_CHUNK_MEM_PWR_DIS_MASK 0x8000
+#define DCI_MEM_PWR_CNTL2__DMIF1_CHUNK_MEM_PWR_DIS__SHIFT 0xf
+#define DCI_MEM_PWR_CNTL2__DMIF2_ASYNC_MEM_PWR_FORCE_MASK 0x30000
+#define DCI_MEM_PWR_CNTL2__DMIF2_ASYNC_MEM_PWR_FORCE__SHIFT 0x10
+#define DCI_MEM_PWR_CNTL2__DMIF2_ASYNC_MEM_PWR_DIS_MASK 0x40000
+#define DCI_MEM_PWR_CNTL2__DMIF2_ASYNC_MEM_PWR_DIS__SHIFT 0x12
+#define DCI_MEM_PWR_CNTL2__DMIF2_DATA_MEM_PWR_FORCE_MASK 0x180000
+#define DCI_MEM_PWR_CNTL2__DMIF2_DATA_MEM_PWR_FORCE__SHIFT 0x13
+#define DCI_MEM_PWR_CNTL2__DMIF2_DATA_MEM_PWR_DIS_MASK 0x200000
+#define DCI_MEM_PWR_CNTL2__DMIF2_DATA_MEM_PWR_DIS__SHIFT 0x15
+#define DCI_MEM_PWR_CNTL2__DMIF2_CHUNK_MEM_PWR_FORCE_MASK 0x400000
+#define DCI_MEM_PWR_CNTL2__DMIF2_CHUNK_MEM_PWR_FORCE__SHIFT 0x16
+#define DCI_MEM_PWR_CNTL2__DMIF2_CHUNK_MEM_PWR_DIS_MASK 0x800000
+#define DCI_MEM_PWR_CNTL2__DMIF2_CHUNK_MEM_PWR_DIS__SHIFT 0x17
+#define DCI_MEM_PWR_CNTL2__DMIF3_ASYNC_MEM_PWR_FORCE_MASK 0x3000000
+#define DCI_MEM_PWR_CNTL2__DMIF3_ASYNC_MEM_PWR_FORCE__SHIFT 0x18
+#define DCI_MEM_PWR_CNTL2__DMIF3_ASYNC_MEM_PWR_DIS_MASK 0x4000000
+#define DCI_MEM_PWR_CNTL2__DMIF3_ASYNC_MEM_PWR_DIS__SHIFT 0x1a
+#define DCI_MEM_PWR_CNTL2__DMIF3_DATA_MEM_PWR_FORCE_MASK 0x18000000
+#define DCI_MEM_PWR_CNTL2__DMIF3_DATA_MEM_PWR_FORCE__SHIFT 0x1b
+#define DCI_MEM_PWR_CNTL2__DMIF3_DATA_MEM_PWR_DIS_MASK 0x20000000
+#define DCI_MEM_PWR_CNTL2__DMIF3_DATA_MEM_PWR_DIS__SHIFT 0x1d
+#define DCI_MEM_PWR_CNTL2__DMIF3_CHUNK_MEM_PWR_FORCE_MASK 0x40000000
+#define DCI_MEM_PWR_CNTL2__DMIF3_CHUNK_MEM_PWR_FORCE__SHIFT 0x1e
+#define DCI_MEM_PWR_CNTL2__DMIF3_CHUNK_MEM_PWR_DIS_MASK 0x80000000
+#define DCI_MEM_PWR_CNTL2__DMIF3_CHUNK_MEM_PWR_DIS__SHIFT 0x1f
+#define DCI_MEM_PWR_CNTL3__DMIF4_ASYNC_MEM_PWR_FORCE_MASK 0x3
+#define DCI_MEM_PWR_CNTL3__DMIF4_ASYNC_MEM_PWR_FORCE__SHIFT 0x0
+#define DCI_MEM_PWR_CNTL3__DMIF4_ASYNC_MEM_PWR_DIS_MASK 0x4
+#define DCI_MEM_PWR_CNTL3__DMIF4_ASYNC_MEM_PWR_DIS__SHIFT 0x2
+#define DCI_MEM_PWR_CNTL3__DMIF4_DATA_MEM_PWR_FORCE_MASK 0x18
+#define DCI_MEM_PWR_CNTL3__DMIF4_DATA_MEM_PWR_FORCE__SHIFT 0x3
+#define DCI_MEM_PWR_CNTL3__DMIF4_DATA_MEM_PWR_DIS_MASK 0x20
+#define DCI_MEM_PWR_CNTL3__DMIF4_DATA_MEM_PWR_DIS__SHIFT 0x5
+#define DCI_MEM_PWR_CNTL3__DMIF4_CHUNK_MEM_PWR_FORCE_MASK 0x40
+#define DCI_MEM_PWR_CNTL3__DMIF4_CHUNK_MEM_PWR_FORCE__SHIFT 0x6
+#define DCI_MEM_PWR_CNTL3__DMIF4_CHUNK_MEM_PWR_DIS_MASK 0x80
+#define DCI_MEM_PWR_CNTL3__DMIF4_CHUNK_MEM_PWR_DIS__SHIFT 0x7
+#define DCI_MEM_PWR_CNTL3__DMIF5_ASYNC_MEM_PWR_FORCE_MASK 0x300
+#define DCI_MEM_PWR_CNTL3__DMIF5_ASYNC_MEM_PWR_FORCE__SHIFT 0x8
+#define DCI_MEM_PWR_CNTL3__DMIF5_ASYNC_MEM_PWR_DIS_MASK 0x400
+#define DCI_MEM_PWR_CNTL3__DMIF5_ASYNC_MEM_PWR_DIS__SHIFT 0xa
+#define DCI_MEM_PWR_CNTL3__DMIF5_DATA_MEM_PWR_FORCE_MASK 0x1800
+#define DCI_MEM_PWR_CNTL3__DMIF5_DATA_MEM_PWR_FORCE__SHIFT 0xb
+#define DCI_MEM_PWR_CNTL3__DMIF5_DATA_MEM_PWR_DIS_MASK 0x2000
+#define DCI_MEM_PWR_CNTL3__DMIF5_DATA_MEM_PWR_DIS__SHIFT 0xd
+#define DCI_MEM_PWR_CNTL3__DMIF5_CHUNK_MEM_PWR_FORCE_MASK 0x4000
+#define DCI_MEM_PWR_CNTL3__DMIF5_CHUNK_MEM_PWR_FORCE__SHIFT 0xe
+#define DCI_MEM_PWR_CNTL3__DMIF5_CHUNK_MEM_PWR_DIS_MASK 0x8000
+#define DCI_MEM_PWR_CNTL3__DMIF5_CHUNK_MEM_PWR_DIS__SHIFT 0xf
+#define DCI_MEM_PWR_CNTL3__DMIF_RDREQ_MEM_PWR_MODE_SEL_MASK 0x30000
+#define DCI_MEM_PWR_CNTL3__DMIF_RDREQ_MEM_PWR_MODE_SEL__SHIFT 0x10
+#define DCI_MEM_PWR_CNTL3__DMIF_ASYNC_MEM_PWR_MODE_SEL_MASK 0xc0000
+#define DCI_MEM_PWR_CNTL3__DMIF_ASYNC_MEM_PWR_MODE_SEL__SHIFT 0x12
+#define DCI_MEM_PWR_CNTL3__DMIF_DATA_MEM_PWR_MODE_SEL_MASK 0x300000
+#define DCI_MEM_PWR_CNTL3__DMIF_DATA_MEM_PWR_MODE_SEL__SHIFT 0x14
+#define DCI_MEM_PWR_CNTL3__DMCU_ERAM_MEM_PWR_MODE_SEL_MASK 0x400000
+#define DCI_MEM_PWR_CNTL3__DMCU_ERAM_MEM_PWR_MODE_SEL__SHIFT 0x16
+#define DCI_MEM_PWR_CNTL3__FBC_MEM_PWR_MODE_SEL_MASK 0x1800000
+#define DCI_MEM_PWR_CNTL3__FBC_MEM_PWR_MODE_SEL__SHIFT 0x17
+#define DCI_MEM_PWR_CNTL3__MCIF_CWB0_MEM_PWR_MODE_SEL_MASK 0x6000000
+#define DCI_MEM_PWR_CNTL3__MCIF_CWB0_MEM_PWR_MODE_SEL__SHIFT 0x19
+#define DCI_MEM_PWR_CNTL3__MCIF_CWB1_MEM_PWR_MODE_SEL_MASK 0x18000000
+#define DCI_MEM_PWR_CNTL3__MCIF_CWB1_MEM_PWR_MODE_SEL__SHIFT 0x1b
+#define DCI_MEM_PWR_CNTL3__MCIF_DWB_MEM_PWR_MODE_SEL_MASK 0x60000000
+#define DCI_MEM_PWR_CNTL3__MCIF_DWB_MEM_PWR_MODE_SEL__SHIFT 0x1d
+#define DCI_MEM_PWR_CNTL4__MCIF_DWB_LUMA_MEM_EN_NUM_MASK 0x1
+#define DCI_MEM_PWR_CNTL4__MCIF_DWB_LUMA_MEM_EN_NUM__SHIFT 0x0
+#define DCI_MEM_PWR_CNTL4__MCIF_DWB_CHROMA_MEM_EN_NUM_MASK 0x2
+#define DCI_MEM_PWR_CNTL4__MCIF_DWB_CHROMA_MEM_EN_NUM__SHIFT 0x1
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB0_LUMA_MEM_EN_NUM_MASK 0x4
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB0_LUMA_MEM_EN_NUM__SHIFT 0x2
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB0_CHROMA_MEM_EN_NUM_MASK 0x8
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB0_CHROMA_MEM_EN_NUM__SHIFT 0x3
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB1_LUMA_MEM_EN_NUM_MASK 0x10
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB1_LUMA_MEM_EN_NUM__SHIFT 0x4
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB1_CHROMA_MEM_EN_NUM_MASK 0x20
+#define DCI_MEM_PWR_CNTL4__MCIF_CWB1_CHROMA_MEM_EN_NUM__SHIFT 0x5
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE0_MEM_PWR_FORCE_MASK 0x3
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE0_MEM_PWR_FORCE__SHIFT 0x0
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE0_MEM_PWR_DIS_MASK 0x4
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE0_MEM_PWR_DIS__SHIFT 0x2
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE1_MEM_PWR_FORCE_MASK 0x18
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE1_MEM_PWR_FORCE__SHIFT 0x3
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE1_MEM_PWR_DIS_MASK 0x20
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE1_MEM_PWR_DIS__SHIFT 0x5
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE2_MEM_PWR_FORCE_MASK 0xc0
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE2_MEM_PWR_FORCE__SHIFT 0x6
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE2_MEM_PWR_DIS_MASK 0x100
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE2_MEM_PWR_DIS__SHIFT 0x8
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE3_MEM_PWR_FORCE_MASK 0x600
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE3_MEM_PWR_FORCE__SHIFT 0x9
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE3_MEM_PWR_DIS_MASK 0x800
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE3_MEM_PWR_DIS__SHIFT 0xb
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE4_MEM_PWR_FORCE_MASK 0x3000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE4_MEM_PWR_FORCE__SHIFT 0xc
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE4_MEM_PWR_DIS_MASK 0x4000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE4_MEM_PWR_DIS__SHIFT 0xe
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE5_MEM_PWR_FORCE_MASK 0x18000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE5_MEM_PWR_FORCE__SHIFT 0xf
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE5_MEM_PWR_DIS_MASK 0x20000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE5_MEM_PWR_DIS__SHIFT 0x11
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE6_MEM_PWR_FORCE_MASK 0xc0000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE6_MEM_PWR_FORCE__SHIFT 0x12
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE6_MEM_PWR_DIS_MASK 0x100000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE6_MEM_PWR_DIS__SHIFT 0x14
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE7_MEM_PWR_FORCE_MASK 0x600000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE7_MEM_PWR_FORCE__SHIFT 0x15
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE7_MEM_PWR_DIS_MASK 0x800000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE7_MEM_PWR_DIS__SHIFT 0x17
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE_MEM_PWR_MODE_SEL_MASK 0x3000000
+#define DVMM_PTE_PGMEM_CONTROL__DVMM_PTE_MEM_PWR_MODE_SEL__SHIFT 0x18
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE0_PTE_PGMEM_STATE_MASK 0x3
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE0_PTE_PGMEM_STATE__SHIFT 0x0
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE1_PTE_PGMEM_STATE_MASK 0xc
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE1_PTE_PGMEM_STATE__SHIFT 0x2
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE2_PTE_PGMEM_STATE_MASK 0x30
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE2_PTE_PGMEM_STATE__SHIFT 0x4
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE3_PTE_PGMEM_STATE_MASK 0xc0
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE3_PTE_PGMEM_STATE__SHIFT 0x6
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE4_PTE_PGMEM_STATE_MASK 0x300
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE4_PTE_PGMEM_STATE__SHIFT 0x8
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE5_PTE_PGMEM_STATE_MASK 0xc00
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE5_PTE_PGMEM_STATE__SHIFT 0xa
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE6_PTE_PGMEM_STATE_MASK 0x3000
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE6_PTE_PGMEM_STATE__SHIFT 0xc
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE7_PTE_PGMEM_STATE_MASK 0xc000
+#define DVMM_PTE_PGMEM_STATE__DVMM_PIPE7_PTE_PGMEM_STATE__SHIFT 0xe
+#define DCI_SOFT_RESET__VGA_SOFT_RESET_MASK 0x1
+#define DCI_SOFT_RESET__VGA_SOFT_RESET__SHIFT 0x0
+#define DCI_SOFT_RESET__VIP_SOFT_RESET_MASK 0x2
+#define DCI_SOFT_RESET__VIP_SOFT_RESET__SHIFT 0x1
+#define DCI_SOFT_RESET__MCIF_SOFT_RESET_MASK 0x4
+#define DCI_SOFT_RESET__MCIF_SOFT_RESET__SHIFT 0x2
+#define DCI_SOFT_RESET__FBC_SOFT_RESET_MASK 0x8
+#define DCI_SOFT_RESET__FBC_SOFT_RESET__SHIFT 0x3
+#define DCI_SOFT_RESET__DMIF0_SOFT_RESET_MASK 0x10
+#define DCI_SOFT_RESET__DMIF0_SOFT_RESET__SHIFT 0x4
+#define DCI_SOFT_RESET__DMIF1_SOFT_RESET_MASK 0x20
+#define DCI_SOFT_RESET__DMIF1_SOFT_RESET__SHIFT 0x5
+#define DCI_SOFT_RESET__DMIF2_SOFT_RESET_MASK 0x40
+#define DCI_SOFT_RESET__DMIF2_SOFT_RESET__SHIFT 0x6
+#define DCI_SOFT_RESET__DMIF3_SOFT_RESET_MASK 0x80
+#define DCI_SOFT_RESET__DMIF3_SOFT_RESET__SHIFT 0x7
+#define DCI_SOFT_RESET__DMIF4_SOFT_RESET_MASK 0x100
+#define DCI_SOFT_RESET__DMIF4_SOFT_RESET__SHIFT 0x8
+#define DCI_SOFT_RESET__DMIF5_SOFT_RESET_MASK 0x200
+#define DCI_SOFT_RESET__DMIF5_SOFT_RESET__SHIFT 0x9
+#define DCI_SOFT_RESET__DCFEV0_L_SOFT_RESET_MASK 0x400
+#define DCI_SOFT_RESET__DCFEV0_L_SOFT_RESET__SHIFT 0xa
+#define DCI_SOFT_RESET__DCFEV0_C_SOFT_RESET_MASK 0x800
+#define DCI_SOFT_RESET__DCFEV0_C_SOFT_RESET__SHIFT 0xb
+#define DCI_SOFT_RESET__DCFEV1_L_SOFT_RESET_MASK 0x1000
+#define DCI_SOFT_RESET__DCFEV1_L_SOFT_RESET__SHIFT 0xc
+#define DCI_SOFT_RESET__DCFEV1_C_SOFT_RESET_MASK 0x2000
+#define DCI_SOFT_RESET__DCFEV1_C_SOFT_RESET__SHIFT 0xd
+#define DCI_SOFT_RESET__DMIFARB_SOFT_RESET_MASK 0x4000
+#define DCI_SOFT_RESET__DMIFARB_SOFT_RESET__SHIFT 0xe
+#define DCI_SOFT_RESET__MCIF_DWB_SOFT_RESET_MASK 0x10000
+#define DCI_SOFT_RESET__MCIF_DWB_SOFT_RESET__SHIFT 0x10
+#define DCI_SOFT_RESET__MCIF_CWB0_SOFT_RESET_MASK 0x20000
+#define DCI_SOFT_RESET__MCIF_CWB0_SOFT_RESET__SHIFT 0x11
+#define DCI_SOFT_RESET__MCIF_CWB1_SOFT_RESET_MASK 0x40000
+#define DCI_SOFT_RESET__MCIF_CWB1_SOFT_RESET__SHIFT 0x12
+#define DCI_SOFT_RESET__MCIF_WB_SOFT_RESET_MASK 0x80000
+#define DCI_SOFT_RESET__MCIF_WB_SOFT_RESET__SHIFT 0x13
+#define DCI_MISC__MCIF_WB_URG_OVRD_MASK 0x1
+#define DCI_MISC__MCIF_WB_URG_OVRD__SHIFT 0x0
+#define DCI_MISC__MCIF_WB_URG_LVL_MASK 0x1e
+#define DCI_MISC__MCIF_WB_URG_LVL__SHIFT 0x1
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_INDEX_MASK 0xff
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DCI_TEST_DEBUG_INDEX__DCI_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DCI_TEST_DEBUG_DATA__DCI_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DCI_TEST_DEBUG_DATA__DCI_TEST_DEBUG_DATA__SHIFT 0x0
+#define DCI_DEBUG_CONFIG__DCI_DBG_EN_MASK 0x1
+#define DCI_DEBUG_CONFIG__DCI_DBG_EN__SHIFT 0x0
+#define DCI_DEBUG_CONFIG__DCI_DBG_BLOCK_SEL_MASK 0xf0
+#define DCI_DEBUG_CONFIG__DCI_DBG_BLOCK_SEL__SHIFT 0x4
+#define DCI_DEBUG_CONFIG__DCI_DBG_CLOCK_SEL_MASK 0xf00
+#define DCI_DEBUG_CONFIG__DCI_DBG_CLOCK_SEL__SHIFT 0x8
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x7
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x0
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x10
+#define PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x4
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x7
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x0
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x10
+#define PIPE1_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x4
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x7
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x0
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x10
+#define PIPE2_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x4
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x7
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x0
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x10
+#define PIPE3_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x4
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x7
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x0
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x10
+#define PIPE4_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x4
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED_MASK 0x7
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT 0x0
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK 0x10
+#define PIPE5_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED__SHIFT 0x4
+#define DC_GENERICA__GENERICA_EN_MASK 0x1
+#define DC_GENERICA__GENERICA_EN__SHIFT 0x0
+#define DC_GENERICA__GENERICA_SEL_MASK 0xf80
+#define DC_GENERICA__GENERICA_SEL__SHIFT 0x7
+#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL_MASK 0xf000
+#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL__SHIFT 0xc
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL_MASK 0xf0000
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL__SHIFT 0x10
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL_MASK 0xf00000
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL__SHIFT 0x14
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL_MASK 0xf000000
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL__SHIFT 0x18
+#define DC_GENERICB__GENERICB_EN_MASK 0x1
+#define DC_GENERICB__GENERICB_EN__SHIFT 0x0
+#define DC_GENERICB__GENERICB_SEL_MASK 0xf00
+#define DC_GENERICB__GENERICB_SEL__SHIFT 0x8
+#define DC_GENERICB__GENERICB_UNIPHY_REFDIV_CLK_SEL_MASK 0xf000
+#define DC_GENERICB__GENERICB_UNIPHY_REFDIV_CLK_SEL__SHIFT 0xc
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_SEL_MASK 0xf0000
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_SEL__SHIFT 0x10
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_SSC_CLK_SEL_MASK 0xf00000
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_SSC_CLK_SEL__SHIFT 0x14
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_DIV2_SEL_MASK 0xf000000
+#define DC_GENERICB__GENERICB_UNIPHY_FBDIV_CLK_DIV2_SEL__SHIFT 0x18
+#define DC_PAD_EXTERN_SIG__DC_PAD_EXTERN_SIG_SEL_MASK 0xf
+#define DC_PAD_EXTERN_SIG__DC_PAD_EXTERN_SIG_SEL__SHIFT 0x0
+#define DC_PAD_EXTERN_SIG__MVP_PIXEL_SRC_STATUS_MASK 0x30
+#define DC_PAD_EXTERN_SIG__MVP_PIXEL_SRC_STATUS__SHIFT 0x4
+#define DC_REF_CLK_CNTL__HSYNCA_OUTPUT_SEL_MASK 0x3
+#define DC_REF_CLK_CNTL__HSYNCA_OUTPUT_SEL__SHIFT 0x0
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL_MASK 0x300
+#define DC_REF_CLK_CNTL__GENLK_CLK_OUTPUT_SEL__SHIFT 0x8
+#define DC_GPIO_DEBUG__DC_GPIO_VIP_DEBUG_MASK 0x1
+#define DC_GPIO_DEBUG__DC_GPIO_VIP_DEBUG__SHIFT 0x0
+#define DC_GPIO_DEBUG__DC_GPIO_MACRO_DEBUG_MASK 0x300
+#define DC_GPIO_DEBUG__DC_GPIO_MACRO_DEBUG__SHIFT 0x8
+#define DC_GPIO_DEBUG__DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL_MASK 0x10000
+#define DC_GPIO_DEBUG__DC_GPIO_CHIP_DEBUG_OUT_PIN_SEL__SHIFT 0x10
+#define DC_GPIO_DEBUG__DC_GPIO_DEBUG_BUS_FLOP_EN_MASK 0x20000
+#define DC_GPIO_DEBUG__DC_GPIO_DEBUG_BUS_FLOP_EN__SHIFT 0x11
+#define DC_GPIO_DEBUG__DPRX_LOOPBACK_ENABLE_MASK 0x80000000
+#define DC_GPIO_DEBUG__DPRX_LOOPBACK_ENABLE__SHIFT 0x1f
+#define UNIPHYA_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x1
+#define UNIPHYA_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYA_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x10
+#define UNIPHYA_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYA_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYA_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYA_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYA_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYA_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYA_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYB_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x1
+#define UNIPHYB_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYB_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x10
+#define UNIPHYB_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYB_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYB_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYB_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYB_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYB_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYB_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYC_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x1
+#define UNIPHYC_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYC_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x10
+#define UNIPHYC_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYC_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYC_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYC_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYC_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYC_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYC_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYC_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYD_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x1
+#define UNIPHYD_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYD_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x10
+#define UNIPHYD_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYD_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYD_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYD_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYD_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYD_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYD_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYD_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYE_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x1
+#define UNIPHYE_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYE_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x10
+#define UNIPHYE_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYE_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYE_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYE_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYE_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYE_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYE_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYE_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYF_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x1
+#define UNIPHYF_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYF_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x10
+#define UNIPHYF_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYF_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYF_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYF_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYF_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYF_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYF_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYF_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYG_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x1
+#define UNIPHYG_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYG_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x10
+#define UNIPHYG_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYG_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYG_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYG_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYG_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYG_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYG_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYG_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYC_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYD_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYE_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYF_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYG_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_PFREQCHG_MASK 0x1
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_PFREQCHG__SHIFT 0x0
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_PIXVLD_RESET_MASK 0x10
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYLPA_LINK_CNTL__UNIPHYLP_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_PFREQCHG_MASK 0x1
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_PFREQCHG__SHIFT 0x0
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_PIXVLD_RESET_MASK 0x10
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x700
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL0_INVERT_MASK 0x1000
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL1_INVERT_MASK 0x2000
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL2_INVERT_MASK 0x4000
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL3_INVERT_MASK 0x8000
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_LANE_STAGGER_DELAY_MASK 0x700000
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_LINK_ENABLE_HPD_MASK_MASK 0x3000000
+#define UNIPHYLPB_LINK_CNTL__UNIPHYLP_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYLPA_CHANNEL_XBAR_CNTL__UNIPHYLP_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL0_XBAR_SOURCE_MASK 0x3
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL1_XBAR_SOURCE_MASK 0x300
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL2_XBAR_SOURCE_MASK 0x30000
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL3_XBAR_SOURCE_MASK 0x3000000
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_LINK_ENABLE_MASK 0x10000000
+#define UNIPHYLPB_CHANNEL_XBAR_CNTL__UNIPHYLP_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_ENABLE_LINKA_MASK 0x1
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_ENABLE_LINKA__SHIFT 0x0
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_CALOUT_LINKA_MASK 0x100
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_CALOUT_LINKA__SHIFT 0x8
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA_MASK 0x200
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA__SHIFT 0x9
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA_AK_MASK 0x400
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_CALOUT_ERROR_LINKA_AK__SHIFT 0xa
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_VALUE_LINKA_MASK 0xf0000
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_VALUE_LINKA__SHIFT 0x10
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_STEP_DELAY_LINKA_MASK 0xf00000
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_STEP_DELAY_LINKA__SHIFT 0x14
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_LINKA_MASK 0xf000000
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_LINKA__SHIFT 0x18
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKA_MASK 0x10000000
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKA__SHIFT 0x1c
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_SEL_LINKA_MASK 0x40000000
+#define UNIPHY_IMPCAL_LINKA__UNIPHY_IMPCAL_SEL_LINKA__SHIFT 0x1e
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_ENABLE_LINKB_MASK 0x1
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_ENABLE_LINKB__SHIFT 0x0
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_CALOUT_LINKB_MASK 0x100
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_CALOUT_LINKB__SHIFT 0x8
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB_MASK 0x200
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB__SHIFT 0x9
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB_AK_MASK 0x400
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_CALOUT_ERROR_LINKB_AK__SHIFT 0xa
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_VALUE_LINKB_MASK 0xf0000
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_VALUE_LINKB__SHIFT 0x10
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_STEP_DELAY_LINKB_MASK 0xf00000
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_STEP_DELAY_LINKB__SHIFT 0x14
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_LINKB_MASK 0xf000000
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_LINKB__SHIFT 0x18
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKB_MASK 0x10000000
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKB__SHIFT 0x1c
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_SEL_LINKB_MASK 0x40000000
+#define UNIPHY_IMPCAL_LINKB__UNIPHY_IMPCAL_SEL_LINKB__SHIFT 0x1e
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_ENABLE_LINKC_MASK 0x1
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_ENABLE_LINKC__SHIFT 0x0
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_CALOUT_LINKC_MASK 0x100
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_CALOUT_LINKC__SHIFT 0x8
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC_MASK 0x200
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC__SHIFT 0x9
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC_AK_MASK 0x400
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_CALOUT_ERROR_LINKC_AK__SHIFT 0xa
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_VALUE_LINKC_MASK 0xf0000
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_VALUE_LINKC__SHIFT 0x10
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_STEP_DELAY_LINKC_MASK 0xf00000
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_STEP_DELAY_LINKC__SHIFT 0x14
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_LINKC_MASK 0xf000000
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_LINKC__SHIFT 0x18
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKC_MASK 0x10000000
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKC__SHIFT 0x1c
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_SEL_LINKC_MASK 0x40000000
+#define UNIPHY_IMPCAL_LINKC__UNIPHY_IMPCAL_SEL_LINKC__SHIFT 0x1e
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_ENABLE_LINKD_MASK 0x1
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_ENABLE_LINKD__SHIFT 0x0
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_CALOUT_LINKD_MASK 0x100
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_CALOUT_LINKD__SHIFT 0x8
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD_MASK 0x200
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD__SHIFT 0x9
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD_AK_MASK 0x400
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_CALOUT_ERROR_LINKD_AK__SHIFT 0xa
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_VALUE_LINKD_MASK 0xf0000
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_VALUE_LINKD__SHIFT 0x10
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_STEP_DELAY_LINKD_MASK 0xf00000
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_STEP_DELAY_LINKD__SHIFT 0x14
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_LINKD_MASK 0xf000000
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_LINKD__SHIFT 0x18
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKD_MASK 0x10000000
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKD__SHIFT 0x1c
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_SEL_LINKD_MASK 0x40000000
+#define UNIPHY_IMPCAL_LINKD__UNIPHY_IMPCAL_SEL_LINKD__SHIFT 0x1e
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_ENABLE_LINKE_MASK 0x1
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_ENABLE_LINKE__SHIFT 0x0
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_CALOUT_LINKE_MASK 0x100
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_CALOUT_LINKE__SHIFT 0x8
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE_MASK 0x200
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE__SHIFT 0x9
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE_AK_MASK 0x400
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_CALOUT_ERROR_LINKE_AK__SHIFT 0xa
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_VALUE_LINKE_MASK 0xf0000
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_VALUE_LINKE__SHIFT 0x10
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_STEP_DELAY_LINKE_MASK 0xf00000
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_STEP_DELAY_LINKE__SHIFT 0x14
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_LINKE_MASK 0xf000000
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_LINKE__SHIFT 0x18
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKE_MASK 0x10000000
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKE__SHIFT 0x1c
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_SEL_LINKE_MASK 0x40000000
+#define UNIPHY_IMPCAL_LINKE__UNIPHY_IMPCAL_SEL_LINKE__SHIFT 0x1e
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_ENABLE_LINKF_MASK 0x1
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_ENABLE_LINKF__SHIFT 0x0
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_CALOUT_LINKF_MASK 0x100
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_CALOUT_LINKF__SHIFT 0x8
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF_MASK 0x200
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF__SHIFT 0x9
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF_AK_MASK 0x400
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_CALOUT_ERROR_LINKF_AK__SHIFT 0xa
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_VALUE_LINKF_MASK 0xf0000
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_VALUE_LINKF__SHIFT 0x10
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_STEP_DELAY_LINKF_MASK 0xf00000
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_STEP_DELAY_LINKF__SHIFT 0x14
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_LINKF_MASK 0xf000000
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_LINKF__SHIFT 0x18
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKF_MASK 0x10000000
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_OVERRIDE_ENABLE_LINKF__SHIFT 0x1c
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_SEL_LINKF_MASK 0x40000000
+#define UNIPHY_IMPCAL_LINKF__UNIPHY_IMPCAL_SEL_LINKF__SHIFT 0x1e
+#define UNIPHY_IMPCAL_PERIOD__UNIPHY_IMPCAL_PERIOD_MASK 0xffffffff
+#define UNIPHY_IMPCAL_PERIOD__UNIPHY_IMPCAL_PERIOD__SHIFT 0x0
+#define AUXP_IMPCAL__AUXP_IMPCAL_ENABLE_MASK 0x1
+#define AUXP_IMPCAL__AUXP_IMPCAL_ENABLE__SHIFT 0x0
+#define AUXP_IMPCAL__AUXP_IMPCAL_CALOUT_MASK 0x100
+#define AUXP_IMPCAL__AUXP_IMPCAL_CALOUT__SHIFT 0x8
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR_MASK 0x200
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR__SHIFT 0x9
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR_AK_MASK 0x400
+#define AUXP_IMPCAL__AUXP_CALOUT_ERROR_AK__SHIFT 0xa
+#define AUXP_IMPCAL__AUXP_IMPCAL_VALUE_MASK 0xf0000
+#define AUXP_IMPCAL__AUXP_IMPCAL_VALUE__SHIFT 0x10
+#define AUXP_IMPCAL__AUXP_IMPCAL_STEP_DELAY_MASK 0xf00000
+#define AUXP_IMPCAL__AUXP_IMPCAL_STEP_DELAY__SHIFT 0x14
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE_MASK 0xf000000
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE__SHIFT 0x18
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE_ENABLE_MASK 0x10000000
+#define AUXP_IMPCAL__AUXP_IMPCAL_OVERRIDE_ENABLE__SHIFT 0x1c
+#define AUXN_IMPCAL__AUXN_IMPCAL_ENABLE_MASK 0x1
+#define AUXN_IMPCAL__AUXN_IMPCAL_ENABLE__SHIFT 0x0
+#define AUXN_IMPCAL__AUXN_IMPCAL_CALOUT_MASK 0x100
+#define AUXN_IMPCAL__AUXN_IMPCAL_CALOUT__SHIFT 0x8
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR_MASK 0x200
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR__SHIFT 0x9
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR_AK_MASK 0x400
+#define AUXN_IMPCAL__AUXN_CALOUT_ERROR_AK__SHIFT 0xa
+#define AUXN_IMPCAL__AUXN_IMPCAL_VALUE_MASK 0xf0000
+#define AUXN_IMPCAL__AUXN_IMPCAL_VALUE__SHIFT 0x10
+#define AUXN_IMPCAL__AUXN_IMPCAL_STEP_DELAY_MASK 0xf00000
+#define AUXN_IMPCAL__AUXN_IMPCAL_STEP_DELAY__SHIFT 0x14
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE_MASK 0xf000000
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE__SHIFT 0x18
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE_ENABLE_MASK 0x10000000
+#define AUXN_IMPCAL__AUXN_IMPCAL_OVERRIDE_ENABLE__SHIFT 0x1c
+#define DCIO_IMPCAL_CNTL__CALR_CNTL_OVERRIDE_MASK 0xf
+#define DCIO_IMPCAL_CNTL__CALR_CNTL_OVERRIDE__SHIFT 0x0
+#define DCIO_IMPCAL_CNTL__IMPCAL_SOFT_RESET_MASK 0x20
+#define DCIO_IMPCAL_CNTL__IMPCAL_SOFT_RESET__SHIFT 0x5
+#define DCIO_IMPCAL_CNTL__IMPCAL_STATUS_MASK 0x300
+#define DCIO_IMPCAL_CNTL__IMPCAL_STATUS__SHIFT 0x8
+#define DCIO_IMPCAL_CNTL__IMPCAL_ARB_STATE_MASK 0x7000
+#define DCIO_IMPCAL_CNTL__IMPCAL_ARB_STATE__SHIFT 0xc
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_INTERVAL_MASK 0x78000
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_INTERVAL__SHIFT 0xf
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_BIASENTST_MASK 0x380000
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_BIASENTST__SHIFT 0x13
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_RESBIASEN_MASK 0x400000
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_RESBIASEN__SHIFT 0x16
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_SPARE_CONTROL_MASK 0x1800000
+#define DCIO_IMPCAL_CNTL__AUX_IMPCAL_SPARE_CONTROL__SHIFT 0x17
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKA_MASK 0x7fff
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKA__SHIFT 0x0
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKB_MASK 0x7fff0000
+#define UNIPHY_IMPCAL_PSW_AB__UNIPHY_IMPCAL_PSW_LINKB__SHIFT 0x10
+#define DCIO_IMPCAL_CNTL_CD__CALR_CNTL_OVERRIDE_MASK 0xf
+#define DCIO_IMPCAL_CNTL_CD__CALR_CNTL_OVERRIDE__SHIFT 0x0
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_SOFT_RESET_MASK 0x20
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_SOFT_RESET__SHIFT 0x5
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_STATUS_MASK 0x300
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_STATUS__SHIFT 0x8
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_ARB_STATE_MASK 0x7000
+#define DCIO_IMPCAL_CNTL_CD__IMPCAL_ARB_STATE__SHIFT 0xc
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKC_MASK 0x7fff
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKC__SHIFT 0x0
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKD_MASK 0x7fff0000
+#define UNIPHY_IMPCAL_PSW_CD__UNIPHY_IMPCAL_PSW_LINKD__SHIFT 0x10
+#define DCIO_IMPCAL_CNTL_EF__CALR_CNTL_OVERRIDE_MASK 0xf
+#define DCIO_IMPCAL_CNTL_EF__CALR_CNTL_OVERRIDE__SHIFT 0x0
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_SOFT_RESET_MASK 0x20
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_SOFT_RESET__SHIFT 0x5
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_STATUS_MASK 0x300
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_STATUS__SHIFT 0x8
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_ARB_STATE_MASK 0x7000
+#define DCIO_IMPCAL_CNTL_EF__IMPCAL_ARB_STATE__SHIFT 0xc
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKE_MASK 0x7fff
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKE__SHIFT 0x0
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKF_MASK 0x7fff0000
+#define UNIPHY_IMPCAL_PSW_EF__UNIPHY_IMPCAL_PSW_LINKF__SHIFT 0x10
+#define DCIO_WRCMD_DELAY__UNIPHY_DELAY_MASK 0xf
+#define DCIO_WRCMD_DELAY__UNIPHY_DELAY__SHIFT 0x0
+#define DCIO_WRCMD_DELAY__DAC_DELAY_MASK 0xf0
+#define DCIO_WRCMD_DELAY__DAC_DELAY__SHIFT 0x4
+#define DCIO_WRCMD_DELAY__DPHY_DELAY_MASK 0xf00
+#define DCIO_WRCMD_DELAY__DPHY_DELAY__SHIFT 0x8
+#define DCIO_WRCMD_DELAY__DCRXPHY_DELAY_MASK 0xf000
+#define DCIO_WRCMD_DELAY__DCRXPHY_DELAY__SHIFT 0xc
+#define DCIO_WRCMD_DELAY__ZCAL_DELAY_MASK 0xf0000
+#define DCIO_WRCMD_DELAY__ZCAL_DELAY__SHIFT 0x10
+#define DC_PINSTRAPS__DC_PINSTRAPS_BIF_CEC_DIS_MASK 0x400
+#define DC_PINSTRAPS__DC_PINSTRAPS_BIF_CEC_DIS__SHIFT 0xa
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD_MASK 0x2000
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD__SHIFT 0xd
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0xc000
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS_MASK 0x10000
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS__SHIFT 0x10
+#define DC_PINSTRAPS__DC_PINSTRAPS_CONNECTIVITY_MASK 0xe0000
+#define DC_PINSTRAPS__DC_PINSTRAPS_CONNECTIVITY__SHIFT 0x11
+#define DC_DVODATA_CONFIG__VIP_MUX_EN_MASK 0x80000
+#define DC_DVODATA_CONFIG__VIP_MUX_EN__SHIFT 0x13
+#define DC_DVODATA_CONFIG__VIP_ALTER_MAPPING_EN_MASK 0x100000
+#define DC_DVODATA_CONFIG__VIP_ALTER_MAPPING_EN__SHIFT 0x14
+#define DC_DVODATA_CONFIG__DVO_ALTER_MAPPING_EN_MASK 0x200000
+#define DC_DVODATA_CONFIG__DVO_ALTER_MAPPING_EN__SHIFT 0x15
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_EN_MASK 0x1
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_EN__SHIFT 0x0
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_DISABLE_SYNCEN_CONTROL_OF_TX_EN_MASK 0x2
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_DISABLE_SYNCEN_CONTROL_OF_TX_EN__SHIFT 0x1
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_TARGET_STATE_MASK 0x10
+#define LVTMA_PWRSEQ_CNTL__LVTMA_PWRSEQ_TARGET_STATE__SHIFT 0x4
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_MASK 0x100
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN__SHIFT 0x8
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_OVRD_MASK 0x200
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_OVRD__SHIFT 0x9
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_POL_MASK 0x400
+#define LVTMA_PWRSEQ_CNTL__LVTMA_SYNCEN_POL__SHIFT 0xa
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_MASK 0x10000
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON__SHIFT 0x10
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_OVRD_MASK 0x20000
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_OVRD__SHIFT 0x11
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_POL_MASK 0x40000
+#define LVTMA_PWRSEQ_CNTL__LVTMA_DIGON_POL__SHIFT 0x12
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_MASK 0x1000000
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON__SHIFT 0x18
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_OVRD_MASK 0x2000000
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_OVRD__SHIFT 0x19
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_POL_MASK 0x4000000
+#define LVTMA_PWRSEQ_CNTL__LVTMA_BLON_POL__SHIFT 0x1a
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_TARGET_STATE_R_MASK 0x1
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_TARGET_STATE_R__SHIFT 0x0
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DIGON_MASK 0x2
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DIGON__SHIFT 0x1
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_SYNCEN_MASK 0x4
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_SYNCEN__SHIFT 0x2
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_BLON_MASK 0x8
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_BLON__SHIFT 0x3
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DONE_MASK 0x10
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_DONE__SHIFT 0x4
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_STATE_MASK 0xf00
+#define LVTMA_PWRSEQ_STATE__LVTMA_PWRSEQ_STATE__SHIFT 0x8
+#define LVTMA_PWRSEQ_REF_DIV__LVTMA_PWRSEQ_REF_DIV_MASK 0xfff
+#define LVTMA_PWRSEQ_REF_DIV__LVTMA_PWRSEQ_REF_DIV__SHIFT 0x0
+#define LVTMA_PWRSEQ_REF_DIV__BL_PWM_REF_DIV_MASK 0xffff0000
+#define LVTMA_PWRSEQ_REF_DIV__BL_PWM_REF_DIV__SHIFT 0x10
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY1_MASK 0xff
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY1__SHIFT 0x0
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY2_MASK 0xff00
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRUP_DELAY2__SHIFT 0x8
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY1_MASK 0xff0000
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY1__SHIFT 0x10
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY2_MASK 0xff000000
+#define LVTMA_PWRSEQ_DELAY1__LVTMA_PWRDN_DELAY2__SHIFT 0x18
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_MIN_LENGTH_MASK 0xff
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_MIN_LENGTH__SHIFT 0x0
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRUP_DELAY3_MASK 0xff00
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRUP_DELAY3__SHIFT 0x8
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_DELAY3_MASK 0xff0000
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_PWRDN_DELAY3__SHIFT 0x10
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_VARY_BL_OVERRIDE_EN_MASK 0x1000000
+#define LVTMA_PWRSEQ_DELAY2__LVTMA_VARY_BL_OVERRIDE_EN__SHIFT 0x18
+#define BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT_MASK 0xffff
+#define BL_PWM_CNTL__BL_ACTIVE_INT_FRAC_CNT__SHIFT 0x0
+#define BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN_MASK 0x40000000
+#define BL_PWM_CNTL__BL_PWM_FRACTIONAL_EN__SHIFT 0x1e
+#define BL_PWM_CNTL__BL_PWM_EN_MASK 0x80000000
+#define BL_PWM_CNTL__BL_PWM_EN__SHIFT 0x1f
+#define BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE_MASK 0xffff
+#define BL_PWM_CNTL2__BL_PWM_POST_FRAME_START_DELAY_BEFORE_UPDATE__SHIFT 0x0
+#define BL_PWM_CNTL2__DBG_BL_PWM_INPUT_REFCLK_SELECT_MASK 0x30000000
+#define BL_PWM_CNTL2__DBG_BL_PWM_INPUT_REFCLK_SELECT__SHIFT 0x1c
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE_MASK 0x40000000
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_BL_OUT_ENABLE__SHIFT 0x1e
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN_MASK 0x80000000
+#define BL_PWM_CNTL2__BL_PWM_OVERRIDE_LVTMA_PWRSEQ_EN__SHIFT 0x1f
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_MASK 0xffff
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD__SHIFT 0x0
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT_MASK 0xf0000
+#define BL_PWM_PERIOD_CNTL__BL_PWM_PERIOD_BITCNT__SHIFT 0x10
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK_MASK 0x1
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_LOCK__SHIFT 0x0
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING_MASK 0x100
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_REG_UPDATE_PENDING__SHIFT 0x8
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START_MASK 0x10000
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_UPDATE_AT_FRAME_START__SHIFT 0x10
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_FRAME_START_DISP_SEL_MASK 0xe0000
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_FRAME_START_DISP_SEL__SHIFT 0x11
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN_MASK 0x1000000
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_READBACK_DB_REG_VALUE_EN__SHIFT 0x18
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN_MASK 0x80000000
+#define BL_PWM_GRP1_REG_LOCK__BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN__SHIFT 0x1f
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_TIMING_SYNC_SEL_MASK 0x3
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_TIMING_SYNC_SEL__SHIFT 0x0
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_LOCK_SEL_MASK 0x30
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_FLIP_LOCK_SEL__SHIFT 0x4
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK_MASK 0x300
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_CLK_GSL_MASK__SHIFT 0x8
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_TIMING_SYNC_SEL_MASK 0x30000
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_TIMING_SYNC_SEL__SHIFT 0x10
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_LOCK_SEL_MASK 0x300000
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_FLIP_LOCK_SEL__SHIFT 0x14
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK_MASK 0x3000000
+#define DCIO_GSL_GENLK_PAD_CNTL__DCIO_GENLK_VSYNC_GSL_MASK__SHIFT 0x18
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_TIMING_SYNC_SEL_MASK 0x3
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_TIMING_SYNC_SEL__SHIFT 0x0
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_LOCK_SEL_MASK 0x30
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_FLIP_LOCK_SEL__SHIFT 0x4
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK_MASK 0x300
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_A_GSL_MASK__SHIFT 0x8
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_TIMING_SYNC_SEL_MASK 0x30000
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_TIMING_SYNC_SEL__SHIFT 0x10
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_LOCK_SEL_MASK 0x300000
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_FLIP_LOCK_SEL__SHIFT 0x14
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK_MASK 0x3000000
+#define DCIO_GSL_SWAPLOCK_PAD_CNTL__DCIO_SWAPLOCK_B_GSL_MASK__SHIFT 0x18
+#define DCIO_GSL0_CNTL__DCIO_GSL0_VSYNC_SEL_MASK 0x7
+#define DCIO_GSL0_CNTL__DCIO_GSL0_VSYNC_SEL__SHIFT 0x0
+#define DCIO_GSL0_CNTL__DCIO_GSL0_TIMING_SYNC_SEL_MASK 0x700
+#define DCIO_GSL0_CNTL__DCIO_GSL0_TIMING_SYNC_SEL__SHIFT 0x8
+#define DCIO_GSL0_CNTL__DCIO_GSL0_GLOBAL_UNLOCK_SEL_MASK 0x70000
+#define DCIO_GSL0_CNTL__DCIO_GSL0_GLOBAL_UNLOCK_SEL__SHIFT 0x10
+#define DCIO_GSL1_CNTL__DCIO_GSL1_VSYNC_SEL_MASK 0x7
+#define DCIO_GSL1_CNTL__DCIO_GSL1_VSYNC_SEL__SHIFT 0x0
+#define DCIO_GSL1_CNTL__DCIO_GSL1_TIMING_SYNC_SEL_MASK 0x700
+#define DCIO_GSL1_CNTL__DCIO_GSL1_TIMING_SYNC_SEL__SHIFT 0x8
+#define DCIO_GSL1_CNTL__DCIO_GSL1_GLOBAL_UNLOCK_SEL_MASK 0x70000
+#define DCIO_GSL1_CNTL__DCIO_GSL1_GLOBAL_UNLOCK_SEL__SHIFT 0x10
+#define DCIO_GSL2_CNTL__DCIO_GSL2_VSYNC_SEL_MASK 0x7
+#define DCIO_GSL2_CNTL__DCIO_GSL2_VSYNC_SEL__SHIFT 0x0
+#define DCIO_GSL2_CNTL__DCIO_GSL2_TIMING_SYNC_SEL_MASK 0x700
+#define DCIO_GSL2_CNTL__DCIO_GSL2_TIMING_SYNC_SEL__SHIFT 0x8
+#define DCIO_GSL2_CNTL__DCIO_GSL2_GLOBAL_UNLOCK_SEL_MASK 0x70000
+#define DCIO_GSL2_CNTL__DCIO_GSL2_GLOBAL_UNLOCK_SEL__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE_MASK 0x7
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D1_V_UPDATE__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE_MASK 0x70
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D2_V_UPDATE__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE_MASK 0x700
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D3_V_UPDATE__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE_MASK 0x7000
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D4_V_UPDATE__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE_MASK 0x70000
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D5_V_UPDATE__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE_MASK 0x700000
+#define DC_GPU_TIMER_START_POSITION_V_UPDATE__DC_GPU_TIMER_START_POSITION_D6_V_UPDATE__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D1_P_FLIP_MASK 0x7
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D1_P_FLIP__SHIFT 0x0
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D2_P_FLIP_MASK 0x70
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D2_P_FLIP__SHIFT 0x4
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D3_P_FLIP_MASK 0x700
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D3_P_FLIP__SHIFT 0x8
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D4_P_FLIP_MASK 0x7000
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D4_P_FLIP__SHIFT 0xc
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D5_P_FLIP_MASK 0x70000
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D5_P_FLIP__SHIFT 0x10
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D6_P_FLIP_MASK 0x700000
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_D6_P_FLIP__SHIFT 0x14
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_DCFEV0_P_FLIP_MASK 0x3800000
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_DCFEV0_P_FLIP__SHIFT 0x17
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_DCFEV1_P_FLIP_MASK 0x1c000000
+#define DC_GPU_TIMER_START_POSITION_P_FLIP__DC_GPU_TIMER_START_POSITION_DCFEV1_P_FLIP__SHIFT 0x1a
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ_MASK 0xffffffff
+#define DC_GPU_TIMER_READ__DC_GPU_TIMER_READ__SHIFT 0x0
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT_MASK 0x3f
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_READ_SELECT__SHIFT 0x0
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM_MASK 0x700
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D1_VSYNC_NOM__SHIFT 0x8
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM_MASK 0x3800
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D2_VSYNC_NOM__SHIFT 0xb
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM_MASK 0x1c000
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D3_VSYNC_NOM__SHIFT 0xe
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM_MASK 0xe0000
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D4_VSYNC_NOM__SHIFT 0x11
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM_MASK 0x700000
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D5_VSYNC_NOM__SHIFT 0x14
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM_MASK 0x3800000
+#define DC_GPU_TIMER_READ_CNTL__DC_GPU_TIMER_START_POSITION_D6_VSYNC_NOM__SHIFT 0x17
+#define DCIO_CLOCK_CNTL__DCIO_TEST_CLK_SEL_MASK 0x1f
+#define DCIO_CLOCK_CNTL__DCIO_TEST_CLK_SEL__SHIFT 0x0
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS_MASK 0x20
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS__SHIFT 0x5
+#define DCIO_DEBUG__DCIO_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG__DCIO_DEBUG__SHIFT 0x0
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE0_EXT_VSYNC_MUX_MASK 0x7
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE0_EXT_VSYNC_MUX__SHIFT 0x0
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE1_EXT_VSYNC_MUX_MASK 0x70
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE1_EXT_VSYNC_MUX__SHIFT 0x4
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE2_EXT_VSYNC_MUX_MASK 0x700
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE2_EXT_VSYNC_MUX__SHIFT 0x8
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE3_EXT_VSYNC_MUX_MASK 0x7000
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE3_EXT_VSYNC_MUX__SHIFT 0xc
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE4_EXT_VSYNC_MUX_MASK 0x70000
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE4_EXT_VSYNC_MUX__SHIFT 0x10
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE5_EXT_VSYNC_MUX_MASK 0x700000
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_DCFE5_EXT_VSYNC_MUX__SHIFT 0x14
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_SWAPLOCKB_EXT_VSYNC_MASK_MASK 0x7000000
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_SWAPLOCKB_EXT_VSYNC_MASK__SHIFT 0x18
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_GENERICB_EXT_VSYNC_MASK_MASK 0x70000000
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_GENERICB_EXT_VSYNC_MASK__SHIFT 0x1c
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_CRTC_MANUAL_FLOW_CONTROL_MASK 0x80000000
+#define DCO_DCFE_EXT_VSYNC_CNTL__DCO_CRTC_MANUAL_FLOW_CONTROL__SHIFT 0x1f
+#define DBG_OUT_CNTL__DBG_OUT_PIN_EN_MASK 0x1
+#define DBG_OUT_CNTL__DBG_OUT_PIN_EN__SHIFT 0x0
+#define DBG_OUT_CNTL__DBG_OUT_PIN_SEL_MASK 0x10
+#define DBG_OUT_CNTL__DBG_OUT_PIN_SEL__SHIFT 0x4
+#define DBG_OUT_CNTL__DBG_OUT_12BIT_SEL_MASK 0x300
+#define DBG_OUT_CNTL__DBG_OUT_12BIT_SEL__SHIFT 0x8
+#define DBG_OUT_CNTL__DBG_OUT_TEST_DATA_MASK 0xfff000
+#define DBG_OUT_CNTL__DBG_OUT_TEST_DATA__SHIFT 0xc
+#define DCIO_DEBUG_CONFIG__DCIO_DBG_EN_MASK 0x1
+#define DCIO_DEBUG_CONFIG__DCIO_DBG_EN__SHIFT 0x0
+#define DCIO_DEBUG_CONFIG__DCIO_DBG_SEL_MASK 0xf00
+#define DCIO_DEBUG_CONFIG__DCIO_DBG_SEL__SHIFT 0x8
+#define DCIO_SOFT_RESET__UNIPHYA_SOFT_RESET_MASK 0x1
+#define DCIO_SOFT_RESET__UNIPHYA_SOFT_RESET__SHIFT 0x0
+#define DCIO_SOFT_RESET__DSYNCA_SOFT_RESET_MASK 0x2
+#define DCIO_SOFT_RESET__DSYNCA_SOFT_RESET__SHIFT 0x1
+#define DCIO_SOFT_RESET__UNIPHYB_SOFT_RESET_MASK 0x4
+#define DCIO_SOFT_RESET__UNIPHYB_SOFT_RESET__SHIFT 0x2
+#define DCIO_SOFT_RESET__DSYNCB_SOFT_RESET_MASK 0x8
+#define DCIO_SOFT_RESET__DSYNCB_SOFT_RESET__SHIFT 0x3
+#define DCIO_SOFT_RESET__UNIPHYC_SOFT_RESET_MASK 0x10
+#define DCIO_SOFT_RESET__UNIPHYC_SOFT_RESET__SHIFT 0x4
+#define DCIO_SOFT_RESET__DSYNCC_SOFT_RESET_MASK 0x20
+#define DCIO_SOFT_RESET__DSYNCC_SOFT_RESET__SHIFT 0x5
+#define DCIO_SOFT_RESET__UNIPHYD_SOFT_RESET_MASK 0x40
+#define DCIO_SOFT_RESET__UNIPHYD_SOFT_RESET__SHIFT 0x6
+#define DCIO_SOFT_RESET__DSYNCD_SOFT_RESET_MASK 0x80
+#define DCIO_SOFT_RESET__DSYNCD_SOFT_RESET__SHIFT 0x7
+#define DCIO_SOFT_RESET__UNIPHYE_SOFT_RESET_MASK 0x100
+#define DCIO_SOFT_RESET__UNIPHYE_SOFT_RESET__SHIFT 0x8
+#define DCIO_SOFT_RESET__DSYNCE_SOFT_RESET_MASK 0x200
+#define DCIO_SOFT_RESET__DSYNCE_SOFT_RESET__SHIFT 0x9
+#define DCIO_SOFT_RESET__UNIPHYF_SOFT_RESET_MASK 0x400
+#define DCIO_SOFT_RESET__UNIPHYF_SOFT_RESET__SHIFT 0xa
+#define DCIO_SOFT_RESET__DSYNCF_SOFT_RESET_MASK 0x800
+#define DCIO_SOFT_RESET__DSYNCF_SOFT_RESET__SHIFT 0xb
+#define DCIO_SOFT_RESET__UNIPHYG_SOFT_RESET_MASK 0x1000
+#define DCIO_SOFT_RESET__UNIPHYG_SOFT_RESET__SHIFT 0xc
+#define DCIO_SOFT_RESET__DSYNCG_SOFT_RESET_MASK 0x2000
+#define DCIO_SOFT_RESET__DSYNCG_SOFT_RESET__SHIFT 0xd
+#define DCIO_SOFT_RESET__DACA_SOFT_RESET_MASK 0x10000
+#define DCIO_SOFT_RESET__DACA_SOFT_RESET__SHIFT 0x10
+#define DCIO_SOFT_RESET__DCRXPHY_SOFT_RESET_MASK 0x100000
+#define DCIO_SOFT_RESET__DCRXPHY_SOFT_RESET__SHIFT 0x14
+#define DCIO_SOFT_RESET__DPHY_SOFT_RESET_MASK 0x1000000
+#define DCIO_SOFT_RESET__DPHY_SOFT_RESET__SHIFT 0x18
+#define DCIO_SOFT_RESET__ZCAL_SOFT_RESET_MASK 0x4000000
+#define DCIO_SOFT_RESET__ZCAL_SOFT_RESET__SHIFT 0x1a
+#define DCIO_SOFT_RESET__UNIPHYLPA_SOFT_RESET_MASK 0x10000000
+#define DCIO_SOFT_RESET__UNIPHYLPA_SOFT_RESET__SHIFT 0x1c
+#define DCIO_SOFT_RESET__DSYNCLPA_SOFT_RESET_MASK 0x20000000
+#define DCIO_SOFT_RESET__DSYNCLPA_SOFT_RESET__SHIFT 0x1d
+#define DCIO_SOFT_RESET__UNIPHYLPB_SOFT_RESET_MASK 0x40000000
+#define DCIO_SOFT_RESET__UNIPHYLPB_SOFT_RESET__SHIFT 0x1e
+#define DCIO_SOFT_RESET__DSYNCLPB_SOFT_RESET_MASK 0x80000000
+#define DCIO_SOFT_RESET__DSYNCLPB_SOFT_RESET__SHIFT 0x1f
+#define DCIO_DPHY_SEL__DPHY_LANE0_SEL_MASK 0x3
+#define DCIO_DPHY_SEL__DPHY_LANE0_SEL__SHIFT 0x0
+#define DCIO_DPHY_SEL__DPHY_LANE1_SEL_MASK 0xc
+#define DCIO_DPHY_SEL__DPHY_LANE1_SEL__SHIFT 0x2
+#define DCIO_DPHY_SEL__DPHY_LANE2_SEL_MASK 0x30
+#define DCIO_DPHY_SEL__DPHY_LANE2_SEL__SHIFT 0x4
+#define DCIO_DPHY_SEL__DPHY_LANE3_SEL_MASK 0xc0
+#define DCIO_DPHY_SEL__DPHY_LANE3_SEL__SHIFT 0x6
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXA_INT_TYPE_MASK 0x1
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXA_INT_TYPE__SHIFT 0x0
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXA_INT_MASK_MASK 0x2
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXA_INT_MASK__SHIFT 0x1
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXA_INT_OCCUR_MASK 0x4
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXA_INT_OCCUR__SHIFT 0x2
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXB_INT_TYPE_MASK 0x8
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXB_INT_TYPE__SHIFT 0x3
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXB_INT_MASK_MASK 0x10
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXB_INT_MASK__SHIFT 0x4
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXB_INT_OCCUR_MASK 0x20
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXB_INT_OCCUR__SHIFT 0x5
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXC_INT_TYPE_MASK 0x40
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXC_INT_TYPE__SHIFT 0x6
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXC_INT_MASK_MASK 0x80
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXC_INT_MASK__SHIFT 0x7
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXC_INT_OCCUR_MASK 0x100
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXC_INT_OCCUR__SHIFT 0x8
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXD_INT_TYPE_MASK 0x200
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXD_INT_TYPE__SHIFT 0x9
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXD_INT_MASK_MASK 0x400
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXD_INT_MASK__SHIFT 0xa
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXD_INT_OCCUR_MASK 0x800
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXD_INT_OCCUR__SHIFT 0xb
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXE_INT_TYPE_MASK 0x1000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXE_INT_TYPE__SHIFT 0xc
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXE_INT_MASK_MASK 0x2000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXE_INT_MASK__SHIFT 0xd
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXE_INT_OCCUR_MASK 0x4000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXE_INT_OCCUR__SHIFT 0xe
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXF_INT_TYPE_MASK 0x8000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXF_INT_TYPE__SHIFT 0xf
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXF_INT_MASK_MASK 0x10000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXF_INT_MASK__SHIFT 0x10
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXF_INT_OCCUR_MASK 0x20000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXF_INT_OCCUR__SHIFT 0x11
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXG_INT_TYPE_MASK 0x40000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXG_INT_TYPE__SHIFT 0x12
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXG_INT_MASK_MASK 0x80000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXG_INT_MASK__SHIFT 0x13
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXG_INT_OCCUR_MASK 0x100000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXG_INT_OCCUR__SHIFT 0x14
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPA_INT_TYPE_MASK 0x1000000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPA_INT_TYPE__SHIFT 0x18
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPA_INT_MASK_MASK 0x2000000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPA_INT_MASK__SHIFT 0x19
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPA_INT_OCCUR_MASK 0x4000000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPA_INT_OCCUR__SHIFT 0x1a
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPB_INT_TYPE_MASK 0x8000000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPB_INT_TYPE__SHIFT 0x1b
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPB_INT_MASK_MASK 0x10000000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPB_INT_MASK__SHIFT 0x1c
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPB_INT_OCCUR_MASK 0x20000000
+#define DCIO_DPCS_TX_INTERRUPT__DCIO_DPCS_TXLPB_INT_OCCUR__SHIFT 0x1d
+#define DCIO_DPCS_RX_INTERRUPT__DCIO_DPCS_RXA_INT_TYPE_MASK 0x1
+#define DCIO_DPCS_RX_INTERRUPT__DCIO_DPCS_RXA_INT_TYPE__SHIFT 0x0
+#define DCIO_DPCS_RX_INTERRUPT__DCIO_DPCS_RXA_INT_MASK_MASK 0x2
+#define DCIO_DPCS_RX_INTERRUPT__DCIO_DPCS_RXA_INT_MASK__SHIFT 0x1
+#define DCIO_DPCS_RX_INTERRUPT__DCIO_DPCS_RXA_INT_OCCUR_MASK 0x4
+#define DCIO_DPCS_RX_INTERRUPT__DCIO_DPCS_RXA_INT_OCCUR__SHIFT 0x2
+#define DCIO_SEMAPHORE0__DCIO_SEMAPHORE0_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE0__DCIO_SEMAPHORE0_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE0__DCIO_SEMAPHORE0_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE0__DCIO_SEMAPHORE0_GNT__SHIFT 0x10
+#define DCIO_SEMAPHORE1__DCIO_SEMAPHORE1_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE1__DCIO_SEMAPHORE1_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE1__DCIO_SEMAPHORE1_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE1__DCIO_SEMAPHORE1_GNT__SHIFT 0x10
+#define DCIO_SEMAPHORE2__DCIO_SEMAPHORE2_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE2__DCIO_SEMAPHORE2_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE2__DCIO_SEMAPHORE2_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE2__DCIO_SEMAPHORE2_GNT__SHIFT 0x10
+#define DCIO_SEMAPHORE3__DCIO_SEMAPHORE3_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE3__DCIO_SEMAPHORE3_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE3__DCIO_SEMAPHORE3_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE3__DCIO_SEMAPHORE3_GNT__SHIFT 0x10
+#define DCIO_SEMAPHORE4__DCIO_SEMAPHORE4_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE4__DCIO_SEMAPHORE4_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE4__DCIO_SEMAPHORE4_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE4__DCIO_SEMAPHORE4_GNT__SHIFT 0x10
+#define DCIO_SEMAPHORE5__DCIO_SEMAPHORE5_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE5__DCIO_SEMAPHORE5_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE5__DCIO_SEMAPHORE5_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE5__DCIO_SEMAPHORE5_GNT__SHIFT 0x10
+#define DCIO_SEMAPHORE6__DCIO_SEMAPHORE6_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE6__DCIO_SEMAPHORE6_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE6__DCIO_SEMAPHORE6_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE6__DCIO_SEMAPHORE6_GNT__SHIFT 0x10
+#define DCIO_SEMAPHORE7__DCIO_SEMAPHORE7_REQ_MASK 0xffff
+#define DCIO_SEMAPHORE7__DCIO_SEMAPHORE7_REQ__SHIFT 0x0
+#define DCIO_SEMAPHORE7__DCIO_SEMAPHORE7_GNT_MASK 0xffff0000
+#define DCIO_SEMAPHORE7__DCIO_SEMAPHORE7_GNT__SHIFT 0x10
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_INDEX_MASK 0xff
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DCIO_TEST_DEBUG_INDEX__DCIO_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DCIO_TEST_DEBUG_DATA__DCIO_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DCIO_TEST_DEBUG_DATA__DCIO_TEST_DEBUG_DATA__SHIFT 0x0
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_A0_REG_MASK 0x3
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_A0_REG__SHIFT 0x0
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_MASK_REG_MASK 0xc
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_MASK_REG__SHIFT 0x2
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_EN_REG_MASK 0x30
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_EN_REG__SHIFT 0x4
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_A0_MASK 0xc0
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_A0__SHIFT 0x6
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_SEL0_MASK 0x300
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_SEL0__SHIFT 0x8
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_EN_MASK 0xc00
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCNTL_EN__SHIFT 0xa
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCLK_C_MASK 0x1000
+#define DCIO_DEBUG1__DCO_DCIO_MVP_DVOCLK_C__SHIFT 0xc
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_A0_REG_MASK 0x2000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_A0_REG__SHIFT 0xd
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_A0_PREMUX_MASK 0x4000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_A0_PREMUX__SHIFT 0xe
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_A0_MASK 0x8000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_A0__SHIFT 0xf
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_EN_REG_MASK 0x10000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_EN_REG__SHIFT 0x10
+#define DCIO_DEBUG1__DCO_DCIO_DVO_HSYNC_TRISTATE_MASK 0x20000
+#define DCIO_DEBUG1__DCO_DCIO_DVO_HSYNC_TRISTATE__SHIFT 0x11
+#define DCIO_DEBUG1__DCO_DCIO_DVO_CLK_TRISTATE_MASK 0x40000
+#define DCIO_DEBUG1__DCO_DCIO_DVO_CLK_TRISTATE__SHIFT 0x12
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_EN_PREMUX_MASK 0x80000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_EN_PREMUX__SHIFT 0x13
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_EN_MASK 0x100000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_EN__SHIFT 0x14
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_MUX_MASK 0x200000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_MUX__SHIFT 0x15
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_MASK_REG_MASK 0x400000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_MASK_REG__SHIFT 0x16
+#define DCIO_DEBUG1__DCO_DCIO_DVO_ENABLE_MASK 0x800000
+#define DCIO_DEBUG1__DCO_DCIO_DVO_ENABLE__SHIFT 0x17
+#define DCIO_DEBUG1__DCO_DCIO_DVO_VSYNC_TRISTATE_MASK 0x1000000
+#define DCIO_DEBUG1__DCO_DCIO_DVO_VSYNC_TRISTATE__SHIFT 0x18
+#define DCIO_DEBUG1__DCO_DCIO_DVO_RATE_SEL_MASK 0x2000000
+#define DCIO_DEBUG1__DCO_DCIO_DVO_RATE_SEL__SHIFT 0x19
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_SEL0_PREMUX_MASK 0x4000000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_SEL0_PREMUX__SHIFT 0x1a
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_SEL0_MASK 0x8000000
+#define DCIO_DEBUG1__DCO_DCIO_DVOCNTL1_SEL0__SHIFT 0x1b
+#define DCIO_DEBUG2__DCIO_DEBUG2_MASK 0xffffffff
+#define DCIO_DEBUG2__DCIO_DEBUG2__SHIFT 0x0
+#define DCIO_DEBUG3__DCIO_DEBUG3_MASK 0xffffffff
+#define DCIO_DEBUG3__DCIO_DEBUG3__SHIFT 0x0
+#define DCIO_DEBUG4__DCIO_DEBUG4_MASK 0xffffffff
+#define DCIO_DEBUG4__DCIO_DEBUG4__SHIFT 0x0
+#define DCIO_DEBUG5__DCIO_DEBUG5_MASK 0xffffffff
+#define DCIO_DEBUG5__DCIO_DEBUG5__SHIFT 0x0
+#define DCIO_DEBUG6__DCIO_DEBUG6_MASK 0xffffffff
+#define DCIO_DEBUG6__DCIO_DEBUG6__SHIFT 0x0
+#define DCIO_DEBUG7__DCIO_DEBUG7_MASK 0xffffffff
+#define DCIO_DEBUG7__DCIO_DEBUG7__SHIFT 0x0
+#define DCIO_DEBUG8__DCIO_DEBUG8_MASK 0xffffffff
+#define DCIO_DEBUG8__DCIO_DEBUG8__SHIFT 0x0
+#define DCIO_DEBUG9__DCIO_DEBUG9_MASK 0xffffffff
+#define DCIO_DEBUG9__DCIO_DEBUG9__SHIFT 0x0
+#define DCIO_DEBUGA__DCIO_DEBUGA_MASK 0xffffffff
+#define DCIO_DEBUGA__DCIO_DEBUGA__SHIFT 0x0
+#define DCIO_DEBUGB__DCIO_DEBUGB_MASK 0xffffffff
+#define DCIO_DEBUGB__DCIO_DEBUGB__SHIFT 0x0
+#define DCIO_DEBUGC__DCIO_DEBUGC_MASK 0xffffffff
+#define DCIO_DEBUGC__DCIO_DEBUGC__SHIFT 0x0
+#define DCIO_DEBUGD__DCIO_DEBUGD_MASK 0xffffffff
+#define DCIO_DEBUGD__DCIO_DEBUGD__SHIFT 0x0
+#define DCIO_DEBUGE__DCIO_DIGA_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUGE__DCIO_DIGA_DEBUG__SHIFT 0x0
+#define DCIO_DEBUGF__DCIO_DIGB_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUGF__DCIO_DIGB_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG10__DCIO_DIGC_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG10__DCIO_DIGC_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG11__DCIO_DIGD_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG11__DCIO_DIGD_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG12__DCIO_DIGE_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG12__DCIO_DIGE_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG13__DCIO_DIGF_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG13__DCIO_DIGF_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG14__DCIO_DIGG_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG14__DCIO_DIGG_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG15__DCIO_DEBUG15_MASK 0xffffffff
+#define DCIO_DEBUG15__DCIO_DEBUG15__SHIFT 0x0
+#define DCIO_DEBUG16__DCIO_DEBUG16_MASK 0xffffffff
+#define DCIO_DEBUG16__DCIO_DEBUG16__SHIFT 0x0
+#define DCIO_DEBUG17__DCIO_DEBUG17_MASK 0xffffffff
+#define DCIO_DEBUG17__DCIO_DEBUG17__SHIFT 0x0
+#define DCIO_DEBUG18__DCIO_DEBUG18_MASK 0xffffffff
+#define DCIO_DEBUG18__DCIO_DEBUG18__SHIFT 0x0
+#define DCIO_DEBUG19__DCIO_DIGLPA_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG19__DCIO_DIGLPA_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG1A__DCIO_DIGLPB_DEBUG_MASK 0xffffffff
+#define DCIO_DEBUG1A__DCIO_DIGLPB_DEBUG__SHIFT 0x0
+#define DCIO_DEBUG1B__DCIO_DEBUGHPD_MASK 0xffffffff
+#define DCIO_DEBUG1B__DCIO_DEBUGHPD__SHIFT 0x0
+#define DCIO_DEBUG1C__DCIO_DEBUG_UNIPHYA_CFG_MASK 0xffffffff
+#define DCIO_DEBUG1C__DCIO_DEBUG_UNIPHYA_CFG__SHIFT 0x0
+#define DCIO_DEBUG1D__DCIO_DEBUG_UNIPHYB_CFG_MASK 0xffffffff
+#define DCIO_DEBUG1D__DCIO_DEBUG_UNIPHYB_CFG__SHIFT 0x0
+#define DCIO_DEBUG1E__DCIO_DEBUG_UNIPHYC_CFG_MASK 0xffffffff
+#define DCIO_DEBUG1E__DCIO_DEBUG_UNIPHYC_CFG__SHIFT 0x0
+#define DCIO_DEBUG1F__DCIO_DEBUG_UNIPHYD_CFG_MASK 0xffffffff
+#define DCIO_DEBUG1F__DCIO_DEBUG_UNIPHYD_CFG__SHIFT 0x0
+#define DCIO_DEBUG20__DCIO_DEBUG_UNIPHYE_CFG_MASK 0xffffffff
+#define DCIO_DEBUG20__DCIO_DEBUG_UNIPHYE_CFG__SHIFT 0x0
+#define DCIO_DEBUG21__DCIO_DEBUG_UNIPHYF_CFG_MASK 0xffffffff
+#define DCIO_DEBUG21__DCIO_DEBUG_UNIPHYF_CFG__SHIFT 0x0
+#define DCIO_DEBUG22__DCIO_DEBUG_UNIPHYG_CFG_MASK 0xffffffff
+#define DCIO_DEBUG22__DCIO_DEBUG_UNIPHYG_CFG__SHIFT 0x0
+#define DCIO_DEBUG23__DCIO_DEBUG_UNIPHYLPA_CFG_MASK 0xffffffff
+#define DCIO_DEBUG23__DCIO_DEBUG_UNIPHYLPA_CFG__SHIFT 0x0
+#define DCIO_DEBUG24__DCIO_DEBUG_UNIPHYLPB_CFG_MASK 0xffffffff
+#define DCIO_DEBUG24__DCIO_DEBUG_UNIPHYLPB_CFG__SHIFT 0x0
+#define DCIO_DEBUG25__DCIO_DEBUG_DCRXPHY_CFG_MASK 0xffffffff
+#define DCIO_DEBUG25__DCIO_DEBUG_DCRXPHY_CFG__SHIFT 0x0
+#define DCIO_DEBUG26__DCIO_DEBUG_DPHY_CFG_MASK 0xffffffff
+#define DCIO_DEBUG26__DCIO_DEBUG_DPHY_CFG__SHIFT 0x0
+#define DCIO_DEBUG27__DCIO_DEBUG_DACA_CFG_MASK 0xffffffff
+#define DCIO_DEBUG27__DCIO_DEBUG_DACA_CFG__SHIFT 0x0
+#define DCIO_DEBUG28__DCIO_DEBUG_ZCAL_CFG_MASK 0xffffffff
+#define DCIO_DEBUG28__DCIO_DEBUG_ZCAL_CFG__SHIFT 0x0
+#define DCIO_DEBUG_ID__DCIO_DEBUG_ID_MASK 0xffffffff
+#define DCIO_DEBUG_ID__DCIO_DEBUG_ID__SHIFT 0x0
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK_MASK 0x1
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_MASK__SHIFT 0x0
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS_MASK 0x2
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_PD_DIS__SHIFT 0x1
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV_MASK 0x4
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV__SHIFT 0x2
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV1_MASK 0x8
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICA_RECV1__SHIFT 0x3
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK_MASK 0x10
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_MASK__SHIFT 0x4
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS_MASK 0x20
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_PD_DIS__SHIFT 0x5
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV_MASK 0x40
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV__SHIFT 0x6
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV1_MASK 0x80
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICB_RECV1__SHIFT 0x7
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK_MASK 0x100
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_MASK__SHIFT 0x8
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS_MASK 0x200
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_PD_DIS__SHIFT 0x9
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV_MASK 0x400
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV__SHIFT 0xa
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV1_MASK 0x800
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICC_RECV1__SHIFT 0xb
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK_MASK 0x1000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_MASK__SHIFT 0xc
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS_MASK 0x2000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_PD_DIS__SHIFT 0xd
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV_MASK 0x4000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV__SHIFT 0xe
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV1_MASK 0x8000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICD_RECV1__SHIFT 0xf
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK_MASK 0x10000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_MASK__SHIFT 0x10
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS_MASK 0x20000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_PD_DIS__SHIFT 0x11
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV_MASK 0x40000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV__SHIFT 0x12
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV1_MASK 0x80000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICE_RECV1__SHIFT 0x13
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK_MASK 0x100000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_MASK__SHIFT 0x14
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS_MASK 0x200000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_PD_DIS__SHIFT 0x15
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV_MASK 0x400000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV__SHIFT 0x16
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV1_MASK 0x800000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICF_RECV1__SHIFT 0x17
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK_MASK 0x1000000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_MASK__SHIFT 0x18
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS_MASK 0x2000000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_PD_DIS__SHIFT 0x19
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV_MASK 0x4000000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV__SHIFT 0x1a
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV1_MASK 0x8000000
+#define DC_GPIO_GENERIC_MASK__DC_GPIO_GENERICG_RECV1__SHIFT 0x1b
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK 0x1
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A__SHIFT 0x0
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK 0x100
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A__SHIFT 0x8
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK 0x10000
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A__SHIFT 0x10
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK 0x100000
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A__SHIFT 0x14
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK 0x200000
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A__SHIFT 0x15
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK 0x400000
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A__SHIFT 0x16
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK 0x800000
+#define DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A__SHIFT 0x17
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN_MASK 0x1
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICA_EN__SHIFT 0x0
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN_MASK 0x100
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICB_EN__SHIFT 0x8
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN_MASK 0x10000
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICC_EN__SHIFT 0x10
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN_MASK 0x100000
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICD_EN__SHIFT 0x14
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN_MASK 0x200000
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICE_EN__SHIFT 0x15
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN_MASK 0x400000
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICF_EN__SHIFT 0x16
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN_MASK 0x800000
+#define DC_GPIO_GENERIC_EN__DC_GPIO_GENERICG_EN__SHIFT 0x17
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y_MASK 0x1
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICA_Y__SHIFT 0x0
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y_MASK 0x100
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICB_Y__SHIFT 0x8
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y_MASK 0x10000
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICC_Y__SHIFT 0x10
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y_MASK 0x100000
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICD_Y__SHIFT 0x14
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y_MASK 0x200000
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICE_Y__SHIFT 0x15
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y_MASK 0x400000
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICF_Y__SHIFT 0x16
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y_MASK 0x800000
+#define DC_GPIO_GENERIC_Y__DC_GPIO_GENERICG_Y__SHIFT 0x17
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK_MASK 0x1
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN_MASK 0x10
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV_MASK 0x40
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV1_MASK 0x80
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV1__SHIFT 0x7
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK_MASK 0x100
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN_MASK 0x1000
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV_MASK 0x4000
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV1_MASK 0x8000
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV1__SHIFT 0xf
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE_MASK 0x10000
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE__SHIFT 0x10
+#define DC_GPIO_DDC1_MASK__AUX1_POL_MASK 0x100000
+#define DC_GPIO_DDC1_MASK__AUX1_POL__SHIFT 0x14
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN_MASK 0x400000
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR_MASK 0xf000000
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR_MASK 0xf0000000
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK 0x1
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK 0x100
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK 0x1
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN_MASK 0x100
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y_MASK 0x1
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y_MASK 0x100
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK_MASK 0x1
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN_MASK 0x10
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV_MASK 0x40
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV1_MASK 0x80
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV1__SHIFT 0x7
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK_MASK 0x100
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN_MASK 0x1000
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV_MASK 0x4000
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV1_MASK 0x8000
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV1__SHIFT 0xf
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE_MASK 0x10000
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE__SHIFT 0x10
+#define DC_GPIO_DDC2_MASK__AUX2_POL_MASK 0x100000
+#define DC_GPIO_DDC2_MASK__AUX2_POL__SHIFT 0x14
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN_MASK 0x400000
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR_MASK 0xf000000
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR_MASK 0xf0000000
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A_MASK 0x1
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A_MASK 0x100
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN_MASK 0x1
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN_MASK 0x100
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y_MASK 0x1
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y_MASK 0x100
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK_MASK 0x1
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN_MASK 0x10
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV_MASK 0x40
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV1_MASK 0x80
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_RECV1__SHIFT 0x7
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK_MASK 0x100
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN_MASK 0x1000
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV_MASK 0x4000
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV1_MASK 0x8000
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_RECV1__SHIFT 0xf
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE_MASK 0x10000
+#define DC_GPIO_DDC3_MASK__AUX_PAD3_MODE__SHIFT 0x10
+#define DC_GPIO_DDC3_MASK__AUX3_POL_MASK 0x100000
+#define DC_GPIO_DDC3_MASK__AUX3_POL__SHIFT 0x14
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN_MASK 0x400000
+#define DC_GPIO_DDC3_MASK__ALLOW_HW_DDC3_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR_MASK 0xf000000
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR_MASK 0xf0000000
+#define DC_GPIO_DDC3_MASK__DC_GPIO_DDC3DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A_MASK 0x1
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A_MASK 0x100
+#define DC_GPIO_DDC3_A__DC_GPIO_DDC3DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN_MASK 0x1
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN_MASK 0x100
+#define DC_GPIO_DDC3_EN__DC_GPIO_DDC3DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y_MASK 0x1
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y_MASK 0x100
+#define DC_GPIO_DDC3_Y__DC_GPIO_DDC3DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK_MASK 0x1
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN_MASK 0x10
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV_MASK 0x40
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV1_MASK 0x80
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_RECV1__SHIFT 0x7
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK_MASK 0x100
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN_MASK 0x1000
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV_MASK 0x4000
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV1_MASK 0x8000
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_RECV1__SHIFT 0xf
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE_MASK 0x10000
+#define DC_GPIO_DDC4_MASK__AUX_PAD4_MODE__SHIFT 0x10
+#define DC_GPIO_DDC4_MASK__AUX4_POL_MASK 0x100000
+#define DC_GPIO_DDC4_MASK__AUX4_POL__SHIFT 0x14
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN_MASK 0x400000
+#define DC_GPIO_DDC4_MASK__ALLOW_HW_DDC4_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR_MASK 0xf000000
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR_MASK 0xf0000000
+#define DC_GPIO_DDC4_MASK__DC_GPIO_DDC4DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A_MASK 0x1
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A_MASK 0x100
+#define DC_GPIO_DDC4_A__DC_GPIO_DDC4DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN_MASK 0x1
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN_MASK 0x100
+#define DC_GPIO_DDC4_EN__DC_GPIO_DDC4DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y_MASK 0x1
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y_MASK 0x100
+#define DC_GPIO_DDC4_Y__DC_GPIO_DDC4DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK_MASK 0x1
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN_MASK 0x10
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV_MASK 0x40
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV1_MASK 0x80
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_RECV1__SHIFT 0x7
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK_MASK 0x100
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN_MASK 0x1000
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV_MASK 0x4000
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV1_MASK 0x8000
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_RECV1__SHIFT 0xf
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE_MASK 0x10000
+#define DC_GPIO_DDC5_MASK__AUX_PAD5_MODE__SHIFT 0x10
+#define DC_GPIO_DDC5_MASK__AUX5_POL_MASK 0x100000
+#define DC_GPIO_DDC5_MASK__AUX5_POL__SHIFT 0x14
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN_MASK 0x400000
+#define DC_GPIO_DDC5_MASK__ALLOW_HW_DDC5_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR_MASK 0xf000000
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR_MASK 0xf0000000
+#define DC_GPIO_DDC5_MASK__DC_GPIO_DDC5DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A_MASK 0x1
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A_MASK 0x100
+#define DC_GPIO_DDC5_A__DC_GPIO_DDC5DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN_MASK 0x1
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN_MASK 0x100
+#define DC_GPIO_DDC5_EN__DC_GPIO_DDC5DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y_MASK 0x1
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y_MASK 0x100
+#define DC_GPIO_DDC5_Y__DC_GPIO_DDC5DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_MASK_MASK 0x1
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_PD_EN_MASK 0x10
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_RECV_MASK 0x40
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_RECV1_MASK 0x80
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_RECV1__SHIFT 0x7
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_MASK_MASK 0x100
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_PD_EN_MASK 0x1000
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_RECV_MASK 0x4000
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_RECV1_MASK 0x8000
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_RECV1__SHIFT 0xf
+#define DC_GPIO_DDC6_MASK__AUX_PAD6_MODE_MASK 0x10000
+#define DC_GPIO_DDC6_MASK__AUX_PAD6_MODE__SHIFT 0x10
+#define DC_GPIO_DDC6_MASK__AUX6_POL_MASK 0x100000
+#define DC_GPIO_DDC6_MASK__AUX6_POL__SHIFT 0x14
+#define DC_GPIO_DDC6_MASK__ALLOW_HW_DDC6_PD_EN_MASK 0x400000
+#define DC_GPIO_DDC6_MASK__ALLOW_HW_DDC6_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_STR_MASK 0xf000000
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_STR_MASK 0xf0000000
+#define DC_GPIO_DDC6_MASK__DC_GPIO_DDC6DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK 0x1
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK 0x100
+#define DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6CLK_EN_MASK 0x1
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6DATA_EN_MASK 0x100
+#define DC_GPIO_DDC6_EN__DC_GPIO_DDC6DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6CLK_Y_MASK 0x1
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6DATA_Y_MASK 0x100
+#define DC_GPIO_DDC6_Y__DC_GPIO_DDC6DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK_MASK 0x1
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV_MASK 0x40
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV1_MASK 0x80
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_RECV1__SHIFT 0x7
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK_MASK 0x100
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN_MASK 0x1000
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV_MASK 0x4000
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV1_MASK 0x8000
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_RECV1__SHIFT 0xf
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE_MASK 0x10000
+#define DC_GPIO_DDCVGA_MASK__AUX_PADVGA_MODE__SHIFT 0x10
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL_MASK 0x100000
+#define DC_GPIO_DDCVGA_MASK__AUXVGA_POL__SHIFT 0x14
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN_MASK 0x400000
+#define DC_GPIO_DDCVGA_MASK__ALLOW_HW_DDCVGA_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR_MASK 0xf000000
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGACLK_STR__SHIFT 0x18
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR_MASK 0xf0000000
+#define DC_GPIO_DDCVGA_MASK__DC_GPIO_DDCVGADATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A_MASK 0x1
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGACLK_A__SHIFT 0x0
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A_MASK 0x100
+#define DC_GPIO_DDCVGA_A__DC_GPIO_DDCVGADATA_A__SHIFT 0x8
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN_MASK 0x1
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGACLK_EN__SHIFT 0x0
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN_MASK 0x100
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_EN__SHIFT 0x8
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RXSEL_MASK 0x30000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RXSEL__SHIFT 0x10
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SPARE_MASK 0xc0000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SPARE__SHIFT 0x12
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_BIASCRTEN_MASK 0x100000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_BIASCRTEN__SHIFT 0x14
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_CSEL0P9_MASK 0x200000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_CSEL0P9__SHIFT 0x15
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_CSEL1P1_MASK 0x400000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_CSEL1P1__SHIFT 0x16
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_COMPSEL_MASK 0x800000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_COMPSEL__SHIFT 0x17
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RSEL0P9_MASK 0x1000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RSEL0P9__SHIFT 0x18
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RSEL1P1_MASK 0x2000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RSEL1P1__SHIFT 0x19
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SPIKERCEN_MASK 0x4000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SPIKERCEN__SHIFT 0x1a
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SPIKERCSEL_MASK 0x8000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SPIKERCSEL__SHIFT 0x1b
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_FALLSLEWSEL_MASK 0x30000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_FALLSLEWSEL__SHIFT 0x1c
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RESBIASEN_MASK 0x40000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_RESBIASEN__SHIFT 0x1e
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SLEWN_MASK 0x80000000
+#define DC_GPIO_DDCVGA_EN__DC_GPIO_DDCVGADATA_PAD_SLEWN__SHIFT 0x1f
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y_MASK 0x1
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGACLK_Y__SHIFT 0x0
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y_MASK 0x100
+#define DC_GPIO_DDCVGA_Y__DC_GPIO_DDCVGADATA_Y__SHIFT 0x8
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_MASK_MASK 0x1
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_MASK__SHIFT 0x0
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_PD_DIS_MASK 0x10
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_PD_DIS__SHIFT 0x4
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_RECV_MASK 0x40
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_RECV__SHIFT 0x6
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_RECV1_MASK 0x80
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_RECV1__SHIFT 0x7
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_MASK_MASK 0x100
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_MASK__SHIFT 0x8
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_PD_DIS_MASK 0x1000
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_PD_DIS__SHIFT 0xc
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_RECV_MASK 0x4000
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_RECV__SHIFT 0xe
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_RECV1_MASK 0x8000
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_RECV1__SHIFT 0xf
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_CRTC_HSYNC_MASK_MASK 0x7000000
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_HSYNCA_CRTC_HSYNC_MASK__SHIFT 0x18
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_CRTC_VSYNC_MASK_MASK 0x70000000
+#define DC_GPIO_SYNCA_MASK__DC_GPIO_VSYNCA_CRTC_VSYNC_MASK__SHIFT 0x1c
+#define DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK 0x1
+#define DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A__SHIFT 0x0
+#define DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK 0x100
+#define DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A__SHIFT 0x8
+#define DC_GPIO_SYNCA_EN__DC_GPIO_HSYNCA_EN_MASK 0x1
+#define DC_GPIO_SYNCA_EN__DC_GPIO_HSYNCA_EN__SHIFT 0x0
+#define DC_GPIO_SYNCA_EN__DC_GPIO_VSYNCA_EN_MASK 0x100
+#define DC_GPIO_SYNCA_EN__DC_GPIO_VSYNCA_EN__SHIFT 0x8
+#define DC_GPIO_SYNCA_Y__DC_GPIO_HSYNCA_Y_MASK 0x1
+#define DC_GPIO_SYNCA_Y__DC_GPIO_HSYNCA_Y__SHIFT 0x0
+#define DC_GPIO_SYNCA_Y__DC_GPIO_VSYNCA_Y_MASK 0x100
+#define DC_GPIO_SYNCA_Y__DC_GPIO_VSYNCA_Y__SHIFT 0x8
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK_MASK 0x1
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_MASK__SHIFT 0x0
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS_MASK 0x2
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PD_DIS__SHIFT 0x1
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV_MASK 0x4
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV__SHIFT 0x2
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN_MASK 0x8
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_PU_EN__SHIFT 0x3
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV1_MASK 0x10
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_CLK_RECV1__SHIFT 0x4
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV1_MASK 0x20
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV1__SHIFT 0x5
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK_MASK 0x100
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_MASK__SHIFT 0x8
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS_MASK 0x200
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PD_DIS__SHIFT 0x9
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV_MASK 0x400
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_RECV__SHIFT 0xa
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN_MASK 0x800
+#define DC_GPIO_GENLK_MASK__DC_GPIO_GENLK_VSYNC_PU_EN__SHIFT 0xb
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK_MASK 0x10000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_MASK__SHIFT 0x10
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS_MASK 0x20000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PD_DIS__SHIFT 0x11
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV_MASK 0x40000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV__SHIFT 0x12
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN_MASK 0x80000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_PU_EN__SHIFT 0x13
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV1_MASK 0x100000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_A_RECV1__SHIFT 0x14
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV1_MASK 0x800000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV1__SHIFT 0x17
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK_MASK 0x1000000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_MASK__SHIFT 0x18
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS_MASK 0x2000000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PD_DIS__SHIFT 0x19
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV_MASK 0x4000000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_RECV__SHIFT 0x1a
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN_MASK 0x8000000
+#define DC_GPIO_GENLK_MASK__DC_GPIO_SWAPLOCK_B_PU_EN__SHIFT 0x1b
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK 0x1
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A__SHIFT 0x0
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK 0x100
+#define DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A__SHIFT 0x8
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK 0x10000
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A__SHIFT 0x10
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK 0x1000000
+#define DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A__SHIFT 0x18
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN_MASK 0x1
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_CLK_EN__SHIFT 0x0
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN_MASK 0x100
+#define DC_GPIO_GENLK_EN__DC_GPIO_GENLK_VSYNC_EN__SHIFT 0x8
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN_MASK 0x10000
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_A_EN__SHIFT 0x10
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN_MASK 0x1000000
+#define DC_GPIO_GENLK_EN__DC_GPIO_SWAPLOCK_B_EN__SHIFT 0x18
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y_MASK 0x1
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_CLK_Y__SHIFT 0x0
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y_MASK 0x100
+#define DC_GPIO_GENLK_Y__DC_GPIO_GENLK_VSYNC_Y__SHIFT 0x8
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y_MASK 0x10000
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_A_Y__SHIFT 0x10
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y_MASK 0x1000000
+#define DC_GPIO_GENLK_Y__DC_GPIO_SWAPLOCK_B_Y__SHIFT 0x18
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK_MASK 0x1
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK__SHIFT 0x0
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_MASK_MASK 0x2
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_MASK__SHIFT 0x1
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_PD_DIS_MASK 0x4
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_PD_DIS__SHIFT 0x2
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RECV_MASK 0x8
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RECV__SHIFT 0x3
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS_MASK 0x10
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS__SHIFT 0x4
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV1_MASK 0x20
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV1__SHIFT 0x5
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV_MASK 0x40
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV__SHIFT 0x6
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RECV1_MASK 0x80
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RECV1__SHIFT 0x7
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK_MASK 0x100
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK__SHIFT 0x8
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS_MASK 0x200
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS__SHIFT 0x9
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV_MASK 0x400
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV__SHIFT 0xa
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV1_MASK 0x800
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV1__SHIFT 0xb
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK_MASK 0x10000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK__SHIFT 0x10
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS_MASK 0x20000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS__SHIFT 0x11
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV_MASK 0x40000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV__SHIFT 0x12
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV1_MASK 0x80000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV1__SHIFT 0x13
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK_MASK 0x100000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK__SHIFT 0x14
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS_MASK 0x200000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS__SHIFT 0x15
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV_MASK 0x400000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV__SHIFT 0x16
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV1_MASK 0x800000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV1__SHIFT 0x17
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK_MASK 0x1000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK__SHIFT 0x18
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS_MASK 0x2000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS__SHIFT 0x19
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV_MASK 0x4000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV__SHIFT 0x1a
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV1_MASK 0x8000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV1__SHIFT 0x1b
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK_MASK 0x10000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK__SHIFT 0x1c
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS_MASK 0x20000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS__SHIFT 0x1d
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV_MASK 0x40000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV__SHIFT 0x1e
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV1_MASK 0x80000000
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV1__SHIFT 0x1f
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK 0x1
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A__SHIFT 0x0
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK 0x100
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A__SHIFT 0x8
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK 0x10000
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A__SHIFT 0x10
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK 0x1000000
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A__SHIFT 0x18
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK 0x4000000
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A__SHIFT 0x1a
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK 0x10000000
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A__SHIFT 0x1c
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN_MASK 0x1
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN__SHIFT 0x0
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI_MASK 0x2
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI__SHIFT 0x1
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE_MASK 0x4
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE__SHIFT 0x2
+#define DC_GPIO_HPD_EN__RX_HPD_SCHMEN_PI_MASK 0x8
+#define DC_GPIO_HPD_EN__RX_HPD_SCHMEN_PI__SHIFT 0x3
+#define DC_GPIO_HPD_EN__RX_HPD_SLEWNCORE_MASK 0x10
+#define DC_GPIO_HPD_EN__RX_HPD_SLEWNCORE__SHIFT 0x4
+#define DC_GPIO_HPD_EN__HPD12_SPARE0_MASK 0x20
+#define DC_GPIO_HPD_EN__HPD12_SPARE0__SHIFT 0x5
+#define DC_GPIO_HPD_EN__HPD1_SEL0_MASK 0x40
+#define DC_GPIO_HPD_EN__HPD1_SEL0__SHIFT 0x6
+#define DC_GPIO_HPD_EN__RX_HPD_SEL0_MASK 0x80
+#define DC_GPIO_HPD_EN__RX_HPD_SEL0__SHIFT 0x7
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN_MASK 0x100
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN__SHIFT 0x8
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI_MASK 0x200
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI__SHIFT 0x9
+#define DC_GPIO_HPD_EN__HPD12_SPARE1_MASK 0x400
+#define DC_GPIO_HPD_EN__HPD12_SPARE1__SHIFT 0xa
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN_MASK 0x10000
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD3_EN__SHIFT 0x10
+#define DC_GPIO_HPD_EN__HPD3_SCHMEN_PI_MASK 0x20000
+#define DC_GPIO_HPD_EN__HPD3_SCHMEN_PI__SHIFT 0x11
+#define DC_GPIO_HPD_EN__HPD34_SPARE0_MASK 0x40000
+#define DC_GPIO_HPD_EN__HPD34_SPARE0__SHIFT 0x12
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN_MASK 0x100000
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD4_EN__SHIFT 0x14
+#define DC_GPIO_HPD_EN__HPD4_SCHMEN_PI_MASK 0x200000
+#define DC_GPIO_HPD_EN__HPD4_SCHMEN_PI__SHIFT 0x15
+#define DC_GPIO_HPD_EN__HPD34_SPARE1_MASK 0x400000
+#define DC_GPIO_HPD_EN__HPD34_SPARE1__SHIFT 0x16
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN_MASK 0x1000000
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD5_EN__SHIFT 0x18
+#define DC_GPIO_HPD_EN__HPD5_SCHMEN_PI_MASK 0x2000000
+#define DC_GPIO_HPD_EN__HPD5_SCHMEN_PI__SHIFT 0x19
+#define DC_GPIO_HPD_EN__HPD56_SPARE0_MASK 0x4000000
+#define DC_GPIO_HPD_EN__HPD56_SPARE0__SHIFT 0x1a
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN_MASK 0x10000000
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD6_EN__SHIFT 0x1c
+#define DC_GPIO_HPD_EN__HPD6_SCHMEN_PI_MASK 0x20000000
+#define DC_GPIO_HPD_EN__HPD6_SCHMEN_PI__SHIFT 0x1d
+#define DC_GPIO_HPD_EN__HPD56_SPARE1_MASK 0x40000000
+#define DC_GPIO_HPD_EN__HPD56_SPARE1__SHIFT 0x1e
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y_MASK 0x1
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y__SHIFT 0x0
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y_MASK 0x100
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y__SHIFT 0x8
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y_MASK 0x10000
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD3_Y__SHIFT 0x10
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y_MASK 0x1000000
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD4_Y__SHIFT 0x18
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y_MASK 0x4000000
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD5_Y__SHIFT 0x1a
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y_MASK 0x10000000
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD6_Y__SHIFT 0x1c
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK_MASK 0x1
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_MASK__SHIFT 0x0
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS_MASK 0x10
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_PD_DIS__SHIFT 0x4
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV_MASK 0x40
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV__SHIFT 0x6
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV1_MASK 0x80
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_BLON_RECV1__SHIFT 0x7
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK_MASK 0x100
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_MASK__SHIFT 0x8
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS_MASK 0x1000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_PD_DIS__SHIFT 0xc
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV_MASK 0x4000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV__SHIFT 0xe
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV1_MASK 0x8000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_DIGON_RECV1__SHIFT 0xf
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_MASK_MASK 0x10000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_MASK__SHIFT 0x10
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_PD_DIS_MASK 0x100000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_PD_DIS__SHIFT 0x14
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_RECV_MASK 0x400000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_RECV__SHIFT 0x16
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_RECV1_MASK 0x800000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_ENA_BL_RECV1__SHIFT 0x17
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_MASK_MASK 0x1000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_MASK__SHIFT 0x18
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_PD_DIS_MASK 0x2000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_PD_DIS__SHIFT 0x19
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_RECV_MASK 0x4000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_RECV__SHIFT 0x1a
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_RECV1_MASK 0x8000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_VSYNC_IN_RECV1__SHIFT 0x1b
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_MASK_MASK 0x10000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_MASK__SHIFT 0x1c
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_PD_DIS_MASK 0x20000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_PD_DIS__SHIFT 0x1d
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_RECV_MASK 0x40000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_RECV__SHIFT 0x1e
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_RECV1_MASK 0x80000000
+#define DC_GPIO_PWRSEQ_MASK__DC_GPIO_HSYNC_IN_RECV1__SHIFT 0x1f
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_BLON_A_MASK 0x1
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_BLON_A__SHIFT 0x0
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_DIGON_A_MASK 0x100
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_DIGON_A__SHIFT 0x8
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_ENA_BL_A_MASK 0x10000
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_ENA_BL_A__SHIFT 0x10
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_VSYNC_IN_A_MASK 0x1000000
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_VSYNC_IN_A__SHIFT 0x18
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_HSYNC_IN_A_MASK 0x80000000
+#define DC_GPIO_PWRSEQ_A__DC_GPIO_HSYNC_IN_A__SHIFT 0x1f
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN_MASK 0x1
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_BLON_EN__SHIFT 0x0
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_GENERICA_EN_MASK 0x2
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VARY_BL_GENERICA_EN__SHIFT 0x1
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN_MASK 0x100
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_DIGON_EN__SHIFT 0x8
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_ENA_BL_EN_MASK 0x10000
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_ENA_BL_EN__SHIFT 0x10
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VSYNC_IN_EN_MASK 0x1000000
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_VSYNC_IN_EN__SHIFT 0x18
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_HSYNC_IN_EN_MASK 0x80000000
+#define DC_GPIO_PWRSEQ_EN__DC_GPIO_HSYNC_IN_EN__SHIFT 0x1f
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_BLON_Y_MASK 0x1
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_BLON_Y__SHIFT 0x0
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_DIGON_Y_MASK 0x100
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_DIGON_Y__SHIFT 0x8
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_ENA_BL_Y_MASK 0x10000
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_ENA_BL_Y__SHIFT 0x10
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_VSYNC_IN_MASK 0x1000000
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_VSYNC_IN__SHIFT 0x18
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_HSYNC_IN_MASK 0x80000000
+#define DC_GPIO_PWRSEQ_Y__DC_GPIO_HSYNC_IN__SHIFT 0x1f
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN_MASK 0xf
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK 0xf0
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT 0x4
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SN_MASK 0xf00
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SN__SHIFT 0x8
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SP_MASK 0xf000
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SP__SHIFT 0xc
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN_MASK 0xf0000
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN__SHIFT 0x10
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP_MASK 0xf00000
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP__SHIFT 0x14
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN_MASK 0xf000000
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN__SHIFT 0x18
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP_MASK 0xf0000000
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP__SHIFT 0x1c
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN_MASK 0xf
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP_MASK 0xf0
+#define DC_GPIO_PAD_STRENGTH_2__STRENGTH_SP__SHIFT 0x4
+#define DC_GPIO_PAD_STRENGTH_2__EXT_RESET_DRVSTRENGTH_MASK 0x700
+#define DC_GPIO_PAD_STRENGTH_2__EXT_RESET_DRVSTRENGTH__SHIFT 0x8
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_DRVSTRENGTH_MASK 0x7000
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_DRVSTRENGTH__SHIFT 0xc
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SN_MASK 0xf0000
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SN__SHIFT 0x10
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SP_MASK 0xf00000
+#define DC_GPIO_PAD_STRENGTH_2__PWRSEQ_STRENGTH_SP__SHIFT 0x14
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_SRC_SEL_MASK 0xc0000000
+#define DC_GPIO_PAD_STRENGTH_2__REF_27_SRC_SEL__SHIFT 0x1e
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_SLEWN_MASK 0x1
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_SLEWN__SHIFT 0x0
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_WAKE_MASK 0x2
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_WAKE__SHIFT 0x1
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_RXSEL_MASK 0x4
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_RXSEL__SHIFT 0x2
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_MODE_MASK 0x8
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_MODE__SHIFT 0x3
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_PD_EN_MASK 0x10
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_PD_EN__SHIFT 0x4
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_EN_MASK 0x20
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_EN__SHIFT 0x5
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_PD_EN_MASK 0x40
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_PD_EN__SHIFT 0x6
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_EN_MASK 0x80
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_EN__SHIFT 0x7
+#define PHY_AUX_CNTL__AUX_PAD_SLEWN_MASK 0x1000
+#define PHY_AUX_CNTL__AUX_PAD_SLEWN__SHIFT 0xc
+#define PHY_AUX_CNTL__AUXSLAVE_CLK_PD_EN_MASK 0x2000
+#define PHY_AUX_CNTL__AUXSLAVE_CLK_PD_EN__SHIFT 0xd
+#define PHY_AUX_CNTL__AUX_PAD_WAKE_MASK 0x4000
+#define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0xe
+#define PHY_AUX_CNTL__AUX_PAD_RXSEL_MASK 0x30000
+#define PHY_AUX_CNTL__AUX_PAD_RXSEL__SHIFT 0x10
+#define PHY_AUX_CNTL__AUX_PAD_RESBIASEN_MASK 0x40000
+#define PHY_AUX_CNTL__AUX_PAD_RESBIASEN__SHIFT 0x12
+#define PHY_AUX_CNTL__AUX_PAD_COMPSEL_MASK 0x80000
+#define PHY_AUX_CNTL__AUX_PAD_COMPSEL__SHIFT 0x13
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A_MASK 0x1
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SCL_A__SHIFT 0x0
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SDA_A_MASK 0x2
+#define DC_GPIO_I2CPAD_A__DC_GPIO_SDA_A__SHIFT 0x1
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SCL_EN_MASK 0x1
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SCL_EN__SHIFT 0x0
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SDA_EN_MASK 0x2
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_SDA_EN__SHIFT 0x1
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_DATA_PD_EN_MASK 0x4
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_DATA_PD_EN__SHIFT 0x2
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RXSEL_MASK 0x30000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RXSEL__SHIFT 0x10
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SPARE_MASK 0xc0000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SPARE__SHIFT 0x12
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_BIASCRTEN_MASK 0x100000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_BIASCRTEN__SHIFT 0x14
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_CSEL0P9_MASK 0x200000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_CSEL0P9__SHIFT 0x15
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_CSEL1P1_MASK 0x400000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_CSEL1P1__SHIFT 0x16
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_COMPSEL_MASK 0x800000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_COMPSEL__SHIFT 0x17
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RSEL0P9_MASK 0x1000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RSEL0P9__SHIFT 0x18
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RSEL1P1_MASK 0x2000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RSEL1P1__SHIFT 0x19
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SPIKERCEN_MASK 0x4000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SPIKERCEN__SHIFT 0x1a
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SPIKERCSEL_MASK 0x8000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SPIKERCSEL__SHIFT 0x1b
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_FALLSLEWSEL_MASK 0x30000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_FALLSLEWSEL__SHIFT 0x1c
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RESBIASEN_MASK 0x40000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_RESBIASEN__SHIFT 0x1e
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SLEWN_MASK 0x80000000
+#define DC_GPIO_I2CPAD_EN__DC_GPIO_I2C_PAD_SLEWN__SHIFT 0x1f
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SCL_Y_MASK 0x1
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SCL_Y__SHIFT 0x0
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SDA_Y_MASK 0x2
+#define DC_GPIO_I2CPAD_Y__DC_GPIO_SDA_Y__SHIFT 0x1
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SN_MASK 0xf
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SP_MASK 0xf0
+#define DC_GPIO_I2CPAD_STRENGTH__I2C_STRENGTH_SP__SHIFT 0x4
+#define DVO_VREF_CONTROL__DVO_VREFPON_MASK 0x1
+#define DVO_VREF_CONTROL__DVO_VREFPON__SHIFT 0x0
+#define DVO_VREF_CONTROL__DVO_VREFSEL_MASK 0x2
+#define DVO_VREF_CONTROL__DVO_VREFSEL__SHIFT 0x1
+#define DVO_VREF_CONTROL__DVO_VREFCAL_MASK 0xf0
+#define DVO_VREF_CONTROL__DVO_VREFCAL__SHIFT 0x4
+#define DVO_SKEW_ADJUST__DVO_SKEW_ADJUST_MASK 0xffffffff
+#define DVO_SKEW_ADJUST__DVO_SKEW_ADJUST__SHIFT 0x0
+#define DC_GPIO_RECEIVER_EN0__VIPPAD_SCL_RECEN_MASK 0x1
+#define DC_GPIO_RECEIVER_EN0__VIPPAD_SCL_RECEN__SHIFT 0x0
+#define DC_GPIO_RECEIVER_EN0__VIPPAD_SDA_RECEN_MASK 0x2
+#define DC_GPIO_RECEIVER_EN0__VIPPAD_SDA_RECEN__SHIFT 0x1
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_RX_HPD_RECEN_MASK 0x10000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_RX_HPD_RECEN__SHIFT 0x10
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_HPD1_RECEN_MASK 0x20000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_HPD1_RECEN__SHIFT 0x11
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENLK_VSYNC_RECEN_MASK 0x40000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENLK_VSYNC_RECEN__SHIFT 0x12
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENLK_CLK_RECEN_MASK 0x80000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENLK_CLK_RECEN__SHIFT 0x13
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_VSYNCA_RECEN_MASK 0x100000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_VSYNCA_RECEN__SHIFT 0x14
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_HSYNCA_RECEN_MASK 0x200000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_HSYNCA_RECEN__SHIFT 0x15
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICG_RECEN_MASK 0x400000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICG_RECEN__SHIFT 0x16
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICF_RECEN_MASK 0x800000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICF_RECEN__SHIFT 0x17
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICE_RECEN_MASK 0x1000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICE_RECEN__SHIFT 0x18
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICD_RECEN_MASK 0x2000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICD_RECEN__SHIFT 0x19
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICC_RECEN_MASK 0x4000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICC_RECEN__SHIFT 0x1a
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICB_RECEN_MASK 0x8000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICB_RECEN__SHIFT 0x1b
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICA_RECEN_MASK 0x10000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_GENERICA_RECEN__SHIFT 0x1c
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_BLON_RECEN_MASK 0x20000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_BLON_RECEN__SHIFT 0x1d
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_DIGON_RECEN_MASK 0x40000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_DIGON_RECEN__SHIFT 0x1e
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_DDC2DATA_RECEN_MASK 0x80000000
+#define DC_GPIO_RECEIVER_EN0__DC_GPIO_DDC2DATA_RECEN__SHIFT 0x1f
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC2CLK_RECEN_MASK 0x1
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC2CLK_RECEN__SHIFT 0x0
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC1DATA_RECEN_MASK 0x2
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC1DATA_RECEN__SHIFT 0x1
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC1CLK_RECEN_MASK 0x4
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC1CLK_RECEN__SHIFT 0x2
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC3DATA_RECEN_MASK 0x8
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC3DATA_RECEN__SHIFT 0x3
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC3CLK_RECEN_MASK 0x10
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC3CLK_RECEN__SHIFT 0x4
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC4DATA_RECEN_MASK 0x20
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC4DATA_RECEN__SHIFT 0x5
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC4CLK_RECEN_MASK 0x40
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC4CLK_RECEN__SHIFT 0x6
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC5DATA_RECEN_MASK 0x80
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC5DATA_RECEN__SHIFT 0x7
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC5CLK_RECEN_MASK 0x100
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC5CLK_RECEN__SHIFT 0x8
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC6DATA_RECEN_MASK 0x200
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC6DATA_RECEN__SHIFT 0x9
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC6CLK_RECEN_MASK 0x400
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_DDC6CLK_RECEN__SHIFT 0xa
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD2_RECEN_MASK 0x800
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD2_RECEN__SHIFT 0xb
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD3_RECEN_MASK 0x1000
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD3_RECEN__SHIFT 0xc
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD4_RECEN_MASK 0x2000
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD4_RECEN__SHIFT 0xd
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD5_RECEN_MASK 0x4000
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD5_RECEN__SHIFT 0xe
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD6_RECEN_MASK 0x8000
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_HPD6_RECEN__SHIFT 0xf
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_ENA_BL_RECEN_MASK 0x10000
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_ENA_BL_RECEN__SHIFT 0x10
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_SWAPLOCK_A_RECEN_MASK 0x20000
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_SWAPLOCK_A_RECEN__SHIFT 0x11
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_SWAPLOCK_B_RECEN_MASK 0x40000
+#define DC_GPIO_RECEIVER_EN1__DC_GPIO_SWAPLOCK_B_RECEN__SHIFT 0x12
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_I2SDATA0_MASK_MASK 0xf
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_I2SDATA0_MASK__SHIFT 0x0
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_MCLK0_MASK_MASK 0x10
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_MCLK0_MASK__SHIFT 0x4
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_BCLK0_MASK_MASK 0x20
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_BCLK0_MASK__SHIFT 0x5
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_LRCK0_MASK_MASK 0x40
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_LRCK0_MASK__SHIFT 0x6
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_SPDIF0_MASK_MASK 0x80
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_SPDIF0_MASK__SHIFT 0x7
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_I2SDATA1_MASK_MASK 0x100
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_I2SDATA1_MASK__SHIFT 0x8
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_MCLK1_MASK_MASK 0x200
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_MCLK1_MASK__SHIFT 0x9
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_BCLK1_MASK_MASK 0x400
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_BCLK1_MASK__SHIFT 0xa
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_LRCK1_MASK_MASK 0x800
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_LRCK1_MASK__SHIFT 0xb
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_SPDIF1_MASK_MASK 0x1000
+#define DC_GPIO_I2S_SPDIF_MASK__DC_GPIO_SPDIF1_MASK__SHIFT 0xc
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_I2SDATA0_A_MASK 0xf
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_I2SDATA0_A__SHIFT 0x0
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_MCLK0_A_MASK 0x10
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_MCLK0_A__SHIFT 0x4
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_BCLK0_A_MASK 0x20
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_BCLK0_A__SHIFT 0x5
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_LRCK0_A_MASK 0x40
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_LRCK0_A__SHIFT 0x6
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_SPDIF0_A_MASK 0x80
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_SPDIF0_A__SHIFT 0x7
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_I2SDATA1_A_MASK 0x100
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_I2SDATA1_A__SHIFT 0x8
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_MCLK1_A_MASK 0x200
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_MCLK1_A__SHIFT 0x9
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_BCLK1_A_MASK 0x400
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_BCLK1_A__SHIFT 0xa
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_LRCK1_A_MASK 0x800
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_LRCK1_A__SHIFT 0xb
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_SPDIF1_A_MASK 0x1000
+#define DC_GPIO_I2S_SPDIF_A__DC_GPIO_SPDIF1_A__SHIFT 0xc
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_I2SDATA0_EN_MASK 0xf
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_I2SDATA0_EN__SHIFT 0x0
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_MCLK0_EN_MASK 0x10
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_MCLK0_EN__SHIFT 0x4
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_BCLK0_EN_MASK 0x20
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_BCLK0_EN__SHIFT 0x5
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_LRCK0_EN_MASK 0x40
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_LRCK0_EN__SHIFT 0x6
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_SPDIF0_EN_MASK 0x80
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_SPDIF0_EN__SHIFT 0x7
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_I2SDATA1_EN_MASK 0x100
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_I2SDATA1_EN__SHIFT 0x8
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_MCLK1_EN_MASK 0x200
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_MCLK1_EN__SHIFT 0x9
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_BCLK1_EN_MASK 0x400
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_BCLK1_EN__SHIFT 0xa
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_LRCK1_EN_MASK 0x800
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_LRCK1_EN__SHIFT 0xb
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_SPDIF1_EN_MASK 0x1000
+#define DC_GPIO_I2S_SPDIF_EN__DC_GPIO_SPDIF1_EN__SHIFT 0xc
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_APORT_MASK 0x2000
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_APORT__SHIFT 0xd
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_PU_MASK 0x4000
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_PU__SHIFT 0xe
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_RXSEL_MASK 0x8000
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_RXSEL__SHIFT 0xf
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_SCHMEN_MASK 0x10000
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_SCHMEN__SHIFT 0x10
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_SMODE_EN_MASK 0x20000
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_SMODE_EN__SHIFT 0x11
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_IMODE_MASK 0x40000
+#define DC_GPIO_I2S_SPDIF_EN__SPDIF1_IMODE__SHIFT 0x12
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_I2SDATA0_Y_MASK 0xf
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_I2SDATA0_Y__SHIFT 0x0
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_MCLK0_Y_MASK 0x10
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_MCLK0_Y__SHIFT 0x4
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_BCLK0_Y_MASK 0x20
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_BCLK0_Y__SHIFT 0x5
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_LRCK0_Y_MASK 0x40
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_LRCK0_Y__SHIFT 0x6
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_SPDIF0_Y_MASK 0x80
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_SPDIF0_Y__SHIFT 0x7
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_I2SDATA1_Y_MASK 0x100
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_I2SDATA1_Y__SHIFT 0x8
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_MCLK1_Y_MASK 0x200
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_MCLK1_Y__SHIFT 0x9
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_BCLK1_Y_MASK 0x400
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_BCLK1_Y__SHIFT 0xa
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_LRCK1_Y_MASK 0x800
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_LRCK1_Y__SHIFT 0xb
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_SPDIF1_Y_MASK 0x1000
+#define DC_GPIO_I2S_SPDIF_Y__DC_GPIO_SPDIF1_Y__SHIFT 0xc
+#define DC_GPIO_I2S_SPDIF_STRENGTH__I2S0_DRVSTRENGTH_MASK 0x7
+#define DC_GPIO_I2S_SPDIF_STRENGTH__I2S0_DRVSTRENGTH__SHIFT 0x0
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF0_DRVSTRENGTH_SN_MASK 0x700
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF0_DRVSTRENGTH_SN__SHIFT 0x8
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF0_DRVSTRENGTH_SP_MASK 0x3800
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF0_DRVSTRENGTH_SP__SHIFT 0xb
+#define DC_GPIO_I2S_SPDIF_STRENGTH__I2S1_DRVSTRENGTH_MASK 0x70000
+#define DC_GPIO_I2S_SPDIF_STRENGTH__I2S1_DRVSTRENGTH__SHIFT 0x10
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF1_DRVSTRENGTH_SN_MASK 0x7000000
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF1_DRVSTRENGTH_SN__SHIFT 0x18
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF1_DRVSTRENGTH_SP_MASK 0x38000000
+#define DC_GPIO_I2S_SPDIF_STRENGTH__SPDIF1_DRVSTRENGTH_SP__SHIFT 0x1b
+#define DC_GPIO_TX12_EN__DC_GPIO_BLON_TX12_EN_MASK 0x1
+#define DC_GPIO_TX12_EN__DC_GPIO_BLON_TX12_EN__SHIFT 0x0
+#define DC_GPIO_TX12_EN__DC_GPIO_DIGON_TX12_EN_MASK 0x2
+#define DC_GPIO_TX12_EN__DC_GPIO_DIGON_TX12_EN__SHIFT 0x1
+#define DC_GPIO_TX12_EN__DC_GPIO_ENA_BL_TX12_EN_MASK 0x4
+#define DC_GPIO_TX12_EN__DC_GPIO_ENA_BL_TX12_EN__SHIFT 0x2
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICA_TX12_EN_MASK 0x8
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICA_TX12_EN__SHIFT 0x3
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICB_TX12_EN_MASK 0x10
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICB_TX12_EN__SHIFT 0x4
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICC_TX12_EN_MASK 0x20
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICC_TX12_EN__SHIFT 0x5
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICD_TX12_EN_MASK 0x40
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICD_TX12_EN__SHIFT 0x6
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICE_TX12_EN_MASK 0x80
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICE_TX12_EN__SHIFT 0x7
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICF_TX12_EN_MASK 0x100
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICF_TX12_EN__SHIFT 0x8
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICG_TX12_EN_MASK 0x200
+#define DC_GPIO_TX12_EN__DC_GPIO_GENERICG_TX12_EN__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_FALLSLEWSEL_MASK 0x3
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_FALLSLEWSEL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_FALLSLEWSEL_MASK 0xc
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_FALLSLEWSEL__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_FALLSLEWSEL_MASK 0x30
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_FALLSLEWSEL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_FALLSLEWSEL_MASK 0xc0
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_FALLSLEWSEL__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_FALLSLEWSEL_MASK 0x300
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_FALLSLEWSEL__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_FALLSLEWSEL_MASK 0xc00
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_FALLSLEWSEL__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCEN_MASK 0x10000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCEN__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCEN_MASK 0x20000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCEN__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCEN_MASK 0x40000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCEN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCEN_MASK 0x80000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCEN__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCEN_MASK 0x100000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCEN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCEN_MASK 0x200000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCEN__SHIFT 0x15
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCSEL_MASK 0x1000000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX1_SPIKERCSEL__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCSEL_MASK 0x2000000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX2_SPIKERCSEL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCSEL_MASK 0x4000000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX3_SPIKERCSEL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCSEL_MASK 0x8000000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX4_SPIKERCSEL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCSEL_MASK 0x10000000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX5_SPIKERCSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCSEL_MASK 0x20000000
+#define DC_GPIO_AUX_CTRL_0__DC_GPIO_AUX6_SPIKERCSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_CSEL_0P9_MASK 0x1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_CSEL_0P9__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_CSEL_0P9_MASK 0x2
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_CSEL_0P9__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_CSEL_0P9_MASK 0x4
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_CSEL_0P9__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_CSEL_0P9_MASK 0x8
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_CSEL_0P9__SHIFT 0x3
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_CSEL_0P9_MASK 0x10
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_CSEL_0P9__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_CSEL_0P9_MASK 0x20
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_CSEL_0P9__SHIFT 0x5
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_CSEL_1P1_MASK 0x100
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_CSEL_1P1__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_CSEL_1P1_MASK 0x200
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_CSEL_1P1__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_CSEL_1P1_MASK 0x400
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_CSEL_1P1__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_CSEL_1P1_MASK 0x800
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_CSEL_1P1__SHIFT 0xb
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_CSEL_1P1_MASK 0x1000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_CSEL_1P1__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_CSEL_1P1_MASK 0x2000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_CSEL_1P1__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_RSEL_0P9_MASK 0x10000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_RSEL_0P9__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_RSEL_0P9_MASK 0x20000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_RSEL_0P9__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_RSEL_0P9_MASK 0x40000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_RSEL_0P9__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_RSEL_0P9_MASK 0x80000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_RSEL_0P9__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_RSEL_0P9_MASK 0x100000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_RSEL_0P9__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_RSEL_0P9_MASK 0x200000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_RSEL_0P9__SHIFT 0x15
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_RSEL_1P1_MASK 0x1000000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_RSEL_1P1__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_RSEL_1P1_MASK 0x2000000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_RSEL_1P1__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_RSEL_1P1_MASK 0x4000000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_RSEL_1P1__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_RSEL_1P1_MASK 0x8000000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_RSEL_1P1__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_RSEL_1P1_MASK 0x10000000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_RSEL_1P1__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_RSEL_1P1_MASK 0x20000000
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_RSEL_1P1__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX1_BIASCRTEN_MASK 0x1
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX1_BIASCRTEN__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX2_BIASCRTEN_MASK 0x2
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX2_BIASCRTEN__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX3_BIASCRTEN_MASK 0x4
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX3_BIASCRTEN__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX4_BIASCRTEN_MASK 0x8
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX4_BIASCRTEN__SHIFT 0x3
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX5_BIASCRTEN_MASK 0x10
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX5_BIASCRTEN__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX6_BIASCRTEN_MASK 0x20
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_AUX6_BIASCRTEN__SHIFT 0x5
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX1_SPARE_MASK 0xc0
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX1_SPARE__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX2_SPARE_MASK 0x300
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX2_SPARE__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX3_SPARE_MASK 0xc00
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX3_SPARE__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX4_SPARE_MASK 0x3000
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX4_SPARE__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX5_SPARE_MASK 0xc000
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX5_SPARE__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX6_SPARE_MASK 0x30000
+#define DC_GPIO_AUX_CTRL_2__DC_IO_AUX6_SPARE__SHIFT 0x10
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_FALLSLEWSEL_MASK 0x3
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_FALLSLEWSEL__SHIFT 0x0
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_FALLSLEWSEL_MASK 0xc
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_FALLSLEWSEL__SHIFT 0x2
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_FALLSLEWSEL_MASK 0x30
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_FALLSLEWSEL__SHIFT 0x4
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_SPIKERCEN_MASK 0x100
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_SPIKERCEN__SHIFT 0x8
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_SPIKERCEN_MASK 0x200
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_SPIKERCEN__SHIFT 0x9
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_SPIKERCEN_MASK 0x400
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_SPIKERCEN__SHIFT 0xa
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_SPIKERCSEL_MASK 0x1000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_SPIKERCSEL__SHIFT 0xc
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_SPIKERCSEL_MASK 0x2000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_SPIKERCSEL__SHIFT 0xd
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_SPIKERCSEL_MASK 0x4000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_SPIKERCSEL__SHIFT 0xe
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_CSEL_0P9_MASK 0x10000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_CSEL_0P9__SHIFT 0x10
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_CSEL_0P9_MASK 0x20000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_CSEL_0P9__SHIFT 0x11
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_CSEL_0P9_MASK 0x40000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_CSEL_0P9__SHIFT 0x12
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_CSEL_1P1_MASK 0x100000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_CSEL_1P1__SHIFT 0x14
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_CSEL_1P1_MASK 0x200000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_CSEL_1P1__SHIFT 0x15
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_CSEL_1P1_MASK 0x400000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_CSEL_1P1__SHIFT 0x16
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_RSEL_0P9_MASK 0x1000000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_RSEL_0P9__SHIFT 0x18
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_RSEL_0P9_MASK 0x2000000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_RSEL_0P9__SHIFT 0x19
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_RSEL_0P9_MASK 0x4000000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_RSEL_0P9__SHIFT 0x1a
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_RSEL_1P1_MASK 0x10000000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD12_RSEL_1P1__SHIFT 0x1c
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_RSEL_1P1_MASK 0x20000000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD34_RSEL_1P1__SHIFT 0x1d
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_RSEL_1P1_MASK 0x40000000
+#define DC_GPIO_HPD_CTRL_0__DC_GPIO_HPD56_RSEL_1P1__SHIFT 0x1e
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD12_BIASCRTEN_MASK 0x1
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD12_BIASCRTEN__SHIFT 0x0
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD34_BIASCRTEN_MASK 0x2
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD34_BIASCRTEN__SHIFT 0x1
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD56_BIASCRTEN_MASK 0x4
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD56_BIASCRTEN__SHIFT 0x2
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD12_SLEWN_MASK 0x10
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD12_SLEWN__SHIFT 0x4
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD34_SLEWN_MASK 0x20
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD34_SLEWN__SHIFT 0x5
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD56_SLEWN_MASK 0x40
+#define DC_GPIO_HPD_CTRL_1__DC_GPIO_HPD56_SLEWN__SHIFT 0x6
+#define DAC_MACRO_CNTL_RESERVED0__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DAC_MACRO_CNTL_RESERVED0__DAC_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DAC_MACRO_CNTL_RESERVED1__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DAC_MACRO_CNTL_RESERVED1__DAC_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DAC_MACRO_CNTL_RESERVED2__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DAC_MACRO_CNTL_RESERVED2__DAC_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DAC_MACRO_CNTL_RESERVED3__DAC_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DAC_MACRO_CNTL_RESERVED3__DAC_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED0__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED1__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED2__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED3__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED4__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED5__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED6__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED7__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED8__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED9__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED10__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED11__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED12__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED13__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED14__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED15__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED16__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED17__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED18__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED19__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED20__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED21__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED22__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED23__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED24__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED25__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED26__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED27__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED28__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED29__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED30__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED31__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED32__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED33__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED34__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED35__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED36__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED37__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED38__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED39__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED40__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED41__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED42__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED43__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED44__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED45__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED46__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED47__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED48__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED49__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED50__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED51__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED52__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED53__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED54__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED55__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED56__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED57__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED58__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED58__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED59__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED59__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED60__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED60__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED61__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED61__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED62__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED62__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED63__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED63__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED64__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED64__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED65__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED65__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED66__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED66__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED67__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED67__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED68__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED68__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED69__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED69__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED70__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED70__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED71__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED71__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED72__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED72__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED73__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED73__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED74__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED74__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED75__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED75__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED76__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED76__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED77__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED77__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED78__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED78__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED79__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED79__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED80__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED80__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED81__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED81__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED82__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED82__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED83__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED83__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED84__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED84__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED85__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED85__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED86__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED86__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED87__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED87__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED88__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED88__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED89__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED89__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED90__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED90__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED91__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED91__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED92__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED92__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED93__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED93__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED94__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED94__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED95__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED95__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED96__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED96__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED97__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED97__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED98__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED98__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED99__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED99__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED100__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED100__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED101__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED101__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED102__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED102__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED103__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED103__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED104__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED104__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED105__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED105__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED106__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED106__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED107__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED107__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED108__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED108__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED109__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED109__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED110__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED110__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED111__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED111__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED112__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED112__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED113__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED113__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED114__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED114__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED115__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED115__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED116__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED116__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED117__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED117__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED118__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED118__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED119__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED119__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED120__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED120__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED121__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED121__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED122__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED122__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED123__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED123__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED124__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED124__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED125__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED125__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED126__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED126__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED127__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED127__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED128__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED128__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED129__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED129__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED130__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED130__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED131__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED131__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED132__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED132__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED133__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED133__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED134__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED134__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED135__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED135__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED136__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED136__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED137__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED137__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED138__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED138__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED139__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED139__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED140__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED140__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED141__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED141__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED142__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED142__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED143__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED143__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED144__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED144__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED145__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED145__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED146__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED146__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED147__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED147__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED148__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED148__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED149__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED149__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED150__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED150__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED151__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED151__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED152__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED152__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED153__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED153__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED154__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED154__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED155__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED155__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED156__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED156__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED157__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED157__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED158__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED158__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define UNIPHY_MACRO_CNTL_RESERVED159__UNIPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define UNIPHY_MACRO_CNTL_RESERVED159__UNIPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED0__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED0__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED1__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED1__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED2__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED2__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED3__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED3__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED4__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED4__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED5__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED5__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED6__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED6__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED7__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED7__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED8__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED8__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED9__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED9__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED10__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED10__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED11__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED11__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED12__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED12__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED13__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED13__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED14__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED14__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED15__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED15__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED16__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED16__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED17__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED17__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED18__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED18__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED19__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED19__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED20__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED20__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED21__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED21__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED22__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED22__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED23__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED23__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED24__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED24__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED25__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED25__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED26__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED26__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED27__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED27__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED28__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED28__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED29__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED29__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED30__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED30__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED31__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED31__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED32__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED32__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED33__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED33__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED34__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED34__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED35__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED35__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED36__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED36__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED37__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED37__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED38__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED38__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED39__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED39__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED40__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED40__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED41__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED41__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED42__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED42__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED43__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED43__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED44__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED44__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED45__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED45__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED46__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED46__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED47__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED47__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED48__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED48__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED49__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED49__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED50__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED50__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED51__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED51__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED52__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED52__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED53__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED53__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED54__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED54__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED55__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED55__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED56__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED56__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED57__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED57__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED58__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED58__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED59__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED59__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED60__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED60__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED61__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED61__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED62__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED62__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED63__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED63__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED64__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED64__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED65__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED65__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED66__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED66__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED67__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED67__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED68__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED68__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED69__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED69__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED70__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED70__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED71__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED71__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED72__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED72__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED73__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED73__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED74__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED74__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED75__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED75__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED76__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED76__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED77__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED77__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED78__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED78__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED79__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED79__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED80__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED80__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED81__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED81__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED82__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED82__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED83__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED83__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED84__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED84__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED85__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED85__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED86__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED86__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED87__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED87__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED88__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED88__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED89__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED89__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED90__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED90__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED91__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED91__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED92__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED92__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED93__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED93__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED94__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED94__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED95__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED95__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED96__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED96__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED97__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED97__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED98__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED98__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED99__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED99__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED100__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED100__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED101__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED101__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED102__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED102__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED103__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED103__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED104__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED104__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED105__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED105__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED106__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED106__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED107__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED107__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED108__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED108__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED109__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED109__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED110__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED110__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED111__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED111__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED112__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED112__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED113__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED113__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED114__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED114__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED115__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED115__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED116__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED116__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED117__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED117__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED118__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED118__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED119__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED119__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED120__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED120__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED121__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED121__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED122__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED122__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED123__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED123__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED124__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED124__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED125__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED125__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED126__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED126__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED127__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED127__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED128__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED128__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED129__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED129__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED130__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED130__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED131__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED131__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED132__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED132__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED133__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED133__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED134__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED134__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED135__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED135__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED136__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED136__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED137__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED137__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED138__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED138__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED139__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED139__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED140__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED140__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED141__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED141__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED142__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED142__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED143__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED143__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED144__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED144__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED145__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED145__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED146__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED146__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED147__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED147__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED148__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED148__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED149__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED149__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED150__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED150__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED151__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED151__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED152__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED152__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED153__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED153__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED154__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED154__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED155__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED155__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED156__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED156__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED157__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED157__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED158__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED158__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED159__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED159__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED160__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED160__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED161__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED161__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED162__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED162__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED163__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED163__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED164__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED164__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED165__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED165__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED166__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED166__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED167__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED167__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED168__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED168__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED169__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED169__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED170__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED170__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED171__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED171__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED172__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED172__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED173__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED173__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED174__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED174__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED175__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED175__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED176__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED176__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED177__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED177__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED178__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED178__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED179__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED179__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED180__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED180__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED181__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED181__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED182__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED182__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED183__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED183__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED184__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED184__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED185__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED185__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED186__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED186__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED187__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED187__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED188__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED188__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED189__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED189__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED190__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED190__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED191__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED191__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED192__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED192__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED193__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED193__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED194__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED194__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED195__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED195__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED196__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED196__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED197__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED197__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED198__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED198__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED199__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED199__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED200__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED200__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED201__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED201__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED202__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED202__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED203__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED203__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED204__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED204__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED205__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED205__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED206__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED206__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED207__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED207__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED208__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED208__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED209__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED209__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED210__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED210__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED211__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED211__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED212__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED212__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED213__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED213__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED214__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED214__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED215__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED215__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED216__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED216__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED217__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED217__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED218__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED218__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED219__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED219__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED220__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED220__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED221__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED221__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED222__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED222__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED223__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED223__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED224__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED224__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED225__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED225__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED226__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED226__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED227__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED227__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED228__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED228__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED229__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED229__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED230__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED230__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED231__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED231__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED232__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED232__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED233__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED233__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED234__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED234__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED235__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED235__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED236__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED236__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED237__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED237__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED238__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED238__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED239__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED239__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED240__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED240__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED241__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED241__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED242__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED242__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED243__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED243__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED244__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED244__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED245__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED245__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED246__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED246__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED247__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED247__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED248__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED248__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED249__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED249__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED250__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED250__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED251__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED251__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED252__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED252__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED253__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED253__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED254__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED254__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED255__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED255__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED256__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED256__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED257__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED257__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED258__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED258__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED259__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED259__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED260__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED260__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED261__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED261__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED262__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED262__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED263__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED263__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED264__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED264__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED265__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED265__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED266__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED266__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED267__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED267__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED268__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED268__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED269__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED269__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED270__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED270__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED271__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED271__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED272__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED272__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED273__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED273__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED274__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED274__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED275__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED275__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED276__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED276__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED277__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED277__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED278__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED278__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED279__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED279__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED280__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED280__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED281__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED281__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED282__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED282__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED283__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED283__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED284__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED284__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED285__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED285__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED286__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED286__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED287__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED287__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED288__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED288__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED289__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED289__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED290__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED290__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED291__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED291__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED292__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED292__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED293__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED293__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED294__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED294__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED295__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED295__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED296__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED296__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED297__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED297__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED298__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED298__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED299__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED299__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED300__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED300__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED301__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED301__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED302__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED302__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED303__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED303__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED304__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED304__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED305__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED305__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED306__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED306__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED307__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED307__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED308__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED308__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED309__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED309__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED310__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED310__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED311__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED311__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED312__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED312__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED313__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED313__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED314__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED314__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED315__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED315__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED316__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED316__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED317__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED317__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED318__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED318__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED319__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED319__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED320__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED320__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED321__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED321__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED322__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED322__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED323__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED323__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED324__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED324__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED325__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED325__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED326__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED326__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED327__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED327__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED328__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED328__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED329__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED329__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED330__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED330__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED331__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED331__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED332__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED332__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED333__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED333__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED334__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED334__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED335__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED335__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED336__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED336__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED337__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED337__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED338__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED338__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED339__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED339__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED340__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED340__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED341__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED341__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED342__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED342__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED343__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED343__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED344__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED344__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED345__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED345__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED346__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED346__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED347__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED347__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED348__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED348__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED349__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED349__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED350__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED350__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED351__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED351__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED352__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED352__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED353__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED353__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED354__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED354__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED355__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED355__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED356__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED356__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED357__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED357__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED358__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED358__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED359__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED359__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED360__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED360__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED361__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED361__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED362__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED362__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED363__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED363__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED364__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED364__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED365__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED365__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED366__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED366__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED367__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED367__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED368__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED368__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED369__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED369__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED370__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED370__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED371__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED371__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED372__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED372__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED373__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED373__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED374__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED374__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED375__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED375__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED376__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED376__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED377__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED377__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED378__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED378__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DCRX_PHY_MACRO_CNTL_RESERVED379__DCRX_PHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DCRX_PHY_MACRO_CNTL_RESERVED379__DCRX_PHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED0__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED0__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED1__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED1__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED2__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED2__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED3__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED3__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED4__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED4__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED5__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED5__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED6__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED6__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED7__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED7__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED8__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED8__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED9__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED9__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED10__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED10__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED11__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED11__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED12__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED12__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED13__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED13__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED14__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED14__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED15__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED15__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED16__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED16__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED17__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED17__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED18__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED18__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED19__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED19__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED20__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED20__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED21__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED21__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED22__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED22__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED23__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED23__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED24__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED24__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED25__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED25__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED26__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED26__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED27__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED27__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED28__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED28__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED29__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED29__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED30__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED30__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED31__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED31__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED32__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED32__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED33__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED33__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED34__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED34__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED35__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED35__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED36__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED36__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED37__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED37__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED38__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED38__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED39__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED39__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED40__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED40__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED41__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED41__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED42__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED42__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED43__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED43__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED44__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED44__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED45__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED45__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED46__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED46__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED47__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED47__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED48__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED48__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED49__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED49__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED50__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED50__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED51__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED51__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED52__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED52__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED53__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED53__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED54__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED54__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED55__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED55__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED56__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED56__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED57__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED57__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED58__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED58__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED59__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED59__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED60__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED60__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED61__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED61__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED62__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED62__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define DPHY_MACRO_CNTL_RESERVED63__DPHY_MACRO_CNTL_RESERVED_MASK 0xffffffff
+#define DPHY_MACRO_CNTL_RESERVED63__DPHY_MACRO_CNTL_RESERVED__SHIFT 0x0
+#define GRPH_ENABLE__GRPH_ENABLE_MASK 0x1
+#define GRPH_ENABLE__GRPH_ENABLE__SHIFT 0x0
+#define GRPH_ENABLE__GRPH_KEYER_ALPHA_SEL_MASK 0x2
+#define GRPH_ENABLE__GRPH_KEYER_ALPHA_SEL__SHIFT 0x1
+#define GRPH_CONTROL__GRPH_DEPTH_MASK 0x3
+#define GRPH_CONTROL__GRPH_DEPTH__SHIFT 0x0
+#define GRPH_CONTROL__GRPH_NUM_BANKS_MASK 0xc
+#define GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT 0x2
+#define GRPH_CONTROL__GRPH_Z_MASK 0x30
+#define GRPH_CONTROL__GRPH_Z__SHIFT 0x4
+#define GRPH_CONTROL__GRPH_BANK_WIDTH_MASK 0xc0
+#define GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT 0x6
+#define GRPH_CONTROL__GRPH_FORMAT_MASK 0x700
+#define GRPH_CONTROL__GRPH_FORMAT__SHIFT 0x8
+#define GRPH_CONTROL__GRPH_BANK_HEIGHT_MASK 0x1800
+#define GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT 0xb
+#define GRPH_CONTROL__GRPH_TILE_SPLIT_MASK 0xe000
+#define GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT 0xd
+#define GRPH_CONTROL__GRPH_ADDRESS_TRANSLATION_ENABLE_MASK 0x10000
+#define GRPH_CONTROL__GRPH_ADDRESS_TRANSLATION_ENABLE__SHIFT 0x10
+#define GRPH_CONTROL__GRPH_PRIVILEGED_ACCESS_ENABLE_MASK 0x20000
+#define GRPH_CONTROL__GRPH_PRIVILEGED_ACCESS_ENABLE__SHIFT 0x11
+#define GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT_MASK 0xc0000
+#define GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT 0x12
+#define GRPH_CONTROL__GRPH_ARRAY_MODE_MASK 0xf00000
+#define GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT 0x14
+#define GRPH_CONTROL__GRPH_PIPE_CONFIG_MASK 0x1f000000
+#define GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT 0x18
+#define GRPH_CONTROL__GRPH_MICRO_TILE_MODE_MASK 0x60000000
+#define GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT 0x1d
+#define GRPH_CONTROL__GRPH_COLOR_EXPANSION_MODE_MASK 0x80000000
+#define GRPH_CONTROL__GRPH_COLOR_EXPANSION_MODE__SHIFT 0x1f
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN_MASK 0x100
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_EN__SHIFT 0x8
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN_MASK 0x10000
+#define GRPH_LUT_10BIT_BYPASS__GRPH_LUT_10BIT_BYPASS_DBL_BUF_EN__SHIFT 0x10
+#define GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP_MASK 0x3
+#define GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT 0x0
+#define GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR_MASK 0x30
+#define GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT 0x4
+#define GRPH_SWAP_CNTL__GRPH_GREEN_CROSSBAR_MASK 0xc0
+#define GRPH_SWAP_CNTL__GRPH_GREEN_CROSSBAR__SHIFT 0x6
+#define GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR_MASK 0x300
+#define GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT 0x8
+#define GRPH_SWAP_CNTL__GRPH_ALPHA_CROSSBAR_MASK 0xc00
+#define GRPH_SWAP_CNTL__GRPH_ALPHA_CROSSBAR__SHIFT 0xa
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_DFQ_ENABLE_MASK 0x1
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_DFQ_ENABLE__SHIFT 0x0
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK 0xffffff00
+#define GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS__SHIFT 0x8
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_DFQ_ENABLE_MASK 0x1
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_DFQ_ENABLE__SHIFT 0x0
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK 0xffffff00
+#define GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS__SHIFT 0x8
+#define GRPH_PITCH__GRPH_PITCH_MASK 0x7fff
+#define GRPH_PITCH__GRPH_PITCH__SHIFT 0x0
+#define GRPH_PRIMARY_SURFACE_ADDRESS_HIGH__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0xff
+#define GRPH_PRIMARY_SURFACE_ADDRESS_HIGH__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define GRPH_SECONDARY_SURFACE_ADDRESS_HIGH__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0xff
+#define GRPH_SECONDARY_SURFACE_ADDRESS_HIGH__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define GRPH_SURFACE_OFFSET_X__GRPH_SURFACE_OFFSET_X_MASK 0x3fff
+#define GRPH_SURFACE_OFFSET_X__GRPH_SURFACE_OFFSET_X__SHIFT 0x0
+#define GRPH_SURFACE_OFFSET_Y__GRPH_SURFACE_OFFSET_Y_MASK 0x3fff
+#define GRPH_SURFACE_OFFSET_Y__GRPH_SURFACE_OFFSET_Y__SHIFT 0x0
+#define GRPH_X_START__GRPH_X_START_MASK 0x3fff
+#define GRPH_X_START__GRPH_X_START__SHIFT 0x0
+#define GRPH_Y_START__GRPH_Y_START_MASK 0x3fff
+#define GRPH_Y_START__GRPH_Y_START__SHIFT 0x0
+#define GRPH_X_END__GRPH_X_END_MASK 0x7fff
+#define GRPH_X_END__GRPH_X_END__SHIFT 0x0
+#define GRPH_Y_END__GRPH_Y_END_MASK 0x7fff
+#define GRPH_Y_END__GRPH_Y_END__SHIFT 0x0
+#define INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE_MASK 0x1
+#define INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT 0x0
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_PENDING_MASK 0x1
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_PENDING__SHIFT 0x0
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_TAKEN_MASK 0x2
+#define GRPH_UPDATE__GRPH_MODE_UPDATE_TAKEN__SHIFT 0x1
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK 0x4
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING__SHIFT 0x2
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_TAKEN_MASK 0x8
+#define GRPH_UPDATE__GRPH_SURFACE_UPDATE_TAKEN__SHIFT 0x3
+#define GRPH_UPDATE__GRPH_SURFACE_XDMA_PENDING_ENABLE_MASK 0x100
+#define GRPH_UPDATE__GRPH_SURFACE_XDMA_PENDING_ENABLE__SHIFT 0x8
+#define GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK 0x10000
+#define GRPH_UPDATE__GRPH_UPDATE_LOCK__SHIFT 0x10
+#define GRPH_UPDATE__GRPH_SURFACE_IGNORE_UPDATE_LOCK_MASK 0x100000
+#define GRPH_UPDATE__GRPH_SURFACE_IGNORE_UPDATE_LOCK__SHIFT 0x14
+#define GRPH_UPDATE__GRPH_MODE_DISABLE_MULTIPLE_UPDATE_MASK 0x1000000
+#define GRPH_UPDATE__GRPH_MODE_DISABLE_MULTIPLE_UPDATE__SHIFT 0x18
+#define GRPH_UPDATE__GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE_MASK 0x10000000
+#define GRPH_UPDATE__GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE__SHIFT 0x1c
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK 0x1
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN__SHIFT 0x0
+#define GRPH_FLIP_CONTROL__GRPH_XDMA_SUPER_AA_EN_MASK 0x2
+#define GRPH_FLIP_CONTROL__GRPH_XDMA_SUPER_AA_EN__SHIFT 0x1
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_IMMEDIATE_EN_MASK 0x10
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_IMMEDIATE_EN__SHIFT 0x4
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_PENDING_MODE_MASK 0x20
+#define GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_PENDING_MODE__SHIFT 0x5
+#define GRPH_SURFACE_ADDRESS_INUSE__GRPH_SURFACE_ADDRESS_INUSE_MASK 0xffffff00
+#define GRPH_SURFACE_ADDRESS_INUSE__GRPH_SURFACE_ADDRESS_INUSE__SHIFT 0x8
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_RESET_MASK 0x1
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_RESET__SHIFT 0x0
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_SIZE_MASK 0x70
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_SIZE__SHIFT 0x4
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_MIN_FREE_ENTRIES_MASK 0x700
+#define GRPH_DFQ_CONTROL__GRPH_DFQ_MIN_FREE_ENTRIES__SHIFT 0x8
+#define GRPH_DFQ_STATUS__GRPH_PRIMARY_DFQ_NUM_ENTRIES_MASK 0xf
+#define GRPH_DFQ_STATUS__GRPH_PRIMARY_DFQ_NUM_ENTRIES__SHIFT 0x0
+#define GRPH_DFQ_STATUS__GRPH_SECONDARY_DFQ_NUM_ENTRIES_MASK 0xf0
+#define GRPH_DFQ_STATUS__GRPH_SECONDARY_DFQ_NUM_ENTRIES__SHIFT 0x4
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_FLAG_MASK 0x100
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_FLAG__SHIFT 0x8
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_ACK_MASK 0x200
+#define GRPH_DFQ_STATUS__GRPH_DFQ_RESET_ACK__SHIFT 0x9
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK 0x1
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED__SHIFT 0x0
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK 0x100
+#define GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR__SHIFT 0x8
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK 0x1
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK__SHIFT 0x0
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_TYPE_MASK 0x100
+#define GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_TYPE__SHIFT 0x8
+#define GRPH_SURFACE_ADDRESS_HIGH_INUSE__GRPH_SURFACE_ADDRESS_HIGH_INUSE_MASK 0xff
+#define GRPH_SURFACE_ADDRESS_HIGH_INUSE__GRPH_SURFACE_ADDRESS_HIGH_INUSE__SHIFT 0x0
+#define GRPH_COMPRESS_SURFACE_ADDRESS__GRPH_COMPRESS_SURFACE_ADDRESS_MASK 0xffffff00
+#define GRPH_COMPRESS_SURFACE_ADDRESS__GRPH_COMPRESS_SURFACE_ADDRESS__SHIFT 0x8
+#define GRPH_COMPRESS_PITCH__GRPH_COMPRESS_PITCH_MASK 0x1ffc0
+#define GRPH_COMPRESS_PITCH__GRPH_COMPRESS_PITCH__SHIFT 0x6
+#define GRPH_COMPRESS_SURFACE_ADDRESS_HIGH__GRPH_COMPRESS_SURFACE_ADDRESS_HIGH_MASK 0xff
+#define GRPH_COMPRESS_SURFACE_ADDRESS_HIGH__GRPH_COMPRESS_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT__GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT_MASK 0xff
+#define GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT__GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT__SHIFT 0x0
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_SELECT_MASK 0x1
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_SELECT__SHIFT 0x0
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_R_SIGN_MASK 0x2
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_R_SIGN__SHIFT 0x1
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_G_SIGN_MASK 0x4
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_G_SIGN__SHIFT 0x2
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_B_SIGN_MASK 0x8
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_B_SIGN__SHIFT 0x3
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK 0x10
+#define PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS__SHIFT 0x4
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_BIAS_R_MASK 0xffff
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_BIAS_R__SHIFT 0x0
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_SCALE_R_MASK 0xffff0000
+#define PRESCALE_VALUES_GRPH_R__GRPH_PRESCALE_SCALE_R__SHIFT 0x10
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_BIAS_G_MASK 0xffff
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_BIAS_G__SHIFT 0x0
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_SCALE_G_MASK 0xffff0000
+#define PRESCALE_VALUES_GRPH_G__GRPH_PRESCALE_SCALE_G__SHIFT 0x10
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_BIAS_B_MASK 0xffff
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_BIAS_B__SHIFT 0x0
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_SCALE_B_MASK 0xffff0000
+#define PRESCALE_VALUES_GRPH_B__GRPH_PRESCALE_SCALE_B__SHIFT 0x10
+#define INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE_MASK 0x3
+#define INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT 0x0
+#define INPUT_CSC_C11_C12__INPUT_CSC_C11_MASK 0xffff
+#define INPUT_CSC_C11_C12__INPUT_CSC_C11__SHIFT 0x0
+#define INPUT_CSC_C11_C12__INPUT_CSC_C12_MASK 0xffff0000
+#define INPUT_CSC_C11_C12__INPUT_CSC_C12__SHIFT 0x10
+#define INPUT_CSC_C13_C14__INPUT_CSC_C13_MASK 0xffff
+#define INPUT_CSC_C13_C14__INPUT_CSC_C13__SHIFT 0x0
+#define INPUT_CSC_C13_C14__INPUT_CSC_C14_MASK 0xffff0000
+#define INPUT_CSC_C13_C14__INPUT_CSC_C14__SHIFT 0x10
+#define INPUT_CSC_C21_C22__INPUT_CSC_C21_MASK 0xffff
+#define INPUT_CSC_C21_C22__INPUT_CSC_C21__SHIFT 0x0
+#define INPUT_CSC_C21_C22__INPUT_CSC_C22_MASK 0xffff0000
+#define INPUT_CSC_C21_C22__INPUT_CSC_C22__SHIFT 0x10
+#define INPUT_CSC_C23_C24__INPUT_CSC_C23_MASK 0xffff
+#define INPUT_CSC_C23_C24__INPUT_CSC_C23__SHIFT 0x0
+#define INPUT_CSC_C23_C24__INPUT_CSC_C24_MASK 0xffff0000
+#define INPUT_CSC_C23_C24__INPUT_CSC_C24__SHIFT 0x10
+#define INPUT_CSC_C31_C32__INPUT_CSC_C31_MASK 0xffff
+#define INPUT_CSC_C31_C32__INPUT_CSC_C31__SHIFT 0x0
+#define INPUT_CSC_C31_C32__INPUT_CSC_C32_MASK 0xffff0000
+#define INPUT_CSC_C31_C32__INPUT_CSC_C32__SHIFT 0x10
+#define INPUT_CSC_C33_C34__INPUT_CSC_C33_MASK 0xffff
+#define INPUT_CSC_C33_C34__INPUT_CSC_C33__SHIFT 0x0
+#define INPUT_CSC_C33_C34__INPUT_CSC_C34_MASK 0xffff0000
+#define INPUT_CSC_C33_C34__INPUT_CSC_C34__SHIFT 0x10
+#define OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE_MASK 0x7
+#define OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT 0x0
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C11_MASK 0xffff
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C11__SHIFT 0x0
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C12_MASK 0xffff0000
+#define OUTPUT_CSC_C11_C12__OUTPUT_CSC_C12__SHIFT 0x10
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C13_MASK 0xffff
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C13__SHIFT 0x0
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C14_MASK 0xffff0000
+#define OUTPUT_CSC_C13_C14__OUTPUT_CSC_C14__SHIFT 0x10
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C21_MASK 0xffff
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C21__SHIFT 0x0
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C22_MASK 0xffff0000
+#define OUTPUT_CSC_C21_C22__OUTPUT_CSC_C22__SHIFT 0x10
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C23_MASK 0xffff
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C23__SHIFT 0x0
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C24_MASK 0xffff0000
+#define OUTPUT_CSC_C23_C24__OUTPUT_CSC_C24__SHIFT 0x10
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C31_MASK 0xffff
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C31__SHIFT 0x0
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C32_MASK 0xffff0000
+#define OUTPUT_CSC_C31_C32__OUTPUT_CSC_C32__SHIFT 0x10
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C33_MASK 0xffff
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C33__SHIFT 0x0
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C34_MASK 0xffff0000
+#define OUTPUT_CSC_C33_C34__OUTPUT_CSC_C34__SHIFT 0x10
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C11_MASK 0xffff
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C11__SHIFT 0x0
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C12_MASK 0xffff0000
+#define COMM_MATRIXA_TRANS_C11_C12__COMM_MATRIXA_TRANS_C12__SHIFT 0x10
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C13_MASK 0xffff
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C13__SHIFT 0x0
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C14_MASK 0xffff0000
+#define COMM_MATRIXA_TRANS_C13_C14__COMM_MATRIXA_TRANS_C14__SHIFT 0x10
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C21_MASK 0xffff
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C21__SHIFT 0x0
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C22_MASK 0xffff0000
+#define COMM_MATRIXA_TRANS_C21_C22__COMM_MATRIXA_TRANS_C22__SHIFT 0x10
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C23_MASK 0xffff
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C23__SHIFT 0x0
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C24_MASK 0xffff0000
+#define COMM_MATRIXA_TRANS_C23_C24__COMM_MATRIXA_TRANS_C24__SHIFT 0x10
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C31_MASK 0xffff
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C31__SHIFT 0x0
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C32_MASK 0xffff0000
+#define COMM_MATRIXA_TRANS_C31_C32__COMM_MATRIXA_TRANS_C32__SHIFT 0x10
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C33_MASK 0xffff
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C33__SHIFT 0x0
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C34_MASK 0xffff0000
+#define COMM_MATRIXA_TRANS_C33_C34__COMM_MATRIXA_TRANS_C34__SHIFT 0x10
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C11_MASK 0xffff
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C11__SHIFT 0x0
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C12_MASK 0xffff0000
+#define COMM_MATRIXB_TRANS_C11_C12__COMM_MATRIXB_TRANS_C12__SHIFT 0x10
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C13_MASK 0xffff
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C13__SHIFT 0x0
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C14_MASK 0xffff0000
+#define COMM_MATRIXB_TRANS_C13_C14__COMM_MATRIXB_TRANS_C14__SHIFT 0x10
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C21_MASK 0xffff
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C21__SHIFT 0x0
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C22_MASK 0xffff0000
+#define COMM_MATRIXB_TRANS_C21_C22__COMM_MATRIXB_TRANS_C22__SHIFT 0x10
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C23_MASK 0xffff
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C23__SHIFT 0x0
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C24_MASK 0xffff0000
+#define COMM_MATRIXB_TRANS_C23_C24__COMM_MATRIXB_TRANS_C24__SHIFT 0x10
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C31_MASK 0xffff
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C31__SHIFT 0x0
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C32_MASK 0xffff0000
+#define COMM_MATRIXB_TRANS_C31_C32__COMM_MATRIXB_TRANS_C32__SHIFT 0x10
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C33_MASK 0xffff
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C33__SHIFT 0x0
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C34_MASK 0xffff0000
+#define COMM_MATRIXB_TRANS_C33_C34__COMM_MATRIXB_TRANS_C34__SHIFT 0x10
+#define DENORM_CONTROL__DENORM_MODE_MASK 0x7
+#define DENORM_CONTROL__DENORM_MODE__SHIFT 0x0
+#define DENORM_CONTROL__DENORM_14BIT_OUT_MASK 0x10
+#define DENORM_CONTROL__DENORM_14BIT_OUT__SHIFT 0x4
+#define OUT_ROUND_CONTROL__OUT_ROUND_TRUNC_MODE_MASK 0xf
+#define OUT_ROUND_CONTROL__OUT_ROUND_TRUNC_MODE__SHIFT 0x0
+#define OUT_CLAMP_CONTROL_R_CR__OUT_CLAMP_MAX_R_CR_MASK 0x3fff
+#define OUT_CLAMP_CONTROL_R_CR__OUT_CLAMP_MAX_R_CR__SHIFT 0x0
+#define OUT_CLAMP_CONTROL_R_CR__OUT_CLAMP_MIN_R_CR_MASK 0x3fff0000
+#define OUT_CLAMP_CONTROL_R_CR__OUT_CLAMP_MIN_R_CR__SHIFT 0x10
+#define OUT_CLAMP_CONTROL_G_Y__OUT_CLAMP_MAX_G_Y_MASK 0x3fff
+#define OUT_CLAMP_CONTROL_G_Y__OUT_CLAMP_MAX_G_Y__SHIFT 0x0
+#define OUT_CLAMP_CONTROL_G_Y__OUT_CLAMP_MIN_G_Y_MASK 0x3fff0000
+#define OUT_CLAMP_CONTROL_G_Y__OUT_CLAMP_MIN_G_Y__SHIFT 0x10
+#define OUT_CLAMP_CONTROL_B_CB__OUT_CLAMP_MAX_B_CB_MASK 0x3fff
+#define OUT_CLAMP_CONTROL_B_CB__OUT_CLAMP_MAX_B_CB__SHIFT 0x0
+#define OUT_CLAMP_CONTROL_B_CB__OUT_CLAMP_MIN_B_CB_MASK 0x3fff0000
+#define OUT_CLAMP_CONTROL_B_CB__OUT_CLAMP_MIN_B_CB__SHIFT 0x10
+#define KEY_CONTROL__KEY_MODE_MASK 0x6
+#define KEY_CONTROL__KEY_MODE__SHIFT 0x1
+#define KEY_RANGE_ALPHA__KEY_ALPHA_LOW_MASK 0xffff
+#define KEY_RANGE_ALPHA__KEY_ALPHA_LOW__SHIFT 0x0
+#define KEY_RANGE_ALPHA__KEY_ALPHA_HIGH_MASK 0xffff0000
+#define KEY_RANGE_ALPHA__KEY_ALPHA_HIGH__SHIFT 0x10
+#define KEY_RANGE_RED__KEY_RED_LOW_MASK 0xffff
+#define KEY_RANGE_RED__KEY_RED_LOW__SHIFT 0x0
+#define KEY_RANGE_RED__KEY_RED_HIGH_MASK 0xffff0000
+#define KEY_RANGE_RED__KEY_RED_HIGH__SHIFT 0x10
+#define KEY_RANGE_GREEN__KEY_GREEN_LOW_MASK 0xffff
+#define KEY_RANGE_GREEN__KEY_GREEN_LOW__SHIFT 0x0
+#define KEY_RANGE_GREEN__KEY_GREEN_HIGH_MASK 0xffff0000
+#define KEY_RANGE_GREEN__KEY_GREEN_HIGH__SHIFT 0x10
+#define KEY_RANGE_BLUE__KEY_BLUE_LOW_MASK 0xffff
+#define KEY_RANGE_BLUE__KEY_BLUE_LOW__SHIFT 0x0
+#define KEY_RANGE_BLUE__KEY_BLUE_HIGH_MASK 0xffff0000
+#define KEY_RANGE_BLUE__KEY_BLUE_HIGH__SHIFT 0x10
+#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE_MASK 0x3
+#define DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT 0x0
+#define DEGAMMA_CONTROL__CURSOR2_DEGAMMA_MODE_MASK 0x300
+#define DEGAMMA_CONTROL__CURSOR2_DEGAMMA_MODE__SHIFT 0x8
+#define DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE_MASK 0x3000
+#define DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT 0xc
+#define GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE_MASK 0x3
+#define GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT 0x0
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C11_MASK 0xffff
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C11__SHIFT 0x0
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C12_MASK 0xffff0000
+#define GAMUT_REMAP_C11_C12__GAMUT_REMAP_C12__SHIFT 0x10
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C13_MASK 0xffff
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C13__SHIFT 0x0
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C14_MASK 0xffff0000
+#define GAMUT_REMAP_C13_C14__GAMUT_REMAP_C14__SHIFT 0x10
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C21_MASK 0xffff
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C21__SHIFT 0x0
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C22_MASK 0xffff0000
+#define GAMUT_REMAP_C21_C22__GAMUT_REMAP_C22__SHIFT 0x10
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C23_MASK 0xffff
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C23__SHIFT 0x0
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C24_MASK 0xffff0000
+#define GAMUT_REMAP_C23_C24__GAMUT_REMAP_C24__SHIFT 0x10
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C31_MASK 0xffff
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C31__SHIFT 0x0
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C32_MASK 0xffff0000
+#define GAMUT_REMAP_C31_C32__GAMUT_REMAP_C32__SHIFT 0x10
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C33_MASK 0xffff
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C33__SHIFT 0x0
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C34_MASK 0xffff0000
+#define GAMUT_REMAP_C33_C34__GAMUT_REMAP_C34__SHIFT 0x10
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_EN_MASK 0x1
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_EN__SHIFT 0x0
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_MODE_MASK 0x30
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_MODE__SHIFT 0x4
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_DEPTH_MASK 0xc0
+#define DCP_SPATIAL_DITHER_CNTL__DCP_SPATIAL_DITHER_DEPTH__SHIFT 0x6
+#define DCP_SPATIAL_DITHER_CNTL__DCP_FRAME_RANDOM_ENABLE_MASK 0x100
+#define DCP_SPATIAL_DITHER_CNTL__DCP_FRAME_RANDOM_ENABLE__SHIFT 0x8
+#define DCP_SPATIAL_DITHER_CNTL__DCP_RGB_RANDOM_ENABLE_MASK 0x200
+#define DCP_SPATIAL_DITHER_CNTL__DCP_RGB_RANDOM_ENABLE__SHIFT 0x9
+#define DCP_SPATIAL_DITHER_CNTL__DCP_HIGHPASS_RANDOM_ENABLE_MASK 0x400
+#define DCP_SPATIAL_DITHER_CNTL__DCP_HIGHPASS_RANDOM_ENABLE__SHIFT 0xa
+#define DCP_RANDOM_SEEDS__DCP_RAND_R_SEED_MASK 0xff
+#define DCP_RANDOM_SEEDS__DCP_RAND_R_SEED__SHIFT 0x0
+#define DCP_RANDOM_SEEDS__DCP_RAND_G_SEED_MASK 0xff00
+#define DCP_RANDOM_SEEDS__DCP_RAND_G_SEED__SHIFT 0x8
+#define DCP_RANDOM_SEEDS__DCP_RAND_B_SEED_MASK 0xff0000
+#define DCP_RANDOM_SEEDS__DCP_RAND_B_SEED__SHIFT 0x10
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_DATA_MASK 0x3ffff
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_DATA__SHIFT 0x0
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_INDEX_MASK 0x7f00000
+#define DCP_FP_CONVERTED_FIELD__DCP_FP_CONVERTED_FIELD_INDEX__SHIFT 0x14
+#define CUR_CONTROL__CURSOR_EN_MASK 0x1
+#define CUR_CONTROL__CURSOR_EN__SHIFT 0x0
+#define CUR_CONTROL__CUR_INV_TRANS_CLAMP_MASK 0x10
+#define CUR_CONTROL__CUR_INV_TRANS_CLAMP__SHIFT 0x4
+#define CUR_CONTROL__CURSOR_MODE_MASK 0x300
+#define CUR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CUR_CONTROL__CURSOR_BUSY_START_LINE_POSITION_MASK 0xf000
+#define CUR_CONTROL__CURSOR_BUSY_START_LINE_POSITION__SHIFT 0xc
+#define CUR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x10000
+#define CUR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x10
+#define CUR_CONTROL__CURSOR_FORCE_MC_ON_MASK 0x100000
+#define CUR_CONTROL__CURSOR_FORCE_MC_ON__SHIFT 0x14
+#define CUR_CONTROL__CURSOR_URGENT_CONTROL_MASK 0x7000000
+#define CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT 0x18
+#define CUR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xffffffff
+#define CUR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CUR_SIZE__CURSOR_HEIGHT_MASK 0x7f
+#define CUR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CUR_SIZE__CURSOR_WIDTH_MASK 0x7f0000
+#define CUR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CUR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0xff
+#define CUR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CUR_POSITION__CURSOR_Y_POSITION_MASK 0x3fff
+#define CUR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CUR_POSITION__CURSOR_X_POSITION_MASK 0x3fff0000
+#define CUR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x7f
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x7f0000
+#define CUR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CUR_COLOR1__CUR_COLOR1_BLUE_MASK 0xff
+#define CUR_COLOR1__CUR_COLOR1_BLUE__SHIFT 0x0
+#define CUR_COLOR1__CUR_COLOR1_GREEN_MASK 0xff00
+#define CUR_COLOR1__CUR_COLOR1_GREEN__SHIFT 0x8
+#define CUR_COLOR1__CUR_COLOR1_RED_MASK 0xff0000
+#define CUR_COLOR1__CUR_COLOR1_RED__SHIFT 0x10
+#define CUR_COLOR2__CUR_COLOR2_BLUE_MASK 0xff
+#define CUR_COLOR2__CUR_COLOR2_BLUE__SHIFT 0x0
+#define CUR_COLOR2__CUR_COLOR2_GREEN_MASK 0xff00
+#define CUR_COLOR2__CUR_COLOR2_GREEN__SHIFT 0x8
+#define CUR_COLOR2__CUR_COLOR2_RED_MASK 0xff0000
+#define CUR_COLOR2__CUR_COLOR2_RED__SHIFT 0x10
+#define CUR_UPDATE__CURSOR_UPDATE_PENDING_MASK 0x1
+#define CUR_UPDATE__CURSOR_UPDATE_PENDING__SHIFT 0x0
+#define CUR_UPDATE__CURSOR_UPDATE_TAKEN_MASK 0x2
+#define CUR_UPDATE__CURSOR_UPDATE_TAKEN__SHIFT 0x1
+#define CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK 0x10000
+#define CUR_UPDATE__CURSOR_UPDATE_LOCK__SHIFT 0x10
+#define CUR_UPDATE__CURSOR_DISABLE_MULTIPLE_UPDATE_MASK 0x1000000
+#define CUR_UPDATE__CURSOR_DISABLE_MULTIPLE_UPDATE__SHIFT 0x18
+#define CUR_UPDATE__CURSOR_UPDATE_STEREO_MODE_MASK 0x6000000
+#define CUR_UPDATE__CURSOR_UPDATE_STEREO_MODE__SHIFT 0x19
+#define CUR_REQUEST_FILTER_CNTL__CUR_REQUEST_FILTER_DIS_MASK 0x1
+#define CUR_REQUEST_FILTER_CNTL__CUR_REQUEST_FILTER_DIS__SHIFT 0x0
+#define CUR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x1
+#define CUR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CUR_STEREO_CONTROL__CURSOR_STEREO_OFFSET_YNX_MASK 0x2
+#define CUR_STEREO_CONTROL__CURSOR_STEREO_OFFSET_YNX__SHIFT 0x1
+#define CUR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x3ff0
+#define CUR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CUR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0x3ff0000
+#define CUR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x10
+#define DC_LUT_RW_MODE__DC_LUT_RW_MODE_MASK 0x1
+#define DC_LUT_RW_MODE__DC_LUT_RW_MODE__SHIFT 0x0
+#define DC_LUT_RW_MODE__DC_LUT_ERROR_MASK 0x10000
+#define DC_LUT_RW_MODE__DC_LUT_ERROR__SHIFT 0x10
+#define DC_LUT_RW_MODE__DC_LUT_ERROR_RST_MASK 0x20000
+#define DC_LUT_RW_MODE__DC_LUT_ERROR_RST__SHIFT 0x11
+#define DC_LUT_RW_INDEX__DC_LUT_RW_INDEX_MASK 0xff
+#define DC_LUT_RW_INDEX__DC_LUT_RW_INDEX__SHIFT 0x0
+#define DC_LUT_SEQ_COLOR__DC_LUT_SEQ_COLOR_MASK 0xffff
+#define DC_LUT_SEQ_COLOR__DC_LUT_SEQ_COLOR__SHIFT 0x0
+#define DC_LUT_PWL_DATA__DC_LUT_BASE_MASK 0xffff
+#define DC_LUT_PWL_DATA__DC_LUT_BASE__SHIFT 0x0
+#define DC_LUT_PWL_DATA__DC_LUT_DELTA_MASK 0xffff0000
+#define DC_LUT_PWL_DATA__DC_LUT_DELTA__SHIFT 0x10
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_BLUE_MASK 0x3ff
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_BLUE__SHIFT 0x0
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_GREEN_MASK 0xffc00
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_GREEN__SHIFT 0xa
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_RED_MASK 0x3ff00000
+#define DC_LUT_30_COLOR__DC_LUT_COLOR_10_RED__SHIFT 0x14
+#define DC_LUT_VGA_ACCESS_ENABLE__DC_LUT_VGA_ACCESS_ENABLE_MASK 0x1
+#define DC_LUT_VGA_ACCESS_ENABLE__DC_LUT_VGA_ACCESS_ENABLE__SHIFT 0x0
+#define DC_LUT_WRITE_EN_MASK__DC_LUT_WRITE_EN_MASK_MASK 0x7
+#define DC_LUT_WRITE_EN_MASK__DC_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL_MASK 0x1
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL__SHIFT 0x0
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL_DONE_MASK 0x2
+#define DC_LUT_AUTOFILL__DC_LUT_AUTOFILL_DONE__SHIFT 0x1
+#define DC_LUT_CONTROL__DC_LUT_INC_B_MASK 0xf
+#define DC_LUT_CONTROL__DC_LUT_INC_B__SHIFT 0x0
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_SIGNED_EN_MASK 0x10
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_SIGNED_EN__SHIFT 0x4
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FLOAT_POINT_EN_MASK 0x20
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FLOAT_POINT_EN__SHIFT 0x5
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FORMAT_MASK 0xc0
+#define DC_LUT_CONTROL__DC_LUT_DATA_B_FORMAT__SHIFT 0x6
+#define DC_LUT_CONTROL__DC_LUT_INC_G_MASK 0xf00
+#define DC_LUT_CONTROL__DC_LUT_INC_G__SHIFT 0x8
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_SIGNED_EN_MASK 0x1000
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_SIGNED_EN__SHIFT 0xc
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FLOAT_POINT_EN_MASK 0x2000
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FLOAT_POINT_EN__SHIFT 0xd
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FORMAT_MASK 0xc000
+#define DC_LUT_CONTROL__DC_LUT_DATA_G_FORMAT__SHIFT 0xe
+#define DC_LUT_CONTROL__DC_LUT_INC_R_MASK 0xf0000
+#define DC_LUT_CONTROL__DC_LUT_INC_R__SHIFT 0x10
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_SIGNED_EN_MASK 0x100000
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_SIGNED_EN__SHIFT 0x14
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FLOAT_POINT_EN_MASK 0x200000
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FLOAT_POINT_EN__SHIFT 0x15
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FORMAT_MASK 0xc00000
+#define DC_LUT_CONTROL__DC_LUT_DATA_R_FORMAT__SHIFT 0x16
+#define DC_LUT_BLACK_OFFSET_BLUE__DC_LUT_BLACK_OFFSET_BLUE_MASK 0xffff
+#define DC_LUT_BLACK_OFFSET_BLUE__DC_LUT_BLACK_OFFSET_BLUE__SHIFT 0x0
+#define DC_LUT_BLACK_OFFSET_GREEN__DC_LUT_BLACK_OFFSET_GREEN_MASK 0xffff
+#define DC_LUT_BLACK_OFFSET_GREEN__DC_LUT_BLACK_OFFSET_GREEN__SHIFT 0x0
+#define DC_LUT_BLACK_OFFSET_RED__DC_LUT_BLACK_OFFSET_RED_MASK 0xffff
+#define DC_LUT_BLACK_OFFSET_RED__DC_LUT_BLACK_OFFSET_RED__SHIFT 0x0
+#define DC_LUT_WHITE_OFFSET_BLUE__DC_LUT_WHITE_OFFSET_BLUE_MASK 0xffff
+#define DC_LUT_WHITE_OFFSET_BLUE__DC_LUT_WHITE_OFFSET_BLUE__SHIFT 0x0
+#define DC_LUT_WHITE_OFFSET_GREEN__DC_LUT_WHITE_OFFSET_GREEN_MASK 0xffff
+#define DC_LUT_WHITE_OFFSET_GREEN__DC_LUT_WHITE_OFFSET_GREEN__SHIFT 0x0
+#define DC_LUT_WHITE_OFFSET_RED__DC_LUT_WHITE_OFFSET_RED_MASK 0xffff
+#define DC_LUT_WHITE_OFFSET_RED__DC_LUT_WHITE_OFFSET_RED__SHIFT 0x0
+#define DCP_CRC_CONTROL__DCP_CRC_ENABLE_MASK 0x1
+#define DCP_CRC_CONTROL__DCP_CRC_ENABLE__SHIFT 0x0
+#define DCP_CRC_CONTROL__DCP_CRC_SOURCE_SEL_MASK 0x1c
+#define DCP_CRC_CONTROL__DCP_CRC_SOURCE_SEL__SHIFT 0x2
+#define DCP_CRC_CONTROL__DCP_CRC_LINE_SEL_MASK 0x300
+#define DCP_CRC_CONTROL__DCP_CRC_LINE_SEL__SHIFT 0x8
+#define DCP_CRC_MASK__DCP_CRC_MASK_MASK 0xffffffff
+#define DCP_CRC_MASK__DCP_CRC_MASK__SHIFT 0x0
+#define DCP_CRC_CURRENT__DCP_CRC_CURRENT_MASK 0xffffffff
+#define DCP_CRC_CURRENT__DCP_CRC_CURRENT__SHIFT 0x0
+#define DVMM_PTE_CONTROL__DVMM_USE_SINGLE_PTE_MASK 0x1
+#define DVMM_PTE_CONTROL__DVMM_USE_SINGLE_PTE__SHIFT 0x0
+#define DVMM_PTE_CONTROL__DVMM_PAGE_WIDTH_MASK 0x1e
+#define DVMM_PTE_CONTROL__DVMM_PAGE_WIDTH__SHIFT 0x1
+#define DVMM_PTE_CONTROL__DVMM_PAGE_HEIGHT_MASK 0x1e0
+#define DVMM_PTE_CONTROL__DVMM_PAGE_HEIGHT__SHIFT 0x5
+#define DVMM_PTE_CONTROL__DVMM_MIN_PTE_BEFORE_FLIP_MASK 0x7fe00
+#define DVMM_PTE_CONTROL__DVMM_MIN_PTE_BEFORE_FLIP__SHIFT 0x9
+#define DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE0_MASK 0x100000
+#define DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE0__SHIFT 0x14
+#define DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE1_MASK 0x200000
+#define DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE1__SHIFT 0x15
+#define DCP_CRC_LAST__DCP_CRC_LAST_MASK 0xffffffff
+#define DCP_CRC_LAST__DCP_CRC_LAST__SHIFT 0x0
+#define DCP_DEBUG__DCP_DEBUG_MASK 0xffffffff
+#define DCP_DEBUG__DCP_DEBUG__SHIFT 0x0
+#define GRPH_FLIP_RATE_CNTL__GRPH_FLIP_RATE_MASK 0x7
+#define GRPH_FLIP_RATE_CNTL__GRPH_FLIP_RATE__SHIFT 0x0
+#define GRPH_FLIP_RATE_CNTL__GRPH_FLIP_RATE_ENABLE_MASK 0x8
+#define GRPH_FLIP_RATE_CNTL__GRPH_FLIP_RATE_ENABLE__SHIFT 0x3
+#define DCP_GSL_CONTROL__DCP_GSL0_EN_MASK 0x1
+#define DCP_GSL_CONTROL__DCP_GSL0_EN__SHIFT 0x0
+#define DCP_GSL_CONTROL__DCP_GSL1_EN_MASK 0x2
+#define DCP_GSL_CONTROL__DCP_GSL1_EN__SHIFT 0x1
+#define DCP_GSL_CONTROL__DCP_GSL2_EN_MASK 0x4
+#define DCP_GSL_CONTROL__DCP_GSL2_EN__SHIFT 0x2
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY_MASK 0xf000
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY__SHIFT 0xc
+#define DCP_GSL_CONTROL__DCP_GSL_MASTER_EN_MASK 0x10000
+#define DCP_GSL_CONTROL__DCP_GSL_MASTER_EN__SHIFT 0x10
+#define DCP_GSL_CONTROL__DCP_GSL_XDMA_GROUP_MASK 0x60000
+#define DCP_GSL_CONTROL__DCP_GSL_XDMA_GROUP__SHIFT 0x11
+#define DCP_GSL_CONTROL__DCP_GSL_XDMA_GROUP_UNDERFLOW_EN_MASK 0x80000
+#define DCP_GSL_CONTROL__DCP_GSL_XDMA_GROUP_UNDERFLOW_EN__SHIFT 0x13
+#define DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE_MASK 0x3000000
+#define DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE__SHIFT 0x18
+#define DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING_MASK 0x8000000
+#define DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING__SHIFT 0x1b
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY_MASK 0xf0000000
+#define DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY__SHIFT 0x1c
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_20BPP_MASK 0xf
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_20BPP__SHIFT 0x0
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_30BPP_MASK 0x1f0
+#define DCP_LB_DATA_GAP_BETWEEN_CHUNK__DCP_LB_GAP_BETWEEN_CHUNK_30BPP__SHIFT 0x4
+#define DCP_DEBUG_SG__DCP_DEBUG_SG_MASK 0xffffffff
+#define DCP_DEBUG_SG__DCP_DEBUG_SG__SHIFT 0x0
+#define DCP_DEBUG_SG2__DCP_DEBUG_SG2_MASK 0xffffffff
+#define DCP_DEBUG_SG2__DCP_DEBUG_SG2__SHIFT 0x0
+#define DCP_DVMM_DEBUG__DCP_DVMM_DEBUG_MASK 0xffffffff
+#define DCP_DVMM_DEBUG__DCP_DVMM_DEBUG__SHIFT 0x0
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_INDEX_MASK 0xff
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DCP_TEST_DEBUG_INDEX__DCP_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DCP_TEST_DEBUG_DATA__DCP_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DCP_TEST_DEBUG_DATA__DCP_TEST_DEBUG_DATA__SHIFT 0x0
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_EN_MASK 0x1
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_EN__SHIFT 0x0
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_MODE_MASK 0x300
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_MODE__SHIFT 0x8
+#define GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_SURFACE_PENDING_MASK 0x10000
+#define GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_SURFACE_PENDING__SHIFT 0x10
+#define GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_SURFACE_PENDING_MASK 0x20000
+#define GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_SURFACE_PENDING__SHIFT 0x11
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_SELECT_DISABLE_MASK 0x10000000
+#define GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_SELECT_DISABLE__SHIFT 0x1c
+#define DCP_DEBUG2__DCP_DEBUG2_MASK 0xffffffff
+#define DCP_DEBUG2__DCP_DEBUG2__SHIFT 0x0
+#define HW_ROTATION__GRPH_ROTATION_ANGLE_MASK 0x7
+#define HW_ROTATION__GRPH_ROTATION_ANGLE__SHIFT 0x0
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL__GRPH_XDMA_CACHE_UNDERFLOW_CNT_EN_MASK 0x1
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL__GRPH_XDMA_CACHE_UNDERFLOW_CNT_EN__SHIFT 0x0
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL__GRPH_XDMA_CACHE_UNDERFLOW_CNT_MODE_MASK 0x2
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL__GRPH_XDMA_CACHE_UNDERFLOW_CNT_MODE__SHIFT 0x1
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL__GRPH_XDMA_CACHE_UNDERFLOW_FRAME_CNT_MASK 0x1fff0
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_CNTL__GRPH_XDMA_CACHE_UNDERFLOW_FRAME_CNT__SHIFT 0x4
+#define REGAMMA_CONTROL__GRPH_REGAMMA_MODE_MASK 0x7
+#define REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT 0x0
+#define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX_MASK 0x1ff
+#define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x0
+#define REGAMMA_LUT_DATA__REGAMMA_LUT_DATA_MASK 0x7ffff
+#define REGAMMA_LUT_DATA__REGAMMA_LUT_DATA__SHIFT 0x0
+#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x7
+#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START_MASK 0x3ffff
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START__SHIFT 0x0
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START_SEGMENT_MASK 0x7f00000
+#define REGAMMA_CNTLA_START_CNTL__REGAMMA_CNTLA_EXP_REGION_START_SEGMENT__SHIFT 0x14
+#define REGAMMA_CNTLA_SLOPE_CNTL__REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE_MASK 0x3ffff
+#define REGAMMA_CNTLA_SLOPE_CNTL__REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE__SHIFT 0x0
+#define REGAMMA_CNTLA_END_CNTL1__REGAMMA_CNTLA_EXP_REGION_END_MASK 0xffff
+#define REGAMMA_CNTLA_END_CNTL1__REGAMMA_CNTLA_EXP_REGION_END__SHIFT 0x0
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_SLOPE_MASK 0xffff
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_SLOPE__SHIFT 0x0
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_BASE_MASK 0xffff0000
+#define REGAMMA_CNTLA_END_CNTL2__REGAMMA_CNTLA_EXP_REGION_END_BASE__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_0_1__REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_2_3__REGAMMA_CNTLA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_4_5__REGAMMA_CNTLA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_6_7__REGAMMA_CNTLA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_8_9__REGAMMA_CNTLA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_10_11__REGAMMA_CNTLA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_12_13__REGAMMA_CNTLA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLA_REGION_14_15__REGAMMA_CNTLA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START_MASK 0x3ffff
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START__SHIFT 0x0
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START_SEGMENT_MASK 0x7f00000
+#define REGAMMA_CNTLB_START_CNTL__REGAMMA_CNTLB_EXP_REGION_START_SEGMENT__SHIFT 0x14
+#define REGAMMA_CNTLB_SLOPE_CNTL__REGAMMA_CNTLB_EXP_REGION_LINEAR_SLOPE_MASK 0x3ffff
+#define REGAMMA_CNTLB_SLOPE_CNTL__REGAMMA_CNTLB_EXP_REGION_LINEAR_SLOPE__SHIFT 0x0
+#define REGAMMA_CNTLB_END_CNTL1__REGAMMA_CNTLB_EXP_REGION_END_MASK 0xffff
+#define REGAMMA_CNTLB_END_CNTL1__REGAMMA_CNTLB_EXP_REGION_END__SHIFT 0x0
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_SLOPE_MASK 0xffff
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_SLOPE__SHIFT 0x0
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_BASE_MASK 0xffff0000
+#define REGAMMA_CNTLB_END_CNTL2__REGAMMA_CNTLB_EXP_REGION_END_BASE__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_0_1__REGAMMA_CNTLB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_2_3__REGAMMA_CNTLB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_4_5__REGAMMA_CNTLB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_6_7__REGAMMA_CNTLB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_8_9__REGAMMA_CNTLB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_10_11__REGAMMA_CNTLB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_12_13__REGAMMA_CNTLB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_LUT_OFFSET_MASK 0x1ff
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_NUM_SEGMENTS_MASK 0x7000
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_LUT_OFFSET_MASK 0x1ff0000
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000
+#define REGAMMA_CNTLB_REGION_14_15__REGAMMA_CNTLB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define ALPHA_CONTROL__ALPHA_ROUND_TRUNC_MODE_MASK 0x1
+#define ALPHA_CONTROL__ALPHA_ROUND_TRUNC_MODE__SHIFT 0x0
+#define ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK 0x2
+#define ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA__SHIFT 0x1
+#define GRPH_XDMA_RECOVERY_SURFACE_ADDRESS__GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_MASK 0xffffff00
+#define GRPH_XDMA_RECOVERY_SURFACE_ADDRESS__GRPH_XDMA_RECOVERY_SURFACE_ADDRESS__SHIFT 0x8
+#define GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH__GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH_MASK 0xff
+#define GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH__GRPH_XDMA_RECOVERY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_CNT_MASK 0xfffff
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_CNT__SHIFT 0x0
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_CNT_STATUS_MASK 0x1000000
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_CNT_STATUS__SHIFT 0x18
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_FRAME_MASK_MASK 0x2000000
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_FRAME_MASK__SHIFT 0x19
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_FRAME_ACK_MASK 0x4000000
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_FRAME_ACK__SHIFT 0x1a
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_INT_MASK 0x10000000
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_INT__SHIFT 0x1c
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_INT_MASK_MASK 0x20000000
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_INT_MASK__SHIFT 0x1d
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_INT_ACK_MASK 0x40000000
+#define GRPH_XDMA_CACHE_UNDERFLOW_DET_STATUS__GRPH_XDMA_CACHE_UNDERFLOW_INT_ACK__SHIFT 0x1e
+#define GRPH_SURFACE_COUNTER_CONTROL__GRPH_SURFACE_COUNTER_EN_MASK 0x1
+#define GRPH_SURFACE_COUNTER_CONTROL__GRPH_SURFACE_COUNTER_EN__SHIFT 0x0
+#define GRPH_SURFACE_COUNTER_CONTROL__GRPH_SURFACE_COUNTER_EVENT_SELECT_MASK 0x1e
+#define GRPH_SURFACE_COUNTER_CONTROL__GRPH_SURFACE_COUNTER_EVENT_SELECT__SHIFT 0x1
+#define GRPH_SURFACE_COUNTER_CONTROL__GRPH_SURFACE_COUNTER_ERR_WRAP_OCCURED_MASK 0x200
+#define GRPH_SURFACE_COUNTER_CONTROL__GRPH_SURFACE_COUNTER_ERR_WRAP_OCCURED__SHIFT 0x9
+#define GRPH_SURFACE_COUNTER_OUTPUT__GRPH_SURFACE_COUNTER_MIN_MASK 0xffff
+#define GRPH_SURFACE_COUNTER_OUTPUT__GRPH_SURFACE_COUNTER_MIN__SHIFT 0x0
+#define GRPH_SURFACE_COUNTER_OUTPUT__GRPH_SURFACE_COUNTER_MAX_MASK 0xffff0000
+#define GRPH_SURFACE_COUNTER_OUTPUT__GRPH_SURFACE_COUNTER_MAX__SHIFT 0x10
+#define DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x7
+#define DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x70
+#define DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x100
+#define DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG_FE_CNTL__DIG_START_MASK 0x400
+#define DIG_FE_CNTL__DIG_START__SHIFT 0xa
+#define DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x1000000
+#define DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x18
+#define DIG_FE_CNTL__TMDS_PIXEL_ENCODING_MASK 0x10000000
+#define DIG_FE_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x1c
+#define DIG_FE_CNTL__TMDS_COLOR_FORMAT_MASK 0xc0000000
+#define DIG_FE_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x1e
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x1
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x10
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x300
+#define DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3fffffff
+#define DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x3ff
+#define DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x1
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x2
+#define DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x10
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x20
+#define DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x40
+#define DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x3ff0000
+#define DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0xffffff
+#define DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x1000000
+#define DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x1
+#define DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x0
+#define DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x2
+#define DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0xfc
+#define DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x100
+#define DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
+#define DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0xfc00
+#define DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x1f0000
+#define DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x3c00000
+#define DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x4000000
+#define DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
+#define DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000
+#define DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000
+#define DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG_DISPCLK_SWITCH_CNTL__DIG_DISPCLK_SWITCH_POINT_MASK 0x1
+#define DIG_DISPCLK_SWITCH_CNTL__DIG_DISPCLK_SWITCH_POINT__SHIFT 0x0
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_MASK 0x1
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED__SHIFT 0x0
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK 0x10
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT__SHIFT 0x4
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_ACK_MASK 0x100
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_ACK__SHIFT 0x8
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK_MASK 0x1000
+#define DIG_DISPCLK_SWITCH_STATUS__DIG_DISPCLK_SWITCH_ALLOWED_INT_MASK__SHIFT 0xc
+#define HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x1
+#define HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x4
+#define HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x8
+#define HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x10
+#define HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x100
+#define HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x200
+#define HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x1000000
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000
+#define HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x1
+#define HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x10000
+#define HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x100000
+#define HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define HDMI_STATUS__HDMI_ERROR_INT_MASK 0x8000000
+#define HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x30
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x100
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x8
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x1f0000
+#define HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x1
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x2
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x30
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x100
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x1000
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x70000
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000
+#define HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x1
+#define HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x10
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x20
+#define HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x100
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x200
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x3f0000
+#define HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK 0x1
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND__SHIFT 0x0
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK 0x2
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT__SHIFT 0x1
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x10
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x20
+#define HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x100
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x200
+#define HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK 0x3f
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT 0x0
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x3f00
+#define HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x3f0000
+#define HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x1
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x2
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x10
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x20
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_MASK 0x3f0000
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE__SHIFT 0x10
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_MASK 0x3f000000
+#define HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE__SHIFT 0x18
+#define HDMI_GC__HDMI_GC_AVMUTE_MASK 0x1
+#define HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x4
+#define HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x10
+#define HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define HDMI_GC__HDMI_PACKING_PHASE_MASK 0xf00
+#define HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x1000
+#define HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x1
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x2
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0xff00
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0xff0000
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x1000000
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000
+#define AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x7
+#define AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x0
+#define AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x40
+#define AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x6
+#define AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x80
+#define AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x7
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0xff
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x0
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0xff00
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x8
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0xff0000
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x10
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xff000000
+#define AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x18
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0xff
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x0
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0xff00
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x8
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0xff0000
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x10
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xff000000
+#define AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x18
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0xff
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x0
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0xff00
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x8
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0xff0000
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x10
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xff000000
+#define AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x18
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0xff
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x0
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0xff00
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x8
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0xff0000
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x10
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xff000000
+#define AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x18
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0xff
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x0
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0xff00
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x8
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0xff0000
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x10
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xff000000
+#define AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x18
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0xff
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x0
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0xff00
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x8
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0xff0000
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x10
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xff000000
+#define AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x18
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0xff
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x0
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0xff00
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x8
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0xff0000
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x10
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xff000000
+#define AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x18
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0xff
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x0
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0xff00
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x8
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0xff0000
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x10
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xff000000
+#define AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x18
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_CHECKSUM_MASK 0xff
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_S_MASK 0x300
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_S__SHIFT 0x8
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_B_MASK 0xc00
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_B__SHIFT 0xa
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_A_MASK 0x1000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_A__SHIFT 0xc
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Y_MASK 0xe000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Y__SHIFT 0xd
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_R_MASK 0xf0000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_R__SHIFT 0x10
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_M_MASK 0x300000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_M__SHIFT 0x14
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_C_MASK 0xc00000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_C__SHIFT 0x16
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_SC_MASK 0x3000000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_SC__SHIFT 0x18
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Q_MASK 0xc000000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_Q__SHIFT 0x1a
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_EC_MASK 0x70000000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_EC__SHIFT 0x1c
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_ITC_MASK 0x80000000
+#define AFMT_AVI_INFO0__AFMT_AVI_INFO_ITC__SHIFT 0x1f
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_VIC_MASK 0xff
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_VIC__SHIFT 0x0
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_PR_MASK 0xf00
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_PR__SHIFT 0x8
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_CN_MASK 0x3000
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_CN__SHIFT 0xc
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_YQ_MASK 0xc000
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_YQ__SHIFT 0xe
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_TOP_MASK 0xffff0000
+#define AFMT_AVI_INFO1__AFMT_AVI_INFO_TOP__SHIFT 0x10
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_BOTTOM_MASK 0xffff
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_BOTTOM__SHIFT 0x0
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_LEFT_MASK 0xffff0000
+#define AFMT_AVI_INFO2__AFMT_AVI_INFO_LEFT__SHIFT 0x10
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_RIGHT_MASK 0xffff
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_RIGHT__SHIFT 0x0
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_VERSION_MASK 0xff000000
+#define AFMT_AVI_INFO3__AFMT_AVI_INFO_VERSION__SHIFT 0x18
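
The AFMT_AVI_INFO0..3 fields above carry the raw bytes of the CEA-861 AVI InfoFrame, including its checksum byte. Per CEA-861, the checksum is chosen so that all header and payload bytes sum to zero modulo 256. A minimal sketch of that rule (the helper name is hypothetical; u8/size_t assumed from linux/types.h):

/*
 * Illustrative sketch only -- not part of the generated header.
 * CEA-861: checksum + sum(other infoframe bytes) == 0 (mod 256).
 */
static u8 avi_infoframe_checksum(const u8 *bytes, size_t len)
{
	u8 sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += bytes[i];
	return (u8)(256 - sum); /* value for AFMT_AVI_INFO_CHECKSUM */
}
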
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0xff
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0xff00
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x8
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 0xff0000
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x10
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xff000000
+#define AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x18
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0xff
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x0
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x300
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x8
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x1000
+#define AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0xc
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0xff
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x0
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0xff00
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x8
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0xff0000
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x10
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xff000000
+#define AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x18
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0xff
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x0
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0xff00
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x8
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0xff0000
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x10
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xff000000
+#define AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x18
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0xff
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x0
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0xff00
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x8
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0xff0000
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x10
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xff000000
+#define AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x18
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0xff
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x0
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0xff00
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x8
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0xff0000
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x10
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xff000000
+#define AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x18
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0xff
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x0
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0xff00
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x8
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0xff0000
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x10
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xff000000
+#define AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x18
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0xff
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x0
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0xff00
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x8
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0xff0000
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x10
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xff000000
+#define AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x18
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0xff
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x0
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0xff00
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x8
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0xff0000
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x10
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xff000000
+#define AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x18
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0xff
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x0
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0xff00
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x8
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0xff0000
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x10
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xff000000
+#define AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x18
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0xff
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x0
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0xff00
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x8
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0xff0000
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x10
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xff000000
+#define AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x18
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_SEND_MASK 0x1
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_SEND__SHIFT 0x0
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_CONT_MASK 0x2
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_CONT__SHIFT 0x1
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_SEND_MASK 0x10
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_SEND__SHIFT 0x4
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_CONT_MASK 0x20
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_CONT__SHIFT 0x5
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_LINE_MASK 0x3f0000
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC2_LINE__SHIFT 0x10
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_LINE_MASK 0x3f000000
+#define HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC3_LINE__SHIFT 0x18
+#define HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xfffff000
+#define HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0xfffff
+#define HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xfffff000
+#define HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0xfffff
+#define HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xfffff000
+#define HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0xfffff
+#define HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xfffff000
+#define HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0xfffff
+#define HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
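
The HDMI_ACR_*_0/1 pairs above hold the Audio Clock Regeneration parameters CTS and N for the three base sample rates (32, 44.1, 48 kHz). The HDMI specification defines the relation 128 * fs = f_TMDS * N / CTS; with the recommended N = 6144 for 48 kHz, CTS reduces to the TMDS clock expressed in kHz. A minimal sketch of that arithmetic (function name and parameter are hypothetical):

/*
 * Illustrative sketch only -- not part of the generated header.
 * CTS = f_TMDS * N / (128 * fs). For fs = 48 kHz and N = 6144:
 *   CTS = tmds_khz * 1000 * 6144 / (128 * 48000) = tmds_khz
 */
static unsigned int hdmi_acr_cts_48(unsigned int tmds_clock_khz)
{
	return tmds_clock_khz; /* programmed into HDMI_ACR_CTS_48 */
}
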
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0xff
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x700
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x7800
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0xff0000
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1f000000
+#define AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0xff
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x7800
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x8000
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x30000
+#define AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define AFMT_60958_0__AFMT_60958_CS_A_MASK 0x1
+#define AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define AFMT_60958_0__AFMT_60958_CS_B_MASK 0x2
+#define AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define AFMT_60958_0__AFMT_60958_CS_C_MASK 0x4
+#define AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define AFMT_60958_0__AFMT_60958_CS_D_MASK 0x38
+#define AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0xc0
+#define AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0xff00
+#define AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0xf0000
+#define AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0xf00000
+#define AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0xf000000
+#define AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000
+#define AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0xf
+#define AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0xf0
+#define AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x10000
+#define AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x40000
+#define AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0xf00000
+#define AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x1
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x10
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x100
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0xf000
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xffff0000
+#define AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0xffffff
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000
+#define AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0xffffff
+#define AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xff000000
+#define AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0xffffff
+#define AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0xffffff
+#define AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0xf
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0xf0
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0xf00
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0xf000
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0xf0000
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0xf00000
+#define AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x1
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xffffff00
+#define AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x10
+#define AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x100
+#define AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x1000000
+#define AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000
+#define AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x1
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x800
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x1000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x4000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x800000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x1000000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x4000000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000
+#define AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC0_UPDATE_MASK 0x4
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC0_UPDATE__SHIFT 0x2
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC2_UPDATE_MASK 0x8
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC2_UPDATE__SHIFT 0x3
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xc0000000
+#define AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x1e
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x40
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x80
+#define AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x400
+#define AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0xa
+#define AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x7
+#define AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_FS_DIV_SEL_MASK 0x7
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_FS_DIV_SEL__SHIFT 0x0
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_BASE_MASK 0x100
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_BASE__SHIFT 0x8
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_MULTI_MASK 0x7000
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_MULTI__SHIFT 0xc
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_DIV_MASK 0x70000
+#define AFMT_AUDIO_DBG_DTO_CNTL__AFMT_AUDIO_DTO_DBG_DIV__SHIFT 0x10
+#define AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x1
+#define AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x100
+#define AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x1
+#define DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG_BE_CNTL__DIG_SWAP_MASK 0x2
+#define DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x4
+#define DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x7f00
+#define DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG_BE_CNTL__DIG_MODE_MASK 0x70000
+#define DIG_BE_CNTL__DIG_MODE__SHIFT 0x10
+#define DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000
+#define DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x1
+#define DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
+#define DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x100
+#define DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x8
+#define TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x1
+#define TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x1
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x2
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x4
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x8
+#define TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x3
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x300
+#define TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x3
+#define TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x3ff
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x3ff0000
+#define TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x3ff
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x3ff0000
+#define TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define TMDS_DEBUG__TMDS_DEBUG_EN_MASK 0x1
+#define TMDS_DEBUG__TMDS_DEBUG_EN__SHIFT 0x0
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC_MASK 0x100
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC__SHIFT 0x8
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC_EN_MASK 0x200
+#define TMDS_DEBUG__TMDS_DEBUG_HSYNC_EN__SHIFT 0x9
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC_MASK 0x10000
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC__SHIFT 0x10
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC_EN_MASK 0x20000
+#define TMDS_DEBUG__TMDS_DEBUG_VSYNC_EN__SHIFT 0x11
+#define TMDS_DEBUG__TMDS_DEBUG_DE_MASK 0x1000000
+#define TMDS_DEBUG__TMDS_DEBUG_DE__SHIFT 0x18
+#define TMDS_DEBUG__TMDS_DEBUG_DE_EN_MASK 0x2000000
+#define TMDS_DEBUG__TMDS_DEBUG_DE_EN__SHIFT 0x19
+#define TMDS_CTL_BITS__TMDS_CTL0_MASK 0x1
+#define TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define TMDS_CTL_BITS__TMDS_CTL1_MASK 0x100
+#define TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define TMDS_CTL_BITS__TMDS_CTL2_MASK 0x10000
+#define TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define TMDS_CTL_BITS__TMDS_CTL3_MASK 0x1000000
+#define TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x1
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x70
+#define TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x100
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0xf0000
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x1000000
+#define TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0xf
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x70
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x80
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x300
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x400
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x800
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x1000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0xf0000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x700000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x800000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x3000000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x4000000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x8000000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000
+#define TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0xf
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x70
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x80
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x300
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x400
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x800
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x1000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0xf0000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x700000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x800000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x3000000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x4000000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x8000000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000
+#define TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG_VERSION__DIG_TYPE_MASK 0x1
+#define DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x1
+#define DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x0
+#define DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x2
+#define DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x1
+#define DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x4
+#define DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x2
+#define DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x8
+#define DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x3
+#define DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x100
+#define DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x8
+#define DIG_TEST_DEBUG_INDEX__DIG_TEST_DEBUG_INDEX_MASK 0xff
+#define DIG_TEST_DEBUG_INDEX__DIG_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DIG_TEST_DEBUG_INDEX__DIG_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DIG_TEST_DEBUG_INDEX__DIG_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DIG_TEST_DEBUG_DATA__DIG_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DIG_TEST_DEBUG_DATA__DIG_TEST_DEBUG_DATA__SHIFT 0x0
+#define DIG_FE_TEST_DEBUG_INDEX__DIG_FE_TEST_DEBUG_INDEX_MASK 0xff
+#define DIG_FE_TEST_DEBUG_INDEX__DIG_FE_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DIG_FE_TEST_DEBUG_INDEX__DIG_FE_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DIG_FE_TEST_DEBUG_INDEX__DIG_FE_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DIG_FE_TEST_DEBUG_DATA__DIG_FE_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DIG_FE_TEST_DEBUG_DATA__DIG_FE_TEST_DEBUG_DATA__SHIFT 0x0
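
Every field in this header follows the same _MASK/__SHIFT pairing, and drivers program them with read-modify-write sequences: clear the field's mask bits, then OR in the new value shifted into place. A minimal sketch using the DIG_MODE field of DIG_BE_CNTL defined above (readl/writel assumed from linux/io.h; the helper and its parameters are hypothetical, not an amdgpu API):

/*
 * Illustrative sketch only -- not part of the generated header.
 */
static void dig_set_mode(void __iomem *mmio, u32 reg_offset, u32 mode)
{
	u32 val = readl(mmio + reg_offset);

	val &= ~DIG_BE_CNTL__DIG_MODE_MASK;
	val |= (mode << DIG_BE_CNTL__DIG_MODE__SHIFT) &
	       DIG_BE_CNTL__DIG_MODE_MASK;
	writel(val, mmio + reg_offset);
}
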
+#define DMCU_CTRL__RESET_UC_MASK 0x1
+#define DMCU_CTRL__RESET_UC__SHIFT 0x0
+#define DMCU_CTRL__IGNORE_PWRMGT_MASK 0x2
+#define DMCU_CTRL__IGNORE_PWRMGT__SHIFT 0x1
+#define DMCU_CTRL__DISABLE_IRQ_TO_UC_MASK 0x4
+#define DMCU_CTRL__DISABLE_IRQ_TO_UC__SHIFT 0x2
+#define DMCU_CTRL__DISABLE_XIRQ_TO_UC_MASK 0x8
+#define DMCU_CTRL__DISABLE_XIRQ_TO_UC__SHIFT 0x3
+#define DMCU_CTRL__DMCU_ENABLE_MASK 0x10
+#define DMCU_CTRL__DMCU_ENABLE__SHIFT 0x4
+#define DMCU_CTRL__DMCU_DYN_CLK_GATING_EN_MASK 0x100
+#define DMCU_CTRL__DMCU_DYN_CLK_GATING_EN__SHIFT 0x8
+#define DMCU_CTRL__UC_REG_RD_TIMEOUT_MASK 0xffff0000
+#define DMCU_CTRL__UC_REG_RD_TIMEOUT__SHIFT 0x10
+#define DMCU_STATUS__UC_IN_RESET_MASK 0x1
+#define DMCU_STATUS__UC_IN_RESET__SHIFT 0x0
+#define DMCU_STATUS__UC_IN_WAIT_MODE_MASK 0x2
+#define DMCU_STATUS__UC_IN_WAIT_MODE__SHIFT 0x1
+#define DMCU_STATUS__UC_IN_STOP_MODE_MASK 0x4
+#define DMCU_STATUS__UC_IN_STOP_MODE__SHIFT 0x2
+#define DMCU_PC_START_ADDR__PC_START_ADDR_LSB_MASK 0xff
+#define DMCU_PC_START_ADDR__PC_START_ADDR_LSB__SHIFT 0x0
+#define DMCU_PC_START_ADDR__PC_START_ADDR_MSB_MASK 0xff00
+#define DMCU_PC_START_ADDR__PC_START_ADDR_MSB__SHIFT 0x8
+#define DMCU_FW_START_ADDR__FW_START_ADDR_LSB_MASK 0xff
+#define DMCU_FW_START_ADDR__FW_START_ADDR_LSB__SHIFT 0x0
+#define DMCU_FW_START_ADDR__FW_START_ADDR_MSB_MASK 0xff00
+#define DMCU_FW_START_ADDR__FW_START_ADDR_MSB__SHIFT 0x8
+#define DMCU_FW_END_ADDR__FW_END_ADDR_LSB_MASK 0xff
+#define DMCU_FW_END_ADDR__FW_END_ADDR_LSB__SHIFT 0x0
+#define DMCU_FW_END_ADDR__FW_END_ADDR_MSB_MASK 0xff00
+#define DMCU_FW_END_ADDR__FW_END_ADDR_MSB__SHIFT 0x8
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_LSB_MASK 0xff
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_LSB__SHIFT 0x0
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_MSB_MASK 0xff00
+#define DMCU_FW_ISR_START_ADDR__FW_ISR_START_ADDR_MSB__SHIFT 0x8
+#define DMCU_FW_CS_HI__FW_CHECKSUM_HI_MASK 0xffffffff
+#define DMCU_FW_CS_HI__FW_CHECKSUM_HI__SHIFT 0x0
+#define DMCU_FW_CS_LO__FW_CHECKSUM_LO_MASK 0xffffffff
+#define DMCU_FW_CS_LO__FW_CHECKSUM_LO__SHIFT 0x0
+#define DMCU_RAM_ACCESS_CTRL__ERAM_WR_ADDR_AUTO_INC_MASK 0x1
+#define DMCU_RAM_ACCESS_CTRL__ERAM_WR_ADDR_AUTO_INC__SHIFT 0x0
+#define DMCU_RAM_ACCESS_CTRL__ERAM_RD_ADDR_AUTO_INC_MASK 0x2
+#define DMCU_RAM_ACCESS_CTRL__ERAM_RD_ADDR_AUTO_INC__SHIFT 0x1
+#define DMCU_RAM_ACCESS_CTRL__IRAM_WR_ADDR_AUTO_INC_MASK 0x4
+#define DMCU_RAM_ACCESS_CTRL__IRAM_WR_ADDR_AUTO_INC__SHIFT 0x2
+#define DMCU_RAM_ACCESS_CTRL__IRAM_RD_ADDR_AUTO_INC_MASK 0x8
+#define DMCU_RAM_ACCESS_CTRL__IRAM_RD_ADDR_AUTO_INC__SHIFT 0x3
+#define DMCU_RAM_ACCESS_CTRL__ERAM_HOST_ACCESS_EN_MASK 0x10
+#define DMCU_RAM_ACCESS_CTRL__ERAM_HOST_ACCESS_EN__SHIFT 0x4
+#define DMCU_RAM_ACCESS_CTRL__IRAM_HOST_ACCESS_EN_MASK 0x20
+#define DMCU_RAM_ACCESS_CTRL__IRAM_HOST_ACCESS_EN__SHIFT 0x5
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_ADDR_MASK 0xffff
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_ADDR__SHIFT 0x0
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BE_MASK 0xf0000
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BE__SHIFT 0x10
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BYTE_MODE_MASK 0x100000
+#define DMCU_ERAM_WR_CTRL__ERAM_WR_BYTE_MODE__SHIFT 0x14
+#define DMCU_ERAM_WR_DATA__ERAM_WR_DATA_MASK 0xffffffff
+#define DMCU_ERAM_WR_DATA__ERAM_WR_DATA__SHIFT 0x0
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_ADDR_MASK 0xffff
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_ADDR__SHIFT 0x0
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BE_MASK 0xf0000
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BE__SHIFT 0x10
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BYTE_MODE_MASK 0x100000
+#define DMCU_ERAM_RD_CTRL__ERAM_RD_BYTE_MODE__SHIFT 0x14
+#define DMCU_ERAM_RD_DATA__ERAM_RD_DATA_MASK 0xffffffff
+#define DMCU_ERAM_RD_DATA__ERAM_RD_DATA__SHIFT 0x0
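
The DMCU_RAM_ACCESS_CTRL and DMCU_ERAM_* fields above describe how the host streams DMCU firmware into ERAM: enable host access, turn on write-address auto-increment, set the start address once, then write data words back to back. A minimal sketch under those assumptions (wr32() is a hypothetical register accessor; the mm* offsets live in the companion _d.h offset header, not in this mask file):

/*
 * Illustrative sketch only -- not part of the generated header.
 */
static void dmcu_eram_upload(u32 start_addr, const u32 *fw, u32 n_words)
{
	u32 i;

	/* Open host access to ERAM with auto-incrementing write address. */
	wr32(mmDMCU_RAM_ACCESS_CTRL,
	     DMCU_RAM_ACCESS_CTRL__ERAM_HOST_ACCESS_EN_MASK |
	     DMCU_RAM_ACCESS_CTRL__ERAM_WR_ADDR_AUTO_INC_MASK);

	wr32(mmDMCU_ERAM_WR_CTRL,
	     (start_addr << DMCU_ERAM_WR_CTRL__ERAM_WR_ADDR__SHIFT) &
	     DMCU_ERAM_WR_CTRL__ERAM_WR_ADDR_MASK);

	for (i = 0; i < n_words; i++)
		wr32(mmDMCU_ERAM_WR_DATA, fw[i]);
}
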
+#define DMCU_IRAM_WR_CTRL__IRAM_WR_ADDR_MASK 0x3ff
+#define DMCU_IRAM_WR_CTRL__IRAM_WR_ADDR__SHIFT 0x0
+#define DMCU_IRAM_WR_DATA__IRAM_WR_DATA_MASK 0xff
+#define DMCU_IRAM_WR_DATA__IRAM_WR_DATA__SHIFT 0x0
+#define DMCU_IRAM_RD_CTRL__IRAM_RD_ADDR_MASK 0x3ff
+#define DMCU_IRAM_RD_CTRL__IRAM_RD_ADDR__SHIFT 0x0
+#define DMCU_IRAM_RD_DATA__IRAM_RD_DATA_MASK 0xff
+#define DMCU_IRAM_RD_DATA__IRAM_RD_DATA__SHIFT 0x0
+#define DMCU_EVENT_TRIGGER__GEN_SW_INT_TO_UC_MASK 0x1
+#define DMCU_EVENT_TRIGGER__GEN_SW_INT_TO_UC__SHIFT 0x0
+#define DMCU_EVENT_TRIGGER__UC_INTERNAL_INT_CODE_MASK 0x7f0000
+#define DMCU_EVENT_TRIGGER__UC_INTERNAL_INT_CODE__SHIFT 0x10
+#define DMCU_EVENT_TRIGGER__GEN_UC_INTERNAL_INT_TO_HOST_MASK 0x800000
+#define DMCU_EVENT_TRIGGER__GEN_UC_INTERNAL_INT_TO_HOST__SHIFT 0x17
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_IRQ_N_PIN_MASK 0x1
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_IRQ_N_PIN__SHIFT 0x0
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_XIRQ_N_PIN_MASK 0x2
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_XIRQ_N_PIN__SHIFT 0x1
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_SOFTWARE_INTERRUPT_MASK 0x4
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_SOFTWARE_INTERRUPT__SHIFT 0x2
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_ILLEGAL_OPCODE_TRAP_MASK 0x8
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_ILLEGAL_OPCODE_TRAP__SHIFT 0x3
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_4_MASK 0x10
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_4__SHIFT 0x4
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_3_MASK 0x20
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_3__SHIFT 0x5
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_2_MASK 0x40
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_2__SHIFT 0x6
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_1_MASK 0x80
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OUTPUT_COMPARE_1__SHIFT 0x7
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OVERFLOW_MASK 0x100
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_OVERFLOW__SHIFT 0x8
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_REAL_TIME_INTERRUPT_MASK 0x200
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_REAL_TIME_INTERRUPT__SHIFT 0x9
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_4_OUTPUT_COMPARE_5_MASK 0x400
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_4_OUTPUT_COMPARE_5__SHIFT 0xa
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_3_MASK 0x800
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_3__SHIFT 0xb
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_2_MASK 0x1000
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_2__SHIFT 0xc
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_1_MASK 0x2000
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_TIMER_INPUT_CAPTURE_1__SHIFT 0xd
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_INPUT_EDGE_MASK 0x4000
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_INPUT_EDGE__SHIFT 0xe
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_OVERFLOW_MASK 0x8000
+#define DMCU_UC_INTERNAL_INT_STATUS__UC_INT_PULSE_ACCUMULATOR_OVERFLOW__SHIFT 0xf
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_STATUS_MASK 0x2000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_STATUS__SHIFT 0xd
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_OCCURRED_MASK 0x4000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_OCCURRED__SHIFT 0xe
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_CLEAR_MASK 0x4000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN1_INT_CLEAR__SHIFT 0xe
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_STATUS_MASK 0x8000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_STATUS__SHIFT 0xf
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_OCCURRED_MASK 0x10000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_OCCURRED__SHIFT 0x10
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_CLEAR_MASK 0x10000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN2_INT_CLEAR__SHIFT 0x10
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_STATUS_MASK 0x20000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_STATUS__SHIFT 0x11
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_OCCURRED_MASK 0x40000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_OCCURRED__SHIFT 0x12
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_CLEAR_MASK 0x40000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN3_INT_CLEAR__SHIFT 0x12
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_STATUS_MASK 0x80000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_STATUS__SHIFT 0x13
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_OCCURRED_MASK 0x100000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_OCCURRED__SHIFT 0x14
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_CLEAR_MASK 0x100000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN4_INT_CLEAR__SHIFT 0x14
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_STATUS_MASK 0x200000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_STATUS__SHIFT 0x15
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_OCCURRED_MASK 0x400000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_OCCURRED__SHIFT 0x16
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_CLEAR_MASK 0x400000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN5_INT_CLEAR__SHIFT 0x16
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_STATUS_MASK 0x800000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_STATUS__SHIFT 0x17
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_OCCURRED_MASK 0x1000000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_OCCURRED__SHIFT 0x18
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_CLEAR_MASK 0x1000000
+#define DMCU_SS_INTERRUPT_CNTL_STATUS__STATIC_SCREEN6_INT_CLEAR__SHIFT 0x18
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_OCCURRED_MASK 0x1
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_OCCURRED__SHIFT 0x0
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_CLEAR_MASK 0x1
+#define DMCU_INTERRUPT_STATUS__ABM1_HG_READY_INT_CLEAR__SHIFT 0x0
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_OCCURRED_MASK 0x2
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_OCCURRED__SHIFT 0x1
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_CLEAR_MASK 0x2
+#define DMCU_INTERRUPT_STATUS__ABM1_LS_READY_INT_CLEAR__SHIFT 0x1
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_OCCURRED_MASK 0x4
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_OCCURRED__SHIFT 0x2
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_CLEAR_MASK 0x4
+#define DMCU_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_CLEAR__SHIFT 0x2
+#define DMCU_INTERRUPT_STATUS__MCP_INT_OCCURRED_MASK 0x8
+#define DMCU_INTERRUPT_STATUS__MCP_INT_OCCURRED__SHIFT 0x3
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_UP_INT_OCCURRED_MASK 0x10
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_UP_INT_OCCURRED__SHIFT 0x4
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_UP_INT_CLEAR_MASK 0x10
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_UP_INT_CLEAR__SHIFT 0x4
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_DOWN_INT_OCCURRED_MASK 0x20
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_DOWN_INT_OCCURRED__SHIFT 0x5
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_DOWN_INT_CLEAR_MASK 0x20
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DSI_POWER_DOWN_INT_CLEAR__SHIFT 0x5
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_OCCURRED_MASK 0x100
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_OCCURRED__SHIFT 0x8
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_CLEAR_MASK 0x100
+#define DMCU_INTERRUPT_STATUS__EXTERNAL_SW_INT_CLEAR__SHIFT 0x8
+#define DMCU_INTERRUPT_STATUS__SCP_INT_OCCURRED_MASK 0x200
+#define DMCU_INTERRUPT_STATUS__SCP_INT_OCCURRED__SHIFT 0x9
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_OCCURRED_MASK 0x400
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_OCCURRED__SHIFT 0xa
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_CLEAR_MASK 0x400
+#define DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_CLEAR__SHIFT 0xa
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_OCCURRED_MASK 0x800
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_OCCURRED__SHIFT 0xb
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_CLEAR_MASK 0x800
+#define DMCU_INTERRUPT_STATUS__UC_REG_RD_TIMEOUT_INT_CLEAR__SHIFT 0xb
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_OCCURRED_MASK 0x1000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_OCCURRED__SHIFT 0xc
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_CLEAR_MASK 0x1000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_UP_INT_CLEAR__SHIFT 0xc
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_OCCURRED_MASK 0x2000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_OCCURRED__SHIFT 0xd
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_CLEAR_MASK 0x2000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_UP_INT_CLEAR__SHIFT 0xd
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_OCCURRED_MASK 0x4000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_OCCURRED__SHIFT 0xe
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_CLEAR_MASK 0x4000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_UP_INT_CLEAR__SHIFT 0xe
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_OCCURRED_MASK 0x8000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_OCCURRED__SHIFT 0xf
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_CLEAR_MASK 0x8000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_UP_INT_CLEAR__SHIFT 0xf
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_OCCURRED_MASK 0x10000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_OCCURRED__SHIFT 0x10
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_CLEAR_MASK 0x10000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_UP_INT_CLEAR__SHIFT 0x10
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_OCCURRED_MASK 0x20000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_OCCURRED__SHIFT 0x11
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_CLEAR_MASK 0x20000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_UP_INT_CLEAR__SHIFT 0x11
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_OCCURRED_MASK 0x40000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_OCCURRED__SHIFT 0x12
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_CLEAR_MASK 0x40000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE0_POWER_DOWN_INT_CLEAR__SHIFT 0x12
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_OCCURRED_MASK 0x80000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_OCCURRED__SHIFT 0x13
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_CLEAR_MASK 0x80000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE1_POWER_DOWN_INT_CLEAR__SHIFT 0x13
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_OCCURRED_MASK 0x100000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_OCCURRED__SHIFT 0x14
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_CLEAR_MASK 0x100000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE2_POWER_DOWN_INT_CLEAR__SHIFT 0x14
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_OCCURRED_MASK 0x200000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_OCCURRED__SHIFT 0x15
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_CLEAR_MASK 0x200000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE3_POWER_DOWN_INT_CLEAR__SHIFT 0x15
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_OCCURRED_MASK 0x400000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_OCCURRED__SHIFT 0x16
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_CLEAR_MASK 0x400000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE4_POWER_DOWN_INT_CLEAR__SHIFT 0x16
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_OCCURRED_MASK 0x800000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_OCCURRED__SHIFT 0x17
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_CLEAR_MASK 0x800000
+#define DMCU_INTERRUPT_STATUS__DCPG_IHC_DCFE5_POWER_DOWN_INT_CLEAR__SHIFT 0x17
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_OCCURRED_MASK 0x1000000
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_OCCURRED__SHIFT 0x18
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_CLEAR_MASK 0x1000000
+#define DMCU_INTERRUPT_STATUS__VBLANK1_INT_CLEAR__SHIFT 0x18
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_OCCURRED_MASK 0x2000000
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_OCCURRED__SHIFT 0x19
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_CLEAR_MASK 0x2000000
+#define DMCU_INTERRUPT_STATUS__VBLANK2_INT_CLEAR__SHIFT 0x19
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_OCCURRED_MASK 0x4000000
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_CLEAR_MASK 0x4000000
+#define DMCU_INTERRUPT_STATUS__VBLANK3_INT_CLEAR__SHIFT 0x1a
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_OCCURRED_MASK 0x8000000
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_OCCURRED__SHIFT 0x1b
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_CLEAR_MASK 0x8000000
+#define DMCU_INTERRUPT_STATUS__VBLANK4_INT_CLEAR__SHIFT 0x1b
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_OCCURRED_MASK 0x10000000
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_OCCURRED__SHIFT 0x1c
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_CLEAR_MASK 0x10000000
+#define DMCU_INTERRUPT_STATUS__VBLANK5_INT_CLEAR__SHIFT 0x1c
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_OCCURRED_MASK 0x20000000
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_OCCURRED__SHIFT 0x1d
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_CLEAR_MASK 0x20000000
+#define DMCU_INTERRUPT_STATUS__VBLANK6_INT_CLEAR__SHIFT 0x1d
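+/*
+ * Editorial note, not part of the original header: each *_INT_OCCURRED/
+ * *_INT_CLEAR pair in DMCU_INTERRUPT_STATUS shares one bit position,
+ * i.e. the usual read-status / write-one-to-clear layout. A sketch of
+ * servicing one source, again assuming RREG32()/WREG32() accessors and
+ * a hypothetical mmDMCU_INTERRUPT_STATUS offset:
+ *
+ *   u32 stat = RREG32(mmDMCU_INTERRUPT_STATUS);
+ *   if (stat & DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_OCCURRED_MASK)
+ *           WREG32(mmDMCU_INTERRUPT_STATUS,
+ *                  DMCU_INTERRUPT_STATUS__UC_INTERNAL_INT_CLEAR_MASK);
+ */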
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_UP_INT_OCCURRED_MASK 0x1
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_UP_INT_OCCURRED__SHIFT 0x0
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_UP_INT_CLEAR_MASK 0x1
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_UP_INT_CLEAR__SHIFT 0x0
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_UP_INT_OCCURRED_MASK 0x2
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_UP_INT_OCCURRED__SHIFT 0x1
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_UP_INT_CLEAR_MASK 0x2
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_UP_INT_CLEAR__SHIFT 0x1
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_OCCURRED_MASK 0x4
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_OCCURRED__SHIFT 0x2
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_CLEAR_MASK 0x4
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_CLEAR__SHIFT 0x2
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_OCCURRED_MASK 0x8
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_OCCURRED__SHIFT 0x3
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_CLEAR_MASK 0x8
+#define DMCU_INTERRUPT_STATUS_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_CLEAR__SHIFT 0x3
+#define DMCU_INTERRUPT_STATUS_1__DCFEV0_VBLANK_INT_OCCURRED_MASK 0x10
+#define DMCU_INTERRUPT_STATUS_1__DCFEV0_VBLANK_INT_OCCURRED__SHIFT 0x4
+#define DMCU_INTERRUPT_STATUS_1__DCFEV0_VBLANK_INT_CLEAR_MASK 0x10
+#define DMCU_INTERRUPT_STATUS_1__DCFEV0_VBLANK_INT_CLEAR__SHIFT 0x4
+#define DMCU_INTERRUPT_STATUS_1__DCFEV1_VBLANK_INT_OCCURRED_MASK 0x20
+#define DMCU_INTERRUPT_STATUS_1__DCFEV1_VBLANK_INT_OCCURRED__SHIFT 0x5
+#define DMCU_INTERRUPT_STATUS_1__DCFEV1_VBLANK_INT_CLEAR_MASK 0x20
+#define DMCU_INTERRUPT_STATUS_1__DCFEV1_VBLANK_INT_CLEAR__SHIFT 0x5
+#define DMCU_INTERRUPT_STATUS_1__DMCU_GENERIC_INTERRUPT_OCCURRED_MASK 0x2000
+#define DMCU_INTERRUPT_STATUS_1__DMCU_GENERIC_INTERRUPT_OCCURRED__SHIFT 0xd
+#define DMCU_INTERRUPT_STATUS_1__DMCU_GENERIC_INTERRUPT_CLEAR_MASK 0x2000
+#define DMCU_INTERRUPT_STATUS_1__DMCU_GENERIC_INTERRUPT_CLEAR__SHIFT 0xd
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_HG_READY_INT_MASK_MASK 0x1
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_HG_READY_INT_MASK__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_LS_READY_INT_MASK_MASK 0x2
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_LS_READY_INT_MASK__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_BL_UPDATE_INT_MASK_MASK 0x4
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__ABM1_BL_UPDATE_INT_MASK__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__SCP_INT_MASK_MASK 0x200
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__SCP_INT_MASK__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_INTERNAL_INT_MASK_MASK 0x400
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_INTERNAL_INT_MASK__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_REG_RD_TIMEOUT_INT_MASK_MASK 0x800
+#define DMCU_INTERRUPT_TO_HOST_EN_MASK__UC_REG_RD_TIMEOUT_INT_MASK__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_HG_READY_INT_TO_UC_EN_MASK 0x1
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_HG_READY_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_LS_READY_INT_TO_UC_EN_MASK 0x2
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_LS_READY_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_BL_UPDATE_INT_TO_UC_EN_MASK 0x4
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__ABM1_BL_UPDATE_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__MCP_INT_TO_UC_EN_MASK 0x8
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__MCP_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DSI_POWER_UP_INT_TO_UC_EN_MASK 0x10
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DSI_POWER_UP_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DSI_POWER_DOWN_INT_TO_UC_EN_MASK 0x20
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DSI_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN1_INT_TO_UC_EN_MASK 0x40
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN1_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN2_INT_TO_UC_EN_MASK 0x80
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN2_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__EXTERNAL_SW_INT_TO_UC_EN_MASK 0x100
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__EXTERNAL_SW_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN3_INT_TO_UC_EN_MASK 0x200
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN3_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN4_INT_TO_UC_EN_MASK 0x400
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN4_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN5_INT_TO_UC_EN_MASK 0x800
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN5_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_UP_INT_TO_UC_EN_MASK 0x1000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_UP_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_UP_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_UP_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_UP_INT_TO_UC_EN_MASK 0x4000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_UP_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_UP_INT_TO_UC_EN_MASK 0x8000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_UP_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_UP_INT_TO_UC_EN_MASK 0x10000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_UP_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_UP_INT_TO_UC_EN_MASK 0x20000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_UP_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_DOWN_INT_TO_UC_EN_MASK 0x40000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE0_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_DOWN_INT_TO_UC_EN_MASK 0x80000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE1_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_DOWN_INT_TO_UC_EN_MASK 0x100000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE2_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_DOWN_INT_TO_UC_EN_MASK 0x200000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE3_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_DOWN_INT_TO_UC_EN_MASK 0x400000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE4_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_DOWN_INT_TO_UC_EN_MASK 0x800000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__DCPG_IHC_DCFE5_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x17
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK1_INT_TO_UC_EN_MASK 0x1000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK1_INT_TO_UC_EN__SHIFT 0x18
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK2_INT_TO_UC_EN_MASK 0x2000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK2_INT_TO_UC_EN__SHIFT 0x19
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK3_INT_TO_UC_EN_MASK 0x4000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK3_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK4_INT_TO_UC_EN_MASK 0x8000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK4_INT_TO_UC_EN__SHIFT 0x1b
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK5_INT_TO_UC_EN_MASK 0x10000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK5_INT_TO_UC_EN__SHIFT 0x1c
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK6_INT_TO_UC_EN_MASK 0x20000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__VBLANK6_INT_TO_UC_EN__SHIFT 0x1d
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN6_INT_TO_UC_EN_MASK 0x40000000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK__STATIC_SCREEN6_INT_TO_UC_EN__SHIFT 0x1e
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV0_POWER_UP_INT_TO_UC_EN_MASK 0x1
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV0_POWER_UP_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_TO_UC_EN_MASK 0x2
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCFEV0_VBLANK_INT_TO_UC_EN_MASK 0x4
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCFEV0_VBLANK_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV1_POWER_UP_INT_TO_UC_EN_MASK 0x8
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV1_POWER_UP_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_TO_UC_EN_MASK 0x10
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCFEV1_VBLANK_INT_TO_UC_EN_MASK 0x20
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DCFEV1_VBLANK_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DMCU_GENERIC_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_INTERRUPT_TO_UC_EN_MASK_1__DMCU_GENERIC_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_HG_READY_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_HG_READY_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_LS_READY_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_LS_READY_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_BL_UPDATE_INT_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__ABM1_BL_UPDATE_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__MCP_INT_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__MCP_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DSI_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DSI_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DSI_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DSI_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN1_INT_XIRQ_IRQ_SEL_MASK 0x40
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN1_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN2_INT_XIRQ_IRQ_SEL_MASK 0x80
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN2_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__EXTERNAL_SW_INT_XIRQ_IRQ_SEL_MASK 0x100
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__EXTERNAL_SW_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN3_INT_XIRQ_IRQ_SEL_MASK 0x200
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN3_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN4_INT_XIRQ_IRQ_SEL_MASK 0x400
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN4_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN5_INT_XIRQ_IRQ_SEL_MASK 0x800
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN5_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x1000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x4000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x8000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x10000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x20000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x40000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE0_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x80000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE1_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x100000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE2_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x200000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE3_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x400000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE4_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x800000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__DCPG_IHC_DCFE5_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK1_INT_XIRQ_IRQ_SEL_MASK 0x1000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK1_INT_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK2_INT_XIRQ_IRQ_SEL_MASK 0x2000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK2_INT_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK3_INT_XIRQ_IRQ_SEL_MASK 0x4000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK3_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK4_INT_XIRQ_IRQ_SEL_MASK 0x8000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK4_INT_XIRQ_IRQ_SEL__SHIFT 0x1b
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK5_INT_XIRQ_IRQ_SEL_MASK 0x10000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK5_INT_XIRQ_IRQ_SEL__SHIFT 0x1c
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK6_INT_XIRQ_IRQ_SEL_MASK 0x20000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__VBLANK6_INT_XIRQ_IRQ_SEL__SHIFT 0x1d
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN6_INT_XIRQ_IRQ_SEL_MASK 0x40000000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL__STATIC_SCREEN6_INT_XIRQ_IRQ_SEL__SHIFT 0x1e
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV0_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV0_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV0_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCFEV0_VBLANK_INT_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCFEV0_VBLANK_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV1_POWER_UP_INT_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV1_POWER_UP_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCPG_IHC_DCFEV1_POWER_DOWN_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCFEV1_VBLANK_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DCFEV1_VBLANK_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DMCU_GENERIC_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_INTERRUPT_TO_UC_XIRQ_IRQ_SEL_1__DMCU_GENERIC_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DC_DMCU_SCRATCH__DMCU_SCRATCH_MASK 0xffffffff
+#define DC_DMCU_SCRATCH__DMCU_SCRATCH__SHIFT 0x0
+#define DMCU_INT_CNT__DMCU_ABM1_HG_READY_INT_CNT_MASK 0xff
+#define DMCU_INT_CNT__DMCU_ABM1_HG_READY_INT_CNT__SHIFT 0x0
+#define DMCU_INT_CNT__DMCU_ABM1_LS_READY_INT_CNT_MASK 0xff00
+#define DMCU_INT_CNT__DMCU_ABM1_LS_READY_INT_CNT__SHIFT 0x8
+#define DMCU_INT_CNT__DMCU_ABM1_BL_UPDATE_INT_CNT_MASK 0xff0000
+#define DMCU_INT_CNT__DMCU_ABM1_BL_UPDATE_INT_CNT__SHIFT 0x10
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_LO_SMPL_BYTE_POS_MASK 0x3
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_LO_SMPL_BYTE_POS__SHIFT 0x0
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_HI_SMPL_BYTE_POS_MASK 0xc
+#define DMCU_FW_CHECKSUM_SMPL_BYTE_POS__DMCU_FW_CHECKSUM_HI_SMPL_BYTE_POS__SHIFT 0x2
+#define DMCU_UC_CLK_GATING_CNTL__UC_IRAM_RD_DELAY_MASK 0x7
+#define DMCU_UC_CLK_GATING_CNTL__UC_IRAM_RD_DELAY__SHIFT 0x0
+#define DMCU_UC_CLK_GATING_CNTL__UC_ERAM_RD_DELAY_MASK 0x700
+#define DMCU_UC_CLK_GATING_CNTL__UC_ERAM_RD_DELAY__SHIFT 0x8
+#define DMCU_UC_CLK_GATING_CNTL__UC_RBBM_RD_CLK_GATING_EN_MASK 0x10000
+#define DMCU_UC_CLK_GATING_CNTL__UC_RBBM_RD_CLK_GATING_EN__SHIFT 0x10
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE0_MASK 0xff
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE0__SHIFT 0x0
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE1_MASK 0xff00
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE1__SHIFT 0x8
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE2_MASK 0xff0000
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE2__SHIFT 0x10
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE3_MASK 0xff000000
+#define MASTER_COMM_DATA_REG1__MASTER_COMM_DATA_REG1_BYTE3__SHIFT 0x18
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE0_MASK 0xff
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE0__SHIFT 0x0
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE1_MASK 0xff00
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE1__SHIFT 0x8
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE2_MASK 0xff0000
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE2__SHIFT 0x10
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE3_MASK 0xff000000
+#define MASTER_COMM_DATA_REG2__MASTER_COMM_DATA_REG2_BYTE3__SHIFT 0x18
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE0_MASK 0xff
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE0__SHIFT 0x0
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE1_MASK 0xff00
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE1__SHIFT 0x8
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE2_MASK 0xff0000
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE2__SHIFT 0x10
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE3_MASK 0xff000000
+#define MASTER_COMM_DATA_REG3__MASTER_COMM_DATA_REG3_BYTE3__SHIFT 0x18
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE0_MASK 0xff
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE0__SHIFT 0x0
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE1_MASK 0xff00
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE1__SHIFT 0x8
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE2_MASK 0xff0000
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE2__SHIFT 0x10
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE3_MASK 0xff000000
+#define MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE3__SHIFT 0x18
+#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x1
+#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT__SHIFT 0x0
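+/*
+ * Editorial note, not part of the original header: MASTER_COMM_CMD_REG
+ * packs four command bytes into one dword, and the MASTER_COMM_INTERRUPT
+ * bit in MASTER_COMM_CNTL_REG plausibly signals the microcontroller that
+ * a command is pending. A sketch of issuing a command byte, with the
+ * RREG32()/WREG32() accessors and mm* offsets assumed as before:
+ *
+ *   WREG32(mmMASTER_COMM_CMD_REG,
+ *          (cmd << MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE0__SHIFT)
+ *          & MASTER_COMM_CMD_REG__MASTER_COMM_CMD_REG_BYTE0_MASK);
+ *   WREG32(mmMASTER_COMM_CNTL_REG,
+ *          MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK);
+ */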
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE0_MASK 0xff
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE0__SHIFT 0x0
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE1_MASK 0xff00
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE1__SHIFT 0x8
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE2_MASK 0xff0000
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE2__SHIFT 0x10
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE3_MASK 0xff000000
+#define SLAVE_COMM_DATA_REG1__SLAVE_COMM_DATA_REG1_BYTE3__SHIFT 0x18
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE0_MASK 0xff
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE0__SHIFT 0x0
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE1_MASK 0xff00
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE1__SHIFT 0x8
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE2_MASK 0xff0000
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE2__SHIFT 0x10
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE3_MASK 0xff000000
+#define SLAVE_COMM_DATA_REG2__SLAVE_COMM_DATA_REG2_BYTE3__SHIFT 0x18
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE0_MASK 0xff
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE0__SHIFT 0x0
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE1_MASK 0xff00
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE1__SHIFT 0x8
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE2_MASK 0xff0000
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE2__SHIFT 0x10
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE3_MASK 0xff000000
+#define SLAVE_COMM_DATA_REG3__SLAVE_COMM_DATA_REG3_BYTE3__SHIFT 0x18
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE0_MASK 0xff
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE0__SHIFT 0x0
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE1_MASK 0xff00
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE1__SHIFT 0x8
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE2_MASK 0xff0000
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE2__SHIFT 0x10
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE3_MASK 0xff000000
+#define SLAVE_COMM_CMD_REG__SLAVE_COMM_CMD_REG_BYTE3__SHIFT 0x18
+#define SLAVE_COMM_CNTL_REG__SLAVE_COMM_INTERRUPT_MASK 0x1
+#define SLAVE_COMM_CNTL_REG__SLAVE_COMM_INTERRUPT__SHIFT 0x0
+#define SLAVE_COMM_CNTL_REG__COMM_PORT_MSG_TO_HOST_IN_PROGRESS_MASK 0x100
+#define SLAVE_COMM_CNTL_REG__COMM_PORT_MSG_TO_HOST_IN_PROGRESS__SHIFT 0x8
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_INDEX_MASK 0xff
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DMCU_TEST_DEBUG_DATA__DMCU_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DMCU_TEST_DEBUG_DATA__DMCU_TEST_DEBUG_DATA__SHIFT 0x0
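+/*
+ * Editorial note, not part of the original header: DMCU_TEST_DEBUG_INDEX
+ * and DMCU_TEST_DEBUG_DATA form another index/data pair, with what looks
+ * like a write-enable bit gating writes through the data port. A sketch
+ * of a debug write, under the same accessor/offset assumptions:
+ *
+ *   WREG32(mmDMCU_TEST_DEBUG_INDEX,
+ *          (idx & DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_INDEX_MASK) |
+ *          DMCU_TEST_DEBUG_INDEX__DMCU_TEST_DEBUG_WRITE_EN_MASK);
+ *   WREG32(mmDMCU_TEST_DEBUG_DATA, val);
+ */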
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER0_INT_CLEAR_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER1_INT_CLEAR_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER2_INT_CLEAR_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER3_INT_CLEAR_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER4_INT_CLEAR_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER5_INT_CLEAR_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER6_INT_CLEAR_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER7_INT_CLEAR_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER0_INT_CLEAR_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER1_INT_CLEAR_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER2_INT_CLEAR_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER3_INT_CLEAR_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER4_INT_CLEAR_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER5_INT_CLEAR_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER6_INT_CLEAR_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER7_INT_CLEAR_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER0_INT_CLEAR_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER1_INT_CLEAR_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER2_INT_CLEAR_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER3_INT_CLEAR_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER4_INT_CLEAR_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER5_INT_CLEAR_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER6_INT_CLEAR_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER7_INT_CLEAR_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCI_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCO_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS1__DCCG_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER0_INT_CLEAR_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER1_INT_CLEAR_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER2_INT_CLEAR_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER3_INT_CLEAR_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER4_INT_CLEAR_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER5_INT_CLEAR_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER6_INT_CLEAR_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER7_INT_CLEAR_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER0_INT_CLEAR_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER1_INT_CLEAR_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER2_INT_CLEAR_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER3_INT_CLEAR_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER4_INT_CLEAR_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER5_INT_CLEAR_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER6_INT_CLEAR_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER7_INT_CLEAR_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER0_INT_CLEAR_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER1_INT_CLEAR_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER2_INT_CLEAR_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER3_INT_CLEAR_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER4_INT_CLEAR_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER5_INT_CLEAR_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER6_INT_CLEAR_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER7_INT_CLEAR_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE0_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE1_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS2__DCFE2_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER0_INT_CLEAR_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER1_INT_CLEAR_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER2_INT_CLEAR_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER3_INT_CLEAR_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER4_INT_CLEAR_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER5_INT_CLEAR_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER6_INT_CLEAR_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER7_INT_CLEAR_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER0_INT_CLEAR_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER1_INT_CLEAR_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER2_INT_CLEAR_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER3_INT_CLEAR_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER4_INT_CLEAR_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER5_INT_CLEAR_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER6_INT_CLEAR_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER7_INT_CLEAR_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER0_INT_CLEAR_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER1_INT_CLEAR_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER2_INT_CLEAR_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER3_INT_CLEAR_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER4_INT_CLEAR_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER5_INT_CLEAR_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER6_INT_CLEAR_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER7_INT_CLEAR_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE4_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS3__DCFE5_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x1a
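+/*
+ * Illustrative note (not part of the generated header): every field in these
+ * definitions pairs a _MASK with a __SHIFT such that
+ * FIELD_MASK == (field_bits << FIELD__SHIFT). A hypothetical read of one
+ * occurred bit from the STATUS3 register value might look like:
+ *
+ *   occurred = (status3 &
+ *       DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER4_INT_OCCURRED_MASK)
+ *       >> DMCU_PERFMON_INTERRUPT_STATUS3__DCFE3_PERFMON_COUNTER4_INT_OCCURRED__SHIFT;
+ */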
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER0_INT_CLEAR_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER1_INT_CLEAR_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER2_INT_CLEAR_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER3_INT_CLEAR_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER4_INT_CLEAR_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER5_INT_CLEAR_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER6_INT_CLEAR_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER7_INT_CLEAR_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER0_INT_CLEAR_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER1_INT_CLEAR_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER2_INT_CLEAR_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER3_INT_CLEAR_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER4_INT_CLEAR_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER5_INT_CLEAR_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER6_INT_CLEAR_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER7_INT_CLEAR_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER0_INT_OCCURRED_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER0_INT_OCCURRED__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER0_INT_CLEAR_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER0_INT_CLEAR__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER1_INT_OCCURRED_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER1_INT_OCCURRED__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER1_INT_CLEAR_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER1_INT_CLEAR__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER2_INT_OCCURRED_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER2_INT_OCCURRED__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER2_INT_CLEAR_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER2_INT_CLEAR__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER3_INT_OCCURRED_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER3_INT_OCCURRED__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER3_INT_CLEAR_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER3_INT_CLEAR__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER4_INT_OCCURRED_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER4_INT_OCCURRED__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER4_INT_CLEAR_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER4_INT_CLEAR__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER5_INT_OCCURRED_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER5_INT_OCCURRED__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER5_INT_CLEAR_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER5_INT_CLEAR__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER6_INT_OCCURRED_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER6_INT_OCCURRED__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER6_INT_CLEAR_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER6_INT_CLEAR__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER7_INT_OCCURRED_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER7_INT_OCCURRED__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER7_INT_CLEAR_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER7_INT_CLEAR__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__WB_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCRX_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER_OFF_INT_OCCURRED_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER_OFF_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER_OFF_INT_CLEAR_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_STATUS4__DCCG_PERFMON2_COUNTER_OFF_INT_CLEAR__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER0_INT_CLEAR_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER1_INT_CLEAR_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER2_INT_CLEAR_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER3_INT_CLEAR_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER4_INT_CLEAR_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER5_INT_CLEAR_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER6_INT_CLEAR_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER7_INT_CLEAR_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER0_INT_OCCURRED_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER0_INT_OCCURRED__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER0_INT_CLEAR_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER0_INT_CLEAR__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER1_INT_OCCURRED_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER1_INT_OCCURRED__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER1_INT_CLEAR_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER1_INT_CLEAR__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER2_INT_OCCURRED_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER2_INT_OCCURRED__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER2_INT_CLEAR_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER2_INT_CLEAR__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER3_INT_OCCURRED_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER3_INT_OCCURRED__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER3_INT_CLEAR_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER3_INT_CLEAR__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER4_INT_OCCURRED_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER4_INT_OCCURRED__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER4_INT_CLEAR_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER4_INT_CLEAR__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER5_INT_OCCURRED_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER5_INT_OCCURRED__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER5_INT_CLEAR_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER5_INT_CLEAR__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER6_INT_OCCURRED_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER6_INT_OCCURRED__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER6_INT_CLEAR_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER6_INT_CLEAR__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER7_INT_OCCURRED_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER7_INT_OCCURRED__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER7_INT_CLEAR_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER7_INT_CLEAR__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV0_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER_OFF_INT_OCCURRED_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER_OFF_INT_OCCURRED__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER_OFF_INT_CLEAR_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_STATUS5__DCFEV1_PERFMON_COUNTER_OFF_INT_CLEAR__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCI_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCO_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK1__DCCG_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE0_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE1_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK2__DCFE2_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE3_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE4_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK3__DCFE5_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER0_INT_TO_UC_EN_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER0_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER1_INT_TO_UC_EN_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER1_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER2_INT_TO_UC_EN_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER2_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER3_INT_TO_UC_EN_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER3_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER4_INT_TO_UC_EN_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER4_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER5_INT_TO_UC_EN_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER5_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER6_INT_TO_UC_EN_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER6_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER7_INT_TO_UC_EN_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER7_INT_TO_UC_EN__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__WB_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCRX_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER_OFF_INT_TO_UC_EN_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK4__DCCG_PERFMON2_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV0_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER0_INT_TO_UC_EN_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER0_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER1_INT_TO_UC_EN_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER1_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER2_INT_TO_UC_EN_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER2_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER3_INT_TO_UC_EN_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER3_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER4_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER4_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER5_INT_TO_UC_EN_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER5_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER6_INT_TO_UC_EN_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER6_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER7_INT_TO_UC_EN_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER7_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER_OFF_INT_TO_UC_EN_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_EN_MASK5__DCFEV1_PERFMON_COUNTER_OFF_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCI_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCO_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DCCG_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE0_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE1_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL2__DCFE2_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE3_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE4_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL3__DCFE5_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x40000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x80000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x100000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x200000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x400000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x800000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x1000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__WB_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x2000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCRX_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x4000000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL4__DCCG_PERFMON2_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x40
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x80
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x100
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV0_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL_MASK 0x200
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER0_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL_MASK 0x400
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER1_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL_MASK 0x800
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER2_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL_MASK 0x1000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER3_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER4_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL_MASK 0x4000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER5_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL_MASK 0x8000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER6_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL_MASK 0x10000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER7_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL_MASK 0x20000
+#define DMCU_PERFMON_INTERRUPT_TO_UC_XIRQ_IRQ_SEL5__DCFEV1_PERFMON_COUNTER_OFF_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_MSA_RECEIVED_INT_OCCURRED_MASK 0x1
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_MSA_RECEIVED_INT_OCCURRED__SHIFT 0x0
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_MSA_RECEIVED_INT_CLEAR_MASK 0x1
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_MSA_RECEIVED_INT_CLEAR__SHIFT 0x0
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_OCCURRED_MASK 0x2
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_OCCURRED__SHIFT 0x1
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_CLEAR_MASK 0x2
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_CLEAR__SHIFT 0x1
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT0_OCCURRED_MASK 0x4
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT0_OCCURRED__SHIFT 0x2
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT0_CLEAR_MASK 0x4
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT0_CLEAR__SHIFT 0x2
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT1_OCCURRED_MASK 0x8
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT1_OCCURRED__SHIFT 0x3
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT1_CLEAR_MASK 0x8
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_VERTICAL_INT1_CLEAR__SHIFT 0x3
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_SDP_RECEIVED_INT_OCCURRED_MASK 0x10
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_SDP_RECEIVED_INT_OCCURRED__SHIFT 0x4
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_SDP_RECEIVED_INT_CLEAR_MASK 0x10
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD0P0_SDP_RECEIVED_INT_CLEAR__SHIFT 0x4
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_MSA_RECEIVED_INT_OCCURRED_MASK 0x20
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_MSA_RECEIVED_INT_OCCURRED__SHIFT 0x5
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_MSA_RECEIVED_INT_CLEAR_MASK 0x20
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_MSA_RECEIVED_INT_CLEAR__SHIFT 0x5
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_OCCURRED_MASK 0x40
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_OCCURRED__SHIFT 0x6
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_CLEAR_MASK 0x40
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_CLEAR__SHIFT 0x6
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT0_OCCURRED_MASK 0x80
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT0_OCCURRED__SHIFT 0x7
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT0_CLEAR_MASK 0x80
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT0_CLEAR__SHIFT 0x7
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT1_OCCURRED_MASK 0x100
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT1_OCCURRED__SHIFT 0x8
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT1_CLEAR_MASK 0x100
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_VERTICAL_INT1_CLEAR__SHIFT 0x8
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_SDP_RECEIVED_INT_OCCURRED_MASK 0x200
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_SDP_RECEIVED_INT_OCCURRED__SHIFT 0x9
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_SDP_RECEIVED_INT_CLEAR_MASK 0x200
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_SD1P0_SDP_RECEIVED_INT_CLEAR__SHIFT 0x9
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x400
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xa
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x400
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xa
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x800
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xb
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x800
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xb
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x1000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xc
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x1000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xc
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x2000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xd
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x2000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xd
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x4000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xe
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x4000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xe
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x8000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0xf
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x8000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0xf
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_OCCURRED_MASK 0x10000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_OCCURRED__SHIFT 0x10
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_CLEAR_MASK 0x10000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_CLEAR__SHIFT 0x10
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_OCCURRED_MASK 0x20000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_OCCURRED__SHIFT 0x11
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_CLEAR_MASK 0x20000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_CLEAR__SHIFT 0x11
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_OCCURRED_MASK 0x40000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_OCCURRED__SHIFT 0x12
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_CLEAR_MASK 0x40000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_CLEAR__SHIFT 0x12
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_OCCURRED_MASK 0x80000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_OCCURRED__SHIFT 0x13
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_CLEAR_MASK 0x80000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_CLEAR__SHIFT 0x13
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_OCCURRED_MASK 0x100000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_OCCURRED__SHIFT 0x14
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_CLEAR_MASK 0x100000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_CLEAR__SHIFT 0x14
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_OCCURRED_MASK 0x200000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_OCCURRED__SHIFT 0x15
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_CLEAR_MASK 0x200000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_CLEAR__SHIFT 0x15
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_AUX_INT_OCCURRED_MASK 0x400000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_AUX_INT_OCCURRED__SHIFT 0x16
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_AUX_INT_CLEAR_MASK 0x400000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_AUX_INT_CLEAR__SHIFT 0x16
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_I2C_INT_OCCURRED_MASK 0x800000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_I2C_INT_OCCURRED__SHIFT 0x17
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_I2C_INT_CLEAR_MASK 0x800000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_I2C_INT_CLEAR__SHIFT 0x17
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_CPU_INT_OCCURRED_MASK 0x1000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_CPU_INT_OCCURRED__SHIFT 0x18
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_CPU_INT_CLEAR_MASK 0x1000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_CPU_INT_CLEAR__SHIFT 0x18
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_OCCURRED_MASK 0x2000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_OCCURRED__SHIFT 0x19
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_CLEAR_MASK 0x2000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_CLEAR__SHIFT 0x19
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_OCCURRED_MASK 0x4000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_OCCURRED__SHIFT 0x1a
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_CLEAR_MASK 0x4000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_CLEAR__SHIFT 0x1a
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_OCCURRED_MASK 0x8000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_OCCURRED__SHIFT 0x1b
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_CLEAR_MASK 0x8000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_CLEAR__SHIFT 0x1b
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_OCCURRED_MASK 0x10000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_OCCURRED__SHIFT 0x1c
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_CLEAR_MASK 0x10000000
+#define DMCU_DPRX_INTERRUPT_STATUS1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_CLEAR__SHIFT 0x1c
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_MSA_RECEIVED_INT_TO_UC_EN_MASK 0x1
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_MSA_RECEIVED_INT_TO_UC_EN__SHIFT 0x0
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_TO_UC_EN_MASK 0x2
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_TO_UC_EN__SHIFT 0x1
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VERTICAL_INT0_TO_UC_EN_MASK 0x4
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VERTICAL_INT0_TO_UC_EN__SHIFT 0x2
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VERTICAL_INT1_TO_UC_EN_MASK 0x8
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_VERTICAL_INT1_TO_UC_EN__SHIFT 0x3
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_SDP_RECEIVED_INT_TO_UC_EN_MASK 0x10
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD0P0_SDP_RECEIVED_INT_TO_UC_EN__SHIFT 0x4
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_MSA_RECEIVED_INT_TO_UC_EN_MASK 0x20
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_MSA_RECEIVED_INT_TO_UC_EN__SHIFT 0x5
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_TO_UC_EN_MASK 0x40
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_TO_UC_EN__SHIFT 0x6
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VERTICAL_INT0_TO_UC_EN_MASK 0x80
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VERTICAL_INT0_TO_UC_EN__SHIFT 0x7
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VERTICAL_INT1_TO_UC_EN_MASK 0x100
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_VERTICAL_INT1_TO_UC_EN__SHIFT 0x8
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_SDP_RECEIVED_INT_TO_UC_EN_MASK 0x200
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_SD1P0_SDP_RECEIVED_INT_TO_UC_EN__SHIFT 0x9
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x400
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xa
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x800
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xb
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x1000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xc
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x2000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xd
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x4000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xe
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x8000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0xf
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN_MASK 0x10000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_TO_UC_EN__SHIFT 0x10
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_TO_UC_EN_MASK 0x20000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_TO_UC_EN__SHIFT 0x11
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_TO_UC_EN_MASK 0x40000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_TO_UC_EN__SHIFT 0x12
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_TO_UC_EN_MASK 0x80000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_TO_UC_EN__SHIFT 0x13
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_TO_UC_EN_MASK 0x100000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_TO_UC_EN__SHIFT 0x14
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_TO_UC_EN_MASK 0x200000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_TO_UC_EN__SHIFT 0x15
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_AUX_INT_TO_UC_EN_MASK 0x400000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_AUX_INT_TO_UC_EN__SHIFT 0x16
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_I2C_INT_TO_UC_EN_MASK 0x800000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_I2C_INT_TO_UC_EN__SHIFT 0x17
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_CPU_INT_TO_UC_EN_MASK 0x1000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_CPU_INT_TO_UC_EN__SHIFT 0x18
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_TO_UC_EN_MASK 0x2000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_TO_UC_EN__SHIFT 0x19
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_TO_UC_EN_MASK 0x4000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_TO_UC_EN__SHIFT 0x1a
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_TO_UC_EN_MASK 0x8000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_TO_UC_EN__SHIFT 0x1b
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_TO_UC_EN_MASK 0x10000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_EN_MASK1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_TO_UC_EN__SHIFT 0x1c
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_MSA_RECEIVED_INT_XIRQ_IRQ_SEL_MASK 0x1
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_MSA_RECEIVED_INT_XIRQ_IRQ_SEL__SHIFT 0x0
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_XIRQ_IRQ_SEL_MASK 0x2
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_XIRQ_IRQ_SEL__SHIFT 0x1
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VERTICAL_INT0_XIRQ_IRQ_SEL_MASK 0x4
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VERTICAL_INT0_XIRQ_IRQ_SEL__SHIFT 0x2
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VERTICAL_INT1_XIRQ_IRQ_SEL_MASK 0x8
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_VERTICAL_INT1_XIRQ_IRQ_SEL__SHIFT 0x3
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_SDP_RECEIVED_INT_XIRQ_IRQ_SEL_MASK 0x10
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD0P0_SDP_RECEIVED_INT_XIRQ_IRQ_SEL__SHIFT 0x4
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_MSA_RECEIVED_INT_XIRQ_IRQ_SEL_MASK 0x20
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_MSA_RECEIVED_INT_XIRQ_IRQ_SEL__SHIFT 0x5
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_XIRQ_IRQ_SEL_MASK 0x40
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VBID_VID_STREAM_STATUS_TOGGLED_INT_XIRQ_IRQ_SEL__SHIFT 0x6
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VERTICAL_INT0_XIRQ_IRQ_SEL_MASK 0x80
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VERTICAL_INT0_XIRQ_IRQ_SEL__SHIFT 0x7
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VERTICAL_INT1_XIRQ_IRQ_SEL_MASK 0x100
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_VERTICAL_INT1_XIRQ_IRQ_SEL__SHIFT 0x8
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_SDP_RECEIVED_INT_XIRQ_IRQ_SEL_MASK 0x200
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_SD1P0_SDP_RECEIVED_INT_XIRQ_IRQ_SEL__SHIFT 0x9
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x400
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_BS_INTERVAL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xa
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x800
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_SR_INTERVAL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xb
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x1000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_SYMBOL_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xc
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x2000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DISPARITY_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xd
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x4000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_TRAINING_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xe
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x8000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_TEST_PATTERN_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0xf
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL_MASK 0x10000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_ECF_ERROR_THRESH_EXCEEDED_INT_XIRQ_IRQ_SEL__SHIFT 0x10
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_XIRQ_IRQ_SEL_MASK 0x20000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DETECT_SR_LOCK_INT_XIRQ_IRQ_SEL__SHIFT 0x11
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_XIRQ_IRQ_SEL_MASK 0x40000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_LOSS_OF_ALIGN_INT_XIRQ_IRQ_SEL__SHIFT 0x12
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_XIRQ_IRQ_SEL_MASK 0x80000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_LOSS_OF_DESKEW_INT_XIRQ_IRQ_SEL__SHIFT 0x13
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_XIRQ_IRQ_SEL_MASK 0x100000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_EXCESSIVE_ERROR_INT_XIRQ_IRQ_SEL__SHIFT 0x14
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_XIRQ_IRQ_SEL_MASK 0x200000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_DPHY_P0_DESKEW_FIFO_OVERFLOW_INT_XIRQ_IRQ_SEL__SHIFT 0x15
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_AUX_INT_XIRQ_IRQ_SEL_MASK 0x400000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_AUX_INT_XIRQ_IRQ_SEL__SHIFT 0x16
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_I2C_INT_XIRQ_IRQ_SEL_MASK 0x800000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_I2C_INT_XIRQ_IRQ_SEL__SHIFT 0x17
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_CPU_INT_XIRQ_IRQ_SEL_MASK 0x1000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_CPU_INT_XIRQ_IRQ_SEL__SHIFT 0x18
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_XIRQ_IRQ_SEL_MASK 0x2000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG1_TIMEOUT_INT_XIRQ_IRQ_SEL__SHIFT 0x19
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_XIRQ_IRQ_SEL_MASK 0x4000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG2_TIMEOUT_INT_XIRQ_IRQ_SEL__SHIFT 0x1a
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_XIRQ_IRQ_SEL_MASK 0x8000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG3_TIMEOUT_INT_XIRQ_IRQ_SEL__SHIFT 0x1b
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_XIRQ_IRQ_SEL_MASK 0x10000000
+#define DMCU_DPRX_INTERRUPT_TO_UC_XIRQ_IRQ_SEL1__DPRX_AUX_P0_MSG4_TIMEOUT_INT_XIRQ_IRQ_SEL__SHIFT 0x1c
+#define DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x10
+#define DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x100
+#define DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x20000
+#define DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x11
+#define DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x7
+#define DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP_PIXEL_FORMAT__DP_DYN_RANGE_MASK 0x100
+#define DP_PIXEL_FORMAT__DP_DYN_RANGE__SHIFT 0x8
+#define DP_PIXEL_FORMAT__DP_YCBCR_RANGE_MASK 0x10000
+#define DP_PIXEL_FORMAT__DP_YCBCR_RANGE__SHIFT 0x10
+#define DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x7000000
+#define DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE_MASK 0xff
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE__SHIFT 0x0
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE_ENABLE_MASK 0x100
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC0_OVERRIDE_ENABLE__SHIFT 0x8
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC1_BIT7_OVERRIDE_MASK 0x200
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC1_BIT7_OVERRIDE__SHIFT 0x9
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC1_BIT7_OVERRIDE_ENABLE_MASK 0x20000
+#define DP_MSA_COLORIMETRY__DP_MSA_MISC1_BIT7_OVERRIDE_ENABLE__SHIFT 0x11
+#define DP_CONFIG__DP_UDI_LANES_MASK 0x3
+#define DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x1
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x300
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x10000
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x100000
+#define DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x1
+#define DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x10
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x20
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x40
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x80
+#define DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x100
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x1000
+#define DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP_MSA_MISC__DP_MSA_MISC1_MASK 0x78
+#define DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x3
+#define DP_MSA_MISC__DP_MSA_MISC2_MASK 0xff00
+#define DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP_MSA_MISC__DP_MSA_MISC3_MASK 0xff0000
+#define DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP_MSA_MISC__DP_MSA_MISC4_MASK 0xff000000
+#define DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP_VID_TIMING__DP_VID_TIMING_MODE_MASK 0x1
+#define DP_VID_TIMING__DP_VID_TIMING_MODE__SHIFT 0x0
+#define DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x10
+#define DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x100
+#define DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP_VID_TIMING__DP_VID_M_DOUBLE_VALUE_EN_MASK 0x200
+#define DP_VID_TIMING__DP_VID_M_DOUBLE_VALUE_EN__SHIFT 0x9
+#define DP_VID_TIMING__DP_VID_N_DIV_MASK 0xff000000
+#define DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP_VID_N__DP_VID_N_MASK 0xffffff
+#define DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP_VID_M__DP_VID_M_MASK 0xffffff
+#define DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x3ffff
+#define DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x1000000
+#define DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000
+#define DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x1
+#define DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0xfff
+#define DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP_VID_MSA_VBID__DP_VID_MSA_TOP_FIELD_MODE_MASK 0x10000
+#define DP_VID_MSA_VBID__DP_VID_MSA_TOP_FIELD_MODE__SHIFT 0x10
+#define DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x1000000
+#define DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x1
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x2
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x4
+#define DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x1
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x2
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x4
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x8
+#define DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x10000
+#define DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x1000000
+#define DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x3
+#define DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP_DPHY_SYM0__DPHY_SYM1_MASK 0x3ff
+#define DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP_DPHY_SYM0__DPHY_SYM2_MASK 0xffc00
+#define DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3ff00000
+#define DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP_DPHY_SYM1__DPHY_SYM4_MASK 0x3ff
+#define DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP_DPHY_SYM1__DPHY_SYM5_MASK 0xffc00
+#define DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3ff00000
+#define DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP_DPHY_SYM2__DPHY_SYM7_MASK 0x3ff
+#define DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP_DPHY_SYM2__DPHY_SYM8_MASK 0xffc00
+#define DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x100
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x10000
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x1000000
+#define DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x1
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x30
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7fffff00
+#define DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x3ff
+#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x8000
+#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x10000
+#define DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x1
+#define DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x10
+#define DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x100
+#define DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x1
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x30
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0xff0000
+#define DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0xff
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0xff00
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0xff0000
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xff000000
+#define DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x3f
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x3f00
+#define DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x1
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x100
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x10000
+#define DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x1
+#define DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x2
+#define DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x4
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0xfff00
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xfff00000
+#define DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x7
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x10
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x100
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x1000
+#define DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x7
+#define DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TIMING_OVERRIDE_EN_MASK 0x1
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TIMING_OVERRIDE_EN__SHIFT 0x0
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TOTAL_OVERRIDE_MASK 0x3fff0
+#define DP_MSA_V_TIMING_OVERRIDE1__DP_MSA_V_TOTAL_OVERRIDE__SHIFT 0x4
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_START_OVERRIDE_MASK 0x3fff
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_START_OVERRIDE__SHIFT 0x0
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_END_OVERRIDE_MASK 0x3fff0000
+#define DP_MSA_V_TIMING_OVERRIDE2__DP_MSA_V_BLANK_END_OVERRIDE__SHIFT 0x10
+#define DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x1
+#define DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x10
+#define DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x100
+#define DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x1000
+#define DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x10000
+#define DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x100000
+#define DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x200000
+#define DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x400000
+#define DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x800000
+#define DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP_SEC_CNTL__DP_SEC_AVI_ENABLE_MASK 0x1000000
+#define DP_SEC_CNTL__DP_SEC_AVI_ENABLE__SHIFT 0x18
+#define DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000
+#define DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x1
+#define DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x10
+#define DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x20
+#define DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x40
+#define DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x80
+#define DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xffff0000
+#define DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0xfff
+#define DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xffff0000
+#define DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0xffff
+#define DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xffff0000
+#define DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x3fff
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xffff0000
+#define DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x100000
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x1000000
+#define DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000
+#define DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0xffffff
+#define DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0xffffff
+#define DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0xffffff
+#define DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0xffffff
+#define DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x1
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0xe
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x10
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x3f00
+#define DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x10000
+#define DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x3ffffff
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xfc000000
+#define DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x1
+#define DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x7
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x3f00
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x70000
+#define DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3f000000
+#define DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x7
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x3f00
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x70000
+#define DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3f000000
+#define DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x7
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x3f00
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x70000
+#define DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3f000000
+#define DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x3
+#define DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x100
+#define DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x3ff
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x30000
+#define DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x1
+#define DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x10
+#define DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x100
+#define DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP_MSE_MISC_CNTL__DP_MSE_OUTPUT_DPDBG_DATA_MASK 0x10000
+#define DP_MSE_MISC_CNTL__DP_MSE_OUTPUT_DPDBG_DATA__SHIFT 0x10
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x7
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x3f00
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x70000
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3f000000
+#define DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x7
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x3f00
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x70000
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3f000000
+#define DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x7
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x3f00
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x70000
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3f000000
+#define DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_INDEX_MASK 0xff
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DP_TEST_DEBUG_INDEX__DP_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DP_TEST_DEBUG_DATA__DP_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DP_TEST_DEBUG_DATA__DP_TEST_DEBUG_DATA__SHIFT 0x0
+#define DP_FE_TEST_DEBUG_INDEX__DP_FE_TEST_DEBUG_INDEX_MASK 0xff
+#define DP_FE_TEST_DEBUG_INDEX__DP_FE_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DP_FE_TEST_DEBUG_INDEX__DP_FE_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DP_FE_TEST_DEBUG_INDEX__DP_FE_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DP_FE_TEST_DEBUG_DATA__DP_FE_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DP_FE_TEST_DEBUG_DATA__DP_FE_TEST_DEBUG_DATA__SHIFT 0x0
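+/*
+ * Illustrative note (not part of the generated header): each of these
+ * _MASK/__SHIFT pairs is consumed together to extract or compose a bit
+ * field in a 32-bit register value, e.g. for the DP_VID_N_DIV field
+ * defined above:
+ *
+ *   val = (reg & DP_VID_TIMING__DP_VID_N_DIV_MASK) >>
+ *         DP_VID_TIMING__DP_VID_N_DIV__SHIFT;
+ *
+ * 'reg' and 'val' are placeholder names for illustration only; the
+ * driver's own register-access helpers wrap this idiom.
+ */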
+#define AUX_CONTROL__AUX_EN_MASK 0x1
+#define AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define AUX_CONTROL__AUX_RESET_MASK 0x10
+#define AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define AUX_CONTROL__AUX_RESET_DONE_MASK 0x20
+#define AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define AUX_CONTROL__AUX_LS_READ_EN_MASK 0x100
+#define AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x1000
+#define AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x10000
+#define AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x40000
+#define AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define AUX_CONTROL__AUX_HPD_SEL_MASK 0x700000
+#define AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x1000000
+#define AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000
+#define AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000
+#define AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define AUX_CONTROL__SPARE_0_MASK 0x40000000
+#define AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define AUX_CONTROL__SPARE_1_MASK 0x80000000
+#define AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define AUX_SW_CONTROL__AUX_SW_GO_MASK 0x1
+#define AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x4
+#define AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0xf0
+#define AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x1f0000
+#define AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x3
+#define AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0xc
+#define AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x100
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x400
+#define AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x10000
+#define AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x10000
+#define AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x20000
+#define AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x1000000
+#define AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x1000000
+#define AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x2000000
+#define AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x1
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x2
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x4
+#define AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x10
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x20
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x40
+#define AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK 0x100
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT__SHIFT 0x8
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK_MASK 0x200
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_ACK__SHIFT 0x9
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK_MASK 0x400
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_LOCK_DONE_INT_MASK__SHIFT 0xa
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK 0x1000
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT__SHIFT 0xc
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK_MASK 0x2000
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_ACK__SHIFT 0xd
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK_MASK 0x4000
+#define AUX_INTERRUPT_CONTROL__AUX_GTC_SYNC_ERROR_INT_MASK__SHIFT 0xe
+#define AUX_SW_STATUS__AUX_SW_DONE_MASK 0x1
+#define AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define AUX_SW_STATUS__AUX_SW_REQ_MASK 0x2
+#define AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x70
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x80
+#define AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x100
+#define AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x200
+#define AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x400
+#define AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x800
+#define AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x1000
+#define AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x4000
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x20000
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x40000
+#define AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x80000
+#define AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x100000
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x400000
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x800000
+#define AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1f000000
+#define AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xc0000000
+#define AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1e
+#define AUX_LS_STATUS__AUX_LS_DONE_MASK 0x1
+#define AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define AUX_LS_STATUS__AUX_LS_REQ_MASK 0x2
+#define AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x70
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x80
+#define AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x100
+#define AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x200
+#define AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x400
+#define AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x800
+#define AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x1000
+#define AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x4000
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x20000
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x40000
+#define AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x80000
+#define AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x100000
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x400000
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x800000
+#define AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1f000000
+#define AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000
+#define AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000
+#define AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000
+#define AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x1
+#define AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define AUX_SW_DATA__AUX_SW_DATA_MASK 0xff00
+#define AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define AUX_SW_DATA__AUX_SW_INDEX_MASK 0x1f0000
+#define AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000
+#define AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define AUX_LS_DATA__AUX_LS_DATA_MASK 0xff00
+#define AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define AUX_LS_DATA__AUX_LS_INDEX_MASK 0x1f0000
+#define AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x1
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x30
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x1ff0000
+#define AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x7
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x3f00
+#define AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x70000
+#define AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x70
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x700
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x3000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x10000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x20000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x40000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x80000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x300000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TIMEOUT_LEN_MASK 0x7000000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_TIMEOUT_LEN__SHIFT 0x18
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000
+#define AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0xff
+#define AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x1
+#define AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x70
+#define AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x1ff0000
+#define AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x7
+#define AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x1f00
+#define AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x1f0000
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3fe00000
+#define AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD_MASK 0x1f
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_POTENTIAL_ERROR_THRESHOLD__SHIFT 0x0
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD_MASK 0x1f00
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_DEFINITE_ERROR_THRESHOLD__SHIFT 0x8
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN_MASK 0x30000
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_LEN__SHIFT 0x10
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT_MASK 0x300000
+#define AUX_GTC_SYNC_ERROR_CONTROL__AUX_GTC_SYNC_NUM_RETRY_FOR_LOCK_MAINT__SHIFT 0x14
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE_MASK 0x1
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_COMPLETE__SHIFT 0x0
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST_MASK 0x10
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_LOST__SHIFT 0x4
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED_MASK 0x100
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_OCCURRED__SHIFT 0x8
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE_MASK 0x1e00
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_LOCK_ACQ_TIMEOUT_STATE__SHIFT 0x9
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL_MASK 0x10000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_PHASE_ADJUST_TIME_VIOL__SHIFT 0x10
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_MASK 0x100000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED__SHIFT 0x14
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK_MASK 0x200000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CRITICAL_ERR_OCCURRED_ACK__SHIFT 0x15
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_MASK 0x400000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED__SHIFT 0x16
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK_MASK 0x800000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_POTENTIAL_ERR_REACHED_ACK__SHIFT 0x17
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_MASK 0x1000000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED__SHIFT 0x18
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK_MASK 0x2000000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_MAX_DEFINITE_ERR_REACHED_ACK__SHIFT 0x19
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE_MASK 0xf0000000
+#define AUX_GTC_SYNC_CONTROLLER_STATUS__AUX_GTC_SYNC_CTRL_STATE__SHIFT 0x1c
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE_MASK 0x1
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_DONE__SHIFT 0x0
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ_MASK 0x2
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REQ__SHIFT 0x1
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE_MASK 0x70
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_TIMEOUT_STATE__SHIFT 0x4
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT_MASK 0x80
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_TIMEOUT__SHIFT 0x7
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW_MASK 0x100
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_OVERFLOW__SHIFT 0x8
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON_MASK 0x200
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_HPD_DISCON__SHIFT 0x9
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE_MASK 0x400
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_PARTIAL_BYTE__SHIFT 0xa
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE_MASK 0x800
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NON_AUX_MODE__SHIFT 0xb
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL_MASK 0x1000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP_MASK 0x4000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_STOP__SHIFT 0xe
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L_MASK 0x20000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_L__SHIFT 0x11
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H_MASK 0x40000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_SYNC_INVALID_H__SHIFT 0x12
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START_MASK 0x80000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_INVALID_START__SHIFT 0x13
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET_MASK 0x100000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_NO_DET__SHIFT 0x14
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H_MASK 0x400000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_H__SHIFT 0x16
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L_MASK 0x800000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_RX_RECV_INVALID_L__SHIFT 0x17
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT_MASK 0x1f000000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_REPLY_BYTE_COUNT__SHIFT 0x18
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED_MASK 0x20000000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_SYNC_NACKED__SHIFT 0x1d
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX_MASK 0x40000000
+#define AUX_GTC_SYNC_STATUS__AUX_GTC_MASTER_REQ_BY_RX__SHIFT 0x1e
+#define AUX_TEST_DEBUG_INDEX__AUX_TEST_DEBUG_INDEX_MASK 0xff
+#define AUX_TEST_DEBUG_INDEX__AUX_TEST_DEBUG_INDEX__SHIFT 0x0
+#define AUX_TEST_DEBUG_INDEX__AUX_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define AUX_TEST_DEBUG_INDEX__AUX_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define AUX_TEST_DEBUG_DATA__AUX_TEST_DEBUG_DATA_MASK 0xffffffff
+#define AUX_TEST_DEBUG_DATA__AUX_TEST_DEBUG_DATA__SHIFT 0x0
+#define DP_AUX_DEBUG_A__DP_AUX_DEBUG_A_MASK 0xffffffff
+#define DP_AUX_DEBUG_A__DP_AUX_DEBUG_A__SHIFT 0x0
+#define DP_AUX_DEBUG_B__DP_AUX_DEBUG_B_MASK 0xffffffff
+#define DP_AUX_DEBUG_B__DP_AUX_DEBUG_B__SHIFT 0x0
+#define DP_AUX_DEBUG_C__DP_AUX_DEBUG_C_MASK 0xffffffff
+#define DP_AUX_DEBUG_C__DP_AUX_DEBUG_C__SHIFT 0x0
+#define DP_AUX_DEBUG_D__DP_AUX_DEBUG_D_MASK 0xffffffff
+#define DP_AUX_DEBUG_D__DP_AUX_DEBUG_D__SHIFT 0x0
+#define DP_AUX_DEBUG_E__DP_AUX_DEBUG_E_MASK 0xffffffff
+#define DP_AUX_DEBUG_E__DP_AUX_DEBUG_E__SHIFT 0x0
+#define DP_AUX_DEBUG_F__DP_AUX_DEBUG_F_MASK 0xffffffff
+#define DP_AUX_DEBUG_F__DP_AUX_DEBUG_F__SHIFT 0x0
+#define DP_AUX_DEBUG_G__DP_AUX_DEBUG_G_MASK 0xffffffff
+#define DP_AUX_DEBUG_G__DP_AUX_DEBUG_G__SHIFT 0x0
+#define DP_AUX_DEBUG_H__DP_AUX_DEBUG_H_MASK 0xffffffff
+#define DP_AUX_DEBUG_H__DP_AUX_DEBUG_H__SHIFT 0x0
+#define DP_AUX_DEBUG_I__DP_AUX_DEBUG_I_MASK 0xffffffff
+#define DP_AUX_DEBUG_I__DP_AUX_DEBUG_I__SHIFT 0x0
+#define DP_AUX_DEBUG_J__DP_AUX_DEBUG_J_MASK 0xffffffff
+#define DP_AUX_DEBUG_J__DP_AUX_DEBUG_J__SHIFT 0x0
+#define DP_AUX_DEBUG_K__DP_AUX_DEBUG_K_MASK 0xffffffff
+#define DP_AUX_DEBUG_K__DP_AUX_DEBUG_K__SHIFT 0x0
+#define DP_AUX_DEBUG_L__DP_AUX_DEBUG_L_MASK 0xffffffff
+#define DP_AUX_DEBUG_L__DP_AUX_DEBUG_L__SHIFT 0x0
+#define DP_AUX_DEBUG_M__DP_AUX_DEBUG_M_MASK 0xffffffff
+#define DP_AUX_DEBUG_M__DP_AUX_DEBUG_M__SHIFT 0x0
+#define DP_AUX_DEBUG_N__DP_AUX_DEBUG_N_MASK 0xffffffff
+#define DP_AUX_DEBUG_N__DP_AUX_DEBUG_N__SHIFT 0x0
+#define DP_AUX_DEBUG_O__DP_AUX_DEBUG_O_MASK 0xffffffff
+#define DP_AUX_DEBUG_O__DP_AUX_DEBUG_O__SHIFT 0x0
+#define DP_AUX_DEBUG_P__DP_AUX_DEBUG_P_MASK 0xffffffff
+#define DP_AUX_DEBUG_P__DP_AUX_DEBUG_P__SHIFT 0x0
+#define DP_AUX_DEBUG_Q__DP_AUX_DEBUG_Q_MASK 0xffffffff
+#define DP_AUX_DEBUG_Q__DP_AUX_DEBUG_Q__SHIFT 0x0
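+/* Usage sketch (illustrative only; 'aux_control' is an assumed local
+ * register value, not a symbol from this header): each field above pairs
+ * a _MASK with a __SHIFT, so a field read decodes as
+ *
+ *   hpd_sel = (aux_control & AUX_CONTROL__AUX_HPD_SEL_MASK) >>
+ *             AUX_CONTROL__AUX_HPD_SEL__SHIFT;
+ */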
+#define DVO_ENABLE__DVO_ENABLE_MASK 0x1
+#define DVO_ENABLE__DVO_ENABLE__SHIFT 0x0
+#define DVO_ENABLE__DVO_PIXEL_WIDTH_MASK 0x30
+#define DVO_ENABLE__DVO_PIXEL_WIDTH__SHIFT 0x4
+#define DVO_SOURCE_SELECT__DVO_SOURCE_SELECT_MASK 0x7
+#define DVO_SOURCE_SELECT__DVO_SOURCE_SELECT__SHIFT 0x0
+#define DVO_SOURCE_SELECT__DVO_STEREOSYNC_SELECT_MASK 0x70000
+#define DVO_SOURCE_SELECT__DVO_STEREOSYNC_SELECT__SHIFT 0x10
+#define DVO_OUTPUT__DVO_OUTPUT_ENABLE_MODE_MASK 0x3
+#define DVO_OUTPUT__DVO_OUTPUT_ENABLE_MODE__SHIFT 0x0
+#define DVO_OUTPUT__DVO_CLOCK_MODE_MASK 0x100
+#define DVO_OUTPUT__DVO_CLOCK_MODE__SHIFT 0x8
+#define DVO_CONTROL__DVO_RATE_SELECT_MASK 0x1
+#define DVO_CONTROL__DVO_RATE_SELECT__SHIFT 0x0
+#define DVO_CONTROL__DVO_SDRCLK_SEL_MASK 0x2
+#define DVO_CONTROL__DVO_SDRCLK_SEL__SHIFT 0x1
+#define DVO_CONTROL__DVO_DVPDATA_WIDTH_MASK 0x30
+#define DVO_CONTROL__DVO_DVPDATA_WIDTH__SHIFT 0x4
+#define DVO_CONTROL__DVO_DUAL_CHANNEL_EN_MASK 0x100
+#define DVO_CONTROL__DVO_DUAL_CHANNEL_EN__SHIFT 0x8
+#define DVO_CONTROL__DVO_RESET_FIFO_MASK 0x10000
+#define DVO_CONTROL__DVO_RESET_FIFO__SHIFT 0x10
+#define DVO_CONTROL__DVO_SYNC_PHASE_MASK 0x20000
+#define DVO_CONTROL__DVO_SYNC_PHASE__SHIFT 0x11
+#define DVO_CONTROL__DVO_INVERT_DVOCLK_MASK 0x40000
+#define DVO_CONTROL__DVO_INVERT_DVOCLK__SHIFT 0x12
+#define DVO_CONTROL__DVO_HSYNC_POLARITY_MASK 0x100000
+#define DVO_CONTROL__DVO_HSYNC_POLARITY__SHIFT 0x14
+#define DVO_CONTROL__DVO_VSYNC_POLARITY_MASK 0x200000
+#define DVO_CONTROL__DVO_VSYNC_POLARITY__SHIFT 0x15
+#define DVO_CONTROL__DVO_DE_POLARITY_MASK 0x400000
+#define DVO_CONTROL__DVO_DE_POLARITY__SHIFT 0x16
+#define DVO_CONTROL__DVO_COLOR_FORMAT_MASK 0x3000000
+#define DVO_CONTROL__DVO_COLOR_FORMAT__SHIFT 0x18
+#define DVO_CONTROL__DVO_CTL3_MASK 0x80000000
+#define DVO_CONTROL__DVO_CTL3__SHIFT 0x1f
+#define DVO_CRC_EN__DVO_CRC2_EN_MASK 0x10000
+#define DVO_CRC_EN__DVO_CRC2_EN__SHIFT 0x10
+#define DVO_CRC2_SIG_MASK__DVO_CRC2_SIG_MASK_MASK 0x7ffffff
+#define DVO_CRC2_SIG_MASK__DVO_CRC2_SIG_MASK__SHIFT 0x0
+#define DVO_CRC2_SIG_RESULT__DVO_CRC2_SIG_RESULT_MASK 0x7ffffff
+#define DVO_CRC2_SIG_RESULT__DVO_CRC2_SIG_RESULT__SHIFT 0x0
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_LEVEL_ERROR_MASK 0x1
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_LEVEL_ERROR__SHIFT 0x0
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_USE_OVERWRITE_LEVEL_MASK 0x2
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_OVERWRITE_LEVEL_MASK 0xfc
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_ERROR_ACK_MASK 0x100
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_ERROR_ACK__SHIFT 0x8
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CAL_AVERAGE_LEVEL_MASK 0xfc00
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MAXIMUM_LEVEL_MASK 0xf0000
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MINIMUM_LEVEL_MASK 0x3c00000
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CALIBRATED_MASK 0x20000000
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_CALIBRATED__SHIFT 0x1d
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000
+#define DVO_FIFO_ERROR_STATUS__DVO_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DVO_TEST_DEBUG_INDEX__DVO_TEST_DEBUG_INDEX_MASK 0xff
+#define DVO_TEST_DEBUG_INDEX__DVO_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DVO_TEST_DEBUG_INDEX__DVO_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DVO_TEST_DEBUG_INDEX__DVO_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DVO_TEST_DEBUG_DATA__DVO_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DVO_TEST_DEBUG_DATA__DVO_TEST_DEBUG_DATA__SHIFT 0x0
+#define FBC_CNTL__FBC_GRPH_COMP_EN_MASK 0x1
+#define FBC_CNTL__FBC_GRPH_COMP_EN__SHIFT 0x0
+#define FBC_CNTL__FBC_SRC_SEL_MASK 0xe
+#define FBC_CNTL__FBC_SRC_SEL__SHIFT 0x1
+#define FBC_CNTL__FBC_COMP_CLK_GATE_EN_MASK 0x100
+#define FBC_CNTL__FBC_COMP_CLK_GATE_EN__SHIFT 0x8
+#define FBC_CNTL__FBC_DECOMP_CLK_GATE_EN_MASK 0x400
+#define FBC_CNTL__FBC_DECOMP_CLK_GATE_EN__SHIFT 0xa
+#define FBC_CNTL__FBC_COHERENCY_MODE_MASK 0x30000
+#define FBC_CNTL__FBC_COHERENCY_MODE__SHIFT 0x10
+#define FBC_CNTL__FBC_SOFT_COMPRESS_EN_MASK 0x2000000
+#define FBC_CNTL__FBC_SOFT_COMPRESS_EN__SHIFT 0x19
+#define FBC_CNTL__FBC_EN_MASK 0x80000000
+#define FBC_CNTL__FBC_EN__SHIFT 0x1f
+#define FBC_IDLE_FORCE_CLEAR_MASK__FBC_IDLE_FORCE_CLEAR_MASK_MASK 0xffffffff
+#define FBC_IDLE_FORCE_CLEAR_MASK__FBC_IDLE_FORCE_CLEAR_MASK__SHIFT 0x0
+#define FBC_START_STOP_DELAY__FBC_DECOMP_START_DELAY_MASK 0x1f
+#define FBC_START_STOP_DELAY__FBC_DECOMP_START_DELAY__SHIFT 0x0
+#define FBC_START_STOP_DELAY__FBC_DECOMP_STOP_DELAY_MASK 0x80
+#define FBC_START_STOP_DELAY__FBC_DECOMP_STOP_DELAY__SHIFT 0x7
+#define FBC_START_STOP_DELAY__FBC_COMP_START_DELAY_MASK 0x1f00
+#define FBC_START_STOP_DELAY__FBC_COMP_START_DELAY__SHIFT 0x8
+#define FBC_COMP_CNTL__FBC_MIN_COMPRESSION_MASK 0xf
+#define FBC_COMP_CNTL__FBC_MIN_COMPRESSION__SHIFT 0x0
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO08_EN_MASK 0x10000
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO08_EN__SHIFT 0x10
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO16_EN_MASK 0x20000
+#define FBC_COMP_CNTL__FBC_DEPTH_MONO16_EN__SHIFT 0x11
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB04_EN_MASK 0x40000
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB04_EN__SHIFT 0x12
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB08_EN_MASK 0x80000
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB08_EN__SHIFT 0x13
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB16_EN_MASK 0x100000
+#define FBC_COMP_CNTL__FBC_DEPTH_RGB16_EN__SHIFT 0x14
+#define FBC_COMP_MODE__FBC_RLE_EN_MASK 0x1
+#define FBC_COMP_MODE__FBC_RLE_EN__SHIFT 0x0
+#define FBC_COMP_MODE__FBC_DPCM4_RGB_EN_MASK 0x100
+#define FBC_COMP_MODE__FBC_DPCM4_RGB_EN__SHIFT 0x8
+#define FBC_COMP_MODE__FBC_DPCM8_RGB_EN_MASK 0x200
+#define FBC_COMP_MODE__FBC_DPCM8_RGB_EN__SHIFT 0x9
+#define FBC_COMP_MODE__FBC_DPCM4_YUV_EN_MASK 0x400
+#define FBC_COMP_MODE__FBC_DPCM4_YUV_EN__SHIFT 0xa
+#define FBC_COMP_MODE__FBC_DPCM8_YUV_EN_MASK 0x800
+#define FBC_COMP_MODE__FBC_DPCM8_YUV_EN__SHIFT 0xb
+#define FBC_COMP_MODE__FBC_IND_EN_MASK 0x10000
+#define FBC_COMP_MODE__FBC_IND_EN__SHIFT 0x10
+#define FBC_DEBUG0__FBC_PERF_MUX0_MASK 0xff
+#define FBC_DEBUG0__FBC_PERF_MUX0__SHIFT 0x0
+#define FBC_DEBUG0__FBC_PERF_MUX1_MASK 0xff00
+#define FBC_DEBUG0__FBC_PERF_MUX1__SHIFT 0x8
+#define FBC_DEBUG0__FBC_COMP_WAKE_DIS_MASK 0x10000
+#define FBC_DEBUG0__FBC_COMP_WAKE_DIS__SHIFT 0x10
+#define FBC_DEBUG0__FBC_DEBUG0_MASK 0xfe0000
+#define FBC_DEBUG0__FBC_DEBUG0__SHIFT 0x11
+#define FBC_DEBUG0__FBC_DEBUG_MUX_MASK 0xff000000
+#define FBC_DEBUG0__FBC_DEBUG_MUX__SHIFT 0x18
+#define FBC_DEBUG1__FBC_DEBUG1_MASK 0xffffffff
+#define FBC_DEBUG1__FBC_DEBUG1__SHIFT 0x0
+#define FBC_DEBUG2__FBC_DEBUG2_MASK 0xffffffff
+#define FBC_DEBUG2__FBC_DEBUG2__SHIFT 0x0
+#define FBC_IND_LUT0__FBC_IND_LUT0_MASK 0xffffffff
+#define FBC_IND_LUT0__FBC_IND_LUT0__SHIFT 0x0
+#define FBC_IND_LUT1__FBC_IND_LUT1_MASK 0xffffffff
+#define FBC_IND_LUT1__FBC_IND_LUT1__SHIFT 0x0
+#define FBC_IND_LUT2__FBC_IND_LUT2_MASK 0xffffffff
+#define FBC_IND_LUT2__FBC_IND_LUT2__SHIFT 0x0
+#define FBC_IND_LUT3__FBC_IND_LUT3_MASK 0xffffffff
+#define FBC_IND_LUT3__FBC_IND_LUT3__SHIFT 0x0
+#define FBC_IND_LUT4__FBC_IND_LUT4_MASK 0xffffffff
+#define FBC_IND_LUT4__FBC_IND_LUT4__SHIFT 0x0
+#define FBC_IND_LUT5__FBC_IND_LUT5_MASK 0xffffffff
+#define FBC_IND_LUT5__FBC_IND_LUT5__SHIFT 0x0
+#define FBC_IND_LUT6__FBC_IND_LUT6_MASK 0xffffffff
+#define FBC_IND_LUT6__FBC_IND_LUT6__SHIFT 0x0
+#define FBC_IND_LUT7__FBC_IND_LUT7_MASK 0xffffffff
+#define FBC_IND_LUT7__FBC_IND_LUT7__SHIFT 0x0
+#define FBC_IND_LUT8__FBC_IND_LUT8_MASK 0xffffffff
+#define FBC_IND_LUT8__FBC_IND_LUT8__SHIFT 0x0
+#define FBC_IND_LUT9__FBC_IND_LUT9_MASK 0xffffffff
+#define FBC_IND_LUT9__FBC_IND_LUT9__SHIFT 0x0
+#define FBC_IND_LUT10__FBC_IND_LUT10_MASK 0xffffffff
+#define FBC_IND_LUT10__FBC_IND_LUT10__SHIFT 0x0
+#define FBC_IND_LUT11__FBC_IND_LUT11_MASK 0xffffffff
+#define FBC_IND_LUT11__FBC_IND_LUT11__SHIFT 0x0
+#define FBC_IND_LUT12__FBC_IND_LUT12_MASK 0xffffffff
+#define FBC_IND_LUT12__FBC_IND_LUT12__SHIFT 0x0
+#define FBC_IND_LUT13__FBC_IND_LUT13_MASK 0xffffffff
+#define FBC_IND_LUT13__FBC_IND_LUT13__SHIFT 0x0
+#define FBC_IND_LUT14__FBC_IND_LUT14_MASK 0xffffffff
+#define FBC_IND_LUT14__FBC_IND_LUT14__SHIFT 0x0
+#define FBC_IND_LUT15__FBC_IND_LUT15_MASK 0xffffffff
+#define FBC_IND_LUT15__FBC_IND_LUT15__SHIFT 0x0
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_0_MASK 0xfff
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_0__SHIFT 0x0
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_1_MASK 0xfff0000
+#define FBC_CSM_REGION_OFFSET_01__FBC_CSM_REGION_OFFSET_1__SHIFT 0x10
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_2_MASK 0xfff
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_2__SHIFT 0x0
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_3_MASK 0xfff0000
+#define FBC_CSM_REGION_OFFSET_23__FBC_CSM_REGION_OFFSET_3__SHIFT 0x10
+#define FBC_CLIENT_REGION_MASK__FBC_MEMORY_REGION_MASK_MASK 0xf0000
+#define FBC_CLIENT_REGION_MASK__FBC_MEMORY_REGION_MASK__SHIFT 0x10
+#define FBC_DEBUG_COMP__FBC_COMP_SWAP_MASK 0x3
+#define FBC_DEBUG_COMP__FBC_COMP_SWAP__SHIFT 0x0
+#define FBC_DEBUG_COMP__FBC_COMP_RSIZE_MASK 0x8
+#define FBC_DEBUG_COMP__FBC_COMP_RSIZE__SHIFT 0x3
+#define FBC_DEBUG_COMP__FBC_COMP_BUSY_HYSTERESIS_MASK 0xf0
+#define FBC_DEBUG_COMP__FBC_COMP_BUSY_HYSTERESIS__SHIFT 0x4
+#define FBC_DEBUG_COMP__FBC_COMP_CLK_CNTL_MASK 0x300
+#define FBC_DEBUG_COMP__FBC_COMP_CLK_CNTL__SHIFT 0x8
+#define FBC_DEBUG_COMP__FBC_COMP_PRIVILEGED_ACCESS_ENABLE_MASK 0x400
+#define FBC_DEBUG_COMP__FBC_COMP_PRIVILEGED_ACCESS_ENABLE__SHIFT 0xa
+#define FBC_DEBUG_COMP__FBC_COMP_ADDRESS_TRANSLATION_ENABLE_MASK 0x800
+#define FBC_DEBUG_COMP__FBC_COMP_ADDRESS_TRANSLATION_ENABLE__SHIFT 0xb
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_ADDR_MASK 0xfff
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_ADDR__SHIFT 0x0
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_WR_DATA_MASK 0x10000
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_WR_DATA__SHIFT 0x10
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_RD_DATA_MASK 0x20000
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_RD_DATA__SHIFT 0x11
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_EN_MASK 0x80000000
+#define FBC_DEBUG_CSR__FBC_DEBUG_CSR_EN__SHIFT 0x1f
+#define FBC_DEBUG_CSR_RDATA__FBC_DEBUG_CSR_RDATA_MASK 0xffffffff
+#define FBC_DEBUG_CSR_RDATA__FBC_DEBUG_CSR_RDATA__SHIFT 0x0
+#define FBC_DEBUG_CSR_WDATA__FBC_DEBUG_CSR_WDATA_MASK 0xffffffff
+#define FBC_DEBUG_CSR_WDATA__FBC_DEBUG_CSR_WDATA__SHIFT 0x0
+#define FBC_DEBUG_CSR_RDATA_HI__FBC_DEBUG_CSR_RDATA_HI_MASK 0xff
+#define FBC_DEBUG_CSR_RDATA_HI__FBC_DEBUG_CSR_RDATA_HI__SHIFT 0x0
+#define FBC_DEBUG_CSR_WDATA_HI__FBC_DEBUG_CSR_WDATA_HI_MASK 0xff
+#define FBC_DEBUG_CSR_WDATA_HI__FBC_DEBUG_CSR_WDATA_HI__SHIFT 0x0
+#define FBC_MISC__FBC_DECOMPRESS_ERROR_MASK 0x3
+#define FBC_MISC__FBC_DECOMPRESS_ERROR__SHIFT 0x0
+#define FBC_MISC__FBC_STOP_ON_ERROR_MASK 0x4
+#define FBC_MISC__FBC_STOP_ON_ERROR__SHIFT 0x2
+#define FBC_MISC__FBC_INVALIDATE_ON_ERROR_MASK 0x8
+#define FBC_MISC__FBC_INVALIDATE_ON_ERROR__SHIFT 0x3
+#define FBC_MISC__FBC_ERROR_PIXEL_MASK 0xf0
+#define FBC_MISC__FBC_ERROR_PIXEL__SHIFT 0x4
+#define FBC_MISC__FBC_DIVIDE_X_MASK 0x300
+#define FBC_MISC__FBC_DIVIDE_X__SHIFT 0x8
+#define FBC_MISC__FBC_DIVIDE_Y_MASK 0x400
+#define FBC_MISC__FBC_DIVIDE_Y__SHIFT 0xa
+#define FBC_MISC__FBC_RSM_WRITE_VALUE_MASK 0x800
+#define FBC_MISC__FBC_RSM_WRITE_VALUE__SHIFT 0xb
+#define FBC_MISC__FBC_RSM_UNCOMP_DATA_IMMEDIATELY_MASK 0x1000
+#define FBC_MISC__FBC_RSM_UNCOMP_DATA_IMMEDIATELY__SHIFT 0xc
+#define FBC_MISC__FBC_STOP_ON_HFLIP_EVENT_MASK 0x2000
+#define FBC_MISC__FBC_STOP_ON_HFLIP_EVENT__SHIFT 0xd
+#define FBC_MISC__FBC_DECOMPRESS_ERROR_CLEAR_MASK 0x10000
+#define FBC_MISC__FBC_DECOMPRESS_ERROR_CLEAR__SHIFT 0x10
+#define FBC_MISC__FBC_RESET_AT_ENABLE_MASK 0x100000
+#define FBC_MISC__FBC_RESET_AT_ENABLE__SHIFT 0x14
+#define FBC_MISC__FBC_RESET_AT_DISABLE_MASK 0x200000
+#define FBC_MISC__FBC_RESET_AT_DISABLE__SHIFT 0x15
+#define FBC_MISC__FBC_SLOW_REQ_INTERVAL_MASK 0x1f000000
+#define FBC_MISC__FBC_SLOW_REQ_INTERVAL__SHIFT 0x18
+#define FBC_MISC__FBC_FORCE_DECOMPRESSOR_EN_MASK 0x80000000
+#define FBC_MISC__FBC_FORCE_DECOMPRESSOR_EN__SHIFT 0x1f
+#define FBC_STATUS__FBC_ENABLE_STATUS_MASK 0x1
+#define FBC_STATUS__FBC_ENABLE_STATUS__SHIFT 0x0
+#define FBC_ALPHA_CNTL__FBC_ALPHA_COMP_EN_MASK 0x1
+#define FBC_ALPHA_CNTL__FBC_ALPHA_COMP_EN__SHIFT 0x0
+#define FBC_ALPHA_CNTL__FBC_FORCE_COPY_TO_COMP_BUF_MASK 0x10
+#define FBC_ALPHA_CNTL__FBC_FORCE_COPY_TO_COMP_BUF__SHIFT 0x4
+#define FBC_ALPHA_CNTL__FBC_ZERO_ALPHA_CHUNK_SKIP_EN_MASK 0x100
+#define FBC_ALPHA_CNTL__FBC_ZERO_ALPHA_CHUNK_SKIP_EN__SHIFT 0x8
+#define FBC_ALPHA_RGB_OVERRIDE__FBC_ZERO_ALPHA_R_VAL_MASK 0xff
+#define FBC_ALPHA_RGB_OVERRIDE__FBC_ZERO_ALPHA_R_VAL__SHIFT 0x0
+#define FBC_ALPHA_RGB_OVERRIDE__FBC_ZERO_ALPHA_G_VAL_MASK 0xff000
+#define FBC_ALPHA_RGB_OVERRIDE__FBC_ZERO_ALPHA_G_VAL__SHIFT 0xc
+#define FBC_ALPHA_RGB_OVERRIDE__FBC_ZERO_ALPHA_B_VAL_MASK 0xff000000
+#define FBC_ALPHA_RGB_OVERRIDE__FBC_ZERO_ALPHA_B_VAL__SHIFT 0x18
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_INDEX_MASK 0xff
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define FBC_TEST_DEBUG_INDEX__FBC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define FBC_TEST_DEBUG_DATA__FBC_TEST_DEBUG_DATA_MASK 0xffffffff
+#define FBC_TEST_DEBUG_DATA__FBC_TEST_DEBUG_DATA__SHIFT 0x0
+#define FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0xffff
+#define FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xffff0000
+#define FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0xffff
+#define FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xffff0000
+#define FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0xffff
+#define FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xffff0000
+#define FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x1
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x10
+#define FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x1
+#define FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT_CONTROL__FMT_STEREOSYNC_OVR_POL_MASK 0x10
+#define FMT_CONTROL__FMT_STEREOSYNC_OVR_POL__SHIFT 0x4
+#define FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0xf00
+#define FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x3000
+#define FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x30000
+#define FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0xc0000
+#define FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x100000
+#define FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x200000
+#define FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT_CONTROL__FMT_SRC_SELECT_MASK 0x7000000
+#define FMT_CONTROL__FMT_SRC_SELECT__SHIFT 0x18
+#define FMT_CONTROL__FMT_420_PIXEL_PHASE_LOCKED_MASK 0x40000000
+#define FMT_CONTROL__FMT_420_PIXEL_PHASE_LOCKED__SHIFT 0x1e
+#define FMT_CONTROL__FMT_420_PIXEL_PHASE_LOCKED_CLEAR_MASK 0x80000000
+#define FMT_CONTROL__FMT_420_PIXEL_PHASE_LOCKED_CLEAR__SHIFT 0x1f
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x1
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x2
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x30
+#define FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x100
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x600
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x1800
+#define FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x2000
+#define FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x4000
+#define FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x8000
+#define FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x10000
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x60000
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x600000
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x1000000
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x2000000
+#define FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0xc000000
+#define FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000
+#define FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xc0000000
+#define FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0xff
+#define FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xffff0000
+#define FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0xff
+#define FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xffff0000
+#define FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0xff
+#define FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xffff0000
+#define FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x1
+#define FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x70000
+#define FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT_CRC_CNTL__FMT_CRC_EN_MASK 0x1
+#define FMT_CRC_CNTL__FMT_CRC_EN__SHIFT 0x0
+#define FMT_CRC_CNTL__FMT_DTMTEST_CRC_EN_MASK 0x2
+#define FMT_CRC_CNTL__FMT_DTMTEST_CRC_EN__SHIFT 0x1
+#define FMT_CRC_CNTL__FMT_CRC_CONT_EN_MASK 0x10
+#define FMT_CRC_CNTL__FMT_CRC_CONT_EN__SHIFT 0x4
+#define FMT_CRC_CNTL__FMT_ONE_SHOT_CRC_PENDING_MASK 0x20
+#define FMT_CRC_CNTL__FMT_ONE_SHOT_CRC_PENDING__SHIFT 0x5
+#define FMT_CRC_CNTL__FMT_CRC_INCLUDE_OVERSCAN_MASK 0x40
+#define FMT_CRC_CNTL__FMT_CRC_INCLUDE_OVERSCAN__SHIFT 0x6
+#define FMT_CRC_CNTL__FMT_CRC_ONLY_BLANKB_MASK 0x100
+#define FMT_CRC_CNTL__FMT_CRC_ONLY_BLANKB__SHIFT 0x8
+#define FMT_CRC_CNTL__FMT_CRC_PSR_MODE_ENABLE_MASK 0x200
+#define FMT_CRC_CNTL__FMT_CRC_PSR_MODE_ENABLE__SHIFT 0x9
+#define FMT_CRC_CNTL__FMT_CRC_INTERLACE_MODE_MASK 0x3000
+#define FMT_CRC_CNTL__FMT_CRC_INTERLACE_MODE__SHIFT 0xc
+#define FMT_CRC_CNTL__FMT_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x10000
+#define FMT_CRC_CNTL__FMT_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x10
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_ENABLE_MASK 0x100000
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_ENABLE__SHIFT 0x14
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_SELECT_MASK 0x1000000
+#define FMT_CRC_CNTL__FMT_CRC_EVEN_ODD_PIX_SELECT__SHIFT 0x18
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_RED_MASK_MASK 0xffff
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_RED_MASK__SHIFT 0x0
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_GREEN_MASK_MASK 0xffff0000
+#define FMT_CRC_SIG_RED_GREEN_MASK__FMT_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_BLUE_MASK_MASK 0xffff
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_CONTROL_MASK_MASK 0xffff0000
+#define FMT_CRC_SIG_BLUE_CONTROL_MASK__FMT_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_RED_MASK 0xffff
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_RED__SHIFT 0x0
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_GREEN_MASK 0xffff0000
+#define FMT_CRC_SIG_RED_GREEN__FMT_CRC_SIG_GREEN__SHIFT 0x10
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_BLUE_MASK 0xffff
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_BLUE__SHIFT 0x0
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_CONTROL_MASK 0xffff0000
+#define FMT_CRC_SIG_BLUE_CONTROL__FMT_CRC_SIG_CONTROL__SHIFT 0x10
+#define FMT_DEBUG_CNTL__FMT_DEBUG_COLOR_SELECT_MASK 0x3
+#define FMT_DEBUG_CNTL__FMT_DEBUG_COLOR_SELECT__SHIFT 0x0
+#define FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x1fff
+#define FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT_420_HBLANK_EARLY_START__FMT_420_HBLANK_EARLY_START_MASK 0xfff
+#define FMT_420_HBLANK_EARLY_START__FMT_420_HBLANK_EARLY_START__SHIFT 0x0
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_INDEX_MASK 0xff
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_INDEX__SHIFT 0x0
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define FMT_TEST_DEBUG_INDEX__FMT_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define FMT_TEST_DEBUG_DATA__FMT_TEST_DEBUG_DATA_MASK 0xffffffff
+#define FMT_TEST_DEBUG_DATA__FMT_TEST_DEBUG_DATA__SHIFT 0x0
+#define FMT_DEBUG0__FMT_DEBUG0_MASK 0xffffffff
+#define FMT_DEBUG0__FMT_DEBUG0__SHIFT 0x0
+#define FMT_DEBUG1__FMT_DEBUG1_MASK 0xffffffff
+#define FMT_DEBUG1__FMT_DEBUG1__SHIFT 0x0
+#define FMT_DEBUG2__FMT_DEBUG2_MASK 0xffffffff
+#define FMT_DEBUG2__FMT_DEBUG2__SHIFT 0x0
+#define FMT_DEBUG3__FMT_DEBUG3_MASK 0xffffffff
+#define FMT_DEBUG3__FMT_DEBUG3__SHIFT 0x0
+#define FMT_DEBUG_ID__FMT_DEBUG_ID_MASK 0xffffffff
+#define FMT_DEBUG_ID__FMT_DEBUG_ID__SHIFT 0x0
+#define LB_DATA_FORMAT__PIXEL_DEPTH_MASK 0x3
+#define LB_DATA_FORMAT__PIXEL_DEPTH__SHIFT 0x0
+#define LB_DATA_FORMAT__PIXEL_EXPAN_MODE_MASK 0x4
+#define LB_DATA_FORMAT__PIXEL_EXPAN_MODE__SHIFT 0x2
+#define LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x8
+#define LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x3
+#define LB_DATA_FORMAT__PIXEL_REDUCE_MODE_MASK 0x10
+#define LB_DATA_FORMAT__PIXEL_REDUCE_MODE__SHIFT 0x4
+#define LB_DATA_FORMAT__DYNAMIC_PIXEL_DEPTH_MASK 0x20
+#define LB_DATA_FORMAT__DYNAMIC_PIXEL_DEPTH__SHIFT 0x5
+#define LB_DATA_FORMAT__PREFILL_EN_MASK 0x100
+#define LB_DATA_FORMAT__PREFILL_EN__SHIFT 0x8
+#define LB_DATA_FORMAT__PREFETCH_MASK 0x1000
+#define LB_DATA_FORMAT__PREFETCH__SHIFT 0xc
+#define LB_DATA_FORMAT__REQUEST_MODE_MASK 0x1000000
+#define LB_DATA_FORMAT__REQUEST_MODE__SHIFT 0x18
+#define LB_DATA_FORMAT__ALPHA_EN_MASK 0x80000000
+#define LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x1f
+#define LB_MEMORY_CTRL__LB_MEMORY_SIZE_MASK 0x1fff
+#define LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT 0x0
+#define LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0xf0000
+#define LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define LB_MEMORY_CTRL__LB_MEMORY_CONFIG_MASK 0x300000
+#define LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT 0x14
+#define LB_MEMORY_SIZE_STATUS__LB_MEMORY_SIZE_STATUS_MASK 0x1fff
+#define LB_MEMORY_SIZE_STATUS__LB_MEMORY_SIZE_STATUS__SHIFT 0x0
+#define LB_DESKTOP_HEIGHT__DESKTOP_HEIGHT_MASK 0x7fff
+#define LB_DESKTOP_HEIGHT__DESKTOP_HEIGHT__SHIFT 0x0
+#define LB_VLINE_START_END__VLINE_START_MASK 0x3fff
+#define LB_VLINE_START_END__VLINE_START__SHIFT 0x0
+#define LB_VLINE_START_END__VLINE_END_MASK 0x7fff0000
+#define LB_VLINE_START_END__VLINE_END__SHIFT 0x10
+#define LB_VLINE_START_END__VLINE_INV_MASK 0x80000000
+#define LB_VLINE_START_END__VLINE_INV__SHIFT 0x1f
+#define LB_VLINE2_START_END__VLINE2_START_MASK 0x3fff
+#define LB_VLINE2_START_END__VLINE2_START__SHIFT 0x0
+#define LB_VLINE2_START_END__VLINE2_END_MASK 0x7fff0000
+#define LB_VLINE2_START_END__VLINE2_END__SHIFT 0x10
+#define LB_VLINE2_START_END__VLINE2_INV_MASK 0x80000000
+#define LB_VLINE2_START_END__VLINE2_INV__SHIFT 0x1f
+#define LB_V_COUNTER__V_COUNTER_MASK 0x7fff
+#define LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define LB_SNAPSHOT_V_COUNTER__SNAPSHOT_V_COUNTER_MASK 0x7fff
+#define LB_SNAPSHOT_V_COUNTER__SNAPSHOT_V_COUNTER__SHIFT 0x0
+#define LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK 0x1
+#define LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK__SHIFT 0x0
+#define LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK 0x10
+#define LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK__SHIFT 0x4
+#define LB_INTERRUPT_MASK__VLINE2_INTERRUPT_MASK_MASK 0x100
+#define LB_INTERRUPT_MASK__VLINE2_INTERRUPT_MASK__SHIFT 0x8
+#define LB_VLINE_STATUS__VLINE_OCCURRED_MASK 0x1
+#define LB_VLINE_STATUS__VLINE_OCCURRED__SHIFT 0x0
+#define LB_VLINE_STATUS__VLINE_ACK_MASK 0x10
+#define LB_VLINE_STATUS__VLINE_ACK__SHIFT 0x4
+#define LB_VLINE_STATUS__VLINE_STAT_MASK 0x1000
+#define LB_VLINE_STATUS__VLINE_STAT__SHIFT 0xc
+#define LB_VLINE_STATUS__VLINE_INTERRUPT_MASK 0x10000
+#define LB_VLINE_STATUS__VLINE_INTERRUPT__SHIFT 0x10
+#define LB_VLINE_STATUS__VLINE_INTERRUPT_TYPE_MASK 0x20000
+#define LB_VLINE_STATUS__VLINE_INTERRUPT_TYPE__SHIFT 0x11
+#define LB_VLINE2_STATUS__VLINE2_OCCURRED_MASK 0x1
+#define LB_VLINE2_STATUS__VLINE2_OCCURRED__SHIFT 0x0
+#define LB_VLINE2_STATUS__VLINE2_ACK_MASK 0x10
+#define LB_VLINE2_STATUS__VLINE2_ACK__SHIFT 0x4
+#define LB_VLINE2_STATUS__VLINE2_STAT_MASK 0x1000
+#define LB_VLINE2_STATUS__VLINE2_STAT__SHIFT 0xc
+#define LB_VLINE2_STATUS__VLINE2_INTERRUPT_MASK 0x10000
+#define LB_VLINE2_STATUS__VLINE2_INTERRUPT__SHIFT 0x10
+#define LB_VLINE2_STATUS__VLINE2_INTERRUPT_TYPE_MASK 0x20000
+#define LB_VLINE2_STATUS__VLINE2_INTERRUPT_TYPE__SHIFT 0x11
+#define LB_VBLANK_STATUS__VBLANK_OCCURRED_MASK 0x1
+#define LB_VBLANK_STATUS__VBLANK_OCCURRED__SHIFT 0x0
+#define LB_VBLANK_STATUS__VBLANK_ACK_MASK 0x10
+#define LB_VBLANK_STATUS__VBLANK_ACK__SHIFT 0x4
+#define LB_VBLANK_STATUS__VBLANK_STAT_MASK 0x1000
+#define LB_VBLANK_STATUS__VBLANK_STAT__SHIFT 0xc
+#define LB_VBLANK_STATUS__VBLANK_INTERRUPT_MASK 0x10000
+#define LB_VBLANK_STATUS__VBLANK_INTERRUPT__SHIFT 0x10
+#define LB_VBLANK_STATUS__VBLANK_INTERRUPT_TYPE_MASK 0x20000
+#define LB_VBLANK_STATUS__VBLANK_INTERRUPT_TYPE__SHIFT 0x11
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL_MASK 0x3
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL__SHIFT 0x0
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL2_MASK 0x10
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_SEL2__SHIFT 0x4
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_DELAY_MASK 0xff00
+#define LB_SYNC_RESET_SEL__LB_SYNC_RESET_DELAY__SHIFT 0x8
+#define LB_SYNC_RESET_SEL__LB_SYNC_DURATION_MASK 0xc00000
+#define LB_SYNC_RESET_SEL__LB_SYNC_DURATION__SHIFT 0x16
+#define LB_BLACK_KEYER_R_CR__LB_BLACK_KEYER_R_CR_MASK 0xfff0
+#define LB_BLACK_KEYER_R_CR__LB_BLACK_KEYER_R_CR__SHIFT 0x4
+#define LB_BLACK_KEYER_G_Y__LB_BLACK_KEYER_G_Y_MASK 0xfff0
+#define LB_BLACK_KEYER_G_Y__LB_BLACK_KEYER_G_Y__SHIFT 0x4
+#define LB_BLACK_KEYER_B_CB__LB_BLACK_KEYER_B_CB_MASK 0xfff0
+#define LB_BLACK_KEYER_B_CB__LB_BLACK_KEYER_B_CB__SHIFT 0x4
+#define LB_KEYER_COLOR_CTRL__LB_KEYER_COLOR_EN_MASK 0x1
+#define LB_KEYER_COLOR_CTRL__LB_KEYER_COLOR_EN__SHIFT 0x0
+#define LB_KEYER_COLOR_CTRL__LB_KEYER_COLOR_REP_EN_MASK 0x100
+#define LB_KEYER_COLOR_CTRL__LB_KEYER_COLOR_REP_EN__SHIFT 0x8
+#define LB_KEYER_COLOR_R_CR__LB_KEYER_COLOR_R_CR_MASK 0xfff0
+#define LB_KEYER_COLOR_R_CR__LB_KEYER_COLOR_R_CR__SHIFT 0x4
+#define LB_KEYER_COLOR_G_Y__LB_KEYER_COLOR_G_Y_MASK 0xfff0
+#define LB_KEYER_COLOR_G_Y__LB_KEYER_COLOR_G_Y__SHIFT 0x4
+#define LB_KEYER_COLOR_B_CB__LB_KEYER_COLOR_B_CB_MASK 0xfff0
+#define LB_KEYER_COLOR_B_CB__LB_KEYER_COLOR_B_CB__SHIFT 0x4
+#define LB_KEYER_COLOR_REP_R_CR__LB_KEYER_COLOR_REP_R_CR_MASK 0xfff0
+#define LB_KEYER_COLOR_REP_R_CR__LB_KEYER_COLOR_REP_R_CR__SHIFT 0x4
+#define LB_KEYER_COLOR_REP_G_Y__LB_KEYER_COLOR_REP_G_Y_MASK 0xfff0
+#define LB_KEYER_COLOR_REP_G_Y__LB_KEYER_COLOR_REP_G_Y__SHIFT 0x4
+#define LB_KEYER_COLOR_REP_B_CB__LB_KEYER_COLOR_REP_B_CB_MASK 0xfff0
+#define LB_KEYER_COLOR_REP_B_CB__LB_KEYER_COLOR_REP_B_CB__SHIFT 0x4
+#define LB_BUFFER_LEVEL_STATUS__REQ_FIFO_LEVEL_MASK 0x3f
+#define LB_BUFFER_LEVEL_STATUS__REQ_FIFO_LEVEL__SHIFT 0x0
+#define LB_BUFFER_LEVEL_STATUS__REQ_FIFO_FULL_CNTL_MASK 0xfc00
+#define LB_BUFFER_LEVEL_STATUS__REQ_FIFO_FULL_CNTL__SHIFT 0xa
+#define LB_BUFFER_LEVEL_STATUS__DATA_BUFFER_LEVEL_MASK 0xfff0000
+#define LB_BUFFER_LEVEL_STATUS__DATA_BUFFER_LEVEL__SHIFT 0x10
+#define LB_BUFFER_LEVEL_STATUS__DATA_FIFO_FULL_CNTL_MASK 0xf0000000
+#define LB_BUFFER_LEVEL_STATUS__DATA_FIFO_FULL_CNTL__SHIFT 0x1c
+#define LB_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_ON_MASK 0xfff
+#define LB_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_ON__SHIFT 0x0
+#define LB_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_OFF_MASK 0xfff0000
+#define LB_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_OFF__SHIFT 0x10
+#define LB_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_LEVEL_MASK 0xfff
+#define LB_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_LEVEL__SHIFT 0x0
+#define LB_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_STAT_MASK 0x10000
+#define LB_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_STAT__SHIFT 0x10
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_MARGIN_MASK 0xf
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_MARGIN__SHIFT 0x0
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_STAT_MASK 0x10
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_STAT__SHIFT 0x4
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_OCCURRED_MASK 0x100
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_OCCURRED__SHIFT 0x8
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_ACK_MASK 0x1000
+#define LB_BUFFER_STATUS__LB_BUFFER_EMPTY_ACK__SHIFT 0xc
+#define LB_BUFFER_STATUS__LB_BUFFER_FULL_STAT_MASK 0x10000
+#define LB_BUFFER_STATUS__LB_BUFFER_FULL_STAT__SHIFT 0x10
+#define LB_BUFFER_STATUS__LB_BUFFER_FULL_OCCURRED_MASK 0x100000
+#define LB_BUFFER_STATUS__LB_BUFFER_FULL_OCCURRED__SHIFT 0x14
+#define LB_BUFFER_STATUS__LB_BUFFER_FULL_ACK_MASK 0x1000000
+#define LB_BUFFER_STATUS__LB_BUFFER_FULL_ACK__SHIFT 0x18
+#define LB_NO_OUTSTANDING_REQ_STATUS__LB_NO_OUTSTANDING_REQ_STAT_MASK 0x1
+#define LB_NO_OUTSTANDING_REQ_STATUS__LB_NO_OUTSTANDING_REQ_STAT__SHIFT 0x0
+#define MVP_AFR_FLIP_MODE__MVP_AFR_FLIP_MODE_MASK 0x3
+#define MVP_AFR_FLIP_MODE__MVP_AFR_FLIP_MODE__SHIFT 0x0
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_NUM_ENTRIES_MASK 0xf
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_NUM_ENTRIES__SHIFT 0x0
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_MASK 0x10
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET__SHIFT 0x4
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_FLAG_MASK 0x100
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_FLAG__SHIFT 0x8
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_ACK_MASK 0x1000
+#define MVP_AFR_FLIP_FIFO_CNTL__MVP_AFR_FLIP_FIFO_RESET_ACK__SHIFT 0xc
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT_MODE_MASK 0x3
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT_MODE__SHIFT 0x0
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT_MASK 0x7fff00
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_INSERT__SHIFT 0x8
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_OFFSET_MASK 0x3f000000
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_LINE_NUM_OFFSET__SHIFT 0x18
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_AUTO_ENABLE_MASK 0x40000000
+#define MVP_FLIP_LINE_NUM_INSERT__MVP_FLIP_AUTO_ENABLE__SHIFT 0x1e
+#define DC_MVP_LB_CONTROL__MVP_SWAP_LOCK_IN_MODE_MASK 0x3
+#define DC_MVP_LB_CONTROL__MVP_SWAP_LOCK_IN_MODE__SHIFT 0x0
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_SEL_MASK 0x100
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_SEL__SHIFT 0x8
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ONE_MASK 0x1000
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ONE__SHIFT 0xc
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ZERO_MASK 0x10000
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_OUT_FORCE_ZERO__SHIFT 0x10
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_STATUS_MASK 0x100000
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_STATUS__SHIFT 0x14
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_IN_CAP_MASK 0x10000000
+#define DC_MVP_LB_CONTROL__DC_MVP_SWAP_LOCK_IN_CAP__SHIFT 0x1c
+#define DC_MVP_LB_CONTROL__DC_MVP_SPARE_FLOPS_MASK 0x80000000
+#define DC_MVP_LB_CONTROL__DC_MVP_SPARE_FLOPS__SHIFT 0x1f
+#define LB_DEBUG__LB_DEBUG_MASK 0xffffffff
+#define LB_DEBUG__LB_DEBUG__SHIFT 0x0
+#define LB_DEBUG2__LB_DEBUG2_MASK 0xffffffff
+#define LB_DEBUG2__LB_DEBUG2__SHIFT 0x0
+#define LB_DEBUG3__LB_DEBUG3_MASK 0xffffffff
+#define LB_DEBUG3__LB_DEBUG3__SHIFT 0x0
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_INDEX_MASK 0xff
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_INDEX__SHIFT 0x0
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define LB_TEST_DEBUG_INDEX__LB_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define LB_TEST_DEBUG_DATA__LB_TEST_DEBUG_DATA_MASK 0xffffffff
+#define LB_TEST_DEBUG_DATA__LB_TEST_DEBUG_DATA__SHIFT 0x0
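+/* Usage sketch (illustrative only; 'lb_int_mask' is an assumed local
+ * register value, not a symbol from this header): field writes are
+ * read-modify-write against the same _MASK/__SHIFT pair, e.g. enabling
+ * the VLINE interrupt mask bit:
+ *
+ *   lb_int_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
+ *   lb_int_mask |= (1 << LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK__SHIFT) &
+ *                  LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
+ */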
+#define LBV_DATA_FORMAT__PIXEL_DEPTH_MASK 0x3
+#define LBV_DATA_FORMAT__PIXEL_DEPTH__SHIFT 0x0
+#define LBV_DATA_FORMAT__PIXEL_EXPAN_MODE_MASK 0x4
+#define LBV_DATA_FORMAT__PIXEL_EXPAN_MODE__SHIFT 0x2
+#define LBV_DATA_FORMAT__INTERLEAVE_EN_MASK 0x8
+#define LBV_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x3
+#define LBV_DATA_FORMAT__PIXEL_REDUCE_MODE_MASK 0x10
+#define LBV_DATA_FORMAT__PIXEL_REDUCE_MODE__SHIFT 0x4
+#define LBV_DATA_FORMAT__DYNAMIC_PIXEL_DEPTH_MASK 0x20
+#define LBV_DATA_FORMAT__DYNAMIC_PIXEL_DEPTH__SHIFT 0x5
+#define LBV_DATA_FORMAT__DITHER_EN_MASK 0x40
+#define LBV_DATA_FORMAT__DITHER_EN__SHIFT 0x6
+#define LBV_DATA_FORMAT__DOWNSCALE_PREFETCH_EN_MASK 0x80
+#define LBV_DATA_FORMAT__DOWNSCALE_PREFETCH_EN__SHIFT 0x7
+#define LBV_DATA_FORMAT__PREFETCH_MASK 0x1000
+#define LBV_DATA_FORMAT__PREFETCH__SHIFT 0xc
+#define LBV_DATA_FORMAT__REQUEST_MODE_MASK 0x1000000
+#define LBV_DATA_FORMAT__REQUEST_MODE__SHIFT 0x18
+#define LBV_DATA_FORMAT__ALPHA_EN_MASK 0x80000000
+#define LBV_DATA_FORMAT__ALPHA_EN__SHIFT 0x1f
+#define LBV_MEMORY_CTRL__LB_MEMORY_SIZE_MASK 0xfff
+#define LBV_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT 0x0
+#define LBV_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0xf0000
+#define LBV_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define LBV_MEMORY_CTRL__LB_MEMORY_CONFIG_MASK 0x300000
+#define LBV_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT 0x14
+#define LBV_MEMORY_SIZE_STATUS__LB_MEMORY_SIZE_STATUS_MASK 0xfff
+#define LBV_MEMORY_SIZE_STATUS__LB_MEMORY_SIZE_STATUS__SHIFT 0x0
+#define LBV_DESKTOP_HEIGHT__DESKTOP_HEIGHT_MASK 0x7fff
+#define LBV_DESKTOP_HEIGHT__DESKTOP_HEIGHT__SHIFT 0x0
+#define LBV_VLINE_START_END__VLINE_START_MASK 0x3fff
+#define LBV_VLINE_START_END__VLINE_START__SHIFT 0x0
+#define LBV_VLINE_START_END__VLINE_END_MASK 0x7fff0000
+#define LBV_VLINE_START_END__VLINE_END__SHIFT 0x10
+#define LBV_VLINE_START_END__VLINE_INV_MASK 0x80000000
+#define LBV_VLINE_START_END__VLINE_INV__SHIFT 0x1f
+#define LBV_VLINE2_START_END__VLINE2_START_MASK 0x3fff
+#define LBV_VLINE2_START_END__VLINE2_START__SHIFT 0x0
+#define LBV_VLINE2_START_END__VLINE2_END_MASK 0x7fff0000
+#define LBV_VLINE2_START_END__VLINE2_END__SHIFT 0x10
+#define LBV_VLINE2_START_END__VLINE2_INV_MASK 0x80000000
+#define LBV_VLINE2_START_END__VLINE2_INV__SHIFT 0x1f
+#define LBV_V_COUNTER__V_COUNTER_MASK 0x7fff
+#define LBV_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define LBV_SNAPSHOT_V_COUNTER__SNAPSHOT_V_COUNTER_MASK 0x7fff
+#define LBV_SNAPSHOT_V_COUNTER__SNAPSHOT_V_COUNTER__SHIFT 0x0
+#define LBV_V_COUNTER_CHROMA__V_COUNTER_CHROMA_MASK 0x7fff
+#define LBV_V_COUNTER_CHROMA__V_COUNTER_CHROMA__SHIFT 0x0
+#define LBV_SNAPSHOT_V_COUNTER_CHROMA__SNAPSHOT_V_COUNTER_CHROMA_MASK 0x7fff
+#define LBV_SNAPSHOT_V_COUNTER_CHROMA__SNAPSHOT_V_COUNTER_CHROMA__SHIFT 0x0
+#define LBV_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK 0x1
+#define LBV_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK__SHIFT 0x0
+#define LBV_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK 0x10
+#define LBV_INTERRUPT_MASK__VLINE_INTERRUPT_MASK__SHIFT 0x4
+#define LBV_INTERRUPT_MASK__VLINE2_INTERRUPT_MASK_MASK 0x100
+#define LBV_INTERRUPT_MASK__VLINE2_INTERRUPT_MASK__SHIFT 0x8
+#define LBV_VLINE_STATUS__VLINE_OCCURRED_MASK 0x1
+#define LBV_VLINE_STATUS__VLINE_OCCURRED__SHIFT 0x0
+#define LBV_VLINE_STATUS__VLINE_ACK_MASK 0x10
+#define LBV_VLINE_STATUS__VLINE_ACK__SHIFT 0x4
+#define LBV_VLINE_STATUS__VLINE_STAT_MASK 0x1000
+#define LBV_VLINE_STATUS__VLINE_STAT__SHIFT 0xc
+#define LBV_VLINE_STATUS__VLINE_INTERRUPT_MASK 0x10000
+#define LBV_VLINE_STATUS__VLINE_INTERRUPT__SHIFT 0x10
+#define LBV_VLINE_STATUS__VLINE_INTERRUPT_TYPE_MASK 0x20000
+#define LBV_VLINE_STATUS__VLINE_INTERRUPT_TYPE__SHIFT 0x11
+#define LBV_VLINE2_STATUS__VLINE2_OCCURRED_MASK 0x1
+#define LBV_VLINE2_STATUS__VLINE2_OCCURRED__SHIFT 0x0
+#define LBV_VLINE2_STATUS__VLINE2_ACK_MASK 0x10
+#define LBV_VLINE2_STATUS__VLINE2_ACK__SHIFT 0x4
+#define LBV_VLINE2_STATUS__VLINE2_STAT_MASK 0x1000
+#define LBV_VLINE2_STATUS__VLINE2_STAT__SHIFT 0xc
+#define LBV_VLINE2_STATUS__VLINE2_INTERRUPT_MASK 0x10000
+#define LBV_VLINE2_STATUS__VLINE2_INTERRUPT__SHIFT 0x10
+#define LBV_VLINE2_STATUS__VLINE2_INTERRUPT_TYPE_MASK 0x20000
+#define LBV_VLINE2_STATUS__VLINE2_INTERRUPT_TYPE__SHIFT 0x11
+#define LBV_VBLANK_STATUS__VBLANK_OCCURRED_MASK 0x1
+#define LBV_VBLANK_STATUS__VBLANK_OCCURRED__SHIFT 0x0
+#define LBV_VBLANK_STATUS__VBLANK_ACK_MASK 0x10
+#define LBV_VBLANK_STATUS__VBLANK_ACK__SHIFT 0x4
+#define LBV_VBLANK_STATUS__VBLANK_STAT_MASK 0x1000
+#define LBV_VBLANK_STATUS__VBLANK_STAT__SHIFT 0xc
+#define LBV_VBLANK_STATUS__VBLANK_INTERRUPT_MASK 0x10000
+#define LBV_VBLANK_STATUS__VBLANK_INTERRUPT__SHIFT 0x10
+#define LBV_VBLANK_STATUS__VBLANK_INTERRUPT_TYPE_MASK 0x20000
+#define LBV_VBLANK_STATUS__VBLANK_INTERRUPT_TYPE__SHIFT 0x11
+#define LBV_SYNC_RESET_SEL__LB_SYNC_RESET_SEL_MASK 0x3
+#define LBV_SYNC_RESET_SEL__LB_SYNC_RESET_SEL__SHIFT 0x0
+#define LBV_SYNC_RESET_SEL__LB_SYNC_RESET_SEL2_MASK 0x10
+#define LBV_SYNC_RESET_SEL__LB_SYNC_RESET_SEL2__SHIFT 0x4
+#define LBV_SYNC_RESET_SEL__LB_SYNC_RESET_DELAY_MASK 0xff00
+#define LBV_SYNC_RESET_SEL__LB_SYNC_RESET_DELAY__SHIFT 0x8
+#define LBV_SYNC_RESET_SEL__LB_SYNC_DURATION_MASK 0xc00000
+#define LBV_SYNC_RESET_SEL__LB_SYNC_DURATION__SHIFT 0x16
+#define LBV_BLACK_KEYER_R_CR__LB_BLACK_KEYER_R_CR_MASK 0xfff0
+#define LBV_BLACK_KEYER_R_CR__LB_BLACK_KEYER_R_CR__SHIFT 0x4
+#define LBV_BLACK_KEYER_G_Y__LB_BLACK_KEYER_G_Y_MASK 0xfff0
+#define LBV_BLACK_KEYER_G_Y__LB_BLACK_KEYER_G_Y__SHIFT 0x4
+#define LBV_BLACK_KEYER_B_CB__LB_BLACK_KEYER_B_CB_MASK 0xfff0
+#define LBV_BLACK_KEYER_B_CB__LB_BLACK_KEYER_B_CB__SHIFT 0x4
+#define LBV_KEYER_COLOR_CTRL__LB_KEYER_COLOR_EN_MASK 0x1
+#define LBV_KEYER_COLOR_CTRL__LB_KEYER_COLOR_EN__SHIFT 0x0
+#define LBV_KEYER_COLOR_CTRL__LB_KEYER_COLOR_REP_EN_MASK 0x100
+#define LBV_KEYER_COLOR_CTRL__LB_KEYER_COLOR_REP_EN__SHIFT 0x8
+#define LBV_KEYER_COLOR_R_CR__LB_KEYER_COLOR_R_CR_MASK 0xfff0
+#define LBV_KEYER_COLOR_R_CR__LB_KEYER_COLOR_R_CR__SHIFT 0x4
+#define LBV_KEYER_COLOR_G_Y__LB_KEYER_COLOR_G_Y_MASK 0xfff0
+#define LBV_KEYER_COLOR_G_Y__LB_KEYER_COLOR_G_Y__SHIFT 0x4
+#define LBV_KEYER_COLOR_B_CB__LB_KEYER_COLOR_B_CB_MASK 0xfff0
+#define LBV_KEYER_COLOR_B_CB__LB_KEYER_COLOR_B_CB__SHIFT 0x4
+#define LBV_KEYER_COLOR_REP_R_CR__LB_KEYER_COLOR_REP_R_CR_MASK 0xfff0
+#define LBV_KEYER_COLOR_REP_R_CR__LB_KEYER_COLOR_REP_R_CR__SHIFT 0x4
+#define LBV_KEYER_COLOR_REP_G_Y__LB_KEYER_COLOR_REP_G_Y_MASK 0xfff0
+#define LBV_KEYER_COLOR_REP_G_Y__LB_KEYER_COLOR_REP_G_Y__SHIFT 0x4
+#define LBV_KEYER_COLOR_REP_B_CB__LB_KEYER_COLOR_REP_B_CB_MASK 0xfff0
+#define LBV_KEYER_COLOR_REP_B_CB__LB_KEYER_COLOR_REP_B_CB__SHIFT 0x4
+#define LBV_BUFFER_LEVEL_STATUS__REQ_FIFO_LEVEL_MASK 0x3f
+#define LBV_BUFFER_LEVEL_STATUS__REQ_FIFO_LEVEL__SHIFT 0x0
+#define LBV_BUFFER_LEVEL_STATUS__REQ_FIFO_FULL_CNTL_MASK 0xfc00
+#define LBV_BUFFER_LEVEL_STATUS__REQ_FIFO_FULL_CNTL__SHIFT 0xa
+#define LBV_BUFFER_LEVEL_STATUS__DATA_BUFFER_LEVEL_MASK 0xfff0000
+#define LBV_BUFFER_LEVEL_STATUS__DATA_BUFFER_LEVEL__SHIFT 0x10
+#define LBV_BUFFER_LEVEL_STATUS__DATA_FIFO_FULL_CNTL_MASK 0xf0000000
+#define LBV_BUFFER_LEVEL_STATUS__DATA_FIFO_FULL_CNTL__SHIFT 0x1c
+#define LBV_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_ON_MASK 0xfff
+#define LBV_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_ON__SHIFT 0x0
+#define LBV_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_OFF_MASK 0xfff0000
+#define LBV_BUFFER_URGENCY_CTRL__LB_BUFFER_URGENCY_MARK_OFF__SHIFT 0x10
+#define LBV_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_LEVEL_MASK 0xfff
+#define LBV_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_LEVEL__SHIFT 0x0
+#define LBV_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_STAT_MASK 0x10000
+#define LBV_BUFFER_URGENCY_STATUS__LB_BUFFER_URGENCY_STAT__SHIFT 0x10
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_MARGIN_MASK 0xf
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_MARGIN__SHIFT 0x0
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_STAT_MASK 0x10
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_STAT__SHIFT 0x4
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_OCCURRED_MASK 0x100
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_OCCURRED__SHIFT 0x8
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_ACK_MASK 0x1000
+#define LBV_BUFFER_STATUS__LB_BUFFER_EMPTY_ACK__SHIFT 0xc
+#define LBV_BUFFER_STATUS__LB_BUFFER_FULL_STAT_MASK 0x10000
+#define LBV_BUFFER_STATUS__LB_BUFFER_FULL_STAT__SHIFT 0x10
+#define LBV_BUFFER_STATUS__LB_BUFFER_FULL_OCCURRED_MASK 0x100000
+#define LBV_BUFFER_STATUS__LB_BUFFER_FULL_OCCURRED__SHIFT 0x14
+#define LBV_BUFFER_STATUS__LB_BUFFER_FULL_ACK_MASK 0x1000000
+#define LBV_BUFFER_STATUS__LB_BUFFER_FULL_ACK__SHIFT 0x18
+#define LBV_BUFFER_STATUS__LB_ENABLE_HIGH_THROUGHPUT_MASK 0x2000000
+#define LBV_BUFFER_STATUS__LB_ENABLE_HIGH_THROUGHPUT__SHIFT 0x19
+#define LBV_BUFFER_STATUS__LB_HIGH_THROUGHPUT_CNTL_MASK 0x1c000000
+#define LBV_BUFFER_STATUS__LB_HIGH_THROUGHPUT_CNTL__SHIFT 0x1a
+#define LBV_NO_OUTSTANDING_REQ_STATUS__LB_NO_OUTSTANDING_REQ_STAT_MASK 0x1
+#define LBV_NO_OUTSTANDING_REQ_STATUS__LB_NO_OUTSTANDING_REQ_STAT__SHIFT 0x0
+#define LBV_DEBUG__LB_DEBUG_MASK 0xffffffff
+#define LBV_DEBUG__LB_DEBUG__SHIFT 0x0
+#define LBV_DEBUG2__LB_DEBUG2_MASK 0xffffffff
+#define LBV_DEBUG2__LB_DEBUG2__SHIFT 0x0
+#define LBV_DEBUG3__LB_DEBUG3_MASK 0xffffffff
+#define LBV_DEBUG3__LB_DEBUG3__SHIFT 0x0
+#define LBV_TEST_DEBUG_INDEX__LB_TEST_DEBUG_INDEX_MASK 0xff
+#define LBV_TEST_DEBUG_INDEX__LB_TEST_DEBUG_INDEX__SHIFT 0x0
+#define LBV_TEST_DEBUG_INDEX__LB_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define LBV_TEST_DEBUG_INDEX__LB_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define LBV_TEST_DEBUG_DATA__LB_TEST_DEBUG_DATA_MASK 0xffffffff
+#define LBV_TEST_DEBUG_DATA__LB_TEST_DEBUG_DATA__SHIFT 0x0
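/*
 * Illustrative aside (not part of the generated header): every
 * _MASK/__SHIFT pair above describes one register field, and the shift
 * is always the bit index of the mask's lowest set bit. A minimal
 * userspace sanity check of that invariant — assuming this header is on
 * the include path and a GCC/Clang compiler for __builtin_ctz — might
 * look like:
 *
 *	#include <assert.h>
 *	int main(void)
 *	{
 *		// 0x80000000: lowest set bit is bit 31 (0x1f)
 *		assert(LBV_DATA_FORMAT__ALPHA_EN__SHIFT ==
 *		       __builtin_ctz(LBV_DATA_FORMAT__ALPHA_EN_MASK));
 *		return 0;
 *	}
 */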
+#define MVP_CONTROL1__MVP_EN_MASK 0x1
+#define MVP_CONTROL1__MVP_EN__SHIFT 0x0
+#define MVP_CONTROL1__MVP_MIXER_MODE_MASK 0x70
+#define MVP_CONTROL1__MVP_MIXER_MODE__SHIFT 0x4
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL_MASK 0x100
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL__SHIFT 0x8
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL_DELAY_UNTIL_END_OF_BLANK_MASK 0x200
+#define MVP_CONTROL1__MVP_MIXER_SLAVE_SEL_DELAY_UNTIL_END_OF_BLANK__SHIFT 0x9
+#define MVP_CONTROL1__MVP_ARBITRATION_MODE_FOR_AFR_MANUAL_SWITCH_MODE_MASK 0x400
+#define MVP_CONTROL1__MVP_ARBITRATION_MODE_FOR_AFR_MANUAL_SWITCH_MODE__SHIFT 0xa
+#define MVP_CONTROL1__MVP_RATE_CONTROL_MASK 0x1000
+#define MVP_CONTROL1__MVP_RATE_CONTROL__SHIFT 0xc
+#define MVP_CONTROL1__MVP_CHANNEL_CONTROL_MASK 0x10000
+#define MVP_CONTROL1__MVP_CHANNEL_CONTROL__SHIFT 0x10
+#define MVP_CONTROL1__MVP_GPU_CHAIN_LOCATION_MASK 0x300000
+#define MVP_CONTROL1__MVP_GPU_CHAIN_LOCATION__SHIFT 0x14
+#define MVP_CONTROL1__MVP_DISABLE_MSB_EXPAND_MASK 0x1000000
+#define MVP_CONTROL1__MVP_DISABLE_MSB_EXPAND__SHIFT 0x18
+#define MVP_CONTROL1__MVP_30BPP_EN_MASK 0x10000000
+#define MVP_CONTROL1__MVP_30BPP_EN__SHIFT 0x1c
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_A_MASK 0x40000000
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_A__SHIFT 0x1e
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_B_MASK 0x80000000
+#define MVP_CONTROL1__MVP_TERMINATION_CNTL_B__SHIFT 0x1f
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL0_SEL_MASK 0x1
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL0_SEL__SHIFT 0x0
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL2_SEL_MASK 0x10
+#define MVP_CONTROL2__MVP_MUX_DE_DVOCNTL2_SEL__SHIFT 0x4
+#define MVP_CONTROL2__MVP_MUXA_CLK_SEL_MASK 0x100
+#define MVP_CONTROL2__MVP_MUXA_CLK_SEL__SHIFT 0x8
+#define MVP_CONTROL2__MVP_MUXB_CLK_SEL_MASK 0x1000
+#define MVP_CONTROL2__MVP_MUXB_CLK_SEL__SHIFT 0xc
+#define MVP_CONTROL2__MVP_DVOCNTL_MUX_MASK 0x10000
+#define MVP_CONTROL2__MVP_DVOCNTL_MUX__SHIFT 0x10
+#define MVP_CONTROL2__MVP_FLOW_CONTROL_OUT_EN_MASK 0x100000
+#define MVP_CONTROL2__MVP_FLOW_CONTROL_OUT_EN__SHIFT 0x14
+#define MVP_CONTROL2__MVP_SWAP_LOCK_OUT_EN_MASK 0x1000000
+#define MVP_CONTROL2__MVP_SWAP_LOCK_OUT_EN__SHIFT 0x18
+#define MVP_CONTROL2__MVP_SWAP_AB_IN_DC_DDR_MASK 0x10000000
+#define MVP_CONTROL2__MVP_SWAP_AB_IN_DC_DDR__SHIFT 0x1c
+#define MVP_FIFO_CONTROL__MVP_STOP_SLAVE_WM_MASK 0xff
+#define MVP_FIFO_CONTROL__MVP_STOP_SLAVE_WM__SHIFT 0x0
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_WM_MASK 0xff00
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_WM__SHIFT 0x8
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_CNT_MASK 0xff0000
+#define MVP_FIFO_CONTROL__MVP_PAUSE_SLAVE_CNT__SHIFT 0x10
+#define MVP_FIFO_STATUS__MVP_FIFO_LEVEL_MASK 0xff
+#define MVP_FIFO_STATUS__MVP_FIFO_LEVEL__SHIFT 0x0
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_MASK 0x100
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW__SHIFT 0x8
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_OCCURRED_MASK 0x1000
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_OCCURRED__SHIFT 0xc
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_ACK_MASK 0x10000
+#define MVP_FIFO_STATUS__MVP_FIFO_OVERFLOW_ACK__SHIFT 0x10
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_MASK 0x100000
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW__SHIFT 0x14
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_OCCURRED_MASK 0x1000000
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_OCCURRED__SHIFT 0x18
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_ACK_MASK 0x10000000
+#define MVP_FIFO_STATUS__MVP_FIFO_UNDERFLOW_ACK__SHIFT 0x1c
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_MASK_MASK 0x40000000
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_MASK__SHIFT 0x1e
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_INT_STATUS_MASK 0x80000000
+#define MVP_FIFO_STATUS__MVP_FIFO_ERROR_INT_STATUS__SHIFT 0x1f
+#define MVP_SLAVE_STATUS__MVP_SLAVE_PIXELS_PER_LINE_RCVED_MASK 0x1fff
+#define MVP_SLAVE_STATUS__MVP_SLAVE_PIXELS_PER_LINE_RCVED__SHIFT 0x0
+#define MVP_SLAVE_STATUS__MVP_SLAVE_LINES_PER_FRAME_RCVED_MASK 0x1fff0000
+#define MVP_SLAVE_STATUS__MVP_SLAVE_LINES_PER_FRAME_RCVED__SHIFT 0x10
+#define MVP_INBAND_CNTL_CAP__MVP_IGNOR_INBAND_CNTL_MASK 0x1
+#define MVP_INBAND_CNTL_CAP__MVP_IGNOR_INBAND_CNTL__SHIFT 0x0
+#define MVP_INBAND_CNTL_CAP__MVP_PASSING_INBAND_CNTL_EN_MASK 0x10
+#define MVP_INBAND_CNTL_CAP__MVP_PASSING_INBAND_CNTL_EN__SHIFT 0x4
+#define MVP_INBAND_CNTL_CAP__MVP_INBAND_CNTL_CHAR_CAP_MASK 0xffffff00
+#define MVP_INBAND_CNTL_CAP__MVP_INBAND_CNTL_CHAR_CAP__SHIFT 0x8
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_R_MASK 0x3ff
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_R__SHIFT 0x0
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_G_MASK 0xffc00
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_G__SHIFT 0xa
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_B_MASK 0x3ff00000
+#define MVP_BLACK_KEYER__MVP_BLACK_KEYER_B__SHIFT 0x14
+#define MVP_CRC_CNTL__MVP_CRC_BLUE_MASK_MASK 0xff
+#define MVP_CRC_CNTL__MVP_CRC_BLUE_MASK__SHIFT 0x0
+#define MVP_CRC_CNTL__MVP_CRC_GREEN_MASK_MASK 0xff00
+#define MVP_CRC_CNTL__MVP_CRC_GREEN_MASK__SHIFT 0x8
+#define MVP_CRC_CNTL__MVP_CRC_RED_MASK_MASK 0xff0000
+#define MVP_CRC_CNTL__MVP_CRC_RED_MASK__SHIFT 0x10
+#define MVP_CRC_CNTL__MVP_CRC_EN_MASK 0x10000000
+#define MVP_CRC_CNTL__MVP_CRC_EN__SHIFT 0x1c
+#define MVP_CRC_CNTL__MVP_CRC_CONT_EN_MASK 0x20000000
+#define MVP_CRC_CNTL__MVP_CRC_CONT_EN__SHIFT 0x1d
+#define MVP_CRC_CNTL__MVP_DC_DDR_CRC_EVEN_ODD_PIX_SEL_MASK 0x40000000
+#define MVP_CRC_CNTL__MVP_DC_DDR_CRC_EVEN_ODD_PIX_SEL__SHIFT 0x1e
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_BLUE_RESULT_MASK 0xffff
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_BLUE_RESULT__SHIFT 0x0
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_GREEN_RESULT_MASK 0xffff0000
+#define MVP_CRC_RESULT_BLUE_GREEN__MVP_CRC_GREEN_RESULT__SHIFT 0x10
+#define MVP_CRC_RESULT_RED__MVP_CRC_RED_RESULT_MASK 0xffff
+#define MVP_CRC_RESULT_RED__MVP_CRC_RED_RESULT__SHIFT 0x0
+#define MVP_CONTROL3__MVP_RESET_IN_BETWEEN_FRAMES_MASK 0x1
+#define MVP_CONTROL3__MVP_RESET_IN_BETWEEN_FRAMES__SHIFT 0x0
+#define MVP_CONTROL3__MVP_DDR_SC_AB_SEL_MASK 0x10
+#define MVP_CONTROL3__MVP_DDR_SC_AB_SEL__SHIFT 0x4
+#define MVP_CONTROL3__MVP_DDR_SC_B_START_MODE_MASK 0x100
+#define MVP_CONTROL3__MVP_DDR_SC_B_START_MODE__SHIFT 0x8
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ONE_MASK 0x1000
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ONE__SHIFT 0xc
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ZERO_MASK 0x10000
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_OUT_FORCE_ZERO__SHIFT 0x10
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_CASCADE_EN_MASK 0x100000
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_CASCADE_EN__SHIFT 0x14
+#define MVP_CONTROL3__MVP_SWAP_48BIT_EN_MASK 0x1000000
+#define MVP_CONTROL3__MVP_SWAP_48BIT_EN__SHIFT 0x18
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_IN_CAP_MASK 0x10000000
+#define MVP_CONTROL3__MVP_FLOW_CONTROL_IN_CAP__SHIFT 0x1c
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_PIXEL_ERROR_CNT_MASK 0x1fff
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_PIXEL_ERROR_CNT__SHIFT 0x0
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_LINE_ERROR_CNT_MASK 0x1fff0000
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_LINE_ERROR_CNT__SHIFT 0x10
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_DATA_CHK_EN_MASK 0x80000000
+#define MVP_RECEIVE_CNT_CNTL1__MVP_SLAVE_DATA_CHK_EN__SHIFT 0x1f
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT_MASK 0x1fff
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT__SHIFT 0x0
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT_RESET_MASK 0x80000000
+#define MVP_RECEIVE_CNT_CNTL2__MVP_SLAVE_FRAME_ERROR_CNT_RESET__SHIFT 0x1f
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_EN_MASK 0x1
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_EN__SHIFT 0x0
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_EN_MASK 0x2
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_EN__SHIFT 0x1
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_SEL_MASK 0x4
+#define MVP_DEBUG__MVP_SWAP_LOCK_IN_SEL__SHIFT 0x2
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_SEL_MASK 0x8
+#define MVP_DEBUG__MVP_FLOW_CONTROL_IN_SEL__SHIFT 0x3
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_MANUAL_HSYNC_FLIP_MASK 0x10
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_MANUAL_HSYNC_FLIP__SHIFT 0x4
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_AUTO_VSYNC_FLIP_MASK 0x20
+#define MVP_DEBUG__MVP_DIS_FIX_AFR_AUTO_VSYNC_FLIP__SHIFT 0x5
+#define MVP_DEBUG__MVP_EN_FIX_AFR_MANUAL_SWITCH_IN_SFR_MASK 0x40
+#define MVP_DEBUG__MVP_EN_FIX_AFR_MANUAL_SWITCH_IN_SFR__SHIFT 0x6
+#define MVP_DEBUG__MVP_DIS_READ_POINTER_RESET_DELAY_MASK 0x80
+#define MVP_DEBUG__MVP_DIS_READ_POINTER_RESET_DELAY__SHIFT 0x7
+#define MVP_DEBUG__MVP_DEBUG_BITS_MASK 0xffffff00
+#define MVP_DEBUG__MVP_DEBUG_BITS__SHIFT 0x8
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_INDEX_MASK 0xff
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define MVP_TEST_DEBUG_INDEX__MVP_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MVP_TEST_DEBUG_DATA__MVP_TEST_DEBUG_DATA_MASK 0xffffffff
+#define MVP_TEST_DEBUG_DATA__MVP_TEST_DEBUG_DATA__SHIFT 0x0
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A_H_MASK 0x1
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A_H__SHIFT 0x0
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A_MASK 0x1fffffe
+#define MVP_DEBUG_12__IDEC_MVP_DATA_A__SHIFT 0x1
+#define MVP_DEBUG_13__IDED_MVP_DATA_B_H_MASK 0x1
+#define MVP_DEBUG_13__IDED_MVP_DATA_B_H__SHIFT 0x0
+#define MVP_DEBUG_13__IDED_MVP_DATA_B_MASK 0x1fffffe
+#define MVP_DEBUG_13__IDED_MVP_DATA_B__SHIFT 0x1
+#define MVP_DEBUG_13__IDED_START_READ_B_MASK 0x2000000
+#define MVP_DEBUG_13__IDED_START_READ_B__SHIFT 0x19
+#define MVP_DEBUG_13__IDED_READ_FIFO_ENTRY_DE_B_MASK 0x4000000
+#define MVP_DEBUG_13__IDED_READ_FIFO_ENTRY_DE_B__SHIFT 0x1a
+#define MVP_DEBUG_13__IDED_WRITE_ADD_B_MASK 0x38000000
+#define MVP_DEBUG_13__IDED_WRITE_ADD_B__SHIFT 0x1b
+#define MVP_DEBUG_14__IDEE_READ_ADD_MASK 0x7
+#define MVP_DEBUG_14__IDEE_READ_ADD__SHIFT 0x0
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_A_MASK 0x38
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_A__SHIFT 0x3
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_B_MASK 0x1c0
+#define MVP_DEBUG_14__IDEE_WRITE_ADD_B__SHIFT 0x6
+#define MVP_DEBUG_14__IDEE_START_READ_MASK 0x200
+#define MVP_DEBUG_14__IDEE_START_READ__SHIFT 0x9
+#define MVP_DEBUG_14__IDEE_START_READ_B_MASK 0x400
+#define MVP_DEBUG_14__IDEE_START_READ_B__SHIFT 0xa
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_A_MASK 0x800
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_A__SHIFT 0xb
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_B_MASK 0x1000
+#define MVP_DEBUG_14__IDEE_START_INCR_WR_B__SHIFT 0xc
+#define MVP_DEBUG_14__IDEE_WRITE2FIFO_MASK 0x2000
+#define MVP_DEBUG_14__IDEE_WRITE2FIFO__SHIFT 0xd
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE_MASK 0x4000
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE__SHIFT 0xe
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE_B_MASK 0x8000
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENTRY_DE_B__SHIFT 0xf
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE_MASK 0x10000
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE__SHIFT 0x10
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE_B_MASK 0x20000
+#define MVP_DEBUG_14__IDEE_READ_FIFO_DE_B__SHIFT 0x11
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENABLE_MASK 0x40000
+#define MVP_DEBUG_14__IDEE_READ_FIFO_ENABLE__SHIFT 0x12
+#define MVP_DEBUG_14__IDEE_CRTC1_CNTL_CAPTURE_START_A_MASK 0x80000
+#define MVP_DEBUG_14__IDEE_CRTC1_CNTL_CAPTURE_START_A__SHIFT 0x13
+#define MVP_DEBUG_14__IDEE_CRC_PHASE_MASK 0x100000
+#define MVP_DEBUG_14__IDEE_CRC_PHASE__SHIFT 0x14
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WEN_MASK 0x1
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WEN__SHIFT 0x0
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WDATA_MASK 0xfffffff0
+#define MVP_DEBUG_15__IDEF_MVP_ASYNC_FIFO_WDATA__SHIFT 0x4
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_READ_MASK 0x1
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_READ__SHIFT 0x0
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_STOP_LEVEL_MASK 0x2
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_STOP_LEVEL__SHIFT 0x1
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_PAUSE_LEVEL_MASK 0x4
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_EXCEED_PAUSE_LEVEL__SHIFT 0x2
+#define MVP_DEBUG_16__IDCC_FLOW_CONTROL_OUT_MASK 0x8
+#define MVP_DEBUG_16__IDCC_FLOW_CONTROL_OUT__SHIFT 0x3
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_NUM_ENTRIES_MASK 0xff0
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_NUM_ENTRIES__SHIFT 0x4
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_OVERFLOW_MASK 0x1000
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_OVERFLOW__SHIFT 0xc
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_UNDERFLOW_MASK 0x2000
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_FIFO_UNDERFLOW__SHIFT 0xd
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_READ_ADDR_MASK 0xff0000
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_READ_ADDR__SHIFT 0x10
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_WRITE_ADDR_MASK 0xff000000
+#define MVP_DEBUG_16__IDCC_MVP_ASYNC_WRITE_ADDR__SHIFT 0x18
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ_MASK 0x1
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ__SHIFT 0x0
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_PHASE_MASK 0x2
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_PHASE__SHIFT 0x1
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ_DATA_MASK 0xfffffffc
+#define MVP_DEBUG_17__IDCD_MVP_ASYNC_FIFO_READ_DATA__SHIFT 0x2
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_TAP_PAIR_IDX_MASK 0xf
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_PHASE_MASK 0xf00
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_PHASE__SHIFT 0x8
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_FILTER_TYPE_MASK 0x70000
+#define SCL_COEF_RAM_SELECT__SCL_C_RAM_FILTER_TYPE__SHIFT 0x10
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_MASK 0x3fff
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_EN_MASK 0x8000
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_MASK 0x3fff0000
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_EN_MASK 0x80000000
+#define SCL_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define SCL_MODE__SCL_MODE_MASK 0x3
+#define SCL_MODE__SCL_MODE__SHIFT 0x0
+#define SCL_MODE__SCL_PSCL_EN_MASK 0x10
+#define SCL_MODE__SCL_PSCL_EN__SHIFT 0x4
+#define SCL_TAP_CONTROL__SCL_V_NUM_OF_TAPS_MASK 0x7
+#define SCL_TAP_CONTROL__SCL_V_NUM_OF_TAPS__SHIFT 0x0
+#define SCL_TAP_CONTROL__SCL_H_NUM_OF_TAPS_MASK 0xf00
+#define SCL_TAP_CONTROL__SCL_H_NUM_OF_TAPS__SHIFT 0x8
+#define SCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x1
+#define SCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define SCL_CONTROL__SCL_EARLY_EOL_MODE_MASK 0x10
+#define SCL_CONTROL__SCL_EARLY_EOL_MODE__SHIFT 0x4
+#define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x3
+#define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE__SHIFT 0x0
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0xf
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0xf00
+#define SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define SCL_AUTOMATIC_MODE_CONTROL__SCL_V_CALC_AUTO_RATIO_EN_MASK 0x1
+#define SCL_AUTOMATIC_MODE_CONTROL__SCL_V_CALC_AUTO_RATIO_EN__SHIFT 0x0
+#define SCL_AUTOMATIC_MODE_CONTROL__SCL_H_CALC_AUTO_RATIO_EN_MASK 0x10000
+#define SCL_AUTOMATIC_MODE_CONTROL__SCL_H_CALC_AUTO_RATIO_EN__SHIFT 0x10
+#define SCL_HORZ_FILTER_CONTROL__SCL_H_FILTER_PICK_NEAREST_MASK 0x1
+#define SCL_HORZ_FILTER_CONTROL__SCL_H_FILTER_PICK_NEAREST__SHIFT 0x0
+#define SCL_HORZ_FILTER_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x100
+#define SCL_HORZ_FILTER_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x8
+#define SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x3ffffff
+#define SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0xffffff
+#define SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0xf000000
+#define SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
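/*
 * Illustrative aside (not part of the generated header): the INIT
 * register above splits the horizontal accumulator into a 24-bit
 * fraction (0xffffff) and a 4-bit integer part (0xf000000 >> 0x18),
 * which suggests the 26-bit SCALE_RATIO field is a fixed-point
 * source/destination ratio with 24 fractional bits. Under that
 * assumption — an inference, not confirmed by this header alone — a
 * 1.5x downscale ratio would be encoded as:
 *
 *	uint32_t ratio = (uint32_t)(1.5 * (1 << 24)) &
 *			 SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK;
 *	// 1.5 * 2^24 = 0x1800000, well inside the 0x3ffffff mask
 */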
+#define SCL_VERT_FILTER_CONTROL__SCL_V_FILTER_PICK_NEAREST_MASK 0x1
+#define SCL_VERT_FILTER_CONTROL__SCL_V_FILTER_PICK_NEAREST__SHIFT 0x0
+#define SCL_VERT_FILTER_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x100
+#define SCL_VERT_FILTER_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x8
+#define SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x3ffffff
+#define SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0xffffff
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x7000000
+#define SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0xffffff
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x7000000
+#define SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define SCL_ROUND_OFFSET__SCL_ROUND_OFFSET_RGB_Y_MASK 0xffff
+#define SCL_ROUND_OFFSET__SCL_ROUND_OFFSET_RGB_Y__SHIFT 0x0
+#define SCL_ROUND_OFFSET__SCL_ROUND_OFFSET_CBCR_MASK 0xffff0000
+#define SCL_ROUND_OFFSET__SCL_ROUND_OFFSET_CBCR__SHIFT 0x10
+#define SCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x1
+#define SCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define SCL_UPDATE__SCL_UPDATE_TAKEN_MASK 0x100
+#define SCL_UPDATE__SCL_UPDATE_TAKEN__SHIFT 0x8
+#define SCL_UPDATE__SCL_UPDATE_LOCK_MASK 0x10000
+#define SCL_UPDATE__SCL_UPDATE_LOCK__SHIFT 0x10
+#define SCL_UPDATE__SCL_COEF_UPDATE_COMPLETE_MASK 0x1000000
+#define SCL_UPDATE__SCL_COEF_UPDATE_COMPLETE__SHIFT 0x18
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_SCALE_FACTOR_MASK 0x7
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_SCALE_FACTOR__SHIFT 0x0
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_EN_MASK 0x10
+#define SCL_F_SHARP_CONTROL__SCL_HF_SHARP_EN__SHIFT 0x4
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_SCALE_FACTOR_MASK 0x700
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_SCALE_FACTOR__SHIFT 0x8
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_EN_MASK 0x1000
+#define SCL_F_SHARP_CONTROL__SCL_VF_SHARP_EN__SHIFT 0xc
+#define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x1
+#define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x0
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_FLAG_MASK 0x1
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_FLAG__SHIFT 0x0
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_ACK_MASK 0x100
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_ACK__SHIFT 0x8
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_MASK_MASK 0x1000
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_MASK__SHIFT 0xc
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_INT_STATUS_MASK 0x10000
+#define SCL_COEF_RAM_CONFLICT_STATUS__SCL_HOST_CONFLICT_INT_STATUS__SHIFT 0x10
+#define VIEWPORT_START_SECONDARY__VIEWPORT_Y_START_SECONDARY_MASK 0x3fff
+#define VIEWPORT_START_SECONDARY__VIEWPORT_Y_START_SECONDARY__SHIFT 0x0
+#define VIEWPORT_START_SECONDARY__VIEWPORT_X_START_SECONDARY_MASK 0x3fff0000
+#define VIEWPORT_START_SECONDARY__VIEWPORT_X_START_SECONDARY__SHIFT 0x10
+#define VIEWPORT_START__VIEWPORT_Y_START_MASK 0x3fff
+#define VIEWPORT_START__VIEWPORT_Y_START__SHIFT 0x0
+#define VIEWPORT_START__VIEWPORT_X_START_MASK 0x3fff0000
+#define VIEWPORT_START__VIEWPORT_X_START__SHIFT 0x10
+#define VIEWPORT_SIZE__VIEWPORT_HEIGHT_MASK 0x3fff
+#define VIEWPORT_SIZE__VIEWPORT_HEIGHT__SHIFT 0x0
+#define VIEWPORT_SIZE__VIEWPORT_WIDTH_MASK 0x3fff0000
+#define VIEWPORT_SIZE__VIEWPORT_WIDTH__SHIFT 0x10
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x1fff
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1fff0000
+#define EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x1fff
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1fff0000
+#define EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE_MASK 0x1
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE__SHIFT 0x0
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE_ACK_MASK 0x10
+#define SCL_MODE_CHANGE_DET1__SCL_MODE_CHANGE_ACK__SHIFT 0x4
+#define SCL_MODE_CHANGE_DET1__SCL_ALU_H_SCALE_RATIO_MASK 0xfffff80
+#define SCL_MODE_CHANGE_DET1__SCL_ALU_H_SCALE_RATIO__SHIFT 0x7
+#define SCL_MODE_CHANGE_DET2__SCL_ALU_V_SCALE_RATIO_MASK 0x1fffff
+#define SCL_MODE_CHANGE_DET2__SCL_ALU_V_SCALE_RATIO__SHIFT 0x0
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_HEIGHT_MASK 0x3fff
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_HEIGHT__SHIFT 0x0
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_WIDTH_MASK 0x3fff0000
+#define SCL_MODE_CHANGE_DET3__SCL_ALU_SOURCE_WIDTH__SHIFT 0x10
+#define SCL_MODE_CHANGE_MASK__SCL_MODE_CHANGE_MASK_MASK 0x1
+#define SCL_MODE_CHANGE_MASK__SCL_MODE_CHANGE_MASK__SHIFT 0x0
+#define SCL_DEBUG2__SCL_DEBUG_REQ_MODE_MASK 0x1
+#define SCL_DEBUG2__SCL_DEBUG_REQ_MODE__SHIFT 0x0
+#define SCL_DEBUG2__SCL_DEBUG_EOF_MODE_MASK 0x6
+#define SCL_DEBUG2__SCL_DEBUG_EOF_MODE__SHIFT 0x1
+#define SCL_DEBUG2__SCL_DEBUG2_MASK 0xfffffff8
+#define SCL_DEBUG2__SCL_DEBUG2__SHIFT 0x3
+#define SCL_DEBUG__SCL_DEBUG_MASK 0xffffffff
+#define SCL_DEBUG__SCL_DEBUG__SHIFT 0x0
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_INDEX_MASK 0xff
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_INDEX__SHIFT 0x0
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define SCL_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define SCL_TEST_DEBUG_DATA__SCL_TEST_DEBUG_DATA_MASK 0xffffffff
+#define SCL_TEST_DEBUG_DATA__SCL_TEST_DEBUG_DATA__SHIFT 0x0
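/*
 * Illustrative aside (not part of the generated header): drivers do not
 * normally open-code these masks and shifts; amdgpu consumes
 * <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT pairs through
 * token-pasting helpers in the REG_GET_FIELD/REG_SET_FIELD style of
 * amdgpu.h. A sketch of that idiom (the exact in-kernel definitions may
 * differ):
 *
 *	#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
 *	#define REG_FIELD_MASK(reg, field)  reg##__##field##_MASK
 *	#define REG_GET_FIELD(val, reg, field) \
 *		(((val) & REG_FIELD_MASK(reg, field)) >> \
 *		 REG_FIELD_SHIFT(reg, field))
 *	#define REG_SET_FIELD(val, reg, field, fv) \
 *		(((val) & ~REG_FIELD_MASK(reg, field)) | \
 *		 (((fv) << REG_FIELD_SHIFT(reg, field)) & \
 *		  REG_FIELD_MASK(reg, field)))
 *
 *	// e.g. read-modify-write the tap counts defined above:
 *	v = REG_SET_FIELD(v, SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, taps_v);
 *	v = REG_SET_FIELD(v, SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, taps_h);
 */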
+#define SCLV_COEF_RAM_SELECT__SCL_C_RAM_TAP_PAIR_IDX_MASK 0x3
+#define SCLV_COEF_RAM_SELECT__SCL_C_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define SCLV_COEF_RAM_SELECT__SCL_C_RAM_PHASE_MASK 0x7f00
+#define SCLV_COEF_RAM_SELECT__SCL_C_RAM_PHASE__SHIFT 0x8
+#define SCLV_COEF_RAM_SELECT__SCL_C_RAM_FILTER_TYPE_MASK 0x30000
+#define SCLV_COEF_RAM_SELECT__SCL_C_RAM_FILTER_TYPE__SHIFT 0x10
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_MASK 0x3fff
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_EN_MASK 0x8000
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_MASK 0x3fff0000
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_EN_MASK 0x80000000
+#define SCLV_COEF_RAM_TAP_DATA__SCL_C_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define SCLV_MODE__SCL_MODE_MASK 0x3
+#define SCLV_MODE__SCL_MODE__SHIFT 0x0
+#define SCLV_MODE__SCL_MODE_C_MASK 0xc
+#define SCLV_MODE__SCL_MODE_C__SHIFT 0x2
+#define SCLV_MODE__SCL_PSCL_EN_MASK 0x10
+#define SCLV_MODE__SCL_PSCL_EN__SHIFT 0x4
+#define SCLV_MODE__SCL_PSCL_EN_C_MASK 0x20
+#define SCLV_MODE__SCL_PSCL_EN_C__SHIFT 0x5
+#define SCLV_MODE__SCL_INTERLACE_SOURCE_MASK 0x300
+#define SCLV_MODE__SCL_INTERLACE_SOURCE__SHIFT 0x8
+#define SCLV_TAP_CONTROL__SCL_V_NUM_OF_TAPS_MASK 0x7
+#define SCLV_TAP_CONTROL__SCL_V_NUM_OF_TAPS__SHIFT 0x0
+#define SCLV_TAP_CONTROL__SCL_H_NUM_OF_TAPS_MASK 0x70
+#define SCLV_TAP_CONTROL__SCL_H_NUM_OF_TAPS__SHIFT 0x4
+#define SCLV_TAP_CONTROL__SCL_V_NUM_OF_TAPS_C_MASK 0x700
+#define SCLV_TAP_CONTROL__SCL_V_NUM_OF_TAPS_C__SHIFT 0x8
+#define SCLV_TAP_CONTROL__SCL_H_NUM_OF_TAPS_C_MASK 0x7000
+#define SCLV_TAP_CONTROL__SCL_H_NUM_OF_TAPS_C__SHIFT 0xc
+#define SCLV_CONTROL__SCL_BOUNDARY_MODE_MASK 0x1
+#define SCLV_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define SCLV_CONTROL__SCL_EARLY_EOL_MODE_MASK 0x10
+#define SCLV_CONTROL__SCL_EARLY_EOL_MODE__SHIFT 0x4
+#define SCLV_CONTROL__SCL_TOTAL_PHASE_MASK 0x100
+#define SCLV_CONTROL__SCL_TOTAL_PHASE__SHIFT 0x8
+#define SCLV_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0xf
+#define SCLV_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define SCLV_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0xf00
+#define SCLV_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define SCLV_AUTOMATIC_MODE_CONTROL__SCL_V_CALC_AUTO_RATIO_EN_MASK 0x1
+#define SCLV_AUTOMATIC_MODE_CONTROL__SCL_V_CALC_AUTO_RATIO_EN__SHIFT 0x0
+#define SCLV_AUTOMATIC_MODE_CONTROL__SCL_H_CALC_AUTO_RATIO_EN_MASK 0x10000
+#define SCLV_AUTOMATIC_MODE_CONTROL__SCL_H_CALC_AUTO_RATIO_EN__SHIFT 0x10
+#define SCLV_HORZ_FILTER_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x100
+#define SCLV_HORZ_FILTER_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x8
+#define SCLV_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x3ffffff
+#define SCLV_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define SCLV_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0xffffff
+#define SCLV_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define SCLV_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0xf000000
+#define SCLV_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define SCLV_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x3ffffff
+#define SCLV_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define SCLV_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0xffffff
+#define SCLV_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define SCLV_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0xf000000
+#define SCLV_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define SCLV_VERT_FILTER_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x100
+#define SCLV_VERT_FILTER_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x8
+#define SCLV_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x3ffffff
+#define SCLV_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define SCLV_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0xffffff
+#define SCLV_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define SCLV_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x7000000
+#define SCLV_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define SCLV_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0xffffff
+#define SCLV_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define SCLV_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x7000000
+#define SCLV_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define SCLV_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x3ffffff
+#define SCLV_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define SCLV_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0xffffff
+#define SCLV_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define SCLV_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x7000000
+#define SCLV_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define SCLV_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0xffffff
+#define SCLV_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define SCLV_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x7000000
+#define SCLV_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define SCLV_ROUND_OFFSET__SCL_ROUND_OFFSET_RGB_Y_MASK 0xffff
+#define SCLV_ROUND_OFFSET__SCL_ROUND_OFFSET_RGB_Y__SHIFT 0x0
+#define SCLV_ROUND_OFFSET__SCL_ROUND_OFFSET_CBCR_MASK 0xffff0000
+#define SCLV_ROUND_OFFSET__SCL_ROUND_OFFSET_CBCR__SHIFT 0x10
+#define SCLV_UPDATE__SCL_UPDATE_PENDING_MASK 0x1
+#define SCLV_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define SCLV_UPDATE__SCL_UPDATE_TAKEN_MASK 0x100
+#define SCLV_UPDATE__SCL_UPDATE_TAKEN__SHIFT 0x8
+#define SCLV_UPDATE__SCL_UPDATE_LOCK_MASK 0x10000
+#define SCLV_UPDATE__SCL_UPDATE_LOCK__SHIFT 0x10
+#define SCLV_UPDATE__SCL_COEF_UPDATE_COMPLETE_MASK 0x1000000
+#define SCLV_UPDATE__SCL_COEF_UPDATE_COMPLETE__SHIFT 0x18
+#define SCLV_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x1
+#define SCLV_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x0
+#define SCLV_VIEWPORT_START__VIEWPORT_Y_START_MASK 0x3fff
+#define SCLV_VIEWPORT_START__VIEWPORT_Y_START__SHIFT 0x0
+#define SCLV_VIEWPORT_START__VIEWPORT_X_START_MASK 0x3fff0000
+#define SCLV_VIEWPORT_START__VIEWPORT_X_START__SHIFT 0x10
+#define SCLV_VIEWPORT_START_SECONDARY__VIEWPORT_Y_START_SECONDARY_MASK 0x3fff
+#define SCLV_VIEWPORT_START_SECONDARY__VIEWPORT_Y_START_SECONDARY__SHIFT 0x0
+#define SCLV_VIEWPORT_START_SECONDARY__VIEWPORT_X_START_SECONDARY_MASK 0x3fff0000
+#define SCLV_VIEWPORT_START_SECONDARY__VIEWPORT_X_START_SECONDARY__SHIFT 0x10
+#define SCLV_VIEWPORT_SIZE__VIEWPORT_HEIGHT_MASK 0x1fff
+#define SCLV_VIEWPORT_SIZE__VIEWPORT_HEIGHT__SHIFT 0x0
+#define SCLV_VIEWPORT_SIZE__VIEWPORT_WIDTH_MASK 0x1fff0000
+#define SCLV_VIEWPORT_SIZE__VIEWPORT_WIDTH__SHIFT 0x10
+#define SCLV_VIEWPORT_START_C__VIEWPORT_Y_START_C_MASK 0x3fff
+#define SCLV_VIEWPORT_START_C__VIEWPORT_Y_START_C__SHIFT 0x0
+#define SCLV_VIEWPORT_START_C__VIEWPORT_X_START_C_MASK 0x3fff0000
+#define SCLV_VIEWPORT_START_C__VIEWPORT_X_START_C__SHIFT 0x10
+#define SCLV_VIEWPORT_START_SECONDARY_C__VIEWPORT_Y_START_SECONDARY_C_MASK 0x3fff
+#define SCLV_VIEWPORT_START_SECONDARY_C__VIEWPORT_Y_START_SECONDARY_C__SHIFT 0x0
+#define SCLV_VIEWPORT_START_SECONDARY_C__VIEWPORT_X_START_SECONDARY_C_MASK 0x3fff0000
+#define SCLV_VIEWPORT_START_SECONDARY_C__VIEWPORT_X_START_SECONDARY_C__SHIFT 0x10
+#define SCLV_VIEWPORT_SIZE_C__VIEWPORT_HEIGHT_C_MASK 0x1fff
+#define SCLV_VIEWPORT_SIZE_C__VIEWPORT_HEIGHT_C__SHIFT 0x0
+#define SCLV_VIEWPORT_SIZE_C__VIEWPORT_WIDTH_C_MASK 0x1fff0000
+#define SCLV_VIEWPORT_SIZE_C__VIEWPORT_WIDTH_C__SHIFT 0x10
+#define SCLV_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x1fff
+#define SCLV_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define SCLV_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1fff0000
+#define SCLV_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define SCLV_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x1fff
+#define SCLV_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define SCLV_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1fff0000
+#define SCLV_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define SCLV_MODE_CHANGE_DET1__SCL_MODE_CHANGE_MASK 0x1
+#define SCLV_MODE_CHANGE_DET1__SCL_MODE_CHANGE__SHIFT 0x0
+#define SCLV_MODE_CHANGE_DET1__SCL_MODE_CHANGE_ACK_MASK 0x10
+#define SCLV_MODE_CHANGE_DET1__SCL_MODE_CHANGE_ACK__SHIFT 0x4
+#define SCLV_MODE_CHANGE_DET1__SCL_ALU_H_SCALE_RATIO_MASK 0xfffff80
+#define SCLV_MODE_CHANGE_DET1__SCL_ALU_H_SCALE_RATIO__SHIFT 0x7
+#define SCLV_MODE_CHANGE_DET2__SCL_ALU_V_SCALE_RATIO_MASK 0x1fffff
+#define SCLV_MODE_CHANGE_DET2__SCL_ALU_V_SCALE_RATIO__SHIFT 0x0
+#define SCLV_MODE_CHANGE_DET3__SCL_ALU_SOURCE_HEIGHT_MASK 0x3fff
+#define SCLV_MODE_CHANGE_DET3__SCL_ALU_SOURCE_HEIGHT__SHIFT 0x0
+#define SCLV_MODE_CHANGE_DET3__SCL_ALU_SOURCE_WIDTH_MASK 0x3fff0000
+#define SCLV_MODE_CHANGE_DET3__SCL_ALU_SOURCE_WIDTH__SHIFT 0x10
+#define SCLV_MODE_CHANGE_MASK__SCL_MODE_CHANGE_MASK_MASK 0x1
+#define SCLV_MODE_CHANGE_MASK__SCL_MODE_CHANGE_MASK__SHIFT 0x0
+#define SCLV_HORZ_FILTER_INIT_BOT__SCL_H_INIT_FRAC_BOT_MASK 0xffffff
+#define SCLV_HORZ_FILTER_INIT_BOT__SCL_H_INIT_FRAC_BOT__SHIFT 0x0
+#define SCLV_HORZ_FILTER_INIT_BOT__SCL_H_INIT_INT_BOT_MASK 0xf000000
+#define SCLV_HORZ_FILTER_INIT_BOT__SCL_H_INIT_INT_BOT__SHIFT 0x18
+#define SCLV_HORZ_FILTER_INIT_BOT_C__SCL_H_INIT_FRAC_BOT_C_MASK 0xffffff
+#define SCLV_HORZ_FILTER_INIT_BOT_C__SCL_H_INIT_FRAC_BOT_C__SHIFT 0x0
+#define SCLV_HORZ_FILTER_INIT_BOT_C__SCL_H_INIT_INT_BOT_C_MASK 0xf000000
+#define SCLV_HORZ_FILTER_INIT_BOT_C__SCL_H_INIT_INT_BOT_C__SHIFT 0x18
+#define SCLV_DEBUG2__SCL_DEBUG_REQ_MODE_MASK 0x1
+#define SCLV_DEBUG2__SCL_DEBUG_REQ_MODE__SHIFT 0x0
+#define SCLV_DEBUG2__SCL_DEBUG_EOF_MODE_MASK 0x6
+#define SCLV_DEBUG2__SCL_DEBUG_EOF_MODE__SHIFT 0x1
+#define SCLV_DEBUG2__SCL_DEBUG2_MASK 0xfffffff8
+#define SCLV_DEBUG2__SCL_DEBUG2__SHIFT 0x3
+#define SCLV_DEBUG__SCL_DEBUG_MASK 0xffffffff
+#define SCLV_DEBUG__SCL_DEBUG__SHIFT 0x0
+#define SCLV_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_INDEX_MASK 0xff
+#define SCLV_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_INDEX__SHIFT 0x0
+#define SCLV_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define SCLV_TEST_DEBUG_INDEX__SCL_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define SCLV_TEST_DEBUG_DATA__SCL_TEST_DEBUG_DATA_MASK 0xffffffff
+#define SCLV_TEST_DEBUG_DATA__SCL_TEST_DEBUG_DATA__SHIFT 0x0
+#define COL_MAN_UPDATE__COL_MAN_UPDATE_PENDING_MASK 0x1
+#define COL_MAN_UPDATE__COL_MAN_UPDATE_PENDING__SHIFT 0x0
+#define COL_MAN_UPDATE__COL_MAN_UPDATE_TAKEN_MASK 0x2
+#define COL_MAN_UPDATE__COL_MAN_UPDATE_TAKEN__SHIFT 0x1
+#define COL_MAN_UPDATE__COL_MAN_UPDATE_LOCK_MASK 0x10000
+#define COL_MAN_UPDATE__COL_MAN_UPDATE_LOCK__SHIFT 0x10
+#define COL_MAN_UPDATE__COL_MAN_DISABLE_MULTIPLE_UPDATE_MASK 0x1000000
+#define COL_MAN_UPDATE__COL_MAN_DISABLE_MULTIPLE_UPDATE__SHIFT 0x18
+#define COL_MAN_INPUT_CSC_CONTROL__INPUT_CSC_MODE_MASK 0x3
+#define COL_MAN_INPUT_CSC_CONTROL__INPUT_CSC_MODE__SHIFT 0x0
+#define COL_MAN_INPUT_CSC_CONTROL__INPUT_CSC_INPUT_TYPE_MASK 0x300
+#define COL_MAN_INPUT_CSC_CONTROL__INPUT_CSC_INPUT_TYPE__SHIFT 0x8
+#define COL_MAN_INPUT_CSC_CONTROL__INPUT_CSC_CONVERSION_MODE_MASK 0x10000
+#define COL_MAN_INPUT_CSC_CONTROL__INPUT_CSC_CONVERSION_MODE__SHIFT 0x10
+#define INPUT_CSC_C11_C12_A__INPUT_CSC_C11_A_MASK 0xffff
+#define INPUT_CSC_C11_C12_A__INPUT_CSC_C11_A__SHIFT 0x0
+#define INPUT_CSC_C11_C12_A__INPUT_CSC_C12_A_MASK 0xffff0000
+#define INPUT_CSC_C11_C12_A__INPUT_CSC_C12_A__SHIFT 0x10
+#define INPUT_CSC_C13_C14_A__INPUT_CSC_C13_A_MASK 0xffff
+#define INPUT_CSC_C13_C14_A__INPUT_CSC_C13_A__SHIFT 0x0
+#define INPUT_CSC_C13_C14_A__INPUT_CSC_C14_A_MASK 0xffff0000
+#define INPUT_CSC_C13_C14_A__INPUT_CSC_C14_A__SHIFT 0x10
+#define INPUT_CSC_C21_C22_A__INPUT_CSC_C21_A_MASK 0xffff
+#define INPUT_CSC_C21_C22_A__INPUT_CSC_C21_A__SHIFT 0x0
+#define INPUT_CSC_C21_C22_A__INPUT_CSC_C22_A_MASK 0xffff0000
+#define INPUT_CSC_C21_C22_A__INPUT_CSC_C22_A__SHIFT 0x10
+#define INPUT_CSC_C23_C24_A__INPUT_CSC_C23_A_MASK 0xffff
+#define INPUT_CSC_C23_C24_A__INPUT_CSC_C23_A__SHIFT 0x0
+#define INPUT_CSC_C23_C24_A__INPUT_CSC_C24_A_MASK 0xffff0000
+#define INPUT_CSC_C23_C24_A__INPUT_CSC_C24_A__SHIFT 0x10
+#define INPUT_CSC_C31_C32_A__INPUT_CSC_C31_A_MASK 0xffff
+#define INPUT_CSC_C31_C32_A__INPUT_CSC_C31_A__SHIFT 0x0
+#define INPUT_CSC_C31_C32_A__INPUT_CSC_C32_A_MASK 0xffff0000
+#define INPUT_CSC_C31_C32_A__INPUT_CSC_C32_A__SHIFT 0x10
+#define INPUT_CSC_C33_C34_A__INPUT_CSC_C33_A_MASK 0xffff
+#define INPUT_CSC_C33_C34_A__INPUT_CSC_C33_A__SHIFT 0x0
+#define INPUT_CSC_C33_C34_A__INPUT_CSC_C34_A_MASK 0xffff0000
+#define INPUT_CSC_C33_C34_A__INPUT_CSC_C34_A__SHIFT 0x10
+#define INPUT_CSC_C11_C12_B__INPUT_CSC_C11_B_MASK 0xffff
+#define INPUT_CSC_C11_C12_B__INPUT_CSC_C11_B__SHIFT 0x0
+#define INPUT_CSC_C11_C12_B__INPUT_CSC_C12_B_MASK 0xffff0000
+#define INPUT_CSC_C11_C12_B__INPUT_CSC_C12_B__SHIFT 0x10
+#define INPUT_CSC_C13_C14_B__INPUT_CSC_C13_B_MASK 0xffff
+#define INPUT_CSC_C13_C14_B__INPUT_CSC_C13_B__SHIFT 0x0
+#define INPUT_CSC_C13_C14_B__INPUT_CSC_C14_B_MASK 0xffff0000
+#define INPUT_CSC_C13_C14_B__INPUT_CSC_C14_B__SHIFT 0x10
+#define INPUT_CSC_C21_C22_B__INPUT_CSC_C21_B_MASK 0xffff
+#define INPUT_CSC_C21_C22_B__INPUT_CSC_C21_B__SHIFT 0x0
+#define INPUT_CSC_C21_C22_B__INPUT_CSC_C22_B_MASK 0xffff0000
+#define INPUT_CSC_C21_C22_B__INPUT_CSC_C22_B__SHIFT 0x10
+#define INPUT_CSC_C23_C24_B__INPUT_CSC_C23_B_MASK 0xffff
+#define INPUT_CSC_C23_C24_B__INPUT_CSC_C23_B__SHIFT 0x0
+#define INPUT_CSC_C23_C24_B__INPUT_CSC_C24_B_MASK 0xffff0000
+#define INPUT_CSC_C23_C24_B__INPUT_CSC_C24_B__SHIFT 0x10
+#define INPUT_CSC_C31_C32_B__INPUT_CSC_C31_B_MASK 0xffff
+#define INPUT_CSC_C31_C32_B__INPUT_CSC_C31_B__SHIFT 0x0
+#define INPUT_CSC_C31_C32_B__INPUT_CSC_C32_B_MASK 0xffff0000
+#define INPUT_CSC_C31_C32_B__INPUT_CSC_C32_B__SHIFT 0x10
+#define INPUT_CSC_C33_C34_B__INPUT_CSC_C33_B_MASK 0xffff
+#define INPUT_CSC_C33_C34_B__INPUT_CSC_C33_B__SHIFT 0x0
+#define INPUT_CSC_C33_C34_B__INPUT_CSC_C34_B_MASK 0xffff0000
+#define INPUT_CSC_C33_C34_B__INPUT_CSC_C34_B__SHIFT 0x10
+#define PRESCALE_CONTROL__PRESCALE_MODE_MASK 0x3
+#define PRESCALE_CONTROL__PRESCALE_MODE__SHIFT 0x0
+#define PRESCALE_VALUES_R__PRESCALE_BIAS_R_MASK 0xffff
+#define PRESCALE_VALUES_R__PRESCALE_BIAS_R__SHIFT 0x0
+#define PRESCALE_VALUES_R__PRESCALE_SCALE_R_MASK 0xffff0000
+#define PRESCALE_VALUES_R__PRESCALE_SCALE_R__SHIFT 0x10
+#define PRESCALE_VALUES_G__PRESCALE_BIAS_G_MASK 0xffff
+#define PRESCALE_VALUES_G__PRESCALE_BIAS_G__SHIFT 0x0
+#define PRESCALE_VALUES_G__PRESCALE_SCALE_G_MASK 0xffff0000
+#define PRESCALE_VALUES_G__PRESCALE_SCALE_G__SHIFT 0x10
+#define PRESCALE_VALUES_B__PRESCALE_BIAS_B_MASK 0xffff
+#define PRESCALE_VALUES_B__PRESCALE_BIAS_B__SHIFT 0x0
+#define PRESCALE_VALUES_B__PRESCALE_SCALE_B_MASK 0xffff0000
+#define PRESCALE_VALUES_B__PRESCALE_SCALE_B__SHIFT 0x10
+#define COL_MAN_OUTPUT_CSC_CONTROL__OUTPUT_CSC_MODE_MASK 0x7
+#define COL_MAN_OUTPUT_CSC_CONTROL__OUTPUT_CSC_MODE__SHIFT 0x0
+#define OUTPUT_CSC_C11_C12_A__OUTPUT_CSC_C11_A_MASK 0xffff
+#define OUTPUT_CSC_C11_C12_A__OUTPUT_CSC_C11_A__SHIFT 0x0
+#define OUTPUT_CSC_C11_C12_A__OUTPUT_CSC_C12_A_MASK 0xffff0000
+#define OUTPUT_CSC_C11_C12_A__OUTPUT_CSC_C12_A__SHIFT 0x10
+#define OUTPUT_CSC_C13_C14_A__OUTPUT_CSC_C13_A_MASK 0xffff
+#define OUTPUT_CSC_C13_C14_A__OUTPUT_CSC_C13_A__SHIFT 0x0
+#define OUTPUT_CSC_C13_C14_A__OUTPUT_CSC_C14_A_MASK 0xffff0000
+#define OUTPUT_CSC_C13_C14_A__OUTPUT_CSC_C14_A__SHIFT 0x10
+#define OUTPUT_CSC_C21_C22_A__OUTPUT_CSC_C21_A_MASK 0xffff
+#define OUTPUT_CSC_C21_C22_A__OUTPUT_CSC_C21_A__SHIFT 0x0
+#define OUTPUT_CSC_C21_C22_A__OUTPUT_CSC_C22_A_MASK 0xffff0000
+#define OUTPUT_CSC_C21_C22_A__OUTPUT_CSC_C22_A__SHIFT 0x10
+#define OUTPUT_CSC_C23_C24_A__OUTPUT_CSC_C23_A_MASK 0xffff
+#define OUTPUT_CSC_C23_C24_A__OUTPUT_CSC_C23_A__SHIFT 0x0
+#define OUTPUT_CSC_C23_C24_A__OUTPUT_CSC_C24_A_MASK 0xffff0000
+#define OUTPUT_CSC_C23_C24_A__OUTPUT_CSC_C24_A__SHIFT 0x10
+#define OUTPUT_CSC_C31_C32_A__OUTPUT_CSC_C31_A_MASK 0xffff
+#define OUTPUT_CSC_C31_C32_A__OUTPUT_CSC_C31_A__SHIFT 0x0
+#define OUTPUT_CSC_C31_C32_A__OUTPUT_CSC_C32_A_MASK 0xffff0000
+#define OUTPUT_CSC_C31_C32_A__OUTPUT_CSC_C32_A__SHIFT 0x10
+#define OUTPUT_CSC_C33_C34_A__OUTPUT_CSC_C33_A_MASK 0xffff
+#define OUTPUT_CSC_C33_C34_A__OUTPUT_CSC_C33_A__SHIFT 0x0
+#define OUTPUT_CSC_C33_C34_A__OUTPUT_CSC_C34_A_MASK 0xffff0000
+#define OUTPUT_CSC_C33_C34_A__OUTPUT_CSC_C34_A__SHIFT 0x10
+#define OUTPUT_CSC_C11_C12_B__OUTPUT_CSC_C11_B_MASK 0xffff
+#define OUTPUT_CSC_C11_C12_B__OUTPUT_CSC_C11_B__SHIFT 0x0
+#define OUTPUT_CSC_C11_C12_B__OUTPUT_CSC_C12_B_MASK 0xffff0000
+#define OUTPUT_CSC_C11_C12_B__OUTPUT_CSC_C12_B__SHIFT 0x10
+#define OUTPUT_CSC_C13_C14_B__OUTPUT_CSC_C13_B_MASK 0xffff
+#define OUTPUT_CSC_C13_C14_B__OUTPUT_CSC_C13_B__SHIFT 0x0
+#define OUTPUT_CSC_C13_C14_B__OUTPUT_CSC_C14_B_MASK 0xffff0000
+#define OUTPUT_CSC_C13_C14_B__OUTPUT_CSC_C14_B__SHIFT 0x10
+#define OUTPUT_CSC_C21_C22_B__OUTPUT_CSC_C21_B_MASK 0xffff
+#define OUTPUT_CSC_C21_C22_B__OUTPUT_CSC_C21_B__SHIFT 0x0
+#define OUTPUT_CSC_C21_C22_B__OUTPUT_CSC_C22_B_MASK 0xffff0000
+#define OUTPUT_CSC_C21_C22_B__OUTPUT_CSC_C22_B__SHIFT 0x10
+#define OUTPUT_CSC_C23_C24_B__OUTPUT_CSC_C23_B_MASK 0xffff
+#define OUTPUT_CSC_C23_C24_B__OUTPUT_CSC_C23_B__SHIFT 0x0
+#define OUTPUT_CSC_C23_C24_B__OUTPUT_CSC_C24_B_MASK 0xffff0000
+#define OUTPUT_CSC_C23_C24_B__OUTPUT_CSC_C24_B__SHIFT 0x10
+#define OUTPUT_CSC_C31_C32_B__OUTPUT_CSC_C31_B_MASK 0xffff
+#define OUTPUT_CSC_C31_C32_B__OUTPUT_CSC_C31_B__SHIFT 0x0
+#define OUTPUT_CSC_C31_C32_B__OUTPUT_CSC_C32_B_MASK 0xffff0000
+#define OUTPUT_CSC_C31_C32_B__OUTPUT_CSC_C32_B__SHIFT 0x10
+#define OUTPUT_CSC_C33_C34_B__OUTPUT_CSC_C33_B_MASK 0xffff
+#define OUTPUT_CSC_C33_C34_B__OUTPUT_CSC_C33_B__SHIFT 0x0
+#define OUTPUT_CSC_C33_C34_B__OUTPUT_CSC_C34_B_MASK 0xffff0000
+#define OUTPUT_CSC_C33_C34_B__OUTPUT_CSC_C34_B__SHIFT 0x10
+#define DENORM_CLAMP_CONTROL__DENORM_MODE_MASK 0x3
+#define DENORM_CLAMP_CONTROL__DENORM_MODE__SHIFT 0x0
+#define DENORM_CLAMP_CONTROL__DENORM_10BIT_OUT_MASK 0x100
+#define DENORM_CLAMP_CONTROL__DENORM_10BIT_OUT__SHIFT 0x8
+#define DENORM_CLAMP_RANGE_R_CR__RANGE_CLAMP_MAX_R_CR_MASK 0xfff
+#define DENORM_CLAMP_RANGE_R_CR__RANGE_CLAMP_MAX_R_CR__SHIFT 0x0
+#define DENORM_CLAMP_RANGE_R_CR__RANGE_CLAMP_MIN_R_CR_MASK 0xfff000
+#define DENORM_CLAMP_RANGE_R_CR__RANGE_CLAMP_MIN_R_CR__SHIFT 0xc
+#define DENORM_CLAMP_RANGE_G_Y__RANGE_CLAMP_MAX_G_Y_MASK 0xfff
+#define DENORM_CLAMP_RANGE_G_Y__RANGE_CLAMP_MAX_G_Y__SHIFT 0x0
+#define DENORM_CLAMP_RANGE_G_Y__RANGE_CLAMP_MIN_G_Y_MASK 0xfff000
+#define DENORM_CLAMP_RANGE_G_Y__RANGE_CLAMP_MIN_G_Y__SHIFT 0xc
+#define DENORM_CLAMP_RANGE_B_CB__RANGE_CLAMP_MAX_B_CB_MASK 0xfff
+#define DENORM_CLAMP_RANGE_B_CB__RANGE_CLAMP_MAX_B_CB__SHIFT 0x0
+#define DENORM_CLAMP_RANGE_B_CB__RANGE_CLAMP_MIN_B_CB_MASK 0xfff000
+#define DENORM_CLAMP_RANGE_B_CB__RANGE_CLAMP_MIN_B_CB__SHIFT 0xc
+#define COL_MAN_FP_CONVERTED_FIELD__COL_MAN_FP_CONVERTED_FIELD_DATA_MASK 0x3ffff
+#define COL_MAN_FP_CONVERTED_FIELD__COL_MAN_FP_CONVERTED_FIELD_DATA__SHIFT 0x0
+#define COL_MAN_FP_CONVERTED_FIELD__COL_MAN_FP_CONVERTED_FIELD_INDEX_MASK 0x3f00000
+#define COL_MAN_FP_CONVERTED_FIELD__COL_MAN_FP_CONVERTED_FIELD_INDEX__SHIFT 0x14
+#define GAMMA_CORR_CONTROL__GAMMA_CORR_MODE_MASK 0x3
+#define GAMMA_CORR_CONTROL__GAMMA_CORR_MODE__SHIFT 0x0
+#define GAMMA_CORR_LUT_INDEX__GAMMA_CORR_LUT_INDEX_MASK 0xff
+#define GAMMA_CORR_LUT_INDEX__GAMMA_CORR_LUT_INDEX__SHIFT 0x0
+#define GAMMA_CORR_LUT_DATA__GAMMA_CORR_LUT_DATA_MASK 0x7ffff
+#define GAMMA_CORR_LUT_DATA__GAMMA_CORR_LUT_DATA__SHIFT 0x0
+#define GAMMA_CORR_LUT_WRITE_EN_MASK__GAMMA_CORR_LUT_WRITE_EN_MASK_MASK 0x7
+#define GAMMA_CORR_LUT_WRITE_EN_MASK__GAMMA_CORR_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_START_CNTL__GAMMA_CORR_CNTLA_EXP_REGION_START_MASK 0x3ffff
+#define GAMMA_CORR_CNTLA_START_CNTL__GAMMA_CORR_CNTLA_EXP_REGION_START__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_START_CNTL__GAMMA_CORR_CNTLA_EXP_REGION_START_SEGMENT_MASK 0x7f00000
+#define GAMMA_CORR_CNTLA_START_CNTL__GAMMA_CORR_CNTLA_EXP_REGION_START_SEGMENT__SHIFT 0x14
+#define GAMMA_CORR_CNTLA_SLOPE_CNTL__GAMMA_CORR_CNTLA_EXP_REGION_LINEAR_SLOPE_MASK 0x3ffff
+#define GAMMA_CORR_CNTLA_SLOPE_CNTL__GAMMA_CORR_CNTLA_EXP_REGION_LINEAR_SLOPE__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_END_CNTL1__GAMMA_CORR_CNTLA_EXP_REGION_END_MASK 0xffff
+#define GAMMA_CORR_CNTLA_END_CNTL1__GAMMA_CORR_CNTLA_EXP_REGION_END__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_END_CNTL2__GAMMA_CORR_CNTLA_EXP_REGION_END_SLOPE_MASK 0xffff
+#define GAMMA_CORR_CNTLA_END_CNTL2__GAMMA_CORR_CNTLA_EXP_REGION_END_SLOPE__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_END_CNTL2__GAMMA_CORR_CNTLA_EXP_REGION_END_BASE_MASK 0xffff0000
+#define GAMMA_CORR_CNTLA_END_CNTL2__GAMMA_CORR_CNTLA_EXP_REGION_END_BASE__SHIFT 0x10
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION0_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION0_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION1_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION1_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION1_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_0_1__GAMMA_CORR_CNTLA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION2_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION2_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION3_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION3_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION3_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_2_3__GAMMA_CORR_CNTLA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION4_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION4_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION5_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION5_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION5_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_4_5__GAMMA_CORR_CNTLA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION6_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION6_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION7_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION7_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION7_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_6_7__GAMMA_CORR_CNTLA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION8_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION8_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION9_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION9_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION9_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_8_9__GAMMA_CORR_CNTLA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION10_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION10_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION11_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION11_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION11_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_10_11__GAMMA_CORR_CNTLA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION12_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION12_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION13_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION13_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION13_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_12_13__GAMMA_CORR_CNTLA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION14_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION14_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION15_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION15_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION15_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLA_REGION_14_15__GAMMA_CORR_CNTLA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_START_CNTL__GAMMA_CORR_CNTLB_EXP_REGION_START_MASK 0x3ffff
+#define GAMMA_CORR_CNTLB_START_CNTL__GAMMA_CORR_CNTLB_EXP_REGION_START__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_START_CNTL__GAMMA_CORR_CNTLB_EXP_REGION_START_SEGMENT_MASK 0x7f00000
+#define GAMMA_CORR_CNTLB_START_CNTL__GAMMA_CORR_CNTLB_EXP_REGION_START_SEGMENT__SHIFT 0x14
+#define GAMMA_CORR_CNTLB_SLOPE_CNTL__GAMMA_CORR_CNTLB_EXP_REGION_LINEAR_SLOPE_MASK 0x3ffff
+#define GAMMA_CORR_CNTLB_SLOPE_CNTL__GAMMA_CORR_CNTLB_EXP_REGION_LINEAR_SLOPE__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_END_CNTL1__GAMMA_CORR_CNTLB_EXP_REGION_END_MASK 0xffff
+#define GAMMA_CORR_CNTLB_END_CNTL1__GAMMA_CORR_CNTLB_EXP_REGION_END__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_END_CNTL2__GAMMA_CORR_CNTLB_EXP_REGION_END_SLOPE_MASK 0xffff
+#define GAMMA_CORR_CNTLB_END_CNTL2__GAMMA_CORR_CNTLB_EXP_REGION_END_SLOPE__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_END_CNTL2__GAMMA_CORR_CNTLB_EXP_REGION_END_BASE_MASK 0xffff0000
+#define GAMMA_CORR_CNTLB_END_CNTL2__GAMMA_CORR_CNTLB_EXP_REGION_END_BASE__SHIFT 0x10
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION0_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION0_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION1_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION1_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION1_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_0_1__GAMMA_CORR_CNTLB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION2_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION2_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION3_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION3_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION3_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_2_3__GAMMA_CORR_CNTLB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION4_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION4_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION5_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION5_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION5_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_4_5__GAMMA_CORR_CNTLB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION6_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION6_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION7_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION7_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION7_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_6_7__GAMMA_CORR_CNTLB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION8_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION8_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION9_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION9_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION9_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_8_9__GAMMA_CORR_CNTLB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION10_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION10_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION11_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION11_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION11_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_10_11__GAMMA_CORR_CNTLB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION12_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION12_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION13_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION13_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION13_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_12_13__GAMMA_CORR_CNTLB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1b
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION14_LUT_OFFSET_MASK 0xff
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION14_NUM_SEGMENTS_MASK 0x3800
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xb
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION15_LUT_OFFSET_MASK 0x7f8000
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION15_LUT_OFFSET__SHIFT 0xf
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION15_NUM_SEGMENTS_MASK 0x38000000
+#define GAMMA_CORR_CNTLB_REGION_14_15__GAMMA_CORR_CNTLB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1b
+#define PACK_FIFO_ERROR__PACK_FIFO_L_UNDERFLOW_OCCURED_MASK 0x1
+#define PACK_FIFO_ERROR__PACK_FIFO_L_UNDERFLOW_OCCURED__SHIFT 0x0
+#define PACK_FIFO_ERROR__PACK_FIFO_L_UNDERFLOW_ACK_MASK 0x2
+#define PACK_FIFO_ERROR__PACK_FIFO_L_UNDERFLOW_ACK__SHIFT 0x1
+#define PACK_FIFO_ERROR__PACK_FIFO_C_UNDERFLOW_OCCURED_MASK 0x100
+#define PACK_FIFO_ERROR__PACK_FIFO_C_UNDERFLOW_OCCURED__SHIFT 0x8
+#define PACK_FIFO_ERROR__PACK_FIFO_C_UNDERFLOW_ACK_MASK 0x200
+#define PACK_FIFO_ERROR__PACK_FIFO_C_UNDERFLOW_ACK__SHIFT 0x9
+#define PACK_FIFO_ERROR__PACK_FIFO_L_OVERFLOW_OCCURED_MASK 0x10000
+#define PACK_FIFO_ERROR__PACK_FIFO_L_OVERFLOW_OCCURED__SHIFT 0x10
+#define PACK_FIFO_ERROR__PACK_FIFO_L_OVERFLOW_ACK_MASK 0x20000
+#define PACK_FIFO_ERROR__PACK_FIFO_L_OVERFLOW_ACK__SHIFT 0x11
+#define PACK_FIFO_ERROR__PACK_FIFO_C_OVERFLOW_OCCURED_MASK 0x1000000
+#define PACK_FIFO_ERROR__PACK_FIFO_C_OVERFLOW_OCCURED__SHIFT 0x18
+#define PACK_FIFO_ERROR__PACK_FIFO_C_OVERFLOW_ACK_MASK 0x2000000
+#define PACK_FIFO_ERROR__PACK_FIFO_C_OVERFLOW_ACK__SHIFT 0x19
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_UNDERFLOW_OCCURED_MASK 0x1
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_UNDERFLOW_OCCURED__SHIFT 0x0
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_UNDERFLOW_ACK_MASK 0x2
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_UNDERFLOW_ACK__SHIFT 0x1
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_OVERFLOW_OCCURED_MASK 0x100
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_OVERFLOW_OCCURED__SHIFT 0x8
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_OVERFLOW_ACK_MASK 0x200
+#define OUTPUT_FIFO_ERROR__OUTPUT_FIFO_OVERFLOW_ACK__SHIFT 0x9
+#define INPUT_GAMMA_LUT_AUTOFILL__INPUT_GAMMA_LUT_AUTOFILL_MASK 0x1
+#define INPUT_GAMMA_LUT_AUTOFILL__INPUT_GAMMA_LUT_AUTOFILL__SHIFT 0x0
+#define INPUT_GAMMA_LUT_AUTOFILL__INPUT_GAMMA_LUT_AUTOFILL_DONE_MASK 0x2
+#define INPUT_GAMMA_LUT_AUTOFILL__INPUT_GAMMA_LUT_AUTOFILL_DONE__SHIFT 0x1
+#define INPUT_GAMMA_LUT_RW_INDEX__INPUT_GAMMA_LUT_RW_INDEX_MASK 0xff
+#define INPUT_GAMMA_LUT_RW_INDEX__INPUT_GAMMA_LUT_RW_INDEX__SHIFT 0x0
+#define INPUT_GAMMA_LUT_SEQ_COLOR__INPUT_GAMMA_LUT_SEQ_COLOR_MASK 0xffff
+#define INPUT_GAMMA_LUT_SEQ_COLOR__INPUT_GAMMA_LUT_SEQ_COLOR__SHIFT 0x0
+#define INPUT_GAMMA_LUT_PWL_DATA__INPUT_GAMMA_LUT_BASE_MASK 0xffff
+#define INPUT_GAMMA_LUT_PWL_DATA__INPUT_GAMMA_LUT_BASE__SHIFT 0x0
+#define INPUT_GAMMA_LUT_PWL_DATA__INPUT_GAMMA_LUT_DELTA_MASK 0xffff0000
+#define INPUT_GAMMA_LUT_PWL_DATA__INPUT_GAMMA_LUT_DELTA__SHIFT 0x10
+#define INPUT_GAMMA_LUT_30_COLOR__INPUT_GAMMA_LUT_COLOR_10_BLUE_MASK 0x3ff
+#define INPUT_GAMMA_LUT_30_COLOR__INPUT_GAMMA_LUT_COLOR_10_BLUE__SHIFT 0x0
+#define INPUT_GAMMA_LUT_30_COLOR__INPUT_GAMMA_LUT_COLOR_10_GREEN_MASK 0xffc00
+#define INPUT_GAMMA_LUT_30_COLOR__INPUT_GAMMA_LUT_COLOR_10_GREEN__SHIFT 0xa
+#define INPUT_GAMMA_LUT_30_COLOR__INPUT_GAMMA_LUT_COLOR_10_RED_MASK 0x3ff00000
+#define INPUT_GAMMA_LUT_30_COLOR__INPUT_GAMMA_LUT_COLOR_10_RED__SHIFT 0x14
+#define COL_MAN_INPUT_GAMMA_CONTROL1__INPUT_GAMMA_MODE_MASK 0x3
+#define COL_MAN_INPUT_GAMMA_CONTROL1__INPUT_GAMMA_MODE__SHIFT 0x0
+#define COL_MAN_INPUT_GAMMA_CONTROL1__INPUT_GAMMA_LUT_10BIT_BYPASS_EN_MASK 0x4000000
+#define COL_MAN_INPUT_GAMMA_CONTROL1__INPUT_GAMMA_LUT_10BIT_BYPASS_EN__SHIFT 0x1a
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_INC_B_MASK 0x1e
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_INC_B__SHIFT 0x1
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_B_SIGNED_EN_MASK 0x20
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_B_SIGNED_EN__SHIFT 0x5
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_B_FORMAT_MASK 0xc0
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_B_FORMAT__SHIFT 0x6
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_INC_G_MASK 0xf00
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_INC_G__SHIFT 0x8
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_G_SIGNED_EN_MASK 0x1000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_G_SIGNED_EN__SHIFT 0xc
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_G_FORMAT_MASK 0x6000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_G_FORMAT__SHIFT 0xd
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_INC_R_MASK 0x78000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_INC_R__SHIFT 0xf
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_R_SIGNED_EN_MASK 0x80000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_R_SIGNED_EN__SHIFT 0x13
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_R_FORMAT_MASK 0x300000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_DATA_R_FORMAT__SHIFT 0x14
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_RW_MODE_MASK 0x400000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_RW_MODE__SHIFT 0x16
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_WRITE_EN_MASK_MASK 0x3800000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_WRITE_EN_MASK__SHIFT 0x17
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_VGA_ACCESS_ENABLE_MASK 0x4000000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_VGA_ACCESS_ENABLE__SHIFT 0x1a
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_10BIT_BYPASS_DBL_BUF_EN_MASK 0x8000000
+#define COL_MAN_INPUT_GAMMA_CONTROL2__INPUT_GAMMA_LUT_10BIT_BYPASS_DBL_BUF_EN__SHIFT 0x1b
+#define INPUT_GAMMA_BW_OFFSETS_B__INPUT_GAMMA_BLACK_OFFSET_B_MASK 0xffff
+#define INPUT_GAMMA_BW_OFFSETS_B__INPUT_GAMMA_BLACK_OFFSET_B__SHIFT 0x0
+#define INPUT_GAMMA_BW_OFFSETS_B__INPUT_GAMMA_WHITE_OFFSET_B_MASK 0xffff0000
+#define INPUT_GAMMA_BW_OFFSETS_B__INPUT_GAMMA_WHITE_OFFSET_B__SHIFT 0x10
+#define INPUT_GAMMA_BW_OFFSETS_G__INPUT_GAMMA_BLACK_OFFSET_G_MASK 0xffff
+#define INPUT_GAMMA_BW_OFFSETS_G__INPUT_GAMMA_BLACK_OFFSET_G__SHIFT 0x0
+#define INPUT_GAMMA_BW_OFFSETS_G__INPUT_GAMMA_WHITE_OFFSET_G_MASK 0xffff0000
+#define INPUT_GAMMA_BW_OFFSETS_G__INPUT_GAMMA_WHITE_OFFSET_G__SHIFT 0x10
+#define INPUT_GAMMA_BW_OFFSETS_R__INPUT_GAMMA_BLACK_OFFSET_R_MASK 0xffff
+#define INPUT_GAMMA_BW_OFFSETS_R__INPUT_GAMMA_BLACK_OFFSET_R__SHIFT 0x0
+#define INPUT_GAMMA_BW_OFFSETS_R__INPUT_GAMMA_WHITE_OFFSET_R_MASK 0xffff0000
+#define INPUT_GAMMA_BW_OFFSETS_R__INPUT_GAMMA_WHITE_OFFSET_R__SHIFT 0x10
+#define COL_MAN_DEBUG_CONTROL__COL_MAN_GLOBAL_PASSTHROUGH_ENABLE_MASK 0x1
+#define COL_MAN_DEBUG_CONTROL__COL_MAN_GLOBAL_PASSTHROUGH_ENABLE__SHIFT 0x0
+#define COL_MAN_TEST_DEBUG_INDEX__COL_MAN_TEST_DEBUG_INDEX_MASK 0xff
+#define COL_MAN_TEST_DEBUG_INDEX__COL_MAN_TEST_DEBUG_INDEX__SHIFT 0x0
+#define COL_MAN_TEST_DEBUG_INDEX__COL_MAN_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define COL_MAN_TEST_DEBUG_INDEX__COL_MAN_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define COL_MAN_TEST_DEBUG_DATA__COL_MAN_TEST_DEBUG_DATA_MASK 0xffffffff
+#define COL_MAN_TEST_DEBUG_DATA__COL_MAN_TEST_DEBUG_DATA__SHIFT 0x0
+#define UNP_GRPH_ENABLE__GRPH_ENABLE_MASK 0x1
+#define UNP_GRPH_ENABLE__GRPH_ENABLE__SHIFT 0x0
+#define UNP_GRPH_CONTROL__GRPH_DEPTH_MASK 0x3
+#define UNP_GRPH_CONTROL__GRPH_DEPTH__SHIFT 0x0
+#define UNP_GRPH_CONTROL__GRPH_NUM_BANKS_MASK 0xc
+#define UNP_GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT 0x2
+#define UNP_GRPH_CONTROL__GRPH_Z_MASK 0x30
+#define UNP_GRPH_CONTROL__GRPH_Z__SHIFT 0x4
+#define UNP_GRPH_CONTROL__GRPH_BANK_WIDTH_L_MASK 0xc0
+#define UNP_GRPH_CONTROL__GRPH_BANK_WIDTH_L__SHIFT 0x6
+#define UNP_GRPH_CONTROL__GRPH_FORMAT_MASK 0x700
+#define UNP_GRPH_CONTROL__GRPH_FORMAT__SHIFT 0x8
+#define UNP_GRPH_CONTROL__GRPH_BANK_HEIGHT_L_MASK 0x1800
+#define UNP_GRPH_CONTROL__GRPH_BANK_HEIGHT_L__SHIFT 0xb
+#define UNP_GRPH_CONTROL__GRPH_TILE_SPLIT_L_MASK 0xe000
+#define UNP_GRPH_CONTROL__GRPH_TILE_SPLIT_L__SHIFT 0xd
+#define UNP_GRPH_CONTROL__GRPH_ADDRESS_TRANSLATION_ENABLE_MASK 0x10000
+#define UNP_GRPH_CONTROL__GRPH_ADDRESS_TRANSLATION_ENABLE__SHIFT 0x10
+#define UNP_GRPH_CONTROL__GRPH_PRIVILEGED_ACCESS_ENABLE_MASK 0x20000
+#define UNP_GRPH_CONTROL__GRPH_PRIVILEGED_ACCESS_ENABLE__SHIFT 0x11
+#define UNP_GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT_L_MASK 0xc0000
+#define UNP_GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT_L__SHIFT 0x12
+#define UNP_GRPH_CONTROL__GRPH_ARRAY_MODE_MASK 0xf00000
+#define UNP_GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT 0x14
+#define UNP_GRPH_CONTROL__GRPH_PIPE_CONFIG_MASK 0x1f000000
+#define UNP_GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT 0x18
+#define UNP_GRPH_CONTROL__GRPH_MICRO_TILE_MODE_L_MASK 0x60000000
+#define UNP_GRPH_CONTROL__GRPH_MICRO_TILE_MODE_L__SHIFT 0x1d
+#define UNP_GRPH_CONTROL__GRPH_COLOR_EXPANSION_MODE_MASK 0x80000000
+#define UNP_GRPH_CONTROL__GRPH_COLOR_EXPANSION_MODE__SHIFT 0x1f
+#define UNP_GRPH_CONTROL_C__GRPH_BANK_WIDTH_C_MASK 0xc0
+#define UNP_GRPH_CONTROL_C__GRPH_BANK_WIDTH_C__SHIFT 0x6
+#define UNP_GRPH_CONTROL_C__GRPH_BANK_HEIGHT_C_MASK 0x1800
+#define UNP_GRPH_CONTROL_C__GRPH_BANK_HEIGHT_C__SHIFT 0xb
+#define UNP_GRPH_CONTROL_C__GRPH_TILE_SPLIT_C_MASK 0xe000
+#define UNP_GRPH_CONTROL_C__GRPH_TILE_SPLIT_C__SHIFT 0xd
+#define UNP_GRPH_CONTROL_C__GRPH_MACRO_TILE_ASPECT_C_MASK 0xc0000
+#define UNP_GRPH_CONTROL_C__GRPH_MACRO_TILE_ASPECT_C__SHIFT 0x12
+#define UNP_GRPH_CONTROL_C__GRPH_MICRO_TILE_MODE_C_MASK 0x60000000
+#define UNP_GRPH_CONTROL_C__GRPH_MICRO_TILE_MODE_C__SHIFT 0x1d
+#define UNP_GRPH_CONTROL_EXP__VIDEO_FORMAT_MASK 0x7
+#define UNP_GRPH_CONTROL_EXP__VIDEO_FORMAT__SHIFT 0x0
+#define UNP_GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP_MASK 0x3
+#define UNP_GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT 0x0
+#define UNP_GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR_MASK 0x30
+#define UNP_GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT 0x4
+#define UNP_GRPH_SWAP_CNTL__GRPH_GREEN_CROSSBAR_MASK 0xc0
+#define UNP_GRPH_SWAP_CNTL__GRPH_GREEN_CROSSBAR__SHIFT 0x6
+#define UNP_GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR_MASK 0x300
+#define UNP_GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT 0x8
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L__GRPH_PRIMARY_SURFACE_ADDRESS_L_MASK 0xffffff00
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L__GRPH_PRIMARY_SURFACE_ADDRESS_L__SHIFT 0x8
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C__GRPH_PRIMARY_SURFACE_ADDRESS_C_MASK 0xffffff00
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C__GRPH_PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x8
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L_MASK 0xff
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__SHIFT 0x0
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0xff
+#define UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L_MASK 0xffffff00
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_L__SHIFT 0x8
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C_MASK 0xffffff00
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_C__SHIFT 0x8
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L_MASK 0xff
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_L__SHIFT 0x0
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C_MASK 0xff
+#define UNP_GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_BOTTOM_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_L__GRPH_SECONDARY_SURFACE_ADDRESS_L_MASK 0xffffff00
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_L__GRPH_SECONDARY_SURFACE_ADDRESS_L__SHIFT 0x8
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_C__GRPH_SECONDARY_SURFACE_ADDRESS_C_MASK 0xffffff00
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_C__GRPH_SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x8
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L_MASK 0xff
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_L__SHIFT 0x0
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0xff
+#define UNP_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C__GRPH_SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L_MASK 0xffffff00
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_L__SHIFT 0x8
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C_MASK 0xffffff00
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_C__SHIFT 0x8
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L_MASK 0xff
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_L__SHIFT 0x0
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C_MASK 0xff
+#define UNP_GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C__GRPH_SECONDARY_BOTTOM_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define UNP_GRPH_PITCH_L__GRPH_PITCH_L_MASK 0x7fff
+#define UNP_GRPH_PITCH_L__GRPH_PITCH_L__SHIFT 0x0
+#define UNP_GRPH_PITCH_C__GRPH_PITCH_C_MASK 0x7fff
+#define UNP_GRPH_PITCH_C__GRPH_PITCH_C__SHIFT 0x0
+#define UNP_GRPH_SURFACE_OFFSET_X_L__GRPH_SURFACE_OFFSET_X_L_MASK 0x3fff
+#define UNP_GRPH_SURFACE_OFFSET_X_L__GRPH_SURFACE_OFFSET_X_L__SHIFT 0x0
+#define UNP_GRPH_SURFACE_OFFSET_X_C__GRPH_SURFACE_OFFSET_X_C_MASK 0x3fff
+#define UNP_GRPH_SURFACE_OFFSET_X_C__GRPH_SURFACE_OFFSET_X_C__SHIFT 0x0
+#define UNP_GRPH_SURFACE_OFFSET_Y_L__GRPH_SURFACE_OFFSET_Y_L_MASK 0x3fff
+#define UNP_GRPH_SURFACE_OFFSET_Y_L__GRPH_SURFACE_OFFSET_Y_L__SHIFT 0x0
+#define UNP_GRPH_SURFACE_OFFSET_Y_C__GRPH_SURFACE_OFFSET_Y_C_MASK 0x3fff
+#define UNP_GRPH_SURFACE_OFFSET_Y_C__GRPH_SURFACE_OFFSET_Y_C__SHIFT 0x0
+#define UNP_GRPH_X_START_L__GRPH_X_START_L_MASK 0x3fff
+#define UNP_GRPH_X_START_L__GRPH_X_START_L__SHIFT 0x0
+#define UNP_GRPH_X_START_C__GRPH_X_START_C_MASK 0x3fff
+#define UNP_GRPH_X_START_C__GRPH_X_START_C__SHIFT 0x0
+#define UNP_GRPH_Y_START_L__GRPH_Y_START_L_MASK 0x3fff
+#define UNP_GRPH_Y_START_L__GRPH_Y_START_L__SHIFT 0x0
+#define UNP_GRPH_Y_START_C__GRPH_Y_START_C_MASK 0x3fff
+#define UNP_GRPH_Y_START_C__GRPH_Y_START_C__SHIFT 0x0
+#define UNP_GRPH_X_END_L__GRPH_X_END_L_MASK 0x7fff
+#define UNP_GRPH_X_END_L__GRPH_X_END_L__SHIFT 0x0
+#define UNP_GRPH_X_END_C__GRPH_X_END_C_MASK 0x7fff
+#define UNP_GRPH_X_END_C__GRPH_X_END_C__SHIFT 0x0
+#define UNP_GRPH_Y_END_L__GRPH_Y_END_L_MASK 0x7fff
+#define UNP_GRPH_Y_END_L__GRPH_Y_END_L__SHIFT 0x0
+#define UNP_GRPH_Y_END_C__GRPH_Y_END_C_MASK 0x7fff
+#define UNP_GRPH_Y_END_C__GRPH_Y_END_C__SHIFT 0x0
+#define UNP_GRPH_UPDATE__GRPH_MODE_UPDATE_PENDING_MASK 0x1
+#define UNP_GRPH_UPDATE__GRPH_MODE_UPDATE_PENDING__SHIFT 0x0
+#define UNP_GRPH_UPDATE__GRPH_MODE_UPDATE_TAKEN_MASK 0x2
+#define UNP_GRPH_UPDATE__GRPH_MODE_UPDATE_TAKEN__SHIFT 0x1
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK 0x4
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING__SHIFT 0x2
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_UPDATE_TAKEN_MASK 0x8
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_UPDATE_TAKEN__SHIFT 0x3
+#define UNP_GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK 0x10000
+#define UNP_GRPH_UPDATE__GRPH_UPDATE_LOCK__SHIFT 0x10
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_IGNORE_UPDATE_LOCK_MASK 0x100000
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_IGNORE_UPDATE_LOCK__SHIFT 0x14
+#define UNP_GRPH_UPDATE__GRPH_MODE_DISABLE_MULTIPLE_UPDATE_MASK 0x1000000
+#define UNP_GRPH_UPDATE__GRPH_MODE_DISABLE_MULTIPLE_UPDATE__SHIFT 0x18
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE_MASK 0x10000000
+#define UNP_GRPH_UPDATE__GRPH_SURFACE_DISABLE_MULTIPLE_UPDATE__SHIFT 0x1c
+#define UNP_PIPE_OUTSTANDING_REQUEST_LIMIT__UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_L_MASK 0xff
+#define UNP_PIPE_OUTSTANDING_REQUEST_LIMIT__UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_L__SHIFT 0x0
+#define UNP_PIPE_OUTSTANDING_REQUEST_LIMIT__UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_C_MASK 0xff00
+#define UNP_PIPE_OUTSTANDING_REQUEST_LIMIT__UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_C__SHIFT 0x8
+#define UNP_GRPH_SURFACE_ADDRESS_INUSE_L__GRPH_SURFACE_ADDRESS_INUSE_L_MASK 0xffffff00
+#define UNP_GRPH_SURFACE_ADDRESS_INUSE_L__GRPH_SURFACE_ADDRESS_INUSE_L__SHIFT 0x8
+#define UNP_GRPH_SURFACE_ADDRESS_INUSE_C__GRPH_SURFACE_ADDRESS_INUSE_C_MASK 0xffffff00
+#define UNP_GRPH_SURFACE_ADDRESS_INUSE_C__GRPH_SURFACE_ADDRESS_INUSE_C__SHIFT 0x8
+#define UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_L__GRPH_SURFACE_ADDRESS_HIGH_INUSE_L_MASK 0xff
+#define UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_L__GRPH_SURFACE_ADDRESS_HIGH_INUSE_L__SHIFT 0x0
+#define UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_C__GRPH_SURFACE_ADDRESS_HIGH_INUSE_C_MASK 0xff
+#define UNP_GRPH_SURFACE_ADDRESS_HIGH_INUSE_C__GRPH_SURFACE_ADDRESS_HIGH_INUSE_C__SHIFT 0x0
+#define UNP_DVMM_PTE_CONTROL__DVMM_USE_SINGLE_PTE_MASK 0x1
+#define UNP_DVMM_PTE_CONTROL__DVMM_USE_SINGLE_PTE__SHIFT 0x0
+#define UNP_DVMM_PTE_CONTROL__DVMM_PAGE_WIDTH_MASK 0x1e
+#define UNP_DVMM_PTE_CONTROL__DVMM_PAGE_WIDTH__SHIFT 0x1
+#define UNP_DVMM_PTE_CONTROL__DVMM_PAGE_HEIGHT_MASK 0x1e0
+#define UNP_DVMM_PTE_CONTROL__DVMM_PAGE_HEIGHT__SHIFT 0x5
+#define UNP_DVMM_PTE_CONTROL__DVMM_MIN_PTE_BEFORE_FLIP_MASK 0x7fe00
+#define UNP_DVMM_PTE_CONTROL__DVMM_MIN_PTE_BEFORE_FLIP__SHIFT 0x9
+#define UNP_DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE0_MASK 0x100000
+#define UNP_DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE0__SHIFT 0x14
+#define UNP_DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE1_MASK 0x200000
+#define UNP_DVMM_PTE_CONTROL__DVMM_PTE_BUFFER_MODE1__SHIFT 0x15
+#define UNP_GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK 0x1
+#define UNP_GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED__SHIFT 0x0
+#define UNP_GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK 0x100
+#define UNP_GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR__SHIFT 0x8
+#define UNP_GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK 0x1
+#define UNP_GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK__SHIFT 0x0
+#define UNP_GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_TYPE_MASK 0x100
+#define UNP_GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_TYPE__SHIFT 0x8
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_EN_MASK 0x1
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_EN__SHIFT 0x0
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_MODE_MASK 0x30
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_FLIP_MODE__SHIFT 0x4
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STACK_INTERLACE_FLIP_EN_MASK 0x100
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STACK_INTERLACE_FLIP_EN__SHIFT 0x8
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STACK_INTERLACE_FLIP_MODE_MASK 0x3000
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STACK_INTERLACE_FLIP_MODE__SHIFT 0xc
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_SURFACE_PENDING_MASK 0x10000
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_SURFACE_PENDING__SHIFT 0x10
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_SURFACE_PENDING_MASK 0x20000
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_SURFACE_PENDING__SHIFT 0x11
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_BOTTOM_SURFACE_PENDING_MASK 0x40000
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_PRIMARY_BOTTOM_SURFACE_PENDING__SHIFT 0x12
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_BOTTOM_SURFACE_PENDING_MASK 0x80000
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_SECONDARY_BOTTOM_SURFACE_PENDING__SHIFT 0x13
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_SELECT_DISABLE_MASK 0x10000000
+#define UNP_GRPH_STEREOSYNC_FLIP__GRPH_STEREOSYNC_SELECT_DISABLE__SHIFT 0x1c
+#define UNP_FLIP_CONTROL__GRPH_SURFACE_UPDATE_PENDING_MODE_MASK 0x1
+#define UNP_FLIP_CONTROL__GRPH_SURFACE_UPDATE_PENDING_MODE__SHIFT 0x0
+#define UNP_FLIP_CONTROL__UNP_DEBUG_SG_MASK 0xfffffffc
+#define UNP_FLIP_CONTROL__UNP_DEBUG_SG__SHIFT 0x2
+#define UNP_CRC_CONTROL__UNP_CRC_ENABLE_MASK 0x1
+#define UNP_CRC_CONTROL__UNP_CRC_ENABLE__SHIFT 0x0
+#define UNP_CRC_CONTROL__UNP_CRC_SOURCE_SEL_MASK 0x1c
+#define UNP_CRC_CONTROL__UNP_CRC_SOURCE_SEL__SHIFT 0x2
+#define UNP_CRC_CONTROL__UNP_CRC_LINE_SEL_MASK 0x300
+#define UNP_CRC_CONTROL__UNP_CRC_LINE_SEL__SHIFT 0x8
+#define UNP_CRC_MASK__UNP_CRC_MASK_MASK 0xffffffff
+#define UNP_CRC_MASK__UNP_CRC_MASK__SHIFT 0x0
+#define UNP_CRC_CURRENT__UNP_CRC_CURRENT_MASK 0xffffffff
+#define UNP_CRC_CURRENT__UNP_CRC_CURRENT__SHIFT 0x0
+#define UNP_CRC_LAST__UNP_CRC_LAST_MASK 0xffffffff
+#define UNP_CRC_LAST__UNP_CRC_LAST__SHIFT 0x0
+#define UNP_LB_DATA_GAP_BETWEEN_CHUNK__UNP_LB_GAP_BETWEEN_CHUNK_MASK 0x1f0
+#define UNP_LB_DATA_GAP_BETWEEN_CHUNK__UNP_LB_GAP_BETWEEN_CHUNK__SHIFT 0x4
+#define UNP_HW_ROTATION__ROTATION_ANGLE_MASK 0x7
+#define UNP_HW_ROTATION__ROTATION_ANGLE__SHIFT 0x0
+#define UNP_HW_ROTATION__PIXEL_DROP_MASK 0x10
+#define UNP_HW_ROTATION__PIXEL_DROP__SHIFT 0x4
+#define UNP_HW_ROTATION__BUFFER_MODE_MASK 0x100
+#define UNP_HW_ROTATION__BUFFER_MODE__SHIFT 0x8
+#define UNP_DEBUG__UNP_DEBUG_MASK 0xffffffff
+#define UNP_DEBUG__UNP_DEBUG__SHIFT 0x0
+#define UNP_DEBUG2__UNP_DEBUG2_MASK 0xffffffff
+#define UNP_DEBUG2__UNP_DEBUG2__SHIFT 0x0
+#define UNP_DVMM_DEBUG__UNP_L_DVMM_DEBUG_MASK 0xffff
+#define UNP_DVMM_DEBUG__UNP_L_DVMM_DEBUG__SHIFT 0x0
+#define UNP_DVMM_DEBUG__UNP_C_DVMM_DEBUG_MASK 0xffff0000
+#define UNP_DVMM_DEBUG__UNP_C_DVMM_DEBUG__SHIFT 0x10
+#define UNP_TEST_DEBUG_INDEX__UNP_TEST_DEBUG_INDEX_MASK 0xff
+#define UNP_TEST_DEBUG_INDEX__UNP_TEST_DEBUG_INDEX__SHIFT 0x0
+#define UNP_TEST_DEBUG_INDEX__UNP_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define UNP_TEST_DEBUG_INDEX__UNP_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define UNP_TEST_DEBUG_DATA__UNP_TEST_DEBUG_DATA_MASK 0xffffffff
+#define UNP_TEST_DEBUG_DATA__UNP_TEST_DEBUG_DATA__SHIFT 0x0
+#define GENMO_WT__GENMO_MONO_ADDRESS_B_MASK 0x1
+#define GENMO_WT__GENMO_MONO_ADDRESS_B__SHIFT 0x0
+#define GENMO_WT__VGA_RAM_EN_MASK 0x2
+#define GENMO_WT__VGA_RAM_EN__SHIFT 0x1
+#define GENMO_WT__VGA_CKSEL_MASK 0xc
+#define GENMO_WT__VGA_CKSEL__SHIFT 0x2
+#define GENMO_WT__ODD_EVEN_MD_PGSEL_MASK 0x20
+#define GENMO_WT__ODD_EVEN_MD_PGSEL__SHIFT 0x5
+#define GENMO_WT__VGA_HSYNC_POL_MASK 0x40
+#define GENMO_WT__VGA_HSYNC_POL__SHIFT 0x6
+#define GENMO_WT__VGA_VSYNC_POL_MASK 0x80
+#define GENMO_WT__VGA_VSYNC_POL__SHIFT 0x7
+#define GENMO_RD__GENMO_MONO_ADDRESS_B_MASK 0x1
+#define GENMO_RD__GENMO_MONO_ADDRESS_B__SHIFT 0x0
+#define GENMO_RD__VGA_RAM_EN_MASK 0x2
+#define GENMO_RD__VGA_RAM_EN__SHIFT 0x1
+#define GENMO_RD__VGA_CKSEL_MASK 0xc
+#define GENMO_RD__VGA_CKSEL__SHIFT 0x2
+#define GENMO_RD__ODD_EVEN_MD_PGSEL_MASK 0x20
+#define GENMO_RD__ODD_EVEN_MD_PGSEL__SHIFT 0x5
+#define GENMO_RD__VGA_HSYNC_POL_MASK 0x40
+#define GENMO_RD__VGA_HSYNC_POL__SHIFT 0x6
+#define GENMO_RD__VGA_VSYNC_POL_MASK 0x80
+#define GENMO_RD__VGA_VSYNC_POL__SHIFT 0x7
+#define GENENB__BLK_IO_BASE_MASK 0xff
+#define GENENB__BLK_IO_BASE__SHIFT 0x0
+#define GENFC_WT__VSYNC_SEL_W_MASK 0x8
+#define GENFC_WT__VSYNC_SEL_W__SHIFT 0x3
+#define GENFC_RD__VSYNC_SEL_R_MASK 0x8
+#define GENFC_RD__VSYNC_SEL_R__SHIFT 0x3
+#define GENS0__SENSE_SWITCH_MASK 0x10
+#define GENS0__SENSE_SWITCH__SHIFT 0x4
+#define GENS0__CRT_INTR_MASK 0x80
+#define GENS0__CRT_INTR__SHIFT 0x7
+#define GENS1__NO_DISPLAY_MASK 0x1
+#define GENS1__NO_DISPLAY__SHIFT 0x0
+#define GENS1__VGA_VSTATUS_MASK 0x8
+#define GENS1__VGA_VSTATUS__SHIFT 0x3
+#define GENS1__PIXEL_READ_BACK_MASK 0x30
+#define GENS1__PIXEL_READ_BACK__SHIFT 0x4
+#define DAC_DATA__DAC_DATA_MASK 0x3f
+#define DAC_DATA__DAC_DATA__SHIFT 0x0
+#define DAC_MASK__DAC_MASK_MASK 0xff
+#define DAC_MASK__DAC_MASK__SHIFT 0x0
+#define DAC_R_INDEX__DAC_R_INDEX_MASK 0xff
+#define DAC_R_INDEX__DAC_R_INDEX__SHIFT 0x0
+#define DAC_W_INDEX__DAC_W_INDEX_MASK 0xff
+#define DAC_W_INDEX__DAC_W_INDEX__SHIFT 0x0
+#define SEQ8_IDX__SEQ_IDX_MASK 0x7
+#define SEQ8_IDX__SEQ_IDX__SHIFT 0x0
+#define SEQ8_DATA__SEQ_DATA_MASK 0xff
+#define SEQ8_DATA__SEQ_DATA__SHIFT 0x0
+#define SEQ00__SEQ_RST0B_MASK 0x1
+#define SEQ00__SEQ_RST0B__SHIFT 0x0
+#define SEQ00__SEQ_RST1B_MASK 0x2
+#define SEQ00__SEQ_RST1B__SHIFT 0x1
+#define SEQ01__SEQ_DOT8_MASK 0x1
+#define SEQ01__SEQ_DOT8__SHIFT 0x0
+#define SEQ01__SEQ_SHIFT2_MASK 0x4
+#define SEQ01__SEQ_SHIFT2__SHIFT 0x2
+#define SEQ01__SEQ_PCLKBY2_MASK 0x8
+#define SEQ01__SEQ_PCLKBY2__SHIFT 0x3
+#define SEQ01__SEQ_SHIFT4_MASK 0x10
+#define SEQ01__SEQ_SHIFT4__SHIFT 0x4
+#define SEQ01__SEQ_MAXBW_MASK 0x20
+#define SEQ01__SEQ_MAXBW__SHIFT 0x5
+#define SEQ02__SEQ_MAP0_EN_MASK 0x1
+#define SEQ02__SEQ_MAP0_EN__SHIFT 0x0
+#define SEQ02__SEQ_MAP1_EN_MASK 0x2
+#define SEQ02__SEQ_MAP1_EN__SHIFT 0x1
+#define SEQ02__SEQ_MAP2_EN_MASK 0x4
+#define SEQ02__SEQ_MAP2_EN__SHIFT 0x2
+#define SEQ02__SEQ_MAP3_EN_MASK 0x8
+#define SEQ02__SEQ_MAP3_EN__SHIFT 0x3
+#define SEQ03__SEQ_FONT_B1_MASK 0x1
+#define SEQ03__SEQ_FONT_B1__SHIFT 0x0
+#define SEQ03__SEQ_FONT_B2_MASK 0x2
+#define SEQ03__SEQ_FONT_B2__SHIFT 0x1
+#define SEQ03__SEQ_FONT_A1_MASK 0x4
+#define SEQ03__SEQ_FONT_A1__SHIFT 0x2
+#define SEQ03__SEQ_FONT_A2_MASK 0x8
+#define SEQ03__SEQ_FONT_A2__SHIFT 0x3
+#define SEQ03__SEQ_FONT_B0_MASK 0x10
+#define SEQ03__SEQ_FONT_B0__SHIFT 0x4
+#define SEQ03__SEQ_FONT_A0_MASK 0x20
+#define SEQ03__SEQ_FONT_A0__SHIFT 0x5
+#define SEQ04__SEQ_256K_MASK 0x2
+#define SEQ04__SEQ_256K__SHIFT 0x1
+#define SEQ04__SEQ_ODDEVEN_MASK 0x4
+#define SEQ04__SEQ_ODDEVEN__SHIFT 0x2
+#define SEQ04__SEQ_CHAIN_MASK 0x8
+#define SEQ04__SEQ_CHAIN__SHIFT 0x3
+#define CRTC8_IDX__VCRTC_IDX_MASK 0x3f
+#define CRTC8_IDX__VCRTC_IDX__SHIFT 0x0
+#define CRTC8_DATA__VCRTC_DATA_MASK 0xff
+#define CRTC8_DATA__VCRTC_DATA__SHIFT 0x0
+#define CRT00__H_TOTAL_MASK 0xff
+#define CRT00__H_TOTAL__SHIFT 0x0
+#define CRT01__H_DISP_END_MASK 0xff
+#define CRT01__H_DISP_END__SHIFT 0x0
+#define CRT02__H_BLANK_START_MASK 0xff
+#define CRT02__H_BLANK_START__SHIFT 0x0
+#define CRT03__H_BLANK_END_MASK 0x1f
+#define CRT03__H_BLANK_END__SHIFT 0x0
+#define CRT03__H_DE_SKEW_MASK 0x60
+#define CRT03__H_DE_SKEW__SHIFT 0x5
+#define CRT03__CR10CR11_R_DIS_B_MASK 0x80
+#define CRT03__CR10CR11_R_DIS_B__SHIFT 0x7
+#define CRT04__H_SYNC_START_MASK 0xff
+#define CRT04__H_SYNC_START__SHIFT 0x0
+#define CRT05__H_SYNC_END_MASK 0x1f
+#define CRT05__H_SYNC_END__SHIFT 0x0
+#define CRT05__H_SYNC_SKEW_MASK 0x60
+#define CRT05__H_SYNC_SKEW__SHIFT 0x5
+#define CRT05__H_BLANK_END_B5_MASK 0x80
+#define CRT05__H_BLANK_END_B5__SHIFT 0x7
+#define CRT06__V_TOTAL_MASK 0xff
+#define CRT06__V_TOTAL__SHIFT 0x0
+#define CRT07__V_TOTAL_B8_MASK 0x1
+#define CRT07__V_TOTAL_B8__SHIFT 0x0
+#define CRT07__V_DISP_END_B8_MASK 0x2
+#define CRT07__V_DISP_END_B8__SHIFT 0x1
+#define CRT07__V_SYNC_START_B8_MASK 0x4
+#define CRT07__V_SYNC_START_B8__SHIFT 0x2
+#define CRT07__V_BLANK_START_B8_MASK 0x8
+#define CRT07__V_BLANK_START_B8__SHIFT 0x3
+#define CRT07__LINE_CMP_B8_MASK 0x10
+#define CRT07__LINE_CMP_B8__SHIFT 0x4
+#define CRT07__V_TOTAL_B9_MASK 0x20
+#define CRT07__V_TOTAL_B9__SHIFT 0x5
+#define CRT07__V_DISP_END_B9_MASK 0x40
+#define CRT07__V_DISP_END_B9__SHIFT 0x6
+#define CRT07__V_SYNC_START_B9_MASK 0x80
+#define CRT07__V_SYNC_START_B9__SHIFT 0x7
+#define CRT08__ROW_SCAN_START_MASK 0x1f
+#define CRT08__ROW_SCAN_START__SHIFT 0x0
+#define CRT08__BYTE_PAN_MASK 0x60
+#define CRT08__BYTE_PAN__SHIFT 0x5
+#define CRT09__MAX_ROW_SCAN_MASK 0x1f
+#define CRT09__MAX_ROW_SCAN__SHIFT 0x0
+#define CRT09__V_BLANK_START_B9_MASK 0x20
+#define CRT09__V_BLANK_START_B9__SHIFT 0x5
+#define CRT09__LINE_CMP_B9_MASK 0x40
+#define CRT09__LINE_CMP_B9__SHIFT 0x6
+#define CRT09__DOUBLE_CHAR_HEIGHT_MASK 0x80
+#define CRT09__DOUBLE_CHAR_HEIGHT__SHIFT 0x7
+#define CRT0A__CURSOR_START_MASK 0x1f
+#define CRT0A__CURSOR_START__SHIFT 0x0
+#define CRT0A__CURSOR_DISABLE_MASK 0x20
+#define CRT0A__CURSOR_DISABLE__SHIFT 0x5
+#define CRT0B__CURSOR_END_MASK 0x1f
+#define CRT0B__CURSOR_END__SHIFT 0x0
+#define CRT0B__CURSOR_SKEW_MASK 0x60
+#define CRT0B__CURSOR_SKEW__SHIFT 0x5
+#define CRT0C__DISP_START_MASK 0xff
+#define CRT0C__DISP_START__SHIFT 0x0
+#define CRT0D__DISP_START_MASK 0xff
+#define CRT0D__DISP_START__SHIFT 0x0
+#define CRT0E__CURSOR_LOC_HI_MASK 0xff
+#define CRT0E__CURSOR_LOC_HI__SHIFT 0x0
+#define CRT0F__CURSOR_LOC_LO_MASK 0xff
+#define CRT0F__CURSOR_LOC_LO__SHIFT 0x0
+#define CRT10__V_SYNC_START_MASK 0xff
+#define CRT10__V_SYNC_START__SHIFT 0x0
+#define CRT11__V_SYNC_END_MASK 0xf
+#define CRT11__V_SYNC_END__SHIFT 0x0
+#define CRT11__V_INTR_CLR_MASK 0x10
+#define CRT11__V_INTR_CLR__SHIFT 0x4
+#define CRT11__V_INTR_EN_MASK 0x20
+#define CRT11__V_INTR_EN__SHIFT 0x5
+#define CRT11__SEL5_REFRESH_CYC_MASK 0x40
+#define CRT11__SEL5_REFRESH_CYC__SHIFT 0x6
+#define CRT11__C0T7_WR_ONLY_MASK 0x80
+#define CRT11__C0T7_WR_ONLY__SHIFT 0x7
+#define CRT12__V_DISP_END_MASK 0xff
+#define CRT12__V_DISP_END__SHIFT 0x0
+#define CRT13__DISP_PITCH_MASK 0xff
+#define CRT13__DISP_PITCH__SHIFT 0x0
+#define CRT14__UNDRLN_LOC_MASK 0x1f
+#define CRT14__UNDRLN_LOC__SHIFT 0x0
+#define CRT14__ADDR_CNT_BY4_MASK 0x20
+#define CRT14__ADDR_CNT_BY4__SHIFT 0x5
+#define CRT14__DOUBLE_WORD_MASK 0x40
+#define CRT14__DOUBLE_WORD__SHIFT 0x6
+#define CRT15__V_BLANK_START_MASK 0xff
+#define CRT15__V_BLANK_START__SHIFT 0x0
+#define CRT16__V_BLANK_END_MASK 0xff
+#define CRT16__V_BLANK_END__SHIFT 0x0
+#define CRT17__RA0_AS_A13B_MASK 0x1
+#define CRT17__RA0_AS_A13B__SHIFT 0x0
+#define CRT17__RA1_AS_A14B_MASK 0x2
+#define CRT17__RA1_AS_A14B__SHIFT 0x1
+#define CRT17__VCOUNT_BY2_MASK 0x4
+#define CRT17__VCOUNT_BY2__SHIFT 0x2
+#define CRT17__ADDR_CNT_BY2_MASK 0x8
+#define CRT17__ADDR_CNT_BY2__SHIFT 0x3
+#define CRT17__WRAP_A15TOA0_MASK 0x20
+#define CRT17__WRAP_A15TOA0__SHIFT 0x5
+#define CRT17__BYTE_MODE_MASK 0x40
+#define CRT17__BYTE_MODE__SHIFT 0x6
+#define CRT17__CRTC_SYNC_EN_MASK 0x80
+#define CRT17__CRTC_SYNC_EN__SHIFT 0x7
+#define CRT18__LINE_CMP_MASK 0xff
+#define CRT18__LINE_CMP__SHIFT 0x0
+#define CRT1E__GRPH_DEC_RD1_MASK 0x2
+#define CRT1E__GRPH_DEC_RD1__SHIFT 0x1
+#define CRT1F__GRPH_DEC_RD0_MASK 0xff
+#define CRT1F__GRPH_DEC_RD0__SHIFT 0x0
+#define CRT22__GRPH_LATCH_DATA_MASK 0xff
+#define CRT22__GRPH_LATCH_DATA__SHIFT 0x0
+#define GRPH8_IDX__GRPH_IDX_MASK 0xf
+#define GRPH8_IDX__GRPH_IDX__SHIFT 0x0
+#define GRPH8_DATA__GRPH_DATA_MASK 0xff
+#define GRPH8_DATA__GRPH_DATA__SHIFT 0x0
+#define GRA00__GRPH_SET_RESET0_MASK 0x1
+#define GRA00__GRPH_SET_RESET0__SHIFT 0x0
+#define GRA00__GRPH_SET_RESET1_MASK 0x2
+#define GRA00__GRPH_SET_RESET1__SHIFT 0x1
+#define GRA00__GRPH_SET_RESET2_MASK 0x4
+#define GRA00__GRPH_SET_RESET2__SHIFT 0x2
+#define GRA00__GRPH_SET_RESET3_MASK 0x8
+#define GRA00__GRPH_SET_RESET3__SHIFT 0x3
+#define GRA01__GRPH_SET_RESET_ENA0_MASK 0x1
+#define GRA01__GRPH_SET_RESET_ENA0__SHIFT 0x0
+#define GRA01__GRPH_SET_RESET_ENA1_MASK 0x2
+#define GRA01__GRPH_SET_RESET_ENA1__SHIFT 0x1
+#define GRA01__GRPH_SET_RESET_ENA2_MASK 0x4
+#define GRA01__GRPH_SET_RESET_ENA2__SHIFT 0x2
+#define GRA01__GRPH_SET_RESET_ENA3_MASK 0x8
+#define GRA01__GRPH_SET_RESET_ENA3__SHIFT 0x3
+#define GRA02__GRPH_CCOMP_MASK 0xf
+#define GRA02__GRPH_CCOMP__SHIFT 0x0
+#define GRA03__GRPH_ROTATE_MASK 0x7
+#define GRA03__GRPH_ROTATE__SHIFT 0x0
+#define GRA03__GRPH_FN_SEL_MASK 0x18
+#define GRA03__GRPH_FN_SEL__SHIFT 0x3
+#define GRA04__GRPH_RMAP_MASK 0x3
+#define GRA04__GRPH_RMAP__SHIFT 0x0
+#define GRA05__GRPH_WRITE_MODE_MASK 0x3
+#define GRA05__GRPH_WRITE_MODE__SHIFT 0x0
+#define GRA05__GRPH_READ1_MASK 0x8
+#define GRA05__GRPH_READ1__SHIFT 0x3
+#define GRA05__CGA_ODDEVEN_MASK 0x10
+#define GRA05__CGA_ODDEVEN__SHIFT 0x4
+#define GRA05__GRPH_OES_MASK 0x20
+#define GRA05__GRPH_OES__SHIFT 0x5
+#define GRA05__GRPH_PACK_MASK 0x40
+#define GRA05__GRPH_PACK__SHIFT 0x6
+#define GRA06__GRPH_GRAPHICS_MASK 0x1
+#define GRA06__GRPH_GRAPHICS__SHIFT 0x0
+#define GRA06__GRPH_ODDEVEN_MASK 0x2
+#define GRA06__GRPH_ODDEVEN__SHIFT 0x1
+#define GRA06__GRPH_ADRSEL_MASK 0xc
+#define GRA06__GRPH_ADRSEL__SHIFT 0x2
+#define GRA07__GRPH_XCARE0_MASK 0x1
+#define GRA07__GRPH_XCARE0__SHIFT 0x0
+#define GRA07__GRPH_XCARE1_MASK 0x2
+#define GRA07__GRPH_XCARE1__SHIFT 0x1
+#define GRA07__GRPH_XCARE2_MASK 0x4
+#define GRA07__GRPH_XCARE2__SHIFT 0x2
+#define GRA07__GRPH_XCARE3_MASK 0x8
+#define GRA07__GRPH_XCARE3__SHIFT 0x3
+#define GRA08__GRPH_BMSK_MASK 0xff
+#define GRA08__GRPH_BMSK__SHIFT 0x0
+#define ATTRX__ATTR_IDX_MASK 0x1f
+#define ATTRX__ATTR_IDX__SHIFT 0x0
+#define ATTRX__ATTR_PAL_RW_ENB_MASK 0x20
+#define ATTRX__ATTR_PAL_RW_ENB__SHIFT 0x5
+#define ATTRDW__ATTR_DATA_MASK 0xff
+#define ATTRDW__ATTR_DATA__SHIFT 0x0
+#define ATTRDR__ATTR_DATA_MASK 0xff
+#define ATTRDR__ATTR_DATA__SHIFT 0x0
+#define ATTR00__ATTR_PAL_MASK 0x3f
+#define ATTR00__ATTR_PAL__SHIFT 0x0
+#define ATTR01__ATTR_PAL_MASK 0x3f
+#define ATTR01__ATTR_PAL__SHIFT 0x0
+#define ATTR02__ATTR_PAL_MASK 0x3f
+#define ATTR02__ATTR_PAL__SHIFT 0x0
+#define ATTR03__ATTR_PAL_MASK 0x3f
+#define ATTR03__ATTR_PAL__SHIFT 0x0
+#define ATTR04__ATTR_PAL_MASK 0x3f
+#define ATTR04__ATTR_PAL__SHIFT 0x0
+#define ATTR05__ATTR_PAL_MASK 0x3f
+#define ATTR05__ATTR_PAL__SHIFT 0x0
+#define ATTR06__ATTR_PAL_MASK 0x3f
+#define ATTR06__ATTR_PAL__SHIFT 0x0
+#define ATTR07__ATTR_PAL_MASK 0x3f
+#define ATTR07__ATTR_PAL__SHIFT 0x0
+#define ATTR08__ATTR_PAL_MASK 0x3f
+#define ATTR08__ATTR_PAL__SHIFT 0x0
+#define ATTR09__ATTR_PAL_MASK 0x3f
+#define ATTR09__ATTR_PAL__SHIFT 0x0
+#define ATTR0A__ATTR_PAL_MASK 0x3f
+#define ATTR0A__ATTR_PAL__SHIFT 0x0
+#define ATTR0B__ATTR_PAL_MASK 0x3f
+#define ATTR0B__ATTR_PAL__SHIFT 0x0
+#define ATTR0C__ATTR_PAL_MASK 0x3f
+#define ATTR0C__ATTR_PAL__SHIFT 0x0
+#define ATTR0D__ATTR_PAL_MASK 0x3f
+#define ATTR0D__ATTR_PAL__SHIFT 0x0
+#define ATTR0E__ATTR_PAL_MASK 0x3f
+#define ATTR0E__ATTR_PAL__SHIFT 0x0
+#define ATTR0F__ATTR_PAL_MASK 0x3f
+#define ATTR0F__ATTR_PAL__SHIFT 0x0
+#define ATTR10__ATTR_GRPH_MODE_MASK 0x1
+#define ATTR10__ATTR_GRPH_MODE__SHIFT 0x0
+#define ATTR10__ATTR_MONO_EN_MASK 0x2
+#define ATTR10__ATTR_MONO_EN__SHIFT 0x1
+#define ATTR10__ATTR_LGRPH_EN_MASK 0x4
+#define ATTR10__ATTR_LGRPH_EN__SHIFT 0x2
+#define ATTR10__ATTR_BLINK_EN_MASK 0x8
+#define ATTR10__ATTR_BLINK_EN__SHIFT 0x3
+#define ATTR10__ATTR_PANTOPONLY_MASK 0x20
+#define ATTR10__ATTR_PANTOPONLY__SHIFT 0x5
+#define ATTR10__ATTR_PCLKBY2_MASK 0x40
+#define ATTR10__ATTR_PCLKBY2__SHIFT 0x6
+#define ATTR10__ATTR_CSEL_EN_MASK 0x80
+#define ATTR10__ATTR_CSEL_EN__SHIFT 0x7
+#define ATTR11__ATTR_OVSC_MASK 0xff
+#define ATTR11__ATTR_OVSC__SHIFT 0x0
+#define ATTR12__ATTR_MAP_EN_MASK 0xf
+#define ATTR12__ATTR_MAP_EN__SHIFT 0x0
+#define ATTR12__ATTR_VSMUX_MASK 0x30
+#define ATTR12__ATTR_VSMUX__SHIFT 0x4
+#define ATTR13__ATTR_PPAN_MASK 0xf
+#define ATTR13__ATTR_PPAN__SHIFT 0x0
+#define ATTR14__ATTR_CSEL1_MASK 0x3
+#define ATTR14__ATTR_CSEL1__SHIFT 0x0
+#define ATTR14__ATTR_CSEL2_MASK 0xc
+#define ATTR14__ATTR_CSEL2__SHIFT 0x2
+#define VGA_RENDER_CONTROL__VGA_BLINK_RATE_MASK 0x1f
+#define VGA_RENDER_CONTROL__VGA_BLINK_RATE__SHIFT 0x0
+#define VGA_RENDER_CONTROL__VGA_BLINK_MODE_MASK 0x60
+#define VGA_RENDER_CONTROL__VGA_BLINK_MODE__SHIFT 0x5
+#define VGA_RENDER_CONTROL__VGA_CURSOR_BLINK_INVERT_MASK 0x80
+#define VGA_RENDER_CONTROL__VGA_CURSOR_BLINK_INVERT__SHIFT 0x7
+#define VGA_RENDER_CONTROL__VGA_EXTD_ADDR_COUNT_ENABLE_MASK 0x100
+#define VGA_RENDER_CONTROL__VGA_EXTD_ADDR_COUNT_ENABLE__SHIFT 0x8
+#define VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK 0x30000
+#define VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL__SHIFT 0x10
+#define VGA_RENDER_CONTROL__VGA_LOCK_8DOT_MASK 0x1000000
+#define VGA_RENDER_CONTROL__VGA_LOCK_8DOT__SHIFT 0x18
+#define VGA_RENDER_CONTROL__VGAREG_LINECMP_COMPATIBILITY_SEL_MASK 0x2000000
+#define VGA_RENDER_CONTROL__VGAREG_LINECMP_COMPATIBILITY_SEL__SHIFT 0x19
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_A_MASK 0x7
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_A__SHIFT 0x0
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_B_MASK 0x700
+#define VGA_SOURCE_SELECT__VGA_SOURCE_SEL_B__SHIFT 0x8
+#define VGA_SEQUENCER_RESET_CONTROL__D1_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x1
+#define VGA_SEQUENCER_RESET_CONTROL__D1_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x0
+#define VGA_SEQUENCER_RESET_CONTROL__D2_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x2
+#define VGA_SEQUENCER_RESET_CONTROL__D2_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x1
+#define VGA_SEQUENCER_RESET_CONTROL__D3_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x4
+#define VGA_SEQUENCER_RESET_CONTROL__D3_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x2
+#define VGA_SEQUENCER_RESET_CONTROL__D4_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x8
+#define VGA_SEQUENCER_RESET_CONTROL__D4_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x3
+#define VGA_SEQUENCER_RESET_CONTROL__D5_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x10
+#define VGA_SEQUENCER_RESET_CONTROL__D5_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x4
+#define VGA_SEQUENCER_RESET_CONTROL__D6_BLANK_DISPLAY_WHEN_SEQUENCER_RESET_MASK 0x20
+#define VGA_SEQUENCER_RESET_CONTROL__D6_BLANK_DISPLAY_WHEN_SEQUENCER_RESET__SHIFT 0x5
+#define VGA_SEQUENCER_RESET_CONTROL__D1_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x100
+#define VGA_SEQUENCER_RESET_CONTROL__D1_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x8
+#define VGA_SEQUENCER_RESET_CONTROL__D2_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x200
+#define VGA_SEQUENCER_RESET_CONTROL__D2_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0x9
+#define VGA_SEQUENCER_RESET_CONTROL__D3_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x400
+#define VGA_SEQUENCER_RESET_CONTROL__D3_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0xa
+#define VGA_SEQUENCER_RESET_CONTROL__D4_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x800
+#define VGA_SEQUENCER_RESET_CONTROL__D4_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0xb
+#define VGA_SEQUENCER_RESET_CONTROL__D5_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x1000
+#define VGA_SEQUENCER_RESET_CONTROL__D5_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0xc
+#define VGA_SEQUENCER_RESET_CONTROL__D6_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET_MASK 0x2000
+#define VGA_SEQUENCER_RESET_CONTROL__D6_DISABLE_SYNCS_AND_DE_WHEN_SEQUENCER_RESET__SHIFT 0xd
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_ENABLE_MASK 0x10000
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_ENABLE__SHIFT 0x10
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_REGISTER_SELECT_MASK 0x20000
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_REGISTER_SELECT__SHIFT 0x11
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_INDEX_SELECT_MASK 0xfc0000
+#define VGA_SEQUENCER_RESET_CONTROL__VGA_MODE_AUTO_TRIGGER_INDEX_SELECT__SHIFT 0x12
+#define VGA_MODE_CONTROL__VGA_ATI_LINEAR_MASK 0x1
+#define VGA_MODE_CONTROL__VGA_ATI_LINEAR__SHIFT 0x0
+#define VGA_MODE_CONTROL__VGA_LUT_PALETTE_UPDATE_MODE_MASK 0x30
+#define VGA_MODE_CONTROL__VGA_LUT_PALETTE_UPDATE_MODE__SHIFT 0x4
+#define VGA_MODE_CONTROL__VGA_128K_APERTURE_PAGING_MASK 0x100
+#define VGA_MODE_CONTROL__VGA_128K_APERTURE_PAGING__SHIFT 0x8
+#define VGA_MODE_CONTROL__VGA_TEXT_132_COLUMNS_EN_MASK 0x10000
+#define VGA_MODE_CONTROL__VGA_TEXT_132_COLUMNS_EN__SHIFT 0x10
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_PITCH_SELECT_MASK 0x3
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_PITCH_SELECT__SHIFT 0x0
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_HEIGHT_SELECT_MASK 0x300
+#define VGA_SURFACE_PITCH_SELECT__VGA_SURFACE_HEIGHT_SELECT__SHIFT 0x8
+#define VGA_MEMORY_BASE_ADDRESS__VGA_MEMORY_BASE_ADDRESS_MASK 0xffffffff
+#define VGA_MEMORY_BASE_ADDRESS__VGA_MEMORY_BASE_ADDRESS__SHIFT 0x0
+#define VGA_MEMORY_BASE_ADDRESS_HIGH__VGA_MEMORY_BASE_ADDRESS_HIGH_MASK 0xff
+#define VGA_MEMORY_BASE_ADDRESS_HIGH__VGA_MEMORY_BASE_ADDRESS_HIGH__SHIFT 0x0
+#define VGA_DISPBUF1_SURFACE_ADDR__VGA_DISPBUF1_SURFACE_ADDR_MASK 0x1ffffff
+#define VGA_DISPBUF1_SURFACE_ADDR__VGA_DISPBUF1_SURFACE_ADDR__SHIFT 0x0
+#define VGA_DISPBUF2_SURFACE_ADDR__VGA_DISPBUF2_SURFACE_ADDR_MASK 0x1ffffff
+#define VGA_DISPBUF2_SURFACE_ADDR__VGA_DISPBUF2_SURFACE_ADDR__SHIFT 0x0
+#define VGA_HDP_CONTROL__VGA_MEM_PAGE_SELECT_EN_MASK 0x1
+#define VGA_HDP_CONTROL__VGA_MEM_PAGE_SELECT_EN__SHIFT 0x0
+#define VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK 0x10
+#define VGA_HDP_CONTROL__VGA_MEMORY_DISABLE__SHIFT 0x4
+#define VGA_HDP_CONTROL__VGA_RBBM_LOCK_DISABLE_MASK 0x100
+#define VGA_HDP_CONTROL__VGA_RBBM_LOCK_DISABLE__SHIFT 0x8
+#define VGA_HDP_CONTROL__VGA_SOFT_RESET_MASK 0x10000
+#define VGA_HDP_CONTROL__VGA_SOFT_RESET__SHIFT 0x10
+#define VGA_HDP_CONTROL__VGA_TEST_RESET_CONTROL_MASK 0x1000000
+#define VGA_HDP_CONTROL__VGA_TEST_RESET_CONTROL__SHIFT 0x18
+#define VGA_CACHE_CONTROL__VGA_WRITE_THROUGH_CACHE_DIS_MASK 0x1
+#define VGA_CACHE_CONTROL__VGA_WRITE_THROUGH_CACHE_DIS__SHIFT 0x0
+#define VGA_CACHE_CONTROL__VGA_READ_CACHE_DISABLE_MASK 0x100
+#define VGA_CACHE_CONTROL__VGA_READ_CACHE_DISABLE__SHIFT 0x8
+#define VGA_CACHE_CONTROL__VGA_READ_BUFFER_INVALIDATE_MASK 0x10000
+#define VGA_CACHE_CONTROL__VGA_READ_BUFFER_INVALIDATE__SHIFT 0x10
+#define VGA_CACHE_CONTROL__VGA_DCCIF_W256ONLY_MASK 0x100000
+#define VGA_CACHE_CONTROL__VGA_DCCIF_W256ONLY__SHIFT 0x14
+#define VGA_CACHE_CONTROL__VGA_DCCIF_WC_TIMEOUT_MASK 0x3f000000
+#define VGA_CACHE_CONTROL__VGA_DCCIF_WC_TIMEOUT__SHIFT 0x18
+#define D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK 0x1
+#define D1VGA_CONTROL__D1VGA_MODE_ENABLE__SHIFT 0x0
+#define D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK 0x100
+#define D1VGA_CONTROL__D1VGA_TIMING_SELECT__SHIFT 0x8
+#define D1VGA_CONTROL__D1VGA_SYNC_POLARITY_SELECT_MASK 0x200
+#define D1VGA_CONTROL__D1VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D1VGA_CONTROL__D1VGA_OVERSCAN_COLOR_EN_MASK 0x10000
+#define D1VGA_CONTROL__D1VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D1VGA_CONTROL__D1VGA_ROTATE_MASK 0x3000000
+#define D1VGA_CONTROL__D1VGA_ROTATE__SHIFT 0x18
+#define D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK 0x1
+#define D2VGA_CONTROL__D2VGA_MODE_ENABLE__SHIFT 0x0
+#define D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK 0x100
+#define D2VGA_CONTROL__D2VGA_TIMING_SELECT__SHIFT 0x8
+#define D2VGA_CONTROL__D2VGA_SYNC_POLARITY_SELECT_MASK 0x200
+#define D2VGA_CONTROL__D2VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D2VGA_CONTROL__D2VGA_OVERSCAN_COLOR_EN_MASK 0x10000
+#define D2VGA_CONTROL__D2VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D2VGA_CONTROL__D2VGA_ROTATE_MASK 0x3000000
+#define D2VGA_CONTROL__D2VGA_ROTATE__SHIFT 0x18
+#define D3VGA_CONTROL__D3VGA_MODE_ENABLE_MASK 0x1
+#define D3VGA_CONTROL__D3VGA_MODE_ENABLE__SHIFT 0x0
+#define D3VGA_CONTROL__D3VGA_TIMING_SELECT_MASK 0x100
+#define D3VGA_CONTROL__D3VGA_TIMING_SELECT__SHIFT 0x8
+#define D3VGA_CONTROL__D3VGA_SYNC_POLARITY_SELECT_MASK 0x200
+#define D3VGA_CONTROL__D3VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D3VGA_CONTROL__D3VGA_OVERSCAN_COLOR_EN_MASK 0x10000
+#define D3VGA_CONTROL__D3VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D3VGA_CONTROL__D3VGA_ROTATE_MASK 0x3000000
+#define D3VGA_CONTROL__D3VGA_ROTATE__SHIFT 0x18
+#define D4VGA_CONTROL__D4VGA_MODE_ENABLE_MASK 0x1
+#define D4VGA_CONTROL__D4VGA_MODE_ENABLE__SHIFT 0x0
+#define D4VGA_CONTROL__D4VGA_TIMING_SELECT_MASK 0x100
+#define D4VGA_CONTROL__D4VGA_TIMING_SELECT__SHIFT 0x8
+#define D4VGA_CONTROL__D4VGA_SYNC_POLARITY_SELECT_MASK 0x200
+#define D4VGA_CONTROL__D4VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D4VGA_CONTROL__D4VGA_OVERSCAN_COLOR_EN_MASK 0x10000
+#define D4VGA_CONTROL__D4VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D4VGA_CONTROL__D4VGA_ROTATE_MASK 0x3000000
+#define D4VGA_CONTROL__D4VGA_ROTATE__SHIFT 0x18
+#define D5VGA_CONTROL__D5VGA_MODE_ENABLE_MASK 0x1
+#define D5VGA_CONTROL__D5VGA_MODE_ENABLE__SHIFT 0x0
+#define D5VGA_CONTROL__D5VGA_TIMING_SELECT_MASK 0x100
+#define D5VGA_CONTROL__D5VGA_TIMING_SELECT__SHIFT 0x8
+#define D5VGA_CONTROL__D5VGA_SYNC_POLARITY_SELECT_MASK 0x200
+#define D5VGA_CONTROL__D5VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D5VGA_CONTROL__D5VGA_OVERSCAN_COLOR_EN_MASK 0x10000
+#define D5VGA_CONTROL__D5VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D5VGA_CONTROL__D5VGA_ROTATE_MASK 0x3000000
+#define D5VGA_CONTROL__D5VGA_ROTATE__SHIFT 0x18
+#define D6VGA_CONTROL__D6VGA_MODE_ENABLE_MASK 0x1
+#define D6VGA_CONTROL__D6VGA_MODE_ENABLE__SHIFT 0x0
+#define D6VGA_CONTROL__D6VGA_TIMING_SELECT_MASK 0x100
+#define D6VGA_CONTROL__D6VGA_TIMING_SELECT__SHIFT 0x8
+#define D6VGA_CONTROL__D6VGA_SYNC_POLARITY_SELECT_MASK 0x200
+#define D6VGA_CONTROL__D6VGA_SYNC_POLARITY_SELECT__SHIFT 0x9
+#define D6VGA_CONTROL__D6VGA_OVERSCAN_COLOR_EN_MASK 0x10000
+#define D6VGA_CONTROL__D6VGA_OVERSCAN_COLOR_EN__SHIFT 0x10
+#define D6VGA_CONTROL__D6VGA_ROTATE_MASK 0x3000000
+#define D6VGA_CONTROL__D6VGA_ROTATE__SHIFT 0x18
+#define VGA_HW_DEBUG__VGA_HW_DEBUG_MASK 0xffffffff
+#define VGA_HW_DEBUG__VGA_HW_DEBUG__SHIFT 0x0
+#define VGA_STATUS__VGA_MEM_ACCESS_STATUS_MASK 0x1
+#define VGA_STATUS__VGA_MEM_ACCESS_STATUS__SHIFT 0x0
+#define VGA_STATUS__VGA_REG_ACCESS_STATUS_MASK 0x2
+#define VGA_STATUS__VGA_REG_ACCESS_STATUS__SHIFT 0x1
+#define VGA_STATUS__VGA_DISPLAY_SWITCH_STATUS_MASK 0x4
+#define VGA_STATUS__VGA_DISPLAY_SWITCH_STATUS__SHIFT 0x2
+#define VGA_STATUS__VGA_MODE_AUTO_TRIGGER_STATUS_MASK 0x8
+#define VGA_STATUS__VGA_MODE_AUTO_TRIGGER_STATUS__SHIFT 0x3
+#define VGA_INTERRUPT_CONTROL__VGA_MEM_ACCESS_INT_MASK_MASK 0x1
+#define VGA_INTERRUPT_CONTROL__VGA_MEM_ACCESS_INT_MASK__SHIFT 0x0
+#define VGA_INTERRUPT_CONTROL__VGA_REG_ACCESS_INT_MASK_MASK 0x100
+#define VGA_INTERRUPT_CONTROL__VGA_REG_ACCESS_INT_MASK__SHIFT 0x8
+#define VGA_INTERRUPT_CONTROL__VGA_DISPLAY_SWITCH_INT_MASK_MASK 0x10000
+#define VGA_INTERRUPT_CONTROL__VGA_DISPLAY_SWITCH_INT_MASK__SHIFT 0x10
+#define VGA_INTERRUPT_CONTROL__VGA_MODE_AUTO_TRIGGER_INT_MASK_MASK 0x1000000
+#define VGA_INTERRUPT_CONTROL__VGA_MODE_AUTO_TRIGGER_INT_MASK__SHIFT 0x18
+#define VGA_STATUS_CLEAR__VGA_MEM_ACCESS_INT_CLEAR_MASK 0x1
+#define VGA_STATUS_CLEAR__VGA_MEM_ACCESS_INT_CLEAR__SHIFT 0x0
+#define VGA_STATUS_CLEAR__VGA_REG_ACCESS_INT_CLEAR_MASK 0x100
+#define VGA_STATUS_CLEAR__VGA_REG_ACCESS_INT_CLEAR__SHIFT 0x8
+#define VGA_STATUS_CLEAR__VGA_DISPLAY_SWITCH_INT_CLEAR_MASK 0x10000
+#define VGA_STATUS_CLEAR__VGA_DISPLAY_SWITCH_INT_CLEAR__SHIFT 0x10
+#define VGA_STATUS_CLEAR__VGA_MODE_AUTO_TRIGGER_INT_CLEAR_MASK 0x1000000
+#define VGA_STATUS_CLEAR__VGA_MODE_AUTO_TRIGGER_INT_CLEAR__SHIFT 0x18
+#define VGA_INTERRUPT_STATUS__VGA_MEM_ACCESS_INT_STATUS_MASK 0x1
+#define VGA_INTERRUPT_STATUS__VGA_MEM_ACCESS_INT_STATUS__SHIFT 0x0
+#define VGA_INTERRUPT_STATUS__VGA_REG_ACCESS_INT_STATUS_MASK 0x2
+#define VGA_INTERRUPT_STATUS__VGA_REG_ACCESS_INT_STATUS__SHIFT 0x1
+#define VGA_INTERRUPT_STATUS__VGA_DISPLAY_SWITCH_INT_STATUS_MASK 0x4
+#define VGA_INTERRUPT_STATUS__VGA_DISPLAY_SWITCH_INT_STATUS__SHIFT 0x2
+#define VGA_INTERRUPT_STATUS__VGA_MODE_AUTO_TRIGGER_INT_STATUS_MASK 0x8
+#define VGA_INTERRUPT_STATUS__VGA_MODE_AUTO_TRIGGER_INT_STATUS__SHIFT 0x3
+#define VGA_MAIN_CONTROL__VGA_CRTC_TIMEOUT_MASK 0x3
+#define VGA_MAIN_CONTROL__VGA_CRTC_TIMEOUT__SHIFT 0x0
+#define VGA_MAIN_CONTROL__VGA_RENDER_TIMEOUT_COUNT_MASK 0x18
+#define VGA_MAIN_CONTROL__VGA_RENDER_TIMEOUT_COUNT__SHIFT 0x3
+#define VGA_MAIN_CONTROL__VGA_VIRTUAL_VERTICAL_RETRACE_DURATION_MASK 0xe0
+#define VGA_MAIN_CONTROL__VGA_VIRTUAL_VERTICAL_RETRACE_DURATION__SHIFT 0x5
+#define VGA_MAIN_CONTROL__VGA_READBACK_VGA_VSTATUS_SOURCE_SELECT_MASK 0x300
+#define VGA_MAIN_CONTROL__VGA_READBACK_VGA_VSTATUS_SOURCE_SELECT__SHIFT 0x8
+#define VGA_MAIN_CONTROL__VGA_MC_WRITE_CLEAN_WAIT_DELAY_MASK 0xf000
+#define VGA_MAIN_CONTROL__VGA_MC_WRITE_CLEAN_WAIT_DELAY__SHIFT 0xc
+#define VGA_MAIN_CONTROL__VGA_READBACK_NO_DISPLAY_SOURCE_SELECT_MASK 0x30000
+#define VGA_MAIN_CONTROL__VGA_READBACK_NO_DISPLAY_SOURCE_SELECT__SHIFT 0x10
+#define VGA_MAIN_CONTROL__VGA_READBACK_CRT_INTR_SOURCE_SELECT_MASK 0x3000000
+#define VGA_MAIN_CONTROL__VGA_READBACK_CRT_INTR_SOURCE_SELECT__SHIFT 0x18
+#define VGA_MAIN_CONTROL__VGA_READBACK_SENSE_SWITCH_SELECT_MASK 0x4000000
+#define VGA_MAIN_CONTROL__VGA_READBACK_SENSE_SWITCH_SELECT__SHIFT 0x1a
+#define VGA_MAIN_CONTROL__VGA_READ_URGENT_ENABLE_MASK 0x8000000
+#define VGA_MAIN_CONTROL__VGA_READ_URGENT_ENABLE__SHIFT 0x1b
+#define VGA_MAIN_CONTROL__VGA_WRITES_URGENT_ENABLE_MASK 0x10000000
+#define VGA_MAIN_CONTROL__VGA_WRITES_URGENT_ENABLE__SHIFT 0x1c
+#define VGA_MAIN_CONTROL__VGA_EXTERNAL_DAC_SENSE_MASK 0x20000000
+#define VGA_MAIN_CONTROL__VGA_EXTERNAL_DAC_SENSE__SHIFT 0x1d
+#define VGA_MAIN_CONTROL__VGA_MAIN_TEST_VSTATUS_NO_DISPLAY_CRTC_TIMEOUT_MASK 0x80000000
+#define VGA_MAIN_CONTROL__VGA_MAIN_TEST_VSTATUS_NO_DISPLAY_CRTC_TIMEOUT__SHIFT 0x1f
+#define VGA_TEST_CONTROL__VGA_TEST_ENABLE_MASK 0x1
+#define VGA_TEST_CONTROL__VGA_TEST_ENABLE__SHIFT 0x0
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_START_MASK 0x100
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_START__SHIFT 0x8
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DONE_MASK 0x10000
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DONE__SHIFT 0x10
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DISPBUF_SELECT_MASK 0x1000000
+#define VGA_TEST_CONTROL__VGA_TEST_RENDER_DISPBUF_SELECT__SHIFT 0x18
+#define VGA_DEBUG_READBACK_INDEX__VGA_DEBUG_READBACK_INDEX_MASK 0xff
+#define VGA_DEBUG_READBACK_INDEX__VGA_DEBUG_READBACK_INDEX__SHIFT 0x0
+#define VGA_DEBUG_READBACK_DATA__VGA_DEBUG_READBACK_DATA_MASK 0xffffffff
+#define VGA_DEBUG_READBACK_DATA__VGA_DEBUG_READBACK_DATA__SHIFT 0x0
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE0_ADDR_MASK 0x3ff
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE0_ADDR__SHIFT 0x0
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE1_ADDR_MASK 0x3ff0000
+#define VGA_MEM_WRITE_PAGE_ADDR__VGA_MEM_WRITE_PAGE1_ADDR__SHIFT 0x10
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE0_ADDR_MASK 0x3ff
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE0_ADDR__SHIFT 0x0
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE1_ADDR_MASK 0x3ff0000
+#define VGA_MEM_READ_PAGE_ADDR__VGA_MEM_READ_PAGE1_ADDR__SHIFT 0x10
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_INDEX_MASK 0xff
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_INDEX__SHIFT 0x0
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define VGA_TEST_DEBUG_INDEX__VGA_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define VGA_TEST_DEBUG_DATA__VGA_TEST_DEBUG_DATA_MASK 0xffffffff
+#define VGA_TEST_DEBUG_DATA__VGA_TEST_DEBUG_DATA__SHIFT 0x0
+#define VGADCC_DBG_DCCIF_C__DBG_DCCIF_C_MASK 0xffffffff
+#define VGADCC_DBG_DCCIF_C__DBG_DCCIF_C__SHIFT 0x0
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_LEVEL_MASK 0x3
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_LEVEL__SHIFT 0x0
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_FINE_CONTROL_MASK 0x3f00
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_FINE_CONTROL__SHIFT 0x8
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_BANDGAP_ADJUSTMENT_MASK 0x3f0000
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_BANDGAP_ADJUSTMENT__SHIFT 0x10
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_ANALOG_MONITOR_MASK 0xf000000
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_ANALOG_MONITOR__SHIFT 0x18
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_COREMON_MASK 0x10000000
+#define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_COREMON__SHIFT 0x1c
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_INITB_MASK 0x1
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_INITB__SHIFT 0x0
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_EN_MASK 0x2
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_EN__SHIFT 0x1
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_DACADJ_EN_MASK 0x4
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_DACADJ_EN__SHIFT 0x2
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_WAIT_ADJUST_MASK 0x3ff0
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_WAIT_ADJUST__SHIFT 0x4
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_MASK_MASK 0x700000
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_MASK__SHIFT 0x14
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_COMPLETE_MASK 0x10000000
+#define BPHYC_DAC_AUTO_CALIB_CONTROL__BPHYC_DAC_CAL_COMPLETE__SHIFT 0x1c
+#define DPG_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION_MASK 0xffff
+#define DPG_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION__SHIFT 0x0
+#define DPG_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT_MASK 0xffff0000
+#define DPG_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT__SHIFT 0x10
+#define DPG_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT_MASK 0xffff
+#define DPG_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT__SHIFT 0x0
+#define DPG_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT_MASK 0xffff0000
+#define DPG_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT__SHIFT 0x10
+#define DPG_WATERMARK_MASK_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK_MASK 0x7
+#define DPG_WATERMARK_MASK_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK__SHIFT 0x0
+#define DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK_MASK 0x700
+#define DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT 0x8
+#define DPG_WATERMARK_MASK_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK_MASK 0x70000
+#define DPG_WATERMARK_MASK_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK__SHIFT 0x10
+#define DPG_WATERMARK_MASK_CONTROL__DISABLE_FLIP_URGENT_MASK 0x1000000
+#define DPG_WATERMARK_MASK_CONTROL__DISABLE_FLIP_URGENT__SHIFT 0x18
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK_MASK 0xffff
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT 0x0
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK_MASK 0xffff0000
+#define DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT 0x10
+#define DPG_PIPE_DPM_CONTROL__DPM_ENABLE_MASK 0x1
+#define DPG_PIPE_DPM_CONTROL__DPM_ENABLE__SHIFT 0x0
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE_MASK 0x10
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE__SHIFT 0x4
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON_MASK 0x100
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON__SHIFT 0x8
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK_MASK 0x3000
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK__SHIFT 0xc
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK 0xffff0000
+#define DPG_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK__SHIFT 0x10
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK 0x1
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE__SHIFT 0x0
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR_MASK 0x10
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR__SHIFT 0x4
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON_MASK 0x20
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON__SHIFT 0x5
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA_MASK 0x40
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA__SHIFT 0x6
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC_MASK 0x80
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC__SHIFT 0x7
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON_MASK 0x100
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON__SHIFT 0x8
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK_MASK 0x200
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK__SHIFT 0x9
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH_MASK 0x400
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH__SHIFT 0xa
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON_MASK 0x800
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON__SHIFT 0xb
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK 0xffff0000
+#define DPG_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK__SHIFT 0x10
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE_MASK 0x1
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE__SHIFT 0x0
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST_MASK 0x10
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST__SHIFT 0x4
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST_MASK 0x100
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST__SHIFT 0x8
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON_MASK 0x200
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON__SHIFT 0x9
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT_MASK 0x400
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT__SHIFT 0xa
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK 0xffff8000
+#define DPG_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK__SHIFT 0xf
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH_MASK 0x1
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH__SHIFT 0x0
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH_MASK 0x10
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH__SHIFT 0x4
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH_MASK 0x20
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH__SHIFT 0x5
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH_MASK 0x40
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH__SHIFT 0x6
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH_MASK 0x80
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH__SHIFT 0x7
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH_MASK 0x100
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH__SHIFT 0x8
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH_MASK 0x200
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH__SHIFT 0x9
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH_MASK 0x400
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH__SHIFT 0xa
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH_MASK 0x800
+#define DPG_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH__SHIFT 0xb
+#define DPG_REPEATER_PROGRAM__REG_DPG_DMIFRC_REPEATER_MASK 0x7
+#define DPG_REPEATER_PROGRAM__REG_DPG_DMIFRC_REPEATER__SHIFT 0x0
+#define DPG_REPEATER_PROGRAM__REG_DMIFRC_DPG_REPEATER_MASK 0x70
+#define DPG_REPEATER_PROGRAM__REG_DMIFRC_DPG_REPEATER__SHIFT 0x4
+#define DPG_HW_DEBUG_A__DPG_HW_DEBUG_A_MASK 0xffffffff
+#define DPG_HW_DEBUG_A__DPG_HW_DEBUG_A__SHIFT 0x0
+#define DPG_HW_DEBUG_B__DPG_HW_DEBUG_B_MASK 0xffffffff
+#define DPG_HW_DEBUG_B__DPG_HW_DEBUG_B__SHIFT 0x0
+#define DPG_HW_DEBUG_11__DPG_HW_DEBUG_11_MASK 0x1
+#define DPG_HW_DEBUG_11__DPG_HW_DEBUG_11__SHIFT 0x0
+#define DPG_CHK_PRE_PROC_CNTL__DPG_DISABLE_DMIF_BUF_CHK_MASK 0x1
+#define DPG_CHK_PRE_PROC_CNTL__DPG_DISABLE_DMIF_BUF_CHK__SHIFT 0x0
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_UNMAPPED_MASK 0x1
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_UNMAPPED__SHIFT 0x0
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_MAPPED_MASK 0x2
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_MAPPED__SHIFT 0x1
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_UNMAPPED_CLR_MASK 0x10
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_UNMAPPED_CLR__SHIFT 0x4
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_MAPPED_CLR_MASK 0x20
+#define DPG_DVMM_STATUS__DPG_DVMM_FORCED_FLIP_TO_MAPPED_CLR__SHIFT 0x5
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_INDEX_MASK 0xff
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DPG_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DPG_TEST_DEBUG_DATA__DPG_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DPG_TEST_DEBUG_DATA__DPG_TEST_DEBUG_DATA__SHIFT 0x0
+#define DPGV0_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION_MASK 0xffff
+#define DPGV0_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION__SHIFT 0x0
+#define DPGV0_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT_MASK 0xffff0000
+#define DPGV0_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT__SHIFT 0x10
+#define DPGV1_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION_MASK 0xffff
+#define DPGV1_PIPE_ARBITRATION_CONTROL1__PIXEL_DURATION__SHIFT 0x0
+#define DPGV1_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT_MASK 0xffff0000
+#define DPGV1_PIPE_ARBITRATION_CONTROL1__BASE_WEIGHT__SHIFT 0x10
+#define DPGV0_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT_MASK 0xffff
+#define DPGV0_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT__SHIFT 0x0
+#define DPGV0_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT_MASK 0xffff0000
+#define DPGV0_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT__SHIFT 0x10
+#define DPGV1_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT_MASK 0xffff
+#define DPGV1_PIPE_ARBITRATION_CONTROL2__TIME_WEIGHT__SHIFT 0x0
+#define DPGV1_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT_MASK 0xffff0000
+#define DPGV1_PIPE_ARBITRATION_CONTROL2__URGENCY_WEIGHT__SHIFT 0x10
+#define DPGV0_WATERMARK_MASK_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK_MASK 0x3
+#define DPGV0_WATERMARK_MASK_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK__SHIFT 0x0
+#define DPGV0_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK_MASK 0x300
+#define DPGV0_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT 0x8
+#define DPGV0_WATERMARK_MASK_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK_MASK 0x30000
+#define DPGV0_WATERMARK_MASK_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK__SHIFT 0x10
+#define DPGV0_WATERMARK_MASK_CONTROL__DISABLE_FLIP_URGENT_MASK 0x1000000
+#define DPGV0_WATERMARK_MASK_CONTROL__DISABLE_FLIP_URGENT__SHIFT 0x18
+#define DPGV1_WATERMARK_MASK_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK_MASK 0x3
+#define DPGV1_WATERMARK_MASK_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK__SHIFT 0x0
+#define DPGV1_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK_MASK 0x300
+#define DPGV1_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT 0x8
+#define DPGV1_WATERMARK_MASK_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK_MASK 0x30000
+#define DPGV1_WATERMARK_MASK_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK__SHIFT 0x10
+#define DPGV1_WATERMARK_MASK_CONTROL__DISABLE_FLIP_URGENT_MASK 0x1000000
+#define DPGV1_WATERMARK_MASK_CONTROL__DISABLE_FLIP_URGENT__SHIFT 0x18
+#define DPGV0_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK_MASK 0xffff
+#define DPGV0_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT 0x0
+#define DPGV0_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK_MASK 0xffff0000
+#define DPGV0_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT 0x10
+#define DPGV1_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK_MASK 0xffff
+#define DPGV1_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT 0x0
+#define DPGV1_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK_MASK 0xffff0000
+#define DPGV1_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT 0x10
+#define DPGV0_PIPE_DPM_CONTROL__DPM_ENABLE_MASK 0x1
+#define DPGV0_PIPE_DPM_CONTROL__DPM_ENABLE__SHIFT 0x0
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE_MASK 0x10
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE__SHIFT 0x4
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON_MASK 0x100
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON__SHIFT 0x8
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK_MASK 0x3000
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK__SHIFT 0xc
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK 0xffff0000
+#define DPGV0_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK__SHIFT 0x10
+#define DPGV1_PIPE_DPM_CONTROL__DPM_ENABLE_MASK 0x1
+#define DPGV1_PIPE_DPM_CONTROL__DPM_ENABLE__SHIFT 0x0
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE_MASK 0x10
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_ENABLE__SHIFT 0x4
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON_MASK 0x100
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_FORCE_ON__SHIFT 0x8
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK_MASK 0x3000
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK__SHIFT 0xc
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK_MASK 0xffff0000
+#define DPGV1_PIPE_DPM_CONTROL__MCLK_CHANGE_WATERMARK__SHIFT 0x10
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK 0x1
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_ENABLE__SHIFT 0x0
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR_MASK 0x10
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR__SHIFT 0x4
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON_MASK 0x20
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON__SHIFT 0x5
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA_MASK 0x40
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA__SHIFT 0x6
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC_MASK 0x80
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC__SHIFT 0x7
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON_MASK 0x100
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON__SHIFT 0x8
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK_MASK 0x200
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK__SHIFT 0x9
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH_MASK 0x400
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH__SHIFT 0xa
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON_MASK 0x800
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON__SHIFT 0xb
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK 0xffff0000
+#define DPGV0_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK__SHIFT 0x10
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK 0x1
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_ENABLE__SHIFT 0x0
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR_MASK 0x10
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_CURSOR__SHIFT 0x4
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON_MASK 0x20
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_ICON__SHIFT 0x5
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA_MASK 0x40
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_VGA__SHIFT 0x6
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC_MASK 0x80
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_IGNORE_FBC__SHIFT 0x7
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON_MASK 0x100
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_FORCE_ON__SHIFT 0x8
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK_MASK 0x200
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_WM_HIGH_EXCLUDES_VBLANK__SHIFT 0x9
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH_MASK 0x400
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_URGENT_IN_NOT_SELF_REFRESH__SHIFT 0xa
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON_MASK 0x800
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_SELF_REFRESH_FORCE_ON__SHIFT 0xb
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK 0xffff0000
+#define DPGV1_PIPE_STUTTER_CONTROL__STUTTER_EXIT_SELF_REFRESH_WATERMARK__SHIFT 0x10
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE_MASK 0x1
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE__SHIFT 0x0
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST_MASK 0x10
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST__SHIFT 0x4
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST_MASK 0x100
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST__SHIFT 0x8
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON_MASK 0x200
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON__SHIFT 0x9
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT_MASK 0x400
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT__SHIFT 0xa
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK 0xffff0000
+#define DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK__SHIFT 0x10
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE_MASK 0x1
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_ENABLE__SHIFT 0x0
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST_MASK 0x10
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_URGENT_DURING_REQUEST__SHIFT 0x4
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST_MASK 0x100
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST__SHIFT 0x8
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON_MASK 0x200
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_FORCE_ON__SHIFT 0x9
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT_MASK 0x400
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_ALLOW_FOR_URGENT__SHIFT 0xa
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK_MASK 0xffff0000
+#define DPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL__NB_PSTATE_CHANGE_WATERMARK__SHIFT 0x10
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH_MASK 0x1
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH__SHIFT 0x0
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH_MASK 0x10
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH__SHIFT 0x4
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH_MASK 0x20
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH__SHIFT 0x5
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH_MASK 0x40
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH__SHIFT 0x6
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH_MASK 0x80
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH__SHIFT 0x7
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH_MASK 0x100
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH__SHIFT 0x8
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH_MASK 0x200
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH__SHIFT 0x9
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH_MASK 0x400
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH__SHIFT 0xa
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH_MASK 0x800
+#define DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH__SHIFT 0xb
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH_MASK 0x1
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_ENABLE_NONLPTCH__SHIFT 0x0
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH_MASK 0x10
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_CURSOR_NONLPTCH__SHIFT 0x4
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH_MASK 0x20
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_ICON_NONLPTCH__SHIFT 0x5
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH_MASK 0x40
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_VGA_NONLPTCH__SHIFT 0x6
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH_MASK 0x80
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_IGNORE_FBC_NONLPTCH__SHIFT 0x7
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH_MASK 0x100
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_FORCE_ON_NONLPTCH__SHIFT 0x8
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH_MASK 0x200
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_WM_HIGH_EXCLUDES_VBLANK_NONLPTCH__SHIFT 0x9
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH_MASK 0x400
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_URGENT_IN_NOT_SELF_REFRESH_NONLPTCH__SHIFT 0xa
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH_MASK 0x800
+#define DPGV1_PIPE_STUTTER_CONTROL_NONLPTCH__STUTTER_SELF_REFRESH_FORCE_ON_NONLPTCH__SHIFT 0xb
+#define DPGV0_REPEATER_PROGRAM__REG_DPG_DMIFRC_REPEATER_MASK 0x7
+#define DPGV0_REPEATER_PROGRAM__REG_DPG_DMIFRC_REPEATER__SHIFT 0x0
+#define DPGV0_REPEATER_PROGRAM__REG_DMIFRC_DPG_REPEATER_MASK 0x70
+#define DPGV0_REPEATER_PROGRAM__REG_DMIFRC_DPG_REPEATER__SHIFT 0x4
+#define DPGV1_REPEATER_PROGRAM__REG_DPG_DMIFRC_REPEATER_MASK 0x7
+#define DPGV1_REPEATER_PROGRAM__REG_DPG_DMIFRC_REPEATER__SHIFT 0x0
+#define DPGV1_REPEATER_PROGRAM__REG_DMIFRC_DPG_REPEATER_MASK 0x70
+#define DPGV1_REPEATER_PROGRAM__REG_DMIFRC_DPG_REPEATER__SHIFT 0x4
+#define DPGV0_HW_DEBUG_A__DPG_HW_DEBUG_A_MASK 0xffffffff
+#define DPGV0_HW_DEBUG_A__DPG_HW_DEBUG_A__SHIFT 0x0
+#define DPGV1_HW_DEBUG_A__DPG_HW_DEBUG_A_MASK 0xffffffff
+#define DPGV1_HW_DEBUG_A__DPG_HW_DEBUG_A__SHIFT 0x0
+#define DPGV0_HW_DEBUG_B__DPG_HW_DEBUG_B_MASK 0xffffffff
+#define DPGV0_HW_DEBUG_B__DPG_HW_DEBUG_B__SHIFT 0x0
+#define DPGV1_HW_DEBUG_B__DPG_HW_DEBUG_B_MASK 0xffffffff
+#define DPGV1_HW_DEBUG_B__DPG_HW_DEBUG_B__SHIFT 0x0
+#define DPGV0_HW_DEBUG_11__DPG_HW_DEBUG_11_MASK 0x1
+#define DPGV0_HW_DEBUG_11__DPG_HW_DEBUG_11__SHIFT 0x0
+#define DPGV1_HW_DEBUG_11__DPG_HW_DEBUG_11_MASK 0x1
+#define DPGV1_HW_DEBUG_11__DPG_HW_DEBUG_11__SHIFT 0x0
+#define DPGV0_CHK_PRE_PROC_CNTL__DPG_DISABLE_DMIF_BUF_CHK_MASK 0x1
+#define DPGV0_CHK_PRE_PROC_CNTL__DPG_DISABLE_DMIF_BUF_CHK__SHIFT 0x0
+#define DPGV1_CHK_PRE_PROC_CNTL__DPG_DISABLE_DMIF_BUF_CHK_MASK 0x1
+#define DPGV1_CHK_PRE_PROC_CNTL__DPG_DISABLE_DMIF_BUF_CHK__SHIFT 0x0
+#define DPGV_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_INDEX_MASK 0xff
+#define DPGV_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DPGV_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DPGV_TEST_DEBUG_INDEX__DPG_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DPGV_TEST_DEBUG_DATA__DPG_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DPGV_TEST_DEBUG_DATA__DPG_TEST_DEBUG_DATA__SHIFT 0x0
+#define AZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x1ffff
+#define AZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xffffffff
+#define AZROOT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xffffffff
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xffffffff
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xffffffff
+#define AZALIA_F2_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_ROOT_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT_MASK 0xffffffff
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__AZALIA_CODEC_FUNCTION_PARAMETER_SUBORDINATE_NODE_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xffffffff
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0xfff
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x1f0000
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xffffffff
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3fffffff
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000
+#define AZALIA_F2_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0xf
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0xf0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x200
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x400
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x1
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0xff
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0xff00
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0xff0000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xff000000
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1_MASK 0xff
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_2__SUBSYSTEM_ID_BYTE1__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2_MASK 0xff
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_3__SUBSYSTEM_ID_BYTE2__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3_MASK 0xff
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_4__SUBSYSTEM_ID_BYTE3__SHIFT 0x0
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x7f
+#define AZALIA_F2_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xffffffff
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xffffffff
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT_MASK 0x7
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT_MASK 0x70
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT__SHIFT 0x4
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW_MASK 0x3f
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xffffffff
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0xfff
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x1f0000
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xffffffff
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3fffffff
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0xf
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0xf0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x200
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x400
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x1
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0xff
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0xff00
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0xff0000
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xff000000
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x7f
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_MASK 0x7
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x10
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_MASK 0x7
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x10
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_DEBUG__DISABLE_FORMAT_COMPARISON_MASK 0x3f
+#define AZALIA_F0_CODEC_DEBUG__DISABLE_FORMAT_COMPARISON__SHIFT 0x0
+#define AZALIA_F0_CODEC_DEBUG__CODEC_DEBUG_MASK 0xffffffc0
+#define AZALIA_F0_CODEC_DEBUG__CODEC_DEBUG__SHIFT 0x6
+#define AZALIA_F0_GTC_GROUP_OFFSET0__GTC_GROUP_OFFSET0_MASK 0xffffffff
+#define AZALIA_F0_GTC_GROUP_OFFSET0__GTC_GROUP_OFFSET0__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET1__GTC_GROUP_OFFSET1_MASK 0xffffffff
+#define AZALIA_F0_GTC_GROUP_OFFSET1__GTC_GROUP_OFFSET1__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET2__GTC_GROUP_OFFSET2_MASK 0xffffffff
+#define AZALIA_F0_GTC_GROUP_OFFSET2__GTC_GROUP_OFFSET2__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET3__GTC_GROUP_OFFSET3_MASK 0xffffffff
+#define AZALIA_F0_GTC_GROUP_OFFSET3__GTC_GROUP_OFFSET3__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET4__GTC_GROUP_OFFSET4_MASK 0xffffffff
+#define AZALIA_F0_GTC_GROUP_OFFSET4__GTC_GROUP_OFFSET4__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET5__GTC_GROUP_OFFSET5_MASK 0xffffffff
+#define AZALIA_F0_GTC_GROUP_OFFSET5__GTC_GROUP_OFFSET5__SHIFT 0x0
+#define AZALIA_F0_GTC_GROUP_OFFSET6__GTC_GROUP_OFFSET6_MASK 0xffffffff
+#define AZALIA_F0_GTC_GROUP_OFFSET6__GTC_GROUP_OFFSET6__SHIFT 0x0
+#define GLOBAL_CAPABILITIES__SIXTY_FOUR_BIT_ADDRESS_SUPPORTED_MASK 0x1
+#define GLOBAL_CAPABILITIES__SIXTY_FOUR_BIT_ADDRESS_SUPPORTED__SHIFT 0x0
+#define GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS_MASK 0x6
+#define GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS__SHIFT 0x1
+#define GLOBAL_CAPABILITIES__NUMBER_OF_BIDIRECTIONAL_STREAMS_SUPPORTED_MASK 0xf8
+#define GLOBAL_CAPABILITIES__NUMBER_OF_BIDIRECTIONAL_STREAMS_SUPPORTED__SHIFT 0x3
+#define GLOBAL_CAPABILITIES__NUMBER_OF_INPUT_STREAMS_SUPPORTED_MASK 0xf00
+#define GLOBAL_CAPABILITIES__NUMBER_OF_INPUT_STREAMS_SUPPORTED__SHIFT 0x8
+#define GLOBAL_CAPABILITIES__NUMBER_OF_OUTPUT_STREAMS_SUPPORTED_MASK 0xf000
+#define GLOBAL_CAPABILITIES__NUMBER_OF_OUTPUT_STREAMS_SUPPORTED__SHIFT 0xc
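Every definition in this generated header comes as a MASK/SHIFT pair: ANDing a register value with <REG>__<FIELD>_MASK isolates the field, and shifting right by <REG>__<FIELD>__SHIFT normalizes it. A minimal sketch of the usual accessor pattern follows; the REG_GET_FIELD/REG_SET_FIELD names mirror helpers used elsewhere in amdgpu, but they are spelled out here purely as an illustration, not as part of this patch.

#include <stdint.h>

/* Token-paste the <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT pairs above. */
#define REG_FIELD_MASK(reg, field)   reg##__##field##_MASK
#define REG_FIELD_SHIFT(reg, field)  reg##__##field##__SHIFT

/* Extract a field from a register value. */
#define REG_GET_FIELD(val, reg, field) \
	(((val) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

/* Read-modify-write helper: clear the field, then OR in the new value. */
#define REG_SET_FIELD(val, reg, field, fv) \
	(((val) & ~REG_FIELD_MASK(reg, field)) | \
	 (((uint32_t)(fv) << REG_FIELD_SHIFT(reg, field)) & \
	  REG_FIELD_MASK(reg, field)))

/* Example: bits 15:12 of GLOBAL_CAPABILITIES give the output stream count. */
static inline uint32_t hda_output_streams(uint32_t gcap)
{
	return REG_GET_FIELD(gcap, GLOBAL_CAPABILITIES,
			     NUMBER_OF_OUTPUT_STREAMS_SUPPORTED);
}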
+#define MINOR_VERSION__MINOR_VERSION_MASK 0xff
+#define MINOR_VERSION__MINOR_VERSION__SHIFT 0x0
+#define MAJOR_VERSION__MAJOR_VERSION_MASK 0xff
+#define MAJOR_VERSION__MAJOR_VERSION__SHIFT 0x0
+#define OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY_MASK 0xffff
+#define OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY_MASK 0xffff
+#define INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define GLOBAL_CONTROL__CONTROLLER_RESET_MASK 0x1
+#define GLOBAL_CONTROL__CONTROLLER_RESET__SHIFT 0x0
+#define GLOBAL_CONTROL__FLUSH_CONTROL_MASK 0x2
+#define GLOBAL_CONTROL__FLUSH_CONTROL__SHIFT 0x1
+#define GLOBAL_CONTROL__ACCEPT_UNSOLICITED_RESPONSE_ENABLE_MASK 0x100
+#define GLOBAL_CONTROL__ACCEPT_UNSOLICITED_RESPONSE_ENABLE__SHIFT 0x8
+#define WAKE_ENABLE__SDIN_WAKE_ENABLE_FLAG_MASK 0x1
+#define WAKE_ENABLE__SDIN_WAKE_ENABLE_FLAG__SHIFT 0x0
+#define STATE_CHANGE_STATUS__STATE_CHANGE_STATUS_MASK 0x1
+#define STATE_CHANGE_STATUS__STATE_CHANGE_STATUS__SHIFT 0x0
+#define GLOBAL_STATUS__FLUSH_STATUS_MASK 0x2
+#define GLOBAL_STATUS__FLUSH_STATUS__SHIFT 0x1
+#define OUTPUT_STREAM_PAYLOAD_CAPABILITY__OUTSTRMPAY_MASK 0xffff
+#define OUTPUT_STREAM_PAYLOAD_CAPABILITY__OUTSTRMPAY__SHIFT 0x0
+#define INPUT_STREAM_PAYLOAD_CAPABILITY__INSTRMPAY_MASK 0xffff
+#define INPUT_STREAM_PAYLOAD_CAPABILITY__INSTRMPAY__SHIFT 0x0
+#define INTERRUPT_CONTROL__STREAM_0_INTERRUPT_ENABLE_MASK 0x1
+#define INTERRUPT_CONTROL__STREAM_0_INTERRUPT_ENABLE__SHIFT 0x0
+#define INTERRUPT_CONTROL__STREAM_1_INTERRUPT_ENABLE_MASK 0x2
+#define INTERRUPT_CONTROL__STREAM_1_INTERRUPT_ENABLE__SHIFT 0x1
+#define INTERRUPT_CONTROL__STREAM_2_INTERRUPT_ENABLE_MASK 0x4
+#define INTERRUPT_CONTROL__STREAM_2_INTERRUPT_ENABLE__SHIFT 0x2
+#define INTERRUPT_CONTROL__STREAM_3_INTERRUPT_ENABLE_MASK 0x8
+#define INTERRUPT_CONTROL__STREAM_3_INTERRUPT_ENABLE__SHIFT 0x3
+#define INTERRUPT_CONTROL__STREAM_4_INTERRUPT_ENABLE_MASK 0x10
+#define INTERRUPT_CONTROL__STREAM_4_INTERRUPT_ENABLE__SHIFT 0x4
+#define INTERRUPT_CONTROL__STREAM_5_INTERRUPT_ENABLE_MASK 0x20
+#define INTERRUPT_CONTROL__STREAM_5_INTERRUPT_ENABLE__SHIFT 0x5
+#define INTERRUPT_CONTROL__STREAM_6_INTERRUPT_ENABLE_MASK 0x40
+#define INTERRUPT_CONTROL__STREAM_6_INTERRUPT_ENABLE__SHIFT 0x6
+#define INTERRUPT_CONTROL__STREAM_7_INTERRUPT_ENABLE_MASK 0x80
+#define INTERRUPT_CONTROL__STREAM_7_INTERRUPT_ENABLE__SHIFT 0x7
+#define INTERRUPT_CONTROL__STREAM_8_INTERRUPT_ENABLE_MASK 0x100
+#define INTERRUPT_CONTROL__STREAM_8_INTERRUPT_ENABLE__SHIFT 0x8
+#define INTERRUPT_CONTROL__STREAM_9_INTERRUPT_ENABLE_MASK 0x200
+#define INTERRUPT_CONTROL__STREAM_9_INTERRUPT_ENABLE__SHIFT 0x9
+#define INTERRUPT_CONTROL__STREAM_10_INTERRUPT_ENABLE_MASK 0x400
+#define INTERRUPT_CONTROL__STREAM_10_INTERRUPT_ENABLE__SHIFT 0xa
+#define INTERRUPT_CONTROL__STREAM_11_INTERRUPT_ENABLE_MASK 0x800
+#define INTERRUPT_CONTROL__STREAM_11_INTERRUPT_ENABLE__SHIFT 0xb
+#define INTERRUPT_CONTROL__STREAM_12_INTERRUPT_ENABLE_MASK 0x1000
+#define INTERRUPT_CONTROL__STREAM_12_INTERRUPT_ENABLE__SHIFT 0xc
+#define INTERRUPT_CONTROL__STREAM_13_INTERRUPT_ENABLE_MASK 0x2000
+#define INTERRUPT_CONTROL__STREAM_13_INTERRUPT_ENABLE__SHIFT 0xd
+#define INTERRUPT_CONTROL__STREAM_14_INTERRUPT_ENABLE_MASK 0x4000
+#define INTERRUPT_CONTROL__STREAM_14_INTERRUPT_ENABLE__SHIFT 0xe
+#define INTERRUPT_CONTROL__STREAM_15_INTERRUPT_ENABLE_MASK 0x8000
+#define INTERRUPT_CONTROL__STREAM_15_INTERRUPT_ENABLE__SHIFT 0xf
+#define INTERRUPT_CONTROL__CONTROLLER_INTERRUPT_ENABLE_MASK 0x40000000
+#define INTERRUPT_CONTROL__CONTROLLER_INTERRUPT_ENABLE__SHIFT 0x1e
+#define INTERRUPT_CONTROL__GLOBAL_INTERRUPT_ENABLE_MASK 0x80000000
+#define INTERRUPT_CONTROL__GLOBAL_INTERRUPT_ENABLE__SHIFT 0x1f
+#define INTERRUPT_STATUS__STREAM_0_INTERRUPT_STATUS_MASK 0x1
+#define INTERRUPT_STATUS__STREAM_0_INTERRUPT_STATUS__SHIFT 0x0
+#define INTERRUPT_STATUS__STREAM_1_INTERRUPT_STATUS_MASK 0x2
+#define INTERRUPT_STATUS__STREAM_1_INTERRUPT_STATUS__SHIFT 0x1
+#define INTERRUPT_STATUS__STREAM_2_INTERRUPT_STATUS_MASK 0x4
+#define INTERRUPT_STATUS__STREAM_2_INTERRUPT_STATUS__SHIFT 0x2
+#define INTERRUPT_STATUS__STREAM_3_INTERRUPT_STATUS_MASK 0x8
+#define INTERRUPT_STATUS__STREAM_3_INTERRUPT_STATUS__SHIFT 0x3
+#define INTERRUPT_STATUS__STREAM_4_INTERRUPT_STATUS_MASK 0x10
+#define INTERRUPT_STATUS__STREAM_4_INTERRUPT_STATUS__SHIFT 0x4
+#define INTERRUPT_STATUS__STREAM_5_INTERRUPT_STATUS_MASK 0x20
+#define INTERRUPT_STATUS__STREAM_5_INTERRUPT_STATUS__SHIFT 0x5
+#define INTERRUPT_STATUS__STREAM_6_INTERRUPT_STATUS_MASK 0x40
+#define INTERRUPT_STATUS__STREAM_6_INTERRUPT_STATUS__SHIFT 0x6
+#define INTERRUPT_STATUS__STREAM_7_INTERRUPT_STATUS_MASK 0x80
+#define INTERRUPT_STATUS__STREAM_7_INTERRUPT_STATUS__SHIFT 0x7
+#define INTERRUPT_STATUS__STREAM_8_INTERRUPT_STATUS_MASK 0x100
+#define INTERRUPT_STATUS__STREAM_8_INTERRUPT_STATUS__SHIFT 0x8
+#define INTERRUPT_STATUS__STREAM_9_INTERRUPT_STATUS_MASK 0x200
+#define INTERRUPT_STATUS__STREAM_9_INTERRUPT_STATUS__SHIFT 0x9
+#define INTERRUPT_STATUS__STREAM_10_INTERRUPT_STATUS_MASK 0x400
+#define INTERRUPT_STATUS__STREAM_10_INTERRUPT_STATUS__SHIFT 0xa
+#define INTERRUPT_STATUS__STREAM_11_INTERRUPT_STATUS_MASK 0x800
+#define INTERRUPT_STATUS__STREAM_11_INTERRUPT_STATUS__SHIFT 0xb
+#define INTERRUPT_STATUS__STREAM_12_INTERRUPT_STATUS_MASK 0x1000
+#define INTERRUPT_STATUS__STREAM_12_INTERRUPT_STATUS__SHIFT 0xc
+#define INTERRUPT_STATUS__STREAM_13_INTERRUPT_STATUS_MASK 0x2000
+#define INTERRUPT_STATUS__STREAM_13_INTERRUPT_STATUS__SHIFT 0xd
+#define INTERRUPT_STATUS__STREAM_14_INTERRUPT_STATUS_MASK 0x4000
+#define INTERRUPT_STATUS__STREAM_14_INTERRUPT_STATUS__SHIFT 0xe
+#define INTERRUPT_STATUS__STREAM_15_INTERRUPT_STATUS_MASK 0x8000
+#define INTERRUPT_STATUS__STREAM_15_INTERRUPT_STATUS__SHIFT 0xf
+#define INTERRUPT_STATUS__CONTROLLER_INTERRUPT_STATUS_MASK 0x40000000
+#define INTERRUPT_STATUS__CONTROLLER_INTERRUPT_STATUS__SHIFT 0x1e
+#define INTERRUPT_STATUS__GLOBAL_INTERRUPT_STATUS_MASK 0x80000000
+#define INTERRUPT_STATUS__GLOBAL_INTERRUPT_STATUS__SHIFT 0x1f
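INTERRUPT_STATUS packs one status bit per stream into bits 15:0, with the controller and global summary bits at 30 and 31, so a handler can test the global bit once and then walk the low 16 bits. A hedged sketch of that dispatch loop; the read/handle stubs are placeholders, not a driver API from this patch.

#include <stdint.h>

#define HDA_MAX_STREAMS 16 /* STREAM_0..STREAM_15 status bits above */

/* Placeholders for the platform's MMIO read and per-stream handler. */
extern uint32_t hda_read_interrupt_status(void);
extern void hda_handle_stream_irq(unsigned int stream);

static void hda_dispatch_stream_irqs(void)
{
	uint32_t status = hda_read_interrupt_status();
	unsigned int i;

	/* GLOBAL_INTERRUPT_STATUS (bit 31) summarizes all sources. */
	if (!(status & INTERRUPT_STATUS__GLOBAL_INTERRUPT_STATUS_MASK))
		return;

	/* Stream bits occupy 15:0, one per stream descriptor. */
	for (i = 0; i < HDA_MAX_STREAMS; i++)
		if (status & (1u << i))
			hda_handle_stream_irq(i);
}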
+#define WALL_CLOCK_COUNTER__WALL_CLOCK_COUNTER_MASK 0xffffffff
+#define WALL_CLOCK_COUNTER__WALL_CLOCK_COUNTER__SHIFT 0x0
+#define STREAM_SYNCHRONIZATION__STREAM_0_SYNCHRONIZATION_MASK 0x1
+#define STREAM_SYNCHRONIZATION__STREAM_0_SYNCHRONIZATION__SHIFT 0x0
+#define STREAM_SYNCHRONIZATION__STREAM_1_SYNCHRONIZATION_MASK 0x2
+#define STREAM_SYNCHRONIZATION__STREAM_1_SYNCHRONIZATION__SHIFT 0x1
+#define STREAM_SYNCHRONIZATION__STREAM_2_SYNCHRONIZATION_MASK 0x4
+#define STREAM_SYNCHRONIZATION__STREAM_2_SYNCHRONIZATION__SHIFT 0x2
+#define STREAM_SYNCHRONIZATION__STREAM_3_SYNCHRONIZATION_MASK 0x8
+#define STREAM_SYNCHRONIZATION__STREAM_3_SYNCHRONIZATION__SHIFT 0x3
+#define STREAM_SYNCHRONIZATION__STREAM_4_SYNCHRONIZATION_MASK 0x10
+#define STREAM_SYNCHRONIZATION__STREAM_4_SYNCHRONIZATION__SHIFT 0x4
+#define STREAM_SYNCHRONIZATION__STREAM_5_SYNCHRONIZATION_MASK 0x20
+#define STREAM_SYNCHRONIZATION__STREAM_5_SYNCHRONIZATION__SHIFT 0x5
+#define STREAM_SYNCHRONIZATION__STREAM_6_SYNCHRONIZATION_MASK 0x40
+#define STREAM_SYNCHRONIZATION__STREAM_6_SYNCHRONIZATION__SHIFT 0x6
+#define STREAM_SYNCHRONIZATION__STREAM_7_SYNCHRONIZATION_MASK 0x80
+#define STREAM_SYNCHRONIZATION__STREAM_7_SYNCHRONIZATION__SHIFT 0x7
+#define STREAM_SYNCHRONIZATION__STREAM_8_SYNCHRONIZATION_MASK 0x100
+#define STREAM_SYNCHRONIZATION__STREAM_8_SYNCHRONIZATION__SHIFT 0x8
+#define STREAM_SYNCHRONIZATION__STREAM_9_SYNCHRONIZATION_MASK 0x200
+#define STREAM_SYNCHRONIZATION__STREAM_9_SYNCHRONIZATION__SHIFT 0x9
+#define STREAM_SYNCHRONIZATION__STREAM_10_SYNCHRONIZATION_MASK 0x400
+#define STREAM_SYNCHRONIZATION__STREAM_10_SYNCHRONIZATION__SHIFT 0xa
+#define STREAM_SYNCHRONIZATION__STREAM_11_SYNCHRONIZATION_MASK 0x800
+#define STREAM_SYNCHRONIZATION__STREAM_11_SYNCHRONIZATION__SHIFT 0xb
+#define STREAM_SYNCHRONIZATION__STREAM_12_SYNCHRONIZATION_MASK 0x1000
+#define STREAM_SYNCHRONIZATION__STREAM_12_SYNCHRONIZATION__SHIFT 0xc
+#define STREAM_SYNCHRONIZATION__STREAM_13_SYNCHRONIZATION_MASK 0x2000
+#define STREAM_SYNCHRONIZATION__STREAM_13_SYNCHRONIZATION__SHIFT 0xd
+#define STREAM_SYNCHRONIZATION__STREAM_14_SYNCHRONIZATION_MASK 0x4000
+#define STREAM_SYNCHRONIZATION__STREAM_14_SYNCHRONIZATION__SHIFT 0xe
+#define STREAM_SYNCHRONIZATION__STREAM_15_SYNCHRONIZATION_MASK 0x8000
+#define STREAM_SYNCHRONIZATION__STREAM_15_SYNCHRONIZATION__SHIFT 0xf
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x7f
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_ADDRESS_MASK 0xffffff80
+#define CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define CORB_UPPER_BASE_ADDRESS__CORB_UPPER_BASE_ADDRESS_MASK 0xffffffff
+#define CORB_UPPER_BASE_ADDRESS__CORB_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define CORB_WRITE_POINTER__CORB_WRITE_POINTER_MASK 0xff
+#define CORB_WRITE_POINTER__CORB_WRITE_POINTER__SHIFT 0x0
+#define CORB_READ_POINTER__CORB_READ_POINTER_MASK 0xff
+#define CORB_READ_POINTER__CORB_READ_POINTER__SHIFT 0x0
+#define CORB_READ_POINTER__CORB_READ_POINTER_RESET_MASK 0x8000
+#define CORB_READ_POINTER__CORB_READ_POINTER_RESET__SHIFT 0xf
+#define CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE_MASK 0x1
+#define CORB_CONTROL__CORB_MEMORY_ERROR_INTERRUPT_ENABLE__SHIFT 0x0
+#define CORB_CONTROL__ENABLE_CORB_DMA_ENGINE_MASK 0x2
+#define CORB_CONTROL__ENABLE_CORB_DMA_ENGINE__SHIFT 0x1
+#define CORB_STATUS__CORB_MEMORY_ERROR_INDICATION_MASK 0x1
+#define CORB_STATUS__CORB_MEMORY_ERROR_INDICATION__SHIFT 0x0
+#define CORB_SIZE__CORB_SIZE_MASK 0x3
+#define CORB_SIZE__CORB_SIZE__SHIFT 0x0
+#define CORB_SIZE__CORB_SIZE_CAPABILITY_MASK 0xf0
+#define CORB_SIZE__CORB_SIZE_CAPABILITY__SHIFT 0x4
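CORB_LOWER_BASE_ADDRESS leaves bits 6:0 unimplemented, so the command ring base must be 128-byte aligned and the 64-bit DMA address is split across the lower and upper registers. A sketch of that split, assuming hypothetical MMIO write helpers and register offsets that are not defined in this patch:

#include <stdint.h>

/* Hypothetical MMIO plumbing; the real driver wiring differs. */
extern void hda_write_reg(uint32_t reg_offset, uint32_t value);
extern const uint32_t CORB_LOWER_BASE_ADDRESS_OFFSET;
extern const uint32_t CORB_UPPER_BASE_ADDRESS_OFFSET;

static void hda_set_corb_base(uint64_t dma_addr)
{
	/* Bits 6:0 are CORB_LOWER_BASE_UNIMPLEMENTED_BITS: the ring must
	 * be 128-byte aligned, so masking them off loses nothing. */
	uint32_t lo = (uint32_t)dma_addr &
		      CORB_LOWER_BASE_ADDRESS__CORB_LOWER_BASE_ADDRESS_MASK;
	uint32_t hi = (uint32_t)(dma_addr >> 32);

	hda_write_reg(CORB_LOWER_BASE_ADDRESS_OFFSET, lo);
	hda_write_reg(CORB_UPPER_BASE_ADDRESS_OFFSET, hi);
}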
+#define RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x7f
+#define RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS_MASK 0xffffff80
+#define RIRB_LOWER_BASE_ADDRESS__RIRB_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS_MASK 0xffffffff
+#define RIRB_UPPER_BASE_ADDRESS__RIRB_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_MASK 0xff
+#define RIRB_WRITE_POINTER__RIRB_WRITE_POINTER__SHIFT 0x0
+#define RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET_MASK 0x8000
+#define RIRB_WRITE_POINTER__RIRB_WRITE_POINTER_RESET__SHIFT 0xf
+#define RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT_MASK 0xff
+#define RESPONSE_INTERRUPT_COUNT__N_RESPONSE_INTERRUPT_COUNT__SHIFT 0x0
+#define RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL_MASK 0x1
+#define RIRB_CONTROL__RESPONSE_INTERRUPT_CONTROL__SHIFT 0x0
+#define RIRB_CONTROL__RIRB_DMA_ENABLE_MASK 0x2
+#define RIRB_CONTROL__RIRB_DMA_ENABLE__SHIFT 0x1
+#define RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL_MASK 0x4
+#define RIRB_CONTROL__RESPONSE_OVERRUN_INTERRUPT_CONTROL__SHIFT 0x2
+#define RIRB_STATUS__RESPONSE_INTERRUPT_MASK 0x1
+#define RIRB_STATUS__RESPONSE_INTERRUPT__SHIFT 0x0
+#define RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS_MASK 0x4
+#define RIRB_STATUS__RESPONSE_OVERRUN_INTERRUPT_STATUS__SHIFT 0x2
+#define RIRB_SIZE__RIRB_SIZE_MASK 0x3
+#define RIRB_SIZE__RIRB_SIZE__SHIFT 0x0
+#define RIRB_SIZE__RIRB_SIZE_CAPABILITY_MASK 0xf0
+#define RIRB_SIZE__RIRB_SIZE_CAPABILITY__SHIFT 0x4
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD_MASK 0xfffffff
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD__SHIFT 0x0
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS_MASK 0xf0000000
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS__SHIFT 0x1c
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0xffff
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xffffffff
+#define IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ_MASK 0xffffffff
+#define IMMEDIATE_RESPONSE_INPUT_INTERFACE__IMMEDIATE_RESPONSE_READ__SHIFT 0x0
+#define IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY_MASK 0x1
+#define IMMEDIATE_COMMAND_STATUS__IMMEDIATE_COMMAND_BUSY__SHIFT 0x0
+#define IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID_MASK 0x2
+#define IMMEDIATE_COMMAND_STATUS__IMMEDIATE_RESULT_VALID__SHIFT 0x1
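IMMEDIATE_COMMAND_OUTPUT_INTERFACE carries the 28-bit verb-plus-payload in bits 27:0 and the codec address in bits 31:28, gated by the BUSY and RESULT_VALID bits of IMMEDIATE_COMMAND_STATUS. A sketch of composing one command word from those fields; the helper name is illustrative only.

#include <stdint.h>

/* Pack codec address and verb/payload per the field layout above. */
static uint32_t hda_immediate_cmd(uint32_t codec_addr, uint32_t verb_payload)
{
	uint32_t cmd = 0;

	cmd |= (verb_payload <<
		IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD__SHIFT) &
	       IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_VERB_AND_PAYLOAD_MASK;
	cmd |= (codec_addr <<
		IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS__SHIFT) &
	       IMMEDIATE_COMMAND_OUTPUT_INTERFACE__IMMEDIATE_COMMAND_WRITE_CODEC_ADDRESS_MASK;
	return cmd;
}

/* A sender would poll IMMEDIATE_COMMAND_BUSY (bit 0 of the status register)
 * before writing the command, and IMMEDIATE_RESULT_VALID (bit 1) after. */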
+#define DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE_MASK 0x1
+#define DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_BUFFER_ENABLE__SHIFT 0x0
+#define DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS_MASK 0x7e
+#define DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_UNIMPLEMENTED_BITS__SHIFT 0x1
+#define DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS_MASK 0xffffff80
+#define DMA_POSITION_LOWER_BASE_ADDRESS__DMA_POSITION_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS_MASK 0xffffffff
+#define DMA_POSITION_UPPER_BASE_ADDRESS__DMA_POSITION_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS_MASK 0xffffffff
+#define WALL_CLOCK_COUNTER_ALIAS__WALL_CLOCK_COUNTER_ALIAS__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STREAM_RESET_MASK 0x1
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STREAM_RESET__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STREAM_RUN_MASK 0x2
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STREAM_RUN__SHIFT 0x1
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__INTERRUPT_ON_COMPLETION_ENABLE_MASK 0x4
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__INTERRUPT_ON_COMPLETION_ENABLE__SHIFT 0x2
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__FIFO_ERROR_INTERRUPT_ENABLE_MASK 0x8
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__FIFO_ERROR_INTERRUPT_ENABLE__SHIFT 0x3
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__DESCRIPTOR_ERROR_INTERRUPT_ENABLE_MASK 0x10
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__DESCRIPTOR_ERROR_INTERRUPT_ENABLE__SHIFT 0x4
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STRIPE_CONTROL_MASK 0x30000
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STRIPE_CONTROL__SHIFT 0x10
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__TRAFFIC_PRIORITY_MASK 0x40000
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__TRAFFIC_PRIORITY__SHIFT 0x12
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STREAM_NUMBER_MASK 0xf00000
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__STREAM_NUMBER__SHIFT 0x14
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__BUFFER_COMPLETION_INTERRUPT_STATUS_MASK 0x4000000
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__BUFFER_COMPLETION_INTERRUPT_STATUS__SHIFT 0x1a
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__FIFO_ERROR_MASK 0x8000000
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__FIFO_ERROR__SHIFT 0x1b
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__DESCRIPTOR_ERROR_MASK 0x10000000
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__DESCRIPTOR_ERROR__SHIFT 0x1c
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__FIFO_READY_MASK 0x20000000
+#define OUTPUT_STREAM_DESCRIPTOR_CONTROL_AND_STATUS__FIFO_READY__SHIFT 0x1d
+#define OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER__LINK_POSITION_IN_BUFFER_MASK 0xffffffff
+#define OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER__LINK_POSITION_IN_BUFFER__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH__CYCLIC_BUFFER_LENGTH_MASK 0xffffffff
+#define OUTPUT_STREAM_DESCRIPTOR_CYCLIC_BUFFER_LENGTH__CYCLIC_BUFFER_LENGTH__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX__LAST_VALID_INDEX_MASK 0xff
+#define OUTPUT_STREAM_DESCRIPTOR_LAST_VALID_INDEX__LAST_VALID_INDEX__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE__FIFO_SIZE_MASK 0xffff
+#define OUTPUT_STREAM_DESCRIPTOR_FIFO_SIZE__FIFO_SIZE__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__NUMBER_OF_CHANNELS_MASK 0xf
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__BITS_PER_SAMPLE_MASK 0x70
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x700
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x3800
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__SAMPLE_BASE_RATE_MASK 0x4000
+#define OUTPUT_STREAM_DESCRIPTOR_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
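OUTPUT_STREAM_DESCRIPTOR_FORMAT follows the HDA stream-format layout: channel count in bits 3:0, bits-per-sample code in 6:4, and the sample rate expressed as divisor (10:8), multiple (13:11), and base rate (bit 14). Assuming the standard HDA encodings (channels stored as n-1, code 001 = 16-bit, base 0 = 48 kHz with multiplier and divisor of 1), a 48 kHz 16-bit stereo format word would look like this:

#include <stdint.h>

static uint32_t hda_format_48k_16bit_stereo(void)
{
	uint32_t fmt = 0;

	/* Channels are encoded as count minus one: stereo = 1. */
	fmt |= (2u - 1) << OUTPUT_STREAM_DESCRIPTOR_FORMAT__NUMBER_OF_CHANNELS__SHIFT;
	/* Bits-per-sample code 001 = 16-bit in the HDA spec encoding. */
	fmt |= 1u << OUTPUT_STREAM_DESCRIPTOR_FORMAT__BITS_PER_SAMPLE__SHIFT;
	/* SAMPLE_BASE_RATE/MULTIPLE/DIVISOR all stay 0: 48 kHz x1 /1. */
	return fmt; /* 0x0011 */
}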
+#define OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS__BUFFER_DESCRIPTOR_LIST_LOWER_BASE_ADDRESS_UNIMPLEMENTED_BITS_MASK 0x7f
+#define OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS__BUFFER_DESCRIPTOR_LIST_LOWER_BASE_ADDRESS_UNIMPLEMENTED_BITS__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS__BUFFER_DESCRIPTOR_LIST_LOWER_BASE_ADDRESS_MASK 0xffffff80
+#define OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_LOWER_BASE_ADDRESS__BUFFER_DESCRIPTOR_LIST_LOWER_BASE_ADDRESS__SHIFT 0x7
+#define OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS__BUFFER_DESCRIPTOR_LIST_UPPER_BASE_ADDRESS_MASK 0xffffffff
+#define OUTPUT_STREAM_DESCRIPTOR_BDL_POINTER_UPPER_BASE_ADDRESS__BUFFER_DESCRIPTOR_LIST_UPPER_BASE_ADDRESS__SHIFT 0x0
+#define OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS__LINK_POSITION_IN_BUFFER_ALIAS_MASK 0xffffffff
+#define OUTPUT_STREAM_DESCRIPTOR_LINK_POSITION_IN_CURRENT_BUFFER_ALIAS__LINK_POSITION_IN_BUFFER_ALIAS__SHIFT 0x0
+#define AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x1ffff
+#define AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xffffffff
+#define AZENDPOINT_IMMEDIATE_COMMAND_OUTPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x10
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0xfff
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x1f0000
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xffffffff
+#define AZALIA_F2_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x70
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x700
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x3800
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x4000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x8000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R_MASK 0x8000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_R__SHIFT 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0xf
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x2
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x8
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x10
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x20
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x40
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x80
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x7f00
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x800000
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC_MASK 0x7f
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_2__CC__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE_MASK 0x80
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER_3__KEEPALIVE__SHIFT 0x7
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x3
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x700000
+#define AZALIA_F2_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0xff
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x2
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x70
+#define AZALIA_F2_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F2_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x4
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x10
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x20
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x40
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x80
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0xff00
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x10000
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x1000000
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_PARAMETER_CONNECTION_LIST_LENGTH__CONNECTION_LIST_LENGTH__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONNECTION_LIST_ENTRY__CONNECTION_LIST_ENTRY__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x40
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x3f
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x80
+#define AZALIA_F2_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7fffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0xf
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0xf00
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0xf000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0xf0000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0xf00000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3f000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xc0000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC_MASK 0xf
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE_MASK 0xf
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION_MASK 0x3f
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY_MASK 0xc0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY__SHIFT 0x6
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION_MASK 0x7f
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION_MASK 0x100
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__HDMI_CONNECTION__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION_MASK 0x200
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__DP_CONNECTION__SHIFT 0x9
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO_MASK 0xfc00
+#define AZALIA_F2_CODEC_PIN_CONTROL_RESPONSE_SPEAKER_ALLOCATION__EXTRA_CONNECTION_INFO__SHIFT 0xa
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0xff
+#define AZALIA_F2_CODEC_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LFE_PLAYBACK_LEVEL_MASK 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LFE_PLAYBACK_LEVEL__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT_MASK 0x78
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__LEVEL_SHIFT__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT_MASK 0x80
+#define AZALIA_F2_CODEC_PIN_CONTROL_DOWN_MIX_INFO__DOWN_MIX_INHIBIT__SHIFT 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE_MASK 0x78
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__FORMAT_CODE__SHIFT 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR_DATA__DESCRIPTOR__SHIFT 0x0
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x7
+#define AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL01_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL23_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL45_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL67_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC_MASK 0xff
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC_MASK 0xff00
+#define AZALIA_F2_CODEC_PIN_CONTROL_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE_MASK 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX_MASK 0xff
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_INDEX__SINK_INFO_INDEX__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_AUDIO_SINK_INFO_DATA__SINK_DATA__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID_MASK 0xffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_MANUFACTURER_ID__MANUFACTURER_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID_MASK 0xffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_PRODUCT_ID__PRODUCT_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN_MASK 0xff
+#define AZALIA_F2_CODEC_PIN_CONTROL_SINK_DESCRIPTION_LEN__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID0__PORTID__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_PORTID1__PORTID__SHIFT 0x0
+#define SINK_DESCRIPTION0__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION0__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION1__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION1__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION2__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION2__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION3__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION3__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION4__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION4__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION5__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION5__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION6__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION6__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION7__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION7__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION8__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION8__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION9__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION9__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION10__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION10__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION11__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION11__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION12__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION12__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION13__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION13__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION14__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION14__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION15__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION15__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION16__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION16__DESCRIPTION__SHIFT 0x0
+#define SINK_DESCRIPTION17__DESCRIPTION_MASK 0xff
+#define SINK_DESCRIPTION17__DESCRIPTION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x3
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x3c
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x3
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x78
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x80
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x3f
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x40
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0xf
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x10
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0xf
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x10
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x60
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x80
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0xf
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0xf0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0xf
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0xf0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0xf
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0xf0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0xf
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0xf0
+#define AZALIA_F2_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0xff00
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xffffffff
+#define AZALIA_F2_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0xff
+#define AZALIA_F2_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x2
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0xff00
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0xff0000
+#define AZALIA_F2_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x3
+#define AZALIA_F2_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x10
+#define AZALIA_F2_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING_MASK 0x1
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING__SHIFT 0x0
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE_MASK 0x10
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE__SHIFT 0x4
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE_MASK 0xffff
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE__SHIFT 0x0
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE_MASK 0xffff0000
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE__SHIFT 0x10
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO_MASK 0x300
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO__SHIFT 0x8
+#define AZALIA_SCLK_CONTROL__AUDIO_SCLK_CONTROL_MASK 0x30
+#define AZALIA_SCLK_CONTROL__AUDIO_SCLK_CONTROL__SHIFT 0x4
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE_MASK 0xffffffff
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE__SHIFT 0x0
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP_MASK 0x3
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP_MASK 0xc
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS_MASK 0x30
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS_MASK 0xc0
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD_MASK 0x10000
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD__SHIFT 0x10
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL_MASK 0x20000
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL__SHIFT 0x11
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP_MASK 0x3
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP_MASK 0xc
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS_MASK 0x30
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS_MASK 0xc0
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP_MASK 0x1
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP__SHIFT 0x0
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP_MASK 0x10
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP__SHIFT 0x4
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER_MASK 0x1e0
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER__SHIFT 0x5
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP_MASK 0x1
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS_MASK 0x10
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER__APPLICATION_POSITION_IN_CYCLIC_BUFFER_MASK 0xffffffff
+#define AZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER__APPLICATION_POSITION_IN_CYCLIC_BUFFER__SHIFT 0x0
+#define AZALIA_CYCLIC_BUFFER_SYNC__CYCLIC_BUFFER_SYNC_ENABLE_MASK 0x1
+#define AZALIA_CYCLIC_BUFFER_SYNC__CYCLIC_BUFFER_SYNC_ENABLE__SHIFT 0x0
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS_MASK 0x6
+#define AZALIA_GLOBAL_CAPABILITIES__NUMBER_OF_SERIAL_DATA_OUTPUT_SIGNALS__SHIFT 0x1
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY_MASK 0xffff
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY_MASK 0xffff0000
+#define AZALIA_OUTPUT_PAYLOAD_CAPABILITY__OUTSTRMPAY__SHIFT 0x10
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL_MASK 0xff
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__LATENCY_HIDING_LEVEL__SHIFT 0x0
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE_MASK 0x100
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__SYS_MEM_ACTIVE_ENABLE__SHIFT 0x8
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__INPUT_LATENCY_HIDING_LEVEL_MASK 0xff0000
+#define AZALIA_OUTPUT_STREAM_ARBITER_CONTROL__INPUT_LATENCY_HIDING_LEVEL__SHIFT 0x10
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY_MASK 0xffff
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INPUT_PAYLOAD_CAPABILITY__SHIFT 0x0
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INSTRMPAY_MASK 0xffff0000
+#define AZALIA_INPUT_PAYLOAD_CAPABILITY__INSTRMPAY__SHIFT 0x10
+#define AZALIA_CONTROLLER_DEBUG__CONTROLLER_DEBUG_MASK 0xffffffff
+#define AZALIA_CONTROLLER_DEBUG__CONTROLLER_DEBUG__SHIFT 0x0
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_FORCE_MASK 0x3
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_FORCE__SHIFT 0x0
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_DIS_MASK 0x4
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_DIS__SHIFT 0x2
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_FORCE_MASK 0x18
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_FORCE__SHIFT 0x3
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_DIS_MASK 0x20
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM0_MEM_PWR_DIS__SHIFT 0x5
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_FORCE_MASK 0xc0
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_FORCE__SHIFT 0x6
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_DIS_MASK 0x100
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM1_MEM_PWR_DIS__SHIFT 0x8
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_FORCE_MASK 0x600
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_FORCE__SHIFT 0x9
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_DIS_MASK 0x800
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM2_MEM_PWR_DIS__SHIFT 0xb
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_FORCE_MASK 0x3000
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_FORCE__SHIFT 0xc
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_DIS_MASK 0x4000
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM3_MEM_PWR_DIS__SHIFT 0xe
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_FORCE_MASK 0x18000
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_FORCE__SHIFT 0xf
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_DIS_MASK 0x20000
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM4_MEM_PWR_DIS__SHIFT 0x11
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_FORCE_MASK 0xc0000
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_FORCE__SHIFT 0x12
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_DIS_MASK 0x100000
+#define AZALIA_MEM_PWR_CTRL__AZ_INPUT_STREAM5_MEM_PWR_DIS__SHIFT 0x14
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_MODE_SEL_MASK 0x30000000
+#define AZALIA_MEM_PWR_CTRL__AZ_MEM_PWR_MODE_SEL__SHIFT 0x1c
+#define AZALIA_MEM_PWR_STATUS__AZ_MEM_PWR_STATE_MASK 0x3
+#define AZALIA_MEM_PWR_STATUS__AZ_MEM_PWR_STATE__SHIFT 0x0
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM0_MEM_PWR_STATE_MASK 0xc
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM0_MEM_PWR_STATE__SHIFT 0x2
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM1_MEM_PWR_STATE_MASK 0x30
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM1_MEM_PWR_STATE__SHIFT 0x4
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM2_MEM_PWR_STATE_MASK 0xc0
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM2_MEM_PWR_STATE__SHIFT 0x6
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM3_MEM_PWR_STATE_MASK 0x300
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM3_MEM_PWR_STATE__SHIFT 0x8
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM4_MEM_PWR_STATE_MASK 0xc00
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM4_MEM_PWR_STATE__SHIFT 0xa
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM5_MEM_PWR_STATE_MASK 0x3000
+#define AZALIA_MEM_PWR_STATUS__AZ_INPUT_STREAM5_MEM_PWR_STATE__SHIFT 0xc
+#define DCI_PG_DEBUG_CONFIG__DCI_PG_DBG_EN_MASK 0x1
+#define DCI_PG_DEBUG_CONFIG__DCI_PG_DBG_EN__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_EN_MASK 0x1
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_EN__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_BLOCK_MODE_MASK 0x10
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_INSTANCE_SEL_MASK 0x700
+#define AZALIA_INPUT_CRC0_CONTROL0__INPUT_CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC0_CONTROL1__INPUT_CRC_BLOCK_SIZE_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CONTROL1__INPUT_CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL2__INPUT_CRC_BLOCK_ITERATION_MASK 0xffff
+#define AZALIA_INPUT_CRC0_CONTROL2__INPUT_CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_COMPLETE_MASK 0x1
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE_MASK 0x10
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL_MASK 0x700
+#define AZALIA_INPUT_CRC0_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC0_RESULT__INPUT_CRC_RESULT_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_RESULT__INPUT_CRC_RESULT__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL0__INPUT_CRC_CHANNEL0_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL0__INPUT_CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL1__INPUT_CRC_CHANNEL1_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL1__INPUT_CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL2__INPUT_CRC_CHANNEL2_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL2__INPUT_CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL3__INPUT_CRC_CHANNEL3_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL3__INPUT_CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL4__INPUT_CRC_CHANNEL4_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL4__INPUT_CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL5__INPUT_CRC_CHANNEL5_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL5__INPUT_CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL6__INPUT_CRC_CHANNEL6_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL6__INPUT_CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_INPUT_CRC0_CHANNEL7__INPUT_CRC_CHANNEL7_MASK 0xffffffff
+#define AZALIA_INPUT_CRC0_CHANNEL7__INPUT_CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_EN_MASK 0x1
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_EN__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_BLOCK_MODE_MASK 0x10
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_INSTANCE_SEL_MASK 0x700
+#define AZALIA_INPUT_CRC1_CONTROL0__INPUT_CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC1_CONTROL1__INPUT_CRC_BLOCK_SIZE_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CONTROL1__INPUT_CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL2__INPUT_CRC_BLOCK_ITERATION_MASK 0xffff
+#define AZALIA_INPUT_CRC1_CONTROL2__INPUT_CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_COMPLETE_MASK 0x1
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE_MASK 0x10
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL_MASK 0x700
+#define AZALIA_INPUT_CRC1_CONTROL3__INPUT_CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_INPUT_CRC1_RESULT__INPUT_CRC_RESULT_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_RESULT__INPUT_CRC_RESULT__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL0__INPUT_CRC_CHANNEL0_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL0__INPUT_CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL1__INPUT_CRC_CHANNEL1_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL1__INPUT_CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL2__INPUT_CRC_CHANNEL2_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL2__INPUT_CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL3__INPUT_CRC_CHANNEL3_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL3__INPUT_CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL4__INPUT_CRC_CHANNEL4_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL4__INPUT_CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL5__INPUT_CRC_CHANNEL5_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL5__INPUT_CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL6__INPUT_CRC_CHANNEL6_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL6__INPUT_CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_INPUT_CRC1_CHANNEL7__INPUT_CRC_CHANNEL7_MASK 0xffffffff
+#define AZALIA_INPUT_CRC1_CHANNEL7__INPUT_CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL0__CRC_EN_MASK 0x1
+#define AZALIA_CRC0_CONTROL0__CRC_EN__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL0__CRC_BLOCK_MODE_MASK 0x10
+#define AZALIA_CRC0_CONTROL0__CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_CRC0_CONTROL0__CRC_INSTANCE_SEL_MASK 0x700
+#define AZALIA_CRC0_CONTROL0__CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_CRC0_CONTROL0__CRC_SOURCE_SEL_MASK 0x1000
+#define AZALIA_CRC0_CONTROL0__CRC_SOURCE_SEL__SHIFT 0xc
+#define AZALIA_CRC0_CONTROL1__CRC_BLOCK_SIZE_MASK 0xffffffff
+#define AZALIA_CRC0_CONTROL1__CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL2__CRC_BLOCK_ITERATION_MASK 0xffff
+#define AZALIA_CRC0_CONTROL2__CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL3__CRC_COMPLETE_MASK 0x1
+#define AZALIA_CRC0_CONTROL3__CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_CRC0_CONTROL3__CRC_BLOCK_COMPLETE_PHASE_MASK 0x10
+#define AZALIA_CRC0_CONTROL3__CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_CRC0_CONTROL3__CRC_CHANNEL_RESULT_SEL_MASK 0x700
+#define AZALIA_CRC0_CONTROL3__CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_CRC0_RESULT__CRC_RESULT_MASK 0xffffffff
+#define AZALIA_CRC0_RESULT__CRC_RESULT__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL0__CRC_CHANNEL0_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL0__CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL1__CRC_CHANNEL1_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL1__CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL2__CRC_CHANNEL2_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL2__CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL3__CRC_CHANNEL3_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL3__CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL4__CRC_CHANNEL4_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL4__CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL5__CRC_CHANNEL5_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL5__CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL6__CRC_CHANNEL6_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL6__CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_CRC0_CHANNEL7__CRC_CHANNEL7_MASK 0xffffffff
+#define AZALIA_CRC0_CHANNEL7__CRC_CHANNEL7__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL0__CRC_EN_MASK 0x1
+#define AZALIA_CRC1_CONTROL0__CRC_EN__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL0__CRC_BLOCK_MODE_MASK 0x10
+#define AZALIA_CRC1_CONTROL0__CRC_BLOCK_MODE__SHIFT 0x4
+#define AZALIA_CRC1_CONTROL0__CRC_INSTANCE_SEL_MASK 0x700
+#define AZALIA_CRC1_CONTROL0__CRC_INSTANCE_SEL__SHIFT 0x8
+#define AZALIA_CRC1_CONTROL0__CRC_SOURCE_SEL_MASK 0x1000
+#define AZALIA_CRC1_CONTROL0__CRC_SOURCE_SEL__SHIFT 0xc
+#define AZALIA_CRC1_CONTROL1__CRC_BLOCK_SIZE_MASK 0xffffffff
+#define AZALIA_CRC1_CONTROL1__CRC_BLOCK_SIZE__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL2__CRC_BLOCK_ITERATION_MASK 0xffff
+#define AZALIA_CRC1_CONTROL2__CRC_BLOCK_ITERATION__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL3__CRC_COMPLETE_MASK 0x1
+#define AZALIA_CRC1_CONTROL3__CRC_COMPLETE__SHIFT 0x0
+#define AZALIA_CRC1_CONTROL3__CRC_BLOCK_COMPLETE_PHASE_MASK 0x10
+#define AZALIA_CRC1_CONTROL3__CRC_BLOCK_COMPLETE_PHASE__SHIFT 0x4
+#define AZALIA_CRC1_CONTROL3__CRC_CHANNEL_RESULT_SEL_MASK 0x700
+#define AZALIA_CRC1_CONTROL3__CRC_CHANNEL_RESULT_SEL__SHIFT 0x8
+#define AZALIA_CRC1_RESULT__CRC_RESULT_MASK 0xffffffff
+#define AZALIA_CRC1_RESULT__CRC_RESULT__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL0__CRC_CHANNEL0_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL0__CRC_CHANNEL0__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL1__CRC_CHANNEL1_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL1__CRC_CHANNEL1__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL2__CRC_CHANNEL2_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL2__CRC_CHANNEL2__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL3__CRC_CHANNEL3_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL3__CRC_CHANNEL3__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL4__CRC_CHANNEL4_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL4__CRC_CHANNEL4__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL5__CRC_CHANNEL5_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL5__CRC_CHANNEL5__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL6__CRC_CHANNEL6_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL6__CRC_CHANNEL6__SHIFT 0x0
+#define AZALIA_CRC1_CHANNEL7__CRC_CHANNEL7_MASK 0xffffffff
+#define AZALIA_CRC1_CHANNEL7__CRC_CHANNEL7__SHIFT 0x0
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_INDEX_MASK 0xff
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_INDEX__SHIFT 0x0
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define AZ_TEST_DEBUG_INDEX__AZ_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define AZ_TEST_DEBUG_DATA__AZ_TEST_DEBUG_DATA_MASK 0xffffffff
+#define AZ_TEST_DEBUG_DATA__AZ_TEST_DEBUG_DATA__SHIFT 0x0
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX_MASK 0xff
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_INDEX__SHIFT 0x0
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN_MASK 0x100
+#define AZALIA_STREAM_INDEX__AZALIA_STREAM_REG_WRITE_EN__SHIFT 0x8
+#define AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA_MASK 0xffffffff
+#define AZALIA_STREAM_DATA__AZALIA_STREAM_REG_DATA__SHIFT 0x0
+#define AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE_MASK 0x7f
+#define AZALIA_FIFO_SIZE_CONTROL__MIN_FIFO_SIZE__SHIFT 0x0
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE_MASK 0x7f00
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_FIFO_SIZE__SHIFT 0x8
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT_MASK 0xff0000
+#define AZALIA_FIFO_SIZE_CONTROL__MAX_LATENCY_SUPPORT__SHIFT 0x10
+#define AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET_MASK 0x1
+#define AZALIA_LATENCY_COUNTER_CONTROL__AZALIA_LATENCY_COUNTER_RESET__SHIFT 0x0
+#define AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT_MASK 0xffffffff
+#define AZALIA_WORSTCASE_LATENCY_COUNT__AZALIA_WORSTCASE_LATENCY_COUNT__SHIFT 0x0
+#define AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT_MASK 0xffffffff
+#define AZALIA_CUMULATIVE_LATENCY_COUNT__AZALIA_CUMULATIVE_LATENCY_COUNT__SHIFT 0x0
+#define AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT_MASK 0xffffffff
+#define AZALIA_CUMULATIVE_REQUEST_COUNT__AZALIA_CUMULATIVE_REQUEST_COUNT__SHIFT 0x0
+#define AZALIA_STREAM_DEBUG__STREAM_DEBUG_DATA_MASK 0xffffffff
+#define AZALIA_STREAM_DEBUG__STREAM_DEBUG_DATA__SHIFT 0x0
+#define AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x3fff
+#define AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xffffffff
+#define AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
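(AZALIA_F0_CODEC_ENDPOINT_INDEX/DATA form an index/data window: software writes a register index into INDEX, then moves the selected endpoint register's contents through DATA. A hedged sketch under stated assumptions; the MMIO stand-in and offsets below are placeholders for illustration, not values from this change:

#include <stdint.h>

/* Placeholder MMIO space standing in for real register I/O. */
static uint32_t mmio[2];
#define mmAZALIA_F0_CODEC_ENDPOINT_INDEX 0	/* assumed offset, illustration only */
#define mmAZALIA_F0_CODEC_ENDPOINT_DATA  1	/* assumed offset, illustration only */

static uint32_t az_endpoint_read(uint32_t reg_index)
{
	/* Select the endpoint register; only the 14-bit index field is valid. */
	mmio[mmAZALIA_F0_CODEC_ENDPOINT_INDEX] =
		reg_index & AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK;
	/* Read the selected register's contents back through the data window. */
	return mmio[mmAZALIA_F0_CODEC_ENDPOINT_DATA];
}

The AZALIA_STREAM_INDEX/DATA and AZ_TEST_DEBUG_INDEX/DATA pairs earlier in this hunk follow the same pattern, with an additional WRITE_EN bit gating writes through the window.)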
+#define AZALIA_F0_CODEC_CONVERTER_PIN_DEBUG__AZALIA_DEBUG_MASK 0xffffffff
+#define AZALIA_F0_CODEC_CONVERTER_PIN_DEBUG__AZALIA_DEBUG__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x10
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0xf
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x70
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x700
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x3800
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x4000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x8000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0xf
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0xf0
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x1
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x2
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x4
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x8
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x10
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x20
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x40
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x80
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x7f00
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x800000
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xffffffff
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0xfff
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x1f0000
+#define AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x3
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x700000
+#define AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0xff
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x1
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x2
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x4
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x70
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_OFFSET_DEBUG__PRESENTATION_TIME_OFFSET_DEBUG_MASK 0xffffffff
+#define AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_OFFSET_DEBUG__PRESENTATION_TIME_OFFSET_DEBUG__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xffffffff
+#define AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xffffffff
+#define AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xffffffff
+#define AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x2
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x4
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x8
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x10
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x20
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x40
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x80
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x10000
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x1000000
+#define AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x3f
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x80
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7fffffff
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x40
+#define AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x7f
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x10000
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x20000
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0xfc0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x3000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xff000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x7
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x2
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x100
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x200
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0xf000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x10000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x20000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0xf00000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x1000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x2000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xf0000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x2
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x100
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x200
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0xf000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x10000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x20000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0xf00000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x1000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x2000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xf0000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0xffff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xffff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xffffffff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xffffffff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xff000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xff000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xff000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xff000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x3ffffff
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0xf
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0xf0
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0xf00
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0xf000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0xf0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0xf00000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3f000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xc0000000
+#define AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x3
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x3c
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x3
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x4
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x78
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x80
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x3f
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x40
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0xf
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x10
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0xf
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x10
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x60
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x80
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0xf
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0xf0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0xf
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0xf0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0xf
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0xf0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0xf
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0xf0
+#define AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xffffffff
+#define AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xffffffff
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xffffffff
+#define AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0xff
+#define AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x2
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0xff00
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0xff0000
+#define AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x3
+#define AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x1
+#define AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x10
+#define AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x1
+#define AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x1
+#define AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x10
+#define AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x100
+#define AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x1
+#define AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x10
+#define AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x100
+#define AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x1
+#define AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x10
+#define AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x100
+#define AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x3fff
+#define AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xffffffff
+#define AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PIN_DEBUG__AZALIA_INPUT_DEBUG_MASK 0xffffffff
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PIN_DEBUG__AZALIA_INPUT_DEBUG__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x10
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0xf
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x70
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x700
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x3800
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x4000
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x8000
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0xf
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0xf0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x2
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x4
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x8
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x10
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x20
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x40
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x80
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x7f00
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x800000
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xffffffff
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0xfff
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x1f0000
+#define AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x2
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x20
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x40
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x80
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0xff00
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x10000
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x1000000
+#define AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x3f
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x80
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7fffffff
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x20
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x2
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x100
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x200
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0xf000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x10000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x20000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0xf00000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x1000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x2000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xf0000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x2
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x100
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x200
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0xf000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x10000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x20000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0xf00000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x1000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x2000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xf0000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0xff
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x3ffffff
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0xf
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0xf0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0xf00
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0xf000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0xf0000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0xf00000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3f000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xc0000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x6
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x20
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x7
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0xff00
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0xff0000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x1
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0xff00
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xffffffff
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xffffffff
+#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
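
Every register in these headers is described as a _MASK/__SHIFT pair per field. A minimal, self-contained sketch of the conventional read-modify-write accessors this pairing implies (get_field/set_field are illustrative helpers, not part of the patch; the two macros are copied verbatim from the block above):

#include <stdint.h>
#include <stdio.h>

#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x10
#define AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4

/* Extract a field from a register value. */
static inline uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Replace a field within a register value, leaving other bits intact. */
static inline uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
				 uint32_t value)
{
	return (reg & ~mask) | ((value << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_field(reg,
			AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK,
			AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT,
			1);
	printf("reg=0x%08x HBR_ENABLE=%u\n", reg,
	       get_field(reg,
			 AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK,
			 AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT));
	return 0;
}
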
+#define AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE_MASK 0x1ffff
+#define AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_INDEX__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE_MASK 0xffffffff
+#define AZENDPOINT_IMMEDIATE_COMMAND_INPUT_INTERFACE_DATA__IMMEDIATE_COMMAND_WRITE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0xfff
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x1f0000
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xffffffff
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0xf
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x70
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x700
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x3800
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x4000
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x8000
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0xf
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x10
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x20
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x40
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x80
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x7f00
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x800000
+#define AZALIA_F2_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x20
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x40
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x80
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x100
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x200
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x400
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x800
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0xf0000
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0xf00000
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x20
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x40
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x80
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0xff00
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x10000
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x1000000
+#define AZALIA_F2_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x20
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x3f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x80
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7fffffff
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0xf
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0xf00
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0xf000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0xf0000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0xf00000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3f000000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xc0000000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC_MASK 0xf
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__MISC__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_2__COLOR__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE_MASK 0xf
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__CONNECTION_TYPE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_3__DEFAULT_DEVICE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION_MASK 0x3f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__LOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY_MASK 0xc0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT_4__PORT_CONNECTIVITY__SHIFT 0x6
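
The CONFIGURATION_DEFAULT field layout above (SEQUENCE, DEFAULT_ASSOCIATION, MISC, COLOR, CONNECTION_TYPE, DEFAULT_DEVICE, LOCATION, PORT_CONNECTIVITY) matches the Intel HD Audio specification's Configuration Default pin register. A small decoder sketch using the shifts/widths fixed by the masks above; the printable color names follow the HDA spec tables and are not part of this patch:

#include <stdint.h>
#include <stdio.h>

/* Color names per the HDA specification's Configuration Default tables. */
static const char * const pin_color[16] = {
	"Unknown", "Black", "Grey", "Blue", "Green", "Red", "Orange",
	"Yellow", "Purple", "Pink", "Reserved", "Reserved", "Reserved",
	"Reserved", "White", "Other",
};

static unsigned int bits(uint32_t v, unsigned int shift, unsigned int width)
{
	return (v >> shift) & ((1u << width) - 1);
}

static void decode_pin_config(uint32_t cfg)
{
	printf("sequence=%u association=%u misc=0x%x color=%s conn=0x%x "
	       "device=0x%x location=0x%x port_connectivity=%u\n",
	       bits(cfg, 0, 4),   /* SEQUENCE            */
	       bits(cfg, 4, 4),   /* DEFAULT_ASSOCIATION */
	       bits(cfg, 8, 4),   /* MISC                */
	       pin_color[bits(cfg, 12, 4)],
	       bits(cfg, 16, 4),  /* CONNECTION_TYPE     */
	       bits(cfg, 20, 4),  /* DEFAULT_DEVICE      */
	       bits(cfg, 24, 6),  /* LOCATION            */
	       bits(cfg, 30, 2)); /* PORT_CONNECTIVITY   */
}

int main(void)
{
	decode_pin_config(0x185600f0); /* arbitrary example value */
	return 0;
}
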
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL0_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL1_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL2_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL3_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_CAPABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_ENABLE_MASK 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL4_ENABLE__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL5_ENABLE__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL6_ENABLE__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_ENABLE__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE_MASK 0x2
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_MUTE__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID_MASK 0xf0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL7_ENABLE__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0xff
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x6
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x20
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x7
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0xff00
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0xff0000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L__CHANNEL_STATUS_L_MASK 0xffffffff
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_L__CHANNEL_STATUS_L__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H__CHANNEL_STATUS_H_MASK 0xffffffff
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_CHANNEL_STATUS_H__CHANNEL_STATUS_H__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x1
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0xff00
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xffffffff
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xffffffff
+#define AZALIA_F2_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define BLND_CONTROL__BLND_GLOBAL_GAIN_MASK 0xff
+#define BLND_CONTROL__BLND_GLOBAL_GAIN__SHIFT 0x0
+#define BLND_CONTROL__BLND_MODE_MASK 0x300
+#define BLND_CONTROL__BLND_MODE__SHIFT 0x8
+#define BLND_CONTROL__BLND_STEREO_TYPE_MASK 0xc00
+#define BLND_CONTROL__BLND_STEREO_TYPE__SHIFT 0xa
+#define BLND_CONTROL__BLND_STEREO_POLARITY_MASK 0x1000
+#define BLND_CONTROL__BLND_STEREO_POLARITY__SHIFT 0xc
+#define BLND_CONTROL__BLND_FEEDTHROUGH_EN_MASK 0x2000
+#define BLND_CONTROL__BLND_FEEDTHROUGH_EN__SHIFT 0xd
+#define BLND_CONTROL__BLND_ALPHA_MODE_MASK 0x30000
+#define BLND_CONTROL__BLND_ALPHA_MODE__SHIFT 0x10
+#define BLND_CONTROL__BLND_ACTIVE_OVERLAP_ONLY_MASK 0x40000
+#define BLND_CONTROL__BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x12
+#define BLND_CONTROL__BLND_MULTIPLIED_MODE_MASK 0x100000
+#define BLND_CONTROL__BLND_MULTIPLIED_MODE__SHIFT 0x14
+#define BLND_CONTROL__BLND_GLOBAL_ALPHA_MASK 0xff000000
+#define BLND_CONTROL__BLND_GLOBAL_ALPHA__SHIFT 0x18
+#define BLND_SM_CONTROL2__SM_MODE_MASK 0x7
+#define BLND_SM_CONTROL2__SM_MODE__SHIFT 0x0
+#define BLND_SM_CONTROL2__SM_FRAME_ALTERNATE_MASK 0x10
+#define BLND_SM_CONTROL2__SM_FRAME_ALTERNATE__SHIFT 0x4
+#define BLND_SM_CONTROL2__SM_FIELD_ALTERNATE_MASK 0x20
+#define BLND_SM_CONTROL2__SM_FIELD_ALTERNATE__SHIFT 0x5
+#define BLND_SM_CONTROL2__SM_FORCE_NEXT_FRAME_POL_MASK 0x300
+#define BLND_SM_CONTROL2__SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define BLND_SM_CONTROL2__SM_FORCE_NEXT_TOP_POL_MASK 0x30000
+#define BLND_SM_CONTROL2__SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define BLND_SM_CONTROL2__SM_CURRENT_FRAME_POL_MASK 0x1000000
+#define BLND_SM_CONTROL2__SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define BLND_CONTROL2__PTI_ENABLE_MASK 0x1
+#define BLND_CONTROL2__PTI_ENABLE__SHIFT 0x0
+#define BLND_CONTROL2__PTI_NEW_PIXEL_GAP_MASK 0x30
+#define BLND_CONTROL2__PTI_NEW_PIXEL_GAP__SHIFT 0x4
+#define BLND_CONTROL2__BLND_NEW_PIXEL_MODE_MASK 0x40
+#define BLND_CONTROL2__BLND_NEW_PIXEL_MODE__SHIFT 0x6
+#define BLND_CONTROL2__BLND_SUPERAA_DEGAMMA_EN_MASK 0x80
+#define BLND_CONTROL2__BLND_SUPERAA_DEGAMMA_EN__SHIFT 0x7
+#define BLND_CONTROL2__BLND_SUPERAA_REGAMMA_EN_MASK 0x100
+#define BLND_CONTROL2__BLND_SUPERAA_REGAMMA_EN__SHIFT 0x8
+#define BLND_UPDATE__BLND_UPDATE_PENDING_MASK 0x1
+#define BLND_UPDATE__BLND_UPDATE_PENDING__SHIFT 0x0
+#define BLND_UPDATE__BLND_UPDATE_TAKEN_MASK 0x100
+#define BLND_UPDATE__BLND_UPDATE_TAKEN__SHIFT 0x8
+#define BLND_UPDATE__BLND_UPDATE_LOCK_MASK 0x10000
+#define BLND_UPDATE__BLND_UPDATE_LOCK__SHIFT 0x10
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_OCCURED_MASK 0x1
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_OCCURED__SHIFT 0x0
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_ACK_MASK 0x100
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_ACK__SHIFT 0x8
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_MASK_MASK 0x1000
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_MASK__SHIFT 0xc
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_PIPE_INDEX_MASK 0x30000
+#define BLND_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_PIPE_INDEX__SHIFT 0x10
+#define BLND_V_UPDATE_LOCK__BLND_DCP_GRPH_V_UPDATE_LOCK_MASK 0x1
+#define BLND_V_UPDATE_LOCK__BLND_DCP_GRPH_V_UPDATE_LOCK__SHIFT 0x0
+#define BLND_V_UPDATE_LOCK__BLND_DCP_GRPH_SURF_V_UPDATE_LOCK_MASK 0x2
+#define BLND_V_UPDATE_LOCK__BLND_DCP_GRPH_SURF_V_UPDATE_LOCK__SHIFT 0x1
+#define BLND_V_UPDATE_LOCK__BLND_DCP_CUR_V_UPDATE_LOCK_MASK 0x10000
+#define BLND_V_UPDATE_LOCK__BLND_DCP_CUR_V_UPDATE_LOCK__SHIFT 0x10
+#define BLND_V_UPDATE_LOCK__BLND_DCP_CUR2_V_UPDATE_LOCK_MASK 0x1000000
+#define BLND_V_UPDATE_LOCK__BLND_DCP_CUR2_V_UPDATE_LOCK__SHIFT 0x18
+#define BLND_V_UPDATE_LOCK__BLND_SCL_V_UPDATE_LOCK_MASK 0x10000000
+#define BLND_V_UPDATE_LOCK__BLND_SCL_V_UPDATE_LOCK__SHIFT 0x1c
+#define BLND_V_UPDATE_LOCK__BLND_BLND_V_UPDATE_LOCK_MASK 0x20000000
+#define BLND_V_UPDATE_LOCK__BLND_BLND_V_UPDATE_LOCK__SHIFT 0x1d
+#define BLND_V_UPDATE_LOCK__BLND_V_UPDATE_LOCK_MODE_MASK 0x80000000
+#define BLND_V_UPDATE_LOCK__BLND_V_UPDATE_LOCK_MODE__SHIFT 0x1f
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_UPDATE_PENDING_MASK 0x1
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_UPDATE_PENDING__SHIFT 0x0
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_UPDATE_PENDING_MASK 0x2
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_UPDATE_PENDING__SHIFT 0x1
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_SURF_UPDATE_PENDING_MASK 0x4
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_SURF_UPDATE_PENDING__SHIFT 0x2
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_SURF_UPDATE_PENDING_MASK 0x8
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_SURF_UPDATE_PENDING__SHIFT 0x3
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDC_CUR_UPDATE_PENDING_MASK 0x40
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDC_CUR_UPDATE_PENDING__SHIFT 0x6
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDO_CUR_UPDATE_PENDING_MASK 0x80
+#define BLND_REG_UPDATE_STATUS__DCP_BLNDO_CUR_UPDATE_PENDING__SHIFT 0x7
+#define BLND_REG_UPDATE_STATUS__SCL_BLNDC_UPDATE_PENDING_MASK 0x100
+#define BLND_REG_UPDATE_STATUS__SCL_BLNDC_UPDATE_PENDING__SHIFT 0x8
+#define BLND_REG_UPDATE_STATUS__SCL_BLNDO_UPDATE_PENDING_MASK 0x200
+#define BLND_REG_UPDATE_STATUS__SCL_BLNDO_UPDATE_PENDING__SHIFT 0x9
+#define BLND_REG_UPDATE_STATUS__BLND_BLNDC_UPDATE_PENDING_MASK 0x400
+#define BLND_REG_UPDATE_STATUS__BLND_BLNDC_UPDATE_PENDING__SHIFT 0xa
+#define BLND_REG_UPDATE_STATUS__BLND_BLNDO_UPDATE_PENDING_MASK 0x800
+#define BLND_REG_UPDATE_STATUS__BLND_BLNDO_UPDATE_PENDING__SHIFT 0xb
+#define BLND_DEBUG__BLND_CNV_MUX_SELECT_MASK 0x1
+#define BLND_DEBUG__BLND_CNV_MUX_SELECT__SHIFT 0x0
+#define BLND_DEBUG__BLND_DEBUG_MASK 0xfffffffe
+#define BLND_DEBUG__BLND_DEBUG__SHIFT 0x1
+#define BLND_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_INDEX_MASK 0xff
+#define BLND_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_INDEX__SHIFT 0x0
+#define BLND_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define BLND_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define BLND_TEST_DEBUG_DATA__BLND_TEST_DEBUG_DATA_MASK 0xffffffff
+#define BLND_TEST_DEBUG_DATA__BLND_TEST_DEBUG_DATA__SHIFT 0x0
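
The LOCK/PENDING/TAKEN bits of BLND_UPDATE suggest the usual double-buffered commit handshake: lock, reprogram, release, then poll until the hardware latches the new values at a frame boundary. A sketch under that assumption (the ordering mirrors how DCE display code typically drives such bits and is not spelled out by the defines themselves; plain memory stands in for the MMIO register):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLND_UPDATE__BLND_UPDATE_PENDING_MASK 0x1     /* copied from above */
#define BLND_UPDATE__BLND_UPDATE_LOCK_MASK    0x10000 /* copied from above */

/* Hold the double-buffer lock while reprogramming, release it, then
 * poll until the pending bit clears.  'blnd_update' stands in for the
 * memory-mapped BLND_UPDATE register. */
static bool blnd_commit(volatile uint32_t *blnd_update)
{
	uint32_t v = *blnd_update;
	int i;

	*blnd_update = v | BLND_UPDATE__BLND_UPDATE_LOCK_MASK;
	/* ... program BLND_CONTROL and related registers here ... */
	*blnd_update = v & ~BLND_UPDATE__BLND_UPDATE_LOCK_MASK;

	for (i = 0; i < 1000; i++)
		if (!(*blnd_update & BLND_UPDATE__BLND_UPDATE_PENDING_MASK))
			return true;

	return false; /* still pending; the caller decides what to do */
}

int main(void)
{
	uint32_t fake_reg = 0; /* plain memory standing in for MMIO */

	printf("committed=%d\n", blnd_commit(&fake_reg));
	return 0;
}
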
+#define WB_ENABLE__WB_ENABLE_MASK 0x1
+#define WB_ENABLE__WB_ENABLE__SHIFT 0x0
+#define WB_EC_CONFIG__DISPCLK_R_WB_GATE_DIS_MASK 0x1
+#define WB_EC_CONFIG__DISPCLK_R_WB_GATE_DIS__SHIFT 0x0
+#define WB_EC_CONFIG__DISPCLK_G_WB_GATE_DIS_MASK 0x2
+#define WB_EC_CONFIG__DISPCLK_G_WB_GATE_DIS__SHIFT 0x1
+#define WB_EC_CONFIG__DISPCLK_G_WBSCL_GATE_DIS_MASK 0x4
+#define WB_EC_CONFIG__DISPCLK_G_WBSCL_GATE_DIS__SHIFT 0x2
+#define WB_EC_CONFIG__WB_TEST_CLK_SEL_MASK 0x78
+#define WB_EC_CONFIG__WB_TEST_CLK_SEL__SHIFT 0x3
+#define WB_EC_CONFIG__WB_LB_LS_DIS_MASK 0x80
+#define WB_EC_CONFIG__WB_LB_LS_DIS__SHIFT 0x7
+#define WB_EC_CONFIG__WB_LB_SD_DIS_MASK 0x100
+#define WB_EC_CONFIG__WB_LB_SD_DIS__SHIFT 0x8
+#define WB_EC_CONFIG__WB_LUT_LS_DIS_MASK 0x200
+#define WB_EC_CONFIG__WB_LUT_LS_DIS__SHIFT 0x9
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_MODE_SEL_MASK 0x3000
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_MODE_SEL__SHIFT 0xc
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_DIS_MASK 0x4000
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_DIS__SHIFT 0xe
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_FORCE_MASK 0x18000
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_FORCE__SHIFT 0xf
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_STATE_SM_MASK 0x60000
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_STATE_SM__SHIFT 0x11
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_STATE_BG_MASK 0x180000
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_STATE_BG__SHIFT 0x13
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_STATE_MASK 0x600000
+#define WB_EC_CONFIG__WBSCL_LB_MEM_PWR_STATE__SHIFT 0x15
+#define WB_EC_CONFIG__WB_RAM_PW_SAVE_MODE_MASK 0x800000
+#define WB_EC_CONFIG__WB_RAM_PW_SAVE_MODE__SHIFT 0x17
+#define WB_EC_CONFIG__LB_MEM_PWR_STATE_SM_MASK 0x3000000
+#define WB_EC_CONFIG__LB_MEM_PWR_STATE_SM__SHIFT 0x18
+#define WB_EC_CONFIG__LB_MEM_PWR_STATE_BG_MASK 0xc000000
+#define WB_EC_CONFIG__LB_MEM_PWR_STATE_BG__SHIFT 0x1a
+#define WB_EC_CONFIG__LB_MEM_PWR_STATE_MASK 0x30000000
+#define WB_EC_CONFIG__LB_MEM_PWR_STATE__SHIFT 0x1c
+#define WB_EC_CONFIG__LUT_MEM_PWR_STATE_MASK 0xc0000000
+#define WB_EC_CONFIG__LUT_MEM_PWR_STATE__SHIFT 0x1e
+#define CNV_MODE__CNV_FRAME_CAPTURE_RATE_MASK 0x300
+#define CNV_MODE__CNV_FRAME_CAPTURE_RATE__SHIFT 0x8
+#define CNV_MODE__CNV_WINDOW_CROP_EN_MASK 0x1000
+#define CNV_MODE__CNV_WINDOW_CROP_EN__SHIFT 0xc
+#define CNV_MODE__CNV_STEREO_TYPE_MASK 0x6000
+#define CNV_MODE__CNV_STEREO_TYPE__SHIFT 0xd
+#define CNV_MODE__CNV_INTERLACED_MODE_MASK 0x8000
+#define CNV_MODE__CNV_INTERLACED_MODE__SHIFT 0xf
+#define CNV_MODE__CNV_EYE_SELECTION_MASK 0x30000
+#define CNV_MODE__CNV_EYE_SELECTION__SHIFT 0x10
+#define CNV_MODE__CNV_STEREO_POLARITY_MASK 0x40000
+#define CNV_MODE__CNV_STEREO_POLARITY__SHIFT 0x12
+#define CNV_MODE__CNV_INTERLACED_FIELD_ORDER_MASK 0x80000
+#define CNV_MODE__CNV_INTERLACED_FIELD_ORDER__SHIFT 0x13
+#define CNV_MODE__CNV_STEREO_SPLIT_MASK 0x100000
+#define CNV_MODE__CNV_STEREO_SPLIT__SHIFT 0x14
+#define CNV_MODE__CNV_NEW_CONTENT_MASK 0x1000000
+#define CNV_MODE__CNV_NEW_CONTENT__SHIFT 0x18
+#define CNV_MODE__CNV_FRAME_CAPTURE_EN_MASK 0x80000000
+#define CNV_MODE__CNV_FRAME_CAPTURE_EN__SHIFT 0x1f
+#define CNV_WINDOW_START__CNV_WINDOW_START_X_MASK 0xfff
+#define CNV_WINDOW_START__CNV_WINDOW_START_X__SHIFT 0x0
+#define CNV_WINDOW_START__CNV_WINDOW_START_Y_MASK 0xfff0000
+#define CNV_WINDOW_START__CNV_WINDOW_START_Y__SHIFT 0x10
+#define CNV_WINDOW_SIZE__CNV_WINDOW_WIDTH_MASK 0xfff
+#define CNV_WINDOW_SIZE__CNV_WINDOW_WIDTH__SHIFT 0x0
+#define CNV_WINDOW_SIZE__CNV_WINDOW_HEIGHT_MASK 0xfff0000
+#define CNV_WINDOW_SIZE__CNV_WINDOW_HEIGHT__SHIFT 0x10
+#define CNV_UPDATE__CNV_UPDATE_PENDING_MASK 0x1
+#define CNV_UPDATE__CNV_UPDATE_PENDING__SHIFT 0x0
+#define CNV_UPDATE__CNV_UPDATE_TAKEN_MASK 0x100
+#define CNV_UPDATE__CNV_UPDATE_TAKEN__SHIFT 0x8
+#define CNV_UPDATE__CNV_UPDATE_LOCK_MASK 0x10000
+#define CNV_UPDATE__CNV_UPDATE_LOCK__SHIFT 0x10
+#define CNV_SOURCE_SIZE__CNV_SOURCE_WIDTH_MASK 0x7fff
+#define CNV_SOURCE_SIZE__CNV_SOURCE_WIDTH__SHIFT 0x0
+#define CNV_SOURCE_SIZE__CNV_SOURCE_HEIGHT_MASK 0x7fff0000
+#define CNV_SOURCE_SIZE__CNV_SOURCE_HEIGHT__SHIFT 0x10
+#define CNV_CSC_CONTROL__CNV_CSC_BYPASS_MASK 0x1
+#define CNV_CSC_CONTROL__CNV_CSC_BYPASS__SHIFT 0x0
+#define CNV_CSC_C11_C12__CNV_CSC_C11_MASK 0x1fff
+#define CNV_CSC_C11_C12__CNV_CSC_C11__SHIFT 0x0
+#define CNV_CSC_C11_C12__CNV_CSC_C12_MASK 0x1fff0000
+#define CNV_CSC_C11_C12__CNV_CSC_C12__SHIFT 0x10
+#define CNV_CSC_C13_C14__CNV_CSC_C13_MASK 0x1fff
+#define CNV_CSC_C13_C14__CNV_CSC_C13__SHIFT 0x0
+#define CNV_CSC_C13_C14__CNV_CSC_C14_MASK 0x7fff0000
+#define CNV_CSC_C13_C14__CNV_CSC_C14__SHIFT 0x10
+#define CNV_CSC_C21_C22__CNV_CSC_C21_MASK 0x1fff
+#define CNV_CSC_C21_C22__CNV_CSC_C21__SHIFT 0x0
+#define CNV_CSC_C21_C22__CNV_CSC_C22_MASK 0x1fff0000
+#define CNV_CSC_C21_C22__CNV_CSC_C22__SHIFT 0x10
+#define CNV_CSC_C23_C24__CNV_CSC_C23_MASK 0x1fff
+#define CNV_CSC_C23_C24__CNV_CSC_C23__SHIFT 0x0
+#define CNV_CSC_C23_C24__CNV_CSC_C24_MASK 0x7fff0000
+#define CNV_CSC_C23_C24__CNV_CSC_C24__SHIFT 0x10
+#define CNV_CSC_C31_C32__CNV_CSC_C31_MASK 0x1fff
+#define CNV_CSC_C31_C32__CNV_CSC_C31__SHIFT 0x0
+#define CNV_CSC_C31_C32__CNV_CSC_C32_MASK 0x1fff0000
+#define CNV_CSC_C31_C32__CNV_CSC_C32__SHIFT 0x10
+#define CNV_CSC_C33_C34__CNV_CSC_C33_MASK 0x1fff
+#define CNV_CSC_C33_C34__CNV_CSC_C33__SHIFT 0x0
+#define CNV_CSC_C33_C34__CNV_CSC_C34_MASK 0x7fff0000
+#define CNV_CSC_C33_C34__CNV_CSC_C34__SHIFT 0x10
+#define CNV_CSC_ROUND_OFFSET_R__CNV_CSC_ROUND_OFFSET_R_MASK 0xffff
+#define CNV_CSC_ROUND_OFFSET_R__CNV_CSC_ROUND_OFFSET_R__SHIFT 0x0
+#define CNV_CSC_ROUND_OFFSET_G__CNV_CSC_ROUND_OFFSET_G_MASK 0xffff
+#define CNV_CSC_ROUND_OFFSET_G__CNV_CSC_ROUND_OFFSET_G__SHIFT 0x0
+#define CNV_CSC_ROUND_OFFSET_B__CNV_CSC_ROUND_OFFSET_B_MASK 0xffff
+#define CNV_CSC_ROUND_OFFSET_B__CNV_CSC_ROUND_OFFSET_B__SHIFT 0x0
+#define CNV_CSC_CLAMP_R__CNV_CSC_CLAMP_UPPER_R_MASK 0xffff
+#define CNV_CSC_CLAMP_R__CNV_CSC_CLAMP_UPPER_R__SHIFT 0x0
+#define CNV_CSC_CLAMP_R__CNV_CSC_CLAMP_LOWER_R_MASK 0xffff0000
+#define CNV_CSC_CLAMP_R__CNV_CSC_CLAMP_LOWER_R__SHIFT 0x10
+#define CNV_CSC_CLAMP_G__CNV_CSC_CLAMP_UPPER_G_MASK 0xffff
+#define CNV_CSC_CLAMP_G__CNV_CSC_CLAMP_UPPER_G__SHIFT 0x0
+#define CNV_CSC_CLAMP_G__CNV_CSC_CLAMP_LOWER_G_MASK 0xffff0000
+#define CNV_CSC_CLAMP_G__CNV_CSC_CLAMP_LOWER_G__SHIFT 0x10
+#define CNV_CSC_CLAMP_B__CNV_CSC_CLAMP_UPPER_B_MASK 0xffff
+#define CNV_CSC_CLAMP_B__CNV_CSC_CLAMP_UPPER_B__SHIFT 0x0
+#define CNV_CSC_CLAMP_B__CNV_CSC_CLAMP_LOWER_B_MASK 0xffff0000
+#define CNV_CSC_CLAMP_B__CNV_CSC_CLAMP_LOWER_B__SHIFT 0x10
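
The CNV_CSC_C11..C34 registers pack a 3x4 color-space-conversion matrix, two 13-bit coefficients per register, with a wider 15-bit constant term in the fourth column (C14/C24/C34), followed by per-channel round offsets and upper/lower clamps. A sketch of one output channel under a fixed-point assumption; the defines fix the field widths but not the binary point, so FRAC_BITS and the rounding/clamping order are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Assumed Q-format for the signed coefficients (not stated by the defines). */
#define FRAC_BITS 10

static uint16_t csc_channel(int32_t c1, int32_t c2, int32_t c3, int32_t c4,
			    int32_t in_r, int32_t in_g, int32_t in_b,
			    int32_t round_offset,
			    uint16_t clamp_lower, uint16_t clamp_upper)
{
	/* out = c1*R + c2*G + c3*B, widened to avoid overflow. */
	int64_t acc = (int64_t)c1 * in_r + (int64_t)c2 * in_g
		    + (int64_t)c3 * in_b;

	/* Scale back, add the constant term and rounding offset, clamp. */
	acc = (acc >> FRAC_BITS) + c4 + round_offset;
	if (acc < clamp_lower)
		acc = clamp_lower;
	if (acc > clamp_upper)
		acc = clamp_upper;
	return (uint16_t)acc;
}

int main(void)
{
	/* Identity on red: c1 = 1.0 in the assumed Q-format, others zero. */
	printf("%u\n", csc_channel(1 << FRAC_BITS, 0, 0, 0,
				   512, 100, 100, 0, 0, 4095));
	return 0;
}
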
+#define CNV_TEST_CNTL__CNV_TEST_CRC_EN_MASK 0x10
+#define CNV_TEST_CNTL__CNV_TEST_CRC_EN__SHIFT 0x4
+#define CNV_TEST_CNTL__CNV_TEST_CRC_CONT_EN_MASK 0x100
+#define CNV_TEST_CNTL__CNV_TEST_CRC_CONT_EN__SHIFT 0x8
+#define CNV_TEST_CNTL__CNV_TEST_CRC_DE_ONLY_MASK 0x10000
+#define CNV_TEST_CNTL__CNV_TEST_CRC_DE_ONLY__SHIFT 0x10
+#define CNV_TEST_CRC_RED__CNV_TEST_CRC_RED_MASK_MASK 0xfff0
+#define CNV_TEST_CRC_RED__CNV_TEST_CRC_RED_MASK__SHIFT 0x4
+#define CNV_TEST_CRC_RED__CNV_TEST_CRC_SIG_RED_MASK 0xffff0000
+#define CNV_TEST_CRC_RED__CNV_TEST_CRC_SIG_RED__SHIFT 0x10
+#define CNV_TEST_CRC_GREEN__CNV_TEST_CRC_GREEN_MASK_MASK 0xfff0
+#define CNV_TEST_CRC_GREEN__CNV_TEST_CRC_GREEN_MASK__SHIFT 0x4
+#define CNV_TEST_CRC_GREEN__CNV_TEST_CRC_SIG_GREEN_MASK 0xffff0000
+#define CNV_TEST_CRC_GREEN__CNV_TEST_CRC_SIG_GREEN__SHIFT 0x10
+#define CNV_TEST_CRC_BLUE__CNV_TEST_CRC_BLUE_MASK_MASK 0xfff0
+#define CNV_TEST_CRC_BLUE__CNV_TEST_CRC_BLUE_MASK__SHIFT 0x4
+#define CNV_TEST_CRC_BLUE__CNV_TEST_CRC_SIG_BLUE_MASK 0xffff0000
+#define CNV_TEST_CRC_BLUE__CNV_TEST_CRC_SIG_BLUE__SHIFT 0x10
+#define WB_DEBUG_CTRL__WB_DEBUG_EN_MASK 0x1
+#define WB_DEBUG_CTRL__WB_DEBUG_EN__SHIFT 0x0
+#define WB_DEBUG_CTRL__WB_DEBUG_SEL_MASK 0xc0
+#define WB_DEBUG_CTRL__WB_DEBUG_SEL__SHIFT 0x6
+#define WB_DBG_MODE__WB_DBG_MODE_EN_MASK 0x1
+#define WB_DBG_MODE__WB_DBG_MODE_EN__SHIFT 0x0
+#define WB_DBG_MODE__WB_DBG_DIN_FMT_MASK 0x2
+#define WB_DBG_MODE__WB_DBG_DIN_FMT__SHIFT 0x1
+#define WB_DBG_MODE__WB_DBG_36MODE_MASK 0x4
+#define WB_DBG_MODE__WB_DBG_36MODE__SHIFT 0x2
+#define WB_DBG_MODE__WB_DBG_CMAP_MASK 0x8
+#define WB_DBG_MODE__WB_DBG_CMAP__SHIFT 0x3
+#define WB_DBG_MODE__WB_DBG_PXLRATE_ERROR_MASK 0x100
+#define WB_DBG_MODE__WB_DBG_PXLRATE_ERROR__SHIFT 0x8
+#define WB_DBG_MODE__WB_DBG_SOURCE_WIDTH_MASK 0x7fff0000
+#define WB_DBG_MODE__WB_DBG_SOURCE_WIDTH__SHIFT 0x10
+#define WB_HW_DEBUG__WB_HW_DEBUG_MASK 0xffffffff
+#define WB_HW_DEBUG__WB_HW_DEBUG__SHIFT 0x0
+#define CNV_INPUT_SELECT__CNV_INPUT_SRC_SELECT_MASK 0x3
+#define CNV_INPUT_SELECT__CNV_INPUT_SRC_SELECT__SHIFT 0x0
+#define CNV_INPUT_SELECT__CNV_INPUT_PIPE_SELECT_MASK 0x1c
+#define CNV_INPUT_SELECT__CNV_INPUT_PIPE_SELECT__SHIFT 0x2
+#define WB_SOFT_RESET__WB_SOFT_RESET_MASK 0x1
+#define WB_SOFT_RESET__WB_SOFT_RESET__SHIFT 0x0
+#define WB_WARM_UP_MODE_CTL1__WIDTH_WARMUP_MASK 0x7fff
+#define WB_WARM_UP_MODE_CTL1__WIDTH_WARMUP__SHIFT 0x0
+#define WB_WARM_UP_MODE_CTL1__HEIGHT_WARMUP_MASK 0x7fff0000
+#define WB_WARM_UP_MODE_CTL1__HEIGHT_WARMUP__SHIFT 0x10
+#define WB_WARM_UP_MODE_CTL1__GMC_WARM_UP_ENABLE_MASK 0x80000000
+#define WB_WARM_UP_MODE_CTL1__GMC_WARM_UP_ENABLE__SHIFT 0x1f
+#define WB_WARM_UP_MODE_CTL2__DATA_VALUE_WARMUP_MASK 0xff
+#define WB_WARM_UP_MODE_CTL2__DATA_VALUE_WARMUP__SHIFT 0x0
+#define WB_WARM_UP_MODE_CTL2__MODE_WARMUP_MASK 0x100
+#define WB_WARM_UP_MODE_CTL2__MODE_WARMUP__SHIFT 0x8
+#define CNV_TEST_DEBUG_INDEX__CNV_TEST_DEBUG_INDEX_MASK 0xff
+#define CNV_TEST_DEBUG_INDEX__CNV_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CNV_TEST_DEBUG_INDEX__CNV_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define CNV_TEST_DEBUG_INDEX__CNV_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CNV_TEST_DEBUG_DATA__CNV_TEST_DEBUG_DATA_MASK 0xffffffff
+#define CNV_TEST_DEBUG_DATA__CNV_TEST_DEBUG_DATA__SHIFT 0x0
+#define DCFE_CLOCK_CONTROL__DISPCLK_R_DCFE_GATE_DISABLE_MASK 0x10
+#define DCFE_CLOCK_CONTROL__DISPCLK_R_DCFE_GATE_DISABLE__SHIFT 0x4
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_DCP_GATE_DISABLE_MASK 0x100
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_DCP_GATE_DISABLE__SHIFT 0x8
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_SCL_GATE_DISABLE_MASK 0x1000
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_SCL_GATE_DISABLE__SHIFT 0xc
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_PSCL_GATE_DISABLE_MASK 0x8000
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_PSCL_GATE_DISABLE__SHIFT 0xf
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_PIPE_REQUEST_DIS_GATE_DISABLE_MASK 0x20000
+#define DCFE_CLOCK_CONTROL__DISPCLK_G_PIPE_REQUEST_DIS_GATE_DISABLE__SHIFT 0x11
+#define DCFE_CLOCK_CONTROL__DCFE_TEST_CLK_SEL_MASK 0x1f000000
+#define DCFE_CLOCK_CONTROL__DCFE_TEST_CLK_SEL__SHIFT 0x18
+#define DCFE_CLOCK_CONTROL__DCFE_CLOCK_ENABLE_MASK 0x80000000
+#define DCFE_CLOCK_CONTROL__DCFE_CLOCK_ENABLE__SHIFT 0x1f
+#define DCFE_SOFT_RESET__DCP_PIXPIPE_SOFT_RESET_MASK 0x1
+#define DCFE_SOFT_RESET__DCP_PIXPIPE_SOFT_RESET__SHIFT 0x0
+#define DCFE_SOFT_RESET__DCP_REQ_SOFT_RESET_MASK 0x2
+#define DCFE_SOFT_RESET__DCP_REQ_SOFT_RESET__SHIFT 0x1
+#define DCFE_SOFT_RESET__SCL_ALU_SOFT_RESET_MASK 0x4
+#define DCFE_SOFT_RESET__SCL_ALU_SOFT_RESET__SHIFT 0x2
+#define DCFE_SOFT_RESET__SCL_SOFT_RESET_MASK 0x8
+#define DCFE_SOFT_RESET__SCL_SOFT_RESET__SHIFT 0x3
+#define DCFE_SOFT_RESET__CRTC_SOFT_RESET_MASK 0x10
+#define DCFE_SOFT_RESET__CRTC_SOFT_RESET__SHIFT 0x4
+#define DCFE_SOFT_RESET__PSCL_SOFT_RESET_MASK 0x20
+#define DCFE_SOFT_RESET__PSCL_SOFT_RESET__SHIFT 0x5
+#define DCFE_DBG_CONFIG__DCFE_DBG_EN_MASK 0x1
+#define DCFE_DBG_CONFIG__DCFE_DBG_EN__SHIFT 0x0
+#define DCFE_DBG_CONFIG__DCFE_DBG_SEL_MASK 0xf0
+#define DCFE_DBG_CONFIG__DCFE_DBG_SEL__SHIFT 0x4
+#define DCFE_MEM_PWR_CTRL__DCP_LUT_MEM_PWR_FORCE_MASK 0x3
+#define DCFE_MEM_PWR_CTRL__DCP_LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DCFE_MEM_PWR_CTRL__DCP_LUT_MEM_PWR_DIS_MASK 0x4
+#define DCFE_MEM_PWR_CTRL__DCP_LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DCFE_MEM_PWR_CTRL__DCP_REGAMMA_MEM_PWR_FORCE_MASK 0x18
+#define DCFE_MEM_PWR_CTRL__DCP_REGAMMA_MEM_PWR_FORCE__SHIFT 0x3
+#define DCFE_MEM_PWR_CTRL__DCP_REGAMMA_MEM_PWR_DIS_MASK 0x20
+#define DCFE_MEM_PWR_CTRL__DCP_REGAMMA_MEM_PWR_DIS__SHIFT 0x5
+#define DCFE_MEM_PWR_CTRL__SCL_COEFF_MEM_PWR_FORCE_MASK 0xc0
+#define DCFE_MEM_PWR_CTRL__SCL_COEFF_MEM_PWR_FORCE__SHIFT 0x6
+#define DCFE_MEM_PWR_CTRL__SCL_COEFF_MEM_PWR_DIS_MASK 0x100
+#define DCFE_MEM_PWR_CTRL__SCL_COEFF_MEM_PWR_DIS__SHIFT 0x8
+#define DCFE_MEM_PWR_CTRL__DCP_CURSOR_MEM_PWR_FORCE_MASK 0x600
+#define DCFE_MEM_PWR_CTRL__DCP_CURSOR_MEM_PWR_FORCE__SHIFT 0x9
+#define DCFE_MEM_PWR_CTRL__DCP_CURSOR_MEM_PWR_DIS_MASK 0x800
+#define DCFE_MEM_PWR_CTRL__DCP_CURSOR_MEM_PWR_DIS__SHIFT 0xb
+#define DCFE_MEM_PWR_CTRL__LB0_ALPHA_MEM_PWR_FORCE_MASK 0x3000
+#define DCFE_MEM_PWR_CTRL__LB0_ALPHA_MEM_PWR_FORCE__SHIFT 0xc
+#define DCFE_MEM_PWR_CTRL__LB0_ALPHA_MEM_PWR_DIS_MASK 0x4000
+#define DCFE_MEM_PWR_CTRL__LB0_ALPHA_MEM_PWR_DIS__SHIFT 0xe
+#define DCFE_MEM_PWR_CTRL__LB1_ALPHA_MEM_PWR_FORCE_MASK 0x18000
+#define DCFE_MEM_PWR_CTRL__LB1_ALPHA_MEM_PWR_FORCE__SHIFT 0xf
+#define DCFE_MEM_PWR_CTRL__LB1_ALPHA_MEM_PWR_DIS_MASK 0x20000
+#define DCFE_MEM_PWR_CTRL__LB1_ALPHA_MEM_PWR_DIS__SHIFT 0x11
+#define DCFE_MEM_PWR_CTRL__LB2_ALPHA_MEM_PWR_FORCE_MASK 0xc0000
+#define DCFE_MEM_PWR_CTRL__LB2_ALPHA_MEM_PWR_FORCE__SHIFT 0x12
+#define DCFE_MEM_PWR_CTRL__LB2_ALPHA_MEM_PWR_DIS_MASK 0x100000
+#define DCFE_MEM_PWR_CTRL__LB2_ALPHA_MEM_PWR_DIS__SHIFT 0x14
+#define DCFE_MEM_PWR_CTRL__LB0_MEM_PWR_FORCE_MASK 0x600000
+#define DCFE_MEM_PWR_CTRL__LB0_MEM_PWR_FORCE__SHIFT 0x15
+#define DCFE_MEM_PWR_CTRL__LB0_MEM_PWR_DIS_MASK 0x800000
+#define DCFE_MEM_PWR_CTRL__LB0_MEM_PWR_DIS__SHIFT 0x17
+#define DCFE_MEM_PWR_CTRL__LB1_MEM_PWR_FORCE_MASK 0x3000000
+#define DCFE_MEM_PWR_CTRL__LB1_MEM_PWR_FORCE__SHIFT 0x18
+#define DCFE_MEM_PWR_CTRL__LB1_MEM_PWR_DIS_MASK 0x4000000
+#define DCFE_MEM_PWR_CTRL__LB1_MEM_PWR_DIS__SHIFT 0x1a
+#define DCFE_MEM_PWR_CTRL__LB2_MEM_PWR_FORCE_MASK 0x18000000
+#define DCFE_MEM_PWR_CTRL__LB2_MEM_PWR_FORCE__SHIFT 0x1b
+#define DCFE_MEM_PWR_CTRL__LB2_MEM_PWR_DIS_MASK 0x20000000
+#define DCFE_MEM_PWR_CTRL__LB2_MEM_PWR_DIS__SHIFT 0x1d
+#define DCFE_MEM_PWR_CTRL2__DCP_LUT_MEM_PWR_MODE_SEL_MASK 0x3
+#define DCFE_MEM_PWR_CTRL2__DCP_LUT_MEM_PWR_MODE_SEL__SHIFT 0x0
+#define DCFE_MEM_PWR_CTRL2__DCP_REGAMMA_MEM_PWR_MODE_SEL_MASK 0xc
+#define DCFE_MEM_PWR_CTRL2__DCP_REGAMMA_MEM_PWR_MODE_SEL__SHIFT 0x2
+#define DCFE_MEM_PWR_CTRL2__SCL_COEFF_MEM_PWR_MODE_SEL_MASK 0x30
+#define DCFE_MEM_PWR_CTRL2__SCL_COEFF_MEM_PWR_MODE_SEL__SHIFT 0x4
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR_MEM_PWR_MODE_SEL_MASK 0xc0
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR_MEM_PWR_MODE_SEL__SHIFT 0x6
+#define DCFE_MEM_PWR_CTRL2__LB_ALPHA_MEM_PWR_MODE_SEL_MASK 0x300
+#define DCFE_MEM_PWR_CTRL2__LB_ALPHA_MEM_PWR_MODE_SEL__SHIFT 0x8
+#define DCFE_MEM_PWR_CTRL2__LB_MEM_PWR_MODE_SEL_MASK 0xc00
+#define DCFE_MEM_PWR_CTRL2__LB_MEM_PWR_MODE_SEL__SHIFT 0xa
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR2_MEM_PWR_MODE_SEL_MASK 0x3000
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR2_MEM_PWR_MODE_SEL__SHIFT 0xc
+#define DCFE_MEM_PWR_CTRL2__BLND_MEM_PWR_MODE_SEL_MASK 0xc000
+#define DCFE_MEM_PWR_CTRL2__BLND_MEM_PWR_MODE_SEL__SHIFT 0xe
+#define DCFE_MEM_PWR_CTRL2__BLND_MEM_PWR_FORCE_MASK 0x30000
+#define DCFE_MEM_PWR_CTRL2__BLND_MEM_PWR_FORCE__SHIFT 0x10
+#define DCFE_MEM_PWR_CTRL2__BLND_MEM_PWR_DIS_MASK 0x40000
+#define DCFE_MEM_PWR_CTRL2__BLND_MEM_PWR_DIS__SHIFT 0x12
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR2_MEM_PWR_FORCE_MASK 0x600000
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR2_MEM_PWR_FORCE__SHIFT 0x15
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR2_MEM_PWR_DIS_MASK 0x800000
+#define DCFE_MEM_PWR_CTRL2__DCP_CURSOR2_MEM_PWR_DIS__SHIFT 0x17
+#define DCFE_MEM_PWR_STATUS__DCP_LUT_MEM_PWR_STATE_MASK 0x3
+#define DCFE_MEM_PWR_STATUS__DCP_LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DCFE_MEM_PWR_STATUS__DCP_REGAMMA_MEM_PWR_STATE_MASK 0xc
+#define DCFE_MEM_PWR_STATUS__DCP_REGAMMA_MEM_PWR_STATE__SHIFT 0x2
+#define DCFE_MEM_PWR_STATUS__SCL_COEFF_MEM_PWR_STATE_MASK 0x30
+#define DCFE_MEM_PWR_STATUS__SCL_COEFF_MEM_PWR_STATE__SHIFT 0x4
+#define DCFE_MEM_PWR_STATUS__DCP_CURSOR_MEM_PWR_STATE_MASK 0xc0
+#define DCFE_MEM_PWR_STATUS__DCP_CURSOR_MEM_PWR_STATE__SHIFT 0x6
+#define DCFE_MEM_PWR_STATUS__DCP_CURSOR2_MEM_PWR_STATE_MASK 0x300
+#define DCFE_MEM_PWR_STATUS__DCP_CURSOR2_MEM_PWR_STATE__SHIFT 0x8
+#define DCFE_MEM_PWR_STATUS__LB0_ALPHA_MEM_PWR_STATE_MASK 0xc00
+#define DCFE_MEM_PWR_STATUS__LB0_ALPHA_MEM_PWR_STATE__SHIFT 0xa
+#define DCFE_MEM_PWR_STATUS__LB1_ALPHA_MEM_PWR_STATE_MASK 0x3000
+#define DCFE_MEM_PWR_STATUS__LB1_ALPHA_MEM_PWR_STATE__SHIFT 0xc
+#define DCFE_MEM_PWR_STATUS__LB2_ALPHA_MEM_PWR_STATE_MASK 0xc000
+#define DCFE_MEM_PWR_STATUS__LB2_ALPHA_MEM_PWR_STATE__SHIFT 0xe
+#define DCFE_MEM_PWR_STATUS__LB0_MEM_PWR_STATE_MASK 0x30000
+#define DCFE_MEM_PWR_STATUS__LB0_MEM_PWR_STATE__SHIFT 0x10
+#define DCFE_MEM_PWR_STATUS__LB1_MEM_PWR_STATE_MASK 0xc0000
+#define DCFE_MEM_PWR_STATUS__LB1_MEM_PWR_STATE__SHIFT 0x12
+#define DCFE_MEM_PWR_STATUS__LB2_MEM_PWR_STATE_MASK 0x300000
+#define DCFE_MEM_PWR_STATUS__LB2_MEM_PWR_STATE__SHIFT 0x14
+#define DCFE_MEM_PWR_STATUS__BLND_MEM_PWR_STATE_MASK 0xc00000
+#define DCFE_MEM_PWR_STATUS__BLND_MEM_PWR_STATE__SHIFT 0x16
+#define DCFE_MISC__DCFE_DPG_ALLOW_SR_ECO_EN_MASK 0x1
+#define DCFE_MISC__DCFE_DPG_ALLOW_SR_ECO_EN__SHIFT 0x0
+#define DCFE_FLUSH__FLUSH_OCCURED_MASK 0x1
+#define DCFE_FLUSH__FLUSH_OCCURED__SHIFT 0x0
+#define DCFE_FLUSH__CLEAR_FLUSH_OCCURED_MASK 0x2
+#define DCFE_FLUSH__CLEAR_FLUSH_OCCURED__SHIFT 0x1
+#define DCFE_FLUSH__FLUSH_DEEP_MASK 0x4
+#define DCFE_FLUSH__FLUSH_DEEP__SHIFT 0x2
+#define DCFE_FLUSH__CLEAR_FLUSH_DEEP_MASK 0x8
+#define DCFE_FLUSH__CLEAR_FLUSH_DEEP__SHIFT 0x3
+#define DCFE_FLUSH__ALL_MC_REQ_RET_MASK 0x10
+#define DCFE_FLUSH__ALL_MC_REQ_RET__SHIFT 0x4
+#define DCFEV_CLOCK_CONTROL__DISPCLK_R_DCFEV_GATE_DISABLE_MASK 0x8
+#define DCFEV_CLOCK_CONTROL__DISPCLK_R_DCFEV_GATE_DISABLE__SHIFT 0x3
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_UNP_GATE_DISABLE_MASK 0x80
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_UNP_GATE_DISABLE__SHIFT 0x7
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_SCLV_GATE_DISABLE_MASK 0x200
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_SCLV_GATE_DISABLE__SHIFT 0x9
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_COL_MAN_GATE_DISABLE_MASK 0x800
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_COL_MAN_GATE_DISABLE__SHIFT 0xb
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_PSCLV_GATE_DISABLE_MASK 0x2000
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_PSCLV_GATE_DISABLE__SHIFT 0xd
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_CRTC_GATE_DISABLE_MASK 0x8000
+#define DCFEV_CLOCK_CONTROL__DISPCLK_G_CRTC_GATE_DISABLE__SHIFT 0xf
+#define DCFEV_CLOCK_CONTROL__DCFEV_TEST_CLK_SEL_MASK 0x1f000000
+#define DCFEV_CLOCK_CONTROL__DCFEV_TEST_CLK_SEL__SHIFT 0x18
+#define DCFEV_CLOCK_CONTROL__DCFEV_CLOCK_ENABLE_MASK 0x80000000
+#define DCFEV_CLOCK_CONTROL__DCFEV_CLOCK_ENABLE__SHIFT 0x1f
+#define DCFEV_SOFT_RESET__UNP_PIXPIPE_SOFT_RESET_MASK 0x1
+#define DCFEV_SOFT_RESET__UNP_PIXPIPE_SOFT_RESET__SHIFT 0x0
+#define DCFEV_SOFT_RESET__UNP_REQ_SOFT_RESET_MASK 0x2
+#define DCFEV_SOFT_RESET__UNP_REQ_SOFT_RESET__SHIFT 0x1
+#define DCFEV_SOFT_RESET__SCLV_ALU_SOFT_RESET_MASK 0x4
+#define DCFEV_SOFT_RESET__SCLV_ALU_SOFT_RESET__SHIFT 0x2
+#define DCFEV_SOFT_RESET__SCLV_SOFT_RESET_MASK 0x8
+#define DCFEV_SOFT_RESET__SCLV_SOFT_RESET__SHIFT 0x3
+#define DCFEV_SOFT_RESET__CRTC_SOFT_RESET_MASK 0x10
+#define DCFEV_SOFT_RESET__CRTC_SOFT_RESET__SHIFT 0x4
+#define DCFEV_SOFT_RESET__PSCLV_SOFT_RESET_MASK 0x20
+#define DCFEV_SOFT_RESET__PSCLV_SOFT_RESET__SHIFT 0x5
+#define DCFEV_SOFT_RESET__COL_MAN_SOFT_RESET_MASK 0x40
+#define DCFEV_SOFT_RESET__COL_MAN_SOFT_RESET__SHIFT 0x6
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_SCLK_G_DMIFTRK_GATE_DIS_MASK 0x8
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_SCLK_G_DMIFTRK_GATE_DIS__SHIFT 0x3
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_DISPCLK_G_DMIFVL_GATE_DIS_MASK 0x10
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_DISPCLK_G_DMIFVL_GATE_DIS__SHIFT 0x4
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_DISPCLK_G_DMIFVC_GATE_DIS_MASK 0x20
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_DISPCLK_G_DMIFVC_GATE_DIS__SHIFT 0x5
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_SOFT_RESET_MASK 0x40
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_SOFT_RESET__SHIFT 0x6
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_TEST_CLK_SEL_MASK 0x1f000000
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_TEST_CLK_SEL__SHIFT 0x18
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_BUFFER_MODE_MASK 0x80000000
+#define DCFEV_DMIFV_CLOCK_CONTROL__DMIFV_BUFFER_MODE__SHIFT 0x1f
+#define DCFEV_DBG_CONFIG__DCFEV_DBG_EN_MASK 0x1
+#define DCFEV_DBG_CONFIG__DCFEV_DBG_EN__SHIFT 0x0
+#define DCFEV_DBG_CONFIG__DCFEV_DBG_SEL_MASK 0xf0
+#define DCFEV_DBG_CONFIG__DCFEV_DBG_SEL__SHIFT 0x4
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_SEL_MASK 0x3
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_SEL__SHIFT 0x0
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_0_FORCE_MASK 0x4
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_0_FORCE__SHIFT 0x2
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_1_FORCE_MASK 0x8
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_1_FORCE__SHIFT 0x3
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_2_FORCE_MASK 0x10
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_2_FORCE__SHIFT 0x4
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_3_FORCE_MASK 0x20
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_3_FORCE__SHIFT 0x5
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_4_FORCE_MASK 0x40
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_LUMA_4_FORCE__SHIFT 0x6
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_0_FORCE_MASK 0x80
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_0_FORCE__SHIFT 0x7
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_1_FORCE_MASK 0x100
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_1_FORCE__SHIFT 0x8
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_2_FORCE_MASK 0x200
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_2_FORCE__SHIFT 0x9
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_3_FORCE_MASK 0x400
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_3_FORCE__SHIFT 0xa
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_4_FORCE_MASK 0x800
+#define DCFEV_DMIFV_MEM_PWR_CTRL__DMIFV_MEM_PWR_CHROMA_4_FORCE__SHIFT 0xb
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_0_STATE_MASK 0x3
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_0_STATE__SHIFT 0x0
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_1_STATE_MASK 0xc
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_1_STATE__SHIFT 0x2
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_2_STATE_MASK 0x30
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_2_STATE__SHIFT 0x4
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_3_STATE_MASK 0xc0
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_3_STATE__SHIFT 0x6
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_4_STATE_MASK 0x300
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_LUMA_4_STATE__SHIFT 0x8
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_0_STATE_MASK 0xc00
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_0_STATE__SHIFT 0xa
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_1_STATE_MASK 0x3000
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_1_STATE__SHIFT 0xc
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_2_STATE_MASK 0xc000
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_2_STATE__SHIFT 0xe
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_3_STATE_MASK 0x30000
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_3_STATE__SHIFT 0x10
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_4_STATE_MASK 0xc0000
+#define DCFEV_DMIFV_MEM_PWR_STATUS__DMIFV_MEM_PWR_CHROMA_4_STATE__SHIFT 0x12
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_GAMMA_CORR_MEM_PWR_FORCE_MASK 0x3
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_GAMMA_CORR_MEM_PWR_FORCE__SHIFT 0x0
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_GAMMA_CORR_MEM_PWR_DIS_MASK 0x4
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_GAMMA_CORR_MEM_PWR_DIS__SHIFT 0x2
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_INPUT_GAMMA_MEM_PWR_FORCE_MASK 0x18
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_INPUT_GAMMA_MEM_PWR_FORCE__SHIFT 0x3
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_INPUT_GAMMA_MEM_PWR_DIS_MASK 0x20
+#define DCFEV_MEM_PWR_CTRL__COL_MAN_INPUT_GAMMA_MEM_PWR_DIS__SHIFT 0x5
+#define DCFEV_MEM_PWR_CTRL__SCLV_COEFF_MEM_PWR_FORCE_MASK 0xc0
+#define DCFEV_MEM_PWR_CTRL__SCLV_COEFF_MEM_PWR_FORCE__SHIFT 0x6
+#define DCFEV_MEM_PWR_CTRL__SCLV_COEFF_MEM_PWR_DIS_MASK 0x100
+#define DCFEV_MEM_PWR_CTRL__SCLV_COEFF_MEM_PWR_DIS__SHIFT 0x8
+#define DCFEV_MEM_PWR_CTRL__LBV0_MEM_PWR_FORCE_MASK 0x600
+#define DCFEV_MEM_PWR_CTRL__LBV0_MEM_PWR_FORCE__SHIFT 0x9
+#define DCFEV_MEM_PWR_CTRL__LBV0_MEM_PWR_DIS_MASK 0x800
+#define DCFEV_MEM_PWR_CTRL__LBV0_MEM_PWR_DIS__SHIFT 0xb
+#define DCFEV_MEM_PWR_CTRL__LBV1_MEM_PWR_FORCE_MASK 0x3000
+#define DCFEV_MEM_PWR_CTRL__LBV1_MEM_PWR_FORCE__SHIFT 0xc
+#define DCFEV_MEM_PWR_CTRL__LBV1_MEM_PWR_DIS_MASK 0x4000
+#define DCFEV_MEM_PWR_CTRL__LBV1_MEM_PWR_DIS__SHIFT 0xe
+#define DCFEV_MEM_PWR_CTRL__LBV2_MEM_PWR_FORCE_MASK 0x18000
+#define DCFEV_MEM_PWR_CTRL__LBV2_MEM_PWR_FORCE__SHIFT 0xf
+#define DCFEV_MEM_PWR_CTRL__LBV2_MEM_PWR_DIS_MASK 0x20000
+#define DCFEV_MEM_PWR_CTRL__LBV2_MEM_PWR_DIS__SHIFT 0x11
+#define DCFEV_MEM_PWR_CTRL2__COL_MAN_GAMMA_CORR_MEM_PWR_MODE_SEL_MASK 0x3
+#define DCFEV_MEM_PWR_CTRL2__COL_MAN_GAMMA_CORR_MEM_PWR_MODE_SEL__SHIFT 0x0
+#define DCFEV_MEM_PWR_CTRL2__COL_MAN_INPUT_GAMMA_MEM_PWR_MODE_SEL_MASK 0xc
+#define DCFEV_MEM_PWR_CTRL2__COL_MAN_INPUT_GAMMA_MEM_PWR_MODE_SEL__SHIFT 0x2
+#define DCFEV_MEM_PWR_CTRL2__SCLV_COEFF_MEM_PWR_MODE_SEL_MASK 0x30
+#define DCFEV_MEM_PWR_CTRL2__SCLV_COEFF_MEM_PWR_MODE_SEL__SHIFT 0x4
+#define DCFEV_MEM_PWR_CTRL2__LBV_MEM_PWR_MODE_SEL_MASK 0xc0
+#define DCFEV_MEM_PWR_CTRL2__LBV_MEM_PWR_MODE_SEL__SHIFT 0x6
+#define DCFEV_MEM_PWR_STATUS__COL_MAN_GAMMA_CORR_MEM_PWR_STATE_MASK 0x3
+#define DCFEV_MEM_PWR_STATUS__COL_MAN_GAMMA_CORR_MEM_PWR_STATE__SHIFT 0x0
+#define DCFEV_MEM_PWR_STATUS__COL_MAN_INPUT_GAMMA_MEM_PWR_STATE_MASK 0xc
+#define DCFEV_MEM_PWR_STATUS__COL_MAN_INPUT_GAMMA_MEM_PWR_STATE__SHIFT 0x2
+#define DCFEV_MEM_PWR_STATUS__SCLV_COEFF_MEM_PWR_STATE_MASK 0x30
+#define DCFEV_MEM_PWR_STATUS__SCLV_COEFF_MEM_PWR_STATE__SHIFT 0x4
+#define DCFEV_MEM_PWR_STATUS__LBV0_MEM_PWR_STATE_MASK 0xc0
+#define DCFEV_MEM_PWR_STATUS__LBV0_MEM_PWR_STATE__SHIFT 0x6
+#define DCFEV_MEM_PWR_STATUS__LBV1_MEM_PWR_STATE_MASK 0x300
+#define DCFEV_MEM_PWR_STATUS__LBV1_MEM_PWR_STATE__SHIFT 0x8
+#define DCFEV_MEM_PWR_STATUS__LBV2_MEM_PWR_STATE_MASK 0xc00
+#define DCFEV_MEM_PWR_STATUS__LBV2_MEM_PWR_STATE__SHIFT 0xa
+#define DCFEV_MEM_PWR_STATUS__LBV3_MEM_PWR_STATE_MASK 0x3000
+#define DCFEV_MEM_PWR_STATUS__LBV3_MEM_PWR_STATE__SHIFT 0xc
+#define DCFEV_L_FLUSH__FLUSH_OCCURED_MASK 0x1
+#define DCFEV_L_FLUSH__FLUSH_OCCURED__SHIFT 0x0
+#define DCFEV_L_FLUSH__CLEAR_FLUSH_OCCURED_MASK 0x2
+#define DCFEV_L_FLUSH__CLEAR_FLUSH_OCCURED__SHIFT 0x1
+#define DCFEV_L_FLUSH__FLUSH_DEEP_MASK 0x4
+#define DCFEV_L_FLUSH__FLUSH_DEEP__SHIFT 0x2
+#define DCFEV_L_FLUSH__CLEAR_FLUSH_DEEP_MASK 0x8
+#define DCFEV_L_FLUSH__CLEAR_FLUSH_DEEP__SHIFT 0x3
+#define DCFEV_L_FLUSH__ALL_MC_REQ_RET_MASK 0x10
+#define DCFEV_L_FLUSH__ALL_MC_REQ_RET__SHIFT 0x4
+#define DCFEV_C_FLUSH__FLUSH_OCCURED_MASK 0x1
+#define DCFEV_C_FLUSH__FLUSH_OCCURED__SHIFT 0x0
+#define DCFEV_C_FLUSH__CLEAR_FLUSH_OCCURED_MASK 0x2
+#define DCFEV_C_FLUSH__CLEAR_FLUSH_OCCURED__SHIFT 0x1
+#define DCFEV_C_FLUSH__FLUSH_DEEP_MASK 0x4
+#define DCFEV_C_FLUSH__FLUSH_DEEP__SHIFT 0x2
+#define DCFEV_C_FLUSH__CLEAR_FLUSH_DEEP_MASK 0x8
+#define DCFEV_C_FLUSH__CLEAR_FLUSH_DEEP__SHIFT 0x3
+#define DCFEV_C_FLUSH__ALL_MC_REQ_RET_MASK 0x10
+#define DCFEV_C_FLUSH__ALL_MC_REQ_RET__SHIFT 0x4
+#define DCFEV_DMIFV_DEBUG__DMIFV_DEBUG_BUS_SEL_MASK 0xf
+#define DCFEV_DMIFV_DEBUG__DMIFV_DEBUG_BUS_SEL__SHIFT 0x0
+#define DCFEV_DMIFV_DEBUG__DMIFV_DEBUG_LUMA_VS_CHROMA_MASK 0x10
+#define DCFEV_DMIFV_DEBUG__DMIFV_DEBUG_LUMA_VS_CHROMA__SHIFT 0x4
+#define DCFEV_DMIFV_DEBUG__DMIFV_DEBUG_LOWER_UPPER_MASK 0x20
+#define DCFEV_DMIFV_DEBUG__DMIFV_DEBUG_LOWER_UPPER__SHIFT 0x5
+#define DCFEV_MISC__DCFEV_DPG_ALLOW_SR_ECO_EN_MASK 0x1
+#define DCFEV_MISC__DCFEV_DPG_ALLOW_SR_ECO_EN__SHIFT 0x0
+#define DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x1
+#define DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x2
+#define DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x10
+#define DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x100
+#define DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0xff000
+#define DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xff000000
+#define DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x1
+#define DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x100
+#define DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x10000
+#define DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x100000
+#define DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x1000000
+#define DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x1fff
+#define DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x3ff0000
+#define DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000
+#define DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0xff
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0xff000
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x1000000
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000
+#define DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0xff
+#define DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0xff00000
+#define DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
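
[editorial aside, not part of the patch] A minimal sketch of how MASK/SHIFT pairs like the DC_HPD_* group above are typically consumed: clear-then-set via read-modify-write, and field extraction by mask-and-shift. The read_reg()/write_reg() helpers and the mmDC_HPD_INT_CONTROL / mmDC_HPD_INT_STATUS offsets are hypothetical stand-ins; this commit defines only the bitfield macros.

	/*
	 * Illustrative only: unmask the HPD connect interrupt with a
	 * read-modify-write of DC_HPD_INT_CONTROL, then sample the pin.
	 * read_reg(), write_reg() and the mm* offsets are assumptions,
	 * not defined in this header.
	 */
	static bool dc_hpd_enable_and_sense_sketch(void)
	{
		u32 v = read_reg(mmDC_HPD_INT_CONTROL);

		v &= ~DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK; /* active-high */
		v |= DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK;        /* unmask IRQ */
		write_reg(mmDC_HPD_INT_CONTROL, v);

		/* extract a field: mask first, then shift down to bit 0 */
		return (read_reg(mmDC_HPD_INT_STATUS) &
			DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) >>
			DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT;
	}
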
+#define DCO_SCRATCH0__DCO_SCRATCH0_MASK 0xffffffff
+#define DCO_SCRATCH0__DCO_SCRATCH0__SHIFT 0x0
+#define DCO_SCRATCH1__DCO_SCRATCH1_MASK 0xffffffff
+#define DCO_SCRATCH1__DCO_SCRATCH1__SHIFT 0x0
+#define DCO_SCRATCH2__DCO_SCRATCH2_MASK 0xffffffff
+#define DCO_SCRATCH2__DCO_SCRATCH2__SHIFT 0x0
+#define DCO_SCRATCH3__DCO_SCRATCH3_MASK 0xffffffff
+#define DCO_SCRATCH3__DCO_SCRATCH3__SHIFT 0x0
+#define DCO_SCRATCH4__DCO_SCRATCH4_MASK 0xffffffff
+#define DCO_SCRATCH4__DCO_SCRATCH4__SHIFT 0x0
+#define DCO_SCRATCH5__DCO_SCRATCH5_MASK 0xffffffff
+#define DCO_SCRATCH5__DCO_SCRATCH5__SHIFT 0x0
+#define DCO_SCRATCH6__DCO_SCRATCH6_MASK 0xffffffff
+#define DCO_SCRATCH6__DCO_SCRATCH6__SHIFT 0x0
+#define DCO_SCRATCH7__DCO_SCRATCH7_MASK 0xffffffff
+#define DCO_SCRATCH7__DCO_SCRATCH7__SHIFT 0x0
+#define DCE_VCE_CONTROL__DC_VCE_VIDEO_PIPE_SELECT_MASK 0x7
+#define DCE_VCE_CONTROL__DC_VCE_VIDEO_PIPE_SELECT__SHIFT 0x0
+#define DCE_VCE_CONTROL__DC_VCE_AUDIO_STREAM_SELECT_MASK 0x70
+#define DCE_VCE_CONTROL__DC_VCE_AUDIO_STREAM_SELECT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS__SCL_DISP1_MODE_CHANGE_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS__SCL_DISP1_MODE_CHANGE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS__D1BLND_DATA_UNDERFLOW_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS__D1BLND_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS__CRTC1_SNAPSHOT_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS__CRTC1_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_COUNT_NOW_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS__CRTC1_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGA_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGB_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS__CRTC1_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS__CRTC1_VSYNC_NOM_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS__CRTC1_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS__CRTC1_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x400
+#define DISP_INTERRUPT_STATUS__CRTC1_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS__DIGA_DISPCLK_SWITCH_ALLOWED_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS__DIGA_DISPCLK_SWITCH_ALLOWED_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS__DACB_AUTODETECT_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS__DACB_AUTODETECT_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT_MASK 0x1000000
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS__DC_I2C_HW_DONE_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS__DC_I2C_HW_DONE_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE__SCL_DISP2_MODE_CHANGE_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE__SCL_DISP2_MODE_CHANGE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE__D2BLND_DATA_UNDERFLOW_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE__D2BLND_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SNAPSHOT_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_COUNT_NOW_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGA_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGB_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_VSYNC_NOM_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC2_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DIGB_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE__AUX2_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D1_VLINE2_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D1_VLINE2_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE2_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE2_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D3_VLINE2_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE__LB_D3_VLINE2_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_EXT_TIMING_SYNC_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_VERTICAL_INTERRUPT0_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_VERTICAL_INTERRUPT1_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_VERTICAL_INTERRUPT2_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__CRTC1_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE__DISP_INTERRUPT_STATUS_CONTINUE2__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE2__SCL_DISP3_MODE_CHANGE_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE2__SCL_DISP3_MODE_CHANGE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE2__D3BLND_DATA_UNDERFLOW_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE2__D3BLND_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SNAPSHOT_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_COUNT_NOW_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGA_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGB_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_VSYNC_NOM_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC3_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DIGC_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__AUX3_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D4_VLINE2_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D4_VLINE2_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D5_VLINE2_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D5_VLINE2_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D6_VLINE2_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__LB_D6_VLINE2_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_EXT_TIMING_SYNC_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_VERTICAL_INTERRUPT0_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_VERTICAL_INTERRUPT1_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_VERTICAL_INTERRUPT2_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__CRTC2_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE2__DISP_INTERRUPT_STATUS_CONTINUE3__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE3__SCL_DISP4_MODE_CHANGE_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE3__SCL_DISP4_MODE_CHANGE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE3__D4BLND_DATA_UNDERFLOW_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE3__D4BLND_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SNAPSHOT_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_COUNT_NOW_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGA_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGB_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_VSYNC_NOM_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC4_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DIGD_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__AUX4_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE3__BUFMGR_IHIF_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__BUFMGR_IHIF_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL_HOST_CONFLICT_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL_HOST_CONFLICT_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL_DATA_OVERFLOW_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__WBSCL_DATA_OVERFLOW_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_EXT_TIMING_SYNC_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_VERTICAL_INTERRUPT0_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_VERTICAL_INTERRUPT1_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_VERTICAL_INTERRUPT2_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__CRTC3_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE3__DISP_INTERRUPT_STATUS_CONTINUE4__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE4__SCL_DISP5_MODE_CHANGE_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE4__SCL_DISP5_MODE_CHANGE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE4__D5BLND_DATA_UNDERFLOW_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE4__D5BLND_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SNAPSHOT_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_COUNT_NOW_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGA_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGB_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_VSYNC_NOM_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DIGE_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__AUX5_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_EXT_TIMING_SYNC_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x1000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_EXT_TIMING_SYNC_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC5_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_VERTICAL_INTERRUPT0_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_VERTICAL_INTERRUPT1_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_VERTICAL_INTERRUPT2_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__CRTC4_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE4__DISP_INTERRUPT_STATUS_CONTINUE5__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE5__SCL_DISP6_MODE_CHANGE_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE5__SCL_DISP6_MODE_CHANGE_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE5__D6BLND_DATA_UNDERFLOW_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE5__D6BLND_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SNAPSHOT_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_COUNT_NOW_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGA_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGB_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VSYNC_NOM_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DIGF_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__AUX6_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_EXT_TIMING_SYNC_LOSS_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_EXT_TIMING_SYNC_LOSS_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_EXT_TIMING_SYNC_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_EXT_TIMING_SYNC_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_EXT_TIMING_SYNC_SIGNAL_INTERRUPT_MASK 0x1000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_EXT_TIMING_SYNC_SIGNAL_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC5_VERTICAL_INTERRUPT0_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC5_VERTICAL_INTERRUPT0__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC5_VERTICAL_INTERRUPT1_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC5_VERTICAL_INTERRUPT1__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC5_VERTICAL_INTERRUPT2_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC5_VERTICAL_INTERRUPT2__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VERTICAL_INTERRUPT0_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VERTICAL_INTERRUPT0__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VERTICAL_INTERRUPT1_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VERTICAL_INTERRUPT1__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VERTICAL_INTERRUPT2_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__CRTC6_VERTICAL_INTERRUPT2__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DISP_INTERRUPT_STATUS_CONTINUE6_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE5__DISP_INTERRUPT_STATUS_CONTINUE6__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER0_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER1_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER2_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER3_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER4_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER5_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER6_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER7_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DCRX_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE6__BUFMGR_CWB0_IHIF_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE6__BUFMGR_CWB0_IHIF_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE6__BUFMGR_CWB1_IHIF_INTERRUPT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE6__BUFMGR_CWB1_IHIF_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DIGG_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DIGG_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DIGG_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DIGG_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_ERROR_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX1_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_ERROR_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX2_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_ERROR_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX3_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_ERROR_INTERRUPT_MASK 0x1000000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX4_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_ERROR_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX5_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_LOCK_DONE_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_ERROR_INTERRUPT_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__AUX6_GTC_SYNC_ERROR_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DISP_INTERRUPT_STATUS_CONTINUE7_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE6__DISP_INTERRUPT_STATUS_CONTINUE7__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER0_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER1_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER2_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER3_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER4_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER5_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER6_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER7_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCCG_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER0_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER1_INTERRUPT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER2_INTERRUPT_MASK 0x800
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER2_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER3_INTERRUPT_MASK 0x1000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER3_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER4_INTERRUPT_MASK 0x2000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER4_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER5_INTERRUPT_MASK 0x4000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER5_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER6_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER6_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER7_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCI_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER0_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER1_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER2_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER3_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER4_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER5_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER6_INTERRUPT_MASK 0x1000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER7_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DCO_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER0_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER1_INTERRUPT_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER2_INTERRUPT_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER3_INTERRUPT_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__WB_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DISP_INTERRUPT_STATUS_CONTINUE8_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE7__DISP_INTERRUPT_STATUS_CONTINUE8__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER0_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER1_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER2_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER3_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER4_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER5_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER6_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER7_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE0_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER0_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER1_INTERRUPT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER2_INTERRUPT_MASK 0x800
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER2_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER3_INTERRUPT_MASK 0x1000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER3_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER4_INTERRUPT_MASK 0x2000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER4_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER5_INTERRUPT_MASK 0x4000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER5_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER6_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER6_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER7_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE1_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER0_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER1_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER2_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER3_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER4_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER5_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER6_INTERRUPT_MASK 0x1000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER7_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DCFE2_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER4_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER5_INTERRUPT_MASK 0x10000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER6_INTERRUPT_MASK 0x20000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER7_INTERRUPT_MASK 0x40000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__WB_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DISP_INTERRUPT_STATUS_CONTINUE9_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE8__DISP_INTERRUPT_STATUS_CONTINUE9__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER0_INTERRUPT_MASK 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x0
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER1_INTERRUPT_MASK 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER2_INTERRUPT_MASK 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x2
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER3_INTERRUPT_MASK 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x3
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER4_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER5_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER6_INTERRUPT_MASK 0x40
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER7_INTERRUPT_MASK 0x80
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x100
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE3_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER0_INTERRUPT_MASK 0x200
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER1_INTERRUPT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER1_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER2_INTERRUPT_MASK 0x800
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER2_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER3_INTERRUPT_MASK 0x1000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER3_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER4_INTERRUPT_MASK 0x2000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER4_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER5_INTERRUPT_MASK 0x4000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER5_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER6_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER6_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER7_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE4_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER0_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER0_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER1_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER1_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER2_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER2_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER3_INTERRUPT_MASK 0x200000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER3_INTERRUPT__SHIFT 0x15
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER4_INTERRUPT_MASK 0x400000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER4_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER5_INTERRUPT_MASK 0x800000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER5_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER6_INTERRUPT_MASK 0x1000000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER6_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER7_INTERRUPT_MASK 0x2000000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER7_INTERRUPT__SHIFT 0x19
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x4000000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DCFE5_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WB_PERFMON_COUNTER_OFF_INTERRUPT_MASK 0x8000000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__WB_PERFMON_COUNTER_OFF_INTERRUPT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DISP_INTERRUPT_STATUS_CONTINUE10_MASK 0x80000000
+#define DISP_INTERRUPT_STATUS_CONTINUE9__DISP_INTERRUPT_STATUS_CONTINUE10__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPA_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPA_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPA_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x20
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPA_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPB_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x400
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPB_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPB_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x800
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DIGLPB_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0xb
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER0_INTERRUPT_MASK 0x1000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER0_INTERRUPT__SHIFT 0xc
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER1_INTERRUPT_MASK 0x2000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER1_INTERRUPT__SHIFT 0xd
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER2_INTERRUPT_MASK 0x4000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER2_INTERRUPT__SHIFT 0xe
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER3_INTERRUPT_MASK 0x8000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER3_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER4_INTERRUPT_MASK 0x10000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER4_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER5_INTERRUPT_MASK 0x20000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER5_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER6_INTERRUPT_MASK 0x40000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER6_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER7_INTERRUPT_MASK 0x80000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER7_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER_OFF_INTERRUPT_MASK 0x100000
+#define DISP_INTERRUPT_STATUS_CONTINUE10__DCCG_PERFMON2_COUNTER_OFF_INTERRUPT__SHIFT 0x14
+#define DCO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE_MASK 0x1
+#define DCO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE__SHIFT 0x0
+#define DCO_MEM_PWR_STATUS__MVP_MEM_PWR_STATE_MASK 0x4
+#define DCO_MEM_PWR_STATUS__MVP_MEM_PWR_STATE__SHIFT 0x2
+#define DCO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE_MASK 0x8
+#define DCO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE__SHIFT 0x3
+#define DCO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE_MASK 0x10
+#define DCO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE__SHIFT 0x4
+#define DCO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE_MASK 0x20
+#define DCO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE__SHIFT 0x5
+#define DCO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE_MASK 0x40
+#define DCO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE__SHIFT 0x6
+#define DCO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE_MASK 0x80
+#define DCO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE__SHIFT 0x7
+#define DCO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE_MASK 0x100
+#define DCO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE__SHIFT 0x8
+#define DCO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE_MASK 0x200
+#define DCO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE__SHIFT 0x9
+#define DCO_MEM_PWR_STATUS__HDMI0_MEM_PWR_STATE_MASK 0xc00
+#define DCO_MEM_PWR_STATUS__HDMI0_MEM_PWR_STATE__SHIFT 0xa
+#define DCO_MEM_PWR_STATUS__HDMI1_MEM_PWR_STATE_MASK 0x3000
+#define DCO_MEM_PWR_STATUS__HDMI1_MEM_PWR_STATE__SHIFT 0xc
+#define DCO_MEM_PWR_STATUS__HDMI2_MEM_PWR_STATE_MASK 0xc000
+#define DCO_MEM_PWR_STATUS__HDMI2_MEM_PWR_STATE__SHIFT 0xe
+#define DCO_MEM_PWR_STATUS__HDMI3_MEM_PWR_STATE_MASK 0x30000
+#define DCO_MEM_PWR_STATUS__HDMI3_MEM_PWR_STATE__SHIFT 0x10
+#define DCO_MEM_PWR_STATUS__HDMI4_MEM_PWR_STATE_MASK 0xc0000
+#define DCO_MEM_PWR_STATUS__HDMI4_MEM_PWR_STATE__SHIFT 0x12
+#define DCO_MEM_PWR_STATUS__HDMI5_MEM_PWR_STATE_MASK 0x300000
+#define DCO_MEM_PWR_STATUS__HDMI5_MEM_PWR_STATE__SHIFT 0x14
+#define DCO_MEM_PWR_STATUS__HDMI6_MEM_PWR_STATE_MASK 0xc00000
+#define DCO_MEM_PWR_STATUS__HDMI6_MEM_PWR_STATE__SHIFT 0x16
+#define DCO_MEM_PWR_STATUS1__DPLPA_MEM_PWR_STATE_MASK 0x1
+#define DCO_MEM_PWR_STATUS1__DPLPA_MEM_PWR_STATE__SHIFT 0x0
+#define DCO_MEM_PWR_STATUS1__DPLPB_MEM_PWR_STATE_MASK 0x2
+#define DCO_MEM_PWR_STATUS1__DPLPB_MEM_PWR_STATE__SHIFT 0x1
+#define DCO_MEM_PWR_STATUS1__HDMILP0_MEM_PWR_STATE_MASK 0xc00
+#define DCO_MEM_PWR_STATUS1__HDMILP0_MEM_PWR_STATE__SHIFT 0xa
+#define DCO_MEM_PWR_STATUS1__HDMILP1_MEM_PWR_STATE_MASK 0x3000
+#define DCO_MEM_PWR_STATUS1__HDMILP1_MEM_PWR_STATE__SHIFT 0xc
+#define DCO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE_MASK 0x1
+#define DCO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE__SHIFT 0x0
+#define DCO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS_MASK 0x2
+#define DCO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS__SHIFT 0x1
+#define DCO_MEM_PWR_CTRL__MVP_LIGHT_SLEEP_DIS_MASK 0x8
+#define DCO_MEM_PWR_CTRL__MVP_LIGHT_SLEEP_DIS__SHIFT 0x3
+#define DCO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS_MASK 0x10
+#define DCO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS__SHIFT 0x4
+#define DCO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS_MASK 0x20
+#define DCO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS__SHIFT 0x5
+#define DCO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS_MASK 0x40
+#define DCO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS__SHIFT 0x6
+#define DCO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS_MASK 0x80
+#define DCO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS__SHIFT 0x7
+#define DCO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS_MASK 0x100
+#define DCO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS__SHIFT 0x8
+#define DCO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS_MASK 0x200
+#define DCO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS__SHIFT 0x9
+#define DCO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS_MASK 0x400
+#define DCO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS__SHIFT 0xa
+#define DCO_MEM_PWR_CTRL__HDMI0_MEM_PWR_FORCE_MASK 0x1800
+#define DCO_MEM_PWR_CTRL__HDMI0_MEM_PWR_FORCE__SHIFT 0xb
+#define DCO_MEM_PWR_CTRL__HDMI0_MEM_PWR_DIS_MASK 0x2000
+#define DCO_MEM_PWR_CTRL__HDMI0_MEM_PWR_DIS__SHIFT 0xd
+#define DCO_MEM_PWR_CTRL__HDMI1_MEM_PWR_FORCE_MASK 0xc000
+#define DCO_MEM_PWR_CTRL__HDMI1_MEM_PWR_FORCE__SHIFT 0xe
+#define DCO_MEM_PWR_CTRL__HDMI1_MEM_PWR_DIS_MASK 0x10000
+#define DCO_MEM_PWR_CTRL__HDMI1_MEM_PWR_DIS__SHIFT 0x10
+#define DCO_MEM_PWR_CTRL__HDMI2_MEM_PWR_FORCE_MASK 0x60000
+#define DCO_MEM_PWR_CTRL__HDMI2_MEM_PWR_FORCE__SHIFT 0x11
+#define DCO_MEM_PWR_CTRL__HDMI2_MEM_PWR_DIS_MASK 0x80000
+#define DCO_MEM_PWR_CTRL__HDMI2_MEM_PWR_DIS__SHIFT 0x13
+#define DCO_MEM_PWR_CTRL__HDMI3_MEM_PWR_FORCE_MASK 0x300000
+#define DCO_MEM_PWR_CTRL__HDMI3_MEM_PWR_FORCE__SHIFT 0x14
+#define DCO_MEM_PWR_CTRL__HDMI3_MEM_PWR_DIS_MASK 0x400000
+#define DCO_MEM_PWR_CTRL__HDMI3_MEM_PWR_DIS__SHIFT 0x16
+#define DCO_MEM_PWR_CTRL__HDMI4_MEM_PWR_FORCE_MASK 0x1800000
+#define DCO_MEM_PWR_CTRL__HDMI4_MEM_PWR_FORCE__SHIFT 0x17
+#define DCO_MEM_PWR_CTRL__HDMI4_MEM_PWR_DIS_MASK 0x2000000
+#define DCO_MEM_PWR_CTRL__HDMI4_MEM_PWR_DIS__SHIFT 0x19
+#define DCO_MEM_PWR_CTRL__HDMI5_MEM_PWR_FORCE_MASK 0xc000000
+#define DCO_MEM_PWR_CTRL__HDMI5_MEM_PWR_FORCE__SHIFT 0x1a
+#define DCO_MEM_PWR_CTRL__HDMI5_MEM_PWR_DIS_MASK 0x10000000
+#define DCO_MEM_PWR_CTRL__HDMI5_MEM_PWR_DIS__SHIFT 0x1c
+#define DCO_MEM_PWR_CTRL__HDMI6_MEM_PWR_FORCE_MASK 0x60000000
+#define DCO_MEM_PWR_CTRL__HDMI6_MEM_PWR_FORCE__SHIFT 0x1d
+#define DCO_MEM_PWR_CTRL__HDMI6_MEM_PWR_DIS_MASK 0x80000000
+#define DCO_MEM_PWR_CTRL__HDMI6_MEM_PWR_DIS__SHIFT 0x1f
+#define DCO_MEM_PWR_CTRL2__HDMI_MEM_PWR_MODE_SEL_MASK 0x3
+#define DCO_MEM_PWR_CTRL2__HDMI_MEM_PWR_MODE_SEL__SHIFT 0x0
+#define DCO_MEM_PWR_CTRL2__DPLPA_LIGHT_SLEEP_DIS_MASK 0x4
+#define DCO_MEM_PWR_CTRL2__DPLPA_LIGHT_SLEEP_DIS__SHIFT 0x2
+#define DCO_MEM_PWR_CTRL2__DPLPB_LIGHT_SLEEP_DIS_MASK 0x8
+#define DCO_MEM_PWR_CTRL2__DPLPB_LIGHT_SLEEP_DIS__SHIFT 0x3
+#define DCO_MEM_PWR_CTRL2__HDMILP0_MEM_PWR_FORCE_MASK 0x30000
+#define DCO_MEM_PWR_CTRL2__HDMILP0_MEM_PWR_FORCE__SHIFT 0x10
+#define DCO_MEM_PWR_CTRL2__HDMILP0_MEM_PWR_DIS_MASK 0x40000
+#define DCO_MEM_PWR_CTRL2__HDMILP0_MEM_PWR_DIS__SHIFT 0x12
+#define DCO_MEM_PWR_CTRL2__HDMILP1_MEM_PWR_FORCE_MASK 0x180000
+#define DCO_MEM_PWR_CTRL2__HDMILP1_MEM_PWR_FORCE__SHIFT 0x13
+#define DCO_MEM_PWR_CTRL2__HDMILP1_MEM_PWR_DIS_MASK 0x200000
+#define DCO_MEM_PWR_CTRL2__HDMILP1_MEM_PWR_DIS__SHIFT 0x15
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_SOURCE_SEL_MASK 0x7
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_SOURCE_SEL__SHIFT 0x0
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_PWR_FORCE_MASK 0x30
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_PWR_FORCE__SHIFT 0x4
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_PWR_DIS_MASK 0x100
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_PWR_DIS__SHIFT 0x8
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_PWR_STATE_MASK 0x3000
+#define FMT_MEMORY0_CONTROL__FMT420_MEM0_PWR_STATE__SHIFT 0xc
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_SOURCE_SEL_MASK 0x7
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_SOURCE_SEL__SHIFT 0x0
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_PWR_FORCE_MASK 0x30
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_PWR_FORCE__SHIFT 0x4
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_PWR_DIS_MASK 0x100
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_PWR_DIS__SHIFT 0x8
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_PWR_STATE_MASK 0x3000
+#define FMT_MEMORY1_CONTROL__FMT420_MEM1_PWR_STATE__SHIFT 0xc
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_SOURCE_SEL_MASK 0x7
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_SOURCE_SEL__SHIFT 0x0
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_PWR_FORCE_MASK 0x30
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_PWR_FORCE__SHIFT 0x4
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_PWR_DIS_MASK 0x100
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_PWR_DIS__SHIFT 0x8
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_PWR_STATE_MASK 0x3000
+#define FMT_MEMORY2_CONTROL__FMT420_MEM2_PWR_STATE__SHIFT 0xc
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_SOURCE_SEL_MASK 0x7
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_SOURCE_SEL__SHIFT 0x0
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_PWR_FORCE_MASK 0x30
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_PWR_FORCE__SHIFT 0x4
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_PWR_DIS_MASK 0x100
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_PWR_DIS__SHIFT 0x8
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_PWR_STATE_MASK 0x3000
+#define FMT_MEMORY3_CONTROL__FMT420_MEM3_PWR_STATE__SHIFT 0xc
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_SOURCE_SEL_MASK 0x7
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_SOURCE_SEL__SHIFT 0x0
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_PWR_FORCE_MASK 0x30
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_PWR_FORCE__SHIFT 0x4
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_PWR_DIS_MASK 0x100
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_PWR_DIS__SHIFT 0x8
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_PWR_STATE_MASK 0x3000
+#define FMT_MEMORY4_CONTROL__FMT420_MEM4_PWR_STATE__SHIFT 0xc
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_SOURCE_SEL_MASK 0x7
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_SOURCE_SEL__SHIFT 0x0
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_PWR_FORCE_MASK 0x30
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_PWR_FORCE__SHIFT 0x4
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_PWR_DIS_MASK 0x100
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_PWR_DIS__SHIFT 0x8
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_PWR_STATE_MASK 0x3000
+#define FMT_MEMORY5_CONTROL__FMT420_MEM5_PWR_STATE__SHIFT 0xc
+#define DCO_CLK_CNTL__DISPCLK_R_DCO_GATE_DIS_MASK 0x20
+#define DCO_CLK_CNTL__DISPCLK_R_DCO_GATE_DIS__SHIFT 0x5
+#define DCO_CLK_CNTL__DISPCLK_G_ABM_GATE_DIS_MASK 0x40
+#define DCO_CLK_CNTL__DISPCLK_G_ABM_GATE_DIS__SHIFT 0x6
+#define DCO_CLK_CNTL__DISPCLK_G_DVO_GATE_DIS_MASK 0x80
+#define DCO_CLK_CNTL__DISPCLK_G_DVO_GATE_DIS__SHIFT 0x7
+#define DCO_CLK_CNTL__DISPCLK_G_DACA_GATE_DIS_MASK 0x100
+#define DCO_CLK_CNTL__DISPCLK_G_DACA_GATE_DIS__SHIFT 0x8
+#define DCO_CLK_CNTL__DISPCLK_G_DACB_GATE_DIS_MASK 0x200
+#define DCO_CLK_CNTL__DISPCLK_G_DACB_GATE_DIS__SHIFT 0x9
+#define DCO_CLK_CNTL__REFCLK_R_DCO_GATE_DIS_MASK 0x400
+#define DCO_CLK_CNTL__REFCLK_R_DCO_GATE_DIS__SHIFT 0xa
+#define DCO_CLK_CNTL__DISPCLK_G_FMT0_GATE_DIS_MASK 0x10000
+#define DCO_CLK_CNTL__DISPCLK_G_FMT0_GATE_DIS__SHIFT 0x10
+#define DCO_CLK_CNTL__DISPCLK_G_FMT1_GATE_DIS_MASK 0x20000
+#define DCO_CLK_CNTL__DISPCLK_G_FMT1_GATE_DIS__SHIFT 0x11
+#define DCO_CLK_CNTL__DISPCLK_G_FMT2_GATE_DIS_MASK 0x40000
+#define DCO_CLK_CNTL__DISPCLK_G_FMT2_GATE_DIS__SHIFT 0x12
+#define DCO_CLK_CNTL__DISPCLK_G_FMT3_GATE_DIS_MASK 0x80000
+#define DCO_CLK_CNTL__DISPCLK_G_FMT3_GATE_DIS__SHIFT 0x13
+#define DCO_CLK_CNTL__DISPCLK_G_FMT4_GATE_DIS_MASK 0x100000
+#define DCO_CLK_CNTL__DISPCLK_G_FMT4_GATE_DIS__SHIFT 0x14
+#define DCO_CLK_CNTL__DISPCLK_G_FMT5_GATE_DIS_MASK 0x200000
+#define DCO_CLK_CNTL__DISPCLK_G_FMT5_GATE_DIS__SHIFT 0x15
+#define DCO_CLK_CNTL__DISPCLK_G_DIGLPA_GATE_DIS_MASK 0x400000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGLPA_GATE_DIS__SHIFT 0x16
+#define DCO_CLK_CNTL__DISPCLK_G_DIGLPB_GATE_DIS_MASK 0x800000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGLPB_GATE_DIS__SHIFT 0x17
+#define DCO_CLK_CNTL__DISPCLK_G_DIGA_GATE_DIS_MASK 0x1000000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGA_GATE_DIS__SHIFT 0x18
+#define DCO_CLK_CNTL__DISPCLK_G_DIGB_GATE_DIS_MASK 0x2000000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGB_GATE_DIS__SHIFT 0x19
+#define DCO_CLK_CNTL__DISPCLK_G_DIGC_GATE_DIS_MASK 0x4000000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGC_GATE_DIS__SHIFT 0x1a
+#define DCO_CLK_CNTL__DISPCLK_G_DIGD_GATE_DIS_MASK 0x8000000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGD_GATE_DIS__SHIFT 0x1b
+#define DCO_CLK_CNTL__DISPCLK_G_DIGE_GATE_DIS_MASK 0x10000000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGE_GATE_DIS__SHIFT 0x1c
+#define DCO_CLK_CNTL__DISPCLK_G_DIGF_GATE_DIS_MASK 0x20000000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGF_GATE_DIS__SHIFT 0x1d
+#define DCO_CLK_CNTL__DISPCLK_G_DIGG_GATE_DIS_MASK 0x40000000
+#define DCO_CLK_CNTL__DISPCLK_G_DIGG_GATE_DIS__SHIFT 0x1e
+#define DCO_CLK_CNTL2__DCO_TEST_CLK_SEL_MASK 0x7f
+#define DCO_CLK_CNTL2__DCO_TEST_CLK_SEL__SHIFT 0x0
+#define DCO_CLK_CNTL2__SCLK_G_AFMTA_GATE_DIS_MASK 0x80
+#define DCO_CLK_CNTL2__SCLK_G_AFMTA_GATE_DIS__SHIFT 0x7
+#define DCO_CLK_CNTL2__SCLK_G_AFMTB_GATE_DIS_MASK 0x100
+#define DCO_CLK_CNTL2__SCLK_G_AFMTB_GATE_DIS__SHIFT 0x8
+#define DCO_CLK_CNTL2__SCLK_G_AFMTC_GATE_DIS_MASK 0x200
+#define DCO_CLK_CNTL2__SCLK_G_AFMTC_GATE_DIS__SHIFT 0x9
+#define DCO_CLK_CNTL2__SCLK_G_AFMTD_GATE_DIS_MASK 0x400
+#define DCO_CLK_CNTL2__SCLK_G_AFMTD_GATE_DIS__SHIFT 0xa
+#define DCO_CLK_CNTL2__SCLK_G_AFMTE_GATE_DIS_MASK 0x800
+#define DCO_CLK_CNTL2__SCLK_G_AFMTE_GATE_DIS__SHIFT 0xb
+#define DCO_CLK_CNTL2__SCLK_G_AFMTF_GATE_DIS_MASK 0x1000
+#define DCO_CLK_CNTL2__SCLK_G_AFMTF_GATE_DIS__SHIFT 0xc
+#define DCO_CLK_CNTL2__SCLK_G_AFMTG_GATE_DIS_MASK 0x2000
+#define DCO_CLK_CNTL2__SCLK_G_AFMTG_GATE_DIS__SHIFT 0xd
+#define DCO_CLK_CNTL2__SCLK_G_AFMTLPA_GATE_DIS_MASK 0x8000
+#define DCO_CLK_CNTL2__SCLK_G_AFMTLPA_GATE_DIS__SHIFT 0xf
+#define DCO_CLK_CNTL2__SCLK_G_AFMTLPB_GATE_DIS_MASK 0x10000
+#define DCO_CLK_CNTL2__SCLK_G_AFMTLPB_GATE_DIS__SHIFT 0x10
+#define DCO_CLK_CNTL2__SYMCLKA_FE_G_AFMT_GATE_DIS_MASK 0x20000
+#define DCO_CLK_CNTL2__SYMCLKA_FE_G_AFMT_GATE_DIS__SHIFT 0x11
+#define DCO_CLK_CNTL2__SYMCLKB_FE_G_AFMT_GATE_DIS_MASK 0x40000
+#define DCO_CLK_CNTL2__SYMCLKB_FE_G_AFMT_GATE_DIS__SHIFT 0x12
+#define DCO_CLK_CNTL2__SYMCLKC_FE_G_AFMT_GATE_DIS_MASK 0x80000
+#define DCO_CLK_CNTL2__SYMCLKC_FE_G_AFMT_GATE_DIS__SHIFT 0x13
+#define DCO_CLK_CNTL2__SYMCLKD_FE_G_AFMT_GATE_DIS_MASK 0x100000
+#define DCO_CLK_CNTL2__SYMCLKD_FE_G_AFMT_GATE_DIS__SHIFT 0x14
+#define DCO_CLK_CNTL2__SYMCLKE_FE_G_AFMT_GATE_DIS_MASK 0x200000
+#define DCO_CLK_CNTL2__SYMCLKE_FE_G_AFMT_GATE_DIS__SHIFT 0x15
+#define DCO_CLK_CNTL2__SYMCLKF_FE_G_AFMT_GATE_DIS_MASK 0x400000
+#define DCO_CLK_CNTL2__SYMCLKF_FE_G_AFMT_GATE_DIS__SHIFT 0x16
+#define DCO_CLK_CNTL2__SYMCLKG_FE_G_AFMT_GATE_DIS_MASK 0x800000
+#define DCO_CLK_CNTL2__SYMCLKG_FE_G_AFMT_GATE_DIS__SHIFT 0x17
+#define DCO_CLK_CNTL2__SYMCLKLPA_FE_G_AFMT_GATE_DIS_MASK 0x2000000
+#define DCO_CLK_CNTL2__SYMCLKLPA_FE_G_AFMT_GATE_DIS__SHIFT 0x19
+#define DCO_CLK_CNTL2__SYMCLKLPB_FE_G_AFMT_GATE_DIS_MASK 0x4000000
+#define DCO_CLK_CNTL2__SYMCLKLPB_FE_G_AFMT_GATE_DIS__SHIFT 0x1a
+#define DCO_CLK_CNTL3__SYMCLKA_FE_G_TMDS_GATE_DIS_MASK 0x1
+#define DCO_CLK_CNTL3__SYMCLKA_FE_G_TMDS_GATE_DIS__SHIFT 0x0
+#define DCO_CLK_CNTL3__SYMCLKB_FE_G_TMDS_GATE_DIS_MASK 0x2
+#define DCO_CLK_CNTL3__SYMCLKB_FE_G_TMDS_GATE_DIS__SHIFT 0x1
+#define DCO_CLK_CNTL3__SYMCLKC_FE_G_TMDS_GATE_DIS_MASK 0x4
+#define DCO_CLK_CNTL3__SYMCLKC_FE_G_TMDS_GATE_DIS__SHIFT 0x2
+#define DCO_CLK_CNTL3__SYMCLKD_FE_G_TMDS_GATE_DIS_MASK 0x8
+#define DCO_CLK_CNTL3__SYMCLKD_FE_G_TMDS_GATE_DIS__SHIFT 0x3
+#define DCO_CLK_CNTL3__SYMCLKE_FE_G_TMDS_GATE_DIS_MASK 0x10
+#define DCO_CLK_CNTL3__SYMCLKE_FE_G_TMDS_GATE_DIS__SHIFT 0x4
+#define DCO_CLK_CNTL3__SYMCLKF_FE_G_TMDS_GATE_DIS_MASK 0x20
+#define DCO_CLK_CNTL3__SYMCLKF_FE_G_TMDS_GATE_DIS__SHIFT 0x5
+#define DCO_CLK_CNTL3__SYMCLKG_FE_G_TMDS_GATE_DIS_MASK 0x40
+#define DCO_CLK_CNTL3__SYMCLKG_FE_G_TMDS_GATE_DIS__SHIFT 0x6
+#define DCO_CLK_CNTL3__SYMCLKLPA_FE_G_TMDS_GATE_DIS_MASK 0x100
+#define DCO_CLK_CNTL3__SYMCLKLPA_FE_G_TMDS_GATE_DIS__SHIFT 0x8
+#define DCO_CLK_CNTL3__SYMCLKLPB_FE_G_TMDS_GATE_DIS_MASK 0x200
+#define DCO_CLK_CNTL3__SYMCLKLPB_FE_G_TMDS_GATE_DIS__SHIFT 0x9
+#define DCO_CLK_CNTL3__SYMCLKA_G_TMDS_GATE_DIS_MASK 0x400
+#define DCO_CLK_CNTL3__SYMCLKA_G_TMDS_GATE_DIS__SHIFT 0xa
+#define DCO_CLK_CNTL3__SYMCLKB_G_TMDS_GATE_DIS_MASK 0x800
+#define DCO_CLK_CNTL3__SYMCLKB_G_TMDS_GATE_DIS__SHIFT 0xb
+#define DCO_CLK_CNTL3__SYMCLKC_G_TMDS_GATE_DIS_MASK 0x1000
+#define DCO_CLK_CNTL3__SYMCLKC_G_TMDS_GATE_DIS__SHIFT 0xc
+#define DCO_CLK_CNTL3__SYMCLKD_G_TMDS_GATE_DIS_MASK 0x2000
+#define DCO_CLK_CNTL3__SYMCLKD_G_TMDS_GATE_DIS__SHIFT 0xd
+#define DCO_CLK_CNTL3__SYMCLKE_G_TMDS_GATE_DIS_MASK 0x4000
+#define DCO_CLK_CNTL3__SYMCLKE_G_TMDS_GATE_DIS__SHIFT 0xe
+#define DCO_CLK_CNTL3__SYMCLKF_G_TMDS_GATE_DIS_MASK 0x8000
+#define DCO_CLK_CNTL3__SYMCLKF_G_TMDS_GATE_DIS__SHIFT 0xf
+#define DCO_CLK_CNTL3__SYMCLKG_G_TMDS_GATE_DIS_MASK 0x10000
+#define DCO_CLK_CNTL3__SYMCLKG_G_TMDS_GATE_DIS__SHIFT 0x10
+#define DCO_CLK_CNTL3__SYMCLKLPA_G_TMDS_GATE_DIS_MASK 0x40000
+#define DCO_CLK_CNTL3__SYMCLKLPA_G_TMDS_GATE_DIS__SHIFT 0x12
+#define DCO_CLK_CNTL3__SYMCLKLPB_G_TMDS_GATE_DIS_MASK 0x80000
+#define DCO_CLK_CNTL3__SYMCLKLPB_G_TMDS_GATE_DIS__SHIFT 0x13
+#define DPDBG_CNTL__DPDBG_ENABLE_MASK 0x1
+#define DPDBG_CNTL__DPDBG_ENABLE__SHIFT 0x0
+#define DPDBG_CNTL__DPDBG_INPUT_ENABLE_MASK 0x2
+#define DPDBG_CNTL__DPDBG_INPUT_ENABLE__SHIFT 0x1
+#define DPDBG_CNTL__DPDBG_SYMCLK_ON_MASK 0x10
+#define DPDBG_CNTL__DPDBG_SYMCLK_ON__SHIFT 0x4
+#define DPDBG_CNTL__DPDBG_ERROR_DETECTION_MODE_MASK 0x100
+#define DPDBG_CNTL__DPDBG_ERROR_DETECTION_MODE__SHIFT 0x8
+#define DPDBG_CNTL__DPDBG_LINE_LENGTH_MASK 0xffff0000
+#define DPDBG_CNTL__DPDBG_LINE_LENGTH__SHIFT 0x10
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_MASK_MASK 0x1
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_MASK__SHIFT 0x0
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_TYPE_MASK 0x2
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_TYPE__SHIFT 0x1
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_ACK_MASK 0x100
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_ACK__SHIFT 0x8
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_OCCURRED_MASK 0x10000
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_OCCURRED__SHIFT 0x10
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_STATUS_MASK 0x1000000
+#define DPDBG_INTERRUPT__DPDBG_FIFO_OVERFLOW_INT_STATUS__SHIFT 0x18
+#define DCO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET_MASK 0x1
+#define DCO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET__SHIFT 0x0
+#define DCO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF_MASK 0x100
+#define DCO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF__SHIFT 0x8
+#define DCO_SOFT_RESET__DACA_SOFT_RESET_MASK 0x1
+#define DCO_SOFT_RESET__DACA_SOFT_RESET__SHIFT 0x0
+#define DCO_SOFT_RESET__I2S0_SPDIF0_SOFT_RESET_MASK 0x10
+#define DCO_SOFT_RESET__I2S0_SPDIF0_SOFT_RESET__SHIFT 0x4
+#define DCO_SOFT_RESET__I2S1_SOFT_RESET_MASK 0x20
+#define DCO_SOFT_RESET__I2S1_SOFT_RESET__SHIFT 0x5
+#define DCO_SOFT_RESET__SPDIF1_SOFT_RESET_MASK 0x40
+#define DCO_SOFT_RESET__SPDIF1_SOFT_RESET__SHIFT 0x6
+#define DCO_SOFT_RESET__DB_CLK_SOFT_RESET_MASK 0x1000
+#define DCO_SOFT_RESET__DB_CLK_SOFT_RESET__SHIFT 0xc
+#define DCO_SOFT_RESET__FMT0_SOFT_RESET_MASK 0x10000
+#define DCO_SOFT_RESET__FMT0_SOFT_RESET__SHIFT 0x10
+#define DCO_SOFT_RESET__FMT1_SOFT_RESET_MASK 0x20000
+#define DCO_SOFT_RESET__FMT1_SOFT_RESET__SHIFT 0x11
+#define DCO_SOFT_RESET__FMT2_SOFT_RESET_MASK 0x40000
+#define DCO_SOFT_RESET__FMT2_SOFT_RESET__SHIFT 0x12
+#define DCO_SOFT_RESET__FMT3_SOFT_RESET_MASK 0x80000
+#define DCO_SOFT_RESET__FMT3_SOFT_RESET__SHIFT 0x13
+#define DCO_SOFT_RESET__FMT4_SOFT_RESET_MASK 0x100000
+#define DCO_SOFT_RESET__FMT4_SOFT_RESET__SHIFT 0x14
+#define DCO_SOFT_RESET__FMT5_SOFT_RESET_MASK 0x200000
+#define DCO_SOFT_RESET__FMT5_SOFT_RESET__SHIFT 0x15
+#define DCO_SOFT_RESET__MVP_SOFT_RESET_MASK 0x1000000
+#define DCO_SOFT_RESET__MVP_SOFT_RESET__SHIFT 0x18
+#define DCO_SOFT_RESET__ABM_SOFT_RESET_MASK 0x2000000
+#define DCO_SOFT_RESET__ABM_SOFT_RESET__SHIFT 0x19
+#define DCO_SOFT_RESET__DVO_SOFT_RESET_MASK 0x8000000
+#define DCO_SOFT_RESET__DVO_SOFT_RESET__SHIFT 0x1b
+#define DIG_SOFT_RESET__DIGA_FE_SOFT_RESET_MASK 0x1
+#define DIG_SOFT_RESET__DIGA_FE_SOFT_RESET__SHIFT 0x0
+#define DIG_SOFT_RESET__DIGA_BE_SOFT_RESET_MASK 0x2
+#define DIG_SOFT_RESET__DIGA_BE_SOFT_RESET__SHIFT 0x1
+#define DIG_SOFT_RESET__DIGB_FE_SOFT_RESET_MASK 0x10
+#define DIG_SOFT_RESET__DIGB_FE_SOFT_RESET__SHIFT 0x4
+#define DIG_SOFT_RESET__DIGB_BE_SOFT_RESET_MASK 0x20
+#define DIG_SOFT_RESET__DIGB_BE_SOFT_RESET__SHIFT 0x5
+#define DIG_SOFT_RESET__DIGC_FE_SOFT_RESET_MASK 0x100
+#define DIG_SOFT_RESET__DIGC_FE_SOFT_RESET__SHIFT 0x8
+#define DIG_SOFT_RESET__DIGC_BE_SOFT_RESET_MASK 0x200
+#define DIG_SOFT_RESET__DIGC_BE_SOFT_RESET__SHIFT 0x9
+#define DIG_SOFT_RESET__DIGD_FE_SOFT_RESET_MASK 0x1000
+#define DIG_SOFT_RESET__DIGD_FE_SOFT_RESET__SHIFT 0xc
+#define DIG_SOFT_RESET__DIGD_BE_SOFT_RESET_MASK 0x2000
+#define DIG_SOFT_RESET__DIGD_BE_SOFT_RESET__SHIFT 0xd
+#define DIG_SOFT_RESET__DIGE_FE_SOFT_RESET_MASK 0x10000
+#define DIG_SOFT_RESET__DIGE_FE_SOFT_RESET__SHIFT 0x10
+#define DIG_SOFT_RESET__DIGE_BE_SOFT_RESET_MASK 0x20000
+#define DIG_SOFT_RESET__DIGE_BE_SOFT_RESET__SHIFT 0x11
+#define DIG_SOFT_RESET__DIGF_FE_SOFT_RESET_MASK 0x100000
+#define DIG_SOFT_RESET__DIGF_FE_SOFT_RESET__SHIFT 0x14
+#define DIG_SOFT_RESET__DIGF_BE_SOFT_RESET_MASK 0x200000
+#define DIG_SOFT_RESET__DIGF_BE_SOFT_RESET__SHIFT 0x15
+#define DIG_SOFT_RESET__DIGG_FE_SOFT_RESET_MASK 0x1000000
+#define DIG_SOFT_RESET__DIGG_FE_SOFT_RESET__SHIFT 0x18
+#define DIG_SOFT_RESET__DIGG_BE_SOFT_RESET_MASK 0x2000000
+#define DIG_SOFT_RESET__DIGG_BE_SOFT_RESET__SHIFT 0x19
+#define DIG_SOFT_RESET__DPDBG_SOFT_RESET_MASK 0x80000000
+#define DIG_SOFT_RESET__DPDBG_SOFT_RESET__SHIFT 0x1f
+#define DIG_SOFT_RESET_2__DIGLPA_FE_SOFT_RESET_MASK 0x1
+#define DIG_SOFT_RESET_2__DIGLPA_FE_SOFT_RESET__SHIFT 0x0
+#define DIG_SOFT_RESET_2__DIGLPA_BE_SOFT_RESET_MASK 0x2
+#define DIG_SOFT_RESET_2__DIGLPA_BE_SOFT_RESET__SHIFT 0x1
+#define DIG_SOFT_RESET_2__DIGLPB_FE_SOFT_RESET_MASK 0x10
+#define DIG_SOFT_RESET_2__DIGLPB_FE_SOFT_RESET__SHIFT 0x4
+#define DIG_SOFT_RESET_2__DIGLPB_BE_SOFT_RESET_MASK 0x20
+#define DIG_SOFT_RESET_2__DIGLPB_BE_SOFT_RESET__SHIFT 0x5
+#define DCO_STEREOSYNC_SEL__GENERICA_STEREOSYNC_SEL_MASK 0x7
+#define DCO_STEREOSYNC_SEL__GENERICA_STEREOSYNC_SEL__SHIFT 0x0
+#define DCO_STEREOSYNC_SEL__GENERICB_STEREOSYNC_SEL_MASK 0x70000
+#define DCO_STEREOSYNC_SEL__GENERICB_STEREOSYNC_SEL__SHIFT 0x10
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_ENABLE_MASK 0x1
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_ENABLE__SHIFT 0x0
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_TYPE_MASK 0x10
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_TYPE__SHIFT 0x4
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_STATUS_MASK 0x100
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_STATUS__SHIFT 0x8
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_MASK_MASK 0x1000
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_MASK__SHIFT 0xc
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_INTERVAL_MASK 0xfff0000
+#define DCO_HDMI_RXSTATUS_TIMER_CONTROL__DCO_HDMI_RXSTATUS_TIMER_INTERVAL__SHIFT 0x10
+#define DCO_PSP_INTERRUPT_STATUS__DCO_PSP_INTERRUPT_STATUS_MASK 0x1
+#define DCO_PSP_INTERRUPT_STATUS__DCO_PSP_INTERRUPT_STATUS__SHIFT 0x0
+#define DCO_PSP_INTERRUPT_STATUS__DCO_PSP_INTERRUPT_MESSAGE_MASK 0xfffffffe
+#define DCO_PSP_INTERRUPT_STATUS__DCO_PSP_INTERRUPT_MESSAGE__SHIFT 0x1
+#define DCO_PSP_INTERRUPT_CLEAR__DCO_PSP_INTERRUPT_CLEAR_MASK 0x1
+#define DCO_PSP_INTERRUPT_CLEAR__DCO_PSP_INTERRUPT_CLEAR__SHIFT 0x0
+#define DCO_GENERIC_INTERRUPT_MESSAGE__DCO_GENERIC_INTERRUPT_STATUS_MASK 0x1
+#define DCO_GENERIC_INTERRUPT_MESSAGE__DCO_GENERIC_INTERRUPT_STATUS__SHIFT 0x0
+#define DCO_GENERIC_INTERRUPT_MESSAGE__DCO_GENERIC_INTERRUPT_MESSAGE_MASK 0xfffffffe
+#define DCO_GENERIC_INTERRUPT_MESSAGE__DCO_GENERIC_INTERRUPT_MESSAGE__SHIFT 0x1
+#define DCO_GENERIC_INTERRUPT_CLEAR__DCO_GENERIC_INTERRUPT_CLEAR_MASK 0x1
+#define DCO_GENERIC_INTERRUPT_CLEAR__DCO_GENERIC_INTERRUPT_CLEAR__SHIFT 0x0
+#define DCO_TEST_DEBUG_INDEX__DCO_TEST_DEBUG_INDEX_MASK 0xff
+#define DCO_TEST_DEBUG_INDEX__DCO_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCO_TEST_DEBUG_INDEX__DCO_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define DCO_TEST_DEBUG_INDEX__DCO_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define DCO_TEST_DEBUG_DATA__DCO_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DCO_TEST_DEBUG_DATA__DCO_TEST_DEBUG_DATA__SHIFT 0x0
+#define DC_I2C_CONTROL__DC_I2C_GO_MASK 0x1
+#define DC_I2C_CONTROL__DC_I2C_GO__SHIFT 0x0
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET_MASK 0x2
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET__SHIFT 0x1
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET_MASK 0x4
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET__SHIFT 0x2
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET_MASK 0x8
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET__SHIFT 0x3
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT_MASK 0x700
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT__SHIFT 0x8
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT_MASK 0x300000
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT__SHIFT 0x14
+#define DC_I2C_CONTROL__DC_I2C_DBG_REF_SEL_MASK 0x80000000
+#define DC_I2C_CONTROL__DC_I2C_DBG_REF_SEL__SHIFT 0x1f
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_MASK 0x3
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY__SHIFT 0x0
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS_MASK 0xc
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO_MASK 0x10
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO__SHIFT 0x4
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER_MASK 0x100
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER__SHIFT 0x8
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER_MASK 0x1000
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER__SHIFT 0xc
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ_MASK 0x100000
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ__SHIFT 0x14
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG_MASK 0x200000
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG__SHIFT 0x15
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ_MASK 0x1000000
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ__SHIFT 0x18
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG_MASK 0x2000000
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG__SHIFT 0x19
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT_MASK 0x1
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT__SHIFT 0x0
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK_MASK 0x2
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK__SHIFT 0x1
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK_MASK 0x4
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK__SHIFT 0x2
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT_MASK 0x10
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT__SHIFT 0x4
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK_MASK 0x20
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK__SHIFT 0x5
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK_MASK 0x40
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK__SHIFT 0x6
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT_MASK 0x100
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT__SHIFT 0x8
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK_MASK 0x200
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK__SHIFT 0x9
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK_MASK 0x400
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK__SHIFT 0xa
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT_MASK 0x1000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_INT__SHIFT 0xc
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK_MASK 0x2000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_ACK__SHIFT 0xd
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK_MASK 0x4000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC3_HW_DONE_MASK__SHIFT 0xe
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT_MASK 0x10000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_INT__SHIFT 0x10
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK_MASK 0x20000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_ACK__SHIFT 0x11
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK_MASK 0x40000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC4_HW_DONE_MASK__SHIFT 0x12
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT_MASK 0x100000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_INT__SHIFT 0x14
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK_MASK 0x200000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_ACK__SHIFT 0x15
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK_MASK 0x400000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC5_HW_DONE_MASK__SHIFT 0x16
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT_MASK 0x1000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_INT__SHIFT 0x18
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK_MASK 0x2000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_ACK__SHIFT 0x19
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK_MASK 0x4000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC6_HW_DONE_MASK__SHIFT 0x1a
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT_MASK 0x8000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_INT__SHIFT 0x1b
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK_MASK 0x10000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_ACK__SHIFT 0x1c
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK_MASK 0x20000000
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDCVGA_HW_DONE_MASK__SHIFT 0x1d
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS_MASK 0x3
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS__SHIFT 0x0
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK 0x4
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE__SHIFT 0x2
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK 0x10
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED__SHIFT 0x4
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK 0x20
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT__SHIFT 0x5
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED_MASK 0x40
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED__SHIFT 0x6
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW_MASK 0x80
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW__SHIFT 0x7
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK 0x100
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK__SHIFT 0x8
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0_MASK 0x1000
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0__SHIFT 0xc
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1_MASK 0x2000
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1__SHIFT 0xd
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2_MASK 0x4000
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2__SHIFT 0xe
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3_MASK 0x8000
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3__SHIFT 0xf
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ_MASK 0x40000
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ__SHIFT 0x12
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS_MASK 0x3
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE_MASK 0x8
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ_MASK 0x10000
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG_MASK 0x20000
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS_MASK 0x100000
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES_MASK 0xf000000
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE_MASK 0x70000000
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS_MASK 0x3
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE_MASK 0x8
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ_MASK 0x10000
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG_MASK 0x20000
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS_MASK 0x100000
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES_MASK 0xf000000
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE_MASK 0x70000000
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS_MASK 0x3
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE_MASK 0x8
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ_MASK 0x10000
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG_MASK 0x20000
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS_MASK 0x100000
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES_MASK 0xf000000
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE_MASK 0x70000000
+#define DC_I2C_DDC3_HW_STATUS__DC_I2C_DDC3_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS_MASK 0x3
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE_MASK 0x8
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ_MASK 0x10000
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG_MASK 0x20000
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS_MASK 0x100000
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES_MASK 0xf000000
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE_MASK 0x70000000
+#define DC_I2C_DDC4_HW_STATUS__DC_I2C_DDC4_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS_MASK 0x3
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE_MASK 0x8
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ_MASK 0x10000
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG_MASK 0x20000
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS_MASK 0x100000
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES_MASK 0xf000000
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE_MASK 0x70000000
+#define DC_I2C_DDC5_HW_STATUS__DC_I2C_DDC5_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_STATUS_MASK 0x3
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_DONE_MASK 0x8
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_REQ_MASK 0x10000
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_URG_MASK 0x20000
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATUS_MASK 0x100000
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_NUM_VALID_TRIES_MASK 0xf000000
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATE_MASK 0x70000000
+#define DC_I2C_DDC6_HW_STATUS__DC_I2C_DDC6_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD_MASK 0x3
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL_MASK 0x300
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE_MASK 0xffff0000
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN_MASK 0x1
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL_MASK 0x2
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE_MASK 0x10
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE_MASK 0x20
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE_MASK 0x40
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN_MASK 0x80
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY_MASK 0xff00
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY_MASK 0xff0000
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT_MASK 0xff000000
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD_MASK 0x3
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL_MASK 0x300
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE_MASK 0xffff0000
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN_MASK 0x1
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL_MASK 0x2
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE_MASK 0x10
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE_MASK 0x20
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE_MASK 0x40
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN_MASK 0x80
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY_MASK 0xff00
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY_MASK 0xff0000
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT_MASK 0xff000000
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD_MASK 0x3
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_START_STOP_TIMING_CNTL_MASK 0x300
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE_MASK 0xffff0000
+#define DC_I2C_DDC3_SPEED__DC_I2C_DDC3_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN_MASK 0x1
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL_MASK 0x2
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE_MASK 0x10
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE_MASK 0x20
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE_MASK 0x40
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN_MASK 0x80
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY_MASK 0xff00
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY_MASK 0xff0000
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT_MASK 0xff000000
+#define DC_I2C_DDC3_SETUP__DC_I2C_DDC3_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD_MASK 0x3
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_START_STOP_TIMING_CNTL_MASK 0x300
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE_MASK 0xffff0000
+#define DC_I2C_DDC4_SPEED__DC_I2C_DDC4_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN_MASK 0x1
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL_MASK 0x2
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE_MASK 0x10
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE_MASK 0x20
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE_MASK 0x40
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN_MASK 0x80
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY_MASK 0xff00
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY_MASK 0xff0000
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT_MASK 0xff000000
+#define DC_I2C_DDC4_SETUP__DC_I2C_DDC4_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD_MASK 0x3
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_START_STOP_TIMING_CNTL_MASK 0x300
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE_MASK 0xffff0000
+#define DC_I2C_DDC5_SPEED__DC_I2C_DDC5_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN_MASK 0x1
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL_MASK 0x2
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE_MASK 0x10
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE_MASK 0x20
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE_MASK 0x40
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN_MASK 0x80
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY_MASK 0xff00
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY_MASK 0xff0000
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT_MASK 0xff000000
+#define DC_I2C_DDC5_SETUP__DC_I2C_DDC5_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_THRESHOLD_MASK 0x3
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_START_STOP_TIMING_CNTL_MASK 0x300
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_PRESCALE_MASK 0xffff0000
+#define DC_I2C_DDC6_SPEED__DC_I2C_DDC6_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_EN_MASK 0x1
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_SEL_MASK 0x2
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_ENABLE_MASK 0x10
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_MODE_MASK 0x20
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_ENABLE_MASK 0x40
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_CLK_DRIVE_EN_MASK 0x80
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_BYTE_DELAY_MASK 0xff00
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_TRANSACTION_DELAY_MASK 0xff0000
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_TIME_LIMIT_MASK 0xff000000
+#define DC_I2C_DDC6_SETUP__DC_I2C_DDC6_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0_MASK 0x1
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0__SHIFT 0x0
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0_MASK 0x100
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0__SHIFT 0x8
+#define DC_I2C_TRANSACTION0__DC_I2C_START0_MASK 0x1000
+#define DC_I2C_TRANSACTION0__DC_I2C_START0__SHIFT 0xc
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0_MASK 0x2000
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0__SHIFT 0xd
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0_MASK 0x3ff0000
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0__SHIFT 0x10
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1_MASK 0x1
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1__SHIFT 0x0
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1_MASK 0x100
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1__SHIFT 0x8
+#define DC_I2C_TRANSACTION1__DC_I2C_START1_MASK 0x1000
+#define DC_I2C_TRANSACTION1__DC_I2C_START1__SHIFT 0xc
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1_MASK 0x2000
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1__SHIFT 0xd
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1_MASK 0x3ff0000
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1__SHIFT 0x10
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2_MASK 0x1
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2__SHIFT 0x0
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2_MASK 0x100
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2__SHIFT 0x8
+#define DC_I2C_TRANSACTION2__DC_I2C_START2_MASK 0x1000
+#define DC_I2C_TRANSACTION2__DC_I2C_START2__SHIFT 0xc
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2_MASK 0x2000
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2__SHIFT 0xd
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2_MASK 0x3ff0000
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2__SHIFT 0x10
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3_MASK 0x1
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3__SHIFT 0x0
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3_MASK 0x100
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3__SHIFT 0x8
+#define DC_I2C_TRANSACTION3__DC_I2C_START3_MASK 0x1000
+#define DC_I2C_TRANSACTION3__DC_I2C_START3__SHIFT 0xc
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3_MASK 0x2000
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3__SHIFT 0xd
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3_MASK 0x3ff0000
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3__SHIFT 0x10
+#define DC_I2C_DATA__DC_I2C_DATA_RW_MASK 0x1
+#define DC_I2C_DATA__DC_I2C_DATA_RW__SHIFT 0x0
+#define DC_I2C_DATA__DC_I2C_DATA_MASK 0xff00
+#define DC_I2C_DATA__DC_I2C_DATA__SHIFT 0x8
+#define DC_I2C_DATA__DC_I2C_INDEX_MASK 0x3ff0000
+#define DC_I2C_DATA__DC_I2C_INDEX__SHIFT 0x10
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE_MASK 0x80000000
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE__SHIFT 0x1f
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_STATUS_MASK 0x3
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_DONE_MASK 0x8
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_REQ_MASK 0x10000
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_URG_MASK 0x20000
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_HW_URG__SHIFT 0x11
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATUS_MASK 0x100000
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_NUM_VALID_TRIES_MASK 0xf000000
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATE_MASK 0x70000000
+#define DC_I2C_DDCVGA_HW_STATUS__DC_I2C_DDCVGA_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_THRESHOLD_MASK 0x3
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_START_STOP_TIMING_CNTL_MASK 0x300
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_PRESCALE_MASK 0xffff0000
+#define DC_I2C_DDCVGA_SPEED__DC_I2C_DDCVGA_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_EN_MASK 0x1
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_SEL_MASK 0x2
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_ENABLE_MASK 0x10
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_MODE_MASK 0x20
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_ENABLE_MASK 0x40
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_ENABLE__SHIFT 0x6
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_CLK_DRIVE_EN_MASK 0x80
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_BYTE_DELAY_MASK 0xff00
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_TRANSACTION_DELAY_MASK 0xff0000
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_TIME_LIMIT_MASK 0xff000000
+#define DC_I2C_DDCVGA_SETUP__DC_I2C_DDCVGA_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME_MASK 0xffff
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_WAIT_TIME__SHIFT 0x0
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID_MASK 0xf00000
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_NUM_TRIES_UNTIL_VALID__SHIFT 0x14
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET_MASK 0x10000000
+#define DC_I2C_EDID_DETECT_CTRL__DC_I2C_EDID_DETECT_SEND_RESET__SHIFT 0x1c
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_OCCURRED_MASK 0x1
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_OCCURRED__SHIFT 0x0
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_INT_MASK 0x2
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_INT__SHIFT 0x1
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_ACK_MASK 0x4
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_ACK__SHIFT 0x2
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_MASK_MASK 0x8
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC1_READ_REQUEST_MASK__SHIFT 0x3
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_OCCURRED_MASK 0x10
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_OCCURRED__SHIFT 0x4
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_INT_MASK 0x20
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_INT__SHIFT 0x5
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_ACK_MASK 0x40
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_ACK__SHIFT 0x6
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_MASK_MASK 0x80
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC2_READ_REQUEST_MASK__SHIFT 0x7
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_OCCURRED_MASK 0x100
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_OCCURRED__SHIFT 0x8
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_INT_MASK 0x200
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_INT__SHIFT 0x9
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_ACK_MASK 0x400
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_ACK__SHIFT 0xa
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_MASK_MASK 0x800
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC3_READ_REQUEST_MASK__SHIFT 0xb
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_OCCURRED_MASK 0x1000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_OCCURRED__SHIFT 0xc
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_INT_MASK 0x2000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_INT__SHIFT 0xd
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_ACK_MASK 0x4000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_ACK__SHIFT 0xe
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_MASK_MASK 0x8000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC4_READ_REQUEST_MASK__SHIFT 0xf
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_OCCURRED_MASK 0x10000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_OCCURRED__SHIFT 0x10
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_INT_MASK 0x20000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_INT__SHIFT 0x11
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_ACK_MASK 0x40000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_ACK__SHIFT 0x12
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_MASK_MASK 0x80000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC5_READ_REQUEST_MASK__SHIFT 0x13
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_OCCURRED_MASK 0x100000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_OCCURRED__SHIFT 0x14
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_INT_MASK 0x200000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_INT__SHIFT 0x15
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_ACK_MASK 0x400000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_ACK__SHIFT 0x16
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_MASK_MASK 0x800000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC6_READ_REQUEST_MASK__SHIFT 0x17
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_OCCURRED_MASK 0x1000000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_OCCURRED__SHIFT 0x18
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_INT_MASK 0x2000000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_INT__SHIFT 0x19
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_ACK_MASK 0x4000000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_ACK__SHIFT 0x1a
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_MASK_MASK 0x8000000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDCVGA_READ_REQUEST_MASK__SHIFT 0x1b
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_ACK_ENABLE_MASK 0x40000000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_ACK_ENABLE__SHIFT 0x1e
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_INT_TYPE_MASK 0x80000000
+#define DC_I2C_READ_REQUEST_INTERRUPT__DC_I2C_DDC_READ_REQUEST_INT_TYPE__SHIFT 0x1f
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_GO_MASK 0x1
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_GO__SHIFT 0x0
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SOFT_RESET_MASK 0x2
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SOFT_RESET__SHIFT 0x1
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SEND_RESET_MASK 0x4
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_SEND_RESET__SHIFT 0x2
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_ENABLE_MASK 0x8
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_ENABLE__SHIFT 0x3
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_DBG_REF_SEL_MASK 0x80000000
+#define GENERIC_I2C_CONTROL__GENERIC_I2C_DBG_REF_SEL__SHIFT 0x1f
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_INT_MASK 0x1
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_INT__SHIFT 0x0
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_ACK_MASK 0x2
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_ACK__SHIFT 0x1
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_MASK_MASK 0x4
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DONE_MASK__SHIFT 0x2
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_OCCURRED_MASK 0x100
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_OCCURRED__SHIFT 0x8
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_INT_MASK 0x200
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_INT__SHIFT 0x9
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_ACK_MASK 0x400
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_ACK__SHIFT 0xa
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_MASK_MASK 0x800
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_MASK__SHIFT 0xb
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_INT_TYPE_MASK 0x1000
+#define GENERIC_I2C_INTERRUPT_CONTROL__GENERIC_I2C_DDC_READ_REQUEST_INT_TYPE__SHIFT 0xc
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STATUS_MASK 0xf
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STATUS__SHIFT 0x0
+#define GENERIC_I2C_STATUS__GENERIC_I2C_DONE_MASK 0x10
+#define GENERIC_I2C_STATUS__GENERIC_I2C_DONE__SHIFT 0x4
+#define GENERIC_I2C_STATUS__GENERIC_I2C_ABORTED_MASK 0x20
+#define GENERIC_I2C_STATUS__GENERIC_I2C_ABORTED__SHIFT 0x5
+#define GENERIC_I2C_STATUS__GENERIC_I2C_TIMEOUT_MASK 0x40
+#define GENERIC_I2C_STATUS__GENERIC_I2C_TIMEOUT__SHIFT 0x6
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STOPPED_ON_NACK_MASK 0x200
+#define GENERIC_I2C_STATUS__GENERIC_I2C_STOPPED_ON_NACK__SHIFT 0x9
+#define GENERIC_I2C_STATUS__GENERIC_I2C_NACK_MASK 0x400
+#define GENERIC_I2C_STATUS__GENERIC_I2C_NACK__SHIFT 0xa
+#define GENERIC_I2C_SPEED__GENERIC_I2C_THRESHOLD_MASK 0x3
+#define GENERIC_I2C_SPEED__GENERIC_I2C_THRESHOLD__SHIFT 0x0
+#define GENERIC_I2C_SPEED__GENERIC_I2C_DISABLE_FILTER_DURING_STALL_MASK 0x10
+#define GENERIC_I2C_SPEED__GENERIC_I2C_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define GENERIC_I2C_SPEED__GENERIC_I2C_START_STOP_TIMING_CNTL_MASK 0x300
+#define GENERIC_I2C_SPEED__GENERIC_I2C_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define GENERIC_I2C_SPEED__GENERIC_I2C_PRESCALE_MASK 0xffff0000
+#define GENERIC_I2C_SPEED__GENERIC_I2C_PRESCALE__SHIFT 0x10
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_EN_MASK 0x1
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_EN__SHIFT 0x0
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_SEL_MASK 0x2
+#define GENERIC_I2C_SETUP__GENERIC_I2C_DATA_DRIVE_SEL__SHIFT 0x1
+#define GENERIC_I2C_SETUP__GENERIC_I2C_CLK_DRIVE_EN_MASK 0x80
+#define GENERIC_I2C_SETUP__GENERIC_I2C_CLK_DRIVE_EN__SHIFT 0x7
+#define GENERIC_I2C_SETUP__GENERIC_I2C_INTRA_BYTE_DELAY_MASK 0xff00
+#define GENERIC_I2C_SETUP__GENERIC_I2C_INTRA_BYTE_DELAY__SHIFT 0x8
+#define GENERIC_I2C_SETUP__GENERIC_I2C_TIME_LIMIT_MASK 0xff000000
+#define GENERIC_I2C_SETUP__GENERIC_I2C_TIME_LIMIT__SHIFT 0x18
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_RW_MASK 0x1
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_RW__SHIFT 0x0
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP_ON_NACK_MASK 0x100
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP_ON_NACK__SHIFT 0x8
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_ACK_ON_READ_MASK 0x200
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_ACK_ON_READ__SHIFT 0x9
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_START_MASK 0x1000
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_START__SHIFT 0xc
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP_MASK 0x2000
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_STOP__SHIFT 0xd
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_COUNT_MASK 0xf0000
+#define GENERIC_I2C_TRANSACTION__GENERIC_I2C_COUNT__SHIFT 0x10
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA_RW_MASK 0x1
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA_RW__SHIFT 0x0
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA_MASK 0xff00
+#define GENERIC_I2C_DATA__GENERIC_I2C_DATA__SHIFT 0x8
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX_MASK 0xf0000
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX__SHIFT 0x10
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX_WRITE_MASK 0x80000000
+#define GENERIC_I2C_DATA__GENERIC_I2C_INDEX_WRITE__SHIFT 0x1f
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SCL_PIN_SEL_MASK 0x7f
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SCL_PIN_SEL__SHIFT 0x0
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SDA_PIN_SEL_MASK 0x7f00
+#define GENERIC_I2C_PIN_SELECTION__GENERIC_I2C_SDA_PIN_SEL__SHIFT 0x8
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_OUTPUT_MASK 0x1
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_OUTPUT__SHIFT 0x0
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_INPUT_MASK 0x2
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_INPUT__SHIFT 0x1
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_EN_MASK 0x4
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SCL_EN__SHIFT 0x2
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_OUTPUT_MASK 0x10
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_OUTPUT__SHIFT 0x4
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_INPUT_MASK 0x20
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_INPUT__SHIFT 0x5
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_EN_MASK 0x40
+#define GENERIC_I2C_PIN_DEBUG__GENERIC_I2C_SDA_EN__SHIFT 0x6
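Every group in this header follows the same generated convention: each register field gets a <REG>__<FIELD>_MASK and a matching <REG>__<FIELD>__SHIFT, where the mask is the field width shifted left by the shift value. A minimal sketch of how driver code typically consumes such pairs is shown below; the get_reg_field/set_reg_field helpers and the generic_i2c_done wrapper are assumptions for illustration only, not definitions from this patch — only the two GENERIC_I2C_STATUS macros it references are taken from the header above.

/*
 * Usage sketch (assumed helpers, not part of this header): fields are
 * extracted with mask-then-shift and updated with read-modify-write.
 */
#include <linux/types.h>

static inline u32 get_reg_field(u32 reg_val, u32 mask, u32 shift)
{
	/* isolate the field, then align it to bit 0 */
	return (reg_val & mask) >> shift;
}

static inline u32 set_reg_field(u32 reg_val, u32 mask, u32 shift, u32 val)
{
	/* clear the field, then OR in the new value, clipped to the mask */
	return (reg_val & ~mask) | ((val << shift) & mask);
}

/* e.g. test the transfer-done bit in a GENERIC_I2C_STATUS readback */
static inline bool generic_i2c_done(u32 status)
{
	return get_reg_field(status,
			     GENERIC_I2C_STATUS__GENERIC_I2C_DONE_MASK,
			     GENERIC_I2C_STATUS__GENERIC_I2C_DONE__SHIFT);
}

The same idiom applies unchanged to every other MASK/__SHIFT pair in this file (BLNDV_*, CRTCV_*, XDMA_*, and the per-lane PHY registers that follow).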
+#define BLNDV_CONTROL__BLND_GLOBAL_GAIN_MASK 0xff
+#define BLNDV_CONTROL__BLND_GLOBAL_GAIN__SHIFT 0x0
+#define BLNDV_CONTROL__BLND_MODE_MASK 0x300
+#define BLNDV_CONTROL__BLND_MODE__SHIFT 0x8
+#define BLNDV_CONTROL__BLND_STEREO_TYPE_MASK 0xc00
+#define BLNDV_CONTROL__BLND_STEREO_TYPE__SHIFT 0xa
+#define BLNDV_CONTROL__BLND_STEREO_POLARITY_MASK 0x1000
+#define BLNDV_CONTROL__BLND_STEREO_POLARITY__SHIFT 0xc
+#define BLNDV_CONTROL__BLND_FEEDTHROUGH_EN_MASK 0x2000
+#define BLNDV_CONTROL__BLND_FEEDTHROUGH_EN__SHIFT 0xd
+#define BLNDV_CONTROL__BLND_ALPHA_MODE_MASK 0x30000
+#define BLNDV_CONTROL__BLND_ALPHA_MODE__SHIFT 0x10
+#define BLNDV_CONTROL__BLND_ACTIVE_OVERLAP_ONLY_MASK 0x40000
+#define BLNDV_CONTROL__BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x12
+#define BLNDV_CONTROL__BLND_MULTIPLIED_MODE_MASK 0x100000
+#define BLNDV_CONTROL__BLND_MULTIPLIED_MODE__SHIFT 0x14
+#define BLNDV_CONTROL__BLND_GLOBAL_ALPHA_MASK 0xff000000
+#define BLNDV_CONTROL__BLND_GLOBAL_ALPHA__SHIFT 0x18
+#define BLNDV_SM_CONTROL2__SM_MODE_MASK 0x7
+#define BLNDV_SM_CONTROL2__SM_MODE__SHIFT 0x0
+#define BLNDV_SM_CONTROL2__SM_FRAME_ALTERNATE_MASK 0x10
+#define BLNDV_SM_CONTROL2__SM_FRAME_ALTERNATE__SHIFT 0x4
+#define BLNDV_SM_CONTROL2__SM_FIELD_ALTERNATE_MASK 0x20
+#define BLNDV_SM_CONTROL2__SM_FIELD_ALTERNATE__SHIFT 0x5
+#define BLNDV_SM_CONTROL2__SM_FORCE_NEXT_FRAME_POL_MASK 0x300
+#define BLNDV_SM_CONTROL2__SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define BLNDV_SM_CONTROL2__SM_FORCE_NEXT_TOP_POL_MASK 0x30000
+#define BLNDV_SM_CONTROL2__SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define BLNDV_SM_CONTROL2__SM_CURRENT_FRAME_POL_MASK 0x1000000
+#define BLNDV_SM_CONTROL2__SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define BLNDV_CONTROL2__PTI_ENABLE_MASK 0x1
+#define BLNDV_CONTROL2__PTI_ENABLE__SHIFT 0x0
+#define BLNDV_CONTROL2__PTI_NEW_PIXEL_GAP_MASK 0x30
+#define BLNDV_CONTROL2__PTI_NEW_PIXEL_GAP__SHIFT 0x4
+#define BLNDV_CONTROL2__BLND_NEW_PIXEL_MODE_MASK 0x40
+#define BLNDV_CONTROL2__BLND_NEW_PIXEL_MODE__SHIFT 0x6
+#define BLNDV_CONTROL2__BLND_SUPERAA_DEGAMMA_EN_MASK 0x80
+#define BLNDV_CONTROL2__BLND_SUPERAA_DEGAMMA_EN__SHIFT 0x7
+#define BLNDV_CONTROL2__BLND_SUPERAA_REGAMMA_EN_MASK 0x100
+#define BLNDV_CONTROL2__BLND_SUPERAA_REGAMMA_EN__SHIFT 0x8
+#define BLNDV_UPDATE__BLND_UPDATE_PENDING_MASK 0x1
+#define BLNDV_UPDATE__BLND_UPDATE_PENDING__SHIFT 0x0
+#define BLNDV_UPDATE__BLND_UPDATE_TAKEN_MASK 0x100
+#define BLNDV_UPDATE__BLND_UPDATE_TAKEN__SHIFT 0x8
+#define BLNDV_UPDATE__BLND_UPDATE_LOCK_MASK 0x10000
+#define BLNDV_UPDATE__BLND_UPDATE_LOCK__SHIFT 0x10
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_OCCURED_MASK 0x1
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_OCCURED__SHIFT 0x0
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_ACK_MASK 0x100
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_ACK__SHIFT 0x8
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_MASK_MASK 0x1000
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_MASK__SHIFT 0xc
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_PIPE_INDEX_MASK 0x30000
+#define BLNDV_UNDERFLOW_INTERRUPT__BLND_UNDERFLOW_INT_PIPE_INDEX__SHIFT 0x10
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_GRPH_V_UPDATE_LOCK_MASK 0x1
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_GRPH_V_UPDATE_LOCK__SHIFT 0x0
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_GRPH_SURF_V_UPDATE_LOCK_MASK 0x2
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_GRPH_SURF_V_UPDATE_LOCK__SHIFT 0x1
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_CUR_V_UPDATE_LOCK_MASK 0x10000
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_CUR_V_UPDATE_LOCK__SHIFT 0x10
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_CUR2_V_UPDATE_LOCK_MASK 0x1000000
+#define BLNDV_V_UPDATE_LOCK__BLND_DCP_CUR2_V_UPDATE_LOCK__SHIFT 0x18
+#define BLNDV_V_UPDATE_LOCK__BLND_SCL_V_UPDATE_LOCK_MASK 0x10000000
+#define BLNDV_V_UPDATE_LOCK__BLND_SCL_V_UPDATE_LOCK__SHIFT 0x1c
+#define BLNDV_V_UPDATE_LOCK__BLND_BLND_V_UPDATE_LOCK_MASK 0x20000000
+#define BLNDV_V_UPDATE_LOCK__BLND_BLND_V_UPDATE_LOCK__SHIFT 0x1d
+#define BLNDV_V_UPDATE_LOCK__BLND_V_UPDATE_LOCK_MODE_MASK 0x80000000
+#define BLNDV_V_UPDATE_LOCK__BLND_V_UPDATE_LOCK_MODE__SHIFT 0x1f
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_UPDATE_PENDING_MASK 0x1
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_UPDATE_PENDING__SHIFT 0x0
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_UPDATE_PENDING_MASK 0x2
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_UPDATE_PENDING__SHIFT 0x1
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_SURF_UPDATE_PENDING_MASK 0x4
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDC_GRPH_SURF_UPDATE_PENDING__SHIFT 0x2
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_SURF_UPDATE_PENDING_MASK 0x8
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDO_GRPH_SURF_UPDATE_PENDING__SHIFT 0x3
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDC_CUR_UPDATE_PENDING_MASK 0x40
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDC_CUR_UPDATE_PENDING__SHIFT 0x6
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDO_CUR_UPDATE_PENDING_MASK 0x80
+#define BLNDV_REG_UPDATE_STATUS__DCP_BLNDO_CUR_UPDATE_PENDING__SHIFT 0x7
+#define BLNDV_REG_UPDATE_STATUS__SCL_BLNDC_UPDATE_PENDING_MASK 0x100
+#define BLNDV_REG_UPDATE_STATUS__SCL_BLNDC_UPDATE_PENDING__SHIFT 0x8
+#define BLNDV_REG_UPDATE_STATUS__SCL_BLNDO_UPDATE_PENDING_MASK 0x200
+#define BLNDV_REG_UPDATE_STATUS__SCL_BLNDO_UPDATE_PENDING__SHIFT 0x9
+#define BLNDV_REG_UPDATE_STATUS__BLND_BLNDC_UPDATE_PENDING_MASK 0x400
+#define BLNDV_REG_UPDATE_STATUS__BLND_BLNDC_UPDATE_PENDING__SHIFT 0xa
+#define BLNDV_REG_UPDATE_STATUS__BLND_BLNDO_UPDATE_PENDING_MASK 0x800
+#define BLNDV_REG_UPDATE_STATUS__BLND_BLNDO_UPDATE_PENDING__SHIFT 0xb
+#define BLNDV_DEBUG__BLND_CNV_MUX_SELECT_MASK 0x1
+#define BLNDV_DEBUG__BLND_CNV_MUX_SELECT__SHIFT 0x0
+#define BLNDV_DEBUG__BLND_DEBUG_MASK 0xfffffffe
+#define BLNDV_DEBUG__BLND_DEBUG__SHIFT 0x1
+#define BLNDV_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_INDEX_MASK 0xff
+#define BLNDV_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_INDEX__SHIFT 0x0
+#define BLNDV_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define BLNDV_TEST_DEBUG_INDEX__BLND_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define BLNDV_TEST_DEBUG_DATA__BLND_TEST_DEBUG_DATA_MASK 0xffffffff
+#define BLNDV_TEST_DEBUG_DATA__BLND_TEST_DEBUG_DATA__SHIFT 0x0
+#define CRTCV_H_TOTAL__CRTC_H_TOTAL_MASK 0x3fff
+#define CRTCV_H_TOTAL__CRTC_H_TOTAL__SHIFT 0x0
+#define CRTCV_H_BLANK_START_END__CRTC_H_BLANK_START_MASK 0x3fff
+#define CRTCV_H_BLANK_START_END__CRTC_H_BLANK_START__SHIFT 0x0
+#define CRTCV_H_BLANK_START_END__CRTC_H_BLANK_END_MASK 0x3fff0000
+#define CRTCV_H_BLANK_START_END__CRTC_H_BLANK_END__SHIFT 0x10
+#define CRTCV_H_SYNC_A__CRTC_H_SYNC_A_START_MASK 0x3fff
+#define CRTCV_H_SYNC_A__CRTC_H_SYNC_A_START__SHIFT 0x0
+#define CRTCV_H_SYNC_A__CRTC_H_SYNC_A_END_MASK 0x3fff0000
+#define CRTCV_H_SYNC_A__CRTC_H_SYNC_A_END__SHIFT 0x10
+#define CRTCV_V_TOTAL__CRTC_V_TOTAL_MASK 0x3fff
+#define CRTCV_V_TOTAL__CRTC_V_TOTAL__SHIFT 0x0
+#define CRTCV_V_BLANK_START_END__CRTC_V_BLANK_START_MASK 0x3fff
+#define CRTCV_V_BLANK_START_END__CRTC_V_BLANK_START__SHIFT 0x0
+#define CRTCV_V_BLANK_START_END__CRTC_V_BLANK_END_MASK 0x3fff0000
+#define CRTCV_V_BLANK_START_END__CRTC_V_BLANK_END__SHIFT 0x10
+#define CRTCV_V_SYNC_A__CRTC_V_SYNC_A_START_MASK 0x3fff
+#define CRTCV_V_SYNC_A__CRTC_V_SYNC_A_START__SHIFT 0x0
+#define CRTCV_V_SYNC_A__CRTC_V_SYNC_A_END_MASK 0x3fff0000
+#define CRTCV_V_SYNC_A__CRTC_V_SYNC_A_END__SHIFT 0x10
+#define CRTCV_CONTROL__CRTC_MASTER_EN_MASK 0x1
+#define CRTCV_CONTROL__CRTC_MASTER_EN__SHIFT 0x0
+#define CRTCV_CONTROL__CRTC_SYNC_RESET_SEL_MASK 0x10
+#define CRTCV_CONTROL__CRTC_SYNC_RESET_SEL__SHIFT 0x4
+#define CRTCV_CONTROL__CRTC_DISABLE_POINT_CNTL_MASK 0x300
+#define CRTCV_CONTROL__CRTC_DISABLE_POINT_CNTL__SHIFT 0x8
+#define CRTCV_CONTROL__CRTC_START_POINT_CNTL_MASK 0x1000
+#define CRTCV_CONTROL__CRTC_START_POINT_CNTL__SHIFT 0xc
+#define CRTCV_CONTROL__CRTC_FIELD_NUMBER_CNTL_MASK 0x2000
+#define CRTCV_CONTROL__CRTC_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define CRTCV_CONTROL__CRTC_FIELD_NUMBER_POLARITY_MASK 0x4000
+#define CRTCV_CONTROL__CRTC_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define CRTCV_CONTROL__CRTC_CURRENT_MASTER_EN_STATE_MASK 0x10000
+#define CRTCV_CONTROL__CRTC_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define CRTCV_CONTROL__CRTC_HBLANK_EARLY_CONTROL_MASK 0x700000
+#define CRTCV_CONTROL__CRTC_HBLANK_EARLY_CONTROL__SHIFT 0x14
+#define CRTCV_CONTROL__CRTC_DISP_READ_REQUEST_DISABLE_MASK 0x1000000
+#define CRTCV_CONTROL__CRTC_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define CRTCV_CONTROL__CRTC_SOF_PULL_EN_MASK 0x20000000
+#define CRTCV_CONTROL__CRTC_SOF_PULL_EN__SHIFT 0x1d
+#define CRTCV_CONTROL__CRTC_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000
+#define CRTCV_CONTROL__CRTC_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define CRTCV_CONTROL__CRTC_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000
+#define CRTCV_CONTROL__CRTC_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define CRTCV_START_LINE_CONTROL__CRTC_PROGRESSIVE_START_LINE_EARLY_MASK 0x1
+#define CRTCV_START_LINE_CONTROL__CRTC_PROGRESSIVE_START_LINE_EARLY__SHIFT 0x0
+#define CRTCV_START_LINE_CONTROL__CRTC_INTERLACE_START_LINE_EARLY_MASK 0x2
+#define CRTCV_START_LINE_CONTROL__CRTC_INTERLACE_START_LINE_EARLY__SHIFT 0x1
+#define CRTCV_START_LINE_CONTROL__CRTC_PREFETCH_EN_MASK 0x4
+#define CRTCV_START_LINE_CONTROL__CRTC_PREFETCH_EN__SHIFT 0x2
+#define CRTCV_START_LINE_CONTROL__CRTC_LEGACY_REQUESTOR_EN_MASK 0x100
+#define CRTCV_START_LINE_CONTROL__CRTC_LEGACY_REQUESTOR_EN__SHIFT 0x8
+#define CRTCV_START_LINE_CONTROL__CRTC_ADVANCED_START_LINE_POSITION_MASK 0xff000
+#define CRTCV_START_LINE_CONTROL__CRTC_ADVANCED_START_LINE_POSITION__SHIFT 0xc
+#define CRTCV_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_BLUE_MASK 0x3ff
+#define CRTCV_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_BLUE__SHIFT 0x0
+#define CRTCV_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_GREEN_MASK 0xffc00
+#define CRTCV_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_GREEN__SHIFT 0xa
+#define CRTCV_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_RED_MASK 0x3ff00000
+#define CRTCV_OVERSCAN_COLOR__CRTC_OVERSCAN_COLOR_RED__SHIFT 0x14
+#define CRTCV_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_BLUE_EXT_MASK 0x3
+#define CRTCV_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_BLUE_EXT__SHIFT 0x0
+#define CRTCV_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_GREEN_EXT_MASK 0x300
+#define CRTCV_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_GREEN_EXT__SHIFT 0x8
+#define CRTCV_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_RED_EXT_MASK 0x30000
+#define CRTCV_OVERSCAN_COLOR_EXT__CRTC_OVERSCAN_COLOR_RED_EXT__SHIFT 0x10
+#define CRTCV_BLACK_COLOR__CRTC_BLACK_COLOR_B_CB_MASK 0x3ff
+#define CRTCV_BLACK_COLOR__CRTC_BLACK_COLOR_B_CB__SHIFT 0x0
+#define CRTCV_BLACK_COLOR__CRTC_BLACK_COLOR_G_Y_MASK 0xffc00
+#define CRTCV_BLACK_COLOR__CRTC_BLACK_COLOR_G_Y__SHIFT 0xa
+#define CRTCV_BLACK_COLOR__CRTC_BLACK_COLOR_R_CR_MASK 0x3ff00000
+#define CRTCV_BLACK_COLOR__CRTC_BLACK_COLOR_R_CR__SHIFT 0x14
+#define CRTCV_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_B_CB_EXT_MASK 0x3
+#define CRTCV_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define CRTCV_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_G_Y_EXT_MASK 0x300
+#define CRTCV_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define CRTCV_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_R_CR_EXT_MASK 0x30000
+#define CRTCV_BLACK_COLOR_EXT__CRTC_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define CRTCV_CRC_CNTL__CRTC_CRC_EN_MASK 0x1
+#define CRTCV_CRC_CNTL__CRTC_CRC_EN__SHIFT 0x0
+#define CRTCV_CRC_CNTL__CRTC_CRC_CONT_EN_MASK 0x10
+#define CRTCV_CRC_CNTL__CRTC_CRC_CONT_EN__SHIFT 0x4
+#define CRTCV_CRC_CNTL__CRTC_CRC_STEREO_MODE_MASK 0x300
+#define CRTCV_CRC_CNTL__CRTC_CRC_STEREO_MODE__SHIFT 0x8
+#define CRTCV_CRC_CNTL__CRTC_CRC_INTERLACE_MODE_MASK 0x3000
+#define CRTCV_CRC_CNTL__CRTC_CRC_INTERLACE_MODE__SHIFT 0xc
+#define CRTCV_CRC_CNTL__CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x10000
+#define CRTCV_CRC_CNTL__CRTC_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x10
+#define CRTCV_CRC_CNTL__CRTC_CRC0_SELECT_MASK 0x700000
+#define CRTCV_CRC_CNTL__CRTC_CRC0_SELECT__SHIFT 0x14
+#define CRTCV_CRC_CNTL__CRTC_CRC1_SELECT_MASK 0x7000000
+#define CRTCV_CRC_CNTL__CRTC_CRC1_SELECT__SHIFT 0x18
+#define CRTCV_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_START_MASK 0x3fff
+#define CRTCV_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define CRTCV_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_END_MASK 0x3fff0000
+#define CRTCV_CRC0_WINDOWA_X_CONTROL__CRTC_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define CRTCV_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_START_MASK 0x3fff
+#define CRTCV_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define CRTCV_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_END_MASK 0x3fff0000
+#define CRTCV_CRC0_WINDOWA_Y_CONTROL__CRTC_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define CRTCV_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_START_MASK 0x3fff
+#define CRTCV_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define CRTCV_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_END_MASK 0x3fff0000
+#define CRTCV_CRC0_WINDOWB_X_CONTROL__CRTC_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define CRTCV_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_START_MASK 0x3fff
+#define CRTCV_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define CRTCV_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_END_MASK 0x3fff0000
+#define CRTCV_CRC0_WINDOWB_Y_CONTROL__CRTC_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define CRTCV_CRC0_DATA_RG__CRC0_R_CR_MASK 0xffff
+#define CRTCV_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define CRTCV_CRC0_DATA_RG__CRC0_G_Y_MASK 0xffff0000
+#define CRTCV_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define CRTCV_CRC0_DATA_B__CRC0_B_CB_MASK 0xffff
+#define CRTCV_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define CRTCV_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_START_MASK 0x3fff
+#define CRTCV_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define CRTCV_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_END_MASK 0x3fff0000
+#define CRTCV_CRC1_WINDOWA_X_CONTROL__CRTC_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define CRTCV_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_START_MASK 0x3fff
+#define CRTCV_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define CRTCV_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_END_MASK 0x3fff0000
+#define CRTCV_CRC1_WINDOWA_Y_CONTROL__CRTC_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define CRTCV_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_START_MASK 0x3fff
+#define CRTCV_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define CRTCV_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_END_MASK 0x3fff0000
+#define CRTCV_CRC1_WINDOWB_X_CONTROL__CRTC_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define CRTCV_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_START_MASK 0x3fff
+#define CRTCV_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define CRTCV_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_END_MASK 0x3fff0000
+#define CRTCV_CRC1_WINDOWB_Y_CONTROL__CRTC_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define CRTCV_CRC1_DATA_RG__CRC1_R_CR_MASK 0xffff
+#define CRTCV_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define CRTCV_CRC1_DATA_RG__CRC1_G_Y_MASK 0xffff0000
+#define CRTCV_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define CRTCV_CRC1_DATA_B__CRC1_B_CB_MASK 0xffff
+#define CRTCV_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define CRTCV_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_INDEX_MASK 0xff
+#define CRTCV_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CRTCV_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define CRTCV_TEST_DEBUG_INDEX__CRTC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CRTCV_TEST_DEBUG_DATA__CRTC_TEST_DEBUG_DATA_MASK 0xffffffff
+#define CRTCV_TEST_DEBUG_DATA__CRTC_TEST_DEBUG_DATA__SHIFT 0x0
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_SWAP_MASK 0x300
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_SWAP__SHIFT 0x8
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_VMID_MASK 0xf000
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_VMID__SHIFT 0xc
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_PRIV_MASK 0x10000
+#define XDMA_MC_PCIE_CLIENT_CONFIG__XDMA_MC_PCIE_PRIV__SHIFT 0x10
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_ARRAY_MODE_MASK 0xf
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_ARRAY_MODE__SHIFT 0x0
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_TILE_SPLIT_MASK 0x70
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_TILE_SPLIT__SHIFT 0x4
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_WIDTH_MASK 0x300
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_WIDTH__SHIFT 0x8
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_HEIGHT_MASK 0xc00
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_BANK_HEIGHT__SHIFT 0xa
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_MACRO_TILE_ASPECT_MASK 0x3000
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_MACRO_TILE_ASPECT__SHIFT 0xc
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_NUM_BANKS_MASK 0x300000
+#define XDMA_LOCAL_SURFACE_TILING1__XDMA_LOCAL_NUM_BANKS__SHIFT 0x14
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_INTERLEAVE_SIZE_MASK 0x7
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_INTERLEAVE_SIZE__SHIFT 0x0
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_MICRO_TILE_MODE_MASK 0x700000
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_MICRO_TILE_MODE__SHIFT 0x14
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_CONFIG_MASK 0xf8000000
+#define XDMA_LOCAL_SURFACE_TILING2__XDMA_LOCAL_PIPE_CONFIG__SHIFT 0x1b
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_STAT_MASK 0x100
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_STAT__SHIFT 0x8
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_MASK_MASK 0x200
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_MASK__SHIFT 0x9
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_ACK_MASK 0x400
+#define XDMA_INTERRUPT__XDMA_MSTR_MEM_URGENT_ACK__SHIFT 0xa
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_STAT_MASK 0x10000
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_STAT__SHIFT 0x10
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_MASK_MASK 0x20000
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_MASK__SHIFT 0x11
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_ACK_MASK 0x40000
+#define XDMA_INTERRUPT__XDMA_SLV_READ_URGENT_ACK__SHIFT 0x12
+#define XDMA_INTERRUPT__XDMA_PERF_MEAS_STAT_MASK 0x100000
+#define XDMA_INTERRUPT__XDMA_PERF_MEAS_STAT__SHIFT 0x14
+#define XDMA_INTERRUPT__XDMA_PERF_MEAS_MASK_MASK 0x200000
+#define XDMA_INTERRUPT__XDMA_PERF_MEAS_MASK__SHIFT 0x15
+#define XDMA_INTERRUPT__XDMA_PERF_MEAS_ACK_MASK 0x400000
+#define XDMA_INTERRUPT__XDMA_PERF_MEAS_ACK__SHIFT 0x16
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_ON_DELAY_MASK 0xf
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_ON_DELAY__SHIFT 0x0
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_OFF_DELAY_MASK 0xff0
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_GATE_DIS_MASK 0x8000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_GATE_DIS__SHIFT 0xf
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_REG_GATE_DIS_MASK 0x10000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_REG_GATE_DIS__SHIFT 0x10
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_0_MASK 0x20000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_0__SHIFT 0x11
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_1_MASK 0x40000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_1__SHIFT 0x12
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_2_MASK 0x80000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_2__SHIFT 0x13
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_3_MASK 0x100000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_3__SHIFT 0x14
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_4_MASK 0x200000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_4__SHIFT 0x15
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_5_MASK 0x400000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MDYN_GATE_DIS_PIPE_5__SHIFT 0x16
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SDYN_GATE_DIS_MASK 0x800000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SDYN_GATE_DIS__SHIFT 0x17
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MSTAT_GATE_DIS_MASK 0x1000000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_MSTAT_GATE_DIS__SHIFT 0x18
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SSTAT_GATE_DIS_MASK 0x2000000
+#define XDMA_CLOCK_GATING_CNTL__XDMA_SCLK_G_SSTAT_GATE_DIS__SHIFT 0x19
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_CORE_IDLE_STATE_MASK 0x3
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_CORE_IDLE_STATE__SHIFT 0x0
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_IDLE_STATE_MASK 0xc
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_IDLE_STATE__SHIFT 0x2
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_PCIE_STATE_MASK 0x180000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_PCIE_STATE__SHIFT 0x13
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_PCIE_TRANS_MASK 0x200000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_PCIE_TRANS__SHIFT 0x15
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_RD_STATE_MASK 0xc00000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_RD_STATE__SHIFT 0x16
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_RD_TRANS_MASK 0x2000000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_RD_TRANS__SHIFT 0x19
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_WR_STATE_MASK 0xc000000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_WR_STATE__SHIFT 0x1a
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_WR_TRANS_MASK 0x10000000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_WR_TRANS__SHIFT 0x1c
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_BIF_STATE_MASK 0x60000000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_BIF_STATE__SHIFT 0x1d
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_BIF_TRANS_MASK 0x80000000
+#define XDMA_MEM_POWER_CNTL__XDMA_MEM_IF_BIF_TRANS__SHIFT 0x1f
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_STATUS_MASK 0xf
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_STATUS__SHIFT 0x0
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_CLEAR_MASK 0x100
+#define XDMA_IF_BIF_STATUS__XDMA_IF_BIF_ERROR_CLEAR__SHIFT 0x8
+#define XDMA_PERF_MEAS_STATUS__XDMA_PERF_MEAS_STATUS_MASK 0xff
+#define XDMA_PERF_MEAS_STATUS__XDMA_PERF_MEAS_STATUS__SHIFT 0x0
+#define XDMA_IF_STATUS__XDMA_MC_PCIEWR_BUSY_MASK 0x1
+#define XDMA_IF_STATUS__XDMA_MC_PCIEWR_BUSY__SHIFT 0x0
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_INDEX_MASK 0xff
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_INDEX__SHIFT 0x0
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define XDMA_TEST_DEBUG_INDEX__XDMA_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define XDMA_TEST_DEBUG_DATA__XDMA_TEST_DEBUG_DATA_MASK 0xffffffff
+#define XDMA_TEST_DEBUG_DATA__XDMA_TEST_DEBUG_DATA__SHIFT 0x0
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_DELAY_MASK 0x7
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_DELAY__SHIFT 0x0
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_TIMEOUT_DIS_MASK 0x8
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_RDWR_TIMEOUT_DIS__SHIFT 0x3
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_TIMEOUT_DELAY_MASK 0xffff8000
+#define XDMA_RBBMIF_RDWR_CNTL__XDMA_RBBMIF_TIMEOUT_DELAY__SHIFT 0xf
+#define XDMA_PG_CONTROL__XDMA_PG_CONTROL_MASK 0xffffffff
+#define XDMA_PG_CONTROL__XDMA_PG_CONTROL__SHIFT 0x0
+#define XDMA_PG_WDATA__XDMA_PG_WDATA_MASK 0xffffffff
+#define XDMA_PG_WDATA__XDMA_PG_WDATA__SHIFT 0x0
+#define XDMA_PG_STATUS__XDMA_SERDES_RDATA_MASK 0xffffff
+#define XDMA_PG_STATUS__XDMA_SERDES_RDATA__SHIFT 0x0
+#define XDMA_PG_STATUS__XDMA_PGFSM_READ_READY_MASK 0x1000000
+#define XDMA_PG_STATUS__XDMA_PGFSM_READ_READY__SHIFT 0x18
+#define XDMA_PG_STATUS__XDMA_SERDES_BUSY_MASK 0x2000000
+#define XDMA_PG_STATUS__XDMA_SERDES_BUSY__SHIFT 0x19
+#define XDMA_PG_STATUS__XDMA_SERDES_SMU_POWER_STATUS_MASK 0x4000000
+#define XDMA_PG_STATUS__XDMA_SERDES_SMU_POWER_STATUS__SHIFT 0x1a
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_AON_TEST_DEBUG_INDEX_MASK 0xff
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_AON_TEST_DEBUG_INDEX__SHIFT 0x0
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_AON_TEST_DEBUG_WRITE_EN_MASK 0x100
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_AON_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_DEBUG_SEL_MASK 0x200
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_DEBUG_SEL__SHIFT 0x9
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_DEBUG_OUT_EN_MASK 0x400
+#define XDMA_AON_TEST_DEBUG_INDEX__XDMA_DEBUG_OUT_EN__SHIFT 0xa
+#define XDMA_AON_TEST_DEBUG_DATA__XDMA_AON_TEST_DEBUG_DATA_MASK 0xffffffff
+#define XDMA_AON_TEST_DEBUG_DATA__XDMA_AON_TEST_DEBUG_DATA__SHIFT 0x0
+#define XDMA_MSTR_CNTL__XDMA_MSTR_ALPHA_POSITION_MASK 0x3000
+#define XDMA_MSTR_CNTL__XDMA_MSTR_ALPHA_POSITION__SHIFT 0xc
+#define XDMA_MSTR_CNTL__XDMA_MSTR_MEM_READY_MASK 0x4000
+#define XDMA_MSTR_CNTL__XDMA_MSTR_MEM_READY__SHIFT 0xe
+#define XDMA_MSTR_CNTL__XDMA_MSTR_ENABLE_MASK 0x10000
+#define XDMA_MSTR_CNTL__XDMA_MSTR_ENABLE__SHIFT 0x10
+#define XDMA_MSTR_CNTL__XDMA_MSTR_DEBUG_MODE_MASK 0x40000
+#define XDMA_MSTR_CNTL__XDMA_MSTR_DEBUG_MODE__SHIFT 0x12
+#define XDMA_MSTR_CNTL__XDMA_MSTR_SOFT_RESET_MASK 0x100000
+#define XDMA_MSTR_CNTL__XDMA_MSTR_SOFT_RESET__SHIFT 0x14
+#define XDMA_MSTR_CNTL__XDMA_MSTR_BIF_STALL_EN_MASK 0x200000
+#define XDMA_MSTR_CNTL__XDMA_MSTR_BIF_STALL_EN__SHIFT 0x15
+#define XDMA_MSTR_STATUS__XDMA_MSTR_VCOUNT_CURRENT_MASK 0x3fff
+#define XDMA_MSTR_STATUS__XDMA_MSTR_VCOUNT_CURRENT__SHIFT 0x0
+#define XDMA_MSTR_STATUS__XDMA_MSTR_WRITE_LINE_CURRENT_MASK 0xfff0000
+#define XDMA_MSTR_STATUS__XDMA_MSTR_WRITE_LINE_CURRENT__SHIFT 0x10
+#define XDMA_MSTR_STATUS__XDMA_MSTR_STATUS_SELECT_MASK 0x70000000
+#define XDMA_MSTR_STATUS__XDMA_MSTR_STATUS_SELECT__SHIFT 0x1c
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_SWAP_MASK 0x300
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_SWAP__SHIFT 0x8
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_VMID_MASK 0xf000
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_VMID__SHIFT 0xc
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_PRIV_MASK 0x10000
+#define XDMA_MSTR_MEM_CLIENT_CONFIG__XDMA_MSTR_MEM_CLIENT_PRIV__SHIFT 0x10
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_MASK 0xffffffff
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR__SHIFT 0x0
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH_MASK 0xff
+#define XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH__XDMA_MSTR_LOCAL_SURFACE_BASE_ADDR_HIGH__SHIFT 0x0
+#define XDMA_MSTR_LOCAL_SURFACE_PITCH__XDMA_MSTR_LOCAL_SURFACE_PITCH_MASK 0x3fff
+#define XDMA_MSTR_LOCAL_SURFACE_PITCH__XDMA_MSTR_LOCAL_SURFACE_PITCH__SHIFT 0x0
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_CLIENT_STALL_MASK 0x1
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_CLIENT_STALL__SHIFT 0x0
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_URGENT_LEVEL_MASK 0xf00
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_URGENT_LEVEL__SHIFT 0x8
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_STALL_DELAY_MASK 0xf000
+#define XDMA_MSTR_CMD_URGENT_CNTL__XDMA_MSTR_CMD_STALL_DELAY__SHIFT 0xc
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_CLIENT_STALL_MASK 0x1
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_CLIENT_STALL__SHIFT 0x0
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LIMIT_MASK 0xf0
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LIMIT__SHIFT 0x4
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LEVEL_MASK 0xf00
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_LEVEL__SHIFT 0x8
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_STALL_DELAY_MASK 0xf000
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_STALL_DELAY__SHIFT 0xc
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_TIMER_MASK 0xffff0000
+#define XDMA_MSTR_MEM_URGENT_CNTL__XDMA_MSTR_MEM_URGENT_TIMER__SHIFT 0x10
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_TAG_MASK 0x3ff
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_TAG__SHIFT 0x0
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_MASK 0x3000
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK__SHIFT 0xc
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_CLR_MASK 0x10000
+#define XDMA_MSTR_PCIE_NACK_STATUS__XDMA_MSTR_PCIE_NACK_CLR__SHIFT 0x10
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_TAG_MASK 0x3ff
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_TAG__SHIFT 0x0
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_MASK 0x3000
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK__SHIFT 0xc
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_CLR_MASK 0x10000
+#define XDMA_MSTR_MEM_NACK_STATUS__XDMA_MSTR_MEM_NACK_CLR__SHIFT 0x10
+#define XDMA_MSTR_VSYNC_GSL_CHECK__XDMA_MSTR_VSYNC_GSL_CHECK_SEL_MASK 0x7
+#define XDMA_MSTR_VSYNC_GSL_CHECK__XDMA_MSTR_VSYNC_GSL_CHECK_SEL__SHIFT 0x0
+#define XDMA_MSTR_VSYNC_GSL_CHECK__XDMA_MSTR_VSYNC_GSL_CHECK_V_COUNT_MASK 0x3fff00
+#define XDMA_MSTR_VSYNC_GSL_CHECK__XDMA_MSTR_VSYNC_GSL_CHECK_V_COUNT__SHIFT 0x8
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_CACHE_LINES_MASK 0xff
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_CACHE_LINES__SHIFT 0x0
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_READ_REQUEST_MASK 0x100
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_READ_REQUEST__SHIFT 0x8
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_FRAME_MODE_MASK 0x200
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_FRAME_MODE__SHIFT 0x9
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_SOFT_RESET_MASK 0x400
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_SOFT_RESET__SHIFT 0xa
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_CACHE_INVALIDATE_MASK 0x800
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_CACHE_INVALIDATE__SHIFT 0xb
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_REQUEST_CHANNEL_ID_MASK 0x7000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_REQUEST_CHANNEL_ID__SHIFT 0xc
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_FLIP_MODE_MASK 0x8000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_FLIP_MODE__SHIFT 0xf
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_REQUEST_MIN_MASK 0xff0000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_REQUEST_MIN__SHIFT 0x10
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_ACTIVE_MASK 0x1000000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_ACTIVE__SHIFT 0x18
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_FLUSHING_MASK 0x2000000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_FLUSHING__SHIFT 0x19
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_FLIP_PENDING_MASK 0x4000000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_PIPE_FLIP_PENDING__SHIFT 0x1a
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_VSYNC_GSL_ENABLE_MASK 0x8000000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_VSYNC_GSL_ENABLE__SHIFT 0x1b
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_SUPERAA_ENABLE_MASK 0x10000000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_SUPERAA_ENABLE__SHIFT 0x1c
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_HSYNC_GSL_GROUP_MASK 0x60000000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_HSYNC_GSL_GROUP__SHIFT 0x1d
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_GSL_GROUP_MASTER_MASK 0x80000000
+#define XDMA_MSTR_PIPE_CNTL__XDMA_MSTR_GSL_GROUP_MASTER__SHIFT 0x1f
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_SIZE_MASK 0x3fff
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_SIZE__SHIFT 0x0
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_PREFETCH_MASK 0x3fff0000
+#define XDMA_MSTR_READ_COMMAND__XDMA_MSTR_REQUEST_PREFETCH__SHIFT 0x10
+#define XDMA_MSTR_CHANNEL_DIM__XDMA_MSTR_CHANNEL_WIDTH_MASK 0x3fff
+#define XDMA_MSTR_CHANNEL_DIM__XDMA_MSTR_CHANNEL_WIDTH__SHIFT 0x0
+#define XDMA_MSTR_CHANNEL_DIM__XDMA_MSTR_CHANNEL_HEIGHT_MASK 0x3fff0000
+#define XDMA_MSTR_CHANNEL_DIM__XDMA_MSTR_CHANNEL_HEIGHT__SHIFT 0x10
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_ACTIVE_HEIGHT_MASK 0x3fff
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_ACTIVE_HEIGHT__SHIFT 0x0
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_FRAME_HEIGHT_MASK 0x3fff0000
+#define XDMA_MSTR_HEIGHT__XDMA_MSTR_FRAME_HEIGHT__SHIFT 0x10
+#define XDMA_MSTR_REMOTE_SURFACE_BASE__XDMA_MSTR_REMOTE_SURFACE_BASE_MASK 0xffffffff
+#define XDMA_MSTR_REMOTE_SURFACE_BASE__XDMA_MSTR_REMOTE_SURFACE_BASE__SHIFT 0x0
+#define XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH__XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH_MASK 0xff
+#define XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH__XDMA_MSTR_REMOTE_SURFACE_BASE_HIGH__SHIFT 0x0
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS__XDMA_MSTR_REMOTE_GPU_ADDRESS_MASK 0xffffffff
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS__XDMA_MSTR_REMOTE_GPU_ADDRESS__SHIFT 0x0
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH__XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH_MASK 0xff
+#define XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH__XDMA_MSTR_REMOTE_GPU_ADDRESS_HIGH__SHIFT 0x0
+#define XDMA_MSTR_CACHE_BASE_ADDR__XDMA_MSTR_CACHE_BASE_ADDR_MASK 0xffffffff
+#define XDMA_MSTR_CACHE_BASE_ADDR__XDMA_MSTR_CACHE_BASE_ADDR__SHIFT 0x0
+#define XDMA_MSTR_CACHE_BASE_ADDR_HIGH__XDMA_MSTR_CACHE_BASE_ADDR_HIGH_MASK 0xff
+#define XDMA_MSTR_CACHE_BASE_ADDR_HIGH__XDMA_MSTR_CACHE_BASE_ADDR_HIGH__SHIFT 0x0
+#define XDMA_MSTR_CACHE__XDMA_MSTR_CACHE_PITCH_MASK 0x3fff
+#define XDMA_MSTR_CACHE__XDMA_MSTR_CACHE_PITCH__SHIFT 0x0
+#define XDMA_MSTR_CACHE__XDMA_MSTR_CACHE_TLB_PG_STATE_MASK 0x60000000
+#define XDMA_MSTR_CACHE__XDMA_MSTR_CACHE_TLB_PG_STATE__SHIFT 0x1d
+#define XDMA_MSTR_CACHE__XDMA_MSTR_CACHE_TLB_PG_TRANS_MASK 0x80000000
+#define XDMA_MSTR_CACHE__XDMA_MSTR_CACHE_TLB_PG_TRANS__SHIFT 0x1f
+#define XDMA_MSTR_CHANNEL_START__XDMA_MSTR_CHANNEL_START_X_MASK 0x3fff
+#define XDMA_MSTR_CHANNEL_START__XDMA_MSTR_CHANNEL_START_X__SHIFT 0x0
+#define XDMA_MSTR_CHANNEL_START__XDMA_MSTR_CHANNEL_START_Y_MASK 0x3fff0000
+#define XDMA_MSTR_CHANNEL_START__XDMA_MSTR_CHANNEL_START_Y__SHIFT 0x10
+#define XDMA_MSTR_PERFMEAS_STATUS__XDMA_MSTR_PERFMEAS_DATA_MASK 0xffffff
+#define XDMA_MSTR_PERFMEAS_STATUS__XDMA_MSTR_PERFMEAS_DATA__SHIFT 0x0
+#define XDMA_MSTR_PERFMEAS_STATUS__XDMA_MSTR_PERFMEAS_INDEX_MASK 0x7000000
+#define XDMA_MSTR_PERFMEAS_STATUS__XDMA_MSTR_PERFMEAS_INDEX__SHIFT 0x18
+#define XDMA_MSTR_PERFMEAS_STATUS__XDMA_MSTR_PERFMEAS_INDEX_MODE_MASK 0xc0000000
+#define XDMA_MSTR_PERFMEAS_STATUS__XDMA_MSTR_PERFMEAS_INDEX_MODE__SHIFT 0x1e
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_CACHE_BW_MEAS_ITER_MASK 0xfff
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_CACHE_BW_MEAS_ITER__SHIFT 0x0
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_CACHE_BW_SEGID_SEL_MASK 0x1f000
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_CACHE_BW_SEGID_SEL__SHIFT 0xc
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_CACHE_BW_COUNTER_RST_MASK 0x20000
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_CACHE_BW_COUNTER_RST__SHIFT 0x11
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_LT_MEAS_ITER_MASK 0x7ff80000
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_LT_MEAS_ITER__SHIFT 0x13
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_LT_COUNTER_RST_MASK 0x80000000
+#define XDMA_MSTR_PERFMEAS_CNTL__XDMA_MSTR_LT_COUNTER_RST__SHIFT 0x1f
+#define XDMA_SLV_CNTL__XDMA_SLV_READ_LINES_MASK 0x1
+#define XDMA_SLV_CNTL__XDMA_SLV_READ_LINES__SHIFT 0x0
+#define XDMA_SLV_CNTL__XDMA_SLV_MEM_READY_MASK 0x200
+#define XDMA_SLV_CNTL__XDMA_SLV_MEM_READY__SHIFT 0x9
+#define XDMA_SLV_CNTL__XDMA_SLV_ACTIVE_MASK 0x400
+#define XDMA_SLV_CNTL__XDMA_SLV_ACTIVE__SHIFT 0xa
+#define XDMA_SLV_CNTL__XDMA_SLV_ALPHA_POSITION_MASK 0x3000
+#define XDMA_SLV_CNTL__XDMA_SLV_ALPHA_POSITION__SHIFT 0xc
+#define XDMA_SLV_CNTL__XDMA_SLV_ENABLE_MASK 0x10000
+#define XDMA_SLV_CNTL__XDMA_SLV_ENABLE__SHIFT 0x10
+#define XDMA_SLV_CNTL__XDMA_SLV_READ_LAT_TEST_EN_MASK 0x80000
+#define XDMA_SLV_CNTL__XDMA_SLV_READ_LAT_TEST_EN__SHIFT 0x13
+#define XDMA_SLV_CNTL__XDMA_SLV_SOFT_RESET_MASK 0x100000
+#define XDMA_SLV_CNTL__XDMA_SLV_SOFT_RESET__SHIFT 0x14
+#define XDMA_SLV_CNTL__XDMA_SLV_REQ_MAXED_OUT_MASK 0x1000000
+#define XDMA_SLV_CNTL__XDMA_SLV_REQ_MAXED_OUT__SHIFT 0x18
+#define XDMA_SLV_CNTL__XDMA_SLV_WB_BURST_RESET_MASK 0x2000000
+#define XDMA_SLV_CNTL__XDMA_SLV_WB_BURST_RESET__SHIFT 0x19
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_SWAP_MASK 0x300
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_SWAP__SHIFT 0x8
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_VMID_MASK 0xf000
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_VMID__SHIFT 0xc
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_PRIV_MASK 0x10000
+#define XDMA_SLV_MEM_CLIENT_CONFIG__XDMA_SLV_MEM_CLIENT_PRIV__SHIFT 0x10
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_PITCH_MASK 0x3fff
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_PITCH__SHIFT 0x0
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_WIDTH_MASK 0x3fff0000
+#define XDMA_SLV_SLS_PITCH__XDMA_SLV_SLS_WIDTH__SHIFT 0x10
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_CLIENT_STALL_MASK 0x1
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_CLIENT_STALL__SHIFT 0x0
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LIMIT_MASK 0xf0
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LIMIT__SHIFT 0x4
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LEVEL_MASK 0xf00
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_LEVEL__SHIFT 0x8
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_STALL_DELAY_MASK 0xf000
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_STALL_DELAY__SHIFT 0xc
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_TIMER_MASK 0xffff0000
+#define XDMA_SLV_READ_URGENT_CNTL__XDMA_SLV_READ_URGENT_TIMER__SHIFT 0x10
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL_MASK 0x1
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL__SHIFT 0x0
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_URGENT_LEVEL_MASK 0xf00
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_URGENT_LEVEL__SHIFT 0x8
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL_DELAY_MASK 0xf000
+#define XDMA_SLV_WRITE_URGENT_CNTL__XDMA_SLV_WRITE_STALL_DELAY__SHIFT 0xc
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_SIZE_MASK 0x1ff
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_SIZE__SHIFT 0x0
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_PERIOD_MASK 0xffff0000
+#define XDMA_SLV_WB_RATE_CNTL__XDMA_SLV_WB_BURST_PERIOD__SHIFT 0x10
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MIN_MASK 0xffff
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MIN__SHIFT 0x0
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MAX_MASK 0xffff0000
+#define XDMA_SLV_READ_LATENCY_MINMAX__XDMA_SLV_READ_LATENCY_MAX__SHIFT 0x10
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_ACC_MASK 0xfffff
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_ACC__SHIFT 0x0
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_COUNT_MASK 0xfff00000
+#define XDMA_SLV_READ_LATENCY_AVE__XDMA_SLV_READ_LATENCY_COUNT__SHIFT 0x14
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_TAG_MASK 0x3ff
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_TAG__SHIFT 0x0
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_MASK 0x3000
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK__SHIFT 0xc
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_CLR_MASK 0x10000
+#define XDMA_SLV_PCIE_NACK_STATUS__XDMA_SLV_PCIE_NACK_CLR__SHIFT 0x10
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_TAG_MASK 0xffff
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_TAG__SHIFT 0x0
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_MASK 0x30000
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK__SHIFT 0x10
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_CLR_MASK 0x80000000
+#define XDMA_SLV_MEM_NACK_STATUS__XDMA_SLV_MEM_NACK_CLR__SHIFT 0x1f
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_FREE_ENTRIES_MASK 0x3ff
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_FREE_ENTRIES__SHIFT 0x0
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_BUF_SIZE_MASK 0x3ff000
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_BUF_SIZE__SHIFT 0xc
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_PG_STATE_MASK 0xc00000
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_PG_STATE__SHIFT 0x16
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_PG_TRANS_MASK 0x1000000
+#define XDMA_SLV_RDRET_BUF_STATUS__XDMA_SLV_RDRET_PG_TRANS__SHIFT 0x18
+#define XDMA_SLV_READ_LATENCY_TIMER__XDMA_SLV_READ_LATENCY_TIMER_MASK 0xffff
+#define XDMA_SLV_READ_LATENCY_TIMER__XDMA_SLV_READ_LATENCY_TIMER__SHIFT 0x0
+#define XDMA_SLV_FLIP_PENDING__XDMA_SLV_FLIP_PENDING_MASK 0x1
+#define XDMA_SLV_FLIP_PENDING__XDMA_SLV_FLIP_PENDING__SHIFT 0x0
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_CHANNEL_WEIGHT_MASK 0x1ff
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_CHANNEL_WEIGHT__SHIFT 0x0
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_STOP_TRANSFER_MASK 0x10000
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_STOP_TRANSFER__SHIFT 0x10
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_CHANNEL_SOFT_RESET_MASK 0x20000
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_CHANNEL_SOFT_RESET__SHIFT 0x11
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_CHANNEL_ACTIVE_MASK 0x1000000
+#define XDMA_SLV_CHANNEL_CNTL__XDMA_SLV_CHANNEL_ACTIVE__SHIFT 0x18
+#define XDMA_SLV_REMOTE_GPU_ADDRESS__XDMA_SLV_REMOTE_GPU_ADDRESS_MASK 0xffffffff
+#define XDMA_SLV_REMOTE_GPU_ADDRESS__XDMA_SLV_REMOTE_GPU_ADDRESS__SHIFT 0x0
+#define XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH__XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH_MASK 0xff
+#define XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH__XDMA_SLV_REMOTE_GPU_ADDRESS_HIGH__SHIFT 0x0
+#define CMD_BUS_TX_CONTROL_LANE0__tx_pwr_MASK 0x7
+#define CMD_BUS_TX_CONTROL_LANE0__tx_pwr__SHIFT 0x0
+#define CMD_BUS_TX_CONTROL_LANE0__tx_pg_en_MASK 0x18
+#define CMD_BUS_TX_CONTROL_LANE0__tx_pg_en__SHIFT 0x3
+#define CMD_BUS_TX_CONTROL_LANE0__tx_rdy_MASK 0x100
+#define CMD_BUS_TX_CONTROL_LANE0__tx_rdy__SHIFT 0x8
+#define CMD_BUS_TX_CONTROL_LANE1__tx_pwr_MASK 0x7
+#define CMD_BUS_TX_CONTROL_LANE1__tx_pwr__SHIFT 0x0
+#define CMD_BUS_TX_CONTROL_LANE1__tx_pg_en_MASK 0x18
+#define CMD_BUS_TX_CONTROL_LANE1__tx_pg_en__SHIFT 0x3
+#define CMD_BUS_TX_CONTROL_LANE1__tx_rdy_MASK 0x100
+#define CMD_BUS_TX_CONTROL_LANE1__tx_rdy__SHIFT 0x8
+#define CMD_BUS_TX_CONTROL_LANE2__tx_pwr_MASK 0x7
+#define CMD_BUS_TX_CONTROL_LANE2__tx_pwr__SHIFT 0x0
+#define CMD_BUS_TX_CONTROL_LANE2__tx_pg_en_MASK 0x18
+#define CMD_BUS_TX_CONTROL_LANE2__tx_pg_en__SHIFT 0x3
+#define CMD_BUS_TX_CONTROL_LANE2__tx_rdy_MASK 0x100
+#define CMD_BUS_TX_CONTROL_LANE2__tx_rdy__SHIFT 0x8
+#define CMD_BUS_TX_CONTROL_LANE3__tx_pwr_MASK 0x7
+#define CMD_BUS_TX_CONTROL_LANE3__tx_pwr__SHIFT 0x0
+#define CMD_BUS_TX_CONTROL_LANE3__tx_pg_en_MASK 0x18
+#define CMD_BUS_TX_CONTROL_LANE3__tx_pg_en__SHIFT 0x3
+#define CMD_BUS_TX_CONTROL_LANE3__tx_rdy_MASK 0x100
+#define CMD_BUS_TX_CONTROL_LANE3__tx_rdy__SHIFT 0x8
+#define MARGIN_DEEMPH_LANE0__txmarg_sel_MASK 0x7
+#define MARGIN_DEEMPH_LANE0__txmarg_sel__SHIFT 0x0
+#define MARGIN_DEEMPH_LANE0__deemph_sel_MASK 0x18
+#define MARGIN_DEEMPH_LANE0__deemph_sel__SHIFT 0x3
+#define MARGIN_DEEMPH_LANE0__tx_margin_en_MASK 0x20
+#define MARGIN_DEEMPH_LANE0__tx_margin_en__SHIFT 0x5
+#define MARGIN_DEEMPH_LANE1__txmarg_sel_MASK 0x7
+#define MARGIN_DEEMPH_LANE1__txmarg_sel__SHIFT 0x0
+#define MARGIN_DEEMPH_LANE1__deemph_sel_MASK 0x18
+#define MARGIN_DEEMPH_LANE1__deemph_sel__SHIFT 0x3
+#define MARGIN_DEEMPH_LANE1__tx_margin_en_MASK 0x20
+#define MARGIN_DEEMPH_LANE1__tx_margin_en__SHIFT 0x5
+#define MARGIN_DEEMPH_LANE2__txmarg_sel_MASK 0x7
+#define MARGIN_DEEMPH_LANE2__txmarg_sel__SHIFT 0x0
+#define MARGIN_DEEMPH_LANE2__deemph_sel_MASK 0x18
+#define MARGIN_DEEMPH_LANE2__deemph_sel__SHIFT 0x3
+#define MARGIN_DEEMPH_LANE2__tx_margin_en_MASK 0x20
+#define MARGIN_DEEMPH_LANE2__tx_margin_en__SHIFT 0x5
+#define MARGIN_DEEMPH_LANE3__txmarg_sel_MASK 0x7
+#define MARGIN_DEEMPH_LANE3__txmarg_sel__SHIFT 0x0
+#define MARGIN_DEEMPH_LANE3__deemph_sel_MASK 0x18
+#define MARGIN_DEEMPH_LANE3__deemph_sel__SHIFT 0x3
+#define MARGIN_DEEMPH_LANE3__tx_margin_en_MASK 0x20
+#define MARGIN_DEEMPH_LANE3__tx_margin_en__SHIFT 0x5
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__twosym_en_MASK 0x6
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__twosym_en__SHIFT 0x1
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__link_speed_MASK 0x18
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__link_speed__SHIFT 0x3
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__gang_mode_MASK 0xe0
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__gang_mode__SHIFT 0x5
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__max_linkrate_MASK 0x300
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__max_linkrate__SHIFT 0x8
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pcs_freq_MASK 0xc00
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pcs_freq__SHIFT 0xa
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pcs_clken_MASK 0x1000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pcs_clken__SHIFT 0xc
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pcs_clkdone_MASK 0x2000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pcs_clkdone__SHIFT 0xd
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pll1_always_on_MASK 0x4000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__pll1_always_on__SHIFT 0xe
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__rdclk_div2_en_MASK 0x8000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__rdclk_div2_en__SHIFT 0xf
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__tx_boost_adj_MASK 0xf0000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__tx_boost_adj__SHIFT 0x10
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__tx_boost_en_MASK 0x100000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__tx_boost_en__SHIFT 0x14
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__tx_binary_ron_code_offset_MASK 0xc00000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE0__tx_binary_ron_code_offset__SHIFT 0x16
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__twosym_en_MASK 0x6
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__twosym_en__SHIFT 0x1
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__link_speed_MASK 0x18
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__link_speed__SHIFT 0x3
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__gang_mode_MASK 0xe0
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__gang_mode__SHIFT 0x5
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__max_linkrate_MASK 0x300
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__max_linkrate__SHIFT 0x8
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pcs_freq_MASK 0xc00
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pcs_freq__SHIFT 0xa
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pcs_clken_MASK 0x1000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pcs_clken__SHIFT 0xc
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pcs_clkdone_MASK 0x2000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pcs_clkdone__SHIFT 0xd
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pll1_always_on_MASK 0x4000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__pll1_always_on__SHIFT 0xe
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__rdclk_div2_en_MASK 0x8000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__rdclk_div2_en__SHIFT 0xf
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__tx_boost_adj_MASK 0xf0000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__tx_boost_adj__SHIFT 0x10
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__tx_boost_en_MASK 0x100000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__tx_boost_en__SHIFT 0x14
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__tx_binary_ron_code_offset_MASK 0xc00000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE1__tx_binary_ron_code_offset__SHIFT 0x16
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__twosym_en_MASK 0x6
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__twosym_en__SHIFT 0x1
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__link_speed_MASK 0x18
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__link_speed__SHIFT 0x3
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__gang_mode_MASK 0xe0
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__gang_mode__SHIFT 0x5
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__max_linkrate_MASK 0x300
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__max_linkrate__SHIFT 0x8
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pcs_freq_MASK 0xc00
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pcs_freq__SHIFT 0xa
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pcs_clken_MASK 0x1000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pcs_clken__SHIFT 0xc
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pcs_clkdone_MASK 0x2000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pcs_clkdone__SHIFT 0xd
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pll1_always_on_MASK 0x4000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__pll1_always_on__SHIFT 0xe
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__rdclk_div2_en_MASK 0x8000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__rdclk_div2_en__SHIFT 0xf
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__tx_boost_adj_MASK 0xf0000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__tx_boost_adj__SHIFT 0x10
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__tx_boost_en_MASK 0x100000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__tx_boost_en__SHIFT 0x14
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__tx_binary_ron_code_offset_MASK 0xc00000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE2__tx_binary_ron_code_offset__SHIFT 0x16
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__twosym_en_MASK 0x6
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__twosym_en__SHIFT 0x1
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__link_speed_MASK 0x18
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__link_speed__SHIFT 0x3
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__gang_mode_MASK 0xe0
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__gang_mode__SHIFT 0x5
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__max_linkrate_MASK 0x300
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__max_linkrate__SHIFT 0x8
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pcs_freq_MASK 0xc00
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pcs_freq__SHIFT 0xa
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pcs_clken_MASK 0x1000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pcs_clken__SHIFT 0xc
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pcs_clkdone_MASK 0x2000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pcs_clkdone__SHIFT 0xd
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pll1_always_on_MASK 0x4000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__pll1_always_on__SHIFT 0xe
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__rdclk_div2_en_MASK 0x8000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__rdclk_div2_en__SHIFT 0xf
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__tx_boost_adj_MASK 0xf0000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__tx_boost_adj__SHIFT 0x10
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__tx_boost_en_MASK 0x100000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__tx_boost_en__SHIFT 0x14
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__tx_binary_ron_code_offset_MASK 0xc00000
+#define CMD_BUS_GLOBAL_FOR_TX_LANE3__tx_binary_ron_code_offset__SHIFT 0x16
+#define TX_DISP_RFU0_LANE0__rfu_value0_MASK 0xffffffff
+#define TX_DISP_RFU0_LANE0__rfu_value0__SHIFT 0x0
+#define TX_DISP_RFU0_LANE1__rfu_value0_MASK 0xffffffff
+#define TX_DISP_RFU0_LANE1__rfu_value0__SHIFT 0x0
+#define TX_DISP_RFU0_LANE2__rfu_value0_MASK 0xffffffff
+#define TX_DISP_RFU0_LANE2__rfu_value0__SHIFT 0x0
+#define TX_DISP_RFU0_LANE3__rfu_value0_MASK 0xffffffff
+#define TX_DISP_RFU0_LANE3__rfu_value0__SHIFT 0x0
+#define TX_DISP_RFU1_LANE0__rfu_value1_MASK 0xffffffff
+#define TX_DISP_RFU1_LANE0__rfu_value1__SHIFT 0x0
+#define TX_DISP_RFU1_LANE1__rfu_value1_MASK 0xffffffff
+#define TX_DISP_RFU1_LANE1__rfu_value1__SHIFT 0x0
+#define TX_DISP_RFU1_LANE2__rfu_value1_MASK 0xffffffff
+#define TX_DISP_RFU1_LANE2__rfu_value1__SHIFT 0x0
+#define TX_DISP_RFU1_LANE3__rfu_value1_MASK 0xffffffff
+#define TX_DISP_RFU1_LANE3__rfu_value1__SHIFT 0x0
+#define TX_DISP_RFU2_LANE0__rfu_value2_MASK 0xffffffff
+#define TX_DISP_RFU2_LANE0__rfu_value2__SHIFT 0x0
+#define TX_DISP_RFU2_LANE1__rfu_value2_MASK 0xffffffff
+#define TX_DISP_RFU2_LANE1__rfu_value2__SHIFT 0x0
+#define TX_DISP_RFU2_LANE2__rfu_value2_MASK 0xffffffff
+#define TX_DISP_RFU2_LANE2__rfu_value2__SHIFT 0x0
+#define TX_DISP_RFU2_LANE3__rfu_value2_MASK 0xffffffff
+#define TX_DISP_RFU2_LANE3__rfu_value2__SHIFT 0x0
+#define TX_DISP_RFU3_LANE0__rfu_value3_MASK 0xffffffff
+#define TX_DISP_RFU3_LANE0__rfu_value3__SHIFT 0x0
+#define TX_DISP_RFU3_LANE1__rfu_value3_MASK 0xffffffff
+#define TX_DISP_RFU3_LANE1__rfu_value3__SHIFT 0x0
+#define TX_DISP_RFU3_LANE2__rfu_value3_MASK 0xffffffff
+#define TX_DISP_RFU3_LANE2__rfu_value3__SHIFT 0x0
+#define TX_DISP_RFU3_LANE3__rfu_value3_MASK 0xffffffff
+#define TX_DISP_RFU3_LANE3__rfu_value3__SHIFT 0x0
+#define TX_DISP_RFU4_LANE0__rfu_value4_MASK 0xffffffff
+#define TX_DISP_RFU4_LANE0__rfu_value4__SHIFT 0x0
+#define TX_DISP_RFU4_LANE1__rfu_value4_MASK 0xffffffff
+#define TX_DISP_RFU4_LANE1__rfu_value4__SHIFT 0x0
+#define TX_DISP_RFU4_LANE2__rfu_value4_MASK 0xffffffff
+#define TX_DISP_RFU4_LANE2__rfu_value4__SHIFT 0x0
+#define TX_DISP_RFU4_LANE3__rfu_value4_MASK 0xffffffff
+#define TX_DISP_RFU4_LANE3__rfu_value4__SHIFT 0x0
+#define TX_DISP_RFU5_LANE0__rfu_value5_MASK 0xffffffff
+#define TX_DISP_RFU5_LANE0__rfu_value5__SHIFT 0x0
+#define TX_DISP_RFU5_LANE1__rfu_value5_MASK 0xffffffff
+#define TX_DISP_RFU5_LANE1__rfu_value5__SHIFT 0x0
+#define TX_DISP_RFU5_LANE2__rfu_value5_MASK 0xffffffff
+#define TX_DISP_RFU5_LANE2__rfu_value5__SHIFT 0x0
+#define TX_DISP_RFU5_LANE3__rfu_value5_MASK 0xffffffff
+#define TX_DISP_RFU5_LANE3__rfu_value5__SHIFT 0x0
+#define TX_DISP_RFU6_LANE0__rfu_value6_MASK 0xffffffff
+#define TX_DISP_RFU6_LANE0__rfu_value6__SHIFT 0x0
+#define TX_DISP_RFU6_LANE1__rfu_value6_MASK 0xffffffff
+#define TX_DISP_RFU6_LANE1__rfu_value6__SHIFT 0x0
+#define TX_DISP_RFU6_LANE2__rfu_value6_MASK 0xffffffff
+#define TX_DISP_RFU6_LANE2__rfu_value6__SHIFT 0x0
+#define TX_DISP_RFU6_LANE3__rfu_value6_MASK 0xffffffff
+#define TX_DISP_RFU6_LANE3__rfu_value6__SHIFT 0x0
+#define TX_DISP_RFU7_LANE0__rfu_value7_MASK 0xffffffff
+#define TX_DISP_RFU7_LANE0__rfu_value7__SHIFT 0x0
+#define TX_DISP_RFU7_LANE1__rfu_value7_MASK 0xffffffff
+#define TX_DISP_RFU7_LANE1__rfu_value7__SHIFT 0x0
+#define TX_DISP_RFU7_LANE2__rfu_value7_MASK 0xffffffff
+#define TX_DISP_RFU7_LANE2__rfu_value7__SHIFT 0x0
+#define TX_DISP_RFU7_LANE3__rfu_value7_MASK 0xffffffff
+#define TX_DISP_RFU7_LANE3__rfu_value7__SHIFT 0x0
+#define TX_DISP_RFU8_LANE0__rfu_value8_MASK 0xffffffff
+#define TX_DISP_RFU8_LANE0__rfu_value8__SHIFT 0x0
+#define TX_DISP_RFU8_LANE1__rfu_value8_MASK 0xffffffff
+#define TX_DISP_RFU8_LANE1__rfu_value8__SHIFT 0x0
+#define TX_DISP_RFU8_LANE2__rfu_value8_MASK 0xffffffff
+#define TX_DISP_RFU8_LANE2__rfu_value8__SHIFT 0x0
+#define TX_DISP_RFU8_LANE3__rfu_value8_MASK 0xffffffff
+#define TX_DISP_RFU8_LANE3__rfu_value8__SHIFT 0x0
+#define TX_DISP_RFU9_LANE0__rfu_value9_MASK 0xffffffff
+#define TX_DISP_RFU9_LANE0__rfu_value9__SHIFT 0x0
+#define TX_DISP_RFU9_LANE1__rfu_value9_MASK 0xffffffff
+#define TX_DISP_RFU9_LANE1__rfu_value9__SHIFT 0x0
+#define TX_DISP_RFU9_LANE2__rfu_value9_MASK 0xffffffff
+#define TX_DISP_RFU9_LANE2__rfu_value9__SHIFT 0x0
+#define TX_DISP_RFU9_LANE3__rfu_value9_MASK 0xffffffff
+#define TX_DISP_RFU9_LANE3__rfu_value9__SHIFT 0x0
+#define TX_DISP_RFU10_LANE0__rfu_value10_MASK 0xffffffff
+#define TX_DISP_RFU10_LANE0__rfu_value10__SHIFT 0x0
+#define TX_DISP_RFU10_LANE1__rfu_value10_MASK 0xffffffff
+#define TX_DISP_RFU10_LANE1__rfu_value10__SHIFT 0x0
+#define TX_DISP_RFU10_LANE2__rfu_value10_MASK 0xffffffff
+#define TX_DISP_RFU10_LANE2__rfu_value10__SHIFT 0x0
+#define TX_DISP_RFU10_LANE3__rfu_value10_MASK 0xffffffff
+#define TX_DISP_RFU10_LANE3__rfu_value10__SHIFT 0x0
+#define TX_DISP_RFU11_LANE0__rfu_value11_MASK 0xffffffff
+#define TX_DISP_RFU11_LANE0__rfu_value11__SHIFT 0x0
+#define TX_DISP_RFU11_LANE1__rfu_value11_MASK 0xffffffff
+#define TX_DISP_RFU11_LANE1__rfu_value11__SHIFT 0x0
+#define TX_DISP_RFU11_LANE2__rfu_value11_MASK 0xffffffff
+#define TX_DISP_RFU11_LANE2__rfu_value11__SHIFT 0x0
+#define TX_DISP_RFU11_LANE3__rfu_value11_MASK 0xffffffff
+#define TX_DISP_RFU11_LANE3__rfu_value11__SHIFT 0x0
+#define TX_DISP_RFU12_LANE0__rfu_value12_MASK 0xffffffff
+#define TX_DISP_RFU12_LANE0__rfu_value12__SHIFT 0x0
+#define TX_DISP_RFU12_LANE1__rfu_value12_MASK 0xffffffff
+#define TX_DISP_RFU12_LANE1__rfu_value12__SHIFT 0x0
+#define TX_DISP_RFU12_LANE2__rfu_value12_MASK 0xffffffff
+#define TX_DISP_RFU12_LANE2__rfu_value12__SHIFT 0x0
+#define TX_DISP_RFU12_LANE3__rfu_value12_MASK 0xffffffff
+#define TX_DISP_RFU12_LANE3__rfu_value12__SHIFT 0x0
+#define COMMON_MAR_DEEMPH_NOM__tx_margin_nom_MASK 0xff
+#define COMMON_MAR_DEEMPH_NOM__tx_margin_nom__SHIFT 0x0
+#define COMMON_MAR_DEEMPH_NOM__deemph_gen1_nom_MASK 0xff00
+#define COMMON_MAR_DEEMPH_NOM__deemph_gen1_nom__SHIFT 0x8
+#define COMMON_MAR_DEEMPH_NOM__deemph35_gen2_nom_MASK 0xff0000
+#define COMMON_MAR_DEEMPH_NOM__deemph35_gen2_nom__SHIFT 0x10
+#define COMMON_MAR_DEEMPH_NOM__deemph60_gen2_nom_MASK 0xff000000
+#define COMMON_MAR_DEEMPH_NOM__deemph60_gen2_nom__SHIFT 0x18
+#define COMMON_LANE_PWRMGMT__pgdelay_MASK 0xf
+#define COMMON_LANE_PWRMGMT__pgdelay__SHIFT 0x0
+#define COMMON_LANE_PWRMGMT__pgmask_MASK 0x3f0
+#define COMMON_LANE_PWRMGMT__pgmask__SHIFT 0x4
+#define COMMON_LANE_PWRMGMT__vprot_en_MASK 0x800
+#define COMMON_LANE_PWRMGMT__vprot_en__SHIFT 0xb
+#define COMMON_TXCNTRL__rdptr_rst_val_gen3_MASK 0x1f
+#define COMMON_TXCNTRL__rdptr_rst_val_gen3__SHIFT 0x0
+#define COMMON_TXCNTRL__clkgate_dis_MASK 0x20
+#define COMMON_TXCNTRL__clkgate_dis__SHIFT 0x5
+#define COMMON_TXCNTRL__slew_rate_ctl_gen1_MASK 0x1c0
+#define COMMON_TXCNTRL__slew_rate_ctl_gen1__SHIFT 0x6
+#define COMMON_TXCNTRL__slew_rate_ctl_gen2_MASK 0xe00
+#define COMMON_TXCNTRL__slew_rate_ctl_gen2__SHIFT 0x9
+#define COMMON_TXCNTRL__slew_rate_ctl_gen3_MASK 0x7000
+#define COMMON_TXCNTRL__slew_rate_ctl_gen3__SHIFT 0xc
+#define COMMON_TXCNTRL__dual_dvi_mstr_en_MASK 0x8000
+#define COMMON_TXCNTRL__dual_dvi_mstr_en__SHIFT 0xf
+#define COMMON_TXCNTRL__dual_dvi_en_MASK 0x10000
+#define COMMON_TXCNTRL__dual_dvi_en__SHIFT 0x10
+#define COMMON_TMDP__tmdp_spare_MASK 0xffffffff
+#define COMMON_TMDP__tmdp_spare__SHIFT 0x0
+#define COMMON_LANE_RESETS__lane_0_reset_l_MASK 0x1
+#define COMMON_LANE_RESETS__lane_0_reset_l__SHIFT 0x0
+#define COMMON_LANE_RESETS__lane_1_reset_l_MASK 0x2
+#define COMMON_LANE_RESETS__lane_1_reset_l__SHIFT 0x1
+#define COMMON_LANE_RESETS__lane_2_reset_l_MASK 0x4
+#define COMMON_LANE_RESETS__lane_2_reset_l__SHIFT 0x2
+#define COMMON_LANE_RESETS__lane_3_reset_l_MASK 0x8
+#define COMMON_LANE_RESETS__lane_3_reset_l__SHIFT 0x3
+#define COMMON_LANE_RESETS__lane_4_reset_l_MASK 0x10
+#define COMMON_LANE_RESETS__lane_4_reset_l__SHIFT 0x4
+#define COMMON_LANE_RESETS__lane_5_reset_l_MASK 0x20
+#define COMMON_LANE_RESETS__lane_5_reset_l__SHIFT 0x5
+#define COMMON_LANE_RESETS__lane_6_reset_l_MASK 0x40
+#define COMMON_LANE_RESETS__lane_6_reset_l__SHIFT 0x6
+#define COMMON_LANE_RESETS__lane_7_reset_l_MASK 0x80
+#define COMMON_LANE_RESETS__lane_7_reset_l__SHIFT 0x7
+#define COMMON_ZCALCODE_CTRL__zcalcode_override_MASK 0x1
+#define COMMON_ZCALCODE_CTRL__zcalcode_override__SHIFT 0x0
+#define COMMON_ZCALCODE_CTRL__tx_binary_code_override_val_MASK 0x3e
+#define COMMON_ZCALCODE_CTRL__tx_binary_code_override_val__SHIFT 0x1
+#define COMMON_ZCALCODE_CTRL__tx_driver_fifty_ohms_MASK 0x200000
+#define COMMON_ZCALCODE_CTRL__tx_driver_fifty_ohms__SHIFT 0x15
+#define COMMON_DISP_RFU1__rfu_value1_MASK 0xffffffff
+#define COMMON_DISP_RFU1__rfu_value1__SHIFT 0x0
+#define COMMON_DISP_RFU2__rfu_value2_MASK 0xffffffff
+#define COMMON_DISP_RFU2__rfu_value2__SHIFT 0x0
+#define COMMON_DISP_RFU3__rfu_value3_MASK 0xffffffff
+#define COMMON_DISP_RFU3__rfu_value3__SHIFT 0x0
+#define COMMON_DISP_RFU4__rfu_value4_MASK 0xffffffff
+#define COMMON_DISP_RFU4__rfu_value4__SHIFT 0x0
+#define COMMON_DISP_RFU5__rfu_value5_MASK 0xffffffff
+#define COMMON_DISP_RFU5__rfu_value5__SHIFT 0x0
+#define COMMON_DISP_RFU6__rfu_value6_MASK 0xffffffff
+#define COMMON_DISP_RFU6__rfu_value6__SHIFT 0x0
+#define COMMON_DISP_RFU7__rfu_value7_MASK 0xffffffff
+#define COMMON_DISP_RFU7__rfu_value7__SHIFT 0x0
+#define FREQ_CTRL0__fcw0_frac_MASK 0xffff
+#define FREQ_CTRL0__fcw0_frac__SHIFT 0x0
+#define FREQ_CTRL0__fcw0_int_MASK 0x1ff0000
+#define FREQ_CTRL0__fcw0_int__SHIFT 0x10
+#define FREQ_CTRL1__fcw1_frac_MASK 0xffff
+#define FREQ_CTRL1__fcw1_frac__SHIFT 0x0
+#define FREQ_CTRL1__fcw1_int_MASK 0x1ff0000
+#define FREQ_CTRL1__fcw1_int__SHIFT 0x10
+#define FREQ_CTRL2__fcw_denom_MASK 0xffff
+#define FREQ_CTRL2__fcw_denom__SHIFT 0x0
+#define FREQ_CTRL2__fcw_slew_frac_MASK 0xffff0000
+#define FREQ_CTRL2__fcw_slew_frac__SHIFT 0x10
+#define FREQ_CTRL3__refclk_div_MASK 0x3
+#define FREQ_CTRL3__refclk_div__SHIFT 0x0
+#define FREQ_CTRL3__vco_pre_div_MASK 0x18
+#define FREQ_CTRL3__vco_pre_div__SHIFT 0x3
+#define FREQ_CTRL3__fracn_en_MASK 0x40
+#define FREQ_CTRL3__fracn_en__SHIFT 0x6
+#define FREQ_CTRL3__ssc_en_MASK 0x100
+#define FREQ_CTRL3__ssc_en__SHIFT 0x8
+#define FREQ_CTRL3__fcw_sel_MASK 0x400
+#define FREQ_CTRL3__fcw_sel__SHIFT 0xa
+#define FREQ_CTRL3__freq_jump_en_MASK 0x1000
+#define FREQ_CTRL3__freq_jump_en__SHIFT 0xc
+#define FREQ_CTRL3__tdc_resolution_MASK 0xff0000
+#define FREQ_CTRL3__tdc_resolution__SHIFT 0x10
+#define FREQ_CTRL3__dpll_cfg_1_MASK 0xff000000
+#define FREQ_CTRL3__dpll_cfg_1__SHIFT 0x18
+#define BW_CTRL_COARSE__gi_coarse_mant_MASK 0x3
+#define BW_CTRL_COARSE__gi_coarse_mant__SHIFT 0x0
+#define BW_CTRL_COARSE__gi_coarse_exp_MASK 0x3c
+#define BW_CTRL_COARSE__gi_coarse_exp__SHIFT 0x2
+#define BW_CTRL_COARSE__gp_coarse_mant_MASK 0x780
+#define BW_CTRL_COARSE__gp_coarse_mant__SHIFT 0x7
+#define BW_CTRL_COARSE__gp_coarse_exp_MASK 0xf000
+#define BW_CTRL_COARSE__gp_coarse_exp__SHIFT 0xc
+#define BW_CTRL_COARSE__nctl_coarse_res_MASK 0x7e0000
+#define BW_CTRL_COARSE__nctl_coarse_res__SHIFT 0x11
+#define BW_CTRL_COARSE__nctl_coarse_frac_res_MASK 0x3000000
+#define BW_CTRL_COARSE__nctl_coarse_frac_res__SHIFT 0x18
+#define BW_CTRL_FINE__dpll_cfg_3_MASK 0x3ff
+#define BW_CTRL_FINE__dpll_cfg_3__SHIFT 0x0
+#define CAL_CTRL__bypass_freq_lock_MASK 0x1
+#define CAL_CTRL__bypass_freq_lock__SHIFT 0x0
+#define CAL_CTRL__tdc_cal_en_MASK 0x2
+#define CAL_CTRL__tdc_cal_en__SHIFT 0x1
+#define CAL_CTRL__tdc_cal_ctrl_MASK 0x1f8
+#define CAL_CTRL__tdc_cal_ctrl__SHIFT 0x3
+#define CAL_CTRL__meas_win_sel_MASK 0x600
+#define CAL_CTRL__meas_win_sel__SHIFT 0x9
+#define CAL_CTRL__kdco_cal_dis_MASK 0x800
+#define CAL_CTRL__kdco_cal_dis__SHIFT 0xb
+#define CAL_CTRL__kdco_ratio_MASK 0x1fe000
+#define CAL_CTRL__kdco_ratio__SHIFT 0xd
+#define CAL_CTRL__kdco_incr_cal_dis_MASK 0x400000
+#define CAL_CTRL__kdco_incr_cal_dis__SHIFT 0x16
+#define CAL_CTRL__nctl_adj_dis_MASK 0x800000
+#define CAL_CTRL__nctl_adj_dis__SHIFT 0x17
+#define CAL_CTRL__refclk_rate_MASK 0xff000000
+#define CAL_CTRL__refclk_rate__SHIFT 0x18
+#define LOOP_CTRL__fbdiv_mask_en_MASK 0x1
+#define LOOP_CTRL__fbdiv_mask_en__SHIFT 0x0
+#define LOOP_CTRL__fb_slip_dis_MASK 0x4
+#define LOOP_CTRL__fb_slip_dis__SHIFT 0x2
+#define LOOP_CTRL__clk_tdc_sel_MASK 0x30
+#define LOOP_CTRL__clk_tdc_sel__SHIFT 0x4
+#define LOOP_CTRL__clk_nctl_sel_MASK 0x180
+#define LOOP_CTRL__clk_nctl_sel__SHIFT 0x7
+#define LOOP_CTRL__sig_del_patt_sel_MASK 0x400
+#define LOOP_CTRL__sig_del_patt_sel__SHIFT 0xa
+#define LOOP_CTRL__nctl_sig_del_dis_MASK 0x1000
+#define LOOP_CTRL__nctl_sig_del_dis__SHIFT 0xc
+#define LOOP_CTRL__fbclk_track_refclk_MASK 0x4000
+#define LOOP_CTRL__fbclk_track_refclk__SHIFT 0xe
+#define LOOP_CTRL__prbs_en_MASK 0x10000
+#define LOOP_CTRL__prbs_en__SHIFT 0x10
+#define LOOP_CTRL__tdc_clk_gate_en_MASK 0x40000
+#define LOOP_CTRL__tdc_clk_gate_en__SHIFT 0x12
+#define LOOP_CTRL__phase_offset_MASK 0x7f00000
+#define LOOP_CTRL__phase_offset__SHIFT 0x14
+#define VREG_CFG__bleeder_ac_MASK 0x1
+#define VREG_CFG__bleeder_ac__SHIFT 0x0
+#define VREG_CFG__bleeder_en_MASK 0x2
+#define VREG_CFG__bleeder_en__SHIFT 0x1
+#define VREG_CFG__is_1p2_MASK 0x4
+#define VREG_CFG__is_1p2__SHIFT 0x2
+#define VREG_CFG__reg_obs_sel_MASK 0x18
+#define VREG_CFG__reg_obs_sel__SHIFT 0x3
+#define VREG_CFG__reg_on_mode_MASK 0x60
+#define VREG_CFG__reg_on_mode__SHIFT 0x5
+#define VREG_CFG__rlad_tap_sel_MASK 0x780
+#define VREG_CFG__rlad_tap_sel__SHIFT 0x7
+#define VREG_CFG__reg_off_hi_MASK 0x800
+#define VREG_CFG__reg_off_hi__SHIFT 0xb
+#define VREG_CFG__reg_off_lo_MASK 0x1000
+#define VREG_CFG__reg_off_lo__SHIFT 0xc
+#define VREG_CFG__scale_driver_MASK 0x6000
+#define VREG_CFG__scale_driver__SHIFT 0xd
+#define VREG_CFG__sel_bump_MASK 0x8000
+#define VREG_CFG__sel_bump__SHIFT 0xf
+#define VREG_CFG__sel_rladder_x_MASK 0x10000
+#define VREG_CFG__sel_rladder_x__SHIFT 0x10
+#define VREG_CFG__short_rc_filt_x_MASK 0x20000
+#define VREG_CFG__short_rc_filt_x__SHIFT 0x11
+#define VREG_CFG__vref_pwr_on_MASK 0x40000
+#define VREG_CFG__vref_pwr_on__SHIFT 0x12
+#define VREG_CFG__dpll_cfg_2_MASK 0xff00000
+#define VREG_CFG__dpll_cfg_2__SHIFT 0x14
+#define OBSERVE0__lock_det_tdc_steps_MASK 0x1f
+#define OBSERVE0__lock_det_tdc_steps__SHIFT 0x0
+#define OBSERVE0__clear_sticky_lock_MASK 0x40
+#define OBSERVE0__clear_sticky_lock__SHIFT 0x6
+#define OBSERVE0__lock_det_dis_MASK 0x100
+#define OBSERVE0__lock_det_dis__SHIFT 0x8
+#define OBSERVE0__dco_cfg_MASK 0x3fc00
+#define OBSERVE0__dco_cfg__SHIFT 0xa
+#define OBSERVE0__anaobs_sel_MASK 0xe00000
+#define OBSERVE0__anaobs_sel__SHIFT 0x15
+#define OBSERVE1__digobs_sel_MASK 0xf
+#define OBSERVE1__digobs_sel__SHIFT 0x0
+#define OBSERVE1__digobs_trig_sel_MASK 0x1e0
+#define OBSERVE1__digobs_trig_sel__SHIFT 0x5
+#define OBSERVE1__digobs_div_MASK 0xc00
+#define OBSERVE1__digobs_div__SHIFT 0xa
+#define OBSERVE1__digobs_trig_div_MASK 0x6000
+#define OBSERVE1__digobs_trig_div__SHIFT 0xd
+#define OBSERVE1__lock_timer_MASK 0x3fff0000
+#define OBSERVE1__lock_timer__SHIFT 0x10
+#define DFT_OUT__dft_data_MASK 0xffffffff
+#define DFT_OUT__dft_data__SHIFT 0x0
+#define PLL_WRAP_CNTRL1__wrap_cfg_sel_clk_MASK 0x3
+#define PLL_WRAP_CNTRL1__wrap_cfg_sel_clk__SHIFT 0x0
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_freq_programming_ovveride_MASK 0x1
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_freq_programming_ovveride__SHIFT 0x0
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_pwr_state_ovrride_MASK 0x2
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_pwr_state_ovrride__SHIFT 0x1
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_pwr_state_MASK 0xc
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_pwr_state__SHIFT 0x2
+#define PLL_WRAP_CNTRL__wrap_cfg_tx_pdiv_val_MASK 0xe0
+#define PLL_WRAP_CNTRL__wrap_cfg_tx_pdiv_val__SHIFT 0x5
+#define PLL_WRAP_CNTRL__wrap_cfg_tx_pixdiv_val_MASK 0x100
+#define PLL_WRAP_CNTRL__wrap_cfg_tx_pixdiv_val__SHIFT 0x8
+#define PLL_WRAP_CNTRL__wrap_cfg_cml_cmos_sel_MASK 0x400
+#define PLL_WRAP_CNTRL__wrap_cfg_cml_cmos_sel__SHIFT 0xa
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_rdy_MASK 0x2000
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_rdy__SHIFT 0xd
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_update_MASK 0x4000
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_update__SHIFT 0xe
+#define PLL_WRAP_CNTRL__wrap_cfg_ref_values_chg_MASK 0x8000
+#define PLL_WRAP_CNTRL__wrap_cfg_ref_values_chg__SHIFT 0xf
+#define PLL_WRAP_CNTRL__wrap_cfg_clk_gate_w_rdy_MASK 0x10000
+#define PLL_WRAP_CNTRL__wrap_cfg_clk_gate_w_rdy__SHIFT 0x10
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_dsm_sel_MASK 0xe0000
+#define PLL_WRAP_CNTRL__wrap_cfg_pll_dsm_sel__SHIFT 0x11
+#define PPLL_VREG_CFG__pw_pc_bleeder_ac_MASK 0x1
+#define PPLL_VREG_CFG__pw_pc_bleeder_ac__SHIFT 0x0
+#define PPLL_VREG_CFG__pw_pc_bleeder_en_MASK 0x2
+#define PPLL_VREG_CFG__pw_pc_bleeder_en__SHIFT 0x1
+#define PPLL_VREG_CFG__pw_pc_is_1p2_MASK 0x4
+#define PPLL_VREG_CFG__pw_pc_is_1p2__SHIFT 0x2
+#define PPLL_VREG_CFG__pw_pc_reg_obs_sel_MASK 0x18
+#define PPLL_VREG_CFG__pw_pc_reg_obs_sel__SHIFT 0x3
+#define PPLL_VREG_CFG__pw_pc_reg_on_mode_MASK 0x60
+#define PPLL_VREG_CFG__pw_pc_reg_on_mode__SHIFT 0x5
+#define PPLL_VREG_CFG__pw_pc_rlad_tap_sel_MASK 0x780
+#define PPLL_VREG_CFG__pw_pc_rlad_tap_sel__SHIFT 0x7
+#define PPLL_VREG_CFG__pw_pc_reg_off_hi_MASK 0x800
+#define PPLL_VREG_CFG__pw_pc_reg_off_hi__SHIFT 0xb
+#define PPLL_VREG_CFG__pw_pc_reg_off_lo_MASK 0x1000
+#define PPLL_VREG_CFG__pw_pc_reg_off_lo__SHIFT 0xc
+#define PPLL_VREG_CFG__pw_pc_scale_driver_MASK 0x6000
+#define PPLL_VREG_CFG__pw_pc_scale_driver__SHIFT 0xd
+#define PPLL_VREG_CFG__pw_pc_sel_bump_MASK 0x8000
+#define PPLL_VREG_CFG__pw_pc_sel_bump__SHIFT 0xf
+#define PPLL_VREG_CFG__pw_pc_sel_rladder_x_MASK 0x10000
+#define PPLL_VREG_CFG__pw_pc_sel_rladder_x__SHIFT 0x10
+#define PPLL_VREG_CFG__pw_pc_short_rc_filt_x_MASK 0x20000
+#define PPLL_VREG_CFG__pw_pc_short_rc_filt_x__SHIFT 0x11
+#define PPLL_VREG_CFG__pw_pc_vref_pwr_on_MASK 0x40000
+#define PPLL_VREG_CFG__pw_pc_vref_pwr_on__SHIFT 0x12
+#define PPLL_VREG_CFG__pw_pc_dpll_cfg_2_MASK 0xff00000
+#define PPLL_VREG_CFG__pw_pc_dpll_cfg_2__SHIFT 0x14
+#define PPLL_MODE_CNTL__pw_pc_refclk_gate_dis_MASK 0x1
+#define PPLL_MODE_CNTL__pw_pc_refclk_gate_dis__SHIFT 0x0
+#define PPLL_MODE_CNTL__pw_pc_multi_phase_en_MASK 0xf00
+#define PPLL_MODE_CNTL__pw_pc_multi_phase_en__SHIFT 0x8
+#define PPLL_MODE_CNTL__reg_tmg_pwr_state_MASK 0x30000
+#define PPLL_MODE_CNTL__reg_tmg_pwr_state__SHIFT 0x10
+#define PPLL_FREQ_CTRL0__reg_tmg_fcw0_frac_MASK 0xffff
+#define PPLL_FREQ_CTRL0__reg_tmg_fcw0_frac__SHIFT 0x0
+#define PPLL_FREQ_CTRL0__reg_tmg_fcw0_int_MASK 0x1ff0000
+#define PPLL_FREQ_CTRL0__reg_tmg_fcw0_int__SHIFT 0x10
+#define PPLL_FREQ_CTRL1__reg_tmg_fcw1_frac_MASK 0xffff
+#define PPLL_FREQ_CTRL1__reg_tmg_fcw1_frac__SHIFT 0x0
+#define PPLL_FREQ_CTRL1__reg_tmg_fcw1_int_MASK 0x1ff0000
+#define PPLL_FREQ_CTRL1__reg_tmg_fcw1_int__SHIFT 0x10
+#define PPLL_FREQ_CTRL2__reg_tmg_fcw_denom_MASK 0xffff
+#define PPLL_FREQ_CTRL2__reg_tmg_fcw_denom__SHIFT 0x0
+#define PPLL_FREQ_CTRL2__reg_tmg_fcw_slew_frac_MASK 0xffff0000
+#define PPLL_FREQ_CTRL2__reg_tmg_fcw_slew_frac__SHIFT 0x10
+#define PPLL_FREQ_CTRL3__reg_tmg_refclk_div_MASK 0x3
+#define PPLL_FREQ_CTRL3__reg_tmg_refclk_div__SHIFT 0x0
+#define PPLL_FREQ_CTRL3__reg_tmg_vco_pre_div_MASK 0x18
+#define PPLL_FREQ_CTRL3__reg_tmg_vco_pre_div__SHIFT 0x3
+#define PPLL_FREQ_CTRL3__reg_tmg_fracn_en_MASK 0x40
+#define PPLL_FREQ_CTRL3__reg_tmg_fracn_en__SHIFT 0x6
+#define PPLL_FREQ_CTRL3__reg_tmg_ssc_en_MASK 0x100
+#define PPLL_FREQ_CTRL3__reg_tmg_ssc_en__SHIFT 0x8
+#define PPLL_FREQ_CTRL3__reg_tmg_fcw_sel_MASK 0x400
+#define PPLL_FREQ_CTRL3__reg_tmg_fcw_sel__SHIFT 0xa
+#define PPLL_FREQ_CTRL3__reg_tmg_freq_jump_en_MASK 0x1000
+#define PPLL_FREQ_CTRL3__reg_tmg_freq_jump_en__SHIFT 0xc
+#define PPLL_FREQ_CTRL3__reg_tmg_tdc_resol_MASK 0xff0000
+#define PPLL_FREQ_CTRL3__reg_tmg_tdc_resol__SHIFT 0x10
+#define PPLL_FREQ_CTRL3__pw_pc_dpll_cfg_1_MASK 0xff000000
+#define PPLL_FREQ_CTRL3__pw_pc_dpll_cfg_1__SHIFT 0x18
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gi_crse_mant_MASK 0x3
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gi_crse_mant__SHIFT 0x0
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gi_crse_exp_MASK 0x3c
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gi_crse_exp__SHIFT 0x2
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gp_crse_mant_MASK 0x780
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gp_crse_mant__SHIFT 0x7
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gp_crse_exp_MASK 0xf000
+#define PPLL_BW_CTRL_COARSE__reg_tmg_gp_crse_exp__SHIFT 0xc
+#define PPLL_BW_CTRL_COARSE__reg_tmg_nctl_crse_res_MASK 0x7e0000
+#define PPLL_BW_CTRL_COARSE__reg_tmg_nctl_crse_res__SHIFT 0x11
+#define PPLL_BW_CTRL_COARSE__reg_tmg_nctl_crse_frac_res_MASK 0x3000000
+#define PPLL_BW_CTRL_COARSE__reg_tmg_nctl_crse_frac_res__SHIFT 0x18
+#define PPLL_BW_CTRL_FINE__pw_pc_dpll_cfg_3_MASK 0x3ff
+#define PPLL_BW_CTRL_FINE__pw_pc_dpll_cfg_3__SHIFT 0x0
+#define PPLL_CAL_CTRL__pw_pc_bypass_freq_lock_MASK 0x1
+#define PPLL_CAL_CTRL__pw_pc_bypass_freq_lock__SHIFT 0x0
+#define PPLL_CAL_CTRL__pw_pc_tdc_cal_en_MASK 0x2
+#define PPLL_CAL_CTRL__pw_pc_tdc_cal_en__SHIFT 0x1
+#define PPLL_CAL_CTRL__pw_pc_tdc_cal_ctrl_MASK 0x1f8
+#define PPLL_CAL_CTRL__pw_pc_tdc_cal_ctrl__SHIFT 0x3
+#define PPLL_CAL_CTRL__pw_pc_meas_win_sel_MASK 0x600
+#define PPLL_CAL_CTRL__pw_pc_meas_win_sel__SHIFT 0x9
+#define PPLL_CAL_CTRL__pw_pc_kdco_cal_dis_MASK 0x800
+#define PPLL_CAL_CTRL__pw_pc_kdco_cal_dis__SHIFT 0xb
+#define PPLL_CAL_CTRL__pw_pc_kdco_ratio_MASK 0x1fe000
+#define PPLL_CAL_CTRL__pw_pc_kdco_ratio__SHIFT 0xd
+#define PPLL_CAL_CTRL__pw_pc_kdco_incr_cal_dis_MASK 0x400000
+#define PPLL_CAL_CTRL__pw_pc_kdco_incr_cal_dis__SHIFT 0x16
+#define PPLL_CAL_CTRL__pw_pc_nctl_adj_dis_MASK 0x800000
+#define PPLL_CAL_CTRL__pw_pc_nctl_adj_dis__SHIFT 0x17
+#define PPLL_CAL_CTRL__pw_pc_refclk_rate_MASK 0xff000000
+#define PPLL_CAL_CTRL__pw_pc_refclk_rate__SHIFT 0x18
+#define PPLL_LOOP_CTRL__pw_pc_fbdiv_mask_en_MASK 0x1
+#define PPLL_LOOP_CTRL__pw_pc_fbdiv_mask_en__SHIFT 0x0
+#define PPLL_LOOP_CTRL__pw_pc_fb_slip_dis_MASK 0x4
+#define PPLL_LOOP_CTRL__pw_pc_fb_slip_dis__SHIFT 0x2
+#define PPLL_LOOP_CTRL__pw_pc_clk_tdc_sel_MASK 0x30
+#define PPLL_LOOP_CTRL__pw_pc_clk_tdc_sel__SHIFT 0x4
+#define PPLL_LOOP_CTRL__pw_pc_clk_nctl_sel_MASK 0x180
+#define PPLL_LOOP_CTRL__pw_pc_clk_nctl_sel__SHIFT 0x7
+#define PPLL_LOOP_CTRL__pw_pc_sig_del_patt_sel_MASK 0x400
+#define PPLL_LOOP_CTRL__pw_pc_sig_del_patt_sel__SHIFT 0xa
+#define PPLL_LOOP_CTRL__pw_pc_nctl_sig_del_dis_MASK 0x1000
+#define PPLL_LOOP_CTRL__pw_pc_nctl_sig_del_dis__SHIFT 0xc
+#define PPLL_LOOP_CTRL__pw_pc_fbclk_track_refclk_MASK 0x4000
+#define PPLL_LOOP_CTRL__pw_pc_fbclk_track_refclk__SHIFT 0xe
+#define PPLL_LOOP_CTRL__pw_pc_prbs_en_MASK 0x10000
+#define PPLL_LOOP_CTRL__pw_pc_prbs_en__SHIFT 0x10
+#define PPLL_LOOP_CTRL__pw_pc_tdc_clk_gate_en_MASK 0x40000
+#define PPLL_LOOP_CTRL__pw_pc_tdc_clk_gate_en__SHIFT 0x12
+#define PPLL_LOOP_CTRL__pw_pc_phase_offset_MASK 0x7f00000
+#define PPLL_LOOP_CTRL__pw_pc_phase_offset__SHIFT 0x14
+#define PPLL_REFCLK_CNTL__regs_pw_refclk0_recv_en_MASK 0x1
+#define PPLL_REFCLK_CNTL__regs_pw_refclk0_recv_en__SHIFT 0x0
+#define PPLL_REFCLK_CNTL__regs_pw_refclk1_recv_en_MASK 0x2
+#define PPLL_REFCLK_CNTL__regs_pw_refclk1_recv_en__SHIFT 0x1
+#define PPLL_REFCLK_CNTL__regs_pw_refclk2_recv_en_MASK 0x4
+#define PPLL_REFCLK_CNTL__regs_pw_refclk2_recv_en__SHIFT 0x2
+#define PPLL_REFCLK_CNTL__regs_pw_refclk3_recv_en_MASK 0x8
+#define PPLL_REFCLK_CNTL__regs_pw_refclk3_recv_en__SHIFT 0x3
+#define PPLL_REFCLK_CNTL__regs_pw_refclk0_recv_sel_MASK 0x100
+#define PPLL_REFCLK_CNTL__regs_pw_refclk0_recv_sel__SHIFT 0x8
+#define PPLL_REFCLK_CNTL__regs_pw_refclk1_recv_sel_MASK 0x200
+#define PPLL_REFCLK_CNTL__regs_pw_refclk1_recv_sel__SHIFT 0x9
+#define PPLL_REFCLK_CNTL__regs_pw_refclk2_recv_sel_MASK 0x400
+#define PPLL_REFCLK_CNTL__regs_pw_refclk2_recv_sel__SHIFT 0xa
+#define PPLL_REFCLK_CNTL__regs_pw_refclk3_recv_sel_MASK 0x800
+#define PPLL_REFCLK_CNTL__regs_pw_refclk3_recv_sel__SHIFT 0xb
+#define PPLL_REFCLK_CNTL__regs_pw_refdivsrc_MASK 0xc000
+#define PPLL_REFCLK_CNTL__regs_pw_refdivsrc__SHIFT 0xe
+#define PPLL_REFCLK_CNTL__regs_pw_ref2core_sel_MASK 0x10000
+#define PPLL_REFCLK_CNTL__regs_pw_ref2core_sel__SHIFT 0x10
+#define PPLL_CLKOUT_CNTL__regs_pw_pixclk_pre_pdivsel_MASK 0x100
+#define PPLL_CLKOUT_CNTL__regs_pw_pixclk_pre_pdivsel__SHIFT 0x8
+#define PPLL_CLKOUT_CNTL__regs_pw_pixclk_pdivsel_MASK 0x200
+#define PPLL_CLKOUT_CNTL__regs_pw_pixclk_pdivsel__SHIFT 0x9
+#define PPLL_CLKOUT_CNTL__regs_pw_dvoclk_pre_pdivsel_MASK 0x400
+#define PPLL_CLKOUT_CNTL__regs_pw_dvoclk_pre_pdivsel__SHIFT 0xa
+#define PPLL_CLKOUT_CNTL__regs_pw_dvoclk_pdivsel_MASK 0x800
+#define PPLL_CLKOUT_CNTL__regs_pw_dvoclk_pdivsel__SHIFT 0xb
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_en_MASK 0x1000
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_en__SHIFT 0xc
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_pre_pdivsel_MASK 0x2000
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_pre_pdivsel__SHIFT 0xd
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_pdivsel_MASK 0x4000
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_pdivsel__SHIFT 0xe
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_obs_sel_MASK 0x8000
+#define PPLL_CLKOUT_CNTL__regs_pw_idclk_obs_sel__SHIFT 0xf
+#define PPLL_CLKOUT_CNTL__regs_pw_refclk_sel_MASK 0x30000
+#define PPLL_CLKOUT_CNTL__regs_pw_refclk_sel__SHIFT 0x10
+#define PPLL_CLKOUT_CNTL__regs_cc_resetb_MASK 0x100000
+#define PPLL_CLKOUT_CNTL__regs_cc_resetb__SHIFT 0x14
+#define PPLL_DFT_CNTL__regs_pw_obs_en_MASK 0x1
+#define PPLL_DFT_CNTL__regs_pw_obs_en__SHIFT 0x0
+#define PPLL_DFT_CNTL__regs_pw_obs_div_sel_1_MASK 0x6
+#define PPLL_DFT_CNTL__regs_pw_obs_div_sel_1__SHIFT 0x1
+#define PPLL_DFT_CNTL__regs_pw_obs_clk_sel_1_MASK 0xf0
+#define PPLL_DFT_CNTL__regs_pw_obs_clk_sel_1__SHIFT 0x4
+#define PPLL_DFT_CNTL__regs_pw_obs_clk_sel_2_MASK 0xf00
+#define PPLL_DFT_CNTL__regs_pw_obs_clk_sel_2__SHIFT 0x8
+#define PPLL_DFT_CNTL__regs_pw_obs_sel_MASK 0x3000
+#define PPLL_DFT_CNTL__regs_pw_obs_sel__SHIFT 0xc
+#define PPLL_ANALOG_CNTL__regs_pw_spare_MASK 0xff
+#define PPLL_ANALOG_CNTL__regs_pw_spare__SHIFT 0x0
+#define PPLL_POSTDIV__reg_tmg_postdiv_MASK 0xf00
+#define PPLL_POSTDIV__reg_tmg_postdiv__SHIFT 0x8
+#define PPLL_POSTDIV__reg_tmg_pixclk_pdiv2_MASK 0x1000
+#define PPLL_POSTDIV__reg_tmg_pixclk_pdiv2__SHIFT 0xc
+#define PPLL_DEBUG0__pw_pc_phase_jump_trig_MASK 0x2
+#define PPLL_DEBUG0__pw_pc_phase_jump_trig__SHIFT 0x1
+#define PPLL_DEBUG0__pw_pc_fine_tdc_dis_MASK 0x4
+#define PPLL_DEBUG0__pw_pc_fine_tdc_dis__SHIFT 0x2
+#define PPLL_DEBUG0__pw_pc_coarse_tdc_dis_MASK 0x8
+#define PPLL_DEBUG0__pw_pc_coarse_tdc_dis__SHIFT 0x3
+#define PPLL_DEBUG0__pw_pc_alt_nctl_en_MASK 0x10
+#define PPLL_DEBUG0__pw_pc_alt_nctl_en__SHIFT 0x4
+#define PPLL_DEBUG0__pw_pc_alt_nctl_MASK 0x1ffffe0
+#define PPLL_DEBUG0__pw_pc_alt_nctl__SHIFT 0x5
+#define PPLL_DEBUG0__pw_pc_nctl_coarse_step_dis_MASK 0x2000000
+#define PPLL_DEBUG0__pw_pc_nctl_coarse_step_dis__SHIFT 0x19
+#define PPLL_DEBUG0__pw_pc_trig_coarse_step_MASK 0x4000000
+#define PPLL_DEBUG0__pw_pc_trig_coarse_step__SHIFT 0x1a
+#define PPLL_DEBUG0__pw_pc_dft_sel_MASK 0x38000000
+#define PPLL_DEBUG0__pw_pc_dft_sel__SHIFT 0x1b
+#define PPLL_DEBUG0__pw_pc_dft_capture_MASK 0x40000000
+#define PPLL_DEBUG0__pw_pc_dft_capture__SHIFT 0x1e
+#define PPLL_OBSERVE0__pw_pc_lock_det_tdc_steps_MASK 0x1f
+#define PPLL_OBSERVE0__pw_pc_lock_det_tdc_steps__SHIFT 0x0
+#define PPLL_OBSERVE0__pw_pc_clear_sticky_lock_MASK 0x40
+#define PPLL_OBSERVE0__pw_pc_clear_sticky_lock__SHIFT 0x6
+#define PPLL_OBSERVE0__pw_pc_lock_det_dis_MASK 0x100
+#define PPLL_OBSERVE0__pw_pc_lock_det_dis__SHIFT 0x8
+#define PPLL_OBSERVE0__pw_pc_dco_cfg_MASK 0x3fc00
+#define PPLL_OBSERVE0__pw_pc_dco_cfg__SHIFT 0xa
+#define PPLL_OBSERVE0__pw_pc_anaobs_sel_MASK 0xe00000
+#define PPLL_OBSERVE0__pw_pc_anaobs_sel__SHIFT 0x15
+#define PPLL_OBSERVE1__pw_pc_digobs_sel_MASK 0xf
+#define PPLL_OBSERVE1__pw_pc_digobs_sel__SHIFT 0x0
+#define PPLL_OBSERVE1__pw_pc_digobs_trig_sel_MASK 0x1e0
+#define PPLL_OBSERVE1__pw_pc_digobs_trig_sel__SHIFT 0x5
+#define PPLL_OBSERVE1__pw_pc_digobs_div_MASK 0xc00
+#define PPLL_OBSERVE1__pw_pc_digobs_div__SHIFT 0xa
+#define PPLL_OBSERVE1__pw_pc_digobs_trig_div_MASK 0x3000
+#define PPLL_OBSERVE1__pw_pc_digobs_trig_div__SHIFT 0xc
+#define PPLL_OBSERVE1__reg_tmg_lock_timer_MASK 0x3fff0000
+#define PPLL_OBSERVE1__reg_tmg_lock_timer__SHIFT 0x10
+#define PPLL_UPDATE_CNTL__reg_tmg_PLL_UPDATE_LOCK_MASK 0x4
+#define PPLL_UPDATE_CNTL__reg_tmg_PLL_UPDATE_LOCK__SHIFT 0x2
+#define PPLL_UPDATE_CNTL__reg_tmg_PLL_UPDATE_POINT_MASK 0x8
+#define PPLL_UPDATE_CNTL__reg_tmg_PLL_UPDATE_POINT__SHIFT 0x3
+#define PPLL_UPDATE_CNTL__tmg_reg_UPDATE_PENDING_MASK 0x100
+#define PPLL_UPDATE_CNTL__tmg_reg_UPDATE_PENDING__SHIFT 0x8
+#define PPLL_UPDATE_CNTL__pc_pw_pll_rdy_MASK 0x200
+#define PPLL_UPDATE_CNTL__pc_pw_pll_rdy__SHIFT 0x9
+#define PPLL_UPDATE_CNTL__TieLow1_MASK 0x10000
+#define PPLL_UPDATE_CNTL__TieLow1__SHIFT 0x10
+#define PPLL_OBSERVE0_OUT__disppll_core_obsout_MASK 0xffffffff
+#define PPLL_OBSERVE0_OUT__disppll_core_obsout__SHIFT 0x0
+#define PPLL_STATUS_DEBUG1__dbg_pll_rdy_MASK 0x1
+#define PPLL_STATUS_DEBUG1__dbg_pll_rdy__SHIFT 0x0
+#define PPLL_STATUS_DEBUG1__core_disppll_pwr_ok_vddp_MASK 0x2
+#define PPLL_STATUS_DEBUG1__core_disppll_pwr_ok_vddp__SHIFT 0x1
+#define PPLL_STATUS_DEBUG1__core_disppll_rcu_dc_resetb_vddp_MASK 0x4
+#define PPLL_STATUS_DEBUG1__core_disppll_rcu_dc_resetb_vddp__SHIFT 0x2
+#define PPLL_DEBUG_MUX_CNTL__DEBUG_BUS_MUX_SEL_MASK 0x1f
+#define PPLL_DEBUG_MUX_CNTL__DEBUG_BUS_MUX_SEL__SHIFT 0x0
+#define PPLL_DIV_UPDATE_DEBUG__TieLow2_MASK 0x1
+#define PPLL_DIV_UPDATE_DEBUG__TieLow2__SHIFT 0x0
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_FB_DIV_CHANGED_MASK 0x2
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_FB_DIV_CHANGED__SHIFT 0x1
+#define PPLL_DIV_UPDATE_DEBUG__dbg_UPDATE_PENDING_MASK 0x4
+#define PPLL_DIV_UPDATE_DEBUG__dbg_UPDATE_PENDING__SHIFT 0x2
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_CURRENT_STATE_MASK 0x18
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_CURRENT_STATE__SHIFT 0x3
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_UPDATE_ENABLE_MASK 0x20
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_UPDATE_ENABLE__SHIFT 0x5
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_UPDATE_REQ_MASK 0x40
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_UPDATE_REQ__SHIFT 0x6
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_UPDATE_ACK_MASK 0x80
+#define PPLL_DIV_UPDATE_DEBUG__tmg_reg_UPDATE_ACK__SHIFT 0x7
+#define PPLL_STATUS_DEBUG0__obsout_MASK 0xffffffff
+#define PPLL_STATUS_DEBUG0__obsout__SHIFT 0x0
+#define COMP_EN_CTL__comp_en_MASK 0x1
+#define COMP_EN_CTL__comp_en__SHIFT 0x0
+#define COMP_EN_CTL__comp_en_override_MASK 0x4
+#define COMP_EN_CTL__comp_en_override__SHIFT 0x2
+#define COMP_EN_CTL__comp_done_MASK 0x10
+#define COMP_EN_CTL__comp_done__SHIFT 0x4
+#define COMP_EN_CTL__zcal_code_override_MASK 0x40
+#define COMP_EN_CTL__zcal_code_override__SHIFT 0x6
+#define COMP_EN_CTL__zcal_cal_rtt_MASK 0x80
+#define COMP_EN_CTL__zcal_cal_rtt__SHIFT 0x7
+#define COMP_EN_CTL__zcal_base_en_MASK 0x100
+#define COMP_EN_CTL__zcal_base_en__SHIFT 0x8
+#define COMP_EN_CTL__zcal_ht_rtt_sel_MASK 0x200
+#define COMP_EN_CTL__zcal_ht_rtt_sel__SHIFT 0x9
+#define COMP_EN_CTL__zcal_code_MASK 0x7c00
+#define COMP_EN_CTL__zcal_code__SHIFT 0xa
+#define COMP_EN_CTL__zcal_ron_cal_mode_MASK 0x10000
+#define COMP_EN_CTL__zcal_ron_cal_mode__SHIFT 0x10
+#define COMP_EN_CTL__zcal_ana_dbg_sel_MASK 0x60000
+#define COMP_EN_CTL__zcal_ana_dbg_sel__SHIFT 0x11
+#define COMP_EN_CTL__cfg_cml_cmos_sel_MASK 0x80000
+#define COMP_EN_CTL__cfg_cml_cmos_sel__SHIFT 0x13
+#define COMP_EN_CTL__dsm_sel_MASK 0xf00000
+#define COMP_EN_CTL__dsm_sel__SHIFT 0x14
+#define DPCSTX_PHY_CNTL__DPCS_PHY_RESET_MASK 0x1
+#define DPCSTX_PHY_CNTL__DPCS_PHY_RESET__SHIFT 0x0
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x1
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x2
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x4
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x8
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX0_EN_MASK 0x10
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX1_EN_MASK 0x20
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX2_EN_MASK 0x40
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX3_EN_MASK 0x80
+#define DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define DPCSTX_TX_CNTL__DPCS_TX_RESYNC_MASK 0x1
+#define DPCSTX_TX_CNTL__DPCS_TX_RESYNC__SHIFT 0x0
+#define DPCSTX_TX_CNTL__DPCS_TX_STAGGERING_EN_MASK 0x2
+#define DPCSTX_TX_CNTL__DPCS_TX_STAGGERING_EN__SHIFT 0x1
+#define DPCSTX_TX_CNTL__DPCS_TX_HIGH_IMP_IDLE_OVERRIDE_EN_MASK 0x4
+#define DPCSTX_TX_CNTL__DPCS_TX_HIGH_IMP_IDLE_OVERRIDE_EN__SHIFT 0x2
+#define DPCSTX_TX_CNTL__DPCS_TX_HIGH_IMP_IDLE_MASK 0xf0
+#define DPCSTX_TX_CNTL__DPCS_TX_HIGH_IMP_IDLE__SHIFT 0x4
+#define DPCSTX_TX_CNTL__DPCS_TX_STAGGERING_DELAY_MASK 0x700
+#define DPCSTX_TX_CNTL__DPCS_TX_STAGGERING_DELAY__SHIFT 0x8
+#define DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x1000
+#define DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x2000
+#define DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x4000
+#define DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x10000
+#define DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x20000
+#define DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX_TX_CNTL__DPCS_TX_FIFO_WR_START_DELAY_MASK 0xf00000
+#define DPCSTX_TX_CNTL__DPCS_TX_FIFO_WR_START_DELAY__SHIFT 0x14
+#define DPCSTX_TX_CNTL__DPCS_TX_DVI_LINK_MODE_MASK 0x3000000
+#define DPCSTX_TX_CNTL__DPCS_TX_DVI_LINK_MODE__SHIFT 0x18
+#define DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000
+#define DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0xf
+#define DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY_MASK 0xff00
+#define DPCSTX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY__SHIFT 0x8
+#define DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000
+#define DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW_MASK 0x1
+#define DPCSTX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR_MASK 0x2
+#define DPCSTX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK_MASK 0x10
+#define DPCSTX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX0_FIFO_ERROR_MASK 0x1
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX0_FIFO_ERROR__SHIFT 0x0
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX1_FIFO_ERROR_MASK 0x2
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX1_FIFO_ERROR__SHIFT 0x1
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX2_FIFO_ERROR_MASK 0x4
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX2_FIFO_ERROR__SHIFT 0x2
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX3_FIFO_ERROR_MASK 0x8
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX3_FIFO_ERROR__SHIFT 0x3
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX_ERROR_CLR_MASK 0x100
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX_ERROR_CLR__SHIFT 0x8
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX_FIFO_ERROR_MASK_MASK 0x1000
+#define DPCSTX_TX_ERROR_STATUS__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0xc
+#define DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x3ffff
+#define DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xffffffff
+#define DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR_MASK 0x3ffff
+#define DPCSTX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR__SHIFT 0x0
+#define DPCSTX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA_MASK 0xffffffff
+#define DPCSTX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA__SHIFT 0x0
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x1
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x6
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x38
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x3
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_CLOCK_SEL_MASK 0x700
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_CLOCK_SEL__SHIFT 0x8
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL_MASK 0x3800
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL__SHIFT 0xb
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x4000
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x10000
+#define DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0xe0000
+#define DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x11
+#define DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX_MASK 0xff000000
+#define DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX__SHIFT 0x18
+#define DPCSTX_TEST_DEBUG_DATA__DPCS_TEST_DEBUG_DATA_MASK 0xffffffff
+#define DPCSTX_TEST_DEBUG_DATA__DPCS_TEST_DEBUG_DATA__SHIFT 0x0
+
+#endif /* DCE_11_2_SH_MASK_H */
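
Every field in this header is described by a `_MASK`/`__SHIFT` pair, the standard convention for register bitfield access: AND with the mask, then shift down. A minimal sketch of generic accessors built on that naming scheme (amdgpu's own REG_GET_FIELD()/REG_SET_FIELD() macros follow the same convention; the helper names and register value below are illustrative only):

#include <stdint.h>
#include "dce_11_2_sh_mask.h"

/* Extract a field: mask it out, then shift it down to bit 0. */
#define GET_FIELD(val, field) \
	(((val) & field##_MASK) >> field##__SHIFT)
/* Replace a field, leaving the other bits of the register intact. */
#define SET_FIELD(val, field, x) \
	(((val) & ~field##_MASK) | (((uint32_t)(x) << field##__SHIFT) & field##_MASK))

static uint32_t retune_fcw_frac(uint32_t freq_ctrl0)
{
	/* Decode the integer and fractional frequency control words. */
	uint32_t fcw_int  = GET_FIELD(freq_ctrl0, FREQ_CTRL0__fcw0_int);
	uint32_t fcw_frac = GET_FIELD(freq_ctrl0, FREQ_CTRL0__fcw0_frac);

	(void)fcw_int; (void)fcw_frac;
	/* Program a new fractional word without disturbing the rest. */
	return SET_FIELD(freq_ctrl0, FREQ_CTRL0__fcw0_frac, 0x8000);
}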
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
index a9b692319..ebaf67bb1 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
@@ -1391,6 +1391,8 @@
#define mmRLC_CGTT_MGCG_OVERRIDE 0xec48
#define mmRLC_CGCG_CGLS_CTRL 0xec49
#define mmRLC_CGCG_RAMP_CTRL 0xec4a
+#define mmRLC_CGCG_CGLS_CTRL_3D 0xec9d
+#define mmRLC_CGCG_RAMP_CTRL_3D 0xec9e
#define mmRLC_DYN_PG_STATUS 0xec4b
#define mmRLC_DYN_PG_REQUEST 0xec4c
#define mmRLC_PG_DELAY 0xec4d
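
The two new RLC offsets sit apart from the existing CGCG block (0xec9d/0xec9e versus 0xec48-0xec4a), mirroring the non-3D control/ramp pair for the 3D pipe. A sketch of how a driver would touch the new register, assuming amdgpu's RREG32()/WREG32() MMIO accessors and, as an assumption, that bit 0 is the enable bit (the companion _sh_mask header carries the real field layout):

/* Toggle 3D coarse-grain clock gating; the bit position is an assumption. */
static void rlc_update_cgcg_3d(struct amdgpu_device *adev, bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
	if (enable)
		data |= 1;	/* assumed CGCG enable bit */
	else
		data &= ~1u;
	if (data != orig)
		WREG32(mmRLC_CGCG_CGLS_CTRL_3D, data);
}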
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
index b2d4aaf04..6f6fb3474 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
@@ -111,5 +111,6 @@
#define mmUVD_MIF_RECON1_ADDR_CONFIG 0x39c5
#define ixUVD_MIF_SCLR_ADDR_CONFIG 0x4
#define mmUVD_JPEG_ADDR_CONFIG 0x3a1f
+#define mmUVD_GP_SCRATCH4 0x3d38
#endif /* UVD_6_0_D_H */
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index eaf451e26..3493da5c8 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -79,9 +79,23 @@
#define ATOM_PPLL0 2
#define ATOM_PPLL3 3
+#define ATOM_PHY_PLL0 4
+#define ATOM_PHY_PLL1 5
+
#define ATOM_EXT_PLL1 8
+#define ATOM_GCK_DFS 8
#define ATOM_EXT_PLL2 9
+#define ATOM_FCH_CLK 9
#define ATOM_EXT_CLOCK 10
+#define ATOM_DP_DTO 11
+
+#define ATOM_COMBOPHY_PLL0 20
+#define ATOM_COMBOPHY_PLL1 21
+#define ATOM_COMBOPHY_PLL2 22
+#define ATOM_COMBOPHY_PLL3 23
+#define ATOM_COMBOPHY_PLL4 24
+#define ATOM_COMBOPHY_PLL5 25
+
#define ATOM_PPLL_INVALID 0xFF
#define ENCODER_REFCLK_SRC_P1PLL 0
@@ -224,6 +238,31 @@ typedef struct _ATOM_ROM_HEADER
UCHAR ucReserved;
}ATOM_ROM_HEADER;
+
+typedef struct _ATOM_ROM_HEADER_V2_1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR uaFirmWareSignature[4]; //Signature to distinguish between Atombios and non-atombios,
+ //atombios should init it as "ATOM", don't change the position
+ USHORT usBiosRuntimeSegmentAddress;
+ USHORT usProtectedModeInfoOffset;
+ USHORT usConfigFilenameOffset;
+ USHORT usCRC_BlockOffset;
+ USHORT usBIOS_BootupMessageOffset;
+ USHORT usInt10Offset;
+ USHORT usPciBusDevInitCode;
+ USHORT usIoBaseAddress;
+ USHORT usSubsystemVendorID;
+ USHORT usSubsystemID;
+ USHORT usPCI_InfoOffset;
+ USHORT usMasterCommandTableOffset;//Offset for SW to get all command table offsets, Don't change the position
+ USHORT usMasterDataTableOffset; //Offset for SW to get all data table offsets, Don't change the position
+ UCHAR ucExtendedFunctionCode;
+ UCHAR ucReserved;
+ ULONG ulPSPDirTableOffset;
+}ATOM_ROM_HEADER_V2_1;
+
+
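The V2_1 ROM header keeps uaFirmWareSignature at the same position precisely so old and new BIOS images can be told apart the same way. A sketch of the signature check, assuming the conventional header-pointer location at byte 0x48 of the ROM image (that offset is not defined in this file):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool is_atom_rom(const unsigned char *bios, size_t len)
{
	const ATOM_ROM_HEADER_V2_1 *hdr;
	unsigned short hdr_off;

	if (len < 0x4a)
		return false;
	hdr_off = bios[0x48] | (bios[0x49] << 8);	/* little-endian pointer */
	if ((size_t)hdr_off + sizeof(*hdr) > len)
		return false;
	hdr = (const ATOM_ROM_HEADER_V2_1 *)(bios + hdr_off);
	return memcmp(hdr->uaFirmWareSignature, "ATOM", 4) == 0;
}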
//==============================Command Table Portion====================================
@@ -272,12 +311,12 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
USHORT GetSCLKOverMCLKRatio; //Atomic Table, only used by Bios
USHORT SetCRTC_Timing; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT SetCRTC_OverScan; //Atomic Table, used by various SW components,latest version 1.1
- USHORT SetCRTC_Replication; //Atomic Table, used only by Bios
+ USHORT GetSMUClockInfo; //Atomic Table, used only by Bios
USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios
USHORT UpdateCRTC_DoubleBufferRegisters; //Atomic Table, used only by Bios
USHORT LUT_AutoFill; //Atomic Table, only used by Bios
- USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios
+ USHORT SetDCEClock; //Atomic Table, start from DCE11.1, shared by driver and VBIOS, change DISPCLK and DPREFCLK
USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT GetEngineClock; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT SetCRTC_UsingDTDTiming; //Atomic Table, directly used by various SW components,latest version 1.1
@@ -292,7 +331,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
USHORT PowerConnectorDetection; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT MC_Synchronization; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
USHORT ComputeMemoryEnginePLL; //Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock
- USHORT MemoryRefreshConversion; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock
+ USHORT Gfx_Init; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock
USHORT VRAM_GetCurrentInfoBlock; //Atomic Table, used only by Bios
USHORT DynamicMemorySettings; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
USHORT MemoryTraining; //Atomic Table, used only by Bios
@@ -333,6 +372,10 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
#define LCD1OutputControl HW_Misc_Operation
#define TV1OutputControl Gfx_Harvesting
#define TVEncoderControl SMC_Init
+#define EnableHW_IconCursor SetDCEClock
+#define SetCRTC_Replication GetSMUClockInfo
+
+#define MemoryRefreshConversion Gfx_Init
typedef struct _ATOM_MASTER_COMMAND_TABLE
{
@@ -425,6 +468,9 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2
#define b3FIRST_TIME_CHANGE_CLOCK 0x08 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
#define b3SKIP_SW_PROGRAM_PLL 0x10 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
#define b3DRAM_SELF_REFRESH_EXIT 0x20 //Applicable to DRAM self refresh exit only. when set, it means it will go to program DRAM self refresh exit path
+#define b3SRIOV_INIT_BOOT 0x40 //Used by HV GPU driver only, to load uCode; for ASIC_InitTable SCLK parameter only
+#define b3SRIOV_LOAD_UCODE 0x40 //Used by HV GPU driver only, to load uCode; for ASIC_InitTable SCLK parameter only
+#define b3SRIOV_SKIP_ASIC_INIT 0x02 //Used by HV GPU driver only, skip ASIC_Init for primary adapter boot; for ASIC_InitTable SCLK parameter only
typedef struct _ATOM_COMPUTE_CLOCK_FREQ
{
@@ -518,6 +564,33 @@ typedef struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6
//ucPllCntlFlag
#define SPLL_CNTL_FLAG_VCO_MODE_MASK 0x03
+typedef struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_7
+{
+ ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ULONG ulReserved[5];
+}COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_7;
+
+//ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag
+#define COMPUTE_GPUCLK_INPUT_FLAG_CLK_TYPE_MASK 0x0f
+#define COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK 0x00
+#define COMPUTE_GPUCLK_INPUT_FLAG_SCLK 0x01
+
+typedef struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7
+{
+ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock; //Output Parameter: ucPostDiv=DFS divider
+ USHORT usSclk_fcw_frac; //fractional divider of fcw = usSclk_fcw_frac/65536
+ USHORT usSclk_fcw_int; //integer divider of fcw
+ UCHAR ucSclkPostDiv; //PLL post divider = 2^ucSclkPostDiv
+ UCHAR ucSclkVcoMode; //=0: 4G~8GHz, =1: 3G~6GHz, =3: 2G~4GHz, =2: Reserved
+ UCHAR ucSclkPllRange; //GreenTable SCLK PLL range entry index ( 0~7 )
+ UCHAR ucSscEnable;
+ USHORT usSsc_fcw1_frac; //fcw1_frac when SSC enable
+ USHORT usSsc_fcw1_int; //fcw1_int when SSC enable
+ USHORT usReserved;
+ USHORT usPcc_fcw_int;
+ USHORT usSsc_fcw_slew_frac; //fcw_slew_frac when SSC enable
+ USHORT usPcc_fcw_slew_frac;
+}COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7;
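
Per the field comments, the output block encodes PLL settings rather than a frequency: fcw = usSclk_fcw_int + usSclk_fcw_frac/65536 with a post divider of 2^ucSclkPostDiv. A sketch that back-computes the resulting SCLK, assuming fcw is the feedback multiplier applied to the reference clock (the reference frequency itself is not stated in this header):

static unsigned long sclk_khz_from_output(
		const COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 *p,
		unsigned long refclk_khz)
{
	/* fcw scaled by 65536 so the fractional part stays exact. */
	unsigned long long fcw_x65536 =
		((unsigned long long)p->usSclk_fcw_int << 16) + p->usSclk_fcw_frac;
	unsigned long long vco_khz = (refclk_khz * fcw_x65536) >> 16;

	return (unsigned long)(vco_khz >> p->ucSclkPostDiv); /* / 2^postdiv */
}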
// ucInputFlag
#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
@@ -557,12 +630,16 @@ typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2
ULONG ulReserved;
}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2;
+//Input parameter of DynamicMemorySettingsTable
+//when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag = COMPUTE_MEMORY_PLL_PARAM
typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
{
ATOM_COMPUTE_CLOCK_FREQ ulClock;
ULONG ulReserved[2];
}DYNAMICE_MEMORY_SETTINGS_PARAMETER;
+//Input parameter of DynamicMemorySettingsTable
+//when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag == COMPUTE_ENGINE_PLL_PARAM
typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
{
ATOM_COMPUTE_CLOCK_FREQ ulClock;
@@ -570,6 +647,29 @@ typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
ULONG ulReserved;
}DYNAMICE_ENGINE_SETTINGS_PARAMETER;
+//Input parameter of DynamicMemorySettingsTable ver2.1 and above
+//when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag == ADJUST_MC_SETTING_PARAM
+typedef struct _DYNAMICE_MC_DPM_SETTINGS_PARAMETER
+{
+ ATOM_COMPUTE_CLOCK_FREQ ulClock;
+ UCHAR ucMclkDPMState;
+ UCHAR ucReserved[3];
+ ULONG ulReserved;
+}DYNAMICE_MC_DPM_SETTINGS_PARAMETER;
+
+//ucMclkDPMState
+#define DYNAMIC_MC_DPM_SETTING_LOW_DPM_STATE 0
+#define DYNAMIC_MC_DPM_SETTING_MEDIUM_DPM_STATE 1
+#define DYNAMIC_MC_DPM_SETTING_HIGH_DPM_STATE 2
+
+typedef union _DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1
+{
+ DYNAMICE_MEMORY_SETTINGS_PARAMETER asMCReg;
+ DYNAMICE_ENGINE_SETTINGS_PARAMETER asMCArbReg;
+ DYNAMICE_MC_DPM_SETTINGS_PARAMETER asDPMMCReg;
+}DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1;
+
+
/****************************************************************************/
// Structures used by SetEngineClockTable
/****************************************************************************/
@@ -584,6 +684,13 @@ typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION
COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
}SET_ENGINE_CLOCK_PS_ALLOCATION;
+typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION_V1_2
+{
+ ULONG ulTargetEngineClock; //In 10Khz unit
+ COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_7 sReserved;
+}SET_ENGINE_CLOCK_PS_ALLOCATION_V1_2;
+
+
/****************************************************************************/
// Structures used by SetMemoryClockTable
/****************************************************************************/
@@ -827,6 +934,12 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
#define ATOM_ENCODER_CMD_SETUP 0x0f
#define ATOM_ENCODER_CMD_SETUP_PANEL_MODE 0x10
+// New Command for DIGxEncoderControlTable v1.5
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN4 0x14
+#define ATOM_ENCODER_CMD_STREAM_SETUP 0x0F //renamed from ATOM_ENCODER_CMD_SETUP
+#define ATOM_ENCODER_CMD_LINK_SETUP 0x11 //internal use, called by other Command Table
+#define ATOM_ENCODER_CMD_ENCODER_BLANK 0x12 //internal use, called by other Command Table
+
// ucStatus
#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10
#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00
@@ -955,6 +1068,69 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
#define DP_PANEL_MODE_INTERNAL_DP2_MODE 0x01
#define DP_PANEL_MODE_INTERNAL_DP1_MODE 0x11
+
+typedef struct _ENCODER_STREAM_SETUP_PARAMETERS_V5
+{
+ UCHAR ucDigId; // 0~6 map to DIG0~DIG6
+ UCHAR ucAction; // = ATOM_ENCODER_CMD_STREAM_SETUP
+ UCHAR ucDigMode; // ATOM_ENCODER_MODE_DP/ATOM_ENCODER_MODE_DVI/ATOM_ENCODER_MODE_HDMI
+ UCHAR ucLaneNum; // Lane number
+ ULONG ulPixelClock; // Pixel Clock in 10Khz
+ UCHAR ucBitPerColor;
+ UCHAR ucLinkRateIn270Mhz;//= DP link rate/270MHz, =6: 1.62GHz, =10: 2.7GHz, =20: 5.4GHz, =30: 8.1GHz, etc.
+ UCHAR ucReserved[2];
+}ENCODER_STREAM_SETUP_PARAMETERS_V5;
+
+typedef struct _ENCODER_LINK_SETUP_PARAMETERS_V5
+{
+ UCHAR ucDigId; // 0~6 map to DIG0~DIG6
+ UCHAR ucAction; // = ATOM_ENCODER_CMD_LINK_SETUP
+ UCHAR ucDigMode; // ATOM_ENCODER_MODE_DP/ATOM_ENCODER_MODE_DVI/ATOM_ENCODER_MODE_HDMI
+ UCHAR ucLaneNum; // Lane number
+ ULONG ulSymClock; // Symbol Clock in 10Khz
+ UCHAR ucHPDSel;
+ UCHAR ucDigEncoderSel; // DIG stream( front-end ) selection, bit0 means DIG0 FE is enabled,
+ UCHAR ucReserved[2];
+}ENCODER_LINK_SETUP_PARAMETERS_V5;
+
+typedef struct _DP_PANEL_MODE_SETUP_PARAMETERS_V5
+{
+ UCHAR ucDigId; // 0~6 map to DIG0~DIG6
+ UCHAR ucAction; // = ATOM_ENCODER_CMD_DPLINK_SETUP
+ UCHAR ucPanelMode; // =0: external DP
+ // =0x1: internal DP2
+ // =0x11: internal DP1 NutMeg/Travis DP Translator
+ UCHAR ucReserved;
+ ULONG ulReserved[2];
+}DP_PANEL_MODE_SETUP_PARAMETERS_V5;
+
+typedef struct _ENCODER_GENERIC_CMD_PARAMETERS_V5
+{
+ UCHAR ucDigId; // 0~6 map to DIG0~DIG6
+ UCHAR ucAction; // = rest of generic encoder command which does not carry any parameters
+ UCHAR ucReserved[2];
+ ULONG ulReserved[2];
+}ENCODER_GENERIC_CMD_PARAMETERS_V5;
+
+//ucDigId
+#define ATOM_ENCODER_CONFIG_V5_DIG0_ENCODER 0x00
+#define ATOM_ENCODER_CONFIG_V5_DIG1_ENCODER 0x01
+#define ATOM_ENCODER_CONFIG_V5_DIG2_ENCODER 0x02
+#define ATOM_ENCODER_CONFIG_V5_DIG3_ENCODER 0x03
+#define ATOM_ENCODER_CONFIG_V5_DIG4_ENCODER 0x04
+#define ATOM_ENCODER_CONFIG_V5_DIG5_ENCODER 0x05
+#define ATOM_ENCODER_CONFIG_V5_DIG6_ENCODER 0x06
+
+
+typedef union _DIG_ENCODER_CONTROL_PARAMETERS_V5
+{
+ ENCODER_GENERIC_CMD_PARAMETERS_V5 asCmdParam;
+ ENCODER_STREAM_SETUP_PARAMETERS_V5 asStreamParam;
+ ENCODER_LINK_SETUP_PARAMETERS_V5 asLinkParam;
+ DP_PANEL_MODE_SETUP_PARAMETERS_V5 asDPPanelModeParam;
+}DIG_ENCODER_CONTROL_PARAMETERS_V5;
+
+
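The V5 parameter union splits what used to be one encoder-control call into per-action blocks selected by ucAction. A sketch of a stream setup for a 4-lane DP path on DIG0, dispatched through amdgpu's generic table executor (the executor and index lookup exist in amdgpu; the particular values here are illustrative):

static void dig0_dp_stream_setup(struct amdgpu_device *adev)
{
	ENCODER_STREAM_SETUP_PARAMETERS_V5 args;
	int index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);

	memset(&args, 0, sizeof(args));
	args.ucDigId = ATOM_ENCODER_CONFIG_V5_DIG0_ENCODER;
	args.ucAction = ATOM_ENCODER_CMD_STREAM_SETUP;
	args.ucDigMode = ATOM_ENCODER_MODE_DP;	/* defined elsewhere in this header */
	args.ucLaneNum = 4;
	args.ulPixelClock = 14850;		/* 148.5 MHz in 10 kHz units */
	args.ucBitPerColor = 8;
	args.ucLinkRateIn270Mhz = 10;		/* 2.7 Gbps per the comment above */

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
				  (uint32_t *)&args);
}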
/****************************************************************************/
// Structures used by UNIPHYTransmitterControlTable
// LVTMATransmitterControlTable
@@ -1371,6 +1547,49 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5 DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6
+{
+ UCHAR ucPhyId; // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF
+ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_xxx
+ union
+ {
+ UCHAR ucDigMode; // ATOM_ENCODER_MODE_DP/ATOM_ENCODER_MODE_DVI/ATOM_ENCODER_MODE_HDMI
+ UCHAR ucDPLaneSet; // DP voltage swing and pre-emphasis value defined in DPCD DP_LANE_SET, "DP_LANE_SET__xDB_y_zV"
+ };
+ UCHAR ucLaneNum; // Lane number
+ ULONG ulSymClock; // Symbol Clock in 10Khz
+ UCHAR ucHPDSel; // =1: HPD1, =2: HPD2, .... =6: HPD6, =0: HPD is not assigned
+ UCHAR ucDigEncoderSel; // DIG stream( front-end ) selection, bit0 means DIG0 FE is enabled,
+ UCHAR ucConnObjId; // Connector Object Id defined in ObjectId.h
+ UCHAR ucReserved;
+ ULONG ulReserved;
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6;
+
+
+// ucDigEncoderSel
+#define ATOM_TRANMSITTER_V6__DIGA_SEL 0x01
+#define ATOM_TRANMSITTER_V6__DIGB_SEL 0x02
+#define ATOM_TRANMSITTER_V6__DIGC_SEL 0x04
+#define ATOM_TRANMSITTER_V6__DIGD_SEL 0x08
+#define ATOM_TRANMSITTER_V6__DIGE_SEL 0x10
+#define ATOM_TRANMSITTER_V6__DIGF_SEL 0x20
+#define ATOM_TRANMSITTER_V6__DIGG_SEL 0x40
+
+// ucDigMode
+#define ATOM_TRANSMITTER_DIGMODE_V6_DP 0
+#define ATOM_TRANSMITTER_DIGMODE_V6_DVI 2
+#define ATOM_TRANSMITTER_DIGMODE_V6_HDMI 3
+#define ATOM_TRANSMITTER_DIGMODE_V6_DP_MST 5
+
+//ucHPDSel
+#define ATOM_TRANSMITTER_V6_NO_HPD_SEL 0x00
+#define ATOM_TRANSMITTER_V6_HPD1_SEL 0x01
+#define ATOM_TRANSMITTER_V6_HPD2_SEL 0x02
+#define ATOM_TRANSMITTER_V6_HPD3_SEL 0x03
+#define ATOM_TRANSMITTER_V6_HPD4_SEL 0x04
+#define ATOM_TRANSMITTER_V6_HPD5_SEL 0x05
+#define ATOM_TRANSMITTER_V6_HPD6_SEL 0x06
+
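The V1_6 transmitter block folds ucDigMode and ucDPLaneSet into a union: mode for setup-style actions, swing/pre-emphasis for link-training actions. A sketch of filling it for a DP enable on UNIPHYA with the DIG0 front end (ATOM_TRANSMITTER_ACTION_ENABLE is defined elsewhere in this header; the "TRANMSITTER" spelling below is the header's own, and the values are illustrative):

#include <string.h>

static void uniphya_dp_enable_v1_6(DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6 *t)
{
	memset(t, 0, sizeof(*t));
	t->ucPhyId = 0;					/* UNIPHYA */
	t->ucAction = ATOM_TRANSMITTER_ACTION_ENABLE;
	t->ucDigMode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
	t->ucLaneNum = 4;
	t->ulSymClock = 27000;				/* 270 MHz in 10 kHz units */
	t->ucHPDSel = ATOM_TRANSMITTER_V6_HPD1_SEL;
	t->ucDigEncoderSel = ATOM_TRANMSITTER_V6__DIGA_SEL;
}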
/****************************************************************************/
// Structures used by ExternalEncoderControlTable V1.3
@@ -1784,6 +2003,101 @@ typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3
PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;
+typedef struct _PIXEL_CLOCK_PARAMETERS_V7
+{
+ ULONG ulPixelClock; // target pixel clock to drive the CRTC timing, in unit of 100Hz.
+
+ UCHAR ucPpll; // ATOM_PHY_PLL0/ATOM_PHY_PLL1/ATOM_PPLL0
+ UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
+ // indicate which graphic encoder will be used.
+ UCHAR ucEncoderMode; // Encoder mode:
+ UCHAR ucMiscInfo; // bit[0]= Force program PLL for pixclk
+ // bit[1]= Force program PHY PLL only ( used internally by VBIOS in the DP case, where the PHY PLL is programmed for SYMCLK, not the pixel clock )
+ // bit[5:4]= RefClock source for PPLL.
+ // =0: XTLAIN( default mode )
+ // =1: pcie
+ // =2: GENLK
+ UCHAR ucCRTC; // ATOM_CRTC1~6, indicate which CRTC controller the pixel clock drives
+ UCHAR ucDeepColorRatio; // HDMI panel bit depth: =0: 24bpp, =1: 30bpp, =2: 36bpp
+ UCHAR ucReserved[2];
+ ULONG ulReserved;
+}PIXEL_CLOCK_PARAMETERS_V7;
+
+//ucMiscInfo
+#define PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL 0x01
+#define PIXEL_CLOCK_V7_MISC_PROG_PHYPLL 0x02
+#define PIXEL_CLOCK_V7_MISC_YUV420_MODE 0x04
+#define PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN 0x08
+#define PIXEL_CLOCK_V7_MISC_REF_DIV_SRC 0x30
+#define PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN 0x00
+#define PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_PCIE 0x10
+#define PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK 0x20
+
+//ucDeepColorRatio
+#define PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS 0x00 //00 - DCCG_DEEP_COLOR_DTO_DISABLE: Disable Deep Color DTO
+#define PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4 0x01 //01 - DCCG_DEEP_COLOR_DTO_5_4_RATIO: Set Deep Color DTO to 5:4
+#define PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2 0x02 //02 - DCCG_DEEP_COLOR_DTO_3_2_RATIO: Set Deep Color DTO to 3:2
+#define PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1 0x03 //03 - DCCG_DEEP_COLOR_DTO_2_1_RATIO: Set Deep Color DTO to 2:1
+
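The new misc-info bits make V7 self-describing for the deep-color and YUV420 cases DCE 11.2 adds. A sketch of a request for a 297 MHz HDMI 4:2:0 mode on the first CRTC from PHY PLL0 (ATOM_ENCODER_MODE_HDMI and ATOM_CRTC1 are defined elsewhere in this header; the values are illustrative):

static void fill_pixclk_v7(PIXEL_CLOCK_PARAMETERS_V7 *p)
{
	memset(p, 0, sizeof(*p));
	p->ulPixelClock = 2970000;		/* 297 MHz in 100 Hz units */
	p->ucPpll = ATOM_PHY_PLL0;
	p->ucEncoderMode = ATOM_ENCODER_MODE_HDMI;
	p->ucMiscInfo = PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL |
			PIXEL_CLOCK_V7_MISC_YUV420_MODE |
			PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN;
	p->ucCRTC = ATOM_CRTC1;
	p->ucDeepColorRatio = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
}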
+// SetDCEClockTable input parameter for DCE11.1
+typedef struct _SET_DCE_CLOCK_PARAMETERS_V1_1
+{
+ ULONG ulDISPClkFreq; // target DISPCLK frequency in unit of 10kHz, return real DISPCLK frequency. when ucFlag[1]=1, in unit of 100Hz.
+ UCHAR ucFlag; // bit0=1: DPREFCLK bypass DFS bit0=0: DPREFCLK not bypass DFS
+ UCHAR ucCrtc; // use when enable DCCG pixel clock ucFlag[1]=1
+ UCHAR ucPpllId; // use when enable DCCG pixel clock ucFlag[1]=1
+ UCHAR ucDeepColorRatio; // use when enable DCCG pixel clock ucFlag[1]=1
+}SET_DCE_CLOCK_PARAMETERS_V1_1;
+
+
+typedef struct _SET_DCE_CLOCK_PS_ALLOCATION_V1_1
+{
+ SET_DCE_CLOCK_PARAMETERS_V1_1 asParam;
+ ULONG ulReserved[2];
+}SET_DCE_CLOCK_PS_ALLOCATION_V1_1;
+
+//SET_DCE_CLOCK_PARAMETERS_V1_1.ucFlag
+#define SET_DCE_CLOCK_FLAG_GEN_DPREFCLK 0x01
+#define SET_DCE_CLOCK_FLAG_DPREFCLK_BYPASS 0x01
+#define SET_DCE_CLOCK_FLAG_ENABLE_PIXCLK 0x02
+
+// SetDCEClockTable input parameter for DCE11.2( POLARIS10 and POLARIS11 ) and above
+typedef struct _SET_DCE_CLOCK_PARAMETERS_V2_1
+{
+ ULONG ulDCEClkFreq; // target DCE frequency in unit of 10kHz, return real DISPCLK/DPREFCLK frequency.
+ UCHAR ucDCEClkType; // =0: DISPCLK =1: DPREFCLK =2: PIXCLK
+ UCHAR ucDCEClkSrc; // ATOM_PPLL0 or ATOM_GCK_DFS or ATOM_FCH_CLK or ATOM_COMBOPHY_PLLx
+ UCHAR ucDCEClkFlag; // Bit [1:0] = PPLL ref clock source ( when ucDCEClkSrc= ATOM_PPLL0 )
+ UCHAR ucCRTC; // ucDisp Pipe Id, ATOM_CRTC0/1/2/..., use only when ucDCEClkType = PIXCLK
+}SET_DCE_CLOCK_PARAMETERS_V2_1;
+
+//ucDCEClkType
+#define DCE_CLOCK_TYPE_DISPCLK 0
+#define DCE_CLOCK_TYPE_DPREFCLK 1
+#define DCE_CLOCK_TYPE_PIXELCLK 2 // used by VBIOS internally, called by SetPixelClockTable
+
+//ucDCEClkFlag when ucDCEClkType == DPREFCLK
+#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_MASK 0x03
+#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA 0x00
+#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK 0x01
+#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE 0x02
+#define DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN 0x03
+
+//ucDCEClkFlag when ucDCEClkType == PIXCLK
+#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_MASK 0x03
+#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_DIS 0x00 //00 - DCCG_DEEP_COLOR_DTO_DISABLE: Disable Deep Color DTO
+#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_5_4 0x01 //01 - DCCG_DEEP_COLOR_DTO_5_4_RATIO: Set Deep Color DTO to 5:4
+#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_3_2 0x02 //02 - DCCG_DEEP_COLOR_DTO_3_2_RATIO: Set Deep Color DTO to 3:2
+#define DCE_CLOCK_FLAG_PCLK_DEEPCOLOR_RATIO_2_1 0x03 //03 - DCCG_DEEP_COLOR_DTO_2_1_RATIO: Set Deep Color DTO to 2:1
+#define DCE_CLOCK_FLAG_PIXCLK_YUV420_MODE 0x04
+
+typedef struct _SET_DCE_CLOCK_PS_ALLOCATION_V2_1
+{
+ SET_DCE_CLOCK_PARAMETERS_V2_1 asParam;
+ ULONG ulReserved[2];
+}SET_DCE_CLOCK_PS_ALLOCATION_V2_1;
+
+
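Unlike V1_1, the V2_1 call takes the clock type and source explicitly, so one table serves DISPCLK, DPREFCLK and PIXCLK. A sketch of a DISPCLK change through amdgpu's table executor; per the ulDCEClkFreq comment, VBIOS writes the achieved frequency back into the same field (the 600 MHz figure is illustrative):

static u32 set_dispclk_v2_1(struct amdgpu_device *adev, u32 freq_10khz)
{
	SET_DCE_CLOCK_PS_ALLOCATION_V2_1 args;
	int index = GetIndexIntoMasterTable(COMMAND, SetDCEClock);

	memset(&args, 0, sizeof(args));
	args.asParam.ulDCEClkFreq = freq_10khz;	/* e.g. 60000 = 600 MHz */
	args.asParam.ucDCEClkType = DCE_CLOCK_TYPE_DISPCLK;
	args.asParam.ucDCEClkSrc = ATOM_GCK_DFS;

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
				  (uint32_t *)&args);

	return args.asParam.ulDCEClkFreq;	/* actual frequency achieved */
}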
/****************************************************************************/
// Structures used by AdjustDisplayPllTable
@@ -2300,6 +2614,11 @@ typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
#define VOLTAGE_TYPE_VDDCI 4
#define VOLTAGE_TYPE_VDDGFX 5
#define VOLTAGE_TYPE_PCC 6
+#define VOLTAGE_TYPE_MVPP 7
+#define VOLTAGE_TYPE_LEDDPM 8
+#define VOLTAGE_TYPE_PCC_MVDD 9
+#define VOLTAGE_TYPE_PCIE_VDDC 10
+#define VOLTAGE_TYPE_PCIE_VDDR 11
#define VOLTAGE_TYPE_GENERIC_I2C_1 0x11
#define VOLTAGE_TYPE_GENERIC_I2C_2 0x12
@@ -2396,6 +2715,39 @@ typedef struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2
USHORT usTDP_Power; // TDP_Current in unit of 0.1W
}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2;
+
+// Newly added from CI Hawaii for GetVoltageInfoTable, input parameter structure
+typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3
+{
+ UCHAR ucVoltageType; // Input: which voltage to query, VDDC/MVDDC/MVDDQ/VDDCI
+ UCHAR ucVoltageMode; // Input: indicates action: get voltage info
+ USHORT usVoltageLevel; // Input: real voltage level in unit of mV, or voltage phase (0, 1, 2, ...), or leakage id
+ ULONG ulSCLKFreq; // Input: when ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE, DPM state SCLK frequency, defined in the PPTable SCLK/voltage dependence table
+ ULONG ulReserved[3];
+}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3;
+
+// Newly added from CI Hawaii for the EVV feature
+typedef struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3
+{
+ ULONG ulVoltageLevel; // real voltage level in unit of 0.01 mV
+ ULONG ulReserved[4];
+}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3;
+
+
+/****************************************************************************/
+// Structures used by GetSMUClockInfo
+/****************************************************************************/
+typedef struct _GET_SMU_CLOCK_INFO_INPUT_PARAMETER_V2_1
+{
+ ULONG ulDfsPllOutputFreq:24;
+ ULONG ucDfsDivider:8;
+}GET_SMU_CLOCK_INFO_INPUT_PARAMETER_V2_1;
+
+typedef struct _GET_SMU_CLOCK_INFO_OUTPUT_PARAMETER_V2_1
+{
+ ULONG ulDfsOutputFreq;
+}GET_SMU_CLOCK_INFO_OUTPUT_PARAMETER_V2_1;
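The input word packs the DFS PLL output frequency into the low 24 bits and the divider into the top 8; a sketch, with the caveat that C bitfield layout is compiler- and endian-dependent (the raw encoding shown is the usual little-endian reading):

  GET_SMU_CLOCK_INFO_INPUT_PARAMETER_V2_1 in = {0};

  in.ulDfsPllOutputFreq = 600000;  /* PLL output, 24-bit field */
  in.ucDfsDivider       = 4;       /* post divider, 8-bit field */
  /* equivalent raw dword: (4UL << 24) | (600000UL & 0xFFFFFF) */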
+
/****************************************************************************/
// Structures used by TVEncoderControlTable
/****************************************************************************/
@@ -2429,13 +2781,13 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
USHORT PaletteData; // Only used by BIOS
USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info
USHORT DIGTransmitterInfo; // Internal used by VBIOS only version 3.1
- USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
+ USHORT SMU_Info; // Shared by various SW components,latest version 1.1
USHORT SupportedDevicesInfo; // Will be obsolete from R600
USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600
USHORT VRAM_UsageByFirmware; // Shared by various SW components,latest version 1.3 will be used from R600
USHORT GPIO_Pin_LUT; // Shared by various SW components,latest version 1.1
USHORT VESA_ToInternalModeLUT; // Only used by Bios
- USHORT ComponentVideoInfo; // Shared by various SW components,latest version 2.1 will be used from R600
+ USHORT GFX_Info; // Shared by various SW components,latest version 2.1 will be used from R600
USHORT PowerPlayInfo; // Shared by various SW components,latest version 2.1,new design from R600
USHORT GPUVirtualizationInfo; // Will be obsolete from R600
USHORT SaveRestoreInfo; // Only used by Bios
@@ -2455,7 +2807,7 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
USHORT ASIC_ProfilingInfo; // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600
USHORT VoltageObjectInfo; // Shared by various SW components, latest version 1.1
USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1
- USHORT ServiceInfo;
+ USHORT ServiceInfo;
}ATOM_MASTER_LIST_OF_DATA_TABLES;
typedef struct _ATOM_MASTER_DATA_TABLE
@@ -2469,6 +2821,8 @@ typedef struct _ATOM_MASTER_DATA_TABLE
#define DAC_Info PaletteData
#define TMDS_Info DIGTransmitterInfo
#define CompassionateData GPUVirtualizationInfo
+#define AnalogTV_Info SMU_Info
+#define ComponentVideoInfo GFX_Info
/****************************************************************************/
// Structure used in MultimediaCapabilityInfoTable
@@ -4278,10 +4632,15 @@ typedef struct _EXT_DISPLAY_PATH
#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
//usCaps
-#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x01
-#define EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN 0x02
-#define EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 0x04
-#define EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT 0x08
+#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x0001
+#define EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN 0x0002
+#define EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK 0x007C
+#define EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 (0x01 << 2 ) //PI redriver chip
+#define EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT (0x02 << 2 ) //TI retimer chip
+#define EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175 (0x03 << 2 ) //Parade DP->HDMI converter chip
+
+
+
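With this change the external-chip identity moves from one bit per chip to a 5-bit code in usCaps bits [6:2]; callers mask before comparing. A sketch, where path is a hypothetical EXT_DISPLAY_PATH pointer:

  if ((path->usCaps & EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) ==
      EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT)
          ;  /* TI DP159 retimer populated on this path */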
typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
{
@@ -4325,10 +4684,10 @@ typedef struct _ATOM_COMMON_RECORD_HEADER
#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
#define ATOM_ENCODER_CAP_RECORD_TYPE 20
#define ATOM_BRACKET_LAYOUT_RECORD_TYPE 21
-
+#define ATOM_CONNECTOR_FORCED_TMDS_CAP_RECORD_TYPE 22
//Must be updated when new record type is added,equal to that record definition!
-#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE
+#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_FORCED_TMDS_CAP_RECORD_TYPE
typedef struct _ATOM_I2C_RECORD
{
@@ -4458,10 +4817,12 @@ typedef struct _ATOM_ENCODER_DVO_CF_RECORD
UCHAR ucPadding[2];
}ATOM_ENCODER_DVO_CF_RECORD;
-// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
-#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by HW encoder
+// Bit maps for ATOM_ENCODER_CAP_RECORD.usEncoderCap
+#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by HW encoder; retired in NI. From SI onward the real meaning is MST_EN
+#define ATOM_ENCODER_CAP_RECORD_MST_EN 0x01 // from SI, this bit indicates whether DP MST is enabled
#define ATOM_ENCODER_CAP_RECORD_HBR2_EN 0x02 // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
#define ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN 0x04 // HDMI2.0 6Gbps enable or not.
+#define ATOM_ENCODER_CAP_RECORD_HBR3_EN 0x08 // DP1.3 HBR3 is supported by board.
typedef struct _ATOM_ENCODER_CAP_RECORD
{
@@ -4482,6 +4843,31 @@ typedef struct _ATOM_ENCODER_CAP_RECORD
};
}ATOM_ENCODER_CAP_RECORD;
+// Used after SI
+typedef struct _ATOM_ENCODER_CAP_RECORD_V2
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ union {
+ USHORT usEncoderCap;
+ struct {
+#if ATOM_BIG_ENDIAN
+ USHORT usReserved:12; // Bits 4-15 may be defined for other capabilities in future
+ USHORT usHBR3En:1; // Bit3 is for DP1.3 HBR3 enable
+ USHORT usHDMI6GEn:1; // Bit2 is for HDMI 6Gbps enable; used starting from CZ (APU) and Ellesmere (dGPU)
+ USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
+ USHORT usMSTEn:1; // Bit0 is for DP1.2 MST enable
+#else
+ USHORT usMSTEn:1; // Bit0 is for DP1.2 MST enable
+ USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
+ USHORT usHDMI6GEn:1; // Bit2 is for HDMI 6Gbps enable; used starting from CZ (APU) and Ellesmere (dGPU)
+ USHORT usHBR3En:1; // Bit3 is for DP1.3 HBR3 enable
+ USHORT usReserved:12; // Bits 4-15 may be defined for other capabilities in future
+#endif
+ };
+ };
+}ATOM_ENCODER_CAP_RECORD_V2;
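Because the bitfield order flips under ATOM_BIG_ENDIAN, the portable way to test capabilities is through the usEncoderCap view with the bit defines above; a sketch (rec is a hypothetical record pointer):

  USHORT cap = rec->usEncoderCap;

  if (cap & ATOM_ENCODER_CAP_RECORD_MST_EN)
          ;  /* DP MST enabled */
  if (cap & ATOM_ENCODER_CAP_RECORD_HBR3_EN)
          ;  /* DP1.3 HBR3 supported by the board */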
+
+
// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2
@@ -4554,6 +4940,16 @@ typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
USHORT usReserved;
}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
+
+typedef struct _ATOM_CONNECTOR_FORCED_TMDS_CAP_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ // override TMDS capability on this connector when it operates in TMDS mode. ucMaxTmdsClkRateIn2_5Mhz = max TMDS clock in MHz / 2.5
+ UCHAR ucMaxTmdsClkRateIn2_5Mhz;
+ UCHAR ucReserved;
+} ATOM_CONNECTOR_FORCED_TMDS_CAP_RECORD;
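The stored byte is the cap in 2.5 MHz steps, so a value of 120 encodes 120 * 2.5 = 300 MHz; in integer arithmetic (rec is a hypothetical record pointer):

  unsigned int max_tmds_khz = rec->ucMaxTmdsClkRateIn2_5Mhz * 2500;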
+
+
typedef struct _ATOM_CONNECTOR_LAYOUT_INFO
{
USHORT usConnectorObjectId;
@@ -4657,12 +5053,12 @@ typedef struct _ATOM_VOLTAGE_CONTROL
#define VOLTAGE_CONTROL_ID_UP1801 0x0C
#define VOLTAGE_CONTROL_ID_ST6788A 0x0D
#define VOLTAGE_CONTROL_ID_CHLIR3564SVI2 0x0E
-#define VOLTAGE_CONTROL_ID_AD527x 0x0F
-#define VOLTAGE_CONTROL_ID_NCP81022 0x10
-#define VOLTAGE_CONTROL_ID_LTC2635 0x11
-#define VOLTAGE_CONTROL_ID_NCP4208 0x12
+#define VOLTAGE_CONTROL_ID_AD527x 0x0F
+#define VOLTAGE_CONTROL_ID_NCP81022 0x10
+#define VOLTAGE_CONTROL_ID_LTC2635 0x11
+#define VOLTAGE_CONTROL_ID_NCP4208 0x12
#define VOLTAGE_CONTROL_ID_IR35xx 0x13
-#define VOLTAGE_CONTROL_ID_RT9403 0x14
+#define VOLTAGE_CONTROL_ID_RT9403 0x14
#define VOLTAGE_CONTROL_ID_GENERIC_I2C 0x40
@@ -4784,11 +5180,38 @@ typedef struct _ATOM_SVID2_VOLTAGE_OBJECT_V3
ULONG ulReserved;
}ATOM_SVID2_VOLTAGE_OBJECT_V3;
+
+
+typedef struct _ATOM_MERGED_VOLTAGE_OBJECT_V3
+{
+ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_MERGED_POWER
+ UCHAR ucMergedVType; // VDDC/VDDCI/....
+ UCHAR ucReserved[3];
+}ATOM_MERGED_VOLTAGE_OBJECT_V3;
+
+
+typedef struct _ATOM_EVV_DPM_INFO
+{
+ ULONG ulDPMSclk; // DPM state SCLK
+ USHORT usVAdjOffset; // Adjust voltage offset in unit of mV
+ UCHAR ucDPMTblVIndex; // Voltage Index in SMC_DPM_Table structure VddcTable/VddGfxTable
+ UCHAR ucDPMState; // DPMState0~7
+} ATOM_EVV_DPM_INFO;
+
+// ucVoltageMode = VOLTAGE_OBJ_EVV
+typedef struct _ATOM_EVV_VOLTAGE_OBJECT_V3
+{
+ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_EVV
+ ATOM_EVV_DPM_INFO asEvvDpmList[8];
+}ATOM_EVV_VOLTAGE_OBJECT_V3;
+
+
typedef union _ATOM_VOLTAGE_OBJECT_V3{
ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
ATOM_SVID2_VOLTAGE_OBJECT_V3 asSVID2Obj;
+ ATOM_EVV_VOLTAGE_OBJECT_V3 asEvvObj;
}ATOM_VOLTAGE_OBJECT_V3;
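Each union member starts with ATOM_VOLTAGE_OBJECT_HEADER_V3, so consumers can dispatch on the mode tag before picking a view; a sketch, assuming the header carries ucVoltageMode as the comments here indicate (obj is a hypothetical pointer):

  switch (obj->asGpioVoltageObj.sHeader.ucVoltageMode) {
  case VOLTAGE_OBJ_EVV:
          /* new in this patch: use obj->asEvvObj */
          break;
  default:
          break;
  }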
typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1
@@ -4963,7 +5386,11 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V3_3
ULONG ulLkgEncodeMax;
ULONG ulLkgEncodeMin;
ULONG ulEfuseLogisticAlpha;
+
+ union{
USHORT usPowerDpm0;
+ USHORT usParamNegFlag; //bit0=1: indicates ulRoBeta is negative; bit1=1: indicates Kv_m max is positive
+ };
USHORT usPowerDpm1;
USHORT usPowerDpm2;
USHORT usPowerDpm3;
@@ -5067,6 +5494,158 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V3_4
ULONG ulReserved[8]; // Reserved for future ASIC
}ATOM_ASIC_PROFILING_INFO_V3_4;
+// for Polaris10/Polaris11 speed EVV algorithm
+typedef struct _ATOM_ASIC_PROFILING_INFO_V3_5
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ ULONG ulMaxVddc; //Maximum voltage for all parts, in unit of 0.01mv
+ ULONG ulMinVddc; //Minimum voltage for all parts, in unit of 0.01mv
+ USHORT usLkgEuseIndex; //Efuse Lkg_FT address ( BYTE address )
+ UCHAR ucLkgEfuseBitLSB; //Efuse Lkg_FT bit shift in 32bit DWORD
+ UCHAR ucLkgEfuseLength; //Efuse Lkg_FT length
+ ULONG ulLkgEncodeLn_MaxDivMin; //value of ln(Max_Lkg_Ft/Min_Lkg_Ft ) in unit of 0.00001 ( unit=100000 )
+ ULONG ulLkgEncodeMax; //Maximum Lkg_Ft measured value ( or efuse decode value ), in unit of 0.00001 ( unit=100000 )
+ ULONG ulLkgEncodeMin; //Minimum Lkg_Ft measured value ( or efuse decode value ), in unit of 0.00001 ( unit=100000 )
+ EFUSE_LINEAR_FUNC_PARAM sRoFuse;//Efuse RO info: DWORD address, bit shift, length, max/min measure value. in unit of 1.
+ ULONG ulEvvDefaultVddc; //def="EVV_DEFAULT_VDDC" descr="return default VDDC(v) when Efuse not cut" unit="100000"/>
+ ULONG ulEvvNoCalcVddc; //def="EVV_NOCALC_VDDC" descr="return VDDC(v) when Calculation is bad" unit="100000"/>
+ ULONG ulSpeed_Model; //def="EVV_SPEED_MODEL" descr="0 = Greek model, 1 = multivariate model" unit="1"/>
+ ULONG ulSM_A0; //def="EVV_SM_A0" descr="Leakage coeff(Multivariant Mode)." unit="100000"/>
+ ULONG ulSM_A1; //def="EVV_SM_A1" descr="Leakage/SCLK coeff(Multivariant Mode)." unit="1000000"/>
+ ULONG ulSM_A2; //def="EVV_SM_A2" descr="Alpha( Greek Mode ) or VDDC/SCLK coeff(Multivariant Mode)." unit="100000"/>
+ ULONG ulSM_A3; //def="EVV_SM_A3" descr="Beta( Greek Mode ) or SCLK coeff(Multivariant Mode)." unit="100000"/>
+ ULONG ulSM_A4; //def="EVV_SM_A4" descr="VDDC^2/SCLK coeff(Multivariant Mode)." unit="100000"/>
+ ULONG ulSM_A5; //def="EVV_SM_A5" descr="VDDC^2 coeff(Multivariant Mode)." unit="100000"/>
+ ULONG ulSM_A6; //def="EVV_SM_A6" descr="Gamma( Greek Mode ) or VDDC coeff(Multivariant Mode)." unit="100000"/>
+ ULONG ulSM_A7; //def="EVV_SM_A7" descr="Epsilon( Greek Mode ) or constant(Multivariant Mode)." unit="100000"/>
+ UCHAR ucSM_A0_sign; //def="EVV_SM_A0_SIGN" descr="=0 SM_A0 is positive. =1: SM_A0 is negative" unit="1"/>
+ UCHAR ucSM_A1_sign; //def="EVV_SM_A1_SIGN" descr="=0 SM_A1 is positive. =1: SM_A1 is negative" unit="1"/>
+ UCHAR ucSM_A2_sign; //def="EVV_SM_A2_SIGN" descr="=0 SM_A2 is positive. =1: SM_A2 is negative" unit="1"/>
+ UCHAR ucSM_A3_sign; //def="EVV_SM_A3_SIGN" descr="=0 SM_A3 is positive. =1: SM_A3 is negative" unit="1"/>
+ UCHAR ucSM_A4_sign; //def="EVV_SM_A4_SIGN" descr="=0 SM_A4 is positive. =1: SM_A4 is negative" unit="1"/>
+ UCHAR ucSM_A5_sign; //def="EVV_SM_A5_SIGN" descr="=0 SM_A5 is positive. =1: SM_A5 is negative" unit="1"/>
+ UCHAR ucSM_A6_sign; //def="EVV_SM_A6_SIGN" descr="=0 SM_A6 is positive. =1: SM_A6 is negative" unit="1"/>
+ UCHAR ucSM_A7_sign; //def="EVV_SM_A7_SIGN" descr="=0 SM_A7 is positive. =1: SM_A7 is negative" unit="1"/>
+ ULONG ulMargin_RO_a; //def="EVV_MARGIN_RO_A" descr="A term of the RO equation A*x^2 + B*x + C, unit=1"
+ ULONG ulMargin_RO_b; //def="EVV_MARGIN_RO_B" descr="B term of the RO equation A*x^2 + B*x + C, unit=1"
+ ULONG ulMargin_RO_c; //def="EVV_MARGIN_RO_C" descr="C term of the RO equation A*x^2 + B*x + C, unit=1"
+ ULONG ulMargin_fixed; //def="EVV_MARGIN_FIXED" descr="Fixed MHz to add to SCLK margin, unit=1" unit="1"/>
+ ULONG ulMargin_Fmax_mean; //def="EVV_MARGIN_FMAX_MEAN" descr="Percentage to add for Fmax mean margin unit=10000" unit="10000"/>
+ ULONG ulMargin_plat_mean; //def="EVV_MARGIN_PLAT_MEAN" descr="Percentage to add for platform mean margin unit=10000" unit="10000"/>
+ ULONG ulMargin_Fmax_sigma; //def="EVV_MARGIN_FMAX_SIGMA" descr="Percentage to add for Fmax sigma margin unit=10000" unit="10000"/>
+ ULONG ulMargin_plat_sigma; //def="EVV_MARGIN_PLAT_SIGMA" descr="Percentage to add for platform sigma margin unit=10000" unit="10000"/>
+ ULONG ulMargin_DC_sigma; //def="EVV_MARGIN_DC_SIGMA" descr="Regulator DC tolerance margin (mV) unit=100" unit="100"/>
+ ULONG ulReserved[12];
+}ATOM_ASIC_PROFILING_INFO_V3_5;
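Read together, the SM_A0..A7 comments imply a multivariate fit of roughly
f(V, F, L) = A0*L + A1*L*F + A2*V*F + A3*F + A4*V^2*F + A5*V^2 + A6*V + A7,
with each coefficient stored as an unsigned fixed-point magnitude plus a separate sign byte. Decoding one coefficient, as a sketch (the exact equation is inferred from the comments, not stated in this header):

  double a2 = (double)info->ulSM_A2 / 100000.0;  /* unit="100000" */

  if (info->ucSM_A2_sign)
          a2 = -a2;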
+
+/* for Polaris10/11 AVFS parameters */
+typedef struct _ATOM_ASIC_PROFILING_INFO_V3_6
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ ULONG ulMaxVddc;
+ ULONG ulMinVddc;
+ USHORT usLkgEuseIndex;
+ UCHAR ucLkgEfuseBitLSB;
+ UCHAR ucLkgEfuseLength;
+ ULONG ulLkgEncodeLn_MaxDivMin;
+ ULONG ulLkgEncodeMax;
+ ULONG ulLkgEncodeMin;
+ EFUSE_LINEAR_FUNC_PARAM sRoFuse;
+ ULONG ulEvvDefaultVddc;
+ ULONG ulEvvNoCalcVddc;
+ ULONG ulSpeed_Model;
+ ULONG ulSM_A0;
+ ULONG ulSM_A1;
+ ULONG ulSM_A2;
+ ULONG ulSM_A3;
+ ULONG ulSM_A4;
+ ULONG ulSM_A5;
+ ULONG ulSM_A6;
+ ULONG ulSM_A7;
+ UCHAR ucSM_A0_sign;
+ UCHAR ucSM_A1_sign;
+ UCHAR ucSM_A2_sign;
+ UCHAR ucSM_A3_sign;
+ UCHAR ucSM_A4_sign;
+ UCHAR ucSM_A5_sign;
+ UCHAR ucSM_A6_sign;
+ UCHAR ucSM_A7_sign;
+ ULONG ulMargin_RO_a;
+ ULONG ulMargin_RO_b;
+ ULONG ulMargin_RO_c;
+ ULONG ulMargin_fixed;
+ ULONG ulMargin_Fmax_mean;
+ ULONG ulMargin_plat_mean;
+ ULONG ulMargin_Fmax_sigma;
+ ULONG ulMargin_plat_sigma;
+ ULONG ulMargin_DC_sigma;
+ ULONG ulLoadLineSlop;
+ ULONG ulaTDClimitPerDPM[8];
+ ULONG ulaNoCalcVddcPerDPM[8];
+ ULONG ulAVFS_meanNsigma_Acontant0;
+ ULONG ulAVFS_meanNsigma_Acontant1;
+ ULONG ulAVFS_meanNsigma_Acontant2;
+ USHORT usAVFS_meanNsigma_DC_tol_sigma;
+ USHORT usAVFS_meanNsigma_Platform_mean;
+ USHORT usAVFS_meanNsigma_Platform_sigma;
+ ULONG ulGB_VDROOP_TABLE_CKSOFF_a0;
+ ULONG ulGB_VDROOP_TABLE_CKSOFF_a1;
+ ULONG ulGB_VDROOP_TABLE_CKSOFF_a2;
+ ULONG ulGB_VDROOP_TABLE_CKSON_a0;
+ ULONG ulGB_VDROOP_TABLE_CKSON_a1;
+ ULONG ulGB_VDROOP_TABLE_CKSON_a2;
+ ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+ USHORT usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+ ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+ ULONG ulAVFSGB_FUSE_TABLE_CKSON_m1;
+ USHORT usAVFSGB_FUSE_TABLE_CKSON_m2;
+ ULONG ulAVFSGB_FUSE_TABLE_CKSON_b;
+ USHORT usMaxVoltage_0_25mv;
+ UCHAR ucEnableGB_VDROOP_TABLE_CKSOFF;
+ UCHAR ucEnableGB_VDROOP_TABLE_CKSON;
+ UCHAR ucEnableGB_FUSE_TABLE_CKSOFF;
+ UCHAR ucEnableGB_FUSE_TABLE_CKSON;
+ USHORT usPSM_Age_ComFactor;
+ UCHAR ucEnableApplyAVFS_CKS_OFF_Voltage;
+ UCHAR ucReserved;
+}ATOM_ASIC_PROFILING_INFO_V3_6;
+
+
+typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{
+ ULONG ulMaxSclkFreq;
+ UCHAR ucVco_setting; // 1: 3-6GHz, 3: 2-4GHz
+ UCHAR ucPostdiv; // divide by 2^n
+ USHORT ucFcw_pcc;
+ USHORT ucFcw_trans_upper;
+ USHORT ucRcw_trans_lower;
+}ATOM_SCLK_FCW_RANGE_ENTRY_V1;
+
+
+// SMU_InfoTable for Polaris10/Polaris11
+typedef struct _ATOM_SMU_INFO_V2_1
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ UCHAR ucSclkEntryNum; // for potential future extension; indicates the number of ATOM_SCLK_FCW_RANGE_ENTRY_V1 entries
+ UCHAR ucReserved[3];
+ ATOM_SCLK_FCW_RANGE_ENTRY_V1 asSclkFcwRangeEntry[8];
+}ATOM_SMU_INFO_V2_1;
+
+
+// GFX_InfoTable for Polaris10/Polaris11
+typedef struct _ATOM_GFX_INFO_V2_1
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ UCHAR GfxIpMinVer;
+ UCHAR GfxIpMajVer;
+ UCHAR max_shader_engines;
+ UCHAR max_tile_pipes;
+ UCHAR max_cu_per_sh;
+ UCHAR max_sh_per_se;
+ UCHAR max_backends_per_se;
+ UCHAR max_texture_channel_caches;
+}ATOM_GFX_INFO_V2_1;
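The topology fields compose multiplicatively; a sketch of the implied CU count, assuming the usual shader-engine hierarchy (gfx is a hypothetical table pointer):

  unsigned int num_cu = gfx->max_shader_engines *
                        gfx->max_sh_per_se *
                        gfx->max_cu_per_sh;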
+
+
typedef struct _ATOM_POWER_SOURCE_OBJECT
{
UCHAR ucPwrSrcId; // Power source
@@ -5765,14 +6344,6 @@ sExtDispConnInfo: Display connector information table provided t
**********************************************************************************************************************/
-// this Table is used for Kaveri/Kabini APU
-typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
-{
- ATOM_INTEGRATED_SYSTEM_INFO_V1_8 sIntegratedSysInfo; // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition
- ULONG ulPowerplayTable[128]; // Update comments here to link new powerplay table definition structure
-}ATOM_FUSION_SYSTEM_INFO_V2;
-
-
typedef struct _ATOM_I2C_REG_INFO
{
UCHAR ucI2cRegIndex;
@@ -5859,7 +6430,50 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9
#define EDP_VS_VARIABLE_PREM_MODE 5
-// this IntegrateSystemInfoTable is used for Carrizo
+// ulGPUCapInfo
+#define SYS_INFO_V1_9_GPUCAPSINFO_DISABLE_AUX_MODE_DETECT 0x08
+#define SYS_INFO_V1_9_GPUCAPSINFO_ENABEL_DFS_BYPASS 0x10
+//ulGPUCapInfo[16]=1 indicates the SMC firmware is able to support the GNB fast resume function, so the driver can call the SMC to program most GNB registers during resume, from ML
+#define SYS_INFO_V1_9_GPUCAPSINFO_GNB_FAST_RESUME_CAPABLE 0x00010000
+//ulGPUCapInfo[18]=1 indicates the IOMMU is not available
+#define SYS_INFO_V1_9_GPUCAPINFO_IOMMU_DISABLE 0x00040000
+//ulGPUCapInfo[19]=1 indicates the MARC aperture is opened.
+#define SYS_INFO_V1_9_GPUCAPINFO_MARC_APERTURE_ENABLE 0x00080000
+
+
+typedef struct _DPHY_TIMING_PARA
+{
+ UCHAR ucProfileID; // SENSOR_PROFILES
+ ULONG ucPara;
+} DPHY_TIMING_PARA;
+
+typedef struct _DPHY_ELEC_PARA
+{
+ USHORT usPara[3];
+} DPHY_ELEC_PARA;
+
+typedef struct _CAMERA_MODULE_INFO
+{
+ UCHAR ucID; // 0: Rear, 1: Front right of user, 2: Front left of user
+ UCHAR strModuleName[8];
+ DPHY_TIMING_PARA asTimingPara[6]; // Exact number is still being estimated and confirmed with the sensor vendor
+} CAMERA_MODULE_INFO;
+
+typedef struct _FLASHLIGHT_INFO
+{
+ UCHAR ucID; // 0: Rear, 1: Front
+ UCHAR strName[8];
+} FLASHLIGHT_INFO;
+
+typedef struct _CAMERA_DATA
+{
+ ULONG ulVersionCode;
+ CAMERA_MODULE_INFO asCameraInfo[3]; // Assuming 3 camera sensors max
+ FLASHLIGHT_INFO asFlashInfo; // Assuming 1 flashlight max
+ DPHY_ELEC_PARA asDphyElecPara;
+ ULONG ulCrcVal; // CRC
+}CAMERA_DATA;
+
typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_10
{
ATOM_COMMON_TABLE_HEADER sHeader;
@@ -5883,7 +6497,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_10
USHORT usPanelRefreshRateRange;
UCHAR ucMemoryType;
UCHAR ucUMAChannelNumber;
- UCHAR strVBIOSMsg[40];
+ ULONG ulMsgReserved[10];
ATOM_TDP_CONFIG asTdpConfig;
ULONG ulReserved[7];
ATOM_CLK_VOLT_CAPABILITY_V2 sDispClkVoltageMapping[8];
@@ -5925,8 +6539,27 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_10
UCHAR ucEDPv1_4VSMode;
UCHAR ucReserved2;
ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+ CAMERA_DATA asCameraInfo;
+ ULONG ulReserved8[29];
}ATOM_INTEGRATED_SYSTEM_INFO_V1_10;
+
+// this Table is used for Kaveri/Kabini APU
+typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
+{
+ ATOM_INTEGRATED_SYSTEM_INFO_V1_8 sIntegratedSysInfo; // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition
+ ULONG ulPowerplayTable[128]; // Update comments here to link new powerplay table definition structure
+}ATOM_FUSION_SYSTEM_INFO_V2;
+
+
+typedef struct _ATOM_FUSION_SYSTEM_INFO_V3
+{
+ ATOM_INTEGRATED_SYSTEM_INFO_V1_10 sIntegratedSysInfo; // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_10 definition
+ ULONG ulPowerplayTable[192]; // Reserve 768 bytes space for PowerPlayInfoTable
+}ATOM_FUSION_SYSTEM_INFO_V3;
+
+#define FUSION_V3_OFFSET_FROM_TOP_OF_FB 0x800
+
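The define presumably places the V3 table 0x800 bytes below the top of the frame buffer; under that assumption (fb_top is a hypothetical top-of-FB address):

  uint64_t fusion_v3_addr = fb_top - FUSION_V3_OFFSET_FROM_TOP_OF_FB;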
/**************************************************************************/
// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
//Memory SS Info Table
@@ -6193,12 +6826,12 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S3_DFP1_ACTIVE 0x00000008L
#define ATOM_S3_CRT2_ACTIVE 0x00000010L
#define ATOM_S3_LCD2_ACTIVE 0x00000020L
-#define ATOM_S3_DFP6_ACTIVE 0x00000040L
+#define ATOM_S3_DFP6_ACTIVE 0x00000040L
#define ATOM_S3_DFP2_ACTIVE 0x00000080L
#define ATOM_S3_CV_ACTIVE 0x00000100L
-#define ATOM_S3_DFP3_ACTIVE 0x00000200L
-#define ATOM_S3_DFP4_ACTIVE 0x00000400L
-#define ATOM_S3_DFP5_ACTIVE 0x00000800L
+#define ATOM_S3_DFP3_ACTIVE 0x00000200L
+#define ATOM_S3_DFP4_ACTIVE 0x00000400L
+#define ATOM_S3_DFP5_ACTIVE 0x00000800L
#define ATOM_S3_DEVICE_ACTIVE_MASK 0x00000FFFL
@@ -6215,9 +6848,9 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S3_DFP6_CRTC_ACTIVE 0x00400000L
#define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L
#define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L
-#define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L
-#define ATOM_S3_DFP4_CRTC_ACTIVE 0x04000000L
-#define ATOM_S3_DFP5_CRTC_ACTIVE 0x08000000L
+#define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L
+#define ATOM_S3_DFP4_CRTC_ACTIVE 0x04000000L
+#define ATOM_S3_DFP5_CRTC_ACTIVE 0x08000000L
#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
@@ -6238,9 +6871,9 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S3_DFP6_ACTIVEb0 0x40
#define ATOM_S3_DFP2_ACTIVEb0 0x80
#define ATOM_S3_CV_ACTIVEb1 0x01
-#define ATOM_S3_DFP3_ACTIVEb1 0x02
-#define ATOM_S3_DFP4_ACTIVEb1 0x04
-#define ATOM_S3_DFP5_ACTIVEb1 0x08
+#define ATOM_S3_DFP3_ACTIVEb1 0x02
+#define ATOM_S3_DFP4_ACTIVEb1 0x04
+#define ATOM_S3_DFP5_ACTIVEb1 0x08
#define ATOM_S3_ACTIVE_CRTC1w0 0xFFF
@@ -6254,9 +6887,9 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S3_DFP6_CRTC_ACTIVEb2 0x40
#define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80
#define ATOM_S3_CV_CRTC_ACTIVEb3 0x01
-#define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02
-#define ATOM_S3_DFP4_CRTC_ACTIVEb3 0x04
-#define ATOM_S3_DFP5_CRTC_ACTIVEb3 0x08
+#define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02
+#define ATOM_S3_DFP4_CRTC_ACTIVEb3 0x04
+#define ATOM_S3_DFP5_CRTC_ACTIVEb3 0x08
#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF
@@ -6878,15 +7511,18 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE_V2_1
#define _32Mx16 0x32
#define _32Mx32 0x33
#define _32Mx128 0x35
-#define _64Mx32 0x43
#define _64Mx8 0x41
#define _64Mx16 0x42
+#define _64Mx32 0x43
+#define _64Mx128 0x45
#define _128Mx8 0x51
#define _128Mx16 0x52
#define _128Mx32 0x53
#define _256Mx8 0x61
#define _256Mx16 0x62
+#define _256Mx32 0x63
#define _512Mx8 0x71
+#define _512Mx16 0x72
#define SAMSUNG 0x1
@@ -7407,6 +8043,17 @@ typedef struct _ATOM_MEMORY_TRAINING_INFO
}ATOM_MEMORY_TRAINING_INFO;
+typedef struct _ATOM_MEMORY_TRAINING_INFO_V3_1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulMCUcodeVersion;
+ USHORT usMCIOInitLen; //length of the ATOM_REG_INIT_SETTING array
+ USHORT usMCUcodeLen; //length of the ATOM_MC_UCODE_DATA array
+ USHORT usMCIORegInitOffset; //offset of the ATOM_REG_INIT_SETTING array
+ USHORT usMCUcodeOffset; //offset of the MC uCode ULONG array
+}ATOM_MEMORY_TRAINING_INFO_V3_1;
+
+
typedef struct SW_I2C_CNTL_DATA_PARAMETERS
{
UCHAR ucControl;
@@ -7623,7 +8270,7 @@ typedef struct _ASIC_TRANSMITTER_INFO
{
USHORT usTransmitterObjId;
USHORT usSupportDevice;
- UCHAR ucTransmitterCmdTblId;
+ UCHAR ucTransmitterCmdTblId;
UCHAR ucConfig;
UCHAR ucEncoderID; //available 1st encoder ( default )
UCHAR ucOptionEncoderID; //available 2nd encoder ( optional )
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index ab84d4947..7464daf89 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -26,6 +26,8 @@
#include "amd_shared.h"
+struct cgs_device;
+
/**
* enum cgs_gpu_mem_type - GPU memory types
*/
@@ -92,6 +94,7 @@ enum cgs_voltage_planes {
*/
enum cgs_ucode_id {
CGS_UCODE_ID_SMU = 0,
+ CGS_UCODE_ID_SMU_SK,
CGS_UCODE_ID_SDMA0,
CGS_UCODE_ID_SDMA1,
CGS_UCODE_ID_CP_CE,
@@ -111,6 +114,7 @@ enum cgs_system_info_id {
CGS_SYSTEM_INFO_PCIE_MLW,
CGS_SYSTEM_INFO_CG_FLAGS,
CGS_SYSTEM_INFO_PG_FLAGS,
+ CGS_SYSTEM_INFO_GFX_CU_INFO,
CGS_SYSTEM_INFO_ID_MAXIMUM,
};
@@ -223,7 +227,7 @@ struct cgs_acpi_method_info {
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_gpu_mem_info_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+typedef int (*cgs_gpu_mem_info_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
uint64_t *mc_start, uint64_t *mc_size,
uint64_t *mem_size);
@@ -239,7 +243,7 @@ typedef int (*cgs_gpu_mem_info_t)(void *cgs_device, enum cgs_gpu_mem_type type,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_gmap_kmem_t)(void *cgs_device, void *kmem, uint64_t size,
+typedef int (*cgs_gmap_kmem_t)(struct cgs_device *cgs_device, void *kmem, uint64_t size,
uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *kmem_handle, uint64_t *mcaddr);
@@ -250,7 +254,7 @@ typedef int (*cgs_gmap_kmem_t)(void *cgs_device, void *kmem, uint64_t size,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_gunmap_kmem_t)(void *cgs_device, cgs_handle_t kmem_handle);
+typedef int (*cgs_gunmap_kmem_t)(struct cgs_device *cgs_device, cgs_handle_t kmem_handle);
/**
* cgs_alloc_gpu_mem() - Allocate GPU memory
@@ -279,7 +283,7 @@ typedef int (*cgs_gunmap_kmem_t)(void *cgs_device, cgs_handle_t kmem_handle);
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_alloc_gpu_mem_t)(void *cgs_device, enum cgs_gpu_mem_type type,
+typedef int (*cgs_alloc_gpu_mem_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *handle);
@@ -291,7 +295,7 @@ typedef int (*cgs_alloc_gpu_mem_t)(void *cgs_device, enum cgs_gpu_mem_type type,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_free_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+typedef int (*cgs_free_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
/**
* cgs_gmap_gpu_mem() - GPU-map GPU memory
@@ -303,7 +307,7 @@ typedef int (*cgs_free_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_gmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+typedef int (*cgs_gmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle,
uint64_t *mcaddr);
/**
@@ -315,7 +319,7 @@ typedef int (*cgs_gmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_gunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+typedef int (*cgs_gunmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
/**
* cgs_kmap_gpu_mem() - Kernel-map GPU memory
@@ -326,7 +330,7 @@ typedef int (*cgs_gunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_kmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
+typedef int (*cgs_kmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle,
void **map);
/**
@@ -336,7 +340,7 @@ typedef int (*cgs_kmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_kunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
+typedef int (*cgs_kunmap_gpu_mem_t)(struct cgs_device *cgs_device, cgs_handle_t handle);
/**
* cgs_read_register() - Read an MMIO register
@@ -345,7 +349,7 @@ typedef int (*cgs_kunmap_gpu_mem_t)(void *cgs_device, cgs_handle_t handle);
*
* Return: register value
*/
-typedef uint32_t (*cgs_read_register_t)(void *cgs_device, unsigned offset);
+typedef uint32_t (*cgs_read_register_t)(struct cgs_device *cgs_device, unsigned offset);
/**
* cgs_write_register() - Write an MMIO register
@@ -353,7 +357,7 @@ typedef uint32_t (*cgs_read_register_t)(void *cgs_device, unsigned offset);
* @offset: register offset
* @value: register value
*/
-typedef void (*cgs_write_register_t)(void *cgs_device, unsigned offset,
+typedef void (*cgs_write_register_t)(struct cgs_device *cgs_device, unsigned offset,
uint32_t value);
/**
@@ -363,7 +367,7 @@ typedef void (*cgs_write_register_t)(void *cgs_device, unsigned offset,
*
* Return: register value
*/
-typedef uint32_t (*cgs_read_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+typedef uint32_t (*cgs_read_ind_register_t)(struct cgs_device *cgs_device, enum cgs_ind_reg space,
unsigned index);
/**
@@ -372,7 +376,7 @@ typedef uint32_t (*cgs_read_ind_register_t)(void *cgs_device, enum cgs_ind_reg s
* @offset: register offset
* @value: register value
*/
-typedef void (*cgs_write_ind_register_t)(void *cgs_device, enum cgs_ind_reg space,
+typedef void (*cgs_write_ind_register_t)(struct cgs_device *cgs_device, enum cgs_ind_reg space,
unsigned index, uint32_t value);
/**
@@ -382,7 +386,7 @@ typedef void (*cgs_write_ind_register_t)(void *cgs_device, enum cgs_ind_reg spac
*
* Return: Value read
*/
-typedef uint8_t (*cgs_read_pci_config_byte_t)(void *cgs_device, unsigned addr);
+typedef uint8_t (*cgs_read_pci_config_byte_t)(struct cgs_device *cgs_device, unsigned addr);
/**
* cgs_read_pci_config_word() - Read word from PCI configuration space
@@ -391,7 +395,7 @@ typedef uint8_t (*cgs_read_pci_config_byte_t)(void *cgs_device, unsigned addr);
*
* Return: Value read
*/
-typedef uint16_t (*cgs_read_pci_config_word_t)(void *cgs_device, unsigned addr);
+typedef uint16_t (*cgs_read_pci_config_word_t)(struct cgs_device *cgs_device, unsigned addr);
/**
* cgs_read_pci_config_dword() - Read dword from PCI configuration space
@@ -400,7 +404,7 @@ typedef uint16_t (*cgs_read_pci_config_word_t)(void *cgs_device, unsigned addr);
*
* Return: Value read
*/
-typedef uint32_t (*cgs_read_pci_config_dword_t)(void *cgs_device,
+typedef uint32_t (*cgs_read_pci_config_dword_t)(struct cgs_device *cgs_device,
unsigned addr);
/**
@@ -409,7 +413,7 @@ typedef uint32_t (*cgs_read_pci_config_dword_t)(void *cgs_device,
* @addr: address
* @value: value to write
*/
-typedef void (*cgs_write_pci_config_byte_t)(void *cgs_device, unsigned addr,
+typedef void (*cgs_write_pci_config_byte_t)(struct cgs_device *cgs_device, unsigned addr,
uint8_t value);
/**
@@ -418,7 +422,7 @@ typedef void (*cgs_write_pci_config_byte_t)(void *cgs_device, unsigned addr,
* @addr: address, must be word-aligned
* @value: value to write
*/
-typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr,
+typedef void (*cgs_write_pci_config_word_t)(struct cgs_device *cgs_device, unsigned addr,
uint16_t value);
/**
@@ -427,7 +431,7 @@ typedef void (*cgs_write_pci_config_word_t)(void *cgs_device, unsigned addr,
* @addr: address, must be dword-aligned
* @value: value to write
*/
-typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr,
+typedef void (*cgs_write_pci_config_dword_t)(struct cgs_device *cgs_device, unsigned addr,
uint32_t value);
@@ -441,7 +445,7 @@ typedef void (*cgs_write_pci_config_dword_t)(void *cgs_device, unsigned addr,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_get_pci_resource_t)(void *cgs_device,
+typedef int (*cgs_get_pci_resource_t)(struct cgs_device *cgs_device,
enum cgs_resource_type resource_type,
uint64_t size,
uint64_t offset,
@@ -458,7 +462,7 @@ typedef int (*cgs_get_pci_resource_t)(void *cgs_device,
* Return: Pointer to start of the table, or NULL on failure
*/
typedef const void *(*cgs_atom_get_data_table_t)(
- void *cgs_device, unsigned table,
+ struct cgs_device *cgs_device, unsigned table,
uint16_t *size, uint8_t *frev, uint8_t *crev);
/**
@@ -470,7 +474,7 @@ typedef const void *(*cgs_atom_get_data_table_t)(
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_atom_get_cmd_table_revs_t)(void *cgs_device, unsigned table,
+typedef int (*cgs_atom_get_cmd_table_revs_t)(struct cgs_device *cgs_device, unsigned table,
uint8_t *frev, uint8_t *crev);
/**
@@ -481,7 +485,7 @@ typedef int (*cgs_atom_get_cmd_table_revs_t)(void *cgs_device, unsigned table,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_atom_exec_cmd_table_t)(void *cgs_device,
+typedef int (*cgs_atom_exec_cmd_table_t)(struct cgs_device *cgs_device,
unsigned table, void *args);
/**
@@ -491,7 +495,7 @@ typedef int (*cgs_atom_exec_cmd_table_t)(void *cgs_device,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_create_pm_request_t)(void *cgs_device, cgs_handle_t *request);
+typedef int (*cgs_create_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t *request);
/**
* cgs_destroy_pm_request() - Destroy a power management request
@@ -500,7 +504,7 @@ typedef int (*cgs_create_pm_request_t)(void *cgs_device, cgs_handle_t *request);
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_destroy_pm_request_t)(void *cgs_device, cgs_handle_t request);
+typedef int (*cgs_destroy_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t request);
/**
* cgs_set_pm_request() - Activate or deactiveate a PM request
@@ -516,7 +520,7 @@ typedef int (*cgs_destroy_pm_request_t)(void *cgs_device, cgs_handle_t request);
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_set_pm_request_t)(void *cgs_device, cgs_handle_t request,
+typedef int (*cgs_set_pm_request_t)(struct cgs_device *cgs_device, cgs_handle_t request,
int active);
/**
@@ -528,7 +532,7 @@ typedef int (*cgs_set_pm_request_t)(void *cgs_device, cgs_handle_t request,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_pm_request_clock_t)(void *cgs_device, cgs_handle_t request,
+typedef int (*cgs_pm_request_clock_t)(struct cgs_device *cgs_device, cgs_handle_t request,
enum cgs_clock clock, unsigned freq);
/**
@@ -540,7 +544,7 @@ typedef int (*cgs_pm_request_clock_t)(void *cgs_device, cgs_handle_t request,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_pm_request_engine_t)(void *cgs_device, cgs_handle_t request,
+typedef int (*cgs_pm_request_engine_t)(struct cgs_device *cgs_device, cgs_handle_t request,
enum cgs_engine engine, int powered);
/**
@@ -551,7 +555,7 @@ typedef int (*cgs_pm_request_engine_t)(void *cgs_device, cgs_handle_t request,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_pm_query_clock_limits_t)(void *cgs_device,
+typedef int (*cgs_pm_query_clock_limits_t)(struct cgs_device *cgs_device,
enum cgs_clock clock,
struct cgs_clock_limits *limits);
@@ -563,7 +567,7 @@ typedef int (*cgs_pm_query_clock_limits_t)(void *cgs_device,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_set_camera_voltages_t)(void *cgs_device, uint32_t mask,
+typedef int (*cgs_set_camera_voltages_t)(struct cgs_device *cgs_device, uint32_t mask,
const uint32_t *voltages);
/**
* cgs_get_firmware_info - Get the firmware information from core driver
@@ -573,25 +577,28 @@ typedef int (*cgs_set_camera_voltages_t)(void *cgs_device, uint32_t mask,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_get_firmware_info)(void *cgs_device,
+typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info);
-typedef int(*cgs_set_powergating_state)(void *cgs_device,
+typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device,
+ enum cgs_ucode_id type);
+
+typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_powergating_state state);
-typedef int(*cgs_set_clockgating_state)(void *cgs_device,
+typedef int(*cgs_set_clockgating_state)(struct cgs_device *cgs_device,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state);
typedef int(*cgs_get_active_displays_info)(
- void *cgs_device,
+ struct cgs_device *cgs_device,
struct cgs_display_info *info);
-typedef int (*cgs_notify_dpm_enabled)(void *cgs_device, bool enabled);
+typedef int (*cgs_notify_dpm_enabled)(struct cgs_device *cgs_device, bool enabled);
-typedef int (*cgs_call_acpi_method)(void *cgs_device,
+typedef int (*cgs_call_acpi_method)(struct cgs_device *cgs_device,
uint32_t acpi_method,
uint32_t acpi_function,
void *pinput, void *poutput,
@@ -599,7 +606,7 @@ typedef int (*cgs_call_acpi_method)(void *cgs_device,
uint32_t input_size,
uint32_t output_size);
-typedef int (*cgs_query_system_info)(void *cgs_device,
+typedef int (*cgs_query_system_info)(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info);
struct cgs_ops {
@@ -641,6 +648,7 @@ struct cgs_ops {
cgs_set_camera_voltages_t set_camera_voltages;
/* Firmware Info */
cgs_get_firmware_info get_firmware_info;
+ cgs_rel_firmware rel_firmware;
/* cg pg interface*/
cgs_set_powergating_state set_powergating_state;
cgs_set_clockgating_state set_clockgating_state;
@@ -734,6 +742,8 @@ struct cgs_device
CGS_CALL(set_camera_voltages,dev,mask,voltages)
#define cgs_get_firmware_info(dev, type, info) \
CGS_CALL(get_firmware_info, dev, type, info)
+#define cgs_rel_firmware(dev, type) \
+ CGS_CALL(rel_firmware, dev, type)
#define cgs_set_powergating_state(dev, block_type, state) \
CGS_CALL(set_powergating_state, dev, block_type, state)
#define cgs_set_clockgating_state(dev, block_type, state) \
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h
index 3b47ae313..ca4f6007a 100644
--- a/drivers/gpu/drm/amd/include/cgs_linux.h
+++ b/drivers/gpu/drm/amd/include/cgs_linux.h
@@ -66,7 +66,7 @@ typedef int (*cgs_irq_handler_func_t)(void *private_data,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned src_id,
+typedef int (*cgs_add_irq_source_t)(struct cgs_device *cgs_device, unsigned src_id,
unsigned num_types,
cgs_irq_source_set_func_t set,
cgs_irq_handler_func_t handler,
@@ -83,7 +83,7 @@ typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned src_id,
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
+typedef int (*cgs_irq_get_t)(struct cgs_device *cgs_device, unsigned src_id, unsigned type);
/**
* cgs_irq_put() - Indicate IRQ source is no longer needed
@@ -98,7 +98,7 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
*
* Return: 0 on success, -errno otherwise
*/
-typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
+typedef int (*cgs_irq_put_t)(struct cgs_device *cgs_device, unsigned src_id, unsigned type);
struct cgs_os_ops {
/* IRQ handling */
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 9d2290044..e629f8a9f 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -37,6 +37,12 @@
return -EINVAL; \
} while (0)
+#define PP_CHECK_HW(hwmgr) \
+ do { \
+ if ((hwmgr) == NULL || (hwmgr)->hwmgr_func == NULL) \
+ return -EINVAL; \
+ } while (0)
+
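The macro centralizes the NULL checks that the hunks below repeat; the converted call sites then treat a missing per-ASIC hook as "not implemented" rather than a hard error. A sketch mirroring the pattern (pp_example_get is not a function from the patch):

  static int pp_example_get(void *handle)
  {
          struct pp_hwmgr *hwmgr = ((struct pp_instance *)handle)->hwmgr;

          PP_CHECK_HW(hwmgr);                      /* -EINVAL on bad state */
          if (hwmgr->hwmgr_func->get_sclk == NULL)
                  return 0;                        /* optional hook absent */
          return hwmgr->hwmgr_func->get_sclk(hwmgr, false);
  }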
static int pp_early_init(void *handle)
{
return 0;
@@ -54,22 +60,29 @@ static int pp_sw_init(void *handle)
pp_handle = (struct pp_instance *)handle;
hwmgr = pp_handle->hwmgr;
- if (hwmgr == NULL || hwmgr->pptable_func == NULL ||
- hwmgr->hwmgr_func == NULL ||
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->pptable_func == NULL ||
hwmgr->pptable_func->pptable_init == NULL ||
hwmgr->hwmgr_func->backend_init == NULL)
return -EINVAL;
ret = hwmgr->pptable_func->pptable_init(hwmgr);
+ if (ret)
+ goto err;
- if (ret == 0)
- ret = hwmgr->hwmgr_func->backend_init(hwmgr);
-
+ ret = hwmgr->hwmgr_func->backend_init(hwmgr);
if (ret)
- printk("amdgpu: powerplay initialization failed\n");
- else
- printk("amdgpu: powerplay initialized\n");
+ goto err1;
+
+ pr_info("amdgpu: powerplay initialized\n");
+ return 0;
+err1:
+ if (hwmgr->pptable_func->pptable_fini)
+ hwmgr->pptable_func->pptable_fini(hwmgr);
+err:
+ pr_err("amdgpu: powerplay initialization failed\n");
return ret;
}
@@ -85,10 +98,14 @@ static int pp_sw_fini(void *handle)
pp_handle = (struct pp_instance *)handle;
hwmgr = pp_handle->hwmgr;
- if (hwmgr != NULL || hwmgr->hwmgr_func != NULL ||
- hwmgr->hwmgr_func->backend_fini != NULL)
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->backend_fini != NULL)
ret = hwmgr->hwmgr_func->backend_fini(hwmgr);
+ if (hwmgr->pptable_func->pptable_fini)
+ hwmgr->pptable_func->pptable_fini(hwmgr);
+
return ret;
}
@@ -172,21 +189,117 @@ static int pp_sw_reset(void *handle)
return 0;
}
-static void pp_print_status(void *handle)
-{
-
-}
static int pp_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
+ struct pp_hwmgr *hwmgr;
+ uint32_t msg_id, pp_state;
+
+ if (handle == NULL)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG | PP_STATE_LS;
+
+ /* Enable/disable GFX blocks clock gating through SMU */
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CG,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_3D,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_RLC,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CP,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_MG,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+
+ /* Enable/disable System blocks clock gating through SMU */
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_BIF,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_BIF,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_MC,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_ROM,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_DRM,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_HDP,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_SDMA,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
+
return 0;
}
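PP_CG_MSG_ID (defined in the powerplay headers, not shown in this diff) packs the group, block, support flags and requested state into one message word for the SMU; conceptually something like the following, where the EX_ names and shift values are illustrative only:

  #define EX_GROUP_SHIFT    12
  #define EX_BLOCK_SHIFT     8
  #define EX_SUPPORT_SHIFT   4
  #define EX_MSG_ID(g, b, s, st) \
          (((g) << EX_GROUP_SHIFT) | ((b) << EX_BLOCK_SHIFT) | \
           ((s) << EX_SUPPORT_SHIFT) | (st))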
static int pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
- return 0;
+ struct pp_hwmgr *hwmgr;
+
+ if (handle == NULL)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ /* Enable/disable GFX per cu powergating through SMU */
+ return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
+ state == AMD_PG_STATE_GATE ? true : false);
}
static int pp_suspend(void *handle)
@@ -236,6 +349,7 @@ static int pp_resume(void *handle)
}
const struct amd_ip_funcs pp_ip_funcs = {
+ .name = "powerplay",
.early_init = pp_early_init,
.late_init = NULL,
.sw_init = pp_sw_init,
@@ -247,7 +361,6 @@ const struct amd_ip_funcs pp_ip_funcs = {
.is_idle = pp_is_idle,
.wait_for_idle = pp_wait_for_idle,
.soft_reset = pp_sw_reset,
- .print_status = pp_print_status,
.set_clockgating_state = pp_set_clockgating_state,
.set_powergating_state = pp_set_powergating_state,
};
@@ -275,9 +388,12 @@ static int pp_dpm_force_performance_level(void *handle,
hwmgr = pp_handle->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->force_dpm_level == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->force_dpm_level == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
@@ -309,9 +425,12 @@ static int pp_dpm_get_sclk(void *handle, bool low)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->get_sclk == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_sclk == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
}
@@ -325,9 +444,12 @@ static int pp_dpm_get_mclk(void *handle, bool low)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->get_mclk == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_mclk == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
}
@@ -341,9 +463,12 @@ static int pp_dpm_powergate_vce(void *handle, bool gate)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->powergate_vce == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->powergate_vce == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
}
@@ -357,9 +482,12 @@ static int pp_dpm_powergate_uvd(void *handle, bool gate)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->powergate_uvd == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
}
@@ -455,10 +583,14 @@ pp_debugfs_print_current_performance_level(void *handle,
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->print_current_perforce_level == NULL)
+ if (hwmgr == NULL || hwmgr->hwmgr_func == NULL)
return;
+ if (hwmgr->hwmgr_func->print_current_perforce_level == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return;
+ }
+
hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m);
}
@@ -471,9 +603,12 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->set_fan_control_mode == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
return hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
}
@@ -487,9 +622,12 @@ static int pp_dpm_get_fan_control_mode(void *handle)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->get_fan_control_mode == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
return hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
}
@@ -503,9 +641,12 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->set_fan_speed_percent == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
return hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
}
@@ -519,9 +660,12 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->get_fan_speed_percent == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
return hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
}
@@ -535,9 +679,12 @@ static int pp_dpm_get_temperature(void *handle)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->get_temperature == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_temperature == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
return hwmgr->hwmgr_func->get_temperature(hwmgr);
}
@@ -591,9 +738,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->get_pp_table == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->get_pp_table == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
return hwmgr->hwmgr_func->get_pp_table(hwmgr, table);
}
@@ -607,15 +757,18 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->set_pp_table == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->set_pp_table == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size);
}
static int pp_dpm_force_clock_level(void *handle,
- enum pp_clock_type type, int level)
+ enum pp_clock_type type, uint32_t mask)
{
struct pp_hwmgr *hwmgr;
@@ -624,11 +777,14 @@ static int pp_dpm_force_clock_level(void *handle,
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->force_clock_level == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->force_clock_level == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
- return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, level);
+ return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}
static int pp_dpm_print_clock_levels(void *handle,
@@ -641,10 +797,12 @@ static int pp_dpm_print_clock_levels(void *handle,
hwmgr = ((struct pp_instance *)handle)->hwmgr;
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL ||
- hwmgr->hwmgr_func->print_clock_levels == NULL)
- return -EINVAL;
+ PP_CHECK_HW(hwmgr);
+ if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 56856a286..d6635cc4b 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -24,7 +24,7 @@
#include "eventactionchains.h"
#include "eventsubchains.h"
-static const pem_event_action *initialize_event[] = {
+static const pem_event_action * const initialize_event[] = {
block_adjust_power_state_tasks,
power_budget_tasks,
system_config_tasks,
@@ -45,7 +45,7 @@ const struct action_chain initialize_action_chain = {
initialize_event
};
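The extra const repeated throughout this file moves these tables from "const data reached through mutable pointers" to fully read-only: the array elements themselves can no longer be reassigned, so each chain can be placed in .rodata. A two-line illustration:

  static const pem_event_action * const chain[] = { NULL };
  /* chain[0] = some_tasks;   <-- would now fail to compile */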
-static const pem_event_action *uninitialize_event[] = {
+static const pem_event_action * const uninitialize_event[] = {
ungate_all_display_phys_tasks,
uninitialize_display_phy_access_tasks,
disable_gfx_voltage_island_power_gating_tasks,
@@ -64,7 +64,7 @@ const struct action_chain uninitialize_action_chain = {
uninitialize_event
};
-static const pem_event_action *power_source_change_event_pp_enabled[] = {
+static const pem_event_action * const power_source_change_event_pp_enabled[] = {
set_power_source_tasks,
set_power_saving_state_tasks,
adjust_power_state_tasks,
@@ -79,7 +79,7 @@ const struct action_chain power_source_change_action_chain_pp_enabled = {
power_source_change_event_pp_enabled
};
-static const pem_event_action *power_source_change_event_pp_disabled[] = {
+static const pem_event_action * const power_source_change_event_pp_disabled[] = {
set_power_source_tasks,
set_nbmcu_state_tasks,
NULL
@@ -90,7 +90,7 @@ const struct action_chain power_source_changes_action_chain_pp_disabled = {
power_source_change_event_pp_disabled
};
-static const pem_event_action *power_source_change_event_hardware_dc[] = {
+static const pem_event_action * const power_source_change_event_hardware_dc[] = {
set_power_source_tasks,
set_power_saving_state_tasks,
adjust_power_state_tasks,
@@ -106,7 +106,7 @@ const struct action_chain power_source_change_action_chain_hardware_dc = {
power_source_change_event_hardware_dc
};
-static const pem_event_action *suspend_event[] = {
+static const pem_event_action * const suspend_event[] = {
reset_display_phy_access_tasks,
unregister_interrupt_tasks,
disable_gfx_voltage_island_power_gating_tasks,
@@ -130,7 +130,7 @@ const struct action_chain suspend_action_chain = {
suspend_event
};
-static const pem_event_action *resume_event[] = {
+static const pem_event_action * const resume_event[] = {
unblock_hw_access_tasks,
resume_connected_standby_tasks,
notify_smu_resume_tasks,
@@ -164,7 +164,7 @@ const struct action_chain resume_action_chain = {
resume_event
};
-static const pem_event_action *complete_init_event[] = {
+static const pem_event_action * const complete_init_event[] = {
unblock_adjust_power_state_tasks,
adjust_power_state_tasks,
enable_gfx_clock_gating_tasks,
@@ -178,7 +178,7 @@ const struct action_chain complete_init_action_chain = {
complete_init_event
};
-static const pem_event_action *enable_gfx_clock_gating_event[] = {
+static const pem_event_action * const enable_gfx_clock_gating_event[] = {
enable_gfx_clock_gating_tasks,
NULL
};
@@ -188,7 +188,7 @@ const struct action_chain enable_gfx_clock_gating_action_chain = {
enable_gfx_clock_gating_event
};
-static const pem_event_action *disable_gfx_clock_gating_event[] = {
+static const pem_event_action * const disable_gfx_clock_gating_event[] = {
disable_gfx_clock_gating_tasks,
NULL
};
@@ -198,7 +198,7 @@ const struct action_chain disable_gfx_clock_gating_action_chain = {
disable_gfx_clock_gating_event
};
-static const pem_event_action *enable_cgpg_event[] = {
+static const pem_event_action * const enable_cgpg_event[] = {
enable_cgpg_tasks,
NULL
};
@@ -208,7 +208,7 @@ const struct action_chain enable_cgpg_action_chain = {
enable_cgpg_event
};
-static const pem_event_action *disable_cgpg_event[] = {
+static const pem_event_action * const disable_cgpg_event[] = {
disable_cgpg_tasks,
NULL
};
@@ -221,7 +221,7 @@ const struct action_chain disable_cgpg_action_chain = {
/* Enable user _2d performance and activate */
-static const pem_event_action *enable_user_state_event[] = {
+static const pem_event_action * const enable_user_state_event[] = {
create_new_user_performance_state_tasks,
adjust_power_state_tasks,
NULL
@@ -232,7 +232,7 @@ const struct action_chain enable_user_state_action_chain = {
enable_user_state_event
};
-static const pem_event_action *enable_user_2d_performance_event[] = {
+static const pem_event_action * const enable_user_2d_performance_event[] = {
enable_user_2d_performance_tasks,
add_user_2d_performance_state_tasks,
set_performance_state_tasks,
@@ -247,7 +247,7 @@ const struct action_chain enable_user_2d_performance_action_chain = {
};
-static const pem_event_action *disable_user_2d_performance_event[] = {
+static const pem_event_action * const disable_user_2d_performance_event[] = {
disable_user_2d_performance_tasks,
delete_user_2d_performance_state_tasks,
NULL
@@ -259,7 +259,7 @@ const struct action_chain disable_user_2d_performance_action_chain = {
};
-static const pem_event_action *display_config_change_event[] = {
+static const pem_event_action * const display_config_change_event[] = {
/* countDisplayConfigurationChangeEventTasks, */
unblock_adjust_power_state_tasks,
set_cpu_power_state,
@@ -278,7 +278,7 @@ const struct action_chain display_config_change_action_chain = {
display_config_change_event
};
-static const pem_event_action *readjust_power_state_event[] = {
+static const pem_event_action * const readjust_power_state_event[] = {
adjust_power_state_tasks,
NULL
};
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
index 1e2ad5603..cd1ca07ef 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
@@ -62,7 +62,7 @@ int pem_init_event_action_chains(struct pp_eventmgr *eventmgr)
int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data)
{
- const pem_event_action **paction_chain;
+ const pem_event_action * const *paction_chain;
const pem_event_action *psub_chain;
int tmp_result = 0;
int result = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
index 46410e3c7..fb88e4e5d 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
@@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr)
pem_unregister_interrupts(eventmgr);
pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
-
- if (eventmgr != NULL)
- kfree(eventmgr);
}
int eventmgr_init(struct pp_instance *handle)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index b664e34db..f7ce4cb71 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -8,7 +8,9 @@ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
tonga_processpptables.o ppatomctrl.o \
tonga_hwmgr.o pppcielanes.o tonga_thermal.o\
fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \
- fiji_clockpowergating.o fiji_thermal.o
+ fiji_clockpowergating.o fiji_thermal.o \
+ polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \
+ polaris10_clockpowergating.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index ff08ce41b..436fc16da 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -237,7 +237,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
}
-static struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
+static const struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
/*we don't need an exit table here, because there is only D3 cold on Kv*/
{ phm_cf_want_uvd_power_gating, cz_tf_uvd_power_gating_initialize },
{ phm_cf_want_vce_power_gating, cz_tf_vce_power_gating_initialize },
@@ -245,7 +245,7 @@ static struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
{ NULL, NULL }
};
-struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = {
+const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = {
0,
PHM_MasterTableFlag_None,
cz_enable_clock_power_gatings_list
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
index bbbc05713..1954ceaed 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
@@ -28,8 +28,7 @@
#include "pp_asicblocks.h"
extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
-extern struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
-extern struct phm_master_table_header cz_phm_disable_clock_power_gatings_master;
+extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
extern int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
extern int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 568249033..1f14c477d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -915,7 +915,7 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr,
return 0;
}
-static struct phm_master_table_item cz_set_power_state_list[] = {
+static const struct phm_master_table_item cz_set_power_state_list[] = {
{NULL, cz_tf_update_sclk_limit},
{NULL, cz_tf_set_deep_sleep_sclk_threshold},
{NULL, cz_tf_set_watermark_threshold},
@@ -925,13 +925,13 @@ static struct phm_master_table_item cz_set_power_state_list[] = {
{NULL, NULL}
};
-static struct phm_master_table_header cz_set_power_state_master = {
+static const struct phm_master_table_header cz_set_power_state_master = {
0,
PHM_MasterTableFlag_None,
cz_set_power_state_list
};
-static struct phm_master_table_item cz_setup_asic_list[] = {
+static const struct phm_master_table_item cz_setup_asic_list[] = {
{NULL, cz_tf_reset_active_process_mask},
{NULL, cz_tf_upload_pptable_to_smu},
{NULL, cz_tf_init_sclk_limit},
@@ -943,7 +943,7 @@ static struct phm_master_table_item cz_setup_asic_list[] = {
{NULL, NULL}
};
-static struct phm_master_table_header cz_setup_asic_master = {
+static const struct phm_master_table_header cz_setup_asic_master = {
0,
PHM_MasterTableFlag_None,
cz_setup_asic_list
@@ -984,14 +984,14 @@ static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
return 0;
}
-static struct phm_master_table_item cz_power_down_asic_list[] = {
+static const struct phm_master_table_item cz_power_down_asic_list[] = {
{NULL, cz_tf_power_up_display_clock_sys_pll},
{NULL, cz_tf_clear_nb_dpm_flag},
{NULL, cz_tf_reset_cc6_data},
{NULL, NULL}
};
-static struct phm_master_table_header cz_power_down_asic_master = {
+static const struct phm_master_table_header cz_power_down_asic_master = {
0,
PHM_MasterTableFlag_None,
cz_power_down_asic_list
@@ -1095,19 +1095,19 @@ static int cz_tf_check_for_dpm_enabled(struct pp_hwmgr *hwmgr,
return 0;
}
-static struct phm_master_table_item cz_disable_dpm_list[] = {
+static const struct phm_master_table_item cz_disable_dpm_list[] = {
{ NULL, cz_tf_check_for_dpm_enabled},
{NULL, NULL},
};
-static struct phm_master_table_header cz_disable_dpm_master = {
+static const struct phm_master_table_header cz_disable_dpm_master = {
0,
PHM_MasterTableFlag_None,
cz_disable_dpm_list
};
-static struct phm_master_table_item cz_enable_dpm_list[] = {
+static const struct phm_master_table_item cz_enable_dpm_list[] = {
{ NULL, cz_tf_check_for_dpm_disabled },
{ NULL, cz_tf_program_voting_clients },
{ NULL, cz_tf_start_dpm},
@@ -1117,7 +1117,7 @@ static struct phm_master_table_item cz_enable_dpm_list[] = {
{NULL, NULL},
};
-static struct phm_master_table_header cz_enable_dpm_master = {
+static const struct phm_master_table_header cz_enable_dpm_master = {
0,
PHM_MasterTableFlag_None,
cz_enable_dpm_list
@@ -1729,7 +1729,7 @@ static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
}
static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, int level)
+ enum pp_clock_type type, uint32_t mask)
{
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
return -EINVAL;
@@ -1738,10 +1738,10 @@ static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
case PP_SCLK:
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMin,
- (1 << level));
+ mask);
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetSclkSoftMax,
- (1 << level));
+ mask);
break;
default:
break;
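
Note: the force_clock_level callback now takes a bitmask of permitted DPM
levels instead of a single level index, so several levels can be enabled at
once. A hypothetical caller sketch (not part of this patch; the one-bit-per-
level mask convention and the dispatch through the hwmgr function table are
assumed):

    /* Restrict SCLK to DPM levels 2 and 3 (illustration only). */
    uint32_t mask = (1 << 2) | (1 << 3);
    hwmgr->hwmgr_func->force_clock_level(hwmgr, PP_SCLK, mask);
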
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
index e68edf06e..e1b649bd5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
@@ -47,10 +47,17 @@ int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
data->uvd_power_gated = bgate;
- if (bgate)
+ if (bgate) {
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
fiji_update_uvd_dpm(hwmgr, true);
- else
+ } else {
fiji_update_uvd_dpm(hwmgr, false);
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 89f31bc5b..92912ab20 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -95,23 +95,23 @@ enum DPM_EVENT_SRC {
/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
* not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
*/
-uint16_t fiji_clock_stretcher_lookup_table[2][4] = { {600, 1050, 3, 0},
- {600, 1050, 6, 1} };
+static const uint16_t fiji_clock_stretcher_lookup_table[2][4] =
+{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
/* [FF, SS] type, [] 4 voltage ranges, and
* [Floor Freq, Boundary Freq, VID min , VID max]
*/
-uint32_t fiji_clock_stretcher_ddt_table[2][4][4] =
+static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] =
{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
{ {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
* (coming from PWR_CKS_CNTL.stretch_amount reg spec)
*/
-uint8_t fiji_clock_stretch_amount_conversion[2][6] = { {0, 1, 3, 2, 4, 5},
- {0, 2, 4, 5, 6, 5} };
+static const uint8_t fiji_clock_stretch_amount_conversion[2][6] =
+{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
-const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
+static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
struct fiji_power_state *cast_phw_fiji_power_state(
struct pp_hw_power_state *hw_ps)
@@ -465,14 +465,14 @@ static int fiji_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
table_info->vdd_dep_on_mclk;
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
- "VDD dependency on SCLK table is missing. \
+ "VDD dependency on SCLK table is missing. \
This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
- "VDD dependency on SCLK table has to have is missing. \
+ "VDD dependency on SCLK table has to have is missing. \
This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
- "VDD dependency on MCLK table is missing. \
+ "VDD dependency on MCLK table is missing. \
This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
"VDD dependency on MCLK table has to have is missing. \
@@ -579,6 +579,18 @@ static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
return 0;
}
+static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+{
+ struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+
+ if (data->soft_pp_table) {
+ kfree(data->soft_pp_table);
+ data->soft_pp_table = NULL;
+ }
+
+ return phm_hwmgr_backend_fini(hwmgr);
+}
+
static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -621,6 +633,8 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
+ data->force_pcie_gen = PP_PCIEGenInvalid;
+
if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
@@ -734,7 +748,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
data->pcie_lane_cap = (uint32_t)sys_info.value;
} else {
/* Ignore return value in here, we are cleaning up a mess. */
- tonga_hwmgr_backend_fini(hwmgr);
+ fiji_hwmgr_backend_fini(hwmgr);
}
return 0;
@@ -1818,7 +1832,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
PP_ASSERT_WITH_CODE(false,
"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i].value);
+ return vddci_table->entries[i-1].value);
}
static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
@@ -1885,6 +1899,23 @@ static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
return 0;
}
+
+static uint8_t fiji_get_sleep_divider_id_from_clock(uint32_t clock,
+ uint32_t clock_insr)
+{
+ uint8_t i;
+ uint32_t temp;
+ uint32_t min = max(clock_insr, (uint32_t)FIJI_MINIMUM_ENGINE_CLOCK);
+
+ PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
+ for (i = FIJI_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+ temp = clock >> i;
+
+ if (temp >= min || i == 0)
+ break;
+ }
+ return i;
+}
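
Note: fiji_get_sleep_divider_id_from_clock() returns the largest shift count
whose divided clock still meets the deep-sleep floor. A worked example,
assuming FIJI_MAX_DEEPSLEEP_DIVIDER_ID is 5:

    /* With clock = 800 and min = 120:
     * 800 >> 5 = 25, 800 >> 4 = 50, 800 >> 3 = 100 (all below 120),
     * 800 >> 2 = 200 >= 120, so the loop breaks and returns i = 2,
     * i.e. a deep-sleep divider of 1 << 2 = 4. */
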
/**
* Populates single SMC SCLK structure using the provided engine clock
*
@@ -1928,17 +1959,13 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
threshold = clock * data->fast_watermark_threshold / 100;
- /*
- * TODO: get minimum clocks from dal configaration
- * PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks);
- */
- /* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */
- /* get level->DeepSleepDivId
- if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
- {
- level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR);
- } */
+ data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId = fiji_get_sleep_divider_id_from_clock(clock,
+ hwmgr->display_config.min_core_set_clock_in_sr);
+
/* Default to slow, highest DPM level will be
* set to PPSMC_DISPLAY_WATERMARK_LOW later.
@@ -3364,7 +3391,7 @@ static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
DPM_EVENT_SRC, src);
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
THERMAL_PROTECTION_DIS,
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ThermalController));
} else
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
@@ -3548,46 +3575,11 @@ static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
return 0;
}
-static void fiji_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_clock_voltage_dependency_table *table =
- table_info->vddc_dep_on_dal_pwrl;
- struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
- enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
- uint32_t req_vddc = 0, req_volt, i;
-
- if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW &&
- dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE))
- return;
-
- for (i= 0; i < table->count; i++) {
- if (dal_power_level == table->entries[i].clk) {
- req_vddc = table->entries[i].v;
- break;
- }
- }
-
- vddc_table = table_info->vdd_dep_on_sclk;
- for (i= 0; i < vddc_table->count; i++) {
- if (req_vddc <= vddc_table->entries[i].vddc) {
- req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE)
- << VDDC_SHIFT;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VddC_Request, req_volt);
- return;
- }
- }
- printk(KERN_ERR "DAL requested level can not"
- " found a available voltage in VDDC DPM Table \n");
-}
-
static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- fiji_apply_dal_min_voltage_request(hwmgr);
+ phm_apply_dal_min_voltage_request(hwmgr);
if (!data->sclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
@@ -4066,7 +4058,6 @@ static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
uint32_t mclk = fiji_ps->performance_levels
[fiji_ps->performance_level_count - 1].memory_clock;
- struct PP_Clocks min_clocks = {0};
uint32_t i;
struct cgs_display_info info = {0};
@@ -4080,10 +4071,8 @@ static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
if (i >= sclk_table->count)
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
else {
- /* TODO: Check SCLK in DAL's minimum clocks
- * in case DeepSleep divider update is required.
- */
- if(data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR)
+ if (data->display_timing.min_clock_in_sr !=
+ hwmgr->display_config.min_core_set_clock_in_sr)
data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
}
@@ -4327,7 +4316,7 @@ static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
if (data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
- result = fiji_populate_all_memory_levels(hwmgr);
+ result = fiji_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
return result);
@@ -5086,24 +5075,40 @@ static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- *table = (char *)&data->smc_state_table;
+ if (!data->soft_pp_table) {
+ data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size,
+ GFP_KERNEL);
+ if (!data->soft_pp_table)
+ return -ENOMEM;
+ }
+
+ *table = (char *)data->soft_pp_table;
- return sizeof(struct SMU73_Discrete_DpmTable);
+ return hwmgr->soft_pp_table_size;
}
static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- void *table = (void *)&data->smc_state_table;
+ if (!data->soft_pp_table) {
+ data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+ if (!data->soft_pp_table)
+ return -ENOMEM;
+ }
+
+ memcpy(data->soft_pp_table, buf, size);
- memcpy(table, buf, size);
+ hwmgr->soft_pp_table = data->soft_pp_table;
+
+ /* TODO: re-init powerplay to implement modified pptable */
return 0;
}
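
Note: the pp_table interface now round-trips the soft PowerPlay table rather
than exposing the live SMC state table: the first read kmemdup()s
hwmgr->soft_pp_table so callers edit a private copy, and a write copies the
buffer back and repoints hwmgr->soft_pp_table at it. A hypothetical round
trip (the direct calls stand in for the usual pp_table dispatch):

    char *tbl;
    int size = fiji_get_pp_table(hwmgr, &tbl);   /* cached private copy */
    /* ... modify tbl[0 .. size-1] ... */
    fiji_set_pp_table(hwmgr, tbl, size);         /* upload the edits */
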
static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, int level)
+ enum pp_clock_type type, uint32_t mask)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -5115,20 +5120,30 @@ static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
if (!data->sclk_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
break;
+
case PP_MCLK:
if (!data->mclk_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
break;
+
case PP_PCIE:
+ {
+ uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ uint32_t level = 0;
+
+ while (tmp >>= 1)
+ level++;
+
if (!data->pcie_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
- (1 << level));
+ level);
break;
+ }
default:
break;
}
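
Note: for PP_PCIE the SMC still expects a single forced level, so the new
code reduces the mask to the index of its highest set bit with the
`while (tmp >>= 1) level++;` loop. Illustration with hypothetical values:

    /* tmp = 0b1010 -> shifts: 0b101, 0b10, 0b1, 0 -> level = 3,
     * i.e. the highest permitted PCIe DPM level is forced. */
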
@@ -5252,19 +5267,19 @@ bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *h
if (data->display_timing.num_existing_displays != info.display_count)
is_update_required = true;
-/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
- if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
- cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
- if(min_clocks.engineClockInSR != data->display_timing.minClockInSR)
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+ if (hwmgr->display_config.min_core_set_clock_in_sr != data->display_timing.min_clock_in_sr)
is_update_required = true;
-*/
+ }
+
return is_update_required;
}
static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
.backend_init = &fiji_hwmgr_backend_init,
- .backend_fini = &tonga_hwmgr_backend_fini,
+ .backend_fini = &fiji_hwmgr_backend_fini,
.asic_setup = &fiji_setup_asic_task,
.dynamic_state_management_enable = &fiji_enable_dpm_tasks,
.force_dpm_level = &fiji_dpm_force_dpm_level,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
index a16f7cd4c..170edf5a7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
@@ -263,7 +263,7 @@ struct fiji_hwmgr {
bool enable_tdc_limit_feature;
bool enable_pkg_pwr_tracking_feature;
bool disable_uvd_power_tune_feature;
- struct fiji_pt_defaults *power_tune_defaults;
+ const struct fiji_pt_defaults *power_tune_defaults;
struct SMU73_Discrete_PmFuses power_tune_table;
uint32_t dte_tj_offset;
uint32_t fast_watermark_threshold;
@@ -302,6 +302,9 @@ struct fiji_hwmgr {
bool pg_acp_init;
bool frtc_enabled;
bool frtc_status_changed;
+
+ /* soft pptable for re-uploading into smu */
+ void *soft_pp_table;
};
/* To convert to Q8.8 format for firmware */
@@ -338,7 +341,6 @@ enum Fiji_I2CLineID {
#define FIJI_UNUSED_GPIO_PIN 0x7F
extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
-extern int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);
extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
index 6efcb2bac..db23a4068 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
@@ -32,7 +32,7 @@
#define VOLTAGE_SCALE 4
#define POWERTUNE_DEFAULT_SET_MAX 1
-struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
/*sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
{1, 0xF, 0xFD,
/* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
@@ -143,7 +143,7 @@ static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda)
int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_pt_defaults *defaults = data->power_tune_defaults;
+ const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
SMU73_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -222,7 +222,7 @@ int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_pt_defaults *defaults = data->power_tune_defaults;
+ const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
@@ -238,7 +238,7 @@ static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct fiji_pt_defaults *defaults = data->power_tune_defaults;
+ const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
/* TDC number of fraction bits are changed from 8 to 7
* for Fiji as requested by SMC team
@@ -256,7 +256,7 @@ static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_pt_defaults *defaults = data->power_tune_defaults;
+ const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
uint32_t temp;
if (fiji_read_smc_sram_dword(hwmgr->smumgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
index e76a7de9a..92976b68d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c
@@ -221,8 +221,8 @@ int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (duty100 == 0)
return -EINVAL;
- tmp64 = (uint64_t)speed * 100;
- do_div(tmp64, duty100);
+ tmp64 = (uint64_t)speed * duty100;
+ do_div(tmp64, 100);
duty = (uint32_t)tmp64;
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
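
Note: this fan hunk reverses the percent-to-duty conversion. The duty
register counts duty100 pulses at 100%, so a requested percentage maps to
duty = speed * duty100 / 100, not speed * 100 / duty100. A worked example,
assuming duty100 = 255:

    /* speed = 50 (percent): the old code gave 50 * 100 / 255 = 19,
     * the fixed code gives 50 * 255 / 100 = 127, i.e. ~50% duty. */
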
@@ -615,7 +615,7 @@ static int tf_fiji_thermal_disable_alert(struct pp_hwmgr *hwmgr,
return fiji_thermal_disable_alert(hwmgr);
}
-static struct phm_master_table_item
+static const struct phm_master_table_item
fiji_thermal_start_thermal_controller_master_list[] = {
{NULL, tf_fiji_thermal_initialize},
{NULL, tf_fiji_thermal_set_temperature_range},
@@ -630,14 +630,14 @@ fiji_thermal_start_thermal_controller_master_list[] = {
{NULL, NULL}
};
-static struct phm_master_table_header
+static const struct phm_master_table_header
fiji_thermal_start_thermal_controller_master = {
0,
PHM_MasterTableFlag_None,
fiji_thermal_start_thermal_controller_master_list
};
-static struct phm_master_table_item
+static const struct phm_master_table_item
fiji_thermal_set_temperature_range_master_list[] = {
{NULL, tf_fiji_thermal_disable_alert},
{NULL, tf_fiji_thermal_set_temperature_range},
@@ -645,7 +645,7 @@ fiji_thermal_set_temperature_range_master_list[] = {
{NULL, NULL}
};
-struct phm_master_table_header
+static const struct phm_master_table_header
fiji_thermal_set_temperature_range_master = {
0,
PHM_MasterTableFlag_None,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
index 72cfecc4f..7a705cee0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
@@ -84,7 +84,7 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
}
int phm_construct_table(struct pp_hwmgr *hwmgr,
- struct phm_master_table_header *master_table,
+ const struct phm_master_table_header *master_table,
struct phm_runtime_table_header *rt_table)
{
uint32_t function_count = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 5fb98aa2e..20f20e075 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -30,10 +30,14 @@
#include "pppcielanes.h"
#include "pp_debug.h"
#include "ppatomctrl.h"
+#include "ppsmc.h"
+
+#define VOLTAGE_SCALE 4
extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
+extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
{
@@ -67,6 +71,10 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
case CHIP_FIJI:
fiji_hwmgr_init(hwmgr);
break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ polaris10_hwmgr_init(hwmgr);
+ break;
default:
return -EINVAL;
}
@@ -85,6 +93,13 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
if (hwmgr == NULL || hwmgr->ps == NULL)
return -EINVAL;
+ /* free hwmgr-owned backend and table resources */
+ kfree(hwmgr->backend);
+
+ kfree(hwmgr->start_thermal_controller.function_list);
+
+ kfree(hwmgr->set_temperature_range.function_list);
+
kfree(hwmgr->ps);
kfree(hwmgr);
return 0;
@@ -454,7 +469,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u
PP_ASSERT_WITH_CODE(false,
"VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i].value);
+ return vddci_table->entries[i-1].value);
}
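
Note: when the requested VDDCI exceeds every table entry, the (elided) loop
is assumed to scan entries in ascending order and run off the end with
i == vddci_table->count, so entries[i] read one past the array; returning
entries[i-1] clamps to the highest valid voltage instead.

    /* Illustration: count = 3, entries = {800, 900, 1000} mV,
     * vddci = 1100 -> no match, i ends at 3; entries[2] (1000 mV)
     * is returned rather than the out-of-bounds entries[3]. */
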
int phm_find_boot_level(void *table,
@@ -561,3 +576,38 @@ uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
return level;
}
+
+void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_clock_voltage_dependency_table *table =
+ table_info->vddc_dep_on_dal_pwrl;
+ struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
+ enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
+ uint32_t req_vddc = 0, req_volt, i;
+
+ if (!table || table->count <= 0
+ || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
+ || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
+ return;
+
+ for (i = 0; i < table->count; i++) {
+ if (dal_power_level == table->entries[i].clk) {
+ req_vddc = table->entries[i].v;
+ break;
+ }
+ }
+
+ vddc_table = table_info->vdd_dep_on_sclk;
+ for (i = 0; i < vddc_table->count; i++) {
+ if (req_vddc <= vddc_table->entries[i].vddc) {
+ req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_VddC_Request, req_volt);
+ return;
+ }
+ }
+ printk(KERN_ERR "DAL requested level could not find"
+ " an available voltage in the VDDC DPM table\n");
+}
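
Note: the DAL minimum-voltage helper is now shared through hwmgr.c (Fiji
calls phm_apply_dal_min_voltage_request() above), and the request is scaled
by VOLTAGE_SCALE = 4, which is assumed here to convert millivolts into the
SMC's quarter-millivolt units:

    /* Illustration: a 1150 mV table entry yields
     * req_volt = 1150 * 4 = 4600 for PPSMC_MSG_VddC_Request. */
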
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
index c9e6c2d80..2930a3355 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
@@ -39,6 +39,7 @@ struct phm_ppt_v1_clock_voltage_dependency_record {
uint8_t phases;
uint8_t cks_enable;
uint8_t cks_voffset;
+ uint32_t sclk_offset;
};
typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record;
@@ -92,6 +93,8 @@ typedef struct phm_ppt_v1_voltage_lookup_table phm_ppt_v1_voltage_lookup_table;
struct phm_ppt_v1_pcie_record {
uint8_t gen_speed;
uint8_t lane_width;
+ uint16_t usreserved;
+ uint32_t pcie_sclk;
};
typedef struct phm_ppt_v1_pcie_record phm_ppt_v1_pcie_record;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
new file mode 100644
index 000000000..8f142a74a
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "polaris10_clockpowergating.h"
+
+int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cf_want_uvd_power_gating(hwmgr))
+ return smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_UVDPowerOFF);
+ return 0;
+}
+
+int polaris10_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cf_want_uvd_power_gating(hwmgr)) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDynamicPowerGating)) {
+ return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_UVDPowerON, 1);
+ } else {
+ return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_UVDPowerON, 0);
+ }
+ }
+
+ return 0;
+}
+
+int polaris10_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cf_want_vce_power_gating(hwmgr))
+ return smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_VCEPowerOFF);
+ return 0;
+}
+
+int polaris10_phm_powerup_vce(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cf_want_vce_power_gating(hwmgr))
+ return smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_VCEPowerON);
+ return 0;
+}
+
+int polaris10_phm_powerdown_samu(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SamuPowerGating))
+ return smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_SAMPowerOFF);
+ return 0;
+}
+
+int polaris10_phm_powerup_samu(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SamuPowerGating))
+ return smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_SAMPowerON);
+ return 0;
+}
+
+int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ data->uvd_power_gated = false;
+ data->vce_power_gated = false;
+ data->samu_power_gated = false;
+
+ polaris10_phm_powerup_uvd(hwmgr);
+ polaris10_phm_powerup_vce(hwmgr);
+ polaris10_phm_powerup_samu(hwmgr);
+
+ return 0;
+}
+
+int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (data->uvd_power_gated == bgate)
+ return 0;
+
+ data->uvd_power_gated = bgate;
+
+ if (bgate) {
+ polaris10_update_uvd_dpm(hwmgr, true);
+ polaris10_phm_powerdown_uvd(hwmgr);
+ } else {
+ polaris10_phm_powerup_uvd(hwmgr);
+ polaris10_update_uvd_dpm(hwmgr, false);
+ }
+
+ return 0;
+}
+
+int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (data->vce_power_gated == bgate)
+ return 0;
+
+ data->vce_power_gated = bgate;
+
+ if (bgate)
+ polaris10_phm_powerdown_vce(hwmgr);
+ else
+ polaris10_phm_powerup_vce(hwmgr);
+
+ return 0;
+}
+
+int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (data->samu_power_gated == bgate)
+ return 0;
+
+ data->samu_power_gated = bgate;
+
+ if (bgate) {
+ polaris10_update_samu_dpm(hwmgr, true);
+ polaris10_phm_powerdown_samu(hwmgr);
+ } else {
+ polaris10_phm_powerup_samu(hwmgr);
+ polaris10_update_samu_dpm(hwmgr, false);
+ }
+
+ return 0;
+}
+
+int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
+ const uint32_t *msg_id)
+{
+ PPSMC_Msg msg;
+ uint32_t value;
+
+ switch ((*msg_id & PP_GROUP_MASK) >> PP_GROUP_SHIFT) {
+ case PP_GROUP_GFX:
+ switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
+ case PP_BLOCK_GFX_CG:
+ if (PP_STATE_SUPPORT_CG & *msg_id) {
+ msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_GFX_CGCG_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+ if (PP_STATE_SUPPORT_LS & *msg_id) {
+ msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS
+ ? PPSMC_MSG_EnableClockGatingFeature
+ : PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_GFX_CGLS_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+ break;
+
+ case PP_BLOCK_GFX_3D:
+ if (PP_STATE_SUPPORT_CG & *msg_id) {
+ msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_GFX_3DCG_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+
+ if (PP_STATE_SUPPORT_LS & *msg_id) {
+ msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_GFX_3DLS_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+ break;
+
+ case PP_BLOCK_GFX_RLC:
+ if (PP_STATE_SUPPORT_LS & *msg_id) {
+ msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_GFX_RLC_LS_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+ break;
+
+ case PP_BLOCK_GFX_CP:
+ if (PP_STATE_SUPPORT_LS & *msg_id) {
+ msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_GFX_CP_LS_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+ break;
+
+ case PP_BLOCK_GFX_MG:
+ if (PP_STATE_SUPPORT_CG & *msg_id) {
+ msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = (CG_CPF_MGCG_MASK | CG_RLC_MGCG_MASK |
+ CG_GFX_OTHERS_MGCG_MASK);
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+ break;
+
+ default:
+ return -1;
+ }
+ break;
+
+ case PP_GROUP_SYS:
+ switch ((*msg_id & PP_BLOCK_MASK) >> PP_BLOCK_SHIFT) {
+ case PP_BLOCK_SYS_BIF:
+ if (PP_STATE_SUPPORT_CG & *msg_id) {
+ msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_SYS_BIF_MGCG_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+ if (PP_STATE_SUPPORT_LS & *msg_id) {
+ msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_SYS_BIF_MGLS_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+ break;
+
+ case PP_BLOCK_SYS_MC:
+ if (PP_STATE_SUPPORT_CG & *msg_id) {
+ msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_SYS_MC_MGCG_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+
+ if (PP_STATE_SUPPORT_LS & *msg_id) {
+ msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_SYS_MC_MGLS_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+ break;
+
+ case PP_BLOCK_SYS_DRM:
+ if (PP_STATE_SUPPORT_CG & *msg_id) {
+ msg = (*msg_id & PP_STATE_MASK) & PP_STATE_CG ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_SYS_DRM_MGCG_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+ if (PP_STATE_SUPPORT_LS & *msg_id) {
+ msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_SYS_DRM_MGLS_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+ break;
+
+ case PP_BLOCK_SYS_HDP:
+ if (PP_STATE_SUPPORT_CG & *msg_id) {
+ msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_SYS_HDP_MGCG_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+
+ if (PP_STATE_SUPPORT_LS & *msg_id) {
+ msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_SYS_HDP_MGLS_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+ break;
+
+ case PP_BLOCK_SYS_SDMA:
+ if (PP_STATE_SUPPORT_CG & *msg_id) {
+ msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_SYS_SDMA_MGCG_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+
+ if (PP_STATE_SUPPORT_LS & *msg_id) {
+ msg = (*msg_id & PP_STATE_MASK) & PP_STATE_LS ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_SYS_SDMA_MGLS_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+ break;
+
+ case PP_BLOCK_SYS_ROM:
+ if (PP_STATE_SUPPORT_CG & *msg_id) {
+ msg = ((*msg_id & PP_STATE_MASK) & PP_STATE_CG) ?
+ PPSMC_MSG_EnableClockGatingFeature :
+ PPSMC_MSG_DisableClockGatingFeature;
+ value = CG_SYS_ROM_MASK;
+
+ if (smum_send_msg_to_smc_with_parameter(
+ hwmgr->smumgr, msg, value))
+ return -1;
+ }
+ break;
+
+ default:
+ return -1;
+
+ }
+ break;
+
+ default:
+ return -1;
+
+ }
+
+ return 0;
+}
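
Note: polaris10_phm_update_clock_gatings() decodes a packed request word:
group and block are extracted with the PP_GROUP/PP_BLOCK masks and shifts,
while the PP_STATE_SUPPORT_* and PP_STATE_* bits select clock gating (CG)
versus light sleep (LS). A hypothetical request, assuming the fields compose
with the same masks used for decoding:

    /* Enable coarse clock gating on the GFX CGCG block. */
    uint32_t msg_id = (PP_GROUP_GFX << PP_GROUP_SHIFT) |
                      (PP_BLOCK_GFX_CG << PP_BLOCK_SHIFT) |
                      PP_STATE_SUPPORT_CG | PP_STATE_CG;
    polaris10_phm_update_clock_gatings(hwmgr, &msg_id);
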
+
+/* This function is for Polaris11 only for now;
+ * Powerplay will only control the static per CU Power Gating.
+ * Dynamic per CU Power Gating will be done in gfx.
+ */
+int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
+{
+ struct cgs_system_info sys_info = {0};
+ uint32_t active_cus;
+ int result;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
+
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+
+ if (result)
+ return -EINVAL;
+ else
+ active_cus = sys_info.value;
+
+ if (enable)
+ return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus);
+ else
+ return smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_GFX_CU_PG_DISABLE);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h
new file mode 100644
index 000000000..88d68cb6e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _POLARIS10_CLOCK_POWER_GATING_H_
+#define _POLARIS10_CLOCK_POWER_GATING_H_
+
+#include "polaris10_hwmgr.h"
+#include "pp_asicblocks.h"
+
+int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
+int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
+int polaris10_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
+int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
+int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
+ const uint32_t *msg_id);
+int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+
+#endif /* _POLARIS10_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
new file mode 100644
index 000000000..f78ffd935
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef POLARIS10_DYN_DEFAULTS_H
+#define POLARIS10_DYN_DEFAULTS_H
+
+
+enum Polaris10dpm_TrendDetection {
+ Polaris10Adpm_TrendDetection_AUTO,
+ Polaris10Adpm_TrendDetection_UP,
+ Polaris10Adpm_TrendDetection_DOWN
+};
+typedef enum Polaris10dpm_TrendDetection Polaris10dpm_TrendDetection;
+
+/* We need to fill in the default values */
+
+
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
+#define PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000
+
+
+#define PPPOLARIS10_THERMALPROTECTCOUNTER_DFLT 0x200
+#define PPPOLARIS10_STATICSCREENTHRESHOLDUNIT_DFLT 0
+#define PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT 0x00C8
+#define PPPOLARIS10_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200
+#define PPPOLARIS10_REFERENCEDIVIDER_DFLT 4
+
+#define PPPOLARIS10_ULVVOLTAGECHANGEDELAY_DFLT 1687
+
+#define PPPOLARIS10_CGULVPARAMETER_DFLT 0x00040035
+#define PPPOLARIS10_CGULVCONTROL_DFLT 0x00007450
+#define PPPOLARIS10_TARGETACTIVITY_DFLT 50
+#define PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT 10
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
new file mode 100644
index 000000000..91e25f942
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -0,0 +1,5060 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <asm/div64.h>
+#include "linux/delay.h"
+#include "pp_acpi.h"
+#include "hwmgr.h"
+#include "polaris10_hwmgr.h"
+#include "polaris10_powertune.h"
+#include "polaris10_dyn_defaults.h"
+#include "polaris10_smumgr.h"
+#include "pp_debug.h"
+#include "ppatomctrl.h"
+#include "atombios.h"
+#include "tonga_pptable.h"
+#include "pppcielanes.h"
+#include "amd_pcie_helpers.h"
+#include "hardwaremanager.h"
+#include "tonga_processpptables.h"
+#include "cgs_common.h"
+#include "smu74.h"
+#include "smu_ucode_xfer_vi.h"
+#include "smu74_discrete.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gca/gfx_8_0_d.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#include "polaris10_thermal.h"
+#include "polaris10_clockpowergating.h"
+
+#define MC_CG_ARB_FREQ_F0 0x0a
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define MC_CG_ARB_FREQ_F2 0x0c
+#define MC_CG_ARB_FREQ_F3 0x0d
+
+#define MC_CG_SEQ_DRAMCONF_S0 0x05
+#define MC_CG_SEQ_DRAMCONF_S1 0x06
+#define MC_CG_SEQ_YCLK_SUSPEND 0x04
+#define MC_CG_SEQ_YCLK_RESUME 0x0a
+
+
+#define SMC_RAM_END 0x40000
+
+#define SMC_CG_IND_START 0xc0030000
+#define SMC_CG_IND_END 0xc0040000
+
+#define VOLTAGE_SCALE 4
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+
+#define VDDC_VDDCI_DELTA 200
+
+#define MEM_FREQ_LOW_LATENCY 25000
+#define MEM_FREQ_HIGH_LATENCY 80000
+
+#define MEM_LATENCY_HIGH 45
+#define MEM_LATENCY_LOW 35
+#define MEM_LATENCY_ERR 0xFFFF
+
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+
+
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+
+static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
+{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
+
+/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */
+static const uint32_t polaris10_clock_stretcher_ddt_table[2][4][4] =
+{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
+ { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
+
+/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
+static const uint8_t polaris10_clock_stretch_amount_conversion[2][6] =
+{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
+
+/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
+enum DPM_EVENT_SRC {
+ DPM_EVENT_SRC_ANALOG = 0,
+ DPM_EVENT_SRC_EXTERNAL = 1,
+ DPM_EVENT_SRC_DIGITAL = 2,
+ DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+ DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
+};
+
+static const unsigned long PhwPolaris10_Magic = (unsigned long)(PHM_VIslands_Magic);
+
+struct polaris10_power_state *cast_phw_polaris10_power_state(
+ struct pp_hw_power_state *hw_ps)
+{
+ PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
+ "Invalid Powerstate Type!",
+ return NULL);
+
+ return (struct polaris10_power_state *)hw_ps;
+}
+
+const struct polaris10_power_state *cast_const_phw_polaris10_power_state(
+ const struct pp_hw_power_state *hw_ps)
+{
+ PP_ASSERT_WITH_CODE((PhwPolaris10_Magic == hw_ps->magic),
+ "Invalid Powerstate Type!",
+ return NULL);
+
+ return (const struct polaris10_power_state *)hw_ps;
+}
+
+static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
+ ? true : false;
+}
+
+/**
+ * Find the MC microcode version and store it in the HwMgr struct
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0
+ */
+int phm_get_mc_microcode_version (struct pp_hwmgr *hwmgr)
+{
+ cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
+
+ hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
+
+ return 0;
+}
+
+uint16_t phm_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+ uint32_t speedCntl = 0;
+
+ /* mmPCIE_PORT_INDEX renamed to mmPCIE_INDEX */
+ speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
+ ixPCIE_LC_SPEED_CNTL);
+ return((uint16_t)PHM_GET_FIELD(speedCntl,
+ PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
+}
+
+int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
+{
+ uint32_t link_width;
+
+ /* mmPCIE_PORT_INDEX renamed to mmPCIE_INDEX */
+ link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
+ PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
+
+ PP_ASSERT_WITH_CODE((7 >= link_width),
+ "Invalid PCIe lane width!", return 0);
+
+ return decode_pcie_lane_width(link_width);
+}
+
+/**
+* Enable the SMC voltage controller.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success; 1 if enabling voltage DPM failed.
+*/
+int polaris10_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
+{
+ PP_ASSERT_WITH_CODE(
+ (hwmgr->smumgr->smumgr_funcs->send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable) == 0),
+ "Failed to enable voltage DPM during DPM Start Function!",
+ return 1;
+ );
+
+ return 0;
+}
+
+/**
+* Checks if we want to support voltage control
+*
+* @param hwmgr the address of the powerplay hardware manager.
+*/
+static bool polaris10_voltage_control(const struct pp_hwmgr *hwmgr)
+{
+ const struct polaris10_hwmgr *data =
+ (const struct polaris10_hwmgr *)(hwmgr->backend);
+
+ return (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control);
+}
+
+/**
+* Enable voltage control
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int polaris10_enable_voltage_control(struct pp_hwmgr *hwmgr)
+{
+ /* enable voltage control */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
+
+ return 0;
+}
+
+/**
+* Create Voltage Tables.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int polaris10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ int result;
+
+ if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ result = atomctrl_get_voltage_table_v3(hwmgr,
+ VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
+ &(data->mvdd_voltage_table));
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve MVDD table.",
+ return result);
+ } else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
+ table_info->vdd_dep_on_mclk);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve SVI2 MVDD table from dependancy table.",
+ return result;);
+ }
+
+ if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ result = atomctrl_get_voltage_table_v3(hwmgr,
+ VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
+ &(data->vddci_voltage_table));
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve VDDCI table.",
+ return result);
+ } else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
+ table_info->vdd_dep_on_mclk);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve SVI2 VDDCI table from dependancy table.",
+ return result);
+ }
+
+ if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
+ table_info->vddc_lookup_table);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to retrieve SVI2 VDDC table from lookup table.",
+ return result);
+ }
+
+ PP_ASSERT_WITH_CODE(
+ (data->vddc_voltage_table.count <= (SMU74_MAX_LEVELS_VDDC)),
+ "Too many voltage values for VDDC. Trimming to fit state table.",
+ phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDC,
+ &(data->vddc_voltage_table)));
+
+ PP_ASSERT_WITH_CODE(
+ (data->vddci_voltage_table.count <= (SMU74_MAX_LEVELS_VDDCI)),
+ "Too many voltage values for VDDCI. Trimming to fit state table.",
+ phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_VDDCI,
+ &(data->vddci_voltage_table)));
+
+ PP_ASSERT_WITH_CODE(
+ (data->mvdd_voltage_table.count <= (SMU74_MAX_LEVELS_MVDD)),
+ "Too many voltage values for MVDD. Trimming to fit state table.",
+ phm_trim_voltage_table_to_fit_state_table(SMU74_MAX_LEVELS_MVDD,
+ &(data->mvdd_voltage_table)));
+
+ return 0;
+}
+
+/**
+* Programs static screen detection parameters.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int polaris10_program_static_screen_threshold_parameters(
+ struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ /* Set static screen threshold unit */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
+ data->static_screen_threshold_unit);
+ /* Set static screen threshold */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
+ data->static_screen_threshold);
+
+ return 0;
+}
+
+/**
+* Set up the display gap for glitch-free memory clock switching.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int polaris10_enable_display_gap(struct pp_hwmgr *hwmgr)
+{
+ uint32_t display_gap =
+ cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_DISPLAY_GAP_CNTL);
+
+ display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
+ DISP_GAP, DISPLAY_GAP_IGNORE);
+
+ display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
+ DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+ return 0;
+}
+
+/**
+* Programs activity state transition voting clients
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ /* Clear reset for voting clients before enabling DPM */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
+
+ return 0;
+}
+
+/**
+* Get the location of various tables inside the FW image.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ uint32_t tmp;
+ int result;
+ bool error = false;
+
+ result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, DpmTable),
+ &tmp, data->sram_end);
+
+ if (!result)
+ data->dpm_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, SoftRegisters),
+ &tmp, data->sram_end);
+
+ if (!result) {
+ data->soft_regs_start = tmp;
+ smu_data->soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, mcRegisterTable),
+ &tmp, data->sram_end);
+
+ if (!result)
+ data->mc_reg_table_start = tmp;
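+ /* Note: unlike the other reads, a failed mcRegisterTable lookup is
+ * not folded into 'error'; the MC register table is apparently
+ * treated as optional here.
+ */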
+
+ result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, FanTable),
+ &tmp, data->sram_end);
+
+ if (!result)
+ data->fan_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
+ &tmp, data->sram_end);
+
+ if (!result)
+ data->arb_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, Version),
+ &tmp, data->sram_end);
+
+ if (!result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (0 != result);
+
+ return error ? -1 : 0;
+}
+
+/* Copy one arb setting to another and then switch the active set.
+ * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
+ */
+static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
+ uint32_t arb_src, uint32_t arb_dest)
+{
+ uint32_t mc_arb_dram_timing;
+ uint32_t mc_arb_dram_timing2;
+ uint32_t burst_time;
+ uint32_t mc_cg_config;
+
+ switch (arb_src) {
+ case MC_CG_ARB_FREQ_F0:
+ mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+ break;
+ case MC_CG_ARB_FREQ_F1:
+ mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
+ mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
+ burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (arb_dest) {
+ case MC_CG_ARB_FREQ_F0:
+ cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
+ cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
+ PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
+ break;
+ case MC_CG_ARB_FREQ_F1:
+ cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
+ cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
+ PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
+ mc_cg_config |= 0x0000000F;
+ cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
+ PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
+
+ return 0;
+}
+
+/**
+* Initial switch from ARB F0->F1
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+* This function is to be called from the SetPowerState table.
+*/
+static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
+{
+ return polaris10_copy_and_switch_arb_sets(hwmgr,
+ MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ uint32_t i, max_entry;
+
+ PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
+ data->use_pcie_power_saving_levels), "No pcie performance levels!",
+ return -EINVAL);
+
+ if (data->use_pcie_performance_levels &&
+ !data->use_pcie_power_saving_levels) {
+ data->pcie_gen_power_saving = data->pcie_gen_performance;
+ data->pcie_lane_power_saving = data->pcie_lane_performance;
+ } else if (!data->use_pcie_performance_levels &&
+ data->use_pcie_power_saving_levels) {
+ data->pcie_gen_performance = data->pcie_gen_power_saving;
+ data->pcie_lane_performance = data->pcie_lane_power_saving;
+ }
+
+ phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
+ SMU74_MAX_LEVELS_LINK,
+ MAX_REGULAR_DPM_NUMBER);
+
+ if (pcie_table != NULL) {
+ /* max_entry is used to make sure we reserve one PCIE level
+ * for the boot level (fix for A+A PSPP issue).
+ * If the PCIE table from the PPTable has a ULV entry plus 8 entries,
+ * ignore the last entry.
+ */
+ max_entry = (SMU74_MAX_LEVELS_LINK < pcie_table->count) ?
+ SMU74_MAX_LEVELS_LINK : pcie_table->count;
+ for (i = 1; i < max_entry; i++) {
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ pcie_table->entries[i].gen_speed),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ pcie_table->entries[i].lane_width));
+ }
+ data->dpm_table.pcie_speed_table.count = max_entry - 1;
+
+ /* Setup BIF_SCLK levels */
+ for (i = 0; i < max_entry; i++)
+ data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk;
+ } else {
+ /* Hardcode Pcie Table */
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Min_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Min_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+
+ data->dpm_table.pcie_speed_table.count = 6;
+ }
+ /* Populate last level for boot PCIE level, but do not increment count. */
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+ data->dpm_table.pcie_speed_table.count,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Min_PCIEGen),
+ get_pcie_lane_support(data->pcie_lane_cap,
+ PP_Max_PCIELane));
+
+ return 0;
+}
+
+/*
+ * This function initializes all DPM state tables
+ * for SMU7 based on the dependency table.
+ * The dynamic state patching function will then trim these
+ * state tables to the allowed range based
+ * on the power policy or external client requests,
+ * such as UVD requests, etc.
+ */
+int polaris10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i;
+
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
+ table_info->vdd_dep_on_sclk;
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+ table_info->vdd_dep_on_mclk;
+
+ PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
+ "SCLK dependency table is missing. This table is mandatory",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
+ "SCLK dependency table must have at least one entry. "
+ "This table is mandatory",
+ return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
+ "MCLK dependency table is missing. This table is mandatory",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
+ "MCLK dependency table must have at least one entry. "
+ "This table is mandatory",
+ return -EINVAL);
+
+ /* clear the state table to reset everything to default */
+ phm_reset_single_dpm_table(
+ &data->dpm_table.sclk_table, SMU74_MAX_LEVELS_GRAPHICS, MAX_REGULAR_DPM_NUMBER);
+ phm_reset_single_dpm_table(
+ &data->dpm_table.mclk_table, SMU74_MAX_LEVELS_MEMORY, MAX_REGULAR_DPM_NUMBER);
+
+
+ /* Initialize Sclk DPM table based on allow Sclk values */
+ data->dpm_table.sclk_table.count = 0;
+ for (i = 0; i < dep_sclk_table->count; i++) {
+ if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
+ dep_sclk_table->entries[i].clk) {
+
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
+ dep_sclk_table->entries[i].clk;
+
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
+ (i == 0);
+ data->dpm_table.sclk_table.count++;
+ }
+ }
+
+ /* Initialize Mclk DPM table based on allow Mclk values */
+ data->dpm_table.mclk_table.count = 0;
+ for (i = 0; i < dep_mclk_table->count; i++) {
+ if (i == 0 || data->dpm_table.mclk_table.dpm_levels
+ [data->dpm_table.mclk_table.count - 1].value !=
+ dep_mclk_table->entries[i].clk) {
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
+ dep_mclk_table->entries[i].clk;
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
+ (i == 0);
+ data->dpm_table.mclk_table.count++;
+ }
+ }
+
+ /* setup PCIE gen speed levels */
+ polaris10_setup_default_pcie_table(hwmgr);
+
+ /* save a copy of the default DPM table */
+ memcpy(&(data->golden_dpm_table), &(data->dpm_table),
+ sizeof(struct polaris10_dpm_table));
+
+ return 0;
+}
+
+uint8_t convert_to_vid(uint16_t vddc)
+{
+ return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
+}
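+
+/* Worked example (assuming VOLTAGE_SCALE == 4, i.e. voltages in mV scaled
+ * to 0.25 mV SVI2 units): convert_to_vid(1100) = (6200 - 4400) / 25 = 72.
+ * Each VID step then corresponds to 6.25 mV below the 1.55 V ceiling.
+ */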
+
+/**
+ * Mvdd table preparation for SMC.
+ *
+ * @param *hwmgr The address of the hardware manager.
+ * @param *table The SMC DPM table structure to be populated.
+ * @return 0
+ */
+static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t count, level;
+
+ if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ count = data->mvdd_voltage_table.count;
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; level++) {
+ table->SmioTable2.Pattern[level].Voltage =
+ PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
+ table->SmioTable2.Pattern[level].Smio =
+ (uint8_t) level;
+ table->Smio[level] |=
+ data->mvdd_voltage_table.entries[level].smio_low;
+ }
+ table->SmioMask2 = data->mvdd_voltage_table.mask_low;
+
+ table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_smc_vddci_table(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ uint32_t count, level;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ count = data->vddci_voltage_table.count;
+
+ if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ if (count > SMU_MAX_SMIO_LEVELS)
+ count = SMU_MAX_SMIO_LEVELS;
+ for (level = 0; level < count; ++level) {
+ table->SmioTable1.Pattern[level].Voltage =
+ PP_HOST_TO_SMC_US(data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE);
+ table->SmioTable1.Pattern[level].Smio = (uint8_t) level;
+
+ table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low;
+ }
+ }
+
+ table->SmioMask1 = data->vddci_voltage_table.mask_low;
+
+ return 0;
+}
+
+/**
+* Preparation of vddc and vddgfx CAC tables for SMC.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+static int polaris10_populate_cac_table(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ uint32_t count;
+ uint8_t index;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+ /* The table is already byte-swapped, so in order to use a value from it
+ * we need to swap it back.
+ * We are populating vddc CAC data to the BapmVddc table
+ * in both split and merged mode.
+ */
+ for (count = 0; count < lookup_table->count; count++) {
+ index = phm_get_voltage_index(lookup_table,
+ data->vddc_voltage_table.entries[count].value);
+ table->BapmVddcVidLoSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_low);
+ table->BapmVddcVidHiSidd[count] = convert_to_vid(lookup_table->entries[index].us_cac_mid);
+ table->BapmVddcVidHiSidd2[count] = convert_to_vid(lookup_table->entries[index].us_cac_high);
+ }
+
+ return 0;
+}
+
+/**
+* Preparation of voltage tables for SMC.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+
+int polaris10_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ polaris10_populate_smc_vddci_table(hwmgr, table);
+ polaris10_populate_smc_mvdd_table(hwmgr, table);
+ polaris10_populate_cac_table(hwmgr, table);
+
+ return 0;
+}
+
+static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_Ulv *state)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
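+ /* Rescale the mV offset into VID steps; with the usual SVI2 constants
+ * (SCALE2 == 100, SCALE1 == 625) this divides by 6.25 mV per step.
+ */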
+ state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+
+ state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
+
+static int polaris10_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ return polaris10_populate_ulv_level(hwmgr, &table->Ulv);
+}
+
+static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_dpm_table *dpm_table = &data->dpm_table;
+ int i;
+
+ /* Index (dpm_table->pcie_speed_table.count)
+ * is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
+ dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
+ table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
+ }
+
+ data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+static uint32_t polaris10_get_xclk(struct pp_hwmgr *hwmgr)
+{
+ uint32_t reference_clock, tmp;
+ struct cgs_display_info info = {0};
+ struct cgs_mode_info mode_info;
+
+ info.mode_info = &mode_info;
+
+ tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK);
+
+ if (tmp)
+ return TCLK;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ reference_clock = mode_info.ref_clock;
+
+ tmp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
+
+ if (0 != tmp)
+ return reference_clock / 4;
+
+ return reference_clock;
+}
+
+/**
+* Calculates the SCLK dividers using the provided engine clock
+*
+* @param hwmgr the address of the hardware manager
+* @param clock the engine clock to use to populate the structure
+* @param sclk_setting the SMC SCLK structure to be populated
+*/
+static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, SMU_SclkSetting *sclk_setting)
+{
+ const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ const SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
+ struct pp_atomctrl_clock_dividers_ai dividers;
+
+ uint32_t ref_clock;
+ uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq;
+ uint8_t i;
+ int result;
+ uint64_t temp;
+
+ sclk_setting->SclkFrequency = clock;
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, &dividers);
+ if (result == 0) {
+ sclk_setting->Fcw_int = dividers.usSclk_fcw_int;
+ sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac;
+ sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int;
+ sclk_setting->PllRange = dividers.ucSclkPllRange;
+ sclk_setting->Sclk_slew_rate = 0x400;
+ sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac;
+ sclk_setting->Pcc_down_slew_rate = 0xffff;
+ sclk_setting->SSc_En = dividers.ucSscEnable;
+ sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int;
+ sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac;
+ sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac;
+ return result;
+ }
+
+ ref_clock = polaris10_get_xclk(hwmgr);
+
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ if (clock > data->range_table[i].trans_lower_frequency
+ && clock <= data->range_table[i].trans_upper_frequency) {
+ sclk_setting->PllRange = i;
+ break;
+ }
+ }
+
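+ /* Fcw is a 16.16 fixed-point ratio of the post-divided target clock to
+ * the reference clock: Fcw_int = (clock << postdiv) / ref_clock, and
+ * Fcw_frac keeps the low 16 bits of the same quotient scaled by 2^16.
+ */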
+ sclk_setting->Fcw_int = (uint16_t)((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+ temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+ temp <<= 0x10;
+ do_div(temp, ref_clock);
+ sclk_setting->Fcw_frac = temp & 0xffff;
+
+ pcc_target_percent = 10; /* Hardcode 10% for now. */
+ pcc_target_freq = clock - (clock * pcc_target_percent / 100);
+ sclk_setting->Pcc_fcw_int = (uint16_t)((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+
+ ss_target_percent = 2; /* Hardcode 2% for now. */
+ sclk_setting->SSc_En = 0;
+ if (ss_target_percent) {
+ sclk_setting->SSc_En = 1;
+ ss_target_freq = clock - (clock * ss_target_percent / 100);
+ sclk_setting->Fcw1_int = (uint16_t)((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / ref_clock);
+ temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv;
+ temp <<= 0x10;
+ do_div(temp, ref_clock);
+ sclk_setting->Fcw1_frac = temp & 0xffff;
+ }
+
+ return 0;
+}
+
+static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_table,
+ uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
+{
+ uint32_t i;
+ uint16_t vddci;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ *voltage = *mvdd = 0;
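+ /* The SMU_VoltageLevel word packs VDDC, VDDCI and the phase count into
+ * one 32-bit value via the VDDC_SHIFT/VDDCI_SHIFT/PHASES_SHIFT offsets;
+ * everything below fills those fields from the dependency table.
+ */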
+
+ /* fail if the clock-voltage dependency table is empty */
+ if (dep_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < dep_table->count; i++) {
+ /* find the first sclk greater than or equal to the request */
+ if (dep_table->entries[i].clk >= clock) {
+ *voltage |= (dep_table->entries[i].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+ if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i].vddci)
+ *voltage |= (dep_table->entries[i].vddci *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i].vddc -
+ (uint16_t)data->vddc_vddci_delta));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+ else if (dep_table->entries[i].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i].mvdd *
+ VOLTAGE_SCALE;
+
+ *voltage |= 1 << PHASES_SHIFT;
+ return 0;
+ }
+ }
+
+ /* the requested sclk is above the highest sclk in the dependency table */
+ *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (POLARIS10_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ *voltage |= (data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE) << VDDCI_SHIFT;
+ else if (dep_table->entries[i - 1].vddci) {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i - 1].vddc -
+ (uint16_t)data->vddc_vddci_delta));
+ *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
+
+ if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
+ *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
+ else if (dep_table->entries[i - 1].mvdd)
+ *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
+
+ return 0;
+}
+
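+/* Fallback SCLK FCW ranges used when the VBIOS does not provide its own
+ * table; judging from the uses below, each row supplies
+ * { vco_setting, postdiv, fcw_pcc, fcw_trans_upper, fcw_trans_lower }.
+ */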
+static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = {
+ {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112},
+ {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160},
+ {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108},
+ {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} };
+
+static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t i, ref_clk;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
+ struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
+
+ ref_clk = polaris10_get_xclk(hwmgr);
+
+ if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) {
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+ table->SclkFcwRangeTable[i].vco_setting = range_table_from_vbios.entry[i].ucVco_setting;
+ table->SclkFcwRangeTable[i].postdiv = range_table_from_vbios.entry[i].ucPostdiv;
+ table->SclkFcwRangeTable[i].fcw_pcc = range_table_from_vbios.entry[i].usFcw_pcc;
+
+ table->SclkFcwRangeTable[i].fcw_trans_upper = range_table_from_vbios.entry[i].usFcw_trans_upper;
+ table->SclkFcwRangeTable[i].fcw_trans_lower = range_table_from_vbios.entry[i].usRcw_trans_lower;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+ }
+ return;
+ }
+
+ for (i = 0; i < NUM_SCLK_RANGE; i++) {
+
+ data->range_table[i].trans_lower_frequency = (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv;
+ data->range_table[i].trans_upper_frequency = (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv;
+
+ table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting;
+ table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv;
+ table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc;
+
+ table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper;
+ table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower);
+ }
+}
+
+/**
+* Populates a single SMC SCLK level structure using the provided engine clock.
+*
+* @param hwmgr the address of the hardware manager
+* @param clock the engine clock to use to populate the structure
+* @param sclk_al_threshold the activity-level threshold for this level
+* @param level the SMC graphics level structure to be populated
+*/
+
+static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU74_Discrete_GraphicsLevel *level)
+{
+ int result, i, temp;
+ /* PP_Clocks minClocks; */
+ uint32_t mvdd;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMU_SclkSetting curr_sclk_setting = { 0 };
+
+ result = polaris10_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting);
+
+ /* populate graphics levels */
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk, clock,
+ &level->MinVoltage, &mvdd);
+
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find VDDC voltage value for "
+ "VDDC engine clock dependency table",
+ return result);
+ level->ActivityLevel = sclk_al_threshold;
+
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ level->EnabledForThrottle = 1;
+ level->UpHyst = 10;
+ level->DownHyst = 0;
+ level->VoltageDownHyst = 0;
+ level->PowerThrottle = 0;
+
+ /*
+ * TODO: get minimum clocks from DAL configuration
+ * PECI_GetMinClockSettings(hwmgr->pPECI, &minClocks);
+ */
+ /* data->DisplayTiming.minClockInSR = minClocks.engineClockInSR; */
+
+ /* get level->DeepSleepDivId
+ if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR);
+ */
+ PP_ASSERT_WITH_CODE((clock >= POLARIS10_MINIMUM_ENGINE_CLOCK), "Engine clock can't satisfy stutter requirement!", return 0);
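+ /* Pick the largest divider exponent i such that (clock >> i) still
+ * meets the minimum engine clock, falling back to i == 0 otherwise.
+ */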
+ for (i = POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+ temp = clock >> i;
+
+ if (temp >= POLARIS10_MINIMUM_ENGINE_CLOCK || i == 0)
+ break;
+ }
+
+ level->DeepSleepDivId = i;
+
+ /* Default to slow, highest DPM level will be
+ * set to PPSMC_DISPLAY_WATERMARK_LOW later.
+ */
+ if (data->update_up_hyst)
+ level->UpHyst = (uint8_t)data->up_hyst;
+ if (data->update_down_hyst)
+ level->DownHyst = (uint8_t)data->down_hyst;
+
+ level->SclkSetting = curr_sclk_setting;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate);
+ return 0;
+}
+
+/**
+* Populates all SMC SCLK level structures based on the trimmed allowed DPM engine clock states.
+*
+* @param hwmgr the address of the hardware manager
+*/
+static int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_dpm_table *dpm_table = &data->dpm_table;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
+ uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
+ int result = 0;
+ uint32_t array = data->dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU74_Discrete_GraphicsLevel) *
+ SMU74_MAX_LEVELS_GRAPHICS;
+ struct SMU74_Discrete_GraphicsLevel *levels =
+ data->smc_state_table.GraphicsLevel;
+ uint32_t i, max_entry;
+ uint8_t highest_pcie_level_enabled = 0,
+ lowest_pcie_level_enabled = 0,
+ mid_pcie_level_enabled = 0,
+ count = 0;
+
+ polaris10_get_sclk_range_table(hwmgr);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+
+ result = polaris10_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)data->activity_target[i],
+ &(data->smc_state_table.GraphicsLevel[i]));
+ if (result)
+ return result;
+
+ /* Make sure only DPM levels 0 and 1 have a deep-sleep divider ID populated. */
+ if (i > 1)
+ levels[i].DeepSleepDivId = 0;
+ }
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SPLLShutdownSupport))
+ data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0;
+
+ data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+ data->smc_state_table.GraphicsDpmLevelCount =
+ (uint8_t)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+
+ if (pcie_table != NULL) {
+ PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+ max_entry = pcie_entry_cnt - 1;
+ for (i = 0; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel =
+ (uint8_t) ((i < max_entry) ? i : max_entry);
+ } else {
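+ /* No PCIE table: derive the levels from the enable mask instead.
+ * Walk up the contiguous run of set bits for the highest level,
+ * find the first set bit for the lowest, then take the next
+ * enabled level above the lowest (clamped to the highest) as mid.
+ */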
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (highest_pcie_level_enabled + 1))) != 0))
+ highest_pcie_level_enabled++;
+
+ while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << lowest_pcie_level_enabled)) == 0))
+ lowest_pcie_level_enabled++;
+
+ while ((count < highest_pcie_level_enabled) &&
+ ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
+ (1 << (lowest_pcie_level_enabled + 1 + count))) == 0))
+ count++;
+
+ mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
+ highest_pcie_level_enabled ?
+ (lowest_pcie_level_enabled + 1 + count) :
+ highest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to highest_pcie_level_enabled */
+ for (i = 2; i < dpm_table->sclk_table.count; i++)
+ levels[i].pcieDpmLevel = highest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to lowest_pcie_level_enabled */
+ levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
+
+ /* set pcieDpmLevel to mid_pcie_level_enabled */
+ levels[1].pcieDpmLevel = mid_pcie_level_enabled;
+ }
+ /* level count will send to smc once at init smc table and never change */
+ result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, data->sram_end);
+
+ return result;
+}
+
+static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU74_Discrete_MemoryLevel *mem_level)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int result = 0;
+ struct cgs_display_info info = {0, 0, NULL};
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (table_info->vdd_dep_on_mclk) {
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk, clock,
+ &mem_level->MinVoltage, &mem_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory "
+ "VDDC voltage dependency table", return result);
+ }
+
+ mem_level->MclkFrequency = clock;
+ mem_level->EnabledForThrottle = 1;
+ mem_level->EnabledForActivity = 0;
+ mem_level->UpHyst = 0;
+ mem_level->DownHyst = 100;
+ mem_level->VoltageDownHyst = 0;
+ mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ mem_level->StutterEnable = false;
+ mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ data->display_timing.num_existing_displays = info.display_count;
+
+ if ((data->mclk_stutter_mode_threshold) &&
+ (clock <= data->mclk_stutter_mode_threshold) &&
+ (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE) & 0x1))
+ mem_level->StutterEnable = true;
+
+ if (!result) {
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
+ }
+ return result;
+}
+
+/**
+* Populates all SMC MCLK level structures based on the trimmed allowed DPM memory clock states.
+*
+* @param hwmgr the address of the hardware manager
+*/
+static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+ /* populate MCLK dpm table to SMU7 */
+ uint32_t array = data->dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
+ uint32_t array_size = sizeof(SMU74_Discrete_MemoryLevel) *
+ SMU74_MAX_LEVELS_MEMORY;
+ struct SMU74_Discrete_MemoryLevel *levels =
+ data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero",
+ return -EINVAL);
+ result = polaris10_populate_single_memory_level(hwmgr,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &levels[i]);
+ if (i == dpm_table->mclk_table.count - 1) {
+ levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+ levels[i].EnabledForActivity = 1;
+ }
+ if (result)
+ return result;
+ }
+
+ /* Prevent MC activity in stutter mode from pushing DPM up;
+ * the UVD change complements this by putting MCLK in
+ * a higher state by default so that we are not affected by
+ * the up threshold or MCLK DPM latency.
+ */
+ levels[0].ActivityLevel = 0x1f;
+ CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
+
+ data->smc_state_table.MemoryDpmLevelCount =
+ (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+ /* level count will send to smc once at init smc table and never change */
+ result = polaris10_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+ (uint32_t)array_size, data->sram_end);
+
+ return result;
+}
+
+/**
+* Populates the SMC MVDD structure using the provided memory clock.
+*
+* @param hwmgr the address of the hardware manager
+* @param mclk the MCLK value used to decide whether MVDD should be high or low.
+* @param smio_pat the SMIO pattern structure to be populated
+*/
+int polaris10_populate_mvdd_value(struct pp_hwmgr *hwmgr,
+ uint32_t mclk, SMIO_Pattern *smio_pat)
+{
+ const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint32_t i = 0;
+
+ if (POLARIS10_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find the first MVDD entry whose clock is at least the requested clock */
+ for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
+ if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
+ smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+ PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
+ "MVDD Voltage is outside the supported range.",
+ return -EINVAL);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
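+
+/* Callers rely on the -EINVAL return when MVDD is not GPIO/SVI2 controlled:
+ * see polaris10_populate_smc_acpi_level() below, which falls back to the
+ * VBIOS boot-up MVDD value, or zero, when this lookup fails.
+ */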
+
+static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint32_t sclk_frequency;
+ const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ SMIO_Pattern vol_level;
+ uint32_t mvdd;
+ uint16_t us_mvdd;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+
+ /* Get MinVoltage and Frequency from DPM0,
+ * already converted to SMC_UL */
+ sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_sclk,
+ sclk_frequency,
+ &table->ACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDC voltage value "
+ "in Clock Dependency Table",
+ );
+
+
+ result = polaris10_calculate_sclk_params(hwmgr, sclk_frequency, &(table->ACPILevel.SclkSetting));
+ PP_ASSERT_WITH_CODE(result == 0, "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ table->ACPILevel.DeepSleepDivId = 0;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac);
+ CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate);
+
+
+ /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
+ table->MemoryACPILevel.MclkFrequency =
+ data->dpm_table.mclk_table.dpm_levels[0].value;
+ result = polaris10_get_dependency_volt_by_clk(hwmgr,
+ table_info->vdd_dep_on_mclk,
+ table->MemoryACPILevel.MclkFrequency,
+ &table->MemoryACPILevel.MinVoltage, &mvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Cannot find ACPI VDDCI voltage value "
+ "in Clock Dependency Table",
+ );
+
+ us_mvdd = 0;
+ if ((POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+ (data->mclk_dpm_key_disabled))
+ us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
+ else {
+ if (!polaris10_populate_mvdd_value(hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[0].value,
+ &vol_level))
+ us_mvdd = vol_level.Voltage;
+ }
+
+ if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
+ table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ table->MemoryACPILevel.StutterEnable = false;
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpHyst = 0;
+ table->MemoryACPILevel.DownHyst = 100;
+ table->MemoryACPILevel.VoltageDownHyst = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
+
+ return result;
+}
+
+static int polaris10_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->VceLevelCount = (uint8_t)(mm_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
+ table->VceLevel[count].MinVoltage = 0;
+ table->VceLevel[count].MinVoltage |=
+ (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+
+ table->VceLevel[count].MinVoltage |=
+ (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int polaris10_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(mm_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ /* not sure whether we need evclk or not */
+ table->SamuLevel[count].MinVoltage = 0;
+ table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
+ table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for samu clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
+ int32_t eng_clock, int32_t mem_clock,
+ SMU74_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ uint32_t dram_timing;
+ uint32_t dram_timing2;
+ uint32_t burst_time;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ eng_clock, mem_clock);
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+
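+ /* PP_HOST_TO_SMC_UL converts the values into the SMC's expected byte
+ * order before they are copied into the shared arb table.
+ */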
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
+ arb_regs->McArbBurstTime = (uint8_t)burst_time;
+
+ return 0;
+}
+
+static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+ int result = 0;
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = polaris10_populate_memory_timing_parameters(hwmgr,
+ data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (result == 0)
+ result = atomctrl_set_ac_timing_ai(hwmgr, data->dpm_table.mclk_table.dpm_levels[j].value, j);
+ if (result != 0)
+ return result;
+ }
+ }
+
+ result = polaris10_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ data->arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU74_Discrete_MCArbDramTimingTable),
+ data->sram_end);
+ return result;
+}
+
+static int polaris10_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t vddci;
+
+ table->UvdLevelCount = (uint8_t)(mm_table->count);
+ table->UvdBootLevel = 0;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].MinVoltage = 0;
+ table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
+ table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
+ VOLTAGE_SCALE) << VDDC_SHIFT;
+
+ if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table),
+ mm_table->entries[count].vddc - VDDC_VDDCI_DELTA);
+ else if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control)
+ vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA;
+ else
+ vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT;
+
+ table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
+
+ /* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Vclk clock", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find divide id for Dclk clock", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
+ }
+
+ return result;
+}
+
+static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table */
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(table->GraphicsBootLevel));
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(table->MemoryBootLevel));
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
+ VOLTAGE_SCALE;
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
+ VOLTAGE_SCALE;
+
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
+ CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
+
+ return 0;
+}
+
+
+static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint8_t count, level;
+
+ count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_sclk->entries[level].clk >=
+ data->vbios_boot_state.sclk_bootup_value) {
+ data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
+ for (level = 0; level < count; level++) {
+ if (table_info->vdd_dep_on_mclk->entries[level].clk >=
+ data->vbios_boot_state.mclk_bootup_value) {
+ data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
+{
+ uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+ stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
+
+ /* Read SMU_EFUSE to compute RO and determine
+ * whether the part is SS or FF. If RO >= 1660 MHz, the part is FF.
+ */
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixSMU_EFUSE_0 + (67 * 4));
+ efuse &= 0xFF000000;
+ efuse = efuse >> 24;
+
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ min = 1000;
+ max = 2300;
+ } else {
+ min = 1100;
+ max = 2100;
+ }
+
+ ro = efuse * (max - min) / 255 + min;
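+ /* e.g. an efuse byte of 0xFF yields ro == max, 0x00 yields ro == min;
+ * the fused value scales linearly between the per-chip bounds above.
+ */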
+
+ /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
+ for (i = 0; i < sclk_table->count; i++) {
+ data->smc_state_table.Sclk_CKS_masterEn0_7 |=
+ sclk_table->entries[i].cks_enable << i;
+ if (hwmgr->chip_id == CHIP_POLARIS10) {
+ volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 - (ro - 70) * 1000000) / \
+ (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
+ volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+ (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
+ } else {
+ volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 - (ro - 50) * 1000000) / \
+ (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+ volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+ (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
+ }
+
+ if (volt_without_cks >= volt_with_cks)
+ volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+ sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
+
+ data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
+ }
+
+ data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
+ /* Populate CKS Lookup Table */
+ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
+ stretch_amount2 = 0;
+ else if (stretch_amount == 3 || stretch_amount == 4)
+ stretch_amount2 = 1;
+ else {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+ PP_ASSERT_WITH_CODE(false,
+ "Stretch Amount in PPTable not supported\n",
+ return -EINVAL);
+ }
+
+ value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
+ value &= 0xFFFFFFFE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
+
+ return 0;
+}
+
+/**
+* Populates the SMC VRConfig field in DPM table.
+*
+* @param hwmgr the address of the hardware manager
+* @param table the SMC DPM table structure to be populated
+* @return always 0
+*/
+static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
+ struct SMU74_Discrete_DpmTable *table)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint16_t config;
+
+ config = VR_MERGED_WITH_VDDC;
+ table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
+
+ /* Set Vddc Voltage Controller */
+ if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
+ config = VR_SVI2_PLANE_1;
+ table->VRConfig |= config;
+ } else {
+ PP_ASSERT_WITH_CODE(false,
+ "VDDC should be on SVI2 control in merged mode!",
+ );
+ }
+ /* Set Vddci Voltage Controller */
+ if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
+ config = VR_SVI2_PLANE_2; /* only in merged mode */
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
+ config = VR_SMIO_PATTERN_1;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
+ }
+ /* Set Mvdd Voltage Controller */
+ if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
+ config = VR_SVI2_PLANE_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
+ config = VR_SMIO_PATTERN_2;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ } else {
+ config = VR_STATIC_VOLTAGE;
+ table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+ }
+
+ return 0;
+}
+
+int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
+ int result = 0;
+ struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+ AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+ AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+ uint32_t tmp, i;
+ struct pp_smumgr *smumgr = hwmgr->smumgr;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return result;
+
+ result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+ if (0 == result) {
+ table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+ table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+ table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+ table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+ table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+ table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+ table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+ table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+ table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+ table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12;
+ table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+ table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+ table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+ table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+ table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12;
+ table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+ AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+ AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+ AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+ AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+ AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+ AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+ AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
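+		/* Note: this loop assumes sclk_table has at least NUM_VFT_COLUMNS entries. */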
+ for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+ AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+ AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
+ }
+
+ result = polaris10_read_smc_sram_dword(smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+ &tmp, data->sram_end);
+
+ polaris10_copy_bytes_to_smc(smumgr,
+ tmp,
+ (uint8_t *)&AVFS_meanNsigma,
+ sizeof(AVFS_meanNsigma_t),
+ data->sram_end);
+
+ result = polaris10_read_smc_sram_dword(smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+ &tmp, data->sram_end);
+ polaris10_copy_bytes_to_smc(smumgr,
+ tmp,
+ (uint8_t *)&AVFS_SclkOffset,
+ sizeof(AVFS_Sclk_Offset_t),
+ data->sram_end);
+
+ data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+ (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+		data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1);
+ }
+ return result;
+}
+
+/**
+* Initializes the SMC table and uploads it
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success, otherwise the first error encountered
+*/
+static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct SMU74_Discrete_DpmTable *table = &(data->smc_state_table);
+ const struct polaris10_ulv_parm *ulv = &(data->ulv);
+ uint8_t i;
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+ pp_atomctrl_clock_dividers_vi dividers;
+
+ result = polaris10_setup_default_dpm_tables(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to setup default DPM tables!", return result);
+
+ if (POLARIS10_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ polaris10_populate_smc_voltage_tables(hwmgr, table);
+
+ table->SystemFlags = 0;
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
+ result = polaris10_populate_ulv_state(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, PPPOLARIS10_CGULVPARAMETER_DFLT);
+ }
+
+ result = polaris10_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result);
+
+ result = polaris10_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result);
+
+ result = polaris10_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result);
+
+ result = polaris10_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result);
+
+ result = polaris10_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result);
+
+ result = polaris10_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result);
+
+ /* Since only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state) we only
+ * need to populate the ARB settings for the initial state.
+ */
+ result = polaris10_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result);
+
+ result = polaris10_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result);
+
+ result = polaris10_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result);
+
+ result = polaris10_populate_smc_initailial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot State!", return result);
+
+ result = polaris10_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate BAPM Parameters!", return result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ result = polaris10_populate_clock_stretcher_data_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate Clock Stretcher Data Table!",
+ return result);
+ }
+
+ result = polaris10_populate_avfs_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
+
+ table->CurrSclkPllRange = 0xff;
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh =
+ table_info->cac_dtp_table->usTargetOperatingTemp *
+ POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
+ table->TemperatureLimitLow =
+ (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
+ POLARIS10_Q88_FORMAT_CONVERSION_UNIT;
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0;
+ table->PCIeGenInterval = 1;
+ table->VRConfig = 0;
+
+ result = polaris10_populate_vr_config(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate VRConfig setting!", return result);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ } else {
+ table->VRHotGpio = POLARIS10_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
+ &gpio_pin)) {
+ table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ } else {
+ table->AcDcGpio = POLARIS10_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+ }
+
+ /* Thermal Output GPIO */
+ if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
+ &gpio_pin)) {
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalOutGPIO);
+
+ table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
+
+		/* For polarity, read GPIOPAD_A for the assigned GPIO pin:
+		 * the VBIOS programs this register to the 'inactive state',
+		 * so the driver can derive the 'active state' from it and
+		 * program the SMU with the correct polarity.
+ */
+ table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A)
+ & (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
+
+ /* if required, combine VRHot/PCC with thermal out GPIO */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot)
+ && phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CombinePCCWithThermalSignal))
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
+ } else {
+ table->ThermOutGpio = 17;
+ table->ThermOutPolarity = 1;
+ table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
+ }
+
+ /* Populate BIF_SCLK levels into SMC DPM table */
+ for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++) {
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, data->bif_sclk_table[i], &dividers);
+ PP_ASSERT_WITH_CODE((result == 0), "Can not find DFS divide id for Sclk", return result);
+
+ if (i == 0)
+ table->Ulv.BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+ else
+ table->LinkLevel[i-1].BifSclkDfs = PP_HOST_TO_SMC_US((USHORT)(dividers.pll_post_divider));
+ }
+
+ for (i = 0; i < SMU74_MAX_ENTRIES_SMIO; i++)
+ table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+	/* Upload all DPM data to SMC memory (DPM level, DPM level count, etc.). */
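+	/* The upload size excludes the trailing SMU74_PIDController entries,
+	 * which appear to be owned by the SMC itself and are not overwritten here.
+	 */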
+ result = polaris10_copy_bytes_to_smc(hwmgr->smumgr,
+ data->dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU74_Discrete_DpmTable) - 3 * sizeof(SMU74_PIDController),
+ data->sram_end);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result);
+
+ return 0;
+}
+
+/**
+* Initialize the ARB DRAM timing table's index field.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return 0 on success, otherwise an error code
+*/
+static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
+{
+ const struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t tmp;
+ int result;
+
+ /* This is a read-modify-write on the first byte of the ARB table.
+	 * The first byte in the SMU74_Discrete_MCArbDramTimingTable structure
+	 * is the field 'current'.
+	 * This solution is ugly, but we never write the whole table,
+	 * only individual fields in it.
+ * In reality this field should not be in that structure
+ * but in a soft register.
+ */
+ result = polaris10_read_smc_sram_dword(hwmgr->smumgr,
+ data->arb_table_start, &tmp, data->sram_end);
+
+ if (result)
+ return result;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
+
+ return polaris10_write_smc_sram_dword(hwmgr->smumgr,
+ data->arb_table_start, tmp, data->sram_end);
+}
+
+static int polaris10_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot))
+ return smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_EnableVRHotGPIOInterrupt);
+
+ return 0;
+}
+
+static int polaris10_enable_sclk_control(struct pp_hwmgr *hwmgr)
+{
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+ SCLK_PWRMGT_OFF, 0);
+ return 0;
+}
+
+static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_ulv_parm *ulv = &(data->ulv);
+
+ if (ulv->ulv_supported)
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
+
+ return 0;
+}
+
+static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
+{
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep)) {
+ if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to enable Master Deep Sleep switch failed!",
+ return -1);
+ } else {
+ if (smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to disable Master Deep Sleep switch failed!",
+ return -1);
+ }
+ }
+
+ return 0;
+}
+
+static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t soft_register_value = 0;
+ uint32_t handshake_disables_offset = data->soft_regs_start
+ + offsetof(SMU74_SoftRegisters, HandshakeDisables);
+
+ /* enable SCLK dpm */
+ if (!data->sclk_dpm_key_disabled)
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
+ "Failed to enable SCLK DPM during DPM Start Function!",
+ return -1);
+
+ /* enable MCLK dpm */
+ if (0 == data->mclk_dpm_key_disabled) {
+		/* Disable UVD - SMU handshake for MCLK. */
+ soft_register_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, handshake_disables_offset);
+ soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ handshake_disables_offset, soft_register_value);
+
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_Enable)),
+ "Failed to enable MCLK DPM during DPM Start Function!",
+ return -1);
+
+ PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
+ udelay(10);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
+ }
+
+ return 0;
+}
+
+static int polaris10_start_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ /*enable general power management */
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ GLOBAL_PWRMGT_EN, 1);
+
+ /* enable sclk deep sleep */
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
+ DYNAMIC_PM_EN, 1);
+
+ /* prepare for PCIE DPM */
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ data->soft_regs_start + offsetof(SMU74_SoftRegisters,
+ VoltageChangeTimeout), 0x1000);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
+ SWRST_COMMAND_1, RESETLC, 0x0);
+/*
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_Voltage_Cntl_Enable)),
+ "Failed to enable voltage DPM during DPM Start Function!",
+ return -1);
+*/
+
+ if (polaris10_enable_sclk_mclk_dpm(hwmgr)) {
+ printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
+ return -1;
+ }
+
+ /* enable PCIE dpm */
+ if (0 == data->pcie_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(
+ (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_Enable)),
+ "Failed to enable pcie DPM during DPM Start Function!",
+ return -1);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_Falcon_QuickTransition)) {
+ PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_EnableACDCGPIOInterrupt)),
+ "Failed to enable AC DC GPIO Interrupt!",
+ );
+ }
+
+ return 0;
+}
+
+static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
+{
+ bool protection;
+ enum DPM_EVENT_SRC src;
+
+ switch (sources) {
+ default:
+ printk(KERN_ERR "Unknown throttling event sources.");
+ /* fall through */
+ case 0:
+ protection = false;
+ /* src is unused */
+ break;
+ case (1 << PHM_AutoThrottleSource_Thermal):
+ protection = true;
+ src = DPM_EVENT_SRC_DIGITAL;
+ break;
+ case (1 << PHM_AutoThrottleSource_External):
+ protection = true;
+ src = DPM_EVENT_SRC_EXTERNAL;
+ break;
+ case (1 << PHM_AutoThrottleSource_External) |
+ (1 << PHM_AutoThrottleSource_Thermal):
+ protection = true;
+ src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
+ break;
+ }
+ /* Order matters - don't enable thermal protection for the wrong source. */
+ if (protection) {
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
+ DPM_EVENT_SRC, src);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ THERMAL_PROTECTION_DIS,
+ !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController));
+ } else
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
+ THERMAL_PROTECTION_DIS, 1);
+}
+
+static int polaris10_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
+ PHM_AutoThrottleSource source)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (!(data->active_auto_throttle_sources & (1 << source))) {
+ data->active_auto_throttle_sources |= 1 << source;
+ polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
+ }
+ return 0;
+}
+
+static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
+{
+ return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
+}
+
+int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+	data->pcie_performance_request = true;
+
+ return 0;
+}
+
+int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+	int tmp_result, result = 0;
+
+	tmp_result = (!polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
+	PP_ASSERT_WITH_CODE(tmp_result == 0,
+			"DPM is already running right now, no need to enable DPM!",
+			return 0);
+
+ if (polaris10_voltage_control(hwmgr)) {
+ tmp_result = polaris10_enable_voltage_control(hwmgr);
+ PP_ASSERT_WITH_CODE(tmp_result == 0,
+ "Failed to enable voltage control!",
+ result = tmp_result);
+
+ tmp_result = polaris10_construct_voltage_tables(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+				"Failed to construct voltage tables!",
+ result = tmp_result);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport))
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController))
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
+
+ tmp_result = polaris10_program_static_screen_threshold_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to program static screen threshold parameters!",
+ result = tmp_result);
+
+ tmp_result = polaris10_enable_display_gap(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable display gap!", result = tmp_result);
+
+ tmp_result = polaris10_program_voting_clients(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to program voting clients!", result = tmp_result);
+
+ tmp_result = polaris10_process_firmware_header(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to process firmware header!", result = tmp_result);
+
+ tmp_result = polaris10_initial_switch_from_arbf0_to_f1(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to initialize switch from ArbF0 to F1!",
+ result = tmp_result);
+
+ tmp_result = polaris10_init_smc_table(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to initialize SMC table!", result = tmp_result);
+
+ tmp_result = polaris10_init_arb_table_index(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to initialize ARB table index!", result = tmp_result);
+
+ tmp_result = polaris10_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to populate PM fuses!", result = tmp_result);
+
+ tmp_result = polaris10_enable_vrhot_gpio_interrupt(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
+
+ smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+
+ tmp_result = polaris10_enable_sclk_control(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable SCLK control!", result = tmp_result);
+
+ tmp_result = polaris10_enable_smc_voltage_controller(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable voltage control!", result = tmp_result);
+
+ tmp_result = polaris10_enable_ulv(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable ULV!", result = tmp_result);
+
+ tmp_result = polaris10_enable_deep_sleep_master_switch(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable deep sleep master switch!", result = tmp_result);
+
+ tmp_result = polaris10_start_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to start DPM!", result = tmp_result);
+
+ tmp_result = polaris10_enable_smc_cac(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable SMC CAC!", result = tmp_result);
+
+ tmp_result = polaris10_enable_power_containment(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable power containment!", result = tmp_result);
+
+ tmp_result = polaris10_power_control_set_level(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to power control set level!", result = tmp_result);
+
+ tmp_result = polaris10_enable_thermal_auto_throttle(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable thermal auto throttle!", result = tmp_result);
+
+ tmp_result = polaris10_pcie_performance_request(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "pcie performance request failed!", result = tmp_result);
+
+ return result;
+}
+
+int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (data->soft_pp_table) {
+ kfree(data->soft_pp_table);
+ data->soft_pp_table = NULL;
+ }
+
+ return phm_hwmgr_backend_fini(hwmgr);
+}
+
+int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DynamicPatchPowerState);
+
+ if (data->mvdd_control == POLARIS10_VOLTAGE_CONTROL_NONE)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EnableMVDDControl);
+
+ if (data->vddci_control == POLARIS10_VOLTAGE_CONTROL_NONE)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ControlVDDCI);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EnableSMU7ThermalManagement);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DynamicPowerManagement);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UnTabledHardwareInterface);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SMC);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_NonABMSupportInPPLib);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DynamicUVDState);
+
+	/* Power tune caps: assume disabled. */
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ODFuzzyFanControlSupport);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
+ if (hwmgr->chip_id == CHIP_POLARIS11)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SPLLShutdownSupport);
+ return 0;
+}
+
+static void polaris10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ polaris10_initialize_power_tune_defaults(hwmgr);
+
+ data->pcie_gen_performance.max = PP_PCIEGen1;
+ data->pcie_gen_performance.min = PP_PCIEGen3;
+ data->pcie_gen_power_saving.max = PP_PCIEGen1;
+ data->pcie_gen_power_saving.min = PP_PCIEGen3;
+ data->pcie_lane_performance.max = 0;
+ data->pcie_lane_performance.min = 16;
+ data->pcie_lane_power_saving.max = 0;
+ data->pcie_lane_power_saving.min = 16;
+}
+
+/**
+* Get Leakage VDDC based on leakage ID.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always 0
+*/
+static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint16_t vv_id;
+ uint32_t vddc = 0;
+ uint16_t i, j;
+ uint32_t sclk = 0;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+ int result;
+
+ for (i = 0; i < POLARIS10_MAX_LEAKAGE_COUNT; i++) {
+ vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
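+		/* vv_id walks the ATOM virtual (leakage) voltage IDs, starting at ATOM_VIRTUAL_VOLTAGE_ID0 (0xff01). */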
+ if (!phm_get_sclk_for_voltage_evv(hwmgr,
+ table_info->vddc_lookup_table, vv_id, &sclk)) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher)) {
+ for (j = 1; j < sclk_table->count; j++) {
+ if (sclk_table->entries[j].clk == sclk &&
+ sclk_table->entries[j].cks_enable == 0) {
+ sclk += 5000;
+ break;
+ }
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
+ VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
+ "Error retrieving EVV voltage value!",
+ continue);
+
+			/* Need to make sure vddc is less than 2 V, or else it
+			 * could burn the ASIC. The real voltage level is in
+			 * units of 0.01 mV.
+			 */
+ PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
+ "Invalid VDDC value", result = -EINVAL;);
+
+ /* the voltage should not be zero nor equal to leakage ID */
+ if (vddc != 0 && vddc != vv_id) {
+ data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
+ data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
+ data->vddc_leakage.count++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Change virtual leakage voltage to actual value.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @param voltage pointer to the voltage to be patched
+ * @param leakage_table pointer to the leakage table
+ */
+static void polaris10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
+ uint16_t *voltage, struct polaris10_leakage_voltage *leakage_table)
+{
+ uint32_t index;
+
+ /* search for leakage voltage ID 0xff01 ~ 0xff08 */
+ for (index = 0; index < leakage_table->count; index++) {
+ /* if this voltage matches a leakage voltage ID */
+ /* patch with actual leakage voltage */
+ if (leakage_table->leakage_id[index] == *voltage) {
+ *voltage = leakage_table->actual_voltage[index];
+ break;
+ }
+ }
+
+ if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
+		printk(KERN_ERR "Voltage value looks like a leakage ID but it is not patched\n");
+}
+
+/**
+* Patch voltage lookup table by EVV leakages.
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @param lookup_table pointer to the voltage lookup table
+* @param leakage_table pointer to the leakage table
+* @return always 0
+*/
+static int polaris10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
+ phm_ppt_v1_voltage_lookup_table *lookup_table,
+ struct polaris10_leakage_voltage *leakage_table)
+{
+ uint32_t i;
+
+ for (i = 0; i < lookup_table->count; i++)
+ polaris10_patch_with_vdd_leakage(hwmgr,
+ &lookup_table->entries[i].us_vdd, leakage_table);
+
+ return 0;
+}
+
+static int polaris10_patch_clock_voltage_limits_with_vddc_leakage(
+ struct pp_hwmgr *hwmgr, struct polaris10_leakage_voltage *leakage_table,
+ uint16_t *vddc)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ polaris10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
+ hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
+ table_info->max_clock_voltage_on_dc.vddc;
+ return 0;
+}
+
+static int polaris10_patch_voltage_dependency_tables_with_lookup_table(
+ struct pp_hwmgr *hwmgr)
+{
+ uint8_t entryId;
+ uint8_t voltageId;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+ table_info->vdd_dep_on_sclk;
+ struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
+ table_info->vdd_dep_on_mclk;
+ struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
+ table_info->mm_dep_table;
+
+ for (entryId = 0; entryId < sclk_table->count; ++entryId) {
+ voltageId = sclk_table->entries[entryId].vddInd;
+ sclk_table->entries[entryId].vddc =
+ table_info->vddc_lookup_table->entries[voltageId].us_vdd;
+ }
+
+ for (entryId = 0; entryId < mclk_table->count; ++entryId) {
+ voltageId = mclk_table->entries[entryId].vddInd;
+ mclk_table->entries[entryId].vddc =
+ table_info->vddc_lookup_table->entries[voltageId].us_vdd;
+ }
+
+ for (entryId = 0; entryId < mm_table->count; ++entryId) {
+ voltageId = mm_table->entries[entryId].vddcInd;
+ mm_table->entries[entryId].vddc =
+ table_info->vddc_lookup_table->entries[voltageId].us_vdd;
+ }
+
+	return 0;
+}
+
+static int polaris10_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
+{
+ /* Need to determine if we need calculated voltage. */
+ return 0;
+}
+
+static int polaris10_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
+{
+ /* Need to determine if we need calculated voltage from mm table. */
+ return 0;
+}
+
+static int polaris10_sort_lookup_table(struct pp_hwmgr *hwmgr,
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table)
+{
+ uint32_t table_size, i, j;
+ struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
+ table_size = lookup_table->count;
+
+ PP_ASSERT_WITH_CODE(0 != lookup_table->count,
+ "Lookup table is empty", return -EINVAL);
+
+	/* Sort entries in ascending us_vdd order (simple insertion sort) */
+ for (i = 0; i < table_size - 1; i++) {
+ for (j = i + 1; j > 0; j--) {
+ if (lookup_table->entries[j].us_vdd <
+ lookup_table->entries[j - 1].us_vdd) {
+ tmp_voltage_lookup_record = lookup_table->entries[j - 1];
+ lookup_table->entries[j - 1] = lookup_table->entries[j];
+ lookup_table->entries[j] = tmp_voltage_lookup_record;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int polaris10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
+{
+ int result = 0;
+ int tmp_result;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ tmp_result = polaris10_patch_lookup_table_with_leakage(hwmgr,
+ table_info->vddc_lookup_table, &(data->vddc_leakage));
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = polaris10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
+ &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = polaris10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = polaris10_calc_voltage_dependency_tables(hwmgr);
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = polaris10_calc_mm_voltage_dependency_table(hwmgr);
+ if (tmp_result)
+ result = tmp_result;
+
+ tmp_result = polaris10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
+ if (tmp_result)
+ result = tmp_result;
+
+ return result;
+}
+
+static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
+ table_info->vdd_dep_on_sclk;
+ struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
+ table_info->vdd_dep_on_mclk;
+
+ PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
+ "VDD dependency on SCLK table is missing. \
+ This table is mandatory", return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
+			"VDD dependency on SCLK table is empty. \
+			This table is mandatory", return -EINVAL);
+
+ PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
+ "VDD dependency on MCLK table is missing. \
+ This table is mandatory", return -EINVAL);
+	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
+			"VDD dependency on MCLK table is empty. \
+			This table is mandatory", return -EINVAL);
+
+ table_info->max_clock_voltage_on_ac.sclk =
+ allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
+ table_info->max_clock_voltage_on_ac.mclk =
+ allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
+ table_info->max_clock_voltage_on_ac.vddc =
+ allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
+ table_info->max_clock_voltage_on_ac.vddci =
+ allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
+
+ hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
+ hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
+ hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
+	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
+
+ return 0;
+}
+
+int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+ table_info->vdd_dep_on_mclk;
+ struct phm_ppt_v1_voltage_lookup_table *lookup_table =
+ table_info->vddc_lookup_table;
+ uint32_t i;
+
+ if (hwmgr->chip_id == CHIP_POLARIS10 && hwmgr->hw_revision == 0xC7) {
+ if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
+ return 0;
+
+ for (i = 0; i < lookup_table->count; i++) {
+ if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
+ dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
+
+int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
+ uint32_t temp_reg;
+ int result;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ data->dll_default_on = false;
+ data->sram_end = SMC_RAM_END;
+ data->mclk_dpm0_activity_target = 0xa;
+ data->disable_dpm_mask = 0xFF;
+ data->static_screen_threshold = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
+ data->static_screen_threshold_unit = PPPOLARIS10_STATICSCREENTHRESHOLD_DFLT;
+ data->activity_target[0] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+ data->activity_target[1] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+ data->activity_target[2] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+ data->activity_target[3] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+ data->activity_target[4] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+ data->activity_target[5] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+ data->activity_target[6] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+ data->activity_target[7] = PPPOLARIS10_TARGETACTIVITY_DFLT;
+
+ data->voting_rights_clients0 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT0;
+ data->voting_rights_clients1 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT1;
+ data->voting_rights_clients2 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT2;
+ data->voting_rights_clients3 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT3;
+ data->voting_rights_clients4 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT4;
+ data->voting_rights_clients5 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT5;
+ data->voting_rights_clients6 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT6;
+ data->voting_rights_clients7 = PPPOLARIS10_VOTINGRIGHTSCLIENTS_DFLT7;
+
+ data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
+
+ data->mclk_activity_target = PPPOLARIS10_MCLK_TARGETACTIVITY_DFLT;
+
+ /* need to set voltage control types before EVV patching */
+ data->voltage_control = POLARIS10_VOLTAGE_CONTROL_NONE;
+ data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
+ data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
+
+ data->enable_tdc_limit_feature = true;
+ data->enable_pkg_pwr_tracking_feature = true;
+ data->force_pcie_gen = PP_PCIEGenInvalid;
+ data->mclk_stutter_mode_threshold = 40000;
+
+ if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
+ data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EnableMVDDControl)) {
+ if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
+ data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
+ else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
+ data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ControlVDDCI)) {
+ if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
+ data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_GPIO;
+ else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
+ data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
+ }
+
+ if (table_info->cac_dtp_table->usClockStretchAmount != 0)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ClockStretcher);
+
+ polaris10_set_features_platform_caps(hwmgr);
+
+ polaris10_patch_voltage_workaround(hwmgr);
+ polaris10_init_dpm_defaults(hwmgr);
+
+ /* Get leakage voltage based on leakage ID. */
+ result = polaris10_get_evv_voltages(hwmgr);
+
+ if (result) {
+		printk(KERN_ERR "Get EVV voltage failed. Abort driver loading!\n");
+ return -1;
+ }
+
+ polaris10_complete_dependency_tables(hwmgr);
+ polaris10_set_private_data_based_on_pptable(hwmgr);
+
+	/* Initialize Dynamic State Adjustment Rule Settings */
+ result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
+
+ if (0 == result) {
+ struct cgs_system_info sys_info = {0};
+
+ data->is_tlu_enabled = 0;
+
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+ POLARIS10_MAX_HARDWARE_POWERLEVELS;
+ hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+ hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
+ temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
+ switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
+ case 0:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
+ break;
+ case 1:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
+ break;
+ case 2:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
+ break;
+ case 3:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
+ break;
+ case 4:
+ temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
+ break;
+ default:
+ PP_ASSERT_WITH_CODE(0,
+ "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!",
+ );
+ break;
+ }
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
+ }
+
+ if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
+ hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
+ (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
+ (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
+ (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
+
+			table_info->cac_dtp_table->usDefaultTargetOperatingTemp =
+				(table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
+				(table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
+
+ table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
+ table_info->cac_dtp_table->usOperatingTempStep = 1;
+ table_info->cac_dtp_table->usOperatingTempHyst = 1;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
+
+ hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
+ table_info->cac_dtp_table->usOperatingTempMinLimit;
+
+ hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
+ table_info->cac_dtp_table->usOperatingTempMaxLimit;
+
+ hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
+ table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
+
+ hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
+ table_info->cac_dtp_table->usOperatingTempStep;
+
+ hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
+ table_info->cac_dtp_table->usTargetOperatingTemp;
+ }
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+ if (result)
+ data->pcie_gen_cap = 0x30007;
+ else
+ data->pcie_gen_cap = (uint32_t)sys_info.value;
+ if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ data->pcie_spc_cap = 20;
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+ if (result)
+ data->pcie_lane_cap = 0x2f0000;
+ else
+ data->pcie_lane_cap = (uint32_t)sys_info.value;
+
+ hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
+		/* The true clock step depends on the frequency; typically 4.5 or 9 MHz. Here we use 5 MHz. */
+ hwmgr->platform_descriptor.clockStep.engineClock = 500;
+ hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+ } else {
+ /* Ignore return value in here, we are cleaning up a mess. */
+ polaris10_hwmgr_backend_fini(hwmgr);
+ }
+
+ return 0;
+}
+
+static int polaris10_force_dpm_highest(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t level, tmp;
+
+ if (!data->pcie_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
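+			/* 'level' becomes the index of the highest set bit, i.e. the highest enabled PCIe DPM level. */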
+ level = 0;
+ tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ while (tmp >>= 1)
+ level++;
+
+ if (level)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_ForceLevel, level);
+ }
+ }
+
+ if (!data->sclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ level = 0;
+ tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ level++;
+
+ if (level)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ (1 << level));
+ }
+ }
+
+ if (!data->mclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ level = 0;
+ tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ level++;
+
+ if (level)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ (1 << level));
+ }
+ }
+
+ return 0;
+}
+
+static int polaris10_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ phm_apply_dal_min_voltage_request(hwmgr);
+
+ if (!data->sclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ }
+
+ if (!data->mclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ }
+
+ return 0;
+}
+
+static int polaris10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (!polaris10_is_dpm_running(hwmgr))
+ return -EINVAL;
+
+ if (!data->pcie_dpm_key_disabled) {
+ smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_UnForceLevel);
+ }
+
+ return polaris10_upload_dpm_level_enable_mask(hwmgr);
+}
+
+static int polaris10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data =
+ (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t level;
+
+	if (!data->sclk_dpm_key_disabled) {
+		if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+			level = phm_get_lowest_enabled_level(hwmgr,
+					data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_SCLKDPM_SetEnabledMask,
+					(1 << level));
+		}
+	}
+
+ if (!data->mclk_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ level = phm_get_lowest_enabled_level(hwmgr,
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ (1 << level));
+ }
+ }
+
+ if (!data->pcie_dpm_key_disabled) {
+ if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ level = phm_get_lowest_enabled_level(hwmgr,
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask);
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_ForceLevel,
+ (level));
+ }
+ }
+
+	return 0;
+}
+
+static int polaris10_force_dpm_level(struct pp_hwmgr *hwmgr,
+ enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = polaris10_force_dpm_highest(hwmgr);
+ if (ret)
+ return ret;
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = polaris10_force_dpm_lowest(hwmgr);
+ if (ret)
+ return ret;
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = polaris10_unforce_dpm_levels(hwmgr);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ hwmgr->dpm_level = level;
+
+ return ret;
+}
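+
+/*
+ * A minimal usage sketch (hypothetical caller, not part of this patch):
+ *
+ *	if (polaris10_force_dpm_level(hwmgr, AMD_DPM_FORCED_LEVEL_LOW))
+ *		printk(KERN_ERR "Failed to force lowest DPM level\n");
+ */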
+
+static int polaris10_get_power_state_size(struct pp_hwmgr *hwmgr)
+{
+ return sizeof(struct polaris10_power_state);
+}
+
+static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
+ struct pp_power_state *request_ps,
+ const struct pp_power_state *current_ps)
+{
+ struct polaris10_power_state *polaris10_ps =
+ cast_phw_polaris10_power_state(&request_ps->hardware);
+ uint32_t sclk;
+ uint32_t mclk;
+ struct PP_Clocks minimum_clocks = {0};
+ bool disable_mclk_switching;
+ bool disable_mclk_switching_for_frame_lock;
+ struct cgs_display_info info = {0};
+ const struct phm_clock_and_voltage_limits *max_limits;
+ uint32_t i;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int32_t count;
+ int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
+
+ data->battery_state = (PP_StateUILabel_Battery ==
+ request_ps->classification.ui_label);
+
+ PP_ASSERT_WITH_CODE(polaris10_ps->performance_level_count == 2,
+ "VI should always have 2 performance levels",
+ );
+
+ max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
+ &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
+ &(hwmgr->dyn_state.max_clock_voltage_on_dc);
+
+ /* Cap clock DPM tables at DC MAX if it is in DC. */
+ if (PP_PowerSource_DC == hwmgr->power_source) {
+ for (i = 0; i < polaris10_ps->performance_level_count; i++) {
+ if (polaris10_ps->performance_levels[i].memory_clock > max_limits->mclk)
+ polaris10_ps->performance_levels[i].memory_clock = max_limits->mclk;
+ if (polaris10_ps->performance_levels[i].engine_clock > max_limits->sclk)
+ polaris10_ps->performance_levels[i].engine_clock = max_limits->sclk;
+ }
+ }
+
+ polaris10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
+ polaris10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+	/* TODO: result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort); */
+
+	/* TODO: GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState)) {
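+		/* Stable P-state: target 75% of the max AC SCLK, snapped down to the nearest SCLK dependency table entry. */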
+ max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
+ stable_pstate_sclk = (max_limits->sclk * 75) / 100;
+
+ for (count = table_info->vdd_dep_on_sclk->count - 1;
+ count >= 0; count--) {
+ if (stable_pstate_sclk >=
+ table_info->vdd_dep_on_sclk->entries[count].clk) {
+ stable_pstate_sclk =
+ table_info->vdd_dep_on_sclk->entries[count].clk;
+ break;
+ }
+ }
+
+ if (count < 0)
+ stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
+
+ stable_pstate_mclk = max_limits->mclk;
+
+ minimum_clocks.engineClock = stable_pstate_sclk;
+ minimum_clocks.memoryClock = stable_pstate_mclk;
+ }
+
+ if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
+ minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
+
+ if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
+ minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
+
+ polaris10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
+
+ if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
+ PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
+ hwmgr->platform_descriptor.overdriveLimit.engineClock),
+ "Overdrive sclk exceeds limit",
+ hwmgr->gfx_arbiter.sclk_over_drive =
+ hwmgr->platform_descriptor.overdriveLimit.engineClock);
+
+ if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
+ polaris10_ps->performance_levels[1].engine_clock =
+ hwmgr->gfx_arbiter.sclk_over_drive;
+ }
+
+ if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
+ PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock),
+ "Overdrive mclk exceeds limit",
+ hwmgr->gfx_arbiter.mclk_over_drive =
+ hwmgr->platform_descriptor.overdriveLimit.memoryClock);
+
+ if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
+ polaris10_ps->performance_levels[1].memory_clock =
+ hwmgr->gfx_arbiter.mclk_over_drive;
+ }
+
+ disable_mclk_switching_for_frame_lock = phm_cap_enabled(
+ hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
+
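+	/* MCLK switching is disabled when more than one display is active or when frame lock requires a fixed MCLK. */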
+ disable_mclk_switching = (1 < info.display_count) ||
+ disable_mclk_switching_for_frame_lock;
+
+ sclk = polaris10_ps->performance_levels[0].engine_clock;
+ mclk = polaris10_ps->performance_levels[0].memory_clock;
+
+ if (disable_mclk_switching)
+ mclk = polaris10_ps->performance_levels
+ [polaris10_ps->performance_level_count - 1].memory_clock;
+
+ if (sclk < minimum_clocks.engineClock)
+ sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
+ max_limits->sclk : minimum_clocks.engineClock;
+
+ if (mclk < minimum_clocks.memoryClock)
+ mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
+ max_limits->mclk : minimum_clocks.memoryClock;
+
+ polaris10_ps->performance_levels[0].engine_clock = sclk;
+ polaris10_ps->performance_levels[0].memory_clock = mclk;
+
+ polaris10_ps->performance_levels[1].engine_clock =
+ (polaris10_ps->performance_levels[1].engine_clock >=
+ polaris10_ps->performance_levels[0].engine_clock) ?
+ polaris10_ps->performance_levels[1].engine_clock :
+ polaris10_ps->performance_levels[0].engine_clock;
+
+ if (disable_mclk_switching) {
+ if (mclk < polaris10_ps->performance_levels[1].memory_clock)
+ mclk = polaris10_ps->performance_levels[1].memory_clock;
+
+ polaris10_ps->performance_levels[0].memory_clock = mclk;
+ polaris10_ps->performance_levels[1].memory_clock = mclk;
+ } else {
+ if (polaris10_ps->performance_levels[1].memory_clock <
+ polaris10_ps->performance_levels[0].memory_clock)
+ polaris10_ps->performance_levels[1].memory_clock =
+ polaris10_ps->performance_levels[0].memory_clock;
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState)) {
+ for (i = 0; i < polaris10_ps->performance_level_count; i++) {
+ polaris10_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
+ polaris10_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
+ polaris10_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
+			polaris10_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
+ }
+ }
+ return 0;
+}
+
+static int polaris10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+{
+ struct pp_power_state *ps;
+ struct polaris10_power_state *polaris10_ps;
+
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
+
+ if (low)
+ return polaris10_ps->performance_levels[0].memory_clock;
+ else
+ return polaris10_ps->performance_levels
+ [polaris10_ps->performance_level_count-1].memory_clock;
+}
+
+static int polaris10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+{
+ struct pp_power_state *ps;
+ struct polaris10_power_state *polaris10_ps;
+
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ ps = hwmgr->request_ps;
+
+ if (ps == NULL)
+ return -EINVAL;
+
+ polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
+
+ if (low)
+ return polaris10_ps->performance_levels[0].engine_clock;
+ else
+ return polaris10_ps->performance_levels
+ [polaris10_ps->performance_level_count-1].engine_clock;
+}
+
+static int polaris10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
+ struct pp_hw_power_state *hw_ps)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_power_state *ps = (struct polaris10_power_state *)hw_ps;
+ ATOM_FIRMWARE_INFO_V2_2 *fw_info;
+ uint16_t size;
+ uint8_t frev, crev;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+
+ /* First retrieve the Boot clocks and VDDC from the firmware info table.
+ * We assume here that fw_info is unchanged if this call fails.
+ */
+ fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
+ hwmgr->device, index,
+ &size, &frev, &crev);
+ if (!fw_info)
+ /* During a test, there is no firmware info table. */
+ return 0;
+
+ /* Patch the state. */
+ data->vbios_boot_state.sclk_bootup_value =
+ le32_to_cpu(fw_info->ulDefaultEngineClock);
+ data->vbios_boot_state.mclk_bootup_value =
+ le32_to_cpu(fw_info->ulDefaultMemoryClock);
+ data->vbios_boot_state.mvdd_bootup_value =
+ le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
+ data->vbios_boot_state.vddc_bootup_value =
+ le16_to_cpu(fw_info->usBootUpVDDCVoltage);
+ data->vbios_boot_state.vddci_bootup_value =
+ le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
+ data->vbios_boot_state.pcie_gen_bootup_value =
+ phm_get_current_pcie_speed(hwmgr);
+
+ data->vbios_boot_state.pcie_lane_bootup_value =
+ (uint16_t)phm_get_current_pcie_lane_number(hwmgr);
+
+ /* set boot power state */
+ ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
+ ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
+ ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
+ ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
+
+ return 0;
+}
+
+static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
+ void *state, struct pp_power_state *power_state,
+ void *pp_table, uint32_t classification_flag)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_power_state *polaris10_power_state =
+ (struct polaris10_power_state *)(&(power_state->hardware));
+ struct polaris10_performance_level *performance_level;
+ ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
+ ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
+ (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
+ PPTable_Generic_SubTable_Header *sclk_dep_table =
+ (PPTable_Generic_SubTable_Header *)
+ (((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+
+ ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
+ (ATOM_Tonga_MCLK_Dependency_Table *)
+ (((unsigned long)powerplay_table) +
+ le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
+
+	/* The following fields are not initialized here: id, orderedList, allStatesList */
+ power_state->classification.ui_label =
+ (le16_to_cpu(state_entry->usClassification) &
+ ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
+ ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
+ power_state->classification.flags = classification_flag;
+ /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
+
+ power_state->classification.temporary_state = false;
+ power_state->classification.to_be_deleted = false;
+
+ power_state->validation.disallowOnDC =
+ (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
+ ATOM_Tonga_DISALLOW_ON_DC));
+
+ power_state->pcie.lanes = 0;
+
+ power_state->display.disableFrameModulation = false;
+ power_state->display.limitRefreshrate = false;
+ power_state->display.enableVariBright =
+ (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
+ ATOM_Tonga_ENABLE_VARIBRIGHT));
+
+ power_state->validation.supportedPowerLevels = 0;
+ power_state->uvd_clocks.VCLK = 0;
+ power_state->uvd_clocks.DCLK = 0;
+ power_state->temperatures.min = 0;
+ power_state->temperatures.max = 0;
+
+ performance_level = &(polaris10_power_state->performance_levels
+ [polaris10_power_state->performance_level_count++]);
+
+ PP_ASSERT_WITH_CODE(
+ (polaris10_power_state->performance_level_count < SMU74_MAX_LEVELS_GRAPHICS),
+			"Performance levels exceed SMC limit!",
+ return -1);
+
+ PP_ASSERT_WITH_CODE(
+ (polaris10_power_state->performance_level_count <=
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
+			"Performance levels exceed Driver limit!",
+ return -1);
+
+ /* Performance levels are arranged from low to high. */
+ performance_level->memory_clock = mclk_dep_table->entries
+ [state_entry->ucMemoryClockIndexLow].ulMclk;
+ if (sclk_dep_table->ucRevId == 0)
+ performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexLow].ulSclk;
+ else if (sclk_dep_table->ucRevId == 1)
+ performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexLow].ulSclk;
+ performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
+ state_entry->ucPCIEGenLow);
+ performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
+			state_entry->ucPCIELaneLow);
+
+ performance_level = &(polaris10_power_state->performance_levels
+ [polaris10_power_state->performance_level_count++]);
+ performance_level->memory_clock = mclk_dep_table->entries
+ [state_entry->ucMemoryClockIndexHigh].ulMclk;
+
+ if (sclk_dep_table->ucRevId == 0)
+ performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexHigh].ulSclk;
+ else if (sclk_dep_table->ucRevId == 1)
+ performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
+ [state_entry->ucEngineClockIndexHigh].ulSclk;
+
+ performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
+ state_entry->ucPCIEGenHigh);
+ performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
+ state_entry->ucPCIELaneHigh);
+
+ return 0;
+}
+
+static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
+ unsigned long entry_index, struct pp_power_state *state)
+{
+ int result;
+ struct polaris10_power_state *ps;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
+ table_info->vdd_dep_on_mclk;
+
+ state->hardware.magic = PHM_VIslands_Magic;
+
+ ps = (struct polaris10_power_state *)(&state->hardware);
+
+ result = tonga_get_powerplay_table_entry(hwmgr, entry_index, state,
+ polaris10_get_pp_table_entry_callback_func);
+
+	/* This is the earliest point at which we have both the dependency table
+	 * and the VBIOS boot state, since PP_Tables_GetPowerPlayTableEntry
+	 * retrieves the latter. If there is only one VDDCI/MCLK level, check
+	 * that it matches the VBIOS boot state.
+	 */
+ if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
+ if (dep_mclk_table->entries[0].clk !=
+ data->vbios_boot_state.mclk_bootup_value)
+			printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
+					"does not match VBIOS boot MCLK level\n");
+ if (dep_mclk_table->entries[0].vddci !=
+ data->vbios_boot_state.vddci_bootup_value)
+			printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
+					"does not match VBIOS boot VDDCI level\n");
+ }
+
+ /* set DC compatible flag if this state supports DC */
+ if (!state->validation.disallowOnDC)
+ ps->dc_compatible = true;
+
+ if (state->classification.flags & PP_StateClassificationFlag_ACPI)
+ data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
+
+ ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
+ ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
+
+ if (!result) {
+ uint32_t i;
+
+ switch (state->classification.ui_label) {
+ case PP_StateUILabel_Performance:
+ data->use_pcie_performance_levels = true;
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (data->pcie_gen_performance.max <
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_performance.max =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_gen_performance.min >
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_performance.min =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_lane_performance.max <
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_performance.max =
+ ps->performance_levels[i].pcie_lane;
+ if (data->pcie_lane_performance.min >
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_performance.min =
+ ps->performance_levels[i].pcie_lane;
+ }
+ break;
+ case PP_StateUILabel_Battery:
+ data->use_pcie_power_saving_levels = true;
+
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (data->pcie_gen_power_saving.max <
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_power_saving.max =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_gen_power_saving.min >
+ ps->performance_levels[i].pcie_gen)
+ data->pcie_gen_power_saving.min =
+ ps->performance_levels[i].pcie_gen;
+
+ if (data->pcie_lane_power_saving.max <
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_power_saving.max =
+ ps->performance_levels[i].pcie_lane;
+
+ if (data->pcie_lane_power_saving.min >
+ ps->performance_levels[i].pcie_lane)
+ data->pcie_lane_power_saving.min =
+ ps->performance_levels[i].pcie_lane;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
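+/* "perforce" (sic) matches the pp_hwmgr_func member name this function implements */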
+static void
+polaris10_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
+{
+ uint32_t sclk, mclk, activity_percent;
+ uint32_t offset;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+
+ sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+
+ mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
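+	/* the SMC reports clocks in 10 kHz units; divide by 100 to print MHz */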
+ seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n",
+ mclk / 100, sclk / 100);
+
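+	/* AverageGraphicsActivity appears to be 8.8 fixed point; round it to a whole percent */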
+ offset = data->soft_regs_start + offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
+ activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
+ activity_percent += 0x80;
+ activity_percent >>= 8;
+
+ seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
+
+ seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
+
+ seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
+}
+
+static int polaris10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
+{
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ const struct polaris10_power_state *polaris10_ps =
+ cast_const_phw_polaris10_power_state(states->pnew_state);
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ uint32_t sclk = polaris10_ps->performance_levels
+ [polaris10_ps->performance_level_count - 1].engine_clock;
+ struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ uint32_t mclk = polaris10_ps->performance_levels
+ [polaris10_ps->performance_level_count - 1].memory_clock;
+ struct PP_Clocks min_clocks = {0};
+ uint32_t i;
+ struct cgs_display_info info = {0};
+
+ data->need_update_smu7_dpm_table = 0;
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (sclk == sclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= sclk_table->count)
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ else {
+ /* TODO: Check SCLK in DAL's minimum clocks
+ * in case DeepSleep divider update is required.
+ */
+ if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
+ (min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
+ data->display_timing.min_clock_in_sr >= POLARIS10_MINIMUM_ENGINE_CLOCK))
+ data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
+ }
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (mclk == mclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= mclk_table->count)
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (data->display_timing.num_existing_displays != info.display_count)
+ data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
+
+ return 0;
+}
+
+static uint16_t polaris10_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
+ const struct polaris10_power_state *polaris10_ps)
+{
+ uint32_t i;
+ uint32_t sclk, max_sclk = 0;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_dpm_table *dpm_table = &data->dpm_table;
+
+ for (i = 0; i < polaris10_ps->performance_level_count; i++) {
+ sclk = polaris10_ps->performance_levels[i].engine_clock;
+ if (max_sclk < sclk)
+ max_sclk = sclk;
+ }
+
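+	/* map the level of the highest sclk to a PCIe speed entry, clamping to the last PCIe level */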
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
+ return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
+ dpm_table->pcie_speed_table.dpm_levels
+ [dpm_table->pcie_speed_table.count - 1].value :
+ dpm_table->pcie_speed_table.dpm_levels[i].value);
+ }
+
+ return 0;
+}
+
+static int polaris10_request_link_speed_change_before_state_change(
+ struct pp_hwmgr *hwmgr, const void *input)
+{
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ const struct polaris10_power_state *polaris10_nps =
+ cast_const_phw_polaris10_power_state(states->pnew_state);
+ const struct polaris10_power_state *polaris10_cps =
+ cast_const_phw_polaris10_power_state(states->pcurrent_state);
+
+ uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_nps);
+ uint16_t current_link_speed;
+
+ if (data->force_pcie_gen == PP_PCIEGenInvalid)
+ current_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_cps);
+ else
+ current_link_speed = data->force_pcie_gen;
+
+ data->force_pcie_gen = PP_PCIEGenInvalid;
+ data->pspp_notify_required = false;
+
+ if (target_link_speed > current_link_speed) {
+ switch (target_link_speed) {
+ case PP_PCIEGen3:
+ if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
+ break;
+ data->force_pcie_gen = PP_PCIEGen2;
+ if (current_link_speed == PP_PCIEGen2)
+ break;
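+			/* fall through */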
+ case PP_PCIEGen2:
+ if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
+ break;
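+			/* fall through */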
+ default:
+ data->force_pcie_gen = phm_get_current_pcie_speed(hwmgr);
+ break;
+ }
+ } else {
+ if (target_link_speed < current_link_speed)
+ data->pspp_notify_required = true;
+ }
+
+ return 0;
+}
+
+static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (0 == data->need_update_smu7_dpm_table)
+ return 0;
+
+ if ((0 == data->sclk_dpm_key_disabled) &&
+ (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+ PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
+ "Trying to freeze SCLK DPM when DPM is disabled",
+ );
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_FreezeLevel),
+ "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
+ return -1);
+ }
+
+ if ((0 == data->mclk_dpm_key_disabled) &&
+ (data->need_update_smu7_dpm_table &
+ DPMTABLE_OD_UPDATE_MCLK)) {
+ PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
+ "Trying to freeze MCLK DPM when DPM is disabled",
+ );
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_FreezeLevel),
+ "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
+ return -1);
+ }
+
+ return 0;
+}
+
+static int polaris10_populate_and_upload_sclk_mclk_dpm_levels(
+ struct pp_hwmgr *hwmgr, const void *input)
+{
+ int result = 0;
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ const struct polaris10_power_state *polaris10_ps =
+ cast_const_phw_polaris10_power_state(states->pnew_state);
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t sclk = polaris10_ps->performance_levels
+ [polaris10_ps->performance_level_count - 1].engine_clock;
+ uint32_t mclk = polaris10_ps->performance_levels
+ [polaris10_ps->performance_level_count - 1].memory_clock;
+ struct polaris10_dpm_table *dpm_table = &data->dpm_table;
+
+ struct polaris10_dpm_table *golden_dpm_table = &data->golden_dpm_table;
+ uint32_t dpm_count, clock_percent;
+ uint32_t i;
+
+ if (0 == data->need_update_smu7_dpm_table)
+ return 0;
+
+ if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+ dpm_table->sclk_table.dpm_levels
+ [dpm_table->sclk_table.count - 1].value = sclk;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
+ /* Need to do calculation based on the golden DPM table
+ * as the Heatmap GPU Clock axis is also based on the default values
+ */
+ PP_ASSERT_WITH_CODE(
+ (golden_dpm_table->sclk_table.dpm_levels
+ [golden_dpm_table->sclk_table.count - 1].value != 0),
+ "Divide by 0!",
+ return -1);
+ dpm_count = dpm_table->sclk_table.count < 2 ? 0 : dpm_table->sclk_table.count - 2;
+
+ for (i = dpm_count; i > 1; i--) {
+ if (sclk > golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value) {
+ clock_percent =
+ ((sclk
+ - golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value
+ ) * 100)
+ / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
+
+ dpm_table->sclk_table.dpm_levels[i].value =
+ golden_dpm_table->sclk_table.dpm_levels[i].value +
+ (golden_dpm_table->sclk_table.dpm_levels[i].value *
+ clock_percent)/100;
+
+ } else if (golden_dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value > sclk) {
+ clock_percent =
+ ((golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value
+ - sclk) * 100)
+ / golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count-1].value;
+
+ dpm_table->sclk_table.dpm_levels[i].value =
+ golden_dpm_table->sclk_table.dpm_levels[i].value -
+ (golden_dpm_table->sclk_table.dpm_levels[i].value *
+ clock_percent) / 100;
+ } else
+ dpm_table->sclk_table.dpm_levels[i].value =
+ golden_dpm_table->sclk_table.dpm_levels[i].value;
+ }
+ }
+ }
+
+ if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+ dpm_table->mclk_table.dpm_levels
+ [dpm_table->mclk_table.count - 1].value = mclk;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinDCSupport)) {
+
+ PP_ASSERT_WITH_CODE(
+ (golden_dpm_table->mclk_table.dpm_levels
+ [golden_dpm_table->mclk_table.count-1].value != 0),
+ "Divide by 0!",
+ return -1);
+ dpm_count = dpm_table->mclk_table.count < 2 ? 0 : dpm_table->mclk_table.count - 2;
+ for (i = dpm_count; i > 1; i--) {
+ if (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value < mclk) {
+ clock_percent = ((mclk -
+ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value) * 100)
+ / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
+
+ dpm_table->mclk_table.dpm_levels[i].value =
+ golden_dpm_table->mclk_table.dpm_levels[i].value +
+ (golden_dpm_table->mclk_table.dpm_levels[i].value *
+ clock_percent) / 100;
+
+ } else if (golden_dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value > mclk) {
+ clock_percent = (
+ (golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value - mclk)
+ * 100)
+ / golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count-1].value;
+
+ dpm_table->mclk_table.dpm_levels[i].value =
+ golden_dpm_table->mclk_table.dpm_levels[i].value -
+ (golden_dpm_table->mclk_table.dpm_levels[i].value *
+ clock_percent) / 100;
+ } else
+ dpm_table->mclk_table.dpm_levels[i].value =
+ golden_dpm_table->mclk_table.dpm_levels[i].value;
+ }
+ }
+ }
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
+ result = polaris10_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
+ return result);
+ }
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
+		/* populate MCLK dpm table to SMU7 */
+ result = polaris10_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
+ return result);
+ }
+
+ return result;
+}
+
+static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
+ struct polaris10_single_dpm_table *dpm_table,
+ uint32_t low_limit, uint32_t high_limit)
+{
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->count; i++) {
+ if ((dpm_table->dpm_levels[i].value < low_limit)
+ || (dpm_table->dpm_levels[i].value > high_limit))
+ dpm_table->dpm_levels[i].enabled = false;
+ else
+ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ return 0;
+}
+
+static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
+ const struct polaris10_power_state *polaris10_ps)
+{
+ int result = 0;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t high_limit_count;
+
+ PP_ASSERT_WITH_CODE((polaris10_ps->performance_level_count >= 1),
+ "power state did not have any performance level",
+ return -1);
+
+ high_limit_count = (1 == polaris10_ps->performance_level_count) ? 0 : 1;
+
+ polaris10_trim_single_dpm_states(hwmgr,
+ &(data->dpm_table.sclk_table),
+ polaris10_ps->performance_levels[0].engine_clock,
+ polaris10_ps->performance_levels[high_limit_count].engine_clock);
+
+ polaris10_trim_single_dpm_states(hwmgr,
+ &(data->dpm_table.mclk_table),
+ polaris10_ps->performance_levels[0].memory_clock,
+ polaris10_ps->performance_levels[high_limit_count].memory_clock);
+
+ return result;
+}
+
+static int polaris10_generate_dpm_level_enable_mask(
+ struct pp_hwmgr *hwmgr, const void *input)
+{
+ int result;
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ const struct polaris10_power_state *polaris10_ps =
+ cast_const_phw_polaris10_power_state(states->pnew_state);
+
+ result = polaris10_trim_dpm_states(hwmgr, polaris10_ps);
+ if (result)
+ return result;
+
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
+
+ return 0;
+}
+
+int polaris10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ PPSMC_MSG_UVDDPM_Enable :
+ PPSMC_MSG_UVDDPM_Disable);
+}
+
+int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ PPSMC_MSG_VCEDPM_Enable :
+ PPSMC_MSG_VCEDPM_Disable);
+}
+
+int polaris10_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
+{
+	return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ PPSMC_MSG_SAMUDPM_Enable :
+ PPSMC_MSG_SAMUDPM_Disable);
+}
+
+int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (!bgate) {
+ data->smc_state_table.UvdBootLevel = 0;
+ if (table_info->mm_dep_table->count > 0)
+ data->smc_state_table.UvdBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+ mm_boot_level_offset = data->dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, UvdBootLevel);
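+		/* align the byte offset down to a 4-byte boundary for the dword-wide SMC access */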
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0x00FFFFFF;
+ mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDDPM) ||
+ phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (uint32_t)(1 << data->smc_state_table.UvdBootLevel));
+ }
+
+ return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate);
+}
+
+static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
+{
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ const struct polaris10_power_state *polaris10_nps =
+ cast_const_phw_polaris10_power_state(states->pnew_state);
+ const struct polaris10_power_state *polaris10_cps =
+ cast_const_phw_polaris10_power_state(states->pcurrent_state);
+
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (polaris10_nps->vce_clks.evclk > 0 &&
+ (polaris10_cps == NULL || polaris10_cps->vce_clks.evclk == 0)) {
+
+ data->smc_state_table.VceBootLevel =
+ (uint8_t) (table_info->mm_dep_table->count - 1);
+
+ mm_boot_level_offset = data->dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFF00FFFF;
+ mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+		if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
+			smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+					PPSMC_MSG_VCEDPM_SetEnabledMask,
+					(uint32_t)1 << data->smc_state_table.VceBootLevel);
+
+			polaris10_enable_disable_vce_dpm(hwmgr, true);
+		}
+	} else if (polaris10_nps->vce_clks.evclk == 0 &&
+			polaris10_cps != NULL &&
+			polaris10_cps->vce_clks.evclk > 0)
+		polaris10_enable_disable_vce_dpm(hwmgr, false);
+
+ return 0;
+}
+
+int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t mm_boot_level_offset, mm_boot_level_value;
+
+ if (!bgate) {
+ data->smc_state_table.SamuBootLevel = 0;
+ mm_boot_level_offset = data->dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
+ mm_boot_level_offset /= 4;
+ mm_boot_level_offset *= 4;
+ mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset);
+ mm_boot_level_value &= 0xFFFFFF00;
+ mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
+ cgs_write_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StablePState))
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (uint32_t)(1 << data->smc_state_table.SamuBootLevel));
+ }
+
+ return polaris10_enable_disable_samu_dpm(hwmgr, !bgate);
+}
+
+static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = polaris10_copy_bytes_to_smc(
+ hwmgr->smumgr,
+ data->dpm_table_start +
+ offsetof(SMU74_Discrete_DpmTable,
+ LowSclkInterruptThreshold),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ data->sram_end);
+ }
+
+ return result;
+}
+
+static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return polaris10_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (0 == data->need_update_smu7_dpm_table)
+ return 0;
+
+ if ((0 == data->sclk_dpm_key_disabled) &&
+ (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+
+ PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
+ "Trying to Unfreeze SCLK DPM when DPM is disabled",
+ );
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+ "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
+ return -1);
+ }
+
+ if ((0 == data->mclk_dpm_key_disabled) &&
+ (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+
+ PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr),
+ "Trying to Unfreeze MCLK DPM when DPM is disabled",
+ );
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+				PPSMC_MSG_MCLKDPM_UnfreezeLevel),
+ "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
+ return -1);
+ }
+
+ data->need_update_smu7_dpm_table = 0;
+
+ return 0;
+}
+
+static int polaris10_notify_link_speed_change_after_state_change(
+ struct pp_hwmgr *hwmgr, const void *input)
+{
+ const struct phm_set_power_state_input *states =
+ (const struct phm_set_power_state_input *)input;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ const struct polaris10_power_state *polaris10_ps =
+ cast_const_phw_polaris10_power_state(states->pnew_state);
+ uint16_t target_link_speed = polaris10_get_maximum_link_speed(hwmgr, polaris10_ps);
+ uint8_t request;
+
+ if (data->pspp_notify_required) {
+ if (target_link_speed == PP_PCIEGen3)
+ request = PCIE_PERF_REQ_GEN3;
+ else if (target_link_speed == PP_PCIEGen2)
+ request = PCIE_PERF_REQ_GEN2;
+ else
+ request = PCIE_PERF_REQ_GEN1;
+
+ if (request == PCIE_PERF_REQ_GEN1 &&
+ phm_get_current_pcie_speed(hwmgr) > 0)
+ return 0;
+
+		if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
+			if (PP_PCIEGen2 == target_link_speed)
+				printk(KERN_ERR "PSPP request to switch to Gen2 from Gen3 Failed!\n");
+			else
+				printk(KERN_ERR "PSPP request to switch to Gen1 from Gen2 Failed!\n");
+		}
+ }
+
+ return 0;
+}
+
+static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+ return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+}
+
+static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+ int tmp_result, result = 0;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ tmp_result = polaris10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to find DPM states clocks in DPM table!",
+ result = tmp_result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PCIEPerformanceRequest)) {
+ tmp_result =
+ polaris10_request_link_speed_change_before_state_change(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to request link speed change before state change!",
+ result = tmp_result);
+ }
+
+ tmp_result = polaris10_freeze_sclk_mclk_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
+
+ tmp_result = polaris10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to populate and upload SCLK MCLK DPM levels!",
+ result = tmp_result);
+
+ tmp_result = polaris10_generate_dpm_level_enable_mask(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to generate DPM level enabled mask!",
+ result = tmp_result);
+
+ tmp_result = polaris10_update_vce_dpm(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to update VCE DPM!",
+ result = tmp_result);
+
+ tmp_result = polaris10_update_sclk_threshold(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to update SCLK threshold!",
+ result = tmp_result);
+
+ tmp_result = polaris10_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to program memory timing parameters!",
+ result = tmp_result);
+
+ tmp_result = polaris10_notify_smc_display(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to notify smc display settings!",
+ result = tmp_result);
+
+ tmp_result = polaris10_unfreeze_sclk_mclk_dpm(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to unfreeze SCLK MCLK DPM!",
+ result = tmp_result);
+
+ tmp_result = polaris10_upload_dpm_level_enable_mask(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to upload DPM level enabled mask!",
+ result = tmp_result);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PCIEPerformanceRequest)) {
+ tmp_result =
+ polaris10_notify_link_speed_change_after_state_change(hwmgr, input);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to notify link speed change after state change!",
+ result = tmp_result);
+ }
+ data->apply_optimized_settings = false;
+ return result;
+}
+
+static int polaris10_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
+{
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
+
+ if (phm_is_hw_access_blocked(hwmgr))
+ return 0;
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
+}
+
+
+int polaris10_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
+{
+ PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
+
+ return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
+}
+
+int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
+{
+ uint32_t num_active_displays = 0;
+ struct cgs_display_info info = {0};
+ info.mode_info = NULL;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ num_active_displays = info.display_count;
+
+ if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
+ polaris10_notify_smc_display_change(hwmgr, false);
+
+ return 0;
+}
+
+/**
+* Programs the display gap
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @return always OK
+*/
+int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t num_active_displays = 0;
+ uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
+ uint32_t display_gap2;
+ uint32_t pre_vbi_time_in_us;
+ uint32_t frame_time_in_us;
+ uint32_t ref_clock;
+ uint32_t refresh_rate = 0;
+ struct cgs_display_info info = {0};
+ struct cgs_mode_info mode_info;
+
+ info.mode_info = &mode_info;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ num_active_displays = info.display_count;
+
+ display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
+
+ ref_clock = mode_info.ref_clock;
+ refresh_rate = mode_info.refresh_rate;
+
+ if (0 == refresh_rate)
+ refresh_rate = 60;
+
+ frame_time_in_us = 1000000 / refresh_rate;
+
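+	/* pre-VBI window: one frame minus the vblank time and a 200 us margin */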
+ pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
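+	/* twice the frame time, in 100 us units (assumed granularity of PPSMC_MSG_SetVBITimeout) */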
+ data->frame_time_x2 = frame_time_in_us * 2 / 100;
+
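+	/* convert the pre-VBI window to reference clock cycles; ref_clock is assumed to be in 10 kHz units */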
+ display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, PreVBlankGap), 0x64);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
+
+ return 0;
+}
+
+
+int polaris10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+{
+ return polaris10_program_display_gap(hwmgr);
+}
+
+/**
+* Set the maximum target operating fan output RPM
+*
+* @param hwmgr the address of the powerplay hardware manager.
+* @param us_max_fan_rpm the maximum operating fan RPM value.
+* @return The response that came from the SMC.
+*/
+static int polaris10_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
+{
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
+
+ if (phm_is_hw_access_blocked(hwmgr))
+ return 0;
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
+}
+
+int polaris10_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
+ const void *thermal_interrupt_info)
+{
+ return 0;
+}
+
+bool polaris10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ bool is_update_required = false;
+ struct cgs_display_info info = {0, 0, NULL};
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+
+ if (data->display_timing.num_existing_displays != info.display_count)
+ is_update_required = true;
+/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+ cgs_get_min_clock_settings(hwmgr->device, &min_clocks);
+ if (min_clocks.engineClockInSR != data->display_timing.minClockInSR &&
+ (min_clocks.engineClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK ||
+ data->display_timing.minClockInSR >= POLARIS10_MINIMUM_ENGINE_CLOCK))
+ is_update_required = true;
+*/
+ return is_update_required;
+}
+
+static inline bool polaris10_are_power_levels_equal(const struct polaris10_performance_level *pl1,
+ const struct polaris10_performance_level *pl2)
+{
+ return ((pl1->memory_clock == pl2->memory_clock) &&
+ (pl1->engine_clock == pl2->engine_clock) &&
+ (pl1->pcie_gen == pl2->pcie_gen) &&
+ (pl1->pcie_lane == pl2->pcie_lane));
+}
+
+int polaris10_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
+{
+ const struct polaris10_power_state *psa = cast_const_phw_polaris10_power_state(pstate1);
+ const struct polaris10_power_state *psb = cast_const_phw_polaris10_power_state(pstate2);
+ int i;
+
+ if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
+ return -EINVAL;
+
+ /* If the two states don't even have the same number of performance levels they cannot be the same state. */
+ if (psa->performance_level_count != psb->performance_level_count) {
+ *equal = false;
+ return 0;
+ }
+
+ for (i = 0; i < psa->performance_level_count; i++) {
+ if (!polaris10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
+ /* If we have found even one performance level pair that is different the states are different. */
+ *equal = false;
+ return 0;
+ }
+ }
+
+ /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
+ *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
+ *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
+ *equal &= (psa->sclk_threshold == psb->sclk_threshold);
+
+ return 0;
+}
+
+int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ uint32_t vbios_version;
+
+ /* Read MC indirect register offset 0x9F bits [3:0] to see if VBIOS has already loaded a full version of MC ucode or not.*/
+
+ phm_get_mc_microcode_version(hwmgr);
+ vbios_version = hwmgr->microcode_version_info.MC & 0xf;
+ /* Full version of MC ucode has already been loaded. */
+ if (vbios_version == 0) {
+ data->need_long_memory_training = false;
+ return 0;
+ }
+
+ data->need_long_memory_training = false;
+
+/*
+ * PPMCME_FirmwareDescriptorEntry *pfd = NULL;
+ pfd = &tonga_mcmeFirmware;
+ if (0 == PHM_READ_FIELD(hwmgr->device, MC_SEQ_SUP_CNTL, RUN))
+ polaris10_load_mc_microcode(hwmgr, pfd->dpmThreshold,
+ pfd->cfgArray, pfd->cfgSize, pfd->ioDebugArray,
+ pfd->ioDebugSize, pfd->ucodeArray, pfd->ucodeSize);
+*/
+ return 0;
+}
+
+/**
+ * Read clock related registers.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0
+ */
+static int polaris10_read_clock_registers(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ data->clock_registers.vCG_SPLL_FUNC_CNTL = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL)
+ & CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK;
+
+ data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2)
+ & CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
+
+ data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4)
+ & CG_SPLL_FUNC_CNTL_4__SPLL_SPARE_MASK;
+
+ return 0;
+}
+
+/**
+ * Find out if memory is GDDR5.
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0
+ */
+static int polaris10_get_memory_type(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t temp;
+
+ temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
+
+ data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
+ ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
+ MC_SEQ_MISC0_GDDR5_SHIFT));
+
+ return 0;
+}
+
+/**
+ * Enables Dynamic Power Management by SMC
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0
+ */
+static int polaris10_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
+{
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ GENERAL_PWRMGT, STATIC_PM_EN, 1);
+
+ return 0;
+}
+
+/**
+ * Initialize PowerGating States for different engines
+ *
+ * @param hwmgr the address of the powerplay hardware manager.
+ * @return always 0
+ */
+static int polaris10_init_power_gate_state(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ data->uvd_power_gated = false;
+ data->vce_power_gated = false;
+ data->samu_power_gated = false;
+
+ return 0;
+}
+
+static int polaris10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ data->low_sclk_interrupt_threshold = 0;
+
+ return 0;
+}
+
+int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+ int tmp_result, result = 0;
+
+ polaris10_upload_mc_firmware(hwmgr);
+
+ tmp_result = polaris10_read_clock_registers(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to read clock registers!", result = tmp_result);
+
+ tmp_result = polaris10_get_memory_type(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to get memory type!", result = tmp_result);
+
+ tmp_result = polaris10_enable_acpi_power_management(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to enable ACPI power management!", result = tmp_result);
+
+ tmp_result = polaris10_init_power_gate_state(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to init power gate state!", result = tmp_result);
+
+ tmp_result = phm_get_mc_microcode_version(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to get MC microcode version!", result = tmp_result);
+
+ tmp_result = polaris10_init_sclk_threshold(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to init sclk threshold!", result = tmp_result);
+
+ return result;
+}
+
+static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (!data->soft_pp_table) {
+ data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size,
+ GFP_KERNEL);
+ if (!data->soft_pp_table)
+ return -ENOMEM;
+ }
+
+	*table = (char *)data->soft_pp_table;
+
+ return hwmgr->soft_pp_table_size;
+}
+
+static int polaris10_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (!data->soft_pp_table) {
+ data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+ if (!data->soft_pp_table)
+ return -ENOMEM;
+ }
+
+ memcpy(data->soft_pp_table, buf, size);
+
+ hwmgr->soft_pp_table = data->soft_pp_table;
+
+ /* TODO: re-init powerplay to implement modified pptable */
+
+ return 0;
+}
+
+static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, uint32_t mask)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_SCLK:
+ if (!data->sclk_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+ break;
+ case PP_MCLK:
+ if (!data->mclk_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+ break;
+ case PP_PCIE:
+ {
+ uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ uint32_t level = 0;
+
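+		/* find the index of the highest set bit: the top PCIe level requested by the mask */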
+ while (tmp >>= 1)
+ level++;
+
+ if (!data->pcie_dpm_key_disabled)
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_PCIeDPM_ForceLevel,
+ level);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static uint16_t polaris10_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+ uint32_t speedCntl = 0;
+
+ /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
+ speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
+ ixPCIE_LC_SPEED_CNTL);
+	return (uint16_t)PHM_GET_FIELD(speedCntl,
+			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE);
+}
+
+static int polaris10_print_clock_levels(struct pp_hwmgr *hwmgr,
+ enum pp_clock_type type, char *buf)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
+ struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
+ struct polaris10_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
+ int i, now, size = 0;
+ uint32_t clock, pcie_speed;
+
+ switch (type) {
+ case PP_SCLK:
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+ clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (clock > sclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < sclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, sclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_MCLK:
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+ clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (clock > mclk_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < mclk_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, mclk_table->dpm_levels[i].value / 100,
+ (i == now) ? "*" : "");
+ break;
+ case PP_PCIE:
+ pcie_speed = polaris10_get_current_pcie_speed(hwmgr);
+ for (i = 0; i < pcie_table->count; i++) {
+ if (pcie_speed != pcie_table->dpm_levels[i].value)
+ continue;
+ break;
+ }
+ now = i;
+
+ for (i = 0; i < pcie_table->count; i++)
+ size += sprintf(buf + size, "%d: %s %s\n", i,
+ (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x8" :
+ (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
+ (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
+ (i == now) ? "*" : "");
+ break;
+ default:
+ break;
+ }
+ return size;
+}
+
+static int polaris10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+ if (mode) {
+ /* stop auto-manage */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl))
+ polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
+ polaris10_fan_ctrl_set_static_mode(hwmgr, mode);
+ } else
+ /* restart auto-manage */
+ polaris10_fan_ctrl_reset_fan_speed_to_default(hwmgr);
+
+ return 0;
+}
+
+static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr->fan_ctrl_is_in_default_mode)
+ return hwmgr->fan_ctrl_default_mode;
+ else
+ return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL2, FDO_PWM_MODE);
+}
+
+static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
+ .backend_init = &polaris10_hwmgr_backend_init,
+ .backend_fini = &polaris10_hwmgr_backend_fini,
+ .asic_setup = &polaris10_setup_asic_task,
+ .dynamic_state_management_enable = &polaris10_enable_dpm_tasks,
+ .apply_state_adjust_rules = polaris10_apply_state_adjust_rules,
+ .force_dpm_level = &polaris10_force_dpm_level,
+ .power_state_set = polaris10_set_power_state_tasks,
+ .get_power_state_size = polaris10_get_power_state_size,
+ .get_mclk = polaris10_dpm_get_mclk,
+ .get_sclk = polaris10_dpm_get_sclk,
+ .patch_boot_state = polaris10_dpm_patch_boot_state,
+ .get_pp_table_entry = polaris10_get_pp_table_entry,
+ .get_num_of_pp_table_entries = tonga_get_number_of_powerplay_table_entries,
+ .print_current_perforce_level = polaris10_print_current_perforce_level,
+ .powerdown_uvd = polaris10_phm_powerdown_uvd,
+ .powergate_uvd = polaris10_phm_powergate_uvd,
+ .powergate_vce = polaris10_phm_powergate_vce,
+ .disable_clock_power_gating = polaris10_phm_disable_clock_power_gating,
+ .update_clock_gatings = polaris10_phm_update_clock_gatings,
+ .notify_smc_display_config_after_ps_adjustment = polaris10_notify_smc_display_config_after_ps_adjustment,
+ .display_config_changed = polaris10_display_configuration_changed_task,
+ .set_max_fan_pwm_output = polaris10_set_max_fan_pwm_output,
+ .set_max_fan_rpm_output = polaris10_set_max_fan_rpm_output,
+ .get_temperature = polaris10_thermal_get_temperature,
+ .stop_thermal_controller = polaris10_thermal_stop_thermal_controller,
+ .get_fan_speed_info = polaris10_fan_ctrl_get_fan_speed_info,
+ .get_fan_speed_percent = polaris10_fan_ctrl_get_fan_speed_percent,
+ .set_fan_speed_percent = polaris10_fan_ctrl_set_fan_speed_percent,
+ .reset_fan_speed_to_default = polaris10_fan_ctrl_reset_fan_speed_to_default,
+ .get_fan_speed_rpm = polaris10_fan_ctrl_get_fan_speed_rpm,
+ .set_fan_speed_rpm = polaris10_fan_ctrl_set_fan_speed_rpm,
+ .uninitialize_thermal_controller = polaris10_thermal_ctrl_uninitialize_thermal_controller,
+ .register_internal_thermal_interrupt = polaris10_register_internal_thermal_interrupt,
+ .check_smc_update_required_for_display_configuration = polaris10_check_smc_update_required_for_display_configuration,
+ .check_states_equal = polaris10_check_states_equal,
+ .set_fan_control_mode = polaris10_set_fan_control_mode,
+ .get_fan_control_mode = polaris10_get_fan_control_mode,
+ .get_pp_table = polaris10_get_pp_table,
+ .set_pp_table = polaris10_set_pp_table,
+ .force_clock_level = polaris10_force_clock_level,
+ .print_clock_levels = polaris10_print_clock_levels,
+ .enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating,
+};
+
+int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data;
+
+	data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ hwmgr->backend = data;
+ hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
+ hwmgr->pptable_func = &tonga_pptable_funcs;
+ pp_polaris10_thermal_initialize(hwmgr);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
new file mode 100644
index 000000000..afc343482
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
@@ -0,0 +1,361 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef POLARIS10_HWMGR_H
+#define POLARIS10_HWMGR_H
+
+#include "hwmgr.h"
+#include "smu74.h"
+#include "smu74_discrete.h"
+#include "ppatomctrl.h"
+#include "polaris10_ppsmc.h"
+#include "polaris10_powertune.h"
+
+#define POLARIS10_MAX_HARDWARE_POWERLEVELS 2
+
+#define POLARIS10_VOLTAGE_CONTROL_NONE 0x0
+#define POLARIS10_VOLTAGE_CONTROL_BY_GPIO 0x1
+#define POLARIS10_VOLTAGE_CONTROL_BY_SVID2 0x2
+#define POLARIS10_VOLTAGE_CONTROL_MERGED 0x3
+
+#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
+#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
+#define DPMTABLE_UPDATE_SCLK 0x00000004
+#define DPMTABLE_UPDATE_MCLK 0x00000008
+
+struct polaris10_performance_level {
+ uint32_t memory_clock;
+ uint32_t engine_clock;
+ uint16_t pcie_gen;
+ uint16_t pcie_lane;
+};
+
+struct polaris10_uvd_clocks {
+ uint32_t vclk;
+ uint32_t dclk;
+};
+
+struct polaris10_vce_clocks {
+ uint32_t evclk;
+ uint32_t ecclk;
+};
+
+struct polaris10_power_state {
+ uint32_t magic;
+ struct polaris10_uvd_clocks uvd_clks;
+ struct polaris10_vce_clocks vce_clks;
+ uint32_t sam_clk;
+ uint16_t performance_level_count;
+ bool dc_compatible;
+ uint32_t sclk_threshold;
+ struct polaris10_performance_level performance_levels[POLARIS10_MAX_HARDWARE_POWERLEVELS];
+};
+
+struct polaris10_dpm_level {
+ bool enabled;
+ uint32_t value;
+ uint32_t param1;
+};
+
+#define POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define MAX_REGULAR_DPM_NUMBER 8
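+/* lower bound used for the deep-sleep engine clock checks, assumed to be in the
+ * driver's usual 10 kHz units (i.e. 25 MHz)
+ */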
+#define POLARIS10_MINIMUM_ENGINE_CLOCK 2500
+
+struct polaris10_single_dpm_table {
+ uint32_t count;
+ struct polaris10_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct polaris10_dpm_table {
+ struct polaris10_single_dpm_table sclk_table;
+ struct polaris10_single_dpm_table mclk_table;
+ struct polaris10_single_dpm_table pcie_speed_table;
+ struct polaris10_single_dpm_table vddc_table;
+ struct polaris10_single_dpm_table vddci_table;
+ struct polaris10_single_dpm_table mvdd_table;
+};
+
+struct polaris10_clock_registers {
+ uint32_t vCG_SPLL_FUNC_CNTL;
+ uint32_t vCG_SPLL_FUNC_CNTL_2;
+ uint32_t vCG_SPLL_FUNC_CNTL_3;
+ uint32_t vCG_SPLL_FUNC_CNTL_4;
+ uint32_t vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t vDLL_CNTL;
+ uint32_t vMCLK_PWRMGT_CNTL;
+ uint32_t vMPLL_AD_FUNC_CNTL;
+ uint32_t vMPLL_DQ_FUNC_CNTL;
+ uint32_t vMPLL_FUNC_CNTL;
+ uint32_t vMPLL_FUNC_CNTL_1;
+ uint32_t vMPLL_FUNC_CNTL_2;
+ uint32_t vMPLL_SS1;
+ uint32_t vMPLL_SS2;
+};
+
+#define DISABLE_MC_LOADMICROCODE 1
+#define DISABLE_MC_CFGPROGRAMMING 2
+
+struct polaris10_voltage_smio_registers {
+ uint32_t vS0_VID_LOWER_SMIO_CNTL;
+};
+
+#define POLARIS10_MAX_LEAKAGE_COUNT 8
+
+struct polaris10_leakage_voltage {
+ uint16_t count;
+ uint16_t leakage_id[POLARIS10_MAX_LEAKAGE_COUNT];
+ uint16_t actual_voltage[POLARIS10_MAX_LEAKAGE_COUNT];
+};
+
+struct polaris10_vbios_boot_state {
+ uint16_t mvdd_bootup_value;
+ uint16_t vddc_bootup_value;
+ uint16_t vddci_bootup_value;
+ uint32_t sclk_bootup_value;
+ uint32_t mclk_bootup_value;
+ uint16_t pcie_gen_bootup_value;
+ uint16_t pcie_lane_bootup_value;
+};
+
+/* Ultra Low Voltage parameter structure */
+struct polaris10_ulv_parm {
+ bool ulv_supported;
+ uint32_t cg_ulv_parameter;
+ uint32_t ulv_volt_change_delay;
+ struct polaris10_performance_level ulv_power_level;
+};
+
+struct polaris10_display_timing {
+ uint32_t min_clock_in_sr;
+ uint32_t num_existing_displays;
+};
+
+struct polaris10_dpmlevel_enable_mask {
+ uint32_t uvd_dpm_enable_mask;
+ uint32_t vce_dpm_enable_mask;
+ uint32_t acp_dpm_enable_mask;
+ uint32_t samu_dpm_enable_mask;
+ uint32_t sclk_dpm_enable_mask;
+ uint32_t mclk_dpm_enable_mask;
+ uint32_t pcie_dpm_enable_mask;
+};
+
+struct polaris10_pcie_perf_range {
+ uint16_t max;
+ uint16_t min;
+};
+struct polaris10_range_table {
+ uint32_t trans_lower_frequency; /* in 10khz */
+ uint32_t trans_upper_frequency;
+};
+
+struct polaris10_hwmgr {
+ struct polaris10_dpm_table dpm_table;
+ struct polaris10_dpm_table golden_dpm_table;
+ SMU74_Discrete_DpmTable smc_state_table;
+ struct SMU74_Discrete_Ulv ulv_setting;
+
+ struct polaris10_range_table range_table[NUM_SCLK_RANGE];
+ uint32_t voting_rights_clients0;
+ uint32_t voting_rights_clients1;
+ uint32_t voting_rights_clients2;
+ uint32_t voting_rights_clients3;
+ uint32_t voting_rights_clients4;
+ uint32_t voting_rights_clients5;
+ uint32_t voting_rights_clients6;
+ uint32_t voting_rights_clients7;
+ uint32_t static_screen_threshold_unit;
+ uint32_t static_screen_threshold;
+ uint32_t voltage_control;
+ uint32_t vddc_vddci_delta;
+
+ uint32_t active_auto_throttle_sources;
+
+ struct polaris10_clock_registers clock_registers;
+ struct polaris10_voltage_smio_registers voltage_smio_registers;
+
+ bool is_memory_gddr5;
+ uint16_t acpi_vddc;
+ bool pspp_notify_required;
+ uint16_t force_pcie_gen;
+ uint16_t acpi_pcie_gen;
+ uint32_t pcie_gen_cap;
+ uint32_t pcie_lane_cap;
+ uint32_t pcie_spc_cap;
+ struct polaris10_leakage_voltage vddc_leakage;
+ struct polaris10_leakage_voltage Vddci_leakage;
+
+ uint32_t mvdd_control;
+ uint32_t vddc_mask_low;
+ uint32_t mvdd_mask_low;
+ uint16_t max_vddc_in_pptable;
+ uint16_t min_vddc_in_pptable;
+ uint16_t max_vddci_in_pptable;
+ uint16_t min_vddci_in_pptable;
+ uint32_t mclk_strobe_mode_threshold;
+ uint32_t mclk_stutter_mode_threshold;
+ uint32_t mclk_edc_enable_threshold;
+ uint32_t mclk_edcwr_enable_threshold;
+ bool is_uvd_enabled;
+ struct polaris10_vbios_boot_state vbios_boot_state;
+
+ bool pcie_performance_request;
+ bool battery_state;
+ bool is_tlu_enabled;
+
+ /* ---- SMC SRAM Address of firmware header tables ---- */
+ uint32_t sram_end;
+ uint32_t dpm_table_start;
+ uint32_t soft_regs_start;
+ uint32_t mc_reg_table_start;
+ uint32_t fan_table_start;
+ uint32_t arb_table_start;
+
+ /* ---- Stuff originally coming from Evergreen ---- */
+ uint32_t vddci_control;
+ struct pp_atomctrl_voltage_table vddc_voltage_table;
+ struct pp_atomctrl_voltage_table vddci_voltage_table;
+ struct pp_atomctrl_voltage_table mvdd_voltage_table;
+
+ uint32_t mgcg_cgtt_local2;
+ uint32_t mgcg_cgtt_local3;
+ uint32_t gpio_debug;
+ uint32_t mc_micro_code_feature;
+ uint32_t highest_mclk;
+ uint16_t acpi_vddci;
+ uint8_t mvdd_high_index;
+ uint8_t mvdd_low_index;
+ bool dll_default_on;
+ bool performance_request_registered;
+
+ /* ---- Low Power Features ---- */
+ struct polaris10_ulv_parm ulv;
+
+ /* ---- CAC Stuff ---- */
+ uint32_t cac_table_start;
+ bool cac_configuration_required;
+ bool driver_calculate_cac_leakage;
+ bool cac_enabled;
+
+ /* ---- DPM2 Parameters ---- */
+ uint32_t power_containment_features;
+ bool enable_dte_feature;
+ bool enable_tdc_limit_feature;
+ bool enable_pkg_pwr_tracking_feature;
+ bool disable_uvd_power_tune_feature;
+ const struct polaris10_pt_defaults *power_tune_defaults;
+ struct SMU74_Discrete_PmFuses power_tune_table;
+ uint32_t dte_tj_offset;
+ uint32_t fast_watermark_threshold;
+
+ /* ---- Phase Shedding ---- */
+ bool vddc_phase_shed_control;
+
+ /* ---- DI/DT ---- */
+ struct polaris10_display_timing display_timing;
+ uint32_t bif_sclk_table[SMU74_MAX_LEVELS_LINK];
+
+ /* ---- Thermal Temperature Setting ---- */
+ struct polaris10_dpmlevel_enable_mask dpm_level_enable_mask;
+ uint32_t need_update_smu7_dpm_table;
+ uint32_t sclk_dpm_key_disabled;
+ uint32_t mclk_dpm_key_disabled;
+ uint32_t pcie_dpm_key_disabled;
+ uint32_t min_engine_clocks;
+ struct polaris10_pcie_perf_range pcie_gen_performance;
+ struct polaris10_pcie_perf_range pcie_lane_performance;
+ struct polaris10_pcie_perf_range pcie_gen_power_saving;
+ struct polaris10_pcie_perf_range pcie_lane_power_saving;
+ bool use_pcie_performance_levels;
+ bool use_pcie_power_saving_levels;
+ uint32_t activity_target[SMU74_MAX_LEVELS_GRAPHICS];
+ uint32_t mclk_activity_target;
+ uint32_t mclk_dpm0_activity_target;
+ uint32_t low_sclk_interrupt_threshold;
+ uint32_t last_mclk_dpm_enable_mask;
+ bool uvd_enabled;
+
+ /* ---- Power Gating States ---- */
+ bool uvd_power_gated;
+ bool vce_power_gated;
+ bool samu_power_gated;
+ bool need_long_memory_training;
+
+ /* Application power optimization parameters */
+ bool update_up_hyst;
+ bool update_down_hyst;
+ uint32_t down_hyst;
+ uint32_t up_hyst;
+ uint32_t disable_dpm_mask;
+ bool apply_optimized_settings;
+
+ /* soft pptable for re-uploading into smu */
+ void *soft_pp_table;
+
+ uint32_t avfs_vdroop_override_setting;
+ bool apply_avfs_cks_off_voltage;
+ uint32_t frame_time_x2;
+};
+
+/* To convert to Q8.8 format for firmware */
+#define POLARIS10_Q88_FORMAT_CONVERSION_UNIT 256
+
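
A quick illustration of the Q8.8 convention this constant encodes: multiplying by 256 moves a real value into 8.8 fixed point. A minimal standalone sketch, assuming the firmware consumes plain 16-bit words (the helper name is illustrative, not part of the patch):

    #include <stdint.h>

    /* Convert a value given in hundredths (e.g. 1.50 -> 150) to Q8.8,
     * rounding to nearest: multiply by 256, then divide by 100. */
    static uint16_t to_q88_from_centi(uint32_t centi)
    {
        return (uint16_t)((centi * 256 + 50) / 100);
    }

    /* to_q88_from_centi(150) == 384 == 0x0180, i.e. 1.5 in Q8.8 */
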
+enum Polaris10_I2CLineID {
+ Polaris10_I2CLineID_DDC1 = 0x90,
+ Polaris10_I2CLineID_DDC2 = 0x91,
+ Polaris10_I2CLineID_DDC3 = 0x92,
+ Polaris10_I2CLineID_DDC4 = 0x93,
+ Polaris10_I2CLineID_DDC5 = 0x94,
+ Polaris10_I2CLineID_DDC6 = 0x95,
+ Polaris10_I2CLineID_SCLSDA = 0x96,
+ Polaris10_I2CLineID_DDCVGA = 0x97
+};
+
+#define POLARIS10_I2C_DDC1DATA 0
+#define POLARIS10_I2C_DDC1CLK 1
+#define POLARIS10_I2C_DDC2DATA 2
+#define POLARIS10_I2C_DDC2CLK 3
+#define POLARIS10_I2C_DDC3DATA 4
+#define POLARIS10_I2C_DDC3CLK 5
+#define POLARIS10_I2C_SDA 40
+#define POLARIS10_I2C_SCL 41
+#define POLARIS10_I2C_DDC4DATA 65
+#define POLARIS10_I2C_DDC4CLK 66
+#define POLARIS10_I2C_DDC5DATA 0x48
+#define POLARIS10_I2C_DDC5CLK 0x49
+#define POLARIS10_I2C_DDC6DATA 0x4a
+#define POLARIS10_I2C_DDC6CLK 0x4b
+#define POLARIS10_I2C_DDCVGADATA 0x4c
+#define POLARIS10_I2C_DDCVGACLK 0x4d
+
+#define POLARIS10_UNUSED_GPIO_PIN 0x7F
+
+int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
+
+int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
+int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
+int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
new file mode 100644
index 000000000..ae96f14b8
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "hwmgr.h"
+#include "smumgr.h"
+#include "polaris10_hwmgr.h"
+#include "polaris10_powertune.h"
+#include "polaris10_smumgr.h"
+#include "smu74_discrete.h"
+#include "pp_debug.h"
+
+#define VOLTAGE_SCALE 4
+#define POWERTUNE_DEFAULT_SET_MAX 1
+
+static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
+ /* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
+ * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
+ { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61},
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } },
+};
+
+void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *polaris10_hwmgr = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+
+ if (table_info &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
+ table_info->cac_dtp_table->usPowerTuneDataSetID)
+ polaris10_hwmgr->power_tune_defaults =
+ &polaris10_power_tune_data_set_array
+ [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
+ else
+ polaris10_hwmgr->power_tune_defaults = &polaris10_power_tune_data_set_array[0];
+
+}
+
+static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
+{
+ uint32_t tmp;
+
+ tmp = raw_setting * 4096 / 100;
+ return (uint16_t)tmp;
+}
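
scale_fan_gain_settings() maps the raw 0-100 fan-gain setting onto the 0-4096 scale that the SMC fan gain fields appear to use. A quick check of the arithmetic, assuming raw_setting is a percentage:

    /* 25  -> 25  * 4096 / 100 = 1024  (quarter of full scale)
     * 100 -> 100 * 4096 / 100 = 4096  (full scale)
     * 33  -> 33  * 4096 / 100 = 1351  (truncating integer divide) */
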
+
+int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
+ SMU74_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
+ struct pp_advance_fan_control_parameters *fan_table =
+ &hwmgr->thermal_controller.advanceFanControlParameters;
+ int i, j, k;
+ const uint16_t *pdef1;
+ const uint16_t *pdef2;
+
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128));
+
+ PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
+ "Target Operating Temp is out of Range!",
+ );
+
+ dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTargetOperatingTemp * 256);
+ dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
+ cac_dtp_table->usTemperatureLimitHotspot * 256);
+ dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainEdge));
+ dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
+ scale_fan_gain_settings(fan_table->usFanGainHotspot));
+
+ pdef1 = defaults->BAPMTI_R;
+ pdef2 = defaults->BAPMTI_RC;
+
+ for (i = 0; i < SMU74_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU74_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU74_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1);
+ dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2);
+ pdef1++;
+ pdef2++;
+ }
+ }
+ }
+
+ return 0;
+}
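
The defaults store BAPMTI_R and BAPMTI_RC flattened to SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS entries, and the pointer walk above consumes them in row-major order. A self-contained sketch of that equivalence, using illustrative dimensions rather than the SMU74 constants:

    #include <stdio.h>

    enum { ITER = 2, SRC = 3, SNK = 4 };

    int main(void)
    {
        int flat[ITER * SRC * SNK];
        int i, j, k, n = 0;

        for (i = 0; i < ITER * SRC * SNK; i++)
            flat[i] = i;

        /* Walking a pointer through flat[] visits the same elements as
         * row-major indexing flat[(i * SRC + j) * SNK + k]. */
        for (i = 0; i < ITER; i++)
            for (j = 0; j < SRC; j++)
                for (k = 0; k < SNK; k++)
                    if (flat[(i * SRC + j) * SNK + k] != n++)
                        printf("mismatch at %d/%d/%d\n", i, j, k);
        return 0;
    }
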
+
+static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
+
+ data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
+ data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
+ data->power_tune_table.SviLoadLineTrimVddC = 3;
+ data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
+
+ tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
+ data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
+ data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
+
+ return 0;
+}
+
+static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ const struct polaris10_pt_defaults *defaults = data->power_tune_defaults;
+ uint32_t temp;
+
+ if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
+ fuse_table_offset +
+ offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, data->sram_end))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
+ return -EINVAL);
+ else {
+ data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+ data->power_tune_table.LPMLTemperatureMin =
+ (uint8_t)((temp >> 16) & 0xff);
+ data->power_tune_table.LPMLTemperatureMax =
+ (uint8_t)((temp >> 8) & 0xff);
+ data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+ }
+ return 0;
+}
+
+static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+ || hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity == 0)
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ data->power_tune_table.GnbLPML[i] = 0;
+
+ return 0;
+}
+
+static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ uint16_t hi_sidd, lo_sidd;
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+
+ hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(hi_sidd);
+ data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(lo_sidd);
+
+ return 0;
+}
+
+int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ uint32_t pm_fuse_table_offset;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (polaris10_read_smc_sram_dword(hwmgr->smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU74_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, data->sram_end))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to get pm_fuse_table_offset Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_svi_load_line(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate SviLoadLine Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_tdc_limit(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TDCLimit Failed!", return -EINVAL);
+
+ if (polaris10_populate_dw8(hwmgr, pm_fuse_table_offset))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate TdcWaterfallCtl, "
+ "LPMLTemperature Min and Max Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_temperature_scaler(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate LPMLTemperatureScaler Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_fuzzy_fan(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate Fuzzy Fan Control parameters Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_gnb_lpml(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Failed!",
+ return -EINVAL);
+
+ if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate GnbLPML Min and Max Vid Failed!",
+ return -EINVAL);
+
+ if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
+ "Sidd Failed!", return -EINVAL);
+
+ if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ (uint8_t *)&data->power_tune_table,
+ (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to download PmFuseTable Failed!",
+ return -EINVAL);
+ }
+ return 0;
+}
+
+int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ int result = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC)) {
+ int smc_result;
+
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_EnableCac));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to enable CAC in SMC.", result = -1);
+
+ data->cac_enabled = (smc_result == 0);
+ }
+ return result;
+}
+
+int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (data->power_containment_features &
+ POWERCONTAINMENT_FEATURE_PkgPwrLimit)
+ return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_PkgPwrSetLimit, n);
+ return 0;
+}
+
+static int polaris10_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr, uint32_t target_tdp)
+{
+ return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+}
+
+int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ int smc_result;
+ int result = 0;
+
+ data->power_containment_features = 0;
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+
+ if (data->enable_tdc_limit_feature) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_TDCLimitEnable));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to enable TDCLimit in SMC.", result = -1;);
+ if (smc_result == 0)
+ data->power_containment_features |=
+ POWERCONTAINMENT_FEATURE_TDCLimit;
+ }
+
+ if (data->enable_pkg_pwr_tracking_feature) {
+ smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
+ PP_ASSERT_WITH_CODE((smc_result == 0),
+ "Failed to enable PkgPwrTracking in SMC.", result = -1;);
+ if (smc_result == 0) {
+ struct phm_cac_tdp_table *cac_table =
+ table_info->cac_dtp_table;
+ uint32_t default_limit =
+ (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
+
+ data->power_containment_features |=
+ POWERCONTAINMENT_FEATURE_PkgPwrLimit;
+
+ if (polaris10_set_power_limit(hwmgr, default_limit))
+ printk(KERN_ERR "Failed to set Default Power Limit in SMC!\n");
+ }
+ }
+ }
+ return result;
+}
+
+int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
+ int adjust_percent, target_tdp;
+ int result = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ /* adjustment percentage has already been validated */
+ adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
+ hwmgr->platform_descriptor.TDPAdjustment :
+ (-1 * hwmgr->platform_descriptor.TDPAdjustment);
+ /* The SMC expects target_tdp as a 7-bit fraction in the DPM table,
+ * but as an 8-bit fraction in messages, hence the factor of 256 here.
+ */
+ target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
+ result = polaris10_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
+ }
+
+ return result;
+}
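
A worked example of the encoding above, assuming usTDP = 100 (watts) and TDPAdjustment = 10 with positive polarity:

    /* target_tdp = (100 + 10) * (100 * 256) / 100
     *            = 110 * 256 = 28160,
     * i.e. 110.0 W in the 8.8 fixed-point message format. */
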
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
new file mode 100644
index 000000000..68bc1cb6d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef POLARIS10_POWERTUNE_H
+#define POLARIS10_POWERTUNE_H
+
+enum polaris10_pt_config_reg_type {
+ POLARIS10_CONFIGREG_MMR = 0,
+ POLARIS10_CONFIGREG_SMC_IND,
+ POLARIS10_CONFIGREG_DIDT_IND,
+ POLARIS10_CONFIGREG_CACHE,
+ POLARIS10_CONFIGREG_MAX
+};
+
+/* PowerContainment Features */
+#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
+#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
+#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
+
+struct polaris10_pt_config_reg {
+ uint32_t offset;
+ uint32_t mask;
+ uint32_t shift;
+ uint32_t value;
+ enum polaris10_pt_config_reg_type type;
+};
+
+struct polaris10_pt_defaults {
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+ uint8_t TdcWaterfallCtl;
+ uint8_t DTEAmbientTempBase;
+
+ uint32_t DisplayCac;
+ uint32_t BAPM_TEMP_GRADIENT;
+ uint16_t BAPMTI_R[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+ uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+};
+
+void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
+int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
+int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr);
+int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr);
+int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
+int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
+int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
+
+#endif /* POLARIS10_POWERTUNE_H */
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
new file mode 100644
index 000000000..b206632d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
@@ -0,0 +1,716 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <asm/div64.h>
+#include "polaris10_thermal.h"
+#include "polaris10_hwmgr.h"
+#include "polaris10_smumgr.h"
+#include "polaris10_ppsmc.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+
+int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
+ struct phm_fan_speed_info *fan_speed_info)
+{
+ if (hwmgr->thermal_controller.fanInfo.bNoFan)
+ return 0;
+
+ fan_speed_info->supports_percent_read = true;
+ fan_speed_info->supports_percent_write = true;
+ fan_speed_info->min_percent = 0;
+ fan_speed_info->max_percent = 100;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
+ hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
+ fan_speed_info->supports_rpm_read = true;
+ fan_speed_info->supports_rpm_write = true;
+ fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
+ fan_speed_info->max_rpm = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
+ } else {
+ fan_speed_info->min_rpm = 0;
+ fan_speed_info->max_rpm = 0;
+ }
+
+ return 0;
+}
+
+int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
+ uint32_t *speed)
+{
+ uint32_t duty100;
+ uint32_t duty;
+ uint64_t tmp64;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan)
+ return 0;
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ duty = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_THERMAL_STATUS, FDO_PWM_DUTY);
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (uint64_t)duty * 100;
+ do_div(tmp64, duty100);
+ *speed = (uint32_t)tmp64;
+
+ if (*speed > 100)
+ *speed = 100;
+
+ return 0;
+}
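
The readback above converts a raw duty cycle into percent using do_div() for the 64-bit division. A standalone userspace sketch of the same conversion (the function name is illustrative):

    #include <stdint.h>

    /* duty100 is the register value corresponding to 100% PWM. */
    static uint32_t duty_to_percent(uint32_t duty, uint32_t duty100)
    {
        uint64_t tmp;

        if (duty100 == 0)
            return 0; /* callers should treat this as -EINVAL */

        tmp = (uint64_t)duty * 100 / duty100; /* do_div() in the kernel */
        return tmp > 100 ? 100 : (uint32_t)tmp;
    }
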
+
+int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
+{
+ uint32_t tach_period;
+ uint32_t crystal_clock_freq;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan ||
+ (hwmgr->thermal_controller.fanInfo.
+ ucTachometerPulsesPerRevolution == 0))
+ return 0;
+
+ tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_TACH_STATUS, TACH_PERIOD);
+
+ if (tach_period == 0)
+ return -EINVAL;
+
+ crystal_clock_freq = tonga_get_xclk(hwmgr);
+
+ *speed = 60 * crystal_clock_freq * 10000 / tach_period;
+
+ return 0;
+}
+
+/**
+* Set Fan Speed Control to static mode, so that the user can decide what speed to use.
+* @param hwmgr the address of the powerplay hardware manager.
+* @param mode the fan control mode: 0 default, 1 by percent, 5 by RPM
+* @exception Should always succeed.
+*/
+int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+{
+ if (hwmgr->fan_ctrl_is_in_default_mode) {
+ hwmgr->fan_ctrl_default_mode =
+ PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL2, FDO_PWM_MODE);
+ hwmgr->tmin =
+ PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL2, TMIN);
+ hwmgr->fan_ctrl_is_in_default_mode = false;
+ }
+
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL2, TMIN, 0);
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL2, FDO_PWM_MODE, mode);
+
+ return 0;
+}
+
+/**
+* Reset Fan Speed Control to default mode.
+* @param hwmgr the address of the powerplay hardware manager.
+* @exception Should always succeed.
+*/
+int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
+{
+ if (!hwmgr->fan_ctrl_is_in_default_mode) {
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode);
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL2, TMIN, hwmgr->tmin);
+ hwmgr->fan_ctrl_is_in_default_mode = true;
+ }
+
+ return 0;
+}
+
+int polaris10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+ int result;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
+ result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_FanSpeedInTableIsRPM))
+ hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.usMaxFanRPM);
+ else
+ hwmgr->hwmgr_func->set_max_fan_pwm_output(hwmgr,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.usMaxFanPWM);
+
+ } else {
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
+ result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
+ }
+
+ if (!result && hwmgr->thermal_controller.
+ advanceFanControlParameters.ucTargetTemperature)
+ result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanTemperatureTarget,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ucTargetTemperature);
+
+ return result;
+}
+
+int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
+{
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
+}
+
+/**
+* Set Fan Speed in percent.
+* @param hwmgr the address of the powerplay hardware manager.
+* @param speed is the percentage value (0% - 100%) to be set.
+* @exception Fails if the 100% duty setting appears to be 0.
+*/
+int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
+ uint32_t speed)
+{
+ uint32_t duty100;
+ uint32_t duty;
+ uint64_t tmp64;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan)
+ return 0;
+
+ if (speed > 100)
+ speed = 100;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl))
+ polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (duty100 == 0)
+ return -EINVAL;
+
+ tmp64 = (uint64_t)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (uint32_t)tmp64;
+
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
+
+ return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+}
+
+/**
+* Reset Fan Speed to default.
+* @param hwmgr the address of the powerplay hardware manager.
+* @exception Always succeeds.
+*/
+int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
+{
+ int result;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan)
+ return 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl)) {
+ result = polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+ if (!result)
+ result = polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
+ } else
+ result = polaris10_fan_ctrl_set_default_mode(hwmgr);
+
+ return result;
+}
+
+/**
+* Set Fan Speed in RPM.
+* @param hwmgr the address of the powerplay hardware manager.
+* @param speed is the RPM value (min - max) to be set.
+* @exception Fails if the speed does not lie between min and max.
+*/
+int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
+{
+ uint32_t tach_period;
+ uint32_t crystal_clock_freq;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan ||
+ (hwmgr->thermal_controller.fanInfo.
+ ucTachometerPulsesPerRevolution == 0) ||
+ (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
+ (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
+ return 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl))
+ polaris10_fan_ctrl_stop_smc_fan_control(hwmgr);
+
+ crystal_clock_freq = tonga_get_xclk(hwmgr);
+
+ tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_TACH_STATUS, TACH_PERIOD, tach_period);
+
+ return polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+}
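
Note that this setter and polaris10_fan_ctrl_get_fan_speed_rpm() are near inverses: the readback computes 60 * xclk * 10000 / tach_period, while the setter programs 60 * xclk * 10000 / (8 * speed), where the constant 8 appears to stand for tachometer edges per revolution. A worked example, assuming tonga_get_xclk() returns 10000 (a 100 MHz reference expressed in 10 kHz units) and a 1500 RPM target:

    /* tach_period = 60 * 10000 * 10000 / (8 * 1500) = 500000 ticks */
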
+
+/**
+* Reads the current temperature from the Polaris10 thermal controller.
+*
+* @param hwmgr The address of the hardware manager.
+*/
+int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+ int temp;
+
+ temp = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_MULT_THERMAL_STATUS, CTF_TEMP);
+
+ /* Bit 9 means the reading is lower than the lowest usable value. */
+ if (temp & 0x200)
+ temp = POLARIS10_THERMAL_MAXIMUM_TEMP_READING;
+ else
+ temp = temp & 0x1ff;
+
+ temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return temp;
+}
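
A worked decode of the CTF_TEMP handling above, assuming PP_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 (i.e. the result is in millidegrees Celsius):

    /* raw = 0x05A: bit 9 clear -> temp = 90,  result = 90 * 1000 = 90000
     * raw = 0x2xx: bit 9 set   -> clamped to 255 before scaling        */
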
+
+/**
+* Set the requested temperature range for high and low alert signals
+*
+* @param hwmgr The address of the hardware manager.
+* @param low_temp Low alert temperature to be programmed
+* @param high_temp High alert temperature to be programmed
+* @exception PP_Result_BadInput if the input data is not valid.
+*/
+static int polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+ uint32_t low_temp, uint32_t high_temp)
+{
+ uint32_t low = POLARIS10_THERMAL_MINIMUM_ALERT_TEMP *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ uint32_t high = POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ if (low < low_temp)
+ low = low_temp;
+ if (high > high_temp)
+ high = high_temp;
+
+ if (low > high)
+ return -EINVAL;
+
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_THERMAL_INT, DIG_THERM_INTH,
+ (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_THERMAL_INT, DIG_THERM_INTL,
+ (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_THERMAL_CTRL, DIG_THERM_DPM,
+ (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+
+ return 0;
+}
+
+/**
+* Programs thermal controller one-time setting registers
+*
+* @param hwmgr The address of the hardware manager.
+*/
+static int polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_TACH_CTRL, EDGE_PER_REV,
+ hwmgr->thermal_controller.fanInfo.
+ ucTachometerPulsesPerRevolution - 1);
+
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28);
+
+ return 0;
+}
+
+/**
+* Enable thermal alerts on the Polaris10 thermal controller.
+*
+* @param hwmgr The address of the hardware manager.
+*/
+static int polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
+{
+ uint32_t alert;
+
+ alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_THERMAL_INT, THERM_INT_MASK);
+ alert &= ~(POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_THERMAL_INT, THERM_INT_MASK, alert);
+
+ /* send message to SMU to enable internal thermal interrupts */
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
+}
+
+/**
+* Disable thermal alerts on the Polaris10 thermal controller.
+* @param hwmgr The address of the hardware manager.
+*/
+static int polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
+{
+ uint32_t alert;
+
+ alert = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_THERMAL_INT, THERM_INT_MASK);
+ alert |= (POLARIS10_THERMAL_HIGH_ALERT_MASK | POLARIS10_THERMAL_LOW_ALERT_MASK);
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_THERMAL_INT, THERM_INT_MASK, alert);
+
+ /* send message to SMU to disable internal thermal interrupts */
+ return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
+}
+
+/**
+* Uninitialize the thermal controller.
+* Currently just disables alerts.
+* @param hwmgr The address of the hardware manager.
+*/
+int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
+{
+ int result = polaris10_thermal_disable_alert(hwmgr);
+
+ if (!hwmgr->thermal_controller.fanInfo.bNoFan)
+ polaris10_fan_ctrl_set_default_mode(hwmgr);
+
+ return result;
+}
+
+/**
+* Set up the fan table to control the fan using the SMC.
+* @param hwmgr the address of the powerplay hardware manager.
+* @param input the pointer to input data
+* @param output the pointer to output data
+* @param storage the pointer to temporary storage
+* @param result the last failure code
+* @return result from set temperature range routine
+*/
+int tf_polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result)
+{
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+ SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (data->fan_table_start == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
+ CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (duty100 == 0) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.
+ usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh -
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->
+ thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->
+ thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = tonga_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->
+ thermal_controller.advanceFanControlParameters.ulCycleDelay *
+ reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(
+ hwmgr->device, CGS_IND_REG__SMC,
+ CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ res = polaris10_copy_bytes_to_smc(hwmgr->smumgr, data->fan_table_start,
+ (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
+ data->sram_end);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanMinPwm,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ucMinimumPWMLimit);
+
+ if (!res && hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
+ res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetFanSclkTarget,
+ hwmgr->thermal_controller.
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+
+ if (res)
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ return 0;
+}
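
The recurring (50 + x) / 100 idiom in the fan table above is integer round-to-nearest when converting the 0.01 degC thermal parameters to whole degrees. A quick check:

    /* (50 + 4449) / 100 = 44   (44.49 degC rounds down)
     * (50 + 4450) / 100 = 45   (44.50 degC rounds up)  */
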
+
+/**
+* Start the fan control on the SMC.
+* @param hwmgr the address of the powerplay hardware manager.
+* @param input the pointer to input data
+* @param output the pointer to output data
+* @param storage the pointer to temporary storage
+* @param result the last failure code
+* @return result from set temperature range routine
+*/
+int tf_polaris10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result)
+{
+ /* If the fan table setup has failed, we could have disabled
+ * PHM_PlatformCaps_MicrocodeFanControl even after this function
+ * was included in the table.
+ * Make sure that we still think controlling the fan is OK.
+ */
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl)) {
+ polaris10_fan_ctrl_start_smc_fan_control(hwmgr);
+ polaris10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
+ }
+
+ return 0;
+}
+
+/**
+* Set temperature range for high and low alerts
+* @param hwmgr the address of the powerplay hardware manager.
+* @param input the pointer to input data
+* @param output the pointer to output data
+* @param storage the pointer to temporary storage
+* @param result the last failure code
+* @return result from set temperature range routine
+*/
+int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result)
+{
+ struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
+
+ if (range == NULL)
+ return -EINVAL;
+
+ return polaris10_thermal_set_temperature_range(hwmgr, range->min, range->max);
+}
+
+/**
+* Programs one-time setting registers
+* @param hwmgr the address of the powerplay hardware manager.
+* @param input the pointer to input data
+* @param output the pointer to output data
+* @param storage the pointer to temporary storage
+* @param result the last failure code
+* @return result from initialize thermal controller routine
+*/
+int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result)
+{
+ return polaris10_thermal_initialize(hwmgr);
+}
+
+/**
+* Enable high and low alerts
+* @param hwmgr the address of the powerplay hardware manager.
+* @param input the pointer to input data
+* @param output the pointer to output data
+* @param storage the pointer to temporary storage
+* @param result the last failure code
+* @return result from enable alert routine
+*/
+int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result)
+{
+ return polaris10_thermal_enable_alert(hwmgr);
+}
+
+/**
+* Disable high and low alerts
+* @param hwmgr the address of the powerplay hardware manager.
+* @param input the pointer to input data
+* @param output the pointer to output data
+* @param storage the pointer to temporary storage
+* @param result the last failure code
+* @return result from disable alert routine
+*/
+static int tf_polaris10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result)
+{
+ return polaris10_thermal_disable_alert(hwmgr);
+}
+
+static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
+ void *input, void *output, void *storage, int result)
+{
+ int ret;
+ struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+
+ if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+ return 0;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+ if (ret)
+ return ret;
+
+ ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
+ 0 : -1;
+
+ if (!ret)
+ /* If this param is not changed, this function could fire unnecessarily */
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_COMPLETED_PREVIOUSLY;
+
+ return ret;
+}
+
+static const struct phm_master_table_item
+polaris10_thermal_start_thermal_controller_master_list[] = {
+ {NULL, tf_polaris10_thermal_initialize},
+ {NULL, tf_polaris10_thermal_set_temperature_range},
+ {NULL, tf_polaris10_thermal_enable_alert},
+ {NULL, tf_polaris10_thermal_avfs_enable},
+/* We should restrict performance levels to low before we halt the SMC.
+ * On the other hand we are still in boot state when we do this
+ * so it would be pointless.
+ * If this assumption changes we have to revisit this table.
+ */
+ {NULL, tf_polaris10_thermal_setup_fan_table},
+ {NULL, tf_polaris10_thermal_start_smc_fan_control},
+ {NULL, NULL}
+};
+
+static const struct phm_master_table_header
+polaris10_thermal_start_thermal_controller_master = {
+ 0,
+ PHM_MasterTableFlag_None,
+ polaris10_thermal_start_thermal_controller_master_list
+};
+
+static const struct phm_master_table_item
+polaris10_thermal_set_temperature_range_master_list[] = {
+ {NULL, tf_polaris10_thermal_disable_alert},
+ {NULL, tf_polaris10_thermal_set_temperature_range},
+ {NULL, tf_polaris10_thermal_enable_alert},
+ {NULL, NULL}
+};
+
+static const struct phm_master_table_header
+polaris10_thermal_set_temperature_range_master = {
+ 0,
+ PHM_MasterTableFlag_None,
+ polaris10_thermal_set_temperature_range_master_list
+};
+
+int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
+{
+ if (!hwmgr->thermal_controller.fanInfo.bNoFan)
+ polaris10_fan_ctrl_set_default_mode(hwmgr);
+ return 0;
+}
+
+/**
+* Initializes the thermal controller related functions in the Hardware Manager structure.
+* @param hwmgr The address of the hardware manager.
+* @exception Any error code from the low-level communication.
+*/
+int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr)
+{
+ int result;
+
+ result = phm_construct_table(hwmgr,
+ &polaris10_thermal_set_temperature_range_master,
+ &(hwmgr->set_temperature_range));
+
+ if (!result) {
+ result = phm_construct_table(hwmgr,
+ &polaris10_thermal_start_thermal_controller_master,
+ &(hwmgr->start_thermal_controller));
+ if (result)
+ phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
+ }
+
+ if (!result)
+ hwmgr->fan_ctrl_is_in_default_mode = true;
+ return result;
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
new file mode 100644
index 000000000..62f8cbc2d
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _POLARIS10_THERMAL_H_
+#define _POLARIS10_THERMAL_H_
+
+#include "hwmgr.h"
+
+#define POLARIS10_THERMAL_HIGH_ALERT_MASK 0x1
+#define POLARIS10_THERMAL_LOW_ALERT_MASK 0x2
+
+#define POLARIS10_THERMAL_MINIMUM_TEMP_READING -256
+#define POLARIS10_THERMAL_MAXIMUM_TEMP_READING 255
+
+#define POLARIS10_THERMAL_MINIMUM_ALERT_TEMP 0
+#define POLARIS10_THERMAL_MAXIMUM_ALERT_TEMP 255
+
+#define FDO_PWM_MODE_STATIC 1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
+extern int tf_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
+extern int tf_polaris10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
+extern int tf_polaris10_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
+
+extern int polaris10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
+extern int polaris10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int polaris10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
+extern int polaris10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int polaris10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
+extern int polaris10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
+extern int polaris10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int polaris10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
+extern int pp_polaris10_thermal_initialize(struct pp_hwmgr *hwmgr);
+extern int polaris10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
+extern int polaris10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
+extern int polaris10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
+extern int polaris10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index 7cce483b0..a3c38bbd1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -1,3 +1,26 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#include <linux/errno.h>
#include "linux/delay.h"
#include "hwmgr.h"
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 2a83a4af2..90b35c5c1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -373,6 +373,37 @@ int atomctrl_get_engine_pll_dividers_vi(
return result;
}
+int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
+ uint32_t clock_value,
+ pp_atomctrl_clock_dividers_ai *dividers)
+{
+ COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_parameters;
+ int result;
+
+ pll_parameters.ulClock.ulClock = clock_value;
+ pll_parameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
+
+ result = cgs_atom_exec_cmd_table
+ (hwmgr->device,
+ GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL),
+ &pll_parameters);
+
+ if (result == 0) {
+ dividers->usSclk_fcw_frac = le16_to_cpu(pll_parameters.usSclk_fcw_frac);
+ dividers->usSclk_fcw_int = le16_to_cpu(pll_parameters.usSclk_fcw_int);
+ dividers->ucSclkPostDiv = pll_parameters.ucSclkPostDiv;
+ dividers->ucSclkVcoMode = pll_parameters.ucSclkVcoMode;
+ dividers->ucSclkPllRange = pll_parameters.ucSclkPllRange;
+ dividers->ucSscEnable = pll_parameters.ucSscEnable;
+ dividers->usSsc_fcw1_frac = le16_to_cpu(pll_parameters.usSsc_fcw1_frac);
+ dividers->usSsc_fcw1_int = le16_to_cpu(pll_parameters.usSsc_fcw1_int);
+ dividers->usPcc_fcw_int = le16_to_cpu(pll_parameters.usPcc_fcw_int);
+ dividers->usSsc_fcw_slew_frac = le16_to_cpu(pll_parameters.usSsc_fcw_slew_frac);
+ dividers->usPcc_fcw_slew_frac = le16_to_cpu(pll_parameters.usPcc_fcw_slew_frac);
+ }
+ return result;
+}
+
int atomctrl_get_dfs_pll_dividers_vi(
struct pp_hwmgr *hwmgr,
uint32_t clock_value,
@@ -618,7 +649,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
if (!getASICProfilingInfo)
return -1;
- if(getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
+ if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
(getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
return -1;
@@ -891,18 +922,18 @@ int atomctrl_calculate_voltage_evv_on_sclk(
*-----------------------
*/
- fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4,fSclk), fSM_A5));
+ fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
fC_Term = fAdd(fMargin_RO_c,
fAdd(fMultiply(fSM_A0,fLkg_FT),
- fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT,fSclk)),
+ fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
fAdd(fMultiply(fSM_A3, fSclk),
- fSubtract(fSM_A7,fRO_fused)))));
+ fSubtract(fSM_A7, fRO_fused)))));
fVDDC_base = fSubtract(fRO_fused,
fSubtract(fMargin_RO_c,
fSubtract(fSM_A3, fMultiply(fSM_A1, fSclk))));
- fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0,fSclk), fSM_A2));
+ fVDDC_base = fDivide(fVDDC_base, fAdd(fMultiply(fSM_A0, fSclk), fSM_A2));
repeat = fSubtract(fVDDC_base,
fDivide(fMargin_DC_sigma, ConvertToFraction(1000)));
@@ -916,7 +947,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
fSubtract(fRO_DC_margin,
fSubtract(fSM_A3,
fMultiply(fSM_A2, repeat))));
- fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0,repeat), fSM_A1));
+ fDC_SCLK = fDivide(fDC_SCLK, fAdd(fMultiply(fSM_A0, repeat), fSM_A1));
fSigma_DC = fSubtract(fSclk, fDC_SCLK);
@@ -996,7 +1027,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
fV_NL = fRoundUpByStepSize(fV_NL, fStepSize, 0);
if (GreaterThan(fV_max, fV_NL) &&
- (GreaterThan(fV_NL,fEVV_V) ||
+ (GreaterThan(fV_NL, fEVV_V) ||
Equal(fV_NL, fEVV_V))) {
fV_NL = fMultiply(fV_NL, ConvertToFraction(1000));
@@ -1010,10 +1041,10 @@ int atomctrl_calculate_voltage_evv_on_sclk(
}
/** atomctrl_get_voltage_evv_on_sclk gets voltage via call to ATOM COMMAND table.
- * @param hwmgr input: pointer to hwManager
+ * @param hwmgr input: pointer to hwManager
* @param voltage_type input: type of EVV voltage VDDC or VDDGFX
* @param sclk input: in 10Khz unit. DPM state SCLK frequency
- * which is define in PPTable SCLK/VDDC dependence
+ * which is defined in the PPTable SCLK/VDDC dependency
* table associated with this virtual_voltage_Id
* @param virtual_voltage_Id input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08
* @param voltage output: real voltage level in unit of mv
@@ -1205,3 +1236,112 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
return result;
}
+
+int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
+ uint8_t level)
+{
+ DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
+ int result;
+
+ memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = memory_clock & SET_CLOCK_FREQ_MASK;
+ memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = ADJUST_MC_SETTING_PARAM;
+ memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
+
+ result = cgs_atom_exec_cmd_table
+ (hwmgr->device,
+ GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
+ &memory_clock_parameters);
+
+ return result;
+}
+
+int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+ uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
+{
+ int result;
+ GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space;
+
+ get_voltage_info_param_space.ucVoltageType = voltage_type;
+ get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+ get_voltage_info_param_space.usVoltageLevel = virtual_voltage_Id;
+ get_voltage_info_param_space.ulSCLKFreq = sclk;
+
+ result = cgs_atom_exec_cmd_table(hwmgr->device,
+ GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
+ &get_voltage_info_param_space);
+
+ if (0 != result)
+ return result;
+
+ *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel;
+
+ return result;
+}
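/*
 * Note on the cast above (a sketch of the ATOM calling convention as the
 * surrounding code uses it, not something this patch states): GetVoltageInfo
 * reuses one parameter buffer for input and output, so the input structure
 * is reinterpreted as the output structure after the call, as if declared:
 *
 *	union {
 *		GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 in;
 *		GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 out;
 *	} param_space;
 */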
+
+int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table)
+{
+ int i;
+ u8 frev, crev;
+ u16 size;
+
+ ATOM_SMU_INFO_V2_1 *psmu_info =
+ (ATOM_SMU_INFO_V2_1 *)cgs_atom_get_data_table(hwmgr->device,
+ GetIndexIntoMasterTable(DATA, SMU_Info),
+ &size, &frev, &crev);
+
+ for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
+ table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
+ table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
+ table->entry[i].usFcw_pcc = psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc;
+ table->entry[i].usFcw_trans_upper = psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper;
+ table->entry[i].usRcw_trans_lower = psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower;
+ }
+
+ return 0;
+}
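/*
 * Defensive variant (an assumption, not in the patch): the loop above
 * dereferences psmu_info without checking the result of
 * cgs_atom_get_data_table(), so a hardened version might bail out first:
 *
 *	if (!psmu_info)
 *		return -EINVAL;
 */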
+
+int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param)
+{
+ ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
+
+ if (param == NULL)
+ return -EINVAL;
+
+ profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
+ cgs_atom_get_data_table(hwmgr->device,
+ GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
+ NULL, NULL, NULL);
+ if (!profile)
+ return -1;
+
+ param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0;
+ param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1;
+ param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2;
+ param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma;
+ param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean;
+ param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma;
+ param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0;
+ param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1;
+ param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2;
+ param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0;
+ param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1;
+ param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2;
+ param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+ param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+ param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+ param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1;
+ param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2;
+ param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b;
+ param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv;
+ param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
+ param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
+ param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
+ param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
+ param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor;
+ param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
+
+ return 0;
+}
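/*
 * The field-by-field copy above moves ATOM_ASIC_PROFILING_INFO_V3_6 into a
 * driver-owned pp_atom_ctrl__avfs_parameters so hwmgr code never keeps a
 * pointer into the VBIOS image. A hypothetical consumer sketch (helper name
 * assumed, not defined by this patch):
 *
 *	struct pp_atom_ctrl__avfs_parameters avfs;
 *
 *	if (!atomctrl_get_avfs_information(hwmgr, &avfs))
 *		program_avfs_fuses(hwmgr, &avfs);
 */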
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index 627420b80..1e35a9625 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -101,6 +101,23 @@ struct pp_atomctrl_clock_dividers_vi {
};
typedef struct pp_atomctrl_clock_dividers_vi pp_atomctrl_clock_dividers_vi;
+struct pp_atomctrl_clock_dividers_ai {
+ u16 usSclk_fcw_frac;
+ u16 usSclk_fcw_int;
+ u8 ucSclkPostDiv;
+ u8 ucSclkVcoMode;
+ u8 ucSclkPllRange;
+ u8 ucSscEnable;
+ u16 usSsc_fcw1_frac;
+ u16 usSsc_fcw1_int;
+ u16 usReserved;
+ u16 usPcc_fcw_int;
+ u16 usSsc_fcw_slew_frac;
+ u16 usPcc_fcw_slew_frac;
+};
+typedef struct pp_atomctrl_clock_dividers_ai pp_atomctrl_clock_dividers_ai;
+
union pp_atomctrl_s_mpll_fb_divider {
struct {
uint32_t cl_kf : 12;
@@ -204,6 +221,21 @@ struct pp_atomctrl_mc_register_address {
typedef struct pp_atomctrl_mc_register_address pp_atomctrl_mc_register_address;
+#define MAX_SCLK_RANGE 8
+
+struct pp_atom_ctrl_sclk_range_table_entry {
+ uint8_t ucVco_setting;
+ uint8_t ucPostdiv;
+ uint16_t usFcw_pcc;
+ uint16_t usFcw_trans_upper;
+ uint16_t usRcw_trans_lower;
+};
+
+struct pp_atom_ctrl_sclk_range_table {
+ struct pp_atom_ctrl_sclk_range_table_entry entry[MAX_SCLK_RANGE];
+};
+
struct pp_atomctrl_mc_reg_table {
uint8_t last; /* number of registers */
uint8_t num_entries; /* number of AC timing entries */
@@ -218,6 +250,35 @@ struct pp_atomctrl_gpio_pin_assignment {
};
typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment;
+struct pp_atom_ctrl__avfs_parameters {
+ uint32_t ulAVFS_meanNsigma_Acontant0;
+ uint32_t ulAVFS_meanNsigma_Acontant1;
+ uint32_t ulAVFS_meanNsigma_Acontant2;
+ uint16_t usAVFS_meanNsigma_DC_tol_sigma;
+ uint16_t usAVFS_meanNsigma_Platform_mean;
+ uint16_t usAVFS_meanNsigma_Platform_sigma;
+ uint32_t ulGB_VDROOP_TABLE_CKSOFF_a0;
+ uint32_t ulGB_VDROOP_TABLE_CKSOFF_a1;
+ uint32_t ulGB_VDROOP_TABLE_CKSOFF_a2;
+ uint32_t ulGB_VDROOP_TABLE_CKSON_a0;
+ uint32_t ulGB_VDROOP_TABLE_CKSON_a1;
+ uint32_t ulGB_VDROOP_TABLE_CKSON_a2;
+ uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+ uint16_t usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+ uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+ uint32_t ulAVFSGB_FUSE_TABLE_CKSON_m1;
+ uint16_t usAVFSGB_FUSE_TABLE_CKSON_m2;
+ uint32_t ulAVFSGB_FUSE_TABLE_CKSON_b;
+ uint16_t usMaxVoltage_0_25mv;
+ uint8_t ucEnableGB_VDROOP_TABLE_CKSOFF;
+ uint8_t ucEnableGB_VDROOP_TABLE_CKSON;
+ uint8_t ucEnableGB_FUSE_TABLE_CKSOFF;
+ uint8_t ucEnableGB_FUSE_TABLE_CKSON;
+ uint16_t usPSM_Age_ComFactor;
+ uint8_t ucEnableApplyAVFS_CKS_OFF_Voltage;
+ uint8_t ucReserved;
+};
+
extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
@@ -240,7 +301,14 @@ extern int atomctrl_read_efuse(void *device, uint16_t start_index,
uint16_t end_index, uint32_t mask, uint32_t *efuse);
extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug);
-
+extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_ai *dividers);
+extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
+ uint8_t level);
+extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
+ uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage);
+extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
+
+extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
index b10df328d..009bd5963 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
@@ -127,8 +127,8 @@ fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/
fInt solution = fPositiveOne; /*Starting off with baseline of 1 */
fInt error_term;
- uint32_t k_array[11] = {55452, 27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78};
- uint32_t expk_array[11] = {2560000, 160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078};
+ static const uint32_t k_array[11] = {55452, 27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78};
+ static const uint32_t expk_array[11] = {2560000, 160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078};
if (GreaterThan(fZERO, exponent)) {
exponent = fNegate(exponent);
@@ -162,8 +162,8 @@ fInt fNaturalLog(fInt value)
fInt solution = ConvertToFraction(0); /*Starting off with baseline of 0 */
fInt error_term;
- uint32_t k_array[10] = {160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078};
- uint32_t logk_array[10] = {27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78};
+ static const uint32_t k_array[10] = {160000, 40000, 20000, 15000, 12500, 11250, 10625, 10313, 10156, 10078};
+ static const uint32_t logk_array[10] = {27726, 13863, 6931, 4055, 2231, 1178, 606, 308, 155, 78};
while (GreaterThan(fAdd(value, fNegativeOne), upper_bound)) {
for (i = 0; i < 10; i++) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index aae2e8ec0..5d0f655bf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -51,6 +51,9 @@
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
#include "cgs_linux.h"
#include "eventmgr.h"
#include "amd_pcie_helpers.h"
@@ -86,17 +89,17 @@
typedef uint32_t PECI_RegistryValue;
/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ] */
-uint16_t PP_ClockStretcherLookupTable[2][4] = {
+static const uint16_t PP_ClockStretcherLookupTable[2][4] = {
{600, 1050, 3, 0},
{600, 1050, 6, 1} };
/* [FF, SS] type, [] 4 voltage ranges, and [Floor Freq, Boundary Freq, VID min , VID max] */
-uint32_t PP_ClockStretcherDDTTable[2][4][4] = {
+static const uint32_t PP_ClockStretcherDDTTable[2][4][4] = {
{ {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
{ {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%] (coming from PWR_CKS_CNTL.stretch_amount reg spec) */
-uint8_t PP_ClockStretchAmountConversion[2][6] = {
+static const uint8_t PP_ClockStretchAmountConversion[2][6] = {
{0, 1, 3, 2, 4, 5},
{0, 2, 4, 5, 6, 5} };
@@ -110,7 +113,7 @@ enum DPM_EVENT_SRC {
};
typedef enum DPM_EVENT_SRC DPM_EVENT_SRC;
-const unsigned long PhwTonga_Magic = (unsigned long)(PHM_VIslands_Magic);
+static const unsigned long PhwTonga_Magic = (unsigned long)(PHM_VIslands_Magic);
struct tonga_power_state *cast_phw_tonga_power_state(
struct pp_hw_power_state *hw_ps)
@@ -429,19 +432,20 @@ int tonga_get_evv_voltage(struct pp_hwmgr *hwmgr)
}
}
}
- PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk
- (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
- virtual_voltage_id, &vddgfx),
- "Error retrieving EVV voltage value!", continue);
-
- /* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
- PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -1);
-
- /* the voltage should not be zero nor equal to leakage ID */
- if (vddgfx != 0 && vddgfx != virtual_voltage_id) {
- data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
- data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = virtual_voltage_id;
- data->vddcgfx_leakage.count++;
+ if (0 == atomctrl_get_voltage_evv_on_sclk
+ (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
+ virtual_voltage_id, &vddgfx)) {
+ /* need to make sure vddgfx is less than 2 V, or else it could burn the ASIC. */
+ PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -1);
+
+ /* the voltage should not be zero nor equal to leakage ID */
+ if (vddgfx != 0 && vddgfx != virtual_voltage_id) {
+ data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
+ data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = virtual_voltage_id;
+ data->vddcgfx_leakage.count++;
+ }
+ } else {
+ printk(KERN_ERR "[ powerplay ] Error retrieving EVV voltage value!\n");
}
}
} else {
@@ -449,20 +453,20 @@ int tonga_get_evv_voltage(struct pp_hwmgr *hwmgr)
if (0 == tonga_get_sclk_for_voltage_evv(hwmgr,
pptable_info->vddc_lookup_table,
virtual_voltage_id, &sclk)) {
- PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk
- (hwmgr, VOLTAGE_TYPE_VDDC, sclk,
- virtual_voltage_id, &vddc),
- "Error retrieving EVV voltage value!", continue);
-
- /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
- if (vddc > 2000)
- printk(KERN_ERR "[ powerplay ] Invalid VDDC value! \n");
-
- /* the voltage should not be zero nor equal to leakage ID */
- if (vddc != 0 && vddc != virtual_voltage_id) {
- data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
- data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
- data->vddc_leakage.count++;
+ if (0 == atomctrl_get_voltage_evv_on_sclk
+ (hwmgr, VOLTAGE_TYPE_VDDC, sclk,
+ virtual_voltage_id, &vddc)) {
+ /* need to make sure vddc is less than 2 V, or else it could burn the ASIC. */
+ PP_ASSERT_WITH_CODE(vddc < 2000, "Invalid VDDC value!", return -1);
+
+ /* the voltage should not be zero nor equal to leakage ID */
+ if (vddc != 0 && vddc != virtual_voltage_id) {
+ data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
+ data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
+ data->vddc_leakage.count++;
+ }
+ } else {
+ printk(KERN_ERR "[ powerplay ] Error retrieving EVV voltage value!\n");
}
}
}
@@ -2037,14 +2041,11 @@ static int tonga_populate_single_memory_level(
data->display_timing.num_existing_displays = info.display_count;
if ((data->mclk_stutter_mode_threshold != 0) &&
- (memory_clock <= data->mclk_stutter_mode_threshold) &&
- (data->is_uvd_enabled == 0)
-#if defined(LINUX)
- && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
- && (data->display_timing.num_existing_displays <= 2)
- && (data->display_timing.num_existing_displays != 0)
-#endif
- )
+ (memory_clock <= data->mclk_stutter_mode_threshold) &&
+ (data->is_uvd_enabled == 0)
+ && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
+ && (data->display_timing.num_existing_displays <= 2)
+ && (data->display_timing.num_existing_displays != 0))
memory_level->StutterEnable = 1;
/* decide strobe mode*/
@@ -2415,6 +2416,24 @@ int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
return 0;
}
+static uint8_t tonga_get_sleep_divider_id_from_clock(uint32_t engine_clock,
+ uint32_t min_engine_clock_in_sr)
+{
+ uint32_t i, temp;
+ uint32_t min = max(min_engine_clock_in_sr, (uint32_t)TONGA_MINIMUM_ENGINE_CLOCK);
+
+ PP_ASSERT_WITH_CODE((engine_clock >= min),
+ "Engine clock can't satisfy stutter requirement!", return 0);
+
+ for (i = TONGA_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
+ temp = engine_clock >> i;
+
+ if (temp >= min || i == 0)
+ break;
+ }
+ return (uint8_t)i;
+}
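/*
 * Worked example (illustrative numbers, and assuming
 * TONGA_MAX_DEEPSLEEP_DIVIDER_ID >= 4): the loop returns the largest
 * divider id i for which (engine_clock >> i) still meets the deep-sleep
 * floor. With min = 2500 and engine_clock = 30000:
 *
 *	30000 >> 4 = 1875 < 2500, but 30000 >> 3 = 3750 >= 2500, so id = 3.
 */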
+
/**
* Populates single SMC SCLK structure using the provided engine clock
*
@@ -2463,12 +2482,12 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, uint32_t
*get the DAL clock. Do it in the future.
PECI_GetMinClockSettings(hwmgr->peci, &minClocks);
data->display_timing.min_clock_insr = minClocks.engineClockInSR;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
- {
- graphic_level->DeepSleepDivId = PhwTonga_GetSleepDividerIdFromClock(hwmgr, engine_clock, minClocks.engineClockInSR);
- }
*/
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ graphic_level->DeepSleepDivId =
+ tonga_get_sleep_divider_id_from_clock(engine_clock,
+ data->display_timing.min_clock_insr);
/* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
@@ -2663,7 +2682,7 @@ static int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
struct TONGA_DLL_SPEED_SETTING {
uint16_t Min; /* Minimum Data Rate*/
uint16_t Max; /* Maximum Data Rate*/
- uint32_t dll_speed; /* The desired DLL_SPEED setting*/
+ uint32_t dll_speed; /* The desired DLL_SPEED setting*/
};
static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
@@ -2828,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
}
}
- /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
- for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
- data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc;
- /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */
- /* param1 is for corresponding std voltage */
- data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
- }
- data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
-
- if (NULL != allowed_vdd_mclk_table) {
- /* Initialize Vddci DPM table based on allow Mclk values */
- for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
- data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci;
- data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
- data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd;
- data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
- }
- data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
- data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
- }
-
/* setup PCIE gen speed levels*/
tonga_setup_default_pcie_tables(hwmgr);
@@ -3296,14 +3294,14 @@ static int tonga_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr)
pptable_info->vdd_dep_on_mclk;
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
- "VDD dependency on SCLK table is missing. \
+ "VDD dependency on SCLK table is missing. \
This table is mandatory", return -1);
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
- "VDD dependency on SCLK table has to have is missing. \
+ "VDD dependency on SCLK table must have at least one entry. \
This table is mandatory", return -1);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
- "VDD dependency on MCLK table is missing. \
+ "VDD dependency on MCLK table is missing. \
This table is mandatory", return -1);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
	"VDD dependency on MCLK table must have at least one entry. \
@@ -4424,17 +4422,14 @@ int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr)
int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
- kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
- }
+ struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- if (NULL != hwmgr->backend) {
- kfree(hwmgr->backend);
- hwmgr->backend = NULL;
+ if (data->soft_pp_table) {
+ kfree(data->soft_pp_table);
+ data->soft_pp_table = NULL;
}
- return 0;
+ return phm_hwmgr_backend_fini(hwmgr);
}
/**
@@ -4494,6 +4489,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
+ data->force_pcie_gen = PP_PCIEGenInvalid;
if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
@@ -5315,7 +5311,7 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
PP_ASSERT_WITH_CODE(
- true == tonga_is_dpm_running(hwmgr),
+ 0 == tonga_is_dpm_running(hwmgr),
"Trying to freeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
@@ -5328,7 +5324,7 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr),
+ PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
"Trying to freeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
@@ -5429,7 +5425,7 @@ static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr
}
if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
- result = tonga_populate_all_memory_levels(hwmgr);
+ result = tonga_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
return result);
@@ -5631,7 +5627,7 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr),
+ PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
"Trying to Unfreeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
@@ -5645,7 +5641,7 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
PP_ASSERT_WITH_CODE(
- true == tonga_is_dpm_running(hwmgr),
+ 0 == tonga_is_dpm_running(hwmgr),
"Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
@@ -5874,7 +5870,7 @@ uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr)
if (!fw_info)
return 0;
- reference_clock = le16_to_cpu(fw_info->usMinPixelClockPLL_Output);
+ reference_clock = le16_to_cpu(fw_info->usReferenceClock);
divide = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_CLKPIN_CNTL, XTALIN_DIVIDE);
@@ -6039,24 +6035,40 @@ static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- *table = (char *)&data->smc_state_table;
+ if (!data->soft_pp_table) {
+ data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size,
+ GFP_KERNEL);
+ if (!data->soft_pp_table)
+ return -ENOMEM;
+ }
+
+ *table = (char *)data->soft_pp_table;
- return sizeof(struct SMU72_Discrete_DpmTable);
+ return hwmgr->soft_pp_table_size;
}
static int tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
- void *table = (void *)&data->smc_state_table;
+ if (!data->soft_pp_table) {
+ data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+ if (!data->soft_pp_table)
+ return -ENOMEM;
+ }
+
+ memcpy(data->soft_pp_table, buf, size);
+
+ hwmgr->soft_pp_table = data->soft_pp_table;
- memcpy(table, buf, size);
+ /* TODO: re-init powerplay to implement modified pptable */
return 0;
}
static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, int level)
+ enum pp_clock_type type, uint32_t mask)
{
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
@@ -6068,20 +6080,28 @@ static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
if (!data->sclk_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
break;
case PP_MCLK:
if (!data->mclk_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
break;
case PP_PCIE:
+ {
+ uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ uint32_t level = 0;
+
+ while (tmp >>= 1)
+ level++;
+
if (!data->pcie_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
- (1 << level));
+ level);
break;
+ }
default:
break;
}
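/*
 * Note: the while loop in the PP_PCIE case computes the index of the
 * highest set bit of the masked request, i.e. fls(tmp) - 1 in kernel
 * terms. For example, tmp = 0x0A (levels 1 and 3 requested and enabled)
 * yields level = 3, so the SMC is forced to the highest requested level.
 */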
@@ -6173,6 +6193,7 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
.powergate_uvd = tonga_phm_powergate_uvd,
.powergate_vce = tonga_phm_powergate_vce,
.disable_clock_power_gating = tonga_phm_disable_clock_power_gating,
+ .update_clock_gatings = tonga_phm_update_clock_gatings,
.notify_smc_display_config_after_ps_adjustment = tonga_notify_smc_display_config_after_ps_adjustment,
.display_config_changed = tonga_display_configuration_changed_task,
.set_max_fan_pwm_output = tonga_set_max_fan_pwm_output,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
index f88d3bbe6..573cd39fe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
@@ -74,7 +74,7 @@ struct tonga_power_state {
};
struct _phw_tonga_dpm_level {
- bool enabled;
+ bool enabled;
uint32_t value;
uint32_t param1;
};
@@ -237,20 +237,20 @@ struct tonga_hwmgr {
irq_handler_func_t ctf_callback;
void *ctf_context;
- phw_tonga_clock_registers clock_registers;
+ phw_tonga_clock_registers clock_registers;
phw_tonga_voltage_smio_registers voltage_smio_registers;
- bool is_memory_GDDR5;
+ bool is_memory_GDDR5;
uint16_t acpi_vddc;
- bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */
+ bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */
uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */
uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */
uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */
uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */
uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */
- phw_tonga_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/
- phw_tonga_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */
- phw_tonga_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */
+ phw_tonga_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/
+ phw_tonga_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */
+ phw_tonga_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */
uint32_t mvdd_control;
uint32_t vddc_mask_low;
@@ -263,8 +263,8 @@ struct tonga_hwmgr {
uint32_t mclk_stutter_mode_threshold;
uint32_t mclk_edc_enable_threshold;
uint32_t mclk_edc_wr_enable_threshold;
- bool is_uvd_enabled;
- bool is_xdma_enabled;
+ bool is_uvd_enabled;
+ bool is_xdma_enabled;
phw_tonga_vbios_boot_state vbios_boot_state;
bool battery_state;
@@ -353,6 +353,8 @@ struct tonga_hwmgr {
bool acp_power_gated; /* 1: gated, 0:not gated */
bool pg_acp_init;
+ /* soft pptable for re-uploading into smu */
+ void *soft_pp_table;
};
typedef struct tonga_hwmgr tonga_hwmgr;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
index 9a4456e65..f127198aa 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
@@ -197,6 +197,22 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
} ATOM_Tonga_SCLK_Dependency_Table;
+typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+ UCHAR ucVddInd; /* Base voltage */
+ USHORT usVddcOffset; /* Offset relative to base voltage */
+ ULONG ulSclk;
+ USHORT usEdcCurrent;
+ UCHAR ucReliabilityTemperature;
+ UCHAR ucCKSVOffsetandDisable; /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
+ ULONG ulSclkOffset;
+} ATOM_Polaris_SCLK_Dependency_Record;
+
+typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+ ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
+} ATOM_Polaris_SCLK_Dependency_Table;
+
typedef struct _ATOM_Tonga_PCIE_Record {
UCHAR ucPCIEGenSpeed;
UCHAR usPCIELaneWidth;
@@ -209,6 +225,20 @@ typedef struct _ATOM_Tonga_PCIE_Table {
ATOM_Tonga_PCIE_Record entries[1]; /* Dynamically allocate entries. */
} ATOM_Tonga_PCIE_Table;
+typedef struct _ATOM_Polaris10_PCIE_Record {
+ UCHAR ucPCIEGenSpeed;
+ UCHAR usPCIELaneWidth;
+ UCHAR ucReserved[2];
+ ULONG ulPCIE_Sclk;
+} ATOM_Polaris10_PCIE_Record;
+
+typedef struct _ATOM_Polaris10_PCIE_Table {
+ UCHAR ucRevId;
+ UCHAR ucNumEntries; /* Number of entries. */
+ ATOM_Polaris10_PCIE_Record entries[1]; /* Dynamically allocate entries. */
+} ATOM_Polaris10_PCIE_Table;
+
+
typedef struct _ATOM_Tonga_MM_Dependency_Record {
UCHAR ucVddcInd; /* VDDC voltage */
USHORT usVddgfxOffset; /* Offset relative to VDDC voltage */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index 17766e8da..dccc859f6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -138,12 +138,15 @@ const void *get_powerplay_table(struct pp_hwmgr *hwmgr)
u16 size;
u8 frev, crev;
- void *table_address;
-
- table_address = (ATOM_Tonga_POWERPLAYTABLE *)
- cgs_atom_get_data_table(hwmgr->device, index, &size, &frev, &crev);
-
- hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/
+ void *table_address = (void *)hwmgr->soft_pp_table;
+
+ if (!table_address) {
+ table_address = (ATOM_Tonga_POWERPLAYTABLE *)
+ cgs_atom_get_data_table(hwmgr->device,
+ index, &size, &frev, &crev);
+ hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/
+ hwmgr->soft_pp_table_size = size;
+ }
return table_address;
}
@@ -405,41 +408,78 @@ static int get_mclk_voltage_dependency_table(
static int get_sclk_voltage_dependency_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
- const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table
+ const PPTable_Generic_SubTable_Header *sclk_dep_table
)
{
uint32_t table_size, i;
phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
- PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries),
- "Invalid PowerPlay Table!", return -1);
+ if (sclk_dep_table->ucRevId < 1) {
+ const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
+ (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
- table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
- * sclk_dep_table->ucNumEntries;
+ PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
+ "Invalid PowerPlay Table!", return -1);
- sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
- kzalloc(table_size, GFP_KERNEL);
+ table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+ * tonga_table->ucNumEntries;
- if (NULL == sclk_table)
- return -ENOMEM;
+ sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+ kzalloc(table_size, GFP_KERNEL);
- memset(sclk_table, 0x00, table_size);
-
- sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries;
-
- for (i = 0; i < sclk_dep_table->ucNumEntries; i++) {
- sclk_table->entries[i].vddInd =
- sclk_dep_table->entries[i].ucVddInd;
- sclk_table->entries[i].vdd_offset =
- sclk_dep_table->entries[i].usVddcOffset;
- sclk_table->entries[i].clk =
- sclk_dep_table->entries[i].ulSclk;
- sclk_table->entries[i].cks_enable =
- (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
- sclk_table->entries[i].cks_voffset =
- (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
- }
+ if (NULL == sclk_table)
+ return -ENOMEM;
+
+ memset(sclk_table, 0x00, table_size);
+
+ sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
+
+ for (i = 0; i < tonga_table->ucNumEntries; i++) {
+ sclk_table->entries[i].vddInd =
+ tonga_table->entries[i].ucVddInd;
+ sclk_table->entries[i].vdd_offset =
+ tonga_table->entries[i].usVddcOffset;
+ sclk_table->entries[i].clk =
+ tonga_table->entries[i].ulSclk;
+ sclk_table->entries[i].cks_enable =
+ (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+ sclk_table->entries[i].cks_voffset =
+ (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+ }
+ } else {
+ const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
+ (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
+
+ PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
+ "Invalid PowerPlay Table!", return -1);
+
+ table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+ * polaris_table->ucNumEntries;
+
+ sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+ kzalloc(table_size, GFP_KERNEL);
+
+ if (NULL == sclk_table)
+ return -ENOMEM;
+ memset(sclk_table, 0x00, table_size);
+
+ sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
+
+ for (i = 0; i < polaris_table->ucNumEntries; i++) {
+ sclk_table->entries[i].vddInd =
+ polaris_table->entries[i].ucVddInd;
+ sclk_table->entries[i].vdd_offset =
+ polaris_table->entries[i].usVddcOffset;
+ sclk_table->entries[i].clk =
+ polaris_table->entries[i].ulSclk;
+ sclk_table->entries[i].cks_enable =
+ (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+ sclk_table->entries[i].cks_voffset =
+ (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+ sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
+ }
+ }
*pp_tonga_sclk_dep_table = sclk_table;
return 0;
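/*
 * Decode example for ucCKSVOffsetandDisable (bit 7 = per-level disable,
 * bits 0..6 = CKS voltage offset), using an illustrative raw value 0x85:
 *
 *	cks_enable  = ((0x85 & 0x80) >> 7) == 0 ? 1 : 0;	// 0: level disabled
 *	cks_voffset = 0x85 & 0x7F;				// 5
 */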
@@ -448,47 +488,90 @@ static int get_sclk_voltage_dependency_table(
static int get_pcie_table(
struct pp_hwmgr *hwmgr,
phm_ppt_v1_pcie_table **pp_tonga_pcie_table,
- const ATOM_Tonga_PCIE_Table * atom_pcie_table
+ const PPTable_Generic_SubTable_Header *pTable
)
{
uint32_t table_size, i, pcie_count;
phm_ppt_v1_pcie_table *pcie_table;
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- PP_ASSERT_WITH_CODE((0 != atom_pcie_table->ucNumEntries),
- "Invalid PowerPlay Table!", return -1);
- table_size = sizeof(uint32_t) +
- sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
+ if (pTable->ucRevId < 1) {
+ const ATOM_Tonga_PCIE_Table *atom_pcie_table = (ATOM_Tonga_PCIE_Table *)pTable;
+ PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
+ "Invalid PowerPlay Table!", return -1);
- pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
+ table_size = sizeof(uint32_t) +
+ sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
- if (NULL == pcie_table)
- return -ENOMEM;
+ pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
- memset(pcie_table, 0x00, table_size);
+ if (pcie_table == NULL)
+ return -ENOMEM;
- /*
- * Make sure the number of pcie entries are less than or equal to sclk dpm levels.
- * Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
- */
- pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
- if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
- pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
- else
- printk(KERN_ERR "[ powerplay ] Number of Pcie Entries exceed the number of SCLK Dpm Levels! \
- Disregarding the excess entries... \n");
+ memset(pcie_table, 0x00, table_size);
- pcie_table->count = pcie_count;
+ /*
+ * Make sure the number of PCIe entries is less than or equal to the number of SCLK DPM levels.
+ * Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
+ */
+ pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
+ if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
+ pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
+ else
+ printk(KERN_ERR "[ powerplay ] Number of PCIe entries exceeds the number of SCLK DPM levels! \
+ Disregarding the excess entries...\n");
- for (i = 0; i < pcie_count; i++) {
- pcie_table->entries[i].gen_speed =
- atom_pcie_table->entries[i].ucPCIEGenSpeed;
- pcie_table->entries[i].lane_width =
- atom_pcie_table->entries[i].usPCIELaneWidth;
- }
+ pcie_table->count = pcie_count;
- *pp_tonga_pcie_table = pcie_table;
+ for (i = 0; i < pcie_count; i++) {
+ pcie_table->entries[i].gen_speed =
+ atom_pcie_table->entries[i].ucPCIEGenSpeed;
+ pcie_table->entries[i].lane_width =
+ atom_pcie_table->entries[i].usPCIELaneWidth;
+ }
+
+ *pp_tonga_pcie_table = pcie_table;
+ } else {
+ /* Polaris10/Polaris11 and newer. */
+ const ATOM_Polaris10_PCIE_Table *atom_pcie_table = (ATOM_Polaris10_PCIE_Table *)pTable;
+ PP_ASSERT_WITH_CODE((atom_pcie_table->ucNumEntries != 0),
+ "Invalid PowerPlay Table!", return -1);
+
+ table_size = sizeof(uint32_t) +
+ sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
+
+ pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL);
+
+ if (pcie_table == NULL)
+ return -ENOMEM;
+
+ memset(pcie_table, 0x00, table_size);
+
+ /*
+ * Make sure the number of PCIe entries is less than or equal to the number of SCLK DPM levels.
+ * Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
+ */
+ pcie_count = (pp_table_information->vdd_dep_on_sclk->count) + 1;
+ if ((uint32_t)atom_pcie_table->ucNumEntries <= pcie_count)
+ pcie_count = (uint32_t)atom_pcie_table->ucNumEntries;
+ else
+ printk(KERN_ERR "[ powerplay ] Number of PCIe entries exceeds the number of SCLK DPM levels! \
+ Disregarding the excess entries...\n");
+
+ pcie_table->count = pcie_count;
+
+ for (i = 0; i < pcie_count; i++) {
+ pcie_table->entries[i].gen_speed =
+ atom_pcie_table->entries[i].ucPCIEGenSpeed;
+ pcie_table->entries[i].lane_width =
+ atom_pcie_table->entries[i].usPCIELaneWidth;
+ pcie_table->entries[i].pcie_sclk =
+ atom_pcie_table->entries[i].ulPCIE_Sclk;
+ }
+
+ *pp_tonga_pcie_table = pcie_table;
+ }
return 0;
}
@@ -662,14 +745,14 @@ static int init_clock_voltage_dependency(
const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
(const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
- const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
- (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
+ const PPTable_Generic_SubTable_Header *sclk_dep_table =
+ (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
(const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
le16_to_cpu(powerplay_table->usHardLimitTableOffset));
- const ATOM_Tonga_PCIE_Table *pcie_table =
- (const ATOM_Tonga_PCIE_Table *)(((unsigned long) powerplay_table) +
+ const PPTable_Generic_SubTable_Header *pcie_table =
+ (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
le16_to_cpu(powerplay_table->usPCIETableOffset));
pp_table_information->vdd_dep_on_sclk = NULL;
@@ -994,48 +1077,44 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- if (NULL != hwmgr->soft_pp_table) {
- kfree(hwmgr->soft_pp_table);
+ if (NULL != hwmgr->soft_pp_table)
hwmgr->soft_pp_table = NULL;
- }
- if (NULL != pp_table_information->vdd_dep_on_sclk)
- pp_table_information->vdd_dep_on_sclk = NULL;
+ kfree(pp_table_information->vdd_dep_on_sclk);
+ pp_table_information->vdd_dep_on_sclk = NULL;
- if (NULL != pp_table_information->vdd_dep_on_mclk)
- pp_table_information->vdd_dep_on_mclk = NULL;
+ kfree(pp_table_information->vdd_dep_on_mclk);
+ pp_table_information->vdd_dep_on_mclk = NULL;
- if (NULL != pp_table_information->valid_mclk_values)
- pp_table_information->valid_mclk_values = NULL;
+ kfree(pp_table_information->valid_mclk_values);
+ pp_table_information->valid_mclk_values = NULL;
- if (NULL != pp_table_information->valid_sclk_values)
- pp_table_information->valid_sclk_values = NULL;
+ kfree(pp_table_information->valid_sclk_values);
+ pp_table_information->valid_sclk_values = NULL;
- if (NULL != pp_table_information->vddc_lookup_table)
- pp_table_information->vddc_lookup_table = NULL;
+ kfree(pp_table_information->vddc_lookup_table);
+ pp_table_information->vddc_lookup_table = NULL;
- if (NULL != pp_table_information->vddgfx_lookup_table)
- pp_table_information->vddgfx_lookup_table = NULL;
+ kfree(pp_table_information->vddgfx_lookup_table);
+ pp_table_information->vddgfx_lookup_table = NULL;
- if (NULL != pp_table_information->mm_dep_table)
- pp_table_information->mm_dep_table = NULL;
+ kfree(pp_table_information->mm_dep_table);
+ pp_table_information->mm_dep_table = NULL;
- if (NULL != pp_table_information->cac_dtp_table)
- pp_table_information->cac_dtp_table = NULL;
+ kfree(pp_table_information->cac_dtp_table);
+ pp_table_information->cac_dtp_table = NULL;
- if (NULL != hwmgr->dyn_state.cac_dtp_table)
- hwmgr->dyn_state.cac_dtp_table = NULL;
+ kfree(hwmgr->dyn_state.cac_dtp_table);
+ hwmgr->dyn_state.cac_dtp_table = NULL;
- if (NULL != pp_table_information->ppm_parameter_table)
- pp_table_information->ppm_parameter_table = NULL;
+ kfree(pp_table_information->ppm_parameter_table);
+ pp_table_information->ppm_parameter_table = NULL;
- if (NULL != pp_table_information->pcie_table)
- pp_table_information->pcie_table = NULL;
+ kfree(pp_table_information->pcie_table);
+ pp_table_information->pcie_table = NULL;
- if (NULL != hwmgr->pptable) {
- kfree(hwmgr->pptable);
- hwmgr->pptable = NULL;
- }
+ kfree(hwmgr->pptable);
+ hwmgr->pptable = NULL;
return result;
}
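/*
 * Rationale sketch for the cleanup above (standard kernel semantics, not
 * stated by the patch): kfree(NULL) is a no-op, so the NULL checks around
 * each kfree() were redundant; clearing the pointers afterwards still
 * protects a repeated uninitialize call from a double-free.
 */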
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
index a18817474..47ef1ca2d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c
@@ -195,8 +195,8 @@ int tonga_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed)
if (0 == duty100)
return -EINVAL;
- tmp64 = (uint64_t)speed * 100;
- do_div(tmp64, duty100);
+ tmp64 = (uint64_t)speed * duty100;
+ do_div(tmp64, 100);
duty = (uint32_t)tmp64;
PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL0, FDO_STATIC_DUTY, duty);
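/*
 * Worked example of the fix: for speed = 50 (percent) and duty100 = 255,
 * duty = 50 * 255 / 100 = 127, roughly half of the full PWM range. The old
 * order of operations computed 50 * 100 / 255 = 19, far below the
 * requested fan speed.
 */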
@@ -525,7 +525,7 @@ static int tf_tonga_thermal_disable_alert(struct pp_hwmgr *hwmgr, void *input, v
return tonga_thermal_disable_alert(hwmgr);
}
-static struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = {
+static const struct phm_master_table_item tonga_thermal_start_thermal_controller_master_list[] = {
{ NULL, tf_tonga_thermal_initialize },
{ NULL, tf_tonga_thermal_set_temperature_range },
{ NULL, tf_tonga_thermal_enable_alert },
@@ -538,20 +538,20 @@ static struct phm_master_table_item tonga_thermal_start_thermal_controller_maste
{ NULL, NULL }
};
-static struct phm_master_table_header tonga_thermal_start_thermal_controller_master = {
+static const struct phm_master_table_header tonga_thermal_start_thermal_controller_master = {
0,
PHM_MasterTableFlag_None,
tonga_thermal_start_thermal_controller_master_list
};
-static struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = {
+static const struct phm_master_table_item tonga_thermal_set_temperature_range_master_list[] = {
{ NULL, tf_tonga_thermal_disable_alert},
{ NULL, tf_tonga_thermal_set_temperature_range},
{ NULL, tf_tonga_thermal_enable_alert},
{ NULL, NULL }
};
-struct phm_master_table_header tonga_thermal_set_temperature_range_master = {
+static const struct phm_master_table_header tonga_thermal_set_temperature_range_master = {
0,
PHM_MasterTableFlag_None,
tonga_thermal_set_temperature_range_master_list
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 7255f7ddf..50b367d44 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -289,6 +289,9 @@ struct pp_states_info {
#define PP_BLOCK_GFX_CG 0x01
#define PP_BLOCK_GFX_MG 0x02
+#define PP_BLOCK_GFX_3D 0x04
+#define PP_BLOCK_GFX_RLC 0x08
+#define PP_BLOCK_GFX_CP 0x10
#define PP_BLOCK_SYS_BIF 0x01
#define PP_BLOCK_SYS_MC 0x02
#define PP_BLOCK_SYS_ROM 0x04
@@ -337,7 +340,7 @@ struct amd_powerplay_funcs {
int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
int (*get_pp_table)(void *handle, char **table);
int (*set_pp_table)(void *handle, const char *buf, size_t size);
- int (*force_clock_level)(void *handle, enum pp_clock_type type, int level);
+ int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
};
diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
index 10437dcfd..d63ef83b2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
@@ -37,7 +37,7 @@ typedef int (*pem_event_action)(struct pp_eventmgr *eventmgr,
struct action_chain {
const char *description; /* action chain description for debugging purpose */
- const pem_event_action **action_chain; /* pointer to chain of event actions */
+ const pem_event_action * const *action_chain; /* pointer to chain of event actions */
};
struct pem_power_source_ui_state_info {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
index 0262ad355..8a3166532 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h
@@ -46,7 +46,7 @@ struct PWR_Command_Table
typedef struct PWR_Command_Table PWR_Command_Table;
#define PWR_VIRUS_TABLE_SIZE 10243
-static PWR_Command_Table PwrVirusTable[PWR_VIRUS_TABLE_SIZE] =
+static const PWR_Command_Table PwrVirusTable[PWR_VIRUS_TABLE_SIZE] =
{
{ PwrCmdWrite, 0x100100b6, mmPCIE_INDEX },
{ PwrCmdWrite, 0x00000000, mmPCIE_DATA },
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 040d3f7cb..56f712c7d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -211,6 +211,7 @@ enum phm_platform_caps {
PHM_PlatformCaps_ClockStretcher,
PHM_PlatformCaps_TablelessHardwareInterface,
PHM_PlatformCaps_EnableDriverEVV,
+ PHM_PlatformCaps_SPLLShutdownSupport,
PHM_PlatformCaps_Max
};
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 928f5a740..77e8e33d5 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -141,7 +141,7 @@ enum phm_master_table_flag {
struct phm_master_table_header {
uint32_t storage_size;
uint32_t flags;
- struct phm_master_table_item *master_list;
+ const struct phm_master_table_item *master_list;
};
struct phm_runtime_table_header {
@@ -199,7 +199,7 @@ extern int phm_dispatch_table(struct pp_hwmgr *hwmgr,
void *input, void *output);
extern int phm_construct_table(struct pp_hwmgr *hwmgr,
- struct phm_master_table_header *master_table,
+ const struct phm_master_table_header *master_table,
struct phm_runtime_table_header *rt_table);
extern int phm_destroy_table(struct pp_hwmgr *hwmgr,
@@ -335,8 +335,9 @@ struct pp_hwmgr_func {
int (*power_off_asic)(struct pp_hwmgr *hwmgr);
int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table);
int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size);
- int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, int level);
+ int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
+ int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
};
struct pp_table_func {
@@ -410,6 +411,8 @@ struct phm_cac_tdp_table {
uint8_t ucVr_I2C_Line;
uint8_t ucPlx_I2C_address;
uint8_t ucPlx_I2C_Line;
+ uint32_t usBoostPowerLimit;
+ uint8_t ucCKS_LDO_REFSEL;
};
struct phm_ppm_table {
@@ -499,7 +502,7 @@ struct phm_dynamic_state_info {
struct phm_ppm_table *ppm_parameter_table;
struct phm_cac_tdp_table *cac_dtp_table;
struct phm_clock_voltage_dependency_table *vdd_gfx_dependency_on_sclk;
- struct phm_vq_budgeting_table *vq_budgeting_table;
+ struct phm_vq_budgeting_table *vq_budgeting_table;
};
struct pp_fan_info {
@@ -576,6 +579,7 @@ struct pp_hwmgr {
void *device;
struct pp_smumgr *smumgr;
const void *soft_pp_table;
+ uint32_t soft_pp_table_size;
bool need_pp_table_upload;
enum amd_dpm_forced_level dpm_level;
bool block_hw_access;
@@ -671,7 +675,7 @@ extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_volta
extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
-
+extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
new file mode 100644
index 000000000..b8f4b73c3
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
@@ -0,0 +1,412 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef POLARIS10_PP_SMC_H
+#define POLARIS10_PP_SMC_H
+
+
+#pragma pack(push, 1)
+
+#define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305)
+
+#define PPSMC_SWSTATE_FLAG_DC 0x01
+#define PPSMC_SWSTATE_FLAG_UVD 0x02
+#define PPSMC_SWSTATE_FLAG_VCE 0x04
+
+#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
+#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
+#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
+
+#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
+#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
+#define PPSMC_SYSTEMFLAG_GDDR5 0x04
+
+#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
+
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
+#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
+#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
+
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
+#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
+
+
+#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
+#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
+#define PPSMC_DPM2FLAGS_OCP 0x04
+
+
+#define PPSMC_DISPLAY_WATERMARK_LOW 0
+#define PPSMC_DISPLAY_WATERMARK_HIGH 1
+
+
+#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
+#define PPSMC_STATEFLAG_POWERBOOST 0x02
+#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
+#define PPSMC_STATEFLAG_POWERSHIFT 0x08
+#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
+#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
+#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
+
+
+#define FDO_MODE_HARDWARE 0
+#define FDO_MODE_PIECE_WISE_LINEAR 1
+
+enum FAN_CONTROL {
+ FAN_CONTROL_FUZZY,
+ FAN_CONTROL_TABLE
+};
+
+
+#define PPSMC_Result_OK ((uint16_t)0x01)
+#define PPSMC_Result_NoMore ((uint16_t)0x02)
+
+#define PPSMC_Result_NotNow ((uint16_t)0x03)
+#define PPSMC_Result_Failed ((uint16_t)0xFF)
+#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
+#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
+
+typedef uint16_t PPSMC_Result;
+
+#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
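/* Example: PPSMC_isERROR(PPSMC_Result_Failed) is nonzero because 0xFF has
 * bit 7 set, while PPSMC_isERROR(PPSMC_Result_OK) evaluates to 0. */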
+
+
+#define PPSMC_MSG_Halt ((uint16_t)0x10)
+#define PPSMC_MSG_Resume ((uint16_t)0x11)
+#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
+#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
+#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
+#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
+#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
+#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
+#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
+#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
+#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
+#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
+#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
+#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
+#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
+#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
+#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
+#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
+#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
+#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
+#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
+#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
+#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
+#define PPSMC_CACHistoryStart ((uint16_t)0x57)
+#define PPSMC_CACHistoryStop ((uint16_t)0x58)
+#define PPSMC_TDPClampingActive ((uint16_t)0x59)
+#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
+#define PPSMC_StartFanControl ((uint16_t)0x5B)
+#define PPSMC_StopFanControl ((uint16_t)0x5C)
+#define PPSMC_NoDisplay ((uint16_t)0x5D)
+#define PPSMC_HasDisplay ((uint16_t)0x5E)
+#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
+#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
+#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
+#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
+#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
+#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
+#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
+#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
+#define PPSMC_OCPActive ((uint16_t)0x6C)
+#define PPSMC_OCPInactive ((uint16_t)0x6D)
+#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
+#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
+#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
+#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
+#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
+#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
+#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
+#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
+#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
+#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
+#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
+#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
+#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
+#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
+#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
+#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
+
+#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
+#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
+#define PPSMC_FlushDataCache ((uint16_t)0x80)
+#define PPSMC_FlushInstrCache ((uint16_t)0x81)
+
+#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
+#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
+
+#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
+
+#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
+#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
+#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
+#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
+
+#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
+#define PPSM_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A)
+#define PPSM_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B)
+#define PPSM_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C)
+
+#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
+
+#define PPSMC_MSG_Test ((uint16_t) 0x100)
+#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101)
+#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102)
+#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103)
+#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104)
+#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105)
+#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106)
+#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107)
+#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108)
+#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109)
+#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a)
+#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b)
+#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e)
+#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f)
+#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110)
+#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111)
+#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112)
+#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113)
+#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114)
+#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117)
+#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118)
+#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119)
+#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a)
+#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b)
+#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c)
+#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d)
+#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e)
+#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f)
+#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120)
+#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121)
+#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122)
+#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123)
+#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124)
+#define PPSMC_MSG_VddNB_Request ((uint16_t) 0x125)
+#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint32_t) 0x126)
+#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint32_t) 0x127)
+#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128)
+
+#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129)
+#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A)
+#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B)
+#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C)
+#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
+#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
+#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
+#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
+#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
+#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
+#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
+#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134)
+#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
+#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
+#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
+#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
+#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
+#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
+#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b)
+#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0x13c)
+#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
+#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e)
+#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f)
+#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
+#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
+#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142)
+#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143)
+#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144)
+#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
+#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
+#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
+#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
+#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
+#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
+#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b)
+#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c)
+#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d)
+
+#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
+#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
+#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
+#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
+#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t) 0x152)
+#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t) 0x153)
+#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
+#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
+#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
+#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
+#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
+#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
+#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
+#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
+#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t) 0x15c)
+#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d)
+#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e)
+#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
+#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160)
+#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161)
+#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
+#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163)
+#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164)
+#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165)
+#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166)
+#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
+#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168)
+#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
+#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
+#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b)
+#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t) 0x16c)
+#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t) 0x16d)
+#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t) 0x16e)
+#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t) 0x16f)
+#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170)
+#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171)
+#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172)
+#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173)
+#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174)
+#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175)
+#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176)
+#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177)
+#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178)
+#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179)
+#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a)
+#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b)
+#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c)
+#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d)
+#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e)
+#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f)
+#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180)
+#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181)
+#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182)
+#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184)
+#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
+#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
+#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
+#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
+#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
+#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
+#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
+#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
+#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D)
+#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E)
+#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
+#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
+#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
+#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192)
+#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193)
+#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194)
+#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195)
+#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207)
+#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196)
+#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208)
+#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197)
+#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198)
+#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199)
+#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
+#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B)
+#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
+#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
+
+#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
+#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
+#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202)
+#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203)
+#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204)
+#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
+#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206)
+#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209)
+#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A)
+
+#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240)
+#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241)
+#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242)
+#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243)
+#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244)
+#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245)
+#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246)
+
+#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250)
+#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251)
+#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252)
+#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253)
+#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254)
+#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255)
+#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256)
+#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257)
+#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258)
+#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259)
+#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A)
+#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B)
+#define DMCUSMC_MSG_PSREntry ((uint16_t) 0x25C)
+#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D)
+#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260)
+#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261)
+#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262)
+#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263)
+#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264)
+#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265)
+#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266)
+#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267)
+#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268)
+#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269)
+#define PPSMC_MSG_EnableAvfs ((uint16_t) 0x26A)
+#define PPSMC_MSG_DisableAvfs ((uint16_t) 0x26B)
+
+#define PPSMC_MSG_PerformBtc ((uint16_t) 0x26C)
+#define PPSMC_MSG_VftTableIsValid ((uint16_t) 0x275)
+#define PPSMC_MSG_UseNewGPIOScheme ((uint16_t) 0x277)
+#define PPSMC_MSG_GetEnabledPsm ((uint16_t) 0x400)
+#define PPSMC_MSG_AgmStartPsm ((uint16_t) 0x401)
+#define PPSMC_MSG_AgmReadPsm ((uint16_t) 0x402)
+#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
+#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
+
+#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
+#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
+#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)
+
+#define PPSMC_MSG_SetGpuPllDfsForSclk ((uint16_t) 0x300)
+#define PPSMC_MSG_Didt_Block_Function ((uint16_t) 0x301)
+
+#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
+
+#define PPSMC_MSG_SecureSRBMWrite ((uint16_t) 0x600)
+#define PPSMC_MSG_SecureSRBMRead ((uint16_t) 0x601)
+#define PPSMC_MSG_SetAddress ((uint16_t) 0x800)
+#define PPSMC_MSG_GetData ((uint16_t) 0x801)
+#define PPSMC_MSG_SetData ((uint16_t) 0x802)
+
+typedef uint16_t PPSMC_Msg;
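
Each PPSMC_MSG_* value above is a 16-bit command ID that the driver hands to the SMC firmware and then reads a PPSMC_Result back for. A minimal sketch of that round trip; write_smc_message() and read_smc_response() are assumed placeholders for the driver's real mailbox accessors, not kernel APIs:

    extern void write_smc_message(PPSMC_Msg msg);           /* assumed accessor */
    extern PPSMC_Result read_smc_response(void);            /* assumed accessor */

    static PPSMC_Result smc_send(PPSMC_Msg msg)
    {
            write_smc_message(msg);                 /* e.g. PPSMC_MSG_EnableULV */
            /* a real driver polls a response/status register before reading */
            return read_smc_response();
    }
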
+
+#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
+#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
+#define PPSMC_EVENT_STATUS_DC 0x00000004
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
new file mode 100644
index 000000000..f497e7d98
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h
@@ -0,0 +1,10088 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _POLARIS10_PWRVIRUS_H
+#define _POLARIS10_PWRVIRUS_H
+
+#define mmSMC_IND_INDEX_11 0x01AC
+#define mmSMC_IND_DATA_11 0x01AD
+#define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a
+#define mmCP_HYP_MEC1_UCODE_DATA 0xf81b
+#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c
+#define mmCP_HYP_MEC2_UCODE_DATA 0xf81d
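
The mmSMC_IND_INDEX_11/mmSMC_IND_DATA_11 pair above suggests the usual index/data scheme for reaching SMC-internal addresses: the target address is written to the INDEX register and the payload then moves through the DATA register. A sketch under that assumption; read_reg32() and write_reg32() stand in for the driver's MMIO accessors and are not real kernel APIs:

    #include <stdint.h>

    extern uint32_t read_reg32(uint32_t offset);            /* assumed MMIO accessor */
    extern void write_reg32(uint32_t offset, uint32_t value);

    static uint32_t smc_ind_read(uint32_t smc_addr)
    {
            write_reg32(mmSMC_IND_INDEX_11, smc_addr);      /* select internal address */
            return read_reg32(mmSMC_IND_DATA_11);           /* read the payload */
    }
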
+
+enum PWR_Command {
+ PwrCmdNull = 0,
+ PwrCmdWrite,
+ PwrCmdEnd,
+ PwrCmdMax
+};
+
+typedef enum PWR_Command PWR_Command;
+
+struct PWR_Command_Table {
+ PWR_Command command;
+ uint32_t data;
+ uint32_t reg;
+};
+
+typedef struct PWR_Command_Table PWR_Command_Table;
+
+
+#define PWR_VIRUS_TABLE_SIZE 10031
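
Given the PWR_Command_Table layout above, the natural consumer is a loop that replays the entries: issue one register write per PwrCmdWrite and stop at PwrCmdEnd, or after PWR_VIRUS_TABLE_SIZE entries, whichever comes first. A minimal sketch, with write_reg32() as an assumed MMIO accessor rather than a real kernel API:

    #include <stdint.h>

    extern void write_reg32(uint32_t reg, uint32_t value);  /* assumed accessor */

    static void replay_pwr_table(const PWR_Command_Table *tbl, uint32_t count)
    {
            uint32_t i;

            for (i = 0; i < count; i++) {
                    if (tbl[i].command == PwrCmdEnd)        /* explicit terminator */
                            break;
                    if (tbl[i].command == PwrCmdWrite)
                            write_reg32(tbl[i].reg, tbl[i].data);
            }
    }

Under those assumptions the table below would be driven as replay_pwr_table(pwr_virus_table, PWR_VIRUS_TABLE_SIZE).
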
+
+static const PWR_Command_Table pwr_virus_table[PWR_VIRUS_TABLE_SIZE] = {
+ { PwrCmdWrite, 0x00000000, mmRLC_CNTL },
+ { PwrCmdWrite, 0x00000002, mmRLC_SRM_CNTL },
+ { PwrCmdWrite, 0x15000000, mmCP_ME_CNTL },
+ { PwrCmdWrite, 0x50000000, mmCP_MEC_CNTL },
+ { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
+ { PwrCmdWrite, 0x0840800a, mmCP_RB0_CNTL },
+ { PwrCmdWrite, 0xf30fff0f, mmTCC_CTRL },
+ { PwrCmdWrite, 0x00000002, mmTCC_EXE_DISABLE },
+ { PwrCmdWrite, 0x000000ff, mmTCP_ADDR_CONFIG },
+ { PwrCmdWrite, 0x540ff000, mmCP_CPC_IC_BASE_LO },
+ { PwrCmdWrite, 0x000000b4, mmCP_CPC_IC_BASE_HI },
+ { PwrCmdWrite, 0x00010000, mmCP_HYP_MEC1_UCODE_ADDR },
+ { PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00221408, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00591260, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00621387, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA },
+ { PwrCmdWrite, 0x00010000, mmCP_HYP_MEC2_UCODE_ADDR },
+ { PwrCmdWrite, 0x00041b75, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000710e8, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000910dd, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000a1081, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000b016f, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000c0e3c, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000d10ec, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000e0188, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00101b5d, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00150a6c, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00170c5e, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x001d0c8c, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x001e0cfe, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00221408, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00370d7b, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00390dcb, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x003c142f, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x003f0b27, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00400e63, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00500f62, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00460fa7, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00490fa7, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x005811d4, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00680ad6, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00760b00, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00780b0c, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00790af7, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x007d1aba, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x007e1abe, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00591260, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x005a12fb, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00861ac7, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x008c1b01, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x008d1b34, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00a014b9, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00a1152e, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00a216fb, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00a41890, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00a31906, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00a50b14, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00621387, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x005c0b27, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x00160a75, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA },
+ { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
+ { PwrCmdWrite, 0x540fe800, mmCP_DFY_ADDR_LO },
+ { PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e020201, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e040204, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e060205, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a080500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a0a0303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x54106f00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000400b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00004000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00804fac, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
+ { PwrCmdWrite, 0x540fef00, mmCP_DFY_ADDR_LO },
+ { PwrCmdWrite, 0xc0031502, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00001e00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
+ { PwrCmdWrite, 0x540ff000, mmCP_DFY_ADDR_LO },
+ { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000145, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94800001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4080061, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24ccffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3cd08000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9500fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1cd0ffcf, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d018001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x050c0019, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x84c00000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000023, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000067, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000006a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000006d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000079, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000084, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000008f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000099, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800000a0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800000af, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x388c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x08880002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98800003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000002d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc000004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d808001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc800005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24cc0700, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x10cc0014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d0d000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000005d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14d00011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c01b10, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00e0080, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00e0800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x280c0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x280c0010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400053, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x280c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00052, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28180039, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28080001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ca88004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc800079, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc00006f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28180080, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd013278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1d10c017, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000013b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97400001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc810000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd4c0380, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdcc0388, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55dc0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdcc038c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce0c0390, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce0c0394, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce4c0398, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56640020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce4c039c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce8c03a0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56a80020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce8c03a4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcecc03a8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcecc03ac, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf0c03b0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x57300020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf0c03b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf4c03b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x57740020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf4c03bc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf8c03c0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x57b80020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf8c03c4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfcc03c8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x57fc0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfcc03cc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9000033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25dc0010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05dc002f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc12009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d200a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc012009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9000034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25e01c00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12200013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25e40300, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12640008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25e800c0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a80002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25ec003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e25c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7eae400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31100006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9500007b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc1c200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4df0388, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4d7038c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d5dc01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4e30390, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4d70394, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d62001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4e70398, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4d7039c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d66401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4eb03a0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4d703a4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d6a801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4ef03a8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4d703ac, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d6ec01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4f303b0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4d703b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d73001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4f703b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4d703bc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d77401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4fb03c0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4d703c4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d7b801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4ff03c8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4d703cc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d7fc01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4d70380, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0083, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc0e0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c0000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0085, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18cc006a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18cc01e3, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3cd00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9900fffa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4080001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1c88001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc080000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400051, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04180018, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1aac0027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80080, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00017, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04080002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080250, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080228, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000367, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9880fff3, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04080010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x08880001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd80c0309, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd80c0319, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9880fffc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00e0100, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d0003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24d4001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24d80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x155c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05e80180, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9900000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc410001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000031, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9900091a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24d000ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05280196, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d4fe04, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800001b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000032b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000350, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000352, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000035f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000701, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000047c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000019f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc419325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1d98001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd81325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4140004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000043, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00050, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0044, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27fc0003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c00006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000055, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9400036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15540008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd40005b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd40005d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd840006d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11540015, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1998003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1af0007d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15dc000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d65400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300018, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a38003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dd5c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7df1c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800045, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411326a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc415326b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc419326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d326d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425326e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4293279, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800077, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd000056, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800058, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00059, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x259c8000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce40005a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29988000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd000073, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17300019, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25140fff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800003a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4153279, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400077, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd00005f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26f00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15100010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd000035, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000035, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1af07fe8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43dc031, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04343000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf413267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dd1c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45dc0160, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc810001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b4c0057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f4f400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55180020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1af4007d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33740003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26d80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1ae8003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9680000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96800009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2a640002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce413277, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce413348, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253348, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x958000d8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000315, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253277, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04303000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26680001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96800041, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1714000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25540800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x459801b0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d77400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x199c01e2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3e5c0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400015, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a640002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000282, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc80c0011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8140011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1334e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01334f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd413350, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813351, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd881334d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193273, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3275, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113270, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4153274, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cdcc011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05900008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd00006a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc0006b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d594002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x54d00020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc12e23, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd012e24, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc12e25, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15540002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b340057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b280213, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980198, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd40000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd40000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x20cc003c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdd430000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc01e0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29dc0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2d540002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x078c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001239, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04f80000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dd5c005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd840007c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400069, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c018a6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4412e22, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800007c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c018a2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0019, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd4c005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24cc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9680fffc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd0c002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9680fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800002e3, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000069, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd013275, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9540188f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc013cfff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd0c009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc13249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x38d00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99000006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04cc0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdcc30000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c01882, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000304, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c0016, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c0015, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x49980198, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55e40020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x459801a0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000329, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16ec001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1998003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00031, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce00000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a18003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94800015, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c0000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24dc00ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31e00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580fff0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95801827, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd840002f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14dc0011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c0fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800006d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a0000ad, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04200032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04080000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27fc0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1af4003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9740004d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4080060, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ca88005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24880001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f4b4009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97400046, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313274, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d33400c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97400009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1eecffdd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf013273, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf013275, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800003c3, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc429326f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1aa80030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96800006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28240001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e6a8004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800035, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3272, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25cc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19e80042, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25dc0006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e8e800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de9c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d3271, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4293270, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50cc0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ce8c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd30011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11e80007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd300001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b30003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33300000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4240059, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1660001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e320009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0328000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e72400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0430000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc02ac000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d310002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17300002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa87600, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd0c011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd0c00025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280222, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4280058, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x22ec003d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec13249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd013273, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce813275, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800007b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8380018, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x57b00020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04343108, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc429325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2374007e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32a80003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94800003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800003e7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04200022, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04200010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xde030000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980104, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x49980104, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800003f2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c0016, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c0015, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380081, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf813279, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf41326e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01326d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c0000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x254c0700, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a641fe8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0726, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2a640200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1237b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8813260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4240033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4280034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9000036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce40000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c01755, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9680000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce80000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xde830000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce80000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c0174c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2bb80040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100044, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19180024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x551c003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000043d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00c8000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd840006c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28200000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000043f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00c4000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x282000f0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113255, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01324f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc130b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000053, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x195c00e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc420000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32200002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc5e124dc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e624001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2555fff0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3255, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980158, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x49980158, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980170, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4200012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16200010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1800025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc429324f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d43c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x195400e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1154000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18dc00e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05e80488, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18f807f0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e40077, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18ec0199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000048e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000494, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800004de, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000685, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000686, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800006ac, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1ccc001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4293254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1264000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d79400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e7a400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52a8001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15180001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95000008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1aec0028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d325c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800004cc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc419324e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26e8003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d324d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d324f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d290004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f8f4001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f52800f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50e00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800004d1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d0dc002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e5e401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f534002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x6665fc00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800004d7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1aec003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3257, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4213259, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12f4000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d75401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52200002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da1c011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd140000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2a644000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x202c003d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x259c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15980004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05e804e3, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800004e7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800004f0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000505, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9640fff4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26edf000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05a80507, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000050c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000528, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000057d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800005c2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800005f3, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c0000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99000008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1be00fe4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce413260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000066, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400068, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd40005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c004d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c0000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99000009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00063b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801326f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000624, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bd400e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c0060, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ed6c005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113271, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4153270, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193272, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3273, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280022, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d51401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113274, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4213275, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253276, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1400061, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2730000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7db1800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800060, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05dc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00062, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd000063, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000064, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400065, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b700057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b680213, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x46ec0188, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17e00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26e01000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a00fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9c131fc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x192007ec, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x69dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de20014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x561c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce013344, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc13345, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400022, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800068, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2010007d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9500fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100060, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd00001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc410000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9900ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2010003d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x191807e4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9540000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2511fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd013277, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013344, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013345, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180050, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c0052, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280042, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813273, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc13275, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce813260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9000068, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400067, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07d40000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00124f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x057c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x46ec0190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4153249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2154003d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c0019, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bd800e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc420004d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc0010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd413249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce01326f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f598004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1be800e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c004a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce80005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801327a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800005f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000075, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800007f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424004c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41326e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec0005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28240100, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e6a4004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400079, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc435325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x277401ef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41325e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf41325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xda000068, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113277, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9540002d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c3000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4353267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425334d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc419334e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d334f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4213350, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253351, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b680057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b700213, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b740199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x46ec01b0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c2000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc420000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc1334a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1be000e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0360001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc63124dc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef6c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80fff9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc02ee000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1c200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fc14001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000697, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25100007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31100005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9900008e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x202c007d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a9feff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d30b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce813265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00ac006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00e0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28880700, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c0006de, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14cc0010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x30d4000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x10cc0010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41530b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19980028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800006c8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8380023, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fa38011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd3800025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x202400d0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28240006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd81a2a4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c0000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc420004a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x194c00e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc0005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c004c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431326d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27301fff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce00005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cf0c00d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000712, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x194c1c03, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc0003b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c002d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05e80714, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000071c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000720, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000747, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000071d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800007c4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000732, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000745, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000744, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c00006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c0000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2a64008c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce413265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b301fe8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000075e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c0fff1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000723, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41f02f1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000743, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd000008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c0ffde, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000072e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c0007e0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd84131db, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b301ff8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b300400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8413260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x041c3000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25dc8000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c004a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x195800e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd80005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418004c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd81326e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc0005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25dd7fff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e1a001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x46200200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04283247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1af80057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1af40213, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f6f400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc6990000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x329c325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x329c3269, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c00006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x329c3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc01defff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d9d8009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000078a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25980000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00fff2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc03e7ff0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f3f0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf013249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc03e4000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc13254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b300028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9900000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d30b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bf0003a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b000b80, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x203c003a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300700, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf0130b7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc130b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x46200008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2000025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4080007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x259c0003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31dc0003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x040c3000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18ec0057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e40213, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18cc0199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cecc00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ce4c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000448, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x040c2000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc800010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31980002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19580066, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15600008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0120001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11980003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da18001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d24db, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd0c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dd9c005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40fff8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580137b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00ee000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1c200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113269, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19080070, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x190c00e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2510003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2518000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05a80809, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000080e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000080f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000898, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000946, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800009e1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04a80811, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000815, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000834, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3045, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1c091, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9700000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b000241, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b000003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc02f0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4252087, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x5668001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a80005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000084a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec130b6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd80130b6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31300021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd84002f1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4293059, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56a8001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f2b000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00021d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x040c0005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001a41, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43b02f1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec80278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56f00020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf080280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8813247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000085e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31100011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x950001fa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc02e0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aec0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc01c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0180001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11a40006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de6000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x10e40008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e2e000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1d10ffdf, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2110003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801324f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013255, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1d10ff9e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801325e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0245301, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801325f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0121fff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29108eff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0127ff0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0131fff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e524009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801326d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801326e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013279, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09980001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0100010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dd2400c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0180003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dd1c002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000866, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04a8089a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000089e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800008fa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000945, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31300022, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x964012a4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc02620c0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01c082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41c083, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0260400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800008d2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000903, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31240022, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43130b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ec30011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32f80000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x67180001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0bfc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x57300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd981325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000915, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9c1325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc0fff6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f818001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001606, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d838001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94800010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3259, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc421325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16240014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12640014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a2801f0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a80010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de1c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013259, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00075e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4af0228, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1330000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13f40014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07fc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33e80010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9680ffec, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04a80948, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000094c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000099b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800009e0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x459801e0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2738000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8300011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8340011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9740002c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13b80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc79d3300, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc7a13301, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8393300, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0260001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce793301, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x964011fe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c028009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce40001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x242c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06ec0400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x57740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0260010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41c078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01c080, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x57240020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41c081, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0260800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e6e400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7eae8001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f2f0011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000978, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdf93300, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce393301, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dda801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e838011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001802, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x469c0390, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4280011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c0011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45dc0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1c0001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c0014df, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31280014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce8802ef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a800062, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31280034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a800060, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04a809e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800009ec, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000a45, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000a59, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d91801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b30258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e72401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b342010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x172c000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26ec0800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b30c012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef7400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b300000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf00001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x66740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97400041, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04383000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4393267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b38007e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b400003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x4598001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9740002f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf4002eb, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf4002ec, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf4002ed, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf4002ee, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd84802e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001715, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04382000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf813267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0ffbc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04341001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94800005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431ecaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300080, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000a55, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43130b6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x233c0032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc130b6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf0130b6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc49302ef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000a5a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x5198001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193269, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2598000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd80002f1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013268, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x53b8001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7db9801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813268, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000a5e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c01106, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c010fd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50640020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ce4c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc80c0072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x58e801fc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a80009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18dc01e2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e5e4002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3e5c0003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3e540002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9540000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8180011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x44cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55900020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4140011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000aa2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x44cc0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd0c0001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8100011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd812e01, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd012e02, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd412e03, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2264003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc410001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4140028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95000005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1e64001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce413249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14d00010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99400009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ab1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc420001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a0010ac, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd880003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c0003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d403f7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41b0367, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d85800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d001fc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05280adc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000af1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000adf, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ae7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd8d2000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c00010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d803f7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc010ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d0cc009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11940014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29544001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29544003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000af4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd44d2000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd44dc000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d0003c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95000006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000ace, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd8d2c00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000b0a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd44d2c00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28148004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24d800ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00019, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4593240, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c0105e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d324f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313255, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef3400c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14e80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a8000af, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c01043, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18a01fe8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3620005c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a00000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2464003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc6290ce7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16ac001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26ac003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ee6c00d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a00fff8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000367, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9640102e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x199c0037, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19a00035, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c0005d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16f8001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9780000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc035f0ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e764009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19b401f8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13740008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce413248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55140020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x199c0034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1ae4003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000b7c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19a4003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bfc01e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13fc0018, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dbd800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1d98ff15, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x592c00fc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd80000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12e00016, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x592c007e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12e00015, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11a0000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1264001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1620000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12e4001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x5924007e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12640017, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19a4003c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12640018, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e26000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd013257, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd413258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00fdb, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96800001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9780f5ca, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001b6d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07740003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x269c003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e5e4004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f67000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f674002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1ab8c006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000bec, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000b47, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc03a8004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc415325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18580037, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x262001ef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d15400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1d54001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd41325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd280200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd680208, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcda80210, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b400014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800017, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a80004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc6930200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc6970208, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc69b0210, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd900003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd940003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9000040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9400040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14fc0011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24f800ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33b80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd88130b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d83c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4093249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1888003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94800020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000671, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc419324c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x259c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1598001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00016, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800015, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99000003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14d80011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24e000ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x321c0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580ffee, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c00014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000c30, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9480000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800f29, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800f23, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800f1a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x041c0003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0077, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9600f502, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c0f500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000f05, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1f30001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16e4001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9640f4f4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc434000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33740002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b40f4f1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4353254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1aec003c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a80015, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12ec001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1374000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc02e4000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1774000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7eae800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400100, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12780001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2bb80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00ac005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00e0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc8000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28884900, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ff3, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400ee1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41c40a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41c40c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41c40d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24d0007f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15580010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x255400ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01c411, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd81c40f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd41c40e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41c410, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e80033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18ec0034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41c414, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41c415, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd81c413, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18dc0032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c030011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c038011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431c417, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc435c416, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439c419, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43dc418, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf413261, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf013262, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc13263, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf813264, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18dc0030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00017, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d77000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51b80020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x53300020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f97801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f37001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f3b000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc0000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000cd6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000018, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ca7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18dc0031, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc435c40b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9740fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4280032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000cf4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc032800b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d42011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17fc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24cc007f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96800e6c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x596001fc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ce0c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x505c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc0001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd140001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1c00020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e5e800c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00015, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b000024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x122c0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000d1f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8240010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96800005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x566c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce413261, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec13262, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f8cc00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000d57, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb81fe8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0328009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04143000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e51001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d2d0011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19640057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19580213, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19600199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da6400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e26400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1000025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04142000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd413267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4153267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99400001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d80034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05280d83, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000d8a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000db1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000d95, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000dbc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e010001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d75400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580f3d8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x526c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e2ec01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x5ae0073a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9940000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580f3c6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc3a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80fffb, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980fff5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16200002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce01c405, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd441c406, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580f3b1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11540010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29540002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4610000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580f3a5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00da7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x5aac007e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12d80017, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d9d800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56a00020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da1800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e82400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e58c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19d4003d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x20880188, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x54ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x20240090, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28240004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000016, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf80003a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd901a2a4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd841325f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc429325f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26ac0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26ac0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b301ff0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b300300, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2330003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9680000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400039, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c0001a2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1910003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51100020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a80014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2220003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e2a000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27fc0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000039, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd0c00038, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0022, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x041c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d40030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18fc0034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24e8000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80e71, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000edd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000e91, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ea1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000eaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000e7c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000e7f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000e87, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000e8f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d9e001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4213262, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253261, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2a200008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4213264, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253263, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc820001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e82005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51e00020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da1801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1800072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8180072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x59a001fc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12200009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ea2800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce80001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd180001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd81c400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc421c401, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400041, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425c401, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ee6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ede, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3db09001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3db09011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ebc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc5a10000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc5a50000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e26001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05280eea, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000f11, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000f2e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000efe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000f1f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0f26f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e80058, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7daec01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2c00072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x5af8073a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7eba800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2c00025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0f25c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15980002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd81c405, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce01c406, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56240020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41c406, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0f24e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439c409, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40f247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05980001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce190000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0f240, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439c040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97800001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac2580, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac260c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac0800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac0828, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac2440, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac2390, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac0093, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac31dc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31ac31e6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ef2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x39ac7c06, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3db07c00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x39acc337, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3db0c330, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x39acc335, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3db0c336, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x39acec70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3db0ec6f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x39ac9002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3db09002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x39ac9012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3db09012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000f40, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ef1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc434000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b740008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b780001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf80001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c034001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c038001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01c080, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd41c081, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000f88, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51640020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e52401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce81c080, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01c081, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1334000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24e02000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f63400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32240003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32240000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc1c083, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000f9d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51e40020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e5a401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2400072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8280072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce81c082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56ac0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26f0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1af000fc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13380016, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e00039, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12200019, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e0007d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1220001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e00074, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fa3800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf81c078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc1c084, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18dc003d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x041c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d001e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31140005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99400003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31140006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05280fb7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28140002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000fc2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000fbe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000fd1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ff2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e80039, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52a8003b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d69401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c0017, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd140004b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc414000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04180001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d958004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800035, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400074, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100019, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d150005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9500000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0fffc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x159c0011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x259800ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31a00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31a40001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e25800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c0fff5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580fff4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000fef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411326f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1d100010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01326f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000074, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc011000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33b40003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97400003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0340008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000ffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c908009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x282c2002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x208801a8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000102f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1cccfe08, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1a2a4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bfc003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16a80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00b33, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd840003c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4200025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da2400f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da28002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e1ac002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d2ac002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3ef40010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b40f11d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf81325e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xde410000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdcc10000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdd010000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdd410000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdd810000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xddc10000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xde010000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c024001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8100086, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x5510003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001075, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9900000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100081, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d15800f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d15c002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d520002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cde0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3e20001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x040c0030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1325e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001071, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9c00036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00b01, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc200000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc1c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc180000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc140000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc100000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc0c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc240000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc40003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4080029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18a400e5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12500009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x248c0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x200c006d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x200c0228, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd0c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc410002b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18881fe8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d4072c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18cc00d1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd4c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3094000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x38d80000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x311c0003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x30940007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1620001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9940001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000023, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800010c4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c00019, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00041, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25140001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418002c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9940000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x259c007f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19a00030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400022, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x199c0fe8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc0001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400023, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800010cb, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000022, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000023, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000aac, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce01326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07a810d8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc421326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000104c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc400040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x200c007d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28240007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xde430000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d3249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18cc003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x192400fd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06681110, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19180070, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19100078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18f40058, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001117, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001118, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000112d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001130, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001133, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24ec0f00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32ec0600, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000117b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc81c001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc81c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55e00020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001122, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc02a0200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e8e8009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x22a8003d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x22a80074, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2774001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13740014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7eb6800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25ecffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55700020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15f40010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13740002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x275c001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15dc0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x39e00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25dc0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dc1c01e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05e40008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e62000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001165, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dc2001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e1a0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e0d000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95000007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e02401e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06640008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05d80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dc2401e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da58001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05e00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da2000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9600ffe6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17640002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00116e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4200006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00116b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc420000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2a200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce00001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce81c078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1c080, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd41c082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01c083, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12640002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x22640435, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41c084, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0528117e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x312c0003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001185, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001182, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc03a0400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d81c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc130b7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c0049, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19a000e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29a80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de2c00c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc420007d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce40003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800011a3, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d654001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd41326d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c020001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4240081, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4140025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800011b6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253279, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc415326d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2730003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3b380006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3f38000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800011b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0430000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb10004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e57000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e578002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d67c002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0be40001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d3a4002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x202c002c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc421325e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26200010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3e640010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce81325e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc434002e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17780001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07a811cf, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00feb8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x954009a7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000bfc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800012e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1c07c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41c07d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41c08c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41c079, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01c07e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18f0012f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18f40612, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18cc00c1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cf7400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x39600004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0140004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18fc003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9740001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400041, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800011ee, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a6c003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c00006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04200002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800011e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428002c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96800010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26ac007f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1ab00030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1aac0fe8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001205, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11600001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0fffa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc03a2800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380060, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc03a0800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf80001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc03ae000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf81c200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc03a4000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf81c07c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17fc001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc0fffa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x30d00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99000052, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9640090f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19180038, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99400030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x30dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c0000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d324e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431324d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4293256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1ab0c006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000127f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d3258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313257, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4353259, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc429325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1ab0c012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a0003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f67800f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x53740002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef6c011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1ab42010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16a8000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a80800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b740000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f6b400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf40001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1514001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c0012e1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x964008d7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9800036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b300677, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800012aa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b34060b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b300077, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f37000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300017, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04340100, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26ec00ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc03a8002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef6c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7edec00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f3b000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4140032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc410001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1858003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x251000ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d0cc00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d0006c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d407f0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9900000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d324f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2598003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d190004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d5d4001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d52000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d514002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd41324f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800012d8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193259, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d958001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dd5c002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813259, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc1325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1ccc001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14f00010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b000004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b40000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b000005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00037, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd980003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9c0003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9800040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd9c00040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800010de, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97800051, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc80003b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24b00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18a800e5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1d980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a80008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7da9800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4353249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b74003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd840003d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b304000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431326c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50700020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04e81324, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18ac0024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50600020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d71401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x596401fc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12640009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b74008d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e76400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2a640000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000132c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000133b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001344, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42530b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a68003a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2024003a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25980700, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11980014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d19000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd0130b7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce4130b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4240011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80ffea, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce40001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc428000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8240011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de1c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de6800f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80ffe0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00104f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28182002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340035, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8140023, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180081, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4240004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11a00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d614011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4100026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05980008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ca4800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d1a0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cb0800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3e280008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cb4800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf000024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x20240030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ca48001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b4c00f8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28340000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x507c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x30e40004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d7d401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x557c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28342002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a800005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c018001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec0003a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf81a2a4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001037, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c007eb, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d0d001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8100072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x591c01fc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45140210, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x595801fc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11980009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29dc0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc0001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1624001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400069, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce013249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a307fe8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x23304076, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18cc00e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x10cc0015, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x4514020c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4200011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce013248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a2001e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12200014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2a204001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a64003c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1264001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15dc000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dcdc00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e5dc00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00100, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf00000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf00000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04340022, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4412e01, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0434001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdf430000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdf030000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4412e40, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41c030, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41c031, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x248dfffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc12e00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc812e00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45140248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce013257, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce013258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0434000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdb000024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45540008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd140001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9980ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8200011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce013259, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56200020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0337fff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f220009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce01325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55300020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d01c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c01d0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06ec0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f01c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x041c0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c01c8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c000d61, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50500020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd0c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd0c00072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8240072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd240001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19682011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x5a6c01fc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7eeac00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4180011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99800007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfa0000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4380007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17b80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d40038, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400029, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc414005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9540073d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18c80066, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x30880001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d410001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x4220000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc000078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24e80007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24ec0010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac00006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc5310000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001465, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1000072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc82c0072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2c0001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18f02011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x5aec01fc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12ec0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aec0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec0001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96800012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0aa80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a8146a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f1f0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001478, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f1b400c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f1b400d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f1b400f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f1b400e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000147a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f334002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97400014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000147b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b400012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc0001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e024001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000144a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fbfc00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x251001ef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94800007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00187c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42c0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd910000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40d325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800012c2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13f4000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bf0060b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bfc0077, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800014a9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d325a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bfc0677, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300100, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb81ff0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0328007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb7800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13fc0017, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ff3c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ffbc00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc1325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc03a0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf8130b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dd9c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45dc0390, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04183000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1c00025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c424001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c428001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c430001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c434001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04182000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd813267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a0800fd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x109c000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc13265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce080228, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9880000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080230, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0ec75, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26180001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080238, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080240, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce480250, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce880258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52a80020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x041c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x66580001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc80260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080268, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080270, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec80288, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf080290, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec80298, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf0802a0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x041c0010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf4802a8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27580001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c0fffb, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc802b0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd80802b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x178c000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27b8003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cf8c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf8802c0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc802c8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf8802d0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf8802d8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25b8ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24cc000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd2800c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc5230309, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2620ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e3a400c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001539, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd880353, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc49b0353, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc48f0228, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd14005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000154f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd080238, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd08034b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x08cc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3d200008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd900309, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2198003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd910ce7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4190ce6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d918005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d918004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd810ce6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdd1054f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000156e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x090c0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdcd050e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x040c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x110c0014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc4001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41230a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41230b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41230c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc41230d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc480329, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc48032a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc4802e0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24d8003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09940001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x44100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580002c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x69100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000157f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24cc003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4970290, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc49b0288, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc49b02a0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc49f0298, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d9d801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x041c0040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04200000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dcdc002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d924019, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d26400c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c0fffa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001579, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d010021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d914019, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55580020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd480298, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd8802a0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x10d40010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12180016, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc51f0309, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d95800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d62000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dd9c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdd00309, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce113320, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc48f02e0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc49b02b0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18dc01e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dd9400e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c0001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800015aa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc48f0238, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4a302b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12240004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e5e400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4ab02a8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04100000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce4c0319, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d9d8002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ea14005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800015bc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04240001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e624004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d25000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2620000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c0fff4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd0d3330, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce0802b8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd8802b0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4ab02e0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1aa807f0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc48f02d0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc49702d8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc49b02c8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc49f02c0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96800028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d4e000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9600000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d964002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cde4002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e6a000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de94001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd64002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e6a000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d694001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800015e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc48f0230, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4930240, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00163f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800015cd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4930238, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d698002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd4802d8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x129c0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc50f0319, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11a0000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11140001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e1e000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1198000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd953300, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e0e000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a8000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce953301, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce100319, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b70280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f73800a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x536c0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9780eb68, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c0003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001609, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x30b40000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b400011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b70258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b30250, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x53780020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb3801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7faf8019, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x67b40001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x57b80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00fffb, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4bb0260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fab8001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf880260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x66f40001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97400005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4353247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f7f4009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b40fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00fff7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x269c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a00018, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12200003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a00060, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06200020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x269c0018, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a00007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a40060, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11dc0006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12200006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29dc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de1c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de5c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b70228, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04cc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc80230, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f514005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99400004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2510000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001644, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b30248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd080240, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f130005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001688, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00120d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001219, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001232, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04340801, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f130004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01051e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42d051f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ed2c005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26ec0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96c0fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01051f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000055, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc5170309, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x195c07f0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x196007f6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04340000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04340001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x53740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x6b740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001665, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4a702a0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4ab0298, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f634014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e76401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8113320, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce480298, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce8802a0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc5170319, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b702b0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x255c000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f5f4001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8113330, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf4802b0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11340001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x195c07e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x196007ee, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8353300, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e1e4001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8353301, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce4802d0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8100309, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8100319, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf000008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc48f0250, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd4c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x64d80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x54cc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580005c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dd2000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3255, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc435324f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7df5c00c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25980040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800016f1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a7003e6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a700064, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800016df, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800016f2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd840004f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd80802e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18fc0064, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00042, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51980020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dd9801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x45980400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b380057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b340213, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f7b400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f73400a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc0001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4bf0258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4a70250, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x53fc0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e7e401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x667c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0aec0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7eebc00c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0fff8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0b300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x43300007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x53300002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7db30011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd3000025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc03ec005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2bfca200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x192807fa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x203c003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc13256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c0017f5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18fc01e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc13248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00185b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b40ffd5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0ea24, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14d4001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d52400e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc49f0258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4a30250, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51dc0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de1801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400017, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d534002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4af0270, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dae4005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000174f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0b740001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00178a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b40fff3, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001608, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4ab0268, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7daa4005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32a0001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001765, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d1d0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2110007d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8013256, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c0017f2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd013254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4113248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b3034b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f13000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf013248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4930260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001855, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32a4001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8413247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800004f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd080260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce880268, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9940ffc0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ec28001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32e0001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9640005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4293265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253255, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431324f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e72400c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a80040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9680fff7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1aa4003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400049, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1aa400e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32680003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a800046, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9640000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4293260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1aa400e4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32640004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800017e2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc027ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2e6400ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e6a4009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a800ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4240009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26640008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9640fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19e403e6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26680003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12a80004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26640003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12640003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19e400e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ea68001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19e40064, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x32640002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06640003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a640003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800017d0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16a40005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce412082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12640005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ea64002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4292083, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ea68005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a80ffdf, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26640010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc429325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26a400ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40ffca, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2024007b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800017e3, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd841325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4a70280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4ab0278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52640020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7eae8014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e6a401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56680020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce480278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce880280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06ec0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x042c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec80270, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800017fe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43b02eb, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42302ec, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf813245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce013246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fa3801a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x47b8020c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x15e00008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1220000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2a206032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x513c001e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e3e001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4bf02e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000180f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b3c0077, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ff3000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1330000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b300032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd200000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4200007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd3800002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400018, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000018, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dc30001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc1e0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04380032, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf80000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001427, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc413248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3269, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27fc000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33fc0003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c00011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0bfc0021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd441326a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x173c0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b300303, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f3f0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ff3c004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc13084, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001842, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c0024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdfc30000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4413249, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c43c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x23fc003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc1326d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0bb80026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdf830000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd441326e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c438001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4393265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1fb8ffc6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xddc30000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf813265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc0000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001852, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc0000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c00142b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc49f02e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c00018, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c420001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c3000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c0012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001878, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41f02ed, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42302ee, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc13252, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce013253, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e2a0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce013084, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28340001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x313c0bcc, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x393c051f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3d3c050e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc0000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x393c0560, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3d3c054f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c00007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x393c1538, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3d3c1537, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b740800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e8007c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c42c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a8189a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800018c5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800018f2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c414001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d0007e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x50580020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d59401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1400072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc8140072, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09240002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c418001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99000011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4340004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc42130b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a24002c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2020002c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc418000d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1198001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x10cc0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14cc0004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7cd8c00a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc130b7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce0130b5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd1400025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x5978073a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2bb80002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf800024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd800026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9600e8a8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9640e8a5, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800018a9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dad800c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c0ffd2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580fff9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x442c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9940fff1, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x66d80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x56ec0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26240007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9940fff7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc023007f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19e4003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7de1c009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dee000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96000007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c13260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd901325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc421325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x261c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99c0fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9940fff0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000189e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28cc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43d3265, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bc800ea, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18e00064, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06281911, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14f4001d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24cc0003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x86800000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001915, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x800019af, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001a2b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8000016a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc48032b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc480333, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc48033b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc480343, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98800011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b3c0057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e3e000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04180000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f438001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25dc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00068, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4213254, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a1c003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00065, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc01f007f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e1e0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97800062, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0bb80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x43bc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fcbc001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc7df032b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e1fc00c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0fffa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c0101, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c0102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001994, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001982, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00ffcb, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001995, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc1325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98800009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x41bc0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x53fc0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e7fc011, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd3c00025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0012, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9bc0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x653c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dbd8001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9940ff8f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2bfc0008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x043c2000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcfc13267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c410001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04140000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc55b0309, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x3d5c0010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2598ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x05540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d91800c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580fff8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09780001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9580005d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400058, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dc24001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41d3248, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25dc000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7df9c00c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95c00053, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e41c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a70003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a7000e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33240003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a400046, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1a7000e4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001a21, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f270009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x266400ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27240003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12640004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e724001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06640002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001a0f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x16700005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e730002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4252083, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e724005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x26640001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a40ffdf, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x267000ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001a22, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9940ff9f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001a31, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8080280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4213246, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4253245, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52200020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e26401a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x46640400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04203000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce013267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4213267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b180057, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b200213, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1b300199, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e1a000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e32000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce000024, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4970258, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4930250, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x51540020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d15001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4af0280, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4b30278, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x52ec0020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04140020, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04280000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x65180001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800060, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x8c001628, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4193247, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x25980001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04200101, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x30f00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04200005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04200102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95800056, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb0003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000049, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb000e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33380003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b800046, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9700000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4393260, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bb000e4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33300004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001aa2, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc033ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2f3000ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f3b0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf01325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27b800ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4300009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9700fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19f003e6, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27380003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13b80004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19f000e8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb38001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07b80002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x19f00064, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33300002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0b300003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001a90, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x17b00005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf012082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01203f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x13300005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb30002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4392083, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7fb38005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27b80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b80ffdf, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c00034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc00013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc431325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27300010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc439325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27b000ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b00ffca, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2030007b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf00325b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001aa3, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce01325d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04300001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7f2b0014, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ef2c01a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99800005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd2400025, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x4664001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000026, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400027, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x06a80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55100001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9940ff9c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc49b02e9, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99800008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc430000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2b300008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf000013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04302000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcf013267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc4313267, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x244c00ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc4c0200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc44f0200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc410000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc414000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d158010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x059cc000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccdd0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0037, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc000049, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c003a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9500e69a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d0003b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d40021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99400006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd840004a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c003c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x14cc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c00028, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000033, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc438000b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0009, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x27fc0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd841c07f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43dc07f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1bfc0078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7ffbc00c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x97c0fffd, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x99000004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0120840, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x282c0040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001ae8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0121841, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x282c001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01c07c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c07d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c08c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c079, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c07e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcec0001b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a200001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9a00ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x166c001f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04200004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9ac0fffb, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc434000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9b40ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801c07f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc425c07f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8000034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9940e66b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800004a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0036, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24d00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9900fffe, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18cc0021, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc00047, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc000046, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0039, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c003d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c40c001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24d003ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d47fea, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x18d87ff4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd00004c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd40004e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd80004d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd41c405, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc02a0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2aa80001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01c406, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c406, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc414000e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x29540008, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x295c0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8c1325e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcdc0001a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11980002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x4110000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0160800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7d15000a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0164010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd41c078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c080, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c081, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd81c082, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc01c083, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01c084, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x98c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400048, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c003b, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x94c0ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000c16, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801c40a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd901c40d, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801c410, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801c40e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd801c40f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc40c0040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04140001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x09540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9940ffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04140096, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8400013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1c400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc411c401, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9500fffa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424003e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04d00001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x11100002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd01c40c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0180034, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd81c411, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd841c414, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0a540001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcd41c412, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x2468000f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc419c416, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x41980003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc41c003f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7dda0001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x12200002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x10cc0002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xccc1c40c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd901c411, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce41c412, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd8800013, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xce292e40, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc412e01, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc412e02, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc412e03, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc412e00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000aa7, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc43c0007, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc120000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x31144000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x95400005, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xdc030000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd800002a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xcc3c000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b70, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x33f80003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd4400078, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x9780e601, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x188cfff0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x04e40002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001190, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400006, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x90000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc424005e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x96400003, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7c408001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x88000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80001b74, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000168, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110501, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120206, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130703, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92100400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92110105, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92120602, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x92130307, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
+ { PwrCmdWrite, 0x54106500, mmCP_DFY_ADDR_LO },
+ { PwrCmdWrite, 0x7e000200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e020204, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc00a0505, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xbf8c007f, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xb8900904, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xb8911a04, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xb8920304, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xb8930b44, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x921c0d0c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x921c1c13, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x921d0c12, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x811c1d1c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x811c111c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x921cff1c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000400, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x921dff10, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000100, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x81181d1c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e040218, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0701000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050102, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xe0501000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80050302, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
+ { PwrCmdWrite, 0x54106900, mmCP_DFY_ADDR_LO },
+ { PwrCmdWrite, 0x7e080200, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x7e100204, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xbefc00ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00010000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x24200087, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x262200ff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000001f0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x20222282, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x28182111, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000040c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd81a0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000080c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xd86c0000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x1100000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xbf810000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x80000004, mmCP_DFY_CNTL },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_ADDR_HI },
+ { PwrCmdWrite, 0x54116f00, mmCP_DFY_ADDR_LO },
+ { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xb4540fe8, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000041, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000000c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x54116f00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xb454105e, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000000c0, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x54117300, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xb4541065, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000500, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000001c, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x54117700, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xc0310800, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000040, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xb4541069, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000444, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x0000008a, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x07808000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xffffffff, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000002, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xaaaaaaaa, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x55555555, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x540fee40, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000010, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000001, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000004, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x54117b00, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00005301, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0xb4540fef, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x540fee20, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x000000b4, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x08000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_DFY_DATA_0 },
+ { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_MEC_CNTL },
+ { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x54116f00, mmCP_MQD_BASE_ADDR },
+ { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
+ { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
+ { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
+ { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
+ { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
+ { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
+ { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
+ { PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x54117300, mmCP_MQD_BASE_ADDR },
+ { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
+ { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
+ { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
+ { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
+ { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
+ { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
+ { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
+ { PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x54117700, mmCP_MQD_BASE_ADDR },
+ { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
+ { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
+ { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
+ { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
+ { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
+ { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
+ { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
+ { PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x54117b00, mmCP_MQD_BASE_ADDR },
+ { PwrCmdWrite, 0x000000b4, mmCP_MQD_BASE_ADDR_HI },
+ { PwrCmdWrite, 0xb4540fef, mmCP_HQD_PQ_BASE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_BASE_HI },
+ { PwrCmdWrite, 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR },
+ { PwrCmdWrite, 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI },
+ { PwrCmdWrite, 0x00005301, mmCP_HQD_PERSISTENT_STATE },
+ { PwrCmdWrite, 0x00010000, mmCP_HQD_VMID },
+ { PwrCmdWrite, 0xc8318509, mmCP_HQD_PQ_CONTROL },
+ { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000104, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000204, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000304, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000404, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000504, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000604, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000704, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000005, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000105, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000205, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000305, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000405, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000505, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000605, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000705, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000006, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000106, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000206, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000306, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000406, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000506, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000606, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000706, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000007, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000107, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000207, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000307, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000407, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000507, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000607, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000707, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000008, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000108, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000208, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000308, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000408, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000508, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000608, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000708, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000009, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000109, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000209, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000309, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000409, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000509, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000609, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000709, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_RPTR },
+ { PwrCmdWrite, 0x00000000, mmCP_HQD_PQ_WPTR },
+ { PwrCmdWrite, 0x00000001, mmCP_HQD_ACTIVE },
+ { PwrCmdWrite, 0x00000004, mmSRBM_GFX_CNTL },
+ { PwrCmdWrite, 0x01010101, mmCP_PQ_WPTR_POLL_CNTL1 },
+ { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
+ { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
+ { PwrCmdWrite, 0x00000000, mmGRBM_STATUS },
+ { PwrCmdEnd, 0x00000000, 0x00000000 },
+};
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74.h b/drivers/gpu/drm/amd/powerplay/inc/smu74.h
new file mode 100644
index 000000000..fd10a9fa8
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu74.h
@@ -0,0 +1,833 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+#ifndef SMU74_H
+#define SMU74_H
+
+#pragma pack(push, 1)
+
+#define SMU__DGPU_ONLY
+
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+
+#define EXP_M1 35
+#define EXP_M2 92821
+#define EXP_B 66629747
+
+#define EXP_M1_1 365
+#define EXP_M2_1 658700
+#define EXP_B_1 305506134
+
+#define EXP_M1_2 189
+#define EXP_M2_2 379692
+#define EXP_B_2 194609469
+
+#define EXP_M1_3 99
+#define EXP_M2_3 217915
+#define EXP_B_3 122255994
+
+#define EXP_M1_4 51
+#define EXP_M2_4 122643
+#define EXP_B_4 74893384
+
+#define EXP_M1_5 423
+#define EXP_M2_5 1103326
+#define EXP_B_5 728122621
+
+enum SID_OPTION {
+ SID_OPTION_HI,
+ SID_OPTION_LO,
+ SID_OPTION_COUNT
+};
+
+enum Poly3rdOrderCoeff {
+ LEAKAGE_TEMPERATURE_SCALAR,
+ LEAKAGE_VOLTAGE_SCALAR,
+ DYNAMIC_VOLTAGE_SCALAR,
+ POLY_3RD_ORDER_COUNT
+};
+
+struct SMU7_Poly3rdOrder_Data {
+ int32_t a;
+ int32_t b;
+ int32_t c;
+ int32_t d;
+ uint8_t a_shift;
+ uint8_t b_shift;
+ uint8_t c_shift;
+ uint8_t x_shift;
+};
+
+typedef struct SMU7_Poly3rdOrder_Data SMU7_Poly3rdOrder_Data;
+
+struct Power_Calculator_Data {
+ uint16_t NoLoadVoltage;
+ uint16_t LoadVoltage;
+ uint16_t Resistance;
+ uint16_t Temperature;
+ uint16_t BaseLeakage;
+ uint16_t LkgTempScalar;
+ uint16_t LkgVoltScalar;
+ uint16_t LkgAreaScalar;
+ uint16_t LkgPower;
+ uint16_t DynVoltScalar;
+ uint32_t Cac;
+ uint32_t DynPower;
+ uint32_t TotalCurrent;
+ uint32_t TotalPower;
+};
+
+typedef struct Power_Calculator_Data PowerCalculatorData_t;
+
+struct Gc_Cac_Weight_Data {
+ uint8_t index;
+ uint32_t value;
+};
+
+typedef struct Gc_Cac_Weight_Data GcCacWeight_Data;
+
+
+typedef struct {
+ uint32_t high;
+ uint32_t low;
+} data_64_t;
+
+typedef struct {
+ data_64_t high;
+ data_64_t low;
+} data_128_t;
+
+#define SMU7_CONTEXT_ID_SMC 1
+#define SMU7_CONTEXT_ID_VBIOS 2
+
+#define SMU74_MAX_LEVELS_VDDC 16
+#define SMU74_MAX_LEVELS_VDDGFX 16
+#define SMU74_MAX_LEVELS_VDDCI 8
+#define SMU74_MAX_LEVELS_MVDD 4
+
+#define SMU_MAX_SMIO_LEVELS 4
+
+#define SMU74_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE /* SCLK + SQ DPM + ULV */
+#define SMU74_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS /* MCLK Levels DPM */
+#define SMU74_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS /* LCLK Levels */
+#define SMU74_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS /* PCIe speed and number of lanes */
+#define SMU74_MAX_LEVELS_UVD 8 /* VCLK/DCLK levels for UVD */
+#define SMU74_MAX_LEVELS_VCE 8 /* ECLK levels for VCE */
+#define SMU74_MAX_LEVELS_ACP 8 /* ACLK levels for ACP */
+#define SMU74_MAX_LEVELS_SAMU 8 /* SAMCLK levels for SAMU */
+#define SMU74_MAX_ENTRIES_SMIO 32 /* Number of entries in SMIO table */
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL 0
+
+#define GPIO_CLAMP_MODE_VRHOT 1
+#define GPIO_CLAMP_MODE_THERM 2
+#define GPIO_CLAMP_MODE_DC 4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
+#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6
+#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
+#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9
+#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
+#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12
+#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15
+#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18
+#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
+#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21
+#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
+#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
+#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
+#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
+#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
+
+/* Virtualization Defines */
+#define CG_XDMA_MASK 0x1
+#define CG_XDMA_SHIFT 0
+#define CG_UVD_MASK 0x2
+#define CG_UVD_SHIFT 1
+#define CG_VCE_MASK 0x4
+#define CG_VCE_SHIFT 2
+#define CG_SAMU_MASK 0x8
+#define CG_SAMU_SHIFT 3
+#define CG_GFX_MASK 0x10
+#define CG_GFX_SHIFT 4
+#define CG_SDMA_MASK 0x20
+#define CG_SDMA_SHIFT 5
+#define CG_HDP_MASK 0x40
+#define CG_HDP_SHIFT 6
+#define CG_MC_MASK 0x80
+#define CG_MC_SHIFT 7
+#define CG_DRM_MASK 0x100
+#define CG_DRM_SHIFT 8
+#define CG_ROM_MASK 0x200
+#define CG_ROM_SHIFT 9
+#define CG_BIF_MASK 0x400
+#define CG_BIF_SHIFT 10
+
+
+#define SMU74_DTE_ITERATIONS 5
+#define SMU74_DTE_SOURCES 3
+#define SMU74_DTE_SINKS 1
+#define SMU74_NUM_CPU_TES 0
+#define SMU74_NUM_GPU_TES 1
+#define SMU74_NUM_NON_TES 2
+#define SMU74_DTE_FAN_SCALAR_MIN 0x100
+#define SMU74_DTE_FAN_SCALAR_MAX 0x166
+#define SMU74_DTE_FAN_TEMP_MAX 93
+#define SMU74_DTE_FAN_TEMP_MIN 83
+
+
+#if defined SMU__FUSION_ONLY
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 5
+#define SMU7_DTE_SINKS 3
+#define SMU7_NUM_CPU_TES 2
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+#endif
+
+struct SMU7_HystController_Data {
+ uint8_t waterfall_up;
+ uint8_t waterfall_down;
+ uint8_t waterfall_limit;
+ uint8_t spare;
+ uint16_t release_cnt;
+ uint16_t release_limit;
+};
+
+typedef struct SMU7_HystController_Data SMU7_HystController_Data;
+
+struct SMU74_PIDController {
+ uint32_t Ki;
+ int32_t LFWindupUpperLim;
+ int32_t LFWindupLowerLim;
+ uint32_t StatePrecision;
+ uint32_t LfPrecision;
+ uint32_t LfOffset;
+ uint32_t MaxState;
+ uint32_t MaxLfFraction;
+ uint32_t StateShift;
+};
+
+typedef struct SMU74_PIDController SMU74_PIDController;
+
+struct SMU7_LocalDpmScoreboard {
+ uint32_t PercentageBusy;
+
+ int32_t PIDError;
+ int32_t PIDIntegral;
+ int32_t PIDOutput;
+
+ uint32_t SigmaDeltaAccum;
+ uint32_t SigmaDeltaOutput;
+ uint32_t SigmaDeltaLevel;
+
+ uint32_t UtilizationSetpoint;
+
+ uint8_t TdpClampMode;
+ uint8_t TdcClampMode;
+ uint8_t ThermClampMode;
+ uint8_t VoltageBusy;
+
+ int8_t CurrLevel;
+ int8_t TargLevel;
+ uint8_t LevelChangeInProgress;
+ uint8_t UpHyst;
+
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t DpmEnable;
+ uint8_t DpmRunning;
+
+ uint8_t DpmForce;
+ uint8_t DpmForceLevel;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+
+ uint32_t MinimumPerfSclk;
+
+ uint8_t AcpiReq;
+ uint8_t AcpiAck;
+ uint8_t GfxClkSlow;
+ uint8_t GpioClampMode;
+
+ uint8_t spare2;
+ uint8_t EnabledLevelsChange;
+ uint8_t DteClampMode;
+ uint8_t FpsClampMode;
+
+ uint16_t LevelResidencyCounters[SMU74_MAX_LEVELS_GRAPHICS];
+ uint16_t LevelSwitchCounters[SMU74_MAX_LEVELS_GRAPHICS];
+
+ void (*TargetStateCalculator)(uint8_t);
+ void (*SavedTargetStateCalculator)(uint8_t);
+
+ uint16_t AutoDpmInterval;
+ uint16_t AutoDpmRange;
+
+ uint8_t FpsEnabled;
+ uint8_t MaxPerfLevel;
+ uint8_t AllowLowClkInterruptToHost;
+ uint8_t FpsRunning;
+
+ uint32_t MaxAllowedFrequency;
+
+ uint32_t FilteredSclkFrequency;
+ uint32_t LastSclkFrequency;
+ uint32_t FilteredSclkFrequencyCnt;
+
+ uint8_t MinPerfLevel;
+ uint8_t padding[3];
+
+ uint16_t FpsAlpha;
+ uint16_t DeltaTime;
+ uint32_t CurrentFps;
+ uint32_t FilteredFps;
+ uint32_t FrameCount;
+ uint32_t FrameCountLast;
+ uint16_t FpsTargetScalar;
+ uint16_t FpsWaterfallLimitScalar;
+ uint16_t FpsAlphaScalar;
+ uint16_t spare8;
+ SMU7_HystController_Data HystControllerData;
+};
+
+typedef struct SMU7_LocalDpmScoreboard SMU7_LocalDpmScoreboard;
+
+#define SMU7_MAX_VOLTAGE_CLIENTS 12
+
+typedef uint8_t (*VoltageChangeHandler_t)(uint16_t, uint8_t);
+
+#define VDDC_MASK 0x00007FFF
+#define VDDC_SHIFT 0
+#define VDDCI_MASK 0x3FFF8000
+#define VDDCI_SHIFT 15
+#define PHASES_MASK 0xC0000000
+#define PHASES_SHIFT 30
+
+typedef uint32_t SMU_VoltageLevel;
+
+struct SMU7_VoltageScoreboard {
+
+ SMU_VoltageLevel TargetVoltage;
+ uint16_t MaxVid;
+ uint8_t HighestVidOffset;
+ uint8_t CurrentVidOffset;
+
+ uint16_t CurrentVddc;
+ uint16_t CurrentVddci;
+
+
+ uint8_t ControllerBusy;
+ uint8_t CurrentVid;
+ uint8_t CurrentVddciVid;
+ uint8_t padding;
+
+ SMU_VoltageLevel RequestedVoltage[SMU7_MAX_VOLTAGE_CLIENTS];
+ SMU_VoltageLevel TargetVoltageState;
+ uint8_t EnabledRequest[SMU7_MAX_VOLTAGE_CLIENTS];
+
+ uint8_t padding2;
+ uint8_t padding3;
+ uint8_t ControllerEnable;
+ uint8_t ControllerRunning;
+ uint16_t CurrentStdVoltageHiSidd;
+ uint16_t CurrentStdVoltageLoSidd;
+ uint8_t OverrideVoltage;
+ uint8_t padding4;
+ uint8_t padding5;
+ uint8_t CurrentPhases;
+
+ VoltageChangeHandler_t ChangeVddc;
+
+ VoltageChangeHandler_t ChangeVddci;
+ VoltageChangeHandler_t ChangePhase;
+ VoltageChangeHandler_t ChangeMvdd;
+
+ VoltageChangeHandler_t functionLinks[6];
+
+ uint16_t *VddcFollower1;
+
+ int16_t Driver_OD_RequestedVidOffset1;
+ int16_t Driver_OD_RequestedVidOffset2;
+};
+
+typedef struct SMU7_VoltageScoreboard SMU7_VoltageScoreboard;
+
+#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
+
+struct SMU7_PCIeLinkSpeedScoreboard {
+ uint8_t DpmEnable;
+ uint8_t DpmRunning;
+ uint8_t DpmForce;
+ uint8_t DpmForceLevel;
+
+ uint8_t CurrentLinkSpeed;
+ uint8_t EnabledLevelsChange;
+ uint16_t AutoDpmInterval;
+
+ uint16_t AutoDpmRange;
+ uint16_t AutoDpmCount;
+
+ uint8_t DpmMode;
+ uint8_t AcpiReq;
+ uint8_t AcpiAck;
+ uint8_t CurrentLinkLevel;
+
+};
+
+typedef struct SMU7_PCIeLinkSpeedScoreboard SMU7_PCIeLinkSpeedScoreboard;
+
+#define SMU7_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
+#define SMU7_LKGE_LUT_NUM_OF_VOLT_ENTRIES 16
+
+#define SMU7_SCALE_I 7
+#define SMU7_SCALE_R 12
+
+struct SMU7_PowerScoreboard {
+ PowerCalculatorData_t VddcPowerData[SID_OPTION_COUNT];
+
+ uint32_t TotalGpuPower;
+ uint32_t TdcCurrent;
+
+ uint16_t VddciTotalPower;
+ uint16_t sparesasfsdfd;
+ uint16_t Vddr1Power;
+ uint16_t RocPower;
+
+ uint16_t CalcMeasPowerBlend;
+ uint8_t SidOptionPower;
+ uint8_t SidOptionCurrent;
+
+ uint32_t WinTime;
+
+ uint16_t Telemetry_1_slope;
+ uint16_t Telemetry_2_slope;
+ int32_t Telemetry_1_offset;
+ int32_t Telemetry_2_offset;
+
+ uint32_t VddcCurrentTelemetry;
+ uint32_t VddGfxCurrentTelemetry;
+ uint32_t VddcPowerTelemetry;
+ uint32_t VddGfxPowerTelemetry;
+ uint32_t VddciPowerTelemetry;
+
+ uint32_t VddcPower;
+ uint32_t VddGfxPower;
+ uint32_t VddciPower;
+
+ uint32_t TelemetryCurrent[2];
+ uint32_t TelemetryVoltage[2];
+ uint32_t TelemetryPower[2];
+};
+
+typedef struct SMU7_PowerScoreboard SMU7_PowerScoreboard;
+
+struct SMU7_ThermalScoreboard {
+ int16_t GpuLimit;
+ int16_t GpuHyst;
+ uint16_t CurrGnbTemp;
+ uint16_t FilteredGnbTemp;
+
+ uint8_t ControllerEnable;
+ uint8_t ControllerRunning;
+ uint8_t AutoTmonCalInterval;
+ uint8_t AutoTmonCalEnable;
+
+ uint8_t ThermalDpmEnabled;
+ uint8_t SclkEnabledMask;
+ uint8_t spare[2];
+ int32_t temperature_gradient;
+
+ SMU7_HystController_Data HystControllerData;
+ int32_t WeightedSensorTemperature;
+ uint16_t TemperatureLimit[SMU74_MAX_LEVELS_GRAPHICS];
+ uint32_t Alpha;
+};
+
+typedef struct SMU7_ThermalScoreboard SMU7_ThermalScoreboard;
+
+#define SMU7_SCLK_DPM_CONFIG_MASK 0x01
+#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02
+#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04
+#define SMU7_MCLK_DPM_CONFIG_MASK 0x08
+#define SMU7_UVD_DPM_CONFIG_MASK 0x10
+#define SMU7_VCE_DPM_CONFIG_MASK 0x20
+#define SMU7_ACP_DPM_CONFIG_MASK 0x40
+#define SMU7_SAMU_DPM_CONFIG_MASK 0x80
+#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100
+
+#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001
+#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002
+#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100
+#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200
+#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
+#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
+
+/* All 'soft registers' should be uint32_t. */
+struct SMU74_SoftRegisters {
+ uint32_t RefClockFrequency;
+ uint32_t PmTimerPeriod;
+ uint32_t FeatureEnables;
+
+ uint32_t PreVBlankGap;
+ uint32_t VBlankTimeout;
+ uint32_t TrainTimeGap;
+
+ uint32_t MvddSwitchTime;
+ uint32_t LongestAcpiTrainTime;
+ uint32_t AcpiDelay;
+ uint32_t G5TrainTime;
+ uint32_t DelayMpllPwron;
+ uint32_t VoltageChangeTimeout;
+
+ uint32_t HandshakeDisables;
+
+ uint8_t DisplayPhy1Config;
+ uint8_t DisplayPhy2Config;
+ uint8_t DisplayPhy3Config;
+ uint8_t DisplayPhy4Config;
+
+ uint8_t DisplayPhy5Config;
+ uint8_t DisplayPhy6Config;
+ uint8_t DisplayPhy7Config;
+ uint8_t DisplayPhy8Config;
+
+ uint32_t AverageGraphicsActivity;
+ uint32_t AverageMemoryActivity;
+ uint32_t AverageGioActivity;
+
+ uint8_t SClkDpmEnabledLevels;
+ uint8_t MClkDpmEnabledLevels;
+ uint8_t LClkDpmEnabledLevels;
+ uint8_t PCIeDpmEnabledLevels;
+
+ uint8_t UVDDpmEnabledLevels;
+ uint8_t SAMUDpmEnabledLevels;
+ uint8_t ACPDpmEnabledLevels;
+ uint8_t VCEDpmEnabledLevels;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+ uint32_t UlvEnterCount;
+ uint32_t UlvTime;
+ uint32_t UcodeLoadStatus;
+ uint32_t AllowMvddSwitch;
+ uint8_t Activity_Weight;
+ uint8_t Reserved8[3];
+};
+
+typedef struct SMU74_SoftRegisters SMU74_SoftRegisters;
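+
+/* Illustrative sketch only (helper name is hypothetical, not part of this
+ * patch): the driver addresses an individual soft register by adding its
+ * offsetof() within this struct to the SMU-provided base, as the
+ * UcodeLoadStatus accesses in polaris10_smumgr.c later in this patch do. */
+static inline uint32_t smu74_soft_reg_addr(uint32_t soft_regs_start,
+ uint32_t member_offset)
+{
+ /* e.g. member_offset = offsetof(SMU74_SoftRegisters, UcodeLoadStatus) */
+ return soft_regs_start + member_offset;
+}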
+
+struct SMU74_Firmware_Header {
+ uint32_t Digest[5];
+ uint32_t Version;
+ uint32_t HeaderSize;
+ uint32_t Flags;
+ uint32_t EntryPoint;
+ uint32_t CodeSize;
+ uint32_t ImageSize;
+
+ uint32_t Rtos;
+ uint32_t SoftRegisters;
+ uint32_t DpmTable;
+ uint32_t FanTable;
+ uint32_t CacConfigTable;
+ uint32_t CacStatusTable;
+
+ uint32_t mcRegisterTable;
+
+ uint32_t mcArbDramTimingTable;
+
+ uint32_t PmFuseTable;
+ uint32_t Globals;
+ uint32_t ClockStretcherTable;
+ uint32_t VftTable;
+ uint32_t Reserved1;
+ uint32_t AvfsTable;
+ uint32_t AvfsCksOffGbvTable;
+ uint32_t AvfsMeanNSigma;
+ uint32_t AvfsSclkOffsetTable;
+ uint32_t Reserved[16];
+ uint32_t Signature;
+};
+
+typedef struct SMU74_Firmware_Header SMU74_Firmware_Header;
+
+#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000
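+
+/* For reference (a sketch using names defined later in this patch, not a
+ * normative API): table locations are discovered by reading the 32-bit
+ * pointers the firmware stores in this header at its fixed SMC RAM address:
+ *
+ * polaris10_read_smc_sram_dword(smumgr,
+ * SMU7_FIRMWARE_HEADER_LOCATION +
+ * offsetof(SMU74_Firmware_Header, DpmTable),
+ * &dpm_table_start, 0x40000);
+ */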
+
+enum DisplayConfig {
+ PowerDown = 1,
+ DP54x4,
+ DP54x2,
+ DP54x1,
+ DP27x4,
+ DP27x2,
+ DP27x1,
+ HDMI297,
+ HDMI162,
+ LVDS,
+ DP324x4,
+ DP324x2,
+ DP324x1
+};
+
+
+#define MC_BLOCK_COUNT 1
+#define CPL_BLOCK_COUNT 5
+#define SE_BLOCK_COUNT 15
+#define GC_BLOCK_COUNT 24
+
+struct SMU7_Local_Cac {
+ uint8_t BlockId;
+ uint8_t SignalId;
+ uint8_t Threshold;
+ uint8_t Padding;
+};
+
+typedef struct SMU7_Local_Cac SMU7_Local_Cac;
+
+struct SMU7_Local_Cac_Table {
+
+ SMU7_Local_Cac CplLocalCac[CPL_BLOCK_COUNT];
+ SMU7_Local_Cac McLocalCac[MC_BLOCK_COUNT];
+ SMU7_Local_Cac SeLocalCac[SE_BLOCK_COUNT];
+ SMU7_Local_Cac GcLocalCac[GC_BLOCK_COUNT];
+};
+
+typedef struct SMU7_Local_Cac_Table SMU7_Local_Cac_Table;
+
+#pragma pack(pop)
+
+/* Description of Clock Gating bitmask for Tonga:
+ * System Clock Gating
+ */
+#define CG_SYS_BITMASK_FIRST_BIT 0 /* First bit of Sys CG bitmask */
+#define CG_SYS_BITMASK_LAST_BIT 9 /* Last bit of Sys CG bitmask */
+#define CG_SYS_BIF_MGLS_SHIFT 0
+#define CG_SYS_ROM_SHIFT 1
+#define CG_SYS_MC_MGCG_SHIFT 2
+#define CG_SYS_MC_MGLS_SHIFT 3
+#define CG_SYS_SDMA_MGCG_SHIFT 4
+#define CG_SYS_SDMA_MGLS_SHIFT 5
+#define CG_SYS_DRM_MGCG_SHIFT 6
+#define CG_SYS_HDP_MGCG_SHIFT 7
+#define CG_SYS_HDP_MGLS_SHIFT 8
+#define CG_SYS_DRM_MGLS_SHIFT 9
+#define CG_SYS_BIF_MGCG_SHIFT 10
+
+#define CG_SYS_BIF_MGLS_MASK 0x1
+#define CG_SYS_ROM_MASK 0x2
+#define CG_SYS_MC_MGCG_MASK 0x4
+#define CG_SYS_MC_MGLS_MASK 0x8
+#define CG_SYS_SDMA_MGCG_MASK 0x10
+#define CG_SYS_SDMA_MGLS_MASK 0x20
+#define CG_SYS_DRM_MGCG_MASK 0x40
+#define CG_SYS_HDP_MGCG_MASK 0x80
+#define CG_SYS_HDP_MGLS_MASK 0x100
+#define CG_SYS_DRM_MGLS_MASK 0x200
+#define CG_SYS_BIF_MGCG_MASK 0x400
+
+/* Graphics Clock Gating */
+#define CG_GFX_BITMASK_FIRST_BIT 16 /* First bit of Gfx CG bitmask */
+#define CG_GFX_BITMASK_LAST_BIT 24 /* Last bit of Gfx CG bitmask */
+
+#define CG_GFX_CGCG_SHIFT 16
+#define CG_GFX_CGLS_SHIFT 17
+#define CG_CPF_MGCG_SHIFT 18
+#define CG_RLC_MGCG_SHIFT 19
+#define CG_GFX_OTHERS_MGCG_SHIFT 20
+#define CG_GFX_3DCG_SHIFT 21
+#define CG_GFX_3DLS_SHIFT 22
+#define CG_GFX_RLC_LS_SHIFT 23
+#define CG_GFX_CP_LS_SHIFT 24
+
+#define CG_GFX_CGCG_MASK 0x00010000
+#define CG_GFX_CGLS_MASK 0x00020000
+#define CG_CPF_MGCG_MASK 0x00040000
+#define CG_RLC_MGCG_MASK 0x00080000
+#define CG_GFX_OTHERS_MGCG_MASK 0x00100000
+#define CG_GFX_3DCG_MASK 0x00200000
+#define CG_GFX_3DLS_MASK 0x00400000
+#define CG_GFX_RLC_LS_MASK 0x00800000
+#define CG_GFX_CP_LS_MASK 0x01000000
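+
+/* Minimal sketch (helper name is illustrative, not part of this patch):
+ * every clock gating feature above pairs a _MASK with a _SHIFT, so a
+ * feature bit can be extracted generically from a CG bitmask. */
+static inline uint32_t cg_feature_enabled(uint32_t cg_bitmask, uint32_t mask,
+ uint32_t shift)
+{
+ /* e.g. mask/shift = CG_SYS_MC_MGCG_MASK / CG_SYS_MC_MGCG_SHIFT */
+ return (cg_bitmask & mask) >> shift;
+}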
+
+
+/* Voltage Regulator Configuration:
+ * VR Config info is contained in dpmTable.VRConfig
+ */
+
+#define VRCONF_VDDC_MASK 0x000000FF
+#define VRCONF_VDDC_SHIFT 0
+#define VRCONF_VDDGFX_MASK 0x0000FF00
+#define VRCONF_VDDGFX_SHIFT 8
+#define VRCONF_VDDCI_MASK 0x00FF0000
+#define VRCONF_VDDCI_SHIFT 16
+#define VRCONF_MVDD_MASK 0xFF000000
+#define VRCONF_MVDD_SHIFT 24
+
+#define VR_MERGED_WITH_VDDC 0
+#define VR_SVI2_PLANE_1 1
+#define VR_SVI2_PLANE_2 2
+#define VR_SMIO_PATTERN_1 3
+#define VR_SMIO_PATTERN_2 4
+#define VR_STATIC_VOLTAGE 5
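+
+/* Minimal sketch (helper name is illustrative, not part of this patch):
+ * decoding one regulator mode out of dpmTable.VRConfig; the result is one
+ * of the VR_* values above, e.g. VR_SVI2_PLANE_1. */
+static inline uint8_t vrconf_get_vddc_mode(uint32_t vr_config)
+{
+ return (uint8_t)((vr_config & VRCONF_VDDC_MASK) >> VRCONF_VDDC_SHIFT);
+}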
+
+/* Clock Stretcher Configuration */
+
+#define CLOCK_STRETCHER_MAX_ENTRIES 0x4
+#define CKS_LOOKUPTable_MAX_ENTRIES 0x4
+
+/* The 'settings' field is subdivided in the following way: */
+#define CLOCK_STRETCHER_SETTING_DDT_MASK 0x01
+#define CLOCK_STRETCHER_SETTING_DDT_SHIFT 0x0
+#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_MASK 0x1E
+#define CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_SHIFT 0x1
+#define CLOCK_STRETCHER_SETTING_ENABLE_MASK 0x80
+#define CLOCK_STRETCHER_SETTING_ENABLE_SHIFT 0x7
+
+struct SMU_ClockStretcherDataTableEntry {
+ uint8_t minVID;
+ uint8_t maxVID;
+ uint16_t setting;
+};
+typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;
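+
+/* Minimal sketch (helper name is illustrative, not part of this patch):
+ * unpacking the stretch amount from the subdivided 'setting' field. */
+static inline uint16_t cks_get_stretch_amount(
+ const SMU_ClockStretcherDataTableEntry *entry)
+{
+ return (entry->setting & CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_MASK)
+ >> CLOCK_STRETCHER_SETTING_STRETCH_AMOUNT_SHIFT;
+}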
+
+struct SMU_ClockStretcherDataTable {
+ SMU_ClockStretcherDataTableEntry ClockStretcherDataTableEntry[CLOCK_STRETCHER_MAX_ENTRIES];
+};
+typedef struct SMU_ClockStretcherDataTable SMU_ClockStretcherDataTable;
+
+struct SMU_CKS_LOOKUPTableEntry {
+ uint16_t minFreq;
+ uint16_t maxFreq;
+
+ uint8_t setting;
+ uint8_t padding[3];
+};
+typedef struct SMU_CKS_LOOKUPTableEntry SMU_CKS_LOOKUPTableEntry;
+
+struct SMU_CKS_LOOKUPTable {
+ SMU_CKS_LOOKUPTableEntry CKS_LOOKUPTableEntry[CKS_LOOKUPTable_MAX_ENTRIES];
+};
+typedef struct SMU_CKS_LOOKUPTable SMU_CKS_LOOKUPTable;
+
+struct AgmAvfsData_t {
+ uint16_t avgPsmCount[28];
+ uint16_t minPsmCount[28];
+};
+
+typedef struct AgmAvfsData_t AgmAvfsData_t;
+
+enum VFT_COLUMNS {
+ SCLK0,
+ SCLK1,
+ SCLK2,
+ SCLK3,
+ SCLK4,
+ SCLK5,
+ SCLK6,
+ SCLK7,
+
+ NUM_VFT_COLUMNS
+};
+
+#define VFT_TABLE_DEFINED
+
+#define TEMP_RANGE_MAXSTEPS 12
+
+struct VFT_CELL_t {
+ uint16_t Voltage;
+};
+
+typedef struct VFT_CELL_t VFT_CELL_t;
+
+struct VFT_TABLE_t {
+ VFT_CELL_t Cell[TEMP_RANGE_MAXSTEPS][NUM_VFT_COLUMNS];
+ uint16_t AvfsGbv[NUM_VFT_COLUMNS];
+ uint16_t BtcGbv[NUM_VFT_COLUMNS];
+ uint16_t Temperature[TEMP_RANGE_MAXSTEPS];
+
+ uint8_t NumTemperatureSteps;
+ uint8_t padding[3];
+};
+
+typedef struct VFT_TABLE_t VFT_TABLE_t;
+
+
+/* Total margin, root mean square of Fmax + DC + Platform */
+struct AVFS_Margin_t {
+ VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_Margin_t AVFS_Margin_t;
+
+#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2
+#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2
+
+struct GB_VDROOP_TABLE_t {
+ int32_t a0;
+ int32_t a1;
+ int32_t a2;
+ uint32_t spare;
+};
+typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t;
+
+struct AVFS_CksOff_Gbv_t {
+ VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t;
+
+struct AVFS_meanNsigma_t {
+ uint32_t Aconstant[3];
+ uint16_t DC_tol_sigma;
+ uint16_t Platform_mean;
+ uint16_t Platform_sigma;
+ uint16_t PSM_Age_CompFactor;
+ uint8_t Static_Voltage_Offset[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t;
+
+struct AVFS_Sclk_Offset_t {
+ uint16_t Sclk_Offset[8];
+};
+typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t;
+
+#endif
+
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
new file mode 100644
index 000000000..899d6d810
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
@@ -0,0 +1,849 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU74_DISCRETE_H
+#define SMU74_DISCRETE_H
+
+#include "smu74.h"
+
+#pragma pack(push, 1)
+
+
+#define NUM_SCLK_RANGE 8
+
+#define VCO_3_6 1
+#define VCO_2_4 3
+
+#define POSTDIV_DIV_BY_1 0
+#define POSTDIV_DIV_BY_2 1
+#define POSTDIV_DIV_BY_4 2
+#define POSTDIV_DIV_BY_8 3
+#define POSTDIV_DIV_BY_16 4
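+
+/* The POSTDIV_* values above encode log2 of the post divider, so the actual
+ * divider is 1 << postdiv (illustrative helper, not part of this patch). */
+static inline uint32_t sclk_postdiv_divider(uint8_t postdiv)
+{
+ return 1u << postdiv; /* POSTDIV_DIV_BY_8 (3) -> divide by 8 */
+}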
+
+struct sclkFcwRange_t {
+ uint8_t vco_setting;
+ uint8_t postdiv;
+ uint16_t fcw_pcc;
+
+ uint16_t fcw_trans_upper;
+ uint16_t fcw_trans_lower;
+};
+typedef struct sclkFcwRange_t sclkFcwRange_t;
+
+struct SMIO_Pattern {
+ uint16_t Voltage;
+ uint8_t Smio;
+ uint8_t padding;
+};
+
+typedef struct SMIO_Pattern SMIO_Pattern;
+
+struct SMIO_Table {
+ SMIO_Pattern Pattern[SMU_MAX_SMIO_LEVELS];
+};
+
+typedef struct SMIO_Table SMIO_Table;
+
+struct SMU_SclkSetting {
+ uint32_t SclkFrequency;
+ uint16_t Fcw_int;
+ uint16_t Fcw_frac;
+ uint16_t Pcc_fcw_int;
+ uint8_t PllRange;
+ uint8_t SSc_En;
+ uint16_t Sclk_slew_rate;
+ uint16_t Pcc_up_slew_rate;
+ uint16_t Pcc_down_slew_rate;
+ uint16_t Fcw1_int;
+ uint16_t Fcw1_frac;
+ uint16_t Sclk_ss_slew_rate;
+};
+typedef struct SMU_SclkSetting SMU_SclkSetting;
+
+struct SMU74_Discrete_GraphicsLevel {
+ SMU_VoltageLevel MinVoltage;
+ uint8_t pcieDpmLevel;
+ uint8_t DeepSleepDivId;
+ uint16_t ActivityLevel;
+ uint32_t CgSpllFuncCntl3;
+ uint32_t CgSpllFuncCntl4;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint8_t SclkDid;
+ uint8_t padding;
+ uint8_t EnabledForActivity;
+ uint8_t EnabledForThrottle;
+ uint8_t UpHyst;
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t PowerThrottle;
+ SMU_SclkSetting SclkSetting;
+};
+
+typedef struct SMU74_Discrete_GraphicsLevel SMU74_Discrete_GraphicsLevel;
+
+struct SMU74_Discrete_ACPILevel {
+ uint32_t Flags;
+ SMU_VoltageLevel MinVoltage;
+ uint32_t SclkFrequency;
+ uint8_t SclkDid;
+ uint8_t DisplayWatermark;
+ uint8_t DeepSleepDivId;
+ uint8_t padding;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+
+ SMU_SclkSetting SclkSetting;
+};
+
+typedef struct SMU74_Discrete_ACPILevel SMU74_Discrete_ACPILevel;
+
+struct SMU74_Discrete_Ulv {
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint16_t VddcOffset;
+ uint8_t VddcOffsetVid;
+ uint8_t VddcPhase;
+ uint16_t BifSclkDfs;
+ uint16_t Reserved;
+};
+
+typedef struct SMU74_Discrete_Ulv SMU74_Discrete_Ulv;
+
+struct SMU74_Discrete_MemoryLevel {
+ SMU_VoltageLevel MinVoltage;
+ uint32_t MinMvdd;
+
+ uint32_t MclkFrequency;
+
+ uint8_t StutterEnable;
+ uint8_t EnabledForThrottle;
+ uint8_t EnabledForActivity;
+ uint8_t padding_0;
+
+ uint8_t UpHyst;
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t padding_1;
+
+ uint16_t ActivityLevel;
+ uint8_t DisplayWatermark;
+ uint8_t Reserved;
+};
+
+typedef struct SMU74_Discrete_MemoryLevel SMU74_Discrete_MemoryLevel;
+
+struct SMU74_Discrete_LinkLevel {
+ uint8_t PcieGenSpeed;
+ uint8_t PcieLaneCount;
+ uint8_t EnabledForActivity;
+ uint8_t SPC;
+ uint32_t DownThreshold;
+ uint32_t UpThreshold;
+ uint16_t BifSclkDfs;
+ uint16_t Reserved;
+};
+
+typedef struct SMU74_Discrete_LinkLevel SMU74_Discrete_LinkLevel;
+
+struct SMU74_Discrete_MCArbDramTimingTableEntry {
+ uint32_t McArbDramTiming;
+ uint32_t McArbDramTiming2;
+ uint8_t McArbBurstTime;
+ uint8_t padding[3];
+};
+
+typedef struct SMU74_Discrete_MCArbDramTimingTableEntry SMU74_Discrete_MCArbDramTimingTableEntry;
+
+struct SMU74_Discrete_MCArbDramTimingTable {
+ SMU74_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
+};
+
+typedef struct SMU74_Discrete_MCArbDramTimingTable SMU74_Discrete_MCArbDramTimingTable;
+
+struct SMU74_Discrete_UvdLevel {
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ SMU_VoltageLevel MinVoltage;
+ uint8_t VclkDivider;
+ uint8_t DclkDivider;
+ uint8_t padding[2];
+};
+
+typedef struct SMU74_Discrete_UvdLevel SMU74_Discrete_UvdLevel;
+
+struct SMU74_Discrete_ExtClkLevel {
+ uint32_t Frequency;
+ SMU_VoltageLevel MinVoltage;
+ uint8_t Divider;
+ uint8_t padding[3];
+};
+
+typedef struct SMU74_Discrete_ExtClkLevel SMU74_Discrete_ExtClkLevel;
+
+struct SMU74_Discrete_StateInfo {
+ uint32_t SclkFrequency;
+ uint32_t MclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint32_t SamclkFrequency;
+ uint32_t AclkFrequency;
+ uint32_t EclkFrequency;
+ uint16_t MvddVoltage;
+ uint16_t padding16;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+ uint8_t McRegIndex;
+ uint8_t SeqIndex;
+ uint8_t SclkDid;
+ int8_t SclkIndex;
+ int8_t MclkIndex;
+ uint8_t PCIeGen;
+};
+
+typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo;
+
+struct SMU_QuadraticCoeffs {
+ int32_t m1;
+ uint32_t b;
+
+ int16_t m2;
+ uint8_t m1_shift;
+ uint8_t m2_shift;
+};
+typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
+
+struct SMU74_Discrete_DpmTable {
+
+ SMU74_PIDController GraphicsPIDController;
+ SMU74_PIDController MemoryPIDController;
+ SMU74_PIDController LinkPIDController;
+
+ uint32_t SystemFlags;
+
+ uint32_t VRConfig;
+ uint32_t SmioMask1;
+ uint32_t SmioMask2;
+ SMIO_Table SmioTable1;
+ SMIO_Table SmioTable2;
+
+ uint32_t MvddLevelCount;
+
+
+ uint8_t BapmVddcVidHiSidd[SMU74_MAX_LEVELS_VDDC];
+ uint8_t BapmVddcVidLoSidd[SMU74_MAX_LEVELS_VDDC];
+ uint8_t BapmVddcVidHiSidd2[SMU74_MAX_LEVELS_VDDC];
+
+ uint8_t GraphicsDpmLevelCount;
+ uint8_t MemoryDpmLevelCount;
+ uint8_t LinkLevelCount;
+ uint8_t MasterDeepSleepControl;
+
+ uint8_t UvdLevelCount;
+ uint8_t VceLevelCount;
+ uint8_t AcpLevelCount;
+ uint8_t SamuLevelCount;
+
+ uint8_t ThermOutGpio;
+ uint8_t ThermOutPolarity;
+ uint8_t ThermOutMode;
+ uint8_t BootPhases;
+
+ uint8_t VRHotLevel;
+ uint8_t LdoRefSel;
+ uint8_t Reserved1[2];
+ uint16_t FanStartTemperature;
+ uint16_t FanStopTemperature;
+ uint16_t MaxVoltage;
+ uint16_t Reserved2;
+ uint32_t Reserved[1];
+
+ SMU74_Discrete_GraphicsLevel GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS];
+ SMU74_Discrete_MemoryLevel MemoryACPILevel;
+ SMU74_Discrete_MemoryLevel MemoryLevel[SMU74_MAX_LEVELS_MEMORY];
+ SMU74_Discrete_LinkLevel LinkLevel[SMU74_MAX_LEVELS_LINK];
+ SMU74_Discrete_ACPILevel ACPILevel;
+ SMU74_Discrete_UvdLevel UvdLevel[SMU74_MAX_LEVELS_UVD];
+ SMU74_Discrete_ExtClkLevel VceLevel[SMU74_MAX_LEVELS_VCE];
+ SMU74_Discrete_ExtClkLevel AcpLevel[SMU74_MAX_LEVELS_ACP];
+ SMU74_Discrete_ExtClkLevel SamuLevel[SMU74_MAX_LEVELS_SAMU];
+ SMU74_Discrete_Ulv Ulv;
+
+ uint8_t DisplayWatermark[SMU74_MAX_LEVELS_MEMORY][SMU74_MAX_LEVELS_GRAPHICS];
+
+ uint32_t SclkStepSize;
+ uint32_t Smio[SMU74_MAX_ENTRIES_SMIO];
+
+ uint8_t UvdBootLevel;
+ uint8_t VceBootLevel;
+ uint8_t AcpBootLevel;
+ uint8_t SamuBootLevel;
+
+ uint8_t GraphicsBootLevel;
+ uint8_t GraphicsVoltageChangeEnable;
+ uint8_t GraphicsThermThrottleEnable;
+ uint8_t GraphicsInterval;
+
+ uint8_t VoltageInterval;
+ uint8_t ThermalInterval;
+ uint16_t TemperatureLimitHigh;
+
+ uint16_t TemperatureLimitLow;
+ uint8_t MemoryBootLevel;
+ uint8_t MemoryVoltageChangeEnable;
+
+ uint16_t BootMVdd;
+ uint8_t MemoryInterval;
+ uint8_t MemoryThermThrottleEnable;
+
+ uint16_t VoltageResponseTime;
+ uint16_t PhaseResponseTime;
+
+ uint8_t PCIeBootLinkLevel;
+ uint8_t PCIeGenInterval;
+ uint8_t DTEInterval;
+ uint8_t DTEMode;
+
+ uint8_t SVI2Enable;
+ uint8_t VRHotGpio;
+ uint8_t AcDcGpio;
+ uint8_t ThermGpio;
+
+ uint16_t PPM_PkgPwrLimit;
+ uint16_t PPM_TemperatureLimit;
+
+ uint16_t DefaultTdp;
+ uint16_t TargetTdp;
+
+ uint16_t FpsHighThreshold;
+ uint16_t FpsLowThreshold;
+
+ uint16_t BAPMTI_R[SMU74_DTE_ITERATIONS][SMU74_DTE_SOURCES][SMU74_DTE_SINKS];
+ uint16_t BAPMTI_RC[SMU74_DTE_ITERATIONS][SMU74_DTE_SOURCES][SMU74_DTE_SINKS];
+
+ uint16_t TemperatureLimitEdge;
+ uint16_t TemperatureLimitHotspot;
+
+ uint16_t BootVddc;
+ uint16_t BootVddci;
+
+ uint16_t FanGainEdge;
+ uint16_t FanGainHotspot;
+
+ uint32_t LowSclkInterruptThreshold;
+ uint32_t VddGfxReChkWait;
+
+ uint8_t ClockStretcherAmount;
+ uint8_t Sclk_CKS_masterEn0_7;
+ uint8_t Sclk_CKS_masterEn8_15;
+ uint8_t DPMFreezeAndForced;
+
+ uint8_t Sclk_voltageOffset[8];
+
+ SMU_ClockStretcherDataTable ClockStretcherDataTable;
+ SMU_CKS_LOOKUPTable CKS_LOOKUPTable;
+
+ uint32_t CurrSclkPllRange;
+ sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE];
+ GB_VDROOP_TABLE_t BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES];
+ SMU_QuadraticCoeffs AVFSGB_VDROOP_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES];
+};
+
+typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable;
+
+
+struct SMU74_Discrete_FanTable {
+ uint16_t FdoMode;
+ int16_t TempMin;
+ int16_t TempMed;
+ int16_t TempMax;
+ int16_t Slope1;
+ int16_t Slope2;
+ int16_t FdoMin;
+ int16_t HystUp;
+ int16_t HystDown;
+ int16_t HystSlope;
+ int16_t TempRespLim;
+ int16_t TempCurr;
+ int16_t SlopeCurr;
+ int16_t PwmCurr;
+ uint32_t RefreshPeriod;
+ int16_t FdoMax;
+ uint8_t TempSrc;
+ int8_t Padding;
+};
+
+typedef struct SMU74_Discrete_FanTable SMU74_Discrete_FanTable;
+
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG 4
+#define SMU7_DISCRETE_GPIO_SCLK_DEBUG_BIT (0x1 << SMU7_DISCRETE_GPIO_SCLK_DEBUG)
+
+
+struct SMU7_MclkDpmScoreboard {
+ uint32_t PercentageBusy;
+
+ int32_t PIDError;
+ int32_t PIDIntegral;
+ int32_t PIDOutput;
+
+ uint32_t SigmaDeltaAccum;
+ uint32_t SigmaDeltaOutput;
+ uint32_t SigmaDeltaLevel;
+
+ uint32_t UtilizationSetpoint;
+
+ uint8_t TdpClampMode;
+ uint8_t TdcClampMode;
+ uint8_t ThermClampMode;
+ uint8_t VoltageBusy;
+
+ int8_t CurrLevel;
+ int8_t TargLevel;
+ uint8_t LevelChangeInProgress;
+ uint8_t UpHyst;
+
+ uint8_t DownHyst;
+ uint8_t VoltageDownHyst;
+ uint8_t DpmEnable;
+ uint8_t DpmRunning;
+
+ uint8_t DpmForce;
+ uint8_t DpmForceLevel;
+ uint8_t padding2;
+ uint8_t McArbIndex;
+
+ uint32_t MinimumPerfMclk;
+
+ uint8_t AcpiReq;
+ uint8_t AcpiAck;
+ uint8_t MclkSwitchInProgress;
+ uint8_t MclkSwitchCritical;
+
+ uint8_t IgnoreVBlank;
+ uint8_t TargetMclkIndex;
+ uint16_t VbiFailureCount;
+ uint8_t VbiWaitCounter;
+ uint8_t EnabledLevelsChange;
+
+ uint16_t LevelResidencyCounters[SMU74_MAX_LEVELS_MEMORY];
+ uint16_t LevelSwitchCounters[SMU74_MAX_LEVELS_MEMORY];
+
+ void (*TargetStateCalculator)(uint8_t);
+ void (*SavedTargetStateCalculator)(uint8_t);
+
+ uint16_t AutoDpmInterval;
+ uint16_t AutoDpmRange;
+
+ uint16_t VbiTimeoutCount;
+ uint16_t MclkSwitchingTime;
+
+ uint8_t fastSwitch;
+ uint8_t Save_PIC_VDDGFX_EXIT;
+ uint8_t Save_PIC_VDDGFX_ENTER;
+ uint8_t padding;
+};
+
+typedef struct SMU7_MclkDpmScoreboard SMU7_MclkDpmScoreboard;
+
+struct SMU7_UlvScoreboard {
+ uint8_t EnterUlv;
+ uint8_t ExitUlv;
+ uint8_t UlvActive;
+ uint8_t WaitingForUlv;
+ uint8_t UlvEnable;
+ uint8_t UlvRunning;
+ uint8_t UlvMasterEnable;
+ uint8_t padding;
+ uint32_t UlvAbortedCount;
+ uint32_t UlvTimeStamp;
+};
+
+typedef struct SMU7_UlvScoreboard SMU7_UlvScoreboard;
+
+struct VddgfxSavedRegisters {
+ uint32_t GPU_DBG[3];
+ uint32_t MEC_BaseAddress_Hi;
+ uint32_t MEC_BaseAddress_Lo;
+ uint32_t THM_TMON0_CTRL2__RDIR_PRESENT;
+ uint32_t THM_TMON1_CTRL2__RDIR_PRESENT;
+ uint32_t CP_INT_CNTL;
+};
+
+typedef struct VddgfxSavedRegisters VddgfxSavedRegisters;
+
+struct SMU7_VddGfxScoreboard {
+ uint8_t VddGfxEnable;
+ uint8_t VddGfxActive;
+ uint8_t VPUResetOccured;
+ uint8_t padding;
+
+ uint32_t VddGfxEnteredCount;
+ uint32_t VddGfxAbortedCount;
+
+ uint32_t VddGfxVid;
+
+ VddgfxSavedRegisters SavedRegisters;
+};
+
+typedef struct SMU7_VddGfxScoreboard SMU7_VddGfxScoreboard;
+
+struct SMU7_TdcLimitScoreboard {
+ uint8_t Enable;
+ uint8_t Running;
+ uint16_t Alpha;
+ uint32_t FilteredIddc;
+ uint32_t IddcLimit;
+ uint32_t IddcHyst;
+ SMU7_HystController_Data HystControllerData;
+};
+
+typedef struct SMU7_TdcLimitScoreboard SMU7_TdcLimitScoreboard;
+
+struct SMU7_PkgPwrLimitScoreboard {
+ uint8_t Enable;
+ uint8_t Running;
+ uint16_t Alpha;
+ uint32_t FilteredPkgPwr;
+ uint32_t Limit;
+ uint32_t Hyst;
+ uint32_t LimitFromDriver;
+ SMU7_HystController_Data HystControllerData;
+};
+
+typedef struct SMU7_PkgPwrLimitScoreboard SMU7_PkgPwrLimitScoreboard;
+
+struct SMU7_BapmScoreboard {
+ uint32_t source_powers[SMU74_DTE_SOURCES];
+ uint32_t source_powers_last[SMU74_DTE_SOURCES];
+ int32_t entity_temperatures[SMU74_NUM_GPU_TES];
+ int32_t initial_entity_temperatures[SMU74_NUM_GPU_TES];
+ int32_t Limit;
+ int32_t Hyst;
+ int32_t therm_influence_coeff_table[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS * 2];
+ int32_t therm_node_table[SMU74_DTE_ITERATIONS * SMU74_DTE_SOURCES * SMU74_DTE_SINKS];
+ uint16_t ConfigTDPPowerScalar;
+ uint16_t FanSpeedPowerScalar;
+ uint16_t OverDrivePowerScalar;
+ uint16_t OverDriveLimitScalar;
+ uint16_t FinalPowerScalar;
+ uint8_t VariantID;
+ uint8_t spare997;
+
+ SMU7_HystController_Data HystControllerData;
+
+ int32_t temperature_gradient_slope;
+ int32_t temperature_gradient;
+ uint32_t measured_temperature;
+};
+
+
+typedef struct SMU7_BapmScoreboard SMU7_BapmScoreboard;
+
+struct SMU7_AcpiScoreboard {
+ uint32_t SavedInterruptMask[2];
+ uint8_t LastACPIRequest;
+ uint8_t CgBifResp;
+ uint8_t RequestType;
+ uint8_t Padding;
+ SMU74_Discrete_ACPILevel D0Level;
+};
+
+typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;
+
+struct SMU74_Discrete_PmFuses {
+ uint8_t BapmVddCVidHiSidd[8];
+ uint8_t BapmVddCVidLoSidd[8];
+ uint8_t VddCVid[8];
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t SviLoadLineTrimVddC;
+ uint8_t SviLoadLineOffsetVddC;
+ uint16_t TDC_VDDC_PkgLimit;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+ uint8_t TdcWaterfallCtl;
+ uint8_t LPMLTemperatureMin;
+ uint8_t LPMLTemperatureMax;
+ uint8_t Reserved;
+
+ uint8_t LPMLTemperatureScaler[16];
+
+ int16_t FuzzyFan_ErrorSetDelta;
+ int16_t FuzzyFan_ErrorRateSetDelta;
+ int16_t FuzzyFan_PwmSetDelta;
+ uint16_t Reserved6;
+
+ uint8_t GnbLPML[16];
+
+ uint8_t GnbLPMLMaxVid;
+ uint8_t GnbLPMLMinVid;
+ uint8_t Reserved1[2];
+
+ uint16_t BapmVddCBaseLeakageHiSidd;
+ uint16_t BapmVddCBaseLeakageLoSidd;
+
+ uint16_t VFT_Temp[3];
+ uint16_t padding;
+
+ SMU_QuadraticCoeffs VFT_ATE[3];
+
+ SMU_QuadraticCoeffs AVFS_GB;
+ SMU_QuadraticCoeffs ATE_ACBTC_GB;
+
+ SMU_QuadraticCoeffs P2V;
+
+ uint32_t PsmCharzFreq;
+
+ uint16_t InversionVoltage;
+ uint16_t PsmCharzTemp;
+
+ uint32_t EnabledAvfsModules;
+};
+
+typedef struct SMU74_Discrete_PmFuses SMU74_Discrete_PmFuses;
+
+struct SMU7_Discrete_Log_Header_Table {
+ uint32_t version;
+ uint32_t asic_id;
+ uint16_t flags;
+ uint16_t entry_size;
+ uint32_t total_size;
+ uint32_t num_of_entries;
+ uint8_t type;
+ uint8_t mode;
+ uint8_t filler_0[2];
+ uint32_t filler_1[2];
+};
+
+typedef struct SMU7_Discrete_Log_Header_Table SMU7_Discrete_Log_Header_Table;
+
+struct SMU7_Discrete_Log_Cntl {
+ uint8_t Enabled;
+ uint8_t Type;
+ uint8_t padding[2];
+ uint32_t BufferSize;
+ uint32_t SamplesLogged;
+ uint32_t SampleSize;
+ uint32_t AddrL;
+ uint32_t AddrH;
+};
+
+typedef struct SMU7_Discrete_Log_Cntl SMU7_Discrete_Log_Cntl;
+
+#if defined SMU__DGPU_ONLY
+#define CAC_ACC_NW_NUM_OF_SIGNALS 87
+#endif
+
+
+struct SMU7_Discrete_Cac_Collection_Table {
+ uint32_t temperature;
+ uint32_t cac_acc_nw[CAC_ACC_NW_NUM_OF_SIGNALS];
+};
+
+typedef struct SMU7_Discrete_Cac_Collection_Table SMU7_Discrete_Cac_Collection_Table;
+
+struct SMU7_Discrete_Cac_Verification_Table {
+ uint32_t VddcTotalPower;
+ uint32_t VddcLeakagePower;
+ uint32_t VddcConstantPower;
+ uint32_t VddcGfxDynamicPower;
+ uint32_t VddcUvdDynamicPower;
+ uint32_t VddcVceDynamicPower;
+ uint32_t VddcAcpDynamicPower;
+ uint32_t VddcPcieDynamicPower;
+ uint32_t VddcDceDynamicPower;
+ uint32_t VddcCurrent;
+ uint32_t VddcVoltage;
+ uint32_t VddciTotalPower;
+ uint32_t VddciLeakagePower;
+ uint32_t VddciConstantPower;
+ uint32_t VddciDynamicPower;
+ uint32_t Vddr1TotalPower;
+ uint32_t Vddr1LeakagePower;
+ uint32_t Vddr1ConstantPower;
+ uint32_t Vddr1DynamicPower;
+ uint32_t spare[4];
+ uint32_t temperature;
+};
+
+typedef struct SMU7_Discrete_Cac_Verification_Table SMU7_Discrete_Cac_Verification_Table;
+
+struct SMU7_Discrete_Pm_Status_Table {
+ int32_t T_meas_max;
+ int32_t T_meas_acc;
+ int32_t T_calc_max;
+ int32_t T_calc_acc;
+ uint32_t P_scalar_acc;
+ uint32_t P_calc_max;
+ uint32_t P_calc_acc;
+
+ uint32_t I_calc_max;
+ uint32_t I_calc_acc;
+ uint32_t I_calc_acc_vddci;
+ uint32_t V_calc_noload_acc;
+ uint32_t V_calc_load_acc;
+ uint32_t V_calc_noload_acc_vddci;
+ uint32_t P_meas_acc;
+ uint32_t V_meas_noload_acc;
+ uint32_t V_meas_load_acc;
+ uint32_t I_meas_acc;
+ uint32_t P_meas_acc_vddci;
+ uint32_t V_meas_noload_acc_vddci;
+ uint32_t V_meas_load_acc_vddci;
+ uint32_t I_meas_acc_vddci;
+
+ uint16_t Sclk_dpm_residency[8];
+ uint16_t Uvd_dpm_residency[8];
+ uint16_t Vce_dpm_residency[8];
+ uint16_t Mclk_dpm_residency[4];
+
+ uint32_t P_vddci_acc;
+ uint32_t P_vddr1_acc;
+ uint32_t P_nte1_acc;
+ uint32_t PkgPwr_max;
+ uint32_t PkgPwr_acc;
+ uint32_t MclkSwitchingTime_max;
+ uint32_t MclkSwitchingTime_acc;
+ uint32_t FanPwm_acc;
+ uint32_t FanRpm_acc;
+
+ uint32_t AccCnt;
+};
+
+typedef struct SMU7_Discrete_Pm_Status_Table SMU7_Discrete_Pm_Status_Table;
+
+#define SMU7_MAX_GFX_CU_COUNT 16
+
+struct SMU7_GfxCuPgScoreboard {
+ uint8_t Enabled;
+ uint8_t WaterfallUp;
+ uint8_t WaterfallDown;
+ uint8_t WaterfallLimit;
+ uint8_t CurrMaxCu;
+ uint8_t TargMaxCu;
+ uint8_t ClampMode;
+ uint8_t Active;
+ uint8_t MaxSupportedCu;
+ uint8_t MinSupportedCu;
+ uint8_t PendingGfxCuHostInterrupt;
+ uint8_t LastFilteredMaxCuInteger;
+ uint16_t FilteredMaxCu;
+ uint16_t FilteredMaxCuAlpha;
+ uint16_t FilterResetCount;
+ uint16_t FilterResetCountLimit;
+ uint8_t ForceCu;
+ uint8_t ForceCuCount;
+ uint8_t spare[2];
+};
+
+typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard;
+
+#define SMU7_SCLK_CAC 0x561
+#define SMU7_MCLK_CAC 0xF9
+#define SMU7_VCLK_CAC 0x2DE
+#define SMU7_DCLK_CAC 0x2DE
+#define SMU7_ECLK_CAC 0x25E
+#define SMU7_ACLK_CAC 0x25E
+#define SMU7_SAMCLK_CAC 0x25E
+#define SMU7_DISPCLK_CAC 0x100
+#define SMU7_CAC_CONSTANT 0x2EE3430
+#define SMU7_CAC_CONSTANT_SHIFT 18
+
+#define SMU7_VDDCI_MCLK_CONST 1765
+#define SMU7_VDDCI_MCLK_CONST_SHIFT 16
+#define SMU7_VDDCI_VDDCI_CONST 50958
+#define SMU7_VDDCI_VDDCI_CONST_SHIFT 14
+#define SMU7_VDDCI_CONST 11781
+#define SMU7_VDDCI_STROBE_PWR 1331
+
+#define SMU7_VDDR1_CONST 693
+#define SMU7_VDDR1_CAC_WEIGHT 20
+#define SMU7_VDDR1_CAC_WEIGHT_SHIFT 19
+#define SMU7_VDDR1_STROBE_PWR 512
+
+#define SMU7_AREA_COEFF_UVD 0xA78
+#define SMU7_AREA_COEFF_VCE 0x190A
+#define SMU7_AREA_COEFF_ACP 0x22D1
+#define SMU7_AREA_COEFF_SAMU 0x534
+
+#define SMU7_THERM_OUT_MODE_DISABLE 0x0
+#define SMU7_THERM_OUT_MODE_THERM_ONLY 0x1
+#define SMU7_THERM_OUT_MODE_THERM_VRHOT 0x2
+
+// DIDT Defines
+#define SQ_Enable_MASK 0x1
+#define SQ_IR_MASK 0x2
+#define SQ_PCC_MASK 0x4
+#define SQ_EDC_MASK 0x8
+
+#define TCP_Enable_MASK 0x100
+#define TCP_IR_MASK 0x200
+#define TCP_PCC_MASK 0x400
+#define TCP_EDC_MASK 0x800
+
+#define TD_Enable_MASK 0x10000
+#define TD_IR_MASK 0x20000
+#define TD_PCC_MASK 0x40000
+#define TD_EDC_MASK 0x80000
+
+#define DB_Enable_MASK 0x1000000
+#define DB_IR_MASK 0x2000000
+#define DB_PCC_MASK 0x4000000
+#define DB_EDC_MASK 0x8000000
+
+#define SQ_Enable_SHIFT 0
+#define SQ_IR_SHIFT 1
+#define SQ_PCC_SHIFT 2
+#define SQ_EDC_SHIFT 3
+
+#define TCP_Enable_SHIFT 8
+#define TCP_IR_SHIFT 9
+#define TCP_PCC_SHIFT 10
+#define TCP_EDC_SHIFT 11
+
+#define TD_Enable_SHIFT 16
+#define TD_IR_SHIFT 17
+#define TD_PCC_SHIFT 18
+#define TD_EDC_SHIFT 19
+
+#define DB_Enable_SHIFT 24
+#define DB_IR_SHIFT 25
+#define DB_PCC_SHIFT 26
+#define DB_EDC_SHIFT 27
+
+#define BTCGB0_Vdroop_Enable_MASK 0x1
+#define BTCGB1_Vdroop_Enable_MASK 0x2
+#define AVFSGB0_Vdroop_Enable_MASK 0x4
+#define AVFSGB1_Vdroop_Enable_MASK 0x8
+
+#define BTCGB0_Vdroop_Enable_SHIFT 0
+#define BTCGB1_Vdroop_Enable_SHIFT 1
+#define AVFSGB0_Vdroop_Enable_SHIFT 2
+#define AVFSGB1_Vdroop_Enable_SHIFT 3
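+
+/* Minimal sketch (helper name is illustrative, not part of this patch):
+ * each DIDT block (SQ/TCP/TD/DB) packs Enable/IR/PCC/EDC bits at a fixed
+ * shift, so one helper covers all four blocks. */
+static inline uint32_t didt_block_enabled(uint32_t didt_config,
+ uint32_t enable_mask)
+{
+ /* e.g. enable_mask = TD_Enable_MASK */
+ return didt_config & enable_mask;
+}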
+
+
+#pragma pack(pop)
+
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h b/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h
index f8ba071f3..eb0f79f9c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h
@@ -1,3 +1,25 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
// CZ Ucode Loading Definitions
#ifndef SMU_UCODE_XFER_CZ_H
#define SMU_UCODE_XFER_CZ_H
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h
index c24a81eeb..880152c0f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h
@@ -44,6 +44,7 @@
#define UCODE_ID_IH_REG_RESTORE 11
#define UCODE_ID_VBIOS 12
#define UCODE_ID_MISC_METADATA 13
+#define UCODE_ID_SMU_SK 14
#define UCODE_ID_RLC_SCRATCH 32
#define UCODE_ID_RLC_SRM_ARAM 33
#define UCODE_ID_RLC_SRM_DRAM 34
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 6c4ef135c..f10fb64ef 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -2,7 +2,7 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o
+SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o polaris10_smumgr.o
AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index ec222c665..87c023e51 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -39,7 +39,7 @@
#define SIZE_ALIGN_32(x) (((x) + 31) / 32 * 32)
-static enum cz_scratch_entry firmware_list[] = {
+static const enum cz_scratch_entry firmware_list[] = {
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
@@ -639,7 +639,7 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
cz_smu->driver_buffer_length = 0;
- for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) {
+ for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
firmware_list[i]);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index cdbb9f89b..8e52a2e82 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -44,7 +44,7 @@
#define FIJI_SMC_SIZE 0x20000
-struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
+static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
/* Min Sclk pcie DeepSleep Activity CgSpll CgSpll spllSpread SpllSpread CcPwr CcPwr Sclk Display Enabled Enabled Voltage Power */
/* Voltage, Frequency, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, Spectrum, Spectrum2, DynRm, DynRm1 Did, Watermark, ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
{ 0x3c0fd047, 0x30750000, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0x21680000, 0x0c000000, 0, 0, 0x16, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 },
@@ -189,7 +189,7 @@ int fiji_copy_bytes_to_smc(struct pp_smumgr *smumgr,
int fiji_program_jump_on_start(struct pp_smumgr *smumgr)
{
- static unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 };
+ static const unsigned char data[] = { 0xE0, 0x00, 0x80, 0x40 };
fiji_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data) + 1);
@@ -665,7 +665,7 @@ int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
{
int i, result = -1;
uint32_t reg, data;
- PWR_Command_Table *virus = PwrVirusTable;
+ const PWR_Command_Table *virus = PwrVirusTable;
struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
priv->avfs.AvfsBtcStatus = AVFS_LOAD_VIRUS;
@@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
static int fiji_smu_fini(struct pp_smumgr *smumgr)
{
+ struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+
+ smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
if (smumgr->backend) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
+
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
new file mode 100644
index 000000000..5dba7c509
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -0,0 +1,1007 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smumgr.h"
+#include "smu74.h"
+#include "smu_ucode_xfer_vi.h"
+#include "polaris10_smumgr.h"
+#include "smu74_discrete.h"
+#include "smu/smu_7_1_3_d.h"
+#include "smu/smu_7_1_3_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "oss/oss_3_0_d.h"
+#include "gca/gfx_8_0_d.h"
+#include "bif/bif_5_0_d.h"
+#include "bif/bif_5_0_sh_mask.h"
+#include "polaris10_pwrvirus.h"
+#include "ppatomctrl.h"
+#include "pp_debug.h"
+#include "cgs_common.h"
+
+#define POLARIS10_SMC_SIZE 0x20000
+#define VOLTAGE_SCALE 4
+
+/* Microcode file is stored in this buffer */
+#define BUFFER_SIZE 80000
+#define MAX_STRING_SIZE 15
+#define BUFFER_SIZETWO 131072 /* 128 * 1024 */
+
+#define SMC_RAM_END 0x40000
+
+static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
+ /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */
+ /* Voltage, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
+ { 0x100ea446, 0x00, 0x03, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x30750000, 0x3000, 0, 0x2600, 0, 0, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
+ { 0x400ea446, 0x01, 0x04, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x409c0000, 0x2000, 0, 0x1e00, 1, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
+ { 0x740ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x50c30000, 0x2800, 0, 0x2000, 1, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } },
+ { 0xa40ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x60ea0000, 0x3000, 0, 0x2600, 1, 1, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
+ { 0xd80ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x70110100, 0x3800, 0, 0x2c00, 1, 1, 0x0004, 0x1203, 0xffff, 0x3600, 0xc9e2, 0x2e00 } },
+ { 0x3c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x80380100, 0x2000, 0, 0x1e00, 2, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
+ { 0x6c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x905f0100, 0x2400, 0, 0x1e00, 2, 1, 0x0004, 0x8901, 0xffff, 0x2300, 0x314c, 0x1d00 } },
+ { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
+};
+
+static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 =
+ {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
+
+/**
+* Set the address for reading/writing the SMC SRAM space.
+* @param smumgr the address of the powerplay hardware manager.
+* @param smc_addr the address in the SMC RAM to access.
+* @param limit the upper bound of the accessible SMC RAM region.
+*/
+static int polaris10_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
+{
+ PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
+
+ cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
+ SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
+
+ return 0;
+}
+
+/**
+* Copy bytes from SMC RAM space into driver memory.
+*
+* @param smumgr the address of the powerplay SMU manager.
+* @param smc_start_address the start address in the SMC RAM to copy bytes from.
+* @param dest the destination buffer to copy the bytes to.
+* @param byte_count the number of bytes to copy.
+* @param limit the upper bound of the accessible SMC RAM region.
+*/
+int polaris10_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
+{
+ uint32_t data;
+ uint32_t addr;
+ uint8_t *dest_byte;
+ uint8_t i, data_byte[4] = {0};
+ uint32_t *pdata = (uint32_t *)&data_byte;
+
+ PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1);
+ PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
+
+ addr = smc_start_address;
+
+ while (byte_count >= 4) {
+ polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
+
+ *dest = PP_SMC_TO_HOST_UL(data);
+
+ dest += 1;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ if (byte_count) {
+ polaris10_read_smc_sram_dword(smumgr, addr, &data, limit);
+ *pdata = PP_SMC_TO_HOST_UL(data);
+ /* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
+ dest_byte = (uint8_t *)dest;
+ for (i = 0; i < byte_count; i++)
+ dest_byte[i] = data_byte[i];
+ }
+
+ return 0;
+}
+
+/**
+* Copy bytes from an array into the SMC RAM space.
+*
+* @param smumgr the address of the powerplay SMU manager.
+* @param smc_start_address the start address in the SMC RAM to copy bytes to.
+* @param src the byte array to copy the bytes from.
+* @param byte_count the number of bytes to copy.
+* @param limit the upper bound of the accessible SMC RAM region.
+*/
+int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+ const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+ int result;
+ uint32_t data = 0;
+ uint32_t original_data;
+ uint32_t addr = 0;
+ uint32_t extra_shift;
+
+ PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -1);
+ PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -1);
+
+ addr = smc_start_address;
+
+ while (byte_count >= 4) {
+ /* Bytes are written into the SMC address space with the MSB first. */
+ data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+
+ result = polaris10_set_smc_sram_address(smumgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+ cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ if (0 != byte_count) {
+
+ data = 0;
+
+ result = polaris10_set_smc_sram_address(smumgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+
+ original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ /* Bytes are written into the SMC address space with the MSB first. */
+ data = (0x100 * data) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ result = polaris10_set_smc_sram_address(smumgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+ cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+ }
+
+ return 0;
+}
+
+
+static int polaris10_program_jump_on_start(struct pp_smumgr *smumgr)
+{
+ static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
+
+ polaris10_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
+
+ return 0;
+}
+
+/**
+* Return whether the SMC is currently running.
+*
+* @param smumgr the address of the powerplay hardware manager.
+*/
+bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
+{
+ return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+ && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
+}
+
+static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
+{
+ uint32_t efuse;
+
+ efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
+ efuse &= 0x00000001;
+ if (efuse)
+ return true;
+
+ return false;
+}
+
+/**
+* Send a message to the SMC, and wait for its response.
+*
+* @param smumgr the address of the powerplay hardware manager.
+* @param msg the message to send.
+* @return 0 on success, -1 if the SMC is not running.
+*/
+int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+{
+ int ret;
+
+ if (!polaris10_is_smc_ram_running(smumgr))
+ return -1;
+
+
+ SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+ ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+ if (ret != 1)
+ printk("\n failed to send pre message %x ret is %d \n", msg, ret);
+
+ cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+ SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+ ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+ if (ret != 1)
+ printk("\n failed to send message %x ret is %d \n", msg, ret);
+
+ return 0;
+}
+
+
+/**
+* Send a message to the SMC, and do not wait for its response.
+*
+* @param smumgr the address of the powerplay hardware manager.
+* @param msg the message to send.
+* @return Always returns 0.
+*/
+int polaris10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
+{
+ cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+
+ return 0;
+}
+
+/**
+* Send a message to the SMC with a parameter, and wait for its response.
+*
+* @param smumgr: the address of the powerplay hardware manager.
+* @param msg: the message to send.
+* @param parameter: the parameter to send.
+* @return 0 on success, -1 if the SMC is not running.
+*/
+int polaris10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+{
+ if (!polaris10_is_smc_ram_running(smumgr))
+ return -1;
+
+ SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+ cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+
+ return polaris10_send_msg_to_smc(smumgr, msg);
+}
+
+
+/**
+* Send a message to the SMC with a parameter, without waiting for a response.
+*
+* @param smumgr: the address of the powerplay hardware manager.
+* @param msg: the message to send.
+* @param parameter: the parameter to send.
+* @return Always returns 0 (this variant does not wait for a response).
+*/
+int polaris10_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+{
+ cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+
+ return polaris10_send_msg_to_smc_without_waiting(smumgr, msg);
+}
+
+int polaris10_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
+{
+ cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
+
+ cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+
+ SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+
+ if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
+ printk("Failed to send Message.\n");
+
+ return 0;
+}
+
+/**
+* Wait until the SMC is doing nothing. Doing nothing means that the SMC is either turned off or it is sitting on the STOP instruction.
+*
+* @param smumgr the address of the powerplay hardware manager.
+* @return 0 when the SMC is inactive; -1 if the SMC is not running at all.
+*/
+int polaris10_wait_for_smc_inactive(struct pp_smumgr *smumgr)
+{
+ /* If the SMC is not even on, it qualifies as inactive. */
+ if (!polaris10_is_smc_ram_running(smumgr))
+ return -1;
+
+ SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
+ return 0;
+}
+
+
+/**
+* Upload the SMC firmware to the SMC microcontroller.
+*
+* @param smumgr the address of the powerplay hardware manager.
+* @param length the length of the firmware image in bytes.
+* @param src the firmware image data.
+* @param limit the maximum number of bytes that may be written to SMC RAM.
+*/
+static int polaris10_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
+{
+ uint32_t byte_count = length;
+
+ PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -1);
+
+ cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
+ SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
+
+ for (; byte_count >= 4; byte_count -= 4)
+ cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
+
+ SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
+
+ PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -1);
+
+ return 0;
+}
+
+static enum cgs_ucode_id polaris10_convert_fw_type_to_cgs(uint32_t fw_type)
+{
+ enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;
+
+ switch (fw_type) {
+ case UCODE_ID_SMU:
+ result = CGS_UCODE_ID_SMU;
+ break;
+ case UCODE_ID_SMU_SK:
+ result = CGS_UCODE_ID_SMU_SK;
+ break;
+ case UCODE_ID_SDMA0:
+ result = CGS_UCODE_ID_SDMA0;
+ break;
+ case UCODE_ID_SDMA1:
+ result = CGS_UCODE_ID_SDMA1;
+ break;
+ case UCODE_ID_CP_CE:
+ result = CGS_UCODE_ID_CP_CE;
+ break;
+ case UCODE_ID_CP_PFP:
+ result = CGS_UCODE_ID_CP_PFP;
+ break;
+ case UCODE_ID_CP_ME:
+ result = CGS_UCODE_ID_CP_ME;
+ break;
+ case UCODE_ID_CP_MEC:
+ result = CGS_UCODE_ID_CP_MEC;
+ break;
+ case UCODE_ID_CP_MEC_JT1:
+ result = CGS_UCODE_ID_CP_MEC_JT1;
+ break;
+ case UCODE_ID_CP_MEC_JT2:
+ result = CGS_UCODE_ID_CP_MEC_JT2;
+ break;
+ case UCODE_ID_RLC_G:
+ result = CGS_UCODE_ID_RLC_G;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static int polaris10_upload_smu_firmware_image(struct pp_smumgr *smumgr)
+{
+ int result = 0;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+ struct cgs_firmware_info info = {0};
+
+ if (smu_data->security_hard_key == 1)
+ cgs_get_firmware_info(smumgr->device,
+ polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
+ else
+ cgs_get_firmware_info(smumgr->device,
+ polaris10_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
+
+ /* TO DO cgs_init_samu_load_smu(smumgr->device, (uint32_t *)info.kptr, info.image_size, smu_data->post_initial_boot);*/
+ result = polaris10_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, POLARIS10_SMC_SIZE);
+
+ return result;
+}
+
+/**
+* Read a 32bit value from the SMC SRAM space.
+* ALL PARAMETERS ARE IN HOST BYTE ORDER.
+* @param smumgr the address of the powerplay hardware manager.
+* @param smc_addr the address in the SMC RAM to access.
+* @param value an output parameter for the data read from the SMC SRAM.
+* @param limit the upper bound of the accessible SMC RAM region.
+*/
+int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
+{
+ int result;
+
+ result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
+
+ if (result)
+ return result;
+
+ *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+ return 0;
+}
+
+/**
+* Write a 32bit value to the SMC SRAM space.
+* ALL PARAMETERS ARE IN HOST BYTE ORDER.
+* @param smumgr the address of the powerplay hardware manager.
+* @param smc_addr the address in the SMC RAM to access.
+* @param value the value to write to the SMC SRAM.
+* @param limit the upper bound of the accessible SMC RAM region.
+*/
+int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
+{
+ int result;
+
+ result = polaris10_set_smc_sram_address(smumgr, smc_addr, limit);
+
+ if (result)
+ return result;
+
+ cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
+
+ return 0;
+}
+
+
+int polaris10_smu_fini(struct pp_smumgr *smumgr)
+{
+ if (smumgr->backend) {
+ kfree(smumgr->backend);
+ smumgr->backend = NULL;
+ }
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
+ return 0;
+}
+
+/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC-related types. */
+static uint32_t polaris10_get_mask_for_firmware_type(uint32_t fw_type)
+{
+ uint32_t result = 0;
+
+ switch (fw_type) {
+ case UCODE_ID_SDMA0:
+ result = UCODE_ID_SDMA0_MASK;
+ break;
+ case UCODE_ID_SDMA1:
+ result = UCODE_ID_SDMA1_MASK;
+ break;
+ case UCODE_ID_CP_CE:
+ result = UCODE_ID_CP_CE_MASK;
+ break;
+ case UCODE_ID_CP_PFP:
+ result = UCODE_ID_CP_PFP_MASK;
+ break;
+ case UCODE_ID_CP_ME:
+ result = UCODE_ID_CP_ME_MASK;
+ break;
+ case UCODE_ID_CP_MEC_JT1:
+ case UCODE_ID_CP_MEC_JT2:
+ result = UCODE_ID_CP_MEC_MASK;
+ break;
+ case UCODE_ID_RLC_G:
+ result = UCODE_ID_RLC_G_MASK;
+ break;
+ default:
+ printk("UCode type is out of range! \n");
+ result = 0;
+ }
+
+ return result;
+}
+
+/* Populate one firmware image into the data structure */
+
+static int polaris10_populate_single_firmware_entry(struct pp_smumgr *smumgr,
+ uint32_t fw_type,
+ struct SMU_Entry *entry)
+{
+ int result = 0;
+ struct cgs_firmware_info info = {0};
+
+ result = cgs_get_firmware_info(smumgr->device,
+ polaris10_convert_fw_type_to_cgs(fw_type),
+ &info);
+
+ if (!result) {
+ entry->version = info.version;
+ entry->id = (uint16_t)fw_type;
+ entry->image_addr_high = smu_upper_32_bits(info.mc_addr);
+ entry->image_addr_low = smu_lower_32_bits(info.mc_addr);
+ entry->meta_data_addr_high = 0;
+ entry->meta_data_addr_low = 0;
+ entry->data_size_byte = info.image_size;
+ entry->num_register_entries = 0;
+ }
+
+ if (fw_type == UCODE_ID_RLC_G)
+ entry->flags = 1;
+ else
+ entry->flags = 0;
+
+ return 0;
+}
+
+static int polaris10_request_smu_load_fw(struct pp_smumgr *smumgr)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ uint32_t fw_to_load;
+
+ int result = 0;
+ struct SMU_DRAMData_TOC *toc;
+
+ if (!smumgr->reload_fw) {
+ printk(KERN_INFO "[ powerplay ] skip reloading...\n");
+ return 0;
+ }
+
+ if (smu_data->soft_regs_start)
+ cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
+ 0x0);
+
+ polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, smu_data->smu_buffer.mc_addr_high);
+ polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_SMU_DRAM_ADDR_LO, smu_data->smu_buffer.mc_addr_low);
+
+ toc = (struct SMU_DRAMData_TOC *)smu_data->header;
+ toc->num_entries = 0;
+ toc->structure_version = 1;
+
+ PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+ PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+ PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+ PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+ PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+ PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+ PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+ PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+ PP_ASSERT_WITH_CODE(0 == polaris10_populate_single_firmware_entry(smumgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -1);
+
+ polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
+ polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
+
+ fw_to_load = UCODE_ID_RLC_G_MASK
+ + UCODE_ID_SDMA0_MASK
+ + UCODE_ID_SDMA1_MASK
+ + UCODE_ID_CP_CE_MASK
+ + UCODE_ID_CP_ME_MASK
+ + UCODE_ID_CP_PFP_MASK
+ + UCODE_ID_CP_MEC_MASK;
+
+ if (polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
+ printk(KERN_ERR "Fail to Request SMU Load uCode");
+
+ return result;
+}
+
+/* Check if the FW has been loaded; the SMU will not return if loading has not finished. */
+static int polaris10_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ uint32_t fw_mask = polaris10_get_mask_for_firmware_type(fw_type);
+ uint32_t ret;
+ /* Check SOFT_REGISTERS_TABLE_28.UcodeLoadStatus */
+ ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
+ smu_data->soft_regs_start + offsetof(SMU74_SoftRegisters, UcodeLoadStatus),
+ fw_mask, fw_mask);
+
+ return ret;
+}
+
+static int polaris10_reload_firmware(struct pp_smumgr *smumgr)
+{
+ return smumgr->smumgr_funcs->start_smu(smumgr);
+}
+
+static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
+{
+ int i;
+ int result = -1;
+ uint32_t reg, data;
+
+ const PWR_Command_Table *pvirus = pwr_virus_table;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+
+ for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
+ switch (pvirus->command) {
+ case PwrCmdWrite:
+ reg = pvirus->reg;
+ data = pvirus->data;
+ cgs_write_register(smumgr->device, reg, data);
+ break;
+
+ case PwrCmdEnd:
+ result = 0;
+ break;
+
+ default:
+ printk("Table Exit with Invalid Command!");
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
+ result = -1;
+ break;
+ }
+ pvirus++;
+ }
+
+ return result;
+}
+
+static int polaris10_perform_btc(struct pp_smumgr *smumgr)
+{
+ int result = 0;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+ if (0 != smu_data->avfs.avfs_btc_param) {
+ if (0 != polaris10_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+ printk("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
+ result = -1;
+ }
+ }
+ if (smu_data->avfs.avfs_btc_param > 1) {
+ /* Soft-Reset to reset the engine before loading uCode */
+ /* halt */
+ cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000);
+ /* reset everything */
+ cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
+ cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0);
+ }
+ return result;
+}
+
+
+int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+{
+ uint32_t vr_config;
+ uint32_t dpm_table_start;
+
+ uint16_t u16_boot_mvdd;
+ uint32_t graphics_level_address, vr_config_address, graphics_level_size;
+
+ graphics_level_size = sizeof(avfs_graphics_level_polaris10);
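+	/* 1300 mV boot MVDD, scaled by VOLTAGE_SCALE and converted to SMC byte order. */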
+ u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE);
+
+ PP_ASSERT_WITH_CODE(0 == polaris10_read_smc_sram_dword(smumgr,
+ SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable),
+ &dpm_table_start, 0x40000),
+ "[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table",
+ return -1);
+
+	/* Default VRConfig = VR_MERGED_WITH_VDDC + VR_STATIC_VOLTAGE(VDDCI);
+	 * stored byte-swapped for the SMC (logical value 0x50001). */
+	vr_config = 0x01000500;
+
+ vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig);
+
+ PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, vr_config_address,
+ (uint8_t *)&vr_config, sizeof(uint32_t), 0x40000),
+ "[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC",
+ return -1);
+
+ graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
+
+ PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+ (uint8_t *)(&avfs_graphics_level_polaris10),
+ graphics_level_size, 0x40000),
+ "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!",
+ return -1);
+
+ graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
+
+ PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+ (uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000),
+ "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!",
+ return -1);
+
+	/* MVDD boot value - necessary to avoid the hang that occurs during MCLK DPM enablement */
+
+ graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd);
+
+ PP_ASSERT_WITH_CODE(0 == polaris10_copy_bytes_to_smc(smumgr, graphics_level_address,
+ (uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000),
+ "[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!",
+ return -1);
+
+ return 0;
+}
+
+int polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_vft_intact)
+{
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+ switch (smu_data->avfs.avfs_btc_status) {
+ case AVFS_BTC_COMPLETED_PREVIOUSLY:
+ break;
+
+ case AVFS_BTC_BOOT: /* Cold Boot State - Post SMU Start */
+
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED;
+ PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(smumgr),
+ "[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
+ return -1);
+
+ if (smu_data->avfs.avfs_btc_param > 1) {
+ printk("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
+ PP_ASSERT_WITH_CODE(-1 == polaris10_setup_pwr_virus(smumgr),
+ "[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
+ return -1);
+ }
+
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
+ PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr),
+ "[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
+ return -1);
+
+ break;
+
+ case AVFS_BTC_DISABLED:
+ case AVFS_BTC_NOTSUPPORTED:
+ break;
+
+ default:
+ printk("[AVFS] Something is broken. See log!");
+ break;
+ }
+
+ return 0;
+}
+
+static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
+{
+ int result = 0;
+
+ /* Wait for smc boot up */
+ /* SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
+
+ /* Assert reset */
+ SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+
+ result = polaris10_upload_smu_firmware_image(smumgr);
+ if (result != 0)
+ return result;
+
+ /* Clear status */
+ cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);
+
+ SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+
+ /* De-assert reset */
+ SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+
+ SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
+
+
+ /* Call Test SMU message with 0x20000 offset to trigger SMU start */
+ polaris10_send_msg_to_smc_offset(smumgr);
+
+	/* Wait for the done bit to be set, then check the pass/fail indicator */
+
+ SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);
+
+ if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMU_STATUS, SMU_PASS))
+ PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1);
+
+ cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
+
+ SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+
+ SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+ /* Wait for firmware to initialize */
+ SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
+
+ return result;
+}
+
+static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
+{
+ int result = 0;
+
+ /* wait for smc boot up */
+ SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);
+
+ /* Clear firmware interrupt enable flag */
+ /* SMUM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
+ cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ ixFIRMWARE_FLAGS, 0);
+
+ SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL,
+ rst_reg, 1);
+
+ result = polaris10_upload_smu_firmware_image(smumgr);
+ if (result != 0)
+ return result;
+
+	/* Set SMC instruction start point to 0x0 */
+ polaris10_program_jump_on_start(smumgr);
+
+ SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+
+ SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+ /* Wait for firmware to initialize */
+
+ SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
+
+ return result;
+}
+
+static int polaris10_start_smu(struct pp_smumgr *smumgr)
+{
+ int result = 0;
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+	bool smu_vft_intact;
+
+ /* Only start SMC if SMC RAM is not running */
+ if (!polaris10_is_smc_ram_running(smumgr)) {
+		smu_vft_intact = false;
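+		/* Record whether the SMU runs in protected mode and which security hard key it uses. */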
+ smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
+ smu_data->security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
+
+ /* Check if SMU is running in protected mode */
+ if (smu_data->protected_mode == 0) {
+ result = polaris10_start_smu_in_non_protection_mode(smumgr);
+ } else {
+ result = polaris10_start_smu_in_protection_mode(smumgr);
+
+			/* If it failed, retry with the other security key. */
+ if (result != 0) {
+ smu_data->security_hard_key ^= 1;
+ result = polaris10_start_smu_in_protection_mode(smumgr);
+ }
+ }
+
+ if (result != 0)
+ PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
+
+ polaris10_avfs_event_mgr(smumgr, true);
+ } else
+		smu_vft_intact = true; /* Driver went offline but the SMU stayed alive and still holds the VFT table */
+
+ smu_data->post_initial_boot = true;
+	polaris10_avfs_event_mgr(smumgr, smu_vft_intact);
+	/* Set up SoftRegsStart here for register lookup in case the dummy backend is used and ProcessFirmwareHeader is not executed */
+ polaris10_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
+ &(smu_data->soft_regs_start), 0x40000);
+
+ result = polaris10_request_smu_load_fw(smumgr);
+
+ return result;
+}
+
+static int polaris10_smu_init(struct pp_smumgr *smumgr)
+{
+ struct polaris10_smumgr *smu_data;
+ uint8_t *internal_buf;
+ uint64_t mc_addr = 0;
+ /* Allocate memory for backend private data */
+ smu_data = (struct polaris10_smumgr *)(smumgr->backend);
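+	/* Round the TOC size up to a whole number of 4096-byte pages; this over-allocates one page when the size is already a multiple. */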
+ smu_data->header_buffer.data_size =
+ ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
+ smu_data->smu_buffer.data_size = 200*4096;
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+	/* Allocate FW image data structure and header buffer and
+	 * send the header buffer address to SMU */
+ smu_allocate_memory(smumgr->device,
+ smu_data->header_buffer.data_size,
+ CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+ PAGE_SIZE,
+ &mc_addr,
+ &smu_data->header_buffer.kaddr,
+ &smu_data->header_buffer.handle);
+
+ smu_data->header = smu_data->header_buffer.kaddr;
+ smu_data->header_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
+ smu_data->header_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+
+ PP_ASSERT_WITH_CODE((NULL != smu_data->header),
+ "Out of memory.",
+ kfree(smumgr->backend);
+ cgs_free_gpu_mem(smumgr->device,
+ (cgs_handle_t)smu_data->header_buffer.handle);
+ return -1);
+
+	/* Allocate buffer for SMU internal buffer and send the address to SMU.
+	 * Iceland SMU does not need internal buffer. */
+ smu_allocate_memory(smumgr->device,
+ smu_data->smu_buffer.data_size,
+ CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
+ PAGE_SIZE,
+ &mc_addr,
+ &smu_data->smu_buffer.kaddr,
+ &smu_data->smu_buffer.handle);
+
+ internal_buf = smu_data->smu_buffer.kaddr;
+ smu_data->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
+ smu_data->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
+
+ PP_ASSERT_WITH_CODE((NULL != internal_buf),
+ "Out of memory.",
+ kfree(smumgr->backend);
+ cgs_free_gpu_mem(smumgr->device,
+ (cgs_handle_t)smu_data->smu_buffer.handle);
+		return -1);
+
+ if (polaris10_is_hw_avfs_present(smumgr))
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
+ else
+ smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+
+ return 0;
+}
+
+static const struct pp_smumgr_func ellsemere_smu_funcs = {
+ .smu_init = polaris10_smu_init,
+ .smu_fini = polaris10_smu_fini,
+ .start_smu = polaris10_start_smu,
+ .check_fw_load_finish = polaris10_check_fw_load_finish,
+ .request_smu_load_fw = polaris10_reload_firmware,
+ .request_smu_load_specific_fw = NULL,
+ .send_msg_to_smc = polaris10_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = polaris10_send_msg_to_smc_with_parameter,
+ .download_pptable_settings = NULL,
+ .upload_pptable_settings = NULL,
+};
+
+int polaris10_smum_init(struct pp_smumgr *smumgr)
+{
+ struct polaris10_smumgr *polaris10_smu = NULL;
+
+ polaris10_smu = kzalloc(sizeof(struct polaris10_smumgr), GFP_KERNEL);
+
+ if (polaris10_smu == NULL)
+ return -1;
+
+ smumgr->backend = polaris10_smu;
+ smumgr->smumgr_funcs = &ellsemere_smu_funcs;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
new file mode 100644
index 000000000..e5377aec0
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _POLARIS10_SMUMANAGER_H
+#define _POLARIS10_SMUMANAGER_H
+
+#include <polaris10_ppsmc.h>
+#include <pp_endian.h>
+
+struct polaris10_avfs {
+ enum AVFS_BTC_STATUS avfs_btc_status;
+ uint32_t avfs_btc_param;
+};
+
+struct polaris10_buffer_entry {
+ uint32_t data_size;
+ uint32_t mc_addr_low;
+ uint32_t mc_addr_high;
+ void *kaddr;
+ unsigned long handle;
+};
+
+struct polaris10_smumgr {
+ uint8_t *header;
+ uint8_t *mec_image;
+ struct polaris10_buffer_entry smu_buffer;
+ struct polaris10_buffer_entry header_buffer;
+ uint32_t soft_regs_start;
+ uint8_t *read_rrm_straps;
+ uint32_t read_drm_straps_mc_address_high;
+ uint32_t read_drm_straps_mc_address_low;
+ uint32_t acpi_optimization;
+ bool post_initial_boot;
+ uint8_t protected_mode;
+ uint8_t security_hard_key;
+ struct polaris10_avfs avfs;
+};
+
+
+int polaris10_smum_init(struct pp_smumgr *smumgr);
+
+int polaris10_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit);
+int polaris10_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit);
+int polaris10_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+ const uint8_t *src, uint32_t byte_count, uint32_t limit);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 063ae71c9..0728c1e3d 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -30,6 +30,7 @@
#include "cz_smumgr.h"
#include "tonga_smumgr.h"
#include "fiji_smumgr.h"
+#include "polaris10_smumgr.h"
int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
{
@@ -62,6 +63,10 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
case CHIP_FIJI:
fiji_smum_init(smumgr);
break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ polaris10_smum_init(smumgr);
+ break;
default:
return -EINVAL;
}
@@ -76,6 +81,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
int smum_fini(struct pp_smumgr *smumgr)
{
+ kfree(smumgr->device);
kfree(smumgr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index ebdb43a8d..b22722eab 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -145,7 +145,7 @@ out:
int tonga_program_jump_on_start(struct pp_smumgr *smumgr)
{
- static unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 };
+ static const unsigned char pData[] = { 0xE0, 0x00, 0x80, 0x40 };
tonga_copy_bytes_to_smc(smumgr, 0x0, pData, 4, sizeof(pData)+1);
@@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr,
static int tonga_smu_fini(struct pp_smumgr *smumgr)
{
+ struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend);
+
+ smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle);
+ smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
+
if (smumgr->backend != NULL) {
kfree(smumgr->backend);
smumgr->backend = NULL;
}
+
+ cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
return 0;
}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index a5ff9458d..c16248cee 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -319,6 +319,48 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
return added;
}
+static void amd_sched_free_job(struct fence *f, struct fence_cb *cb)
+{
+	struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
+
+ schedule_work(&job->work_free_job);
+}
+
+/* job_finish is called after the hw fence is signaled, and
+ * the job has already been deleted from ring_mirror_list.
+ */
+void amd_sched_job_finish(struct amd_sched_job *s_job)
+{
+ struct amd_sched_job *next;
+ struct amd_gpu_scheduler *sched = s_job->sched;
+
+ if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
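+		/* Cancel this job's pending TDR work; if it had not run yet, drop the reference the work held. */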
+ if (cancel_delayed_work(&s_job->work_tdr))
+ amd_sched_job_put(s_job);
+
+ /* queue TDR for next job */
+ next = list_first_entry_or_null(&sched->ring_mirror_list,
+ struct amd_sched_job, node);
+
+ if (next) {
+ INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
+ amd_sched_job_get(next);
+ schedule_delayed_work(&next->work_tdr, sched->timeout);
+ }
+ }
+}
+
+void amd_sched_job_begin(struct amd_sched_job *s_job)
+{
+ struct amd_gpu_scheduler *sched = s_job->sched;
+
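+	/* Arm the TDR timer only when this job is the first (oldest) one on the ring mirror list. */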
+ if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+	    list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job) {
+ INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
+ amd_sched_job_get(s_job);
+ schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+ }
+}
+
/**
* Submit a job to the job queue
*
@@ -330,11 +372,39 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
struct amd_sched_entity *entity = sched_job->s_entity;
+	sched_job->use_sched = true;
+ fence_add_callback(&sched_job->s_fence->base,
+ &sched_job->cb_free_job, amd_sched_free_job);
trace_amd_sched_job(sched_job);
wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
}
+/* init a sched_job with basic fields */
+int amd_sched_job_init(struct amd_sched_job *job,
+ struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ void (*timeout_cb)(struct work_struct *work),
+ void (*free_cb)(struct kref *refcount),
+ void *owner, struct fence **fence)
+{
+ INIT_LIST_HEAD(&job->node);
+ kref_init(&job->refcount);
+ job->sched = sched;
+ job->s_entity = entity;
+ job->s_fence = amd_sched_fence_create(entity, owner);
+ if (!job->s_fence)
+ return -ENOMEM;
+
+ job->s_fence->s_job = job;
+ job->timeout_callback = timeout_cb;
+ job->free_callback = free_cb;
+
+ if (fence)
+ *fence = &job->s_fence->base;
+ return 0;
+}
+
/**
 * Return true if we can push more jobs to the hw.
*/
@@ -383,47 +453,26 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
unsigned long flags;
atomic_dec(&sched->hw_rq_count);
+
+ /* remove job from ring_mirror_list */
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ list_del_init(&s_fence->s_job->node);
+ sched->ops->finish_job(s_fence->s_job);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
amd_sched_fence_signal(s_fence);
- if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- cancel_delayed_work(&s_fence->dwork);
- spin_lock_irqsave(&sched->fence_list_lock, flags);
- list_del_init(&s_fence->list);
- spin_unlock_irqrestore(&sched->fence_list_lock, flags);
- }
+
trace_amd_sched_process_job(s_fence);
fence_put(&s_fence->base);
wake_up_interruptible(&sched->wake_up_worker);
}
-static void amd_sched_fence_work_func(struct work_struct *work)
-{
- struct amd_sched_fence *s_fence =
- container_of(work, struct amd_sched_fence, dwork.work);
- struct amd_gpu_scheduler *sched = s_fence->sched;
- struct amd_sched_fence *entity, *tmp;
- unsigned long flags;
-
- DRM_ERROR("[%s] scheduler is timeout!\n", sched->name);
-
- /* Clean all pending fences */
- spin_lock_irqsave(&sched->fence_list_lock, flags);
- list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
- DRM_ERROR(" fence no %d\n", entity->base.seqno);
- cancel_delayed_work(&entity->dwork);
- list_del_init(&entity->list);
- fence_put(&entity->base);
- }
- spin_unlock_irqrestore(&sched->fence_list_lock, flags);
-}
-
static int amd_sched_main(void *param)
{
struct sched_param sparam = {.sched_priority = 1};
struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
int r, count;
- spin_lock_init(&sched->fence_list_lock);
- INIT_LIST_HEAD(&sched->fence_list);
sched_setscheduler(current, SCHED_FIFO, &sparam);
while (!kthread_should_stop()) {
@@ -431,7 +480,6 @@ static int amd_sched_main(void *param)
struct amd_sched_fence *s_fence;
struct amd_sched_job *sched_job;
struct fence *fence;
- unsigned long flags;
wait_event_interruptible(sched->wake_up_worker,
(entity = amd_sched_select_entity(sched)) ||
@@ -446,15 +494,8 @@ static int amd_sched_main(void *param)
s_fence = sched_job->s_fence;
- if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
- schedule_delayed_work(&s_fence->dwork, sched->timeout);
- spin_lock_irqsave(&sched->fence_list_lock, flags);
- list_add_tail(&s_fence->list, &sched->fence_list);
- spin_unlock_irqrestore(&sched->fence_list_lock, flags);
- }
-
atomic_inc(&sched->hw_rq_count);
+ amd_sched_job_pre_schedule(sched, sched_job);
fence = sched->ops->run_job(sched_job);
amd_sched_fence_scheduled(s_fence);
if (fence) {
@@ -489,7 +530,7 @@ static int amd_sched_main(void *param)
* Return 0 on success, otherwise error code.
*/
int amd_sched_init(struct amd_gpu_scheduler *sched,
- struct amd_sched_backend_ops *ops,
+ const struct amd_sched_backend_ops *ops,
unsigned hw_submission, long timeout, const char *name)
{
int i;
@@ -502,6 +543,8 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
+ INIT_LIST_HEAD(&sched->ring_mirror_list);
+ spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
sched_fence_slab = kmem_cache_create(
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 9403145d7..070095a94 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -37,7 +37,7 @@ extern atomic_t sched_fence_slab_ref;
/**
* A scheduler entity is a wrapper around a job queue or a group
- * of other entities. Entities take turns emitting jobs from their
+ * of other entities. Entities take turns emitting jobs from their
* job queues to corresponding hardware ring based on scheduling
* policy.
*/
@@ -74,14 +74,21 @@ struct amd_sched_fence {
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
- struct delayed_work dwork;
- struct list_head list;
+ struct amd_sched_job *s_job;
};
struct amd_sched_job {
+ struct kref refcount;
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
+ bool use_sched; /* true if the job goes to scheduler */
+ struct fence_cb cb_free_job;
+ struct work_struct work_free_job;
+ struct list_head node;
+ struct delayed_work work_tdr;
+ void (*timeout_callback) (struct work_struct *work);
+ void (*free_callback)(struct kref *refcount);
};
extern const struct fence_ops amd_sched_fence_ops;
@@ -102,6 +109,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
struct amd_sched_backend_ops {
struct fence *(*dependency)(struct amd_sched_job *sched_job);
struct fence *(*run_job)(struct amd_sched_job *sched_job);
+ void (*begin_job)(struct amd_sched_job *sched_job);
+ void (*finish_job)(struct amd_sched_job *sched_job);
};
enum amd_sched_priority {
@@ -114,7 +123,7 @@ enum amd_sched_priority {
* One scheduler is implemented for each hardware ring
*/
struct amd_gpu_scheduler {
- struct amd_sched_backend_ops *ops;
+ const struct amd_sched_backend_ops *ops;
uint32_t hw_submission_limit;
long timeout;
const char *name;
@@ -122,13 +131,13 @@ struct amd_gpu_scheduler {
wait_queue_head_t wake_up_worker;
wait_queue_head_t job_scheduled;
atomic_t hw_rq_count;
- struct list_head fence_list;
- spinlock_t fence_list_lock;
struct task_struct *thread;
+ struct list_head ring_mirror_list;
+ spinlock_t job_list_lock;
};
int amd_sched_init(struct amd_gpu_scheduler *sched,
- struct amd_sched_backend_ops *ops,
+ const struct amd_sched_backend_ops *ops,
uint32_t hw_submission, long timeout, const char *name);
void amd_sched_fini(struct amd_gpu_scheduler *sched);
@@ -144,5 +153,24 @@ struct amd_sched_fence *amd_sched_fence_create(
struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
void amd_sched_fence_signal(struct amd_sched_fence *fence);
+int amd_sched_job_init(struct amd_sched_job *job,
+ struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ void (*timeout_cb)(struct work_struct *work),
+		       void (*free_cb)(struct kref *refcount),
+ void *owner, struct fence **fence);
+void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched,
+ struct amd_sched_job *s_job);
+void amd_sched_job_finish(struct amd_sched_job *s_job);
+void amd_sched_job_begin(struct amd_sched_job *s_job);
+static inline void amd_sched_job_get(struct amd_sched_job *job)
+{
+	if (job)
+		kref_get(&job->refcount);
+}
+
+static inline void amd_sched_job_put(struct amd_sched_job *job)
+{
+	if (job)
+		kref_put(&job->refcount, job->free_callback);
+}
#endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index dc115aea3..2a732c490 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -57,6 +57,16 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence)
FENCE_TRACE(&fence->base, "was already signaled\n");
}
+void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched,
+ struct amd_sched_job *s_job)
+{
+ unsigned long flags;
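+	/* Mirror the job on the scheduler's list and run the backend's begin_job hook, both under the job-list lock. */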
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ list_add_tail(&s_job->node, &sched->ring_mirror_list);
+ sched->ops->begin_job(s_job);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+
void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
{
struct fence_cb *cur, *tmp;
diff --git a/drivers/gpu/drm/arc/Kconfig b/drivers/gpu/drm/arc/Kconfig
new file mode 100644
index 000000000..f9a13b658
--- /dev/null
+++ b/drivers/gpu/drm/arc/Kconfig
@@ -0,0 +1,10 @@
+config DRM_ARCPGU
+ tristate "ARC PGU"
+ depends on DRM && OF
+ select DRM_KMS_CMA_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_KMS_HELPER
+ help
+ Choose this option if you have an ARC PGU controller.
+
+ If M is selected the module will be called arcpgu.
diff --git a/drivers/gpu/drm/arc/Makefile b/drivers/gpu/drm/arc/Makefile
new file mode 100644
index 000000000..d48fda70f
--- /dev/null
+++ b/drivers/gpu/drm/arc/Makefile
@@ -0,0 +1,2 @@
+arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_drv.o
+obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h
new file mode 100644
index 000000000..86574b698
--- /dev/null
+++ b/drivers/gpu/drm/arc/arcpgu.h
@@ -0,0 +1,50 @@
+/*
+ * ARC PGU DRM driver.
+ *
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCPGU_H_
+#define _ARCPGU_H_
+
+struct arcpgu_drm_private {
+ void __iomem *regs;
+ struct clk *clk;
+ struct drm_fbdev_cma *fbdev;
+ struct drm_framebuffer *fb;
+ struct list_head event_list;
+ struct drm_crtc crtc;
+ struct drm_plane *plane;
+};
+
+#define crtc_to_arcpgu_priv(x) container_of(x, struct arcpgu_drm_private, crtc)
+
+static inline void arc_pgu_write(struct arcpgu_drm_private *arcpgu,
+ unsigned int reg, u32 value)
+{
+ iowrite32(value, arcpgu->regs + reg);
+}
+
+static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu,
+ unsigned int reg)
+{
+ return ioread32(arcpgu->regs + reg);
+}
+
+int arc_pgu_setup_crtc(struct drm_device *dev);
+int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np);
+struct drm_fbdev_cma *arcpgu_fbdev_cma_init(struct drm_device *dev,
+ unsigned int preferred_bpp, unsigned int num_crtc,
+ unsigned int max_conn_count);
+
+#endif
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
new file mode 100644
index 000000000..92f8beff8
--- /dev/null
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -0,0 +1,257 @@
+/*
+ * ARC PGU DRM driver.
+ *
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <linux/clk.h>
+#include <linux/platform_data/simplefb.h>
+
+#include "arcpgu.h"
+#include "arcpgu_regs.h"
+
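+/* Geometry registers take (x - 1) in the upper 16 bits and (y - 1) in the lower 16 bits. */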
+#define ENCODE_PGU_XY(x, y) ((((x) - 1) << 16) | ((y) - 1))
+
+static struct simplefb_format supported_formats[] = {
+ { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 },
+ { "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 },
+};
+
+static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc)
+{
+ struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
+ uint32_t pixel_format = crtc->primary->state->fb->pixel_format;
+ struct simplefb_format *format = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
+ if (supported_formats[i].fourcc == pixel_format)
+ format = &supported_formats[i];
+ }
+
+ if (WARN_ON(!format))
+ return;
+
+ if (format->fourcc == DRM_FORMAT_RGB888)
+ arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
+ arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) |
+ ARCPGU_MODE_RGB888_MASK);
+}
+
+static const struct drm_crtc_funcs arc_pgu_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
+ struct drm_display_mode *m = &crtc->state->adjusted_mode;
+ u32 val;
+
+ arc_pgu_write(arcpgu, ARCPGU_REG_FMT,
+ ENCODE_PGU_XY(m->crtc_htotal, m->crtc_vtotal));
+
+ arc_pgu_write(arcpgu, ARCPGU_REG_HSYNC,
+ ENCODE_PGU_XY(m->crtc_hsync_start - m->crtc_hdisplay,
+ m->crtc_hsync_end - m->crtc_hdisplay));
+
+ arc_pgu_write(arcpgu, ARCPGU_REG_VSYNC,
+ ENCODE_PGU_XY(m->crtc_vsync_start - m->crtc_vdisplay,
+ m->crtc_vsync_end - m->crtc_vdisplay));
+
+ arc_pgu_write(arcpgu, ARCPGU_REG_ACTIVE,
+ ENCODE_PGU_XY(m->crtc_hblank_end - m->crtc_hblank_start,
+ m->crtc_vblank_end - m->crtc_vblank_start));
+
+ val = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL);
+
+ if (m->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST;
+ else
+ val &= ~(ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST);
+
+ if (m->flags & DRM_MODE_FLAG_PHSYNC)
+ val |= ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST;
+ else
+ val &= ~(ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST);
+
+ arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, val);
+ arc_pgu_write(arcpgu, ARCPGU_REG_STRIDE, 0);
+ arc_pgu_write(arcpgu, ARCPGU_REG_START_SET, 1);
+
+ arc_pgu_set_pxl_fmt(crtc);
+
+ clk_set_rate(arcpgu->clk, m->crtc_clock * 1000);
+}
+
+static void arc_pgu_crtc_enable(struct drm_crtc *crtc)
+{
+ struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
+
+ clk_prepare_enable(arcpgu->clk);
+ arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
+ arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) |
+ ARCPGU_CTRL_ENABLE_MASK);
+}
+
+static void arc_pgu_crtc_disable(struct drm_crtc *crtc)
+{
+ struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
+
+ if (!crtc->primary->fb)
+ return;
+
+ clk_disable_unprepare(arcpgu->clk);
+ arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
+ arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) &
+ ~ARCPGU_CTRL_ENABLE_MASK);
+}
+
+static int arc_pgu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
+ struct drm_display_mode *mode = &state->adjusted_mode;
+ long rate, clk_rate = mode->clock * 1000;
+
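+	/* Reject the mode unless the clock can produce the requested pixel rate exactly. */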
+ rate = clk_round_rate(arcpgu->clk, clk_rate);
+ if (rate != clk_rate)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
+ unsigned long flags;
+
+ if (crtc->state->event) {
+ struct drm_pending_vblank_event *event = crtc->state->event;
+
+ crtc->state->event = NULL;
+ event->pipe = drm_crtc_index(crtc);
+
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ list_add_tail(&event->base.link, &arcpgu->event_list);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+}
+
+static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = {
+ .mode_set = drm_helper_crtc_mode_set,
+ .mode_set_base = drm_helper_crtc_mode_set_base,
+ .mode_set_nofb = arc_pgu_crtc_mode_set_nofb,
+ .enable = arc_pgu_crtc_enable,
+ .disable = arc_pgu_crtc_disable,
+ .prepare = arc_pgu_crtc_disable,
+ .commit = arc_pgu_crtc_enable,
+ .atomic_check = arc_pgu_crtc_atomic_check,
+ .atomic_begin = arc_pgu_crtc_atomic_begin,
+};
+
+static void arc_pgu_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct arcpgu_drm_private *arcpgu;
+ struct drm_gem_cma_object *gem;
+
+ if (!plane->state->crtc || !plane->state->fb)
+ return;
+
+ arcpgu = crtc_to_arcpgu_priv(plane->state->crtc);
+ gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
+ arc_pgu_write(arcpgu, ARCPGU_REG_BUF0_ADDR, gem->paddr);
+}
+
+static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
+ .prepare_fb = NULL,
+ .cleanup_fb = NULL,
+ .atomic_update = arc_pgu_plane_atomic_update,
+};
+
+static void arc_pgu_plane_destroy(struct drm_plane *plane)
+{
+ drm_plane_helper_disable(plane);
+ drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs arc_pgu_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = arc_pgu_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static struct drm_plane *arc_pgu_plane_init(struct drm_device *drm)
+{
+ struct arcpgu_drm_private *arcpgu = drm->dev_private;
+ struct drm_plane *plane = NULL;
+ u32 formats[ARRAY_SIZE(supported_formats)], i;
+ int ret;
+
+ plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ARRAY_SIZE(supported_formats); i++)
+ formats[i] = supported_formats[i].fourcc;
+
+ ret = drm_universal_plane_init(drm, plane, 0xff, &arc_pgu_plane_funcs,
+ formats, ARRAY_SIZE(formats),
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ drm_plane_helper_add(plane, &arc_pgu_plane_helper_funcs);
+ arcpgu->plane = plane;
+
+ return plane;
+}
+
+int arc_pgu_setup_crtc(struct drm_device *drm)
+{
+ struct arcpgu_drm_private *arcpgu = drm->dev_private;
+ struct drm_plane *primary;
+ int ret;
+
+ primary = arc_pgu_plane_init(drm);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ ret = drm_crtc_init_with_planes(drm, &arcpgu->crtc, primary, NULL,
+ &arc_pgu_crtc_funcs, NULL);
+ if (ret) {
+ arc_pgu_plane_destroy(primary);
+ return ret;
+ }
+
+ drm_crtc_helper_add(&arcpgu->crtc, &arc_pgu_crtc_helper_funcs);
+ return 0;
+}
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
new file mode 100644
index 000000000..76e187a5b
--- /dev/null
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -0,0 +1,288 @@
+/*
+ * ARC PGU DRM driver.
+ *
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/clk.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <linux/of_reserved_mem.h>
+
+#include "arcpgu.h"
+#include "arcpgu_regs.h"
+
+static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct arcpgu_drm_private *arcpgu = dev->dev_private;
+
+ if (arcpgu->fbdev)
+ drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
+}
+
+static int arcpgu_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state, bool async)
+{
+ return drm_atomic_helper_commit(dev, state, false);
+}
+
+static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
+ .fb_create = drm_fb_cma_create,
+ .output_poll_changed = arcpgu_fb_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = arcpgu_atomic_commit,
+};
+
+static void arcpgu_setup_mode_config(struct drm_device *drm)
+{
+ drm_mode_config_init(drm);
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+ drm->mode_config.max_width = 1920;
+ drm->mode_config.max_height = 1080;
+ drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
+}
+
+int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
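+	/* Use an uncached mapping so framebuffer writes are not held back in the CPU caches. */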
+ vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ return 0;
+}
+
+static const struct file_operations arcpgu_drm_ops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = arcpgu_gem_mmap,
+};
+
+static void arcpgu_preclose(struct drm_device *drm, struct drm_file *file)
+{
+ struct arcpgu_drm_private *arcpgu = drm->dev_private;
+ struct drm_pending_vblank_event *e, *t;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm->event_lock, flags);
+ list_for_each_entry_safe(e, t, &arcpgu->event_list, base.link) {
+ if (e->base.file_priv != file)
+ continue;
+ list_del(&e->base.link);
+ e->base.destroy(&e->base);
+ }
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+}
+
+static void arcpgu_lastclose(struct drm_device *drm)
+{
+ struct arcpgu_drm_private *arcpgu = drm->dev_private;
+
+ drm_fbdev_cma_restore_mode(arcpgu->fbdev);
+}
+
+static int arcpgu_load(struct drm_device *drm)
+{
+ struct platform_device *pdev = to_platform_device(drm->dev);
+ struct arcpgu_drm_private *arcpgu;
+ struct device_node *encoder_node;
+ struct resource *res;
+ int ret;
+
+ arcpgu = devm_kzalloc(&pdev->dev, sizeof(*arcpgu), GFP_KERNEL);
+ if (arcpgu == NULL)
+ return -ENOMEM;
+
+ drm->dev_private = arcpgu;
+
+ arcpgu->clk = devm_clk_get(drm->dev, "pxlclk");
+ if (IS_ERR(arcpgu->clk))
+ return PTR_ERR(arcpgu->clk);
+
+ INIT_LIST_HEAD(&arcpgu->event_list);
+
+ arcpgu_setup_mode_config(drm);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(arcpgu->regs)) {
+ dev_err(drm->dev, "Could not remap IO mem\n");
+ return PTR_ERR(arcpgu->regs);
+ }
+
+ dev_info(drm->dev, "arc_pgu ID: 0x%x\n",
+ arc_pgu_read(arcpgu, ARCPGU_REG_ID));
+
+ /* Get the optional framebuffer memory resource */
+ ret = of_reserved_mem_device_init(drm->dev);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ if (dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)))
+ return -ENODEV;
+
+ if (arc_pgu_setup_crtc(drm) < 0)
+ return -ENODEV;
+
+ /* find the encoder node and initialize it */
+ encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
+ if (!encoder_node) {
+ dev_err(drm->dev, "failed to get an encoder slave node\n");
+ return -ENODEV;
+ }
+
+ ret = arcpgu_drm_hdmi_init(drm, encoder_node);
+ if (ret < 0)
+ return ret;
+
+ drm_mode_config_reset(drm);
+ drm_kms_helper_poll_init(drm);
+
+ arcpgu->fbdev = drm_fbdev_cma_init(drm, 16,
+ drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (IS_ERR(arcpgu->fbdev)) {
+ ret = PTR_ERR(arcpgu->fbdev);
+ arcpgu->fbdev = NULL;
+		return ret;
+ }
+
+ platform_set_drvdata(pdev, arcpgu);
+ return 0;
+}
+
+int arcpgu_unload(struct drm_device *drm)
+{
+ struct arcpgu_drm_private *arcpgu = drm->dev_private;
+
+ if (arcpgu->fbdev) {
+ drm_fbdev_cma_fini(arcpgu->fbdev);
+ arcpgu->fbdev = NULL;
+ }
+ drm_kms_helper_poll_fini(drm);
+ drm_vblank_cleanup(drm);
+ drm_mode_config_cleanup(drm);
+
+ return 0;
+}
+
+static struct drm_driver arcpgu_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+ .preclose = arcpgu_preclose,
+ .lastclose = arcpgu_lastclose,
+ .name = "drm-arcpgu",
+ .desc = "ARC PGU Controller",
+ .date = "20160219",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+ .fops = &arcpgu_drm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+};
+
+static int arcpgu_probe(struct platform_device *pdev)
+{
+ struct drm_device *drm;
+ int ret;
+
+ drm = drm_dev_alloc(&arcpgu_drm_driver, &pdev->dev);
+ if (!drm)
+ return -ENOMEM;
+
+ ret = arcpgu_load(drm);
+ if (ret)
+ goto err_unref;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_unload;
+
+ ret = drm_connector_register_all(drm);
+ if (ret)
+ goto err_unregister;
+
+ return 0;
+
+err_unregister:
+ drm_dev_unregister(drm);
+
+err_unload:
+ arcpgu_unload(drm);
+
+err_unref:
+ drm_dev_unref(drm);
+
+ return ret;
+}
+
+static int arcpgu_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_connector_unregister_all(drm);
+ drm_dev_unregister(drm);
+ arcpgu_unload(drm);
+ drm_dev_unref(drm);
+
+ return 0;
+}
+
+static const struct of_device_id arcpgu_of_table[] = {
+ {.compatible = "snps,arcpgu"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, arcpgu_of_table);
+
+static struct platform_driver arcpgu_platform_driver = {
+ .probe = arcpgu_probe,
+ .remove = arcpgu_remove,
+ .driver = {
+ .name = "arcpgu",
+ .of_match_table = arcpgu_of_table,
+ },
+};
+
+module_platform_driver(arcpgu_platform_driver);
+
+MODULE_AUTHOR("Carlos Palminha <palminha@synopsys.com>");
+MODULE_DESCRIPTION("ARC PGU DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
new file mode 100644
index 000000000..08b6baeb3
--- /dev/null
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -0,0 +1,201 @@
+/*
+ * ARC PGU DRM driver.
+ *
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_atomic_helper.h>
+
+#include "arcpgu.h"
+
+struct arcpgu_drm_connector {
+ struct drm_connector connector;
+ struct drm_encoder_slave *encoder_slave;
+};
+
+static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
+{
+ const struct drm_encoder_slave_funcs *sfuncs;
+ struct drm_encoder_slave *slave;
+ struct arcpgu_drm_connector *con =
+ container_of(connector, struct arcpgu_drm_connector, connector);
+
+ slave = con->encoder_slave;
+ if (slave == NULL) {
+ dev_err(connector->dev->dev,
+ "connector_get_modes: cannot find slave encoder for connector\n");
+ return 0;
+ }
+
+ sfuncs = slave->slave_funcs;
+ if (sfuncs->get_modes == NULL)
+ return 0;
+
+ return sfuncs->get_modes(&slave->base, connector);
+}
+
+struct drm_encoder *
+arcpgu_drm_connector_best_encoder(struct drm_connector *connector)
+{
+ struct drm_encoder_slave *slave;
+ struct arcpgu_drm_connector *con =
+ container_of(connector, struct arcpgu_drm_connector, connector);
+
+ slave = con->encoder_slave;
+ if (slave == NULL) {
+ dev_err(connector->dev->dev,
+ "connector_best_encoder: cannot find slave encoder for connector\n");
+ return NULL;
+ }
+
+ return &slave->base;
+}
+
+static enum drm_connector_status
+arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+ enum drm_connector_status status = connector_status_unknown;
+ const struct drm_encoder_slave_funcs *sfuncs;
+ struct drm_encoder_slave *slave;
+
+ struct arcpgu_drm_connector *con =
+ container_of(connector, struct arcpgu_drm_connector, connector);
+
+ slave = con->encoder_slave;
+ if (slave == NULL) {
+ dev_err(connector->dev->dev,
+ "connector_detect: cannot find slave encoder for connector\n");
+ return status;
+ }
+
+ sfuncs = slave->slave_funcs;
+ if (sfuncs && sfuncs->detect)
+ return sfuncs->detect(&slave->base, connector);
+
+ dev_err(connector->dev->dev, "connector_detect: could not detect slave funcs\n");
+ return status;
+}
+
+static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_helper_funcs
+arcpgu_drm_connector_helper_funcs = {
+ .get_modes = arcpgu_drm_connector_get_modes,
+ .best_encoder = arcpgu_drm_connector_best_encoder,
+};
+
+static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .reset = drm_atomic_helper_connector_reset,
+ .detect = arcpgu_drm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = arcpgu_drm_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static struct drm_encoder_helper_funcs arcpgu_drm_encoder_helper_funcs = {
+ .dpms = drm_i2c_encoder_dpms,
+ .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .mode_set = drm_i2c_encoder_mode_set,
+ .prepare = drm_i2c_encoder_prepare,
+ .commit = drm_i2c_encoder_commit,
+ .detect = drm_i2c_encoder_detect,
+};
+
+static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
+{
+ struct arcpgu_drm_connector *arcpgu_connector;
+ struct drm_i2c_encoder_driver *driver;
+ struct drm_encoder_slave *encoder;
+ struct drm_connector *connector;
+ struct i2c_client *i2c_slave;
+ int ret;
+
+ encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
+ if (encoder == NULL)
+ return -ENOMEM;
+
+ i2c_slave = of_find_i2c_device_by_node(np);
+ if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) {
+ dev_err(drm->dev, "failed to find i2c slave encoder\n");
+ return -EPROBE_DEFER;
+ }
+
+ if (i2c_slave->dev.driver == NULL) {
+ dev_err(drm->dev, "failed to find i2c slave driver\n");
+ return -EPROBE_DEFER;
+ }
+
+ driver =
+ to_drm_i2c_encoder_driver(to_i2c_driver(i2c_slave->dev.driver));
+ ret = driver->encoder_init(i2c_slave, drm, encoder);
+ if (ret) {
+ dev_err(drm->dev, "failed to initialize i2c encoder slave\n");
+ return ret;
+ }
+
+ encoder->base.possible_crtcs = 1;
+ encoder->base.possible_clones = 0;
+ ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ret;
+
+ drm_encoder_helper_add(&encoder->base,
+ &arcpgu_drm_encoder_helper_funcs);
+
+ arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
+ GFP_KERNEL);
+ if (!arcpgu_connector) {
+ ret = -ENOMEM;
+ goto error_encoder_cleanup;
+ }
+
+ connector = &arcpgu_connector->connector;
+ drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
+ ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret < 0) {
+ dev_err(drm->dev, "failed to initialize drm connector\n");
+ goto error_encoder_cleanup;
+ }
+
+ ret = drm_mode_connector_attach_encoder(connector, &encoder->base);
+ if (ret < 0) {
+ dev_err(drm->dev, "could not attach connector to encoder\n");
+ drm_connector_unregister(connector);
+ goto error_connector_cleanup;
+ }
+
+ arcpgu_connector->encoder_slave = encoder;
+
+ return 0;
+
+error_connector_cleanup:
+ drm_connector_cleanup(connector);
+
+error_encoder_cleanup:
+ drm_encoder_cleanup(&encoder->base);
+ return ret;
+}
diff --git a/drivers/gpu/drm/arc/arcpgu_regs.h b/drivers/gpu/drm/arc/arcpgu_regs.h
new file mode 100644
index 000000000..95a13a84c
--- /dev/null
+++ b/drivers/gpu/drm/arc/arcpgu_regs.h
@@ -0,0 +1,40 @@
+/*
+ * ARC PGU DRM driver.
+ *
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARC_PGU_REGS_H_
+#define _ARC_PGU_REGS_H_
+
+#define ARCPGU_REG_CTRL 0x00
+#define ARCPGU_REG_STAT 0x04
+#define ARCPGU_REG_FMT 0x10
+#define ARCPGU_REG_HSYNC 0x14
+#define ARCPGU_REG_VSYNC 0x18
+#define ARCPGU_REG_ACTIVE 0x1c
+#define ARCPGU_REG_BUF0_ADDR 0x40
+#define ARCPGU_REG_STRIDE 0x50
+#define ARCPGU_REG_START_SET 0x84
+
+#define ARCPGU_REG_ID 0x3FC
+
+#define ARCPGU_CTRL_ENABLE_MASK 0x02
+#define ARCPGU_CTRL_VS_POL_MASK 0x1
+#define ARCPGU_CTRL_VS_POL_OFST 0x3
+#define ARCPGU_CTRL_HS_POL_MASK 0x1
+#define ARCPGU_CTRL_HS_POL_OFST 0x4
+#define ARCPGU_MODE_RGB888_MASK 0x04
+#define ARCPGU_STAT_BUSY_MASK 0x02
+
+#endif
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index fef1b04c2..0813c2f06 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -33,8 +33,17 @@
*
*/
+static void hdlcd_crtc_cleanup(struct drm_crtc *crtc)
+{
+ struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+
+ /* stop the controller on cleanup */
+ hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
+ drm_crtc_cleanup(crtc);
+}
+
static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
- .destroy = drm_crtc_cleanup,
+ .destroy = hdlcd_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
@@ -97,7 +106,7 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
struct drm_display_mode *m = &crtc->state->adjusted_mode;
struct videomode vm;
- unsigned int polarities, line_length, err;
+ unsigned int polarities, err;
vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay;
vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end;
@@ -113,23 +122,18 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (m->flags & DRM_MODE_FLAG_PVSYNC)
polarities |= HDLCD_POLARITY_VSYNC;
- line_length = crtc->primary->state->fb->pitches[0];
-
/* Allow max number of outstanding requests and largest burst size */
hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS,
HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16);
- hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length);
- hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length);
- hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1);
hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1);
hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1);
hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1);
hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1);
+ hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1);
hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1);
hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1);
- hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities);
err = hdlcd_set_pxl_fmt(crtc);
@@ -144,20 +148,19 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc)
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
clk_prepare_enable(hdlcd->clk);
+ hdlcd_crtc_mode_set_nofb(crtc);
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
- drm_crtc_vblank_on(crtc);
}
static void hdlcd_crtc_disable(struct drm_crtc *crtc)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
- if (!crtc->primary->fb)
+ if (!crtc->state->active)
return;
- clk_disable_unprepare(hdlcd->clk);
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
- drm_crtc_vblank_off(crtc);
+ clk_disable_unprepare(hdlcd->clk);
}
static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
@@ -179,20 +182,17 @@ static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
- unsigned long flags;
-
- if (crtc->state->event) {
- struct drm_pending_vblank_event *event = crtc->state->event;
+	struct drm_pending_vblank_event *event = crtc->state->event;
+
+	if (event) {
crtc->state->event = NULL;
- event->pipe = drm_crtc_index(crtc);
-
- WARN_ON(drm_crtc_vblank_get(crtc) != 0);
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- list_add_tail(&event->base.link, &hdlcd->event_list);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
}
}
@@ -225,6 +225,15 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ u32 src_w, src_h;
+
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+
+ /* we can't do any scaling of the plane source */
+ if ((src_w != state->crtc_w) || (src_h != state->crtc_h))
+ return -EINVAL;
+
return 0;
}
@@ -233,20 +242,31 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
{
struct hdlcd_drm_private *hdlcd;
struct drm_gem_cma_object *gem;
+ unsigned int depth, bpp;
+ u32 src_w, src_h, dest_w, dest_h;
dma_addr_t scanout_start;
- if (!plane->state->crtc || !plane->state->fb)
+ if (!plane->state->fb)
return;
- hdlcd = crtc_to_hdlcd_priv(plane->state->crtc);
+ drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp);
+ src_w = plane->state->src_w >> 16;
+ src_h = plane->state->src_h >> 16;
+ dest_w = plane->state->crtc_w;
+ dest_h = plane->state->crtc_h;
gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
- scanout_start = gem->paddr;
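+	/* Scan-out starts at the buffer base plus the FB offset plus the plane's (x, y) origin in bytes. */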
+ scanout_start = gem->paddr + plane->state->fb->offsets[0] +
+ plane->state->crtc_y * plane->state->fb->pitches[0] +
+ plane->state->crtc_x * bpp / 8;
+
+ hdlcd = plane->dev->dev_private;
+ hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]);
+ hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, plane->state->fb->pitches[0]);
+ hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1);
hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start);
}
static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
- .prepare_fb = NULL,
- .cleanup_fb = NULL,
.atomic_check = hdlcd_plane_atomic_check,
.atomic_update = hdlcd_plane_atomic_update,
};
@@ -294,16 +314,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
return plane;
}
-void hdlcd_crtc_suspend(struct drm_crtc *crtc)
-{
- hdlcd_crtc_disable(crtc);
-}
-
-void hdlcd_crtc_resume(struct drm_crtc *crtc)
-{
- hdlcd_crtc_enable(crtc);
-}
-
int hdlcd_setup_crtc(struct drm_device *drm)
{
struct hdlcd_drm_private *hdlcd = drm->dev_private;
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 3ac1ae4d8..a6ca36f00 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -49,8 +49,6 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
atomic_set(&hdlcd->dma_end_count, 0);
#endif
- INIT_LIST_HEAD(&hdlcd->event_list);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hdlcd->mmio = devm_ioremap_resource(drm->dev, res);
if (IS_ERR(hdlcd->mmio)) {
@@ -84,11 +82,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags)
goto setup_fail;
}
- pm_runtime_enable(drm->dev);
-
- pm_runtime_get_sync(drm->dev);
ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
- pm_runtime_put_sync(drm->dev);
if (ret < 0) {
DRM_ERROR("failed to install IRQ handler\n");
goto irq_fail;
@@ -113,7 +107,7 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
}
static int hdlcd_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state, bool async)
+ struct drm_atomic_state *state, bool nonblock)
{
return drm_atomic_helper_commit(dev, state, false);
}
@@ -164,24 +158,9 @@ static irqreturn_t hdlcd_irq(int irq, void *arg)
atomic_inc(&hdlcd->vsync_count);
#endif
- if (irq_status & HDLCD_INTERRUPT_VSYNC) {
- bool events_sent = false;
- unsigned long flags;
- struct drm_pending_vblank_event *e, *t;
-
+ if (irq_status & HDLCD_INTERRUPT_VSYNC)
drm_crtc_handle_vblank(&hdlcd->crtc);
- spin_lock_irqsave(&drm->event_lock, flags);
- list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) {
- list_del(&e->base.link);
- drm_crtc_send_vblank_event(&hdlcd->crtc, e);
- events_sent = true;
- }
- if (events_sent)
- drm_crtc_vblank_put(&hdlcd->crtc);
- spin_unlock_irqrestore(&drm->event_lock, flags);
- }
-
/* acknowledge interrupt(s) */
hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status);
@@ -275,6 +254,7 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
static struct drm_info_list hdlcd_debugfs_list[] = {
{ "interrupt_count", hdlcd_show_underrun_count, 0 },
{ "clocks", hdlcd_show_pxlclock, 0 },
+ { "fb", drm_fb_cma_debugfs_show, 0 },
};
static int hdlcd_debugfs_init(struct drm_minor *minor)
@@ -357,6 +337,8 @@ static int hdlcd_drm_bind(struct device *dev)
return -ENOMEM;
drm->dev_private = hdlcd;
+ dev_set_drvdata(dev, drm);
+
hdlcd_setup_mode_config(drm);
ret = hdlcd_load(drm, 0);
if (ret)
@@ -366,20 +348,23 @@ static int hdlcd_drm_bind(struct device *dev)
if (ret)
goto err_unload;
- dev_set_drvdata(dev, drm);
-
ret = component_bind_all(dev, drm);
if (ret) {
DRM_ERROR("Failed to bind all components\n");
goto err_unregister;
}
+ ret = pm_runtime_set_active(dev);
+ if (ret)
+ goto err_pm_active;
+
+ pm_runtime_enable(dev);
+
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
goto err_vblank;
}
- drm->vblank_disable_allowed = true;
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
@@ -400,16 +385,16 @@ err_fbdev:
drm_mode_config_cleanup(drm);
drm_vblank_cleanup(drm);
err_vblank:
+ pm_runtime_disable(drm->dev);
+err_pm_active:
component_unbind_all(dev, drm);
err_unregister:
drm_dev_unregister(drm);
err_unload:
- pm_runtime_get_sync(drm->dev);
drm_irq_uninstall(drm);
- pm_runtime_put_sync(drm->dev);
- pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
err_free:
+ dev_set_drvdata(dev, NULL);
drm_dev_unref(drm);
return ret;
@@ -496,30 +481,34 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match);
static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct drm_crtc *crtc;
+ struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
- if (pm_runtime_suspended(dev))
+ if (!hdlcd)
return 0;
- drm_modeset_lock_all(drm);
- list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
- hdlcd_crtc_suspend(crtc);
- drm_modeset_unlock_all(drm);
+ drm_kms_helper_poll_disable(drm);
+
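+	/* Save the atomic state so hdlcd_pm_resume() can restore it. */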
+ hdlcd->state = drm_atomic_helper_suspend(drm);
+ if (IS_ERR(hdlcd->state)) {
+ drm_kms_helper_poll_enable(drm);
+ return PTR_ERR(hdlcd->state);
+ }
+
return 0;
}
static int __maybe_unused hdlcd_pm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct drm_crtc *crtc;
+ struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
- if (!pm_runtime_suspended(dev))
+ if (!hdlcd)
return 0;
- drm_modeset_lock_all(drm);
- list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
- hdlcd_crtc_resume(crtc);
- drm_modeset_unlock_all(drm);
+ drm_atomic_helper_resume(drm, hdlcd->state);
+ drm_kms_helper_poll_enable(drm);
+ pm_runtime_set_active(dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h
index aa234784f..e3950a071 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.h
+++ b/drivers/gpu/drm/arm/hdlcd_drv.h
@@ -9,10 +9,9 @@ struct hdlcd_drm_private {
void __iomem *mmio;
struct clk *clk;
struct drm_fbdev_cma *fbdev;
- struct drm_framebuffer *fb;
- struct list_head event_list;
struct drm_crtc crtc;
struct drm_plane *plane;
+ struct drm_atomic_state *state;
#ifdef CONFIG_DEBUG_FS
atomic_t buffer_underrun_count;
atomic_t bus_error_count;
@@ -36,7 +35,5 @@ static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg)
int hdlcd_setup_crtc(struct drm_device *dev);
void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd);
-void hdlcd_crtc_suspend(struct drm_crtc *crtc);
-void hdlcd_crtc_resume(struct drm_crtc *crtc);
#endif /* __HDLCD_DRV_H__ */
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 0293eb74d..3130aa8bc 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -897,7 +897,6 @@ static void cursor_update(void *data)
static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h)
{
- struct drm_device *dev = crtc->dev;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct armada_gem_object *obj = NULL;
int ret;
@@ -911,7 +910,7 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
if (w > 64 || h > 64 || (w > 32 && h > 32))
return -ENOMEM;
- obj = armada_gem_object_lookup(dev, file, handle);
+ obj = armada_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 82043c204..439824a61 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -113,7 +113,6 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
goto err_comp;
dev->irq_enabled = true;
- dev->vblank_disable_allowed = 1;
ret = armada_fbdev_init(dev);
if (ret)
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index 5fa4bf20b..f03c212b7 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -120,7 +120,7 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
goto err;
}
- obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]);
+ obj = armada_gem_object_lookup(dfile, mode->handles[0]);
if (!obj) {
ret = -ENOENT;
goto err;
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index aca7f9cc6..88e7fc797 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -278,7 +278,7 @@ int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
struct armada_gem_object *obj;
int ret = 0;
- obj = armada_gem_object_lookup(dev, file, handle);
+ obj = armada_gem_object_lookup(file, handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
return -EINVAL;
@@ -348,7 +348,7 @@ int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct armada_gem_object *dobj;
unsigned long addr;
- dobj = armada_gem_object_lookup(dev, file, args->handle);
+ dobj = armada_gem_object_lookup(file, args->handle);
if (dobj == NULL)
return -ENOENT;
@@ -391,7 +391,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- dobj = armada_gem_object_lookup(dev, file, args->handle);
+ dobj = armada_gem_object_lookup(file, args->handle);
if (dobj == NULL)
return -ENOENT;
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
index b000ea3a8..b88d2b985 100644
--- a/drivers/gpu/drm/armada/armada_gem.h
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -45,9 +45,9 @@ struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
int armada_gem_map_import(struct armada_gem_object *);
static inline struct armada_gem_object *armada_gem_object_lookup(
- struct drm_device *dev, struct drm_file *dfile, unsigned handle)
+ struct drm_file *dfile, unsigned handle)
{
- struct drm_gem_object *obj = drm_gem_object_lookup(dev, dfile, handle);
+ struct drm_gem_object *obj = drm_gem_object_lookup(dfile, handle);
return obj ? drm_to_armada_gem(obj) : NULL;
}
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 9a32d9dfd..fcd9c0714 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -218,10 +218,8 @@ static struct drm_driver driver = {
static int __init ast_init(void)
{
-#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && ast_modeset == -1)
return -EINVAL;
-#endif
if (ast_modeset == 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index eb5715994..908011d2c 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -367,7 +367,7 @@ static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
{
int ret;
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index b1480acbb..7bc3aa6dd 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -333,7 +333,7 @@ ast_user_framebuffer_create(struct drm_device *dev,
struct ast_framebuffer *ast_fb;
int ret;
- obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
@@ -574,7 +574,7 @@ ast_dumb_mmap_offset(struct drm_file *file,
struct drm_gem_object *obj;
struct ast_bo *bo;
- obj = drm_gem_object_lookup(dev, file, handle);
+ obj = drm_gem_object_lookup(file, handle);
if (obj == NULL)
return -ENOENT;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index a965e7e8a..c33792260 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1141,7 +1141,7 @@ static int ast_cursor_set(struct drm_crtc *crtc,
if (width > AST_MAX_HWC_WIDTH || height > AST_MAX_HWC_HEIGHT)
return -EINVAL;
- obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
return -ENOENT;
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 08f82eae6..59f2f93b6 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -245,6 +245,8 @@ struct ttm_bo_driver ast_bo_driver = {
.verify_access = ast_bo_verify_access,
.io_mem_reserve = &ast_ttm_io_mem_reserve,
.io_mem_free = &ast_ttm_io_mem_free,
+ .lru_tail = &ttm_bo_default_lru_tail,
+ .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int ast_mm_init(struct ast_private *ast)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 58c4f785c..bd12231ab 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -32,6 +32,23 @@
#include "atmel_hlcdc_dc.h"
/**
+ * Atmel HLCDC CRTC state structure
+ *
+ * @base: base CRTC state
+ * @output_mode: RGBXXX output mode
+ */
+struct atmel_hlcdc_crtc_state {
+ struct drm_crtc_state base;
+ unsigned int output_mode;
+};
+
+static inline struct atmel_hlcdc_crtc_state *
+drm_crtc_state_to_atmel_hlcdc_crtc_state(struct drm_crtc_state *state)
+{
+ return container_of(state, struct atmel_hlcdc_crtc_state, base);
+}
+
+/**
* Atmel HLCDC CRTC structure
*
* @base: base DRM CRTC structure
@@ -59,6 +76,7 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
struct regmap *regmap = crtc->dc->hlcdc->regmap;
struct drm_display_mode *adj = &c->state->adjusted_mode;
+ struct atmel_hlcdc_crtc_state *state;
unsigned long mode_rate;
struct videomode vm;
unsigned long prate;
@@ -112,15 +130,27 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
if (adj->flags & DRM_MODE_FLAG_NHSYNC)
cfg |= ATMEL_HLCDC_HSPOL;
+ state = drm_crtc_state_to_atmel_hlcdc_crtc_state(c->state);
+ cfg |= state->output_mode << 8;
+
regmap_update_bits(regmap, ATMEL_HLCDC_CFG(5),
ATMEL_HLCDC_HSPOL | ATMEL_HLCDC_VSPOL |
ATMEL_HLCDC_VSPDLYS | ATMEL_HLCDC_VSPDLYE |
ATMEL_HLCDC_DISPPOL | ATMEL_HLCDC_DISPDLY |
ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO |
- ATMEL_HLCDC_GUARDTIME_MASK,
+ ATMEL_HLCDC_GUARDTIME_MASK | ATMEL_HLCDC_MODE_MASK,
cfg);
}
+static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *c,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+
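+	/* Reject adjusted modes that exceed the controller's timing limits. */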
+ return atmel_hlcdc_dc_mode_valid(crtc->dc, adjusted_mode) == MODE_OK;
+}
+
static void atmel_hlcdc_crtc_disable(struct drm_crtc *c)
{
struct drm_device *dev = c->dev;
@@ -221,15 +251,79 @@ void atmel_hlcdc_crtc_resume(struct drm_crtc *c)
}
}
+#define ATMEL_HLCDC_RGB444_OUTPUT BIT(0)
+#define ATMEL_HLCDC_RGB565_OUTPUT BIT(1)
+#define ATMEL_HLCDC_RGB666_OUTPUT BIT(2)
+#define ATMEL_HLCDC_RGB888_OUTPUT BIT(3)
+#define ATMEL_HLCDC_OUTPUT_MODE_MASK GENMASK(3, 0)
+
+static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state)
+{
+ unsigned int output_fmts = ATMEL_HLCDC_OUTPUT_MODE_MASK;
+ struct atmel_hlcdc_crtc_state *hstate;
+ struct drm_connector_state *cstate;
+ struct drm_connector *connector;
+ struct atmel_hlcdc_crtc *crtc;
+ int i;
+
+ crtc = drm_crtc_to_atmel_hlcdc_crtc(state->crtc);
+
+ for_each_connector_in_state(state->state, connector, cstate, i) {
+ struct drm_display_info *info = &connector->display_info;
+ unsigned int supported_fmts = 0;
+ int j;
+
+ if (!cstate->crtc)
+ continue;
+
+ for (j = 0; j < info->num_bus_formats; j++) {
+ switch (info->bus_formats[j]) {
+ case MEDIA_BUS_FMT_RGB444_1X12:
+ supported_fmts |= ATMEL_HLCDC_RGB444_OUTPUT;
+ break;
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ supported_fmts |= ATMEL_HLCDC_RGB565_OUTPUT;
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ supported_fmts |= ATMEL_HLCDC_RGB666_OUTPUT;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ supported_fmts |= ATMEL_HLCDC_RGB888_OUTPUT;
+ break;
+ default:
+ break;
+ }
+ }
+
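+		/*
+		 * When output formats conflict, only formats supported by
+		 * every connector remain usable (intersection); otherwise
+		 * any format supported by some connector is kept (union).
+		 */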
+ if (crtc->dc->desc->conflicting_output_formats)
+ output_fmts &= supported_fmts;
+ else
+ output_fmts |= supported_fmts;
+ }
+
+ if (!output_fmts)
+ return -EINVAL;
+
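+	/* fls() picks the widest remaining format (highest set bit). */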
+ hstate = drm_crtc_state_to_atmel_hlcdc_crtc_state(state);
+ hstate->output_mode = fls(output_fmts) - 1;
+
+ return 0;
+}
+
static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
struct drm_crtc_state *s)
{
- struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
+ int ret;
- if (atmel_hlcdc_dc_mode_valid(crtc->dc, &s->adjusted_mode) != MODE_OK)
- return -EINVAL;
+ ret = atmel_hlcdc_crtc_select_output_mode(s);
+ if (ret)
+ return ret;
+
+ ret = atmel_hlcdc_plane_prepare_disc_area(s);
+ if (ret)
+ return ret;
- return atmel_hlcdc_plane_prepare_disc_area(s);
+ return atmel_hlcdc_plane_prepare_ahb_routing(s);
}
static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
@@ -254,6 +348,7 @@ static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
+ .mode_fixup = atmel_hlcdc_crtc_mode_fixup,
.mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
.mode_set_base = drm_helper_crtc_mode_set_base,
@@ -292,13 +387,60 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
}
+void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc)
+{
+ struct atmel_hlcdc_crtc_state *state;
+
+ if (crtc->state) {
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+ state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
+ kfree(state);
+ crtc->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ crtc->state = &state->base;
+ crtc->state->crtc = crtc;
+ }
+}
+
+static struct drm_crtc_state *
+atmel_hlcdc_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct atmel_hlcdc_crtc_state *state, *cur;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
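+	/* Carry the negotiated output mode over to the new state. */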
+ cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state);
+ state->output_mode = cur->output_mode;
+
+ return &state->base;
+}
+
+static void atmel_hlcdc_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *s)
+{
+ struct atmel_hlcdc_crtc_state *state;
+
+ state = drm_crtc_state_to_atmel_hlcdc_crtc_state(s);
+ __drm_atomic_helper_crtc_destroy_state(s);
+ kfree(state);
+}
+
static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.set_config = drm_atomic_helper_set_config,
.destroy = atmel_hlcdc_crtc_destroy,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .reset = atmel_hlcdc_crtc_reset,
+ .atomic_duplicate_state = atmel_hlcdc_crtc_duplicate_state,
+ .atomic_destroy_state = atmel_hlcdc_crtc_destroy_state,
};
int atmel_hlcdc_crtc_create(struct drm_device *dev)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 3d8d16402..8ded76457 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -50,6 +50,10 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9n12 = {
.min_height = 0,
.max_width = 1280,
.max_height = 860,
+ .max_spw = 0x3f,
+ .max_vpw = 0x3f,
+ .max_hpw = 0xff,
+ .conflicting_output_formats = true,
.nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9n12_layers),
.layers = atmel_hlcdc_at91sam9n12_layers,
};
@@ -134,6 +138,10 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_at91sam9x5 = {
.min_height = 0,
.max_width = 800,
.max_height = 600,
+ .max_spw = 0x3f,
+ .max_vpw = 0x3f,
+ .max_hpw = 0xff,
+ .conflicting_output_formats = true,
.nlayers = ARRAY_SIZE(atmel_hlcdc_at91sam9x5_layers),
.layers = atmel_hlcdc_at91sam9x5_layers,
};
@@ -237,6 +245,10 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d3 = {
.min_height = 0,
.max_width = 2048,
.max_height = 2048,
+ .max_spw = 0x3f,
+ .max_vpw = 0x3f,
+ .max_hpw = 0x1ff,
+ .conflicting_output_formats = true,
.nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d3_layers),
.layers = atmel_hlcdc_sama5d3_layers,
};
@@ -320,6 +332,9 @@ static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d4 = {
.min_height = 0,
.max_width = 2048,
.max_height = 2048,
+ .max_spw = 0xff,
+ .max_vpw = 0xff,
+ .max_hpw = 0x3ff,
.nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d4_layers),
.layers = atmel_hlcdc_sama5d4_layers,
};
@@ -358,19 +373,19 @@ int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
int hback_porch = mode->htotal - mode->hsync_end;
int hsync_len = mode->hsync_end - mode->hsync_start;
- if (hsync_len > 0x40 || hsync_len < 1)
+ if (hsync_len > dc->desc->max_spw + 1 || hsync_len < 1)
return MODE_HSYNC;
- if (vsync_len > 0x40 || vsync_len < 1)
+ if (vsync_len > dc->desc->max_spw + 1 || vsync_len < 1)
return MODE_VSYNC;
- if (hfront_porch > 0x200 || hfront_porch < 1 ||
- hback_porch > 0x200 || hback_porch < 1 ||
+ if (hfront_porch > dc->desc->max_hpw + 1 || hfront_porch < 1 ||
+ hback_porch > dc->desc->max_hpw + 1 || hback_porch < 1 ||
mode->hdisplay < 1)
return MODE_H_ILLEGAL;
- if (vfront_porch > 0x40 || vfront_porch < 1 ||
- vback_porch > 0x40 || vback_porch < 0 ||
+ if (vfront_porch > dc->desc->max_vpw + 1 || vfront_porch < 1 ||
+ vback_porch > dc->desc->max_vpw || vback_porch < 0 ||
mode->vdisplay < 1)
return MODE_V_ILLEGAL;
@@ -427,11 +442,102 @@ static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev)
}
}
+struct atmel_hlcdc_dc_commit {
+ struct work_struct work;
+ struct drm_device *dev;
+ struct drm_atomic_state *state;
+};
+
+static void
+atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
+{
+ struct drm_device *dev = commit->dev;
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+ struct drm_atomic_state *old_state = commit->state;
+
+ /* Apply the atomic update. */
+ drm_atomic_helper_commit_modeset_disables(dev, old_state);
+ drm_atomic_helper_commit_planes(dev, old_state, false);
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, old_state);
+
+ drm_atomic_helper_cleanup_planes(dev, old_state);
+
+ drm_atomic_state_free(old_state);
+
+	/* Complete the commit and wake up any waiters. */
+ spin_lock(&dc->commit.wait.lock);
+ dc->commit.pending = false;
+ wake_up_all_locked(&dc->commit.wait);
+ spin_unlock(&dc->commit.wait.lock);
+
+ kfree(commit);
+}
+
+static void atmel_hlcdc_dc_atomic_work(struct work_struct *work)
+{
+ struct atmel_hlcdc_dc_commit *commit =
+ container_of(work, struct atmel_hlcdc_dc_commit, work);
+
+ atmel_hlcdc_dc_atomic_complete(commit);
+}
+
+static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool async)
+{
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+ struct atmel_hlcdc_dc_commit *commit;
+ int ret;
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
+
+ /* Allocate the commit object. */
+ commit = kzalloc(sizeof(*commit), GFP_KERNEL);
+ if (!commit) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ INIT_WORK(&commit->work, atmel_hlcdc_dc_atomic_work);
+ commit->dev = dev;
+ commit->state = state;
+
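+	/* Commits are serialized: wait until no other commit is pending. */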
+ spin_lock(&dc->commit.wait.lock);
+ ret = wait_event_interruptible_locked(dc->commit.wait,
+ !dc->commit.pending);
+ if (ret == 0)
+ dc->commit.pending = true;
+ spin_unlock(&dc->commit.wait.lock);
+
+ if (ret) {
+ kfree(commit);
+ goto error;
+ }
+
+ /* Swap the state, this is the point of no return. */
+ drm_atomic_helper_swap_state(dev, state);
+
+ if (async)
+ queue_work(dc->wq, &commit->work);
+ else
+ atmel_hlcdc_dc_atomic_complete(commit);
+
+ return 0;
+
+error:
+ drm_atomic_helper_cleanup_planes(dev, state);
+ return ret;
+}
+
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = atmel_hlcdc_fb_create,
.output_poll_changed = atmel_hlcdc_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+ .atomic_commit = atmel_hlcdc_dc_atomic_commit,
};
static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
@@ -445,7 +551,7 @@ static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
ret = atmel_hlcdc_create_outputs(dev);
if (ret) {
- dev_err(dev->dev, "failed to create panel: %d\n", ret);
+ dev_err(dev->dev, "failed to create HLCDC outputs: %d\n", ret);
return ret;
}
@@ -509,6 +615,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
if (!dc->wq)
return -ENOMEM;
+ init_waitqueue_head(&dc->commit.wait);
dc->desc = match->data;
dc->hlcdc = dev_get_drvdata(dev->dev->parent);
dev->dev_private = dc;
@@ -584,38 +691,10 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
destroy_workqueue(dc->wq);
}
-static int atmel_hlcdc_dc_connector_plug_all(struct drm_device *dev)
-{
- struct drm_connector *connector, *failed;
- int ret;
-
- mutex_lock(&dev->mode_config.mutex);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- ret = drm_connector_register(connector);
- if (ret) {
- failed = connector;
- goto err;
- }
- }
- mutex_unlock(&dev->mode_config.mutex);
- return 0;
-
-err:
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (failed == connector)
- break;
-
- drm_connector_unregister(connector);
- }
- mutex_unlock(&dev->mode_config.mutex);
-
- return ret;
-}
-
static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev)
{
mutex_lock(&dev->mode_config.mutex);
- drm_connector_unplug_all(dev);
+ drm_connector_unregister_all(dev);
mutex_unlock(&dev->mode_config.mutex);
}
@@ -736,7 +815,7 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
- ret = atmel_hlcdc_dc_connector_plug_all(ddev);
+ ret = drm_connector_register_all(ddev);
if (ret)
goto err_unregister;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index fed517f29..7a47f8c09 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -50,6 +50,11 @@
* @min_height: minimum height supported by the Display Controller
* @max_width: maximum width supported by the Display Controller
* @max_height: maximum height supported by the Display Controller
+ * @max_spw: maximum vertical/horizontal sync pulse width
+ * @max_vpw: maximum vertical back/front porch width
+ * @max_hpw: maximum horizontal back/front porch width
+ * @conflicting_output_formats: true if RGBXXX output formats conflict with
+ * each other.
* @layers: a layer description table describing available layers
* @nlayers: layer description table size
*/
@@ -58,6 +63,10 @@ struct atmel_hlcdc_dc_desc {
int min_height;
int max_width;
int max_height;
+ int max_spw;
+ int max_vpw;
+ int max_hpw;
+ bool conflicting_output_formats;
const struct atmel_hlcdc_layer_desc *layers;
int nlayers;
};
@@ -128,6 +137,7 @@ struct atmel_hlcdc_planes {
* @planes: instantiated planes
* @layers: active HLCDC layer
* @wq: display controller workqueue
+ * @commit: used for async commit handling
*/
struct atmel_hlcdc_dc {
const struct atmel_hlcdc_dc_desc *desc;
@@ -137,6 +147,10 @@ struct atmel_hlcdc_dc {
struct atmel_hlcdc_planes *planes;
struct atmel_hlcdc_layer *layers[ATMEL_HLCDC_MAX_LAYERS];
struct workqueue_struct *wq;
+ struct {
+ wait_queue_head_t wait;
+ bool pending;
+ } commit;
};
extern struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_formats;
@@ -149,6 +163,7 @@ struct atmel_hlcdc_planes *
atmel_hlcdc_create_planes(struct drm_device *dev);
int atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state);
+int atmel_hlcdc_plane_prepare_ahb_routing(struct drm_crtc_state *c_state);
void atmel_hlcdc_crtc_irq(struct drm_crtc *c);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 0f7ec016e..3d34fc4ca 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -27,16 +27,6 @@
#include "atmel_hlcdc_dc.h"
/**
- * Atmel HLCDC RGB output mode
- */
-enum atmel_hlcdc_connector_rgb_mode {
- ATMEL_HLCDC_CONNECTOR_RGB444,
- ATMEL_HLCDC_CONNECTOR_RGB565,
- ATMEL_HLCDC_CONNECTOR_RGB666,
- ATMEL_HLCDC_CONNECTOR_RGB888,
-};
-
-/**
* Atmel HLCDC RGB connector structure
*
* This structure stores RGB slave device information.
@@ -44,13 +34,13 @@ enum atmel_hlcdc_connector_rgb_mode {
* @connector: DRM connector
* @encoder: DRM encoder
* @dc: pointer to the atmel_hlcdc_dc structure
- * @dpms: current DPMS mode
+ * @panel: panel connected to the RGB output
*/
struct atmel_hlcdc_rgb_output {
struct drm_connector connector;
struct drm_encoder encoder;
struct atmel_hlcdc_dc *dc;
- int dpms;
+ struct drm_panel *panel;
};
static inline struct atmel_hlcdc_rgb_output *
@@ -66,91 +56,31 @@ drm_encoder_to_atmel_hlcdc_rgb_output(struct drm_encoder *encoder)
return container_of(encoder, struct atmel_hlcdc_rgb_output, encoder);
}
-/**
- * Atmel HLCDC Panel device structure
- *
- * This structure is specialization of the slave device structure to
- * interface with drm panels.
- *
- * @base: base slave device fields
- * @panel: drm panel attached to this slave device
- */
-struct atmel_hlcdc_panel {
- struct atmel_hlcdc_rgb_output base;
- struct drm_panel *panel;
-};
-
-static inline struct atmel_hlcdc_panel *
-atmel_hlcdc_rgb_output_to_panel(struct atmel_hlcdc_rgb_output *output)
-{
- return container_of(output, struct atmel_hlcdc_panel, base);
-}
-
-static void atmel_hlcdc_panel_encoder_enable(struct drm_encoder *encoder)
+static void atmel_hlcdc_rgb_encoder_enable(struct drm_encoder *encoder)
{
struct atmel_hlcdc_rgb_output *rgb =
drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
- struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
- drm_panel_enable(panel->panel);
+ if (rgb->panel) {
+ drm_panel_prepare(rgb->panel);
+ drm_panel_enable(rgb->panel);
+ }
}
-static void atmel_hlcdc_panel_encoder_disable(struct drm_encoder *encoder)
+static void atmel_hlcdc_rgb_encoder_disable(struct drm_encoder *encoder)
{
struct atmel_hlcdc_rgb_output *rgb =
drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
- struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
- drm_panel_disable(panel->panel);
-}
-
-static bool
-atmel_hlcdc_panel_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
-{
- return true;
-}
-
-static void
-atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted)
-{
- struct atmel_hlcdc_rgb_output *rgb =
- drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
- struct drm_display_info *info = &rgb->connector.display_info;
- unsigned int cfg;
-
- cfg = 0;
-
- if (info->num_bus_formats) {
- switch (info->bus_formats[0]) {
- case MEDIA_BUS_FMT_RGB565_1X16:
- cfg |= ATMEL_HLCDC_CONNECTOR_RGB565 << 8;
- break;
- case MEDIA_BUS_FMT_RGB666_1X18:
- cfg |= ATMEL_HLCDC_CONNECTOR_RGB666 << 8;
- break;
- case MEDIA_BUS_FMT_RGB888_1X24:
- cfg |= ATMEL_HLCDC_CONNECTOR_RGB888 << 8;
- break;
- case MEDIA_BUS_FMT_RGB444_1X12:
- default:
- break;
- }
+ if (rgb->panel) {
+ drm_panel_disable(rgb->panel);
+ drm_panel_unprepare(rgb->panel);
}
-
- regmap_update_bits(rgb->dc->hlcdc->regmap, ATMEL_HLCDC_CFG(5),
- ATMEL_HLCDC_MODE_MASK,
- cfg);
}
static const struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = {
- .mode_fixup = atmel_hlcdc_panel_encoder_mode_fixup,
- .mode_set = atmel_hlcdc_rgb_encoder_mode_set,
- .disable = atmel_hlcdc_panel_encoder_disable,
- .enable = atmel_hlcdc_panel_encoder_enable,
+ .disable = atmel_hlcdc_rgb_encoder_disable,
+ .enable = atmel_hlcdc_rgb_encoder_enable,
};
static void atmel_hlcdc_rgb_encoder_destroy(struct drm_encoder *encoder)
@@ -167,9 +97,11 @@ static int atmel_hlcdc_panel_get_modes(struct drm_connector *connector)
{
struct atmel_hlcdc_rgb_output *rgb =
drm_connector_to_atmel_hlcdc_rgb_output(connector);
- struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
- return panel->panel->funcs->get_modes(panel->panel);
+ if (rgb->panel)
+ return rgb->panel->funcs->get_modes(rgb->panel);
+
+ return 0;
}
static int atmel_hlcdc_rgb_mode_valid(struct drm_connector *connector,
@@ -201,7 +133,13 @@ static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helpe
static enum drm_connector_status
atmel_hlcdc_panel_connector_detect(struct drm_connector *connector, bool force)
{
- return connector_status_connected;
+ struct atmel_hlcdc_rgb_output *rgb =
+ drm_connector_to_atmel_hlcdc_rgb_output(connector);
+
+ if (rgb->panel)
+ return connector_status_connected;
+
+ return connector_status_disconnected;
}
static void
@@ -209,9 +147,10 @@ atmel_hlcdc_panel_connector_destroy(struct drm_connector *connector)
{
struct atmel_hlcdc_rgb_output *rgb =
drm_connector_to_atmel_hlcdc_rgb_output(connector);
- struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
- drm_panel_detach(panel->panel);
+ if (rgb->panel)
+ drm_panel_detach(rgb->panel);
+
drm_connector_cleanup(connector);
}
@@ -225,88 +164,124 @@ static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int atmel_hlcdc_create_panel_output(struct drm_device *dev,
- struct of_endpoint *ep)
+static int atmel_hlcdc_check_endpoint(struct drm_device *dev,
+ const struct of_endpoint *ep)
{
- struct atmel_hlcdc_dc *dc = dev->dev_private;
struct device_node *np;
- struct drm_panel *p = NULL;
- struct atmel_hlcdc_panel *panel;
- int ret;
+ void *obj;
np = of_graph_get_remote_port_parent(ep->local_node);
- if (!np)
- return -EINVAL;
- p = of_drm_find_panel(np);
+ obj = of_drm_find_panel(np);
+ if (!obj)
+ obj = of_drm_find_bridge(np);
+
of_node_put(np);
- if (!p)
- return -EPROBE_DEFER;
+ return obj ? 0 : -EPROBE_DEFER;
+}
- panel = devm_kzalloc(dev->dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -EINVAL;
+static int atmel_hlcdc_attach_endpoint(struct drm_device *dev,
+ const struct of_endpoint *ep)
+{
+ struct atmel_hlcdc_dc *dc = dev->dev_private;
+ struct atmel_hlcdc_rgb_output *output;
+ struct device_node *np;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ int ret;
- panel->base.dpms = DRM_MODE_DPMS_OFF;
+ output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL);
+ if (!output)
+ return -EINVAL;
- panel->base.dc = dc;
+ output->dc = dc;
- drm_encoder_helper_add(&panel->base.encoder,
+ drm_encoder_helper_add(&output->encoder,
&atmel_hlcdc_panel_encoder_helper_funcs);
- ret = drm_encoder_init(dev, &panel->base.encoder,
+ ret = drm_encoder_init(dev, &output->encoder,
&atmel_hlcdc_panel_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ DRM_MODE_ENCODER_NONE, NULL);
if (ret)
return ret;
- panel->base.connector.dpms = DRM_MODE_DPMS_OFF;
- panel->base.connector.polled = DRM_CONNECTOR_POLL_CONNECT;
- drm_connector_helper_add(&panel->base.connector,
- &atmel_hlcdc_panel_connector_helper_funcs);
- ret = drm_connector_init(dev, &panel->base.connector,
- &atmel_hlcdc_panel_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- if (ret)
- goto err_encoder_cleanup;
+ output->encoder.possible_crtcs = 0x1;
- drm_mode_connector_attach_encoder(&panel->base.connector,
- &panel->base.encoder);
- panel->base.encoder.possible_crtcs = 0x1;
+ np = of_graph_get_remote_port_parent(ep->local_node);
- drm_panel_attach(p, &panel->base.connector);
- panel->panel = p;
+ ret = -EPROBE_DEFER;
+
+ panel = of_drm_find_panel(np);
+ if (panel) {
+ of_node_put(np);
+ output->connector.dpms = DRM_MODE_DPMS_OFF;
+ output->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
+ drm_connector_helper_add(&output->connector,
+ &atmel_hlcdc_panel_connector_helper_funcs);
+ ret = drm_connector_init(dev, &output->connector,
+ &atmel_hlcdc_panel_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ goto err_encoder_cleanup;
+
+ drm_mode_connector_attach_encoder(&output->connector,
+ &output->encoder);
+
+ ret = drm_panel_attach(panel, &output->connector);
+ if (ret) {
+ drm_connector_cleanup(&output->connector);
+ goto err_encoder_cleanup;
+ }
- return 0;
+ output->panel = panel;
+
+ return 0;
+ }
+
+ bridge = of_drm_find_bridge(np);
+ of_node_put(np);
+
+ if (bridge) {
+ output->encoder.bridge = bridge;
+ bridge->encoder = &output->encoder;
+ ret = drm_bridge_attach(dev, bridge);
+ if (!ret)
+ return 0;
+ }
err_encoder_cleanup:
- drm_encoder_cleanup(&panel->base.encoder);
+ drm_encoder_cleanup(&output->encoder);
return ret;
}
int atmel_hlcdc_create_outputs(struct drm_device *dev)
{
- struct device_node *port_np, *np;
+ struct device_node *ep_np = NULL;
struct of_endpoint ep;
int ret;
- port_np = of_get_child_by_name(dev->dev->of_node, "port");
- if (!port_np)
- return -EINVAL;
-
- np = of_get_child_by_name(port_np, "endpoint");
- of_node_put(port_np);
+ for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
+ ret = of_graph_parse_endpoint(ep_np, &ep);
+ if (!ret)
+ ret = atmel_hlcdc_check_endpoint(dev, &ep);
- if (!np)
- return -EINVAL;
+ if (ret) {
+ of_node_put(ep_np);
+ return ret;
+ }
+ }
- ret = of_graph_parse_endpoint(np, &ep);
- of_node_put(port_np);
+ for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
+ ret = of_graph_parse_endpoint(ep_np, &ep);
+ if (!ret)
+ ret = atmel_hlcdc_attach_endpoint(dev, &ep);
- if (ret)
- return ret;
+ if (ret) {
+ of_node_put(ep_np);
+ return ret;
+ }
+ }
- /* We currently only support panel output */
- return atmel_hlcdc_create_panel_output(dev, &ep);
+ return 0;
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 6d9c0f5bc..016c19122 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -37,6 +37,7 @@
* @xstride: value to add to the pixel pointer between each line
* @pstride: value to add to the pixel pointer between each pixel
* @nplanes: number of planes (deduced from pixel_format)
+ * @prepared: plane update has been prepared
*/
struct atmel_hlcdc_plane_state {
struct drm_plane_state base;
@@ -58,12 +59,15 @@ struct atmel_hlcdc_plane_state {
int disc_w;
int disc_h;
+ int ahb_id;
+
/* These fields are private and should not be touched */
int bpp[ATMEL_HLCDC_MAX_PLANES];
unsigned int offsets[ATMEL_HLCDC_MAX_PLANES];
int xstride[ATMEL_HLCDC_MAX_PLANES];
int pstride[ATMEL_HLCDC_MAX_PLANES];
int nplanes;
+ bool prepared;
};
static inline struct atmel_hlcdc_plane_state *
@@ -361,8 +365,10 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
atmel_hlcdc_layer_update_cfg(&plane->layer,
ATMEL_HLCDC_LAYER_DMA_CFG_ID,
- ATMEL_HLCDC_LAYER_DMA_BLEN_MASK,
- ATMEL_HLCDC_LAYER_DMA_BLEN_INCR16);
+ ATMEL_HLCDC_LAYER_DMA_BLEN_MASK |
+ ATMEL_HLCDC_LAYER_DMA_SIF,
+ ATMEL_HLCDC_LAYER_DMA_BLEN_INCR16 |
+ state->ahb_id);
atmel_hlcdc_layer_update_cfg(&plane->layer, layout->general_config,
ATMEL_HLCDC_LAYER_ITER2BL |
@@ -437,6 +443,41 @@ static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane,
}
}
+int atmel_hlcdc_plane_prepare_ahb_routing(struct drm_crtc_state *c_state)
+{
+ unsigned int ahb_load[2] = { };
+ struct drm_plane *plane;
+
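+	/*
+	 * Spread plane fetches across the two AHB master interfaces:
+	 * each plane is routed to the currently less loaded bus, with
+	 * load estimated as visible (non-discarded) pixels times bpp.
+	 */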
+ drm_atomic_crtc_state_for_each_plane(plane, c_state) {
+ struct atmel_hlcdc_plane_state *plane_state;
+ struct drm_plane_state *plane_s;
+ unsigned int pixels, load = 0;
+ int i;
+
+ plane_s = drm_atomic_get_plane_state(c_state->state, plane);
+ if (IS_ERR(plane_s))
+ return PTR_ERR(plane_s);
+
+ plane_state =
+ drm_plane_state_to_atmel_hlcdc_plane_state(plane_s);
+
+ pixels = (plane_state->src_w * plane_state->src_h) -
+ (plane_state->disc_w * plane_state->disc_h);
+
+ for (i = 0; i < plane_state->nplanes; i++)
+ load += pixels * plane_state->bpp[i];
+
+ if (ahb_load[0] <= ahb_load[1])
+ plane_state->ahb_id = 0;
+ else
+ plane_state->ahb_id = 1;
+
+ ahb_load[plane_state->ahb_id] += load;
+ }
+
+ return 0;
+}
+
int
atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state)
{
@@ -716,12 +757,54 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
static int atmel_hlcdc_plane_prepare_fb(struct drm_plane *p,
const struct drm_plane_state *new_state)
{
+ /*
+ * FIXME: we should avoid this const -> non-const cast but it's
+ * currently the only solution we have to modify the ->prepared
+	 * state and roll back the update request.
+	 * Ideally, we should rework the code to attach all the resources
+	 * to atmel_hlcdc_plane_state (including the DMA desc allocation),
+	 * but this requires a complete rework of the atmel_hlcdc_layer
+ * code.
+ */
+ struct drm_plane_state *s = (struct drm_plane_state *)new_state;
+ struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+ struct atmel_hlcdc_plane_state *state =
+ drm_plane_state_to_atmel_hlcdc_plane_state(s);
+ int ret;
+
+ ret = atmel_hlcdc_layer_update_start(&plane->layer);
+ if (!ret)
+ state->prepared = true;
+
+ return ret;
+}
+
+static void atmel_hlcdc_plane_cleanup_fb(struct drm_plane *p,
+ const struct drm_plane_state *old_state)
+{
+ /*
+ * FIXME: we should avoid this const -> non-const cast but it's
+ * currently the only solution we have to modify the ->prepared
+	 * state and roll back the update request.
+	 * Ideally, we should rework the code to attach all the resources
+	 * to atmel_hlcdc_plane_state (including the DMA desc allocation),
+	 * but this requires a complete rework of the atmel_hlcdc_layer
+ * code.
+ */
+ struct drm_plane_state *s = (struct drm_plane_state *)old_state;
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
+ struct atmel_hlcdc_plane_state *state =
+ drm_plane_state_to_atmel_hlcdc_plane_state(s);
- if (!new_state->fb)
- return 0;
+ /*
+	 * The request has already been applied or cancelled; nothing to do
+ * here.
+ */
+ if (!state->prepared)
+ return;
- return atmel_hlcdc_layer_update_start(&plane->layer);
+ atmel_hlcdc_layer_update_rollback(&plane->layer);
+ state->prepared = false;
}
static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
@@ -846,6 +929,7 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
static struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
.prepare_fb = atmel_hlcdc_plane_prepare_fb,
+ .cleanup_fb = atmel_hlcdc_plane_cleanup_fb,
.atomic_check = atmel_hlcdc_plane_atomic_check,
.atomic_update = atmel_hlcdc_plane_atomic_update,
.atomic_disable = atmel_hlcdc_plane_atomic_disable,
@@ -885,6 +969,7 @@ atmel_hlcdc_plane_atomic_duplicate_state(struct drm_plane *p)
return NULL;
copy->disc_updated = false;
+ copy->prepared = false;
if (copy->base.fb)
drm_framebuffer_reference(copy->base.fb);
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 7520bf81f..e1ec498a6 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -82,7 +82,7 @@ static int bochsfb_create(struct drm_fb_helper *helper,
bo = gem_to_bochs_bo(gobj);
- ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
+ ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
if (ret)
return ret;
@@ -162,22 +162,7 @@ static int bochs_fbdev_destroy(struct bochs_device *bochs)
return 0;
}
-void bochs_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
-}
-
-void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
-{
- *red = regno;
- *green = regno;
- *blue = regno;
-}
-
static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
- .gamma_set = bochs_fb_gamma_set,
- .gamma_get = bochs_fb_gamma_get,
.fb_probe = bochsfb_create,
};
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 96926f09e..207a2cbcc 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -43,7 +43,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb) {
bochs_fb = to_bochs_framebuffer(old_fb);
bo = gem_to_bochs_bo(bochs_fb->obj);
- ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
+ ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
if (ret) {
DRM_ERROR("failed to reserve old_fb bo\n");
} else {
@@ -57,7 +57,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
bo = gem_to_bochs_bo(bochs_fb->obj);
- ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
+ ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
if (ret)
return ret;
@@ -93,11 +93,6 @@ static void bochs_crtc_commit(struct drm_crtc *crtc)
{
}
-static void bochs_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
-{
-}
-
static int bochs_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
@@ -120,7 +115,6 @@ static int bochs_crtc_page_flip(struct drm_crtc *crtc,
/* These provide the minimum set of functions required to handle a CRTC */
static const struct drm_crtc_funcs bochs_crtc_funcs = {
- .gamma_set = bochs_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = drm_crtc_cleanup,
.page_flip = bochs_crtc_page_flip,
@@ -140,7 +134,6 @@ static void bochs_crtc_init(struct drm_device *dev)
struct drm_crtc *crtc = &bochs->crtc;
drm_crtc_init(dev, crtc, &bochs_crtc_funcs);
- drm_mode_crtc_set_gamma_size(crtc, 256);
drm_crtc_helper_add(crtc, &bochs_helper_funcs);
}
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index d812ad014..6cf912c45 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -212,6 +212,8 @@ struct ttm_bo_driver bochs_bo_driver = {
.verify_access = bochs_bo_verify_access,
.io_mem_reserve = &bochs_ttm_io_mem_reserve,
.io_mem_free = &bochs_ttm_io_mem_free,
+ .lru_tail = &ttm_bo_default_lru_tail,
+ .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int bochs_mm_init(struct bochs_device *bochs)
@@ -456,7 +458,7 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
struct drm_gem_object *obj;
struct bochs_bo *bo;
- obj = drm_gem_object_lookup(dev, file, handle);
+ obj = drm_gem_object_lookup(file, handle);
if (obj == NULL)
return -ENOENT;
@@ -518,7 +520,7 @@ bochs_user_framebuffer_create(struct drm_device *dev,
if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
return ERR_PTR(-ENOENT);
- obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 27e2022de..8f7423f18 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -7,6 +7,16 @@ config DRM_BRIDGE
menu "Display Interface Bridges"
depends on DRM && DRM_BRIDGE
+config DRM_ANALOGIX_ANX78XX
+ tristate "Analogix ANX78XX bridge"
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ ---help---
+	  ANX78XX is an ultra-low power Full-HD SlimPort transmitter
+ designed for portable devices. The ANX78XX transforms
+ the HDMI output of an application processor to MyDP
+ or DisplayPort.
+
config DRM_DW_HDMI
tristate
select DRM_KMS_HELPER
@@ -40,4 +50,6 @@ config DRM_PARADE_PS8622
---help---
Parade eDP-LVDS bridge chip driver.
+source "drivers/gpu/drm/bridge/analogix/Kconfig"
+
endmenu
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index f13c33d67..96b13b30e 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,6 +1,8 @@
ccflags-y := -Iinclude/drm
+obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
new file mode 100644
index 000000000..d087b054c
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -0,0 +1,1514 @@
+/*
+ * Copyright(c) 2016, Analogix Semiconductor.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Based on anx7808 driver obtained from chromeos with copyright:
+ * Copyright(c) 2013, Google Inc.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+
+#include "analogix-anx78xx.h"
+
+#define I2C_NUM_ADDRESSES 5
+#define I2C_IDX_TX_P0 0
+#define I2C_IDX_TX_P1 1
+#define I2C_IDX_TX_P2 2
+#define I2C_IDX_RX_P0 3
+#define I2C_IDX_RX_P1 4
+
+#define XTAL_CLK 270 /* 27M */
+#define AUX_CH_BUFFER_SIZE 16
+#define AUX_WAIT_TIMEOUT_MS 15
+
+static const u8 anx78xx_i2c_addresses[] = {
+ [I2C_IDX_TX_P0] = TX_P0,
+ [I2C_IDX_TX_P1] = TX_P1,
+ [I2C_IDX_TX_P2] = TX_P2,
+ [I2C_IDX_RX_P0] = RX_P0,
+ [I2C_IDX_RX_P1] = RX_P1,
+};
+
+struct anx78xx_platform_data {
+ struct regulator *dvdd10;
+ struct gpio_desc *gpiod_hpd;
+ struct gpio_desc *gpiod_pd;
+ struct gpio_desc *gpiod_reset;
+
+ int hpd_irq;
+ int intp_irq;
+};
+
+struct anx78xx {
+ struct drm_dp_aux aux;
+ struct drm_bridge bridge;
+ struct i2c_client *client;
+ struct edid *edid;
+ struct drm_connector connector;
+ struct drm_dp_link link;
+ struct anx78xx_platform_data pdata;
+ struct mutex lock;
+
+ /*
+ * I2C Slave addresses of ANX7814 are mapped as TX_P0, TX_P1, TX_P2,
+ * RX_P0 and RX_P1.
+ */
+ struct i2c_client *i2c_dummy[I2C_NUM_ADDRESSES];
+ struct regmap *map[I2C_NUM_ADDRESSES];
+
+ u16 chipid;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+ bool powered;
+};
+
+static inline struct anx78xx *connector_to_anx78xx(struct drm_connector *c)
+{
+ return container_of(c, struct anx78xx, connector);
+}
+
+static inline struct anx78xx *bridge_to_anx78xx(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct anx78xx, bridge);
+}
+
+static int anx78xx_set_bits(struct regmap *map, u8 reg, u8 mask)
+{
+ return regmap_update_bits(map, reg, mask, mask);
+}
+
+static int anx78xx_clear_bits(struct regmap *map, u8 reg, u8 mask)
+{
+ return regmap_update_bits(map, reg, mask, 0);
+}
+
+static bool anx78xx_aux_op_finished(struct anx78xx *anx78xx)
+{
+ unsigned int value;
+ int err;
+
+ err = regmap_read(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL2_REG,
+ &value);
+ if (err < 0)
+ return false;
+
+ return (value & SP_AUX_EN) == 0;
+}
+
+static int anx78xx_aux_wait(struct anx78xx *anx78xx)
+{
+ unsigned long timeout;
+ unsigned int status;
+ int err;
+
+ timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1;
+
+ while (!anx78xx_aux_op_finished(anx78xx)) {
+ if (time_after(jiffies, timeout)) {
+ if (!anx78xx_aux_op_finished(anx78xx)) {
+				DRM_ERROR("Timed out waiting for AUX to finish\n");
+ return -ETIMEDOUT;
+ }
+
+ break;
+ }
+
+ usleep_range(1000, 2000);
+ }
+
+ /* Read the AUX channel access status */
+ err = regmap_read(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_CH_STATUS_REG,
+ &status);
+ if (err < 0) {
+ DRM_ERROR("Failed to read from AUX channel: %d\n", err);
+ return err;
+ }
+
+ if (status & SP_AUX_STATUS) {
+ DRM_ERROR("Failed to wait for AUX channel (status: %02x)\n",
+ status);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int anx78xx_aux_address(struct anx78xx *anx78xx, unsigned int addr)
+{
+ int err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_ADDR_7_0_REG,
+ addr & 0xff);
+ if (err)
+ return err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_ADDR_15_8_REG,
+ (addr & 0xff00) >> 8);
+ if (err)
+ return err;
+
+ /*
+ * DP AUX CH Address Register #2, only update bits[3:0]
+ * [7:4] RESERVED
+ * [3:0] AUX_ADDR[19:16], Register control AUX CH address.
+ */
+ err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_AUX_ADDR_19_16_REG,
+ SP_AUX_ADDR_19_16_MASK,
+ (addr & 0xf0000) >> 16);
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static ssize_t anx78xx_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct anx78xx *anx78xx = container_of(aux, struct anx78xx, aux);
+ u8 ctrl1 = msg->request;
+ u8 ctrl2 = SP_AUX_EN;
+ u8 *buffer = msg->buffer;
+ int err;
+
+	/* The DP AUX transmit and receive buffer holds 16 bytes. */
+ if (WARN_ON(msg->size > AUX_CH_BUFFER_SIZE))
+ return -E2BIG;
+
+ /* Zero-sized messages specify address-only transactions. */
+ if (msg->size < 1)
+ ctrl2 |= SP_ADDR_ONLY;
+	else /* For non-zero-sized messages, set the length field. */
+ ctrl1 |= (msg->size - 1) << SP_AUX_LENGTH_SHIFT;
+
+ if ((msg->request & DP_AUX_I2C_READ) == 0) {
+		/* For write requests (WRITE or WRITE | MOT), load the data buffer */
+ err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_BUF_DATA0_REG, buffer,
+ msg->size);
+ if (err)
+ return err;
+ }
+
+ /* Write address and request */
+ err = anx78xx_aux_address(anx78xx, msg->address);
+ if (err)
+ return err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL1_REG,
+ ctrl1);
+ if (err)
+ return err;
+
+ /* Start transaction */
+ err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_AUX_CH_CTRL2_REG, SP_ADDR_ONLY |
+ SP_AUX_EN, ctrl2);
+ if (err)
+ return err;
+
+ err = anx78xx_aux_wait(anx78xx);
+ if (err)
+ return err;
+
+ msg->reply = DP_AUX_I2C_REPLY_ACK;
+
+ if ((msg->size > 0) && (msg->request & DP_AUX_I2C_READ)) {
+ /* Read values from data buffer */
+ err = regmap_bulk_read(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_BUF_DATA0_REG, buffer,
+ msg->size);
+ if (err)
+ return err;
+ }
+
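+	/* Clear the address-only bit before the next transaction. */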
+ err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_AUX_CH_CTRL2_REG, SP_ADDR_ONLY);
+ if (err)
+ return err;
+
+ return msg->size;
+}
+
+static int anx78xx_set_hpd(struct anx78xx *anx78xx)
+{
+ int err;
+
+ err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
+ SP_TMDS_CTRL_BASE + 7, SP_PD_RT);
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL3_REG,
+ SP_HPD_OUT);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int anx78xx_clear_hpd(struct anx78xx *anx78xx)
+{
+ int err;
+
+ err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL3_REG,
+ SP_HPD_OUT);
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+ SP_TMDS_CTRL_BASE + 7, SP_PD_RT);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const struct reg_sequence tmds_phy_initialization[] = {
+ { SP_TMDS_CTRL_BASE + 1, 0x90 },
+ { SP_TMDS_CTRL_BASE + 2, 0xa9 },
+ { SP_TMDS_CTRL_BASE + 6, 0x92 },
+ { SP_TMDS_CTRL_BASE + 7, 0x80 },
+ { SP_TMDS_CTRL_BASE + 20, 0xf2 },
+ { SP_TMDS_CTRL_BASE + 22, 0xc4 },
+ { SP_TMDS_CTRL_BASE + 23, 0x18 },
+};
+
+static int anx78xx_rx_initialization(struct anx78xx *anx78xx)
+{
+ int err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
+ SP_AUD_MUTE | SP_VID_MUTE);
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], SP_CHIP_CTRL_REG,
+ SP_MAN_HDMI5V_DET | SP_PLLLOCK_CKDT_EN |
+ SP_DIGITAL_CKDT_EN);
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+ SP_SOFTWARE_RESET1_REG, SP_HDCP_MAN_RST |
+ SP_SW_MAN_RST | SP_TMDS_RST | SP_VIDEO_RST);
+ if (err)
+ return err;
+
+ err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
+ SP_SOFTWARE_RESET1_REG, SP_HDCP_MAN_RST |
+ SP_SW_MAN_RST | SP_TMDS_RST | SP_VIDEO_RST);
+ if (err)
+ return err;
+
+ /* Sync detect change, GP set mute */
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+ SP_AUD_EXCEPTION_ENABLE_BASE + 1, BIT(5) |
+ BIT(6));
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+ SP_AUD_EXCEPTION_ENABLE_BASE + 3,
+ SP_AEC_EN21);
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], SP_AUDVID_CTRL_REG,
+ SP_AVC_EN | SP_AAC_OE | SP_AAC_EN);
+ if (err)
+ return err;
+
+ err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0],
+ SP_SYSTEM_POWER_DOWN1_REG, SP_PWDN_CTRL);
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0],
+ SP_VID_DATA_RANGE_CTRL_REG, SP_R2Y_INPUT_LIMIT);
+ if (err)
+ return err;
+
+ /* Enable DDC stretch */
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_EXTRA_I2C_DEV_ADDR_REG, SP_I2C_EXTRA_ADDR);
+ if (err)
+ return err;
+
+ /* TMDS phy initialization */
+ err = regmap_multi_reg_write(anx78xx->map[I2C_IDX_RX_P0],
+ tmds_phy_initialization,
+ ARRAY_SIZE(tmds_phy_initialization));
+ if (err)
+ return err;
+
+ err = anx78xx_clear_hpd(anx78xx);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static const u8 dp_tx_output_precise_tune_bits[20] = {
+ 0x01, 0x03, 0x07, 0x7f, 0x71, 0x6b, 0x7f,
+ 0x73, 0x7f, 0x7f, 0x00, 0x00, 0x00, 0x00,
+ 0x0c, 0x42, 0x1e, 0x3e, 0x72, 0x7e,
+};
+
+static int anx78xx_link_phy_initialization(struct anx78xx *anx78xx)
+{
+ int err;
+
+ /*
+ * REVISIT: this writes to RESERVED bits in the Analog Control 0
+ * register.
+ */
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_ANALOG_CTRL0_REG,
+ 0x02);
+ if (err)
+ return err;
+
+ /*
+ * Write DP TX output emphasis precise tune bits.
+ */
+ err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P1],
+ SP_DP_TX_LT_CTRL0_REG,
+ dp_tx_output_precise_tune_bits,
+ ARRAY_SIZE(dp_tx_output_precise_tune_bits));
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int anx78xx_xtal_clk_sel(struct anx78xx *anx78xx)
+{
+ unsigned int value;
+ int err;
+
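+ /*
+ * Program the AUX wait counters and the 10us/millisecond timers from
+ * the crystal frequency. XTAL_CLK is defined earlier in this file (not
+ * visible in this hunk); assuming it holds the crystal frequency in
+ * 100 kHz units (270 for 27 MHz), it also equals the number of crystal
+ * cycles in 10 us, which is what the 10us timer registers are loaded
+ * with below.
+ */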
+ err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P2],
+ SP_ANALOG_DEBUG2_REG,
+ SP_XTAL_FRQ | SP_FORCE_SW_OFF_BYPASS,
+ SP_XTAL_FRQ_27M);
+ if (err)
+ return err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL3_REG,
+ XTAL_CLK & SP_WAIT_COUNTER_7_0_MASK);
+ if (err)
+ return err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL4_REG,
+ ((XTAL_CLK & 0xff00) >> 2) | (XTAL_CLK / 10));
+ if (err)
+ return err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+ SP_I2C_GEN_10US_TIMER0_REG, XTAL_CLK & 0xff);
+ if (err)
+ return err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+ SP_I2C_GEN_10US_TIMER1_REG,
+ (XTAL_CLK & 0xff00) >> 8);
+ if (err)
+ return err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_MISC_CTRL_REG,
+ XTAL_CLK / 10 - 1);
+ if (err)
+ return err;
+
+ err = regmap_read(anx78xx->map[I2C_IDX_RX_P0],
+ SP_HDMI_US_TIMER_CTRL_REG,
+ &value);
+ if (err)
+ return err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_RX_P0],
+ SP_HDMI_US_TIMER_CTRL_REG,
+ (value & SP_MS_TIMER_MARGIN_10_8_MASK) |
+ ((((XTAL_CLK / 10) >> 1) - 2) << 3));
+ if (err)
+ return err;
+
+ return 0;
+}
+
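+/*
+ * OTP key protect password sequence; writing these three bytes during
+ * transmitter initialization presumably unlocks access to the
+ * OTP-stored HDCP keys.
+ */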
+static const struct reg_sequence otp_key_protect[] = {
+ { SP_OTP_KEY_PROTECT1_REG, SP_OTP_PSW1 },
+ { SP_OTP_KEY_PROTECT2_REG, SP_OTP_PSW2 },
+ { SP_OTP_KEY_PROTECT3_REG, SP_OTP_PSW3 },
+};
+
+static int anx78xx_tx_initialization(struct anx78xx *anx78xx)
+{
+ int err;
+
+ /* Set the AUX termination resistor to 50 ohm */
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL2_REG,
+ 0x30);
+ if (err)
+ return err;
+
+ /* Enable aux double diff output */
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_AUX_CH_CTRL2_REG, 0x08);
+ if (err)
+ return err;
+
+ err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_HDCP_CTRL_REG, SP_AUTO_EN |
+ SP_AUTO_START);
+ if (err)
+ return err;
+
+ err = regmap_multi_reg_write(anx78xx->map[I2C_IDX_TX_P0],
+ otp_key_protect,
+ ARRAY_SIZE(otp_key_protect));
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_HDCP_KEY_COMMAND_REG, SP_DISABLE_SYNC_HDCP);
+ if (err)
+ return err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL8_REG,
+ SP_VID_VRES_TH);
+ if (err)
+ return err;
+
+ /*
+ * DP HDCP auto authentication wait timer (when downstream starts to
+ * auth, DP side will wait for this period then do auth automatically)
+ */
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_HDCP_AUTO_TIMER_REG,
+ 0x00);
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_HDCP_CTRL_REG, SP_LINK_POLLING);
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_LINK_DEBUG_CTRL_REG, SP_M_VID_DEBUG);
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2],
+ SP_ANALOG_DEBUG2_REG, SP_POWERON_TIME_1P5MS);
+ if (err)
+ return err;
+
+ err = anx78xx_xtal_clk_sel(anx78xx);
+ if (err)
+ return err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_DEFER_CTRL_REG,
+ SP_DEFER_CTRL_EN | 0x0c);
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_POLLING_CTRL_REG,
+ SP_AUTO_POLLING_DISABLE);
+ if (err)
+ return err;
+
+ /*
+ * Short the link integrity check timer to speed up bstatus
+ * polling for HDCP CTS item 1A-07
+ */
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+ SP_HDCP_LINK_CHECK_TIMER_REG, 0x1d);
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_MISC_CTRL_REG, SP_EQ_TRAINING_LOOP);
+ if (err)
+ return err;
+
+ /* Power down the main link by default */
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_ANALOG_POWER_DOWN_REG, SP_CH0_PD);
+ if (err)
+ return err;
+
+ err = anx78xx_link_phy_initialization(anx78xx);
+ if (err)
+ return err;
+
+ /* Gen m_clk with downspreading */
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_M_CALCULATION_CTRL_REG, SP_M_GEN_CLK_SEL);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int anx78xx_enable_interrupts(struct anx78xx *anx78xx)
+{
+ int err;
+
+ /*
+ * BIT0: INT pin assertion polarity: 1 = assert high
+ * BIT1: INT pin output type: 0 = push/pull
+ */
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_INT_CTRL_REG, 0x01);
+ if (err)
+ return err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P2],
+ SP_COMMON_INT_MASK4_REG, SP_HPD_LOST | SP_HPD_PLUG);
+ if (err)
+ return err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_MASK1_REG,
+ SP_TRAINING_FINISH);
+ if (err)
+ return err;
+
+ err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_INT_MASK1_REG,
+ SP_CKDT_CHG | SP_SCDT_CHG);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void anx78xx_poweron(struct anx78xx *anx78xx)
+{
+ struct anx78xx_platform_data *pdata = &anx78xx->pdata;
+ int err;
+
+ if (WARN_ON(anx78xx->powered))
+ return;
+
+ if (pdata->dvdd10) {
+ err = regulator_enable(pdata->dvdd10);
+ if (err) {
+ DRM_ERROR("Failed to enable DVDD10 regulator: %d\n",
+ err);
+ return;
+ }
+
+ usleep_range(1000, 2000);
+ }
+
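+ /*
+ * Power-up sequence: assert reset, release the power-down line, then
+ * deassert reset, with 1-2 ms of settling time between each step.
+ */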
+ gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
+ usleep_range(1000, 2000);
+
+ gpiod_set_value_cansleep(pdata->gpiod_pd, 0);
+ usleep_range(1000, 2000);
+
+ gpiod_set_value_cansleep(pdata->gpiod_reset, 0);
+
+ /* Power on registers module */
+ anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
+ SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
+ anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
+ SP_REGISTER_PD | SP_TOTAL_PD);
+
+ anx78xx->powered = true;
+}
+
+static void anx78xx_poweroff(struct anx78xx *anx78xx)
+{
+ struct anx78xx_platform_data *pdata = &anx78xx->pdata;
+ int err;
+
+ if (WARN_ON(!anx78xx->powered))
+ return;
+
+ gpiod_set_value_cansleep(pdata->gpiod_reset, 1);
+ usleep_range(1000, 2000);
+
+ gpiod_set_value_cansleep(pdata->gpiod_pd, 1);
+ usleep_range(1000, 2000);
+
+ if (pdata->dvdd10) {
+ err = regulator_disable(pdata->dvdd10);
+ if (err) {
+ DRM_ERROR("Failed to disable DVDD10 regulator: %d\n",
+ err);
+ return;
+ }
+
+ usleep_range(1000, 2000);
+ }
+
+ anx78xx->powered = false;
+}
+
+static int anx78xx_start(struct anx78xx *anx78xx)
+{
+ int err;
+
+ /* Power on all modules */
+ err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
+ SP_POWERDOWN_CTRL_REG,
+ SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD |
+ SP_LINK_PD);
+ if (err)
+ goto err_poweroff;
+
+ err = anx78xx_enable_interrupts(anx78xx);
+ if (err) {
+ DRM_ERROR("Failed to enable interrupts: %d\n", err);
+ goto err_poweroff;
+ }
+
+ err = anx78xx_rx_initialization(anx78xx);
+ if (err) {
+ DRM_ERROR("Failed receiver initialization: %d\n", err);
+ goto err_poweroff;
+ }
+
+ err = anx78xx_tx_initialization(anx78xx);
+ if (err) {
+ DRM_ERROR("Failed transmitter initialization: %d\n", err);
+ goto err_poweroff;
+ }
+
+ /*
+ * This delay seems to help keep the hardware in a good state. Without
+ * it, there are times when it fails silently.
+ */
+ usleep_range(10000, 15000);
+
+ return 0;
+
+err_poweroff:
+ DRM_ERROR("Failed SlimPort transmitter initialization: %d\n", err);
+ anx78xx_poweroff(anx78xx);
+
+ return err;
+}
+
+static int anx78xx_init_pdata(struct anx78xx *anx78xx)
+{
+ struct anx78xx_platform_data *pdata = &anx78xx->pdata;
+ struct device *dev = &anx78xx->client->dev;
+
+ /* 1.0V digital core power regulator */
+ pdata->dvdd10 = devm_regulator_get(dev, "dvdd10");
+ if (IS_ERR(pdata->dvdd10)) {
+ DRM_ERROR("DVDD10 regulator not found\n");
+ return PTR_ERR(pdata->dvdd10);
+ }
+
+ /* GPIO for HPD */
+ pdata->gpiod_hpd = devm_gpiod_get(dev, "hpd", GPIOD_IN);
+ if (IS_ERR(pdata->gpiod_hpd))
+ return PTR_ERR(pdata->gpiod_hpd);
+
+ /* GPIO for chip power down */
+ pdata->gpiod_pd = devm_gpiod_get(dev, "pd", GPIOD_OUT_HIGH);
+ if (IS_ERR(pdata->gpiod_pd))
+ return PTR_ERR(pdata->gpiod_pd);
+
+ /* GPIO for chip reset */
+ pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+
+ return PTR_ERR_OR_ZERO(pdata->gpiod_reset);
+}
+
+static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
+{
+ u8 dp_bw, value;
+ int err;
+
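+ /*
+ * Training sequence: unmute the HDMI receiver, leave total power-down,
+ * validate the sink's maximum link rate, read the DPCD, configure
+ * downspread and enhanced framing on both ends, program the link
+ * bandwidth, then let the hardware link trainer run (SP_LT_EN).
+ */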
+ err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG,
+ 0x0);
+ if (err)
+ return err;
+
+ err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
+ SP_POWERDOWN_CTRL_REG,
+ SP_TOTAL_PD);
+ if (err)
+ return err;
+
+ err = drm_dp_dpcd_readb(&anx78xx->aux, DP_MAX_LINK_RATE, &dp_bw);
+ if (err < 0)
+ return err;
+
+ switch (dp_bw) {
+ case DP_LINK_BW_1_62:
+ case DP_LINK_BW_2_7:
+ case DP_LINK_BW_5_4:
+ break;
+
+ default:
+ DRM_DEBUG_KMS("DP bandwidth (%#02x) not supported\n", dp_bw);
+ return -EINVAL;
+ }
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
+ SP_VIDEO_MUTE);
+ if (err)
+ return err;
+
+ err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2],
+ SP_VID_CTRL1_REG, SP_VIDEO_EN);
+ if (err)
+ return err;
+
+ /* Get DPCD info */
+ err = drm_dp_dpcd_read(&anx78xx->aux, DP_DPCD_REV,
+ &anx78xx->dpcd, DP_RECEIVER_CAP_SIZE);
+ if (err < 0) {
+ DRM_ERROR("Failed to read DPCD: %d\n", err);
+ return err;
+ }
+
+ /* Clear channel x SERDES power down */
+ err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_ANALOG_POWER_DOWN_REG, SP_CH0_PD);
+ if (err)
+ return err;
+
+ /* Check link capabilities */
+ err = drm_dp_link_probe(&anx78xx->aux, &anx78xx->link);
+ if (err < 0) {
+ DRM_ERROR("Failed to probe link capabilities: %d\n", err);
+ return err;
+ }
+
+ /* Power up the sink */
+ err = drm_dp_link_power_up(&anx78xx->aux, &anx78xx->link);
+ if (err < 0) {
+ DRM_ERROR("Failed to power up DisplayPort link: %d\n", err);
+ return err;
+ }
+
+ /* Possibly enable downspread on the sink */
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_DOWNSPREAD_CTRL1_REG, 0);
+ if (err)
+ return err;
+
+ if (anx78xx->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5) {
+ DRM_DEBUG("Enable downspread on the sink\n");
+ /* 4000PPM */
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_DOWNSPREAD_CTRL1_REG, 8);
+ if (err)
+ return err;
+
+ err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_DOWNSPREAD_CTRL,
+ DP_SPREAD_AMP_0_5);
+ if (err < 0)
+ return err;
+ } else {
+ err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_DOWNSPREAD_CTRL, 0);
+ if (err < 0)
+ return err;
+ }
+
+ /* Set the lane count and the link rate on the sink */
+ if (drm_dp_enhanced_frame_cap(anx78xx->dpcd))
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_SYSTEM_CTRL_BASE + 4,
+ SP_ENHANCED_MODE);
+ else
+ err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_SYSTEM_CTRL_BASE + 4,
+ SP_ENHANCED_MODE);
+ if (err)
+ return err;
+
+ value = drm_dp_link_rate_to_bw_code(anx78xx->link.rate);
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
+ SP_DP_MAIN_LINK_BW_SET_REG, value);
+ if (err)
+ return err;
+
+ err = drm_dp_link_configure(&anx78xx->aux, &anx78xx->link);
+ if (err < 0) {
+ DRM_ERROR("Failed to configure DisplayPort link: %d\n", err);
+ return err;
+ }
+
+ /* Start training on the source */
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_LT_CTRL_REG,
+ SP_LT_EN);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int anx78xx_config_dp_output(struct anx78xx *anx78xx)
+{
+ int err;
+
+ err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
+ SP_VIDEO_MUTE);
+ if (err)
+ return err;
+
+ /* Enable DP output */
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG,
+ SP_VIDEO_EN);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int anx78xx_send_video_infoframe(struct anx78xx *anx78xx,
+ struct hdmi_avi_infoframe *frame)
+{
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ int err;
+
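+ /*
+ * Update sequence: pack the frame, disable AVI infoframe transmission,
+ * load the payload, latch it with the update bit, then re-enable
+ * transmission.
+ */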
+ err = hdmi_avi_infoframe_pack(frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("Failed to pack AVI infoframe: %d\n", err);
+ return err;
+ }
+
+ err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_EN);
+ if (err)
+ return err;
+
+ err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P2],
+ SP_INFOFRAME_AVI_DB1_REG, buffer,
+ frame->length);
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_UD);
+ if (err)
+ return err;
+
+ err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0],
+ SP_PACKET_SEND_CTRL_REG, SP_AVI_IF_EN);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int anx78xx_get_downstream_info(struct anx78xx *anx78xx)
+{
+ u8 value;
+ int err;
+
+ err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SINK_COUNT, &value);
+ if (err < 0) {
+ DRM_ERROR("Get sink count failed %d\n", err);
+ return err;
+ }
+
+ if (!DP_GET_SINK_COUNT(value)) {
+ DRM_ERROR("Downstream disconnected\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int anx78xx_get_modes(struct drm_connector *connector)
+{
+ struct anx78xx *anx78xx = connector_to_anx78xx(connector);
+ int err, num_modes = 0;
+
+ if (WARN_ON(!anx78xx->powered))
+ return 0;
+
+ if (anx78xx->edid)
+ return drm_add_edid_modes(connector, anx78xx->edid);
+
+ mutex_lock(&anx78xx->lock);
+
+ err = anx78xx_get_downstream_info(anx78xx);
+ if (err) {
+ DRM_ERROR("Failed to get downstream info: %d\n", err);
+ goto unlock;
+ }
+
+ anx78xx->edid = drm_get_edid(connector, &anx78xx->aux.ddc);
+ if (!anx78xx->edid) {
+ DRM_ERROR("Failed to read EDID\n");
+ goto unlock;
+ }
+
+ err = drm_mode_connector_update_edid_property(connector,
+ anx78xx->edid);
+ if (err) {
+ DRM_ERROR("Failed to update EDID property: %d\n", err);
+ goto unlock;
+ }
+
+ num_modes = drm_add_edid_modes(connector, anx78xx->edid);
+ /* Store the ELD */
+ drm_edid_to_eld(connector, anx78xx->edid);
+
+unlock:
+ mutex_unlock(&anx78xx->lock);
+
+ return num_modes;
+}
+
+static struct drm_encoder *anx78xx_best_encoder(struct drm_connector *connector)
+{
+ struct anx78xx *anx78xx = connector_to_anx78xx(connector);
+
+ return anx78xx->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = {
+ .get_modes = anx78xx_get_modes,
+ .best_encoder = anx78xx_best_encoder,
+};
+
+static enum drm_connector_status anx78xx_detect(struct drm_connector *connector,
+ bool force)
+{
+ struct anx78xx *anx78xx = connector_to_anx78xx(connector);
+
+ if (!gpiod_get_value(anx78xx->pdata.gpiod_hpd))
+ return connector_status_disconnected;
+
+ return connector_status_connected;
+}
+
+static void anx78xx_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs anx78xx_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = anx78xx_detect,
+ .destroy = anx78xx_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int anx78xx_bridge_attach(struct drm_bridge *bridge)
+{
+ struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
+ int err;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ /* Register aux channel */
+ anx78xx->aux.name = "DP-AUX";
+ anx78xx->aux.dev = &anx78xx->client->dev;
+ anx78xx->aux.transfer = anx78xx_aux_transfer;
+
+ err = drm_dp_aux_register(&anx78xx->aux);
+ if (err < 0) {
+ DRM_ERROR("Failed to register aux channel: %d\n", err);
+ return err;
+ }
+
+ err = drm_connector_init(bridge->dev, &anx78xx->connector,
+ &anx78xx_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (err) {
+ DRM_ERROR("Failed to initialize connector: %d\n", err);
+ return err;
+ }
+
+ drm_connector_helper_add(&anx78xx->connector,
+ &anx78xx_connector_helper_funcs);
+
+ err = drm_connector_register(&anx78xx->connector);
+ if (err) {
+ DRM_ERROR("Failed to register connector: %d\n", err);
+ return err;
+ }
+
+ anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ err = drm_mode_connector_attach_encoder(&anx78xx->connector,
+ bridge->encoder);
+ if (err) {
+ DRM_ERROR("Failed to link up connector to encoder: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static bool anx78xx_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return false;
+
+ /* Max 1200p (pixel clock up to 154 MHz) at 5.4 GHz, one lane */
+ if (mode->clock > 154000)
+ return false;
+
+ return true;
+}
+
+static void anx78xx_bridge_disable(struct drm_bridge *bridge)
+{
+ struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
+
+ /* Power off all modules except configuration registers access */
+ anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG,
+ SP_HDCP_PD | SP_AUDIO_PD | SP_VIDEO_PD | SP_LINK_PD);
+}
+
+static void anx78xx_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
+ struct hdmi_avi_infoframe frame;
+ int err;
+
+ if (WARN_ON(!anx78xx->powered))
+ return;
+
+ mutex_lock(&anx78xx->lock);
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, adjusted_mode);
+ if (err) {
+ DRM_ERROR("Failed to setup AVI infoframe: %d\n", err);
+ goto unlock;
+ }
+
+ err = anx78xx_send_video_infoframe(anx78xx, &frame);
+ if (err)
+ DRM_ERROR("Failed to send AVI infoframe: %d\n", err);
+
+unlock:
+ mutex_unlock(&anx78xx->lock);
+}
+
+static void anx78xx_bridge_enable(struct drm_bridge *bridge)
+{
+ struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
+ int err;
+
+ err = anx78xx_start(anx78xx);
+ if (err) {
+ DRM_ERROR("Failed to initialize: %d\n", err);
+ return;
+ }
+
+ err = anx78xx_set_hpd(anx78xx);
+ if (err)
+ DRM_ERROR("Failed to set HPD: %d\n", err);
+}
+
+static const struct drm_bridge_funcs anx78xx_bridge_funcs = {
+ .attach = anx78xx_bridge_attach,
+ .mode_fixup = anx78xx_bridge_mode_fixup,
+ .disable = anx78xx_bridge_disable,
+ .mode_set = anx78xx_bridge_mode_set,
+ .enable = anx78xx_bridge_enable,
+};
+
+static irqreturn_t anx78xx_hpd_threaded_handler(int irq, void *data)
+{
+ struct anx78xx *anx78xx = data;
+ int err;
+
+ if (anx78xx->powered)
+ return IRQ_HANDLED;
+
+ mutex_lock(&anx78xx->lock);
+
+ /* Cable has been plugged in, power on the chip */
+ anx78xx_poweron(anx78xx);
+
+ err = anx78xx_enable_interrupts(anx78xx);
+ if (err)
+ DRM_ERROR("Failed to enable interrupts: %d\n", err);
+
+ mutex_unlock(&anx78xx->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int anx78xx_handle_dp_int_1(struct anx78xx *anx78xx, u8 irq)
+{
+ int err;
+
+ DRM_DEBUG_KMS("Handle DP interrupt 1: %02x\n", irq);
+
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_STATUS1_REG,
+ irq);
+ if (err)
+ return err;
+
+ if (irq & SP_TRAINING_FINISH) {
+ DRM_DEBUG_KMS("IRQ: hardware link training finished\n");
+ err = anx78xx_config_dp_output(anx78xx);
+ }
+
+ return err;
+}
+
+static bool anx78xx_handle_common_int_4(struct anx78xx *anx78xx, u8 irq)
+{
+ bool event = false;
+ int err;
+
+ DRM_DEBUG_KMS("Handle common interrupt 4: %02x\n", irq);
+
+ err = regmap_write(anx78xx->map[I2C_IDX_TX_P2],
+ SP_COMMON_INT_STATUS4_REG, irq);
+ if (err) {
+ DRM_ERROR("Failed to write SP_COMMON_INT_STATUS4 %d\n", err);
+ return event;
+ }
+
+ if (irq & SP_HPD_LOST) {
+ DRM_DEBUG_KMS("IRQ: Hot plug detect - cable is pulled out\n");
+ event = true;
+ anx78xx_poweroff(anx78xx);
+ /* Free cached EDID */
+ kfree(anx78xx->edid);
+ anx78xx->edid = NULL;
+ } else if (irq & SP_HPD_PLUG) {
+ DRM_DEBUG_KMS("IRQ: Hot plug detect - cable plug\n");
+ event = true;
+ }
+
+ return event;
+}
+
+static void anx78xx_handle_hdmi_int_1(struct anx78xx *anx78xx, u8 irq)
+{
+ unsigned int value;
+ int err;
+
+ DRM_DEBUG_KMS("Handle HDMI interrupt 1: %02x\n", irq);
+
+ err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_INT_STATUS1_REG,
+ irq);
+ if (err) {
+ DRM_ERROR("Write HDMI int 1 failed: %d\n", err);
+ return;
+ }
+
+ if ((irq & SP_CKDT_CHG) || (irq & SP_SCDT_CHG)) {
+ DRM_DEBUG_KMS("IRQ: HDMI input detected\n");
+
+ err = regmap_read(anx78xx->map[I2C_IDX_RX_P0],
+ SP_SYSTEM_STATUS_REG, &value);
+ if (err) {
+ DRM_ERROR("Read system status reg failed: %d\n", err);
+ return;
+ }
+
+ if (!(value & SP_TMDS_CLOCK_DET)) {
+ DRM_DEBUG_KMS("IRQ: *** Waiting for HDMI clock ***\n");
+ return;
+ }
+
+ if (!(value & SP_TMDS_DE_DET)) {
+ DRM_DEBUG_KMS("IRQ: *** Waiting for HDMI signal ***\n");
+ return;
+ }
+
+ err = anx78xx_dp_link_training(anx78xx);
+ if (err)
+ DRM_ERROR("Failed to start link training: %d\n", err);
+ }
+}
+
+static irqreturn_t anx78xx_intp_threaded_handler(int unused, void *data)
+{
+ struct anx78xx *anx78xx = data;
+ bool event = false;
+ unsigned int irq;
+ int err;
+
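+ /*
+ * The INTP line aggregates three sources. Read and handle each status
+ * register in turn: DP interrupt 1 (link training), common interrupt 4
+ * (HPD plug/unplug) and HDMI interrupt 1 (clock/sync detect). Each
+ * handler acknowledges its bits by writing them back.
+ */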
+ mutex_lock(&anx78xx->lock);
+
+ err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_STATUS1_REG,
+ &irq);
+ if (err) {
+ DRM_ERROR("Failed to read DP interrupt 1 status: %d\n", err);
+ goto unlock;
+ }
+
+ if (irq)
+ anx78xx_handle_dp_int_1(anx78xx, irq);
+
+ err = regmap_read(anx78xx->map[I2C_IDX_TX_P2],
+ SP_COMMON_INT_STATUS4_REG, &irq);
+ if (err) {
+ DRM_ERROR("Failed to read common interrupt 4 status: %d\n",
+ err);
+ goto unlock;
+ }
+
+ if (irq)
+ event = anx78xx_handle_common_int_4(anx78xx, irq);
+
+ /* Make sure we are still powered after handling the HPD events */
+ if (!anx78xx->powered)
+ goto unlock;
+
+ err = regmap_read(anx78xx->map[I2C_IDX_RX_P0], SP_INT_STATUS1_REG,
+ &irq);
+ if (err) {
+ DRM_ERROR("Failed to read HDMI int 1 status: %d\n", err);
+ goto unlock;
+ }
+
+ if (irq)
+ anx78xx_handle_hdmi_int_1(anx78xx, irq);
+
+unlock:
+ mutex_unlock(&anx78xx->lock);
+
+ if (event)
+ drm_helper_hpd_irq_event(anx78xx->connector.dev);
+
+ return IRQ_HANDLED;
+}
+
+static void unregister_i2c_dummy_clients(struct anx78xx *anx78xx)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(anx78xx->i2c_dummy); i++)
+ if (anx78xx->i2c_dummy[i])
+ i2c_unregister_device(anx78xx->i2c_dummy[i]);
+}
+
+static const struct regmap_config anx78xx_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const u16 anx78xx_chipid_list[] = {
+ 0x7812,
+ 0x7814,
+ 0x7818,
+};
+
+static int anx78xx_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct anx78xx *anx78xx;
+ struct anx78xx_platform_data *pdata;
+ unsigned int i, idl, idh, version;
+ bool found = false;
+ int err;
+
+ anx78xx = devm_kzalloc(&client->dev, sizeof(*anx78xx), GFP_KERNEL);
+ if (!anx78xx)
+ return -ENOMEM;
+
+ pdata = &anx78xx->pdata;
+
+ mutex_init(&anx78xx->lock);
+
+#if IS_ENABLED(CONFIG_OF)
+ anx78xx->bridge.of_node = client->dev.of_node;
+#endif
+
+ anx78xx->client = client;
+ i2c_set_clientdata(client, anx78xx);
+
+ err = anx78xx_init_pdata(anx78xx);
+ if (err) {
+ DRM_ERROR("Failed to initialize pdata: %d\n", err);
+ return err;
+ }
+
+ pdata->hpd_irq = gpiod_to_irq(pdata->gpiod_hpd);
+ if (pdata->hpd_irq < 0) {
+ DRM_ERROR("Failed to get HPD IRQ: %d\n", pdata->hpd_irq);
+ return -ENODEV;
+ }
+
+ pdata->intp_irq = client->irq;
+ if (!pdata->intp_irq) {
+ DRM_ERROR("Failed to get CABLE_DET and INTP IRQ\n");
+ return -ENODEV;
+ }
+
+ /* Map slave addresses of ANX7814 */
+ for (i = 0; i < I2C_NUM_ADDRESSES; i++) {
+ anx78xx->i2c_dummy[i] = i2c_new_dummy(client->adapter,
+ anx78xx_i2c_addresses[i] >> 1);
+ if (!anx78xx->i2c_dummy[i]) {
+ err = -ENOMEM;
+ DRM_ERROR("Failed to reserve I2C bus %02x\n",
+ anx78xx_i2c_addresses[i]);
+ goto err_unregister_i2c;
+ }
+
+ anx78xx->map[i] = devm_regmap_init_i2c(anx78xx->i2c_dummy[i],
+ &anx78xx_regmap_config);
+ if (IS_ERR(anx78xx->map[i])) {
+ err = PTR_ERR(anx78xx->map[i]);
+ DRM_ERROR("Failed regmap initialization %02x\n",
+ anx78xx_i2c_addresses[i]);
+ goto err_unregister_i2c;
+ }
+ }
+
+ /* Look for supported chip ID */
+ anx78xx_poweron(anx78xx);
+
+ err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_IDL_REG,
+ &idl);
+ if (err)
+ goto err_poweroff;
+
+ err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_IDH_REG,
+ &idh);
+ if (err)
+ goto err_poweroff;
+
+ anx78xx->chipid = (u8)idl | ((u8)idh << 8);
+
+ err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_VERSION_REG,
+ &version);
+ if (err)
+ goto err_poweroff;
+
+ for (i = 0; i < ARRAY_SIZE(anx78xx_chipid_list); i++) {
+ if (anx78xx->chipid == anx78xx_chipid_list[i]) {
+ DRM_INFO("Found ANX%x (ver. %d) SlimPort Transmitter\n",
+ anx78xx->chipid, version);
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ DRM_ERROR("ANX%x (ver. %d) not supported by this driver\n",
+ anx78xx->chipid, version);
+ err = -ENODEV;
+ goto err_poweroff;
+ }
+
+ err = devm_request_threaded_irq(&client->dev, pdata->hpd_irq, NULL,
+ anx78xx_hpd_threaded_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "anx78xx-hpd", anx78xx);
+ if (err) {
+ DRM_ERROR("Failed to request CABLE_DET threaded IRQ: %d\n",
+ err);
+ goto err_poweroff;
+ }
+
+ err = devm_request_threaded_irq(&client->dev, pdata->intp_irq, NULL,
+ anx78xx_intp_threaded_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "anx78xx-intp", anx78xx);
+ if (err) {
+ DRM_ERROR("Failed to request INTP threaded IRQ: %d\n", err);
+ goto err_poweroff;
+ }
+
+ anx78xx->bridge.funcs = &anx78xx_bridge_funcs;
+
+ err = drm_bridge_add(&anx78xx->bridge);
+ if (err < 0) {
+ DRM_ERROR("Failed to add drm bridge: %d\n", err);
+ goto err_poweroff;
+ }
+
+ /* If cable is pulled out, just poweroff and wait for HPD event */
+ if (!gpiod_get_value(anx78xx->pdata.gpiod_hpd))
+ anx78xx_poweroff(anx78xx);
+
+ return 0;
+
+err_poweroff:
+ anx78xx_poweroff(anx78xx);
+
+err_unregister_i2c:
+ unregister_i2c_dummy_clients(anx78xx);
+ return err;
+}
+
+static int anx78xx_i2c_remove(struct i2c_client *client)
+{
+ struct anx78xx *anx78xx = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&anx78xx->bridge);
+
+ unregister_i2c_dummy_clients(anx78xx);
+
+ kfree(anx78xx->edid);
+
+ return 0;
+}
+
+static const struct i2c_device_id anx78xx_id[] = {
+ { "anx7814", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, anx78xx_id);
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id anx78xx_match_table[] = {
+ { .compatible = "analogix,anx7814", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, anx78xx_match_table);
+#endif
+
+static struct i2c_driver anx78xx_driver = {
+ .driver = {
+ .name = "anx7814",
+ .of_match_table = of_match_ptr(anx78xx_match_table),
+ },
+ .probe = anx78xx_i2c_probe,
+ .remove = anx78xx_i2c_remove,
+ .id_table = anx78xx_id,
+};
+module_i2c_driver(anx78xx_driver);
+
+MODULE_DESCRIPTION("ANX78xx SlimPort Transmitter driver");
+MODULE_AUTHOR("Enric Balletbo i Serra <enric.balletbo@collabora.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.h b/drivers/gpu/drm/bridge/analogix-anx78xx.h
new file mode 100644
index 000000000..38753c870
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.h
@@ -0,0 +1,719 @@
+/*
+ * Copyright(c) 2016, Analogix Semiconductor. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ANX78xx_H
+#define __ANX78xx_H
+
+#define TX_P0 0x70
+#define TX_P1 0x7a
+#define TX_P2 0x72
+
+#define RX_P0 0x7e
+#define RX_P1 0x80
+
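+/*
+ * The slave addresses above are in 8-bit (write) form; the driver
+ * shifts them right by one before registering its I2C dummy clients
+ * (see anx78xx_i2c_probe).
+ */
+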
+/***************************************************************/
+/* Register definition of device address 0x7e */
+/***************************************************************/
+
+/*
+ * System Control and Status
+ */
+
+/* Software Reset Register 1 */
+#define SP_SOFTWARE_RESET1_REG 0x11
+#define SP_VIDEO_RST BIT(4)
+#define SP_HDCP_MAN_RST BIT(2)
+#define SP_TMDS_RST BIT(1)
+#define SP_SW_MAN_RST BIT(0)
+
+/* System Status Register */
+#define SP_SYSTEM_STATUS_REG 0x14
+#define SP_TMDS_CLOCK_DET BIT(1)
+#define SP_TMDS_DE_DET BIT(0)
+
+/* HDMI Status Register */
+#define SP_HDMI_STATUS_REG 0x15
+#define SP_HDMI_AUD_LAYOUT BIT(3)
+#define SP_HDMI_DET BIT(0)
+# define SP_DVI_MODE 0
+# define SP_HDMI_MODE 1
+
+/* HDMI Mute Control Register */
+#define SP_HDMI_MUTE_CTRL_REG 0x16
+#define SP_AUD_MUTE BIT(1)
+#define SP_VID_MUTE BIT(0)
+
+/* System Power Down Register 1 */
+#define SP_SYSTEM_POWER_DOWN1_REG 0x18
+#define SP_PWDN_CTRL BIT(0)
+
+/*
+ * Audio and Video Auto Control
+ */
+
+/* Auto Audio and Video Control register */
+#define SP_AUDVID_CTRL_REG 0x20
+#define SP_AVC_OE BIT(7)
+#define SP_AAC_OE BIT(6)
+#define SP_AVC_EN BIT(1)
+#define SP_AAC_EN BIT(0)
+
+/* Audio Exception Enable Registers */
+#define SP_AUD_EXCEPTION_ENABLE_BASE (0x24 - 1)
+/* Bits for Audio Exception Enable Register 3 */
+#define SP_AEC_EN21 BIT(5)
+
+/*
+ * Interrupt
+ */
+
+/* Interrupt Status Register 1 */
+#define SP_INT_STATUS1_REG 0x31
+/* Bits for Interrupt Status Register 1 */
+#define SP_HDMI_DVI BIT(7)
+#define SP_CKDT_CHG BIT(6)
+#define SP_SCDT_CHG BIT(5)
+#define SP_PCLK_CHG BIT(4)
+#define SP_PLL_UNLOCK BIT(3)
+#define SP_CABLE_PLUG_CHG BIT(2)
+#define SP_SET_MUTE BIT(1)
+#define SP_SW_INTR BIT(0)
+/* Bits for Interrupt Status Register 2 */
+#define SP_HDCP_ERR BIT(5)
+#define SP_AUDIO_SAMPLE_CHG BIT(0) /* undocumented */
+/* Bits for Interrupt Status Register 3 */
+#define SP_AUD_MODE_CHG BIT(0)
+/* Bits for Interrupt Status Register 5 */
+#define SP_AUDIO_RCV BIT(0)
+/* Bits for Interrupt Status Register 6 */
+#define SP_INT_STATUS6_REG 0x36
+#define SP_CTS_RCV BIT(7)
+#define SP_NEW_AUD_PKT BIT(4)
+#define SP_NEW_AVI_PKT BIT(1)
+#define SP_NEW_CP_PKT BIT(0)
+/* Bits for Interrupt Status Register 7 */
+#define SP_NO_VSI BIT(7)
+#define SP_NEW_VS BIT(4)
+
+/* Interrupt Mask 1 Status Registers */
+#define SP_INT_MASK1_REG 0x41
+
+/* HDMI US TIMER Control Register */
+#define SP_HDMI_US_TIMER_CTRL_REG 0x49
+#define SP_MS_TIMER_MARGIN_10_8_MASK 0x07
+
+/*
+ * TMDS Control
+ */
+
+/* TMDS Control Registers */
+#define SP_TMDS_CTRL_BASE (0x50 - 1)
+/* Bits for TMDS Control Register 7 */
+#define SP_PD_RT BIT(0)
+
+/*
+ * Video Control
+ */
+
+/* Video Status Register */
+#define SP_VIDEO_STATUS_REG 0x70
+#define SP_COLOR_DEPTH_MASK 0xf0
+#define SP_COLOR_DEPTH_SHIFT 4
+# define SP_COLOR_DEPTH_MODE_LEGACY 0x00
+# define SP_COLOR_DEPTH_MODE_24BIT 0x04
+# define SP_COLOR_DEPTH_MODE_30BIT 0x05
+# define SP_COLOR_DEPTH_MODE_36BIT 0x06
+# define SP_COLOR_DEPTH_MODE_48BIT 0x07
+
+/* Video Data Range Control Register */
+#define SP_VID_DATA_RANGE_CTRL_REG 0x83
+#define SP_R2Y_INPUT_LIMIT BIT(1)
+
+/* Pixel Clock High Resolution Counter Registers */
+#define SP_PCLK_HIGHRES_CNT_BASE (0x8c - 1)
+
+/*
+ * Audio Control
+ */
+
+/* Number of Audio Channels Status Registers */
+#define SP_AUD_CH_STATUS_REG_NUM 6
+
+/* Audio IN S/PDIF Channel Status Registers */
+#define SP_AUD_SPDIF_CH_STATUS_BASE 0xc7
+
+/* Audio IN S/PDIF Channel Status Register 4 */
+#define SP_FS_FREQ_MASK 0x0f
+# define SP_FS_FREQ_44100HZ 0x00
+# define SP_FS_FREQ_48000HZ 0x02
+# define SP_FS_FREQ_32000HZ 0x03
+# define SP_FS_FREQ_88200HZ 0x08
+# define SP_FS_FREQ_96000HZ 0x0a
+# define SP_FS_FREQ_176400HZ 0x0c
+# define SP_FS_FREQ_192000HZ 0x0e
+
+/*
+ * Miscellaneous Control Block
+ */
+
+/* CHIP Control Register */
+#define SP_CHIP_CTRL_REG 0xe3
+#define SP_MAN_HDMI5V_DET BIT(3)
+#define SP_PLLLOCK_CKDT_EN BIT(2)
+#define SP_ANALOG_CKDT_EN BIT(1)
+#define SP_DIGITAL_CKDT_EN BIT(0)
+
+/* Packet Receiving Status Register */
+#define SP_PACKET_RECEIVING_STATUS_REG 0xf3
+#define SP_AVI_RCVD BIT(5)
+#define SP_VSI_RCVD BIT(1)
+
+/***************************************************************/
+/* Register definition of device address 0x80 */
+/***************************************************************/
+
+/* HDCP BCAPS Shadow Register */
+#define SP_HDCP_BCAPS_SHADOW_REG 0x2a
+#define SP_BCAPS_REPEATER BIT(5)
+
+/* HDCP Status Register */
+#define SP_RX_HDCP_STATUS_REG 0x3f
+#define SP_AUTH_EN BIT(4)
+
+/*
+ * InfoFrame and Control Packet Registers
+ */
+
+/* AVI InfoFrame packet checksum */
+#define SP_AVI_INFOFRAME_CHECKSUM 0xa3
+
+/* AVI InfoFrame Registers */
+#define SP_AVI_INFOFRAME_DATA_BASE 0xa4
+
+#define SP_AVI_COLOR_F_MASK 0x60
+#define SP_AVI_COLOR_F_SHIFT 5
+
+/* Audio InfoFrame Registers */
+#define SP_AUD_INFOFRAME_DATA_BASE 0xc4
+#define SP_AUD_INFOFRAME_LAYOUT_MASK 0x0f
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet type code */
+#define SP_MPEG_VS_INFOFRAME_TYPE_REG 0xe0
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet length */
+#define SP_MPEG_VS_INFOFRAME_LEN_REG 0xe2
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet version number */
+#define SP_MPEG_VS_INFOFRAME_VER_REG 0xe1
+
+/* MPEG/HDMI Vendor Specific InfoFrame Packet content */
+#define SP_MPEG_VS_INFOFRAME_DATA_BASE 0xe4
+
+/* General Control Packet Register */
+#define SP_GENERAL_CTRL_PACKET_REG 0x9f
+#define SP_CLEAR_AVMUTE BIT(4)
+#define SP_SET_AVMUTE BIT(0)
+
+/***************************************************************/
+/* Register definition of device address 0x70 */
+/***************************************************************/
+
+/* HDCP Status Register */
+#define SP_TX_HDCP_STATUS_REG 0x00
+#define SP_AUTH_FAIL BIT(5)
+#define SP_AUTHEN_PASS BIT(1)
+
+/* HDCP Control Register 0 */
+#define SP_HDCP_CTRL0_REG 0x01
+#define SP_RX_REPEATER BIT(6)
+#define SP_RE_AUTH BIT(5)
+#define SP_SW_AUTH_OK BIT(4)
+#define SP_HARD_AUTH_EN BIT(3)
+#define SP_HDCP_ENC_EN BIT(2)
+#define SP_BKSV_SRM_PASS BIT(1)
+#define SP_KSVLIST_VLD BIT(0)
+/* HDCP Function Enabled */
+#define SP_HDCP_FUNCTION_ENABLED (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* HDCP Receiver BSTATUS Register 0 */
+#define SP_HDCP_RX_BSTATUS0_REG 0x1b
+/* HDCP Receiver BSTATUS Register 1 */
+#define SP_HDCP_RX_BSTATUS1_REG 0x1c
+
+/* HDCP Embedded "Blue Screen" Content Registers */
+#define SP_HDCP_VID0_BLUE_SCREEN_REG 0x2c
+#define SP_HDCP_VID1_BLUE_SCREEN_REG 0x2d
+#define SP_HDCP_VID2_BLUE_SCREEN_REG 0x2e
+
+/* HDCP Wait R0 Timing Register */
+#define SP_HDCP_WAIT_R0_TIME_REG 0x40
+
+/* HDCP Link Integrity Check Timer Register */
+#define SP_HDCP_LINK_CHECK_TIMER_REG 0x41
+
+/* HDCP Repeater Ready Wait Timer Register */
+#define SP_HDCP_RPTR_RDY_WAIT_TIME_REG 0x42
+
+/* HDCP Auto Timer Register */
+#define SP_HDCP_AUTO_TIMER_REG 0x51
+
+/* HDCP Key Status Register */
+#define SP_HDCP_KEY_STATUS_REG 0x5e
+
+/* HDCP Key Command Register */
+#define SP_HDCP_KEY_COMMAND_REG 0x5f
+#define SP_DISABLE_SYNC_HDCP BIT(2)
+
+/* OTP Memory Key Protection Registers */
+#define SP_OTP_KEY_PROTECT1_REG 0x60
+#define SP_OTP_KEY_PROTECT2_REG 0x61
+#define SP_OTP_KEY_PROTECT3_REG 0x62
+#define SP_OTP_PSW1 0xa2
+#define SP_OTP_PSW2 0x7e
+#define SP_OTP_PSW3 0xc6
+
+/* DP System Control Registers */
+#define SP_DP_SYSTEM_CTRL_BASE (0x80 - 1)
+/* Bits for DP System Control Register 2 */
+#define SP_CHA_STA BIT(2)
+/* Bits for DP System Control Register 3 */
+#define SP_HPD_STATUS BIT(6)
+#define SP_STRM_VALID BIT(2)
+/* Bits for DP System Control Register 4 */
+#define SP_ENHANCED_MODE BIT(3)
+
+/* DP Video Control Register */
+#define SP_DP_VIDEO_CTRL_REG 0x84
+#define SP_COLOR_F_MASK 0x06
+#define SP_COLOR_F_SHIFT 1
+#define SP_BPC_MASK 0xe0
+#define SP_BPC_SHIFT 5
+# define SP_BPC_6BITS 0x00
+# define SP_BPC_8BITS 0x01
+# define SP_BPC_10BITS 0x02
+# define SP_BPC_12BITS 0x03
+
+/* DP Audio Control Register */
+#define SP_DP_AUDIO_CTRL_REG 0x87
+#define SP_AUD_EN BIT(0)
+
+/* 10us Pulse Generate Timer Registers */
+#define SP_I2C_GEN_10US_TIMER0_REG 0x88
+#define SP_I2C_GEN_10US_TIMER1_REG 0x89
+
+/* Packet Send Control Register */
+#define SP_PACKET_SEND_CTRL_REG 0x90
+#define SP_AUD_IF_UP BIT(7)
+#define SP_AVI_IF_UD BIT(6)
+#define SP_MPEG_IF_UD BIT(5)
+#define SP_SPD_IF_UD BIT(4)
+#define SP_AUD_IF_EN BIT(3)
+#define SP_AVI_IF_EN BIT(2)
+#define SP_MPEG_IF_EN BIT(1)
+#define SP_SPD_IF_EN BIT(0)
+
+/* DP HDCP Control Register */
+#define SP_DP_HDCP_CTRL_REG 0x92
+#define SP_AUTO_EN BIT(7)
+#define SP_AUTO_START BIT(5)
+#define SP_LINK_POLLING BIT(1)
+
+/* DP Main Link Bandwidth Setting Register */
+#define SP_DP_MAIN_LINK_BW_SET_REG 0xa0
+#define SP_LINK_BW_SET_MASK 0x1f
+#define SP_INITIAL_SLIM_M_AUD_SEL BIT(5)
+
+/* DP Training Pattern Set Register */
+#define SP_DP_TRAINING_PATTERN_SET_REG 0xa2
+
+/* DP Lane 0 Link Training Control Register */
+#define SP_DP_LANE0_LT_CTRL_REG 0xa3
+#define SP_TX_SW_SET_MASK 0x1b
+#define SP_MAX_PRE_REACH BIT(5)
+#define SP_MAX_DRIVE_REACH BIT(4)
+#define SP_PRE_EMP_LEVEL1 BIT(3)
+#define SP_DRVIE_CURRENT_LEVEL1 BIT(0)
+
+/* DP Link Training Control Register */
+#define SP_DP_LT_CTRL_REG 0xa8
+#define SP_LT_ERROR_TYPE_MASK 0x70
+# define SP_LT_NO_ERROR 0x00
+# define SP_LT_AUX_WRITE_ERROR 0x01
+# define SP_LT_MAX_DRIVE_REACHED 0x02
+# define SP_LT_WRONG_LANE_COUNT_SET 0x03
+# define SP_LT_LOOP_SAME_5_TIME 0x04
+# define SP_LT_CR_FAIL_IN_EQ 0x05
+# define SP_LT_EQ_LOOP_5_TIME 0x06
+#define SP_LT_EN BIT(0)
+
+/* DP CEP Training Control Registers */
+#define SP_DP_CEP_TRAINING_CTRL0_REG 0xa9
+#define SP_DP_CEP_TRAINING_CTRL1_REG 0xaa
+
+/* DP Debug Register 1 */
+#define SP_DP_DEBUG1_REG 0xb0
+#define SP_DEBUG_PLL_LOCK BIT(4)
+#define SP_POLLING_EN BIT(1)
+
+/* DP Polling Control Register */
+#define SP_DP_POLLING_CTRL_REG 0xb4
+#define SP_AUTO_POLLING_DISABLE BIT(0)
+
+/* DP Link Debug Control Register */
+#define SP_DP_LINK_DEBUG_CTRL_REG 0xb8
+#define SP_M_VID_DEBUG BIT(5)
+#define SP_NEW_PRBS7 BIT(4)
+#define SP_INSERT_ER BIT(1)
+#define SP_PRBS31_EN BIT(0)
+
+/* AUX Misc control Register */
+#define SP_AUX_MISC_CTRL_REG 0xbf
+
+/* DP PLL control Register */
+#define SP_DP_PLL_CTRL_REG 0xc7
+#define SP_PLL_RST BIT(6)
+
+/* DP Analog Power Down Register */
+#define SP_DP_ANALOG_POWER_DOWN_REG 0xc8
+#define SP_CH0_PD BIT(0)
+
+/* DP Misc Control Register */
+#define SP_DP_MISC_CTRL_REG 0xcd
+#define SP_EQ_TRAINING_LOOP BIT(6)
+
+/* DP Extra I2C Device Address Register */
+#define SP_DP_EXTRA_I2C_DEV_ADDR_REG 0xce
+#define SP_I2C_STRETCH_DISABLE BIT(7)
+
+#define SP_I2C_EXTRA_ADDR 0x50
+
+/* DP Downspread Control Register 1 */
+#define SP_DP_DOWNSPREAD_CTRL1_REG 0xd0
+
+/* DP M Value Calculation Control Register */
+#define SP_DP_M_CALCULATION_CTRL_REG 0xd9
+#define SP_M_GEN_CLK_SEL BIT(0)
+
+/* AUX Channel Access Status Register */
+#define SP_AUX_CH_STATUS_REG 0xe0
+#define SP_AUX_STATUS 0x0f
+
+/* AUX Channel DEFER Control Register */
+#define SP_AUX_DEFER_CTRL_REG 0xe2
+#define SP_DEFER_CTRL_EN BIT(7)
+
+/* DP Buffer Data Count Register */
+#define SP_BUF_DATA_COUNT_REG 0xe4
+#define SP_BUF_DATA_COUNT_MASK 0x1f
+#define SP_BUF_CLR BIT(7)
+
+/* DP AUX Channel Control Register 1 */
+#define SP_DP_AUX_CH_CTRL1_REG 0xe5
+#define SP_AUX_TX_COMM_MASK 0x0f
+#define SP_AUX_LENGTH_MASK 0xf0
+#define SP_AUX_LENGTH_SHIFT 4
+
+/* DP AUX CH Address Register 0 */
+#define SP_AUX_ADDR_7_0_REG 0xe6
+
+/* DP AUX CH Address Register 1 */
+#define SP_AUX_ADDR_15_8_REG 0xe7
+
+/* DP AUX CH Address Register 2 */
+#define SP_AUX_ADDR_19_16_REG 0xe8
+#define SP_AUX_ADDR_19_16_MASK 0x0f
+
+/* DP AUX Channel Control Register 2 */
+#define SP_DP_AUX_CH_CTRL2_REG 0xe9
+#define SP_AUX_SEL_RXCM BIT(6)
+#define SP_AUX_CHSEL BIT(3)
+#define SP_AUX_PN_INV BIT(2)
+#define SP_ADDR_ONLY BIT(1)
+#define SP_AUX_EN BIT(0)
+
+/* DP Video Stream Control InfoFrame Register */
+#define SP_DP_3D_VSC_CTRL_REG 0xea
+#define SP_INFO_FRAME_VSC_EN BIT(0)
+
+/* DP Video Stream Data Byte 1 Register */
+#define SP_DP_VSC_DB1_REG 0xeb
+
+/* DP AUX Channel Control Register 3 */
+#define SP_DP_AUX_CH_CTRL3_REG 0xec
+#define SP_WAIT_COUNTER_7_0_MASK 0xff
+
+/* DP AUX Channel Control Register 4 */
+#define SP_DP_AUX_CH_CTRL4_REG 0xed
+
+/* DP AUX Buffer Data Registers */
+#define SP_DP_BUF_DATA0_REG 0xf0
+
+/***************************************************************/
+/* Register definition of device address 0x72 */
+/***************************************************************/
+
+/*
+ * Core Register Definitions
+ */
+
+/* Device ID Low Byte Register */
+#define SP_DEVICE_IDL_REG 0x02
+
+/* Device ID High Byte Register */
+#define SP_DEVICE_IDH_REG 0x03
+
+/* Device version register */
+#define SP_DEVICE_VERSION_REG 0x04
+
+/* Power Down Control Register */
+#define SP_POWERDOWN_CTRL_REG 0x05
+#define SP_REGISTER_PD BIT(7)
+#define SP_HDCP_PD BIT(5)
+#define SP_AUDIO_PD BIT(4)
+#define SP_VIDEO_PD BIT(3)
+#define SP_LINK_PD BIT(2)
+#define SP_TOTAL_PD BIT(1)
+
+/* Reset Control Register 1 */
+#define SP_RESET_CTRL1_REG 0x06
+#define SP_MISC_RST BIT(7)
+#define SP_VIDCAP_RST BIT(6)
+#define SP_VIDFIF_RST BIT(5)
+#define SP_AUDFIF_RST BIT(4)
+#define SP_AUDCAP_RST BIT(3)
+#define SP_HDCP_RST BIT(2)
+#define SP_SW_RST BIT(1)
+#define SP_HW_RST BIT(0)
+
+/* Reset Control Register 2 */
+#define SP_RESET_CTRL2_REG 0x07
+#define SP_AUX_RST BIT(2)
+#define SP_SERDES_FIFO_RST BIT(1)
+#define SP_I2C_REG_RST BIT(0)
+
+/* Video Control Register 1 */
+#define SP_VID_CTRL1_REG 0x08
+#define SP_VIDEO_EN BIT(7)
+#define SP_VIDEO_MUTE BIT(2)
+#define SP_DE_GEN BIT(1)
+#define SP_DEMUX BIT(0)
+
+/* Video Control Register 2 */
+#define SP_VID_CTRL2_REG 0x09
+#define SP_IN_COLOR_F_MASK 0x03
+#define SP_IN_YC_BIT_SEL BIT(2)
+#define SP_IN_BPC_MASK 0x70
+#define SP_IN_BPC_SHIFT 4
+# define SP_IN_BPC_12BIT 0x03
+# define SP_IN_BPC_10BIT 0x02
+# define SP_IN_BPC_8BIT 0x01
+# define SP_IN_BPC_6BIT 0x00
+#define SP_IN_D_RANGE BIT(7)
+
+/* Video Control Register 3 */
+#define SP_VID_CTRL3_REG 0x0a
+#define SP_HPD_OUT BIT(6)
+
+/* Video Control Register 5 */
+#define SP_VID_CTRL5_REG 0x0c
+#define SP_CSC_STD_SEL BIT(7)
+#define SP_XVYCC_RNG_LMT BIT(6)
+#define SP_RANGE_Y2R BIT(5)
+#define SP_CSPACE_Y2R BIT(4)
+#define SP_RGB_RNG_LMT BIT(3)
+#define SP_Y_RNG_LMT BIT(2)
+#define SP_RANGE_R2Y BIT(1)
+#define SP_CSPACE_R2Y BIT(0)
+
+/* Video Control Register 6 */
+#define SP_VID_CTRL6_REG 0x0d
+#define SP_TEST_PATTERN_EN BIT(7)
+#define SP_VIDEO_PROCESS_EN BIT(6)
+#define SP_VID_US_MODE BIT(3)
+#define SP_VID_DS_MODE BIT(2)
+#define SP_UP_SAMPLE BIT(1)
+#define SP_DOWN_SAMPLE BIT(0)
+
+/* Video Control Register 8 */
+#define SP_VID_CTRL8_REG 0x0f
+#define SP_VID_VRES_TH BIT(0)
+
+/* Total Line Status Low Byte Register */
+#define SP_TOTAL_LINE_STAL_REG 0x24
+
+/* Total Line Status High Byte Register */
+#define SP_TOTAL_LINE_STAH_REG 0x25
+
+/* Active Line Status Low Byte Register */
+#define SP_ACT_LINE_STAL_REG 0x26
+
+/* Active Line Status High Byte Register */
+#define SP_ACT_LINE_STAH_REG 0x27
+
+/* Vertical Front Porch Status Register */
+#define SP_V_F_PORCH_STA_REG 0x28
+
+/* Vertical SYNC Width Status Register */
+#define SP_V_SYNC_STA_REG 0x29
+
+/* Vertical Back Porch Status Register */
+#define SP_V_B_PORCH_STA_REG 0x2a
+
+/* Total Pixel Status Low Byte Register */
+#define SP_TOTAL_PIXEL_STAL_REG 0x2b
+
+/* Total Pixel Status High Byte Register */
+#define SP_TOTAL_PIXEL_STAH_REG 0x2c
+
+/* Active Pixel Status Low Byte Register */
+#define SP_ACT_PIXEL_STAL_REG 0x2d
+
+/* Active Pixel Status High Byte Register */
+#define SP_ACT_PIXEL_STAH_REG 0x2e
+
+/* Horizontal Front Porch Status Low Byte Register */
+#define SP_H_F_PORCH_STAL_REG 0x2f
+
+/* Horizontal Front Porch Status High Byte Register */
+#define SP_H_F_PORCH_STAH_REG 0x30
+
+/* Horizontal SYNC Width Status Low Byte Register */
+#define SP_H_SYNC_STAL_REG 0x31
+
+/* Horizontal SYNC Width Status High Byte Register */
+#define SP_H_SYNC_STAH_REG 0x32
+
+/* Horizontal Back Porch Status Low Byte Register */
+#define SP_H_B_PORCH_STAL_REG 0x33
+
+/* Horizontal Back Porch Status High Byte Register */
+#define SP_H_B_PORCH_STAH_REG 0x34
+
+/* InfoFrame AVI Packet DB1 Register */
+#define SP_INFOFRAME_AVI_DB1_REG 0x70
+
+/* Bit Control Specific Register */
+#define SP_BIT_CTRL_SPECIFIC_REG 0x80
+#define SP_BIT_CTRL_SELECT_SHIFT 1
+#define SP_ENABLE_BIT_CTRL BIT(0)
+
+/* InfoFrame Audio Packet DB1 Register */
+#define SP_INFOFRAME_AUD_DB1_REG 0x83
+
+/* InfoFrame MPEG Packet DB1 Register */
+#define SP_INFOFRAME_MPEG_DB1_REG 0xb0
+
+/* Audio Channel Status Registers */
+#define SP_AUD_CH_STATUS_BASE 0xd0
+
+/* Audio Channel Num Register 5 */
+#define SP_I2S_CHANNEL_NUM_MASK 0xe0
+# define SP_I2S_CH_NUM_1 (0x00 << 5)
+# define SP_I2S_CH_NUM_2 (0x01 << 5)
+# define SP_I2S_CH_NUM_3 (0x02 << 5)
+# define SP_I2S_CH_NUM_4 (0x03 << 5)
+# define SP_I2S_CH_NUM_5 (0x04 << 5)
+# define SP_I2S_CH_NUM_6 (0x05 << 5)
+# define SP_I2S_CH_NUM_7 (0x06 << 5)
+# define SP_I2S_CH_NUM_8 (0x07 << 5)
+#define SP_EXT_VUCP BIT(2)
+#define SP_VBIT BIT(1)
+#define SP_AUDIO_LAYOUT BIT(0)
+
+/* Analog Debug Register 2 */
+#define SP_ANALOG_DEBUG2_REG 0xdd
+#define SP_FORCE_SW_OFF_BYPASS 0x20
+#define SP_XTAL_FRQ 0x1c
+# define SP_XTAL_FRQ_19M2 (0x00 << 2)
+# define SP_XTAL_FRQ_24M (0x01 << 2)
+# define SP_XTAL_FRQ_25M (0x02 << 2)
+# define SP_XTAL_FRQ_26M (0x03 << 2)
+# define SP_XTAL_FRQ_27M (0x04 << 2)
+# define SP_XTAL_FRQ_38M4 (0x05 << 2)
+# define SP_XTAL_FRQ_52M (0x06 << 2)
+#define SP_POWERON_TIME_1P5MS 0x03
+
+/* Analog Control 0 Register */
+#define SP_ANALOG_CTRL0_REG 0xe1
+
+/* Common Interrupt Status Register 1 */
+#define SP_COMMON_INT_STATUS_BASE (0xf1 - 1)
+#define SP_PLL_LOCK_CHG 0x40
+
+/* Common Interrupt Status Register 2 */
+#define SP_COMMON_INT_STATUS2 0xf2
+#define SP_HDCP_AUTH_CHG BIT(1)
+#define SP_HDCP_AUTH_DONE BIT(0)
+
+#define SP_HDCP_LINK_CHECK_FAIL BIT(0)
+
+/* Common Interrupt Status Register 4 */
+#define SP_COMMON_INT_STATUS4_REG 0xf4
+#define SP_HPD_IRQ BIT(6)
+#define SP_HPD_ESYNC_ERR BIT(4)
+#define SP_HPD_CHG BIT(2)
+#define SP_HPD_LOST BIT(1)
+#define SP_HPD_PLUG BIT(0)
+
+/* DP Interrupt Status Register */
+#define SP_DP_INT_STATUS1_REG 0xf7
+#define SP_TRAINING_FINISH BIT(5)
+#define SP_POLLING_ERR BIT(4)
+
+/* Common Interrupt Mask Register */
+#define SP_COMMON_INT_MASK_BASE (0xf8 - 1)
+
+#define SP_COMMON_INT_MASK4_REG 0xfb
+
+/* DP Interrupts Mask Register */
+#define SP_DP_INT_MASK1_REG 0xfe
+
+/* Interrupt Control Register */
+#define SP_INT_CTRL_REG 0xff
+
+/***************************************************************/
+/* Register definition of device address 0x7a */
+/***************************************************************/
+
+/* DP TX Link Training Control Register */
+#define SP_DP_TX_LT_CTRL0_REG 0x30
+
+/* DP 1.2 Link Training 80-bit Pattern Register */
+#define SP_DP_LT_80BIT_PATTERN0_REG 0x80
+#define SP_DP_LT_80BIT_PATTERN_REG_NUM 10
+
+/* Audio Interface Control Register 0 */
+#define SP_AUD_INTERFACE_CTRL0_REG 0x5f
+#define SP_AUD_INTERFACE_DISABLE 0x80
+
+/* Audio Interface Control Register 2 */
+#define SP_AUD_INTERFACE_CTRL2_REG 0x60
+#define SP_M_AUD_ADJUST_ST 0x04
+
+/* Audio Interface Control Register 3 */
+#define SP_AUD_INTERFACE_CTRL3_REG 0x62
+
+/* Audio Interface Control Register 4 */
+#define SP_AUD_INTERFACE_CTRL4_REG 0x67
+
+/* Audio Interface Control Register 5 */
+#define SP_AUD_INTERFACE_CTRL5_REG 0x68
+
+/* Audio Interface Control Register 6 */
+#define SP_AUD_INTERFACE_CTRL6_REG 0x69
+
+/* Firmware Version Register */
+#define SP_FW_VER_REG 0xb7
+
+#endif
diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
new file mode 100644
index 000000000..80f286fa3
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/Kconfig
@@ -0,0 +1,3 @@
+config DRM_ANALOGIX_DP
+ tristate
+ depends on DRM
diff --git a/drivers/gpu/drm/bridge/analogix/Makefile b/drivers/gpu/drm/bridge/analogix/Makefile
new file mode 100644
index 000000000..cd4010ba6
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/Makefile
@@ -0,0 +1,2 @@
+analogix_dp-objs := analogix_dp_core.o analogix_dp_reg.o
+obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix_dp.o
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
new file mode 100644
index 000000000..769959707
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -0,0 +1,1430 @@
+/*
+* Analogix DP (Display Port) core interface driver.
+*
+* Copyright (C) 2012 Samsung Electronics Co., Ltd.
+* Author: Jingoo Han <jg1.han@samsung.com>
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License as published by the
+* Free Software Foundation; either version 2 of the License, or (at your
+* option) any later version.
+*/
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/component.h>
+#include <linux/phy/phy.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include <drm/bridge/analogix_dp.h>
+
+#include "analogix_dp_core.h"
+
+#define to_dp(nm) container_of(nm, struct analogix_dp_device, nm)
+
+struct bridge_init {
+ struct i2c_client *client;
+ struct device_node *node;
+};
+
+static void analogix_dp_init_dp(struct analogix_dp_device *dp)
+{
+ analogix_dp_reset(dp);
+
+ analogix_dp_swreset(dp);
+
+ analogix_dp_init_analog_param(dp);
+ analogix_dp_init_interrupt(dp);
+
+ /* SW defined function Normal operation */
+ analogix_dp_enable_sw_function(dp);
+
+ analogix_dp_config_interrupt(dp);
+ analogix_dp_init_analog_func(dp);
+
+ analogix_dp_init_hpd(dp);
+ analogix_dp_init_aux(dp);
+}
+
+static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
+{
+ int timeout_loop = 0;
+
+ while (timeout_loop < DP_TIMEOUT_LOOP_COUNT) {
+ if (analogix_dp_get_plug_in_status(dp) == 0)
+ return 0;
+
+ timeout_loop++;
+ usleep_range(10, 11);
+ }
+
+ /*
+ * Some eDP panels do not have an HPD signal, so we can't simply
+ * return failure when HPD plug-in detection fails. The DT property
+ * "force-hpd" indicates whether the driver needs this workaround.
+ */
+ if (!dp->force_hpd)
+ return -ETIMEDOUT;
+
+ /*
+ * The eDP TRM indicates that if HPD_STATUS (RO) is 0, the AUX CH
+ * will not work, so we need to force HPD to set HPD_STATUS
+ * manually.
+ */
+ dev_dbg(dp->dev, "failed to get hpd plug status, trying to force hpd\n");
+
+ analogix_dp_force_hpd(dp);
+
+ if (analogix_dp_get_plug_in_status(dp) != 0) {
+ dev_err(dp->dev, "failed to get hpd plug in status\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(dp->dev, "success to get plug in status after force hpd\n");
+
+ return 0;
+}
+
+static unsigned char analogix_dp_calc_edid_check_sum(unsigned char *edid_data)
+{
+ int i;
+ unsigned char sum = 0;
+
+ for (i = 0; i < EDID_BLOCK_LENGTH; i++)
+ sum = sum + edid_data[i];
+
+ return sum;
+}
+
+static int analogix_dp_read_edid(struct analogix_dp_device *dp)
+{
+ unsigned char *edid = dp->edid;
+ unsigned int extend_block = 0;
+ unsigned char sum;
+ unsigned char test_vector;
+ int retval;
+
+ /*
+ * The EDID device address is 0x50. However, if necessary, the
+ * E-EDID segment pointer must first be set via the I2C device at
+ * address 0x30.
+ */
+
+ /* Read Extension Flag, Number of 128-byte EDID extension blocks */
+ retval = analogix_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
+ EDID_EXTENSION_FLAG,
+ &extend_block);
+ if (retval)
+ return retval;
+
+ if (extend_block > 0) {
+ dev_dbg(dp->dev, "EDID data includes a single extension!\n");
+
+ /* Read EDID data */
+ retval = analogix_dp_read_bytes_from_i2c(dp,
+ I2C_EDID_DEVICE_ADDR,
+ EDID_HEADER_PATTERN,
+ EDID_BLOCK_LENGTH,
+ &edid[EDID_HEADER_PATTERN]);
+ if (retval != 0) {
+ dev_err(dp->dev, "EDID Read failed!\n");
+ return -EIO;
+ }
+ sum = analogix_dp_calc_edid_check_sum(edid);
+ if (sum != 0) {
+ dev_err(dp->dev, "EDID bad checksum!\n");
+ return -EIO;
+ }
+
+ /* Read additional EDID data */
+ retval = analogix_dp_read_bytes_from_i2c(dp,
+ I2C_EDID_DEVICE_ADDR,
+ EDID_BLOCK_LENGTH,
+ EDID_BLOCK_LENGTH,
+ &edid[EDID_BLOCK_LENGTH]);
+ if (retval != 0) {
+ dev_err(dp->dev, "EDID Read failed!\n");
+ return -EIO;
+ }
+ sum = analogix_dp_calc_edid_check_sum(&edid[EDID_BLOCK_LENGTH]);
+ if (sum != 0) {
+ dev_err(dp->dev, "EDID bad checksum!\n");
+ return -EIO;
+ }
+
+ analogix_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
+ &test_vector);
+ if (test_vector & DP_TEST_LINK_EDID_READ) {
+ analogix_dp_write_byte_to_dpcd(dp,
+ DP_TEST_EDID_CHECKSUM,
+ edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]);
+ analogix_dp_write_byte_to_dpcd(dp,
+ DP_TEST_RESPONSE,
+ DP_TEST_EDID_CHECKSUM_WRITE);
+ }
+ } else {
+ dev_info(dp->dev, "EDID data does not include any extensions.\n");
+
+ /* Read EDID data */
+ retval = analogix_dp_read_bytes_from_i2c(dp,
+ I2C_EDID_DEVICE_ADDR, EDID_HEADER_PATTERN,
+ EDID_BLOCK_LENGTH, &edid[EDID_HEADER_PATTERN]);
+ if (retval != 0) {
+ dev_err(dp->dev, "EDID Read failed!\n");
+ return -EIO;
+ }
+ sum = analogix_dp_calc_edid_check_sum(edid);
+ if (sum != 0) {
+ dev_err(dp->dev, "EDID bad checksum!\n");
+ return -EIO;
+ }
+
+ analogix_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
+ &test_vector);
+ if (test_vector & DP_TEST_LINK_EDID_READ) {
+ analogix_dp_write_byte_to_dpcd(dp,
+ DP_TEST_EDID_CHECKSUM, edid[EDID_CHECKSUM]);
+ analogix_dp_write_byte_to_dpcd(dp,
+ DP_TEST_RESPONSE, DP_TEST_EDID_CHECKSUM_WRITE);
+ }
+ }
+
+ dev_dbg(dp->dev, "EDID Read success!\n");
+ return 0;
+}
+
+static int analogix_dp_handle_edid(struct analogix_dp_device *dp)
+{
+ u8 buf[12];
+ int i;
+ int retval;
+
+ /* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */
+ retval = analogix_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV, 12, buf);
+ if (retval)
+ return retval;
+
+ /* Read the EDID, retrying up to three times */
+ for (i = 0; i < 3; i++) {
+ retval = analogix_dp_read_edid(dp);
+ if (!retval)
+ break;
+ }
+
+ return retval;
+}
+
+static void
+analogix_dp_enable_rx_to_enhanced_mode(struct analogix_dp_device *dp,
+ bool enable)
+{
+ u8 data;
+
+ analogix_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data);
+
+ if (enable)
+ analogix_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
+ DP_LANE_COUNT_ENHANCED_FRAME_EN |
+ DPCD_LANE_COUNT_SET(data));
+ else
+ analogix_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
+ DPCD_LANE_COUNT_SET(data));
+}
+
+static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp)
+{
+ u8 data;
+ int retval;
+
+ analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
+ retval = DPCD_ENHANCED_FRAME_CAP(data);
+
+ return retval;
+}
+
+static void analogix_dp_set_enhanced_mode(struct analogix_dp_device *dp)
+{
+ u8 data;
+
+ data = analogix_dp_is_enhanced_mode_available(dp);
+ analogix_dp_enable_rx_to_enhanced_mode(dp, data);
+ analogix_dp_enable_enhanced_mode(dp, data);
+}
+
+static void analogix_dp_training_pattern_dis(struct analogix_dp_device *dp)
+{
+ analogix_dp_set_training_pattern(dp, DP_NONE);
+
+ analogix_dp_write_byte_to_dpcd(dp, DP_TRAINING_PATTERN_SET,
+ DP_TRAINING_PATTERN_DISABLE);
+}
+
+static void
+analogix_dp_set_lane_lane_pre_emphasis(struct analogix_dp_device *dp,
+ int pre_emphasis, int lane)
+{
+ switch (lane) {
+ case 0:
+ analogix_dp_set_lane0_pre_emphasis(dp, pre_emphasis);
+ break;
+ case 1:
+ analogix_dp_set_lane1_pre_emphasis(dp, pre_emphasis);
+ break;
+
+ case 2:
+ analogix_dp_set_lane2_pre_emphasis(dp, pre_emphasis);
+ break;
+
+ case 3:
+ analogix_dp_set_lane3_pre_emphasis(dp, pre_emphasis);
+ break;
+ }
+}
+
+static int analogix_dp_link_start(struct analogix_dp_device *dp)
+{
+ u8 buf[4];
+ int lane, lane_count, pll_tries, retval;
+
+ lane_count = dp->link_train.lane_count;
+
+ dp->link_train.lt_state = CLOCK_RECOVERY;
+ dp->link_train.eq_loop = 0;
+
+ for (lane = 0; lane < lane_count; lane++)
+ dp->link_train.cr_loop[lane] = 0;
+
+ /* Set the link rate and lane count to be established */
+ analogix_dp_set_link_bandwidth(dp, dp->link_train.link_rate);
+ analogix_dp_set_lane_count(dp, dp->link_train.lane_count);
+
+ /* Setup RX configuration */
+ buf[0] = dp->link_train.link_rate;
+ buf[1] = dp->link_train.lane_count;
+ retval = analogix_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET, 2, buf);
+ if (retval)
+ return retval;
+
+ /* Set TX pre-emphasis to minimum */
+ for (lane = 0; lane < lane_count; lane++)
+ analogix_dp_set_lane_lane_pre_emphasis(dp,
+ PRE_EMPHASIS_LEVEL_0, lane);
+
+ /* Wait for PLL lock */
+ pll_tries = 0;
+ while (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+ if (pll_tries == DP_TIMEOUT_LOOP_COUNT) {
+ dev_err(dp->dev, "Wait for PLL lock timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ pll_tries++;
+ usleep_range(90, 120);
+ }
+
+ /* Set training pattern 1 */
+ analogix_dp_set_training_pattern(dp, TRAINING_PTN1);
+
+ /* Set RX training pattern */
+ retval = analogix_dp_write_byte_to_dpcd(dp,
+ DP_TRAINING_PATTERN_SET,
+ DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1);
+ if (retval)
+ return retval;
+
+ for (lane = 0; lane < lane_count; lane++)
+ buf[lane] = DP_TRAIN_PRE_EMPH_LEVEL_0 |
+ DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+
+ retval = analogix_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
+ lane_count, buf);
+
+ return retval;
+}
+
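+/*
+ * DP_LANE0_1_STATUS packs two lanes per byte, 4 bits each: lane >> 1
+ * selects the byte and (lane & 1) * 4 the shift, so one helper serves
+ * all four lanes.
+ */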
+static unsigned char analogix_dp_get_lane_status(u8 link_status[2], int lane)
+{
+ int shift = (lane & 1) * 4;
+ u8 link_value = link_status[lane >> 1];
+
+ return (link_value >> shift) & 0xf;
+}
+
+static int analogix_dp_clock_recovery_ok(u8 link_status[2], int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = analogix_dp_get_lane_status(link_status, lane);
+ if ((lane_status & DP_LANE_CR_DONE) == 0)
+ return -EINVAL;
+ }
+ return 0;
+}
+
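+/*
+ * Channel equalization passes only when the sink reports inter-lane
+ * alignment and every active lane has all of DP_CHANNEL_EQ_BITS set
+ * (CR done, channel EQ done, symbol lock).
+ */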
+static int analogix_dp_channel_eq_ok(u8 link_status[2], u8 link_align,
+ int lane_count)
+{
+ int lane;
+ u8 lane_status;
+
+ if ((link_align & DP_INTERLANE_ALIGN_DONE) == 0)
+ return -EINVAL;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ lane_status = analogix_dp_get_lane_status(link_status, lane);
+ lane_status &= DP_CHANNEL_EQ_BITS;
+ if (lane_status != DP_CHANNEL_EQ_BITS)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
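+/*
+ * DP_ADJUST_REQUEST_LANE0_1 uses the same two-lanes-per-byte layout:
+ * within each nibble, bits 1:0 carry the requested voltage swing and
+ * bits 3:2 the requested pre-emphasis level.
+ */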
+static unsigned char
+analogix_dp_get_adjust_request_voltage(u8 adjust_request[2], int lane)
+{
+ int shift = (lane & 1) * 4;
+ u8 link_value = adjust_request[lane >> 1];
+
+ return (link_value >> shift) & 0x3;
+}
+
+static unsigned char analogix_dp_get_adjust_request_pre_emphasis(
+ u8 adjust_request[2],
+ int lane)
+{
+ int shift = (lane & 1) * 4;
+ u8 link_value = adjust_request[lane >> 1];
+
+ return ((link_value >> shift) & 0xc) >> 2;
+}
+
+static void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp,
+ u8 training_lane_set, int lane)
+{
+ switch (lane) {
+ case 0:
+ analogix_dp_set_lane0_link_training(dp, training_lane_set);
+ break;
+ case 1:
+ analogix_dp_set_lane1_link_training(dp, training_lane_set);
+ break;
+
+ case 2:
+ analogix_dp_set_lane2_link_training(dp, training_lane_set);
+ break;
+
+ case 3:
+ analogix_dp_set_lane3_link_training(dp, training_lane_set);
+ break;
+ }
+}
+
+static unsigned int
+analogix_dp_get_lane_link_training(struct analogix_dp_device *dp,
+ int lane)
+{
+ u32 reg;
+
+ switch (lane) {
+ case 0:
+ reg = analogix_dp_get_lane0_link_training(dp);
+ break;
+ case 1:
+ reg = analogix_dp_get_lane1_link_training(dp);
+ break;
+ case 2:
+ reg = analogix_dp_get_lane2_link_training(dp);
+ break;
+ case 3:
+ reg = analogix_dp_get_lane3_link_training(dp);
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return reg;
+}
+
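+/*
+ * Note: despite its name, this helper does not actually lower the link
+ * rate; it stops the training pattern, re-applies enhanced mode and
+ * marks the state machine FAILED so that the caller can retry training.
+ */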
+static void analogix_dp_reduce_link_rate(struct analogix_dp_device *dp)
+{
+ analogix_dp_training_pattern_dis(dp);
+ analogix_dp_set_enhanced_mode(dp);
+
+ dp->link_train.lt_state = FAILED;
+}
+
+static void analogix_dp_get_adjust_training_lane(struct analogix_dp_device *dp,
+ u8 adjust_request[2])
+{
+ int lane, lane_count;
+ u8 voltage_swing, pre_emphasis, training_lane;
+
+ lane_count = dp->link_train.lane_count;
+ for (lane = 0; lane < lane_count; lane++) {
+ voltage_swing = analogix_dp_get_adjust_request_voltage(
+ adjust_request, lane);
+ pre_emphasis = analogix_dp_get_adjust_request_pre_emphasis(
+ adjust_request, lane);
+ training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
+ DPCD_PRE_EMPHASIS_SET(pre_emphasis);
+
+ if (voltage_swing == VOLTAGE_LEVEL_3)
+ training_lane |= DP_TRAIN_MAX_SWING_REACHED;
+ if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
+ training_lane |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ dp->link_train.training_lane[lane] = training_lane;
+ }
+}
+
+static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp)
+{
+ int lane, lane_count, retval;
+ u8 voltage_swing, pre_emphasis, training_lane;
+ u8 link_status[2], adjust_request[2];
+
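+ /*
+ * Wait the default 100 us TRAINING_AUX_RD_INTERVAL before polling
+ * the clock-recovery status.
+ */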
+ usleep_range(100, 101);
+
+ lane_count = dp->link_train.lane_count;
+
+ retval = analogix_dp_read_bytes_from_dpcd(dp,
+ DP_LANE0_1_STATUS, 2, link_status);
+ if (retval)
+ return retval;
+
+ retval = analogix_dp_read_bytes_from_dpcd(dp,
+ DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
+ if (retval)
+ return retval;
+
+ if (analogix_dp_clock_recovery_ok(link_status, lane_count) == 0) {
+ /* set training pattern 2 for EQ */
+ analogix_dp_set_training_pattern(dp, TRAINING_PTN2);
+
+ retval = analogix_dp_write_byte_to_dpcd(dp,
+ DP_TRAINING_PATTERN_SET,
+ DP_LINK_SCRAMBLING_DISABLE |
+ DP_TRAINING_PATTERN_2);
+ if (retval)
+ return retval;
+
+ dev_info(dp->dev, "Link Training Clock Recovery success\n");
+ dp->link_train.lt_state = EQUALIZER_TRAINING;
+ } else {
+ for (lane = 0; lane < lane_count; lane++) {
+ training_lane = analogix_dp_get_lane_link_training(
+ dp, lane);
+ voltage_swing = analogix_dp_get_adjust_request_voltage(
+ adjust_request, lane);
+ pre_emphasis = analogix_dp_get_adjust_request_pre_emphasis(
+ adjust_request, lane);
+
+ if (DPCD_VOLTAGE_SWING_GET(training_lane) ==
+ voltage_swing &&
+ DPCD_PRE_EMPHASIS_GET(training_lane) ==
+ pre_emphasis)
+ dp->link_train.cr_loop[lane]++;
+
+ if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP ||
+ voltage_swing == VOLTAGE_LEVEL_3 ||
+ pre_emphasis == PRE_EMPHASIS_LEVEL_3) {
+ dev_err(dp->dev, "CR Max reached (%d,%d,%d)\n",
+ dp->link_train.cr_loop[lane],
+ voltage_swing, pre_emphasis);
+ analogix_dp_reduce_link_rate(dp);
+ return -EIO;
+ }
+ }
+ }
+
+ analogix_dp_get_adjust_training_lane(dp, adjust_request);
+
+ for (lane = 0; lane < lane_count; lane++)
+ analogix_dp_set_lane_link_training(dp,
+ dp->link_train.training_lane[lane], lane);
+
+ retval = analogix_dp_write_bytes_to_dpcd(dp,
+ DP_TRAINING_LANE0_SET, lane_count,
+ dp->link_train.training_lane);
+ if (retval)
+ return retval;
+
+ return retval;
+}
+
+static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
+{
+ int lane, lane_count, retval;
+ u32 reg;
+ u8 link_align, link_status[2], adjust_request[2];
+
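+ /*
+ * Wait the default 400 us TRAINING_AUX_RD_INTERVAL before polling
+ * the channel-equalization status.
+ */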
+ usleep_range(400, 401);
+
+ lane_count = dp->link_train.lane_count;
+
+ retval = analogix_dp_read_bytes_from_dpcd(dp,
+ DP_LANE0_1_STATUS, 2, link_status);
+ if (retval)
+ return retval;
+
+ if (analogix_dp_clock_recovery_ok(link_status, lane_count)) {
+ analogix_dp_reduce_link_rate(dp);
+ return -EIO;
+ }
+
+ retval = analogix_dp_read_bytes_from_dpcd(dp,
+ DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
+ if (retval)
+ return retval;
+
+ retval = analogix_dp_read_byte_from_dpcd(dp,
+ DP_LANE_ALIGN_STATUS_UPDATED, &link_align);
+ if (retval)
+ return retval;
+
+ analogix_dp_get_adjust_training_lane(dp, adjust_request);
+
+ if (!analogix_dp_channel_eq_ok(link_status, link_align, lane_count)) {
+ /* training pattern set to normal */
+ analogix_dp_training_pattern_dis(dp);
+
+ dev_info(dp->dev, "Link Training success!\n");
+
+ analogix_dp_get_link_bandwidth(dp, &reg);
+ dp->link_train.link_rate = reg;
+ dev_dbg(dp->dev, "final bandwidth = %.2x\n",
+ dp->link_train.link_rate);
+
+ analogix_dp_get_lane_count(dp, &reg);
+ dp->link_train.lane_count = reg;
+ dev_dbg(dp->dev, "final lane count = %.2x\n",
+ dp->link_train.lane_count);
+
+ /* set enhanced mode if available */
+ analogix_dp_set_enhanced_mode(dp);
+ dp->link_train.lt_state = FINISHED;
+
+ return 0;
+ }
+
+ /* not all locked */
+ dp->link_train.eq_loop++;
+
+ if (dp->link_train.eq_loop > MAX_EQ_LOOP) {
+ dev_err(dp->dev, "EQ Max loop\n");
+ analogix_dp_reduce_link_rate(dp);
+ return -EIO;
+ }
+
+ for (lane = 0; lane < lane_count; lane++)
+ analogix_dp_set_lane_link_training(dp,
+ dp->link_train.training_lane[lane], lane);
+
+ retval = analogix_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
+ lane_count, dp->link_train.training_lane);
+
+ return retval;
+}
+
+static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp,
+ u8 *bandwidth)
+{
+ u8 data;
+
+ /*
+ * For DP rev.1.1, Maximum link rate of Main Link lanes
+ * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps
+ * For DP rev.1.2, Maximum link rate of Main Link lanes
+ * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps, 0x14 = 5.4 Gbps
+ */
+ analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data);
+ *bandwidth = data;
+}
+
+static void analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp,
+ u8 *lane_count)
+{
+ u8 data;
+
+ /*
+ * For DP rev.1.1, Maximum number of Main Link lanes
+ * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes
+ */
+ analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
+ *lane_count = DPCD_MAX_LANE_COUNT(data);
+}
+
+static void analogix_dp_init_training(struct analogix_dp_device *dp,
+ enum link_lane_count_type max_lane,
+ int max_rate)
+{
+ /*
+ * MACRO_RST must be applied after PLL_LOCK, and held for at least
+ * 10 us, to avoid the DP inter-pair skew issue.
+ */
+ analogix_dp_reset_macro(dp);
+
+ /* Initialize by reading RX's DPCD */
+ analogix_dp_get_max_rx_bandwidth(dp, &dp->link_train.link_rate);
+ analogix_dp_get_max_rx_lane_count(dp, &dp->link_train.lane_count);
+
+ if ((dp->link_train.link_rate != DP_LINK_BW_1_62) &&
+ (dp->link_train.link_rate != DP_LINK_BW_2_7) &&
+ (dp->link_train.link_rate != DP_LINK_BW_5_4)) {
+ dev_err(dp->dev, "Rx Max Link Rate is abnormal :%x !\n",
+ dp->link_train.link_rate);
+ dp->link_train.link_rate = DP_LINK_BW_1_62;
+ }
+
+ if (dp->link_train.lane_count == 0) {
+ dev_err(dp->dev, "Rx Max Lane count is abnormal :%x !\n",
+ dp->link_train.lane_count);
+ dp->link_train.lane_count = (u8)LANE_COUNT1;
+ }
+
+ /* Setup TX lane count & rate */
+ if (dp->link_train.lane_count > max_lane)
+ dp->link_train.lane_count = max_lane;
+ if (dp->link_train.link_rate > max_rate)
+ dp->link_train.link_rate = max_rate;
+
+ /* All DP analog module power up */
+ analogix_dp_set_analog_power_down(dp, POWER_ALL, 0);
+}
+
+static int analogix_dp_sw_link_training(struct analogix_dp_device *dp)
+{
+ int retval = 0, training_finished = 0;
+
+ dp->link_train.lt_state = START;
+
+ /* Run the link-training state machine until it finishes or fails */
+ while (!retval && !training_finished) {
+ switch (dp->link_train.lt_state) {
+ case START:
+ retval = analogix_dp_link_start(dp);
+ if (retval)
+ dev_err(dp->dev, "LT link start failed!\n");
+ break;
+ case CLOCK_RECOVERY:
+ retval = analogix_dp_process_clock_recovery(dp);
+ if (retval)
+ dev_err(dp->dev, "LT CR failed!\n");
+ break;
+ case EQUALIZER_TRAINING:
+ retval = analogix_dp_process_equalizer_training(dp);
+ if (retval)
+ dev_err(dp->dev, "LT EQ failed!\n");
+ break;
+ case FINISHED:
+ training_finished = 1;
+ break;
+ case FAILED:
+ return -EREMOTEIO;
+ }
+ }
+ if (retval)
+ dev_err(dp->dev, "eDP link training failed (%d)\n", retval);
+
+ return retval;
+}
+
+static int analogix_dp_set_link_train(struct analogix_dp_device *dp,
+ u32 count, u32 bwtype)
+{
+ int i;
+ int retval;
+
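+ /* Retry the full software training sequence from scratch on failure */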
+ for (i = 0; i < DP_TIMEOUT_LOOP_COUNT; i++) {
+ analogix_dp_init_training(dp, count, bwtype);
+ retval = analogix_dp_sw_link_training(dp);
+ if (retval == 0)
+ break;
+
+ usleep_range(100, 110);
+ }
+
+ return retval;
+}
+
+static int analogix_dp_config_video(struct analogix_dp_device *dp)
+{
+ int retval = 0;
+ int timeout_loop = 0;
+ int done_count = 0;
+
+ analogix_dp_config_video_slave_mode(dp);
+
+ analogix_dp_set_video_color_format(dp);
+
+ if (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+ dev_err(dp->dev, "PLL is not locked yet.\n");
+ return -EINVAL;
+ }
+
+ for (;;) {
+ timeout_loop++;
+ if (analogix_dp_is_slave_video_stream_clock_on(dp) == 0)
+ break;
+ if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
+ dev_err(dp->dev, "Timeout of video streamclk ok\n");
+ return -ETIMEDOUT;
+ }
+
+ usleep_range(1, 2);
+ }
+
+ /* Set to use the register calculated M/N video */
+ analogix_dp_set_video_cr_mn(dp, CALCULATED_M, 0, 0);
+
+ /* For video bist, Video timing must be generated by register */
+ analogix_dp_set_video_timing_mode(dp, VIDEO_TIMING_FROM_CAPTURE);
+
+ /* Disable video mute */
+ analogix_dp_enable_video_mute(dp, 0);
+
+ /* Configure video slave mode */
+ analogix_dp_enable_video_master(dp, 0);
+
+ timeout_loop = 0;
+
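+ /*
+ * Require ten consecutive "stream on" polls before treating the
+ * video stream as stable; any miss resets the counter.
+ */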
+ for (;;) {
+ timeout_loop++;
+ if (analogix_dp_is_video_stream_on(dp) == 0) {
+ done_count++;
+ if (done_count > 10)
+ break;
+ } else if (done_count) {
+ done_count = 0;
+ }
+ if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) {
+ dev_err(dp->dev, "Timeout of video streamclk ok\n");
+ return -ETIMEDOUT;
+ }
+
+ usleep_range(1000, 1001);
+ }
+
+ if (retval != 0)
+ dev_err(dp->dev, "Video stream is not detected!\n");
+
+ return retval;
+}
+
+static void analogix_dp_enable_scramble(struct analogix_dp_device *dp,
+ bool enable)
+{
+ u8 data;
+
+ if (enable) {
+ analogix_dp_enable_scrambling(dp);
+
+ analogix_dp_read_byte_from_dpcd(dp, DP_TRAINING_PATTERN_SET,
+ &data);
+ analogix_dp_write_byte_to_dpcd(dp,
+ DP_TRAINING_PATTERN_SET,
+ (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
+ } else {
+ analogix_dp_disable_scrambling(dp);
+
+ analogix_dp_read_byte_from_dpcd(dp, DP_TRAINING_PATTERN_SET,
+ &data);
+ analogix_dp_write_byte_to_dpcd(dp,
+ DP_TRAINING_PATTERN_SET,
+ (u8)(data | DP_LINK_SCRAMBLING_DISABLE));
+ }
+}
+
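+/*
+ * The hard IRQ handler only masks further HPD interrupts and defers the
+ * real work to the threaded handler below, which may sleep: it notifies
+ * DRM of the hotplug event, then acks and unmasks the interrupt.
+ */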
+static irqreturn_t analogix_dp_hardirq(int irq, void *arg)
+{
+ struct analogix_dp_device *dp = arg;
+ irqreturn_t ret = IRQ_NONE;
+ enum dp_irq_type irq_type;
+
+ irq_type = analogix_dp_get_irq_type(dp);
+ if (irq_type != DP_IRQ_TYPE_UNKNOWN) {
+ analogix_dp_mute_hpd_interrupt(dp);
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+
+static irqreturn_t analogix_dp_irq_thread(int irq, void *arg)
+{
+ struct analogix_dp_device *dp = arg;
+ enum dp_irq_type irq_type;
+
+ irq_type = analogix_dp_get_irq_type(dp);
+ if (irq_type & DP_IRQ_TYPE_HP_CABLE_IN ||
+ irq_type & DP_IRQ_TYPE_HP_CABLE_OUT) {
+ dev_dbg(dp->dev, "Detected cable status changed!\n");
+ if (dp->drm_dev)
+ drm_helper_hpd_irq_event(dp->drm_dev);
+ }
+
+ if (irq_type != DP_IRQ_TYPE_UNKNOWN) {
+ analogix_dp_clear_hotplug_interrupts(dp);
+ analogix_dp_unmute_hpd_interrupt(dp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void analogix_dp_commit(struct analogix_dp_device *dp)
+{
+ int ret;
+
+ /* Keep the panel disabled while we configure video */
+ if (dp->plat_data->panel) {
+ if (drm_panel_disable(dp->plat_data->panel))
+ DRM_ERROR("failed to disable the panel\n");
+ }
+
+ ret = analogix_dp_set_link_train(dp, dp->video_info.max_lane_count,
+ dp->video_info.max_link_rate);
+ if (ret) {
+ dev_err(dp->dev, "unable to do link train\n");
+ return;
+ }
+
+ analogix_dp_enable_scramble(dp, 1);
+ analogix_dp_enable_rx_to_enhanced_mode(dp, 1);
+ analogix_dp_enable_enhanced_mode(dp, 1);
+
+ analogix_dp_init_video(dp);
+ ret = analogix_dp_config_video(dp);
+ if (ret)
+ dev_err(dp->dev, "unable to config video\n");
+
+ /* Safe to enable the panel now */
+ if (dp->plat_data->panel) {
+ if (drm_panel_enable(dp->plat_data->panel))
+ DRM_ERROR("failed to enable the panel\n");
+ }
+
+ /* Enable video */
+ analogix_dp_start_video(dp);
+}
+
+int analogix_dp_get_modes(struct drm_connector *connector)
+{
+ struct analogix_dp_device *dp = to_dp(connector);
+ struct edid *edid = (struct edid *)dp->edid;
+ int num_modes = 0;
+
+ if (analogix_dp_handle_edid(dp) == 0) {
+ drm_mode_connector_update_edid_property(&dp->connector, edid);
+ num_modes += drm_add_edid_modes(&dp->connector, edid);
+ }
+
+ if (dp->plat_data->panel)
+ num_modes += drm_panel_get_modes(dp->plat_data->panel);
+
+ if (dp->plat_data->get_modes)
+ num_modes += dp->plat_data->get_modes(dp->plat_data);
+
+ return num_modes;
+}
+
+static struct drm_encoder *
+analogix_dp_best_encoder(struct drm_connector *connector)
+{
+ struct analogix_dp_device *dp = to_dp(connector);
+
+ return dp->encoder;
+}
+
+static const struct drm_connector_helper_funcs analogix_dp_connector_helper_funcs = {
+ .get_modes = analogix_dp_get_modes,
+ .best_encoder = analogix_dp_best_encoder,
+};
+
+enum drm_connector_status
+analogix_dp_detect(struct drm_connector *connector, bool force)
+{
+ struct analogix_dp_device *dp = to_dp(connector);
+
+ if (analogix_dp_detect_hpd(dp))
+ return connector_status_disconnected;
+
+ return connector_status_connected;
+}
+
+static void analogix_dp_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs analogix_dp_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = analogix_dp_detect,
+ .destroy = analogix_dp_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
+{
+ struct analogix_dp_device *dp = bridge->driver_private;
+ struct drm_encoder *encoder = dp->encoder;
+ struct drm_connector *connector = &dp->connector;
+ int ret;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(dp->drm_dev, connector,
+ &analogix_dp_connector_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(connector,
+ &analogix_dp_connector_helper_funcs);
+ drm_mode_connector_attach_encoder(connector, encoder);
+
+ /*
+ * NOTE: connector registration is implemented in the analogix
+ * platform driver; that is to say, the connector only exists after
+ * plat_data->attach() returns, which is why we record the
+ * connector pointer after the platform attach.
+ */
+ if (dp->plat_data->attach) {
+ ret = dp->plat_data->attach(dp->plat_data, bridge, connector);
+ if (ret) {
+ DRM_ERROR("Failed at platform attch func\n");
+ return ret;
+ }
+ }
+
+ if (dp->plat_data->panel) {
+ ret = drm_panel_attach(dp->plat_data->panel, &dp->connector);
+ if (ret) {
+ DRM_ERROR("Failed to attach panel\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
+{
+ struct analogix_dp_device *dp = bridge->driver_private;
+
+ if (dp->dpms_mode == DRM_MODE_DPMS_ON)
+ return;
+
+ pm_runtime_get_sync(dp->dev);
+
+ if (dp->plat_data->power_on)
+ dp->plat_data->power_on(dp->plat_data);
+
+ phy_power_on(dp->phy);
+ analogix_dp_init_dp(dp);
+ enable_irq(dp->irq);
+ analogix_dp_commit(dp);
+
+ dp->dpms_mode = DRM_MODE_DPMS_ON;
+}
+
+static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
+{
+ struct analogix_dp_device *dp = bridge->driver_private;
+
+ if (dp->dpms_mode != DRM_MODE_DPMS_ON)
+ return;
+
+ if (dp->plat_data->panel) {
+ if (drm_panel_disable(dp->plat_data->panel)) {
+ DRM_ERROR("failed to disable the panel\n");
+ return;
+ }
+ }
+
+ disable_irq(dp->irq);
+ phy_power_off(dp->phy);
+
+ if (dp->plat_data->power_off)
+ dp->plat_data->power_off(dp->plat_data);
+
+ pm_runtime_put_sync(dp->dev);
+
+ dp->dpms_mode = DRM_MODE_DPMS_OFF;
+}
+
+static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *orig_mode,
+ struct drm_display_mode *mode)
+{
+ struct analogix_dp_device *dp = bridge->driver_private;
+ struct drm_display_info *display_info = &dp->connector.display_info;
+ struct video_info *video = &dp->video_info;
+ struct device_node *dp_node = dp->dev->of_node;
+ int vic;
+
+ /* Input video interlaces & hsync pol & vsync pol */
+ video->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+ video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+
+ /* Input video dynamic_range & colorimetry */
+ vic = drm_match_cea_mode(mode);
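+ /*
+ * CEA VICs 2/3 (480p), 6/7 (480i), 17/18 (576p) and 21/22 (576i)
+ * are SD modes that use BT.601 coefficients; all other CEA modes
+ * use BT.709, and non-CEA modes fall back to VESA dynamic range.
+ */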
+ if ((vic == 6) || (vic == 7) || (vic == 21) || (vic == 22) ||
+ (vic == 2) || (vic == 3) || (vic == 17) || (vic == 18)) {
+ video->dynamic_range = CEA;
+ video->ycbcr_coeff = COLOR_YCBCR601;
+ } else if (vic) {
+ video->dynamic_range = CEA;
+ video->ycbcr_coeff = COLOR_YCBCR709;
+ } else {
+ video->dynamic_range = VESA;
+ video->ycbcr_coeff = COLOR_YCBCR709;
+ }
+
+ /* Input video bpc and color_formats */
+ switch (display_info->bpc) {
+ case 12:
+ video->color_depth = COLOR_12;
+ break;
+ case 10:
+ video->color_depth = COLOR_10;
+ break;
+ case 8:
+ video->color_depth = COLOR_8;
+ break;
+ case 6:
+ video->color_depth = COLOR_6;
+ break;
+ default:
+ video->color_depth = COLOR_8;
+ break;
+ }
+ if (display_info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ video->color_space = COLOR_YCBCR444;
+ else if (display_info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ video->color_space = COLOR_YCBCR422;
+ else
+ video->color_space = COLOR_RGB;
+
+ /*
+ * NOTE: this property parsing code provides backward compatibility
+ * for the Samsung platform.
+ * Because we use the "of_property_read_u32" interface, a missing
+ * property simply leaves the corresponding "video_info" field at
+ * its original value.
+ */
+ of_property_read_u32(dp_node, "samsung,color-space",
+ &video->color_space);
+ of_property_read_u32(dp_node, "samsung,dynamic-range",
+ &video->dynamic_range);
+ of_property_read_u32(dp_node, "samsung,ycbcr-coeff",
+ &video->ycbcr_coeff);
+ of_property_read_u32(dp_node, "samsung,color-depth",
+ &video->color_depth);
+ if (of_property_read_bool(dp_node, "hsync-active-high"))
+ video->h_sync_polarity = true;
+ if (of_property_read_bool(dp_node, "vsync-active-high"))
+ video->v_sync_polarity = true;
+ if (of_property_read_bool(dp_node, "interlaced"))
+ video->interlaced = true;
+}
+
+static void analogix_dp_bridge_nop(struct drm_bridge *bridge)
+{
+ /* do nothing */
+}
+
+static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
+ .enable = analogix_dp_bridge_enable,
+ .disable = analogix_dp_bridge_disable,
+ .pre_enable = analogix_dp_bridge_nop,
+ .post_disable = analogix_dp_bridge_nop,
+ .mode_set = analogix_dp_bridge_mode_set,
+ .attach = analogix_dp_bridge_attach,
+};
+
+static int analogix_dp_create_bridge(struct drm_device *drm_dev,
+ struct analogix_dp_device *dp)
+{
+ struct drm_bridge *bridge;
+ int ret;
+
+ bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge) {
+ DRM_ERROR("failed to allocate for drm bridge\n");
+ return -ENOMEM;
+ }
+
+ dp->bridge = bridge;
+
+ dp->encoder->bridge = bridge;
+ bridge->driver_private = dp;
+ bridge->encoder = dp->encoder;
+ bridge->funcs = &analogix_dp_bridge_funcs;
+
+ ret = drm_bridge_attach(drm_dev, bridge);
+ if (ret) {
+ DRM_ERROR("failed to attach drm bridge\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
+{
+ struct device_node *dp_node = dp->dev->of_node;
+ struct video_info *video_info = &dp->video_info;
+
+ switch (dp->plat_data->dev_type) {
+ case RK3288_DP:
+ /*
+ * The RK3288 DisplayPort TRM indicates that the "Main link
+ * contains 4 physical lanes of 2.7/1.62 Gbps/lane".
+ */
+ video_info->max_link_rate = 0x0A;
+ video_info->max_lane_count = 0x04;
+ break;
+ case EXYNOS_DP:
+ /*
+ * NOTE: this property parsing code provides backward
+ * compatibility for the Samsung platform.
+ */
+ of_property_read_u32(dp_node, "samsung,link-rate",
+ &video_info->max_link_rate);
+ of_property_read_u32(dp_node, "samsung,lane-count",
+ &video_info->max_lane_count);
+ break;
+ }
+
+ return 0;
+}
+
+int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
+ struct analogix_dp_plat_data *plat_data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct analogix_dp_device *dp;
+ struct resource *res;
+ unsigned int irq_flags;
+ int ret;
+
+ if (!plat_data) {
+ dev_err(dev, "Invalided input plat_data\n");
+ return -EINVAL;
+ }
+
+ dp = devm_kzalloc(dev, sizeof(struct analogix_dp_device), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, dp);
+
+ dp->dev = &pdev->dev;
+ dp->dpms_mode = DRM_MODE_DPMS_OFF;
+
+ /*
+ * The platform dp driver needs to container_of() the plat_data to
+ * reach its driver private data, so we must store the pointer to
+ * plat_data itself, not a copy of its contents.
+ */
+ dp->plat_data = plat_data;
+
+ ret = analogix_dp_dt_parse_pdata(dp);
+ if (ret)
+ return ret;
+
+ dp->phy = devm_phy_get(dp->dev, "dp");
+ if (IS_ERR(dp->phy)) {
+ dev_err(dp->dev, "no DP phy configured\n");
+ ret = PTR_ERR(dp->phy);
+ if (ret) {
+ /*
+ * The phy itself is not enabled, so we can move forward by
+ * assigning NULL to the phy pointer.
+ */
+ if (ret == -ENOSYS || ret == -ENODEV)
+ dp->phy = NULL;
+ else
+ return ret;
+ }
+ }
+
+ dp->clock = devm_clk_get(&pdev->dev, "dp");
+ if (IS_ERR(dp->clock)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(dp->clock);
+ }
+
+ clk_prepare_enable(dp->clock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dp->reg_base))
+ return PTR_ERR(dp->reg_base);
+
+ dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");
+
+ dp->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpios", 0);
+ if (!gpio_is_valid(dp->hpd_gpio))
+ dp->hpd_gpio = of_get_named_gpio(dev->of_node,
+ "samsung,hpd-gpio", 0);
+
+ if (gpio_is_valid(dp->hpd_gpio)) {
+ /*
+ * Set up the hotplug GPIO from the device tree as an interrupt.
+ * Simply specifying a different interrupt in the device tree
+ * doesn't work since we handle hotplug rather differently when
+ * using a GPIO. We also need the actual GPIO specifier so
+ * that we can get the current state of the GPIO.
+ */
+ ret = devm_gpio_request_one(&pdev->dev, dp->hpd_gpio, GPIOF_IN,
+ "hpd_gpio");
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get hpd gpio\n");
+ return ret;
+ }
+ dp->irq = gpio_to_irq(dp->hpd_gpio);
+ irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+ } else {
+ dp->hpd_gpio = -ENODEV;
+ dp->irq = platform_get_irq(pdev, 0);
+ irq_flags = 0;
+ }
+
+ if (dp->irq == -ENXIO) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return -ENODEV;
+ }
+
+ pm_runtime_enable(dev);
+
+ phy_power_on(dp->phy);
+
+ if (dp->plat_data->panel) {
+ if (drm_panel_prepare(dp->plat_data->panel)) {
+ DRM_ERROR("failed to setup the panel\n");
+ return -EBUSY;
+ }
+ }
+
+ analogix_dp_init_dp(dp);
+
+ ret = devm_request_threaded_irq(&pdev->dev, dp->irq,
+ analogix_dp_hardirq,
+ analogix_dp_irq_thread,
+ irq_flags, "analogix-dp", dp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ goto err_disable_pm_runtime;
+ }
+ disable_irq(dp->irq);
+
+ dp->drm_dev = drm_dev;
+ dp->encoder = dp->plat_data->encoder;
+
+ ret = analogix_dp_create_bridge(drm_dev, dp);
+ if (ret) {
+ DRM_ERROR("failed to create bridge (%d)\n", ret);
+ drm_encoder_cleanup(dp->encoder);
+ goto err_disable_pm_runtime;
+ }
+
+ return 0;
+
+err_disable_pm_runtime:
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_bind);
+
+void analogix_dp_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct analogix_dp_device *dp = dev_get_drvdata(dev);
+
+ analogix_dp_bridge_disable(dp->bridge);
+
+ if (dp->plat_data->panel) {
+ if (drm_panel_unprepare(dp->plat_data->panel))
+ DRM_ERROR("failed to turnoff the panel\n");
+ }
+
+ pm_runtime_disable(dev);
+}
+EXPORT_SYMBOL_GPL(analogix_dp_unbind);
+
+#ifdef CONFIG_PM
+int analogix_dp_suspend(struct device *dev)
+{
+ struct analogix_dp_device *dp = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(dp->clock);
+
+ if (dp->plat_data->panel) {
+ if (drm_panel_unprepare(dp->plat_data->panel))
+ DRM_ERROR("failed to turnoff the panel\n");
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_suspend);
+
+int analogix_dp_resume(struct device *dev)
+{
+ struct analogix_dp_device *dp = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(dp->clock);
+ if (ret < 0) {
+ DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret);
+ return ret;
+ }
+
+ if (dp->plat_data->panel) {
+ if (drm_panel_prepare(dp->plat_data->panel)) {
+ DRM_ERROR("failed to setup the panel\n");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_resume);
+#endif
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("Analogix DP Core Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
new file mode 100644
index 000000000..f09275d40
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -0,0 +1,281 @@
+/*
+ * Header file for Analogix DP (Display Port) core interface driver.
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _ANALOGIX_DP_CORE_H
+#define _ANALOGIX_DP_CORE_H
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_dp_helper.h>
+
+#define DP_TIMEOUT_LOOP_COUNT 100
+#define MAX_CR_LOOP 5
+#define MAX_EQ_LOOP 5
+
+/* I2C EDID Chip ID, Slave Address */
+#define I2C_EDID_DEVICE_ADDR 0x50
+#define I2C_E_EDID_DEVICE_ADDR 0x30
+
+#define EDID_BLOCK_LENGTH 0x80
+#define EDID_HEADER_PATTERN 0x00
+#define EDID_EXTENSION_FLAG 0x7e
+#define EDID_CHECKSUM 0x7f
+
+/* DP_MAX_LANE_COUNT */
+#define DPCD_ENHANCED_FRAME_CAP(x) (((x) >> 7) & 0x1)
+#define DPCD_MAX_LANE_COUNT(x) ((x) & 0x1f)
+
+/* DP_LANE_COUNT_SET */
+#define DPCD_LANE_COUNT_SET(x) ((x) & 0x1f)
+
+/* DP_TRAINING_LANE0_SET */
+#define DPCD_PRE_EMPHASIS_SET(x) (((x) & 0x3) << 3)
+#define DPCD_PRE_EMPHASIS_GET(x) (((x) >> 3) & 0x3)
+#define DPCD_VOLTAGE_SWING_SET(x) (((x) & 0x3) << 0)
+#define DPCD_VOLTAGE_SWING_GET(x) (((x) >> 0) & 0x3)
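+/*
+ * e.g. DPCD_VOLTAGE_SWING_SET(2) | DPCD_PRE_EMPHASIS_SET(1) builds a
+ * DP_TRAINING_LANEx_SET value requesting swing level 2 with
+ * pre-emphasis level 1.
+ */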
+
+enum link_lane_count_type {
+ LANE_COUNT1 = 1,
+ LANE_COUNT2 = 2,
+ LANE_COUNT4 = 4
+};
+
+enum link_training_state {
+ START,
+ CLOCK_RECOVERY,
+ EQUALIZER_TRAINING,
+ FINISHED,
+ FAILED
+};
+
+enum voltage_swing_level {
+ VOLTAGE_LEVEL_0,
+ VOLTAGE_LEVEL_1,
+ VOLTAGE_LEVEL_2,
+ VOLTAGE_LEVEL_3,
+};
+
+enum pre_emphasis_level {
+ PRE_EMPHASIS_LEVEL_0,
+ PRE_EMPHASIS_LEVEL_1,
+ PRE_EMPHASIS_LEVEL_2,
+ PRE_EMPHASIS_LEVEL_3,
+};
+
+enum pattern_set {
+ PRBS7,
+ D10_2,
+ TRAINING_PTN1,
+ TRAINING_PTN2,
+ DP_NONE
+};
+
+enum color_space {
+ COLOR_RGB,
+ COLOR_YCBCR422,
+ COLOR_YCBCR444
+};
+
+enum color_depth {
+ COLOR_6,
+ COLOR_8,
+ COLOR_10,
+ COLOR_12
+};
+
+enum color_coefficient {
+ COLOR_YCBCR601,
+ COLOR_YCBCR709
+};
+
+enum dynamic_range {
+ VESA,
+ CEA
+};
+
+enum pll_status {
+ PLL_UNLOCKED,
+ PLL_LOCKED
+};
+
+enum clock_recovery_m_value_type {
+ CALCULATED_M,
+ REGISTER_M
+};
+
+enum video_timing_recognition_type {
+ VIDEO_TIMING_FROM_CAPTURE,
+ VIDEO_TIMING_FROM_REGISTER
+};
+
+enum analog_power_block {
+ AUX_BLOCK,
+ CH0_BLOCK,
+ CH1_BLOCK,
+ CH2_BLOCK,
+ CH3_BLOCK,
+ ANALOG_TOTAL,
+ POWER_ALL
+};
+
+enum dp_irq_type {
+ DP_IRQ_TYPE_HP_CABLE_IN,
+ DP_IRQ_TYPE_HP_CABLE_OUT,
+ DP_IRQ_TYPE_HP_CHANGE,
+ DP_IRQ_TYPE_UNKNOWN,
+};
+
+struct video_info {
+ char *name;
+
+ bool h_sync_polarity;
+ bool v_sync_polarity;
+ bool interlaced;
+
+ enum color_space color_space;
+ enum dynamic_range dynamic_range;
+ enum color_coefficient ycbcr_coeff;
+ enum color_depth color_depth;
+
+ int max_link_rate;
+ enum link_lane_count_type max_lane_count;
+};
+
+struct link_train {
+ int eq_loop;
+ int cr_loop[4];
+
+ u8 link_rate;
+ u8 lane_count;
+ u8 training_lane[4];
+
+ enum link_training_state lt_state;
+};
+
+struct analogix_dp_device {
+ struct drm_encoder *encoder;
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct drm_connector connector;
+ struct drm_bridge *bridge;
+ struct clk *clock;
+ unsigned int irq;
+ void __iomem *reg_base;
+
+ struct video_info video_info;
+ struct link_train link_train;
+ struct phy *phy;
+ int dpms_mode;
+ int hpd_gpio;
+ bool force_hpd;
+ unsigned char edid[EDID_BLOCK_LENGTH * 2];
+
+ struct analogix_dp_plat_data *plat_data;
+};
+
+/* analogix_dp_reg.c */
+void analogix_dp_enable_video_mute(struct analogix_dp_device *dp, bool enable);
+void analogix_dp_stop_video(struct analogix_dp_device *dp);
+void analogix_dp_lane_swap(struct analogix_dp_device *dp, bool enable);
+void analogix_dp_init_analog_param(struct analogix_dp_device *dp);
+void analogix_dp_init_interrupt(struct analogix_dp_device *dp);
+void analogix_dp_reset(struct analogix_dp_device *dp);
+void analogix_dp_swreset(struct analogix_dp_device *dp);
+void analogix_dp_config_interrupt(struct analogix_dp_device *dp);
+void analogix_dp_mute_hpd_interrupt(struct analogix_dp_device *dp);
+void analogix_dp_unmute_hpd_interrupt(struct analogix_dp_device *dp);
+enum pll_status analogix_dp_get_pll_lock_status(struct analogix_dp_device *dp);
+void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable);
+void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
+ enum analog_power_block block,
+ bool enable);
+void analogix_dp_init_analog_func(struct analogix_dp_device *dp);
+void analogix_dp_init_hpd(struct analogix_dp_device *dp);
+void analogix_dp_force_hpd(struct analogix_dp_device *dp);
+enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp);
+void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp);
+void analogix_dp_reset_aux(struct analogix_dp_device *dp);
+void analogix_dp_init_aux(struct analogix_dp_device *dp);
+int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp);
+void analogix_dp_enable_sw_function(struct analogix_dp_device *dp);
+int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp);
+int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned char data);
+int analogix_dp_read_byte_from_dpcd(struct analogix_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned char *data);
+int analogix_dp_write_bytes_to_dpcd(struct analogix_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned int count,
+ unsigned char data[]);
+int analogix_dp_read_bytes_from_dpcd(struct analogix_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned int count,
+ unsigned char data[]);
+int analogix_dp_select_i2c_device(struct analogix_dp_device *dp,
+ unsigned int device_addr,
+ unsigned int reg_addr);
+int analogix_dp_read_byte_from_i2c(struct analogix_dp_device *dp,
+ unsigned int device_addr,
+ unsigned int reg_addr,
+ unsigned int *data);
+int analogix_dp_read_bytes_from_i2c(struct analogix_dp_device *dp,
+ unsigned int device_addr,
+ unsigned int reg_addr,
+ unsigned int count,
+ unsigned char edid[]);
+void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype);
+void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype);
+void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count);
+void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count);
+void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp,
+ bool enable);
+void analogix_dp_set_training_pattern(struct analogix_dp_device *dp,
+ enum pattern_set pattern);
+void analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp,
+ u32 level);
+void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp,
+ u32 level);
+void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp,
+ u32 level);
+void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp,
+ u32 level);
+void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp,
+ u32 training_lane);
+void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp,
+ u32 training_lane);
+void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp,
+ u32 training_lane);
+void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp,
+ u32 training_lane);
+u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp);
+u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp);
+u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp);
+u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp);
+void analogix_dp_reset_macro(struct analogix_dp_device *dp);
+void analogix_dp_init_video(struct analogix_dp_device *dp);
+
+void analogix_dp_set_video_color_format(struct analogix_dp_device *dp);
+int analogix_dp_is_slave_video_stream_clock_on(struct analogix_dp_device *dp);
+void analogix_dp_set_video_cr_mn(struct analogix_dp_device *dp,
+ enum clock_recovery_m_value_type type,
+ u32 m_value,
+ u32 n_value);
+void analogix_dp_set_video_timing_mode(struct analogix_dp_device *dp, u32 type);
+void analogix_dp_enable_video_master(struct analogix_dp_device *dp,
+ bool enable);
+void analogix_dp_start_video(struct analogix_dp_device *dp);
+int analogix_dp_is_video_stream_on(struct analogix_dp_device *dp);
+void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp);
+void analogix_dp_enable_scrambling(struct analogix_dp_device *dp);
+void analogix_dp_disable_scrambling(struct analogix_dp_device *dp);
+#endif /* _ANALOGIX_DP_CORE_H */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
new file mode 100644
index 000000000..49205ef02
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -0,0 +1,1320 @@
+/*
+ * Analogix DP (Display port) core register interface driver.
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+
+#include <drm/bridge/analogix_dp.h>
+
+#include "analogix_dp_core.h"
+#include "analogix_dp_reg.h"
+
+#define COMMON_INT_MASK_1 0
+#define COMMON_INT_MASK_2 0
+#define COMMON_INT_MASK_3 0
+#define COMMON_INT_MASK_4 (HOTPLUG_CHG | HPD_LOST | PLUG)
+#define INT_STA_MASK INT_HPD
+
+void analogix_dp_enable_video_mute(struct analogix_dp_device *dp, bool enable)
+{
+ u32 reg;
+
+ if (enable) {
+ reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+ reg |= HDCP_VIDEO_MUTE;
+ writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+ } else {
+ reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+ reg &= ~HDCP_VIDEO_MUTE;
+ writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+ }
+}
+
+void analogix_dp_stop_video(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+ reg &= ~VIDEO_EN;
+ writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+}
+
+void analogix_dp_lane_swap(struct analogix_dp_device *dp, bool enable)
+{
+ u32 reg;
+
+ if (enable)
+ reg = LANE3_MAP_LOGIC_LANE_0 | LANE2_MAP_LOGIC_LANE_1 |
+ LANE1_MAP_LOGIC_LANE_2 | LANE0_MAP_LOGIC_LANE_3;
+ else
+ reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 |
+ LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0;
+
+ writel(reg, dp->reg_base + ANALOGIX_DP_LANE_MAP);
+}
+
+void analogix_dp_init_analog_param(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = TX_TERMINAL_CTRL_50_OHM;
+ writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_1);
+
+ reg = SEL_24M | TX_DVDD_BIT_1_0625V;
+ writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2);
+
+ if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) {
+ writel(REF_CLK_24M, dp->reg_base + ANALOGIX_DP_PLL_REG_1);
+ writel(0x95, dp->reg_base + ANALOGIX_DP_PLL_REG_2);
+ writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3);
+ writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4);
+ writel(0x22, dp->reg_base + ANALOGIX_DP_PLL_REG_5);
+ }
+
+ reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO;
+ writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_3);
+
+ reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
+ TX_CUR1_2X | TX_CUR_16_MA;
+ writel(reg, dp->reg_base + ANALOGIX_DP_PLL_FILTER_CTL_1);
+
+ reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
+ CH1_AMP_400_MV | CH0_AMP_400_MV;
+ writel(reg, dp->reg_base + ANALOGIX_DP_TX_AMP_TUNING_CTL);
+}
+
+void analogix_dp_init_interrupt(struct analogix_dp_device *dp)
+{
+ /* Set interrupt pin assertion polarity as high */
+ writel(INT_POL1 | INT_POL0, dp->reg_base + ANALOGIX_DP_INT_CTL);
+
+ /* Clear pending registers */
+ writel(0xff, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1);
+ writel(0x4f, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_2);
+ writel(0xe0, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_3);
+ writel(0xe7, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4);
+ writel(0x63, dp->reg_base + ANALOGIX_DP_INT_STA);
+
+ /* 0: mask, 1: unmask */
+ writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_1);
+ writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_2);
+ writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_3);
+ writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
+ writel(0x00, dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
+}
+
+void analogix_dp_reset(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ analogix_dp_stop_video(dp);
+ analogix_dp_enable_video_mute(dp, 0);
+
+ reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N |
+ AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N |
+ HDCP_FUNC_EN_N | SW_FUNC_EN_N;
+ writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
+
+ reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N |
+ SERDES_FIFO_FUNC_EN_N |
+ LS_CLK_DOMAIN_FUNC_EN_N;
+ writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+
+ usleep_range(20, 30);
+
+ analogix_dp_lane_swap(dp, 0);
+
+ writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
+ writel(0x40, dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
+ writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+ writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+
+ writel(0x0, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+ writel(0x0, dp->reg_base + ANALOGIX_DP_HDCP_CTL);
+
+ writel(0x5e, dp->reg_base + ANALOGIX_DP_HPD_DEGLITCH_L);
+ writel(0x1a, dp->reg_base + ANALOGIX_DP_HPD_DEGLITCH_H);
+
+ writel(0x10, dp->reg_base + ANALOGIX_DP_LINK_DEBUG_CTL);
+
+ writel(0x0, dp->reg_base + ANALOGIX_DP_PHY_TEST);
+
+ writel(0x0, dp->reg_base + ANALOGIX_DP_VIDEO_FIFO_THRD);
+ writel(0x20, dp->reg_base + ANALOGIX_DP_AUDIO_MARGIN);
+
+ writel(0x4, dp->reg_base + ANALOGIX_DP_M_VID_GEN_FILTER_TH);
+ writel(0x2, dp->reg_base + ANALOGIX_DP_M_AUD_GEN_FILTER_TH);
+
+ writel(0x00000101, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
+}
+
+void analogix_dp_swreset(struct analogix_dp_device *dp)
+{
+ writel(RESET_DP_TX, dp->reg_base + ANALOGIX_DP_TX_SW_RESET);
+}
+
+void analogix_dp_config_interrupt(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ /* 0: mask, 1: unmask */
+ reg = COMMON_INT_MASK_1;
+ writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_1);
+
+ reg = COMMON_INT_MASK_2;
+ writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_2);
+
+ reg = COMMON_INT_MASK_3;
+ writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_3);
+
+ reg = COMMON_INT_MASK_4;
+ writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
+
+ reg = INT_STA_MASK;
+ writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
+}
+
+void analogix_dp_mute_hpd_interrupt(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ /* 0: mask, 1: unmask */
+ reg = readl(dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
+ reg &= ~COMMON_INT_MASK_4;
+ writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
+ reg &= ~INT_STA_MASK;
+ writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
+}
+
+void analogix_dp_unmute_hpd_interrupt(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ /* 0: mask, 1: unmask */
+ reg = COMMON_INT_MASK_4;
+ writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4);
+
+ reg = INT_STA_MASK;
+ writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK);
+}
+
+enum pll_status analogix_dp_get_pll_lock_status(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_DEBUG_CTL);
+ if (reg & PLL_LOCK)
+ return PLL_LOCKED;
+ else
+ return PLL_UNLOCKED;
+}
+
+void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable)
+{
+ u32 reg;
+
+ if (enable) {
+ reg = readl(dp->reg_base + ANALOGIX_DP_PLL_CTL);
+ reg |= DP_PLL_PD;
+ writel(reg, dp->reg_base + ANALOGIX_DP_PLL_CTL);
+ } else {
+ reg = readl(dp->reg_base + ANALOGIX_DP_PLL_CTL);
+ reg &= ~DP_PLL_PD;
+ writel(reg, dp->reg_base + ANALOGIX_DP_PLL_CTL);
+ }
+}
+
+void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
+ enum analog_power_block block,
+ bool enable)
+{
+ u32 reg;
+ u32 phy_pd_addr = ANALOGIX_DP_PHY_PD;
+
+ if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+ phy_pd_addr = ANALOGIX_DP_PD;
+
+ switch (block) {
+ case AUX_BLOCK:
+ if (enable) {
+ reg = readl(dp->reg_base + phy_pd_addr);
+ reg |= AUX_PD;
+ writel(reg, dp->reg_base + phy_pd_addr);
+ } else {
+ reg = readl(dp->reg_base + phy_pd_addr);
+ reg &= ~AUX_PD;
+ writel(reg, dp->reg_base + phy_pd_addr);
+ }
+ break;
+ case CH0_BLOCK:
+ if (enable) {
+ reg = readl(dp->reg_base + phy_pd_addr);
+ reg |= CH0_PD;
+ writel(reg, dp->reg_base + phy_pd_addr);
+ } else {
+ reg = readl(dp->reg_base + phy_pd_addr);
+ reg &= ~CH0_PD;
+ writel(reg, dp->reg_base + phy_pd_addr);
+ }
+ break;
+ case CH1_BLOCK:
+ if (enable) {
+ reg = readl(dp->reg_base + phy_pd_addr);
+ reg |= CH1_PD;
+ writel(reg, dp->reg_base + phy_pd_addr);
+ } else {
+ reg = readl(dp->reg_base + phy_pd_addr);
+ reg &= ~CH1_PD;
+ writel(reg, dp->reg_base + phy_pd_addr);
+ }
+ break;
+ case CH2_BLOCK:
+ if (enable) {
+ reg = readl(dp->reg_base + phy_pd_addr);
+ reg |= CH2_PD;
+ writel(reg, dp->reg_base + phy_pd_addr);
+ } else {
+ reg = readl(dp->reg_base + phy_pd_addr);
+ reg &= ~CH2_PD;
+ writel(reg, dp->reg_base + phy_pd_addr);
+ }
+ break;
+ case CH3_BLOCK:
+ if (enable) {
+ reg = readl(dp->reg_base + phy_pd_addr);
+ reg |= CH3_PD;
+ writel(reg, dp->reg_base + phy_pd_addr);
+ } else {
+ reg = readl(dp->reg_base + phy_pd_addr);
+ reg &= ~CH3_PD;
+ writel(reg, dp->reg_base + phy_pd_addr);
+ }
+ break;
+ case ANALOG_TOTAL:
+ if (enable) {
+ reg = readl(dp->reg_base + phy_pd_addr);
+ reg |= DP_PHY_PD;
+ writel(reg, dp->reg_base + phy_pd_addr);
+ } else {
+ reg = readl(dp->reg_base + phy_pd_addr);
+ reg &= ~DP_PHY_PD;
+ writel(reg, dp->reg_base + phy_pd_addr);
+ }
+ break;
+ case POWER_ALL:
+ if (enable) {
+ reg = DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD |
+ CH1_PD | CH0_PD;
+ writel(reg, dp->reg_base + phy_pd_addr);
+ } else {
+ writel(0x00, dp->reg_base + phy_pd_addr);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void analogix_dp_init_analog_func(struct analogix_dp_device *dp)
+{
+ u32 reg;
+ int timeout_loop = 0;
+
+ analogix_dp_set_analog_power_down(dp, POWER_ALL, 0);
+
+ reg = PLL_LOCK_CHG;
+ writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1);
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_DEBUG_CTL);
+ reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL);
+ writel(reg, dp->reg_base + ANALOGIX_DP_DEBUG_CTL);
+
+ /* Power up PLL */
+ if (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+ analogix_dp_set_pll_power_down(dp, 0);
+
+ while (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+ timeout_loop++;
+ if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
+ dev_err(dp->dev, "failed to get pll lock status\n");
+ return;
+ }
+ usleep_range(10, 20);
+ }
+ }
+
+ /* Enable Serdes FIFO function and Link symbol clock domain module */
+ reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+ reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
+ | AUX_FUNC_EN_N);
+ writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+}
+
+void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ if (gpio_is_valid(dp->hpd_gpio))
+ return;
+
+ reg = HOTPLUG_CHG | HPD_LOST | PLUG;
+ writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4);
+
+ reg = INT_HPD;
+ writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA);
+}
+
+void analogix_dp_init_hpd(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ if (gpio_is_valid(dp->hpd_gpio))
+ return;
+
+ analogix_dp_clear_hotplug_interrupts(dp);
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+ reg &= ~(F_HPD | HPD_CTRL);
+ writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+}
+
+void analogix_dp_force_hpd(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+ reg = (F_HPD | HPD_CTRL);
+ writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+}
+
+enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ if (gpio_is_valid(dp->hpd_gpio)) {
+ reg = gpio_get_value(dp->hpd_gpio);
+ if (reg)
+ return DP_IRQ_TYPE_HP_CABLE_IN;
+ else
+ return DP_IRQ_TYPE_HP_CABLE_OUT;
+ } else {
+ /* Parse hotplug interrupt status register */
+ reg = readl(dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4);
+
+ if (reg & PLUG)
+ return DP_IRQ_TYPE_HP_CABLE_IN;
+
+ if (reg & HPD_LOST)
+ return DP_IRQ_TYPE_HP_CABLE_OUT;
+
+ if (reg & HOTPLUG_CHG)
+ return DP_IRQ_TYPE_HP_CHANGE;
+
+ return DP_IRQ_TYPE_UNKNOWN;
+ }
+}
+
+void analogix_dp_reset_aux(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ /* Disable AUX channel module */
+ reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+ reg |= AUX_FUNC_EN_N;
+ writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+}
+
+void analogix_dp_init_aux(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ /* Clear interrupts related to AUX channel */
+ reg = RPLY_RECEIV | AUX_ERR;
+ writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA);
+
+ analogix_dp_reset_aux(dp);
+
+ /* Disable AUX transaction H/W retry */
+ if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+ reg = AUX_BIT_PERIOD_EXPECTED_DELAY(0) |
+ AUX_HW_RETRY_COUNT_SEL(3) |
+ AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
+ else
+ reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) |
+ AUX_HW_RETRY_COUNT_SEL(0) |
+ AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_HW_RETRY_CTL);
+
+ /* Receive AUX Channel DEFER commands equal to DEFER_COUNT * 64 */
+ reg = DEFER_CTRL_EN | DEFER_COUNT(1);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_DEFER_CTL);
+
+ /* Enable AUX channel module */
+ reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+ reg &= ~AUX_FUNC_EN_N;
+ writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+}
+
+int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ if (gpio_is_valid(dp->hpd_gpio)) {
+ if (gpio_get_value(dp->hpd_gpio))
+ return 0;
+ } else {
+ reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+ if (reg & HPD_STATUS)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+void analogix_dp_enable_sw_function(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
+ reg &= ~SW_FUNC_EN_N;
+ writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
+}
+
+int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp)
+{
+ int reg;
+ int retval = 0;
+ int timeout_loop = 0;
+
+ /* Enable AUX CH operation */
+ reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
+ reg |= AUX_EN;
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
+
+ /* Is AUX CH command reply received? */
+ reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+ while (!(reg & RPLY_RECEIV)) {
+ timeout_loop++;
+ if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
+ dev_err(dp->dev, "AUX CH command reply failed!\n");
+ return -ETIMEDOUT;
+ }
+ reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+ usleep_range(10, 11);
+ }
+
+ /* Clear interrupt source for AUX CH command reply */
+ writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA);
+
+ /* Clear interrupt source for AUX CH access error */
+ reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA);
+ if (reg & AUX_ERR) {
+ writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA);
+ return -EREMOTEIO;
+ }
+
+ /* Check AUX CH access error status */
+ reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA);
+ if ((reg & AUX_STATUS_MASK) != 0) {
+ dev_err(dp->dev, "AUX CH error happens: %d\n\n",
+ reg & AUX_STATUS_MASK);
+ return -EREMOTEIO;
+ }
+
+ return retval;
+}
+
+int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned char data)
+{
+ u32 reg;
+ int i;
+ int retval;
+
+ for (i = 0; i < 3; i++) {
+ /* Clear AUX CH data buffer */
+ reg = BUF_CLR;
+ writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+ /* Select DPCD device address */
+ reg = AUX_ADDR_7_0(reg_addr);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
+ reg = AUX_ADDR_15_8(reg_addr);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
+ reg = AUX_ADDR_19_16(reg_addr);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
+
+ /* Write data buffer */
+ reg = (unsigned int)data;
+ writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
+
+ /*
+ * Set DisplayPort transaction and write 1 byte
+ * If bit 3 is 1, DisplayPort transaction.
+ * If bit 3 is 0, I2C transaction.
+ */
+ reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+ /* Start AUX transaction */
+ retval = analogix_dp_start_aux_transaction(dp);
+ if (retval == 0)
+ break;
+
+ dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
+ }
+
+ return retval;
+}
+
+int analogix_dp_read_byte_from_dpcd(struct analogix_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned char *data)
+{
+ u32 reg;
+ int i;
+ int retval;
+
+ for (i = 0; i < 3; i++) {
+ /* Clear AUX CH data buffer */
+ reg = BUF_CLR;
+ writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+ /* Select DPCD device address */
+ reg = AUX_ADDR_7_0(reg_addr);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
+ reg = AUX_ADDR_15_8(reg_addr);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
+ reg = AUX_ADDR_19_16(reg_addr);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
+
+ /*
+ * Set DisplayPort transaction and read 1 byte
+ * If bit 3 is 1, DisplayPort transaction.
+ * If bit 3 is 0, I2C transaction.
+ */
+ reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+ /* Start AUX transaction */
+ retval = analogix_dp_start_aux_transaction(dp);
+ if (retval == 0)
+ break;
+
+ dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
+ }
+
+ /* Read data buffer */
+ reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
+ *data = (unsigned char)(reg & 0xff);
+
+ return retval;
+}
+
+int analogix_dp_write_bytes_to_dpcd(struct analogix_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned int count,
+ unsigned char data[])
+{
+ u32 reg;
+ unsigned int start_offset;
+ unsigned int cur_data_count;
+ unsigned int cur_data_idx;
+ int i;
+ int retval = 0;
+
+ /* Clear AUX CH data buffer */
+ reg = BUF_CLR;
+ writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+ start_offset = 0;
+ while (start_offset < count) {
+ /* The AUX CH data buffer holds 16 bytes, one per 4-byte register */
+ if ((count - start_offset) > 16)
+ cur_data_count = 16;
+ else
+ cur_data_count = count - start_offset;
+
+ for (i = 0; i < 3; i++) {
+ /* Select DPCD device address */
+ reg = AUX_ADDR_7_0(reg_addr + start_offset);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
+ reg = AUX_ADDR_15_8(reg_addr + start_offset);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
+ reg = AUX_ADDR_19_16(reg_addr + start_offset);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
+
+ for (cur_data_idx = 0; cur_data_idx < cur_data_count;
+ cur_data_idx++) {
+ reg = data[start_offset + cur_data_idx];
+ writel(reg, dp->reg_base +
+ ANALOGIX_DP_BUF_DATA_0 +
+ 4 * cur_data_idx);
+ }
+
+ /*
+ * Set DisplayPort transaction and write
+ * If bit 3 is 1, DisplayPort transaction.
+ * If bit 3 is 0, I2C transaction.
+ */
+ reg = AUX_LENGTH(cur_data_count) |
+ AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+ /* Start AUX transaction */
+ retval = analogix_dp_start_aux_transaction(dp);
+ if (retval == 0)
+ break;
+
+ dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
+ __func__);
+ }
+
+ start_offset += cur_data_count;
+ }
+
+ return retval;
+}
+
+int analogix_dp_read_bytes_from_dpcd(struct analogix_dp_device *dp,
+ unsigned int reg_addr,
+ unsigned int count,
+ unsigned char data[])
+{
+ u32 reg;
+ unsigned int start_offset;
+ unsigned int cur_data_count;
+ unsigned int cur_data_idx;
+ int i;
+ int retval = 0;
+
+ /* Clear AUX CH data buffer */
+ reg = BUF_CLR;
+ writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+ start_offset = 0;
+ while (start_offset < count) {
+ /* The AUX CH data buffer holds 16 bytes, one per 4-byte register */
+ if ((count - start_offset) > 16)
+ cur_data_count = 16;
+ else
+ cur_data_count = count - start_offset;
+
+ /* AUX CH Request Transaction process */
+ for (i = 0; i < 3; i++) {
+ /* Select DPCD device address */
+ reg = AUX_ADDR_7_0(reg_addr + start_offset);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
+ reg = AUX_ADDR_15_8(reg_addr + start_offset);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
+ reg = AUX_ADDR_19_16(reg_addr + start_offset);
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
+
+ /*
+ * Set DisplayPort transaction and read
+ * If bit 3 is 1, DisplayPort transaction.
+			 * If bit 3 is 0, I2C transaction.
+ */
+ reg = AUX_LENGTH(cur_data_count) |
+ AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+ /* Start AUX transaction */
+ retval = analogix_dp_start_aux_transaction(dp);
+ if (retval == 0)
+ break;
+
+ dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
+ __func__);
+ }
+
+ for (cur_data_idx = 0; cur_data_idx < cur_data_count;
+ cur_data_idx++) {
+ reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0
+ + 4 * cur_data_idx);
+ data[start_offset + cur_data_idx] =
+ (unsigned char)reg;
+ }
+
+ start_offset += cur_data_count;
+ }
+
+ return retval;
+}
+
+int analogix_dp_select_i2c_device(struct analogix_dp_device *dp,
+ unsigned int device_addr,
+ unsigned int reg_addr)
+{
+ u32 reg;
+ int retval;
+
+ /* Set EDID device address */
+ reg = device_addr;
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0);
+ writel(0x0, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8);
+ writel(0x0, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16);
+
+ /* Set offset from base address of EDID device */
+ writel(reg_addr, dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
+
+ /*
+ * Set I2C transaction and write address
+ * If bit 3 is 1, DisplayPort transaction.
+	 * If bit 3 is 0, I2C transaction.
+ */
+ reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT |
+ AUX_TX_COMM_WRITE;
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+ /* Start AUX transaction */
+ retval = analogix_dp_start_aux_transaction(dp);
+ if (retval != 0)
+ dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
+
+ return retval;
+}
+
+int analogix_dp_read_byte_from_i2c(struct analogix_dp_device *dp,
+ unsigned int device_addr,
+ unsigned int reg_addr,
+ unsigned int *data)
+{
+ u32 reg;
+ int i;
+ int retval;
+
+ for (i = 0; i < 3; i++) {
+ /* Clear AUX CH data buffer */
+ reg = BUF_CLR;
+ writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+ /* Select EDID device */
+ retval = analogix_dp_select_i2c_device(dp, device_addr,
+ reg_addr);
+ if (retval != 0)
+ continue;
+
+ /*
+ * Set I2C transaction and read data
+ * If bit 3 is 1, DisplayPort transaction.
+		 * If bit 3 is 0, I2C transaction.
+ */
+ reg = AUX_TX_COMM_I2C_TRANSACTION |
+ AUX_TX_COMM_READ;
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1);
+
+ /* Start AUX transaction */
+ retval = analogix_dp_start_aux_transaction(dp);
+ if (retval == 0)
+ break;
+
+ dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
+ }
+
+ /* Read data */
+ if (retval == 0)
+ *data = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0);
+
+ return retval;
+}
+
+int analogix_dp_read_bytes_from_i2c(struct analogix_dp_device *dp,
+ unsigned int device_addr,
+ unsigned int reg_addr,
+ unsigned int count,
+ unsigned char edid[])
+{
+ u32 reg;
+ unsigned int i, j;
+ unsigned int cur_data_idx;
+ unsigned int defer = 0;
+ int retval = 0;
+
+ for (i = 0; i < count; i += 16) {
+ for (j = 0; j < 3; j++) {
+ /* Clear AUX CH data buffer */
+ reg = BUF_CLR;
+ writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL);
+
+ /* Set normal AUX CH command */
+ reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
+ reg &= ~ADDR_ONLY;
+ writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
+
+ /*
+			 * If Rx sends defer, Tx sends only the read
+			 * request without resending the address
+ */
+ if (!defer)
+ retval = analogix_dp_select_i2c_device(dp,
+ device_addr, reg_addr + i);
+ else
+ defer = 0;
+
+ if (retval == 0) {
+ /*
+ * Set I2C transaction and write data
+ * If bit 3 is 1, DisplayPort transaction.
+				 * If bit 3 is 0, I2C transaction.
+ */
+ reg = AUX_LENGTH(16) |
+ AUX_TX_COMM_I2C_TRANSACTION |
+ AUX_TX_COMM_READ;
+ writel(reg, dp->reg_base +
+ ANALOGIX_DP_AUX_CH_CTL_1);
+
+ /* Start AUX transaction */
+ retval = analogix_dp_start_aux_transaction(dp);
+ if (retval == 0)
+ break;
+
+ dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
+ __func__);
+ }
+ /* Check if Rx sends defer */
+ reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM);
+ if (reg == AUX_RX_COMM_AUX_DEFER ||
+ reg == AUX_RX_COMM_I2C_DEFER) {
+ dev_err(dp->dev, "Defer: %d\n\n", reg);
+ defer = 1;
+ }
+ }
+
+ for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) {
+ reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0
+ + 4 * cur_data_idx);
+ edid[i + cur_data_idx] = (unsigned char)reg;
+ }
+ }
+
+ return retval;
+}
+
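The I2C read path above pulls EDID in 16-byte AUX bursts; on an AUX/I2C DEFER reply it retries the read without re-issuing the address-only write. A minimal sketch of a hypothetical caller (0x50 is the standard DDC slave address; EDID_LENGTH is 128, from <drm/drm_edid.h>):

	/* Sketch only: fetch the base 128-byte EDID block; checksum
	 * validation is left to the caller. */
	static int example_read_edid_block(struct analogix_dp_device *dp,
					   unsigned char *buf)
	{
		return analogix_dp_read_bytes_from_i2c(dp, 0x50, 0,
						       EDID_LENGTH, buf);
	}
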
+void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype)
+{
+ u32 reg;
+
+ reg = bwtype;
+ if ((bwtype == DP_LINK_BW_2_7) || (bwtype == DP_LINK_BW_1_62))
+ writel(reg, dp->reg_base + ANALOGIX_DP_LINK_BW_SET);
+}
+
+void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_LINK_BW_SET);
+ *bwtype = reg;
+}
+
+void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count)
+{
+ u32 reg;
+
+ reg = count;
+ writel(reg, dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET);
+}
+
+void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET);
+ *count = reg;
+}
+
+void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp,
+ bool enable)
+{
+ u32 reg;
+
+ if (enable) {
+ reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+ reg |= ENHANCED;
+ writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+ } else {
+ reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+ reg &= ~ENHANCED;
+ writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+ }
+}
+
+void analogix_dp_set_training_pattern(struct analogix_dp_device *dp,
+ enum pattern_set pattern)
+{
+ u32 reg;
+
+ switch (pattern) {
+ case PRBS7:
+ reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7;
+ writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+ break;
+ case D10_2:
+ reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_D10_2;
+ writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+ break;
+ case TRAINING_PTN1:
+ reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1;
+ writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+ break;
+ case TRAINING_PTN2:
+ reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2;
+ writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+ break;
+ case DP_NONE:
+ reg = SCRAMBLING_ENABLE |
+ LINK_QUAL_PATTERN_SET_DISABLE |
+ SW_TRAINING_PATTERN_SET_NORMAL;
+ writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+ break;
+ default:
+ break;
+ }
+}
+
+void analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp,
+ u32 level)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
+ reg &= ~PRE_EMPHASIS_SET_MASK;
+ reg |= level << PRE_EMPHASIS_SET_SHIFT;
+ writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
+}
+
+void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp,
+ u32 level)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
+ reg &= ~PRE_EMPHASIS_SET_MASK;
+ reg |= level << PRE_EMPHASIS_SET_SHIFT;
+ writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
+}
+
+void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp,
+ u32 level)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
+ reg &= ~PRE_EMPHASIS_SET_MASK;
+ reg |= level << PRE_EMPHASIS_SET_SHIFT;
+ writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
+}
+
+void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp,
+ u32 level)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
+ reg &= ~PRE_EMPHASIS_SET_MASK;
+ reg |= level << PRE_EMPHASIS_SET_SHIFT;
+ writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
+}
+
+void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp,
+ u32 training_lane)
+{
+ u32 reg;
+
+ reg = training_lane;
+ writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
+}
+
+void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp,
+ u32 training_lane)
+{
+ u32 reg;
+
+ reg = training_lane;
+ writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
+}
+
+void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp,
+ u32 training_lane)
+{
+ u32 reg;
+
+ reg = training_lane;
+ writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
+}
+
+void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp,
+ u32 training_lane)
+{
+ u32 reg;
+
+ reg = training_lane;
+ writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
+}
+
+u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL);
+ return reg;
+}
+
+u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL);
+ return reg;
+}
+
+u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL);
+ return reg;
+}
+
+u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL);
+ return reg;
+}
+
+void analogix_dp_reset_macro(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_PHY_TEST);
+ reg |= MACRO_RST;
+ writel(reg, dp->reg_base + ANALOGIX_DP_PHY_TEST);
+
+ /* 10 us is the minimum reset time. */
+ usleep_range(10, 20);
+
+ reg &= ~MACRO_RST;
+ writel(reg, dp->reg_base + ANALOGIX_DP_PHY_TEST);
+}
+
+void analogix_dp_init_video(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG;
+ writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1);
+
+ reg = 0x0;
+ writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
+
+ reg = CHA_CRI(4) | CHA_CTRL;
+ writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
+
+ reg = 0x0;
+ writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+
+ reg = VID_HRES_TH(2) | VID_VRES_TH(0);
+ writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_8);
+}
+
+void analogix_dp_set_video_color_format(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ /* Configure the input color depth, color space, dynamic range */
+ reg = (dp->video_info.dynamic_range << IN_D_RANGE_SHIFT) |
+ (dp->video_info.color_depth << IN_BPC_SHIFT) |
+ (dp->video_info.color_space << IN_COLOR_F_SHIFT);
+ writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_2);
+
+ /* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */
+ reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
+ reg &= ~IN_YC_COEFFI_MASK;
+ if (dp->video_info.ycbcr_coeff)
+ reg |= IN_YC_COEFFI_ITU709;
+ else
+ reg |= IN_YC_COEFFI_ITU601;
+ writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3);
+}
+
+int analogix_dp_is_slave_video_stream_clock_on(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
+ writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_1);
+
+ if (!(reg & DET_STA)) {
+ dev_dbg(dp->dev, "Input stream clock not detected.\n");
+ return -EINVAL;
+ }
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
+ writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_2);
+ dev_dbg(dp->dev, "wait SYS_CTL_2.\n");
+
+ if (reg & CHA_STA) {
+ dev_dbg(dp->dev, "Input stream clk is changing\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void analogix_dp_set_video_cr_mn(struct analogix_dp_device *dp,
+ enum clock_recovery_m_value_type type,
+ u32 m_value, u32 n_value)
+{
+ u32 reg;
+
+ if (type == REGISTER_M) {
+ reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+ reg |= FIX_M_VID;
+ writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+ reg = m_value & 0xff;
+ writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_0);
+ reg = (m_value >> 8) & 0xff;
+ writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_1);
+ reg = (m_value >> 16) & 0xff;
+ writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_2);
+
+ reg = n_value & 0xff;
+ writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_0);
+ reg = (n_value >> 8) & 0xff;
+ writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_1);
+ reg = (n_value >> 16) & 0xff;
+ writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_2);
+ } else {
+ reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+ reg &= ~FIX_M_VID;
+ writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4);
+
+ writel(0x00, dp->reg_base + ANALOGIX_DP_N_VID_0);
+ writel(0x80, dp->reg_base + ANALOGIX_DP_N_VID_1);
+ writel(0x00, dp->reg_base + ANALOGIX_DP_N_VID_2);
+ }
+}
+
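In the REGISTER_M branch the 24-bit M value is split into three byte-wide registers, while the calculated branch lets the hardware derive M and pins N to 0x008000 (32768). Since the M_VID and N_VID registers sit at consecutive 4-byte offsets (0x700/0x704/0x708 and 0x70C/0x710/0x714), the byte split could be factored into a helper; a sketch, not part of the patch:

	/* Sketch only: write a 24-bit value into three consecutive
	 * byte-wide registers, e.g. 0x012345 lands as 0x45 / 0x23 / 0x01
	 * in the *_0 / *_1 / *_2 registers respectively. */
	static void example_write_24bit(struct analogix_dp_device *dp,
					u32 base, u32 value)
	{
		writel(value & 0xff, dp->reg_base + base);
		writel((value >> 8) & 0xff, dp->reg_base + base + 4);
		writel((value >> 16) & 0xff, dp->reg_base + base + 8);
	}
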
+void analogix_dp_set_video_timing_mode(struct analogix_dp_device *dp, u32 type)
+{
+ u32 reg;
+
+ if (type == VIDEO_TIMING_FROM_CAPTURE) {
+ reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+ reg &= ~FORMAT_SEL;
+ writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+ } else {
+ reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+ reg |= FORMAT_SEL;
+ writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+ }
+}
+
+void analogix_dp_enable_video_master(struct analogix_dp_device *dp, bool enable)
+{
+ u32 reg;
+
+ if (enable) {
+ reg = readl(dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
+ reg &= ~VIDEO_MODE_MASK;
+ reg |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE;
+ writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
+ } else {
+ reg = readl(dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
+ reg &= ~VIDEO_MODE_MASK;
+ reg |= VIDEO_MODE_SLAVE_MODE;
+ writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
+ }
+}
+
+void analogix_dp_start_video(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+ reg |= VIDEO_EN;
+ writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1);
+}
+
+int analogix_dp_is_video_stream_on(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+ writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+ if (!(reg & STRM_VALID)) {
+ dev_dbg(dp->dev, "Input video stream is not detected.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
+ reg &= ~(MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N);
+ reg |= MASTER_VID_FUNC_EN_N;
+ writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+ reg &= ~INTERACE_SCAN_CFG;
+ reg |= (dp->video_info.interlaced << 2);
+ writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+ reg &= ~VSYNC_POLARITY_CFG;
+ reg |= (dp->video_info.v_sync_polarity << 1);
+ writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+ reg &= ~HSYNC_POLARITY_CFG;
+ reg |= (dp->video_info.h_sync_polarity << 0);
+ writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10);
+
+ reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE;
+ writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL);
+}
+
+void analogix_dp_enable_scrambling(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+ reg &= ~SCRAMBLING_DISABLE;
+ writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+}
+
+void analogix_dp_disable_scrambling(struct analogix_dp_device *dp)
+{
+ u32 reg;
+
+ reg = readl(dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+ reg |= SCRAMBLING_DISABLE;
+ writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET);
+}
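Taken together, a link-training routine would step through these helpers roughly as follows; a sketch assuming the full-training flow, with the DPCD status/adjust traffic between steps elided. Note that the DP_NONE case already re-enables scrambling, so a separate analogix_dp_enable_scrambling() call is only needed when leaving a pattern by another path:

	analogix_dp_set_training_pattern(dp, TRAINING_PTN1); /* scrambling off */
	/* ... clock-recovery loop: read status, adjust drive levels ... */
	analogix_dp_set_training_pattern(dp, TRAINING_PTN2);
	/* ... channel-equalization loop ... */
	analogix_dp_set_training_pattern(dp, DP_NONE);       /* scrambling on */
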
diff --git a/drivers/gpu/drm/exynos/exynos_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
index 2e9bd0e0b..337912b0a 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_reg.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
@@ -1,5 +1,5 @@
/*
- * Register definition file for Samsung DP driver
+ * Register definition file for Analogix DP core driver
*
* Copyright (C) 2012 Samsung Electronics Co., Ltd.
* Author: Jingoo Han <jg1.han@samsung.com>
@@ -9,96 +9,104 @@
* published by the Free Software Foundation.
*/
-#ifndef _EXYNOS_DP_REG_H
-#define _EXYNOS_DP_REG_H
-
-#define EXYNOS_DP_TX_SW_RESET 0x14
-#define EXYNOS_DP_FUNC_EN_1 0x18
-#define EXYNOS_DP_FUNC_EN_2 0x1C
-#define EXYNOS_DP_VIDEO_CTL_1 0x20
-#define EXYNOS_DP_VIDEO_CTL_2 0x24
-#define EXYNOS_DP_VIDEO_CTL_3 0x28
-
-#define EXYNOS_DP_VIDEO_CTL_8 0x3C
-#define EXYNOS_DP_VIDEO_CTL_10 0x44
-
-#define EXYNOS_DP_LANE_MAP 0x35C
-
-#define EXYNOS_DP_ANALOG_CTL_1 0x370
-#define EXYNOS_DP_ANALOG_CTL_2 0x374
-#define EXYNOS_DP_ANALOG_CTL_3 0x378
-#define EXYNOS_DP_PLL_FILTER_CTL_1 0x37C
-#define EXYNOS_DP_TX_AMP_TUNING_CTL 0x380
-
-#define EXYNOS_DP_AUX_HW_RETRY_CTL 0x390
-
-#define EXYNOS_DP_COMMON_INT_STA_1 0x3C4
-#define EXYNOS_DP_COMMON_INT_STA_2 0x3C8
-#define EXYNOS_DP_COMMON_INT_STA_3 0x3CC
-#define EXYNOS_DP_COMMON_INT_STA_4 0x3D0
-#define EXYNOS_DP_INT_STA 0x3DC
-#define EXYNOS_DP_COMMON_INT_MASK_1 0x3E0
-#define EXYNOS_DP_COMMON_INT_MASK_2 0x3E4
-#define EXYNOS_DP_COMMON_INT_MASK_3 0x3E8
-#define EXYNOS_DP_COMMON_INT_MASK_4 0x3EC
-#define EXYNOS_DP_INT_STA_MASK 0x3F8
-#define EXYNOS_DP_INT_CTL 0x3FC
-
-#define EXYNOS_DP_SYS_CTL_1 0x600
-#define EXYNOS_DP_SYS_CTL_2 0x604
-#define EXYNOS_DP_SYS_CTL_3 0x608
-#define EXYNOS_DP_SYS_CTL_4 0x60C
-
-#define EXYNOS_DP_PKT_SEND_CTL 0x640
-#define EXYNOS_DP_HDCP_CTL 0x648
-
-#define EXYNOS_DP_LINK_BW_SET 0x680
-#define EXYNOS_DP_LANE_COUNT_SET 0x684
-#define EXYNOS_DP_TRAINING_PTN_SET 0x688
-#define EXYNOS_DP_LN0_LINK_TRAINING_CTL 0x68C
-#define EXYNOS_DP_LN1_LINK_TRAINING_CTL 0x690
-#define EXYNOS_DP_LN2_LINK_TRAINING_CTL 0x694
-#define EXYNOS_DP_LN3_LINK_TRAINING_CTL 0x698
-
-#define EXYNOS_DP_DEBUG_CTL 0x6C0
-#define EXYNOS_DP_HPD_DEGLITCH_L 0x6C4
-#define EXYNOS_DP_HPD_DEGLITCH_H 0x6C8
-#define EXYNOS_DP_LINK_DEBUG_CTL 0x6E0
-
-#define EXYNOS_DP_M_VID_0 0x700
-#define EXYNOS_DP_M_VID_1 0x704
-#define EXYNOS_DP_M_VID_2 0x708
-#define EXYNOS_DP_N_VID_0 0x70C
-#define EXYNOS_DP_N_VID_1 0x710
-#define EXYNOS_DP_N_VID_2 0x714
-
-#define EXYNOS_DP_PLL_CTL 0x71C
-#define EXYNOS_DP_PHY_PD 0x720
-#define EXYNOS_DP_PHY_TEST 0x724
-
-#define EXYNOS_DP_VIDEO_FIFO_THRD 0x730
-#define EXYNOS_DP_AUDIO_MARGIN 0x73C
-
-#define EXYNOS_DP_M_VID_GEN_FILTER_TH 0x764
-#define EXYNOS_DP_M_AUD_GEN_FILTER_TH 0x778
-#define EXYNOS_DP_AUX_CH_STA 0x780
-#define EXYNOS_DP_AUX_CH_DEFER_CTL 0x788
-#define EXYNOS_DP_AUX_RX_COMM 0x78C
-#define EXYNOS_DP_BUFFER_DATA_CTL 0x790
-#define EXYNOS_DP_AUX_CH_CTL_1 0x794
-#define EXYNOS_DP_AUX_ADDR_7_0 0x798
-#define EXYNOS_DP_AUX_ADDR_15_8 0x79C
-#define EXYNOS_DP_AUX_ADDR_19_16 0x7A0
-#define EXYNOS_DP_AUX_CH_CTL_2 0x7A4
-
-#define EXYNOS_DP_BUF_DATA_0 0x7C0
-
-#define EXYNOS_DP_SOC_GENERAL_CTL 0x800
-
-/* EXYNOS_DP_TX_SW_RESET */
+#ifndef _ANALOGIX_DP_REG_H
+#define _ANALOGIX_DP_REG_H
+
+#define ANALOGIX_DP_TX_SW_RESET 0x14
+#define ANALOGIX_DP_FUNC_EN_1 0x18
+#define ANALOGIX_DP_FUNC_EN_2 0x1C
+#define ANALOGIX_DP_VIDEO_CTL_1 0x20
+#define ANALOGIX_DP_VIDEO_CTL_2 0x24
+#define ANALOGIX_DP_VIDEO_CTL_3 0x28
+
+#define ANALOGIX_DP_VIDEO_CTL_8 0x3C
+#define ANALOGIX_DP_VIDEO_CTL_10 0x44
+
+#define ANALOGIX_DP_PLL_REG_1 0xfc
+#define ANALOGIX_DP_PLL_REG_2 0x9e4
+#define ANALOGIX_DP_PLL_REG_3 0x9e8
+#define ANALOGIX_DP_PLL_REG_4 0x9ec
+#define ANALOGIX_DP_PLL_REG_5 0xa00
+
+#define ANALOGIX_DP_PD 0x12c
+
+#define ANALOGIX_DP_LANE_MAP 0x35C
+
+#define ANALOGIX_DP_ANALOG_CTL_1 0x370
+#define ANALOGIX_DP_ANALOG_CTL_2 0x374
+#define ANALOGIX_DP_ANALOG_CTL_3 0x378
+#define ANALOGIX_DP_PLL_FILTER_CTL_1 0x37C
+#define ANALOGIX_DP_TX_AMP_TUNING_CTL 0x380
+
+#define ANALOGIX_DP_AUX_HW_RETRY_CTL 0x390
+
+#define ANALOGIX_DP_COMMON_INT_STA_1 0x3C4
+#define ANALOGIX_DP_COMMON_INT_STA_2 0x3C8
+#define ANALOGIX_DP_COMMON_INT_STA_3 0x3CC
+#define ANALOGIX_DP_COMMON_INT_STA_4 0x3D0
+#define ANALOGIX_DP_INT_STA 0x3DC
+#define ANALOGIX_DP_COMMON_INT_MASK_1 0x3E0
+#define ANALOGIX_DP_COMMON_INT_MASK_2 0x3E4
+#define ANALOGIX_DP_COMMON_INT_MASK_3 0x3E8
+#define ANALOGIX_DP_COMMON_INT_MASK_4 0x3EC
+#define ANALOGIX_DP_INT_STA_MASK 0x3F8
+#define ANALOGIX_DP_INT_CTL 0x3FC
+
+#define ANALOGIX_DP_SYS_CTL_1 0x600
+#define ANALOGIX_DP_SYS_CTL_2 0x604
+#define ANALOGIX_DP_SYS_CTL_3 0x608
+#define ANALOGIX_DP_SYS_CTL_4 0x60C
+
+#define ANALOGIX_DP_PKT_SEND_CTL 0x640
+#define ANALOGIX_DP_HDCP_CTL 0x648
+
+#define ANALOGIX_DP_LINK_BW_SET 0x680
+#define ANALOGIX_DP_LANE_COUNT_SET 0x684
+#define ANALOGIX_DP_TRAINING_PTN_SET 0x688
+#define ANALOGIX_DP_LN0_LINK_TRAINING_CTL 0x68C
+#define ANALOGIX_DP_LN1_LINK_TRAINING_CTL 0x690
+#define ANALOGIX_DP_LN2_LINK_TRAINING_CTL 0x694
+#define ANALOGIX_DP_LN3_LINK_TRAINING_CTL 0x698
+
+#define ANALOGIX_DP_DEBUG_CTL 0x6C0
+#define ANALOGIX_DP_HPD_DEGLITCH_L 0x6C4
+#define ANALOGIX_DP_HPD_DEGLITCH_H 0x6C8
+#define ANALOGIX_DP_LINK_DEBUG_CTL 0x6E0
+
+#define ANALOGIX_DP_M_VID_0 0x700
+#define ANALOGIX_DP_M_VID_1 0x704
+#define ANALOGIX_DP_M_VID_2 0x708
+#define ANALOGIX_DP_N_VID_0 0x70C
+#define ANALOGIX_DP_N_VID_1 0x710
+#define ANALOGIX_DP_N_VID_2 0x714
+
+#define ANALOGIX_DP_PLL_CTL 0x71C
+#define ANALOGIX_DP_PHY_PD 0x720
+#define ANALOGIX_DP_PHY_TEST 0x724
+
+#define ANALOGIX_DP_VIDEO_FIFO_THRD 0x730
+#define ANALOGIX_DP_AUDIO_MARGIN 0x73C
+
+#define ANALOGIX_DP_M_VID_GEN_FILTER_TH 0x764
+#define ANALOGIX_DP_M_AUD_GEN_FILTER_TH 0x778
+#define ANALOGIX_DP_AUX_CH_STA 0x780
+#define ANALOGIX_DP_AUX_CH_DEFER_CTL 0x788
+#define ANALOGIX_DP_AUX_RX_COMM 0x78C
+#define ANALOGIX_DP_BUFFER_DATA_CTL 0x790
+#define ANALOGIX_DP_AUX_CH_CTL_1 0x794
+#define ANALOGIX_DP_AUX_ADDR_7_0 0x798
+#define ANALOGIX_DP_AUX_ADDR_15_8 0x79C
+#define ANALOGIX_DP_AUX_ADDR_19_16 0x7A0
+#define ANALOGIX_DP_AUX_CH_CTL_2 0x7A4
+
+#define ANALOGIX_DP_BUF_DATA_0 0x7C0
+
+#define ANALOGIX_DP_SOC_GENERAL_CTL 0x800
+
+/* ANALOGIX_DP_TX_SW_RESET */
#define RESET_DP_TX (0x1 << 0)
-/* EXYNOS_DP_FUNC_EN_1 */
+/* ANALOGIX_DP_FUNC_EN_1 */
#define MASTER_VID_FUNC_EN_N (0x1 << 7)
#define SLAVE_VID_FUNC_EN_N (0x1 << 5)
#define AUD_FIFO_FUNC_EN_N (0x1 << 4)
@@ -107,17 +115,17 @@
#define CRC_FUNC_EN_N (0x1 << 1)
#define SW_FUNC_EN_N (0x1 << 0)
-/* EXYNOS_DP_FUNC_EN_2 */
+/* ANALOGIX_DP_FUNC_EN_2 */
#define SSC_FUNC_EN_N (0x1 << 7)
#define AUX_FUNC_EN_N (0x1 << 2)
#define SERDES_FIFO_FUNC_EN_N (0x1 << 1)
#define LS_CLK_DOMAIN_FUNC_EN_N (0x1 << 0)
-/* EXYNOS_DP_VIDEO_CTL_1 */
+/* ANALOGIX_DP_VIDEO_CTL_1 */
#define VIDEO_EN (0x1 << 7)
#define HDCP_VIDEO_MUTE (0x1 << 6)
-/* EXYNOS_DP_VIDEO_CTL_1 */
+/* ANALOGIX_DP_VIDEO_CTL_2 */
#define IN_D_RANGE_MASK (0x1 << 7)
#define IN_D_RANGE_SHIFT (7)
#define IN_D_RANGE_CEA (0x1 << 7)
@@ -134,7 +142,7 @@
#define IN_COLOR_F_YCBCR422 (0x1 << 0)
#define IN_COLOR_F_RGB (0x0 << 0)
-/* EXYNOS_DP_VIDEO_CTL_3 */
+/* ANALOGIX_DP_VIDEO_CTL_3 */
#define IN_YC_COEFFI_MASK (0x1 << 7)
#define IN_YC_COEFFI_SHIFT (7)
#define IN_YC_COEFFI_ITU709 (0x1 << 7)
@@ -144,17 +152,21 @@
#define VID_CHK_UPDATE_TYPE_1 (0x1 << 4)
#define VID_CHK_UPDATE_TYPE_0 (0x0 << 4)
-/* EXYNOS_DP_VIDEO_CTL_8 */
+/* ANALOGIX_DP_VIDEO_CTL_8 */
#define VID_HRES_TH(x) (((x) & 0xf) << 4)
#define VID_VRES_TH(x) (((x) & 0xf) << 0)
-/* EXYNOS_DP_VIDEO_CTL_10 */
+/* ANALOGIX_DP_VIDEO_CTL_10 */
#define FORMAT_SEL (0x1 << 4)
#define INTERACE_SCAN_CFG (0x1 << 2)
#define VSYNC_POLARITY_CFG (0x1 << 1)
#define HSYNC_POLARITY_CFG (0x1 << 0)
-/* EXYNOS_DP_LANE_MAP */
+/* ANALOGIX_DP_PLL_REG_1 */
+#define REF_CLK_24M (0x1 << 1)
+#define REF_CLK_27M (0x0 << 1)
+
+/* ANALOGIX_DP_LANE_MAP */
#define LANE3_MAP_LOGIC_LANE_0 (0x0 << 6)
#define LANE3_MAP_LOGIC_LANE_1 (0x1 << 6)
#define LANE3_MAP_LOGIC_LANE_2 (0x2 << 6)
@@ -172,30 +184,30 @@
#define LANE0_MAP_LOGIC_LANE_2 (0x2 << 0)
#define LANE0_MAP_LOGIC_LANE_3 (0x3 << 0)
-/* EXYNOS_DP_ANALOG_CTL_1 */
+/* ANALOGIX_DP_ANALOG_CTL_1 */
#define TX_TERMINAL_CTRL_50_OHM (0x1 << 4)
-/* EXYNOS_DP_ANALOG_CTL_2 */
+/* ANALOGIX_DP_ANALOG_CTL_2 */
#define SEL_24M (0x1 << 3)
#define TX_DVDD_BIT_1_0625V (0x4 << 0)
-/* EXYNOS_DP_ANALOG_CTL_3 */
+/* ANALOGIX_DP_ANALOG_CTL_3 */
#define DRIVE_DVDD_BIT_1_0625V (0x4 << 5)
#define VCO_BIT_600_MICRO (0x5 << 0)
-/* EXYNOS_DP_PLL_FILTER_CTL_1 */
+/* ANALOGIX_DP_PLL_FILTER_CTL_1 */
#define PD_RING_OSC (0x1 << 6)
#define AUX_TERMINAL_CTRL_50_OHM (0x2 << 4)
#define TX_CUR1_2X (0x1 << 2)
#define TX_CUR_16_MA (0x3 << 0)
-/* EXYNOS_DP_TX_AMP_TUNING_CTL */
+/* ANALOGIX_DP_TX_AMP_TUNING_CTL */
#define CH3_AMP_400_MV (0x0 << 24)
#define CH2_AMP_400_MV (0x0 << 16)
#define CH1_AMP_400_MV (0x0 << 8)
#define CH0_AMP_400_MV (0x0 << 0)
-/* EXYNOS_DP_AUX_HW_RETRY_CTL */
+/* ANALOGIX_DP_AUX_HW_RETRY_CTL */
#define AUX_BIT_PERIOD_EXPECTED_DELAY(x) (((x) & 0x7) << 8)
#define AUX_HW_RETRY_INTERVAL_MASK (0x3 << 3)
#define AUX_HW_RETRY_INTERVAL_600_MICROSECONDS (0x0 << 3)
@@ -204,7 +216,7 @@
#define AUX_HW_RETRY_INTERVAL_1800_MICROSECONDS (0x3 << 3)
#define AUX_HW_RETRY_COUNT_SEL(x) (((x) & 0x7) << 0)
-/* EXYNOS_DP_COMMON_INT_STA_1 */
+/* ANALOGIX_DP_COMMON_INT_STA_1 */
#define VSYNC_DET (0x1 << 7)
#define PLL_LOCK_CHG (0x1 << 6)
#define SPDIF_ERR (0x1 << 5)
@@ -214,19 +226,19 @@
#define VID_CLK_CHG (0x1 << 1)
#define SW_INT (0x1 << 0)
-/* EXYNOS_DP_COMMON_INT_STA_2 */
+/* ANALOGIX_DP_COMMON_INT_STA_2 */
#define ENC_EN_CHG (0x1 << 6)
#define HW_BKSV_RDY (0x1 << 3)
#define HW_SHA_DONE (0x1 << 2)
#define HW_AUTH_STATE_CHG (0x1 << 1)
#define HW_AUTH_DONE (0x1 << 0)
-/* EXYNOS_DP_COMMON_INT_STA_3 */
+/* ANALOGIX_DP_COMMON_INT_STA_3 */
#define AFIFO_UNDER (0x1 << 7)
#define AFIFO_OVER (0x1 << 6)
#define R0_CHK_FLAG (0x1 << 5)
-/* EXYNOS_DP_COMMON_INT_STA_4 */
+/* ANALOGIX_DP_COMMON_INT_STA_4 */
#define PSR_ACTIVE (0x1 << 7)
#define PSR_INACTIVE (0x1 << 6)
#define SPDIF_BI_PHASE_ERR (0x1 << 5)
@@ -234,29 +246,29 @@
#define HPD_LOST (0x1 << 1)
#define PLUG (0x1 << 0)
-/* EXYNOS_DP_INT_STA */
+/* ANALOGIX_DP_INT_STA */
#define INT_HPD (0x1 << 6)
#define HW_TRAINING_FINISH (0x1 << 5)
#define RPLY_RECEIV (0x1 << 1)
#define AUX_ERR (0x1 << 0)
-/* EXYNOS_DP_INT_CTL */
+/* ANALOGIX_DP_INT_CTL */
#define SOFT_INT_CTRL (0x1 << 2)
#define INT_POL1 (0x1 << 1)
#define INT_POL0 (0x1 << 0)
-/* EXYNOS_DP_SYS_CTL_1 */
+/* ANALOGIX_DP_SYS_CTL_1 */
#define DET_STA (0x1 << 2)
#define FORCE_DET (0x1 << 1)
#define DET_CTRL (0x1 << 0)
-/* EXYNOS_DP_SYS_CTL_2 */
+/* ANALOGIX_DP_SYS_CTL_2 */
#define CHA_CRI(x) (((x) & 0xf) << 4)
#define CHA_STA (0x1 << 2)
#define FORCE_CHA (0x1 << 1)
#define CHA_CTRL (0x1 << 0)
-/* EXYNOS_DP_SYS_CTL_3 */
+/* ANALOGIX_DP_SYS_CTL_3 */
#define HPD_STATUS (0x1 << 6)
#define F_HPD (0x1 << 5)
#define HPD_CTRL (0x1 << 4)
@@ -265,13 +277,13 @@
#define F_VALID (0x1 << 1)
#define VALID_CTRL (0x1 << 0)
-/* EXYNOS_DP_SYS_CTL_4 */
+/* ANALOGIX_DP_SYS_CTL_4 */
#define FIX_M_AUD (0x1 << 4)
#define ENHANCED (0x1 << 3)
#define FIX_M_VID (0x1 << 2)
#define M_VID_UPDATE_CTRL (0x3 << 0)
-/* EXYNOS_DP_TRAINING_PTN_SET */
+/* ANALOGIX_DP_TRAINING_PTN_SET */
#define SCRAMBLER_TYPE (0x1 << 9)
#define HW_LINK_TRAINING_PATTERN (0x1 << 8)
#define SCRAMBLING_DISABLE (0x1 << 5)
@@ -285,24 +297,24 @@
#define SW_TRAINING_PATTERN_SET_PTN1 (0x1 << 0)
#define SW_TRAINING_PATTERN_SET_NORMAL (0x0 << 0)
-/* EXYNOS_DP_LN0_LINK_TRAINING_CTL */
+/* ANALOGIX_DP_LN0_LINK_TRAINING_CTL */
#define PRE_EMPHASIS_SET_MASK (0x3 << 3)
#define PRE_EMPHASIS_SET_SHIFT (3)
-/* EXYNOS_DP_DEBUG_CTL */
+/* ANALOGIX_DP_DEBUG_CTL */
#define PLL_LOCK (0x1 << 4)
#define F_PLL_LOCK (0x1 << 3)
#define PLL_LOCK_CTRL (0x1 << 2)
#define PN_INV (0x1 << 0)
-/* EXYNOS_DP_PLL_CTL */
+/* ANALOGIX_DP_PLL_CTL */
#define DP_PLL_PD (0x1 << 7)
#define DP_PLL_RESET (0x1 << 6)
#define DP_PLL_LOOP_BIT_DEFAULT (0x1 << 4)
#define DP_PLL_REF_BIT_1_1250V (0x5 << 0)
#define DP_PLL_REF_BIT_1_2500V (0x7 << 0)
-/* EXYNOS_DP_PHY_PD */
+/* ANALOGIX_DP_PHY_PD */
#define DP_PHY_PD (0x1 << 5)
#define AUX_PD (0x1 << 4)
#define CH3_PD (0x1 << 3)
@@ -310,28 +322,28 @@
#define CH1_PD (0x1 << 1)
#define CH0_PD (0x1 << 0)
-/* EXYNOS_DP_PHY_TEST */
+/* ANALOGIX_DP_PHY_TEST */
#define MACRO_RST (0x1 << 5)
#define CH1_TEST (0x1 << 1)
#define CH0_TEST (0x1 << 0)
-/* EXYNOS_DP_AUX_CH_STA */
+/* ANALOGIX_DP_AUX_CH_STA */
#define AUX_BUSY (0x1 << 4)
#define AUX_STATUS_MASK (0xf << 0)
-/* EXYNOS_DP_AUX_CH_DEFER_CTL */
+/* ANALOGIX_DP_AUX_CH_DEFER_CTL */
#define DEFER_CTRL_EN (0x1 << 7)
#define DEFER_COUNT(x) (((x) & 0x7f) << 0)
-/* EXYNOS_DP_AUX_RX_COMM */
+/* ANALOGIX_DP_AUX_RX_COMM */
#define AUX_RX_COMM_I2C_DEFER (0x2 << 2)
#define AUX_RX_COMM_AUX_DEFER (0x2 << 0)
-/* EXYNOS_DP_BUFFER_DATA_CTL */
+/* ANALOGIX_DP_BUFFER_DATA_CTL */
#define BUF_CLR (0x1 << 7)
#define BUF_DATA_COUNT(x) (((x) & 0x1f) << 0)
-/* EXYNOS_DP_AUX_CH_CTL_1 */
+/* ANALOGIX_DP_AUX_CH_CTL_1 */
#define AUX_LENGTH(x) (((x - 1) & 0xf) << 4)
#define AUX_TX_COMM_MASK (0xf << 0)
#define AUX_TX_COMM_DP_TRANSACTION (0x1 << 3)
@@ -340,20 +352,20 @@
#define AUX_TX_COMM_WRITE (0x0 << 0)
#define AUX_TX_COMM_READ (0x1 << 0)
-/* EXYNOS_DP_AUX_ADDR_7_0 */
+/* ANALOGIX_DP_AUX_ADDR_7_0 */
#define AUX_ADDR_7_0(x) (((x) >> 0) & 0xff)
-/* EXYNOS_DP_AUX_ADDR_15_8 */
+/* ANALOGIX_DP_AUX_ADDR_15_8 */
#define AUX_ADDR_15_8(x) (((x) >> 8) & 0xff)
-/* EXYNOS_DP_AUX_ADDR_19_16 */
+/* ANALOGIX_DP_AUX_ADDR_19_16 */
#define AUX_ADDR_19_16(x) (((x) >> 16) & 0x0f)
-/* EXYNOS_DP_AUX_CH_CTL_2 */
+/* ANALOGIX_DP_AUX_CH_CTL_2 */
#define ADDR_ONLY (0x1 << 1)
#define AUX_EN (0x1 << 0)
-/* EXYNOS_DP_SOC_GENERAL_CTL */
+/* ANALOGIX_DP_SOC_GENERAL_CTL */
#define AUDIO_MODE_SPDIF_MODE (0x1 << 8)
#define AUDIO_MODE_MASTER_MODE (0x0 << 8)
#define MASTER_VIDEO_INTERLACE_EN (0x1 << 4)
@@ -363,4 +375,4 @@
#define VIDEO_MODE_SLAVE_MODE (0x1 << 0)
#define VIDEO_MODE_MASTER_MODE (0x0 << 0)
-#endif /* _EXYNOS_DP_REG_H */
+#endif /* _ANALOGIX_DP_REG_H */
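A worked example of the address and length encodings above, not taken from the patch (base stands in for dp->reg_base): for the 16-byte link-status block at DPCD 0x00202, AUX_ADDR_7_0() yields 0x02, AUX_ADDR_15_8() yields 0x02 and AUX_ADDR_19_16() yields 0x00, while AUX_LENGTH() stores length minus one, so a 16-byte burst encodes as 0xf0:

	writel(AUX_ADDR_7_0(0x202), base + ANALOGIX_DP_AUX_ADDR_7_0);
	writel(AUX_ADDR_15_8(0x202), base + ANALOGIX_DP_AUX_ADDR_15_8);
	writel(AUX_ADDR_19_16(0x202), base + ANALOGIX_DP_AUX_ADDR_19_16);
	writel(AUX_LENGTH(16) | AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ,
	       base + ANALOGIX_DP_AUX_CH_CTL_1); /* writes 0xf9 */
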
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index 9795b7247..c9d941283 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -1413,11 +1413,6 @@ static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
mutex_unlock(&hdmi->mutex);
}
-static void dw_hdmi_bridge_nop(struct drm_bridge *bridge)
-{
- /* do nothing */
-}
-
static enum drm_connector_status
dw_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
@@ -1536,8 +1531,6 @@ static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs =
static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
.enable = dw_hdmi_bridge_enable,
.disable = dw_hdmi_bridge_disable,
- .pre_enable = dw_hdmi_bridge_nop,
- .post_disable = dw_hdmi_bridge_nop,
.mode_set = dw_hdmi_bridge_mode_set,
};
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 7bc394ec9..dc83f69da 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -163,10 +163,8 @@ static struct pci_driver cirrus_pci_driver = {
static int __init cirrus_init(void)
{
-#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && cirrus_modeset == -1)
return -EINVAL;
-#endif
if (cirrus_modeset == 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index b774d637a..2188d6b61 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -245,7 +245,7 @@ static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
{
int ret;
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 0907715e9..32d32c5b7 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -61,7 +61,7 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
bpp, mode_cmd->pitches[0]))
return ERR_PTR(-EINVAL);
- obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
@@ -295,7 +295,7 @@ cirrus_dumb_mmap_offset(struct drm_file *file,
struct drm_gem_object *obj;
struct cirrus_bo *bo;
- obj = drm_gem_object_lookup(dev, file, handle);
+ obj = drm_gem_object_lookup(file, handle);
if (obj == NULL)
return -ENOENT;
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index dfffd5285..6768b7b1a 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -245,6 +245,8 @@ struct ttm_bo_driver cirrus_bo_driver = {
.verify_access = cirrus_bo_verify_access,
.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
.io_mem_free = &cirrus_ttm_io_mem_free,
+ .lru_tail = &ttm_bo_default_lru_tail,
+ .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int cirrus_mm_init(struct cirrus_device *cirrus)
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index a10ea6aec..605bd243f 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -423,7 +423,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
}
/**
- * drm_agp_clear - Clear AGP resource list
+ * drm_legacy_agp_clear - Clear AGP resource list
* @dev: DRM device
*
* Iterate over all AGP resources and remove them. But keep the AGP head
@@ -434,7 +434,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
* resources from getting destroyed. Drivers are responsible of cleaning them up
* during device shutdown.
*/
-void drm_agp_clear(struct drm_device *dev)
+void drm_legacy_agp_clear(struct drm_device *dev)
{
struct drm_agp_mem *entry, *tempe;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 080a09014..9bb99e274 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -31,6 +31,8 @@
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
+#include "drm_crtc_internal.h"
+
/**
* drm_atomic_state_default_release -
* release memory initialized by drm_atomic_state_init
@@ -139,21 +141,14 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
for (i = 0; i < state->num_connector; i++) {
struct drm_connector *connector = state->connectors[i];
- if (!connector || !connector->funcs)
+ if (!connector)
continue;
- /*
- * FIXME: Async commits can race with connector unplugging and
- * there's currently nothing that prevents cleanup up state for
- * deleted connectors. As long as the callback doesn't look at
- * the connector we'll be fine though, so make sure that's the
- * case by setting all connector pointers to NULL.
- */
- state->connector_states[i]->connector = NULL;
- connector->funcs->atomic_destroy_state(NULL,
+ connector->funcs->atomic_destroy_state(connector,
state->connector_states[i]);
state->connectors[i] = NULL;
state->connector_states[i] = NULL;
+ drm_connector_unreference(connector);
}
for (i = 0; i < config->num_crtc; i++) {
@@ -261,6 +256,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
int ret, index = drm_crtc_index(crtc);
struct drm_crtc_state *crtc_state;
+ WARN_ON(!state->acquire_ctx);
+
crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
if (crtc_state)
return crtc_state;
@@ -621,6 +618,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
int ret, index = drm_plane_index(plane);
struct drm_plane_state *plane_state;
+ WARN_ON(!state->acquire_ctx);
+
plane_state = drm_atomic_get_existing_plane_state(state, plane);
if (plane_state)
return plane_state;
@@ -889,6 +888,8 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_mode_config *config = &connector->dev->mode_config;
struct drm_connector_state *connector_state;
+ WARN_ON(!state->acquire_ctx);
+
ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
if (ret)
return ERR_PTR(ret);
@@ -925,6 +926,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
if (!connector_state)
return ERR_PTR(-ENOMEM);
+ drm_connector_reference(connector);
state->connector_states[index] = connector_state;
state->connectors[index] = connector;
connector_state->state = state;
@@ -1159,12 +1161,18 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
{
struct drm_crtc_state *crtc_state;
- if (conn_state->crtc && conn_state->crtc != crtc) {
+ if (conn_state->crtc == crtc)
+ return 0;
+
+ if (conn_state->crtc) {
crtc_state = drm_atomic_get_existing_crtc_state(conn_state->state,
conn_state->crtc);
crtc_state->connector_mask &=
~(1 << drm_connector_index(conn_state->connector));
+
+ drm_connector_unreference(conn_state->connector);
+ conn_state->crtc = NULL;
}
if (crtc) {
@@ -1174,16 +1182,16 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
crtc_state->connector_mask |=
1 << drm_connector_index(conn_state->connector);
- }
- conn_state->crtc = crtc;
+ drm_connector_reference(conn_state->connector);
+ conn_state->crtc = crtc;
- if (crtc)
DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d:%s]\n",
conn_state, crtc->base.id, crtc->name);
- else
+ } else {
DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
conn_state);
+ }
return 0;
}
@@ -1414,7 +1422,7 @@ int drm_atomic_commit(struct drm_atomic_state *state)
EXPORT_SYMBOL(drm_atomic_commit);
/**
- * drm_atomic_async_commit - atomic&async configuration commit
+ * drm_atomic_nonblocking_commit - atomic&nonblocking configuration commit
* @state: atomic configuration to check
*
* Note that this function can return -EDEADLK if the driver needed to acquire
@@ -1429,7 +1437,7 @@ EXPORT_SYMBOL(drm_atomic_commit);
* Returns:
* 0 on success, negative error code on failure.
*/
-int drm_atomic_async_commit(struct drm_atomic_state *state)
+int drm_atomic_nonblocking_commit(struct drm_atomic_state *state)
{
struct drm_mode_config *config = &state->dev->mode_config;
int ret;
@@ -1438,11 +1446,11 @@ int drm_atomic_async_commit(struct drm_atomic_state *state)
if (ret)
return ret;
- DRM_DEBUG_ATOMIC("commiting %p asynchronously\n", state);
+ DRM_DEBUG_ATOMIC("commiting %p nonblocking\n", state);
return config->funcs->atomic_commit(state->dev, state, true);
}
-EXPORT_SYMBOL(drm_atomic_async_commit);
+EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
/*
 * The big monster ioctl
@@ -1640,12 +1648,19 @@ retry:
}
obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
- if (!obj || !obj->properties) {
+ if (!obj) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ if (!obj->properties) {
+ drm_mode_object_unreference(obj);
ret = -ENOENT;
goto out;
}
if (get_user(count_props, count_props_ptr + copied_objs)) {
+ drm_mode_object_unreference(obj);
ret = -EFAULT;
goto out;
}
@@ -1658,12 +1673,14 @@ retry:
struct drm_property *prop;
if (get_user(prop_id, props_ptr + copied_props)) {
+ drm_mode_object_unreference(obj);
ret = -EFAULT;
goto out;
}
prop = drm_property_find(dev, prop_id);
if (!prop) {
+ drm_mode_object_unreference(obj);
ret = -ENOENT;
goto out;
}
@@ -1671,13 +1688,16 @@ retry:
if (copy_from_user(&prop_value,
prop_values_ptr + copied_props,
sizeof(prop_value))) {
+ drm_mode_object_unreference(obj);
ret = -EFAULT;
goto out;
}
ret = atomic_set_prop(state, obj, prop, prop_value);
- if (ret)
+ if (ret) {
+ drm_mode_object_unreference(obj);
goto out;
+ }
copied_props++;
}
@@ -1688,6 +1708,7 @@ retry:
plane_mask |= (1 << drm_plane_index(plane));
plane->old_fb = plane->fb;
}
+ drm_mode_object_unreference(obj);
}
if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -1711,7 +1732,7 @@ retry:
*/
ret = drm_atomic_check_only(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
- ret = drm_atomic_async_commit(state);
+ ret = drm_atomic_nonblocking_commit(state);
} else {
ret = drm_atomic_commit(state);
}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4befe25c8..ddfa0d120 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -384,8 +384,6 @@ mode_fixup(struct drm_atomic_state *state)
*/
encoder = conn_state->best_encoder;
funcs = encoder->helper_private;
- if (!funcs)
- continue;
ret = drm_bridge_mode_fixup(encoder->bridge, &crtc_state->mode,
&crtc_state->adjusted_mode);
@@ -394,7 +392,7 @@ mode_fixup(struct drm_atomic_state *state)
return -EINVAL;
}
- if (funcs->atomic_check) {
+ if (funcs && funcs->atomic_check) {
ret = funcs->atomic_check(encoder, crtc_state,
conn_state);
if (ret) {
@@ -402,7 +400,7 @@ mode_fixup(struct drm_atomic_state *state)
encoder->base.id, encoder->name);
return ret;
}
- } else if (funcs->mode_fixup) {
+ } else if (funcs && funcs->mode_fixup) {
ret = funcs->mode_fixup(encoder, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
@@ -707,12 +705,14 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
drm_bridge_disable(encoder->bridge);
/* Right function depends upon target state. */
- if (connector->state->crtc && funcs->prepare)
- funcs->prepare(encoder);
- else if (funcs->disable)
- funcs->disable(encoder);
- else
- funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+ if (funcs) {
+ if (connector->state->crtc && funcs->prepare)
+ funcs->prepare(encoder);
+ else if (funcs->disable)
+ funcs->disable(encoder);
+ else if (funcs->dpms)
+ funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+ }
drm_bridge_post_disable(encoder->bridge);
}
@@ -873,7 +873,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
* Each encoder has at most one connector (since we always steal
* it away), so we won't call mode_set hooks twice.
*/
- if (funcs->mode_set)
+ if (funcs && funcs->mode_set)
funcs->mode_set(encoder, mode, adjusted_mode);
drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
@@ -974,17 +974,29 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
*/
drm_bridge_pre_enable(encoder->bridge);
- if (funcs->enable)
- funcs->enable(encoder);
- else
- funcs->commit(encoder);
+ if (funcs) {
+ if (funcs->enable)
+ funcs->enable(encoder);
+ else if (funcs->commit)
+ funcs->commit(encoder);
+ }
drm_bridge_enable(encoder->bridge);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
-static void wait_for_fences(struct drm_device *dev,
+/**
+ * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state
+ * @dev: DRM device
+ * @state: atomic state object with old state structures
+ *
+ * For implicit sync, the driver should fish the exclusive fence out of the
+ * incoming fbs and stash it in the drm_plane_state. This is called after
+ * drm_atomic_helper_swap_state() so it uses the current plane state (and
+ * just uses the atomic state to find the changed planes).
+ */
+void drm_atomic_helper_wait_for_fences(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_plane *plane;
@@ -1002,6 +1014,7 @@ static void wait_for_fences(struct drm_device *dev,
plane->state->fence = NULL;
}
}
+EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences);
/**
* drm_atomic_helper_framebuffer_changed - check if framebuffer has changed
@@ -1092,6 +1105,8 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
drm_crtc_vblank_count(crtc),
msecs_to_jiffies(50));
+ WARN(!ret, "[CRTC:%d] vblank wait timed out\n", crtc->base.id);
+
drm_crtc_vblank_put(crtc);
}
}
@@ -1101,13 +1116,13 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
* drm_atomic_helper_commit - commit validated state object
* @dev: DRM device
* @state: the driver state object
- * @async: asynchronous commit
+ * @nonblock: whether nonblocking behavior is requested.
*
* This function commits a with drm_atomic_helper_check() pre-validated state
* object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement asynchronous commits.
+ * now this doesn't implement nonblocking commits.
*
- * Note that right now this function does not support async commits, and hence
+ * Note that right now this function does not support nonblocking commits, hence
* driver writers must implement their own version for now. Also note that the
* default ordering of how the various stages are called is to match the legacy
* modeset helper library closest. One peculiarity of that is that it doesn't
@@ -1128,11 +1143,11 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
*/
int drm_atomic_helper_commit(struct drm_device *dev,
struct drm_atomic_state *state,
- bool async)
+ bool nonblock)
{
int ret;
- if (async)
+ if (nonblock)
return -EBUSY;
ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -1163,7 +1178,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
* current layout.
*/
- wait_for_fences(dev, state);
+ drm_atomic_helper_wait_for_fences(dev, state);
drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -1182,20 +1197,20 @@ int drm_atomic_helper_commit(struct drm_device *dev,
EXPORT_SYMBOL(drm_atomic_helper_commit);
/**
- * DOC: implementing async commit
+ * DOC: implementing nonblocking commit
*
- * For now the atomic helpers don't support async commit directly. If there is
- * real need it could be added though, using the dma-buf fence infrastructure
- * for generic synchronization with outstanding rendering.
+ * For now the atomic helpers don't support nonblocking commit directly. If
+ * there is real need it could be added though, using the dma-buf fence
+ * infrastructure for generic synchronization with outstanding rendering.
*
- * For now drivers have to implement async commit themselves, with the following
- * sequence being the recommended one:
+ * For now drivers have to implement nonblocking commit themselves, with the
+ * following sequence being the recommended one:
*
* 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
* which commit needs to call which can fail, so we want to run it first and
* synchronously.
*
- * 2. Synchronize with any outstanding asynchronous commit worker threads which
+ * 2. Synchronize with any outstanding nonblocking commit worker threads which
 * might be affected by the new state update. This can be done by either cancelling
* or flushing the work items, depending upon whether the driver can deal with
* cancelled updates. Note that it is important to ensure that the framebuffer
@@ -1209,9 +1224,9 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
* 3. The software state is updated synchronously with
* drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
* locks means concurrent callers never see inconsistent state. And doing this
- * while it's guaranteed that no relevant async worker runs means that async
- * workers do not need grab any locks. Actually they must not grab locks, for
- * otherwise the work flushing will deadlock.
+ * while it's guaranteed that no relevant nonblocking worker runs means that
+ * nonblocking workers do not need to grab any locks. Actually they must not grab
+ * locks, for otherwise the work flushing will deadlock.
*
* 4. Schedule a work item to do all subsequent steps, using the split-out
* commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
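A minimal sketch of the four-step recipe above, with hypothetical names (my_flush_pending_commits, my_commit_work, my_do_commit_tail, my_wq) and abbreviated error handling; drm_atomic_helper_swap_state() takes (dev, state) at this point in the tree:

	static int my_atomic_commit(struct drm_device *dev,
				    struct drm_atomic_state *state,
				    bool nonblock)
	{
		int ret;

		ret = drm_atomic_helper_prepare_planes(dev, state); /* 1 */
		if (ret)
			return ret;

		my_flush_pending_commits(dev);                      /* 2 */

		drm_atomic_helper_swap_state(dev, state);           /* 3 */

		if (nonblock)
			queue_work(my_wq, &my_commit_work);         /* 4 */
		else
			my_do_commit_tail(dev, state);

		return 0;
	}
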
@@ -2358,11 +2373,11 @@ retry:
goto fail;
}
- ret = drm_atomic_async_commit(state);
+ ret = drm_atomic_nonblocking_commit(state);
if (ret != 0)
goto fail;
- /* Driver takes ownership of state on successful async commit. */
+ /* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
@@ -2468,6 +2483,23 @@ backoff:
EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
/**
+ * drm_atomic_helper_best_encoder - Helper for &drm_connector_helper_funcs
+ * ->best_encoder callback
+ * @connector: Connector control structure
+ *
+ * This is a &drm_connector_helper_funcs ->best_encoder callback helper for
+ * connectors that support exactly 1 encoder, statically determined at driver
+ * init time.
+ */
+struct drm_encoder *
+drm_atomic_helper_best_encoder(struct drm_connector *connector)
+{
+ WARN_ON(connector->encoder_ids[1]);
+ return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
+}
+EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
+
+/**
* DOC: atomic state reset and initialization
*
* Both the drm core and the atomic helpers assume that there is always the full
@@ -2497,12 +2529,9 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
*/
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
{
- if (crtc->state) {
- drm_property_unreference_blob(crtc->state->mode_blob);
- drm_property_unreference_blob(crtc->state->degamma_lut);
- drm_property_unreference_blob(crtc->state->ctm);
- drm_property_unreference_blob(crtc->state->gamma_lut);
- }
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
kfree(crtc->state);
crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
@@ -2566,15 +2595,13 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
/**
* __drm_atomic_helper_crtc_destroy_state - release CRTC state
- * @crtc: CRTC object
* @state: CRTC state object to release
*
* Releases all resources stored in the CRTC state without actually freeing
* the memory of the CRTC state. This is useful for drivers that subclass the
* CRTC state.
*/
-void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
{
drm_property_unreference_blob(state->mode_blob);
drm_property_unreference_blob(state->degamma_lut);
@@ -2594,7 +2621,7 @@ EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- __drm_atomic_helper_crtc_destroy_state(crtc, state);
+ __drm_atomic_helper_crtc_destroy_state(state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
@@ -2608,8 +2635,8 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
*/
void drm_atomic_helper_plane_reset(struct drm_plane *plane)
{
- if (plane->state && plane->state->fb)
- drm_framebuffer_unreference(plane->state->fb);
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
kfree(plane->state);
plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
@@ -2664,15 +2691,13 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
/**
* __drm_atomic_helper_plane_destroy_state - release plane state
- * @plane: plane object
* @state: plane state object to release
*
* Releases all resources stored in the plane state without actually freeing
* the memory of the plane state. This is useful for drivers that subclass the
* plane state.
*/
-void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state)
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
{
if (state->fb)
drm_framebuffer_unreference(state->fb);
@@ -2690,7 +2715,7 @@ EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- __drm_atomic_helper_plane_destroy_state(plane, state);
+ __drm_atomic_helper_plane_destroy_state(state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
@@ -2730,6 +2755,9 @@ void drm_atomic_helper_connector_reset(struct drm_connector *connector)
struct drm_connector_state *conn_state =
kzalloc(sizeof(*conn_state), GFP_KERNEL);
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
kfree(connector->state);
__drm_atomic_helper_connector_reset(connector, conn_state);
}
@@ -2748,6 +2776,8 @@ __drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
struct drm_connector_state *state)
{
memcpy(state, connector->state, sizeof(*state));
+ if (state->crtc)
+ drm_connector_reference(connector);
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
@@ -2859,7 +2889,6 @@ EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
/**
* __drm_atomic_helper_connector_destroy_state - release connector state
- * @connector: connector object
* @state: connector state object to release
*
* Releases all resources stored in the connector state without actually
@@ -2867,14 +2896,15 @@ EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
* subclass the connector state.
*/
void
-__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
- struct drm_connector_state *state)
+__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
{
/*
* This is currently a placeholder so that drivers that subclass the
* state will automatically do the right thing if code is ever added
* to this function.
*/
+ if (state->crtc)
+ drm_connector_unreference(state->connector);
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
@@ -2889,7 +2919,7 @@ EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state)
{
- __drm_atomic_helper_connector_destroy_state(connector, state);
+ __drm_atomic_helper_connector_destroy_state(state);
kfree(state);
}
EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
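With the object argument dropped from the __drm_atomic_helper_*_destroy_state() helpers, drivers that subclass state objects now pass only the state; a sketch with a hypothetical subclass (my_crtc_state and to_my_crtc_state are not real symbols):

	static void my_crtc_destroy_state(struct drm_crtc *crtc,
					  struct drm_crtc_state *state)
	{
		struct my_crtc_state *s = to_my_crtc_state(state);

		/* was __drm_atomic_helper_crtc_destroy_state(crtc, state) */
		__drm_atomic_helper_crtc_destroy_state(state);
		kfree(s);
	}
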
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index f1a204d25..9b34158c0 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -396,6 +396,10 @@ int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
return -EPERM;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
err = drm_addmap_core(dev, map->offset, map->size, map->type,
map->flags, &maplist);
@@ -416,6 +420,62 @@ int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
+/*
+ * Get mapping information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_map structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping with the specified offset and copies its information
+ * into userspace.
+ */
+int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_map *map = data;
+ struct drm_map_list *r_list = NULL;
+ struct list_head *list;
+ int idx;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ idx = map->offset;
+ if (idx < 0)
+ return -EINVAL;
+
+ i = 0;
+ mutex_lock(&dev->struct_mutex);
+ list_for_each(list, &dev->maplist) {
+ if (i == idx) {
+ r_list = list_entry(list, struct drm_map_list, head);
+ break;
+ }
+ i++;
+ }
+ if (!r_list || !r_list->map) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ map->offset = r_list->map->offset;
+ map->size = r_list->map->size;
+ map->type = r_list->map->type;
+ map->flags = r_list->map->flags;
+ map->handle = (void *)(unsigned long) r_list->user_token;
+ map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
/**
* Remove a map private from list and deallocate resources if the mapping
* isn't in use.
@@ -482,18 +542,35 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
}
EXPORT_SYMBOL(drm_legacy_rmmap_locked);
-int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
+void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
- int ret;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
mutex_lock(&dev->struct_mutex);
- ret = drm_legacy_rmmap_locked(dev, map);
+ drm_legacy_rmmap_locked(dev, map);
mutex_unlock(&dev->struct_mutex);
-
- return ret;
}
EXPORT_SYMBOL(drm_legacy_rmmap);
+void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
+{
+ struct drm_map_list *r_list, *list_temp;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+ list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+ if (r_list->master == master) {
+ drm_legacy_rmmap_locked(dev, r_list->map);
+ r_list = NULL;
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
* the last close of the device, and this is necessary for cleanup when things
* exit uncleanly. Therefore, having userland manually remove mappings seems
@@ -517,6 +594,10 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
struct drm_map_list *r_list;
int ret;
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 6743ff7dc..059f7c39c 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -72,7 +72,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{
#if defined(CONFIG_X86)
- if (cpu_has_clflush) {
+ if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
drm_cache_flush_clflush(pages, num_pages);
return;
}
@@ -105,7 +105,7 @@ void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
- if (cpu_has_clflush) {
+ if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
struct sg_page_iter sg_iter;
mb();
@@ -129,7 +129,7 @@ void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
- if (cpu_has_clflush) {
+ if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
const int size = boot_cpu_data.x86_clflush_size;
void *end = addr + length;
addr = (void *)(((unsigned long)addr) & -size);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 691a1b939..0e3cc66aa 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -168,6 +168,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
{ DRM_MODE_CONNECTOR_eDP, "eDP" },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
+ { DRM_MODE_CONNECTOR_DPI, "DPI" },
};
static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
@@ -179,6 +180,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
{ DRM_MODE_ENCODER_DSI, "DSI" },
{ DRM_MODE_ENCODER_DPMST, "DP MST" },
+ { DRM_MODE_ENCODER_DPI, "DPI" },
};
static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
@@ -275,7 +277,8 @@ EXPORT_SYMBOL(drm_get_format_name);
static int drm_mode_object_get_reg(struct drm_device *dev,
struct drm_mode_object *obj,
uint32_t obj_type,
- bool register_obj)
+ bool register_obj,
+ void (*obj_free_cb)(struct kref *kref))
{
int ret;
@@ -288,6 +291,10 @@ static int drm_mode_object_get_reg(struct drm_device *dev,
*/
obj->id = ret;
obj->type = obj_type;
+ if (obj_free_cb) {
+ obj->free_cb = obj_free_cb;
+ kref_init(&obj->refcount);
+ }
}
mutex_unlock(&dev->mode_config.idr_mutex);
@@ -311,7 +318,7 @@ static int drm_mode_object_get_reg(struct drm_device *dev,
int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type)
{
- return drm_mode_object_get_reg(dev, obj, obj_type, true);
+ return drm_mode_object_get_reg(dev, obj, obj_type, true, NULL);
}
static void drm_mode_object_register(struct drm_device *dev,
@@ -323,19 +330,24 @@ static void drm_mode_object_register(struct drm_device *dev,
}
/**
- * drm_mode_object_put - free a modeset identifer
+ * drm_mode_object_unregister - free a modeset identifier
* @dev: DRM device
* @object: object to free
*
- * Free @id from @dev's unique identifier pool. Note that despite the _get
- * postfix modeset identifiers are _not_ reference counted. Hence don't use this
+ * Free @id from @dev's unique identifier pool.
+ * This function can be called multiple times, and guards against
+ * multiple removals.
+ * These modeset identifiers are _not_ reference counted. Hence don't use this
* for reference counted modeset objects like framebuffers.
*/
-void drm_mode_object_put(struct drm_device *dev,
+void drm_mode_object_unregister(struct drm_device *dev,
struct drm_mode_object *object)
{
mutex_lock(&dev->mode_config.idr_mutex);
- idr_remove(&dev->mode_config.crtc_idr, object->id);
+ if (object->id) {
+ idr_remove(&dev->mode_config.crtc_idr, object->id);
+ object->id = 0;
+ }
mutex_unlock(&dev->mode_config.idr_mutex);
}
@@ -350,11 +362,11 @@ static struct drm_mode_object *_object_find(struct drm_device *dev,
obj = NULL;
if (obj && obj->id != id)
obj = NULL;
- /* don't leak out unref'd fb's */
- if (obj &&
- (obj->type == DRM_MODE_OBJECT_FB ||
- obj->type == DRM_MODE_OBJECT_BLOB))
- obj = NULL;
+
+ if (obj && obj->free_cb) {
+ if (!kref_get_unless_zero(&obj->refcount))
+ obj = NULL;
+ }
mutex_unlock(&dev->mode_config.idr_mutex);
return obj;
@@ -366,25 +378,70 @@ static struct drm_mode_object *_object_find(struct drm_device *dev,
* @id: id of the mode object
* @type: type of the mode object
*
- * Note that framebuffers cannot be looked up with this functions - since those
- * are reference counted, they need special treatment. Even with
- * DRM_MODE_OBJECT_ANY (although that will simply return NULL
- * rather than WARN_ON()).
+ * This function is used to look up a modeset object. It will acquire a
+ * reference for reference counted objects. This reference must be dropped again
+ * by calling drm_mode_object_unreference().
*/
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
- /* Framebuffers are reference counted and need their own lookup
- * function.*/
- WARN_ON(type == DRM_MODE_OBJECT_FB || type == DRM_MODE_OBJECT_BLOB);
obj = _object_find(dev, id, type);
return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);
/**
+ * drm_mode_object_unreference - decr the object refcnt
+ * @obj: mode_object
+ *
+ * This function decrements the object's refcount if it is a refcounted modeset
+ * object. It is a no-op on any other object. This is used to drop references
+ * acquired with drm_mode_object_reference().
+ */
+void drm_mode_object_unreference(struct drm_mode_object *obj)
+{
+ if (obj->free_cb) {
+ DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+ kref_put(&obj->refcount, obj->free_cb);
+ }
+}
+EXPORT_SYMBOL(drm_mode_object_unreference);
+
+/**
+ * drm_mode_object_reference - incr the object refcnt
+ * @obj: mode_object
+ *
+ * This function increments the object's refcount if it is a refcounted modeset
+ * object. It is a no-op on any other object. References should be dropped again
+ * by calling drm_mode_object_unreference().
+ */
+void drm_mode_object_reference(struct drm_mode_object *obj)
+{
+ if (obj->free_cb) {
+ DRM_DEBUG("OBJ ID: %d (%d)\n", obj->id, atomic_read(&obj->refcount.refcount));
+ kref_get(&obj->refcount);
+ }
+}
+EXPORT_SYMBOL(drm_mode_object_reference);
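/*
 * A condensed sketch of the scheme the two helpers above implement,
 * assuming an object registered with a non-NULL free_cb: the kref lives
 * in the common base and the final unreference runs the per-type
 * destructor. example_obj/example_free are hypothetical names.
 */
struct example_obj {
	struct drm_mode_object base;	/* embeds refcount + free_cb */
};

static void example_free(struct kref *kref)
{
	struct example_obj *obj =
		container_of(kref, struct example_obj, base.refcount);

	kfree(obj);
}

/*
 * Registered via drm_mode_object_get_reg(dev, &obj->base, type, true,
 * example_free); afterwards drm_mode_object_reference()/_unreference()
 * pairs manage the lifetime.
 */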
+
+static void drm_framebuffer_free(struct kref *kref)
+{
+ struct drm_framebuffer *fb =
+ container_of(kref, struct drm_framebuffer, base.refcount);
+ struct drm_device *dev = fb->dev;
+
+ /*
+ * The lookup idr holds a weak reference, which has not necessarily been
+ * removed at this point. Check for that.
+ */
+ drm_mode_object_unregister(dev, &fb->base);
+
+ fb->funcs->destroy(fb);
+}
+
+/**
* drm_framebuffer_init - initialize a framebuffer
* @dev: DRM device
* @fb: framebuffer to be initialized
@@ -407,71 +464,26 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
{
int ret;
- mutex_lock(&dev->mode_config.fb_lock);
- kref_init(&fb->refcount);
INIT_LIST_HEAD(&fb->filp_head);
fb->dev = dev;
fb->funcs = funcs;
- ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
+ ret = drm_mode_object_get_reg(dev, &fb->base, DRM_MODE_OBJECT_FB,
+ false, drm_framebuffer_free);
if (ret)
goto out;
+ mutex_lock(&dev->mode_config.fb_lock);
dev->mode_config.num_fb++;
list_add(&fb->head, &dev->mode_config.fb_list);
-out:
mutex_unlock(&dev->mode_config.fb_lock);
+ drm_mode_object_register(dev, &fb->base);
+out:
return ret;
}
EXPORT_SYMBOL(drm_framebuffer_init);
-/* dev->mode_config.fb_lock must be held! */
-static void __drm_framebuffer_unregister(struct drm_device *dev,
- struct drm_framebuffer *fb)
-{
- drm_mode_object_put(dev, &fb->base);
-
- fb->base.id = 0;
-}
-
-static void drm_framebuffer_free(struct kref *kref)
-{
- struct drm_framebuffer *fb =
- container_of(kref, struct drm_framebuffer, refcount);
- struct drm_device *dev = fb->dev;
-
- /*
- * The lookup idr holds a weak reference, which has not necessarily been
- * removed at this point. Check for that.
- */
- mutex_lock(&dev->mode_config.fb_lock);
- if (fb->base.id) {
- /* Mark fb as reaped and drop idr ref. */
- __drm_framebuffer_unregister(dev, fb);
- }
- mutex_unlock(&dev->mode_config.fb_lock);
-
- fb->funcs->destroy(fb);
-}
-
-static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
- uint32_t id)
-{
- struct drm_mode_object *obj = NULL;
- struct drm_framebuffer *fb;
-
- mutex_lock(&dev->mode_config.idr_mutex);
- obj = idr_find(&dev->mode_config.crtc_idr, id);
- if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id))
- fb = NULL;
- else
- fb = obj_to_fb(obj);
- mutex_unlock(&dev->mode_config.idr_mutex);
-
- return fb;
-}
-
/**
* drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
* @dev: drm device
@@ -484,47 +496,17 @@ static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
uint32_t id)
{
- struct drm_framebuffer *fb;
-
- mutex_lock(&dev->mode_config.fb_lock);
- fb = __drm_framebuffer_lookup(dev, id);
- if (fb) {
- if (!kref_get_unless_zero(&fb->refcount))
- fb = NULL;
- }
- mutex_unlock(&dev->mode_config.fb_lock);
+ struct drm_mode_object *obj;
+ struct drm_framebuffer *fb = NULL;
+ obj = _object_find(dev, id, DRM_MODE_OBJECT_FB);
+ if (obj)
+ fb = obj_to_fb(obj);
return fb;
}
EXPORT_SYMBOL(drm_framebuffer_lookup);
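/*
 * A minimal sketch of the lookup/unreference pairing this conversion
 * relies on: every successful drm_framebuffer_lookup() now returns a
 * reference taken through the common object path and must be balanced.
 * example_fb_in_use() is a hypothetical caller.
 */
static bool example_fb_in_use(struct drm_device *dev, uint32_t id)
{
	struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, id);
	bool busy;

	if (!fb)
		return false;

	busy = drm_framebuffer_read_refcount(fb) > 1;
	drm_framebuffer_unreference(fb);	/* drop the lookup reference */
	return busy;
}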
/**
- * drm_framebuffer_unreference - unref a framebuffer
- * @fb: framebuffer to unref
- *
- * This functions decrements the fb's refcount and frees it if it drops to zero.
- */
-void drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
- DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
- kref_put(&fb->refcount, drm_framebuffer_free);
-}
-EXPORT_SYMBOL(drm_framebuffer_unreference);
-
-/**
- * drm_framebuffer_reference - incr the fb refcnt
- * @fb: framebuffer
- *
- * This functions increments the fb's refcount.
- */
-void drm_framebuffer_reference(struct drm_framebuffer *fb)
-{
- DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
- kref_get(&fb->refcount);
-}
-EXPORT_SYMBOL(drm_framebuffer_reference);
-
-/**
* drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
* @fb: fb to unregister
*
@@ -542,10 +524,8 @@ void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
dev = fb->dev;
- mutex_lock(&dev->mode_config.fb_lock);
/* Mark fb as reaped and drop idr ref. */
- __drm_framebuffer_unregister(dev, fb);
- mutex_unlock(&dev->mode_config.fb_lock);
+ drm_mode_object_unregister(dev, &fb->base);
}
EXPORT_SYMBOL(drm_framebuffer_unregister_private);
@@ -619,7 +599,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
* in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
* in this manner.
*/
- if (atomic_read(&fb->refcount.refcount) > 1) {
+ if (drm_framebuffer_read_refcount(fb) > 1) {
drm_modeset_lock_all(dev);
/* remove from any CRTC */
drm_for_each_crtc(crtc, dev) {
@@ -705,7 +685,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
drm_num_crtcs(dev));
}
if (!crtc->name) {
- drm_mode_object_put(dev, &crtc->base);
+ drm_mode_object_unregister(dev, &crtc->base);
return -ENOMEM;
}
@@ -747,7 +727,7 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
drm_modeset_lock_fini(&crtc->mutex);
- drm_mode_object_put(dev, &crtc->base);
+ drm_mode_object_unregister(dev, &crtc->base);
list_del(&crtc->head);
dev->mode_config.num_crtc--;
@@ -884,6 +864,16 @@ static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
mode->interlace ? " interlaced" : "");
}
+static void drm_connector_free(struct kref *kref)
+{
+ struct drm_connector *connector =
+ container_of(kref, struct drm_connector, base.refcount);
+ struct drm_device *dev = connector->dev;
+
+ drm_mode_object_unregister(dev, &connector->base);
+ connector->funcs->destroy(connector);
+}
+
/**
* drm_connector_init - Init a preallocated connector
* @dev: DRM device
@@ -909,7 +899,9 @@ int drm_connector_init(struct drm_device *dev,
drm_modeset_lock_all(dev);
- ret = drm_mode_object_get_reg(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR, false);
+ ret = drm_mode_object_get_reg(dev, &connector->base,
+ DRM_MODE_OBJECT_CONNECTOR,
+ false, drm_connector_free);
if (ret)
goto out_unlock;
@@ -972,7 +964,7 @@ out_put_id:
ida_remove(&config->connector_ida, connector->connector_id);
out_put:
if (ret)
- drm_mode_object_put(dev, &connector->base);
+ drm_mode_object_unregister(dev, &connector->base);
out_unlock:
drm_modeset_unlock_all(dev);
@@ -1010,7 +1002,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->connector_id);
kfree(connector->display_info.bus_formats);
- drm_mode_object_put(dev, &connector->base);
+ drm_mode_object_unregister(dev, &connector->base);
kfree(connector->name);
connector->name = NULL;
list_del(&connector->head);
@@ -1038,8 +1030,6 @@ int drm_connector_register(struct drm_connector *connector)
{
int ret;
- drm_mode_object_register(connector->dev, &connector->base);
-
ret = drm_sysfs_connector_add(connector);
if (ret)
return ret;
@@ -1050,6 +1040,8 @@ int drm_connector_register(struct drm_connector *connector)
return ret;
}
+ drm_mode_object_register(connector->dev, &connector->base);
+
return 0;
}
EXPORT_SYMBOL(drm_connector_register);
@@ -1067,25 +1059,65 @@ void drm_connector_unregister(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_connector_unregister);
+/**
+ * drm_connector_register_all - register all connectors
+ * @dev: drm device
+ *
+ * This function registers all connectors in sysfs and other places so that
+ * userspace can start to access them. Drivers can call it after calling
+ * drm_dev_register() to complete the device registration, if they don't call
+ * drm_connector_register() on each connector individually.
+ *
+ * When a device is unplugged and should be removed from userspace access,
+ * call drm_connector_unregister_all(), which is the inverse of this
+ * function.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_register_all(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ int ret;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ drm_for_each_connector(connector, dev) {
+ ret = drm_connector_register(connector);
+ if (ret)
+ goto err;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+
+err:
+ mutex_unlock(&dev->mode_config.mutex);
+ drm_connector_unregister_all(dev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_connector_register_all);
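/*
 * A sketch of the registration order described above, assuming a
 * hypothetical driver without the deprecated ->load() callback;
 * example_register() is not part of this patch.
 */
static int example_register(struct drm_device *ddev)
{
	int ret;

	ret = drm_dev_register(ddev, 0);
	if (ret)
		return ret;

	/* userspace-visible entry points come up only after this call */
	ret = drm_connector_register_all(ddev);
	if (ret)
		drm_dev_unregister(ddev);

	return ret;
}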
/**
- * drm_connector_unplug_all - unregister connector userspace interfaces
+ * drm_connector_unregister_all - unregister connector userspace interfaces
* @dev: drm device
*
- * This function unregisters all connector userspace interfaces in sysfs. Should
- * be call when the device is disconnected, e.g. from an usb driver's
- * ->disconnect callback.
+ * This function unregisters all connectors from sysfs and other places so
+ * that userspace can no longer access them. Drivers should call this as the
+ * first step in tearing down the device instance, or when the underlying
+ * physical device has disappeared (e.g. USB unplug), right before calling
+ * drm_dev_unregister().
*/
-void drm_connector_unplug_all(struct drm_device *dev)
+void drm_connector_unregister_all(struct drm_device *dev)
{
struct drm_connector *connector;
/* FIXME: taking the mode config mutex ends up in a clash with sysfs */
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
drm_connector_unregister(connector);
-
}
-EXPORT_SYMBOL(drm_connector_unplug_all);
+EXPORT_SYMBOL(drm_connector_unregister_all);
/**
* drm_encoder_init - Init a preallocated encoder
@@ -1138,7 +1170,7 @@ int drm_encoder_init(struct drm_device *dev,
out_put:
if (ret)
- drm_mode_object_put(dev, &encoder->base);
+ drm_mode_object_unregister(dev, &encoder->base);
out_unlock:
drm_modeset_unlock_all(dev);
@@ -1181,7 +1213,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
drm_modeset_lock_all(dev);
- drm_mode_object_put(dev, &encoder->base);
+ drm_mode_object_unregister(dev, &encoder->base);
kfree(encoder->name);
list_del(&encoder->head);
dev->mode_config.num_encoder--;
@@ -1242,7 +1274,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
GFP_KERNEL);
if (!plane->format_types) {
DRM_DEBUG_KMS("out of memory when allocating plane\n");
- drm_mode_object_put(dev, &plane->base);
+ drm_mode_object_unregister(dev, &plane->base);
return -ENOMEM;
}
@@ -1258,7 +1290,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
}
if (!plane->name) {
kfree(plane->format_types);
- drm_mode_object_put(dev, &plane->base);
+ drm_mode_object_unregister(dev, &plane->base);
return -ENOMEM;
}
@@ -1338,7 +1370,7 @@ void drm_plane_cleanup(struct drm_plane *plane)
drm_modeset_lock_all(dev);
kfree(plane->format_types);
- drm_mode_object_put(dev, &plane->base);
+ drm_mode_object_unregister(dev, &plane->base);
BUG_ON(list_empty(&plane->head));
@@ -1918,8 +1950,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
copied = 0;
crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr;
drm_for_each_crtc(crtc, dev) {
- DRM_DEBUG_KMS("[CRTC:%d:%s]\n",
- crtc->base.id, crtc->name);
if (put_user(crtc->base.id, crtc_id + copied)) {
ret = -EFAULT;
goto out;
@@ -1934,8 +1964,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
copied = 0;
encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr;
drm_for_each_encoder(encoder, dev) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
- encoder->name);
if (put_user(encoder->base.id, encoder_id +
copied)) {
ret = -EFAULT;
@@ -1951,9 +1979,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
copied = 0;
connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr;
drm_for_each_connector(connector, dev) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id,
- connector->name);
if (put_user(connector->base.id,
connector_id + copied)) {
ret = -EFAULT;
@@ -1964,9 +1989,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
}
card_res->count_connectors = connector_count;
- DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
- card_res->count_connectors, card_res->count_encoders);
-
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
@@ -2125,11 +2147,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
- DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
-
mutex_lock(&dev->mode_config.mutex);
- connector = drm_connector_find(dev, out_resp->connector_id);
+ connector = drm_connector_lookup(dev, out_resp->connector_id);
if (!connector) {
ret = -ENOENT;
goto out_unlock;
@@ -2213,6 +2233,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out:
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_connector_unreference(connector);
out_unlock:
mutex_unlock(&dev->mode_config.mutex);
@@ -2855,13 +2876,14 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
}
for (i = 0; i < crtc_req->count_connectors; i++) {
+ connector_set[i] = NULL;
set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
if (get_user(out_id, &set_connectors_ptr[i])) {
ret = -EFAULT;
goto out;
}
- connector = drm_connector_find(dev, out_id);
+ connector = drm_connector_lookup(dev, out_id);
if (!connector) {
DRM_DEBUG_KMS("Connector id %d unknown\n",
out_id);
@@ -2889,6 +2911,12 @@ out:
if (fb)
drm_framebuffer_unreference(fb);
+ if (connector_set) {
+ for (i = 0; i < crtc_req->count_connectors; i++) {
+ if (connector_set[i])
+ drm_connector_unreference(connector_set[i]);
+ }
+ }
kfree(connector_set);
drm_mode_destroy(dev, mode);
drm_modeset_unlock_all(dev);
@@ -3421,11 +3449,11 @@ int drm_mode_addfb2(struct drm_device *dev,
if (IS_ERR(fb))
return PTR_ERR(fb);
- /* Transfer ownership to the filp for reaping on close */
-
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
- mutex_lock(&file_priv->fbs_lock);
r->fb_id = fb->base.id;
+
+ /* Transfer ownership to the filp for reaping on close */
+ mutex_lock(&file_priv->fbs_lock);
list_add(&fb->filp_head, &file_priv->fbs);
mutex_unlock(&file_priv->fbs_lock);
@@ -3474,22 +3502,25 @@ int drm_mode_rmfb(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&file_priv->fbs_lock);
- mutex_lock(&dev->mode_config.fb_lock);
- fb = __drm_framebuffer_lookup(dev, *id);
+ fb = drm_framebuffer_lookup(dev, *id);
if (!fb)
- goto fail_lookup;
+ return -ENOENT;
+ mutex_lock(&file_priv->fbs_lock);
list_for_each_entry(fbl, &file_priv->fbs, filp_head)
if (fb == fbl)
found = 1;
- if (!found)
- goto fail_lookup;
+ if (!found) {
+ mutex_unlock(&file_priv->fbs_lock);
+ goto fail_unref;
+ }
list_del_init(&fb->filp_head);
- mutex_unlock(&dev->mode_config.fb_lock);
mutex_unlock(&file_priv->fbs_lock);
+ /* drop the reference we picked up in framebuffer lookup */
+ drm_framebuffer_unreference(fb);
+
/*
* we now own the reference that was stored in the fbs list
*
@@ -3497,7 +3528,7 @@ int drm_mode_rmfb(struct drm_device *dev,
* so run this in a separate stack as there's no way to correctly
* handle this after the fb is already removed from the lookup table.
*/
- if (atomic_read(&fb->refcount.refcount) > 1) {
+ if (drm_framebuffer_read_refcount(fb) > 1) {
struct drm_mode_rmfb_work arg;
INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
@@ -3512,10 +3543,8 @@ int drm_mode_rmfb(struct drm_device *dev,
return 0;
-fail_lookup:
- mutex_unlock(&dev->mode_config.fb_lock);
- mutex_unlock(&file_priv->fbs_lock);
-
+fail_unref:
+ drm_framebuffer_unreference(fb);
return -ENOENT;
}
@@ -3690,7 +3719,7 @@ void drm_fb_release(struct drm_file *priv)
* at it any more.
*/
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
- if (atomic_read(&fb->refcount.refcount) > 1) {
+ if (drm_framebuffer_read_refcount(fb) > 1) {
list_move_tail(&fb->filp_head, &arg.fbs);
} else {
list_del_init(&fb->filp_head);
@@ -4077,7 +4106,7 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
if (property->num_values)
kfree(property->values);
- drm_mode_object_put(dev, &property->base);
+ drm_mode_object_unregister(dev, &property->base);
list_del(&property->head);
kfree(property);
}
@@ -4282,6 +4311,20 @@ done:
return ret;
}
+static void drm_property_free_blob(struct kref *kref)
+{
+ struct drm_property_blob *blob =
+ container_of(kref, struct drm_property_blob, base.refcount);
+
+ mutex_lock(&blob->dev->mode_config.blob_lock);
+ list_del(&blob->head_global);
+ mutex_unlock(&blob->dev->mode_config.blob_lock);
+
+ drm_mode_object_unregister(blob->dev, &blob->base);
+
+ kfree(blob);
+}
+
/**
* drm_property_create_blob - Create new blob property
*
@@ -4319,20 +4362,16 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
if (data)
memcpy(blob->data, data, length);
- mutex_lock(&dev->mode_config.blob_lock);
-
- ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
+ ret = drm_mode_object_get_reg(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
+ true, drm_property_free_blob);
if (ret) {
kfree(blob);
- mutex_unlock(&dev->mode_config.blob_lock);
return ERR_PTR(-EINVAL);
}
- kref_init(&blob->refcount);
-
+ mutex_lock(&dev->mode_config.blob_lock);
list_add_tail(&blob->head_global,
&dev->mode_config.property_blob_list);
-
mutex_unlock(&dev->mode_config.blob_lock);
return blob;
@@ -4340,27 +4379,6 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
EXPORT_SYMBOL(drm_property_create_blob);
/**
- * drm_property_free_blob - Blob property destructor
- *
- * Internal free function for blob properties; must not be used directly.
- *
- * @kref: Reference
- */
-static void drm_property_free_blob(struct kref *kref)
-{
- struct drm_property_blob *blob =
- container_of(kref, struct drm_property_blob, refcount);
-
- WARN_ON(!mutex_is_locked(&blob->dev->mode_config.blob_lock));
-
- list_del(&blob->head_global);
- list_del(&blob->head_file);
- drm_mode_object_put(blob->dev, &blob->base);
-
- kfree(blob);
-}
-
-/**
* drm_property_unreference_blob - Unreference a blob property
*
* Drop a reference on a blob property. May free the object.
@@ -4369,42 +4387,14 @@ static void drm_property_free_blob(struct kref *kref)
*/
void drm_property_unreference_blob(struct drm_property_blob *blob)
{
- struct drm_device *dev;
-
if (!blob)
return;
- dev = blob->dev;
-
- DRM_DEBUG("%p: blob ID: %d (%d)\n", blob, blob->base.id, atomic_read(&blob->refcount.refcount));
-
- if (kref_put_mutex(&blob->refcount, drm_property_free_blob,
- &dev->mode_config.blob_lock))
- mutex_unlock(&dev->mode_config.blob_lock);
- else
- might_lock(&dev->mode_config.blob_lock);
+ drm_mode_object_unreference(&blob->base);
}
EXPORT_SYMBOL(drm_property_unreference_blob);
/**
- * drm_property_unreference_blob_locked - Unreference a blob property with blob_lock held
- *
- * Drop a reference on a blob property. May free the object. This must be
- * called with blob_lock held.
- *
- * @blob: Pointer to blob property
- */
-static void drm_property_unreference_blob_locked(struct drm_property_blob *blob)
-{
- if (!blob)
- return;
-
- DRM_DEBUG("%p: blob ID: %d (%d)\n", blob, blob->base.id, atomic_read(&blob->refcount.refcount));
-
- kref_put(&blob->refcount, drm_property_free_blob);
-}
-
-/**
* drm_property_destroy_user_blobs - destroy all blobs created by this client
* @dev: DRM device
* @file_priv: destroy all blobs owned by this file handle
@@ -4414,14 +4404,14 @@ void drm_property_destroy_user_blobs(struct drm_device *dev,
{
struct drm_property_blob *blob, *bt;
- mutex_lock(&dev->mode_config.blob_lock);
-
+	/*
+	 * When the file gets released, no one else can access the blob list
+	 * any more, so there is no need to grab dev->mode_config.blob_lock.
+	 */
list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) {
list_del_init(&blob->head_file);
- drm_property_unreference_blob_locked(blob);
+ drm_property_unreference_blob(blob);
}
-
- mutex_unlock(&dev->mode_config.blob_lock);
}
/**
@@ -4433,35 +4423,11 @@ void drm_property_destroy_user_blobs(struct drm_device *dev,
*/
struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob)
{
- DRM_DEBUG("%p: blob ID: %d (%d)\n", blob, blob->base.id, atomic_read(&blob->refcount.refcount));
- kref_get(&blob->refcount);
+ drm_mode_object_reference(&blob->base);
return blob;
}
EXPORT_SYMBOL(drm_property_reference_blob);
-/*
- * Like drm_property_lookup_blob, but does not return an additional reference.
- * Must be called with blob_lock held.
- */
-static struct drm_property_blob *__drm_property_lookup_blob(struct drm_device *dev,
- uint32_t id)
-{
- struct drm_mode_object *obj = NULL;
- struct drm_property_blob *blob;
-
- WARN_ON(!mutex_is_locked(&dev->mode_config.blob_lock));
-
- mutex_lock(&dev->mode_config.idr_mutex);
- obj = idr_find(&dev->mode_config.crtc_idr, id);
- if (!obj || (obj->type != DRM_MODE_OBJECT_BLOB) || (obj->id != id))
- blob = NULL;
- else
- blob = obj_to_blob(obj);
- mutex_unlock(&dev->mode_config.idr_mutex);
-
- return blob;
-}
-
/**
* drm_property_lookup_blob - look up a blob property and take a reference
* @dev: drm device
@@ -4474,16 +4440,12 @@ static struct drm_property_blob *__drm_property_lookup_blob(struct drm_device *d
struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
uint32_t id)
{
- struct drm_property_blob *blob;
-
- mutex_lock(&dev->mode_config.blob_lock);
- blob = __drm_property_lookup_blob(dev, id);
- if (blob) {
- if (!kref_get_unless_zero(&blob->refcount))
- blob = NULL;
- }
- mutex_unlock(&dev->mode_config.blob_lock);
+ struct drm_mode_object *obj;
+ struct drm_property_blob *blob = NULL;
+ obj = _object_find(dev, id, DRM_MODE_OBJECT_BLOB);
+ if (obj)
+ blob = obj_to_blob(obj);
return blob;
}
EXPORT_SYMBOL(drm_property_lookup_blob);
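/*
 * A minimal sketch of the new blob lifetime rules: the lookup hands back
 * a reference through the common object path, so readers never touch
 * blob_lock. example_blob_length() is a hypothetical caller.
 */
static size_t example_blob_length(struct drm_device *dev, uint32_t id)
{
	struct drm_property_blob *blob;
	size_t len;

	blob = drm_property_lookup_blob(dev, id);
	if (!blob)
		return 0;

	len = blob->length;
	drm_property_unreference_blob(blob);	/* drop the lookup reference */
	return len;
}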
@@ -4588,26 +4550,21 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- drm_modeset_lock_all(dev);
- mutex_lock(&dev->mode_config.blob_lock);
- blob = __drm_property_lookup_blob(dev, out_resp->blob_id);
- if (!blob) {
- ret = -ENOENT;
- goto done;
- }
+ blob = drm_property_lookup_blob(dev, out_resp->blob_id);
+ if (!blob)
+ return -ENOENT;
if (out_resp->length == blob->length) {
blob_ptr = (void __user *)(unsigned long)out_resp->data;
if (copy_to_user(blob_ptr, blob->data, blob->length)) {
ret = -EFAULT;
- goto done;
+ goto unref;
}
}
out_resp->length = blob->length;
+unref:
+ drm_property_unreference_blob(blob);
-done:
- mutex_unlock(&dev->mode_config.blob_lock);
- drm_modeset_unlock_all(dev);
return ret;
}
@@ -4686,13 +4643,11 @@ int drm_mode_destroyblob_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- mutex_lock(&dev->mode_config.blob_lock);
- blob = __drm_property_lookup_blob(dev, out_resp->blob_id);
- if (!blob) {
- ret = -ENOENT;
- goto err;
- }
+ blob = drm_property_lookup_blob(dev, out_resp->blob_id);
+ if (!blob)
+ return -ENOENT;
+ mutex_lock(&dev->mode_config.blob_lock);
/* Ensure the property was actually created by this user. */
list_for_each_entry(bt, &file_priv->blobs, head_file) {
if (bt == blob) {
@@ -4709,13 +4664,18 @@ int drm_mode_destroyblob_ioctl(struct drm_device *dev,
/* We must drop head_file here, because we may not be the last
* reference on the blob. */
list_del_init(&blob->head_file);
- drm_property_unreference_blob_locked(blob);
mutex_unlock(&dev->mode_config.blob_lock);
+ /* One reference from lookup, and one from the filp. */
+ drm_property_unreference_blob(blob);
+ drm_property_unreference_blob(blob);
+
return 0;
err:
mutex_unlock(&dev->mode_config.blob_lock);
+ drm_property_unreference_blob(blob);
+
return ret;
}
@@ -4879,19 +4839,8 @@ bool drm_property_change_valid_get(struct drm_property *property,
if (value == 0)
return true;
- /* handle refcnt'd objects specially: */
- if (property->values[0] == DRM_MODE_OBJECT_FB) {
- struct drm_framebuffer *fb;
- fb = drm_framebuffer_lookup(property->dev, value);
- if (fb) {
- *ref = &fb->base;
- return true;
- } else {
- return false;
- }
- } else {
- return _object_find(property->dev, value, property->values[0]) != NULL;
- }
+ *ref = _object_find(property->dev, value, property->values[0]);
+ return *ref != NULL;
}
for (i = 0; i < property->num_values; i++)
@@ -4907,8 +4856,7 @@ void drm_property_change_valid_put(struct drm_property *property,
return;
if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
- if (property->values[0] == DRM_MODE_OBJECT_FB)
- drm_framebuffer_unreference(obj_to_fb(ref));
+ drm_mode_object_unreference(ref);
} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
drm_property_unreference_blob(obj_to_blob(ref));
}
@@ -5039,7 +4987,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
}
if (!obj->properties) {
ret = -EINVAL;
- goto out;
+ goto out_unref;
}
ret = get_properties(obj, file_priv->atomic,
@@ -5047,6 +4995,8 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
(uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
&arg->count_props);
+out_unref:
+ drm_mode_object_unreference(obj);
out:
drm_modeset_unlock_all(dev);
return ret;
@@ -5089,25 +5039,25 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
goto out;
}
if (!arg_obj->properties)
- goto out;
+ goto out_unref;
for (i = 0; i < arg_obj->properties->count; i++)
if (arg_obj->properties->properties[i]->base.id == arg->prop_id)
break;
if (i == arg_obj->properties->count)
- goto out;
+ goto out_unref;
prop_obj = drm_mode_object_find(dev, arg->prop_id,
DRM_MODE_OBJECT_PROPERTY);
if (!prop_obj) {
ret = -ENOENT;
- goto out;
+ goto out_unref;
}
property = obj_to_property(prop_obj);
if (!drm_property_change_valid_get(property, arg->value, &ref))
- goto out;
+ goto out_unref;
switch (arg_obj->type) {
case DRM_MODE_OBJECT_CONNECTOR:
@@ -5125,6 +5075,8 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
drm_property_change_valid_put(property, ref);
+out_unref:
+ drm_mode_object_unreference(arg_obj);
out:
drm_modeset_unlock_all(dev);
return ret;
@@ -5962,6 +5914,15 @@ void drm_mode_config_cleanup(struct drm_device *dev)
drm_property_destroy(dev, property);
}
+ list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+ head) {
+ plane->funcs->destroy(plane);
+ }
+
+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+
list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
head_global) {
drm_property_unreference_blob(blob);
@@ -5977,16 +5938,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
*/
WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
- drm_framebuffer_free(&fb->refcount);
- }
-
- list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
- head) {
- plane->funcs->destroy(plane);
- }
-
- list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
- crtc->funcs->destroy(crtc);
+ drm_framebuffer_free(&fb->base.refcount);
}
ida_destroy(&dev->mode_config.connector_ida);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 79555d2b1..26feb2f84 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -170,11 +170,14 @@ drm_encoder_disable(struct drm_encoder *encoder)
{
const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (!encoder_funcs)
+ return;
+
drm_bridge_disable(encoder->bridge);
if (encoder_funcs->disable)
(*encoder_funcs->disable)(encoder);
- else
+ else if (encoder_funcs->dpms)
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
drm_bridge_post_disable(encoder->bridge);
@@ -248,6 +251,9 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
drm_for_each_encoder(encoder, dev) {
encoder_funcs = encoder->helper_private;
+ if (!encoder_funcs)
+ continue;
+
/* Disable unused encoders */
if (encoder->crtc == NULL)
drm_encoder_disable(encoder);
@@ -326,6 +332,10 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
+ encoder_funcs = encoder->helper_private;
+ if (!encoder_funcs)
+ continue;
+
ret = drm_bridge_mode_fixup(encoder->bridge,
mode, adjusted_mode);
if (!ret) {
@@ -360,11 +370,15 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
+ encoder_funcs = encoder->helper_private;
+ if (!encoder_funcs)
+ continue;
+
drm_bridge_disable(encoder->bridge);
- encoder_funcs = encoder->helper_private;
/* Disable the encoders as the first thing we do. */
- encoder_funcs->prepare(encoder);
+ if (encoder_funcs->prepare)
+ encoder_funcs->prepare(encoder);
drm_bridge_post_disable(encoder->bridge);
}
@@ -385,11 +399,15 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
+ encoder_funcs = encoder->helper_private;
+ if (!encoder_funcs)
+ continue;
+
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
encoder->base.id, encoder->name,
mode->base.id, mode->name);
- encoder_funcs = encoder->helper_private;
- encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+ if (encoder_funcs->mode_set)
+ encoder_funcs->mode_set(encoder, mode, adjusted_mode);
drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
}
@@ -402,10 +420,14 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
+ encoder_funcs = encoder->helper_private;
+ if (!encoder_funcs)
+ continue;
+
drm_bridge_pre_enable(encoder->bridge);
- encoder_funcs = encoder->helper_private;
- encoder_funcs->commit(encoder);
+ if (encoder_funcs->commit)
+ encoder_funcs->commit(encoder);
drm_bridge_enable(encoder->bridge);
}
@@ -456,6 +478,9 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
* between them is henceforth no longer available.
*/
connector->dpms = DRM_MODE_DPMS_OFF;
+
+ /* we keep a reference while the encoder is bound */
+ drm_connector_unreference(connector);
}
}
@@ -503,11 +528,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
int drm_crtc_helper_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
- struct drm_crtc *new_crtc;
- struct drm_encoder *save_encoders, *new_encoder, *encoder;
+ struct drm_crtc **save_encoder_crtcs, *new_crtc;
+ struct drm_encoder **save_connector_encoders, *new_encoder, *encoder;
bool mode_changed = false; /* if true do a full mode set */
bool fb_changed = false; /* if true and !mode_changed just do a flip */
- struct drm_connector *save_connectors, *connector;
+ struct drm_connector *connector;
int count = 0, ro, fail = 0;
const struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_mode_set save_set;
@@ -549,15 +574,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
* Allocate space for the backup of all (non-pointer) encoder and
* connector data.
*/
- save_encoders = kzalloc(dev->mode_config.num_encoder *
- sizeof(struct drm_encoder), GFP_KERNEL);
- if (!save_encoders)
+ save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder *
+ sizeof(struct drm_crtc *), GFP_KERNEL);
+ if (!save_encoder_crtcs)
return -ENOMEM;
- save_connectors = kzalloc(dev->mode_config.num_connector *
- sizeof(struct drm_connector), GFP_KERNEL);
- if (!save_connectors) {
- kfree(save_encoders);
+ save_connector_encoders = kzalloc(dev->mode_config.num_connector *
+ sizeof(struct drm_encoder *), GFP_KERNEL);
+ if (!save_connector_encoders) {
+ kfree(save_encoder_crtcs);
return -ENOMEM;
}
@@ -568,12 +593,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
*/
count = 0;
drm_for_each_encoder(encoder, dev) {
- save_encoders[count++] = *encoder;
+ save_encoder_crtcs[count++] = encoder->crtc;
}
count = 0;
drm_for_each_connector(connector, dev) {
- save_connectors[count++] = *connector;
+ save_connector_encoders[count++] = connector->encoder;
}
save_set.crtc = set->crtc;
@@ -606,6 +631,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
}
+	/*
+	 * Take a reference on all unbound connectors in the set; reuse the
+	 * reference already taken for bound connectors.
+	 */
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro]->encoder)
+ continue;
+ drm_connector_reference(set->connectors[ro]);
+ }
+
/* a) traverse passed in connector list and get encoders for them */
count = 0;
drm_for_each_connector(connector, dev) {
@@ -724,20 +758,29 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
}
}
- kfree(save_connectors);
- kfree(save_encoders);
+ kfree(save_connector_encoders);
+ kfree(save_encoder_crtcs);
return 0;
fail:
/* Restore all previous data. */
count = 0;
drm_for_each_encoder(encoder, dev) {
- *encoder = save_encoders[count++];
+ encoder->crtc = save_encoder_crtcs[count++];
}
count = 0;
drm_for_each_connector(connector, dev) {
- *connector = save_connectors[count++];
+ connector->encoder = save_connector_encoders[count++];
+ }
+
+	/*
+	 * After a failure, drop the reference on all unbound connectors in
+	 * the set; bound connectors keep their reference.
+	 */
+ for (ro = 0; ro < set->num_connectors; ro++) {
+ if (set->connectors[ro]->encoder)
+ continue;
+ drm_connector_unreference(set->connectors[ro]);
}
/* Try to restore the config */
@@ -746,8 +789,8 @@ fail:
save_set.y, save_set.fb))
DRM_ERROR("failed to restore config after modeset failure\n");
- kfree(save_connectors);
- kfree(save_encoders);
+ kfree(save_connector_encoders);
+ kfree(save_encoder_crtcs);
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_config);
@@ -771,12 +814,15 @@ static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_bridge *bridge = encoder->bridge;
const struct drm_encoder_helper_funcs *encoder_funcs;
+ encoder_funcs = encoder->helper_private;
+ if (!encoder_funcs)
+ return;
+
if (mode == DRM_MODE_DPMS_ON)
drm_bridge_pre_enable(bridge);
else
drm_bridge_disable(bridge);
- encoder_funcs = encoder->helper_private;
if (encoder_funcs->dpms)
encoder_funcs->dpms(encoder, mode);
@@ -1053,10 +1099,12 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
if (plane->funcs->atomic_duplicate_state)
plane_state = plane->funcs->atomic_duplicate_state(plane);
- else if (plane->state)
+ else {
+ if (!plane->state)
+ drm_atomic_helper_plane_reset(plane);
+
plane_state = drm_atomic_helper_plane_duplicate_state(plane);
- else
- plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ }
if (!plane_state)
return -ENOMEM;
plane_state->plane = plane;
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 247dc8b62..a78c13828 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -33,8 +33,8 @@
int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type);
-void drm_mode_object_put(struct drm_device *dev,
- struct drm_mode_object *object);
+void drm_mode_object_unregister(struct drm_device *dev,
+ struct drm_mode_object *object);
/* drm_atomic.c */
int drm_atomic_get_property(struct drm_mode_object *obj,
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index f73b38b33..3334baacf 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -159,6 +159,12 @@ static ssize_t auxdev_read(struct file *file, char __user *buf, size_t count,
uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
+ if (signal_pending(current)) {
+ res = num_bytes_processed ?
+ num_bytes_processed : -ERESTARTSYS;
+ goto out;
+ }
+
res = drm_dp_dpcd_read(aux_dev->aux, *offset, localbuf, todo);
if (res <= 0) {
res = num_bytes_processed ? num_bytes_processed : res;
@@ -202,6 +208,12 @@ static ssize_t auxdev_write(struct file *file, const char __user *buf,
uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES];
ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf));
+ if (signal_pending(current)) {
+ res = num_bytes_processed ?
+ num_bytes_processed : -ERESTARTSYS;
+ goto out;
+ }
+
if (__copy_from_user(localbuf,
buf + num_bytes_processed, todo)) {
res = num_bytes_processed ?
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index df64ed1c0..eeaf5a7c3 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -178,8 +178,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
unsigned int offset, void *buffer, size_t size)
{
struct drm_dp_aux_msg msg;
- unsigned int retry;
- int err = 0;
+ unsigned int retry, native_reply;
+ int err = 0, ret = 0;
memset(&msg, 0, sizeof(msg));
msg.address = offset;
@@ -196,38 +196,39 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
* sufficient, bump to 32 which makes Dell 4k monitors happier.
*/
for (retry = 0; retry < 32; retry++) {
-
- err = aux->transfer(aux, &msg);
- if (err < 0) {
- if (err == -EBUSY)
- continue;
-
- goto unlock;
+ if (ret != 0 && ret != -ETIMEDOUT) {
+ usleep_range(AUX_RETRY_INTERVAL,
+ AUX_RETRY_INTERVAL + 100);
}
+ ret = aux->transfer(aux, &msg);
- switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
- case DP_AUX_NATIVE_REPLY_ACK:
- if (err < size)
- err = -EPROTO;
- goto unlock;
+ if (ret > 0) {
+ native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
+ if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
+ if (ret == size)
+ goto unlock;
- case DP_AUX_NATIVE_REPLY_NACK:
- err = -EIO;
- goto unlock;
-
- case DP_AUX_NATIVE_REPLY_DEFER:
- usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
- break;
+ ret = -EPROTO;
+ } else
+ ret = -EIO;
}
+
+ /*
+ * We want the error we return to be the error we received on
+ * the first transaction, since we may get a different error the
+ * next time we retry
+ */
+ if (!err)
+ err = ret;
}
DRM_DEBUG_KMS("too many retries, giving up\n");
- err = -EIO;
+ ret = err;
unlock:
mutex_unlock(&aux->hw_mutex);
- return err;
+ return ret;
}
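/*
 * The rewritten loop above reports the error from the *first* failed
 * transaction rather than the last; a distilled sketch of that pattern,
 * with example_xfer() standing in for aux->transfer():
 */
static int example_xfer(void);	/* hypothetical transfer helper */

static int example_retry(void)
{
	int err = 0, ret = 0;
	unsigned int retry;

	for (retry = 0; retry < 32; retry++) {
		ret = example_xfer();
		if (ret >= 0)
			return ret;	/* success: latest result wins */
		if (!err)
			err = ret;	/* remember only the first failure */
	}
	return err;
}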
/**
@@ -247,6 +248,25 @@ unlock:
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size)
{
+ int ret;
+
+ /*
+ * HP ZR24w corrupts the first DPCD access after entering power save
+ * mode. Eg. on a read, the entire buffer will be filled with the same
+ * byte. Do a throw away read to avoid corrupting anything we care
+ * about. Afterwards things will work correctly until the monitor
+ * gets woken up and subsequently re-enters power save mode.
+ *
+ * The user pressing any button on the monitor is enough to wake it
+ * up, so there is no particularly good place to do the workaround.
+ * We just have to do it before any DPCD access and hope that the
+ * monitor doesn't power down exactly after the throw away read.
+ */
+ ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV, buffer,
+ 1);
+ if (ret != 1)
+ return ret;
+
return drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, buffer,
size);
}
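/*
 * A usage sketch for the helper above, assuming a hypothetical caller
 * probing the sink's DPCD revision; the throw-away read documented
 * above happens transparently inside drm_dp_dpcd_read().
 */
static int example_dpcd_rev(struct drm_dp_aux *aux)
{
	u8 rev;
	ssize_t ret;

	ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, &rev, sizeof(rev));
	if (ret != sizeof(rev))
		return ret < 0 ? ret : -EIO;

	DRM_DEBUG_KMS("DPCD rev %d.%d\n", rev >> 4, rev & 0xf);
	return 0;
}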
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index ccfe7e72d..653790805 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2756,7 +2756,7 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m,
seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
list_for_each_entry(port, &mstb->ports, next) {
- seq_printf(m, "%sport: %d: ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
+ seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
if (port->mstb)
drm_dp_mst_dump_mstb(m, port->mstb);
}
@@ -2777,6 +2777,16 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
return false;
}
+static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, char *name,
+ int namelen)
+{
+ struct edid *mst_edid;
+
+ mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
+ drm_edid_get_monitor_name(mst_edid, name, namelen);
+}
+
/**
* drm_dp_mst_dump_topology(): dump topology to seq file.
* @m: seq_file to dump output to
@@ -2789,6 +2799,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
{
int i;
struct drm_dp_mst_port *port;
+
mutex_lock(&mgr->lock);
if (mgr->mst_primary)
drm_dp_mst_dump_mstb(m, mgr->mst_primary);
@@ -2797,14 +2808,21 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
mutex_unlock(&mgr->lock);
mutex_lock(&mgr->payload_lock);
- seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);
+ seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
+ mgr->max_payloads);
for (i = 0; i < mgr->max_payloads; i++) {
if (mgr->proposed_vcpis[i]) {
+ char name[14];
+
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
- seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
+ fetch_monitor_name(mgr, port, name, sizeof(name));
+ seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
+ port->port_num, port->vcpi.vcpi,
+ port->vcpi.num_slots,
+ (*name != 0) ? name : "Unknown");
} else
- seq_printf(m, "vcpi %d:unsed\n", i);
+ seq_printf(m, "vcpi %d:unused\n", i);
}
for (i = 0; i < mgr->max_payloads; i++) {
seq_printf(m, "payload %d: %d, %d, %d\n",
@@ -2844,8 +2862,9 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
for (i = 0; i < 0x3; i++)
seq_printf(m, "%02x", buf[i]);
seq_printf(m, " devid: ");
- for (i = 0x3; i < 0x8; i++)
+ for (i = 0x3; i < 0x8 && buf[i]; i++)
seq_printf(m, "%c", buf[i]);
+
seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
seq_printf(m, "\n");
bret = dump_dp_payload_table(mgr, buf);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 167c8d3d4..bff89226a 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -37,13 +37,23 @@
#include "drm_legacy.h"
#include "drm_internal.h"
-unsigned int drm_debug = 0; /* bitmask of DRM_UT_x */
+/*
+ * drm_debug: Enable debug output.
+ * Bitmask of DRM_UT_x. See include/drm/drmP.h for details.
+ */
+unsigned int drm_debug = 0;
EXPORT_SYMBOL(drm_debug);
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
-MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n"
+"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n"
+"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n"
+"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n"
+"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n"
+"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n"
+"\t\tBit 5 (0x20) will enable VBL messages (vblank code)");
module_param_named(debug, drm_debug, int, 0600);
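/*
 * With module_param(..., 0600) the mask is also writable at runtime;
 * a usage sketch (as root):
 *
 *	echo 0x04 > /sys/module/drm/parameters/debug	# KMS messages only
 *	echo 0x3f > /sys/module/drm/parameters/debug	# everything
 *	echo 0 > /sys/module/drm/parameters/debug	# quiet again
 */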
static DEFINE_SPINLOCK(drm_minor_lock);
@@ -111,19 +121,11 @@ static void drm_master_destroy(struct kref *kref)
{
struct drm_master *master = container_of(kref, struct drm_master, refcount);
struct drm_device *dev = master->minor->dev;
- struct drm_map_list *r_list, *list_temp;
- mutex_lock(&dev->struct_mutex);
if (dev->driver->master_destroy)
dev->driver->master_destroy(dev, master);
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
- if (r_list->master == master) {
- drm_legacy_rmmap_locked(dev, r_list->map);
- r_list = NULL;
- }
- }
- mutex_unlock(&dev->struct_mutex);
+ drm_legacy_master_rmmaps(dev, master);
idr_destroy(&master->magic_map);
kfree(master->unique);
@@ -588,6 +590,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
spin_lock_init(&dev->buf_lock);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
+ mutex_init(&dev->filelist_mutex);
mutex_init(&dev->ctxlist_mutex);
mutex_init(&dev->master_mutex);
@@ -715,7 +718,11 @@ EXPORT_SYMBOL(drm_dev_unref);
*
* Register the DRM device @dev with the system, advertise device to user-space
* and start normal device operation. @dev must be allocated via drm_dev_alloc()
- * previously.
+ * previously. Right after drm_dev_register() the driver should call
+ * drm_connector_register_all() to register all connectors in sysfs. This is
+ * a separate call for backward compatibility with drivers still using
+ * the deprecated ->load() callback, where connectors are registered from within
+ * the ->load() callback.
*
* Never call this twice on any device!
*
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 558ef9fc3..7df26d4b7 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3293,6 +3293,46 @@ monitor_name(struct detailed_timing *t, void *data)
*(u8 **)data = t->data.other_data.data.str.str;
}
+static int get_monitor_name(struct edid *edid, char name[13])
+{
+ char *edid_name = NULL;
+ int mnl;
+
+ if (!edid || !name)
+ return 0;
+
+ drm_for_each_detailed_block((u8 *)edid, monitor_name, &edid_name);
+ for (mnl = 0; edid_name && mnl < 13; mnl++) {
+ if (edid_name[mnl] == 0x0a)
+ break;
+
+ name[mnl] = edid_name[mnl];
+ }
+
+ return mnl;
+}
+
+/**
+ * drm_edid_get_monitor_name - fetch the monitor name from the edid
+ * @edid: monitor EDID information
+ * @name: pointer to a character array to hold the name of the monitor
+ * @bufsize: The size of the name buffer (should be at least 14 chars)
+ */
+void drm_edid_get_monitor_name(struct edid *edid, char *name, int bufsize)
+{
+ int name_length;
+ char buf[13];
+
+ if (bufsize <= 0)
+ return;
+
+ name_length = min(get_monitor_name(edid, buf), bufsize - 1);
+ memcpy(name, buf, name_length);
+ name[name_length] = '\0';
+}
+EXPORT_SYMBOL(drm_edid_get_monitor_name);
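/*
 * A minimal usage sketch: a 13-byte descriptor plus the terminating NUL
 * is the worst case, hence the 14-byte buffer recommended above.
 * example_log_monitor_name() is a hypothetical caller.
 */
static void example_log_monitor_name(struct drm_connector *connector,
				     struct edid *edid)
{
	char name[14];

	drm_edid_get_monitor_name(edid, name, sizeof(name));
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] monitor: %s\n",
		      connector->base.id, connector->name,
		      *name ? name : "Unknown");
}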
+
/**
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
@@ -3306,7 +3346,6 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
{
uint8_t *eld = connector->eld;
u8 *cea;
- u8 *name;
u8 *db;
int total_sad_count = 0;
int mnl;
@@ -3320,14 +3359,8 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
return;
}
- name = NULL;
- drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
- /* max: 13 bytes EDID, 16 bytes ELD */
- for (mnl = 0; name && mnl < 13; mnl++) {
- if (name[mnl] == 0x0a)
- break;
- eld[20 + mnl] = name[mnl];
- }
+ mnl = get_monitor_name(edid, eld + 20);
+
eld[4] = (cea[1] << 5) | mnl;
DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);
@@ -3868,6 +3901,133 @@ static void drm_add_display_info(struct edid *edid,
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
+static int validate_displayid(u8 *displayid, int length, int idx)
+{
+ int i;
+ u8 csum = 0;
+ struct displayid_hdr *base;
+
+ base = (struct displayid_hdr *)&displayid[idx];
+
+ DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
+ base->rev, base->bytes, base->prod_id, base->ext_count);
+
+ if (base->bytes + 5 > length - idx)
+ return -EINVAL;
+ for (i = idx; i <= base->bytes + 5; i++) {
+ csum += displayid[i];
+ }
+ if (csum) {
+ DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
+ return -EINVAL;
+ }
+ return 0;
+}
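/*
 * The DisplayID section checksum works like the EDID one: the header,
 * payload and checksum byte must sum to zero mod 256. A stand-alone
 * sketch of the same check (example_displayid_csum_ok is hypothetical):
 */
static bool example_displayid_csum_ok(const u8 *section, int len)
{
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum += section[i];

	return sum == 0;	/* any remainder indicates corruption */
}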
+
+static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev,
+ struct displayid_detailed_timings_1 *timings)
+{
+ struct drm_display_mode *mode;
+ unsigned pixel_clock = (timings->pixel_clock[0] |
+ (timings->pixel_clock[1] << 8) |
+ (timings->pixel_clock[2] << 16));
+ unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
+ unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
+ unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
+ unsigned hsync_width = (timings->hsw[0] | timings->hsw[1] << 8) + 1;
+ unsigned vactive = (timings->vactive[0] | timings->vactive[1] << 8) + 1;
+ unsigned vblank = (timings->vblank[0] | timings->vblank[1] << 8) + 1;
+ unsigned vsync = (timings->vsync[0] | (timings->vsync[1] & 0x7f) << 8) + 1;
+ unsigned vsync_width = (timings->vsw[0] | timings->vsw[1] << 8) + 1;
+ bool hsync_positive = (timings->hsync[1] >> 7) & 0x1;
+ bool vsync_positive = (timings->vsync[1] >> 7) & 0x1;
+ mode = drm_mode_create(dev);
+ if (!mode)
+ return NULL;
+
+ mode->clock = pixel_clock * 10;
+ mode->hdisplay = hactive;
+ mode->hsync_start = mode->hdisplay + hsync;
+ mode->hsync_end = mode->hsync_start + hsync_width;
+ mode->htotal = mode->hdisplay + hblank;
+
+ mode->vdisplay = vactive;
+ mode->vsync_start = mode->vdisplay + vsync;
+ mode->vsync_end = mode->vsync_start + vsync_width;
+ mode->vtotal = mode->vdisplay + vblank;
+
+ mode->flags = 0;
+ mode->flags |= hsync_positive ? DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+ mode->flags |= vsync_positive ? DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+ mode->type = DRM_MODE_TYPE_DRIVER;
+
+ if (timings->flags & 0x80)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+ drm_mode_set_name(mode);
+
+ return mode;
+}
+
+static int add_displayid_detailed_1_modes(struct drm_connector *connector,
+ struct displayid_block *block)
+{
+ struct displayid_detailed_timing_block *det = (struct displayid_detailed_timing_block *)block;
+ int i;
+ int num_timings;
+ struct drm_display_mode *newmode;
+ int num_modes = 0;
+	/* blocks must be a multiple of 20 bytes in length */
+ if (block->num_bytes % 20)
+ return 0;
+
+ num_timings = block->num_bytes / 20;
+ for (i = 0; i < num_timings; i++) {
+ struct displayid_detailed_timings_1 *timings = &det->timings[i];
+
+ newmode = drm_mode_displayid_detailed(connector->dev, timings);
+ if (!newmode)
+ continue;
+
+ drm_mode_probed_add(connector, newmode);
+ num_modes++;
+ }
+ return num_modes;
+}
+
+static int add_displayid_detailed_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ u8 *displayid;
+ int ret;
+ int idx = 1;
+ int length = EDID_LENGTH;
+ struct displayid_block *block;
+ int num_modes = 0;
+
+ displayid = drm_find_displayid_extension(edid);
+ if (!displayid)
+ return 0;
+
+ ret = validate_displayid(displayid, length, idx);
+ if (ret)
+ return 0;
+
+ idx += sizeof(struct displayid_hdr);
+ while (block = (struct displayid_block *)&displayid[idx],
+ idx + sizeof(struct displayid_block) <= length &&
+ idx + sizeof(struct displayid_block) + block->num_bytes <= length &&
+ block->num_bytes > 0) {
+ idx += block->num_bytes + sizeof(struct displayid_block);
+ switch (block->tag) {
+ case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
+ num_modes += add_displayid_detailed_1_modes(connector, block);
+ break;
+ }
+ }
+ return num_modes;
+}
+
/**
* drm_add_edid_modes - add modes from EDID data, if available
* @connector: connector we're probing
@@ -3913,6 +4073,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
num_modes += add_established_modes(connector, edid);
num_modes += add_cea_modes(connector, edid);
num_modes += add_alternate_cea_modes(connector, edid);
+ num_modes += add_displayid_detailed_modes(connector, edid);
if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
num_modes += add_inferred_modes(connector, edid);
@@ -4119,96 +4280,98 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
+static int drm_parse_tiled_block(struct drm_connector *connector,
+ struct displayid_block *block)
+{
+ struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
+ u16 w, h;
+ u8 tile_v_loc, tile_h_loc;
+ u8 num_v_tile, num_h_tile;
+ struct drm_tile_group *tg;
+
+ w = tile->tile_size[0] | tile->tile_size[1] << 8;
+ h = tile->tile_size[2] | tile->tile_size[3] << 8;
+
+ num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
+ num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
+ tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
+ tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
+
+ connector->has_tile = true;
+ if (tile->tile_cap & 0x80)
+ connector->tile_is_single_monitor = true;
+
+ connector->num_h_tile = num_h_tile + 1;
+ connector->num_v_tile = num_v_tile + 1;
+ connector->tile_h_loc = tile_h_loc;
+ connector->tile_v_loc = tile_v_loc;
+ connector->tile_h_size = w + 1;
+ connector->tile_v_size = h + 1;
+
+ DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
+ DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
+ DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
+ num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
+ DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
+
+ tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
+ if (!tg) {
+ tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
+ }
+ if (!tg)
+ return -ENOMEM;
+
+ if (connector->tile_group != tg) {
+ /* if this is a new tile group, keep the reference we just
+ took and drop the ref to any old tile group */
+ if (connector->tile_group) {
+ drm_mode_put_tile_group(connector->dev, connector->tile_group);
+ }
+ connector->tile_group = tg;
+ } else
+ /* if same tile group, then release the ref we just took. */
+ drm_mode_put_tile_group(connector->dev, tg);
+ return 0;
+}
+
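A quick worked example of the topology decode above, using hypothetical bytes for the right half of a side-by-side pair (illustrative values only, not real hardware):

/* topo[] for the tile at column 1 of a 2x1 topology (hypothetical) */
unsigned char topo[3] = { 0x10, 0x10, 0x00 };
unsigned num_v_tile = (topo[0] & 0xf) | (topo[2] & 0x30);             /* 0 */
unsigned num_h_tile = (topo[0] >> 4) | ((topo[2] >> 2) & 0x30);       /* 1 */
unsigned tile_v_loc = (topo[1] & 0xf) | ((topo[2] & 0x3) << 4);       /* 0 */
unsigned tile_h_loc = (topo[1] >> 4) | (((topo[2] >> 2) & 0x3) << 4); /* 1 */
/* counts are stored minus one, so the connector reports a 2x1 topology
 * with this tile at location 1x0 */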
static int drm_parse_display_id(struct drm_connector *connector,
u8 *displayid, int length,
bool is_edid_extension)
{
/* if this is an EDID extension the first byte will be 0x70 */
int idx = 0;
- struct displayid_hdr *base;
struct displayid_block *block;
- u8 csum = 0;
- int i;
+ int ret;
if (is_edid_extension)
idx = 1;
- base = (struct displayid_hdr *)&displayid[idx];
-
- DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
- base->rev, base->bytes, base->prod_id, base->ext_count);
-
- if (base->bytes + 5 > length - idx)
- return -EINVAL;
-
- for (i = idx; i <= base->bytes + 5; i++) {
- csum += displayid[i];
- }
- if (csum) {
- DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
- return -EINVAL;
- }
+ ret = validate_displayid(displayid, length, idx);
+ if (ret)
+ return ret;
- block = (struct displayid_block *)&displayid[idx + 4];
- DRM_DEBUG_KMS("block id %d, rev %d, len %d\n",
- block->tag, block->rev, block->num_bytes);
-
- switch (block->tag) {
- case DATA_BLOCK_TILED_DISPLAY: {
- struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
-
- u16 w, h;
- u8 tile_v_loc, tile_h_loc;
- u8 num_v_tile, num_h_tile;
- struct drm_tile_group *tg;
-
- w = tile->tile_size[0] | tile->tile_size[1] << 8;
- h = tile->tile_size[2] | tile->tile_size[3] << 8;
-
- num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
- num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
- tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
- tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
-
- connector->has_tile = true;
- if (tile->tile_cap & 0x80)
- connector->tile_is_single_monitor = true;
-
- connector->num_h_tile = num_h_tile + 1;
- connector->num_v_tile = num_v_tile + 1;
- connector->tile_h_loc = tile_h_loc;
- connector->tile_v_loc = tile_v_loc;
- connector->tile_h_size = w + 1;
- connector->tile_v_size = h + 1;
-
- DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
- DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
- DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
- num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
- DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
-
- tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
- if (!tg) {
- tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
+ idx += sizeof(struct displayid_hdr);
+ while (block = (struct displayid_block *)&displayid[idx],
+ idx + sizeof(struct displayid_block) <= length &&
+ idx + sizeof(struct displayid_block) + block->num_bytes <= length &&
+ block->num_bytes > 0) {
+ idx += block->num_bytes + sizeof(struct displayid_block);
+ DRM_DEBUG_KMS("block id 0x%x, rev %d, len %d\n",
+ block->tag, block->rev, block->num_bytes);
+
+ switch (block->tag) {
+ case DATA_BLOCK_TILED_DISPLAY:
+ ret = drm_parse_tiled_block(connector, block);
+ if (ret)
+ return ret;
+ break;
+ case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
+ /* handled in mode gathering code. */
+ break;
+ default:
+ DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag);
+ break;
}
- if (!tg)
- return -ENOMEM;
-
- if (connector->tile_group != tg) {
- /* if we haven't got a pointer,
- take the reference, drop ref to old tile group */
- if (connector->tile_group) {
- drm_mode_put_tile_group(connector->dev, connector->tile_group);
- }
- connector->tile_group = tg;
- } else
- /* if same tile group, then release the ref we just took. */
- drm_mode_put_tile_group(connector->dev, tg);
- }
- break;
- default:
- printk("unknown displayid tag %d\n", block->tag);
- break;
}
return 0;
}
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index e619b00c7..5075fae3c 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -25,6 +25,8 @@
#include <drm/drm_fb_cma_helper.h>
#include <linux/module.h>
+#define DEFAULT_FBDEFIO_DELAY_MS 50
+
struct drm_fb_cma {
struct drm_framebuffer fb;
struct drm_gem_cma_object *obj[4];
@@ -35,6 +37,59 @@ struct drm_fbdev_cma {
struct drm_fb_cma *fb;
};
+/**
+ * DOC: framebuffer cma helper functions
+ *
+ * Provides helper functions for creating a cma (contiguous memory allocator)
+ * backed framebuffer.
+ *
+ * drm_fb_cma_create() is used in the &drm_mode_config_funcs ->fb_create
+ * callback function to create a cma backed framebuffer.
+ *
+ * An fbdev framebuffer backed by cma is also available by calling
+ * drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
+ * If the &drm_framebuffer_funcs ->dirty callback is set, fb_deferred_io
+ * will be set up automatically. dirty() is called by
+ * drm_fb_helper_deferred_io() in process context (struct delayed_work).
+ *
+ * Example fbdev deferred io code:
+ *
+ * static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
+ * struct drm_file *file_priv,
+ * unsigned flags, unsigned color,
+ * struct drm_clip_rect *clips,
+ * unsigned num_clips)
+ * {
+ * struct drm_gem_cma_object *cma = drm_fb_cma_get_gem_obj(fb, 0);
+ * ... push changes ...
+ * return 0;
+ * }
+ *
+ * static struct drm_framebuffer_funcs driver_fbdev_fb_funcs = {
+ * .destroy = drm_fb_cma_destroy,
+ * .create_handle = drm_fb_cma_create_handle,
+ * .dirty = driver_fbdev_fb_dirty,
+ * };
+ *
+ * static int driver_fbdev_create(struct drm_fb_helper *helper,
+ * struct drm_fb_helper_surface_size *sizes)
+ * {
+ * return drm_fbdev_cma_create_with_funcs(helper, sizes,
+ * &driver_fbdev_fb_funcs);
+ * }
+ *
+ * static const struct drm_fb_helper_funcs driver_fb_helper_funcs = {
+ * .fb_probe = driver_fbdev_create,
+ * };
+ *
+ * Initialize:
+ * fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
+ * dev->mode_config.num_crtc,
+ * dev->mode_config.num_connector,
+ * &driver_fb_helper_funcs);
+ *
+ */
+
static inline struct drm_fbdev_cma *to_fbdev_cma(struct drm_fb_helper *helper)
{
return container_of(helper, struct drm_fbdev_cma, fb_helper);
@@ -45,7 +100,7 @@ static inline struct drm_fb_cma *to_fb_cma(struct drm_framebuffer *fb)
return container_of(fb, struct drm_fb_cma, fb);
}
-static void drm_fb_cma_destroy(struct drm_framebuffer *fb)
+void drm_fb_cma_destroy(struct drm_framebuffer *fb)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
int i;
@@ -58,8 +113,9 @@ static void drm_fb_cma_destroy(struct drm_framebuffer *fb)
drm_framebuffer_cleanup(fb);
kfree(fb_cma);
}
+EXPORT_SYMBOL(drm_fb_cma_destroy);
-static int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
+int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv, unsigned int *handle)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
@@ -67,6 +123,7 @@ static int drm_fb_cma_create_handle(struct drm_framebuffer *fb,
return drm_gem_handle_create(file_priv,
&fb_cma->obj[0]->base, handle);
}
+EXPORT_SYMBOL(drm_fb_cma_create_handle);
static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
.destroy = drm_fb_cma_destroy,
@@ -76,7 +133,7 @@ static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_cma_object **obj,
- unsigned int num_planes)
+ unsigned int num_planes, const struct drm_framebuffer_funcs *funcs)
{
struct drm_fb_cma *fb_cma;
int ret;
@@ -91,7 +148,7 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
for (i = 0; i < num_planes; i++)
fb_cma->obj[i] = obj[i];
- ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
+ ret = drm_framebuffer_init(dev, &fb_cma->fb, funcs);
if (ret) {
dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
kfree(fb_cma);
@@ -102,13 +159,17 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
}
/**
- * drm_fb_cma_create() - (struct drm_mode_config_funcs *)->fb_create callback function
+ * drm_fb_cma_create_with_funcs() - helper function for the
+ * &drm_mode_config_funcs ->fb_create
+ * callback function
*
- * If your hardware has special alignment or pitch requirements these should be
- * checked before calling this function.
+ * This can be used to set &drm_framebuffer_funcs for drivers that need the
+ * dirty() callback. Use drm_fb_cma_create() if you don't need to change
+ * &drm_framebuffer_funcs.
*/
-struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
- struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
+struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
+ struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
+ const struct drm_framebuffer_funcs *funcs)
{
struct drm_fb_cma *fb_cma;
struct drm_gem_cma_object *objs[4];
@@ -126,7 +187,7 @@ struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
unsigned int height = mode_cmd->height / (i ? vsub : 1);
unsigned int min_size;
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[i]);
+ obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
if (!obj) {
dev_err(dev->dev, "Failed to lookup GEM object\n");
ret = -ENXIO;
@@ -145,7 +206,7 @@ struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
objs[i] = to_drm_gem_cma_obj(obj);
}
- fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i);
+ fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, funcs);
if (IS_ERR(fb_cma)) {
ret = PTR_ERR(fb_cma);
goto err_gem_object_unreference;
@@ -158,6 +219,21 @@ err_gem_object_unreference:
drm_gem_object_unreference_unlocked(&objs[i]->base);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
+
+/**
+ * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function
+ *
+ * If your hardware has special alignment or pitch requirements these should be
+ * checked before calling this function. Use drm_fb_cma_create_with_funcs() if
+ * you need to set &drm_framebuffer_funcs ->dirty.
+ */
+struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
+ struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ return drm_fb_cma_create_with_funcs(dev, file_priv, mode_cmd,
+ &drm_fb_cma_funcs);
+}
EXPORT_SYMBOL_GPL(drm_fb_cma_create);
/**
@@ -233,8 +309,67 @@ static struct fb_ops drm_fbdev_cma_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
-static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ fb_deferred_io_mmap(info, vma);
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ return 0;
+}
+
+static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
+ struct drm_gem_cma_object *cma_obj)
+{
+ struct fb_deferred_io *fbdefio;
+ struct fb_ops *fbops;
+
+ /*
+ * Per-device structures are needed because:
+ * fbops: fb_deferred_io_cleanup() clears fbops.fb_mmap
+ * fbdefio: individual delays
+ */
+ fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
+ fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
+ if (!fbdefio || !fbops) {
+ kfree(fbdefio);
+ return -ENOMEM;
+ }
+
+ /* can't be offset from vaddr since dirty() uses cma_obj */
+ fbi->screen_buffer = cma_obj->vaddr;
+ /* fb_deferred_io_fault() needs a physical address */
+ fbi->fix.smem_start = page_to_phys(virt_to_page(fbi->screen_buffer));
+
+ *fbops = *fbi->fbops;
+ fbi->fbops = fbops;
+
+ fbdefio->delay = msecs_to_jiffies(DEFAULT_FBDEFIO_DELAY_MS);
+ fbdefio->deferred_io = drm_fb_helper_deferred_io;
+ fbi->fbdefio = fbdefio;
+ fb_deferred_io_init(fbi);
+ fbi->fbops->fb_mmap = drm_fbdev_cma_deferred_io_mmap;
+
+ return 0;
+}
+
+static void drm_fbdev_cma_defio_fini(struct fb_info *fbi)
+{
+ if (!fbi->fbdefio)
+ return;
+
+ fb_deferred_io_cleanup(fbi);
+ kfree(fbi->fbdefio);
+ kfree(fbi->fbops);
+}
+
+/*
+ * For use in a (struct drm_fb_helper_funcs *)->fb_probe callback function that
+ * needs a custom struct drm_framebuffer_funcs, like dirty() for deferred_io use.
+ */
+int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes,
+ const struct drm_framebuffer_funcs *funcs)
{
struct drm_fbdev_cma *fbdev_cma = to_fbdev_cma(helper);
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
@@ -270,7 +405,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
goto err_gem_free_object;
}
- fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1);
+ fbdev_cma->fb = drm_fb_cma_alloc(dev, &mode_cmd, &obj, 1, funcs);
if (IS_ERR(fbdev_cma->fb)) {
dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
ret = PTR_ERR(fbdev_cma->fb);
@@ -296,31 +431,48 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
fbi->screen_size = size;
fbi->fix.smem_len = size;
+ if (funcs->dirty) {
+ ret = drm_fbdev_cma_defio_init(fbi, obj);
+ if (ret)
+ goto err_cma_destroy;
+ }
+
return 0;
+err_cma_destroy:
+ drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
+ drm_fb_cma_destroy(&fbdev_cma->fb->fb);
err_fb_info_destroy:
drm_fb_helper_release_fbi(helper);
err_gem_free_object:
drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
+EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs);
+
+static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ return drm_fbdev_cma_create_with_funcs(helper, sizes, &drm_fb_cma_funcs);
+}
static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
.fb_probe = drm_fbdev_cma_create,
};
/**
- * drm_fbdev_cma_init() - Allocate and initializes a drm_fbdev_cma struct
+ * drm_fbdev_cma_init_with_funcs() - Allocates and initializes a drm_fbdev_cma struct
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device
* @num_crtc: Number of CRTCs
* @max_conn_count: Maximum number of connectors
+ * @funcs: fb helper functions, in particular fb_probe()
*
* Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
*/
-struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int num_crtc,
- unsigned int max_conn_count)
+ unsigned int max_conn_count, const struct drm_fb_helper_funcs *funcs)
{
struct drm_fbdev_cma *fbdev_cma;
struct drm_fb_helper *helper;
@@ -334,7 +486,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
helper = &fbdev_cma->fb_helper;
- drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
+ drm_fb_helper_prepare(dev, helper, funcs);
ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
if (ret < 0) {
@@ -364,6 +516,24 @@ err_free:
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(drm_fbdev_cma_init_with_funcs);
+
+/**
+ * drm_fbdev_cma_init() - Allocates and initializes a drm_fbdev_cma struct
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device
+ * @num_crtc: Number of CRTCs
+ * @max_conn_count: Maximum number of connectors
+ *
+ * Returns a newly allocated drm_fbdev_cma struct or an ERR_PTR.
+ */
+struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
+ unsigned int preferred_bpp, unsigned int num_crtc,
+ unsigned int max_conn_count)
+{
+ return drm_fbdev_cma_init_with_funcs(dev, preferred_bpp, num_crtc,
+ max_conn_count, &drm_fb_cma_helper_funcs);
+}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
/**
@@ -373,6 +543,7 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
+ drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
if (fbdev_cma->fb) {
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index fe4df976f..7c2eb75db 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -84,6 +84,15 @@ static LIST_HEAD(kernel_fb_helper_list);
* and set up an initial configuration using the detected hardware, drivers
* should call drm_fb_helper_single_add_all_connectors() followed by
* drm_fb_helper_initial_config().
+ *
+ * If &drm_framebuffer_funcs ->dirty is set, the
+ * drm_fb_helper_{cfb,sys}_{write,fillrect,copyarea,imageblit} functions will
+ * accumulate changes and schedule &drm_fb_helper ->dirty_work to run right
+ * away. This worker then calls the dirty() function, ensuring that it always
+ * runs in process context, since the fb_*() functions could be running in
+ * atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io
+ * callback, it will also schedule dirty_work with the damage collected from the
+ * mmap page writes.
*/
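For driver authors the contract above reduces to a single callback. A minimal sketch, assuming a hypothetical foo driver whose foo_flush_rect() pushes a rectangle to the hardware:

static int foo_fb_dirty(struct drm_framebuffer *fb, struct drm_file *file,
			unsigned flags, unsigned color,
			struct drm_clip_rect *clips, unsigned num_clips)
{
	/* called from dirty_work with one accumulated clip, so this runs
	 * in process context and may sleep */
	foo_flush_rect(fb, clips->x1, clips->y1,
		       clips->x2 - clips->x1, clips->y2 - clips->y1);
	return 0;
}

The worker invokes it as fb->funcs->dirty(fb, NULL, 0, 0, &clip_copy, 1), i.e. with no file and exactly one clip.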
/**
@@ -153,40 +162,13 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
if (!fb_helper_connector)
return -ENOMEM;
+ drm_connector_reference(connector);
fb_helper_connector->connector = connector;
fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
-static void remove_from_modeset(struct drm_mode_set *set,
- struct drm_connector *connector)
-{
- int i, j;
-
- for (i = 0; i < set->num_connectors; i++) {
- if (set->connectors[i] == connector)
- break;
- }
-
- if (i == set->num_connectors)
- return;
-
- for (j = i + 1; j < set->num_connectors; j++) {
- set->connectors[j - 1] = set->connectors[j];
- }
- set->num_connectors--;
-
- /*
- * TODO maybe need to makes sure we set it back to !=NULL somewhere?
- */
- if (set->num_connectors == 0) {
- set->fb = NULL;
- drm_mode_destroy(connector->dev, set->mode);
- set->mode = NULL;
- }
-}
-
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
@@ -206,6 +188,7 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
if (i == fb_helper->connector_count)
return -EINVAL;
fb_helper_connector = fb_helper->connector_info[i];
+ drm_connector_unreference(fb_helper_connector->connector);
for (j = i + 1; j < fb_helper->connector_count; j++) {
fb_helper->connector_info[j - 1] = fb_helper->connector_info[j];
@@ -213,10 +196,6 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
fb_helper->connector_count--;
kfree(fb_helper_connector);
- /* also cleanup dangling references to the connector: */
- for (i = 0; i < fb_helper->crtc_count; i++)
- remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
-
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
@@ -626,8 +605,10 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
{
int i;
- for (i = 0; i < helper->connector_count; i++)
+ for (i = 0; i < helper->connector_count; i++) {
+ drm_connector_unreference(helper->connector_info[i]->connector);
kfree(helper->connector_info[i]);
+ }
kfree(helper->connector_info);
for (i = 0; i < helper->crtc_count; i++) {
kfree(helper->crtc_info[i].mode_set.connectors);
@@ -637,6 +618,23 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
kfree(helper->crtc_info);
}
+static void drm_fb_helper_dirty_work(struct work_struct *work)
+{
+ struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
+ dirty_work);
+ struct drm_clip_rect *clip = &helper->dirty_clip;
+ struct drm_clip_rect clip_copy;
+ unsigned long flags;
+
+ spin_lock_irqsave(&helper->dirty_lock, flags);
+ clip_copy = *clip;
+ clip->x1 = clip->y1 = ~0;
+ clip->x2 = clip->y2 = 0;
+ spin_unlock_irqrestore(&helper->dirty_lock, flags);
+
+ helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+}
+
/**
* drm_fb_helper_prepare - setup a drm_fb_helper structure
* @dev: DRM device
@@ -650,6 +648,9 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs)
{
INIT_LIST_HEAD(&helper->kernel_fb_list);
+ spin_lock_init(&helper->dirty_lock);
+ INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work);
+ helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0;
helper->funcs = funcs;
helper->dev = dev;
}
@@ -834,6 +835,59 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
+static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
+ u32 width, u32 height)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct drm_clip_rect *clip = &helper->dirty_clip;
+ unsigned long flags;
+
+ if (!helper->fb->funcs->dirty)
+ return;
+
+ spin_lock_irqsave(&helper->dirty_lock, flags);
+ clip->x1 = min_t(u32, clip->x1, x);
+ clip->y1 = min_t(u32, clip->y1, y);
+ clip->x2 = max_t(u32, clip->x2, x + width);
+ clip->y2 = max_t(u32, clip->y2, y + height);
+ spin_unlock_irqrestore(&helper->dirty_lock, flags);
+
+ schedule_work(&helper->dirty_work);
+}
+
+/**
+ * drm_fb_helper_deferred_io() - fbdev deferred_io callback function
+ * @info: fb_info struct pointer
+ * @pagelist: list of dirty mmap framebuffer pages
+ *
+ * This function is used as the &fb_deferred_io ->deferred_io
+ * callback function for flushing the fbdev mmap writes.
+ */
+void drm_fb_helper_deferred_io(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ unsigned long start, end, min, max;
+ struct page *page;
+ u32 y1, y2;
+
+ min = ULONG_MAX;
+ max = 0;
+ list_for_each_entry(page, pagelist, lru) {
+ start = page->index << PAGE_SHIFT;
+ end = start + PAGE_SIZE - 1;
+ min = min(min, start);
+ max = max(max, end);
+ }
+
+ if (min < max) {
+ y1 = min / info->fix.line_length;
+ y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length),
+ info->var.yres);
+ drm_fb_helper_dirty(info, 0, y1, info->var.xres, y2 - y1);
+ }
+}
+EXPORT_SYMBOL(drm_fb_helper_deferred_io);
+
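The min/max bookkeeping above turns dirtied pages into scanlines with plain integer arithmetic. A worked example with hypothetical numbers: on a 1920x1080 xRGB framebuffer, line_length is 1920 * 4 == 7680 bytes, so a single dirtied page at index 3 (PAGE_SIZE == 4096) spans bytes 12288..16383 and yields

	y1 = 12288 / 7680              == 1
	y2 = DIV_ROUND_UP(16383, 7680) == 3 (capped at yres)

so drm_fb_helper_dirty() is called for the full width of rows 1..2.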
/**
* drm_fb_helper_sys_read - wrapper around fb_sys_read
* @info: fb_info struct pointer
@@ -862,7 +916,14 @@ EXPORT_SYMBOL(drm_fb_helper_sys_read);
ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos)
{
- return fb_sys_write(info, buf, count, ppos);
+ ssize_t ret;
+
+ ret = fb_sys_write(info, buf, count, ppos);
+ if (ret > 0)
+ drm_fb_helper_dirty(info, 0, 0, info->var.xres,
+ info->var.yres);
+
+ return ret;
}
EXPORT_SYMBOL(drm_fb_helper_sys_write);
@@ -877,6 +938,8 @@ void drm_fb_helper_sys_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
sys_fillrect(info, rect);
+ drm_fb_helper_dirty(info, rect->dx, rect->dy,
+ rect->width, rect->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
@@ -891,6 +954,8 @@ void drm_fb_helper_sys_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
sys_copyarea(info, area);
+ drm_fb_helper_dirty(info, area->dx, area->dy,
+ area->width, area->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
@@ -905,6 +970,8 @@ void drm_fb_helper_sys_imageblit(struct fb_info *info,
const struct fb_image *image)
{
sys_imageblit(info, image);
+ drm_fb_helper_dirty(info, image->dx, image->dy,
+ image->width, image->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
@@ -919,6 +986,8 @@ void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
cfb_fillrect(info, rect);
+ drm_fb_helper_dirty(info, rect->dx, rect->dy,
+ rect->width, rect->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
@@ -933,6 +1002,8 @@ void drm_fb_helper_cfb_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
cfb_copyarea(info, area);
+ drm_fb_helper_dirty(info, area->dx, area->dy,
+ area->width, area->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
@@ -947,6 +1018,8 @@ void drm_fb_helper_cfb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
cfb_imageblit(info, image);
+ drm_fb_helper_dirty(info, image->dx, image->dy,
+ image->width, image->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
@@ -2103,8 +2176,8 @@ out:
* cmdline option.
*
* The other option is to just disable fbdev emulation since very likely the
- * first modest from userspace will crash in the same way, and is even easier to
- * debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0
+ * first modeset from userspace will crash in the same way, and is even easier
+ * to debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0
* kernel cmdline option.
*
* RETURNS:
@@ -2149,7 +2222,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
* hotplug interrupt).
*
* Note that drivers may call this even before calling
- * drm_fb_helper_initial_config but only aftert drm_fb_helper_init. This allows
+ * drm_fb_helper_initial_config but only after drm_fb_helper_init. This allows
* for a race-free fbcon setup and will make sure that the fbdev emulation will
* not miss any hotplug events.
*
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index aeef58ed3..7af7f8bcb 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -297,9 +297,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
}
mutex_unlock(&dev->master_mutex);
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev->filelist_mutex);
list_add(&priv->lhead, &dev->filelist);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->filelist_mutex);
#ifdef __alpha__
/*
@@ -381,14 +381,26 @@ static void drm_events_release(struct drm_file *file_priv)
*/
static void drm_legacy_dev_reinit(struct drm_device *dev)
{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
+ if (dev->irq_enabled)
+ drm_irq_uninstall(dev);
+
+ mutex_lock(&dev->struct_mutex);
+
+ drm_legacy_agp_clear(dev);
+
+ drm_legacy_sg_cleanup(dev);
+ drm_legacy_vma_flush(dev);
+ drm_legacy_dma_takedown(dev);
+
+ mutex_unlock(&dev->struct_mutex);
dev->sigdata.lock = NULL;
dev->context_flag = 0;
dev->last_context = 0;
dev->if_version = 0;
+
+ DRM_DEBUG("lastclose completed\n");
}
/*
@@ -400,7 +412,7 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
*
* \sa drm_device
*/
-int drm_lastclose(struct drm_device * dev)
+void drm_lastclose(struct drm_device * dev)
{
DRM_DEBUG("\n");
@@ -408,23 +420,8 @@ int drm_lastclose(struct drm_device * dev)
dev->driver->lastclose(dev);
DRM_DEBUG("driver lastclose completed\n");
- if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
- drm_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
-
- drm_agp_clear(dev);
-
- drm_legacy_sg_cleanup(dev);
- drm_legacy_vma_flush(dev);
- drm_legacy_dma_takedown(dev);
-
- mutex_unlock(&dev->struct_mutex);
-
- drm_legacy_dev_reinit(dev);
-
- DRM_DEBUG("lastclose completed\n");
- return 0;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_legacy_dev_reinit(dev);
}
/**
@@ -445,14 +442,16 @@ int drm_release(struct inode *inode, struct file *filp)
struct drm_file *file_priv = filp->private_data;
struct drm_minor *minor = file_priv->minor;
struct drm_device *dev = minor->dev;
- int retcode = 0;
mutex_lock(&drm_global_mutex);
DRM_DEBUG("open_count = %d\n", dev->open_count);
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev->filelist_mutex);
list_del(&file_priv->lhead);
+ mutex_unlock(&dev->filelist_mutex);
+
+ mutex_lock(&dev->struct_mutex);
if (file_priv->magic)
idr_remove(&file_priv->master->magic_map, file_priv->magic);
mutex_unlock(&dev->struct_mutex);
@@ -538,7 +537,7 @@ int drm_release(struct inode *inode, struct file *filp)
*/
if (!--dev->open_count) {
- retcode = drm_lastclose(dev);
+ drm_lastclose(dev);
if (drm_device_is_unplugged(dev))
drm_put_dev(dev);
}
@@ -546,7 +545,7 @@ int drm_release(struct inode *inode, struct file *filp)
drm_minor_release(minor);
- return retcode;
+ return 0;
}
EXPORT_SYMBOL(drm_release);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e74a9e21e..ddfecb5de 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -279,7 +279,6 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
- struct drm_device *dev;
struct drm_gem_object *obj;
/* This is gross. The idr system doesn't let us try a delete and
@@ -294,18 +293,19 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
spin_lock(&filp->table_lock);
/* Check if we currently have a reference on the object */
- obj = idr_find(&filp->object_idr, handle);
- if (obj == NULL) {
- spin_unlock(&filp->table_lock);
+ obj = idr_replace(&filp->object_idr, NULL, handle);
+ spin_unlock(&filp->table_lock);
+ if (IS_ERR_OR_NULL(obj))
return -EINVAL;
- }
- dev = obj->dev;
- /* Release reference and decrement refcount. */
+ /* Release driver's reference and decrement refcount. */
+ drm_gem_object_release_handle(handle, obj, filp);
+
+ /* And finally make the handle available for future allocations. */
+ spin_lock(&filp->table_lock);
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
- drm_gem_object_release_handle(handle, obj, filp);
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
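The rework above is a lock-juggling pattern worth spelling out: idr_replace() parks a NULL in the slot so concurrent lookups fail immediately, the handle is then released without table_lock held, and only afterwards is the slot freed for reuse. Condensed to its three steps:

	spin_lock(&filp->table_lock);
	obj = idr_replace(&filp->object_idr, NULL, handle); /* hide the slot */
	spin_unlock(&filp->table_lock);

	drm_gem_object_release_handle(handle, obj, filp);   /* unlocked work */

	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);              /* recycle handle */
	spin_unlock(&filp->table_lock);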
@@ -422,6 +422,10 @@ EXPORT_SYMBOL(drm_gem_handle_create);
* @obj: obj in question
*
* This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
+ *
+ * Note that drm_gem_object_release() already calls this function, so drivers
+ * don't have to take care of releasing the mmap offset themselves when freeing
+ * the GEM object.
*/
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
@@ -445,6 +449,9 @@ EXPORT_SYMBOL(drm_gem_free_mmap_offset);
* This routine allocates and attaches a fake offset for @obj, in cases where
* the virtual size differs from the physical size (i.e. obj->size). Otherwise
* just use drm_gem_create_mmap_offset().
+ *
+ * This function is idempotent and handles an already allocated mmap offset
+ * transparently. Drivers do not need to check for this case.
*/
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
@@ -466,6 +473,9 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
* structures.
*
* This routine allocates and attaches a fake offset for @obj.
+ *
+ * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
+ * the fake offset again.
*/
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
@@ -578,7 +588,6 @@ EXPORT_SYMBOL(drm_gem_put_pages);
/**
* drm_gem_object_lookup - look up a GEM object from its handle
- * @dev: DRM device
* @filp: DRM file private data
* @handle: userspace handle
*
@@ -588,8 +597,7 @@ EXPORT_SYMBOL(drm_gem_put_pages);
* otherwise.
*/
struct drm_gem_object *
-drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
- u32 handle)
+drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
struct drm_gem_object *obj;
@@ -597,12 +605,8 @@ drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
/* Check if we currently have a reference on the object */
obj = idr_find(&filp->object_idr, handle);
- if (obj == NULL) {
- spin_unlock(&filp->table_lock);
- return NULL;
- }
-
- drm_gem_object_reference(obj);
+ if (obj)
+ drm_gem_object_reference(obj);
spin_unlock(&filp->table_lock);
@@ -655,7 +659,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_GEM))
return -ENODEV;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = drm_gem_object_lookup(file_priv, args->handle);
if (obj == NULL)
return -ENOENT;
@@ -759,6 +763,13 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
idr_destroy(&file_private->object_idr);
}
+/**
+ * drm_gem_object_release - release GEM buffer object resources
+ * @obj: GEM buffer object
+ *
+ * This releases any structures and resources used by @obj and is the inverse of
+ * drm_gem_object_init().
+ */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
@@ -787,14 +798,67 @@ drm_gem_object_free(struct kref *kref)
container_of(kref, struct drm_gem_object, refcount);
struct drm_device *dev = obj->dev;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ if (dev->driver->gem_free_object_unlocked) {
+ dev->driver->gem_free_object_unlocked(obj);
+ } else if (dev->driver->gem_free_object) {
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
+ }
}
EXPORT_SYMBOL(drm_gem_object_free);
/**
+ * drm_gem_object_unreference_unlocked - release a GEM BO reference
+ * @obj: GEM buffer object
+ *
+ * This releases a reference to @obj. Callers must not hold the
+ * dev->struct_mutex lock when calling this function.
+ *
+ * See also __drm_gem_object_unreference().
+ */
+void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+{
+ struct drm_device *dev;
+
+ if (!obj)
+ return;
+
+ dev = obj->dev;
+ might_lock(&dev->struct_mutex);
+
+ if (dev->driver->gem_free_object_unlocked)
+ kref_put(&obj->refcount, drm_gem_object_free);
+ else if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
+ &dev->struct_mutex))
+ mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_gem_object_unreference_unlocked);
+
+/**
+ * drm_gem_object_unreference - release a GEM BO reference
+ * @obj: GEM buffer object
+ *
+ * This releases a reference to @obj. Callers must hold the dev->struct_mutex
+ * lock when calling this function, even when the driver doesn't use
+ * dev->struct_mutex for anything.
+ *
+ * For drivers not encumbered with legacy locking, use
+ * drm_gem_object_unreference_unlocked() instead.
+ */
+void
+drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+ if (obj) {
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+ kref_put(&obj->refcount, drm_gem_object_free);
+ }
+}
+EXPORT_SYMBOL(drm_gem_object_unreference);
+
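Putting the two kerneldoc additions above into practice, a minimal sketch for a hypothetical foo driver that opts into lock-free final frees:

static struct drm_driver foo_driver = {
	/* final unref never takes dev->struct_mutex */
	.gem_free_object_unlocked = foo_gem_free_object,
	/* ... */
};

Callers may then drop references from any context that does not hold struct_mutex, via drm_gem_object_unreference_unlocked(obj).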
+/**
* drm_gem_vm_open - vma->ops->open implementation for GEM
* @vma: VM area structure
*
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index d988ca0b5..1d6c33558 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -285,7 +285,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
{
struct drm_gem_object *gem_obj;
- gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
+ gem_obj = drm_gem_object_lookup(file_priv, handle);
if (!gem_obj) {
dev_err(drm->dev, "failed to lookup GEM object\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index cbb4fc0fc..5d469b2f2 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -174,7 +174,7 @@ int drm_clients_info(struct seq_file *m, void *data)
/* dev->filelist is sorted youngest first, but we want to present
* oldest first (i.e. kernel, servers, clients), so walk backwards.
*/
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev->filelist_mutex);
list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
struct task_struct *task;
@@ -190,7 +190,7 @@ int drm_clients_info(struct seq_file *m, void *data)
priv->magic);
rcu_read_unlock();
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->filelist_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 43cbda330..902cf6a15 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -26,7 +26,7 @@ extern unsigned int drm_timestamp_monotonic;
/* drm_fops.c */
extern struct mutex drm_global_mutex;
-int drm_lastclose(struct drm_device *dev);
+void drm_lastclose(struct drm_device *dev);
/* drm_pci.c */
int drm_pci_set_unique(struct drm_device *dev,
@@ -37,8 +37,6 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
/* drm_vm.c */
int drm_vma_info(struct seq_file *m, void *data);
-void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
-void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
/* drm_prime.c */
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 8ce2a0c59..b7a39771c 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -150,58 +150,6 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
}
/*
- * Get a mapping information.
- *
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_map structure.
- *
- * \return zero on success or a negative number on failure.
- *
- * Searches for the mapping with the specified offset and copies its information
- * into userspace
- */
-static int drm_getmap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_map *map = data;
- struct drm_map_list *r_list = NULL;
- struct list_head *list;
- int idx;
- int i;
-
- idx = map->offset;
- if (idx < 0)
- return -EINVAL;
-
- i = 0;
- mutex_lock(&dev->struct_mutex);
- list_for_each(list, &dev->maplist) {
- if (i == idx) {
- r_list = list_entry(list, struct drm_map_list, head);
- break;
- }
- i++;
- }
- if (!r_list || !r_list->map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- map->offset = r_list->map->offset;
- map->size = r_list->map->size;
- map->type = r_list->map->type;
- map->flags = r_list->map->flags;
- map->handle = (void *)(unsigned long) r_list->user_token;
- map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-/*
* Get client information.
*
* \param inode device inode.
@@ -558,7 +506,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 881c5a6c1..0fac801c1 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -348,9 +348,6 @@ static void vblank_disable_fn(unsigned long arg)
unsigned int pipe = vblank->pipe;
unsigned long irqflags;
- if (!dev->vblank_disable_allowed)
- return;
-
spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
DRM_DEBUG("disabling vblank on crtc %u\n", pipe);
@@ -437,8 +434,6 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
"get_vblank_timestamp == NULL\n");
}
- dev->vblank_disable_allowed = false;
-
return 0;
err:
@@ -863,10 +858,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
- if (delta_ns < 0)
- etime = ktime_add_ns(etime, -delta_ns);
- else
- etime = ktime_sub_ns(etime, delta_ns);
+ etime = ktime_sub_ns(etime, delta_ns);
*vblank_time = ktime_to_timeval(etime);
DRM_DEBUG_VBL("crtc %u : v 0x%x p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
@@ -1588,7 +1580,6 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
if (vblank->inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- dev->vblank_disable_allowed = true;
drm_reset_vblank_timestamp(dev, pipe);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index 9b731786e..d3b6ee357 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -63,6 +63,8 @@ int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
#define DRM_MAP_HASH_OFFSET 0x10000000
+int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 5d0fc2644..e5e6f504d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -98,7 +98,7 @@ void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
if (!mode)
return;
- drm_mode_object_put(dev, &mode->base);
+ drm_mode_object_unregister(dev, &mode->base);
kfree(mode);
}
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 2ef988e03..3dfe3c886 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -30,12 +30,36 @@
static DEFINE_MUTEX(panel_lock);
static LIST_HEAD(panel_list);
+/**
+ * DOC: drm panel
+ *
+ * The DRM panel helpers allow drivers to register panel objects with a
+ * central registry and provide functions to retrieve those panels in display
+ * drivers.
+ */
+
+/**
+ * drm_panel_init - initialize a panel
+ * @panel: DRM panel
+ *
+ * Sets up internal fields of the panel so that it can subsequently be added
+ * to the registry.
+ */
void drm_panel_init(struct drm_panel *panel)
{
INIT_LIST_HEAD(&panel->list);
}
EXPORT_SYMBOL(drm_panel_init);
+/**
+ * drm_panel_add - add a panel to the global registry
+ * @panel: panel to add
+ *
+ * Add a panel to the global registry so that it can be looked up by display
+ * drivers.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
int drm_panel_add(struct drm_panel *panel)
{
mutex_lock(&panel_lock);
@@ -46,6 +70,12 @@ int drm_panel_add(struct drm_panel *panel)
}
EXPORT_SYMBOL(drm_panel_add);
+/**
+ * drm_panel_remove - remove a panel from the global registry
+ * @panel: DRM panel
+ *
+ * Removes a panel from the global registry.
+ */
void drm_panel_remove(struct drm_panel *panel)
{
mutex_lock(&panel_lock);
@@ -54,6 +84,18 @@ void drm_panel_remove(struct drm_panel *panel)
}
EXPORT_SYMBOL(drm_panel_remove);
+/**
+ * drm_panel_attach - attach a panel to a connector
+ * @panel: DRM panel
+ * @connector: DRM connector
+ *
+ * After obtaining a pointer to a DRM panel a display driver calls this
+ * function to attach a panel to a connector.
+ *
+ * An error is returned if the panel is already attached to another connector.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
{
if (panel->connector)
@@ -66,6 +108,15 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_panel_attach);
+/**
+ * drm_panel_detach - detach a panel from a connector
+ * @panel: DRM panel
+ *
+ * Detaches a panel from the connector it is attached to. If a panel is not
+ * attached to any connector this is effectively a no-op.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
int drm_panel_detach(struct drm_panel *panel)
{
panel->connector = NULL;
@@ -76,6 +127,16 @@ int drm_panel_detach(struct drm_panel *panel)
EXPORT_SYMBOL(drm_panel_detach);
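Together with of_drm_find_panel() documented below, the entry points above form the whole panel handshake. A hedged sketch, with the foo_* names and probe plumbing being hypothetical:

/* panel driver probe: publish the panel */
drm_panel_init(&foo->panel);
foo->panel.dev = dev;
foo->panel.funcs = &foo_panel_funcs;
ret = drm_panel_add(&foo->panel);

/* display driver: resolve the panel from its DT node and bind it */
panel = of_drm_find_panel(panel_node);
if (panel)
	ret = drm_panel_attach(panel, connector);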
#ifdef CONFIG_OF
+/**
+ * of_drm_find_panel - look up a panel using a device tree node
+ * @np: device tree node of the panel
+ *
+ * Searches the set of registered panels for one that matches the given device
+ * tree node. If a matching panel is found, return a pointer to it.
+ *
+ * Return: A pointer to the panel registered for the specified device tree
+ * node or NULL if no panel matching the device tree node can be found.
+ */
struct drm_panel *of_drm_find_panel(struct device_node *np)
{
struct drm_panel *panel;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index a1fff1179..29d5a548d 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -250,7 +250,7 @@ void drm_pci_agp_destroy(struct drm_device *dev)
{
if (dev->agp) {
arch_phys_wc_del(dev->agp->agp_mtrr);
- drm_agp_clear(dev);
+ drm_legacy_agp_clear(dev);
kfree(dev->agp);
dev->agp = NULL;
}
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index df6cdc76a..aab0f3f1f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -407,7 +407,7 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct dma_buf *dmabuf;
mutex_lock(&file_priv->prime.lock);
- obj = drm_gem_object_lookup(dev, file_priv, handle);
+ obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
ret = -ENOENT;
goto out_unlock;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index e714b5a79..0329080d7 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -264,10 +264,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
count = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
} else {
-#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
count = drm_load_edid_firmware(connector);
if (count == 0)
-#endif
count = (*connector_funcs->get_modes)(connector);
}
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index d503f8e8c..fa7fadce8 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -208,9 +208,12 @@ static ssize_t status_show(struct device *device,
char *buf)
{
struct drm_connector *connector = to_drm_connector(device);
+ enum drm_connector_status status;
+
+ status = READ_ONCE(connector->status);
return snprintf(buf, PAGE_SIZE, "%s\n",
- drm_get_connector_status_name(connector->status));
+ drm_get_connector_status_name(status));
}
static ssize_t dpms_show(struct device *device,
@@ -231,9 +234,11 @@ static ssize_t enabled_show(struct device *device,
char *buf)
{
struct drm_connector *connector = to_drm_connector(device);
+ bool enabled;
+
+ enabled = READ_ONCE(connector->encoder);
- return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
- "disabled");
+ return snprintf(buf, PAGE_SIZE, enabled ? "enabled\n" : "disabled\n");
}
static ssize_t edid_show(struct file *filp, struct kobject *kobj,
@@ -287,102 +292,6 @@ static ssize_t modes_show(struct device *device,
return written;
}
-static ssize_t tv_subconnector_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_device *dev = connector->dev;
- struct drm_property *prop;
- uint64_t subconnector;
- int ret;
-
- prop = dev->mode_config.tv_subconnector_property;
- if (!prop) {
- DRM_ERROR("Unable to find subconnector property\n");
- return 0;
- }
-
- ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
- if (ret)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s",
- drm_get_tv_subconnector_name((int)subconnector));
-}
-
-static ssize_t tv_select_subconnector_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_device *dev = connector->dev;
- struct drm_property *prop;
- uint64_t subconnector;
- int ret;
-
- prop = dev->mode_config.tv_select_subconnector_property;
- if (!prop) {
- DRM_ERROR("Unable to find select subconnector property\n");
- return 0;
- }
-
- ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
- if (ret)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s",
- drm_get_tv_select_name((int)subconnector));
-}
-
-static ssize_t dvii_subconnector_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_device *dev = connector->dev;
- struct drm_property *prop;
- uint64_t subconnector;
- int ret;
-
- prop = dev->mode_config.dvi_i_subconnector_property;
- if (!prop) {
- DRM_ERROR("Unable to find subconnector property\n");
- return 0;
- }
-
- ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
- if (ret)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s",
- drm_get_dvi_i_subconnector_name((int)subconnector));
-}
-
-static ssize_t dvii_select_subconnector_show(struct device *device,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_connector *connector = to_drm_connector(device);
- struct drm_device *dev = connector->dev;
- struct drm_property *prop;
- uint64_t subconnector;
- int ret;
-
- prop = dev->mode_config.dvi_i_select_subconnector_property;
- if (!prop) {
- DRM_ERROR("Unable to find select subconnector property\n");
- return 0;
- }
-
- ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
- if (ret)
- return 0;
-
- return snprintf(buf, PAGE_SIZE, "%s",
- drm_get_dvi_i_select_name((int)subconnector));
-}
-
static DEVICE_ATTR_RW(status);
static DEVICE_ATTR_RO(enabled);
static DEVICE_ATTR_RO(dpms);
@@ -396,54 +305,6 @@ static struct attribute *connector_dev_attrs[] = {
NULL
};
-static DEVICE_ATTR_RO(tv_subconnector);
-static DEVICE_ATTR_RO(tv_select_subconnector);
-
-static struct attribute *connector_tv_dev_attrs[] = {
- &dev_attr_tv_subconnector.attr,
- &dev_attr_tv_select_subconnector.attr,
- NULL
-};
-
-static DEVICE_ATTR_RO(dvii_subconnector);
-static DEVICE_ATTR_RO(dvii_select_subconnector);
-
-static struct attribute *connector_dvii_dev_attrs[] = {
- &dev_attr_dvii_subconnector.attr,
- &dev_attr_dvii_select_subconnector.attr,
- NULL
-};
-
-/* Connector type related helpers */
-static int kobj_connector_type(struct kobject *kobj)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct drm_connector *connector = to_drm_connector(dev);
-
- return connector->connector_type;
-}
-
-static umode_t connector_is_dvii(struct kobject *kobj,
- struct attribute *attr, int idx)
-{
- return kobj_connector_type(kobj) == DRM_MODE_CONNECTOR_DVII ?
- attr->mode : 0;
-}
-
-static umode_t connector_is_tv(struct kobject *kobj,
- struct attribute *attr, int idx)
-{
- switch (kobj_connector_type(kobj)) {
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_TV:
- return attr->mode;
- }
-
- return 0;
-}
-
static struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
@@ -461,20 +322,8 @@ static const struct attribute_group connector_dev_group = {
.bin_attrs = connector_bin_attrs,
};
-static const struct attribute_group connector_tv_dev_group = {
- .attrs = connector_tv_dev_attrs,
- .is_visible = connector_is_tv,
-};
-
-static const struct attribute_group connector_dvii_dev_group = {
- .attrs = connector_dvii_dev_attrs,
- .is_visible = connector_is_dvii,
-};
-
static const struct attribute_group *connector_dev_groups[] = {
&connector_dev_group,
- &connector_tv_dev_group,
- &connector_dvii_dev_group,
NULL
};
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index f90bd5fe3..ac9f4b3ec 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -395,16 +395,8 @@ static const struct vm_operations_struct drm_vm_sg_ops = {
.close = drm_vm_close,
};
-/**
- * \c open method for shared virtual memory.
- *
- * \param vma virtual memory area.
- *
- * Create a new drm_vma_entry structure as the \p vma private data entry and
- * add it to drm_device::vmalist.
- */
-void drm_vm_open_locked(struct drm_device *dev,
- struct vm_area_struct *vma)
+static void drm_vm_open_locked(struct drm_device *dev,
+ struct vm_area_struct *vma)
{
struct drm_vma_entry *vma_entry;
@@ -429,8 +421,8 @@ static void drm_vm_open(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
-void drm_vm_close_locked(struct drm_device *dev,
- struct vm_area_struct *vma)
+static void drm_vm_close_locked(struct drm_device *dev,
+ struct vm_area_struct *vma)
{
struct drm_vma_entry *pt, *temp;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index e8858985f..3d4f56df8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -314,7 +314,7 @@ static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
return -EINVAL;
- obj = drm_gem_object_lookup(dev, file, args->handle);
+ obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -335,7 +335,7 @@ static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
if (args->flags)
return -EINVAL;
- obj = drm_gem_object_lookup(dev, file, args->handle);
+ obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -356,7 +356,7 @@ static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- obj = drm_gem_object_lookup(dev, file, args->handle);
+ obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -441,7 +441,7 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
if (!gpu)
return -ENXIO;
- obj = drm_gem_object_lookup(dev, file, args->handle);
+ obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -497,7 +497,7 @@ static struct drm_driver etnaviv_drm_driver = {
.open = etnaviv_open,
.preclose = etnaviv_preclose,
.set_busid = drm_platform_set_busid,
- .gem_free_object = etnaviv_gem_free_object,
+ .gem_free_object_unlocked = etnaviv_gem_free_object,
.gem_vm_ops = &vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 281c6eca2..df9bcbab9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -129,10 +129,9 @@ void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
/* when we start tracking the pin count, then do something here */
}
-static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
+static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
struct vm_area_struct *vma)
{
- struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
pgprot_t vm_page_prot;
vma->vm_flags &= ~VM_PFNMAP;
@@ -151,9 +150,9 @@ static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
* in particular in the case of mmap'd dmabufs)
*/
fput(vma->vm_file);
- get_file(obj->filp);
+ get_file(etnaviv_obj->base.filp);
vma->vm_pgoff = 0;
- vma->vm_file = obj->filp;
+ vma->vm_file = etnaviv_obj->base.filp;
vma->vm_page_prot = vm_page_prot;
}
@@ -173,7 +172,7 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
}
obj = to_etnaviv_bo(vma->vm_private_data);
- return etnaviv_gem_mmap_obj(vma->vm_private_data, vma);
+ return obj->ops->mmap(obj, vma);
}
int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -545,6 +544,7 @@ static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
.get_pages = etnaviv_gem_shmem_get_pages,
.release = etnaviv_gem_shmem_release,
.vmap = etnaviv_gem_vmap_impl,
+ .mmap = etnaviv_gem_mmap_obj,
};
void etnaviv_gem_free_object(struct drm_gem_object *obj)
@@ -886,10 +886,17 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
put_task_struct(etnaviv_obj->userptr.task);
}
+static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
+ struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
.get_pages = etnaviv_gem_userptr_get_pages,
.release = etnaviv_gem_userptr_release,
.vmap = etnaviv_gem_vmap_impl,
+ .mmap = etnaviv_gem_userptr_mmap_obj,
};
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 02665d8c1..e63ff116a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -79,6 +79,7 @@ struct etnaviv_gem_ops {
int (*get_pages)(struct etnaviv_gem_object *);
void (*release)(struct etnaviv_gem_object *);
void *(*vmap)(struct etnaviv_gem_object *);
+ int (*mmap)(struct etnaviv_gem_object *, struct vm_area_struct *);
};
static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
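
The new .mmap entry completes the etnaviv_gem_ops vtable, so etnaviv_gem_mmap() can dispatch per backend instead of assuming shmem pages. A hedged sketch of what a backend implementation looks like; all example_* identifiers are invented:

static int example_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
				struct vm_area_struct *vma)
{
	/* a backend with no CPU-visible pages would simply refuse */
	return -EINVAL;
}

static const struct etnaviv_gem_ops example_gem_ops = {
	/* .get_pages, .release and .vmap elided; they default to NULL */
	.mmap = example_gem_mmap_obj,
};
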
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 4e67395f5..b93618c1a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -84,10 +84,17 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf);
}
+static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
+ struct vm_area_struct *vma)
+{
+ return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+}
+
static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
/* .get_pages should never be called */
.release = etnaviv_gem_prime_release,
.vmap = etnaviv_gem_prime_vmap_impl,
+ .mmap = etnaviv_gem_prime_mmap_obj,
};
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 236ada93d..afdd55ddf 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -28,11 +28,6 @@
#define BO_LOCKED 0x4000
#define BO_PINNED 0x2000
-static inline void __user *to_user_ptr(u64 address)
-{
- return (void __user *)(uintptr_t)address;
-}
-
static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
struct etnaviv_gpu *gpu, size_t nr)
{
@@ -347,21 +342,21 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
cmdbuf->exec_state = args->exec_state;
cmdbuf->ctx = file->driver_priv;
- ret = copy_from_user(bos, to_user_ptr(args->bos),
+ ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
args->nr_bos * sizeof(*bos));
if (ret) {
ret = -EFAULT;
goto err_submit_cmds;
}
- ret = copy_from_user(relocs, to_user_ptr(args->relocs),
+ ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
args->nr_relocs * sizeof(*relocs));
if (ret) {
ret = -EFAULT;
goto err_submit_cmds;
}
- ret = copy_from_user(stream, to_user_ptr(args->stream),
+ ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
args->stream_size);
if (ret) {
ret = -EFAULT;
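
u64_to_user_ptr() is the generic kernel.h helper that replaces the driver-local to_user_ptr(); it hides the u64 -> uintptr_t -> __user pointer cast. A hedged usage sketch, with example_* names invented and example_bo standing in for the real UAPI struct:

#include <linux/kernel.h>
#include <linux/uaccess.h>

struct example_bo { u32 handle; u32 flags; };

static int example_copy_bos(struct example_bo *bos, u64 user_addr, u32 nr)
{
	/* the u64 handed in from userspace becomes a proper __user pointer */
	if (copy_from_user(bos, u64_to_user_ptr(user_addr),
			   nr * sizeof(*bos)))
		return -EFAULT;
	return 0;
}
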
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 306dde18a..ff6aa5dfb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -796,9 +796,9 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
debug.state[0] == debug.state[1]) {
seq_puts(m, "seems to be stuck\n");
} else if (debug.address[0] == debug.address[1]) {
- seq_puts(m, "adress is constant\n");
+ seq_puts(m, "address is constant\n");
} else {
- seq_puts(m, "is runing\n");
+ seq_puts(m, "is running\n");
}
seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
@@ -1528,8 +1528,8 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
INIT_WORK(&gpu->recover_work, recover_worker);
init_waitqueue_head(&gpu->fence_event);
- setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
- (unsigned long)gpu);
+ setup_deferrable_timer(&gpu->hangcheck_timer, hangcheck_handler,
+ (unsigned long)gpu);
priv->gpu[priv->num_gpus++] = gpu;
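
setup_deferrable_timer() arms a timer that will not wake an idle CPU; expiry is deferred until the CPU wakes anyway, which is acceptable for a watchdog like this hangcheck. A minimal hedged sketch with invented names:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void example_hangcheck(unsigned long data)
{
	/* check for progress here; re-arm the timer if still busy */
}

static struct timer_list example_timer;

static void example_arm(void)
{
	setup_deferrable_timer(&example_timer, example_hangcheck, 0);
	/* if the CPU is idle, expiry is deferred until it next wakes */
	mod_timer(&example_timer, jiffies + msecs_to_jiffies(500));
}
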
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 522cfd447..16353ee81 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -225,6 +225,7 @@ struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
+ etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
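
In 4.7 the supported-page-size bitmap moved from struct iommu_ops to struct iommu_domain, so a domain that never sets pgsize_bitmap cannot map anything. A hedged sketch of a minimal domain allocator under that assumption; identifiers are invented and only 4 KiB pages are claimed:

#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/sizes.h>

static struct iommu_domain *example_domain_alloc(unsigned type)
{
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	/* per-domain as of 4.7; only 4 KiB mappings supported here */
	domain->pgsize_bitmap = SZ_4K;
	return domain;
}
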
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index baddf33fb..d814b3048 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -71,8 +71,9 @@ config DRM_EXYNOS_DSI
This enables support for Exynos MIPI-DSI device.
config DRM_EXYNOS_DP
- bool "Display Port"
+ bool "EXYNOS specific extensions for Analogix DP driver"
depends on DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON
+ select DRM_ANALOGIX_DP
default DRM_EXYNOS
select DRM_PANEL
help
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 23d2f9587..f663490e9 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -12,7 +12,7 @@ exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o
exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON) += exynos7_drm_decon.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DPI) += exynos_drm_dpi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DSI) += exynos_drm_dsi.o
-exynosdrm-$(CONFIG_DRM_EXYNOS_DP) += exynos_dp_core.o exynos_dp_reg.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_DP) += exynos_dp.o
exynosdrm-$(CONFIG_DRM_EXYNOS_MIXER) += exynos_mixer.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 5245bc5e8..ac21b4000 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -28,6 +28,10 @@
#define WINDOWS_NR 3
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
+#define IFTYPE_I80 (1 << 0)
+#define I80_HW_TRG (1 << 1)
+#define IFTYPE_HDMI (1 << 2)
+
static const char * const decon_clks_name[] = {
"pclk",
"aclk_decon",
@@ -38,12 +42,6 @@ static const char * const decon_clks_name[] = {
"sclk_decon_eclk",
};
-enum decon_iftype {
- IFTYPE_RGB,
- IFTYPE_I80,
- IFTYPE_HDMI
-};
-
enum decon_flag_bits {
BIT_CLKS_ENABLED,
BIT_IRQS_ENABLED,
@@ -61,7 +59,7 @@ struct decon_context {
struct clk *clks[ARRAY_SIZE(decon_clks_name)];
int pipe;
unsigned long flags;
- enum decon_iftype out_type;
+ unsigned long out_type;
int first_win;
};
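
With out_type now an unsigned long bitmask rather than an enum, variants compose: a TV-path DECON can be both hardware-triggered and HDMI at once, so tests use & instead of ==. A small illustration reusing the flag definitions added above:

#include <linux/printk.h>

static void example_classify(unsigned long out_type)
{
	if (out_type & IFTYPE_HDMI)	/* may coexist with I80_HW_TRG */
		pr_info("TV path\n");

	if (!(out_type & IFTYPE_I80))
		pr_info("RGB interface timings apply\n");
}
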
@@ -95,7 +93,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
if (!test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) {
val = VIDINTCON0_INTEN;
- if (ctx->out_type == IFTYPE_I80)
+ if (ctx->out_type & IFTYPE_I80)
val |= VIDINTCON0_FRAMEDONE;
else
val |= VIDINTCON0_INTFRMEN;
@@ -119,11 +117,11 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
static void decon_setup_trigger(struct decon_context *ctx)
{
- u32 val = (ctx->out_type != IFTYPE_HDMI)
+ u32 val = !(ctx->out_type & I80_HW_TRG)
? TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN
: TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
- TRIGCON_HWTRIGMASK_I80_RGB | TRIGCON_HWTRIGEN_I80_RGB;
+ TRIGCON_HWTRIGMASK | TRIGCON_HWTRIGEN;
writel(val, ctx->addr + DECON_TRIGCON);
}
@@ -136,7 +134,7 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
if (test_bit(BIT_SUSPENDED, &ctx->flags))
return;
- if (ctx->out_type == IFTYPE_HDMI) {
+ if (ctx->out_type & IFTYPE_HDMI) {
m->crtc_hsync_start = m->crtc_hdisplay + 10;
m->crtc_hsync_end = m->crtc_htotal - 92;
m->crtc_vsync_start = m->crtc_vdisplay + 1;
@@ -149,19 +147,24 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
writel(val, ctx->addr + DECON_CMU);
+ if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
+ decon_setup_trigger(ctx);
+
/* lcd on and use command if */
val = VIDOUT_LCD_ON;
- if (ctx->out_type == IFTYPE_I80)
+ if (ctx->out_type & IFTYPE_I80) {
val |= VIDOUT_COMMAND_IF;
- else
+ } else {
val |= VIDOUT_RGB_IF;
+ }
+
writel(val, ctx->addr + DECON_VIDOUTCON0);
val = VIDTCON2_LINEVAL(m->vdisplay - 1) |
VIDTCON2_HOZVAL(m->hdisplay - 1);
writel(val, ctx->addr + DECON_VIDTCON2);
- if (ctx->out_type != IFTYPE_I80) {
+ if (!(ctx->out_type & IFTYPE_I80)) {
val = VIDTCON00_VBPD_F(
m->crtc_vtotal - m->crtc_vsync_end - 1) |
VIDTCON00_VFPD_F(
@@ -183,10 +186,10 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
writel(val, ctx->addr + DECON_VIDTCON11);
}
- decon_setup_trigger(ctx);
-
/* enable output and display signal */
decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID | VIDCON0_ENVID_F, ~0);
+
+ decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
}
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
@@ -300,7 +303,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
val = dma_addr + pitch * state->src.h;
writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
- if (ctx->out_type != IFTYPE_HDMI)
+ if (!(ctx->out_type & IFTYPE_HDMI))
val = BIT_VAL(pitch - state->crtc.w * bpp, 27, 14)
| BIT_VAL(state->crtc.w * bpp, 13, 0);
else
@@ -312,9 +315,6 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
/* window enable */
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
-
- /* standalone update */
- decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
}
static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -326,15 +326,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
if (test_bit(BIT_SUSPENDED, &ctx->flags))
return;
- decon_shadow_protect_win(ctx, win, true);
-
- /* window disable */
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
-
- decon_shadow_protect_win(ctx, win, false);
-
- /* standalone update */
- decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
}
static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -348,7 +340,10 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
for (i = ctx->first_win; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);
- if (ctx->out_type == IFTYPE_I80)
+ /* standalone update */
+ decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+
+ if (ctx->out_type & IFTYPE_I80)
set_bit(BIT_WIN_UPDATED, &ctx->flags);
}
@@ -374,7 +369,7 @@ static void decon_swreset(struct decon_context *ctx)
WARN(tries == 0, "failed to software reset DECON\n");
- if (ctx->out_type != IFTYPE_HDMI)
+ if (!(ctx->out_type & IFTYPE_HDMI))
return;
writel(VIDCON0_CLKVALUP | VIDCON0_VLCKFREE, ctx->addr + DECON_VIDCON0);
@@ -383,7 +378,6 @@ static void decon_swreset(struct decon_context *ctx)
writel(VIDCON1_VCLK_RUN_VDEN_DISABLE, ctx->addr + DECON_VIDCON1);
writel(CRCCTRL_CRCEN | CRCCTRL_CRCSTART_F | CRCCTRL_CRCCLKEN,
ctx->addr + DECON_CRCCTRL);
- decon_setup_trigger(ctx);
}
static void decon_enable(struct exynos_drm_crtc *crtc)
@@ -395,8 +389,12 @@ static void decon_enable(struct exynos_drm_crtc *crtc)
pm_runtime_get_sync(ctx->dev);
+ exynos_drm_pipe_clk_enable(crtc, true);
+
set_bit(BIT_CLKS_ENABLED, &ctx->flags);
+ decon_swreset(ctx);
+
/* if vblank was enabled status, enable it again. */
if (test_and_clear_bit(BIT_IRQS_ENABLED, &ctx->flags))
decon_enable_vblank(ctx->crtc);
@@ -424,6 +422,8 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
clear_bit(BIT_CLKS_ENABLED, &ctx->flags);
+ exynos_drm_pipe_clk_enable(crtc, false);
+
pm_runtime_put_sync(ctx->dev);
set_bit(BIT_SUSPENDED, &ctx->flags);
@@ -433,13 +433,12 @@ static void decon_te_irq_handler(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
- if (!test_bit(BIT_CLKS_ENABLED, &ctx->flags))
+ if (!test_bit(BIT_CLKS_ENABLED, &ctx->flags) ||
+ (ctx->out_type & I80_HW_TRG))
return;
if (test_and_clear_bit(BIT_WIN_UPDATED, &ctx->flags))
decon_set_bits(ctx, DECON_TRIGCON, TRIGCON_SWTRIGCMD, ~0);
-
- drm_crtc_handle_vblank(&ctx->crtc->base);
}
static void decon_clear_channels(struct exynos_drm_crtc *crtc)
@@ -459,8 +458,10 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
decon_shadow_protect_win(ctx, win, true);
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
decon_shadow_protect_win(ctx, win, false);
- decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
}
+
+ decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+
/* TODO: wait for possible vsync */
msleep(50);
@@ -509,7 +510,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
}
exynos_plane = &ctx->planes[ctx->first_win];
- out_type = (ctx->out_type == IFTYPE_HDMI) ? EXYNOS_DISPLAY_TYPE_HDMI
+ out_type = (ctx->out_type & IFTYPE_HDMI) ? EXYNOS_DISPLAY_TYPE_HDMI
: EXYNOS_DISPLAY_TYPE_LCD;
ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
ctx->pipe, out_type,
@@ -570,6 +571,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
/* clear */
writel(val, ctx->addr + DECON_VIDINTCON1);
+ drm_crtc_handle_vblank(&ctx->crtc->base);
}
out:
@@ -617,11 +619,11 @@ static const struct dev_pm_ops exynos5433_decon_pm_ops = {
static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
{
.compatible = "samsung,exynos5433-decon",
- .data = (void *)IFTYPE_RGB
+ .data = (void *)I80_HW_TRG
},
{
.compatible = "samsung,exynos5433-decon-tv",
- .data = (void *)IFTYPE_HDMI
+ .data = (void *)(I80_HW_TRG | IFTYPE_HDMI)
},
{},
};
@@ -629,7 +631,6 @@ MODULE_DEVICE_TABLE(of, exynos5433_decon_driver_dt_match);
static int exynos5433_decon_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
struct decon_context *ctx;
struct resource *res;
@@ -642,14 +643,13 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
__set_bit(BIT_SUSPENDED, &ctx->flags);
ctx->dev = dev;
+ ctx->out_type = (unsigned long)of_device_get_match_data(dev);
- of_id = of_match_device(exynos5433_decon_driver_dt_match, &pdev->dev);
- ctx->out_type = (enum decon_iftype)of_id->data;
-
- if (ctx->out_type == IFTYPE_HDMI)
+ if (ctx->out_type & IFTYPE_HDMI) {
ctx->first_win = 1;
- else if (of_get_child_by_name(dev->of_node, "i80-if-timings"))
- ctx->out_type = IFTYPE_I80;
+ } else if (of_get_child_by_name(dev->of_node, "i80-if-timings")) {
+ ctx->out_type |= IFTYPE_I80;
+ }
for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
struct clk *clk;
@@ -674,7 +674,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
}
res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
- (ctx->out_type == IFTYPE_I80) ? "lcd_sys" : "vsync");
+ (ctx->out_type & IFTYPE_I80) ? "lcd_sys" : "vsync");
if (!res) {
dev_err(dev, "cannot find IRQ resource\n");
return -ENXIO;
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 93361073a..7f9901b77 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -31,7 +31,6 @@
#include "exynos_drm_plane.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_fbdev.h"
#include "exynos_drm_iommu.h"
/*
@@ -593,7 +592,6 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.commit = decon_commit,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
- .wait_for_vblank = decon_wait_for_vblank,
.atomic_begin = decon_atomic_begin,
.update_plane = decon_update_plane,
.disable_plane = decon_disable_plane,
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
new file mode 100644
index 000000000..4c1fb3f8b
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -0,0 +1,312 @@
+/*
+ * Samsung SoC DP (Display Port) interface driver.
+ *
+ * Copyright (C) 2012 Samsung Electronics Co., Ltd.
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/of_graph.h>
+#include <linux/component.h>
+#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include <drm/bridge/analogix_dp.h>
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_crtc.h"
+
+#define to_dp(nm) container_of(nm, struct exynos_dp_device, nm)
+
+struct exynos_dp_device {
+ struct drm_encoder encoder;
+ struct drm_connector *connector;
+ struct drm_bridge *ptn_bridge;
+ struct drm_device *drm_dev;
+ struct device *dev;
+
+ struct videomode vm;
+ struct analogix_dp_plat_data plat_data;
+};
+
+int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data,
+ bool enable)
+{
+ struct exynos_dp_device *dp = to_dp(plat_data);
+ struct drm_encoder *encoder = &dp->encoder;
+
+ if (!encoder->crtc)
+ return -EPERM;
+
+ exynos_drm_pipe_clk_enable(to_exynos_crtc(encoder->crtc), enable);
+
+ return 0;
+}
+
+static int exynos_dp_poweron(struct analogix_dp_plat_data *plat_data)
+{
+ return exynos_dp_crtc_clock_enable(plat_data, true);
+}
+
+static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data)
+{
+ return exynos_dp_crtc_clock_enable(plat_data, false);
+}
+
+static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data)
+{
+ struct exynos_dp_device *dp = to_dp(plat_data);
+ struct drm_connector *connector = dp->connector;
+ struct drm_display_mode *mode;
+ int num_modes = 0;
+
+ if (dp->plat_data.panel)
+ return num_modes;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ DRM_ERROR("failed to create a new display mode.\n");
+ return num_modes;
+ }
+
+ drm_display_mode_from_videomode(&dp->vm, mode);
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ return num_modes + 1;
+}
+
+static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
+ struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct exynos_dp_device *dp = to_dp(plat_data);
+ struct drm_encoder *encoder = &dp->encoder;
+ int ret;
+
+ drm_connector_register(connector);
+ dp->connector = connector;
+
+ /* Pre-empt DP connector creation if there's a bridge */
+ if (dp->ptn_bridge) {
+ bridge->next = dp->ptn_bridge;
+ dp->ptn_bridge->encoder = encoder;
+ ret = drm_bridge_attach(encoder->dev, dp->ptn_bridge);
+ if (ret) {
+ DRM_ERROR("Failed to attach bridge to drm\n");
+ bridge->next = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void exynos_dp_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void exynos_dp_nop(struct drm_encoder *encoder)
+{
+ /* do nothing */
+}
+
+static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
+ .mode_set = exynos_dp_mode_set,
+ .enable = exynos_dp_nop,
+ .disable = exynos_dp_nop,
+};
+
+static const struct drm_encoder_funcs exynos_dp_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
+{
+ int ret;
+
+ ret = of_get_videomode(dp->dev->of_node, &dp->vm, OF_USE_NATIVE_MODE);
+ if (ret) {
+ DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct exynos_dp_device *dp = dev_get_drvdata(dev);
+ struct drm_encoder *encoder = &dp->encoder;
+ struct drm_device *drm_dev = data;
+ int pipe, ret;
+
+ /*
+ * As the comment in the probe function says, we don't need the
+ * device drvdata anymore; hand it over to the analogix dp
+ * driver by setting the device drvdata to NULL.
+ */
+ dev_set_drvdata(dev, NULL);
+
+ dp->dev = dev;
+ dp->drm_dev = drm_dev;
+
+ dp->plat_data.dev_type = EXYNOS_DP;
+ dp->plat_data.power_on = exynos_dp_poweron;
+ dp->plat_data.power_off = exynos_dp_poweroff;
+ dp->plat_data.attach = exynos_dp_bridge_attach;
+ dp->plat_data.get_modes = exynos_dp_get_modes;
+
+ if (!dp->plat_data.panel && !dp->ptn_bridge) {
+ ret = exynos_dp_dt_parse_panel(dp);
+ if (ret)
+ return ret;
+ }
+
+ pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
+ EXYNOS_DISPLAY_TYPE_LCD);
+ if (pipe < 0)
+ return pipe;
+
+ encoder->possible_crtcs = 1 << pipe;
+
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+
+ drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
+
+ dp->plat_data.encoder = encoder;
+
+ return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
+}
+
+static void exynos_dp_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ return analogix_dp_unbind(dev, master, data);
+}
+
+static const struct component_ops exynos_dp_ops = {
+ .bind = exynos_dp_bind,
+ .unbind = exynos_dp_unbind,
+};
+
+static int exynos_dp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = NULL, *endpoint = NULL;
+ struct exynos_dp_device *dp;
+
+ dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
+ GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ /*
+ * We only use the drvdata until the driver reaches the component
+ * bind function; there we set drvdata to NULL so that the
+ * analogix dp driver takes charge of it.
+ */
+ platform_set_drvdata(pdev, dp);
+
+ /* This is for backward compatibility. */
+ np = of_parse_phandle(dev->of_node, "panel", 0);
+ if (np) {
+ dp->plat_data.panel = of_drm_find_panel(np);
+ of_node_put(np);
+ if (!dp->plat_data.panel)
+ return -EPROBE_DEFER;
+ goto out;
+ }
+
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (endpoint) {
+ np = of_graph_get_remote_port_parent(endpoint);
+ if (np) {
+ /* The remote port can be either a panel or a bridge */
+ dp->plat_data.panel = of_drm_find_panel(np);
+ if (!dp->plat_data.panel) {
+ dp->ptn_bridge = of_drm_find_bridge(np);
+ if (!dp->ptn_bridge) {
+ of_node_put(np);
+ return -EPROBE_DEFER;
+ }
+ }
+ of_node_put(np);
+ } else {
+ DRM_ERROR("no remote endpoint device node found.\n");
+ return -EINVAL;
+ }
+ } else {
+ DRM_ERROR("no port endpoint subnode found.\n");
+ return -EINVAL;
+ }
+
+out:
+ return component_add(&pdev->dev, &exynos_dp_ops);
+}
+
+static int exynos_dp_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &exynos_dp_ops);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int exynos_dp_suspend(struct device *dev)
+{
+ return analogix_dp_suspend(dev);
+}
+
+static int exynos_dp_resume(struct device *dev)
+{
+ return analogix_dp_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops exynos_dp_pm_ops = {
+ SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
+};
+
+static const struct of_device_id exynos_dp_match[] = {
+ { .compatible = "samsung,exynos5-dp" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_dp_match);
+
+struct platform_driver dp_driver = {
+ .probe = exynos_dp_probe,
+ .remove = exynos_dp_remove,
+ .driver = {
+ .name = "exynos-dp",
+ .owner = THIS_MODULE,
+ .pm = &exynos_dp_pm_ops,
+ .of_match_table = exynos_dp_match,
+ },
+};
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("Samsung Specific Analogix-DP Driver Extension");
+MODULE_LICENSE("GPL v2");
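
The new file leans on the component framework: probe only parses the DT and registers a component, while the real encoder setup happens in bind() once the master DRM device exists. A stripped-down hedged sketch of that split; all example_* names are invented:

#include <linux/component.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>

static int example_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm = data;	/* the master hands us the DRM device */

	(void)drm;			/* encoder/bridge setup would go here */
	return 0;
}

static void example_unbind(struct device *dev, struct device *master,
			   void *data)
{
}

static const struct component_ops example_component_ops = {
	.bind	= example_bind,
	.unbind	= example_unbind,
};

static int example_probe(struct platform_device *pdev)
{
	/* defer everything that needs the DRM device to example_bind() */
	return component_add(&pdev->dev, &example_component_ops);
}
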
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
deleted file mode 100644
index cff8dc788..000000000
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ /dev/null
@@ -1,1499 +0,0 @@
-/*
- * Samsung SoC DP (Display Port) interface driver.
- *
- * Copyright (C) 2012 Samsung Electronics Co., Ltd.
- * Author: Jingoo Han <jg1.han@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/of_graph.h>
-#include <linux/gpio.h>
-#include <linux/component.h>
-#include <linux/phy/phy.h>
-#include <video/of_display_timing.h>
-#include <video/of_videomode.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_panel.h>
-
-#include "exynos_dp_core.h"
-#include "exynos_drm_crtc.h"
-
-#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
- connector)
-
-static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
-{
- return to_exynos_crtc(dp->encoder.crtc);
-}
-
-static inline struct exynos_dp_device *encoder_to_dp(
- struct drm_encoder *e)
-{
- return container_of(e, struct exynos_dp_device, encoder);
-}
-
-struct bridge_init {
- struct i2c_client *client;
- struct device_node *node;
-};
-
-static void exynos_dp_init_dp(struct exynos_dp_device *dp)
-{
- exynos_dp_reset(dp);
-
- exynos_dp_swreset(dp);
-
- exynos_dp_init_analog_param(dp);
- exynos_dp_init_interrupt(dp);
-
- /* SW defined function Normal operation */
- exynos_dp_enable_sw_function(dp);
-
- exynos_dp_config_interrupt(dp);
- exynos_dp_init_analog_func(dp);
-
- exynos_dp_init_hpd(dp);
- exynos_dp_init_aux(dp);
-}
-
-static int exynos_dp_detect_hpd(struct exynos_dp_device *dp)
-{
- int timeout_loop = 0;
-
- while (exynos_dp_get_plug_in_status(dp) != 0) {
- timeout_loop++;
- if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
- dev_err(dp->dev, "failed to get hpd plug status\n");
- return -ETIMEDOUT;
- }
- usleep_range(10, 11);
- }
-
- return 0;
-}
-
-static unsigned char exynos_dp_calc_edid_check_sum(unsigned char *edid_data)
-{
- int i;
- unsigned char sum = 0;
-
- for (i = 0; i < EDID_BLOCK_LENGTH; i++)
- sum = sum + edid_data[i];
-
- return sum;
-}
-
-static int exynos_dp_read_edid(struct exynos_dp_device *dp)
-{
- unsigned char edid[EDID_BLOCK_LENGTH * 2];
- unsigned int extend_block = 0;
- unsigned char sum;
- unsigned char test_vector;
- int retval;
-
- /*
- * EDID device address is 0x50.
- * However, if necessary, you must have set upper address
- * into E-EDID in I2C device, 0x30.
- */
-
- /* Read Extension Flag, Number of 128-byte EDID extension blocks */
- retval = exynos_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
- EDID_EXTENSION_FLAG,
- &extend_block);
- if (retval)
- return retval;
-
- if (extend_block > 0) {
- dev_dbg(dp->dev, "EDID data includes a single extension!\n");
-
- /* Read EDID data */
- retval = exynos_dp_read_bytes_from_i2c(dp, I2C_EDID_DEVICE_ADDR,
- EDID_HEADER_PATTERN,
- EDID_BLOCK_LENGTH,
- &edid[EDID_HEADER_PATTERN]);
- if (retval != 0) {
- dev_err(dp->dev, "EDID Read failed!\n");
- return -EIO;
- }
- sum = exynos_dp_calc_edid_check_sum(edid);
- if (sum != 0) {
- dev_err(dp->dev, "EDID bad checksum!\n");
- return -EIO;
- }
-
- /* Read additional EDID data */
- retval = exynos_dp_read_bytes_from_i2c(dp,
- I2C_EDID_DEVICE_ADDR,
- EDID_BLOCK_LENGTH,
- EDID_BLOCK_LENGTH,
- &edid[EDID_BLOCK_LENGTH]);
- if (retval != 0) {
- dev_err(dp->dev, "EDID Read failed!\n");
- return -EIO;
- }
- sum = exynos_dp_calc_edid_check_sum(&edid[EDID_BLOCK_LENGTH]);
- if (sum != 0) {
- dev_err(dp->dev, "EDID bad checksum!\n");
- return -EIO;
- }
-
- exynos_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST,
- &test_vector);
- if (test_vector & DP_TEST_LINK_EDID_READ) {
- exynos_dp_write_byte_to_dpcd(dp,
- DP_TEST_EDID_CHECKSUM,
- edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]);
- exynos_dp_write_byte_to_dpcd(dp,
- DP_TEST_RESPONSE,
- DP_TEST_EDID_CHECKSUM_WRITE);
- }
- } else {
- dev_info(dp->dev, "EDID data does not include any extensions.\n");
-
- /* Read EDID data */
- retval = exynos_dp_read_bytes_from_i2c(dp,
- I2C_EDID_DEVICE_ADDR,
- EDID_HEADER_PATTERN,
- EDID_BLOCK_LENGTH,
- &edid[EDID_HEADER_PATTERN]);
- if (retval != 0) {
- dev_err(dp->dev, "EDID Read failed!\n");
- return -EIO;
- }
- sum = exynos_dp_calc_edid_check_sum(edid);
- if (sum != 0) {
- dev_err(dp->dev, "EDID bad checksum!\n");
- return -EIO;
- }
-
- exynos_dp_read_byte_from_dpcd(dp,
- DP_TEST_REQUEST,
- &test_vector);
- if (test_vector & DP_TEST_LINK_EDID_READ) {
- exynos_dp_write_byte_to_dpcd(dp,
- DP_TEST_EDID_CHECKSUM,
- edid[EDID_CHECKSUM]);
- exynos_dp_write_byte_to_dpcd(dp,
- DP_TEST_RESPONSE,
- DP_TEST_EDID_CHECKSUM_WRITE);
- }
- }
-
- dev_dbg(dp->dev, "EDID Read success!\n");
- return 0;
-}
-
-static int exynos_dp_handle_edid(struct exynos_dp_device *dp)
-{
- u8 buf[12];
- int i;
- int retval;
-
- /* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */
- retval = exynos_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV,
- 12, buf);
- if (retval)
- return retval;
-
- /* Read EDID */
- for (i = 0; i < 3; i++) {
- retval = exynos_dp_read_edid(dp);
- if (!retval)
- break;
- }
-
- return retval;
-}
-
-static void exynos_dp_enable_rx_to_enhanced_mode(struct exynos_dp_device *dp,
- bool enable)
-{
- u8 data;
-
- exynos_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data);
-
- if (enable)
- exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
- DP_LANE_COUNT_ENHANCED_FRAME_EN |
- DPCD_LANE_COUNT_SET(data));
- else
- exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET,
- DPCD_LANE_COUNT_SET(data));
-}
-
-static int exynos_dp_is_enhanced_mode_available(struct exynos_dp_device *dp)
-{
- u8 data;
- int retval;
-
- exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
- retval = DPCD_ENHANCED_FRAME_CAP(data);
-
- return retval;
-}
-
-static void exynos_dp_set_enhanced_mode(struct exynos_dp_device *dp)
-{
- u8 data;
-
- data = exynos_dp_is_enhanced_mode_available(dp);
- exynos_dp_enable_rx_to_enhanced_mode(dp, data);
- exynos_dp_enable_enhanced_mode(dp, data);
-}
-
-static void exynos_dp_training_pattern_dis(struct exynos_dp_device *dp)
-{
- exynos_dp_set_training_pattern(dp, DP_NONE);
-
- exynos_dp_write_byte_to_dpcd(dp,
- DP_TRAINING_PATTERN_SET,
- DP_TRAINING_PATTERN_DISABLE);
-}
-
-static void exynos_dp_set_lane_lane_pre_emphasis(struct exynos_dp_device *dp,
- int pre_emphasis, int lane)
-{
- switch (lane) {
- case 0:
- exynos_dp_set_lane0_pre_emphasis(dp, pre_emphasis);
- break;
- case 1:
- exynos_dp_set_lane1_pre_emphasis(dp, pre_emphasis);
- break;
-
- case 2:
- exynos_dp_set_lane2_pre_emphasis(dp, pre_emphasis);
- break;
-
- case 3:
- exynos_dp_set_lane3_pre_emphasis(dp, pre_emphasis);
- break;
- }
-}
-
-static int exynos_dp_link_start(struct exynos_dp_device *dp)
-{
- u8 buf[4];
- int lane, lane_count, pll_tries, retval;
-
- lane_count = dp->link_train.lane_count;
-
- dp->link_train.lt_state = CLOCK_RECOVERY;
- dp->link_train.eq_loop = 0;
-
- for (lane = 0; lane < lane_count; lane++)
- dp->link_train.cr_loop[lane] = 0;
-
- /* Set link rate and count as you want to establish*/
- exynos_dp_set_link_bandwidth(dp, dp->link_train.link_rate);
- exynos_dp_set_lane_count(dp, dp->link_train.lane_count);
-
- /* Setup RX configuration */
- buf[0] = dp->link_train.link_rate;
- buf[1] = dp->link_train.lane_count;
- retval = exynos_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET,
- 2, buf);
- if (retval)
- return retval;
-
- /* Set TX pre-emphasis to minimum */
- for (lane = 0; lane < lane_count; lane++)
- exynos_dp_set_lane_lane_pre_emphasis(dp,
- PRE_EMPHASIS_LEVEL_0, lane);
-
- /* Wait for PLL lock */
- pll_tries = 0;
- while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
- if (pll_tries == DP_TIMEOUT_LOOP_COUNT) {
- dev_err(dp->dev, "Wait for PLL lock timed out\n");
- return -ETIMEDOUT;
- }
-
- pll_tries++;
- usleep_range(90, 120);
- }
-
- /* Set training pattern 1 */
- exynos_dp_set_training_pattern(dp, TRAINING_PTN1);
-
- /* Set RX training pattern */
- retval = exynos_dp_write_byte_to_dpcd(dp,
- DP_TRAINING_PATTERN_SET,
- DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1);
- if (retval)
- return retval;
-
- for (lane = 0; lane < lane_count; lane++)
- buf[lane] = DP_TRAIN_PRE_EMPH_LEVEL_0 |
- DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
-
- retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
- lane_count, buf);
-
- return retval;
-}
-
-static unsigned char exynos_dp_get_lane_status(u8 link_status[2], int lane)
-{
- int shift = (lane & 1) * 4;
- u8 link_value = link_status[lane>>1];
-
- return (link_value >> shift) & 0xf;
-}
-
-static int exynos_dp_clock_recovery_ok(u8 link_status[2], int lane_count)
-{
- int lane;
- u8 lane_status;
-
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = exynos_dp_get_lane_status(link_status, lane);
- if ((lane_status & DP_LANE_CR_DONE) == 0)
- return -EINVAL;
- }
- return 0;
-}
-
-static int exynos_dp_channel_eq_ok(u8 link_status[2], u8 link_align,
- int lane_count)
-{
- int lane;
- u8 lane_status;
-
- if ((link_align & DP_INTERLANE_ALIGN_DONE) == 0)
- return -EINVAL;
-
- for (lane = 0; lane < lane_count; lane++) {
- lane_status = exynos_dp_get_lane_status(link_status, lane);
- lane_status &= DP_CHANNEL_EQ_BITS;
- if (lane_status != DP_CHANNEL_EQ_BITS)
- return -EINVAL;
- }
-
- return 0;
-}
-
-static unsigned char exynos_dp_get_adjust_request_voltage(u8 adjust_request[2],
- int lane)
-{
- int shift = (lane & 1) * 4;
- u8 link_value = adjust_request[lane>>1];
-
- return (link_value >> shift) & 0x3;
-}
-
-static unsigned char exynos_dp_get_adjust_request_pre_emphasis(
- u8 adjust_request[2],
- int lane)
-{
- int shift = (lane & 1) * 4;
- u8 link_value = adjust_request[lane>>1];
-
- return ((link_value >> shift) & 0xc) >> 2;
-}
-
-static void exynos_dp_set_lane_link_training(struct exynos_dp_device *dp,
- u8 training_lane_set, int lane)
-{
- switch (lane) {
- case 0:
- exynos_dp_set_lane0_link_training(dp, training_lane_set);
- break;
- case 1:
- exynos_dp_set_lane1_link_training(dp, training_lane_set);
- break;
-
- case 2:
- exynos_dp_set_lane2_link_training(dp, training_lane_set);
- break;
-
- case 3:
- exynos_dp_set_lane3_link_training(dp, training_lane_set);
- break;
- }
-}
-
-static unsigned int exynos_dp_get_lane_link_training(
- struct exynos_dp_device *dp,
- int lane)
-{
- u32 reg;
-
- switch (lane) {
- case 0:
- reg = exynos_dp_get_lane0_link_training(dp);
- break;
- case 1:
- reg = exynos_dp_get_lane1_link_training(dp);
- break;
- case 2:
- reg = exynos_dp_get_lane2_link_training(dp);
- break;
- case 3:
- reg = exynos_dp_get_lane3_link_training(dp);
- break;
- default:
- WARN_ON(1);
- return 0;
- }
-
- return reg;
-}
-
-static void exynos_dp_reduce_link_rate(struct exynos_dp_device *dp)
-{
- exynos_dp_training_pattern_dis(dp);
- exynos_dp_set_enhanced_mode(dp);
-
- dp->link_train.lt_state = FAILED;
-}
-
-static void exynos_dp_get_adjust_training_lane(struct exynos_dp_device *dp,
- u8 adjust_request[2])
-{
- int lane, lane_count;
- u8 voltage_swing, pre_emphasis, training_lane;
-
- lane_count = dp->link_train.lane_count;
- for (lane = 0; lane < lane_count; lane++) {
- voltage_swing = exynos_dp_get_adjust_request_voltage(
- adjust_request, lane);
- pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
- adjust_request, lane);
- training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) |
- DPCD_PRE_EMPHASIS_SET(pre_emphasis);
-
- if (voltage_swing == VOLTAGE_LEVEL_3)
- training_lane |= DP_TRAIN_MAX_SWING_REACHED;
- if (pre_emphasis == PRE_EMPHASIS_LEVEL_3)
- training_lane |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
-
- dp->link_train.training_lane[lane] = training_lane;
- }
-}
-
-static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
-{
- int lane, lane_count, retval;
- u8 voltage_swing, pre_emphasis, training_lane;
- u8 link_status[2], adjust_request[2];
-
- usleep_range(100, 101);
-
- lane_count = dp->link_train.lane_count;
-
- retval = exynos_dp_read_bytes_from_dpcd(dp,
- DP_LANE0_1_STATUS, 2, link_status);
- if (retval)
- return retval;
-
- retval = exynos_dp_read_bytes_from_dpcd(dp,
- DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
- if (retval)
- return retval;
-
- if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
- /* set training pattern 2 for EQ */
- exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
-
- retval = exynos_dp_write_byte_to_dpcd(dp,
- DP_TRAINING_PATTERN_SET,
- DP_LINK_SCRAMBLING_DISABLE |
- DP_TRAINING_PATTERN_2);
- if (retval)
- return retval;
-
- dev_info(dp->dev, "Link Training Clock Recovery success\n");
- dp->link_train.lt_state = EQUALIZER_TRAINING;
- } else {
- for (lane = 0; lane < lane_count; lane++) {
- training_lane = exynos_dp_get_lane_link_training(
- dp, lane);
- voltage_swing = exynos_dp_get_adjust_request_voltage(
- adjust_request, lane);
- pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis(
- adjust_request, lane);
-
- if (DPCD_VOLTAGE_SWING_GET(training_lane) ==
- voltage_swing &&
- DPCD_PRE_EMPHASIS_GET(training_lane) ==
- pre_emphasis)
- dp->link_train.cr_loop[lane]++;
-
- if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP ||
- voltage_swing == VOLTAGE_LEVEL_3 ||
- pre_emphasis == PRE_EMPHASIS_LEVEL_3) {
- dev_err(dp->dev, "CR Max reached (%d,%d,%d)\n",
- dp->link_train.cr_loop[lane],
- voltage_swing, pre_emphasis);
- exynos_dp_reduce_link_rate(dp);
- return -EIO;
- }
- }
- }
-
- exynos_dp_get_adjust_training_lane(dp, adjust_request);
-
- for (lane = 0; lane < lane_count; lane++)
- exynos_dp_set_lane_link_training(dp,
- dp->link_train.training_lane[lane], lane);
-
- retval = exynos_dp_write_bytes_to_dpcd(dp,
- DP_TRAINING_LANE0_SET, lane_count,
- dp->link_train.training_lane);
- if (retval)
- return retval;
-
- return retval;
-}
-
-static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
-{
- int lane, lane_count, retval;
- u32 reg;
- u8 link_align, link_status[2], adjust_request[2];
-
- usleep_range(400, 401);
-
- lane_count = dp->link_train.lane_count;
-
- retval = exynos_dp_read_bytes_from_dpcd(dp,
- DP_LANE0_1_STATUS, 2, link_status);
- if (retval)
- return retval;
-
- if (exynos_dp_clock_recovery_ok(link_status, lane_count)) {
- exynos_dp_reduce_link_rate(dp);
- return -EIO;
- }
-
- retval = exynos_dp_read_bytes_from_dpcd(dp,
- DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request);
- if (retval)
- return retval;
-
- retval = exynos_dp_read_byte_from_dpcd(dp,
- DP_LANE_ALIGN_STATUS_UPDATED, &link_align);
- if (retval)
- return retval;
-
- exynos_dp_get_adjust_training_lane(dp, adjust_request);
-
- if (!exynos_dp_channel_eq_ok(link_status, link_align, lane_count)) {
- /* traing pattern Set to Normal */
- exynos_dp_training_pattern_dis(dp);
-
- dev_info(dp->dev, "Link Training success!\n");
-
- exynos_dp_get_link_bandwidth(dp, &reg);
- dp->link_train.link_rate = reg;
- dev_dbg(dp->dev, "final bandwidth = %.2x\n",
- dp->link_train.link_rate);
-
- exynos_dp_get_lane_count(dp, &reg);
- dp->link_train.lane_count = reg;
- dev_dbg(dp->dev, "final lane count = %.2x\n",
- dp->link_train.lane_count);
-
- /* set enhanced mode if available */
- exynos_dp_set_enhanced_mode(dp);
- dp->link_train.lt_state = FINISHED;
-
- return 0;
- }
-
- /* not all locked */
- dp->link_train.eq_loop++;
-
- if (dp->link_train.eq_loop > MAX_EQ_LOOP) {
- dev_err(dp->dev, "EQ Max loop\n");
- exynos_dp_reduce_link_rate(dp);
- return -EIO;
- }
-
- for (lane = 0; lane < lane_count; lane++)
- exynos_dp_set_lane_link_training(dp,
- dp->link_train.training_lane[lane], lane);
-
- retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET,
- lane_count, dp->link_train.training_lane);
-
- return retval;
-}
-
-static void exynos_dp_get_max_rx_bandwidth(struct exynos_dp_device *dp,
- u8 *bandwidth)
-{
- u8 data;
-
- /*
- * For DP rev.1.1, Maximum link rate of Main Link lanes
- * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps
- */
- exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data);
- *bandwidth = data;
-}
-
-static void exynos_dp_get_max_rx_lane_count(struct exynos_dp_device *dp,
- u8 *lane_count)
-{
- u8 data;
-
- /*
- * For DP rev.1.1, Maximum number of Main Link lanes
- * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes
- */
- exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data);
- *lane_count = DPCD_MAX_LANE_COUNT(data);
-}
-
-static void exynos_dp_init_training(struct exynos_dp_device *dp,
- enum link_lane_count_type max_lane,
- enum link_rate_type max_rate)
-{
- /*
- * MACRO_RST must be applied after the PLL_LOCK to avoid
- * the DP inter pair skew issue for at least 10 us
- */
- exynos_dp_reset_macro(dp);
-
- /* Initialize by reading RX's DPCD */
- exynos_dp_get_max_rx_bandwidth(dp, &dp->link_train.link_rate);
- exynos_dp_get_max_rx_lane_count(dp, &dp->link_train.lane_count);
-
- if ((dp->link_train.link_rate != LINK_RATE_1_62GBPS) &&
- (dp->link_train.link_rate != LINK_RATE_2_70GBPS)) {
- dev_err(dp->dev, "Rx Max Link Rate is abnormal :%x !\n",
- dp->link_train.link_rate);
- dp->link_train.link_rate = LINK_RATE_1_62GBPS;
- }
-
- if (dp->link_train.lane_count == 0) {
- dev_err(dp->dev, "Rx Max Lane count is abnormal :%x !\n",
- dp->link_train.lane_count);
- dp->link_train.lane_count = (u8)LANE_COUNT1;
- }
-
- /* Setup TX lane count & rate */
- if (dp->link_train.lane_count > max_lane)
- dp->link_train.lane_count = max_lane;
- if (dp->link_train.link_rate > max_rate)
- dp->link_train.link_rate = max_rate;
-
- /* All DP analog module power up */
- exynos_dp_set_analog_power_down(dp, POWER_ALL, 0);
-}
-
-static int exynos_dp_sw_link_training(struct exynos_dp_device *dp)
-{
- int retval = 0, training_finished = 0;
-
- dp->link_train.lt_state = START;
-
- /* Process here */
- while (!retval && !training_finished) {
- switch (dp->link_train.lt_state) {
- case START:
- retval = exynos_dp_link_start(dp);
- if (retval)
- dev_err(dp->dev, "LT link start failed!\n");
- break;
- case CLOCK_RECOVERY:
- retval = exynos_dp_process_clock_recovery(dp);
- if (retval)
- dev_err(dp->dev, "LT CR failed!\n");
- break;
- case EQUALIZER_TRAINING:
- retval = exynos_dp_process_equalizer_training(dp);
- if (retval)
- dev_err(dp->dev, "LT EQ failed!\n");
- break;
- case FINISHED:
- training_finished = 1;
- break;
- case FAILED:
- return -EREMOTEIO;
- }
- }
- if (retval)
- dev_err(dp->dev, "eDP link training failed (%d)\n", retval);
-
- return retval;
-}
-
-static int exynos_dp_set_link_train(struct exynos_dp_device *dp,
- u32 count,
- u32 bwtype)
-{
- int i;
- int retval;
-
- for (i = 0; i < DP_TIMEOUT_LOOP_COUNT; i++) {
- exynos_dp_init_training(dp, count, bwtype);
- retval = exynos_dp_sw_link_training(dp);
- if (retval == 0)
- break;
-
- usleep_range(100, 110);
- }
-
- return retval;
-}
-
-static int exynos_dp_config_video(struct exynos_dp_device *dp)
-{
- int retval = 0;
- int timeout_loop = 0;
- int done_count = 0;
-
- exynos_dp_config_video_slave_mode(dp);
-
- exynos_dp_set_video_color_format(dp);
-
- if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
- dev_err(dp->dev, "PLL is not locked yet.\n");
- return -EINVAL;
- }
-
- for (;;) {
- timeout_loop++;
- if (exynos_dp_is_slave_video_stream_clock_on(dp) == 0)
- break;
- if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
- dev_err(dp->dev, "Timeout of video streamclk ok\n");
- return -ETIMEDOUT;
- }
-
- usleep_range(1, 2);
- }
-
- /* Set to use the register calculated M/N video */
- exynos_dp_set_video_cr_mn(dp, CALCULATED_M, 0, 0);
-
- /* For video bist, Video timing must be generated by register */
- exynos_dp_set_video_timing_mode(dp, VIDEO_TIMING_FROM_CAPTURE);
-
- /* Disable video mute */
- exynos_dp_enable_video_mute(dp, 0);
-
- /* Configure video slave mode */
- exynos_dp_enable_video_master(dp, 0);
-
- timeout_loop = 0;
-
- for (;;) {
- timeout_loop++;
- if (exynos_dp_is_video_stream_on(dp) == 0) {
- done_count++;
- if (done_count > 10)
- break;
- } else if (done_count) {
- done_count = 0;
- }
- if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
- dev_err(dp->dev, "Timeout of video streamclk ok\n");
- return -ETIMEDOUT;
- }
-
- usleep_range(1000, 1001);
- }
-
- if (retval != 0)
- dev_err(dp->dev, "Video stream is not detected!\n");
-
- return retval;
-}
-
-static void exynos_dp_enable_scramble(struct exynos_dp_device *dp, bool enable)
-{
- u8 data;
-
- if (enable) {
- exynos_dp_enable_scrambling(dp);
-
- exynos_dp_read_byte_from_dpcd(dp,
- DP_TRAINING_PATTERN_SET,
- &data);
- exynos_dp_write_byte_to_dpcd(dp,
- DP_TRAINING_PATTERN_SET,
- (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE));
- } else {
- exynos_dp_disable_scrambling(dp);
-
- exynos_dp_read_byte_from_dpcd(dp,
- DP_TRAINING_PATTERN_SET,
- &data);
- exynos_dp_write_byte_to_dpcd(dp,
- DP_TRAINING_PATTERN_SET,
- (u8)(data | DP_LINK_SCRAMBLING_DISABLE));
- }
-}
-
-static irqreturn_t exynos_dp_irq_handler(int irq, void *arg)
-{
- struct exynos_dp_device *dp = arg;
-
- enum dp_irq_type irq_type;
-
- irq_type = exynos_dp_get_irq_type(dp);
- switch (irq_type) {
- case DP_IRQ_TYPE_HP_CABLE_IN:
- dev_dbg(dp->dev, "Received irq - cable in\n");
- schedule_work(&dp->hotplug_work);
- exynos_dp_clear_hotplug_interrupts(dp);
- break;
- case DP_IRQ_TYPE_HP_CABLE_OUT:
- dev_dbg(dp->dev, "Received irq - cable out\n");
- exynos_dp_clear_hotplug_interrupts(dp);
- break;
- case DP_IRQ_TYPE_HP_CHANGE:
- /*
- * We get these change notifications once in a while, but there
- * is nothing we can do with them. Just ignore it for now and
- * only handle cable changes.
- */
- dev_dbg(dp->dev, "Received irq - hotplug change; ignoring.\n");
- exynos_dp_clear_hotplug_interrupts(dp);
- break;
- default:
- dev_err(dp->dev, "Received irq - unknown type!\n");
- break;
- }
- return IRQ_HANDLED;
-}
-
-static void exynos_dp_hotplug(struct work_struct *work)
-{
- struct exynos_dp_device *dp;
-
- dp = container_of(work, struct exynos_dp_device, hotplug_work);
-
- if (dp->drm_dev)
- drm_helper_hpd_irq_event(dp->drm_dev);
-}
-
-static void exynos_dp_commit(struct drm_encoder *encoder)
-{
- struct exynos_dp_device *dp = encoder_to_dp(encoder);
- int ret;
-
- /* Keep the panel disabled while we configure video */
- if (dp->panel) {
- if (drm_panel_disable(dp->panel))
- DRM_ERROR("failed to disable the panel\n");
- }
-
- ret = exynos_dp_detect_hpd(dp);
- if (ret) {
- /* Cable has been disconnected, we're done */
- return;
- }
-
- ret = exynos_dp_handle_edid(dp);
- if (ret) {
- dev_err(dp->dev, "unable to handle edid\n");
- return;
- }
-
- ret = exynos_dp_set_link_train(dp, dp->video_info->lane_count,
- dp->video_info->link_rate);
- if (ret) {
- dev_err(dp->dev, "unable to do link train\n");
- return;
- }
-
- exynos_dp_enable_scramble(dp, 1);
- exynos_dp_enable_rx_to_enhanced_mode(dp, 1);
- exynos_dp_enable_enhanced_mode(dp, 1);
-
- exynos_dp_set_lane_count(dp, dp->video_info->lane_count);
- exynos_dp_set_link_bandwidth(dp, dp->video_info->link_rate);
-
- exynos_dp_init_video(dp);
- ret = exynos_dp_config_video(dp);
- if (ret)
- dev_err(dp->dev, "unable to config video\n");
-
- /* Safe to enable the panel now */
- if (dp->panel) {
- if (drm_panel_enable(dp->panel))
- DRM_ERROR("failed to enable the panel\n");
- }
-
- /* Enable video */
- exynos_dp_start_video(dp);
-}
-
-static enum drm_connector_status exynos_dp_detect(
- struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
-static void exynos_dp_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs exynos_dp_connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = exynos_dp_detect,
- .destroy = exynos_dp_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int exynos_dp_get_modes(struct drm_connector *connector)
-{
- struct exynos_dp_device *dp = ctx_from_connector(connector);
- struct drm_display_mode *mode;
-
- if (dp->panel)
- return drm_panel_get_modes(dp->panel);
-
- mode = drm_mode_create(connector->dev);
- if (!mode) {
- DRM_ERROR("failed to create a new display mode.\n");
- return 0;
- }
-
- drm_display_mode_from_videomode(&dp->vm, mode);
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
-
- return 1;
-}
-
-static struct drm_encoder *exynos_dp_best_encoder(
- struct drm_connector *connector)
-{
- struct exynos_dp_device *dp = ctx_from_connector(connector);
-
- return &dp->encoder;
-}
-
-static const struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
- .get_modes = exynos_dp_get_modes,
- .best_encoder = exynos_dp_best_encoder,
-};
-
-/* returns the number of bridges attached */
-static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp,
- struct drm_encoder *encoder)
-{
- int ret;
-
- encoder->bridge->next = dp->ptn_bridge;
- dp->ptn_bridge->encoder = encoder;
- ret = drm_bridge_attach(encoder->dev, dp->ptn_bridge);
- if (ret) {
- DRM_ERROR("Failed to attach bridge to drm\n");
- return ret;
- }
-
- return 0;
-}
-
-static int exynos_dp_bridge_attach(struct drm_bridge *bridge)
-{
- struct exynos_dp_device *dp = bridge->driver_private;
- struct drm_encoder *encoder = &dp->encoder;
- struct drm_connector *connector = &dp->connector;
- int ret;
-
- /* Pre-empt DP connector creation if there's a bridge */
- if (dp->ptn_bridge) {
- ret = exynos_drm_attach_lcd_bridge(dp, encoder);
- if (!ret)
- return 0;
- }
-
- connector->polled = DRM_CONNECTOR_POLL_HPD;
-
- ret = drm_connector_init(dp->drm_dev, connector,
- &exynos_dp_connector_funcs, DRM_MODE_CONNECTOR_eDP);
- if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
- return ret;
- }
-
- drm_connector_helper_add(connector, &exynos_dp_connector_helper_funcs);
- drm_connector_register(connector);
- drm_mode_connector_attach_encoder(connector, encoder);
-
- if (dp->panel)
- ret = drm_panel_attach(dp->panel, &dp->connector);
-
- return ret;
-}
-
-static void exynos_dp_bridge_enable(struct drm_bridge *bridge)
-{
- struct exynos_dp_device *dp = bridge->driver_private;
- struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
-
- if (dp->dpms_mode == DRM_MODE_DPMS_ON)
- return;
-
- pm_runtime_get_sync(dp->dev);
-
- if (dp->panel) {
- if (drm_panel_prepare(dp->panel)) {
- DRM_ERROR("failed to setup the panel\n");
- return;
- }
- }
-
- if (crtc->ops->clock_enable)
- crtc->ops->clock_enable(dp_to_crtc(dp), true);
-
- phy_power_on(dp->phy);
- exynos_dp_init_dp(dp);
- enable_irq(dp->irq);
- exynos_dp_commit(&dp->encoder);
-
- dp->dpms_mode = DRM_MODE_DPMS_ON;
-}
-
-static void exynos_dp_bridge_disable(struct drm_bridge *bridge)
-{
- struct exynos_dp_device *dp = bridge->driver_private;
- struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
-
- if (dp->dpms_mode != DRM_MODE_DPMS_ON)
- return;
-
- if (dp->panel) {
- if (drm_panel_disable(dp->panel)) {
- DRM_ERROR("failed to disable the panel\n");
- return;
- }
- }
-
- disable_irq(dp->irq);
- flush_work(&dp->hotplug_work);
- phy_power_off(dp->phy);
-
- if (crtc->ops->clock_enable)
- crtc->ops->clock_enable(dp_to_crtc(dp), false);
-
- if (dp->panel) {
- if (drm_panel_unprepare(dp->panel))
- DRM_ERROR("failed to turnoff the panel\n");
- }
-
- pm_runtime_put_sync(dp->dev);
-
- dp->dpms_mode = DRM_MODE_DPMS_OFF;
-}
-
-static void exynos_dp_bridge_nop(struct drm_bridge *bridge)
-{
- /* do nothing */
-}
-
-static const struct drm_bridge_funcs exynos_dp_bridge_funcs = {
- .enable = exynos_dp_bridge_enable,
- .disable = exynos_dp_bridge_disable,
- .pre_enable = exynos_dp_bridge_nop,
- .post_disable = exynos_dp_bridge_nop,
- .attach = exynos_dp_bridge_attach,
-};
-
-static int exynos_dp_create_connector(struct drm_encoder *encoder)
-{
- struct exynos_dp_device *dp = encoder_to_dp(encoder);
- struct drm_device *drm_dev = dp->drm_dev;
- struct drm_bridge *bridge;
- int ret;
-
- bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge) {
- DRM_ERROR("failed to allocate for drm bridge\n");
- return -ENOMEM;
- }
-
- dp->bridge = bridge;
-
- encoder->bridge = bridge;
- bridge->driver_private = dp;
- bridge->encoder = encoder;
- bridge->funcs = &exynos_dp_bridge_funcs;
-
- ret = drm_bridge_attach(drm_dev, bridge);
- if (ret) {
- DRM_ERROR("failed to attach drm bridge\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void exynos_dp_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-}
-
-static void exynos_dp_enable(struct drm_encoder *encoder)
-{
-}
-
-static void exynos_dp_disable(struct drm_encoder *encoder)
-{
-}
-
-static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
- .mode_set = exynos_dp_mode_set,
- .enable = exynos_dp_enable,
- .disable = exynos_dp_disable,
-};
-
-static const struct drm_encoder_funcs exynos_dp_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
-{
- struct device_node *dp_node = dev->of_node;
- struct video_info *dp_video_config;
-
- dp_video_config = devm_kzalloc(dev,
- sizeof(*dp_video_config), GFP_KERNEL);
- if (!dp_video_config)
- return ERR_PTR(-ENOMEM);
-
- dp_video_config->h_sync_polarity =
- of_property_read_bool(dp_node, "hsync-active-high");
-
- dp_video_config->v_sync_polarity =
- of_property_read_bool(dp_node, "vsync-active-high");
-
- dp_video_config->interlaced =
- of_property_read_bool(dp_node, "interlaced");
-
- if (of_property_read_u32(dp_node, "samsung,color-space",
- &dp_video_config->color_space)) {
- dev_err(dev, "failed to get color-space\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (of_property_read_u32(dp_node, "samsung,dynamic-range",
- &dp_video_config->dynamic_range)) {
- dev_err(dev, "failed to get dynamic-range\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (of_property_read_u32(dp_node, "samsung,ycbcr-coeff",
- &dp_video_config->ycbcr_coeff)) {
- dev_err(dev, "failed to get ycbcr-coeff\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (of_property_read_u32(dp_node, "samsung,color-depth",
- &dp_video_config->color_depth)) {
- dev_err(dev, "failed to get color-depth\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (of_property_read_u32(dp_node, "samsung,link-rate",
- &dp_video_config->link_rate)) {
- dev_err(dev, "failed to get link-rate\n");
- return ERR_PTR(-EINVAL);
- }
-
- if (of_property_read_u32(dp_node, "samsung,lane-count",
- &dp_video_config->lane_count)) {
- dev_err(dev, "failed to get lane-count\n");
- return ERR_PTR(-EINVAL);
- }
-
- return dp_video_config;
-}
-
-static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
-{
- int ret;
-
- ret = of_get_videomode(dp->dev->of_node, &dp->vm, OF_USE_NATIVE_MODE);
- if (ret) {
- DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
-{
- struct exynos_dp_device *dp = dev_get_drvdata(dev);
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm_dev = data;
- struct drm_encoder *encoder = &dp->encoder;
- struct resource *res;
- unsigned int irq_flags;
- int pipe, ret = 0;
-
- dp->dev = &pdev->dev;
- dp->dpms_mode = DRM_MODE_DPMS_OFF;
-
- dp->video_info = exynos_dp_dt_parse_pdata(&pdev->dev);
- if (IS_ERR(dp->video_info))
- return PTR_ERR(dp->video_info);
-
- dp->phy = devm_phy_get(dp->dev, "dp");
- if (IS_ERR(dp->phy)) {
- dev_err(dp->dev, "no DP phy configured\n");
- ret = PTR_ERR(dp->phy);
- if (ret) {
- /*
- * phy itself is not enabled, so we can move forward
- * assigning NULL to phy pointer.
- */
- if (ret == -ENOSYS || ret == -ENODEV)
- dp->phy = NULL;
- else
- return ret;
- }
- }
-
- if (!dp->panel && !dp->ptn_bridge) {
- ret = exynos_dp_dt_parse_panel(dp);
- if (ret)
- return ret;
- }
-
- dp->clock = devm_clk_get(&pdev->dev, "dp");
- if (IS_ERR(dp->clock)) {
- dev_err(&pdev->dev, "failed to get clock\n");
- return PTR_ERR(dp->clock);
- }
-
- clk_prepare_enable(dp->clock);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dp->reg_base))
- return PTR_ERR(dp->reg_base);
-
- dp->hpd_gpio = of_get_named_gpio(dev->of_node, "samsung,hpd-gpio", 0);
-
- if (gpio_is_valid(dp->hpd_gpio)) {
- /*
- * Set up the hotplug GPIO from the device tree as an interrupt.
- * Simply specifying a different interrupt in the device tree
- * doesn't work since we handle hotplug rather differently when
- * using a GPIO. We also need the actual GPIO specifier so
- * that we can get the current state of the GPIO.
- */
- ret = devm_gpio_request_one(&pdev->dev, dp->hpd_gpio, GPIOF_IN,
- "hpd_gpio");
- if (ret) {
- dev_err(&pdev->dev, "failed to get hpd gpio\n");
- return ret;
- }
- dp->irq = gpio_to_irq(dp->hpd_gpio);
- irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
- } else {
- dp->hpd_gpio = -ENODEV;
- dp->irq = platform_get_irq(pdev, 0);
- irq_flags = 0;
- }
-
- if (dp->irq == -ENXIO) {
- dev_err(&pdev->dev, "failed to get irq\n");
- return -ENODEV;
- }
-
- INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug);
-
- ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler,
- irq_flags, "exynos-dp", dp);
- if (ret) {
- dev_err(&pdev->dev, "failed to request irq\n");
- return ret;
- }
- disable_irq(dp->irq);
-
- dp->drm_dev = drm_dev;
-
- pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev,
- EXYNOS_DISPLAY_TYPE_LCD);
- if (pipe < 0)
- return pipe;
-
- encoder->possible_crtcs = 1 << pipe;
-
- DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
-
- drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
-
- drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
-
- ret = exynos_dp_create_connector(encoder);
- if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
- drm_encoder_cleanup(encoder);
- return ret;
- }
-
- return 0;
-}
-
-static void exynos_dp_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
- exynos_dp_disable(&dp->encoder);
-}
-
-static const struct component_ops exynos_dp_ops = {
- .bind = exynos_dp_bind,
- .unbind = exynos_dp_unbind,
-};
-
-static int exynos_dp_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *np = NULL, *endpoint = NULL;
- struct exynos_dp_device *dp;
- int ret;
-
- dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
- GFP_KERNEL);
- if (!dp)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, dp);
-
- /* This is for backward compatibility. */
- np = of_parse_phandle(dev->of_node, "panel", 0);
- if (np) {
- dp->panel = of_drm_find_panel(np);
- of_node_put(np);
- if (!dp->panel)
- return -EPROBE_DEFER;
- goto out;
- }
-
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
- if (endpoint) {
- np = of_graph_get_remote_port_parent(endpoint);
- if (np) {
- /* The remote port can be either a panel or a bridge */
- dp->panel = of_drm_find_panel(np);
- if (!dp->panel) {
- dp->ptn_bridge = of_drm_find_bridge(np);
- if (!dp->ptn_bridge) {
- of_node_put(np);
- return -EPROBE_DEFER;
- }
- }
- of_node_put(np);
- } else {
- DRM_ERROR("no remote endpoint device node found.\n");
- return -EINVAL;
- }
- } else {
- DRM_ERROR("no port endpoint subnode found.\n");
- return -EINVAL;
- }
-
-out:
- pm_runtime_enable(dev);
-
- ret = component_add(&pdev->dev, &exynos_dp_ops);
- if (ret)
- goto err_disable_pm_runtime;
-
- return ret;
-
-err_disable_pm_runtime:
- pm_runtime_disable(dev);
-
- return ret;
-}
-
-static int exynos_dp_remove(struct platform_device *pdev)
-{
- pm_runtime_disable(&pdev->dev);
- component_del(&pdev->dev, &exynos_dp_ops);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int exynos_dp_suspend(struct device *dev)
-{
- struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
- clk_disable_unprepare(dp->clock);
-
- return 0;
-}
-
-static int exynos_dp_resume(struct device *dev)
-{
- struct exynos_dp_device *dp = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare_enable(dp->clock);
- if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret);
- return ret;
- }
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops exynos_dp_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
-};
-
-static const struct of_device_id exynos_dp_match[] = {
- { .compatible = "samsung,exynos5-dp" },
- {},
-};
-MODULE_DEVICE_TABLE(of, exynos_dp_match);
-
-struct platform_driver dp_driver = {
- .probe = exynos_dp_probe,
- .remove = exynos_dp_remove,
- .driver = {
- .name = "exynos-dp",
- .owner = THIS_MODULE,
- .pm = &exynos_dp_pm_ops,
- .of_match_table = exynos_dp_match,
- },
-};
-
-MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
-MODULE_DESCRIPTION("Samsung SoC DP Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
deleted file mode 100644
index b5c2d8f47..000000000
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Header file for Samsung DP (Display Port) interface driver.
- *
- * Copyright (C) 2012 Samsung Electronics Co., Ltd.
- * Author: Jingoo Han <jg1.han@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DP_CORE_H
-#define _EXYNOS_DP_CORE_H
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/exynos_drm.h>
-#include <video/videomode.h>
-
-#include "exynos_drm_drv.h"
-
-#define DP_TIMEOUT_LOOP_COUNT 100
-#define MAX_CR_LOOP 5
-#define MAX_EQ_LOOP 5
-
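-/*
- * These values follow the DPCD LINK_BW_SET encoding, where the link
- * rate is the register value times 0.27 Gbps: 0x06 -> 1.62 Gbps and
- * 0x0a -> 2.70 Gbps.
- */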
-enum link_rate_type {
- LINK_RATE_1_62GBPS = 0x06,
- LINK_RATE_2_70GBPS = 0x0a
-};
-
-enum link_lane_count_type {
- LANE_COUNT1 = 1,
- LANE_COUNT2 = 2,
- LANE_COUNT4 = 4
-};
-
-enum link_training_state {
- START,
- CLOCK_RECOVERY,
- EQUALIZER_TRAINING,
- FINISHED,
- FAILED
-};
-
-enum voltage_swing_level {
- VOLTAGE_LEVEL_0,
- VOLTAGE_LEVEL_1,
- VOLTAGE_LEVEL_2,
- VOLTAGE_LEVEL_3,
-};
-
-enum pre_emphasis_level {
- PRE_EMPHASIS_LEVEL_0,
- PRE_EMPHASIS_LEVEL_1,
- PRE_EMPHASIS_LEVEL_2,
- PRE_EMPHASIS_LEVEL_3,
-};
-
-enum pattern_set {
- PRBS7,
- D10_2,
- TRAINING_PTN1,
- TRAINING_PTN2,
- DP_NONE
-};
-
-enum color_space {
- COLOR_RGB,
- COLOR_YCBCR422,
- COLOR_YCBCR444
-};
-
-enum color_depth {
- COLOR_6,
- COLOR_8,
- COLOR_10,
- COLOR_12
-};
-
-enum color_coefficient {
- COLOR_YCBCR601,
- COLOR_YCBCR709
-};
-
-enum dynamic_range {
- VESA,
- CEA
-};
-
-enum pll_status {
- PLL_UNLOCKED,
- PLL_LOCKED
-};
-
-enum clock_recovery_m_value_type {
- CALCULATED_M,
- REGISTER_M
-};
-
-enum video_timing_recognition_type {
- VIDEO_TIMING_FROM_CAPTURE,
- VIDEO_TIMING_FROM_REGISTER
-};
-
-enum analog_power_block {
- AUX_BLOCK,
- CH0_BLOCK,
- CH1_BLOCK,
- CH2_BLOCK,
- CH3_BLOCK,
- ANALOG_TOTAL,
- POWER_ALL
-};
-
-enum dp_irq_type {
- DP_IRQ_TYPE_HP_CABLE_IN,
- DP_IRQ_TYPE_HP_CABLE_OUT,
- DP_IRQ_TYPE_HP_CHANGE,
- DP_IRQ_TYPE_UNKNOWN,
-};
-
-struct video_info {
- char *name;
-
- bool h_sync_polarity;
- bool v_sync_polarity;
- bool interlaced;
-
- enum color_space color_space;
- enum dynamic_range dynamic_range;
- enum color_coefficient ycbcr_coeff;
- enum color_depth color_depth;
-
- enum link_rate_type link_rate;
- enum link_lane_count_type lane_count;
-};
-
-struct link_train {
- int eq_loop;
- int cr_loop[4];
-
- u8 link_rate;
- u8 lane_count;
- u8 training_lane[4];
-
- enum link_training_state lt_state;
-};
-
-struct exynos_dp_device {
- struct drm_encoder encoder;
- struct device *dev;
- struct drm_device *drm_dev;
- struct drm_connector connector;
- struct drm_panel *panel;
- struct drm_bridge *bridge;
- struct drm_bridge *ptn_bridge;
- struct clk *clock;
- unsigned int irq;
- void __iomem *reg_base;
-
- struct video_info *video_info;
- struct link_train link_train;
- struct work_struct hotplug_work;
- struct phy *phy;
- int dpms_mode;
- int hpd_gpio;
- struct videomode vm;
-};
-
-/* exynos_dp_reg.c */
-void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable);
-void exynos_dp_stop_video(struct exynos_dp_device *dp);
-void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable);
-void exynos_dp_init_analog_param(struct exynos_dp_device *dp);
-void exynos_dp_init_interrupt(struct exynos_dp_device *dp);
-void exynos_dp_reset(struct exynos_dp_device *dp);
-void exynos_dp_swreset(struct exynos_dp_device *dp);
-void exynos_dp_config_interrupt(struct exynos_dp_device *dp);
-enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp);
-void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable);
-void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
- enum analog_power_block block,
- bool enable);
-void exynos_dp_init_analog_func(struct exynos_dp_device *dp);
-void exynos_dp_init_hpd(struct exynos_dp_device *dp);
-enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp);
-void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp);
-void exynos_dp_reset_aux(struct exynos_dp_device *dp);
-void exynos_dp_init_aux(struct exynos_dp_device *dp);
-int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp);
-void exynos_dp_enable_sw_function(struct exynos_dp_device *dp);
-int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp);
-int exynos_dp_write_byte_to_dpcd(struct exynos_dp_device *dp,
- unsigned int reg_addr,
- unsigned char data);
-int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp,
- unsigned int reg_addr,
- unsigned char *data);
-int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp,
- unsigned int reg_addr,
- unsigned int count,
- unsigned char data[]);
-int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp,
- unsigned int reg_addr,
- unsigned int count,
- unsigned char data[]);
-int exynos_dp_select_i2c_device(struct exynos_dp_device *dp,
- unsigned int device_addr,
- unsigned int reg_addr);
-int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp,
- unsigned int device_addr,
- unsigned int reg_addr,
- unsigned int *data);
-int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp,
- unsigned int device_addr,
- unsigned int reg_addr,
- unsigned int count,
- unsigned char edid[]);
-void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype);
-void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype);
-void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count);
-void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count);
-void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable);
-void exynos_dp_set_training_pattern(struct exynos_dp_device *dp,
- enum pattern_set pattern);
-void exynos_dp_set_lane0_pre_emphasis(struct exynos_dp_device *dp, u32 level);
-void exynos_dp_set_lane1_pre_emphasis(struct exynos_dp_device *dp, u32 level);
-void exynos_dp_set_lane2_pre_emphasis(struct exynos_dp_device *dp, u32 level);
-void exynos_dp_set_lane3_pre_emphasis(struct exynos_dp_device *dp, u32 level);
-void exynos_dp_set_lane0_link_training(struct exynos_dp_device *dp,
- u32 training_lane);
-void exynos_dp_set_lane1_link_training(struct exynos_dp_device *dp,
- u32 training_lane);
-void exynos_dp_set_lane2_link_training(struct exynos_dp_device *dp,
- u32 training_lane);
-void exynos_dp_set_lane3_link_training(struct exynos_dp_device *dp,
- u32 training_lane);
-u32 exynos_dp_get_lane0_link_training(struct exynos_dp_device *dp);
-u32 exynos_dp_get_lane1_link_training(struct exynos_dp_device *dp);
-u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp);
-u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp);
-void exynos_dp_reset_macro(struct exynos_dp_device *dp);
-void exynos_dp_init_video(struct exynos_dp_device *dp);
-
-void exynos_dp_set_video_color_format(struct exynos_dp_device *dp);
-int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp);
-void exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp,
- enum clock_recovery_m_value_type type,
- u32 m_value,
- u32 n_value);
-void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type);
-void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable);
-void exynos_dp_start_video(struct exynos_dp_device *dp);
-int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp);
-void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp);
-void exynos_dp_enable_scrambling(struct exynos_dp_device *dp);
-void exynos_dp_disable_scrambling(struct exynos_dp_device *dp);
-
-/* I2C EDID Chip ID, Slave Address */
-#define I2C_EDID_DEVICE_ADDR 0x50
-#define I2C_E_EDID_DEVICE_ADDR 0x30
-
-#define EDID_BLOCK_LENGTH 0x80
-#define EDID_HEADER_PATTERN 0x00
-#define EDID_EXTENSION_FLAG 0x7e
-#define EDID_CHECKSUM 0x7f
-
-/* DP_MAX_LANE_COUNT */
-#define DPCD_ENHANCED_FRAME_CAP(x) (((x) >> 7) & 0x1)
-#define DPCD_MAX_LANE_COUNT(x) ((x) & 0x1f)
-
-/* DP_LANE_COUNT_SET */
-#define DPCD_LANE_COUNT_SET(x) ((x) & 0x1f)
-
-/* DP_TRAINING_LANE0_SET */
-#define DPCD_PRE_EMPHASIS_SET(x) (((x) & 0x3) << 3)
-#define DPCD_PRE_EMPHASIS_GET(x) (((x) >> 3) & 0x3)
-#define DPCD_VOLTAGE_SWING_SET(x) (((x) & 0x3) << 0)
-#define DPCD_VOLTAGE_SWING_GET(x) (((x) >> 0) & 0x3)
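-
-/*
- * Illustrative decode: a DP_MAX_LANE_COUNT value of 0x84 yields
- * DPCD_ENHANCED_FRAME_CAP(0x84) == 1 and DPCD_MAX_LANE_COUNT(0x84) == 4,
- * i.e. a four-lane sink with enhanced framing support.
- */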
-
-#endif /* _EXYNOS_DP_CORE_H */
diff --git a/drivers/gpu/drm/exynos/exynos_dp_reg.c b/drivers/gpu/drm/exynos/exynos_dp_reg.c
deleted file mode 100644
index c1f87a2a9..000000000
--- a/drivers/gpu/drm/exynos/exynos_dp_reg.c
+++ /dev/null
@@ -1,1263 +0,0 @@
-/*
- * Samsung DP (Display port) register interface driver.
- *
- * Copyright (C) 2012 Samsung Electronics Co., Ltd.
- * Author: Jingoo Han <jg1.han@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-
-#include "exynos_dp_core.h"
-#include "exynos_dp_reg.h"
-
-#define COMMON_INT_MASK_1 0
-#define COMMON_INT_MASK_2 0
-#define COMMON_INT_MASK_3 0
-#define COMMON_INT_MASK_4 (HOTPLUG_CHG | HPD_LOST | PLUG)
-#define INT_STA_MASK INT_HPD
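-
-/*
- * With masks 1-3 left at zero, only the hotplug sources in mask 4 and
- * INT_HPD stay unmasked (0: mask, 1: unmask).
- */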
-
-void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable)
-{
- u32 reg;
-
- if (enable) {
- reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
- reg |= HDCP_VIDEO_MUTE;
- writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
- } else {
- reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
- reg &= ~HDCP_VIDEO_MUTE;
- writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
- }
-}
-
-void exynos_dp_stop_video(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
- reg &= ~VIDEO_EN;
- writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
-}
-
-void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable)
-{
- u32 reg;
-
- if (enable)
- reg = LANE3_MAP_LOGIC_LANE_0 | LANE2_MAP_LOGIC_LANE_1 |
- LANE1_MAP_LOGIC_LANE_2 | LANE0_MAP_LOGIC_LANE_3;
- else
- reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 |
- LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0;
-
- writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP);
-}
-
-void exynos_dp_init_analog_param(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = TX_TERMINAL_CTRL_50_OHM;
- writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1);
-
- reg = SEL_24M | TX_DVDD_BIT_1_0625V;
- writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2);
-
- reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO;
- writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3);
-
- reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
- TX_CUR1_2X | TX_CUR_16_MA;
- writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1);
-
- reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
- CH1_AMP_400_MV | CH0_AMP_400_MV;
- writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL);
-}
-
-void exynos_dp_init_interrupt(struct exynos_dp_device *dp)
-{
- /* Set interrupt pin assertion polarity to high */
- writel(INT_POL1 | INT_POL0, dp->reg_base + EXYNOS_DP_INT_CTL);
-
- /* Clear pending registers */
- writel(0xff, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1);
- writel(0x4f, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_2);
- writel(0xe0, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_3);
- writel(0xe7, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
- writel(0x63, dp->reg_base + EXYNOS_DP_INT_STA);
-
- /* 0: mask, 1: unmask */
- writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1);
- writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2);
- writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3);
- writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4);
- writel(0x00, dp->reg_base + EXYNOS_DP_INT_STA_MASK);
-}
-
-void exynos_dp_reset(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- exynos_dp_stop_video(dp);
- exynos_dp_enable_video_mute(dp, 0);
-
- reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N |
- AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N |
- HDCP_FUNC_EN_N | SW_FUNC_EN_N;
- writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1);
-
- reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N |
- SERDES_FIFO_FUNC_EN_N |
- LS_CLK_DOMAIN_FUNC_EN_N;
- writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
-
- usleep_range(20, 30);
-
- exynos_dp_lane_swap(dp, 0);
-
- writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_1);
- writel(0x40, dp->reg_base + EXYNOS_DP_SYS_CTL_2);
- writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
- writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
-
- writel(0x0, dp->reg_base + EXYNOS_DP_PKT_SEND_CTL);
- writel(0x0, dp->reg_base + EXYNOS_DP_HDCP_CTL);
-
- writel(0x5e, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_L);
- writel(0x1a, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_H);
-
- writel(0x10, dp->reg_base + EXYNOS_DP_LINK_DEBUG_CTL);
-
- writel(0x0, dp->reg_base + EXYNOS_DP_PHY_TEST);
-
- writel(0x0, dp->reg_base + EXYNOS_DP_VIDEO_FIFO_THRD);
- writel(0x20, dp->reg_base + EXYNOS_DP_AUDIO_MARGIN);
-
- writel(0x4, dp->reg_base + EXYNOS_DP_M_VID_GEN_FILTER_TH);
- writel(0x2, dp->reg_base + EXYNOS_DP_M_AUD_GEN_FILTER_TH);
-
- writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
-}
-
-void exynos_dp_swreset(struct exynos_dp_device *dp)
-{
- writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
-}
-
-void exynos_dp_config_interrupt(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- /* 0: mask, 1: unmask */
- reg = COMMON_INT_MASK_1;
- writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1);
-
- reg = COMMON_INT_MASK_2;
- writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2);
-
- reg = COMMON_INT_MASK_3;
- writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3);
-
- reg = COMMON_INT_MASK_4;
- writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4);
-
- reg = INT_STA_MASK;
- writel(reg, dp->reg_base + EXYNOS_DP_INT_STA_MASK);
-}
-
-enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL);
- if (reg & PLL_LOCK)
- return PLL_LOCKED;
- else
- return PLL_UNLOCKED;
-}
-
-void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable)
-{
- u32 reg;
-
- if (enable) {
- reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL);
- reg |= DP_PLL_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL);
- } else {
- reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL);
- reg &= ~DP_PLL_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL);
- }
-}
-
-void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
- enum analog_power_block block,
- bool enable)
-{
- u32 reg;
-
- switch (block) {
- case AUX_BLOCK:
- if (enable) {
- reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
- reg |= AUX_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
- } else {
- reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
- reg &= ~AUX_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
- }
- break;
- case CH0_BLOCK:
- if (enable) {
- reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
- reg |= CH0_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
- } else {
- reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
- reg &= ~CH0_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
- }
- break;
- case CH1_BLOCK:
- if (enable) {
- reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
- reg |= CH1_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
- } else {
- reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
- reg &= ~CH1_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
- }
- break;
- case CH2_BLOCK:
- if (enable) {
- reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
- reg |= CH2_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
- } else {
- reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
- reg &= ~CH2_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
- }
- break;
- case CH3_BLOCK:
- if (enable) {
- reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
- reg |= CH3_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
- } else {
- reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
- reg &= ~CH3_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
- }
- break;
- case ANALOG_TOTAL:
- if (enable) {
- reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
- reg |= DP_PHY_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
- } else {
- reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD);
- reg &= ~DP_PHY_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
- }
- break;
- case POWER_ALL:
- if (enable) {
- reg = DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD |
- CH1_PD | CH0_PD;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD);
- } else {
- writel(0x00, dp->reg_base + EXYNOS_DP_PHY_PD);
- }
- break;
- default:
- break;
- }
-}
-
-void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
-{
- u32 reg;
- int timeout_loop = 0;
-
- exynos_dp_set_analog_power_down(dp, POWER_ALL, 0);
-
- reg = PLL_LOCK_CHG;
- writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1);
-
- reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL);
- reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL);
- writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL);
-
- /* Power up PLL */
- if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
- exynos_dp_set_pll_power_down(dp, 0);
-
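- /*
- * Poll for lock: with DP_TIMEOUT_LOOP_COUNT == 100 and a 10-20 us
- * sleep per iteration, this gives up after roughly 1-2 ms.
- */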
- while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
- timeout_loop++;
- if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
- dev_err(dp->dev, "failed to get pll lock status\n");
- return;
- }
- usleep_range(10, 20);
- }
- }
-
- /* Enable Serdes FIFO function and Link symbol clock domain module */
- reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
- reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
- | AUX_FUNC_EN_N);
- writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
-}
-
-void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- if (gpio_is_valid(dp->hpd_gpio))
- return;
-
- reg = HOTPLUG_CHG | HPD_LOST | PLUG;
- writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
-
- reg = INT_HPD;
- writel(reg, dp->reg_base + EXYNOS_DP_INT_STA);
-}
-
-void exynos_dp_init_hpd(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- if (gpio_is_valid(dp->hpd_gpio))
- return;
-
- exynos_dp_clear_hotplug_interrupts(dp);
-
- reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
- reg &= ~(F_HPD | HPD_CTRL);
- writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
-}
-
-enum dp_irq_type exynos_dp_get_irq_type(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- if (gpio_is_valid(dp->hpd_gpio)) {
- reg = gpio_get_value(dp->hpd_gpio);
- if (reg)
- return DP_IRQ_TYPE_HP_CABLE_IN;
- else
- return DP_IRQ_TYPE_HP_CABLE_OUT;
- } else {
- /* Parse hotplug interrupt status register */
- reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4);
-
- if (reg & PLUG)
- return DP_IRQ_TYPE_HP_CABLE_IN;
-
- if (reg & HPD_LOST)
- return DP_IRQ_TYPE_HP_CABLE_OUT;
-
- if (reg & HOTPLUG_CHG)
- return DP_IRQ_TYPE_HP_CHANGE;
-
- return DP_IRQ_TYPE_UNKNOWN;
- }
-}
-
-void exynos_dp_reset_aux(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- /* Disable AUX channel module */
- reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
- reg |= AUX_FUNC_EN_N;
- writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
-}
-
-void exynos_dp_init_aux(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- /* Clear interrupts related to AUX channel */
- reg = RPLY_RECEIV | AUX_ERR;
- writel(reg, dp->reg_base + EXYNOS_DP_INT_STA);
-
- exynos_dp_reset_aux(dp);
-
- /* Disable AUX transaction H/W retry */
- reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) | AUX_HW_RETRY_COUNT_SEL(0)|
- AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_HW_RETRY_CTL);
-
- /* Receive AUX channel DEFER commands up to DEFER_COUNT * 64 times */
- reg = DEFER_CTRL_EN | DEFER_COUNT(1);
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_DEFER_CTL);
-
- /* Enable AUX channel module */
- reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
- reg &= ~AUX_FUNC_EN_N;
- writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2);
-}
-
-int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- if (gpio_is_valid(dp->hpd_gpio)) {
- if (gpio_get_value(dp->hpd_gpio))
- return 0;
- } else {
- reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
- if (reg & HPD_STATUS)
- return 0;
- }
-
- return -EINVAL;
-}
-
-void exynos_dp_enable_sw_function(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1);
- reg &= ~SW_FUNC_EN_N;
- writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1);
-}
-
-int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp)
-{
- int reg;
- int retval = 0;
- int timeout_loop = 0;
-
- /* Enable AUX CH operation */
- reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
- reg |= AUX_EN;
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
-
- /* Is AUX CH command reply received? */
- reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
- while (!(reg & RPLY_RECEIV)) {
- timeout_loop++;
- if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
- dev_err(dp->dev, "AUX CH command reply failed!\n");
- return -ETIMEDOUT;
- }
- reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
- usleep_range(10, 11);
- }
-
- /* Clear interrupt source for AUX CH command reply */
- writel(RPLY_RECEIV, dp->reg_base + EXYNOS_DP_INT_STA);
-
- /* Clear interrupt source for AUX CH access error */
- reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
- if (reg & AUX_ERR) {
- writel(AUX_ERR, dp->reg_base + EXYNOS_DP_INT_STA);
- return -EREMOTEIO;
- }
-
- /* Check AUX CH error access status */
- reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_STA);
- if ((reg & AUX_STATUS_MASK) != 0) {
- dev_err(dp->dev, "AUX CH error happens: %d\n\n",
- reg & AUX_STATUS_MASK);
- return -EREMOTEIO;
- }
-
- return retval;
-}
-
-int exynos_dp_write_byte_to_dpcd(struct exynos_dp_device *dp,
- unsigned int reg_addr,
- unsigned char data)
-{
- u32 reg;
- int i;
- int retval;
-
- for (i = 0; i < 3; i++) {
- /* Clear AUX CH data buffer */
- reg = BUF_CLR;
- writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
-
- /* Select DPCD device address */
- reg = AUX_ADDR_7_0(reg_addr);
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
- reg = AUX_ADDR_15_8(reg_addr);
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
- reg = AUX_ADDR_19_16(reg_addr);
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
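-
- /*
- * Illustrative address split: the 20-bit DPCD address 0x00206
- * programs 0x06 into AUX_ADDR_7_0, 0x02 into AUX_ADDR_15_8 and
- * 0x0 into AUX_ADDR_19_16.
- */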
-
- /* Write data buffer */
- reg = (unsigned int)data;
- writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0);
-
- /*
- * Set DisplayPort transaction and write 1 byte
- * If bit 3 is 1, DisplayPort transaction.
- * If bit 3 is 0, I2C transaction.
- */
- reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
-
- /* Start AUX transaction */
- retval = exynos_dp_start_aux_transaction(dp);
- if (retval == 0)
- break;
- else
- dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
- __func__);
- }
-
- return retval;
-}
-
-int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp,
- unsigned int reg_addr,
- unsigned char *data)
-{
- u32 reg;
- int i;
- int retval;
-
- for (i = 0; i < 3; i++) {
- /* Clear AUX CH data buffer */
- reg = BUF_CLR;
- writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
-
- /* Select DPCD device address */
- reg = AUX_ADDR_7_0(reg_addr);
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
- reg = AUX_ADDR_15_8(reg_addr);
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
- reg = AUX_ADDR_19_16(reg_addr);
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
-
- /*
- * Set DisplayPort transaction and read 1 byte
- * If bit 3 is 1, DisplayPort transaction.
- * If bit 3 is 0, I2C transaction.
- */
- reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
-
- /* Start AUX transaction */
- retval = exynos_dp_start_aux_transaction(dp);
- if (retval == 0)
- break;
- else
- dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
- __func__);
- }
-
- /* Read data buffer */
- reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0);
- *data = (unsigned char)(reg & 0xff);
-
- return retval;
-}
-
-int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp,
- unsigned int reg_addr,
- unsigned int count,
- unsigned char data[])
-{
- u32 reg;
- unsigned int start_offset;
- unsigned int cur_data_count;
- unsigned int cur_data_idx;
- int i;
- int retval = 0;
-
- /* Clear AUX CH data buffer */
- reg = BUF_CLR;
- writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
-
- start_offset = 0;
- while (start_offset < count) {
- /* Buffer size of AUX CH is 16 * 4 bytes */
- if ((count - start_offset) > 16)
- cur_data_count = 16;
- else
- cur_data_count = count - start_offset;
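-
- /*
- * For example, a 20-byte transfer is issued as one 16-byte chunk
- * followed by one 4-byte chunk.
- */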
-
- for (i = 0; i < 3; i++) {
- /* Select DPCD device address */
- reg = AUX_ADDR_7_0(reg_addr + start_offset);
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
- reg = AUX_ADDR_15_8(reg_addr + start_offset);
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
- reg = AUX_ADDR_19_16(reg_addr + start_offset);
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
-
- for (cur_data_idx = 0; cur_data_idx < cur_data_count;
- cur_data_idx++) {
- reg = data[start_offset + cur_data_idx];
- writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0
- + 4 * cur_data_idx);
- }
-
- /*
- * Set DisplayPort transaction and write
- * If bit 3 is 1, DisplayPort transaction.
- * If bit 3 is 0, I2C transaction.
- */
- reg = AUX_LENGTH(cur_data_count) |
- AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE;
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
-
- /* Start AUX transaction */
- retval = exynos_dp_start_aux_transaction(dp);
- if (retval == 0)
- break;
- else
- dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
- __func__);
- }
-
- start_offset += cur_data_count;
- }
-
- return retval;
-}
-
-int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp,
- unsigned int reg_addr,
- unsigned int count,
- unsigned char data[])
-{
- u32 reg;
- unsigned int start_offset;
- unsigned int cur_data_count;
- unsigned int cur_data_idx;
- int i;
- int retval = 0;
-
- /* Clear AUX CH data buffer */
- reg = BUF_CLR;
- writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
-
- start_offset = 0;
- while (start_offset < count) {
- /* Buffer size of AUX CH is 16 * 4 bytes */
- if ((count - start_offset) > 16)
- cur_data_count = 16;
- else
- cur_data_count = count - start_offset;
-
- /* AUX CH Request Transaction process */
- for (i = 0; i < 3; i++) {
- /* Select DPCD device address */
- reg = AUX_ADDR_7_0(reg_addr + start_offset);
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
- reg = AUX_ADDR_15_8(reg_addr + start_offset);
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
- reg = AUX_ADDR_19_16(reg_addr + start_offset);
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
-
- /*
- * Set DisplayPort transaction and read
- * If bit 3 is 1, DisplayPort transaction.
- * If bit 3 is 0, I2C transaction.
- */
- reg = AUX_LENGTH(cur_data_count) |
- AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ;
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
-
- /* Start AUX transaction */
- retval = exynos_dp_start_aux_transaction(dp);
- if (retval == 0)
- break;
- else
- dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
- __func__);
- }
-
- for (cur_data_idx = 0; cur_data_idx < cur_data_count;
- cur_data_idx++) {
- reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0
- + 4 * cur_data_idx);
- data[start_offset + cur_data_idx] =
- (unsigned char)reg;
- }
-
- start_offset += cur_data_count;
- }
-
- return retval;
-}
-
-int exynos_dp_select_i2c_device(struct exynos_dp_device *dp,
- unsigned int device_addr,
- unsigned int reg_addr)
-{
- u32 reg;
- int retval;
-
- /* Set EDID device address */
- reg = device_addr;
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0);
- writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8);
- writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16);
-
- /* Set offset from base address of EDID device */
- writel(reg_addr, dp->reg_base + EXYNOS_DP_BUF_DATA_0);
-
- /*
- * Set I2C transaction and write address
- * If bit 3 is 1, DisplayPort transaction.
- * If bit 3 is 0, I2C transaction.
- */
- reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT |
- AUX_TX_COMM_WRITE;
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
-
- /* Start AUX transaction */
- retval = exynos_dp_start_aux_transaction(dp);
- if (retval != 0)
- dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__);
-
- return retval;
-}
-
-int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp,
- unsigned int device_addr,
- unsigned int reg_addr,
- unsigned int *data)
-{
- u32 reg;
- int i;
- int retval;
-
- for (i = 0; i < 3; i++) {
- /* Clear AUX CH data buffer */
- reg = BUF_CLR;
- writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
-
- /* Select EDID device */
- retval = exynos_dp_select_i2c_device(dp, device_addr, reg_addr);
- if (retval != 0)
- continue;
-
- /*
- * Set I2C transaction and read data
- * If bit 3 is 1, DisplayPort transaction.
- * If bit 3 is 0, I2C transaction.
- */
- reg = AUX_TX_COMM_I2C_TRANSACTION |
- AUX_TX_COMM_READ;
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1);
-
- /* Start AUX transaction */
- retval = exynos_dp_start_aux_transaction(dp);
- if (retval == 0)
- break;
- else
- dev_dbg(dp->dev, "%s: Aux Transaction fail!\n",
- __func__);
- }
-
- /* Read data */
- if (retval == 0)
- *data = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0);
-
- return retval;
-}
-
-int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp,
- unsigned int device_addr,
- unsigned int reg_addr,
- unsigned int count,
- unsigned char edid[])
-{
- u32 reg;
- unsigned int i, j;
- unsigned int cur_data_idx;
- unsigned int defer = 0;
- int retval = 0;
-
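- /*
- * EDID is fetched in 16-byte bursts, so a 128-byte base block
- * (EDID_BLOCK_LENGTH) takes eight AUX read transactions.
- */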
- for (i = 0; i < count; i += 16) {
- for (j = 0; j < 3; j++) {
- /* Clear AUX CH data buffer */
- reg = BUF_CLR;
- writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL);
-
- /* Set normal AUX CH command */
- reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
- reg &= ~ADDR_ONLY;
- writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2);
-
- /*
- * If Rx sends a defer, Tx sends only the read
- * request without resending the address
- */
- if (!defer)
- retval = exynos_dp_select_i2c_device(dp,
- device_addr, reg_addr + i);
- else
- defer = 0;
-
- if (retval == 0) {
- /*
- * Set I2C transaction and write data
- * If bit 3 is 1, DisplayPort transaction.
- * If bit 3 is 0, I2C transaction.
- */
- reg = AUX_LENGTH(16) |
- AUX_TX_COMM_I2C_TRANSACTION |
- AUX_TX_COMM_READ;
- writel(reg, dp->reg_base +
- EXYNOS_DP_AUX_CH_CTL_1);
-
- /* Start AUX transaction */
- retval = exynos_dp_start_aux_transaction(dp);
- if (retval == 0)
- break;
- else
- dev_dbg(dp->dev,
- "%s: Aux Transaction fail!\n",
- __func__);
- }
- /* Check if Rx sends defer */
- reg = readl(dp->reg_base + EXYNOS_DP_AUX_RX_COMM);
- if (reg == AUX_RX_COMM_AUX_DEFER ||
- reg == AUX_RX_COMM_I2C_DEFER) {
- dev_err(dp->dev, "Defer: %d\n\n", reg);
- defer = 1;
- }
- }
-
- for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) {
- reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0
- + 4 * cur_data_idx);
- edid[i + cur_data_idx] = (unsigned char)reg;
- }
- }
-
- return retval;
-}
-
-void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype)
-{
- u32 reg;
-
- reg = bwtype;
- if ((bwtype == LINK_RATE_2_70GBPS) || (bwtype == LINK_RATE_1_62GBPS))
- writel(reg, dp->reg_base + EXYNOS_DP_LINK_BW_SET);
-}
-
-void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_LINK_BW_SET);
- *bwtype = reg;
-}
-
-void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count)
-{
- u32 reg;
-
- reg = count;
- writel(reg, dp->reg_base + EXYNOS_DP_LANE_COUNT_SET);
-}
-
-void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_LANE_COUNT_SET);
- *count = reg;
-}
-
-void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable)
-{
- u32 reg;
-
- if (enable) {
- reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4);
- reg |= ENHANCED;
- writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
- } else {
- reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4);
- reg &= ~ENHANCED;
- writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
- }
-}
-
-void exynos_dp_set_training_pattern(struct exynos_dp_device *dp,
- enum pattern_set pattern)
-{
- u32 reg;
-
- switch (pattern) {
- case PRBS7:
- reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7;
- writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
- break;
- case D10_2:
- reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_D10_2;
- writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
- break;
- case TRAINING_PTN1:
- reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1;
- writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
- break;
- case TRAINING_PTN2:
- reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2;
- writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
- break;
- case DP_NONE:
- reg = SCRAMBLING_ENABLE |
- LINK_QUAL_PATTERN_SET_DISABLE |
- SW_TRAINING_PATTERN_SET_NORMAL;
- writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
- break;
- default:
- break;
- }
-}
-
-void exynos_dp_set_lane0_pre_emphasis(struct exynos_dp_device *dp, u32 level)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
- reg &= ~PRE_EMPHASIS_SET_MASK;
- reg |= level << PRE_EMPHASIS_SET_SHIFT;
- writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
-}
-
-void exynos_dp_set_lane1_pre_emphasis(struct exynos_dp_device *dp, u32 level)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
- reg &= ~PRE_EMPHASIS_SET_MASK;
- reg |= level << PRE_EMPHASIS_SET_SHIFT;
- writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
-}
-
-void exynos_dp_set_lane2_pre_emphasis(struct exynos_dp_device *dp, u32 level)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
- reg &= ~PRE_EMPHASIS_SET_MASK;
- reg |= level << PRE_EMPHASIS_SET_SHIFT;
- writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
-}
-
-void exynos_dp_set_lane3_pre_emphasis(struct exynos_dp_device *dp, u32 level)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
- reg &= ~PRE_EMPHASIS_SET_MASK;
- reg |= level << PRE_EMPHASIS_SET_SHIFT;
- writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
-}
-
-void exynos_dp_set_lane0_link_training(struct exynos_dp_device *dp,
- u32 training_lane)
-{
- u32 reg;
-
- reg = training_lane;
- writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
-}
-
-void exynos_dp_set_lane1_link_training(struct exynos_dp_device *dp,
- u32 training_lane)
-{
- u32 reg;
-
- reg = training_lane;
- writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
-}
-
-void exynos_dp_set_lane2_link_training(struct exynos_dp_device *dp,
- u32 training_lane)
-{
- u32 reg;
-
- reg = training_lane;
- writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
-}
-
-void exynos_dp_set_lane3_link_training(struct exynos_dp_device *dp,
- u32 training_lane)
-{
- u32 reg;
-
- reg = training_lane;
- writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
-}
-
-u32 exynos_dp_get_lane0_link_training(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL);
- return reg;
-}
-
-u32 exynos_dp_get_lane1_link_training(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL);
- return reg;
-}
-
-u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL);
- return reg;
-}
-
-u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL);
- return reg;
-}
-
-void exynos_dp_reset_macro(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_PHY_TEST);
- reg |= MACRO_RST;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST);
-
- /* 10 us is the minimum reset time. */
- usleep_range(10, 20);
-
- reg &= ~MACRO_RST;
- writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST);
-}
-
-void exynos_dp_init_video(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG;
- writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1);
-
- reg = 0x0;
- writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1);
-
- reg = CHA_CRI(4) | CHA_CTRL;
- writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2);
-
- reg = 0x0;
- writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
-
- reg = VID_HRES_TH(2) | VID_VRES_TH(0);
- writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_8);
-}
-
-void exynos_dp_set_video_color_format(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- /* Configure the input color depth, color space, dynamic range */
- reg = (dp->video_info->dynamic_range << IN_D_RANGE_SHIFT) |
- (dp->video_info->color_depth << IN_BPC_SHIFT) |
- (dp->video_info->color_space << IN_COLOR_F_SHIFT);
- writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_2);
-
- /* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */
- reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_3);
- reg &= ~IN_YC_COEFFI_MASK;
- if (dp->video_info->ycbcr_coeff)
- reg |= IN_YC_COEFFI_ITU709;
- else
- reg |= IN_YC_COEFFI_ITU601;
- writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_3);
-}
-
-int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1);
- writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1);
-
- reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1);
-
- if (!(reg & DET_STA)) {
- dev_dbg(dp->dev, "Input stream clock not detected.\n");
- return -EINVAL;
- }
-
- reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2);
- writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2);
-
- reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2);
- dev_dbg(dp->dev, "wait SYS_CTL_2.\n");
-
- if (reg & CHA_STA) {
- dev_dbg(dp->dev, "Input stream clk is changing\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-void exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp,
- enum clock_recovery_m_value_type type,
- u32 m_value,
- u32 n_value)
-{
- u32 reg;
-
- if (type == REGISTER_M) {
- reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4);
- reg |= FIX_M_VID;
- writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
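-
- /*
- * The 24-bit M value is split across three byte-wide registers:
- * for example, m_value == 0x012345 stores 0x45 in M_VID_0, 0x23 in
- * M_VID_1 and 0x01 in M_VID_2.
- */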
- reg = m_value & 0xff;
- writel(reg, dp->reg_base + EXYNOS_DP_M_VID_0);
- reg = (m_value >> 8) & 0xff;
- writel(reg, dp->reg_base + EXYNOS_DP_M_VID_1);
- reg = (m_value >> 16) & 0xff;
- writel(reg, dp->reg_base + EXYNOS_DP_M_VID_2);
-
- reg = n_value & 0xff;
- writel(reg, dp->reg_base + EXYNOS_DP_N_VID_0);
- reg = (n_value >> 8) & 0xff;
- writel(reg, dp->reg_base + EXYNOS_DP_N_VID_1);
- reg = (n_value >> 16) & 0xff;
- writel(reg, dp->reg_base + EXYNOS_DP_N_VID_2);
- } else {
- reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4);
- reg &= ~FIX_M_VID;
- writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4);
-
- writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_0);
- writel(0x80, dp->reg_base + EXYNOS_DP_N_VID_1);
- writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_2);
- }
-}
-
-void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type)
-{
- u32 reg;
-
- if (type == VIDEO_TIMING_FROM_CAPTURE) {
- reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
- reg &= ~FORMAT_SEL;
- writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
- } else {
- reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
- reg |= FORMAT_SEL;
- writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
- }
-}
-
-void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable)
-{
- u32 reg;
-
- if (enable) {
- reg = readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
- reg &= ~VIDEO_MODE_MASK;
- reg |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE;
- writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
- } else {
- reg = readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
- reg &= ~VIDEO_MODE_MASK;
- reg |= VIDEO_MODE_SLAVE_MODE;
- writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
- }
-}
-
-void exynos_dp_start_video(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
- reg |= VIDEO_EN;
- writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1);
-}
-
-int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
- writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3);
-
- reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3);
- if (!(reg & STRM_VALID)) {
- dev_dbg(dp->dev, "Input video stream is not detected.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1);
- reg &= ~(MASTER_VID_FUNC_EN_N|SLAVE_VID_FUNC_EN_N);
- reg |= MASTER_VID_FUNC_EN_N;
- writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1);
-
- reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
- reg &= ~INTERACE_SCAN_CFG;
- reg |= (dp->video_info->interlaced << 2);
- writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
-
- reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
- reg &= ~VSYNC_POLARITY_CFG;
- reg |= (dp->video_info->v_sync_polarity << 1);
- writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
-
- reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
- reg &= ~HSYNC_POLARITY_CFG;
- reg |= (dp->video_info->h_sync_polarity << 0);
- writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10);
-
- reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE;
- writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
-}
-
-void exynos_dp_enable_scrambling(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
- reg &= ~SCRAMBLING_DISABLE;
- writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
-}
-
-void exynos_dp_disable_scrambling(struct exynos_dp_device *dp)
-{
- u32 reg;
-
- reg = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
- reg |= SCRAMBLING_DISABLE;
- writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 011211e41..edbd98ff2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -15,7 +15,6 @@
#include <drm/drmP.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
-#include "exynos_drm_fbdev.h"
static LIST_HEAD(exynos_drm_subdrv_list);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index e36579c1c..785ffa6cc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -157,9 +157,8 @@ err_crtc:
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct exynos_drm_private *private = dev->dev_private;
- struct exynos_drm_crtc *exynos_crtc =
- to_exynos_crtc(private->crtc[pipe]);
+ struct exynos_drm_crtc *exynos_crtc = exynos_drm_crtc_from_pipe(dev,
+ pipe);
if (exynos_crtc->ops->enable_vblank)
return exynos_crtc->ops->enable_vblank(exynos_crtc);
@@ -169,9 +168,8 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
- struct exynos_drm_private *private = dev->dev_private;
- struct exynos_drm_crtc *exynos_crtc =
- to_exynos_crtc(private->crtc[pipe]);
+ struct exynos_drm_crtc *exynos_crtc = exynos_drm_crtc_from_pipe(dev,
+ pipe);
if (exynos_crtc->ops->disable_vblank)
exynos_crtc->ops->disable_vblank(exynos_crtc);
@@ -235,20 +233,15 @@ void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc,
unsigned long flags;
spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
e = exynos_crtc->event;
if (e && e->base.file_priv == file) {
exynos_crtc->event = NULL;
- /*
- * event will be destroyed by core part
- * so below line should be removed later with core changes
- */
- e->base.destroy(&e->base);
- /*
- * event_space will be increased by core part
- * so below line should be removed later with core changes.
- */
- file->event_space += sizeof(e->event);
atomic_dec(&exynos_crtc->pending_update);
}
+
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ if (e && e->base.file_priv == file)
+ drm_event_cancel_free(crtc->dev, &e->base);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 75e570f45..5e38e749a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -15,6 +15,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_atomic_helper.h>
+#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>
#include <video/of_videomode.h>
@@ -164,67 +165,6 @@ static const struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-/* of_* functions will be removed after merge of of_graph patches */
-static struct device_node *
-of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg)
-{
- struct device_node *np;
-
- for_each_child_of_node(parent, np) {
- u32 r;
-
- if (!np->name || of_node_cmp(np->name, name))
- continue;
-
- if (of_property_read_u32(np, "reg", &r) < 0)
- r = 0;
-
- if (reg == r)
- break;
- }
-
- return np;
-}
-
-static struct device_node *of_graph_get_port_by_reg(struct device_node *parent,
- u32 reg)
-{
- struct device_node *ports, *port;
-
- ports = of_get_child_by_name(parent, "ports");
- if (ports)
- parent = ports;
-
- port = of_get_child_by_name_reg(parent, "port", reg);
-
- of_node_put(ports);
-
- return port;
-}
-
-static struct device_node *
-of_graph_get_endpoint_by_reg(struct device_node *port, u32 reg)
-{
- return of_get_child_by_name_reg(port, "endpoint", reg);
-}
-
-static struct device_node *
-of_graph_get_remote_port_parent(const struct device_node *node)
-{
- struct device_node *np;
- unsigned int depth;
-
- np = of_parse_phandle(node, "remote-endpoint", 0);
-
- /* Walk 3 levels up only if there is 'ports' node. */
- for (depth = 3; depth && np; depth--) {
- np = of_get_next_parent(np);
- if (depth == 2 && of_node_cmp(np->name, "ports"))
- break;
- }
- return np;
-}
-
enum {
FIMD_PORT_IN0,
FIMD_PORT_IN1,
@@ -237,12 +177,7 @@ static struct device_node *exynos_dpi_of_find_panel_node(struct device *dev)
{
struct device_node *np, *ep;
- np = of_graph_get_port_by_reg(dev->of_node, FIMD_PORT_RGB);
- if (!np)
- return NULL;
-
- ep = of_graph_get_endpoint_by_reg(np, 0);
- of_node_put(np);
+ ep = of_graph_get_endpoint_by_regs(dev->of_node, FIMD_PORT_RGB, 0);
if (!ep)
return NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 5344940c8..2dd820e23 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -212,13 +212,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
*/
dev->irq_enabled = true;
- /*
- * with vblank_disable_allowed = true, vblank interrupt will be disabled
- * by drm timer once a current process gives up ownership of
- * vblank event.(after drm_vblank_put function is called)
- */
- dev->vblank_disable_allowed = true;
-
/* init kms poll for handling hpd */
drm_kms_helper_poll_init(dev);
@@ -270,7 +263,7 @@ static int commit_is_pending(struct exynos_drm_private *priv, u32 crtcs)
}
int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
- bool async)
+ bool nonblock)
{
struct exynos_drm_private *priv = dev->dev_private;
struct exynos_atomic_commit *commit;
@@ -308,7 +301,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
drm_atomic_helper_swap_state(dev, state);
- if (async)
+ if (nonblock)
schedule_work(&commit->work);
else
exynos_atomic_commit_complete(commit);
@@ -418,7 +411,7 @@ static struct drm_driver exynos_drm_driver = {
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
- .gem_free_object = exynos_drm_gem_free_object,
+ .gem_free_object_unlocked = exynos_drm_gem_free_object,
.gem_vm_ops = &exynos_drm_gem_vm_ops,
.dumb_create = exynos_drm_gem_dumb_create,
.dumb_map_offset = exynos_drm_gem_dumb_map_offset,
@@ -431,6 +424,7 @@ static struct drm_driver exynos_drm_driver = {
.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
.gem_prime_vmap = exynos_drm_gem_prime_vmap,
.gem_prime_vunmap = exynos_drm_gem_prime_vunmap,
+ .gem_prime_mmap = exynos_drm_gem_prime_mmap,
.ioctls = exynos_ioctls,
.num_ioctls = ARRAY_SIZE(exynos_ioctls),
.fops = &exynos_drm_driver_fops,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 502f750ba..cc33ec929 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -120,8 +120,6 @@ struct exynos_drm_plane_config {
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
- * @wait_for_vblank: wait for vblank interrupt to make sure that
- * hardware overlay is updated.
* @atomic_check: validate state
* @atomic_begin: prepare device to receive an update
* @atomic_flush: mark the end of device update
@@ -129,10 +127,6 @@ struct exynos_drm_plane_config {
* @disable_plane: disable hardware specific overlay.
* @te_handler: trigger to transfer video image at the tearing effect
* synchronization signal if there is a page flip request.
- * @clock_enable: optional function enabling/disabling display domain clock,
- * called from exynos-dp driver before powering up (with
- * 'enable' argument as true) and after powering down (with
- * 'enable' as false).
*/
struct exynos_drm_crtc;
struct exynos_drm_crtc_ops {
@@ -141,7 +135,6 @@ struct exynos_drm_crtc_ops {
void (*commit)(struct exynos_drm_crtc *crtc);
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
- void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
int (*atomic_check)(struct exynos_drm_crtc *crtc,
struct drm_crtc_state *state);
void (*atomic_begin)(struct exynos_drm_crtc *crtc);
@@ -151,7 +144,10 @@ struct exynos_drm_crtc_ops {
struct exynos_drm_plane *plane);
void (*atomic_flush)(struct exynos_drm_crtc *crtc);
void (*te_handler)(struct exynos_drm_crtc *crtc);
- void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
+};
+
+struct exynos_drm_clk {
+ void (*enable)(struct exynos_drm_clk *clk, bool enable);
};
/*
@@ -182,8 +178,16 @@ struct exynos_drm_crtc {
atomic_t pending_update;
const struct exynos_drm_crtc_ops *ops;
void *ctx;
+ struct exynos_drm_clk *pipe_clk;
};
+static inline void exynos_drm_pipe_clk_enable(struct exynos_drm_crtc *crtc,
+ bool enable)
+{
+ if (crtc->pipe_clk)
+ crtc->pipe_clk->enable(crtc->pipe_clk, enable);
+}
+
struct exynos_drm_g2d_private {
struct device *dev;
struct list_head inuse_cmdlist;
@@ -232,6 +236,14 @@ struct exynos_drm_private {
wait_queue_head_t wait;
};
+static inline struct exynos_drm_crtc *
+exynos_drm_crtc_from_pipe(struct drm_device *dev, int pipe)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+
+ return to_exynos_crtc(private->crtc[pipe]);
+}
+
static inline struct device *to_dma_dev(struct drm_device *dev)
{
struct exynos_drm_private *priv = dev->dev_private;
@@ -296,7 +308,7 @@ static inline int exynos_dpi_bind(struct drm_device *dev,
#endif
int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
- bool async);
+ bool nonblock);
extern struct platform_driver fimd_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 63c84a106..601ecf800 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -280,7 +280,7 @@ struct exynos_dsi {
spinlock_t transfer_lock; /* protects transfer_list */
struct list_head transfer_list;
- struct exynos_dsi_driver_data *driver_data;
+ const struct exynos_dsi_driver_data *driver_data;
struct device_node *bridge_node;
};
@@ -532,15 +532,6 @@ static const struct of_device_id exynos_dsi_of_match[] = {
{ }
};
-static inline struct exynos_dsi_driver_data *exynos_dsi_get_driver_data(
- struct platform_device *pdev)
-{
- const struct of_device_id *of_id =
- of_match_device(exynos_dsi_of_match, &pdev->dev);
-
- return (struct exynos_dsi_driver_data *)of_id->data;
-}
-
static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
{
if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
@@ -564,7 +555,7 @@ static void exynos_dsi_reset(struct exynos_dsi *dsi)
static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s)
{
- struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+ const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
unsigned long best_freq = 0;
u32 min_delta = 0xffffffff;
u8 p_min, p_max;
@@ -618,7 +609,7 @@ static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
unsigned long freq)
{
- struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+ const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
unsigned long fin, fout;
int timeout;
u8 p, s;
@@ -712,7 +703,7 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
{
- struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+ const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
const unsigned int *reg_values = driver_data->reg_values;
u32 reg;
@@ -790,7 +781,7 @@ static void exynos_dsi_enable_lane(struct exynos_dsi *dsi, u32 lane)
static int exynos_dsi_init_link(struct exynos_dsi *dsi)
{
- struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+ const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
int timeout;
u32 reg;
u32 lanes_mask;
@@ -1334,7 +1325,7 @@ static void exynos_dsi_disable_irq(struct exynos_dsi *dsi)
static int exynos_dsi_init(struct exynos_dsi *dsi)
{
- struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+ const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
exynos_dsi_reset(dsi);
exynos_dsi_enable_irq(dsi);
@@ -1641,50 +1632,6 @@ static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
-/* of_* functions will be removed after merge of of_graph patches */
-static struct device_node *
-of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg)
-{
- struct device_node *np;
-
- for_each_child_of_node(parent, np) {
- u32 r;
-
- if (!np->name || of_node_cmp(np->name, name))
- continue;
-
- if (of_property_read_u32(np, "reg", &r) < 0)
- r = 0;
-
- if (reg == r)
- break;
- }
-
- return np;
-}
-
-static struct device_node *of_graph_get_port_by_reg(struct device_node *parent,
- u32 reg)
-{
- struct device_node *ports, *port;
-
- ports = of_get_child_by_name(parent, "ports");
- if (ports)
- parent = ports;
-
- port = of_get_child_by_name_reg(parent, "port", reg);
-
- of_node_put(ports);
-
- return port;
-}
-
-static struct device_node *
-of_graph_get_endpoint_by_reg(struct device_node *port, u32 reg)
-{
- return of_get_child_by_name_reg(port, "endpoint", reg);
-}
-
static int exynos_dsi_of_read_u32(const struct device_node *np,
const char *propname, u32 *out_value)
{
@@ -1706,7 +1653,7 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
{
struct device *dev = dsi->dev;
struct device_node *node = dev->of_node;
- struct device_node *port, *ep;
+ struct device_node *ep;
int ret;
ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
@@ -1714,16 +1661,9 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
if (ret < 0)
return ret;
- port = of_graph_get_port_by_reg(node, DSI_PORT_OUT);
- if (!port) {
- dev_err(dev, "no output port specified\n");
- return -EINVAL;
- }
-
- ep = of_graph_get_endpoint_by_reg(port, 0);
- of_node_put(port);
+ ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0);
if (!ep) {
- dev_err(dev, "no endpoint specified in output port\n");
+ dev_err(dev, "no output port with endpoint specified\n");
return -EINVAL;
}
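The local of_graph helpers deleted above are superseded by the core of_graph_get_endpoint_by_regs() call, which resolves a port/endpoint pair in one step. A hedged sketch of the lookup, with foo_get_out_ep() made up for illustration:

#include <linux/of_graph.h>

static struct device_node *foo_get_out_ep(struct device_node *node)
{
	/* port reg = DSI_PORT_OUT, endpoint reg = 0; returns NULL when
	 * the endpoint is absent, a refcounted node otherwise */
	return of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0);
}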
@@ -1833,7 +1773,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->dsi_host.dev = dev;
dsi->dev = dev;
- dsi->driver_data = exynos_dsi_get_driver_data(pdev);
+ dsi->driver_data = of_device_get_match_data(dev);
ret = exynos_dsi_parse_dt(dsi);
if (ret)
@@ -1917,7 +1857,7 @@ static int __maybe_unused exynos_dsi_suspend(struct device *dev)
{
struct drm_encoder *encoder = dev_get_drvdata(dev);
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
- struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+ const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
int ret, i;
usleep_range(10000, 20000);
@@ -1948,7 +1888,7 @@ static int __maybe_unused exynos_dsi_resume(struct device *dev)
{
struct drm_encoder *encoder = dev_get_drvdata(dev);
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
- struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+ const struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
int ret, i;
ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
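The probe conversion above relies on of_device_get_match_data(), which returns the .data pointer of the matching of_device_id entry, or NULL when nothing matched; the fimd, rotator and hdmi probes below receive the same treatment. A hedged sketch of the resulting pattern, with foo_* names invented for illustration:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_driver_data { unsigned int type; };	/* hypothetical */

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_driver_data *data =
		of_device_get_match_data(&pdev->dev);

	/* unlike the removed helpers, a failed match is survivable */
	if (!data)
		return -ENODEV;

	return 0;
}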
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 81cc5537c..e0166403b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -97,20 +97,9 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
&exynos_fb->exynos_gem[0]->base, handle);
}
-static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
- struct drm_file *file_priv, unsigned flags,
- unsigned color, struct drm_clip_rect *clips,
- unsigned num_clips)
-{
- /* TODO */
-
- return 0;
-}
-
static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
.destroy = exynos_drm_fb_destroy,
.create_handle = exynos_drm_fb_create_handle,
- .dirty = exynos_drm_fb_dirty,
};
struct drm_framebuffer *
@@ -163,8 +152,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
int ret;
for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
- obj = drm_gem_object_lookup(dev, file_priv,
- mode_cmd->handles[i]);
+ obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
if (!obj) {
DRM_ERROR("failed to lookup gem object\n");
ret = -ENOENT;
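The lookups in this file (and in the gem and fbdev code below) track drm_gem_object_lookup() dropping its drm_device argument in 4.7: the handle table lives in the drm_file, so the device was never needed and struct_mutex is not taken. A minimal sketch of the new calling convention (foo_lookup is illustrative):

static int foo_lookup(struct drm_file *file_priv, u32 handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, handle);	/* no dev arg */
	if (!obj)
		return -ENOENT;

	/* the acquired reference is dropped without struct_mutex */
	drm_gem_object_unreference_unlocked(obj);
	return 0;
}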
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 72d7c0b7c..67dcd6831 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -138,8 +138,6 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
- mutex_lock(&dev->struct_mutex);
-
size = mode_cmd.pitches[0] * mode_cmd.height;
exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
@@ -154,10 +152,8 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
size);
}
- if (IS_ERR(exynos_gem)) {
- ret = PTR_ERR(exynos_gem);
- goto out;
- }
+ if (IS_ERR(exynos_gem))
+ return PTR_ERR(exynos_gem);
exynos_fbdev->exynos_gem = exynos_gem;
@@ -173,7 +169,6 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
if (ret < 0)
goto err_destroy_framebuffer;
- mutex_unlock(&dev->struct_mutex);
return ret;
err_destroy_framebuffer:
@@ -181,13 +176,12 @@ err_destroy_framebuffer:
err_destroy_gem:
exynos_drm_gem_destroy(exynos_gem);
-/*
- * if failed, all resources allocated above would be released by
- * drm_mode_config_cleanup() when drm_load() had been called prior
- * to any specific driver such as fimd or hdmi driver.
- */
-out:
- mutex_unlock(&dev->struct_mutex);
+ /*
+ * if this fails, all resources allocated above are released by
+ * drm_mode_config_cleanup(), since drm_load() was called before
+ * any specific driver such as the fimd or hdmi driver.
+ */
+
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 018449f8d..d47216488 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -30,7 +30,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"
@@ -68,10 +67,15 @@
/* color key value register for hardware window 1 ~ 4. */
#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8))
-/* I80 / RGB trigger control register */
+/* I80 trigger control register */
#define TRIGCON 0x1A4
-#define TRGMODE_I80_RGB_ENABLE_I80 (1 << 0)
-#define SWTRGCMD_I80_RGB_ENABLE (1 << 1)
+#define TRGMODE_ENABLE (1 << 0)
+#define SWTRGCMD_ENABLE (1 << 1)
+/* Supported only on Exynos3250, 3472, 4415, 5260, 5410, 5420 and 5422. */
+#define HWTRGEN_ENABLE (1 << 3)
+#define HWTRGMASK_ENABLE (1 << 4)
+/* Supported only on Exynos3250, 3472, 4415, 5260, 5420 and 5422. */
+#define HWTRIGEN_PER_ENABLE (1 << 31)
/* display mode change control register except exynos4 */
#define VIDOUT_CON 0x000
@@ -89,12 +93,16 @@
/* FIMD has totally five hardware windows. */
#define WINDOWS_NR 5
+/* HW trigger flag on i80 panel. */
+#define I80_HW_TRG (1 << 1)
+
struct fimd_driver_data {
unsigned int timing_base;
unsigned int lcdblk_offset;
unsigned int lcdblk_vt_shift;
unsigned int lcdblk_bypass_shift;
unsigned int lcdblk_mic_bypass_shift;
+ unsigned int trg_type;
unsigned int has_shadowcon:1;
unsigned int has_clksel:1;
@@ -102,6 +110,9 @@ struct fimd_driver_data {
unsigned int has_vidoutcon:1;
unsigned int has_vtsel:1;
unsigned int has_mic_bypass:1;
+ unsigned int has_dp_clk:1;
+ unsigned int has_hw_trigger:1;
+ unsigned int has_trigger_per_te:1;
};
static struct fimd_driver_data s3c64xx_fimd_driver_data = {
@@ -114,8 +125,10 @@ static struct fimd_driver_data exynos3_fimd_driver_data = {
.timing_base = 0x20000,
.lcdblk_offset = 0x210,
.lcdblk_bypass_shift = 1,
+ .trg_type = I80_HW_TRG,
.has_shadowcon = 1,
.has_vidoutcon = 1,
+ .has_trigger_per_te = 1,
};
static struct fimd_driver_data exynos4_fimd_driver_data = {
@@ -132,9 +145,11 @@ static struct fimd_driver_data exynos4415_fimd_driver_data = {
.lcdblk_offset = 0x210,
.lcdblk_vt_shift = 10,
.lcdblk_bypass_shift = 1,
+ .trg_type = I80_HW_TRG,
.has_shadowcon = 1,
.has_vidoutcon = 1,
.has_vtsel = 1,
+ .has_trigger_per_te = 1,
};
static struct fimd_driver_data exynos5_fimd_driver_data = {
@@ -145,6 +160,7 @@ static struct fimd_driver_data exynos5_fimd_driver_data = {
.has_shadowcon = 1,
.has_vidoutcon = 1,
.has_vtsel = 1,
+ .has_dp_clk = 1,
};
static struct fimd_driver_data exynos5420_fimd_driver_data = {
@@ -157,6 +173,7 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
.has_vidoutcon = 1,
.has_vtsel = 1,
.has_mic_bypass = 1,
+ .has_dp_clk = 1,
};
struct fimd_context {
@@ -182,8 +199,9 @@ struct fimd_context {
atomic_t win_updated;
atomic_t triggering;
- struct fimd_driver_data *driver_data;
+ const struct fimd_driver_data *driver_data;
struct drm_encoder *encoder;
+ struct exynos_drm_clk dp_clk;
};
static const struct of_device_id fimd_driver_dt_match[] = {
@@ -219,15 +237,6 @@ static const uint32_t fimd_formats[] = {
DRM_FORMAT_ARGB8888,
};
-static inline struct fimd_driver_data *drm_fimd_get_driver_data(
- struct platform_device *pdev)
-{
- const struct of_device_id *of_id =
- of_match_device(fimd_driver_dt_match, &pdev->dev);
-
- return (struct fimd_driver_data *)of_id->data;
-}
-
static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
@@ -383,9 +392,16 @@ static void fimd_clear_channels(struct exynos_drm_crtc *crtc)
static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
const struct drm_display_mode *mode)
{
- unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
+ unsigned long ideal_clk;
u32 clkdiv;
+ if (mode->clock == 0) {
+ DRM_ERROR("Mode has zero clock value.\n");
+ return 0xff;
+ }
+
+ ideal_clk = mode->clock * 1000;
+
if (ctx->i80_if) {
/*
* The frame done interrupt should occur prior to the
@@ -400,11 +416,31 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
return (clkdiv < 0x100) ? clkdiv : 0xff;
}
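With the divider now derived from mode->clock (given in kHz) instead of htotal * vtotal * vrefresh, a zero clock must be rejected before it reaches the division. A worked sketch of the arithmetic, assuming a 133 MHz LCD source clock and a 148500 kHz (1080p60) mode; foo_calc_clkdiv simplifies the driver's math:

static u32 foo_calc_clkdiv(unsigned long lcd_rate,
			   const struct drm_display_mode *mode)
{
	unsigned long ideal_clk;

	if (mode->clock == 0)
		return 0xff;			/* the driver's fallback divider */

	ideal_clk = mode->clock * 1000;		/* kHz -> Hz: 148500000 */

	/* DIV_ROUND_UP(133000000, 148500000) == 1, clamped to 0xff */
	return min_t(u32, DIV_ROUND_UP(lcd_rate, ideal_clk), 0xff);
}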
+static void fimd_setup_trigger(struct fimd_context *ctx)
+{
+ void __iomem *timing_base = ctx->regs + ctx->driver_data->timing_base;
+ u32 trg_type = ctx->driver_data->trg_type;
+ u32 val = readl(timing_base + TRIGCON);
+
+ val &= ~(TRGMODE_ENABLE);
+
+ if (trg_type == I80_HW_TRG) {
+ if (ctx->driver_data->has_hw_trigger)
+ val |= HWTRGEN_ENABLE | HWTRGMASK_ENABLE;
+ if (ctx->driver_data->has_trigger_per_te)
+ val |= HWTRIGEN_PER_ENABLE;
+ } else {
+ val |= TRGMODE_ENABLE;
+ }
+
+ writel(val, timing_base + TRIGCON);
+}
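fimd_setup_trigger() is a textbook read-modify-write of TRIGCON: clear the software-trigger mode bit, then set the hardware-trigger bits the SoC actually has. The generic shape of that pattern, as a sketch:

static void foo_rmw(void __iomem *reg, u32 clear, u32 set)
{
	u32 val = readl(reg);

	val &= ~clear;
	val |= set;
	writel(val, reg);
}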
+
static void fimd_commit(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
- struct fimd_driver_data *driver_data = ctx->driver_data;
+ const struct fimd_driver_data *driver_data = ctx->driver_data;
void *timing_base = ctx->regs + driver_data->timing_base;
u32 val, clkdiv;
@@ -495,6 +531,8 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
VIDTCON2_HOZVAL_E(mode->hdisplay - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
+ fimd_setup_trigger(ctx);
+
/*
* fields of registers with prefix '_F' are updated
* at vsync (same as DMA start)
@@ -827,7 +865,7 @@ static void fimd_disable(struct exynos_drm_crtc *crtc)
static void fimd_trigger(struct device *dev)
{
struct fimd_context *ctx = dev_get_drvdata(dev);
- struct fimd_driver_data *driver_data = ctx->driver_data;
+ const struct fimd_driver_data *driver_data = ctx->driver_data;
void *timing_base = ctx->regs + driver_data->timing_base;
u32 reg;
@@ -842,7 +880,7 @@ static void fimd_trigger(struct device *dev)
atomic_set(&ctx->triggering, 1);
reg = readl(timing_base + TRIGCON);
- reg |= (TRGMODE_I80_RGB_ENABLE_I80 | SWTRGCMD_I80_RGB_ENABLE);
+ reg |= (TRGMODE_ENABLE | SWTRGCMD_ENABLE);
writel(reg, timing_base + TRIGCON);
/*
@@ -856,11 +894,15 @@ static void fimd_trigger(struct device *dev)
static void fimd_te_handler(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
+ u32 trg_type = ctx->driver_data->trg_type;
/* Check whether the CRTC is already detached from the encoder */
if (ctx->pipe < 0 || !ctx->drm_dev)
return;
+ if (trg_type == I80_HW_TRG)
+ goto out;
+
/*
* If there is a page flip request, triggers and handles the page flip
* event so that current fb can be updated into panel GRAM.
@@ -868,6 +910,7 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc)
if (atomic_add_unless(&ctx->win_updated, -1, 0))
fimd_trigger(ctx->dev);
+out:
/* Wakes up vsync event queue */
if (atomic_read(&ctx->wait_vsync_event)) {
atomic_set(&ctx->wait_vsync_event, 0);
@@ -878,21 +921,11 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc)
drm_crtc_handle_vblank(&ctx->crtc->base);
}
-static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
+static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable)
{
- struct fimd_context *ctx = crtc->ctx;
- u32 val;
-
- /*
- * Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE
- * clock. On these SoCs the bootloader may enable it but any
- * power domain off/on will reset it to disable state.
- */
- if (ctx->driver_data != &exynos5_fimd_driver_data &&
- ctx->driver_data != &exynos5420_fimd_driver_data)
- return;
-
- val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
+ struct fimd_context *ctx = container_of(clk, struct fimd_context,
+ dp_clk);
+ u32 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
writel(val, ctx->regs + DP_MIE_CLKCON);
}
@@ -902,13 +935,11 @@ static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
.commit = fimd_commit,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
- .wait_for_vblank = fimd_wait_for_vblank,
.atomic_begin = fimd_atomic_begin,
.update_plane = fimd_update_plane,
.disable_plane = fimd_disable_plane,
.atomic_flush = fimd_atomic_flush,
.te_handler = fimd_te_handler,
- .clock_enable = fimd_dp_clock_enable,
};
static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
@@ -987,6 +1018,11 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(ctx->crtc))
return PTR_ERR(ctx->crtc);
+ if (ctx->driver_data->has_dp_clk) {
+ ctx->dp_clk.enable = fimd_dp_clock_enable;
+ ctx->crtc->pipe_clk = &ctx->dp_clk;
+ }
+
if (ctx->encoder)
exynos_dpi_bind(drm_dev, ctx->encoder);
@@ -1035,7 +1071,7 @@ static int fimd_probe(struct platform_device *pdev)
ctx->dev = dev;
ctx->suspended = true;
- ctx->driver_data = drm_fimd_get_driver_data(pdev);
+ ctx->driver_data = of_device_get_match_data(dev);
if (of_property_read_bool(dev->of_node, "samsung,invert-vden"))
ctx->vidcon1 |= VIDCON1_INV_VDEN;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 193d3602d..8564c3da0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -48,13 +48,13 @@
/* registers for base address */
#define G2D_SRC_BASE_ADDR 0x0304
-#define G2D_SRC_STRIDE_REG 0x0308
+#define G2D_SRC_STRIDE 0x0308
#define G2D_SRC_COLOR_MODE 0x030C
#define G2D_SRC_LEFT_TOP 0x0310
#define G2D_SRC_RIGHT_BOTTOM 0x0314
#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
#define G2D_DST_BASE_ADDR 0x0404
-#define G2D_DST_STRIDE_REG 0x0408
+#define G2D_DST_STRIDE 0x0408
#define G2D_DST_COLOR_MODE 0x040C
#define G2D_DST_LEFT_TOP 0x0410
#define G2D_DST_RIGHT_BOTTOM 0x0414
@@ -383,8 +383,8 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
return;
out:
- exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(to_dma_dev(drm_dev), g2d_userptr->sgt->sgl,
+ g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL);
pages = frame_vector_pages(g2d_userptr->vec);
if (!IS_ERR(pages)) {
@@ -501,10 +501,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
g2d_userptr->sgt = sgt;
- ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
- DMA_BIDIRECTIONAL);
- if (ret < 0) {
+ if (!dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents,
+ DMA_BIDIRECTIONAL)) {
DRM_ERROR("failed to map sgt with dma region.\n");
+ ret = -ENOMEM;
goto err_sg_free_table;
}
@@ -563,7 +563,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
switch (reg_offset) {
case G2D_SRC_BASE_ADDR:
- case G2D_SRC_STRIDE_REG:
+ case G2D_SRC_STRIDE:
case G2D_SRC_COLOR_MODE:
case G2D_SRC_LEFT_TOP:
case G2D_SRC_RIGHT_BOTTOM:
@@ -573,7 +573,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
reg_type = REG_TYPE_SRC_PLANE2;
break;
case G2D_DST_BASE_ADDR:
- case G2D_DST_STRIDE_REG:
+ case G2D_DST_STRIDE:
case G2D_DST_COLOR_MODE:
case G2D_DST_LEFT_TOP:
case G2D_DST_RIGHT_BOTTOM:
@@ -968,8 +968,8 @@ static int g2d_check_reg_offset(struct device *dev,
} else
buf_info->types[reg_type] = BUF_TYPE_GEM;
break;
- case G2D_SRC_STRIDE_REG:
- case G2D_DST_STRIDE_REG:
+ case G2D_SRC_STRIDE:
+ case G2D_DST_STRIDE:
if (for_addr)
goto err;
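The g2d userptr path converted above calls the DMA API directly instead of the removed exynos wrappers; dma_map_sg() returns the number of entries actually mapped and 0 on failure, which the driver translates to -ENOMEM. A self-contained sketch of the pair (foo_* names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int foo_map_sgt(struct device *dev, struct sg_table *sgt)
{
	/* returns the number of mapped entries; 0 signals failure */
	if (!dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL))
		return -ENOMEM;

	return 0;
}

static void foo_unmap_sgt(struct device *dev, struct sg_table *sgt)
{
	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}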
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 2914d62d0..cdf9f1af4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -177,7 +177,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
struct exynos_drm_gem *exynos_gem;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(file_priv, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return 0;
@@ -296,7 +296,7 @@ dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
struct exynos_drm_gem *exynos_gem;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, filp, gem_handle);
+ obj = drm_gem_object_lookup(filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return ERR_PTR(-EINVAL);
@@ -313,7 +313,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
{
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, filp, gem_handle);
+ obj = drm_gem_object_lookup(filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return;
@@ -362,12 +362,9 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_exynos_gem_info *args = data;
struct drm_gem_object *obj;
- mutex_lock(&dev->struct_mutex);
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
- mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
@@ -376,38 +373,11 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
args->flags = exynos_gem->flags;
args->size = exynos_gem->size;
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
- struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- int nents;
-
- mutex_lock(&drm_dev->struct_mutex);
-
- nents = dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
- if (!nents) {
- DRM_ERROR("failed to map sgl with dma.\n");
- mutex_unlock(&drm_dev->struct_mutex);
- return nents;
- }
+ drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&drm_dev->struct_mutex);
return 0;
}
-void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
- struct sg_table *sgt,
- enum dma_data_direction dir)
-{
- dma_unmap_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents, dir);
-}
-
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
exynos_drm_gem_destroy(to_exynos_gem(obj));
@@ -458,27 +428,22 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_gem_object *obj;
int ret = 0;
- mutex_lock(&dev->struct_mutex);
-
/*
* get the offset of the memory allocated for the drm framebuffer;
* this callback is invoked by the user application
* via the DRM_IOCTL_MODE_MAP_DUMB command.
*/
- obj = drm_gem_object_lookup(dev, file_priv, handle);
+ obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
*offset = drm_vma_node_offset_addr(&obj->vma_node);
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
- drm_gem_object_unreference(obj);
-unlock:
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -516,22 +481,12 @@ out:
}
}
-int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
{
- struct exynos_drm_gem *exynos_gem;
- struct drm_gem_object *obj;
+ struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
int ret;
- /* set vm_area_struct. */
- ret = drm_gem_mmap(filp, vma);
- if (ret < 0) {
- DRM_ERROR("failed to mmap.\n");
- return ret;
- }
-
- obj = vma->vm_private_data;
- exynos_gem = to_exynos_gem(obj);
-
DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);
/* non-cacheable by default. */
@@ -556,6 +511,26 @@ err_close_vm:
return ret;
}
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ /* set vm_area_struct. */
+ ret = drm_gem_mmap(filp, vma);
+ if (ret < 0) {
+ DRM_ERROR("failed to mmap.\n");
+ return ret;
+ }
+
+ obj = vma->vm_private_data;
+
+ if (obj->import_attach)
+ return dma_buf_mmap(obj->dma_buf, vma, 0);
+
+ return exynos_drm_gem_mmap_obj(obj, vma);
+}
+
/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
@@ -630,3 +605,15 @@ void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
/* Nothing to do */
}
+
+int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap_obj(obj, obj->size, vma);
+ if (ret < 0)
+ return ret;
+
+ return exynos_drm_gem_mmap_obj(obj, vma);
+}
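Factoring the vma setup into exynos_drm_gem_mmap_obj() lets the file-ops mmap path and the new PRIME hook share one implementation, while imported buffers are forwarded to dma_buf_mmap(). A hedged sketch of how such a hook is typically wired into the drm_driver (only the exynos_drm_gem_prime_* symbols come from this patch; the rest is standard DRM boilerplate):

static struct drm_driver foo_drm_driver = {
	/* ... feature flags and other callbacks elided ... */
	.gem_prime_vmap		= exynos_drm_gem_prime_vmap,
	.gem_prime_vunmap	= exynos_drm_gem_prime_vunmap,
	.gem_prime_mmap		= exynos_drm_gem_prime_mmap,
};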
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 00223052b..781007422 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -121,16 +121,6 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
/* set vm_flags; the vm attributes can still be changed here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-/* map sgt with dma region. */
-int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
- struct sg_table *sgt,
- enum dma_data_direction dir);
-
-/* unmap sgt from dma region. */
-void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
- struct sg_table *sgt,
- enum dma_data_direction dir);
-
/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
@@ -139,5 +129,7 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt);
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj);
void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 50185ac34..77f12c00a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -166,7 +166,7 @@ static void exynos_drm_plane_destroy_state(struct drm_plane *plane,
{
struct exynos_drm_plane_state *old_exynos_state =
to_exynos_plane_state(old_state);
- __drm_atomic_helper_plane_destroy_state(plane, old_state);
+ __drm_atomic_helper_plane_destroy_state(old_state);
kfree(old_exynos_state);
}
@@ -242,7 +242,7 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
state->v_ratio == (1 << 15))
height_ok = true;
- if (width_ok & height_ok)
+ if (width_ok && height_ok)
return 0;
DRM_DEBUG_KMS("scaling mode is not supported");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index f18fbe43f..404367a43 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <drm/drmP.h>
@@ -696,7 +697,6 @@ static int rotator_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rot_context *rot;
struct exynos_drm_ippdrv *ippdrv;
- const struct of_device_id *match;
int ret;
if (!dev->of_node) {
@@ -708,13 +708,8 @@ static int rotator_probe(struct platform_device *pdev)
if (!rot)
return -ENOMEM;
- match = of_match_node(exynos_rotator_match, dev->of_node);
- if (!match) {
- dev_err(dev, "failed to match node\n");
- return -ENODEV;
- }
- rot->limit_tbl = (struct rot_limit_table *)match->data;
-
+ rot->limit_tbl = (struct rot_limit_table *)
+ of_device_get_match_data(dev);
rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rot->regs = devm_ioremap_resource(dev, rot->regs_res);
if (IS_ERR(rot->regs))
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index e148d728e..58de5a430 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -7,9 +7,9 @@
*
* Based on drivers/media/video/s5p-tv/hdmi_drv.c
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
@@ -49,14 +49,16 @@
/* AVI header and aspect ratio */
#define HDMI_AVI_VERSION 0x02
-#define HDMI_AVI_LENGTH 0x0D
+#define HDMI_AVI_LENGTH 0x0d
/* AUI header info */
-#define HDMI_AUI_VERSION 0x01
-#define HDMI_AUI_LENGTH 0x0A
-#define AVI_SAME_AS_PIC_ASPECT_RATIO 0x8
-#define AVI_4_3_CENTER_RATIO 0x9
-#define AVI_16_9_CENTER_RATIO 0xa
+#define HDMI_AUI_VERSION 0x01
+#define HDMI_AUI_LENGTH 0x0a
+
+/* AVI active format aspect ratio */
+#define AVI_SAME_AS_PIC_ASPECT_RATIO 0x08
+#define AVI_4_3_CENTER_RATIO 0x09
+#define AVI_16_9_CENTER_RATIO 0x0a
enum hdmi_type {
HDMI_TYPE13,
@@ -90,11 +92,34 @@ static const char * const supply[] = {
"vdd_pll",
};
+struct hdmiphy_config {
+ int pixel_clock;
+ u8 conf[32];
+};
+
+struct hdmiphy_configs {
+ int count;
+ const struct hdmiphy_config *data;
+};
+
+struct string_array_spec {
+ int count;
+ const char * const *data;
+};
+
+#define INIT_ARRAY_SPEC(a) { .count = ARRAY_SIZE(a), .data = a }
+
struct hdmi_driver_data {
unsigned int type;
- const struct hdmiphy_config *phy_confs;
- unsigned int phy_conf_count;
unsigned int is_apb_phy:1;
+ unsigned int has_sysreg:1;
+ struct hdmiphy_configs phy_confs;
+ struct string_array_spec clk_gates;
+ /*
+ * Array of triplets (p_off, p_on, clock), where p_off and p_on are
+ * the required parents of clock when the HDMI PHY is off or on,
+ * respectively.
+ */
+ struct string_array_spec clk_muxes;
};
struct hdmi_context {
@@ -116,13 +141,12 @@ struct hdmi_context {
struct gpio_desc *hpd_gpio;
int irq;
struct regmap *pmureg;
- struct clk *hdmi;
- struct clk *sclk_hdmi;
- struct clk *sclk_pixel;
- struct clk *sclk_hdmiphy;
- struct clk *mout_hdmi;
+ struct regmap *sysreg;
+ struct clk **clk_gates;
+ struct clk **clk_muxes;
struct regulator_bulk_data regul_bulk[ARRAY_SIZE(supply)];
struct regulator *reg_hdmi_en;
+ struct exynos_drm_clk phy_clk;
};
static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e)
@@ -135,12 +159,6 @@ static inline struct hdmi_context *connector_to_hdmi(struct drm_connector *c)
return container_of(c, struct hdmi_context, connector);
}
-struct hdmiphy_config {
- int pixel_clock;
- u8 conf[32];
-};
-
-/* list of phy config settings */
static const struct hdmiphy_config hdmiphy_v13_configs[] = {
{
.pixel_clock = 27000000,
@@ -501,25 +519,136 @@ static const struct hdmiphy_config hdmiphy_5420_configs[] = {
},
};
-static struct hdmi_driver_data exynos5420_hdmi_driver_data = {
+static const struct hdmiphy_config hdmiphy_5433_configs[] = {
+ {
+ .pixel_clock = 27000000,
+ .conf = {
+ 0x01, 0x51, 0x22, 0x51, 0x08, 0xfc, 0x88, 0x46,
+ 0x72, 0x50, 0x24, 0x0c, 0x24, 0x0f, 0x7c, 0xa5,
+ 0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
+ 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+ },
+ },
+ {
+ .pixel_clock = 27027000,
+ .conf = {
+ 0x01, 0x51, 0x2d, 0x72, 0x64, 0x09, 0x88, 0xc3,
+ 0x71, 0x50, 0x24, 0x14, 0x24, 0x0f, 0x7c, 0xa5,
+ 0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
+ 0x28, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+ },
+ },
+ {
+ .pixel_clock = 40000000,
+ .conf = {
+ 0x01, 0x51, 0x32, 0x55, 0x01, 0x00, 0x88, 0x02,
+ 0x4d, 0x50, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+ 0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+ 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+ },
+ },
+ {
+ .pixel_clock = 50000000,
+ .conf = {
+ 0x01, 0x51, 0x34, 0x40, 0x64, 0x09, 0x88, 0xc3,
+ 0x3d, 0x50, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+ 0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+ 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+ },
+ },
+ {
+ .pixel_clock = 65000000,
+ .conf = {
+ 0x01, 0x51, 0x36, 0x31, 0x40, 0x10, 0x04, 0xc6,
+ 0x2e, 0xe8, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+ 0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+ 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+ },
+ },
+ {
+ .pixel_clock = 74176000,
+ .conf = {
+ 0x01, 0x51, 0x3E, 0x35, 0x5B, 0xDE, 0x88, 0x42,
+ 0x53, 0x51, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+ 0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+ 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+ },
+ },
+ {
+ .pixel_clock = 74250000,
+ .conf = {
+ 0x01, 0x51, 0x3E, 0x35, 0x40, 0xF0, 0x88, 0xC2,
+ 0x52, 0x51, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+ 0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+ 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+ },
+ },
+ {
+ .pixel_clock = 108000000,
+ .conf = {
+ 0x01, 0x51, 0x2d, 0x15, 0x01, 0x00, 0x88, 0x02,
+ 0x72, 0x52, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC,
+ 0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30,
+ 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40,
+ },
+ },
+ {
+ .pixel_clock = 148500000,
+ .conf = {
+ 0x01, 0x51, 0x1f, 0x00, 0x40, 0xf8, 0x88, 0xc1,
+ 0x52, 0x52, 0x24, 0x0c, 0x24, 0x0f, 0x7c, 0xa5,
+ 0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30,
+ 0x08, 0x10, 0x01, 0x01, 0x48, 0x4a, 0x00, 0x40,
+ },
+ },
+};
+
+static const char * const hdmi_clk_gates4[] = {
+ "hdmi", "sclk_hdmi"
+};
+
+static const char * const hdmi_clk_muxes4[] = {
+ "sclk_pixel", "sclk_hdmiphy", "mout_hdmi"
+};
+
+static const char * const hdmi_clk_gates5433[] = {
+ "hdmi_pclk", "hdmi_i_pclk", "i_tmds_clk", "i_pixel_clk", "i_spdif_clk"
+};
+
+static const char * const hdmi_clk_muxes5433[] = {
+ "oscclk", "tmds_clko", "tmds_clko_user",
+ "oscclk", "pixel_clko", "pixel_clko_user"
+};
+
+static const struct hdmi_driver_data exynos4210_hdmi_driver_data = {
+ .type = HDMI_TYPE13,
+ .phy_confs = INIT_ARRAY_SPEC(hdmiphy_v13_configs),
+ .clk_gates = INIT_ARRAY_SPEC(hdmi_clk_gates4),
+ .clk_muxes = INIT_ARRAY_SPEC(hdmi_clk_muxes4),
+};
+
+static const struct hdmi_driver_data exynos4212_hdmi_driver_data = {
.type = HDMI_TYPE14,
- .phy_confs = hdmiphy_5420_configs,
- .phy_conf_count = ARRAY_SIZE(hdmiphy_5420_configs),
- .is_apb_phy = 1,
+ .phy_confs = INIT_ARRAY_SPEC(hdmiphy_v14_configs),
+ .clk_gates = INIT_ARRAY_SPEC(hdmi_clk_gates4),
+ .clk_muxes = INIT_ARRAY_SPEC(hdmi_clk_muxes4),
};
-static struct hdmi_driver_data exynos4212_hdmi_driver_data = {
+static const struct hdmi_driver_data exynos5420_hdmi_driver_data = {
.type = HDMI_TYPE14,
- .phy_confs = hdmiphy_v14_configs,
- .phy_conf_count = ARRAY_SIZE(hdmiphy_v14_configs),
- .is_apb_phy = 0,
+ .is_apb_phy = 1,
+ .phy_confs = INIT_ARRAY_SPEC(hdmiphy_5420_configs),
+ .clk_gates = INIT_ARRAY_SPEC(hdmi_clk_gates4),
+ .clk_muxes = INIT_ARRAY_SPEC(hdmi_clk_muxes4),
};
-static struct hdmi_driver_data exynos4210_hdmi_driver_data = {
- .type = HDMI_TYPE13,
- .phy_confs = hdmiphy_v13_configs,
- .phy_conf_count = ARRAY_SIZE(hdmiphy_v13_configs),
- .is_apb_phy = 0,
+static const struct hdmi_driver_data exynos5433_hdmi_driver_data = {
+ .type = HDMI_TYPE14,
+ .is_apb_phy = 1,
+ .has_sysreg = 1,
+ .phy_confs = INIT_ARRAY_SPEC(hdmiphy_5433_configs),
+ .clk_gates = INIT_ARRAY_SPEC(hdmi_clk_gates5433),
+ .clk_muxes = INIT_ARRAY_SPEC(hdmi_clk_muxes5433),
};
static inline u32 hdmi_map_reg(struct hdmi_context *hdata, u32 reg_id)
@@ -585,266 +714,52 @@ static int hdmiphy_reg_write_buf(struct hdmi_context *hdata,
}
}
-static void hdmi_v13_regs_dump(struct hdmi_context *hdata, char *prefix)
+static int hdmi_clk_enable_gates(struct hdmi_context *hdata)
{
-#define DUMPREG(reg_id) \
- DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
- readl(hdata->regs + reg_id))
- DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
- DUMPREG(HDMI_INTC_FLAG);
- DUMPREG(HDMI_INTC_CON);
- DUMPREG(HDMI_HPD_STATUS);
- DUMPREG(HDMI_V13_PHY_RSTOUT);
- DUMPREG(HDMI_V13_PHY_VPLL);
- DUMPREG(HDMI_V13_PHY_CMU);
- DUMPREG(HDMI_V13_CORE_RSTOUT);
-
- DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
- DUMPREG(HDMI_CON_0);
- DUMPREG(HDMI_CON_1);
- DUMPREG(HDMI_CON_2);
- DUMPREG(HDMI_SYS_STATUS);
- DUMPREG(HDMI_V13_PHY_STATUS);
- DUMPREG(HDMI_STATUS_EN);
- DUMPREG(HDMI_HPD);
- DUMPREG(HDMI_MODE_SEL);
- DUMPREG(HDMI_V13_HPD_GEN);
- DUMPREG(HDMI_V13_DC_CONTROL);
- DUMPREG(HDMI_V13_VIDEO_PATTERN_GEN);
-
- DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
- DUMPREG(HDMI_H_BLANK_0);
- DUMPREG(HDMI_H_BLANK_1);
- DUMPREG(HDMI_V13_V_BLANK_0);
- DUMPREG(HDMI_V13_V_BLANK_1);
- DUMPREG(HDMI_V13_V_BLANK_2);
- DUMPREG(HDMI_V13_H_V_LINE_0);
- DUMPREG(HDMI_V13_H_V_LINE_1);
- DUMPREG(HDMI_V13_H_V_LINE_2);
- DUMPREG(HDMI_VSYNC_POL);
- DUMPREG(HDMI_INT_PRO_MODE);
- DUMPREG(HDMI_V13_V_BLANK_F_0);
- DUMPREG(HDMI_V13_V_BLANK_F_1);
- DUMPREG(HDMI_V13_V_BLANK_F_2);
- DUMPREG(HDMI_V13_H_SYNC_GEN_0);
- DUMPREG(HDMI_V13_H_SYNC_GEN_1);
- DUMPREG(HDMI_V13_H_SYNC_GEN_2);
- DUMPREG(HDMI_V13_V_SYNC_GEN_1_0);
- DUMPREG(HDMI_V13_V_SYNC_GEN_1_1);
- DUMPREG(HDMI_V13_V_SYNC_GEN_1_2);
- DUMPREG(HDMI_V13_V_SYNC_GEN_2_0);
- DUMPREG(HDMI_V13_V_SYNC_GEN_2_1);
- DUMPREG(HDMI_V13_V_SYNC_GEN_2_2);
- DUMPREG(HDMI_V13_V_SYNC_GEN_3_0);
- DUMPREG(HDMI_V13_V_SYNC_GEN_3_1);
- DUMPREG(HDMI_V13_V_SYNC_GEN_3_2);
-
- DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
- DUMPREG(HDMI_TG_CMD);
- DUMPREG(HDMI_TG_H_FSZ_L);
- DUMPREG(HDMI_TG_H_FSZ_H);
- DUMPREG(HDMI_TG_HACT_ST_L);
- DUMPREG(HDMI_TG_HACT_ST_H);
- DUMPREG(HDMI_TG_HACT_SZ_L);
- DUMPREG(HDMI_TG_HACT_SZ_H);
- DUMPREG(HDMI_TG_V_FSZ_L);
- DUMPREG(HDMI_TG_V_FSZ_H);
- DUMPREG(HDMI_TG_VSYNC_L);
- DUMPREG(HDMI_TG_VSYNC_H);
- DUMPREG(HDMI_TG_VSYNC2_L);
- DUMPREG(HDMI_TG_VSYNC2_H);
- DUMPREG(HDMI_TG_VACT_ST_L);
- DUMPREG(HDMI_TG_VACT_ST_H);
- DUMPREG(HDMI_TG_VACT_SZ_L);
- DUMPREG(HDMI_TG_VACT_SZ_H);
- DUMPREG(HDMI_TG_FIELD_CHG_L);
- DUMPREG(HDMI_TG_FIELD_CHG_H);
- DUMPREG(HDMI_TG_VACT_ST2_L);
- DUMPREG(HDMI_TG_VACT_ST2_H);
- DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
- DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
- DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
- DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
- DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
- DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
- DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
- DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
-#undef DUMPREG
+ int i, ret;
+
+ for (i = 0; i < hdata->drv_data->clk_gates.count; ++i) {
+ ret = clk_prepare_enable(hdata->clk_gates[i]);
+ if (!ret)
+ continue;
+
+ dev_err(hdata->dev, "Cannot enable clock '%s', %d\n",
+ hdata->drv_data->clk_gates.data[i], ret);
+ while (i--)
+ clk_disable_unprepare(hdata->clk_gates[i]);
+ return ret;
+ }
+
+ return 0;
}
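hdmi_clk_enable_gates() uses the standard partial-unwind idiom: on the first clk_prepare_enable() failure it disables, in reverse order, only the clocks already enabled. The bare shape of the idiom, as a sketch:

static int foo_enable_all(struct clk **clks, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = clk_prepare_enable(clks[i]);
		if (ret)
			goto unwind;
	}

	return 0;

unwind:
	while (i--)			/* i indexes the failed clock */
		clk_disable_unprepare(clks[i]);
	return ret;
}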
-static void hdmi_v14_regs_dump(struct hdmi_context *hdata, char *prefix)
+static void hdmi_clk_disable_gates(struct hdmi_context *hdata)
{
- int i;
+ int i = hdata->drv_data->clk_gates.count;
-#define DUMPREG(reg_id) \
- DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
- readl(hdata->regs + reg_id))
-
- DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
- DUMPREG(HDMI_INTC_CON);
- DUMPREG(HDMI_INTC_FLAG);
- DUMPREG(HDMI_HPD_STATUS);
- DUMPREG(HDMI_INTC_CON_1);
- DUMPREG(HDMI_INTC_FLAG_1);
- DUMPREG(HDMI_PHY_STATUS_0);
- DUMPREG(HDMI_PHY_STATUS_PLL);
- DUMPREG(HDMI_PHY_CON_0);
- DUMPREG(HDMI_V14_PHY_RSTOUT);
- DUMPREG(HDMI_PHY_VPLL);
- DUMPREG(HDMI_PHY_CMU);
- DUMPREG(HDMI_CORE_RSTOUT);
-
- DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
- DUMPREG(HDMI_CON_0);
- DUMPREG(HDMI_CON_1);
- DUMPREG(HDMI_CON_2);
- DUMPREG(HDMI_SYS_STATUS);
- DUMPREG(HDMI_PHY_STATUS_0);
- DUMPREG(HDMI_STATUS_EN);
- DUMPREG(HDMI_HPD);
- DUMPREG(HDMI_MODE_SEL);
- DUMPREG(HDMI_ENC_EN);
- DUMPREG(HDMI_DC_CONTROL);
- DUMPREG(HDMI_VIDEO_PATTERN_GEN);
-
- DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
- DUMPREG(HDMI_H_BLANK_0);
- DUMPREG(HDMI_H_BLANK_1);
- DUMPREG(HDMI_V2_BLANK_0);
- DUMPREG(HDMI_V2_BLANK_1);
- DUMPREG(HDMI_V1_BLANK_0);
- DUMPREG(HDMI_V1_BLANK_1);
- DUMPREG(HDMI_V_LINE_0);
- DUMPREG(HDMI_V_LINE_1);
- DUMPREG(HDMI_H_LINE_0);
- DUMPREG(HDMI_H_LINE_1);
- DUMPREG(HDMI_HSYNC_POL);
-
- DUMPREG(HDMI_VSYNC_POL);
- DUMPREG(HDMI_INT_PRO_MODE);
- DUMPREG(HDMI_V_BLANK_F0_0);
- DUMPREG(HDMI_V_BLANK_F0_1);
- DUMPREG(HDMI_V_BLANK_F1_0);
- DUMPREG(HDMI_V_BLANK_F1_1);
-
- DUMPREG(HDMI_H_SYNC_START_0);
- DUMPREG(HDMI_H_SYNC_START_1);
- DUMPREG(HDMI_H_SYNC_END_0);
- DUMPREG(HDMI_H_SYNC_END_1);
-
- DUMPREG(HDMI_V_SYNC_LINE_BEF_2_0);
- DUMPREG(HDMI_V_SYNC_LINE_BEF_2_1);
- DUMPREG(HDMI_V_SYNC_LINE_BEF_1_0);
- DUMPREG(HDMI_V_SYNC_LINE_BEF_1_1);
-
- DUMPREG(HDMI_V_SYNC_LINE_AFT_2_0);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_2_1);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_1_0);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_1_1);
-
- DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_0);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_2_1);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_0);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_1_1);
-
- DUMPREG(HDMI_V_BLANK_F2_0);
- DUMPREG(HDMI_V_BLANK_F2_1);
- DUMPREG(HDMI_V_BLANK_F3_0);
- DUMPREG(HDMI_V_BLANK_F3_1);
- DUMPREG(HDMI_V_BLANK_F4_0);
- DUMPREG(HDMI_V_BLANK_F4_1);
- DUMPREG(HDMI_V_BLANK_F5_0);
- DUMPREG(HDMI_V_BLANK_F5_1);
-
- DUMPREG(HDMI_V_SYNC_LINE_AFT_3_0);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_3_1);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_4_0);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_4_1);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_5_0);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_5_1);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_6_0);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_6_1);
-
- DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_0);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_3_1);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_0);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_4_1);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_0);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_5_1);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_0);
- DUMPREG(HDMI_V_SYNC_LINE_AFT_PXL_6_1);
-
- DUMPREG(HDMI_VACT_SPACE_1_0);
- DUMPREG(HDMI_VACT_SPACE_1_1);
- DUMPREG(HDMI_VACT_SPACE_2_0);
- DUMPREG(HDMI_VACT_SPACE_2_1);
- DUMPREG(HDMI_VACT_SPACE_3_0);
- DUMPREG(HDMI_VACT_SPACE_3_1);
- DUMPREG(HDMI_VACT_SPACE_4_0);
- DUMPREG(HDMI_VACT_SPACE_4_1);
- DUMPREG(HDMI_VACT_SPACE_5_0);
- DUMPREG(HDMI_VACT_SPACE_5_1);
- DUMPREG(HDMI_VACT_SPACE_6_0);
- DUMPREG(HDMI_VACT_SPACE_6_1);
-
- DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
- DUMPREG(HDMI_TG_CMD);
- DUMPREG(HDMI_TG_H_FSZ_L);
- DUMPREG(HDMI_TG_H_FSZ_H);
- DUMPREG(HDMI_TG_HACT_ST_L);
- DUMPREG(HDMI_TG_HACT_ST_H);
- DUMPREG(HDMI_TG_HACT_SZ_L);
- DUMPREG(HDMI_TG_HACT_SZ_H);
- DUMPREG(HDMI_TG_V_FSZ_L);
- DUMPREG(HDMI_TG_V_FSZ_H);
- DUMPREG(HDMI_TG_VSYNC_L);
- DUMPREG(HDMI_TG_VSYNC_H);
- DUMPREG(HDMI_TG_VSYNC2_L);
- DUMPREG(HDMI_TG_VSYNC2_H);
- DUMPREG(HDMI_TG_VACT_ST_L);
- DUMPREG(HDMI_TG_VACT_ST_H);
- DUMPREG(HDMI_TG_VACT_SZ_L);
- DUMPREG(HDMI_TG_VACT_SZ_H);
- DUMPREG(HDMI_TG_FIELD_CHG_L);
- DUMPREG(HDMI_TG_FIELD_CHG_H);
- DUMPREG(HDMI_TG_VACT_ST2_L);
- DUMPREG(HDMI_TG_VACT_ST2_H);
- DUMPREG(HDMI_TG_VACT_ST3_L);
- DUMPREG(HDMI_TG_VACT_ST3_H);
- DUMPREG(HDMI_TG_VACT_ST4_L);
- DUMPREG(HDMI_TG_VACT_ST4_H);
- DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
- DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
- DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
- DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
- DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
- DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
- DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
- DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
- DUMPREG(HDMI_TG_3D);
-
- DRM_DEBUG_KMS("%s: ---- PACKET REGISTERS ----\n", prefix);
- DUMPREG(HDMI_AVI_CON);
- DUMPREG(HDMI_AVI_HEADER0);
- DUMPREG(HDMI_AVI_HEADER1);
- DUMPREG(HDMI_AVI_HEADER2);
- DUMPREG(HDMI_AVI_CHECK_SUM);
- DUMPREG(HDMI_VSI_CON);
- DUMPREG(HDMI_VSI_HEADER0);
- DUMPREG(HDMI_VSI_HEADER1);
- DUMPREG(HDMI_VSI_HEADER2);
- for (i = 0; i < 7; ++i)
- DUMPREG(HDMI_VSI_DATA(i));
-
-#undef DUMPREG
+ while (i--)
+ clk_disable_unprepare(hdata->clk_gates[i]);
}
-static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
+static int hdmi_clk_set_parents(struct hdmi_context *hdata, bool to_phy)
{
- if (hdata->drv_data->type == HDMI_TYPE13)
- hdmi_v13_regs_dump(hdata, prefix);
- else
- hdmi_v14_regs_dump(hdata, prefix);
+ struct device *dev = hdata->dev;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < hdata->drv_data->clk_muxes.count; i += 3) {
+ struct clk **c = &hdata->clk_muxes[i];
+
+ ret = clk_set_parent(c[2], c[to_phy]);
+ if (!ret)
+ continue;
+
+ dev_err(dev, "Cannot set clock parent of '%s' to '%s', %d\n",
+ hdata->drv_data->clk_muxes.data[i + 2],
+ hdata->drv_data->clk_muxes.data[i + to_phy], ret);
+ }
+
+ return ret;
}
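hdmi_clk_set_parents() consumes clk_muxes three entries at a time: c[0] is the parent while the PHY is off, c[1] while it is on, and c[2] the mux itself, so c[to_phy] selects the parent via bool-to-int promotion. Worked through for the Exynos4 table above, as a sketch:

/*
 * { "sclk_pixel", "sclk_hdmiphy", "mout_hdmi" }:
 *	to_phy == false -> clk_set_parent(mout_hdmi, sclk_pixel)
 *	to_phy == true  -> clk_set_parent(mout_hdmi, sclk_hdmiphy)
 */
static int foo_set_mux(struct clk **c, bool to_phy)
{
	return clk_set_parent(c[2], c[to_phy]);	/* bool promotes to 0/1 */
}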
static u8 hdmi_chksum(struct hdmi_context *hdata,
@@ -993,10 +908,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
{
+ const struct hdmiphy_configs *confs = &hdata->drv_data->phy_confs;
int i;
- for (i = 0; i < hdata->drv_data->phy_conf_count; i++)
- if (hdata->drv_data->phy_confs[i].pixel_clock == pixel_clock)
+ for (i = 0; i < confs->count; i++)
+ if (confs->data[i].pixel_clock == pixel_clock)
return i;
DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
@@ -1078,13 +994,11 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
mode_ok = hdmi_mode_valid(connector, adjusted_mode);
- /* just return if user desired mode exists. */
if (mode_ok == MODE_OK)
return true;
/*
- * otherwise, find the most suitable mode among modes and change it
- * to adjusted_mode.
+ * Find the most suitable mode and copy it to adjusted_mode.
*/
list_for_each_entry(m, &connector->modes, head) {
mode_ok = hdmi_mode_valid(connector, m);
@@ -1129,15 +1043,15 @@ static void hdmi_audio_init(struct hdmi_context *hdata)
switch (bits_per_sample) {
case 20:
data_num = 2;
- bit_ch = 1;
+ bit_ch = 1;
break;
case 24:
data_num = 3;
- bit_ch = 1;
+ bit_ch = 1;
break;
default:
data_num = 1;
- bit_ch = 0;
+ bit_ch = 0;
break;
}
@@ -1230,13 +1144,12 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
/* choose HDMI mode */
hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
- /* Apply Video preable and Guard band in HDMI mode only */
+ /* apply video preamble and guard band in HDMI mode only */
hdmi_reg_writeb(hdata, HDMI_CON_2, 0);
/* disable bluescreen */
hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
if (hdata->dvi_mode) {
- /* choose DVI mode */
hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
HDMI_MODE_DVI_EN, HDMI_MODE_MASK);
hdmi_reg_writeb(hdata, HDMI_CON_2,
@@ -1308,7 +1221,7 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
val = (m->hsync_start - m->hdisplay - 2);
val |= ((m->hsync_end - m->hdisplay - 2) << 10);
- val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
+ val |= ((m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0)<<20;
hdmi_reg_writev(hdata, HDMI_V13_H_SYNC_GEN_0, 3, val);
/*
@@ -1319,7 +1232,6 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
/* The following values and calculations differ between mode types */
if (m->flags & DRM_MODE_FLAG_INTERLACE) {
- /* Interlaced Mode */
val = ((m->vsync_end - m->vdisplay) / 2);
val |= ((m->vsync_start - m->vdisplay) / 2) << 12;
hdmi_reg_writev(hdata, HDMI_V13_V_SYNC_GEN_1_0, 3, val);
@@ -1348,8 +1260,6 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x249);
} else {
- /* Progressive Mode */
-
val = m->vtotal;
val |= (m->vtotal - m->vdisplay) << 11;
hdmi_reg_writev(hdata, HDMI_V13_V_BLANK_0, 3, val);
@@ -1365,21 +1275,12 @@ static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
m->vtotal - m->vdisplay);
hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
- hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
}
- /* Timing generator registers */
hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
- hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
- hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
- hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
- hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
- hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
- hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
- hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
}
static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
@@ -1390,7 +1291,7 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
hdmi_reg_writev(hdata, HDMI_V_LINE_0, 2, m->vtotal);
hdmi_reg_writev(hdata, HDMI_H_LINE_0, 2, m->htotal);
hdmi_reg_writev(hdata, HDMI_HSYNC_POL, 1,
- (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
+ (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
hdmi_reg_writev(hdata, HDMI_VSYNC_POL, 1,
(m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
hdmi_reg_writev(hdata, HDMI_INT_PRO_MODE, 1,
@@ -1404,7 +1305,6 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
/* The following values and calculations differ between mode types */
if (m->flags & DRM_MODE_FLAG_INTERLACE) {
- /* Interlaced Mode */
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
(m->vsync_end - m->vdisplay) / 2);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
@@ -1437,7 +1337,6 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x0);
hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x0);
} else {
- /* Progressive Mode */
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_2_0, 2,
m->vsync_end - m->vdisplay);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_BEF_1_0, 2,
@@ -1454,15 +1353,8 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
hdmi_reg_writev(hdata, HDMI_TG_VACT_ST_L, 2,
m->vtotal - m->vdisplay);
hdmi_reg_writev(hdata, HDMI_TG_VACT_SZ_L, 2, m->vdisplay);
- hdmi_reg_writev(hdata, HDMI_TG_VACT_ST2_L, 2, 0x248);
- hdmi_reg_writev(hdata, HDMI_TG_VACT_ST3_L, 2, 0x47b);
- hdmi_reg_writev(hdata, HDMI_TG_VACT_ST4_L, 2, 0x6ae);
- hdmi_reg_writev(hdata, HDMI_TG_VSYNC2_L, 2, 0x233);
- hdmi_reg_writev(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, 2, 0x233);
- hdmi_reg_writev(hdata, HDMI_TG_FIELD_BOT_HDMI_L, 2, 0x233);
}
- /* Following values & calculations are same irrespective of mode type */
hdmi_reg_writev(hdata, HDMI_H_SYNC_START_0, 2,
m->hsync_start - m->hdisplay - 2);
hdmi_reg_writev(hdata, HDMI_H_SYNC_END_0, 2,
@@ -1486,16 +1378,12 @@ static void hdmi_v14_mode_apply(struct hdmi_context *hdata)
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_5_0, 2, 0xffff);
hdmi_reg_writev(hdata, HDMI_V_SYNC_LINE_AFT_PXL_6_0, 2, 0xffff);
- /* Timing generator registers */
hdmi_reg_writev(hdata, HDMI_TG_H_FSZ_L, 2, m->htotal);
hdmi_reg_writev(hdata, HDMI_TG_HACT_ST_L, 2, m->htotal - m->hdisplay);
hdmi_reg_writev(hdata, HDMI_TG_HACT_SZ_L, 2, m->hdisplay);
hdmi_reg_writev(hdata, HDMI_TG_V_FSZ_L, 2, m->vtotal);
- hdmi_reg_writev(hdata, HDMI_TG_VSYNC_L, 2, 0x1);
- hdmi_reg_writev(hdata, HDMI_TG_FIELD_CHG_L, 2, 0x233);
- hdmi_reg_writev(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, 2, 0x1);
- hdmi_reg_writev(hdata, HDMI_TG_FIELD_TOP_HDMI_L, 2, 0x1);
- hdmi_reg_writev(hdata, HDMI_TG_3D, 1, 0x0);
+ if (hdata->drv_data == &exynos5433_hdmi_driver_data)
+ hdmi_reg_writeb(hdata, HDMI_TG_DECON_EN, 1);
}
static void hdmi_mode_apply(struct hdmi_context *hdata)
@@ -1505,62 +1393,64 @@ static void hdmi_mode_apply(struct hdmi_context *hdata)
else
hdmi_v14_mode_apply(hdata);
- hdmiphy_wait_for_pll(hdata);
-
- clk_set_parent(hdata->mout_hdmi, hdata->sclk_hdmiphy);
-
- /* enable HDMI and timing generator */
hdmi_start(hdata, true);
}
static void hdmiphy_conf_reset(struct hdmi_context *hdata)
{
- clk_set_parent(hdata->mout_hdmi, hdata->sclk_pixel);
-
- /* reset hdmiphy */
+ hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, 0, 1);
+ usleep_range(10000, 12000);
+ hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, ~0, 1);
+ usleep_range(10000, 12000);
hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
usleep_range(10000, 12000);
- hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
+ hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
usleep_range(10000, 12000);
}
+static void hdmiphy_enable_mode_set(struct hdmi_context *hdata, bool enable)
+{
+ u8 v = enable ? HDMI_PHY_ENABLE_MODE_SET : HDMI_PHY_DISABLE_MODE_SET;
+
+ if (hdata->drv_data == &exynos5433_hdmi_driver_data)
+ writel(v, hdata->regs_hdmiphy + HDMIPHY5433_MODE_SET_DONE);
+}
+
static void hdmiphy_conf_apply(struct hdmi_context *hdata)
{
int ret;
- int i;
+ const u8 *phy_conf;
- /* pixel clock */
- i = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000);
- if (i < 0) {
+ ret = hdmi_find_phy_conf(hdata, hdata->current_mode.clock * 1000);
+ if (ret < 0) {
DRM_ERROR("failed to find hdmiphy conf\n");
return;
}
+ phy_conf = hdata->drv_data->phy_confs.data[ret].conf;
+
+ hdmi_clk_set_parents(hdata, false);
+
+ hdmiphy_conf_reset(hdata);
- ret = hdmiphy_reg_write_buf(hdata, 0,
- hdata->drv_data->phy_confs[i].conf, 32);
+ hdmiphy_enable_mode_set(hdata, true);
+ ret = hdmiphy_reg_write_buf(hdata, 0, phy_conf, 32);
if (ret) {
DRM_ERROR("failed to configure hdmiphy\n");
return;
}
-
+ hdmiphy_enable_mode_set(hdata, false);
+ hdmi_clk_set_parents(hdata, true);
usleep_range(10000, 12000);
+ hdmiphy_wait_for_pll(hdata);
}
static void hdmi_conf_apply(struct hdmi_context *hdata)
{
- hdmiphy_conf_reset(hdata);
- hdmiphy_conf_apply(hdata);
-
hdmi_start(hdata, false);
hdmi_conf_init(hdata);
-
hdmi_audio_init(hdata);
-
- /* setting core registers */
hdmi_mode_apply(hdata);
hdmi_audio_control(hdata, true);
-
- hdmi_regs_dump(hdata, "start");
}
static void hdmi_mode_set(struct drm_encoder *encoder,
@@ -1579,10 +1469,17 @@ static void hdmi_mode_set(struct drm_encoder *encoder,
hdata->cea_video_id = drm_match_cea_mode(mode);
}
-static void hdmi_enable(struct drm_encoder *encoder)
+static void hdmi_set_refclk(struct hdmi_context *hdata, bool on)
{
- struct hdmi_context *hdata = encoder_to_hdmi(encoder);
+ if (!hdata->sysreg)
+ return;
+ regmap_update_bits(hdata->sysreg, EXYNOS5433_SYSREG_DISP_HDMI_PHY,
+ SYSREG_HDMI_REFCLK_INT_CLK, on ? ~0 : 0);
+}
+
+static void hdmiphy_enable(struct hdmi_context *hdata)
+{
if (hdata->powered)
return;
@@ -1591,15 +1488,47 @@ static void hdmi_enable(struct drm_encoder *encoder)
if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk))
DRM_DEBUG_KMS("failed to enable regulator bulk\n");
- /* set pmu hdmiphy control bit to enable hdmiphy */
regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
PMU_HDMI_PHY_ENABLE_BIT, 1);
- hdmi_conf_apply(hdata);
+ hdmi_set_refclk(hdata, true);
+
+ hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0, HDMI_PHY_POWER_OFF_EN);
+
+ hdmiphy_conf_apply(hdata);
hdata->powered = true;
}
+static void hdmiphy_disable(struct hdmi_context *hdata)
+{
+ if (!hdata->powered)
+ return;
+
+ hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
+
+ hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0, HDMI_PHY_POWER_OFF_EN);
+
+ hdmi_set_refclk(hdata, false);
+
+ regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
+ PMU_HDMI_PHY_ENABLE_BIT, 0);
+
+ regulator_bulk_disable(ARRAY_SIZE(supply), hdata->regul_bulk);
+
+ pm_runtime_put_sync(hdata->dev);
+
+ hdata->powered = false;
+}
+
+static void hdmi_enable(struct drm_encoder *encoder)
+{
+ struct hdmi_context *hdata = encoder_to_hdmi(encoder);
+
+ hdmiphy_enable(hdata);
+ hdmi_conf_apply(hdata);
+}
+
static void hdmi_disable(struct drm_encoder *encoder)
{
struct hdmi_context *hdata = encoder_to_hdmi(encoder);
@@ -1623,20 +1552,9 @@ static void hdmi_disable(struct drm_encoder *encoder)
if (funcs && funcs->disable)
(*funcs->disable)(crtc);
- /* HDMI System Disable */
- hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_EN);
-
cancel_delayed_work(&hdata->hotplug_work);
- /* reset pmu hdmiphy control bit to disable hdmiphy */
- regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
- PMU_HDMI_PHY_ENABLE_BIT, 0);
-
- regulator_bulk_disable(ARRAY_SIZE(supply), hdata->regul_bulk);
-
- pm_runtime_put_sync(hdata->dev);
-
- hdata->powered = false;
+ hdmiphy_disable(hdata);
}
static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = {
@@ -1670,6 +1588,68 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
return IRQ_HANDLED;
}
+static int hdmi_clks_get(struct hdmi_context *hdata,
+ const struct string_array_spec *names,
+ struct clk **clks)
+{
+ struct device *dev = hdata->dev;
+ int i;
+
+ for (i = 0; i < names->count; ++i) {
+ struct clk *clk = devm_clk_get(dev, names->data[i]);
+
+ if (IS_ERR(clk)) {
+ int ret = PTR_ERR(clk);
+
+ dev_err(dev, "Cannot get clock %s, %d\n",
+ names->data[i], ret);
+
+ return ret;
+ }
+
+ clks[i] = clk;
+ }
+
+ return 0;
+}
+
+static int hdmi_clk_init(struct hdmi_context *hdata)
+{
+ const struct hdmi_driver_data *drv_data = hdata->drv_data;
+ int count = drv_data->clk_gates.count + drv_data->clk_muxes.count;
+ struct device *dev = hdata->dev;
+ struct clk **clks;
+ int ret;
+
+ if (!count)
+ return 0;
+
+ clks = devm_kzalloc(dev, sizeof(*clks) * count, GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ hdata->clk_gates = clks;
+ hdata->clk_muxes = clks + drv_data->clk_gates.count;
+
+ ret = hdmi_clks_get(hdata, &drv_data->clk_gates, hdata->clk_gates);
+ if (ret)
+ return ret;
+
+ return hdmi_clks_get(hdata, &drv_data->clk_muxes, hdata->clk_muxes);
+}
+
+static void hdmiphy_clk_enable(struct exynos_drm_clk *clk, bool enable)
+{
+ struct hdmi_context *hdata = container_of(clk, struct hdmi_context,
+ phy_clk);
+
+ if (enable)
+ hdmiphy_enable(hdata);
+ else
+ hdmiphy_disable(hdata);
+}
+
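Exposing the PHY power sequence as an exynos_drm_clk lets the CRTC driver toggle the HDMI PHY around modesets through the pipe_clk hook that hdmi_bind() registers below. The consumer side, sketched with a hypothetical CRTC enable path:

static void foo_crtc_enable(struct exynos_drm_crtc *crtc)
{
	/* lands in hdmiphy_enable() once the HDMI pipe is bound */
	exynos_drm_pipe_clk_enable(crtc, true);

	/* ... program timings and start scanout ... */
}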
static int hdmi_resources_init(struct hdmi_context *hdata)
{
struct device *dev = hdata->dev;
@@ -1688,39 +1668,14 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
DRM_ERROR("failed to get GPIO irq\n");
return hdata->irq;
}
- /* get clocks, power */
- hdata->hdmi = devm_clk_get(dev, "hdmi");
- if (IS_ERR(hdata->hdmi)) {
- DRM_ERROR("failed to get clock 'hdmi'\n");
- ret = PTR_ERR(hdata->hdmi);
- goto fail;
- }
- hdata->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
- if (IS_ERR(hdata->sclk_hdmi)) {
- DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
- ret = PTR_ERR(hdata->sclk_hdmi);
- goto fail;
- }
- hdata->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
- if (IS_ERR(hdata->sclk_pixel)) {
- DRM_ERROR("failed to get clock 'sclk_pixel'\n");
- ret = PTR_ERR(hdata->sclk_pixel);
- goto fail;
- }
- hdata->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
- if (IS_ERR(hdata->sclk_hdmiphy)) {
- DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
- ret = PTR_ERR(hdata->sclk_hdmiphy);
- goto fail;
- }
- hdata->mout_hdmi = devm_clk_get(dev, "mout_hdmi");
- if (IS_ERR(hdata->mout_hdmi)) {
- DRM_ERROR("failed to get clock 'mout_hdmi'\n");
- ret = PTR_ERR(hdata->mout_hdmi);
- goto fail;
- }
- clk_set_parent(hdata->mout_hdmi, hdata->sclk_pixel);
+ ret = hdmi_clk_init(hdata);
+ if (ret)
+ return ret;
+
+ ret = hdmi_clk_set_parents(hdata, false);
+ if (ret)
+ return ret;
for (i = 0; i < ARRAY_SIZE(supply); ++i) {
hdata->regul_bulk[i].supply = supply[i];
@@ -1728,7 +1683,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
}
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk);
if (ret) {
- DRM_ERROR("failed to get regulators\n");
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("failed to get regulators\n");
return ret;
}
@@ -1745,9 +1701,6 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
DRM_ERROR("failed to enable hdmi-en regulator\n");
return ret;
-fail:
- DRM_ERROR("HDMI resource init - failed\n");
- return ret;
}
static struct of_device_id hdmi_match_types[] = {
@@ -1761,6 +1714,9 @@ static struct of_device_id hdmi_match_types[] = {
.compatible = "samsung,exynos5420-hdmi",
.data = &exynos5420_hdmi_driver_data,
}, {
+ .compatible = "samsung,exynos5433-hdmi",
+ .data = &exynos5433_hdmi_driver_data,
+ }, {
/* end node */
}
};
@@ -1780,6 +1736,10 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
if (pipe < 0)
return pipe;
+ hdata->phy_clk.enable = hdmiphy_clk_enable;
+
+ exynos_drm_crtc_from_pipe(drm_dev, pipe)->pipe_clk = &hdata->phy_clk;
+
encoder->possible_crtcs = 1 << pipe;
DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
@@ -1830,7 +1790,6 @@ static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev)
static int hdmi_probe(struct platform_device *pdev)
{
struct device_node *ddc_node, *phy_node;
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct hdmi_context *hdata;
struct resource *res;
@@ -1840,11 +1799,7 @@ static int hdmi_probe(struct platform_device *pdev)
if (!hdata)
return -ENOMEM;
- match = of_match_device(hdmi_match_types, dev);
- if (!match)
- return -ENODEV;
-
- hdata->drv_data = match->data;
+ hdata->drv_data = of_device_get_match_data(dev);
platform_set_drvdata(pdev, hdata);
@@ -1852,7 +1807,8 @@ static int hdmi_probe(struct platform_device *pdev)
ret = hdmi_resources_init(hdata);
if (ret) {
- DRM_ERROR("hdmi_resources_init failed\n");
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("hdmi_resources_init failed\n");
return ret;
}
@@ -1867,7 +1823,6 @@ static int hdmi_probe(struct platform_device *pdev)
if (ddc_node)
goto out_get_ddc_adpt;
- /* DDC i2c driver */
ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
if (!ddc_node) {
DRM_ERROR("Failed to find ddc node in device tree\n");
@@ -1885,7 +1840,6 @@ out_get_ddc_adpt:
if (phy_node)
goto out_get_phy_port;
- /* hdmiphy i2c driver */
phy_node = of_parse_phandle(dev->of_node, "phy", 0);
if (!phy_node) {
DRM_ERROR("Failed to find hdmiphy node in device tree\n");
@@ -1929,6 +1883,16 @@ out_get_phy_port:
goto err_hdmiphy;
}
+ if (hdata->drv_data->has_sysreg) {
+ hdata->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,sysreg-phandle");
+ if (IS_ERR(hdata->sysreg)) {
+ DRM_ERROR("sysreg regmap lookup failed.\n");
+ ret = -EPROBE_DEFER;
+ goto err_hdmiphy;
+ }
+ }
+
pm_runtime_enable(dev);
ret = component_add(&pdev->dev, &hdmi_component_ops);
@@ -1975,8 +1939,7 @@ static int exynos_hdmi_suspend(struct device *dev)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
- clk_disable_unprepare(hdata->sclk_hdmi);
- clk_disable_unprepare(hdata->hdmi);
+ hdmi_clk_disable_gates(hdata);
return 0;
}
@@ -1986,17 +1949,9 @@ static int exynos_hdmi_resume(struct device *dev)
struct hdmi_context *hdata = dev_get_drvdata(dev);
int ret;
- ret = clk_prepare_enable(hdata->hdmi);
- if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
- return ret;
- }
- ret = clk_prepare_enable(hdata->sclk_hdmi);
- if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the sclk_mixer clk [%d]\n",
- ret);
+ ret = hdmi_clk_enable_gates(hdata);
+ if (ret < 0)
return ret;
- }
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 0a5a60005..74a4269cc 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -31,6 +31,7 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/component.h>
#include <drm/exynos_drm.h>
@@ -103,8 +104,6 @@ struct mixer_context {
struct mixer_resources mixer_res;
enum mixer_version_id mxr_ver;
- wait_queue_head_t wait_vsync_queue;
- atomic_t wait_vsync_event;
};
struct mixer_drv_data {
@@ -787,12 +786,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
exynos_drm_crtc_finish_update(ctx->crtc, plane);
}
-
- /* set wait vsync event to zero and wake up queue. */
- if (atomic_read(&ctx->wait_vsync_event)) {
- atomic_set(&ctx->wait_vsync_event, 0);
- wake_up(&ctx->wait_vsync_queue);
- }
}
out:
@@ -1027,34 +1020,6 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
mixer_vsync_set_update(mixer_ctx, true);
}
-static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
-{
- struct mixer_context *mixer_ctx = crtc->ctx;
- int err;
-
- if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
- return;
-
- err = drm_vblank_get(mixer_ctx->drm_dev, mixer_ctx->pipe);
- if (err < 0) {
- DRM_DEBUG_KMS("failed to acquire vblank counter\n");
- return;
- }
-
- atomic_set(&mixer_ctx->wait_vsync_event, 1);
-
- /*
- * wait for MIXER to signal VSYNC interrupt or return after
- * timeout which is set to 50ms (refresh rate of 20).
- */
- if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
- !atomic_read(&mixer_ctx->wait_vsync_event),
- HZ/20))
- DRM_DEBUG_KMS("vblank wait timed out.\n");
-
- drm_vblank_put(mixer_ctx->drm_dev, mixer_ctx->pipe);
-}
-
static void mixer_enable(struct exynos_drm_crtc *crtc)
{
struct mixer_context *ctx = crtc->ctx;
@@ -1065,6 +1030,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
pm_runtime_get_sync(ctx->dev);
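+	/*
+	 * The pipeline clock callback is registered by the encoder driver;
+	 * for HDMI this should resolve to hdmiphy_clk_enable() in
+	 * exynos_hdmi.c.
+	 */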
+ exynos_drm_pipe_clk_enable(crtc, true);
+
mixer_vsync_set_update(ctx, false);
mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
@@ -1094,6 +1061,8 @@ static void mixer_disable(struct exynos_drm_crtc *crtc)
for (i = 0; i < MIXER_WIN_NR; i++)
mixer_disable_plane(crtc, &ctx->planes[i]);
+ exynos_drm_pipe_clk_enable(crtc, false);
+
pm_runtime_put(ctx->dev);
clear_bit(MXR_BIT_POWERED, &ctx->flags);
@@ -1126,7 +1095,6 @@ static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
.disable = mixer_disable,
.enable_vblank = mixer_enable_vblank,
.disable_vblank = mixer_disable_vblank,
- .wait_for_vblank = mixer_wait_for_vblank,
.atomic_begin = mixer_atomic_begin,
.update_plane = mixer_update_plane,
.disable_plane = mixer_disable_plane,
@@ -1155,18 +1123,6 @@ static struct mixer_drv_data exynos4210_mxr_drv_data = {
.has_sclk = 1,
};
-static const struct platform_device_id mixer_driver_types[] = {
- {
- .name = "s5p-mixer",
- .driver_data = (unsigned long)&exynos4210_mxr_drv_data,
- }, {
- .name = "exynos5-mixer",
- .driver_data = (unsigned long)&exynos5250_mxr_drv_data,
- }, {
- /* end node */
- }
-};
-
static struct of_device_id mixer_match_types[] = {
{
.compatible = "samsung,exynos4210-mixer",
@@ -1243,7 +1199,7 @@ static const struct component_ops mixer_component_ops = {
static int mixer_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct mixer_drv_data *drv;
+ const struct mixer_drv_data *drv;
struct mixer_context *ctx;
int ret;
@@ -1253,23 +1209,13 @@ static int mixer_probe(struct platform_device *pdev)
return -ENOMEM;
}
- if (dev->of_node) {
- const struct of_device_id *match;
-
- match = of_match_node(mixer_match_types, dev->of_node);
- drv = (struct mixer_drv_data *)match->data;
- } else {
- drv = (struct mixer_drv_data *)
- platform_get_device_id(pdev)->driver_data;
- }
+ drv = of_device_get_match_data(dev);
ctx->pdev = pdev;
ctx->dev = dev;
ctx->vp_enabled = drv->is_vp_enabled;
ctx->has_sclk = drv->has_sclk;
ctx->mxr_ver = drv->version;
- init_waitqueue_head(&ctx->wait_vsync_queue);
- atomic_set(&ctx->wait_vsync_event, 0);
platform_set_drvdata(pdev, ctx);
@@ -1355,5 +1301,4 @@ struct platform_driver mixer_driver = {
},
.probe = mixer_probe,
.remove = mixer_remove,
- .id_table = mixer_driver_types,
};
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 8c891e59b..169667a22 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -586,10 +586,12 @@
#define HDMI_TG_VACT_ST4_L HDMI_TG_BASE(0x0070)
#define HDMI_TG_VACT_ST4_H HDMI_TG_BASE(0x0074)
#define HDMI_TG_3D HDMI_TG_BASE(0x00F0)
+#define HDMI_TG_DECON_EN HDMI_TG_BASE(0x01e0)
/* HDMI PHY Register Offsets */
-#define HDMIPHY_POWER (0x74 >> 2)
-#define HDMIPHY_MODE_SET_DONE (0x7c >> 2)
+#define HDMIPHY_POWER 0x74
+#define HDMIPHY_MODE_SET_DONE 0x7c
+#define HDMIPHY5433_MODE_SET_DONE 0x84
/* HDMI PHY Values */
#define HDMI_PHY_POWER_ON 0x80
@@ -603,4 +605,7 @@
#define PMU_HDMI_PHY_CONTROL 0x700
#define PMU_HDMI_PHY_ENABLE_BIT BIT(0)
+#define EXYNOS5433_SYSREG_DISP_HDMI_PHY 0x1008
+#define SYSREG_HDMI_REFCLK_INT_CLK 1
+
#endif /* SAMSUNG_REGS_HDMI_H */
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
index c78cf3f60..b9c714de6 100644
--- a/drivers/gpu/drm/fsl-dcu/Kconfig
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -1,6 +1,6 @@
config DRM_FSL_DCU
tristate "DRM Support for Freescale DCU"
- depends on DRM && OF && ARM
+ depends on DRM && OF && ARM && COMMON_CLK
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/fsl-dcu/Makefile b/drivers/gpu/drm/fsl-dcu/Makefile
index 6ea1523ae..b35a29228 100644
--- a/drivers/gpu/drm/fsl-dcu/Makefile
+++ b/drivers/gpu/drm/fsl-dcu/Makefile
@@ -3,5 +3,6 @@ fsl-dcu-drm-y := fsl_dcu_drm_drv.o \
fsl_dcu_drm_rgb.o \
fsl_dcu_drm_plane.o \
fsl_dcu_drm_crtc.o \
- fsl_dcu_drm_fbdev.o
+ fsl_dcu_drm_fbdev.o \
+ fsl_tcon.o
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu-drm.o
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 4ed779853..89c0084c2 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -66,13 +66,12 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ struct drm_connector *con = &fsl_dev->connector.base;
struct drm_display_mode *mode = &crtc->state->mode;
- unsigned int hbp, hfp, hsw, vbp, vfp, vsw, div, index, pol = 0;
- unsigned long dcuclk;
+ unsigned int hbp, hfp, hsw, vbp, vfp, vsw, index, pol = 0;
index = drm_crtc_index(crtc);
- dcuclk = clk_get_rate(fsl_dev->clk);
- div = dcuclk / mode->clock / 1000;
+ clk_set_rate(fsl_dev->pix_clk, mode->clock * 1000);
/* Configure timings: */
hbp = mode->htotal - mode->hsync_end;
@@ -82,6 +81,10 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
vfp = mode->vsync_start - mode->vdisplay;
vsw = mode->vsync_end - mode->vsync_start;
+	/* INV_PXCK as default (most displays sample data on the rising edge) */
+ if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE))
+ pol |= DCU_SYN_POL_INV_PXCK;
+
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
pol |= DCU_SYN_POL_INV_HS_LOW;
@@ -99,7 +102,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) |
DCU_DISP_SIZE_DELTA_X(mode->hdisplay));
- regmap_write(fsl_dev->regmap, DCU_DIV_RATIO, div);
regmap_write(fsl_dev->regmap, DCU_SYN_POL, pol);
regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) |
DCU_BGND_G(0) | DCU_BGND_B(0));
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 77886f118..dc723f7ea 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -23,10 +23,12 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "fsl_dcu_drm_crtc.h"
#include "fsl_dcu_drm_drv.h"
+#include "fsl_tcon.h"
static bool fsl_dcu_drm_is_volatile_reg(struct device *dev, unsigned int reg)
{
@@ -63,46 +65,54 @@ static int fsl_dcu_drm_irq_init(struct drm_device *dev)
return ret;
}
-static int fsl_dcu_load(struct drm_device *drm, unsigned long flags)
+static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
{
- struct device *dev = drm->dev;
- struct fsl_dcu_drm_device *fsl_dev = drm->dev_private;
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
int ret;
ret = fsl_dcu_drm_modeset_init(fsl_dev);
if (ret < 0) {
- dev_err(dev, "failed to initialize mode setting\n");
+ dev_err(dev->dev, "failed to initialize mode setting\n");
return ret;
}
- ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret < 0) {
- dev_err(dev, "failed to initialize vblank\n");
+ dev_err(dev->dev, "failed to initialize vblank\n");
goto done;
}
- drm->vblank_disable_allowed = true;
- ret = fsl_dcu_drm_irq_init(drm);
+ ret = fsl_dcu_drm_irq_init(dev);
if (ret < 0)
goto done;
- drm->irq_enabled = true;
+ dev->irq_enabled = true;
- fsl_dcu_fbdev_init(drm);
+ fsl_dcu_fbdev_init(dev);
return 0;
done:
- if (ret) {
- drm_mode_config_cleanup(drm);
- drm_vblank_cleanup(drm);
- drm_irq_uninstall(drm);
- drm->dev_private = NULL;
- }
+ drm_kms_helper_poll_fini(dev);
+
+ if (fsl_dev->fbdev)
+ drm_fbdev_cma_fini(fsl_dev->fbdev);
+
+ drm_mode_config_cleanup(dev);
+ drm_vblank_cleanup(dev);
+ drm_irq_uninstall(dev);
+ dev->dev_private = NULL;
return ret;
}
static int fsl_dcu_unload(struct drm_device *dev)
{
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+
+ drm_kms_helper_poll_fini(dev);
+
+ if (fsl_dev->fbdev)
+ drm_fbdev_cma_fini(fsl_dev->fbdev);
+
drm_mode_config_cleanup(dev);
drm_vblank_cleanup(dev);
drm_irq_uninstall(dev);
@@ -158,6 +168,13 @@ static void fsl_dcu_drm_disable_vblank(struct drm_device *dev,
regmap_write(fsl_dev->regmap, DCU_INT_MASK, value);
}
+static void fsl_dcu_drm_lastclose(struct drm_device *dev)
+{
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+
+ drm_fbdev_cma_restore_mode(fsl_dev->fbdev);
+}
+
static const struct file_operations fsl_dcu_drm_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -175,6 +192,7 @@ static const struct file_operations fsl_dcu_drm_fops = {
static struct drm_driver fsl_dcu_drm_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
| DRIVER_PRIME | DRIVER_ATOMIC,
+ .lastclose = fsl_dcu_drm_lastclose,
.load = fsl_dcu_load,
.unload = fsl_dcu_unload,
.irq_handler = fsl_dcu_drm_irq,
@@ -198,9 +216,9 @@ static struct drm_driver fsl_dcu_drm_driver = {
.fops = &fsl_dcu_drm_fops,
.name = "fsl-dcu-drm",
.desc = "Freescale DCU DRM",
- .date = "20150213",
+ .date = "20160425",
.major = 1,
- .minor = 0,
+ .minor = 1,
};
#ifdef CONFIG_PM_SLEEP
@@ -284,6 +302,9 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *base;
struct drm_driver *driver = &fsl_dcu_drm_driver;
+ struct clk *pix_clk_in;
+ char pix_clk_name[32];
+ const char *pix_clk_in_name;
const struct of_device_id *id;
int ret;
@@ -291,6 +312,11 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
if (!fsl_dev)
return -ENOMEM;
+ id = of_match_node(fsl_dcu_of_match, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+ fsl_dev->soc = id->data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "could not get memory IO resource\n");
@@ -309,39 +335,54 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
return -ENXIO;
}
+ fsl_dev->regmap = devm_regmap_init_mmio(dev, base,
+ &fsl_dcu_regmap_config);
+ if (IS_ERR(fsl_dev->regmap)) {
+ dev_err(dev, "regmap init failed\n");
+ return PTR_ERR(fsl_dev->regmap);
+ }
+
fsl_dev->clk = devm_clk_get(dev, "dcu");
if (IS_ERR(fsl_dev->clk)) {
- ret = PTR_ERR(fsl_dev->clk);
dev_err(dev, "failed to get dcu clock\n");
- return ret;
- }
- ret = clk_prepare(fsl_dev->clk);
- if (ret < 0) {
- dev_err(dev, "failed to prepare dcu clk\n");
- return ret;
+ return PTR_ERR(fsl_dev->clk);
}
- ret = clk_enable(fsl_dev->clk);
+ ret = clk_prepare_enable(fsl_dev->clk);
if (ret < 0) {
dev_err(dev, "failed to enable dcu clk\n");
- clk_unprepare(fsl_dev->clk);
return ret;
}
- fsl_dev->regmap = devm_regmap_init_mmio(dev, base,
- &fsl_dcu_regmap_config);
- if (IS_ERR(fsl_dev->regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(fsl_dev->regmap);
+ pix_clk_in = devm_clk_get(dev, "pix");
+ if (IS_ERR(pix_clk_in)) {
+		/* legacy binding: use the dcu clock as the pixel clock input */
+ pix_clk_in = fsl_dev->clk;
}
- id = of_match_node(fsl_dcu_of_match, pdev->dev.of_node);
- if (!id)
- return -ENODEV;
- fsl_dev->soc = id->data;
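+	/*
+	 * Expose DCU_DIV_RATIO as a common-clock divider so the CRTC can
+	 * simply clk_set_rate() the pixel clock in mode_set_nofb().
+	 */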
+ pix_clk_in_name = __clk_get_name(pix_clk_in);
+ snprintf(pix_clk_name, sizeof(pix_clk_name), "%s_pix", pix_clk_in_name);
+ fsl_dev->pix_clk = clk_register_divider(dev, pix_clk_name,
+ pix_clk_in_name, 0, base + DCU_DIV_RATIO,
+ 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL);
+ if (IS_ERR(fsl_dev->pix_clk)) {
+ dev_err(dev, "failed to register pix clk\n");
+ ret = PTR_ERR(fsl_dev->pix_clk);
+ goto disable_clk;
+ }
+
+ ret = clk_prepare_enable(fsl_dev->pix_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable pix clk\n");
+ goto unregister_pix_clk;
+ }
+
+ fsl_dev->tcon = fsl_tcon_init(dev);
drm = drm_dev_alloc(driver, dev);
- if (!drm)
- return -ENOMEM;
+ if (!drm) {
+ ret = -ENOMEM;
+ goto disable_pix_clk;
+ }
fsl_dev->dev = dev;
fsl_dev->drm = drm;
@@ -361,6 +402,12 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
unref:
drm_dev_unref(drm);
+disable_pix_clk:
+ clk_disable_unprepare(fsl_dev->pix_clk);
+unregister_pix_clk:
+ clk_unregister(fsl_dev->pix_clk);
+disable_clk:
+ clk_disable_unprepare(fsl_dev->clk);
return ret;
}
@@ -368,6 +415,9 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
{
struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
+ clk_disable_unprepare(fsl_dev->clk);
+ clk_disable_unprepare(fsl_dev->pix_clk);
+ clk_unregister(fsl_dev->pix_clk);
drm_put_dev(fsl_dev->drm);
return 0;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index 6413ac9e4..c275f900f 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -47,8 +47,8 @@
#define DCU_VSYN_PARA_FP(x) (x)
#define DCU_SYN_POL 0x0024
-#define DCU_SYN_POL_INV_PXCK_FALL (0 << 6)
-#define DCU_SYN_POL_NEG_REMAIN (0 << 5)
+#define DCU_SYN_POL_INV_PXCK BIT(6)
+#define DCU_SYN_POL_NEG BIT(5)
#define DCU_SYN_POL_INV_VS_LOW BIT(1)
#define DCU_SYN_POL_INV_HS_LOW BIT(0)
@@ -183,6 +183,8 @@ struct fsl_dcu_drm_device {
struct regmap *regmap;
int irq;
struct clk *clk;
+ struct clk *pix_clk;
+ struct fsl_tcon *tcon;
/* protects hardware registers */
spinlock_t irq_lock;
struct drm_device *drm;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 8780deba5..98c998da9 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -17,6 +17,7 @@
#include <drm/drm_panel.h>
#include "fsl_dcu_drm_drv.h"
+#include "fsl_tcon.h"
static int
fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
@@ -28,10 +29,20 @@ fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+
+ if (fsl_dev->tcon)
+ fsl_tcon_bypass_disable(fsl_dev->tcon);
}
static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+
+ if (fsl_dev->tcon)
+ fsl_tcon_bypass_enable(fsl_dev->tcon);
}
static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
@@ -68,7 +79,10 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
static void fsl_dcu_drm_connector_destroy(struct drm_connector *connector)
{
+ struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector);
+
drm_connector_unregister(connector);
+ drm_panel_detach(fsl_con->panel);
drm_connector_cleanup(connector);
}
@@ -131,7 +145,7 @@ int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
struct drm_encoder *encoder)
{
struct drm_connector *connector = &fsl_dev->connector.base;
- struct drm_mode_config mode_config = fsl_dev->drm->mode_config;
+ struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
struct device_node *panel_node;
int ret;
@@ -153,19 +167,23 @@ int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
goto err_sysfs;
drm_object_property_set_value(&connector->base,
- mode_config.dpms_property,
+ mode_config->dpms_property,
DRM_MODE_DPMS_OFF);
panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
- if (panel_node) {
- fsl_dev->connector.panel = of_drm_find_panel(panel_node);
- if (!fsl_dev->connector.panel) {
- ret = -EPROBE_DEFER;
- goto err_sysfs;
- }
- of_node_put(panel_node);
+ if (!panel_node) {
+ dev_err(fsl_dev->dev, "fsl,panel property not found\n");
+ ret = -ENODEV;
+ goto err_sysfs;
}
+ fsl_dev->connector.panel = of_drm_find_panel(panel_node);
+ if (!fsl_dev->connector.panel) {
+ ret = -EPROBE_DEFER;
+ goto err_panel;
+ }
+ of_node_put(panel_node);
+
ret = drm_panel_attach(fsl_dev->connector.panel, connector);
if (ret) {
dev_err(fsl_dev->dev, "failed to attach panel\n");
@@ -174,6 +192,8 @@ int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
return 0;
+err_panel:
+ of_node_put(panel_node);
err_sysfs:
drm_connector_unregister(connector);
err_cleanup:
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
new file mode 100644
index 000000000..bbe34f1c0
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2015 Toradex AG
+ *
+ * Stefan Agner <stefan@agner.ch>
+ *
+ * Freescale TCON device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "fsl_tcon.h"
+
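+/*
+ * The TCON (timing controller) sits between the DCU and the display
+ * interface; this driver only ever runs it in bypass mode, where the
+ * DCU sync and data signals pass through unmodified.
+ */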
+void fsl_tcon_bypass_disable(struct fsl_tcon *tcon)
+{
+ regmap_update_bits(tcon->regs, FSL_TCON_CTRL1,
+ FSL_TCON_CTRL1_TCON_BYPASS, 0);
+}
+
+void fsl_tcon_bypass_enable(struct fsl_tcon *tcon)
+{
+ regmap_update_bits(tcon->regs, FSL_TCON_CTRL1,
+ FSL_TCON_CTRL1_TCON_BYPASS,
+ FSL_TCON_CTRL1_TCON_BYPASS);
+}
+
+static struct regmap_config fsl_tcon_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+
+ .name = "tcon",
+};
+
+static int fsl_tcon_init_regmap(struct device *dev,
+ struct fsl_tcon *tcon,
+ struct device_node *np)
+{
+ struct resource res;
+ void __iomem *regs;
+
+ if (of_address_to_resource(np, 0, &res))
+ return -EINVAL;
+
+ regs = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ tcon->regs = devm_regmap_init_mmio(dev, regs,
+ &fsl_tcon_regmap_config);
+ if (IS_ERR(tcon->regs))
+ return PTR_ERR(tcon->regs);
+
+ return 0;
+}
+
+struct fsl_tcon *fsl_tcon_init(struct device *dev)
+{
+ struct fsl_tcon *tcon;
+ struct device_node *np;
+ int ret;
+
+	/* The TCON node is optional: some devices do not provide one. */
+ np = of_parse_phandle(dev->of_node, "fsl,tcon", 0);
+ if (!np)
+ return NULL;
+
+ tcon = devm_kzalloc(dev, sizeof(*tcon), GFP_KERNEL);
+ if (!tcon) {
+ ret = -ENOMEM;
+ goto err_node_put;
+ }
+
+ ret = fsl_tcon_init_regmap(dev, tcon, np);
+ if (ret) {
+ dev_err(dev, "Couldn't create the TCON regmap\n");
+ goto err_node_put;
+ }
+
+ tcon->ipg_clk = of_clk_get_by_name(np, "ipg");
+ if (IS_ERR(tcon->ipg_clk)) {
+ dev_err(dev, "Couldn't get the TCON bus clock\n");
+ goto err_node_put;
+ }
+
+ clk_prepare_enable(tcon->ipg_clk);
+
+ dev_info(dev, "Using TCON in bypass mode\n");
+
+ return tcon;
+
+err_node_put:
+ of_node_put(np);
+ return NULL;
+}
+
+void fsl_tcon_free(struct fsl_tcon *tcon)
+{
+ clk_disable_unprepare(tcon->ipg_clk);
+ clk_put(tcon->ipg_clk);
+}
+
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.h b/drivers/gpu/drm/fsl-dcu/fsl_tcon.h
new file mode 100644
index 000000000..80a7617de
--- /dev/null
+++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2015 Toradex AG
+ *
+ * Stefan Agner <stefan@agner.ch>
+ *
+ * Freescale TCON device driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __FSL_TCON_H__
+#define __FSL_TCON_H__
+
+#include <linux/bitops.h>
+
+#define FSL_TCON_CTRL1 0x0
+#define FSL_TCON_CTRL1_TCON_BYPASS BIT(29)
+
+struct fsl_tcon {
+ struct regmap *regs;
+ struct clk *ipg_clk;
+};
+
+struct fsl_tcon *fsl_tcon_init(struct device *dev);
+void fsl_tcon_free(struct fsl_tcon *tcon);
+
+void fsl_tcon_bypass_disable(struct fsl_tcon *tcon);
+void fsl_tcon_bypass_enable(struct fsl_tcon *tcon);
+
+#endif /* __FSL_TCON_H__ */
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 033d894d0..7440bf90a 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -411,7 +411,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info = drm_fb_helper_alloc_fbi(&fbdev->psb_fb_helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
- goto out_err1;
+ goto err_free_range;
}
info->par = fbdev;
@@ -419,7 +419,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
if (ret)
- goto out_unref;
+ goto err_release;
fb = &psbfb->base;
psbfb->fbdev = info;
@@ -464,14 +464,9 @@ static int psbfb_create(struct psb_fbdev *fbdev,
psbfb->base.width, psbfb->base.height);
return 0;
-out_unref:
- if (backing->stolen)
- psb_gtt_free_range(dev, backing);
- else
- drm_gem_object_unreference_unlocked(&backing->gem);
-
+err_release:
drm_fb_helper_release_fbi(&fbdev->psb_fb_helper);
-out_err1:
+err_free_range:
psb_gtt_free_range(dev, backing);
return ret;
}
@@ -495,7 +490,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create
* Find the GEM object and thus the gtt range object that is
* to back this space
*/
- obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
+ obj = drm_gem_object_lookup(filp, cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 506224b3a..6d1cb6b37 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -63,7 +63,7 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
struct drm_gem_object *obj;
/* GEM does all our handle to object mapping */
- obj = drm_gem_object_lookup(dev, file, handle);
+ obj = drm_gem_object_lookup(file, handle);
if (obj == NULL)
return -ENOENT;
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 5bf765de2..c95406e6f 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -372,7 +372,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
- obj = drm_gem_object_lookup(dev, file_priv, handle);
+ obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
ret = -ENOENT;
goto unlock;
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index 7cd87a0c2..a05c02060 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -979,11 +979,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
return NULL;
}
- if (dsi_connector->pipe)
- dpi_output->panel_on = 0;
- else
- dpi_output->panel_on = 0;
-
+ dpi_output->panel_on = 0;
dpi_output->dev = dev;
if (mdfld_get_panel_type(dev, pipe) != TC35876X)
dpi_output->p_funcs = p_funcs;
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 4e1c68505..82b8ce418 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -374,7 +374,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
drm_irq_install(dev, dev->pdev->irq);
- dev->vblank_disable_allowed = true;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
dev->driver->get_vblank_counter = psb_get_vblank_counter;
diff --git a/drivers/gpu/drm/hisilicon/Kconfig b/drivers/gpu/drm/hisilicon/Kconfig
new file mode 100644
index 000000000..558c61b1b
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/Kconfig
@@ -0,0 +1,5 @@
+#
+# HiSilicon DRM device configuration.
+# Please keep this list sorted alphabetically
+
+source "drivers/gpu/drm/hisilicon/kirin/Kconfig"
diff --git a/drivers/gpu/drm/hisilicon/Makefile b/drivers/gpu/drm/hisilicon/Makefile
new file mode 100644
index 000000000..e3f6d493c
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for HiSilicon DRM drivers.
+# Please keep this list sorted alphabetically
+
+obj-$(CONFIG_DRM_HISI_KIRIN) += kirin/
diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig
new file mode 100644
index 000000000..ea0df6115
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig
@@ -0,0 +1,18 @@
+config DRM_HISI_KIRIN
+	tristate "DRM Support for Hisilicon Kirin series SoCs"
+ depends on DRM && OF && ARM64
+ select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_CMA_HELPER
+ help
+	  Choose this option if you have a HiSilicon Kirin chipset (hi6220).
+ If M is selected the module will be called kirin-drm.
+
+config HISI_KIRIN_DW_DSI
+ tristate "HiSilicon Kirin specific extensions for Synopsys DW MIPI DSI"
+ depends on DRM_HISI_KIRIN
+ select DRM_MIPI_DSI
+ help
+ This selects support for HiSilicon Kirin SoC specific extensions for
+ the Synopsys DesignWare DSI driver. If you want to enable MIPI DSI on
+	  hi6220-based SoCs, you should select this option.
diff --git a/drivers/gpu/drm/hisilicon/kirin/Makefile b/drivers/gpu/drm/hisilicon/kirin/Makefile
new file mode 100644
index 000000000..cdf615894
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/Makefile
@@ -0,0 +1,6 @@
+kirin-drm-y := kirin_drm_drv.o \
+ kirin_drm_ade.o
+
+obj-$(CONFIG_DRM_HISI_KIRIN) += kirin-drm.o
+
+obj-$(CONFIG_HISI_KIRIN_DW_DSI) += dw_drm_dsi.o
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
new file mode 100644
index 000000000..998452ad0
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -0,0 +1,858 @@
+/*
+ * DesignWare MIPI DSI Host Controller v1.02 driver
+ *
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * Author:
+ * Xinliang Liu <z.liuxinliang@hisilicon.com>
+ * Xinliang Liu <xinliang.liu@linaro.org>
+ * Xinwei Kong <kong.kongxinwei@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+
+#include <drm/drm_of.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_encoder_slave.h>
+#include <drm/drm_atomic_helper.h>
+
+#include "dw_dsi_reg.h"
+
+#define MAX_TX_ESC_CLK 10
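+/*
+ * ROUND(x, y): x / y rounded to the nearest integer, decided by the
+ * first decimal digit of the remainder, e.g. ROUND(60, 8) = 8 while
+ * ROUND(50, 8) = 6.
+ */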
+#define ROUND(x, y) ((x) / (y) + \
+ ((x) % (y) * 10 / (y) >= 5 ? 1 : 0))
+#define PHY_REF_CLK_RATE 19200000
+#define PHY_REF_CLK_PERIOD_PS (1000000000 / (PHY_REF_CLK_RATE / 1000))
+
+#define encoder_to_dsi(encoder) \
+ container_of(encoder, struct dw_dsi, encoder)
+#define host_to_dsi(host) \
+ container_of(host, struct dw_dsi, host)
+
+struct mipi_phy_params {
+ u32 clk_t_lpx;
+ u32 clk_t_hs_prepare;
+ u32 clk_t_hs_zero;
+ u32 clk_t_hs_trial;
+ u32 clk_t_wakeup;
+ u32 data_t_lpx;
+ u32 data_t_hs_prepare;
+ u32 data_t_hs_zero;
+ u32 data_t_hs_trial;
+ u32 data_t_ta_go;
+ u32 data_t_ta_get;
+ u32 data_t_wakeup;
+ u32 hstx_ckg_sel;
+ u32 pll_fbd_div5f;
+ u32 pll_fbd_div1f;
+ u32 pll_fbd_2p;
+ u32 pll_enbwt;
+ u32 pll_fbd_p;
+ u32 pll_fbd_s;
+ u32 pll_pre_div1p;
+ u32 pll_pre_p;
+ u32 pll_vco_750M;
+ u32 pll_lpf_rs;
+ u32 pll_lpf_cs;
+ u32 clklp2hs_time;
+ u32 clkhs2lp_time;
+ u32 lp2hs_time;
+ u32 hs2lp_time;
+ u32 clk_to_data_delay;
+ u32 data_to_clk_delay;
+ u32 lane_byte_clk_kHz;
+ u32 clk_division;
+};
+
+struct dsi_hw_ctx {
+ void __iomem *base;
+ struct clk *pclk;
+};
+
+struct dw_dsi {
+ struct drm_encoder encoder;
+ struct drm_bridge *bridge;
+ struct mipi_dsi_host host;
+ struct drm_display_mode cur_mode;
+ struct dsi_hw_ctx *ctx;
+ struct mipi_phy_params phy;
+
+ u32 lanes;
+ enum mipi_dsi_pixel_format format;
+ unsigned long mode_flags;
+ bool enable;
+};
+
+struct dsi_data {
+ struct dw_dsi dsi;
+ struct dsi_hw_ctx ctx;
+};
+
+struct dsi_phy_range {
+ u32 min_range_kHz;
+ u32 max_range_kHz;
+ u32 pll_vco_750M;
+ u32 hstx_ckg_sel;
+};
+
+static const struct dsi_phy_range dphy_range_info[] = {
+ { 46875, 62500, 1, 7 },
+ { 62500, 93750, 0, 7 },
+ { 93750, 125000, 1, 6 },
+ { 125000, 187500, 0, 6 },
+ { 187500, 250000, 1, 5 },
+ { 250000, 375000, 0, 5 },
+ { 375000, 500000, 1, 4 },
+ { 500000, 750000, 0, 4 },
+ { 750000, 1000000, 1, 0 },
+ { 1000000, 1500000, 0, 0 }
+};
+
+static u32 dsi_calc_phy_rate(u32 req_kHz, struct mipi_phy_params *phy)
+{
+ u32 ref_clk_ps = PHY_REF_CLK_PERIOD_PS;
+ u32 tmp_kHz = req_kHz;
+ u32 i = 0;
+ u32 q_pll = 1;
+ u32 m_pll = 0;
+ u32 n_pll = 0;
+ u32 r_pll = 1;
+ u32 m_n = 0;
+ u32 m_n_int = 0;
+ u32 f_kHz = 0;
+ u64 temp;
+
+ /*
+ * Find a rate >= req_kHz.
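+	 * The loop derives PLL divider settings for each candidate rate
+	 * and, if the achieved output still falls short of the request,
+	 * bumps the candidate by 10 kHz and tries again.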
+ */
+ do {
+ f_kHz = tmp_kHz;
+
+ for (i = 0; i < ARRAY_SIZE(dphy_range_info); i++)
+ if (f_kHz >= dphy_range_info[i].min_range_kHz &&
+ f_kHz <= dphy_range_info[i].max_range_kHz)
+ break;
+
+ if (i == ARRAY_SIZE(dphy_range_info)) {
+ DRM_ERROR("%dkHz out of range\n", f_kHz);
+ return 0;
+ }
+
+ phy->pll_vco_750M = dphy_range_info[i].pll_vco_750M;
+ phy->hstx_ckg_sel = dphy_range_info[i].hstx_ckg_sel;
+
+ if (phy->hstx_ckg_sel <= 7 &&
+ phy->hstx_ckg_sel >= 4)
+ q_pll = 0x10 >> (7 - phy->hstx_ckg_sel);
+
+ temp = f_kHz * (u64)q_pll * (u64)ref_clk_ps;
+ m_n_int = temp / (u64)1000000000;
+ m_n = (temp % (u64)1000000000) / (u64)100000000;
+
+ if (m_n_int % 2 == 0) {
+ if (m_n * 6 >= 50) {
+ n_pll = 2;
+ m_pll = (m_n_int + 1) * n_pll;
+ } else if (m_n * 6 >= 30) {
+ n_pll = 3;
+ m_pll = m_n_int * n_pll + 2;
+ } else {
+ n_pll = 1;
+ m_pll = m_n_int * n_pll;
+ }
+ } else {
+ if (m_n * 6 >= 50) {
+ n_pll = 1;
+ m_pll = (m_n_int + 1) * n_pll;
+ } else if (m_n * 6 >= 30) {
+ n_pll = 1;
+ m_pll = (m_n_int + 1) * n_pll;
+ } else if (m_n * 6 >= 10) {
+ n_pll = 3;
+ m_pll = m_n_int * n_pll + 1;
+ } else {
+ n_pll = 2;
+ m_pll = m_n_int * n_pll;
+ }
+ }
+
+ if (n_pll == 1) {
+ phy->pll_fbd_p = 0;
+ phy->pll_pre_div1p = 1;
+ } else {
+ phy->pll_fbd_p = n_pll;
+ phy->pll_pre_div1p = 0;
+ }
+
+ if (phy->pll_fbd_2p <= 7 && phy->pll_fbd_2p >= 4)
+ r_pll = 0x10 >> (7 - phy->pll_fbd_2p);
+
+ if (m_pll == 2) {
+ phy->pll_pre_p = 0;
+ phy->pll_fbd_s = 0;
+ phy->pll_fbd_div1f = 0;
+ phy->pll_fbd_div5f = 1;
+ } else if (m_pll >= 2 * 2 * r_pll && m_pll <= 2 * 4 * r_pll) {
+ phy->pll_pre_p = m_pll / (2 * r_pll);
+ phy->pll_fbd_s = 0;
+ phy->pll_fbd_div1f = 1;
+ phy->pll_fbd_div5f = 0;
+ } else if (m_pll >= 2 * 5 * r_pll && m_pll <= 2 * 150 * r_pll) {
+ if (((m_pll / (2 * r_pll)) % 2) == 0) {
+ phy->pll_pre_p =
+ (m_pll / (2 * r_pll)) / 2 - 1;
+ phy->pll_fbd_s =
+ (m_pll / (2 * r_pll)) % 2 + 2;
+ } else {
+ phy->pll_pre_p =
+ (m_pll / (2 * r_pll)) / 2;
+ phy->pll_fbd_s =
+ (m_pll / (2 * r_pll)) % 2;
+ }
+ phy->pll_fbd_div1f = 0;
+ phy->pll_fbd_div5f = 0;
+ } else {
+ phy->pll_pre_p = 0;
+ phy->pll_fbd_s = 0;
+ phy->pll_fbd_div1f = 0;
+ phy->pll_fbd_div5f = 1;
+ }
+
+ f_kHz = (u64)1000000000 * (u64)m_pll /
+ ((u64)ref_clk_ps * (u64)n_pll * (u64)q_pll);
+
+ if (f_kHz >= req_kHz)
+ break;
+
+ tmp_kHz += 10;
+
+ } while (true);
+
+ return f_kHz;
+}
+
+static void dsi_get_phy_params(u32 phy_req_kHz,
+ struct mipi_phy_params *phy)
+{
+ u32 ref_clk_ps = PHY_REF_CLK_PERIOD_PS;
+ u32 phy_rate_kHz;
+ u32 ui;
+
+ memset(phy, 0, sizeof(*phy));
+
+ phy_rate_kHz = dsi_calc_phy_rate(phy_req_kHz, phy);
+ if (!phy_rate_kHz)
+ return;
+
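+	/*
+	 * ui is the high-speed unit interval (one bit time) in ns,
+	 * assuming phy_rate_kHz is the per-lane bit rate.
+	 */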
+ ui = 1000000 / phy_rate_kHz;
+
+ phy->clk_t_lpx = ROUND(50, 8 * ui);
+ phy->clk_t_hs_prepare = ROUND(133, 16 * ui) - 1;
+
+ phy->clk_t_hs_zero = ROUND(262, 8 * ui);
+ phy->clk_t_hs_trial = 2 * (ROUND(60, 8 * ui) - 1);
+ phy->clk_t_wakeup = ROUND(1000000, (ref_clk_ps / 1000) - 1);
+ if (phy->clk_t_wakeup > 0xff)
+ phy->clk_t_wakeup = 0xff;
+ phy->data_t_wakeup = phy->clk_t_wakeup;
+ phy->data_t_lpx = phy->clk_t_lpx;
+ phy->data_t_hs_prepare = ROUND(125 + 10 * ui, 16 * ui) - 1;
+ phy->data_t_hs_zero = ROUND(105 + 6 * ui, 8 * ui);
+ phy->data_t_hs_trial = 2 * (ROUND(60 + 4 * ui, 8 * ui) - 1);
+ phy->data_t_ta_go = 3;
+ phy->data_t_ta_get = 4;
+
+ phy->pll_enbwt = 1;
+ phy->clklp2hs_time = ROUND(407, 8 * ui) + 12;
+ phy->clkhs2lp_time = ROUND(105 + 12 * ui, 8 * ui);
+ phy->lp2hs_time = ROUND(240 + 12 * ui, 8 * ui) + 1;
+ phy->hs2lp_time = phy->clkhs2lp_time;
+ phy->clk_to_data_delay = 1 + phy->clklp2hs_time;
+ phy->data_to_clk_delay = ROUND(60 + 52 * ui, 8 * ui) +
+ phy->clkhs2lp_time;
+
+ phy->lane_byte_clk_kHz = phy_rate_kHz / 8;
+ phy->clk_division =
+ DIV_ROUND_UP(phy->lane_byte_clk_kHz, MAX_TX_ESC_CLK);
+}
+
+static u32 dsi_get_dpi_color_coding(enum mipi_dsi_pixel_format format)
+{
+ u32 val;
+
+ /*
+	 * TODO: only RGB888 is supported for now; add more formats later.
+ */
+ switch (format) {
+ case MIPI_DSI_FMT_RGB888:
+ val = DSI_24BITS_1;
+ break;
+ default:
+ val = DSI_24BITS_1;
+ break;
+ }
+
+ return val;
+}
+
+/*
+ * dsi phy reg write function
+ */
+static void dsi_phy_tst_set(void __iomem *base, u32 reg, u32 val)
+{
+ u32 reg_write = 0x10000 + reg;
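+	/*
+	 * Bit 16 appears to mark this as an address (rather than data)
+	 * write on the PHY test interface; this is an assumption, the
+	 * protocol is not documented here.
+	 */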
+
+ /*
+ * latch reg first
+ */
+ writel(reg_write, base + PHY_TST_CTRL1);
+ writel(0x02, base + PHY_TST_CTRL0);
+ writel(0x00, base + PHY_TST_CTRL0);
+
+ /*
+ * then latch value
+ */
+ writel(val, base + PHY_TST_CTRL1);
+ writel(0x02, base + PHY_TST_CTRL0);
+ writel(0x00, base + PHY_TST_CTRL0);
+}
+
+static void dsi_set_phy_timer(void __iomem *base,
+ struct mipi_phy_params *phy,
+ u32 lanes)
+{
+ u32 val;
+
+ /*
+ * Set lane value and phy stop wait time.
+ */
+ val = (lanes - 1) | (PHY_STOP_WAIT_TIME << 8);
+ writel(val, base + PHY_IF_CFG);
+
+ /*
+ * Set phy clk division.
+ */
+ val = readl(base + CLKMGR_CFG) | phy->clk_division;
+ writel(val, base + CLKMGR_CFG);
+
+ /*
+ * Set lp and hs switching params.
+ */
+ dw_update_bits(base + PHY_TMR_CFG, 24, MASK(8), phy->hs2lp_time);
+ dw_update_bits(base + PHY_TMR_CFG, 16, MASK(8), phy->lp2hs_time);
+ dw_update_bits(base + PHY_TMR_LPCLK_CFG, 16, MASK(10),
+ phy->clkhs2lp_time);
+ dw_update_bits(base + PHY_TMR_LPCLK_CFG, 0, MASK(10),
+ phy->clklp2hs_time);
+ dw_update_bits(base + CLK_DATA_TMR_CFG, 8, MASK(8),
+ phy->data_to_clk_delay);
+ dw_update_bits(base + CLK_DATA_TMR_CFG, 0, MASK(8),
+ phy->clk_to_data_delay);
+}
+
+static void dsi_set_mipi_phy(void __iomem *base,
+ struct mipi_phy_params *phy,
+ u32 lanes)
+{
+ u32 delay_count;
+ u32 val;
+ u32 i;
+
+ /* phy timer setting */
+ dsi_set_phy_timer(base, phy, lanes);
+
+ /*
+ * Reset to clean up phy tst params.
+ */
+ writel(0, base + PHY_RSTZ);
+ writel(0, base + PHY_TST_CTRL0);
+ writel(1, base + PHY_TST_CTRL0);
+ writel(0, base + PHY_TST_CTRL0);
+
+ /*
+ * Clock lane timing control setting: TLPX, THS-PREPARE,
+ * THS-ZERO, THS-TRAIL, TWAKEUP.
+ */
+ dsi_phy_tst_set(base, CLK_TLPX, phy->clk_t_lpx);
+ dsi_phy_tst_set(base, CLK_THS_PREPARE, phy->clk_t_hs_prepare);
+ dsi_phy_tst_set(base, CLK_THS_ZERO, phy->clk_t_hs_zero);
+ dsi_phy_tst_set(base, CLK_THS_TRAIL, phy->clk_t_hs_trial);
+ dsi_phy_tst_set(base, CLK_TWAKEUP, phy->clk_t_wakeup);
+
+ /*
+ * Data lane timing control setting: TLPX, THS-PREPARE,
+ * THS-ZERO, THS-TRAIL, TTA-GO, TTA-GET, TWAKEUP.
+ */
+ for (i = 0; i < lanes; i++) {
+ dsi_phy_tst_set(base, DATA_TLPX(i), phy->data_t_lpx);
+ dsi_phy_tst_set(base, DATA_THS_PREPARE(i),
+ phy->data_t_hs_prepare);
+ dsi_phy_tst_set(base, DATA_THS_ZERO(i), phy->data_t_hs_zero);
+ dsi_phy_tst_set(base, DATA_THS_TRAIL(i), phy->data_t_hs_trial);
+ dsi_phy_tst_set(base, DATA_TTA_GO(i), phy->data_t_ta_go);
+ dsi_phy_tst_set(base, DATA_TTA_GET(i), phy->data_t_ta_get);
+ dsi_phy_tst_set(base, DATA_TWAKEUP(i), phy->data_t_wakeup);
+ }
+
+ /*
+ * physical configuration: I, pll I, pll II, pll III,
+ * pll IV, pll V.
+ */
+ dsi_phy_tst_set(base, PHY_CFG_I, phy->hstx_ckg_sel);
+ val = (phy->pll_fbd_div5f << 5) + (phy->pll_fbd_div1f << 4) +
+ (phy->pll_fbd_2p << 1) + phy->pll_enbwt;
+ dsi_phy_tst_set(base, PHY_CFG_PLL_I, val);
+ dsi_phy_tst_set(base, PHY_CFG_PLL_II, phy->pll_fbd_p);
+ dsi_phy_tst_set(base, PHY_CFG_PLL_III, phy->pll_fbd_s);
+ val = (phy->pll_pre_div1p << 7) + phy->pll_pre_p;
+ dsi_phy_tst_set(base, PHY_CFG_PLL_IV, val);
+ val = (5 << 5) + (phy->pll_vco_750M << 4) + (phy->pll_lpf_rs << 2) +
+ phy->pll_lpf_cs;
+ dsi_phy_tst_set(base, PHY_CFG_PLL_V, val);
+
+ writel(PHY_ENABLECLK, base + PHY_RSTZ);
+ udelay(1);
+ writel(PHY_ENABLECLK | PHY_UNSHUTDOWNZ, base + PHY_RSTZ);
+ udelay(1);
+ writel(PHY_ENABLECLK | PHY_UNRSTZ | PHY_UNSHUTDOWNZ, base + PHY_RSTZ);
+ usleep_range(1000, 1500);
+
+ /*
+ * wait for phy's clock ready
+ */
+ delay_count = 100;
+ while (delay_count) {
+ val = readl(base + PHY_STATUS);
+ if ((BIT(0) | BIT(2)) & val)
+ break;
+
+ udelay(1);
+ delay_count--;
+ }
+
+ if (!delay_count)
+ DRM_INFO("phylock and phystopstateclklane is not ready.\n");
+}
+
+static void dsi_set_mode_timing(void __iomem *base,
+ u32 lane_byte_clk_kHz,
+ struct drm_display_mode *mode,
+ enum mipi_dsi_pixel_format format)
+{
+ u32 hfp, hbp, hsw, vfp, vbp, vsw;
+ u32 hline_time;
+ u32 hsa_time;
+ u32 hbp_time;
+ u32 pixel_clk_kHz;
+ int htot, vtot;
+ u32 val;
+ u64 tmp;
+
+ val = dsi_get_dpi_color_coding(format);
+ writel(val, base + DPI_COLOR_CODING);
+
+ val = (mode->flags & DRM_MODE_FLAG_NHSYNC ? 1 : 0) << 2;
+ val |= (mode->flags & DRM_MODE_FLAG_NVSYNC ? 1 : 0) << 1;
+ writel(val, base + DPI_CFG_POL);
+
+ /*
+ * The DSI IP accepts vertical timing using lines as normal,
+ * but horizontal timing is a mixture of pixel-clocks for the
+ * active region and byte-lane clocks for the blanking-related
+ * timings. hfp is specified as the total hline_time in byte-
+ * lane clocks minus hsa, hbp and active.
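+	 * For example (hypothetical numbers): a 148500 kHz pixel clock on
+	 * four RGB888 lanes gives lane_byte_clk_kHz = 148500 * 24 / 4 / 8
+	 * = 111375, so each horizontal value is scaled by roughly 0.75
+	 * byte clocks per pixel clock.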
+ */
+ pixel_clk_kHz = mode->clock;
+ htot = mode->htotal;
+ vtot = mode->vtotal;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ hsw = mode->hsync_end - mode->hsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+ if (vsw > 15) {
+ DRM_DEBUG_DRIVER("vsw exceeded 15\n");
+ vsw = 15;
+ }
+
+ hsa_time = (hsw * lane_byte_clk_kHz) / pixel_clk_kHz;
+ hbp_time = (hbp * lane_byte_clk_kHz) / pixel_clk_kHz;
+ tmp = (u64)htot * (u64)lane_byte_clk_kHz;
+ hline_time = DIV_ROUND_UP(tmp, pixel_clk_kHz);
+
+ /* all specified in byte-lane clocks */
+ writel(hsa_time, base + VID_HSA_TIME);
+ writel(hbp_time, base + VID_HBP_TIME);
+ writel(hline_time, base + VID_HLINE_TIME);
+
+ writel(vsw, base + VID_VSA_LINES);
+ writel(vbp, base + VID_VBP_LINES);
+ writel(vfp, base + VID_VFP_LINES);
+ writel(mode->vdisplay, base + VID_VACTIVE_LINES);
+ writel(mode->hdisplay, base + VID_PKT_SIZE);
+
+ DRM_DEBUG_DRIVER("htot=%d, hfp=%d, hbp=%d, hsw=%d\n",
+ htot, hfp, hbp, hsw);
+ DRM_DEBUG_DRIVER("vtol=%d, vfp=%d, vbp=%d, vsw=%d\n",
+ vtot, vfp, vbp, vsw);
+ DRM_DEBUG_DRIVER("hsa_time=%d, hbp_time=%d, hline_time=%d\n",
+ hsa_time, hbp_time, hline_time);
+}
+
+static void dsi_set_video_mode(void __iomem *base, unsigned long flags)
+{
+ u32 val;
+ u32 mode_mask = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ u32 non_burst_sync_pulse = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ u32 non_burst_sync_event = MIPI_DSI_MODE_VIDEO;
+
+ /*
+ * choose video mode type
+ */
+ if ((flags & mode_mask) == non_burst_sync_pulse)
+ val = DSI_NON_BURST_SYNC_PULSES;
+ else if ((flags & mode_mask) == non_burst_sync_event)
+ val = DSI_NON_BURST_SYNC_EVENTS;
+ else
+ val = DSI_BURST_SYNC_PULSES_1;
+ writel(val, base + VID_MODE_CFG);
+
+ writel(PHY_TXREQUESTCLKHS, base + LPCLK_CTRL);
+ writel(DSI_VIDEO_MODE, base + MODE_CFG);
+}
+
+static void dsi_mipi_init(struct dw_dsi *dsi)
+{
+ struct dsi_hw_ctx *ctx = dsi->ctx;
+ struct mipi_phy_params *phy = &dsi->phy;
+ struct drm_display_mode *mode = &dsi->cur_mode;
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+ void __iomem *base = ctx->base;
+ u32 dphy_req_kHz;
+
+ /*
+ * count phy params
+ */
+ dphy_req_kHz = mode->clock * bpp / dsi->lanes;
+ dsi_get_phy_params(dphy_req_kHz, phy);
+
+ /* reset Core */
+ writel(RESET, base + PWR_UP);
+
+ /* set dsi phy params */
+ dsi_set_mipi_phy(base, phy, dsi->lanes);
+
+ /* set dsi mode timing */
+ dsi_set_mode_timing(base, phy->lane_byte_clk_kHz, mode, dsi->format);
+
+ /* set dsi video mode */
+ dsi_set_video_mode(base, dsi->mode_flags);
+
+ /* dsi wake up */
+ writel(POWERUP, base + PWR_UP);
+
+ DRM_DEBUG_DRIVER("lanes=%d, pixel_clk=%d kHz, bytes_freq=%d kHz\n",
+ dsi->lanes, mode->clock, phy->lane_byte_clk_kHz);
+}
+
+static void dsi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct dw_dsi *dsi = encoder_to_dsi(encoder);
+ struct dsi_hw_ctx *ctx = dsi->ctx;
+ void __iomem *base = ctx->base;
+
+ if (!dsi->enable)
+ return;
+
+ writel(0, base + PWR_UP);
+ writel(0, base + LPCLK_CTRL);
+ writel(0, base + PHY_RSTZ);
+ clk_disable_unprepare(ctx->pclk);
+
+ dsi->enable = false;
+}
+
+static void dsi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct dw_dsi *dsi = encoder_to_dsi(encoder);
+ struct dsi_hw_ctx *ctx = dsi->ctx;
+ int ret;
+
+ if (dsi->enable)
+ return;
+
+ ret = clk_prepare_enable(ctx->pclk);
+ if (ret) {
+ DRM_ERROR("fail to enable pclk: %d\n", ret);
+ return;
+ }
+
+ dsi_mipi_init(dsi);
+
+ dsi->enable = true;
+}
+
+static void dsi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dw_dsi *dsi = encoder_to_dsi(encoder);
+
+ drm_mode_copy(&dsi->cur_mode, adj_mode);
+}
+
+static int dsi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ /* do nothing */
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = {
+ .atomic_check = dsi_encoder_atomic_check,
+ .mode_set = dsi_encoder_mode_set,
+ .enable = dsi_encoder_enable,
+ .disable = dsi_encoder_disable
+};
+
+static const struct drm_encoder_funcs dw_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int dw_drm_encoder_init(struct device *dev,
+ struct drm_device *drm_dev,
+ struct drm_encoder *encoder)
+{
+ int ret;
+ u32 crtc_mask = drm_of_find_possible_crtcs(drm_dev, dev->of_node);
+
+ if (!crtc_mask) {
+ DRM_ERROR("failed to find crtc mask\n");
+ return -EINVAL;
+ }
+
+ encoder->possible_crtcs = crtc_mask;
+ ret = drm_encoder_init(drm_dev, encoder, &dw_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ if (ret) {
+ DRM_ERROR("failed to init dsi encoder\n");
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &dw_encoder_helper_funcs);
+
+ return 0;
+}
+
+static int dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *mdsi)
+{
+ struct dw_dsi *dsi = host_to_dsi(host);
+
+ if (mdsi->lanes < 1 || mdsi->lanes > 4) {
+ DRM_ERROR("dsi device params invalid\n");
+ return -EINVAL;
+ }
+
+ dsi->lanes = mdsi->lanes;
+ dsi->format = mdsi->format;
+ dsi->mode_flags = mdsi->mode_flags;
+
+ return 0;
+}
+
+static int dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *mdsi)
+{
+ /* do nothing */
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops dsi_host_ops = {
+ .attach = dsi_host_attach,
+ .detach = dsi_host_detach,
+};
+
+static int dsi_host_init(struct device *dev, struct dw_dsi *dsi)
+{
+ struct mipi_dsi_host *host = &dsi->host;
+ int ret;
+
+ host->dev = dev;
+ host->ops = &dsi_host_ops;
+ ret = mipi_dsi_host_register(host);
+ if (ret) {
+ DRM_ERROR("failed to register dsi host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi)
+{
+ struct drm_encoder *encoder = &dsi->encoder;
+ struct drm_bridge *bridge = dsi->bridge;
+ int ret;
+
+ /* associate the bridge to dsi encoder */
+ encoder->bridge = bridge;
+ bridge->encoder = encoder;
+
+ ret = drm_bridge_attach(dev, bridge);
+ if (ret) {
+ DRM_ERROR("failed to attach external bridge\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct dsi_data *ddata = dev_get_drvdata(dev);
+ struct dw_dsi *dsi = &ddata->dsi;
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ ret = dw_drm_encoder_init(dev, drm_dev, &dsi->encoder);
+ if (ret)
+ return ret;
+
+ ret = dsi_host_init(dev, dsi);
+ if (ret)
+ return ret;
+
+ ret = dsi_bridge_init(drm_dev, dsi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master, void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops dsi_ops = {
+ .bind = dsi_bind,
+ .unbind = dsi_unbind,
+};
+
+static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
+{
+ struct dsi_hw_ctx *ctx = dsi->ctx;
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *endpoint, *bridge_node;
+ struct drm_bridge *bridge;
+ struct resource *res;
+
+ /*
+	 * Get the endpoint node. In our case, dsi has one output, port 1,
+ * to which the external HDMI bridge is connected.
+ */
+ endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
+ if (!endpoint) {
+ DRM_ERROR("no valid endpoint node\n");
+ return -ENODEV;
+ }
+ of_node_put(endpoint);
+
+ bridge_node = of_graph_get_remote_port_parent(endpoint);
+ if (!bridge_node) {
+ DRM_ERROR("no valid bridge node\n");
+ return -ENODEV;
+ }
+ of_node_put(bridge_node);
+
+ bridge = of_drm_find_bridge(bridge_node);
+ if (!bridge) {
+ DRM_INFO("wait for external HDMI bridge driver.\n");
+ return -EPROBE_DEFER;
+ }
+ dsi->bridge = bridge;
+
+ ctx->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(ctx->pclk)) {
+ DRM_ERROR("failed to get pclk clock\n");
+ return PTR_ERR(ctx->pclk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctx->base)) {
+ DRM_ERROR("failed to remap dsi io region\n");
+ return PTR_ERR(ctx->base);
+ }
+
+ return 0;
+}
+
+static int dsi_probe(struct platform_device *pdev)
+{
+ struct dsi_data *data;
+ struct dw_dsi *dsi;
+ struct dsi_hw_ctx *ctx;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ DRM_ERROR("failed to allocate dsi data.\n");
+ return -ENOMEM;
+ }
+ dsi = &data->dsi;
+ ctx = &data->ctx;
+ dsi->ctx = ctx;
+
+ ret = dsi_parse_dt(pdev, dsi);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, data);
+
+ return component_add(&pdev->dev, &dsi_ops);
+}
+
+static int dsi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dsi_ops);
+
+ return 0;
+}
+
+static const struct of_device_id dsi_of_match[] = {
+ {.compatible = "hisilicon,hi6220-dsi"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, dsi_of_match);
+
+static struct platform_driver dsi_driver = {
+ .probe = dsi_probe,
+ .remove = dsi_remove,
+ .driver = {
+ .name = "dw-dsi",
+ .of_match_table = dsi_of_match,
+ },
+};
+
+module_platform_driver(dsi_driver);
+
+MODULE_AUTHOR("Xinliang Liu <xinliang.liu@linaro.org>");
+MODULE_AUTHOR("Xinliang Liu <z.liuxinliang@hisilicon.com>");
+MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>");
+MODULE_DESCRIPTION("DesignWare MIPI DSI Host Controller v1.02 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h b/drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h
new file mode 100644
index 000000000..18808fc9f
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_dsi_reg.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DW_DSI_REG_H__
+#define __DW_DSI_REG_H__
+
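+/*
+ * MASK(n) is an n-bit all-ones mask, e.g. MASK(8) == 0xff; it pairs
+ * with dw_update_bits() at the bottom of this file.
+ */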
+#define MASK(x) (BIT(x) - 1)
+
+/*
+ * regs
+ */
+#define PWR_UP 0x04 /* Core power-up */
+#define RESET 0
+#define POWERUP BIT(0)
+#define PHY_IF_CFG 0xA4 /* D-PHY interface configuration */
+#define CLKMGR_CFG 0x08 /* the internal clock dividers */
+#define PHY_RSTZ 0xA0 /* D-PHY reset control */
+#define PHY_ENABLECLK BIT(2)
+#define PHY_UNRSTZ BIT(1)
+#define PHY_UNSHUTDOWNZ BIT(0)
+#define PHY_TST_CTRL0 0xB4 /* D-PHY test interface control 0 */
+#define PHY_TST_CTRL1 0xB8 /* D-PHY test interface control 1 */
+#define CLK_TLPX 0x10
+#define CLK_THS_PREPARE 0x11
+#define CLK_THS_ZERO 0x12
+#define CLK_THS_TRAIL 0x13
+#define CLK_TWAKEUP 0x14
+#define DATA_TLPX(x) (0x20 + ((x) << 4))
+#define DATA_THS_PREPARE(x) (0x21 + ((x) << 4))
+#define DATA_THS_ZERO(x) (0x22 + ((x) << 4))
+#define DATA_THS_TRAIL(x) (0x23 + ((x) << 4))
+#define DATA_TTA_GO(x) (0x24 + ((x) << 4))
+#define DATA_TTA_GET(x) (0x25 + ((x) << 4))
+#define DATA_TWAKEUP(x) (0x26 + ((x) << 4))
+#define PHY_CFG_I 0x60
+#define PHY_CFG_PLL_I 0x63
+#define PHY_CFG_PLL_II 0x64
+#define PHY_CFG_PLL_III 0x65
+#define PHY_CFG_PLL_IV 0x66
+#define PHY_CFG_PLL_V 0x67
+#define DPI_COLOR_CODING 0x10 /* DPI color coding */
+#define DPI_CFG_POL 0x14 /* DPI polarity configuration */
+#define VID_HSA_TIME 0x48 /* Horizontal Sync Active time */
+#define VID_HBP_TIME 0x4C /* Horizontal Back Porch time */
+#define VID_HLINE_TIME 0x50 /* Line time */
+#define VID_VSA_LINES 0x54 /* Vertical Sync Active period */
+#define VID_VBP_LINES 0x58 /* Vertical Back Porch period */
+#define VID_VFP_LINES 0x5C /* Vertical Front Porch period */
+#define VID_VACTIVE_LINES 0x60 /* Vertical resolution */
+#define VID_PKT_SIZE 0x3C /* Video packet size */
+#define VID_MODE_CFG 0x38 /* Video mode configuration */
+#define PHY_TMR_CFG 0x9C /* Data lanes timing configuration */
+#define BTA_TO_CNT 0x8C /* Response timeout definition */
+#define PHY_TMR_LPCLK_CFG 0x98 /* clock lane timing configuration */
+#define CLK_DATA_TMR_CFG 0xCC
+#define LPCLK_CTRL 0x94 /* Low-power in clock lane */
+#define PHY_TXREQUESTCLKHS BIT(0)
+#define MODE_CFG 0x34 /* Video or Command mode selection */
+#define PHY_STATUS 0xB0 /* D-PHY PPI status interface */
+
+#define PHY_STOP_WAIT_TIME 0x30
+
+/*
+ * regs relevant enum
+ */
+enum dpi_color_coding {
+ DSI_24BITS_1 = 5,
+};
+
+enum dsi_video_mode_type {
+ DSI_NON_BURST_SYNC_PULSES = 0,
+ DSI_NON_BURST_SYNC_EVENTS,
+ DSI_BURST_SYNC_PULSES_1,
+ DSI_BURST_SYNC_PULSES_2
+};
+
+enum dsi_work_mode {
+ DSI_VIDEO_MODE = 0,
+ DSI_COMMAND_MODE
+};
+
+/*
+ * Register Write/Read Helper functions
+ */
+static inline void dw_update_bits(void __iomem *addr, u32 bit_start,
+ u32 mask, u32 val)
+{
+ u32 tmp, orig;
+
+ orig = readl(addr);
+ tmp = orig & ~(mask << bit_start);
+ tmp |= (val & mask) << bit_start;
+ writel(tmp, addr);
+}
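+
+/*
+ * Usage sketch: dw_update_bits(base + PHY_TMR_CFG, 24, MASK(8), v)
+ * replaces bits [31:24] of PHY_TMR_CFG with the low 8 bits of v.
+ */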
+
+#endif /* __DW_DSI_REG_H__ */
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
new file mode 100644
index 000000000..4cf281b7e
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __KIRIN_ADE_REG_H__
+#define __KIRIN_ADE_REG_H__
+
+/*
+ * ADE Registers
+ */
+#define MASK(x) (BIT(x) - 1)
+
+#define ADE_CTRL 0x0004
+#define FRM_END_START_OFST 0
+#define FRM_END_START_MASK MASK(2)
+#define AUTO_CLK_GATE_EN_OFST 0
+#define AUTO_CLK_GATE_EN BIT(0)
+#define ADE_DISP_SRC_CFG 0x0018
+#define ADE_CTRL1 0x008C
+#define ADE_EN 0x0100
+#define ADE_DISABLE 0
+#define ADE_ENABLE 1
+/* reset and reload regs */
+#define ADE_SOFT_RST_SEL(x) (0x0078 + (x) * 0x4)
+#define ADE_RELOAD_DIS(x) (0x00AC + (x) * 0x4)
+#define RDMA_OFST 0
+#define CLIP_OFST 15
+#define SCL_OFST 21
+#define CTRAN_OFST 24
+#define OVLY_OFST 37 /* 32+5 */
+/* channel regs */
+#define RD_CH_CTRL(x) (0x1004 + (x) * 0x80)
+#define RD_CH_ADDR(x) (0x1008 + (x) * 0x80)
+#define RD_CH_SIZE(x) (0x100C + (x) * 0x80)
+#define RD_CH_STRIDE(x) (0x1010 + (x) * 0x80)
+#define RD_CH_SPACE(x) (0x1014 + (x) * 0x80)
+#define RD_CH_EN(x) (0x1020 + (x) * 0x80)
+/* overlay regs */
+#define ADE_OVLY1_TRANS_CFG 0x002C
+#define ADE_OVLY_CTL 0x0098
+#define ADE_OVLY_CH_XY0(x) (0x2004 + (x) * 4)
+#define ADE_OVLY_CH_XY1(x) (0x2024 + (x) * 4)
+#define ADE_OVLY_CH_CTL(x) (0x204C + (x) * 4)
+#define ADE_OVLY_OUTPUT_SIZE(x) (0x2070 + (x) * 8)
+#define OUTPUT_XSIZE_OFST 16
+#define ADE_OVLYX_CTL(x) (0x209C + (x) * 4)
+#define CH_OVLY_SEL_OFST(x) ((x) * 4)
+#define CH_OVLY_SEL_MASK MASK(2)
+#define CH_OVLY_SEL_VAL(x) ((x) + 1)
+#define CH_ALP_MODE_OFST 0
+#define CH_ALP_SEL_OFST 2
+#define CH_UNDER_ALP_SEL_OFST 4
+#define CH_EN_OFST 6
+#define CH_ALP_GBL_OFST 15
+#define CH_SEL_OFST 28
+/* ctran regs */
+#define ADE_CTRAN_DIS(x) (0x5004 + (x) * 0x100)
+#define CTRAN_BYPASS_ON 1
+#define CTRAN_BYPASS_OFF 0
+#define ADE_CTRAN_IMAGE_SIZE(x) (0x503C + (x) * 0x100)
+/* clip regs */
+#define ADE_CLIP_DISABLE(x) (0x6800 + (x) * 0x100)
+#define ADE_CLIP_SIZE0(x) (0x6804 + (x) * 0x100)
+#define ADE_CLIP_SIZE1(x) (0x6808 + (x) * 0x100)
+
+/*
+ * LDI Registers
+ */
+#define LDI_HRZ_CTRL0 0x7400
+#define HBP_OFST 20
+#define LDI_HRZ_CTRL1 0x7404
+#define LDI_VRT_CTRL0 0x7408
+#define VBP_OFST 20
+#define LDI_VRT_CTRL1 0x740C
+#define LDI_PLR_CTRL 0x7410
+#define FLAG_NVSYNC BIT(0)
+#define FLAG_NHSYNC BIT(1)
+#define FLAG_NPIXCLK BIT(2)
+#define FLAG_NDE BIT(3)
+#define LDI_DSP_SIZE 0x7414
+#define VSIZE_OFST 20
+#define LDI_INT_EN 0x741C
+#define FRAME_END_INT_EN_OFST 1
+#define LDI_CTRL 0x7420
+#define BPP_OFST 3
+#define DATA_GATE_EN BIT(2)
+#define LDI_EN BIT(0)
+#define LDI_MSK_INT 0x7428
+#define LDI_INT_CLR 0x742C
+#define LDI_WORK_MODE 0x7430
+#define LDI_HDMI_DSI_GT 0x7434
+
+/*
+ * ADE media bus service regs
+ */
+#define ADE0_QOSGENERATOR_MODE 0x010C
+#define QOSGENERATOR_MODE_MASK MASK(2)
+#define ADE0_QOSGENERATOR_EXTCONTROL 0x0118
+#define SOCKET_QOS_EN BIT(0)
+#define ADE1_QOSGENERATOR_MODE 0x020C
+#define ADE1_QOSGENERATOR_EXTCONTROL 0x0218
+
+/*
+ * Enums for the ADE registers
+ */
+enum frame_end_start {
+ /* regs take effect on every vsync */
+ REG_EFFECTIVE_IN_VSYNC = 0,
+ /* regs take effect on the first ADE enable and at every frame end */
+ REG_EFFECTIVE_IN_ADEEN_FRMEND,
+ /* regs take effect immediately when ADE is enabled */
+ REG_EFFECTIVE_IN_ADEEN,
+ /* regs take effect on the first vsync and at every frame end */
+ REG_EFFECTIVE_IN_VSYNC_FRMEND
+};
+
+enum ade_fb_format {
+ ADE_RGB_565 = 0,
+ ADE_BGR_565,
+ ADE_XRGB_8888,
+ ADE_XBGR_8888,
+ ADE_ARGB_8888,
+ ADE_ABGR_8888,
+ ADE_RGBA_8888,
+ ADE_BGRA_8888,
+ ADE_RGB_888,
+ ADE_BGR_888 = 9,
+ ADE_FORMAT_UNSUPPORT = 800
+};
+
+enum ade_channel {
+ ADE_CH1 = 0, /* channel 1 for primary plane */
+ ADE_CH_NUM
+};
+
+enum ade_scale {
+ ADE_SCL1 = 0,
+ ADE_SCL2,
+ ADE_SCL3,
+ ADE_SCL_NUM
+};
+
+enum ade_ctran {
+ ADE_CTRAN1 = 0,
+ ADE_CTRAN2,
+ ADE_CTRAN3,
+ ADE_CTRAN4,
+ ADE_CTRAN5,
+ ADE_CTRAN6,
+ ADE_CTRAN_NUM
+};
+
+enum ade_overlay {
+ ADE_OVLY1 = 0,
+ ADE_OVLY2,
+ ADE_OVLY3,
+ ADE_OVLY_NUM
+};
+
+enum ade_alpha_mode {
+ ADE_ALP_GLOBAL = 0,
+ ADE_ALP_PIXEL,
+ ADE_ALP_PIXEL_AND_GLB
+};
+
+enum ade_alpha_blending_mode {
+ ADE_ALP_MUL_COEFF_0 = 0, /* alpha */
+ ADE_ALP_MUL_COEFF_1, /* 1-alpha */
+ ADE_ALP_MUL_COEFF_2, /* 0 */
+ ADE_ALP_MUL_COEFF_3 /* 1 */
+};
+
+/*
+ * Enums for the LDI registers
+ */
+enum dsi_pclk_en {
+ DSI_PCLK_ON = 0,
+ DSI_PCLK_OFF
+};
+
+enum ldi_output_format {
+ LDI_OUT_RGB_565 = 0,
+ LDI_OUT_RGB_666,
+ LDI_OUT_RGB_888
+};
+
+enum ldi_work_mode {
+ TEST_MODE = 0,
+ NORMAL_MODE
+};
+
+enum ldi_input_source {
+ DISP_SRC_NONE = 0,
+ DISP_SRC_OVLY2,
+ DISP_SRC_DISP,
+ DISP_SRC_ROT,
+ DISP_SRC_SCL2
+};
+
+/*
+ * Enums for the ADE media bus service registers
+ */
+enum qos_generator_mode {
+ FIXED_MODE = 0,
+ LIMITER_MODE,
+ BYPASS_MODE,
+ REGULATOR_MODE
+};
+
+/*
+ * Register Write/Read Helper functions
+ */
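+/* Same read-modify-write pattern as dw_update_bits() in the DSI header. */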
+static inline void ade_update_bits(void __iomem *addr, u32 bit_start,
+ u32 mask, u32 val)
+{
+ u32 tmp, orig;
+
+ orig = readl(addr);
+ tmp = orig & ~(mask << bit_start);
+ tmp |= (val & mask) << bit_start;
+ writel(tmp, addr);
+}
+
+#endif
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
new file mode 100644
index 000000000..fba6372d0
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -0,0 +1,1057 @@
+/*
+ * Hisilicon Hi6220 SoC ADE (Advanced Display Engine) crtc & plane driver
+ *
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * Author:
+ * Xinliang Liu <z.liuxinliang@hisilicon.com>
+ * Xinliang Liu <xinliang.liu@linaro.org>
+ * Xinwei Kong <kong.kongxinwei@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <video/display_timing.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "kirin_drm_drv.h"
+#include "kirin_ade_reg.h"
+
+#define PRIMARY_CH ADE_CH1 /* primary plane */
+#define OUT_OVLY ADE_OVLY2 /* output overlay compositor */
+#define ADE_DEBUG 1
+
+#define to_ade_crtc(crtc) \
+ container_of(crtc, struct ade_crtc, base)
+
+#define to_ade_plane(plane) \
+ container_of(plane, struct ade_plane, base)
+
+struct ade_hw_ctx {
+ void __iomem *base;
+ struct regmap *noc_regmap;
+ struct clk *ade_core_clk;
+ struct clk *media_noc_clk;
+ struct clk *ade_pix_clk;
+ struct reset_control *reset;
+ bool power_on;
+ int irq;
+};
+
+struct ade_crtc {
+ struct drm_crtc base;
+ struct ade_hw_ctx *ctx;
+ bool enable;
+ u32 out_format;
+};
+
+struct ade_plane {
+ struct drm_plane base;
+ void *ctx;
+ u8 ch; /* channel */
+};
+
+struct ade_data {
+ struct ade_crtc acrtc;
+ struct ade_plane aplane[ADE_CH_NUM];
+ struct ade_hw_ctx ctx;
+};
+
+/* ade-format info: */
+struct ade_format {
+ u32 pixel_format;
+ enum ade_fb_format ade_format;
+};
+
+static const struct ade_format ade_formats[] = {
+ /* 16bpp RGB: */
+ { DRM_FORMAT_RGB565, ADE_RGB_565 },
+ { DRM_FORMAT_BGR565, ADE_BGR_565 },
+ /* 24bpp RGB: */
+ { DRM_FORMAT_RGB888, ADE_RGB_888 },
+ { DRM_FORMAT_BGR888, ADE_BGR_888 },
+ /* 32bpp [A]RGB: */
+ { DRM_FORMAT_XRGB8888, ADE_XRGB_8888 },
+ { DRM_FORMAT_XBGR8888, ADE_XBGR_8888 },
+ { DRM_FORMAT_RGBA8888, ADE_RGBA_8888 },
+ { DRM_FORMAT_BGRA8888, ADE_BGRA_8888 },
+ { DRM_FORMAT_ARGB8888, ADE_ARGB_8888 },
+ { DRM_FORMAT_ABGR8888, ADE_ABGR_8888 },
+};
+
+static const u32 channel_formats1[] = {
+ /* channel 1,2,3,4 */
+ DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888
+};
+
+u32 ade_get_channel_formats(u8 ch, const u32 **formats)
+{
+ switch (ch) {
+ case ADE_CH1:
+ *formats = channel_formats1;
+ return ARRAY_SIZE(channel_formats1);
+ default:
+ DRM_ERROR("no this channel %d\n", ch);
+ *formats = NULL;
+ return 0;
+ }
+}
+
+/* convert from fourcc format to ade format */
+static u32 ade_get_format(u32 pixel_format)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ade_formats); i++)
+ if (ade_formats[i].pixel_format == pixel_format)
+ return ade_formats[i].ade_format;
+
+ /* not found */
+ DRM_ERROR("unsupported pixel format, fourcc=%d\n",
+ pixel_format);
+ return ADE_FORMAT_UNSUPPORT;
+}
+
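+/*
+ * The reload-disable bits span two 32-bit ADE_RELOAD_DIS registers, so a
+ * logical bit number is split into a register index (bit_num / 32) and a
+ * bit offset within that register (bit_num % 32).
+ */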
+static void ade_update_reload_bit(void __iomem *base, u32 bit_num, u32 val)
+{
+ u32 bit_ofst, reg_num;
+
+ bit_ofst = bit_num % 32;
+ reg_num = bit_num / 32;
+
+ ade_update_bits(base + ADE_RELOAD_DIS(reg_num), bit_ofst,
+ MASK(1), !!val);
+}
+
+static u32 ade_read_reload_bit(void __iomem *base, u32 bit_num)
+{
+ u32 tmp, bit_ofst, reg_num;
+
+ bit_ofst = bit_num % 32;
+ reg_num = bit_num / 32;
+
+ tmp = readl(base + ADE_RELOAD_DIS(reg_num));
+ return !!(BIT(bit_ofst) & tmp);
+}
+
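+/* basic controller setup, run each time the ADE is powered up */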
+static void ade_init(struct ade_hw_ctx *ctx)
+{
+ void __iomem *base = ctx->base;
+
+ /* enable clk gate */
+ ade_update_bits(base + ADE_CTRL1, AUTO_CLK_GATE_EN_OFST,
+ AUTO_CLK_GATE_EN, ADE_ENABLE);
+ /* clear overlay */
+ writel(0, base + ADE_OVLY1_TRANS_CFG);
+ writel(0, base + ADE_OVLY_CTL);
+ writel(0, base + ADE_OVLYX_CTL(OUT_OVLY));
+ /* clear reset and reload regs */
+ writel(MASK(32), base + ADE_SOFT_RST_SEL(0));
+ writel(MASK(32), base + ADE_SOFT_RST_SEL(1));
+ writel(MASK(32), base + ADE_RELOAD_DIS(0));
+ writel(MASK(32), base + ADE_RELOAD_DIS(1));
+ /*
+ * for video mode, all the ade registers should
+ * become effective at frame end.
+ */
+ ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST,
+ FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
+}
+
+static void ade_set_pix_clk(struct ade_hw_ctx *ctx,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ u32 clk_Hz = mode->clock * 1000;
+ int ret;
+
+ /*
+ * Success should be guaranteed in the mode_valid callback,
+ * so failure shouldn't happen here
+ */
+ ret = clk_set_rate(ctx->ade_pix_clk, clk_Hz);
+ if (ret)
+ DRM_ERROR("failed to set pixel clk %dHz (%d)\n", clk_Hz, ret);
+ adj_mode->clock = clk_get_rate(ctx->ade_pix_clk) / 1000;
+}
+
+static void ade_ldi_set_mode(struct ade_crtc *acrtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+ u32 width = mode->hdisplay;
+ u32 height = mode->vdisplay;
+ u32 hfp, hbp, hsw, vfp, vbp, vsw;
+ u32 plr_flags;
+
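+ /* derive sync polarities and porch/sync widths from the DRM mode */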
+ plr_flags = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? FLAG_NVSYNC : 0;
+ plr_flags |= (mode->flags & DRM_MODE_FLAG_NHSYNC) ? FLAG_NHSYNC : 0;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ hsw = mode->hsync_end - mode->hsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+ if (vsw > 15) {
+ DRM_DEBUG_DRIVER("vsw exceeded 15\n");
+ vsw = 15;
+ }
+
+ writel((hbp << HBP_OFST) | hfp, base + LDI_HRZ_CTRL0);
+ /* the configured value is actual value - 1 */
+ writel(hsw - 1, base + LDI_HRZ_CTRL1);
+ writel((vbp << VBP_OFST) | vfp, base + LDI_VRT_CTRL0);
+ /* the configured value is actual value - 1 */
+ writel(vsw - 1, base + LDI_VRT_CTRL1);
+ /* the configured value is actual value - 1 */
+ writel(((height - 1) << VSIZE_OFST) | (width - 1),
+ base + LDI_DSP_SIZE);
+ writel(plr_flags, base + LDI_PLR_CTRL);
+
+ /* set overlay compositor output size */
+ writel(((width - 1) << OUTPUT_XSIZE_OFST) | (height - 1),
+ base + ADE_OVLY_OUTPUT_SIZE(OUT_OVLY));
+
+ /* ctran6 setting */
+ writel(CTRAN_BYPASS_ON, base + ADE_CTRAN_DIS(ADE_CTRAN6));
+ /* the configured value is actual value - 1 */
+ writel(width * height - 1, base + ADE_CTRAN_IMAGE_SIZE(ADE_CTRAN6));
+ ade_update_reload_bit(base, CTRAN_OFST + ADE_CTRAN6, 0);
+
+ ade_set_pix_clk(ctx, mode, adj_mode);
+
+ DRM_DEBUG_DRIVER("set mode: %dx%d\n", width, height);
+}
+
+static int ade_power_up(struct ade_hw_ctx *ctx)
+{
+ int ret;
+
+ ret = clk_prepare_enable(ctx->media_noc_clk);
+ if (ret) {
+ DRM_ERROR("failed to enable media_noc_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(ctx->reset);
+ if (ret) {
+ DRM_ERROR("failed to deassert reset\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(ctx->ade_core_clk);
+ if (ret) {
+ DRM_ERROR("failed to enable ade_core_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ade_init(ctx);
+ ctx->power_on = true;
+ return 0;
+}
+
+static void ade_power_down(struct ade_hw_ctx *ctx)
+{
+ void __iomem *base = ctx->base;
+
+ writel(ADE_DISABLE, base + LDI_CTRL);
+ /* dsi pixel off */
+ writel(DSI_PCLK_OFF, base + LDI_HDMI_DSI_GT);
+
+ clk_disable_unprepare(ctx->ade_core_clk);
+ reset_control_assert(ctx->reset);
+ clk_disable_unprepare(ctx->media_noc_clk);
+ ctx->power_on = false;
+}
+
+static void ade_set_medianoc_qos(struct ade_crtc *acrtc)
+{
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct regmap *map = ctx->noc_regmap;
+
+ regmap_update_bits(map, ADE0_QOSGENERATOR_MODE,
+ QOSGENERATOR_MODE_MASK, BYPASS_MODE);
+ regmap_update_bits(map, ADE0_QOSGENERATOR_EXTCONTROL,
+ SOCKET_QOS_EN, SOCKET_QOS_EN);
+
+ regmap_update_bits(map, ADE1_QOSGENERATOR_MODE,
+ QOSGENERATOR_MODE_MASK, BYPASS_MODE);
+ regmap_update_bits(map, ADE1_QOSGENERATOR_EXTCONTROL,
+ SOCKET_QOS_EN, SOCKET_QOS_EN);
+}
+
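+/* vblank events are driven by the LDI frame-end interrupt */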
+static int ade_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct kirin_drm_private *priv = dev->dev_private;
+ struct ade_crtc *acrtc = to_ade_crtc(priv->crtc[pipe]);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+
+ if (!ctx->power_on)
+ (void)ade_power_up(ctx);
+
+ ade_update_bits(base + LDI_INT_EN, FRAME_END_INT_EN_OFST,
+ MASK(1), 1);
+
+ return 0;
+}
+
+static void ade_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct kirin_drm_private *priv = dev->dev_private;
+ struct ade_crtc *acrtc = to_ade_crtc(priv->crtc[pipe]);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+
+ if (!ctx->power_on) {
+ DRM_ERROR("power is down! vblank disable fail\n");
+ return;
+ }
+
+ ade_update_bits(base + LDI_INT_EN, FRAME_END_INT_EN_OFST,
+ MASK(1), 0);
+}
+
+static irqreturn_t ade_irq_handler(int irq, void *data)
+{
+ struct ade_crtc *acrtc = data;
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct drm_crtc *crtc = &acrtc->base;
+ void __iomem *base = ctx->base;
+ u32 status;
+
+ status = readl(base + LDI_MSK_INT);
+ DRM_DEBUG_VBL("LDI IRQ: status=0x%X\n", status);
+
+ /* vblank irq */
+ if (status & BIT(FRAME_END_INT_EN_OFST)) {
+ ade_update_bits(base + LDI_INT_CLR, FRAME_END_INT_EN_OFST,
+ MASK(1), 1);
+ drm_crtc_handle_vblank(crtc);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void ade_display_enable(struct ade_crtc *acrtc)
+{
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+ u32 out_fmt = acrtc->out_format;
+
+ /* enable output overlay compositor */
+ writel(ADE_ENABLE, base + ADE_OVLYX_CTL(OUT_OVLY));
+ ade_update_reload_bit(base, OVLY_OFST + OUT_OVLY, 0);
+
+ /* display source setting */
+ writel(DISP_SRC_OVLY2, base + ADE_DISP_SRC_CFG);
+
+ /* enable ade */
+ writel(ADE_ENABLE, base + ADE_EN);
+ /* enable ldi */
+ writel(NORMAL_MODE, base + LDI_WORK_MODE);
+ writel((out_fmt << BPP_OFST) | DATA_GATE_EN | LDI_EN,
+ base + LDI_CTRL);
+ /* dsi pixel on */
+ writel(DSI_PCLK_ON, base + LDI_HDMI_DSI_GT);
+}
+
+#if ADE_DEBUG
+static void ade_rdma_dump_regs(void __iomem *base, u32 ch)
+{
+ u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
+ u32 val;
+
+ reg_ctrl = RD_CH_CTRL(ch);
+ reg_addr = RD_CH_ADDR(ch);
+ reg_size = RD_CH_SIZE(ch);
+ reg_stride = RD_CH_STRIDE(ch);
+ reg_space = RD_CH_SPACE(ch);
+ reg_en = RD_CH_EN(ch);
+
+ val = ade_read_reload_bit(base, RDMA_OFST + ch);
+ DRM_DEBUG_DRIVER("[rdma%d]: reload(%d)\n", ch + 1, val);
+ val = readl(base + reg_ctrl);
+ DRM_DEBUG_DRIVER("[rdma%d]: reg_ctrl(0x%08x)\n", ch + 1, val);
+ val = readl(base + reg_addr);
+ DRM_DEBUG_DRIVER("[rdma%d]: reg_addr(0x%08x)\n", ch + 1, val);
+ val = readl(base + reg_size);
+ DRM_DEBUG_DRIVER("[rdma%d]: reg_size(0x%08x)\n", ch + 1, val);
+ val = readl(base + reg_stride);
+ DRM_DEBUG_DRIVER("[rdma%d]: reg_stride(0x%08x)\n", ch + 1, val);
+ val = readl(base + reg_space);
+ DRM_DEBUG_DRIVER("[rdma%d]: reg_space(0x%08x)\n", ch + 1, val);
+ val = readl(base + reg_en);
+ DRM_DEBUG_DRIVER("[rdma%d]: reg_en(0x%08x)\n", ch + 1, val);
+}
+
+static void ade_clip_dump_regs(void __iomem *base, u32 ch)
+{
+ u32 val;
+
+ val = ade_read_reload_bit(base, CLIP_OFST + ch);
+ DRM_DEBUG_DRIVER("[clip%d]: reload(%d)\n", ch + 1, val);
+ val = readl(base + ADE_CLIP_DISABLE(ch));
+ DRM_DEBUG_DRIVER("[clip%d]: reg_clip_disable(0x%08x)\n", ch + 1, val);
+ val = readl(base + ADE_CLIP_SIZE0(ch));
+ DRM_DEBUG_DRIVER("[clip%d]: reg_clip_size0(0x%08x)\n", ch + 1, val);
+ val = readl(base + ADE_CLIP_SIZE1(ch));
+ DRM_DEBUG_DRIVER("[clip%d]: reg_clip_size1(0x%08x)\n", ch + 1, val);
+}
+
+static void ade_compositor_routing_dump_regs(void __iomem *base, u32 ch)
+{
+ u8 ovly_ch = 0; /* TODO: Only primary plane now */
+ u32 val;
+
+ val = readl(base + ADE_OVLY_CH_XY0(ovly_ch));
+ DRM_DEBUG_DRIVER("[overlay ch%d]: reg_ch_xy0(0x%08x)\n", ovly_ch, val);
+ val = readl(base + ADE_OVLY_CH_XY1(ovly_ch));
+ DRM_DEBUG_DRIVER("[overlay ch%d]: reg_ch_xy1(0x%08x)\n", ovly_ch, val);
+ val = readl(base + ADE_OVLY_CH_CTL(ovly_ch));
+ DRM_DEBUG_DRIVER("[overlay ch%d]: reg_ch_ctl(0x%08x)\n", ovly_ch, val);
+}
+
+static void ade_dump_overlay_compositor_regs(void __iomem *base, u32 comp)
+{
+ u32 val;
+
+ val = ade_read_reload_bit(base, OVLY_OFST + comp);
+ DRM_DEBUG_DRIVER("[overlay%d]: reload(%d)\n", comp + 1, val);
+ val = readl(base + ADE_OVLYX_CTL(comp));
+ DRM_DEBUG_DRIVER("[overlay%d]: reg_ctl(0x%08x)\n", comp + 1, val);
+ val = readl(base + ADE_OVLY_CTL);
+ DRM_DEBUG_DRIVER("ovly_ctl(0x%08x)\n", val);
+}
+
+static void ade_dump_regs(void __iomem *base)
+{
+ u32 i;
+
+ /* dump channel regs */
+ for (i = 0; i < ADE_CH_NUM; i++) {
+ /* dump rdma regs */
+ ade_rdma_dump_regs(base, i);
+
+ /* dump clip regs */
+ ade_clip_dump_regs(base, i);
+
+ /* dump compositor routing regs */
+ ade_compositor_routing_dump_regs(base, i);
+ }
+
+ /* dump overlay compositor regs */
+ ade_dump_overlay_compositor_regs(base, OUT_OVLY);
+}
+#else
+static void ade_dump_regs(void __iomem *base) { }
+#endif
+
+static void ade_crtc_enable(struct drm_crtc *crtc)
+{
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ int ret;
+
+ if (acrtc->enable)
+ return;
+
+ if (!ctx->power_on) {
+ ret = ade_power_up(ctx);
+ if (ret)
+ return;
+ }
+
+ ade_set_medianoc_qos(acrtc);
+ ade_display_enable(acrtc);
+ ade_dump_regs(ctx->base);
+ acrtc->enable = true;
+}
+
+static void ade_crtc_disable(struct drm_crtc *crtc)
+{
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+
+ if (!acrtc->enable)
+ return;
+
+ ade_power_down(ctx);
+ acrtc->enable = false;
+}
+
+static int ade_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ /* do nothing */
+ return 0;
+}
+
+static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ struct drm_display_mode *mode = &crtc->state->mode;
+ struct drm_display_mode *adj_mode = &crtc->state->adjusted_mode;
+
+ if (!ctx->power_on)
+ (void)ade_power_up(ctx);
+ ade_ldi_set_mode(acrtc, mode, adj_mode);
+}
+
+static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+
+ if (!ctx->power_on)
+ (void)ade_power_up(ctx);
+}
+
+static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct ade_crtc *acrtc = to_ade_crtc(crtc);
+ struct ade_hw_ctx *ctx = acrtc->ctx;
+ void __iomem *base = ctx->base;
+
+ /* register writes only take effect while the crtc is enabled */
+ if (acrtc->enable) {
+ ade_dump_regs(base);
+ /* flush ade registers */
+ writel(ADE_ENABLE, base + ADE_EN);
+ }
+}
+
+static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
+ .enable = ade_crtc_enable,
+ .disable = ade_crtc_disable,
+ .atomic_check = ade_crtc_atomic_check,
+ .mode_set_nofb = ade_crtc_mode_set_nofb,
+ .atomic_begin = ade_crtc_atomic_begin,
+ .atomic_flush = ade_crtc_atomic_flush,
+};
+
+static const struct drm_crtc_funcs ade_crtc_funcs = {
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .set_property = drm_atomic_helper_crtc_set_property,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int ade_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *plane)
+{
+ struct kirin_drm_private *priv = dev->dev_private;
+ struct device_node *port;
+ int ret;
+
+ /*
+ * set the crtc port so that the drm_of_find_possible_crtcs()
+ * call works
+ */
+ port = of_get_child_by_name(dev->dev->of_node, "port");
+ if (!port) {
+ DRM_ERROR("no port node found in %s\n",
+ dev->dev->of_node->full_name);
+ return -EINVAL;
+ }
+ of_node_put(port);
+ crtc->port = port;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &ade_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("failed to init crtc.\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &ade_crtc_helper_funcs);
+ priv->crtc[drm_crtc_index(crtc)] = crtc;
+
+ return 0;
+}
+
+static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
+ u32 ch, u32 y, u32 in_h, u32 fmt)
+{
+ struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
+ u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
+ u32 stride = fb->pitches[0];
+ u32 addr = (u32)obj->paddr + y * stride;
+
+ DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
+ ch + 1, y, in_h, stride, (u32)obj->paddr);
+ DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
+ addr, fb->width, fb->height, fmt,
+ drm_get_format_name(fb->pixel_format));
+
+ /* get reg offset */
+ reg_ctrl = RD_CH_CTRL(ch);
+ reg_addr = RD_CH_ADDR(ch);
+ reg_size = RD_CH_SIZE(ch);
+ reg_stride = RD_CH_STRIDE(ch);
+ reg_space = RD_CH_SPACE(ch);
+ reg_en = RD_CH_EN(ch);
+
+ /*
+ * TODO: set rotation
+ */
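+ /* the pixel format field sits in bits [20:16] of the RDMA ctrl reg */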
+ writel((fmt << 16) & 0x1f0000, base + reg_ctrl);
+ writel(addr, base + reg_addr);
+ writel((in_h << 16) | stride, base + reg_size);
+ writel(stride, base + reg_stride);
+ writel(in_h * stride, base + reg_space);
+ writel(ADE_ENABLE, base + reg_en);
+ ade_update_reload_bit(base, RDMA_OFST + ch, 0);
+}
+
+static void ade_rdma_disable(void __iomem *base, u32 ch)
+{
+ u32 reg_en;
+
+ /* get reg offset */
+ reg_en = RD_CH_EN(ch);
+ writel(0, base + reg_en);
+ ade_update_reload_bit(base, RDMA_OFST + ch, 1);
+}
+
+static void ade_clip_set(void __iomem *base, u32 ch, u32 fb_w, u32 x,
+ u32 in_w, u32 in_h)
+{
+ u32 disable_val;
+ u32 clip_left;
+ u32 clip_right;
+
+ /*
+ * clip width, no need to clip height
+ */
+ if (fb_w == in_w) { /* bypass */
+ disable_val = 1;
+ clip_left = 0;
+ clip_right = 0;
+ } else {
+ disable_val = 0;
+ clip_left = x;
+ clip_right = fb_w - (x + in_w) - 1;
+ }
+
+ DRM_DEBUG_DRIVER("clip%d: clip_left=%d, clip_right=%d\n",
+ ch + 1, clip_left, clip_right);
+
+ writel(disable_val, base + ADE_CLIP_DISABLE(ch));
+ writel((fb_w - 1) << 16 | (in_h - 1), base + ADE_CLIP_SIZE0(ch));
+ writel(clip_left << 16 | clip_right, base + ADE_CLIP_SIZE1(ch));
+ ade_update_reload_bit(base, CLIP_OFST + ch, 0);
+}
+
+static void ade_clip_disable(void __iomem *base, u32 ch)
+{
+ writel(1, base + ADE_CLIP_DISABLE(ch));
+ ade_update_reload_bit(base, CLIP_OFST + ch, 1);
+}
+
+static bool has_alpha_channel(int format)
+{
+ switch (format) {
+ case ADE_ARGB_8888:
+ case ADE_ABGR_8888:
+ case ADE_RGBA_8888:
+ case ADE_BGRA_8888:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void ade_get_blending_params(u32 fmt, u8 glb_alpha, u8 *alp_mode,
+ u8 *alp_sel, u8 *under_alp_sel)
+{
+ bool has_alpha = has_alpha_channel(fmt);
+
+ /*
+ * get alp_mode
+ */
+ if (has_alpha && glb_alpha < 255)
+ *alp_mode = ADE_ALP_PIXEL_AND_GLB;
+ else if (has_alpha)
+ *alp_mode = ADE_ALP_PIXEL;
+ else
+ *alp_mode = ADE_ALP_GLOBAL;
+
+ /*
+ * get alp sel
+ */
+ *alp_sel = ADE_ALP_MUL_COEFF_3; /* 1 */
+ *under_alp_sel = ADE_ALP_MUL_COEFF_2; /* 0 */
+}
+
+static void ade_compositor_routing_set(void __iomem *base, u8 ch,
+ u32 x0, u32 y0,
+ u32 in_w, u32 in_h, u32 fmt)
+{
+ u8 ovly_ch = 0; /* TODO: This is the zpos, only one plane now */
+ u8 glb_alpha = 255;
+ u32 x1 = x0 + in_w - 1;
+ u32 y1 = y0 + in_h - 1;
+ u32 val;
+ u8 alp_sel;
+ u8 under_alp_sel;
+ u8 alp_mode;
+
+ ade_get_blending_params(fmt, glb_alpha, &alp_mode, &alp_sel,
+ &under_alp_sel);
+
+ /* overlay routing setting */
+ writel(x0 << 16 | y0, base + ADE_OVLY_CH_XY0(ovly_ch));
+ writel(x1 << 16 | y1, base + ADE_OVLY_CH_XY1(ovly_ch));
+ val = (ch + 1) << CH_SEL_OFST | BIT(CH_EN_OFST) |
+ alp_sel << CH_ALP_SEL_OFST |
+ under_alp_sel << CH_UNDER_ALP_SEL_OFST |
+ glb_alpha << CH_ALP_GBL_OFST |
+ alp_mode << CH_ALP_MODE_OFST;
+ writel(val, base + ADE_OVLY_CH_CTL(ovly_ch));
+ /* connect this plane/channel to overlay2 compositor */
+ ade_update_bits(base + ADE_OVLY_CTL, CH_OVLY_SEL_OFST(ovly_ch),
+ CH_OVLY_SEL_MASK, CH_OVLY_SEL_VAL(OUT_OVLY));
+}
+
+static void ade_compositor_routing_disable(void __iomem *base, u32 ch)
+{
+ u8 ovly_ch = 0; /* TODO: Only primary plane now */
+
+ /* disable this plane/channel */
+ ade_update_bits(base + ADE_OVLY_CH_CTL(ovly_ch), CH_EN_OFST,
+ MASK(1), 0);
+ /* dis-connect this plane/channel of overlay2 compositor */
+ ade_update_bits(base + ADE_OVLY_CTL, CH_OVLY_SEL_OFST(ovly_ch),
+ CH_OVLY_SEL_MASK, 0);
+}
+
+/*
+ * Typically, a channel looks like: DMA-->clip-->scale-->ctrans-->compositor
+ */
+static void ade_update_channel(struct ade_plane *aplane,
+ struct drm_framebuffer *fb, int crtc_x,
+ int crtc_y, unsigned int crtc_w,
+ unsigned int crtc_h, u32 src_x,
+ u32 src_y, u32 src_w, u32 src_h)
+{
+ struct ade_hw_ctx *ctx = aplane->ctx;
+ void __iomem *base = ctx->base;
+ u32 fmt = ade_get_format(fb->pixel_format);
+ u32 ch = aplane->ch;
+ u32 in_w;
+ u32 in_h;
+
+ DRM_DEBUG_DRIVER("channel%d: src:(%d, %d)-%dx%d, crtc:(%d, %d)-%dx%d",
+ ch + 1, src_x, src_y, src_w, src_h,
+ crtc_x, crtc_y, crtc_w, crtc_h);
+
+ /* 1) DMA setting */
+ in_w = src_w;
+ in_h = src_h;
+ ade_rdma_set(base, fb, ch, src_y, in_h, fmt);
+
+ /* 2) clip setting */
+ ade_clip_set(base, ch, fb->width, src_x, in_w, in_h);
+
+ /* 3) TODO: scale setting for overlay planes */
+
+ /* 4) TODO: ctran/csc setting for overlay planes */
+
+ /* 5) compositor routing setting */
+ ade_compositor_routing_set(base, ch, crtc_x, crtc_y, in_w, in_h, fmt);
+}
+
+static void ade_disable_channel(struct ade_plane *aplane)
+{
+ struct ade_hw_ctx *ctx = aplane->ctx;
+ void __iomem *base = ctx->base;
+ u32 ch = aplane->ch;
+
+ DRM_DEBUG_DRIVER("disable channel%d\n", ch + 1);
+
+ /* disable read DMA */
+ ade_rdma_disable(base, ch);
+
+ /* disable clip */
+ ade_clip_disable(base, ch);
+
+ /* disable compositor routing */
+ ade_compositor_routing_disable(base, ch);
+}
+
+static int ade_plane_prepare_fb(struct drm_plane *plane,
+ const struct drm_plane_state *new_state)
+{
+ /* do nothing */
+ return 0;
+}
+
+static void ade_plane_cleanup_fb(struct drm_plane *plane,
+ const struct drm_plane_state *old_state)
+{
+ /* do nothing */
+}
+
+static int ade_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
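+ /* plane source coordinates are 16.16 fixed point; shift to pixels */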
+ u32 src_x = state->src_x >> 16;
+ u32 src_y = state->src_y >> 16;
+ u32 src_w = state->src_w >> 16;
+ u32 src_h = state->src_h >> 16;
+ int crtc_x = state->crtc_x;
+ int crtc_y = state->crtc_y;
+ u32 crtc_w = state->crtc_w;
+ u32 crtc_h = state->crtc_h;
+ u32 fmt;
+
+ if (!crtc || !fb)
+ return 0;
+
+ fmt = ade_get_format(fb->pixel_format);
+ if (fmt == ADE_FORMAT_UNSUPPORT)
+ return -EINVAL;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (src_w != crtc_w || src_h != crtc_h) {
+ DRM_ERROR("Scale not support!!!\n");
+ return -EINVAL;
+ }
+
+ if (src_x + src_w > fb->width ||
+ src_y + src_h > fb->height)
+ return -EINVAL;
+
+ if (crtc_x < 0 || crtc_y < 0)
+ return -EINVAL;
+
+ if (crtc_x + crtc_w > crtc_state->adjusted_mode.hdisplay ||
+ crtc_y + crtc_h > crtc_state->adjusted_mode.vdisplay)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ade_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = plane->state;
+ struct ade_plane *aplane = to_ade_plane(plane);
+
+ ade_update_channel(aplane, state->fb, state->crtc_x, state->crtc_y,
+ state->crtc_w, state->crtc_h,
+ state->src_x >> 16, state->src_y >> 16,
+ state->src_w >> 16, state->src_h >> 16);
+}
+
+static void ade_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct ade_plane *aplane = to_ade_plane(plane);
+
+ ade_disable_channel(aplane);
+}
+
+static const struct drm_plane_helper_funcs ade_plane_helper_funcs = {
+ .prepare_fb = ade_plane_prepare_fb,
+ .cleanup_fb = ade_plane_cleanup_fb,
+ .atomic_check = ade_plane_atomic_check,
+ .atomic_update = ade_plane_atomic_update,
+ .atomic_disable = ade_plane_atomic_disable,
+};
+
+static struct drm_plane_funcs ade_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static int ade_plane_init(struct drm_device *dev, struct ade_plane *aplane,
+ enum drm_plane_type type)
+{
+ const u32 *fmts;
+ u32 fmts_cnt;
+ int ret;
+
+ /* get the formats supported by this channel */
+ fmts_cnt = ade_get_channel_formats(aplane->ch, &fmts);
+ if (!fmts_cnt)
+ return -EINVAL;
+
+ ret = drm_universal_plane_init(dev, &aplane->base, 1, &ade_plane_funcs,
+ fmts, fmts_cnt, type, NULL);
+ if (ret) {
+ DRM_ERROR("fail to init plane, ch=%d\n", aplane->ch);
+ return ret;
+ }
+
+ drm_plane_helper_add(&aplane->base, &ade_plane_helper_funcs);
+
+ return 0;
+}
+
+static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ctx->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ctx->base)) {
+ DRM_ERROR("failed to remap ade io base\n");
+ return PTR_ERR(ctx->base);
+ }
+
+ ctx->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(ctx->reset))
+ return PTR_ERR(ctx->reset);
+
+ ctx->noc_regmap =
+ syscon_regmap_lookup_by_phandle(np, "hisilicon,noc-syscon");
+ if (IS_ERR(ctx->noc_regmap)) {
+ DRM_ERROR("failed to get noc regmap\n");
+ return PTR_ERR(ctx->noc_regmap);
+ }
+
+ ctx->irq = platform_get_irq(pdev, 0);
+ if (ctx->irq < 0) {
+ DRM_ERROR("failed to get irq\n");
+ return -ENODEV;
+ }
+
+ ctx->ade_core_clk = devm_clk_get(dev, "clk_ade_core");
+ if (IS_ERR(ctx->ade_core_clk)) {
+ DRM_ERROR("failed to parse clk ADE_CORE\n");
+ return PTR_ERR(ctx->ade_core_clk);
+ }
+
+ ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg");
+ if (IS_ERR(ctx->media_noc_clk)) {
+ DRM_ERROR("failed to parse clk CODEC_JPEG\n");
+ return PTR_ERR(ctx->media_noc_clk);
+ }
+
+ ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix");
+ if (IS_ERR(ctx->ade_pix_clk)) {
+ DRM_ERROR("failed to parse clk ADE_PIX\n");
+ return PTR_ERR(ctx->ade_pix_clk);
+ }
+
+ return 0;
+}
+
+static int ade_drm_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct ade_data *ade;
+ struct ade_hw_ctx *ctx;
+ struct ade_crtc *acrtc;
+ struct ade_plane *aplane;
+ enum drm_plane_type type;
+ int ret;
+ int i;
+
+ ade = devm_kzalloc(dev->dev, sizeof(*ade), GFP_KERNEL);
+ if (!ade) {
+ DRM_ERROR("failed to alloc ade_data\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, ade);
+
+ ctx = &ade->ctx;
+ acrtc = &ade->acrtc;
+ acrtc->ctx = ctx;
+ acrtc->out_format = LDI_OUT_RGB_888;
+
+ ret = ade_dts_parse(pdev, ctx);
+ if (ret)
+ return ret;
+
+ /*
+ * plane init
+ * TODO: Only the primary plane is supported for now;
+ * overlay planes are still to be done.
+ */
+ for (i = 0; i < ADE_CH_NUM; i++) {
+ aplane = &ade->aplane[i];
+ aplane->ch = i;
+ aplane->ctx = ctx;
+ type = i == PRIMARY_CH ? DRM_PLANE_TYPE_PRIMARY :
+ DRM_PLANE_TYPE_OVERLAY;
+
+ ret = ade_plane_init(dev, aplane, type);
+ if (ret)
+ return ret;
+ }
+
+ /* crtc init */
+ ret = ade_crtc_init(dev, &acrtc->base, &ade->aplane[PRIMARY_CH].base);
+ if (ret)
+ return ret;
+
+ /* vblank irq init */
+ ret = devm_request_irq(dev->dev, ctx->irq, ade_irq_handler,
+ IRQF_SHARED, dev->driver->name, acrtc);
+ if (ret)
+ return ret;
+ dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
+ dev->driver->enable_vblank = ade_enable_vblank;
+ dev->driver->disable_vblank = ade_disable_vblank;
+
+ return 0;
+}
+
+static void ade_drm_cleanup(struct drm_device *dev)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct ade_data *ade = platform_get_drvdata(pdev);
+ struct drm_crtc *crtc = &ade->acrtc.base;
+
+ drm_crtc_cleanup(crtc);
+}
+
+const struct kirin_dc_ops ade_dc_ops = {
+ .init = ade_drm_init,
+ .cleanup = ade_drm_cleanup
+};
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
new file mode 100644
index 000000000..3f94785fb
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -0,0 +1,343 @@
+/*
+ * Hisilicon Kirin SoCs' DRM master driver
+ *
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * Author:
+ * Xinliang Liu <z.liuxinliang@hisilicon.com>
+ * Xinliang Liu <xinliang.liu@linaro.org>
+ * Xinwei Kong <kong.kongxinwei@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/of_platform.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "kirin_drm_drv.h"
+
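+/* display-controller ops, selected at probe time from the OF match data */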
+static struct kirin_dc_ops *dc_ops;
+
+static int kirin_drm_kms_cleanup(struct drm_device *dev)
+{
+ struct kirin_drm_private *priv = dev->dev_private;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ if (priv->fbdev) {
+ drm_fbdev_cma_fini(priv->fbdev);
+ priv->fbdev = NULL;
+ }
+#endif
+ drm_kms_helper_poll_fini(dev);
+ drm_vblank_cleanup(dev);
+ dc_ops->cleanup(dev);
+ drm_mode_config_cleanup(dev);
+ devm_kfree(dev->dev, priv);
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+static void kirin_fbdev_output_poll_changed(struct drm_device *dev)
+{
+ struct kirin_drm_private *priv = dev->dev_private;
+
+ if (priv->fbdev) {
+ drm_fbdev_cma_hotplug_event(priv->fbdev);
+ } else {
+ priv->fbdev = drm_fbdev_cma_init(dev, 32,
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
+ if (IS_ERR(priv->fbdev))
+ priv->fbdev = NULL;
+ }
+}
+#endif
+
+static const struct drm_mode_config_funcs kirin_drm_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ .output_poll_changed = kirin_fbdev_output_poll_changed,
+#endif
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void kirin_drm_mode_config_init(struct drm_device *dev)
+{
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
+ dev->mode_config.funcs = &kirin_drm_mode_config_funcs;
+}
+
+static int kirin_drm_kms_init(struct drm_device *dev)
+{
+ struct kirin_drm_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev->dev_private = priv;
+ dev_set_drvdata(dev->dev, dev);
+
+ /* dev->mode_config initialization */
+ drm_mode_config_init(dev);
+ kirin_drm_mode_config_init(dev);
+
+ /* display controller init */
+ ret = dc_ops->init(dev);
+ if (ret)
+ goto err_mode_config_cleanup;
+
+ /* bind and init sub drivers */
+ ret = component_bind_all(dev->dev, dev);
+ if (ret) {
+ DRM_ERROR("failed to bind all component.\n");
+ goto err_dc_cleanup;
+ }
+
+ /* vblank init */
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret) {
+ DRM_ERROR("failed to initialize vblank.\n");
+ goto err_unbind_all;
+ }
+ /* with irq_enabled = true, we can use the vblank feature. */
+ dev->irq_enabled = true;
+
+ /* reset all the states of crtc/plane/encoder/connector */
+ drm_mode_config_reset(dev);
+
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(dev);
+
+ /* force detection after connectors init */
+ (void)drm_helper_hpd_irq_event(dev);
+
+ return 0;
+
+err_unbind_all:
+ component_unbind_all(dev->dev, dev);
+err_dc_cleanup:
+ dc_ops->cleanup(dev);
+err_mode_config_cleanup:
+ drm_mode_config_cleanup(dev);
+ devm_kfree(dev->dev, priv);
+ dev->dev_private = NULL;
+
+ return ret;
+}
+
+static const struct file_operations kirin_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static int kirin_gem_cma_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ return drm_gem_cma_dumb_create_internal(file, dev, args);
+}
+
+static struct drm_driver kirin_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
+ .fops = &kirin_drm_fops,
+ .set_busid = drm_platform_set_busid,
+
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = kirin_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+ .name = "kirin",
+ .desc = "Hisilicon Kirin SoCs' DRM Driver",
+ .date = "20150718",
+ .major = 1,
+ .minor = 0,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int kirin_drm_bind(struct device *dev)
+{
+ struct drm_driver *driver = &kirin_drm_driver;
+ struct drm_device *drm_dev;
+ int ret;
+
+ drm_dev = drm_dev_alloc(driver, dev);
+ if (!drm_dev)
+ return -ENOMEM;
+
+ drm_dev->platformdev = to_platform_device(dev);
+
+ ret = kirin_drm_kms_init(drm_dev);
+ if (ret)
+ goto err_drm_dev_unref;
+
+ ret = drm_dev_register(drm_dev, 0);
+ if (ret)
+ goto err_kms_cleanup;
+
+ /* connectors should be registered after drm device register */
+ ret = drm_connector_register_all(drm_dev);
+ if (ret)
+ goto err_drm_dev_unregister;
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date, drm_dev->primary->index);
+
+ return 0;
+
+err_drm_dev_unregister:
+ drm_dev_unregister(drm_dev);
+err_kms_cleanup:
+ kirin_drm_kms_cleanup(drm_dev);
+err_drm_dev_unref:
+ drm_dev_unref(drm_dev);
+
+ return ret;
+}
+
+static void kirin_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ drm_connector_unregister_all(drm_dev);
+ drm_dev_unregister(drm_dev);
+ kirin_drm_kms_cleanup(drm_dev);
+ drm_dev_unref(drm_dev);
+}
+
+static const struct component_master_ops kirin_drm_ops = {
+ .bind = kirin_drm_bind,
+ .unbind = kirin_drm_unbind,
+};
+
+static struct device_node *kirin_get_remote_node(struct device_node *np)
+{
+ struct device_node *endpoint, *remote;
+
+ /*
+ * Get the first endpoint; in our case only one remote node
+ * is connected to the display controller.
+ */
+ endpoint = of_graph_get_next_endpoint(np, NULL);
+ if (!endpoint) {
+ DRM_ERROR("no valid endpoint node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ remote = of_graph_get_remote_port_parent(endpoint);
+ /* the endpoint reference is no longer needed once remote is resolved */
+ of_node_put(endpoint);
+ if (!remote) {
+ DRM_ERROR("no valid remote node\n");
+ return ERR_PTR(-ENODEV);
+ }
+ of_node_put(remote);
+
+ if (!of_device_is_available(remote)) {
+ DRM_ERROR("not available for remote node\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ return remote;
+}
+
+static int kirin_drm_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct component_match *match = NULL;
+ struct device_node *remote;
+
+ dc_ops = (struct kirin_dc_ops *)of_device_get_match_data(dev);
+ if (!dc_ops) {
+ DRM_ERROR("failed to get dt id data\n");
+ return -EINVAL;
+ }
+
+ remote = kirin_get_remote_node(np);
+ if (IS_ERR(remote))
+ return PTR_ERR(remote);
+
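+ /* the remote encoder/bridge gets bound via the component framework */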
+ component_match_add(dev, &match, compare_of, remote);
+
+ return component_master_add_with_match(dev, &kirin_drm_ops, match);
+}
+
+static int kirin_drm_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &kirin_drm_ops);
+ dc_ops = NULL;
+ return 0;
+}
+
+static const struct of_device_id kirin_drm_dt_ids[] = {
+ { .compatible = "hisilicon,hi6220-ade",
+ .data = &ade_dc_ops,
+ },
+ { /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, kirin_drm_dt_ids);
+
+static struct platform_driver kirin_drm_platform_driver = {
+ .probe = kirin_drm_platform_probe,
+ .remove = kirin_drm_platform_remove,
+ .driver = {
+ .name = "kirin-drm",
+ .of_match_table = kirin_drm_dt_ids,
+ },
+};
+
+module_platform_driver(kirin_drm_platform_driver);
+
+MODULE_AUTHOR("Xinliang Liu <xinliang.liu@linaro.org>");
+MODULE_AUTHOR("Xinliang Liu <z.liuxinliang@hisilicon.com>");
+MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>");
+MODULE_DESCRIPTION("hisilicon Kirin SoCs' DRM master driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
new file mode 100644
index 000000000..1a07caf8e
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016 Linaro Limited.
+ * Copyright (c) 2014-2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __KIRIN_DRM_DRV_H__
+#define __KIRIN_DRM_DRV_H__
+
+#define MAX_CRTC 2
+
+/* display controller init/cleanup ops */
+struct kirin_dc_ops {
+ int (*init)(struct drm_device *dev);
+ void (*cleanup)(struct drm_device *dev);
+};
+
+struct kirin_drm_private {
+ struct drm_crtc *crtc[MAX_CRTC];
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ struct drm_fbdev_cma *fbdev;
+#endif
+};
+
+extern const struct kirin_dc_ops ade_dc_ops;
+
+#endif /* __KIRIN_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 20a5d0455..29a32b119 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -56,3 +56,9 @@ config DRM_I915_USERPTR
selected to enabled full userptr support.
If in doubt, say "Y".
+
+menu "drm/i915 Debugging"
+depends on DRM_I915
+depends on EXPERT
+source drivers/gpu/drm/i915/Kconfig.debug
+endmenu
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
new file mode 100644
index 000000000..8f4041033
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -0,0 +1,41 @@
+config DRM_I915_WERROR
+ bool "Force GCC to throw an error instead of a warning when compiling"
+ # As this may inadvertently break the build, only allow the user
+ # to shoot oneself in the foot iff they aim really hard
+ depends on EXPERT
+ # We use the dependency on !COMPILE_TEST to not be enabled in
+ # allmodconfig or allyesconfig configurations
+ depends on !COMPILE_TEST
+ default n
+ help
+ Add -Werror to the build flags for (and only for) i915.ko.
+ Do not enable this unless you are writing code for the i915.ko module.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_I915_DEBUG
+ bool "Enable additional driver debugging"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_I915_DEBUG_GEM
+ bool "Insert extra checks into the GEM internals"
+ default n
+ depends on DRM_I915_WERROR
+ help
+ Enable extra sanity checks (including BUGs) along the GEM driver
+ paths that may slow the system down and, if hit, hang the machine.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0851de07b..0b88ba0f3 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -2,6 +2,8 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
+
# Please keep these build lists sorted!
# core driver code
@@ -55,7 +57,9 @@ i915-y += intel_audio.o \
intel_atomic.o \
intel_atomic_plane.o \
intel_bios.o \
+ intel_color.o \
intel_display.o \
+ intel_dpll_mgr.o \
intel_fbc.o \
intel_fifo_underrun.o \
intel_frontbuffer.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 814d894ed..a337f33be 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -444,6 +444,7 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG64(CL_PRIMITIVES_COUNT),
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
+ REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
REG64(MI_PREDICATE_SRC0),
REG64(MI_PREDICATE_SRC1),
@@ -471,6 +472,25 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG32(GEN7_L3SQCREG1),
REG32(GEN7_L3CNTLREG2),
REG32(GEN7_L3CNTLREG3),
+};
+
+static const struct drm_i915_reg_descriptor hsw_render_regs[] = {
+ REG64_IDX(HSW_CS_GPR, 0),
+ REG64_IDX(HSW_CS_GPR, 1),
+ REG64_IDX(HSW_CS_GPR, 2),
+ REG64_IDX(HSW_CS_GPR, 3),
+ REG64_IDX(HSW_CS_GPR, 4),
+ REG64_IDX(HSW_CS_GPR, 5),
+ REG64_IDX(HSW_CS_GPR, 6),
+ REG64_IDX(HSW_CS_GPR, 7),
+ REG64_IDX(HSW_CS_GPR, 8),
+ REG64_IDX(HSW_CS_GPR, 9),
+ REG64_IDX(HSW_CS_GPR, 10),
+ REG64_IDX(HSW_CS_GPR, 11),
+ REG64_IDX(HSW_CS_GPR, 12),
+ REG64_IDX(HSW_CS_GPR, 13),
+ REG64_IDX(HSW_CS_GPR, 14),
+ REG64_IDX(HSW_CS_GPR, 15),
REG32(HSW_SCRATCH1,
.mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
.value = 0),
@@ -500,6 +520,33 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
#undef REG64
#undef REG32
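+
+/*
+ * A register whitelist table plus a flag marking tables that apply only
+ * to privileged (master) batch buffers.
+ */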
+struct drm_i915_reg_table {
+ const struct drm_i915_reg_descriptor *regs;
+ int num_regs;
+ bool master;
+};
+
+static const struct drm_i915_reg_table ivb_render_reg_tables[] = {
+ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
+ { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+};
+
+static const struct drm_i915_reg_table ivb_blt_reg_tables[] = {
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
+ { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true },
+};
+
+static const struct drm_i915_reg_table hsw_render_reg_tables[] = {
+ { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false },
+ { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false },
+ { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+};
+
+static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
+ { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false },
+ { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true },
+};
+
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
@@ -555,7 +602,7 @@ static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
return 0;
}
-static bool validate_cmds_sorted(struct intel_engine_cs *ring,
+static bool validate_cmds_sorted(struct intel_engine_cs *engine,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
{
@@ -577,7 +624,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
if (curr < previous) {
DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
- ring->id, i, j, curr, previous);
+ engine->id, i, j, curr, previous);
ret = false;
}
@@ -611,11 +658,18 @@ static bool check_sorted(int ring_id,
return ret;
}
-static bool validate_regs_sorted(struct intel_engine_cs *ring)
+static bool validate_regs_sorted(struct intel_engine_cs *engine)
{
- return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
- check_sorted(ring->id, ring->master_reg_table,
- ring->master_reg_count);
+ int i;
+ const struct drm_i915_reg_table *table;
+
+ for (i = 0; i < engine->reg_table_count; i++) {
+ table = &engine->reg_tables[i];
+ if (!check_sorted(engine->id, table->regs, table->num_regs))
+ return false;
+ }
+
+ return true;
}
struct cmd_node {
@@ -639,13 +693,13 @@ struct cmd_node {
*/
#define CMD_HASH_MASK STD_MI_OPCODE_MASK
-static int init_hash_table(struct intel_engine_cs *ring,
+static int init_hash_table(struct intel_engine_cs *engine,
const struct drm_i915_cmd_table *cmd_tables,
int cmd_table_count)
{
int i, j;
- hash_init(ring->cmd_hash);
+ hash_init(engine->cmd_hash);
for (i = 0; i < cmd_table_count; i++) {
const struct drm_i915_cmd_table *table = &cmd_tables[i];
@@ -660,7 +714,7 @@ static int init_hash_table(struct intel_engine_cs *ring,
return -ENOMEM;
desc_node->desc = desc;
- hash_add(ring->cmd_hash, &desc_node->node,
+ hash_add(engine->cmd_hash, &desc_node->node,
desc->cmd.value & CMD_HASH_MASK);
}
}
@@ -668,13 +722,13 @@ static int init_hash_table(struct intel_engine_cs *ring,
return 0;
}
-static void fini_hash_table(struct intel_engine_cs *ring)
+static void fini_hash_table(struct intel_engine_cs *engine)
{
struct hlist_node *tmp;
struct cmd_node *desc_node;
int i;
- hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) {
+ hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) {
hash_del(&desc_node->node);
kfree(desc_node);
}
@@ -690,18 +744,18 @@ static void fini_hash_table(struct intel_engine_cs *ring)
*
* Return: non-zero if initialization fails
*/
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
+int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
{
const struct drm_i915_cmd_table *cmd_tables;
int cmd_table_count;
int ret;
- if (!IS_GEN7(ring->dev))
+ if (!IS_GEN7(engine->dev))
return 0;
- switch (ring->id) {
+ switch (engine->id) {
case RCS:
- if (IS_HASWELL(ring->dev)) {
+ if (IS_HASWELL(engine->dev)) {
cmd_tables = hsw_render_ring_cmds;
cmd_table_count =
ARRAY_SIZE(hsw_render_ring_cmds);
@@ -710,26 +764,23 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
}
- ring->reg_table = gen7_render_regs;
- ring->reg_count = ARRAY_SIZE(gen7_render_regs);
-
- if (IS_HASWELL(ring->dev)) {
- ring->master_reg_table = hsw_master_regs;
- ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+ if (IS_HASWELL(engine->dev)) {
+ engine->reg_tables = hsw_render_reg_tables;
+ engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
} else {
- ring->master_reg_table = ivb_master_regs;
- ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+ engine->reg_tables = ivb_render_reg_tables;
+ engine->reg_table_count = ARRAY_SIZE(ivb_render_reg_tables);
}
- ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+ engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
case VCS:
cmd_tables = gen7_video_cmds;
cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
- ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+ engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
case BCS:
- if (IS_HASWELL(ring->dev)) {
+ if (IS_HASWELL(engine->dev)) {
cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
} else {
@@ -737,44 +788,41 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
}
- ring->reg_table = gen7_blt_regs;
- ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
-
- if (IS_HASWELL(ring->dev)) {
- ring->master_reg_table = hsw_master_regs;
- ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+ if (IS_HASWELL(engine->dev)) {
+ engine->reg_tables = hsw_blt_reg_tables;
+ engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else {
- ring->master_reg_table = ivb_master_regs;
- ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+ engine->reg_tables = ivb_blt_reg_tables;
+ engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
}
- ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+ engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
case VECS:
cmd_tables = hsw_vebox_cmds;
cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
/* VECS can use the same length_mask function as VCS */
- ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+ engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
default:
DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n",
- ring->id);
+ engine->id);
BUG();
}
- BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
- BUG_ON(!validate_regs_sorted(ring));
+ BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count));
+ BUG_ON(!validate_regs_sorted(engine));
- WARN_ON(!hash_empty(ring->cmd_hash));
+ WARN_ON(!hash_empty(engine->cmd_hash));
- ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+ ret = init_hash_table(engine, cmd_tables, cmd_table_count);
if (ret) {
DRM_ERROR("CMD: cmd_parser_init failed!\n");
- fini_hash_table(ring);
+ fini_hash_table(engine);
return ret;
}
- ring->needs_cmd_parser = true;
+ engine->needs_cmd_parser = true;
return 0;
}
@@ -786,21 +834,21 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
* Releases any resources related to command parsing that may have been
* initialized for the specified ring.
*/
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine)
{
- if (!ring->needs_cmd_parser)
+ if (!engine->needs_cmd_parser)
return;
- fini_hash_table(ring);
+ fini_hash_table(engine);
}
static const struct drm_i915_cmd_descriptor*
-find_cmd_in_table(struct intel_engine_cs *ring,
+find_cmd_in_table(struct intel_engine_cs *engine,
u32 cmd_header)
{
struct cmd_node *desc_node;
- hash_for_each_possible(ring->cmd_hash, desc_node, node,
+ hash_for_each_possible(engine->cmd_hash, desc_node, node,
cmd_header & CMD_HASH_MASK) {
const struct drm_i915_cmd_descriptor *desc = desc_node->desc;
u32 masked_cmd = desc->cmd.mask & cmd_header;
@@ -822,18 +870,18 @@ find_cmd_in_table(struct intel_engine_cs *ring,
* ring's default length encoding and returns default_desc.
*/
static const struct drm_i915_cmd_descriptor*
-find_cmd(struct intel_engine_cs *ring,
+find_cmd(struct intel_engine_cs *engine,
u32 cmd_header,
struct drm_i915_cmd_descriptor *default_desc)
{
const struct drm_i915_cmd_descriptor *desc;
u32 mask;
- desc = find_cmd_in_table(ring, cmd_header);
+ desc = find_cmd_in_table(engine, cmd_header);
if (desc)
return desc;
- mask = ring->get_cmd_length_mask(cmd_header);
+ mask = engine->get_cmd_length_mask(cmd_header);
if (!mask)
return NULL;
@@ -848,12 +896,31 @@ static const struct drm_i915_reg_descriptor *
find_reg(const struct drm_i915_reg_descriptor *table,
int count, u32 addr)
{
- if (table) {
- int i;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (i915_mmio_reg_offset(table[i].addr) == addr)
+ return &table[i];
+ }
- for (i = 0; i < count; i++) {
- if (i915_mmio_reg_offset(table[i].addr) == addr)
- return &table[i];
+ return NULL;
+}
+
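+/*
+ * Search each register table attached to the engine; tables flagged as
+ * master-only are consulted only for secure (master) batches.
+ */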
+static const struct drm_i915_reg_descriptor *
+find_reg_in_tables(const struct drm_i915_reg_table *tables,
+ int count, bool is_master, u32 addr)
+{
+ int i;
+ const struct drm_i915_reg_table *table;
+ const struct drm_i915_reg_descriptor *reg;
+
+ for (i = 0; i < count; i++) {
+ table = &tables[i];
+ if (!table->master || is_master) {
+ reg = find_reg(table->regs, table->num_regs,
+ addr);
+ if (reg != NULL)
+ return reg;
}
}
@@ -963,18 +1030,18 @@ unpin_src:
*
* Return: true if the ring requires software command parsing
*/
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
{
- if (!ring->needs_cmd_parser)
+ if (!engine->needs_cmd_parser)
return false;
- if (!USES_PPGTT(ring->dev))
+ if (!USES_PPGTT(engine->dev))
return false;
return (i915.enable_cmd_parser == 1);
}
-static bool check_cmd(const struct intel_engine_cs *ring,
+static bool check_cmd(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_descriptor *desc,
const u32 *cmd, u32 length,
const bool is_master,
@@ -1004,17 +1071,14 @@ static bool check_cmd(const struct intel_engine_cs *ring,
offset += step) {
const u32 reg_addr = cmd[offset] & desc->reg.mask;
const struct drm_i915_reg_descriptor *reg =
- find_reg(ring->reg_table, ring->reg_count,
- reg_addr);
-
- if (!reg && is_master)
- reg = find_reg(ring->master_reg_table,
- ring->master_reg_count,
- reg_addr);
+ find_reg_in_tables(engine->reg_tables,
+ engine->reg_table_count,
+ is_master,
+ reg_addr);
if (!reg) {
DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
- reg_addr, *cmd, ring->id);
+ reg_addr, *cmd, engine->id);
return false;
}
@@ -1087,7 +1151,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
*cmd,
desc->bits[i].mask,
desc->bits[i].expected,
- dword, ring->id);
+ dword, engine->id);
return false;
}
}
@@ -1113,7 +1177,7 @@ static bool check_cmd(const struct intel_engine_cs *ring,
* Return: non-zero if the parser finds violations or otherwise fails; -EACCES
* if the batch appears legal but should use hardware parsing
*/
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_parse_cmds(struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
@@ -1147,7 +1211,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
if (*cmd == MI_BATCH_BUFFER_END)
break;
- desc = find_cmd(ring, *cmd, &default_desc);
+ desc = find_cmd(engine, *cmd, &default_desc);
if (!desc) {
DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n",
*cmd);
@@ -1179,7 +1243,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
break;
}
- if (!check_cmd(ring, desc, cmd, length, is_master,
+ if (!check_cmd(engine, desc, cmd, length, is_master,
&oacontrol_set)) {
ret = -EINVAL;
break;
@@ -1223,6 +1287,7 @@ int i915_cmd_parser_get_version(void)
* 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
* 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
* 5. GPGPU dispatch compute indirect registers.
+ * 6. TIMESTAMP register and Haswell CS GPR registers.
*/
- return 5;
+ return 6;
}
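/*
 * A sketch of how userspace observes the version bump, using the
 * existing I915_PARAM_CMD_PARSER_VERSION getparam from libdrm's
 * i915_drm.h/xf86drm.h; the wrapper function itself is hypothetical.
 */
static int cmd_parser_version(int fd)
{
	int version = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &version,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;

	return version; /* 6 once this patch is applied */
}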
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e3f4c725a..103546834 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -89,27 +89,34 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static const char *get_pin_flag(struct drm_i915_gem_object *obj)
+static const char get_active_flag(struct drm_i915_gem_object *obj)
{
- if (obj->pin_display)
- return "p";
- else
- return " ";
+ return obj->active ? '*' : ' ';
}
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
+static const char get_pin_flag(struct drm_i915_gem_object *obj)
+{
+ return obj->pin_display ? 'p' : ' ';
+}
+
+static const char get_tiling_flag(struct drm_i915_gem_object *obj)
{
switch (obj->tiling_mode) {
default:
- case I915_TILING_NONE: return " ";
- case I915_TILING_X: return "X";
- case I915_TILING_Y: return "Y";
+ case I915_TILING_NONE: return ' ';
+ case I915_TILING_X: return 'X';
+ case I915_TILING_Y: return 'Y';
}
}
-static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
+static inline const char get_global_flag(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
+ return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
+}
+
+static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
+{
+ return obj->mapping ? 'M' : ' ';
}
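/*
 * The single-character pattern extends naturally to further object
 * state; a hypothetical helper in the style of the ones above, using
 * the existing obj->dirty bitfield:
 */
static const char get_dirty_flag(struct drm_i915_gem_object *obj)
{
	return obj->dirty ? 'd' : ' ';
}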
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@@ -129,23 +136,26 @@ static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct i915_vma *vma;
int pin_count = 0;
- int i;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
- seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
+ seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
&obj->base,
- obj->active ? "*" : " ",
+ get_active_flag(obj),
get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
+ get_pin_mapped_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain);
- for_each_ring(ring, dev_priv, i)
+ for_each_engine_id(engine, dev_priv, id)
seq_printf(m, "%x ",
- i915_gem_request_get_seqno(obj->last_read_req[i]));
+ i915_gem_request_get_seqno(obj->last_read_req[id]));
seq_printf(m, "] %x %x%s%s%s",
i915_gem_request_get_seqno(obj->last_write_req),
i915_gem_request_get_seqno(obj->last_fenced_req),
@@ -184,7 +194,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
if (obj->last_write_req != NULL)
seq_printf(m, " (%s)",
- i915_gem_request_get_ring(obj->last_write_req)->name);
+ i915_gem_request_get_engine(obj->last_write_req)->name);
if (obj->frontbuffer_bits)
seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
@@ -202,8 +212,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
uintptr_t list = (uintptr_t) node->info_ent->data;
struct list_head *head;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
u64 total_obj_size, total_gtt_size;
int count, ret;
@@ -216,11 +226,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
switch (list) {
case ACTIVE_LIST:
seq_puts(m, "Active:\n");
- head = &vm->active_list;
+ head = &ggtt->base.active_list;
break;
case INACTIVE_LIST:
seq_puts(m, "Inactive:\n");
- head = &vm->inactive_list;
+ head = &ggtt->base.inactive_list;
break;
default:
mutex_unlock(&dev->struct_mutex);
@@ -397,15 +407,15 @@ static void print_batch_pool_stats(struct seq_file *m,
{
struct drm_i915_gem_object *obj;
struct file_stats stats;
- struct intel_engine_cs *ring;
- int i, j;
+ struct intel_engine_cs *engine;
+ int j;
memset(&stats, 0, sizeof(stats));
- for_each_ring(ring, dev_priv, i) {
- for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+ for_each_engine(engine, dev_priv) {
+ for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
list_for_each_entry(obj,
- &ring->batch_pool.cache_list[j],
+ &engine->batch_pool.cache_list[j],
batch_pool_link)
per_file_stats(0, obj, &stats);
}
@@ -429,11 +439,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
u32 count, mappable_count, purgeable_count;
u64 size, mappable_size, purgeable_size;
+ unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
+ u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_file *file;
struct i915_vma *vma;
int ret;
@@ -452,12 +464,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_vmas(&vm->active_list, vm_link);
+ count_vmas(&ggtt->base.active_list, vm_link);
seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_vmas(&vm->inactive_list, vm_link);
+ count_vmas(&ggtt->base.inactive_list, vm_link);
seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n",
count, mappable_count, size, mappable_size);
@@ -466,6 +478,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size += obj->base.size, ++count;
if (obj->madv == I915_MADV_DONTNEED)
purgeable_size += obj->base.size, ++purgeable_count;
+ if (obj->mapping) {
+ pin_mapped_count++;
+ pin_mapped_size += obj->base.size;
+ if (obj->pages_pin_count == 0) {
+ pin_mapped_purgeable_count++;
+ pin_mapped_purgeable_size += obj->base.size;
+ }
+ }
}
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
@@ -483,6 +503,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
purgeable_size += obj->base.size;
++purgeable_count;
}
+ if (obj->mapping) {
+ pin_mapped_count++;
+ pin_mapped_size += obj->base.size;
+ if (obj->pages_pin_count == 0) {
+ pin_mapped_purgeable_count++;
+ pin_mapped_purgeable_size += obj->base.size;
+ }
+ }
}
seq_printf(m, "%u purgeable objects, %llu bytes\n",
purgeable_count, purgeable_size);
@@ -490,13 +518,20 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
mappable_count, mappable_size);
seq_printf(m, "%u fault mappable objects, %llu bytes\n",
count, size);
+ seq_printf(m,
+ "%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
+ pin_mapped_count, pin_mapped_purgeable_count,
+ pin_mapped_size, pin_mapped_purgeable_size);
seq_printf(m, "%llu [%llu] gtt total\n",
- dev_priv->gtt.base.total,
- (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+ ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ mutex_lock(&dev->filelist_mutex);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
struct task_struct *task;
@@ -517,8 +552,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
}
-
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->filelist_mutex);
return 0;
}
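/*
 * The reordering above ensures struct_mutex and filelist_mutex are
 * never held together on this path; the resulting pattern, in
 * isolation:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	... object and vma accounting ...
 *	mutex_unlock(&dev->struct_mutex);
 *
 *	mutex_lock(&dev->filelist_mutex);
 *	... per-file stats over dev->filelist ...
 *	mutex_unlock(&dev->filelist_mutex);
 */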
@@ -591,14 +625,13 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
pipe, plane);
}
if (work->flip_queued_req) {
- struct intel_engine_cs *ring =
- i915_gem_request_get_ring(work->flip_queued_req);
+ struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
- ring->name,
+ engine->name,
i915_gem_request_get_seqno(work->flip_queued_req),
dev_priv->next_seqno,
- ring->get_seqno(ring, true),
+ engine->get_seqno(engine),
i915_gem_request_completed(work->flip_queued_req, true));
} else
seq_printf(m, "Flip not associated with any ring\n");
@@ -637,28 +670,28 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int total = 0;
- int ret, i, j;
+ int ret, j;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- for_each_ring(ring, dev_priv, i) {
- for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
+ for_each_engine(engine, dev_priv) {
+ for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
int count;
count = 0;
list_for_each_entry(obj,
- &ring->batch_pool.cache_list[j],
+ &engine->batch_pool.cache_list[j],
batch_pool_link)
count++;
seq_printf(m, "%s cache[%d]: %d objects\n",
- ring->name, j, count);
+ engine->name, j, count);
list_for_each_entry(obj,
- &ring->batch_pool.cache_list[j],
+ &engine->batch_pool.cache_list[j],
batch_pool_link) {
seq_puts(m, " ");
describe_obj(m, obj);
@@ -681,26 +714,26 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct drm_i915_gem_request *req;
- int ret, any, i;
+ int ret, any;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
any = 0;
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
int count;
count = 0;
- list_for_each_entry(req, &ring->request_list, list)
+ list_for_each_entry(req, &engine->request_list, list)
count++;
if (count == 0)
continue;
- seq_printf(m, "%s requests: %d\n", ring->name, count);
- list_for_each_entry(req, &ring->request_list, list) {
+ seq_printf(m, "%s requests: %d\n", engine->name, count);
+ list_for_each_entry(req, &engine->request_list, list) {
struct task_struct *task;
rcu_read_lock();
@@ -726,12 +759,12 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
}
static void i915_ring_seqno_info(struct seq_file *m,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
- if (ring->get_seqno) {
- seq_printf(m, "Current sequence (%s): %x\n",
- ring->name, ring->get_seqno(ring, false));
- }
+ seq_printf(m, "Current sequence (%s): %x\n",
+ engine->name, engine->get_seqno(engine));
+ seq_printf(m, "Current user interrupts (%s): %x\n",
+ engine->name, READ_ONCE(engine->user_interrupts));
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
@@ -739,16 +772,16 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int ret, i;
+ struct intel_engine_cs *engine;
+ int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
intel_runtime_pm_get(dev_priv);
- for_each_ring(ring, dev_priv, i)
- i915_ring_seqno_info(m, ring);
+ for_each_engine(engine, dev_priv)
+ i915_ring_seqno_info(m, engine);
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -762,7 +795,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -934,13 +967,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
if (INTEL_INFO(dev)->gen >= 6) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
- ring->name, I915_READ_IMR(ring));
+ engine->name, I915_READ_IMR(engine));
}
- i915_ring_seqno_info(m, ring);
+ i915_ring_seqno_info(m, engine);
}
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -981,12 +1014,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
const u32 *hws;
int i;
- ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- hws = ring->status_page.page_addr;
+ engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
+ hws = engine->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -1216,12 +1249,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
rpstat = I915_READ(GEN6_RPSTAT1);
- rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
- rpcurup = I915_READ(GEN6_RP_CUR_UP);
- rpprevup = I915_READ(GEN6_RP_PREV_UP);
- rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
- rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
- rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+ rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
+ rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
+ rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
+ rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
+ rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
+ rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
if (IS_GEN9(dev))
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -1261,21 +1294,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
- GEN6_CURICONT_MASK);
- seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
- GEN6_CURBSYTAVG_MASK);
- seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
- GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
+ rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
+ seq_printf(m, "RP CUR UP: %d (%dus)\n",
+ rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
+ seq_printf(m, "RP PREV UP: %d (%dus)\n",
+ rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
dev_priv->rps.up_threshold);
- seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
- GEN6_CURIAVG_MASK);
- seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
- GEN6_CURBSYTAVG_MASK);
- seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
- GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
+ rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
+ seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
+ rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
+ seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
+ rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
seq_printf(m, "Down threshold: %d%%\n",
dev_priv->rps.down_threshold);
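/*
 * A sketch of the scaling GT_PM_INTERVAL_TO_US() applies to the raw
 * EI counts now printed above, assuming the 1.28us-per-count GT
 * timestamp tick of pre-gen9 parts (gen9 uses different tick rates):
 */
static inline u32 ei_counts_to_us(u32 counts)
{
	return (counts << 7) / 100; /* counts * 1.28us */
}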
@@ -1331,11 +1364,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- u64 acthd[I915_NUM_RINGS];
- u32 seqno[I915_NUM_RINGS];
+ struct intel_engine_cs *engine;
+ u64 acthd[I915_NUM_ENGINES];
+ u32 seqno[I915_NUM_ENGINES];
u32 instdone[I915_NUM_INSTDONE_REG];
- int i, j;
+ enum intel_engine_id id;
+ int j;
if (!i915.enable_hangcheck) {
seq_printf(m, "Hangcheck disabled\n");
@@ -1344,9 +1378,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- for_each_ring(ring, dev_priv, i) {
- seqno[i] = ring->get_seqno(ring, false);
- acthd[i] = intel_ring_get_active_head(ring);
+ for_each_engine_id(engine, dev_priv, id) {
+ acthd[id] = intel_ring_get_active_head(engine);
+ seqno[id] = engine->get_seqno(engine);
}
i915_get_extra_instdone(dev, instdone);
@@ -1360,19 +1394,22 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
} else
seq_printf(m, "Hangcheck inactive\n");
- for_each_ring(ring, dev_priv, i) {
- seq_printf(m, "%s:\n", ring->name);
- seq_printf(m, "\tseqno = %x [current %x]\n",
- ring->hangcheck.seqno, seqno[i]);
+ for_each_engine_id(engine, dev_priv, id) {
+ seq_printf(m, "%s:\n", engine->name);
+ seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
+ engine->hangcheck.seqno,
+ seqno[id],
+ engine->last_submitted_seqno);
+ seq_printf(m, "\tuser interrupts = %x [current %x]\n",
+ engine->hangcheck.user_interrupts,
+ READ_ONCE(engine->user_interrupts));
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
- (long long)ring->hangcheck.acthd,
- (long long)acthd[i]);
- seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
- (long long)ring->hangcheck.max_acthd);
- seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
- seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
-
- if (ring->id == RCS) {
+ (long long)engine->hangcheck.acthd,
+ (long long)acthd[id]);
+ seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
+ seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
+
+ if (engine->id == RCS) {
seq_puts(m, "\tinstdone read =");
for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
@@ -1382,7 +1419,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
seq_printf(m, " 0x%08x",
- ring->hangcheck.instdone[j]);
+ engine->hangcheck.instdone[j]);
seq_puts(m, "\n");
}
@@ -1465,12 +1502,11 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_uncore_forcewake_domain *fw_domain;
- int i;
spin_lock_irq(&dev_priv->uncore.lock);
- for_each_fw_domain(fw_domain, dev_priv, i) {
+ for_each_fw_domain(fw_domain, dev_priv) {
seq_printf(m, "%s.wake_count = %u\n",
- intel_uncore_forcewake_domain_to_str(i),
+ intel_uncore_forcewake_domain_to_str(fw_domain->id),
fw_domain->wake_count);
}
spin_unlock_irq(&dev_priv->uncore.lock);
@@ -1897,6 +1933,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct intel_framebuffer *fbdev_fb = NULL;
struct drm_framebuffer *drm_fb;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (to_i915(dev)->fbdev) {
@@ -1908,7 +1949,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fbdev_fb->base.depth,
fbdev_fb->base.bits_per_pixel,
fbdev_fb->base.modifier[0],
- atomic_read(&fbdev_fb->base.refcount.refcount));
+ drm_framebuffer_read_refcount(&fbdev_fb->base));
describe_obj(m, fbdev_fb->obj);
seq_putc(m, '\n');
}
@@ -1926,11 +1967,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.depth,
fb->base.bits_per_pixel,
fb->base.modifier[0],
- atomic_read(&fb->base.refcount.refcount));
+ drm_framebuffer_read_refcount(&fb->base));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1948,9 +1990,10 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct intel_context *ctx;
- int ret, i;
+ enum intel_engine_id id;
+ int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -1968,13 +2011,13 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (i915.enable_execlists) {
seq_putc(m, '\n');
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine_id(engine, dev_priv, id) {
struct drm_i915_gem_object *ctx_obj =
- ctx->engine[i].state;
+ ctx->engine[id].state;
struct intel_ringbuffer *ringbuf =
- ctx->engine[i].ringbuf;
+ ctx->engine[id].ringbuf;
- seq_printf(m, "%s: ", ring->name);
+ seq_printf(m, "%s: ", engine->name);
if (ctx_obj)
describe_obj(m, ctx_obj);
if (ringbuf)
@@ -1995,22 +2038,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
static void i915_dump_lrc_obj(struct seq_file *m,
struct intel_context *ctx,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
struct page *page;
uint32_t *reg_state;
int j;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
unsigned long ggtt_offset = 0;
if (ctx_obj == NULL) {
seq_printf(m, "Context on %s with no gem object\n",
- ring->name);
+ engine->name);
return;
}
- seq_printf(m, "CONTEXT: %s %u\n", ring->name,
- intel_execlists_ctx_id(ctx, ring));
+ seq_printf(m, "CONTEXT: %s %u\n", engine->name,
+ intel_execlists_ctx_id(ctx, engine));
if (!i915_gem_obj_ggtt_bound(ctx_obj))
seq_puts(m, "\tNot bound in GGTT\n");
@@ -2043,9 +2086,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct intel_context *ctx;
- int ret, i;
+ int ret;
if (!i915.enable_execlists) {
seq_printf(m, "Logical Ring Contexts are disabled\n");
@@ -2058,8 +2101,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
list_for_each_entry(ctx, &dev_priv->context_list, link)
if (ctx != dev_priv->kernel_context)
- for_each_ring(ring, dev_priv, i)
- i915_dump_lrc_obj(m, ctx, ring);
+ for_each_engine(engine, dev_priv)
+ i915_dump_lrc_obj(m, ctx, engine);
mutex_unlock(&dev->struct_mutex);
@@ -2071,15 +2114,14 @@ static int i915_execlists(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
u32 status_pointer;
u8 read_pointer;
u8 write_pointer;
u32 status;
u32 ctx_id;
struct list_head *cursor;
- int ring_id, i;
- int ret;
+ int i, ret;
if (!i915.enable_execlists) {
seq_puts(m, "Logical Ring Contexts are disabled\n");
@@ -2092,22 +2134,21 @@ static int i915_execlists(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
- for_each_ring(ring, dev_priv, ring_id) {
+ for_each_engine(engine, dev_priv) {
struct drm_i915_gem_request *head_req = NULL;
int count = 0;
- unsigned long flags;
- seq_printf(m, "%s\n", ring->name);
+ seq_printf(m, "%s\n", engine->name);
- status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
- ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
+ status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
+ ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
status, ctx_id);
- status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+ status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
- read_pointer = ring->next_context_status_buffer;
+ read_pointer = engine->next_context_status_buffer;
write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += GEN8_CSB_ENTRIES;
@@ -2115,24 +2156,25 @@ static int i915_execlists(struct seq_file *m, void *data)
read_pointer, write_pointer);
for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
- status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
- ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
+ status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
+ ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
i, status, ctx_id);
}
- spin_lock_irqsave(&ring->execlist_lock, flags);
- list_for_each(cursor, &ring->execlist_queue)
+ spin_lock_bh(&engine->execlist_lock);
+ list_for_each(cursor, &engine->execlist_queue)
count++;
- head_req = list_first_entry_or_null(&ring->execlist_queue,
- struct drm_i915_gem_request, execlist_link);
- spin_unlock_irqrestore(&ring->execlist_lock, flags);
+ head_req = list_first_entry_or_null(&engine->execlist_queue,
+ struct drm_i915_gem_request,
+ execlist_link);
+ spin_unlock_bh(&engine->execlist_lock);
seq_printf(m, "\t%d requests in queue\n", count);
if (head_req) {
seq_printf(m, "\tHead request id: %u\n",
- intel_execlists_ctx_id(head_req->ctx, ring));
+ intel_execlists_ctx_id(head_req->ctx, engine));
seq_printf(m, "\tHead request tail: %u\n",
head_req->tail);
}
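/*
 * execlist_queue is now only contended against the execlists irq
 * tasklet, so the irqsave locking above gives way to the cheaper
 * bottom-half variant; the pairing, in isolation:
 */
spin_lock_bh(&engine->execlist_lock);
/* ... walk engine->execlist_queue ... */
spin_unlock_bh(&engine->execlist_lock);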
@@ -2248,19 +2290,19 @@ static int per_file_ctx(int id, void *ptr, void *data)
static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- int unused, i;
+ int i;
if (!ppgtt)
return;
- for_each_ring(ring, dev_priv, unused) {
- seq_printf(m, "%s\n", ring->name);
+ for_each_engine(engine, dev_priv) {
+ seq_printf(m, "%s\n", engine->name);
for (i = 0; i < 4; i++) {
- u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
+ u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
pdp <<= 32;
- pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
+ pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
}
}
@@ -2269,19 +2311,22 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
if (INTEL_INFO(dev)->gen == 6)
seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
- for_each_ring(ring, dev_priv, i) {
- seq_printf(m, "%s\n", ring->name);
+ for_each_engine(engine, dev_priv) {
+ seq_printf(m, "%s\n", engine->name);
if (INTEL_INFO(dev)->gen == 7)
- seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
- seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
- seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
- seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
+ seq_printf(m, "GFX_MODE: 0x%08x\n",
+ I915_READ(RING_MODE_GEN7(engine)));
+ seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE(engine)));
+ seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
+ I915_READ(RING_PP_DIR_BASE_READ(engine)));
+ seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
+ I915_READ(RING_PP_DIR_DCLV(engine)));
}
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2312,6 +2357,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
else if (INTEL_INFO(dev)->gen >= 6)
gen6_ppgtt_info(m, dev);
+ mutex_lock(&dev->filelist_mutex);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
struct task_struct *task;
@@ -2319,15 +2365,16 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
task = get_pid_task(file->pid, PIDTYPE_PID);
if (!task) {
ret = -ESRCH;
- goto out_put;
+ goto out_unlock;
}
seq_printf(m, "\nproc: %s\n", task->comm);
put_task_struct(task);
idr_for_each(&file_priv->context_idr, per_file_ctx,
(void *)(unsigned long)m);
}
+out_unlock:
+ mutex_unlock(&dev->filelist_mutex);
-out_put:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -2336,12 +2383,11 @@ out_put:
static int count_irq_waiters(struct drm_i915_private *i915)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int count = 0;
- int i;
- for_each_ring(ring, i915, i)
- count += ring->irq_refcount;
+ for_each_engine(engine, i915)
+ count += engine->irq_refcount;
return count;
}
@@ -2362,6 +2408,8 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+
+ mutex_lock(&dev->filelist_mutex);
spin_lock(&dev_priv->rps.client_lock);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -2384,6 +2432,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
spin_unlock(&dev_priv->rps.client_lock);
+ mutex_unlock(&dev->filelist_mutex);
return 0;
}
@@ -2393,10 +2442,11 @@ static int i915_llc(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ const bool edram = INTEL_GEN(dev_priv) > 8;
- /* Size calculation for LLC is a bit of a pain. Ignore for now. */
seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
- seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
+ seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
+ intel_uncore_edram_size(dev_priv)/1024/1024);
return 0;
}
@@ -2408,7 +2458,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
u32 tmp, i;
- if (!HAS_GUC_UCODE(dev_priv->dev))
+ if (!HAS_GUC_UCODE(dev_priv))
return 0;
seq_printf(m, "GuC firmware status:\n");
@@ -2449,9 +2499,8 @@ static void i915_guc_client_info(struct seq_file *m,
struct drm_i915_private *dev_priv,
struct i915_guc_client *client)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
uint64_t tot = 0;
- uint32_t i;
seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
client->priority, client->ctx_index, client->proc_desc_offset);
@@ -2464,11 +2513,11 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
seq_printf(m, "\tSubmissions: %llu %s\n",
- client->submissions[ring->guc_id],
- ring->name);
- tot += client->submissions[ring->guc_id];
+ client->submissions[engine->guc_id],
+ engine->name);
+ tot += client->submissions[engine->guc_id];
}
seq_printf(m, "\tTotal: %llu\n", tot);
}
@@ -2480,11 +2529,10 @@ static int i915_guc_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc guc;
struct i915_guc_client client = {};
- struct intel_engine_cs *ring;
- enum intel_ring_id i;
+ struct intel_engine_cs *engine;
u64 total = 0;
- if (!HAS_GUC_SCHED(dev_priv->dev))
+ if (!HAS_GUC_SCHED(dev_priv))
return 0;
if (mutex_lock_interruptible(&dev->struct_mutex))
@@ -2504,11 +2552,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
seq_printf(m, "\nGuC submissions:\n");
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
- ring->name, guc.submissions[ring->guc_id],
- guc.last_seqno[ring->guc_id]);
- total += guc.submissions[ring->guc_id];
+ engine->name, guc.submissions[engine->guc_id],
+ guc.last_seqno[engine->guc_id]);
+ total += guc.submissions[engine->guc_id];
}
seq_printf(m, "\t%s: %llu\n", "Total", total);
@@ -2688,10 +2736,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!HAS_RUNTIME_PM(dev)) {
- seq_puts(m, "not supported\n");
- return 0;
- }
+ if (!HAS_RUNTIME_PM(dev_priv))
+ seq_puts(m, "Runtime power management not supported\n");
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "IRQs disabled: %s\n",
@@ -2702,6 +2748,9 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
#else
seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
+ seq_printf(m, "PCI device power state: %s [%d]\n",
+ pci_power_name(dev_priv->dev->pdev->current_state),
+ dev_priv->dev->pdev->current_state);
return 0;
}
@@ -3114,9 +3163,10 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
- int i, j, ret;
+ enum intel_engine_id id;
+ int j, ret;
if (!i915_semaphore_is_enabled(dev)) {
seq_puts(m, "Semaphores are disabled\n");
@@ -3135,14 +3185,14 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
seqno = (uint64_t *)kmap_atomic(page);
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine_id(engine, dev_priv, id) {
uint64_t offset;
- seq_printf(m, "%s\n", ring->name);
+ seq_printf(m, "%s\n", engine->name);
seq_puts(m, " Last signal:");
for (j = 0; j < num_rings; j++) {
- offset = i * I915_NUM_RINGS + j;
+ offset = id * I915_NUM_ENGINES + j;
seq_printf(m, "0x%08llx (0x%02llx) ",
seqno[offset], offset * 8);
}
@@ -3150,7 +3200,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
seq_puts(m, " Last wait: ");
for (j = 0; j < num_rings; j++) {
- offset = i + (j * I915_NUM_RINGS);
+ offset = id + (j * I915_NUM_ENGINES);
seq_printf(m, "0x%08llx (0x%02llx) ",
seqno[offset], offset * 8);
}
@@ -3160,18 +3210,18 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
kunmap_atomic(seqno);
} else {
seq_puts(m, " Last signal:");
- for_each_ring(ring, dev_priv, i)
+ for_each_engine(engine, dev_priv)
for (j = 0; j < num_rings; j++)
seq_printf(m, "0x%08x\n",
- I915_READ(ring->semaphore.mbox.signal[j]));
+ I915_READ(engine->semaphore.mbox.signal[j]));
seq_putc(m, '\n');
}
seq_puts(m, "\nSync seqno:\n");
- for_each_ring(ring, dev_priv, i) {
- for (j = 0; j < num_rings; j++) {
- seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
- }
+ for_each_engine(engine, dev_priv) {
+ for (j = 0; j < num_rings; j++)
+ seq_printf(m, " 0x%08x ",
+ engine->semaphore.sync_seqno[j]);
seq_putc(m, '\n');
}
seq_putc(m, '\n');
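/*
 * A sketch of the indexing above: the semaphore page holds an
 * I915_NUM_ENGINES x I915_NUM_ENGINES matrix of u64 seqnos, read
 * row-major for last signals and column-major for last waits.
 */
static u64 sync_slot(const u64 *seqno, int row, int col)
{
	return seqno[row * I915_NUM_ENGINES + col]; /* 8 bytes per slot */
}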
@@ -3193,8 +3243,8 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
- seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
- pll->config.crtc_mask, pll->active, yesno(pll->on));
+ seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
+ pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
seq_printf(m, " tracked hardware state:\n");
seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll);
seq_printf(m, " dpll_md: 0x%08x\n",
@@ -3212,11 +3262,12 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
{
int i;
int ret;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *workarounds = &dev_priv->workarounds;
+ enum intel_engine_id id;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -3225,9 +3276,9 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
- for_each_ring(ring, dev_priv, i)
+ for_each_engine_id(engine, dev_priv, id)
seq_printf(m, "HW whitelist count for %s: %d\n",
- ring->name, workarounds->hw_whitelist_count[i]);
+ engine->name, workarounds->hw_whitelist_count[id]);
for (i = 0; i < workarounds->count; ++i) {
i915_reg_t addr;
u32 mask, value, read;
@@ -3417,7 +3468,8 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
intel_dig_port = enc_to_dig_port(encoder);
if (!intel_dig_port->dp.can_mst)
continue;
-
+ seq_printf(m, "MST Source Port %c\n",
+ port_name(intel_dig_port->port));
drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
}
drm_modeset_unlock_all(dev);
@@ -4693,7 +4745,7 @@ i915_wedged_get(void *data, u64 *val)
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
- *val = atomic_read(&dev_priv->gpu_error.reset_counter);
+ *val = i915_terminally_wedged(&dev_priv->gpu_error);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1c6d227aa..b3198fcd0 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -50,6 +50,66 @@
#include <linux/pm_runtime.h>
#include <linux/oom.h>
+static unsigned int i915_load_fail_count;
+
+bool __i915_inject_load_failure(const char *func, int line)
+{
+ if (i915_load_fail_count >= i915.inject_load_failure)
+ return false;
+
+ if (++i915_load_fail_count == i915.inject_load_failure) {
+ DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
+ i915.inject_load_failure, func, line);
+ return true;
+ }
+
+ return false;
+}
+
+#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
+#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
+ "providing the dmesg log by booting with drm.debug=0xf"
+
+void
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+ const char *fmt, ...)
+{
+ static bool shown_bug_once;
+ struct device *dev = dev_priv->dev->dev;
+ bool is_error = level[1] <= KERN_ERR[1];
+ bool is_debug = level[1] == KERN_DEBUG[1];
+ struct va_format vaf;
+ va_list args;
+
+ if (is_debug && !(drm_debug & DRM_UT_DRIVER))
+ return;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+
+ if (is_error && !shown_bug_once) {
+ dev_notice(dev, "%s", FDO_BUG_MSG);
+ shown_bug_once = true;
+ }
+
+ va_end(args);
+}
+
+static bool i915_error_injected(struct drm_i915_private *dev_priv)
+{
+ return i915.inject_load_failure &&
+ i915_load_fail_count == i915.inject_load_failure;
+}
+
+#define i915_load_error(dev_priv, fmt, ...) \
+ __i915_printk(dev_priv, \
+ i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
+ fmt, ##__VA_ARGS__)
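/*
 * A sketch of how an init phase consumes the new hook; presumably
 * i915_inject_load_failure() wraps __i915_inject_load_failure() with
 * __func__/__LINE__, and the phase function here is hypothetical.
 * Booting with i915.inject_load_failure=N fails the N-th checkpoint,
 * exercising the corresponding unwind path.
 */
static int i915_driver_init_example(struct drm_i915_private *dev_priv)
{
	if (i915_inject_load_failure())
		return -ENODEV;

	/* ... real initialization ... */
	return 0;
}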
static int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -87,16 +147,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = 1;
break;
case I915_PARAM_HAS_BSD:
- value = intel_ring_initialized(&dev_priv->ring[VCS]);
+ value = intel_engine_initialized(&dev_priv->engine[VCS]);
break;
case I915_PARAM_HAS_BLT:
- value = intel_ring_initialized(&dev_priv->ring[BCS]);
+ value = intel_engine_initialized(&dev_priv->engine[BCS]);
break;
case I915_PARAM_HAS_VEBOX:
- value = intel_ring_initialized(&dev_priv->ring[VECS]);
+ value = intel_engine_initialized(&dev_priv->engine[VECS]);
break;
case I915_PARAM_HAS_BSD2:
- value = intel_ring_initialized(&dev_priv->ring[VCS2]);
+ value = intel_engine_initialized(&dev_priv->engine[VCS2]);
break;
case I915_PARAM_HAS_RELAXED_FENCING:
value = 1;
@@ -197,13 +257,6 @@ static int i915_get_bridge_dev(struct drm_device *dev)
return 0;
}
-#define MCHBAR_I915 0x44
-#define MCHBAR_I965 0x48
-#define MCHBAR_SIZE (4*4096)
-
-#define DEVEN_REG 0x54
-#define DEVEN_MCHBAR_EN (1 << 28)
-
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
@@ -265,7 +318,7 @@ intel_setup_mchbar(struct drm_device *dev)
dev_priv->mchbar_need_disable = false;
if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
@@ -283,7 +336,7 @@ intel_setup_mchbar(struct drm_device *dev)
/* Space is allocated or reserved, so enable it. */
if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
temp | DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
@@ -296,17 +349,24 @@ intel_teardown_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
if (dev_priv->mchbar_need_disable) {
if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
- temp &= ~DEVEN_MCHBAR_EN;
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
+ u32 deven_val;
+
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
+ &deven_val);
+ deven_val &= ~DEVEN_MCHBAR_EN;
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+ deven_val);
} else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- temp &= ~1;
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
+ u32 mchbar_val;
+
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
+ &mchbar_val);
+ mchbar_val &= ~1;
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
+ mchbar_val);
}
}
@@ -370,6 +430,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
ret = intel_bios_init(dev_priv);
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
@@ -413,9 +476,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
- /* Always safe in the mode setting case. */
- /* FIXME: do pre/post-mode set stuff in core KMS code */
- dev->vblank_disable_allowed = true;
if (INTEL_INFO(dev)->num_pipes == 0)
return 0;
@@ -444,7 +504,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
cleanup_gem:
mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
cleanup_irq:
@@ -453,6 +513,7 @@ cleanup_irq:
intel_teardown_gmbus(dev);
cleanup_csr:
intel_csr_ucode_fini(dev_priv);
+ intel_power_domains_fini(dev_priv);
vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -465,6 +526,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
struct pci_dev *pdev = dev_priv->dev->pdev;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool primary;
int ret;
@@ -472,8 +534,8 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap)
return -ENOMEM;
- ap->ranges[0].base = dev_priv->gtt.mappable_base;
- ap->ranges[0].size = dev_priv->gtt.mappable_end;
+ ap->ranges[0].base = ggtt->mappable_base;
+ ap->ranges[0].size = ggtt->mappable_end;
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -853,6 +915,10 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
else if (INTEL_INFO(dev)->gen >= 9)
gen9_sseu_info_init(dev);
+ /* Snooping is broken on BXT A stepping. */
+ info->has_snoop = !info->has_llc;
+ info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1);
+
DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
@@ -929,6 +995,84 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
destroy_workqueue(dev_priv->wq);
}
+/**
+ * i915_driver_init_early - setup state not requiring device access
+ * @dev_priv: device private
+ *
+ * Initialize everything that is a "SW-only" state, that is state not
+ * requiring accessing the device or exposing the driver via kernel internal
+ * or userspace interfaces. Example steps belonging here: lock initialization,
+ * system memory allocation, setting up device specific attributes and
+ * function hooks not requiring accessing the device.
+ */
+static int i915_driver_init_early(struct drm_i915_private *dev_priv,
+ struct drm_device *dev,
+ struct intel_device_info *info)
+{
+ struct intel_device_info *device_info;
+ int ret = 0;
+
+ if (i915_inject_load_failure())
+ return -ENODEV;
+
+ /* Setup the write-once "constant" device info */
+ device_info = (struct intel_device_info *)&dev_priv->info;
+ memcpy(device_info, info, sizeof(dev_priv->info));
+ device_info->device_id = dev->pdev->device;
+
+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
+ mutex_init(&dev_priv->backlight_lock);
+ spin_lock_init(&dev_priv->uncore.lock);
+ spin_lock_init(&dev_priv->mm.object_stat_lock);
+ spin_lock_init(&dev_priv->mmio_flip_lock);
+ mutex_init(&dev_priv->sb_lock);
+ mutex_init(&dev_priv->modeset_restore_lock);
+ mutex_init(&dev_priv->av_mutex);
+ mutex_init(&dev_priv->wm.wm_mutex);
+ mutex_init(&dev_priv->pps_mutex);
+
+ ret = i915_workqueues_init(dev_priv);
+ if (ret < 0)
+ return ret;
+
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(dev);
+
+ intel_pm_setup(dev);
+ intel_init_dpio(dev_priv);
+ intel_power_domains_init(dev_priv);
+ intel_irq_init(dev_priv);
+ intel_init_display_hooks(dev_priv);
+ intel_init_clock_gating_hooks(dev_priv);
+ intel_init_audio_hooks(dev_priv);
+ i915_gem_load_init(dev);
+
+ intel_display_crc_init(dev);
+
+ i915_dump_device_info(dev_priv);
+
+ /* Not all pre-production machines fall into this category, only the
+ * very first ones. Almost everything should work, except for maybe
+ * suspend/resume. And we don't implement workarounds that affect only
+ * pre-production machines. */
+ if (IS_HSW_EARLY_SDV(dev))
+ DRM_INFO("This is an early pre-production Haswell machine. "
+ "It may not be fully functional.\n");
+
+ return 0;
+}
+
+/**
+ * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
+{
+ i915_gem_load_cleanup(dev_priv->dev);
+ i915_workqueues_cleanup(dev_priv);
+}
+
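/*
 * The load path is being split into strictly ordered phases, each with
 * a matching cleanup run in reverse; a condensed map of the contract
 * the kernel-doc above describes:
 *
 *	i915_driver_init_early() - SW-only state, no device access
 *	i915_driver_init_mmio()  - minimal state for register access
 *	i915_driver_init_hw()    - state requiring device access
 *	i915_driver_register()   - expose to kernel and userspace
 */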
static int i915_mmio_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -970,97 +1114,93 @@ static void i915_mmio_cleanup(struct drm_device *dev)
}
/**
- * i915_driver_load - setup chip and create an initial config
- * @dev: DRM device
- * @flags: startup flags
+ * i915_driver_init_mmio - setup device MMIO
+ * @dev_priv: device private
*
- * The driver load routine has to do several things:
- * - drive output discovery via intel_modeset_init()
- * - initialize the memory manager
- * - allocate initial config memory
- * - setup the DRM framebuffer with the allocated memory
+ * Setup minimal device state necessary for MMIO accesses later in the
+ * initialization sequence. The setup here should avoid any other device-wide
+ * side effects or exposing the driver via kernel internal or user space
+ * interfaces.
*/
-int i915_driver_load(struct drm_device *dev, unsigned long flags)
+static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv;
- struct intel_device_info *info, *device_info;
- int ret = 0;
- uint32_t aperture_size;
-
- info = (struct intel_device_info *) flags;
-
- dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
-
- dev->dev_private = dev_priv;
- dev_priv->dev = dev;
+ struct drm_device *dev = dev_priv->dev;
+ int ret;
- /* Setup the write-once "constant" device info */
- device_info = (struct intel_device_info *)&dev_priv->info;
- memcpy(device_info, info, sizeof(dev_priv->info));
- device_info->device_id = dev->pdev->device;
+ if (i915_inject_load_failure())
+ return -ENODEV;
- spin_lock_init(&dev_priv->irq_lock);
- spin_lock_init(&dev_priv->gpu_error.lock);
- mutex_init(&dev_priv->backlight_lock);
- spin_lock_init(&dev_priv->uncore.lock);
- spin_lock_init(&dev_priv->mm.object_stat_lock);
- spin_lock_init(&dev_priv->mmio_flip_lock);
- mutex_init(&dev_priv->sb_lock);
- mutex_init(&dev_priv->modeset_restore_lock);
- mutex_init(&dev_priv->av_mutex);
+ if (i915_get_bridge_dev(dev))
+ return -EIO;
- ret = i915_workqueues_init(dev_priv);
+ ret = i915_mmio_setup(dev);
if (ret < 0)
- goto out_free_priv;
+ goto put_bridge;
- intel_pm_setup(dev);
+ intel_uncore_init(dev);
- intel_runtime_pm_get(dev_priv);
+ return 0;
- intel_display_crc_init(dev);
+put_bridge:
+ pci_dev_put(dev_priv->bridge_dev);
- i915_dump_device_info(dev_priv);
+ return ret;
+}
- /* Not all pre-production machines fall into this category, only the
- * very first ones. Almost everything should work, except for maybe
- * suspend/resume. And we don't implement workarounds that affect only
- * pre-production machines. */
- if (IS_HSW_EARLY_SDV(dev))
- DRM_INFO("This is an early pre-production Haswell machine. "
- "It may not be fully functional.\n");
+/**
+ * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
- if (i915_get_bridge_dev(dev)) {
- ret = -EIO;
- goto out_runtime_pm_put;
- }
+ intel_uncore_fini(dev);
+ i915_mmio_cleanup(dev);
+ pci_dev_put(dev_priv->bridge_dev);
+}
- ret = i915_mmio_setup(dev);
- if (ret < 0)
- goto put_bridge;
+/**
+ * i915_driver_init_hw - setup state requiring device access
+ * @dev_priv: device private
+ *
+ * Setup state that requires accessing the device, but doesn't require
+ * exposing the driver via kernel internal or userspace interfaces.
+ */
+static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ uint32_t aperture_size;
+ int ret;
- /* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(dev);
+ if (i915_inject_load_failure())
+ return -ENODEV;
- intel_uncore_init(dev);
+ intel_device_info_runtime_init(dev);
- ret = i915_gem_gtt_init(dev);
+ ret = i915_ggtt_init_hw(dev);
if (ret)
- goto out_uncore_fini;
+ return ret;
+
+ ret = i915_ggtt_enable_hw(dev);
+ if (ret) {
+ DRM_ERROR("failed to enable GGTT\n");
+ goto out_ggtt;
+ }
/* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over. */
ret = i915_kick_out_firmware_fb(dev_priv);
if (ret) {
DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
- goto out_gtt;
+ goto out_ggtt;
}
ret = i915_kick_out_vgacon(dev_priv);
if (ret) {
DRM_ERROR("failed to remove conflicting VGA console\n");
- goto out_gtt;
+ goto out_ggtt;
}
pci_set_master(dev->pdev);
@@ -1080,26 +1220,27 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
- aperture_size = dev_priv->gtt.mappable_end;
+ aperture_size = ggtt->mappable_end;
- dev_priv->gtt.mappable =
- io_mapping_create_wc(dev_priv->gtt.mappable_base,
+ ggtt->mappable =
+ io_mapping_create_wc(ggtt->mappable_base,
aperture_size);
- if (dev_priv->gtt.mappable == NULL) {
+ if (!ggtt->mappable) {
ret = -EIO;
- goto out_gtt;
+ goto out_ggtt;
}
- dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+ ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
aperture_size);
- intel_irq_init(dev_priv);
+ pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
intel_uncore_sanitize(dev);
intel_opregion_setup(dev);
- i915_gem_load_init(dev);
- i915_gem_shrinker_init(dev_priv);
+ i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
@@ -1117,24 +1258,44 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
DRM_DEBUG_DRIVER("can't enable MSI");
}
- intel_device_info_runtime_init(dev);
+ return 0;
- intel_init_dpio(dev_priv);
+out_ggtt:
+ i915_ggtt_cleanup_hw(dev);
- if (INTEL_INFO(dev)->num_pipes) {
- ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
- if (ret)
- goto out_gem_unload;
- }
+ return ret;
+}
- intel_power_domains_init(dev_priv);
+/**
+ * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
+ * @dev_priv: device private
+ */
+static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
- ret = i915_load_modeset_init(dev);
- if (ret < 0) {
- DRM_ERROR("failed to init modeset\n");
- goto out_power_well;
- }
+ if (dev->pdev->msi_enabled)
+ pci_disable_msi(dev->pdev);
+ pm_qos_remove_request(&dev_priv->pm_qos);
+ arch_phys_wc_del(ggtt->mtrr);
+ io_mapping_free(ggtt->mappable);
+ i915_ggtt_cleanup_hw(dev);
+}
+
+/**
+ * i915_driver_register - register the driver with the rest of the system
+ * @dev_priv: device private
+ *
+ * Perform any steps necessary to make the driver available via kernel
+ * internal or userspace interfaces.
+ */
+static void i915_driver_register(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ i915_gem_shrinker_init(dev_priv);
/*
* Notify a valid surface after modesetting,
* when running inside a VM.
@@ -1144,48 +1305,107 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
i915_setup_sysfs(dev);
- if (INTEL_INFO(dev)->num_pipes) {
+ if (INTEL_INFO(dev_priv)->num_pipes) {
/* Must be done after probing outputs */
intel_opregion_init(dev);
acpi_video_register();
}
- if (IS_GEN5(dev))
+ if (IS_GEN5(dev_priv))
intel_gpu_ips_init(dev_priv);
- intel_runtime_pm_enable(dev_priv);
-
i915_audio_component_init(dev_priv);
+}
+
+/**
+ * i915_driver_unregister - cleanup the registration done in i915_driver_register()
+ * @dev_priv: device private
+ */
+static void i915_driver_unregister(struct drm_i915_private *dev_priv)
+{
+ i915_audio_component_cleanup(dev_priv);
+ intel_gpu_ips_teardown();
+ acpi_video_unregister();
+ intel_opregion_fini(dev_priv->dev);
+ i915_teardown_sysfs(dev_priv->dev);
+ i915_gem_shrinker_cleanup(dev_priv);
+}
+
+/**
+ * i915_driver_load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ * - drive output discovery via intel_modeset_init()
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct drm_i915_private *dev_priv;
+ int ret = 0;
+
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
+ dev->dev_private = dev_priv;
+ /* Must be set before calling __i915_printk */
+ dev_priv->dev = dev;
+
+ ret = i915_driver_init_early(dev_priv, dev,
+ (struct intel_device_info *)flags);
+
+ if (ret < 0)
+ goto out_free_priv;
+
+ intel_runtime_pm_get(dev_priv);
+
+ ret = i915_driver_init_mmio(dev_priv);
+ if (ret < 0)
+ goto out_runtime_pm_put;
+
+ ret = i915_driver_init_hw(dev_priv);
+ if (ret < 0)
+ goto out_cleanup_mmio;
+
+ /*
+ * TODO: move the vblank init and parts of modeset init steps into one
+ * of the i915_driver_init_/i915_driver_register functions according
+ * to the role/effect of the given init step.
+ */
+ if (INTEL_INFO(dev)->num_pipes) {
+ ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
+ if (ret)
+ goto out_cleanup_hw;
+ }
+
+ ret = i915_load_modeset_init(dev);
+ if (ret < 0)
+ goto out_cleanup_vblank;
+
+ i915_driver_register(dev_priv);
+
+ intel_runtime_pm_enable(dev_priv);
intel_runtime_pm_put(dev_priv);
return 0;
-out_power_well:
- intel_power_domains_fini(dev_priv);
+out_cleanup_vblank:
drm_vblank_cleanup(dev);
-out_gem_unload:
- i915_gem_shrinker_cleanup(dev_priv);
-
- if (dev->pdev->msi_enabled)
- pci_disable_msi(dev->pdev);
-
- intel_teardown_mchbar(dev);
- pm_qos_remove_request(&dev_priv->pm_qos);
- arch_phys_wc_del(dev_priv->gtt.mtrr);
- io_mapping_free(dev_priv->gtt.mappable);
-out_gtt:
- i915_global_gtt_cleanup(dev);
-out_uncore_fini:
- intel_uncore_fini(dev);
- i915_mmio_cleanup(dev);
-put_bridge:
- pci_dev_put(dev_priv->bridge_dev);
- i915_gem_load_cleanup(dev);
+out_cleanup_hw:
+ i915_driver_cleanup_hw(dev_priv);
+out_cleanup_mmio:
+ i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
intel_runtime_pm_put(dev_priv);
- i915_workqueues_cleanup(dev_priv);
+ i915_driver_cleanup_early(dev_priv);
out_free_priv:
+ i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
+
kfree(dev_priv);
return ret;
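The reworked i915_driver_load() uses the classic kernel goto-unwind ladder: each out_* label undoes exactly the steps that completed before the failure, so a fault at any stage unwinds in reverse order. A small self-contained sketch of the pattern (step/undo names are illustrative, not from this patch):

#include <errno.h>

static int step1(void) { return 0; }
static int step2(void) { return -ENOMEM; }      /* pretend this fails */
static void undo1(void) { }

static int load(void)
{
        int ret;

        ret = step1();
        if (ret)
                return ret;             /* nothing to undo yet */

        ret = step2();
        if (ret)
                goto out_undo1;         /* unwind in reverse order */

        return 0;

out_undo1:
        undo1();
        return ret;
}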
@@ -1198,26 +1418,15 @@ int i915_driver_unload(struct drm_device *dev)
intel_fbdev_fini(dev);
- i915_audio_component_cleanup(dev_priv);
-
ret = i915_gem_suspend(dev);
if (ret) {
DRM_ERROR("failed to idle hardware: %d\n", ret);
return ret;
}
- intel_power_domains_fini(dev_priv);
-
- intel_gpu_ips_teardown();
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- i915_teardown_sysfs(dev);
-
- i915_gem_shrinker_cleanup(dev_priv);
-
- io_mapping_free(dev_priv->gtt.mappable);
- arch_phys_wc_del(dev_priv->gtt.mtrr);
-
- acpi_video_unregister();
+ i915_driver_unregister(dev_priv);
drm_vblank_cleanup(dev);
@@ -1246,31 +1455,24 @@ int i915_driver_unload(struct drm_device *dev)
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
i915_destroy_error_state(dev);
- if (dev->pdev->msi_enabled)
- pci_disable_msi(dev->pdev);
-
- intel_opregion_fini(dev);
-
/* Flush any outstanding unpin_work. */
flush_workqueue(dev_priv->wq);
intel_guc_ucode_fini(dev);
mutex_lock(&dev->struct_mutex);
- i915_gem_cleanup_ringbuffer(dev);
+ i915_gem_cleanup_engines(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
intel_fbc_cleanup_cfb(dev_priv);
- pm_qos_remove_request(&dev_priv->pm_qos);
+ intel_power_domains_fini(dev_priv);
- i915_global_gtt_cleanup(dev);
+ i915_driver_cleanup_hw(dev_priv);
+ i915_driver_cleanup_mmio(dev_priv);
- intel_uncore_fini(dev);
- i915_mmio_cleanup(dev);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
- i915_gem_load_cleanup(dev);
- pci_dev_put(dev_priv->bridge_dev);
- i915_workqueues_cleanup(dev_priv);
+ i915_driver_cleanup_early(dev_priv);
kfree(dev_priv);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6d2fb3f4a..85c4debf4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -66,6 +66,11 @@ static struct drm_driver driver;
#define IVB_CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
+#define BDW_COLORS \
+ .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
+#define CHV_COLORS \
+ .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+
static const struct intel_device_info intel_i830_info = {
.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
.has_overlay = 1, .overlay_needs_physical = 1,
@@ -288,24 +293,28 @@ static const struct intel_device_info intel_haswell_m_info = {
.is_mobile = 1,
};
+#define BDW_FEATURES \
+ HSW_FEATURES, \
+ BDW_COLORS
+
static const struct intel_device_info intel_broadwell_d_info = {
- HSW_FEATURES,
+ BDW_FEATURES,
.gen = 8,
};
static const struct intel_device_info intel_broadwell_m_info = {
- HSW_FEATURES,
+ BDW_FEATURES,
.gen = 8, .is_mobile = 1,
};
static const struct intel_device_info intel_broadwell_gt3d_info = {
- HSW_FEATURES,
+ BDW_FEATURES,
.gen = 8,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_broadwell_gt3m_info = {
- HSW_FEATURES,
+ BDW_FEATURES,
.gen = 8, .is_mobile = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
@@ -318,16 +327,17 @@ static const struct intel_device_info intel_cherryview_info = {
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
+ CHV_COLORS,
};
static const struct intel_device_info intel_skylake_info = {
- HSW_FEATURES,
+ BDW_FEATURES,
.is_skylake = 1,
.gen = 9,
};
static const struct intel_device_info intel_skylake_gt3_info = {
- HSW_FEATURES,
+ BDW_FEATURES,
.is_skylake = 1,
.gen = 9,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -345,18 +355,17 @@ static const struct intel_device_info intel_broxton_info = {
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
+ BDW_COLORS,
};
static const struct intel_device_info intel_kabylake_info = {
- HSW_FEATURES,
- .is_preliminary = 1,
+ BDW_FEATURES,
.is_kabylake = 1,
.gen = 9,
};
static const struct intel_device_info intel_kabylake_gt3_info = {
- HSW_FEATURES,
- .is_preliminary = 1,
+ BDW_FEATURES,
.is_kabylake = 1,
.gen = 9,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -503,7 +512,12 @@ void intel_detect_pch(struct drm_device *dev)
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
WARN_ON(!IS_SKYLAKE(dev) &&
!IS_KABYLAKE(dev));
+ } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
+ dev_priv->pch_type = PCH_KBP;
+ DRM_DEBUG_KMS("Found KabyPoint PCH\n");
+ WARN_ON(!IS_KABYLAKE(dev));
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
+ (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
pch->subsystem_vendor == 0x1af4 &&
pch->subsystem_device == 0x1100)) {
@@ -557,10 +571,9 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
drm_modeset_unlock_all(dev);
}
-static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume);
-static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
+static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
@@ -630,8 +643,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_display_set_init_power(dev_priv, false);
- if (HAS_CSR(dev_priv))
- flush_work(&dev_priv->csr.work);
+ intel_csr_ucode_suspend(dev_priv);
out:
enable_rpm_wakeref_asserts(dev_priv);
@@ -647,7 +659,8 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
disable_rpm_wakeref_asserts(dev_priv);
- fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
+ fw_csr = !IS_BROXTON(dev_priv) &&
+ suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
* In case of firmware assisted context save/restore don't manually
* deinit the power domains. This also means the CSR/DMC firmware will
@@ -658,7 +671,13 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
if (!fw_csr)
intel_power_domains_suspend(dev_priv);
- ret = intel_suspend_complete(dev_priv);
+ ret = 0;
+ if (IS_BROXTON(dev_priv))
+ bxt_enable_dc9(dev_priv);
+ else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ hsw_enable_pc8(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ ret = vlv_suspend_complete(dev_priv);
if (ret) {
DRM_ERROR("Suspend complete failed: %d\n", ret);
@@ -719,9 +738,16 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
disable_rpm_wakeref_asserts(dev_priv);
+ ret = i915_ggtt_enable_hw(dev);
+ if (ret)
+ DRM_ERROR("failed to re-enable GGTT\n");
+
+ intel_csr_ucode_resume(dev_priv);
+
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
@@ -850,21 +876,25 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_early_sanitize(dev, true);
- if (IS_BROXTON(dev))
- ret = bxt_resume_prepare(dev_priv);
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (IS_BROXTON(dev)) {
+ if (!dev_priv->suspended_to_idle)
+ gen9_sanitize_dc_state(dev_priv);
+ bxt_disable_dc9(dev_priv);
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
hsw_disable_pc8(dev_priv);
+ }
intel_uncore_sanitize(dev);
- if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
+ if (IS_BROXTON(dev_priv) ||
+ !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);
+ enable_rpm_wakeref_asserts(dev_priv);
+
out:
dev_priv->suspended_to_idle = false;
- enable_rpm_wakeref_asserts(dev_priv);
-
return ret;
}
@@ -900,23 +930,32 @@ int i915_resume_switcheroo(struct drm_device *dev)
int i915_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- bool simulated;
+ struct i915_gpu_error *error = &dev_priv->gpu_error;
+ unsigned reset_counter;
int ret;
intel_reset_gt_powersave(dev);
mutex_lock(&dev->struct_mutex);
- i915_gem_reset(dev);
+ /* Clear any previous failed attempts at recovery. Time to try again. */
+ atomic_andnot(I915_WEDGED, &error->reset_counter);
- simulated = dev_priv->gpu_error.stop_rings != 0;
+ /* Clear the reset-in-progress flag and increment the reset epoch. */
+ reset_counter = atomic_inc_return(&error->reset_counter);
+ if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
+ ret = -EIO;
+ goto error;
+ }
+
+ i915_gem_reset(dev);
- ret = intel_gpu_reset(dev);
+ ret = intel_gpu_reset(dev, ALL_ENGINES);
/* Also reset the gpu hangman. */
- if (simulated) {
+ if (error->stop_rings != 0) {
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
- dev_priv->gpu_error.stop_rings = 0;
+ error->stop_rings = 0;
if (ret == -ENODEV) {
DRM_INFO("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
@@ -928,9 +967,11 @@ int i915_reset(struct drm_device *dev)
pr_notice("drm/i915: Resetting chip after gpu hang\n");
if (ret) {
- DRM_ERROR("Failed to reset chip: %i\n", ret);
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ if (ret != -ENODEV)
+ DRM_ERROR("Failed to reset chip: %i\n", ret);
+ else
+ DRM_DEBUG_DRIVER("GPU reset disabled\n");
+ goto error;
}
intel_overlay_reset(dev_priv);
@@ -949,20 +990,14 @@ int i915_reset(struct drm_device *dev)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
-
- /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
- dev_priv->gpu_error.reload_in_reset = true;
-
ret = i915_gem_init_hw(dev);
-
- dev_priv->gpu_error.reload_in_reset = false;
-
- mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
- return ret;
+ goto error;
}
+ mutex_unlock(&dev->struct_mutex);
+
/*
* rps/rc6 re-init is necessary to restore state lost after the
* reset and the re-install of gt irqs. Skip for ironlake per
@@ -973,6 +1008,11 @@ int i915_reset(struct drm_device *dev)
intel_enable_gt_powersave(dev);
return 0;
+
+error:
+ atomic_or(I915_WEDGED, &error->reset_counter);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1079,44 +1119,6 @@ static int i915_pm_resume(struct device *dev)
return i915_drm_resume(drm_dev);
}
-static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
-{
- hsw_enable_pc8(dev_priv);
-
- return 0;
-}
-
-static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
-
- /* TODO: when DC5 support is added disable DC5 here. */
-
- broxton_ddi_phy_uninit(dev);
- broxton_uninit_cdclk(dev);
- bxt_enable_dc9(dev_priv);
-
- return 0;
-}
-
-static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = dev_priv->dev;
-
- /* TODO: when CSR FW support is added make sure the FW is loaded */
-
- bxt_disable_dc9(dev_priv);
-
- /*
- * TODO: when DC5 support is added enable DC5 here if the CSR FW
- * is available.
- */
- broxton_init_cdclk(dev);
- broxton_ddi_phy_init(dev);
-
- return 0;
-}
-
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
@@ -1420,7 +1422,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
if (err)
goto err2;
- if (!IS_CHERRYVIEW(dev_priv->dev))
+ if (!IS_CHERRYVIEW(dev_priv))
vlv_save_gunit_s0ix_state(dev_priv);
err = vlv_force_gfx_clock(dev_priv, false);
@@ -1452,7 +1454,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
*/
ret = vlv_force_gfx_clock(dev_priv, true);
- if (!IS_CHERRYVIEW(dev_priv->dev))
+ if (!IS_CHERRYVIEW(dev_priv))
vlv_restore_gunit_s0ix_state(dev_priv);
err = vlv_allow_gt_wake(dev_priv, true);
@@ -1522,7 +1524,16 @@ static int intel_runtime_suspend(struct device *device)
intel_suspend_gt_powersave(dev);
intel_runtime_pm_disable_interrupts(dev_priv);
- ret = intel_suspend_complete(dev_priv);
+ ret = 0;
+ if (IS_BROXTON(dev_priv)) {
+ bxt_display_core_uninit(dev_priv);
+ bxt_enable_dc9(dev_priv);
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ hsw_enable_pc8(dev_priv);
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ ret = vlv_suspend_complete(dev_priv);
+ }
+
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1596,12 +1607,17 @@ static int intel_runtime_resume(struct device *device)
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);
- if (IS_BROXTON(dev))
- ret = bxt_resume_prepare(dev_priv);
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (IS_BROXTON(dev)) {
+ bxt_disable_dc9(dev_priv);
+ bxt_display_core_init(dev_priv, true);
+ if (dev_priv->csr.dmc_payload &&
+ (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+ gen9_enable_dc5(dev_priv);
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
hsw_disable_pc8(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
ret = vlv_resume_prepare(dev_priv, true);
+ }
/*
* No point of rolling back things in case of an error, as the best
@@ -1632,26 +1648,6 @@ static int intel_runtime_resume(struct device *device)
return ret;
}
-/*
- * This function implements common functionality of runtime and system
- * suspend sequence.
- */
-static int intel_suspend_complete(struct drm_i915_private *dev_priv)
-{
- int ret;
-
- if (IS_BROXTON(dev_priv))
- ret = bxt_suspend_complete(dev_priv);
- else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- ret = hsw_suspend_complete(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_suspend_complete(dev_priv);
- else
- ret = 0;
-
- return ret;
-}
-
static const struct dev_pm_ops i915_pm_ops = {
/*
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
@@ -1772,10 +1768,8 @@ static int __init i915_init(void)
if (i915.modeset == 0)
driver.driver_features &= ~DRIVER_MODESET;
-#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && i915.modeset == -1)
driver.driver_features &= ~DRIVER_MODESET;
-#endif
if (!(driver.driver_features & DRIVER_MODESET)) {
/* Silently fail loading to not upset userspace. */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3f69a6792..489c72885 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -33,33 +33,40 @@
#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>
-#include <drm/drmP.h>
-#include "i915_params.h"
-#include "i915_reg.h"
-#include "intel_bios.h"
-#include "intel_ringbuffer.h"
-#include "intel_lrc.h"
-#include "i915_gem_gtt.h"
-#include "i915_gem_render_state.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <drm/intel-gtt.h>
-#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
-#include <drm/drm_gem.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
+#include <linux/shmem_fs.h>
+
+#include <drm/drmP.h>
+#include <drm/intel-gtt.h>
+#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
+#include <drm/drm_gem.h>
+
+#include "i915_params.h"
+#include "i915_reg.h"
+
+#include "intel_bios.h"
+#include "intel_dpll_mgr.h"
#include "intel_guc.h"
+#include "intel_lrc.h"
+#include "intel_ringbuffer.h"
+
+#include "i915_gem.h"
+#include "i915_gem_gtt.h"
+#include "i915_gem_render_state.h"
/* General customization:
*/
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20160229"
+#define DRIVER_DATE "20160425"
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
@@ -97,6 +104,10 @@
#define I915_STATE_WARN_ON(x) \
I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
+bool __i915_inject_load_failure(const char *func, int line);
+#define i915_inject_load_failure() \
+ __i915_inject_load_failure(__func__, __LINE__)
+
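The new __i915_inject_load_failure() hook lets testing fake an error at a chosen point in driver load, so the unwind ladder above actually gets exercised. A hedged sketch of how such a hook is typically placed at the top of an init step (the call site and the -ENODEV choice are illustrative; only the macro itself comes from this patch):

static int i915_driver_init_something(struct drm_i915_private *dev_priv)
{
        if (i915_inject_load_failure())         /* fault injection for CI */
                return -ENODEV;
        /* ... real initialisation ... */
        return 0;
}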
static inline const char *yesno(bool v)
{
return v ? "yes" : "no";
@@ -122,9 +133,35 @@ enum transcoder {
TRANSCODER_B,
TRANSCODER_C,
TRANSCODER_EDP,
+ TRANSCODER_DSI_A,
+ TRANSCODER_DSI_C,
I915_MAX_TRANSCODERS
};
-#define transcoder_name(t) ((t) + 'A')
+
+static inline const char *transcoder_name(enum transcoder transcoder)
+{
+ switch (transcoder) {
+ case TRANSCODER_A:
+ return "A";
+ case TRANSCODER_B:
+ return "B";
+ case TRANSCODER_C:
+ return "C";
+ case TRANSCODER_EDP:
+ return "EDP";
+ case TRANSCODER_DSI_A:
+ return "DSI A";
+ case TRANSCODER_DSI_C:
+ return "DSI C";
+ default:
+ return "<invalid>";
+ }
+}
+
+static inline bool transcoder_is_dsi(enum transcoder transcoder)
+{
+ return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
+}
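The switch-based transcoder_name() replaces the old ((t) + 'A') arithmetic, which cannot express the multi-character "DSI A"/"DSI C" labels (it would have printed 'E' for TRANSCODER_DSI_A). Illustrative use of the two new helpers:

enum transcoder t = TRANSCODER_DSI_A;

if (transcoder_is_dsi(t))
        DRM_DEBUG_KMS("transcoder %s uses the DSI path\n",
                      transcoder_name(t));      /* prints "DSI A" */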
/*
* I915_MAX_PLANES in the enum below is the maximum (across all platforms)
@@ -176,6 +213,8 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_DSI_A,
+ POWER_DOMAIN_TRANSCODER_DSI_C,
POWER_DOMAIN_PORT_DDI_A_LANES,
POWER_DOMAIN_PORT_DDI_B_LANES,
POWER_DOMAIN_PORT_DDI_C_LANES,
@@ -273,6 +312,10 @@ struct i915_hotplug {
(__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
(__s)++)
+#define for_each_port_masked(__port, __ports_mask) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+ for_each_if ((__ports_mask) & (1 << (__port)))
+
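A quick sketch of the new mask iterator in use (the mask value is made up for illustration):

enum port port;
unsigned int ports = (1 << PORT_B) | (1 << PORT_C);     /* example mask */

for_each_port_masked(port, ports)
        DRM_DEBUG_KMS("probing port %c\n",
                      port_name(port));         /* runs for B and C only */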
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -340,81 +383,6 @@ struct drm_i915_file_private {
unsigned int bsd_ring;
};
-enum intel_dpll_id {
- DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
- /* real shared dpll ids must be >= 0 */
- DPLL_ID_PCH_PLL_A = 0,
- DPLL_ID_PCH_PLL_B = 1,
- /* hsw/bdw */
- DPLL_ID_WRPLL1 = 0,
- DPLL_ID_WRPLL2 = 1,
- DPLL_ID_SPLL = 2,
-
- /* skl */
- DPLL_ID_SKL_DPLL1 = 0,
- DPLL_ID_SKL_DPLL2 = 1,
- DPLL_ID_SKL_DPLL3 = 2,
-};
-#define I915_NUM_PLLS 3
-
-struct intel_dpll_hw_state {
- /* i9xx, pch plls */
- uint32_t dpll;
- uint32_t dpll_md;
- uint32_t fp0;
- uint32_t fp1;
-
- /* hsw, bdw */
- uint32_t wrpll;
- uint32_t spll;
-
- /* skl */
- /*
- * DPLL_CTRL1 has 6 bits for each DPLL. We store those in
- * lower part of ctrl1 and they get shifted into position when writing
- * the register. This allows us to easily compare the state to share
- * the DPLL.
- */
- uint32_t ctrl1;
- /* HDMI only, 0 when used for DP */
- uint32_t cfgcr1, cfgcr2;
-
- /* bxt */
- uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
- pcsdw12;
-};
-
-struct intel_shared_dpll_config {
- unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
- struct intel_dpll_hw_state hw_state;
-};
-
-struct intel_shared_dpll {
- struct intel_shared_dpll_config config;
-
- int active; /* count of number of active CRTCs (i.e. DPMS on) */
- bool on; /* is the PLL actually active? Disabled during modeset */
- const char *name;
- /* should match the index in the dev_priv->shared_dplls array */
- enum intel_dpll_id id;
- /* The mode_set hook is optional and should be used together with the
- * intel_prepare_shared_dpll function. */
- void (*mode_set)(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll);
- void (*enable)(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll);
- void (*disable)(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll);
- bool (*get_hw_state)(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state);
-};
-
-#define SKL_DPLL0 0
-#define SKL_DPLL1 1
-#define SKL_DPLL2 2
-#define SKL_DPLL3 3
-
/* Used by dp and fdi links */
struct intel_link_m_n {
uint32_t tu;
@@ -533,7 +501,8 @@ struct drm_i915_error_state {
u32 cpu_ring_head;
u32 cpu_ring_tail;
- u32 semaphore_seqno[I915_NUM_RINGS - 1];
+ u32 last_seqno;
+ u32 semaphore_seqno[I915_NUM_ENGINES - 1];
/* Register state */
u32 start;
@@ -553,7 +522,7 @@ struct drm_i915_error_state {
u32 fault_reg;
u64 faddr;
u32 rc_psmi; /* sleep state */
- u32 semaphore_mboxes[I915_NUM_RINGS - 1];
+ u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
struct drm_i915_error_object {
int page_count;
@@ -561,6 +530,8 @@ struct drm_i915_error_state {
u32 *pages[0];
} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;
+ struct drm_i915_error_object *wa_ctx;
+
struct drm_i915_error_request {
long jiffies;
u32 seqno;
@@ -577,12 +548,12 @@ struct drm_i915_error_state {
pid_t pid;
char comm[TASK_COMM_LEN];
- } ring[I915_NUM_RINGS];
+ } ring[I915_NUM_ENGINES];
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 rseqno[I915_NUM_RINGS], wseqno;
+ u32 rseqno[I915_NUM_ENGINES], wseqno;
u64 gtt_offset;
u32 read_domains;
u32 write_domain;
@@ -611,27 +582,12 @@ struct dpll;
struct drm_i915_display_funcs {
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
- /**
- * find_dpll() - Find the best values for the PLL
- * @limit: limits for the PLL
- * @crtc: current CRTC
- * @target: target frequency in kHz
- * @refclk: reference clock frequency in kHz
- * @match_clock: if provided, @best_clock P divider must
- * match the P divider from @match_clock
- * used for LVDS downclocking
- * @best_clock: best PLL values found
- *
- * Returns true on success, false on failure.
- */
- bool (*find_dpll)(const struct intel_limit *limit,
- struct intel_crtc_state *crtc_state,
- int target, int refclk,
- struct dpll *match_clock,
- struct dpll *best_clock);
- int (*compute_pipe_wm)(struct intel_crtc *crtc,
- struct drm_atomic_state *state);
- void (*program_watermarks)(struct intel_crtc_state *cstate);
+ int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
+ int (*compute_intermediate_wm)(struct drm_device *dev,
+ struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *newstate);
+ void (*initial_watermarks)(struct intel_crtc_state *cstate);
+ void (*optimize_watermarks)(struct intel_crtc_state *cstate);
void (*update_wm)(struct drm_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -662,6 +618,9 @@ struct drm_i915_display_funcs {
/* render clock increase/decrease */
/* display clock increase/decrease */
/* pll clock increase/decrease */
+
+ void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
+ void (*load_luts)(struct drm_crtc_state *crtc_state);
};
enum forcewake_domain_id {
@@ -681,6 +640,13 @@ enum forcewake_domains {
FORCEWAKE_MEDIA)
};
+#define FW_REG_READ (1)
+#define FW_REG_WRITE (2)
+
+enum forcewake_domains
+intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, unsigned int op);
+
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
@@ -713,8 +679,9 @@ struct intel_uncore {
struct intel_uncore_forcewake_domain {
struct drm_i915_private *i915;
enum forcewake_domain_id id;
+ enum forcewake_domains mask;
unsigned wake_count;
- struct timer_list timer;
+ struct hrtimer timer;
i915_reg_t reg_set;
u32 val_set;
u32 val_clear;
@@ -727,14 +694,14 @@ struct intel_uncore {
};
/* Iterate over initialised fw domains */
-#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
- for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
- (i__) < FW_DOMAIN_ID_COUNT; \
- (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
- for_each_if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
+#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
+ for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
+ (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
+ (domain__)++) \
+ for_each_if ((mask__) & (domain__)->mask)
-#define for_each_fw_domain(domain__, dev_priv__, i__) \
- for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
+#define for_each_fw_domain(domain__, dev_priv__) \
+ for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)
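Storing the domain's own bit in the new mask field lets the iterators drop the index variable that the old for_each_fw_domain_mask() threaded through every caller. Illustrative use of the replacement (the body is a sketch):

struct intel_uncore_forcewake_domain *domain;

/* old form: for_each_fw_domain(domain, dev_priv, i) -- the index is gone */
for_each_fw_domain_masked(domain, FORCEWAKE_RENDER, dev_priv)
        domain->wake_count++;   /* touch only the render domain(s) */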
#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version) ((version) >> 16)
@@ -750,6 +717,7 @@ struct intel_csr {
i915_reg_t mmioaddr[8];
uint32_t mmiodata[8];
uint32_t dc_state;
+ uint32_t allowed_dc_mask;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -779,6 +747,7 @@ struct intel_csr {
func(overlay_needs_physical) sep \
func(supports_tv) sep \
func(has_llc) sep \
+ func(has_snoop) sep \
func(has_ddi) sep \
func(has_fpga_dbg)
@@ -810,6 +779,11 @@ struct intel_device_info {
u8 has_slice_pg:1;
u8 has_subslice_pg:1;
u8 has_eu_pg:1;
+
+ struct color_luts {
+ u16 degamma_lut_size;
+ u16 gamma_lut_size;
+ } color;
};
#undef DEFINE_FLAG
@@ -891,7 +865,7 @@ struct intel_context {
struct i915_vma *lrc_vma;
u64 lrc_desc;
uint32_t *lrc_reg_state;
- } engine[I915_NUM_RINGS];
+ } engine[I915_NUM_ENGINES];
struct list_head link;
};
@@ -1016,6 +990,7 @@ enum intel_pch {
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
+ PCH_KBP, /* Kabypoint PCH */
PCH_NOP,
};
@@ -1036,6 +1011,7 @@ struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
+#define GMBUS_FORCE_BIT_RETRY (1U << 31)
u32 force_bit;
u32 reg0;
i915_reg_t gpio_reg;
@@ -1159,6 +1135,7 @@ struct intel_gen6_power_mgmt {
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/frequency */
u8 rp0_freq; /* Non-overclocked max frequency. */
+ u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */
u8 up_threshold; /* Current %busy required to upclock */
u8 down_threshold; /* Current %busy required to downclock */
@@ -1298,6 +1275,7 @@ struct i915_gem_mm {
struct i915_hw_ppgtt *aliasing_ppgtt;
struct notifier_block oom_notifier;
+ struct notifier_block vmap_notifier;
struct shrinker shrinker;
bool shrinker_no_lock_stealing;
@@ -1423,9 +1401,6 @@ struct i915_gpu_error {
/* For missed irq/seqno simulation. */
unsigned int test_irq_rings;
-
- /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
- bool reload_in_reset;
};
enum modeset_restore {
@@ -1482,21 +1457,23 @@ struct intel_vbt_data {
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
- unsigned int has_mipi:1;
+ unsigned int panel_type:4;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
enum drrs_support_type drrs_type;
- /* eDP */
- int edp_rate;
- int edp_lanes;
- int edp_preemphasis;
- int edp_vswing;
- bool edp_initialized;
- bool edp_support;
- int edp_bpp;
- struct edp_power_seq edp_pps;
+ struct {
+ int rate;
+ int lanes;
+ int preemphasis;
+ int vswing;
+ bool low_vswing;
+ bool initialized;
+ bool support;
+ int bpp;
+ struct edp_power_seq pps;
+ } edp;
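Grouping the eDP fields into one struct turns the flat vbt.edp_* accesses into members of vbt.edp. A before/after sketch of a caller (field use is illustrative):

/* before this patch */
if (dev_priv->vbt.edp_support)
        link_rate = dev_priv->vbt.edp_rate;

/* after this patch */
if (dev_priv->vbt.edp.support)
        link_rate = dev_priv->vbt.edp.rate;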
struct {
bool full_link;
@@ -1516,7 +1493,6 @@ struct intel_vbt_data {
/* MIPI DSI */
struct {
- u16 port;
u16 panel_id;
struct mipi_config *config;
struct mipi_pps_data *pps;
@@ -1532,6 +1508,7 @@ struct intel_vbt_data {
union child_device_config *child_dev;
struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
+ struct sdvo_device_mapping sdvo_mappings[2];
};
enum intel_ddb_partitioning {
@@ -1706,7 +1683,7 @@ struct i915_wa_reg {
struct i915_workarounds {
struct i915_wa_reg reg[I915_MAX_WA_REGS];
u32 count;
- u32 hw_whitelist_count[I915_NUM_RINGS];
+ u32 hw_whitelist_count[I915_NUM_ENGINES];
};
struct i915_virtual_gpu {
@@ -1719,7 +1696,7 @@ struct i915_execbuffer_params {
uint32_t dispatch_flags;
uint32_t args_batch_start_offset;
uint64_t batch_obj_vm_offset;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct drm_i915_gem_object *batch_obj;
struct intel_context *ctx;
struct drm_i915_gem_request *request;
@@ -1771,7 +1748,7 @@ struct drm_i915_private {
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
- struct intel_engine_cs ring[I915_NUM_RINGS];
+ struct intel_engine_cs engine[I915_NUM_ENGINES];
struct drm_i915_gem_object *semaphore_obj;
uint32_t last_seqno, next_seqno;
@@ -1829,6 +1806,7 @@ struct drm_i915_private {
unsigned int skl_boot_cdclk;
unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
unsigned int max_dotclk_freq;
+ unsigned int rawclk_freq;
unsigned int hpll_freq;
unsigned int czclk_freq;
@@ -1855,7 +1833,7 @@ struct drm_i915_private {
struct drm_atomic_state *modeset_restore_state;
struct list_head vm_list; /* Global list of all address spaces */
- struct i915_gtt gtt; /* VM representing the global address space */
+ struct i915_ggtt ggtt; /* VM representing the global address space */
struct i915_gem_mm mm;
DECLARE_HASHTABLE(mm_structs, 7);
@@ -1863,8 +1841,6 @@ struct drm_i915_private {
/* Kernel Modesetting */
- struct sdvo_device_mapping sdvo_mappings[2];
-
struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
wait_queue_head_t pending_flip_queue;
@@ -1876,6 +1852,14 @@ struct drm_i915_private {
/* dpll and cdclk state is protected by connection_mutex */
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ const struct intel_dpll_mgr *dpll_mgr;
+
+ /*
+ * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
+ * Must be global rather than per dpll, because on some platforms
+ * plls share registers.
+ */
+ struct mutex dpll_lock;
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
@@ -1884,9 +1868,6 @@ struct drm_i915_private {
struct i915_workarounds workarounds;
- /* Reclocking support */
- bool render_reclock_avail;
-
struct i915_frontbuffer_tracking fb_tracking;
u16 orig_clock;
@@ -1896,7 +1877,7 @@ struct drm_i915_private {
struct intel_l3_parity l3_parity;
/* Cannot be determined by PCIID. You must always read a register. */
- size_t ellc_size;
+ u32 edram_cap;
/* gen6+ rps state */
struct intel_gen6_power_mgmt rps;
@@ -1936,7 +1917,15 @@ struct drm_i915_private {
u32 fdi_rx_config;
+ /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
u32 chv_phy_control;
+ /*
+ * Shadows for CHV DPLL_MD regs to keep the state
+ * checker somewhat working in the presence hardware
+ * crappiness (can't read out DPLL_MD for pipes B & C).
+ */
+ u32 chv_dpll_md[I915_MAX_PIPES];
+ u32 bxt_phy_grc;
u32 suspend_count;
bool suspended_to_idle;
@@ -1982,6 +1971,13 @@ struct drm_i915_private {
};
uint8_t max_level;
+
+ /*
+ * Should be held around atomic WM register writing; also
+ * protects intel_crtc->wm.active and
+ * cstate->wm.need_postvbl_update.
+ */
+ struct mutex wm_mutex;
} wm;
struct i915_runtime_pm pm;
@@ -1991,15 +1987,13 @@ struct drm_i915_private {
int (*execbuf_submit)(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
- int (*init_rings)(struct drm_device *dev);
- void (*cleanup_ring)(struct intel_engine_cs *ring);
- void (*stop_ring)(struct intel_engine_cs *ring);
+ int (*init_engines)(struct drm_device *dev);
+ void (*cleanup_engine)(struct intel_engine_cs *engine);
+ void (*stop_engine)(struct intel_engine_cs *engine);
} gt;
struct intel_context *kernel_context;
- bool edp_low_vswing;
-
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
@@ -2026,10 +2020,28 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
return container_of(guc, struct drm_i915_private, guc);
}
-/* Iterate over initialised rings */
-#define for_each_ring(ring__, dev_priv__, i__) \
- for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
- for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__))))
+/* Simple iterator over all initialised engines */
+#define for_each_engine(engine__, dev_priv__) \
+ for ((engine__) = &(dev_priv__)->engine[0]; \
+ (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
+ (engine__)++) \
+ for_each_if (intel_engine_initialized(engine__))
+
+/* Iterator with engine_id */
+#define for_each_engine_id(engine__, dev_priv__, id__) \
+ for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \
+ (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
+ (engine__)++) \
+ for_each_if (((id__) = (engine__)->id, \
+ intel_engine_initialized(engine__)))
+
+/* Iterator over subset of engines selected by mask */
+#define for_each_engine_masked(engine__, dev_priv__, mask__) \
+ for ((engine__) = &(dev_priv__)->engine[0]; \
+ (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
+ (engine__)++) \
+ for_each_if (((mask__) & intel_engine_flag(engine__)) && \
+ intel_engine_initialized(engine__))
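The ring-to-engine rename comes with three iterator flavours; the masked one pairs with the new intel_gpu_reset(dev, engine_mask) interface and its ALL_ENGINES value. A sketch of typical use (init_engine/reset_engine are hypothetical callbacks):

struct intel_engine_cs *engine;
unsigned int id;

for_each_engine(engine, dev_priv)
        init_engine(engine);                    /* every initialised engine */

for_each_engine_id(engine, dev_priv, id)
        DRM_DEBUG_DRIVER("engine %u ready\n", id);

for_each_engine_masked(engine, dev_priv, RENDER_RING)
        reset_engine(engine);                   /* only engines in the mask */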
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
@@ -2099,7 +2111,7 @@ struct drm_i915_gem_object {
struct drm_mm_node *stolen;
struct list_head global_list;
- struct list_head ring_list[I915_NUM_RINGS];
+ struct list_head engine_list[I915_NUM_ENGINES];
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
@@ -2110,7 +2122,7 @@ struct drm_i915_gem_object {
* rendering and so a non-zero seqno), and is not set if it is on
* inactive (ready to be unbound) list.
*/
- unsigned int active:I915_NUM_RINGS;
+ unsigned int active:I915_NUM_ENGINES;
/**
* This is set if the object has been written to since last bound
@@ -2174,10 +2186,7 @@ struct drm_i915_gem_object {
struct scatterlist *sg;
int last;
} get_page;
-
- /* prime dma-buf support */
- void *dma_buf_vmapping;
- int vmapping_count;
+ void *mapping;
/** Breadcrumb of last rendering to the buffer.
* There can only be one writer, but we allow for multiple readers.
@@ -2189,7 +2198,7 @@ struct drm_i915_gem_object {
* read request. This allows for the CPU to read from an active
* buffer by only waiting for the write to complete.
* */
- struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS];
+ struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES];
struct drm_i915_gem_request *last_write_req;
/** Breadcrumb of last fenced GPU access to the buffer. */
struct drm_i915_gem_request *last_fenced_req;
@@ -2244,7 +2253,8 @@ struct drm_i915_gem_request {
/** On Which ring this request was generated */
struct drm_i915_private *i915;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
+ unsigned reset_counter;
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
@@ -2325,7 +2335,6 @@ struct drm_i915_gem_request {
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx);
-void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
@@ -2337,9 +2346,9 @@ i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
}
static inline struct intel_engine_cs *
-i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+i915_gem_request_get_engine(struct drm_i915_gem_request *req)
{
- return req ? req->ring : NULL;
+ return req ? req->engine : NULL;
}
static inline struct drm_i915_gem_request *
@@ -2353,7 +2362,7 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
- WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
kref_put(&req->ref, i915_gem_request_free);
}
@@ -2365,7 +2374,7 @@ i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
if (!req)
return;
- dev = req->ring->dev;
+ dev = req->engine->dev;
if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
mutex_unlock(&dev->struct_mutex);
}
@@ -2495,6 +2504,7 @@ struct drm_i915_cmd_table {
__p; \
})
#define INTEL_INFO(p) (&__I915__(p)->info)
+#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
@@ -2593,6 +2603,15 @@ struct drm_i915_cmd_table {
#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
+#define KBL_REVID_A0 0x0
+#define KBL_REVID_B0 0x1
+#define KBL_REVID_C0 0x2
+#define KBL_REVID_D0 0x3
+#define KBL_REVID_E0 0x4
+
+#define IS_KBL_REVID(p, since, until) \
+ (IS_KABYLAKE(p) && IS_REVID(p, since, until))
+
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
@@ -2613,13 +2632,17 @@ struct drm_i915_cmd_table {
#define BLT_RING (1<<BCS)
#define VEBOX_RING (1<<VECS)
#define BSD2_RING (1<<VCS2)
+#define ALL_ENGINES (~0)
+
#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING)
#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
+#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
+#define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED)
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
- __I915__(dev)->ellc_size)
+ HAS_EDRAM(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
@@ -2673,7 +2696,7 @@ struct drm_i915_cmd_table {
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
- IS_KABYLAKE(dev))
+ IS_KABYLAKE(dev) || IS_BROXTON(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
@@ -2697,10 +2720,13 @@ struct drm_i915_cmd_table {
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA200
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
+#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
@@ -2729,6 +2755,13 @@ extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
extern int i915_resume_switcheroo(struct drm_device *dev);
/* i915_dma.c */
+void __printf(3, 4)
+__i915_printk(struct drm_i915_private *dev_priv, const char *level,
+ const char *fmt, ...);
+
+#define i915_report_error(dev_priv, fmt, ...) \
+ __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
+
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2741,9 +2774,11 @@ extern void i915_driver_postclose(struct drm_device *dev,
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
-extern int intel_gpu_reset(struct drm_device *dev);
+extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask);
extern bool intel_has_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
+extern int intel_guc_reset(struct drm_i915_private *dev_priv);
+extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -2760,7 +2795,7 @@ bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
__printf(3, 4)
-void i915_handle_error(struct drm_device *dev, bool wedged,
+void i915_handle_error(struct drm_device *dev, u32 engine_mask,
const char *fmt, ...);
extern void intel_irq_init(struct drm_i915_private *dev_priv);
@@ -2787,6 +2822,8 @@ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
+u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
+
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
static inline bool intel_vgpu_active(struct drm_device *dev)
{
@@ -2865,7 +2902,6 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req);
-void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
@@ -2896,6 +2932,7 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load_init(struct drm_device *dev);
void i915_gem_load_cleanup(struct drm_device *dev);
+void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2980,12 +3017,46 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
BUG_ON(obj->pages == NULL);
obj->pages_pin_count++;
}
+
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pages_pin_count == 0);
obj->pages_pin_count--;
}
+/**
+ * i915_gem_object_pin_map - return a contiguous mapping of the entire object
+ * @obj - the object to map into kernel address space
+ *
+ * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
+ * pages and then returns a contiguous mapping of the backing storage into
+ * the kernel address space.
+ *
+ * The caller must hold the struct_mutex, and is responsible for calling
+ * i915_gem_object_unpin_map() when the mapping is no longer required.
+ *
+ * Returns the pointer through which to access the mapped object, or an
+ * ERR_PTR() on error.
+ */
+void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
+
+/**
+ * i915_gem_object_unpin_map - releases an earlier mapping
+ * @obj - the object to unmap
+ *
+ * After pinning the object and mapping its pages, once you are finished
+ * with your access, call i915_gem_object_unpin_map() to release the pin
+ * upon the mapping. Once the pin count reaches zero, that mapping may be
+ * removed.
+ *
+ * The caller must hold the struct_mutex.
+ */
+static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ i915_gem_object_unpin_pages(obj);
+}
+
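A typical pin_map/unpin_map round trip under struct_mutex, per the kernel-doc above. The wrapper function, data and len are hypothetical; only the pin_map/unpin_map calls come from this patch:

static int copy_into_object(struct drm_device *dev,
                            struct drm_i915_gem_object *obj,
                            const void *data, size_t len)
{
        void *vaddr;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);
        vaddr = i915_gem_object_pin_map(obj);   /* pins pages + maps them */
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
                goto unlock;
        }
        memcpy(vaddr, data, len);               /* CPU access via the map */
        i915_gem_object_unpin_map(obj);         /* drop pin; map may vanish */
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}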
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *to,
@@ -3009,42 +3080,68 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
- u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
- return i915_seqno_passed(seqno, req->previous_seqno);
+ if (!lazy_coherency && req->engine->irq_seqno_barrier)
+ req->engine->irq_seqno_barrier(req->engine);
+ return i915_seqno_passed(req->engine->get_seqno(req->engine),
+ req->previous_seqno);
}
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
- u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
- return i915_seqno_passed(seqno, req->seqno);
+ if (!lazy_coherency && req->engine->irq_seqno_barrier)
+ req->engine->irq_seqno_barrier(req->engine);
+ return i915_seqno_passed(req->engine->get_seqno(req->engine),
+ req->seqno);
}
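Both helpers funnel into i915_seqno_passed(), which (in this driver) compares sequence numbers with a signed 32-bit subtraction so ordering survives the u32 wrapping around. A standalone sketch of that comparison and its behaviour at the wrap point:

#include <stdint.h>
#include <assert.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;     /* wraparound-safe ordering */
}

int main(void)
{
        assert(seqno_passed(5, 3));             /* plainly later */
        assert(seqno_passed(2, 0xfffffffeu));   /* later, across the wrap */
        assert(!seqno_passed(0xfffffffeu, 2));  /* earlier, across the wrap */
        return 0;
}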
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring);
+i915_gem_find_active_request(struct intel_engine_cs *engine);
bool i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
-int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
- bool interruptible);
+void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
+
+static inline u32 i915_reset_counter(struct i915_gpu_error *error)
+{
+ return atomic_read(&error->reset_counter);
+}
+
+static inline bool __i915_reset_in_progress(u32 reset)
+{
+ return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
+{
+ return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+}
+
+static inline bool __i915_terminally_wedged(u32 reset)
+{
+ return unlikely(reset & I915_WEDGED);
+}
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
- return unlikely(atomic_read(&error->reset_counter)
- & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+ return __i915_reset_in_progress(i915_reset_counter(error));
+}
+
+static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+{
+ return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
}
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
- return atomic_read(&error->reset_counter) & I915_WEDGED;
+ return __i915_terminally_wedged(i915_reset_counter(error));
}
static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
- return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
+ return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
}
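The reset counter packs three things into one atomic: bit 0 is the reset-in-progress flag, bit 31 is the terminal I915_WEDGED flag, and the rest counts reset epochs (the counter ticks once when a reset begins and once when it completes, hence the divide by two). A small worked decode, with flag values matching the i915 definitions:

#include <stdint.h>
#include <stdio.h>

#define RESET_IN_PROGRESS       (1u << 0)       /* I915_RESET_IN_PROGRESS_FLAG */
#define WEDGED                  (1u << 31)      /* I915_WEDGED */

static uint32_t reset_count(uint32_t counter)
{
        /* two counter ticks per completed reset: begin + end */
        return ((counter & ~WEDGED) + 1) / 2;
}

int main(void)
{
        uint32_t c = 4;                         /* two resets done, idle */
        printf("%u resets, in progress: %d, wedged: %d\n",
               reset_count(c), !!(c & RESET_IN_PROGRESS), !!(c & WEDGED));
        return 0;
}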
static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
@@ -3062,11 +3159,11 @@ static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_init(struct drm_device *dev);
-int i915_gem_init_rings(struct drm_device *dev);
+int i915_gem_init_engines(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_engines(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
void __i915_add_request(struct drm_i915_gem_request *req,
@@ -3077,7 +3174,6 @@ void __i915_add_request(struct drm_i915_gem_request *req,
#define i915_add_request_no_flush(req) \
__i915_add_request(req, NULL, false)
int __i915_wait_request(struct drm_i915_gem_request *req,
- unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct intel_rps_client *rps);
@@ -3157,13 +3253,9 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
-#define i915_obj_to_ggtt(obj) \
- (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
-
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
- WARN_ON(i915_is_ggtt(vm));
return container_of(vm, struct i915_hw_ppgtt, base);
}
@@ -3176,7 +3268,10 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+ return i915_gem_obj_size(obj, &ggtt->base);
}
static inline int __must_check
@@ -3184,7 +3279,10 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
unsigned flags)
{
- return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+ return i915_gem_object_pin(obj, &ggtt->base,
alignment, flags | PIN_GLOBAL);
}
@@ -3299,6 +3397,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
#define I915_SHRINK_ACTIVE 0x8
+#define I915_SHRINK_VMAPS 0x10
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
@@ -3345,7 +3444,7 @@ static inline void i915_error_state_buf_release(
{
kfree(eb->buf);
}
-void i915_capture_error_state(struct drm_device *dev, bool wedge,
+void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv);
@@ -3357,10 +3456,10 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
-int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
-void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
-bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
-int i915_parse_cmds(struct intel_engine_cs *ring,
+int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
+bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
+int i915_parse_cmds(struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj,
u32 batch_start_offset,
@@ -3394,6 +3493,14 @@ extern void intel_i2c_reset(struct drm_device *dev);
/* intel_bios.c */
int intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
+bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
+bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
+ enum port port);
/* intel_opregion.c */
#ifdef CONFIG_ACPI
@@ -3405,6 +3512,7 @@ extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
pci_power_t state);
+extern int intel_opregion_get_panel_type(struct drm_device *dev);
#else
static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
@@ -3420,6 +3528,10 @@ intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
return 0;
}
+static inline int intel_opregion_get_panel_type(struct drm_device *dev)
+{
+ return -ENODEV;
+}
#endif
/* intel_acpi.c */
@@ -3579,11 +3691,6 @@ static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
return VGACNTRL;
}
-static inline void __user *to_user_ptr(u64 address)
-{
- return (void __user *)(uintptr_t)address;
-}
-
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
unsigned long j = msecs_to_jiffies(m);
@@ -3631,11 +3738,11 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
}
-static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
+static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
struct drm_i915_gem_request *req)
{
- if (ring->trace_irq_req == NULL && ring->irq_get(ring))
- i915_gem_request_assign(&ring->trace_irq_req, req);
+ if (engine->trace_irq_req == NULL && engine->irq_get(engine))
+ i915_gem_request_assign(&engine->trace_irq_req, req);
}
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dabc08987..aad26851c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -32,14 +32,13 @@
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include "intel_mocs.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
-#define RQ_BUG_ON(expr)
-
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
@@ -85,9 +84,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
{
int ret;
-#define EXIT_COND (!i915_reset_in_progress(error) || \
- i915_terminally_wedged(error))
- if (EXIT_COND)
+ if (!i915_reset_in_progress(error))
return 0;
/*
@@ -96,17 +93,16 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
* we should simply try to bail out and fail as gracefully as possible.
*/
ret = wait_event_interruptible_timeout(error->reset_queue,
- EXIT_COND,
+ !i915_reset_in_progress(error),
10*HZ);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
} else if (ret < 0) {
return ret;
+ } else {
+ return 0;
}
-#undef EXIT_COND
-
- return 0;
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
@@ -130,9 +126,9 @@ int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_get_aperture *args = data;
- struct i915_gtt *ggtt = &dev_priv->gtt;
struct i915_vma *vma;
size_t pinned;
@@ -146,7 +142,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
- args->aper_size = dev_priv->gtt.base.total;
+ args->aper_size = ggtt->base.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@@ -211,11 +207,10 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
BUG_ON(obj->madv == __I915_MADV_PURGED);
ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (ret) {
+ if (WARN_ON(ret)) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
- WARN_ON(ret != -EIO);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -324,7 +319,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
- char __user *user_data = to_user_ptr(args->data_ptr);
+ char __user *user_data = u64_to_user_ptr(args->data_ptr);
int ret = 0;
/* We manually control the domain here and pretend that it
@@ -605,7 +600,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
int needs_clflush = 0;
struct sg_page_iter sg_iter;
- user_data = to_user_ptr(args->data_ptr);
+ user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -692,7 +687,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return 0;
if (!access_ok(VERIFY_WRITE,
- to_user_ptr(args->data_ptr),
+ u64_to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
@@ -700,7 +695,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -765,7 +760,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
ssize_t remain;
loff_t offset, page_base;
char __user *user_data;
@@ -783,7 +779,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
if (ret)
goto out_unpin;
- user_data = to_user_ptr(args->data_ptr);
+ user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
@@ -807,7 +803,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
- if (fast_user_write(dev_priv->gtt.mappable, page_base,
+ if (fast_user_write(ggtt->mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out_flush;
@@ -907,7 +903,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int needs_clflush_before = 0;
struct sg_page_iter sg_iter;
- user_data = to_user_ptr(args->data_ptr);
+ user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -1036,12 +1032,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return 0;
if (!access_ok(VERIFY_READ,
- to_user_ptr(args->data_ptr),
+ u64_to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
if (likely(!i915.prefault_disable)) {
- ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+ ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
args->size);
if (ret)
return -EFAULT;
@@ -1053,7 +1049,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (ret)
goto put_rpm;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -1109,27 +1105,19 @@ put_rpm:
return ret;
}
-int
-i915_gem_check_wedge(struct i915_gpu_error *error,
- bool interruptible)
+static int
+i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
{
- if (i915_reset_in_progress(error)) {
+ if (__i915_terminally_wedged(reset_counter))
+ return -EIO;
+
+ if (__i915_reset_in_progress(reset_counter)) {
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */
if (!interruptible)
return -EIO;
- /* Recovery complete, but the reset failed ... */
- if (i915_terminally_wedged(error))
- return -EIO;
-
- /*
- * Check if GPU Reset is in progress - we need intel_ring_begin
- * to work properly to reinit the hw state while the gpu is
- * still marked as reset-in-progress. Handle this with a flag.
- */
- if (!error->reload_in_reset)
- return -EAGAIN;
+ return -EAGAIN;
}
return 0;
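A note on the rewritten helper: i915_gem_check_wedge() now decodes a reset_counter value sampled earlier, instead of re-reading the gpu_error struct. Roughly, per the i915_drv.h side of this series, the counter packs both conditions into flag bits (a sketch; the exact definitions live in i915_drv.h):

    #define I915_RESET_IN_PROGRESS_FLAG	1
    #define I915_WEDGED			(1u << 31)

    static inline u32 i915_reset_counter(struct i915_gpu_error *error)
    {
    	return atomic_read(&error->reset_counter);
    }

    static inline bool __i915_reset_in_progress(u32 reset)
    {
    	return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
    }

    static inline bool __i915_terminally_wedged(u32 reset)
    {
    	return unlikely(reset & I915_WEDGED);
    }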
@@ -1141,9 +1129,9 @@ static void fake_irq(unsigned long data)
}
static bool missed_irq(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
- return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+ return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
}
static unsigned long local_clock_us(unsigned *cpu)
@@ -1193,7 +1181,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
* takes to sleep on a request, on the order of a microsecond.
*/
- if (req->ring->irq_refcount)
+ if (req->engine->irq_refcount)
return -EBUSY;
/* Only spin if we know the GPU is processing this request */
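For context, the bounded busy-wait that follows this check (unchanged by the hunk, so not shown) has roughly the shape below; local_clock_us() and busywait_stop() are defined earlier in this file:

    /* Rough shape of the ~5us spin before sleeping on the irq: */
    timeout = local_clock_us(&cpu) + 5;
    while (!need_resched()) {
    	if (i915_gem_request_completed(req, true))
    		return 0;			/* done, no sleep needed */
    	if (signal_pending_state(state, current))
    		break;				/* honour the wait mode */
    	if (busywait_stop(timeout, cpu))
    		break;				/* spin budget exhausted */
    	cpu_relax_lowlatency();
    }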
@@ -1223,7 +1211,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
/**
* __i915_wait_request - wait until execution of request has finished
* @req: duh!
- * @reset_counter: reset sequence associated with the given request
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
@@ -1238,16 +1225,15 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
* errno with remaining time filled in timeout argument.
*/
int __i915_wait_request(struct drm_i915_gem_request *req,
- unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct intel_rps_client *rps)
{
- struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
- ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
+ ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
unsigned long timeout_expire;
@@ -1288,7 +1274,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
if (ret == 0)
goto out;
- if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
+ if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
ret = -ENODEV;
goto out;
}
@@ -1296,16 +1282,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
for (;;) {
struct timer_list timer;
- prepare_to_wait(&ring->irq_queue, &wait, state);
+ prepare_to_wait(&engine->irq_queue, &wait, state);
/* We need to check whether any gpu reset happened in between
- * the caller grabbing the seqno and now ... */
- if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
- /* ... but upgrade the -EAGAIN to an -EIO if the gpu
- * is truely gone. */
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
- if (ret == 0)
- ret = -EAGAIN;
+ * the request being submitted and now. If a reset has occurred,
+ * the request is effectively complete (we either are in the
+ * process of or have discarded the rendering and completely
+ * reset the GPU. The results of the request are lost and we
+ * are free to continue on with the original operation.
+ */
+ if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
+ ret = 0;
break;
}
@@ -1325,11 +1312,11 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
}
timer.function = NULL;
- if (timeout || missed_irq(dev_priv, ring)) {
+ if (timeout || missed_irq(dev_priv, engine)) {
unsigned long expire;
setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
- expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
+ expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
mod_timer(&timer, expire);
}
@@ -1341,9 +1328,9 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
}
}
if (!irq_test_in_progress)
- ring->irq_put(ring);
+ engine->irq_put(engine);
- finish_wait(&ring->irq_queue, &wait);
+ finish_wait(&engine->irq_queue, &wait);
out:
trace_i915_gem_request_wait_end(req);
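Because the reset counter is now captured at request-allocation time (req->reset_counter, set in __i915_gem_request_alloc() further down), callers no longer sample it themselves. A hedged example of the simplified call site:

    /* Previously the caller had to pass
     * atomic_read(&gpu_error.reset_counter) alongside req; now:
     */
    ret = __i915_wait_request(req, true /* interruptible */, NULL, NULL);
    if (ret)
    	return ret;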
@@ -1370,7 +1357,6 @@ out:
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file)
{
- struct drm_i915_private *dev_private;
struct drm_i915_file_private *file_priv;
WARN_ON(!req || !file || req->file_priv);
@@ -1381,7 +1367,6 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
if (req->file_priv)
return -EINVAL;
- dev_private = req->ring->dev->dev_private;
file_priv = file->driver_priv;
spin_lock(&file_priv->mm.lock);
@@ -1434,7 +1419,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
static void
__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *engine = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct drm_i915_gem_request *tmp;
lockdep_assert_held(&engine->dev->struct_mutex);
@@ -1459,30 +1444,22 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
int
i915_wait_request(struct drm_i915_gem_request *req)
{
- struct drm_device *dev;
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *dev_priv = req->i915;
bool interruptible;
int ret;
- BUG_ON(req == NULL);
-
- dev = req->ring->dev;
- dev_priv = dev->dev_private;
interruptible = dev_priv->mm.interruptible;
- BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+ ret = __i915_wait_request(req, interruptible, NULL, NULL);
if (ret)
return ret;
- ret = __i915_wait_request(req,
- atomic_read(&dev_priv->gpu_error.reset_counter),
- interruptible, NULL, NULL);
- if (ret)
- return ret;
+ /* If the GPU hung, we want to keep the requests to find the guilty. */
+ if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
+ __i915_gem_request_retire__upto(req);
- __i915_gem_request_retire__upto(req);
return 0;
}
@@ -1505,14 +1482,14 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- i = obj->last_write_req->ring->id;
+ i = obj->last_write_req->engine->id;
if (obj->last_read_req[i] == obj->last_write_req)
i915_gem_object_retire__read(obj, i);
else
i915_gem_object_retire__write(obj);
}
} else {
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
if (obj->last_read_req[i] == NULL)
continue;
@@ -1522,7 +1499,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
i915_gem_object_retire__read(obj, i);
}
- RQ_BUG_ON(obj->active);
+ GEM_BUG_ON(obj->active);
}
return 0;
@@ -1532,14 +1509,15 @@ static void
i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req)
{
- int ring = req->ring->id;
+ int ring = req->engine->id;
if (obj->last_read_req[ring] == req)
i915_gem_object_retire__read(obj, ring);
else if (obj->last_write_req == req)
i915_gem_object_retire__write(obj);
- __i915_gem_request_retire__upto(req);
+ if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
+ __i915_gem_request_retire__upto(req);
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1552,8 +1530,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_request *requests[I915_NUM_RINGS];
- unsigned reset_counter;
+ struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
int ret, i, n = 0;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1562,12 +1539,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (!obj->active)
return 0;
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
- if (ret)
- return ret;
-
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
-
if (readonly) {
struct drm_i915_gem_request *req;
@@ -1577,7 +1548,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
requests[n++] = i915_gem_request_reference(req);
} else {
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
req = obj->last_read_req[i];
@@ -1589,9 +1560,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
}
mutex_unlock(&dev->struct_mutex);
+ ret = 0;
for (i = 0; ret == 0 && i < n; i++)
- ret = __i915_wait_request(requests[i], reset_counter, true,
- NULL, rps);
+ ret = __i915_wait_request(requests[i], true, NULL, rps);
mutex_lock(&dev->struct_mutex);
for (i = 0; i < n; i++) {
@@ -1640,7 +1611,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -1688,7 +1659,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -1732,10 +1703,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (args->flags & ~(I915_MMAP_WC))
return -EINVAL;
- if (args->flags & I915_MMAP_WC && !cpu_has_pat)
+ if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
return -ENODEV;
- obj = drm_gem_object_lookup(dev, file, args->handle);
+ obj = drm_gem_object_lookup(file, args->handle);
if (obj == NULL)
return -ENOENT;
@@ -1754,7 +1725,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- down_write(&mm->mmap_sem);
+ if (down_write_killable(&mm->mmap_sem)) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -EINTR;
+ }
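down_write_killable(), new in this kernel, blocks like down_write() but gives up if a fatal signal is delivered, returning non-zero so the caller can unwind with -EINTR instead of leaving an unkillable task stuck on a contended mmap_sem. The generic pattern:

    /* Generic pattern, not i915-specific: */
    if (down_write_killable(&mm->mmap_sem))
    	return -EINTR;		/* SIGKILL arrived while waiting */
    /* ... modify VMAs ... */
    up_write(&mm->mmap_sem);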
vma = find_vma(mm, addr);
if (vma)
vma->vm_page_prot =
@@ -1792,7 +1766,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_ggtt_view view = i915_ggtt_view_normal;
pgoff_t page_offset;
unsigned long pfn;
@@ -1827,7 +1802,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* Use a partial view if the object is bigger than the aperture. */
- if (obj->base.size >= dev_priv->gtt.mappable_end &&
+ if (obj->base.size >= ggtt->mappable_end &&
obj->tiling_mode == I915_TILING_NONE) {
static const unsigned int chunk_size = 256; // 1 MiB
@@ -1855,7 +1830,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unpin;
/* Finally, remap it using the new GTT offset */
- pfn = dev_priv->gtt.mappable_base +
+ pfn = ggtt->mappable_base +
i915_gem_obj_ggtt_offset_view(obj, &view);
pfn >>= PAGE_SHIFT;
@@ -1964,11 +1939,27 @@ out:
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
+ /* Serialisation between user GTT access and our code depends upon
+ * revoking the CPU's PTE whilst the mutex is held. The next user
+ * pagefault then has to wait until we release the mutex.
+ */
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
if (!obj->fault_mappable)
return;
drm_vma_node_unmap(&obj->base.vma_node,
obj->base.dev->anon_inode->i_mapping);
+
+ /* Ensure that the CPU's PTEs are revoked and there are no outstanding
+ * memory transactions from userspace before we return. The TLB
+ * flushing implied by changing the PTEs above *should* be
+ * sufficient; the extra barrier here just provides us with a bit
+ * of paranoid documentation about our requirement to serialise
+ * memory writes before touching registers / GSM.
+ */
+ wmb();
+
obj->fault_mappable = false;
}
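The requirement the new wmb() documents: the PTE revocation performed by drm_vma_node_unmap() must be globally visible before any later MMIO touches the GSM. As a sketch, with reg/val standing in for whatever register access follows:

    drm_vma_node_unmap(&obj->base.vma_node, mapping);	/* PTE writes */
    wmb();			/* order the PTE writes before ... */
    I915_WRITE(reg, val);	/* ... subsequent register/GSM traffic */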
@@ -2033,9 +2024,6 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- if (drm_vma_node_has_offset(&obj->base.vma_node))
- return 0;
-
dev_priv->mm.shrinker_no_lock_stealing = true;
ret = drm_gem_create_mmap_offset(&obj->base);
@@ -2084,7 +2072,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -2180,11 +2168,10 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
BUG_ON(obj->madv == __I915_MADV_PURGED);
ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (ret) {
+ if (WARN_ON(ret)) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
- WARN_ON(ret != -EIO);
i915_gem_clflush_object(obj, true);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -2232,6 +2219,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
* lists early. */
list_del(&obj->global_list);
+ if (obj->mapping) {
+ if (is_vmalloc_addr(obj->mapping))
+ vunmap(obj->mapping);
+ else
+ kunmap(kmap_to_page(obj->mapping));
+ obj->mapping = NULL;
+ }
+
ops->put_pages(obj);
obj->pages = NULL;
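This teardown pairs with i915_gem_object_pin_map() below: single-page objects are mapped with kmap(), larger ones with vmap(), and is_vmalloc_addr() tells them apart at free time, with kmap_to_page() recovering the struct page behind a kmap()'d address. Distilled into a hypothetical helper (the name is invented here):

    static void release_obj_mapping(void *mapping)
    {
    	if (is_vmalloc_addr(mapping))
    		vunmap(mapping);		/* multi-page vmap() */
    	else
    		kunmap(kmap_to_page(mapping));	/* single-page kmap() */
    }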
@@ -2400,21 +2395,64 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return 0;
}
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ return ERR_PTR(ret);
+
+ i915_gem_object_pin_pages(obj);
+
+ if (obj->mapping == NULL) {
+ struct page **pages;
+
+ pages = NULL;
+ if (obj->base.size == PAGE_SIZE)
+ obj->mapping = kmap(sg_page(obj->pages->sgl));
+ else
+ pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
+ sizeof(*pages),
+ GFP_TEMPORARY);
+ if (pages != NULL) {
+ struct sg_page_iter sg_iter;
+ int n;
+
+ n = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter,
+ obj->pages->nents, 0)
+ pages[n++] = sg_page_iter_page(&sg_iter);
+
+ obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
+ drm_free_large(pages);
+ }
+ if (obj->mapping == NULL) {
+ i915_gem_object_unpin_pages(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ return obj->mapping;
+}
+
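A hedged usage sketch for the new i915_gem_object_pin_map(): it returns a kernel virtual address that stays valid while the pages are pinned; the mapping itself is cached on the object and only torn down in put_pages above. data/len below are placeholders:

    /* Hypothetical caller, under struct_mutex: */
    void *vaddr = i915_gem_object_pin_map(obj);
    if (IS_ERR(vaddr))
    	return PTR_ERR(vaddr);
    memcpy(vaddr, data, len);		/* CPU access via the mapping */
    i915_gem_object_unpin_pages(obj);	/* drop the pin; map stays cached */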
void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_request *req)
{
struct drm_i915_gem_object *obj = vma->obj;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
- ring = i915_gem_request_get_ring(req);
+ engine = i915_gem_request_get_engine(req);
/* Add a reference if we're newly entering the active list. */
if (obj->active == 0)
drm_gem_object_reference(&obj->base);
- obj->active |= intel_ring_flag(ring);
+ obj->active |= intel_engine_flag(engine);
- list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
- i915_gem_request_assign(&obj->last_read_req[ring->id], req);
+ list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
+ i915_gem_request_assign(&obj->last_read_req[engine->id], req);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
@@ -2422,8 +2460,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
{
- RQ_BUG_ON(obj->last_write_req == NULL);
- RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
+ GEM_BUG_ON(obj->last_write_req == NULL);
+ GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
i915_gem_request_assign(&obj->last_write_req, NULL);
intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2434,13 +2472,13 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
{
struct i915_vma *vma;
- RQ_BUG_ON(obj->last_read_req[ring] == NULL);
- RQ_BUG_ON(!(obj->active & (1 << ring)));
+ GEM_BUG_ON(obj->last_read_req[ring] == NULL);
+ GEM_BUG_ON(!(obj->active & (1 << ring)));
- list_del_init(&obj->ring_list[ring]);
+ list_del_init(&obj->engine_list[ring]);
i915_gem_request_assign(&obj->last_read_req[ring], NULL);
- if (obj->last_write_req && obj->last_write_req->ring->id == ring)
+ if (obj->last_write_req && obj->last_write_req->engine->id == ring)
i915_gem_object_retire__write(obj);
obj->active &= ~(1 << ring);
@@ -2467,24 +2505,20 @@ static int
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int ret, i, j;
+ struct intel_engine_cs *engine;
+ int ret;
/* Carefully retire all requests without writing to the rings */
- for_each_ring(ring, dev_priv, i) {
- ret = intel_ring_idle(ring);
+ for_each_engine(engine, dev_priv) {
+ ret = intel_engine_idle(engine);
if (ret)
return ret;
}
i915_gem_retire_requests(dev);
/* Finally reset hw state */
- for_each_ring(ring, dev_priv, i) {
- intel_ring_init_seqno(ring, seqno);
-
- for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
- ring->semaphore.sync_seqno[j] = 0;
- }
+ for_each_engine(engine, dev_priv)
+ intel_ring_init_seqno(engine, seqno);
return 0;
}
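The index-less for_each_engine() used here expands to roughly the following; the in-tree macro wraps the final condition in for_each_if() to avoid dangling-else warnings, but the effect is the same:

    /* Rough shape: walk the engine array, skipping engines that were
     * never initialised, so no index variable is needed.
     */
    #define for_each_engine(engine__, dev_priv__) \
    	for ((engine__) = &(dev_priv__)->engine[0]; \
    	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
    	     (engine__)++) \
    		if (intel_engine_initialized(engine__))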
@@ -2542,7 +2576,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
struct drm_i915_gem_object *obj,
bool flush_caches)
{
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
u32 request_start;
@@ -2551,8 +2585,8 @@ void __i915_add_request(struct drm_i915_gem_request *request,
if (WARN_ON(request == NULL))
return;
- ring = request->ring;
- dev_priv = ring->dev->dev_private;
+ engine = request->engine;
+ dev_priv = request->i915;
ringbuf = request->ringbuf;
/*
@@ -2579,6 +2613,28 @@ void __i915_add_request(struct drm_i915_gem_request *request,
WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
}
+ trace_i915_gem_request_add(request);
+
+ request->head = request_start;
+
+ /* Whilst this request exists, batch_obj will be on the
+ * active_list, and so will hold the active reference. Only when this
+ * request is retired will the batch_obj be moved onto the
+ * inactive_list and lose its active reference. Hence we do not need
+ * to explicitly hold another reference here.
+ */
+ request->batch_obj = obj;
+
+ /* Seal the request and mark it as pending execution. Note that
+ * we may inspect this state, without holding any locks, during
+ * hangcheck. Hence we apply the barrier to ensure that we do not
+ * see a more recent value in the hws than we are tracking.
+ */
+ request->emitted_jiffies = jiffies;
+ request->previous_seqno = engine->last_submitted_seqno;
+ smp_store_mb(engine->last_submitted_seqno, request->seqno);
+ list_add_tail(&request->list, &engine->request_list);
+
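smp_store_mb(v, x) is a WRITE_ONCE() followed by a full memory barrier; the lockless consumer in hangcheck pairs with it so the hw status page can never appear to run ahead of last_submitted_seqno. A hedged sketch of the reader side:

    /* Hangcheck-side pairing, roughly: */
    submitted = READ_ONCE(engine->last_submitted_seqno);
    seqno = engine->get_seqno(engine);	/* from the hw status page */
    busy = !i915_seqno_passed(seqno, submitted);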
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
@@ -2587,33 +2643,16 @@ void __i915_add_request(struct drm_i915_gem_request *request,
request->postfix = intel_ring_get_tail(ringbuf);
if (i915.enable_execlists)
- ret = ring->emit_request(request);
+ ret = engine->emit_request(request);
else {
- ret = ring->add_request(request);
+ ret = engine->add_request(request);
request->tail = intel_ring_get_tail(ringbuf);
}
/* Not allowed to fail! */
WARN(ret, "emit|add_request failed: %d!\n", ret);
- request->head = request_start;
-
- /* Whilst this request exists, batch_obj will be on the
- * active_list, and so will hold the active reference. Only when this
- * request is retired will the the batch_obj be moved onto the
- * inactive_list and lose its active reference. Hence we do not need
- * to explicitly hold another reference here.
- */
- request->batch_obj = obj;
-
- request->emitted_jiffies = jiffies;
- request->previous_seqno = ring->last_submitted_seqno;
- ring->last_submitted_seqno = request->seqno;
- list_add_tail(&request->list, &ring->request_list);
-
- trace_i915_gem_request_add(request);
-
- i915_queue_hangcheck(ring->dev);
+ i915_queue_hangcheck(engine->dev);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
@@ -2680,7 +2719,7 @@ void i915_gem_request_free(struct kref *req_ref)
if (ctx) {
if (i915.enable_execlists && ctx != req->i915->kernel_context)
- intel_lr_context_unpin(ctx, req->ring);
+ intel_lr_context_unpin(ctx, req->engine);
i915_gem_context_unreference(ctx);
}
@@ -2689,11 +2728,12 @@ void i915_gem_request_free(struct kref *req_ref)
}
static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *ring,
+__i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out)
{
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
struct drm_i915_gem_request *req;
int ret;
@@ -2702,17 +2742,26 @@ __i915_gem_request_alloc(struct intel_engine_cs *ring,
*req_out = NULL;
+ /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+ * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
+ * and restart.
+ */
+ ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
+ if (ret)
+ return ret;
+
req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
if (req == NULL)
return -ENOMEM;
- ret = i915_gem_get_seqno(ring->dev, &req->seqno);
+ ret = i915_gem_get_seqno(engine->dev, &req->seqno);
if (ret)
goto err;
kref_init(&req->ref);
req->i915 = dev_priv;
- req->ring = ring;
+ req->engine = engine;
+ req->reset_counter = reset_counter;
req->ctx = ctx;
i915_gem_context_reference(req->ctx);
@@ -2742,7 +2791,8 @@ __i915_gem_request_alloc(struct intel_engine_cs *ring,
* fully prepared. Thus it can be cleaned up using the proper
* free code.
*/
- i915_gem_request_cancel(req);
+ intel_ring_reserved_space_cancel(req->ringbuf);
+ i915_gem_request_unreference(req);
return ret;
}
@@ -2779,19 +2829,12 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
return err ? ERR_PTR(err) : req;
}
-void i915_gem_request_cancel(struct drm_i915_gem_request *req)
-{
- intel_ring_reserved_space_cancel(req->ringbuf);
-
- i915_gem_request_unreference(req);
-}
-
struct drm_i915_gem_request *
-i915_gem_find_active_request(struct intel_engine_cs *ring)
+i915_gem_find_active_request(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
- list_for_each_entry(request, &ring->request_list, list) {
+ list_for_each_entry(request, &engine->request_list, list) {
if (i915_gem_request_completed(request, false))
continue;
@@ -2801,38 +2844,38 @@ i915_gem_find_active_request(struct intel_engine_cs *ring)
return NULL;
}
-static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *ring)
+static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
bool ring_hung;
- request = i915_gem_find_active_request(ring);
+ request = i915_gem_find_active_request(engine);
if (request == NULL)
return;
- ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
+ ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
i915_set_reset_status(dev_priv, request->ctx, ring_hung);
- list_for_each_entry_continue(request, &ring->request_list, list)
+ list_for_each_entry_continue(request, &engine->request_list, list)
i915_set_reset_status(dev_priv, request->ctx, false);
}
-static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *ring)
+static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
{
struct intel_ringbuffer *buffer;
- while (!list_empty(&ring->active_list)) {
+ while (!list_empty(&engine->active_list)) {
struct drm_i915_gem_object *obj;
- obj = list_first_entry(&ring->active_list,
+ obj = list_first_entry(&engine->active_list,
struct drm_i915_gem_object,
- ring_list[ring->id]);
+ engine_list[engine->id]);
- i915_gem_object_retire__read(obj, ring->id);
+ i915_gem_object_retire__read(obj, engine->id);
}
/*
@@ -2842,14 +2885,16 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
*/
if (i915.enable_execlists) {
- spin_lock_irq(&ring->execlist_lock);
+ /* Ensure irq handler finishes or is cancelled. */
+ tasklet_kill(&engine->irq_tasklet);
+ spin_lock_bh(&engine->execlist_lock);
/* list_splice_tail_init checks for empty lists */
- list_splice_tail_init(&ring->execlist_queue,
- &ring->execlist_retired_req_list);
+ list_splice_tail_init(&engine->execlist_queue,
+ &engine->execlist_retired_req_list);
+ spin_unlock_bh(&engine->execlist_lock);
- spin_unlock_irq(&ring->execlist_lock);
- intel_execlists_retire_requests(ring);
+ intel_execlists_retire_requests(engine);
}
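The spin_lock_irq() to spin_lock_bh() switch goes together with execlist submission having moved into a tasklet: the lock is now only taken from process and softirq context, so disabling bottom halves suffices, and tasklet_kill() guarantees no handler is mid-flight before the queues are spliced. The general shape:

    tasklet_kill(&engine->irq_tasklet);		/* wait out / cancel handler */
    spin_lock_bh(&engine->execlist_lock);	/* softirq-safe is enough */
    /* ... splice the queues ... */
    spin_unlock_bh(&engine->execlist_lock);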
/*
@@ -2859,10 +2904,10 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
* implicit references on things like e.g. ppgtt address spaces through
* the request.
*/
- while (!list_empty(&ring->request_list)) {
+ while (!list_empty(&engine->request_list)) {
struct drm_i915_gem_request *request;
- request = list_first_entry(&ring->request_list,
+ request = list_first_entry(&engine->request_list,
struct drm_i915_gem_request,
list);
@@ -2876,28 +2921,29 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
* upon reset is less than when we start. Do one more pass over
* all the ringbuffers to reset last_retired_head.
*/
- list_for_each_entry(buffer, &ring->buffers, link) {
+ list_for_each_entry(buffer, &engine->buffers, link) {
buffer->last_retired_head = buffer->tail;
intel_ring_update_space(buffer);
}
+
+ intel_ring_init_seqno(engine, engine->last_submitted_seqno);
}
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
/*
* Before we free the objects from the requests, we need to inspect
* them for finding the guilty party. As the requests only borrow
* their reference to the objects, the inspection must be done first.
*/
- for_each_ring(ring, dev_priv, i)
- i915_gem_reset_ring_status(dev_priv, ring);
+ for_each_engine(engine, dev_priv)
+ i915_gem_reset_engine_status(dev_priv, engine);
- for_each_ring(ring, dev_priv, i)
- i915_gem_reset_ring_cleanup(dev_priv, ring);
+ for_each_engine(engine, dev_priv)
+ i915_gem_reset_engine_cleanup(dev_priv, engine);
i915_gem_context_reset(dev);
@@ -2910,19 +2956,19 @@ void i915_gem_reset(struct drm_device *dev)
* This function clears the request list as sequence numbers are passed.
*/
void
-i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
+i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
{
- WARN_ON(i915_verify_lists(ring->dev));
+ WARN_ON(i915_verify_lists(engine->dev));
/* Retire requests first as we use it above for the early return.
* If we retire requests last, we may use a later seqno and so clear
* the requests lists without clearing the active list, leading to
* confusion.
*/
- while (!list_empty(&ring->request_list)) {
+ while (!list_empty(&engine->request_list)) {
struct drm_i915_gem_request *request;
- request = list_first_entry(&ring->request_list,
+ request = list_first_entry(&engine->request_list,
struct drm_i915_gem_request,
list);
@@ -2936,45 +2982,44 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
* by the ringbuffer to the flushing/inactive lists as appropriate,
* before we free the context associated with the requests.
*/
- while (!list_empty(&ring->active_list)) {
+ while (!list_empty(&engine->active_list)) {
struct drm_i915_gem_object *obj;
- obj = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list[ring->id]);
+ obj = list_first_entry(&engine->active_list,
+ struct drm_i915_gem_object,
+ engine_list[engine->id]);
- if (!list_empty(&obj->last_read_req[ring->id]->list))
+ if (!list_empty(&obj->last_read_req[engine->id]->list))
break;
- i915_gem_object_retire__read(obj, ring->id);
+ i915_gem_object_retire__read(obj, engine->id);
}
- if (unlikely(ring->trace_irq_req &&
- i915_gem_request_completed(ring->trace_irq_req, true))) {
- ring->irq_put(ring);
- i915_gem_request_assign(&ring->trace_irq_req, NULL);
+ if (unlikely(engine->trace_irq_req &&
+ i915_gem_request_completed(engine->trace_irq_req, true))) {
+ engine->irq_put(engine);
+ i915_gem_request_assign(&engine->trace_irq_req, NULL);
}
- WARN_ON(i915_verify_lists(ring->dev));
+ WARN_ON(i915_verify_lists(engine->dev));
}
bool
i915_gem_retire_requests(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
bool idle = true;
- int i;
- for_each_ring(ring, dev_priv, i) {
- i915_gem_retire_requests_ring(ring);
- idle &= list_empty(&ring->request_list);
+ for_each_engine(engine, dev_priv) {
+ i915_gem_retire_requests_ring(engine);
+ idle &= list_empty(&engine->request_list);
if (i915.enable_execlists) {
- spin_lock_irq(&ring->execlist_lock);
- idle &= list_empty(&ring->execlist_queue);
- spin_unlock_irq(&ring->execlist_lock);
+ spin_lock_bh(&engine->execlist_lock);
+ idle &= list_empty(&engine->execlist_queue);
+ spin_unlock_bh(&engine->execlist_lock);
- intel_execlists_retire_requests(ring);
+ intel_execlists_retire_requests(engine);
}
}
@@ -3011,25 +3056,21 @@ i915_gem_idle_work_handler(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), mm.idle_work.work);
struct drm_device *dev = dev_priv->dev;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
- for_each_ring(ring, dev_priv, i)
- if (!list_empty(&ring->request_list))
+ for_each_engine(engine, dev_priv)
+ if (!list_empty(&engine->request_list))
return;
/* we probably should sync with hangcheck here, using cancel_work_sync.
- * Also locking seems to be fubar here, ring->request_list is protected
+ * Also locking seems to be fubar here, engine->request_list is protected
* by dev->struct_mutex. */
intel_mark_idle(dev);
if (mutex_trylock(&dev->struct_mutex)) {
- struct intel_engine_cs *ring;
- int i;
-
- for_each_ring(ring, dev_priv, i)
- i915_gem_batch_pool_fini(&ring->batch_pool);
+ for_each_engine(engine, dev_priv)
+ i915_gem_batch_pool_fini(&engine->batch_pool);
mutex_unlock(&dev->struct_mutex);
}
@@ -3048,7 +3089,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
if (!obj->active)
return 0;
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
req = obj->last_read_req[i];
@@ -3093,11 +3134,9 @@ retire:
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
- struct drm_i915_gem_request *req[I915_NUM_RINGS];
- unsigned reset_counter;
+ struct drm_i915_gem_request *req[I915_NUM_ENGINES];
int i, n = 0;
int ret;
@@ -3108,7 +3147,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
if (&obj->base == NULL) {
mutex_unlock(&dev->struct_mutex);
return -ENOENT;
@@ -3131,9 +3170,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
drm_gem_object_unreference(&obj->base);
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
if (obj->last_read_req[i] == NULL)
continue;
@@ -3144,7 +3182,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; i < n; i++) {
if (ret == 0)
- ret = __i915_wait_request(req[i], reset_counter, true,
+ ret = __i915_wait_request(req[i], true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
to_rps_client(file));
i915_gem_request_unreference__unlocked(req[i]);
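Userspace is unaffected by the reset_counter plumbing; a hedged libdrm-style caller of the wait ioctl, with handle assumed valid and handle_timeout() invented for the example:

    struct drm_i915_gem_wait wait = {
    	.bo_handle  = handle,
    	.timeout_ns = 1000000000,	/* 1s; negative waits forever */
    };
    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait))
    	handle_timeout();	/* errno ETIME on timeout; timeout_ns
    				 * is updated with the time remaining */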
@@ -3166,7 +3204,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_engine_cs *from;
int ret;
- from = i915_gem_request_get_ring(from_req);
+ from = i915_gem_request_get_engine(from_req);
if (to == from)
return 0;
@@ -3176,7 +3214,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (!i915_semaphore_is_enabled(obj->base.dev)) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req,
- atomic_read(&i915->gpu_error.reset_counter),
i915->mm.interruptible,
NULL,
&i915->rps.semaphores);
@@ -3260,7 +3297,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request **to_req)
{
const bool readonly = obj->base.pending_write_domain == 0;
- struct drm_i915_gem_request *req[I915_NUM_RINGS];
+ struct drm_i915_gem_request *req[I915_NUM_ENGINES];
int ret, i, n;
if (!obj->active)
@@ -3274,7 +3311,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (obj->last_write_req)
req[n++] = obj->last_write_req;
} else {
- for (i = 0; i < I915_NUM_RINGS; i++)
+ for (i = 0; i < I915_NUM_ENGINES; i++)
if (obj->last_read_req[i])
req[n++] = obj->last_read_req[i];
}
@@ -3297,9 +3334,6 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
return;
- /* Wait for any direct GTT access to complete */
- mb();
-
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
@@ -3391,28 +3425,25 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
int i915_gpu_idle(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int ret, i;
+ struct intel_engine_cs *engine;
+ int ret;
/* Flush everything onto the inactive list. */
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
if (!i915.enable_execlists) {
struct drm_i915_gem_request *req;
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = i915_switch_context(req);
- if (ret) {
- i915_gem_request_cancel(req);
- return ret;
- }
-
i915_add_request_no_flush(req);
+ if (ret)
+ return ret;
}
- ret = intel_ring_idle(ring);
+ ret = intel_engine_idle(engine);
if (ret)
return ret;
}
@@ -3466,7 +3497,8 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
uint64_t flags)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
u32 fence_alignment, unfenced_alignment;
u32 search_flag, alloc_flag;
u64 start, end;
@@ -3513,7 +3545,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
end = vm->total;
if (flags & PIN_MAPPABLE)
- end = min_t(u64, end, dev_priv->gtt.mappable_end);
+ end = min_t(u64, end, ggtt->mappable_end);
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
@@ -3720,6 +3752,9 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t old_write_domain, old_read_domains;
struct i915_vma *vma;
int ret;
@@ -3774,7 +3809,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
vma = i915_gem_obj_to_ggtt(obj);
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
list_move_tail(&vma->vm_link,
- &to_i915(obj->base.dev)->gtt.base.inactive_list);
+ &ggtt->base.inactive_list);
return 0;
}
@@ -3906,7 +3941,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL)
return -ENOENT;
@@ -3949,7 +3984,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
* cacheline, whereas normally such cachelines would get
* invalidated.
*/
- if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
+ if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
return -ENODEV;
level = I915_CACHE_LLC;
@@ -3967,7 +4002,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
if (ret)
goto rpm_put;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -4128,16 +4163,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_gem_request *request, *target = NULL;
- unsigned reset_counter;
int ret;
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;
- ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
- if (ret)
- return ret;
+ /* ABI: return -EIO if already wedged */
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return -EIO;
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
@@ -4153,7 +4187,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
target = request;
}
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (target)
i915_gem_request_reference(target);
spin_unlock(&file_priv->mm.lock);
@@ -4161,7 +4194,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
- ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
+ ret = __i915_wait_request(target, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -4211,7 +4244,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
(vma->node.start & (fence_alignment - 1)) == 0);
mappable = (vma->node.start + fence_size <=
- to_i915(obj->base.dev)->gtt.mappable_end);
+ to_i915(obj->base.dev)->ggtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable;
}
@@ -4243,9 +4276,6 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
i915_gem_obj_to_vma(obj, vm);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
if (vma) {
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
@@ -4308,10 +4338,13 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
uint64_t flags)
{
- if (WARN_ONCE(!view, "no view specified"))
- return -EINVAL;
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
- return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+ BUG_ON(!view);
+
+ return i915_gem_object_do_pin(obj, &ggtt->base, view,
alignment, flags | PIN_GLOBAL);
}
@@ -4321,7 +4354,6 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
- BUG_ON(!vma);
WARN_ON(vma->pin_count == 0);
WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
@@ -4340,7 +4372,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -4359,15 +4391,15 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (obj->active) {
int i;
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
req = obj->last_read_req[i];
if (req)
- args->busy |= 1 << (16 + req->ring->exec_id);
+ args->busy |= 1 << (16 + req->engine->exec_id);
}
if (obj->last_write_req)
- args->busy |= obj->last_write_req->ring->exec_id;
+ args->busy |= obj->last_write_req->engine->exec_id;
}
unref:
@@ -4405,7 +4437,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
@@ -4447,8 +4479,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
int i;
INIT_LIST_HEAD(&obj->global_list);
- for (i = 0; i < I915_NUM_RINGS; i++)
- INIT_LIST_HEAD(&obj->ring_list[i]);
+ for (i = 0; i < I915_NUM_ENGINES; i++)
+ INIT_LIST_HEAD(&obj->engine_list[i]);
INIT_LIST_HEAD(&obj->obj_exec_link);
INIT_LIST_HEAD(&obj->vma_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4623,14 +4655,15 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
- if (WARN_ONCE(!view, "no view specified"))
- return ERR_PTR(-EINVAL);
+ BUG_ON(!view);
list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (vma->vm == ggtt &&
+ if (vma->vm == &ggtt->base &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma;
return NULL;
@@ -4653,14 +4686,13 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
}
static void
-i915_gem_stop_ringbuffers(struct drm_device *dev)
+i915_gem_stop_engines(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
- for_each_ring(ring, dev_priv, i)
- dev_priv->gt.stop_ring(ring);
+ for_each_engine(engine, dev_priv)
+ dev_priv->gt.stop_engine(engine);
}
int
@@ -4676,7 +4708,7 @@ i915_gem_suspend(struct drm_device *dev)
i915_gem_retire_requests(dev);
- i915_gem_stop_ringbuffers(dev);
+ i915_gem_stop_engines(dev);
mutex_unlock(&dev->struct_mutex);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
@@ -4697,8 +4729,8 @@ err:
int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
{
- struct intel_engine_cs *ring = req->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
int i, ret;
@@ -4716,12 +4748,12 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
* at initialization time.
*/
for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
- intel_ring_emit(ring, remap_info[i]);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
+ intel_ring_emit(engine, remap_info[i]);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return ret;
}
@@ -4778,7 +4810,7 @@ static void init_unused_rings(struct drm_device *dev)
}
}
-int i915_gem_init_rings(struct drm_device *dev)
+int i915_gem_init_engines(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -4814,13 +4846,13 @@ int i915_gem_init_rings(struct drm_device *dev)
return 0;
cleanup_vebox_ring:
- intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
+ intel_cleanup_engine(&dev_priv->engine[VECS]);
cleanup_blt_ring:
- intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
+ intel_cleanup_engine(&dev_priv->engine[BCS]);
cleanup_bsd_ring:
- intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
+ intel_cleanup_engine(&dev_priv->engine[VCS]);
cleanup_render_ring:
- intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
+ intel_cleanup_engine(&dev_priv->engine[RCS]);
return ret;
}
@@ -4829,16 +4861,13 @@ int
i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int ret, i, j;
-
- if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
- return -EIO;
+ struct intel_engine_cs *engine;
+ int ret, j;
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- if (dev_priv->ellc_size)
+ if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
if (IS_HASWELL(dev))
@@ -4876,12 +4905,14 @@ i915_gem_init_hw(struct drm_device *dev)
}
/* Need to do basic initialisation of all rings first: */
- for_each_ring(ring, dev_priv, i) {
- ret = ring->init_hw(ring);
+ for_each_engine(engine, dev_priv) {
+ ret = engine->init_hw(engine);
if (ret)
goto out;
}
+ intel_mocs_init_l3cc_table(dev);
+
/* We can't enable contexts until all firmware is loaded */
if (HAS_GUC_UCODE(dev)) {
ret = intel_guc_ucode_load(dev);
@@ -4901,38 +4932,39 @@ i915_gem_init_hw(struct drm_device *dev)
goto out;
/* Now it is safe to go back round and do everything else: */
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
struct drm_i915_gem_request *req;
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
- i915_gem_cleanup_ringbuffer(dev);
- goto out;
+ break;
}
- if (ring->id == RCS) {
- for (j = 0; j < NUM_L3_SLICES(dev); j++)
- i915_gem_l3_remap(req, j);
+ if (engine->id == RCS) {
+ for (j = 0; j < NUM_L3_SLICES(dev); j++) {
+ ret = i915_gem_l3_remap(req, j);
+ if (ret)
+ goto err_request;
+ }
}
ret = i915_ppgtt_init_ring(req);
- if (ret && ret != -EIO) {
- DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
- i915_gem_request_cancel(req);
- i915_gem_cleanup_ringbuffer(dev);
- goto out;
- }
+ if (ret)
+ goto err_request;
ret = i915_gem_context_enable(req);
- if (ret && ret != -EIO) {
- DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
- i915_gem_request_cancel(req);
- i915_gem_cleanup_ringbuffer(dev);
- goto out;
- }
+ if (ret)
+ goto err_request;
+err_request:
i915_add_request_no_flush(req);
+ if (ret) {
+ DRM_ERROR("Failed to enable %s, error=%d\n",
+ engine->name, ret);
+ i915_gem_cleanup_engines(dev);
+ break;
+ }
}
out:
@@ -4952,14 +4984,14 @@ int i915_gem_init(struct drm_device *dev)
if (!i915.enable_execlists) {
dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
- dev_priv->gt.init_rings = i915_gem_init_rings;
- dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
- dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+ dev_priv->gt.init_engines = i915_gem_init_engines;
+ dev_priv->gt.cleanup_engine = intel_cleanup_engine;
+ dev_priv->gt.stop_engine = intel_stop_engine;
} else {
dev_priv->gt.execbuf_submit = intel_execlists_submission;
- dev_priv->gt.init_rings = intel_logical_rings_init;
- dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
- dev_priv->gt.stop_ring = intel_logical_ring_stop;
+ dev_priv->gt.init_engines = intel_logical_rings_init;
+ dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
+ dev_priv->gt.stop_engine = intel_logical_ring_stop;
}
/* This is just a security blanket to placate dragons.
@@ -4974,13 +5006,13 @@ int i915_gem_init(struct drm_device *dev)
if (ret)
goto out_unlock;
- i915_gem_init_global_gtt(dev);
+ i915_gem_init_ggtt(dev);
ret = i915_gem_context_init(dev);
if (ret)
goto out_unlock;
- ret = dev_priv->gt.init_rings(dev);
+ ret = dev_priv->gt.init_engines(dev);
if (ret)
goto out_unlock;
@@ -5003,29 +5035,52 @@ out_unlock:
}
void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+i915_gem_cleanup_engines(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
- for_each_ring(ring, dev_priv, i)
- dev_priv->gt.cleanup_ring(ring);
+ for_each_engine(engine, dev_priv)
+ dev_priv->gt.cleanup_engine(engine);
- if (i915.enable_execlists)
- /*
- * Neither the BIOS, ourselves or any other kernel
- * expects the system to be in execlists mode on startup,
- * so we need to reset the GPU back to legacy mode.
- */
- intel_gpu_reset(dev);
+ if (i915.enable_execlists)
+ /*
+ * Neither the BIOS, ourselves, nor any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode.
+ */
+ intel_gpu_reset(dev, ALL_ENGINES);
}
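intel_gpu_reset() now takes an engine mask rather than resetting implicitly; ALL_ENGINES requests the full reset used here, while the mask form leaves room for per-engine resets in later series:

    intel_gpu_reset(dev, ALL_ENGINES);			/* full GPU reset */
    intel_gpu_reset(dev, intel_engine_flag(engine));	/* one engine; future use */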
static void
-init_ring_lists(struct intel_engine_cs *ring)
+init_engine_lists(struct intel_engine_cs *engine)
{
- INIT_LIST_HEAD(&ring->active_list);
- INIT_LIST_HEAD(&ring->request_list);
+ INIT_LIST_HEAD(&engine->active_list);
+ INIT_LIST_HEAD(&engine->request_list);
+}
+
+void
+i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv))
+ dev_priv->num_fence_regs = 32;
+ else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
+ IS_I945GM(dev_priv) || IS_G33(dev_priv))
+ dev_priv->num_fence_regs = 16;
+ else
+ dev_priv->num_fence_regs = 8;
+
+ if (intel_vgpu_active(dev))
+ dev_priv->num_fence_regs =
+ I915_READ(vgtif_reg(avail_rs.fence_num));
+
+ /* Initialize fence registers to zero */
+ i915_gem_restore_fences(dev);
+
+ i915_gem_detect_bit_6_swizzle(dev);
}
void
@@ -5055,8 +5110,8 @@ i915_gem_load_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- for (i = 0; i < I915_NUM_RINGS; i++)
- init_ring_lists(&dev_priv->ring[i]);
+ for (i = 0; i < I915_NUM_ENGINES; i++)
+ init_engine_lists(&dev_priv->engine[i]);
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
@@ -5067,17 +5122,6 @@ i915_gem_load_init(struct drm_device *dev)
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
- if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
- dev_priv->num_fence_regs = 32;
- else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- dev_priv->num_fence_regs = 16;
- else
- dev_priv->num_fence_regs = 8;
-
- if (intel_vgpu_active(dev))
- dev_priv->num_fence_regs =
- I915_READ(vgtif_reg(avail_rs.fence_num));
-
/*
* Set initial sequence number for requests.
* Using this number allows the wraparound to happen early,
@@ -5086,11 +5130,8 @@ i915_gem_load_init(struct drm_device *dev)
dev_priv->next_seqno = ((u32)~0 - 0x1100);
dev_priv->last_seqno = ((u32)~0 - 0x1101);
- /* Initialize fence registers to zero */
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
- i915_gem_restore_fences(dev);
- i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
dev_priv->mm.interruptible = true;
@@ -5213,11 +5254,12 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+ struct drm_i915_private *dev_priv = to_i915(o->base.dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->vm == ggtt &&
+ if (vma->vm == &ggtt->base &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
@@ -5244,11 +5286,12 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+ struct drm_i915_private *dev_priv = to_i915(o->base.dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->vm == ggtt &&
+ if (vma->vm == &ggtt->base &&
i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
return true;
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
new file mode 100644
index 000000000..8292e797d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEM_H__
+#define __I915_GEM_H__
+
+#ifdef CONFIG_DRM_I915_DEBUG_GEM
+#define GEM_BUG_ON(expr) BUG_ON(expr)
+#else
+#define GEM_BUG_ON(expr)
+#endif
+
+#endif /* __I915_GEM_H__ */
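The new GEM_BUG_ON() replaces the always-empty RQ_BUG_ON() deleted above. It compiles to BUG_ON() only under CONFIG_DRM_I915_DEBUG_GEM and to nothing otherwise, so the asserted expression must be free of side effects:

    GEM_BUG_ON(obj->active);			/* fine: pure read */
    GEM_BUG_ON(atomic_dec_and_test(&ref));	/* broken: the decrement
    						 * disappears when
    						 * CONFIG_DRM_I915_DEBUG_GEM=n */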
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5dd84e148..e5acc3916 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -342,15 +342,15 @@ void i915_gem_context_reset(struct drm_device *dev)
struct intel_context *ctx;
list_for_each_entry(ctx, &dev_priv->context_list, link)
- intel_lr_context_reset(dev, ctx);
+ intel_lr_context_reset(dev_priv, ctx);
}
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_engine_cs *ring = &dev_priv->ring[i];
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ struct intel_engine_cs *engine = &dev_priv->engine[i];
- if (ring->last_context) {
- i915_gem_context_unpin(ring->last_context, ring);
- ring->last_context = NULL;
+ if (engine->last_context) {
+ i915_gem_context_unpin(engine->last_context, engine);
+ engine->last_context = NULL;
}
}
@@ -413,7 +413,7 @@ void i915_gem_context_fini(struct drm_device *dev)
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
- intel_gpu_reset(dev);
+ intel_gpu_reset(dev, ALL_ENGINES);
/* When default context is created and switched to, base object refcount
* will be 2 (+1 from object creation and +1 from do_switch()).
@@ -421,17 +421,17 @@ void i915_gem_context_fini(struct drm_device *dev)
* to default context. So we need to unreference the base object once
* to offset the do_switch part, so that i915_gem_context_unreference()
* can then free the base object correctly. */
- WARN_ON(!dev_priv->ring[RCS].last_context);
+ WARN_ON(!dev_priv->engine[RCS].last_context);
i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
- for (i = I915_NUM_RINGS; --i >= 0;) {
- struct intel_engine_cs *ring = &dev_priv->ring[i];
+ for (i = I915_NUM_ENGINES; --i >= 0;) {
+ struct intel_engine_cs *engine = &dev_priv->engine[i];
- if (ring->last_context) {
- i915_gem_context_unpin(ring->last_context, ring);
- ring->last_context = NULL;
+ if (engine->last_context) {
+ i915_gem_context_unpin(engine->last_context, engine);
+ engine->last_context = NULL;
}
}
@@ -441,14 +441,14 @@ void i915_gem_context_fini(struct drm_device *dev)
int i915_gem_context_enable(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
if (i915.enable_execlists) {
- if (ring->init_context == NULL)
+ if (engine->init_context == NULL)
return 0;
- ret = ring->init_context(req);
+ ret = engine->init_context(req);
} else
ret = i915_switch_context(req);
@@ -510,133 +510,147 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
u32 flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
- i915_semaphore_is_enabled(ring->dev) ?
- hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+ i915_semaphore_is_enabled(engine->dev) ?
+ hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 :
0;
- int len, i, ret;
+ int len, ret;
/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
* invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
* explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch.
*/
- if (IS_GEN6(ring->dev)) {
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
+ if (IS_GEN6(engine->dev)) {
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
/* These flags are for resource streamer on HSW+ */
- if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+ if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8)
flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
- else if (INTEL_INFO(ring->dev)->gen < 8)
+ else if (INTEL_INFO(engine->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
len = 4;
- if (INTEL_INFO(ring->dev)->gen >= 7)
- len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+ if (INTEL_INFO(engine->dev)->gen >= 7)
+ len += 2 + (num_rings ? 4*num_rings + 6 : 0);
ret = intel_ring_begin(req, len);
if (ret)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (INTEL_INFO(ring->dev)->gen >= 7) {
- intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+ if (INTEL_INFO(engine->dev)->gen >= 7) {
+ intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
if (num_rings) {
struct intel_engine_cs *signaller;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
- for_each_ring(signaller, to_i915(ring->dev), i) {
- if (signaller == ring)
+ intel_ring_emit(engine,
+ MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_engine(signaller, to_i915(engine->dev)) {
+ if (signaller == engine)
continue;
- intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
- intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ intel_ring_emit_reg(engine,
+ RING_PSMI_CTL(signaller->mmio_base));
+ intel_ring_emit(engine,
+ _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
}
}
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_SET_CONTEXT);
+ intel_ring_emit(engine,
+ i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) |
flags);
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
*/
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
- if (INTEL_INFO(ring->dev)->gen >= 7) {
+ if (INTEL_INFO(engine->dev)->gen >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
+ i915_reg_t last_reg = {}; /* keep gcc quiet */
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
- for_each_ring(signaller, to_i915(ring->dev), i) {
- if (signaller == ring)
+ intel_ring_emit(engine,
+ MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_engine(signaller, to_i915(engine->dev)) {
+ if (signaller == engine)
continue;
- intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
- intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ last_reg = RING_PSMI_CTL(signaller->mmio_base);
+ intel_ring_emit_reg(engine, last_reg);
+ intel_ring_emit(engine,
+ _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
+
+ /* Insert a delay before the next switch! */
+ intel_ring_emit(engine,
+ MI_STORE_REGISTER_MEM |
+ MI_SRM_LRM_GLOBAL_GTT);
+ intel_ring_emit_reg(engine, last_reg);
+ intel_ring_emit(engine, engine->scratch.gtt_offset);
+ intel_ring_emit(engine, MI_NOOP);
}
- intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+ intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return ret;
}
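
[Annotation] intel_ring_begin() reserves ring space up front and each intel_ring_emit() consumes one dword, so the len computation above must track the emit path exactly; the inserted SRM delay is why the reservation grows from "+ 2" to "+ 6" per LRI block. A user-space sketch that re-derives the gen7 budget, with the counts read off the emits in this hunk rather than taken from any driver API:

#include <assert.h>
#include <stdio.h>

static int reserved_dwords(int num_rings)
{
	return 4 + 2 + (num_rings ? 4 * num_rings + 6 : 0);
}

static int emitted_dwords(int num_rings)
{
	int n = 0;

	n += 1;                          /* MI_ARB_ON_OFF | MI_ARB_DISABLE */
	if (num_rings)
		n += 1 + 2 * num_rings;  /* LRI header + (reg, value) pairs */
	n += 4;                          /* NOOP, SET_CONTEXT, offset, NOOP */
	if (num_rings) {
		n += 1 + 2 * num_rings;  /* second LRI block */
		n += 4;                  /* SRM, reg, scratch offset, NOOP */
	}
	n += 1;                          /* MI_ARB_ON_OFF | MI_ARB_ENABLE */
	return n;
}

int main(void)
{
	int r;

	for (r = 0; r <= 4; r++)
		assert(emitted_dwords(r) == reserved_dwords(r));
	puts("gen7 MI_SET_CONTEXT dword budget balances");
	return 0;
}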
-static inline bool should_skip_switch(struct intel_engine_cs *ring,
- struct intel_context *from,
- struct intel_context *to)
+static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
+ struct intel_context *to)
{
if (to->remap_slice)
return false;
- if (to->ppgtt && from == to &&
- !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings))
- return true;
+ if (!to->legacy_hw_ctx.initialized)
+ return false;
- return false;
+ if (to->ppgtt &&
+ !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+ return false;
+
+ return to == engine->last_context;
}
static bool
-needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
if (!to->ppgtt)
return false;
- if (INTEL_INFO(ring->dev)->gen < 8)
+ if (engine->last_context == to &&
+ !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+ return false;
+
+ if (engine->id != RCS)
return true;
- if (ring != &dev_priv->ring[RCS])
+ if (INTEL_INFO(engine->dev)->gen < 8)
return true;
return false;
}
static bool
-needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
- u32 hw_flags)
+needs_pd_load_post(struct intel_context *to, u32 hw_flags)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
if (!to->ppgtt)
return false;
- if (!IS_GEN8(ring->dev))
- return false;
-
- if (ring != &dev_priv->ring[RCS])
+ if (!IS_GEN8(to->i915))
return false;
if (hw_flags & MI_RESTORE_INHIBIT)
@@ -645,58 +659,32 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
return false;
}
-static int do_switch(struct drm_i915_gem_request *req)
+static int do_rcs_switch(struct drm_i915_gem_request *req)
{
struct intel_context *to = req->ctx;
- struct intel_engine_cs *ring = req->ring;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct intel_context *from = ring->last_context;
- u32 hw_flags = 0;
- bool uninitialized = false;
+ struct intel_engine_cs *engine = req->engine;
+ struct intel_context *from;
+ u32 hw_flags;
int ret, i;
- if (from != NULL && ring == &dev_priv->ring[RCS]) {
- BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
- BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
- }
-
- if (should_skip_switch(ring, from, to))
+ if (skip_rcs_switch(engine, to))
return 0;
/* Trying to pin first makes error handling easier. */
- if (ring == &dev_priv->ring[RCS]) {
- ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
- get_context_alignment(ring->dev), 0);
- if (ret)
- return ret;
- }
+ ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
+ get_context_alignment(engine->dev),
+ 0);
+ if (ret)
+ return ret;
/*
* Pin can switch back to the default context if we end up calling into
* evict_everything - as a last ditch gtt defrag effort that also
* switches to the default context. Hence we need to reload from here.
+ *
+ * XXX: Doing so is painfully broken!
*/
- from = ring->last_context;
-
- if (needs_pd_load_pre(ring, to)) {
- /* Older GENs and non render rings still want the load first,
- * "PP_DCLV followed by PP_DIR_BASE register through Load
- * Register Immediate commands in Ring Buffer before submitting
- * a context."*/
- trace_switch_mm(ring, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, req);
- if (ret)
- goto unpin_out;
-
- /* Doing a PD load always reloads the page dirs */
- to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
- }
-
- if (ring != &dev_priv->ring[RCS]) {
- if (from)
- i915_gem_context_unreference(from);
- goto done;
- }
+ from = engine->last_context;
/*
* Clear this page out of any CPU caches for coherent swap-in/out. Note
@@ -710,53 +698,37 @@ static int do_switch(struct drm_i915_gem_request *req)
if (ret)
goto unpin_out;
- if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
- hw_flags |= MI_RESTORE_INHIBIT;
+ if (needs_pd_load_pre(engine, to)) {
+ /* Older GENs and non-render rings still want the load first,
+ * "PP_DCLV followed by PP_DIR_BASE register through Load
+ * Register Immediate commands in Ring Buffer before submitting
+ * a context." */
+ trace_switch_mm(engine, to);
+ ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ if (ret)
+ goto unpin_out;
+ }
+
+ if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
/* NB: If we inhibit the restore, the context is not allowed to
* die because future work may end up depending on valid address
* space. This means we must enforce that a page table load
* occur when this occurs. */
- } else if (to->ppgtt &&
- (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
- hw_flags |= MI_FORCE_RESTORE;
- to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
- }
+ hw_flags = MI_RESTORE_INHIBIT;
+ else if (to->ppgtt &&
+ intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
+ hw_flags = MI_FORCE_RESTORE;
+ else
+ hw_flags = 0;
/* We should never emit switch_mm more than once */
- WARN_ON(needs_pd_load_pre(ring, to) &&
- needs_pd_load_post(ring, to, hw_flags));
-
- ret = mi_set_context(req, hw_flags);
- if (ret)
- goto unpin_out;
+ WARN_ON(needs_pd_load_pre(engine, to) &&
+ needs_pd_load_post(to, hw_flags));
- /* GEN8 does *not* require an explicit reload if the PDPs have been
- * setup, and we do not wish to move them.
- */
- if (needs_pd_load_post(ring, to, hw_flags)) {
- trace_switch_mm(ring, to);
- ret = to->ppgtt->switch_mm(to->ppgtt, req);
- /* The hardware context switch is emitted, but we haven't
- * actually changed the state - so it's probably safe to bail
- * here. Still, let the user know something dangerous has
- * happened.
- */
- if (ret) {
- DRM_ERROR("Failed to change address space on context switch\n");
- goto unpin_out;
- }
- }
-
- for (i = 0; i < MAX_L3_SLICES; i++) {
- if (!(to->remap_slice & (1<<i)))
- continue;
-
- ret = i915_gem_l3_remap(req, i);
- /* If it failed, try again next round */
+ if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
+ ret = mi_set_context(req, hw_flags);
if (ret)
- DRM_DEBUG_DRIVER("L3 remapping failed\n");
- else
- to->remap_slice &= ~(1<<i);
+ goto unpin_out;
}
/* The backing object for the context is done after switching to the
@@ -781,27 +753,51 @@ static int do_switch(struct drm_i915_gem_request *req)
i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(from);
}
+ i915_gem_context_reference(to);
+ engine->last_context = to;
+
+ /* GEN8 does *not* require an explicit reload if the PDPs have been
+ * setup, and we do not wish to move them.
+ */
+ if (needs_pd_load_post(to, hw_flags)) {
+ trace_switch_mm(engine, to);
+ ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ /* The hardware context switch is emitted, but we haven't
+ * actually changed the state - so it's probably safe to bail
+ * here. Still, let the user know something dangerous has
+ * happened.
+ */
+ if (ret)
+ return ret;
+ }
- uninitialized = !to->legacy_hw_ctx.initialized;
- to->legacy_hw_ctx.initialized = true;
+ if (to->ppgtt)
+ to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
-done:
- i915_gem_context_reference(to);
- ring->last_context = to;
+ for (i = 0; i < MAX_L3_SLICES; i++) {
+ if (!(to->remap_slice & (1<<i)))
+ continue;
+
+ ret = i915_gem_l3_remap(req, i);
+ if (ret)
+ return ret;
- if (uninitialized) {
- if (ring->init_context) {
- ret = ring->init_context(req);
+ to->remap_slice &= ~(1<<i);
+ }
+
+ if (!to->legacy_hw_ctx.initialized) {
+ if (engine->init_context) {
+ ret = engine->init_context(req);
if (ret)
- DRM_ERROR("ring init context: %d\n", ret);
+ return ret;
}
+ to->legacy_hw_ctx.initialized = true;
}
return 0;
unpin_out:
- if (ring->id == RCS)
- i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+ i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
return ret;
}
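
[Annotation] After the rework, do_rcs_switch() picks exactly one of MI_RESTORE_INHIBIT, MI_FORCE_RESTORE, or 0 for hw_flags. A sketch distilling that selection into a pure function; the flag values and struct fields here are illustrative stand-ins, not the real MI_* encodings:

#include <assert.h>
#include <stdio.h>

#define MI_RESTORE_INHIBIT (1u << 0)
#define MI_FORCE_RESTORE   (1u << 1)

struct ctx {
	int initialized;         /* legacy_hw_ctx.initialized */
	int is_default;          /* i915_gem_context_is_default() */
	int has_ppgtt;
	unsigned pd_dirty_rings;
};

static unsigned select_hw_flags(const struct ctx *to, unsigned engine_flag)
{
	if (!to->initialized || to->is_default)
		return MI_RESTORE_INHIBIT;   /* don't restore stale state */
	if (to->has_ppgtt && (engine_flag & to->pd_dirty_rings))
		return MI_FORCE_RESTORE;     /* page directories changed */
	return 0;
}

int main(void)
{
	struct ctx fresh = { .initialized = 0 };
	struct ctx dirty = { .initialized = 1, .has_ppgtt = 1,
			     .pd_dirty_rings = 0x1 };

	assert(select_hw_flags(&fresh, 0x1) == MI_RESTORE_INHIBIT);
	assert(select_hw_flags(&dirty, 0x1) == MI_FORCE_RESTORE);
	dirty.pd_dirty_rings = 0;
	assert(select_hw_flags(&dirty, 0x1) == 0);
	puts("ok");
	return 0;
}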
@@ -820,23 +816,39 @@ unpin_out:
*/
int i915_switch_context(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_i915_private *dev_priv = req->i915;
WARN_ON(i915.enable_execlists);
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
- if (req->ctx != ring->last_context) {
- i915_gem_context_reference(req->ctx);
- if (ring->last_context)
- i915_gem_context_unreference(ring->last_context);
- ring->last_context = req->ctx;
+ if (engine->id != RCS ||
+ req->ctx->legacy_hw_ctx.rcs_state == NULL) {
+ struct intel_context *to = req->ctx;
+
+ if (needs_pd_load_pre(engine, to)) {
+ int ret;
+
+ trace_switch_mm(engine, to);
+ ret = to->ppgtt->switch_mm(to->ppgtt, req);
+ if (ret)
+ return ret;
+
+ /* Doing a PD load always reloads the page dirs */
+ to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
+
+ if (to != engine->last_context) {
+ i915_gem_context_reference(to);
+ if (engine->last_context)
+ i915_gem_context_unreference(engine->last_context);
+ engine->last_context = to;
+ }
+
return 0;
}
- return do_switch(req);
+ return do_rcs_switch(req);
}
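
[Annotation] The non-RCS path above swaps engine->last_context by taking a reference on the incoming context before dropping the one on the outgoing context. A user-space analogue of that handoff order, assuming the driver's reference/unreference pair behaves like plain kref-style counting:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
};

static struct obj *obj_get(struct obj *o)
{
	o->refcount++;
	return o;
}

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);
}

/* Reference the incoming object before unreferencing the outgoing one,
 * so the slot never dangles and the swap is safe even if old == new. */
static void assign_ref(struct obj **slot, struct obj *new)
{
	struct obj *old = *slot;

	*slot = obj_get(new);
	if (old)
		obj_put(old);
}

int main(void)
{
	struct obj *a = calloc(1, sizeof(*a));
	struct obj *slot = NULL;

	if (!a)
		return 1;
	a->refcount = 1;              /* creator's reference */
	assign_ref(&slot, a);         /* slot takes its own reference */
	assign_ref(&slot, a);         /* re-assigning the same object is safe */
	assert(a->refcount == 2);
	obj_put(a);                   /* creator drops; slot keeps it alive */
	assert(slot->refcount == 1);
	obj_put(slot);                /* last reference: freed here */
	puts("ok");
	return 0;
}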
static bool contexts_enabled(struct drm_device *dev)
@@ -937,7 +949,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
else if (to_i915(dev)->mm.aliasing_ppgtt)
args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
else
- args->value = to_i915(dev)->gtt.base.total;
+ args->value = to_i915(dev)->ggtt.base.total;
break;
default:
ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 17299d041..a56516482 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -36,29 +36,29 @@ i915_verify_lists(struct drm_device *dev)
static int warned;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
int err = 0;
- int i;
if (warned)
return 0;
- for_each_ring(ring, dev_priv, i) {
- list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) {
+ for_each_engine(engine, dev_priv) {
+ list_for_each_entry(obj, &engine->active_list,
+ engine_list[engine->id]) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("%s: freed active obj %p\n",
- ring->name, obj);
+ engine->name, obj);
err++;
break;
} else if (!obj->active ||
- obj->last_read_req[ring->id] == NULL) {
+ obj->last_read_req[engine->id] == NULL) {
DRM_ERROR("%s: invalid active obj %p\n",
- ring->name, obj);
+ engine->name, obj);
err++;
} else if (obj->base.write_domain) {
DRM_ERROR("%s: invalid write obj %p (w %x)\n",
- ring->name,
+ engine->name,
obj, obj->base.write_domain);
err++;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 0506016e1..80bbe43a2 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -95,14 +95,12 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
- mutex_lock(&obj->base.dev->struct_mutex);
-
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
kfree(sg);
+ mutex_lock(&obj->base.dev->struct_mutex);
i915_gem_object_unpin_pages(obj);
-
mutex_unlock(&obj->base.dev->struct_mutex);
}
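
[Annotation] The unmap reordering above shrinks the struct_mutex critical section: the DMA unmap and sg_table teardown touch only caller-private state, so only the pin-count update still needs the lock. The same shape with pthreads (names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pages_pinned;

static void release_mapping(void *private_table)
{
	free(private_table);           /* private to this caller: no lock */

	pthread_mutex_lock(&lock);     /* shared counter: lock held briefly */
	pages_pinned--;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	void *table = malloc(64);

	pages_pinned = 1;
	release_mapping(table);
	printf("pages_pinned=%d\n", pages_pinned);
	return 0;
}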
@@ -110,51 +108,17 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
- struct sg_page_iter sg_iter;
- struct page **pages;
- int ret, i;
+ void *addr;
+ int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ERR_PTR(ret);
- if (obj->dma_buf_vmapping) {
- obj->vmapping_count++;
- goto out_unlock;
- }
-
- ret = i915_gem_object_get_pages(obj);
- if (ret)
- goto err;
-
- i915_gem_object_pin_pages(obj);
-
- ret = -ENOMEM;
-
- pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
- if (pages == NULL)
- goto err_unpin;
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
- pages[i++] = sg_page_iter_page(&sg_iter);
-
- obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
- drm_free_large(pages);
-
- if (!obj->dma_buf_vmapping)
- goto err_unpin;
-
- obj->vmapping_count = 1;
-out_unlock:
+ addr = i915_gem_object_pin_map(obj);
mutex_unlock(&dev->struct_mutex);
- return obj->dma_buf_vmapping;
-err_unpin:
- i915_gem_object_unpin_pages(obj);
-err:
- mutex_unlock(&dev->struct_mutex);
- return ERR_PTR(ret);
+ return addr;
}
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -163,12 +127,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
struct drm_device *dev = obj->base.dev;
mutex_lock(&dev->struct_mutex);
- if (--obj->vmapping_count == 0) {
- vunmap(obj->dma_buf_vmapping);
- obj->dma_buf_vmapping = NULL;
-
- i915_gem_object_unpin_pages(obj);
- }
+ i915_gem_object_unpin_map(obj);
mutex_unlock(&dev->struct_mutex);
}
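
[Annotation] The old vmap path open-coded a cached mapping plus a vmapping_count; i915_gem_object_pin_map()/unpin_map() centralise exactly that bookkeeping. A user-space analogue, with malloc()/free() standing in for vmap()/vunmap():

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct mapped_obj {
	void *vaddr;     /* cached mapping, created on first pin */
	int map_count;
	size_t size;
};

static void *obj_pin_map(struct mapped_obj *o)
{
	if (!o->vaddr) {
		o->vaddr = malloc(o->size);  /* stands in for vmap() */
		if (!o->vaddr)
			return NULL;
	}
	o->map_count++;
	return o->vaddr;
}

static void obj_unpin_map(struct mapped_obj *o)
{
	assert(o->map_count > 0);
	if (--o->map_count == 0) {
		free(o->vaddr);              /* stands in for vunmap() */
		o->vaddr = NULL;
	}
}

int main(void)
{
	struct mapped_obj o = { .size = 4096 };
	void *a = obj_pin_map(&o);
	void *b = obj_pin_map(&o);

	assert(a && a == b);                 /* second pin reuses the map */
	obj_unpin_map(&o);
	obj_unpin_map(&o);                   /* last unpin frees it */
	assert(o.vaddr == NULL);
	puts("ok");
	return 0;
}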
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1328bc502..33df74d98 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -313,7 +313,8 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
uint64_t target_offset)
{
struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint64_t delta = relocation_target(reloc, target_offset);
uint64_t offset;
void __iomem *reloc_page;
@@ -330,7 +331,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
/* Map the page containing the relocation we're going to perform. */
offset = i915_gem_obj_ggtt_offset(obj);
offset += reloc->offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
offset & PAGE_MASK);
iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
@@ -340,7 +341,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
if (offset_in_page(offset) == 0) {
io_mapping_unmap_atomic(reloc_page);
reloc_page =
- io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ io_mapping_map_atomic_wc(ggtt->mappable,
offset);
}
@@ -488,7 +489,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
ret = relocate_entry_cpu(obj, reloc, target_offset);
else if (obj->map_and_fenceable)
ret = relocate_entry_gtt(obj, reloc, target_offset);
- else if (cpu_has_clflush)
+ else if (static_cpu_has(X86_FEATURE_CLFLUSH))
ret = relocate_entry_clflush(obj, reloc, target_offset);
else {
WARN_ONCE(1, "Impossible case in relocation handling\n");
@@ -514,7 +515,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
int remain, ret;
- user_relocs = to_user_ptr(entry->relocs_ptr);
+ user_relocs = u64_to_user_ptr(entry->relocs_ptr);
remain = entry->relocation_count;
while (remain) {
@@ -535,9 +536,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
return ret;
if (r->presumed_offset != offset &&
- __copy_to_user_inatomic(&user_relocs->presumed_offset,
- &r->presumed_offset,
- sizeof(r->presumed_offset))) {
+ __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
return -EFAULT;
}
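
[Annotation] u64_to_user_ptr() replaces the local to_user_ptr() for the relocs_ptr fields: the execbuffer structs carry pointers as u64 so the ioctl ABI is identical for 32- and 64-bit userspace. A plain C sketch of the round trip; the kernel helper additionally carries the __user annotation, which this analogue drops:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define u64_to_ptr(x) ((void *)(uintptr_t)(x))
#define ptr_to_u64(p) ((uint64_t)(uintptr_t)(p))

int main(void)
{
	int exec_list[4] = { 1, 2, 3, 4 };
	uint64_t handle = ptr_to_u64(exec_list);   /* as stored in args */
	int *back = u64_to_ptr(handle);            /* as recovered in-kernel */

	assert(back == exec_list);
	printf("%d\n", back[2]);
	return 0;
}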
@@ -599,7 +598,7 @@ static bool only_mappable_for_reloc(unsigned int flags)
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
- struct intel_engine_cs *ring,
+ struct intel_engine_cs *engine,
bool *need_reloc)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -713,7 +712,7 @@ eb_vma_misplaced(struct i915_vma *vma)
}
static int
-i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
+i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
struct list_head *vmas,
struct intel_context *ctx,
bool *need_relocs)
@@ -723,10 +722,10 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
struct i915_address_space *vm;
struct list_head ordered_vmas;
struct list_head pinned_vmas;
- bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+ bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
int retry;
- i915_gem_retire_requests_ring(ring);
+ i915_gem_retire_requests_ring(engine);
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -788,7 +787,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
if (eb_vma_misplaced(vma))
ret = i915_vma_unbind(vma);
else
- ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+ ret = i915_gem_execbuffer_reserve_vma(vma,
+ engine,
+ need_relocs);
if (ret)
goto err;
}
@@ -798,7 +799,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
if (drm_mm_node_allocated(&vma->node))
continue;
- ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+ ret = i915_gem_execbuffer_reserve_vma(vma, engine,
+ need_relocs);
if (ret)
goto err;
}
@@ -821,7 +823,7 @@ static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
- struct intel_engine_cs *ring,
+ struct intel_engine_cs *engine,
struct eb_vmas *eb,
struct drm_i915_gem_exec_object2 *exec,
struct intel_context *ctx)
@@ -865,7 +867,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
u64 invalid_offset = (u64)-1;
int j;
- user_relocs = to_user_ptr(exec[i].relocs_ptr);
+ user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
if (copy_from_user(reloc+total, user_relocs,
exec[i].relocation_count * sizeof(*reloc))) {
@@ -910,7 +912,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
+ &need_relocs);
if (ret)
goto err;
@@ -938,7 +941,7 @@ static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- const unsigned other_rings = ~intel_ring_flag(req->ring);
+ const unsigned other_rings = ~intel_engine_flag(req->engine);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -948,7 +951,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct drm_i915_gem_object *obj = vma->obj;
if (obj->active & other_rings) {
- ret = i915_gem_object_sync(obj, req->ring, &req);
+ ret = i915_gem_object_sync(obj, req->engine, &req);
if (ret)
return ret;
}
@@ -960,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
}
if (flush_chipset)
- i915_gem_chipset_flush(req->ring->dev);
+ i915_gem_chipset_flush(req->engine->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@@ -1009,7 +1012,7 @@ validate_exec_list(struct drm_device *dev,
invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
for (i = 0; i < count; i++) {
- char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
+ char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
int length; /* limited by fault_in_pages_readable() */
if (exec[i].flags & invalid_flags)
@@ -1062,12 +1065,12 @@ validate_exec_list(struct drm_device *dev,
static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
- struct intel_engine_cs *ring, const u32 ctx_id)
+ struct intel_engine_cs *engine, const u32 ctx_id)
{
struct intel_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
- if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
+ if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
return ERR_PTR(-EINVAL);
ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -1080,8 +1083,8 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ERR_PTR(-EIO);
}
- if (i915.enable_execlists && !ctx->engine[ring->id].state) {
- int ret = intel_lr_context_deferred_alloc(ctx, ring);
+ if (i915.enable_execlists && !ctx->engine[engine->id].state) {
+ int ret = intel_lr_context_deferred_alloc(ctx, engine);
if (ret) {
DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
return ERR_PTR(ret);
@@ -1095,7 +1098,7 @@ void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
+ struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
@@ -1122,7 +1125,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
i915_gem_request_assign(&obj->last_fenced_req, req);
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
&dev_priv->mm.fence_list);
}
@@ -1132,11 +1135,11 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
}
}
-void
+static void
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
/* Unconditionally force add_request to emit a full flush. */
- params->ring->gpu_caches_dirty = true;
+ params->engine->gpu_caches_dirty = true;
/* Add a breadcrumb for the completion of the batch buffer */
__i915_add_request(params->request, params->batch_obj, true);
@@ -1146,11 +1149,11 @@ static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
- if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+ if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -1160,18 +1163,18 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return ret;
for (i = 0; i < 4; i++) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
- intel_ring_emit(ring, 0);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
+ intel_ring_emit(engine, 0);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return 0;
}
static struct drm_i915_gem_object*
-i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
struct drm_i915_gem_exec_object2 *shadow_exec_entry,
struct eb_vmas *eb,
struct drm_i915_gem_object *batch_obj,
@@ -1183,12 +1186,12 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
struct i915_vma *vma;
int ret;
- shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
+ shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
PAGE_ALIGN(batch_len));
if (IS_ERR(shadow_batch_obj))
return shadow_batch_obj;
- ret = i915_parse_cmds(ring,
+ ret = i915_parse_cmds(engine,
batch_obj,
shadow_batch_obj,
batch_start_offset,
@@ -1229,7 +1232,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct list_head *vmas)
{
struct drm_device *dev = params->dev;
- struct intel_engine_cs *ring = params->ring;
+ struct intel_engine_cs *engine = params->engine;
struct drm_i915_private *dev_priv = dev->dev_private;
u64 exec_start, exec_len;
int instp_mode;
@@ -1244,8 +1247,8 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
if (ret)
return ret;
- WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
- "%s didn't clear reload\n", ring->name);
+ WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
+ "%s didn't clear reload\n", engine->name);
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
instp_mask = I915_EXEC_CONSTANTS_MASK;
@@ -1253,7 +1256,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+ if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
@@ -1280,17 +1283,17 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
return -EINVAL;
}
- if (ring == &dev_priv->ring[RCS] &&
+ if (engine == &dev_priv->engine[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(params->request, 4);
if (ret)
return ret;
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, INSTPM);
- intel_ring_emit(ring, instp_mask << 16 | instp_mode);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, INSTPM);
+ intel_ring_emit(engine, instp_mask << 16 | instp_mode);
+ intel_ring_advance(engine);
dev_priv->relative_constants_mode = instp_mode;
}
@@ -1308,7 +1311,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
if (exec_len == 0)
exec_len = params->batch_obj->base.size;
- ret = ring->dispatch_execbuffer(params->request,
+ ret = engine->dispatch_execbuffer(params->request,
exec_start, exec_len,
params->dispatch_flags);
if (ret)
@@ -1317,7 +1320,6 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
i915_gem_execbuffer_move_to_active(vmas, params->request);
- i915_gem_execbuffer_retire_commands(params);
return 0;
}
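
[Annotation] The INSTPM write above uses i915's masked-register convention, as do the _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE emits earlier in this diff: the top 16 bits of the written value select which of the bottom 16 bits the hardware updates, leaving the rest untouched. A simulation of that semantic; masked_write is a model, not a driver function:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MASKED_BIT_ENABLE(b)  (((b) << 16) | (b))
#define MASKED_BIT_DISABLE(b) ((b) << 16)

static uint32_t masked_write(uint32_t reg, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (reg & ~mask) | (val & mask & 0xffff);
}

int main(void)
{
	uint32_t reg = 0x0005;                       /* bits 0 and 2 set */

	reg = masked_write(reg, MASKED_BIT_ENABLE(0x0008));
	assert(reg == 0x000d);                       /* bit 3 set, rest kept */
	reg = masked_write(reg, MASKED_BIT_DISABLE(0x0004));
	assert(reg == 0x0009);                       /* bit 2 cleared */
	printf("reg=0x%04x\n", (unsigned)reg);
	return 0;
}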
@@ -1365,7 +1367,7 @@ eb_get_batch(struct eb_vmas *eb)
#define I915_USER_RINGS (4)
-static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
+static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
[I915_EXEC_DEFAULT] = RCS,
[I915_EXEC_RENDER] = RCS,
[I915_EXEC_BLT] = BCS,
@@ -1408,12 +1410,12 @@ eb_select_ring(struct drm_i915_private *dev_priv,
return -EINVAL;
}
- *ring = &dev_priv->ring[_VCS(bsd_idx)];
+ *ring = &dev_priv->engine[_VCS(bsd_idx)];
} else {
- *ring = &dev_priv->ring[user_ring_map[user_ring_id]];
+ *ring = &dev_priv->engine[user_ring_map[user_ring_id]];
}
- if (!intel_ring_initialized(*ring)) {
+ if (!intel_engine_initialized(*ring)) {
DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
return -EINVAL;
}
@@ -1427,12 +1429,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_execbuffer2 *args,
struct drm_i915_gem_exec_object2 *exec)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_request *req = NULL;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_i915_gem_exec_object2 shadow_exec_entry;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct intel_context *ctx;
struct i915_address_space *vm;
struct i915_execbuffer_params params_master; /* XXX: will be removed later */
@@ -1459,7 +1462,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->flags & I915_EXEC_IS_PINNED)
dispatch_flags |= I915_DISPATCH_PINNED;
- ret = eb_select_ring(dev_priv, file, args, &ring);
+ ret = eb_select_ring(dev_priv, file, args, &engine);
if (ret)
return ret;
@@ -1473,9 +1476,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
return -EINVAL;
}
- if (ring->id != RCS) {
+ if (engine->id != RCS) {
DRM_DEBUG("RS is not available on %s\n",
- ring->name);
+ engine->name);
return -EINVAL;
}
@@ -1488,7 +1491,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto pre_mutex_err;
- ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
+ ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
if (IS_ERR(ctx)) {
mutex_unlock(&dev->struct_mutex);
ret = PTR_ERR(ctx);
@@ -1500,7 +1503,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ctx->ppgtt)
vm = &ctx->ppgtt->base;
else
- vm = &dev_priv->gtt.base;
+ vm = &ggtt->base;
memset(&params_master, 0x00, sizeof(params_master));
@@ -1522,7 +1525,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
+ &need_relocs);
if (ret)
goto err;
@@ -1531,7 +1535,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_execbuffer_relocate(eb);
if (ret) {
if (ret == -EFAULT) {
- ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+ ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
+ engine,
eb, exec, ctx);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
@@ -1547,16 +1552,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
params->args_batch_start_offset = args->batch_start_offset;
- if (i915_needs_cmd_parser(ring) && args->batch_len) {
+ if (i915_needs_cmd_parser(engine) && args->batch_len) {
struct drm_i915_gem_object *parsed_batch_obj;
- parsed_batch_obj = i915_gem_execbuffer_parse(ring,
- &shadow_exec_entry,
- eb,
- batch_obj,
- args->batch_start_offset,
- args->batch_len,
- file->is_master);
+ parsed_batch_obj = i915_gem_execbuffer_parse(engine,
+ &shadow_exec_entry,
+ eb,
+ batch_obj,
+ args->batch_start_offset,
+ args->batch_len,
+ file->is_master);
if (IS_ERR(parsed_batch_obj)) {
ret = PTR_ERR(parsed_batch_obj);
goto err;
@@ -1608,7 +1613,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
/* Allocate a request for this batch buffer nice and early. */
- req = i915_gem_request_alloc(ring, ctx);
+ req = i915_gem_request_alloc(engine, ctx);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto err_batch_unpin;
@@ -1616,7 +1621,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_request_add_to_client(req, file);
if (ret)
- goto err_batch_unpin;
+ goto err_request;
/*
* Save assorted stuff away to pass through to *_submission().
@@ -1626,13 +1631,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
params->dev = dev;
params->file = file;
- params->ring = ring;
+ params->engine = engine;
params->dispatch_flags = dispatch_flags;
params->batch_obj = batch_obj;
params->ctx = ctx;
params->request = req;
ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+err_request:
+ i915_gem_execbuffer_retire_commands(params);
err_batch_unpin:
/*
@@ -1649,14 +1656,6 @@ err:
i915_gem_context_unreference(ctx);
eb_destroy(eb);
- /*
- * If the request was created but not successfully submitted then it
- * must be freed again. If it was submitted then it is being tracked
- * on the active request list and no clean up is required here.
- */
- if (ret && !IS_ERR_OR_NULL(req))
- i915_gem_request_cancel(req);
-
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
@@ -1696,7 +1695,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = copy_from_user(exec_list,
- to_user_ptr(args->buffers_ptr),
+ u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1732,7 +1731,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
struct drm_i915_gem_exec_object __user *user_exec_list =
- to_user_ptr(args->buffers_ptr);
+ u64_to_user_ptr(args->buffers_ptr);
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++) {
@@ -1775,18 +1774,16 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EINVAL;
}
- exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
- GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
- if (exec2_list == NULL)
- exec2_list = drm_malloc_ab(sizeof(*exec2_list),
- args->buffer_count);
+ exec2_list = drm_malloc_gfp(args->buffer_count,
+ sizeof(*exec2_list),
+ GFP_TEMPORARY);
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
- to_user_ptr(args->buffers_ptr),
+ u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1799,7 +1796,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
struct drm_i915_gem_exec_object2 __user *user_exec_list =
- to_user_ptr(args->buffers_ptr);
+ u64_to_user_ptr(args->buffers_ptr);
int i;
for (i = 0; i < args->buffer_count; i++) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 49e4f26b7..92acdff9d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -658,7 +658,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
unsigned entry,
dma_addr_t addr)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
BUG_ON(entry >= 4);
@@ -667,13 +667,13 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
- intel_ring_emit(ring, upper_32_bits(addr));
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
- intel_ring_emit(ring, lower_32_bits(addr));
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
+ intel_ring_emit(engine, upper_32_bits(addr));
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
+ intel_ring_emit(engine, lower_32_bits(addr));
+ intel_ring_advance(engine);
return 0;
}
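
[Annotation] gen8_write_pdp() above programs one 64-bit page-directory-pointer address as two 32-bit LRI writes. upper_32_bits()/lower_32_bits() are real kernel helpers; they are re-implemented here only to keep the check standalone:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
	uint64_t addr = 0x0000001234567000ull;     /* illustrative dma_addr_t */
	uint32_t udw = upper_32_bits(addr);        /* -> GEN8_RING_PDP_UDW */
	uint32_t ldw = lower_32_bits(addr);        /* -> GEN8_RING_PDP_LDW */

	assert(((uint64_t)udw << 32 | ldw) == addr);
	printf("udw=0x%08" PRIx32 " ldw=0x%08" PRIx32 "\n", udw, ldw);
	return 0;
}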
@@ -706,8 +706,7 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
uint64_t length,
gen8_pte_t scratch_pte)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen8_pte_t *pt_vaddr;
unsigned pdpe = gen8_pdpe_index(start);
unsigned pde = gen8_pde_index(start);
@@ -746,7 +745,7 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
num_entries--;
}
- kunmap_px(ppgtt, pt);
+ kunmap_px(ppgtt, pt_vaddr);
pte = 0;
if (++pde == I915_PDES) {
@@ -762,8 +761,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
uint64_t length,
bool use_scratch)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
I915_CACHE_LLC, use_scratch);
@@ -788,8 +786,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level cache_level)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen8_pte_t *pt_vaddr;
unsigned pdpe = gen8_pdpe_index(start);
unsigned pde = gen8_pde_index(start);
@@ -829,8 +826,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level cache_level,
u32 unused)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sg_page_iter sg_iter;
__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
@@ -909,11 +905,10 @@ static int gen8_init_scratch(struct i915_address_space *vm)
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
enum vgt_g2v_type msg;
- struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
int i;
- if (USES_FULL_48BIT_PPGTT(dev)) {
+ if (USES_FULL_48BIT_PPGTT(dev_priv)) {
u64 daddr = px_dma(&ppgtt->pml4);
I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
@@ -981,8 +976,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
if (intel_vgpu_active(vm->dev))
gen8_ppgtt_notify_vgt(ppgtt, false);
@@ -1216,8 +1210,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
uint64_t start,
uint64_t length)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned long *new_page_dirs, *new_page_tables;
struct drm_device *dev = vm->dev;
struct i915_page_directory *pd;
@@ -1329,8 +1322,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
uint64_t length)
{
DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory_pointer *pdp;
uint64_t pml4e;
int ret = 0;
@@ -1376,8 +1368,7 @@ err_out:
static int gen8_alloc_va_range(struct i915_address_space *vm,
uint64_t start, uint64_t length)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
if (USES_FULL_48BIT_PPGTT(vm->dev))
return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
@@ -1629,6 +1620,7 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
struct i915_page_directory *pd,
uint32_t start, uint32_t length)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_page_table *pt;
uint32_t pde, temp;
@@ -1637,7 +1629,7 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
/* Make sure write is complete before other code can use this page
* table. Also required for WC mapped PTEs */
- readl(dev_priv->gtt.gsm);
+ readl(ggtt->gsm);
}
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
@@ -1650,11 +1642,11 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -1662,13 +1654,13 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
- intel_ring_emit(ring, PP_DIR_DCLV_2G);
- intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
- intel_ring_emit(ring, get_pd_offset(ppgtt));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
+ intel_ring_emit(engine, PP_DIR_DCLV_2G);
+ intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
+ intel_ring_emit(engine, get_pd_offset(ppgtt));
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
@@ -1676,22 +1668,22 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
- I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
return 0;
}
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
@@ -1699,17 +1691,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
- intel_ring_emit(ring, PP_DIR_DCLV_2G);
- intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
- intel_ring_emit(ring, get_pd_offset(ppgtt));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
+ intel_ring_emit(engine, PP_DIR_DCLV_2G);
+ intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
+ intel_ring_emit(engine, get_pd_offset(ppgtt));
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
/* XXX: RCS is the only one to auto invalidate the TLBs? */
- if (ring->id != RCS) {
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (engine->id != RCS) {
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
@@ -1720,15 +1712,15 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
- I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+ I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
- POSTING_READ(RING_PP_DIR_DCLV(ring));
+ POSTING_READ(RING_PP_DIR_DCLV(engine));
return 0;
}
@@ -1736,12 +1728,11 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
static void gen8_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int j;
+ struct intel_engine_cs *engine;
- for_each_ring(ring, dev_priv, j) {
+ for_each_engine(engine, dev_priv) {
u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
- I915_WRITE(RING_MODE_GEN7(ring),
+ I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
}
}
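
[Annotation] The for_each_ring to for_each_engine conversion running through this file drops the caller-side index variable: the macro walks the fixed engine array itself and skips uninitialised slots. A standalone sketch of that macro shape; the struct names and the NULL-name validity test are illustrative, not the driver's:

#include <stdio.h>

#define NUM_ENGINES 4

struct engine {
	const char *name;   /* NULL means the slot is not initialised */
};

struct device {
	struct engine engine[NUM_ENGINES];
};

/* The trailing "if (!...) {} else" makes the user's loop body run only
 * for initialised slots, mirroring the kernel's for_each_if() idiom. */
#define for_each_engine(e, dev) \
	for ((e) = &(dev)->engine[0]; \
	     (e) < &(dev)->engine[NUM_ENGINES]; (e)++) \
		if (!(e)->name) {} else

int main(void)
{
	struct device dev = {
		.engine = { { "rcs" }, { "bcs" }, { NULL }, { "vcs" } },
	};
	struct engine *engine;

	for_each_engine(engine, &dev)
		printf("%s\n", engine->name);
	return 0;
}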
@@ -1749,9 +1740,8 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
static void gen7_ppgtt_enable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
uint32_t ecochk, ecobits;
- int i;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
@@ -1765,9 +1755,9 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
}
I915_WRITE(GAM_ECOCHK, ecochk);
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
/* GFX_MODE is per-ring on gen7+ */
- I915_WRITE(RING_MODE_GEN7(ring),
+ I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
}
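
[Annotation] The hunks below repeatedly replace container_of(vm, struct i915_hw_ppgtt, base) with the i915_vm_to_ppgtt() helper; both recover the outer struct from a pointer to an embedded member. A standalone demonstration of the underlying container_of arithmetic:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space {
	unsigned long total;
};

struct hw_ppgtt {
	int id;
	struct address_space base;   /* embedded, like ppgtt->base */
};

static struct hw_ppgtt *vm_to_ppgtt(struct address_space *vm)
{
	return container_of(vm, struct hw_ppgtt, base);
}

int main(void)
{
	struct hw_ppgtt ppgtt = { .id = 7 };
	struct address_space *vm = &ppgtt.base;

	assert(vm_to_ppgtt(vm) == &ppgtt);
	printf("id=%d\n", vm_to_ppgtt(vm)->id);
	return 0;
}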
@@ -1796,8 +1786,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
uint64_t length,
bool use_scratch)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen6_pte_t *pt_vaddr, scratch_pte;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
@@ -1831,8 +1820,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level cache_level, u32 flags)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
gen6_pte_t *pt_vaddr;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned act_pt = first_entry / GEN6_PTES;
@@ -1864,9 +1852,9 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
{
DECLARE_BITMAP(new_page_tables, I915_PDES);
struct drm_device *dev = vm->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
uint32_t start, length, start_save, length_save;
uint32_t pde, temp;
@@ -1932,7 +1920,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
/* Make sure write is complete before other code can use this page
* table. Also required for WC mapped PTEs */
- readl(dev_priv->gtt.gsm);
+ readl(ggtt->gsm);
mark_tlbs_dirty(ppgtt);
return 0;
@@ -1978,8 +1966,7 @@ static void gen6_free_scratch(struct i915_address_space *vm)
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
uint32_t pde;
@@ -1997,7 +1984,8 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->base;
struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool retried = false;
int ret;
@@ -2005,23 +1993,23 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
- BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+ BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
ret = gen6_init_scratch(vm);
if (ret)
return ret;
alloc:
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+ ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
&ppgtt->node, GEN6_PD_SIZE,
GEN6_PD_ALIGN, 0,
- 0, dev_priv->gtt.base.total,
+ 0, ggtt->base.total,
DRM_MM_TOPDOWN);
if (ret == -ENOSPC && !retried) {
- ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
+ ret = i915_gem_evict_something(dev, &ggtt->base,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
I915_CACHE_NONE,
- 0, dev_priv->gtt.base.total,
+ 0, ggtt->base.total,
0);
if (ret)
goto err_out;
@@ -2034,7 +2022,7 @@ alloc:
goto err_out;
- if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+ if (ppgtt->node.start < ggtt->mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
return 0;
@@ -2062,10 +2050,11 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+ ppgtt->base.pte_encode = ggtt->base.pte_encode;
if (IS_GEN6(dev)) {
ppgtt->switch_mm = gen6_mm_switch;
} else if (IS_HASWELL(dev)) {
@@ -2095,7 +2084,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->pd.base.ggtt_offset =
ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
- ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
@@ -2192,7 +2181,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
{
- struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = req->i915;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
if (i915.enable_execlists)
@@ -2263,9 +2252,10 @@ static bool needs_idle_maps(struct drm_device *dev)
static bool do_idling(struct drm_i915_private *dev_priv)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool ret = dev_priv->mm.interruptible;
- if (unlikely(dev_priv->gtt.do_idle_maps)) {
+ if (unlikely(ggtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
@@ -2279,22 +2269,23 @@ static bool do_idling(struct drm_i915_private *dev_priv)
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
- if (unlikely(dev_priv->gtt.do_idle_maps))
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+ if (unlikely(ggtt->do_idle_maps))
dev_priv->mm.interruptible = interruptible;
}
void i915_check_and_clear_faults(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
if (INTEL_INFO(dev)->gen < 6)
return;
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
u32 fault_reg;
- fault_reg = I915_READ(RING_FAULT_REG(ring));
+ fault_reg = I915_READ(RING_FAULT_REG(engine));
if (fault_reg & RING_FAULT_VALID) {
DRM_DEBUG_DRIVER("Unexpected fault\n"
"\tAddr: 0x%08lx\n"
@@ -2305,16 +2296,16 @@ void i915_check_and_clear_faults(struct drm_device *dev)
fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
RING_FAULT_SRCID(fault_reg),
RING_FAULT_FAULT_TYPE(fault_reg));
- I915_WRITE(RING_FAULT_REG(ring),
+ I915_WRITE(RING_FAULT_REG(engine),
fault_reg & ~RING_FAULT_VALID);
}
}
- POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+ POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
}
static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev_priv->dev)->gen < 6) {
+ if (INTEL_INFO(dev_priv)->gen < 6) {
intel_gtt_chipset_flush();
} else {
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
@@ -2324,7 +2315,8 @@ static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
/* Don't bother messing with faults pre GEN6 as we have little
* documentation supporting that it's a good idea.
@@ -2334,10 +2326,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
i915_check_and_clear_faults(dev);
- dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start,
- dev_priv->gtt.base.total,
- true);
+ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
+ true);
i915_ggtt_flush(dev_priv);
}
@@ -2367,10 +2357,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level level, u32 unused)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned first_entry = start >> PAGE_SHIFT;
gen8_pte_t __iomem *gtt_entries =
- (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */
@@ -2444,10 +2435,11 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level level, u32 flags)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned first_entry = start >> PAGE_SHIFT;
gen6_pte_t __iomem *gtt_entries =
- (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
@@ -2487,12 +2479,13 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
uint64_t length,
bool use_scratch)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen8_pte_t scratch_pte, __iomem *gtt_base =
- (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
- const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
int rpm_atomic_seq;
@@ -2518,12 +2511,13 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
uint64_t length,
bool use_scratch)
{
- struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen6_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
- const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
+ const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
int rpm_atomic_seq;
@@ -2613,32 +2607,31 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- struct drm_device *dev = vma->vm->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj = vma->obj;
- struct sg_table *pages = obj->pages;
- u32 pte_flags = 0;
+ u32 pte_flags;
int ret;
ret = i915_get_ggtt_vma_pages(vma);
if (ret)
return ret;
- pages = vma->ggtt_view.pages;
/* Currently applicable only to VLV */
- if (obj->gt_ro)
+ pte_flags = 0;
+ if (vma->obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
if (flags & GLOBAL_BIND) {
- vma->vm->insert_entries(vma->vm, pages,
+ vma->vm->insert_entries(vma->vm,
+ vma->ggtt_view.pages,
vma->node.start,
cache_level, pte_flags);
}
if (flags & LOCAL_BIND) {
- struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
- appgtt->base.insert_entries(&appgtt->base, pages,
+ struct i915_hw_ppgtt *appgtt =
+ to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+ appgtt->base.insert_entries(&appgtt->base,
+ vma->ggtt_view.pages,
vma->node.start,
cache_level, pte_flags);
}
@@ -2717,8 +2710,8 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
@@ -2726,13 +2719,13 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
BUG_ON(mappable_end > end);
- ggtt_vm->start = start;
+ ggtt->base.start = start;
/* Subtract the guard page before address space initialization to
* shrink the range used by drm_mm */
- ggtt_vm->total = end - start - PAGE_SIZE;
- i915_address_space_init(ggtt_vm, dev_priv);
- ggtt_vm->total += PAGE_SIZE;
+ ggtt->base.total = end - start - PAGE_SIZE;
+ i915_address_space_init(&ggtt->base, dev_priv);
+ ggtt->base.total += PAGE_SIZE;
if (intel_vgpu_active(dev)) {
ret = intel_vgt_balloon(dev);
@@ -2741,36 +2734,36 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
}
if (!HAS_LLC(dev))
- ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
+ ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base);
DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
i915_gem_obj_ggtt_offset(obj), obj->base.size);
WARN_ON(i915_gem_obj_ggtt_bound(obj));
- ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
+ ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
return ret;
}
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
+ list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
}
/* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
+ drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- ggtt_vm->clear_range(ggtt_vm, hole_start,
+ ggtt->base.clear_range(&ggtt->base, hole_start,
hole_end - hole_start, true);
}
/* And finally clear the reserved guard page */
- ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
+ ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true);
if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
struct i915_hw_ppgtt *ppgtt;
@@ -2801,28 +2794,33 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
true);
dev_priv->mm.aliasing_ppgtt = ppgtt;
- WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
- dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
+ WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
+ ggtt->base.bind_vma = aliasing_gtt_bind_vma;
}
return 0;
}
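The PAGE_SIZE subtraction and re-addition around i915_address_space_init() above is a guard-page trick: drm_mm is told the space is one page smaller, so no allocation can land in the final page that GPU prefetching might walk into, and base.total is then restored so the rest of the driver reports the true size. A minimal standalone sketch of the bookkeeping, with a hypothetical 4 GiB range:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ull

int main(void)
{
	uint64_t start = 0, end = 1ull << 32;	/* hypothetical GGTT range */

	/* Hand the allocator one page less than the real range... */
	uint64_t managed = end - start - PAGE_SIZE;
	/* ...then restore the full size for reporting purposes. */
	uint64_t total = managed + PAGE_SIZE;

	printf("allocator manages %llu of %llu bytes (guard page excluded)\n",
	       (unsigned long long)managed, (unsigned long long)total);
	return 0;
}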
-void i915_gem_init_global_gtt(struct drm_device *dev)
+/**
+ * i915_gem_init_ggtt - Initialize GEM for Global GTT
+ * @dev: DRM device
+ */
+void i915_gem_init_ggtt(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u64 gtt_size, mappable_size;
-
- gtt_size = dev_priv->gtt.base.total;
- mappable_size = dev_priv->gtt.mappable_end;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
- i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+ i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total);
}
-void i915_global_gtt_cleanup(struct drm_device *dev)
+/**
+ * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
+ * @dev: DRM device
+ */
+void i915_ggtt_cleanup_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2832,15 +2830,15 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
i915_gem_cleanup_stolen(dev);
- if (drm_mm_initialized(&vm->mm)) {
+ if (drm_mm_initialized(&ggtt->base.mm)) {
if (intel_vgpu_active(dev))
intel_vgt_deballoon();
- drm_mm_takedown(&vm->mm);
- list_del(&vm->global_link);
+ drm_mm_takedown(&ggtt->base.mm);
+ list_del(&ggtt->base.global_link);
}
- vm->cleanup(vm);
+ ggtt->base.cleanup(&ggtt->base);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2924,13 +2922,14 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
static int ggtt_probe_common(struct drm_device *dev,
size_t gtt_size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_page_scratch *scratch_page;
- phys_addr_t gtt_phys_addr;
+ phys_addr_t ggtt_phys_addr;
/* For Modern GENs the PTEs and register space are split in the BAR */
- gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
- (pci_resource_len(dev->pdev, 0) / 2);
+ ggtt_phys_addr = pci_resource_start(dev->pdev, 0) +
+ (pci_resource_len(dev->pdev, 0) / 2);
/*
* On BXT writes larger than 64 bit to the GTT pagetable range will be
@@ -2940,10 +2939,10 @@ static int ggtt_probe_common(struct drm_device *dev,
* readback check when writing GTT PTE entries.
*/
if (IS_BROXTON(dev))
- dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
+ ggtt->gsm = ioremap_nocache(ggtt_phys_addr, gtt_size);
else
- dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
- if (!dev_priv->gtt.gsm) {
+ ggtt->gsm = ioremap_wc(ggtt_phys_addr, gtt_size);
+ if (!ggtt->gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
}
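The split between ioremap_nocache() and ioremap_wc() above is the entire Broxton workaround described in the comment: through a write-combined mapping, PTE writes wider than 64 bits may be split or reordered and corrupt the pagetable, so BXT takes the slower uncached mapping. A hedged kernel-style sketch of just the selection (map_gsm is an illustrative name, not a driver function):

#include <linux/io.h>
#include <linux/types.h>

/* Sketch only: choose the gsm mapping type per platform. */
static void __iomem *map_gsm(phys_addr_t phys, size_t size, bool is_broxton)
{
	/* WC risks splitting >64-bit PTE writes on BXT; fall back to UC. */
	return is_broxton ? ioremap_nocache(phys, size)
			  : ioremap_wc(phys, size);
}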
@@ -2952,11 +2951,11 @@ static int ggtt_probe_common(struct drm_device *dev,
if (IS_ERR(scratch_page)) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
- iounmap(dev_priv->gtt.gsm);
+ iounmap(ggtt->gsm);
return PTR_ERR(scratch_page);
}
- dev_priv->gtt.base.scratch_page = scratch_page;
+ ggtt->base.scratch_page = scratch_page;
return 0;
}
@@ -2977,7 +2976,7 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
- if (!USES_PPGTT(dev_priv->dev))
+ if (!USES_PPGTT(dev_priv))
/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
@@ -3034,20 +3033,16 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}
-static int gen8_gmch_probe(struct drm_device *dev,
- u64 *gtt_total,
- size_t *stolen,
- phys_addr_t *mappable_base,
- u64 *mappable_end)
+static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u64 gtt_size;
+ struct drm_device *dev = ggtt->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
u16 snb_gmch_ctl;
int ret;
/* TODO: We're not aware of mappable constraints on gen8 yet */
- *mappable_base = pci_resource_start(dev->pdev, 2);
- *mappable_end = pci_resource_len(dev->pdev, 2);
+ ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
+ ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
@@ -3055,56 +3050,50 @@ static int gen8_gmch_probe(struct drm_device *dev,
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
if (INTEL_INFO(dev)->gen >= 9) {
- *stolen = gen9_get_stolen_size(snb_gmch_ctl);
- gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
+ ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
} else if (IS_CHERRYVIEW(dev)) {
- *stolen = chv_get_stolen_size(snb_gmch_ctl);
- gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
+ ggtt->size = chv_get_total_gtt_size(snb_gmch_ctl);
} else {
- *stolen = gen8_get_stolen_size(snb_gmch_ctl);
- gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
+ ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
}
- *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
+ ggtt->base.total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
- ret = ggtt_probe_common(dev, gtt_size);
-
- dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
- dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
- dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
- dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ ret = ggtt_probe_common(dev, ggtt->size);
+ ggtt->base.clear_range = gen8_ggtt_clear_range;
if (IS_CHERRYVIEW(dev_priv))
- dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
+ ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
+ else
+ ggtt->base.insert_entries = gen8_ggtt_insert_entries;
+ ggtt->base.bind_vma = ggtt_bind_vma;
+ ggtt->base.unbind_vma = ggtt_unbind_vma;
return ret;
}
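ggtt->size here is the byte count of PTE space found in the BAR, and base.total converts it into addressable bytes: each 8-byte gen8 PTE maps one 4 KiB page. A standalone run of the arithmetic, using a sample 8 MiB PTE region:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
typedef uint64_t gen8_pte_t;	/* 8 bytes per entry */

int main(void)
{
	uint64_t gtt_size = 8ull << 20;	/* sample: 8 MiB of PTEs in the BAR */
	uint64_t total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;

	/* 1 Mi entries * 4 KiB per page = 4 GiB of GGTT address space. */
	printf("%llu PTEs -> %llu MiB of address space\n",
	       (unsigned long long)(gtt_size / sizeof(gen8_pte_t)),
	       (unsigned long long)(total >> 20));
	return 0;
}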
-static int gen6_gmch_probe(struct drm_device *dev,
- u64 *gtt_total,
- size_t *stolen,
- phys_addr_t *mappable_base,
- u64 *mappable_end)
+static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned int gtt_size;
+ struct drm_device *dev = ggtt->base.dev;
u16 snb_gmch_ctl;
int ret;
- *mappable_base = pci_resource_start(dev->pdev, 2);
- *mappable_end = pci_resource_len(dev->pdev, 2);
+ ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
+ ggtt->mappable_end = pci_resource_len(dev->pdev, 2);
/* 64/512MB is the current min/max we actually know of, but this is just
* a coarse sanity check.
*/
- if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
- DRM_ERROR("Unknown GMADR size (%llx)\n",
- dev_priv->gtt.mappable_end);
+ if ((ggtt->mappable_end < (64<<20) || (ggtt->mappable_end > (512<<20)))) {
+ DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
return -ENXIO;
}
@@ -3112,37 +3101,32 @@ static int gen6_gmch_probe(struct drm_device *dev,
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- *stolen = gen6_get_stolen_size(snb_gmch_ctl);
+ ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
+ ggtt->size = gen6_get_total_gtt_size(snb_gmch_ctl);
+ ggtt->base.total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
- gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
- *gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
+ ret = ggtt_probe_common(dev, ggtt->size);
- ret = ggtt_probe_common(dev, gtt_size);
-
- dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
- dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
- dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
- dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.clear_range = gen6_ggtt_clear_range;
+ ggtt->base.insert_entries = gen6_ggtt_insert_entries;
+ ggtt->base.bind_vma = ggtt_bind_vma;
+ ggtt->base.unbind_vma = ggtt_unbind_vma;
return ret;
}
static void gen6_gmch_remove(struct i915_address_space *vm)
{
+ struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base);
- struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
-
- iounmap(gtt->gsm);
+ iounmap(ggtt->gsm);
free_scratch_page(vm->dev, vm->scratch_page);
}
-static int i915_gmch_probe(struct drm_device *dev,
- u64 *gtt_total,
- size_t *stolen,
- phys_addr_t *mappable_base,
- u64 *mappable_end)
+static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = ggtt->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
@@ -3151,15 +3135,16 @@ static int i915_gmch_probe(struct drm_device *dev,
return -EIO;
}
- intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
+ intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
+ &ggtt->mappable_base, &ggtt->mappable_end);
- dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
- dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
- dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
- dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
- dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
+ ggtt->base.insert_entries = i915_ggtt_insert_entries;
+ ggtt->base.clear_range = i915_ggtt_clear_range;
+ ggtt->base.bind_vma = ggtt_bind_vma;
+ ggtt->base.unbind_vma = ggtt_unbind_vma;
- if (unlikely(dev_priv->gtt.do_idle_maps))
+ if (unlikely(ggtt->do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
return 0;
@@ -3170,41 +3155,53 @@ static void i915_gmch_remove(struct i915_address_space *vm)
intel_gmch_remove();
}
-int i915_gem_gtt_init(struct drm_device *dev)
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @dev: DRM device
+ */
+int i915_ggtt_init_hw(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_gtt *gtt = &dev_priv->gtt;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
if (INTEL_INFO(dev)->gen <= 5) {
- gtt->gtt_probe = i915_gmch_probe;
- gtt->base.cleanup = i915_gmch_remove;
+ ggtt->probe = i915_gmch_probe;
+ ggtt->base.cleanup = i915_gmch_remove;
} else if (INTEL_INFO(dev)->gen < 8) {
- gtt->gtt_probe = gen6_gmch_probe;
- gtt->base.cleanup = gen6_gmch_remove;
- if (IS_HASWELL(dev) && dev_priv->ellc_size)
- gtt->base.pte_encode = iris_pte_encode;
+ ggtt->probe = gen6_gmch_probe;
+ ggtt->base.cleanup = gen6_gmch_remove;
+
+ if (HAS_EDRAM(dev))
+ ggtt->base.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev))
- gtt->base.pte_encode = hsw_pte_encode;
+ ggtt->base.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev))
- gtt->base.pte_encode = byt_pte_encode;
+ ggtt->base.pte_encode = byt_pte_encode;
else if (INTEL_INFO(dev)->gen >= 7)
- gtt->base.pte_encode = ivb_pte_encode;
+ ggtt->base.pte_encode = ivb_pte_encode;
else
- gtt->base.pte_encode = snb_pte_encode;
+ ggtt->base.pte_encode = snb_pte_encode;
} else {
- dev_priv->gtt.gtt_probe = gen8_gmch_probe;
- dev_priv->gtt.base.cleanup = gen6_gmch_remove;
+ ggtt->probe = gen8_gmch_probe;
+ ggtt->base.cleanup = gen6_gmch_remove;
}
- gtt->base.dev = dev;
- gtt->base.is_ggtt = true;
+ ggtt->base.dev = dev;
+ ggtt->base.is_ggtt = true;
- ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
- &gtt->mappable_base, &gtt->mappable_end);
+ ret = ggtt->probe(ggtt);
if (ret)
return ret;
+ if ((ggtt->base.total - 1) >> 32) {
+ DRM_ERROR("We never expected a Global GTT with more than 32bits"
+ "of address space! Found %lldM!\n",
+ ggtt->base.total >> 20);
+ ggtt->base.total = 1ULL << 32;
+ ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
+ }
+
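The (total - 1) >> 32 test above is a careful way of asking whether any offset inside [0, total) needs more than 32 bits; testing total >> 32 directly would wrongly flag a size of exactly 1ULL << 32, whose highest valid offset still fits. A standalone illustration with hypothetical sizes:

#include <stdint.h>
#include <stdio.h>

static int needs_more_than_32bits(uint64_t total)
{
	/* Highest valid offset is total - 1; check its upper bits. */
	return (total - 1) >> 32 ? 1 : 0;
}

int main(void)
{
	printf("4 GiB exactly:   %d\n",
	       needs_more_than_32bits(1ull << 32));		/* 0 */
	printf("4 GiB + 1 page:  %d\n",
	       needs_more_than_32bits((1ull << 32) + 4096));	/* 1 */
	return 0;
}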
/*
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
@@ -3215,9 +3212,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %lluM\n",
- gtt->base.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+ ggtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped)
DRM_INFO("VT-d active for gfx access\n");
@@ -3234,33 +3231,38 @@ int i915_gem_gtt_init(struct drm_device *dev)
return 0;
out_gtt_cleanup:
- gtt->base.cleanup(&dev_priv->gtt.base);
+ ggtt->base.cleanup(&ggtt->base);
return ret;
}
+int i915_ggtt_enable_hw(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+ return -EIO;
+
+ return 0;
+}
+
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm;
struct i915_vma *vma;
bool flush;
i915_check_and_clear_faults(dev);
/* First fill our portion of the GTT with scratch pages */
- dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start,
- dev_priv->gtt.base.total,
- true);
+ ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
+ true);
/* Cache flush objects bound into GGTT and rebind them. */
- vm = &dev_priv->gtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
flush = false;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (vma->vm != vm)
+ if (vma->vm != &ggtt->base)
continue;
WARN_ON(i915_vma_bind(vma, obj->cache_level,
@@ -3283,15 +3285,17 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
}
if (USES_PPGTT(dev)) {
+ struct i915_address_space *vm;
+
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
/* TODO: Perhaps it shouldn't be gen6 specific */
- struct i915_hw_ppgtt *ppgtt =
- container_of(vm, struct i915_hw_ppgtt,
- base);
+ struct i915_hw_ppgtt *ppgtt;
- if (i915_is_ggtt(vm))
+ if (vm->is_ggtt)
ppgtt = dev_priv->mm.aliasing_ppgtt;
+ else
+ ppgtt = i915_vm_to_ppgtt(vm);
gen6_write_page_range(dev_priv, &ppgtt->pd,
0, ppgtt->base.total);
@@ -3350,19 +3354,13 @@ struct i915_vma *
i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
- struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
- struct i915_vma *vma;
-
- if (WARN_ON(!view))
- return ERR_PTR(-EINVAL);
-
- vma = i915_gem_obj_to_ggtt_view(obj, view);
-
- if (IS_ERR(vma))
- return vma;
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
if (!vma)
- vma = __i915_gem_vma_create(obj, ggtt, view);
+ vma = __i915_gem_vma_create(obj, &ggtt->base, view);
return vma;
@@ -3377,11 +3375,6 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
unsigned int column, row;
unsigned int src_idx;
- if (!sg) {
- st->nents = 0;
- sg = st->sgl;
- }
-
for (column = 0; column < width; column++) {
src_idx = stride * (height - 1) + column;
for (row = 0; row < height; row++) {
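The loop above is the heart of rotate_pages(): the output list is filled column-major while src_idx starts at the bottom of each source column (stride * (height - 1) + column) and steps one stride up per entry, which rotates the tile grid by 90 degrees. A standalone demo of just the index walk, on a hypothetical 3x2 grid:

#include <stdio.h>

int main(void)
{
	unsigned int width = 3, height = 2, stride = 3; /* sample tile grid */
	unsigned int column, row, src_idx;

	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column;
		for (row = 0; row < height; row++) {
			/* dst fills column-major; src walks bottom-up */
			printf("dst %u <- src %u\n",
			       column * height + row, src_idx);
			src_idx -= stride;
		}
	}
	return 0;
}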
@@ -3405,7 +3398,7 @@ static struct sg_table *
intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
- unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
+ unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
unsigned int size_pages_uv;
struct sg_page_iter sg_iter;
unsigned long i;
@@ -3416,14 +3409,15 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
int ret = -ENOMEM;
/* Allocate a temporary list of source pages for random access. */
- page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE,
- sizeof(dma_addr_t));
+ page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE,
+ sizeof(dma_addr_t),
+ GFP_TEMPORARY);
if (!page_addr_list)
return ERR_PTR(ret);
/* Account for UV plane with NV12. */
if (rot_info->pixel_format == DRM_FORMAT_NV12)
- size_pages_uv = rot_info->size_uv >> PAGE_SHIFT;
+ size_pages_uv = rot_info->plane[1].width * rot_info->plane[1].height;
else
size_pages_uv = 0;
@@ -3443,11 +3437,14 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
i++;
}
+ st->nents = 0;
+ sg = st->sgl;
+
/* Rotate the pages. */
sg = rotate_pages(page_addr_list, 0,
- rot_info->width_pages, rot_info->height_pages,
- rot_info->width_pages,
- st, NULL);
+ rot_info->plane[0].width, rot_info->plane[0].height,
+ rot_info->plane[0].width,
+ st, sg);
/* Append the UV plane if NV12. */
if (rot_info->pixel_format == DRM_FORMAT_NV12) {
@@ -3459,18 +3456,15 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
rot_info->uv_start_page = uv_start_page;
- rotate_pages(page_addr_list, uv_start_page,
- rot_info->width_pages_uv,
- rot_info->height_pages_uv,
- rot_info->width_pages_uv,
- st, sg);
+ sg = rotate_pages(page_addr_list, rot_info->uv_start_page,
+ rot_info->plane[1].width, rot_info->plane[1].height,
+ rot_info->plane[1].width,
+ st, sg);
}
- DRM_DEBUG_KMS(
- "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
- obj->base.size, rot_info->pitch, rot_info->height,
- rot_info->pixel_format, rot_info->width_pages,
- rot_info->height_pages, size_pages + size_pages_uv,
+ DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages (%u plane 0)).\n",
+ obj->base.size, rot_info->plane[0].width,
+ rot_info->plane[0].height, size_pages + size_pages_uv,
size_pages);
drm_free_large(page_addr_list);
@@ -3482,11 +3476,9 @@ err_sg_alloc:
err_st_alloc:
drm_free_large(page_addr_list);
- DRM_DEBUG_KMS(
- "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
- obj->base.size, ret, rot_info->pitch, rot_info->height,
- rot_info->pixel_format, rot_info->width_pages,
- rot_info->height_pages, size_pages + size_pages_uv,
+ DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%d) (%ux%u tiles, %u pages (%u plane 0))\n",
+ obj->base.size, ret, rot_info->plane[0].width,
+ rot_info->plane[0].height, size_pages + size_pages_uv,
size_pages);
return ERR_PTR(ret);
}
@@ -3634,7 +3626,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
if (view->type == I915_GGTT_VIEW_NORMAL) {
return obj->base.size;
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
- return view->params.rotated.size;
+ return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT;
} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
return view->params.partial.size << PAGE_SHIFT;
} else {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 8774f1ba4..0008543d5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -42,7 +42,7 @@ typedef uint64_t gen8_pde_t;
typedef uint64_t gen8_ppgtt_pdpe_t;
typedef uint64_t gen8_ppgtt_pml4e_t;
-#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
@@ -135,16 +135,13 @@ enum i915_ggtt_view_type {
};
struct intel_rotation_info {
- unsigned int height;
- unsigned int pitch;
unsigned int uv_offset;
uint32_t pixel_format;
- uint64_t fb_modifier;
- unsigned int width_pages, height_pages;
- uint64_t size;
- unsigned int width_pages_uv, height_pages_uv;
- uint64_t size_uv;
unsigned int uv_start_page;
+ struct {
+ /* tiles */
+ unsigned int width, height;
+ } plane[2];
};
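With the old size/size_uv fields gone, a rotated view's page count is derived from the per-plane tile dimensions; i915_ggtt_view_size() in the hunk above does this via intel_rotation_info_size(). A hedged sketch of that computation over the new layout (rotation_planes and rotation_info_pages are illustrative names, not the driver's):

struct rotation_planes {
	struct {
		unsigned int width, height;	/* in tiles/pages */
	} plane[2];
};

/* Sketch: total pages is the sum of the per-plane tile grids. */
static unsigned int rotation_info_pages(const struct rotation_planes *info)
{
	unsigned int i, size = 0;

	for (i = 0; i < 2; i++)
		size += info->plane[i].width * info->plane[i].height;
	return size;
}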
struct i915_ggtt_view {
@@ -342,13 +339,14 @@ struct i915_address_space {
* and correct (in cases like swizzling). That region is referred to as GMADR in
* the spec.
*/
-struct i915_gtt {
+struct i915_ggtt {
struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */
size_t stolen_usable_size; /* Total size minus BIOS reserved */
size_t stolen_reserved_base;
size_t stolen_reserved_size;
+ size_t size; /* Total size of Global GTT */
u64 mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
@@ -360,10 +358,7 @@ struct i915_gtt {
int mtrr;
- /* global gtt ops */
- int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
- size_t *stolen, phys_addr_t *mappable_base,
- u64 *mappable_end);
+ int (*probe)(struct i915_ggtt *ggtt);
};
struct i915_hw_ppgtt {
@@ -518,10 +513,10 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
px_dma(ppgtt->base.scratch_pd);
}
-int i915_gem_gtt_init(struct drm_device *dev);
-void i915_gem_init_global_gtt(struct drm_device *dev);
-void i915_global_gtt_cleanup(struct drm_device *dev);
-
+int i915_ggtt_init_hw(struct drm_device *dev);
+int i915_ggtt_enable_hw(struct drm_device *dev);
+void i915_gem_init_ggtt(struct drm_device *dev);
+void i915_ggtt_cleanup_hw(struct drm_device *dev);
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index fc7e6d5c6..71611bf21 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -169,15 +169,15 @@ void i915_gem_render_state_fini(struct render_state *so)
drm_gem_object_unreference(&so->obj->base);
}
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
struct render_state *so)
{
int ret;
- if (WARN_ON(ring->id != RCS))
+ if (WARN_ON(engine->id != RCS))
return -ENOENT;
- ret = render_state_init(so, ring->dev);
+ ret = render_state_init(so, engine->dev);
if (ret)
return ret;
@@ -198,21 +198,21 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
struct render_state so;
int ret;
- ret = i915_gem_render_state_prepare(req->ring, &so);
+ ret = i915_gem_render_state_prepare(req->engine, &so);
if (ret)
return ret;
if (so.rodata == NULL)
return 0;
- ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
+ ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
if (ret)
goto out;
if (so.aux_batch_size > 8) {
- ret = req->ring->dispatch_execbuffer(req,
+ ret = req->engine->dispatch_execbuffer(req,
(so.ggtt_offset +
so.aux_batch_offset),
so.aux_batch_size,
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index e641bb093..6aaa3a10a 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -43,7 +43,7 @@ struct render_state {
int i915_gem_render_state_init(struct drm_i915_gem_request *req);
void i915_gem_render_state_fini(struct render_state *so);
-int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
+int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
struct render_state *so);
#endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 3af40616b..66571466e 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -28,6 +28,7 @@
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
+#include <linux/vmalloc.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
@@ -69,6 +70,10 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
+ /* Only shmemfs objects are backed by swap */
+ if (!obj->base.filp)
+ return false;
+
/* Only report true if by unbinding the object and putting its pages
* we can actually make forward progress towards freeing physical
* pages.
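The filp test folds the OOM handler's old "not backed by a freeable object" special case into can_release_pages(): only shmemfs-backed objects can have pages written out to swap, so everything else is unevictable by definition. A hedged sketch of the predicate's shape, over a pared-down stand-in struct (the real check also weighs pinning against bound vmas):

#include <stdbool.h>

struct gem_object_sketch {
	void *filp;		/* shmemfs backing file, if any */
	int pages_pin_count;	/* pinned pages cannot be dropped */
};

static bool can_release_pages_sketch(const struct gem_object_sketch *obj)
{
	/* Only shmemfs objects are backed by swap. */
	if (!obj->filp)
		return false;

	/* Pinned pages cannot be released; count them as unevictable. */
	return obj->pages_pin_count == 0;
}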
@@ -166,6 +171,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
obj->madv != I915_MADV_DONTNEED)
continue;
+ if (flags & I915_SHRINK_VMAPS &&
+ !is_vmalloc_addr(obj->mapping))
+ continue;
+
if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
continue;
@@ -246,7 +255,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
- if (obj->pages_pin_count == 0)
+ if (can_release_pages(obj))
count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -288,67 +297,82 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
return freed;
}
-static int
-i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
-{
- struct drm_i915_private *dev_priv =
- container_of(nb, struct drm_i915_private, mm.oom_notifier);
- struct drm_device *dev = dev_priv->dev;
- struct drm_i915_gem_object *obj;
- unsigned long timeout = msecs_to_jiffies(5000) + 1;
- unsigned long pinned, bound, unbound, freed_pages;
+struct shrinker_lock_uninterruptible {
bool was_interruptible;
bool unlock;
+};
+
+static bool
+i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
+ struct shrinker_lock_uninterruptible *slu,
+ int timeout_ms)
+{
+ unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
- while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
+ while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
schedule_timeout_killable(1);
if (fatal_signal_pending(current))
- return NOTIFY_DONE;
- }
- if (timeout == 0) {
- pr_err("Unable to purge GPU memory due lock contention.\n");
- return NOTIFY_DONE;
+ return false;
+ if (--timeout == 0) {
+ pr_err("Unable to lock GPU to purge memory.\n");
+ return false;
+ }
}
- was_interruptible = dev_priv->mm.interruptible;
+ slu->was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
+ return true;
+}
- freed_pages = i915_gem_shrink_all(dev_priv);
+static void
+i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
+ struct shrinker_lock_uninterruptible *slu)
+{
+ dev_priv->mm.interruptible = slu->was_interruptible;
+ if (slu->unlock)
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
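The helper pair above packages a bounded-retry lock idiom: spin on the mutex for a limited number of attempts, force mm.interruptible off for the critical section, then restore both on the way out. A rough userspace analogue of the retry loop, using pthreads in place of struct_mutex (sketch only; the kernel version sleeps between attempts and aborts on a fatal signal):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Bounded trylock loop: give up after max_tries instead of blocking. */
static bool lock_with_timeout(int max_tries)
{
	while (pthread_mutex_trylock(&lock) != 0) {
		if (--max_tries == 0) {
			fprintf(stderr, "unable to take lock\n");
			return false;
		}
		/* kernel sleeps here via schedule_timeout_killable(1) */
	}
	return true;
}

int main(void)
{
	if (lock_with_timeout(5000)) {
		/* critical section: e.g. shrink with waits forced on */
		pthread_mutex_unlock(&lock);
	}
	return 0;
}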
+
+static int
+i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, mm.oom_notifier);
+ struct shrinker_lock_uninterruptible slu;
+ struct drm_i915_gem_object *obj;
+ unsigned long unevictable, bound, unbound, freed_pages;
- dev_priv->mm.interruptible = was_interruptible;
+ if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
+ return NOTIFY_DONE;
+
+ freed_pages = i915_gem_shrink_all(dev_priv);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
* being pointed to by hardware.
*/
- unbound = bound = pinned = 0;
+ unbound = bound = unevictable = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
- if (!obj->base.filp) /* not backed by a freeable object */
- continue;
-
- if (obj->pages_pin_count)
- pinned += obj->base.size;
+ if (!can_release_pages(obj))
+ unevictable += obj->base.size >> PAGE_SHIFT;
else
- unbound += obj->base.size;
+ unbound += obj->base.size >> PAGE_SHIFT;
}
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if (!obj->base.filp)
- continue;
-
- if (obj->pages_pin_count)
- pinned += obj->base.size;
+ if (!can_release_pages(obj))
+ unevictable += obj->base.size >> PAGE_SHIFT;
else
- bound += obj->base.size;
+ bound += obj->base.size >> PAGE_SHIFT;
}
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
if (freed_pages || unbound || bound)
- pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
- freed_pages << PAGE_SHIFT, pinned);
+ pr_info("Purging GPU memory, %lu pages freed, "
+ "%lu pages still pinned.\n",
+ freed_pages, unevictable);
if (unbound || bound)
- pr_err("%lu and %lu bytes still available in the "
+ pr_err("%lu and %lu pages still available in the "
"bound and unbound GPU page lists.\n",
bound, unbound);
@@ -356,6 +380,29 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
return NOTIFY_DONE;
}
+static int
+i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(nb, struct drm_i915_private, mm.vmap_notifier);
+ struct shrinker_lock_uninterruptible slu;
+ unsigned long freed_pages;
+
+ if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
+ return NOTIFY_DONE;
+
+ freed_pages = i915_gem_shrink(dev_priv, -1UL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE |
+ I915_SHRINK_VMAPS);
+
+ i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
+
+ *(unsigned long *)ptr += freed_pages;
+ return NOTIFY_DONE;
+}
+
/**
* i915_gem_shrinker_init - Initialize i915 shrinker
* @dev_priv: i915 device
@@ -371,6 +418,9 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
+
+ dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
+ WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
}
/**
@@ -381,6 +431,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
*/
void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
{
+ WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
unregister_shrinker(&dev_priv->mm.shrinker);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 2e6e9fb6f..44004e3f0 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -55,8 +55,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
return -ENODEV;
/* See the comment at the drm_mm_init() call for more about this check.
- * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
- if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
+ * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+ */
+ if (start < 4096 && (IS_GEN8(dev_priv) ||
+ IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
start = 4096;
mutex_lock(&dev_priv->mm.stolen_lock);
@@ -72,9 +74,11 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
- alignment, 0,
- dev_priv->gtt.stolen_usable_size);
+ alignment, 0,
+ ggtt->stolen_usable_size);
}
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
@@ -87,14 +91,15 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct resource *r;
u32 base;
/* Almost universally we can find the Graphics Base of Stolen Memory
- * at offset 0x5c in the igfx configuration space. On a few (desktop)
- * machines this is also mirrored in the bridge device at different
- * locations, or in the MCHBAR.
+ * at register BSM (0x5c) in the igfx configuration space. On a few
+ * (desktop) machines this is also mirrored in the bridge device at
+ * different locations, or in the MCHBAR.
*
* On 865 we just check the TOUD register.
*
@@ -104,9 +109,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*/
base = 0;
if (INTEL_INFO(dev)->gen >= 3) {
- /* Read Graphics Base of Stolen Memory directly */
- pci_read_config_dword(dev->pdev, 0x5c, &base);
- base &= ~((1<<20) - 1);
+ u32 bsm;
+
+ pci_read_config_dword(dev->pdev, BSM, &bsm);
+
+ base = bsm & BSM_MASK;
} else if (IS_I865G(dev)) {
u16 toud = 0;
@@ -134,7 +141,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I85X_DRB3, &tmp);
tom = tmp * MB(32);
- base = tom - tseg_size - dev_priv->gtt.stolen_size;
+ base = tom - tseg_size - ggtt->stolen_size;
} else if (IS_845G(dev)) {
u32 tseg_size = 0;
u32 tom;
@@ -158,7 +165,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I830_DRB3, &tmp);
tom = tmp * MB(32);
- base = tom - tseg_size - dev_priv->gtt.stolen_size;
+ base = tom - tseg_size - ggtt->stolen_size;
} else if (IS_I830(dev)) {
u32 tseg_size = 0;
u32 tom;
@@ -178,7 +185,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I830_DRB3, &tmp);
tom = tmp * MB(32);
- base = tom - tseg_size - dev_priv->gtt.stolen_size;
+ base = tom - tseg_size - ggtt->stolen_size;
}
if (base == 0)
@@ -189,41 +196,41 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
struct {
u32 start, end;
} stolen[2] = {
- { .start = base, .end = base + dev_priv->gtt.stolen_size, },
- { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ { .start = base, .end = base + ggtt->stolen_size, },
+ { .start = base, .end = base + ggtt->stolen_size, },
};
- u64 gtt_start, gtt_end;
+ u64 ggtt_start, ggtt_end;
- gtt_start = I915_READ(PGTBL_CTL);
+ ggtt_start = I915_READ(PGTBL_CTL);
if (IS_GEN4(dev))
- gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
- (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
+ ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
+ (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
- gtt_start &= PGTBL_ADDRESS_LO_MASK;
- gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+ ggtt_start &= PGTBL_ADDRESS_LO_MASK;
+ ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;
- if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
- stolen[0].end = gtt_start;
- if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
- stolen[1].start = gtt_end;
+ if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
+ stolen[0].end = ggtt_start;
+ if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
+ stolen[1].start = ggtt_end;
/* pick the larger of the two chunks */
if (stolen[0].end - stolen[0].start >
stolen[1].end - stolen[1].start) {
base = stolen[0].start;
- dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+ ggtt->stolen_size = stolen[0].end - stolen[0].start;
} else {
base = stolen[1].start;
- dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+ ggtt->stolen_size = stolen[1].end - stolen[1].start;
}
if (stolen[0].start != stolen[1].start ||
stolen[0].end != stolen[1].end) {
DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
- (unsigned long long) gtt_start,
- (unsigned long long) gtt_end - 1);
+ (unsigned long long)ggtt_start,
+ (unsigned long long)ggtt_end - 1);
DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
- base, base + (u32) dev_priv->gtt.stolen_size - 1);
+ base, base + (u32)ggtt->stolen_size - 1);
}
}
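When the GTT pagetable turns out to live inside stolen memory, the block above splits the stolen range into the pieces below and above the pagetable and keeps the larger one. A standalone sketch of the trim-and-pick step with hypothetical addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t base = 0x80000000u, size = 32u << 20;	/* sample stolen */
	uint32_t ggtt_start = 0x80100000u;		/* pagetable inside */
	uint32_t ggtt_end = ggtt_start + (1u << 20);

	struct { uint32_t start, end; } stolen[2] = {
		{ base, base + size },
		{ base, base + size },
	};

	/* Trim each candidate against the pagetable range. */
	if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
		stolen[0].end = ggtt_start;
	if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
		stolen[1].start = ggtt_end;

	/* Keep the larger surviving chunk. */
	if (stolen[0].end - stolen[0].start >
	    stolen[1].end - stolen[1].start)
		printf("keep [0x%x, 0x%x)\n", stolen[0].start, stolen[0].end);
	else
		printf("keep [0x%x, 0x%x)\n", stolen[1].start, stolen[1].end);
	return 0;
}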
@@ -233,7 +240,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+ r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
/*
@@ -245,7 +252,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* reservation starting from 1 instead of 0.
*/
r = devm_request_mem_region(dev->dev, base + 1,
- dev_priv->gtt.stolen_size - 1,
+ ggtt->stolen_size - 1,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
@@ -253,7 +260,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*/
if (r == NULL && !IS_GEN3(dev)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
- base, base + (uint32_t)dev_priv->gtt.stolen_size);
+ base, base + (uint32_t)ggtt->stolen_size);
base = 0;
}
}
@@ -274,11 +281,12 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
unsigned long *base, unsigned long *size)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
unsigned long stolen_top = dev_priv->mm.stolen_base +
- dev_priv->gtt.stolen_size;
+ ggtt->stolen_size;
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
@@ -369,10 +377,11 @@ static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
unsigned long *base, unsigned long *size)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
unsigned long stolen_top;
- stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+ stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
@@ -388,7 +397,8 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long reserved_total, reserved_base = 0, reserved_size;
unsigned long stolen_top;
@@ -401,14 +411,14 @@ int i915_gem_init_stolen(struct drm_device *dev)
}
#endif
- if (dev_priv->gtt.stolen_size == 0)
+ if (ggtt->stolen_size == 0)
return 0;
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
return 0;
- stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+ stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
switch (INTEL_INFO(dev_priv)->gen) {
case 2:
@@ -458,19 +468,18 @@ int i915_gem_init_stolen(struct drm_device *dev)
return 0;
}
- dev_priv->gtt.stolen_reserved_base = reserved_base;
- dev_priv->gtt.stolen_reserved_size = reserved_size;
+ ggtt->stolen_reserved_base = reserved_base;
+ ggtt->stolen_reserved_size = reserved_size;
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
- dev_priv->gtt.stolen_size >> 10,
- (dev_priv->gtt.stolen_size - reserved_total) >> 10);
+ ggtt->stolen_size >> 10,
+ (ggtt->stolen_size - reserved_total) >> 10);
- dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
- reserved_total;
+ ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;
/*
* Basic memrange allocator for stolen space.
@@ -483,7 +492,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
* i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
* problem later.
*/
- drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
+ drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);
return 0;
}
@@ -492,12 +501,13 @@ static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
u32 offset, u32 size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct sg_table *st;
struct scatterlist *sg;
DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
- BUG_ON(offset > dev_priv->gtt.stolen_size - size);
+ BUG_ON(offset > ggtt->stolen_size - size);
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -628,8 +638,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 gtt_offset,
u32 size)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *ggtt = &dev_priv->gtt.base;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
@@ -675,7 +685,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
- vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
+ vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -688,8 +698,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
*/
vma->node.start = gtt_offset;
vma->node.size = size;
- if (drm_mm_initialized(&ggtt->mm)) {
- ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
+ if (drm_mm_initialized(&ggtt->base.mm)) {
+ ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto err;
@@ -697,7 +707,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
vma->bound |= GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
- list_add_tail(&vma->vm_link, &ggtt->inactive_list);
+ list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
}
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 7410f6c96..b9bdb3403 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -166,7 +166,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
int ret = 0;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL)
return -ENOENT;
@@ -297,7 +297,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
if (&obj->base == NULL)
return -ENOENT;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 4d30b60de..32d9726e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -34,7 +34,7 @@
struct i915_mm_struct {
struct mm_struct *mm;
- struct drm_device *dev;
+ struct drm_i915_private *i915;
struct i915_mmu_notifier *mn;
struct hlist_node node;
struct kref kref;
@@ -49,6 +49,7 @@ struct i915_mmu_notifier {
struct hlist_node node;
struct mmu_notifier mn;
struct rb_root objects;
+ struct workqueue_struct *wq;
};
struct i915_mmu_object {
@@ -60,6 +61,37 @@ struct i915_mmu_object {
bool attached;
};
+static void wait_rendering(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
+ int i, n;
+
+ if (!obj->active)
+ return;
+
+ n = 0;
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ struct drm_i915_gem_request *req;
+
+ req = obj->last_read_req[i];
+ if (req == NULL)
+ continue;
+
+ requests[n++] = i915_gem_request_reference(req);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ for (i = 0; i < n; i++)
+ __i915_wait_request(requests[i], false, NULL, NULL);
+
+ mutex_lock(&dev->struct_mutex);
+
+ for (i = 0; i < n; i++)
+ i915_gem_request_unreference(requests[i]);
+}
+
static void cancel_userptr(struct work_struct *work)
{
struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
@@ -75,13 +107,13 @@ static void cancel_userptr(struct work_struct *work)
struct i915_vma *vma, *tmp;
bool was_interruptible;
+ wait_rendering(obj);
+
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
- list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
- int ret = i915_vma_unbind(vma);
- WARN_ON(ret && ret != -EIO);
- }
+ list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
+ WARN_ON(i915_vma_unbind(vma));
WARN_ON(i915_gem_object_put_pages(obj));
dev_priv->mm.interruptible = was_interruptible;
@@ -140,7 +172,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
*/
mo = container_of(it, struct i915_mmu_object, it);
if (kref_get_unless_zero(&mo->obj->base.refcount))
- schedule_work(&mo->work);
+ queue_work(mn->wq, &mo->work);
list_add(&mo->link, &cancelled);
it = interval_tree_iter_next(it, start, end);
@@ -148,6 +180,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
list_for_each_entry(mo, &cancelled, link)
del_object(mo);
spin_unlock(&mn->lock);
+
+ flush_workqueue(mn->wq);
}
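Moving cancel_userptr() onto a private workqueue is what gives the flush_workqueue() above its meaning: invalidate_range_start may not return until every queued cancellation has run, and flushing the shared system workqueue would also wait on unrelated work. A hedged kernel-style sketch of the lifecycle (callback body elided; names are illustrative):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;
static struct work_struct cancel_work;

static void cancel_cb(struct work_struct *work) { /* elided */ }

static int setup(void)
{
	wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;
	INIT_WORK(&cancel_work, cancel_cb);
	return 0;
}

static void invalidate(void)
{
	queue_work(wq, &cancel_work);
	flush_workqueue(wq);	/* all queued cancellations done on return */
}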
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
@@ -167,10 +201,16 @@ i915_mmu_notifier_create(struct mm_struct *mm)
spin_lock_init(&mn->lock);
mn->mn.ops = &i915_gem_userptr_notifier;
mn->objects = RB_ROOT;
+ mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
+ if (mn->wq == NULL) {
+ kfree(mn);
+ return ERR_PTR(-ENOMEM);
+ }
/* Protected by mmap_sem (write-lock) */
ret = __mmu_notifier_register(&mn->mn, mm);
if (ret) {
+ destroy_workqueue(mn->wq);
kfree(mn);
return ERR_PTR(ret);
}
@@ -205,13 +245,13 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
return mn;
down_write(&mm->mm->mmap_sem);
- mutex_lock(&to_i915(mm->dev)->mm_lock);
+ mutex_lock(&mm->i915->mm_lock);
if ((mn = mm->mn) == NULL) {
mn = i915_mmu_notifier_create(mm->mm);
if (!IS_ERR(mn))
mm->mn = mn;
}
- mutex_unlock(&to_i915(mm->dev)->mm_lock);
+ mutex_unlock(&mm->i915->mm_lock);
up_write(&mm->mm->mmap_sem);
return mn;
@@ -256,6 +296,7 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
return;
mmu_notifier_unregister(&mn->mn, mm);
+ destroy_workqueue(mn->wq);
kfree(mn);
}
@@ -327,7 +368,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
}
kref_init(&mm->kref);
- mm->dev = obj->base.dev;
+ mm->i915 = to_i915(obj->base.dev);
mm->mm = current->mm;
atomic_inc(&current->mm->mm_count);
@@ -362,7 +403,7 @@ __i915_mm_struct_free(struct kref *kref)
/* Protected by dev_priv->mm_lock */
hash_del(&mm->node);
- mutex_unlock(&to_i915(mm->dev)->mm_lock);
+ mutex_unlock(&mm->i915->mm_lock);
INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
schedule_work(&mm->work);
@@ -494,10 +535,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
ret = -ENOMEM;
pinned = 0;
- pvec = kmalloc(npages*sizeof(struct page *),
- GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
- if (pvec == NULL)
- pvec = drm_malloc_ab(npages, sizeof(struct page *));
+ pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
@@ -639,14 +677,11 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
pvec = NULL;
pinned = 0;
if (obj->userptr.mm->mm == current->mm) {
- pvec = kmalloc(num_pages*sizeof(struct page *),
- GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
+ GFP_TEMPORARY);
if (pvec == NULL) {
- pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
- if (pvec == NULL) {
- __i915_gem_userptr_set_active(obj, false);
- return -ENOMEM;
- }
+ __i915_gem_userptr_set_active(obj, false);
+ return -ENOMEM;
}
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
@@ -763,6 +798,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
int ret;
u32 handle;
+ if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) {
+ /* We cannot support coherent userptr objects on hw without
+ * LLC and with broken snooping.
+ */
+ return -ENODEV;
+ }
+
if (args->flags & ~(I915_USERPTR_READ_ONLY |
I915_USERPTR_UNSYNCHRONIZED))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 831895b8c..89725c9ef 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -198,7 +198,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
err->size,
err->read_domains,
err->write_domain);
- for (i = 0; i < I915_NUM_RINGS; i++)
+ for (i = 0; i < I915_NUM_ENGINES; i++)
err_printf(m, "%02x ", err->rseqno[i]);
err_printf(m, "] %02x", err->wseqno);
@@ -230,8 +230,6 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
return "wait";
case HANGCHECK_ACTIVE:
return "active";
- case HANGCHECK_ACTIVE_LOOP:
- return "active (loop)";
case HANGCHECK_KICK:
return "kick";
case HANGCHECK_HUNG:
@@ -298,6 +296,7 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
}
}
err_printf(m, " seqno: 0x%08x\n", ring->seqno);
+ err_printf(m, " last_seqno: 0x%08x\n", ring->last_seqno);
err_printf(m, " waiting: %s\n", yesno(ring->waiting));
err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail);
@@ -433,7 +432,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
obj = error->ring[i].batchbuffer;
if (obj) {
- err_puts(m, dev_priv->ring[i].name);
+ err_puts(m, dev_priv->engine[i].name);
if (error->ring[i].pid != -1)
err_printf(m, " (submitted by %s [%d])",
error->ring[i].comm,
@@ -447,14 +446,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
obj = error->ring[i].wa_batchbuffer;
if (obj) {
err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
- dev_priv->ring[i].name,
+ dev_priv->engine[i].name,
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
if (error->ring[i].num_requests) {
err_printf(m, "%s --- %d requests\n",
- dev_priv->ring[i].name,
+ dev_priv->engine[i].name,
error->ring[i].num_requests);
for (j = 0; j < error->ring[i].num_requests; j++) {
err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
@@ -466,7 +465,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if ((obj = error->ring[i].ringbuffer)) {
err_printf(m, "%s --- ringbuffer = 0x%08x\n",
- dev_priv->ring[i].name,
+ dev_priv->engine[i].name,
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
@@ -480,7 +479,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
hws_page = &obj->pages[LRC_PPHWSP_PN][0];
}
err_printf(m, "%s --- HW Status = 0x%08llx\n",
- dev_priv->ring[i].name, hws_offset);
+ dev_priv->engine[i].name, hws_offset);
offset = 0;
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
err_printf(m, "[%04x] %08x %08x %08x %08x\n",
@@ -493,9 +492,31 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
+ obj = error->ring[i].wa_ctx;
+ if (obj) {
+ u64 wa_ctx_offset = obj->gtt_offset;
+ u32 *wa_ctx_page = &obj->pages[0][0];
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+ u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size +
+ engine->wa_ctx.per_ctx.size);
+
+ err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n",
+ dev_priv->engine[i].name, wa_ctx_offset);
+ offset = 0;
+ for (elt = 0; elt < wa_ctx_size; elt += 4) {
+ err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+ offset,
+ wa_ctx_page[elt + 0],
+ wa_ctx_page[elt + 1],
+ wa_ctx_page[elt + 2],
+ wa_ctx_page[elt + 3]);
+ offset += 16;
+ }
+ }
+
if ((obj = error->ring[i].ctx)) {
err_printf(m, "%s --- HW Context = 0x%08x\n",
- dev_priv->ring[i].name,
+ dev_priv->engine[i].name,
lower_32_bits(obj->gtt_offset));
print_error_obj(m, obj);
}
@@ -585,6 +606,7 @@ static void i915_error_state_free(struct kref *error_ref)
i915_error_object_free(error->ring[i].hws_page);
i915_error_object_free(error->ring[i].ctx);
kfree(error->ring[i].requests);
+ i915_error_object_free(error->ring[i].wa_ctx);
}
i915_error_object_free(error->semaphore_obj);
@@ -606,6 +628,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src,
struct i915_address_space *vm)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_error_object *dst;
struct i915_vma *vma = NULL;
int num_pages;
@@ -632,7 +655,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
vma = i915_gem_obj_to_ggtt(src);
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
vma && (vma->bound & GLOBAL_BIND) &&
- reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
+ reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
/* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) {
@@ -642,12 +665,13 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
reloc_offset = i915_gem_obj_ggtt_offset(src);
- if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
+ if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
goto unwind;
}
/* Cannot access snooped pages through the aperture */
- if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
+ if (use_ggtt && src->cache_level != I915_CACHE_NONE &&
+ !HAS_LLC(dev_priv))
goto unwind;
dst->page_count = num_pages;
@@ -668,7 +692,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
* captures what the GPU read.
*/
- s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ s = io_mapping_map_atomic_wc(ggtt->mappable,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
@@ -701,7 +725,7 @@ unwind:
return NULL;
}
#define i915_error_ggtt_object_create(dev_priv, src) \
- i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
+ i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
@@ -711,7 +735,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->size = obj->base.size;
err->name = obj->base.name;
- for (i = 0; i < I915_NUM_RINGS; i++)
+ for (i = 0; i < I915_NUM_ENGINES; i++)
err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
err->gtt_offset = vma->node.start;
@@ -726,7 +750,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->userptr = obj->userptr.mm != NULL;
err->ring = obj->last_write_req ?
- i915_gem_request_get_ring(obj->last_write_req)->id : -1;
+ i915_gem_request_get_engine(obj->last_write_req)->id : -1;
err->cache_level = obj->cache_level;
}
@@ -788,7 +812,7 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
 * synchronization commands, which almost always appear in cases that
 * are strictly a client bug. Use instdone to differentiate some of those.
*/
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
if (ring_id)
*ring_id = i;
@@ -821,11 +845,11 @@ static void i915_gem_record_fences(struct drm_device *dev,
static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error,
- struct intel_engine_cs *ring,
+ struct intel_engine_cs *engine,
struct drm_i915_error_ring *ering)
{
struct intel_engine_cs *to;
- int i;
+ enum intel_engine_id id;
if (!i915_semaphore_is_enabled(dev_priv->dev))
return;
@@ -835,68 +859,69 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
i915_error_ggtt_object_create(dev_priv,
dev_priv->semaphore_obj);
- for_each_ring(to, dev_priv, i) {
+ for_each_engine_id(to, dev_priv, id) {
int idx;
u16 signal_offset;
u32 *tmp;
- if (ring == to)
+ if (engine == to)
continue;
- signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
+ signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
/ 4;
tmp = error->semaphore_obj->pages[0];
- idx = intel_ring_sync_index(ring, to);
+ idx = intel_ring_sync_index(engine, to);
ering->semaphore_mboxes[idx] = tmp[signal_offset];
- ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
+ ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
}
}
static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *ring,
+ struct intel_engine_cs *engine,
struct drm_i915_error_ring *ering)
{
- ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
- ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
- ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
- ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+ ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
+ ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
+ ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0];
+ ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1];
- if (HAS_VEBOX(dev_priv->dev)) {
+ if (HAS_VEBOX(dev_priv)) {
ering->semaphore_mboxes[2] =
- I915_READ(RING_SYNC_2(ring->mmio_base));
- ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+ I915_READ(RING_SYNC_2(engine->mmio_base));
+ ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2];
}
}
static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_error_state *error,
- struct intel_engine_cs *ring,
+ struct intel_engine_cs *engine,
struct drm_i915_error_ring *ering)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 6) {
- ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
- ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
+ ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
+ ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
if (INTEL_INFO(dev)->gen >= 8)
- gen8_record_semaphore_state(dev_priv, error, ring, ering);
+ gen8_record_semaphore_state(dev_priv, error, engine,
+ ering);
else
- gen6_record_semaphore_state(dev_priv, ring, ering);
+ gen6_record_semaphore_state(dev_priv, engine, ering);
}
if (INTEL_INFO(dev)->gen >= 4) {
- ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
- ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
- ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
- ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
- ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
- ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
+ ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+ ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
+ ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+ ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
+ ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
+ ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
if (INTEL_INFO(dev)->gen >= 8) {
- ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
- ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+ ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
+ ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
}
- ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
+ ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
} else {
ering->faddr = I915_READ(DMA_FADD_I8XX);
ering->ipeir = I915_READ(IPEIR);
@@ -904,20 +929,21 @@ static void i915_record_ring_state(struct drm_device *dev,
ering->instdone = I915_READ(GEN2_INSTDONE);
}
- ering->waiting = waitqueue_active(&ring->irq_queue);
- ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
- ering->seqno = ring->get_seqno(ring, false);
- ering->acthd = intel_ring_get_active_head(ring);
- ering->start = I915_READ_START(ring);
- ering->head = I915_READ_HEAD(ring);
- ering->tail = I915_READ_TAIL(ring);
- ering->ctl = I915_READ_CTL(ring);
+ ering->waiting = waitqueue_active(&engine->irq_queue);
+ ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+ ering->acthd = intel_ring_get_active_head(engine);
+ ering->seqno = engine->get_seqno(engine);
+ ering->last_seqno = engine->last_submitted_seqno;
+ ering->start = I915_READ_START(engine);
+ ering->head = I915_READ_HEAD(engine);
+ ering->tail = I915_READ_TAIL(engine);
+ ering->ctl = I915_READ_CTL(engine);
if (I915_NEED_GFX_HWS(dev)) {
i915_reg_t mmio;
if (IS_GEN7(dev)) {
- switch (ring->id) {
+ switch (engine->id) {
default:
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
@@ -932,51 +958,51 @@ static void i915_record_ring_state(struct drm_device *dev,
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(ring->dev)) {
- mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else if (IS_GEN6(engine->dev)) {
+ mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
- mmio = RING_HWS_PGA(ring->mmio_base);
+ mmio = RING_HWS_PGA(engine->mmio_base);
}
ering->hws = I915_READ(mmio);
}
- ering->hangcheck_score = ring->hangcheck.score;
- ering->hangcheck_action = ring->hangcheck.action;
+ ering->hangcheck_score = engine->hangcheck.score;
+ ering->hangcheck_action = engine->hangcheck.action;
if (USES_PPGTT(dev)) {
int i;
- ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
+ ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
if (IS_GEN6(dev))
ering->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE_READ(ring));
+ I915_READ(RING_PP_DIR_BASE_READ(engine));
else if (IS_GEN7(dev))
ering->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE(ring));
+ I915_READ(RING_PP_DIR_BASE(engine));
else if (INTEL_INFO(dev)->gen >= 8)
for (i = 0; i < 4; i++) {
ering->vm_info.pdp[i] =
- I915_READ(GEN8_RING_PDP_UDW(ring, i));
+ I915_READ(GEN8_RING_PDP_UDW(engine, i));
ering->vm_info.pdp[i] <<= 32;
ering->vm_info.pdp[i] |=
- I915_READ(GEN8_RING_PDP_LDW(ring, i));
+ I915_READ(GEN8_RING_PDP_LDW(engine, i));
}
}
}
-static void i915_gem_record_active_context(struct intel_engine_cs *ring,
+static void i915_gem_record_active_context(struct intel_engine_cs *engine,
struct drm_i915_error_state *error,
struct drm_i915_error_ring *ering)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
struct drm_i915_gem_object *obj;
/* Currently render ring is the only HW context user */
- if (ring->id != RCS || !error->ccid)
+ if (engine->id != RCS || !error->ccid)
return;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@@ -993,30 +1019,31 @@ static void i915_gem_record_active_context(struct intel_engine_cs *ring,
static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_request *request;
int i, count;
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct intel_engine_cs *ring = &dev_priv->ring[i];
+ for (i = 0; i < I915_NUM_ENGINES; i++) {
+ struct intel_engine_cs *engine = &dev_priv->engine[i];
struct intel_ringbuffer *rbuf;
error->ring[i].pid = -1;
- if (ring->dev == NULL)
+ if (engine->dev == NULL)
continue;
error->ring[i].valid = true;
- i915_record_ring_state(dev, error, ring, &error->ring[i]);
+ i915_record_ring_state(dev, error, engine, &error->ring[i]);
- request = i915_gem_find_active_request(ring);
+ request = i915_gem_find_active_request(engine);
if (request) {
struct i915_address_space *vm;
vm = request->ctx && request->ctx->ppgtt ?
&request->ctx->ppgtt->base :
- &dev_priv->gtt.base;
+ &ggtt->base;
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
@@ -1027,10 +1054,10 @@ static void i915_gem_record_rings(struct drm_device *dev,
request->batch_obj,
vm);
- if (HAS_BROKEN_CS_TLB(dev_priv->dev))
+ if (HAS_BROKEN_CS_TLB(dev_priv))
error->ring[i].wa_batchbuffer =
i915_error_ggtt_object_create(dev_priv,
- ring->scratch.obj);
+ engine->scratch.obj);
if (request->pid) {
struct task_struct *task;
@@ -1052,11 +1079,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
* executed).
*/
if (request)
- rbuf = request->ctx->engine[ring->id].ringbuf;
+ rbuf = request->ctx->engine[engine->id].ringbuf;
else
- rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
+ rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
} else
- rbuf = ring->buffer;
+ rbuf = engine->buffer;
error->ring[i].cpu_ring_head = rbuf->head;
error->ring[i].cpu_ring_tail = rbuf->tail;
@@ -1065,12 +1092,19 @@ static void i915_gem_record_rings(struct drm_device *dev,
i915_error_ggtt_object_create(dev_priv, rbuf->obj);
error->ring[i].hws_page =
- i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
+ i915_error_ggtt_object_create(dev_priv,
+ engine->status_page.obj);
+
+ if (engine->wa_ctx.obj) {
+ error->ring[i].wa_ctx =
+ i915_error_ggtt_object_create(dev_priv,
+ engine->wa_ctx.obj);
+ }
- i915_gem_record_active_context(ring, error, &error->ring[i]);
+ i915_gem_record_active_context(engine, error, &error->ring[i]);
count = 0;
- list_for_each_entry(request, &ring->request_list, list)
+ list_for_each_entry(request, &engine->request_list, list)
count++;
error->ring[i].num_requests = count;
@@ -1083,7 +1117,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
}
count = 0;
- list_for_each_entry(request, &ring->request_list, list) {
+ list_for_each_entry(request, &engine->request_list, list) {
struct drm_i915_error_request *erq;
if (count >= error->ring[i].num_requests) {
@@ -1272,7 +1306,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
static void i915_error_capture_msg(struct drm_device *dev,
struct drm_i915_error_state *error,
- bool wedged,
+ u32 engine_mask,
const char *error_msg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1295,7 +1329,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
", reason: %s, action: %s",
error_msg,
- wedged ? "reset" : "continue");
+ engine_mask ? "reset" : "continue");
}
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
@@ -1318,7 +1352,7 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
* out a structure which becomes available in debugfs for user level tools
* to pick up.
*/
-void i915_capture_error_state(struct drm_device *dev, bool wedged,
+void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
const char *error_msg)
{
static bool warned;
@@ -1346,7 +1380,7 @@ void i915_capture_error_state(struct drm_device *dev, bool wedged,
error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev);
- i915_error_capture_msg(dev, error, wedged, error_msg);
+ i915_error_capture_msg(dev, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index e4ba58222..80786d9f9 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -27,9 +27,12 @@
/* Definitions of GuC H/W registers, bits, etc */
#define GUC_STATUS _MMIO(0xc000)
+#define GS_RESET_SHIFT 0
+#define GS_MIA_IN_RESET (0x01 << GS_RESET_SHIFT)
#define GS_BOOTROM_SHIFT 1
#define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT)
#define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT)
+#define GS_BOOTROM_JUMP_PASSED (0x76 << GS_BOOTROM_SHIFT)
#define GS_UKERNEL_SHIFT 8
#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT)
@@ -37,7 +40,13 @@
#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT)
#define GS_MIA_SHIFT 16
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
-#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
+#define GS_MIA_CORE_STATE (0x01 << GS_MIA_SHIFT)
+#define GS_MIA_HALT_REQUESTED (0x02 << GS_MIA_SHIFT)
+#define GS_MIA_ISR_ENTRY (0x04 << GS_MIA_SHIFT)
+#define GS_AUTH_STATUS_SHIFT 30
+#define GS_AUTH_STATUS_MASK (0x03 << GS_AUTH_STATUS_SHIFT)
+#define GS_AUTH_STATUS_BAD (0x01 << GS_AUTH_STATUS_SHIFT)
+#define GS_AUTH_STATUS_GOOD (0x02 << GS_AUTH_STATUS_SHIFT)
#define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4)
#define SOFT_SCRATCH_COUNT 16
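The new GUC_STATUS fields give firmware-load code enough to distinguish a successful boot from a stuck or failed one. A hedged sketch of a status check built only from the definitions above (the helper name and the exact success policy are assumptions, not upstream code):

    /* Hypothetical helper: decode GUC_STATUS with the new fields. */
    static bool guc_ucode_ready(struct drm_i915_private *dev_priv)
    {
            u32 status = I915_READ(GUC_STATUS);

            /* The MIA core must be out of reset... */
            if (status & GS_MIA_IN_RESET)
                    return false;

            /* ...and the RSA signature check must not have failed. */
            if ((status & GS_AUTH_STATUS_MASK) == GS_AUTH_STATUS_BAD)
                    return false;

            /* Boot ROM handed off, or the uKernel reports ready. */
            return (status & GS_BOOTROM_MASK) == GS_BOOTROM_JUMP_PASSED ||
                   (status & GS_UKERNEL_MASK) == GS_UKERNEL_READY;
    }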
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index d7543efc8..d40c13fb6 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -179,15 +179,11 @@ static void guc_init_doorbell(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct guc_doorbell_info *doorbell;
- void *base;
- base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
- doorbell = base + client->doorbell_offset;
+ doorbell = client->client_base + client->doorbell_offset;
- doorbell->db_status = 1;
+ doorbell->db_status = GUC_DOORBELL_ENABLED;
doorbell->cookie = 0;
-
- kunmap_atomic(base);
}
static int guc_ring_doorbell(struct i915_guc_client *gc)
@@ -195,11 +191,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
struct guc_process_desc *desc;
union guc_doorbell_qw db_cmp, db_exc, db_ret;
union guc_doorbell_qw *db;
- void *base;
int attempt = 2, ret = -EAGAIN;
- base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
- desc = base + gc->proc_desc_offset;
+ desc = gc->client_base + gc->proc_desc_offset;
/* Update the tail so it is visible to GuC */
desc->tail = gc->wq_tail;
@@ -215,7 +209,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
/* pointer of current doorbell cacheline */
- db = base + gc->doorbell_offset;
+ db = gc->client_base + gc->doorbell_offset;
while (attempt--) {
/* let's ring the doorbell */
@@ -244,10 +238,6 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
}
- /* Finally, update the cached copy of the GuC's WQ head */
- gc->wq_head = desc->head;
-
- kunmap_atomic(base);
return ret;
}
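With client_base mapped persistently, ringing the doorbell reduces to a 64-bit compare-and-exchange on the doorbell cacheline. A sketch of one attempt, assuming union guc_doorbell_qw packs {db_status, cookie} into a single value_qw, and reusing the locals declared above (cookie stands in for the client's cached cookie value):

    db_cmp.db_status = GUC_DOORBELL_ENABLED;  /* expected current state */
    db_cmp.cookie = cookie;                   /* cookie we last wrote */
    db_exc.db_status = GUC_DOORBELL_ENABLED;  /* state stays enabled */
    db_exc.cookie = cookie + 1;               /* bump to signal the GuC */

    db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
                                       db_cmp.value_qw,
                                       db_exc.value_qw);
    if (db_ret.value_qw == db_cmp.value_qw)
            ret = 0;                          /* the GuC saw our update */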
@@ -256,16 +246,12 @@ static void guc_disable_doorbell(struct intel_guc *guc,
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct guc_doorbell_info *doorbell;
- void *base;
i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
int value;
- base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
- doorbell = base + client->doorbell_offset;
+ doorbell = client->client_base + client->doorbell_offset;
- doorbell->db_status = 0;
-
- kunmap_atomic(base);
+ doorbell->db_status = GUC_DOORBELL_DISABLED;
I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);
@@ -341,10 +327,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct guc_process_desc *desc;
- void *base;
- base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
- desc = base + client->proc_desc_offset;
+ desc = client->client_base + client->proc_desc_offset;
memset(desc, 0, sizeof(*desc));
@@ -361,8 +345,6 @@ static void guc_init_proc_desc(struct intel_guc *guc,
desc->wq_size_bytes = client->wq_size;
desc->wq_status = WQ_STATUS_ACTIVE;
desc->priority = client->priority;
-
- kunmap_atomic(base);
}
/*
@@ -376,12 +358,14 @@ static void guc_init_proc_desc(struct intel_guc *guc,
static void guc_init_ctx_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
+ struct drm_i915_gem_object *client_obj = client->client_obj;
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct intel_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
- int i;
+ enum intel_engine_id id;
+ u32 gfx_addr;
memset(&desc, 0, sizeof(desc));
@@ -390,8 +374,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
- for_each_ring(ring, dev_priv, i) {
- struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
+ for_each_engine_id(engine, dev_priv, id) {
+ struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
struct drm_i915_gem_object *obj;
uint64_t ctx_desc;
@@ -402,48 +386,44 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 * for now that owns a GuC client. But any future owner of a GuC
 * client must make sure the lrc is pinned prior to entering here.
*/
- obj = ctx->engine[i].state;
+ obj = ctx->engine[id].state;
if (!obj)
break; /* XXX: continue? */
- ctx_desc = intel_lr_context_descriptor(ctx, ring);
+ ctx_desc = intel_lr_context_descriptor(ctx, engine);
lrc->context_desc = (u32)ctx_desc;
/* The state page is after PPHWSP */
- lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
- LRC_STATE_PN * PAGE_SIZE;
+ gfx_addr = i915_gem_obj_ggtt_offset(obj);
+ lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
- (ring->guc_id << GUC_ELC_ENGINE_OFFSET);
+ (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
- obj = ctx->engine[i].ringbuf->obj;
+ obj = ctx->engine[id].ringbuf->obj;
+ gfx_addr = i915_gem_obj_ggtt_offset(obj);
- lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
- lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
- lrc->ring_next_free_location = lrc->ring_begin;
+ lrc->ring_begin = gfx_addr;
+ lrc->ring_end = gfx_addr + obj->base.size - 1;
+ lrc->ring_next_free_location = gfx_addr;
lrc->ring_current_tail_pointer_value = 0;
- desc.engines_used |= (1 << ring->guc_id);
+ desc.engines_used |= (1 << engine->guc_id);
}
WARN_ON(desc.engines_used == 0);
/*
- * The CPU address is only needed at certain points, so kmap_atomic on
- * demand instead of storing it in the ctx descriptor.
- * XXX: May make debug easier to have it mapped
+ * The doorbell, process descriptor, and workqueue are all parts
+ * of the client object, which the GuC will reference via the GGTT.
*/
- desc.db_trigger_cpu = 0;
- desc.db_trigger_uk = client->doorbell_offset +
- i915_gem_obj_ggtt_offset(client->client_obj);
- desc.db_trigger_phy = client->doorbell_offset +
- sg_dma_address(client->client_obj->pages->sgl);
-
- desc.process_desc = client->proc_desc_offset +
- i915_gem_obj_ggtt_offset(client->client_obj);
-
- desc.wq_addr = client->wq_offset +
- i915_gem_obj_ggtt_offset(client->client_obj);
-
+ gfx_addr = i915_gem_obj_ggtt_offset(client_obj);
+ desc.db_trigger_phy = sg_dma_address(client_obj->pages->sgl) +
+ client->doorbell_offset;
+ desc.db_trigger_cpu = (uintptr_t)client->client_base +
+ client->doorbell_offset;
+ desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
+ desc.process_desc = gfx_addr + client->proc_desc_offset;
+ desc.wq_addr = gfx_addr + client->wq_offset;
desc.wq_size = client->wq_size;
/*
@@ -474,25 +454,16 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
int i915_guc_wq_check_space(struct i915_guc_client *gc)
{
struct guc_process_desc *desc;
- void *base;
u32 size = sizeof(struct guc_wq_item);
int ret = -ETIMEDOUT, timeout_counter = 200;
if (!gc)
return 0;
- /* Quickly return if wq space is available since last time we cache the
- * head position. */
- if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
- return 0;
-
- base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
- desc = base + gc->proc_desc_offset;
+ desc = gc->client_base + gc->proc_desc_offset;
while (timeout_counter-- > 0) {
- gc->wq_head = desc->head;
-
- if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
+ if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
ret = 0;
break;
}
@@ -501,19 +472,19 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
usleep_range(1000, 2000);
};
- kunmap_atomic(base);
-
return ret;
}
static int guc_add_workqueue_item(struct i915_guc_client *gc,
struct drm_i915_gem_request *rq)
{
+ struct guc_process_desc *desc;
struct guc_wq_item *wqi;
void *base;
u32 tail, wq_len, wq_off, space;
- space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+ desc = gc->client_base + gc->proc_desc_offset;
+ space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
if (WARN_ON(space < sizeof(struct guc_wq_item)))
return -ENOSPC; /* shouldn't happen */
@@ -542,11 +513,12 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
wqi->header = WQ_TYPE_INORDER |
(wq_len << WQ_LEN_SHIFT) |
- (rq->ring->guc_id << WQ_TARGET_SHIFT) |
+ (rq->engine->guc_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
/* The GuC wants only the low-order word of the context descriptor */
- wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);
+ wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
+ rq->engine);
/* The GuC firmware wants the tail index in QWords, not bytes */
tail = rq->ringbuf->tail >> 3;
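Both workqueue space checks now compute CIRC_SPACE() against the GuC's live desc->head rather than a stale cached copy. A standalone demonstration of the arithmetic, with the macros as defined in linux/circ_buf.h (the buffer size must be a power of two):

    #include <stdio.h>

    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    int main(void)
    {
            unsigned size = 64;  /* hypothetical wq_size, power of two */
            unsigned tail = 40;  /* producer index: the driver's wq_tail */
            unsigned head = 8;   /* consumer index: the GuC's desc->head */

            /* Bytes the driver may fill without overrunning the GuC. */
            printf("space = %u\n", CIRC_SPACE(tail, head, size)); /* 31 */
            return 0;
    }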
@@ -569,7 +541,7 @@ int i915_guc_submit(struct i915_guc_client *client,
struct drm_i915_gem_request *rq)
{
struct intel_guc *guc = client->guc;
- unsigned int engine_id = rq->ring->guc_id;
+ unsigned int engine_id = rq->engine->guc_id;
int q_ret, b_ret;
q_ret = guc_add_workqueue_item(client, rq);
@@ -660,21 +632,28 @@ static void guc_client_free(struct drm_device *dev,
if (!client)
return;
- if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
- /*
- * First disable the doorbell, then tell the GuC we've
- * finished with it, finally deallocate it in our bitmap
- */
- guc_disable_doorbell(guc, client);
- host2guc_release_doorbell(guc, client);
- release_doorbell(guc, client->doorbell_id);
- }
-
/*
* XXX: wait for any outstanding submissions before freeing memory.
* Be sure to drop any locks
*/
+ if (client->client_base) {
+ /*
+ * If we got as far as setting up a doorbell, make sure
+ * we shut it down before unmapping & deallocating the
+ * memory. So first disable the doorbell, then tell the
+ * GuC that we've finished with it, finally deallocate
+ * it in our bitmap
+ */
+ if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
+ guc_disable_doorbell(guc, client);
+ host2guc_release_doorbell(guc, client);
+ release_doorbell(guc, client->doorbell_id);
+ }
+
+ kunmap(kmap_to_page(client->client_base));
+ }
+
gem_release_guc_obj(client->client_obj);
if (client->ctx_index != GUC_INVALID_CTX_ID) {
@@ -695,7 +674,7 @@ static void guc_client_free(struct drm_device *dev,
* @ctx: the context that owns the client (we use the default render
* context)
*
- * Return: An i915_guc_client object if success.
+ * Return: An i915_guc_client object on success, else NULL.
*/
static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
uint32_t priority,
@@ -727,7 +706,9 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
if (!obj)
goto err;
+ /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
client->client_obj = obj;
+ client->client_base = kmap(i915_gem_object_get_page(obj, 0));
client->wq_offset = GUC_DB_SIZE;
client->wq_size = GUC_WQ_SIZE;
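client_base is a persistent CPU mapping of the first page of the client object, established once here and torn down with kunmap(kmap_to_page(...)) in guc_client_free() above. A sketch of that lifecycle with hypothetical helper names (guc_map_client_page0/guc_unmap_client_page0 are illustrative, not upstream symbols):

    /* At client creation: pin a persistent mapping of page 0. */
    static void *guc_map_client_page0(struct drm_i915_gem_object *obj)
    {
            return kmap(i915_gem_object_get_page(obj, 0));
    }

    /* At teardown: recover the struct page from the mapping, unmap it. */
    static void guc_unmap_client_page0(void *client_base)
    {
            kunmap(kmap_to_page(client_base));
    }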
@@ -839,9 +820,9 @@ static void guc_create_ads(struct intel_guc *guc)
struct guc_ads *ads;
struct guc_policies *policies;
struct guc_mmio_reg_state *reg_state;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct page *page;
- u32 size, i;
+ u32 size;
/* The ads obj includes the struct itself and buffers passed to GuC */
size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
@@ -867,11 +848,11 @@ static void guc_create_ads(struct intel_guc *guc)
* so its address won't change after we've told the GuC where
* to find it.
*/
- ring = &dev_priv->ring[RCS];
- ads->golden_context_lrca = ring->status_page.gfx_addr;
+ engine = &dev_priv->engine[RCS];
+ ads->golden_context_lrca = engine->status_page.gfx_addr;
- for_each_ring(ring, dev_priv, i)
- ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
+ for_each_engine(engine, dev_priv)
+ ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
/* GuC scheduling policies */
policies = (void *)ads + sizeof(struct guc_ads);
@@ -883,12 +864,12 @@ static void guc_create_ads(struct intel_guc *guc)
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
- for_each_ring(ring, dev_priv, i) {
- reg_state->mmio_white_list[ring->guc_id].mmio_start =
- ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
+ for_each_engine(engine, dev_priv) {
+ reg_state->mmio_white_list[engine->guc_id].mmio_start =
+ engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
- reg_state->mmio_white_list[ring->guc_id].count = 0;
+ reg_state->mmio_white_list[engine->guc_id].count = 0;
}
ads->reg_state_addr = ads->scheduler_policies +
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d1a46ef5a..aab47f7bb 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -994,14 +994,15 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
return;
}
-static void notify_ring(struct intel_engine_cs *ring)
+static void notify_ring(struct intel_engine_cs *engine)
{
- if (!intel_ring_initialized(ring))
+ if (!intel_engine_initialized(engine))
return;
- trace_i915_gem_request_notify(ring);
+ trace_i915_gem_request_notify(engine);
+ engine->user_interrupts++;
- wake_up_all(&ring->irq_queue);
+ wake_up_all(&engine->irq_queue);
}
static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1079,11 +1080,10 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
static bool any_waiters(struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
- for_each_ring(ring, dev_priv, i)
- if (ring->irq_refcount)
+ for_each_engine(engine, dev_priv)
+ if (engine->irq_refcount)
return true;
return false;
@@ -1219,7 +1219,7 @@ static void ivybridge_parity_work(struct work_struct *work)
i915_reg_t reg;
slice--;
- if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+ if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
break;
dev_priv->l3_parity.which_slice &= ~(1<<slice);
@@ -1258,24 +1258,23 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
spin_lock_irq(&dev_priv->irq_lock);
- gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+ gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
spin_unlock_irq(&dev_priv->irq_lock);
mutex_unlock(&dev_priv->dev->struct_mutex);
}
-static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
+static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
+ u32 iir)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_L3_DPF(dev))
+ if (!HAS_L3_DPF(dev_priv))
return;
spin_lock(&dev_priv->irq_lock);
- gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+ gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
spin_unlock(&dev_priv->irq_lock);
- iir &= GT_PARITY_ERROR(dev);
+ iir &= GT_PARITY_ERROR(dev_priv);
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
dev_priv->l3_parity.which_slice |= 1 << 1;
@@ -1285,102 +1284,85 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
-static void ilk_gt_irq_handler(struct drm_device *dev,
- struct drm_i915_private *dev_priv,
+static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir &
(GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
- notify_ring(&dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->engine[RCS]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[VCS]);
+ notify_ring(&dev_priv->engine[VCS]);
}
-static void snb_gt_irq_handler(struct drm_device *dev,
- struct drm_i915_private *dev_priv,
+static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir &
(GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
- notify_ring(&dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->engine[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[VCS]);
+ notify_ring(&dev_priv->engine[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[BCS]);
+ notify_ring(&dev_priv->engine[BCS]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
- if (gt_iir & GT_PARITY_ERROR(dev))
- ivybridge_parity_error_irq_handler(dev, gt_iir);
+ if (gt_iir & GT_PARITY_ERROR(dev_priv))
+ ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}
static __always_inline void
-gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
+gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
- notify_ring(ring);
+ notify_ring(engine);
if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
- intel_lrc_irq_handler(ring);
+ tasklet_schedule(&engine->irq_tasklet);
}
-static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
- u32 master_ctl)
+static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
+ u32 master_ctl,
+ u32 gt_iir[4])
{
irqreturn_t ret = IRQ_NONE;
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
- if (iir) {
- I915_WRITE_FW(GEN8_GT_IIR(0), iir);
+ gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
+ if (gt_iir[0]) {
+ I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
ret = IRQ_HANDLED;
-
- gen8_cs_irq_handler(&dev_priv->ring[RCS],
- iir, GEN8_RCS_IRQ_SHIFT);
-
- gen8_cs_irq_handler(&dev_priv->ring[BCS],
- iir, GEN8_BCS_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
- u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
- if (iir) {
- I915_WRITE_FW(GEN8_GT_IIR(1), iir);
+ gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
+ if (gt_iir[1]) {
+ I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
ret = IRQ_HANDLED;
-
- gen8_cs_irq_handler(&dev_priv->ring[VCS],
- iir, GEN8_VCS1_IRQ_SHIFT);
-
- gen8_cs_irq_handler(&dev_priv->ring[VCS2],
- iir, GEN8_VCS2_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
- u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
- if (iir) {
- I915_WRITE_FW(GEN8_GT_IIR(3), iir);
+ gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
+ if (gt_iir[3]) {
+ I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
ret = IRQ_HANDLED;
-
- gen8_cs_irq_handler(&dev_priv->ring[VECS],
- iir, GEN8_VECS_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
if (master_ctl & GEN8_GT_PM_IRQ) {
- u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
- if (iir & dev_priv->pm_rps_events) {
+ gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
+ if (gt_iir[2] & dev_priv->pm_rps_events) {
I915_WRITE_FW(GEN8_GT_IIR(2),
- iir & dev_priv->pm_rps_events);
+ gt_iir[2] & dev_priv->pm_rps_events);
ret = IRQ_HANDLED;
- gen6_rps_irq_handler(dev_priv, iir);
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
@@ -1388,6 +1370,31 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
return ret;
}
+static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
+ u32 gt_iir[4])
+{
+ if (gt_iir[0]) {
+ gen8_cs_irq_handler(&dev_priv->engine[RCS],
+ gt_iir[0], GEN8_RCS_IRQ_SHIFT);
+ gen8_cs_irq_handler(&dev_priv->engine[BCS],
+ gt_iir[0], GEN8_BCS_IRQ_SHIFT);
+ }
+
+ if (gt_iir[1]) {
+ gen8_cs_irq_handler(&dev_priv->engine[VCS],
+ gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
+ gen8_cs_irq_handler(&dev_priv->engine[VCS2],
+ gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
+ }
+
+ if (gt_iir[3])
+ gen8_cs_irq_handler(&dev_priv->engine[VECS],
+ gt_iir[3], GEN8_VECS_IRQ_SHIFT);
+
+ if (gt_iir[2] & dev_priv->pm_rps_events)
+ gen6_rps_irq_handler(dev_priv, gt_iir[2]);
+}
+
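Splitting ack from handling lets callers clear the IIR registers while the master interrupt is disabled, and only run the (potentially heavier) per-engine handlers once it is re-enabled. The assumed caller pattern, mirroring gen8_irq_handler and cherryview_irq_handler further down in this patch:

    u32 gt_iir[4] = {};

    I915_WRITE_FW(GEN8_MASTER_IRQ, 0);                   /* mask master */
    ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); /* read+clear IIRs */
    I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); /* unmask */
    gen8_gt_irq_handler(dev_priv, gt_iir);               /* act on cached bits */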
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
switch (port) {
@@ -1627,9 +1634,9 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (INTEL_INFO(dev_priv)->gen >= 8)
return;
- if (HAS_VEBOX(dev_priv->dev)) {
+ if (HAS_VEBOX(dev_priv)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[VECS]);
+ notify_ring(&dev_priv->engine[VECS]);
if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
@@ -1644,10 +1651,10 @@ static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
return true;
}
-static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
+static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
+ u32 pipe_stats[I915_MAX_PIPES])
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pipe_stats[I915_MAX_PIPES] = { };
int pipe;
spin_lock(&dev_priv->irq_lock);
@@ -1701,6 +1708,13 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
I915_WRITE(reg, pipe_stats[pipe]);
}
spin_unlock(&dev_priv->irq_lock);
+}
+
+static void valleyview_pipestat_irq_handler(struct drm_device *dev,
+ u32 pipe_stats[I915_MAX_PIPES])
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@ -1723,21 +1737,20 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
gmbus_irq_handler(dev);
}
-static void i9xx_hpd_irq_handler(struct drm_device *dev)
+static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
- u32 pin_mask = 0, long_mask = 0;
- if (!hotplug_status)
- return;
+ if (hotplug_status)
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- /*
- * Make sure hotplug status is cleared before we clear IIR, or else we
- * may miss hotplug events.
- */
- POSTING_READ(PORT_HOTPLUG_STAT);
+ return hotplug_status;
+}
+
+static void i9xx_hpd_irq_handler(struct drm_device *dev,
+ u32 hotplug_status)
+{
+ u32 pin_mask = 0, long_mask = 0;
if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
@@ -1768,7 +1781,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 iir, gt_iir, pm_iir;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1777,40 +1789,72 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- while (true) {
- /* Find, clear, then process each source of interrupt */
+ do {
+ u32 iir, gt_iir, pm_iir;
+ u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 hotplug_status = 0;
+ u32 ier = 0;
gt_iir = I915_READ(GTIIR);
- if (gt_iir)
- I915_WRITE(GTIIR, gt_iir);
-
pm_iir = I915_READ(GEN6_PMIIR);
- if (pm_iir)
- I915_WRITE(GEN6_PMIIR, pm_iir);
-
iir = I915_READ(VLV_IIR);
- if (iir) {
- /* Consume port before clearing IIR or we'll miss events */
- if (iir & I915_DISPLAY_PORT_INTERRUPT)
- i9xx_hpd_irq_handler(dev);
- I915_WRITE(VLV_IIR, iir);
- }
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
- goto out;
+ break;
ret = IRQ_HANDLED;
+ /*
+ * Theory on interrupt generation, based on empirical evidence:
+ *
+ * x = ((VLV_IIR & VLV_IER) ||
+ * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
+ * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
+ *
+ * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
+ * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
+ * guarantee the CPU interrupt will be raised again even if we
+ * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
+ * bits this time around.
+ */
+ I915_WRITE(VLV_MASTER_IER, 0);
+ ier = I915_READ(VLV_IER);
+ I915_WRITE(VLV_IER, 0);
+
if (gt_iir)
- snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ I915_WRITE(GTIIR, gt_iir);
if (pm_iir)
- gen6_rps_irq_handler(dev_priv, pm_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+
/* Call regardless, as some status bits might not be
* signalled in iir */
- valleyview_pipestat_irq_handler(dev, iir);
- }
+ valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
+
+ /*
+ * VLV_IIR is single buffered, and reflects the level
+ * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
+ */
+ if (iir)
+ I915_WRITE(VLV_IIR, iir);
+
+ I915_WRITE(VLV_IER, ier);
+ I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+ POSTING_READ(VLV_MASTER_IER);
+
+ if (gt_iir)
+ snb_gt_irq_handler(dev_priv, gt_iir);
+ if (pm_iir)
+ gen6_rps_irq_handler(dev_priv, pm_iir);
+
+ if (hotplug_status)
+ i9xx_hpd_irq_handler(dev, hotplug_status);
+
+ valleyview_pipestat_irq_handler(dev, pipe_stats);
+ } while (0);
-out:
enable_rpm_wakeref_asserts(dev_priv);
return ret;
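Per the "theory" comment above, the hardware only raises a CPU interrupt on a 0->1 edge of the combined condition. A compact restatement of why the handler zeroes VLV_IER and the master enable first (pure sketch, variable names illustrative):

    x = (vlv_iir & vlv_ier) ||
        (((gt_iir & gt_ier) || (pm_iir & pm_ier)) &&
         (vlv_master_ier & MASTER_INTERRUPT_ENABLE));

    /* Clearing VLV_IER and MASTER_INTERRUPT_ENABLE forces x to 0, so
     * restoring them afterwards guarantees a fresh 0->1 edge even if
     * some IIR bits were left set by this pass. */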
@@ -1820,7 +1864,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 master_ctl, iir;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -1829,7 +1872,13 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
- for (;;) {
+ do {
+ u32 master_ctl, iir;
+ u32 gt_iir[4] = {};
+ u32 pipe_stats[I915_MAX_PIPES] = {};
+ u32 hotplug_status = 0;
+ u32 ier = 0;
+
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR);
@@ -1838,26 +1887,50 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
+ /*
+ * Theory on interrupt generation, based on empirical evidence:
+ *
+ * x = ((VLV_IIR & VLV_IER) ||
+ * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
+ * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
+ *
+ * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
+ * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
+ * guarantee the CPU interrupt will be raised again even if we
+ * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
+ * bits this time around.
+ */
I915_WRITE(GEN8_MASTER_IRQ, 0);
+ ier = I915_READ(VLV_IER);
+ I915_WRITE(VLV_IER, 0);
- /* Find, clear, then process each source of interrupt */
-
- if (iir) {
- /* Consume port before clearing IIR or we'll miss events */
- if (iir & I915_DISPLAY_PORT_INTERRUPT)
- i9xx_hpd_irq_handler(dev);
- I915_WRITE(VLV_IIR, iir);
- }
+ gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
- gen8_gt_irq_handler(dev_priv, master_ctl);
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ hotplug_status = i9xx_hpd_irq_ack(dev_priv);
/* Call regardless, as some status bits might not be
* signalled in iir */
- valleyview_pipestat_irq_handler(dev, iir);
+ valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
- I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+ /*
+ * VLV_IIR is single buffered, and reflects the level
+ * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
+ */
+ if (iir)
+ I915_WRITE(VLV_IIR, iir);
+
+ I915_WRITE(VLV_IER, ier);
+ I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
- }
+
+ gen8_gt_irq_handler(dev_priv, gt_iir);
+
+ if (hotplug_status)
+ i9xx_hpd_irq_handler(dev, hotplug_status);
+
+ valleyview_pipestat_irq_handler(dev, pipe_stats);
+ } while (0);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2217,9 +2290,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
if (INTEL_INFO(dev)->gen >= 6)
- snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ snb_gt_irq_handler(dev_priv, gt_iir);
else
- ilk_gt_irq_handler(dev, dev_priv, gt_iir);
+ ilk_gt_irq_handler(dev_priv, gt_iir);
}
de_iir = I915_READ(DEIIR);
@@ -2398,7 +2471,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (HAS_PCH_SPT(dev_priv))
+ if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
spt_irq_handler(dev, iir);
else
cpt_irq_handler(dev, iir);
@@ -2419,6 +2492,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 master_ctl;
+ u32 gt_iir[4] = {};
irqreturn_t ret;
if (!intel_irqs_enabled(dev_priv))
@@ -2435,7 +2509,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
disable_rpm_wakeref_asserts(dev_priv);
/* Find, clear, then process each source of interrupt */
- ret = gen8_gt_irq_handler(dev_priv, master_ctl);
+ ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+ gen8_gt_irq_handler(dev_priv, gt_iir);
ret |= gen8_de_irq_handler(dev_priv, master_ctl);
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -2449,8 +2524,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
bool reset_completed)
{
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
/*
* Notify all waiters for GPU completion events that reset state has
@@ -2460,8 +2534,8 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
*/
/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
- for_each_ring(ring, dev_priv, i)
- wake_up_all(&ring->irq_queue);
+ for_each_engine(engine, dev_priv)
+ wake_up_all(&engine->irq_queue);
/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
wake_up_all(&dev_priv->pending_flip_queue);
@@ -2484,7 +2558,6 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
static void i915_reset_and_wakeup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_gpu_error *error = &dev_priv->gpu_error;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
@@ -2502,7 +2575,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
* the reset in-progress bit is only ever set by code outside of this
* work we don't need to worry about any other races.
*/
- if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
+ if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
reset_event);
@@ -2530,25 +2603,9 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
intel_runtime_pm_put(dev_priv);
- if (ret == 0) {
- /*
- * After all the gem state is reset, increment the reset
- * counter and wake up everyone waiting for the reset to
- * complete.
- *
- * Since unlock operations are a one-sided barrier only,
- * we need to insert a barrier here to order any seqno
- * updates before
- * the counter increment.
- */
- smp_mb__before_atomic();
- atomic_inc(&dev_priv->gpu_error.reset_counter);
-
+ if (ret == 0)
kobject_uevent_env(&dev->primary->kdev->kobj,
KOBJ_CHANGE, reset_done_event);
- } else {
- atomic_or(I915_WEDGED, &error->reset_counter);
- }
/*
* Note: The wake_up also serves as a memory barrier so that
@@ -2653,14 +2710,14 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
/**
* i915_handle_error - handle a gpu error
* @dev: drm device
- *
+ * @engine_mask: mask representing engines that are hung
+ *
* Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-void i915_handle_error(struct drm_device *dev, bool wedged,
+void i915_handle_error(struct drm_device *dev, u32 engine_mask,
const char *fmt, ...)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2671,10 +2728,10 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
vscnprintf(error_msg, sizeof(error_msg), fmt, args);
va_end(args);
- i915_capture_error_state(dev, wedged, error_msg);
+ i915_capture_error_state(dev, engine_mask, error_msg);
i915_report_and_clear_eir(dev);
- if (wedged) {
+ if (engine_mask) {
atomic_or(I915_RESET_IN_PROGRESS_FLAG,
&dev_priv->gpu_error.reset_counter);
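With the bool replaced by a mask, a zero mask still means "log but continue", and any non-zero mask requests a reset while recording which engines were hung. Hypothetical call sites (the first form appears in the hangcheck changes below; the second is an assumption of how hang reporting would use the mask):

    /* No engine blamed: capture state but keep running. */
    i915_handle_error(dev, 0, "Kicking stuck wait on %s", engine->name);

    /* Blame one engine and trigger reset handling. */
    i915_handle_error(dev, intel_engine_flag(engine),
                      "%s hung", engine->name);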
@@ -2805,10 +2862,10 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
}
static bool
-ring_idle(struct intel_engine_cs *ring, u32 seqno)
+ring_idle(struct intel_engine_cs *engine, u32 seqno)
{
- return (list_empty(&ring->request_list) ||
- i915_seqno_passed(seqno, ring->last_submitted_seqno));
+ return i915_seqno_passed(seqno,
+ READ_ONCE(engine->last_submitted_seqno));
}
static bool
@@ -2824,42 +2881,42 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
}
static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
+ u64 offset)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
struct intel_engine_cs *signaller;
- int i;
- if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
- for_each_ring(signaller, dev_priv, i) {
- if (ring == signaller)
+ if (INTEL_INFO(dev_priv)->gen >= 8) {
+ for_each_engine(signaller, dev_priv) {
+ if (engine == signaller)
continue;
- if (offset == signaller->semaphore.signal_ggtt[ring->id])
+ if (offset == signaller->semaphore.signal_ggtt[engine->id])
return signaller;
}
} else {
u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
- for_each_ring(signaller, dev_priv, i) {
- if(ring == signaller)
+ for_each_engine(signaller, dev_priv) {
+ if (engine == signaller)
continue;
- if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
+ if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
return signaller;
}
}
DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
- ring->id, ipehr, offset);
+ engine->id, ipehr, offset);
return NULL;
}
static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
+semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
u32 cmd, ipehr, head;
u64 offset = 0;
int i, backwards;
@@ -2881,11 +2938,11 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
* Therefore, this function does not support execlist mode in its
* current form. Just return NULL and move on.
*/
- if (ring->buffer == NULL)
+ if (engine->buffer == NULL)
return NULL;
- ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
- if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
+ ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
+ if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
return NULL;
/*
@@ -2896,8 +2953,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 * point at a batch, and semaphores are always emitted into the
* ringbuffer itself.
*/
- head = I915_READ_HEAD(ring) & HEAD_ADDR;
- backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
+ head = I915_READ_HEAD(engine) & HEAD_ADDR;
+ backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
for (i = backwards; i; --i) {
/*
@@ -2905,10 +2962,10 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
* our ring is smaller than what the hardware (and hence
* HEAD_ADDR) allows. Also handles wrap-around.
*/
- head &= ring->buffer->size - 1;
+ head &= engine->buffer->size - 1;
/* This here seems to blow up */
- cmd = ioread32(ring->buffer->virtual_start + head);
+ cmd = ioread32(engine->buffer->virtual_start + head);
if (cmd == ipehr)
break;
@@ -2918,32 +2975,32 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
if (!i)
return NULL;
- *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
- if (INTEL_INFO(ring->dev)->gen >= 8) {
- offset = ioread32(ring->buffer->virtual_start + head + 12);
+ *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
+ if (INTEL_INFO(engine->dev)->gen >= 8) {
+ offset = ioread32(engine->buffer->virtual_start + head + 12);
offset <<= 32;
- offset = ioread32(ring->buffer->virtual_start + head + 8);
+ offset = ioread32(engine->buffer->virtual_start + head + 8);
}
- return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
+ return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}
-static int semaphore_passed(struct intel_engine_cs *ring)
+static int semaphore_passed(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
struct intel_engine_cs *signaller;
u32 seqno;
- ring->hangcheck.deadlock++;
+ engine->hangcheck.deadlock++;
- signaller = semaphore_waits_for(ring, &seqno);
+ signaller = semaphore_waits_for(engine, &seqno);
if (signaller == NULL)
return -1;
/* Prevent pathological recursion due to driver bugs */
- if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
+ if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
return -1;
- if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
+ if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
return 1;
/* cursory check for an unkickable deadlock */
@@ -2956,23 +3013,22 @@ static int semaphore_passed(struct intel_engine_cs *ring)
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
- for_each_ring(ring, dev_priv, i)
- ring->hangcheck.deadlock = 0;
+ for_each_engine(engine, dev_priv)
+ engine->hangcheck.deadlock = 0;
}
-static bool subunits_stuck(struct intel_engine_cs *ring)
+static bool subunits_stuck(struct intel_engine_cs *engine)
{
u32 instdone[I915_NUM_INSTDONE_REG];
bool stuck;
int i;
- if (ring->id != RCS)
+ if (engine->id != RCS)
return true;
- i915_get_extra_instdone(ring->dev, instdone);
+ i915_get_extra_instdone(engine->dev, instdone);
/* There might be unstable subunit states even when
* actual head is not moving. Filter out the unstable ones by
@@ -2981,49 +3037,44 @@ static bool subunits_stuck(struct intel_engine_cs *ring)
*/
stuck = true;
for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
- const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
+ const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
- if (tmp != ring->hangcheck.instdone[i])
+ if (tmp != engine->hangcheck.instdone[i])
stuck = false;
- ring->hangcheck.instdone[i] |= tmp;
+ engine->hangcheck.instdone[i] |= tmp;
}
return stuck;
}
static enum intel_ring_hangcheck_action
-head_stuck(struct intel_engine_cs *ring, u64 acthd)
+head_stuck(struct intel_engine_cs *engine, u64 acthd)
{
- if (acthd != ring->hangcheck.acthd) {
+ if (acthd != engine->hangcheck.acthd) {
/* Clear subunit states on head movement */
- memset(ring->hangcheck.instdone, 0,
- sizeof(ring->hangcheck.instdone));
-
- if (acthd > ring->hangcheck.max_acthd) {
- ring->hangcheck.max_acthd = acthd;
- return HANGCHECK_ACTIVE;
- }
+ memset(engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
- return HANGCHECK_ACTIVE_LOOP;
+ return HANGCHECK_ACTIVE;
}
- if (!subunits_stuck(ring))
+ if (!subunits_stuck(engine))
return HANGCHECK_ACTIVE;
return HANGCHECK_HUNG;
}
static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+ring_stuck(struct intel_engine_cs *engine, u64 acthd)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_ring_hangcheck_action ha;
u32 tmp;
- ha = head_stuck(ring, acthd);
+ ha = head_stuck(engine, acthd);
if (ha != HANGCHECK_HUNG)
return ha;
@@ -3035,24 +3086,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
* and break the hang. This should work on
* all but the second generation chipsets.
*/
- tmp = I915_READ_CTL(ring);
+ tmp = I915_READ_CTL(engine);
if (tmp & RING_WAIT) {
- i915_handle_error(dev, false,
+ i915_handle_error(dev, 0,
"Kicking stuck wait on %s",
- ring->name);
- I915_WRITE_CTL(ring, tmp);
+ engine->name);
+ I915_WRITE_CTL(engine, tmp);
return HANGCHECK_KICK;
}
if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
- switch (semaphore_passed(ring)) {
+ switch (semaphore_passed(engine)) {
default:
return HANGCHECK_HUNG;
case 1:
- i915_handle_error(dev, false,
+ i915_handle_error(dev, 0,
"Kicking stuck semaphore on %s",
- ring->name);
- I915_WRITE_CTL(ring, tmp);
+ engine->name);
+ I915_WRITE_CTL(engine, tmp);
return HANGCHECK_KICK;
case 0:
return HANGCHECK_WAIT;
@@ -3062,6 +3113,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
return HANGCHECK_HUNG;
}
+static unsigned kick_waiters(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = to_i915(engine->dev);
+ unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
+
+ if (engine->hangcheck.user_interrupts == user_interrupts &&
+ !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
+ if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
+ DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+ engine->name);
+ else
+ DRM_INFO("Fake missed irq on %s\n",
+ engine->name);
+ wake_up_all(&engine->irq_queue);
+ }
+
+ return user_interrupts;
+}
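
kick_waiters() relies on test_and_set_bit() so the missed-irq warning fires only once per engine. A sketch of the same one-shot latch; unlike the kernel helper, this version is not atomic:

#include <stdbool.h>
#include <stdio.h>

static unsigned long missed_irq_rings;

/* One-shot latch: returns true only the first time 'id' is reported,
 * mirroring the test_and_set_bit() guard in kick_waiters() above. */
static bool first_report(int id)
{
	unsigned long bit = 1UL << id;
	bool was_set = missed_irq_rings & bit;

	missed_irq_rings |= bit;
	return !was_set;
}

int main(void)
{
	printf("%d\n", first_report(0));	/* 1: first miss is logged */
	printf("%d\n", first_report(0));	/* 0: suppressed thereafter */
	return 0;
}
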
/*
* This is called when the chip hasn't reported back with completed
* batchbuffers in a long time. We keep track of per-ring seqno progress and
@@ -3076,13 +3145,14 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
container_of(work, typeof(*dev_priv),
gpu_error.hangcheck_work.work);
struct drm_device *dev = dev_priv->dev;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int busy_count = 0, rings_hung = 0;
- bool stuck[I915_NUM_RINGS] = { 0 };
+ bool stuck[I915_NUM_ENGINES] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
+#define ACTIVE_DECAY 15
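
With these weights, an engine that resumes progress sheds score quickly (ACTIVE_DECAY per sample, clamped at zero), while waits, kicks and hangs add BUSY, KICK and HUNG. A worked sequence under a hypothetical run of samples:

#include <stdio.h>

#define BUSY 1
#define KICK 5
#define HUNG 20
#define ACTIVE_DECAY 15

int main(void)
{
	int score = 0;
	int deltas[] = { HUNG, HUNG, -ACTIVE_DECAY, -ACTIVE_DECAY, KICK };
	unsigned i;

	for (i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++) {
		score += deltas[i];
		if (score < 0)
			score = 0;	/* the decay clamps at zero */
		printf("sample %u: score=%d\n", i, score);
	}
	return 0;
}
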
if (!i915.enable_hangcheck)
return;
@@ -3100,33 +3170,37 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine_id(engine, dev_priv, id) {
u64 acthd;
u32 seqno;
+ unsigned user_interrupts;
bool busy = true;
semaphore_clear_deadlocks(dev_priv);
- seqno = ring->get_seqno(ring, false);
- acthd = intel_ring_get_active_head(ring);
-
- if (ring->hangcheck.seqno == seqno) {
- if (ring_idle(ring, seqno)) {
- ring->hangcheck.action = HANGCHECK_IDLE;
-
- if (waitqueue_active(&ring->irq_queue)) {
- /* Issue a wake-up to catch stuck h/w. */
- if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
- if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
- DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
- ring->name);
- else
- DRM_INFO("Fake missed irq on %s\n",
- ring->name);
- wake_up_all(&ring->irq_queue);
- }
+ /* We don't strictly need an irq-barrier here, as we are not
+ * serving an interrupt request, but be paranoid in case the
+ * barrier has side-effects (such as preventing a broken
+ * cacheline snoop) and so be sure that we can see the seqno
+ * advance. If the seqno should stick, due to a stale
+ * cacheline, we would erroneously declare the GPU hung.
+ */
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ acthd = intel_ring_get_active_head(engine);
+ seqno = engine->get_seqno(engine);
+
+ /* Reset stuck interrupts between batch advances */
+ user_interrupts = 0;
+
+ if (engine->hangcheck.seqno == seqno) {
+ if (ring_idle(engine, seqno)) {
+ engine->hangcheck.action = HANGCHECK_IDLE;
+ if (waitqueue_active(&engine->irq_queue)) {
/* Safeguard against driver failure */
- ring->hangcheck.score += BUSY;
+ user_interrupts = kick_waiters(engine);
+ engine->hangcheck.score += BUSY;
} else
busy = false;
} else {
@@ -3145,58 +3219,60 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
* being repeatedly kicked and so responsible
* for stalling the machine.
*/
- ring->hangcheck.action = ring_stuck(ring,
- acthd);
+ engine->hangcheck.action = ring_stuck(engine,
+ acthd);
- switch (ring->hangcheck.action) {
+ switch (engine->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
- case HANGCHECK_ACTIVE:
break;
- case HANGCHECK_ACTIVE_LOOP:
- ring->hangcheck.score += BUSY;
+ case HANGCHECK_ACTIVE:
+ engine->hangcheck.score += BUSY;
break;
case HANGCHECK_KICK:
- ring->hangcheck.score += KICK;
+ engine->hangcheck.score += KICK;
break;
case HANGCHECK_HUNG:
- ring->hangcheck.score += HUNG;
- stuck[i] = true;
+ engine->hangcheck.score += HUNG;
+ stuck[id] = true;
break;
}
}
} else {
- ring->hangcheck.action = HANGCHECK_ACTIVE;
+ engine->hangcheck.action = HANGCHECK_ACTIVE;
/* Gradually reduce the count so that we catch DoS
* attempts across multiple batches.
*/
- if (ring->hangcheck.score > 0)
- ring->hangcheck.score--;
+ if (engine->hangcheck.score > 0)
+ engine->hangcheck.score -= ACTIVE_DECAY;
+ if (engine->hangcheck.score < 0)
+ engine->hangcheck.score = 0;
/* Clear head and subunit states on seqno movement */
- ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
+ acthd = 0;
- memset(ring->hangcheck.instdone, 0,
- sizeof(ring->hangcheck.instdone));
+ memset(engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
}
- ring->hangcheck.seqno = seqno;
- ring->hangcheck.acthd = acthd;
+ engine->hangcheck.seqno = seqno;
+ engine->hangcheck.acthd = acthd;
+ engine->hangcheck.user_interrupts = user_interrupts;
busy_count += busy;
}
- for_each_ring(ring, dev_priv, i) {
- if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
+ for_each_engine_id(engine, dev_priv, id) {
+ if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
DRM_INFO("%s on %s\n",
- stuck[i] ? "stuck" : "no progress",
- ring->name);
- rings_hung++;
+ stuck[id] ? "stuck" : "no progress",
+ engine->name);
+ rings_hung |= intel_engine_flag(engine);
}
}
if (rings_hung) {
- i915_handle_error(dev, true, "Ring hung");
+ i915_handle_error(dev, rings_hung, "Engine(s) hung");
goto out;
}
@@ -3267,6 +3343,55 @@ static void gen5_gt_irq_reset(struct drm_device *dev)
GEN5_IRQ_RESET(GEN6_PM);
}
+static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
+{
+ enum pipe pipe;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+ else
+ I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+
+ i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
+ I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+
+ for_each_pipe(dev_priv, pipe) {
+ I915_WRITE(PIPESTAT(pipe),
+ PIPE_FIFO_UNDERRUN_STATUS |
+ PIPESTAT_INT_STATUS_MASK);
+ dev_priv->pipestat_irq_mask[pipe] = 0;
+ }
+
+ GEN5_IRQ_RESET(VLV_);
+ dev_priv->irq_mask = ~0;
+}
+
+static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ u32 pipestat_mask;
+ u32 enable_mask;
+ enum pipe pipe;
+
+ pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
+ PIPE_CRC_DONE_INTERRUPT_STATUS;
+
+ i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
+ for_each_pipe(dev_priv, pipe)
+ i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
+
+ enable_mask = I915_DISPLAY_PORT_INTERRUPT |
+ I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+ if (IS_CHERRYVIEW(dev_priv))
+ enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+
+ WARN_ON(dev_priv->irq_mask != ~0);
+
+ dev_priv->irq_mask = ~enable_mask;
+
+ GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
+}
+
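The new vlv_display_irq_postinstall() keeps one invariant: the mask programmed into IMR is the bitwise complement of the enabled-event set. A toy model of that invariant, with hypothetical event bits:

#include <stdint.h>
#include <assert.h>

#define EVENT_PIPE_A	(1u << 0)	/* hypothetical bit assignments */
#define EVENT_PIPE_B	(1u << 1)
#define EVENT_PORT	(1u << 2)

int main(void)
{
	uint32_t enable_mask = EVENT_PIPE_A | EVENT_PIPE_B | EVENT_PORT;
	uint32_t irq_mask = ~enable_mask;	/* what lands in IMR */

	/* every enabled event is unmasked, everything else is masked */
	assert((irq_mask & enable_mask) == 0);
	assert((irq_mask | enable_mask) == ~0u);
	return 0;
}
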
/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_device *dev)
@@ -3284,34 +3409,19 @@ static void ironlake_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev);
}
-static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
-{
- enum pipe pipe;
-
- i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
-
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), 0xffff);
-
- GEN5_IRQ_RESET(VLV_);
-}
-
static void valleyview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /* VLV magic */
- I915_WRITE(VLV_IMR, 0);
- I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
- I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
- I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
+ I915_WRITE(VLV_MASTER_IER, 0);
+ POSTING_READ(VLV_MASTER_IER);
gen5_gt_irq_reset(dev);
- I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
-
- vlv_display_irq_reset(dev_priv);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ vlv_display_irq_reset(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
@@ -3384,9 +3494,10 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_PCU_);
- I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
-
- vlv_display_irq_reset(dev_priv);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ vlv_display_irq_reset(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
@@ -3506,6 +3617,26 @@ static void bxt_hpd_irq_setup(struct drm_device *dev)
hotplug = I915_READ(PCH_PORT_HOTPLUG);
hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
PORTA_HOTPLUG_ENABLE;
+
+ DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
+ hotplug, enabled_irqs);
+ hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
+
+ /*
+ * For BXT, the invert bit has to be set based on the AOB design
+ * for the HPD detection logic; update it from the VBT fields.
+ */
+
+ if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
+ intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
+ hotplug |= BXT_DDIA_HPD_INVERT;
+ if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
+ intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
+ hotplug |= BXT_DDIB_HPD_INVERT;
+ if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
+ intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
+ hotplug |= BXT_DDIC_HPD_INVERT;
+
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
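
The BXT hook above is a classic read-modify-write: clear the whole invert field, then set individual bits as the VBT dictates. The same pattern in a compact, runnable form, where vbt_inverted() stands in for intel_bios_is_port_hpd_inverted():

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define DDIA_INVERT (1u << 27)	/* values mirror the BXT_* defines above */
#define DDIB_INVERT (1u << 3)
#define DDIC_INVERT (1u << 11)
#define INVERT_MASK (DDIA_INVERT | DDIB_INVERT | DDIC_INVERT)

/* Hypothetical VBT answer: only port 0 has inverted HPD. */
static bool vbt_inverted(int port) { return port == 0; }

int main(void)
{
	uint32_t hotplug = 0xffffffff;	/* pretend register readback */

	hotplug &= ~INVERT_MASK;	/* start from a known-clear field */
	if (vbt_inverted(0))
		hotplug |= DDIA_INVERT;
	if (vbt_inverted(1))
		hotplug |= DDIB_INVERT;

	printf("hotplug=0x%08x\n", hotplug);
	return 0;
}
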
@@ -3613,74 +3744,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
return 0;
}
-static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
-{
- u32 pipestat_mask;
- u32 iir_mask;
- enum pipe pipe;
-
- pipestat_mask = PIPESTAT_INT_STATUS_MASK |
- PIPE_FIFO_UNDERRUN_STATUS;
-
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), pipestat_mask);
- POSTING_READ(PIPESTAT(PIPE_A));
-
- pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
- PIPE_CRC_DONE_INTERRUPT_STATUS;
-
- i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- for_each_pipe(dev_priv, pipe)
- i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
-
- iir_mask = I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
- if (IS_CHERRYVIEW(dev_priv))
- iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
- dev_priv->irq_mask &= ~iir_mask;
-
- I915_WRITE(VLV_IIR, iir_mask);
- I915_WRITE(VLV_IIR, iir_mask);
- I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- POSTING_READ(VLV_IMR);
-}
-
-static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
-{
- u32 pipestat_mask;
- u32 iir_mask;
- enum pipe pipe;
-
- iir_mask = I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
- if (IS_CHERRYVIEW(dev_priv))
- iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
-
- dev_priv->irq_mask |= iir_mask;
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
- I915_WRITE(VLV_IIR, iir_mask);
- I915_WRITE(VLV_IIR, iir_mask);
- POSTING_READ(VLV_IIR);
-
- pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
- PIPE_CRC_DONE_INTERRUPT_STATUS;
-
- i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
- for_each_pipe(dev_priv, pipe)
- i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
-
- pipestat_mask = PIPESTAT_INT_STATUS_MASK |
- PIPE_FIFO_UNDERRUN_STATUS;
-
- for_each_pipe(dev_priv, pipe)
- I915_WRITE(PIPESTAT(pipe), pipestat_mask);
- POSTING_READ(PIPESTAT(PIPE_A));
-}
-
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
assert_spin_locked(&dev_priv->irq_lock);
@@ -3690,8 +3753,10 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = true;
- if (intel_irqs_enabled(dev_priv))
- valleyview_display_irqs_install(dev_priv);
+ if (intel_irqs_enabled(dev_priv)) {
+ vlv_display_irq_reset(dev_priv);
+ vlv_display_irq_postinstall(dev_priv);
+ }
}
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
@@ -3704,45 +3769,23 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
- valleyview_display_irqs_uninstall(dev_priv);
+ vlv_display_irq_reset(dev_priv);
}
-static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
-{
- dev_priv->irq_mask = ~0;
-
- i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
- POSTING_READ(PORT_HOTPLUG_EN);
-
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
- I915_WRITE(VLV_IMR, dev_priv->irq_mask);
- POSTING_READ(VLV_IMR);
-
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
- valleyview_display_irqs_install(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- vlv_display_irq_postinstall(dev_priv);
-
gen5_gt_irq_postinstall(dev);
- /* ack & enable invalid PTE error interrupts */
-#if 0 /* FIXME: add support to irq handler for checking these bits */
- I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
- I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
-#endif
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ vlv_display_irq_postinstall(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
+ POSTING_READ(VLV_MASTER_IER);
return 0;
}
@@ -3753,7 +3796,6 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
uint32_t gt_interrupts[] = {
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
@@ -3765,6 +3807,9 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
};
+ if (HAS_L3_DPF(dev_priv))
+ gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+
dev_priv->pm_irq_mask = 0xffffffff;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
@@ -3832,7 +3877,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev))
ibx_irq_postinstall(dev);
- I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
+ I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
return 0;
@@ -3842,11 +3887,14 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- vlv_display_irq_postinstall(dev_priv);
-
gen8_gt_irq_postinstall(dev_priv);
- I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ vlv_display_irq_postinstall(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
return 0;
@@ -3862,20 +3910,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
gen8_irq_reset(dev);
}
-static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
-{
- /* Interrupt setup is already guaranteed to be single-threaded, this is
- * just to make the assert_spin_locked check happy. */
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display_irqs_enabled)
- valleyview_display_irqs_uninstall(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
- vlv_display_irq_reset(dev_priv);
-
- dev_priv->irq_mask = ~0;
-}
-
static void valleyview_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3884,12 +3918,16 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
return;
I915_WRITE(VLV_MASTER_IER, 0);
+ POSTING_READ(VLV_MASTER_IER);
gen5_gt_irq_reset(dev);
I915_WRITE(HWSTAM, 0xffffffff);
- vlv_display_irq_uninstall(dev_priv);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ vlv_display_irq_reset(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
static void cherryview_irq_uninstall(struct drm_device *dev)
@@ -3906,7 +3944,10 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_PCU_);
- vlv_display_irq_uninstall(dev_priv);
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ vlv_display_irq_reset(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
@@ -4044,7 +4085,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
new_iir = I915_READ16(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4233,14 +4274,17 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
/* Consume port. Then clear IIR or we'll miss events */
if (I915_HAS_HOTPLUG(dev) &&
- iir & I915_DISPLAY_PORT_INTERRUPT)
- i9xx_hpd_irq_handler(dev);
+ iir & I915_DISPLAY_PORT_INTERRUPT) {
+ u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ if (hotplug_status)
+ i9xx_hpd_irq_handler(dev, hotplug_status);
+ }
I915_WRITE(IIR, iir & ~flip_mask);
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->engine[RCS]);
for_each_pipe(dev_priv, pipe) {
int plane = pipe;
@@ -4463,16 +4507,19 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
/* Consume port. Then clear IIR or we'll miss events */
- if (iir & I915_DISPLAY_PORT_INTERRUPT)
- i9xx_hpd_irq_handler(dev);
+ if (iir & I915_DISPLAY_PORT_INTERRUPT) {
+ u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
+ if (hotplug_status)
+ i9xx_hpd_irq_handler(dev, hotplug_status);
+ }
I915_WRITE(IIR, iir & ~flip_mask);
new_iir = I915_READ(IIR); /* Flush posted writes */
if (iir & I915_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[RCS]);
+ notify_ring(&dev_priv->engine[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
- notify_ring(&dev_priv->ring[VCS]);
+ notify_ring(&dev_priv->engine[VCS]);
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@@ -4567,8 +4614,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
i915_hangcheck_elapsed);
- pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
-
if (IS_GEN2(dev_priv)) {
dev->max_vblank_count = 0;
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
@@ -4616,7 +4661,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_BROXTON(dev))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev))
+ else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 278c9c40c..1779f02e6 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -56,6 +56,8 @@ struct i915_params i915 __read_mostly = {
.edp_vswing = 0,
.enable_guc_submission = false,
.guc_log_level = -1,
+ .enable_dp_mst = true,
+ .inject_load_failure = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -201,3 +203,10 @@ MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)")
module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
MODULE_PARM_DESC(guc_log_level,
"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
+
+module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600);
+MODULE_PARM_DESC(enable_dp_mst,
+ "Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
+module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
+MODULE_PARM_DESC(inject_load_failure,
+ "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
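
inject_load_failure counts "failure check points" during driver load and forces an error at the Nth one. A hedged sketch of how such a checkpoint counter can behave; the helper name and wiring here are illustrative, not the driver's:

#include <stdbool.h>
#include <stdio.h>

static unsigned int inject_load_failure = 3;	/* as if set at modprobe time */
static unsigned int checkpoints_seen;

/* Returns true when this call is the Nth checkpoint and a failure
 * should be injected; 0 disables injection entirely. */
static bool should_inject_failure(void)
{
	if (!inject_load_failure)
		return false;
	return ++checkpoints_seen == inject_load_failure;
}

int main(void)
{
	for (int i = 1; i <= 4; i++)
		printf("checkpoint %d: inject=%d\n", i, should_inject_failure());
	return 0;
}
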
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index bd5026b15..02bc27804 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -49,6 +49,7 @@ struct i915_params {
int use_mmio_flip;
int mmio_debug;
int edp_vswing;
+ unsigned int inject_load_failure;
/* leave bools at the end to not create holes */
bool enable_hangcheck;
bool fastboot;
@@ -59,6 +60,7 @@ struct i915_params {
bool enable_guc_submission;
bool verbose_state_checks;
bool nuclear_pageflip;
+ bool enable_dp_mst;
};
extern struct i915_params i915 __read_mostly;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3d13d0e55..720aab9a5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -79,6 +79,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* PCI config space */
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4 * 4096)
+
+#define DEVEN 0x54
+#define DEVEN_MCHBAR_EN (1 << 28)
+
+#define BSM 0x5c
+#define BSM_MASK (0xFFFF << 20)
+
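BSM is a PCI config dword whose upper bits carry the stolen-memory base; masking with BSM_MASK drops the low control bits and leaves a 1 MiB-aligned address. A quick check of that arithmetic with a made-up readback value:

#include <stdint.h>
#include <stdio.h>

#define BSM_MASK (0xFFFFu << 20)	/* mirrors the define above */

int main(void)
{
	uint32_t bsm = 0x7b200001;	/* hypothetical config-space readback */
	uint32_t base = bsm & BSM_MASK;

	printf("stolen base = 0x%08x\n", base);	/* 0x7b200000 */
	return 0;
}
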
#define HPLLCC 0xc0 /* 85x only */
#define GC_CLOCK_CONTROL_MASK (0x7 << 0)
#define GC_CLOCK_133_200 (0 << 0)
@@ -90,6 +100,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GC_CLOCK_166_266 (6 << 0)
#define GC_CLOCK_166_250 (7 << 0)
+#define I915_GDRST 0xc0 /* PCI config register */
+#define GRDOM_FULL (0 << 2)
+#define GRDOM_RENDER (1 << 2)
+#define GRDOM_MEDIA (3 << 2)
+#define GRDOM_MASK (3 << 2)
+#define GRDOM_RESET_STATUS (1 << 1)
+#define GRDOM_RESET_ENABLE (1 << 0)
+
+#define GCDGMBUS 0xcc
+
#define GCFGC2 0xda
#define GCFGC 0xf0 /* 915+ only */
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
@@ -121,18 +141,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
-#define GCDGMBUS 0xcc
-#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
+#define ASLE 0xe4
+#define ASLS 0xfc
+
+#define SWSCI 0xe8
+#define SWSCI_SCISEL (1 << 15)
+#define SWSCI_GSSCIE (1 << 0)
+
+#define LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
-/* Graphics reset regs */
-#define I915_GDRST 0xc0 /* PCI config register */
-#define GRDOM_FULL (0<<2)
-#define GRDOM_RENDER (1<<2)
-#define GRDOM_MEDIA (3<<2)
-#define GRDOM_MASK (3<<2)
-#define GRDOM_RESET_STATUS (1<<1)
-#define GRDOM_RESET_ENABLE (1<<0)
#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
#define ILK_GRDOM_FULL (0<<1)
@@ -164,6 +182,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN6_GRDOM_RENDER (1 << 1)
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
+#define GEN6_GRDOM_VECS (1 << 4)
+#define GEN9_GRDOM_GUC (1 << 5)
+#define GEN8_GRDOM_MEDIA2 (1 << 7)
#define RING_PP_DIR_BASE(ring) _MMIO((ring)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(ring) _MMIO((ring)->mmio_base+0x518)
@@ -199,6 +220,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ECOCHK_PPGTT_WT_HSW (0x2<<3)
#define ECOCHK_PPGTT_WB_HSW (0x3<<3)
+#define GEN8_CONFIG0 _MMIO(0xD00)
+#define GEN9_DEFAULT_FIXES (1 << 3 | 1 << 2 | 1 << 1)
+
#define GAC_ECO_BITS _MMIO(0x14090)
#define ECOBITS_SNB_BIT (1<<13)
#define ECOBITS_PPGTT_CACHE64B (3<<8)
@@ -586,6 +610,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504)
#define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508)
+/* There are 16 64-bit CS General Purpose Registers */
+#define HSW_CS_GPR(n) _MMIO(0x2600 + (n) * 8)
+#define HSW_CS_GPR_UDW(n) _MMIO(0x2600 + (n) * 8 + 4)
+
#define OACONTROL _MMIO(0x2360)
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
@@ -621,6 +649,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IOSF_PORT_GPIO_SC 0x48
#define IOSF_PORT_GPIO_SUS 0xa8
#define IOSF_PORT_CCU 0xa9
+#define CHV_IOSF_PORT_GPIO_N 0x13
+#define CHV_IOSF_PORT_GPIO_SE 0x48
+#define CHV_IOSF_PORT_GPIO_E 0xa8
+#define CHV_IOSF_PORT_GPIO_SW 0xb2
#define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104)
#define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108)
@@ -785,7 +817,9 @@ enum skl_disp_power_wells {
#define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_CZ_CLOCK_CONTROL 0x62
+#define CCK_GPLL_CLOCK_CONTROL 0x67
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
+#define CCK_DISPLAY_REF_CLOCK_CONTROL 0x6c
#define CCK_TRUNK_FORCE_ON (1 << 17)
#define CCK_TRUNK_FORCE_OFF (1 << 16)
#define CCK_FREQUENCY_STATUS (0x1f << 8)
@@ -1317,6 +1351,7 @@ enum skl_disp_power_wells {
#define _PORT_CL1CM_DW0_A 0x162000
#define _PORT_CL1CM_DW0_BC 0x6C000
#define PHY_POWER_GOOD (1 << 16)
+#define PHY_RESERVED (1 << 7)
#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \
_PORT_CL1CM_DW0_A)
@@ -1361,14 +1396,10 @@ enum skl_disp_power_wells {
#define _PORT_REF_DW6_A 0x162198
#define _PORT_REF_DW6_BC 0x6C198
-/*
- * FIXME: BSpec/CHV ConfigDB disagrees on the following two fields, fix them
- * after testing.
- */
-#define GRC_CODE_SHIFT 23
-#define GRC_CODE_MASK (0x1FF << GRC_CODE_SHIFT)
+#define GRC_CODE_SHIFT 24
+#define GRC_CODE_MASK (0xFF << GRC_CODE_SHIFT)
#define GRC_CODE_FAST_SHIFT 16
-#define GRC_CODE_FAST_MASK (0x7F << GRC_CODE_FAST_SHIFT)
+#define GRC_CODE_FAST_MASK (0xFF << GRC_CODE_FAST_SHIFT)
#define GRC_CODE_SLOW_SHIFT 8
#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT)
#define GRC_CODE_NOM_MASK 0xFF
@@ -1641,6 +1672,9 @@ enum skl_disp_power_wells {
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
+#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
+#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
+
#if 0
#define PRB0_TAIL _MMIO(0x2030)
#define PRB0_HEAD _MMIO(0x2034)
@@ -1776,6 +1810,22 @@ enum skl_disp_power_wells {
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2))
#define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2))
+/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
+#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
+#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+
+/* WaClearTdlStateAckDirtyBits */
+#define GEN8_STATE_ACK _MMIO(0x20F0)
+#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8)
+#define GEN9_STATE_ACK_SLICE2 _MMIO(0x2100)
+#define GEN9_STATE_ACK_TDL0 (1 << 12)
+#define GEN9_STATE_ACK_TDL1 (1 << 13)
+#define GEN9_STATE_ACK_TDL2 (1 << 14)
+#define GEN9_STATE_ACK_TDL3 (1 << 15)
+#define GEN9_SUBSLICE_TDL_ACK_BITS \
+ (GEN9_STATE_ACK_TDL3 | GEN9_STATE_ACK_TDL2 | \
+ GEN9_STATE_ACK_TDL1 | GEN9_STATE_ACK_TDL0)
+
#define GFX_MODE _MMIO(0x2520)
#define GFX_MODE_GEN7 _MMIO(0x229c)
#define RING_MODE_GEN7(ring) _MMIO((ring)->mmio_base+0x29c)
@@ -1795,6 +1845,7 @@ enum skl_disp_power_wells {
#define VLV_DISPLAY_BASE 0x180000
#define VLV_MIPI_BASE VLV_DISPLAY_BASE
+#define BXT_MIPI_BASE 0x60000
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
@@ -2159,6 +2210,8 @@ enum skl_disp_power_wells {
#define ILK_DPFC_STATUS _MMIO(0x43210)
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
+#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
+#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1<<0)
#define SNB_FBC_FRONT_BUFFER (1<<1)
@@ -2923,6 +2976,15 @@ enum skl_disp_power_wells {
INTERVAL_1_33_US(us)) : \
INTERVAL_1_28_US(us))
+#define INTERVAL_1_28_TO_US(interval) (((interval) << 7) / 100)
+#define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3)
+#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
+#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \
+ (IS_BROXTON(dev_priv) ? \
+ INTERVAL_0_833_TO_US(interval) : \
+ INTERVAL_1_33_TO_US(interval)) : \
+ INTERVAL_1_28_TO_US(interval))
+
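The reverse-conversion macros above approximate multiplying by the hardware tick length in integer arithmetic: <<7 then /100 is 1.28x, <<2 then /3 is 1.33x, and *5/6 is 0.833x. A quick numeric check:

#include <stdio.h>

#define INTERVAL_1_28_TO_US(i)	(((i) << 7) / 100)
#define INTERVAL_1_33_TO_US(i)	(((i) << 2) / 3)
#define INTERVAL_0_833_TO_US(i)	(((i) * 5) / 6)

int main(void)
{
	int ticks = 1000;

	printf("1.28us ticks:  %d us\n", INTERVAL_1_28_TO_US(ticks));	/* 1280 */
	printf("1.33us ticks:  %d us\n", INTERVAL_1_33_TO_US(ticks));	/* 1333 */
	printf("0.833us ticks: %d us\n", INTERVAL_0_833_TO_US(ticks));	/* 833 */
	return 0;
}
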
/*
* Logical Context regs
*/
@@ -4784,6 +4846,10 @@ enum skl_disp_power_wells {
#define CBR_PND_DEADLINE_DISABLE (1<<31)
#define CBR_PWM_CLOCK_MUX_SELECT (1<<30)
+#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450)
+#define CBR_DPLLBMD_PIPE_C (1<<29)
+#define CBR_DPLLBMD_PIPE_B (1<<18)
+
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
@@ -5977,6 +6043,7 @@ enum skl_disp_power_wells {
#define CHICKEN_PAR1_1 _MMIO(0x42080)
#define DPA_MASK_VBLANK_SRD (1 << 15)
#define FORCE_ARB_IDLE_PLANES (1 << 14)
+#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
@@ -5985,6 +6052,7 @@ enum skl_disp_power_wells {
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
#define DISP_ARB_CTL _MMIO(0x45000)
+#define DISP_FBC_MEMORY_WAKE (1<<31)
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
#define DISP_ARB_CTL2 _MMIO(0x45004)
@@ -5998,6 +6066,9 @@ enum skl_disp_power_wells {
#define HSW_NDE_RSTWRN_OPT _MMIO(0x46408)
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
+#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define MASK_WAKEMEM (1<<13)
+
#define SKL_DFSM _MMIO(0x51000)
#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23)
#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23)
@@ -6015,6 +6086,7 @@ enum skl_disp_power_wells {
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
+#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
#define GEN8_CS_CHICKEN1 _MMIO(0x2580)
/* GEN7 chicken */
@@ -6022,6 +6094,7 @@ enum skl_disp_power_wells {
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14)
#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
+# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
# define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0)
#define HIZ_CHICKEN _MMIO(0x7018)
@@ -6184,6 +6257,7 @@ enum skl_disp_power_wells {
/* digital port hotplug */
#define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */
#define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */
+#define BXT_DDIA_HPD_INVERT (1 << 27)
#define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */
#define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */
@@ -6199,6 +6273,7 @@ enum skl_disp_power_wells {
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
#define PORTC_HOTPLUG_ENABLE (1 << 12)
+#define BXT_DDIC_HPD_INVERT (1 << 11)
#define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */
#define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */
@@ -6209,6 +6284,7 @@ enum skl_disp_power_wells {
#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
#define PORTB_HOTPLUG_ENABLE (1 << 4)
+#define BXT_DDIB_HPD_INVERT (1 << 3)
#define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */
#define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */
@@ -6218,6 +6294,9 @@ enum skl_disp_power_wells {
#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
+#define BXT_DDI_HPD_INVERT_MASK (BXT_DDIA_HPD_INVERT | \
+ BXT_DDIB_HPD_INVERT | \
+ BXT_DDIC_HPD_INVERT)
#define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */
#define PORTE_HOTPLUG_ENABLE (1 << 4)
@@ -6836,6 +6915,8 @@ enum skl_disp_power_wells {
#define VLV_SPAREG2H _MMIO(0xA194)
#define GTFIFODBG _MMIO(0x120000)
+#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
+#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
#define GT_FIFO_SBDROPERR (1<<6)
#define GT_FIFO_BLOBDROPERR (1<<5)
#define GT_FIFO_SB_READ_ABORTERR (1<<4)
@@ -6852,10 +6933,14 @@ enum skl_disp_power_wells {
#define HSW_IDICR _MMIO(0x9008)
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
-#define HSW_EDRAM_PRESENT _MMIO(0x120010)
+#define HSW_EDRAM_CAP _MMIO(0x120010)
#define EDRAM_ENABLED 0x1
+#define EDRAM_NUM_BANKS(cap) (((cap) >> 1) & 0xf)
+#define EDRAM_WAYS_IDX(cap) (((cap) >> 5) & 0x7)
+#define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3)
#define GEN6_UCGCTL1 _MMIO(0x9400)
+# define GEN6_GAMUNIT_CLOCK_GATE_DISABLE (1 << 22)
# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
@@ -6872,6 +6957,7 @@ enum skl_disp_power_wells {
#define GEN7_UCGCTL4 _MMIO(0x940c)
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
+#define GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE (1<<14)
#define GEN6_RCGCTL1 _MMIO(0x9410)
#define GEN6_RCGCTL2 _MMIO(0x9414)
@@ -7114,6 +7200,7 @@ enum skl_disp_power_wells {
#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3)
#define GEN8_ROW_CHICKEN _MMIO(0xe4f0)
+#define FLOW_CONTROL_ENABLE (1<<15)
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
@@ -7135,6 +7222,7 @@ enum skl_disp_power_wells {
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
+#define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2)
/* Audio */
#define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
@@ -7374,9 +7462,11 @@ enum skl_disp_power_wells {
/* SBI offsets */
#define SBI_SSCDIVINTPHASE 0x0200
#define SBI_SSCDIVINTPHASE6 0x0600
-#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
+#define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1
+#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f<<1)
#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
-#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
+#define SBI_SSCDIVINTPHASE_INCVAL_SHIFT 8
+#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f<<8)
#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
@@ -7386,6 +7476,8 @@ enum skl_disp_power_wells {
#define SBI_SSCCTL_PATHALT (1<<3)
#define SBI_SSCCTL_DISABLE (1<<0)
#define SBI_SSCAUXDIV6 0x0610
+#define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4
+#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1<<4)
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
#define SBI_DBUFF0 0x2a00
#define SBI_GEN0 0x1f00
@@ -7665,6 +7757,59 @@ enum skl_disp_power_wells {
#define PIPE_CSC_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+/* pipe degamma/gamma LUTs on IVB+ */
+#define _PAL_PREC_INDEX_A 0x4A400
+#define _PAL_PREC_INDEX_B 0x4AC00
+#define _PAL_PREC_INDEX_C 0x4B400
+#define PAL_PREC_10_12_BIT (0 << 31)
+#define PAL_PREC_SPLIT_MODE (1 << 31)
+#define PAL_PREC_AUTO_INCREMENT (1 << 15)
+#define _PAL_PREC_DATA_A 0x4A404
+#define _PAL_PREC_DATA_B 0x4AC04
+#define _PAL_PREC_DATA_C 0x4B404
+#define _PAL_PREC_GC_MAX_A 0x4A410
+#define _PAL_PREC_GC_MAX_B 0x4AC10
+#define _PAL_PREC_GC_MAX_C 0x4B410
+#define _PAL_PREC_EXT_GC_MAX_A 0x4A420
+#define _PAL_PREC_EXT_GC_MAX_B 0x4AC20
+#define _PAL_PREC_EXT_GC_MAX_C 0x4B420
+
+#define PREC_PAL_INDEX(pipe) _MMIO_PIPE(pipe, _PAL_PREC_INDEX_A, _PAL_PREC_INDEX_B)
+#define PREC_PAL_DATA(pipe) _MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B)
+#define PREC_PAL_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4)
+#define PREC_PAL_EXT_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4)
+
+/* pipe CSC & degamma/gamma LUTs on CHV */
+#define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900)
+#define _CGM_PIPE_A_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x67904)
+#define _CGM_PIPE_A_CSC_COEFF45 (VLV_DISPLAY_BASE + 0x67908)
+#define _CGM_PIPE_A_CSC_COEFF67 (VLV_DISPLAY_BASE + 0x6790C)
+#define _CGM_PIPE_A_CSC_COEFF8 (VLV_DISPLAY_BASE + 0x67910)
+#define _CGM_PIPE_A_DEGAMMA (VLV_DISPLAY_BASE + 0x66000)
+#define _CGM_PIPE_A_GAMMA (VLV_DISPLAY_BASE + 0x67000)
+#define _CGM_PIPE_A_MODE (VLV_DISPLAY_BASE + 0x67A00)
+#define CGM_PIPE_MODE_GAMMA (1 << 2)
+#define CGM_PIPE_MODE_CSC (1 << 1)
+#define CGM_PIPE_MODE_DEGAMMA (1 << 0)
+
+#define _CGM_PIPE_B_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x69900)
+#define _CGM_PIPE_B_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x69904)
+#define _CGM_PIPE_B_CSC_COEFF45 (VLV_DISPLAY_BASE + 0x69908)
+#define _CGM_PIPE_B_CSC_COEFF67 (VLV_DISPLAY_BASE + 0x6990C)
+#define _CGM_PIPE_B_CSC_COEFF8 (VLV_DISPLAY_BASE + 0x69910)
+#define _CGM_PIPE_B_DEGAMMA (VLV_DISPLAY_BASE + 0x68000)
+#define _CGM_PIPE_B_GAMMA (VLV_DISPLAY_BASE + 0x69000)
+#define _CGM_PIPE_B_MODE (VLV_DISPLAY_BASE + 0x69A00)
+
+#define CGM_PIPE_CSC_COEFF01(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF01, _CGM_PIPE_B_CSC_COEFF01)
+#define CGM_PIPE_CSC_COEFF23(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF23, _CGM_PIPE_B_CSC_COEFF23)
+#define CGM_PIPE_CSC_COEFF45(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF45, _CGM_PIPE_B_CSC_COEFF45)
+#define CGM_PIPE_CSC_COEFF67(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF67, _CGM_PIPE_B_CSC_COEFF67)
+#define CGM_PIPE_CSC_COEFF8(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF8, _CGM_PIPE_B_CSC_COEFF8)
+#define CGM_PIPE_DEGAMMA(pipe, i, w) _MMIO(_PIPE(pipe, _CGM_PIPE_A_DEGAMMA, _CGM_PIPE_B_DEGAMMA) + (i) * 8 + (w) * 4)
+#define CGM_PIPE_GAMMA(pipe, i, w) _MMIO(_PIPE(pipe, _CGM_PIPE_A_GAMMA, _CGM_PIPE_B_GAMMA) + (i) * 8 + (w) * 4)
+#define CGM_PIPE_MODE(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_MODE, _CGM_PIPE_B_MODE)
+
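The per-pipe macros above extrapolate pipe N's register address from the pipe A and pipe B instances, the same scheme as the kernel's _PIPE(). A self-contained model using the CGM mode offsets from above (offsets are relative to the display MMIO base):

#include <stdio.h>

/* Linear extrapolation from the pipe A and pipe B register instances. */
#define PIPE_REG(pipe, a, b) ((a) + (pipe) * ((b) - (a)))

#define CGM_MODE_A 0x67A00	/* from the _CGM_PIPE_*_MODE defines above */
#define CGM_MODE_B 0x69A00

int main(void)
{
	int pipe;

	for (pipe = 0; pipe < 2; pipe++)
		printf("CGM_PIPE_MODE(%d) = 0x%05x\n",
		       pipe, PIPE_REG(pipe, CGM_MODE_A, CGM_MODE_B));
	return 0;
}
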
/* MIPI DSI registers */
#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
@@ -7679,58 +7824,62 @@ enum skl_disp_power_wells {
#define BXT_MIPI_DIV_SHIFT(port) \
_MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \
BXT_MIPI2_DIV_SHIFT)
-/* Var clock divider to generate TX source. Result must be < 39.5 M */
-#define BXT_MIPI1_ESCLK_VAR_DIV_MASK (0x3F << 26)
-#define BXT_MIPI2_ESCLK_VAR_DIV_MASK (0x3F << 10)
-#define BXT_MIPI_ESCLK_VAR_DIV_MASK(port) \
- _MIPI_PORT(port, BXT_MIPI1_ESCLK_VAR_DIV_MASK, \
- BXT_MIPI2_ESCLK_VAR_DIV_MASK)
-
-#define BXT_MIPI_ESCLK_VAR_DIV(port, val) \
- (val << BXT_MIPI_DIV_SHIFT(port))
+
/* TX control divider to select actual TX clock output from (8x/var) */
-#define BXT_MIPI1_TX_ESCLK_SHIFT 21
-#define BXT_MIPI2_TX_ESCLK_SHIFT 5
+#define BXT_MIPI1_TX_ESCLK_SHIFT 26
+#define BXT_MIPI2_TX_ESCLK_SHIFT 10
#define BXT_MIPI_TX_ESCLK_SHIFT(port) \
_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \
BXT_MIPI2_TX_ESCLK_SHIFT)
-#define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (3 << 21)
-#define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (3 << 5)
+#define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (0x3F << 26)
+#define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (0x3F << 10)
#define BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port) \
_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
- BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
-#define BXT_MIPI_TX_ESCLK_8XDIV_BY2(port) \
- (0x0 << BXT_MIPI_TX_ESCLK_SHIFT(port))
-#define BXT_MIPI_TX_ESCLK_8XDIV_BY4(port) \
- (0x1 << BXT_MIPI_TX_ESCLK_SHIFT(port))
-#define BXT_MIPI_TX_ESCLK_8XDIV_BY8(port) \
- (0x2 << BXT_MIPI_TX_ESCLK_SHIFT(port))
-/* RX control divider to select actual RX clock output from 8x*/
-#define BXT_MIPI1_RX_ESCLK_SHIFT 19
-#define BXT_MIPI2_RX_ESCLK_SHIFT 3
-#define BXT_MIPI_RX_ESCLK_SHIFT(port) \
- _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_SHIFT, \
- BXT_MIPI2_RX_ESCLK_SHIFT)
-#define BXT_MIPI1_RX_ESCLK_FIXDIV_MASK (3 << 19)
-#define BXT_MIPI2_RX_ESCLK_FIXDIV_MASK (3 << 3)
-#define BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port) \
- (3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
-#define BXT_MIPI_RX_ESCLK_8X_BY2(port) \
- (1 << BXT_MIPI_RX_ESCLK_SHIFT(port))
-#define BXT_MIPI_RX_ESCLK_8X_BY3(port) \
- (2 << BXT_MIPI_RX_ESCLK_SHIFT(port))
-#define BXT_MIPI_RX_ESCLK_8X_BY4(port) \
- (3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
-/* BXT-A WA: Always prog DPHY dividers to 00 */
-#define BXT_MIPI1_DPHY_DIV_SHIFT 16
-#define BXT_MIPI2_DPHY_DIV_SHIFT 0
-#define BXT_MIPI_DPHY_DIV_SHIFT(port) \
- _MIPI_PORT(port, BXT_MIPI1_DPHY_DIV_SHIFT, \
- BXT_MIPI2_DPHY_DIV_SHIFT)
-#define BXT_MIPI_1_DPHY_DIVIDER_MASK (3 << 16)
-#define BXT_MIPI_2_DPHY_DIVIDER_MASK (3 << 0)
-#define BXT_MIPI_DPHY_DIVIDER_MASK(port) \
- (3 << BXT_MIPI_DPHY_DIV_SHIFT(port))
+ BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
+#define BXT_MIPI_TX_ESCLK_DIVIDER(port, val) \
+ ((val & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port))
+/* RX upper control divider to select actual RX clock output from 8x */
+#define BXT_MIPI1_RX_ESCLK_UPPER_SHIFT 21
+#define BXT_MIPI2_RX_ESCLK_UPPER_SHIFT 5
+#define BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_SHIFT, \
+ BXT_MIPI2_RX_ESCLK_UPPER_SHIFT)
+#define BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 21)
+#define BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 5)
+#define BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \
+ BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK)
+#define BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val) \
+ ((val & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port))
+/* 8/3X divider to select the actual 8/3X clock output from 8x */
+#define BXT_MIPI1_8X_BY3_SHIFT 19
+#define BXT_MIPI2_8X_BY3_SHIFT 3
+#define BXT_MIPI_8X_BY3_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_8X_BY3_SHIFT, \
+ BXT_MIPI2_8X_BY3_SHIFT)
+#define BXT_MIPI1_8X_BY3_DIVIDER_MASK (3 << 19)
+#define BXT_MIPI2_8X_BY3_DIVIDER_MASK (3 << 3)
+#define BXT_MIPI_8X_BY3_DIVIDER_MASK(port) \
+ _MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \
+ BXT_MIPI2_8X_BY3_DIVIDER_MASK)
+#define BXT_MIPI_8X_BY3_DIVIDER(port, val) \
+ ((val & 3) << BXT_MIPI_8X_BY3_SHIFT(port))
+/* RX lower control divider to select actual RX clock output from 8x */
+#define BXT_MIPI1_RX_ESCLK_LOWER_SHIFT 16
+#define BXT_MIPI2_RX_ESCLK_LOWER_SHIFT 0
+#define BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_SHIFT, \
+ BXT_MIPI2_RX_ESCLK_LOWER_SHIFT)
+#define BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 16)
+#define BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 0)
+#define BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port) \
+ _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \
+ BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK)
+#define BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val) \
+ ((val & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port))
+
+#define RX_DIVIDER_BIT_1_2 0x3
+#define RX_DIVIDER_BIT_3_4 0xC
/* BXT MIPI mode configure */
#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
@@ -7755,9 +7904,11 @@ enum skl_disp_power_wells {
#define BXT_DSIC_16X_BY2 (1 << 10)
#define BXT_DSIC_16X_BY3 (2 << 10)
#define BXT_DSIC_16X_BY4 (3 << 10)
+#define BXT_DSIC_16X_MASK (3 << 10)
#define BXT_DSIA_16X_BY2 (1 << 8)
#define BXT_DSIA_16X_BY3 (2 << 8)
#define BXT_DSIA_16X_BY4 (3 << 8)
+#define BXT_DSIA_16X_MASK (3 << 8)
#define BXT_DSI_FREQ_SEL_SHIFT 8
#define BXT_DSI_FREQ_SEL_MASK (0xF << BXT_DSI_FREQ_SEL_SHIFT)
@@ -7892,8 +8043,8 @@ enum skl_disp_power_wells {
#define VID_MODE_FORMAT_MASK (0xf << 7)
#define VID_MODE_NOT_SUPPORTED (0 << 7)
#define VID_MODE_FORMAT_RGB565 (1 << 7)
-#define VID_MODE_FORMAT_RGB666 (2 << 7)
-#define VID_MODE_FORMAT_RGB666_LOOSE (3 << 7)
+#define VID_MODE_FORMAT_RGB666_PACKED (2 << 7)
+#define VID_MODE_FORMAT_RGB666 (3 << 7)
#define VID_MODE_FORMAT_RGB888 (4 << 7)
#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5
#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5)
@@ -8149,6 +8300,7 @@ enum skl_disp_power_wells {
#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
#define RGB_FLIP_TO_BGR (1 << 2)
+#define BXT_PIPE_SELECT_SHIFT 7
#define BXT_PIPE_SELECT_MASK (7 << 7)
#define BXT_PIPE_SELECT(pipe) ((pipe) << 7)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index c6188dddb..2d576b7ff 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -370,6 +370,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+ intel_runtime_pm_get(dev_priv);
+
mutex_lock(&dev_priv->rps.hw_lock);
val = intel_freq_opcode(dev_priv, val);
@@ -378,6 +380,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
val > dev_priv->rps.max_freq ||
val < dev_priv->rps.min_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_runtime_pm_put(dev_priv);
return -EINVAL;
}
@@ -398,6 +401,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_runtime_pm_put(dev_priv);
+
return count;
}
@@ -433,6 +438,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+ intel_runtime_pm_get(dev_priv);
+
mutex_lock(&dev_priv->rps.hw_lock);
val = intel_freq_opcode(dev_priv, val);
@@ -441,6 +448,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
val > dev_priv->rps.max_freq ||
val > dev_priv->rps.max_freq_softlimit) {
mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_runtime_pm_put(dev_priv);
return -EINVAL;
}
@@ -457,6 +465,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_unlock(&dev_priv->rps.hw_lock);
+ intel_runtime_pm_put(dev_priv);
+
return count;
}
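
The sysfs fixes above enforce one rule: every intel_runtime_pm_get() is paired with a put on every exit path, including the -EINVAL early returns. A sketch of that bracketing discipline with stub wakeref counting:

#include <stdio.h>

static int wakeref_count;

static void pm_get(void) { wakeref_count++; }
static void pm_put(void) { wakeref_count--; }

/* Every path out of the critical section drops the reference it took. */
static int store_freq(int val)
{
	pm_get();
	if (val < 0) {		/* stand-in for the range check */
		pm_put();
		return -1;
	}
	/* ... program the hardware ... */
	pm_put();
	return 0;
}

int main(void)
{
	store_freq(-5);
	store_freq(100);
	printf("leaked wakerefs: %d\n", wakeref_count);	/* expect 0 */
	return 0;
}
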
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fa09e5581..dc0def210 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -464,7 +464,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
TP_fast_assign(
__entry->dev = from->dev->primary->index;
__entry->sync_from = from->id;
- __entry->sync_to = to_req->ring->id;
+ __entry->sync_to = to_req->engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
),
@@ -486,13 +486,13 @@ TRACE_EVENT(i915_gem_ring_dispatch,
),
TP_fast_assign(
- struct intel_engine_cs *ring =
- i915_gem_request_get_ring(req);
- __entry->dev = ring->dev->primary->index;
- __entry->ring = ring->id;
+ struct intel_engine_cs *engine =
+ i915_gem_request_get_engine(req);
+ __entry->dev = engine->dev->primary->index;
+ __entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->flags = flags;
- i915_trace_irq_get(ring, req);
+ i915_trace_irq_get(engine, req);
),
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,8 +511,8 @@ TRACE_EVENT(i915_gem_ring_flush,
),
TP_fast_assign(
- __entry->dev = req->ring->dev->primary->index;
- __entry->ring = req->ring->id;
+ __entry->dev = req->engine->dev->primary->index;
+ __entry->ring = req->engine->id;
__entry->invalidate = invalidate;
__entry->flush = flush;
),
@@ -533,10 +533,10 @@ DECLARE_EVENT_CLASS(i915_gem_request,
),
TP_fast_assign(
- struct intel_engine_cs *ring =
- i915_gem_request_get_ring(req);
- __entry->dev = ring->dev->primary->index;
- __entry->ring = ring->id;
+ struct intel_engine_cs *engine =
+ i915_gem_request_get_engine(req);
+ __entry->dev = engine->dev->primary->index;
+ __entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
),
@@ -550,8 +550,8 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
);
TRACE_EVENT(i915_gem_request_notify,
- TP_PROTO(struct intel_engine_cs *ring),
- TP_ARGS(ring),
+ TP_PROTO(struct intel_engine_cs *engine),
+ TP_ARGS(engine),
TP_STRUCT__entry(
__field(u32, dev)
@@ -560,9 +560,9 @@ TRACE_EVENT(i915_gem_request_notify,
),
TP_fast_assign(
- __entry->dev = ring->dev->primary->index;
- __entry->ring = ring->id;
- __entry->seqno = ring->get_seqno(ring, false);
+ __entry->dev = engine->dev->primary->index;
+ __entry->ring = engine->id;
+ __entry->seqno = engine->get_seqno(engine);
),
TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -597,13 +597,13 @@ TRACE_EVENT(i915_gem_request_wait_begin,
* less desirable.
*/
TP_fast_assign(
- struct intel_engine_cs *ring =
- i915_gem_request_get_ring(req);
- __entry->dev = ring->dev->primary->index;
- __entry->ring = ring->id;
+ struct intel_engine_cs *engine =
+ i915_gem_request_get_engine(req);
+ __entry->dev = engine->dev->primary->index;
+ __entry->ring = engine->id;
__entry->seqno = i915_gem_request_get_seqno(req);
__entry->blocking =
- mutex_is_locked(&ring->dev->struct_mutex);
+ mutex_is_locked(&engine->dev->struct_mutex);
),
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -777,9 +777,9 @@ DEFINE_EVENT(i915_context, i915_context_free,
* called only if full ppgtt is enabled.
*/
TRACE_EVENT(switch_mm,
- TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
+ TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to),
- TP_ARGS(ring, to),
+ TP_ARGS(engine, to),
TP_STRUCT__entry(
__field(u32, ring)
@@ -789,10 +789,10 @@ TRACE_EVENT(switch_mm,
),
TP_fast_assign(
- __entry->ring = ring->id;
+ __entry->ring = engine->id;
__entry->to = to;
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
- __entry->dev = ring->dev->primary->index;
+ __entry->dev = engine->dev->primary->index;
),
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index dea7429be..d02efb8ca 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -181,8 +181,8 @@ static int vgt_balloon_space(struct drm_mm *mm,
int intel_vgt_balloon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
- unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
@@ -202,19 +202,19 @@ int intel_vgt_balloon(struct drm_device *dev)
DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
unmappable_base, unmappable_size / 1024);
- if (mappable_base < ggtt_vm->start ||
- mappable_end > dev_priv->gtt.mappable_end ||
- unmappable_base < dev_priv->gtt.mappable_end ||
- unmappable_end > ggtt_vm_end) {
+ if (mappable_base < ggtt->base.start ||
+ mappable_end > ggtt->mappable_end ||
+ unmappable_base < ggtt->mappable_end ||
+ unmappable_end > ggtt_end) {
DRM_ERROR("Invalid ballooning configuration!\n");
return -EINVAL;
}
/* Unmappable graphic memory ballooning */
- if (unmappable_base > dev_priv->gtt.mappable_end) {
- ret = vgt_balloon_space(&ggtt_vm->mm,
+ if (unmappable_base > ggtt->mappable_end) {
+ ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[2],
- dev_priv->gtt.mappable_end,
+ ggtt->mappable_end,
unmappable_base);
if (ret)
@@ -225,30 +225,30 @@ int intel_vgt_balloon(struct drm_device *dev)
* No need to partition out the last physical page,
* because it is reserved for the guard page.
*/
- if (unmappable_end < ggtt_vm_end - PAGE_SIZE) {
- ret = vgt_balloon_space(&ggtt_vm->mm,
+ if (unmappable_end < ggtt_end - PAGE_SIZE) {
+ ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[3],
unmappable_end,
- ggtt_vm_end - PAGE_SIZE);
+ ggtt_end - PAGE_SIZE);
if (ret)
goto err;
}
/* Mappable graphic memory ballooning */
- if (mappable_base > ggtt_vm->start) {
- ret = vgt_balloon_space(&ggtt_vm->mm,
+ if (mappable_base > ggtt->base.start) {
+ ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[0],
- ggtt_vm->start, mappable_base);
+ ggtt->base.start, mappable_base);
if (ret)
goto err;
}
- if (mappable_end < dev_priv->gtt.mappable_end) {
- ret = vgt_balloon_space(&ggtt_vm->mm,
+ if (mappable_end < ggtt->mappable_end) {
+ ret = vgt_balloon_space(&ggtt->base.mm,
&bl_info.space[1],
mappable_end,
- dev_priv->gtt.mappable_end);
+ ggtt->mappable_end);
if (ret)
goto err;
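
intel_vgt_balloon() reserves up to four GGTT intervals around the guest's mappable and unmappable ranges, skipping any interval that turns out empty. A sketch of that interval bookkeeping with hypothetical addresses; the real ranges come from the vGPU balloon info page:

#include <stdio.h>

struct range { unsigned long start, end; };

/* Balloon out [hole.start, hole.end) only when it is non-empty, the
 * same guard the four vgt_balloon_space() calls apply above. */
static void balloon(struct range hole, const char *name)
{
	if (hole.start >= hole.end)
		return;
	printf("%s: ballooning [0x%lx, 0x%lx)\n", name, hole.start, hole.end);
}

int main(void)
{
	/* hypothetical layout: GGTT [0, 0x100000), last page is the guard */
	struct range below_mappable   = { 0x00000, 0x20000 };
	struct range above_mappable   = { 0x60000, 0x80000 };
	struct range below_unmappable = { 0x80000, 0xa0000 };
	struct range above_unmappable = { 0xe0000, 0x100000 - 0x1000 };

	balloon(below_mappable, "space[0]");
	balloon(above_mappable, "space[1]");
	balloon(below_unmappable, "space[2]");
	balloon(above_unmappable, "space[3]");
	return 0;
}
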
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index e7c1686e4..50ff90aea 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -99,6 +99,8 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
crtc_state->fb_changed = false;
+ crtc_state->wm.need_postvbl_update = false;
+ crtc_state->fb_bits = 0;
return &crtc_state->base;
}
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 7d281b400..02a7527ce 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -372,7 +372,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
if (WARN_ON(port == PORT_A))
return;
- if (HAS_PCH_IBX(dev_priv->dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
aud_config = IBX_AUD_CFG(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -561,23 +561,21 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
}
/**
- * intel_init_audio - Set up chip specific audio functions
- * @dev: drm device
+ * intel_init_audio_hooks - Set up chip specific audio hooks
+ * @dev_priv: device private
*/
-void intel_init_audio(struct drm_device *dev)
+void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_G4X(dev)) {
+ if (IS_G4X(dev_priv)) {
dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
- } else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) {
+ } else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) {
dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
}
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index bf62a19c8..b9022fa05 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -29,7 +29,9 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "intel_bios.h"
+
+#define _INTEL_BIOS_PRIVATE
+#include "intel_vbt_defs.h"
/**
* DOC: Video BIOS Table (VBT)
@@ -56,8 +58,6 @@
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
-static int panel_type;
-
/* Get BDB block size given a pointer to Block ID. */
static u32 _get_blocksize(const u8 *block_base)
{
@@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
else
panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
+ panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) |
+ dvo_timing->himage_lo;
+ panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
+ dvo_timing->vimage_lo;
+
/* Some VBTs have bogus h/vtotal values */
if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -203,17 +208,32 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
+ int panel_type;
int drrs_mode;
+ int ret;
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
- if (lvds_options->panel_type == 0xff)
- return;
- panel_type = lvds_options->panel_type;
+ ret = intel_opregion_get_panel_type(dev_priv->dev);
+ if (ret >= 0) {
+ WARN_ON(ret > 0xf);
+ panel_type = ret;
+ DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type);
+ } else {
+ if (lvds_options->panel_type > 0xf) {
+ DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n",
+ lvds_options->panel_type);
+ return;
+ }
+ panel_type = lvds_options->panel_type;
+ DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type);
+ }
+
+ dev_priv->vbt.panel_type = panel_type;
drrs_mode = (lvds_options->dps_panel_type_bits
>> (panel_type * 2)) & MODE_MASK;
@@ -249,7 +269,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
- lvds_options->panel_type);
+ panel_type);
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
if (!panel_fixed_mode)
@@ -264,7 +284,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
lvds_lfp_data_ptrs,
- lvds_options->panel_type);
+ panel_type);
if (fp_timing) {
/* check the resolution, just to be sure */
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
@@ -282,6 +302,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
{
const struct bdb_lfp_backlight_data *backlight_data;
const struct bdb_lfp_backlight_data_entry *entry;
+ int panel_type = dev_priv->vbt.panel_type;
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
@@ -480,7 +501,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
child->slave_addr,
(child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
- p_mapping = &(dev_priv->sdvo_mappings[child->dvo_port - 1]);
+ p_mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
if (!p_mapping->initialized) {
p_mapping->dvo_port = child->dvo_port;
p_mapping->slave_addr = child->slave_addr;
@@ -525,10 +546,7 @@ parse_driver_features(struct drm_i915_private *dev_priv,
return;
if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
- dev_priv->vbt.edp_support = 1;
-
- if (driver->dual_frequency)
- dev_priv->render_reclock_avail = true;
+ dev_priv->vbt.edp.support = 1;
DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
/*
@@ -547,23 +565,24 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
const struct bdb_edp *edp;
const struct edp_power_seq *edp_pps;
const struct edp_link_params *edp_link_params;
+ int panel_type = dev_priv->vbt.panel_type;
edp = find_section(bdb, BDB_EDP);
if (!edp) {
- if (dev_priv->vbt.edp_support)
+ if (dev_priv->vbt.edp.support)
DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
return;
}
switch ((edp->color_depth >> (panel_type * 2)) & 3) {
case EDP_18BPP:
- dev_priv->vbt.edp_bpp = 18;
+ dev_priv->vbt.edp.bpp = 18;
break;
case EDP_24BPP:
- dev_priv->vbt.edp_bpp = 24;
+ dev_priv->vbt.edp.bpp = 24;
break;
case EDP_30BPP:
- dev_priv->vbt.edp_bpp = 30;
+ dev_priv->vbt.edp.bpp = 30;
break;
}
@@ -571,14 +590,14 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
edp_pps = &edp->power_seqs[panel_type];
edp_link_params = &edp->link_params[panel_type];
- dev_priv->vbt.edp_pps = *edp_pps;
+ dev_priv->vbt.edp.pps = *edp_pps;
switch (edp_link_params->rate) {
case EDP_RATE_1_62:
- dev_priv->vbt.edp_rate = DP_LINK_BW_1_62;
+ dev_priv->vbt.edp.rate = DP_LINK_BW_1_62;
break;
case EDP_RATE_2_7:
- dev_priv->vbt.edp_rate = DP_LINK_BW_2_7;
+ dev_priv->vbt.edp.rate = DP_LINK_BW_2_7;
break;
default:
DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
@@ -588,13 +607,13 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
switch (edp_link_params->lanes) {
case EDP_LANE_1:
- dev_priv->vbt.edp_lanes = 1;
+ dev_priv->vbt.edp.lanes = 1;
break;
case EDP_LANE_2:
- dev_priv->vbt.edp_lanes = 2;
+ dev_priv->vbt.edp.lanes = 2;
break;
case EDP_LANE_4:
- dev_priv->vbt.edp_lanes = 4;
+ dev_priv->vbt.edp.lanes = 4;
break;
default:
DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
@@ -604,16 +623,16 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
switch (edp_link_params->preemphasis) {
case EDP_PREEMPHASIS_NONE:
- dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
+ dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
break;
case EDP_PREEMPHASIS_3_5dB:
- dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
+ dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
break;
case EDP_PREEMPHASIS_6dB:
- dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
+ dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
break;
case EDP_PREEMPHASIS_9_5dB:
- dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
+ dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
break;
default:
DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
@@ -623,16 +642,16 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
switch (edp_link_params->vswing) {
case EDP_VSWING_0_4V:
- dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+ dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
break;
case EDP_VSWING_0_6V:
- dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+ dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
break;
case EDP_VSWING_0_8V:
- dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
break;
case EDP_VSWING_1_2V:
- dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
break;
default:
DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
@@ -645,10 +664,10 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
/* Don't read from VBT if module parameter has valid value*/
if (i915.edp_vswing) {
- dev_priv->edp_low_vswing = i915.edp_vswing == 1;
+ dev_priv->vbt.edp.low_vswing = i915.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
- dev_priv->edp_low_vswing = vswing == 0;
+ dev_priv->vbt.edp.low_vswing = vswing == 0;
}
}
}
@@ -658,6 +677,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
{
const struct bdb_psr *psr;
const struct psr_table *psr_table;
+ int panel_type = dev_priv->vbt.panel_type;
psr = find_section(bdb, BDB_PSR);
if (!psr) {
@@ -704,9 +724,10 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
const struct bdb_mipi_config *start;
const struct mipi_config *config;
const struct mipi_pps_data *pps;
+ int panel_type = dev_priv->vbt.panel_type;
/* parse MIPI blocks only if LFP type is MIPI */
- if (!dev_priv->vbt.has_mipi)
+ if (!intel_bios_is_dsi_present(dev_priv, NULL))
return;
/* Initialize this to undefined indicating no generic MIPI support */
@@ -911,6 +932,7 @@ static void
parse_mipi_sequence(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
+ int panel_type = dev_priv->vbt.panel_type;
const struct bdb_mipi_sequence *sequence;
const u8 *seq_data;
u32 seq_size;
@@ -1124,7 +1146,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
}
/* Parse the I_boost config for SKL and above */
- if (bdb->version >= 196 && (child->common.flags_1 & IBOOST_ENABLE)) {
+ if (bdb->version >= 196 && child->common.iboost) {
info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
port_name(port), info->dp_boost_level);
@@ -1170,7 +1192,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
}
if (bdb->version < 106) {
expected_size = 22;
- } else if (bdb->version < 109) {
+ } else if (bdb->version < 111) {
expected_size = 27;
} else if (bdb->version < 195) {
BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
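
Note the version cutoff for the 27-byte layout is corrected here from 109 to 111. The ladder as it stands after this hunk, written as a small pure function for reference (the handling for versions >= 195 falls outside this excerpt, so it is left as an explicit assumption):

#include <stddef.h>

/* Sketch of the BDB version -> expected child device size ladder. */
static size_t expected_child_size(unsigned int version)
{
	if (version < 106)
		return 22;
	if (version < 111)	/* cutoff widened from < 109 by this patch */
		return 27;
	if (version < 195)
		return 33;	/* sizeof(struct old_child_dev_config) */
	/* v195+ sizes are validated elsewhere; not shown in this excerpt */
	return 0;
}
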
@@ -1232,14 +1254,6 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
continue;
}
- if (p_child->common.dvo_port >= DVO_PORT_MIPIA
- && p_child->common.dvo_port <= DVO_PORT_MIPID
- &&p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT) {
- DRM_DEBUG_KMS("Found MIPI as LFP\n");
- dev_priv->vbt.has_mipi = 1;
- dev_priv->vbt.dsi.port = p_child->common.dvo_port;
- }
-
child_dev_ptr = dev_priv->vbt.child_dev + count;
count++;
@@ -1250,6 +1264,19 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
*/
memcpy(child_dev_ptr, p_child,
min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
+
+ /*
+ * copied full block, now init values when they are not
+ * available in current version
+ */
+ if (bdb->version < 196) {
+ /* Set default values for bits added from v196 */
+ child_dev_ptr->common.iboost = 0;
+ child_dev_ptr->common.hpd_invert = 0;
+ }
+
+ if (bdb->version < 192)
+ child_dev_ptr->common.lspcon = 0;
}
return;
}
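
The copy above is deliberately bounded by min_t() so a VBT that declares a larger child_dev_size than the driver's struct cannot overrun it, and fields that postdate the VBT's version are then forced to safe defaults. A sketch of the idiom, with hypothetical field names standing in for the real child config:

#include <string.h>

struct child_cfg {
	unsigned char legacy[33];	/* fields present in all versions */
	unsigned char iboost;		/* added in v196 (per this patch) */
	unsigned char hpd_invert;	/* added in v196 */
};

static void copy_child(struct child_cfg *dst, const void *src,
		       size_t src_size, unsigned int version)
{
	size_t n = src_size < sizeof(*dst) ? src_size : sizeof(*dst);

	memset(dst, 0, sizeof(*dst));
	memcpy(dst, src, n);	/* never copy past either buffer */

	if (version < 196) {
		/* bytes beyond the legacy fields may be stale: reset them */
		dst->iboost = 0;
		dst->hpd_invert = 0;
	}
}
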
@@ -1431,3 +1458,285 @@ intel_bios_init(struct drm_i915_private *dev_priv)
return 0;
}
+
+/**
+ * intel_bios_is_tv_present - is integrated TV present in VBT
+ * @dev_priv: i915 device instance
+ *
+ * Return true if TV is present. If no child devices were parsed from VBT,
+ * assume TV is present.
+ */
+bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
+{
+ union child_device_config *p_child;
+ int i;
+
+ if (!dev_priv->vbt.int_tv_support)
+ return false;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return true;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ p_child = dev_priv->vbt.child_dev + i;
+ /*
+ * If the device type is not TV, continue.
+ */
+ switch (p_child->old.device_type) {
+ case DEVICE_TYPE_INT_TV:
+ case DEVICE_TYPE_TV:
+ case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
+ break;
+ default:
+ continue;
+ }
+ /* The device is regarded as present only when its
+ * addin_offset is non-zero.
+ */
+ if (p_child->old.addin_offset)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * intel_bios_is_lvds_present - is LVDS present in VBT
+ * @dev_priv: i915 device instance
+ * @i2c_pin: i2c pin for LVDS if present
+ *
+ * Return true if LVDS is present. If no child devices were parsed from VBT,
+ * assume LVDS is present.
+ */
+bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
+{
+ int i;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return true;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ union child_device_config *uchild = dev_priv->vbt.child_dev + i;
+ struct old_child_dev_config *child = &uchild->old;
+
+ /* If the device type is not LFP, continue.
+ * We have to check both the new identifiers as well as the
+ * old for compatibility with some BIOSes.
+ */
+ if (child->device_type != DEVICE_TYPE_INT_LFP &&
+ child->device_type != DEVICE_TYPE_LFP)
+ continue;
+
+ if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
+ *i2c_pin = child->i2c_pin;
+
+ /* However, we cannot trust the BIOS writers to populate
+ * the VBT correctly. Since LVDS requires additional
+ * information from AIM blocks, a non-zero addin offset is
+ * a good indicator that the LVDS is actually present.
+ */
+ if (child->addin_offset)
+ return true;
+
+ /* But even then some BIOS writers perform some black magic
+ * and instantiate the device without reference to any
+ * additional data. Trust that if the VBT was written into
+ * the OpRegion then they have validated the LVDS's existence.
+ */
+ if (dev_priv->opregion.vbt)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * intel_bios_is_port_present - is the specified digital port present
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if the device in %port is present.
+ */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
+{
+ static const struct {
+ u16 dp, hdmi;
+ } port_mapping[] = {
+ [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+ [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ };
+ int i;
+
+ /* FIXME maybe deal with port A as well? */
+ if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+ return false;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ const union child_device_config *p_child =
+ &dev_priv->vbt.child_dev[i];
+ if ((p_child->common.dvo_port == port_mapping[port].dp ||
+ p_child->common.dvo_port == port_mapping[port].hdmi) &&
+ (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * intel_bios_is_port_edp - is the device in given port eDP
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if the device in %port is eDP.
+ */
+bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+{
+ union child_device_config *p_child;
+ static const short port_mapping[] = {
+ [PORT_B] = DVO_PORT_DPB,
+ [PORT_C] = DVO_PORT_DPC,
+ [PORT_D] = DVO_PORT_DPD,
+ [PORT_E] = DVO_PORT_DPE,
+ };
+ int i;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ p_child = dev_priv->vbt.child_dev + i;
+
+ if (p_child->common.dvo_port == port_mapping[port] &&
+ (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
+ (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
+ return true;
+ }
+
+ return false;
+}
+
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
+{
+ static const struct {
+ u16 dp, hdmi;
+ } port_mapping[] = {
+ /*
+ * Buggy VBTs may declare DP ports as having
+ * HDMI type dvo_port :( So let's check both.
+ */
+ [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+ [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ };
+ int i;
+
+ if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
+ return false;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ const union child_device_config *p_child =
+ &dev_priv->vbt.child_dev[i];
+
+ if ((p_child->common.dvo_port == port_mapping[port].dp ||
+ p_child->common.dvo_port == port_mapping[port].hdmi) &&
+ (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
+ (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * intel_bios_is_dsi_present - is DSI present in VBT
+ * @dev_priv: i915 device instance
+ * @port: port for DSI if present
+ *
+ * Return true if DSI is present, and return the port in %port.
+ */
+bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
+ enum port *port)
+{
+ union child_device_config *p_child;
+ u8 dvo_port;
+ int i;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ p_child = dev_priv->vbt.child_dev + i;
+
+ if (!(p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT))
+ continue;
+
+ dvo_port = p_child->common.dvo_port;
+
+ switch (dvo_port) {
+ case DVO_PORT_MIPIA:
+ case DVO_PORT_MIPIC:
+ if (port)
+ *port = dvo_port - DVO_PORT_MIPIA;
+ return true;
+ case DVO_PORT_MIPIB:
+ case DVO_PORT_MIPID:
+ DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
+ port_name(dvo_port - DVO_PORT_MIPIA));
+ break;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * intel_bios_is_port_hpd_inverted - is HPD inverted for %port
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if HPD should be inverted for %port.
+ */
+bool
+intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!IS_BROXTON(dev_priv)))
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ if (!dev_priv->vbt.child_dev[i].common.hpd_invert)
+ continue;
+
+ switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
+ case DVO_PORT_DPA:
+ case DVO_PORT_HDMIA:
+ if (port == PORT_A)
+ return true;
+ break;
+ case DVO_PORT_DPB:
+ case DVO_PORT_HDMIB:
+ if (port == PORT_B)
+ return true;
+ break;
+ case DVO_PORT_DPC:
+ case DVO_PORT_HDMIC:
+ if (port == PORT_C)
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
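
Taken together, the helpers added above give the rest of the driver a narrow query API over the parsed child device list instead of poking at VBT structs directly. A hedged sketch of a caller, using only functions introduced in this patch (the surrounding setup is illustrative, not the driver's actual probe path):

/* Illustrative consumer of the new VBT query helpers. */
static void probe_outputs(struct drm_i915_private *dev_priv)
{
	enum port port;
	u8 pin = 0;

	if (intel_bios_is_lvds_present(dev_priv, &pin))
		/* register the LVDS connector using i2c pin 'pin' */;

	if (intel_bios_is_dsi_present(dev_priv, &port))
		/* bring up DSI on 'port' */;

	for (port = PORT_B; port <= PORT_E; port++)
		if (intel_bios_is_port_edp(dev_priv, port))
			/* treat this DP port as eDP */;
}
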
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 350d4e0f7..ab0ea315e 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -1,5 +1,5 @@
/*
- * Copyright © 2006 Intel Corporation
+ * Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,543 +19,16 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- *
- */
-
-#ifndef _INTEL_BIOS_H_
-#define _INTEL_BIOS_H_
-
-/**
- * struct vbt_header - VBT Header structure
- * @signature: VBT signature, always starts with "$VBT"
- * @version: Version of this structure
- * @header_size: Size of this structure
- * @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks)
- * @vbt_checksum: Checksum
- * @reserved0: Reserved
- * @bdb_offset: Offset of &struct bdb_header from beginning of VBT
- * @aim_offset: Offsets of add-in data blocks from beginning of VBT
- */
-struct vbt_header {
- u8 signature[20];
- u16 version;
- u16 header_size;
- u16 vbt_size;
- u8 vbt_checksum;
- u8 reserved0;
- u32 bdb_offset;
- u32 aim_offset[4];
-} __packed;
-
-/**
- * struct bdb_header - BDB Header structure
- * @signature: BDB signature "BIOS_DATA_BLOCK"
- * @version: Version of the data block definitions
- * @header_size: Size of this structure
- * @bdb_size: Size of BDB (BDB Header and data blocks)
- */
-struct bdb_header {
- u8 signature[16];
- u16 version;
- u16 header_size;
- u16 bdb_size;
-} __packed;
-
-/* strictly speaking, this is a "skip" block, but it has interesting info */
-struct vbios_data {
- u8 type; /* 0 == desktop, 1 == mobile */
- u8 relstage;
- u8 chipset;
- u8 lvds_present:1;
- u8 tv_present:1;
- u8 rsvd2:6; /* finish byte */
- u8 rsvd3[4];
- u8 signon[155];
- u8 copyright[61];
- u16 code_segment;
- u8 dos_boot_mode;
- u8 bandwidth_percent;
- u8 rsvd4; /* popup memory size */
- u8 resize_pci_bios;
- u8 rsvd5; /* is crt already on ddc2 */
-} __packed;
-
-/*
- * There are several types of BIOS data blocks (BDBs), each block has
- * an ID and size in the first 3 bytes (ID in first, size in next 2).
- * Known types are listed below.
*/
-#define BDB_GENERAL_FEATURES 1
-#define BDB_GENERAL_DEFINITIONS 2
-#define BDB_OLD_TOGGLE_LIST 3
-#define BDB_MODE_SUPPORT_LIST 4
-#define BDB_GENERIC_MODE_TABLE 5
-#define BDB_EXT_MMIO_REGS 6
-#define BDB_SWF_IO 7
-#define BDB_SWF_MMIO 8
-#define BDB_PSR 9
-#define BDB_MODE_REMOVAL_TABLE 10
-#define BDB_CHILD_DEVICE_TABLE 11
-#define BDB_DRIVER_FEATURES 12
-#define BDB_DRIVER_PERSISTENCE 13
-#define BDB_EXT_TABLE_PTRS 14
-#define BDB_DOT_CLOCK_OVERRIDE 15
-#define BDB_DISPLAY_SELECT 16
-/* 17 rsvd */
-#define BDB_DRIVER_ROTATION 18
-#define BDB_DISPLAY_REMOVE 19
-#define BDB_OEM_CUSTOM 20
-#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
-#define BDB_SDVO_LVDS_OPTIONS 22
-#define BDB_SDVO_PANEL_DTDS 23
-#define BDB_SDVO_LVDS_PNP_IDS 24
-#define BDB_SDVO_LVDS_POWER_SEQ 25
-#define BDB_TV_OPTIONS 26
-#define BDB_EDP 27
-#define BDB_LVDS_OPTIONS 40
-#define BDB_LVDS_LFP_DATA_PTRS 41
-#define BDB_LVDS_LFP_DATA 42
-#define BDB_LVDS_BACKLIGHT 43
-#define BDB_LVDS_POWER 44
-#define BDB_MIPI_CONFIG 52
-#define BDB_MIPI_SEQUENCE 53
-#define BDB_SKIP 254 /* VBIOS private block, ignore */
-
-struct bdb_general_features {
- /* bits 1 */
- u8 panel_fitting:2;
- u8 flexaim:1;
- u8 msg_enable:1;
- u8 clear_screen:3;
- u8 color_flip:1;
-
- /* bits 2 */
- u8 download_ext_vbt:1;
- u8 enable_ssc:1;
- u8 ssc_freq:1;
- u8 enable_lfp_on_override:1;
- u8 disable_ssc_ddt:1;
- u8 rsvd7:1;
- u8 display_clock_mode:1;
- u8 rsvd8:1; /* finish byte */
-
- /* bits 3 */
- u8 disable_smooth_vision:1;
- u8 single_dvi:1;
- u8 rsvd9:1;
- u8 fdi_rx_polarity_inverted:1;
- u8 rsvd10:4; /* finish byte */
-
- /* bits 4 */
- u8 legacy_monitor_detect;
-
- /* bits 5 */
- u8 int_crt_support:1;
- u8 int_tv_support:1;
- u8 int_efp_support:1;
- u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
- u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
- u8 rsvd11:3; /* finish byte */
-} __packed;
-
-/* pre-915 */
-#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
-#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
-#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
-#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
-
-/* Pre 915 */
-#define DEVICE_TYPE_NONE 0x00
-#define DEVICE_TYPE_CRT 0x01
-#define DEVICE_TYPE_TV 0x09
-#define DEVICE_TYPE_EFP 0x12
-#define DEVICE_TYPE_LFP 0x22
-/* On 915+ */
-#define DEVICE_TYPE_CRT_DPMS 0x6001
-#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
-#define DEVICE_TYPE_TV_COMPOSITE 0x0209
-#define DEVICE_TYPE_TV_MACROVISION 0x0289
-#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
-#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
-#define DEVICE_TYPE_TV_SCART 0x0209
-#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
-#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
-#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
-#define DEVICE_TYPE_EFP_DVI_I 0x6053
-#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
-#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
-#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
-#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
-#define DEVICE_TYPE_LFP_PANELLINK 0x5012
-#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
-#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
-#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
-#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
-
-#define DEVICE_CFG_NONE 0x00
-#define DEVICE_CFG_12BIT_DVOB 0x01
-#define DEVICE_CFG_12BIT_DVOC 0x02
-#define DEVICE_CFG_24BIT_DVOBC 0x09
-#define DEVICE_CFG_24BIT_DVOCB 0x0a
-#define DEVICE_CFG_DUAL_DVOB 0x11
-#define DEVICE_CFG_DUAL_DVOC 0x12
-#define DEVICE_CFG_DUAL_DVOBC 0x13
-#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
-#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
-
-#define DEVICE_WIRE_NONE 0x00
-#define DEVICE_WIRE_DVOB 0x01
-#define DEVICE_WIRE_DVOC 0x02
-#define DEVICE_WIRE_DVOBC 0x03
-#define DEVICE_WIRE_DVOBB 0x05
-#define DEVICE_WIRE_DVOCC 0x06
-#define DEVICE_WIRE_DVOB_MASTER 0x0d
-#define DEVICE_WIRE_DVOC_MASTER 0x0e
-
-#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
-#define DEVICE_PORT_DVOB 0x01
-#define DEVICE_PORT_DVOC 0x02
/*
- * We used to keep this struct but without any version control. We should avoid
- * using it in the future, but it should be safe to keep using it in the old
- * code. Do not change; we rely on its size.
+ * Please use intel_vbt_defs.h for VBT private data, to hide and abstract away
+ * the VBT from the rest of the driver. Add the parsed, clean data to struct
+ * intel_vbt_data within struct drm_i915_private.
*/
-struct old_child_dev_config {
- u16 handle;
- u16 device_type;
- u8 device_id[10]; /* ascii string */
- u16 addin_offset;
- u8 dvo_port; /* See Device_PORT_* above */
- u8 i2c_pin;
- u8 slave_addr;
- u8 ddc_pin;
- u16 edid_ptr;
- u8 dvo_cfg; /* See DEVICE_CFG_* above */
- u8 dvo2_port;
- u8 i2c2_pin;
- u8 slave2_addr;
- u8 ddc2_pin;
- u8 capabilities;
- u8 dvo_wiring;/* See DEVICE_WIRE_* above */
- u8 dvo2_wiring;
- u16 extended_type;
- u8 dvo_function;
-} __packed;
-
-/* This one contains field offsets that are known to be common for all BDB
- * versions. Notice that the meaning of the contents contents may still change,
- * but at least the offsets are consistent. */
-
-/* Definitions for flags_1 */
-#define IBOOST_ENABLE (1<<3)
-
-struct common_child_dev_config {
- u16 handle;
- u16 device_type;
- u8 not_common1[12];
- u8 dvo_port;
- u8 not_common2[2];
- u8 ddc_pin;
- u16 edid_ptr;
- u8 obsolete;
- u8 flags_1;
- u8 not_common3[13];
- u8 iboost_level;
-} __packed;
-
-
-/* This field changes depending on the BDB version, so the most reliable way to
- * read it is by checking the BDB version and reading the raw pointer. */
-union child_device_config {
- /* This one is safe to be used anywhere, but the code should still check
- * the BDB version. */
- u8 raw[33];
- /* This one should only be kept for legacy code. */
- struct old_child_dev_config old;
- /* This one should also be safe to use anywhere, even without version
- * checks. */
- struct common_child_dev_config common;
-} __packed;
-
-struct bdb_general_definitions {
- /* DDC GPIO */
- u8 crt_ddc_gmbus_pin;
-
- /* DPMS bits */
- u8 dpms_acpi:1;
- u8 skip_boot_crt_detect:1;
- u8 dpms_aim:1;
- u8 rsvd1:5; /* finish byte */
-
- /* boot device bits */
- u8 boot_display[2];
- u8 child_dev_size;
-
- /*
- * Device info:
- * If TV is present, it'll be at devices[0].
- * LVDS will be next, either devices[0] or [1], if present.
- * On some platforms the number of device is 6. But could be as few as
- * 4 if both TV and LVDS are missing.
- * And the device num is related with the size of general definition
- * block. It is obtained by using the following formula:
- * number = (block_size - sizeof(bdb_general_definitions))/
- * defs->child_dev_size;
- */
- uint8_t devices[0];
-} __packed;
-
-/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
-#define MODE_MASK 0x3
-
-struct bdb_lvds_options {
- u8 panel_type;
- u8 rsvd1;
- /* LVDS capabilities, stored in a dword */
- u8 pfit_mode:2;
- u8 pfit_text_mode_enhanced:1;
- u8 pfit_gfx_mode_enhanced:1;
- u8 pfit_ratio_auto:1;
- u8 pixel_dither:1;
- u8 lvds_edid:1;
- u8 rsvd2:1;
- u8 rsvd4;
- /* LVDS Panel channel bits stored here */
- u32 lvds_panel_channel_bits;
- /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
- u16 ssc_bits;
- u16 ssc_freq;
- u16 ssc_ddt;
- /* Panel color depth defined here */
- u16 panel_color_depth;
- /* LVDS panel type bits stored here */
- u32 dps_panel_type_bits;
- /* LVDS backlight control type bits stored here */
- u32 blt_control_type_bits;
-} __packed;
-
-/* LFP pointer table contains entries to the struct below */
-struct bdb_lvds_lfp_data_ptr {
- u16 fp_timing_offset; /* offsets are from start of bdb */
- u8 fp_table_size;
- u16 dvo_timing_offset;
- u8 dvo_table_size;
- u16 panel_pnp_id_offset;
- u8 pnp_table_size;
-} __packed;
-
-struct bdb_lvds_lfp_data_ptrs {
- u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
- struct bdb_lvds_lfp_data_ptr ptr[16];
-} __packed;
-
-/* LFP data has 3 blocks per entry */
-struct lvds_fp_timing {
- u16 x_res;
- u16 y_res;
- u32 lvds_reg;
- u32 lvds_reg_val;
- u32 pp_on_reg;
- u32 pp_on_reg_val;
- u32 pp_off_reg;
- u32 pp_off_reg_val;
- u32 pp_cycle_reg;
- u32 pp_cycle_reg_val;
- u32 pfit_reg;
- u32 pfit_reg_val;
- u16 terminator;
-} __packed;
-
-struct lvds_dvo_timing {
- u16 clock; /**< In 10khz */
- u8 hactive_lo;
- u8 hblank_lo;
- u8 hblank_hi:4;
- u8 hactive_hi:4;
- u8 vactive_lo;
- u8 vblank_lo;
- u8 vblank_hi:4;
- u8 vactive_hi:4;
- u8 hsync_off_lo;
- u8 hsync_pulse_width;
- u8 vsync_pulse_width:4;
- u8 vsync_off:4;
- u8 rsvd0:6;
- u8 hsync_off_hi:2;
- u8 h_image;
- u8 v_image;
- u8 max_hv;
- u8 h_border;
- u8 v_border;
- u8 rsvd1:3;
- u8 digital:2;
- u8 vsync_positive:1;
- u8 hsync_positive:1;
- u8 rsvd2:1;
-} __packed;
-
-struct lvds_pnp_id {
- u16 mfg_name;
- u16 product_code;
- u32 serial;
- u8 mfg_week;
- u8 mfg_year;
-} __packed;
-
-struct bdb_lvds_lfp_data_entry {
- struct lvds_fp_timing fp_timing;
- struct lvds_dvo_timing dvo_timing;
- struct lvds_pnp_id pnp_id;
-} __packed;
-
-struct bdb_lvds_lfp_data {
- struct bdb_lvds_lfp_data_entry data[16];
-} __packed;
-
-#define BDB_BACKLIGHT_TYPE_NONE 0
-#define BDB_BACKLIGHT_TYPE_PWM 2
-
-struct bdb_lfp_backlight_data_entry {
- u8 type:2;
- u8 active_low_pwm:1;
- u8 obsolete1:5;
- u16 pwm_freq_hz;
- u8 min_brightness;
- u8 obsolete2;
- u8 obsolete3;
-} __packed;
-
-struct bdb_lfp_backlight_data {
- u8 entry_size;
- struct bdb_lfp_backlight_data_entry data[16];
- u8 level[16];
-} __packed;
-
-struct aimdb_header {
- char signature[16];
- char oem_device[20];
- u16 aimdb_version;
- u16 aimdb_header_size;
- u16 aimdb_size;
-} __packed;
-
-struct aimdb_block {
- u8 aimdb_id;
- u16 aimdb_size;
-} __packed;
-struct vch_panel_data {
- u16 fp_timing_offset;
- u8 fp_timing_size;
- u16 dvo_timing_offset;
- u8 dvo_timing_size;
- u16 text_fitting_offset;
- u8 text_fitting_size;
- u16 graphics_fitting_offset;
- u8 graphics_fitting_size;
-} __packed;
-
-struct vch_bdb_22 {
- struct aimdb_block aimdb_block;
- struct vch_panel_data panels[16];
-} __packed;
-
-struct bdb_sdvo_lvds_options {
- u8 panel_backlight;
- u8 h40_set_panel_type;
- u8 panel_type;
- u8 ssc_clk_freq;
- u16 als_low_trip;
- u16 als_high_trip;
- u8 sclalarcoeff_tab_row_num;
- u8 sclalarcoeff_tab_row_size;
- u8 coefficient[8];
- u8 panel_misc_bits_1;
- u8 panel_misc_bits_2;
- u8 panel_misc_bits_3;
- u8 panel_misc_bits_4;
-} __packed;
-
-
-#define BDB_DRIVER_FEATURE_NO_LVDS 0
-#define BDB_DRIVER_FEATURE_INT_LVDS 1
-#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
-#define BDB_DRIVER_FEATURE_EDP 3
-
-struct bdb_driver_features {
- u8 boot_dev_algorithm:1;
- u8 block_display_switch:1;
- u8 allow_display_switch:1;
- u8 hotplug_dvo:1;
- u8 dual_view_zoom:1;
- u8 int15h_hook:1;
- u8 sprite_in_clone:1;
- u8 primary_lfp_id:1;
-
- u16 boot_mode_x;
- u16 boot_mode_y;
- u8 boot_mode_bpp;
- u8 boot_mode_refresh;
-
- u16 enable_lfp_primary:1;
- u16 selective_mode_pruning:1;
- u16 dual_frequency:1;
- u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
- u16 nt_clone_support:1;
- u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
- u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
- u16 cui_aspect_scaling:1;
- u16 preserve_aspect_ratio:1;
- u16 sdvo_device_power_down:1;
- u16 crt_hotplug:1;
- u16 lvds_config:2;
- u16 tv_hotplug:1;
- u16 hdmi_config:2;
-
- u8 static_display:1;
- u8 reserved2:7;
- u16 legacy_crt_max_x;
- u16 legacy_crt_max_y;
- u8 legacy_crt_max_refresh;
-
- u8 hdmi_termination;
- u8 custom_vbt_version;
- /* Driver features data block */
- u16 rmpm_enabled:1;
- u16 s2ddt_enabled:1;
- u16 dpst_enabled:1;
- u16 bltclt_enabled:1;
- u16 adb_enabled:1;
- u16 drrs_enabled:1;
- u16 grs_enabled:1;
- u16 gpmt_enabled:1;
- u16 tbt_enabled:1;
- u16 psr_enabled:1;
- u16 ips_enabled:1;
- u16 reserved3:4;
- u16 pc_feature_valid:1;
-} __packed;
-
-#define EDP_18BPP 0
-#define EDP_24BPP 1
-#define EDP_30BPP 2
-#define EDP_RATE_1_62 0
-#define EDP_RATE_2_7 1
-#define EDP_LANE_1 0
-#define EDP_LANE_2 1
-#define EDP_LANE_4 3
-#define EDP_PREEMPHASIS_NONE 0
-#define EDP_PREEMPHASIS_3_5dB 1
-#define EDP_PREEMPHASIS_6dB 2
-#define EDP_PREEMPHASIS_9_5dB 3
-#define EDP_VSWING_0_4V 0
-#define EDP_VSWING_0_6V 1
-#define EDP_VSWING_0_8V 2
-#define EDP_VSWING_1_2V 3
+#ifndef _INTEL_BIOS_H_
+#define _INTEL_BIOS_H_
struct edp_power_seq {
u16 t1_t3;
@@ -565,245 +38,37 @@ struct edp_power_seq {
u16 t11_t12;
} __packed;
-struct edp_link_params {
- u8 rate:4;
- u8 lanes:4;
- u8 preemphasis:4;
- u8 vswing:4;
-} __packed;
-
-struct bdb_edp {
- struct edp_power_seq power_seqs[16];
- u32 color_depth;
- struct edp_link_params link_params[16];
- u32 sdrrs_msa_timing_delay;
-
- /* ith bit indicates enabled/disabled for (i+1)th panel */
- u16 edp_s3d_feature;
- u16 edp_t3_optimization;
- u64 edp_vswing_preemph; /* v173 */
-} __packed;
-
-struct psr_table {
- /* Feature bits */
- u8 full_link:1;
- u8 require_aux_to_wakeup:1;
- u8 feature_bits_rsvd:6;
-
- /* Wait times */
- u8 idle_frames:4;
- u8 lines_to_wait:3;
- u8 wait_times_rsvd:1;
-
- /* TP wake up time in multiple of 100 */
- u16 tp1_wakeup_time;
- u16 tp2_tp3_wakeup_time;
-} __packed;
-
-struct bdb_psr {
- struct psr_table psr_table[16];
-} __packed;
-
-/*
- * Driver<->VBIOS interaction occurs through scratch bits in
- * GR18 & SWF*.
- */
-
-/* GR18 bits are set on display switch and hotkey events */
-#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
-#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
-#define GR18_HK_NONE (0x0<<3)
-#define GR18_HK_LFP_STRETCH (0x1<<3)
-#define GR18_HK_TOGGLE_DISP (0x2<<3)
-#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
-#define GR18_HK_POPUP_DISABLED (0x6<<3)
-#define GR18_HK_POPUP_ENABLED (0x7<<3)
-#define GR18_HK_PFIT (0x8<<3)
-#define GR18_HK_APM_CHANGE (0xa<<3)
-#define GR18_HK_MULTIPLE (0xc<<3)
-#define GR18_USER_INT_EN (1<<2)
-#define GR18_A0000_FLUSH_EN (1<<1)
-#define GR18_SMM_EN (1<<0)
-
-/* Set by driver, cleared by VBIOS */
-#define SWF00_YRES_SHIFT 16
-#define SWF00_XRES_SHIFT 0
-#define SWF00_RES_MASK 0xffff
-
-/* Set by VBIOS at boot time and driver at runtime */
-#define SWF01_TV2_FORMAT_SHIFT 8
-#define SWF01_TV1_FORMAT_SHIFT 0
-#define SWF01_TV_FORMAT_MASK 0xffff
-
-#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
-#define SWF10_GTT_OVERRIDE_EN (1<<28)
-#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
-#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
-#define SWF10_OLD_TOGGLE 0x0
-#define SWF10_TOGGLE_LIST_1 0x1
-#define SWF10_TOGGLE_LIST_2 0x2
-#define SWF10_TOGGLE_LIST_3 0x3
-#define SWF10_TOGGLE_LIST_4 0x4
-#define SWF10_PANNING_EN (1<<23)
-#define SWF10_DRIVER_LOADED (1<<22)
-#define SWF10_EXTENDED_DESKTOP (1<<21)
-#define SWF10_EXCLUSIVE_MODE (1<<20)
-#define SWF10_OVERLAY_EN (1<<19)
-#define SWF10_PLANEB_HOLDOFF (1<<18)
-#define SWF10_PLANEA_HOLDOFF (1<<17)
-#define SWF10_VGA_HOLDOFF (1<<16)
-#define SWF10_ACTIVE_DISP_MASK 0xffff
-#define SWF10_PIPEB_LFP2 (1<<15)
-#define SWF10_PIPEB_EFP2 (1<<14)
-#define SWF10_PIPEB_TV2 (1<<13)
-#define SWF10_PIPEB_CRT2 (1<<12)
-#define SWF10_PIPEB_LFP (1<<11)
-#define SWF10_PIPEB_EFP (1<<10)
-#define SWF10_PIPEB_TV (1<<9)
-#define SWF10_PIPEB_CRT (1<<8)
-#define SWF10_PIPEA_LFP2 (1<<7)
-#define SWF10_PIPEA_EFP2 (1<<6)
-#define SWF10_PIPEA_TV2 (1<<5)
-#define SWF10_PIPEA_CRT2 (1<<4)
-#define SWF10_PIPEA_LFP (1<<3)
-#define SWF10_PIPEA_EFP (1<<2)
-#define SWF10_PIPEA_TV (1<<1)
-#define SWF10_PIPEA_CRT (1<<0)
-
-#define SWF11_MEMORY_SIZE_SHIFT 16
-#define SWF11_SV_TEST_EN (1<<15)
-#define SWF11_IS_AGP (1<<14)
-#define SWF11_DISPLAY_HOLDOFF (1<<13)
-#define SWF11_DPMS_REDUCED (1<<12)
-#define SWF11_IS_VBE_MODE (1<<11)
-#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
-#define SWF11_DPMS_MASK 0x07
-#define SWF11_DPMS_OFF (1<<2)
-#define SWF11_DPMS_SUSPEND (1<<1)
-#define SWF11_DPMS_STANDBY (1<<0)
-#define SWF11_DPMS_ON 0
-
-#define SWF14_GFX_PFIT_EN (1<<31)
-#define SWF14_TEXT_PFIT_EN (1<<30)
-#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
-#define SWF14_POPUP_EN (1<<28)
-#define SWF14_DISPLAY_HOLDOFF (1<<27)
-#define SWF14_DISP_DETECT_EN (1<<26)
-#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
-#define SWF14_DRIVER_STATUS (1<<24)
-#define SWF14_OS_TYPE_WIN9X (1<<23)
-#define SWF14_OS_TYPE_WINNT (1<<22)
-/* 21:19 rsvd */
-#define SWF14_PM_TYPE_MASK 0x00070000
-#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
-#define SWF14_PM_ACPI (0x3 << 16)
-#define SWF14_PM_APM_12 (0x2 << 16)
-#define SWF14_PM_APM_11 (0x1 << 16)
-#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
- /* if GR18 indicates a display switch */
-#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
-#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
-#define SWF14_DS_PIPEB_TV2_EN (1<<13)
-#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
-#define SWF14_DS_PIPEB_LFP_EN (1<<11)
-#define SWF14_DS_PIPEB_EFP_EN (1<<10)
-#define SWF14_DS_PIPEB_TV_EN (1<<9)
-#define SWF14_DS_PIPEB_CRT_EN (1<<8)
-#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
-#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
-#define SWF14_DS_PIPEA_TV2_EN (1<<5)
-#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
-#define SWF14_DS_PIPEA_LFP_EN (1<<3)
-#define SWF14_DS_PIPEA_EFP_EN (1<<2)
-#define SWF14_DS_PIPEA_TV_EN (1<<1)
-#define SWF14_DS_PIPEA_CRT_EN (1<<0)
- /* if GR18 indicates a panel fitting request */
-#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
- /* if GR18 indicates an APM change request */
-#define SWF14_APM_HIBERNATE 0x4
-#define SWF14_APM_SUSPEND 0x3
-#define SWF14_APM_STANDBY 0x1
-#define SWF14_APM_RESTORE 0x0
-
-/* Add the device class for LFP, TV, HDMI */
-#define DEVICE_TYPE_INT_LFP 0x1022
-#define DEVICE_TYPE_INT_TV 0x1009
-#define DEVICE_TYPE_HDMI 0x60D2
-#define DEVICE_TYPE_DP 0x68C6
-#define DEVICE_TYPE_eDP 0x78C6
-
-#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
-#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
-#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
-#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
-#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
-#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
-#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
-#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
-#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
-#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
-#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
-#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
-#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
-#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
-#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
-
-/*
- * Bits we care about when checking for DEVICE_TYPE_eDP
- * Depending on the system, the other bits may or may not
- * be set for eDP outputs.
- */
-#define DEVICE_TYPE_eDP_BITS \
- (DEVICE_TYPE_INTERNAL_CONNECTOR | \
- DEVICE_TYPE_MIPI_OUTPUT | \
- DEVICE_TYPE_COMPOSITE_OUTPUT | \
- DEVICE_TYPE_DUAL_CHANNEL | \
- DEVICE_TYPE_LVDS_SINGALING | \
- DEVICE_TYPE_TMDS_DVI_SIGNALING | \
- DEVICE_TYPE_VIDEO_SIGNALING | \
- DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
- DEVICE_TYPE_ANALOG_OUTPUT)
-
-/* define the DVO port for HDMI output type */
-#define DVO_B 1
-#define DVO_C 2
-#define DVO_D 3
-
-/* Possible values for the "DVO Port" field for versions >= 155: */
-#define DVO_PORT_HDMIA 0
-#define DVO_PORT_HDMIB 1
-#define DVO_PORT_HDMIC 2
-#define DVO_PORT_HDMID 3
-#define DVO_PORT_LVDS 4
-#define DVO_PORT_TV 5
-#define DVO_PORT_CRT 6
-#define DVO_PORT_DPB 7
-#define DVO_PORT_DPC 8
-#define DVO_PORT_DPD 9
-#define DVO_PORT_DPA 10
-#define DVO_PORT_DPE 11
-#define DVO_PORT_HDMIE 12
-#define DVO_PORT_MIPIA 21
-#define DVO_PORT_MIPIB 22
-#define DVO_PORT_MIPIC 23
-#define DVO_PORT_MIPID 24
+/* MIPI Sequence Block definitions */
+enum mipi_seq {
+ MIPI_SEQ_END = 0,
+ MIPI_SEQ_ASSERT_RESET,
+ MIPI_SEQ_INIT_OTP,
+ MIPI_SEQ_DISPLAY_ON,
+ MIPI_SEQ_DISPLAY_OFF,
+ MIPI_SEQ_DEASSERT_RESET,
+ MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
+ MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
+ MIPI_SEQ_POWER_ON, /* sequence block v3+ */
+ MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
+ MIPI_SEQ_MAX
+};
-/* Block 52 contains MIPI Panel info
- * 6 such enteries will there. Index into correct
- * entery is based on the panel_index in #40 LFP
- */
-#define MAX_MIPI_CONFIGURATIONS 6
+enum mipi_seq_element {
+ MIPI_SEQ_ELEM_END = 0,
+ MIPI_SEQ_ELEM_SEND_PKT,
+ MIPI_SEQ_ELEM_DELAY,
+ MIPI_SEQ_ELEM_GPIO,
+ MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
+ MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
+ MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
+ MIPI_SEQ_ELEM_MAX
+};
#define MIPI_DSI_UNDEFINED_PANEL_ID 0
#define MIPI_DSI_GENERIC_PANEL_ID 1
-/*
- * PMIC vs SoC Backlight support specified in pwm_blc
- * field in mipi_config block below.
-*/
-#define PPS_BLC_PMIC 0
-#define PPS_BLC_SOC 1
-
struct mipi_config {
u16 panel_id;
@@ -821,6 +86,8 @@ struct mipi_config {
u32 video_transfer_mode:2;
u32 cabc_supported:1;
+#define PPS_BLC_PMIC 0
+#define PPS_BLC_SOC 1
u32 pwm_blc:1;
/* Bit 13:10 */
@@ -924,12 +191,7 @@ struct mipi_config {
} __packed;
-/* Block 52 contains MIPI configuration block
- * 6 * bdb_mipi_config, followed by 6 pps data
- * block below
- *
- * all delays has a unit of 100us
- */
+/* all delays have a unit of 100us */
struct mipi_pps_data {
u16 panel_on_delay;
u16 bl_enable_delay;
@@ -938,57 +200,4 @@ struct mipi_pps_data {
u16 panel_power_cycle_delay;
} __packed;
-struct bdb_mipi_config {
- struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
- struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
-} __packed;
-
-/* Block 53 contains MIPI sequences as needed by the panel
- * for enabling it. This block can be variable in size and
- * can be maximum of 6 blocks
- */
-struct bdb_mipi_sequence {
- u8 version;
- u8 data[0];
-} __packed;
-
-/* MIPI Sequnece Block definitions */
-enum mipi_seq {
- MIPI_SEQ_END = 0,
- MIPI_SEQ_ASSERT_RESET,
- MIPI_SEQ_INIT_OTP,
- MIPI_SEQ_DISPLAY_ON,
- MIPI_SEQ_DISPLAY_OFF,
- MIPI_SEQ_DEASSERT_RESET,
- MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
- MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
- MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
- MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
- MIPI_SEQ_POWER_ON, /* sequence block v3+ */
- MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
- MIPI_SEQ_MAX
-};
-
-enum mipi_seq_element {
- MIPI_SEQ_ELEM_END = 0,
- MIPI_SEQ_ELEM_SEND_PKT,
- MIPI_SEQ_ELEM_DELAY,
- MIPI_SEQ_ELEM_GPIO,
- MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
- MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
- MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
- MIPI_SEQ_ELEM_MAX
-};
-
-enum mipi_gpio_pin_index {
- MIPI_GPIO_UNDEFINED = 0,
- MIPI_GPIO_PANEL_ENABLE,
- MIPI_GPIO_BL_ENABLE,
- MIPI_GPIO_PWM_ENABLE,
- MIPI_GPIO_RESET_N,
- MIPI_GPIO_PWR_DOWN_R,
- MIPI_GPIO_STDBY_RST_N,
- MIPI_GPIO_MAX
-};
-
#endif /* _INTEL_BIOS_H_ */
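
The mipi_seq/mipi_seq_element enums kept in the slimmed-down header describe a tagged byte stream: a sequence is a run of elements, each introduced by its operation byte and terminated by MIPI_SEQ_ELEM_END. A schematic walker, under the assumption that payload sizes are resolved by a version-aware helper (the real layout differs between sequence block v1 and v3+):

/* Schematic sequence walker; payload sizing is version-dependent and
 * delegated to a hypothetical elem_payload_len() helper. */
static const u8 *exec_sequence(const u8 *data)
{
	while (*data != MIPI_SEQ_ELEM_END) {
		u8 op = *data++;

		switch (op) {
		case MIPI_SEQ_ELEM_DELAY:
			/* sleep for the encoded duration */
			break;
		case MIPI_SEQ_ELEM_SEND_PKT:
			/* transmit a DSI packet */
			break;
		default:
			/* GPIO, I2C, SPI, PMIC ops elided */
			break;
		}
		data += elem_payload_len(op, data);	/* hypothetical */
	}
	return data + 1;	/* skip the END marker */
}
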
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
new file mode 100644
index 000000000..1b3f97449
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -0,0 +1,553 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "intel_drv.h"
+
+#define CTM_COEFF_SIGN (1ULL << 63)
+
+#define CTM_COEFF_1_0 (1ULL << 32)
+#define CTM_COEFF_2_0 (CTM_COEFF_1_0 << 1)
+#define CTM_COEFF_4_0 (CTM_COEFF_2_0 << 1)
+#define CTM_COEFF_8_0 (CTM_COEFF_4_0 << 1)
+#define CTM_COEFF_0_5 (CTM_COEFF_1_0 >> 1)
+#define CTM_COEFF_0_25 (CTM_COEFF_0_5 >> 1)
+#define CTM_COEFF_0_125 (CTM_COEFF_0_25 >> 1)
+
+#define CTM_COEFF_LIMITED_RANGE ((235ULL - 16ULL) * CTM_COEFF_1_0 / 255)
+
+#define CTM_COEFF_NEGATIVE(coeff) (((coeff) & CTM_COEFF_SIGN) != 0)
+#define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1))
+
+#define LEGACY_LUT_LENGTH (sizeof(struct drm_color_lut) * 256)
+
+/*
+ * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
+ * format). This macro takes the coefficient we want transformed and the
+ * number of fractional bits.
+ *
+ * We only have a 9-bit precision window, which slides depending on the value
+ * of the CTM coefficient and we write the value from bit 3. We also round the
+ * value.
+ */
+#define I9XX_CSC_COEFF_FP(coeff, fbits) \
+ (clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
+
+#define I9XX_CSC_COEFF_LIMITED_RANGE \
+ I9XX_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
+#define I9XX_CSC_COEFF_1_0 \
+ ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
+
+static bool crtc_state_is_legacy(struct drm_crtc_state *state)
+{
+ return !state->degamma_lut &&
+ !state->ctm &&
+ state->gamma_lut &&
+ state->gamma_lut->length == LEGACY_LUT_LENGTH;
+}
+
+/*
+ * When using limited range, multiply the matrix given by userspace by
+ * the matrix that we would use for the limited range. We do the
+ * multiplication in U2.30 format.
+ */
+static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
+{
+ int i;
+
+ for (i = 0; i < 9; i++)
+ result[i] = 0;
+
+ for (i = 0; i < 3; i++) {
+ int64_t user_coeff = input[i * 3 + i];
+ uint64_t limited_coeff = CTM_COEFF_LIMITED_RANGE >> 2;
+ uint64_t abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff),
+ 0,
+ CTM_COEFF_4_0 - 1) >> 2;
+
+ result[i * 3 + i] = (limited_coeff * abs_coeff) >> 27;
+ if (CTM_COEFF_NEGATIVE(user_coeff))
+ result[i * 3 + i] |= CTM_COEFF_SIGN;
+ }
+}
+
+/* Set up the pipe CSC unit. */
+static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
+{
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int i, pipe = intel_crtc->pipe;
+ uint16_t coeffs[9] = { 0, };
+
+ if (crtc_state->ctm) {
+ struct drm_color_ctm *ctm =
+ (struct drm_color_ctm *)crtc_state->ctm->data;
+ uint64_t input[9] = { 0, };
+
+ if (intel_crtc->config->limited_color_range) {
+ ctm_mult_by_limited(input, ctm->matrix);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(input); i++)
+ input[i] = ctm->matrix[i];
+ }
+
+ /*
+ * Convert fixed point S31.32 input to format supported by the
+ * hardware.
+ */
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+ uint64_t abs_coeff = ((1ULL << 63) - 1) & input[i];
+
+ /*
+ * Clamp input value to min/max supported by
+ * hardware.
+ */
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+ /* sign bit */
+ if (CTM_COEFF_NEGATIVE(input[i]))
+ coeffs[i] |= 1 << 15;
+
+ if (abs_coeff < CTM_COEFF_0_125)
+ coeffs[i] |= (3 << 12) |
+ I9XX_CSC_COEFF_FP(abs_coeff, 12);
+ else if (abs_coeff < CTM_COEFF_0_25)
+ coeffs[i] |= (2 << 12) |
+ I9XX_CSC_COEFF_FP(abs_coeff, 11);
+ else if (abs_coeff < CTM_COEFF_0_5)
+ coeffs[i] |= (1 << 12) |
+ I9XX_CSC_COEFF_FP(abs_coeff, 10);
+ else if (abs_coeff < CTM_COEFF_1_0)
+ coeffs[i] |= I9XX_CSC_COEFF_FP(abs_coeff, 9);
+ else if (abs_coeff < CTM_COEFF_2_0)
+ coeffs[i] |= (7 << 12) |
+ I9XX_CSC_COEFF_FP(abs_coeff, 8);
+ else
+ coeffs[i] |= (6 << 12) |
+ I9XX_CSC_COEFF_FP(abs_coeff, 7);
+ }
+ } else {
+ /*
+ * Load an identity matrix if no coefficients are provided.
+ *
+ * TODO: Check what kind of values actually come out of the
+ * pipe with these coeff/postoff values and adjust to get the
+ * best accuracy. Perhaps we even need to take the bpc value
+ * into consideration.
+ */
+ for (i = 0; i < 3; i++) {
+ if (intel_crtc->config->limited_color_range)
+ coeffs[i * 3 + i] =
+ I9XX_CSC_COEFF_LIMITED_RANGE;
+ else
+ coeffs[i * 3 + i] = I9XX_CSC_COEFF_1_0;
+ }
+ }
+
+ I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeffs[0] << 16 | coeffs[1]);
+ I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeffs[2] << 16);
+
+ I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeffs[3] << 16 | coeffs[4]);
+ I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeffs[5] << 16);
+
+ I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeffs[6] << 16 | coeffs[7]);
+ I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeffs[8] << 16);
+
+ I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+
+ if (INTEL_INFO(dev)->gen > 6) {
+ uint16_t postoff = 0;
+
+ if (intel_crtc->config->limited_color_range)
+ postoff = (16 * (1 << 12) / 255) & 0x1fff;
+
+ I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
+ I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
+
+ I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+ } else {
+ uint32_t mode = CSC_MODE_YUV_TO_RGB;
+
+ if (intel_crtc->config->limited_color_range)
+ mode |= CSC_BLACK_SCREEN_OFFSET;
+
+ I915_WRITE(PIPE_CSC_MODE(pipe), mode);
+ }
+}
+
+/*
+ * Set up the pipe CSC unit on CherryView.
+ */
+static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe = to_intel_crtc(crtc)->pipe;
+ uint32_t mode;
+
+ if (state->ctm) {
+ struct drm_color_ctm *ctm =
+ (struct drm_color_ctm *) state->ctm->data;
+ uint16_t coeffs[9] = { 0, };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+ uint64_t abs_coeff =
+ ((1ULL << 63) - 1) & ctm->matrix[i];
+
+ /* Round coefficient. */
+ abs_coeff += 1 << (32 - 13);
+ /* Clamp to hardware limits. */
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
+
+ /* Write coefficients in S3.12 format. */
+ if (ctm->matrix[i] & (1ULL << 63))
+ coeffs[i] = 1 << 15;
+ coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
+ coeffs[i] |= (abs_coeff >> 20) & 0xfff;
+ }
+
+ I915_WRITE(CGM_PIPE_CSC_COEFF01(pipe),
+ coeffs[1] << 16 | coeffs[0]);
+ I915_WRITE(CGM_PIPE_CSC_COEFF23(pipe),
+ coeffs[3] << 16 | coeffs[2]);
+ I915_WRITE(CGM_PIPE_CSC_COEFF45(pipe),
+ coeffs[5] << 16 | coeffs[4]);
+ I915_WRITE(CGM_PIPE_CSC_COEFF67(pipe),
+ coeffs[7] << 16 | coeffs[6]);
+ I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
+ }
+
+ mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
+ if (!crtc_state_is_legacy(state)) {
+ mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+ (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
+ }
+ I915_WRITE(CGM_PIPE_MODE(pipe), mode);
+}
+
+void intel_color_set_csc(struct drm_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc_state->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.load_csc_matrix)
+ dev_priv->display.load_csc_matrix(crtc_state);
+}
+
+/* Loads the legacy palette/gamma unit for the CRTC. */
+static void i9xx_load_luts_internal(struct drm_crtc *crtc,
+ struct drm_property_blob *blob)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ int i;
+
+ if (HAS_GMCH_DISPLAY(dev)) {
+ if (intel_crtc->config->has_dsi_encoder)
+ assert_dsi_pll_enabled(dev_priv);
+ else
+ assert_pll_enabled(dev_priv, pipe);
+ }
+
+ if (blob) {
+ struct drm_color_lut *lut = (struct drm_color_lut *) blob->data;
+ for (i = 0; i < 256; i++) {
+ uint32_t word =
+ (drm_color_lut_extract(lut[i].red, 8) << 16) |
+ (drm_color_lut_extract(lut[i].green, 8) << 8) |
+ drm_color_lut_extract(lut[i].blue, 8);
+
+ if (HAS_GMCH_DISPLAY(dev))
+ I915_WRITE(PALETTE(pipe, i), word);
+ else
+ I915_WRITE(LGC_PALETTE(pipe, i), word);
+ }
+ } else {
+ for (i = 0; i < 256; i++) {
+ uint32_t word = (i << 16) | (i << 8) | i;
+
+ if (HAS_GMCH_DISPLAY(dev))
+ I915_WRITE(PALETTE(pipe, i), word);
+ else
+ I915_WRITE(LGC_PALETTE(pipe, i), word);
+ }
+ }
+}
+
+static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
+{
+ i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut);
+}
+
+/* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
+static void haswell_load_luts(struct drm_crtc_state *crtc_state)
+{
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *intel_crtc_state =
+ to_intel_crtc_state(crtc_state);
+ bool reenable_ips = false;
+
+ /*
+ * Workaround: Do not read or write the pipe palette/gamma data while
+ * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+ */
+ if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
+ (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
+ hsw_disable_ips(intel_crtc);
+ reenable_ips = true;
+ }
+
+ intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+ I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
+
+ i9xx_load_luts(crtc_state);
+
+ if (reenable_ips)
+ hsw_enable_ips(intel_crtc);
+}
+
+/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
+static void broadwell_load_luts(struct drm_crtc_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+
+ if (crtc_state_is_legacy(state)) {
+ haswell_load_luts(state);
+ return;
+ }
+
+ I915_WRITE(PREC_PAL_INDEX(pipe),
+ PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
+
+ if (state->degamma_lut) {
+ struct drm_color_lut *lut =
+ (struct drm_color_lut *) state->degamma_lut->data;
+
+ for (i = 0; i < lut_size; i++) {
+ uint32_t word =
+ drm_color_lut_extract(lut[i].red, 10) << 20 |
+ drm_color_lut_extract(lut[i].green, 10) << 10 |
+ drm_color_lut_extract(lut[i].blue, 10);
+
+ I915_WRITE(PREC_PAL_DATA(pipe), word);
+ }
+ } else {
+ for (i = 0; i < lut_size; i++) {
+ uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+
+ I915_WRITE(PREC_PAL_DATA(pipe),
+ (v << 20) | (v << 10) | v);
+ }
+ }
+
+ if (state->gamma_lut) {
+ struct drm_color_lut *lut =
+ (struct drm_color_lut *) state->gamma_lut->data;
+
+ for (i = 0; i < lut_size; i++) {
+ uint32_t word =
+ (drm_color_lut_extract(lut[i].red, 10) << 20) |
+ (drm_color_lut_extract(lut[i].green, 10) << 10) |
+ drm_color_lut_extract(lut[i].blue, 10);
+
+ I915_WRITE(PREC_PAL_DATA(pipe), word);
+ }
+
+ /* Program the max register to clamp values > 1.0. */
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
+ drm_color_lut_extract(lut[i].red, 16));
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
+ drm_color_lut_extract(lut[i].green, 16));
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 2),
+ drm_color_lut_extract(lut[i].blue, 16));
+ } else {
+ for (i = 0; i < lut_size; i++) {
+ uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+
+ I915_WRITE(PREC_PAL_DATA(pipe),
+ (v << 20) | (v << 10) | v);
+ }
+
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), (1 << 16) - 1);
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1);
+ I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1);
+ }
+
+ intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
+ I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT);
+ POSTING_READ(GAMMA_MODE(pipe));
+
+ /*
+ * Reset the index, otherwise it prevents the legacy palette from
+ * being written properly.
+ */
+ I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+}
+
+/* Loads the palette/gamma unit for the CRTC on CherryView. */
+static void cherryview_load_luts(struct drm_crtc_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct drm_color_lut *lut;
+ uint32_t i, lut_size;
+ uint32_t word0, word1;
+
+ if (crtc_state_is_legacy(state)) {
+ /* Turn off degamma/gamma on CGM block. */
+ I915_WRITE(CGM_PIPE_MODE(pipe),
+ (state->ctm ? CGM_PIPE_MODE_CSC : 0));
+ i9xx_load_luts_internal(crtc, state->gamma_lut);
+ return;
+ }
+
+ if (state->degamma_lut) {
+ lut = (struct drm_color_lut *) state->degamma_lut->data;
+ lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
+ for (i = 0; i < lut_size; i++) {
+ /* Write LUT in U0.14 format. */
+ word0 =
+ (drm_color_lut_extract(lut[i].green, 14) << 16) |
+ drm_color_lut_extract(lut[i].blue, 14);
+ word1 = drm_color_lut_extract(lut[i].red, 14);
+
+ I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0), word0);
+ I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1), word1);
+ }
+ }
+
+ if (state->gamma_lut) {
+ lut = (struct drm_color_lut *) state->gamma_lut->data;
+ lut_size = INTEL_INFO(dev)->color.gamma_lut_size;
+ for (i = 0; i < lut_size; i++) {
+ /* Write LUT in U0.10 format. */
+ word0 =
+ (drm_color_lut_extract(lut[i].green, 10) << 16) |
+ drm_color_lut_extract(lut[i].blue, 10);
+ word1 = drm_color_lut_extract(lut[i].red, 10);
+
+ I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0), word0);
+ I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1), word1);
+ }
+ }
+
+ I915_WRITE(CGM_PIPE_MODE(pipe),
+ (state->ctm ? CGM_PIPE_MODE_CSC : 0) |
+ (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+ (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
+
+ /*
+ * Also program a linear LUT in the legacy block (behind the
+ * CGM block).
+ */
+ i9xx_load_luts_internal(crtc, NULL);
+}
+
+void intel_color_load_luts(struct drm_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc_state->crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->display.load_luts(crtc_state);
+}
+
+int intel_color_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->dev;
+ size_t gamma_length, degamma_length;
+
+ degamma_length = INTEL_INFO(dev)->color.degamma_lut_size *
+ sizeof(struct drm_color_lut);
+ gamma_length = INTEL_INFO(dev)->color.gamma_lut_size *
+ sizeof(struct drm_color_lut);
+
+ /*
+ * We allow both degamma & gamma luts at the right size or
+ * NULL.
+ */
+ if ((!crtc_state->degamma_lut ||
+ crtc_state->degamma_lut->length == degamma_length) &&
+ (!crtc_state->gamma_lut ||
+ crtc_state->gamma_lut->length == gamma_length))
+ return 0;
+
+ /*
+ * We also allow no degamma lut and a gamma lut at the legacy
+ * size (256 entries).
+ */
+ if (!crtc_state->degamma_lut &&
+ crtc_state->gamma_lut &&
+ crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH)
+ return 0;
+
+ return -EINVAL;
+}
+
+void intel_color_init(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ drm_mode_crtc_set_gamma_size(crtc, 256);
+
+ if (IS_CHERRYVIEW(dev)) {
+ dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
+ dev_priv->display.load_luts = cherryview_load_luts;
+ } else if (IS_HASWELL(dev)) {
+ dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+ dev_priv->display.load_luts = haswell_load_luts;
+ } else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev) ||
+ IS_BROXTON(dev) || IS_KABYLAKE(dev)) {
+ dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+ dev_priv->display.load_luts = broadwell_load_luts;
+ } else {
+ dev_priv->display.load_luts = i9xx_load_luts;
+ }
+
+ /* Enable color management support when we have degamma & gamma LUTs. */
+ if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
+ INTEL_INFO(dev)->color.gamma_lut_size != 0)
+ drm_helper_crtc_enable_color_mgmt(crtc,
+ INTEL_INFO(dev)->color.degamma_lut_size,
+ INTEL_INFO(dev)->color.gamma_lut_size);
+}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 036429236..3fbb6fc66 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -120,22 +120,16 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
static void intel_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
- int dotclock;
-
pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
- dotclock = pipe_config->port_clock;
-
- if (HAS_PCH_SPLIT(dev))
- ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void hsw_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
intel_ddi_get_config(encoder, pipe_config);
pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
@@ -143,6 +137,8 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_NVSYNC);
pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
+
+ pipe_config->base.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv);
}
/* Note: The caller is required to filter out dpms modes not supported by the
@@ -222,18 +218,26 @@ intel_crt_mode_valid(struct drm_connector *connector,
{
struct drm_device *dev = connector->dev;
int max_dotclk = to_i915(dev)->max_dotclk_freq;
+ int max_clock;
- int max_clock = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
- if (IS_GEN2(dev))
- max_clock = 350000;
- else
+ if (HAS_PCH_LPT(dev))
+ max_clock = 180000;
+ else if (IS_VALLEYVIEW(dev))
+ /*
+ * 270 MHz due to current DPLL limits,
+ * DAC limit supposedly 355 MHz.
+ */
+ max_clock = 270000;
+ else if (IS_GEN3(dev) || IS_GEN4(dev))
max_clock = 400000;
+ else
+ max_clock = 350000;
if (mode->clock > max_clock)
return MODE_CLOCK_HIGH;
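
The new clock-limit ladder is easiest to audit as a pure function of platform. An illustrative restatement with the platform predicates flattened to booleans (names hypothetical):

#include <stdbool.h>

/* CRT dotclock ceilings from the hunk above, in kHz. */
static int crt_max_dotclk_khz(bool has_pch_lpt, bool is_valleyview,
			      bool is_gen3_or_gen4)
{
	if (has_pch_lpt)
		return 180000;
	if (is_valleyview)
		return 270000;	/* DPLL-limited; the DAC itself can do ~355 MHz */
	if (is_gen3_or_gen4)
		return 400000;
	return 350000;		/* everything else, including gen2 */
}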
@@ -267,15 +271,9 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
}
/* FDI must always be 2.7 GHz */
- if (HAS_DDI(dev)) {
- pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
+ if (HAS_DDI(dev))
pipe_config->port_clock = 135000 * 2;
- pipe_config->dpll_hw_state.wrpll = 0;
- pipe_config->dpll_hw_state.spll =
- SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
- }
-
return true;
}
@@ -658,6 +656,8 @@ intel_crt_detect(struct drm_connector *connector, bool force)
else if (INTEL_INFO(dev)->gen < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
+ else if (i915.load_detect_test)
+ status = connector_status_disconnected;
else
status = connector_status_unknown;
intel_release_load_detect_pipe(connector, &tmp, &ctx);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index b1a547378..ecadf8709 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -41,14 +41,22 @@
* be moved to FW_FAILED.
*/
+#define I915_CSR_KBL "/*(DEBLOBBED)*/"
+/*(DEBLOBBED)*/
+#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
+
#define I915_CSR_SKL "/*(DEBLOBBED)*/"
+/*(DEBLOBBED)*/
+#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
+
#define I915_CSR_BXT "/*(DEBLOBBED)*/"
+/*(DEBLOBBED)*/
+#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define FIRMWARE_URL "/*(DEBLOBBED)*/"
-/*(DEBLOBBED)*/
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
+
#define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
@@ -167,12 +175,10 @@ struct stepping_info {
char substepping;
};
-/*
- * Kabylake derivated from Skylake H0, so SKL H0
- * is the right firmware for KBL A0 (revid 0).
- */
static const struct stepping_info kbl_stepping_info[] = {
- {'H', '0'}, {'I', '0'}
+ {'A', '0'}, {'B', '0'}, {'C', '0'},
+ {'D', '0'}, {'E', '0'}, {'F', '0'},
+ {'G', '0'}, {'H', '0'}, {'I', '0'},
};
static const struct stepping_info skl_stepping_info[] = {
@@ -187,28 +193,49 @@ static const struct stepping_info bxt_stepping_info[] = {
{'B', '0'}, {'B', '1'}, {'B', '2'}
};
-static const struct stepping_info *intel_get_stepping_info(struct drm_device *dev)
+static const struct stepping_info no_stepping_info = { '*', '*' };
+
+static const struct stepping_info *
+intel_get_stepping_info(struct drm_i915_private *dev_priv)
{
const struct stepping_info *si;
unsigned int size;
- if (IS_KABYLAKE(dev)) {
+ if (IS_KABYLAKE(dev_priv)) {
size = ARRAY_SIZE(kbl_stepping_info);
si = kbl_stepping_info;
- } else if (IS_SKYLAKE(dev)) {
+ } else if (IS_SKYLAKE(dev_priv)) {
size = ARRAY_SIZE(skl_stepping_info);
si = skl_stepping_info;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
size = ARRAY_SIZE(bxt_stepping_info);
si = bxt_stepping_info;
} else {
- return NULL;
+ size = 0;
}
- if (INTEL_REVID(dev) < size)
- return si + INTEL_REVID(dev);
+ if (INTEL_REVID(dev_priv) < size)
+ return si + INTEL_REVID(dev_priv);
- return NULL;
+ return &no_stepping_info;
+}
+
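
The rework replaces the old NULL-on-unknown contract with a wildcard sentinel that the firmware-stepping match loop later treats as "match anything". A self-contained sketch of the pattern (table contents hypothetical):

#include <stddef.h>

struct step { char stepping, substepping; };

static const struct step table[] = { {'A', '0'}, {'B', '0'}, {'C', '0'} };
static const struct step wildcard = { '*', '*' };

/* Unknown or out-of-range revids fall back to {'*', '*'} instead of
 * NULL, so callers need no error path and still hit wildcard entries. */
static const struct step *lookup_stepping(unsigned int revid)
{
	if (revid < sizeof(table) / sizeof(table[0]))
		return &table[revid];

	return &wildcard;
}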
+static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
+{
+ uint32_t val, mask;
+
+ mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
+
+ if (IS_BROXTON(dev_priv))
+ mask |= DC_STATE_DEBUG_MASK_CORES;
+
+ /* The bits below never need to be cleared afterwards */
+ val = I915_READ(DC_STATE_DEBUG);
+ if ((val & mask) != mask) {
+ val |= mask;
+ I915_WRITE(DC_STATE_DEBUG, val);
+ POSTING_READ(DC_STATE_DEBUG);
+ }
}
/**
@@ -219,19 +246,19 @@ static const struct stepping_info *intel_get_stepping_info(struct drm_device *de
 * Every time the display comes back from a low power state, this function is called to
* copy the firmware from internal memory to registers.
*/
-bool intel_csr_load_program(struct drm_i915_private *dev_priv)
+void intel_csr_load_program(struct drm_i915_private *dev_priv)
{
u32 *payload = dev_priv->csr.dmc_payload;
uint32_t i, fw_size;
if (!IS_GEN9(dev_priv)) {
DRM_ERROR("No CSR support available for this platform\n");
- return false;
+ return;
}
if (!dev_priv->csr.dmc_payload) {
DRM_ERROR("Tried to program CSR with empty payload\n");
- return false;
+ return;
}
fw_size = dev_priv->csr.dmc_fw_size;
@@ -245,34 +272,25 @@ bool intel_csr_load_program(struct drm_i915_private *dev_priv)
dev_priv->csr.dc_state = 0;
- return true;
+ gen9_set_dc_state_debugmask(dev_priv);
}
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
const struct firmware *fw)
{
- struct drm_device *dev = dev_priv->dev;
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header *dmc_header;
struct intel_csr *csr = &dev_priv->csr;
- const struct stepping_info *stepping_info = intel_get_stepping_info(dev);
- char stepping, substepping;
+ const struct stepping_info *si = intel_get_stepping_info(dev_priv);
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
uint32_t i;
uint32_t *dmc_payload;
+ uint32_t required_min_version;
if (!fw)
return NULL;
- if (!stepping_info) {
- DRM_ERROR("Unknown stepping info, firmware loading failed\n");
- return NULL;
- }
-
- stepping = stepping_info->stepping;
- substepping = stepping_info->substepping;
-
 /* Extract CSS Header information */
css_header = (struct intel_css_header *)fw->data;
if (sizeof(struct intel_css_header) !=
@@ -284,15 +302,25 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
- if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
- csr->version < SKL_CSR_VERSION_REQUIRED) {
- DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
+ if (IS_KABYLAKE(dev_priv)) {
+ required_min_version = KBL_CSR_VERSION_REQUIRED;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ required_min_version = SKL_CSR_VERSION_REQUIRED;
+ } else if (IS_BROXTON(dev_priv)) {
+ required_min_version = BXT_CSR_VERSION_REQUIRED;
+ } else {
+ MISSING_CASE(INTEL_REVID(dev_priv));
+ required_min_version = 0;
+ }
+
+ if (csr->version < required_min_version) {
+ DRM_INFO("Refusing to load old DMC firmware v%u.%u,"
" please upgrade to v%u.%u or later"
" [" FIRMWARE_URL "].\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
- CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
- CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED));
+ CSR_VERSION_MAJOR(required_min_version),
+ CSR_VERSION_MINOR(required_min_version));
return NULL;
}
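
For reference, the version comparison above works on a packed u32. A plausible reconstruction of the packing, consistent with how CSR_VERSION() and its accessors are used here (the exact kernel macros may differ):

#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)

With the major number in the high half-word, a plain integer compare such as csr->version < required_min_version orders versions correctly, which is what the check above relies on.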
@@ -312,11 +340,11 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 /* Search for dmc_offset to find firmware binary. */
for (i = 0; i < package_header->num_entries; i++) {
if (package_header->fw_info[i].substepping == '*' &&
- stepping == package_header->fw_info[i].stepping) {
+ si->stepping == package_header->fw_info[i].stepping) {
dmc_offset = package_header->fw_info[i].offset;
break;
- } else if (stepping == package_header->fw_info[i].stepping &&
- substepping == package_header->fw_info[i].substepping) {
+ } else if (si->stepping == package_header->fw_info[i].stepping &&
+ si->substepping == package_header->fw_info[i].substepping) {
dmc_offset = package_header->fw_info[i].offset;
break;
} else if (package_header->fw_info[i].stepping == '*' &&
@@ -324,7 +352,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
dmc_offset = package_header->fw_info[i].offset;
}
if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
- DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
+ DRM_ERROR("Firmware not supported for %c stepping\n",
+ si->stepping);
return NULL;
}
readcount += dmc_offset;
@@ -370,9 +399,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
return NULL;
}
- memcpy(dmc_payload, &fw->data[readcount], nbytes);
-
- return dmc_payload;
+ return memcpy(dmc_payload, &fw->data[readcount], nbytes);
}
static void csr_load_work_fn(struct work_struct *work)
@@ -387,18 +414,12 @@ static void csr_load_work_fn(struct work_struct *work)
ret = reject_firmware(&fw, dev_priv->csr.fw_path,
&dev_priv->dev->pdev->dev);
- if (!fw)
- goto out;
-
- dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
- if (!dev_priv->csr.dmc_payload)
- goto out;
+ if (fw)
+ dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
- /* load csr program during system boot, as needed for DC states */
- intel_csr_load_program(dev_priv);
-
-out:
if (dev_priv->csr.dmc_payload) {
+ intel_csr_load_program(dev_priv);
+
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
DRM_INFO("Finished loading %s (v%u.%u)\n",
@@ -431,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv))
+ csr->fw_path = I915_CSR_KBL;
+ else if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
@@ -452,10 +475,50 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
}
/**
+ * intel_csr_ucode_suspend() - prepare CSR firmware before system suspend
+ * @dev_priv: i915 drm device
+ *
+ * Prepare the DMC firmware before entering system suspend. This includes
+ * flushing pending work items and releasing any resources acquired during
+ * init.
+ */
+void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_CSR(dev_priv))
+ return;
+
+ flush_work(&dev_priv->csr.work);
+
+ /* Drop the reference held in case DMC isn't loaded. */
+ if (!dev_priv->csr.dmc_payload)
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+}
+
+/**
+ * intel_csr_ucode_resume() - init CSR firmware during system resume
+ * @dev_priv: i915 drm device
+ *
+ * Reinitialize the DMC firmware during system resume, reacquiring any
+ * resources released in intel_csr_ucode_suspend().
+ */
+void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_CSR(dev_priv))
+ return;
+
+ /*
+ * Reacquire the reference to keep RPM disabled in case DMC isn't
+ * loaded.
+ */
+ if (!dev_priv->csr.dmc_payload)
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+}
+
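
The new suspend/resume hooks exist to keep the POWER_DOMAIN_INIT reference balanced. A toy model of the pairing (helper names hypothetical):

#include <assert.h>
#include <stdbool.h>

static int init_power_refs = 1;	/* reference taken at ucode init */

static void power_get(void) { init_power_refs++; }
static void power_put(void) { assert(init_power_refs > 0); init_power_refs--; }

/* The init-time reference is dropped when the DMC payload loads, or at
 * suspend if it never loaded; resume retakes it in that same case, so
 * the count neither leaks nor underflows across suspend/resume cycles. */
static void csr_suspend(bool dmc_loaded) { if (!dmc_loaded) power_put(); }
static void csr_resume(bool dmc_loaded)  { if (!dmc_loaded) power_get(); }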
+/**
* intel_csr_ucode_fini() - unload the CSR firmware.
* @dev_priv: i915 drm device.
*
- * Firmmware unloading includes freeing the internal momory and reset the
+ * Firmware unloading includes freeing the internal memory and resetting the
* firmware loading status.
*/
void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
@@ -463,7 +526,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
- flush_work(&dev_priv->csr.work);
+ intel_csr_ucode_suspend(dev_priv);
kfree(dev_priv->csr.dmc_payload);
}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 50f5b0c97..01e523df3 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -315,6 +315,9 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
*dig_port = enc_to_mst(encoder)->primary;
*port = (*dig_port)->port;
break;
+ default:
+ WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
+ /* fallthrough and treat as unknown */
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_HDMI:
@@ -326,9 +329,6 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
*dig_port = NULL;
*port = PORT_E;
break;
- default:
- WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
- break;
}
}
@@ -360,7 +360,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (dev_priv->edp_low_vswing) {
+ if (dev_priv->vbt.edp.low_vswing) {
if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
return skl_y_ddi_translations_edp;
@@ -444,7 +444,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
- if (dev_priv->edp_low_vswing) {
+ if (dev_priv->vbt.edp.low_vswing) {
ddi_translations_edp = bdw_ddi_translations_edp;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
} else {
@@ -637,6 +637,10 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
break;
}
+ rx_ctl_val &= ~FDI_RX_ENABLE;
+ I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ POSTING_READ(FDI_RX_CTL(PIPE_A));
+
temp = I915_READ(DDI_BUF_CTL(PORT_E));
temp &= ~DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
@@ -651,10 +655,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
- rx_ctl_val &= ~FDI_RX_ENABLE;
- I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
- POSTING_READ(FDI_RX_CTL(PIPE_A));
-
/* Reset FDI_RX_MISC pwrdn lanes */
temp = I915_READ(FDI_RX_MISC(PIPE_A));
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
@@ -732,160 +732,6 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
}
#define LC_FREQ 2700
-#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
-
-#define P_MIN 2
-#define P_MAX 64
-#define P_INC 2
-
-/* Constraints for PLL good behavior */
-#define REF_MIN 48
-#define REF_MAX 400
-#define VCO_MIN 2400
-#define VCO_MAX 4800
-
-#define abs_diff(a, b) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- (void) (&__a == &__b); \
- __a > __b ? (__a - __b) : (__b - __a); })
-
-struct hsw_wrpll_rnp {
- unsigned p, n2, r2;
-};
-
-static unsigned hsw_wrpll_get_budget_for_freq(int clock)
-{
- unsigned budget;
-
- switch (clock) {
- case 25175000:
- case 25200000:
- case 27000000:
- case 27027000:
- case 37762500:
- case 37800000:
- case 40500000:
- case 40541000:
- case 54000000:
- case 54054000:
- case 59341000:
- case 59400000:
- case 72000000:
- case 74176000:
- case 74250000:
- case 81000000:
- case 81081000:
- case 89012000:
- case 89100000:
- case 108000000:
- case 108108000:
- case 111264000:
- case 111375000:
- case 148352000:
- case 148500000:
- case 162000000:
- case 162162000:
- case 222525000:
- case 222750000:
- case 296703000:
- case 297000000:
- budget = 0;
- break;
- case 233500000:
- case 245250000:
- case 247750000:
- case 253250000:
- case 298000000:
- budget = 1500;
- break;
- case 169128000:
- case 169500000:
- case 179500000:
- case 202000000:
- budget = 2000;
- break;
- case 256250000:
- case 262500000:
- case 270000000:
- case 272500000:
- case 273750000:
- case 280750000:
- case 281250000:
- case 286000000:
- case 291750000:
- budget = 4000;
- break;
- case 267250000:
- case 268500000:
- budget = 5000;
- break;
- default:
- budget = 1000;
- break;
- }
-
- return budget;
-}
-
-static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
- unsigned r2, unsigned n2, unsigned p,
- struct hsw_wrpll_rnp *best)
-{
- uint64_t a, b, c, d, diff, diff_best;
-
- /* No best (r,n,p) yet */
- if (best->p == 0) {
- best->p = p;
- best->n2 = n2;
- best->r2 = r2;
- return;
- }
-
- /*
- * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
- * freq2k.
- *
- * delta = 1e6 *
- * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
- * freq2k;
- *
- * and we would like delta <= budget.
- *
- * If the discrepancy is above the PPM-based budget, always prefer to
- * improve upon the previous solution. However, if you're within the
- * budget, try to maximize Ref * VCO, that is N / (P * R^2).
- */
- a = freq2k * budget * p * r2;
- b = freq2k * budget * best->p * best->r2;
- diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
- diff_best = abs_diff(freq2k * best->p * best->r2,
- LC_FREQ_2K * best->n2);
- c = 1000000 * diff;
- d = 1000000 * diff_best;
-
- if (a < c && b < d) {
- /* If both are above the budget, pick the closer */
- if (best->p * best->r2 * diff < p * r2 * diff_best) {
- best->p = p;
- best->n2 = n2;
- best->r2 = r2;
- }
- } else if (a >= c && b < d) {
- /* If A is below the threshold but B is above it? Update. */
- best->p = p;
- best->n2 = n2;
- best->r2 = r2;
- } else if (a >= c && b >= d) {
- /* Both are below the limit, so pick the higher n2/(r2*r2) */
- if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
- best->p = p;
- best->n2 = n2;
- best->r2 = r2;
- }
- }
- /* Otherwise a < c && b >= d, do nothing */
-}
static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
i915_reg_t reg)
@@ -1147,363 +993,20 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
bxt_ddi_clock_get(encoder, pipe_config);
}
-static void
-hsw_ddi_calculate_wrpll(int clock /* in Hz */,
- unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
-{
- uint64_t freq2k;
- unsigned p, n2, r2;
- struct hsw_wrpll_rnp best = { 0, 0, 0 };
- unsigned budget;
-
- freq2k = clock / 100;
-
- budget = hsw_wrpll_get_budget_for_freq(clock);
-
- /* Special case handling for 540 pixel clock: bypass WR PLL entirely
- * and directly pass the LC PLL to it. */
- if (freq2k == 5400000) {
- *n2_out = 2;
- *p_out = 1;
- *r2_out = 2;
- return;
- }
-
- /*
- * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
- * the WR PLL.
- *
- * We want R so that REF_MIN <= Ref <= REF_MAX.
- * Injecting R2 = 2 * R gives:
- * REF_MAX * r2 > LC_FREQ * 2 and
- * REF_MIN * r2 < LC_FREQ * 2
- *
- * Which means the desired boundaries for r2 are:
- * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
- *
- */
- for (r2 = LC_FREQ * 2 / REF_MAX + 1;
- r2 <= LC_FREQ * 2 / REF_MIN;
- r2++) {
-
- /*
- * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
- *
- * Once again we want VCO_MIN <= VCO <= VCO_MAX.
- * Injecting R2 = 2 * R and N2 = 2 * N, we get:
- * VCO_MAX * r2 > n2 * LC_FREQ and
- * VCO_MIN * r2 < n2 * LC_FREQ)
- *
- * Which means the desired boundaries for n2 are:
- * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
- */
- for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
- n2 <= VCO_MAX * r2 / LC_FREQ;
- n2++) {
-
- for (p = P_MIN; p <= P_MAX; p += P_INC)
- hsw_wrpll_update_rnp(freq2k, budget,
- r2, n2, p, &best);
- }
- }
-
- *n2_out = best.n2;
- *p_out = best.p;
- *r2_out = best.r2;
-}
-
static bool
hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder)
{
- int clock = crtc_state->port_clock;
-
- if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
- struct intel_shared_dpll *pll;
- uint32_t val;
- unsigned p, n2, r2;
-
- hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
- val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
- WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
- WRPLL_DIVIDER_POST(p);
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- crtc_state->dpll_hw_state.wrpll = val;
-
- pll = intel_get_shared_dpll(intel_crtc, crtc_state);
- if (pll == NULL) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(intel_crtc->pipe));
- return false;
- }
-
- crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
- } else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) {
- struct drm_atomic_state *state = crtc_state->base.state;
- struct intel_shared_dpll_config *spll =
- &intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL];
-
- if (spll->crtc_mask &&
- WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll))
- return false;
-
- crtc_state->shared_dpll = DPLL_ID_SPLL;
- spll->hw_state.spll = crtc_state->dpll_hw_state.spll;
- spll->crtc_mask |= 1 << intel_crtc->pipe;
- }
-
- return true;
-}
-
-struct skl_wrpll_context {
- uint64_t min_deviation; /* current minimal deviation */
- uint64_t central_freq; /* chosen central freq */
- uint64_t dco_freq; /* chosen dco freq */
- unsigned int p; /* chosen divider */
-};
-
-static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
-{
- memset(ctx, 0, sizeof(*ctx));
-
- ctx->min_deviation = U64_MAX;
-}
-
-/* DCO freq must be within +1%/-6% of the DCO central freq */
-#define SKL_DCO_MAX_PDEVIATION 100
-#define SKL_DCO_MAX_NDEVIATION 600
-
-static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
- uint64_t central_freq,
- uint64_t dco_freq,
- unsigned int divider)
-{
- uint64_t deviation;
-
- deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
- central_freq);
-
- /* positive deviation */
- if (dco_freq >= central_freq) {
- if (deviation < SKL_DCO_MAX_PDEVIATION &&
- deviation < ctx->min_deviation) {
- ctx->min_deviation = deviation;
- ctx->central_freq = central_freq;
- ctx->dco_freq = dco_freq;
- ctx->p = divider;
- }
- /* negative deviation */
- } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
- deviation < ctx->min_deviation) {
- ctx->min_deviation = deviation;
- ctx->central_freq = central_freq;
- ctx->dco_freq = dco_freq;
- ctx->p = divider;
- }
-}
-
-static void skl_wrpll_get_multipliers(unsigned int p,
- unsigned int *p0 /* out */,
- unsigned int *p1 /* out */,
- unsigned int *p2 /* out */)
-{
- /* even dividers */
- if (p % 2 == 0) {
- unsigned int half = p / 2;
-
- if (half == 1 || half == 2 || half == 3 || half == 5) {
- *p0 = 2;
- *p1 = 1;
- *p2 = half;
- } else if (half % 2 == 0) {
- *p0 = 2;
- *p1 = half / 2;
- *p2 = 2;
- } else if (half % 3 == 0) {
- *p0 = 3;
- *p1 = half / 3;
- *p2 = 2;
- } else if (half % 7 == 0) {
- *p0 = 7;
- *p1 = half / 7;
- *p2 = 2;
- }
- } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
- *p0 = 3;
- *p1 = 1;
- *p2 = p / 3;
- } else if (p == 5 || p == 7) {
- *p0 = p;
- *p1 = 1;
- *p2 = 1;
- } else if (p == 15) {
- *p0 = 3;
- *p1 = 1;
- *p2 = 5;
- } else if (p == 21) {
- *p0 = 7;
- *p1 = 1;
- *p2 = 3;
- } else if (p == 35) {
- *p0 = 7;
- *p1 = 1;
- *p2 = 5;
- }
-}
-
-struct skl_wrpll_params {
- uint32_t dco_fraction;
- uint32_t dco_integer;
- uint32_t qdiv_ratio;
- uint32_t qdiv_mode;
- uint32_t kdiv;
- uint32_t pdiv;
- uint32_t central_freq;
-};
-
-static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
- uint64_t afe_clock,
- uint64_t central_freq,
- uint32_t p0, uint32_t p1, uint32_t p2)
-{
- uint64_t dco_freq;
-
- switch (central_freq) {
- case 9600000000ULL:
- params->central_freq = 0;
- break;
- case 9000000000ULL:
- params->central_freq = 1;
- break;
- case 8400000000ULL:
- params->central_freq = 3;
- }
-
- switch (p0) {
- case 1:
- params->pdiv = 0;
- break;
- case 2:
- params->pdiv = 1;
- break;
- case 3:
- params->pdiv = 2;
- break;
- case 7:
- params->pdiv = 4;
- break;
- default:
- WARN(1, "Incorrect PDiv\n");
- }
-
- switch (p2) {
- case 5:
- params->kdiv = 0;
- break;
- case 2:
- params->kdiv = 1;
- break;
- case 3:
- params->kdiv = 2;
- break;
- case 1:
- params->kdiv = 3;
- break;
- default:
- WARN(1, "Incorrect KDiv\n");
- }
-
- params->qdiv_ratio = p1;
- params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
-
- dco_freq = p0 * p1 * p2 * afe_clock;
-
- /*
- * Intermediate values are in Hz.
- * Divide by MHz to match bsepc
- */
- params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
- params->dco_fraction =
- div_u64((div_u64(dco_freq, 24) -
- params->dco_integer * MHz(1)) * 0x8000, MHz(1));
-}
-
-static bool
-skl_ddi_calculate_wrpll(int clock /* in Hz */,
- struct skl_wrpll_params *wrpll_params)
-{
- uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
- uint64_t dco_central_freq[3] = {8400000000ULL,
- 9000000000ULL,
- 9600000000ULL};
- static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
- 24, 28, 30, 32, 36, 40, 42, 44,
- 48, 52, 54, 56, 60, 64, 66, 68,
- 70, 72, 76, 78, 80, 84, 88, 90,
- 92, 96, 98 };
- static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
- static const struct {
- const int *list;
- int n_dividers;
- } dividers[] = {
- { even_dividers, ARRAY_SIZE(even_dividers) },
- { odd_dividers, ARRAY_SIZE(odd_dividers) },
- };
- struct skl_wrpll_context ctx;
- unsigned int dco, d, i;
- unsigned int p0, p1, p2;
-
- skl_wrpll_context_init(&ctx);
-
- for (d = 0; d < ARRAY_SIZE(dividers); d++) {
- for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
- for (i = 0; i < dividers[d].n_dividers; i++) {
- unsigned int p = dividers[d].list[i];
- uint64_t dco_freq = p * afe_clock;
-
- skl_wrpll_try_divider(&ctx,
- dco_central_freq[dco],
- dco_freq,
- p);
- /*
- * Skip the remaining dividers if we're sure to
- * have found the definitive divider, we can't
- * improve a 0 deviation.
- */
- if (ctx.min_deviation == 0)
- goto skip_remaining_dividers;
- }
- }
-
-skip_remaining_dividers:
- /*
- * If a solution is found with an even divider, prefer
- * this one.
- */
- if (d == 0 && ctx.p)
- break;
- }
-
- if (!ctx.p) {
- DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
- return false;
- }
+ struct intel_shared_dpll *pll;
- /*
- * gcc incorrectly analyses that these can be used without being
- * initialized. To be fair, it's hard to guess.
- */
- p0 = p1 = p2 = 0;
- skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
- skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
- p0, p1, p2);
+ pll = intel_get_shared_dpll(intel_crtc, crtc_state,
+ intel_encoder);
+ if (!pll)
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(intel_crtc->pipe));
- return true;
+ return pll;
}
static bool
@@ -1512,218 +1015,23 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_encoder *intel_encoder)
{
struct intel_shared_dpll *pll;
- uint32_t ctrl1, cfgcr1, cfgcr2;
- int clock = crtc_state->port_clock;
-
- /*
- * See comment in intel_dpll_hw_state to understand why we always use 0
- * as the DPLL id in this function.
- */
-
- ctrl1 = DPLL_CTRL1_OVERRIDE(0);
-
- if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
- struct skl_wrpll_params wrpll_params = { 0, };
-
- ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
-
- if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
- return false;
-
- cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
- DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
- wrpll_params.dco_integer;
-
- cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
- DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
- DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
- DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
- wrpll_params.central_freq;
- } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_encoder->type == INTEL_OUTPUT_DP_MST) {
- switch (crtc_state->port_clock / 2) {
- case 81000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
- break;
- case 135000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
- break;
- case 270000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
- break;
- }
-
- cfgcr1 = cfgcr2 = 0;
- } else if (intel_encoder->type == INTEL_OUTPUT_EDP) {
- return true;
- } else
- return false;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- crtc_state->dpll_hw_state.ctrl1 = ctrl1;
- crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
- crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
-
- pll = intel_get_shared_dpll(intel_crtc, crtc_state);
+ pll = intel_get_shared_dpll(intel_crtc, crtc_state, intel_encoder);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
return false;
}
- /* shared DPLL id 0 is DPLL 1 */
- crtc_state->ddi_pll_sel = pll->id + 1;
-
return true;
}
-/* bxt clock parameters */
-struct bxt_clk_div {
- int clock;
- uint32_t p1;
- uint32_t p2;
- uint32_t m2_int;
- uint32_t m2_frac;
- bool m2_frac_en;
- uint32_t n;
-};
-
-/* pre-calculated values for DP linkrates */
-static const struct bxt_clk_div bxt_dp_clk_val[] = {
- {162000, 4, 2, 32, 1677722, 1, 1},
- {270000, 4, 1, 27, 0, 0, 1},
- {540000, 2, 1, 27, 0, 0, 1},
- {216000, 3, 2, 32, 1677722, 1, 1},
- {243000, 4, 1, 24, 1258291, 1, 1},
- {324000, 4, 1, 32, 1677722, 1, 1},
- {432000, 3, 1, 32, 1677722, 1, 1}
-};
-
static bool
bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
struct intel_encoder *intel_encoder)
{
- struct intel_shared_dpll *pll;
- struct bxt_clk_div clk_div = {0};
- int vco = 0;
- uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
- uint32_t lanestagger;
- int clock = crtc_state->port_clock;
-
- if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
- intel_clock_t best_clock;
-
- /* Calculate HDMI div */
- /*
- * FIXME: tie the following calculation into
- * i9xx_crtc_compute_clock
- */
- if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
- DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
- clock, pipe_name(intel_crtc->pipe));
- return false;
- }
-
- clk_div.p1 = best_clock.p1;
- clk_div.p2 = best_clock.p2;
- WARN_ON(best_clock.m1 != 2);
- clk_div.n = best_clock.n;
- clk_div.m2_int = best_clock.m2 >> 22;
- clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
- clk_div.m2_frac_en = clk_div.m2_frac != 0;
-
- vco = best_clock.vco;
- } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_encoder->type == INTEL_OUTPUT_EDP) {
- int i;
-
- clk_div = bxt_dp_clk_val[0];
- for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
- if (bxt_dp_clk_val[i].clock == clock) {
- clk_div = bxt_dp_clk_val[i];
- break;
- }
- }
- vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
- }
-
- if (vco >= 6200000 && vco <= 6700000) {
- prop_coef = 4;
- int_coef = 9;
- gain_ctl = 3;
- targ_cnt = 8;
- } else if ((vco > 5400000 && vco < 6200000) ||
- (vco >= 4800000 && vco < 5400000)) {
- prop_coef = 5;
- int_coef = 11;
- gain_ctl = 3;
- targ_cnt = 9;
- } else if (vco == 5400000) {
- prop_coef = 3;
- int_coef = 8;
- gain_ctl = 1;
- targ_cnt = 9;
- } else {
- DRM_ERROR("Invalid VCO\n");
- return false;
- }
-
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- if (clock > 270000)
- lanestagger = 0x18;
- else if (clock > 135000)
- lanestagger = 0x0d;
- else if (clock > 67000)
- lanestagger = 0x07;
- else if (clock > 33000)
- lanestagger = 0x04;
- else
- lanestagger = 0x02;
-
- crtc_state->dpll_hw_state.ebb0 =
- PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
- crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
- crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n);
- crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac;
-
- if (clk_div.m2_frac_en)
- crtc_state->dpll_hw_state.pll3 =
- PORT_PLL_M2_FRAC_ENABLE;
-
- crtc_state->dpll_hw_state.pll6 =
- prop_coef | PORT_PLL_INT_COEFF(int_coef);
- crtc_state->dpll_hw_state.pll6 |=
- PORT_PLL_GAIN_CTL(gain_ctl);
-
- crtc_state->dpll_hw_state.pll8 = targ_cnt;
-
- crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
-
- crtc_state->dpll_hw_state.pll10 =
- PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
- | PORT_PLL_DCO_AMP_OVR_EN_H;
-
- crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
-
- crtc_state->dpll_hw_state.pcsdw12 =
- LANESTAGGER_STRAP_OVRD | lanestagger;
-
- pll = intel_get_shared_dpll(intel_crtc, crtc_state);
- if (pll == NULL) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(intel_crtc->pipe));
- return false;
- }
-
- /* shared DPLL id 0 is DPLL A */
- crtc_state->ddi_pll_sel = pll->id;
-
- return true;
+ return !!intel_get_shared_dpll(intel_crtc, crtc_state, intel_encoder);
}
/*
@@ -1761,6 +1069,8 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
uint32_t temp;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
+ WARN_ON(transcoder_is_dsi(cpu_transcoder));
+
temp = TRANS_MSA_SYNC_CLK;
switch (intel_crtc->config->pipe_bpp) {
case 18:
@@ -2129,7 +1439,7 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
u32 n_entries, i;
uint32_t val;
- if (type == INTEL_OUTPUT_EDP && dev_priv->edp_low_vswing) {
+ if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
ddi_translations = bxt_ddi_translations_edp;
} else if (type == INTEL_OUTPUT_DISPLAYPORT
@@ -2267,24 +1577,6 @@ void intel_ddi_clk_select(struct intel_encoder *encoder,
uint32_t dpll = pipe_config->ddi_pll_sel;
uint32_t val;
- /*
- * DPLL0 is used for eDP and is the only "private" DPLL (as
- * opposed to shared) on SKL
- */
- if (encoder->type == INTEL_OUTPUT_EDP) {
- WARN_ON(dpll != SKL_DPLL0);
-
- val = I915_READ(DPLL_CTRL1);
-
- val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
- DPLL_CTRL1_SSC(dpll) |
- DPLL_CTRL1_LINK_RATE_MASK(dpll));
- val |= pipe_config->dpll_hw_state.ctrl1 << (dpll * 6);
-
- I915_WRITE(DPLL_CTRL1, val);
- POSTING_READ(DPLL_CTRL1);
- }
-
/* DDI -> PLL mapping */
val = I915_READ(DPLL_CTRL2);
@@ -2450,251 +1742,101 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
}
}
-static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
- POSTING_READ(WRPLL_CTL(pll->id));
- udelay(20);
-}
-
-static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
- POSTING_READ(SPLL_CTL);
- udelay(20);
-}
-
-static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- uint32_t val;
-
- val = I915_READ(WRPLL_CTL(pll->id));
- I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL(pll->id));
-}
-
-static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- uint32_t val;
-
- val = I915_READ(SPLL_CTL);
- I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
- POSTING_READ(SPLL_CTL);
-}
-
-static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
-{
- uint32_t val;
-
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
return false;
- val = I915_READ(WRPLL_CTL(pll->id));
- hw_state->wrpll = val;
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
-
- return val & WRPLL_PLL_ENABLE;
-}
-
-static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
-{
- uint32_t val;
+ if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
+ phy);
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
+ }
- val = I915_READ(SPLL_CTL);
- hw_state->spll = val;
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
-
- return val & SPLL_PLL_ENABLE;
-}
-
-
-static const char * const hsw_ddi_pll_names[] = {
- "WRPLL 1",
- "WRPLL 2",
- "SPLL"
-};
+ if (phy == DPIO_PHY1 &&
+ !(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE)) {
+ DRM_DEBUG_DRIVER("DDI PHY 1 powered, but GRC isn't done\n");
-static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
-{
- int i;
+ return false;
+ }
- dev_priv->num_shared_dpll = 3;
+ if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
+ phy);
- for (i = 0; i < 2; i++) {
- dev_priv->shared_dplls[i].id = i;
- dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
- dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable;
- dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable;
- dev_priv->shared_dplls[i].get_hw_state =
- hsw_ddi_wrpll_get_hw_state;
+ return false;
}
- /* SPLL is special, but needs to be initialized anyway.. */
- dev_priv->shared_dplls[i].id = i;
- dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
- dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable;
- dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable;
- dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state;
-
+ return true;
}
-static const char * const skl_ddi_pll_names[] = {
- "DPLL 1",
- "DPLL 2",
- "DPLL 3",
-};
-
-struct skl_dpll_regs {
- i915_reg_t ctl, cfgcr1, cfgcr2;
-};
-
-/* this array is indexed by the *shared* pll id */
-static const struct skl_dpll_regs skl_dpll_regs[3] = {
- {
- /* DPLL 1 */
- .ctl = LCPLL2_CTL,
- .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
- .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
- },
- {
- /* DPLL 2 */
- .ctl = WRPLL_CTL(0),
- .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
- .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
- },
- {
- /* DPLL 3 */
- .ctl = WRPLL_CTL(1),
- .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
- .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
- },
-};
-
-static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- uint32_t val;
- unsigned int dpll;
- const struct skl_dpll_regs *regs = skl_dpll_regs;
+ u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
- /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
- dpll = pll->id + 1;
-
- val = I915_READ(DPLL_CTRL1);
-
- val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
- DPLL_CTRL1_LINK_RATE_MASK(dpll));
- val |= pll->config.hw_state.ctrl1 << (dpll * 6);
-
- I915_WRITE(DPLL_CTRL1, val);
- POSTING_READ(DPLL_CTRL1);
-
- I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
- I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
- POSTING_READ(regs[pll->id].cfgcr1);
- POSTING_READ(regs[pll->id].cfgcr2);
-
- /* the enable bit is always bit 31 */
- I915_WRITE(regs[pll->id].ctl,
- I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
-
- if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(dpll), 5))
- DRM_ERROR("DPLL %d not locked\n", dpll);
+ return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
}
-static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
{
- const struct skl_dpll_regs *regs = skl_dpll_regs;
-
- /* the enable bit is always bit 31 */
- I915_WRITE(regs[pll->id].ctl,
- I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
- POSTING_READ(regs[pll->id].ctl);
+ if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10))
+ DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
}
-static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
-{
- uint32_t val;
- unsigned int dpll;
- const struct skl_dpll_regs *regs = skl_dpll_regs;
- bool ret;
-
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
- return false;
+static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy);
- ret = false;
-
- /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
- dpll = pll->id + 1;
-
- val = I915_READ(regs[pll->id].ctl);
- if (!(val & LCPLL_PLL_ENABLE))
- goto out;
-
- val = I915_READ(DPLL_CTRL1);
- hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
-
- /* avoid reading back stale values if HDMI mode is not enabled */
- if (val & DPLL_CTRL1_HDMI_MODE(dpll)) {
- hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
- hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
- }
- ret = true;
-
-out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+static void broxton_phy_init(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ enum port port;
+ u32 ports, val;
- return ret;
-}
+ if (broxton_phy_is_enabled(dev_priv, phy)) {
+ /* Still read out the GRC value for state verification */
+ if (phy == DPIO_PHY0)
+ dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy);
-static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
-{
- int i;
+ if (broxton_phy_verify_state(dev_priv, phy)) {
+ DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
+ "won't reprogram it\n", phy);
- dev_priv->num_shared_dpll = 3;
+ return;
+ }
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- dev_priv->shared_dplls[i].id = i;
- dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i];
- dev_priv->shared_dplls[i].disable = skl_ddi_pll_disable;
- dev_priv->shared_dplls[i].enable = skl_ddi_pll_enable;
- dev_priv->shared_dplls[i].get_hw_state =
- skl_ddi_pll_get_hw_state;
+ DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
+ "force reprogramming it\n", phy);
+ } else {
+ DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy);
}
-}
-
-static void broxton_phy_init(struct drm_i915_private *dev_priv,
- enum dpio_phy phy)
-{
- enum port port;
- uint32_t val;
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
val |= GT_DISPLAY_POWER_ON(phy);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
- /* Considering 10ms timeout until BSpec is updated */
- if (wait_for(I915_READ(BXT_PORT_CL1CM_DW0(phy)) & PHY_POWER_GOOD, 10))
+ /*
+ * The PHY registers start out inaccessible and respond to reads with
+ * all 1s. Eventually they become accessible as they power up, then
+ * the reserved bit will give the default 0. Poll on the reserved bit
+ * becoming 0 to find when the PHY is accessible.
+ * HW team confirmed that the time to reach the PHY power-good status is
+ * anywhere between 50 us and 100 us.
+ */
+ if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
DRM_ERROR("timeout during PHY%d power on\n", phy);
+ }
+
+ if (phy == DPIO_PHY0)
+ ports = BIT(PORT_B) | BIT(PORT_C);
+ else
+ ports = BIT(PORT_A);
- for (port = (phy == DPIO_PHY0 ? PORT_B : PORT_A);
- port <= (phy == DPIO_PHY0 ? PORT_C : PORT_A); port++) {
+ for_each_port_masked(port, ports) {
int lane;
for (lane = 0; lane < 4; lane++) {
@@ -2742,6 +1884,9 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
* enabled.
* TODO: port C is only connected on BXT-P, so on BXT0/1 we should
* power down the second channel on PHY0 as well.
+ *
+ * FIXME: Clarify programming of the following; the register is
+ * read-only with bit 6 fixed at 0 at least in stepping A.
*/
if (phy == DPIO_PHY1)
val |= OCL2_LDOFUSE_PWR_DIS;
@@ -2754,12 +1899,10 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
* the corresponding calibrated value from PHY1, and disable
* the automatic calibration on PHY0.
*/
- if (wait_for(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE,
- 10))
- DRM_ERROR("timeout waiting for PHY1 GRC\n");
+ broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
- val = I915_READ(BXT_PORT_REF_DW6(DPIO_PHY1));
- val = (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+ val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv,
+ DPIO_PHY1);
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
@@ -2769,17 +1912,27 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
val |= GRC_DIS | GRC_RDY_OVRD;
I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
}
+ /*
+ * During PHY1 init delay waiting for GRC calibration to finish, since
+ * it can happen in parallel with the subsequent PHY0 init.
+ */
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val |= COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
}
-void broxton_ddi_phy_init(struct drm_device *dev)
+void broxton_ddi_phy_init(struct drm_i915_private *dev_priv)
{
/* Enable PHY1 first since it provides Rcomp for PHY0 */
- broxton_phy_init(dev->dev_private, DPIO_PHY1);
- broxton_phy_init(dev->dev_private, DPIO_PHY0);
+ broxton_phy_init(dev_priv, DPIO_PHY1);
+ broxton_phy_init(dev_priv, DPIO_PHY0);
+
+ /*
+ * If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the
+ * PHY1 GRC calibration to finish, so wait for it here.
+ */
+ broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
}
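
The ordering here is load-bearing: PHY1 supplies the Rcomp/GRC result that PHY0 copies, and the GRC wait is deliberately deferred so it can overlap PHY0 bring-up. A runnable toy of the sequencing, with printf stubs standing in for the register work:

#include <stdio.h>

enum dpio_phy { DPIO_PHY0, DPIO_PHY1 };

static void phy_init(enum dpio_phy phy)
{
	printf("init PHY%d\n", (int)phy);		/* register programming stub */
}

static void phy_wait_grc_done(enum dpio_phy phy)
{
	printf("GRC done on PHY%d\n", (int)phy);	/* GRC_DONE poll stub */
}

int main(void)
{
	phy_init(DPIO_PHY1);		/* provides Rcomp for PHY0 */
	phy_init(DPIO_PHY0);		/* overlaps with PHY1's GRC calibration */
	phy_wait_grc_done(DPIO_PHY1);	/* final wait covers early-return paths
					 * (e.g. PHY1 already enabled by BIOS) */
	return 0;
}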
static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
@@ -2790,260 +1943,126 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val &= ~COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+ val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val &= ~GT_DISPLAY_POWER_ON(phy);
+ I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
}
-void broxton_ddi_phy_uninit(struct drm_device *dev)
+void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
broxton_phy_uninit(dev_priv, DPIO_PHY1);
broxton_phy_uninit(dev_priv, DPIO_PHY0);
-
- /* FIXME: do this in broxton_phy_uninit per phy */
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, 0);
}
-static const char * const bxt_ddi_pll_names[] = {
- "PORT PLL A",
- "PORT PLL B",
- "PORT PLL C",
-};
-
-static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+static bool __printf(6, 7)
+__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+ i915_reg_t reg, u32 mask, u32 expected,
+ const char *reg_fmt, ...)
{
- uint32_t temp;
- enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
-
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
- temp &= ~PORT_PLL_REF_SEL;
- /* Non-SSC reference */
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
-
- /* Disable 10 bit clock */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
- temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
-
- /* Write P1 & P2 */
- temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
- temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
- temp |= pll->config.hw_state.ebb0;
- I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
-
- /* Write M2 integer */
- temp = I915_READ(BXT_PORT_PLL(port, 0));
- temp &= ~PORT_PLL_M2_MASK;
- temp |= pll->config.hw_state.pll0;
- I915_WRITE(BXT_PORT_PLL(port, 0), temp);
-
- /* Write N */
- temp = I915_READ(BXT_PORT_PLL(port, 1));
- temp &= ~PORT_PLL_N_MASK;
- temp |= pll->config.hw_state.pll1;
- I915_WRITE(BXT_PORT_PLL(port, 1), temp);
-
- /* Write M2 fraction */
- temp = I915_READ(BXT_PORT_PLL(port, 2));
- temp &= ~PORT_PLL_M2_FRAC_MASK;
- temp |= pll->config.hw_state.pll2;
- I915_WRITE(BXT_PORT_PLL(port, 2), temp);
-
- /* Write M2 fraction enable */
- temp = I915_READ(BXT_PORT_PLL(port, 3));
- temp &= ~PORT_PLL_M2_FRAC_ENABLE;
- temp |= pll->config.hw_state.pll3;
- I915_WRITE(BXT_PORT_PLL(port, 3), temp);
-
- /* Write coeff */
- temp = I915_READ(BXT_PORT_PLL(port, 6));
- temp &= ~PORT_PLL_PROP_COEFF_MASK;
- temp &= ~PORT_PLL_INT_COEFF_MASK;
- temp &= ~PORT_PLL_GAIN_CTL_MASK;
- temp |= pll->config.hw_state.pll6;
- I915_WRITE(BXT_PORT_PLL(port, 6), temp);
-
- /* Write calibration val */
- temp = I915_READ(BXT_PORT_PLL(port, 8));
- temp &= ~PORT_PLL_TARGET_CNT_MASK;
- temp |= pll->config.hw_state.pll8;
- I915_WRITE(BXT_PORT_PLL(port, 8), temp);
-
- temp = I915_READ(BXT_PORT_PLL(port, 9));
- temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
- temp |= pll->config.hw_state.pll9;
- I915_WRITE(BXT_PORT_PLL(port, 9), temp);
-
- temp = I915_READ(BXT_PORT_PLL(port, 10));
- temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
- temp &= ~PORT_PLL_DCO_AMP_MASK;
- temp |= pll->config.hw_state.pll10;
- I915_WRITE(BXT_PORT_PLL(port, 10), temp);
-
- /* Recalibrate with new settings */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
- temp |= PORT_PLL_RECALIBRATE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
- temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- temp |= pll->config.hw_state.ebb4;
- I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
-
- /* Enable PLL */
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
- temp |= PORT_PLL_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
- POSTING_READ(BXT_PORT_PLL_ENABLE(port));
-
- if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
- PORT_PLL_LOCK), 200))
- DRM_ERROR("PLL %d not locked\n", port);
+ struct va_format vaf;
+ va_list args;
+ u32 val;
- /*
- * While we write to the group register to program all lanes at once we
- * can read only lane registers and we pick lanes 0/1 for that.
- */
- temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
- temp &= ~LANE_STAGGER_MASK;
- temp &= ~LANESTAGGER_STRAP_OVRD;
- temp |= pll->config.hw_state.pcsdw12;
- I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
-}
-
-static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
- uint32_t temp;
-
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
- temp &= ~PORT_PLL_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
- POSTING_READ(BXT_PORT_PLL_ENABLE(port));
-}
+ val = I915_READ(reg);
+ if ((val & mask) == expected)
+ return true;
-static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
-{
- enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
- uint32_t val;
- bool ret;
+ va_start(args, reg_fmt);
+ vaf.fmt = reg_fmt;
+ vaf.va = &args;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
- return false;
+ DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+ "current %08x, expected %08x (mask %08x)\n",
+ phy, &vaf, reg.reg, val, (val & ~mask) | expected,
+ mask);
- ret = false;
+ va_end(args);
- val = I915_READ(BXT_PORT_PLL_ENABLE(port));
- if (!(val & PORT_PLL_ENABLE))
- goto out;
+ return false;
+}
- hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
- hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum dpio_phy phy)
+{
+ enum port port;
+ u32 ports;
+ uint32_t mask;
+ bool ok;
- hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
- hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+#define _CHK(reg, mask, exp, fmt, ...) \
+ __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
+ ## __VA_ARGS__)
- hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
- hw_state->pll0 &= PORT_PLL_M2_MASK;
+ /* We expect the PHY to be always enabled */
+ if (!broxton_phy_is_enabled(dev_priv, phy))
+ return false;
- hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
- hw_state->pll1 &= PORT_PLL_N_MASK;
+ ok = true;
- hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
- hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
+ if (phy == DPIO_PHY0)
+ ports = BIT(PORT_B) | BIT(PORT_C);
+ else
+ ports = BIT(PORT_A);
- hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
- hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
+ for_each_port_masked(port, ports) {
+ int lane;
- hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
- hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
- PORT_PLL_INT_COEFF_MASK |
- PORT_PLL_GAIN_CTL_MASK;
+ for (lane = 0; lane < 4; lane++)
+ ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane),
+ LATENCY_OPTIM,
+ lane != 1 ? LATENCY_OPTIM : 0,
+ "BXT_PORT_TX_DW14_LN(%d, %d)", port, lane);
+ }
- hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
- hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
+ /* PLL Rcomp code offset */
+ ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
+ IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
+ "BXT_PORT_CL1CM_DW9(%d)", phy);
+ ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
+ IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
+ "BXT_PORT_CL1CM_DW10(%d)", phy);
- hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
- hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
+ /* Power gating */
+ mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
+ ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
+ "BXT_PORT_CL1CM_DW28(%d)", phy);
- hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
- hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
- PORT_PLL_DCO_AMP_MASK;
+ if (phy == DPIO_PHY0)
+ ok &= _CHK(BXT_PORT_CL2CM_DW6_BC,
+ DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
+ "BXT_PORT_CL2CM_DW6_BC");
/*
- * While we write to the group register to program all lanes at once we
- * can read only lane registers. We configure all lanes the same way, so
- * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
+ * TODO: Verify BXT_PORT_CL1CM_DW30 bit OCL2_LDOFUSE_PWR_DIS;
+ * at least on stepping A this bit is read-only and fixed at 0.
*/
- hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
- if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
- DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
- hw_state->pcsdw12,
- I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
- hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
-
- ret = true;
-
-out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
- return ret;
-}
-
-static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
-{
- int i;
+ if (phy == DPIO_PHY0) {
+ u32 grc_code = dev_priv->bxt_phy_grc;
- dev_priv->num_shared_dpll = 3;
+ grc_code = grc_code << GRC_CODE_FAST_SHIFT |
+ grc_code << GRC_CODE_SLOW_SHIFT |
+ grc_code;
+ mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
+ GRC_CODE_NOM_MASK;
+ ok &= _CHK(BXT_PORT_REF_DW6(DPIO_PHY0), mask, grc_code,
+ "BXT_PORT_REF_DW6(%d)", DPIO_PHY0);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- dev_priv->shared_dplls[i].id = i;
- dev_priv->shared_dplls[i].name = bxt_ddi_pll_names[i];
- dev_priv->shared_dplls[i].disable = bxt_ddi_pll_disable;
- dev_priv->shared_dplls[i].enable = bxt_ddi_pll_enable;
- dev_priv->shared_dplls[i].get_hw_state =
- bxt_ddi_pll_get_hw_state;
+ mask = GRC_DIS | GRC_RDY_OVRD;
+ ok &= _CHK(BXT_PORT_REF_DW8(DPIO_PHY0), mask, mask,
+ "BXT_PORT_REF_DW8(%d)", DPIO_PHY0);
}
+
+ return ok;
+#undef _CHK
}
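
Behind the logging, _CHK() is a masked compare. The core predicate, standalone:

#include <stdbool.h>
#include <stdint.h>

/* True iff the bits selected by mask hold exactly the expected pattern.
 * E.g. the power-good test earlier passes only when PHY_POWER_GOOD is
 * set and the PHY_RESERVED bit reads back as 0. */
static bool masked_reg_ok(uint32_t val, uint32_t mask, uint32_t expected)
{
	return (val & mask) == expected;
}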
-void intel_ddi_pll_init(struct drm_device *dev)
+void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t val = I915_READ(LCPLL_CTL);
-
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
- skl_shared_dplls_init(dev_priv);
- else if (IS_BROXTON(dev))
- bxt_shared_dplls_init(dev_priv);
- else
- hsw_shared_dplls_init(dev_priv);
-
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
- int cdclk_freq;
-
- cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
- dev_priv->skl_boot_cdclk = cdclk_freq;
- if (skl_sanitize_cdclk(dev_priv))
- DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
- if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
- DRM_ERROR("LCPLL1 is disabled\n");
- } else if (IS_BROXTON(dev)) {
- broxton_init_cdclk(dev);
- broxton_ddi_phy_init(dev);
- } else {
- /*
- * The LCPLL register should be turned on by the BIOS. For now
- * let's just check its state and print errors in case
- * something is wrong. Don't even try to turn it on.
- */
-
- if (val & LCPLL_CD_SOURCE_FCLK)
- DRM_ERROR("CDCLK source is not LCPLL\n");
-
- if (val & LCPLL_PLL_DISABLE)
- DRM_ERROR("LCPLL is disabled\n");
- }
+ if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) ||
+ !broxton_phy_verify_state(dev_priv, DPIO_PHY1))
+ i915_report_error(dev_priv, "DDI PHY state mismatch\n");
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3098,12 +2117,18 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
uint32_t val;
- intel_ddi_post_disable(intel_encoder);
-
+ /*
+ * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
+ * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
+ * step 13 is the correct place for it. Step 18 is where it was
+ * originally before the BUN.
+ */
val = I915_READ(FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+ intel_ddi_post_disable(intel_encoder);
+
val = I915_READ(FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
@@ -3127,6 +2152,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi;
u32 temp, flags = 0;
+ /* XXX: DSI transcoder paranoia */
+ if (WARN_ON(transcoder_is_dsi(cpu_transcoder)))
+ return;
+
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -3163,8 +2192,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
- break;
+ /* fall through */
case TRANS_DDI_MODE_SELECT_DVI:
+ pipe_config->lane_count = 4;
+ break;
case TRANS_DDI_MODE_SELECT_FDI:
break;
case TRANS_DDI_MODE_SELECT_DP_SST:
@@ -3184,8 +2215,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->has_audio = true;
}
- if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
- pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+ if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
+ pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
/*
* This is a big fat ugly hack.
*
@@ -3200,8 +2231,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
* load.
*/
DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
- dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+ pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+ dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
}
intel_ddi_clock_get(encoder, pipe_config);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e5db9e1f6..3074c56a6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -36,6 +36,7 @@
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -96,12 +97,13 @@ static int intel_framebuffer_init(struct drm_device *dev,
struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
+static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
struct intel_link_m_n *m_n,
struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
-static void intel_set_pipe_csc(struct drm_crtc *crtc);
+static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
@@ -110,13 +112,11 @@ static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
-static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
- int num_connectors);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
-static void intel_pre_disable_primary(struct drm_crtc *crtc);
+static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
typedef struct {
int min, max;
@@ -147,15 +147,12 @@ static int valleyview_get_vco(struct drm_i915_private *dev_priv)
return vco_freq[hpll_freq] * 1000;
}
-static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
- const char *name, u32 reg)
+int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg, int ref_freq)
{
u32 val;
int divider;
- if (dev_priv->hpll_freq == 0)
- dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
-
mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, reg);
mutex_unlock(&dev_priv->sb_lock);
@@ -166,52 +163,75 @@ static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
(divider << CCK_FREQUENCY_STATUS_SHIFT),
"%s change in progress\n", name);
- return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
+ return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
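The return expression above encodes clock = 2 * ref / (divider + 1) with round-to-nearest. A worked sketch with made-up numbers:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int ref_freq = 200000;	/* kHz, assumed reference clock */
	int divider = 7;	/* assumed CCK divider field */

	/* 2 * 200000 / 8 = 50000 kHz */
	printf("%d kHz\n", DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1));
	return 0;
}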
-int
-intel_pch_rawclk(struct drm_device *dev)
+static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ if (dev_priv->hpll_freq == 0)
+ dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
- WARN_ON(!HAS_PCH_SPLIT(dev));
+ return vlv_get_cck_clock(dev_priv, name, reg,
+ dev_priv->hpll_freq);
+}
- return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
+static int
+intel_pch_rawclk(struct drm_i915_private *dev_priv)
+{
+ return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}
-/* hrawclock is 1/4 the FSB frequency */
-int intel_hrawclk(struct drm_device *dev)
+static int
+intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t clkcfg;
+ return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
+ CCK_DISPLAY_REF_CLOCK_CONTROL);
+}
- /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- return 200;
+static int
+intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
+{
+ uint32_t clkcfg;
+ /* hrawclock is 1/4 the FSB frequency */
clkcfg = I915_READ(CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
- return 100;
+ return 100000;
case CLKCFG_FSB_533:
- return 133;
+ return 133333;
case CLKCFG_FSB_667:
- return 166;
+ return 166667;
case CLKCFG_FSB_800:
- return 200;
+ return 200000;
case CLKCFG_FSB_1067:
- return 266;
+ return 266667;
case CLKCFG_FSB_1333:
- return 333;
+ return 333333;
/* these two are just a guess; one of them might be right */
case CLKCFG_FSB_1600:
case CLKCFG_FSB_1600_ALT:
- return 400;
+ return 400000;
default:
- return 133;
+ return 133333;
}
}
+static void intel_update_rawclk(struct drm_i915_private *dev_priv)
+{
+ if (HAS_PCH_SPLIT(dev_priv))
+ dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
+ else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
+ dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
+ else
+ return; /* no rawclk on other platforms, or no need to know it */
+
+ DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
+}
+
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
@@ -224,13 +244,15 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
}
static inline u32 /* units of 100MHz */
-intel_fdi_link_freq(struct drm_device *dev)
+intel_fdi_link_freq(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *pipe_config)
{
- if (IS_GEN5(dev)) {
- struct drm_i915_private *dev_priv = dev->dev_private;
- return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
- } else
- return 27;
+ if (HAS_DDI(dev_priv))
+ return pipe_config->port_clock; /* SPLL */
+ else if (IS_GEN5(dev_priv))
+ return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
+ else
+ return 270000;
}
static const intel_limit_t intel_limits_i8xx_dac = {
@@ -550,89 +572,6 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
return false;
}
-static const intel_limit_t *
-intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
-{
- struct drm_device *dev = crtc_state->base.crtc->dev;
- const intel_limit_t *limit;
-
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_is_dual_link_lvds(dev)) {
- if (refclk == 100000)
- limit = &intel_limits_ironlake_dual_lvds_100m;
- else
- limit = &intel_limits_ironlake_dual_lvds;
- } else {
- if (refclk == 100000)
- limit = &intel_limits_ironlake_single_lvds_100m;
- else
- limit = &intel_limits_ironlake_single_lvds;
- }
- } else
- limit = &intel_limits_ironlake_dac;
-
- return limit;
-}
-
-static const intel_limit_t *
-intel_g4x_limit(struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc_state->base.crtc->dev;
- const intel_limit_t *limit;
-
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
- if (intel_is_dual_link_lvds(dev))
- limit = &intel_limits_g4x_dual_channel_lvds;
- else
- limit = &intel_limits_g4x_single_channel_lvds;
- } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
- intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
- limit = &intel_limits_g4x_hdmi;
- } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
- limit = &intel_limits_g4x_sdvo;
- } else /* The option is for other outputs */
- limit = &intel_limits_i9xx_sdvo;
-
- return limit;
-}
-
-static const intel_limit_t *
-intel_limit(struct intel_crtc_state *crtc_state, int refclk)
-{
- struct drm_device *dev = crtc_state->base.crtc->dev;
- const intel_limit_t *limit;
-
- if (IS_BROXTON(dev))
- limit = &intel_limits_bxt;
- else if (HAS_PCH_SPLIT(dev))
- limit = intel_ironlake_limit(crtc_state, refclk);
- else if (IS_G4X(dev)) {
- limit = intel_g4x_limit(crtc_state);
- } else if (IS_PINEVIEW(dev)) {
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_pineview_lvds;
- else
- limit = &intel_limits_pineview_sdvo;
- } else if (IS_CHERRYVIEW(dev)) {
- limit = &intel_limits_chv;
- } else if (IS_VALLEYVIEW(dev)) {
- limit = &intel_limits_vlv;
- } else if (!IS_GEN2(dev)) {
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_i9xx_lvds;
- else
- limit = &intel_limits_i9xx_sdvo;
- } else {
- if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_i8xx_lvds;
- else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
- limit = &intel_limits_i8xx_dvo;
- else
- limit = &intel_limits_i8xx_dac;
- }
- return limit;
-}
-
/*
* Platform specific helpers to calculate the port PLL loopback- (clock.m),
* and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
@@ -763,6 +702,16 @@ i9xx_select_p2_div(const intel_limit_t *limit,
}
}
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
@@ -810,6 +759,16 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
return (err != target);
}
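The clock equation in these comments can be evaluated directly. A stand-alone sketch with an arbitrary, unvalidated divisor set (the driver additionally checks the per-platform limits table and minimizes error against the target):

#include <stdio.h>

struct dpll { int n, m1, m2, p1, p2; };

static int i9xx_calc_dotclock(int refclk, const struct dpll *c)
{
	int m = 5 * (c->m1 + 2) + (c->m2 + 2);
	int vco = refclk * m / (c->n + 2);

	return vco / (c->p1 * c->p2);
}

int main(void)
{
	struct dpll c = { .n = 1, .m1 = 10, .m2 = 8, .p1 = 2, .p2 = 4 };

	/* m = 70, vco = 96000 * 70 / 3 = 2240000 kHz */
	printf("%d kHz\n", i9xx_calc_dotclock(96000, &c));	/* 280000 */
	return 0;
}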
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
static bool
pnv_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
@@ -855,6 +814,16 @@ pnv_find_best_dpll(const intel_limit_t *limit,
return (err != target);
}
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ *
+ * Target and reference clocks are specified in kHz.
+ *
+ * If match_clock is provided, then best_clock P divider must match the P
+ * divider from @match_clock used for LVDS downclocking.
+ */
static bool
g4x_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
@@ -943,6 +912,11 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
return *error_ppm + 10 < best_error_ppm;
}
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
static bool
vlv_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
@@ -997,6 +971,11 @@ vlv_find_best_dpll(const intel_limit_t *limit,
return found;
}
+/*
+ * Returns a set of divisors for the desired target clock with the given
+ * refclk, or FALSE. The returned values represent the clock equation:
+ * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
+ */
static bool
chv_find_best_dpll(const intel_limit_t *limit,
struct intel_crtc_state *crtc_state,
@@ -1058,9 +1037,10 @@ chv_find_best_dpll(const intel_limit_t *limit,
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
intel_clock_t *best_clock)
{
- int refclk = i9xx_get_refclk(crtc_state, 0);
+ int refclk = 100000;
+ const intel_limit_t *limit = &intel_limits_bxt;
- return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
+ return chv_find_best_dpll(limit, crtc_state,
target_clock, refclk, NULL, best_clock);
}
@@ -1165,7 +1145,7 @@ void assert_pll(struct drm_i915_private *dev_priv,
}
/* XXX: the dsi pll is shared between MIPI DSI ports */
-static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
+void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
u32 val;
bool cur_state;
@@ -1179,36 +1159,6 @@ static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
"DSI PLL state assertion failure (expected %s, current %s)\n",
onoff(state), onoff(cur_state));
}
-#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
-#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
-
-struct intel_shared_dpll *
-intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-
- if (crtc->config->shared_dpll < 0)
- return NULL;
-
- return &dev_priv->shared_dplls[crtc->config->shared_dpll];
-}
-
-/* For ILK+ */
-void assert_shared_dpll(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- bool state)
-{
- bool cur_state;
- struct intel_dpll_hw_state hw_state;
-
- if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
- return;
-
- cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
- I915_STATE_WARN(cur_state != state,
- "%s assertion failure (expected %s, current %s)\n",
- pll->name, onoff(state), onoff(cur_state));
-}
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
@@ -1217,7 +1167,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
- if (HAS_DDI(dev_priv->dev)) {
+ if (HAS_DDI(dev_priv)) {
/* DDI does not have a specific FDI_TX register */
u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
@@ -1253,11 +1203,11 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
u32 val;
/* ILK FDI PLL is always enabled */
- if (INTEL_INFO(dev_priv->dev)->gen == 5)
+ if (INTEL_INFO(dev_priv)->gen == 5)
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
- if (HAS_DDI(dev_priv->dev))
+ if (HAS_DDI(dev_priv))
return;
val = I915_READ(FDI_TX_CTL(pipe));
@@ -1446,21 +1396,8 @@ static void assert_vblank_disabled(struct drm_crtc *crtc)
drm_crtc_vblank_put(crtc);
}
-static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
-{
- u32 val;
- bool enabled;
-
- I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
-
- val = I915_READ(PCH_DREF_CONTROL);
- enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
- DREF_SUPERSPREAD_SOURCE_MASK));
- I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
-}
-
-static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
{
u32 val;
bool enabled;
@@ -1478,11 +1415,11 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
if ((val & DP_PORT_EN) == 0)
return false;
- if (HAS_PCH_CPT(dev_priv->dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
return false;
- } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
return false;
} else {
@@ -1498,10 +1435,10 @@ static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
if ((val & SDVO_ENABLE) == 0)
return false;
- if (HAS_PCH_CPT(dev_priv->dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
return false;
- } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
return false;
} else {
@@ -1517,7 +1454,7 @@ static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
if ((val & LVDS_PORT_EN) == 0)
return false;
- if (HAS_PCH_CPT(dev_priv->dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
return false;
} else {
@@ -1532,7 +1469,7 @@ static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
{
if ((val & ADPA_DAC_ENABLE) == 0)
return false;
- if (HAS_PCH_CPT(dev_priv->dev)) {
+ if (HAS_PCH_CPT(dev_priv)) {
if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
return false;
} else {
@@ -1551,7 +1488,7 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
i915_mmio_reg_offset(reg), pipe_name(pipe));
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
&& (val & DP_PIPEB_SELECT),
"IBX PCH dp port still using transcoder B\n");
}
@@ -1564,7 +1501,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
i915_mmio_reg_offset(reg), pipe_name(pipe));
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
+ I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
&& (val & SDVO_PIPE_B_SELECT),
"IBX PCH hdmi port still using transcoder B\n");
}
@@ -1593,53 +1530,47 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
+static void _vlv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+ POSTING_READ(DPLL(pipe));
+ udelay(150);
+
+ if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ DRM_ERROR("DPLL %d failed to lock\n", pipe);
+}
+
static void vlv_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- i915_reg_t reg = DPLL(crtc->pipe);
- u32 dpll = pipe_config->dpll_hw_state.dpll;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
- assert_pipe_disabled(dev_priv, crtc->pipe);
+ assert_pipe_disabled(dev_priv, pipe);
/* PLL is protected by panel, make sure we can write it */
- if (IS_MOBILE(dev_priv->dev))
- assert_panel_unlocked(dev_priv, crtc->pipe);
-
- I915_WRITE(reg, dpll);
- POSTING_READ(reg);
- udelay(150);
-
- if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
- DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
+ assert_panel_unlocked(dev_priv, pipe);
- I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
- POSTING_READ(DPLL_MD(crtc->pipe));
+ if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
+ _vlv_enable_pll(crtc, pipe_config);
- /* We do this three times for luck */
- I915_WRITE(reg, dpll);
- POSTING_READ(reg);
- udelay(150); /* wait for warmup */
- I915_WRITE(reg, dpll);
- POSTING_READ(reg);
- udelay(150); /* wait for warmup */
- I915_WRITE(reg, dpll);
- POSTING_READ(reg);
- udelay(150); /* wait for warmup */
+ I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
+ POSTING_READ(DPLL_MD(pipe));
}
-static void chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
+
+static void _chv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 tmp;
- assert_pipe_disabled(dev_priv, crtc->pipe);
-
mutex_lock(&dev_priv->sb_lock);
/* Enable back the 10bit clock to display controller */
@@ -1660,10 +1591,43 @@ static void chv_enable_pll(struct intel_crtc *crtc,
/* Check PLL is locked */
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
+}
- /* not sure when this should be written */
- I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
- POSTING_READ(DPLL_MD(pipe));
+static void chv_enable_pll(struct intel_crtc *crtc,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ assert_pipe_disabled(dev_priv, pipe);
+
+ /* PLL is protected by panel, make sure we can write it */
+ assert_panel_unlocked(dev_priv, pipe);
+
+ if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
+ _chv_enable_pll(crtc, pipe_config);
+
+ if (pipe != PIPE_A) {
+ /*
+ * WaPixelRepeatModeFixForC0:chv
+ *
+ * DPLLCMD is AWOL. Use chicken bits to propagate
+ * the value from DPLLBMD to either pipe B or C.
+ */
+ I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
+ I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
+ I915_WRITE(CBR4_VLV, 0);
+ dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
+
+ /*
+ * DPLLB VGA mode also seems to cause problems.
+ * We should always have it disabled.
+ */
+ WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
+ } else {
+ I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
+ POSTING_READ(DPLL_MD(pipe));
+ }
}
static int intel_num_dvo_pipes(struct drm_device *dev)
@@ -1687,9 +1651,6 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
assert_pipe_disabled(dev_priv, crtc->pipe);
- /* No really, not for ILK+ */
- BUG_ON(INTEL_INFO(dev)->gen >= 5);
-
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev) && !IS_I830(dev))
assert_panel_unlocked(dev_priv, crtc->pipe);
@@ -1788,16 +1749,13 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
- /*
- * Leave integrated clock source and reference clock enabled for pipe B.
- * The latter is needed for VGA hotplug / manual detection.
- */
- val = DPLL_VGA_MODE_DIS;
- if (pipe == PIPE_B)
- val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
+ val = DPLL_INTEGRATED_REF_CLK_VLV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (pipe != PIPE_A)
+ val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
-
}
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1808,11 +1766,11 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
- /* Set PLL en = 0 */
val = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
I915_WRITE(DPLL(pipe), val);
POSTING_READ(DPLL(pipe));
@@ -1856,100 +1814,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
-static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
- if (WARN_ON(pll == NULL))
- return;
-
- WARN_ON(!pll->config.crtc_mask);
- if (pll->active == 0) {
- DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
- WARN_ON(pll->on);
- assert_shared_dpll_disabled(dev_priv, pll);
-
- pll->mode_set(dev_priv, pll);
- }
-}
-
-/**
- * intel_enable_shared_dpll - enable PCH PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
- *
- * The PCH PLL needs to be enabled before the PCH transcoder, since it
- * drives the transcoder clock.
- */
-static void intel_enable_shared_dpll(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
- if (WARN_ON(pll == NULL))
- return;
-
- if (WARN_ON(pll->config.crtc_mask == 0))
- return;
-
- DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
- pll->name, pll->active, pll->on,
- crtc->base.base.id);
-
- if (pll->active++) {
- WARN_ON(!pll->on);
- assert_shared_dpll_enabled(dev_priv, pll);
- return;
- }
- WARN_ON(pll->on);
-
- intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
-
- DRM_DEBUG_KMS("enabling %s\n", pll->name);
- pll->enable(dev_priv, pll);
- pll->on = true;
-}
-
-static void intel_disable_shared_dpll(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
-
- /* PCH only available on ILK+ */
- if (INTEL_INFO(dev)->gen < 5)
- return;
-
- if (pll == NULL)
- return;
-
- if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
- return;
-
- DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
- pll->name, pll->active, pll->on,
- crtc->base.base.id);
-
- if (WARN_ON(pll->active == 0)) {
- assert_shared_dpll_disabled(dev_priv, pll);
- return;
- }
-
- assert_shared_dpll_enabled(dev_priv, pll);
- WARN_ON(!pll->on);
- if (--pll->active)
- return;
-
- DRM_DEBUG_KMS("disabling %s\n", pll->name);
- pll->disable(dev_priv, pll);
- pll->on = false;
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
-}
-
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -1959,12 +1823,8 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
i915_reg_t reg;
uint32_t val, pipeconf_val;
- /* PCH only available on ILK+ */
- BUG_ON(!HAS_PCH_SPLIT(dev));
-
/* Make sure PCH DPLL is enabled */
- assert_shared_dpll_enabled(dev_priv,
- intel_crtc_to_shared_dpll(intel_crtc));
+ assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
@@ -1983,7 +1843,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
- if (HAS_PCH_IBX(dev_priv->dev)) {
+ if (HAS_PCH_IBX(dev_priv)) {
/*
* Make the BPC in transcoder be consistent with
* that in pipeconf reg. For HDMI we must use 8bpc
@@ -1998,7 +1858,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
- if (HAS_PCH_IBX(dev_priv->dev) &&
+ if (HAS_PCH_IBX(dev_priv) &&
intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
@@ -2016,9 +1876,6 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
{
u32 val, pipeconf_val;
- /* PCH only available on ILK+ */
- BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
-
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
@@ -2113,7 +1970,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
assert_cursor_disabled(dev_priv, pipe);
assert_sprites_disabled(dev_priv, pipe);
- if (HAS_PCH_LPT(dev_priv->dev))
+ if (HAS_PCH_LPT(dev_priv))
pch_transcoder = TRANSCODER_A;
else
pch_transcoder = pipe;
@@ -2123,7 +1980,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
* need the check.
*/
- if (HAS_GMCH_DISPLAY(dev_priv->dev))
+ if (HAS_GMCH_DISPLAY(dev_priv))
if (crtc->config->has_dsi_encoder)
assert_dsi_pll_enabled(dev_priv);
else
@@ -2225,8 +2082,8 @@ static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
return IS_GEN2(dev_priv) ? 2048 : 4096;
}
-static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv,
- uint64_t fb_modifier, unsigned int cpp)
+static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
+ uint64_t fb_modifier, unsigned int cpp)
{
switch (fb_modifier) {
case DRM_FORMAT_MOD_NONE:
@@ -2269,7 +2126,21 @@ unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
return 1;
else
return intel_tile_size(dev_priv) /
- intel_tile_width(dev_priv, fb_modifier, cpp);
+ intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
+}
+
+/* Return the tile dimensions in pixel units */
+static void intel_tile_dims(const struct drm_i915_private *dev_priv,
+ unsigned int *tile_width,
+ unsigned int *tile_height,
+ uint64_t fb_modifier,
+ unsigned int cpp)
+{
+ unsigned int tile_width_bytes =
+ intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
+
+ *tile_width = tile_width_bytes / cpp;
+ *tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
}
unsigned int
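intel_tile_dims() above just converts the byte width of one tile page into pixels. For an assumed legacy X-tile (4096-byte pages laid out as 512-byte-wide rows) at 32bpp:

#include <stdio.h>

int main(void)
{
	unsigned int tile_size = 4096;		/* bytes per tile page, assumed */
	unsigned int tile_width_bytes = 512;	/* X-tile row stride, assumed */
	unsigned int cpp = 4;			/* 32bpp */

	unsigned int tile_width = tile_width_bytes / cpp;		/* 128 px */
	unsigned int tile_height = tile_size / tile_width_bytes;	/* 8 rows */

	printf("%u x %u pixels per tile\n", tile_width, tile_height);
	return 0;
}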
@@ -2282,48 +2153,54 @@ intel_fb_align_height(struct drm_device *dev, unsigned int height,
return ALIGN(height, tile_height);
}
-static void
-intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
- const struct drm_plane_state *plane_state)
+unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
- struct intel_rotation_info *info = &view->params.rotated;
- unsigned int tile_size, tile_width, tile_height, cpp;
-
- *view = i915_ggtt_view_normal;
+ unsigned int size = 0;
+ int i;
- if (!plane_state)
- return;
+ for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
+ size += rot_info->plane[i].width * rot_info->plane[i].height;
- if (!intel_rotation_90_or_270(plane_state->rotation))
- return;
+ return size;
+}
- *view = i915_ggtt_view_rotated;
+static void
+intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
+ const struct drm_framebuffer *fb,
+ unsigned int rotation)
+{
+ if (intel_rotation_90_or_270(rotation)) {
+ *view = i915_ggtt_view_rotated;
+ view->params.rotated = to_intel_framebuffer(fb)->rot_info;
+ } else {
+ *view = i915_ggtt_view_normal;
+ }
+}
- info->height = fb->height;
- info->pixel_format = fb->pixel_format;
- info->pitch = fb->pitches[0];
- info->uv_offset = fb->offsets[1];
- info->fb_modifier = fb->modifier[0];
+static void
+intel_fill_fb_info(struct drm_i915_private *dev_priv,
+ struct drm_framebuffer *fb)
+{
+ struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
+ unsigned int tile_size, tile_width, tile_height, cpp;
tile_size = intel_tile_size(dev_priv);
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
- tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp);
- tile_height = tile_size / tile_width;
+ intel_tile_dims(dev_priv, &tile_width, &tile_height,
+ fb->modifier[0], cpp);
- info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width);
- info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
- info->size = info->width_pages * info->height_pages * tile_size;
+ info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
+ info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
if (info->pixel_format == DRM_FORMAT_NV12) {
cpp = drm_format_plane_cpp(fb->pixel_format, 1);
- tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp);
- tile_height = tile_size / tile_width;
+ intel_tile_dims(dev_priv, &tile_width, &tile_height,
+ fb->modifier[1], cpp);
- info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width);
- info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height);
- info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size;
+ info->uv_offset = fb->offsets[1];
+ info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
+ info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
}
}
@@ -2360,9 +2237,8 @@ static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv
}
int
-intel_pin_and_fence_fb_obj(struct drm_plane *plane,
- struct drm_framebuffer *fb,
- const struct drm_plane_state *plane_state)
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ unsigned int rotation)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2375,7 +2251,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
- intel_fill_fb_ggtt_view(&view, fb, plane_state);
+ intel_fill_fb_ggtt_view(&view, fb, rotation);
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
@@ -2433,15 +2309,14 @@ err_pm:
return ret;
}
-static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
- const struct drm_plane_state *plane_state)
+static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct i915_ggtt_view view;
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
- intel_fill_fb_ggtt_view(&view, fb, plane_state);
+ intel_fill_fb_ggtt_view(&view, fb, rotation);
if (view.type == I915_GGTT_VIEW_NORMAL)
i915_gem_object_unpin_fence(obj);
@@ -2449,38 +2324,93 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
i915_gem_object_unpin_from_display_plane(obj, &view);
}
-/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
- * is assumed to be a power-of-two. */
-u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
- int *x, int *y,
- uint64_t fb_modifier,
- unsigned int cpp,
- unsigned int pitch)
+/*
+ * Adjust the tile offset by moving the difference into
+ * the x/y offsets.
+ *
+ * Input tile dimensions and pitch must already be
+ * rotated to match x and y, and in pixel units.
+ */
+static u32 intel_adjust_tile_offset(int *x, int *y,
+ unsigned int tile_width,
+ unsigned int tile_height,
+ unsigned int tile_size,
+ unsigned int pitch_tiles,
+ u32 old_offset,
+ u32 new_offset)
{
+ unsigned int tiles;
+
+ WARN_ON(old_offset & (tile_size - 1));
+ WARN_ON(new_offset & (tile_size - 1));
+ WARN_ON(new_offset > old_offset);
+
+ tiles = (old_offset - new_offset) / tile_size;
+
+ *y += tiles / pitch_tiles * tile_height;
+ *x += tiles % pitch_tiles * tile_width;
+
+ return new_offset;
+}
+
+/*
+ * Computes the linear offset to the base tile and adjusts
+ * x, y. Bytes per pixel is assumed to be a power-of-two.
+ *
+ * In the 90/270 rotated case, x and y are assumed
+ * to be already rotated to match the rotated GTT view, and
+ * pitch is the tile_height aligned framebuffer height.
+ */
+u32 intel_compute_tile_offset(int *x, int *y,
+ const struct drm_framebuffer *fb, int plane,
+ unsigned int pitch,
+ unsigned int rotation)
+{
+ const struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ uint64_t fb_modifier = fb->modifier[plane];
+ unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ u32 offset, offset_aligned, alignment;
+
+ alignment = intel_surf_alignment(dev_priv, fb_modifier);
+ if (alignment)
+ alignment--;
+
if (fb_modifier != DRM_FORMAT_MOD_NONE) {
unsigned int tile_size, tile_width, tile_height;
- unsigned int tile_rows, tiles;
+ unsigned int tile_rows, tiles, pitch_tiles;
tile_size = intel_tile_size(dev_priv);
- tile_width = intel_tile_width(dev_priv, fb_modifier, cpp);
- tile_height = tile_size / tile_width;
+ intel_tile_dims(dev_priv, &tile_width, &tile_height,
+ fb_modifier, cpp);
+
+ if (intel_rotation_90_or_270(rotation)) {
+ pitch_tiles = pitch / tile_height;
+ swap(tile_width, tile_height);
+ } else {
+ pitch_tiles = pitch / (tile_width * cpp);
+ }
tile_rows = *y / tile_height;
*y %= tile_height;
- tiles = *x / (tile_width/cpp);
- *x %= tile_width/cpp;
+ tiles = *x / tile_width;
+ *x %= tile_width;
- return tile_rows * pitch * tile_height + tiles * tile_size;
- } else {
- unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
- unsigned int offset;
+ offset = (tile_rows * pitch_tiles + tiles) * tile_size;
+ offset_aligned = offset & ~alignment;
+ intel_adjust_tile_offset(x, y, tile_width, tile_height,
+ tile_size, pitch_tiles,
+ offset, offset_aligned);
+ } else {
offset = *y * pitch + *x * cpp;
+ offset_aligned = offset & ~alignment;
+
*y = (offset & alignment) / pitch;
*x = ((offset & alignment) - *y * pitch) / cpp;
- return offset & ~alignment;
}
+
+ return offset_aligned;
}
static int i9xx_format_to_fourcc(int format)
@@ -2536,6 +2466,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_framebuffer *fb = &plane_config->fb->base;
@@ -2551,7 +2482,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
+ if (size_aligned * 2 > ggtt->stolen_usable_size)
return false;
mutex_lock(&dev->struct_mutex);
@@ -2667,7 +2598,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
*/
to_intel_plane_state(plane_state)->visible = false;
crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
- intel_pre_disable_primary(&intel_crtc->base);
+ intel_pre_disable_primary_noatomic(&intel_crtc->base);
intel_plane->disable_plane(primary, &intel_crtc->base);
return;
@@ -2716,6 +2647,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
u32 linear_offset;
u32 dspcntr;
i915_reg_t reg = DSPCNTR(plane);
+ unsigned int rotation = plane_state->base.rotation;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
int x = plane_state->src.x1 >> 16;
int y = plane_state->src.y1 >> 16;
@@ -2780,15 +2712,14 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
- intel_compute_tile_offset(dev_priv, &x, &y,
- fb->modifier[0], cpp,
- fb->pitches[0]);
+ intel_compute_tile_offset(&x, &y, fb, 0,
+ fb->pitches[0], rotation);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
intel_crtc->dspaddr_offset = linear_offset;
}
- if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+ if (rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
x += (crtc_state->pipe_src_w - 1);
@@ -2846,6 +2777,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
u32 linear_offset;
u32 dspcntr;
i915_reg_t reg = DSPCNTR(plane);
+ unsigned int rotation = plane_state->base.rotation;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
int x = plane_state->src.x1 >> 16;
int y = plane_state->src.y1 >> 16;
@@ -2887,11 +2819,10 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
linear_offset = y * fb->pitches[0] + x * cpp;
intel_crtc->dspaddr_offset =
- intel_compute_tile_offset(dev_priv, &x, &y,
- fb->modifier[0], cpp,
- fb->pitches[0]);
+ intel_compute_tile_offset(&x, &y, fb, 0,
+ fb->pitches[0], rotation);
linear_offset -= intel_crtc->dspaddr_offset;
- if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+ if (rotation == BIT(DRM_ROTATE_180)) {
dspcntr |= DISPPLANE_ROTATE_180;
if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
@@ -2931,7 +2862,7 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
} else {
int cpp = drm_format_plane_cpp(pixel_format, 0);
- return intel_tile_width(dev_priv, fb_modifier, cpp);
+ return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
}
}
@@ -2944,7 +2875,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
u64 offset;
intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
- intel_plane->base.state);
+ intel_plane->base.state->rotation);
vma = i915_gem_obj_to_ggtt_view(obj, &view);
if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
@@ -3284,12 +3215,12 @@ void intel_finish_reset(struct drm_device *dev)
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned reset_counter;
bool pending;
- if (i915_reset_in_progress(&dev_priv->gpu_error) ||
- intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
+ if (intel_crtc->reset_counter != reset_counter)
return false;
spin_lock_irq(&dev->event_lock);
@@ -3314,9 +3245,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
- if (HAS_DDI(dev))
- intel_set_pipe_csc(&crtc->base);
-
/*
* Update pipe size and adjust fitter if needed: the reason for this is
* that in compute_mode_changes we check the native mode (not the pfit
@@ -3894,9 +3822,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
intel_crtc->unpin_work = NULL;
if (work->event)
- drm_send_vblank_event(intel_crtc->base.dev,
- intel_crtc->pipe,
- work->event);
+ drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
drm_crtc_vblank_put(&intel_crtc->base);
@@ -3955,37 +3881,35 @@ static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
lpt_disable_iclkip(dev_priv);
- /* 20MHz is a corner case which is out of range for the 7-bit divisor */
- if (clock == 20000) {
- auxdiv = 1;
- divsel = 0x41;
- phaseinc = 0x20;
- } else {
- /* The iCLK virtual clock root frequency is in MHz,
- * but the adjusted_mode->crtc_clock in in KHz. To get the
- * divisors, it is necessary to divide one by another, so we
- * convert the virtual clock precision to KHz here for higher
- * precision.
- */
+ /* The iCLK virtual clock root frequency is in MHz,
+ * but the adjusted_mode->crtc_clock is in kHz. To get the
+ * divisors, it is necessary to divide one by another, so we
+ * convert the virtual clock precision to kHz here for higher
+ * precision.
+ */
+ for (auxdiv = 0; auxdiv < 2; auxdiv++) {
u32 iclk_virtual_root_freq = 172800 * 1000;
u32 iclk_pi_range = 64;
- u32 desired_divisor, msb_divisor_value, pi_value;
+ u32 desired_divisor;
- desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
- msb_divisor_value = desired_divisor / iclk_pi_range;
- pi_value = desired_divisor % iclk_pi_range;
+ desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
+ clock << auxdiv);
+ divsel = (desired_divisor / iclk_pi_range) - 2;
+ phaseinc = desired_divisor % iclk_pi_range;
- auxdiv = 0;
- divsel = msb_divisor_value - 2;
- phaseinc = pi_value;
+ /*
+ * Near 20MHz is a corner case which is
+ * out of range for the 7-bit divisor
+ */
+ if (divsel <= 0x7f)
+ break;
}
/* This should not happen with any sane values */
@@ -4032,6 +3956,43 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
+int lpt_get_iclkip(struct drm_i915_private *dev_priv)
+{
+ u32 divsel, phaseinc, auxdiv;
+ u32 iclk_virtual_root_freq = 172800 * 1000;
+ u32 iclk_pi_range = 64;
+ u32 desired_divisor;
+ u32 temp;
+
+ if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+ return 0;
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
+ if (temp & SBI_SSCCTL_DISABLE) {
+ mutex_unlock(&dev_priv->sb_lock);
+ return 0;
+ }
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
+ divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
+ SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
+ phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
+ SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
+
+ temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
+ auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
+ SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
+
+ mutex_unlock(&dev_priv->sb_lock);
+
+ desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
+
+ return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
+ desired_divisor << auxdiv);
+}
+
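The divisor search in lpt_program_iclkip() above, extracted as a stand-alone sketch using the same constants (clock in kHz; a 1080p-style value is used for illustration):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int iclk_virtual_root_freq = 172800 * 1000;
	unsigned int iclk_pi_range = 64;
	unsigned int clock = 148500;	/* kHz, example pixel clock */
	unsigned int auxdiv, divsel = 0, phaseinc = 0;

	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		unsigned int desired_divisor =
			DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
					  clock << auxdiv);

		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/* divsel must fit the 7-bit register field */
		if (divsel <= 0x7f)
			break;
	}

	/* prints auxdiv=0 divsel=16 phaseinc=12 */
	printf("auxdiv=%u divsel=%u phaseinc=%u\n", auxdiv, divsel, phaseinc);
	return 0;
}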
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
enum pipe pch_transcoder)
{
@@ -4142,12 +4103,6 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(FDI_RX_TUSIZE1(pipe),
I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
- /*
- * Sometimes spurious CPU pipe underruns happen during FDI
- * training, at least with VGA+HDMI cloning. Suppress them.
- */
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
@@ -4159,7 +4114,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
temp = I915_READ(PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
- if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
+ if (intel_crtc->config->shared_dpll ==
+ intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
temp |= sel;
else
temp &= ~sel;
@@ -4181,8 +4137,6 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
const struct drm_display_mode *adjusted_mode =
@@ -4238,113 +4192,6 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- struct intel_shared_dpll *pll;
- struct intel_shared_dpll_config *shared_dpll;
- enum intel_dpll_id i;
- int max = dev_priv->num_shared_dpll;
-
- shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
-
- if (HAS_PCH_IBX(dev_priv->dev)) {
- /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
- i = (enum intel_dpll_id) crtc->pipe;
- pll = &dev_priv->shared_dplls[i];
-
- DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
- crtc->base.base.id, pll->name);
-
- WARN_ON(shared_dpll[i].crtc_mask);
-
- goto found;
- }
-
- if (IS_BROXTON(dev_priv->dev)) {
- /* PLL is attached to port in bxt */
- struct intel_encoder *encoder;
- struct intel_digital_port *intel_dig_port;
-
- encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
- if (WARN_ON(!encoder))
- return NULL;
-
- intel_dig_port = enc_to_dig_port(&encoder->base);
- /* 1:1 mapping between ports and PLLs */
- i = (enum intel_dpll_id)intel_dig_port->port;
- pll = &dev_priv->shared_dplls[i];
- DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
- crtc->base.base.id, pll->name);
- WARN_ON(shared_dpll[i].crtc_mask);
-
- goto found;
- } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
- /* Do not consider SPLL */
- max = 2;
-
- for (i = 0; i < max; i++) {
- pll = &dev_priv->shared_dplls[i];
-
- /* Only want to check enabled timings first */
- if (shared_dpll[i].crtc_mask == 0)
- continue;
-
- if (memcmp(&crtc_state->dpll_hw_state,
- &shared_dpll[i].hw_state,
- sizeof(crtc_state->dpll_hw_state)) == 0) {
- DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
- crtc->base.base.id, pll->name,
- shared_dpll[i].crtc_mask,
- pll->active);
- goto found;
- }
- }
-
- /* Ok no matching timings, maybe there's a free one? */
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- pll = &dev_priv->shared_dplls[i];
- if (shared_dpll[i].crtc_mask == 0) {
- DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
- crtc->base.base.id, pll->name);
- goto found;
- }
- }
-
- return NULL;
-
-found:
- if (shared_dpll[i].crtc_mask == 0)
- shared_dpll[i].hw_state =
- crtc_state->dpll_hw_state;
-
- crtc_state->shared_dpll = i;
- DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
- pipe_name(crtc->pipe));
-
- shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
-
- return pll;
-}
-
-static void intel_shared_dpll_commit(struct drm_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_shared_dpll_config *shared_dpll;
- struct intel_shared_dpll *pll;
- enum intel_dpll_id i;
-
- if (!to_intel_atomic_state(state)->dpll_set)
- return;
-
- shared_dpll = to_intel_atomic_state(state)->shared_dpll;
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- pll = &dev_priv->shared_dplls[i];
- pll->config = shared_dpll[i];
- }
-}
-
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4576,8 +4423,11 @@ void hsw_enable_ips(struct intel_crtc *crtc)
if (!crtc->config->ips_enabled)
return;
- /* We can only enable IPS after we enable a plane and wait for a vblank */
- intel_wait_for_vblank(dev, crtc->pipe);
+ /*
+ * We can only enable IPS after we enable a plane and wait for a vblank.
+ * This function is called from post_plane_update, which is run after
+ * a vblank wait.
+ */
assert_plane_enabled(dev_priv, crtc->plane);
if (IS_BROADWELL(dev)) {
@@ -4626,55 +4476,6 @@ void hsw_disable_ips(struct intel_crtc *crtc)
intel_wait_for_vblank(dev, crtc->pipe);
}
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-static void intel_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
- int i;
- bool reenable_ips = false;
-
- /* The clocks have to be on to load the palette. */
- if (!crtc->state->active)
- return;
-
- if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
- if (intel_crtc->config->has_dsi_encoder)
- assert_dsi_pll_enabled(dev_priv);
- else
- assert_pll_enabled(dev_priv, pipe);
- }
-
- /* Workaround : Do not read or write the pipe palette/gamma data while
- * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
- */
- if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
- ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
- GAMMA_MODE_MODE_SPLIT)) {
- hsw_disable_ips(intel_crtc);
- reenable_ips = true;
- }
-
- for (i = 0; i < 256; i++) {
- i915_reg_t palreg;
-
- if (HAS_GMCH_DISPLAY(dev))
- palreg = PALETTE(pipe, i);
- else
- palreg = LGC_PALETTE(pipe, i);
-
- I915_WRITE(palreg,
- (intel_crtc->lut_r[i] << 16) |
- (intel_crtc->lut_g[i] << 8) |
- intel_crtc->lut_b[i]);
- }
-
- if (reenable_ips)
- hsw_enable_ips(intel_crtc);
-}
-
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
if (intel_crtc->overlay) {
@@ -4734,16 +4535,7 @@ intel_post_enable_primary(struct drm_crtc *crtc)
intel_check_pch_fifo_underruns(dev_priv);
}
-/**
- * intel_pre_disable_primary - Perform operations before disabling primary plane
- * @crtc: the CRTC whose primary plane is to be disabled
- *
- * Performs potentially sleeping operations that must be done before the
- * primary plane is disabled, such as updating FBC and IPS. Note that this may
- * be called due to an explicit primary plane update, or due to an implicit
- * disable that is caused when a sprite plane completely hides the primary
- * plane.
- */
+/* FIXME move all this to pre_plane_update() with proper state tracking */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
@@ -4762,6 +4554,26 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/*
+ * FIXME IPS should be fine as long as one plane is
+ * enabled, but in practice it seems to have problems
+ * when going from primary only to sprite only and vice
+ * versa.
+ */
+ hsw_disable_ips(intel_crtc);
+}
+
+/* FIXME get rid of this and use pre_plane_update */
+static void
+intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+
+ intel_pre_disable_primary(crtc);
+
+ /*
* Vblank time updates from the shadow to live plane control register
* are blocked if the memory self-refresh mode is active at that
* moment. So to make sure the plane gets truly disabled, disable
@@ -4775,37 +4587,39 @@ intel_pre_disable_primary(struct drm_crtc *crtc)
dev_priv->wm.vlv.cxsr = false;
intel_wait_for_vblank(dev, pipe);
}
-
- /*
- * FIXME IPS should be fine as long as one plane is
- * enabled, but in practice it seems to have problems
- * when going from primary only to sprite only and vice
- * versa.
- */
- hsw_disable_ips(intel_crtc);
}
-static void intel_post_plane_update(struct intel_crtc *crtc)
+static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
- struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_atomic_state *old_state = old_crtc_state->base.state;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
struct drm_device *dev = crtc->base.dev;
+ struct drm_plane *primary = crtc->base.primary;
+ struct drm_plane_state *old_pri_state =
+ drm_atomic_get_existing_plane_state(old_state, primary);
- intel_frontbuffer_flip(dev, atomic->fb_bits);
+ intel_frontbuffer_flip(dev, pipe_config->fb_bits);
crtc->wm.cxsr_allowed = true;
if (pipe_config->update_wm_post && pipe_config->base.active)
intel_update_watermarks(&crtc->base);
- if (atomic->update_fbc)
- intel_fbc_post_update(crtc);
+ if (old_pri_state) {
+ struct intel_plane_state *primary_state =
+ to_intel_plane_state(primary->state);
+ struct intel_plane_state *old_primary_state =
+ to_intel_plane_state(old_pri_state);
- if (atomic->post_enable_primary)
- intel_post_enable_primary(&crtc->base);
+ intel_fbc_post_update(crtc);
- memset(atomic, 0, sizeof(*atomic));
+ if (primary_state->visible &&
+ (needs_modeset(&pipe_config->base) ||
+ !old_primary_state->visible))
+ intel_post_enable_primary(&crtc->base);
+ }
}
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
@@ -4813,7 +4627,6 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
struct drm_atomic_state *old_state = old_crtc_state->base.state;
@@ -4822,15 +4635,14 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
drm_atomic_get_existing_plane_state(old_state, primary);
bool modeset = needs_modeset(&pipe_config->base);
- if (atomic->update_fbc)
- intel_fbc_pre_update(crtc);
-
if (old_pri_state) {
struct intel_plane_state *primary_state =
to_intel_plane_state(primary->state);
struct intel_plane_state *old_primary_state =
to_intel_plane_state(old_pri_state);
+ intel_fbc_pre_update(crtc);
+
if (old_primary_state->visible &&
(modeset || !primary_state->visible))
intel_pre_disable_primary(&crtc->base);
@@ -4839,11 +4651,58 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
if (pipe_config->disable_cxsr) {
crtc->wm.cxsr_allowed = false;
- if (old_crtc_state->base.active)
+ /*
+ * Vblank time updates from the shadow to live plane control register
+ * are blocked if the memory self-refresh mode is active at that
+ * moment. So to make sure the plane gets truly disabled, disable
+ * first the self-refresh mode. The self-refresh enable bit in turn
+ * will be checked/applied by the HW only at the next frame start
+ * event which is after the vblank start event, so we need to have a
+ * wait-for-vblank between disabling the plane and the pipe.
+ */
+ if (old_crtc_state->base.active) {
intel_set_memory_cxsr(dev_priv, false);
+ dev_priv->wm.vlv.cxsr = false;
+ intel_wait_for_vblank(dev, crtc->pipe);
+ }
+ }
+
+ /*
+ * IVB workaround: must disable low power watermarks for at least
+ * one frame before enabling scaling. LP watermarks can be re-enabled
+ * when scaling is disabled.
+ *
+ * WaCxSRDisabledForSpriteScaling:ivb
+ */
+ if (pipe_config->disable_lp_wm) {
+ ilk_disable_lp_wm(dev);
+ intel_wait_for_vblank(dev, crtc->pipe);
}
- if (!needs_modeset(&pipe_config->base) && pipe_config->update_wm_pre)
+ /*
+ * If we're doing a modeset, we're done. No need to do any pre-vblank
+ * watermark programming here.
+ */
+ if (needs_modeset(&pipe_config->base))
+ return;
+
+ /*
+ * For platforms that support atomic watermarks, program the
+ * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
+ * will be the intermediate values that are safe for both pre- and
+ * post- vblank; when vblank happens, the 'active' values will be set
+ * to the final 'target' values and we'll do this again to get the
+ * optimal watermarks. For gen9+ platforms, the values we program here
+ * will be the final target values which will get automatically latched
+ * at vblank time; no further programming will be necessary.
+ *
+ * If a platform hasn't been transitioned to atomic watermarks yet,
+ * we'll continue to update watermarks the old way, if flags tell
+ * us to.
+ */
+ if (dev_priv->display.initial_watermarks != NULL)
+ dev_priv->display.initial_watermarks(pipe_config);
+ else if (pipe_config->update_wm_pre)
intel_update_watermarks(&crtc->base);
}
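
The "intermediate" value described in the comment above must be safe on both sides of the vblank; for pre-gen9 that amounts to taking the per-level maximum of the currently active and the new target watermarks. A toy sketch of that idea (the level count and values are invented):

        #include <stdio.h>

        #define WM_LEVELS 4

        /* A value safe both before and after the vblank is the per-level
         * maximum of the old (active) and new (target) watermarks. Names
         * are illustrative, not the driver's. */
        static void compute_intermediate(const int *active, const int *target,
                                         int *intermediate)
        {
                for (int i = 0; i < WM_LEVELS; i++)
                        intermediate[i] = active[i] > target[i] ? active[i] : target[i];
        }

        int main(void)
        {
                int active[WM_LEVELS] = { 8, 16, 24, 32 };
                int target[WM_LEVELS] = { 4, 20, 24, 40 };
                int mid[WM_LEVELS];

                compute_intermediate(active, target, mid);
                for (int i = 0; i < WM_LEVELS; i++)
                        printf("level %d: %d\n", i, mid[i]);
                return 0;
        }
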
@@ -4874,10 +4733,24 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
+ struct intel_crtc_state *pipe_config =
+ to_intel_crtc_state(crtc->state);
if (WARN_ON(intel_crtc->active))
return;
+ /*
+ * Sometimes spurious CPU pipe underruns happen during FDI
+ * training, at least with VGA+HDMI cloning. Suppress them.
+ *
+ * On ILK we get occasional spurious CPU pipe underruns
+ * between eDP port A enable and vdd enable. Also PCH port
+ * enable seems to result in the occasional CPU pipe underrun.
+ *
+ * Spurious PCH underruns also occur during PCH enabling.
+ */
+ if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
@@ -4888,6 +4761,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
+ intel_set_pipe_src_size(intel_crtc);
if (intel_crtc->config->has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
@@ -4898,8 +4772,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
@@ -4920,9 +4792,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_crtc_load_lut(crtc);
+ intel_color_load_luts(&pipe_config->base);
- intel_update_watermarks(crtc);
+ if (dev_priv->display.initial_watermarks != NULL)
+ dev_priv->display.initial_watermarks(intel_crtc->config);
intel_enable_pipe(intel_crtc);
if (intel_crtc->config->has_pch_encoder)
@@ -4940,6 +4813,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
/* Must wait for vblank to avoid spurious PCH FIFO underruns */
if (intel_crtc->config->has_pch_encoder)
intel_wait_for_vblank(dev, pipe);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
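
The enable path above brackets the underrun-prone steps: reporting is suppressed before FDI training and PCH enabling, and only re-armed after a vblank has passed. A minimal model of that bracket, with stub helpers standing in for the real calls:

        #include <stdbool.h>
        #include <stdio.h>

        /* Stubs standing in for the driver's underrun-reporting helpers. */
        static void set_underrun_reporting(bool on) { printf("reporting: %d\n", on); }
        static void risky_enable_steps(void) { printf("fdi/pch enable\n"); }
        static void wait_for_vblank(void) { printf("wait for vblank\n"); }

        int main(void)
        {
                set_underrun_reporting(false);  /* suppress spurious underruns */
                risky_enable_steps();
                wait_for_vblank();              /* let things settle */
                set_underrun_reporting(true);   /* re-arm */
                return 0;
        }
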
@@ -4956,6 +4830,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
+ enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->state);
@@ -4966,16 +4841,20 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
false);
- if (intel_crtc_to_shared_dpll(intel_crtc))
+ if (intel_crtc->config->shared_dpll)
intel_enable_shared_dpll(intel_crtc);
if (intel_crtc->config->has_dp_encoder)
intel_dp_set_m_n(intel_crtc, M1_N1);
- intel_set_pipe_timings(intel_crtc);
+ if (!intel_crtc->config->has_dsi_encoder)
+ intel_set_pipe_timings(intel_crtc);
+
+ intel_set_pipe_src_size(intel_crtc);
- if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
- I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
+ if (cpu_transcoder != TRANSCODER_EDP &&
+ !transcoder_is_dsi(cpu_transcoder)) {
+ I915_WRITE(PIPE_MULT(cpu_transcoder),
intel_crtc->config->pixel_multiplier - 1);
}
@@ -4984,9 +4863,12 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
&intel_crtc->config->fdi_m_n, NULL);
}
- haswell_set_pipeconf(crtc);
+ if (!intel_crtc->config->has_dsi_encoder)
+ haswell_set_pipeconf(crtc);
+
+ haswell_set_pipemisc(crtc);
- intel_set_pipe_csc(crtc);
+ intel_color_set_csc(&pipe_config->base);
intel_crtc->active = true;
@@ -5015,14 +4897,20 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_crtc_load_lut(crtc);
+ intel_color_load_luts(&pipe_config->base);
intel_ddi_set_pipe_settings(crtc);
if (!intel_crtc->config->has_dsi_encoder)
intel_ddi_enable_transcoder_func(crtc);
- intel_update_watermarks(crtc);
- intel_enable_pipe(intel_crtc);
+ if (dev_priv->display.initial_watermarks != NULL)
+ dev_priv->display.initial_watermarks(pipe_config);
+ else
+ intel_update_watermarks(crtc);
+
+ /* XXX: Do the pipe assertions at the right place for BXT DSI. */
+ if (!intel_crtc->config->has_dsi_encoder)
+ intel_enable_pipe(intel_crtc);
if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);
@@ -5078,8 +4966,15 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- if (intel_crtc->config->has_pch_encoder)
+ /*
+ * Sometimes spurious CPU pipe underruns happen when the
+ * pipe is already disabled, but FDI RX/TX is still enabled.
+ * Happens at least with VGA+HDMI cloning. Suppress them.
+ */
+ if (intel_crtc->config->has_pch_encoder) {
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
+ }
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
@@ -5087,22 +4982,12 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
- /*
- * Sometimes spurious CPU pipe underruns happen when the
- * pipe is already disabled, but FDI RX/TX is still enabled.
- * Happens at least with VGA+HDMI cloning. Suppress them.
- */
- if (intel_crtc->config->has_pch_encoder)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
intel_disable_pipe(intel_crtc);
ironlake_pfit_disable(intel_crtc, false);
- if (intel_crtc->config->has_pch_encoder) {
+ if (intel_crtc->config->has_pch_encoder)
ironlake_fdi_disable(crtc);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- }
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
@@ -5132,6 +5017,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
ironlake_fdi_pll_disable(intel_crtc);
}
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
@@ -5155,7 +5041,9 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
- intel_disable_pipe(intel_crtc);
+ /* XXX: Do the pipe assertions at the right place for BXT DSI. */
+ if (!intel_crtc->config->has_dsi_encoder)
+ intel_disable_pipe(intel_crtc);
if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, false);
@@ -5330,6 +5218,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
mask |= BIT(intel_display_port_power_domain(intel_encoder));
}
+ if (crtc_state->shared_dpll)
+ mask |= BIT(POWER_DOMAIN_PLLS);
+
return mask;
}
@@ -5393,6 +5284,8 @@ static void intel_update_max_cdclk(struct drm_device *dev)
dev_priv->max_cdclk_freq = 450000;
else
dev_priv->max_cdclk_freq = 337500;
+ } else if (IS_BROXTON(dev)) {
+ dev_priv->max_cdclk_freq = 624000;
} else if (IS_BROADWELL(dev)) {
/*
* FIXME with extra cooling we can allow
@@ -5452,9 +5345,8 @@ static void intel_update_cdclk(struct drm_device *dev)
intel_update_max_cdclk(dev);
}
-static void broxton_set_cdclk(struct drm_device *dev, int frequency)
+static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t divider;
uint32_t ratio;
uint32_t current_freq;
@@ -5568,33 +5460,46 @@ static void broxton_set_cdclk(struct drm_device *dev, int frequency)
return;
}
- intel_update_cdclk(dev);
+ intel_update_cdclk(dev_priv->dev);
}
-void broxton_init_cdclk(struct drm_device *dev)
+static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t val;
+ if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
+ return false;
- /*
- * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
- * or else the reset will hang because there is no PCH to respond.
- * Move the handshake programming to initialization sequence.
- * Previously was left up to BIOS.
- */
- val = I915_READ(HSW_NDE_RSTWRN_OPT);
- val &= ~RESET_PCH_HANDSHAKE_ENABLE;
- I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+ /* TODO: Check for a valid CDCLK rate */
+
+ if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
+ DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
+
+ return false;
+ }
+
+ if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
+ DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
- /* Enable PG1 for cdclk */
- intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+ return false;
+ }
+
+ return true;
+}
+bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
+{
+ return broxton_cdclk_is_enabled(dev_priv);
+}
+
+void broxton_init_cdclk(struct drm_i915_private *dev_priv)
+{
/* check if cd clock is enabled */
- if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
- DRM_DEBUG_KMS("Display already initialized\n");
+ if (broxton_cdclk_is_enabled(dev_priv)) {
+ DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
return;
}
+ DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
+
/*
* FIXME:
* - The initial CDCLK needs to be read from VBT.
@@ -5602,7 +5507,7 @@ void broxton_init_cdclk(struct drm_device *dev)
* - check if setting the max (or any) cdclk freq is really necessary
* here, it belongs to modeset time
*/
- broxton_set_cdclk(dev, 624000);
+ broxton_set_cdclk(dev_priv, 624000);
I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
POSTING_READ(DBUF_CTL);
@@ -5613,10 +5518,8 @@ void broxton_init_cdclk(struct drm_device *dev)
DRM_ERROR("DBuf power enable timeout!\n");
}
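
The DBUF_CTL sequence above is a request/acknowledge handshake: set the REQUEST bit, then poll for the STATE bit with a bounded timeout. A self-contained toy model of that handshake (the bit positions are illustrative, not taken from the register spec):

        #include <stdio.h>

        #define DBUF_POWER_REQUEST (1u << 31)   /* illustrative bit values */
        #define DBUF_POWER_STATE   (1u << 30)

        static unsigned int dbuf_ctl;

        static unsigned int read_dbuf(void)
        {
                /* pretend the hardware acks the request after one read */
                if (dbuf_ctl & DBUF_POWER_REQUEST)
                        dbuf_ctl |= DBUF_POWER_STATE;
                return dbuf_ctl;
        }

        int main(void)
        {
                dbuf_ctl |= DBUF_POWER_REQUEST;

                for (int tries = 0; tries < 10; tries++) {
                        if (read_dbuf() & DBUF_POWER_STATE) {
                                printf("DBuf powered\n");
                                return 0;
                        }
                }
                printf("DBuf power enable timeout!\n");
                return 1;
        }
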
-void broxton_uninit_cdclk(struct drm_device *dev)
+void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
POSTING_READ(DBUF_CTL);
@@ -5626,9 +5529,7 @@ void broxton_uninit_cdclk(struct drm_device *dev)
DRM_ERROR("DBuf power disable timeout!\n");
/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
- broxton_set_cdclk(dev, 19200);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ broxton_set_cdclk(dev_priv, 19200);
}
static const struct skl_cdclk_entry {
@@ -6165,6 +6066,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
+ struct intel_crtc_state *pipe_config =
+ to_intel_crtc_state(crtc->state);
int pipe = intel_crtc->pipe;
if (WARN_ON(intel_crtc->active))
@@ -6174,6 +6077,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
+ intel_set_pipe_src_size(intel_crtc);
if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6192,14 +6096,12 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
- if (!intel_crtc->config->has_dsi_encoder) {
- if (IS_CHERRYVIEW(dev)) {
- chv_prepare_pll(intel_crtc, intel_crtc->config);
- chv_enable_pll(intel_crtc, intel_crtc->config);
- } else {
- vlv_prepare_pll(intel_crtc, intel_crtc->config);
- vlv_enable_pll(intel_crtc, intel_crtc->config);
- }
+ if (IS_CHERRYVIEW(dev)) {
+ chv_prepare_pll(intel_crtc, intel_crtc->config);
+ chv_enable_pll(intel_crtc, intel_crtc->config);
+ } else {
+ vlv_prepare_pll(intel_crtc, intel_crtc->config);
+ vlv_enable_pll(intel_crtc, intel_crtc->config);
}
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -6208,7 +6110,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
i9xx_pfit_enable(intel_crtc);
- intel_crtc_load_lut(crtc);
+ intel_color_load_luts(&pipe_config->base);
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
@@ -6235,7 +6137,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
- int pipe = intel_crtc->pipe;
+ struct intel_crtc_state *pipe_config =
+ to_intel_crtc_state(crtc->state);
+ enum pipe pipe = intel_crtc->pipe;
if (WARN_ON(intel_crtc->active))
return;
@@ -6246,6 +6150,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_dp_set_m_n(intel_crtc, M1_N1);
intel_set_pipe_timings(intel_crtc);
+ intel_set_pipe_src_size(intel_crtc);
i9xx_set_pipeconf(intel_crtc);
@@ -6262,7 +6167,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
i9xx_pfit_enable(intel_crtc);
- intel_crtc_load_lut(crtc);
+ intel_color_load_luts(&pipe_config->base);
intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
@@ -6300,10 +6205,9 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
- * We also need to wait on all gmch platforms because of the
- * self-refresh mode constraint explained above.
*/
- intel_wait_for_vblank(dev, pipe);
+ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, pipe);
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
@@ -6338,6 +6242,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
+ struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum intel_display_power_domain domain;
@@ -6349,14 +6254,27 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
if (to_intel_plane_state(crtc->primary->state)->visible) {
WARN_ON(intel_crtc->unpin_work);
- intel_pre_disable_primary(crtc);
+ intel_pre_disable_primary_noatomic(crtc);
intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
to_intel_plane_state(crtc->primary->state)->visible = false;
}
dev_priv->display.crtc_disable(crtc);
+
+ DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n",
+ crtc->base.id);
+
+ WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
+ crtc->state->active = false;
intel_crtc->active = false;
+ crtc->enabled = false;
+ crtc->state->connector_mask = 0;
+ crtc->state->encoder_mask = 0;
+
+ for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
+ encoder->base.crtc = NULL;
+
intel_fbc_disable(intel_crtc);
intel_update_watermarks(crtc);
intel_disable_shared_dpll(intel_crtc);
@@ -6399,7 +6317,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
/* Cross check the actual hw state with our own modeset state tracking (and it's
* internal consistency). */
-static void intel_connector_check_state(struct intel_connector *connector)
+static void intel_connector_verify_state(struct intel_connector *connector)
{
struct drm_crtc *crtc = connector->base.state->crtc;
@@ -6569,7 +6487,7 @@ retry:
* Hence the bw of each lane in terms of the mode signal
* is:
*/
- link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
+ link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
fdi_dotclock = adjusted_mode->crtc_clock;
@@ -6581,8 +6499,7 @@ retry:
intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
link_bw, &pipe_config->fdi_m_n);
- ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
- intel_crtc->pipe, pipe_config);
+ ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
pipe_config->pipe_bpp -= 2*3;
DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
@@ -6606,7 +6523,7 @@ static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
return false;
/* HSW can handle pixel rate up to cdclk? */
- if (IS_HASWELL(dev_priv->dev))
+ if (IS_HASWELL(dev_priv))
return true;
/*
@@ -7134,30 +7051,6 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
- int num_connectors)
-{
- struct drm_device *dev = crtc_state->base.crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int refclk;
-
- WARN_ON(!crtc_state->base.state);
-
- if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) {
- refclk = 100000;
- } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
- refclk = dev_priv->vbt.lvds_ssc_freq;
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
- } else if (!IS_GEN2(dev)) {
- refclk = 96000;
- } else {
- refclk = 48000;
- }
-
- return refclk;
-}
-
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
return (1 << dpll->n) << 16 | dpll->m2;
@@ -7301,24 +7194,34 @@ void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
static void vlv_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- u32 dpll, dpll_md;
+ pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (crtc->pipe != PIPE_A)
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
- /*
- * Enable DPIO clock input. We should never disable the reference
- * clock for pipe B, since VGA hotplug / manual detection depends
- * on it.
- */
- dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
- DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
- /* We should never disable this, set it here for state tracking */
- if (crtc->pipe == PIPE_B)
- dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
- dpll |= DPLL_VCO_ENABLE;
- pipe_config->dpll_hw_state.dpll = dpll;
+ /* DPLL not used with DSI, but still need the rest set up */
+ if (!pipe_config->has_dsi_encoder)
+ pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
+ DPLL_EXT_BUFFER_ENABLE_VLV;
- dpll_md = (pipe_config->pixel_multiplier - 1)
- << DPLL_MD_UDI_MULTIPLIER_SHIFT;
- pipe_config->dpll_hw_state.dpll_md = dpll_md;
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+}
+
+static void chv_compute_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+ DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
+ if (crtc->pipe != PIPE_A)
+ pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ /* DPLL not used with DSI, but still need the rest set up */
+ if (!pipe_config->has_dsi_encoder)
+ pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+
+ pipe_config->dpll_hw_state.dpll_md =
+ (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
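
Both compute helpers store the pixel multiplier biased by one in dpll_md, and the readout path adds the one back (see the "+ 1" in i9xx_get_pipe_config() further down). A small encode/decode sketch; the shift and mask values here are invented, not the hardware's:

        #include <stdio.h>

        #define MULT_SHIFT 8                    /* illustrative only */
        #define MULT_MASK  (0xf << MULT_SHIFT)

        static unsigned int encode_mult(int mult) { return (mult - 1) << MULT_SHIFT; }
        static int decode_mult(unsigned int md)
        {
                return (int)((md & MULT_MASK) >> MULT_SHIFT) + 1;
        }

        int main(void)
        {
                printf("%d\n", decode_mult(encode_mult(2)));  /* round-trips to 2 */
                return 0;
        }
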
static void vlv_prepare_pll(struct intel_crtc *crtc,
@@ -7326,11 +7229,20 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u32 mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
u32 coreclk, reg_val;
+ /* Enable Refclk */
+ I915_WRITE(DPLL(pipe),
+ pipe_config->dpll_hw_state.dpll &
+ ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+
+ /* No need to actually set up the DPLL with DSI */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
mutex_lock(&dev_priv->sb_lock);
bestn = pipe_config->dpll.n;
@@ -7412,32 +7324,26 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
mutex_unlock(&dev_priv->sb_lock);
}
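
Leaving DPLL_VCO_ENABLE clear in the computed value is what lets vlv_prepare_pll()/chv_prepare_pll() and the clock readout below treat that bit as a "PLL actually in use" sentinel for DSI. A minimal model of the convention (the bit value is illustrative):

        #include <stdio.h>

        #define DPLL_VCO_ENABLE (1u << 31)      /* illustrative bit value */

        static void prepare_pll(unsigned int dpll)
        {
                /* No need to actually set up the DPLL with DSI */
                if ((dpll & DPLL_VCO_ENABLE) == 0) {
                        printf("DSI: skipping DPLL programming\n");
                        return;
                }
                printf("programming DPLL %#x\n", dpll);
        }

        int main(void)
        {
                prepare_pll(0);                 /* DSI case */
                prepare_pll(DPLL_VCO_ENABLE);   /* normal case */
                return 0;
        }
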
-static void chv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
- DPLL_VCO_ENABLE;
- if (crtc->pipe != PIPE_A)
- pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- pipe_config->dpll_hw_state.dpll_md =
- (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-}
-
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe = crtc->pipe;
- i915_reg_t dpll_reg = DPLL(crtc->pipe);
+ enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 loopfilter, tribuf_calcntr;
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
u32 dpio_val;
int vco;
+ /* Enable Refclk and SSC */
+ I915_WRITE(DPLL(pipe),
+ pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+
+ /* No need to actually set up the DPLL with DSI */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
bestn = pipe_config->dpll.n;
bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
bestm1 = pipe_config->dpll.m1;
@@ -7448,12 +7354,6 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
dpio_val = 0;
loopfilter = 0;
- /*
- * Enable Refclk and SSC
- */
- I915_WRITE(dpll_reg,
- pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
-
mutex_lock(&dev_priv->sb_lock);
/* p1 and p2 divider */
@@ -7587,8 +7487,7 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
static void i9xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock,
- int num_connectors)
+ intel_clock_t *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7647,7 +7546,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
if (crtc_state->sdvo_tv_clock)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ intel_panel_use_ssc(dev_priv))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
@@ -7664,8 +7563,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
static void i8xx_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- intel_clock_t *reduced_clock,
- int num_connectors)
+ intel_clock_t *reduced_clock)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7691,7 +7589,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_DVO_2X_MODE;
if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ intel_panel_use_ssc(dev_priv))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
@@ -7760,6 +7658,14 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
(pipe == PIPE_B || pipe == PIPE_C))
I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+}
+
+static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum pipe pipe = intel_crtc->pipe;
+
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
*/
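
PIPESRC stores both dimensions biased by one, width in the high half and height in the low half; intel_get_pipe_src_size() below undoes the bias with its "+ 1". A standalone round-trip sketch, with the field layout inferred from the decode shown here:

        #include <stdint.h>
        #include <stdio.h>

        /* (width - 1) in the high 16 bits, (height - 1) in the low 16. */
        static uint32_t pipesrc_pack(int w, int h)
        {
                return ((uint32_t)(w - 1) << 16) | (uint32_t)(h - 1);
        }

        int main(void)
        {
                uint32_t tmp = pipesrc_pack(1920, 1080);

                printf("w=%u h=%u\n",
                       (unsigned)((tmp >> 16) + 1),
                       (unsigned)((tmp & 0xffff) + 1));
                return 0;
        }
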
@@ -7801,6 +7707,14 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
pipe_config->base.adjusted_mode.crtc_vtotal += 1;
pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
}
+}
+
+static void intel_get_pipe_src_size(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
tmp = I915_READ(PIPESRC(crtc->pipe));
pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
@@ -7898,69 +7812,192 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
POSTING_READ(PIPECONF(intel_crtc->pipe));
}
-static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
+static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int refclk, num_connectors = 0;
- intel_clock_t clock;
- bool ok;
const intel_limit_t *limit;
- struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int i;
+ int refclk = 48000;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (crtc_state->has_dsi_encoder)
- return 0;
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ }
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc == &crtc->base)
- num_connectors++;
+ limit = &intel_limits_i8xx_lvds;
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
+ limit = &intel_limits_i8xx_dvo;
+ } else {
+ limit = &intel_limits_i8xx_dac;
}
- if (!crtc_state->clock_set) {
- refclk = i9xx_get_refclk(crtc_state, num_connectors);
+ if (!crtc_state->clock_set &&
+ !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
- /*
- * Returns a set of divisors for the desired target clock with
- * the given refclk, or FALSE. The returned values represent
- * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
- * 2) / p1 / p2.
- */
- limit = intel_limit(crtc_state, refclk);
- ok = dev_priv->display.find_dpll(limit, crtc_state,
- crtc_state->port_clock,
- refclk, NULL, &clock);
- if (!ok) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
- return -EINVAL;
+ i8xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
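
The comment removed above quotes the divisor equation these find_best_dpll() helpers solve: dot = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. A worked example with made-up divisor values:

        #include <stdio.h>

        /* dot = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2 */
        static int i9xx_dot_clock(int refclk_khz, int n, int m1, int m2,
                                  int p1, int p2)
        {
                int m = 5 * (m1 + 2) + (m2 + 2);

                return refclk_khz * m / (n + 2) / p1 / p2;
        }

        int main(void)
        {
                /* e.g. 96 MHz refclk with n=3, m1=12, m2=9, p1=2, p2=4 */
                printf("%d kHz\n", i9xx_dot_clock(96000, 3, 12, 9, 2, 4));
                return 0;
        }
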
+
+static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const intel_limit_t *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ }
+
+ if (intel_is_dual_link_lvds(dev))
+ limit = &intel_limits_g4x_dual_channel_lvds;
+ else
+ limit = &intel_limits_g4x_single_channel_lvds;
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
+ limit = &intel_limits_g4x_hdmi;
+ } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
+ limit = &intel_limits_g4x_sdvo;
+ } else {
+ /* Use the i9xx SDVO limits for any other output type */
+ limit = &intel_limits_i9xx_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const intel_limit_t *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
}
- /* Compat-code for transition, will disappear. */
- crtc_state->dpll.n = clock.n;
- crtc_state->dpll.m1 = clock.m1;
- crtc_state->dpll.m2 = clock.m2;
- crtc_state->dpll.p1 = clock.p1;
- crtc_state->dpll.p2 = clock.p2;
+ limit = &intel_limits_pineview_lvds;
+ } else {
+ limit = &intel_limits_pineview_sdvo;
}
- if (IS_GEN2(dev)) {
- i8xx_compute_dpll(crtc, crtc_state, NULL,
- num_connectors);
- } else if (IS_CHERRYVIEW(dev)) {
- chv_compute_dpll(crtc, crtc_state);
- } else if (IS_VALLEYVIEW(dev)) {
- vlv_compute_dpll(crtc, crtc_state);
+ if (!crtc_state->clock_set &&
+ !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const intel_limit_t *limit;
+ int refclk = 96000;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ }
+
+ limit = &intel_limits_i9xx_lvds;
} else {
- i9xx_compute_dpll(crtc, crtc_state, NULL,
- num_connectors);
+ limit = &intel_limits_i9xx_sdvo;
+ }
+
+ if (!crtc_state->clock_set &&
+ !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
}
+ i9xx_compute_dpll(crtc, crtc_state, NULL);
+
+ return 0;
+}
+
+static int chv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ int refclk = 100000;
+ const intel_limit_t *limit = &intel_limits_chv;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->clock_set &&
+ !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ chv_compute_dpll(crtc, crtc_state);
+
+ return 0;
+}
+
+static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ int refclk = 100000;
+ const intel_limit_t *limit = &intel_limits_vlv;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (!crtc_state->clock_set &&
+ !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
+ DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ return -EINVAL;
+ }
+
+ vlv_compute_dpll(crtc, crtc_state);
+
return 0;
}
@@ -8001,8 +8038,8 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
u32 mdiv;
int refclk = 100000;
- /* In case of MIPI DPLL will not even be used */
- if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
+ /* In case of DSI, DPLL will not be used */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
return;
mutex_lock(&dev_priv->sb_lock);
@@ -8098,6 +8135,10 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
int refclk = 100000;
+ /* In case of DSI, DPLL will not be used */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
mutex_lock(&dev_priv->sb_lock);
cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
@@ -8131,7 +8172,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
- pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ pipe_config->shared_dpll = NULL;
ret = false;
@@ -8163,11 +8204,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
intel_get_pipe_timings(crtc, pipe_config);
+ intel_get_pipe_src_size(crtc, pipe_config);
i9xx_get_pfit_config(crtc, pipe_config);
if (INTEL_INFO(dev)->gen >= 4) {
- tmp = I915_READ(DPLL_MD(crtc->pipe));
+ /* No way to read it out on pipes B and C */
+ if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
+ tmp = dev_priv->chv_dpll_md[crtc->pipe];
+ else
+ tmp = I915_READ(DPLL_MD(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
@@ -8401,16 +8447,16 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
I915_WRITE(SOUTH_CHICKEN2, tmp);
- if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
DRM_ERROR("FDI mPHY reset assert timeout\n");
tmp = I915_READ(SOUTH_CHICKEN2);
tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
I915_WRITE(SOUTH_CHICKEN2, tmp);
- if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
@@ -8656,42 +8702,6 @@ void intel_init_pch_refclk(struct drm_device *dev)
lpt_init_pch_refclk(dev);
}
-static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
-{
- struct drm_device *dev = crtc_state->base.crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_atomic_state *state = crtc_state->base.state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- struct intel_encoder *encoder;
- int num_connectors = 0, i;
- bool is_lvds = false;
-
- for_each_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc_state->base.crtc)
- continue;
-
- encoder = to_intel_encoder(connector_state->best_encoder);
-
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- default:
- break;
- }
- num_connectors++;
- }
-
- if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
- dev_priv->vbt.lvds_ssc_freq);
- return dev_priv->vbt.lvds_ssc_freq;
- }
-
- return 120000;
-}
-
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -8734,82 +8744,14 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
POSTING_READ(PIPECONF(pipe));
}
-/*
- * Set up the pipe CSC unit.
- *
- * Currently only full range RGB to limited range RGB conversion
- * is supported, but eventually this should handle various
- * RGB<->YCbCr scenarios as well.
- */
-static void intel_set_pipe_csc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- uint16_t coeff = 0x7800; /* 1.0 */
-
- /*
- * TODO: Check what kind of values actually come out of the pipe
- * with these coeff/postoff values and adjust to get the best
- * accuracy. Perhaps we even need to take the bpc value into
- * consideration.
- */
-
- if (intel_crtc->config->limited_color_range)
- coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
-
- /*
- * GY/GU and RY/RU should be the other way around according
- * to BSpec, but reality doesn't agree. Just set them up in
- * a way that results in the correct picture.
- */
- I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
- I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
-
- I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
- I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
-
- I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
- I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
-
- I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
-
- if (INTEL_INFO(dev)->gen > 6) {
- uint16_t postoff = 0;
-
- if (intel_crtc->config->limited_color_range)
- postoff = (16 * (1 << 12) / 255) & 0x1fff;
-
- I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
- I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
- I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
-
- I915_WRITE(PIPE_CSC_MODE(pipe), 0);
- } else {
- uint32_t mode = CSC_MODE_YUV_TO_RGB;
-
- if (intel_crtc->config->limited_color_range)
- mode |= CSC_BLACK_SCREEN_OFFSET;
-
- I915_WRITE(PIPE_CSC_MODE(pipe), mode);
- }
-}
-
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
- uint32_t val;
-
- val = 0;
+ u32 val = 0;
- if (IS_HASWELL(dev) && intel_crtc->config->dither)
+ if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
@@ -8819,12 +8761,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
I915_WRITE(PIPECONF(cpu_transcoder), val);
POSTING_READ(PIPECONF(cpu_transcoder));
+}
- I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
- POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
+static void haswell_set_pipemisc(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
- val = 0;
+ if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
+ u32 val = 0;
switch (intel_crtc->config->pipe_bpp) {
case 18:
@@ -8847,39 +8792,10 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
if (intel_crtc->config->dither)
val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
- I915_WRITE(PIPEMISC(pipe), val);
+ I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
}
}
-static bool ironlake_compute_clocks(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- intel_clock_t *clock,
- bool *has_reduced_clock,
- intel_clock_t *reduced_clock)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int refclk;
- const intel_limit_t *limit;
- bool ret;
-
- refclk = ironlake_get_refclk(crtc_state);
-
- /*
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
- limit = intel_limit(crtc_state, refclk);
- ret = dev_priv->display.find_dpll(limit, crtc_state,
- crtc_state->port_clock,
- refclk, NULL, clock);
- if (!ret)
- return false;
-
- return true;
-}
-
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
/*
@@ -8896,10 +8812,9 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
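
In the driver, i9xx_dpll_compute_m() works out to 5 * (m1 + 2) + (m2 + 2), so the test above enables FP_CB_TUNE whenever m falls below factor * n. A small standalone check with illustrative numbers:

        #include <stdio.h>

        /* Mirrors i9xx_dpll_compute_m(): 5 * (m1 + 2) + (m2 + 2). */
        static int compute_m(int m1, int m2) { return 5 * (m1 + 2) + (m2 + 2); }

        int main(void)
        {
                int m1 = 12, m2 = 9, n = 3, factor = 21;   /* made-up values */
                int m = compute_m(m1, m2);

                printf("m=%d, factor*n=%d -> %s FP_CB_TUNE\n",
                       m, factor * n, m < factor * n ? "set" : "skip");
                return 0;
        }
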
-static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state,
- u32 *fp,
- intel_clock_t *reduced_clock, u32 *fp2)
+static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state,
+ intel_clock_t *reduced_clock)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
@@ -8908,8 +8823,8 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
- uint32_t dpll;
- int factor, num_connectors = 0, i;
+ u32 dpll, fp, fp2;
+ int factor, i;
bool is_lvds = false, is_sdvo = false;
for_each_connector_in_state(state, connector, connector_state, i) {
@@ -8929,8 +8844,6 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
default:
break;
}
-
- num_connectors++;
}
/* Enable autotuning of the PLL clock (if permissible) */
@@ -8943,11 +8856,19 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
} else if (crtc_state->sdvo_tv_clock)
factor = 20;
+ fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
+
if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
- *fp |= FP_CB_TUNE;
+ fp |= FP_CB_TUNE;
+
+ if (reduced_clock) {
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
- if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
- *fp2 |= FP_CB_TUNE;
+ if (reduced_clock->m < factor * reduced_clock->n)
+ fp2 |= FP_CB_TUNE;
+ } else {
+ fp2 = fp;
+ }
dpll = 0;
@@ -8984,76 +8905,80 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
break;
}
- if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
+ if (is_lvds && intel_panel_use_ssc(dev_priv))
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
- return dpll | DPLL_VCO_ENABLE;
+ dpll |= DPLL_VCO_ENABLE;
+
+ crtc_state->dpll_hw_state.dpll = dpll;
+ crtc_state->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp2;
}
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->base.dev;
- intel_clock_t clock, reduced_clock;
- u32 dpll = 0, fp = 0, fp2 = 0;
- bool ok, has_reduced_clock = false;
- bool is_lvds = false;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ intel_clock_t reduced_clock;
+ bool has_reduced_clock = false;
struct intel_shared_dpll *pll;
+ const intel_limit_t *limit;
+ int refclk = 120000;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
+ crtc->lowfreq_avail = false;
+
+ /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+ if (!crtc_state->has_pch_encoder)
+ return 0;
- WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
- "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
+ if (intel_panel_use_ssc(dev_priv)) {
+ DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
+ dev_priv->vbt.lvds_ssc_freq);
+ refclk = dev_priv->vbt.lvds_ssc_freq;
+ }
- ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
- &has_reduced_clock, &reduced_clock);
- if (!ok && !crtc_state->clock_set) {
+ if (intel_is_dual_link_lvds(dev)) {
+ if (refclk == 100000)
+ limit = &intel_limits_ironlake_dual_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_dual_lvds;
+ } else {
+ if (refclk == 100000)
+ limit = &intel_limits_ironlake_single_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_single_lvds;
+ }
+ } else {
+ limit = &intel_limits_ironlake_dac;
+ }
+
+ if (!crtc_state->clock_set &&
+ !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
+ refclk, NULL, &crtc_state->dpll)) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
- /* Compat-code for transition, will disappear. */
- if (!crtc_state->clock_set) {
- crtc_state->dpll.n = clock.n;
- crtc_state->dpll.m1 = clock.m1;
- crtc_state->dpll.m2 = clock.m2;
- crtc_state->dpll.p1 = clock.p1;
- crtc_state->dpll.p2 = clock.p2;
- }
- /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
- if (crtc_state->has_pch_encoder) {
- fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
- if (has_reduced_clock)
- fp2 = i9xx_dpll_compute_fp(&reduced_clock);
-
- dpll = ironlake_compute_dpll(crtc, crtc_state,
- &fp, &reduced_clock,
- has_reduced_clock ? &fp2 : NULL);
-
- crtc_state->dpll_hw_state.dpll = dpll;
- crtc_state->dpll_hw_state.fp0 = fp;
- if (has_reduced_clock)
- crtc_state->dpll_hw_state.fp1 = fp2;
- else
- crtc_state->dpll_hw_state.fp1 = fp;
+ ironlake_compute_dpll(crtc, crtc_state,
+ has_reduced_clock ? &reduced_clock : NULL);
- pll = intel_get_shared_dpll(crtc, crtc_state);
- if (pll == NULL) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return -EINVAL;
- }
+ pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
+ return -EINVAL;
}
- if (is_lvds && has_reduced_clock)
+ if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
+ has_reduced_clock)
crtc->lowfreq_avail = true;
- else
- crtc->lowfreq_avail = false;
return 0;
}
@@ -9355,7 +9280,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
- pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ pipe_config->shared_dpll = NULL;
ret = false;
tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -9384,6 +9309,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
+ enum intel_dpll_id pll_id;
pipe_config->has_pch_encoder = true;
@@ -9393,21 +9319,22 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_fdi_m_n_config(crtc, pipe_config);
- if (HAS_PCH_IBX(dev_priv->dev)) {
- pipe_config->shared_dpll =
- (enum intel_dpll_id) crtc->pipe;
+ if (HAS_PCH_IBX(dev_priv)) {
+ pll_id = (enum intel_dpll_id) crtc->pipe;
} else {
tmp = I915_READ(PCH_DPLL_SEL);
if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
- pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
+ pll_id = DPLL_ID_PCH_PLL_B;
else
- pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
+ pll_id = DPLL_ID_PCH_PLL_A;
}
- pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
+ pipe_config->shared_dpll =
+ intel_get_shared_dpll_by_id(dev_priv, pll_id);
+ pll = pipe_config->shared_dpll;
- WARN_ON(!pll->get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state));
+ WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
tmp = pipe_config->dpll_hw_state.dpll;
pipe_config->pixel_multiplier =
@@ -9420,6 +9347,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
}
intel_get_pipe_timings(crtc, pipe_config);
+ intel_get_pipe_src_size(crtc, pipe_config);
ironlake_get_pfit_config(crtc, pipe_config);
@@ -9512,8 +9440,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
val |= LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
- if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ if (wait_for_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
DRM_ERROR("Switching to FCLK failed\n");
val = I915_READ(LCPLL_CTL);
@@ -9586,8 +9514,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
- if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ if (wait_for_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
DRM_ERROR("Switching back to LCPLL failed\n");
}
@@ -9659,7 +9587,7 @@ static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
to_intel_atomic_state(old_state);
unsigned int req_cdclk = old_intel_state->dev_cdclk;
- broxton_set_cdclk(dev, req_cdclk);
+ broxton_set_cdclk(to_i915(dev), req_cdclk);
}
/* compute the max rate for new configuration */
@@ -9727,8 +9655,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
val |= LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
- if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ if (wait_for_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
DRM_ERROR("Switching to FCLK failed\n");
val = I915_READ(LCPLL_CTL);
@@ -9762,8 +9690,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
val &= ~LCPLL_CD_SOURCE_FCLK;
I915_WRITE(LCPLL_CTL, val);
- if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ if (wait_for_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
DRM_ERROR("Switching back to LCPLL failed\n");
mutex_lock(&dev_priv->rps.hw_lock);
@@ -9842,72 +9770,193 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
{
+ enum intel_dpll_id id;
+
switch (port) {
case PORT_A:
pipe_config->ddi_pll_sel = SKL_DPLL0;
- pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+ id = DPLL_ID_SKL_DPLL0;
break;
case PORT_B:
pipe_config->ddi_pll_sel = SKL_DPLL1;
- pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+ id = DPLL_ID_SKL_DPLL1;
break;
case PORT_C:
pipe_config->ddi_pll_sel = SKL_DPLL2;
- pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+ id = DPLL_ID_SKL_DPLL2;
break;
default:
DRM_ERROR("Incorrect port type\n");
+ return;
}
+
+ pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
{
- u32 temp, dpll_ctl1;
+ enum intel_dpll_id id;
+ u32 temp;
temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
switch (pipe_config->ddi_pll_sel) {
case SKL_DPLL0:
- /*
- * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
- * of the shared DPLL framework and thus needs to be read out
- * separately
- */
- dpll_ctl1 = I915_READ(DPLL_CTRL1);
- pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
+ id = DPLL_ID_SKL_DPLL0;
break;
case SKL_DPLL1:
- pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
+ id = DPLL_ID_SKL_DPLL1;
break;
case SKL_DPLL2:
- pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
+ id = DPLL_ID_SKL_DPLL2;
break;
case SKL_DPLL3:
- pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
+ id = DPLL_ID_SKL_DPLL3;
break;
+ default:
+ MISSING_CASE(pipe_config->ddi_pll_sel);
+ return;
}
+
+ pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
{
+ enum intel_dpll_id id;
+
pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
switch (pipe_config->ddi_pll_sel) {
case PORT_CLK_SEL_WRPLL1:
- pipe_config->shared_dpll = DPLL_ID_WRPLL1;
+ id = DPLL_ID_WRPLL1;
break;
case PORT_CLK_SEL_WRPLL2:
- pipe_config->shared_dpll = DPLL_ID_WRPLL2;
+ id = DPLL_ID_WRPLL2;
break;
case PORT_CLK_SEL_SPLL:
- pipe_config->shared_dpll = DPLL_ID_SPLL;
+ id = DPLL_ID_SPLL;
+ break;
+ case PORT_CLK_SEL_LCPLL_810:
+ id = DPLL_ID_LCPLL_810;
break;
+ case PORT_CLK_SEL_LCPLL_1350:
+ id = DPLL_ID_LCPLL_1350;
+ break;
+ case PORT_CLK_SEL_LCPLL_2700:
+ id = DPLL_ID_LCPLL_2700;
+ break;
+ default:
+ MISSING_CASE(pipe_config->ddi_pll_sel);
+ /* fall through */
+ case PORT_CLK_SEL_NONE:
+ return;
}
+
+ pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+}
+
+static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ unsigned long *power_domain_mask)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
+ u32 tmp;
+
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+
+ /*
+ * XXX: Do intel_display_power_get_if_enabled before reading this (for
+ * consistency and less surprising code; it's in an always-on power well).
+ */
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+ if (tmp & TRANS_DDI_FUNC_ENABLE) {
+ enum pipe trans_edp_pipe;
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ default:
+ WARN(1, "unknown pipe linked to edp transcoder\n");
+ case TRANS_DDI_EDP_INPUT_A_ONOFF:
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ trans_edp_pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ trans_edp_pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ trans_edp_pipe = PIPE_C;
+ break;
+ }
+
+ if (trans_edp_pipe == crtc->pipe)
+ pipe_config->cpu_transcoder = TRANSCODER_EDP;
+ }
+
+ power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return false;
+ *power_domain_mask |= BIT(power_domain);
+
+ tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+
+ return tmp & PIPECONF_ENABLE;
+}
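
hsw_get_transcoder_state() follows the pattern used throughout readout: take a power reference only if the domain is already enabled, record it in a mask, and drop every recorded reference on exit (see the for_each_power_domain() loop at the bottom of haswell_get_pipe_config()). A toy version of that bookkeeping:

        #include <stdbool.h>
        #include <stdio.h>

        #define MAX_DOMAINS 8

        /* Simulated power wells; only domains 0 and 2 are powered. */
        static bool domain_powered[MAX_DOMAINS] = { true, false, true };
        static int refcount[MAX_DOMAINS];

        static bool power_get_if_enabled(int d)
        {
                if (!domain_powered[d])
                        return false;
                refcount[d]++;
                return true;
        }

        static void power_put(int d) { refcount[d]--; }

        int main(void)
        {
                unsigned long mask = 0;

                for (int d = 0; d < 3; d++)
                        if (power_get_if_enabled(d))
                                mask |= 1UL << d;       /* remember what we took */

                printf("mask=%#lx\n", mask);

                for (int d = 0; d < MAX_DOMAINS; d++)
                        if (mask & (1UL << d))
                                power_put(d);           /* release on exit */
                return 0;
        }
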
+
+static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ unsigned long *power_domain_mask)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
+ enum port port;
+ enum transcoder cpu_transcoder;
+ u32 tmp;
+
+ pipe_config->has_dsi_encoder = false;
+
+ for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
+ if (port == PORT_A)
+ cpu_transcoder = TRANSCODER_DSI_A;
+ else
+ cpu_transcoder = TRANSCODER_DSI_C;
+
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ continue;
+ *power_domain_mask |= BIT(power_domain);
+
+ /*
+ * The PLL needs to be enabled with a valid divider
+ * configuration, otherwise accessing DSI registers will hang
+ * the machine. See BSpec North Display Engine
+ * registers/MIPI[BXT]. We can break out here early, since we
+ * need the same DSI PLL to be enabled for both DSI ports.
+ */
+ if (!intel_dsi_pll_is_enabled(dev_priv))
+ break;
+
+ /* XXX: this works for video mode only */
+ tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
+ if (!(tmp & DPI_ENABLE))
+ continue;
+
+ tmp = I915_READ(MIPI_CTRL(port));
+ if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
+ continue;
+
+ pipe_config->cpu_transcoder = cpu_transcoder;
+ pipe_config->has_dsi_encoder = true;
+ break;
+ }
+
+ return pipe_config->has_dsi_encoder;
}
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
@@ -9930,11 +9979,10 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
else
haswell_get_ddi_pll(dev_priv, port, pipe_config);
- if (pipe_config->shared_dpll >= 0) {
- pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
-
- WARN_ON(!pll->get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state));
+ pll = pipe_config->shared_dpll;
+ if (pll) {
+ WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
}
/*
@@ -9961,53 +10009,37 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
unsigned long power_domain_mask;
- uint32_t tmp;
- bool ret;
+ bool active;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
power_domain_mask = BIT(power_domain);
- ret = false;
+ pipe_config->shared_dpll = NULL;
- pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
- pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
- if (tmp & TRANS_DDI_FUNC_ENABLE) {
- enum pipe trans_edp_pipe;
- switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
- default:
- WARN(1, "unknown pipe linked to edp transcoder\n");
- case TRANS_DDI_EDP_INPUT_A_ONOFF:
- case TRANS_DDI_EDP_INPUT_A_ON:
- trans_edp_pipe = PIPE_A;
- break;
- case TRANS_DDI_EDP_INPUT_B_ONOFF:
- trans_edp_pipe = PIPE_B;
- break;
- case TRANS_DDI_EDP_INPUT_C_ONOFF:
- trans_edp_pipe = PIPE_C;
- break;
- }
-
- if (trans_edp_pipe == crtc->pipe)
- pipe_config->cpu_transcoder = TRANSCODER_EDP;
+ if (IS_BROXTON(dev_priv)) {
+ bxt_get_dsi_transcoder_state(crtc, pipe_config,
+ &power_domain_mask);
+ WARN_ON(active && pipe_config->has_dsi_encoder);
+ if (pipe_config->has_dsi_encoder)
+ active = true;
}
- power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ if (!active)
goto out;
- power_domain_mask |= BIT(power_domain);
- tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
- if (!(tmp & PIPECONF_ENABLE))
- goto out;
+ if (!pipe_config->has_dsi_encoder) {
+ haswell_get_ddi_port_state(crtc, pipe_config);
+ intel_get_pipe_timings(crtc, pipe_config);
+ }
- haswell_get_ddi_port_state(crtc, pipe_config);
+ intel_get_pipe_src_size(crtc, pipe_config);
- intel_get_pipe_timings(crtc, pipe_config);
+ pipe_config->gamma_mode =
+ I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
if (INTEL_INFO(dev)->gen >= 9) {
skl_init_scalers(dev, crtc, pipe_config);
@@ -10031,20 +10063,19 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
(I915_READ(IPS_CTL) & IPS_ENABLE);
- if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
+ if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
+ !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
- ret = true;
-
out:
for_each_power_domain(power_domain, power_domain_mask)
intel_display_power_put(dev_priv, power_domain);
- return ret;
+ return active;
}
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
@@ -10237,21 +10268,6 @@ static bool cursor_size_ok(struct drm_device *dev,
return true;
}
-static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t start, uint32_t size)
-{
- int end = (start + size > 256) ? 256 : start + size, i;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- for (i = start; i < end; i++) {
- intel_crtc->lut_r[i] = red[i] >> 8;
- intel_crtc->lut_g[i] = green[i] >> 8;
- intel_crtc->lut_b[i] = blue[i] >> 8;
- }
-
- intel_crtc_load_lut(crtc);
-}
-
/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
@@ -10739,19 +10755,18 @@ int intel_dotclock_calculate(int link_freq,
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/* read out port_clock from the DPLL */
i9xx_crtc_clock_get(crtc, pipe_config);
/*
- * This value does not include pixel_multiplier.
- * We will check that port_clock and adjusted_mode.crtc_clock
- * agree once we know their relationship in the encoder's
- * get_config() function.
+ * In case there is an active pipe without active ports,
+ * we may need some idea for the dotclock anyway.
+ * Calculate one based on the FDI configuration.
*/
pipe_config->base.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
+ intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
&pipe_config->fdi_m_n);
}
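/*
 * Editor's sketch, not part of the patch: the arithmetic behind
 * intel_dotclock_calculate(), assuming the usual DP/FDI relation
 * dotclock = link_freq * M / N with a guard against N == 0. The M/N
 * values in main() are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

struct link_m_n { uint32_t link_m, link_n; };

static int dotclock_calculate(int link_freq, const struct link_m_n *m_n)
{
	if (!m_n->link_n)
		return 0;
	return (int)(((uint64_t)m_n->link_m * link_freq) / m_n->link_n);
}

int main(void)
{
	struct link_m_n m_n = { .link_m = 22222, .link_n = 65536 };

	/* a 270000 kHz link with these M/N gives ~91551 kHz */
	printf("%d kHz\n", dotclock_calculate(270000, &m_n));
	return 0;
}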
@@ -10870,7 +10885,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
struct drm_plane *primary = crtc->base.primary;
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(work->old_fb, primary->state);
+ intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
drm_gem_object_unreference(&work->pending_flip_obj->base);
if (work->flip_queued_req)
@@ -10944,9 +10959,10 @@ static bool page_flip_finished(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned reset_counter;
- if (i915_reset_in_progress(&dev_priv->gpu_error) ||
- crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
+ reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+ if (crtc->reset_counter != reset_counter)
return true;
/*
@@ -11024,7 +11040,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
@@ -11040,13 +11056,13 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
- intel_ring_emit(ring, 0); /* aux display base address, unused */
+ intel_ring_emit(engine, fb->pitches[0]);
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
@@ -11059,7 +11075,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
int ret;
@@ -11072,13 +11088,13 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
+ intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, fb->pitches[0]);
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, MI_NOOP);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
@@ -11091,7 +11107,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
@@ -11105,10 +11121,10 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
+ intel_ring_emit(engine, fb->pitches[0]);
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -11117,7 +11133,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- intel_ring_emit(ring, pf | pipesrc);
+ intel_ring_emit(engine, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
@@ -11130,7 +11146,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
@@ -11140,10 +11156,10 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
if (ret)
return ret;
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
+ intel_ring_emit(engine, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -11153,7 +11169,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- intel_ring_emit(ring, pf | pipesrc);
+ intel_ring_emit(engine, pf | pipesrc);
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
@@ -11166,7 +11182,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t plane_bit = 0;
int len, ret;
@@ -11187,7 +11203,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
}
len = 4;
- if (ring->id == RCS) {
+ if (engine->id == RCS) {
len += 6;
/*
* On Gen 8, SRM is now taking an extra dword to accommodate
@@ -11225,36 +11241,36 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* for the RCS also doesn't appear to drop events. Setting the DERRMR
* to zero does lead to lockups within MI_DISPLAY_FLIP.
*/
- if (ring->id == RCS) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, DERRMR);
- intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
- DERRMR_PIPEB_PRI_FLIP_DONE |
- DERRMR_PIPEC_PRI_FLIP_DONE));
+ if (engine->id == RCS) {
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit_reg(engine, DERRMR);
+ intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+ DERRMR_PIPEB_PRI_FLIP_DONE |
+ DERRMR_PIPEC_PRI_FLIP_DONE));
if (IS_GEN8(dev))
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
+ intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT);
else
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
+ intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit_reg(ring, DERRMR);
- intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
+ intel_ring_emit_reg(engine, DERRMR);
+ intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
if (IS_GEN8(dev)) {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, MI_NOOP);
}
}
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
- intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
- intel_ring_emit(ring, (MI_NOOP));
+ intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
+ intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
+ intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
+ intel_ring_emit(engine, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc->unpin_work);
return 0;
}
-static bool use_mmio_flip(struct intel_engine_cs *ring,
+static bool use_mmio_flip(struct intel_engine_cs *engine,
struct drm_i915_gem_object *obj)
{
/*
@@ -11265,10 +11281,10 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
* So using MMIO flips there would disrupt this mechanism.
*/
- if (ring == NULL)
+ if (engine == NULL)
return true;
- if (INTEL_INFO(ring->dev)->gen < 5)
+ if (INTEL_INFO(engine->dev)->gen < 5)
return false;
if (i915.use_mmio_flip < 0)
@@ -11282,7 +11298,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
false))
return true;
else
- return ring != i915_gem_request_get_ring(obj->last_write_req);
+ return engine != i915_gem_request_get_engine(obj->last_write_req);
}
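/*
 * Editor's sketch, not part of the patch: the CS-vs-MMIO flip decision
 * above as a pure function. 'param' mirrors the i915.use_mmio_flip
 * module parameter (< 0 never, > 0 always, 0 pick automatically); the
 * other inputs stand in for the object/engine state the driver queries.
 */
#include <stdbool.h>
#include <stdio.h>

static bool use_mmio_flip_sketch(bool have_engine, int gen, int param,
				 bool execlists, bool foreign_fence_pending,
				 bool written_by_other_engine)
{
	if (!have_engine)
		return true;		/* nothing to queue a CS flip on */
	if (gen < 5)
		return false;		/* no MMIO flips before gen5 */
	if (param < 0)
		return false;		/* forced off */
	if (param > 0)
		return true;		/* forced on */
	/* auto: prefer MMIO whenever a CS flip would need cross-engine
	 * synchronisation or would stall on a foreign fence */
	return execlists || foreign_fence_pending || written_by_other_engine;
}

int main(void)
{
	/* gen9, auto param, last writer on another engine -> MMIO flip */
	printf("%d\n", use_mmio_flip_sketch(true, 9, 0, false, false, true));
	return 0;
}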
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11400,7 +11416,6 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
if (mmio_flip->req) {
WARN_ON(__i915_wait_request(mmio_flip->req,
- mmio_flip->crtc->reset_counter,
false, NULL,
&mmio_flip->i915->rps.mmioflips));
i915_gem_request_unreference__unlocked(mmio_flip->req);
@@ -11528,7 +11543,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_plane *primary = crtc->primary;
enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
bool mmio_flip;
struct drm_i915_gem_request *request = NULL;
int ret;
@@ -11608,28 +11623,33 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup;
+ intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+ if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
+ ret = -EIO;
+ goto cleanup;
+ }
+
atomic_inc(&intel_crtc->unpin_work_count);
- intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- ring = &dev_priv->ring[BCS];
+ engine = &dev_priv->engine[BCS];
if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
- ring = NULL;
+ engine = NULL;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- ring = &dev_priv->ring[BCS];
+ engine = &dev_priv->engine[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
- ring = i915_gem_request_get_ring(obj->last_write_req);
- if (ring == NULL || ring->id != RCS)
- ring = &dev_priv->ring[BCS];
+ engine = i915_gem_request_get_engine(obj->last_write_req);
+ if (engine == NULL || engine->id != RCS)
+ engine = &dev_priv->engine[BCS];
} else {
- ring = &dev_priv->ring[RCS];
+ engine = &dev_priv->engine[RCS];
}
- mmio_flip = use_mmio_flip(ring, obj);
+ mmio_flip = use_mmio_flip(engine, obj);
/* When using CS flips, we want to emit semaphores between rings.
* However, when using mmio flips we will create a task to do the
@@ -11637,13 +11657,12 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* into the display plane and skip any waits.
*/
if (!mmio_flip) {
- ret = i915_gem_object_sync(obj, ring, &request);
+ ret = i915_gem_object_sync(obj, engine, &request);
if (ret)
goto cleanup_pending;
}
- ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
- crtc->primary->state);
+ ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
if (ret)
goto cleanup_pending;
@@ -11660,7 +11679,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj->last_write_req);
} else {
if (!request) {
- request = i915_gem_request_alloc(ring, NULL);
+ request = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(request)) {
ret = PTR_ERR(request);
goto cleanup_unpin;
@@ -11693,10 +11712,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_unpin:
- intel_unpin_fb_obj(fb, crtc->primary->state);
+ intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
cleanup_pending:
if (!IS_ERR_OR_NULL(request))
- i915_gem_request_cancel(request);
+ i915_add_request_no_flush(request);
atomic_dec(&intel_crtc->unpin_work_count);
mutex_unlock(&dev->struct_mutex);
cleanup:
@@ -11746,7 +11765,7 @@ retry:
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
- drm_send_vblank_event(dev, pipe, event);
+ drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&dev->event_lock);
}
}
@@ -11806,6 +11825,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_plane *plane = plane_state->plane;
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->state);
int idx = intel_crtc->base.base.id, ret;
@@ -11872,32 +11892,25 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
pipe_config->update_wm_post = true;
}
- if (visible || was_visible)
- intel_crtc->atomic.fb_bits |=
- to_intel_plane(plane)->frontbuffer_bit;
+ /* Pre-gen9 platforms need two-step watermark updates */
+ if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
+ INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
+ to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
- switch (plane->type) {
- case DRM_PLANE_TYPE_PRIMARY:
- intel_crtc->atomic.post_enable_primary = turn_on;
- intel_crtc->atomic.update_fbc = true;
+ if (visible || was_visible)
+ pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;
- break;
- case DRM_PLANE_TYPE_CURSOR:
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- /*
- * WaCxSRDisabledForSpriteScaling:ivb
- *
- * cstate->update_wm was already set above, so this flag will
- * take effect when we commit and program watermarks.
- */
- if (IS_IVYBRIDGE(dev) &&
- needs_scaling(to_intel_plane_state(plane_state)) &&
- !needs_scaling(old_plane_state))
- pipe_config->disable_lp_wm = true;
+ /*
+ * WaCxSRDisabledForSpriteScaling:ivb
+ *
+ * cstate->update_wm was already set above, so this flag will
+ * take effect when we commit and program watermarks.
+ */
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
+ needs_scaling(to_intel_plane_state(plane_state)) &&
+ !needs_scaling(old_plane_state))
+ pipe_config->disable_lp_wm = true;
- break;
- }
return 0;
}
@@ -11973,18 +11986,54 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
if (mode_changed && crtc_state->enable &&
dev_priv->display.crtc_compute_clock &&
- !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
+ !WARN_ON(pipe_config->shared_dpll)) {
ret = dev_priv->display.crtc_compute_clock(intel_crtc,
pipe_config);
if (ret)
return ret;
}
+ if (crtc_state->color_mgmt_changed) {
+ ret = intel_color_check(crtc, crtc_state);
+ if (ret)
+ return ret;
+
+ /*
+ * Changing color management on Intel hardware is
+ * handled as part of planes update.
+ */
+ crtc_state->planes_changed = true;
+ }
+
ret = 0;
if (dev_priv->display.compute_pipe_wm) {
- ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
- if (ret)
+ ret = dev_priv->display.compute_pipe_wm(pipe_config);
+ if (ret) {
+ DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
return ret;
+ }
+ }
+
+ if (dev_priv->display.compute_intermediate_wm &&
+ !to_intel_atomic_state(state)->skip_intermediate_wm) {
+ if (WARN_ON(!dev_priv->display.compute_pipe_wm))
+ return 0;
+
+ /*
+ * Calculate 'intermediate' watermarks that satisfy both the
+ * old state and the new state. We can program these
+ * immediately.
+ */
+ ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
+ intel_crtc,
+ pipe_config);
+ if (ret) {
+ DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
+ return ret;
+ }
+ } else if (dev_priv->display.compute_intermediate_wm) {
+ if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
+ pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
}
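/*
 * Editor's sketch, not part of the patch: one way to build an
 * 'intermediate' watermark set that is valid under both the old and the
 * new state, consistent with the comment above -- take the worse
 * (larger) value per level and keep a level enabled only if both states
 * can support it. The struct layout is invented; the real ILK-style
 * compute_intermediate_wm hook differs in detail.
 */
#include <stdbool.h>

#define WM_LEVELS 5

struct pipe_wm {
	int  pri_val[WM_LEVELS];
	bool enable[WM_LEVELS];
};

static void compute_intermediate_wm(struct pipe_wm *mid,
				    const struct pipe_wm *old_wm,
				    const struct pipe_wm *new_wm)
{
	for (int level = 0; level < WM_LEVELS; level++) {
		mid->pri_val[level] =
			old_wm->pri_val[level] > new_wm->pri_val[level] ?
			old_wm->pri_val[level] : new_wm->pri_val[level];
		mid->enable[level] = old_wm->enable[level] &&
				     new_wm->enable[level];
	}
}

int main(void)
{
	struct pipe_wm old_wm = { { 2, 4, 6, 8, 10 }, { 1, 1, 1, 0, 0 } };
	struct pipe_wm new_wm = { { 3, 3, 7, 7, 12 }, { 1, 1, 0, 0, 0 } };
	struct pipe_wm mid;

	compute_intermediate_wm(&mid, &old_wm, &new_wm);
	return 0;
}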
if (INTEL_INFO(dev)->gen >= 9) {
@@ -12001,7 +12050,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base_atomic = intel_pipe_set_base_atomic,
- .load_lut = intel_crtc_load_lut,
.atomic_begin = intel_begin_crtc_commit,
.atomic_flush = intel_finish_crtc_commit,
.atomic_check = intel_crtc_atomic_check,
@@ -12012,11 +12060,16 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
struct intel_connector *connector;
for_each_intel_connector(dev, connector) {
+ if (connector->base.state->crtc)
+ drm_connector_unreference(&connector->base);
+
if (connector->base.encoder) {
connector->base.state->best_encoder =
connector->base.encoder;
connector->base.state->crtc =
connector->base.encoder->crtc;
+
+ drm_connector_reference(&connector->base);
} else {
connector->base.state->best_encoder = NULL;
connector->base.state->crtc = NULL;
@@ -12118,7 +12171,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
context, pipe_config, pipe_name(crtc->pipe));
- DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
+ DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
pipe_config->pipe_bpp, pipe_config->dither);
DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
@@ -12194,7 +12247,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->dpll_hw_state.cfgcr1,
pipe_config->dpll_hw_state.cfgcr2);
} else if (HAS_DDI(dev)) {
- DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+ DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
pipe_config->ddi_pll_sel,
pipe_config->dpll_hw_state.wrpll,
pipe_config->dpll_hw_state.spll);
@@ -12297,7 +12350,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
struct drm_crtc_state tmp_state;
struct intel_crtc_scaler_state scaler_state;
struct intel_dpll_hw_state dpll_hw_state;
- enum intel_dpll_id shared_dpll;
+ struct intel_shared_dpll *shared_dpll;
uint32_t ddi_pll_sel;
bool force_thru;
@@ -12567,6 +12620,15 @@ intel_pipe_config_compare(struct drm_device *dev,
ret = false; \
}
+#define PIPE_CONF_CHECK_P(name) \
+ if (current_config->name != pipe_config->name) { \
+ INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ "(expected %p, found %p)\n", \
+ current_config->name, \
+ pipe_config->name); \
+ ret = false; \
+ }
+
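/*
 * Editor's note, not part of the patch: PIPE_CONF_CHECK_P exists
 * because shared_dpll is now a pointer, so equality means "same PLL
 * object" and mismatches are reported with %p. A minimal standalone
 * demonstration of the same pattern (names invented):
 */
#include <stdio.h>

struct dpll { int id; };

#define CHECK_P(cur, found, field)					\
	do {								\
		if ((cur)->field != (found)->field)			\
			fprintf(stderr, "mismatch in " #field		\
				" (expected %p, found %p)\n",		\
				(void *)(cur)->field,			\
				(void *)(found)->field);		\
	} while (0)

struct cfg { struct dpll *shared_dpll; };

int main(void)
{
	struct dpll a = { 0 }, b = { 1 };
	struct cfg sw = { &a }, hw = { &b };

	CHECK_P(&sw, &hw, shared_dpll);	/* reports a mismatch */
	return 0;
}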
#define PIPE_CONF_CHECK_M_N(name) \
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name,\
@@ -12587,6 +12649,11 @@ intel_pipe_config_compare(struct drm_device *dev,
ret = false; \
}
+/* This is required for BDW+ where there is only one set of registers for
+ * switching between high and low RR.
+ * This macro can be used whenever a comparison has to be made between one
+ * hw state and multiple sw state variables.
+ */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name, adjust) && \
@@ -12614,22 +12681,6 @@ intel_pipe_config_compare(struct drm_device *dev,
ret = false; \
}
-/* This is required for BDW+ where there is only one set of registers for
- * switching between high and low RR.
- * This macro can be used whenever a comparison has to be made between one
- * hw state and multiple sw state variables.
- */
-#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
- if ((current_config->name != pipe_config->name) && \
- (current_config->alt_name != pipe_config->name)) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
- "(expected %i or %i, found %i)\n", \
- current_config->name, \
- current_config->alt_name, \
- pipe_config->name); \
- ret = false; \
- }
-
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
@@ -12710,7 +12761,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
if (INTEL_INFO(dev)->gen < 4)
- PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+ PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
if (!adjust) {
@@ -12734,7 +12785,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(ddi_pll_sel);
- PIPE_CONF_CHECK_I(shared_dpll);
+ PIPE_CONF_CHECK_P(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
@@ -12745,6 +12796,9 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
+ PIPE_CONF_CHECK_X(dsi_pll.ctrl);
+ PIPE_CONF_CHECK_X(dsi_pll.div);
+
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
@@ -12753,7 +12807,7 @@ intel_pipe_config_compare(struct drm_device *dev,
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
-#undef PIPE_CONF_CHECK_I_ALT
+#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
@@ -12762,48 +12816,61 @@ intel_pipe_config_compare(struct drm_device *dev,
return ret;
}
-static void check_wm_state(struct drm_device *dev)
+static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *pipe_config)
{
+ if (pipe_config->has_pch_encoder) {
+ int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
+ &pipe_config->fdi_m_n);
+ int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
+
+ /*
+ * FDI already provided one idea for the dotclock.
+ * Yell if the encoder disagrees.
+ */
+ WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
+ "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+ fdi_dotclock, dotclock);
+ }
+}
+
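/*
 * Editor's sketch, not part of the patch: the sanity check above relies
 * on intel_fuzzy_clock_check(), which tolerates small rounding error
 * between two clocks rather than demanding exact equality. A standalone
 * version of that idea; the exact threshold used here is an assumption.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>

static bool fuzzy_clock_check(int clock1, int clock2)
{
	if (clock1 == clock2)
		return true;
	if (!clock1 || !clock2)
		return false;

	/* accept differences up to a few percent of the two clocks */
	return abs(clock1 - clock2) * 100 < 5 * (clock1 + clock2);
}

int main(void)
{
	printf("%d\n", fuzzy_clock_check(148500, 148352));	/* 1 */
	printf("%d\n", fuzzy_clock_check(148500, 108000));	/* 0 */
	return 0;
}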
+static void verify_wm_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_state)
+{
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct skl_ddb_allocation hw_ddb, *sw_ddb;
- struct intel_crtc *intel_crtc;
+ struct skl_ddb_entry *hw_entry, *sw_entry;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ const enum pipe pipe = intel_crtc->pipe;
int plane;
- if (INTEL_INFO(dev)->gen < 9)
+ if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
return;
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
- for_each_intel_crtc(dev, intel_crtc) {
- struct skl_ddb_entry *hw_entry, *sw_entry;
- const enum pipe pipe = intel_crtc->pipe;
+ /* planes */
+ for_each_plane(dev_priv, pipe, plane) {
+ hw_entry = &hw_ddb.plane[pipe][plane];
+ sw_entry = &sw_ddb->plane[pipe][plane];
- if (!intel_crtc->active)
+ if (skl_ddb_entry_equal(hw_entry, sw_entry))
continue;
- /* planes */
- for_each_plane(dev_priv, pipe, plane) {
- hw_entry = &hw_ddb.plane[pipe][plane];
- sw_entry = &sw_ddb->plane[pipe][plane];
-
- if (skl_ddb_entry_equal(hw_entry, sw_entry))
- continue;
-
- DRM_ERROR("mismatch in DDB state pipe %c plane %d "
- "(expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe), plane + 1,
- sw_entry->start, sw_entry->end,
- hw_entry->start, hw_entry->end);
- }
+ DRM_ERROR("mismatch in DDB state pipe %c plane %d "
+ "(expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe), plane + 1,
+ sw_entry->start, sw_entry->end,
+ hw_entry->start, hw_entry->end);
+ }
- /* cursor */
- hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
- sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
-
- if (skl_ddb_entry_equal(hw_entry, sw_entry))
- continue;
+ /* cursor */
+ hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
+ sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+ if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c cursor "
"(expected (%u,%u), found (%u,%u))\n",
pipe_name(pipe),
@@ -12813,20 +12880,18 @@ static void check_wm_state(struct drm_device *dev)
}
static void
-check_connector_state(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
{
- struct drm_connector_state *old_conn_state;
struct drm_connector *connector;
- int i;
- for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ drm_for_each_connector(connector, dev) {
struct drm_encoder *encoder = connector->encoder;
struct drm_connector_state *state = connector->state;
- /* This also checks the encoder/connector hw state with the
- * ->get_hw_state callbacks. */
- intel_connector_check_state(to_intel_connector(connector));
+ if (state->crtc != crtc)
+ continue;
+
+ intel_connector_verify_state(to_intel_connector(connector));
I915_STATE_WARN(state->best_encoder != encoder,
"connector's atomic encoder doesn't match legacy encoder\n");
@@ -12834,7 +12899,7 @@ check_connector_state(struct drm_device *dev,
}
static void
-check_encoder_state(struct drm_device *dev)
+verify_encoder_state(struct drm_device *dev)
{
struct intel_encoder *encoder;
struct intel_connector *connector;
@@ -12874,149 +12939,186 @@ check_encoder_state(struct drm_device *dev)
}
static void
-check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
+verify_crtc_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state)
{
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
- struct drm_crtc_state *old_crtc_state;
- struct drm_crtc *crtc;
- int i;
-
- for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *pipe_config, *sw_config;
- bool active;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc_state *pipe_config, *sw_config;
+ struct drm_atomic_state *old_state;
+ bool active;
- if (!needs_modeset(crtc->state) &&
- !to_intel_crtc_state(crtc->state)->update_pipe)
- continue;
+ old_state = old_crtc_state->state;
+ __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
+ pipe_config = to_intel_crtc_state(old_crtc_state);
+ memset(pipe_config, 0, sizeof(*pipe_config));
+ pipe_config->base.crtc = crtc;
+ pipe_config->base.state = old_state;
- __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
- pipe_config = to_intel_crtc_state(old_crtc_state);
- memset(pipe_config, 0, sizeof(*pipe_config));
- pipe_config->base.crtc = crtc;
- pipe_config->base.state = old_state;
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
- DRM_DEBUG_KMS("[CRTC:%d]\n",
- crtc->base.id);
+ active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
- active = dev_priv->display.get_pipe_config(intel_crtc,
- pipe_config);
+ /* hw state is inconsistent with the pipe quirk */
+ if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
+ (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
+ active = new_crtc_state->active;
- /* hw state is inconsistent with the pipe quirk */
- if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
- (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
- active = crtc->state->active;
+ I915_STATE_WARN(new_crtc_state->active != active,
+ "crtc active state doesn't match with hw state "
+ "(expected %i, found %i)\n", new_crtc_state->active, active);
- I915_STATE_WARN(crtc->state->active != active,
- "crtc active state doesn't match with hw state "
- "(expected %i, found %i)\n", crtc->state->active, active);
+ I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
+ "transitional active state does not match atomic hw state "
+ "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
- I915_STATE_WARN(intel_crtc->active != crtc->state->active,
- "transitional active state does not match atomic hw state "
- "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
+ for_each_encoder_on_crtc(dev, crtc, encoder) {
+ enum pipe pipe;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
- enum pipe pipe;
+ active = encoder->get_hw_state(encoder, &pipe);
+ I915_STATE_WARN(active != new_crtc_state->active,
+ "[ENCODER:%i] active %i with crtc active %i\n",
+ encoder->base.base.id, active, new_crtc_state->active);
- active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(active != crtc->state->active,
- "[ENCODER:%i] active %i with crtc active %i\n",
- encoder->base.base.id, active, crtc->state->active);
+ I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+ "Encoder connected to wrong pipe %c\n",
+ pipe_name(pipe));
- I915_STATE_WARN(active && intel_crtc->pipe != pipe,
- "Encoder connected to wrong pipe %c\n",
- pipe_name(pipe));
+ if (active)
+ encoder->get_config(encoder, pipe_config);
+ }
- if (active)
- encoder->get_config(encoder, pipe_config);
- }
+ if (!new_crtc_state->active)
+ return;
- if (!crtc->state->active)
- continue;
+ intel_pipe_config_sanity_check(dev_priv, pipe_config);
- sw_config = to_intel_crtc_state(crtc->state);
- if (!intel_pipe_config_compare(dev, sw_config,
- pipe_config, false)) {
- I915_STATE_WARN(1, "pipe state doesn't match!\n");
- intel_dump_pipe_config(intel_crtc, pipe_config,
- "[hw state]");
- intel_dump_pipe_config(intel_crtc, sw_config,
- "[sw state]");
- }
+ sw_config = to_intel_crtc_state(crtc->state);
+ if (!intel_pipe_config_compare(dev, sw_config,
+ pipe_config, false)) {
+ I915_STATE_WARN(1, "pipe state doesn't match!\n");
+ intel_dump_pipe_config(intel_crtc, pipe_config,
+ "[hw state]");
+ intel_dump_pipe_config(intel_crtc, sw_config,
+ "[sw state]");
}
}
static void
-check_shared_dpll_state(struct drm_device *dev)
+verify_single_dpll_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *new_state)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *crtc;
struct intel_dpll_hw_state dpll_hw_state;
- int i;
+ unsigned crtc_mask;
+ bool active;
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
- int enabled_crtcs = 0, active_crtcs = 0;
- bool active;
+ memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
- memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
+ DRM_DEBUG_KMS("%s\n", pll->name);
- DRM_DEBUG_KMS("%s\n", pll->name);
+ active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
- active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
-
- I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
- "more active pll users than references: %i vs %i\n",
- pll->active, hweight32(pll->config.crtc_mask));
- I915_STATE_WARN(pll->active && !pll->on,
+ if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
+ I915_STATE_WARN(!pll->on && pll->active_mask,
"pll in active use but not on in sw tracking\n");
- I915_STATE_WARN(pll->on && !pll->active,
- "pll in on but not on in use in sw tracking\n");
+ I915_STATE_WARN(pll->on && !pll->active_mask,
+ "pll is on but not used by any active crtc\n");
I915_STATE_WARN(pll->on != active,
"pll on state mismatch (expected %i, found %i)\n",
pll->on, active);
+ }
- for_each_intel_crtc(dev, crtc) {
- if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
- enabled_crtcs++;
- if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
- active_crtcs++;
- }
- I915_STATE_WARN(pll->active != active_crtcs,
- "pll active crtcs mismatch (expected %i, found %i)\n",
- pll->active, active_crtcs);
- I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
- "pll enabled crtcs mismatch (expected %i, found %i)\n",
- hweight32(pll->config.crtc_mask), enabled_crtcs);
+ if (!crtc) {
+ I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
+ "more active pll users than references: %x vs %x\n",
+ pll->active_mask, pll->config.crtc_mask);
- I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
- sizeof(dpll_hw_state)),
- "pll hw state mismatch\n");
+ return;
}
+
+ crtc_mask = 1 << drm_crtc_index(crtc);
+
+ if (new_state->active)
+ I915_STATE_WARN(!(pll->active_mask & crtc_mask),
+ "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
+ pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+ else
+ I915_STATE_WARN(pll->active_mask & crtc_mask,
+ "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
+ pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+
+ I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
+ "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
+ crtc_mask, pll->config.crtc_mask);
+
+ I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
+ &dpll_hw_state,
+ sizeof(dpll_hw_state)),
+ "pll hw state mismatch\n");
}
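/*
 * Editor's sketch, not part of the patch: the invariants the checks
 * above enforce between a PLL's sw tracking masks and its hw state,
 * written as plain assertions (the INTEL_DPLL_ALWAYS_ON exemption is
 * omitted). Field names follow the patch; the struct is reduced to
 * what the checks need.
 */
#include <assert.h>
#include <stdbool.h>

struct pll_state {
	unsigned active_mask;	/* crtcs currently running on this PLL */
	unsigned crtc_mask;	/* crtcs holding a reference to it */
	bool on;		/* sw belief about the hw enable bit */
	bool hw_active;		/* what get_hw_state() just read back */
};

static void verify_pll(const struct pll_state *pll)
{
	/* every active user must also hold a reference */
	assert((pll->active_mask & ~pll->crtc_mask) == 0);
	/* the PLL is on exactly when someone actively uses it */
	assert(pll->on == (pll->active_mask != 0));
	/* and sw belief must match the hardware readback */
	assert(pll->on == pll->hw_active);
}

int main(void)
{
	struct pll_state pll = { .active_mask = 0x1, .crtc_mask = 0x3,
				 .on = true, .hw_active = true };
	verify_pll(&pll);
	return 0;
}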
static void
-intel_modeset_check_state(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state)
{
- check_wm_state(dev);
- check_connector_state(dev, old_state);
- check_encoder_state(dev);
- check_crtc_state(dev, old_state);
- check_shared_dpll_state(dev);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
+ struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
+
+ if (new_state->shared_dpll)
+ verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
+
+ if (old_state->shared_dpll &&
+ old_state->shared_dpll != new_state->shared_dpll) {
+ unsigned crtc_mask = 1 << drm_crtc_index(crtc);
+ struct intel_shared_dpll *pll = old_state->shared_dpll;
+
+ I915_STATE_WARN(pll->active_mask & crtc_mask,
+ "pll active mismatch (didn't expect pipe %c in active mask)\n",
+ pipe_name(drm_crtc_index(crtc)));
+ I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
+ "pll enabled crtcs mismatch (found pipe %c in enabled mask)\n",
+ pipe_name(drm_crtc_index(crtc)));
+ }
}
-void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
- int dotclock)
+static void
+intel_modeset_verify_crtc(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state,
+ struct drm_crtc_state *new_state)
{
- /*
- * FDI already provided one idea for the dotclock.
- * Yell if the encoder disagrees.
- */
- WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
- "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
- pipe_config->base.adjusted_mode.crtc_clock, dotclock);
+ if (!needs_modeset(new_state) &&
+ !to_intel_crtc_state(new_state)->update_pipe)
+ return;
+
+ verify_wm_state(crtc, new_state);
+ verify_connector_state(crtc->dev, crtc);
+ verify_crtc_state(crtc, old_state, new_state);
+ verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
+}
+
+static void
+verify_disabled_dpll_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++)
+ verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
+}
+
+static void
+intel_modeset_verify_disabled(struct drm_device *dev)
+{
+ verify_encoder_state(dev);
+ verify_connector_state(dev, NULL);
+ verify_disabled_dpll_state(dev);
}
static void update_scanline_offset(struct intel_crtc *crtc)
@@ -13071,20 +13173,21 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int old_dpll = to_intel_crtc_state(crtc->state)->shared_dpll;
+ struct intel_shared_dpll *old_dpll =
+ to_intel_crtc_state(crtc->state)->shared_dpll;
if (!needs_modeset(crtc_state))
continue;
- to_intel_crtc_state(crtc_state)->shared_dpll = DPLL_ID_PRIVATE;
+ to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
- if (old_dpll == DPLL_ID_PRIVATE)
+ if (!old_dpll)
continue;
if (!shared_dpll)
shared_dpll = intel_atomic_get_shared_dpll_state(state);
- shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
+ intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
}
}
@@ -13296,9 +13399,6 @@ static int intel_atomic_check(struct drm_device *dev,
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
- memset(&to_intel_crtc(crtc)->atomic, 0,
- sizeof(struct intel_crtc_atomic_commit));
-
/* Catch I915_MODE_FLAG_INHERITED */
if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
crtc_state->mode_changed = true;
@@ -13364,7 +13464,7 @@ static int intel_atomic_check(struct drm_device *dev,
static int intel_atomic_prepare_commit(struct drm_device *dev,
struct drm_atomic_state *state,
- bool async)
+ bool nonblock)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_plane_state *plane_state;
@@ -13373,8 +13473,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
struct drm_crtc *crtc;
int i, ret;
- if (async) {
- DRM_DEBUG_KMS("i915 does not yet support async commit\n");
+ if (nonblock) {
+ DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
return -EINVAL;
}
@@ -13395,12 +13495,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
return ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
- if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
- u32 reset_counter;
-
- reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
+ if (!ret && !nonblock) {
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane_state);
@@ -13409,25 +13506,18 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
continue;
ret = __i915_wait_request(intel_plane_state->wait_req,
- reset_counter, true,
- NULL, NULL);
-
- /* Swallow -EIO errors to allow updates during hw lockup. */
- if (ret == -EIO)
- ret = 0;
-
- if (ret)
+ true, NULL, NULL);
+ if (ret) {
+ /* Any hang should be swallowed by the wait */
+ WARN_ON(ret == -EIO);
+ mutex_lock(&dev->struct_mutex);
+ drm_atomic_helper_cleanup_planes(dev, state);
+ mutex_unlock(&dev->struct_mutex);
break;
+ }
}
-
- if (!ret)
- return 0;
-
- mutex_lock(&dev->struct_mutex);
- drm_atomic_helper_cleanup_planes(dev, state);
}
- mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -13469,7 +13559,7 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
drm_crtc_vblank_count(crtc),
msecs_to_jiffies(50));
- WARN_ON(!lret);
+ WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
drm_crtc_vblank_put(crtc);
}
@@ -13500,39 +13590,41 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
* intel_atomic_commit - commit validated state object
* @dev: DRM device
* @state: the top-level driver state object
- * @async: asynchronous commit
+ * @nonblock: nonblocking commit
*
* This function commits a top-level state object that has been validated
* with drm_atomic_helper_check().
*
* FIXME: Atomic modeset support for i915 is not yet complete. At the moment
* we can only handle plane-related operations and do not yet support
- * asynchronous commit.
+ * nonblocking commit.
*
* RETURNS
* Zero for success or -errno.
*/
static int intel_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
- bool async)
+ bool nonblock)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *old_crtc_state;
struct drm_crtc *crtc;
+ struct intel_crtc_state *intel_cstate;
int ret = 0, i;
bool hw_check = intel_state->modeset;
unsigned long put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
- ret = intel_atomic_prepare_commit(dev, state, async);
+ ret = intel_atomic_prepare_commit(dev, state, nonblock);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
return ret;
}
drm_atomic_helper_swap_state(dev, state);
- dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
+ dev_priv->wm.config = intel_state->wm_config;
+ intel_shared_dpll_commit(state);
if (intel_state->modeset) {
memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
@@ -13543,7 +13635,7 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
}
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
if (needs_modeset(crtc->state) ||
@@ -13558,10 +13650,10 @@ static int intel_atomic_commit(struct drm_device *dev,
if (!needs_modeset(crtc->state))
continue;
- intel_pre_plane_update(to_intel_crtc_state(crtc_state));
+ intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
- if (crtc_state->active) {
- intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
+ if (old_crtc_state->active) {
+ intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
intel_fbc_disable(intel_crtc);
@@ -13584,17 +13676,17 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_modeset_update_crtc_state(state);
if (intel_state->modeset) {
- intel_shared_dpll_commit(state);
-
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
if (dev_priv->display.modeset_commit_cdclk &&
intel_state->dev_cdclk != dev_priv->cdclk_freq)
dev_priv->display.modeset_commit_cdclk(state);
+
+ intel_modeset_verify_disabled(dev);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool modeset = needs_modeset(crtc->state);
struct intel_crtc_state *pipe_config =
@@ -13607,14 +13699,15 @@ static int intel_atomic_commit(struct drm_device *dev,
}
if (!modeset)
- intel_pre_plane_update(to_intel_crtc_state(crtc_state));
+ intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
- if (crtc->state->active && intel_crtc->atomic.update_fbc)
+ if (crtc->state->active &&
+ drm_atomic_get_existing_plane_state(state, crtc->primary))
intel_fbc_enable(intel_crtc);
if (crtc->state->active &&
(crtc->state->planes_changed || update_pipe))
- drm_atomic_helper_commit_planes_on_crtc(crtc_state);
+ drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
if (pipe_config->base.active && needs_vblank_wait(pipe_config))
crtc_vblank_mask |= 1 << i;
@@ -13625,11 +13718,27 @@ static int intel_atomic_commit(struct drm_device *dev,
if (!state->legacy_cursor_update)
intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- intel_post_plane_update(to_intel_crtc(crtc));
+ /*
+ * Now that the vblank has passed, we can go ahead and program the
+ * optimal watermarks on platforms that need two-step watermark
+ * programming.
+ *
+ * TODO: Move this (and other cleanup) to an async worker eventually.
+ */
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ intel_cstate = to_intel_crtc_state(crtc->state);
+
+ if (dev_priv->display.optimize_watermarks)
+ dev_priv->display.optimize_watermarks(intel_cstate);
+ }
+
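/*
 * Editor's sketch, not part of the patch: the two-step watermark order
 * the commit path above implements, as standalone stubs. Intermediate
 * values are safe under both configurations, so they can be written
 * before the plane update; optimal values only after the vblank has
 * made the new configuration live. Names are invented for the sketch.
 */
#include <stdio.h>

struct wm { int intermediate, optimal; };

static void program_watermarks(int level)       { printf("wm <- %d\n", level); }
static void update_planes_and_wait_vblank(void) { printf("flip + vblank\n"); }

static void commit_watermarks(const struct wm *wm)
{
	program_watermarks(wm->intermediate);	/* step 1: safe for old+new */
	update_planes_and_wait_vblank();	/* new state becomes live */
	program_watermarks(wm->optimal);	/* step 2: new state only */
}

int main(void)
{
	struct wm wm = { .intermediate = 2, .optimal = 4 };

	commit_watermarks(&wm);
	return 0;
}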
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
+
+ intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
}
if (intel_state->modeset)
@@ -13639,9 +13748,6 @@ static int intel_atomic_commit(struct drm_device *dev,
drm_atomic_helper_cleanup_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
- if (hw_check)
- intel_modeset_check_state(dev, state);
-
drm_atomic_state_free(state);
/* As one of the primary mmio accessors, KMS has a high likelihood
@@ -13701,116 +13807,15 @@ out:
#undef for_each_intel_crtc_masked
static const struct drm_crtc_funcs intel_crtc_funcs = {
- .gamma_set = intel_crtc_gamma_set,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
.set_config = drm_atomic_helper_set_config,
+ .set_property = drm_atomic_helper_crtc_set_property,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
.atomic_duplicate_state = intel_crtc_duplicate_state,
.atomic_destroy_state = intel_crtc_destroy_state,
};
-static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
-{
- uint32_t val;
-
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
- return false;
-
- val = I915_READ(PCH_DPLL(pll->id));
- hw_state->dpll = val;
- hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
- hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
-
- return val & DPLL_VCO_ENABLE;
-}
-
-static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
- I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
-}
-
-static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- /* PCH refclock must be enabled first */
- ibx_assert_pch_refclk_enabled(dev_priv);
-
- I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(PCH_DPLL(pll->id));
- udelay(150);
-
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
- POSTING_READ(PCH_DPLL(pll->id));
- udelay(200);
-}
-
-static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- struct drm_device *dev = dev_priv->dev;
- struct intel_crtc *crtc;
-
- /* Make sure no transcoder isn't still depending on us. */
- for_each_intel_crtc(dev, crtc) {
- if (intel_crtc_to_shared_dpll(crtc) == pll)
- assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
- }
-
- I915_WRITE(PCH_DPLL(pll->id), 0);
- POSTING_READ(PCH_DPLL(pll->id));
- udelay(200);
-}
-
-static char *ibx_pch_dpll_names[] = {
- "PCH DPLL A",
- "PCH DPLL B",
-};
-
-static void ibx_pch_dpll_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- dev_priv->num_shared_dpll = 2;
-
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- dev_priv->shared_dplls[i].id = i;
- dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
- dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
- dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
- dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
- dev_priv->shared_dplls[i].get_hw_state =
- ibx_pch_dpll_get_hw_state;
- }
-}
-
-static void intel_shared_dpll_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (HAS_DDI(dev))
- intel_ddi_pll_init(dev);
- else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
- ibx_pch_dpll_init(dev);
- else
- dev_priv->num_shared_dpll = 0;
-
- BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
-}
-
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
@@ -13856,10 +13861,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
*/
if (needs_modeset(crtc_state))
ret = i915_gem_object_wait_rendering(old_obj, true);
-
- /* Swallow -EIO errors to allow updates during hw lockup. */
- if (ret && ret != -EIO)
+ if (ret) {
+ /* GPU hangs should have been swallowed by the wait */
+ WARN_ON(ret == -EIO);
return ret;
+ }
}
/* For framebuffer backed by dmabuf, wait for fence */
@@ -13884,7 +13890,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (ret)
DRM_DEBUG_KMS("failed to attach phys object\n");
} else {
- ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
+ ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
}
if (ret == 0) {
@@ -13928,7 +13934,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
!INTEL_INFO(dev)->cursor_needs_physical))
- intel_unpin_fb_obj(old_state->fb, old_state);
+ intel_unpin_fb_obj(old_state->fb, old_state->rotation);
/* prepare_fb aborted? */
if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
@@ -13936,7 +13942,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
i915_gem_request_assign(&old_intel_state->wait_req, NULL);
-
}
int
@@ -14011,6 +14016,11 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
if (modeset)
return;
+ if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
+ intel_color_set_csc(crtc->state);
+ intel_color_load_luts(crtc->state);
+ }
+
if (to_intel_crtc_state(crtc->state)->update_pipe)
intel_update_pipe_config(intel_crtc, old_intel_state);
else if (INTEL_INFO(dev)->gen >= 9)
@@ -14054,20 +14064,19 @@ const struct drm_plane_funcs intel_plane_funcs = {
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
int pipe)
{
- struct intel_plane *primary;
- struct intel_plane_state *state;
+ struct intel_plane *primary = NULL;
+ struct intel_plane_state *state = NULL;
const uint32_t *intel_primary_formats;
unsigned int num_formats;
+ int ret;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (primary == NULL)
- return NULL;
+ if (!primary)
+ goto fail;
state = intel_create_plane_state(&primary->base);
- if (!state) {
- kfree(primary);
- return NULL;
- }
+ if (!state)
+ goto fail;
primary->base.state = &state->base;
primary->can_scale = false;
@@ -14109,10 +14118,12 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->disable_plane = i9xx_disable_primary_plane;
}
- drm_universal_plane_init(dev, &primary->base, 0,
- &intel_plane_funcs,
- intel_primary_formats, num_formats,
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ ret = drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ goto fail;
if (INTEL_INFO(dev)->gen >= 4)
intel_create_rotation_property(dev, primary);
@@ -14120,6 +14131,12 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
return &primary->base;
+
+fail:
+ kfree(state);
+ kfree(primary);
+
+ return NULL;
}
void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
@@ -14236,18 +14253,17 @@ intel_update_cursor_plane(struct drm_plane *plane,
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
int pipe)
{
- struct intel_plane *cursor;
- struct intel_plane_state *state;
+ struct intel_plane *cursor = NULL;
+ struct intel_plane_state *state = NULL;
+ int ret;
cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
- if (cursor == NULL)
- return NULL;
+ if (!cursor)
+ goto fail;
state = intel_create_plane_state(&cursor->base);
- if (!state) {
- kfree(cursor);
- return NULL;
- }
+ if (!state)
+ goto fail;
cursor->base.state = &state->base;
cursor->can_scale = false;
@@ -14259,11 +14275,13 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
cursor->update_plane = intel_update_cursor_plane;
cursor->disable_plane = intel_disable_cursor_plane;
- drm_universal_plane_init(dev, &cursor->base, 0,
- &intel_plane_funcs,
- intel_cursor_formats,
- ARRAY_SIZE(intel_cursor_formats),
- DRM_PLANE_TYPE_CURSOR, NULL);
+ ret = drm_universal_plane_init(dev, &cursor->base, 0,
+ &intel_plane_funcs,
+ intel_cursor_formats,
+ ARRAY_SIZE(intel_cursor_formats),
+ DRM_PLANE_TYPE_CURSOR, NULL);
+ if (ret)
+ goto fail;
if (INTEL_INFO(dev)->gen >= 4) {
if (!dev->mode_config.rotation_property)
@@ -14283,6 +14301,12 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
return &cursor->base;
+
+fail:
+ kfree(state);
+ kfree(cursor);
+
+ return NULL;
}
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
@@ -14308,7 +14332,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
struct intel_crtc_state *crtc_state = NULL;
struct drm_plane *primary = NULL;
struct drm_plane *cursor = NULL;
- int i, ret;
+ int ret;
intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
if (intel_crtc == NULL)
@@ -14344,13 +14368,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
if (ret)
goto fail;
- drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
- for (i = 0; i < 256; i++) {
- intel_crtc->lut_r[i] = i;
- intel_crtc->lut_g[i] = i;
- intel_crtc->lut_b[i] = i;
- }
-
/*
* On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
* is hooked to pipe B. Hence we want plane A feeding pipe B.
@@ -14375,6 +14392,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+ intel_color_init(&intel_crtc->base);
+
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
return;
@@ -14499,6 +14518,8 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_ddi_init(dev, PORT_A);
intel_ddi_init(dev, PORT_B);
intel_ddi_init(dev, PORT_C);
+
+ intel_dsi_init(dev);
} else if (HAS_DDI(dev)) {
int found;
@@ -14559,6 +14580,8 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ bool has_edp, has_port;
+
/*
* The DP_DETECTED bit is the latched state of the DDC
* SDA pin at boot. However since eDP doesn't require DDC
@@ -14567,27 +14590,37 @@ static void intel_setup_outputs(struct drm_device *dev)
* Thus we can't rely on the DP_DETECTED bit alone to detect
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
+ *
+ * Sadly the straps seem to be missing sometimes even for HDMI
+ * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both the strap
+ * and the VBT for the presence of the port. Additionally we can't
+ * trust the port type the VBT declares, as we've seen at least
+ * HDMI ports that the VBT claims are DP or eDP.
*/
- if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_B))
+ has_edp = intel_dp_is_edp(dev, PORT_B);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+ if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+ if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
- if (I915_READ(VLV_DP_B) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_B))
- intel_dp_init(dev, VLV_DP_B, PORT_B);
- if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
- !intel_dp_is_edp(dev, PORT_C))
+ has_edp = intel_dp_is_edp(dev, PORT_C);
+ has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+ if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+ has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+ if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
- if (I915_READ(VLV_DP_C) & DP_DETECTED ||
- intel_dp_is_edp(dev, PORT_C))
- intel_dp_init(dev, VLV_DP_C, PORT_C);
if (IS_CHERRYVIEW(dev)) {
- /* eDP not supported on port D, so don't check VBT */
- if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
- intel_hdmi_init(dev, CHV_HDMID, PORT_D);
- if (I915_READ(CHV_DP_D) & DP_DETECTED)
+ /*
+ * eDP not supported on port D,
+ * so no need to worry about it
+ */
+ has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+ if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
intel_dp_init(dev, CHV_DP_D, PORT_D);
+ if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+ intel_hdmi_init(dev, CHV_HDMID, PORT_D);
}
intel_dsi_init(dev);
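
The new detection logic can be condensed to two boolean rules per port: probe DP/eDP if either the strap or the VBT claims the port, and register HDMI only when the port exists and did not come up as eDP. A small illustrative sketch with made-up names (not the driver's API):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the strap bits, the VBT lookup and the
 * output init calls. */
static bool dp_strap, hdmi_strap, vbt_port_present, vbt_claims_edp;
static bool dp_init(void) { puts("DP/eDP registered"); return true; }
static void hdmi_init(void) { puts("HDMI registered"); }

static void setup_port(void)
{
	/* Probe DP/eDP if either the strap or the VBT claims the port. */
	bool has_edp = vbt_claims_edp;

	if (dp_strap || vbt_port_present)
		has_edp &= dp_init();
	/* HDMI only if the port exists and didn't come up as eDP. */
	if ((hdmi_strap || vbt_port_present) && !has_edp)
		hdmi_init();
}

int main(void)
{
	hdmi_strap = false;	/* missing strap, as on some CHT boards */
	vbt_port_present = true;	/* ...but the VBT says the port exists */
	setup_port();		/* still registers DP, then HDMI */
	return 0;
}
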
@@ -14868,6 +14901,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
intel_fb->obj = obj;
+ intel_fill_fb_info(dev_priv, &intel_fb->base);
+
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -14888,8 +14923,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
- obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
- mode_cmd.handles[0]));
+ obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
if (&obj->base == NULL)
return ERR_PTR(-ENOENT);
@@ -14915,23 +14949,13 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.atomic_state_clear = intel_atomic_state_clear,
};
-/* Set up chip specific display functions */
-static void intel_init_display(struct drm_device *dev)
+/**
+ * intel_init_display_hooks - initialize the display modesetting hooks
+ * @dev_priv: device private
+ */
+void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
- dev_priv->display.find_dpll = g4x_find_best_dpll;
- else if (IS_CHERRYVIEW(dev))
- dev_priv->display.find_dpll = chv_find_best_dpll;
- else if (IS_VALLEYVIEW(dev))
- dev_priv->display.find_dpll = vlv_find_best_dpll;
- else if (IS_PINEVIEW(dev))
- dev_priv->display.find_dpll = pnv_find_best_dpll;
- else
- dev_priv->display.find_dpll = i9xx_find_best_dpll;
-
- if (INTEL_INFO(dev)->gen >= 9) {
+ if (INTEL_INFO(dev_priv)->gen >= 9) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_initial_plane_config =
skylake_get_initial_plane_config;
@@ -14939,7 +14963,7 @@ static void intel_init_display(struct drm_device *dev)
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- } else if (HAS_DDI(dev)) {
+ } else if (HAS_DDI(dev_priv)) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_initial_plane_config =
ironlake_get_initial_plane_config;
@@ -14947,7 +14971,7 @@ static void intel_init_display(struct drm_device *dev)
haswell_crtc_compute_clock;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- } else if (HAS_PCH_SPLIT(dev)) {
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
dev_priv->display.get_initial_plane_config =
ironlake_get_initial_plane_config;
@@ -14955,106 +14979,134 @@ static void intel_init_display(struct drm_device *dev)
ironlake_crtc_compute_clock;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
- dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
+ dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_initial_plane_config =
+ i9xx_get_initial_plane_config;
+ dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
+ dev_priv->display.crtc_enable = valleyview_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ } else if (IS_G4X(dev_priv)) {
+ dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_initial_plane_config =
+ i9xx_get_initial_plane_config;
+ dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
+ dev_priv->display.crtc_enable = i9xx_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ } else if (IS_PINEVIEW(dev_priv)) {
+ dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_initial_plane_config =
+ i9xx_get_initial_plane_config;
+ dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
+ dev_priv->display.crtc_enable = i9xx_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ } else if (!IS_GEN2(dev_priv)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ } else {
+ dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_initial_plane_config =
+ i9xx_get_initial_plane_config;
+ dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
+ dev_priv->display.crtc_enable = i9xx_crtc_enable;
+ dev_priv->display.crtc_disable = i9xx_crtc_disable;
}
/* Returns the core display clock speed */
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
dev_priv->display.get_display_clock_speed =
skylake_get_display_clock_speed;
- else if (IS_BROXTON(dev))
+ else if (IS_BROXTON(dev_priv))
dev_priv->display.get_display_clock_speed =
broxton_get_display_clock_speed;
- else if (IS_BROADWELL(dev))
+ else if (IS_BROADWELL(dev_priv))
dev_priv->display.get_display_clock_speed =
broadwell_get_display_clock_speed;
- else if (IS_HASWELL(dev))
+ else if (IS_HASWELL(dev_priv))
dev_priv->display.get_display_clock_speed =
haswell_get_display_clock_speed;
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->display.get_display_clock_speed =
valleyview_get_display_clock_speed;
- else if (IS_GEN5(dev))
+ else if (IS_GEN5(dev_priv))
dev_priv->display.get_display_clock_speed =
ilk_get_display_clock_speed;
- else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
- IS_GEN6(dev) || IS_IVYBRIDGE(dev))
+ else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
+ IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
- else if (IS_GM45(dev))
+ else if (IS_GM45(dev_priv))
dev_priv->display.get_display_clock_speed =
gm45_get_display_clock_speed;
- else if (IS_CRESTLINE(dev))
+ else if (IS_CRESTLINE(dev_priv))
dev_priv->display.get_display_clock_speed =
i965gm_get_display_clock_speed;
- else if (IS_PINEVIEW(dev))
+ else if (IS_PINEVIEW(dev_priv))
dev_priv->display.get_display_clock_speed =
pnv_get_display_clock_speed;
- else if (IS_G33(dev) || IS_G4X(dev))
+ else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
dev_priv->display.get_display_clock_speed =
g33_get_display_clock_speed;
- else if (IS_I915G(dev))
+ else if (IS_I915G(dev_priv))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
- else if (IS_I945GM(dev) || IS_845G(dev))
+ else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
- else if (IS_I915GM(dev))
+ else if (IS_I915GM(dev_priv))
dev_priv->display.get_display_clock_speed =
i915gm_get_display_clock_speed;
- else if (IS_I865G(dev))
+ else if (IS_I865G(dev_priv))
dev_priv->display.get_display_clock_speed =
i865_get_display_clock_speed;
- else if (IS_I85X(dev))
+ else if (IS_I85X(dev_priv))
dev_priv->display.get_display_clock_speed =
i85x_get_display_clock_speed;
else { /* 830 */
- WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
+ WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
}
- if (IS_GEN5(dev)) {
+ if (IS_GEN5(dev_priv)) {
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
- } else if (IS_GEN6(dev)) {
+ } else if (IS_GEN6(dev_priv)) {
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- } else if (IS_IVYBRIDGE(dev)) {
+ } else if (IS_IVYBRIDGE(dev_priv)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
- if (IS_BROADWELL(dev)) {
+ if (IS_BROADWELL(dev_priv)) {
dev_priv->display.modeset_commit_cdclk =
broadwell_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
broadwell_modeset_calc_cdclk;
}
- } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.modeset_commit_cdclk =
valleyview_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
valleyview_modeset_calc_cdclk;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
dev_priv->display.modeset_commit_cdclk =
broxton_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
broxton_modeset_calc_cdclk;
}
- switch (INTEL_INFO(dev)->gen) {
+ switch (INTEL_INFO(dev_priv)->gen) {
case 2:
dev_priv->display.queue_flip = intel_gen2_queue_flip;
break;
@@ -15081,8 +15133,6 @@ static void intel_init_display(struct drm_device *dev)
/* Default just returns -ENODEV to indicate unsupported */
dev_priv->display.queue_flip = intel_default_queue_flip;
}
-
- mutex_init(&dev_priv->pps_mutex);
}
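
intel_init_display_hooks() is an instance of the hook-table pattern: the platform checks run once at init time to fill a struct of function pointers, and every later caller dispatches through the struct without re-checking the platform. A toy sketch of the pattern (illustrative names only):

#include <stdio.h>

struct display_funcs {
	int (*get_display_clock_speed)(void);
	void (*crtc_enable)(int pipe);
};

static int i9xx_cdclk(void) { return 333333; }
static int i830_cdclk(void) { return 133333; }
static void i9xx_crtc_enable(int pipe) { printf("enable pipe %d\n", pipe); }

static struct display_funcs display;

/* Pick implementations once, by platform; hot paths never re-check. */
static void init_display_hooks(int gen)
{
	display.crtc_enable = i9xx_crtc_enable;
	display.get_display_clock_speed = (gen >= 3) ? i9xx_cdclk : i830_cdclk;
}

int main(void)
{
	init_display_hooks(2);
	display.crtc_enable(0);
	printf("cdclk %d kHz\n", display.get_display_clock_speed());
	return 0;
}
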
/*
@@ -15305,7 +15355,7 @@ static void sanitize_watermarks(struct drm_device *dev)
int i;
/* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.program_watermarks)
+ if (!dev_priv->display.optimize_watermarks)
return;
/*
@@ -15326,6 +15376,13 @@ retry:
if (WARN_ON(IS_ERR(state)))
goto fail;
+ /*
+ * Hardware readout is the only time we don't want to calculate
+ * intermediate watermarks (since we don't trust the current
+ * watermarks).
+ */
+ to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
ret = intel_atomic_check(dev, state);
if (ret) {
/*
@@ -15348,7 +15405,8 @@ retry:
for_each_crtc_in_state(state, crtc, cstate, i) {
struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
- dev_priv->display.program_watermarks(cs);
+ cs->wm.need_postvbl_update = true;
+ dev_priv->display.optimize_watermarks(cs);
}
drm_atomic_state_free(state);
@@ -15359,7 +15417,8 @@ fail:
void intel_modeset_init(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
int sprite, ret;
enum pipe pipe;
struct intel_crtc *crtc;
@@ -15401,9 +15460,6 @@ void intel_modeset_init(struct drm_device *dev)
}
}
- intel_init_display(dev);
- intel_init_audio(dev);
-
if (IS_GEN2(dev)) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
@@ -15426,7 +15482,7 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
}
- dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
+ dev->mode_config.fb_base = ggtt->mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
INTEL_INFO(dev)->num_pipes,
@@ -15443,6 +15499,7 @@ void intel_modeset_init(struct drm_device *dev)
}
intel_update_czclk(dev_priv);
+ intel_update_rawclk(dev_priv);
intel_update_cdclk(dev);
intel_shared_dpll_init(dev);
@@ -15555,10 +15612,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);
+ enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
/* Clear any frame start delays used for debugging left by the BIOS */
- I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ if (!transcoder_is_dsi(cpu_transcoder)) {
+ i915_reg_t reg = PIPECONF(cpu_transcoder);
+
+ I915_WRITE(reg,
+ I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
+ }
/* restore vblank interrupts to correct state */
drm_crtc_vblank_reset(&crtc->base);
@@ -15606,38 +15668,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
- if (!intel_crtc_has_encoders(crtc))
+ if (crtc->active && !intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base);
- if (crtc->active != crtc->base.state->active) {
- struct intel_encoder *encoder;
-
- /* This can happen either due to bugs in the get_hw_state
- * functions or because of calls to intel_crtc_disable_noatomic,
- * or because the pipe is force-enabled due to the
- * pipe A quirk. */
- DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
- crtc->base.base.id,
- crtc->base.state->enable ? "enabled" : "disabled",
- crtc->active ? "enabled" : "disabled");
-
- WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
- crtc->base.state->active = crtc->active;
- crtc->base.enabled = crtc->active;
- crtc->base.state->connector_mask = 0;
- crtc->base.state->encoder_mask = 0;
-
- /* Because we only establish the connector -> encoder ->
- * crtc links if something is active, this means the
- * crtc is now deactivated. Break the links. connector
- * -> encoder links are only establish when things are
- * actually up, hence no need to break them. */
- WARN_ON(crtc->active);
-
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- encoder->base.crtc = NULL;
- }
-
if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
/*
* We start out with underrun reporting disabled to avoid races.
@@ -15767,7 +15800,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_crtc_state *crtc_state = crtc->config;
int pixclk = 0;
- __drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
+ __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
memset(crtc_state, 0, sizeof(*crtc_state));
crtc_state->base.crtc = &crtc->base;
@@ -15806,22 +15839,17 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
- pll->on = pll->get_hw_state(dev_priv, pll,
- &pll->config.hw_state);
- pll->active = 0;
+ pll->on = pll->funcs.get_hw_state(dev_priv, pll,
+ &pll->config.hw_state);
pll->config.crtc_mask = 0;
for_each_intel_crtc(dev, crtc) {
- if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
- pll->active++;
+ if (crtc->active && crtc->config->shared_dpll == pll)
pll->config.crtc_mask |= 1 << crtc->pipe;
- }
}
+ pll->active_mask = pll->config.crtc_mask;
DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
pll->name, pll->config.crtc_mask, pll->on);
-
- if (pll->config.crtc_mask)
- intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
}
for_each_intel_encoder(dev, encoder) {
@@ -15903,6 +15931,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
update_scanline_offset(crtc);
}
+
+ intel_pipe_config_sanity_check(dev_priv, crtc->config);
}
}
@@ -15937,12 +15967,12 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
- if (!pll->on || pll->active)
+ if (!pll->on || pll->active_mask)
continue;
DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
- pll->disable(dev_priv, pll);
+ pll->funcs.disable(dev_priv, pll);
pll->on = false;
}
@@ -15987,18 +16017,6 @@ void intel_display_resume(struct drm_device *dev)
retry:
ret = drm_modeset_lock_all_ctx(dev, &ctx);
- /*
- * With MST, the number of connectors can change between suspend and
- * resume, which means that the state we want to restore might now be
- * impossible to use since it'll be pointing to non-existant
- * connectors.
- */
- if (ret == 0 && state &&
- state->num_connector != dev->mode_config.num_connector) {
- drm_atomic_state_free(state);
- state = NULL;
- }
-
if (ret == 0 && !setup) {
setup = true;
@@ -16013,6 +16031,9 @@ retry:
state->acquire_ctx = &ctx;
+ /* ignore any reset values/BIOS leftovers in the WM registers */
+ to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
for_each_crtc_in_state(state, crtc, crtc_state, i) {
/*
* Force recalculation even if we restore
@@ -16063,9 +16084,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
continue;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(c->primary,
- c->primary->fb,
- c->primary->state);
+ ret = intel_pin_and_fence_fb_obj(c->primary->fb,
+ c->primary->state->rotation);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
@@ -16274,8 +16294,9 @@ intel_display_capture_error_state(struct drm_device *dev)
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
+ /* Note: this does not include DSI transcoders. */
error->num_transcoders = INTEL_INFO(dev)->num_pipes;
- if (HAS_DDI(dev_priv->dev))
+ if (HAS_DDI(dev_priv))
error->num_transcoders++; /* Account for eDP. */
for (i = 0; i < error->num_transcoders; i++) {
@@ -16346,7 +16367,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
}
for (i = 0; i < error->num_transcoders; i++) {
- err_printf(m, "CPU transcoder: %c\n",
+ err_printf(m, "CPU transcoder: %s\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
err_printf(m, " Power: %s\n",
onoff(error->transcoder[i].power_domain_on));
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 69054ef97..891107f92 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -129,6 +129,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
enum pipe pipe);
+static void intel_dp_unset_edid(struct intel_dp *intel_dp);
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
@@ -662,7 +663,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
msecs_to_jiffies_timeout(10));
else
- done = wait_for_atomic(C, 10) == 0;
+ done = wait_for(C, 10) == 0;
if (!done)
DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
has_aux_irq);
@@ -671,60 +672,55 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
return status;
}
-static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+
+ if (index)
+ return 0;
/*
* The clock divider is based off the hrawclk, and would like to run at
- * 2MHz. So, take the hrawclk value and divide by 2 and use that
+ * 2MHz. So, take the hrawclk value (in kHz), divide by 2000, and
+ * use the result.
*/
- return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
+ return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
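
The arithmetic is straightforward once the units are known: rawclk_freq is stored in kHz and the AUX engine wants roughly a 2 MHz clock, so the divider is the kHz value over 2000. For example, assuming a 125 MHz rawclk this gives 63, consistent with the index-0 workaround value used for non-ULT HSW below:

#include <stdio.h>

/* Kernel-style rounding division, as in include/linux/kernel.h. */
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
	unsigned int rawclk_khz = 125000;	/* hypothetical 125 MHz rawclk */

	/* 125000 kHz / 2000 = 62.5, rounded to 63 */
	printf("aux divider = %u\n", DIV_ROUND_CLOSEST(rawclk_khz, 2000));
	return 0;
}
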
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
if (index)
return 0;
- if (intel_dig_port->port == PORT_A) {
+ /*
+ * The clock divider is based off the cdclk or PCH rawclk, and would
+ * like to run at 2MHz. So, take the cdclk or PCH rawclk value (in
+ * kHz), divide by 2000, and use the result.
+ */
+ if (intel_dig_port->port == PORT_A)
return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
-
- } else {
- return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
- }
+ else
+ return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
- if (intel_dig_port->port == PORT_A) {
- if (index)
- return 0;
- return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
- } else if (HAS_PCH_LPT_H(dev_priv)) {
+ if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
case 1: return 72;
default: return 0;
}
- } else {
- return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
}
-}
-static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
-{
- return index ? 0 : 100;
+ return ilk_get_aux_clock_divider(intel_dp, index);
}
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
@@ -737,10 +733,10 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return index ? 0 : 1;
}
-static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
- bool has_aux_irq,
- int send_bytes,
- uint32_t aux_clock_divider)
+static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+ bool has_aux_irq,
+ int send_bytes,
+ uint32_t aux_clock_divider)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@@ -1229,71 +1225,6 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
intel_connector_unregister(intel_connector);
}
-static void
-skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
-{
- u32 ctrl1;
-
- memset(&pipe_config->dpll_hw_state, 0,
- sizeof(pipe_config->dpll_hw_state));
-
- pipe_config->ddi_pll_sel = SKL_DPLL0;
- pipe_config->dpll_hw_state.cfgcr1 = 0;
- pipe_config->dpll_hw_state.cfgcr2 = 0;
-
- ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
- switch (pipe_config->port_clock / 2) {
- case 81000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
- SKL_DPLL0);
- break;
- case 135000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
- SKL_DPLL0);
- break;
- case 270000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
- SKL_DPLL0);
- break;
- case 162000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
- SKL_DPLL0);
- break;
- /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
- results in CDCLK change. Need to handle the change of CDCLK by
- disabling pipes and re-enabling them */
- case 108000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
- SKL_DPLL0);
- break;
- case 216000:
- ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
- SKL_DPLL0);
- break;
-
- }
- pipe_config->dpll_hw_state.ctrl1 = ctrl1;
-}
-
-void
-hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
-{
- memset(&pipe_config->dpll_hw_state, 0,
- sizeof(pipe_config->dpll_hw_state));
-
- switch (pipe_config->port_clock / 2) {
- case 81000:
- pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
- break;
- case 135000:
- pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
- break;
- case 270000:
- pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
- break;
- }
-}
-
static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
@@ -1570,10 +1501,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Get bpp from vbt only for panels that dont have bpp in edid */
if (intel_connector->base.display_info.bpc == 0 &&
- (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
+ (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
- dev_priv->vbt.edp_bpp);
- bpp = dev_priv->vbt.edp_bpp;
+ dev_priv->vbt.edp.bpp);
+ bpp = dev_priv->vbt.edp.bpp;
}
/*
@@ -1651,13 +1582,7 @@ found:
&pipe_config->dp_m2_n2);
}
- if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
- skl_edp_set_pll_config(pipe_config);
- else if (IS_BROXTON(dev))
- /* handled in ddi */;
- else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- hsw_dp_set_ddi_pll_sel(pipe_config);
- else
+ if (!HAS_DDI(dev))
intel_dp_set_clock(encoder, pipe_config);
return true;
@@ -1779,11 +1704,11 @@ static void wait_panel_status(struct intel_dp *intel_dp,
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
- if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
+ if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
+ 5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
- }
DRM_DEBUG_KMS("Wait complete\n");
}
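
The timeout rewrite keeps the same 5 s / 10 ms behaviour but spells the values out in microseconds, apparently because this _wait_for() variant takes its arguments in microseconds. A trivial check of the unit conversion:

#include <stdio.h>

int main(void)
{
	const long USEC_PER_SEC = 1000000, USEC_PER_MSEC = 1000;

	/* 5 s total timeout, polling every 10 ms, expressed in us */
	printf("timeout %ld us, poll every %ld us\n",
	       5 * USEC_PER_SEC, 10 * USEC_PER_MSEC); /* 5000000, 10000 */
	return 0;
}
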
@@ -2290,6 +2215,15 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
POSTING_READ(DP_A);
udelay(500);
+ /*
+ * [DevILK] Work around required when enabling DP PLL
+ * while a pipe is enabled going to FDI:
+ * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
+ * 2. Program DP PLL enable
+ */
+ if (IS_GEN5(dev_priv))
+ intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);
+
intel_dp->DP |= DP_PLL_ENABLE;
I915_WRITE(DP_A, intel_dp->DP);
@@ -2409,7 +2343,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- int dotclock;
tmp = I915_READ(intel_dp->output_reg);
@@ -2459,16 +2392,12 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
pipe_config->port_clock = 270000;
}
- dotclock = intel_dotclock_calculate(pipe_config->port_clock,
- &pipe_config->dp_m_n);
+ pipe_config->base.adjusted_mode.crtc_clock =
+ intel_dotclock_calculate(pipe_config->port_clock,
+ &pipe_config->dp_m_n);
- if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
- ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
-
- if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
- pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+ if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
+ pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
/*
* This is a big fat ugly hack.
*
@@ -2483,8 +2412,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
* load.
*/
DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
- dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+ pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+ dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
}
}
@@ -2710,7 +2639,6 @@ static void intel_enable_dp(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
- enum port port = dp_to_dig_port(intel_dp)->port;
enum pipe pipe = crtc->pipe;
if (WARN_ON(dp_reg & DP_PORT_EN))
@@ -2721,35 +2649,12 @@ static void intel_enable_dp(struct intel_encoder *encoder)
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_init_panel_power_sequencer(intel_dp);
- /*
- * We get an occasional spurious underrun between the port
- * enable and vdd enable, when enabling port A eDP.
- *
- * FIXME: Not sure if this applies to (PCH) port D eDP as well
- */
- if (port == PORT_A)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
intel_dp_enable_port(intel_dp);
- if (port == PORT_A && IS_GEN5(dev_priv)) {
- /*
- * Underrun reporting for the other pipe was disabled in
- * g4x_pre_enable_dp(). The eDP PLL and port have now been
- * enabled, so it's now safe to re-enable underrun reporting.
- */
- intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
- intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
- }
-
edp_panel_vdd_on(intel_dp);
edp_panel_on(intel_dp);
edp_panel_vdd_off(intel_dp, true);
- if (port == PORT_A)
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
pps_unlock(intel_dp);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
@@ -2791,26 +2696,11 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
- enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
intel_dp_prepare(encoder);
- if (port == PORT_A && IS_GEN5(dev_priv)) {
- /*
- * We get FIFO underruns on the other pipe when
- * enabling the CPU eDP PLL, and when enabling CPU
- * eDP port. We could potentially avoid the PLL
- * underrun with a vblank wait just prior to enabling
- * the PLL, but that doesn't appear to help the port
- * enable case. Just sweep it all under the rug.
- */
- intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
- intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
- }
-
/* Only ilk+ has port A */
if (port == PORT_A)
ironlake_edp_pll_on(intel_dp);
@@ -3184,47 +3074,14 @@ static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
}
/*
- * Native read with retry for link status and receiver capability reads for
- * cases where the sink may still be asleep.
- *
- * Sinks are *supposed* to come up within 1ms from an off state, but we're also
- * supposed to retry 3 times per the spec.
- */
-static ssize_t
-intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
- void *buffer, size_t size)
-{
- ssize_t ret;
- int i;
-
- /*
- * Sometime we just get the same incorrect byte repeated
- * over the entire buffer. Doing just one throw away read
- * initially seems to "solve" it.
- */
- drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
-
- for (i = 0; i < 3; i++) {
- ret = drm_dp_dpcd_read(aux, offset, buffer, size);
- if (ret == size)
- return ret;
- msleep(1);
- }
-
- return ret;
-}
-
-/*
* Fetch AUX CH registers 0x202 - 0x207 which contain
* link status information
*/
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
- return intel_dp_dpcd_read_wake(&intel_dp->aux,
- DP_LANE0_1_STATUS,
- link_status,
- DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
+ return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
+ DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
/* These are source-specific values. */
@@ -3238,7 +3095,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
if (IS_BROXTON(dev))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (INTEL_INFO(dev)->gen >= 9) {
- if (dev_priv->edp_low_vswing && port == PORT_A)
+ if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
@@ -3859,8 +3716,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint8_t rev;
- if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
- sizeof(intel_dp->dpcd)) < 0)
+ if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
+ sizeof(intel_dp->dpcd)) < 0)
return false; /* aux transfer failed */
DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
@@ -3868,12 +3725,33 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
+ &intel_dp->sink_count, 1) < 0)
+ return false;
+
+ /*
+ * Sink count can change between short pulse HPD interrupts,
+ * hence a member variable in intel_dp tracks any changes
+ * between them.
+ */
+ intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
+
+ /*
+ * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
+ * a dongle is present but no display. Unless we need to know
+ * whether a dongle is present or not, we don't need to update
+ * downstream port information. So, an early return here saves
+ * time by skipping operations which are not required.
+ */
+ if (!is_edp(intel_dp) && !intel_dp->sink_count)
+ return false;
+
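
The raw DP_SINK_COUNT byte packs the count as bits 5:0 with bit 7 acting as bit 6, which is what the DP_GET_SINK_COUNT() helper unpacks; change detection then reduces to comparing the cached and freshly read values. A compilable sketch (the mask logic mirrors drm's DP_GET_SINK_COUNT()):

#include <stdint.h>
#include <stdio.h>

/* Bits 5:0 plus bit 7 shifted down to act as bit 6. */
static uint8_t get_sink_count(uint8_t raw)
{
	return ((raw & 0x80) >> 1) | (raw & 0x3f);
}

int main(void)
{
	uint8_t old_count = get_sink_count(0x01);
	uint8_t new_count = get_sink_count(0x00);	/* sink went away */

	if (old_count != new_count)
		puts("sink count changed -> full detection needed");
	return 0;
}
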
/* Check if the panel supports PSR */
memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
if (is_edp(intel_dp)) {
- intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
- intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
+ drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
+ intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
dev_priv->psr.sink_support = true;
DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
@@ -3884,9 +3762,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
uint8_t frame_sync_cap;
dev_priv->psr.sink_support = true;
- intel_dp_dpcd_read_wake(&intel_dp->aux,
- DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
- &frame_sync_cap, 1);
+ drm_dp_dpcd_read(&intel_dp->aux,
+ DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
+ &frame_sync_cap, 1);
dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
/* PSR2 needs frame sync as well */
dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
@@ -3902,15 +3780,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
/* Intermediate frequency support */
if (is_edp(intel_dp) &&
(intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
- (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
+ (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
(rev >= 0x03)) { /* eDp v1.4 or higher */
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
int i;
- intel_dp_dpcd_read_wake(&intel_dp->aux,
- DP_SUPPORTED_LINK_RATES,
- sink_rates,
- sizeof(sink_rates));
+ drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
int val = le16_to_cpu(sink_rates[i]);
@@ -3933,9 +3809,9 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
return true; /* no per-port downstream info */
- if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
- intel_dp->downstream_ports,
- DP_MAX_DOWNSTREAM_PORTS) < 0)
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
+ intel_dp->downstream_ports,
+ DP_MAX_DOWNSTREAM_PORTS) < 0)
return false; /* downstream port status fetch failed */
return true;
@@ -3949,11 +3825,11 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
- if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
- if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
}
@@ -3963,13 +3839,16 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
{
u8 buf[1];
+ if (!i915.enable_dp_mst)
+ return false;
+
if (!intel_dp->can_mst)
return false;
if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
return false;
- if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
if (buf[0] & DP_MST_CAP) {
DRM_DEBUG_KMS("Sink is MST capable\n");
intel_dp->is_mst = true;
@@ -4106,7 +3985,7 @@ stop:
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
- return intel_dp_dpcd_read_wake(&intel_dp->aux,
+ return drm_dp_dpcd_read(&intel_dp->aux,
DP_DEVICE_SERVICE_IRQ_VECTOR,
sink_irq_vector, 1) == 1;
}
@@ -4116,7 +3995,7 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
int ret;
- ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
+ ret = drm_dp_dpcd_read(&intel_dp->aux,
DP_SINK_COUNT_ESI,
sink_irq_vector, 14);
if (ret != 14)
@@ -4292,6 +4171,36 @@ go_again:
return -EINVAL;
}
+static void
+intel_dp_check_link_status(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+ if (!intel_dp_get_link_status(intel_dp, link_status)) {
+ DRM_ERROR("Failed to get link status\n");
+ return;
+ }
+
+ if (!intel_encoder->base.crtc)
+ return;
+
+ if (!to_intel_crtc(intel_encoder->base.crtc)->active)
+ return;
+
+ /* if link training is requested we should always perform it */
+ if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
+ (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
+ DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
+ intel_encoder->base.name);
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+ }
+}
+
/*
* According to DP spec
* 5.1.2:
@@ -4299,16 +4208,19 @@ go_again:
* 2. Configure link according to Receiver Capabilities
* 3. Use Link Training from 2.5.3.3 and 3.5.1.3
* 4. Check link status on receipt of hot-plug interrupt
+ *
+ * intel_dp_short_pulse - handles short pulse interrupts
+ * when full detection is not required.
+ * Returns %true if the short pulse was handled and full detection
+ * is not required, %false otherwise.
*/
-static void
-intel_dp_check_link_status(struct intel_dp *intel_dp)
+static bool
+intel_dp_short_pulse(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
- u8 link_status[DP_LINK_STATUS_SIZE];
-
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ u8 old_sink_count = intel_dp->sink_count;
+ bool ret;
/*
* Clearing compliance test variables to allow capturing
@@ -4318,20 +4230,17 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
- if (!intel_encoder->base.crtc)
- return;
-
- if (!to_intel_crtc(intel_encoder->base.crtc)->active)
- return;
-
- /* Try to read receiver status if the link appears to be up */
- if (!intel_dp_get_link_status(intel_dp, link_status)) {
- return;
- }
+ /*
+ * Now read the DPCD to see if it's actually running.
+ * If the current sink count doesn't match the value that
+ * was stored earlier, or if the DPCD read failed, we need
+ * to do full detection.
+ */
+ ret = intel_dp_get_dpcd(intel_dp);
- /* Now read the DPCD to see if it's actually running */
- if (!intel_dp_get_dpcd(intel_dp)) {
- return;
+ if ((old_sink_count != intel_dp->sink_count) || !ret) {
+ /* No need to proceed if we are going to do full detect */
+ return false;
}
/* Try to read the source of the interrupt */
@@ -4348,14 +4257,11 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
- /* if link training is requested we should perform it always */
- if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
- (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
- DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
- intel_encoder->base.name);
- intel_dp_start_link_train(intel_dp);
- intel_dp_stop_link_train(intel_dp);
- }
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ intel_dp_check_link_status(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return true;
}
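
The short-pulse path boils down to one decision: bail out to a full detect when the DPCD read fails or the sink count moved, otherwise service the interrupt in place. A condensed, compilable sketch of that decision (illustrative names):

#include <stdbool.h>
#include <stdio.h>

enum hpd_result { HPD_HANDLED, HPD_NEEDS_FULL_DETECT };

static enum hpd_result short_pulse(bool dpcd_ok, int old_sink, int new_sink)
{
	if (!dpcd_ok || old_sink != new_sink)
		return HPD_NEEDS_FULL_DETECT;
	/* ...ack sink-specific IRQs, retrain link if channel EQ is bad... */
	return HPD_HANDLED;
}

int main(void)
{
	/* sink count changed: 1 -> needs full detect */
	printf("%d\n", short_pulse(true, 1, 0));
	return 0;
}
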
/* XXX this is probably wrong for multiple downstream ports */
@@ -4368,6 +4274,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
+ if (is_edp(intel_dp))
+ return connector_status_connected;
+
/* if there's no downstream port, we're done */
if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
return connector_status_connected;
@@ -4375,14 +4284,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
/* If we're HPD-aware, SINK_COUNT changes dynamically */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
- uint8_t reg;
-
- if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
- &reg, 1) < 0)
- return connector_status_unknown;
- return DP_GET_SINK_COUNT(reg) ? connector_status_connected
- : connector_status_disconnected;
+ return intel_dp->sink_count ?
+ connector_status_connected : connector_status_disconnected;
}
/* If no HPD, poke DDC gently */
@@ -4591,6 +4495,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct edid *edid;
+ intel_dp_unset_edid(intel_dp);
edid = intel_dp_get_edid(intel_dp);
intel_connector->detect_edid = edid;
@@ -4611,9 +4516,10 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_dp->has_audio = false;
}
-static enum drm_connector_status
-intel_dp_detect(struct drm_connector *connector, bool force)
+static void
+intel_dp_long_pulse(struct intel_connector *intel_connector)
{
+ struct drm_connector *connector = &intel_connector->base;
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
@@ -4623,17 +4529,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
bool ret;
u8 sink_irq_vector;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
- intel_dp_unset_edid(intel_dp);
-
- if (intel_dp->is_mst) {
- /* MST devices are disconnected from a monitor POV */
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
- return connector_status_disconnected;
- }
-
power_domain = intel_display_port_aux_power_domain(intel_encoder);
intel_display_power_get(to_i915(dev), power_domain);
@@ -4651,19 +4546,42 @@ intel_dp_detect(struct drm_connector *connector, bool force)
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
+ if (intel_dp->is_mst) {
+ DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst,
+ intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+ intel_dp->is_mst);
+ }
+
goto out;
}
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+
intel_dp_probe_oui(intel_dp);
ret = intel_dp_probe_mst(intel_dp);
if (ret) {
- /* if we are in MST mode then this connector
- won't appear connected or have anything with EDID on it */
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ /*
+ * If we are in MST mode then this connector
+ * won't appear connected or have anything
+ * with EDID on it
+ */
status = connector_status_disconnected;
goto out;
+ } else if (connector->status == connector_status_connected) {
+ /*
+ * If the display was already connected and is still connected,
+ * check the link status; there have been known issues of
+ * link loss triggering a long pulse.
+ */
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ intel_dp_check_link_status(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ goto out;
}
/*
@@ -4676,9 +4594,8 @@ intel_dp_detect(struct drm_connector *connector, bool force)
intel_dp_set_edid(intel_dp);
- if (intel_encoder->type != INTEL_OUTPUT_EDP)
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
status = connector_status_connected;
+ intel_dp->detect_done = true;
/* Try to read the source of the interrupt */
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
@@ -4695,8 +4612,43 @@ intel_dp_detect(struct drm_connector *connector, bool force)
}
out:
+ if ((status != connector_status_connected) &&
+ (intel_dp->is_mst == false))
+ intel_dp_unset_edid(intel_dp);
+
intel_display_power_put(to_i915(dev), power_domain);
- return status;
+ return;
+}
+
+static enum drm_connector_status
+intel_dp_detect(struct drm_connector *connector, bool force)
+{
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+
+ if (intel_dp->is_mst) {
+ /* MST devices are disconnected from a monitor POV */
+ intel_dp_unset_edid(intel_dp);
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ return connector_status_disconnected;
+ }
+
+ /* If a full detect hasn't been performed yet, do one now */
+ if (!intel_dp->detect_done)
+ intel_dp_long_pulse(intel_dp->attached_connector);
+
+ intel_dp->detect_done = false;
+
+ if (is_edp(intel_dp) || intel_connector->detect_edid)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
}
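
The new detect_done flag is a one-shot cache: the expensive long-pulse detection sets it, and the drm ->detect() callback that typically follows consumes it instead of repeating the work. A sketch of the idea with hypothetical types:

#include <stdbool.h>
#include <stdio.h>

struct dp {
	bool detect_done;
	bool has_edid;
};

static void long_pulse(struct dp *dp)
{
	dp->has_edid = true;	/* stands in for full DPCD/EDID detection */
	dp->detect_done = true;
}

static bool detect(struct dp *dp)
{
	if (!dp->detect_done)
		long_pulse(dp);	/* no cached result, do the full detect */
	dp->detect_done = false;	/* one-shot cache */
	return dp->has_edid;
}

int main(void)
{
	struct dp dp = {0};

	printf("connected: %d\n", detect(&dp));
	return 0;
}
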
static void
@@ -4835,6 +4787,11 @@ intel_dp_set_property(struct drm_connector *connector,
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
+ if (HAS_GMCH_DISPLAY(dev_priv) &&
+ val == DRM_MODE_SCALE_CENTER) {
+ DRM_DEBUG_KMS("centering not supported\n");
+ return -EINVAL;
+ }
if (intel_connector->panel.fitting_mode == val) {
/* the eDP scaling property is not changed */
@@ -5022,44 +4979,37 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_display_power_get(dev_priv, power_domain);
if (long_hpd) {
- if (!intel_digital_port_connected(dev_priv, intel_dig_port))
- goto mst_fail;
+ intel_dp_long_pulse(intel_dp->attached_connector);
+ if (intel_dp->is_mst)
+ ret = IRQ_HANDLED;
+ goto put_power;
- if (!intel_dp_get_dpcd(intel_dp)) {
- goto mst_fail;
- }
-
- intel_dp_probe_oui(intel_dp);
-
- if (!intel_dp_probe_mst(intel_dp)) {
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- intel_dp_check_link_status(intel_dp);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
- goto mst_fail;
- }
} else {
if (intel_dp->is_mst) {
- if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
- goto mst_fail;
+ if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
+ /*
+ * If we were in MST mode, and device is not
+ * there, get out of MST mode
+ */
+ DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
+ intel_dp->is_mst);
+ goto put_power;
+ }
}
if (!intel_dp->is_mst) {
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- intel_dp_check_link_status(intel_dp);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (!intel_dp_short_pulse(intel_dp)) {
+ intel_dp_long_pulse(intel_dp->attached_connector);
+ goto put_power;
+ }
}
}
ret = IRQ_HANDLED;
- goto put_power;
-mst_fail:
- /* if we were in MST mode, and device is not there get out of MST mode */
- if (intel_dp->is_mst) {
- DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
- intel_dp->is_mst = false;
- drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
- }
put_power:
intel_display_power_put(dev_priv, power_domain);
@@ -5070,14 +5020,6 @@ put_power:
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- union child_device_config *p_child;
- int i;
- static const short port_mapping[] = {
- [PORT_B] = DVO_PORT_DPB,
- [PORT_C] = DVO_PORT_DPC,
- [PORT_D] = DVO_PORT_DPD,
- [PORT_E] = DVO_PORT_DPE,
- };
/*
* eDP not supported on g4x. so bail out early just
@@ -5089,18 +5031,7 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port)
if (port == PORT_A)
return true;
- if (!dev_priv->vbt.child_dev_num)
- return false;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- p_child = dev_priv->vbt.child_dev + i;
-
- if (p_child->common.dvo_port == port_mapping[port] &&
- (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
- (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
- return true;
- }
- return false;
+ return intel_bios_is_port_edp(dev_priv, port);
}
void
@@ -5207,7 +5138,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
- vbt = dev_priv->vbt.edp_pps;
+ vbt = dev_priv->vbt.edp.pps;
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
@@ -5258,7 +5189,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_on, pp_off, pp_div, port_sel = 0;
- int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
+ int div = dev_priv->rawclk_freq / 1000;
i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
enum port port = dp_to_dig_port(intel_dp)->port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
@@ -5793,8 +5724,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
fixed_mode = drm_mode_duplicate(dev,
dev_priv->vbt.lfp_lvds_vbt_mode);
- if (fixed_mode)
+ if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
+ }
}
mutex_unlock(&dev->mode_config.mutex);
@@ -5851,19 +5785,17 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
/* intel_dp vfuncs */
if (INTEL_INFO(dev)->gen >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
else if (HAS_PCH_SPLIT(dev))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
- intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
+ intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
if (INTEL_INFO(dev)->gen >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
- intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
+ intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
if (HAS_DDI(dev))
intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
@@ -5993,9 +5925,9 @@ fail:
return false;
}
-void
-intel_dp_init(struct drm_device *dev,
- i915_reg_t output_reg, enum port port)
+bool intel_dp_init(struct drm_device *dev,
+ i915_reg_t output_reg,
+ enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
@@ -6005,7 +5937,7 @@ intel_dp_init(struct drm_device *dev,
intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
if (!intel_dig_port)
- return;
+ return false;
intel_connector = intel_connector_alloc();
if (!intel_connector)
@@ -6062,7 +5994,7 @@ intel_dp_init(struct drm_device *dev,
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
- return;
+ return true;
err_init_connector:
drm_encoder_cleanup(encoder);
@@ -6070,8 +6002,7 @@ err_encoder_init:
kfree(intel_connector);
err_connector_alloc:
kfree(intel_dig_port);
-
- return;
+ return false;
}
void intel_dp_mst_suspend(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 926a1e6ea..60fb39cd2 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -160,7 +160,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
break;
}
-
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 2c999725b..7a34090ce 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -33,7 +33,6 @@
static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = encoder->base.dev;
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
@@ -90,9 +89,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->dp_m_n.tu = slots;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- hsw_dp_set_ddi_pll_sel(pipe_config);
-
return true;
}
@@ -106,7 +102,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
- drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port);
+ drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->connector->port);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
if (ret) {
@@ -127,10 +123,11 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
/* and this can also fail */
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
- drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port);
+ drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->connector->port);
intel_dp->active_mst_links--;
- intel_mst->port = NULL;
+
+ intel_mst->connector = NULL;
if (intel_dp->active_mst_links == 0) {
intel_dig_port->base.post_disable(&intel_dig_port->base);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
@@ -170,7 +167,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
found->encoder = encoder;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
- intel_mst->port = found->port;
+
+ intel_mst->connector = found;
if (intel_dp->active_mst_links == 0) {
intel_prepare_ddi_buffer(&intel_dig_port->base);
@@ -188,7 +186,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
}
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
- intel_mst->port,
+ intel_mst->connector->port,
intel_crtc->config->pbn, &slots);
if (ret == false) {
DRM_ERROR("failed to allocate vcpi\n");
@@ -229,7 +227,7 @@ static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
*pipe = intel_mst->pipe;
- if (intel_mst->port)
+ if (intel_mst->connector)
return true;
return false;
}
@@ -290,10 +288,11 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
- if (!edid)
- return 0;
+ if (!intel_dp)
+ return intel_connector_update_modes(connector, NULL);
+ edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
ret = intel_connector_update_modes(connector, edid);
kfree(edid);
@@ -306,6 +305,8 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
+ if (!intel_dp)
+ return connector_status_disconnected;
return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
}
@@ -371,6 +372,8 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
struct intel_dp *intel_dp = intel_connector->mst_port;
struct intel_crtc *crtc = to_intel_crtc(state->crtc);
+ if (!intel_dp)
+ return NULL;
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
@@ -378,6 +381,8 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
+ if (!intel_dp)
+ return NULL;
return &intel_dp->mst_encoders[0]->base.base;
}
@@ -488,23 +493,11 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
/* need to nuke the connector */
drm_modeset_lock_all(dev);
- if (connector->state->crtc) {
- struct drm_mode_set set;
- int ret;
-
- memset(&set, 0, sizeof(set));
- set.crtc = connector->state->crtc,
-
- ret = drm_atomic_helper_set_config(&set);
-
- WARN(ret, "Disabling mst crtc failed with %i\n", ret);
- }
-
intel_connector_remove_from_fbdev(intel_connector);
- drm_connector_cleanup(connector);
+ intel_connector->mst_port = NULL;
drm_modeset_unlock_all(dev);
- kfree(intel_connector);
+ drm_connector_unreference(&intel_connector->base);
DRM_DEBUG_KMS("\n");
}
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
new file mode 100644
index 000000000..58f60b278
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -0,0 +1,1786 @@
+/*
+ * Copyright © 2006-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "intel_drv.h"
+
+struct intel_shared_dpll *
+intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id id)
+{
+ return &dev_priv->shared_dplls[id];
+}
+
+enum intel_dpll_id
+intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+	if (WARN_ON(pll < dev_priv->shared_dplls ||
+ pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
+ return -1;
+
+ return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
+}
+
+void
+intel_shared_dpll_config_get(struct intel_shared_dpll_config *config,
+ struct intel_shared_dpll *pll,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll);
+
+ config[id].crtc_mask |= 1 << crtc->pipe;
+}
+
+void
+intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
+ struct intel_shared_dpll *pll,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll);
+
+ config[id].crtc_mask &= ~(1 << crtc->pipe);
+}
+
+/* For ILK+ */
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state)
+{
+ bool cur_state;
+ struct intel_dpll_hw_state hw_state;
+
+ if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
+ return;
+
+ cur_state = pll->funcs.get_hw_state(dev_priv, pll, &hw_state);
+ I915_STATE_WARN(cur_state != state,
+ "%s assertion failure (expected %s, current %s)\n",
+ pll->name, onoff(state), onoff(cur_state));
+}
+
+void intel_prepare_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+
+ if (WARN_ON(pll == NULL))
+ return;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ WARN_ON(!pll->config.crtc_mask);
+ if (!pll->active_mask) {
+ DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
+ WARN_ON(pll->on);
+ assert_shared_dpll_disabled(dev_priv, pll);
+
+ pll->funcs.mode_set(dev_priv, pll);
+ }
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+/**
+ * intel_enable_shared_dpll - enable a CRTC's shared DPLL
+ * @crtc: CRTC whose shared DPLL should be enabled
+ *
+ * The PCH PLL needs to be enabled before the PCH transcoder, since it
+ * drives the transcoder clock.
+ */
+void intel_enable_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+ unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
+ unsigned old_mask;
+
+ if (WARN_ON(pll == NULL))
+ return;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ old_mask = pll->active_mask;
+
+ if (WARN_ON(!(pll->config.crtc_mask & crtc_mask)) ||
+ WARN_ON(pll->active_mask & crtc_mask))
+ goto out;
+
+ pll->active_mask |= crtc_mask;
+
+ DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
+ pll->name, pll->active_mask, pll->on,
+ crtc->base.base.id);
+
+ if (old_mask) {
+ WARN_ON(!pll->on);
+ assert_shared_dpll_enabled(dev_priv, pll);
+ goto out;
+ }
+ WARN_ON(pll->on);
+
+ DRM_DEBUG_KMS("enabling %s\n", pll->name);
+ pll->funcs.enable(dev_priv, pll);
+ pll->on = true;
+
+out:
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+void intel_disable_shared_dpll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+ unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
+
+ /* PCH only available on ILK+ */
+ if (INTEL_INFO(dev)->gen < 5)
+ return;
+
+ if (pll == NULL)
+ return;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ if (WARN_ON(!(pll->active_mask & crtc_mask)))
+ goto out;
+
+ DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
+ pll->name, pll->active_mask, pll->on,
+ crtc->base.base.id);
+
+ assert_shared_dpll_enabled(dev_priv, pll);
+ WARN_ON(!pll->on);
+
+ pll->active_mask &= ~crtc_mask;
+ if (pll->active_mask)
+ goto out;
+
+ DRM_DEBUG_KMS("disabling %s\n", pll->name);
+ pll->funcs.disable(dev_priv, pll);
+ pll->on = false;
+
+out:
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static struct intel_shared_dpll *
+intel_find_shared_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ enum intel_dpll_id range_min,
+ enum intel_dpll_id range_max)
+{
+ struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+ struct intel_shared_dpll *pll;
+ struct intel_shared_dpll_config *shared_dpll;
+ enum intel_dpll_id i;
+
+ shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
+ for (i = range_min; i <= range_max; i++) {
+ pll = &dev_priv->shared_dplls[i];
+
+ /* Only want to check enabled timings first */
+ if (shared_dpll[i].crtc_mask == 0)
+ continue;
+
+ if (memcmp(&crtc_state->dpll_hw_state,
+ &shared_dpll[i].hw_state,
+ sizeof(crtc_state->dpll_hw_state)) == 0) {
+ DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n",
+ crtc->base.base.id, pll->name,
+ shared_dpll[i].crtc_mask,
+ pll->active_mask);
+ return pll;
+ }
+ }
+
+ /* Ok no matching timings, maybe there's a free one? */
+ for (i = range_min; i <= range_max; i++) {
+ pll = &dev_priv->shared_dplls[i];
+ if (shared_dpll[i].crtc_mask == 0) {
+ DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
+ crtc->base.base.id, pll->name);
+ return pll;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+intel_reference_shared_dpll(struct intel_shared_dpll *pll,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_shared_dpll_config *shared_dpll;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum intel_dpll_id i = pll->id;
+
+ shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+
+ if (shared_dpll[i].crtc_mask == 0)
+ shared_dpll[i].hw_state =
+ crtc_state->dpll_hw_state;
+
+ crtc_state->shared_dpll = pll;
+ DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
+ pipe_name(crtc->pipe));
+
+ intel_shared_dpll_config_get(shared_dpll, pll, crtc);
+}
+
+void intel_shared_dpll_commit(struct drm_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ struct intel_shared_dpll_config *shared_dpll;
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+
+ if (!to_intel_atomic_state(state)->dpll_set)
+ return;
+
+ shared_dpll = to_intel_atomic_state(state)->shared_dpll;
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ pll = &dev_priv->shared_dplls[i];
+ pll->config = shared_dpll[i];
+ }
+}
+
+static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ val = I915_READ(PCH_DPLL(pll->id));
+ hw_state->dpll = val;
+ hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
+ hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return val & DPLL_VCO_ENABLE;
+}
+
+static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
+ I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
+}
+
+static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+ bool enabled;
+
+ I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+
+ val = I915_READ(PCH_DREF_CONTROL);
+ enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+ DREF_SUPERSPREAD_SOURCE_MASK));
+ I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ /* PCH refclock must be enabled first */
+ ibx_assert_pch_refclk_enabled(dev_priv);
+
+ I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(PCH_DPLL(pll->id));
+ udelay(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
+ POSTING_READ(PCH_DPLL(pll->id));
+ udelay(200);
+}
+
+static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *crtc;
+
+	/* Make sure no transcoder is still depending on us. */
+ for_each_intel_crtc(dev, crtc) {
+ if (crtc->config->shared_dpll == pll)
+ assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
+ }
+
+ I915_WRITE(PCH_DPLL(pll->id), 0);
+ POSTING_READ(PCH_DPLL(pll->id));
+ udelay(200);
+}
+
+static struct intel_shared_dpll *
+ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
+ i = (enum intel_dpll_id) crtc->pipe;
+ pll = &dev_priv->shared_dplls[i];
+
+ DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
+ crtc->base.base.id, pll->name);
+ } else {
+ pll = intel_find_shared_dpll(crtc, crtc_state,
+ DPLL_ID_PCH_PLL_A,
+ DPLL_ID_PCH_PLL_B);
+ }
+
+ if (!pll)
+ return NULL;
+
+ /* reference the pll */
+ intel_reference_shared_dpll(pll, crtc_state);
+
+ return pll;
+}
+
+static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
+ .mode_set = ibx_pch_dpll_mode_set,
+ .enable = ibx_pch_dpll_enable,
+ .disable = ibx_pch_dpll_disable,
+ .get_hw_state = ibx_pch_dpll_get_hw_state,
+};
+
+static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
+ POSTING_READ(WRPLL_CTL(pll->id));
+ udelay(20);
+}
+
+static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
+ POSTING_READ(SPLL_CTL);
+ udelay(20);
+}
+
+static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t val;
+
+ val = I915_READ(WRPLL_CTL(pll->id));
+ I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL(pll->id));
+}
+
+static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t val;
+
+ val = I915_READ(SPLL_CTL);
+ I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ POSTING_READ(SPLL_CTL);
+}
+
+static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ val = I915_READ(WRPLL_CTL(pll->id));
+ hw_state->wrpll = val;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return val & WRPLL_PLL_ENABLE;
+}
+
+static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ val = I915_READ(SPLL_CTL);
+ hw_state->spll = val;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return val & SPLL_PLL_ENABLE;
+}
+
+static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
+{
+ switch (pll->id) {
+ case DPLL_ID_WRPLL1:
+ return PORT_CLK_SEL_WRPLL1;
+ case DPLL_ID_WRPLL2:
+ return PORT_CLK_SEL_WRPLL2;
+ case DPLL_ID_SPLL:
+ return PORT_CLK_SEL_SPLL;
+ case DPLL_ID_LCPLL_810:
+ return PORT_CLK_SEL_LCPLL_810;
+ case DPLL_ID_LCPLL_1350:
+ return PORT_CLK_SEL_LCPLL_1350;
+ case DPLL_ID_LCPLL_2700:
+ return PORT_CLK_SEL_LCPLL_2700;
+ default:
+ return PORT_CLK_SEL_NONE;
+ }
+}
+
+#define LC_FREQ 2700
+#define LC_FREQ_2K U64_C(LC_FREQ * 2000)
+
+#define P_MIN 2
+#define P_MAX 64
+#define P_INC 2
+
+/* Constraints for PLL good behavior */
+#define REF_MIN 48
+#define REF_MAX 400
+#define VCO_MIN 2400
+#define VCO_MAX 4800
+
+struct hsw_wrpll_rnp {
+ unsigned p, n2, r2;
+};
+
+static unsigned hsw_wrpll_get_budget_for_freq(int clock)
+{
+ unsigned budget;
+
+ switch (clock) {
+ case 25175000:
+ case 25200000:
+ case 27000000:
+ case 27027000:
+ case 37762500:
+ case 37800000:
+ case 40500000:
+ case 40541000:
+ case 54000000:
+ case 54054000:
+ case 59341000:
+ case 59400000:
+ case 72000000:
+ case 74176000:
+ case 74250000:
+ case 81000000:
+ case 81081000:
+ case 89012000:
+ case 89100000:
+ case 108000000:
+ case 108108000:
+ case 111264000:
+ case 111375000:
+ case 148352000:
+ case 148500000:
+ case 162000000:
+ case 162162000:
+ case 222525000:
+ case 222750000:
+ case 296703000:
+ case 297000000:
+ budget = 0;
+ break;
+ case 233500000:
+ case 245250000:
+ case 247750000:
+ case 253250000:
+ case 298000000:
+ budget = 1500;
+ break;
+ case 169128000:
+ case 169500000:
+ case 179500000:
+ case 202000000:
+ budget = 2000;
+ break;
+ case 256250000:
+ case 262500000:
+ case 270000000:
+ case 272500000:
+ case 273750000:
+ case 280750000:
+ case 281250000:
+ case 286000000:
+ case 291750000:
+ budget = 4000;
+ break;
+ case 267250000:
+ case 268500000:
+ budget = 5000;
+ break;
+ default:
+ budget = 1000;
+ break;
+ }
+
+ return budget;
+}
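+
+/*
+ * The budget above is expressed in ppm of the target frequency: 0 for
+ * common display clocks that should be hit exactly, 1000 ppm (0.1%) by
+ * default, and looser bounds for a few known-awkward frequencies.
+ */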
+
+static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
+ unsigned r2, unsigned n2, unsigned p,
+ struct hsw_wrpll_rnp *best)
+{
+ uint64_t a, b, c, d, diff, diff_best;
+
+ /* No best (r,n,p) yet */
+ if (best->p == 0) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ return;
+ }
+
+ /*
+ * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
+ * freq2k.
+ *
+ * delta = 1e6 *
+ * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
+ * freq2k;
+ *
+ * and we would like delta <= budget.
+ *
+ * If the discrepancy is above the PPM-based budget, always prefer to
+ * improve upon the previous solution. However, if you're within the
+ * budget, try to maximize Ref * VCO, that is N / (P * R^2).
+ */
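+	/*
+	 * Illustratively, multiplying the budget inequality out: delta <=
+	 * budget is equivalent to 1e6 * diff <= budget * freq2k * p * r2,
+	 * i.e. c <= a below (and d <= b for the current best), so a < c
+	 * means the candidate is over budget.
+	 */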
+ a = freq2k * budget * p * r2;
+ b = freq2k * budget * best->p * best->r2;
+ diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
+ diff_best = abs_diff(freq2k * best->p * best->r2,
+ LC_FREQ_2K * best->n2);
+ c = 1000000 * diff;
+ d = 1000000 * diff_best;
+
+ if (a < c && b < d) {
+ /* If both are above the budget, pick the closer */
+ if (best->p * best->r2 * diff < p * r2 * diff_best) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ }
+ } else if (a >= c && b < d) {
+		/* A is within the budget but the current best is not: update. */
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ } else if (a >= c && b >= d) {
+		/* Both are within the budget, so pick the higher n2/(r2*r2) */
+ if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
+ best->p = p;
+ best->n2 = n2;
+ best->r2 = r2;
+ }
+ }
+ /* Otherwise a < c && b >= d, do nothing */
+}
+
+static void
+hsw_ddi_calculate_wrpll(int clock /* in Hz */,
+ unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
+{
+ uint64_t freq2k;
+ unsigned p, n2, r2;
+ struct hsw_wrpll_rnp best = { 0, 0, 0 };
+ unsigned budget;
+
+ freq2k = clock / 100;
+
+ budget = hsw_wrpll_get_budget_for_freq(clock);
+
+	/* Special case handling for a 540 MHz pixel clock: bypass the WR PLL
+	 * entirely and use the LC PLL directly. */
+ if (freq2k == 5400000) {
+ *n2_out = 2;
+ *p_out = 1;
+ *r2_out = 2;
+ return;
+ }
+
+ /*
+ * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
+ * the WR PLL.
+ *
+ * We want R so that REF_MIN <= Ref <= REF_MAX.
+ * Injecting R2 = 2 * R gives:
+ * REF_MAX * r2 > LC_FREQ * 2 and
+ * REF_MIN * r2 < LC_FREQ * 2
+ *
+ * Which means the desired boundaries for r2 are:
+ * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
+ *
+ */
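+	/*
+	 * As a sanity check of these bounds: with LC_FREQ = 2700,
+	 * REF_MIN = 48 and REF_MAX = 400 the loop scans r2 = 14..112,
+	 * i.e. Ref = 5400/r2 from roughly 385.7 MHz down to 48.2 MHz.
+	 */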
+ for (r2 = LC_FREQ * 2 / REF_MAX + 1;
+ r2 <= LC_FREQ * 2 / REF_MIN;
+ r2++) {
+
+ /*
+ * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
+ *
+ * Once again we want VCO_MIN <= VCO <= VCO_MAX.
+ * Injecting R2 = 2 * R and N2 = 2 * N, we get:
+ * VCO_MAX * r2 > n2 * LC_FREQ and
+		 * VCO_MIN * r2 < n2 * LC_FREQ
+ *
+ * Which means the desired boundaries for n2 are:
+ * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
+ */
+ for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
+ n2 <= VCO_MAX * r2 / LC_FREQ;
+ n2++) {
+
+ for (p = P_MIN; p <= P_MAX; p += P_INC)
+ hsw_wrpll_update_rnp(freq2k, budget,
+ r2, n2, p, &best);
+ }
+ }
+
+ *n2_out = best.n2;
+ *p_out = best.p;
+ *r2_out = best.r2;
+}
+
+static struct intel_shared_dpll *
+hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll;
+ int clock = crtc_state->port_clock;
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (encoder->type == INTEL_OUTPUT_HDMI) {
+ uint32_t val;
+ unsigned p, n2, r2;
+
+ hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
+ WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
+ WRPLL_DIVIDER_POST(p);
+
+ crtc_state->dpll_hw_state.wrpll = val;
+
+ pll = intel_find_shared_dpll(crtc, crtc_state,
+ DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
+
+ } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ encoder->type == INTEL_OUTPUT_DP_MST ||
+ encoder->type == INTEL_OUTPUT_EDP) {
+ enum intel_dpll_id pll_id;
+
+ switch (clock / 2) {
+ case 81000:
+ pll_id = DPLL_ID_LCPLL_810;
+ break;
+ case 135000:
+ pll_id = DPLL_ID_LCPLL_1350;
+ break;
+ case 270000:
+ pll_id = DPLL_ID_LCPLL_2700;
+ break;
+ default:
+ DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
+ return NULL;
+ }
+
+ pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
+
+ } else if (encoder->type == INTEL_OUTPUT_ANALOG) {
+ if (WARN_ON(crtc_state->port_clock / 2 != 135000))
+ return NULL;
+
+ crtc_state->dpll_hw_state.spll =
+ SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
+
+ pll = intel_find_shared_dpll(crtc, crtc_state,
+ DPLL_ID_SPLL, DPLL_ID_SPLL);
+ } else {
+ return NULL;
+ }
+
+ if (!pll)
+ return NULL;
+
+ crtc_state->ddi_pll_sel = hsw_pll_to_ddi_pll_sel(pll);
+
+ intel_reference_shared_dpll(pll, crtc_state);
+
+ return pll;
+}
+
+
+static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
+ .enable = hsw_ddi_wrpll_enable,
+ .disable = hsw_ddi_wrpll_disable,
+ .get_hw_state = hsw_ddi_wrpll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
+ .enable = hsw_ddi_spll_enable,
+ .disable = hsw_ddi_spll_disable,
+ .get_hw_state = hsw_ddi_spll_get_hw_state,
+};
+
+static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+}
+
+static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+}
+
+static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ return true;
+}
+
+static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
+ .enable = hsw_ddi_lcpll_enable,
+ .disable = hsw_ddi_lcpll_disable,
+ .get_hw_state = hsw_ddi_lcpll_get_hw_state,
+};
+
+struct skl_dpll_regs {
+ i915_reg_t ctl, cfgcr1, cfgcr2;
+};
+
+/* this array is indexed by the *shared* pll id */
+static const struct skl_dpll_regs skl_dpll_regs[4] = {
+ {
+ /* DPLL 0 */
+ .ctl = LCPLL1_CTL,
+ /* DPLL 0 doesn't support HDMI mode */
+ },
+ {
+ /* DPLL 1 */
+ .ctl = LCPLL2_CTL,
+ .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
+ .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
+ },
+ {
+ /* DPLL 2 */
+ .ctl = WRPLL_CTL(0),
+ .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
+ .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
+ },
+ {
+ /* DPLL 3 */
+ .ctl = WRPLL_CTL(1),
+ .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
+ .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
+ },
+};
+
+static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t val;
+
+ val = I915_READ(DPLL_CTRL1);
+
+ val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) |
+ DPLL_CTRL1_LINK_RATE_MASK(pll->id));
+ val |= pll->config.hw_state.ctrl1 << (pll->id * 6);
+
+ I915_WRITE(DPLL_CTRL1, val);
+ POSTING_READ(DPLL_CTRL1);
+}
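+
+/*
+ * Each DPLL thus owns a 6-bit field in DPLL_CTRL1: bits 0-5 for DPLL 0,
+ * bits 6-11 for DPLL 1, bits 12-17 for DPLL 2 and bits 18-23 for DPLL 3.
+ */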
+
+static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+ skl_ddi_pll_write_ctrl1(dev_priv, pll);
+
+ I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
+ I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
+ POSTING_READ(regs[pll->id].cfgcr1);
+ POSTING_READ(regs[pll->id].cfgcr2);
+
+ /* the enable bit is always bit 31 */
+ I915_WRITE(regs[pll->id].ctl,
+ I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
+
+ if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(pll->id), 5))
+ DRM_ERROR("DPLL %d not locked\n", pll->id);
+}
+
+static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ skl_ddi_pll_write_ctrl1(dev_priv, pll);
+}
+
+static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+
+ /* the enable bit is always bit 31 */
+ I915_WRITE(regs[pll->id].ctl,
+ I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
+ POSTING_READ(regs[pll->id].ctl);
+}
+
+static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+}
+
+static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+ bool ret;
+
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ ret = false;
+
+ val = I915_READ(regs[pll->id].ctl);
+ if (!(val & LCPLL_PLL_ENABLE))
+ goto out;
+
+ val = I915_READ(DPLL_CTRL1);
+ hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
+
+ /* avoid reading back stale values if HDMI mode is not enabled */
+ if (val & DPLL_CTRL1_HDMI_MODE(pll->id)) {
+ hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
+ hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
+ }
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return ret;
+}
+
+static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+ const struct skl_dpll_regs *regs = skl_dpll_regs;
+ bool ret;
+
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ ret = false;
+
+ /* DPLL0 is always enabled since it drives CDCLK */
+ val = I915_READ(regs[pll->id].ctl);
+ if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
+ goto out;
+
+ val = I915_READ(DPLL_CTRL1);
+ hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f;
+
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return ret;
+}
+
+struct skl_wrpll_context {
+ uint64_t min_deviation; /* current minimal deviation */
+ uint64_t central_freq; /* chosen central freq */
+ uint64_t dco_freq; /* chosen dco freq */
+ unsigned int p; /* chosen divider */
+};
+
+static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
+{
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->min_deviation = U64_MAX;
+}
+
+/* DCO freq must be within +1%/-6% of the DCO central freq */
+#define SKL_DCO_MAX_PDEVIATION 100
+#define SKL_DCO_MAX_NDEVIATION 600
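+/* The deviation is in units of 0.01%, so 100 = +1% and 600 = -6%. */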
+
+static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
+ uint64_t central_freq,
+ uint64_t dco_freq,
+ unsigned int divider)
+{
+ uint64_t deviation;
+
+ deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
+ central_freq);
+
+ /* positive deviation */
+ if (dco_freq >= central_freq) {
+ if (deviation < SKL_DCO_MAX_PDEVIATION &&
+ deviation < ctx->min_deviation) {
+ ctx->min_deviation = deviation;
+ ctx->central_freq = central_freq;
+ ctx->dco_freq = dco_freq;
+ ctx->p = divider;
+ }
+ /* negative deviation */
+ } else if (deviation < SKL_DCO_MAX_NDEVIATION &&
+ deviation < ctx->min_deviation) {
+ ctx->min_deviation = deviation;
+ ctx->central_freq = central_freq;
+ ctx->dco_freq = dco_freq;
+ ctx->p = divider;
+ }
+}
+
+static void skl_wrpll_get_multipliers(unsigned int p,
+ unsigned int *p0 /* out */,
+ unsigned int *p1 /* out */,
+ unsigned int *p2 /* out */)
+{
+ /* even dividers */
+ if (p % 2 == 0) {
+ unsigned int half = p / 2;
+
+ if (half == 1 || half == 2 || half == 3 || half == 5) {
+ *p0 = 2;
+ *p1 = 1;
+ *p2 = half;
+ } else if (half % 2 == 0) {
+ *p0 = 2;
+ *p1 = half / 2;
+ *p2 = 2;
+ } else if (half % 3 == 0) {
+ *p0 = 3;
+ *p1 = half / 3;
+ *p2 = 2;
+ } else if (half % 7 == 0) {
+ *p0 = 7;
+ *p1 = half / 7;
+ *p2 = 2;
+ }
+ } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */
+ *p0 = 3;
+ *p1 = 1;
+ *p2 = p / 3;
+ } else if (p == 5 || p == 7) {
+ *p0 = p;
+ *p1 = 1;
+ *p2 = 1;
+ } else if (p == 15) {
+ *p0 = 3;
+ *p1 = 1;
+ *p2 = 5;
+ } else if (p == 21) {
+ *p0 = 7;
+ *p1 = 1;
+ *p2 = 3;
+ } else if (p == 35) {
+ *p0 = 7;
+ *p1 = 1;
+ *p2 = 5;
+ }
+}
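+
+/*
+ * For instance, p = 10 yields (p0, p1, p2) = (2, 1, 5) and p = 12 yields
+ * (2, 3, 2); in each case p0 * p1 * p2 == p.
+ */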
+
+struct skl_wrpll_params {
+ uint32_t dco_fraction;
+ uint32_t dco_integer;
+ uint32_t qdiv_ratio;
+ uint32_t qdiv_mode;
+ uint32_t kdiv;
+ uint32_t pdiv;
+ uint32_t central_freq;
+};
+
+static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
+ uint64_t afe_clock,
+ uint64_t central_freq,
+ uint32_t p0, uint32_t p1, uint32_t p2)
+{
+ uint64_t dco_freq;
+
+ switch (central_freq) {
+ case 9600000000ULL:
+ params->central_freq = 0;
+ break;
+ case 9000000000ULL:
+ params->central_freq = 1;
+ break;
+ case 8400000000ULL:
+ params->central_freq = 3;
+ }
+
+ switch (p0) {
+ case 1:
+ params->pdiv = 0;
+ break;
+ case 2:
+ params->pdiv = 1;
+ break;
+ case 3:
+ params->pdiv = 2;
+ break;
+ case 7:
+ params->pdiv = 4;
+ break;
+ default:
+ WARN(1, "Incorrect PDiv\n");
+ }
+
+ switch (p2) {
+ case 5:
+ params->kdiv = 0;
+ break;
+ case 2:
+ params->kdiv = 1;
+ break;
+ case 3:
+ params->kdiv = 2;
+ break;
+ case 1:
+ params->kdiv = 3;
+ break;
+ default:
+ WARN(1, "Incorrect KDiv\n");
+ }
+
+ params->qdiv_ratio = p1;
+ params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;
+
+ dco_freq = p0 * p1 * p2 * afe_clock;
+
+ /*
+ * Intermediate values are in Hz.
+	 * Divide by MHz to match the bspec
+ */
+ params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+ params->dco_fraction =
+ div_u64((div_u64(dco_freq, 24) -
+ params->dco_integer * MHz(1)) * 0x8000, MHz(1));
+}
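+
+/*
+ * Worked example: a 148.5 MHz pixel clock gives afe_clock = 742.5 MHz;
+ * divider p = 12 puts the DCO at 8910 MHz, 1% below the 9000 MHz central
+ * frequency (inside the -6% window), so dco_integer = 371 (8910 MHz /
+ * 24 MHz) and dco_fraction = 0.25 * 0x8000 = 8192.
+ */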
+
+static bool
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ struct skl_wrpll_params *wrpll_params)
+{
+ uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+ uint64_t dco_central_freq[3] = {8400000000ULL,
+ 9000000000ULL,
+ 9600000000ULL};
+ static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 24, 28, 30, 32, 36, 40, 42, 44,
+ 48, 52, 54, 56, 60, 64, 66, 68,
+ 70, 72, 76, 78, 80, 84, 88, 90,
+ 92, 96, 98 };
+ static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
+ static const struct {
+ const int *list;
+ int n_dividers;
+ } dividers[] = {
+ { even_dividers, ARRAY_SIZE(even_dividers) },
+ { odd_dividers, ARRAY_SIZE(odd_dividers) },
+ };
+ struct skl_wrpll_context ctx;
+ unsigned int dco, d, i;
+ unsigned int p0, p1, p2;
+
+ skl_wrpll_context_init(&ctx);
+
+ for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+ for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+ for (i = 0; i < dividers[d].n_dividers; i++) {
+ unsigned int p = dividers[d].list[i];
+ uint64_t dco_freq = p * afe_clock;
+
+ skl_wrpll_try_divider(&ctx,
+ dco_central_freq[dco],
+ dco_freq,
+ p);
+ /*
+				 * Skip the remaining dividers once we have
+				 * found the definitive one: a deviation of 0
+				 * cannot be improved upon.
+ */
+ if (ctx.min_deviation == 0)
+ goto skip_remaining_dividers;
+ }
+ }
+
+skip_remaining_dividers:
+ /*
+ * If a solution is found with an even divider, prefer
+ * this one.
+ */
+ if (d == 0 && ctx.p)
+ break;
+ }
+
+ if (!ctx.p) {
+ DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
+ return false;
+ }
+
+ /*
+ * gcc incorrectly analyses that these can be used without being
+ * initialized. To be fair, it's hard to guess.
+ */
+ p0 = p1 = p2 = 0;
+ skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
+ skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
+ p0, p1, p2);
+
+ return true;
+}
+
+static struct intel_shared_dpll *
+skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct intel_shared_dpll *pll;
+ uint32_t ctrl1, cfgcr1, cfgcr2;
+ int clock = crtc_state->port_clock;
+
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
+ */
+
+ ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+
+ if (encoder->type == INTEL_OUTPUT_HDMI) {
+ struct skl_wrpll_params wrpll_params = { 0, };
+
+ ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+ if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+ return NULL;
+
+ cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+ DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+ wrpll_params.dco_integer;
+
+ cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+ DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+ DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+ DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+ wrpll_params.central_freq;
+ } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ encoder->type == INTEL_OUTPUT_DP_MST ||
+ encoder->type == INTEL_OUTPUT_EDP) {
+ switch (crtc_state->port_clock / 2) {
+ case 81000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
+ break;
+ case 135000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
+ break;
+ case 270000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
+ break;
+ /* eDP 1.4 rates */
+ case 162000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
+ break;
+		/* TBD: for the 2.16 GHz and 4.32 GHz DP link rates the VCO is
+		 * 8640 MHz, which forces a CDCLK change; handle it by
+		 * disabling pipes and re-enabling them. */
+ case 108000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
+ break;
+ case 216000:
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
+ break;
+ }
+
+ cfgcr1 = cfgcr2 = 0;
+ } else {
+ return NULL;
+ }
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+ crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+ crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
+
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ pll = intel_find_shared_dpll(crtc, crtc_state,
+ DPLL_ID_SKL_DPLL0,
+ DPLL_ID_SKL_DPLL0);
+ else
+ pll = intel_find_shared_dpll(crtc, crtc_state,
+ DPLL_ID_SKL_DPLL1,
+ DPLL_ID_SKL_DPLL3);
+ if (!pll)
+ return NULL;
+
+ crtc_state->ddi_pll_sel = pll->id;
+
+ intel_reference_shared_dpll(pll, crtc_state);
+
+ return pll;
+}
+
+static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
+ .enable = skl_ddi_pll_enable,
+ .disable = skl_ddi_pll_disable,
+ .get_hw_state = skl_ddi_pll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
+ .enable = skl_ddi_dpll0_enable,
+ .disable = skl_ddi_dpll0_disable,
+ .get_hw_state = skl_ddi_dpll0_get_hw_state,
+};
+
+static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t temp;
+ enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+
+ /* Non-SSC reference */
+ temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp |= PORT_PLL_REF_SEL;
+ I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+
+ /* Disable 10 bit clock */
+ temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+ I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+
+ /* Write P1 & P2 */
+ temp = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
+ temp |= pll->config.hw_state.ebb0;
+ I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp);
+
+ /* Write M2 integer */
+ temp = I915_READ(BXT_PORT_PLL(port, 0));
+ temp &= ~PORT_PLL_M2_MASK;
+ temp |= pll->config.hw_state.pll0;
+ I915_WRITE(BXT_PORT_PLL(port, 0), temp);
+
+ /* Write N */
+ temp = I915_READ(BXT_PORT_PLL(port, 1));
+ temp &= ~PORT_PLL_N_MASK;
+ temp |= pll->config.hw_state.pll1;
+ I915_WRITE(BXT_PORT_PLL(port, 1), temp);
+
+ /* Write M2 fraction */
+ temp = I915_READ(BXT_PORT_PLL(port, 2));
+ temp &= ~PORT_PLL_M2_FRAC_MASK;
+ temp |= pll->config.hw_state.pll2;
+ I915_WRITE(BXT_PORT_PLL(port, 2), temp);
+
+ /* Write M2 fraction enable */
+ temp = I915_READ(BXT_PORT_PLL(port, 3));
+ temp &= ~PORT_PLL_M2_FRAC_ENABLE;
+ temp |= pll->config.hw_state.pll3;
+ I915_WRITE(BXT_PORT_PLL(port, 3), temp);
+
+ /* Write coeff */
+ temp = I915_READ(BXT_PORT_PLL(port, 6));
+ temp &= ~PORT_PLL_PROP_COEFF_MASK;
+ temp &= ~PORT_PLL_INT_COEFF_MASK;
+ temp &= ~PORT_PLL_GAIN_CTL_MASK;
+ temp |= pll->config.hw_state.pll6;
+ I915_WRITE(BXT_PORT_PLL(port, 6), temp);
+
+ /* Write calibration val */
+ temp = I915_READ(BXT_PORT_PLL(port, 8));
+ temp &= ~PORT_PLL_TARGET_CNT_MASK;
+ temp |= pll->config.hw_state.pll8;
+ I915_WRITE(BXT_PORT_PLL(port, 8), temp);
+
+ temp = I915_READ(BXT_PORT_PLL(port, 9));
+ temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
+ temp |= pll->config.hw_state.pll9;
+ I915_WRITE(BXT_PORT_PLL(port, 9), temp);
+
+ temp = I915_READ(BXT_PORT_PLL(port, 10));
+ temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
+ temp &= ~PORT_PLL_DCO_AMP_MASK;
+ temp |= pll->config.hw_state.pll10;
+ I915_WRITE(BXT_PORT_PLL(port, 10), temp);
+
+ /* Recalibrate with new settings */
+ temp = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ temp |= PORT_PLL_RECALIBRATE;
+ I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+ temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
+ temp |= pll->config.hw_state.ebb4;
+ I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp);
+
+ /* Enable PLL */
+ temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp |= PORT_PLL_ENABLE;
+ I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+
+ if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
+ 200))
+ DRM_ERROR("PLL %d not locked\n", port);
+
+ /*
+	 * While we write to the group register to program all lanes at once,
+	 * we can only read back individual lane registers; we pick lanes 0/1.
+ */
+ temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+ temp &= ~LANE_STAGGER_MASK;
+ temp &= ~LANESTAGGER_STRAP_OVRD;
+ temp |= pll->config.hw_state.pcsdw12;
+ I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp);
+}
+
+static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+ uint32_t temp;
+
+ temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp &= ~PORT_PLL_ENABLE;
+ I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+}
+
+static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
+ uint32_t val;
+ bool ret;
+
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ ret = false;
+
+ val = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ if (!(val & PORT_PLL_ENABLE))
+ goto out;
+
+ hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
+ hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
+
+ hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port));
+ hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
+
+ hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0));
+ hw_state->pll0 &= PORT_PLL_M2_MASK;
+
+ hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1));
+ hw_state->pll1 &= PORT_PLL_N_MASK;
+
+ hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2));
+ hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
+
+ hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3));
+ hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
+
+ hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6));
+ hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
+ PORT_PLL_INT_COEFF_MASK |
+ PORT_PLL_GAIN_CTL_MASK;
+
+ hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8));
+ hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
+
+ hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9));
+ hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
+
+ hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10));
+ hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
+ PORT_PLL_DCO_AMP_MASK;
+
+ /*
+	 * While we write to the group register to program all lanes at once,
+	 * we can only read back individual lane registers. All lanes get the
+	 * same config, so just read lanes 0/1 and note if lanes 2/3 differ.
+ */
+ hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
+ if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
+ DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
+ hw_state->pcsdw12,
+ I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
+ hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
+
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return ret;
+}
+
+/* bxt clock parameters */
+struct bxt_clk_div {
+ int clock;
+ uint32_t p1;
+ uint32_t p2;
+ uint32_t m2_int;
+ uint32_t m2_frac;
+ bool m2_frac_en;
+ uint32_t n;
+};
+
+/* pre-calculated values for DP linkrates */
+static const struct bxt_clk_div bxt_dp_clk_val[] = {
+ {162000, 4, 2, 32, 1677722, 1, 1},
+ {270000, 4, 1, 27, 0, 0, 1},
+ {540000, 2, 1, 27, 0, 0, 1},
+ {216000, 3, 2, 32, 1677722, 1, 1},
+ {243000, 4, 1, 24, 1258291, 1, 1},
+ {324000, 4, 1, 32, 1677722, 1, 1},
+ {432000, 3, 1, 32, 1677722, 1, 1}
+};
+
+static struct intel_shared_dpll *
+bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll;
+ enum intel_dpll_id i;
+ struct intel_digital_port *intel_dig_port;
+ struct bxt_clk_div clk_div = {0};
+ int vco = 0;
+ uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
+ uint32_t lanestagger;
+ int clock = crtc_state->port_clock;
+
+ if (encoder->type == INTEL_OUTPUT_HDMI) {
+ intel_clock_t best_clock;
+
+ /* Calculate HDMI div */
+ /*
+ * FIXME: tie the following calculation into
+ * i9xx_crtc_compute_clock
+ */
+ if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
+ DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
+ clock, pipe_name(crtc->pipe));
+ return NULL;
+ }
+
+ clk_div.p1 = best_clock.p1;
+ clk_div.p2 = best_clock.p2;
+ WARN_ON(best_clock.m1 != 2);
+ clk_div.n = best_clock.n;
+ clk_div.m2_int = best_clock.m2 >> 22;
+ clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1);
+ clk_div.m2_frac_en = clk_div.m2_frac != 0;
+
+ vco = best_clock.vco;
+ } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ encoder->type == INTEL_OUTPUT_EDP) {
+ int i;
+
+ clk_div = bxt_dp_clk_val[0];
+ for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
+ if (bxt_dp_clk_val[i].clock == clock) {
+ clk_div = bxt_dp_clk_val[i];
+ break;
+ }
+ }
+ vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2;
+ }
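+	/*
+	 * E.g. a 270000 kHz DP link uses p1 = 4, p2 = 1 from the table
+	 * above, giving vco = 270000 * 10 / 2 * 4 * 1 = 5400000 and
+	 * selecting the dedicated vco == 5400000 coefficients below.
+	 */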
+
+ if (vco >= 6200000 && vco <= 6700000) {
+ prop_coef = 4;
+ int_coef = 9;
+ gain_ctl = 3;
+ targ_cnt = 8;
+ } else if ((vco > 5400000 && vco < 6200000) ||
+ (vco >= 4800000 && vco < 5400000)) {
+ prop_coef = 5;
+ int_coef = 11;
+ gain_ctl = 3;
+ targ_cnt = 9;
+ } else if (vco == 5400000) {
+ prop_coef = 3;
+ int_coef = 8;
+ gain_ctl = 1;
+ targ_cnt = 9;
+ } else {
+ DRM_ERROR("Invalid VCO\n");
+ return NULL;
+ }
+
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ if (clock > 270000)
+ lanestagger = 0x18;
+ else if (clock > 135000)
+ lanestagger = 0x0d;
+ else if (clock > 67000)
+ lanestagger = 0x07;
+ else if (clock > 33000)
+ lanestagger = 0x04;
+ else
+ lanestagger = 0x02;
+
+ crtc_state->dpll_hw_state.ebb0 =
+ PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2);
+ crtc_state->dpll_hw_state.pll0 = clk_div.m2_int;
+ crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n);
+ crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac;
+
+ if (clk_div.m2_frac_en)
+ crtc_state->dpll_hw_state.pll3 =
+ PORT_PLL_M2_FRAC_ENABLE;
+
+ crtc_state->dpll_hw_state.pll6 =
+ prop_coef | PORT_PLL_INT_COEFF(int_coef);
+ crtc_state->dpll_hw_state.pll6 |=
+ PORT_PLL_GAIN_CTL(gain_ctl);
+
+ crtc_state->dpll_hw_state.pll8 = targ_cnt;
+
+ crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;
+
+ crtc_state->dpll_hw_state.pll10 =
+ PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
+ | PORT_PLL_DCO_AMP_OVR_EN_H;
+
+ crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE;
+
+ crtc_state->dpll_hw_state.pcsdw12 =
+ LANESTAGGER_STRAP_OVRD | lanestagger;
+
+ intel_dig_port = enc_to_dig_port(&encoder->base);
+
+ /* 1:1 mapping between ports and PLLs */
+ i = (enum intel_dpll_id) intel_dig_port->port;
+ pll = intel_get_shared_dpll_by_id(dev_priv, i);
+
+ DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
+ crtc->base.base.id, pll->name);
+
+ intel_reference_shared_dpll(pll, crtc_state);
+
+ /* shared DPLL id 0 is DPLL A */
+ crtc_state->ddi_pll_sel = pll->id;
+
+ return pll;
+}
+
+static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
+ .enable = bxt_ddi_pll_enable,
+ .disable = bxt_ddi_pll_disable,
+ .get_hw_state = bxt_ddi_pll_get_hw_state,
+};
+
+static void intel_ddi_pll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val = I915_READ(LCPLL_CTL);
+
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ int cdclk_freq;
+
+ cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+ dev_priv->skl_boot_cdclk = cdclk_freq;
+ if (skl_sanitize_cdclk(dev_priv))
+ DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
+ if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
+ DRM_ERROR("LCPLL1 is disabled\n");
+ } else if (!IS_BROXTON(dev_priv)) {
+ /*
+ * The LCPLL register should be turned on by the BIOS. For now
+ * let's just check its state and print errors in case
+ * something is wrong. Don't even try to turn it on.
+ */
+
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+ }
+}
+
+struct dpll_info {
+ const char *name;
+ const int id;
+ const struct intel_shared_dpll_funcs *funcs;
+ uint32_t flags;
+};
+
+struct intel_dpll_mgr {
+ const struct dpll_info *dpll_info;
+
+ struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder);
+};
+
+static const struct dpll_info pch_plls[] = {
+ { "PCH DPLL A", DPLL_ID_PCH_PLL_A, &ibx_pch_dpll_funcs, 0 },
+ { "PCH DPLL B", DPLL_ID_PCH_PLL_B, &ibx_pch_dpll_funcs, 0 },
+ { NULL, -1, NULL, 0 },
+};
+
+static const struct intel_dpll_mgr pch_pll_mgr = {
+ .dpll_info = pch_plls,
+ .get_dpll = ibx_get_dpll,
+};
+
+static const struct dpll_info hsw_plls[] = {
+ { "WRPLL 1", DPLL_ID_WRPLL1, &hsw_ddi_wrpll_funcs, 0 },
+ { "WRPLL 2", DPLL_ID_WRPLL2, &hsw_ddi_wrpll_funcs, 0 },
+ { "SPLL", DPLL_ID_SPLL, &hsw_ddi_spll_funcs, 0 },
+ { "LCPLL 810", DPLL_ID_LCPLL_810, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 1350", DPLL_ID_LCPLL_1350, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 2700", DPLL_ID_LCPLL_2700, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
+ { NULL, -1, NULL, },
+};
+
+static const struct intel_dpll_mgr hsw_pll_mgr = {
+ .dpll_info = hsw_plls,
+ .get_dpll = hsw_get_dpll,
+};
+
+static const struct dpll_info skl_plls[] = {
+ { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
+ { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
+ { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
+ { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
+ { NULL, -1, NULL, },
+};
+
+static const struct intel_dpll_mgr skl_pll_mgr = {
+ .dpll_info = skl_plls,
+ .get_dpll = skl_get_dpll,
+};
+
+static const struct dpll_info bxt_plls[] = {
+ { "PORT PLL A", DPLL_ID_SKL_DPLL0, &bxt_ddi_pll_funcs, 0 },
+ { "PORT PLL B", DPLL_ID_SKL_DPLL1, &bxt_ddi_pll_funcs, 0 },
+ { "PORT PLL C", DPLL_ID_SKL_DPLL2, &bxt_ddi_pll_funcs, 0 },
+ { NULL, -1, NULL, },
+};
+
+static const struct intel_dpll_mgr bxt_pll_mgr = {
+ .dpll_info = bxt_plls,
+ .get_dpll = bxt_get_dpll,
+};
+
+void intel_shared_dpll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_dpll_mgr *dpll_mgr = NULL;
+ const struct dpll_info *dpll_info;
+ int i;
+
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ dpll_mgr = &skl_pll_mgr;
+ else if (IS_BROXTON(dev))
+ dpll_mgr = &bxt_pll_mgr;
+ else if (HAS_DDI(dev))
+ dpll_mgr = &hsw_pll_mgr;
+ else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ dpll_mgr = &pch_pll_mgr;
+
+ if (!dpll_mgr) {
+ dev_priv->num_shared_dpll = 0;
+ return;
+ }
+
+ dpll_info = dpll_mgr->dpll_info;
+
+ for (i = 0; dpll_info[i].id >= 0; i++) {
+ WARN_ON(i != dpll_info[i].id);
+
+ dev_priv->shared_dplls[i].id = dpll_info[i].id;
+ dev_priv->shared_dplls[i].name = dpll_info[i].name;
+ dev_priv->shared_dplls[i].funcs = *dpll_info[i].funcs;
+ dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
+ }
+
+ dev_priv->dpll_mgr = dpll_mgr;
+ dev_priv->num_shared_dpll = i;
+ mutex_init(&dev_priv->dpll_lock);
+
+ BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+
+ /* FIXME: Move this to a more suitable place */
+ if (HAS_DDI(dev))
+ intel_ddi_pll_init(dev);
+}
+
+struct intel_shared_dpll *
+intel_get_shared_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+
+ if (WARN_ON(!dpll_mgr))
+ return NULL;
+
+ return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
+}
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
new file mode 100644
index 000000000..89c5ada1a
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright © 2012-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DPLL_MGR_H_
+#define _INTEL_DPLL_MGR_H_
+
+/* FIXME: Move this to a more appropriate place. */
+#define abs_diff(a, b) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ (void) (&__a == &__b); \
+ __a > __b ? (__a - __b) : (__b - __a); })
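+/*
+ * E.g. abs_diff(3, 7) evaluates to 4; the (&__a == &__b) comparison is
+ * only there to make the compiler warn when a and b differ in type.
+ */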
+
+struct drm_i915_private;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_encoder;
+
+struct intel_shared_dpll;
+struct intel_dpll_mgr;
+
+enum intel_dpll_id {
+ DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
+ /* real shared dpll ids must be >= 0 */
+ DPLL_ID_PCH_PLL_A = 0,
+ DPLL_ID_PCH_PLL_B = 1,
+ /* hsw/bdw */
+ DPLL_ID_WRPLL1 = 0,
+ DPLL_ID_WRPLL2 = 1,
+ DPLL_ID_SPLL = 2,
+ DPLL_ID_LCPLL_810 = 3,
+ DPLL_ID_LCPLL_1350 = 4,
+ DPLL_ID_LCPLL_2700 = 5,
+
+ /* skl */
+ DPLL_ID_SKL_DPLL0 = 0,
+ DPLL_ID_SKL_DPLL1 = 1,
+ DPLL_ID_SKL_DPLL2 = 2,
+ DPLL_ID_SKL_DPLL3 = 3,
+};
+#define I915_NUM_PLLS 6
+
+/* Inform the state checker that the DPLL is kept enabled even if not
+ * in use by any CRTC.
+ */
+#define INTEL_DPLL_ALWAYS_ON (1 << 0)
+
+struct intel_dpll_hw_state {
+ /* i9xx, pch plls */
+ uint32_t dpll;
+ uint32_t dpll_md;
+ uint32_t fp0;
+ uint32_t fp1;
+
+ /* hsw, bdw */
+ uint32_t wrpll;
+ uint32_t spll;
+
+ /* skl */
+ /*
+	 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in
+ * lower part of ctrl1 and they get shifted into position when writing
+ * the register. This allows us to easily compare the state to share
+ * the DPLL.
+ */
+ uint32_t ctrl1;
+ /* HDMI only, 0 when used for DP */
+ uint32_t cfgcr1, cfgcr2;
+
+ /* bxt */
+ uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
+ pcsdw12;
+};
+
+struct intel_shared_dpll_config {
+ unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
+ struct intel_dpll_hw_state hw_state;
+};
+
+struct intel_shared_dpll_funcs {
+ /* The mode_set hook is optional and should be used together with the
+ * intel_prepare_shared_dpll function. */
+ void (*mode_set)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+ void (*enable)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+ void (*disable)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+ bool (*get_hw_state)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state);
+};
+
+struct intel_shared_dpll {
+ struct intel_shared_dpll_config config;
+
+ unsigned active_mask; /* mask of active CRTCs (i.e. DPMS on) */
+ bool on; /* is the PLL actually active? Disabled during modeset */
+ const char *name;
+ /* should match the index in the dev_priv->shared_dplls array */
+ enum intel_dpll_id id;
+
+ struct intel_shared_dpll_funcs funcs;
+
+ uint32_t flags;
+};
+
+#define SKL_DPLL0 0
+#define SKL_DPLL1 1
+#define SKL_DPLL2 2
+#define SKL_DPLL3 3
+
+/* shared dpll functions */
+struct intel_shared_dpll *
+intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id id);
+enum intel_dpll_id
+intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+void
+intel_shared_dpll_config_get(struct intel_shared_dpll_config *config,
+ struct intel_shared_dpll *pll,
+ struct intel_crtc *crtc);
+void
+intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
+ struct intel_shared_dpll *pll,
+ struct intel_crtc *crtc);
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state);
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *state,
+ struct intel_encoder *encoder);
+void intel_prepare_shared_dpll(struct intel_crtc *crtc);
+void intel_enable_shared_dpll(struct intel_crtc *crtc);
+void intel_disable_shared_dpll(struct intel_crtc *crtc);
+void intel_shared_dpll_commit(struct drm_atomic_state *state);
+void intel_shared_dpll_init(struct drm_device *dev);
+
+
+#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8dd2cc564..f7f0f0181 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -45,9 +45,13 @@
* contexts. Note that it's important that we check the condition again after
* having timed out, since the timeout could be due to preemption or similar and
* we've never had a chance to check the condition before the timeout.
+ *
+ * TODO: When modesetting has fully transitioned to atomic, the below
+ * drm_can_sleep() can be removed and in_atomic()/!in_atomic() asserts
+ * added.
*/
-#define _wait_for(COND, MS, W) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
+#define _wait_for(COND, US, W) ({ \
+ unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \
int ret__ = 0; \
while (!(COND)) { \
if (time_after(jiffies, timeout__)) { \
@@ -56,7 +60,7 @@
break; \
} \
if ((W) && drm_can_sleep()) { \
- usleep_range((W)*1000, (W)*2000); \
+ usleep_range((W), (W)*2); \
} else { \
cpu_relax(); \
} \
@@ -64,10 +68,40 @@
ret__; \
})
-#define wait_for(COND, MS) _wait_for(COND, MS, 1)
-#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
-#define wait_for_atomic_us(COND, US) _wait_for((COND), \
- DIV_ROUND_UP((US), 1000), 0)
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000)
+#define wait_for_us(COND, US) _wait_for((COND), (US), 1)
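+
+/*
+ * So wait_for(cond, 10) still polls for up to 10 ms, now sleeping 1-2 ms
+ * between checks, while wait_for_us(cond, 10) polls for up to 10 us.
+ */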
+
+/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
+#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
+# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic())
+#else
+# define _WAIT_FOR_ATOMIC_CHECK do { } while (0)
+#endif
+
+#define _wait_for_atomic(COND, US) ({ \
+ unsigned long end__; \
+ int ret__ = 0; \
+ _WAIT_FOR_ATOMIC_CHECK; \
+ BUILD_BUG_ON((US) > 50000); \
+ end__ = (local_clock() >> 10) + (US) + 1; \
+ while (!(COND)) { \
+ if (time_after((unsigned long)(local_clock() >> 10), end__)) { \
+ /* Unlike the regular wait_for(), this atomic variant \
+ * cannot be preempted (and we'll just ignore the issue\
+ * of irq interruptions) and so we know that no time \
+ * has passed since the last check of COND and can \
+ * immediately report the timeout. \
+ */ \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ cpu_relax(); \
+ } \
+ ret__; \
+})
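+
+/*
+ * local_clock() returns nanoseconds; the >> 10 above approximates a
+ * division by 1000 (about 2% off, fine for a timeout), and the
+ * BUILD_BUG_ON caps atomic busy-waits at 50 ms.
+ */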
+
+#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000)
+#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US))
#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
@@ -119,6 +153,7 @@ enum intel_output_type {
struct intel_framebuffer {
struct drm_framebuffer base;
struct drm_i915_gem_object *obj;
+ struct intel_rotation_info rot_info;
};
struct intel_fbdev {
@@ -261,6 +296,12 @@ struct intel_atomic_state {
struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
struct intel_wm_config wm_config;
+
+ /*
+ * Current watermarks can't be trusted during hardware readout, so
+ * don't bother calculating intermediate watermarks.
+ */
+ bool skip_intermediate_wm;
};
struct intel_plane_state {
@@ -350,6 +391,7 @@ struct intel_crtc_scaler_state {
struct intel_pipe_wm {
struct intel_wm_level wm[5];
+ struct intel_wm_level raw_wm[5];
uint32_t linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
@@ -377,6 +419,7 @@ struct intel_crtc_state {
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
unsigned long quirks;
+ unsigned fb_bits; /* framebuffers to flip */
bool update_pipe; /* can a fast modeset be performed? */
bool disable_cxsr;
bool update_wm_pre, update_wm_post; /* watermarks are updated */
@@ -395,7 +438,8 @@ struct intel_crtc_state {
bool has_infoframe;
/* CPU Transcoder for the pipe. Currently this can only differ from the
- * pipe on Haswell (where we have a special eDP transcoder). */
+ * pipe on Haswell and later (where we have a special eDP transcoder)
+ * and Broxton (where we have special DSI transcoders). */
enum transcoder cpu_transcoder;
/*
@@ -442,8 +486,8 @@ struct intel_crtc_state {
* haswell. */
struct dpll dpll;
- /* Selected dpll when shared or DPLL_ID_PRIVATE. */
- enum intel_dpll_id shared_dpll;
+ /* Selected dpll when shared or NULL. */
+ struct intel_shared_dpll *shared_dpll;
/*
* - PORT_CLK_SEL for DDI ports on HSW/BDW.
@@ -454,6 +498,11 @@ struct intel_crtc_state {
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
+ /* DSI PLL registers */
+ struct {
+ u32 ctrl, div;
+ } dsi_pll;
+
int pipe_bpp;
struct intel_link_m_n dp_m_n;
@@ -511,14 +560,33 @@ struct intel_crtc_state {
struct {
/*
- * optimal watermarks, programmed post-vblank when this state
- * is committed
+ * Optimal watermarks, programmed post-vblank when this state
+ * is committed.
*/
union {
struct intel_pipe_wm ilk;
struct skl_pipe_wm skl;
} optimal;
+
+ /*
+ * Intermediate watermarks; these can be programmed immediately
+ * since they satisfy both the current configuration we're
+ * switching away from and the new configuration we're switching
+ * to.
+ */
+ struct intel_pipe_wm intermediate;
+
+ /*
+ * Platforms with two-step watermark programming will need to
+ * update watermark programming post-vblank to switch from the
+ * safe intermediate watermarks to the optimal final
+ * watermarks.
+ */
+ bool need_postvbl_update;
} wm;
+
+ /* Gamma mode programmed on the pipe */
+ uint32_t gamma_mode;
};
struct vlv_wm_state {
@@ -538,23 +606,6 @@ struct intel_mmio_flip {
unsigned int rotation;
};
-/*
- * Tracking of operations that need to be performed at the beginning/end of an
- * atomic commit, outside the atomic section where interrupts are disabled.
- * These are generally operations that grab mutexes or might otherwise sleep
- * and thus can't be run with interrupts disabled.
- */
-struct intel_crtc_atomic_commit {
- /* Sleepable operations to perform before commit */
-
- /* Sleepable operations to perform after commit */
- unsigned fb_bits;
- bool post_enable_primary;
-
- /* Sleepable operations to perform before and after commit */
- bool update_fbc;
-};
-
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -601,6 +652,7 @@ struct intel_crtc {
struct intel_pipe_wm ilk;
struct skl_pipe_wm skl;
} active;
+
/* allow CxSR on this pipe */
bool cxsr_allowed;
} wm;
@@ -614,8 +666,6 @@ struct intel_crtc {
int scanline_start;
} debug;
- struct intel_crtc_atomic_commit atomic;
-
/* scalers available on this crtc */
int num_scalers;
@@ -756,7 +806,9 @@ struct intel_dp {
uint32_t DP;
int link_rate;
uint8_t lane_count;
+ uint8_t sink_count;
bool has_audio;
+ bool detect_done;
enum hdmi_force_audio force_audio;
bool limited_color_range;
bool color_range_auto;
@@ -834,7 +886,7 @@ struct intel_dp_mst_encoder {
struct intel_encoder base;
enum pipe pipe;
struct intel_digital_port *primary;
- void *port; /* store this opaque as its illegal to dereference it */
+ struct intel_connector *connector;
};
static inline enum dpio_channel
@@ -1010,7 +1062,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_device *dev, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
-void intel_ddi_pll_init(struct drm_device *dev);
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
@@ -1052,17 +1103,19 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, uint32_t pixel_format);
/* intel_audio.c */
-void intel_init_audio(struct drm_device *dev);
+void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
void intel_audio_codec_enable(struct intel_encoder *encoder);
void intel_audio_codec_disable(struct intel_encoder *encoder);
void i915_audio_component_init(struct drm_i915_private *dev_priv);
void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
/* intel_display.c */
+int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg, int ref_freq);
extern const struct drm_plane_funcs intel_plane_funcs;
+void intel_init_display_hooks(struct drm_i915_private *dev_priv);
+unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
-int intel_pch_rawclk(struct drm_device *dev);
-int intel_hrawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
@@ -1107,9 +1160,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
-int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
- struct drm_framebuffer *fb,
- const struct drm_plane_state *plane_state);
+int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ unsigned int rotation);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1145,19 +1197,13 @@ intel_rotation_90_or_270(unsigned int rotation)
void intel_create_rotation_property(struct drm_device *dev,
struct intel_plane *plane);
-/* shared dpll functions */
-struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
-void assert_shared_dpll(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- bool state);
-#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
-#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *state);
+void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe);
int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
+int lpt_get_iclkip(struct drm_i915_private *dev_priv);
/* modesetting asserts */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1166,6 +1212,9 @@ void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
+#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
+#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
@@ -1173,21 +1222,24 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
- int *x, int *y,
- uint64_t fb_modifier,
- unsigned int cpp,
- unsigned int pitch);
+u32 intel_compute_tile_offset(int *x, int *y,
+ const struct drm_framebuffer *fb, int plane,
+ unsigned int pitch,
+ unsigned int rotation);
void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void broxton_init_cdclk(struct drm_device *dev);
-void broxton_uninit_cdclk(struct drm_device *dev);
-void broxton_ddi_phy_init(struct drm_device *dev);
-void broxton_ddi_phy_uninit(struct drm_device *dev);
+void broxton_init_cdclk(struct drm_i915_private *dev_priv);
+void broxton_uninit_cdclk(struct drm_i915_private *dev_priv);
+bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv);
+void broxton_ddi_phy_init(struct drm_i915_private *dev_priv);
+void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv);
+void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv);
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+void gen9_enable_dc5(struct drm_i915_private *dev_priv);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
@@ -1197,9 +1249,6 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-void
-ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
- int dotclock);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
intel_clock_t *best_clock);
int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock);
@@ -1227,11 +1276,13 @@ u32 skl_plane_ctl_rotation(unsigned int rotation);
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
-bool intel_csr_load_program(struct drm_i915_private *);
+void intel_csr_load_program(struct drm_i915_private *);
void intel_csr_ucode_fini(struct drm_i915_private *);
+void intel_csr_ucode_suspend(struct drm_i915_private *);
+void intel_csr_ucode_resume(struct drm_i915_private *);
/* intel_dp.c */
-void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
+bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -1269,7 +1320,6 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port);
-void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1427,8 +1477,8 @@ int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_fini(struct drm_i915_private *);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
-void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv);
-void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv);
+void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);
@@ -1545,6 +1595,7 @@ void intel_suspend_hw(struct drm_device *dev);
int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_init_pm(struct drm_device *dev);
+void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
@@ -1569,6 +1620,7 @@ void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
+bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);
/* intel_sdvo.c */
@@ -1610,6 +1662,18 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
return to_intel_crtc_state(crtc_state);
}
+
+static inline struct intel_plane_state *
+intel_atomic_get_existing_plane_state(struct drm_atomic_state *state,
+ struct intel_plane *plane)
+{
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_existing_plane_state(state, &plane->base);
+
+ return to_intel_plane_state(plane_state);
+}
+
int intel_atomic_setup_scalers(struct drm_device *dev,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
@@ -1621,4 +1685,10 @@ void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+/* intel_color.c */
+void intel_color_init(struct drm_crtc *crtc);
+int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
+void intel_color_set_csc(struct drm_crtc_state *crtc_state);
+void intel_color_load_luts(struct drm_crtc_state *crtc_state);
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 01b8e9f4c..4756ef639 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -46,6 +46,40 @@ static const struct {
},
};
+/* return pixels in terms of txbyteclkhs */
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
+ u16 burst_mode_ratio)
+{
+ return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
+ 8 * 100), lane_count);
+}
+
+/* return pixels equivalent to txbyteclkhs */
+static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count,
+ u16 burst_mode_ratio)
+{
+ return DIV_ROUND_UP((clk_hs * lane_count * 8 * 100),
+ (bpp * burst_mode_ratio));
+}
+
+enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
+{
+ /* It just so happens the VBT matches register contents. */
+ switch (fmt) {
+ case VID_MODE_FORMAT_RGB888:
+ return MIPI_DSI_FMT_RGB888;
+ case VID_MODE_FORMAT_RGB666:
+ return MIPI_DSI_FMT_RGB666;
+ case VID_MODE_FORMAT_RGB666_PACKED:
+ return MIPI_DSI_FMT_RGB666_PACKED;
+ case VID_MODE_FORMAT_RGB565:
+ return MIPI_DSI_FMT_RGB565;
+ default:
+ MISSING_CASE(fmt);
+ return MIPI_DSI_FMT_RGB666;
+ }
+}
+
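A quick standalone round trip through the two helpers just added, showing why the BXT state readout later in this file has to compensate for round-up error when converting back to pixels; the bpp/lane/porch numbers are invented for the example.

/*
 * Standalone copy of the two conversion helpers above, showing that
 * DIV_ROUND_UP in both directions can make a round trip gain pixels.
 * Sample values are illustrative only.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

typedef unsigned short u16;

static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
		       u16 burst_mode_ratio)
{
	return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
					 8 * 100), lane_count);
}

static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count,
				   u16 burst_mode_ratio)
{
	return DIV_ROUND_UP((clk_hs * lane_count * 8 * 100),
			    (bpp * burst_mode_ratio));
}

int main(void)
{
	int bpp = 24, lanes = 4;	/* RGB888 over 4 lanes */
	u16 ratio = 100;		/* no burst overcommit */
	u16 hfp = 42;			/* sample front porch in pixels */
	u16 clk = txbyteclkhs(hfp, bpp, lanes, ratio);
	u16 back = pixels_from_txbyteclkhs(clk, bpp, lanes, ratio);

	/* 42 px * 24 bpp / 8 = 126 bytes -> ceil(126/4) = 32 clk;
	 * 32 clk * 4 * 8 / 24 = ceil(42.67) = 43 px: one pixel gained. */
	printf("hfp %u px -> %u txbyteclkhs -> %u px\n", hfp, clk, back);
	return 0;
}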
static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
@@ -268,22 +302,47 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ int ret;
DRM_DEBUG_KMS("\n");
pipe_config->has_dsi_encoder = true;
- if (fixed_mode)
+ if (fixed_mode) {
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ if (HAS_GMCH_DISPLAY(dev_priv))
+ intel_gmch_panel_fitting(crtc, pipe_config,
+ intel_connector->panel.fitting_mode);
+ else
+ intel_pch_panel_fitting(crtc, pipe_config,
+ intel_connector->panel.fitting_mode);
+ }
+
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
+ if (IS_BROXTON(dev_priv)) {
+ /* Dual link goes to DSI transcoder A. */
+ if (intel_dsi->ports == BIT(PORT_C))
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
+ else
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_A;
+ }
+
+ ret = intel_compute_dsi_pll(encoder, pipe_config);
+ if (ret)
+ return false;
+
+ pipe_config->clock_set = true;
+
return true;
}
@@ -403,7 +462,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
temp &= ~LANE_CONFIGURATION_MASK;
temp &= ~DUAL_LINK_MODE_MASK;
- if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) {
+ if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
temp |= (intel_dsi->dual_link - 1)
<< DUAL_LINK_MODE_SHIFT;
temp |= intel_crtc->pipe ?
@@ -471,14 +530,19 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum port port;
u32 tmp;
DRM_DEBUG_KMS("\n");
- intel_enable_dsi_pll(encoder);
+ /*
+ * The BIOS may leave the PLL in a wonky state where it doesn't
+ * lock. It needs to be fully powered down to fix it.
+ */
+ intel_disable_dsi_pll(encoder);
+ intel_enable_dsi_pll(encoder, crtc->config);
+
intel_dsi_prepare(encoder);
/* Panel Enable over CRC PMIC */
@@ -488,19 +552,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
msleep(intel_dsi->panel_on_delay);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
- /*
- * Disable DPOunit clock gating, can stall pipe
- * and we need DPLL REFA always enabled
- */
- tmp = I915_READ(DPLL(pipe));
- tmp |= DPLL_REF_CLK_ENABLE_VLV;
- I915_WRITE(DPLL(pipe), tmp);
-
- /* update the hw state for DPLL */
- intel_crtc->config->dpll_hw_state.dpll =
- DPLL_INTEGRATED_REF_CLK_VLV |
- DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
-
+ /* Disable DPOunit clock gating, can stall pipe */
tmp = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, tmp);
@@ -652,11 +704,16 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
drm_panel_unprepare(intel_dsi->panel);
msleep(intel_dsi->panel_off_delay);
- msleep(intel_dsi->panel_pwr_cycle_delay);
/* Panel Disable over CRC PMIC */
if (intel_dsi->gpio_panel)
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
+
+ /*
+ * FIXME As we do with eDP, just make a note of the time here
+ * and perform the wait before the next panel power on.
+ */
+ msleep(intel_dsi->panel_pwr_cycle_delay);
}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -667,7 +724,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
enum port port;
- bool ret;
+ bool active = false;
DRM_DEBUG_KMS("\n");
@@ -675,55 +732,234 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- ret = false;
+ /*
+ * On Broxton the PLL needs to be enabled with a valid divider
+ * configuration, otherwise accessing DSI registers will hang the
+ * machine. See BSpec North Display Engine registers/MIPI[BXT].
+ */
+ if (IS_BROXTON(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
+ goto out_put_power;
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
- u32 dpi_enabled, func;
+ bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
- func = I915_READ(MIPI_DSI_FUNC_PRG(port));
- dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
-
- /* Due to some hardware limitations on BYT, MIPI Port C DPI
- * Enable bit does not get set. To check whether DSI Port C
- * was enabled in BIOS, check the Pipe B enable bit
+ /*
+ * Due to some hardware limitations on VLV/CHV, the DPI enable
+ * bit in port C control register does not get set. As a
+ * workaround, check pipe B conf instead.
*/
- if (IS_VALLEYVIEW(dev) && port == PORT_C)
- dpi_enabled = I915_READ(PIPECONF(PIPE_B)) &
- PIPECONF_ENABLE;
+ if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && port == PORT_C)
+ enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+
+ /* Try command mode if video mode not enabled */
+ if (!enabled) {
+ u32 tmp = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ enabled = tmp & CMD_MODE_DATA_WIDTH_MASK;
+ }
+
+ if (!enabled)
+ continue;
+
+ if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
+ continue;
+
+ if (IS_BROXTON(dev_priv)) {
+ u32 tmp = I915_READ(MIPI_CTRL(port));
+ tmp &= BXT_PIPE_SELECT_MASK;
+ tmp >>= BXT_PIPE_SELECT_SHIFT;
- if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
- if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
- *pipe = port == PORT_A ? PIPE_A : PIPE_B;
- ret = true;
+ if (WARN_ON(tmp > PIPE_C))
+ continue;
- goto out;
- }
+ *pipe = tmp;
+ } else {
+ *pipe = port == PORT_A ? PIPE_A : PIPE_B;
}
+
+ active = true;
+ break;
}
-out:
+
+out_put_power:
intel_display_power_put(dev_priv, power_domain);
- return ret;
+ return active;
+}
+
+static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode_sw;
+ struct intel_crtc *intel_crtc;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ unsigned int lane_count = intel_dsi->lane_count;
+ unsigned int bpp, fmt;
+ enum port port;
+ u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+ u16 hfp_sw, hsync_sw, hbp_sw;
+ u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
+ crtc_hblank_start_sw, crtc_hblank_end_sw;
+
+ intel_crtc = to_intel_crtc(encoder->base.crtc);
+ adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode;
+
+ /*
+ * At least one port is active, since encoder->get_config() is called only if
+ * encoder->get_hw_state() returns true.
+ */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (I915_READ(BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
+ break;
+ }
+
+ fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
+ pipe_config->pipe_bpp =
+ mipi_dsi_pixel_format_to_bpp(
+ pixel_format_from_register_bits(fmt));
+ bpp = pipe_config->pipe_bpp;
+
+ /* In terms of pixels */
+ adjusted_mode->crtc_hdisplay =
+ I915_READ(BXT_MIPI_TRANS_HACTIVE(port));
+ adjusted_mode->crtc_vdisplay =
+ I915_READ(BXT_MIPI_TRANS_VACTIVE(port));
+ adjusted_mode->crtc_vtotal =
+ I915_READ(BXT_MIPI_TRANS_VTOTAL(port));
+
+ hactive = adjusted_mode->crtc_hdisplay;
+ hfp = I915_READ(MIPI_HFP_COUNT(port));
+
+ /*
+ * Meaningful only in video mode with non-burst sync pulses;
+ * can be zero in non-burst mode with sync events and in burst mode.
+ */
+ hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port));
+ hbp = I915_READ(MIPI_HBP_COUNT(port));
+
+ /* horizontal values are in terms of high speed byte clock */
+ hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp = pixels_from_txbyteclkhs(hbp, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+ if (intel_dsi->dual_link) {
+ hfp *= 2;
+ hsync *= 2;
+ hbp *= 2;
+ }
+
+ /* vertical values are in terms of lines */
+ vfp = I915_READ(MIPI_VFP_COUNT(port));
+ vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port));
+ vbp = I915_READ(MIPI_VBP_COUNT(port));
+
+ adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
+ adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
+ adjusted_mode->crtc_hsync_end = hsync + adjusted_mode->crtc_hsync_start;
+ adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
+ adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal;
+
+ adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay;
+ adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start;
+ adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
+ adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
+
+ /*
+ * On BXT some of the horizontal timings are programmed not in pixels
+ * but in txbyteclkhs, so the readout's pixels<==>txbyteclkhs
+ * conversions accumulate ROUND_UP errors. To compensate, take the
+ * given adjusted_mode, calculate the value that would be programmed
+ * to the port, and convert it back to a horizontal timing in pixels.
+ * This is the expected value, including the round-up errors, and if
+ * it matches the value retrieved from the port, the (HW state)
+ * adjusted_mode's horizontal timings are corrected to match the SW
+ * state, nullifying the errors.
+ */
+ /* Calculating the value programmed to the Port register */
+ hfp_sw = adjusted_mode_sw->crtc_hsync_start -
+ adjusted_mode_sw->crtc_hdisplay;
+ hsync_sw = adjusted_mode_sw->crtc_hsync_end -
+ adjusted_mode_sw->crtc_hsync_start;
+ hbp_sw = adjusted_mode_sw->crtc_htotal -
+ adjusted_mode_sw->crtc_hsync_end;
+
+ if (intel_dsi->dual_link) {
+ hfp_sw /= 2;
+ hsync_sw /= 2;
+ hbp_sw /= 2;
+ }
+
+ hfp_sw = txbyteclkhs(hfp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync_sw = txbyteclkhs(hsync_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp_sw = txbyteclkhs(hbp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+ /* Convert the port register values back to adjusted mode parameters */
+ hfp_sw = pixels_from_txbyteclkhs(hfp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync_sw = pixels_from_txbyteclkhs(hsync_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp_sw = pixels_from_txbyteclkhs(hbp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+ if (intel_dsi->dual_link) {
+ hfp_sw *= 2;
+ hsync_sw *= 2;
+ hbp_sw *= 2;
+ }
+
+ crtc_htotal_sw = adjusted_mode_sw->crtc_hdisplay + hfp_sw +
+ hsync_sw + hbp_sw;
+ crtc_hsync_start_sw = hfp_sw + adjusted_mode_sw->crtc_hdisplay;
+ crtc_hsync_end_sw = hsync_sw + crtc_hsync_start_sw;
+ crtc_hblank_start_sw = adjusted_mode_sw->crtc_hdisplay;
+ crtc_hblank_end_sw = crtc_htotal_sw;
+
+ if (adjusted_mode->crtc_htotal == crtc_htotal_sw)
+ adjusted_mode->crtc_htotal = adjusted_mode_sw->crtc_htotal;
+
+ if (adjusted_mode->crtc_hsync_start == crtc_hsync_start_sw)
+ adjusted_mode->crtc_hsync_start =
+ adjusted_mode_sw->crtc_hsync_start;
+
+ if (adjusted_mode->crtc_hsync_end == crtc_hsync_end_sw)
+ adjusted_mode->crtc_hsync_end =
+ adjusted_mode_sw->crtc_hsync_end;
+
+ if (adjusted_mode->crtc_hblank_start == crtc_hblank_start_sw)
+ adjusted_mode->crtc_hblank_start =
+ adjusted_mode_sw->crtc_hblank_start;
+
+ if (adjusted_mode->crtc_hblank_end == crtc_hblank_end_sw)
+ adjusted_mode->crtc_hblank_end =
+ adjusted_mode_sw->crtc_hblank_end;
}
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct drm_device *dev = encoder->base.dev;
u32 pclk;
DRM_DEBUG_KMS("\n");
pipe_config->has_dsi_encoder = true;
- /*
- * DPLL_MD is not used in case of DSI, reading will get some default value
- * set dpll_md = 0
- */
- pipe_config->dpll_hw_state.dpll_md = 0;
+ if (IS_BROXTON(dev))
+ bxt_dsi_get_pipe_config(encoder, pipe_config);
- pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp);
+ pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
+ pipe_config);
if (!pclk)
return;
@@ -736,7 +972,7 @@ intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
DRM_DEBUG_KMS("\n");
@@ -772,14 +1008,6 @@ static u16 txclkesc(u32 divider, unsigned int us)
}
}
-/* return pixels in terms of txbyteclkhs */
-static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
- u16 burst_mode_ratio)
-{
- return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
- 8 * 100), lane_count);
-}
-
static void set_dsi_timings(struct drm_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
@@ -787,7 +1015,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
+ unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
unsigned int lane_count = intel_dsi->lane_count;
u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
@@ -849,6 +1077,23 @@ static void set_dsi_timings(struct drm_encoder *encoder,
}
}
+static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
+{
+ switch (fmt) {
+ case MIPI_DSI_FMT_RGB888:
+ return VID_MODE_FORMAT_RGB888;
+ case MIPI_DSI_FMT_RGB666:
+ return VID_MODE_FORMAT_RGB666;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return VID_MODE_FORMAT_RGB666_PACKED;
+ case MIPI_DSI_FMT_RGB565:
+ return VID_MODE_FORMAT_RGB565;
+ default:
+ MISSING_CASE(fmt);
+ return VID_MODE_FORMAT_RGB666;
+ }
+}
+
static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
@@ -858,7 +1103,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
enum port port;
- unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format);
+ unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 val, tmp;
u16 mode_hdisplay;
@@ -917,9 +1162,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
} else {
val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
-
- /* XXX: cross-check bpp vs. pixel format? */
- val |= intel_dsi->pixel_format;
+ val |= pixel_format_to_reg(intel_dsi->pixel_format);
}
tmp = 0;
@@ -1059,6 +1302,48 @@ static int intel_dsi_get_modes(struct drm_connector *connector)
return 1;
}
+static int intel_dsi_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_crtc *crtc;
+ int ret;
+
+ ret = drm_object_property_set_value(&connector->base, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ if (val == DRM_MODE_SCALE_NONE) {
+ DRM_DEBUG_KMS("no scaling not supported\n");
+ return -EINVAL;
+ }
+ if (HAS_GMCH_DISPLAY(dev) &&
+ val == DRM_MODE_SCALE_CENTER) {
+ DRM_DEBUG_KMS("centering not supported\n");
+ return -EINVAL;
+ }
+
+ if (intel_connector->panel.fitting_mode == val)
+ return 0;
+
+ intel_connector->panel.fitting_mode = val;
+ }
+
+ crtc = intel_attached_encoder(connector)->base.crtc;
+ if (crtc && crtc->state->enable) {
+ /*
+ * If the CRTC is enabled, the display will be changed
+ * according to the new panel fitting mode.
+ */
+ intel_crtc_restore_mode(crtc);
+ }
+
+ return 0;
+}
+
static void intel_dsi_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -1101,11 +1386,25 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.detect = intel_dsi_detect,
.destroy = intel_dsi_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_dsi_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
+static void intel_dsi_add_properties(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+
+ if (connector->panel.fixed_mode) {
+ drm_mode_create_scaling_mode_property(dev);
+ drm_object_attach_property(&connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_ASPECT);
+ connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+ }
+}
+
void intel_dsi_init(struct drm_device *dev)
{
struct intel_dsi *intel_dsi;
@@ -1121,11 +1420,13 @@ void intel_dsi_init(struct drm_device *dev)
DRM_DEBUG_KMS("\n");
/* There is no detection method for MIPI so rely on VBT */
- if (!dev_priv->vbt.has_mipi)
+ if (!intel_bios_is_dsi_present(dev_priv, &port))
return;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+ } else if (IS_BROXTON(dev)) {
+ dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
return;
@@ -1161,17 +1462,21 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->unregister = intel_connector_unregister;
- /* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
- if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
- intel_encoder->crtc_mask = (1 << PIPE_A);
- intel_dsi->ports = (1 << PORT_A);
- } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
- intel_encoder->crtc_mask = (1 << PIPE_B);
- intel_dsi->ports = (1 << PORT_C);
- }
+ /*
+ * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
+ * port C. BXT isn't limited like this.
+ */
+ if (IS_BROXTON(dev_priv))
+ intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ else if (port == PORT_A)
+ intel_encoder->crtc_mask = BIT(PIPE_A);
+ else
+ intel_encoder->crtc_mask = BIT(PIPE_B);
if (dev_priv->vbt.dsi.config->dual_link)
- intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
+ intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
+ else
+ intel_dsi->ports = BIT(port);
/* Create a DSI host (and a device) for each port. */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -1223,8 +1528,6 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_connector_register(connector);
-
drm_panel_attach(intel_dsi->panel, connector);
mutex_lock(&dev->mode_config.mutex);
@@ -1242,7 +1545,15 @@ void intel_dsi_init(struct drm_device *dev)
goto err;
}
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
+
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+
+ intel_dsi_add_properties(intel_connector);
+
+ drm_connector_register(connector);
+
intel_panel_setup_backlight(connector, INVALID_PIPE);
return;
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 92f39227b..61a6957fc 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -34,8 +34,6 @@
#define DSI_DUAL_LINK_FRONT_BACK 1
#define DSI_DUAL_LINK_PIXEL_ALT 2
-int dsi_pixel_format_bpp(int pixel_format);
-
struct intel_dsi_host;
struct intel_dsi {
@@ -64,8 +62,12 @@ struct intel_dsi {
/* number of DSI lanes */
unsigned int lane_count;
- /* video mode pixel format for MIPI_DSI_FUNC_PRG register */
- u32 pixel_format;
+ /*
+ * video mode pixel format
+ *
+ * XXX: consolidate on .format in struct mipi_dsi_device.
+ */
+ enum mipi_dsi_pixel_format pixel_format;
/* video mode format for MIPI_VIDEO_MODE_FORMAT register */
u32 video_mode_format;
@@ -117,21 +119,25 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
return container_of(h, struct intel_dsi_host, base);
}
-#define for_each_dsi_port(__port, __ports_mask) \
- for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
- for_each_if ((__ports_mask) & (1 << (__port)))
+#define for_each_dsi_port(__port, __ports_mask) for_each_port_masked(__port, __ports_mask)
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_dsi, base.base);
}
-extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
-extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
-extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
-extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
- enum port port);
+bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
+int intel_compute_dsi_pll(struct intel_encoder *encoder,
+ struct intel_crtc_state *config);
+void intel_enable_dsi_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config);
+void intel_disable_dsi_pll(struct intel_encoder *encoder);
+u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config);
+void intel_dsi_reset_clocks(struct intel_encoder *encoder,
+ enum port port);
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
+enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 7f145b4fe..e498f1c32 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -58,50 +58,41 @@ static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel)
#define NS_KHZ_RATIO 1000000
-#define GPI0_NC_0_HV_DDI0_HPD 0x4130
-#define GPIO_NC_0_HV_DDI0_PAD 0x4138
-#define GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
-#define GPIO_NC_1_HV_DDI0_DDC_SDA_PAD 0x4128
-#define GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
-#define GPIO_NC_2_HV_DDI0_DDC_SCL_PAD 0x4118
-#define GPIO_NC_3_PANEL0_VDDEN 0x4140
-#define GPIO_NC_3_PANEL0_VDDEN_PAD 0x4148
-#define GPIO_NC_4_PANEL0_BLKEN 0x4150
-#define GPIO_NC_4_PANEL0_BLKEN_PAD 0x4158
-#define GPIO_NC_5_PANEL0_BLKCTL 0x4160
-#define GPIO_NC_5_PANEL0_BLKCTL_PAD 0x4168
-#define GPIO_NC_6_PCONF0 0x4180
-#define GPIO_NC_6_PAD 0x4188
-#define GPIO_NC_7_PCONF0 0x4190
-#define GPIO_NC_7_PAD 0x4198
-#define GPIO_NC_8_PCONF0 0x4170
-#define GPIO_NC_8_PAD 0x4178
-#define GPIO_NC_9_PCONF0 0x4100
-#define GPIO_NC_9_PAD 0x4108
-#define GPIO_NC_10_PCONF0 0x40E0
-#define GPIO_NC_10_PAD 0x40E8
-#define GPIO_NC_11_PCONF0 0x40F0
-#define GPIO_NC_11_PAD 0x40F8
-
-struct gpio_table {
- u16 function_reg;
- u16 pad_reg;
- u8 init;
+/* base offsets for gpio pads */
+#define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130
+#define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
+#define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
+#define VLV_GPIO_NC_3_PANEL0_VDDEN 0x4140
+#define VLV_GPIO_NC_4_PANEL0_BKLTEN 0x4150
+#define VLV_GPIO_NC_5_PANEL0_BKLTCTL 0x4160
+#define VLV_GPIO_NC_6_HV_DDI1_HPD 0x4180
+#define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA 0x4190
+#define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL 0x4170
+#define VLV_GPIO_NC_9_PANEL1_VDDEN 0x4100
+#define VLV_GPIO_NC_10_PANEL1_BKLTEN 0x40E0
+#define VLV_GPIO_NC_11_PANEL1_BKLTCTL 0x40F0
+
+#define VLV_GPIO_PCONF0(base_offset) (base_offset)
+#define VLV_GPIO_PAD_VAL(base_offset) ((base_offset) + 8)
+
+struct gpio_map {
+ u16 base_offset;
+ bool init;
};
-static struct gpio_table gtable[] = {
- { GPI0_NC_0_HV_DDI0_HPD, GPIO_NC_0_HV_DDI0_PAD, 0 },
- { GPIO_NC_1_HV_DDI0_DDC_SDA, GPIO_NC_1_HV_DDI0_DDC_SDA_PAD, 0 },
- { GPIO_NC_2_HV_DDI0_DDC_SCL, GPIO_NC_2_HV_DDI0_DDC_SCL_PAD, 0 },
- { GPIO_NC_3_PANEL0_VDDEN, GPIO_NC_3_PANEL0_VDDEN_PAD, 0 },
- { GPIO_NC_4_PANEL0_BLKEN, GPIO_NC_4_PANEL0_BLKEN_PAD, 0 },
- { GPIO_NC_5_PANEL0_BLKCTL, GPIO_NC_5_PANEL0_BLKCTL_PAD, 0 },
- { GPIO_NC_6_PCONF0, GPIO_NC_6_PAD, 0 },
- { GPIO_NC_7_PCONF0, GPIO_NC_7_PAD, 0 },
- { GPIO_NC_8_PCONF0, GPIO_NC_8_PAD, 0 },
- { GPIO_NC_9_PCONF0, GPIO_NC_9_PAD, 0 },
- { GPIO_NC_10_PCONF0, GPIO_NC_10_PAD, 0},
- { GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0}
+static struct gpio_map vlv_gpio_table[] = {
+ { VLV_GPIO_NC_0_HV_DDI0_HPD },
+ { VLV_GPIO_NC_1_HV_DDI0_DDC_SDA },
+ { VLV_GPIO_NC_2_HV_DDI0_DDC_SCL },
+ { VLV_GPIO_NC_3_PANEL0_VDDEN },
+ { VLV_GPIO_NC_4_PANEL0_BKLTEN },
+ { VLV_GPIO_NC_5_PANEL0_BKLTCTL },
+ { VLV_GPIO_NC_6_HV_DDI1_HPD },
+ { VLV_GPIO_NC_7_HV_DDI1_DDC_SDA },
+ { VLV_GPIO_NC_8_HV_DDI1_DDC_SCL },
+ { VLV_GPIO_NC_9_PANEL1_VDDEN },
+ { VLV_GPIO_NC_10_PANEL1_BKLTEN },
+ { VLV_GPIO_NC_11_PANEL1_BKLTCTL },
};
static inline enum port intel_dsi_seq_port_to_port(u8 port)
@@ -196,56 +187,76 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
-static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
+ u8 gpio_source, u8 gpio_index, bool value)
{
- u8 gpio, action;
- u16 function, pad;
- u32 val;
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (dev_priv->vbt.dsi.seq_version >= 3)
- data++;
-
- gpio = *data++;
+ struct gpio_map *map;
+ u16 pconf0, padval;
+ u32 tmp;
+ u8 port;
- /* pull up/down */
- action = *data++ & 1;
-
- if (gpio >= ARRAY_SIZE(gtable)) {
- DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
- goto out;
+ if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
+ DRM_DEBUG_KMS("unknown gpio index %u\n", gpio_index);
+ return;
}
- if (!IS_VALLEYVIEW(dev_priv)) {
- DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
- goto out;
- }
+ map = &vlv_gpio_table[gpio_index];
if (dev_priv->vbt.dsi.seq_version >= 3) {
DRM_DEBUG_KMS("GPIO element v3 not supported\n");
- goto out;
+ return;
+ } else {
+ if (gpio_source == 0) {
+ port = IOSF_PORT_GPIO_NC;
+ } else if (gpio_source == 1) {
+ port = IOSF_PORT_GPIO_SC;
+ } else {
+ DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+ return;
+ }
}
- function = gtable[gpio].function_reg;
- pad = gtable[gpio].pad_reg;
+ pconf0 = VLV_GPIO_PCONF0(map->base_offset);
+ padval = VLV_GPIO_PAD_VAL(map->base_offset);
mutex_lock(&dev_priv->sb_lock);
- if (!gtable[gpio].init) {
- /* program the function */
+ if (!map->init) {
/* FIXME: remove constant below */
- vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function,
- 0x2000CC00);
- gtable[gpio].init = 1;
+ vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
+ map->init = true;
}
- val = 0x4 | action;
+ tmp = 0x4 | value;
+ vlv_iosf_sb_write(dev_priv, port, padval, tmp);
+ mutex_unlock(&dev_priv->sb_lock);
+}
+
+static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 gpio_source, gpio_index;
+ bool value;
+
+ if (dev_priv->vbt.dsi.seq_version >= 3)
+ data++;
+
+ gpio_index = *data++;
+
+ /* gpio source in sequence v2 only */
+ if (dev_priv->vbt.dsi.seq_version == 2)
+ gpio_source = (*data >> 1) & 3;
+ else
+ gpio_source = 0;
/* pull up/down */
- vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val);
- mutex_unlock(&dev_priv->sb_lock);
+ value = *data++ & 1;
+
+ if (IS_VALLEYVIEW(dev_priv))
+ vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ else
+ DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
-out:
return data;
}
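To make the v2 sequence byte layout consumed by mipi_exec_gpio() above concrete, here is a small standalone decoder under the same assumptions (an index byte followed by a byte carrying the source in bits 2:1 and the pull value in bit 0); decode_gpio_v2() and the sample bytes are inventions for the sketch.

/*
 * Sketch of decoding a v2 MIPI sequence GPIO element as the function
 * above does: one gpio index byte, then one byte whose bit 0 is the
 * pull up/down value and bits 2:1 the gpio source. Sample data only.
 */
#include <stdio.h>

struct gpio_element {
	unsigned char index;
	unsigned char source;	/* 0 = GPIO_NC, 1 = GPIO_SC */
	unsigned char value;	/* 1 = pull up, 0 = pull down */
};

static const unsigned char *decode_gpio_v2(const unsigned char *data,
					   struct gpio_element *out)
{
	out->index = *data++;
	out->source = (*data >> 1) & 3;	/* same byte as the value */
	out->value = *data++ & 1;
	return data;			/* points past the element */
}

int main(void)
{
	/* index 3 (PANEL0_VDDEN in the table above), source 0, pull up */
	const unsigned char seq[] = { 0x03, 0x01 };
	struct gpio_element el;

	decode_gpio_v2(seq, &el);
	printf("gpio %u source %u value %u\n", el.index, el.source, el.value);
	return 0;
}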
@@ -420,7 +431,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
struct vbt_panel *vbt_panel;
- u32 bits_per_pixel = 24;
+ u32 bpp;
u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
u32 ui_num, ui_den;
u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
@@ -436,12 +447,13 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
intel_dsi->lane_count = mipi_config->lane_cnt + 1;
- intel_dsi->pixel_format = mipi_config->videomode_color_format << 7;
+ intel_dsi->pixel_format =
+ pixel_format_from_register_bits(
+ mipi_config->videomode_color_format << 7);
+ bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
intel_dsi->dual_link = mipi_config->dual_link;
intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
-
- bits_per_pixel = dsi_pixel_format_bpp(intel_dsi->pixel_format);
-
intel_dsi->operation_mode = mipi_config->is_cmd_mode;
intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
@@ -475,8 +487,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
*/
if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
if (mipi_config->target_burst_mode_freq) {
- computed_ddr =
- (pclk * bits_per_pixel) / intel_dsi->lane_count;
+ computed_ddr = (pclk * bpp) / intel_dsi->lane_count;
if (mipi_config->target_burst_mode_freq <
computed_ddr) {
@@ -499,7 +510,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->burst_mode_ratio = burst_mode_ratio;
intel_dsi->pclk = pclk;
- bitrate = (pclk * bits_per_pixel) / intel_dsi->lane_count;
+ bitrate = (pclk * bpp) / intel_dsi->lane_count;
switch (intel_dsi->escape_clk_div) {
case 0:
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 70883c54c..1765e6e18 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -30,33 +30,7 @@
#include "i915_drv.h"
#include "intel_dsi.h"
-int dsi_pixel_format_bpp(int pixel_format)
-{
- int bpp;
-
- switch (pixel_format) {
- default:
- case VID_MODE_FORMAT_RGB888:
- case VID_MODE_FORMAT_RGB666_LOOSE:
- bpp = 24;
- break;
- case VID_MODE_FORMAT_RGB666:
- bpp = 18;
- break;
- case VID_MODE_FORMAT_RGB565:
- bpp = 16;
- break;
- }
-
- return bpp;
-}
-
-struct dsi_mnp {
- u32 dsi_pll_ctrl;
- u32 dsi_pll_div;
-};
-
-static const u32 lfsr_converts[] = {
+static const u16 lfsr_converts[] = {
426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
106, 53, 282, 397, 454, 227, 113, 56, 284, 142, /* 81 - 90 */
@@ -64,10 +38,11 @@ static const u32 lfsr_converts[] = {
};
/* Get DSI clock from pixel clock */
-static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
+static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt,
+ int lane_count)
{
u32 dsi_clk_khz;
- u32 bpp = dsi_pixel_format_bpp(pixel_format);
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(fmt);
/* DSI data rate = pixel clock * bits per pixel / lane count
pixel clock is converted from KHz to Hz */
@@ -77,7 +52,8 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
}
static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
- struct dsi_mnp *dsi_mnp, int target_dsi_clk)
+ struct intel_crtc_state *config,
+ int target_dsi_clk)
{
unsigned int calc_m = 0, calc_p = 0;
unsigned int m_min, m_max, p_min = 2, p_max = 6;
@@ -123,8 +99,8 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
/* register has log2(N1), this works fine for powers of two */
n = ffs(n) - 1;
m_seed = lfsr_converts[calc_m - 62];
- dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
- dsi_mnp->dsi_pll_div = n << DSI_PLL_N1_DIV_SHIFT |
+ config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
+ config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT |
m_seed << DSI_PLL_M1_DIV_SHIFT;
return 0;
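For a feel for the numbers that feed dsi_calc_mnp() above, a back-of-the-envelope sketch of the bit-rate formula from the dsi_clk_from_pclk() comment (pixel clock * bits per pixel / lane count). The 65 MHz pixel clock is an invented sample, and the round-up rounding here is an assumption, since the exact rounding isn't visible in this hunk.

/*
 * Worked example of the DSI clock formula used above:
 *   dsi bit rate = pclk * bpp / lane_count
 * with pclk in kHz. The sample pixel clock is invented.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int pclk = 65000;	/* kHz, sample panel */
	unsigned int bpp = 24;		/* MIPI_DSI_FMT_RGB888 */
	unsigned int lanes = 4;
	unsigned int dsi_clk = DIV_ROUND_UP(pclk * bpp, lanes);

	/* 65000 * 24 / 4 = 390000 kHz = 390 MHz per lane */
	printf("dsi clk = %u kHz\n", dsi_clk);
	return 0;
}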
@@ -134,54 +110,55 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
* XXX: The muxing and gating is hard coded for now. Need to add support for
* sharing PLLs with two DSI outputs.
*/
-static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
+static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int ret;
- struct dsi_mnp dsi_mnp;
u32 dsi_clk;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
- ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk);
+ ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
if (ret) {
DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
- return;
+ return ret;
}
if (intel_dsi->ports & (1 << PORT_A))
- dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+ config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
if (intel_dsi->ports & (1 << PORT_C))
- dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
+ config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
+
+ config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
- dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
+ config->dsi_pll.div, config->dsi_pll.ctrl);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
+ return 0;
}
-static void vlv_enable_dsi_pll(struct intel_encoder *encoder)
+static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- u32 tmp;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
DRM_DEBUG_KMS("\n");
mutex_lock(&dev_priv->sb_lock);
- vlv_configure_dsi_pll(encoder);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL,
+ config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN);
/* wait at least 0.5 us after ungating before enabling VCO */
usleep_range(1, 10);
- tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
- tmp |= DSI_PLL_VCO_EN;
- vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+ vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
DSI_PLL_LOCK, 20)) {
@@ -197,7 +174,7 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder)
static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
DRM_DEBUG_KMS("\n");
@@ -212,9 +189,39 @@ static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock);
}
+static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+{
+ bool enabled;
+ u32 val;
+ u32 mask;
+
+ mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
+ val = I915_READ(BXT_DSI_PLL_ENABLE);
+ enabled = (val & mask) == mask;
+
+ if (!enabled)
+ return false;
+
+ /*
+ * Both dividers must be programmed with valid values even if only one
+ * of the PLL outputs is used, see BSpec/Broxton Clocks. Check this here for
+ * paranoia, since BIOS is known to misconfigure PLLs in this way at
+ * times, and since accessing DSI registers with invalid dividers
+ * causes a system hang.
+ */
+ val = I915_READ(BXT_DSI_PLL_CTL);
+ if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
+ DRM_DEBUG_DRIVER("PLL is enabled with invalid divider settings (%08x)\n",
+ val);
+ enabled = false;
+ }
+
+ return enabled;
+}
+
static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
DRM_DEBUG_KMS("\n");
@@ -232,23 +239,24 @@ static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}
-static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
+static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
{
- int bpp = dsi_pixel_format_bpp(pixel_format);
+ int bpp = mipi_dsi_pixel_format_to_bpp(fmt);
WARN(bpp != pipe_bpp,
"bpp match assertion failure (expected %d, current %d)\n",
bpp, pipe_bpp);
}
-static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
u32 m = 0, p = 0, n;
- int refclk = 25000;
+ int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
int i;
DRM_DEBUG_KMS("\n");
@@ -258,6 +266,9 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
mutex_unlock(&dev_priv->sb_lock);
+ config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
+ config->dsi_pll.div = pll_div;
+
/* mask out other bits and extract the P1 divisor */
pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
@@ -303,7 +314,8 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
return pclk;
}
-static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config)
{
u32 pclk;
u32 dsi_clk;
@@ -317,15 +329,9 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
return 0;
}
- dsi_ratio = I915_READ(BXT_DSI_PLL_CTL) &
- BXT_DSI_PLL_RATIO_MASK;
+ config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);
- /* Invalid DSI ratio ? */
- if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
- dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
- DRM_ERROR("Invalid DSI pll ratio(%u) programmed\n", dsi_ratio);
- return 0;
- }
+ dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
@@ -338,12 +344,13 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
return pclk;
}
-u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+ struct intel_crtc_state *config)
{
if (IS_BROXTON(encoder->base.dev))
- return bxt_dsi_get_pclk(encoder, pipe_bpp);
+ return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
else
- return vlv_dsi_get_pclk(encoder, pipe_bpp);
+ return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
}
static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
@@ -360,51 +367,72 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
}
/* Program BXT Mipi clocks and dividers */
-static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
+static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
+ const struct intel_crtc_state *config)
{
- u32 tmp;
- u32 divider;
- u32 dsi_rate;
- u32 pll_ratio;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+ u32 dsi_rate = 0;
+ u32 pll_ratio = 0;
+ u32 rx_div;
+ u32 tx_div;
+ u32 rx_div_upper;
+ u32 rx_div_lower;
+ u32 mipi_8by3_divider;
/* Clear old configurations */
tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
- tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
- tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
- tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
+ tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
+ tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
+ tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
/* Get the current DSI rate(actual) */
- pll_ratio = I915_READ(BXT_DSI_PLL_CTL) &
- BXT_DSI_PLL_RATIO_MASK;
+ pll_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;
- /* Max possible output of clock is 39.5 MHz, program value -1 */
- divider = (dsi_rate / BXT_MAX_VAR_OUTPUT_KHZ) - 1;
- tmp |= BXT_MIPI_ESCLK_VAR_DIV(port, divider);
+ /*
+ * tx clock should be <= 20MHz; per bspec the programmed
+ * divider value is one less than the actual divider
+ */
+ tx_div = DIV_ROUND_UP(dsi_rate, 20000) - 1;
+ /*
+ * rx clock should be <= 150MHz; per bspec the programmed
+ * divider value is one less than the actual divider
+ */
+ rx_div = DIV_ROUND_UP(dsi_rate, 150000) - 1;
/*
- * Tx escape clock must be as close to 20MHz possible, but should
- * not exceed it. Hence select divide by 2
+ * the rx divider value needs to be programmed into two
+ * different bit fields in the register, hence it is
+ * split accordingly
*/
- tmp |= BXT_MIPI_TX_ESCLK_8XDIV_BY2(port);
+ rx_div_lower = rx_div & RX_DIVIDER_BIT_1_2;
+ rx_div_upper = (rx_div & RX_DIVIDER_BIT_3_4) >> 2;
- tmp |= BXT_MIPI_RX_ESCLK_8X_BY3(port);
+ /* As per bspec, program the 8/3X clock divider to the value below */
+ if (dev_priv->vbt.dsi.config->is_cmd_mode)
+ mipi_8by3_divider = 0x2;
+ else
+ mipi_8by3_divider = 0x3;
+
+ tmp |= BXT_MIPI_8X_BY3_DIVIDER(port, mipi_8by3_divider);
+ tmp |= BXT_MIPI_TX_ESCLK_DIVIDER(port, tx_div);
+ tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
+ tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
}
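A standalone sketch of the divider arithmetic bxt_dsi_program_clocks() performs above. The RX_DIVIDER_BIT_1_2/RX_DIVIDER_BIT_3_4 masks are assumed to be 0x3 and 0xc (two 2-bit fields, as the >> 2 suggests), and dsi_rate is a made-up sample.

/*
 * Sketch of the BXT escape clock divider computation above. The rx
 * divider is assumed to occupy two 2-bit register fields, hence the
 * 0x3/0xc masks; dsi_rate is an invented sample in kHz.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define RX_DIVIDER_BIT_1_2	0x3	/* assumed mask, low two bits */
#define RX_DIVIDER_BIT_3_4	0xc	/* assumed mask, high two bits */

int main(void)
{
	unsigned int dsi_rate = 390000;		/* kHz, sample */
	/* programmed value is one less than the actual divider */
	unsigned int tx_div = DIV_ROUND_UP(dsi_rate, 20000) - 1;
	unsigned int rx_div = DIV_ROUND_UP(dsi_rate, 150000) - 1;
	unsigned int rx_lo = rx_div & RX_DIVIDER_BIT_1_2;
	unsigned int rx_hi = (rx_div & RX_DIVIDER_BIT_3_4) >> 2;

	/* 390000/20000 -> ceil 20 -> tx_div 19; 390000/150000 -> 3 -> 2 */
	printf("tx_div %u, rx_div %u (lo %u, hi %u)\n",
	       tx_div, rx_div, rx_lo, rx_hi);
	return 0;
}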
-static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
+static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
{
- struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u8 dsi_ratio;
u32 dsi_clk;
- u32 val;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
- intel_dsi->lane_count);
+ intel_dsi->lane_count);
/*
* From clock diagram, to get PLL ratio divider, divide double of DSI
@@ -413,9 +441,9 @@ static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
*/
dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
- dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
+ dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n");
- return false;
+ return -ECHRNG;
}
/*
@@ -423,27 +451,19 @@ static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
* Spec says both have to be programmed, even if one is not getting
* used. Configure MIPI_CLOCK_CTL dividers in modeset
*/
- val = I915_READ(BXT_DSI_PLL_CTL);
- val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
- val &= ~BXT_DSI_FREQ_SEL_MASK;
- val &= ~BXT_DSI_PLL_RATIO_MASK;
- val |= (dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2);
+ config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2;
/* As per recommendation from hardware team,
* Prog PVD ratio =1 if dsi ratio <= 50
*/
- if (dsi_ratio <= 50) {
- val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
- val |= BXT_DSI_PLL_PVD_RATIO_1;
- }
+ if (dsi_ratio <= 50)
+ config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
- I915_WRITE(BXT_DSI_PLL_CTL, val);
- POSTING_READ(BXT_DSI_PLL_CTL);
-
- return true;
+ return 0;
}
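
For reference, the ratio selection above reduces to a small pure function. A sketch assuming the 19.2 MHz BXT reference clock; the 32..103 bounds are illustrative stand-ins for BXT_DSI_PLL_RATIO_MIN/MAX, whose real values live in the driver headers:

#include <stdint.h>
#include <stdio.h>

#define REF_CLOCK_KHZ	19200	/* assumed BXT_REF_CLOCK_KHZ */
#define RATIO_MIN	32	/* illustrative BXT_DSI_PLL_RATIO_MIN */
#define RATIO_MAX	103	/* illustrative BXT_DSI_PLL_RATIO_MAX */

/* Mirror of the computation above: ratio = DIV_ROUND_UP(2 * dsi_clk, ref). */
int compute_ratio(uint32_t dsi_clk_khz, uint8_t *ratio)
{
	uint32_t r = (dsi_clk_khz * 2 + REF_CLOCK_KHZ - 1) / REF_CLOCK_KHZ;

	if (r < RATIO_MIN || r > RATIO_MAX)
		return -1;	/* out of range, like -ECHRNG above */
	*ratio = (uint8_t)r;
	return 0;
}

int main(void)
{
	uint8_t ratio;

	if (compute_ratio(513000, &ratio) == 0)
		printf("ratio=%u -> actual dsi_clk=%u kHz\n",
		       (unsigned)ratio, (unsigned)(ratio * REF_CLOCK_KHZ / 2));
	return 0;
}

Note the actual DSI clock is quantized to ratio * ref / 2, which is why the driver rounds the ratio up and re-derives the clock from it.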
-static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
+static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -452,23 +472,13 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
- val = I915_READ(BXT_DSI_PLL_ENABLE);
-
- if (val & BXT_DSI_PLL_DO_ENABLE) {
- WARN(1, "DSI PLL already enabled. Disabling it.\n");
- val &= ~BXT_DSI_PLL_DO_ENABLE;
- I915_WRITE(BXT_DSI_PLL_ENABLE, val);
- }
-
	/* Configure PLL values */
- if (!bxt_configure_dsi_pll(encoder)) {
- DRM_ERROR("Configure DSI PLL failed, abort PLL enable\n");
- return;
- }
+ I915_WRITE(BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
+ POSTING_READ(BXT_DSI_PLL_CTL);
/* Program TX, RX, Dphy clocks */
for_each_dsi_port(port, intel_dsi->ports)
- bxt_dsi_program_clocks(encoder->base.dev, port);
+ bxt_dsi_program_clocks(encoder->base.dev, port, config);
/* Enable DSI PLL */
val = I915_READ(BXT_DSI_PLL_ENABLE);
@@ -484,14 +494,38 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
DRM_DEBUG_KMS("DSI PLL locked\n");
}
-void intel_enable_dsi_pll(struct intel_encoder *encoder)
+bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+{
+ if (IS_BROXTON(dev_priv))
+ return bxt_dsi_pll_is_enabled(dev_priv);
+
+ MISSING_CASE(INTEL_DEVID(dev_priv));
+
+ return false;
+}
+
+int intel_compute_dsi_pll(struct intel_encoder *encoder,
+ struct intel_crtc_state *config)
+{
+ struct drm_device *dev = encoder->base.dev;
+
+ if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ return vlv_compute_dsi_pll(encoder, config);
+ else if (IS_BROXTON(dev))
+ return bxt_compute_dsi_pll(encoder, config);
+
+ return -ENODEV;
+}
+
+void intel_enable_dsi_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *config)
{
struct drm_device *dev = encoder->base.dev;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
- vlv_enable_dsi_pll(encoder);
+ vlv_enable_dsi_pll(encoder, config);
else if (IS_BROXTON(dev))
- bxt_enable_dsi_pll(encoder);
+ bxt_enable_dsi_pll(encoder, config);
}
void intel_disable_dsi_pll(struct intel_encoder *encoder)
@@ -513,9 +547,9 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
/* Clear old configurations */
tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
- tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
- tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
- tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
+ tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
+ tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
+ tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 28f440772..647127f3a 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -506,6 +506,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
int size,
int fb_cpp)
{
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
int compression_threshold = 1;
int ret;
u64 end;
@@ -516,9 +517,9 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
+ end = ggtt->stolen_size - 8 * 1024 * 1024;
else
- end = dev_priv->gtt.stolen_usable_size;
+ end = ggtt->stolen_usable_size;
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index c607217c1..ab8d09a81 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -122,6 +122,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj = NULL;
int size, ret;
@@ -146,7 +147,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- if (size * 2 < dev_priv->gtt.stolen_usable_size)
+ if (size * 2 < ggtt->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
obj = i915_gem_alloc_object(dev, size);
@@ -181,7 +182,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
container_of(helper, struct intel_fbdev, helper);
struct intel_framebuffer *intel_fb = ifbdev->fb;
struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
@@ -220,7 +222,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
* This also validates that any existing fb inherited from the
* BIOS is suitable for own access.
*/
- ret = intel_pin_and_fence_fb_obj(NULL, &ifbdev->fb->base, NULL);
+ ret = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
if (ret)
goto out_unlock;
@@ -244,13 +246,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
- info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
+ info->apertures->ranges[0].size = ggtt->mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size;
info->screen_base =
- ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
+ ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
size);
if (!info->screen_base) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -379,6 +381,7 @@ retry:
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_fb_helper_crtc *new_crtc;
+ struct intel_crtc *intel_crtc;
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
@@ -420,6 +423,13 @@ retry:
num_connectors_enabled++;
+ intel_crtc = to_intel_crtc(connector->state->crtc);
+ for (j = 0; j < 256; j++) {
+ intel_crtc->lut_r[j] = j;
+ intel_crtc->lut_g[j] = j;
+ intel_crtc->lut_b[j] = j;
+ }
+
new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc);
/*
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index bda526660..9be839a24 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -212,7 +212,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
POSTING_READ(SERR_INT);
- DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
+ DRM_ERROR("pch fifo underrun on pch transcoder %s\n",
transcoder_name(pch_transcoder));
}
@@ -235,7 +235,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
if (old && I915_READ(SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
- DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+ DRM_ERROR("uncleared pch fifo underrun on pch transcoder %s\n",
transcoder_name(pch_transcoder));
}
}
@@ -333,7 +333,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
old = !intel_crtc->pch_fifo_underrun_disabled;
intel_crtc->pch_fifo_underrun_disabled = !enable;
- if (HAS_PCH_IBX(dev_priv->dev))
+ if (HAS_PCH_IBX(dev_priv))
ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
enable);
else
@@ -363,7 +363,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
return;
/* GMCH can't disable fifo underruns, filter them. */
- if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
+ if (HAS_GMCH_DISPLAY(dev_priv) &&
to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
return;
@@ -386,7 +386,7 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
{
if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
false))
- DRM_ERROR("PCH transcoder %c FIFO underrun\n",
+ DRM_ERROR("PCH transcoder %s FIFO underrun\n",
transcoder_name(pch_transcoder));
}
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 73002e901..9d79c4c3e 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -27,8 +27,34 @@
#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
+struct drm_i915_gem_request;
+
+/*
+ * This structure primarily describes the GEM object shared with the GuC.
+ * The GEM object is held for the entire lifetime of our interaction with
+ * the GuC, being allocated before the GuC is loaded with its firmware.
+ * Because there's no way to update the address used by the GuC after
+ * initialisation, the shared object must stay pinned into the GGTT as
+ * long as the GuC is in use. We also keep the first page (only) mapped
+ * into kernel address space, as it includes shared data that must be
+ * updated on every request submission.
+ *
+ * The single GEM object described here is actually made up of several
+ * separate areas, as far as the GuC is concerned. The first page (kept
+ * kmap'd) includes the "process descriptor" which holds sequence data for
+ * the doorbell, and one cacheline which actually *is* the doorbell; a
+ * write to this will "ring the doorbell" (i.e. send an interrupt to the
+ * GuC). The subsequent pages of the client object constitute the work
+ * queue (a circular array of work items), again described in the process
+ * descriptor. Work queue pages are mapped momentarily as required.
+ *
+ * Finally, we also keep a few statistics here, including the number of
+ * submissions to each engine, and a record of the last submission failure
+ * (if any).
+ */
struct i915_guc_client {
struct drm_i915_gem_object *client_obj;
+ void *client_base; /* first page (only) of above */
struct intel_context *owner;
struct intel_guc *guc;
uint32_t priority;
@@ -43,13 +69,14 @@ struct i915_guc_client {
uint32_t wq_offset;
uint32_t wq_size;
uint32_t wq_tail;
- uint32_t wq_head;
+ uint32_t unused; /* Was 'wq_head' */
/* GuC submission statistics & status */
uint64_t submissions[GUC_MAX_ENGINES_NUM];
uint32_t q_fail;
uint32_t b_fail;
int retcode;
+ int spare; /* pad to 32 DWords */
};
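
A rough, illustrative-only picture of the client page that the comment block above describes; the struct layouts and field names here are assumptions for exposition, not the driver's actual ABI:

#include <stdint.h>

/* Lives at the start of page 0 of the client object (illustrative). */
struct guc_process_desc_sketch {
	uint32_t wq_head;	/* consumer index, updated by the GuC */
	uint32_t wq_tail;	/* producer index, updated by the host */
	/* ... rest of the descriptor ... */
};

/* One cacheline elsewhere in page 0 (illustrative layout). */
struct guc_doorbell_sketch {
	uint32_t db_status;	/* a write here "rings" the GuC */
	uint32_t cookie;
};

/* Ringing the doorbell is just a store to the kmap'd cacheline. */
void ring_doorbell_sketch(volatile struct guc_doorbell_sketch *db,
			  uint32_t cookie)
{
	db->cookie = cookie;
	db->db_status = 1;	/* triggers the interrupt to the GuC */
}

Keeping that first page permanently kmap'd (the new client_base field) is what lets submission touch the descriptor and doorbell without a map/unmap on every request.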
enum intel_guc_fw_status {
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index ef46f976f..6021d5872 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -81,14 +81,14 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *ring;
- int i, irqs;
+ struct intel_engine_cs *engine;
+ int irqs;
/* tell all command streamers NOT to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MODE_GEN7(ring), irqs);
+ for_each_engine(engine, dev_priv)
+ I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route all GT interrupts to the host */
I915_WRITE(GUC_BCS_RCS_IER, 0);
@@ -98,14 +98,14 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
{
- struct intel_engine_cs *ring;
- int i, irqs;
+ struct intel_engine_cs *engine;
+ int irqs;
/* tell all command streamers to forward interrupts and vblank to GuC */
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MODE_GEN7(ring), irqs);
+ for_each_engine(engine, dev_priv)
+ I915_WRITE(RING_MODE_GEN7(engine), irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
@@ -353,6 +353,24 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
return ret;
}
+static int i915_reset_guc(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ u32 guc_status;
+
+ ret = intel_guc_reset(dev_priv);
+ if (ret) {
+ DRM_ERROR("GuC reset failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ guc_status = I915_READ(GUC_STATUS);
+ WARN(!(guc_status & GS_MIA_IN_RESET),
+ "GuC status: 0x%x, MIA core expected to be in reset\n", guc_status);
+
+ return ret;
+}
+
/**
* intel_guc_ucode_load() - load GuC uCode into the device
* @dev: drm device
@@ -369,7 +387,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- int err = 0;
+ int retries, err = 0;
if (!i915.enable_guc_submission)
return 0;
@@ -417,9 +435,33 @@ int intel_guc_ucode_load(struct drm_device *dev)
if (err)
goto fail;
- err = guc_ucode_xfer(dev_priv);
- if (err)
- goto fail;
+ /*
+ * WaEnableuKernelHeaderValidFix:skl,bxt
+	 * For BXT this officially applies only up to B0, but the WA
+	 * below is required for later steppings too, so it is extended
+	 * to them as well.
+ */
+ /* WaEnableGuCBootHashCheckNotSet:skl,bxt */
+ for (retries = 3; ; ) {
+ /*
+ * Always reset the GuC just before (re)loading, so
+ * that the state and timing are fairly predictable
+ */
+ err = i915_reset_guc(dev_priv);
+ if (err) {
+ DRM_ERROR("GuC reset failed, err %d\n", err);
+ goto fail;
+ }
+
+ err = guc_ucode_xfer(dev_priv);
+ if (!err)
+ break;
+
+ if (--retries == 0)
+ goto fail;
+
+ DRM_INFO("GuC fw load failed, err %d; will reset and "
+ "retry %d more time(s)\n", err, retries);
+ }
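
The loop above is the classic reset-then-retry pattern. A generic sketch of the same control flow, with do_reset()/do_load() standing in for i915_reset_guc()/guc_ucode_xfer():

/* retries/err mirror the variables in intel_guc_ucode_load() above. */
int load_with_retries(int (*do_reset)(void), int (*do_load)(void))
{
	int retries = 3, err;

	for (;;) {
		err = do_reset();	/* known state before every attempt */
		if (err)
			return err;	/* a failed reset is fatal */

		err = do_load();
		if (!err)
			return 0;	/* firmware accepted */

		if (--retries == 0)
			return err;	/* give up, report the last error */
	}
}

Resetting before every attempt, not just on retries, is the point: the load then always starts from predictable hardware state.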
guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
@@ -440,6 +482,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
return 0;
fail:
+ DRM_ERROR("GuC firmware load failed, err %d\n", err);
if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
@@ -595,8 +638,8 @@ void intel_guc_ucode_init(struct drm_device *dev)
fw_path = NULL;
} else if (IS_SKYLAKE(dev)) {
fw_path = I915_SKL_GUC_UCODE;
- guc_fw->guc_fw_major_wanted = 4;
- guc_fw->guc_fw_minor_wanted = 3;
+ guc_fw->guc_fw_major_wanted = 6;
+ guc_fw->guc_fw_minor_wanted = 1;
} else {
i915.enable_guc_submission = false;
fw_path = ""; /* unknown device */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 3ddb4fac5..a8844702d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -638,7 +638,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
- else if (HAS_PCH_SPLIT(dev_priv->dev))
+ else if (HAS_PCH_SPLIT(dev_priv))
reg = TVIDEO_DIP_GCP(crtc->pipe);
else
return false;
@@ -970,10 +970,9 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- if (HAS_PCH_SPLIT(dev_priv->dev))
- ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+
+ pipe_config->lane_count = 4;
}
static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
@@ -1375,6 +1374,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
/* Set user selected PAR to incoming mode's member */
adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
+ pipe_config->lane_count = 4;
+
return true;
}
@@ -1395,16 +1396,38 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
}
static void
-intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
+intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+ enum port port = hdmi_to_dig_port(hdmi)->port;
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
- if (type == DRM_DP_DUAL_MODE_NONE ||
- type == DRM_DP_DUAL_MODE_UNKNOWN)
+ /*
+ * Type 1 DVI adaptors are not required to implement any
+ * registers, so we can't always detect their presence.
+ * Ideally we should be able to check the state of the
+ * CONFIG1 pin, but no such luck on our hardware.
+ *
+ * The only method left to us is to check the VBT to see
+ * if the port is a dual mode capable DP port. But let's
+	 * only do that when we successfully read the EDID, to avoid
+ * confusing log messages about DP dual mode adaptors when
+ * there's nothing connected to the port.
+ */
+ if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
+ if (has_edid &&
+ intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
+ DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n");
+ type = DRM_DP_DUAL_MODE_TYPE1_DVI;
+ } else {
+ type = DRM_DP_DUAL_MODE_NONE;
+ }
+ }
+
+ if (type == DRM_DP_DUAL_MODE_NONE)
return;
hdmi->dp_dual_mode.type = type;
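
The detection fallback above can be read as a pure decision function. A sketch; the enum mirrors drm_dp_dual_mode_type and vbt_dual_mode stands in for intel_bios_is_port_dp_dual_mode():

enum dual_mode_sketch { DM_NONE, DM_TYPE1_DVI, DM_TYPE2, DM_UNKNOWN };

/* has_edid: EDID read succeeded; vbt_dual_mode: VBT marks the port. */
enum dual_mode_sketch resolve_dual_mode(enum dual_mode_sketch detected,
					int has_edid, int vbt_dual_mode)
{
	/* Type 1 DVI adaptors may expose no registers at all, so an
	 * UNKNOWN probe result is resolved from the VBT instead. */
	if (detected == DM_UNKNOWN)
		return (has_edid && vbt_dual_mode) ? DM_TYPE1_DVI : DM_NONE;
	return detected;
}

Gating the VBT fallback on a successful EDID read keeps disconnected ports from being misreported as dual mode adaptors.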
@@ -1431,7 +1454,7 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force)
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
- intel_hdmi_dp_dual_mode_detect(connector);
+ intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
}
@@ -2119,6 +2142,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
uint8_t alternate_ddc_pin;
+ DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
+ port_name(port));
+
if (WARN(intel_dig_port->max_lanes < 4,
"Not enough lanes (%d) for HDMI on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 52fbe530f..81de23098 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -124,7 +124,7 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
u32 val;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- if (!IS_PINEVIEW(dev_priv->dev))
+ if (!IS_PINEVIEW(dev_priv))
return;
val = I915_READ(DSPCLK_GATE_D);
@@ -264,7 +264,7 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
u32 gmbus2 = 0;
DEFINE_WAIT(wait);
- if (!HAS_GMBUS_IRQ(dev_priv->dev))
+ if (!HAS_GMBUS_IRQ(dev_priv))
gmbus4_irq_en = 0;
/* Important: The hw handles only the first bit, so set only one! Since
@@ -300,7 +300,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)
- if (!HAS_GMBUS_IRQ(dev_priv->dev))
+ if (!HAS_GMBUS_IRQ(dev_priv))
return wait_for(C, 10);
/* Important: The hw handles only the first bit, so set only one! */
@@ -571,15 +571,14 @@ clear_err:
goto out;
timeout:
- DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
- bus->adapter.name, bus->reg0 & 0xff);
+ DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+ bus->adapter.name, bus->reg0 & 0xff);
I915_WRITE(GMBUS0, 0);
/*
* Hardware may not support GMBUS over these pins? Try GPIO bitbanging
* instead. Use EAGAIN to have i2c core retry.
*/
- bus->force_bit = 1;
ret = -EAGAIN;
out:
@@ -597,10 +596,15 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
mutex_lock(&dev_priv->gmbus_mutex);
- if (bus->force_bit)
+ if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
- else
+ if (ret < 0)
+ bus->force_bit &= ~GMBUS_FORCE_BIT_RETRY;
+ } else {
ret = do_gmbus_xfer(adapter, msgs, num);
+ if (ret == -EAGAIN)
+ bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
+ }
mutex_unlock(&dev_priv->gmbus_mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
@@ -718,11 +722,16 @@ void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
+ struct drm_i915_private *dev_priv = bus->dev_priv;
+
+ mutex_lock(&dev_priv->gmbus_mutex);
bus->force_bit += force_bit ? 1 : -1;
DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
force_bit ? "en" : "dis", adapter->name,
bus->force_bit);
+
+ mutex_unlock(&dev_priv->gmbus_mutex);
}
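
After this change force_bit doubles as a reference count (explicit user requests in the low bits) and a retry flag (automatic fallback after a GMBUS timeout). A sketch of that bookkeeping; the 1U << 31 position for GMBUS_FORCE_BIT_RETRY is assumed for illustration:

#include <stdint.h>

#define FORCE_BIT_RETRY	(1U << 31)	/* assumed GMBUS_FORCE_BIT_RETRY */

/* Low bits count explicit intel_gmbus_force_bit() requests. */
uint32_t force_bit_sketch;

void gmbus_xfer_failed(void)	/* do_gmbus_xfer() returned -EAGAIN */
{
	force_bit_sketch |= FORCE_BIT_RETRY;	/* fall back to bit-banging */
}

void bitbang_xfer_failed(void)	/* i2c_bit_algo transfer failed too */
{
	force_bit_sketch &= ~FORCE_BIT_RETRY;	/* allow GMBUS again */
}

Because the flag is cleared when bit-banging itself fails, a transient GMBUS glitch no longer condemns the bus to bit-banging forever.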
void intel_teardown_gmbus(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 5c6080fd0..7f2d8415e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -131,6 +131,7 @@
* preemption, but just sampling the new tail pointer).
*
*/
+#include <linux/interrupt.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
@@ -228,9 +229,6 @@ enum {
static int intel_lr_context_pin(struct intel_context *ctx,
struct intel_engine_cs *engine);
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
- struct drm_i915_gem_object *default_ctx_obj);
-
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@@ -266,20 +264,23 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
}
static void
-logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
+logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
+
+ if (IS_GEN8(dev) || IS_GEN9(dev))
+ engine->idle_lite_restore_wa = ~0;
- ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+ engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
- (ring->id == VCS || ring->id == VCS2);
+ (engine->id == VCS || engine->id == VCS2);
- ring->ctx_desc_template = GEN8_CTX_VALID;
- ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
+ engine->ctx_desc_template = GEN8_CTX_VALID;
+ engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
if (IS_GEN8(dev))
- ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
- ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
+ engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
+ engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
/* TODO: WaDisableLiteRestore when we start using semaphore
* signalling between Command Streamers */
@@ -287,8 +288,8 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
/* WaEnableForceRestoreInCtxtDescForVCS:skl */
/* WaEnableForceRestoreInCtxtDescForVCS:bxt */
- if (ring->disable_lite_restore_wa)
- ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
+ if (engine->disable_lite_restore_wa)
+ engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
}
/**
@@ -311,24 +312,24 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
*/
static void
intel_lr_context_descriptor_update(struct intel_context *ctx,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
uint64_t lrca, desc;
- lrca = ctx->engine[ring->id].lrc_vma->node.start +
+ lrca = ctx->engine[engine->id].lrc_vma->node.start +
LRC_PPHWSP_PN * PAGE_SIZE;
- desc = ring->ctx_desc_template; /* bits 0-11 */
+ desc = engine->ctx_desc_template; /* bits 0-11 */
desc |= lrca; /* bits 12-31 */
desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
- ctx->engine[ring->id].lrc_desc = desc;
+ ctx->engine[engine->id].lrc_desc = desc;
}
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
- return ctx->engine[ring->id].lrc_desc;
+ return ctx->engine[engine->id].lrc_desc;
}
/**
@@ -348,98 +349,103 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
* Return: 20-bits globally unique context ID.
*/
u32 intel_execlists_ctx_id(struct intel_context *ctx,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
- return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
+ return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
}
static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
struct drm_i915_gem_request *rq1)
{
- struct intel_engine_cs *ring = rq0->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = rq0->engine;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint64_t desc[2];
if (rq1) {
- desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
+ desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
rq1->elsp_submitted++;
} else {
desc[1] = 0;
}
- desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
+ desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
rq0->elsp_submitted++;
/* You must always write both descriptors in the order below. */
- spin_lock(&dev_priv->uncore.lock);
- intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
- I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
- I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
+ I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
+ I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
- I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
+ I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
/* The context is automatically loaded after the following */
- I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
+ I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
/* ELSP is a wo register, use another nearby reg for posting */
- POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
- intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
- spin_unlock(&dev_priv->uncore.lock);
+ POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
}
-static int execlists_update_context(struct drm_i915_gem_request *rq)
+static void
+execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
{
- struct intel_engine_cs *ring = rq->ring;
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
+ ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+}
+
+static void execlists_update_context(struct drm_i915_gem_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
- uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
+ uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = rq->tail;
- if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
- /* True 32b PPGTT with dynamic page allocation: update PDP
- * registers and point the unallocated PDPs to scratch page.
- * PML4 is allocated during ppgtt init, so this is not needed
- * in 48-bit mode.
- */
- ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
- }
-
- return 0;
+ /* True 32b PPGTT with dynamic page allocation: update PDP
+ * registers and point the unallocated PDPs to scratch page.
+ * PML4 is allocated during ppgtt init, so this is not needed
+ * in 48-bit mode.
+ */
+ if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+ execlists_update_context_pdps(ppgtt, reg_state);
}
static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
struct drm_i915_gem_request *rq1)
{
+ struct drm_i915_private *dev_priv = rq0->i915;
+ unsigned int fw_domains = rq0->engine->fw_domains;
+
execlists_update_context(rq0);
if (rq1)
execlists_update_context(rq1);
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+
execlists_elsp_write(rq0, rq1);
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
+ spin_unlock_irq(&dev_priv->uncore.lock);
}
-static void execlists_context_unqueue(struct intel_engine_cs *ring)
+static void execlists_context_unqueue(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
- struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
+ struct drm_i915_gem_request *cursor, *tmp;
- assert_spin_locked(&ring->execlist_lock);
+ assert_spin_locked(&engine->execlist_lock);
/*
* If irqs are not active generate a warning as batches that finish
* without the irqs may get lost and a GPU Hang may occur.
*/
- WARN_ON(!intel_irqs_enabled(ring->dev->dev_private));
-
- if (list_empty(&ring->execlist_queue))
- return;
+ WARN_ON(!intel_irqs_enabled(engine->dev->dev_private));
/* Try to read in pairs */
- list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
+ list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
execlist_link) {
if (!req0) {
req0 = cursor;
@@ -448,172 +454,179 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
* will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
list_move_tail(&req0->execlist_link,
- &ring->execlist_retired_req_list);
+ &engine->execlist_retired_req_list);
req0 = cursor;
} else {
req1 = cursor;
+ WARN_ON(req1->elsp_submitted);
break;
}
}
- if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) {
+ if (unlikely(!req0))
+ return;
+
+ if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
/*
- * WaIdleLiteRestore: make sure we never cause a lite
- * restore with HEAD==TAIL
+ * WaIdleLiteRestore: make sure we never cause a lite restore
+ * with HEAD==TAIL.
+ *
+ * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we
+ * resubmit the request. See gen8_emit_request() for where we
+ * prepare the padding after the end of the request.
*/
- if (req0->elsp_submitted) {
- /*
- * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL
- * as we resubmit the request. See gen8_emit_request()
- * for where we prepare the padding after the end of the
- * request.
- */
- struct intel_ringbuffer *ringbuf;
-
- ringbuf = req0->ctx->engine[ring->id].ringbuf;
- req0->tail += 8;
- req0->tail &= ringbuf->size - 1;
- }
- }
+ struct intel_ringbuffer *ringbuf;
- WARN_ON(req1 && req1->elsp_submitted);
+ ringbuf = req0->ctx->engine[engine->id].ringbuf;
+ req0->tail += 8;
+ req0->tail &= ringbuf->size - 1;
+ }
execlists_submit_requests(req0, req1);
}
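
The tail bump in the WaIdleLiteRestore path above relies on the ring size being a power of two, so the wrap is a mask rather than a divide. A quick standalone check of that arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t size = 4096;		/* ringbuf->size, a power of two */
	uint32_t tail = size - 4;	/* request ends near the ring's end */

	tail += 8;		/* skip over the WA NOOP padding */
	tail &= size - 1;	/* wrap with a mask, no divide needed */

	assert(tail == 4);
	return 0;
}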
-static bool execlists_check_remove_request(struct intel_engine_cs *ring,
- u32 request_id)
+static unsigned int
+execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
{
struct drm_i915_gem_request *head_req;
- assert_spin_locked(&ring->execlist_lock);
+ assert_spin_locked(&engine->execlist_lock);
- head_req = list_first_entry_or_null(&ring->execlist_queue,
+ head_req = list_first_entry_or_null(&engine->execlist_queue,
struct drm_i915_gem_request,
execlist_link);
- if (head_req != NULL) {
- if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) {
- WARN(head_req->elsp_submitted == 0,
- "Never submitted head request\n");
+ if (!head_req)
+ return 0;
- if (--head_req->elsp_submitted <= 0) {
- list_move_tail(&head_req->execlist_link,
- &ring->execlist_retired_req_list);
- return true;
- }
- }
- }
+ if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
+ return 0;
+
+ WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
+
+ if (--head_req->elsp_submitted > 0)
+ return 0;
+
+ list_move_tail(&head_req->execlist_link,
+ &engine->execlist_retired_req_list);
- return false;
+ return 1;
}
-static void get_context_status(struct intel_engine_cs *ring,
- u8 read_pointer,
- u32 *status, u32 *context_id)
+static u32
+get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
+ u32 *context_id)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ u32 status;
- if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES))
- return;
+ read_pointer %= GEN8_CSB_ENTRIES;
+
+ status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer));
+
+ if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+ return 0;
+
+ *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine,
+ read_pointer));
- *status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
- *context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer));
+ return status;
}
/**
* intel_lrc_irq_handler() - handle Context Switch interrupts
- * @ring: Engine Command Streamer to handle.
+ * @engine: Engine Command Streamer to handle.
*
* Check the unread Context Status Buffers and manage the submission of new
* contexts to the ELSP accordingly.
*/
-void intel_lrc_irq_handler(struct intel_engine_cs *ring)
+static void intel_lrc_irq_handler(unsigned long data)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
u32 status_pointer;
- u8 read_pointer;
- u8 write_pointer;
- u32 status = 0;
- u32 status_id;
- u32 submit_contexts = 0;
+ unsigned int read_pointer, write_pointer;
+ u32 csb[GEN8_CSB_ENTRIES][2];
+ unsigned int csb_read = 0, i;
+ unsigned int submit_contexts = 0;
+
+ intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
- status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+ status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
- read_pointer = ring->next_context_status_buffer;
+ read_pointer = engine->next_context_status_buffer;
write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
if (read_pointer > write_pointer)
write_pointer += GEN8_CSB_ENTRIES;
- spin_lock(&ring->execlist_lock);
-
while (read_pointer < write_pointer) {
+ if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES))
+ break;
+ csb[csb_read][0] = get_context_status(engine, ++read_pointer,
+ &csb[csb_read][1]);
+ csb_read++;
+ }
- get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES,
- &status, &status_id);
+ engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
- if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
- continue;
+ /* Update the read pointer to the old write pointer. Manual ringbuffer
+ * management ftw </sarcasm> */
+ I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine),
+ _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+ engine->next_context_status_buffer << 8));
- if (status & GEN8_CTX_STATUS_PREEMPTED) {
- if (status & GEN8_CTX_STATUS_LITE_RESTORE) {
- if (execlists_check_remove_request(ring, status_id))
+ intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
+
+ spin_lock(&engine->execlist_lock);
+
+ for (i = 0; i < csb_read; i++) {
+ if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) {
+ if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) {
+ if (execlists_check_remove_request(engine, csb[i][1]))
WARN(1, "Lite Restored request removed from queue\n");
} else
WARN(1, "Preemption without Lite Restore\n");
}
- if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
- (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
- if (execlists_check_remove_request(ring, status_id))
- submit_contexts++;
- }
+ if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE |
+ GEN8_CTX_STATUS_ELEMENT_SWITCH))
+ submit_contexts +=
+ execlists_check_remove_request(engine, csb[i][1]);
}
- if (ring->disable_lite_restore_wa) {
- /* Prevent a ctx to preempt itself */
- if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
- (submit_contexts != 0))
- execlists_context_unqueue(ring);
- } else if (submit_contexts != 0) {
- execlists_context_unqueue(ring);
+ if (submit_contexts) {
+ if (!engine->disable_lite_restore_wa ||
+ (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
+ execlists_context_unqueue(engine);
}
- spin_unlock(&ring->execlist_lock);
+ spin_unlock(&engine->execlist_lock);
if (unlikely(submit_contexts > 2))
DRM_ERROR("More than two context complete events?\n");
-
- ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
-
- /* Update the read pointer to the old write pointer. Manual ringbuffer
- * management ftw </sarcasm> */
- I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
- _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
- ring->next_context_status_buffer << 8));
}
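
The CSB walk above unwraps the write pointer once before looping, then folds each index back modulo the buffer size. A sketch assuming the six-entry buffer (GEN8_CSB_ENTRIES == 6) that the modulo arithmetic implies:

#include <stdio.h>

#define CSB_ENTRIES 6	/* assumed GEN8_CSB_ENTRIES */

int main(void)
{
	unsigned int read_pointer = 4;	/* engine->next_context_status_buffer */
	unsigned int write_pointer = 1;	/* from RING_CONTEXT_STATUS_PTR */

	/* Unwrap once so the loop walks forward monotonically. */
	if (read_pointer > write_pointer)
		write_pointer += CSB_ENTRIES;

	while (read_pointer < write_pointer)
		printf("consume CSB entry %u\n", ++read_pointer % CSB_ENTRIES);

	return 0;
}

Here entries 5, 0 and 1 are consumed in order; buffering them into the local csb[] array first is what lets the handler drop forcewake before taking the execlist lock.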
-static int execlists_context_queue(struct drm_i915_gem_request *request)
+static void execlists_context_queue(struct drm_i915_gem_request *request)
{
- struct intel_engine_cs *ring = request->ring;
+ struct intel_engine_cs *engine = request->engine;
struct drm_i915_gem_request *cursor;
int num_elements = 0;
if (request->ctx != request->i915->kernel_context)
- intel_lr_context_pin(request->ctx, ring);
+ intel_lr_context_pin(request->ctx, engine);
i915_gem_request_reference(request);
- spin_lock_irq(&ring->execlist_lock);
+ spin_lock_bh(&engine->execlist_lock);
- list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
+ list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
if (++num_elements > 2)
break;
if (num_elements > 2) {
struct drm_i915_gem_request *tail_req;
- tail_req = list_last_entry(&ring->execlist_queue,
+ tail_req = list_last_entry(&engine->execlist_queue,
struct drm_i915_gem_request,
execlist_link);
@@ -621,41 +634,39 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
list_move_tail(&tail_req->execlist_link,
- &ring->execlist_retired_req_list);
+ &engine->execlist_retired_req_list);
}
}
- list_add_tail(&request->execlist_link, &ring->execlist_queue);
+ list_add_tail(&request->execlist_link, &engine->execlist_queue);
if (num_elements == 0)
- execlists_context_unqueue(ring);
-
- spin_unlock_irq(&ring->execlist_lock);
+ execlists_context_unqueue(engine);
- return 0;
+ spin_unlock_bh(&engine->execlist_lock);
}
static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
uint32_t flush_domains;
int ret;
flush_domains = 0;
- if (ring->gpu_caches_dirty)
+ if (engine->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
- ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+ ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
- ring->gpu_caches_dirty = false;
+ engine->gpu_caches_dirty = false;
return 0;
}
static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- const unsigned other_rings = ~intel_ring_flag(req->ring);
+ const unsigned other_rings = ~intel_engine_flag(req->engine);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -665,7 +676,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
struct drm_i915_gem_object *obj = vma->obj;
if (obj->active & other_rings) {
- ret = i915_gem_object_sync(obj, req->ring, &req);
+ ret = i915_gem_object_sync(obj, req->engine, &req);
if (ret)
return ret;
}
@@ -689,7 +700,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
{
int ret = 0;
- request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
+ request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
if (i915.enable_guc_submission) {
/*
@@ -705,53 +716,11 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
}
if (request->ctx != request->i915->kernel_context)
- ret = intel_lr_context_pin(request->ctx, request->ring);
+ ret = intel_lr_context_pin(request->ctx, request->engine);
return ret;
}
-static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
- int bytes)
-{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
- struct intel_engine_cs *ring = req->ring;
- struct drm_i915_gem_request *target;
- unsigned space;
- int ret;
-
- if (intel_ring_space(ringbuf) >= bytes)
- return 0;
-
- /* The whole point of reserving space is to not wait! */
- WARN_ON(ringbuf->reserved_in_use);
-
- list_for_each_entry(target, &ring->request_list, list) {
- /*
- * The request queue is per-engine, so can contain requests
- * from multiple ringbuffers. Here, we must ignore any that
- * aren't from the ringbuffer we're considering.
- */
- if (target->ringbuf != ringbuf)
- continue;
-
- /* Would completion of this request free enough space? */
- space = __intel_ring_space(target->postfix, ringbuf->tail,
- ringbuf->size);
- if (space >= bytes)
- break;
- }
-
- if (WARN_ON(&target->list == &ring->request_list))
- return -ENOSPC;
-
- ret = i915_wait_request(target);
- if (ret)
- return ret;
-
- ringbuf->space = space;
- return 0;
-}
-
/*
* intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
* @request: Request to advance the logical ringbuffer of.
@@ -766,7 +735,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct drm_i915_private *dev_priv = request->i915;
- struct intel_engine_cs *engine = request->ring;
+ struct intel_engine_cs *engine = request->engine;
intel_logical_ring_advance(ringbuf);
request->tail = ringbuf->tail;
@@ -781,7 +750,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf, MI_NOOP);
intel_logical_ring_advance(ringbuf);
- if (intel_ring_stopped(engine))
+ if (intel_engine_stopped(engine))
return 0;
if (engine->last_context != request->ctx) {
@@ -803,101 +772,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
return 0;
}
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
- uint32_t __iomem *virt;
- int rem = ringbuf->size - ringbuf->tail;
-
- virt = ringbuf->virtual_start + ringbuf->tail;
- rem /= 4;
- while (rem--)
- iowrite32(MI_NOOP, virt++);
-
- ringbuf->tail = 0;
- intel_ring_update_space(ringbuf);
-}
-
-static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
-{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
- int remain_usable = ringbuf->effective_size - ringbuf->tail;
- int remain_actual = ringbuf->size - ringbuf->tail;
- int ret, total_bytes, wait_bytes = 0;
- bool need_wrap = false;
-
- if (ringbuf->reserved_in_use)
- total_bytes = bytes;
- else
- total_bytes = bytes + ringbuf->reserved_size;
-
- if (unlikely(bytes > remain_usable)) {
- /*
- * Not enough space for the basic request. So need to flush
- * out the remainder and then wait for base + reserved.
- */
- wait_bytes = remain_actual + total_bytes;
- need_wrap = true;
- } else {
- if (unlikely(total_bytes > remain_usable)) {
- /*
- * The base request will fit but the reserved space
- * falls off the end. So don't need an immediate wrap
- * and only need to effectively wait for the reserved
- * size space from the start of ringbuffer.
- */
- wait_bytes = remain_actual + ringbuf->reserved_size;
- } else if (total_bytes > ringbuf->space) {
- /* No wrapping required, just waiting. */
- wait_bytes = total_bytes;
- }
- }
-
- if (wait_bytes) {
- ret = logical_ring_wait_for_space(req, wait_bytes);
- if (unlikely(ret))
- return ret;
-
- if (need_wrap)
- __wrap_ring_buffer(ringbuf);
- }
-
- return 0;
-}
-
-/**
- * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
- *
- * @req: The request to start some new work for
- * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
- *
- * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
- * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
- * and also preallocates a request (every workload submission is still mediated through
- * requests, same as it did with legacy ringbuffer submission).
- *
- * Return: non-zero if the ringbuffer is not ready to be written to.
- */
-int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
-{
- struct drm_i915_private *dev_priv;
- int ret;
-
- WARN_ON(req == NULL);
- dev_priv = req->ring->dev->dev_private;
-
- ret = i915_gem_check_wedge(&dev_priv->gpu_error,
- dev_priv->mm.interruptible);
- if (ret)
- return ret;
-
- ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
- if (ret)
- return ret;
-
- req->ringbuf->space -= num_dwords * sizeof(uint32_t);
- return 0;
-}
-
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
{
/*
@@ -910,7 +784,7 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
*/
intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
- return intel_logical_ring_begin(request, 0);
+ return intel_ring_begin(request, 0);
}
/**
@@ -935,9 +809,9 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
struct list_head *vmas)
{
struct drm_device *dev = params->dev;
- struct intel_engine_cs *ring = params->ring;
+ struct intel_engine_cs *engine = params->engine;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf;
+ struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
u64 exec_start;
int instp_mode;
u32 instp_mask;
@@ -949,7 +823,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+ if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
return -EINVAL;
}
@@ -978,9 +852,9 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
if (ret)
return ret;
- if (ring == &dev_priv->ring[RCS] &&
+ if (engine == &dev_priv->engine[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
- ret = intel_logical_ring_begin(params->request, 4);
+ ret = intel_ring_begin(params->request, 4);
if (ret)
return ret;
@@ -996,116 +870,116 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
exec_start = params->batch_obj_vm_offset +
args->batch_start_offset;
- ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
+ ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
if (ret)
return ret;
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
i915_gem_execbuffer_move_to_active(vmas, params->request);
- i915_gem_execbuffer_retire_commands(params);
return 0;
}
-void intel_execlists_retire_requests(struct intel_engine_cs *ring)
+void intel_execlists_retire_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req, *tmp;
struct list_head retired_list;
- WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
- if (list_empty(&ring->execlist_retired_req_list))
+ WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
+ if (list_empty(&engine->execlist_retired_req_list))
return;
INIT_LIST_HEAD(&retired_list);
- spin_lock_irq(&ring->execlist_lock);
- list_replace_init(&ring->execlist_retired_req_list, &retired_list);
- spin_unlock_irq(&ring->execlist_lock);
+ spin_lock_bh(&engine->execlist_lock);
+ list_replace_init(&engine->execlist_retired_req_list, &retired_list);
+ spin_unlock_bh(&engine->execlist_lock);
list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
struct intel_context *ctx = req->ctx;
struct drm_i915_gem_object *ctx_obj =
- ctx->engine[ring->id].state;
+ ctx->engine[engine->id].state;
if (ctx_obj && (ctx != req->i915->kernel_context))
- intel_lr_context_unpin(ctx, ring);
+ intel_lr_context_unpin(ctx, engine);
list_del(&req->execlist_link);
i915_gem_request_unreference(req);
}
}
-void intel_logical_ring_stop(struct intel_engine_cs *ring)
+void intel_logical_ring_stop(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
int ret;
- if (!intel_ring_initialized(ring))
+ if (!intel_engine_initialized(engine))
return;
- ret = intel_ring_idle(ring);
- if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+ ret = intel_engine_idle(engine);
+ if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
- ring->name, ret);
+ engine->name, ret);
/* TODO: Is this correct with Execlists enabled? */
- I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
- if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
- DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+ I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
+ if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+ DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
return;
}
- I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+ I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
- if (!ring->gpu_caches_dirty)
+ if (!engine->gpu_caches_dirty)
return 0;
- ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
+ ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
- ring->gpu_caches_dirty = false;
+ engine->gpu_caches_dirty = false;
return 0;
}
static int intel_lr_context_do_pin(struct intel_context *ctx,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
- struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
- struct page *lrc_state_page;
- uint32_t *lrc_reg_state;
+ struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+ struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
+ void *vaddr;
+ u32 *lrc_reg_state;
int ret;
- WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret)
return ret;
- lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
- if (WARN_ON(!lrc_state_page)) {
- ret = -ENODEV;
+ vaddr = i915_gem_object_pin_map(ctx_obj);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
goto unpin_ctx_obj;
}
- ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+ lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+
+ ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
if (ret)
- goto unpin_ctx_obj;
+ goto unpin_map;
- ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
- intel_lr_context_descriptor_update(ctx, ring);
- lrc_reg_state = kmap(lrc_state_page);
+ ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+ intel_lr_context_descriptor_update(ctx, engine);
lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
- ctx->engine[ring->id].lrc_reg_state = lrc_reg_state;
+ ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
ctx_obj->dirty = true;
/* Invalidate GuC TLB. */
@@ -1114,6 +988,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
return ret;
+unpin_map:
+ i915_gem_object_unpin_map(ctx_obj);
unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj);
@@ -1146,7 +1022,7 @@ void intel_lr_context_unpin(struct intel_context *ctx,
WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
if (--ctx->engine[engine->id].pin_count == 0) {
- kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
+ i915_gem_object_unpin_map(ctx_obj);
intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
ctx->engine[engine->id].lrc_vma = NULL;
@@ -1160,21 +1036,21 @@ void intel_lr_context_unpin(struct intel_context *ctx,
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct intel_ringbuffer *ringbuf = req->ringbuf;
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
if (w->count == 0)
return 0;
- ring->gpu_caches_dirty = true;
+ engine->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(req);
if (ret)
return ret;
- ret = intel_logical_ring_begin(req, w->count * 2 + 2);
+ ret = intel_ring_begin(req, w->count * 2 + 2);
if (ret)
return ret;
@@ -1187,7 +1063,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
intel_logical_ring_advance(ringbuf);
- ring->gpu_caches_dirty = true;
+ engine->gpu_caches_dirty = true;
ret = logical_ring_flush_all_caches(req);
if (ret)
return ret;
@@ -1223,25 +1099,27 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
* This WA is also required for Gen9 so extracting as a function avoids
* code duplication.
*/
-static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
+static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t *const batch,
uint32_t index)
{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
- * WaDisableLSQCROPERFforOCL:skl
+ * WaDisableLSQCROPERFforOCL:skl,kbl
* This WA is implemented in skl_init_clock_gating() but since
* this batch updates GEN8_L3SQCREG4 with default value we need to
* set this bit here to retain the WA during flush.
*/
- if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0))
+ if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
+ IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
- wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+ wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
wa_ctx_emit(batch, index, 0);
wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
@@ -1259,7 +1137,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring,
wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
- wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);
+ wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256);
wa_ctx_emit(batch, index, 0);
return index;
@@ -1312,7 +1190,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
* Return: non-zero if we exceed the PAGE_SIZE limit.
*/
-static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
+static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
struct i915_wa_ctx_bb *wa_ctx,
uint32_t *const batch,
uint32_t *offset)
@@ -1324,8 +1202,8 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
- if (IS_BROADWELL(ring->dev)) {
- int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+ if (IS_BROADWELL(engine->dev)) {
+ int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
if (rc < 0)
return rc;
index = rc;
@@ -1333,7 +1211,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
/* Actual scratch location is at 128 bytes offset */
- scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES;
+ scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
@@ -1375,7 +1253,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
* This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
* to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
*/
-static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
+static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
struct i915_wa_ctx_bb *wa_ctx,
uint32_t *const batch,
uint32_t *offset)
@@ -1390,13 +1268,14 @@ static int gen8_init_perctx_bb(struct intel_engine_cs *ring,
return wa_ctx_end(wa_ctx, *offset = index, 1);
}
-static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
+static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
struct i915_wa_ctx_bb *wa_ctx,
uint32_t *const batch,
uint32_t *offset)
{
int ret;
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaDisableCtxRestoreArbitration:skl,bxt */
@@ -1405,11 +1284,27 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
- ret = gen8_emit_flush_coherentl3_wa(ring, batch, index);
+ ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
if (ret < 0)
return ret;
index = ret;
+ /* WaClearSlmSpaceAtContextSwitch:kbl */
+ /* Actual scratch location is at 128 bytes offset */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
+ uint32_t scratch_addr
+ = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+ wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+ wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE));
+ wa_ctx_emit(batch, index, scratch_addr);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ wa_ctx_emit(batch, index, 0);
+ }
/* Pad to end of cacheline */
while (index % CACHELINE_DWORDS)
wa_ctx_emit(batch, index, MI_NOOP);
@@ -1417,12 +1312,12 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring,
return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
}
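Two details of gen9_init_indirectctx_bb() above are worth spelling out: the KBL SLM-clear workaround is an ordinary six-dword PIPE_CONTROL (header, flags, address, then three zero dwords), and the MI_NOOP loop pads the batch to a cacheline boundary, CACHELINE_DWORDS being 16 (64 bytes / sizeof(u32)). A sketch of the emitted dwords, with the flag names taken from the code above:

/* The six dwords of the WaClearSlmSpaceAtContextSwitch PIPE_CONTROL: */
uint32_t pc[6] = {
	GFX_OP_PIPE_CONTROL(6),			/* 6-dword PIPE_CONTROL header */
	PIPE_CONTROL_FLUSH_L3 | PIPE_CONTROL_GLOBAL_GTT_IVB |
	PIPE_CONTROL_CS_STALL | PIPE_CONTROL_QW_WRITE,
	scratch_addr,				/* the QW write lands here */
	0, 0, 0,				/* upper address + immediate */
};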
-static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
+static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
struct i915_wa_ctx_bb *wa_ctx,
uint32_t *const batch,
uint32_t *offset)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
@@ -1435,6 +1330,25 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
wa_ctx_emit(batch, index, MI_NOOP);
}
+ /* WaClearTdlStateAckDirtyBits:bxt */
+ if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
+ wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
+
+ wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
+ wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+ wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
+ wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+ wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
+ wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
+
+ wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
+ /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
+ wa_ctx_emit(batch, index, 0x0);
+ wa_ctx_emit(batch, index, MI_NOOP);
+ }
+
/* WaDisableCtxRestoreArbitration:skl,bxt */
if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
IS_BXT_REVID(dev, 0, BXT_REVID_A1))
@@ -1445,60 +1359,61 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring,
return wa_ctx_end(wa_ctx, *offset = index, 1);
}
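The GEN8_STATE_ACK/GEN9_STATE_ACK_SLICE* writes above use i915's masked-register convention: the upper 16 bits of the written value select which of the lower 16 bits take effect, which is also why the GEN7_ROW_CHICKEN2 write with all mask bits zero is a harmless dummy. The helpers behave along these lines (semantic sketch, not the exact i915_reg.h text):

/* Masked registers: high half = write-enable mask, low half = value. */
#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit(s) a   */
#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit(s) a */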
-static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size)
+static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
{
int ret;
- ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size));
- if (!ring->wa_ctx.obj) {
+ engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev,
+ PAGE_ALIGN(size));
+ if (!engine->wa_ctx.obj) {
DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
return -ENOMEM;
}
- ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0);
+ ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
if (ret) {
DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
ret);
- drm_gem_object_unreference(&ring->wa_ctx.obj->base);
+ drm_gem_object_unreference(&engine->wa_ctx.obj->base);
return ret;
}
return 0;
}
-static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring)
+static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
{
- if (ring->wa_ctx.obj) {
- i915_gem_object_ggtt_unpin(ring->wa_ctx.obj);
- drm_gem_object_unreference(&ring->wa_ctx.obj->base);
- ring->wa_ctx.obj = NULL;
+ if (engine->wa_ctx.obj) {
+ i915_gem_object_ggtt_unpin(engine->wa_ctx.obj);
+ drm_gem_object_unreference(&engine->wa_ctx.obj->base);
+ engine->wa_ctx.obj = NULL;
}
}
-static int intel_init_workaround_bb(struct intel_engine_cs *ring)
+static int intel_init_workaround_bb(struct intel_engine_cs *engine)
{
int ret;
uint32_t *batch;
uint32_t offset;
struct page *page;
- struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+ struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
- WARN_ON(ring->id != RCS);
+ WARN_ON(engine->id != RCS);
/* update this when WA for higher Gen are added */
- if (INTEL_INFO(ring->dev)->gen > 9) {
+ if (INTEL_INFO(engine->dev)->gen > 9) {
DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
- INTEL_INFO(ring->dev)->gen);
+ INTEL_INFO(engine->dev)->gen);
return 0;
}
/* some WA perform writes to scratch page, ensure it is valid */
- if (ring->scratch.obj == NULL) {
- DRM_ERROR("scratch page not allocated for %s\n", ring->name);
+ if (engine->scratch.obj == NULL) {
+ DRM_ERROR("scratch page not allocated for %s\n", engine->name);
return -EINVAL;
}
- ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE);
+ ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
if (ret) {
DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
return ret;
@@ -1508,29 +1423,29 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
batch = kmap_atomic(page);
offset = 0;
- if (INTEL_INFO(ring->dev)->gen == 8) {
- ret = gen8_init_indirectctx_bb(ring,
+ if (INTEL_INFO(engine->dev)->gen == 8) {
+ ret = gen8_init_indirectctx_bb(engine,
&wa_ctx->indirect_ctx,
batch,
&offset);
if (ret)
goto out;
- ret = gen8_init_perctx_bb(ring,
+ ret = gen8_init_perctx_bb(engine,
&wa_ctx->per_ctx,
batch,
&offset);
if (ret)
goto out;
- } else if (INTEL_INFO(ring->dev)->gen == 9) {
- ret = gen9_init_indirectctx_bb(ring,
+ } else if (INTEL_INFO(engine->dev)->gen == 9) {
+ ret = gen9_init_indirectctx_bb(engine,
&wa_ctx->indirect_ctx,
batch,
&offset);
if (ret)
goto out;
- ret = gen9_init_perctx_bb(ring,
+ ret = gen9_init_perctx_bb(engine,
&wa_ctx->per_ctx,
batch,
&offset);
@@ -1541,27 +1456,36 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring)
out:
kunmap_atomic(batch);
if (ret)
- lrc_destroy_wa_ctx_obj(ring);
+ lrc_destroy_wa_ctx_obj(engine);
return ret;
}
-static int gen8_init_common_ring(struct intel_engine_cs *ring)
+static void lrc_init_hws(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+ I915_WRITE(RING_HWS_PGA(engine->mmio_base),
+ (u32)engine->status_page.gfx_addr);
+ POSTING_READ(RING_HWS_PGA(engine->mmio_base));
+}
+
+static int gen8_init_common_ring(struct intel_engine_cs *engine)
+{
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u8 next_context_status_buffer_hw;
+ unsigned int next_context_status_buffer_hw;
- lrc_setup_hardware_status_page(ring,
- dev_priv->kernel_context->engine[ring->id].state);
+ lrc_init_hws(engine);
- I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
- I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
- I915_WRITE(RING_MODE_GEN7(ring),
+ I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
- POSTING_READ(RING_MODE_GEN7(ring));
+ POSTING_READ(RING_MODE_GEN7(engine));
/*
* Instead of resetting the Context Status Buffer (CSB) read pointer to
@@ -1576,7 +1500,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
* BXT | ? | ? |
*/
next_context_status_buffer_hw =
- GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring)));
+ GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine)));
/*
* When the CSB registers are reset (also after power-up / gpu reset),
@@ -1586,21 +1510,21 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
- ring->next_context_status_buffer = next_context_status_buffer_hw;
- DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
+ engine->next_context_status_buffer = next_context_status_buffer_hw;
+ DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
- memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+ intel_engine_init_hangcheck(engine);
- return 0;
+ return intel_mocs_init_engine(engine);
}
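A worked reading of the CSB pointer recovery in gen8_init_common_ring(), assuming the intel_lrc.h values GEN8_CSB_ENTRIES == 6 and GEN8_CSB_PTR_MASK == 0x07:

/* After a reset the status-pointer register reads back all ones, so the
 * masked write pointer equals GEN8_CSB_PTR_MASK (0x7), an out-of-range
 * value for a 6-entry buffer.  Starting from entry 5 (GEN8_CSB_ENTRIES - 1)
 * makes the first context-switch event consumed be entry 0. */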
-static int gen8_init_render_ring(struct intel_engine_cs *ring)
+static int gen8_init_render_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = gen8_init_common_ring(ring);
+ ret = gen8_init_common_ring(engine);
if (ret)
return ret;
@@ -1614,29 +1538,29 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
- return init_workarounds_ring(ring);
+ return init_workarounds_ring(engine);
}
-static int gen9_init_render_ring(struct intel_engine_cs *ring)
+static int gen9_init_render_ring(struct intel_engine_cs *engine)
{
int ret;
- ret = gen8_init_common_ring(ring);
+ ret = gen8_init_common_ring(engine);
if (ret)
return ret;
- return init_workarounds_ring(ring);
+ return init_workarounds_ring(engine);
}
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
{
struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
struct intel_ringbuffer *ringbuf = req->ringbuf;
const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
int i, ret;
- ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
+ ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
if (ret)
return ret;
@@ -1644,9 +1568,11 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+ intel_logical_ring_emit_reg(ringbuf,
+ GEN8_RING_PDP_UDW(engine, i));
intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
- intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+ intel_logical_ring_emit_reg(ringbuf,
+ GEN8_RING_PDP_LDW(engine, i));
intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
}
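The reservation in intel_logical_ring_emit_pdps() works out as follows, assuming GEN8_LEGACY_PDPES == 4 (the legacy 32-bit PPGTT case):

/* Worked dword count for the PDP restore above: */
int num_lri_cmds = GEN8_LEGACY_PDPES * 2;	/* 8 regs: 4 x {UDW, LDW}      */
int dwords = 1				/* MI_LOAD_REGISTER_IMM header */
	   + num_lri_cmds * 2		/* 8 x (register, value)       */
	   + 1;				/* trailing MI_NOOP            */
/* dwords == 18 == num_lri_cmds * 2 + 2, matching intel_ring_begin(). */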
@@ -1670,7 +1596,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
* not idle). PML4 is allocated during ppgtt init so this is
* not needed in 48-bit.*/
if (req->ctx->ppgtt &&
- (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
+ (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
if (!USES_FULL_48BIT_PPGTT(req->i915) &&
!intel_vgpu_active(req->i915->dev)) {
ret = intel_logical_ring_emit_pdps(req);
@@ -1678,10 +1604,10 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
return ret;
}
- req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
+ req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
}
- ret = intel_logical_ring_begin(req, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -1698,9 +1624,9 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
return 0;
}
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
+static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
@@ -1708,25 +1634,26 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount++ == 0) {
- I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
- POSTING_READ(RING_IMR(ring->mmio_base));
+ if (engine->irq_refcount++ == 0) {
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ POSTING_READ(RING_IMR(engine->mmio_base));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
+static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount == 0) {
- I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
- POSTING_READ(RING_IMR(ring->mmio_base));
+ if (--engine->irq_refcount == 0) {
+ I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+ POSTING_READ(RING_IMR(engine->mmio_base));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1736,13 +1663,13 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
u32 unused)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
- struct intel_engine_cs *ring = ringbuf->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = ringbuf->engine;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
- ret = intel_logical_ring_begin(request, 4);
+ ret = intel_ring_begin(request, 4);
if (ret)
return ret;
@@ -1757,7 +1684,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
cmd |= MI_INVALIDATE_TLB;
- if (ring == &dev_priv->ring[VCS])
+ if (engine == &dev_priv->engine[VCS])
cmd |= MI_INVALIDATE_BSD;
}
@@ -1777,11 +1704,12 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
u32 flush_domains)
{
struct intel_ringbuffer *ringbuf = request->ringbuf;
- struct intel_engine_cs *ring = ringbuf->ring;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
- bool vf_flush_wa = false;
+ struct intel_engine_cs *engine = ringbuf->engine;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ bool vf_flush_wa = false, dc_flush_wa = false;
u32 flags = 0;
int ret;
+ int len;
flags |= PIPE_CONTROL_CS_STALL;
@@ -1806,11 +1734,23 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
- if (IS_GEN9(ring->dev))
+ if (IS_GEN9(engine->dev))
vf_flush_wa = true;
+
+ /* WaForGAMHang:kbl */
+ if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
+ dc_flush_wa = true;
}
- ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
+ len = 6;
+
+ if (vf_flush_wa)
+ len += 6;
+
+ if (dc_flush_wa)
+ len += 12;
+
+ ret = intel_ring_begin(request, len);
if (ret)
return ret;
@@ -1823,30 +1763,48 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
intel_logical_ring_emit(ringbuf, 0);
}
+ if (dc_flush_wa) {
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+ intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ }
+
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf, flags);
intel_logical_ring_emit(ringbuf, scratch_addr);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, 0);
+
+ if (dc_flush_wa) {
+ intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+ intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ intel_logical_ring_emit(ringbuf, 0);
+ }
+
intel_logical_ring_advance(ringbuf);
return 0;
}
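The len computed in gen8_emit_flush_render() budgets for up to three six-dword PIPE_CONTROLs; worked out from the code above:

/* base flush:                       6 dwords
 * + gen9 VF-cache WA (vf_flush_wa): preceding null PIPE_CONTROL, +6
 * + KBL WaForGAMHang (dc_flush_wa): DC flush before and CS stall after
 *                                   the main flush, +6 +6 = +12
 * so len is 6, 12, 18 or 24 depending on which workarounds apply. */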
-static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static u32 gen8_get_seqno(struct intel_engine_cs *engine)
{
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}
-static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+ intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
}
-static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
{
-
/*
* On BXT A steppings there is a HW coherency issue whereby the
* MI_STORE_DATA_IMM storing the completed request's seqno
@@ -1857,19 +1815,15 @@ static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
* bxt_a_set_seqno(), where we also do a clflush after the write. So
* this clflush in practice becomes an invalidate operation.
*/
-
- if (!lazy_coherency)
- intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
-
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
}
-static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+ intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
/* See bxt_a_seqno_barrier() explaining the reason for the clflush. */
- intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
+ intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
}
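The rename above splits the BXT-A coherency fixup out of get_seqno() into a dedicated irq_seqno_barrier() hook, so engines without the bug keep a plain status-page read. A hypothetical caller-side sketch (engine_read_seqno() is illustrative and not part of this patch):

/* Hypothetical: how the two hooks compose for a coherent seqno read. */
static u32 engine_read_seqno(struct intel_engine_cs *engine)
{
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine); /* bxt_a_seqno_barrier() on BXT A */
	return engine->get_seqno(engine);          /* gen8_get_seqno() */
}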
/*
@@ -1889,7 +1843,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
- ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+ ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
if (ret)
return ret;
@@ -1899,7 +1853,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf,
(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
intel_logical_ring_emit(ringbuf,
- hws_seqno_address(request->ring) |
+ hws_seqno_address(request->engine) |
MI_FLUSH_DW_USE_GTT);
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
@@ -1913,7 +1867,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
- ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
+ ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
if (ret)
return ret;
@@ -1929,7 +1883,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
(PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE));
- intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
+ intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
/* We're thrashing one dword of HWS. */
@@ -1944,19 +1898,19 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
struct render_state so;
int ret;
- ret = i915_gem_render_state_prepare(req->ring, &so);
+ ret = i915_gem_render_state_prepare(req->engine, &so);
if (ret)
return ret;
if (so.rodata == NULL)
return 0;
- ret = req->ring->emit_bb_start(req, so.ggtt_offset,
+ ret = req->engine->emit_bb_start(req, so.ggtt_offset,
I915_DISPATCH_SECURE);
if (ret)
goto out;
- ret = req->ring->emit_bb_start(req,
+ ret = req->engine->emit_bb_start(req,
(so.ggtt_offset + so.aux_batch_offset),
I915_DISPATCH_SECURE);
if (ret)
@@ -1994,146 +1948,197 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
* @engine: Engine Command Streamer.
*
*/
-void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
+void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
- if (!intel_ring_initialized(ring))
+ if (!intel_engine_initialized(engine))
return;
- dev_priv = ring->dev->dev_private;
+ /*
+ * Tasklet cannot be active at this point due to intel_mark_active/idle
+ * so this is just for documentation.
+ */
+ if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
+ tasklet_kill(&engine->irq_tasklet);
+
+ dev_priv = engine->dev->dev_private;
- if (ring->buffer) {
- intel_logical_ring_stop(ring);
- WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+ if (engine->buffer) {
+ intel_logical_ring_stop(engine);
+ WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
}
- if (ring->cleanup)
- ring->cleanup(ring);
+ if (engine->cleanup)
+ engine->cleanup(engine);
- i915_cmd_parser_fini_ring(ring);
- i915_gem_batch_pool_fini(&ring->batch_pool);
+ i915_cmd_parser_fini_ring(engine);
+ i915_gem_batch_pool_fini(&engine->batch_pool);
- if (ring->status_page.obj) {
- kunmap(sg_page(ring->status_page.obj->pages->sgl));
- ring->status_page.obj = NULL;
+ if (engine->status_page.obj) {
+ i915_gem_object_unpin_map(engine->status_page.obj);
+ engine->status_page.obj = NULL;
}
- ring->disable_lite_restore_wa = false;
- ring->ctx_desc_template = 0;
+ engine->idle_lite_restore_wa = 0;
+ engine->disable_lite_restore_wa = false;
+ engine->ctx_desc_template = 0;
- lrc_destroy_wa_ctx_obj(ring);
- ring->dev = NULL;
+ lrc_destroy_wa_ctx_obj(engine);
+ engine->dev = NULL;
}
static void
logical_ring_default_vfuncs(struct drm_device *dev,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
/* Default vfuncs which can be overridden by each engine. */
- ring->init_hw = gen8_init_common_ring;
- ring->emit_request = gen8_emit_request;
- ring->emit_flush = gen8_emit_flush;
- ring->irq_get = gen8_logical_ring_get_irq;
- ring->irq_put = gen8_logical_ring_put_irq;
- ring->emit_bb_start = gen8_emit_bb_start;
+ engine->init_hw = gen8_init_common_ring;
+ engine->emit_request = gen8_emit_request;
+ engine->emit_flush = gen8_emit_flush;
+ engine->irq_get = gen8_logical_ring_get_irq;
+ engine->irq_put = gen8_logical_ring_put_irq;
+ engine->emit_bb_start = gen8_emit_bb_start;
+ engine->get_seqno = gen8_get_seqno;
+ engine->set_seqno = gen8_set_seqno;
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
- ring->get_seqno = bxt_a_get_seqno;
- ring->set_seqno = bxt_a_set_seqno;
- } else {
- ring->get_seqno = gen8_get_seqno;
- ring->set_seqno = gen8_set_seqno;
+ engine->irq_seqno_barrier = bxt_a_seqno_barrier;
+ engine->set_seqno = bxt_a_set_seqno;
}
}
static inline void
-logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
+logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
+{
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+ engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+}
+
+static int
+lrc_setup_hws(struct intel_engine_cs *engine,
+ struct drm_i915_gem_object *dctx_obj)
{
- ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
- ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+ void *hws;
+
+ /* The HWSP is part of the default context object in LRC mode. */
+ engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
+ LRC_PPHWSP_PN * PAGE_SIZE;
+ hws = i915_gem_object_pin_map(dctx_obj);
+ if (IS_ERR(hws))
+ return PTR_ERR(hws);
+ engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
+ engine->status_page.obj = dctx_obj;
+
+ return 0;
}
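lrc_setup_hws() depends on the LRC object layout from intel_lrc.h; assuming LRC_PPHWSP_PN == 1 (so LRC_STATE_PN == 2), the backing object looks like:

/* page 0               : GuC shared data (used with GuC submission)
 * page LRC_PPHWSP_PN   : per-process HWSP -> engine->status_page.page_addr
 * page LRC_STATE_PN    : register state image (LRC_PPHWSP_PN + 1)
 * pages after that     : the rest of the logical ring context        */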
static int
-logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
{
- struct intel_context *dctx = to_i915(dev)->kernel_context;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_context *dctx = dev_priv->kernel_context;
+ enum forcewake_domains fw_domains;
int ret;
/* Intentionally left blank. */
- ring->buffer = NULL;
+ engine->buffer = NULL;
+
+ engine->dev = dev;
+ INIT_LIST_HEAD(&engine->active_list);
+ INIT_LIST_HEAD(&engine->request_list);
+ i915_gem_batch_pool_init(dev, &engine->batch_pool);
+ init_waitqueue_head(&engine->irq_queue);
+
+ INIT_LIST_HEAD(&engine->buffers);
+ INIT_LIST_HEAD(&engine->execlist_queue);
+ INIT_LIST_HEAD(&engine->execlist_retired_req_list);
+ spin_lock_init(&engine->execlist_lock);
- ring->dev = dev;
- INIT_LIST_HEAD(&ring->active_list);
- INIT_LIST_HEAD(&ring->request_list);
- i915_gem_batch_pool_init(dev, &ring->batch_pool);
- init_waitqueue_head(&ring->irq_queue);
+ tasklet_init(&engine->irq_tasklet,
+ intel_lrc_irq_handler, (unsigned long)engine);
- INIT_LIST_HEAD(&ring->buffers);
- INIT_LIST_HEAD(&ring->execlist_queue);
- INIT_LIST_HEAD(&ring->execlist_retired_req_list);
- spin_lock_init(&ring->execlist_lock);
+ logical_ring_init_platform_invariants(engine);
- logical_ring_init_platform_invariants(ring);
+ fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
+ RING_ELSP(engine),
+ FW_REG_WRITE);
- ret = i915_cmd_parser_init_ring(ring);
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ RING_CONTEXT_STATUS_PTR(engine),
+ FW_REG_READ | FW_REG_WRITE);
+
+ fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ RING_CONTEXT_STATUS_BUF_BASE(engine),
+ FW_REG_READ);
+
+ engine->fw_domains = fw_domains;
+
+ ret = i915_cmd_parser_init_ring(engine);
if (ret)
goto error;
- ret = intel_lr_context_deferred_alloc(dctx, ring);
+ ret = intel_lr_context_deferred_alloc(dctx, engine);
if (ret)
goto error;
/* As this is the default context, always pin it */
- ret = intel_lr_context_do_pin(dctx, ring);
+ ret = intel_lr_context_do_pin(dctx, engine);
if (ret) {
DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n",
- ring->name, ret);
+ engine->name, ret);
+ goto error;
+ }
+
+ /* And setup the hardware status page. */
+ ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
+ if (ret) {
+ DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
goto error;
}
return 0;
error:
- intel_logical_ring_cleanup(ring);
+ intel_logical_ring_cleanup(engine);
return ret;
}
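The fw_domains mask gathered in logical_ring_init() caches which forcewake wells cover the ELSP and CSB registers, so hot paths can take them in one shot. A hedged sketch of the intended consumer (the real one is the execlists submission/tasklet code):

/* Illustrative: bracket raw ELSP/CSB access with the cached domains. */
spin_lock(&dev_priv->uncore.lock);
intel_uncore_forcewake_get__locked(dev_priv, engine->fw_domains);
/* ... raw ELSP writes and CSB reads ... */
intel_uncore_forcewake_put__locked(dev_priv, engine->fw_domains);
spin_unlock(&dev_priv->uncore.lock);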
static int logical_render_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
int ret;
- ring->name = "render ring";
- ring->id = RCS;
- ring->exec_id = I915_EXEC_RENDER;
- ring->guc_id = GUC_RENDER_ENGINE;
- ring->mmio_base = RENDER_RING_BASE;
+ engine->name = "render ring";
+ engine->id = RCS;
+ engine->exec_id = I915_EXEC_RENDER;
+ engine->guc_id = GUC_RENDER_ENGINE;
+ engine->mmio_base = RENDER_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
+ logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
if (HAS_L3_DPF(dev))
- ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_vfuncs(dev, engine);
/* Override some for render ring. */
if (INTEL_INFO(dev)->gen >= 9)
- ring->init_hw = gen9_init_render_ring;
+ engine->init_hw = gen9_init_render_ring;
else
- ring->init_hw = gen8_init_render_ring;
- ring->init_context = gen8_init_rcs_context;
- ring->cleanup = intel_fini_pipe_control;
- ring->emit_flush = gen8_emit_flush_render;
- ring->emit_request = gen8_emit_request_render;
+ engine->init_hw = gen8_init_render_ring;
+ engine->init_context = gen8_init_rcs_context;
+ engine->cleanup = intel_fini_pipe_control;
+ engine->emit_flush = gen8_emit_flush_render;
+ engine->emit_request = gen8_emit_request_render;
- ring->dev = dev;
+ engine->dev = dev;
- ret = intel_init_pipe_control(ring);
+ ret = intel_init_pipe_control(engine);
if (ret)
return ret;
- ret = intel_init_workaround_bb(ring);
+ ret = intel_init_workaround_bb(engine);
if (ret) {
/*
* We continue even if we fail to initialize WA batch
@@ -2144,9 +2149,9 @@ static int logical_render_ring_init(struct drm_device *dev)
ret);
}
- ret = logical_ring_init(dev, ring);
+ ret = logical_ring_init(dev, engine);
if (ret) {
- lrc_destroy_wa_ctx_obj(ring);
+ lrc_destroy_wa_ctx_obj(engine);
}
return ret;
@@ -2155,69 +2160,69 @@ static int logical_render_ring_init(struct drm_device *dev)
static int logical_bsd_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[VCS];
- ring->name = "bsd ring";
- ring->id = VCS;
- ring->exec_id = I915_EXEC_BSD;
- ring->guc_id = GUC_VIDEO_ENGINE;
- ring->mmio_base = GEN6_BSD_RING_BASE;
+ engine->name = "bsd ring";
+ engine->id = VCS;
+ engine->exec_id = I915_EXEC_BSD;
+ engine->guc_id = GUC_VIDEO_ENGINE;
+ engine->mmio_base = GEN6_BSD_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, engine);
- return logical_ring_init(dev, ring);
+ return logical_ring_init(dev, engine);
}
static int logical_bsd2_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+ struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
- ring->name = "bsd2 ring";
- ring->id = VCS2;
- ring->exec_id = I915_EXEC_BSD;
- ring->guc_id = GUC_VIDEO_ENGINE2;
- ring->mmio_base = GEN8_BSD2_RING_BASE;
+ engine->name = "bsd2 ring";
+ engine->id = VCS2;
+ engine->exec_id = I915_EXEC_BSD;
+ engine->guc_id = GUC_VIDEO_ENGINE2;
+ engine->mmio_base = GEN8_BSD2_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, engine);
- return logical_ring_init(dev, ring);
+ return logical_ring_init(dev, engine);
}
static int logical_blt_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[BCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[BCS];
- ring->name = "blitter ring";
- ring->id = BCS;
- ring->exec_id = I915_EXEC_BLT;
- ring->guc_id = GUC_BLITTER_ENGINE;
- ring->mmio_base = BLT_RING_BASE;
+ engine->name = "blitter ring";
+ engine->id = BCS;
+ engine->exec_id = I915_EXEC_BLT;
+ engine->guc_id = GUC_BLITTER_ENGINE;
+ engine->mmio_base = BLT_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, engine);
- return logical_ring_init(dev, ring);
+ return logical_ring_init(dev, engine);
}
static int logical_vebox_ring_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+ struct intel_engine_cs *engine = &dev_priv->engine[VECS];
- ring->name = "video enhancement ring";
- ring->id = VECS;
- ring->exec_id = I915_EXEC_VEBOX;
- ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
- ring->mmio_base = VEBOX_RING_BASE;
+ engine->name = "video enhancement ring";
+ engine->id = VECS;
+ engine->exec_id = I915_EXEC_VEBOX;
+ engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
+ engine->mmio_base = VEBOX_RING_BASE;
- logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
- logical_ring_default_vfuncs(dev, ring);
+ logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT);
+ logical_ring_default_vfuncs(dev, engine);
- return logical_ring_init(dev, ring);
+ return logical_ring_init(dev, engine);
}
/**
@@ -2225,7 +2230,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
* @dev: DRM device.
*
* This function inits the engines for an Execlists submission style (the equivalent in the
- * legacy ringbuffer submission world would be i915_gem_init_rings). It does it only for
+ * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for
* those engines that are present in the hardware.
*
* Return: non-zero if the initialization failed.
@@ -2266,13 +2271,13 @@ int intel_logical_rings_init(struct drm_device *dev)
return 0;
cleanup_vebox_ring:
- intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
+ intel_logical_ring_cleanup(&dev_priv->engine[VECS]);
cleanup_blt_ring:
- intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
+ intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
cleanup_bsd_ring:
- intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
+ intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
cleanup_render_ring:
- intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
+ intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
return ret;
}
@@ -2320,13 +2325,13 @@ make_rpcs(struct drm_device *dev)
return rpcs;
}
-static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
+static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
{
u32 indirect_ctx_offset;
- switch (INTEL_INFO(ring->dev)->gen) {
+ switch (INTEL_INFO(engine->dev)->gen) {
default:
- MISSING_CASE(INTEL_INFO(ring->dev)->gen);
+ MISSING_CASE(INTEL_INFO(engine->dev)->gen);
/* fall through */
case 9:
indirect_ctx_offset =
@@ -2342,14 +2347,16 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring)
}
static int
-populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
- struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
+populate_lr_context(struct intel_context *ctx,
+ struct drm_i915_gem_object *ctx_obj,
+ struct intel_engine_cs *engine,
+ struct intel_ringbuffer *ringbuf)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
- struct page *page;
- uint32_t *reg_state;
+ void *vaddr;
+ u32 *reg_state;
int ret;
if (!ppgtt)
@@ -2361,18 +2368,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
return ret;
}
- ret = i915_gem_object_get_pages(ctx_obj);
- if (ret) {
- DRM_DEBUG_DRIVER("Could not get object pages\n");
+ vaddr = i915_gem_object_pin_map(ctx_obj);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
return ret;
}
-
- i915_gem_object_pin_pages(ctx_obj);
+ ctx_obj->dirty = true;
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
- page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
- reg_state = kmap_atomic(page);
+ reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
* commands followed by (reg, value) pairs. The values we are setting here are
@@ -2380,33 +2386,47 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
* recreate this batchbuffer with new values (including all the missing
* MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
reg_state[CTX_LRI_HEADER_0] =
- MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
- ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring),
+ MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
+ ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
+ RING_CONTEXT_CONTROL(engine),
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
(HAS_RESOURCE_STREAMER(dev) ?
CTX_CTRL_RS_CTX_ENABLE : 0)));
- ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
+ 0);
/* Ring buffer start address is not known until the buffer is pinned.
* It is written to the context image in execlists_update_context()
*/
- ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base),
+ ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
+ RING_START(engine->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
+ RING_CTL(engine->mmio_base),
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
- ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base),
+ ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
+ RING_BBADDR_UDW(engine->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
+ RING_BBADDR(engine->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
+ RING_BBSTATE(engine->mmio_base),
RING_BB_PPGTT);
- ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0);
- if (ring->id == RCS) {
- ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0);
- if (ring->wa_ctx.obj) {
- struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx;
+ ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
+ RING_SBBADDR_UDW(engine->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
+ RING_SBBADDR(engine->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
+ RING_SBBSTATE(engine->mmio_base), 0);
+ if (engine->id == RCS) {
+ ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
+ RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
+ RING_INDIRECT_CTX(engine->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
+ RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
+ if (engine->wa_ctx.obj) {
+ struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj);
reg_state[CTX_RCS_INDIRECT_CTX+1] =
@@ -2414,7 +2434,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
(wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
- intel_lr_indirect_ctx_offset(ring) << 6;
+ intel_lr_indirect_ctx_offset(engine) << 6;
reg_state[CTX_BB_PER_CTX_PTR+1] =
(ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
@@ -2422,16 +2442,25 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
}
}
reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
- ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
+ RING_CTX_TIMESTAMP(engine->mmio_base), 0);
/* PDP values will be assigned later if needed */
- ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
+ 0);
+ ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
+ 0);
if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
/* 64b PPGTT (48bit canonical)
@@ -2445,20 +2474,16 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
* With dynamic page allocation, PDPs may not be allocated at
* this point. Point the unallocated PDPs to the scratch page
*/
- ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
+ execlists_update_context_pdps(ppgtt, reg_state);
}
- if (ring->id == RCS) {
+ if (engine->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
make_rpcs(dev));
}
- kunmap_atomic(reg_state);
- i915_gem_object_unpin_pages(ctx_obj);
+ i915_gem_object_unpin_map(ctx_obj);
return 0;
}
@@ -2475,7 +2500,7 @@ void intel_lr_context_free(struct intel_context *ctx)
{
int i;
- for (i = I915_NUM_RINGS; --i >= 0; ) {
+ for (i = I915_NUM_ENGINES; --i >= 0; ) {
struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
@@ -2485,6 +2510,7 @@ void intel_lr_context_free(struct intel_context *ctx)
if (ctx == ctx->i915->kernel_context) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
+ i915_gem_object_unpin_map(ctx_obj);
}
WARN_ON(ctx->engine[i].pin_count);
@@ -2507,15 +2533,15 @@ void intel_lr_context_free(struct intel_context *ctx)
* in LRC mode, but does not include the "shared data page" used with
* GuC submission. The caller should account for this if using the GuC.
*/
-uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
+uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
{
int ret = 0;
- WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
+ WARN_ON(INTEL_INFO(engine->dev)->gen < 8);
- switch (ring->id) {
+ switch (engine->id) {
case RCS:
- if (INTEL_INFO(ring->dev)->gen >= 9)
+ if (INTEL_INFO(engine->dev)->gen >= 9)
ret = GEN9_LR_CONTEXT_RENDER_SIZE;
else
ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2531,24 +2557,6 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
return ret;
}
-static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
- struct drm_i915_gem_object *default_ctx_obj)
-{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct page *page;
-
- /* The HWSP is part of the default context object in LRC mode. */
- ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
- + LRC_PPHWSP_PN * PAGE_SIZE;
- page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
- ring->status_page.page_addr = kmap(page);
- ring->status_page.obj = default_ctx_obj;
-
- I915_WRITE(RING_HWS_PGA(ring->mmio_base),
- (u32)ring->status_page.gfx_addr);
- POSTING_READ(RING_HWS_PGA(ring->mmio_base));
-}
-
/**
* intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
* @ctx: LR context to create.
@@ -2564,18 +2572,18 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
*/
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_gem_object *ctx_obj;
uint32_t context_size;
struct intel_ringbuffer *ringbuf;
int ret;
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
- WARN_ON(ctx->engine[ring->id].state);
+ WARN_ON(ctx->engine[engine->id].state);
- context_size = round_up(intel_lr_context_size(ring), 4096);
+ context_size = round_up(intel_lr_context_size(engine), 4096);
/* One extra page as the sharing data between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
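A worked example of the size computation, under the assumption that LRC_PPHWSP_PN == 1 and that the gen9 render image is GEN9_LR_CONTEXT_RENDER_SIZE == 22 * PAGE_SIZE (both treated as assumptions here):

/* context_size = round_up(22 * PAGE_SIZE, 4096)   -> 22 pages
 * context_size += PAGE_SIZE * LRC_PPHWSP_PN       -> 23 pages total,
 * the extra leading page holding the driver/GuC shared data, giving
 * the [GuC shared][PPHWSP][register state ...] layout noted earlier. */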
@@ -2586,39 +2594,38 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
return -ENOMEM;
}
- ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
+ ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
if (IS_ERR(ringbuf)) {
ret = PTR_ERR(ringbuf);
goto error_deref_obj;
}
- ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
+ ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
goto error_ringbuf;
}
- ctx->engine[ring->id].ringbuf = ringbuf;
- ctx->engine[ring->id].state = ctx_obj;
+ ctx->engine[engine->id].ringbuf = ringbuf;
+ ctx->engine[engine->id].state = ctx_obj;
- if (ctx != ctx->i915->kernel_context && ring->init_context) {
+ if (ctx != ctx->i915->kernel_context && engine->init_context) {
struct drm_i915_gem_request *req;
- req = i915_gem_request_alloc(ring, ctx);
+ req = i915_gem_request_alloc(engine, ctx);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
DRM_ERROR("ring create req: %d\n", ret);
goto error_ringbuf;
}
- ret = ring->init_context(req);
+ ret = engine->init_context(req);
+ i915_add_request_no_flush(req);
if (ret) {
DRM_ERROR("ring init context: %d\n",
ret);
- i915_gem_request_cancel(req);
goto error_ringbuf;
}
- i915_add_request_no_flush(req);
}
return 0;
@@ -2626,40 +2633,38 @@ error_ringbuf:
intel_ringbuffer_free(ringbuf);
error_deref_obj:
drm_gem_object_unreference(&ctx_obj->base);
- ctx->engine[ring->id].ringbuf = NULL;
- ctx->engine[ring->id].state = NULL;
+ ctx->engine[engine->id].ringbuf = NULL;
+ ctx->engine[engine->id].state = NULL;
return ret;
}
-void intel_lr_context_reset(struct drm_device *dev,
- struct intel_context *ctx)
+void intel_lr_context_reset(struct drm_i915_private *dev_priv,
+ struct intel_context *ctx)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
- int i;
+ struct intel_engine_cs *engine;
- for_each_ring(ring, dev_priv, i) {
+ for_each_engine(engine, dev_priv) {
struct drm_i915_gem_object *ctx_obj =
- ctx->engine[ring->id].state;
+ ctx->engine[engine->id].state;
struct intel_ringbuffer *ringbuf =
- ctx->engine[ring->id].ringbuf;
+ ctx->engine[engine->id].ringbuf;
+ void *vaddr;
uint32_t *reg_state;
- struct page *page;
if (!ctx_obj)
continue;
- if (i915_gem_object_get_pages(ctx_obj)) {
- WARN(1, "Failed get_pages for context obj\n");
+ vaddr = i915_gem_object_pin_map(ctx_obj);
+ if (WARN_ON(IS_ERR(vaddr)))
continue;
- }
- page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
- reg_state = kmap_atomic(page);
+
+ reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ ctx_obj->dirty = true;
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL+1] = 0;
- kunmap_atomic(reg_state);
+ i915_gem_object_unpin_map(ctx_obj);
ringbuf->head = 0;
ringbuf->tail = 0;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index e6cda3e22..60a7385bc 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -24,6 +24,8 @@
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
+#include "intel_ringbuffer.h"
+
#define GEN8_LR_CONTEXT_ALIGN 4096
/* Execlists regs */
@@ -34,6 +36,7 @@
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
+#define RING_CONTEXT_STATUS_BUF_BASE(ring) _MMIO((ring)->mmio_base + 0x370)
#define RING_CONTEXT_STATUS_BUF_LO(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0)
@@ -57,10 +60,9 @@
/* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
-void intel_logical_ring_stop(struct intel_engine_cs *ring);
-void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
+void intel_logical_ring_stop(struct intel_engine_cs *engine);
+void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int intel_logical_rings_init(struct drm_device *dev);
-int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
/**
@@ -98,18 +100,21 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
void intel_lr_context_free(struct intel_context *ctx);
-uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
+uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
- struct intel_engine_cs *ring);
+ struct intel_engine_cs *engine);
void intel_lr_context_unpin(struct intel_context *ctx,
struct intel_engine_cs *engine);
-void intel_lr_context_reset(struct drm_device *dev,
- struct intel_context *ctx);
+
+struct drm_i915_private;
+
+void intel_lr_context_reset(struct drm_i915_private *dev_priv,
+ struct intel_context *ctx);
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
- struct intel_engine_cs *ring);
+ struct intel_engine_cs *engine);
u32 intel_execlists_ctx_id(struct intel_context *ctx,
- struct intel_engine_cs *ring);
+ struct intel_engine_cs *engine);
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -118,7 +123,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
-void intel_lrc_irq_handler(struct intel_engine_cs *ring);
-void intel_execlists_retire_requests(struct intel_engine_cs *ring);
+void intel_execlists_retire_requests(struct intel_engine_cs *engine);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 10dc3517b..96281e628 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -109,7 +109,6 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
- int dotclock;
tmp = I915_READ(lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
@@ -134,12 +133,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
- dotclock = pipe_config->port_clock;
-
- if (HAS_PCH_SPLIT(dev_priv->dev))
- ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
- pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+ pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
@@ -155,7 +149,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
if (HAS_PCH_SPLIT(dev)) {
assert_fdi_rx_pll_disabled(dev_priv, pipe);
assert_shared_dpll_disabled(dev_priv,
- intel_crtc_to_shared_dpll(crtc));
+ crtc->config->shared_dpll);
} else {
assert_pll_disabled(dev_priv, pipe);
}
@@ -782,57 +776,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
{ } /* terminating entry */
};
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the LVDS is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it assumes that the LVDS is present.
- */
-static bool lvds_is_present_in_vbt(struct drm_device *dev,
- u8 *i2c_pin)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- if (!dev_priv->vbt.child_dev_num)
- return true;
-
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- union child_device_config *uchild = dev_priv->vbt.child_dev + i;
- struct old_child_dev_config *child = &uchild->old;
-
- /* If the device type is not LFP, continue.
- * We have to check both the new identifiers as well as the
- * old for compatibility with some BIOSes.
- */
- if (child->device_type != DEVICE_TYPE_INT_LFP &&
- child->device_type != DEVICE_TYPE_LFP)
- continue;
-
- if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
- *i2c_pin = child->i2c_pin;
-
- /* However, we cannot trust the BIOS writers to populate
- * the VBT correctly. Since LVDS requires additional
- * information from AIM blocks, a non-zero addin offset is
- * a good indicator that the LVDS is actually present.
- */
- if (child->addin_offset)
- return true;
-
- /* But even then some BIOS writers perform some black magic
- * and instantiate the device without reference to any
- * additional data. Trust that if the VBT was written into
- * the OpRegion then they have validated the LVDS's existence.
- */
- if (dev_priv->opregion.vbt)
- return true;
- }
-
- return false;
-}
-
static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
{
DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
@@ -982,14 +925,14 @@ void intel_lvds_init(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
if ((lvds & LVDS_DETECTED) == 0)
return;
- if (dev_priv->vbt.edp_support) {
+ if (dev_priv->vbt.edp.support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
return;
}
}
pin = GMBUS_PIN_PANEL;
- if (!lvds_is_present_in_vbt(dev, &pin)) {
+ if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;
@@ -1139,6 +1082,8 @@ void intel_lvds_init(struct drm_device *dev)
fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
goto out;
}
}
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index fed7bea19..6ba4bf7f2 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -128,9 +128,9 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
/**
* get_mocs_settings()
- * @dev: DRM device.
+ * @dev_priv: i915 device.
* @table: Output table that will be made to point at appropriate
- * MOCS values for the device.
+ * MOCS values for the device.
*
* This function will return the values of the MOCS table that needs to
* be programmed for the platform. It will return the values that need
@@ -138,28 +138,28 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
*
* Return: true if there are applicable MOCS settings for the device.
*/
-static bool get_mocs_settings(struct drm_device *dev,
+static bool get_mocs_settings(struct drm_i915_private *dev_priv,
struct drm_i915_mocs_table *table)
{
bool result = false;
- if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
table->size = ARRAY_SIZE(skylake_mocs_table);
table->table = skylake_mocs_table;
result = true;
- } else if (IS_BROXTON(dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
table->size = ARRAY_SIZE(broxton_mocs_table);
table->table = broxton_mocs_table;
result = true;
} else {
- WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
+ WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
"Platform that should have a MOCS table does not.\n");
}
return result;
}
-static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
+static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
{
switch (ring) {
case RCS:
@@ -179,10 +179,49 @@ static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
}
/**
+ * intel_mocs_init_engine() - emit the mocs control table
+ * @engine: The engine for whom to emit the registers.
+ *
+ * This function simply emits a MI_LOAD_REGISTER_IMM command for the
+ * given table starting at the given address.
+ *
+ * Return: 0 on success, otherwise the error status.
+ */
+int intel_mocs_init_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_mocs_table table;
+ unsigned int index;
+
+ if (!get_mocs_settings(dev_priv, &table))
+ return 0;
+
+ if (WARN_ON(table.size > GEN9_NUM_MOCS_ENTRIES))
+ return -ENODEV;
+
+ for (index = 0; index < table.size; index++)
+ I915_WRITE(mocs_register(engine->id, index),
+ table.table[index].control_value);
+
+ /*
+ * Now set the unused entries to uncached. These entries are
+ * officially undefined and no contract is given for their
+ * contents or settings.
+ *
+ * Entry 0 in the table is uncached - so we simply write that
+ * value to all the unused entries.
+ */
+ for (; index < GEN9_NUM_MOCS_ENTRIES; index++)
+ I915_WRITE(mocs_register(engine->id, index),
+ table.table[0].control_value);
+
+ return 0;
+}
+
+/**
* emit_mocs_control_table() - emit the mocs control table
* @req: Request to set up the MOCS table for.
* @table: The values to program into the control regs.
- * @ring: The engine for whom to emit the registers.
*
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
* given table starting at the given address.
@@ -190,27 +229,26 @@ static i915_reg_t mocs_register(enum intel_ring_id ring, int index)
* Return: 0 on success, otherwise the error status.
*/
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
- const struct drm_i915_mocs_table *table,
- enum intel_ring_id ring)
+ const struct drm_i915_mocs_table *table)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
+ enum intel_engine_id engine = req->engine->id;
unsigned int index;
int ret;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
- if (ret) {
- DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+ ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+ if (ret)
return ret;
- }
intel_logical_ring_emit(ringbuf,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
for (index = 0; index < table->size; index++) {
- intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
+ intel_logical_ring_emit_reg(ringbuf,
+ mocs_register(engine, index));
intel_logical_ring_emit(ringbuf,
table->table[index].control_value);
}
@@ -224,8 +262,10 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
* that value to all the unused entries.
*/
for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
- intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
- intel_logical_ring_emit(ringbuf, table->table[0].control_value);
+ intel_logical_ring_emit_reg(ringbuf,
+ mocs_register(engine, index));
+ intel_logical_ring_emit(ringbuf,
+ table->table[0].control_value);
}
intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -234,6 +274,14 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
return 0;
}
+static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
+ u16 low,
+ u16 high)
+{
+ return table->table[low].l3cc_value |
+ table->table[high].l3cc_value << 16;
+}
+
/**
* emit_mocs_l3cc_table() - emit the mocs l3cc table
* @req: Request to set up the MOCS table for.
@@ -249,39 +297,31 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
- unsigned int count;
unsigned int i;
- u32 value;
- u32 filler = (table->table[0].l3cc_value & 0xffff) |
- ((table->table[0].l3cc_value & 0xffff) << 16);
int ret;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
- if (ret) {
- DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+ ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+ if (ret)
return ret;
- }
intel_logical_ring_emit(ringbuf,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
- for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
- value = (table->table[count].l3cc_value & 0xffff) |
- ((table->table[count + 1].l3cc_value & 0xffff) << 16);
-
+ for (i = 0; i < table->size/2; i++) {
intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
- intel_logical_ring_emit(ringbuf, value);
+ intel_logical_ring_emit(ringbuf,
+ l3cc_combine(table, 2*i, 2*i+1));
}
if (table->size & 0x01) {
/* Odd table size - 1 left over */
- value = (table->table[count].l3cc_value & 0xffff) |
- ((table->table[0].l3cc_value & 0xffff) << 16);
- } else
- value = filler;
+ intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
+ intel_logical_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
+ i++;
+ }
/*
* Now set the rest of the table to uncached - use entry 0 as
@@ -290,9 +330,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
*/
for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
- intel_logical_ring_emit(ringbuf, value);
-
- value = filler;
+ intel_logical_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
}
intel_logical_ring_emit(ringbuf, MI_NOOP);
@@ -302,6 +340,47 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
}
/**
+ * intel_mocs_init_l3cc_table() - program the mocs l3cc table
+ * @dev: The device to be programmed.
+ *
+ * This function simply programs the mocs l3cc registers for the given
+ * table. This register set is programmed in pairs.
+ *
+ * These registers may get programmed more than once; it is simpler to
+ * re-program all 32 registers than to track when they were last programmed.
+ * We always reprogram with the same values, and only on context start.
+ *
+ * Return: Nothing.
+ */
+void intel_mocs_init_l3cc_table(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_mocs_table table;
+ unsigned int i;
+
+ if (!get_mocs_settings(dev_priv, &table))
+ return;
+
+ for (i = 0; i < table.size/2; i++)
+ I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 2*i+1));
+
+ /* Odd table size - 1 left over */
+ if (table.size & 0x01) {
+ I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 0));
+ i++;
+ }
+
+ /*
+ * Now set the rest of the table to uncached - use entry 0 as
+ * this will be uncached. Leave the last pair as initialised as
+ * they are reserved by the hardware.
+ */
+ for (; i < (GEN9_NUM_MOCS_ENTRIES / 2); i++)
+ I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 0, 0));
+}
+
+/**
* intel_rcs_context_init_mocs() - program the MOCS register.
* @req: Request to set up the MOCS tables for.
*
@@ -322,17 +401,11 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
struct drm_i915_mocs_table t;
int ret;
- if (get_mocs_settings(req->ring->dev, &t)) {
- struct drm_i915_private *dev_priv = req->i915;
- struct intel_engine_cs *ring;
- enum intel_ring_id ring_id;
-
- /* Program the control registers */
- for_each_ring(ring, dev_priv, ring_id) {
- ret = emit_mocs_control_table(req, &t, ring_id);
- if (ret)
- return ret;
- }
+ if (get_mocs_settings(req->i915, &t)) {
+ /* Program the RCS control registers */
+ ret = emit_mocs_control_table(req, &t);
+ if (ret)
+ return ret;
/* Now program the l3cc registers */
ret = emit_mocs_l3cc_table(req, &t);
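
For readers following the MOCS rework above: the l3cc registers pack two
16-bit table entries per 32-bit register, which is why the new
l3cc_combine() shifts the high entry left by 16, and why an odd-sized
table pairs its leftover entry with entry 0 (uncached). A minimal
standalone sketch of that packing, outside the driver and with
hypothetical entry values:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's l3cc table entries. */
static const uint16_t l3cc_table[] = { 0x0010, 0x0030, 0x003b };
#define NUM_ENTRIES (sizeof(l3cc_table) / sizeof(l3cc_table[0]))

/* Mirrors l3cc_combine(): low entry in bits 15:0, high in bits 31:16. */
static uint32_t l3cc_combine(uint16_t low, uint16_t high)
{
	return (uint32_t)low | ((uint32_t)high << 16);
}

int main(void)
{
	unsigned int i;

	/* Whole pairs first... */
	for (i = 0; i < NUM_ENTRIES / 2; i++)
		printf("reg %u <- %#010x\n", i,
		       l3cc_combine(l3cc_table[2 * i], l3cc_table[2 * i + 1]));

	/* ...then, for an odd-sized table, pair the leftover entry with
	 * entry 0 (uncached), exactly as the patch does. */
	if (NUM_ENTRIES & 1)
		printf("reg %u <- %#010x\n", i,
		       l3cc_combine(l3cc_table[2 * i], l3cc_table[0]));
	return 0;
}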
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index 76e45b174..4640299e0 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -53,5 +53,7 @@
#include "i915_drv.h"
int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
+void intel_mocs_init_l3cc_table(struct drm_device *dev);
+int intel_mocs_init_engine(struct intel_engine_cs *ring);
#endif
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index c15718b48..16e209d32 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -34,12 +34,6 @@
#include "i915_drv.h"
#include "intel_drv.h"
-#define PCI_ASLE 0xe4
-#define PCI_ASLS 0xfc
-#define PCI_SWSCI 0xe8
-#define PCI_SWSCI_SCISEL (1 << 15)
-#define PCI_SWSCI_GSSCIE (1 << 0)
-
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
#define ACPI_CLID 0x01ac /* current lid state indicator */
@@ -246,13 +240,12 @@ struct opregion_asle_ext {
#define MAX_DSLP 1500
-#ifdef CONFIG_ACPI
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_swsci *swsci = dev_priv->opregion.swsci;
u32 main_function, sub_function, scic;
- u16 pci_swsci;
+ u16 swsci_val;
u32 dslp;
if (!swsci)
@@ -300,16 +293,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
swsci->scic = scic;
/* Ensure SCI event is selected and event trigger is cleared. */
- pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
- if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
- pci_swsci |= PCI_SWSCI_SCISEL;
- pci_swsci &= ~PCI_SWSCI_GSSCIE;
- pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+ pci_read_config_word(dev->pdev, SWSCI, &swsci_val);
+ if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
+ swsci_val |= SWSCI_SCISEL;
+ swsci_val &= ~SWSCI_GSSCIE;
+ pci_write_config_word(dev->pdev, SWSCI, swsci_val);
}
/* Use event trigger to tell bios to check the mail. */
- pci_swsci |= PCI_SWSCI_GSSCIE;
- pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+ swsci_val |= SWSCI_GSSCIE;
+ pci_write_config_word(dev->pdev, SWSCI, swsci_val);
/* Poll for the result. */
#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
@@ -905,9 +898,6 @@ static void swsci_setup(struct drm_device *dev)
opregion->swsci_gbda_sub_functions,
opregion->swsci_sbcb_sub_functions);
}
-#else /* CONFIG_ACPI */
-static inline void swsci_setup(struct drm_device *dev) {}
-#endif /* CONFIG_ACPI */
static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
{
@@ -943,16 +933,14 @@ int intel_opregion_setup(struct drm_device *dev)
BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
- pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
+ pci_read_config_dword(dev->pdev, ASLS, &asls);
DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
return -ENOTSUPP;
}
-#ifdef CONFIG_ACPI
INIT_WORK(&opregion->asle_work, asle_work);
-#endif
base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
if (!base)
@@ -1024,3 +1012,42 @@ err_out:
memunmap(base);
return err;
}
+
+int
+intel_opregion_get_panel_type(struct drm_device *dev)
+{
+ u32 panel_details;
+ int ret;
+
+ ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = (panel_details >> 8) & 0xff;
+ if (ret > 0x10) {
+ DRM_DEBUG_KMS("Invalid OpRegion panel type 0x%x\n", ret);
+ return -EINVAL;
+ }
+
+ /* fall back to VBT panel type? */
+ if (ret == 0x0) {
+ DRM_DEBUG_KMS("No panel type in OpRegion\n");
+ return -ENODEV;
+ }
+
+ /*
+ * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
+ * low vswing for eDP, whereas the VBT panel type (2) gives us normal
+ * vswing instead. Low vswing results in some display flickers, so
+ * let's simply ignore the OpRegion panel type on SKL for now.
+ */
+ if (IS_SKYLAKE(dev)) {
+ DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+ return -ENODEV;
+ }
+
+ return ret - 1;
+}
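
The panel type added above lives in bits 15:8 of the GBDA panel-details
response: 0 means "not provided", values above 0x10 are invalid, and the
valid range 1..16 maps to panel types 0..15 (hence the ret - 1). A
hedged standalone sketch of that decode, using a made-up response word:

#include <stdint.h>
#include <stdio.h>

/* Decode an OpRegion GBDA panel-details word the way the patch does:
 * bits 15:8 carry the 1-based panel type, 0 means "not provided". */
static int decode_panel_type(uint32_t panel_details)
{
	int type = (panel_details >> 8) & 0xff;

	if (type > 0x10)
		return -1;	/* out of range: invalid */
	if (type == 0)
		return -2;	/* absent: fall back to the VBT */
	return type - 1;	/* 1..16 -> 0..15 */
}

int main(void)
{
	/* Hypothetical response word: panel type 2 in bits 15:8. */
	printf("panel type = %d\n", decode_panel_type(0x0200));
	return 0;
}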
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 9168413fe..bd38e49f7 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -190,13 +190,14 @@ struct intel_overlay {
static struct overlay_registers __iomem *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(overlay->dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_wc(dev_priv->gtt.mappable,
+ regs = io_mapping_map_wc(ggtt->mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
@@ -233,30 +234,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
int ret;
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 4);
if (ret) {
- i915_gem_request_cancel(req);
+ i915_add_request_no_flush(req);
return ret;
}
overlay->active = true;
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
- intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+ intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
+ intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return intel_overlay_do_wait_request(overlay, req, NULL);
}
@@ -267,7 +268,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
@@ -283,19 +284,19 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 2);
if (ret) {
- i915_gem_request_cancel(req);
+ i915_add_request_no_flush(req);
return ret;
}
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- intel_ring_emit(ring, flip_addr);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(engine, flip_addr);
+ intel_ring_advance(engine);
WARN_ON(overlay->last_flip_req);
i915_gem_request_assign(&overlay->last_flip_req, req);
@@ -336,7 +337,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_request *req;
u32 flip_addr = overlay->flip_addr;
int ret;
@@ -349,33 +350,34 @@ static int intel_overlay_off(struct intel_overlay *overlay)
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 6);
if (ret) {
- i915_gem_request_cancel(req);
+ i915_add_request_no_flush(req);
return ret;
}
/* wait for overlay to go idle */
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+ intel_ring_emit(engine, flip_addr);
+ intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
/* turn overlay off */
if (IS_I830(dev)) {
/* Workaround: Don't disable the overlay fully, since otherwise
* it dies on the next OVERLAY_ON cmd. */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
} else {
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(engine, flip_addr);
+ intel_ring_emit(engine,
+ MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
}
@@ -408,7 +410,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -423,19 +425,20 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
/* synchronous slowpath */
struct drm_i915_gem_request *req;
- req = i915_gem_request_alloc(ring, NULL);
+ req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
ret = intel_ring_begin(req, 2);
if (ret) {
- i915_gem_request_cancel(req);
+ i915_add_request_no_flush(req);
return ret;
}
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine,
+ MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
ret = intel_overlay_do_wait_request(overlay, req,
intel_overlay_release_old_vid_tail);
@@ -1124,7 +1127,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
}
crtc = to_intel_crtc(drmmode_crtc);
- new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+ new_bo = to_intel_bo(drm_gem_object_lookup(file_priv,
put_image_rec->bo_handle));
if (&new_bo->base == NULL) {
ret = -ENOENT;
@@ -1479,7 +1482,8 @@ struct intel_overlay_error_state {
static struct overlay_registers __iomem *
intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(overlay->dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct overlay_registers __iomem *regs;
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
@@ -1488,7 +1492,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ regs = io_mapping_map_atomic_wc(ggtt->mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
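
All the overlay paths above follow the same begin/emit/advance
discipline: reserve space for a known number of dwords up front, write
them, then commit. A toy ring-buffer sketch of that discipline - not
the driver's implementation, all names illustrative:

#include <stdint.h>
#include <stdio.h>

#define RING_DWORDS 64

struct toy_ring {
	uint32_t buf[RING_DWORDS];
	unsigned int head;	/* written but not yet committed */
	unsigned int tail;	/* committed */
};

/* begin: fail up front if the request would not fit, so that the
 * emit calls that follow can never overflow. */
static int ring_begin(struct toy_ring *ring, unsigned int dwords)
{
	if (ring->head + dwords > RING_DWORDS)
		return -1;
	return 0;
}

static void ring_emit(struct toy_ring *ring, uint32_t dword)
{
	ring->buf[ring->head++] = dword;
}

/* advance: publish everything emitted since the last commit. */
static void ring_advance(struct toy_ring *ring)
{
	ring->tail = ring->head;
}

int main(void)
{
	struct toy_ring ring = {0};

	if (ring_begin(&ring, 2))
		return 1;
	ring_emit(&ring, 0x1000);	/* stand-in for MI_OVERLAY_FLIP */
	ring_emit(&ring, 0x0);		/* stand-in for MI_NOOP */
	ring_advance(&ring);
	printf("committed %u dwords\n", ring.tail);
	return 0;
}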
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 21ee6477b..aba940998 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
if (panel->backlight.combination_mode) {
u8 lbpc;
- pci_read_config_byte(dev_priv->dev->pdev, PCI_LBPC, &lbpc);
+ pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc);
val *= lbpc;
}
@@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
lbpc = level * 0xfe / panel->backlight.max + 1;
level /= lbpc;
- pci_write_config_byte(dev_priv->dev->pdev, PCI_LBPC, lbpc);
+ pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc);
}
if (IS_GEN4(dev_priv)) {
@@ -1240,7 +1240,7 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
*/
static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- return KHz(19200) / pwm_freq_hz;
+ return DIV_ROUND_CLOSEST(KHz(19200), pwm_freq_hz);
}
/*
@@ -1251,16 +1251,14 @@ static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u32 mul, clock;
+ u32 mul;
if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY)
mul = 128;
else
mul = 16;
- clock = MHz(24);
-
- return clock / (pwm_freq_hz * mul);
+ return DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * mul);
}
/*
@@ -1283,7 +1281,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
else
clock = MHz(24); /* LPT:LP */
- return clock / (pwm_freq_hz * mul);
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
}
/*
@@ -1292,10 +1290,9 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_device *dev = connector->base.dev;
- int clock = MHz(intel_pch_rawclk(dev));
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- return clock / (pwm_freq_hz * 128);
+ return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz * 128);
}
/*
@@ -1308,16 +1305,15 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int clock;
- if (IS_PINEVIEW(dev))
- clock = MHz(intel_hrawclk(dev));
+ if (IS_PINEVIEW(dev_priv))
+ clock = KHz(dev_priv->rawclk_freq);
else
- clock = 1000 * dev_priv->cdclk_freq;
+ clock = KHz(dev_priv->cdclk_freq);
- return clock / (pwm_freq_hz * 32);
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
}
/*
@@ -1332,11 +1328,11 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock;
if (IS_G4X(dev_priv))
- clock = MHz(intel_hrawclk(dev));
+ clock = KHz(dev_priv->rawclk_freq);
else
- clock = 1000 * dev_priv->cdclk_freq;
+ clock = KHz(dev_priv->cdclk_freq);
- return clock / (pwm_freq_hz * 128);
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
}
/*
@@ -1346,19 +1342,21 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int clock;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ int mul, clock;
if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
- if (IS_CHERRYVIEW(dev))
- return KHz(19200) / (pwm_freq_hz * 16);
+ if (IS_CHERRYVIEW(dev_priv))
+ clock = KHz(19200);
else
- return MHz(25) / (pwm_freq_hz * 16);
+ clock = MHz(25);
+ mul = 16;
} else {
- clock = intel_hrawclk(dev);
- return MHz(clock) / (pwm_freq_hz * 128);
+ clock = KHz(dev_priv->rawclk_freq);
+ mul = 128;
}
+
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
}
static u32 get_backlight_max_vbt(struct intel_connector *connector)
@@ -1640,6 +1638,12 @@ static int pwm_setup_backlight(struct intel_connector *connector,
return -ENODEV;
}
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to
+ * the atomic PWM API.
+ */
+ pwm_apply_args(panel->backlight.pwm);
+
retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
CRC_PMIC_PWM_PERIOD_NS);
if (retval < 0) {
@@ -1727,7 +1731,8 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
- } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) {
+ } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv)) {
panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight;
panel->backlight.disable = lpt_disable_backlight;
@@ -1745,7 +1750,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.get = pch_get_backlight;
panel->backlight.hz_to_pwm = pch_hz_to_pwm;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- if (dev_priv->vbt.has_mipi) {
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
panel->backlight.setup = pwm_setup_backlight;
panel->backlight.enable = pwm_enable_backlight;
panel->backlight.disable = pwm_disable_backlight;
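
The hz_to_pwm conversions above all moved from truncating division to
DIV_ROUND_CLOSEST, which halves the worst-case error when deriving a
PWM divider from a fixed clock. A small sketch of the difference; the
clock and target frequency below are illustrative:

#include <stdio.h>

/* Same rounding the kernel macro performs for positive operands. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long clock = 24000000;	/* 24 MHz PWM source, as on SPT */
	unsigned long freq = 19000;	/* requested backlight PWM, Hz */
	unsigned long mul = 16;

	/* prints 78 vs 79: the rounded divider is closer to the target */
	printf("truncated: %lu\n", clock / (freq * mul));
	printf("rounded:   %lu\n", DIV_ROUND_CLOSEST(clock, freq * mul));
	return 0;
}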
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 989b94df6..68a1f4cc2 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -54,10 +54,38 @@
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
+static void gen9_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
+ I915_WRITE(CHICKEN_PAR1_1,
+ I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+
+ I915_WRITE(GEN8_CONFIG0,
+ I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
+
+ /* WaEnableChickenDCPR:skl,bxt,kbl */
+ I915_WRITE(GEN8_CHICKEN_DCPR_1,
+ I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+
+ /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
+ /* WaFbcWakeMemOn:skl,bxt,kbl */
+ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+ DISP_FBC_WM_DIS |
+ DISP_FBC_MEMORY_WAKE);
+
+ /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_DISABLE_DUMMY0);
+}
+
static void bxt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ gen9_init_clock_gating(dev);
+
/* WaDisableSDEUnitClockGating:bxt */
I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
@@ -487,20 +515,6 @@ static const struct intel_watermark_params g4x_cursor_wm_info = {
.guard_size = 2,
.cacheline_size = G4X_FIFO_LINE_SIZE,
};
-static const struct intel_watermark_params valleyview_wm_info = {
- .fifo_size = VALLEYVIEW_FIFO_SIZE,
- .max_wm = VALLEYVIEW_MAX_WM,
- .default_wm = VALLEYVIEW_MAX_WM,
- .guard_size = 2,
- .cacheline_size = G4X_FIFO_LINE_SIZE,
-};
-static const struct intel_watermark_params valleyview_cursor_wm_info = {
- .fifo_size = I965_CURSOR_FIFO,
- .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
- .default_wm = I965_CURSOR_DFT_WM,
- .guard_size = 2,
- .cacheline_size = G4X_FIFO_LINE_SIZE,
-};
static const struct intel_watermark_params i965_cursor_wm_info = {
.fifo_size = I965_CURSOR_FIFO,
.max_wm = I965_CURSOR_MAX_WM,
@@ -2010,11 +2024,18 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
cur_latency *= 5;
}
- result->pri_val = ilk_compute_pri_wm(cstate, pristate,
- pri_latency, level);
- result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
- result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
- result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
+ if (pristate) {
+ result->pri_val = ilk_compute_pri_wm(cstate, pristate,
+ pri_latency, level);
+ result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
+ }
+
+ if (sprstate)
+ result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
+
+ if (curstate)
+ result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
+
result->enable = true;
}
@@ -2278,96 +2299,167 @@ static void skl_setup_wm_latency(struct drm_device *dev)
intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}
+static bool ilk_validate_pipe_wm(struct drm_device *dev,
+ struct intel_pipe_wm *pipe_wm)
+{
+ /* LP0 watermark maximums depend on this pipe alone */
+ const struct intel_wm_config config = {
+ .num_pipes_active = 1,
+ .sprites_enabled = pipe_wm->sprites_enabled,
+ .sprites_scaled = pipe_wm->sprites_scaled,
+ };
+ struct ilk_wm_maximums max;
+
+ /* LP0 watermarks always use 1/2 DDB partitioning */
+ ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+
+ /* At least LP0 must be valid */
+ if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
+ DRM_DEBUG_KMS("LP0 watermark invalid\n");
+ return false;
+ }
+
+ return true;
+}
+
/* Compute new watermarks for the pipe */
-static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
- struct drm_atomic_state *state)
+static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
{
+ struct drm_atomic_state *state = cstate->base.state;
+ struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
struct intel_pipe_wm *pipe_wm;
- struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_device *dev = state->dev;
const struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc_state *cstate = NULL;
struct intel_plane *intel_plane;
- struct drm_plane_state *ps;
struct intel_plane_state *pristate = NULL;
struct intel_plane_state *sprstate = NULL;
struct intel_plane_state *curstate = NULL;
- int level, max_level = ilk_wm_max_level(dev);
- /* LP0 watermark maximums depend on this pipe alone */
- struct intel_wm_config config = {
- .num_pipes_active = 1,
- };
+ int level, max_level = ilk_wm_max_level(dev), usable_level;
struct ilk_wm_maximums max;
- cstate = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(cstate))
- return PTR_ERR(cstate);
-
pipe_wm = &cstate->wm.optimal.ilk;
- memset(pipe_wm, 0, sizeof(*pipe_wm));
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- ps = drm_atomic_get_plane_state(state,
- &intel_plane->base);
- if (IS_ERR(ps))
- return PTR_ERR(ps);
+ struct intel_plane_state *ps;
+
+ ps = intel_atomic_get_existing_plane_state(state,
+ intel_plane);
+ if (!ps)
+ continue;
if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
- pristate = to_intel_plane_state(ps);
+ pristate = ps;
else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
- sprstate = to_intel_plane_state(ps);
+ sprstate = ps;
else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
- curstate = to_intel_plane_state(ps);
+ curstate = ps;
}
- config.sprites_enabled = sprstate->visible;
- config.sprites_scaled = sprstate->visible &&
- (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
- drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
-
pipe_wm->pipe_enabled = cstate->base.active;
- pipe_wm->sprites_enabled = config.sprites_enabled;
- pipe_wm->sprites_scaled = config.sprites_scaled;
+ if (sprstate) {
+ pipe_wm->sprites_enabled = sprstate->visible;
+ pipe_wm->sprites_scaled = sprstate->visible &&
+ (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
+ drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
+ }
+
+ usable_level = max_level;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible)
- max_level = 1;
+ if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
+ usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
- if (config.sprites_scaled)
- max_level = 0;
+ if (pipe_wm->sprites_scaled)
+ usable_level = 0;
ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
- pristate, sprstate, curstate, &pipe_wm->wm[0]);
+ pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
+
+ memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
+ pipe_wm->wm[0] = pipe_wm->raw_wm[0];
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
- /* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
-
- /* At least LP0 must be valid */
- if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
+ if (!ilk_validate_pipe_wm(dev, pipe_wm))
return -EINVAL;
ilk_compute_wm_reg_maximums(dev, 1, &max);
for (level = 1; level <= max_level; level++) {
- struct intel_wm_level wm = {};
+ struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
- pristate, sprstate, curstate, &wm);
+ pristate, sprstate, curstate, wm);
/*
* Disable any watermark level that exceeds the
* register maximums since such watermarks are
* always invalid.
*/
- if (!ilk_validate_wm_level(level, &max, &wm))
- break;
+ if (level > usable_level)
+ continue;
+
+ if (ilk_validate_wm_level(level, &max, wm))
+ pipe_wm->wm[level] = *wm;
+ else
+ usable_level = level;
+ }
+
+ return 0;
+}
+
+/*
+ * Build a set of 'intermediate' watermark values that satisfy both the old
+ * state and the new state. These can be programmed to the hardware
+ * immediately.
+ */
+static int ilk_compute_intermediate_wm(struct drm_device *dev,
+ struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *newstate)
+{
+ struct intel_pipe_wm *a = &newstate->wm.intermediate;
+ struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
+ int level, max_level = ilk_wm_max_level(dev);
+
+ /*
+ * Start with the final, target watermarks, then combine with the
+ * currently active watermarks to get values that are safe both before
+ * and after the vblank.
+ */
+ *a = newstate->wm.optimal.ilk;
+ a->pipe_enabled |= b->pipe_enabled;
+ a->sprites_enabled |= b->sprites_enabled;
+ a->sprites_scaled |= b->sprites_scaled;
- pipe_wm->wm[level] = wm;
+ for (level = 0; level <= max_level; level++) {
+ struct intel_wm_level *a_wm = &a->wm[level];
+ const struct intel_wm_level *b_wm = &b->wm[level];
+
+ a_wm->enable &= b_wm->enable;
+ a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
+ a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
+ a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
+ a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
}
+ /*
+ * We need to make sure that these merged watermark values are
+ * actually a valid configuration themselves. If they're not,
+ * there's no safe way to transition from the old state to
+ * the new state, so we need to fail the atomic transaction.
+ */
+ if (!ilk_validate_pipe_wm(dev, a))
+ return -EINVAL;
+
+ /*
+ * If our intermediate WM values are identical to the final WM values,
+ * omit the post-vblank programming; only update if it's different.
+ */
+ if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0)
+ newstate->wm.need_postvbl_update = false;
+
return 0;
}
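
The intermediate watermarks introduced above are the element-wise
"safe" combination of old and new state: level enables are AND-ed and
each level value takes the larger of the two, so the result is valid
both before and after the vblank. A standalone sketch of that merge
rule; the struct fields mirror the patch, the values are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_LEVEL 3

struct wm_level {
	bool enable;
	uint32_t pri_val, spr_val, cur_val;
};

static uint32_t max_u32(uint32_t a, uint32_t b)
{
	return a > b ? a : b;
}

/* Merge old and new watermarks into values safe for both states:
 * a level stays enabled only if both states enabled it, and each
 * value takes the larger (more conservative) of the two. */
static void merge_wm(struct wm_level *out, const struct wm_level *a,
		     const struct wm_level *b)
{
	int level;

	for (level = 0; level <= MAX_LEVEL; level++) {
		out[level].enable = a[level].enable && b[level].enable;
		out[level].pri_val = max_u32(a[level].pri_val, b[level].pri_val);
		out[level].spr_val = max_u32(a[level].spr_val, b[level].spr_val);
		out[level].cur_val = max_u32(a[level].cur_val, b[level].cur_val);
	}
}

int main(void)
{
	struct wm_level oldw[MAX_LEVEL + 1] = { { true, 10, 4, 2 } };
	struct wm_level neww[MAX_LEVEL + 1] = { { true, 8, 6, 2 } };
	struct wm_level mid[MAX_LEVEL + 1];

	merge_wm(mid, oldw, neww);
	printf("LP0: pri=%u spr=%u cur=%u\n",
	       mid[0].pri_val, mid[0].spr_val, mid[0].cur_val);
	return 0;
}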
@@ -2383,9 +2475,7 @@ static void ilk_merge_wm_level(struct drm_device *dev,
ret_wm->enable = true;
for_each_intel_crtc(dev, intel_crtc) {
- const struct intel_crtc_state *cstate =
- to_intel_crtc_state(intel_crtc->base.state);
- const struct intel_pipe_wm *active = &cstate->wm.optimal.ilk;
+ const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
const struct intel_wm_level *wm = &active->wm[level];
if (!active->pipe_enabled)
@@ -2421,7 +2511,7 @@ static void ilk_wm_merge(struct drm_device *dev,
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
config->num_pipes_active > 1)
- return;
+ last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
@@ -2533,15 +2623,14 @@ static void ilk_compute_wm_results(struct drm_device *dev,
/* LP0 register values */
for_each_intel_crtc(dev, intel_crtc) {
- const struct intel_crtc_state *cstate =
- to_intel_crtc_state(intel_crtc->base.state);
enum pipe pipe = intel_crtc->pipe;
- const struct intel_wm_level *r = &cstate->wm.optimal.ilk.wm[0];
+ const struct intel_wm_level *r =
+ &intel_crtc->wm.active.ilk.wm[0];
if (WARN_ON(!r->enable))
continue;
- results->wm_linetime[pipe] = cstate->wm.optimal.ilk.linetime;
+ results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
results->wm_pipe[pipe] =
(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
@@ -2748,7 +2837,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
dev_priv->wm.hw = *results;
}
-static bool ilk_disable_lp_wm(struct drm_device *dev)
+bool ilk_disable_lp_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3764,11 +3853,9 @@ static void ilk_compute_wm_config(struct drm_device *dev,
}
}
-static void ilk_program_watermarks(struct intel_crtc_state *cstate)
+static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_crtc *crtc = cstate->base.crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = dev_priv->dev;
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
struct intel_wm_config config = {};
@@ -3799,28 +3886,28 @@ static void ilk_program_watermarks(struct intel_crtc_state *cstate)
ilk_write_wm_values(dev_priv, &results);
}
-static void ilk_update_wm(struct drm_crtc *crtc)
+static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
-
- WARN_ON(cstate->base.active != intel_crtc->active);
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- /*
- * IVB workaround: must disable low power watermarks for at least
- * one frame before enabling scaling. LP watermarks can be re-enabled
- * when scaling is disabled.
- *
- * WaCxSRDisabledForSpriteScaling:ivb
- */
- if (cstate->disable_lp_wm) {
- ilk_disable_lp_wm(crtc->dev);
- intel_wait_for_vblank(crtc->dev, intel_crtc->pipe);
- }
+ mutex_lock(&dev_priv->wm.wm_mutex);
+ intel_crtc->wm.active.ilk = cstate->wm.intermediate;
+ ilk_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->wm.wm_mutex);
+}
- intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
+static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
+{
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- ilk_program_watermarks(cstate);
+ mutex_lock(&dev_priv->wm.wm_mutex);
+ if (cstate->wm.need_postvbl_update) {
+ intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
+ ilk_program_watermarks(dev_priv);
+ }
+ mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void skl_pipe_wm_active_state(uint32_t val,
@@ -4354,7 +4441,7 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
- if (IS_GEN9(dev_priv->dev)) {
+ if (IS_GEN9(dev_priv)) {
limits = (dev_priv->rps.max_freq_softlimit) << 23;
if (val <= dev_priv->rps.min_freq_softlimit)
limits |= (dev_priv->rps.min_freq_softlimit) << 14;
@@ -4639,7 +4726,7 @@ void intel_set_rps(struct drm_device *dev, u8 val)
gen6_set_rps(dev, val);
}
-static void gen9_disable_rps(struct drm_device *dev)
+static void gen9_disable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4647,12 +4734,20 @@ static void gen9_disable_rps(struct drm_device *dev)
I915_WRITE(GEN9_PG_ENABLE, 0);
}
+static void gen9_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RP_CONTROL, 0);
+}
+
static void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+ I915_WRITE(GEN6_RP_CONTROL, 0);
}
static void cherryview_disable_rps(struct drm_device *dev)
@@ -4696,7 +4791,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool enable_rc6 = true;
unsigned long rc6_ctx_base;
@@ -4710,9 +4806,9 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
* for this check.
*/
rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
- if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) &&
- (rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base +
- dev_priv->gtt.stolen_reserved_size))) {
+ if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
+ (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
+ ggtt->stolen_reserved_size))) {
DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
enable_rc6 = false;
}
@@ -4855,6 +4951,16 @@ static void gen9_enable_rps(struct drm_device *dev)
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+ /*
+ * The BIOS could leave HW Turbo enabled, so we need to explicitly
+ * clear out the Control register to avoid an inconsistency with
+ * the debugfs interface, which would otherwise show Turbo as
+ * enabled - not what the user expects after WaGsvDisableTurbo is
+ * applied. Apart from that there is no problem even if Turbo is
+ * left enabled in the Control register, as the Up/Down interrupts
+ * would remain masked.
+ */
+ gen9_disable_rps(dev);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return;
}
@@ -4873,7 +4979,7 @@ static void gen9_enable_rps(struct drm_device *dev)
* Up/Down EI & threshold registers, as well as the RP_CONTROL,
* RP_INTERRUPT_LIMITS & RPNSWREQ registers */
dev_priv->rps.power = HIGH_POWER; /* force a reset */
- gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -4881,9 +4987,8 @@ static void gen9_enable_rps(struct drm_device *dev)
static void gen9_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
- int unused;
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
@@ -4904,8 +5009,8 @@ static void gen9_enable_rc6(struct drm_device *dev)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_ring(ring, dev_priv, unused)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_engine(engine, dev_priv)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
if (HAS_GUC_UCODE(dev))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
@@ -4951,9 +5056,8 @@ static void gen9_enable_rc6(struct drm_device *dev)
static void gen8_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
uint32_t rc6_mask = 0;
- int unused;
/* 1a: Software RC state - RC0 */
I915_WRITE(GEN6_RC_STATE, 0);
@@ -4972,8 +5076,8 @@ static void gen8_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_ring(ring, dev_priv, unused)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_engine(engine, dev_priv)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
if (IS_BROADWELL(dev))
I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
@@ -5033,11 +5137,11 @@ static void gen8_enable_rps(struct drm_device *dev)
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
- int i, ret;
+ int ret;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -5050,7 +5154,8 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_STATE, 0);
/* Clear the DBG now so we don't confuse earlier errors */
- if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ gtfifodbg = I915_READ(GTFIFODBG);
+ if (gtfifodbg) {
DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
}
@@ -5069,8 +5174,8 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_engine(engine, dev_priv)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
@@ -5355,9 +5460,9 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
static void cherryview_setup_pctx(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long pctx_paddr, paddr;
- struct i915_gtt *gtt = &dev_priv->gtt;
u32 pcbr;
int pctx_size = 32*1024;
@@ -5365,7 +5470,7 @@ static void cherryview_setup_pctx(struct drm_device *dev)
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
paddr = (dev_priv->mm.stolen_base +
- (gtt->stolen_size - pctx_size));
+ (ggtt->stolen_size - pctx_size));
pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
@@ -5433,6 +5538,17 @@ static void valleyview_cleanup_pctx(struct drm_device *dev)
dev_priv->vlv_pctx = NULL;
}
+static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
+{
+ dev_priv->rps.gpll_ref_freq =
+ vlv_get_cck_clock(dev_priv, "GPLL ref",
+ CCK_GPLL_CLOCK_CONTROL,
+ dev_priv->czclk_freq);
+
+ DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
+ dev_priv->rps.gpll_ref_freq);
+}
+
static void valleyview_init_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5440,6 +5556,8 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
valleyview_setup_pctx(dev);
+ vlv_init_gpll_ref_freq(dev_priv);
+
mutex_lock(&dev_priv->rps.hw_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
@@ -5497,6 +5615,8 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
cherryview_setup_pctx(dev);
+ vlv_init_gpll_ref_freq(dev_priv);
+
mutex_lock(&dev_priv->rps.hw_lock);
mutex_lock(&dev_priv->sb_lock);
@@ -5561,13 +5681,13 @@ static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
static void cherryview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0, pcbr;
- int i;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- gtfifodbg = I915_READ(GTFIFODBG);
+ gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
+ GT_FIFO_FREE_ENTRIES_CHV);
if (gtfifodbg) {
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
gtfifodbg);
@@ -5588,8 +5708,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_engine(engine, dev_priv)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
@@ -5648,10 +5768,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
dev_priv->rps.cur_freq);
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
- dev_priv->rps.efficient_freq);
+ intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
+ dev_priv->rps.idle_freq);
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -5659,15 +5779,15 @@ static void cherryview_enable_rps(struct drm_device *dev)
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
u32 gtfifodbg, val, rc6_mode = 0;
- int i;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
valleyview_check_pctx(dev_priv);
- if ((gtfifodbg = I915_READ(GTFIFODBG))) {
+ gtfifodbg = I915_READ(GTFIFODBG);
+ if (gtfifodbg) {
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
@@ -5699,8 +5819,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
- for_each_ring(ring, dev_priv, i)
- I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ for_each_engine(engine, dev_priv)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
@@ -5738,10 +5858,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
dev_priv->rps.cur_freq);
DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
- intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
- dev_priv->rps.efficient_freq);
+ intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
+ dev_priv->rps.idle_freq);
- valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -6076,17 +6196,16 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
bool ret = false;
- int i;
spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
- for_each_ring(ring, dev_priv, i)
- ret |= !list_empty(&ring->request_list);
+ for_each_engine(engine, dev_priv)
+ ret |= !list_empty(&engine->request_list);
out_unlock:
spin_unlock_irq(&mchdev_lock);
@@ -6306,9 +6425,10 @@ void intel_disable_gt_powersave(struct drm_device *dev)
intel_suspend_gt_powersave(dev);
mutex_lock(&dev_priv->rps.hw_lock);
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_INFO(dev)->gen >= 9) {
+ gen9_disable_rc6(dev);
gen9_disable_rps(dev);
- else if (IS_CHERRYVIEW(dev))
+ } else if (IS_CHERRYVIEW(dev))
cherryview_disable_rps(dev);
else if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
@@ -6715,6 +6835,38 @@ static void lpt_suspend_hw(struct drm_device *dev)
}
}
+static void kabylake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_clock_gating(dev);
+
+ /* WaDisableSDEUnitClockGating:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+ GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableGamClockGating:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+ GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaFbcNukeOnHostModify:kbl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
+static void skylake_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ gen9_init_clock_gating(dev);
+
+ /* WaFbcNukeOnHostModify:skl */
+ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+ ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
static void broadwell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6929,23 +7081,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
gen6_check_mch_setup(dev);
}
-static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
-
- /*
- * Disable trickle feed and enable pnd deadline calculation
- */
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
- I915_WRITE(CBR1_VLV, 0);
-}
-
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- vlv_init_display_clock_gating(dev_priv);
-
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
@@ -7028,8 +7167,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- vlv_init_display_clock_gating(dev_priv);
-
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
I915_WRITE(GEN7_FF_THREAD_MODE,
@@ -7169,8 +7306,7 @@ void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->display.init_clock_gating)
- dev_priv->display.init_clock_gating(dev);
+ dev_priv->display.init_clock_gating(dev);
}
void intel_suspend_hw(struct drm_device *dev)
@@ -7179,6 +7315,60 @@ void intel_suspend_hw(struct drm_device *dev)
lpt_suspend_hw(dev);
}
+static void nop_init_clock_gating(struct drm_device *dev)
+{
+ DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
+}
+
+/**
+ * intel_init_clock_gating_hooks - setup the clock gating hooks
+ * @dev_priv: device private
+ *
+ * Set up the hooks that configure which clocks of a given platform can be
+ * gated and also apply various GT and display specific workarounds for these
+ * platforms. Note that some GT specific workarounds are applied separately
+ * when GPU contexts or batchbuffers start their execution.
+ */
+void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
+{
+ if (IS_SKYLAKE(dev_priv))
+ dev_priv->display.init_clock_gating = skylake_init_clock_gating;
+ else if (IS_KABYLAKE(dev_priv))
+ dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
+ else if (IS_BROXTON(dev_priv))
+ dev_priv->display.init_clock_gating = bxt_init_clock_gating;
+ else if (IS_BROADWELL(dev_priv))
+ dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
+ else if (IS_CHERRYVIEW(dev_priv))
+ dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
+ else if (IS_HASWELL(dev_priv))
+ dev_priv->display.init_clock_gating = haswell_init_clock_gating;
+ else if (IS_IVYBRIDGE(dev_priv))
+ dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
+ else if (IS_VALLEYVIEW(dev_priv))
+ dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
+ else if (IS_GEN6(dev_priv))
+ dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+ else if (IS_GEN5(dev_priv))
+ dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
+ else if (IS_G4X(dev_priv))
+ dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+ else if (IS_CRESTLINE(dev_priv))
+ dev_priv->display.init_clock_gating = crestline_init_clock_gating;
+ else if (IS_BROADWATER(dev_priv))
+ dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
+ else if (IS_GEN3(dev_priv))
+ dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
+ dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ else if (IS_GEN2(dev_priv))
+ dev_priv->display.init_clock_gating = i830_init_clock_gating;
+ else {
+ MISSING_CASE(INTEL_DEVID(dev_priv));
+ dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ }
+}
+
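
The new intel_init_clock_gating_hooks() guarantees init_clock_gating is
always callable, so the NULL check at the call site could be dropped in
favour of a nop fallback. A minimal sketch of that pattern with
illustrative names:

#include <stdio.h>

struct device_hooks {
	void (*init_clock_gating)(void);
};

static void platform_a_init(void)
{
	puts("platform A clock gating workarounds");
}

/* nop fallback: keeps the call site unconditional, which is exactly
 * the point of dropping the "if (hook) hook();" check in the patch. */
static void nop_init(void)
{
	puts("no clock gating settings or workarounds applied");
}

static void init_hooks(struct device_hooks *hooks, int platform_id)
{
	if (platform_id == 1)
		hooks->init_clock_gating = platform_a_init;
	else
		hooks->init_clock_gating = nop_init;
}

int main(void)
{
	struct device_hooks hooks;

	init_hooks(&hooks, 2);
	hooks.init_clock_gating();	/* always safe to call */
	return 0;
}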
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
@@ -7195,10 +7385,6 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (INTEL_INFO(dev)->gen >= 9) {
skl_setup_wm_latency(dev);
-
- if (IS_BROXTON(dev))
- dev_priv->display.init_clock_gating =
- bxt_init_clock_gating;
dev_priv->display.update_wm = skl_update_wm;
} else if (HAS_PCH_SPLIT(dev)) {
ilk_setup_wm_latency(dev);
@@ -7207,36 +7393,23 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
(!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
- dev_priv->display.update_wm = ilk_update_wm;
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
- dev_priv->display.program_watermarks = ilk_program_watermarks;
+ dev_priv->display.compute_intermediate_wm =
+ ilk_compute_intermediate_wm;
+ dev_priv->display.initial_watermarks =
+ ilk_initial_watermarks;
+ dev_priv->display.optimize_watermarks =
+ ilk_optimize_watermarks;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
}
-
- if (IS_GEN5(dev))
- dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
- else if (IS_GEN6(dev))
- dev_priv->display.init_clock_gating = gen6_init_clock_gating;
- else if (IS_IVYBRIDGE(dev))
- dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
- else if (IS_HASWELL(dev))
- dev_priv->display.init_clock_gating = haswell_init_clock_gating;
- else if (INTEL_INFO(dev)->gen == 8)
- dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
} else if (IS_CHERRYVIEW(dev)) {
vlv_setup_wm_latency(dev);
-
dev_priv->display.update_wm = vlv_update_wm;
- dev_priv->display.init_clock_gating =
- cherryview_init_clock_gating;
} else if (IS_VALLEYVIEW(dev)) {
vlv_setup_wm_latency(dev);
-
dev_priv->display.update_wm = vlv_update_wm;
- dev_priv->display.init_clock_gating =
- valleyview_init_clock_gating;
} else if (IS_PINEVIEW(dev)) {
if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
dev_priv->is_ddr3,
@@ -7252,20 +7425,13 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
- dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_G4X(dev)) {
dev_priv->display.update_wm = g4x_update_wm;
- dev_priv->display.init_clock_gating = g4x_init_clock_gating;
} else if (IS_GEN4(dev)) {
dev_priv->display.update_wm = i965_update_wm;
- if (IS_CRESTLINE(dev))
- dev_priv->display.init_clock_gating = crestline_init_clock_gating;
- else if (IS_BROADWATER(dev))
- dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
} else if (IS_GEN3(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_GEN2(dev)) {
if (INTEL_INFO(dev)->num_pipes == 1) {
dev_priv->display.update_wm = i845_update_wm;
@@ -7274,11 +7440,6 @@ void intel_init_pm(struct drm_device *dev)
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
-
- if (IS_I85X(dev) || IS_I865G(dev))
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- else
- dev_priv->display.init_clock_gating = i830_init_clock_gating;
} else {
DRM_ERROR("unexpected fall-through in intel_init_pm\n");
}
@@ -7332,78 +7493,43 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val
return 0;
}
-static int vlv_gpu_freq_div(unsigned int czclk_freq)
-{
- switch (czclk_freq) {
- case 200:
- return 10;
- case 267:
- return 12;
- case 320:
- case 333:
- return 16;
- case 400:
- return 20;
- default:
- return -1;
- }
-}
-
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
- div = vlv_gpu_freq_div(czclk_freq);
- if (div < 0)
- return div;
-
- return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
+ /*
+ * N = val - 0xb7
+ * Slow = Fast = GPLL ref * N
+ */
+ return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
- mul = vlv_gpu_freq_div(czclk_freq);
- if (mul < 0)
- return mul;
-
- return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
+ return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
- div = vlv_gpu_freq_div(czclk_freq);
- if (div < 0)
- return div;
- div /= 2;
-
- return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
+ /*
+ * N = val / 2
+ * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
+ */
+ return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
-
- mul = vlv_gpu_freq_div(czclk_freq);
- if (mul < 0)
- return mul;
- mul /= 2;
-
/* CHV needs even values */
- return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
+ return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}
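As a quick numeric check of the GPLL-based conversion above — with an assumed 19200 kHz reference clock, an illustrative value not taken from this patch — the BYT opcode/MHz round trip can be verified standalone:

	/* Round-trip check of the BYT opcode<->MHz math above. */
	#include <stdio.h>

	#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

	static const int gpll_ref_freq = 19200;	/* kHz, assumed example */

	static int byt_gpu_freq(int val)
	{
		/* N = val - 0xb7; freq = GPLL ref * N, scaled kHz -> MHz */
		return DIV_ROUND_CLOSEST(gpll_ref_freq * (val - 0xb7), 1000);
	}

	static int byt_freq_opcode(int mhz)
	{
		return DIV_ROUND_CLOSEST(1000 * mhz, gpll_ref_freq) + 0xb7;
	}

	int main(void)
	{
		int opcode = 0xc0;
		int mhz = byt_gpu_freq(opcode);	/* 9 * 19.2 MHz -> 173 */

		printf("opcode 0x%x -> %d MHz -> opcode 0x%x\n",
		       opcode, mhz, byt_freq_opcode(mhz));
		return 0;
	}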
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
- if (IS_GEN9(dev_priv->dev))
+ if (IS_GEN9(dev_priv))
return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
GEN9_FREQ_SCALER);
- else if (IS_CHERRYVIEW(dev_priv->dev))
+ else if (IS_CHERRYVIEW(dev_priv))
return chv_gpu_freq(dev_priv, val);
- else if (IS_VALLEYVIEW(dev_priv->dev))
+ else if (IS_VALLEYVIEW(dev_priv))
return byt_gpu_freq(dev_priv, val);
else
return val * GT_FREQUENCY_MULTIPLIER;
@@ -7411,12 +7537,12 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
- if (IS_GEN9(dev_priv->dev))
+ if (IS_GEN9(dev_priv))
return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
GT_FREQUENCY_MULTIPLIER);
- else if (IS_CHERRYVIEW(dev_priv->dev))
+ else if (IS_CHERRYVIEW(dev_priv))
return chv_freq_opcode(dev_priv, val);
- else if (IS_VALLEYVIEW(dev_priv->dev))
+ else if (IS_VALLEYVIEW(dev_priv))
return byt_freq_opcode(dev_priv, val);
else
return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
@@ -7433,7 +7559,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
struct drm_i915_gem_request *req = boost->req;
if (!i915_gem_request_completed(req, true))
- gen6_rps_boost(to_i915(req->ring->dev), NULL,
+ gen6_rps_boost(to_i915(req->engine->dev), NULL,
req->emitted_jiffies);
i915_gem_request_unreference__unlocked(req);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index bd322d8fb..a788d1e95 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -546,7 +546,8 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
/* Wait till PSR is idle */
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+ EDP_PSR_STATUS_STATE_MASK) == 0,
+ 2 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
dev_priv->psr.active = false;
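The hunk above spells the timeout out as 2 * USEC_PER_SEC with a 10 * USEC_PER_MSEC poll interval instead of bare magic numbers. A rough userspace stand-in for the _wait_for(cond, timeout_us, poll_us) semantics — this mimics the polling contract, it is not the driver's macro:

	/* Illustrative poll-with-timeout loop; returns 0 on success. */
	#include <stdbool.h>
	#include <stdio.h>
	#include <time.h>

	#define USEC_PER_SEC  1000000L
	#define USEC_PER_MSEC 1000L

	static long now_us(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / 1000;
	}

	static int wait_for_cond(bool (*cond)(void), long timeout_us,
				 long poll_us)
	{
		long deadline = now_us() + timeout_us;

		while (!cond()) {
			if (now_us() > deadline)
				return -1;
			struct timespec ts = { 0, poll_us * 1000 };
			nanosleep(&ts, NULL);	/* back off between polls */
		}
		return 0;
	}

	static bool idle(void) { return true; }	/* stand-in status read */

	int main(void)
	{
		if (wait_for_cond(idle, 2 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
			fprintf(stderr, "Timed out waiting for PSR idle\n");
		return 0;
	}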
@@ -601,7 +602,7 @@ static void intel_psr_work(struct work_struct *work)
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
- if (HAS_DDI(dev_priv->dev)) {
+ if (HAS_DDI(dev_priv)) {
if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
@@ -819,8 +820,7 @@ void intel_psr_init(struct drm_device *dev)
/* Per platform default */
if (i915.enable_psr == -1) {
- if (IS_HASWELL(dev) || IS_BROADWELL(dev) ||
- IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
i915.enable_psr = 1;
else
i915.enable_psr = 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9121646d7..68c5af079 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -53,25 +53,19 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
ringbuf->tail, ringbuf->size);
}
-int intel_ring_space(struct intel_ringbuffer *ringbuf)
+bool intel_engine_stopped(struct intel_engine_cs *engine)
{
- intel_ring_update_space(ringbuf);
- return ringbuf->space;
-}
-
-bool intel_ring_stopped(struct intel_engine_cs *ring)
-{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
}
-static void __intel_ring_advance(struct intel_engine_cs *ring)
+static void __intel_ring_advance(struct intel_engine_cs *engine)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct intel_ringbuffer *ringbuf = engine->buffer;
ringbuf->tail &= ringbuf->size - 1;
- if (intel_ring_stopped(ring))
+ if (intel_engine_stopped(engine))
return;
- ring->write_tail(ring, ringbuf->tail);
+ engine->write_tail(engine, ringbuf->tail);
}
static int
@@ -79,7 +73,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
u32 cmd;
int ret;
@@ -94,9 +88,9 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, cmd);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
@@ -106,8 +100,8 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_device *dev = engine->dev;
u32 cmd;
int ret;
@@ -153,9 +147,9 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, cmd);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
@@ -200,34 +194,34 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ struct intel_engine_cs *engine = req->engine;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
- intel_ring_emit(ring, 0); /* low dword */
- intel_ring_emit(ring, 0); /* high dword */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(engine, 0); /* low dword */
+ intel_ring_emit(engine, 0); /* high dword */
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
+ intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
+ intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
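The emission above follows the driver's begin/emit/advance discipline: reserve a dword budget, write exactly that many dwords, then publish the new tail. A toy model of that discipline, with invented names and a fixed-size buffer:

	/* Sketch of the begin/emit/advance command-stream pattern. */
	#include <stdint.h>
	#include <stdio.h>

	#define RING_DWORDS 64

	struct ring {
		uint32_t buf[RING_DWORDS];
		unsigned tail;		/* next free dword */
		unsigned reserved;	/* dwords granted by begin() */
	};

	static int ring_begin(struct ring *r, unsigned n)
	{
		if (RING_DWORDS - r->tail < n)
			return -1;	/* real driver would wait for space */
		r->reserved = n;
		return 0;
	}

	static void ring_emit(struct ring *r, uint32_t dw)
	{
		r->buf[r->tail++] = dw;
		r->reserved--;
	}

	static void ring_advance(struct ring *r)
	{
		/* a hardware tail write would go here */
		if (r->reserved)
			fprintf(stderr, "emitted fewer dwords than reserved\n");
	}

	int main(void)
	{
		struct ring r = { .tail = 0 };

		if (ring_begin(&r, 2) == 0) {
			ring_emit(&r, 0x02000000);	/* e.g. a flush opcode */
			ring_emit(&r, 0);		/* MI_NOOP-style padding */
			ring_advance(&r);
		}
		return 0;
	}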
@@ -236,9 +230,9 @@ static int
gen6_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
u32 flags = 0;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -276,11 +270,11 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(engine, flags);
+ intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(engine, 0);
+ intel_ring_advance(engine);
return 0;
}
@@ -288,19 +282,19 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, 0);
+ intel_ring_advance(engine);
return 0;
}
@@ -309,9 +303,9 @@ static int
gen7_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
u32 flags = 0;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/*
@@ -360,11 +354,11 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
+ intel_ring_emit(engine, flags);
+ intel_ring_emit(engine, scratch_addr);
+ intel_ring_emit(engine, 0);
+ intel_ring_advance(engine);
return 0;
}
@@ -373,20 +367,20 @@ static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
u32 flags, u32 scratch_addr)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 6);
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(engine, flags);
+ intel_ring_emit(engine, scratch_addr);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, 0);
+ intel_ring_advance(engine);
return 0;
}
@@ -396,7 +390,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
- u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
flags |= PIPE_CONTROL_CS_STALL;
@@ -429,51 +423,51 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
return gen8_emit_pipe_control(req, flags, scratch_addr);
}
-static void ring_write_tail(struct intel_engine_cs *ring,
+static void ring_write_tail(struct intel_engine_cs *engine,
u32 value)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- I915_WRITE_TAIL(ring, value);
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ I915_WRITE_TAIL(engine, value);
}
-u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
+u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
u64 acthd;
- if (INTEL_INFO(ring->dev)->gen >= 8)
- acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
- RING_ACTHD_UDW(ring->mmio_base));
- else if (INTEL_INFO(ring->dev)->gen >= 4)
- acthd = I915_READ(RING_ACTHD(ring->mmio_base));
+ if (INTEL_INFO(engine->dev)->gen >= 8)
+ acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
+ RING_ACTHD_UDW(engine->mmio_base));
+ else if (INTEL_INFO(engine->dev)->gen >= 4)
+ acthd = I915_READ(RING_ACTHD(engine->mmio_base));
else
acthd = I915_READ(ACTHD);
return acthd;
}
-static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
+static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
u32 addr;
addr = dev_priv->status_page_dmah->busaddr;
- if (INTEL_INFO(ring->dev)->gen >= 4)
+ if (INTEL_INFO(engine->dev)->gen >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
}
-static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
+static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = engine->dev;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
i915_reg_t mmio;
/* The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
if (IS_GEN7(dev)) {
- switch (ring->id) {
+ switch (engine->id) {
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
break;
@@ -492,14 +486,14 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(ring->dev)) {
- mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+ } else if (IS_GEN6(engine->dev)) {
+ mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
- mmio = RING_HWS_PGA(ring->mmio_base);
+ mmio = RING_HWS_PGA(engine->mmio_base);
}
- I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ I915_WRITE(mmio, (u32)engine->status_page.gfx_addr);
POSTING_READ(mmio);
/*
@@ -510,10 +504,10 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
* invalidating the TLB?
*/
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
- i915_reg_t reg = RING_INSTPM(ring->mmio_base);
+ i915_reg_t reg = RING_INSTPM(engine->mmio_base);
/* ring should be idle before issuing a sync flush*/
- WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+ WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
I915_WRITE(reg,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
@@ -521,117 +515,125 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
1000))
DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
- ring->name);
+ engine->name);
}
}
-static bool stop_ring(struct intel_engine_cs *ring)
+static bool stop_ring(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
- if (!IS_GEN2(ring->dev)) {
- I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
- if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
- DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
+ if (!IS_GEN2(engine->dev)) {
+ I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
+ if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) {
+ DRM_ERROR("%s : timed out trying to stop ring\n",
+ engine->name);
/* Sometimes we observe that the idle flag is not
* set even though the ring is empty. So double
* check before giving up.
*/
- if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
+ if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
return false;
}
}
- I915_WRITE_CTL(ring, 0);
- I915_WRITE_HEAD(ring, 0);
- ring->write_tail(ring, 0);
+ I915_WRITE_CTL(engine, 0);
+ I915_WRITE_HEAD(engine, 0);
+ engine->write_tail(engine, 0);
- if (!IS_GEN2(ring->dev)) {
- (void)I915_READ_CTL(ring);
- I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+ if (!IS_GEN2(engine->dev)) {
+ (void)I915_READ_CTL(engine);
+ I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
}
- return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+ return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
-static int init_ring_common(struct intel_engine_cs *ring)
+void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
+}
+
+static int init_ring_common(struct intel_engine_cs *engine)
+{
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct intel_ringbuffer *ringbuf = engine->buffer;
struct drm_i915_gem_object *obj = ringbuf->obj;
int ret = 0;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- if (!stop_ring(ring)) {
+ if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
DRM_DEBUG_KMS("%s head not reset to zero "
"ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ_CTL(ring),
- I915_READ_HEAD(ring),
- I915_READ_TAIL(ring),
- I915_READ_START(ring));
+ engine->name,
+ I915_READ_CTL(engine),
+ I915_READ_HEAD(engine),
+ I915_READ_TAIL(engine),
+ I915_READ_START(engine));
- if (!stop_ring(ring)) {
+ if (!stop_ring(engine)) {
DRM_ERROR("failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ_CTL(ring),
- I915_READ_HEAD(ring),
- I915_READ_TAIL(ring),
- I915_READ_START(ring));
+ engine->name,
+ I915_READ_CTL(engine),
+ I915_READ_HEAD(engine),
+ I915_READ_TAIL(engine),
+ I915_READ_START(engine));
ret = -EIO;
goto out;
}
}
if (I915_NEED_GFX_HWS(dev))
- intel_ring_setup_status_page(ring);
+ intel_ring_setup_status_page(engine);
else
- ring_setup_phys_status_page(ring);
+ ring_setup_phys_status_page(engine);
/* Enforce ordering by reading HEAD register back */
- I915_READ_HEAD(ring);
+ I915_READ_HEAD(engine);
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
- I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
+ I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj));
/* WaClearRingBufHeadRegAtInit:ctg,elk */
- if (I915_READ_HEAD(ring))
+ if (I915_READ_HEAD(engine))
DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
- ring->name, I915_READ_HEAD(ring));
- I915_WRITE_HEAD(ring, 0);
- (void)I915_READ_HEAD(ring);
+ engine->name, I915_READ_HEAD(engine));
+ I915_WRITE_HEAD(engine, 0);
+ (void)I915_READ_HEAD(engine);
- I915_WRITE_CTL(ring,
+ I915_WRITE_CTL(engine,
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
- if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
- I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
- (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
+ if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 &&
+ I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) &&
+ (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
- ring->name,
- I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
- I915_READ_HEAD(ring), I915_READ_TAIL(ring),
- I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
+ engine->name,
+ I915_READ_CTL(engine),
+ I915_READ_CTL(engine) & RING_VALID,
+ I915_READ_HEAD(engine), I915_READ_TAIL(engine),
+ I915_READ_START(engine),
+ (unsigned long)i915_gem_obj_ggtt_offset(obj));
ret = -EIO;
goto out;
}
ringbuf->last_retired_head = -1;
- ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+ ringbuf->head = I915_READ_HEAD(engine);
+ ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
intel_ring_update_space(ringbuf);
- memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
+ intel_engine_init_hangcheck(engine);
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -640,59 +642,60 @@ out:
}
void
-intel_fini_pipe_control(struct intel_engine_cs *ring)
+intel_fini_pipe_control(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
- if (ring->scratch.obj == NULL)
+ if (engine->scratch.obj == NULL)
return;
if (INTEL_INFO(dev)->gen >= 5) {
- kunmap(sg_page(ring->scratch.obj->pages->sgl));
- i915_gem_object_ggtt_unpin(ring->scratch.obj);
+ kunmap(sg_page(engine->scratch.obj->pages->sgl));
+ i915_gem_object_ggtt_unpin(engine->scratch.obj);
}
- drm_gem_object_unreference(&ring->scratch.obj->base);
- ring->scratch.obj = NULL;
+ drm_gem_object_unreference(&engine->scratch.obj->base);
+ engine->scratch.obj = NULL;
}
int
-intel_init_pipe_control(struct intel_engine_cs *ring)
+intel_init_pipe_control(struct intel_engine_cs *engine)
{
int ret;
- WARN_ON(ring->scratch.obj);
+ WARN_ON(engine->scratch.obj);
- ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
- if (ring->scratch.obj == NULL) {
+ engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096);
+ if (engine->scratch.obj == NULL) {
DRM_ERROR("Failed to allocate seqno page\n");
ret = -ENOMEM;
goto err;
}
- ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
+ ret = i915_gem_object_set_cache_level(engine->scratch.obj,
+ I915_CACHE_LLC);
if (ret)
goto err_unref;
- ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
+ ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0);
if (ret)
goto err_unref;
- ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
- ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
- if (ring->scratch.cpu_page == NULL) {
+ engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj);
+ engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl));
+ if (engine->scratch.cpu_page == NULL) {
ret = -ENOMEM;
goto err_unpin;
}
DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
- ring->name, ring->scratch.gtt_offset);
+ engine->name, engine->scratch.gtt_offset);
return 0;
err_unpin:
- i915_gem_object_ggtt_unpin(ring->scratch.obj);
+ i915_gem_object_ggtt_unpin(engine->scratch.obj);
err_unref:
- drm_gem_object_unreference(&ring->scratch.obj->base);
+ drm_gem_object_unreference(&engine->scratch.obj->base);
err:
return ret;
}
@@ -700,15 +703,15 @@ err:
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
- struct intel_engine_cs *ring = req->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;
if (w->count == 0)
return 0;
- ring->gpu_caches_dirty = true;
+ engine->gpu_caches_dirty = true;
ret = intel_ring_flush_all_caches(req);
if (ret)
return ret;
@@ -717,16 +720,16 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
if (ret)
return ret;
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+ intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
- intel_ring_emit_reg(ring, w->reg[i].addr);
- intel_ring_emit(ring, w->reg[i].value);
+ intel_ring_emit_reg(engine, w->reg[i].addr);
+ intel_ring_emit(engine, w->reg[i].value);
}
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
- ring->gpu_caches_dirty = true;
+ engine->gpu_caches_dirty = true;
ret = intel_ring_flush_all_caches(req);
if (ret)
return ret;
@@ -789,25 +792,26 @@ static int wa_add(struct drm_i915_private *dev_priv,
#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
-static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg)
+static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
+ i915_reg_t reg)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
struct i915_workarounds *wa = &dev_priv->workarounds;
- const uint32_t index = wa->hw_whitelist_count[ring->id];
+ const uint32_t index = wa->hw_whitelist_count[engine->id];
if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
return -EINVAL;
- WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index),
+ WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
i915_mmio_reg_offset(reg));
- wa->hw_whitelist_count[ring->id]++;
+ wa->hw_whitelist_count[engine->id]++;
return 0;
}
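A compact sketch of the bounded-slot registration idea behind wa_ring_whitelist_reg(): a fixed array of hardware slots, refusing further entries once full. The constants and offsets below are placeholders, not real register values:

	/* Bounded whitelist-slot registration, illustrative only. */
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_SLOTS 12	/* stands in for RING_MAX_NONPRIV_SLOTS */

	static uint32_t slots[MAX_SLOTS];
	static unsigned slot_count;

	static int whitelist_reg(uint32_t reg_offset)
	{
		if (slot_count >= MAX_SLOTS) {
			fprintf(stderr, "out of whitelist slots\n");
			return -1;
		}
		slots[slot_count++] = reg_offset;
		return 0;
	}

	int main(void)
	{
		whitelist_reg(0x7304);	/* placeholder offsets */
		whitelist_reg(0xb118);
		printf("%u slots used\n", slot_count);
		return 0;
	}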
-static int gen8_init_workarounds(struct intel_engine_cs *ring)
+static int gen8_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
@@ -857,13 +861,13 @@ static int gen8_init_workarounds(struct intel_engine_cs *ring)
return 0;
}
-static int bdw_init_workarounds(struct intel_engine_cs *ring)
+static int bdw_init_workarounds(struct intel_engine_cs *engine)
{
int ret;
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- ret = gen8_init_workarounds(ring);
+ ret = gen8_init_workarounds(engine);
if (ret)
return ret;
@@ -886,13 +890,13 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
return 0;
}
-static int chv_init_workarounds(struct intel_engine_cs *ring)
+static int chv_init_workarounds(struct intel_engine_cs *engine)
{
int ret;
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- ret = gen8_init_workarounds(ring);
+ ret = gen8_init_workarounds(engine);
if (ret)
return ret;
@@ -905,26 +909,30 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
return 0;
}
-static int gen9_init_workarounds(struct intel_engine_cs *ring)
+static int gen9_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t tmp;
int ret;
- /* WaEnableLbsSlaRetryTimerDecrement:skl */
+ /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
+ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
- /* WaDisableKillLogic:bxt,skl */
+ /* WaDisableKillLogic:bxt,skl,kbl */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
ECOCHK_DIS_TLB);
- /* WaDisablePartialInstShootdown:skl,bxt */
+ /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
+ /* WaDisablePartialInstShootdown:skl,bxt,kbl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ FLOW_CONTROL_ENABLE |
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
- /* Syncing dependencies between camera and graphics:skl,bxt */
+ /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
@@ -946,17 +954,18 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
*/
}
- /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
- if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_ENABLE_YV12_BUGFIX);
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
+ /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_YV12_BUGFIX |
+ GEN9_ENABLE_GPGPU_PREEMPTION);
- /* Wa4x4STCOptimizationDisable:skl,bxt */
- /* WaDisablePartialResolveInVc:skl,bxt */
+ /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
+ /* WaDisablePartialResolveInVc:skl,bxt,kbl */
WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
- /* WaCcsTlbPrefetchDisable:skl,bxt */
+ /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
@@ -966,41 +975,67 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
PIXEL_MASK_CAMMING_DISABLE);
- /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
- tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
- if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
- IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
- tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
- WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+ /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+	 * both tied to WaForceContextSaveRestoreNonCoherent
+	 * in some hsds for skl. We keep the tie for all gen9. The
+	 * documentation is a bit hazy, so we want common behaviour
+	 * even though there is no clear evidence we would need both on kbl/bxt.
+	 * This area has been a source of system hangs, so we play it safe
+	 * and mimic the skl behaviour regardless of what bspec says.

+ *
+ * Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
+
+ /* WaForceEnableNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+
+ /* WaDisableHDCInvalidation:skl,bxt,kbl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ BDW_DISABLE_HDC_INVALIDATION);
- /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
- if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+ /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
- /* WaDisableSTUnitPowerOptimization:skl,bxt */
+ /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
- /* WaOCLCoherentLineFlush:skl,bxt */
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl */
I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
GEN8_LQSC_FLUSH_COHERENT_LINES));
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
- ret= wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1);
+ /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
+ ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
if (ret)
return ret;
- /* WaAllowUMDToModifyHDCChicken1:skl,bxt */
- ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1);
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
+	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+ if (ret)
+ return ret;
+
+ /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
+ ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
if (ret)
return ret;
return 0;
}
-static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
+static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
@@ -1040,13 +1075,13 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
return 0;
}
-static int skl_init_workarounds(struct intel_engine_cs *ring)
+static int skl_init_workarounds(struct intel_engine_cs *engine)
{
int ret;
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- ret = gen9_init_workarounds(ring);
+ ret = gen9_init_workarounds(engine);
if (ret)
return ret;
@@ -1085,22 +1120,6 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
- /* This is tied to WaForceContextSaveRestoreNonCoherent */
- if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
- /*
- *Use Force Non-Coherent whenever executing a 3D context. This
- * is a workaround for a possible hang in the unlikely event
- * a TLB invalidation occurs during a PSD flush.
- */
- /* WaForceEnableNonCoherent:skl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
-
- /* WaDisableHDCInvalidation:skl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
- }
-
/* WaBarrierPerformanceFixDisable:skl */
if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
WA_SET_BIT_MASKED(HDC_CHICKEN0,
@@ -1113,21 +1132,24 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ /* WaDisableGafsUnitClkGating:skl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
/* WaDisableLSQCROPERFforOCL:skl */
- ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
return ret;
- return skl_tune_iz_hashing(ring);
+ return skl_tune_iz_hashing(engine);
}
-static int bxt_init_workarounds(struct intel_engine_cs *ring)
+static int bxt_init_workarounds(struct intel_engine_cs *engine)
{
int ret;
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- ret = gen9_init_workarounds(ring);
+ ret = gen9_init_workarounds(engine);
if (ret)
return ret;
@@ -1158,48 +1180,108 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
/* WaDisableObjectLevelPreemtionForInstanceId:bxt */
/* WaDisableLSQCROPERFforOCL:bxt */
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
- ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1);
+ ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
if (ret)
return ret;
- ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
if (ret)
return ret;
}
+ /* WaInsertDummyPushConstPs:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
return 0;
}
-int init_workarounds_ring(struct intel_engine_cs *ring)
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ int ret;
+
+ ret = gen9_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaEnableGapsTsvCreditFix:kbl */
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+ /* WaDisableDynamicCreditSharing:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ WA_SET_BIT(GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+ /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE);
+
+	/* GEN8_L3SQCREG4 has a dependency on the WA batch, so any new changes
+	 * involving this register should also be added to the WA batch as required.
+ */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+ GEN8_LQSC_RO_PERF_DIS);
+
+ /* WaInsertDummyPushConstPs:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableGafsUnitClkGating:kbl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableSbeCacheDispatchPortSharing:kbl */
+ WA_SET_BIT_MASKED(
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ if (ret)
+ return ret;
+
+ return 0;
+}
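Most of the new kbl workarounds above are gated on stepping ranges via IS_KBL_REVID(first, last). A self-contained sketch of that revision-range gating — the revision names and values below are invented for illustration:

	/* Stepping-range gating: apply a fix only in [first, last]. */
	#include <stdbool.h>
	#include <stdio.h>

	enum { REVID_A0 = 0, REVID_B0 = 1, REVID_E0 = 4,
	       REVID_FOREVER = 0xff };

	static bool revid_in_range(int revid, int first, int last)
	{
		return revid >= first && revid <= last;
	}

	int main(void)
	{
		int revid = REVID_B0;	/* pretend hardware stepping */

		if (revid_in_range(revid, 0, REVID_B0))
			puts("applying pre-production-only workaround");
		if (revid_in_range(revid, 0, REVID_FOREVER))
			puts("applying workaround on all steppings");
		return 0;
	}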
+
+int init_workarounds_ring(struct intel_engine_cs *engine)
+{
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- WARN_ON(ring->id != RCS);
+ WARN_ON(engine->id != RCS);
dev_priv->workarounds.count = 0;
dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
if (IS_BROADWELL(dev))
- return bdw_init_workarounds(ring);
+ return bdw_init_workarounds(engine);
if (IS_CHERRYVIEW(dev))
- return chv_init_workarounds(ring);
+ return chv_init_workarounds(engine);
if (IS_SKYLAKE(dev))
- return skl_init_workarounds(ring);
+ return skl_init_workarounds(engine);
if (IS_BROXTON(dev))
- return bxt_init_workarounds(ring);
+ return bxt_init_workarounds(engine);
+
+ if (IS_KABYLAKE(dev_priv))
+ return kbl_init_workarounds(engine);
return 0;
}
-static int init_render_ring(struct intel_engine_cs *ring)
+static int init_render_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = init_ring_common(ring);
+ int ret = init_ring_common(engine);
if (ret)
return ret;
@@ -1242,14 +1324,14 @@ static int init_render_ring(struct intel_engine_cs *ring)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (HAS_L3_DPF(dev))
- I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
+ I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
- return init_workarounds_ring(ring);
+ return init_workarounds_ring(engine);
}
-static void render_ring_cleanup(struct intel_engine_cs *ring)
+static void render_ring_cleanup(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->semaphore_obj) {
@@ -1258,18 +1340,19 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
dev_priv->semaphore_obj = NULL;
}
- intel_fini_pipe_control(ring);
+ intel_fini_pipe_control(engine);
}
static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
- struct intel_engine_cs *signaller = signaller_req->ring;
+ struct intel_engine_cs *signaller = signaller_req->engine;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *waiter;
- int i, ret, num_rings;
+ enum intel_engine_id id;
+ int ret, num_rings;
num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
@@ -1279,9 +1362,9 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
if (ret)
return ret;
- for_each_ring(waiter, dev_priv, i) {
+ for_each_engine_id(waiter, dev_priv, id) {
u32 seqno;
- u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+ u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
@@ -1295,7 +1378,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, 0);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->id));
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
intel_ring_emit(signaller, 0);
}
@@ -1306,11 +1389,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
- struct intel_engine_cs *signaller = signaller_req->ring;
+ struct intel_engine_cs *signaller = signaller_req->engine;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *waiter;
- int i, ret, num_rings;
+ enum intel_engine_id id;
+ int ret, num_rings;
num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
@@ -1320,9 +1404,9 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
if (ret)
return ret;
- for_each_ring(waiter, dev_priv, i) {
+ for_each_engine_id(waiter, dev_priv, id) {
u32 seqno;
- u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+ u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
@@ -1334,7 +1418,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->id));
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
intel_ring_emit(signaller, 0);
}
@@ -1344,11 +1428,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
static int gen6_signal(struct drm_i915_gem_request *signaller_req,
unsigned int num_dwords)
{
- struct intel_engine_cs *signaller = signaller_req->ring;
+ struct intel_engine_cs *signaller = signaller_req->engine;
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *useless;
- int i, ret, num_rings;
+ enum intel_engine_id id;
+ int ret, num_rings;
#define MBOX_UPDATE_DWORDS 3
num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
@@ -1359,8 +1444,8 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
if (ret)
return ret;
- for_each_ring(useless, dev_priv, i) {
- i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
+ for_each_engine_id(useless, dev_priv, id) {
+ i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
if (i915_mmio_reg_valid(mbox_reg)) {
u32 seqno = i915_gem_request_get_seqno(signaller_req);
@@ -1389,22 +1474,23 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
static int
gen6_add_request(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
- if (ring->semaphore.signal)
- ret = ring->semaphore.signal(req, 4);
+ if (engine->semaphore.signal)
+ ret = engine->semaphore.signal(req, 4);
else
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
- intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- __intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(engine,
+ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(engine, MI_USER_INTERRUPT);
+ __intel_ring_advance(engine);
return 0;
}
@@ -1429,7 +1515,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
struct intel_engine_cs *signaller,
u32 seqno)
{
- struct intel_engine_cs *waiter = waiter_req->ring;
+ struct intel_engine_cs *waiter = waiter_req->engine;
struct drm_i915_private *dev_priv = waiter->dev->dev_private;
int ret;
@@ -1455,7 +1541,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
struct intel_engine_cs *signaller,
u32 seqno)
{
- struct intel_engine_cs *waiter = waiter_req->ring;
+ struct intel_engine_cs *waiter = waiter_req->engine;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
@@ -1503,8 +1589,8 @@ do { \
static int
pc_render_add_request(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
- u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+ struct intel_engine_cs *engine = req->engine;
+ u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
int ret;
/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
@@ -1519,78 +1605,93 @@ pc_render_add_request(struct drm_i915_gem_request *req)
if (ret)
return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ intel_ring_emit(engine,
+ GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
- intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
- intel_ring_emit(ring, 0);
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ intel_ring_emit(engine,
+ engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(engine, 0);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
scratch_addr += 2 * CACHELINE_BYTES;
- PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ PIPE_CONTROL_FLUSH(engine, scratch_addr);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+ intel_ring_emit(engine,
+ GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
- intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
- intel_ring_emit(ring, 0);
- __intel_ring_advance(ring);
+ intel_ring_emit(engine,
+ engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(engine, 0);
+ __intel_ring_advance(engine);
return 0;
}
-static u32
-gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+static void
+gen6_seqno_barrier(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
- * ACTHD) before reading the status page. */
- if (!lazy_coherency) {
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- POSTING_READ(RING_ACTHD(ring->mmio_base));
- }
-
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ * ACTHD) before reading the status page.
+ *
+ * Note that this effectively stalls the read by the time it takes to
+ * do a memory transaction, which more or less ensures that the write
+ * from the GPU has sufficient time to invalidate the CPU cacheline.
+ * Alternatively we could delay the interrupt from the CS ring to give
+ * the write time to land, but that would incur a delay after every
+ * batch i.e. much more frequent than a delay when waiting for the
+ * interrupt (with the same net latency).
+ *
+ * Also note that to prevent whole machine hangs on gen7, we have to
+ * take the spinlock to guard against concurrent cacheline access.
+ */
+ spin_lock_irq(&dev_priv->uncore.lock);
+ POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
+ spin_unlock_irq(&dev_priv->uncore.lock);
}
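A userspace illustration of the barrier idea described in the comment above: pay for one dummy transaction before sampling the status location, so an in-flight write has time to become visible. This models the concept only, not the real MMIO path or the gen7 uncore lock:

	/* Dummy read as an ordering stall before sampling a seqno. */
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	static _Atomic uint32_t status_page_seqno;
	static volatile uint32_t acthd_reg;	/* stand-in MMIO register */

	static uint32_t read_seqno_with_barrier(void)
	{
		(void)acthd_reg;	/* posting read: one extra transaction */
		atomic_thread_fence(memory_order_acquire);
		return atomic_load(&status_page_seqno);
	}

	int main(void)
	{
		atomic_store(&status_page_seqno, 42);
		printf("seqno = %u\n", read_seqno_with_barrier());
		return 0;
	}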
static u32
-ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+ring_get_seqno(struct intel_engine_cs *engine)
{
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}
static void
-ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+ring_set_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
+ intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
}
static u32
-pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
+pc_render_get_seqno(struct intel_engine_cs *engine)
{
- return ring->scratch.cpu_page[0];
+ return engine->scratch.cpu_page[0];
}
static void
-pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
+pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- ring->scratch.cpu_page[0] = seqno;
+ engine->scratch.cpu_page[0] = seqno;
}
static bool
-gen5_ring_get_irq(struct intel_engine_cs *ring)
+gen5_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
@@ -1598,30 +1699,30 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount++ == 0)
- gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+ if (engine->irq_refcount++ == 0)
+ gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
static void
-gen5_ring_put_irq(struct intel_engine_cs *ring)
+gen5_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount == 0)
- gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+ if (--engine->irq_refcount == 0)
+ gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static bool
-i9xx_ring_get_irq(struct intel_engine_cs *ring)
+i9xx_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
@@ -1629,8 +1730,8 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount++ == 0) {
- dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ if (engine->irq_refcount++ == 0) {
+ dev_priv->irq_mask &= ~engine->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
}
@@ -1640,15 +1741,15 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
}
static void
-i9xx_ring_put_irq(struct intel_engine_cs *ring)
+i9xx_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount == 0) {
- dev_priv->irq_mask |= ring->irq_enable_mask;
+ if (--engine->irq_refcount == 0) {
+ dev_priv->irq_mask |= engine->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
}
@@ -1656,9 +1757,9 @@ i9xx_ring_put_irq(struct intel_engine_cs *ring)
}
static bool
-i8xx_ring_get_irq(struct intel_engine_cs *ring)
+i8xx_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
@@ -1666,8 +1767,8 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount++ == 0) {
- dev_priv->irq_mask &= ~ring->irq_enable_mask;
+ if (engine->irq_refcount++ == 0) {
+ dev_priv->irq_mask &= ~engine->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
}
@@ -1677,15 +1778,15 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
}
static void
-i8xx_ring_put_irq(struct intel_engine_cs *ring)
+i8xx_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount == 0) {
- dev_priv->irq_mask |= ring->irq_enable_mask;
+ if (--engine->irq_refcount == 0) {
+ dev_priv->irq_mask |= engine->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
}
@@ -1697,42 +1798,43 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate_domains,
u32 flush_domains)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_FLUSH);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
static int
i9xx_add_request(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 4);
if (ret)
return ret;
- intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(ring, i915_gem_request_get_seqno(req));
- intel_ring_emit(ring, MI_USER_INTERRUPT);
- __intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(engine,
+ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+ intel_ring_emit(engine, MI_USER_INTERRUPT);
+ __intel_ring_advance(engine);
return 0;
}
static bool
-gen6_ring_get_irq(struct intel_engine_cs *ring)
+gen6_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
@@ -1740,14 +1842,14 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount++ == 0) {
- if (HAS_L3_DPF(dev) && ring->id == RCS)
- I915_WRITE_IMR(ring,
- ~(ring->irq_enable_mask |
+ if (engine->irq_refcount++ == 0) {
+ if (HAS_L3_DPF(dev) && engine->id == RCS)
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask |
GT_PARITY_ERROR(dev)));
else
- I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+ I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+ gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1755,27 +1857,27 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
}
static void
-gen6_ring_put_irq(struct intel_engine_cs *ring)
+gen6_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount == 0) {
- if (HAS_L3_DPF(dev) && ring->id == RCS)
- I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
+ if (--engine->irq_refcount == 0) {
+ if (HAS_L3_DPF(dev) && engine->id == RCS)
+ I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
else
- I915_WRITE_IMR(ring, ~0);
- gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+ I915_WRITE_IMR(engine, ~0);
+ gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static bool
-hsw_vebox_get_irq(struct intel_engine_cs *ring)
+hsw_vebox_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
@@ -1783,9 +1885,9 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount++ == 0) {
- I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+ if (engine->irq_refcount++ == 0) {
+ I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+ gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1793,24 +1895,24 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
}
static void
-hsw_vebox_put_irq(struct intel_engine_cs *ring)
+hsw_vebox_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount == 0) {
- I915_WRITE_IMR(ring, ~0);
- gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+ if (--engine->irq_refcount == 0) {
+ I915_WRITE_IMR(engine, ~0);
+ gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static bool
-gen8_ring_get_irq(struct intel_engine_cs *ring)
+gen8_ring_get_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
@@ -1818,15 +1920,15 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount++ == 0) {
- if (HAS_L3_DPF(dev) && ring->id == RCS) {
- I915_WRITE_IMR(ring,
- ~(ring->irq_enable_mask |
+ if (engine->irq_refcount++ == 0) {
+ if (HAS_L3_DPF(dev) && engine->id == RCS) {
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
} else {
- I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+ I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
}
- POSTING_READ(RING_IMR(ring->mmio_base));
+ POSTING_READ(RING_IMR(engine->mmio_base));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1834,21 +1936,21 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
}
static void
-gen8_ring_put_irq(struct intel_engine_cs *ring)
+gen8_ring_put_irq(struct intel_engine_cs *engine)
{
- struct drm_device *dev = ring->dev;
+ struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount == 0) {
- if (HAS_L3_DPF(dev) && ring->id == RCS) {
- I915_WRITE_IMR(ring,
+ if (--engine->irq_refcount == 0) {
+ if (HAS_L3_DPF(dev) && engine->id == RCS) {
+ I915_WRITE_IMR(engine,
~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
} else {
- I915_WRITE_IMR(ring, ~0);
+ I915_WRITE_IMR(engine, ~0);
}
- POSTING_READ(RING_IMR(ring->mmio_base));
+ POSTING_READ(RING_IMR(engine->mmio_base));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1858,20 +1960,20 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 length,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring,
+ intel_ring_emit(engine,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965));
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, offset);
+ intel_ring_advance(engine);
return 0;
}
@@ -1885,8 +1987,8 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
- u32 cs_offset = ring->scratch.gtt_offset;
+ struct intel_engine_cs *engine = req->engine;
+ u32 cs_offset = engine->scratch.gtt_offset;
int ret;
ret = intel_ring_begin(req, 6);
@@ -1894,13 +1996,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
return ret;
/* Evict the invalid PTE TLBs */
- intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
- intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
- intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
- intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 0xdeadbeef);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+ intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+ intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+ intel_ring_emit(engine, cs_offset);
+ intel_ring_emit(engine, 0xdeadbeef);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
@@ -1914,16 +2016,17 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
* stable batch scratch bo area (so that the CS never
* stumbles over its tlb invalidation bug) ...
*/
- intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
- intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
- intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
- intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 4096);
- intel_ring_emit(ring, offset);
-
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+ intel_ring_emit(engine,
+ BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
+ intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
+ intel_ring_emit(engine, cs_offset);
+ intel_ring_emit(engine, 4096);
+ intel_ring_emit(engine, offset);
+
+ intel_ring_emit(engine, MI_FLUSH);
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
/* ... and execute it. */
offset = cs_offset;
@@ -1933,10 +2036,10 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
if (ret)
return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE));
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE));
+ intel_ring_advance(engine);
return 0;
}
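The i830 path dodges the CS TLB invalidation bug by first blitting the user batch into the pinned scratch area at cs_offset and then executing from that stable copy. The SRC_COPY blit copies in 4096-byte rows, and the "DIV_ROUND_UP(len, 4096) << 16 | 4096" dword packs row count (high half) and row pitch (low half); a small self-contained check of that packing:

	#include <stdint.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Row-count/row-pitch dword for the batch-copy blit above. */
	static uint32_t blit_rows_dword(uint32_t len)
	{
		return (DIV_ROUND_UP(len, 4096u) << 16) | 4096u;
	}
	/* e.g. len = 8192 -> 2 rows -> 0x00021000 */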
@@ -1946,55 +2049,55 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE));
- intel_ring_advance(ring);
+ intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+ intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE));
+ intel_ring_advance(engine);
return 0;
}
-static void cleanup_phys_status_page(struct intel_engine_cs *ring)
+static void cleanup_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
if (!dev_priv->status_page_dmah)
return;
- drm_pci_free(ring->dev, dev_priv->status_page_dmah);
- ring->status_page.page_addr = NULL;
+ drm_pci_free(engine->dev, dev_priv->status_page_dmah);
+ engine->status_page.page_addr = NULL;
}
-static void cleanup_status_page(struct intel_engine_cs *ring)
+static void cleanup_status_page(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
- obj = ring->status_page.obj;
+ obj = engine->status_page.obj;
if (obj == NULL)
return;
kunmap(sg_page(obj->pages->sgl));
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
- ring->status_page.obj = NULL;
+ engine->status_page.obj = NULL;
}
-static int init_status_page(struct intel_engine_cs *ring)
+static int init_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_object *obj = ring->status_page.obj;
+ struct drm_i915_gem_object *obj = engine->status_page.obj;
if (obj == NULL) {
unsigned flags;
int ret;
- obj = i915_gem_alloc_object(ring->dev, 4096);
+ obj = i915_gem_alloc_object(engine->dev, 4096);
if (obj == NULL) {
DRM_ERROR("Failed to allocate status page\n");
return -ENOMEM;
@@ -2005,7 +2108,7 @@ static int init_status_page(struct intel_engine_cs *ring)
goto err_unref;
flags = 0;
- if (!HAS_LLC(ring->dev))
+ if (!HAS_LLC(engine->dev))
/* On g33, we cannot place HWS above 256MiB, so
* restrict its pinning to the low mappable arena.
* Though this restriction is not documented for
@@ -2024,32 +2127,32 @@ err_unref:
return ret;
}
- ring->status_page.obj = obj;
+ engine->status_page.obj = obj;
}
- ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
- ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
+ engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
+ memset(engine->status_page.page_addr, 0, PAGE_SIZE);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
- ring->name, ring->status_page.gfx_addr);
+ engine->name, engine->status_page.gfx_addr);
return 0;
}
-static int init_phys_status_page(struct intel_engine_cs *ring)
+static int init_phys_status_page(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
if (!dev_priv->status_page_dmah) {
dev_priv->status_page_dmah =
- drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
+ drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah)
return -ENOMEM;
}
- ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(engine->status_page.page_addr, 0, PAGE_SIZE);
return 0;
}
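Whichever variant allocates it, the result is the same: status_page.page_addr points at one zeroed, CPU-visible page that the GPU writes breadcrumbs into, read back by dword index (the driver's intel_read_status_page() helper does roughly this):

	#include <stdint.h>

	/* Read one dword slot out of the hardware status page; the
	 * page is ordinary memory from the CPU's point of view. */
	static uint32_t read_status_page(const volatile uint32_t *page_addr,
					 int reg)
	{
		return page_addr[reg];
	}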
@@ -2057,7 +2160,7 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
- vunmap(ringbuf->virtual_start);
+ i915_gem_object_unpin_map(ringbuf->obj);
else
iounmap(ringbuf->virtual_start);
ringbuf->virtual_start = NULL;
@@ -2065,34 +2168,15 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
i915_gem_object_ggtt_unpin(ringbuf->obj);
}
-static u32 *vmap_obj(struct drm_i915_gem_object *obj)
-{
- struct sg_page_iter sg_iter;
- struct page **pages;
- void *addr;
- int i;
-
- pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
- if (pages == NULL)
- return NULL;
-
- i = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
- pages[i++] = sg_page_iter_page(&sg_iter);
-
- addr = vmap(pages, i, 0, PAGE_KERNEL);
- drm_free_large(pages);
-
- return addr;
-}
-
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj = ringbuf->obj;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
unsigned flags = PIN_OFFSET_BIAS | 4096;
+ void *addr;
int ret;
if (HAS_LLC(dev_priv) && !obj->stolen) {
@@ -2101,15 +2185,13 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
return ret;
ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (ret) {
- i915_gem_object_ggtt_unpin(obj);
- return ret;
- }
+ if (ret)
+ goto err_unpin;
- ringbuf->virtual_start = vmap_obj(obj);
- if (ringbuf->virtual_start == NULL) {
- i915_gem_object_ggtt_unpin(obj);
- return -ENOMEM;
+ addr = i915_gem_object_pin_map(obj);
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
+ goto err_unpin;
}
} else {
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
@@ -2118,25 +2200,27 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
return ret;
ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret) {
- i915_gem_object_ggtt_unpin(obj);
- return ret;
- }
+ if (ret)
+ goto err_unpin;
/* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(dev_priv);
- ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
- i915_gem_obj_ggtt_offset(obj), ringbuf->size);
- if (ringbuf->virtual_start == NULL) {
- i915_gem_object_ggtt_unpin(obj);
- return -EINVAL;
+ addr = ioremap_wc(ggtt->mappable_base +
+ i915_gem_obj_ggtt_offset(obj), ringbuf->size);
+ if (addr == NULL) {
+ ret = -ENOMEM;
+ goto err_unpin;
}
}
+ ringbuf->virtual_start = addr;
ringbuf->vma = i915_gem_obj_to_ggtt(obj);
-
return 0;
+
+err_unpin:
+ i915_gem_object_ggtt_unpin(obj);
+ return ret;
}
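The rewritten pin-and-map has two mapping paths -- a coherent CPU mapping via the new i915_gem_object_pin_map() on LLC parts, a write-combined ioremap of the GTT aperture otherwise -- and both now fail through the single err_unpin label instead of duplicating the unwind. A compilable sketch of that two-path/one-unwind shape (all names below are hypothetical stubs):

	#include <errno.h>
	#include <stddef.h>

	static int  pin(void *obj)                  { (void)obj; return 0; }
	static void unpin(void *obj)                { (void)obj; }
	static void *map_cpu(void *obj)             { return obj; }
	static void *map_wc(void *obj, size_t size) { (void)size; return obj; }

	static int pin_and_map(void *obj, size_t size, int has_llc, void **out)
	{
		void *addr;
		int ret = pin(obj);
		if (ret)
			return ret;

		addr = has_llc ? map_cpu(obj) : map_wc(obj, size);
		if (addr == NULL) {
			ret = -ENOMEM;
			goto err_unpin;		/* shared unwind path */
		}

		*out = addr;
		return 0;

	err_unpin:
		unpin(obj);
		return ret;
	}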
static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
@@ -2179,7 +2263,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
return ERR_PTR(-ENOMEM);
}
- ring->ring = engine;
+ ring->engine = engine;
list_add(&ring->link, &engine->buffers);
ring->size = size;
@@ -2215,37 +2299,38 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
}
static int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *engine)
{
struct intel_ringbuffer *ringbuf;
int ret;
- WARN_ON(ring->buffer);
+ WARN_ON(engine->buffer);
- ring->dev = dev;
- INIT_LIST_HEAD(&ring->active_list);
- INIT_LIST_HEAD(&ring->request_list);
- INIT_LIST_HEAD(&ring->execlist_queue);
- INIT_LIST_HEAD(&ring->buffers);
- i915_gem_batch_pool_init(dev, &ring->batch_pool);
- memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
+ engine->dev = dev;
+ INIT_LIST_HEAD(&engine->active_list);
+ INIT_LIST_HEAD(&engine->request_list);
+ INIT_LIST_HEAD(&engine->execlist_queue);
+ INIT_LIST_HEAD(&engine->buffers);
+ i915_gem_batch_pool_init(dev, &engine->batch_pool);
+ memset(engine->semaphore.sync_seqno, 0,
+ sizeof(engine->semaphore.sync_seqno));
- init_waitqueue_head(&ring->irq_queue);
+ init_waitqueue_head(&engine->irq_queue);
- ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
+ ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
if (IS_ERR(ringbuf)) {
ret = PTR_ERR(ringbuf);
goto error;
}
- ring->buffer = ringbuf;
+ engine->buffer = ringbuf;
if (I915_NEED_GFX_HWS(dev)) {
- ret = init_status_page(ring);
+ ret = init_status_page(engine);
if (ret)
goto error;
} else {
- WARN_ON(ring->id != RCS);
- ret = init_phys_status_page(ring);
+ WARN_ON(engine->id != RCS);
+ ret = init_phys_status_page(engine);
if (ret)
goto error;
}
@@ -2253,122 +2338,76 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
- ring->name, ret);
+ engine->name, ret);
intel_destroy_ringbuffer_obj(ringbuf);
goto error;
}
- ret = i915_cmd_parser_init_ring(ring);
+ ret = i915_cmd_parser_init_ring(engine);
if (ret)
goto error;
return 0;
error:
- intel_cleanup_ring_buffer(ring);
+ intel_cleanup_engine(engine);
return ret;
}
-void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
+void intel_cleanup_engine(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv;
- if (!intel_ring_initialized(ring))
+ if (!intel_engine_initialized(engine))
return;
- dev_priv = to_i915(ring->dev);
+ dev_priv = to_i915(engine->dev);
- if (ring->buffer) {
- intel_stop_ring_buffer(ring);
- WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
+ if (engine->buffer) {
+ intel_stop_engine(engine);
+ WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
- intel_unpin_ringbuffer_obj(ring->buffer);
- intel_ringbuffer_free(ring->buffer);
- ring->buffer = NULL;
+ intel_unpin_ringbuffer_obj(engine->buffer);
+ intel_ringbuffer_free(engine->buffer);
+ engine->buffer = NULL;
}
- if (ring->cleanup)
- ring->cleanup(ring);
+ if (engine->cleanup)
+ engine->cleanup(engine);
- if (I915_NEED_GFX_HWS(ring->dev)) {
- cleanup_status_page(ring);
+ if (I915_NEED_GFX_HWS(engine->dev)) {
+ cleanup_status_page(engine);
} else {
- WARN_ON(ring->id != RCS);
- cleanup_phys_status_page(ring);
- }
-
- i915_cmd_parser_fini_ring(ring);
- i915_gem_batch_pool_fini(&ring->batch_pool);
- ring->dev = NULL;
-}
-
-static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
-{
- struct intel_ringbuffer *ringbuf = ring->buffer;
- struct drm_i915_gem_request *request;
- unsigned space;
- int ret;
-
- if (intel_ring_space(ringbuf) >= n)
- return 0;
-
- /* The whole point of reserving space is to not wait! */
- WARN_ON(ringbuf->reserved_in_use);
-
- list_for_each_entry(request, &ring->request_list, list) {
- space = __intel_ring_space(request->postfix, ringbuf->tail,
- ringbuf->size);
- if (space >= n)
- break;
+ WARN_ON(engine->id != RCS);
+ cleanup_phys_status_page(engine);
}
- if (WARN_ON(&request->list == &ring->request_list))
- return -ENOSPC;
-
- ret = i915_wait_request(request);
- if (ret)
- return ret;
-
- ringbuf->space = space;
- return 0;
-}
-
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
- uint32_t __iomem *virt;
- int rem = ringbuf->size - ringbuf->tail;
-
- virt = ringbuf->virtual_start + ringbuf->tail;
- rem /= 4;
- while (rem--)
- iowrite32(MI_NOOP, virt++);
-
- ringbuf->tail = 0;
- intel_ring_update_space(ringbuf);
+ i915_cmd_parser_fini_ring(engine);
+ i915_gem_batch_pool_fini(&engine->batch_pool);
+ engine->dev = NULL;
}
-int intel_ring_idle(struct intel_engine_cs *ring)
+int intel_engine_idle(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req;
/* Wait upon the last request to be completed */
- if (list_empty(&ring->request_list))
+ if (list_empty(&engine->request_list))
return 0;
- req = list_entry(ring->request_list.prev,
- struct drm_i915_gem_request,
- list);
+ req = list_entry(engine->request_list.prev,
+ struct drm_i915_gem_request,
+ list);
/* Make sure we do not trigger any retires */
return __i915_wait_request(req,
- atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
- to_i915(ring->dev)->mm.interruptible,
+ req->i915->mm.interruptible,
NULL, NULL);
}
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
- request->ringbuf = request->ring->buffer;
+ request->ringbuf = request->engine->buffer;
return 0;
}
@@ -2389,63 +2428,82 @@ int intel_ring_reserve_space(struct drm_i915_gem_request *request)
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
{
- WARN_ON(ringbuf->reserved_size);
- WARN_ON(ringbuf->reserved_in_use);
-
+ GEM_BUG_ON(ringbuf->reserved_size);
ringbuf->reserved_size = size;
}
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
{
- WARN_ON(ringbuf->reserved_in_use);
-
+ GEM_BUG_ON(!ringbuf->reserved_size);
ringbuf->reserved_size = 0;
- ringbuf->reserved_in_use = false;
}
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
{
- WARN_ON(ringbuf->reserved_in_use);
-
- ringbuf->reserved_in_use = true;
- ringbuf->reserved_tail = ringbuf->tail;
+ GEM_BUG_ON(!ringbuf->reserved_size);
+ ringbuf->reserved_size = 0;
}
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
{
- WARN_ON(!ringbuf->reserved_in_use);
- if (ringbuf->tail > ringbuf->reserved_tail) {
- WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
- "request reserved size too small: %d vs %d!\n",
- ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
- } else {
+ GEM_BUG_ON(ringbuf->reserved_size);
+}
+
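After this cleanup the reserve/cancel/use/end quartet is a tiny state machine over reserved_size alone: reserve sets it, use (entered when request finalisation starts emitting) clears it so intel_ring_begin stops padding allocations, and end just asserts everything was consumed. Sketched with plain asserts standing in for GEM_BUG_ON:

	#include <assert.h>

	struct rb { int reserved_size; };

	static void reserve(struct rb *b, int size)
	{
		assert(b->reserved_size == 0);
		b->reserved_size = size;
	}

	static void use(struct rb *b)	/* finalisation starts */
	{
		assert(b->reserved_size != 0);
		b->reserved_size = 0;	/* stop over-allocating */
	}

	static void end(struct rb *b)	/* finalisation done */
	{
		assert(b->reserved_size == 0);
	}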
+static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
+{
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_i915_gem_request *target;
+
+ intel_ring_update_space(ringbuf);
+ if (ringbuf->space >= bytes)
+ return 0;
+
+ /*
+ * Space is reserved in the ringbuffer for finalising the request,
+ * as that cannot be allowed to fail. During request finalisation,
+ * reserved_space is set to 0 to stop the overallocation and the
+ * assumption is that then we never need to wait (which has the
+ * risk of failing with EINTR).
+ *
+ * See also i915_gem_request_alloc() and i915_add_request().
+ */
+ GEM_BUG_ON(!ringbuf->reserved_size);
+
+ list_for_each_entry(target, &engine->request_list, list) {
+ unsigned space;
+
/*
- * The ring was wrapped while the reserved space was in use.
- * That means that some unknown amount of the ring tail was
- * no-op filled and skipped. Thus simply adding the ring size
- * to the tail and doing the above space check will not work.
- * Rather than attempt to track how much tail was skipped,
- * it is much simpler to say that also skipping the sanity
- * check every once in a while is not a big issue.
+ * The request queue is per-engine, so can contain requests
+ * from multiple ringbuffers. Here, we must ignore any that
+ * aren't from the ringbuffer we're considering.
*/
+ if (target->ringbuf != ringbuf)
+ continue;
+
+ /* Would completion of this request free enough space? */
+ space = __intel_ring_space(target->postfix, ringbuf->tail,
+ ringbuf->size);
+ if (space >= bytes)
+ break;
}
- ringbuf->reserved_size = 0;
- ringbuf->reserved_in_use = false;
+ if (WARN_ON(&target->list == &engine->request_list))
+ return -ENOSPC;
+
+ return i915_wait_request(target);
}
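wait_for_space walks the engine's request list oldest-first and stops at the first request whose retirement would free enough bytes, measured as the circular distance from that request's postfix back round to the current tail. The distance itself is plain modular arithmetic (the driver's __intel_ring_space also holds back a small reserve so tail never quite catches head; the 64 below is illustrative):

	/* Free bytes in a "size"-byte ring with consumer at "head"
	 * and producer at "tail", minus a small safety reserve. */
	static int ring_space(int head, int tail, int size)
	{
		int space = head - tail;
		if (space <= 0)
			space += size;
		return space - 64;
	}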
-static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
+int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
- int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
int remain_actual = ringbuf->size - ringbuf->tail;
- int ret, total_bytes, wait_bytes = 0;
+ int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ int bytes = num_dwords * sizeof(u32);
+ int total_bytes, wait_bytes;
bool need_wrap = false;
- if (ringbuf->reserved_in_use)
- total_bytes = bytes;
- else
- total_bytes = bytes + ringbuf->reserved_size;
+ total_bytes = bytes + ringbuf->reserved_size;
if (unlikely(bytes > remain_usable)) {
/*
@@ -2454,62 +2512,50 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
*/
wait_bytes = remain_actual + total_bytes;
need_wrap = true;
+ } else if (unlikely(total_bytes > remain_usable)) {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end. So we don't need an immediate wrap
+ * and only need to effectively wait for the reserved
+ * size space from the start of ringbuffer.
+ */
+ wait_bytes = remain_actual + ringbuf->reserved_size;
} else {
- if (unlikely(total_bytes > remain_usable)) {
- /*
- * The base request will fit but the reserved space
- * falls off the end. So don't need an immediate wrap
- * and only need to effectively wait for the reserved
- * size space from the start of ringbuffer.
- */
- wait_bytes = remain_actual + ringbuf->reserved_size;
- } else if (total_bytes > ringbuf->space) {
- /* No wrapping required, just waiting. */
- wait_bytes = total_bytes;
- }
+ /* No wrapping required, just waiting. */
+ wait_bytes = total_bytes;
}
- if (wait_bytes) {
- ret = ring_wait_for_space(ring, wait_bytes);
+ if (wait_bytes > ringbuf->space) {
+ int ret = wait_for_space(req, wait_bytes);
if (unlikely(ret))
return ret;
- if (need_wrap)
- __wrap_ring_buffer(ringbuf);
+ intel_ring_update_space(ringbuf);
+ if (unlikely(ringbuf->space < wait_bytes))
+ return -EAGAIN;
}
- return 0;
-}
-
-int intel_ring_begin(struct drm_i915_gem_request *req,
- int num_dwords)
-{
- struct intel_engine_cs *ring;
- struct drm_i915_private *dev_priv;
- int ret;
-
- WARN_ON(req == NULL);
- ring = req->ring;
- dev_priv = ring->dev->dev_private;
-
- ret = i915_gem_check_wedge(&dev_priv->gpu_error,
- dev_priv->mm.interruptible);
- if (ret)
- return ret;
+ if (unlikely(need_wrap)) {
+ GEM_BUG_ON(remain_actual > ringbuf->space);
+ GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
- ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
- if (ret)
- return ret;
+ /* Fill the tail with MI_NOOP */
+ memset(ringbuf->virtual_start + ringbuf->tail,
+ 0, remain_actual);
+ ringbuf->tail = 0;
+ ringbuf->space -= remain_actual;
+ }
- ring->buffer->space -= num_dwords * sizeof(uint32_t);
+ ringbuf->space -= bytes;
+ GEM_BUG_ON(ringbuf->space < 0);
return 0;
}
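The wrap is now inlined where the old __wrap_ring_buffer() helper used to be called, and it exploits the fact that MI_NOOP is the all-zeroes command: a single memset() fills the remaining tail with no-ops before tail resets to the start. The same move in isolation:

	#include <string.h>
	#include <stdint.h>

	/* MI_NOOP == 0x00000000, so zero-filling the tail emits NOOPs
	 * up to the end of the buffer in one call. */
	static void wrap_tail(uint8_t *ring, int *tail, int *space, int size)
	{
		int remain = size - *tail;

		memset(ring + *tail, 0, remain);
		*tail = 0;
		*space -= remain;
	}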
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
- int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ struct intel_engine_cs *engine = req->engine;
+ int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
int ret;
if (num_dwords == 0)
@@ -2521,33 +2567,52 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
return ret;
while (num_dwords--)
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return 0;
}
-void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
+void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
- if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
- I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
- I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
- if (HAS_VEBOX(dev))
- I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
+ /* Our semaphore implementation is strictly monotonic (i.e. we proceed
+ * so long as the semaphore value in the register/page is greater
+ * than the sync value), so whenever we reset the seqno,
+ * so long as we reset the tracking semaphore value to 0, it will
+ * always be before the next request's seqno. If we don't reset
+ * the semaphore value, then when the seqno moves backwards all
+ * future waits will complete instantly (causing rendering corruption).
+ */
+ if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) {
+ I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+ I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
+ if (HAS_VEBOX(dev_priv))
+ I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
+ }
+ if (dev_priv->semaphore_obj) {
+ struct drm_i915_gem_object *obj = dev_priv->semaphore_obj;
+ struct page *page = i915_gem_object_get_dirty_page(obj, 0);
+ void *semaphores = kmap(page);
+ memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
+ 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
+ kunmap(page);
}
+ memset(engine->semaphore.sync_seqno, 0,
+ sizeof(engine->semaphore.sync_seqno));
- ring->set_seqno(ring, seqno);
- ring->hangcheck.seqno = seqno;
+ engine->set_seqno(engine, seqno);
+ engine->last_submitted_seqno = seqno;
+
+ engine->hangcheck.seqno = seqno;
}
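As the new comment explains, a seqno that moves backwards would make every semaphore wait complete instantly unless the tracked semaphore values are zeroed too. On gen8 those values live in one shared page laid out as a (signaller, waiter) matrix, so resetting an engine means wiping its whole row; in isolation:

	#include <string.h>
	#include <stdint.h>

	#define NUM_ENGINES	5
	#define SLOT_SIZE	8	/* one qword per (from, to) pair */
	#define SEMA_OFFSET(from, to) \
		(((from) * NUM_ENGINES + (to)) * SLOT_SIZE)

	/* Zero every slot engine "id" signals -- the kmap/memset/kunmap
	 * sequence above, minus the kernel mapping plumbing. */
	static void clear_signal_row(uint8_t *sema_page, int id)
	{
		memset(sema_page + SEMA_OFFSET(id, 0),
		       0, NUM_ENGINES * SLOT_SIZE);
	}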
-static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
+static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
u32 value)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
/* Every tail move must follow the sequence below */
@@ -2567,8 +2632,8 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
/* Now that the ring is fully powered up, update the tail */
- I915_WRITE_TAIL(ring, value);
- POSTING_READ(RING_TAIL(ring->mmio_base));
+ I915_WRITE_TAIL(engine, value);
+ POSTING_READ(RING_TAIL(engine->mmio_base));
/* Let the ring send IDLE messages to the GT again,
* and so let it sleep to conserve power when idle.
@@ -2580,7 +2645,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
uint32_t cmd;
int ret;
@@ -2589,7 +2654,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
return ret;
cmd = MI_FLUSH_DW;
- if (INTEL_INFO(ring->dev)->gen >= 8)
+ if (INTEL_INFO(engine->dev)->gen >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
@@ -2608,16 +2673,17 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
if (invalidate & I915_GEM_GPU_DOMAINS)
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
- if (INTEL_INFO(ring->dev)->gen >= 8) {
- intel_ring_emit(ring, 0); /* upper addr */
- intel_ring_emit(ring, 0); /* value */
+ intel_ring_emit(engine, cmd);
+ intel_ring_emit(engine,
+ I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ if (INTEL_INFO(engine->dev)->gen >= 8) {
+ intel_ring_emit(engine, 0); /* upper addr */
+ intel_ring_emit(engine, 0); /* value */
} else {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, MI_NOOP);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return 0;
}
@@ -2626,8 +2692,8 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
- bool ppgtt = USES_PPGTT(ring->dev) &&
+ struct intel_engine_cs *engine = req->engine;
+ bool ppgtt = USES_PPGTT(engine->dev) &&
!(dispatch_flags & I915_DISPATCH_SECURE);
int ret;
@@ -2636,13 +2702,13 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
return ret;
/* FIXME(BDW): Address space and security selectors. */
- intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+ intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
(dispatch_flags & I915_DISPATCH_RS ?
MI_BATCH_RESOURCE_STREAMER : 0));
- intel_ring_emit(ring, lower_32_bits(offset));
- intel_ring_emit(ring, upper_32_bits(offset));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, lower_32_bits(offset));
+ intel_ring_emit(engine, upper_32_bits(offset));
+ intel_ring_emit(engine, MI_NOOP);
+ intel_ring_advance(engine);
return 0;
}
@@ -2652,22 +2718,22 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring,
+ intel_ring_emit(engine,
MI_BATCH_BUFFER_START |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
(dispatch_flags & I915_DISPATCH_RS ?
MI_BATCH_RESOURCE_STREAMER : 0));
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, offset);
+ intel_ring_advance(engine);
return 0;
}
@@ -2677,20 +2743,20 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned dispatch_flags)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
ret = intel_ring_begin(req, 2);
if (ret)
return ret;
- intel_ring_emit(ring,
+ intel_ring_emit(engine,
MI_BATCH_BUFFER_START |
(dispatch_flags & I915_DISPATCH_SECURE ?
0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ intel_ring_emit(engine, offset);
+ intel_ring_advance(engine);
return 0;
}
@@ -2700,8 +2766,8 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
static int gen6_ring_flush(struct drm_i915_gem_request *req,
u32 invalidate, u32 flush)
{
- struct intel_engine_cs *ring = req->ring;
- struct drm_device *dev = ring->dev;
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_device *dev = engine->dev;
uint32_t cmd;
int ret;
@@ -2728,16 +2794,17 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
*/
if (invalidate & I915_GEM_DOMAIN_RENDER)
cmd |= MI_INVALIDATE_TLB;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(engine, cmd);
+ intel_ring_emit(engine,
+ I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_INFO(dev)->gen >= 8) {
- intel_ring_emit(ring, 0); /* upper addr */
- intel_ring_emit(ring, 0); /* value */
+ intel_ring_emit(engine, 0); /* upper addr */
+ intel_ring_emit(engine, 0); /* value */
} else {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(engine, 0);
+ intel_ring_emit(engine, MI_NOOP);
}
- intel_ring_advance(ring);
+ intel_ring_advance(engine);
return 0;
}
@@ -2745,14 +2812,15 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
int intel_init_render_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[RCS];
struct drm_i915_gem_object *obj;
int ret;
- ring->name = "render ring";
- ring->id = RCS;
- ring->exec_id = I915_EXEC_RENDER;
- ring->mmio_base = RENDER_RING_BASE;
+ engine->name = "render ring";
+ engine->id = RCS;
+ engine->exec_id = I915_EXEC_RENDER;
+ engine->hw_id = 0;
+ engine->mmio_base = RENDER_RING_BASE;
if (INTEL_INFO(dev)->gen >= 8) {
if (i915_semaphore_is_enabled(dev)) {
@@ -2772,34 +2840,36 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
}
}
- ring->init_context = intel_rcs_ctx_init;
- ring->add_request = gen6_add_request;
- ring->flush = gen8_render_ring_flush;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->init_context = intel_rcs_ctx_init;
+ engine->add_request = gen6_add_request;
+ engine->flush = gen8_render_ring_flush;
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (i915_semaphore_is_enabled(dev)) {
WARN_ON(!dev_priv->semaphore_obj);
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_rcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_rcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
} else if (INTEL_INFO(dev)->gen >= 6) {
- ring->init_context = intel_rcs_ctx_init;
- ring->add_request = gen6_add_request;
- ring->flush = gen7_render_ring_flush;
+ engine->init_context = intel_rcs_ctx_init;
+ engine->add_request = gen6_add_request;
+ engine->flush = gen7_render_ring_flush;
if (INTEL_INFO(dev)->gen == 6)
- ring->flush = gen6_render_ring_flush;
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->flush = gen6_render_ring_flush;
+ engine->irq_get = gen6_ring_get_irq;
+ engine->irq_put = gen6_ring_put_irq;
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
+ engine->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.signal = gen6_signal;
/*
* The current semaphore is only applied on pre-gen8
* platform. And there is no VCS2 ring on the pre-gen8
@@ -2807,59 +2877,59 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
* initialized as INVALID. Gen8 will initialize the
* sema between VCS2 and RCS later.
*/
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+ engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+ engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+ engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+ engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+ engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+ engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+ engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
} else if (IS_GEN5(dev)) {
- ring->add_request = pc_render_add_request;
- ring->flush = gen4_render_ring_flush;
- ring->get_seqno = pc_render_get_seqno;
- ring->set_seqno = pc_render_set_seqno;
- ring->irq_get = gen5_ring_get_irq;
- ring->irq_put = gen5_ring_put_irq;
- ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
+ engine->add_request = pc_render_add_request;
+ engine->flush = gen4_render_ring_flush;
+ engine->get_seqno = pc_render_get_seqno;
+ engine->set_seqno = pc_render_set_seqno;
+ engine->irq_get = gen5_ring_get_irq;
+ engine->irq_put = gen5_ring_put_irq;
+ engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
} else {
- ring->add_request = i9xx_add_request;
+ engine->add_request = i9xx_add_request;
if (INTEL_INFO(dev)->gen < 4)
- ring->flush = gen2_render_ring_flush;
+ engine->flush = gen2_render_ring_flush;
else
- ring->flush = gen4_render_ring_flush;
- ring->get_seqno = ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->flush = gen4_render_ring_flush;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
- ring->irq_get = i8xx_ring_get_irq;
- ring->irq_put = i8xx_ring_put_irq;
+ engine->irq_get = i8xx_ring_get_irq;
+ engine->irq_put = i8xx_ring_put_irq;
} else {
- ring->irq_get = i9xx_ring_get_irq;
- ring->irq_put = i9xx_ring_put_irq;
+ engine->irq_get = i9xx_ring_get_irq;
+ engine->irq_put = i9xx_ring_put_irq;
}
- ring->irq_enable_mask = I915_USER_INTERRUPT;
+ engine->irq_enable_mask = I915_USER_INTERRUPT;
}
- ring->write_tail = ring_write_tail;
+ engine->write_tail = ring_write_tail;
if (IS_HASWELL(dev))
- ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+ engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
else if (IS_GEN8(dev))
- ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 6)
- ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4)
- ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ engine->dispatch_execbuffer = i965_dispatch_execbuffer;
else if (IS_I830(dev) || IS_845G(dev))
- ring->dispatch_execbuffer = i830_dispatch_execbuffer;
+ engine->dispatch_execbuffer = i830_dispatch_execbuffer;
else
- ring->dispatch_execbuffer = i915_dispatch_execbuffer;
- ring->init_hw = init_render_ring;
- ring->cleanup = render_ring_cleanup;
+ engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+ engine->init_hw = init_render_ring;
+ engine->cleanup = render_ring_cleanup;
/* Workaround batchbuffer to combat CS tlb bug. */
if (HAS_BROKEN_CS_TLB(dev)) {
@@ -2876,16 +2946,16 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return ret;
}
- ring->scratch.obj = obj;
- ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
+ engine->scratch.obj = obj;
+ engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
}
- ret = intel_init_ring_buffer(dev, ring);
+ ret = intel_init_ring_buffer(dev, engine);
if (ret)
return ret;
if (INTEL_INFO(dev)->gen >= 5) {
- ret = intel_init_pipe_control(ring);
+ ret = intel_init_pipe_control(engine);
if (ret)
return ret;
}
@@ -2896,75 +2966,77 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+ struct intel_engine_cs *engine = &dev_priv->engine[VCS];
- ring->name = "bsd ring";
- ring->id = VCS;
- ring->exec_id = I915_EXEC_BSD;
+ engine->name = "bsd ring";
+ engine->id = VCS;
+ engine->exec_id = I915_EXEC_BSD;
+ engine->hw_id = 1;
- ring->write_tail = ring_write_tail;
+ engine->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6) {
- ring->mmio_base = GEN6_BSD_RING_BASE;
+ engine->mmio_base = GEN6_BSD_RING_BASE;
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN6(dev))
- ring->write_tail = gen6_bsd_ring_write_tail;
- ring->flush = gen6_bsd_ring_flush;
- ring->add_request = gen6_add_request;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->write_tail = gen6_bsd_ring_write_tail;
+ engine->flush = gen6_bsd_ring_flush;
+ engine->add_request = gen6_add_request;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (INTEL_INFO(dev)->gen >= 8) {
- ring->irq_enable_mask =
+ engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->dispatch_execbuffer =
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
} else {
- ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- ring->dispatch_execbuffer =
+ engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+ engine->irq_get = gen6_ring_get_irq;
+ engine->irq_put = gen6_ring_put_irq;
+ engine->dispatch_execbuffer =
gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ engine->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.signal = gen6_signal;
+ engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+ engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+ engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+ engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+ engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+ engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+ engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+ engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
} else {
- ring->mmio_base = BSD_RING_BASE;
- ring->flush = bsd_ring_flush;
- ring->add_request = i9xx_add_request;
- ring->get_seqno = ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->mmio_base = BSD_RING_BASE;
+ engine->flush = bsd_ring_flush;
+ engine->add_request = i9xx_add_request;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (IS_GEN5(dev)) {
- ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
- ring->irq_get = gen5_ring_get_irq;
- ring->irq_put = gen5_ring_put_irq;
+ engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
+ engine->irq_get = gen5_ring_get_irq;
+ engine->irq_put = gen5_ring_put_irq;
} else {
- ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
- ring->irq_get = i9xx_ring_get_irq;
- ring->irq_put = i9xx_ring_put_irq;
+ engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+ engine->irq_get = i9xx_ring_get_irq;
+ engine->irq_put = i9xx_ring_put_irq;
}
- ring->dispatch_execbuffer = i965_dispatch_execbuffer;
+ engine->dispatch_execbuffer = i965_dispatch_execbuffer;
}
- ring->init_hw = init_ring_common;
+ engine->init_hw = init_ring_common;
- return intel_init_ring_buffer(dev, ring);
+ return intel_init_ring_buffer(dev, engine);
}
/**
@@ -2973,68 +3045,72 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
-
- ring->name = "bsd2 ring";
- ring->id = VCS2;
- ring->exec_id = I915_EXEC_BSD;
-
- ring->write_tail = ring_write_tail;
- ring->mmio_base = GEN8_BSD2_RING_BASE;
- ring->flush = gen6_bsd_ring_flush;
- ring->add_request = gen6_add_request;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
- ring->irq_enable_mask =
+ struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
+
+ engine->name = "bsd2 ring";
+ engine->id = VCS2;
+ engine->exec_id = I915_EXEC_BSD;
+ engine->hw_id = 4;
+
+ engine->write_tail = ring_write_tail;
+ engine->mmio_base = GEN8_BSD2_RING_BASE;
+ engine->flush = gen6_bsd_ring_flush;
+ engine->add_request = gen6_add_request;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
+ engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->dispatch_execbuffer =
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
- ring->init_hw = init_ring_common;
+ engine->init_hw = init_ring_common;
- return intel_init_ring_buffer(dev, ring);
+ return intel_init_ring_buffer(dev, engine);
}
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[BCS];
-
- ring->name = "blitter ring";
- ring->id = BCS;
- ring->exec_id = I915_EXEC_BLT;
-
- ring->mmio_base = BLT_RING_BASE;
- ring->write_tail = ring_write_tail;
- ring->flush = gen6_ring_flush;
- ring->add_request = gen6_add_request;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ struct intel_engine_cs *engine = &dev_priv->engine[BCS];
+
+ engine->name = "blitter ring";
+ engine->id = BCS;
+ engine->exec_id = I915_EXEC_BLT;
+ engine->hw_id = 2;
+
+ engine->mmio_base = BLT_RING_BASE;
+ engine->write_tail = ring_write_tail;
+ engine->flush = gen6_ring_flush;
+ engine->add_request = gen6_add_request;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (INTEL_INFO(dev)->gen >= 8) {
- ring->irq_enable_mask =
+ engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
} else {
- ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+ engine->irq_get = gen6_ring_get_irq;
+ engine->irq_put = gen6_ring_put_irq;
+ engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.signal = gen6_signal;
- ring->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.signal = gen6_signal;
+ engine->semaphore.sync_to = gen6_ring_sync;
/*
* The current semaphore is only applied on pre-gen8
* platform. And there is no VCS2 ring on the pre-gen8
@@ -3042,127 +3118,129 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
* initialized as INVALID. Gen8 will initialize the
* sema between BCS and VCS2 later.
*/
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+ engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+ engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+ engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+ engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+ engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+ engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+ engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
- ring->init_hw = init_ring_common;
+ engine->init_hw = init_ring_common;
- return intel_init_ring_buffer(dev, ring);
+ return intel_init_ring_buffer(dev, engine);
}
int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+ struct intel_engine_cs *engine = &dev_priv->engine[VECS];
- ring->name = "video enhancement ring";
- ring->id = VECS;
- ring->exec_id = I915_EXEC_VEBOX;
+ engine->name = "video enhancement ring";
+ engine->id = VECS;
+ engine->exec_id = I915_EXEC_VEBOX;
+ engine->hw_id = 3;
- ring->mmio_base = VEBOX_RING_BASE;
- ring->write_tail = ring_write_tail;
- ring->flush = gen6_ring_flush;
- ring->add_request = gen6_add_request;
- ring->get_seqno = gen6_ring_get_seqno;
- ring->set_seqno = ring_set_seqno;
+ engine->mmio_base = VEBOX_RING_BASE;
+ engine->write_tail = ring_write_tail;
+ engine->flush = gen6_ring_flush;
+ engine->add_request = gen6_add_request;
+ engine->irq_seqno_barrier = gen6_seqno_barrier;
+ engine->get_seqno = ring_get_seqno;
+ engine->set_seqno = ring_set_seqno;
if (INTEL_INFO(dev)->gen >= 8) {
- ring->irq_enable_mask =
+ engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ engine->irq_get = gen8_ring_get_irq;
+ engine->irq_put = gen8_ring_put_irq;
+ engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen8_ring_sync;
- ring->semaphore.signal = gen8_xcs_signal;
- GEN8_RING_SEMAPHORE_INIT;
+ engine->semaphore.sync_to = gen8_ring_sync;
+ engine->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT(engine);
}
} else {
- ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
- ring->irq_get = hsw_vebox_get_irq;
- ring->irq_put = hsw_vebox_put_irq;
- ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+ engine->irq_get = hsw_vebox_get_irq;
+ engine->irq_put = hsw_vebox_put_irq;
+ engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev)) {
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ engine->semaphore.sync_to = gen6_ring_sync;
+ engine->semaphore.signal = gen6_signal;
+ engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+ engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+ engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+ engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+ engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+ engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+ engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+ engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
}
}
- ring->init_hw = init_ring_common;
+ engine->init_hw = init_ring_common;
- return intel_init_ring_buffer(dev, ring);
+ return intel_init_ring_buffer(dev, engine);
}
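All four intel_init_*_ring_buffer() functions (plus the bsd2 variant) follow one template: name the engine, assign its ids, pick vfuncs and semaphore mailboxes per generation, then hand off to the common intel_init_ring_buffer(). The new hw_id field gives each engine a fixed hardware instance number; collected from the hunks above for reference:

	/* engine->id (enum order from the header below) against the
	 * engine->hw_id values assigned in the init functions above. */
	enum eng { RCS_E, BCS_E, VCS_E, VCS2_E, VECS_E };
	static const unsigned int hw_id[] = {
		[RCS_E]  = 0,	/* render */
		[BCS_E]  = 2,	/* blitter */
		[VCS_E]  = 1,	/* bsd */
		[VCS2_E] = 4,	/* bsd2 */
		[VECS_E] = 3,	/* vebox */
	};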
int
intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
int ret;
- if (!ring->gpu_caches_dirty)
+ if (!engine->gpu_caches_dirty)
return 0;
- ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
+ ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
- ring->gpu_caches_dirty = false;
+ engine->gpu_caches_dirty = false;
return 0;
}
int
intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *ring = req->ring;
+ struct intel_engine_cs *engine = req->engine;
uint32_t flush_domains;
int ret;
flush_domains = 0;
- if (ring->gpu_caches_dirty)
+ if (engine->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
- ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+ ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
- ring->gpu_caches_dirty = false;
+ engine->gpu_caches_dirty = false;
return 0;
}
void
-intel_stop_ring_buffer(struct intel_engine_cs *ring)
+intel_stop_engine(struct intel_engine_cs *engine)
{
int ret;
- if (!intel_ring_initialized(ring))
+ if (!intel_engine_initialized(engine))
return;
- ret = intel_ring_idle(ring);
- if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+ ret = intel_engine_idle(engine);
+ if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
- ring->name, ret);
+ engine->name, ret);
- stop_ring(ring);
+ stop_ring(engine);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 566b0ae10..ff126485d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -52,34 +52,32 @@ struct intel_hw_status_page {
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
*/
-#define i915_semaphore_seqno_size sizeof(uint64_t)
+#define gen8_semaphore_seqno_size sizeof(uint64_t)
+#define GEN8_SEMAPHORE_OFFSET(__from, __to) \
+ (((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size)
#define GEN8_SIGNAL_OFFSET(__ring, to) \
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
- ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
- (i915_semaphore_seqno_size * (to)))
-
+ GEN8_SEMAPHORE_OFFSET((__ring)->id, (to)))
#define GEN8_WAIT_OFFSET(__ring, from) \
(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
- ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
- (i915_semaphore_seqno_size * (__ring)->id))
+ GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
-#define GEN8_RING_SEMAPHORE_INIT do { \
+#define GEN8_RING_SEMAPHORE_INIT(e) do { \
if (!dev_priv->semaphore_obj) { \
break; \
} \
- ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
- ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
- ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
- ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
- ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
- ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
+ (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
+ (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
+ (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
+ (e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
+ (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
+ (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
} while(0)
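
For reference, the rewritten GEN8_SEMAPHORE_OFFSET packs one 8-byte seqno slot per (from, to) engine pair. A minimal standalone sketch of that arithmetic, assuming the five-engine layout from this header (the helper and constants below are illustrative, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_ENGINES 5                /* RCS, BCS, VCS, VCS2, VECS */
	#define SEQNO_SIZE sizeof(uint64_t)  /* qw slot, per the comment above */

	/* Mirrors GEN8_SEMAPHORE_OFFSET: one 8-byte slot per (from, to) pair. */
	static size_t semaphore_offset(unsigned int from, unsigned int to)
	{
	        return (from * NUM_ENGINES + to) * SEQNO_SIZE;
	}

	int main(void)
	{
	        /* VCS (id 2) signalling RCS (id 0): (2 * 5 + 0) * 8 = 80 */
	        printf("%zu\n", semaphore_offset(2, 0));
	        return 0;
	}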
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
HANGCHECK_ACTIVE,
- HANGCHECK_ACTIVE_LOOP,
HANGCHECK_KICK,
HANGCHECK_HUNG,
};
@@ -88,8 +86,8 @@ enum intel_ring_hangcheck_action {
struct intel_ring_hangcheck {
u64 acthd;
- u64 max_acthd;
u32 seqno;
+ unsigned user_interrupts;
int score;
enum intel_ring_hangcheck_action action;
int deadlock;
@@ -101,7 +99,7 @@ struct intel_ringbuffer {
void __iomem *virtual_start;
struct i915_vma *vma;
- struct intel_engine_cs *ring;
+ struct intel_engine_cs *engine;
struct list_head link;
u32 head;
@@ -110,8 +108,6 @@ struct intel_ringbuffer {
int size;
int effective_size;
int reserved_size;
- int reserved_tail;
- bool reserved_in_use;
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
@@ -125,7 +121,7 @@ struct intel_ringbuffer {
};
struct intel_context;
-struct drm_i915_reg_descriptor;
+struct drm_i915_reg_table;
/*
* we use a single page to load ctx workarounds so all of these
@@ -148,17 +144,18 @@ struct i915_ctx_workarounds {
struct intel_engine_cs {
const char *name;
- enum intel_ring_id {
+ enum intel_engine_id {
RCS = 0,
BCS,
VCS,
VCS2, /* Keep instances of the same type engine together. */
VECS
} id;
-#define I915_NUM_RINGS 5
+#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
- unsigned int guc_id;
+ unsigned int hw_id;
+ unsigned int guc_id; /* XXX same as hw_id? */
u32 mmio_base;
struct drm_device *dev;
struct intel_ringbuffer *buffer;
@@ -196,8 +193,8 @@ struct intel_engine_cs {
* seen value is good enough. Note that the seqno will always be
* monotonic, even if not coherent.
*/
- u32 (*get_seqno)(struct intel_engine_cs *ring,
- bool lazy_coherency);
+ void (*irq_seqno_barrier)(struct intel_engine_cs *ring);
+ u32 (*get_seqno)(struct intel_engine_cs *ring);
void (*set_seqno)(struct intel_engine_cs *ring,
u32 seqno);
int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
@@ -246,16 +243,16 @@ struct intel_engine_cs {
* ie. transpose of f(x, y)
*/
struct {
- u32 sync_seqno[I915_NUM_RINGS-1];
+ u32 sync_seqno[I915_NUM_ENGINES-1];
union {
struct {
/* our mbox written by others */
- u32 wait[I915_NUM_RINGS];
+ u32 wait[I915_NUM_ENGINES];
/* mboxes this ring signals to */
- i915_reg_t signal[I915_NUM_RINGS];
+ i915_reg_t signal[I915_NUM_ENGINES];
} mbox;
- u64 signal_ggtt[I915_NUM_RINGS];
+ u64 signal_ggtt[I915_NUM_ENGINES];
};
/* AKA wait() */
@@ -268,10 +265,13 @@ struct intel_engine_cs {
} semaphore;
/* Execlists */
- spinlock_t execlist_lock;
+ struct tasklet_struct irq_tasklet;
+ spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
struct list_head execlist_queue;
struct list_head execlist_retired_req_list;
- u8 next_context_status_buffer;
+ unsigned int fw_domains;
+ unsigned int next_context_status_buffer;
+ unsigned int idle_lite_restore_wa;
bool disable_lite_restore_wa;
u32 ctx_desc_template;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
@@ -306,6 +306,7 @@ struct intel_engine_cs {
* inspecting request list.
*/
u32 last_submitted_seqno;
+ unsigned user_interrupts;
bool gpu_caches_dirty;
@@ -332,15 +333,8 @@ struct intel_engine_cs {
/*
* Table of registers allowed in commands that read/write registers.
*/
- const struct drm_i915_reg_descriptor *reg_table;
- int reg_count;
-
- /*
- * Table of registers allowed in commands that read/write registers, but
- * only from the DRM master.
- */
- const struct drm_i915_reg_descriptor *master_reg_table;
- int master_reg_count;
+ const struct drm_i915_reg_table *reg_tables;
+ int reg_table_count;
/*
* Returns the bitmask for the length field of the specified command.
@@ -356,19 +350,19 @@ struct intel_engine_cs {
};
static inline bool
-intel_ring_initialized(struct intel_engine_cs *ring)
+intel_engine_initialized(struct intel_engine_cs *engine)
{
- return ring->dev != NULL;
+ return engine->dev != NULL;
}
static inline unsigned
-intel_ring_flag(struct intel_engine_cs *ring)
+intel_engine_flag(struct intel_engine_cs *engine)
{
- return 1 << ring->id;
+ return 1 << engine->id;
}
static inline u32
-intel_ring_sync_index(struct intel_engine_cs *ring,
+intel_ring_sync_index(struct intel_engine_cs *engine,
struct intel_engine_cs *other)
{
int idx;
@@ -381,34 +375,33 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
* vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/
- idx = (other - ring) - 1;
+ idx = (other - engine) - 1;
if (idx < 0)
- idx += I915_NUM_RINGS;
+ idx += I915_NUM_ENGINES;
return idx;
}
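
The sync index is the circular distance from one engine to the other, minus one. A small sketch over plain engine ids; the kernel's pointer subtraction behaves the same way because the engines sit in a single array, which is an assumption stated here rather than in the hunk:

	/* Circular distance used by intel_ring_sync_index(), over engine ids. */
	static int sync_index(int engine_id, int other_id, int num_engines)
	{
	        int idx = (other_id - engine_id) - 1;

	        if (idx < 0)
	                idx += num_engines;     /* e.g. (0 - 4) - 1 wraps to 0 */
	        return idx;
	}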
static inline void
-intel_flush_status_page(struct intel_engine_cs *ring, int reg)
+intel_flush_status_page(struct intel_engine_cs *engine, int reg)
{
- drm_clflush_virt_range(&ring->status_page.page_addr[reg],
- sizeof(uint32_t));
+ mb();
+ clflush(&engine->status_page.page_addr[reg]);
+ mb();
}
static inline u32
-intel_read_status_page(struct intel_engine_cs *ring,
- int reg)
+intel_read_status_page(struct intel_engine_cs *engine, int reg)
{
/* Ensure that the compiler doesn't optimize away the load. */
- barrier();
- return ring->status_page.page_addr[reg];
+ return READ_ONCE(engine->status_page.page_addr[reg]);
}
static inline void
-intel_write_status_page(struct intel_engine_cs *ring,
+intel_write_status_page(struct intel_engine_cs *engine,
int reg, u32 value)
{
- ring->status_page.page_addr[reg] = value;
+ engine->status_page.page_addr[reg] = value;
}
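
The read side now leans on READ_ONCE() instead of a bare compiler barrier. A rough user-space analogue of the guarantee it provides, assuming a volatile-cast implementation similar to the kernel's:

	#include <stdint.h>

	/* Crude READ_ONCE analogue: a volatile access forces one real load and
	 * keeps the compiler from caching the value across a polling loop. */
	#define READ_ONCE_U32(x) (*(const volatile uint32_t *)&(x))

	static uint32_t poll_status(const uint32_t *page, int reg)
	{
	        return READ_ONCE_U32(page[reg]);
	}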
/*
@@ -439,42 +432,41 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
void intel_ringbuffer_free(struct intel_ringbuffer *ring);
-void intel_stop_ring_buffer(struct intel_engine_cs *ring);
-void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
+void intel_stop_engine(struct intel_engine_cs *engine);
+void intel_cleanup_engine(struct intel_engine_cs *engine);
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ring_emit(struct intel_engine_cs *ring,
+static inline void intel_ring_emit(struct intel_engine_cs *engine,
u32 data)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct intel_ringbuffer *ringbuf = engine->buffer;
iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
ringbuf->tail += 4;
}
-static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
+static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
i915_reg_t reg)
{
- intel_ring_emit(ring, i915_mmio_reg_offset(reg));
+ intel_ring_emit(engine, i915_mmio_reg_offset(reg));
}
-static inline void intel_ring_advance(struct intel_engine_cs *ring)
+static inline void intel_ring_advance(struct intel_engine_cs *engine)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
+ struct intel_ringbuffer *ringbuf = engine->buffer;
ringbuf->tail &= ringbuf->size - 1;
}
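
intel_ring_advance() wraps the tail with a mask, which is only correct when the ring size is a power of two. A quick sketch of that invariant (helper name is illustrative):

	#include <assert.h>

	/* Tail wrap as in intel_ring_advance(); only valid when the ring size
	 * is a power of two, so the mask keeps every in-range offset intact. */
	static unsigned int wrap_tail(unsigned int tail, unsigned int size)
	{
	        assert(size && (size & (size - 1)) == 0);
	        return tail & (size - 1);       /* tail == size wraps to 0 */
	}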
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
-int intel_ring_space(struct intel_ringbuffer *ringbuf);
-bool intel_ring_stopped(struct intel_engine_cs *ring);
+bool intel_engine_stopped(struct intel_engine_cs *engine);
-int __must_check intel_ring_idle(struct intel_engine_cs *ring);
-void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
+int __must_check intel_engine_idle(struct intel_engine_cs *engine);
+void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
-void intel_fini_pipe_control(struct intel_engine_cs *ring);
-int intel_init_pipe_control(struct intel_engine_cs *ring);
+void intel_fini_pipe_control(struct intel_engine_cs *engine);
+int intel_init_pipe_control(struct intel_engine_cs *engine);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -482,9 +474,9 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);
-u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
+u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
-int init_workarounds_ring(struct intel_engine_cs *ring);
+int init_workarounds_ring(struct intel_engine_cs *engine);
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 6e54d978d..7fb1da4e7 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -89,6 +89,10 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "TRANSCODER_C";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
+ case POWER_DOMAIN_TRANSCODER_DSI_A:
+ return "TRANSCODER_DSI_A";
+ case POWER_DOMAIN_TRANSCODER_DSI_C:
+ return "TRANSCODER_DSI_C";
case POWER_DOMAIN_PORT_DDI_A_LANES:
return "PORT_DDI_A_LANES";
case POWER_DOMAIN_PORT_DDI_B_LANES:
@@ -393,11 +397,6 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_MODESET) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
- (POWER_DOMAIN_MASK & ~( \
- SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) | \
- BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
@@ -415,36 +414,21 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_GMBUS) | \
BIT(POWER_DOMAIN_INIT))
-#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
- BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
- BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT(POWER_DOMAIN_AUX_A) | \
- BIT(POWER_DOMAIN_PLLS) | \
- BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT(POWER_DOMAIN_MODESET) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
-#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
- (POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
- BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) | \
- BIT(POWER_DOMAIN_INIT))
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
-
- WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
- WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
- "DC9 already programmed to be enabled.\n");
- WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled to enable DC9.\n");
- WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
- WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
+ WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
+ "DC9 already programmed to be enabled.\n");
+ WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled to enable DC9.\n");
+ WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
+ WARN_ONCE(intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
/*
* TODO: check for the following to verify the conditions to enter DC9
@@ -457,11 +441,10 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
- WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
- WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
- "DC9 already programmed to be disabled.\n");
- WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled.\n");
+ WARN_ONCE(intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+ WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled.\n");
/*
* TODO: check for the following to verify DC9 state was indeed
@@ -472,24 +455,6 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
*/
}
-static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
-{
- uint32_t val, mask;
-
- mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
-
- if (IS_BROXTON(dev_priv))
- mask |= DC_STATE_DEBUG_MASK_CORES;
-
- /* The below bit doesn't need to be cleared ever afterwards */
- val = I915_READ(DC_STATE_DEBUG);
- if ((val & mask) != mask) {
- val |= mask;
- I915_WRITE(DC_STATE_DEBUG, val);
- POSTING_READ(DC_STATE_DEBUG);
- }
-}
-
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
u32 state)
{
@@ -527,10 +492,9 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
state, rewrites);
}
-static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
+static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
- uint32_t val;
- uint32_t mask;
+ u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
if (IS_BROXTON(dev_priv))
@@ -538,14 +502,30 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
else
mask |= DC_STATE_EN_UPTO_DC6;
- WARN_ON_ONCE(state & ~mask);
+ return mask;
+}
+
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
+{
+ u32 val;
- if (i915.enable_dc == 0)
- state = DC_STATE_DISABLE;
- else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5)
- state = DC_STATE_EN_UPTO_DC5;
+ val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
+
+ DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
+ dev_priv->csr.dc_state, val);
+ dev_priv->csr.dc_state = val;
+}
+
+static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
+{
+ uint32_t val;
+ uint32_t mask;
+
+ if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
+ state &= dev_priv->csr.allowed_dc_mask;
val = I915_READ(DC_STATE_EN);
+ mask = gen9_dc_mask(dev_priv);
DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
val & mask, state);
@@ -590,13 +570,9 @@ static void assert_csr_loaded(struct drm_i915_private *dev_priv)
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2);
- WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
- "Platform doesn't support DC5.\n");
- WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
@@ -606,19 +582,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
-static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
-{
- /*
- * During initialization, the firmware may not be loaded yet.
- * We still want to make sure that the DC enabling flag is cleared.
- */
- if (dev_priv->power_domains.initializing)
- return;
-
- assert_rpm_wakelock_held(dev_priv);
-}
-
-static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc5(dev_priv);
@@ -629,11 +593,6 @@ static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
-
- WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
- "Platform doesn't support DC6.\n");
- WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Backlight is not disabled.\n");
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
@@ -642,47 +601,60 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
-static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
- /*
- * During initialization, the firmware may not be loaded yet.
- * We still want to make sure that the DC enabling flag is cleared.
- */
- if (dev_priv->power_domains.initializing)
- return;
+ assert_can_enable_dc6(dev_priv);
+
+ DRM_DEBUG_KMS("Enabling DC6\n");
+
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
- WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
- "DC6 already programmed to be disabled.\n");
}
-static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
+void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
- assert_can_disable_dc5(dev_priv);
-
- if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
- i915.enable_dc != 0 && i915.enable_dc != 1)
- assert_can_disable_dc6(dev_priv);
+ DRM_DEBUG_KMS("Disabling DC6\n");
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
-void skl_enable_dc6(struct drm_i915_private *dev_priv)
+static void
+gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
{
- assert_can_enable_dc6(dev_priv);
+ enum skl_disp_power_wells power_well_id = power_well->data;
+ u32 val;
+ u32 mask;
- DRM_DEBUG_KMS("Enabling DC6\n");
+ mask = SKL_POWER_WELL_REQ(power_well_id);
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ val = I915_READ(HSW_PWR_WELL_KVMR);
+ if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
+ power_well->name))
+ I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);
-}
+ val = I915_READ(HSW_PWR_WELL_BIOS);
+ val |= I915_READ(HSW_PWR_WELL_DEBUG);
-void skl_disable_dc6(struct drm_i915_private *dev_priv)
-{
- assert_can_disable_dc6(dev_priv);
+ if (!(val & mask))
+ return;
- DRM_DEBUG_KMS("Disabling DC6\n");
+ /*
+ * DMC is known to force on the request bits for power well 1 on SKL
+ * and BXT, and the misc IO power well on SKL, but we don't expect any
+ * other request bits to be set, so WARN for those.
+ */
+ if (power_well_id == SKL_DISP_PW_1 ||
+ ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+ power_well_id == SKL_DISP_PW_MISC_IO))
+ DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
+ "by DMC\n", power_well->name);
+ else
+ WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
+ power_well->name);
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+ I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
+ I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}
static void skl_set_power_well(struct drm_i915_private *dev_priv,
@@ -739,10 +711,6 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
if (!is_enabled) {
DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
- if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
- state_mask), 1))
- DRM_ERROR("%s enable timeout\n",
- power_well->name);
check_fuse_status = true;
}
} else {
@@ -751,8 +719,16 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
}
+
+ if (IS_GEN9(dev_priv))
+ gen9_sanitize_power_well_requests(dev_priv, power_well);
}
+ if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
+ 1))
+ DRM_ERROR("%s %s timeout\n",
+ power_well->name, enable ? "enable" : "disable");
+
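
The consolidated timeout check compares a normalized register read against the boolean enable. A tiny illustration of the !! idiom it relies on (names below are placeholders):

	#include <stdbool.h>
	#include <stdint.h>

	/* !! collapses any non-zero masked value to exactly 1, so the result
	 * can be compared against a bool regardless of which bit was set. */
	static bool well_state_matches(uint32_t reg, uint32_t state_mask, bool enable)
	{
	        return !!(reg & state_mask) == enable;
	}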
if (check_fuse_status) {
if (power_well->data == SKL_DISP_PW_1) {
if (wait_for((I915_READ(SKL_FUSE_STATUS) &
@@ -833,32 +809,33 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- gen9_disable_dc5_dc6(dev_priv);
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ if (IS_BROXTON(dev_priv)) {
+ broxton_cdclk_verify_state(dev_priv);
+ broxton_ddi_phy_verify_state(dev_priv);
+ }
}
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
- i915.enable_dc != 0 && i915.enable_dc != 1)
+ if (!dev_priv->csr.dmc_payload)
+ return;
+
+ if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(dev_priv);
- else
+ else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
gen9_enable_dc5(dev_priv);
}
static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if (power_well->count > 0) {
- gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- } else {
- if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
- i915.enable_dc != 0 &&
- i915.enable_dc != 1)
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
- else
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
- }
+ if (power_well->count > 0)
+ gen9_dc_off_power_well_enable(dev_priv, power_well);
+ else
+ gen9_dc_off_power_well_disable(dev_priv, power_well);
}
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
@@ -962,6 +939,17 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
return enabled;
}
+static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+
+ /*
+ * Disable trickle feed and enable pnd deadline calculation
+ */
+ I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ I915_WRITE(CBR1_VLV, 0);
+}
+
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -984,6 +972,8 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
I915_WRITE(DPLL(pipe), val);
}
+ vlv_init_display_clock_gating(dev_priv);
+
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1622,34 +1612,56 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
intel_runtime_pm_put(dev_priv);
}
-#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
- BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+#define HSW_DISPLAY_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT(POWER_DOMAIN_PORT_CRT) | \
- BIT(POWER_DOMAIN_PLLS) | \
- BIT(POWER_DOMAIN_AUX_A) | \
- BIT(POWER_DOMAIN_AUX_B) | \
- BIT(POWER_DOMAIN_AUX_C) | \
- BIT(POWER_DOMAIN_AUX_D) | \
- BIT(POWER_DOMAIN_GMBUS) | \
- BIT(POWER_DOMAIN_INIT))
-#define HSW_DISPLAY_POWER_DOMAINS ( \
- (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
+ BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_INIT))
-#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
- HSW_ALWAYS_ON_POWER_DOMAINS | \
- BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
-#define BDW_DISPLAY_POWER_DOMAINS ( \
- (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
+#define BDW_DISPLAY_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
+ BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_INIT))
-#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
-#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
+#define VLV_DISPLAY_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DSI) | \
+ BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_AUDIO) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_GMBUS) | \
+ BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
@@ -1679,6 +1691,28 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
+#define CHV_DISPLAY_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PIPE_A) | \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DSI) | \
+ BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_AUDIO) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_AUX_D) | \
+ BIT(POWER_DOMAIN_GMBUS) | \
+ BIT(POWER_DOMAIN_INIT))
+
#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
@@ -1746,7 +1780,7 @@ static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
- .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
+ .domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
{
@@ -1760,7 +1794,7 @@ static struct i915_power_well bdw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
- .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
+ .domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
{
@@ -1795,7 +1829,7 @@ static struct i915_power_well vlv_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
- .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+ .domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.data = PUNIT_POWER_WELL_ALWAYS_ON,
},
@@ -1853,7 +1887,7 @@ static struct i915_power_well chv_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
- .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
+ .domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
{
@@ -1863,7 +1897,7 @@ static struct i915_power_well chv_power_wells[] = {
* power wells don't actually exist. Pipe A power well is
* required for any pipe to work.
*/
- .domains = VLV_DISPLAY_POWER_DOMAINS,
+ .domains = CHV_DISPLAY_POWER_DOMAINS,
.data = PIPE_A,
.ops = &chv_pipe_power_well_ops,
},
@@ -1897,7 +1931,7 @@ static struct i915_power_well skl_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
- .domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+ .domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.data = SKL_DISP_PW_ALWAYS_ON,
},
@@ -1953,44 +1987,16 @@ static struct i915_power_well skl_power_wells[] = {
},
};
-void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *well;
-
- if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
- return;
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_enable(dev_priv, well);
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
- intel_power_well_enable(dev_priv, well);
-}
-
-void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
-{
- struct i915_power_well *well;
-
- if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
- return;
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
- intel_power_well_disable(dev_priv, well);
-
- well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
- intel_power_well_disable(dev_priv, well);
-}
-
static struct i915_power_well bxt_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
- .domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
+ .domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "power well 1",
- .domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
+ .domains = 0,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_1,
},
@@ -2015,12 +2021,56 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
if (disable_power_well >= 0)
return !!disable_power_well;
- if (IS_BROXTON(dev_priv)) {
- DRM_DEBUG_KMS("Disabling display power well support\n");
- return 0;
+ return 1;
+}
+
+static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
+ int enable_dc)
+{
+ uint32_t mask;
+ int requested_dc;
+ int max_dc;
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ max_dc = 2;
+ mask = 0;
+ } else if (IS_BROXTON(dev_priv)) {
+ max_dc = 1;
+ /*
+ * DC9 has a separate HW flow from the rest of the DC states,
+ * not depending on the DMC firmware. It's needed by system
+ * suspend/resume, so allow it unconditionally.
+ */
+ mask = DC_STATE_EN_DC9;
+ } else {
+ max_dc = 0;
+ mask = 0;
}
- return 1;
+ if (!i915.disable_power_well)
+ max_dc = 0;
+
+ if (enable_dc >= 0 && enable_dc <= max_dc) {
+ requested_dc = enable_dc;
+ } else if (enable_dc == -1) {
+ requested_dc = max_dc;
+ } else if (enable_dc > max_dc && enable_dc <= 2) {
+ DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
+ enable_dc, max_dc);
+ requested_dc = max_dc;
+ } else {
+ DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
+ requested_dc = max_dc;
+ }
+
+ if (requested_dc > 1)
+ mask |= DC_STATE_EN_UPTO_DC6;
+ if (requested_dc > 0)
+ mask |= DC_STATE_EN_UPTO_DC5;
+
+ DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
+
+ return mask;
}
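
This helper folds the old scattered i915.enable_dc checks into one allowed mask. A condensed sketch of the resulting policy, with placeholder flags standing in for the DC_STATE_EN_* bits (which are defined elsewhere) and the BXT DC9 special case elided:

	#define SKETCH_DC5 (1u << 0)    /* stands in for DC_STATE_EN_UPTO_DC5 */
	#define SKETCH_DC6 (1u << 1)    /* stands in for DC_STATE_EN_UPTO_DC6 */

	/* SKL/KBL: max_dc = 2, BXT: max_dc = 1, others: 0; -1 means default. */
	static unsigned int sketch_dc_mask(int enable_dc, int max_dc)
	{
	        int requested;

	        if (enable_dc >= 0 && enable_dc <= max_dc)
	                requested = enable_dc;
	        else
	                requested = max_dc;     /* -1 and out-of-range both clamp */

	        return (requested > 1 ? SKETCH_DC6 : 0) |
	               (requested > 0 ? SKETCH_DC5 : 0);
	}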
#define set_power_wells(power_domains, __power_wells) ({ \
@@ -2041,6 +2091,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
i915.disable_power_well);
+ dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
+ i915.enable_dc);
BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
@@ -2050,17 +2102,17 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_HASWELL(dev_priv->dev)) {
+ if (IS_HASWELL(dev_priv)) {
set_power_wells(power_domains, hsw_power_wells);
- } else if (IS_BROADWELL(dev_priv->dev)) {
+ } else if (IS_BROADWELL(dev_priv)) {
set_power_wells(power_domains, bdw_power_wells);
- } else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) {
+ } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
set_power_wells(power_domains, skl_power_wells);
- } else if (IS_BROXTON(dev_priv->dev)) {
+ } else if (IS_BROXTON(dev_priv)) {
set_power_wells(power_domains, bxt_power_wells);
- } else if (IS_CHERRYVIEW(dev_priv->dev)) {
+ } else if (IS_CHERRYVIEW(dev_priv)) {
set_power_wells(power_domains, chv_power_wells);
- } else if (IS_VALLEYVIEW(dev_priv->dev)) {
+ } else if (IS_VALLEYVIEW(dev_priv)) {
set_power_wells(power_domains, vlv_power_wells);
} else {
set_power_wells(power_domains, i9xx_always_on_power_well);
@@ -2120,9 +2172,10 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
}
static void skl_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
+ bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
uint32_t val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -2133,7 +2186,13 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
/* enable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
- skl_pw1_misc_io_init(dev_priv);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
+ intel_power_well_enable(dev_priv, well);
+
mutex_unlock(&power_domains->lock);
if (!resume)
@@ -2141,13 +2200,14 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
skl_init_cdclk(dev_priv);
- if (dev_priv->csr.dmc_payload && intel_csr_load_program(dev_priv))
- gen9_set_dc_state_debugmask(dev_priv);
+ if (dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
}
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -2155,8 +2215,73 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
/* The spec doesn't call for removing the reset handshake flag */
/* disable PG1 and Misc I/O */
+
+ mutex_lock(&power_domains->lock);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
+ intel_power_well_disable(dev_priv, well);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+}
+
+void bxt_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+ uint32_t val;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ /*
+ * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
+ * or else the reset will hang because there is no PCH to respond.
+ * Move the handshake programming to the initialization sequence;
+ * previously it was left up to the BIOS.
+ */
+ val = I915_READ(HSW_NDE_RSTWRN_OPT);
+ val &= ~RESET_PCH_HANDSHAKE_ENABLE;
+ I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+
+ /* Enable PG1 */
+ mutex_lock(&power_domains->lock);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_enable(dev_priv, well);
+
+ mutex_unlock(&power_domains->lock);
+
+ broxton_init_cdclk(dev_priv);
+ broxton_ddi_phy_init(dev_priv);
+
+ broxton_cdclk_verify_state(dev_priv);
+ broxton_ddi_phy_verify_state(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
+}
+
+void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *well;
+
+ gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+
+ broxton_ddi_phy_uninit(dev_priv);
+ broxton_uninit_cdclk(dev_priv);
+
+ /* The spec doesn't call for removing the reset handshake flag */
+
+ /* Disable PG1 */
mutex_lock(&power_domains->lock);
- skl_pw1_misc_io_fini(dev_priv);
+
+ well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
+ intel_power_well_disable(dev_priv, well);
+
mutex_unlock(&power_domains->lock);
}
@@ -2291,6 +2416,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
skl_display_core_init(dev_priv, resume);
+ } else if (IS_BROXTON(dev)) {
+ bxt_display_core_init(dev_priv, resume);
} else if (IS_CHERRYVIEW(dev)) {
mutex_lock(&power_domains->lock);
chv_phy_control_init(dev_priv);
@@ -2328,6 +2455,8 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_display_core_uninit(dev_priv);
+ else if (IS_BROXTON(dev_priv))
+ bxt_display_core_uninit(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 4ecc076c4..2128fae56 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1398,12 +1398,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
dotclock = pipe_config->port_clock;
+
if (pipe_config->pixel_multiplier)
dotclock /= pipe_config->pixel_multiplier;
- if (HAS_PCH_SPLIT(dev))
- ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
/* Cross check the port pixel multiplier with the sdvo encoder state. */
@@ -2262,9 +2260,9 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
struct sdvo_device_mapping *mapping;
if (sdvo->port == PORT_B)
- mapping = &(dev_priv->sdvo_mappings[0]);
+ mapping = &dev_priv->vbt.sdvo_mappings[0];
else
- mapping = &(dev_priv->sdvo_mappings[1]);
+ mapping = &dev_priv->vbt.sdvo_mappings[1];
if (mapping->initialized)
sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
@@ -2280,9 +2278,9 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
u8 pin;
if (sdvo->port == PORT_B)
- mapping = &dev_priv->sdvo_mappings[0];
+ mapping = &dev_priv->vbt.sdvo_mappings[0];
else
- mapping = &dev_priv->sdvo_mappings[1];
+ mapping = &dev_priv->vbt.sdvo_mappings[1];
if (mapping->initialized &&
intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin))
@@ -2318,11 +2316,11 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
struct sdvo_device_mapping *my_mapping, *other_mapping;
if (sdvo->port == PORT_B) {
- my_mapping = &dev_priv->sdvo_mappings[0];
- other_mapping = &dev_priv->sdvo_mappings[1];
+ my_mapping = &dev_priv->vbt.sdvo_mappings[0];
+ other_mapping = &dev_priv->vbt.sdvo_mappings[1];
} else {
- my_mapping = &dev_priv->sdvo_mappings[1];
- other_mapping = &dev_priv->sdvo_mappings[0];
+ my_mapping = &dev_priv->vbt.sdvo_mappings[1];
+ other_mapping = &dev_priv->vbt.sdvo_mappings[0];
}
/* If the BIOS described our SDVO device, take advantage of it. */
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index a2582c455..0f3e2303e 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -193,7 +193,7 @@ skl_update_plane(struct drm_plane *drm_plane,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr;
u32 tile_height, plane_offset, plane_size;
- unsigned int rotation;
+ unsigned int rotation = plane_state->base.rotation;
int x_offset, y_offset;
int crtc_x = plane_state->dst.x1;
int crtc_y = plane_state->dst.y1;
@@ -213,7 +213,6 @@ skl_update_plane(struct drm_plane *drm_plane,
plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
- rotation = plane_state->base.rotation;
plane_ctl |= skl_plane_ctl_rotation(rotation);
stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
@@ -351,6 +350,7 @@ vlv_update_plane(struct drm_plane *dplane,
int plane = intel_plane->plane;
u32 sprctl;
u32 sprsurf_offset, linear_offset;
+ unsigned int rotation = dplane->state->rotation;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
@@ -423,12 +423,11 @@ vlv_update_plane(struct drm_plane *dplane,
crtc_h--;
linear_offset = y * fb->pitches[0] + x * cpp;
- sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
- fb->modifier[0], cpp,
- fb->pitches[0]);
+ sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
+ fb->pitches[0], rotation);
linear_offset -= sprsurf_offset;
- if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+ if (rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SP_ROTATE_180;
x += src_w;
@@ -493,6 +492,7 @@ ivb_update_plane(struct drm_plane *plane,
enum pipe pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
u32 sprsurf_offset, linear_offset;
+ unsigned int rotation = plane_state->base.rotation;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
@@ -556,12 +556,11 @@ ivb_update_plane(struct drm_plane *plane,
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
linear_offset = y * fb->pitches[0] + x * cpp;
- sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
- fb->modifier[0], cpp,
- fb->pitches[0]);
+ sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
+ fb->pitches[0], rotation);
linear_offset -= sprsurf_offset;
- if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+ if (rotation == BIT(DRM_ROTATE_180)) {
sprctl |= SPRITE_ROTATE_180;
/* HSW and BDW do this automagically in hardware */
@@ -634,6 +633,7 @@ ilk_update_plane(struct drm_plane *plane,
int pipe = intel_plane->pipe;
u32 dvscntr, dvsscale;
u32 dvssurf_offset, linear_offset;
+ unsigned int rotation = plane_state->base.rotation;
int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->dst.x1;
@@ -693,12 +693,11 @@ ilk_update_plane(struct drm_plane *plane,
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
linear_offset = y * fb->pitches[0] + x * cpp;
- dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
- fb->modifier[0], cpp,
- fb->pitches[0]);
+ dvssurf_offset = intel_compute_tile_offset(&x, &y, fb, 0,
+ fb->pitches[0], rotation);
linear_offset -= dvssurf_offset;
- if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+ if (rotation == BIT(DRM_ROTATE_180)) {
dvscntr |= DVS_ROTATE_180;
x += src_w;
@@ -1026,8 +1025,8 @@ static uint32_t skl_plane_formats[] = {
int
intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
{
- struct intel_plane *intel_plane;
- struct intel_plane_state *state;
+ struct intel_plane *intel_plane = NULL;
+ struct intel_plane_state *state = NULL;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
int num_plane_formats;
@@ -1037,13 +1036,15 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
return -ENODEV;
intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
- if (!intel_plane)
- return -ENOMEM;
+ if (!intel_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
state = intel_create_plane_state(&intel_plane->base);
if (!state) {
- kfree(intel_plane);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail;
}
intel_plane->base.state = &state->base;
@@ -1098,28 +1099,34 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
num_plane_formats = ARRAY_SIZE(skl_plane_formats);
break;
default:
- kfree(intel_plane);
- return -ENODEV;
+ MISSING_CASE(INTEL_INFO(dev)->gen);
+ ret = -ENODEV;
+ goto fail;
}
intel_plane->pipe = pipe;
intel_plane->plane = plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
intel_plane->check_plane = intel_check_sprite_plane;
+
possible_crtcs = (1 << pipe);
+
ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs,
plane_formats, num_plane_formats,
DRM_PLANE_TYPE_OVERLAY, NULL);
- if (ret) {
- kfree(intel_plane);
- goto out;
- }
+ if (ret)
+ goto fail;
intel_create_rotation_property(dev, intel_plane);
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
-out:
+ return 0;
+
+fail:
+ kfree(state);
+ kfree(intel_plane);
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 6745bad5b..223129d3c 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -326,24 +326,12 @@ static const struct color_conversion sdtv_csc_yprpb = {
.rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
};
-static const struct color_conversion sdtv_csc_rgb = {
- .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
- .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
- .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
-};
-
static const struct color_conversion hdtv_csc_yprpb = {
.ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
.ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
.rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
};
-static const struct color_conversion hdtv_csc_rgb = {
- .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166,
- .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166,
- .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166,
-};
-
static const struct video_levels component_levels = {
.blank = 279, .black = 279, .burst = 0,
};
@@ -1531,47 +1519,6 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
.destroy = intel_encoder_destroy,
};
-/*
- * Enumerate the child dev array parsed from VBT to check whether
- * the integrated TV is present.
- * If it is present, return 1.
- * If it is not present, return false.
- * If no child dev is parsed from VBT, it assumes that the TV is present.
- */
-static int tv_is_present_in_vbt(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- union child_device_config *p_child;
- int i, ret;
-
- if (!dev_priv->vbt.child_dev_num)
- return 1;
-
- ret = 0;
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- p_child = dev_priv->vbt.child_dev + i;
- /*
- * If the device type is not TV, continue.
- */
- switch (p_child->old.device_type) {
- case DEVICE_TYPE_INT_TV:
- case DEVICE_TYPE_TV:
- case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
- break;
- default:
- continue;
- }
- /* Only when the addin_offset is non-zero, it is regarded
- * as present.
- */
- if (p_child->old.addin_offset) {
- ret = 1;
- break;
- }
- }
- return ret;
-}
-
void
intel_tv_init(struct drm_device *dev)
{
@@ -1587,13 +1534,10 @@ intel_tv_init(struct drm_device *dev)
if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
return;
- if (!tv_is_present_in_vbt(dev)) {
+ if (!intel_bios_is_tv_present(dev_priv)) {
DRM_DEBUG_KMS("Integrated TV is not present.\n");
return;
}
- /* Even if we have an encoder we may not have a connector */
- if (!dev_priv->vbt.int_tv_support)
- return;
/*
* Sanity check the TV output by checking to see if the
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 68b6f69aa..4f1dfe616 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -60,7 +60,11 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
- mod_timer_pinned(&d->timer, jiffies + 1);
+ d->wake_count++;
+ hrtimer_start_range_ns(&d->timer,
+ ktime_set(0, NSEC_PER_MSEC),
+ NSEC_PER_MSEC,
+ HRTIMER_MODE_REL);
}
static inline void
@@ -107,22 +111,22 @@ static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
- enum forcewake_domain_id id;
- for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+ for_each_fw_domain_masked(d, fw_domains, dev_priv) {
fw_domain_wait_ack_clear(d);
fw_domain_get(d);
- fw_domain_wait_ack(d);
}
+
+ for_each_fw_domain_masked(d, fw_domains, dev_priv)
+ fw_domain_wait_ack(d);
}
static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
- enum forcewake_domain_id id;
- for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
+ for_each_fw_domain_masked(d, fw_domains, dev_priv) {
fw_domain_put(d);
fw_domain_posting_read(d);
}
@@ -132,10 +136,9 @@ static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
struct intel_uncore_forcewake_domain *d;
- enum forcewake_domain_id id;
/* No need to do for all, just do for first found */
- for_each_fw_domain(d, dev_priv, id) {
+ for_each_fw_domain(d, dev_priv) {
fw_domain_posting_read(d);
break;
}
@@ -145,12 +148,11 @@ static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
- enum forcewake_domain_id id;
if (dev_priv->uncore.fw_domains == 0)
return;
- for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
+ for_each_fw_domain_masked(d, fw_domains, dev_priv)
fw_domain_reset(d);
fw_domains_posting_read(dev_priv);
@@ -204,7 +206,7 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
/* On VLV, FIFO will be shared by both SW and HW.
* So, we need to read the FREE_ENTRIES every time */
- if (IS_VALLEYVIEW(dev_priv->dev))
+ if (IS_VALLEYVIEW(dev_priv))
dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);
if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
@@ -224,9 +226,11 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
-static void intel_uncore_fw_release_timer(unsigned long arg)
+static enum hrtimer_restart
+intel_uncore_fw_release_timer(struct hrtimer *timer)
{
- struct intel_uncore_forcewake_domain *domain = (void *)arg;
+ struct intel_uncore_forcewake_domain *domain =
+ container_of(timer, struct intel_uncore_forcewake_domain, timer);
unsigned long irqflags;
assert_rpm_device_not_suspended(domain->i915);
@@ -240,6 +244,8 @@ static void intel_uncore_fw_release_timer(unsigned long arg)
1 << domain->id);
spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
+
+ return HRTIMER_NORESTART;
}
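
The hrtimer callback recovers its enclosing forcewake domain with container_of(). A generic, self-contained sketch of that pattern; the struct names here are illustrative, not the driver's:

	#include <stddef.h>

	#define container_of(ptr, type, member) \
	        ((type *)((char *)(ptr) - offsetof(type, member)))

	struct sketch_timer { int armed; };

	struct sketch_domain {
	        int id;
	        struct sketch_timer timer;  /* embedded, like the domain's hrtimer */
	};

	static struct sketch_domain *domain_from_timer(struct sketch_timer *t)
	{
	        /* Walk back from the embedded member to its containing struct. */
	        return container_of(t, struct sketch_domain, timer);
	}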
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
@@ -248,7 +254,6 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
int retry_count = 100;
- enum forcewake_domain_id id;
enum forcewake_domains fw = 0, active_domains;
/* Hold uncore.lock across reset to prevent any register access
@@ -258,18 +263,18 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
while (1) {
active_domains = 0;
- for_each_fw_domain(domain, dev_priv, id) {
- if (del_timer_sync(&domain->timer) == 0)
+ for_each_fw_domain(domain, dev_priv) {
+ if (hrtimer_cancel(&domain->timer) == 0)
continue;
- intel_uncore_fw_release_timer((unsigned long)domain);
+ intel_uncore_fw_release_timer(&domain->timer);
}
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- for_each_fw_domain(domain, dev_priv, id) {
- if (timer_pending(&domain->timer))
- active_domains |= (1 << id);
+ for_each_fw_domain(domain, dev_priv) {
+ if (hrtimer_active(&domain->timer))
+ active_domains |= domain->mask;
}
if (active_domains == 0)
@@ -286,9 +291,9 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
WARN_ON(active_domains);
- for_each_fw_domain(domain, dev_priv, id)
+ for_each_fw_domain(domain, dev_priv)
if (domain->wake_count)
- fw |= 1 << id;
+ fw |= domain->mask;
if (fw)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
@@ -310,21 +315,49 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void intel_uncore_ellc_detect(struct drm_device *dev)
+static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ const unsigned int sets[4] = { 1, 1, 2, 2 };
+ const u32 cap = dev_priv->edram_cap;
+
+ return EDRAM_NUM_BANKS(cap) *
+ ways[EDRAM_WAYS_IDX(cap)] *
+ sets[EDRAM_SETS_IDX(cap)] *
+ 1024 * 1024;
+}
+
+u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_EDRAM(dev_priv))
+ return 0;
- if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
- INTEL_INFO(dev)->gen >= 9) &&
- (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
- /* The docs do not explain exactly how the calculation can be
- * made. It is somewhat guessable, but for now, it's always
- * 128MB.
- * NB: We can't write IDICR yet because we do not have gt funcs
+ /* The needed capability bits for size calculation
+ * are not there with pre gen9 so return 128MB always.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
+ return 128 * 1024 * 1024;
+
+ return gen9_edram_size(dev_priv);
+}
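
On gen9 the eDRAM size is decoded from the capability register via the two lookup tables above. A worked sketch of the arithmetic; the indices are passed in directly because the EDRAM_* bit layout of the capability register is not part of this hunk:

	#include <stdint.h>

	/* gen9 size = banks * ways[way_idx] * sets[set_idx] * 1 MiB. */
	static uint64_t sketch_edram_bytes(unsigned int banks,
	                                   unsigned int way_idx,
	                                   unsigned int set_idx)
	{
	        static const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	        static const unsigned int sets[4] = { 1, 1, 2, 2 };

	        /* e.g. 1 bank, ways[1] = 8, sets[2] = 2 -> 16 MiB */
	        return (uint64_t)banks * ways[way_idx] * sets[set_idx] * 1024 * 1024;
	}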
+
+static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
+{
+ if (IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 9) {
+ dev_priv->edram_cap = __raw_i915_read32(dev_priv,
+ HSW_EDRAM_CAP);
+
+ /* NB: We can't write IDICR yet because we do not have gt funcs
* set up */
- dev_priv->ellc_size = 128;
- DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+ } else {
+ dev_priv->edram_cap = 0;
}
+
+ if (HAS_EDRAM(dev_priv))
+ DRM_INFO("Found %lluMB of eDRAM\n",
+ intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}
static bool
@@ -410,16 +443,15 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
- enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_get)
return;
fw_domains &= dev_priv->uncore.fw_domains;
- for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
if (domain->wake_count++)
- fw_domains &= ~(1 << id);
+ fw_domains &= ~domain->mask;
}
if (fw_domains)
@@ -477,21 +509,19 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
- enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_put)
return;
fw_domains &= dev_priv->uncore.fw_domains;
- for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
if (WARN_ON(domain->wake_count == 0))
continue;
if (--domain->wake_count)
continue;
- domain->wake_count++;
fw_domain_arm_timer(domain);
}
}
@@ -539,18 +569,27 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
struct intel_uncore_forcewake_domain *domain;
- enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_get)
return;
- for_each_fw_domain(domain, dev_priv, id)
+ for_each_fw_domain(domain, dev_priv)
WARN_ON(domain->wake_count);
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
+#define __gen6_reg_read_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd; \
+ if (NEEDS_FORCE_WAKE(offset)) \
+ __fwd = FORCEWAKE_RENDER; \
+ else \
+ __fwd = 0; \
+ __fwd; \
+})
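
These lookup macros use the GCC/clang statement-expression extension: the ({ ... }) block is an expression whose value is its final statement, which lets a macro declare a local and still yield a result. A tiny standalone example of the idiom (the macro below is illustrative):

	/* Statement expression: __fwd at the end is the macro's value. */
	#define SKETCH_READ_FW(offset)                  \
	({                                              \
	        int __fwd;                              \
	        if ((offset) < 0x40000)                 \
	                __fwd = 1;      /* render */    \
	        else                                    \
	                __fwd = 0;                      \
	        __fwd;                                  \
	})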
+
#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
@@ -564,6 +603,48 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
REG_RANGE((reg), 0x22000, 0x24000) || \
REG_RANGE((reg), 0x30000, 0x40000))
+#define __vlv_reg_read_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ if (!NEEDS_FORCE_WAKE(offset)) \
+ __fwd = 0; \
+ else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_MEDIA; \
+ __fwd; \
+})
+
+static const i915_reg_t gen8_shadowed_regs[] = {
+ GEN6_RPNSWREQ,
+ GEN6_RC_VIDEO_FREQ,
+ RING_TAIL(RENDER_RING_BASE),
+ RING_TAIL(GEN6_BSD_RING_BASE),
+ RING_TAIL(VEBOX_RING_BASE),
+ RING_TAIL(BLT_RING_BASE),
+ /* TODO: Other registers are not yet used */
+};
+
+static bool is_gen8_shadowed(u32 offset)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
+ if (offset == gen8_shadowed_regs[i].reg)
+ return true;
+
+ return false;
+}
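
Shadowed registers are matched by a plain linear scan, which is fine because the tables stay tiny. A generic sketch of that membership test (helper name is a placeholder):

	#include <stdbool.h>
	#include <stdint.h>

	/* Membership test as in is_gen8_shadowed()/is_gen9_shadowed(). */
	static bool sketch_is_shadowed(uint32_t offset, const uint32_t *table, int n)
	{
	        int i;

	        for (i = 0; i < n; i++)
	                if (offset == table[i])
	                        return true;
	        return false;
	}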
+
+#define __gen8_reg_write_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd; \
+ if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
+ __fwd = FORCEWAKE_RENDER; \
+ else \
+ __fwd = 0; \
+ __fwd; \
+})
+
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x2000, 0x4000) || \
REG_RANGE((reg), 0x5200, 0x8000) || \
@@ -586,6 +667,34 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
REG_RANGE((reg), 0x9000, 0xB000) || \
REG_RANGE((reg), 0xF000, 0x10000))
+#define __chv_reg_read_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ if (!NEEDS_FORCE_WAKE(offset)) \
+ __fwd = 0; \
+ else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_MEDIA; \
+ else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ __fwd; \
+})
+
+#define __chv_reg_write_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd = 0; \
+ if (!NEEDS_FORCE_WAKE(offset) || is_gen8_shadowed(offset)) \
+ __fwd = 0; \
+ else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_MEDIA; \
+ else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ __fwd; \
+})
+
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
REG_RANGE((reg), 0xB00, 0x2000)
@@ -618,6 +727,61 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
!FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
!FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
+#define SKL_NEEDS_FORCE_WAKE(reg) \
+ ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
+
+#define __gen9_reg_read_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd; \
+ if (!SKL_NEEDS_FORCE_WAKE(offset)) \
+ __fwd = 0; \
+ else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_MEDIA; \
+ else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ else \
+ __fwd = FORCEWAKE_BLITTER; \
+ __fwd; \
+})
+
+static const i915_reg_t gen9_shadowed_regs[] = {
+ RING_TAIL(RENDER_RING_BASE),
+ RING_TAIL(GEN6_BSD_RING_BASE),
+ RING_TAIL(VEBOX_RING_BASE),
+ RING_TAIL(BLT_RING_BASE),
+ GEN6_RPNSWREQ,
+ GEN6_RC_VIDEO_FREQ,
+ /* TODO: Other registers are not yet used */
+};
+
+static bool is_gen9_shadowed(u32 offset)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
+ if (offset == gen9_shadowed_regs[i].reg)
+ return true;
+
+ return false;
+}
+
+#define __gen9_reg_write_fw_domains(offset) \
+({ \
+ enum forcewake_domains __fwd; \
+ if (!SKL_NEEDS_FORCE_WAKE(offset) || is_gen9_shadowed(offset)) \
+ __fwd = 0; \
+ else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER; \
+ else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_MEDIA; \
+ else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
+ __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ else \
+ __fwd = FORCEWAKE_BLITTER; \
+ __fwd; \
+})
+
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -633,15 +797,6 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
const bool read,
const bool before)
{
- /* XXX. We limit the auto arming traces for mmio
- * debugs on these platforms. There are just too many
- * revealed by these and CI/Bat suffers from the noise.
- * Please fix and then re-enable the automatic traces.
- */
- if (i915.mmio_debug < 2 &&
- (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
- return;
-
if (WARN(check_for_unclaimed_mmio(dev_priv),
"Unclaimed register detected %s %s register 0x%x\n",
before ? "before" : "after",
@@ -716,23 +871,21 @@ __gen2_read(64)
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
-static inline void __force_wake_get(struct drm_i915_private *dev_priv,
- enum forcewake_domains fw_domains)
+static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+ enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
- enum forcewake_domain_id id;
if (WARN_ON(!fw_domains))
return;
/* Ideally GCC would be constant-fold and eliminate this loop */
- for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
if (domain->wake_count) {
- fw_domains &= ~(1 << id);
+ fw_domains &= ~domain->mask;
continue;
}
- domain->wake_count++;
fw_domain_arm_timer(domain);
}
@@ -743,9 +896,11 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+ enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- if (NEEDS_FORCE_WAKE(offset)) \
- __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ fw_engine = __gen6_reg_read_fw_domains(offset); \
+ if (fw_engine) \
+ __force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
@@ -753,16 +908,11 @@ gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
- enum forcewake_domains fw_engine = 0; \
+ enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- if (!NEEDS_FORCE_WAKE(offset)) \
- fw_engine = 0; \
- else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_MEDIA; \
+ fw_engine = __vlv_reg_read_fw_domains(offset); \
if (fw_engine) \
- __force_wake_get(dev_priv, fw_engine); \
+ __force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
@@ -770,42 +920,23 @@ vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
- enum forcewake_domains fw_engine = 0; \
+ enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- if (!NEEDS_FORCE_WAKE(offset)) \
- fw_engine = 0; \
- else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ fw_engine = __chv_reg_read_fw_domains(offset); \
if (fw_engine) \
- __force_wake_get(dev_priv, fw_engine); \
+ __force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
-#define SKL_NEEDS_FORCE_WAKE(reg) \
- ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
-
#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- if (!SKL_NEEDS_FORCE_WAKE(offset)) \
- fw_engine = 0; \
- else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- else \
- fw_engine = FORCEWAKE_BLITTER; \
+ fw_engine = __gen9_reg_read_fw_domains(offset); \
if (fw_engine) \
- __force_wake_get(dev_priv, fw_engine); \
+ __force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
@@ -942,34 +1073,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool t
GEN6_WRITE_FOOTER; \
}
-static const i915_reg_t gen8_shadowed_regs[] = {
- FORCEWAKE_MT,
- GEN6_RPNSWREQ,
- GEN6_RC_VIDEO_FREQ,
- RING_TAIL(RENDER_RING_BASE),
- RING_TAIL(GEN6_BSD_RING_BASE),
- RING_TAIL(VEBOX_RING_BASE),
- RING_TAIL(BLT_RING_BASE),
- /* TODO: Other registers are not yet used */
-};
-
-static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
- if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
- return true;
-
- return false;
-}
-
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+ enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
- __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
+ fw_engine = __gen8_reg_write_fw_domains(offset); \
+ if (fw_engine) \
+ __force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
@@ -977,66 +1088,24 @@ gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
- enum forcewake_domains fw_engine = 0; \
+ enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- if (!NEEDS_FORCE_WAKE(offset) || \
- is_gen8_shadowed(dev_priv, reg)) \
- fw_engine = 0; \
- else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
+ fw_engine = __chv_reg_write_fw_domains(offset); \
if (fw_engine) \
- __force_wake_get(dev_priv, fw_engine); \
+ __force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
-static const i915_reg_t gen9_shadowed_regs[] = {
- RING_TAIL(RENDER_RING_BASE),
- RING_TAIL(GEN6_BSD_RING_BASE),
- RING_TAIL(VEBOX_RING_BASE),
- RING_TAIL(BLT_RING_BASE),
- FORCEWAKE_BLITTER_GEN9,
- FORCEWAKE_RENDER_GEN9,
- FORCEWAKE_MEDIA_GEN9,
- GEN6_RPNSWREQ,
- GEN6_RC_VIDEO_FREQ,
- /* TODO: Other registers are not yet used */
-};
-
-static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
- if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
- return true;
-
- return false;
-}
-
#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- if (!SKL_NEEDS_FORCE_WAKE(offset) || \
- is_gen9_shadowed(dev_priv, reg)) \
- fw_engine = 0; \
- else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER; \
- else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_MEDIA; \
- else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
- fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
- else \
- fw_engine = FORCEWAKE_BLITTER; \
+ fw_engine = __gen9_reg_write_fw_domains(offset); \
if (fw_engine) \
- __force_wake_get(dev_priv, fw_engine); \
+ __force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
@@ -1150,7 +1219,14 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
d->i915 = dev_priv;
d->id = domain_id;
- setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
+ BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
+ BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
+ BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
+
+ d->mask = 1 << domain_id;
+
+ hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ d->timer.function = intel_uncore_fw_release_timer;
dev_priv->uncore.fw_domains |= (1 << domain_id);
@@ -1161,7 +1237,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (INTEL_INFO(dev_priv->dev)->gen <= 5)
+ if (INTEL_INFO(dev_priv)->gen <= 5)
return;
if (IS_GEN9(dev)) {
@@ -1257,7 +1333,7 @@ void intel_uncore_init(struct drm_device *dev)
i915_check_vgpu(dev);
- intel_uncore_ellc_detect(dev);
+ intel_uncore_edram_detect(dev_priv);
intel_uncore_fw_domains_init(dev);
__intel_uncore_early_sanitize(dev, false);
@@ -1437,7 +1513,7 @@ static int i915_reset_complete(struct drm_device *dev)
return (gdrst & GRDOM_RESET_STATUS) == 0;
}
-static int i915_do_reset(struct drm_device *dev)
+static int i915_do_reset(struct drm_device *dev, unsigned engine_mask)
{
/* assert reset for at least 20 usec */
pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
@@ -1454,13 +1530,13 @@ static int g4x_reset_complete(struct drm_device *dev)
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
-static int g33_do_reset(struct drm_device *dev)
+static int g33_do_reset(struct drm_device *dev, unsigned engine_mask)
{
pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
return wait_for(g4x_reset_complete(dev), 500);
}
-static int g4x_do_reset(struct drm_device *dev)
+static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -1490,7 +1566,7 @@ static int g4x_do_reset(struct drm_device *dev)
return 0;
}
-static int ironlake_do_reset(struct drm_device *dev)
+static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -1514,75 +1590,132 @@ static int ironlake_do_reset(struct drm_device *dev)
return 0;
}
-static int gen6_do_reset(struct drm_device *dev)
+/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
+static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
+ u32 hw_domain_mask)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- /* Reset the chip */
+ int ret;
/* GEN6_GDRST is not in the gt power well, no need to check
* for fifo space for the write or forcewake the chip for
* the read
*/
- __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
+ __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
+
+#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0)
+ /* Spin waiting for the device to ack the reset requests */
+ ret = wait_for(ACKED, 500);
+#undef ACKED
+
+ return ret;
+}
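gen6_hw_domain_reset() takes a raw hardware domain mask, so several GENX_GRDOM_* bits can be requested in a single write; a hedged sketch (the bit names are the ones listed in gen6_reset_engines() below):

	ret = gen6_hw_domain_reset(dev_priv,
				   GEN6_GRDOM_RENDER | GEN6_GRDOM_MEDIA);
	if (ret)
		DRM_ERROR("render/media domain reset timed out\n");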
- /* Spin waiting for the device to ack the reset request */
- ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+/**
+ * gen6_reset_engines - reset individual engines
+ * @dev: DRM device
+ * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
+ *
+ * This function will reset the individual engines that are set in engine_mask.
+ * If ALL_ENGINES is passed as the mask, a full global domain reset is issued.
+ *
+ * Note: It is the caller's responsibility to handle the difference between
+ * requesting a full domain reset and requesting a reset of all available
+ * individual engines.
+ *
+ * Returns 0 on success, nonzero on error.
+ */
+static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *engine;
+ const u32 hw_engine_mask[I915_NUM_ENGINES] = {
+ [RCS] = GEN6_GRDOM_RENDER,
+ [BCS] = GEN6_GRDOM_BLT,
+ [VCS] = GEN6_GRDOM_MEDIA,
+ [VCS2] = GEN8_GRDOM_MEDIA2,
+ [VECS] = GEN6_GRDOM_VECS,
+ };
+ u32 hw_mask;
+ int ret;
+
+ if (engine_mask == ALL_ENGINES) {
+ hw_mask = GEN6_GRDOM_FULL;
+ } else {
+ hw_mask = 0;
+ for_each_engine_masked(engine, dev_priv, engine_mask)
+ hw_mask |= hw_engine_mask[engine->id];
+ }
+
+ ret = gen6_hw_domain_reset(dev_priv, hw_mask);
intel_uncore_forcewake_reset(dev, true);
return ret;
}
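Per the kernel-doc above, engine_mask is built from intel_ring_flag() bits; resetting a single engine through the exported wrapper defined further below might look like this sketch:

	/* illustrative: reset one engine instead of passing ALL_ENGINES */
	ret = intel_gpu_reset(dev, intel_ring_flag(engine));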
-static int wait_for_register(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- const u32 mask,
- const u32 value,
- const unsigned long timeout_ms)
+static int wait_for_register_fw(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ const u32 mask,
+ const u32 value,
+ const unsigned long timeout_ms)
{
- return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
+ return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms);
}
-static int gen8_do_reset(struct drm_device *dev)
+static int gen8_request_engine_reset(struct intel_engine_cs *engine)
+{
+ int ret;
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+ I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+ ret = wait_for_register_fw(dev_priv,
+ RING_RESET_CTL(engine->mmio_base),
+ RESET_CTL_READY_TO_RESET,
+ RESET_CTL_READY_TO_RESET,
+ 700);
+ if (ret)
+ DRM_ERROR("%s: reset request timeout\n", engine->name);
+
+ return ret;
+}
+
+static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
+ I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+}
+
+static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
- int i;
- for_each_ring(engine, dev_priv, i) {
- I915_WRITE(RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
-
- if (wait_for_register(dev_priv,
- RING_RESET_CTL(engine->mmio_base),
- RESET_CTL_READY_TO_RESET,
- RESET_CTL_READY_TO_RESET,
- 700)) {
- DRM_ERROR("%s: reset request timeout\n", engine->name);
+ for_each_engine_masked(engine, dev_priv, engine_mask)
+ if (gen8_request_engine_reset(engine))
goto not_ready;
- }
- }
- return gen6_do_reset(dev);
+ return gen6_reset_engines(dev, engine_mask);
not_ready:
- for_each_ring(engine, dev_priv, i)
- I915_WRITE(RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+ for_each_engine_masked(engine, dev_priv, engine_mask)
+ gen8_unrequest_engine_reset(engine);
return -EIO;
}
-static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
+static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *,
+ unsigned engine_mask)
{
if (!i915.reset)
return NULL;
if (INTEL_INFO(dev)->gen >= 8)
- return gen8_do_reset;
+ return gen8_reset_engines;
else if (INTEL_INFO(dev)->gen >= 6)
- return gen6_do_reset;
+ return gen6_reset_engines;
else if (IS_GEN5(dev))
return ironlake_do_reset;
else if (IS_G4X(dev))
@@ -1595,10 +1728,10 @@ static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *)
return NULL;
}
-int intel_gpu_reset(struct drm_device *dev)
+int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int (*reset)(struct drm_device *);
+ int (*reset)(struct drm_device *, unsigned);
int ret;
reset = intel_get_gpu_reset(dev);
@@ -1609,7 +1742,7 @@ int intel_gpu_reset(struct drm_device *dev)
* request may be dropped and never completes (causing -EIO).
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = reset(dev);
+ ret = reset(dev, engine_mask);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;
@@ -1620,6 +1753,25 @@ bool intel_has_gpu_reset(struct drm_device *dev)
return intel_get_gpu_reset(dev) != NULL;
}
+int intel_guc_reset(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ unsigned long irqflags;
+
+ if (!i915.enable_guc_submission)
+ return -EINVAL;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ return ret;
+}
+
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
return check_for_unclaimed_mmio(dev_priv);
@@ -1643,3 +1795,111 @@ intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
return false;
}
+
+static enum forcewake_domains
+intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
+ i915_reg_t reg)
+{
+ enum forcewake_domains fw_domains;
+
+ if (intel_vgpu_active(dev_priv->dev))
+ return 0;
+
+ switch (INTEL_INFO(dev_priv)->gen) {
+ case 9:
+ fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+ break;
+ case 8:
+ if (IS_CHERRYVIEW(dev_priv))
+ fw_domains = __chv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+ else
+ fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+ break;
+ case 7:
+ case 6:
+ if (IS_VALLEYVIEW(dev_priv))
+ fw_domains = __vlv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+ else
+ fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
+ break;
+ default:
+ MISSING_CASE(INTEL_INFO(dev_priv)->gen);
+ case 5: /* forcewake was introduced with gen6 */
+ case 4:
+ case 3:
+ case 2:
+ return 0;
+ }
+
+ WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+
+ return fw_domains;
+}
+
+static enum forcewake_domains
+intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
+ i915_reg_t reg)
+{
+ enum forcewake_domains fw_domains;
+
+ if (intel_vgpu_active(dev_priv->dev))
+ return 0;
+
+ switch (INTEL_INFO(dev_priv)->gen) {
+ case 9:
+ fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
+ break;
+ case 8:
+ if (IS_CHERRYVIEW(dev_priv))
+ fw_domains = __chv_reg_write_fw_domains(i915_mmio_reg_offset(reg));
+ else
+ fw_domains = __gen8_reg_write_fw_domains(i915_mmio_reg_offset(reg));
+ break;
+ case 7:
+ case 6:
+ fw_domains = FORCEWAKE_RENDER;
+ break;
+ default:
+ MISSING_CASE(INTEL_INFO(dev_priv)->gen);
+ case 5:
+ case 4:
+ case 3:
+ case 2:
+ return 0;
+ }
+
+ WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+
+ return fw_domains;
+}
+
+/**
+ * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
+ * a register
+ * @dev_priv: pointer to struct drm_i915_private
+ * @reg: register in question
+ * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
+ *
+ * Returns the set of forcewake domains that must be taken, with e.g.
+ * intel_uncore_forcewake_get(), for the specified register to be accessible in
+ * the specified mode (read, write or read/write) with raw mmio accessors.
+ *
+ * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER)
+ * requires callers to do FIFO management on their own or risk losing writes.
+ */
+enum forcewake_domains
+intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, unsigned int op)
+{
+ enum forcewake_domains fw_domains = 0;
+
+ WARN_ON(!op);
+
+ if (op & FW_REG_READ)
+ fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
+
+ if (op & FW_REG_WRITE)
+ fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
+
+ return fw_domains;
+}
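A usage sketch for the new query (hypothetical caller; the __locked get/put variants and the raw accessors are the ones referenced in the kernel-doc above):

	enum forcewake_domains fw;

	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
					    FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw);

	val = I915_READ_FW(reg);	/* raw mmio, no implicit forcewake */
	I915_WRITE_FW(reg, val | bit);

	intel_uncore_forcewake_put__locked(dev_priv, fw);
	spin_unlock_irq(&dev_priv->uncore.lock);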
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
new file mode 100644
index 000000000..44fb0b35e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -0,0 +1,845 @@
+/*
+ * Copyright © 2006-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/*
+ * This information is private to VBT parsing in intel_bios.c.
+ *
+ * Please do NOT include anywhere else.
+ */
+#ifndef _INTEL_BIOS_PRIVATE
+#error "intel_vbt_defs.h is private to intel_bios.c"
+#endif
+
+#ifndef _INTEL_VBT_DEFS_H_
+#define _INTEL_VBT_DEFS_H_
+
+#include "intel_bios.h"
+
+/**
+ * struct vbt_header - VBT Header structure
+ * @signature: VBT signature, always starts with "$VBT"
+ * @version: Version of this structure
+ * @header_size: Size of this structure
+ * @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks)
+ * @vbt_checksum: Checksum
+ * @reserved0: Reserved
+ * @bdb_offset: Offset of &struct bdb_header from beginning of VBT
+ * @aim_offset: Offsets of add-in data blocks from beginning of VBT
+ */
+struct vbt_header {
+ u8 signature[20];
+ u16 version;
+ u16 header_size;
+ u16 vbt_size;
+ u8 vbt_checksum;
+ u8 reserved0;
+ u32 bdb_offset;
+ u32 aim_offset[4];
+} __packed;
+
+/**
+ * struct bdb_header - BDB Header structure
+ * @signature: BDB signature "BIOS_DATA_BLOCK"
+ * @version: Version of the data block definitions
+ * @header_size: Size of this structure
+ * @bdb_size: Size of BDB (BDB Header and data blocks)
+ */
+struct bdb_header {
+ u8 signature[16];
+ u16 version;
+ u16 header_size;
+ u16 bdb_size;
+} __packed;
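Per the two headers above, the BDB is reached from the VBT by offset; a minimal sketch, assuming the blob has already been mapped and validated:

	const struct vbt_header *vbt = vbt_blob;	/* assumed mapping */
	const struct bdb_header *bdb =
		(const struct bdb_header *)((const u8 *)vbt + vbt->bdb_offset);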
+
+/* strictly speaking, this is a "skip" block, but it has interesting info */
+struct vbios_data {
+ u8 type; /* 0 == desktop, 1 == mobile */
+ u8 relstage;
+ u8 chipset;
+ u8 lvds_present:1;
+ u8 tv_present:1;
+ u8 rsvd2:6; /* finish byte */
+ u8 rsvd3[4];
+ u8 signon[155];
+ u8 copyright[61];
+ u16 code_segment;
+ u8 dos_boot_mode;
+ u8 bandwidth_percent;
+ u8 rsvd4; /* popup memory size */
+ u8 resize_pci_bios;
+ u8 rsvd5; /* is crt already on ddc2 */
+} __packed;
+
+/*
+ * There are several types of BIOS data blocks (BDBs), each block has
+ * an ID and size in the first 3 bytes (ID in first, size in next 2).
+ * Known types are listed below.
+ */
+#define BDB_GENERAL_FEATURES 1
+#define BDB_GENERAL_DEFINITIONS 2
+#define BDB_OLD_TOGGLE_LIST 3
+#define BDB_MODE_SUPPORT_LIST 4
+#define BDB_GENERIC_MODE_TABLE 5
+#define BDB_EXT_MMIO_REGS 6
+#define BDB_SWF_IO 7
+#define BDB_SWF_MMIO 8
+#define BDB_PSR 9
+#define BDB_MODE_REMOVAL_TABLE 10
+#define BDB_CHILD_DEVICE_TABLE 11
+#define BDB_DRIVER_FEATURES 12
+#define BDB_DRIVER_PERSISTENCE 13
+#define BDB_EXT_TABLE_PTRS 14
+#define BDB_DOT_CLOCK_OVERRIDE 15
+#define BDB_DISPLAY_SELECT 16
+/* 17 rsvd */
+#define BDB_DRIVER_ROTATION 18
+#define BDB_DISPLAY_REMOVE 19
+#define BDB_OEM_CUSTOM 20
+#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */
+#define BDB_SDVO_LVDS_OPTIONS 22
+#define BDB_SDVO_PANEL_DTDS 23
+#define BDB_SDVO_LVDS_PNP_IDS 24
+#define BDB_SDVO_LVDS_POWER_SEQ 25
+#define BDB_TV_OPTIONS 26
+#define BDB_EDP 27
+#define BDB_LVDS_OPTIONS 40
+#define BDB_LVDS_LFP_DATA_PTRS 41
+#define BDB_LVDS_LFP_DATA 42
+#define BDB_LVDS_BACKLIGHT 43
+#define BDB_LVDS_POWER 44
+#define BDB_MIPI_CONFIG 52
+#define BDB_MIPI_SEQUENCE 53
+#define BDB_SKIP 254 /* VBIOS private block, ignore */
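Given the 3-byte ID-plus-size framing described above, locating a block is a linear walk; a sketch under those assumptions (helper name and bounds handling are illustrative, not part of this header):

	/* return the payload of the BDB block with the given id, or NULL */
	static const u8 *find_bdb_block(const struct bdb_header *bdb, int id)
	{
		const u8 *p = (const u8 *)bdb + bdb->header_size;
		const u8 *end = (const u8 *)bdb + bdb->bdb_size;

		while (p + 3 <= end) {
			u16 size = p[1] | (p[2] << 8);	/* little-endian */

			if (p[0] == id)
				return p + 3;	/* skip ID and size bytes */
			p += 3 + size;
		}

		return NULL;
	}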
+
+struct bdb_general_features {
+ /* bits 1 */
+ u8 panel_fitting:2;
+ u8 flexaim:1;
+ u8 msg_enable:1;
+ u8 clear_screen:3;
+ u8 color_flip:1;
+
+ /* bits 2 */
+ u8 download_ext_vbt:1;
+ u8 enable_ssc:1;
+ u8 ssc_freq:1;
+ u8 enable_lfp_on_override:1;
+ u8 disable_ssc_ddt:1;
+ u8 rsvd7:1;
+ u8 display_clock_mode:1;
+ u8 rsvd8:1; /* finish byte */
+
+ /* bits 3 */
+ u8 disable_smooth_vision:1;
+ u8 single_dvi:1;
+ u8 rsvd9:1;
+ u8 fdi_rx_polarity_inverted:1;
+ u8 rsvd10:4; /* finish byte */
+
+ /* bits 4 */
+ u8 legacy_monitor_detect;
+
+ /* bits 5 */
+ u8 int_crt_support:1;
+ u8 int_tv_support:1;
+ u8 int_efp_support:1;
+ u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
+ u8 rsvd11:3; /* finish byte */
+} __packed;
+
+/* pre-915 */
+#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
+#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
+#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
+#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
+
+/* Pre 915 */
+#define DEVICE_TYPE_NONE 0x00
+#define DEVICE_TYPE_CRT 0x01
+#define DEVICE_TYPE_TV 0x09
+#define DEVICE_TYPE_EFP 0x12
+#define DEVICE_TYPE_LFP 0x22
+/* On 915+ */
+#define DEVICE_TYPE_CRT_DPMS 0x6001
+#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
+#define DEVICE_TYPE_TV_COMPOSITE 0x0209
+#define DEVICE_TYPE_TV_MACROVISION 0x0289
+#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
+#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
+#define DEVICE_TYPE_TV_SCART 0x0209
+#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
+#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
+#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
+#define DEVICE_TYPE_EFP_DVI_I 0x6053
+#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
+#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
+#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
+#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
+#define DEVICE_TYPE_LFP_PANELLINK 0x5012
+#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
+#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
+#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
+#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+
+#define DEVICE_CFG_NONE 0x00
+#define DEVICE_CFG_12BIT_DVOB 0x01
+#define DEVICE_CFG_12BIT_DVOC 0x02
+#define DEVICE_CFG_24BIT_DVOBC 0x09
+#define DEVICE_CFG_24BIT_DVOCB 0x0a
+#define DEVICE_CFG_DUAL_DVOB 0x11
+#define DEVICE_CFG_DUAL_DVOC 0x12
+#define DEVICE_CFG_DUAL_DVOBC 0x13
+#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
+#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
+
+#define DEVICE_WIRE_NONE 0x00
+#define DEVICE_WIRE_DVOB 0x01
+#define DEVICE_WIRE_DVOC 0x02
+#define DEVICE_WIRE_DVOBC 0x03
+#define DEVICE_WIRE_DVOBB 0x05
+#define DEVICE_WIRE_DVOCC 0x06
+#define DEVICE_WIRE_DVOB_MASTER 0x0d
+#define DEVICE_WIRE_DVOC_MASTER 0x0e
+
+#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
+#define DEVICE_PORT_DVOB 0x01
+#define DEVICE_PORT_DVOC 0x02
+
+/*
+ * We used to keep this struct without any version control. We should avoid
+ * using it in the future, but it should be safe to keep using it in the old
+ * code. Do not change; we rely on its size.
+ */
+struct old_child_dev_config {
+ u16 handle;
+ u16 device_type;
+ u8 device_id[10]; /* ascii string */
+ u16 addin_offset;
+ u8 dvo_port; /* See Device_PORT_* above */
+ u8 i2c_pin;
+ u8 slave_addr;
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_cfg; /* See DEVICE_CFG_* above */
+ u8 dvo2_port;
+ u8 i2c2_pin;
+ u8 slave2_addr;
+ u8 ddc2_pin;
+ u8 capabilities;
+ u8 dvo_wiring;/* See DEVICE_WIRE_* above */
+ u8 dvo2_wiring;
+ u16 extended_type;
+ u8 dvo_function;
+} __packed;
+
+/* This one contains field offsets that are known to be common for all BDB
+ * versions. Notice that the meaning of the contents may still change, but at
+ * least the offsets are consistent. */
+
+struct common_child_dev_config {
+ u16 handle;
+ u16 device_type;
+ u8 not_common1[12];
+ u8 dvo_port;
+ u8 not_common2[2];
+ u8 ddc_pin;
+ u16 edid_ptr;
+ u8 dvo_cfg; /* See DEVICE_CFG_* above */
+ u8 efp_routed:1;
+ u8 lane_reversal:1;
+ u8 lspcon:1;
+ u8 iboost:1;
+ u8 hpd_invert:1;
+ u8 flag_reserved:3;
+ u8 hdmi_support:1;
+ u8 dp_support:1;
+ u8 tmds_support:1;
+ u8 support_reserved:5;
+ u8 not_common3[12];
+ u8 iboost_level;
+} __packed;
+
+
+/* This field changes depending on the BDB version, so the most reliable way to
+ * read it is by checking the BDB version and reading the raw pointer. */
+union child_device_config {
+ /* This one is safe to be used anywhere, but the code should still check
+ * the BDB version. */
+ u8 raw[33];
+ /* This one should only be kept for legacy code. */
+ struct old_child_dev_config old;
+ /* This one should also be safe to use anywhere, even without version
+ * checks. */
+ struct common_child_dev_config common;
+} __packed;
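A sketch of the access pattern the comments above prescribe (field names are from this header; the helper itself is illustrative):

	static u16 child_device_type(const union child_device_config *child)
	{
		/* device_type sits at a version-stable offset, so the
		 * common view is always safe; the old view must only be
		 * read after an explicit BDB version check.
		 */
		return child->common.device_type;
	}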
+
+struct bdb_general_definitions {
+ /* DDC GPIO */
+ u8 crt_ddc_gmbus_pin;
+
+ /* DPMS bits */
+ u8 dpms_acpi:1;
+ u8 skip_boot_crt_detect:1;
+ u8 dpms_aim:1;
+ u8 rsvd1:5; /* finish byte */
+
+ /* boot device bits */
+ u8 boot_display[2];
+ u8 child_dev_size;
+
+ /*
+ * Device info:
+ * If TV is present, it'll be at devices[0].
+ * LVDS will be next, either devices[0] or [1], if present.
+	 * On some platforms the number of devices is 6, but it can be as few
+	 * as 4 if both TV and LVDS are missing.
+	 * The device count is derived from the size of the general
+	 * definitions block, using the following formula:
+	 * number = (block_size - sizeof(bdb_general_definitions)) /
+ * defs->child_dev_size;
+ */
+ uint8_t devices[0];
+} __packed;
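The formula in the comment above translates directly into code; a hedged helper (name invented for illustration):

	static int bdb_child_device_num(const struct bdb_general_definitions *defs,
					size_t block_size)
	{
		/* devices[] is a flexible array, so sizeof(*defs) covers
		 * only the fixed header part of the block.
		 */
		return (block_size - sizeof(*defs)) / defs->child_dev_size;
	}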
+
+/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */
+#define MODE_MASK 0x3
+
+struct bdb_lvds_options {
+ u8 panel_type;
+ u8 rsvd1;
+ /* LVDS capabilities, stored in a dword */
+ u8 pfit_mode:2;
+ u8 pfit_text_mode_enhanced:1;
+ u8 pfit_gfx_mode_enhanced:1;
+ u8 pfit_ratio_auto:1;
+ u8 pixel_dither:1;
+ u8 lvds_edid:1;
+ u8 rsvd2:1;
+ u8 rsvd4;
+ /* LVDS Panel channel bits stored here */
+ u32 lvds_panel_channel_bits;
+ /* LVDS SSC (Spread Spectrum Clock) bits stored here. */
+ u16 ssc_bits;
+ u16 ssc_freq;
+ u16 ssc_ddt;
+ /* Panel color depth defined here */
+ u16 panel_color_depth;
+ /* LVDS panel type bits stored here */
+ u32 dps_panel_type_bits;
+ /* LVDS backlight control type bits stored here */
+ u32 blt_control_type_bits;
+} __packed;
+
+/* LFP pointer table contains entries to the struct below */
+struct bdb_lvds_lfp_data_ptr {
+ u16 fp_timing_offset; /* offsets are from start of bdb */
+ u8 fp_table_size;
+ u16 dvo_timing_offset;
+ u8 dvo_table_size;
+ u16 panel_pnp_id_offset;
+ u8 pnp_table_size;
+} __packed;
+
+struct bdb_lvds_lfp_data_ptrs {
+ u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+ struct bdb_lvds_lfp_data_ptr ptr[16];
+} __packed;
+
+/* LFP data has 3 blocks per entry */
+struct lvds_fp_timing {
+ u16 x_res;
+ u16 y_res;
+ u32 lvds_reg;
+ u32 lvds_reg_val;
+ u32 pp_on_reg;
+ u32 pp_on_reg_val;
+ u32 pp_off_reg;
+ u32 pp_off_reg_val;
+ u32 pp_cycle_reg;
+ u32 pp_cycle_reg_val;
+ u32 pfit_reg;
+ u32 pfit_reg_val;
+ u16 terminator;
+} __packed;
+
+struct lvds_dvo_timing {
+ u16 clock; /**< In 10khz */
+ u8 hactive_lo;
+ u8 hblank_lo;
+ u8 hblank_hi:4;
+ u8 hactive_hi:4;
+ u8 vactive_lo;
+ u8 vblank_lo;
+ u8 vblank_hi:4;
+ u8 vactive_hi:4;
+ u8 hsync_off_lo;
+ u8 hsync_pulse_width;
+ u8 vsync_pulse_width:4;
+ u8 vsync_off:4;
+ u8 rsvd0:6;
+ u8 hsync_off_hi:2;
+ u8 himage_lo;
+ u8 vimage_lo;
+ u8 vimage_hi:4;
+ u8 himage_hi:4;
+ u8 h_border;
+ u8 v_border;
+ u8 rsvd1:3;
+ u8 digital:2;
+ u8 vsync_positive:1;
+ u8 hsync_positive:1;
+ u8 rsvd2:1;
+} __packed;
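The hi/lo split fields reassemble by shifting; for example (illustrative helper):

	static u32 dvo_timing_hactive(const struct lvds_dvo_timing *t)
	{
		/* 4-bit high nibble plus 8-bit low byte: a 12-bit width */
		return (t->hactive_hi << 8) | t->hactive_lo;
	}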
+
+struct lvds_pnp_id {
+ u16 mfg_name;
+ u16 product_code;
+ u32 serial;
+ u8 mfg_week;
+ u8 mfg_year;
+} __packed;
+
+struct bdb_lvds_lfp_data_entry {
+ struct lvds_fp_timing fp_timing;
+ struct lvds_dvo_timing dvo_timing;
+ struct lvds_pnp_id pnp_id;
+} __packed;
+
+struct bdb_lvds_lfp_data {
+ struct bdb_lvds_lfp_data_entry data[16];
+} __packed;
+
+#define BDB_BACKLIGHT_TYPE_NONE 0
+#define BDB_BACKLIGHT_TYPE_PWM 2
+
+struct bdb_lfp_backlight_data_entry {
+ u8 type:2;
+ u8 active_low_pwm:1;
+ u8 obsolete1:5;
+ u16 pwm_freq_hz;
+ u8 min_brightness;
+ u8 obsolete2;
+ u8 obsolete3;
+} __packed;
+
+struct bdb_lfp_backlight_data {
+ u8 entry_size;
+ struct bdb_lfp_backlight_data_entry data[16];
+ u8 level[16];
+} __packed;
+
+struct aimdb_header {
+ char signature[16];
+ char oem_device[20];
+ u16 aimdb_version;
+ u16 aimdb_header_size;
+ u16 aimdb_size;
+} __packed;
+
+struct aimdb_block {
+ u8 aimdb_id;
+ u16 aimdb_size;
+} __packed;
+
+struct vch_panel_data {
+ u16 fp_timing_offset;
+ u8 fp_timing_size;
+ u16 dvo_timing_offset;
+ u8 dvo_timing_size;
+ u16 text_fitting_offset;
+ u8 text_fitting_size;
+ u16 graphics_fitting_offset;
+ u8 graphics_fitting_size;
+} __packed;
+
+struct vch_bdb_22 {
+ struct aimdb_block aimdb_block;
+ struct vch_panel_data panels[16];
+} __packed;
+
+struct bdb_sdvo_lvds_options {
+ u8 panel_backlight;
+ u8 h40_set_panel_type;
+ u8 panel_type;
+ u8 ssc_clk_freq;
+ u16 als_low_trip;
+ u16 als_high_trip;
+ u8 sclalarcoeff_tab_row_num;
+ u8 sclalarcoeff_tab_row_size;
+ u8 coefficient[8];
+ u8 panel_misc_bits_1;
+ u8 panel_misc_bits_2;
+ u8 panel_misc_bits_3;
+ u8 panel_misc_bits_4;
+} __packed;
+
+
+#define BDB_DRIVER_FEATURE_NO_LVDS 0
+#define BDB_DRIVER_FEATURE_INT_LVDS 1
+#define BDB_DRIVER_FEATURE_SDVO_LVDS 2
+#define BDB_DRIVER_FEATURE_EDP 3
+
+struct bdb_driver_features {
+ u8 boot_dev_algorithm:1;
+ u8 block_display_switch:1;
+ u8 allow_display_switch:1;
+ u8 hotplug_dvo:1;
+ u8 dual_view_zoom:1;
+ u8 int15h_hook:1;
+ u8 sprite_in_clone:1;
+ u8 primary_lfp_id:1;
+
+ u16 boot_mode_x;
+ u16 boot_mode_y;
+ u8 boot_mode_bpp;
+ u8 boot_mode_refresh;
+
+ u16 enable_lfp_primary:1;
+ u16 selective_mode_pruning:1;
+ u16 dual_frequency:1;
+ u16 render_clock_freq:1; /* 0: high freq; 1: low freq */
+ u16 nt_clone_support:1;
+ u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */
+ u16 sprite_display_assign:1; /* 0: secondary; 1: primary */
+ u16 cui_aspect_scaling:1;
+ u16 preserve_aspect_ratio:1;
+ u16 sdvo_device_power_down:1;
+ u16 crt_hotplug:1;
+ u16 lvds_config:2;
+ u16 tv_hotplug:1;
+ u16 hdmi_config:2;
+
+ u8 static_display:1;
+ u8 reserved2:7;
+ u16 legacy_crt_max_x;
+ u16 legacy_crt_max_y;
+ u8 legacy_crt_max_refresh;
+
+ u8 hdmi_termination;
+ u8 custom_vbt_version;
+ /* Driver features data block */
+ u16 rmpm_enabled:1;
+ u16 s2ddt_enabled:1;
+ u16 dpst_enabled:1;
+ u16 bltclt_enabled:1;
+ u16 adb_enabled:1;
+ u16 drrs_enabled:1;
+ u16 grs_enabled:1;
+ u16 gpmt_enabled:1;
+ u16 tbt_enabled:1;
+ u16 psr_enabled:1;
+ u16 ips_enabled:1;
+ u16 reserved3:4;
+ u16 pc_feature_valid:1;
+} __packed;
+
+#define EDP_18BPP 0
+#define EDP_24BPP 1
+#define EDP_30BPP 2
+#define EDP_RATE_1_62 0
+#define EDP_RATE_2_7 1
+#define EDP_LANE_1 0
+#define EDP_LANE_2 1
+#define EDP_LANE_4 3
+#define EDP_PREEMPHASIS_NONE 0
+#define EDP_PREEMPHASIS_3_5dB 1
+#define EDP_PREEMPHASIS_6dB 2
+#define EDP_PREEMPHASIS_9_5dB 3
+#define EDP_VSWING_0_4V 0
+#define EDP_VSWING_0_6V 1
+#define EDP_VSWING_0_8V 2
+#define EDP_VSWING_1_2V 3
+
+
+struct edp_link_params {
+ u8 rate:4;
+ u8 lanes:4;
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __packed;
+
+struct bdb_edp {
+ struct edp_power_seq power_seqs[16];
+ u32 color_depth;
+ struct edp_link_params link_params[16];
+ u32 sdrrs_msa_timing_delay;
+
+ /* ith bit indicates enabled/disabled for (i+1)th panel */
+ u16 edp_s3d_feature;
+ u16 edp_t3_optimization;
+ u64 edp_vswing_preemph; /* v173 */
+} __packed;
+
+struct psr_table {
+ /* Feature bits */
+ u8 full_link:1;
+ u8 require_aux_to_wakeup:1;
+ u8 feature_bits_rsvd:6;
+
+ /* Wait times */
+ u8 idle_frames:4;
+ u8 lines_to_wait:3;
+ u8 wait_times_rsvd:1;
+
+ /* TP wake up time in multiple of 100 */
+ u16 tp1_wakeup_time;
+ u16 tp2_tp3_wakeup_time;
+} __packed;
+
+struct bdb_psr {
+ struct psr_table psr_table[16];
+} __packed;
+
+/*
+ * Driver<->VBIOS interaction occurs through scratch bits in
+ * GR18 & SWF*.
+ */
+
+/* GR18 bits are set on display switch and hotkey events */
+#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */
+#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */
+#define GR18_HK_NONE (0x0<<3)
+#define GR18_HK_LFP_STRETCH (0x1<<3)
+#define GR18_HK_TOGGLE_DISP (0x2<<3)
+#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */
+#define GR18_HK_POPUP_DISABLED (0x6<<3)
+#define GR18_HK_POPUP_ENABLED (0x7<<3)
+#define GR18_HK_PFIT (0x8<<3)
+#define GR18_HK_APM_CHANGE (0xa<<3)
+#define GR18_HK_MULTIPLE (0xc<<3)
+#define GR18_USER_INT_EN (1<<2)
+#define GR18_A0000_FLUSH_EN (1<<1)
+#define GR18_SMM_EN (1<<0)
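Decoding a hotkey event from GR18 is a mask-and-compare against the values above; a sketch with hypothetical accessor and handler names:

	u8 gr18 = read_gr18();			/* hypothetical accessor */

	if ((gr18 & GR18_HOTKEY_MASK) == GR18_HK_TOGGLE_DISP)
		handle_display_toggle();	/* hypothetical handler */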
+
+/* Set by driver, cleared by VBIOS */
+#define SWF00_YRES_SHIFT 16
+#define SWF00_XRES_SHIFT 0
+#define SWF00_RES_MASK 0xffff
+
+/* Set by VBIOS at boot time and driver at runtime */
+#define SWF01_TV2_FORMAT_SHIFT 8
+#define SWF01_TV1_FORMAT_SHIFT 0
+#define SWF01_TV_FORMAT_MASK 0xffff
+
+#define SWF10_VBIOS_BLC_I2C_EN (1<<29)
+#define SWF10_GTT_OVERRIDE_EN (1<<28)
+#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */
+#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24)
+#define SWF10_OLD_TOGGLE 0x0
+#define SWF10_TOGGLE_LIST_1 0x1
+#define SWF10_TOGGLE_LIST_2 0x2
+#define SWF10_TOGGLE_LIST_3 0x3
+#define SWF10_TOGGLE_LIST_4 0x4
+#define SWF10_PANNING_EN (1<<23)
+#define SWF10_DRIVER_LOADED (1<<22)
+#define SWF10_EXTENDED_DESKTOP (1<<21)
+#define SWF10_EXCLUSIVE_MODE (1<<20)
+#define SWF10_OVERLAY_EN (1<<19)
+#define SWF10_PLANEB_HOLDOFF (1<<18)
+#define SWF10_PLANEA_HOLDOFF (1<<17)
+#define SWF10_VGA_HOLDOFF (1<<16)
+#define SWF10_ACTIVE_DISP_MASK 0xffff
+#define SWF10_PIPEB_LFP2 (1<<15)
+#define SWF10_PIPEB_EFP2 (1<<14)
+#define SWF10_PIPEB_TV2 (1<<13)
+#define SWF10_PIPEB_CRT2 (1<<12)
+#define SWF10_PIPEB_LFP (1<<11)
+#define SWF10_PIPEB_EFP (1<<10)
+#define SWF10_PIPEB_TV (1<<9)
+#define SWF10_PIPEB_CRT (1<<8)
+#define SWF10_PIPEA_LFP2 (1<<7)
+#define SWF10_PIPEA_EFP2 (1<<6)
+#define SWF10_PIPEA_TV2 (1<<5)
+#define SWF10_PIPEA_CRT2 (1<<4)
+#define SWF10_PIPEA_LFP (1<<3)
+#define SWF10_PIPEA_EFP (1<<2)
+#define SWF10_PIPEA_TV (1<<1)
+#define SWF10_PIPEA_CRT (1<<0)
+
+#define SWF11_MEMORY_SIZE_SHIFT 16
+#define SWF11_SV_TEST_EN (1<<15)
+#define SWF11_IS_AGP (1<<14)
+#define SWF11_DISPLAY_HOLDOFF (1<<13)
+#define SWF11_DPMS_REDUCED (1<<12)
+#define SWF11_IS_VBE_MODE (1<<11)
+#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */
+#define SWF11_DPMS_MASK 0x07
+#define SWF11_DPMS_OFF (1<<2)
+#define SWF11_DPMS_SUSPEND (1<<1)
+#define SWF11_DPMS_STANDBY (1<<0)
+#define SWF11_DPMS_ON 0
+
+#define SWF14_GFX_PFIT_EN (1<<31)
+#define SWF14_TEXT_PFIT_EN (1<<30)
+#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */
+#define SWF14_POPUP_EN (1<<28)
+#define SWF14_DISPLAY_HOLDOFF (1<<27)
+#define SWF14_DISP_DETECT_EN (1<<26)
+#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */
+#define SWF14_DRIVER_STATUS (1<<24)
+#define SWF14_OS_TYPE_WIN9X (1<<23)
+#define SWF14_OS_TYPE_WINNT (1<<22)
+/* 21:19 rsvd */
+#define SWF14_PM_TYPE_MASK 0x00070000
+#define SWF14_PM_ACPI_VIDEO (0x4 << 16)
+#define SWF14_PM_ACPI (0x3 << 16)
+#define SWF14_PM_APM_12 (0x2 << 16)
+#define SWF14_PM_APM_11 (0x1 << 16)
+#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */
+ /* if GR18 indicates a display switch */
+#define SWF14_DS_PIPEB_LFP2_EN (1<<15)
+#define SWF14_DS_PIPEB_EFP2_EN (1<<14)
+#define SWF14_DS_PIPEB_TV2_EN (1<<13)
+#define SWF14_DS_PIPEB_CRT2_EN (1<<12)
+#define SWF14_DS_PIPEB_LFP_EN (1<<11)
+#define SWF14_DS_PIPEB_EFP_EN (1<<10)
+#define SWF14_DS_PIPEB_TV_EN (1<<9)
+#define SWF14_DS_PIPEB_CRT_EN (1<<8)
+#define SWF14_DS_PIPEA_LFP2_EN (1<<7)
+#define SWF14_DS_PIPEA_EFP2_EN (1<<6)
+#define SWF14_DS_PIPEA_TV2_EN (1<<5)
+#define SWF14_DS_PIPEA_CRT2_EN (1<<4)
+#define SWF14_DS_PIPEA_LFP_EN (1<<3)
+#define SWF14_DS_PIPEA_EFP_EN (1<<2)
+#define SWF14_DS_PIPEA_TV_EN (1<<1)
+#define SWF14_DS_PIPEA_CRT_EN (1<<0)
+ /* if GR18 indicates a panel fitting request */
+#define SWF14_PFIT_EN (1<<0) /* 0 means disable */
+ /* if GR18 indicates an APM change request */
+#define SWF14_APM_HIBERNATE 0x4
+#define SWF14_APM_SUSPEND 0x3
+#define SWF14_APM_STANDBY 0x1
+#define SWF14_APM_RESTORE 0x0
+
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6
+#define DEVICE_TYPE_eDP 0x78C6
+
+#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
+#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
+#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
+#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
+#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
+#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
+#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
+#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
+#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
+#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
+#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
+#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
+#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
+#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
+#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
+
+/*
+ * Bits we care about when checking for DEVICE_TYPE_eDP
+ * Depending on the system, the other bits may or may not
+ * be set for eDP outputs.
+ */
+#define DEVICE_TYPE_eDP_BITS \
+ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
+ DEVICE_TYPE_MIPI_OUTPUT | \
+ DEVICE_TYPE_COMPOSITE_OUTPUT | \
+ DEVICE_TYPE_DUAL_CHANNEL | \
+ DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+ DEVICE_TYPE_VIDEO_SIGNALING | \
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+ DEVICE_TYPE_ANALOG_OUTPUT)
+
+#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
+ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
+ DEVICE_TYPE_MIPI_OUTPUT | \
+ DEVICE_TYPE_COMPOSITE_OUTPUT | \
+ DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+ DEVICE_TYPE_VIDEO_SIGNALING | \
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+ DEVICE_TYPE_DIGITAL_OUTPUT | \
+ DEVICE_TYPE_ANALOG_OUTPUT)
+
+/* define the DVO port for HDMI output type */
+#define DVO_B 1
+#define DVO_C 2
+#define DVO_D 3
+
+/* Possible values for the "DVO Port" field for versions >= 155: */
+#define DVO_PORT_HDMIA 0
+#define DVO_PORT_HDMIB 1
+#define DVO_PORT_HDMIC 2
+#define DVO_PORT_HDMID 3
+#define DVO_PORT_LVDS 4
+#define DVO_PORT_TV 5
+#define DVO_PORT_CRT 6
+#define DVO_PORT_DPB 7
+#define DVO_PORT_DPC 8
+#define DVO_PORT_DPD 9
+#define DVO_PORT_DPA 10
+#define DVO_PORT_DPE 11
+#define DVO_PORT_HDMIE 12
+#define DVO_PORT_MIPIA 21
+#define DVO_PORT_MIPIB 22
+#define DVO_PORT_MIPIC 23
+#define DVO_PORT_MIPID 24
+
+/* Block 52 contains the MIPI configuration: 6 mipi_config entries followed
+ * by 6 PPS data blocks, laid out in the struct below.
+ */
+#define MAX_MIPI_CONFIGURATIONS 6
+
+struct bdb_mipi_config {
+ struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
+ struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
+} __packed;
+
+/* Block 53 contains the MIPI sequences needed by the panel for enabling it.
+ * This block is variable in size and can contain a maximum of 6 blocks.
+ */
+struct bdb_mipi_sequence {
+ u8 version;
+ u8 data[0];
+} __packed;
+
+enum mipi_gpio_pin_index {
+ MIPI_GPIO_UNDEFINED = 0,
+ MIPI_GPIO_PANEL_ENABLE,
+ MIPI_GPIO_BL_ENABLE,
+ MIPI_GPIO_PWM_ENABLE,
+ MIPI_GPIO_RESET_N,
+ MIPI_GPIO_PWR_DOWN_R,
+ MIPI_GPIO_STDBY_RST_N,
+ MIPI_GPIO_MAX
+};
+
+#endif /* _INTEL_VBT_DEFS_H_ */
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 05229b960..82656654f 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -97,8 +97,8 @@ static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc)
return NULL;
}
-int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format,
- int hsync_pin, int vsync_pin)
+int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
+ int hsync_pin, int vsync_pin, u32 bus_flags)
{
struct imx_drm_crtc_helper_funcs *helper;
struct imx_drm_crtc *imx_crtc;
@@ -110,14 +110,17 @@ int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format,
helper = &imx_crtc->imx_drm_helper_funcs;
if (helper->set_interface_pix_fmt)
return helper->set_interface_pix_fmt(encoder->crtc,
- bus_format, hsync_pin, vsync_pin);
+ bus_format, hsync_pin, vsync_pin,
+ bus_flags);
return 0;
}
-EXPORT_SYMBOL_GPL(imx_drm_set_bus_format_pins);
+EXPORT_SYMBOL_GPL(imx_drm_set_bus_config);
int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format)
{
- return imx_drm_set_bus_format_pins(encoder, bus_format, 2, 3);
+ return imx_drm_set_bus_config(encoder, bus_format, 2, 3,
+ DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE);
}
EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
@@ -253,13 +256,6 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
if (ret)
goto err_kms;
- /*
- * with vblank_disable_allowed = true, vblank interrupt will be
- * disabled by drm timer once a current process gives up ownership
- * of vblank event. (after drm_vblank_put function is called)
- */
- drm->vblank_disable_allowed = true;
-
platform_set_drvdata(drm->platformdev, drm);
/* Now try and bind all our sub-components */
@@ -412,7 +408,7 @@ static struct drm_driver imx_drm_driver = {
.unload = imx_drm_driver_unload,
.lastclose = imx_drm_driver_lastclose,
.set_busid = drm_platform_set_busid,
- .gem_free_object = drm_gem_cma_free_object,
+ .gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index b0241b9d1..74320a172 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -19,7 +19,8 @@ struct imx_drm_crtc_helper_funcs {
int (*enable_vblank)(struct drm_crtc *crtc);
void (*disable_vblank)(struct drm_crtc *crtc);
int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
- u32 bus_format, int hsync_pin, int vsync_pin);
+ u32 bus_format, int hsync_pin, int vsync_pin,
+ u32 bus_flags);
const struct drm_crtc_helper_funcs *crtc_helper_funcs;
const struct drm_crtc_funcs *crtc_funcs;
};
@@ -41,8 +42,8 @@ void imx_drm_mode_config_init(struct drm_device *drm);
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
-int imx_drm_set_bus_format_pins(struct drm_encoder *encoder,
- u32 bus_format, int hsync_pin, int vsync_pin);
+int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
+ int hsync_pin, int vsync_pin, u32 bus_flags);
int imx_drm_set_bus_format(struct drm_encoder *encoder,
u32 bus_format);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index a58eee595..beff793bb 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -25,6 +25,7 @@
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <video/of_display_timing.h>
#include <video/of_videomode.h>
#include <linux/regmap.h>
#include <linux/videodev2.h>
@@ -59,6 +60,7 @@ struct imx_ldb_channel {
struct drm_encoder encoder;
struct drm_panel *panel;
struct device_node *child;
+ struct i2c_adapter *ddc;
int chno;
void *edid;
int edid_len;
@@ -107,6 +109,9 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
return num_modes;
}
+ if (!imx_ldb_ch->edid && imx_ldb_ch->ddc)
+ imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc);
+
if (imx_ldb_ch->edid) {
drm_mode_connector_update_edid_property(connector,
imx_ldb_ch->edid);
@@ -553,7 +558,8 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
for_each_child_of_node(np, child) {
struct imx_ldb_channel *channel;
- struct device_node *port;
+ struct device_node *ddc_node;
+ struct device_node *ep;
ret = of_property_read_u32(child, "reg", &i);
if (ret || i < 0 || i > 1)
@@ -576,33 +582,54 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
* The output port is port@4 with an external 4-port mux or
* port@2 with the internal 2-port mux.
*/
- port = of_graph_get_port_by_id(child, imx_ldb->lvds_mux ? 4 : 2);
- if (port) {
- struct device_node *endpoint, *remote;
-
- endpoint = of_get_child_by_name(port, "endpoint");
- if (endpoint) {
- remote = of_graph_get_remote_port_parent(endpoint);
- if (remote)
- channel->panel = of_drm_find_panel(remote);
- else
- return -EPROBE_DEFER;
- if (!channel->panel) {
- dev_err(dev, "panel not found: %s\n",
- remote->full_name);
- return -EPROBE_DEFER;
- }
+ ep = of_graph_get_endpoint_by_regs(child,
+ imx_ldb->lvds_mux ? 4 : 2,
+ -1);
+ if (ep) {
+ struct device_node *remote;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (remote)
+ channel->panel = of_drm_find_panel(remote);
+ else
+ return -EPROBE_DEFER;
+ of_node_put(remote);
+ if (!channel->panel) {
+ dev_err(dev, "panel not found: %s\n",
+ remote->full_name);
+ return -EPROBE_DEFER;
}
}
- edidp = of_get_property(child, "edid", &channel->edid_len);
- if (edidp) {
- channel->edid = kmemdup(edidp, channel->edid_len,
- GFP_KERNEL);
- } else if (!channel->panel) {
- ret = of_get_drm_display_mode(child, &channel->mode, 0);
- if (!ret)
- channel->mode_valid = 1;
+ ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
+ if (ddc_node) {
+ channel->ddc = of_find_i2c_adapter_by_node(ddc_node);
+ of_node_put(ddc_node);
+ if (!channel->ddc) {
+ dev_warn(dev, "failed to get ddc i2c adapter\n");
+ return -EPROBE_DEFER;
+ }
+ }
+
+ if (!channel->ddc) {
+			/* if no DDC is available, fall back to hardcoded EDID */
+ dev_dbg(dev, "no ddc available\n");
+
+ edidp = of_get_property(child, "edid",
+ &channel->edid_len);
+ if (edidp) {
+ channel->edid = kmemdup(edidp,
+ channel->edid_len,
+ GFP_KERNEL);
+ } else if (!channel->panel) {
+				/* fall back to the display-timings node */
+ ret = of_get_drm_display_mode(child,
+ &channel->mode,
+ OF_USE_NATIVE_MODE);
+ if (!ret)
+ channel->mode_valid = 1;
+ }
}
channel->bus_format = of_get_bus_format(dev, child);
@@ -647,6 +674,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master,
channel->encoder.funcs->destroy(&channel->encoder);
kfree(channel->edid);
+ i2c_put_adapter(channel->ddc);
}
}
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index ae7a9fb3b..baf788121 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -294,8 +294,10 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
switch (tve->mode) {
case TVE_MODE_VGA:
- imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24,
- tve->hsync_pin, tve->vsync_pin);
+ imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24,
+ tve->hsync_pin, tve->vsync_pin,
+ DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE);
break;
case TVE_MODE_TVOUT:
imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index b2c30b8d9..fc040417e 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -66,6 +66,7 @@ struct ipu_crtc {
struct ipu_flip_work *flip_work;
int irq;
u32 bus_format;
+ u32 bus_flags;
int di_hsync_pin;
int di_vsync_pin;
};
@@ -271,8 +272,10 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
else
sig_cfg.clkflags = 0;
- sig_cfg.enable_pol = 1;
- sig_cfg.clk_pol = 0;
+ sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW);
+ /* Default to driving pixel data on negative clock edges */
+ sig_cfg.clk_pol = !!(ipu_crtc->bus_flags &
+ DRM_BUS_FLAG_PIXDATA_POSEDGE);
sig_cfg.bus_format = ipu_crtc->bus_format;
sig_cfg.v_to_h_sync = 0;
sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
@@ -396,11 +399,12 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
}
static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
- u32 bus_format, int hsync_pin, int vsync_pin)
+ u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
ipu_crtc->bus_format = bus_format;
+ ipu_crtc->bus_flags = bus_flags;
ipu_crtc->di_hsync_pin = hsync_pin;
ipu_crtc->di_vsync_pin = vsync_pin;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 681ec6eb7..a4bb44118 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -38,6 +38,8 @@ static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_YUV420,
@@ -428,7 +430,6 @@ static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (crtc != plane->crtc)
dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n",
plane->crtc, crtc);
- plane->crtc = crtc;
if (!ipu_plane->enabled)
ipu_plane_enable(ipu_plane);
@@ -461,7 +462,7 @@ static void ipu_plane_destroy(struct drm_plane *plane)
kfree(ipu_plane);
}
-static struct drm_plane_funcs ipu_plane_funcs = {
+static const struct drm_plane_funcs ipu_plane_funcs = {
.update_plane = ipu_update_plane,
.disable_plane = ipu_disable_plane,
.destroy = ipu_plane_destroy,
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 363e2c774..2d1fd02cd 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -35,7 +35,6 @@ struct imx_parallel_display {
void *edid;
int edid_len;
u32 bus_format;
- int mode_valid;
struct drm_display_mode mode;
struct drm_panel *panel;
};
@@ -68,17 +67,6 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
num_modes = drm_add_edid_modes(connector, imxpd->edid);
}
- if (imxpd->mode_valid) {
- struct drm_display_mode *mode = drm_mode_create(connector->dev);
-
- if (!mode)
- return -EINVAL;
- drm_mode_copy(mode, &imxpd->mode);
- mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
- drm_mode_probed_add(connector, mode);
- num_modes++;
- }
-
if (np) {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
@@ -115,8 +103,8 @@ static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode)
static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
{
struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
-
- imx_drm_set_bus_format(encoder, imxpd->bus_format);
+ imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3,
+ imxpd->connector.display_info.bus_flags);
}
static void imx_pd_encoder_commit(struct drm_encoder *encoder)
@@ -203,7 +191,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = data;
struct device_node *np = dev->of_node;
- struct device_node *port;
+ struct device_node *ep;
const u8 *edidp;
struct imx_parallel_display *imxpd;
int ret;
@@ -230,18 +218,18 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
}
/* port@1 is the output port */
- port = of_graph_get_port_by_id(np, 1);
- if (port) {
- struct device_node *endpoint, *remote;
-
- endpoint = of_get_child_by_name(port, "endpoint");
- if (endpoint) {
- remote = of_graph_get_remote_port_parent(endpoint);
- if (remote)
- imxpd->panel = of_drm_find_panel(remote);
- if (!imxpd->panel)
- return -EPROBE_DEFER;
+ ep = of_graph_get_endpoint_by_regs(np, 1, -1);
+ if (ep) {
+ struct device_node *remote;
+
+ remote = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (remote) {
+ imxpd->panel = of_drm_find_panel(remote);
+ of_node_put(remote);
}
+ if (!imxpd->panel)
+ return -EPROBE_DEFER;
}
imxpd->dev = dev;
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
new file mode 100644
index 000000000..eeefc9718
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -0,0 +1,16 @@
+config DRM_MEDIATEK
+ tristate "DRM Support for Mediatek SoCs"
+ depends on DRM
+ depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST)
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ select IOMMU_DMA
+ select MEMORY
+ select MTK_SMI
+ help
+ Choose this option if you have a Mediatek SoC.
+ The module will be called mediatek-drm.
+ This driver provides kernel mode setting and
+ buffer management to userspace.
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
new file mode 100644
index 000000000..5fcf58e87
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -0,0 +1,14 @@
+mediatek-drm-y := mtk_disp_ovl.o \
+ mtk_disp_rdma.o \
+ mtk_drm_crtc.o \
+ mtk_drm_ddp.o \
+ mtk_drm_ddp_comp.o \
+ mtk_drm_drv.o \
+ mtk_drm_fb.o \
+ mtk_drm_gem.o \
+ mtk_drm_plane.o \
+ mtk_dsi.o \
+ mtk_mipi_tx.o \
+ mtk_dpi.o
+
+obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
new file mode 100644
index 000000000..8f62671fc
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_REG_OVL_INTEN 0x0004
+#define OVL_FME_CPL_INT BIT(1)
+#define DISP_REG_OVL_INTSTA 0x0008
+#define DISP_REG_OVL_EN 0x000c
+#define DISP_REG_OVL_RST 0x0014
+#define DISP_REG_OVL_ROI_SIZE 0x0020
+#define DISP_REG_OVL_ROI_BGCLR 0x0028
+#define DISP_REG_OVL_SRC_CON 0x002c
+#define DISP_REG_OVL_CON(n) (0x0030 + 0x20 * (n))
+#define DISP_REG_OVL_SRC_SIZE(n) (0x0038 + 0x20 * (n))
+#define DISP_REG_OVL_OFFSET(n) (0x003c + 0x20 * (n))
+#define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n))
+#define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n))
+#define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n))
+#define DISP_REG_OVL_ADDR(n) (0x0f40 + 0x20 * (n))
+
+#define OVL_RDMA_MEM_GMC 0x40402020
+
+#define OVL_CON_BYTE_SWAP BIT(24)
+#define OVL_CON_CLRFMT_RGB565 (0 << 12)
+#define OVL_CON_CLRFMT_RGB888 (1 << 12)
+#define OVL_CON_CLRFMT_RGBA8888 (2 << 12)
+#define OVL_CON_CLRFMT_ARGB8888 (3 << 12)
+#define OVL_CON_AEN BIT(8)
+#define OVL_CON_ALPHA 0xff
+
+/**
+ * struct mtk_disp_ovl - DISP_OVL driver structure
+ * @ddp_comp: structure containing type enum and hardware resources
+ * @crtc: associated crtc to report vblank events to
+ */
+struct mtk_disp_ovl {
+ struct mtk_ddp_comp ddp_comp;
+ struct drm_crtc *crtc;
+};
+
+static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
+{
+ struct mtk_disp_ovl *priv = dev_id;
+ struct mtk_ddp_comp *ovl = &priv->ddp_comp;
+
+ /* Clear frame completion interrupt */
+ writel(0x0, ovl->regs + DISP_REG_OVL_INTSTA);
+
+ if (!priv->crtc)
+ return IRQ_NONE;
+
+ mtk_crtc_ddp_irq(priv->crtc, ovl);
+
+ return IRQ_HANDLED;
+}
+
+static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
+ struct drm_crtc *crtc)
+{
+ struct mtk_disp_ovl *priv = container_of(comp, struct mtk_disp_ovl,
+ ddp_comp);
+
+ priv->crtc = crtc;
+ writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
+}
+
+static void mtk_ovl_disable_vblank(struct mtk_ddp_comp *comp)
+{
+ struct mtk_disp_ovl *priv = container_of(comp, struct mtk_disp_ovl,
+ ddp_comp);
+
+ priv->crtc = NULL;
+ writel_relaxed(0x0, comp->regs + DISP_REG_OVL_INTEN);
+}
+
+static void mtk_ovl_start(struct mtk_ddp_comp *comp)
+{
+ writel_relaxed(0x1, comp->regs + DISP_REG_OVL_EN);
+}
+
+static void mtk_ovl_stop(struct mtk_ddp_comp *comp)
+{
+ writel_relaxed(0x0, comp->regs + DISP_REG_OVL_EN);
+}
+
+static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh)
+{
+ if (w != 0 && h != 0)
+ writel_relaxed(h << 16 | w, comp->regs + DISP_REG_OVL_ROI_SIZE);
+ writel_relaxed(0x0, comp->regs + DISP_REG_OVL_ROI_BGCLR);
+
+ writel(0x1, comp->regs + DISP_REG_OVL_RST);
+ writel(0x0, comp->regs + DISP_REG_OVL_RST);
+}
+
+static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
+{
+ unsigned int reg;
+
+ writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
+ writel(OVL_RDMA_MEM_GMC, comp->regs + DISP_REG_OVL_RDMA_GMC(idx));
+
+ reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
+ reg = reg | BIT(idx);
+ writel(reg, comp->regs + DISP_REG_OVL_SRC_CON);
+}
+
+static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
+{
+ unsigned int reg;
+
+ reg = readl(comp->regs + DISP_REG_OVL_SRC_CON);
+ reg = reg & ~BIT(idx);
+ writel(reg, comp->regs + DISP_REG_OVL_SRC_CON);
+
+ writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
+}
+
+static unsigned int ovl_fmt_convert(unsigned int fmt)
+{
+ switch (fmt) {
+ default:
+ case DRM_FORMAT_RGB565:
+ return OVL_CON_CLRFMT_RGB565;
+ case DRM_FORMAT_BGR565:
+ return OVL_CON_CLRFMT_RGB565 | OVL_CON_BYTE_SWAP;
+ case DRM_FORMAT_RGB888:
+ return OVL_CON_CLRFMT_RGB888;
+ case DRM_FORMAT_BGR888:
+ return OVL_CON_CLRFMT_RGB888 | OVL_CON_BYTE_SWAP;
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_RGBA8888:
+ return OVL_CON_CLRFMT_ARGB8888;
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_BGRA8888:
+ return OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return OVL_CON_CLRFMT_RGBA8888;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP;
+ }
+}
+
+static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct mtk_plane_state *state)
+{
+ struct mtk_plane_pending_state *pending = &state->pending;
+ unsigned int addr = pending->addr;
+ unsigned int pitch = pending->pitch & 0xffff;
+ unsigned int fmt = pending->format;
+ unsigned int offset = (pending->y << 16) | pending->x;
+ unsigned int src_size = (pending->height << 16) | pending->width;
+ unsigned int con;
+
+ if (!pending->enable)
+ mtk_ovl_layer_off(comp, idx);
+
+ con = ovl_fmt_convert(fmt);
+ if (idx != 0)
+ con |= OVL_CON_AEN | OVL_CON_ALPHA;
+
+ writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx));
+ writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx));
+ writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx));
+ writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx));
+ writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(idx));
+
+ if (pending->enable)
+ mtk_ovl_layer_on(comp, idx);
+}
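
For concreteness, a sketch of the values the function above would program
for a hypothetical 1920x1080 DRM_FORMAT_ARGB8888 plane at (0,0) on layer 1
(illustration only, not part of this patch):

	unsigned int con = OVL_CON_CLRFMT_RGBA8888 |	/* ARGB8888 mapping  */
			   OVL_CON_AEN | OVL_CON_ALPHA;	/* alpha, layer != 0 */
	unsigned int pitch = 1920 * 4;			/* bytes per line    */
	unsigned int src_size = (1080 << 16) | 1920;	/* height | width    */
	unsigned int offset = (0 << 16) | 0;		/* y | x             */
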
+
+static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
+ .config = mtk_ovl_config,
+ .start = mtk_ovl_start,
+ .stop = mtk_ovl_stop,
+ .enable_vblank = mtk_ovl_enable_vblank,
+ .disable_vblank = mtk_ovl_disable_vblank,
+ .layer_on = mtk_ovl_layer_on,
+ .layer_off = mtk_ovl_layer_off,
+ .layer_config = mtk_ovl_layer_config,
+};
+
+static int mtk_disp_ovl_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct mtk_disp_ovl *priv = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register component %s: %d\n",
+ dev->of_node->full_name, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_disp_ovl_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct mtk_disp_ovl *priv = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+
+ mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
+}
+
+static const struct component_ops mtk_disp_ovl_component_ops = {
+ .bind = mtk_disp_ovl_bind,
+ .unbind = mtk_disp_ovl_unbind,
+};
+
+static int mtk_disp_ovl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_disp_ovl *priv;
+ int comp_id;
+ int irq;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
+ IRQF_TRIGGER_NONE, dev_name(dev), priv);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
+ return ret;
+ }
+
+ comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
+ if (comp_id < 0) {
+ dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
+ return comp_id;
+ }
+
+ ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
+ &mtk_disp_ovl_funcs);
+ if (ret) {
+ dev_err(dev, "Failed to initialize component: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = component_add(dev, &mtk_disp_ovl_component_ops);
+ if (ret)
+ dev_err(dev, "Failed to add component: %d\n", ret);
+
+ return ret;
+}
+
+static int mtk_disp_ovl_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mtk_disp_ovl_component_ops);
+
+ return 0;
+}
+
+static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
+ { .compatible = "mediatek,mt8173-disp-ovl", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
+
+struct platform_driver mtk_disp_ovl_driver = {
+ .probe = mtk_disp_ovl_probe,
+ .remove = mtk_disp_ovl_remove,
+ .driver = {
+ .name = "mediatek-disp-ovl",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_disp_ovl_driver_dt_match,
+ },
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
new file mode 100644
index 000000000..5fb80cbe4
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_REG_RDMA_INT_ENABLE 0x0000
+#define DISP_REG_RDMA_INT_STATUS 0x0004
+#define RDMA_TARGET_LINE_INT BIT(5)
+#define RDMA_FIFO_UNDERFLOW_INT BIT(4)
+#define RDMA_EOF_ABNORMAL_INT BIT(3)
+#define RDMA_FRAME_END_INT BIT(2)
+#define RDMA_FRAME_START_INT BIT(1)
+#define RDMA_REG_UPDATE_INT BIT(0)
+#define DISP_REG_RDMA_GLOBAL_CON 0x0010
+#define RDMA_ENGINE_EN BIT(0)
+#define DISP_REG_RDMA_SIZE_CON_0 0x0014
+#define DISP_REG_RDMA_SIZE_CON_1 0x0018
+#define DISP_REG_RDMA_TARGET_LINE 0x001c
+#define DISP_REG_RDMA_FIFO_CON 0x0040
+#define RDMA_FIFO_UNDERFLOW_EN BIT(31)
+#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16)
+#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16)
+
+/**
+ * struct mtk_disp_rdma - DISP_RDMA driver structure
+ * @ddp_comp: structure containing type enum and hardware resources
+ * @crtc: associated crtc to report irq events to
+ */
+struct mtk_disp_rdma {
+ struct mtk_ddp_comp ddp_comp;
+ struct drm_crtc *crtc;
+};
+
+static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id)
+{
+ struct mtk_disp_rdma *priv = dev_id;
+ struct mtk_ddp_comp *rdma = &priv->ddp_comp;
+
+ /* Clear frame completion interrupt */
+ writel(0x0, rdma->regs + DISP_REG_RDMA_INT_STATUS);
+
+ if (!priv->crtc)
+ return IRQ_NONE;
+
+ mtk_crtc_ddp_irq(priv->crtc, rdma);
+
+ return IRQ_HANDLED;
+}
+
+static void rdma_update_bits(struct mtk_ddp_comp *comp, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ unsigned int tmp = readl(comp->regs + reg);
+
+ tmp = (tmp & ~mask) | (val & mask);
+ writel(tmp, comp->regs + reg);
+}
+
+static void mtk_rdma_enable_vblank(struct mtk_ddp_comp *comp,
+ struct drm_crtc *crtc)
+{
+ struct mtk_disp_rdma *priv = container_of(comp, struct mtk_disp_rdma,
+ ddp_comp);
+
+ priv->crtc = crtc;
+ rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT,
+ RDMA_FRAME_END_INT);
+}
+
+static void mtk_rdma_disable_vblank(struct mtk_ddp_comp *comp)
+{
+ struct mtk_disp_rdma *priv = container_of(comp, struct mtk_disp_rdma,
+ ddp_comp);
+
+ priv->crtc = NULL;
+ rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0);
+}
+
+static void mtk_rdma_start(struct mtk_ddp_comp *comp)
+{
+ rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN,
+ RDMA_ENGINE_EN);
+}
+
+static void mtk_rdma_stop(struct mtk_ddp_comp *comp)
+{
+ rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON, RDMA_ENGINE_EN, 0);
+}
+
+static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
+ unsigned int height, unsigned int vrefresh)
+{
+ unsigned int threshold;
+ unsigned int reg;
+
+ rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width);
+ rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height);
+
+ /*
+ * Enable FIFO underflow since DSI and DPI can't be blocked.
+ * Keep the FIFO pseudo size reset default of 8 KiB. Set the
+ * output threshold to 6 microseconds with 7/6 overhead to
+ * account for blanking, and with a pixel depth of 4 bytes:
+ */
+ threshold = width * height * vrefresh * 4 * 7 / 1000000;
+ reg = RDMA_FIFO_UNDERFLOW_EN |
+ RDMA_FIFO_PSEUDO_SIZE(SZ_8K) |
+ RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
+ writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
+}
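
The threshold arithmetic above can be sanity-checked with a hypothetical
1080p60 mode (the numbers are illustrative, not taken from this patch):

	/* 1920 * 1080 * 60 * 4 * 7 / 1000000 = 3483 bytes of headroom,   */
	/* which RDMA_OUTPUT_VALID_FIFO_THRESHOLD() stores as 3483 / 16 = */
	/* 217 sixteen-byte units alongside the 8 KiB pseudo FIFO size.   */
	unsigned int threshold = 1920u * 1080 * 60 * 4 * 7 / 1000000;
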
+
+static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
+ .config = mtk_rdma_config,
+ .start = mtk_rdma_start,
+ .stop = mtk_rdma_stop,
+ .enable_vblank = mtk_rdma_enable_vblank,
+ .disable_vblank = mtk_rdma_disable_vblank,
+};
+
+static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct mtk_disp_rdma *priv = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register component %s: %d\n",
+ dev->of_node->full_name, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mtk_disp_rdma_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct mtk_disp_rdma *priv = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+
+ mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
+}
+
+static const struct component_ops mtk_disp_rdma_component_ops = {
+ .bind = mtk_disp_rdma_bind,
+ .unbind = mtk_disp_rdma_unbind,
+};
+
+static int mtk_disp_rdma_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_disp_rdma *priv;
+ int comp_id;
+ int irq;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_RDMA);
+ if (comp_id < 0) {
+ dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
+ return comp_id;
+ }
+
+ ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
+ &mtk_disp_rdma_funcs);
+ if (ret) {
+ dev_err(dev, "Failed to initialize component: %d\n", ret);
+ return ret;
+ }
+
+ /* Disable and clear pending interrupts */
+ writel(0x0, priv->ddp_comp.regs + DISP_REG_RDMA_INT_ENABLE);
+ writel(0x0, priv->ddp_comp.regs + DISP_REG_RDMA_INT_STATUS);
+
+ ret = devm_request_irq(dev, irq, mtk_disp_rdma_irq_handler,
+ IRQF_TRIGGER_NONE, dev_name(dev), priv);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = component_add(dev, &mtk_disp_rdma_component_ops);
+ if (ret)
+ dev_err(dev, "Failed to add component: %d\n", ret);
+
+ return ret;
+}
+
+static int mtk_disp_rdma_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mtk_disp_rdma_component_ops);
+
+ return 0;
+}
+
+static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
+ { .compatible = "mediatek,mt8173-disp-rdma", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
+
+struct platform_driver mtk_disp_rdma_driver = {
+ .probe = mtk_disp_rdma_probe,
+ .remove = mtk_disp_rdma_remove,
+ .driver = {
+ .name = "mediatek-disp-rdma",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_disp_rdma_driver_dt_match,
+ },
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
new file mode 100644
index 000000000..0186e500d
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -0,0 +1,764 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <linux/kernel.h>
+#include <linux/component.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+
+#include "mtk_dpi_regs.h"
+#include "mtk_drm_ddp_comp.h"
+
+enum mtk_dpi_out_bit_num {
+ MTK_DPI_OUT_BIT_NUM_8BITS,
+ MTK_DPI_OUT_BIT_NUM_10BITS,
+ MTK_DPI_OUT_BIT_NUM_12BITS,
+ MTK_DPI_OUT_BIT_NUM_16BITS
+};
+
+enum mtk_dpi_out_yc_map {
+ MTK_DPI_OUT_YC_MAP_RGB,
+ MTK_DPI_OUT_YC_MAP_CYCY,
+ MTK_DPI_OUT_YC_MAP_YCYC,
+ MTK_DPI_OUT_YC_MAP_CY,
+ MTK_DPI_OUT_YC_MAP_YC
+};
+
+enum mtk_dpi_out_channel_swap {
+ MTK_DPI_OUT_CHANNEL_SWAP_RGB,
+ MTK_DPI_OUT_CHANNEL_SWAP_GBR,
+ MTK_DPI_OUT_CHANNEL_SWAP_BRG,
+ MTK_DPI_OUT_CHANNEL_SWAP_RBG,
+ MTK_DPI_OUT_CHANNEL_SWAP_GRB,
+ MTK_DPI_OUT_CHANNEL_SWAP_BGR
+};
+
+enum mtk_dpi_out_color_format {
+ MTK_DPI_COLOR_FORMAT_RGB,
+ MTK_DPI_COLOR_FORMAT_RGB_FULL,
+ MTK_DPI_COLOR_FORMAT_YCBCR_444,
+ MTK_DPI_COLOR_FORMAT_YCBCR_422,
+ MTK_DPI_COLOR_FORMAT_XV_YCC,
+ MTK_DPI_COLOR_FORMAT_YCBCR_444_FULL,
+ MTK_DPI_COLOR_FORMAT_YCBCR_422_FULL
+};
+
+struct mtk_dpi {
+ struct mtk_ddp_comp ddp_comp;
+ struct drm_encoder encoder;
+ void __iomem *regs;
+ struct device *dev;
+ struct clk *engine_clk;
+ struct clk *pixel_clk;
+ struct clk *tvd_clk;
+ int irq;
+ struct drm_display_mode mode;
+ enum mtk_dpi_out_color_format color_format;
+ enum mtk_dpi_out_yc_map yc_map;
+ enum mtk_dpi_out_bit_num bit_num;
+ enum mtk_dpi_out_channel_swap channel_swap;
+ bool power_sta;
+ u8 power_ctl;
+};
+
+static inline struct mtk_dpi *mtk_dpi_from_encoder(struct drm_encoder *e)
+{
+ return container_of(e, struct mtk_dpi, encoder);
+}
+
+enum mtk_dpi_polarity {
+ MTK_DPI_POLARITY_RISING,
+ MTK_DPI_POLARITY_FALLING,
+};
+
+enum mtk_dpi_power_ctl {
+ DPI_POWER_START = BIT(0),
+ DPI_POWER_ENABLE = BIT(1),
+};
+
+struct mtk_dpi_polarities {
+ enum mtk_dpi_polarity de_pol;
+ enum mtk_dpi_polarity ck_pol;
+ enum mtk_dpi_polarity hsync_pol;
+ enum mtk_dpi_polarity vsync_pol;
+};
+
+struct mtk_dpi_sync_param {
+ u32 sync_width;
+ u32 front_porch;
+ u32 back_porch;
+ bool shift_half_line;
+};
+
+struct mtk_dpi_yc_limit {
+ u16 y_top;
+ u16 y_bottom;
+ u16 c_top;
+ u16 c_bottom;
+};
+
+static void mtk_dpi_mask(struct mtk_dpi *dpi, u32 offset, u32 val, u32 mask)
+{
+ u32 tmp = readl(dpi->regs + offset) & ~mask;
+
+ tmp |= (val & mask);
+ writel(tmp, dpi->regs + offset);
+}
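
All register programming below goes through this read-modify-write helper.
As a usage sketch (the value is hypothetical), a 44-pixel hsync width would
be written as:

	mtk_dpi_mask(dpi, DPI_TGEN_HWIDTH, 44 << HPW, HPW_MASK);
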
+
+static void mtk_dpi_sw_reset(struct mtk_dpi *dpi, bool reset)
+{
+ mtk_dpi_mask(dpi, DPI_RET, reset ? RST : 0, RST);
+}
+
+static void mtk_dpi_enable(struct mtk_dpi *dpi)
+{
+ mtk_dpi_mask(dpi, DPI_EN, EN, EN);
+}
+
+static void mtk_dpi_disable(struct mtk_dpi *dpi)
+{
+ mtk_dpi_mask(dpi, DPI_EN, 0, EN);
+}
+
+static void mtk_dpi_config_hsync(struct mtk_dpi *dpi,
+ struct mtk_dpi_sync_param *sync)
+{
+ mtk_dpi_mask(dpi, DPI_TGEN_HWIDTH,
+ sync->sync_width << HPW, HPW_MASK);
+ mtk_dpi_mask(dpi, DPI_TGEN_HPORCH,
+ sync->back_porch << HBP, HBP_MASK);
+ mtk_dpi_mask(dpi, DPI_TGEN_HPORCH, sync->front_porch << HFP,
+ HFP_MASK);
+}
+
+static void mtk_dpi_config_vsync(struct mtk_dpi *dpi,
+ struct mtk_dpi_sync_param *sync,
+ u32 width_addr, u32 porch_addr)
+{
+ mtk_dpi_mask(dpi, width_addr,
+ sync->sync_width << VSYNC_WIDTH_SHIFT,
+ VSYNC_WIDTH_MASK);
+ mtk_dpi_mask(dpi, width_addr,
+ sync->shift_half_line << VSYNC_HALF_LINE_SHIFT,
+ VSYNC_HALF_LINE_MASK);
+ mtk_dpi_mask(dpi, porch_addr,
+ sync->back_porch << VSYNC_BACK_PORCH_SHIFT,
+ VSYNC_BACK_PORCH_MASK);
+ mtk_dpi_mask(dpi, porch_addr,
+ sync->front_porch << VSYNC_FRONT_PORCH_SHIFT,
+ VSYNC_FRONT_PORCH_MASK);
+}
+
+static void mtk_dpi_config_vsync_lodd(struct mtk_dpi *dpi,
+ struct mtk_dpi_sync_param *sync)
+{
+ mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH, DPI_TGEN_VPORCH);
+}
+
+static void mtk_dpi_config_vsync_leven(struct mtk_dpi *dpi,
+ struct mtk_dpi_sync_param *sync)
+{
+ mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH_LEVEN,
+ DPI_TGEN_VPORCH_LEVEN);
+}
+
+static void mtk_dpi_config_vsync_rodd(struct mtk_dpi *dpi,
+ struct mtk_dpi_sync_param *sync)
+{
+ mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH_RODD,
+ DPI_TGEN_VPORCH_RODD);
+}
+
+static void mtk_dpi_config_vsync_reven(struct mtk_dpi *dpi,
+ struct mtk_dpi_sync_param *sync)
+{
+ mtk_dpi_config_vsync(dpi, sync, DPI_TGEN_VWIDTH_REVEN,
+ DPI_TGEN_VPORCH_REVEN);
+}
+
+static void mtk_dpi_config_pol(struct mtk_dpi *dpi,
+ struct mtk_dpi_polarities *dpi_pol)
+{
+ unsigned int pol;
+
+ pol = (dpi_pol->ck_pol == MTK_DPI_POLARITY_RISING ? 0 : CK_POL) |
+ (dpi_pol->de_pol == MTK_DPI_POLARITY_RISING ? 0 : DE_POL) |
+ (dpi_pol->hsync_pol == MTK_DPI_POLARITY_RISING ? 0 : HSYNC_POL) |
+ (dpi_pol->vsync_pol == MTK_DPI_POLARITY_RISING ? 0 : VSYNC_POL);
+ mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, pol,
+ CK_POL | DE_POL | HSYNC_POL | VSYNC_POL);
+}
+
+static void mtk_dpi_config_3d(struct mtk_dpi *dpi, bool en_3d)
+{
+ mtk_dpi_mask(dpi, DPI_CON, en_3d ? TDFP_EN : 0, TDFP_EN);
+}
+
+static void mtk_dpi_config_interface(struct mtk_dpi *dpi, bool inter)
+{
+ mtk_dpi_mask(dpi, DPI_CON, inter ? INTL_EN : 0, INTL_EN);
+}
+
+static void mtk_dpi_config_fb_size(struct mtk_dpi *dpi, u32 width, u32 height)
+{
+ mtk_dpi_mask(dpi, DPI_SIZE, width << HSIZE, HSIZE_MASK);
+ mtk_dpi_mask(dpi, DPI_SIZE, height << VSIZE, VSIZE_MASK);
+}
+
+static void mtk_dpi_config_channel_limit(struct mtk_dpi *dpi,
+ struct mtk_dpi_yc_limit *limit)
+{
+ mtk_dpi_mask(dpi, DPI_Y_LIMIT, limit->y_bottom << Y_LIMINT_BOT,
+ Y_LIMINT_BOT_MASK);
+ mtk_dpi_mask(dpi, DPI_Y_LIMIT, limit->y_top << Y_LIMINT_TOP,
+ Y_LIMINT_TOP_MASK);
+ mtk_dpi_mask(dpi, DPI_C_LIMIT, limit->c_bottom << C_LIMIT_BOT,
+ C_LIMIT_BOT_MASK);
+ mtk_dpi_mask(dpi, DPI_C_LIMIT, limit->c_top << C_LIMIT_TOP,
+ C_LIMIT_TOP_MASK);
+}
+
+static void mtk_dpi_config_bit_num(struct mtk_dpi *dpi,
+ enum mtk_dpi_out_bit_num num)
+{
+ u32 val;
+
+ switch (num) {
+ case MTK_DPI_OUT_BIT_NUM_8BITS:
+ val = OUT_BIT_8;
+ break;
+ case MTK_DPI_OUT_BIT_NUM_10BITS:
+ val = OUT_BIT_10;
+ break;
+ case MTK_DPI_OUT_BIT_NUM_12BITS:
+ val = OUT_BIT_12;
+ break;
+ case MTK_DPI_OUT_BIT_NUM_16BITS:
+ val = OUT_BIT_16;
+ break;
+ default:
+ val = OUT_BIT_8;
+ break;
+ }
+ mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, val << OUT_BIT,
+ OUT_BIT_MASK);
+}
+
+static void mtk_dpi_config_yc_map(struct mtk_dpi *dpi,
+ enum mtk_dpi_out_yc_map map)
+{
+ u32 val;
+
+ switch (map) {
+ case MTK_DPI_OUT_YC_MAP_RGB:
+ val = YC_MAP_RGB;
+ break;
+ case MTK_DPI_OUT_YC_MAP_CYCY:
+ val = YC_MAP_CYCY;
+ break;
+ case MTK_DPI_OUT_YC_MAP_YCYC:
+ val = YC_MAP_YCYC;
+ break;
+ case MTK_DPI_OUT_YC_MAP_CY:
+ val = YC_MAP_CY;
+ break;
+ case MTK_DPI_OUT_YC_MAP_YC:
+ val = YC_MAP_YC;
+ break;
+ default:
+ val = YC_MAP_RGB;
+ break;
+ }
+
+ mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, val << YC_MAP, YC_MAP_MASK);
+}
+
+static void mtk_dpi_config_channel_swap(struct mtk_dpi *dpi,
+ enum mtk_dpi_out_channel_swap swap)
+{
+ u32 val;
+
+ switch (swap) {
+ case MTK_DPI_OUT_CHANNEL_SWAP_RGB:
+ val = SWAP_RGB;
+ break;
+ case MTK_DPI_OUT_CHANNEL_SWAP_GBR:
+ val = SWAP_GBR;
+ break;
+ case MTK_DPI_OUT_CHANNEL_SWAP_BRG:
+ val = SWAP_BRG;
+ break;
+ case MTK_DPI_OUT_CHANNEL_SWAP_RBG:
+ val = SWAP_RBG;
+ break;
+ case MTK_DPI_OUT_CHANNEL_SWAP_GRB:
+ val = SWAP_GRB;
+ break;
+ case MTK_DPI_OUT_CHANNEL_SWAP_BGR:
+ val = SWAP_BGR;
+ break;
+ default:
+ val = SWAP_RGB;
+ break;
+ }
+
+ mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING, val << CH_SWAP, CH_SWAP_MASK);
+}
+
+static void mtk_dpi_config_yuv422_enable(struct mtk_dpi *dpi, bool enable)
+{
+ mtk_dpi_mask(dpi, DPI_CON, enable ? YUV422_EN : 0, YUV422_EN);
+}
+
+static void mtk_dpi_config_csc_enable(struct mtk_dpi *dpi, bool enable)
+{
+ mtk_dpi_mask(dpi, DPI_CON, enable ? CSC_ENABLE : 0, CSC_ENABLE);
+}
+
+static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable)
+{
+ mtk_dpi_mask(dpi, DPI_CON, enable ? IN_RB_SWAP : 0, IN_RB_SWAP);
+}
+
+static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi)
+{
+ mtk_dpi_mask(dpi, DPI_H_FRE_CON, H_FRE_2N, H_FRE_2N);
+}
+
+static void mtk_dpi_config_color_format(struct mtk_dpi *dpi,
+ enum mtk_dpi_out_color_format format)
+{
+ if ((format == MTK_DPI_COLOR_FORMAT_YCBCR_444) ||
+ (format == MTK_DPI_COLOR_FORMAT_YCBCR_444_FULL)) {
+ mtk_dpi_config_yuv422_enable(dpi, false);
+ mtk_dpi_config_csc_enable(dpi, true);
+ mtk_dpi_config_swap_input(dpi, false);
+ mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_BGR);
+ } else if ((format == MTK_DPI_COLOR_FORMAT_YCBCR_422) ||
+ (format == MTK_DPI_COLOR_FORMAT_YCBCR_422_FULL)) {
+ mtk_dpi_config_yuv422_enable(dpi, true);
+ mtk_dpi_config_csc_enable(dpi, true);
+ mtk_dpi_config_swap_input(dpi, true);
+ mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
+ } else {
+ mtk_dpi_config_yuv422_enable(dpi, false);
+ mtk_dpi_config_csc_enable(dpi, false);
+ mtk_dpi_config_swap_input(dpi, false);
+ mtk_dpi_config_channel_swap(dpi, MTK_DPI_OUT_CHANNEL_SWAP_RGB);
+ }
+}
+
+static void mtk_dpi_power_off(struct mtk_dpi *dpi, enum mtk_dpi_power_ctl pctl)
+{
+ dpi->power_ctl &= ~pctl;
+
+ if ((dpi->power_ctl & DPI_POWER_START) ||
+ (dpi->power_ctl & DPI_POWER_ENABLE))
+ return;
+
+ if (!dpi->power_sta)
+ return;
+
+ mtk_dpi_disable(dpi);
+ clk_disable_unprepare(dpi->pixel_clk);
+ clk_disable_unprepare(dpi->engine_clk);
+ dpi->power_sta = false;
+}
+
+static int mtk_dpi_power_on(struct mtk_dpi *dpi, enum mtk_dpi_power_ctl pctl)
+{
+ int ret;
+
+ dpi->power_ctl |= pctl;
+
+ if (!(dpi->power_ctl & DPI_POWER_START) &&
+ !(dpi->power_ctl & DPI_POWER_ENABLE))
+ return 0;
+
+ if (dpi->power_sta)
+ return 0;
+
+ ret = clk_prepare_enable(dpi->engine_clk);
+ if (ret) {
+ dev_err(dpi->dev, "Failed to enable engine clock: %d\n", ret);
+ goto err_eng;
+ }
+
+ ret = clk_prepare_enable(dpi->pixel_clk);
+ if (ret) {
+ dev_err(dpi->dev, "Failed to enable pixel clock: %d\n", ret);
+ goto err_pixel;
+ }
+
+ mtk_dpi_enable(dpi);
+ dpi->power_sta = true;
+ return 0;
+
+err_pixel:
+ clk_disable_unprepare(dpi->engine_clk);
+err_eng:
+ dpi->power_ctl &= ~pctl;
+ return ret;
+}
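
power_ctl acts as a two-bit reference mask: the block stays powered while
either DPI_POWER_START (held by the ddp path) or DPI_POWER_ENABLE (held by
the encoder) is set. A sketch of the intended sequencing (illustration only):

	mtk_dpi_power_on(dpi, DPI_POWER_START);		/* clocks on, DPI enabled */
	mtk_dpi_power_on(dpi, DPI_POWER_ENABLE);	/* no-op, already powered */
	mtk_dpi_power_off(dpi, DPI_POWER_START);	/* stays on: ENABLE held  */
	mtk_dpi_power_off(dpi, DPI_POWER_ENABLE);	/* last user, power off   */
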
+
+static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
+ struct drm_display_mode *mode)
+{
+ struct mtk_dpi_yc_limit limit;
+ struct mtk_dpi_polarities dpi_pol;
+ struct mtk_dpi_sync_param hsync;
+ struct mtk_dpi_sync_param vsync_lodd = { 0 };
+ struct mtk_dpi_sync_param vsync_leven = { 0 };
+ struct mtk_dpi_sync_param vsync_rodd = { 0 };
+ struct mtk_dpi_sync_param vsync_reven = { 0 };
+ unsigned long pix_rate;
+ unsigned long pll_rate;
+ unsigned int factor;
+
+ pix_rate = 1000UL * mode->clock;
+ if (mode->clock <= 74000)
+ factor = 8 * 3;
+ else
+ factor = 4 * 3;
+ pll_rate = pix_rate * factor;
+
+ dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n",
+ pll_rate, pix_rate);
+
+ clk_set_rate(dpi->tvd_clk, pll_rate);
+ pll_rate = clk_get_rate(dpi->tvd_clk);
+
+ pix_rate = pll_rate / factor;
+ clk_set_rate(dpi->pixel_clk, pix_rate);
+ pix_rate = clk_get_rate(dpi->pixel_clk);
+
+ dev_dbg(dpi->dev, "Got PLL %lu Hz, pixel clock %lu Hz\n",
+ pll_rate, pix_rate);
+
+ limit.c_bottom = 0x0010;
+ limit.c_top = 0x0FE0;
+ limit.y_bottom = 0x0010;
+ limit.y_top = 0x0FE0;
+
+ dpi_pol.ck_pol = MTK_DPI_POLARITY_FALLING;
+ dpi_pol.de_pol = MTK_DPI_POLARITY_RISING;
+ dpi_pol.hsync_pol = mode->flags & DRM_MODE_FLAG_PHSYNC ?
+ MTK_DPI_POLARITY_FALLING : MTK_DPI_POLARITY_RISING;
+ dpi_pol.vsync_pol = mode->flags & DRM_MODE_FLAG_PVSYNC ?
+ MTK_DPI_POLARITY_FALLING : MTK_DPI_POLARITY_RISING;
+
+ hsync.sync_width = mode->hsync_end - mode->hsync_start;
+ hsync.back_porch = mode->htotal - mode->hsync_end;
+ hsync.front_porch = mode->hsync_start - mode->hdisplay;
+ hsync.shift_half_line = false;
+
+ vsync_lodd.sync_width = mode->vsync_end - mode->vsync_start;
+ vsync_lodd.back_porch = mode->vtotal - mode->vsync_end;
+ vsync_lodd.front_porch = mode->vsync_start - mode->vdisplay;
+ vsync_lodd.shift_half_line = false;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
+ mode->flags & DRM_MODE_FLAG_3D_MASK) {
+ vsync_leven = vsync_lodd;
+ vsync_rodd = vsync_lodd;
+ vsync_reven = vsync_lodd;
+ vsync_leven.shift_half_line = true;
+ vsync_reven.shift_half_line = true;
+ } else if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
+ !(mode->flags & DRM_MODE_FLAG_3D_MASK)) {
+ vsync_leven = vsync_lodd;
+ vsync_leven.shift_half_line = true;
+ } else if (!(mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+ mode->flags & DRM_MODE_FLAG_3D_MASK) {
+ vsync_rodd = vsync_lodd;
+ }
+ mtk_dpi_sw_reset(dpi, true);
+ mtk_dpi_config_pol(dpi, &dpi_pol);
+
+ mtk_dpi_config_hsync(dpi, &hsync);
+ mtk_dpi_config_vsync_lodd(dpi, &vsync_lodd);
+ mtk_dpi_config_vsync_rodd(dpi, &vsync_rodd);
+ mtk_dpi_config_vsync_leven(dpi, &vsync_leven);
+ mtk_dpi_config_vsync_reven(dpi, &vsync_reven);
+
+ mtk_dpi_config_3d(dpi, !!(mode->flags & DRM_MODE_FLAG_3D_MASK));
+ mtk_dpi_config_interface(dpi, !!(mode->flags &
+ DRM_MODE_FLAG_INTERLACE));
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ mtk_dpi_config_fb_size(dpi, mode->hdisplay, mode->vdisplay / 2);
+ else
+ mtk_dpi_config_fb_size(dpi, mode->hdisplay, mode->vdisplay);
+
+ mtk_dpi_config_channel_limit(dpi, &limit);
+ mtk_dpi_config_bit_num(dpi, dpi->bit_num);
+ mtk_dpi_config_channel_swap(dpi, dpi->channel_swap);
+ mtk_dpi_config_yc_map(dpi, dpi->yc_map);
+ mtk_dpi_config_color_format(dpi, dpi->color_format);
+ mtk_dpi_config_2n_h_fre(dpi);
+ mtk_dpi_sw_reset(dpi, false);
+
+ return 0;
+}
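
A worked example for a hypothetical CEA 1080p60 mode (pixel clock
148500 kHz, horizontal timings 1920 2008 2052 2200), not taken from this
patch: the clock is above 74000, so factor = 4 * 3 = 12 and the TVD PLL is
asked for 148.5 MHz * 12 = 1782 MHz, while the sync parameters come out as

	hsync.sync_width = 2052 - 2008;		/* 44 pixels  */
	hsync.front_porch = 2008 - 1920;	/* 88 pixels  */
	hsync.back_porch = 2200 - 2052;		/* 148 pixels */
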
+
+static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
+ .destroy = mtk_dpi_encoder_destroy,
+};
+
+static bool mtk_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mtk_dpi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+
+ drm_mode_copy(&dpi->mode, adjusted_mode);
+}
+
+static void mtk_dpi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+
+ mtk_dpi_power_off(dpi, DPI_POWER_ENABLE);
+}
+
+static void mtk_dpi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+
+ mtk_dpi_power_on(dpi, DPI_POWER_ENABLE);
+ mtk_dpi_set_display_mode(dpi, &dpi->mode);
+}
+
+static int mtk_dpi_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs mtk_dpi_encoder_helper_funcs = {
+ .mode_fixup = mtk_dpi_encoder_mode_fixup,
+ .mode_set = mtk_dpi_encoder_mode_set,
+ .disable = mtk_dpi_encoder_disable,
+ .enable = mtk_dpi_encoder_enable,
+ .atomic_check = mtk_dpi_atomic_check,
+};
+
+static void mtk_dpi_start(struct mtk_ddp_comp *comp)
+{
+ struct mtk_dpi *dpi = container_of(comp, struct mtk_dpi, ddp_comp);
+
+ mtk_dpi_power_on(dpi, DPI_POWER_START);
+}
+
+static void mtk_dpi_stop(struct mtk_ddp_comp *comp)
+{
+ struct mtk_dpi *dpi = container_of(comp, struct mtk_dpi, ddp_comp);
+
+ mtk_dpi_power_off(dpi, DPI_POWER_START);
+}
+
+static const struct mtk_ddp_comp_funcs mtk_dpi_funcs = {
+ .start = mtk_dpi_start,
+ .stop = mtk_dpi_stop,
+};
+
+static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct mtk_dpi *dpi = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ ret = mtk_ddp_comp_register(drm_dev, &dpi->ddp_comp);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register component %s: %d\n",
+ dev->of_node->full_name, ret);
+ return ret;
+ }
+
+ ret = drm_encoder_init(drm_dev, &dpi->encoder, &mtk_dpi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret) {
+ dev_err(dev, "Failed to initialize decoder: %d\n", ret);
+ goto err_unregister;
+ }
+ drm_encoder_helper_add(&dpi->encoder, &mtk_dpi_encoder_helper_funcs);
+
+ /* Currently DPI0 is fixed to be driven by OVL1 */
+ dpi->encoder.possible_crtcs = BIT(1);
+
+ dpi->encoder.bridge->encoder = &dpi->encoder;
+ ret = drm_bridge_attach(dpi->encoder.dev, dpi->encoder.bridge);
+ if (ret) {
+ dev_err(dev, "Failed to attach bridge: %d\n", ret);
+ goto err_cleanup;
+ }
+
+ dpi->bit_num = MTK_DPI_OUT_BIT_NUM_8BITS;
+ dpi->channel_swap = MTK_DPI_OUT_CHANNEL_SWAP_RGB;
+ dpi->yc_map = MTK_DPI_OUT_YC_MAP_RGB;
+ dpi->color_format = MTK_DPI_COLOR_FORMAT_RGB;
+
+ return 0;
+
+err_cleanup:
+ drm_encoder_cleanup(&dpi->encoder);
+err_unregister:
+ mtk_ddp_comp_unregister(drm_dev, &dpi->ddp_comp);
+ return ret;
+}
+
+static void mtk_dpi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct mtk_dpi *dpi = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+
+ drm_encoder_cleanup(&dpi->encoder);
+ mtk_ddp_comp_unregister(drm_dev, &dpi->ddp_comp);
+}
+
+static const struct component_ops mtk_dpi_component_ops = {
+ .bind = mtk_dpi_bind,
+ .unbind = mtk_dpi_unbind,
+};
+
+static int mtk_dpi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_dpi *dpi;
+ struct resource *mem;
+ struct device_node *ep, *bridge_node = NULL;
+ int comp_id;
+ int ret;
+
+ dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
+ if (!dpi)
+ return -ENOMEM;
+
+ dpi->dev = dev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dpi->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(dpi->regs)) {
+ ret = PTR_ERR(dpi->regs);
+ dev_err(dev, "Failed to ioremap mem resource: %d\n", ret);
+ return ret;
+ }
+
+ dpi->engine_clk = devm_clk_get(dev, "engine");
+ if (IS_ERR(dpi->engine_clk)) {
+ ret = PTR_ERR(dpi->engine_clk);
+ dev_err(dev, "Failed to get engine clock: %d\n", ret);
+ return ret;
+ }
+
+ dpi->pixel_clk = devm_clk_get(dev, "pixel");
+ if (IS_ERR(dpi->pixel_clk)) {
+ ret = PTR_ERR(dpi->pixel_clk);
+ dev_err(dev, "Failed to get pixel clock: %d\n", ret);
+ return ret;
+ }
+
+ dpi->tvd_clk = devm_clk_get(dev, "pll");
+ if (IS_ERR(dpi->tvd_clk)) {
+ ret = PTR_ERR(dpi->tvd_clk);
+ dev_err(dev, "Failed to get tvdpll clock: %d\n", ret);
+ return ret;
+ }
+
+ dpi->irq = platform_get_irq(pdev, 0);
+ if (dpi->irq <= 0) {
+ dev_err(dev, "Failed to get irq: %d\n", dpi->irq);
+ return -EINVAL;
+ }
+
+ ep = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (ep) {
+ bridge_node = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ }
+ if (!bridge_node) {
+ dev_err(dev, "Failed to find bridge node\n");
+ return -ENODEV;
+ }
+
+ dev_info(dev, "Found bridge node: %s\n", bridge_node->full_name);
+
+ dpi->encoder.bridge = of_drm_find_bridge(bridge_node);
+ of_node_put(bridge_node);
+ if (!dpi->encoder.bridge)
+ return -EPROBE_DEFER;
+
+ comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DPI);
+ if (comp_id < 0) {
+ dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
+ return comp_id;
+ }
+
+ ret = mtk_ddp_comp_init(dev, dev->of_node, &dpi->ddp_comp, comp_id,
+ &mtk_dpi_funcs);
+ if (ret) {
+ dev_err(dev, "Failed to initialize component: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, dpi);
+
+ ret = component_add(dev, &mtk_dpi_component_ops);
+ if (ret) {
+ dev_err(dev, "Failed to add component: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mtk_dpi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &mtk_dpi_component_ops);
+
+ return 0;
+}
+
+static const struct of_device_id mtk_dpi_of_ids[] = {
+ { .compatible = "mediatek,mt8173-dpi", },
+ {}
+};
+
+struct platform_driver mtk_dpi_driver = {
+ .probe = mtk_dpi_probe,
+ .remove = mtk_dpi_remove,
+ .driver = {
+ .name = "mediatek-dpi",
+ .of_match_table = mtk_dpi_of_ids,
+ },
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi_regs.h b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
new file mode 100644
index 000000000..4b6ad4751
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_dpi_regs.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MTK_DPI_REGS_H
+#define __MTK_DPI_REGS_H
+
+#define DPI_EN 0x00
+#define EN BIT(0)
+
+#define DPI_RET 0x04
+#define RST BIT(0)
+
+#define DPI_INTEN 0x08
+#define INT_VSYNC_EN BIT(0)
+#define INT_VDE_EN BIT(1)
+#define INT_UNDERFLOW_EN BIT(2)
+
+#define DPI_INTSTA 0x0C
+#define INT_VSYNC_STA BIT(0)
+#define INT_VDE_STA BIT(1)
+#define INT_UNDERFLOW_STA BIT(2)
+
+#define DPI_CON 0x10
+#define BG_ENABLE BIT(0)
+#define IN_RB_SWAP BIT(1)
+#define INTL_EN BIT(2)
+#define TDFP_EN BIT(3)
+#define CLPF_EN BIT(4)
+#define YUV422_EN BIT(5)
+#define CSC_ENABLE BIT(6)
+#define R601_SEL BIT(7)
+#define EMBSYNC_EN BIT(8)
+#define VS_LODD_EN BIT(16)
+#define VS_LEVEN_EN BIT(17)
+#define VS_RODD_EN BIT(18)
+#define VS_REVEN BIT(19)
+#define FAKE_DE_LODD BIT(20)
+#define FAKE_DE_LEVEN BIT(21)
+#define FAKE_DE_RODD BIT(22)
+#define FAKE_DE_REVEN BIT(23)
+
+#define DPI_OUTPUT_SETTING 0x14
+#define CH_SWAP 0
+#define CH_SWAP_MASK (0x7 << 0)
+#define SWAP_RGB 0x00
+#define SWAP_GBR 0x01
+#define SWAP_BRG 0x02
+#define SWAP_RBG 0x03
+#define SWAP_GRB 0x04
+#define SWAP_BGR 0x05
+#define BIT_SWAP BIT(3)
+#define B_MASK BIT(4)
+#define G_MASK BIT(5)
+#define R_MASK BIT(6)
+#define DE_MASK BIT(8)
+#define HS_MASK BIT(9)
+#define VS_MASK BIT(10)
+#define DE_POL BIT(12)
+#define HSYNC_POL BIT(13)
+#define VSYNC_POL BIT(14)
+#define CK_POL BIT(15)
+#define OEN_OFF BIT(16)
+#define EDGE_SEL BIT(17)
+#define OUT_BIT 18
+#define OUT_BIT_MASK (0x3 << 18)
+#define OUT_BIT_8 0x00
+#define OUT_BIT_10 0x01
+#define OUT_BIT_12 0x02
+#define OUT_BIT_16 0x03
+#define YC_MAP 20
+#define YC_MAP_MASK (0x7 << 20)
+#define YC_MAP_RGB 0x00
+#define YC_MAP_CYCY 0x04
+#define YC_MAP_YCYC 0x05
+#define YC_MAP_CY 0x06
+#define YC_MAP_YC 0x07
+
+#define DPI_SIZE 0x18
+#define HSIZE 0
+#define HSIZE_MASK (0x1FFF << 0)
+#define VSIZE 16
+#define VSIZE_MASK (0x1FFF << 16)
+
+#define DPI_DDR_SETTING 0x1C
+#define DDR_EN BIT(0)
+#define DDDR_SEL BIT(1)
+#define DDR_4PHASE BIT(2)
+#define DDR_WIDTH (0x3 << 4)
+#define DDR_PAD_MODE (0x1 << 8)
+
+#define DPI_TGEN_HWIDTH 0x20
+#define HPW 0
+#define HPW_MASK (0xFFF << 0)
+
+#define DPI_TGEN_HPORCH 0x24
+#define HBP 0
+#define HBP_MASK (0xFFF << 0)
+#define HFP 16
+#define HFP_MASK (0xFFF << 16)
+
+#define DPI_TGEN_VWIDTH 0x28
+#define DPI_TGEN_VPORCH 0x2C
+
+#define VSYNC_WIDTH_SHIFT 0
+#define VSYNC_WIDTH_MASK (0xFFF << 0)
+#define VSYNC_HALF_LINE_SHIFT 16
+#define VSYNC_HALF_LINE_MASK BIT(16)
+#define VSYNC_BACK_PORCH_SHIFT 0
+#define VSYNC_BACK_PORCH_MASK (0xFFF << 0)
+#define VSYNC_FRONT_PORCH_SHIFT 16
+#define VSYNC_FRONT_PORCH_MASK (0xFFF << 16)
+
+#define DPI_BG_HCNTL 0x30
+#define BG_RIGHT (0x1FFF << 0)
+#define BG_LEFT (0x1FFF << 16)
+
+#define DPI_BG_VCNTL 0x34
+#define BG_BOT (0x1FFF << 0)
+#define BG_TOP (0x1FFF << 16)
+
+#define DPI_BG_COLOR 0x38
+#define BG_B (0xF << 0)
+#define BG_G (0xF << 8)
+#define BG_R (0xF << 16)
+
+#define DPI_FIFO_CTL 0x3C
+#define FIFO_VALID_SET (0x1F << 0)
+#define FIFO_RST_SEL (0x1 << 8)
+
+#define DPI_STATUS 0x40
+#define VCOUNTER (0x1FFF << 0)
+#define DPI_BUSY BIT(16)
+#define OUTEN BIT(17)
+#define FIELD BIT(20)
+#define TDLR BIT(21)
+
+#define DPI_TMODE 0x44
+#define DPI_OEN_ON BIT(0)
+
+#define DPI_CHECKSUM 0x48
+#define DPI_CHECKSUM_MASK (0xFFFFFF << 0)
+#define DPI_CHECKSUM_READY BIT(30)
+#define DPI_CHECKSUM_EN BIT(31)
+
+#define DPI_DUMMY 0x50
+#define DPI_DUMMY_MASK (0xFFFFFFFF << 0)
+
+#define DPI_TGEN_VWIDTH_LEVEN 0x68
+#define DPI_TGEN_VPORCH_LEVEN 0x6C
+#define DPI_TGEN_VWIDTH_RODD 0x70
+#define DPI_TGEN_VPORCH_RODD 0x74
+#define DPI_TGEN_VWIDTH_REVEN 0x78
+#define DPI_TGEN_VPORCH_REVEN 0x7C
+
+#define DPI_ESAV_VTIMING_LODD 0x80
+#define ESAV_VOFST_LODD (0xFFF << 0)
+#define ESAV_VWID_LODD (0xFFF << 16)
+
+#define DPI_ESAV_VTIMING_LEVEN 0x84
+#define ESAV_VOFST_LEVEN (0xFFF << 0)
+#define ESAV_VWID_LEVEN (0xFFF << 16)
+
+#define DPI_ESAV_VTIMING_RODD 0x88
+#define ESAV_VOFST_RODD (0xFFF << 0)
+#define ESAV_VWID_RODD (0xFFF << 16)
+
+#define DPI_ESAV_VTIMING_REVEN 0x8C
+#define ESAV_VOFST_REVEN (0xFFF << 0)
+#define ESAV_VWID_REVEN (0xFFF << 16)
+
+#define DPI_ESAV_FTIMING 0x90
+#define ESAV_FOFST_ODD (0xFFF << 0)
+#define ESAV_FOFST_EVEN (0xFFF << 16)
+
+#define DPI_CLPF_SETTING 0x94
+#define CLPF_TYPE (0x3 << 0)
+#define ROUND_EN BIT(4)
+
+#define DPI_Y_LIMIT 0x98
+#define Y_LIMINT_BOT 0
+#define Y_LIMINT_BOT_MASK (0xFFF << 0)
+#define Y_LIMINT_TOP 16
+#define Y_LIMINT_TOP_MASK (0xFFF << 16)
+
+#define DPI_C_LIMIT 0x9C
+#define C_LIMIT_BOT 0
+#define C_LIMIT_BOT_MASK (0xFFF << 0)
+#define C_LIMIT_TOP 16
+#define C_LIMIT_TOP_MASK (0xFFF << 16)
+
+#define DPI_YUV422_SETTING 0xA0
+#define UV_SWAP BIT(0)
+#define CR_DELSEL BIT(4)
+#define CB_DELSEL BIT(5)
+#define Y_DELSEL BIT(6)
+#define DE_DELSEL BIT(7)
+
+#define DPI_EMBSYNC_SETTING 0xA4
+#define EMBSYNC_R_CR_EN BIT(0)
+#define EMPSYNC_G_Y_EN BIT(1)
+#define EMPSYNC_B_CB_EN BIT(2)
+#define ESAV_F_INV BIT(4)
+#define ESAV_V_INV BIT(5)
+#define ESAV_H_INV BIT(6)
+#define ESAV_CODE_MAN BIT(8)
+#define VS_OUT_SEL (0x7 << 12)
+
+#define DPI_ESAV_CODE_SET0 0xA8
+#define ESAV_CODE0 (0xFFF << 0)
+#define ESAV_CODE1 (0xFFF << 16)
+
+#define DPI_ESAV_CODE_SET1 0xAC
+#define ESAV_CODE2 (0xFFF << 0)
+#define ESAV_CODE3_MSB BIT(16)
+
+#define DPI_H_FRE_CON 0xE0
+#define H_FRE_2N BIT(25)
+#endif /* __MTK_DPI_REGS_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
new file mode 100644
index 000000000..24aa3bad1
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -0,0 +1,582 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/barrier.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <soc/mediatek/smi.h>
+
+#include "mtk_drm_drv.h"
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp.h"
+#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_gem.h"
+#include "mtk_drm_plane.h"
+
+/**
+ * struct mtk_drm_crtc - MediaTek specific crtc structure.
+ * @base: crtc object.
+ * @enabled: records whether crtc_enable succeeded
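+ * @pending_needs_vblank: whether a vblank event must be sent on the next
+ *	frame completion
+ * @event: pending vblank event to complete from the ddp irq handler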
+ * @planes: array of 4 mtk_drm_plane structures, one for each overlay plane
+ * @pending_planes: whether any plane has pending changes to be applied
+ * @config_regs: memory mapped mmsys configuration register space
+ * @mutex: handle to one of the ten disp_mutex streams
+ * @ddp_comp_nr: number of components in ddp_comp
+ * @ddp_comp: array of pointers to the mtk_ddp_comp structures used by this crtc
+ */
+struct mtk_drm_crtc {
+ struct drm_crtc base;
+ bool enabled;
+
+ bool pending_needs_vblank;
+ struct drm_pending_vblank_event *event;
+
+ struct mtk_drm_plane planes[OVL_LAYER_NR];
+ bool pending_planes;
+
+ void __iomem *config_regs;
+ struct mtk_disp_mutex *mutex;
+ unsigned int ddp_comp_nr;
+ struct mtk_ddp_comp **ddp_comp;
+};
+
+struct mtk_crtc_state {
+ struct drm_crtc_state base;
+
+ bool pending_config;
+ unsigned int pending_width;
+ unsigned int pending_height;
+ unsigned int pending_vrefresh;
+};
+
+static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
+{
+ return container_of(c, struct mtk_drm_crtc, base);
+}
+
+static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
+{
+ return container_of(s, struct mtk_crtc_state, base);
+}
+
+static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
+{
+ struct drm_crtc *crtc = &mtk_crtc->base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
+ drm_crtc_vblank_put(crtc);
+ mtk_crtc->event = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
+static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
+{
+ drm_crtc_handle_vblank(&mtk_crtc->base);
+ if (mtk_crtc->pending_needs_vblank) {
+ mtk_drm_crtc_finish_page_flip(mtk_crtc);
+ mtk_crtc->pending_needs_vblank = false;
+ }
+}
+
+static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ int i;
+
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ clk_unprepare(mtk_crtc->ddp_comp[i]->clk);
+
+ mtk_disp_mutex_put(mtk_crtc->mutex);
+
+ drm_crtc_cleanup(crtc);
+}
+
+static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
+{
+ struct mtk_crtc_state *state;
+
+ if (crtc->state) {
+ if (crtc->state->mode_blob)
+ drm_property_unreference_blob(crtc->state->mode_blob);
+
+ state = to_mtk_crtc_state(crtc->state);
+ memset(state, 0, sizeof(*state));
+ } else {
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return;
+ crtc->state = &state->base;
+ }
+
+ state->base.crtc = crtc;
+}
+
+static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct mtk_crtc_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ WARN_ON(state->base.crtc != crtc);
+ state->base.crtc = crtc;
+
+ return &state->base;
+}
+
+static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(to_mtk_crtc_state(state));
+}
+
+static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* Nothing to do here, but this callback is mandatory. */
+ return true;
+}
+
+static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+ struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);
+
+ state->pending_width = crtc->mode.hdisplay;
+ state->pending_height = crtc->mode.vdisplay;
+ state->pending_vrefresh = crtc->mode.vrefresh;
+ wmb(); /* Make sure the above parameters are set before update */
+ state->pending_config = true;
+}
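
The wmb() publishes the three parameters before the pending_config flag;
the consumer is mtk_crtc_ddp_irq() below, which tests the flag before
reading them. The pairing, abridged (editorial sketch, not driver code):

	/* producer (this function)        consumer (mtk_crtc_ddp_irq)    */
	/* state->pending_width = ...;     if (state->pending_config) {   */
	/* state->pending_height = ...;            ...use pending_*...    */
	/* wmb();                                  pending_config = false;*/
	/* state->pending_config = true;   }                              */
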
+
+int mtk_drm_crtc_enable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+ struct mtk_drm_private *priv = drm->dev_private;
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(priv->crtc[pipe]);
+ struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+
+ mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base);
+
+ return 0;
+}
+
+void mtk_drm_crtc_disable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+ struct mtk_drm_private *priv = drm->dev_private;
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(priv->crtc[pipe]);
+ struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+
+ mtk_ddp_comp_disable_vblank(ovl);
+}
+
+static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
+{
+ int ret;
+ int i;
+
+ DRM_DEBUG_DRIVER("%s\n", __func__);
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+ ret = clk_enable(mtk_crtc->ddp_comp[i]->clk);
+ if (ret) {
+ DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ while (--i >= 0)
+ clk_disable(mtk_crtc->ddp_comp[i]->clk);
+ return ret;
+}
+
+static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
+{
+ int i;
+
+ DRM_DEBUG_DRIVER("%s\n", __func__);
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ clk_disable(mtk_crtc->ddp_comp[i]->clk);
+}
+
+static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
+{
+ struct drm_crtc *crtc = &mtk_crtc->base;
+ unsigned int width, height, vrefresh;
+ int ret;
+ int i;
+
+ DRM_DEBUG_DRIVER("%s\n", __func__);
+ if (WARN_ON(!crtc->state))
+ return -EINVAL;
+
+ width = crtc->state->adjusted_mode.hdisplay;
+ height = crtc->state->adjusted_mode.vdisplay;
+ vrefresh = crtc->state->adjusted_mode.vrefresh;
+
+ ret = pm_runtime_get_sync(crtc->dev->dev);
+ if (ret < 0) {
+ DRM_ERROR("Failed to enable power domain: %d\n", ret);
+ return ret;
+ }
+
+ ret = mtk_disp_mutex_prepare(mtk_crtc->mutex);
+ if (ret < 0) {
+ DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
+ goto err_pm_runtime_put;
+ }
+
+ ret = mtk_crtc_ddp_clk_enable(mtk_crtc);
+ if (ret < 0) {
+ DRM_ERROR("Failed to enable component clocks: %d\n", ret);
+ goto err_mutex_unprepare;
+ }
+
+ DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n");
+ for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
+ mtk_ddp_add_comp_to_path(mtk_crtc->config_regs,
+ mtk_crtc->ddp_comp[i]->id,
+ mtk_crtc->ddp_comp[i + 1]->id);
+ mtk_disp_mutex_add_comp(mtk_crtc->mutex,
+ mtk_crtc->ddp_comp[i]->id);
+ }
+ mtk_disp_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
+ mtk_disp_mutex_enable(mtk_crtc->mutex);
+
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
+
+ mtk_ddp_comp_config(comp, width, height, vrefresh);
+ mtk_ddp_comp_start(comp);
+ }
+
+ /* Initially configure all planes */
+ for (i = 0; i < OVL_LAYER_NR; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i].base;
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+ mtk_ddp_comp_layer_config(mtk_crtc->ddp_comp[0], i,
+ plane_state);
+ }
+
+ return 0;
+
+err_mutex_unprepare:
+ mtk_disp_mutex_unprepare(mtk_crtc->mutex);
+err_pm_runtime_put:
+ pm_runtime_put(crtc->dev->dev);
+ return ret;
+}
+
+static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
+{
+ struct drm_device *drm = mtk_crtc->base.dev;
+ int i;
+
+ DRM_DEBUG_DRIVER("%s\n", __func__);
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+ mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
+ mtk_crtc->ddp_comp[i]->id);
+ mtk_disp_mutex_disable(mtk_crtc->mutex);
+ for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
+ mtk_ddp_remove_comp_from_path(mtk_crtc->config_regs,
+ mtk_crtc->ddp_comp[i]->id,
+ mtk_crtc->ddp_comp[i + 1]->id);
+ mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
+ mtk_crtc->ddp_comp[i]->id);
+ }
+ mtk_disp_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
+ mtk_crtc_ddp_clk_disable(mtk_crtc);
+ mtk_disp_mutex_unprepare(mtk_crtc->mutex);
+
+ pm_runtime_put(drm->dev);
+}
+
+static void mtk_drm_crtc_enable(struct drm_crtc *crtc)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ int ret;
+
+ DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
+
+ ret = mtk_smi_larb_get(ovl->larb_dev);
+ if (ret) {
+ DRM_ERROR("Failed to get larb: %d\n", ret);
+ return;
+ }
+
+ ret = mtk_crtc_ddp_hw_init(mtk_crtc);
+ if (ret) {
+ mtk_smi_larb_put(ovl->larb_dev);
+ return;
+ }
+
+ drm_crtc_vblank_on(crtc);
+ mtk_crtc->enabled = true;
+}
+
+static void mtk_drm_crtc_disable(struct drm_crtc *crtc)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ int i;
+
+ DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
+ if (!mtk_crtc->enabled)
+ return;
+
+ /* Set all pending plane state to disabled */
+ for (i = 0; i < OVL_LAYER_NR; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i].base;
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+ plane_state->pending.enable = false;
+ plane_state->pending.config = true;
+ }
+ mtk_crtc->pending_planes = true;
+
+ /* Wait for planes to be disabled */
+ drm_crtc_wait_one_vblank(crtc);
+
+ drm_crtc_vblank_off(crtc);
+ mtk_crtc_ddp_hw_fini(mtk_crtc);
+ mtk_smi_larb_put(ovl->larb_dev);
+
+ mtk_crtc->enabled = false;
+}
+
+static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+
+ if (mtk_crtc->event && state->base.event)
+ DRM_ERROR("new event while there is still a pending event\n");
+
+ if (state->base.event) {
+ state->base.event->pipe = drm_crtc_index(crtc);
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ mtk_crtc->event = state->base.event;
+ state->base.event = NULL;
+ }
+}
+
+static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ unsigned int pending_planes = 0;
+ int i;
+
+ if (mtk_crtc->event)
+ mtk_crtc->pending_needs_vblank = true;
+ for (i = 0; i < OVL_LAYER_NR; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i].base;
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+ if (plane_state->pending.dirty) {
+ plane_state->pending.config = true;
+ plane_state->pending.dirty = false;
+ pending_planes |= BIT(i);
+ }
+ }
+ if (pending_planes)
+ mtk_crtc->pending_planes = true;
+}
+
+static const struct drm_crtc_funcs mtk_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .destroy = mtk_drm_crtc_destroy,
+ .reset = mtk_drm_crtc_reset,
+ .atomic_duplicate_state = mtk_drm_crtc_duplicate_state,
+ .atomic_destroy_state = mtk_drm_crtc_destroy_state,
+};
+
+static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
+ .mode_fixup = mtk_drm_crtc_mode_fixup,
+ .mode_set_nofb = mtk_drm_crtc_mode_set_nofb,
+ .enable = mtk_drm_crtc_enable,
+ .disable = mtk_drm_crtc_disable,
+ .atomic_begin = mtk_drm_crtc_atomic_begin,
+ .atomic_flush = mtk_drm_crtc_atomic_flush,
+};
+
+static int mtk_drm_crtc_init(struct drm_device *drm,
+ struct mtk_drm_crtc *mtk_crtc,
+ struct drm_plane *primary,
+ struct drm_plane *cursor, unsigned int pipe)
+{
+ int ret;
+
+ ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
+ &mtk_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs);
+
+ return 0;
+}
+
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
+ unsigned int i;
+
+ /*
+ * TODO: instead of updating the registers here, we should prepare
+ * working registers in atomic_commit and let the hardware command
+ * queue update module registers on vblank.
+ */
+ if (state->pending_config) {
+ mtk_ddp_comp_config(ovl, state->pending_width,
+ state->pending_height,
+ state->pending_vrefresh);
+
+ state->pending_config = false;
+ }
+
+ if (mtk_crtc->pending_planes) {
+ for (i = 0; i < OVL_LAYER_NR; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i].base;
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ if (plane_state->pending.config) {
+ mtk_ddp_comp_layer_config(ovl, i, plane_state);
+ plane_state->pending.config = false;
+ }
+ }
+ mtk_crtc->pending_planes = false;
+ }
+
+ mtk_drm_finish_page_flip(mtk_crtc);
+}
+
+int mtk_drm_crtc_create(struct drm_device *drm_dev,
+ const enum mtk_ddp_comp_id *path, unsigned int path_len)
+{
+ struct mtk_drm_private *priv = drm_dev->dev_private;
+ struct device *dev = drm_dev->dev;
+ struct mtk_drm_crtc *mtk_crtc;
+ enum drm_plane_type type;
+ unsigned int zpos;
+ int pipe = priv->num_pipes;
+ int ret;
+ int i;
+
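+	/*
+	 * If any component in this pipeline is disabled or missing, skip
+	 * creating the CRTC without failing the whole probe.
+	 */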
+ for (i = 0; i < path_len; i++) {
+ enum mtk_ddp_comp_id comp_id = path[i];
+ struct device_node *node;
+
+ node = priv->comp_node[comp_id];
+ if (!node) {
+ dev_info(dev,
+ "Not creating crtc %d because component %d is disabled or missing\n",
+ pipe, comp_id);
+ return 0;
+ }
+ }
+
+ mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
+ if (!mtk_crtc)
+ return -ENOMEM;
+
+ mtk_crtc->config_regs = priv->config_regs;
+ mtk_crtc->ddp_comp_nr = path_len;
+	mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
+					sizeof(*mtk_crtc->ddp_comp),
+					GFP_KERNEL);
+	if (!mtk_crtc->ddp_comp)
+		return -ENOMEM;
+
+ mtk_crtc->mutex = mtk_disp_mutex_get(priv->mutex_dev, pipe);
+ if (IS_ERR(mtk_crtc->mutex)) {
+ ret = PTR_ERR(mtk_crtc->mutex);
+ dev_err(dev, "Failed to get mutex: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+ enum mtk_ddp_comp_id comp_id = path[i];
+ struct mtk_ddp_comp *comp;
+ struct device_node *node;
+
+ node = priv->comp_node[comp_id];
+ comp = priv->ddp_comp[comp_id];
+ if (!comp) {
+ dev_err(dev, "Component %s not initialized\n",
+ node->full_name);
+ ret = -ENODEV;
+ goto unprepare;
+ }
+
+ ret = clk_prepare(comp->clk);
+ if (ret) {
+ dev_err(dev,
+ "Failed to prepare clock for component %s: %d\n",
+ node->full_name, ret);
+ goto unprepare;
+ }
+
+ mtk_crtc->ddp_comp[i] = comp;
+ }
+
+ for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) {
+ type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
+ (zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
+ DRM_PLANE_TYPE_OVERLAY;
+ ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos],
+ BIT(pipe), type, zpos);
+ if (ret)
+ goto unprepare;
+ }
+
+ ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0].base,
+ &mtk_crtc->planes[1].base, pipe);
+ if (ret < 0)
+ goto unprepare;
+
+ priv->crtc[pipe] = &mtk_crtc->base;
+ priv->num_pipes++;
+
+ return 0;
+
+unprepare:
+ while (--i >= 0)
+ clk_unprepare(mtk_crtc->ddp_comp[i]->clk);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
new file mode 100644
index 000000000..81e5566ec
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MTK_DRM_CRTC_H
+#define MTK_DRM_CRTC_H
+
+#include <drm/drm_crtc.h>
+#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_plane.h"
+
+#define OVL_LAYER_NR 4
+
+int mtk_drm_crtc_enable_vblank(struct drm_device *drm, unsigned int pipe);
+void mtk_drm_crtc_disable_vblank(struct drm_device *drm, unsigned int pipe);
+void mtk_drm_crtc_check_flush(struct drm_crtc *crtc);
+void mtk_drm_crtc_commit(struct drm_crtc *crtc);
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl);
+int mtk_drm_crtc_create(struct drm_device *drm_dev,
+ const enum mtk_ddp_comp_id *path,
+ unsigned int path_len);
+
+#endif /* MTK_DRM_CRTC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
new file mode 100644
index 000000000..17ba9355a
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "mtk_drm_ddp.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
+#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
+#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
+#define DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN 0x04c
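+	/*
+	 * Clamp negative plane positions to the CRTC origin by advancing the
+	 * scanout address instead: 4 bytes per pixel horizontally, one pitch
+	 * per line vertically.
+	 */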
+#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
+#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
+#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
+#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
+#define DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN 0x0c8
+#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
+
+#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
+#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
+#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n))
+#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n))
+
+#define MUTEX_MOD_DISP_OVL0 BIT(11)
+#define MUTEX_MOD_DISP_OVL1 BIT(12)
+#define MUTEX_MOD_DISP_RDMA0 BIT(13)
+#define MUTEX_MOD_DISP_RDMA1 BIT(14)
+#define MUTEX_MOD_DISP_RDMA2 BIT(15)
+#define MUTEX_MOD_DISP_WDMA0 BIT(16)
+#define MUTEX_MOD_DISP_WDMA1 BIT(17)
+#define MUTEX_MOD_DISP_COLOR0 BIT(18)
+#define MUTEX_MOD_DISP_COLOR1 BIT(19)
+#define MUTEX_MOD_DISP_AAL BIT(20)
+#define MUTEX_MOD_DISP_GAMMA BIT(21)
+#define MUTEX_MOD_DISP_UFOE BIT(22)
+#define MUTEX_MOD_DISP_PWM0 BIT(23)
+#define MUTEX_MOD_DISP_PWM1 BIT(24)
+#define MUTEX_MOD_DISP_OD BIT(25)
+
+#define MUTEX_SOF_SINGLE_MODE 0
+#define MUTEX_SOF_DSI0 1
+#define MUTEX_SOF_DSI1 2
+#define MUTEX_SOF_DPI0 3
+
+#define OVL0_MOUT_EN_COLOR0 0x1
+#define OD_MOUT_EN_RDMA0 0x1
+#define UFOE_MOUT_EN_DSI0 0x1
+#define COLOR0_SEL_IN_OVL0 0x1
+#define OVL1_MOUT_EN_COLOR1 0x1
+#define GAMMA_MOUT_EN_RDMA1 0x1
+#define RDMA1_MOUT_DPI0 0x2
+#define DPI0_SEL_IN_RDMA1 0x1
+#define COLOR1_SEL_IN_OVL1 0x1
+
+struct mtk_disp_mutex {
+ int id;
+ bool claimed;
+};
+
+struct mtk_ddp {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *regs;
+ struct mtk_disp_mutex mutex[10];
+};
+
+static const unsigned int mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL] = MUTEX_MOD_DISP_AAL,
+ [DDP_COMPONENT_COLOR0] = MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_COLOR1] = MUTEX_MOD_DISP_COLOR1,
+ [DDP_COMPONENT_GAMMA] = MUTEX_MOD_DISP_GAMMA,
+ [DDP_COMPONENT_OD] = MUTEX_MOD_DISP_OD,
+ [DDP_COMPONENT_OVL0] = MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL1] = MUTEX_MOD_DISP_OVL1,
+ [DDP_COMPONENT_PWM0] = MUTEX_MOD_DISP_PWM0,
+ [DDP_COMPONENT_PWM1] = MUTEX_MOD_DISP_PWM1,
+ [DDP_COMPONENT_RDMA0] = MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_RDMA2] = MUTEX_MOD_DISP_RDMA2,
+ [DDP_COMPONENT_UFOE] = MUTEX_MOD_DISP_UFOE,
+ [DDP_COMPONENT_WDMA0] = MUTEX_MOD_DISP_WDMA0,
+ [DDP_COMPONENT_WDMA1] = MUTEX_MOD_DISP_WDMA1,
+};
+
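+/*
+ * The MOUT_EN registers select which downstream component receives a
+ * component's output. For a cur -> next link that needs routing, return the
+ * enable value and store the register offset in *addr; return 0 for links
+ * with fixed connections.
+ */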
+static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next,
+ unsigned int *addr)
+{
+ unsigned int value;
+
+ if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
+ *addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
+ value = OVL0_MOUT_EN_COLOR0;
+ } else if (cur == DDP_COMPONENT_OD && next == DDP_COMPONENT_RDMA0) {
+ *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
+ value = OD_MOUT_EN_RDMA0;
+ } else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DISP_UFOE_MOUT_EN;
+ value = UFOE_MOUT_EN_DSI0;
+ } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
+ *addr = DISP_REG_CONFIG_DISP_OVL1_MOUT_EN;
+ value = OVL1_MOUT_EN_COLOR1;
+ } else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) {
+ *addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN;
+ value = GAMMA_MOUT_EN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN;
+ value = RDMA1_MOUT_DPI0;
+ } else {
+ value = 0;
+ }
+
+ return value;
+}
+
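+/*
+ * The SEL_IN registers select the input source on the receiving side of a
+ * link. As above, return the value to program and the register offset, or
+ * 0 if the input is fixed.
+ */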
+static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next,
+ unsigned int *addr)
+{
+ unsigned int value;
+
+ if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
+ *addr = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN;
+ value = COLOR0_SEL_IN_OVL0;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
+ *addr = DISP_REG_CONFIG_DPI_SEL_IN;
+ value = DPI0_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
+ *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
+ value = COLOR1_SEL_IN_OVL1;
+ } else {
+ value = 0;
+ }
+
+ return value;
+}
+
+void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ unsigned int addr, value, reg;
+
+ value = mtk_ddp_mout_en(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) | value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+
+ value = mtk_ddp_sel_in(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) | value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+}
+
+void mtk_ddp_remove_comp_from_path(void __iomem *config_regs,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ unsigned int addr, value, reg;
+
+ value = mtk_ddp_mout_en(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) & ~value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+
+ value = mtk_ddp_sel_in(cur, next, &addr);
+ if (value) {
+ reg = readl_relaxed(config_regs + addr) & ~value;
+ writel_relaxed(reg, config_regs + addr);
+ }
+}
+
+struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id)
+{
+ struct mtk_ddp *ddp = dev_get_drvdata(dev);
+
+ if (id >= 10)
+ return ERR_PTR(-EINVAL);
+ if (ddp->mutex[id].claimed)
+ return ERR_PTR(-EBUSY);
+
+ ddp->mutex[id].claimed = true;
+
+ return &ddp->mutex[id];
+}
+
+void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex)
+{
+ struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+ mutex[mutex->id]);
+
+ WARN_ON(&ddp->mutex[mutex->id] != mutex);
+
+ mutex->claimed = false;
+}
+
+int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex)
+{
+ struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+ mutex[mutex->id]);
+ return clk_prepare_enable(ddp->clk);
+}
+
+void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex)
+{
+ struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+ mutex[mutex->id]);
+ clk_disable_unprepare(ddp->clk);
+}
+
+void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
+ enum mtk_ddp_comp_id id)
+{
+ struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+ mutex[mutex->id]);
+ unsigned int reg;
+
+ WARN_ON(&ddp->mutex[mutex->id] != mutex);
+
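+	/*
+	 * Output components (DSI/DPI) select the mutex start-of-frame signal;
+	 * all other components are added to the module mask so their register
+	 * updates are synchronized to that SOF.
+	 */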
+ switch (id) {
+ case DDP_COMPONENT_DSI0:
+ reg = MUTEX_SOF_DSI0;
+ break;
+ case DDP_COMPONENT_DSI1:
+		reg = MUTEX_SOF_DSI1;
+ break;
+ case DDP_COMPONENT_DPI0:
+ reg = MUTEX_SOF_DPI0;
+ break;
+ default:
+ reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+ reg |= mutex_mod[id];
+ writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+ return;
+ }
+
+ writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+}
+
+void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
+ enum mtk_ddp_comp_id id)
+{
+ struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+ mutex[mutex->id]);
+ unsigned int reg;
+
+ WARN_ON(&ddp->mutex[mutex->id] != mutex);
+
+ switch (id) {
+ case DDP_COMPONENT_DSI0:
+ case DDP_COMPONENT_DSI1:
+ case DDP_COMPONENT_DPI0:
+ writel_relaxed(MUTEX_SOF_SINGLE_MODE,
+ ddp->regs + DISP_REG_MUTEX_SOF(mutex->id));
+ break;
+ default:
+ reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+ reg &= ~mutex_mod[id];
+ writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
+ break;
+ }
+}
+
+void mtk_disp_mutex_enable(struct mtk_disp_mutex *mutex)
+{
+ struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+ mutex[mutex->id]);
+
+ WARN_ON(&ddp->mutex[mutex->id] != mutex);
+
+ writel(1, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
+}
+
+void mtk_disp_mutex_disable(struct mtk_disp_mutex *mutex)
+{
+ struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+ mutex[mutex->id]);
+
+ WARN_ON(&ddp->mutex[mutex->id] != mutex);
+
+ writel(0, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
+}
+
+static int mtk_ddp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_ddp *ddp;
+ struct resource *regs;
+ int i;
+
+ ddp = devm_kzalloc(dev, sizeof(*ddp), GFP_KERNEL);
+ if (!ddp)
+ return -ENOMEM;
+
+ for (i = 0; i < 10; i++)
+ ddp->mutex[i].id = i;
+
+ ddp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ddp->clk)) {
+ dev_err(dev, "Failed to get clock\n");
+ return PTR_ERR(ddp->clk);
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ddp->regs = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(ddp->regs)) {
+ dev_err(dev, "Failed to map mutex registers\n");
+ return PTR_ERR(ddp->regs);
+ }
+
+ platform_set_drvdata(pdev, ddp);
+
+ return 0;
+}
+
+static int mtk_ddp_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id ddp_driver_dt_match[] = {
+ { .compatible = "mediatek,mt8173-disp-mutex" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ddp_driver_dt_match);
+
+struct platform_driver mtk_ddp_driver = {
+ .probe = mtk_ddp_probe,
+ .remove = mtk_ddp_remove,
+ .driver = {
+ .name = "mediatek-ddp",
+ .owner = THIS_MODULE,
+ .of_match_table = ddp_driver_dt_match,
+ },
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
new file mode 100644
index 000000000..92c11752f
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MTK_DRM_DDP_H
+#define MTK_DRM_DDP_H
+
+#include "mtk_drm_ddp_comp.h"
+
+struct regmap;
+struct device;
+struct mtk_disp_mutex;
+
+void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next);
+void mtk_ddp_remove_comp_from_path(void __iomem *config_regs,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next);
+
+struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id);
+int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex);
+void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
+ enum mtk_ddp_comp_id id);
+void mtk_disp_mutex_enable(struct mtk_disp_mutex *mutex);
+void mtk_disp_mutex_disable(struct mtk_disp_mutex *mutex);
+void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
+ enum mtk_ddp_comp_id id);
+void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex);
+void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex);
+
+#endif /* MTK_DRM_DDP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
new file mode 100644
index 000000000..3970fcf0f
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Authors:
+ * YT Shen <yt.shen@mediatek.com>
+ * CK Hu <ck.hu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <drm/drmP.h>
+#include "mtk_drm_drv.h"
+#include "mtk_drm_plane.h"
+#include "mtk_drm_ddp_comp.h"
+
+#define DISP_OD_EN 0x0000
+#define DISP_OD_INTEN 0x0008
+#define DISP_OD_INTSTA 0x000c
+#define DISP_OD_CFG 0x0020
+#define DISP_OD_SIZE 0x0030
+
+#define DISP_REG_UFO_START 0x0000
+
+#define DISP_COLOR_CFG_MAIN 0x0400
+#define DISP_COLOR_START 0x0c00
+#define DISP_COLOR_WIDTH 0x0c50
+#define DISP_COLOR_HEIGHT 0x0c54
+
+#define OD_RELAY_MODE BIT(0)
+
+#define UFO_BYPASS BIT(2)
+
+#define COLOR_BYPASS_ALL BIT(7)
+#define COLOR_SEQ_SEL BIT(13)
+
+static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh)
+{
+ writel(w, comp->regs + DISP_COLOR_WIDTH);
+ writel(h, comp->regs + DISP_COLOR_HEIGHT);
+}
+
+static void mtk_color_start(struct mtk_ddp_comp *comp)
+{
+ writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
+ comp->regs + DISP_COLOR_CFG_MAIN);
+ writel(0x1, comp->regs + DISP_COLOR_START);
+}
+
+static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh)
+{
+ writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
+}
+
+static void mtk_od_start(struct mtk_ddp_comp *comp)
+{
+ writel(OD_RELAY_MODE, comp->regs + DISP_OD_CFG);
+ writel(1, comp->regs + DISP_OD_EN);
+}
+
+static void mtk_ufoe_start(struct mtk_ddp_comp *comp)
+{
+ writel(UFO_BYPASS, comp->regs + DISP_REG_UFO_START);
+}
+
+static const struct mtk_ddp_comp_funcs ddp_color = {
+ .config = mtk_color_config,
+ .start = mtk_color_start,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_od = {
+ .config = mtk_od_config,
+ .start = mtk_od_start,
+};
+
+static const struct mtk_ddp_comp_funcs ddp_ufoe = {
+ .start = mtk_ufoe_start,
+};
+
+static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
+ [MTK_DISP_OVL] = "ovl",
+ [MTK_DISP_RDMA] = "rdma",
+ [MTK_DISP_WDMA] = "wdma",
+ [MTK_DISP_COLOR] = "color",
+ [MTK_DISP_AAL] = "aal",
+ [MTK_DISP_GAMMA] = "gamma",
+ [MTK_DISP_UFOE] = "ufoe",
+ [MTK_DSI] = "dsi",
+ [MTK_DPI] = "dpi",
+ [MTK_DISP_PWM] = "pwm",
+ [MTK_DISP_MUTEX] = "mutex",
+ [MTK_DISP_OD] = "od",
+};
+
+struct mtk_ddp_comp_match {
+ enum mtk_ddp_comp_type type;
+ int alias_id;
+ const struct mtk_ddp_comp_funcs *funcs;
+};
+
+static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, NULL },
+ [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
+ [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
+ [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
+ [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL },
+ [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, NULL },
+ [DDP_COMPONENT_GAMMA] = { MTK_DISP_GAMMA, 0, NULL },
+ [DDP_COMPONENT_OD] = { MTK_DISP_OD, 0, &ddp_od },
+ [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL },
+ [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL },
+ [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL },
+ [DDP_COMPONENT_RDMA0] = { MTK_DISP_RDMA, 0, NULL },
+ [DDP_COMPONENT_RDMA1] = { MTK_DISP_RDMA, 1, NULL },
+ [DDP_COMPONENT_RDMA2] = { MTK_DISP_RDMA, 2, NULL },
+ [DDP_COMPONENT_UFOE] = { MTK_DISP_UFOE, 0, &ddp_ufoe },
+ [DDP_COMPONENT_WDMA0] = { MTK_DISP_WDMA, 0, NULL },
+ [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
+};
+
+int mtk_ddp_comp_get_id(struct device_node *node,
+ enum mtk_ddp_comp_type comp_type)
+{
+ int id = of_alias_get_id(node, mtk_ddp_comp_stem[comp_type]);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_ddp_matches); i++) {
+ if (comp_type == mtk_ddp_matches[i].type &&
+ (id < 0 || id == mtk_ddp_matches[i].alias_id))
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
+ struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
+ const struct mtk_ddp_comp_funcs *funcs)
+{
+ enum mtk_ddp_comp_type type;
+ struct device_node *larb_node;
+ struct platform_device *larb_pdev;
+
+ if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
+ return -EINVAL;
+
+ comp->id = comp_id;
+ comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs;
+
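+	/*
+	 * The DPI0, DSI0 and PWM0 blocks are controlled by their own drivers,
+	 * so don't map their registers or claim their clocks and interrupts
+	 * here.
+	 */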
+ if (comp_id == DDP_COMPONENT_DPI0 ||
+ comp_id == DDP_COMPONENT_DSI0 ||
+ comp_id == DDP_COMPONENT_PWM0) {
+ comp->regs = NULL;
+ comp->clk = NULL;
+ comp->irq = 0;
+ return 0;
+ }
+
+ comp->regs = of_iomap(node, 0);
+ comp->irq = of_irq_get(node, 0);
+ comp->clk = of_clk_get(node, 0);
+ if (IS_ERR(comp->clk))
+ comp->clk = NULL;
+
+ type = mtk_ddp_matches[comp_id].type;
+
+ /* Only DMA capable components need the LARB property */
+ comp->larb_dev = NULL;
+ if (type != MTK_DISP_OVL &&
+ type != MTK_DISP_RDMA &&
+ type != MTK_DISP_WDMA)
+ return 0;
+
+ larb_node = of_parse_phandle(node, "mediatek,larb", 0);
+ if (!larb_node) {
+ dev_err(dev,
+			"Missing mediatek,larb phandle in %s node\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ larb_pdev = of_find_device_by_node(larb_node);
+ if (!larb_pdev) {
+ dev_warn(dev, "Waiting for larb device %s\n",
+ larb_node->full_name);
+ of_node_put(larb_node);
+ return -EPROBE_DEFER;
+ }
+ of_node_put(larb_node);
+
+ comp->larb_dev = &larb_pdev->dev;
+
+ return 0;
+}
+
+int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp)
+{
+ struct mtk_drm_private *private = drm->dev_private;
+
+ if (private->ddp_comp[comp->id])
+ return -EBUSY;
+
+ private->ddp_comp[comp->id] = comp;
+ return 0;
+}
+
+void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp)
+{
+ struct mtk_drm_private *private = drm->dev_private;
+
+ private->ddp_comp[comp->id] = NULL;
+}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
new file mode 100644
index 000000000..6b13ba970
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MTK_DRM_DDP_COMP_H
+#define MTK_DRM_DDP_COMP_H
+
+#include <linux/io.h>
+
+struct device;
+struct device_node;
+struct drm_crtc;
+struct drm_device;
+struct mtk_plane_state;
+
+enum mtk_ddp_comp_type {
+ MTK_DISP_OVL,
+ MTK_DISP_RDMA,
+ MTK_DISP_WDMA,
+ MTK_DISP_COLOR,
+ MTK_DISP_AAL,
+ MTK_DISP_GAMMA,
+ MTK_DISP_UFOE,
+ MTK_DSI,
+ MTK_DPI,
+ MTK_DISP_PWM,
+ MTK_DISP_MUTEX,
+ MTK_DISP_OD,
+ MTK_DDP_COMP_TYPE_MAX,
+};
+
+enum mtk_ddp_comp_id {
+ DDP_COMPONENT_AAL,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_COLOR1,
+ DDP_COMPONENT_DPI0,
+ DDP_COMPONENT_DSI0,
+ DDP_COMPONENT_DSI1,
+ DDP_COMPONENT_GAMMA,
+ DDP_COMPONENT_OD,
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_OVL1,
+ DDP_COMPONENT_PWM0,
+ DDP_COMPONENT_PWM1,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_RDMA1,
+ DDP_COMPONENT_RDMA2,
+ DDP_COMPONENT_UFOE,
+ DDP_COMPONENT_WDMA0,
+ DDP_COMPONENT_WDMA1,
+ DDP_COMPONENT_ID_MAX,
+};
+
+struct mtk_ddp_comp;
+
+struct mtk_ddp_comp_funcs {
+ void (*config)(struct mtk_ddp_comp *comp, unsigned int w,
+ unsigned int h, unsigned int vrefresh);
+ void (*start)(struct mtk_ddp_comp *comp);
+ void (*stop)(struct mtk_ddp_comp *comp);
+ void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
+ void (*disable_vblank)(struct mtk_ddp_comp *comp);
+ void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
+ void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
+ void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct mtk_plane_state *state);
+};
+
+struct mtk_ddp_comp {
+ struct clk *clk;
+ void __iomem *regs;
+ int irq;
+ struct device *larb_dev;
+ enum mtk_ddp_comp_id id;
+ const struct mtk_ddp_comp_funcs *funcs;
+};
+
+static inline void mtk_ddp_comp_config(struct mtk_ddp_comp *comp,
+ unsigned int w, unsigned int h,
+ unsigned int vrefresh)
+{
+ if (comp->funcs && comp->funcs->config)
+ comp->funcs->config(comp, w, h, vrefresh);
+}
+
+static inline void mtk_ddp_comp_start(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->start)
+ comp->funcs->start(comp);
+}
+
+static inline void mtk_ddp_comp_stop(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->stop)
+ comp->funcs->stop(comp);
+}
+
+static inline void mtk_ddp_comp_enable_vblank(struct mtk_ddp_comp *comp,
+ struct drm_crtc *crtc)
+{
+ if (comp->funcs && comp->funcs->enable_vblank)
+ comp->funcs->enable_vblank(comp, crtc);
+}
+
+static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->disable_vblank)
+ comp->funcs->disable_vblank(comp);
+}
+
+static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
+ unsigned int idx)
+{
+ if (comp->funcs && comp->funcs->layer_on)
+ comp->funcs->layer_on(comp, idx);
+}
+
+static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp,
+ unsigned int idx)
+{
+ if (comp->funcs && comp->funcs->layer_off)
+ comp->funcs->layer_off(comp, idx);
+}
+
+static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp,
+ unsigned int idx,
+ struct mtk_plane_state *state)
+{
+ if (comp->funcs && comp->funcs->layer_config)
+ comp->funcs->layer_config(comp, idx, state);
+}
+
+int mtk_ddp_comp_get_id(struct device_node *node,
+ enum mtk_ddp_comp_type comp_type);
+int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
+ struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
+ const struct mtk_ddp_comp_funcs *funcs);
+int mtk_ddp_comp_register(struct drm_device *drm, struct mtk_ddp_comp *comp);
+void mtk_ddp_comp_unregister(struct drm_device *drm, struct mtk_ddp_comp *comp);
+
+#endif /* MTK_DRM_DDP_COMP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
new file mode 100644
index 000000000..b1223d54d
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: YT SHEN <yt.shen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <linux/component.h>
+#include <linux/iommu.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp.h"
+#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_drv.h"
+#include "mtk_drm_fb.h"
+#include "mtk_drm_gem.h"
+
+#define DRIVER_NAME "mediatek"
+#define DRIVER_DESC "Mediatek SoC DRM"
+#define DRIVER_DATE "20150513"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+static void mtk_atomic_schedule(struct mtk_drm_private *private,
+ struct drm_atomic_state *state)
+{
+ private->commit.state = state;
+ schedule_work(&private->commit.work);
+}
+
+static void mtk_atomic_wait_for_fences(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ int i;
+
+ for_each_plane_in_state(state, plane, plane_state, i)
+ mtk_fb_wait(plane->state->fb);
+}
+
+static void mtk_atomic_complete(struct mtk_drm_private *private,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = private->drm;
+
+ mtk_atomic_wait_for_fences(state);
+
+ drm_atomic_helper_commit_modeset_disables(drm, state);
+ drm_atomic_helper_commit_planes(drm, state, false);
+ drm_atomic_helper_commit_modeset_enables(drm, state);
+ drm_atomic_helper_wait_for_vblanks(drm, state);
+ drm_atomic_helper_cleanup_planes(drm, state);
+ drm_atomic_state_free(state);
+}
+
+static void mtk_atomic_work(struct work_struct *work)
+{
+ struct mtk_drm_private *private = container_of(work,
+ struct mtk_drm_private, commit.work);
+
+ mtk_atomic_complete(private, private->commit.state);
+}
+
+static int mtk_atomic_commit(struct drm_device *drm,
+ struct drm_atomic_state *state,
+ bool async)
+{
+ struct mtk_drm_private *private = drm->dev_private;
+ int ret;
+
+ ret = drm_atomic_helper_prepare_planes(drm, state);
+ if (ret)
+ return ret;
+
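+	/* Serialize commits and wait for any previous asynchronous commit */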
+ mutex_lock(&private->commit.lock);
+ flush_work(&private->commit.work);
+
+ drm_atomic_helper_swap_state(drm, state);
+
+ if (async)
+ mtk_atomic_schedule(private, state);
+ else
+ mtk_atomic_complete(private, state);
+
+ mutex_unlock(&private->commit.lock);
+
+ return 0;
+}
+
+static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = {
+ .fb_create = mtk_drm_mode_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = mtk_atomic_commit,
+};
+
+static const enum mtk_ddp_comp_id mtk_ddp_main[] = {
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_AAL,
+ DDP_COMPONENT_OD,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_UFOE,
+ DDP_COMPONENT_DSI0,
+ DDP_COMPONENT_PWM0,
+};
+
+static const enum mtk_ddp_comp_id mtk_ddp_ext[] = {
+ DDP_COMPONENT_OVL1,
+ DDP_COMPONENT_COLOR1,
+ DDP_COMPONENT_GAMMA,
+ DDP_COMPONENT_RDMA1,
+ DDP_COMPONENT_DPI0,
+};
+
+static int mtk_drm_kms_init(struct drm_device *drm)
+{
+ struct mtk_drm_private *private = drm->dev_private;
+ struct platform_device *pdev;
+ struct device_node *np;
+ int ret;
+
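+	/* Display buffers are IOMMU-mapped; defer until the IOMMU has probed */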
+ if (!iommu_present(&platform_bus_type))
+ return -EPROBE_DEFER;
+
+ pdev = of_find_device_by_node(private->mutex_node);
+ if (!pdev) {
+ dev_err(drm->dev, "Waiting for disp-mutex device %s\n",
+ private->mutex_node->full_name);
+ of_node_put(private->mutex_node);
+ return -EPROBE_DEFER;
+ }
+ private->mutex_dev = &pdev->dev;
+
+ drm_mode_config_init(drm);
+
+ drm->mode_config.min_width = 64;
+ drm->mode_config.min_height = 64;
+
+	/*
+	 * Set the maximum width and height to default values (4096x4096).
+	 * These limits are used to validate framebuffer sizes in
+	 * drm_mode_addfb().
+	 */
+ drm->mode_config.max_width = 4096;
+ drm->mode_config.max_height = 4096;
+ drm->mode_config.funcs = &mtk_drm_mode_config_funcs;
+
+ ret = component_bind_all(drm->dev, drm);
+ if (ret)
+ goto err_config_cleanup;
+
+ /*
+ * We currently support two fixed data streams, each optional,
+ * and each statically assigned to a crtc:
+ * OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0 ...
+ */
+ ret = mtk_drm_crtc_create(drm, mtk_ddp_main, ARRAY_SIZE(mtk_ddp_main));
+ if (ret < 0)
+ goto err_component_unbind;
+ /* ... and OVL1 -> COLOR1 -> GAMMA -> RDMA1 -> DPI0. */
+ ret = mtk_drm_crtc_create(drm, mtk_ddp_ext, ARRAY_SIZE(mtk_ddp_ext));
+ if (ret < 0)
+ goto err_component_unbind;
+
+ /* Use OVL device for all DMA memory allocations */
+ np = private->comp_node[mtk_ddp_main[0]] ?:
+ private->comp_node[mtk_ddp_ext[0]];
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ ret = -ENODEV;
+ dev_err(drm->dev, "Need at least one OVL device\n");
+ goto err_component_unbind;
+ }
+
+ private->dma_dev = &pdev->dev;
+
+ /*
+ * We don't use the drm_irq_install() helpers provided by the DRM
+ * core, so we need to set this manually in order to allow the
+ * DRM_IOCTL_WAIT_VBLANK to operate correctly.
+ */
+ drm->irq_enabled = true;
+ ret = drm_vblank_init(drm, MAX_CRTC);
+ if (ret < 0)
+ goto err_component_unbind;
+
+ drm_kms_helper_poll_init(drm);
+ drm_mode_config_reset(drm);
+
+ return 0;
+
+err_component_unbind:
+ component_unbind_all(drm->dev, drm);
+err_config_cleanup:
+ drm_mode_config_cleanup(drm);
+
+ return ret;
+}
+
+static void mtk_drm_kms_deinit(struct drm_device *drm)
+{
+ drm_kms_helper_poll_fini(drm);
+
+ drm_vblank_cleanup(drm);
+ component_unbind_all(drm->dev, drm);
+ drm_mode_config_cleanup(drm);
+}
+
+static const struct file_operations mtk_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = mtk_drm_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+};
+
+static struct drm_driver mtk_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = mtk_drm_crtc_enable_vblank,
+ .disable_vblank = mtk_drm_crtc_disable_vblank,
+
+ .gem_free_object = mtk_drm_gem_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = mtk_drm_gem_dumb_create,
+ .dumb_map_offset = mtk_drm_gem_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
+ .gem_prime_mmap = mtk_drm_gem_mmap_buf,
+ .fops = &mtk_drm_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int mtk_drm_bind(struct device *dev)
+{
+ struct mtk_drm_private *private = dev_get_drvdata(dev);
+ struct drm_device *drm;
+ int ret;
+
+ drm = drm_dev_alloc(&mtk_drm_driver, dev);
+ if (!drm)
+ return -ENOMEM;
+
+ drm_dev_set_unique(drm, dev_name(dev));
+
+ drm->dev_private = private;
+ private->drm = drm;
+
+ ret = mtk_drm_kms_init(drm);
+ if (ret < 0)
+ goto err_free;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto err_deinit;
+
+ ret = drm_connector_register_all(drm);
+ if (ret < 0)
+ goto err_unregister;
+
+ return 0;
+
+err_unregister:
+ drm_dev_unregister(drm);
+err_deinit:
+ mtk_drm_kms_deinit(drm);
+err_free:
+ drm_dev_unref(drm);
+ return ret;
+}
+
+static void mtk_drm_unbind(struct device *dev)
+{
+ struct mtk_drm_private *private = dev_get_drvdata(dev);
+
+ drm_put_dev(private->drm);
+ private->drm = NULL;
+}
+
+static const struct component_master_ops mtk_drm_ops = {
+ .bind = mtk_drm_bind,
+ .unbind = mtk_drm_unbind,
+};
+
+static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
+ { .compatible = "mediatek,mt8173-disp-ovl", .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt8173-disp-rdma", .data = (void *)MTK_DISP_RDMA },
+ { .compatible = "mediatek,mt8173-disp-wdma", .data = (void *)MTK_DISP_WDMA },
+ { .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR },
+ { .compatible = "mediatek,mt8173-disp-aal", .data = (void *)MTK_DISP_AAL},
+ { .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, },
+ { .compatible = "mediatek,mt8173-disp-ufoe", .data = (void *)MTK_DISP_UFOE },
+ { .compatible = "mediatek,mt8173-dsi", .data = (void *)MTK_DSI },
+ { .compatible = "mediatek,mt8173-dpi", .data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt8173-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt8173-disp-pwm", .data = (void *)MTK_DISP_PWM },
+ { .compatible = "mediatek,mt8173-disp-od", .data = (void *)MTK_DISP_OD },
+ { }
+};
+
+static int mtk_drm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_drm_private *private;
+ struct resource *mem;
+ struct device_node *node;
+ struct component_match *match = NULL;
+ int ret;
+ int i;
+
+ private = devm_kzalloc(dev, sizeof(*private), GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+
+ mutex_init(&private->commit.lock);
+ INIT_WORK(&private->commit.work, mtk_atomic_work);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ private->config_regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(private->config_regs)) {
+ ret = PTR_ERR(private->config_regs);
+ dev_err(dev, "Failed to ioremap mmsys-config resource: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Iterate over sibling DISP function blocks */
+ for_each_child_of_node(dev->of_node->parent, node) {
+ const struct of_device_id *of_id;
+ enum mtk_ddp_comp_type comp_type;
+ int comp_id;
+
+ of_id = of_match_node(mtk_ddp_comp_dt_ids, node);
+ if (!of_id)
+ continue;
+
+ if (!of_device_is_available(node)) {
+ dev_dbg(dev, "Skipping disabled component %s\n",
+ node->full_name);
+ continue;
+ }
+
+ comp_type = (enum mtk_ddp_comp_type)of_id->data;
+
+ if (comp_type == MTK_DISP_MUTEX) {
+ private->mutex_node = of_node_get(node);
+ continue;
+ }
+
+ comp_id = mtk_ddp_comp_get_id(node, comp_type);
+ if (comp_id < 0) {
+ dev_warn(dev, "Skipping unknown component %s\n",
+ node->full_name);
+ continue;
+ }
+
+ private->comp_node[comp_id] = of_node_get(node);
+
+ /*
+ * Currently only the OVL, RDMA, DSI, and DPI blocks have
+ * separate component platform drivers and initialize their own
+ * DDP component structure. The others are initialized here.
+ */
+ if (comp_type == MTK_DISP_OVL ||
+ comp_type == MTK_DISP_RDMA ||
+ comp_type == MTK_DSI ||
+ comp_type == MTK_DPI) {
+ dev_info(dev, "Adding component match for %s\n",
+ node->full_name);
+ component_match_add(dev, &match, compare_of, node);
+ } else {
+ struct mtk_ddp_comp *comp;
+
+ comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
+ if (!comp) {
+ ret = -ENOMEM;
+ goto err_node;
+ }
+
+ ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
+ if (ret)
+ goto err_node;
+
+ private->ddp_comp[comp_id] = comp;
+ }
+ }
+
+ if (!private->mutex_node) {
+ dev_err(dev, "Failed to find disp-mutex node\n");
+ ret = -ENODEV;
+ goto err_node;
+ }
+
+ pm_runtime_enable(dev);
+
+ platform_set_drvdata(pdev, private);
+
+ ret = component_master_add_with_match(dev, &mtk_drm_ops, match);
+ if (ret)
+ goto err_pm;
+
+ return 0;
+
+err_pm:
+ pm_runtime_disable(dev);
+err_node:
+ of_node_put(private->mutex_node);
+ for (i = 0; i < DDP_COMPONENT_ID_MAX; i++)
+ of_node_put(private->comp_node[i]);
+ return ret;
+}
+
+static int mtk_drm_remove(struct platform_device *pdev)
+{
+ struct mtk_drm_private *private = platform_get_drvdata(pdev);
+ struct drm_device *drm = private->drm;
+ int i;
+
+ drm_connector_unregister_all(drm);
+ drm_dev_unregister(drm);
+ mtk_drm_kms_deinit(drm);
+ drm_dev_unref(drm);
+
+ component_master_del(&pdev->dev, &mtk_drm_ops);
+ pm_runtime_disable(&pdev->dev);
+ of_node_put(private->mutex_node);
+ for (i = 0; i < DDP_COMPONENT_ID_MAX; i++)
+ of_node_put(private->comp_node[i]);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_drm_sys_suspend(struct device *dev)
+{
+ struct mtk_drm_private *private = dev_get_drvdata(dev);
+ struct drm_device *drm = private->drm;
+
+ drm_kms_helper_poll_disable(drm);
+
+ private->suspend_state = drm_atomic_helper_suspend(drm);
+ if (IS_ERR(private->suspend_state)) {
+ drm_kms_helper_poll_enable(drm);
+ return PTR_ERR(private->suspend_state);
+ }
+
+ DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
+ return 0;
+}
+
+static int mtk_drm_sys_resume(struct device *dev)
+{
+ struct mtk_drm_private *private = dev_get_drvdata(dev);
+ struct drm_device *drm = private->drm;
+
+ drm_atomic_helper_resume(drm, private->suspend_state);
+ drm_kms_helper_poll_enable(drm);
+
+ DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend,
+ mtk_drm_sys_resume);
+
+static const struct of_device_id mtk_drm_of_ids[] = {
+ { .compatible = "mediatek,mt8173-mmsys", },
+ { }
+};
+
+static struct platform_driver mtk_drm_platform_driver = {
+ .probe = mtk_drm_probe,
+ .remove = mtk_drm_remove,
+ .driver = {
+ .name = "mediatek-drm",
+ .of_match_table = mtk_drm_of_ids,
+ .pm = &mtk_drm_pm_ops,
+ },
+};
+
+static struct platform_driver * const mtk_drm_drivers[] = {
+ &mtk_ddp_driver,
+ &mtk_disp_ovl_driver,
+ &mtk_disp_rdma_driver,
+ &mtk_dpi_driver,
+ &mtk_drm_platform_driver,
+ &mtk_dsi_driver,
+ &mtk_mipi_tx_driver,
+};
+
+static int __init mtk_drm_init(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mtk_drm_drivers); i++) {
+ ret = platform_driver_register(mtk_drm_drivers[i]);
+ if (ret < 0) {
+ pr_err("Failed to register %s driver: %d\n",
+ mtk_drm_drivers[i]->driver.name, ret);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (--i >= 0)
+ platform_driver_unregister(mtk_drm_drivers[i]);
+
+ return ret;
+}
+
+static void __exit mtk_drm_exit(void)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(mtk_drm_drivers) - 1; i >= 0; i--)
+ platform_driver_unregister(mtk_drm_drivers[i]);
+}
+
+module_init(mtk_drm_init);
+module_exit(mtk_drm_exit);
+
+MODULE_AUTHOR("YT SHEN <yt.shen@mediatek.com>");
+MODULE_DESCRIPTION("Mediatek SoC DRM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
new file mode 100644
index 000000000..aa9389446
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MTK_DRM_DRV_H
+#define MTK_DRM_DRV_H
+
+#include <linux/io.h>
+#include "mtk_drm_ddp_comp.h"
+
+#define MAX_CRTC 2
+#define MAX_CONNECTOR 2
+
+struct device;
+struct device_node;
+struct drm_crtc;
+struct drm_device;
+struct drm_fb_helper;
+struct drm_property;
+struct regmap;
+
+struct mtk_drm_private {
+ struct drm_device *drm;
+ struct device *dma_dev;
+
+ struct drm_crtc *crtc[MAX_CRTC];
+ unsigned int num_pipes;
+
+ struct device_node *mutex_node;
+ struct device *mutex_dev;
+ void __iomem *config_regs;
+ struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
+ struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
+
+ struct {
+ struct drm_atomic_state *state;
+ struct work_struct work;
+ struct mutex lock;
+ } commit;
+
+ struct drm_atomic_state *suspend_state;
+};
+
+extern struct platform_driver mtk_ddp_driver;
+extern struct platform_driver mtk_disp_ovl_driver;
+extern struct platform_driver mtk_disp_rdma_driver;
+extern struct platform_driver mtk_dpi_driver;
+extern struct platform_driver mtk_dsi_driver;
+extern struct platform_driver mtk_mipi_tx_driver;
+
+#endif /* MTK_DRM_DRV_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
new file mode 100644
index 000000000..147df8539
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include "mtk_drm_drv.h"
+#include "mtk_drm_fb.h"
+#include "mtk_drm_gem.h"
+
+/*
+ * MediaTek specific framebuffer structure.
+ *
+ * @base: drm framebuffer object.
+ * @gem_obj: GEM object backing the framebuffer's single plane.
+ */
+struct mtk_drm_fb {
+ struct drm_framebuffer base;
+ /* For now we only support a single plane */
+ struct drm_gem_object *gem_obj;
+};
+
+#define to_mtk_fb(x) container_of(x, struct mtk_drm_fb, base)
+
+struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb)
+{
+ struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
+
+ return mtk_fb->gem_obj;
+}
+
+static int mtk_drm_fb_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
+
+ return drm_gem_handle_create(file_priv, mtk_fb->gem_obj, handle);
+}
+
+static void mtk_drm_fb_destroy(struct drm_framebuffer *fb)
+{
+ struct mtk_drm_fb *mtk_fb = to_mtk_fb(fb);
+
+ drm_framebuffer_cleanup(fb);
+
+ drm_gem_object_unreference_unlocked(mtk_fb->gem_obj);
+
+ kfree(mtk_fb);
+}
+
+static const struct drm_framebuffer_funcs mtk_drm_fb_funcs = {
+ .create_handle = mtk_drm_fb_create_handle,
+ .destroy = mtk_drm_fb_destroy,
+};
+
+static struct mtk_drm_fb *mtk_drm_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode,
+ struct drm_gem_object *obj)
+{
+ struct mtk_drm_fb *mtk_fb;
+ int ret;
+
+ if (drm_format_num_planes(mode->pixel_format) != 1)
+ return ERR_PTR(-EINVAL);
+
+ mtk_fb = kzalloc(sizeof(*mtk_fb), GFP_KERNEL);
+ if (!mtk_fb)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(&mtk_fb->base, mode);
+
+ mtk_fb->gem_obj = obj;
+
+ ret = drm_framebuffer_init(dev, &mtk_fb->base, &mtk_drm_fb_funcs);
+ if (ret) {
+ DRM_ERROR("failed to initialize framebuffer\n");
+ kfree(mtk_fb);
+ return ERR_PTR(ret);
+ }
+
+ return mtk_fb;
+}
+
+/*
+ * Wait for any exclusive fence in fb's gem object's reservation object.
+ *
+ * Returns -ERESTARTSYS if interrupted, else 0.
+ */
+int mtk_fb_wait(struct drm_framebuffer *fb)
+{
+ struct drm_gem_object *gem;
+ struct reservation_object *resv;
+ long ret;
+
+ if (!fb)
+ return 0;
+
+ gem = mtk_fb_get_gem_obj(fb);
+ if (!gem || !gem->dma_buf || !gem->dma_buf->resv)
+ return 0;
+
+ resv = gem->dma_buf->resv;
+ ret = reservation_object_wait_timeout_rcu(resv, false, true,
+ MAX_SCHEDULE_TIMEOUT);
+ /* MAX_SCHEDULE_TIMEOUT on success, -ERESTARTSYS if interrupted */
+ if (WARN_ON(ret < 0))
+ return ret;
+
+ return 0;
+}
+
+struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
+ struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *cmd)
+{
+ struct mtk_drm_fb *mtk_fb;
+ struct drm_gem_object *gem;
+ unsigned int width = cmd->width;
+ unsigned int height = cmd->height;
+ unsigned int size, bpp;
+ int ret;
+
+ if (drm_format_num_planes(cmd->pixel_format) != 1)
+ return ERR_PTR(-EINVAL);
+
+ gem = drm_gem_object_lookup(file, cmd->handles[0]);
+ if (!gem)
+ return ERR_PTR(-ENOENT);
+
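+	/* Make sure the GEM object is large enough for the requested layout */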
+ bpp = drm_format_plane_cpp(cmd->pixel_format, 0);
+ size = (height - 1) * cmd->pitches[0] + width * bpp;
+ size += cmd->offsets[0];
+
+ if (gem->size < size) {
+ ret = -EINVAL;
+ goto unreference;
+ }
+
+ mtk_fb = mtk_drm_framebuffer_init(dev, cmd, gem);
+ if (IS_ERR(mtk_fb)) {
+ ret = PTR_ERR(mtk_fb);
+ goto unreference;
+ }
+
+ return &mtk_fb->base;
+
+unreference:
+ drm_gem_object_unreference_unlocked(gem);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.h b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
new file mode 100644
index 000000000..9b2ae345a
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MTK_DRM_FB_H
+#define MTK_DRM_FB_H
+
+struct drm_gem_object *mtk_fb_get_gem_obj(struct drm_framebuffer *fb);
+int mtk_fb_wait(struct drm_framebuffer *fb);
+struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
+ struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *cmd);
+
+#endif /* MTK_DRM_FB_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
new file mode 100644
index 000000000..fa2ec0cd0
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <linux/dma-buf.h>
+
+#include "mtk_drm_drv.h"
+#include "mtk_drm_gem.h"
+
+static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
+ unsigned long size)
+{
+ struct mtk_drm_gem_obj *mtk_gem_obj;
+ int ret;
+
+ size = round_up(size, PAGE_SIZE);
+
+ mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
+ if (!mtk_gem_obj)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
+ if (ret < 0) {
+ DRM_ERROR("failed to initialize gem object\n");
+ kfree(mtk_gem_obj);
+ return ERR_PTR(ret);
+ }
+
+ return mtk_gem_obj;
+}
+
+struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
+ size_t size, bool alloc_kmap)
+{
+ struct mtk_drm_private *priv = dev->dev_private;
+ struct mtk_drm_gem_obj *mtk_gem;
+ struct drm_gem_object *obj;
+ int ret;
+
+ mtk_gem = mtk_drm_gem_init(dev, size);
+ if (IS_ERR(mtk_gem))
+ return ERR_CAST(mtk_gem);
+
+ obj = &mtk_gem->base;
+
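+	/*
+	 * Allocate write-combined memory and, unless the caller asked for a
+	 * kernel mapping via alloc_kmap, skip creating one.
+	 */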
+ init_dma_attrs(&mtk_gem->dma_attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &mtk_gem->dma_attrs);
+
+ if (!alloc_kmap)
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &mtk_gem->dma_attrs);
+
+ mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
+ &mtk_gem->dma_addr, GFP_KERNEL,
+ &mtk_gem->dma_attrs);
+ if (!mtk_gem->cookie) {
+ DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
+ ret = -ENOMEM;
+ goto err_gem_free;
+ }
+
+ if (alloc_kmap)
+ mtk_gem->kvaddr = mtk_gem->cookie;
+
+ DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
+ mtk_gem->cookie, &mtk_gem->dma_addr,
+ size);
+
+ return mtk_gem;
+
+err_gem_free:
+ drm_gem_object_release(obj);
+ kfree(mtk_gem);
+ return ERR_PTR(ret);
+}
+
+void mtk_drm_gem_free_object(struct drm_gem_object *obj)
+{
+ struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct mtk_drm_private *priv = obj->dev->dev_private;
+
+ if (mtk_gem->sg)
+ drm_prime_gem_destroy(obj, mtk_gem->sg);
+ else
+ dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
+ mtk_gem->dma_addr, &mtk_gem->dma_attrs);
+
+ /* release file pointer to gem object. */
+ drm_gem_object_release(obj);
+
+ kfree(mtk_gem);
+}
+
+int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct mtk_drm_gem_obj *mtk_gem;
+ int ret;
+
+ args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ args->size = args->pitch * args->height;
+
+ mtk_gem = mtk_drm_gem_create(dev, args->size, false);
+ if (IS_ERR(mtk_gem))
+ return PTR_ERR(mtk_gem);
+
+	/*
+	 * Allocate an ID in the IDR table for this object; the handle
+	 * returned to userspace carries that ID.
+	 */
+ ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
+ if (ret)
+ goto err_handle_create;
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_unreference_unlocked(&mtk_gem->base);
+
+ return 0;
+
+err_handle_create:
+ mtk_drm_gem_free_object(&mtk_gem->base);
+ return ret;
+}
+
+int mtk_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_lookup(file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ return -EINVAL;
+ }
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+ DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
+
+out:
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+}
+
+static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+
+{
+ int ret;
+ struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct mtk_drm_private *priv = obj->dev->dev_private;
+
+	/*
+	 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so
+	 * clear the VM_PFNMAP flag that was set by drm_gem_mmap_obj() and
+	 * drm_gem_mmap().
+	 */
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_pgoff = 0;
+
+ ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
+ mtk_gem->dma_addr, obj->size, &mtk_gem->dma_attrs);
+ if (ret)
+ drm_gem_vm_close(vma);
+
+ return ret;
+}
+
+int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap_obj(obj, obj->size, vma);
+ if (ret)
+ return ret;
+
+ return mtk_drm_gem_object_mmap(obj, vma);
+}
+
+int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
+ obj = vma->vm_private_data;
+
+ return mtk_drm_gem_object_mmap(obj, vma);
+}
+
+/*
+ * Allocate a sg_table for this GEM object.
+ * Note: Both the table's contents, and the sg_table itself must be freed by
+ * the caller.
+ * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
+ */
+struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
+ struct mtk_drm_private *priv = obj->dev->dev_private;
+ struct sg_table *sgt;
+ int ret;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
+ mtk_gem->dma_addr, obj->size,
+ &mtk_gem->dma_attrs);
+ if (ret) {
+ DRM_ERROR("failed to allocate sgt, %d\n", ret);
+ kfree(sgt);
+ return ERR_PTR(ret);
+ }
+
+ return sgt;
+}
+
+struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach, struct sg_table *sg)
+{
+ struct mtk_drm_gem_obj *mtk_gem;
+ int ret;
+ struct scatterlist *s;
+ unsigned int i;
+ dma_addr_t expected;
+
+ mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
+
+ if (IS_ERR(mtk_gem))
+		return ERR_CAST(mtk_gem);
+
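+	/*
+	 * The display hardware scans out from contiguous memory, so reject
+	 * non-contiguous sg tables.
+	 */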
+ expected = sg_dma_address(sg->sgl);
+ for_each_sg(sg->sgl, s, sg->nents, i) {
+ if (sg_dma_address(s) != expected) {
+ DRM_ERROR("sg_table is not contiguous");
+ ret = -EINVAL;
+ goto err_gem_free;
+ }
+ expected = sg_dma_address(s) + sg_dma_len(s);
+ }
+
+ mtk_gem->dma_addr = sg_dma_address(sg->sgl);
+ mtk_gem->sg = sg;
+
+ return &mtk_gem->base;
+
+err_gem_free:
+ kfree(mtk_gem);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.h b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
new file mode 100644
index 000000000..3a2a5624a
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_DRM_GEM_H_
+#define _MTK_DRM_GEM_H_
+
+#include <drm/drm_gem.h>
+
+/*
+ * MediaTek DRM buffer structure.
+ *
+ * @base: GEM object; a handle to it is created with
+ *        drm_gem_handle_create().
+ * @cookie: return value of dma_alloc_attrs(), kept for dma_free_attrs().
+ * @kvaddr: kernel virtual address of the buffer.
+ * @dma_addr: DMA address of the buffer.
+ * @dma_attrs: DMA attributes of the buffer.
+ *
+ * Userspace receives this object as a buffer handle and accesses the
+ * buffer contents through it.
+ */
+struct mtk_drm_gem_obj {
+ struct drm_gem_object base;
+ void *cookie;
+ void *kvaddr;
+ dma_addr_t dma_addr;
+ struct dma_attrs dma_attrs;
+ struct sg_table *sg;
+};
+
+#define to_mtk_gem_obj(x) container_of(x, struct mtk_drm_gem_obj, base)
+
+void mtk_drm_gem_free_object(struct drm_gem_object *gem);
+struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev, size_t size,
+ bool alloc_kmap);
+int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int mtk_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach, struct sg_table *sg);
+
+#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
new file mode 100644
index 000000000..51bc8988f
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: CK Hu <ck.hu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "mtk_drm_crtc.h"
+#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_drv.h"
+#include "mtk_drm_fb.h"
+#include "mtk_drm_gem.h"
+#include "mtk_drm_plane.h"
+
+static const u32 formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565,
+};
+
+static void mtk_plane_enable(struct mtk_drm_plane *mtk_plane, bool enable,
+ dma_addr_t addr, struct drm_rect *dest)
+{
+ struct drm_plane *plane = &mtk_plane->base;
+ struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+ unsigned int pitch, format;
+ int x, y;
+
+ if (WARN_ON(!plane->state || (enable && !plane->state->fb)))
+ return;
+
+ if (plane->state->fb) {
+ pitch = plane->state->fb->pitches[0];
+ format = plane->state->fb->pixel_format;
+ } else {
+ pitch = 0;
+ format = DRM_FORMAT_RGBA8888;
+ }
+
+ x = plane->state->crtc_x;
+ y = plane->state->crtc_y;
+
+ if (x < 0) {
+ addr -= x * 4;
+ x = 0;
+ }
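+ /*
+ * Note: the adjustment above assumes a 4-byte pixel, which matches
+ * the XRGB8888/ARGB8888 formats in the list above but not RGB565.
+ */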
+
+ if (y < 0) {
+ addr -= y * pitch;
+ y = 0;
+ }
+
+ state->pending.enable = enable;
+ state->pending.pitch = pitch;
+ state->pending.format = format;
+ state->pending.addr = addr;
+ state->pending.x = x;
+ state->pending.y = y;
+ state->pending.width = dest->x2 - dest->x1;
+ state->pending.height = dest->y2 - dest->y1;
+ wmb(); /* Make sure the above parameters are set before update */
+ state->pending.dirty = true;
+}
+
+static void mtk_plane_reset(struct drm_plane *plane)
+{
+ struct mtk_plane_state *state;
+
+ if (plane->state) {
+ if (plane->state->fb)
+ drm_framebuffer_unreference(plane->state->fb);
+
+ state = to_mtk_plane_state(plane->state);
+ memset(state, 0, sizeof(*state));
+ } else {
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return;
+ plane->state = &state->base;
+ }
+
+ state->base.plane = plane;
+ state->pending.format = DRM_FORMAT_RGB565;
+}
+
+static struct drm_plane_state *mtk_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct mtk_plane_state *old_state = to_mtk_plane_state(plane->state);
+ struct mtk_plane_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+
+ WARN_ON(state->base.plane != plane);
+
+ state->pending = old_state->pending;
+
+ return &state->base;
+}
+
+static void mtk_drm_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(to_mtk_plane_state(state));
+}
+
+static const struct drm_plane_funcs mtk_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = mtk_plane_reset,
+ .atomic_duplicate_state = mtk_plane_duplicate_state,
+ .atomic_destroy_state = mtk_drm_plane_destroy_state,
+};
+
+static int mtk_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc_state *crtc_state;
+ bool visible;
+ struct drm_rect dest = {
+ .x1 = state->crtc_x,
+ .y1 = state->crtc_y,
+ .x2 = state->crtc_x + state->crtc_w,
+ .y2 = state->crtc_y + state->crtc_h,
+ };
+ struct drm_rect src = {
+ /* 16.16 fixed point */
+ .x1 = state->src_x,
+ .y1 = state->src_y,
+ .x2 = state->src_x + state->src_w,
+ .y2 = state->src_y + state->src_h,
+ };
+ struct drm_rect clip = { 0, };
+
+ if (!fb)
+ return 0;
+
+ if (!mtk_fb_get_gem_obj(fb)) {
+ DRM_DEBUG_KMS("buffer is null\n");
+ return -EFAULT;
+ }
+
+ if (!state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ clip.x2 = crtc_state->mode.hdisplay;
+ clip.y2 = crtc_state->mode.vdisplay;
+
+ return drm_plane_helper_check_update(plane, state->crtc, fb,
+ &src, &dest, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true, &visible);
+}
+
+static void mtk_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+ struct drm_crtc *crtc = state->base.crtc;
+ struct drm_gem_object *gem;
+ struct mtk_drm_gem_obj *mtk_gem;
+ struct mtk_drm_plane *mtk_plane = to_mtk_plane(plane);
+ struct drm_rect dest = {
+ .x1 = state->base.crtc_x,
+ .y1 = state->base.crtc_y,
+ .x2 = state->base.crtc_x + state->base.crtc_w,
+ .y2 = state->base.crtc_y + state->base.crtc_h,
+ };
+ struct drm_rect clip = { 0, };
+
+ if (!crtc)
+ return;
+
+ clip.x2 = state->base.crtc->state->mode.hdisplay;
+ clip.y2 = state->base.crtc->state->mode.vdisplay;
+ drm_rect_intersect(&dest, &clip);
+
+ gem = mtk_fb_get_gem_obj(state->base.fb);
+ mtk_gem = to_mtk_gem_obj(gem);
+ mtk_plane_enable(mtk_plane, true, mtk_gem->dma_addr, &dest);
+}
+
+static void mtk_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+
+ state->pending.enable = false;
+ wmb(); /* Make sure the above parameter is set before update */
+ state->pending.dirty = true;
+}
+
+static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
+ .atomic_check = mtk_plane_atomic_check,
+ .atomic_update = mtk_plane_atomic_update,
+ .atomic_disable = mtk_plane_atomic_disable,
+};
+
+int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane,
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int zpos)
+{
+ int err;
+
+ err = drm_universal_plane_init(dev, &mtk_plane->base, possible_crtcs,
+ &mtk_plane_funcs, formats,
+ ARRAY_SIZE(formats), type, NULL);
+ if (err) {
+ DRM_ERROR("failed to initialize plane\n");
+ return err;
+ }
+
+ drm_plane_helper_add(&mtk_plane->base, &mtk_plane_helper_funcs);
+ mtk_plane->idx = zpos;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
new file mode 100644
index 000000000..72a7b3e4c
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: CK Hu <ck.hu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MTK_DRM_PLANE_H_
+#define _MTK_DRM_PLANE_H_
+
+#include <drm/drm_crtc.h>
+#include <linux/types.h>
+
+struct mtk_drm_plane {
+ struct drm_plane base;
+ unsigned int idx;
+};
+
+struct mtk_plane_pending_state {
+ bool config;
+ bool enable;
+ dma_addr_t addr;
+ unsigned int pitch;
+ unsigned int format;
+ unsigned int x;
+ unsigned int y;
+ unsigned int width;
+ unsigned int height;
+ bool dirty;
+};
+
+struct mtk_plane_state {
+ struct drm_plane_state base;
+ struct mtk_plane_pending_state pending;
+};
+
+static inline struct mtk_drm_plane *to_mtk_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct mtk_drm_plane, base);
+}
+
+static inline struct mtk_plane_state *
+to_mtk_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct mtk_plane_state, base);
+}
+
+int mtk_plane_init(struct drm_device *dev, struct mtk_drm_plane *mtk_plane,
+ unsigned long possible_crtcs, enum drm_plane_type type,
+ unsigned int zpos);
+
+#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
new file mode 100644
index 000000000..769559124
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -0,0 +1,911 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_graph.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <video/videomode.h>
+
+#include "mtk_drm_ddp_comp.h"
+
+#define DSI_VIDEO_FIFO_DEPTH (1920 / 4)
+#define DSI_HOST_FIFO_DEPTH 64
+
+#define DSI_START 0x00
+
+#define DSI_CON_CTRL 0x10
+#define DSI_RESET BIT(0)
+#define DSI_EN BIT(1)
+
+#define DSI_MODE_CTRL 0x14
+#define MODE (3)
+#define CMD_MODE 0
+#define SYNC_PULSE_MODE 1
+#define SYNC_EVENT_MODE 2
+#define BURST_MODE 3
+#define FRM_MODE BIT(16)
+#define MIX_MODE BIT(17)
+
+#define DSI_TXRX_CTRL 0x18
+#define VC_NUM (2 << 0)
+#define LANE_NUM (0xf << 2)
+#define DIS_EOT BIT(6)
+#define NULL_EN BIT(7)
+#define TE_FREERUN BIT(8)
+#define EXT_TE_EN BIT(9)
+#define EXT_TE_EDGE BIT(10)
+#define MAX_RTN_SIZE (0xf << 12)
+#define HSTX_CKLP_EN BIT(16)
+
+#define DSI_PSCTRL 0x1c
+#define DSI_PS_WC 0x3fff
+#define DSI_PS_SEL (3 << 16)
+#define PACKED_PS_16BIT_RGB565 (0 << 16)
+#define LOOSELY_PS_18BIT_RGB666 (1 << 16)
+#define PACKED_PS_18BIT_RGB666 (2 << 16)
+#define PACKED_PS_24BIT_RGB888 (3 << 16)
+
+#define DSI_VSA_NL 0x20
+#define DSI_VBP_NL 0x24
+#define DSI_VFP_NL 0x28
+#define DSI_VACT_NL 0x2C
+#define DSI_HSA_WC 0x50
+#define DSI_HBP_WC 0x54
+#define DSI_HFP_WC 0x58
+
+#define DSI_HSTX_CKL_WC 0x64
+
+#define DSI_PHY_LCCON 0x104
+#define LC_HS_TX_EN BIT(0)
+#define LC_ULPM_EN BIT(1)
+#define LC_WAKEUP_EN BIT(2)
+
+#define DSI_PHY_LD0CON 0x108
+#define LD0_HS_TX_EN BIT(0)
+#define LD0_ULPM_EN BIT(1)
+#define LD0_WAKEUP_EN BIT(2)
+
+#define DSI_PHY_TIMECON0 0x110
+#define LPX (0xff << 0)
+#define HS_PRPR (0xff << 8)
+#define HS_ZERO (0xff << 16)
+#define HS_TRAIL (0xff << 24)
+
+#define DSI_PHY_TIMECON1 0x114
+#define TA_GO (0xff << 0)
+#define TA_SURE (0xff << 8)
+#define TA_GET (0xff << 16)
+#define DA_HS_EXIT (0xff << 24)
+
+#define DSI_PHY_TIMECON2 0x118
+#define CONT_DET (0xff << 0)
+#define CLK_ZERO (0xff << 16)
+#define CLK_TRAIL (0xff << 24)
+
+#define DSI_PHY_TIMECON3 0x11c
+#define CLK_HS_PRPR (0xff << 0)
+#define CLK_HS_POST (0xff << 8)
+#define CLK_HS_EXIT (0xff << 16)
+
+#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
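+/* e.g. NS_TO_CYCLE(80, 6) == 14: 80 ns rounded up to whole 6 ns cycles */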
+
+struct phy;
+
+struct mtk_dsi {
+ struct mtk_ddp_comp ddp_comp;
+ struct device *dev;
+ struct mipi_dsi_host host;
+ struct drm_encoder encoder;
+ struct drm_connector conn;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ struct phy *phy;
+
+ void __iomem *regs;
+
+ struct clk *engine_clk;
+ struct clk *digital_clk;
+ struct clk *hs_clk;
+
+ u32 data_rate;
+
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ unsigned int lanes;
+ struct videomode vm;
+ int refcount;
+ bool enabled;
+};
+
+static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
+{
+ return container_of(e, struct mtk_dsi, encoder);
+}
+
+static inline struct mtk_dsi *connector_to_dsi(struct drm_connector *c)
+{
+ return container_of(c, struct mtk_dsi, conn);
+}
+
+static inline struct mtk_dsi *host_to_dsi(struct mipi_dsi_host *h)
+{
+ return container_of(h, struct mtk_dsi, host);
+}
+
+static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
+{
+ u32 temp = readl(dsi->regs + offset);
+
+ writel((temp & ~mask) | (data & mask), dsi->regs + offset);
+}
+
+static void dsi_phy_timconfig(struct mtk_dsi *dsi)
+{
+ u32 timcon0, timcon1, timcon2, timcon3;
+ unsigned int ui, cycle_time;
+ unsigned int lpx;
+
+ ui = 1000 / dsi->data_rate + 1;
+ cycle_time = 8000 / dsi->data_rate + 1;
+ lpx = 5;
+
+ timcon0 = (8 << 24) | (0xa << 16) | (0x6 << 8) | lpx;
+ timcon1 = (7 << 24) | (5 * lpx << 16) | ((3 * lpx) / 2) << 8 |
+ (4 * lpx);
+ timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
+ (NS_TO_CYCLE(0x150, cycle_time) << 16);
+ timcon3 = (2 * lpx) << 16 | NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8 |
+ NS_TO_CYCLE(0x40, cycle_time);
+
+ writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
+ writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
+ writel(timcon2, dsi->regs + DSI_PHY_TIMECON2);
+ writel(timcon3, dsi->regs + DSI_PHY_TIMECON3);
+}
+
+static void mtk_dsi_enable(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, DSI_EN);
+}
+
+static void mtk_dsi_disable(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
+}
+
+static void mtk_dsi_reset(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
+ mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
+}
+
+static int mtk_dsi_poweron(struct mtk_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ int ret;
+
+ if (++dsi->refcount != 1)
+ return 0;
+
+ /*
+ * data_rate = (pixel_clock / 1000) * pixel_depth * mipi_ratio
+ * pixel_clock is in kHz and data_rate in MHz, hence the division
+ * by 1000. mipi_ratio is a clock coefficient that gives the MIPI
+ * link some margin over the pixel clock; we set mipi_ratio to 1.05.
+ */
+ dsi->data_rate = dsi->vm.pixelclock * 3 * 21 / (1 * 1000 * 10);
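+ /*
+ * Illustrative example (assumed 1080p panel values): a 148500 kHz
+ * pixel clock gives 148500 * 3 * 21 / 10000 = 935 MHz.
+ */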
+
+ ret = clk_set_rate(dsi->hs_clk, dsi->data_rate * 1000000);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set data rate: %d\n", ret);
+ goto err_refcount;
+ }
+
+ phy_power_on(dsi->phy);
+
+ ret = clk_prepare_enable(dsi->engine_clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable engine clock: %d\n", ret);
+ goto err_phy_power_off;
+ }
+
+ ret = clk_prepare_enable(dsi->digital_clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable digital clock: %d\n", ret);
+ goto err_disable_engine_clk;
+ }
+
+ mtk_dsi_enable(dsi);
+ mtk_dsi_reset(dsi);
+ dsi_phy_timconfig(dsi);
+
+ return 0;
+
+err_disable_engine_clk:
+ clk_disable_unprepare(dsi->engine_clk);
+err_phy_power_off:
+ phy_power_off(dsi->phy);
+err_refcount:
+ dsi->refcount--;
+ return ret;
+}
+
+static void dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
+ mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
+}
+
+static void dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
+ mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN);
+ mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0);
+}
+
+static void dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0);
+ mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
+}
+
+static void dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
+ mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN);
+ mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0);
+}
+
+static bool dsi_clk_hs_state(struct mtk_dsi *dsi)
+{
+ u32 tmp_reg1;
+
+ tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON);
+ return !!(tmp_reg1 & LC_HS_TX_EN);
+}
+
+static void dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
+{
+ if (enter && !dsi_clk_hs_state(dsi))
+ mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN);
+ else if (!enter && dsi_clk_hs_state(dsi))
+ mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
+}
+
+static void dsi_set_mode(struct mtk_dsi *dsi)
+{
+ u32 vid_mode = CMD_MODE;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ vid_mode = SYNC_PULSE_MODE;
+
+ if ((dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
+ !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE))
+ vid_mode = BURST_MODE;
+ }
+
+ writel(vid_mode, dsi->regs + DSI_MODE_CTRL);
+}
+
+static void dsi_ps_control_vact(struct mtk_dsi *dsi)
+{
+ struct videomode *vm = &dsi->vm;
+ u32 dsi_buf_bpp, ps_wc;
+ u32 ps_bpp_mode;
+
+ if (dsi->format == MIPI_DSI_FMT_RGB565)
+ dsi_buf_bpp = 2;
+ else
+ dsi_buf_bpp = 3;
+
+ ps_wc = vm->hactive * dsi_buf_bpp;
+ ps_bpp_mode = ps_wc;
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB888:
+ ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
+ break;
+ }
+
+ writel(vm->vactive, dsi->regs + DSI_VACT_NL);
+ writel(ps_bpp_mode, dsi->regs + DSI_PSCTRL);
+ writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
+}
+
+static void dsi_rxtx_control(struct mtk_dsi *dsi)
+{
+ u32 tmp_reg;
+
+ switch (dsi->lanes) {
+ case 1:
+ tmp_reg = 1 << 2;
+ break;
+ case 2:
+ tmp_reg = 3 << 2;
+ break;
+ case 3:
+ tmp_reg = 7 << 2;
+ break;
+ case 4:
+ tmp_reg = 0xf << 2;
+ break;
+ default:
+ tmp_reg = 0xf << 2;
+ break;
+ }
+
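+ /* tmp_reg holds the LANE_NUM field: one bit per enabled data lane */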
+ writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
+}
+
+static void dsi_ps_control(struct mtk_dsi *dsi)
+{
+ unsigned int dsi_tmp_buf_bpp;
+ u32 tmp_reg;
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB888:
+ tmp_reg = PACKED_PS_24BIT_RGB888;
+ dsi_tmp_buf_bpp = 3;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ tmp_reg = LOOSELY_PS_18BIT_RGB666;
+ dsi_tmp_buf_bpp = 3;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ tmp_reg = PACKED_PS_18BIT_RGB666;
+ dsi_tmp_buf_bpp = 3;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ tmp_reg = PACKED_PS_16BIT_RGB565;
+ dsi_tmp_buf_bpp = 2;
+ break;
+ default:
+ tmp_reg = PACKED_PS_24BIT_RGB888;
+ dsi_tmp_buf_bpp = 3;
+ break;
+ }
+
+ tmp_reg |= (dsi->vm.hactive * dsi_tmp_buf_bpp) & DSI_PS_WC;
+ writel(tmp_reg, dsi->regs + DSI_PSCTRL);
+}
+
+static void dsi_config_vdo_timing(struct mtk_dsi *dsi)
+{
+ unsigned int horizontal_sync_active_byte;
+ unsigned int horizontal_backporch_byte;
+ unsigned int horizontal_frontporch_byte;
+ unsigned int dsi_tmp_buf_bpp;
+
+ struct videomode *vm = &dsi->vm;
+
+ if (dsi->format == MIPI_DSI_FMT_RGB565)
+ dsi_tmp_buf_bpp = 2;
+ else
+ dsi_tmp_buf_bpp = 3;
+
+ writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
+ writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
+ writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
+ writel(vm->vactive, dsi->regs + DSI_VACT_NL);
+
+ horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ horizontal_backporch_byte =
+ (vm->hback_porch * dsi_tmp_buf_bpp - 10);
+ else
+ horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) *
+ dsi_tmp_buf_bpp - 10);
+
+ horizontal_frontporch_byte = (vm->hfront_porch * dsi_tmp_buf_bpp - 12);
+
+ writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
+ writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
+ writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
+
+ dsi_ps_control(dsi);
+}
+
+static void mtk_dsi_start(struct mtk_dsi *dsi)
+{
+ writel(0, dsi->regs + DSI_START);
+ writel(1, dsi->regs + DSI_START);
+}
+
+static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
+{
+ if (WARN_ON(dsi->refcount == 0))
+ return;
+
+ if (--dsi->refcount != 0)
+ return;
+
+ dsi_lane0_ulp_mode_enter(dsi);
+ dsi_clk_ulp_mode_enter(dsi);
+
+ mtk_dsi_disable(dsi);
+
+ clk_disable_unprepare(dsi->engine_clk);
+ clk_disable_unprepare(dsi->digital_clk);
+
+ phy_power_off(dsi->phy);
+}
+
+static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
+{
+ int ret;
+
+ if (dsi->enabled)
+ return;
+
+ if (dsi->panel) {
+ if (drm_panel_prepare(dsi->panel)) {
+ DRM_ERROR("failed to setup the panel\n");
+ return;
+ }
+ }
+
+ ret = mtk_dsi_poweron(dsi);
+ if (ret < 0) {
+ DRM_ERROR("failed to power on dsi\n");
+ return;
+ }
+
+ dsi_rxtx_control(dsi);
+
+ dsi_clk_ulp_mode_leave(dsi);
+ dsi_lane0_ulp_mode_leave(dsi);
+ dsi_clk_hs_mode(dsi, false);
+ dsi_set_mode(dsi);
+
+ dsi_ps_control_vact(dsi);
+ dsi_config_vdo_timing(dsi);
+
+ dsi_set_mode(dsi);
+ dsi_clk_hs_mode(dsi, true);
+
+ mtk_dsi_start(dsi);
+
+ dsi->enabled = true;
+}
+
+static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
+{
+ if (!dsi->enabled)
+ return;
+
+ if (dsi->panel) {
+ if (drm_panel_disable(dsi->panel)) {
+ DRM_ERROR("failed to disable the panel\n");
+ return;
+ }
+ }
+
+ mtk_dsi_poweroff(dsi);
+
+ dsi->enabled = false;
+}
+
+static void mtk_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs mtk_dsi_encoder_funcs = {
+ .destroy = mtk_dsi_encoder_destroy,
+};
+
+static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void mtk_dsi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ struct mtk_dsi *dsi = encoder_to_dsi(encoder);
+
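+ /*
+ * Convert the DRM display mode into a videomode: for each axis,
+ * front porch = sync_start - display, sync length = sync_end -
+ * sync_start, and back porch = total - sync_end.
+ */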
+ dsi->vm.pixelclock = adjusted->clock;
+ dsi->vm.hactive = adjusted->hdisplay;
+ dsi->vm.hback_porch = adjusted->htotal - adjusted->hsync_end;
+ dsi->vm.hfront_porch = adjusted->hsync_start - adjusted->hdisplay;
+ dsi->vm.hsync_len = adjusted->hsync_end - adjusted->hsync_start;
+
+ dsi->vm.vactive = adjusted->vdisplay;
+ dsi->vm.vback_porch = adjusted->vtotal - adjusted->vsync_end;
+ dsi->vm.vfront_porch = adjusted->vsync_start - adjusted->vdisplay;
+ dsi->vm.vsync_len = adjusted->vsync_end - adjusted->vsync_start;
+}
+
+static void mtk_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct mtk_dsi *dsi = encoder_to_dsi(encoder);
+
+ mtk_output_dsi_disable(dsi);
+}
+
+static void mtk_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct mtk_dsi *dsi = encoder_to_dsi(encoder);
+
+ mtk_output_dsi_enable(dsi);
+}
+
+static enum drm_connector_status mtk_dsi_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
+{
+ struct mtk_dsi *dsi = connector_to_dsi(connector);
+
+ return drm_panel_get_modes(dsi->panel);
+}
+
+static struct drm_encoder *mtk_dsi_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct mtk_dsi *dsi = connector_to_dsi(connector);
+
+ return &dsi->encoder;
+}
+
+static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
+ .mode_fixup = mtk_dsi_encoder_mode_fixup,
+ .mode_set = mtk_dsi_encoder_mode_set,
+ .disable = mtk_dsi_encoder_disable,
+ .enable = mtk_dsi_encoder_enable,
+};
+
+static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = mtk_dsi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs
+ mtk_dsi_connector_helper_funcs = {
+ .get_modes = mtk_dsi_connector_get_modes,
+ .best_encoder = mtk_dsi_connector_best_encoder,
+};
+
+static int mtk_drm_attach_bridge(struct drm_bridge *bridge,
+ struct drm_encoder *encoder)
+{
+ int ret;
+
+ if (!bridge)
+ return -ENOENT;
+
+ encoder->bridge = bridge;
+ bridge->encoder = encoder;
+ ret = drm_bridge_attach(encoder->dev, bridge);
+ if (ret) {
+ DRM_ERROR("Failed to attach bridge to drm\n");
+ encoder->bridge = NULL;
+ bridge->encoder = NULL;
+ }
+
+ return ret;
+}
+
+static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi)
+{
+ int ret;
+
+ ret = drm_connector_init(drm, &dsi->conn, &mtk_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (ret) {
+ DRM_ERROR("Failed to connector init to drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(&dsi->conn, &mtk_dsi_connector_helper_funcs);
+
+ dsi->conn.dpms = DRM_MODE_DPMS_OFF;
+ drm_mode_connector_attach_encoder(&dsi->conn, &dsi->encoder);
+
+ if (dsi->panel) {
+ ret = drm_panel_attach(dsi->panel, &dsi->conn);
+ if (ret) {
+ DRM_ERROR("Failed to attach panel to drm\n");
+ goto err_connector_cleanup;
+ }
+ }
+
+ return 0;
+
+err_connector_cleanup:
+ drm_connector_cleanup(&dsi->conn);
+ return ret;
+}
+
+static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
+{
+ int ret;
+
+ ret = drm_encoder_init(drm, &dsi->encoder, &mtk_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to encoder init to drm\n");
+ return ret;
+ }
+ drm_encoder_helper_add(&dsi->encoder, &mtk_dsi_encoder_helper_funcs);
+
+ /*
+ * Currently display data paths are statically assigned to a crtc each.
+ * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0
+ */
+ dsi->encoder.possible_crtcs = 1;
+
+ /* If there's a bridge, attach to it and let it create the connector */
+ ret = mtk_drm_attach_bridge(dsi->bridge, &dsi->encoder);
+ if (ret) {
+ /* Otherwise create our own connector and attach to a panel */
+ ret = mtk_dsi_create_connector(drm, dsi);
+ if (ret)
+ goto err_encoder_cleanup;
+ }
+
+ return 0;
+
+err_encoder_cleanup:
+ drm_encoder_cleanup(&dsi->encoder);
+ return ret;
+}
+
+static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
+{
+ drm_encoder_cleanup(&dsi->encoder);
+ /* Skip connector cleanup if creation was delegated to the bridge */
+ if (dsi->conn.dev)
+ drm_connector_cleanup(&dsi->conn);
+}
+
+static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
+{
+ struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);
+
+ mtk_dsi_poweron(dsi);
+}
+
+static void mtk_dsi_ddp_stop(struct mtk_ddp_comp *comp)
+{
+ struct mtk_dsi *dsi = container_of(comp, struct mtk_dsi, ddp_comp);
+
+ mtk_dsi_poweroff(dsi);
+}
+
+static const struct mtk_ddp_comp_funcs mtk_dsi_funcs = {
+ .start = mtk_dsi_ddp_start,
+ .stop = mtk_dsi_ddp_stop,
+};
+
+static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct mtk_dsi *dsi = host_to_dsi(host);
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+ dsi->mode_flags = device->mode_flags;
+
+ if (dsi->conn.dev)
+ drm_helper_hpd_irq_event(dsi->conn.dev);
+
+ return 0;
+}
+
+static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct mtk_dsi *dsi = host_to_dsi(host);
+
+ if (dsi->conn.dev)
+ drm_helper_hpd_irq_event(dsi->conn.dev);
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops mtk_dsi_ops = {
+ .attach = mtk_dsi_host_attach,
+ .detach = mtk_dsi_host_detach,
+};
+
+static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ int ret;
+ struct drm_device *drm = data;
+ struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+ ret = mtk_ddp_comp_register(drm, &dsi->ddp_comp);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register component %s: %d\n",
+ dev->of_node->full_name, ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_host_register(&dsi->host);
+ if (ret < 0) {
+ dev_err(dev, "failed to register DSI host: %d\n", ret);
+ goto err_ddp_comp_unregister;
+ }
+
+ ret = mtk_dsi_create_conn_enc(drm, dsi);
+ if (ret) {
+ DRM_ERROR("Encoder create failed with %d\n", ret);
+ goto err_unregister;
+ }
+
+ return 0;
+
+err_unregister:
+ mipi_dsi_host_unregister(&dsi->host);
+err_ddp_comp_unregister:
+ mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
+ return ret;
+}
+
+static void mtk_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = data;
+ struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+ mtk_dsi_destroy_conn_enc(dsi);
+ mipi_dsi_host_unregister(&dsi->host);
+ mtk_ddp_comp_unregister(drm, &dsi->ddp_comp);
+}
+
+static const struct component_ops mtk_dsi_component_ops = {
+ .bind = mtk_dsi_bind,
+ .unbind = mtk_dsi_unbind,
+};
+
+static int mtk_dsi_probe(struct platform_device *pdev)
+{
+ struct mtk_dsi *dsi;
+ struct device *dev = &pdev->dev;
+ struct device_node *remote_node, *endpoint;
+ struct resource *regs;
+ int comp_id;
+ int ret;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->host.ops = &mtk_dsi_ops;
+ dsi->host.dev = dev;
+
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (endpoint) {
+ remote_node = of_graph_get_remote_port_parent(endpoint);
+ if (!remote_node) {
+ dev_err(dev, "No panel connected\n");
+ return -ENODEV;
+ }
+
+ dsi->bridge = of_drm_find_bridge(remote_node);
+ dsi->panel = of_drm_find_panel(remote_node);
+ of_node_put(remote_node);
+ if (!dsi->bridge && !dsi->panel) {
+ dev_info(dev, "Waiting for bridge or panel driver\n");
+ return -EPROBE_DEFER;
+ }
+ }
+
+ dsi->engine_clk = devm_clk_get(dev, "engine");
+ if (IS_ERR(dsi->engine_clk)) {
+ ret = PTR_ERR(dsi->engine_clk);
+ dev_err(dev, "Failed to get engine clock: %d\n", ret);
+ return ret;
+ }
+
+ dsi->digital_clk = devm_clk_get(dev, "digital");
+ if (IS_ERR(dsi->digital_clk)) {
+ ret = PTR_ERR(dsi->digital_clk);
+ dev_err(dev, "Failed to get digital clock: %d\n", ret);
+ return ret;
+ }
+
+ dsi->hs_clk = devm_clk_get(dev, "hs");
+ if (IS_ERR(dsi->hs_clk)) {
+ ret = PTR_ERR(dsi->hs_clk);
+ dev_err(dev, "Failed to get hs clock: %d\n", ret);
+ return ret;
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->regs = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(dsi->regs)) {
+ ret = PTR_ERR(dsi->regs);
+ dev_err(dev, "Failed to ioremap memory: %d\n", ret);
+ return ret;
+ }
+
+ dsi->phy = devm_phy_get(dev, "dphy");
+ if (IS_ERR(dsi->phy)) {
+ ret = PTR_ERR(dsi->phy);
+ dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret);
+ return ret;
+ }
+
+ comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI);
+ if (comp_id < 0) {
+ dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
+ return comp_id;
+ }
+
+ ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id,
+ &mtk_dsi_funcs);
+ if (ret) {
+ dev_err(dev, "Failed to initialize component: %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, dsi);
+
+ return component_add(&pdev->dev, &mtk_dsi_component_ops);
+}
+
+static int mtk_dsi_remove(struct platform_device *pdev)
+{
+ struct mtk_dsi *dsi = platform_get_drvdata(pdev);
+
+ mtk_output_dsi_disable(dsi);
+ component_del(&pdev->dev, &mtk_dsi_component_ops);
+
+ return 0;
+}
+
+static const struct of_device_id mtk_dsi_of_match[] = {
+ { .compatible = "mediatek,mt8173-dsi" },
+ { },
+};
+
+struct platform_driver mtk_dsi_driver = {
+ .probe = mtk_dsi_probe,
+ .remove = mtk_dsi_remove,
+ .driver = {
+ .name = "mtk-dsi",
+ .of_match_table = mtk_dsi_of_match,
+ },
+};
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
new file mode 100644
index 000000000..cf8f38d39
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+
+#define MIPITX_DSI_CON 0x00
+#define RG_DSI_LDOCORE_EN BIT(0)
+#define RG_DSI_CKG_LDOOUT_EN BIT(1)
+#define RG_DSI_BCLK_SEL (3 << 2)
+#define RG_DSI_LD_IDX_SEL (7 << 4)
+#define RG_DSI_PHYCLK_SEL (2 << 8)
+#define RG_DSI_DSICLK_FREQ_SEL BIT(10)
+#define RG_DSI_LPTX_CLMP_EN BIT(11)
+
+#define MIPITX_DSI_CLOCK_LANE 0x04
+#define MIPITX_DSI_DATA_LANE0 0x08
+#define MIPITX_DSI_DATA_LANE1 0x0c
+#define MIPITX_DSI_DATA_LANE2 0x10
+#define MIPITX_DSI_DATA_LANE3 0x14
+#define RG_DSI_LNTx_LDOOUT_EN BIT(0)
+#define RG_DSI_LNTx_CKLANE_EN BIT(1)
+#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2)
+#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3)
+#define RG_DSI_LNTx_LPTX_IMINUS BIT(4)
+#define RG_DSI_LNTx_LPCD_IPLUS BIT(5)
+#define RG_DSI_LNTx_LPCD_IMINUS BIT(6)
+#define RG_DSI_LNTx_RT_CODE (0xf << 8)
+
+#define MIPITX_DSI_TOP_CON 0x40
+#define RG_DSI_LNT_INTR_EN BIT(0)
+#define RG_DSI_LNT_HS_BIAS_EN BIT(1)
+#define RG_DSI_LNT_IMP_CAL_EN BIT(2)
+#define RG_DSI_LNT_TESTMODE_EN BIT(3)
+#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4)
+#define RG_DSI_LNT_AIO_SEL (7 << 8)
+#define RG_DSI_PAD_TIE_LOW_EN BIT(11)
+#define RG_DSI_DEBUG_INPUT_EN BIT(12)
+#define RG_DSI_PRESERVE (7 << 13)
+
+#define MIPITX_DSI_BG_CON 0x44
+#define RG_DSI_BG_CORE_EN BIT(0)
+#define RG_DSI_BG_CKEN BIT(1)
+#define RG_DSI_BG_DIV (0x3 << 2)
+#define RG_DSI_BG_FAST_CHARGE BIT(4)
+#define RG_DSI_VOUT_MSK (0x3ffff << 5)
+#define RG_DSI_V12_SEL (7 << 5)
+#define RG_DSI_V10_SEL (7 << 8)
+#define RG_DSI_V072_SEL (7 << 11)
+#define RG_DSI_V04_SEL (7 << 14)
+#define RG_DSI_V032_SEL (7 << 17)
+#define RG_DSI_V02_SEL (7 << 20)
+#define RG_DSI_BG_R1_TRIM (0xf << 24)
+#define RG_DSI_BG_R2_TRIM (0xf << 28)
+
+#define MIPITX_DSI_PLL_CON0 0x50
+#define RG_DSI_MPPLL_PLL_EN BIT(0)
+#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1)
+#define RG_DSI_MPPLL_PREDIV (3 << 1)
+#define RG_DSI_MPPLL_TXDIV0 (3 << 3)
+#define RG_DSI_MPPLL_TXDIV1 (3 << 5)
+#define RG_DSI_MPPLL_POSDIV (7 << 7)
+#define RG_DSI_MPPLL_MONVC_EN BIT(10)
+#define RG_DSI_MPPLL_MONREF_EN BIT(11)
+#define RG_DSI_MPPLL_VOD_EN BIT(12)
+
+#define MIPITX_DSI_PLL_CON1 0x54
+#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0)
+#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1)
+#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2)
+#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16)
+
+#define MIPITX_DSI_PLL_CON2 0x58
+
+#define MIPITX_DSI_PLL_PWR 0x68
+#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
+#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
+#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8)
+
+#define MIPITX_DSI_SW_CTRL 0x80
+#define SW_CTRL_EN BIT(0)
+
+#define MIPITX_DSI_SW_CTRL_CON0 0x84
+#define SW_LNTC_LPTX_PRE_OE BIT(0)
+#define SW_LNTC_LPTX_OE BIT(1)
+#define SW_LNTC_LPTX_P BIT(2)
+#define SW_LNTC_LPTX_N BIT(3)
+#define SW_LNTC_HSTX_PRE_OE BIT(4)
+#define SW_LNTC_HSTX_OE BIT(5)
+#define SW_LNTC_HSTX_ZEROCLK BIT(6)
+#define SW_LNT0_LPTX_PRE_OE BIT(7)
+#define SW_LNT0_LPTX_OE BIT(8)
+#define SW_LNT0_LPTX_P BIT(9)
+#define SW_LNT0_LPTX_N BIT(10)
+#define SW_LNT0_HSTX_PRE_OE BIT(11)
+#define SW_LNT0_HSTX_OE BIT(12)
+#define SW_LNT0_LPRX_EN BIT(13)
+#define SW_LNT1_LPTX_PRE_OE BIT(14)
+#define SW_LNT1_LPTX_OE BIT(15)
+#define SW_LNT1_LPTX_P BIT(16)
+#define SW_LNT1_LPTX_N BIT(17)
+#define SW_LNT1_HSTX_PRE_OE BIT(18)
+#define SW_LNT1_HSTX_OE BIT(19)
+#define SW_LNT2_LPTX_PRE_OE BIT(20)
+#define SW_LNT2_LPTX_OE BIT(21)
+#define SW_LNT2_LPTX_P BIT(22)
+#define SW_LNT2_LPTX_N BIT(23)
+#define SW_LNT2_HSTX_PRE_OE BIT(24)
+#define SW_LNT2_HSTX_OE BIT(25)
+
+struct mtk_mipi_tx {
+ struct device *dev;
+ void __iomem *regs;
+ unsigned int data_rate;
+ struct clk_hw pll_hw;
+ struct clk *pll;
+};
+
+static inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw)
+{
+ return container_of(hw, struct mtk_mipi_tx, pll_hw);
+}
+
+static void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 bits)
+{
+ u32 temp = readl(mipi_tx->regs + offset);
+
+ writel(temp & ~bits, mipi_tx->regs + offset);
+}
+
+static void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 bits)
+{
+ u32 temp = readl(mipi_tx->regs + offset);
+
+ writel(temp | bits, mipi_tx->regs + offset);
+}
+
+static void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
+ u32 mask, u32 data)
+{
+ u32 temp = readl(mipi_tx->regs + offset);
+
+ writel((temp & ~mask) | (data & mask), mipi_tx->regs + offset);
+}
+
+static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+ unsigned int txdiv, txdiv0, txdiv1;
+ u64 pcw;
+
+ dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
+
+ if (mipi_tx->data_rate >= 500000000) {
+ txdiv = 1;
+ txdiv0 = 0;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate >= 250000000) {
+ txdiv = 2;
+ txdiv0 = 1;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate >= 125000000) {
+ txdiv = 4;
+ txdiv0 = 2;
+ txdiv1 = 0;
+ } else if (mipi_tx->data_rate > 62000000) {
+ txdiv = 8;
+ txdiv0 = 2;
+ txdiv1 = 1;
+ } else if (mipi_tx->data_rate >= 50000000) {
+ txdiv = 16;
+ txdiv0 = 2;
+ txdiv1 = 2;
+ } else {
+ return -EINVAL;
+ }
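+ /*
+ * Note: txdiv == (1 << txdiv0) * (1 << txdiv1); the divider is chosen
+ * so that data_rate * txdiv stays at or above roughly 500 MHz.
+ */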
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON,
+ RG_DSI_VOUT_MSK |
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN,
+ (4 << 20) | (4 << 17) | (4 << 14) |
+ (4 << 11) | (4 << 8) | (4 << 5) |
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+ usleep_range(30, 100);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN,
+ (8 << 4) | RG_DSI_LNT_HS_BIAS_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_PWR_ON |
+ RG_DSI_MPPLL_SDM_ISO_EN,
+ RG_DSI_MPPLL_SDM_PWR_ON);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_PLL_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 |
+ RG_DSI_MPPLL_PREDIV,
+ (txdiv0 << 3) | (txdiv1 << 5));
+
+ /*
+ * PLL PCW configuration:
+ * PCW bits 24~30 = integer part of pcw
+ * PCW bits 0~23 = fractional part of pcw
+ * pcw = data_rate * 4 * txdiv / (ref_clk * 2)
+ * The post divider is fixed at 4, hence the factor of 4 on data_rate;
+ * ref_clk is 26 MHz.
+ */
+ pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24,
+ 26000000);
+ writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2);
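+ /*
+ * Worked example (assumed rate): data_rate = 500 MHz with txdiv = 1
+ * gives pcw = 500000000 * 2 / 26000000 ~= 38.46, i.e. integer part 38
+ * in bits 24~30 and the remaining fraction in bits 0~23.
+ */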
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+ RG_DSI_MPPLL_SDM_FRA_EN);
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN);
+
+ usleep_range(20, 100);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
+ RG_DSI_MPPLL_SDM_SSC_EN);
+
+ return 0;
+}
+
+static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+ dev_dbg(mipi_tx->dev, "unprepare\n");
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_PLL_EN);
+
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
+ RG_DSI_MPPLL_SDM_ISO_EN |
+ RG_DSI_MPPLL_SDM_PWR_ON,
+ RG_DSI_MPPLL_SDM_ISO_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_LNT_HS_BIAS_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON,
+ RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON,
+ RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
+ RG_DSI_MPPLL_DIV_MSK);
+}
+
+static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clamp_val(rate, 50000000, 1250000000);
+}
+
+static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+ dev_dbg(mipi_tx->dev, "set rate: %lu Hz\n", rate);
+
+ mipi_tx->data_rate = rate;
+
+ return 0;
+}
+
+static unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
+
+ return mipi_tx->data_rate;
+}
+
+static const struct clk_ops mtk_mipi_tx_pll_ops = {
+ .prepare = mtk_mipi_tx_pll_prepare,
+ .unprepare = mtk_mipi_tx_pll_unprepare,
+ .round_rate = mtk_mipi_tx_pll_round_rate,
+ .set_rate = mtk_mipi_tx_pll_set_rate,
+ .recalc_rate = mtk_mipi_tx_pll_recalc_rate,
+};
+
+static int mtk_mipi_tx_power_on_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ unsigned int reg;
+
+ for (reg = MIPITX_DSI_CLOCK_LANE;
+ reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+ mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+
+ mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
+
+ return 0;
+}
+
+static int mtk_mipi_tx_power_on(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ int ret;
+
+ /* Power up core and enable PLL */
+ ret = clk_prepare_enable(mipi_tx->pll);
+ if (ret < 0)
+ return ret;
+
+ /* Enable DSI Lane LDO outputs, disable pad tie low */
+ mtk_mipi_tx_power_on_signal(phy);
+
+ return 0;
+}
+
+static void mtk_mipi_tx_power_off_signal(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+ unsigned int reg;
+
+ mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
+ RG_DSI_PAD_TIE_LOW_EN);
+
+ for (reg = MIPITX_DSI_CLOCK_LANE;
+ reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
+ mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN);
+}
+
+static int mtk_mipi_tx_power_off(struct phy *phy)
+{
+ struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
+
+ /* Enable pad tie low, disable DSI Lane LDO outputs */
+ mtk_mipi_tx_power_off_signal(phy);
+
+ /* Disable PLL and power down core */
+ clk_disable_unprepare(mipi_tx->pll);
+
+ return 0;
+}
+
+static const struct phy_ops mtk_mipi_tx_ops = {
+ .power_on = mtk_mipi_tx_power_on,
+ .power_off = mtk_mipi_tx_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int mtk_mipi_tx_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_mipi_tx *mipi_tx;
+ struct resource *mem;
+ struct clk *ref_clk;
+ const char *ref_clk_name;
+ struct clk_init_data clk_init = {
+ .ops = &mtk_mipi_tx_pll_ops,
+ .num_parents = 1,
+ .parent_names = (const char * const *)&ref_clk_name,
+ .flags = CLK_SET_RATE_GATE,
+ };
+ struct phy *phy;
+ struct phy_provider *phy_provider;
+ int ret;
+
+ mipi_tx = devm_kzalloc(dev, sizeof(*mipi_tx), GFP_KERNEL);
+ if (!mipi_tx)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mipi_tx->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(mipi_tx->regs)) {
+ ret = PTR_ERR(mipi_tx->regs);
+ dev_err(dev, "Failed to get memory resource: %d\n", ret);
+ return ret;
+ }
+
+ ref_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ref_clk)) {
+ ret = PTR_ERR(ref_clk);
+ dev_err(dev, "Failed to get reference clock: %d\n", ret);
+ return ret;
+ }
+ ref_clk_name = __clk_get_name(ref_clk);
+
+ ret = of_property_read_string(dev->of_node, "clock-output-names",
+ &clk_init.name);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
+ return ret;
+ }
+
+ mipi_tx->pll_hw.init = &clk_init;
+ mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw);
+ if (IS_ERR(mipi_tx->pll)) {
+ ret = PTR_ERR(mipi_tx->pll);
+ dev_err(dev, "Failed to register PLL: %d\n", ret);
+ return ret;
+ }
+
+ phy = devm_phy_create(dev, NULL, &mtk_mipi_tx_ops);
+ if (IS_ERR(phy)) {
+ ret = PTR_ERR(phy);
+ dev_err(dev, "Failed to create MIPI D-PHY: %d\n", ret);
+ return ret;
+ }
+ phy_set_drvdata(phy, mipi_tx);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ mipi_tx->dev = dev;
+
+ return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
+ mipi_tx->pll);
+}
+
+static int mtk_mipi_tx_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static const struct of_device_id mtk_mipi_tx_match[] = {
+ { .compatible = "mediatek,mt8173-mipi-tx", },
+ {},
+};
+
+struct platform_driver mtk_mipi_tx_driver = {
+ .probe = mtk_mipi_tx_probe,
+ .remove = mtk_mipi_tx_remove,
+ .driver = {
+ .name = "mediatek-mipi-tx",
+ .of_match_table = mtk_mipi_tx_match,
+ },
+};
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index a7bf6a90e..2ac3fcbfe 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -75,7 +75,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
return 0;
}
- obj = drm_gem_object_lookup(dev, file_priv, handle);
+ obj = drm_gem_object_lookup(file_priv, handle);
if (!obj)
return -ENOENT;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index b0af77454..ebb470ff7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -116,10 +116,8 @@ static struct pci_driver mgag200_pci_driver = {
static int __init mgag200_init(void)
{
-#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && mgag200_modeset == -1)
return -EINVAL;
-#endif
if (mgag200_modeset == 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 205b2801d..3e02ac207 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -281,7 +281,7 @@ static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
{
int ret;
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 9147444d5..615cbb08b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -53,7 +53,7 @@ mgag200_user_framebuffer_create(struct drm_device *dev,
struct mga_framebuffer *mga_fb;
int ret;
- obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
+ obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
@@ -358,7 +358,7 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
struct drm_gem_object *obj;
struct mgag200_bo *bo;
- obj = drm_gem_object_lookup(dev, file, handle);
+ obj = drm_gem_object_lookup(file, handle);
if (obj == NULL)
return -ENOENT;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 05108b505..9d5083d0f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -245,6 +245,8 @@ struct ttm_bo_driver mgag200_bo_driver = {
.verify_access = mgag200_bo_verify_access,
.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
.io_mem_free = &mgag200_ttm_io_mem_free,
+ .lru_tail = &ttm_bo_default_lru_tail,
+ .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int mgag200_mm_init(struct mga_device *mdev)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 215495c27..167a4971f 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -23,6 +23,13 @@ config DRM_MSM_REGISTER_LOGGING
that can be parsed by envytools demsm tool. If enabled, register
logging can be switched on via msm.reglog=y module param.
+config DRM_MSM_HDMI_HDCP
+ bool "Enable HDMI HDCP support in MSM DRM driver"
+ depends on DRM_MSM && QCOM_SCM
+ default y
+ help
+ Choose this option to enable the HDCP state machine.
+
config DRM_MSM_DSI
bool "Enable DSI support in MSM DRM driver"
depends on DRM_MSM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index ddb4c9d09..60cb02624 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -10,7 +10,6 @@ msm-y := \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
hdmi/hdmi_connector.o \
- hdmi/hdmi_hdcp.o \
hdmi/hdmi_i2c.o \
hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \
@@ -40,8 +39,10 @@ msm-y := \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
msm_atomic.o \
+ msm_debugfs.o \
msm_drv.o \
msm_fb.o \
+ msm_fence.o \
msm_gem.o \
msm_gem_prime.o \
msm_gem_submit.o \
@@ -56,6 +57,8 @@ msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
+msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
+
msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
mdp/mdp4/mdp4_dsi_encoder.o \
dsi/dsi_cfg.o \
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 8880f3eae..7e25f94af 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -120,8 +120,8 @@ void adreno_recover(struct msm_gpu *gpu)
/* reset ringbuffer: */
gpu->rb->cur = gpu->rb->start;
- /* reset completed fence seqno, just discard anything pending: */
- adreno_gpu->memptrs->fence = gpu->submitted_fence;
+ /* reset completed fence seqno: */
+ adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
adreno_gpu->memptrs->rptr = 0;
adreno_gpu->memptrs->wptr = 0;
@@ -133,7 +133,7 @@ void adreno_recover(struct msm_gpu *gpu)
}
}
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -168,7 +168,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT2(ring);
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
- OUT_RING(ring, submit->fence);
+ OUT_RING(ring, submit->fence->seqno);
if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
/* Flush HLSQ lazy updates to make sure there is nothing
@@ -185,7 +185,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
OUT_RING(ring, rbmemptr(adreno_gpu, fence));
- OUT_RING(ring, submit->fence);
+ OUT_RING(ring, submit->fence->seqno);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
OUT_PKT3(ring, CP_INTERRUPT, 1);
@@ -212,8 +212,6 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
#endif
gpu->funcs->flush(gpu);
-
- return 0;
}
void adreno_flush(struct msm_gpu *gpu)
@@ -254,7 +252,7 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
adreno_gpu->rev.patchid);
seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->submitted_fence);
+ gpu->fctx->last_fence);
seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu));
seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
@@ -295,7 +293,7 @@ void adreno_dump_info(struct msm_gpu *gpu)
adreno_gpu->rev.patchid);
printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->submitted_fence);
+ gpu->fctx->last_fence);
printk("rptr: %d\n", get_rptr(adreno_gpu));
printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
printk("rb wptr: %d\n", get_wptr(gpu->rb));
@@ -410,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
}
adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
- if (!adreno_gpu->memptrs) {
+ if (IS_ERR(adreno_gpu->memptrs)) {
dev_err(drm->dev, "could not vmap memptrs\n");
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 1d07511f4..a54f6e036 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -238,7 +238,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
uint32_t adreno_last_fence(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu);
void adreno_idle(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 749fbb28e..03f115f53 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -41,8 +41,6 @@ enum msm_dsi_phy_type {
/* Regulators for DSI devices */
struct dsi_reg_entry {
char name[32];
- int min_voltage;
- int max_voltage;
int enable_load;
int disable_load;
};
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index e58e9b91b..93c1ee094 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -22,9 +22,9 @@ static const struct msm_dsi_config apq8064_dsi_cfg = {
.reg_cfg = {
.num = 3,
.regs = {
- {"vdda", 1200000, 1200000, 100000, 100},
- {"avdd", 3000000, 3000000, 110000, 100},
- {"vddio", 1800000, 1800000, 100000, 100},
+ {"vdda", 100000, 100}, /* 1.2 V */
+ {"avdd", 10000, 100}, /* 3.0 V */
+ {"vddio", 100000, 100}, /* 1.8 V */
},
},
.bus_clk_names = dsi_v2_bus_clk_names,
@@ -40,10 +40,10 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
.reg_cfg = {
.num = 4,
.regs = {
- {"gdsc", -1, -1, -1, -1},
- {"vdd", 3000000, 3000000, 150000, 100},
- {"vdda", 1200000, 1200000, 100000, 100},
- {"vddio", 1800000, 1800000, 100000, 100},
+ {"gdsc", -1, -1},
+ {"vdd", 150000, 100}, /* 3.0 V */
+ {"vdda", 100000, 100}, /* 1.2 V */
+ {"vddio", 100000, 100}, /* 1.8 V */
},
},
.bus_clk_names = dsi_6g_bus_clk_names,
@@ -59,9 +59,9 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
.reg_cfg = {
.num = 3,
.regs = {
- {"gdsc", -1, -1, -1, -1},
- {"vdda", 1200000, 1200000, 100000, 100},
- {"vddio", 1800000, 1800000, 100000, 100},
+ {"gdsc", -1, -1},
+ {"vdda", 100000, 100}, /* 1.2 V */
+ {"vddio", 100000, 100}, /* 1.8 V */
},
},
.bus_clk_names = dsi_8916_bus_clk_names,
@@ -73,13 +73,13 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
.reg_cfg = {
.num = 7,
.regs = {
- {"gdsc", -1, -1, -1, -1},
- {"vdda", 1250000, 1250000, 100000, 100},
- {"vddio", 1800000, 1800000, 100000, 100},
- {"vcca", 1000000, 1000000, 10000, 100},
- {"vdd", 1800000, 1800000, 100000, 100},
- {"lab_reg", -1, -1, -1, -1},
- {"ibb_reg", -1, -1, -1, -1},
+ {"gdsc", -1, -1},
+ {"vdda", 100000, 100}, /* 1.25 V */
+ {"vddio", 100000, 100}, /* 1.8 V */
+ {"vcca", 10000, 100}, /* 1.0 V */
+ {"vdd", 100000, 100}, /* 1.8 V */
+ {"lab_reg", -1, -1},
+ {"ibb_reg", -1, -1},
},
},
.bus_clk_names = dsi_6g_bus_clk_names,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4282ec6bb..a3e47ad83 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -325,18 +325,6 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
return ret;
}
- for (i = 0; i < num; i++) {
- if (regulator_can_change_voltage(s[i].consumer)) {
- ret = regulator_set_voltage(s[i].consumer,
- regs[i].min_voltage, regs[i].max_voltage);
- if (ret < 0) {
- pr_err("regulator %d set voltage failed, %d\n",
- i, ret);
- return ret;
- }
- }
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 58ba7ec17..c8d1f19c9 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -198,9 +198,13 @@ static enum drm_connector_status dsi_mgr_connector_detect(
static void dsi_mgr_connector_destroy(struct drm_connector *connector)
{
+ struct dsi_connector *dsi_connector = to_dsi_connector(connector);
+
DBG("");
- drm_connector_unregister(connector);
+
drm_connector_cleanup(connector);
+
+ kfree(dsi_connector);
}
static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
@@ -538,12 +542,9 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
struct dsi_connector *dsi_connector;
int ret, i;
- dsi_connector = devm_kzalloc(msm_dsi->dev->dev,
- sizeof(*dsi_connector), GFP_KERNEL);
- if (!dsi_connector) {
- ret = -ENOMEM;
- goto fail;
- }
+ dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL);
+ if (!dsi_connector)
+ return ERR_PTR(-ENOMEM);
dsi_connector->id = id;
@@ -552,7 +553,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
ret = drm_connector_init(msm_dsi->dev, connector,
&dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
@@ -565,21 +566,11 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- ret = drm_connector_register(connector);
- if (ret)
- goto fail;
-
for (i = 0; i < MSM_DSI_ENCODER_NUM; i++)
drm_mode_connector_attach_encoder(connector,
msm_dsi->encoders[i]);
return connector;
-
-fail:
- if (connector)
- dsi_mgr_connector_destroy(connector);
-
- return ERR_PTR(ret);
}
/* initialize bridge */
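The connector hunks here, and the matching ones in the eDP, HDMI and LVDS files below, all apply the same pattern: allocate with plain kzalloc() and free in the .destroy callback instead of relying on devm, return ERR_PTR() directly rather than funnelling through a fail label, and drop drm_connector_register()/unregister() now that registration happens centrally through drm_connector_register_all() in msm_drv.c (see that hunk further down). One caveat: the early return after a failed drm_connector_init() above no longer frees dsi_connector. A leak-free variant would look roughly like this (names follow the hunk, but this is a sketch, not the patch's code):

static struct drm_connector *connector_init_sketch(struct drm_device *ddev,
		struct dsi_connector *c /* freshly kzalloc'ed */)
{
	int ret;

	ret = drm_connector_init(ddev, &c->base, &dsi_mgr_connector_funcs,
			DRM_MODE_CONNECTOR_DSI);
	if (ret) {
		kfree(c);	/* avoid leaking the connector on failure */
		return ERR_PTR(ret);
	}

	return &c->base;
}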
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 91a95fb04..e2f42d8ea 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -177,19 +177,6 @@ static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
return ret;
}
- for (i = 0; i < num; i++) {
- if (regulator_can_change_voltage(s[i].consumer)) {
- ret = regulator_set_voltage(s[i].consumer,
- regs[i].min_voltage, regs[i].max_voltage);
- if (ret < 0) {
- dev_err(dev,
- "regulator %d set voltage failed, %d\n",
- i, ret);
- return ret;
- }
- }
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index 2e9ba118d..f4bc11af8 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -138,8 +138,8 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
.reg_cfg = {
.num = 2,
.regs = {
- {"vddio", 1800000, 1800000, 100000, 100},
- {"vcca", 1000000, 1000000, 10000, 100},
+ {"vddio", 100000, 100}, /* 1.8 V */
+ {"vcca", 10000, 100}, /* 1.0 V */
},
},
.ops = {
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index edf74110c..96d1852af 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -138,7 +138,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
.reg_cfg = {
.num = 1,
.regs = {
- {"vddio", 1800000, 1800000, 100000, 100},
+ {"vddio", 100000, 100},
},
},
.ops = {
@@ -153,7 +153,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
.reg_cfg = {
.num = 1,
.regs = {
- {"vddio", 1800000, 1800000, 100000, 100},
+ {"vddio", 100000, 100}, /* 1.8 V */
},
},
.ops = {
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 197b039ca..213355a3e 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -185,7 +185,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
.reg_cfg = {
.num = 1,
.regs = {
- {"vddio", 1800000, 1800000, 100000, 100},
+ {"vddio", 100000, 100}, /* 1.8 V */
},
},
.ops = {
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index b4d1b4698..72360cd03 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -37,7 +37,7 @@ static void edp_connector_destroy(struct drm_connector *connector)
struct edp_connector *edp_connector = to_edp_connector(connector);
DBG("");
- drm_connector_unregister(connector);
+
drm_connector_cleanup(connector);
kfree(edp_connector);
@@ -124,10 +124,8 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
int ret;
edp_connector = kzalloc(sizeof(*edp_connector), GFP_KERNEL);
- if (!edp_connector) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!edp_connector)
+ return ERR_PTR(-ENOMEM);
edp_connector->edp = edp;
@@ -136,7 +134,7 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
ret = drm_connector_init(edp->dev, connector, &edp_connector_funcs,
DRM_MODE_CONNECTOR_eDP);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
drm_connector_helper_add(connector, &edp_connector_helper_funcs);
@@ -147,17 +145,7 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- ret = drm_connector_register(connector);
- if (ret)
- goto fail;
-
drm_mode_connector_attach_encoder(connector, edp->encoder);
return connector;
-
-fail:
- if (connector)
- edp_connector_destroy(connector);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 81200e9be..149bfe7dd 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -21,8 +21,6 @@
#include "edp.h"
#include "edp.xml.h"
-#define VDDA_MIN_UV 1800000 /* uV units */
-#define VDDA_MAX_UV 1800000 /* uV units */
#define VDDA_UA_ON_LOAD 100000 /* uA units */
#define VDDA_UA_OFF_LOAD 100 /* uA units */
@@ -67,7 +65,7 @@ struct edp_ctrl {
void __iomem *base;
/* regulators */
- struct regulator *vdda_vreg;
+ struct regulator *vdda_vreg; /* 1.8 V */
struct regulator *lvl_vreg;
/* clocks */
@@ -302,21 +300,24 @@ static void edp_clk_disable(struct edp_ctrl *ctrl, u32 clk_mask)
static int edp_regulator_init(struct edp_ctrl *ctrl)
{
struct device *dev = &ctrl->pdev->dev;
+ int ret;
DBG("");
ctrl->vdda_vreg = devm_regulator_get(dev, "vdda");
- if (IS_ERR(ctrl->vdda_vreg)) {
- pr_err("%s: Could not get vdda reg, ret = %ld\n", __func__,
- PTR_ERR(ctrl->vdda_vreg));
+ ret = PTR_ERR_OR_ZERO(ctrl->vdda_vreg);
+ if (ret) {
+ pr_err("%s: Could not get vdda reg, ret = %d\n", __func__,
+ ret);
ctrl->vdda_vreg = NULL;
- return PTR_ERR(ctrl->vdda_vreg);
+ return ret;
}
ctrl->lvl_vreg = devm_regulator_get(dev, "lvl-vdd");
- if (IS_ERR(ctrl->lvl_vreg)) {
- pr_err("Could not get lvl-vdd reg, %ld",
- PTR_ERR(ctrl->lvl_vreg));
+ ret = PTR_ERR_OR_ZERO(ctrl->lvl_vreg);
+ if (ret) {
+ pr_err("%s: Could not get lvl-vdd reg, ret = %d\n", __func__,
+ ret);
ctrl->lvl_vreg = NULL;
- return PTR_ERR(ctrl->lvl_vreg);
+ return ret;
}
return 0;
@@ -326,12 +327,6 @@ static int edp_regulator_enable(struct edp_ctrl *ctrl)
{
int ret;
- ret = regulator_set_voltage(ctrl->vdda_vreg, VDDA_MIN_UV, VDDA_MAX_UV);
- if (ret) {
- pr_err("%s:vdda_vreg set_voltage failed, %d\n", __func__, ret);
- goto vdda_set_fail;
- }
-
ret = regulator_set_load(ctrl->vdda_vreg, VDDA_UA_ON_LOAD);
if (ret < 0) {
pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 65428cf23..bc7ba0bde 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -243,10 +243,21 @@ struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi);
/*
* hdcp
*/
+#ifdef CONFIG_DRM_MSM_HDMI_HDCP
struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi);
void msm_hdmi_hdcp_destroy(struct hdmi *hdmi);
void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+#else
+static inline struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
+{
+ return ERR_PTR(-ENXIO);
+}
+static inline void msm_hdmi_hdcp_destroy(struct hdmi *hdmi) {}
+static inline void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+static inline void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+static inline void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+#endif
#endif /* __HDMI_CONNECTOR_H__ */
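The #ifdef block above is the standard stub pattern for optional features: with CONFIG_DRM_MSM_HDMI_HDCP disabled, callers compile against empty static inlines and need no #ifdef of their own, and the init stub's ERR_PTR(-ENXIO) signals "not available" through the normal error path. A hypothetical call site (the real one lives in hdmi.c) degrades gracefully:

/* Hypothetical caller sketch -- not taken from the patch. */
static void hdmi_hdcp_setup_sketch(struct hdmi *hdmi, struct device *dev)
{
	hdmi->hdcp_ctrl = msm_hdmi_hdcp_init(hdmi);
	if (IS_ERR(hdmi->hdcp_ctrl)) {
		dev_warn(dev, "HDCP disabled or unsupported\n");
		hdmi->hdcp_ctrl = NULL;
	}
}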
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 26129bff2..b15d72683 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -112,6 +112,9 @@ static int gpio_config(struct hdmi *hdmi, bool on)
for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
struct hdmi_gpio_data gpio = config->gpios[i];
+ if (gpio.num == -1)
+ continue;
+
if (gpio.output) {
int value = gpio.value ? 0 : 1;
@@ -126,8 +129,10 @@ static int gpio_config(struct hdmi *hdmi, bool on)
return 0;
err:
- while (i--)
- gpio_free(config->gpios[i].num);
+ while (i--) {
+ if (config->gpios[i].num != -1)
+ gpio_free(config->gpios[i].num);
+ }
return ret;
}
@@ -341,7 +346,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
hdp_disable(hdmi_connector);
- drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(hdmi_connector);
@@ -433,10 +437,8 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
int ret;
hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
- if (!hdmi_connector) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!hdmi_connector)
+ return ERR_PTR(-ENOMEM);
hdmi_connector->hdmi = hdmi;
INIT_WORK(&hdmi_connector->hpd_work, msm_hdmi_hotplug_work);
@@ -453,21 +455,13 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_connector_register(connector);
-
ret = hpd_enable(hdmi_connector);
if (ret) {
dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
- goto fail;
+ return ERR_PTR(ret);
}
drm_mode_connector_attach_encoder(connector, hdmi->encoder);
return connector;
-
-fail:
- if (connector)
- hdmi_connector_destroy(connector);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index e233acf52..9527dafc3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -121,7 +121,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
if (!file || (event->base.file_priv == file)) {
mdp4_crtc->event = NULL;
DBG("%s: send event: %p", mdp4_crtc->name, event);
- drm_send_vblank_event(dev, mdp4_crtc->id, event);
+ drm_crtc_send_vblank_event(crtc, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -427,7 +427,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}
if (handle) {
- cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
+ cursor_bo = drm_gem_object_lookup(file_priv, handle);
if (!cursor_bo)
return -ENOENT;
} else {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 76e1dfb5d..67442d50a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -50,30 +50,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
mdp4_kms->rev = minor;
- if (mdp4_kms->dsi_pll_vdda) {
- if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
- ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
- 1200000, 1200000);
- if (ret) {
- dev_err(dev->dev,
- "failed to set dsi_pll_vdda voltage: %d\n", ret);
- goto out;
- }
- }
- }
-
- if (mdp4_kms->dsi_pll_vddio) {
- if (mdp4_kms->rev == 2) {
- ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
- 1800000, 1800000);
- if (ret) {
- dev_err(dev->dev,
- "failed to set dsi_pll_vddio voltage: %d\n", ret);
- goto out;
- }
- }
- }
-
if (mdp4_kms->rev > 1) {
mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
@@ -485,16 +461,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- mdp4_kms->dsi_pll_vdda =
- devm_regulator_get_optional(&pdev->dev, "dsi_pll_vdda");
- if (IS_ERR(mdp4_kms->dsi_pll_vdda))
- mdp4_kms->dsi_pll_vdda = NULL;
-
- mdp4_kms->dsi_pll_vddio =
- devm_regulator_get_optional(&pdev->dev, "dsi_pll_vddio");
- if (IS_ERR(mdp4_kms->dsi_pll_vddio))
- mdp4_kms->dsi_pll_vddio = NULL;
-
/* NOTE: driver for this regulator still missing upstream.. use
* _get_exclusive() and ignore the error if it does not exist
* (and hope that the bootloader left it on for us)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index b2828717b..c5d045d56 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -37,8 +37,6 @@ struct mdp4_kms {
void __iomem *mmio;
- struct regulator *dsi_pll_vdda;
- struct regulator *dsi_pll_vddio;
struct regulator *vdd;
struct clk *clk;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index e73e1742b..2648cd763 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -48,7 +48,6 @@ static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
- drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(mdp4_lvds_connector);
@@ -121,13 +120,10 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
{
struct drm_connector *connector = NULL;
struct mdp4_lvds_connector *mdp4_lvds_connector;
- int ret;
mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL);
- if (!mdp4_lvds_connector) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!mdp4_lvds_connector)
+ return ERR_PTR(-ENOMEM);
mdp4_lvds_connector->encoder = encoder;
mdp4_lvds_connector->panel_node = panel_node;
@@ -143,15 +139,7 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_connector_register(connector);
-
drm_mode_connector_attach_encoder(connector, encoder);
return connector;
-
-fail:
- if (connector)
- mdp4_lvds_connector_destroy(connector);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 9673b9520..88fe256c1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -149,7 +149,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
if (!file || (event->base.file_priv == file)) {
mdp5_crtc->event = NULL;
DBG("%s: send event: %p", mdp5_crtc->name, event);
- drm_send_vblank_event(dev, mdp5_crtc->id, event);
+ drm_crtc_send_vblank_event(crtc, event);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -518,7 +518,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
goto set_cursor;
}
- cursor_bo = drm_gem_object_lookup(dev, file, handle);
+ cursor_bo = drm_gem_object_lookup(file, handle);
if (!cursor_bo)
return -ENOENT;
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
index 1c2caffc9..b4a8aa449 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -105,6 +105,12 @@ static const struct mdp_format formats[] = {
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(XBGR8888, 8, 8, 8, 8, 2, 0, 1, 3, false, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(RGBX8888, 8, 8, 8, 8, 3, 1, 0, 2, false, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
+ FMT(BGRX8888, 8, 8, 8, 8, 3, 2, 0, 1, false, true, 4, 4,
+ MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3,
MDP_PLANE_INTERLEAVED, CHROMA_FULL, false),
FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3,
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 7eb253bc2..e3892c263 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -18,16 +18,16 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
+#include "msm_fence.h"
struct msm_commit {
struct drm_device *dev;
struct drm_atomic_state *state;
- uint32_t fence;
- struct msm_fence_cb fence_cb;
+ struct work_struct work;
uint32_t crtc_mask;
};
-static void fence_cb(struct msm_fence_cb *cb);
+static void commit_worker(struct work_struct *work);
/* block until specified crtcs are no longer pending update, and
* atomically mark them as pending update
@@ -69,11 +69,7 @@ static struct msm_commit *commit_init(struct drm_atomic_state *state)
c->dev = state->dev;
c->state = state;
- /* TODO we might need a way to indicate to run the cb on a
- * different wq so wait_for_vblanks() doesn't block retiring
- * bo's..
- */
- INIT_FENCE_CB(&c->fence_cb, fence_cb);
+ INIT_WORK(&c->work, commit_worker);
return c;
}
@@ -114,13 +110,15 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
/* The (potentially) asynchronous part of the commit. At this point
* nothing can fail short of armageddon.
*/
-static void complete_commit(struct msm_commit *c)
+static void complete_commit(struct msm_commit *c, bool async)
{
struct drm_atomic_state *state = c->state;
struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
+ drm_atomic_helper_wait_for_fences(dev, state);
+
kms->funcs->prepare_commit(kms, state);
drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -153,17 +151,9 @@ static void complete_commit(struct msm_commit *c)
commit_destroy(c);
}
-static void fence_cb(struct msm_fence_cb *cb)
-{
- struct msm_commit *c =
- container_of(cb, struct msm_commit, fence_cb);
- complete_commit(c);
-}
-
-static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
+static void commit_worker(struct work_struct *work)
{
- struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
- c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+ complete_commit(container_of(work, struct msm_commit, work), true);
}
int msm_atomic_check(struct drm_device *dev,
@@ -190,21 +180,20 @@ int msm_atomic_check(struct drm_device *dev,
* drm_atomic_helper_commit - commit validated state object
* @dev: DRM device
* @state: the driver state object
- * @async: asynchronous commit
+ * @nonblock: nonblocking commit
*
* This function commits a state object pre-validated with
- * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
- * reservation fails. For now this doesn't implement asynchronous commits.
+ * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
+ * reservation fails.
*
* RETURNS
* Zero for success or -errno.
*/
int msm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state, bool async)
+ struct drm_atomic_state *state, bool nonblock)
{
+ struct msm_drm_private *priv = dev->dev_private;
int nplanes = dev->mode_config.num_total_plane;
int ncrtcs = dev->mode_config.num_crtc;
- ktime_t timeout;
struct msm_commit *c;
int i, ret;
@@ -238,8 +227,12 @@ int msm_atomic_commit(struct drm_device *dev,
if (!plane)
continue;
- if ((plane->state->fb != new_state->fb) && new_state->fb)
- add_fb(c, new_state->fb);
+ if ((plane->state->fb != new_state->fb) && new_state->fb) {
+ struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ }
}
/*
@@ -276,17 +269,12 @@ int msm_atomic_commit(struct drm_device *dev,
* current layout.
*/
- if (async) {
- msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
+ if (nonblock) {
+ queue_work(priv->atomic_wq, &c->work);
return 0;
}
- timeout = ktime_add_ms(ktime_get(), 1000);
-
- /* uninterruptible wait */
- msm_wait_fence(dev, c->fence, &timeout, false);
-
- complete_commit(c);
+ complete_commit(c, false);
return 0;
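Taken together, the msm_atomic.c hunks replace the driver's private fence-callback machinery with two standard building blocks: drm_atomic_helper_wait_for_fences() inside complete_commit(), and an ordered workqueue (priv->atomic_wq) that runs nonblocking commits. Stripped of driver detail, the control flow reduces to roughly this sketch (assuming the usual workqueue headers; not the driver's literal code):

struct commit_ctx {
	struct work_struct work;
	struct drm_atomic_state *state;
};

static void commit_func(struct work_struct *work)
{
	struct commit_ctx *c = container_of(work, struct commit_ctx, work);

	/* wait for fences, apply c->state, free c ... */
}

static int commit_sketch(struct workqueue_struct *wq, struct commit_ctx *c,
		bool nonblock)
{
	INIT_WORK(&c->work, commit_func);

	if (nonblock) {
		queue_work(wq, &c->work);	/* return to userspace now */
		return 0;
	}

	commit_func(&c->work);			/* blocking: run inline */
	return 0;
}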
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
new file mode 100644
index 000000000..663f2b6ef
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2013-2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+#include "msm_drv.h"
+#include "msm_gpu.h"
+
+static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+
+ if (gpu) {
+ seq_printf(m, "%s Status:\n", gpu->name);
+ gpu->funcs->show(gpu, m);
+ }
+
+ return 0;
+}
+
+static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+
+ if (gpu) {
+ seq_printf(m, "Active Objects (%s):\n", gpu->name);
+ msm_gem_describe_objects(&gpu->active_list, m);
+ }
+
+ seq_printf(m, "Inactive Objects:\n");
+ msm_gem_describe_objects(&priv->inactive_list, m);
+
+ return 0;
+}
+
+static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
+{
+ return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
+}
+
+static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_framebuffer *fb, *fbdev_fb = NULL;
+
+ if (priv->fbdev) {
+ seq_printf(m, "fbcon ");
+ fbdev_fb = priv->fbdev->fb;
+ msm_framebuffer_describe(fbdev_fb, m);
+ }
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ if (fb == fbdev_fb)
+ continue;
+
+ seq_printf(m, "user ");
+ msm_framebuffer_describe(fb, m);
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+static int show_locked(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ int (*show)(struct drm_device *dev, struct seq_file *m) =
+ node->info_ent->data;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ ret = show(dev, m);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static struct drm_info_list msm_debugfs_list[] = {
+ {"gpu", show_locked, 0, msm_gpu_show},
+ {"gem", show_locked, 0, msm_gem_show},
+ { "mm", show_locked, 0, msm_mm_show },
+ { "fb", show_locked, 0, msm_fb_show },
+};
+
+static int late_init_minor(struct drm_minor *minor)
+{
+ int ret;
+
+ if (!minor)
+ return 0;
+
+ ret = msm_rd_debugfs_init(minor);
+ if (ret) {
+ dev_err(minor->dev->dev, "could not install rd debugfs\n");
+ return ret;
+ }
+
+ ret = msm_perf_debugfs_init(minor);
+ if (ret) {
+ dev_err(minor->dev->dev, "could not install perf debugfs\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int msm_debugfs_late_init(struct drm_device *dev)
+{
+ int ret;
+ ret = late_init_minor(dev->primary);
+ if (ret)
+ return ret;
+ ret = late_init_minor(dev->render);
+ if (ret)
+ return ret;
+ ret = late_init_minor(dev->control);
+ return ret;
+}
+
+int msm_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ ret = drm_debugfs_create_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install msm_debugfs_list\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void msm_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list), minor);
+ if (!minor->dev->dev_private)
+ return;
+ msm_rd_debugfs_cleanup(minor);
+ msm_perf_debugfs_cleanup(minor);
+}
+#endif
+
diff --git a/drivers/gpu/drm/msm/msm_debugfs.h b/drivers/gpu/drm/msm/msm_debugfs.h
new file mode 100644
index 000000000..6110c972f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_debugfs.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_DEBUGFS_H__
+#define __MSM_DEBUGFS_H__
+
+#ifdef CONFIG_DEBUG_FS
+int msm_debugfs_init(struct drm_minor *minor);
+void msm_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
+#endif /* __MSM_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 701c51ed3..9c654092e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -16,6 +16,8 @@
*/
#include "msm_drv.h"
+#include "msm_debugfs.h"
+#include "msm_fence.h"
#include "msm_gpu.h"
#include "msm_kms.h"
@@ -173,13 +175,11 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
return 0;
}
-/*
- * DRM operations:
- */
-
-static int msm_unload(struct drm_device *dev)
+static int msm_drm_uninit(struct device *dev)
{
- struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *ddev = platform_get_drvdata(pdev);
+ struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_gpu *gpu = priv->gpu;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
@@ -195,31 +195,37 @@ static int msm_unload(struct drm_device *dev)
kfree(vbl_ev);
}
- drm_kms_helper_poll_fini(dev);
+ drm_kms_helper_poll_fini(ddev);
+
+ drm_connector_unregister_all(ddev);
+
+ drm_dev_unregister(ddev);
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (fbdev && priv->fbdev)
- msm_fbdev_free(dev);
+ msm_fbdev_free(ddev);
#endif
- drm_mode_config_cleanup(dev);
- drm_vblank_cleanup(dev);
+ drm_mode_config_cleanup(ddev);
- pm_runtime_get_sync(dev->dev);
- drm_irq_uninstall(dev);
- pm_runtime_put_sync(dev->dev);
+ pm_runtime_get_sync(dev);
+ drm_irq_uninstall(ddev);
+ pm_runtime_put_sync(dev);
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
+ flush_workqueue(priv->atomic_wq);
+ destroy_workqueue(priv->atomic_wq);
+
if (kms) {
- pm_runtime_disable(dev->dev);
+ pm_runtime_disable(dev);
kms->funcs->destroy(kms);
}
if (gpu) {
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&ddev->struct_mutex);
gpu->funcs->pm_suspend(gpu);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&ddev->struct_mutex);
gpu->funcs->destroy(gpu);
}
@@ -227,13 +233,14 @@ static int msm_unload(struct drm_device *dev)
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
drm_mm_takedown(&priv->vram.mm);
- dma_free_attrs(dev->dev, priv->vram.size, NULL,
- priv->vram.paddr, &attrs);
+ dma_free_attrs(dev, priv->vram.size, NULL,
+ priv->vram.paddr, &attrs);
}
- component_unbind_all(dev->dev, dev);
+ component_unbind_all(dev, ddev);
- dev->dev_private = NULL;
+ ddev->dev_private = NULL;
+ drm_dev_unref(ddev);
kfree(priv);
@@ -321,50 +328,60 @@ static int msm_init_vram(struct drm_device *dev)
return ret;
}
-static int msm_load(struct drm_device *dev, unsigned long flags)
+static int msm_drm_init(struct device *dev, struct drm_driver *drv)
{
- struct platform_device *pdev = dev->platformdev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *ddev;
struct msm_drm_private *priv;
struct msm_kms *kms;
int ret;
+ ddev = drm_dev_alloc(drv, dev);
+ if (!ddev) {
+ dev_err(dev, "failed to allocate drm_device\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, ddev);
+ ddev->platformdev = pdev;
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
- dev_err(dev->dev, "failed to allocate private data\n");
+ drm_dev_unref(ddev);
return -ENOMEM;
}
- dev->dev_private = priv;
+ ddev->dev_private = priv;
priv->wq = alloc_ordered_workqueue("msm", 0);
- init_waitqueue_head(&priv->fence_event);
+ priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
init_waitqueue_head(&priv->pending_crtcs_event);
INIT_LIST_HEAD(&priv->inactive_list);
- INIT_LIST_HEAD(&priv->fence_cbs);
INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
spin_lock_init(&priv->vblank_ctrl.lock);
- drm_mode_config_init(dev);
-
- platform_set_drvdata(pdev, dev);
+ drm_mode_config_init(ddev);
/* Bind all our sub-components: */
- ret = component_bind_all(dev->dev, dev);
- if (ret)
+ ret = component_bind_all(dev, ddev);
+ if (ret) {
+ kfree(priv);
+ drm_dev_unref(ddev);
return ret;
+ }
- ret = msm_init_vram(dev);
+ ret = msm_init_vram(ddev);
if (ret)
goto fail;
switch (get_mdp_ver(pdev)) {
case 4:
- kms = mdp4_kms_init(dev);
+ kms = mdp4_kms_init(ddev);
break;
case 5:
- kms = mdp5_kms_init(dev);
+ kms = mdp5_kms_init(ddev);
break;
default:
kms = ERR_PTR(-ENODEV);
@@ -378,7 +395,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
* and (for example) use dmabuf/prime to share buffers with
* imx drm driver on iMX5
*/
- dev_err(dev->dev, "failed to load kms\n");
+ dev_err(dev, "failed to load kms\n");
ret = PTR_ERR(kms);
goto fail;
}
@@ -386,50 +403,64 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
priv->kms = kms;
if (kms) {
- pm_runtime_enable(dev->dev);
+ pm_runtime_enable(dev);
ret = kms->funcs->hw_init(kms);
if (ret) {
- dev_err(dev->dev, "kms hw init failed: %d\n", ret);
+ dev_err(dev, "kms hw init failed: %d\n", ret);
goto fail;
}
}
- dev->mode_config.funcs = &mode_config_funcs;
+ ddev->mode_config.funcs = &mode_config_funcs;
- ret = drm_vblank_init(dev, priv->num_crtcs);
+ ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize vblank\n");
+ dev_err(dev, "failed to initialize vblank\n");
goto fail;
}
- pm_runtime_get_sync(dev->dev);
- ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
- pm_runtime_put_sync(dev->dev);
+ pm_runtime_get_sync(dev);
+ ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
+ pm_runtime_put_sync(dev);
if (ret < 0) {
- dev_err(dev->dev, "failed to install IRQ handler\n");
+ dev_err(dev, "failed to install IRQ handler\n");
+ goto fail;
+ }
+
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ goto fail;
+
+ ret = drm_connector_register_all(ddev);
+ if (ret) {
+ dev_err(dev, "failed to register connectors\n");
goto fail;
}
- drm_mode_config_reset(dev);
+ drm_mode_config_reset(ddev);
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (fbdev)
- priv->fbdev = msm_fbdev_init(dev);
+ priv->fbdev = msm_fbdev_init(ddev);
#endif
- ret = msm_debugfs_late_init(dev);
+ ret = msm_debugfs_late_init(ddev);
if (ret)
goto fail;
- drm_kms_helper_poll_init(dev);
+ drm_kms_helper_poll_init(ddev);
return 0;
fail:
- msm_unload(dev);
+ msm_drm_uninit(dev);
return ret;
}
+/*
+ * DRM operations:
+ */
+
static void load_gpu(struct drm_device *dev)
{
static DEFINE_MUTEX(init_lock);
@@ -535,265 +566,6 @@ static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
}
/*
- * DRM debugfs:
- */
-
-#ifdef CONFIG_DEBUG_FS
-static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_gpu *gpu = priv->gpu;
-
- if (gpu) {
- seq_printf(m, "%s Status:\n", gpu->name);
- gpu->funcs->show(gpu, m);
- }
-
- return 0;
-}
-
-static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_gpu *gpu = priv->gpu;
-
- if (gpu) {
- seq_printf(m, "Active Objects (%s):\n", gpu->name);
- msm_gem_describe_objects(&gpu->active_list, m);
- }
-
- seq_printf(m, "Inactive Objects:\n");
- msm_gem_describe_objects(&priv->inactive_list, m);
-
- return 0;
-}
-
-static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
-{
- return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
-}
-
-static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct drm_framebuffer *fb, *fbdev_fb = NULL;
-
- if (priv->fbdev) {
- seq_printf(m, "fbcon ");
- fbdev_fb = priv->fbdev->fb;
- msm_framebuffer_describe(fbdev_fb, m);
- }
-
- mutex_lock(&dev->mode_config.fb_lock);
- list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
- if (fb == fbdev_fb)
- continue;
-
- seq_printf(m, "user ");
- msm_framebuffer_describe(fb, m);
- }
- mutex_unlock(&dev->mode_config.fb_lock);
-
- return 0;
-}
-
-static int show_locked(struct seq_file *m, void *arg)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int (*show)(struct drm_device *dev, struct seq_file *m) =
- node->info_ent->data;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- ret = show(dev, m);
-
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-static struct drm_info_list msm_debugfs_list[] = {
- {"gpu", show_locked, 0, msm_gpu_show},
- {"gem", show_locked, 0, msm_gem_show},
- { "mm", show_locked, 0, msm_mm_show },
- { "fb", show_locked, 0, msm_fb_show },
-};
-
-static int late_init_minor(struct drm_minor *minor)
-{
- int ret;
-
- if (!minor)
- return 0;
-
- ret = msm_rd_debugfs_init(minor);
- if (ret) {
- dev_err(minor->dev->dev, "could not install rd debugfs\n");
- return ret;
- }
-
- ret = msm_perf_debugfs_init(minor);
- if (ret) {
- dev_err(minor->dev->dev, "could not install perf debugfs\n");
- return ret;
- }
-
- return 0;
-}
-
-int msm_debugfs_late_init(struct drm_device *dev)
-{
- int ret;
- ret = late_init_minor(dev->primary);
- if (ret)
- return ret;
- ret = late_init_minor(dev->render);
- if (ret)
- return ret;
- ret = late_init_minor(dev->control);
- return ret;
-}
-
-static int msm_debugfs_init(struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- int ret;
-
- ret = drm_debugfs_create_files(msm_debugfs_list,
- ARRAY_SIZE(msm_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- dev_err(dev->dev, "could not install msm_debugfs_list\n");
- return ret;
- }
-
- return 0;
-}
-
-static void msm_debugfs_cleanup(struct drm_minor *minor)
-{
- drm_debugfs_remove_files(msm_debugfs_list,
- ARRAY_SIZE(msm_debugfs_list), minor);
- if (!minor->dev->dev_private)
- return;
- msm_rd_debugfs_cleanup(minor);
- msm_perf_debugfs_cleanup(minor);
-}
-#endif
-
-/*
- * Fences:
- */
-
-int msm_wait_fence(struct drm_device *dev, uint32_t fence,
- ktime_t *timeout, bool interruptible)
-{
- struct msm_drm_private *priv = dev->dev_private;
- int ret;
-
- if (!priv->gpu)
- return 0;
-
- if (fence > priv->gpu->submitted_fence) {
- DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
- fence, priv->gpu->submitted_fence);
- return -EINVAL;
- }
-
- if (!timeout) {
- /* no-wait: */
- ret = fence_completed(dev, fence) ? 0 : -EBUSY;
- } else {
- ktime_t now = ktime_get();
- unsigned long remaining_jiffies;
-
- if (ktime_compare(*timeout, now) < 0) {
- remaining_jiffies = 0;
- } else {
- ktime_t rem = ktime_sub(*timeout, now);
- struct timespec ts = ktime_to_timespec(rem);
- remaining_jiffies = timespec_to_jiffies(&ts);
- }
-
- if (interruptible)
- ret = wait_event_interruptible_timeout(priv->fence_event,
- fence_completed(dev, fence),
- remaining_jiffies);
- else
- ret = wait_event_timeout(priv->fence_event,
- fence_completed(dev, fence),
- remaining_jiffies);
-
- if (ret == 0) {
- DBG("timeout waiting for fence: %u (completed: %u)",
- fence, priv->completed_fence);
- ret = -ETIMEDOUT;
- } else if (ret != -ERESTARTSYS) {
- ret = 0;
- }
- }
-
- return ret;
-}
-
-int msm_queue_fence_cb(struct drm_device *dev,
- struct msm_fence_cb *cb, uint32_t fence)
-{
- struct msm_drm_private *priv = dev->dev_private;
- int ret = 0;
-
- mutex_lock(&dev->struct_mutex);
- if (!list_empty(&cb->work.entry)) {
- ret = -EINVAL;
- } else if (fence > priv->completed_fence) {
- cb->fence = fence;
- list_add_tail(&cb->work.entry, &priv->fence_cbs);
- } else {
- queue_work(priv->wq, &cb->work);
- }
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-/* called from workqueue */
-void msm_update_fence(struct drm_device *dev, uint32_t fence)
-{
- struct msm_drm_private *priv = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- priv->completed_fence = max(fence, priv->completed_fence);
-
- while (!list_empty(&priv->fence_cbs)) {
- struct msm_fence_cb *cb;
-
- cb = list_first_entry(&priv->fence_cbs,
- struct msm_fence_cb, work.entry);
-
- if (cb->fence > priv->completed_fence)
- break;
-
- list_del_init(&cb->work.entry);
- queue_work(priv->wq, &cb->work);
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- wake_up_all(&priv->fence_event);
-}
-
-void __msm_fence_worker(struct work_struct *work)
-{
- struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
- cb->func(cb);
-}
-
-/*
* DRM ioctls:
*/
@@ -850,7 +622,7 @@ static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
return -EINVAL;
}
- obj = drm_gem_object_lookup(dev, file, args->handle);
+ obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -868,7 +640,7 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
struct drm_gem_object *obj;
int ret;
- obj = drm_gem_object_lookup(dev, file, args->handle);
+ obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -889,7 +661,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
if (args->pad)
return -EINVAL;
- obj = drm_gem_object_lookup(dev, file, args->handle);
+ obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
@@ -903,6 +675,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_wait_fence *args = data;
ktime_t timeout = to_ktime(args->timeout);
@@ -911,7 +684,10 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
return -EINVAL;
}
- return msm_wait_fence(dev, args->fence, &timeout, true);
+ if (!priv->gpu)
+ return 0;
+
+ return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
}
static const struct drm_ioctl_desc msm_ioctls[] = {
@@ -951,8 +727,6 @@ static struct drm_driver msm_driver = {
DRIVER_RENDER |
DRIVER_ATOMIC |
DRIVER_MODESET,
- .load = msm_load,
- .unload = msm_unload,
.open = msm_open,
.preclose = msm_preclose,
.lastclose = msm_lastclose,
@@ -1052,12 +826,12 @@ static int add_components(struct device *dev, struct component_match **matchptr,
static int msm_drm_bind(struct device *dev)
{
- return drm_platform_init(&msm_driver, to_platform_device(dev));
+ return msm_drm_init(dev, &msm_driver);
}
static void msm_drm_unbind(struct device *dev)
{
- drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+ msm_drm_uninit(dev);
}
static const struct component_master_ops msm_drm_ops = {
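The net effect of the msm_drv.c changes: the legacy .load/.unload driver hooks are gone, and the driver owns the drm_device lifecycle itself -- drm_dev_alloc(), internal setup, then drm_dev_register() only once the hardware is ready, with msm_drm_uninit() unwinding in reverse. The bare skeleton of that pattern, per the hunks above (error unwinding elided for brevity):

static int bind_sketch(struct device *dev, struct drm_driver *drv)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(drv, dev);	/* NULL on failure in this kernel */
	if (!ddev)
		return -ENOMEM;

	platform_set_drvdata(to_platform_device(dev), ddev);

	/* ... allocate private state, bind components, init KMS ... */

	ret = drm_dev_register(ddev, 0);	/* visible to userspace now */
	if (ret)
		drm_dev_unref(ddev);

	return ret;
}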
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 870dbe58c..5b2963f32 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -49,6 +49,8 @@ struct msm_mmu;
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
+struct msm_fence_context;
+struct msm_fence_cb;
#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
@@ -100,9 +102,6 @@ struct msm_drm_private {
struct drm_fb_helper *fbdev;
- uint32_t next_fence, completed_fence;
- wait_queue_head_t fence_event;
-
struct msm_rd_state *rd;
struct msm_perf_state *perf;
@@ -110,9 +109,7 @@ struct msm_drm_private {
struct list_head inactive_list;
struct workqueue_struct *wq;
-
- /* callbacks deferred until bo is inactive: */
- struct list_head fence_cbs;
+ struct workqueue_struct *atomic_wq;
/* crtcs pending async atomic updates: */
uint32_t pending_crtcs;
@@ -157,33 +154,14 @@ struct msm_format {
uint32_t pixel_format;
};
-/* callback from wq once fence has passed: */
-struct msm_fence_cb {
- struct work_struct work;
- uint32_t fence;
- void (*func)(struct msm_fence_cb *cb);
-};
-
-void __msm_fence_worker(struct work_struct *work);
-
-#define INIT_FENCE_CB(_cb, _func) do { \
- INIT_WORK(&(_cb)->work, __msm_fence_worker); \
- (_cb)->func = _func; \
- } while (0)
-
int msm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
int msm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state, bool async);
+ struct drm_atomic_state *state, bool nonblock);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-int msm_wait_fence(struct drm_device *dev, uint32_t fence,
- ktime_t *timeout, bool interruptible);
-int msm_queue_fence_cb(struct drm_device *dev,
- struct msm_fence_cb *cb, uint32_t fence);
-void msm_update_fence(struct drm_device *dev, uint32_t fence);
-
+void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -213,13 +191,12 @@ int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_vaddr(struct drm_gem_object *obj);
-int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
- struct msm_fence_cb *cb);
+int msm_gem_sync_object(struct drm_gem_object *obj,
+ struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool write, uint32_t fence);
+ struct msm_gpu *gpu, bool exclusive, struct fence *fence);
void msm_gem_move_to_inactive(struct drm_gem_object *obj);
-int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
- ktime_t *timeout);
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
@@ -227,7 +204,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
- uint32_t size, struct sg_table *sgt);
+ struct dma_buf *dmabuf, struct sg_table *sgt);
int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
@@ -303,12 +280,6 @@ u32 msm_readl(const void __iomem *addr);
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
-static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
-{
- struct msm_drm_private *priv = dev->dev_private;
- return priv->completed_fence >= fence;
-}
-
static inline int align_pitch(int width, int bpp)
{
int bytespp = (bpp + 7) / 8;
@@ -327,5 +298,20 @@ static inline int align_pitch(int width, int bpp)
/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
+static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
+{
+ ktime_t now = ktime_get();
+ unsigned long remaining_jiffies;
+
+ if (ktime_compare(*timeout, now) < 0) {
+ remaining_jiffies = 0;
+ } else {
+ ktime_t rem = ktime_sub(*timeout, now);
+ struct timespec ts = ktime_to_timespec(rem);
+ remaining_jiffies = timespec_to_jiffies(&ts);
+ }
+
+ return remaining_jiffies;
+}
#endif /* __MSM_DRV_H__ */
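The new timeout_to_jiffies() helper hoists the absolute-ktime-to-jiffies conversion that msm_wait_fence() used to open-code (see the block deleted from msm_drv.c above); msm_fence.c and msm_gem.c both call it below. For example, a caller with a 16 ms budget (one 60 Hz frame; the value is illustrative):

static unsigned long frame_budget_jiffies(void)
{
	/* absolute deadline 16 ms from now */
	ktime_t deadline = ktime_add_ms(ktime_get(), 16);

	/* suitable for wait_event_timeout() or
	 * reservation_object_wait_timeout_rcu()
	 */
	return timeout_to_jiffies(&deadline);
}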
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index a474d6cf5..461dc8b87 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -77,7 +77,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
fb->width, fb->height, (char *)&fb->pixel_format,
- fb->refcount.refcount.counter, fb->base.id);
+ drm_framebuffer_read_refcount(fb), fb->base.id);
for (i = 0; i < n; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
@@ -145,8 +145,7 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
for (i = 0; i < n; i++) {
- bos[i] = drm_gem_object_lookup(dev, file,
- mode_cmd->handles[i]);
+ bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
if (!bos[i]) {
ret = -ENXIO;
goto out_unref;
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index d9759bf34..c6cf837c5 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
dev->mode_config.fb_base = paddr;
fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+ if (IS_ERR(fbi->screen_base)) {
+ ret = PTR_ERR(fbi->screen_base);
+ goto fail_unlock;
+ }
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = paddr;
fbi->fix.smem_len = fbdev->bo->size;
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
new file mode 100644
index 000000000..a9b9b1c95
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2013-2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fence.h>
+
+#include "msm_drv.h"
+#include "msm_fence.h"
+
+struct msm_fence_context *
+msm_fence_context_alloc(struct drm_device *dev, const char *name)
+{
+ struct msm_fence_context *fctx;
+
+ fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return ERR_PTR(-ENOMEM);
+
+ fctx->dev = dev;
+ fctx->name = name;
+ fctx->context = fence_context_alloc(1);
+ init_waitqueue_head(&fctx->event);
+ spin_lock_init(&fctx->spinlock);
+
+ return fctx;
+}
+
+void msm_fence_context_free(struct msm_fence_context *fctx)
+{
+ kfree(fctx);
+}
+
+static inline bool fence_completed(struct msm_fence_context *fctx, uint32_t fence)
+{
+ return (int32_t)(fctx->completed_fence - fence) >= 0;
+}
+
+/* legacy path for WAIT_FENCE ioctl: */
+int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
+ ktime_t *timeout, bool interruptible)
+{
+ int ret;
+
+ if (fence > fctx->last_fence) {
+ DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
+ fctx->name, fence, fctx->last_fence);
+ return -EINVAL;
+ }
+
+ if (!timeout) {
+ /* no-wait: */
+ ret = fence_completed(fctx, fence) ? 0 : -EBUSY;
+ } else {
+ unsigned long remaining_jiffies = timeout_to_jiffies(timeout);
+
+ if (interruptible)
+ ret = wait_event_interruptible_timeout(fctx->event,
+ fence_completed(fctx, fence),
+ remaining_jiffies);
+ else
+ ret = wait_event_timeout(fctx->event,
+ fence_completed(fctx, fence),
+ remaining_jiffies);
+
+ if (ret == 0) {
+ DBG("timeout waiting for fence: %u (completed: %u)",
+ fence, fctx->completed_fence);
+ ret = -ETIMEDOUT;
+ } else if (ret != -ERESTARTSYS) {
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+/* called from workqueue */
+void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
+{
+ spin_lock(&fctx->spinlock);
+ fctx->completed_fence = max(fence, fctx->completed_fence);
+ spin_unlock(&fctx->spinlock);
+
+ wake_up_all(&fctx->event);
+}
+
+struct msm_fence {
+ struct msm_fence_context *fctx;
+ struct fence base;
+};
+
+static inline struct msm_fence *to_msm_fence(struct fence *fence)
+{
+ return container_of(fence, struct msm_fence, base);
+}
+
+static const char *msm_fence_get_driver_name(struct fence *fence)
+{
+ return "msm";
+}
+
+static const char *msm_fence_get_timeline_name(struct fence *fence)
+{
+ struct msm_fence *f = to_msm_fence(fence);
+ return f->fctx->name;
+}
+
+static bool msm_fence_enable_signaling(struct fence *fence)
+{
+ return true;
+}
+
+static bool msm_fence_signaled(struct fence *fence)
+{
+ struct msm_fence *f = to_msm_fence(fence);
+ return fence_completed(f->fctx, f->base.seqno);
+}
+
+static void msm_fence_release(struct fence *fence)
+{
+ struct msm_fence *f = to_msm_fence(fence);
+ kfree_rcu(f, base.rcu);
+}
+
+static const struct fence_ops msm_fence_ops = {
+ .get_driver_name = msm_fence_get_driver_name,
+ .get_timeline_name = msm_fence_get_timeline_name,
+ .enable_signaling = msm_fence_enable_signaling,
+ .signaled = msm_fence_signaled,
+ .wait = fence_default_wait,
+ .release = msm_fence_release,
+};
+
+struct fence *
+msm_fence_alloc(struct msm_fence_context *fctx)
+{
+ struct msm_fence *f;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return ERR_PTR(-ENOMEM);
+
+ f->fctx = fctx;
+
+ fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
+ fctx->context, ++fctx->last_fence);
+
+ return &f->base;
+}
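One detail worth calling out in the new file: fence_completed() compares sequence numbers with a signed 32-bit subtraction, which stays correct across u32 wraparound as long as the compared values are within 2^31 of each other. A worked example:

static bool wraparound_example(void)
{
	uint32_t completed = 0x00000002; /* counter wrapped past 0xffffffff */
	uint32_t fence     = 0xfffffffe; /* assigned just before the wrap */

	/* completed - fence == 4 (mod 2^32), and (int32_t)4 >= 0, so the
	 * fence correctly reads as done; a naive 'completed >= fence'
	 * would wrongly report it as still pending.
	 */
	return (int32_t)(completed - fence) >= 0;	/* true */
}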
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
new file mode 100644
index 000000000..ceb5b3d31
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2013-2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_FENCE_H__
+#define __MSM_FENCE_H__
+
+#include "msm_drv.h"
+
+struct msm_fence_context {
+ struct drm_device *dev;
+ const char *name;
+ unsigned context;
+ /* last_fence == completed_fence --> no pending work */
+ uint32_t last_fence; /* last assigned fence */
+ uint32_t completed_fence; /* last completed fence */
+ wait_queue_head_t event;
+ spinlock_t spinlock;
+};
+
+struct msm_fence_context *msm_fence_context_alloc(struct drm_device *dev,
+ const char *name);
+void msm_fence_context_free(struct msm_fence_context *fctx);
+
+int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
+ ktime_t *timeout, bool interruptible);
+int msm_queue_fence_cb(struct msm_fence_context *fctx,
+ struct msm_fence_cb *cb, uint32_t fence);
+void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
+
+struct fence *msm_fence_alloc(struct msm_fence_context *fctx);
+
+#endif
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 3cedb8d5c..69836f568 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -21,6 +21,7 @@
#include <linux/pfn_t.h>
#include "msm_drv.h"
+#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"
@@ -373,7 +374,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
int ret = 0;
/* GEM does all our handle to object mapping */
- obj = drm_gem_object_lookup(dev, file, handle);
+ obj = drm_gem_object_lookup(file, handle);
if (obj == NULL) {
ret = -ENOENT;
goto fail;
@@ -397,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
return ERR_CAST(pages);
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ if (msm_obj->vaddr == NULL)
+ return ERR_PTR(-ENOMEM);
}
return msm_obj->vaddr;
}
@@ -410,27 +413,62 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
return ret;
}
-/* setup callback for when bo is no longer busy..
- * TODO probably want to differentiate read vs write..
- */
-int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
- struct msm_fence_cb *cb)
+/* must be called before _move_to_active().. */
+int msm_gem_sync_object(struct drm_gem_object *obj,
+ struct msm_fence_context *fctx, bool exclusive)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- uint32_t fence = msm_gem_fence(msm_obj,
- MSM_PREP_READ | MSM_PREP_WRITE);
- return msm_queue_fence_cb(obj->dev, cb, fence);
+ struct reservation_object_list *fobj;
+ struct fence *fence;
+ int i, ret;
+
+ if (!exclusive) {
+ /* NOTE: _reserve_shared() must happen before _add_shared_fence(),
+ * which makes this a slightly strange place to call it. OTOH this
+ * is a convenient can-fail point to hook it in. (And similar to
+ * how etnaviv and nouveau handle this.)
+ */
+ ret = reservation_object_reserve_shared(msm_obj->resv);
+ if (ret)
+ return ret;
+ }
+
+ fobj = reservation_object_get_list(msm_obj->resv);
+ if (!fobj || (fobj->shared_count == 0)) {
+ fence = reservation_object_get_excl(msm_obj->resv);
+ /* don't need to wait on our own fences, since ring is fifo */
+ if (fence && (fence->context != fctx->context)) {
+ ret = fence_wait(fence, true);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (!exclusive || !fobj)
+ return 0;
+
+ for (i = 0; i < fobj->shared_count; i++) {
+ fence = rcu_dereference_protected(fobj->shared[i],
+ reservation_object_held(msm_obj->resv));
+ if (fence->context != fctx->context) {
+ ret = fence_wait(fence, true);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
- struct msm_gpu *gpu, bool write, uint32_t fence)
+ struct msm_gpu *gpu, bool exclusive, struct fence *fence)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
msm_obj->gpu = gpu;
- if (write)
- msm_obj->write_fence = fence;
+ if (exclusive)
+ reservation_object_add_excl_fence(msm_obj->resv, fence);
else
- msm_obj->read_fence = fence;
+ reservation_object_add_shared_fence(msm_obj->resv, fence);
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
@@ -444,30 +482,30 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
msm_obj->gpu = NULL;
- msm_obj->read_fence = 0;
- msm_obj->write_fence = 0;
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
- struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int ret = 0;
+ bool write = !!(op & MSM_PREP_WRITE);
- if (is_active(msm_obj)) {
- uint32_t fence = msm_gem_fence(msm_obj, op);
-
- if (op & MSM_PREP_NOSYNC)
- timeout = NULL;
+ if (op & MSM_PREP_NOSYNC) {
+ if (!reservation_object_test_signaled_rcu(msm_obj->resv, write))
+ return -EBUSY;
+ } else {
+ int ret;
- ret = msm_wait_fence(dev, fence, timeout, true);
+ ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+ true, timeout_to_jiffies(timeout));
+ if (ret <= 0)
+ return ret == 0 ? -ETIMEDOUT : ret;
}
/* TODO cache maintenance */
- return ret;
+ return 0;
}
int msm_gem_cpu_fini(struct drm_gem_object *obj)
@@ -477,18 +515,46 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
}
#ifdef CONFIG_DEBUG_FS
+static void describe_fence(struct fence *fence, const char *type,
+ struct seq_file *m)
+{
+ if (!fence_is_signaled(fence))
+ seq_printf(m, "\t%9s: %s %s seq %u\n", type,
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence),
+ fence->seqno);
+}
+
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
- struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct reservation_object *robj = msm_obj->resv;
+ struct reservation_object_list *fobj;
+ struct fence *fence;
uint64_t off = drm_vma_node_start(&obj->vma_node);
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
- msm_obj->read_fence, msm_obj->write_fence,
obj->name, obj->refcount.refcount.counter,
off, msm_obj->vaddr, obj->size);
+
+ rcu_read_lock();
+ fobj = rcu_dereference(robj->fence);
+ if (fobj) {
+ unsigned int i, shared_count = fobj->shared_count;
+
+ for (i = 0; i < shared_count; i++) {
+ fence = rcu_dereference(fobj->shared[i]);
+ describe_fence(fence, "Shared", m);
+ }
+ }
+
+ fence = rcu_dereference(robj->fence_excl);
+ if (fence)
+ describe_fence(fence, "Exclusive", m);
+ rcu_read_unlock();
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -583,6 +649,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
+ struct reservation_object *resv,
struct drm_gem_object **obj)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -622,8 +689,12 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->flags = flags;
- msm_obj->resv = &msm_obj->_resv;
- reservation_object_init(msm_obj->resv);
+ if (resv) {
+ msm_obj->resv = resv;
+ } else {
+ msm_obj->resv = &msm_obj->_resv;
+ reservation_object_init(msm_obj->resv);
+ }
INIT_LIST_HEAD(&msm_obj->submit_entry);
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
@@ -643,7 +714,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
size = PAGE_ALIGN(size);
- ret = msm_gem_new_impl(dev, size, flags, &obj);
+ ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
if (ret)
goto fail;
@@ -665,10 +736,11 @@ fail:
}
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
- uint32_t size, struct sg_table *sgt)
+ struct dma_buf *dmabuf, struct sg_table *sgt)
{
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
+ uint32_t size;
int ret, npages;
/* if we don't have IOMMU, don't bother pretending we can import: */
@@ -677,9 +749,9 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
- size = PAGE_ALIGN(size);
+ size = PAGE_ALIGN(dmabuf->size);
- ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+ ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
if (ret)
goto fail;
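
Passing dmabuf->resv into msm_gem_new_impl() is the crux of the import change: every dma_buf embeds a reservation object, and adopting the exporter's copy instead of initializing a private one means fences attached by the exporting driver are honoured here too, and vice versa. In sketch form (driver_bo_new is a stand-in for the driver's allocator, not a real function):

    #include <linux/dma-buf.h>

    struct drm_gem_object *import_bo(struct drm_device *dev,
                                     struct dma_buf *dmabuf)
    {
            /* size comes from the exporter now, not from userspace: */
            size_t size = PAGE_ALIGN(dmabuf->size);

            /* share the exporter's reservation object for cross-driver sync: */
            return driver_bo_new(dev, size, dmabuf->resv);
    }
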
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 6fc59bfee..9facd4b6f 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -39,7 +39,6 @@ struct msm_gem_object {
*/
struct list_head mm_list;
struct msm_gpu *gpu; /* non-null if active */
- uint32_t read_fence, write_fence;
/* Transiently in the process of submit ioctl, objects associated
* with the submit are on submit->bo_list.. this only lasts for
@@ -73,19 +72,6 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
return msm_obj->gpu != NULL;
}
-static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
- uint32_t op)
-{
- uint32_t fence = 0;
-
- if (op & MSM_PREP_READ)
- fence = msm_obj->write_fence;
- if (op & MSM_PREP_WRITE)
- fence = max(fence, msm_obj->read_fence);
-
- return fence;
-}
-
#define MAX_CMDS 4
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
@@ -99,8 +85,9 @@ struct msm_gem_submit {
struct list_head node; /* node in gpu submit_list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
- uint32_t fence;
- bool valid;
+ struct fence *fence;
+ struct pid *pid; /* submitting process */
+ bool valid; /* true if no cmdstream patching needed */
unsigned int nr_cmds;
unsigned int nr_bos;
struct {
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 121975b07..6b90890fa 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -55,7 +55,7 @@ int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
- return msm_gem_import(dev, attach->dmabuf->size, sg);
+ return msm_gem_import(dev, attach->dmabuf, sg);
}
int msm_gem_prime_pin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 43d218123..eb4bb8b2f 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -24,15 +24,10 @@
*/
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
-#define BO_VALID 0x8000
+#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED 0x4000
#define BO_PINNED 0x2000
-static inline void __user *to_user_ptr(u64 address)
-{
- return (void __user *)(uintptr_t)address;
-}
-
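
The removed to_user_ptr() duplicated a helper that <linux/kernel.h> has provided since v4.6; the common version also type-checks its argument, catching callers that pass a narrower integer. Its definition is roughly:

    #define u64_to_user_ptr(x) (            \
    {                                       \
            typecheck(u64, x);              \
            (void __user *)(uintptr_t)x;    \
    }                                       \
    )
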
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu, int nr)
{
@@ -40,21 +35,33 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
- if (submit) {
- submit->dev = dev;
- submit->gpu = gpu;
+ if (!submit)
+ return NULL;
- /* initially, until copy_from_user() and bo lookup succeeds: */
- submit->nr_bos = 0;
- submit->nr_cmds = 0;
+ submit->dev = dev;
+ submit->gpu = gpu;
+ submit->fence = NULL;
+ submit->pid = get_pid(task_pid(current));
- INIT_LIST_HEAD(&submit->bo_list);
- ww_acquire_init(&submit->ticket, &reservation_ww_class);
- }
+ /* initially, until copy_from_user() and bo lookup succeeds: */
+ submit->nr_bos = 0;
+ submit->nr_cmds = 0;
+
+ INIT_LIST_HEAD(&submit->node);
+ INIT_LIST_HEAD(&submit->bo_list);
+ ww_acquire_init(&submit->ticket, &reservation_ww_class);
return submit;
}
+void msm_gem_submit_free(struct msm_gem_submit *submit)
+{
+ fence_put(submit->fence);
+ list_del(&submit->node);
+ put_pid(submit->pid);
+ kfree(submit);
+}
+
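
submit_create() now records the submitting process as a struct pid rather than a task pointer: a pid reference is cheap, survives the task exiting, and can be resolved back to a task_struct under RCU when it is actually needed (as recover_worker() does further down). The idiom in isolation (report_owner is a hypothetical helper):

    #include <linux/pid.h>
    #include <linux/sched.h>

    /* at creation time: pid = get_pid(task_pid(current));
     * at teardown:      put_pid(pid);
     */
    static void report_owner(struct pid *pid)
    {
            struct task_struct *task;

            rcu_read_lock();
            task = pid_task(pid, PIDTYPE_PID);  /* NULL once the task is gone */
            if (task)
                    pr_info("owner: %s\n", task->comm);
            rcu_read_unlock();
    }
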
static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
@@ -68,7 +75,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_gem_object *obj;
struct msm_gem_object *msm_obj;
void __user *userptr =
- to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+ u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+
+ /* make sure we don't have garbage flags, in case we hit
+ * error path before flags is initialized:
+ */
+ submit->bos[i].flags = 0;
ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
if (ret) {
@@ -136,16 +148,13 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
}
/* This is where we make sure all the bo's are reserved and pin'd: */
-static int submit_validate_objects(struct msm_gem_submit *submit)
+static int submit_lock_objects(struct msm_gem_submit *submit)
{
int contended, slow_locked = -1, i, ret = 0;
retry:
- submit->valid = true;
-
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- uint32_t iova;
if (slow_locked == i)
slow_locked = -1;
@@ -159,30 +168,6 @@ retry:
goto fail;
submit->bos[i].flags |= BO_LOCKED;
}
-
-
- /* if locking succeeded, pin bo: */
- ret = msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
-
- /* this would break the logic in the fail path.. there is no
- * reason for this to happen, but just to be on the safe side
- * let's notice if this starts happening in the future:
- */
- WARN_ON(ret == -EDEADLK);
-
- if (ret)
- goto fail;
-
- submit->bos[i].flags |= BO_PINNED;
-
- if (iova == submit->bos[i].iova) {
- submit->bos[i].flags |= BO_VALID;
- } else {
- submit->bos[i].iova = iova;
- submit->bos[i].flags &= ~BO_VALID;
- submit->valid = false;
- }
}
ww_acquire_done(&submit->ticket);
@@ -211,6 +196,54 @@ fail:
return ret;
}
+static int submit_fence_sync(struct msm_gem_submit *submit)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
+
+ ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
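
submit_fence_sync() hooks each buffer into implicit fencing before pinning. The real msm_gem_sync_object() (added elsewhere in this patch, in msm_gem.c) also skips fences from the GPU's own context; stripped of that optimization, the core of such a sync step is just a reservation-object wait, e.g.:

    /* MAX_SCHEDULE_TIMEOUT comes from <linux/sched.h> */
    static int sync_bo(struct reservation_object *resv, bool write)
    {
            long ret;

            /* writers wait for everything; readers only for the
             * exclusive (write) fence:
             */
            ret = reservation_object_wait_timeout_rcu(resv, write, true,
                                                      MAX_SCHEDULE_TIMEOUT);
            return ret < 0 ? ret : 0;
    }
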
+static int submit_pin_objects(struct msm_gem_submit *submit)
+{
+ int i, ret = 0;
+
+ submit->valid = true;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ uint32_t iova;
+
+ /* if locking succeeded, pin bo: */
+ ret = msm_gem_get_iova_locked(&msm_obj->base,
+ submit->gpu->id, &iova);
+
+ if (ret)
+ break;
+
+ submit->bos[i].flags |= BO_PINNED;
+
+ if (iova == submit->bos[i].iova) {
+ submit->bos[i].flags |= BO_VALID;
+ } else {
+ submit->bos[i].iova = iova;
+ /* iova changed, so address in cmdstream is not valid: */
+ submit->bos[i].flags &= ~BO_VALID;
+ submit->valid = false;
+ }
+ }
+
+ return ret;
+}
+
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
struct msm_gem_object **obj, uint32_t *iova, bool *valid)
{
@@ -257,7 +290,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
for (i = 0; i < nr_relocs; i++) {
struct drm_msm_gem_submit_reloc submit_reloc;
void __user *userptr =
- to_user_ptr(relocs + (i * sizeof(submit_reloc)));
+ u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
uint32_t iova, off;
bool valid;
@@ -302,7 +335,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
return 0;
}
-static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
+static void submit_cleanup(struct msm_gem_submit *submit)
{
unsigned i;
@@ -349,14 +382,22 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
- ret = submit_validate_objects(submit);
+ ret = submit_lock_objects(submit);
+ if (ret)
+ goto out;
+
+ ret = submit_fence_sync(submit);
+ if (ret)
+ goto out;
+
+ ret = submit_pin_objects(submit);
if (ret)
goto out;
for (i = 0; i < args->nr_cmds; i++) {
struct drm_msm_gem_submit_cmd submit_cmd;
void __user *userptr =
- to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
+ u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
struct msm_gem_object *msm_obj;
uint32_t iova;
@@ -415,10 +456,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = msm_gpu_submit(gpu, submit, ctx);
- args->fence = submit->fence;
+ args->fence = submit->fence->seqno;
out:
- submit_cleanup(submit, !!ret);
+ submit_cleanup(submit);
+ if (ret)
+ msm_gem_submit_free(submit);
mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 6b02ada65..36ed53e66 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -18,6 +18,7 @@
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
+#include "msm_fence.h"
/*
@@ -265,22 +266,38 @@ static void inactive_start(struct msm_gpu *gpu)
* Hangcheck detection for locked gpu:
*/
-static void retire_submits(struct msm_gpu *gpu, uint32_t fence);
+static void retire_submits(struct msm_gpu *gpu);
static void recover_worker(struct work_struct *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
struct drm_device *dev = gpu->dev;
+ struct msm_gem_submit *submit;
+ uint32_t fence = gpu->funcs->last_fence(gpu);
- dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+ msm_update_fence(gpu->fctx, fence + 1);
mutex_lock(&dev->struct_mutex);
- if (msm_gpu_active(gpu)) {
- struct msm_gem_submit *submit;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+ list_for_each_entry(submit, &gpu->submit_list, node) {
+ if (submit->fence->seqno == (fence + 1)) {
+ struct task_struct *task;
+
+ rcu_read_lock();
+ task = pid_task(submit->pid, PIDTYPE_PID);
+ if (task) {
+ dev_err(dev->dev, "%s: offending task: %s\n",
+ gpu->name, task->comm);
+ }
+ rcu_read_unlock();
+ break;
+ }
+ }
+
+ if (msm_gpu_active(gpu)) {
/* retire completed submits, plus the one that hung: */
- retire_submits(gpu, fence + 1);
+ retire_submits(gpu);
inactive_cancel(gpu);
gpu->funcs->recover(gpu);
@@ -290,6 +307,7 @@ static void recover_worker(struct work_struct *work)
gpu->funcs->submit(gpu, submit, NULL);
}
}
+
mutex_unlock(&dev->struct_mutex);
msm_gpu_retire(gpu);
@@ -312,7 +330,7 @@ static void hangcheck_handler(unsigned long data)
if (fence != gpu->hangcheck_fence) {
/* some progress has been made.. ya! */
gpu->hangcheck_fence = fence;
- } else if (fence < gpu->submitted_fence) {
+ } else if (fence < gpu->fctx->last_fence) {
/* no progress and not done.. hung! */
gpu->hangcheck_fence = fence;
dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
@@ -320,12 +338,12 @@ static void hangcheck_handler(unsigned long data)
dev_err(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
dev_err(dev->dev, "%s: submitted fence: %u\n",
- gpu->name, gpu->submitted_fence);
+ gpu->name, gpu->fctx->last_fence);
queue_work(priv->wq, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
- if (gpu->submitted_fence > gpu->hangcheck_fence)
+ if (gpu->fctx->last_fence > gpu->hangcheck_fence)
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
@@ -431,7 +449,22 @@ out:
* Cmdstream submission/retirement:
*/
-static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
+static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ int i;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ /* move to inactive: */
+ msm_gem_move_to_inactive(&msm_obj->base);
+ msm_gem_put_iova(&msm_obj->base, gpu->id);
+ drm_gem_object_unreference(&msm_obj->base);
+ }
+
+ msm_gem_submit_free(submit);
+}
+
+static void retire_submits(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
@@ -443,9 +476,8 @@ static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
submit = list_first_entry(&gpu->submit_list,
struct msm_gem_submit, node);
- if (submit->fence <= fence) {
- list_del(&submit->node);
- kfree(submit);
+ if (fence_is_signaled(submit->fence)) {
+ retire_submit(gpu, submit);
} else {
break;
}
@@ -458,29 +490,10 @@ static void retire_worker(struct work_struct *work)
struct drm_device *dev = gpu->dev;
uint32_t fence = gpu->funcs->last_fence(gpu);
- msm_update_fence(gpu->dev, fence);
+ msm_update_fence(gpu->fctx, fence);
mutex_lock(&dev->struct_mutex);
-
- retire_submits(gpu, fence);
-
- while (!list_empty(&gpu->active_list)) {
- struct msm_gem_object *obj;
-
- obj = list_first_entry(&gpu->active_list,
- struct msm_gem_object, mm_list);
-
- if ((obj->read_fence <= fence) &&
- (obj->write_fence <= fence)) {
- /* move to inactive: */
- msm_gem_move_to_inactive(&obj->base);
- msm_gem_put_iova(&obj->base, gpu->id);
- drm_gem_object_unreference(&obj->base);
- } else {
- break;
- }
- }
-
+ retire_submits(gpu);
mutex_unlock(&dev->struct_mutex);
if (!msm_gpu_active(gpu))
@@ -505,9 +518,12 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- submit->fence = ++priv->next_fence;
-
- gpu->submitted_fence = submit->fence;
+ submit->fence = msm_fence_alloc(gpu->fctx);
+ if (IS_ERR(submit->fence)) {
+ ret = PTR_ERR(submit->fence);
+ submit->fence = NULL;
+ return ret;
+ }
inactive_cancel(gpu);
@@ -515,40 +531,34 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_rd_dump_submit(submit);
- gpu->submitted_fence = submit->fence;
-
update_sw_cntrs(gpu);
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ uint32_t iova;
/* can't happen yet.. but when we add 2d support we'll have
* to deal w/ cross-ring synchronization:
*/
WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
- if (!is_active(msm_obj)) {
- uint32_t iova;
-
- /* ring takes a reference to the bo and iova: */
- drm_gem_object_reference(&msm_obj->base);
- msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
- }
-
- if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
- msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
+ /* submit takes a reference to the bo and iova until retired: */
+ drm_gem_object_reference(&msm_obj->base);
+ msm_gem_get_iova_locked(&msm_obj->base,
+ submit->gpu->id, &iova);
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
+ else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
+ msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
}
- ret = gpu->funcs->submit(gpu, submit, ctx);
+ gpu->funcs->submit(gpu, submit, ctx);
priv->lastctx = ctx;
hangcheck_timer_reset(gpu);
- return ret;
+ return 0;
}
/*
@@ -580,6 +590,12 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->funcs = funcs;
gpu->name = name;
gpu->inactive = true;
+ gpu->fctx = msm_fence_context_alloc(drm, name);
+ if (IS_ERR(gpu->fctx)) {
+ ret = PTR_ERR(gpu->fctx);
+ gpu->fctx = NULL;
+ goto fail;
+ }
INIT_LIST_HEAD(&gpu->active_list);
INIT_WORK(&gpu->retire_work, retire_worker);
@@ -700,4 +716,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
if (gpu->mmu)
gpu->mmu->funcs->destroy(gpu->mmu);
+
+ if (gpu->fctx)
+ msm_fence_context_free(gpu->fctx);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 2bbe85a3d..c9022837a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -22,6 +22,7 @@
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
+#include "msm_fence.h"
#include "msm_ringbuffer.h"
struct msm_gem_submit;
@@ -46,7 +47,7 @@ struct msm_gpu_funcs {
int (*hw_init)(struct msm_gpu *gpu);
int (*pm_suspend)(struct msm_gpu *gpu);
int (*pm_resume)(struct msm_gpu *gpu);
- int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void (*flush)(struct msm_gpu *gpu);
void (*idle)(struct msm_gpu *gpu);
@@ -77,13 +78,15 @@ struct msm_gpu {
const struct msm_gpu_perfcntr *perfcntrs;
uint32_t num_perfcntrs;
+ /* ringbuffer: */
struct msm_ringbuffer *rb;
uint32_t rb_iova;
/* list of GEM active objects: */
struct list_head active_list;
- uint32_t submitted_fence;
+ /* fencing: */
+ struct msm_fence_context *fctx;
/* is gpu powered/active? */
int active_cnt;
@@ -125,7 +128,7 @@ struct msm_gpu {
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
- return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
+ return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu);
}
/* Perf-Counters:
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 9a78c4881..0857710c2 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -296,7 +296,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
TASK_COMM_LEN, current->comm, task_pid_nr(current),
- submit->fence);
+ submit->fence->seqno);
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
@@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf = msm_gem_vaddr_locked(&obj->base);
+ if (IS_ERR(buf))
+ continue;
+
buf += iova - submit->bos[idx].iova;
rd_write_section(rd, RD_GPUADDR,
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 1f14b908b..42f5359cf 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->start = msm_gem_vaddr_locked(ring->bo);
+ if (IS_ERR(ring->start)) {
+ ret = PTR_ERR(ring->start);
+ goto fail;
+ }
ring->end = ring->start + (size / 4);
ring->cur = ring->start;
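
Both this hunk and the msm_rd one above exist because msm_gem_vaddr_locked() can return an ERR_PTR-encoded errno; a NULL test would miss the failure entirely. The consuming pattern, sketched:

    static int use_vaddr(struct drm_gem_object *bo)
    {
            void *vaddr = msm_gem_vaddr_locked(bo);

            if (IS_ERR(vaddr))
                    return PTR_ERR(vaddr);  /* e.g. -ENOMEM from a failed vmap */

            /* ... vaddr is safe to dereference from here on ... */
            return 0;
    }
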
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 82bd4658a..a555681c3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -23,7 +23,7 @@
#include <drm/drmP.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "hw.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 55ccbf006..6f318c54d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -28,8 +28,9 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_reg.h"
+#include "nouveau_ttm.h"
#include "nouveau_bo.h"
#include "nouveau_gem.h"
#include "nouveau_encoder.h"
@@ -995,7 +996,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
if (width != 64 || height != 64)
return -EINVAL;
- gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
+ gem = drm_gem_object_lookup(file_priv, buffer_handle);
if (!gem)
return -ENOENT;
cursor = nouveau_gem_object(gem);
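
This is the first of many identical conversions below: drm_gem_object_lookup() lost its drm_device parameter in 4.7, since the per-file handle table already implies the device. The new prototype and the usage pattern:

    struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp,
                                                 u32 handle);

    gem = drm_gem_object_lookup(file_priv, handle);
    if (!gem)
            return -ENOENT;     /* handle not valid for this file */
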
diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index 4e61173c3..c83116a30 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -1,6 +1,6 @@
#include <drm/drmP.h>
#include <drm/drm_mode.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_crtc.h"
#include "hw.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index b48eec395..b6cc7766e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -27,7 +27,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index 05bfd151d..c2947ef7d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -27,7 +27,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index b4a6bc433..aea81a547 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -25,7 +25,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "hw.h"
#include "nouveau_encoder.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 6c9a1e898..7030307d2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -1,6 +1,6 @@
#ifndef __NV04_DISPLAY_H__
#define __NV04_DISPLAY_H__
-
+#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include "nouveau_display.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 956a833b8..74856a8b8 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -23,7 +23,7 @@
*/
#include <drm/drmP.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "hw.h"
#include <subdev/bios/pll.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index aeebdd402..ec444eac6 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -27,7 +27,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_bo.h"
#include "nouveau_connector.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
index 903c473d2..2b83b2c39 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
@@ -26,7 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "hw.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 54e9fb9eb..477a8d072 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -25,7 +25,7 @@
*/
#include <drm/drmP.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 163317d26..a665b78b2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -26,7 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 4993a863a..126a85cc8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -7,6 +7,7 @@ enum nvkm_devidx {
NVKM_SUBDEV_PCI,
NVKM_SUBDEV_VBIOS,
NVKM_SUBDEV_DEVINIT,
+ NVKM_SUBDEV_TOP,
NVKM_SUBDEV_IBUS,
NVKM_SUBDEV_GPIO,
NVKM_SUBDEV_I2C,
@@ -15,9 +16,9 @@ enum nvkm_devidx {
NVKM_SUBDEV_MC,
NVKM_SUBDEV_BUS,
NVKM_SUBDEV_TIMER,
+ NVKM_SUBDEV_INSTMEM,
NVKM_SUBDEV_FB,
NVKM_SUBDEV_LTC,
- NVKM_SUBDEV_INSTMEM,
NVKM_SUBDEV_MMU,
NVKM_SUBDEV_BAR,
NVKM_SUBDEV_PMU,
@@ -131,6 +132,7 @@ struct nvkm_device {
struct nvkm_secboot *secboot;
struct nvkm_therm *therm;
struct nvkm_timer *timer;
+ struct nvkm_top *top;
struct nvkm_volt *volt;
struct nvkm_engine *bsp;
@@ -200,6 +202,7 @@ struct nvkm_device_chip {
int (*secboot )(struct nvkm_device *, int idx, struct nvkm_secboot **);
int (*therm )(struct nvkm_device *, int idx, struct nvkm_therm **);
int (*timer )(struct nvkm_device *, int idx, struct nvkm_timer **);
+ int (*top )(struct nvkm_device *, int idx, struct nvkm_top **);
int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **);
int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index 48bf12845..9ebfd8782 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -38,11 +38,9 @@ struct nvkm_engine_func {
};
int nvkm_engine_ctor(const struct nvkm_engine_func *, struct nvkm_device *,
- int index, u32 pmc_enable, bool enable,
- struct nvkm_engine *);
+ int index, bool enable, struct nvkm_engine *);
int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *,
- int index, u32 pmc_enable, bool enable,
- struct nvkm_engine **);
+ int index, bool enable, struct nvkm_engine **);
struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *);
void nvkm_engine_unref(struct nvkm_engine **);
void nvkm_engine_tile(struct nvkm_engine *, int region);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 3b5dc9c63..57adefa8b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -6,7 +6,6 @@ struct nvkm_subdev {
const struct nvkm_subdev_func *func;
struct nvkm_device *device;
enum nvkm_devidx index;
- u32 pmc_enable;
struct mutex mutex;
u32 debug;
@@ -24,7 +23,7 @@ struct nvkm_subdev_func {
extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
- int index, u32 pmc_enable, struct nvkm_subdev *);
+ int index, struct nvkm_subdev *);
void nvkm_subdev_del(struct nvkm_subdev **);
int nvkm_subdev_preinit(struct nvkm_subdev *);
int nvkm_subdev_init(struct nvkm_subdev *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 81c0bc66a..e6baf039c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -40,7 +40,6 @@ struct nvkm_falcon_func {
u32 *data;
u32 size;
} data;
- u32 pmc_enable;
void (*init)(struct nvkm_falcon *);
void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *);
struct nvkm_sclass sclass[];
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
index 3128d21a5..b1fcc4167 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
@@ -15,7 +15,6 @@ int nvkm_xtensa_new_(const struct nvkm_xtensa_func *, struct nvkm_device *,
int index, bool enable, u32 addr, struct nvkm_engine **);
struct nvkm_xtensa_func {
- u32 pmc_enable;
u32 fifo_val;
u32 unkd28;
struct nvkm_sclass sclass[];
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 193626c69..709d786f1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -7,6 +7,7 @@ struct nvkm_devinit {
const struct nvkm_devinit_func *func;
struct nvkm_subdev subdev;
bool post;
+ bool force_post;
};
u32 nvkm_devinit_mmio(struct nvkm_devinit *, u32 addr);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 85ab72c7f..0a734fd06 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -55,6 +55,9 @@ struct nvkm_fb {
struct nvkm_fb_tile region[16];
int regions;
} tile;
+
+ struct nvkm_memory *mmu_rd;
+ struct nvkm_memory *mmu_wr;
};
bool nvkm_fb_memtype_valid(struct nvkm_fb *, u32 memtype);
@@ -87,6 +90,7 @@ int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
index 530c6215f..3c2ddd975 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
@@ -3,15 +3,13 @@
#include <core/subdev.h>
-struct nkvm_iccsense_rail;
struct nvkm_iccsense {
struct nvkm_subdev subdev;
- u8 rail_count;
bool data_valid;
- struct nvkm_iccsense_rail *rails;
+ struct list_head sensors;
+ struct list_head rails;
};
int gf100_iccsense_new(struct nvkm_device *, int index, struct nvkm_iccsense **);
-int nvkm_iccsense_read(struct nvkm_iccsense *iccsense, u8 idx);
int nvkm_iccsense_read_all(struct nvkm_iccsense *iccsense);
#endif
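
The iccsense rework swaps a counted rail array for two linked lists, which is why nouveau_hwmon below now tests !list_empty(&iccsense->rails) instead of rail_count. The list idiom, for reference (the rail struct is private to the subdev, so its members here are assumptions):

    struct my_rail {
            struct list_head head;      /* linked into iccsense->rails */
            int (*read)(struct nvkm_iccsense *, struct my_rail *);
    };

    static int read_all_rails(struct nvkm_iccsense *iccsense)
    {
            struct my_rail *rail;
            int total = 0;

            list_for_each_entry(rail, &iccsense->rails, head)
                    total += rail->read(iccsense, rail);
            return total;
    }
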
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 4de05e718..2e80682b2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -10,12 +10,18 @@ struct nvkm_mc {
void nvkm_mc_intr(struct nvkm_mc *, bool *handled);
void nvkm_mc_intr_unarm(struct nvkm_mc *);
void nvkm_mc_intr_rearm(struct nvkm_mc *);
+void nvkm_mc_reset(struct nvkm_mc *, enum nvkm_devidx);
void nvkm_mc_unk260(struct nvkm_mc *, u32 data);
int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int nv17_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int nv44_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int nv50_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int g84_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int g98_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gt215_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
new file mode 100644
index 000000000..8fb575a92
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
@@ -0,0 +1,17 @@
+#ifndef __NVKM_TOP_H__
+#define __NVKM_TOP_H__
+#include <core/subdev.h>
+
+struct nvkm_top {
+ const struct nvkm_top_func *func;
+ struct nvkm_subdev subdev;
+ struct list_head device;
+};
+
+u32 nvkm_top_reset(struct nvkm_top *, enum nvkm_devidx);
+u32 nvkm_top_intr(struct nvkm_top *, u32 intr, u64 *subdevs);
+enum nvkm_devidx nvkm_top_fault(struct nvkm_top *, int fault);
+enum nvkm_devidx nvkm_top_engine(struct nvkm_top *, int, int *runl, int *engn);
+
+int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **);
+#endif
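
The new TOP subdev parses the device-topology tables on GK104 and newer parts, letting the rest of nvkm translate between nvkm_devidx and hardware interrupt/reset/fault identifiers at runtime instead of hard-coding masks; this is what makes the pmc_enable removals throughout this patch possible. One plausible use of the accessors above, sketched (the dispatch loop is illustrative, not nvkm code):

    static void dispatch_intr(struct nvkm_device *device, u32 stat)
    {
            u64 subdevs;

            /* nvkm_top_intr() attributes status bits to subdevices and
             * returns whatever it could not map:
             */
            u32 unhandled = nvkm_top_intr(device->top, stat, &subdevs);

            while (subdevs) {
                    enum nvkm_devidx idx = __ffs64(subdevs);
                    struct nvkm_subdev *subdev = nvkm_device_subdev(device, idx);

                    if (subdev)
                            nvkm_subdev_intr(subdev);
                    subdevs &= ~BIT_ULL(idx);
            }
            /* "unhandled" bits would fall back to legacy static routing */
            (void)unhandled;
    }
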
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index a59e524c0..eb7de487a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -29,7 +29,7 @@
#include <nvif/cla06f.h>
#include <nvif/unpack.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index cdf522770..db76b94e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -6,7 +6,7 @@
#include <drm/drm_edid.h>
#include <acpi/video.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_acpi.h"
#define NOUVEAU_DSM_LED 0x02
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 89eb46040..f5101be80 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -32,7 +32,7 @@
#include <linux/backlight.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_encoder.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 4dca65a63..a1570b109 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -24,7 +24,7 @@
#include <drm/drmP.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2cdaea586..5e3f3e826 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -30,7 +30,7 @@
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
@@ -312,7 +312,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
bool force = false, evict = false;
int ret;
- ret = ttm_bo_reserve(bo, false, false, false, NULL);
+ ret = ttm_bo_reserve(bo, false, false, NULL);
if (ret)
return ret;
@@ -385,7 +385,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret, ref;
- ret = ttm_bo_reserve(bo, false, false, false, NULL);
+ ret = ttm_bo_reserve(bo, false, false, NULL);
if (ret)
return ret;
@@ -420,7 +420,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
{
int ret;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return ret;
@@ -1322,7 +1322,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
}
/* Fallback to software copy. */
- ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
+ ret = ttm_bo_wait(bo, intr, no_wait_gpu);
if (ret == 0)
ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
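
Two TTM API cleanups drive the mechanical changes in this file: ttm_bo_reserve() dropped its use_ticket bool (now implied by ticket != NULL), and ttm_bo_wait() dropped its never-used lazy flag. The 4.7 prototypes, paraphrased:

    int ttm_bo_reserve(struct ttm_buffer_object *bo,
                       bool interruptible, bool no_wait,
                       struct ww_acquire_ctx *ticket); /* non-NULL ticket
                                                        * implies ww locking */

    int ttm_bo_wait(struct ttm_buffer_object *bo,
                    bool interruptible, bool no_wait);
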
@@ -1611,6 +1611,8 @@ struct ttm_bo_driver nouveau_bo_driver = {
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
+ .lru_tail = &ttm_bo_default_lru_tail,
+ .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
struct nvkm_vma *
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 879655c03..b1d2527c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -34,7 +34,7 @@
/*XXX*/
#include <core/client.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_bo.h"
#include "nouveau_chan.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index e81aefe5f..c1084088f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -34,7 +34,7 @@
#include <drm/drm_crtc_helper.h>
#include "nouveau_reg.h"
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "dispnv04/hw.h"
#include "nouveau_acpi.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 3d0dc199b..411c12cdb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -32,7 +32,7 @@
#include <nvif/class.h>
#include <nvif/if0001.h>
#include "nouveau_debugfs.h"
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
static int
nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index b8c03ff5b..eab588114 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -5,7 +5,7 @@
#if defined(CONFIG_DEBUG_FS)
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
struct nouveau_debugfs {
struct nvif_object ctrl;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7ce7fa5cb..7c77f960c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -279,7 +279,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_gem_object *gem;
int ret = -ENOMEM;
- gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
@@ -296,7 +296,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
err:
kfree(nouveau_fb);
err_unref:
- drm_gem_object_unreference(gem);
+ drm_gem_object_unreference_unlocked(gem);
return ERR_PTR(ret);
}
@@ -739,7 +739,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
}
mutex_lock(&cli->mutex);
- ret = ttm_bo_reserve(&new_bo->bo, true, false, false, NULL);
+ ret = ttm_bo_reserve(&new_bo->bo, true, false, NULL);
if (ret)
goto fail_unpin;
@@ -753,7 +753,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (new_bo != old_bo) {
ttm_bo_unreserve(&new_bo->bo);
- ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
+ ret = ttm_bo_reserve(&old_bo->bo, true, false, NULL);
if (ret)
goto fail_unpin;
}
@@ -916,7 +916,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
{
struct drm_gem_object *gem;
- gem = drm_gem_object_lookup(dev, file_priv, handle);
+ gem = drm_gem_object_lookup(file_priv, handle);
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 5a57d8b47..24273bacd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -3,7 +3,7 @@
#include <subdev/mmu.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
struct nouveau_framebuffer {
struct drm_framebuffer base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index d168c6353..2634a1a79 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -24,7 +24,7 @@
*
*/
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
void
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index e17e15ec7..87d52d36f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -25,7 +25,7 @@
#include <drm/drmP.h>
#include <drm/drm_dp_helper.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index d06877d9c..11f8dd9c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -44,7 +44,7 @@
#include <nvif/cla06f.h>
#include <nvif/if0004.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
@@ -1083,10 +1083,8 @@ nouveau_drm_init(void)
nouveau_display_options();
if (nouveau_modeset == -1) {
-#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force())
nouveau_modeset = 0;
-#endif
}
if (!nouveau_modeset)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 5c363ed1c..822a0212c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1,5 +1,5 @@
-#ifndef __NOUVEAU_DRMCLI_H__
-#define __NOUVEAU_DRMCLI_H__
+#ifndef __NOUVEAU_DRV_H__
+#define __NOUVEAU_DRV_H__
#define DRIVER_AUTHOR "Nouveau Project"
#define DRIVER_EMAIL "nouveau@lists.freedesktop.org"
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index e40a1b07a..d1f248fd3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -43,7 +43,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_bo.h"
#include "nouveau_fbcon.h"
@@ -386,8 +386,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
}
}
- mutex_lock(&dev->struct_mutex);
-
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
ret = PTR_ERR(info);
@@ -426,8 +424,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- mutex_unlock(&dev->struct_mutex);
-
if (chan)
nouveau_fbcon_accel_init(dev);
nouveau_fbcon_zfill(dev, fbcon);
@@ -441,7 +437,6 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
return 0;
out_unlock:
- mutex_unlock(&dev->struct_mutex);
if (chan)
nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma);
nouveau_bo_unmap(nvbo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 9a8c5b727..4bb9ab892 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -34,7 +34,7 @@
#include <nvif/notify.h>
#include <nvif/event.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index a0865c49e..72e2399bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -24,7 +24,7 @@
*
*/
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"
@@ -71,7 +71,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
if (!cli->vm)
return 0;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return ret;
@@ -126,7 +126,7 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
list_del(&vma->head);
if (fobj && fobj->shared_count > 1)
- ttm_bo_wait(&nvbo->bo, true, false, false);
+ ttm_bo_wait(&nvbo->bo, false, false);
else if (fobj && fobj->shared_count == 1)
fence = rcu_dereference_protected(fobj->shared[0],
reservation_object_held(resv));
@@ -156,7 +156,7 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
if (!cli->vm)
return;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
if (ret)
return;
@@ -368,7 +368,6 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
int nr_buffers, struct validate_op *op)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
- struct drm_device *dev = chan->drm->dev;
int trycnt = 0;
int ret, i;
struct nouveau_bo *res_bo = NULL;
@@ -388,7 +387,7 @@ retry:
struct drm_gem_object *gem;
struct nouveau_bo *nvbo;
- gem = drm_gem_object_lookup(dev, file_priv, b->handle);
+ gem = drm_gem_object_lookup(file_priv, b->handle);
if (!gem) {
NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
ret = -ENOENT;
@@ -409,7 +408,7 @@ retry:
break;
}
- ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
+ ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
if (ret) {
list_splice_tail_init(&vram_list, &op->list);
list_splice_tail_init(&gart_list, &op->list);
@@ -651,7 +650,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
data |= r->vor;
}
- ret = ttm_bo_wait(&nvbo->bo, true, false, false);
+ ret = ttm_bo_wait(&nvbo->bo, false, false);
if (ret) {
NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
break;
@@ -864,7 +863,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
int ret;
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ gem = drm_gem_object_lookup(file_priv, req->handle);
if (!gem)
return -ENOENT;
nvbo = nouveau_gem_object(gem);
@@ -896,7 +895,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
struct drm_gem_object *gem;
struct nouveau_bo *nvbo;
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ gem = drm_gem_object_lookup(file_priv, req->handle);
if (!gem)
return -ENOENT;
nvbo = nouveau_gem_object(gem);
@@ -914,7 +913,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
struct drm_gem_object *gem;
int ret;
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+ gem = drm_gem_object_lookup(file_priv, req->handle);
if (!gem)
return -ENOENT;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index e4049faca..7e32da2e0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -3,7 +3,7 @@
#include <drm/drmP.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_bo.h"
#define nouveau_bo_tile_layout(nvbo) \
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 67edd2f5b..1ff4166af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -31,7 +31,7 @@
#include <drm/drmP.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_hwmon.h"
#include <nvkm/subdev/iccsense.h>
@@ -689,7 +689,7 @@ nouveau_hwmon_init(struct drm_device *dev)
goto error;
}
- if (iccsense && iccsense->data_valid && iccsense->rail_count) {
+ if (iccsense && iccsense->data_valid && !list_empty(&iccsense->rails)) {
ret = sysfs_create_group(&hwmon_dev->kobj,
&hwmon_power_attrgroup);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
index 55eb94284..15f0925ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_nvif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -36,7 +36,7 @@
#include <nvif/event.h>
#include <nvif/ioctl.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_usif.h"
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
index f41056d0f..a90d72767 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.h
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -21,7 +21,7 @@
*/
#ifndef __NOUVEAU_PLATFORM_H__
#define __NOUVEAU_PLATFORM_H__
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
extern struct platform_driver nouveau_platform_driver;
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index dd32ad6db..a0a9704cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -25,7 +25,7 @@
#include <drm/drmP.h>
#include <linux/dma-buf.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_gem.h"
struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 8c3053a17..db35ab588 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,7 +1,7 @@
#include <linux/pagemap.h>
#include <linux/slab.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_ttm.h"
struct nouveau_sgdma_be {
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index d2e7d209f..bcee91497 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -24,7 +24,7 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index e9f52ef0b..675e9e077 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_usif.h"
#include "nouveau_abi16.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index af89c3665..c6a180a0c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -4,7 +4,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_acpi.h"
#include "nouveau_fbcon.h"
#include "nouveau_vga.h"
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 8f715fead..7d9248b8c 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -22,7 +22,7 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 3022d24ed..1915b7b82 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 2c35213da..4e3de34ff 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nv10_fence.h"
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 6a141c9bf..7d5e562a5 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -26,7 +26,7 @@
#include <nvif/class.h>
#include <nvif/cl0002.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nv10_fence.h"
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a43445caa..3ffc2b005 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -39,7 +39,7 @@
#include <nvif/cl507d.h>
#include <nvif/cl507e.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
@@ -1305,7 +1305,6 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct drm_device *dev = crtc->dev;
struct drm_gem_object *gem = NULL;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
@@ -1314,7 +1313,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
if (width != 64 || height != 64)
return -EINVAL;
- gem = drm_gem_object_lookup(dev, file_priv, handle);
+ gem = drm_gem_object_lookup(file_priv, handle);
if (unlikely(!gem))
return -ENOENT;
nvbo = nouveau_gem_object(gem);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index a4e259a00..1aeb698e9 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 3695ccce6..4d6f202b7 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -26,7 +26,7 @@
#include <nvif/class.h>
#include <nvif/cl0002.h>
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nv10_fence.h"
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 412c5be5a..18bde9d8e 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index f28315e86..839f4c8c1 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index becf19abd..b79775788 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -22,7 +22,7 @@
* Authors: Ben Skeggs
*/
-#include "nouveau_drm.h"
+#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
index 8a7bae7bd..ee8e5831f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/engine.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c
@@ -137,11 +137,10 @@ nvkm_engine_func = {
int
nvkm_engine_ctor(const struct nvkm_engine_func *func,
- struct nvkm_device *device, int index, u32 pmc_enable,
- bool enable, struct nvkm_engine *engine)
+ struct nvkm_device *device, int index, bool enable,
+ struct nvkm_engine *engine)
{
- nvkm_subdev_ctor(&nvkm_engine_func, device, index,
- pmc_enable, &engine->subdev);
+ nvkm_subdev_ctor(&nvkm_engine_func, device, index, &engine->subdev);
engine->func = func;
if (!nvkm_boolopt(device->cfgopt, nvkm_subdev_name[index], enable)) {
@@ -155,11 +154,10 @@ nvkm_engine_ctor(const struct nvkm_engine_func *func,
int
nvkm_engine_new_(const struct nvkm_engine_func *func,
- struct nvkm_device *device, int index, u32 pmc_enable,
- bool enable, struct nvkm_engine **pengine)
+ struct nvkm_device *device, int index, bool enable,
+ struct nvkm_engine **pengine)
{
if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL)))
return -ENOMEM;
- return nvkm_engine_ctor(func, device, index, pmc_enable,
- enable, *pengine);
+ return nvkm_engine_ctor(func, device, index, enable, *pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 3bf08cb1a..b18557858 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -24,6 +24,7 @@
#include <core/subdev.h>
#include <core/device.h>
#include <core/option.h>
+#include <subdev/mc.h>
static struct lock_class_key nvkm_subdev_lock_class[NVKM_SUBDEV_NR];
@@ -50,6 +51,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_SUBDEV_SECBOOT ] = "secboot",
[NVKM_SUBDEV_THERM ] = "therm",
[NVKM_SUBDEV_TIMER ] = "tmr",
+ [NVKM_SUBDEV_TOP ] = "top",
[NVKM_SUBDEV_VOLT ] = "volt",
[NVKM_ENGINE_BSP ] = "bsp",
[NVKM_ENGINE_CE0 ] = "ce0",
@@ -89,7 +91,6 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_device *device = subdev->device;
const char *action = suspend ? "suspend" : "fini";
- u32 pmc_enable = subdev->pmc_enable;
s64 time;
nvkm_trace(subdev, "%s running...\n", action);
@@ -104,11 +105,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
}
}
- if (pmc_enable) {
- nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
- nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
- nvkm_rd32(device, 0x000200);
- }
+ nvkm_mc_reset(device->mc, subdev->index);
time = ktime_to_us(ktime_get()) - time;
nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
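
The open-coded sequence deleted here (clear the PMC enable bits, set them again, then read back to post the write) is exactly what moves behind nvkm_mc_reset(); the difference is that the mask is now looked up by subdev index from per-chipset mc tables rather than stored on every subdev. Its effect, sketched (the helper name is hypothetical):

    static void pmc_reset(struct nvkm_device *device, u32 pmc_enable)
    {
            nvkm_mask(device, 0x000200, pmc_enable, 0x00000000); /* disable    */
            nvkm_mask(device, 0x000200, pmc_enable, pmc_enable); /* re-enable  */
            nvkm_rd32(device, 0x000200);                         /* post/flush */
    }
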
@@ -193,14 +190,13 @@ nvkm_subdev_del(struct nvkm_subdev **psubdev)
void
nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
- struct nvkm_device *device, int index, u32 pmc_enable,
+ struct nvkm_device *device, int index,
struct nvkm_subdev *subdev)
{
const char *name = nvkm_subdev_name[index];
subdev->func = func;
subdev->device = device;
subdev->index = index;
- subdev->pmc_enable = pmc_enable;
__mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
subdev->debug = nvkm_dbgopt(device->dbgopt, name);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 3ef01071f..8e2e24a74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -27,7 +27,6 @@
static const struct nvkm_xtensa_func
g84_bsp = {
- .pmc_enable = 0x04008000,
.fifo_val = 0x1111,
.unkd28 = 0x90044,
.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
index 92a9f35df..ad9f855c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gf100.c
@@ -40,7 +40,6 @@ gf100_ce0 = {
.code.size = sizeof(gf100_ce_code),
.data.data = gf100_ce_data,
.data.size = sizeof(gf100_ce_data),
- .pmc_enable = 0x00000040,
.init = gf100_ce_init,
.intr = gt215_ce_intr,
.sclass = {
@@ -55,7 +54,6 @@ gf100_ce1 = {
.code.size = sizeof(gf100_ce_code),
.data.data = gf100_ce_data,
.data.size = sizeof(gf100_ce_data),
- .pmc_enable = 0x00000080,
.init = gf100_ce_init,
.intr = gt215_ce_intr,
.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
index e2b944dce..9e0b53a10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gk104.c
@@ -97,17 +97,5 @@ int
gk104_ce_new(struct nvkm_device *device, int index,
struct nvkm_engine **pengine)
{
- if (index == NVKM_ENGINE_CE0) {
- return nvkm_engine_new_(&gk104_ce, device, index,
- 0x00000040, true, pengine);
- } else
- if (index == NVKM_ENGINE_CE1) {
- return nvkm_engine_new_(&gk104_ce, device, index,
- 0x00000080, true, pengine);
- } else
- if (index == NVKM_ENGINE_CE2) {
- return nvkm_engine_new_(&gk104_ce, device, index,
- 0x00200000, true, pengine);
- }
- return -ENODEV;
+ return nvkm_engine_new_(&gk104_ce, device, index, true, pengine);
}
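
Note on the copy-engine constructors: the index-to-mask ladders deleted here (and in the gm107/gm200 files below) hard-coded 0x00000040, 0x00000080 and 0x00200000 for CE0-CE2. With the mask argument gone, that knowledge presumably moves into MC-side reset tables; a hypothetical sketch of such a table (struct and field names are illustrative, not the nvkm ones):

    #include <stdint.h>

    struct mc_reset_map { uint32_t mask; int unit; };

    /* Same three masks the deleted ladder used, keyed by CE index. */
    static const struct mc_reset_map ce_resets[] = {
        { 0x00000040, 0 },      /* NVKM_ENGINE_CE0 */
        { 0x00000080, 1 },      /* NVKM_ENGINE_CE1 */
        { 0x00200000, 2 },      /* NVKM_ENGINE_CE2 */
        { 0, 0 }
    };
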
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
index 4c2f42919..c0df7daa8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm107.c
@@ -39,17 +39,5 @@ int
gm107_ce_new(struct nvkm_device *device, int index,
struct nvkm_engine **pengine)
{
- if (index == NVKM_ENGINE_CE0) {
- return nvkm_engine_new_(&gm107_ce, device, index,
- 0x00000040, true, pengine);
- } else
- if (index == NVKM_ENGINE_CE1) {
- return nvkm_engine_new_(&gm107_ce, device, index,
- 0x00000080, true, pengine);
- } else
- if (index == NVKM_ENGINE_CE2) {
- return nvkm_engine_new_(&gm107_ce, device, index,
- 0x00200000, true, pengine);
- }
- return -ENODEV;
+ return nvkm_engine_new_(&gm107_ce, device, index, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
index 13f07b32c..c6fa8b207 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gm200.c
@@ -38,17 +38,5 @@ int
gm200_ce_new(struct nvkm_device *device, int index,
struct nvkm_engine **pengine)
{
- if (index == NVKM_ENGINE_CE0) {
- return nvkm_engine_new_(&gm200_ce, device, index,
- 0x00000040, true, pengine);
- } else
- if (index == NVKM_ENGINE_CE1) {
- return nvkm_engine_new_(&gm200_ce, device, index,
- 0x00000080, true, pengine);
- } else
- if (index == NVKM_ENGINE_CE2) {
- return nvkm_engine_new_(&gm200_ce, device, index,
- 0x00200000, true, pengine);
- }
- return -ENODEV;
+ return nvkm_engine_new_(&gm200_ce, device, index, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
index 402dcbcc2..63ac51a54 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c
@@ -67,7 +67,6 @@ gt215_ce = {
.code.size = sizeof(gt215_ce_code),
.data.data = gt215_ce_data,
.data.size = sizeof(gt215_ce_data),
- .pmc_enable = 0x00802000,
.intr = gt215_ce_intr,
.sclass = {
{ -1, -1, GT212_DMA },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
index bfd01625e..68ffb5205 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
@@ -130,6 +130,5 @@ int
g84_cipher_new(struct nvkm_device *device, int index,
struct nvkm_engine **pengine)
{
- return nvkm_engine_new_(&g84_cipher, device, index,
- 0x00004000, true, pengine);
+ return nvkm_engine_new_(&g84_cipher, device, index, true, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 9f32c8739..4572debcb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -146,7 +146,7 @@ nv11_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv04_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv11_mc_new,
.mmu = nv04_mmu_new,
.pci = nv04_pci_new,
.timer = nv04_timer_new,
@@ -190,7 +190,7 @@ nv17_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv04_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv04_mmu_new,
.pci = nv04_pci_new,
.timer = nv04_timer_new,
@@ -212,7 +212,7 @@ nv18_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv04_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv04_mmu_new,
.pci = nv04_pci_new,
.timer = nv04_timer_new,
@@ -256,7 +256,7 @@ nv1f_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv04_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv04_mmu_new,
.pci = nv04_pci_new,
.timer = nv04_timer_new,
@@ -278,7 +278,7 @@ nv20_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv04_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv04_mmu_new,
.pci = nv04_pci_new,
.timer = nv04_timer_new,
@@ -300,7 +300,7 @@ nv25_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv04_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv04_mmu_new,
.pci = nv04_pci_new,
.timer = nv04_timer_new,
@@ -322,7 +322,7 @@ nv28_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv04_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv04_mmu_new,
.pci = nv04_pci_new,
.timer = nv04_timer_new,
@@ -344,7 +344,7 @@ nv2a_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv04_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv04_mmu_new,
.pci = nv04_pci_new,
.timer = nv04_timer_new,
@@ -366,7 +366,7 @@ nv30_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv04_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv04_mmu_new,
.pci = nv04_pci_new,
.timer = nv04_timer_new,
@@ -388,7 +388,7 @@ nv31_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv04_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv04_mmu_new,
.pci = nv04_pci_new,
.timer = nv04_timer_new,
@@ -411,7 +411,7 @@ nv34_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv04_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv04_mmu_new,
.pci = nv04_pci_new,
.timer = nv04_timer_new,
@@ -434,7 +434,7 @@ nv35_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv04_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv04_mmu_new,
.pci = nv04_pci_new,
.timer = nv04_timer_new,
@@ -456,7 +456,7 @@ nv36_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv04_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv04_mmu_new,
.pci = nv04_pci_new,
.timer = nv04_timer_new,
@@ -479,7 +479,7 @@ nv40_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv04_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
@@ -505,7 +505,7 @@ nv41_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv41_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
@@ -531,7 +531,7 @@ nv42_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv41_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
@@ -557,7 +557,7 @@ nv43_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv41_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
@@ -609,7 +609,7 @@ nv45_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv04_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
@@ -661,7 +661,7 @@ nv47_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv41_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
@@ -687,7 +687,7 @@ nv49_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv41_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
@@ -739,7 +739,7 @@ nv4b_chipset = {
.gpio = nv10_gpio_new,
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
- .mc = nv04_mc_new,
+ .mc = nv17_mc_new,
.mmu = nv41_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
@@ -926,7 +926,7 @@ nv84_chipset = {
.gpio = nv50_gpio_new,
.i2c = nv50_i2c_new,
.imem = nv50_instmem_new,
- .mc = nv50_mc_new,
+ .mc = g84_mc_new,
.mmu = nv50_mmu_new,
.mxm = nv50_mxm_new,
.pci = g84_pci_new,
@@ -958,7 +958,7 @@ nv86_chipset = {
.gpio = nv50_gpio_new,
.i2c = nv50_i2c_new,
.imem = nv50_instmem_new,
- .mc = nv50_mc_new,
+ .mc = g84_mc_new,
.mmu = nv50_mmu_new,
.mxm = nv50_mxm_new,
.pci = g84_pci_new,
@@ -990,7 +990,7 @@ nv92_chipset = {
.gpio = nv50_gpio_new,
.i2c = nv50_i2c_new,
.imem = nv50_instmem_new,
- .mc = nv50_mc_new,
+ .mc = g84_mc_new,
.mmu = nv50_mmu_new,
.mxm = nv50_mxm_new,
.pci = g84_pci_new,
@@ -1022,7 +1022,7 @@ nv94_chipset = {
.gpio = g94_gpio_new,
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
- .mc = nv50_mc_new,
+ .mc = g84_mc_new,
.mmu = nv50_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
@@ -1054,7 +1054,7 @@ nv96_chipset = {
.gpio = g94_gpio_new,
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
- .mc = nv50_mc_new,
+ .mc = g84_mc_new,
.mmu = nv50_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
@@ -1118,7 +1118,7 @@ nva0_chipset = {
.gpio = g94_gpio_new,
.i2c = nv50_i2c_new,
.imem = nv50_instmem_new,
- .mc = g98_mc_new,
+ .mc = g84_mc_new,
.mmu = nv50_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
@@ -1150,7 +1150,7 @@ nva3_chipset = {
.gpio = g94_gpio_new,
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
- .mc = g98_mc_new,
+ .mc = gt215_mc_new,
.mmu = nv50_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
@@ -1184,7 +1184,7 @@ nva5_chipset = {
.gpio = g94_gpio_new,
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
- .mc = g98_mc_new,
+ .mc = gt215_mc_new,
.mmu = nv50_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
@@ -1217,7 +1217,7 @@ nva8_chipset = {
.gpio = g94_gpio_new,
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
- .mc = g98_mc_new,
+ .mc = gt215_mc_new,
.mmu = nv50_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
@@ -1314,7 +1314,7 @@ nvaf_chipset = {
.gpio = g94_gpio_new,
.i2c = g94_i2c_new,
.imem = nv50_instmem_new,
- .mc = g98_mc_new,
+ .mc = gt215_mc_new,
.mmu = nv50_mmu_new,
.mxm = nv50_mxm_new,
.pci = g94_pci_new,
@@ -1676,13 +1676,14 @@ nve4_chipset = {
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
- .mc = gf100_mc_new,
+ .mc = gk104_mc_new,
.mmu = gf100_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
+ .top = gk104_top_new,
.volt = gk104_volt_new,
.ce[0] = gk104_ce_new,
.ce[1] = gk104_ce_new,
@@ -1714,13 +1715,14 @@ nve6_chipset = {
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
- .mc = gf100_mc_new,
+ .mc = gk104_mc_new,
.mmu = gf100_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
+ .top = gk104_top_new,
.volt = gk104_volt_new,
.ce[0] = gk104_ce_new,
.ce[1] = gk104_ce_new,
@@ -1752,13 +1754,14 @@ nve7_chipset = {
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
- .mc = gf100_mc_new,
+ .mc = gk104_mc_new,
.mmu = gf100_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
+ .top = gk104_top_new,
.volt = gk104_volt_new,
.ce[0] = gk104_ce_new,
.ce[1] = gk104_ce_new,
@@ -1789,6 +1792,7 @@ nvea_chipset = {
.mmu = gf100_mmu_new,
.pmu = gk20a_pmu_new,
.timer = gk20a_timer_new,
+ .top = gk104_top_new,
.volt = gk20a_volt_new,
.ce[2] = gk104_ce_new,
.dma = gf119_dma_new,
@@ -1814,13 +1818,14 @@ nvf0_chipset = {
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
- .mc = gf100_mc_new,
+ .mc = gk104_mc_new,
.mmu = gf100_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk110_pmu_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
+ .top = gk104_top_new,
.volt = gk104_volt_new,
.ce[0] = gk104_ce_new,
.ce[1] = gk104_ce_new,
@@ -1851,13 +1856,14 @@ nvf1_chipset = {
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
.ltc = gk104_ltc_new,
- .mc = gf100_mc_new,
+ .mc = gk104_mc_new,
.mmu = gf100_mmu_new,
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk110_pmu_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
+ .top = gk104_top_new,
.volt = gk104_volt_new,
.ce[0] = gk104_ce_new,
.ce[1] = gk104_ce_new,
@@ -1895,6 +1901,7 @@ nv106_chipset = {
.pmu = gk208_pmu_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
+ .top = gk104_top_new,
.volt = gk104_volt_new,
.ce[0] = gk104_ce_new,
.ce[1] = gk104_ce_new,
@@ -1932,6 +1939,7 @@ nv108_chipset = {
.pmu = gk208_pmu_new,
.therm = gf119_therm_new,
.timer = nv41_timer_new,
+ .top = gk104_top_new,
.volt = gk104_volt_new,
.ce[0] = gk104_ce_new,
.ce[1] = gk104_ce_new,
@@ -1969,6 +1977,41 @@ nv117_chipset = {
.pmu = gm107_pmu_new,
.therm = gm107_therm_new,
.timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .volt = gk104_volt_new,
+ .ce[0] = gm107_ce_new,
+ .ce[2] = gm107_ce_new,
+ .disp = gm107_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gm107_fifo_new,
+ .gr = gm107_gr_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv118_chipset = {
+ .name = "GM108",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .clk = gk104_clk_new,
+ .devinit = gm107_devinit_new,
+ .fb = gm107_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gf119_i2c_new,
+ .ibus = gk104_ibus_new,
+ .iccsense = gf100_iccsense_new,
+ .imem = nv50_instmem_new,
+ .ltc = gm107_ltc_new,
+ .mc = gk20a_mc_new,
+ .mmu = gf100_mmu_new,
+ .mxm = nv50_mxm_new,
+ .pci = gk104_pci_new,
+ .pmu = gm107_pmu_new,
+ .therm = gm107_therm_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
.volt = gk104_volt_new,
.ce[0] = gm107_ce_new,
.ce[2] = gm107_ce_new,
@@ -1986,7 +2029,7 @@ nv120_chipset = {
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
- .fb = gm107_fb_new,
+ .fb = gm200_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gm200_i2c_new,
@@ -2001,6 +2044,7 @@ nv120_chipset = {
.pmu = gm107_pmu_new,
.secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
+ .top = gk104_top_new,
.volt = gk104_volt_new,
.ce[0] = gm200_ce_new,
.ce[1] = gm200_ce_new,
@@ -2019,7 +2063,7 @@ nv124_chipset = {
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
- .fb = gm107_fb_new,
+ .fb = gm200_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gm200_i2c_new,
@@ -2034,6 +2078,7 @@ nv124_chipset = {
.pmu = gm107_pmu_new,
.secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
+ .top = gk104_top_new,
.volt = gk104_volt_new,
.ce[0] = gm200_ce_new,
.ce[1] = gm200_ce_new,
@@ -2052,7 +2097,7 @@ nv126_chipset = {
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
.devinit = gm200_devinit_new,
- .fb = gm107_fb_new,
+ .fb = gm200_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gm200_i2c_new,
@@ -2067,6 +2112,7 @@ nv126_chipset = {
.pmu = gm107_pmu_new,
.secboot = gm200_secboot_new,
.timer = gk20a_timer_new,
+ .top = gk104_top_new,
.volt = gk104_volt_new,
.ce[0] = gm200_ce_new,
.ce[1] = gm200_ce_new,
@@ -2093,6 +2139,7 @@ nv12b_chipset = {
.mmu = gf100_mmu_new,
.secboot = gm20b_secboot_new,
.timer = gk20a_timer_new,
+ .top = gk104_top_new,
.ce[2] = gm200_ce_new,
.volt = gm20b_volt_new,
.dma = gf119_dma_new,
@@ -2150,6 +2197,7 @@ nvkm_device_subdev(struct nvkm_device *device, int index)
_(SECBOOT , device->secboot , &device->secboot->subdev);
_(THERM , device->therm , &device->therm->subdev);
_(TIMER , device->timer , &device->timer->subdev);
+ _(TOP , device->top , &device->top->subdev);
_(VOLT , device->volt , &device->volt->subdev);
#undef _
default:
@@ -2523,6 +2571,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x106: device->chip = &nv106_chipset; break;
case 0x108: device->chip = &nv108_chipset; break;
case 0x117: device->chip = &nv117_chipset; break;
+ case 0x118: device->chip = &nv118_chipset; break;
case 0x120: device->chip = &nv120_chipset; break;
case 0x124: device->chip = &nv124_chipset; break;
case 0x126: device->chip = &nv126_chipset; break;
@@ -2604,6 +2653,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_SUBDEV_SECBOOT , secboot);
_(NVKM_SUBDEV_THERM , therm);
_(NVKM_SUBDEV_TIMER , timer);
+ _(NVKM_SUBDEV_TOP , top);
_(NVKM_SUBDEV_VOLT , volt);
_(NVKM_ENGINE_BSP , bsp);
_(NVKM_ENGINE_CE0 , ce[0]);
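
Note on device/base.c: besides splitting the .mc entries into per-generation constructors (nv11, nv17, g84, gt215, gk104, gk20a) and adding .top = gk104_top_new to the Kepler-and-later tables, the file gains a full nv118_chipset (GM108) table plus its switch case. A toy version of that chipset dispatch, with illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    struct chip_toy { const char *name; };

    static const struct chip_toy nv117_toy = { "GM107" };
    static const struct chip_toy nv118_toy = { "GM108" };

    /* 0x118 now resolves to the new GM108 table instead of falling
     * through to the unknown-chipset path. */
    static const struct chip_toy *chip_lookup(uint32_t chipset)
    {
        switch (chipset) {
        case 0x117: return &nv117_toy;
        case 0x118: return &nv118_toy;
        default:    return NULL;
        }
    }
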
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index e80f6ab1c..1a06ac175 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -22,6 +22,7 @@
#include <subdev/pmu.h>
#include <subdev/therm.h>
#include <subdev/timer.h>
+#include <subdev/top.h>
#include <subdev/volt.h>
#include <subdev/secboot.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 785fa76d0..1efe91b1e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -298,8 +298,7 @@ nvkm_disp_ctor(const struct nvkm_disp_func *func, struct nvkm_device *device,
disp->func = func;
disp->head.nr = heads;
- ret = nvkm_engine_ctor(&nvkm_disp, device, index, 0,
- true, &disp->engine);
+ ret = nvkm_engine_ctor(&nvkm_disp, device, index, true, &disp->engine);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
index 9769fc0d5..f11ebdd16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/base.c
@@ -152,6 +152,5 @@ nvkm_dma_new_(const struct nvkm_dma_func *func, struct nvkm_device *device,
return -ENOMEM;
dma->func = func;
- return nvkm_engine_ctor(&nvkm_dma, device, index,
- 0, true, &dma->engine);
+ return nvkm_engine_ctor(&nvkm_dma, device, index, true, &dma->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index acc8693bb..a20b6aff7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -348,6 +348,6 @@ nvkm_falcon_new_(const struct nvkm_falcon_func *func,
falcon->data.size = func->data.size;
*pengine = &falcon->engine;
- return nvkm_engine_ctor(&nvkm_falcon, device, index, func->pmc_enable,
+ return nvkm_engine_ctor(&nvkm_falcon, device, index,
enable, &falcon->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
index cfc7d5725..1c9682ae3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
@@ -178,6 +178,17 @@ nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index,
const struct nvkm_fifo_chan_oclass *sclass;
int c = 0;
+ if (fifo->func->class_get) {
+ int ret = fifo->func->class_get(fifo, index, &sclass);
+ if (ret == 0) {
+ oclass->base = sclass->base;
+ oclass->engn = sclass;
+ *class = &nvkm_fifo_class;
+ return 0;
+ }
+ return ret;
+ }
+
while ((sclass = fifo->func->chan[c])) {
if (c++ == index) {
oclass->base = sclass->base;
@@ -261,8 +272,7 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
fifo->nr = nr;
bitmap_clear(fifo->mask, 0, fifo->nr);
- ret = nvkm_engine_ctor(&nvkm_fifo, device, index, 0x00000100,
- true, &fifo->engine);
+ ret = nvkm_engine_ctor(&nvkm_fifo, device, index, true, &fifo->engine);
if (ret)
return ret;
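
Note on nvkm_fifo_class_get() above: an optional class_get hook now takes precedence over the static chan[] walk. A self-contained toy model of the lookup order (toy types; the real signatures are in priv.h below):

    #include <stddef.h>

    struct oclass_toy { int handle; };

    struct fifo_toy {
        int (*class_get)(struct fifo_toy *, int index,
                         const struct oclass_toy **);
        const struct oclass_toy *chan[4];   /* NULL-terminated */
    };

    /* Hook wins if present; otherwise walk chan[] by index. A positive
     * return is the class count, meaning the index was out of range. */
    static int fifo_class_get(struct fifo_toy *fifo, int index,
                              const struct oclass_toy **sclass)
    {
        int c = 0;
        if (fifo->class_get)
            return fifo->class_get(fifo, index, sclass);
        while ((*sclass = fifo->chan[c])) {
            if (c++ == index)
                return 0;
        }
        return c;
    }
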
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 68acb36b3..743f3a189 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -25,21 +25,36 @@
#include "changk104.h"
#include <core/client.h>
-#include <core/enum.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
+#include <subdev/top.h>
#include <engine/sw.h>
#include <nvif/class.h>
-void
+static int
+gk104_fifo_class_get(struct nvkm_fifo *base, int index,
+ const struct nvkm_fifo_chan_oclass **psclass)
+{
+ struct gk104_fifo *fifo = gk104_fifo(base);
+ int c = 0;
+
+ while ((*psclass = fifo->func->chan[c])) {
+ if (c++ == index)
+ return 0;
+ }
+
+ return c;
+}
+
+static void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}
-void
+static void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
struct nvkm_device *device = fifo->engine.subdev.device;
@@ -267,111 +282,6 @@ gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}
-static const struct nvkm_enum
-gk104_fifo_fault_engine[] = {
- { 0x00, "GR", NULL, NVKM_ENGINE_GR },
- { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
- { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
- { 0x05, "BAR3", NULL, NVKM_SUBDEV_INSTMEM },
- { 0x07, "PBDMA0", NULL, NVKM_ENGINE_FIFO },
- { 0x08, "PBDMA1", NULL, NVKM_ENGINE_FIFO },
- { 0x09, "PBDMA2", NULL, NVKM_ENGINE_FIFO },
- { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
- { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
- { 0x13, "PERF" },
- { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
- { 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
- { 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
- { 0x17, "PMU" },
- { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
- { 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
- {}
-};
-
-static const struct nvkm_enum
-gk104_fifo_fault_reason[] = {
- { 0x00, "PDE" },
- { 0x01, "PDE_SIZE" },
- { 0x02, "PTE" },
- { 0x03, "VA_LIMIT_VIOLATION" },
- { 0x04, "UNBOUND_INST_BLOCK" },
- { 0x05, "PRIV_VIOLATION" },
- { 0x06, "RO_VIOLATION" },
- { 0x07, "WO_VIOLATION" },
- { 0x08, "PITCH_MASK_VIOLATION" },
- { 0x09, "WORK_CREATION" },
- { 0x0a, "UNSUPPORTED_APERTURE" },
- { 0x0b, "COMPRESSION_FAILURE" },
- { 0x0c, "UNSUPPORTED_KIND" },
- { 0x0d, "REGION_VIOLATION" },
- { 0x0e, "BOTH_PTES_VALID" },
- { 0x0f, "INFO_TYPE_POISONED" },
- {}
-};
-
-static const struct nvkm_enum
-gk104_fifo_fault_hubclient[] = {
- { 0x00, "VIP" },
- { 0x01, "CE0" },
- { 0x02, "CE1" },
- { 0x03, "DNISO" },
- { 0x04, "FE" },
- { 0x05, "FECS" },
- { 0x06, "HOST" },
- { 0x07, "HOST_CPU" },
- { 0x08, "HOST_CPU_NB" },
- { 0x09, "ISO" },
- { 0x0a, "MMU" },
- { 0x0b, "MSPDEC" },
- { 0x0c, "MSPPP" },
- { 0x0d, "MSVLD" },
- { 0x0e, "NISO" },
- { 0x0f, "P2P" },
- { 0x10, "PD" },
- { 0x11, "PERF" },
- { 0x12, "PMU" },
- { 0x13, "RASTERTWOD" },
- { 0x14, "SCC" },
- { 0x15, "SCC_NB" },
- { 0x16, "SEC" },
- { 0x17, "SSYNC" },
- { 0x18, "GR_CE" },
- { 0x19, "CE2" },
- { 0x1a, "XV" },
- { 0x1b, "MMU_NB" },
- { 0x1c, "MSENC" },
- { 0x1d, "DFALCON" },
- { 0x1e, "SKED" },
- { 0x1f, "AFALCON" },
- {}
-};
-
-static const struct nvkm_enum
-gk104_fifo_fault_gpcclient[] = {
- { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
- { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
- { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
- { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
- { 0x0c, "RAST" },
- { 0x0d, "GCC" },
- { 0x0e, "GPCCS" },
- { 0x0f, "PROP_0" },
- { 0x10, "PROP_1" },
- { 0x11, "PROP_2" },
- { 0x12, "PROP_3" },
- { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
- { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
- { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
- { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
- { 0x1f, "GPM" },
- { 0x20, "LTP_UTLB_0" },
- { 0x21, "LTP_UTLB_1" },
- { 0x22, "LTP_UTLB_2" },
- { 0x23, "LTP_UTLB_3" },
- { 0x24, "GPC_RGG_UTLB" },
- {}
-};
-
static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
@@ -390,14 +300,14 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
struct nvkm_engine *engine = NULL;
struct nvkm_fifo_chan *chan;
unsigned long flags;
- char gpcid[8] = "";
+ char gpcid[8] = "", en[16] = "";
- er = nvkm_enum_find(gk104_fifo_fault_reason, reason);
- eu = nvkm_enum_find(gk104_fifo_fault_engine, unit);
+ er = nvkm_enum_find(fifo->func->fault.reason, reason);
+ eu = nvkm_enum_find(fifo->func->fault.engine, unit);
if (hub) {
- ec = nvkm_enum_find(gk104_fifo_fault_hubclient, client);
+ ec = nvkm_enum_find(fifo->func->fault.hubclient, client);
} else {
- ec = nvkm_enum_find(gk104_fifo_fault_gpcclient, client);
+ ec = nvkm_enum_find(fifo->func->fault.gpcclient, client);
snprintf(gpcid, sizeof(gpcid), "GPC%d/", gpc);
}
@@ -418,13 +328,27 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
}
}
+ if (eu == NULL) {
+ enum nvkm_devidx engidx = nvkm_top_fault(device->top, unit);
+ if (engidx < NVKM_SUBDEV_NR) {
+ const char *src = nvkm_subdev_name[engidx];
+ char *dst = en;
+ do {
+ *dst++ = toupper(*src++);
+ } while(*src);
+ engine = nvkm_device_engine(device, engidx);
+ }
+ } else {
+ snprintf(en, sizeof(en), "%s", eu->name);
+ }
+
chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
nvkm_error(subdev,
"%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
"reason %02x [%s] on channel %d [%010llx %s]\n",
write ? "write" : "read", (u64)vahi << 32 | valo,
- unit, eu ? eu->name : "", client, gpcid, ec ? ec->name : "",
+ unit, en, client, gpcid, ec ? ec->name : "",
reason, er ? er->name : "", chan ? chan->chid : -1,
(u64)inst << 12,
chan ? chan->object.client->name : "unknown");
@@ -557,7 +481,7 @@ gk104_fifo_intr_engine(struct gk104_fifo *fifo)
nvkm_fifo_uevent(&fifo->base);
}
-void
+static void
gk104_fifo_intr(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
@@ -649,7 +573,7 @@ gk104_fifo_intr(struct nvkm_fifo *base)
}
}
-void
+static void
gk104_fifo_fini(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
@@ -659,13 +583,15 @@ gk104_fifo_fini(struct nvkm_fifo *base)
nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}
-int
+static int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
- int ret, i;
+ struct nvkm_top *top = device->top;
+ int engn, runl, pbid, ret, i, j;
+ enum nvkm_devidx engidx;
u32 *map;
/* Determine number of PBDMAs by checking valid enable bits. */
@@ -680,86 +606,26 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
for (i = 0; i < fifo->pbdma_nr; i++)
map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));
- /* Read device topology from HW. */
- for (i = 0; i < 64; i++) {
- int type = -1, pbid = -1, engidx = -1;
- int engn = -1, runl = -1, intr = -1, mcen = -1;
- int fault = -1, j;
- u32 data, addr = 0;
-
- do {
- data = nvkm_rd32(device, 0x022700 + (i * 0x04));
- nvkm_trace(subdev, "%02x: %08x\n", i, data);
- switch (data & 0x00000003) {
- case 0x00000000: /* NOT_VALID */
- continue;
- case 0x00000001: /* DATA */
- addr = (data & 0x00fff000);
- fault = (data & 0x000000f8) >> 3;
- break;
- case 0x00000002: /* ENUM */
- if (data & 0x00000020)
- engn = (data & 0x3c000000) >> 26;
- if (data & 0x00000010)
- runl = (data & 0x01e00000) >> 21;
- if (data & 0x00000008)
- intr = (data & 0x000f8000) >> 15;
- if (data & 0x00000004)
- mcen = (data & 0x00003e00) >> 9;
- break;
- case 0x00000003: /* ENGINE_TYPE */
- type = (data & 0x7ffffffc) >> 2;
- break;
- }
- } while ((data & 0x80000000) && ++i < 64);
-
- if (!data)
- continue;
-
+ /* Determine runlist configuration from topology device info. */
+ i = 0;
+ while ((int)(engidx = nvkm_top_engine(top, i++, &runl, &engn)) >= 0) {
/* Determine which PBDMA handles requests for this engine. */
- for (j = 0; runl >= 0 && j < fifo->pbdma_nr; j++) {
+ for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
if (map[j] & (1 << runl)) {
pbid = j;
break;
}
}
- /* Translate engine type to NVKM engine identifier. */
- switch (type) {
- case 0x00000000: engidx = NVKM_ENGINE_GR; break;
- case 0x00000001: engidx = NVKM_ENGINE_CE0; break;
- case 0x00000002: engidx = NVKM_ENGINE_CE1; break;
- case 0x00000003: engidx = NVKM_ENGINE_CE2; break;
- case 0x00000008: engidx = NVKM_ENGINE_MSPDEC; break;
- case 0x00000009: engidx = NVKM_ENGINE_MSPPP; break;
- case 0x0000000a: engidx = NVKM_ENGINE_MSVLD; break;
- case 0x0000000b: engidx = NVKM_ENGINE_MSENC; break;
- case 0x0000000c: engidx = NVKM_ENGINE_VIC; break;
- case 0x0000000d: engidx = NVKM_ENGINE_SEC; break;
- case 0x0000000e: engidx = NVKM_ENGINE_NVENC0; break;
- case 0x0000000f: engidx = NVKM_ENGINE_NVENC1; break;
- case 0x00000010: engidx = NVKM_ENGINE_NVDEC; break;
- break;
- default:
- break;
- }
+ nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d\n",
+ engn, runl, pbid);
- nvkm_debug(subdev, "%02x (%8s): engine %2d runlist %2d "
- "pbdma %2d intr %2d reset %2d "
- "fault %2d addr %06x\n", type,
- engidx < 0 ? NULL : nvkm_subdev_name[engidx],
- engn, runl, pbid, intr, mcen, fault, addr);
-
- /* Mark the engine as supported if everything checks out. */
- if (engn >= 0 && runl >= 0) {
- fifo->engine[engn].engine = engidx < 0 ? NULL :
- nvkm_device_engine(device, engidx);
- fifo->engine[engn].runl = runl;
- fifo->engine[engn].pbid = pbid;
- fifo->engine_nr = max(fifo->engine_nr, engn + 1);
- fifo->runlist[runl].engm |= 1 << engn;
- fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
- }
+ fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
+ fifo->engine[engn].runl = runl;
+ fifo->engine[engn].pbid = pbid;
+ fifo->engine_nr = max(fifo->engine_nr, engn + 1);
+ fifo->runlist[runl].engm |= 1 << engn;
+ fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
}
kfree(map);
@@ -796,7 +662,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
return 0;
}
-void
+static void
gk104_fifo_init(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
@@ -825,7 +691,7 @@ gk104_fifo_init(struct nvkm_fifo *base)
nvkm_wr32(device, 0x002140, 0x7fffffff);
}
-void *
+static void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
struct gk104_fifo *fifo = gk104_fifo(base);
@@ -842,29 +708,154 @@ gk104_fifo_dtor(struct nvkm_fifo *base)
return fifo;
}
+static const struct nvkm_fifo_func
+gk104_fifo_ = {
+ .dtor = gk104_fifo_dtor,
+ .oneinit = gk104_fifo_oneinit,
+ .init = gk104_fifo_init,
+ .fini = gk104_fifo_fini,
+ .intr = gk104_fifo_intr,
+ .uevent_init = gk104_fifo_uevent_init,
+ .uevent_fini = gk104_fifo_uevent_fini,
+ .class_get = gk104_fifo_class_get,
+};
+
int
-gk104_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
+gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
int index, int nr, struct nvkm_fifo **pfifo)
{
struct gk104_fifo *fifo;
if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
return -ENOMEM;
+ fifo->func = func;
INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
*pfifo = &fifo->base;
- return nvkm_fifo_ctor(func, device, index, nr, &fifo->base);
+ return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
}
-static const struct nvkm_fifo_func
+const struct nvkm_enum
+gk104_fifo_fault_engine[] = {
+ { 0x00, "GR", NULL, NVKM_ENGINE_GR },
+ { 0x01, "DISPLAY" },
+ { 0x02, "CAPTURE" },
+ { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x06, "SCHED" },
+ { 0x07, "HOST0" },
+ { 0x08, "HOST1" },
+ { 0x09, "HOST2" },
+ { 0x0a, "HOST3" },
+ { 0x0b, "HOST4" },
+ { 0x0c, "HOST5" },
+ { 0x0d, "HOST6" },
+ { 0x0e, "HOST7" },
+ { 0x0f, "HOSTSR" },
+ { 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
+ { 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
+ { 0x13, "PERF" },
+ { 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
+ { 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
+ { 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
+ { 0x17, "PMU" },
+ { 0x18, "PTP" },
+ { 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
+ { 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
+ {}
+};
+
+const struct nvkm_enum
+gk104_fifo_fault_reason[] = {
+ { 0x00, "PDE" },
+ { 0x01, "PDE_SIZE" },
+ { 0x02, "PTE" },
+ { 0x03, "VA_LIMIT_VIOLATION" },
+ { 0x04, "UNBOUND_INST_BLOCK" },
+ { 0x05, "PRIV_VIOLATION" },
+ { 0x06, "RO_VIOLATION" },
+ { 0x07, "WO_VIOLATION" },
+ { 0x08, "PITCH_MASK_VIOLATION" },
+ { 0x09, "WORK_CREATION" },
+ { 0x0a, "UNSUPPORTED_APERTURE" },
+ { 0x0b, "COMPRESSION_FAILURE" },
+ { 0x0c, "UNSUPPORTED_KIND" },
+ { 0x0d, "REGION_VIOLATION" },
+ { 0x0e, "BOTH_PTES_VALID" },
+ { 0x0f, "INFO_TYPE_POISONED" },
+ {}
+};
+
+const struct nvkm_enum
+gk104_fifo_fault_hubclient[] = {
+ { 0x00, "VIP" },
+ { 0x01, "CE0" },
+ { 0x02, "CE1" },
+ { 0x03, "DNISO" },
+ { 0x04, "FE" },
+ { 0x05, "FECS" },
+ { 0x06, "HOST" },
+ { 0x07, "HOST_CPU" },
+ { 0x08, "HOST_CPU_NB" },
+ { 0x09, "ISO" },
+ { 0x0a, "MMU" },
+ { 0x0b, "MSPDEC" },
+ { 0x0c, "MSPPP" },
+ { 0x0d, "MSVLD" },
+ { 0x0e, "NISO" },
+ { 0x0f, "P2P" },
+ { 0x10, "PD" },
+ { 0x11, "PERF" },
+ { 0x12, "PMU" },
+ { 0x13, "RASTERTWOD" },
+ { 0x14, "SCC" },
+ { 0x15, "SCC_NB" },
+ { 0x16, "SEC" },
+ { 0x17, "SSYNC" },
+ { 0x18, "GR_CE" },
+ { 0x19, "CE2" },
+ { 0x1a, "XV" },
+ { 0x1b, "MMU_NB" },
+ { 0x1c, "MSENC" },
+ { 0x1d, "DFALCON" },
+ { 0x1e, "SKED" },
+ { 0x1f, "AFALCON" },
+ {}
+};
+
+const struct nvkm_enum
+gk104_fifo_fault_gpcclient[] = {
+ { 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
+ { 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
+ { 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
+ { 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
+ { 0x0c, "RAST" },
+ { 0x0d, "GCC" },
+ { 0x0e, "GPCCS" },
+ { 0x0f, "PROP_0" },
+ { 0x10, "PROP_1" },
+ { 0x11, "PROP_2" },
+ { 0x12, "PROP_3" },
+ { 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
+ { 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
+ { 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
+ { 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
+ { 0x1f, "GPM" },
+ { 0x20, "LTP_UTLB_0" },
+ { 0x21, "LTP_UTLB_1" },
+ { 0x22, "LTP_UTLB_2" },
+ { 0x23, "LTP_UTLB_3" },
+ { 0x24, "GPC_RGG_UTLB" },
+ {}
+};
+
+static const struct gk104_fifo_func
gk104_fifo = {
- .dtor = gk104_fifo_dtor,
- .oneinit = gk104_fifo_oneinit,
- .init = gk104_fifo_init,
- .fini = gk104_fifo_fini,
- .intr = gk104_fifo_intr,
- .uevent_init = gk104_fifo_uevent_init,
- .uevent_fini = gk104_fifo_uevent_fini,
+ .fault.engine = gk104_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
.chan = {
&gk104_fifo_gpfifo_oclass,
NULL
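
Note on gk104.c: two behavioural changes ride along with the refactor. gk104_fifo_oneinit() now asks the new TOP subdev (nvkm_top_engine()) for the engine/runlist layout instead of parsing the 0x022700 topology registers itself, and the fault decoder falls back to nvkm_top_fault() plus an upper-cased subdev name when the static enum table has no entry. A toy of that upper-casing loop; unlike the kernel's, which relies on a zero-initialised buffer, this one NUL-terminates explicitly:

    #include <ctype.h>

    static void fault_name(char *dst, const char *src)
    {
        do {
            *dst++ = (char)toupper((unsigned char)*src++);
        } while (*src);
        *dst = '\0';
    }
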
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 9e5d00ba3..679f3ec31 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -3,10 +3,12 @@
#define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
#include "priv.h"
+#include <core/enum.h>
#include <subdev/mmu.h>
struct gk104_fifo_chan;
struct gk104_fifo {
+ const struct gk104_fifo_func *func;
struct nvkm_fifo base;
struct {
@@ -39,15 +41,19 @@ struct gk104_fifo {
} user;
};
-int gk104_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *,
+struct gk104_fifo_func {
+ struct {
+ const struct nvkm_enum *engine;
+ const struct nvkm_enum *reason;
+ const struct nvkm_enum *hubclient;
+ const struct nvkm_enum *gpcclient;
+ } fault;
+
+ const struct nvkm_fifo_chan_oclass *chan[];
+};
+
+int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *,
int index, int nr, struct nvkm_fifo **);
-void *gk104_fifo_dtor(struct nvkm_fifo *);
-int gk104_fifo_oneinit(struct nvkm_fifo *);
-void gk104_fifo_init(struct nvkm_fifo *);
-void gk104_fifo_fini(struct nvkm_fifo *);
-void gk104_fifo_intr(struct nvkm_fifo *);
-void gk104_fifo_uevent_init(struct nvkm_fifo *);
-void gk104_fifo_uevent_fini(struct nvkm_fifo *);
void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl);
@@ -70,4 +76,11 @@ gk104_fifo_engine_subdev(int engine)
return 0;
}
}
+
+extern const struct nvkm_enum gk104_fifo_fault_engine[];
+extern const struct nvkm_enum gk104_fifo_fault_reason[];
+extern const struct nvkm_enum gk104_fifo_fault_hubclient[];
+extern const struct nvkm_enum gk104_fifo_fault_gpcclient[];
+
+extern const struct nvkm_enum gm107_fifo_fault_engine[];
#endif
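
Note on gk104.h: the new gk104_fifo_func is pure data, four fault-decode tables plus a NULL-terminated flexible array of channel classes, so the per-chipset files that follow (gk110, gk208, gk20a, gm107, gm200, gm20b) shrink to table definitions. A toy of the descriptor's shape (abbreviated names):

    struct enum_toy { int value; const char *name; };
    struct chan_oclass_toy;

    struct fifo_func_toy {
        struct {
            const struct enum_toy *engine;
            const struct enum_toy *reason;
            const struct enum_toy *hubclient;
            const struct enum_toy *gpcclient;
        } fault;
        const struct chan_oclass_toy *chan[];   /* NULL-terminated */
    };
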
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index 41307fcd4..b2f8ab7bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -24,15 +24,12 @@
#include "gk104.h"
#include "changk104.h"
-static const struct nvkm_fifo_func
+static const struct gk104_fifo_func
gk110_fifo = {
- .dtor = gk104_fifo_dtor,
- .oneinit = gk104_fifo_oneinit,
- .init = gk104_fifo_init,
- .fini = gk104_fifo_fini,
- .intr = gk104_fifo_intr,
- .uevent_init = gk104_fifo_uevent_init,
- .uevent_fini = gk104_fifo_uevent_fini,
+ .fault.engine = gk104_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
.chan = {
&gk110_fifo_gpfifo_oclass,
NULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index ce01c1a7d..160617d37 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -24,15 +24,12 @@
#include "gk104.h"
#include "changk104.h"
-static const struct nvkm_fifo_func
+static const struct gk104_fifo_func
gk208_fifo = {
- .dtor = gk104_fifo_dtor,
- .oneinit = gk104_fifo_oneinit,
- .init = gk104_fifo_init,
- .fini = gk104_fifo_fini,
- .intr = gk104_fifo_intr,
- .uevent_init = gk104_fifo_uevent_init,
- .uevent_fini = gk104_fifo_uevent_fini,
+ .fault.engine = gk104_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
.chan = {
&gk104_fifo_gpfifo_oclass,
NULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index b47fe98f4..be9f5c16e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -22,15 +22,12 @@
#include "gk104.h"
#include "changk104.h"
-static const struct nvkm_fifo_func
+static const struct gk104_fifo_func
gk20a_fifo = {
- .dtor = gk104_fifo_dtor,
- .oneinit = gk104_fifo_oneinit,
- .init = gk104_fifo_init,
- .fini = gk104_fifo_fini,
- .intr = gk104_fifo_intr,
- .uevent_init = gk104_fifo_uevent_init,
- .uevent_fini = gk104_fifo_uevent_fini,
+ .fault.engine = gk104_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
.chan = {
&gk104_fifo_gpfifo_oclass,
NULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index 6d59d6579..bd1ff877a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -24,15 +24,35 @@
#include "gk104.h"
#include "changk104.h"
-static const struct nvkm_fifo_func
+const struct nvkm_enum
+gm107_fifo_fault_engine[] = {
+ { 0x01, "DISPLAY" },
+ { 0x02, "CAPTURE" },
+ { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ { 0x06, "SCHED" },
+ { 0x07, "HOST0" },
+ { 0x08, "HOST1" },
+ { 0x09, "HOST2" },
+ { 0x0a, "HOST3" },
+ { 0x0b, "HOST4" },
+ { 0x0c, "HOST5" },
+ { 0x0d, "HOST6" },
+ { 0x0e, "HOST7" },
+ { 0x0f, "HOSTSR" },
+ { 0x13, "PERF" },
+ { 0x17, "PMU" },
+ { 0x18, "PTP" },
+ {}
+};
+
+static const struct gk104_fifo_func
gm107_fifo = {
- .dtor = gk104_fifo_dtor,
- .oneinit = gk104_fifo_oneinit,
- .init = gk104_fifo_init,
- .fini = gk104_fifo_fini,
- .intr = gk104_fifo_intr,
- .uevent_init = gk104_fifo_uevent_init,
- .uevent_fini = gk104_fifo_uevent_fini,
+ .fault.engine = gm107_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
.chan = {
&gk110_fifo_gpfifo_oclass,
NULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index 4bdd43078..b069f785c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -24,15 +24,12 @@
#include "gk104.h"
#include "changk104.h"
-static const struct nvkm_fifo_func
+static const struct gk104_fifo_func
gm200_fifo = {
- .dtor = gk104_fifo_dtor,
- .oneinit = gk104_fifo_oneinit,
- .init = gk104_fifo_init,
- .fini = gk104_fifo_fini,
- .intr = gk104_fifo_intr,
- .uevent_init = gk104_fifo_uevent_init,
- .uevent_fini = gk104_fifo_uevent_fini,
+ .fault.engine = gm107_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
.chan = {
&gm200_fifo_gpfifo_oclass,
NULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index 4c91d4aa1..2ed87c2e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -22,15 +22,12 @@
#include "gk104.h"
#include "changk104.h"
-static const struct nvkm_fifo_func
+static const struct gk104_fifo_func
gm20b_fifo = {
- .dtor = gk104_fifo_dtor,
- .oneinit = gk104_fifo_oneinit,
- .init = gk104_fifo_init,
- .fini = gk104_fifo_fini,
- .intr = gk104_fifo_intr,
- .uevent_init = gk104_fifo_uevent_init,
- .uevent_fini = gk104_fifo_uevent_fini,
+ .fault.engine = gm107_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
.chan = {
&gm200_fifo_gpfifo_oclass,
NULL
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index cb1432e9b..f6dfb37d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -7,6 +7,7 @@ int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *,
int index, int nr, struct nvkm_fifo *);
void nvkm_fifo_uevent(struct nvkm_fifo *);
+struct nvkm_fifo_chan_oclass;
struct nvkm_fifo_func {
void *(*dtor)(struct nvkm_fifo *);
int (*oneinit)(struct nvkm_fifo *);
@@ -17,6 +18,8 @@ struct nvkm_fifo_func {
void (*start)(struct nvkm_fifo *, unsigned long *);
void (*uevent_init)(struct nvkm_fifo *);
void (*uevent_fini)(struct nvkm_fifo *);
+ int (*class_get)(struct nvkm_fifo *, int index,
+ const struct nvkm_fifo_chan_oclass **);
const struct nvkm_fifo_chan_oclass *chan[];
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
index 090765ff0..467065d1b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
@@ -128,9 +128,8 @@ nvkm_gr = {
int
nvkm_gr_ctor(const struct nvkm_gr_func *func, struct nvkm_device *device,
- int index, u32 pmc_enable, bool enable, struct nvkm_gr *gr)
+ int index, bool enable, struct nvkm_gr *gr)
{
gr->func = func;
- return nvkm_engine_ctor(&nvkm_gr, device, index, pmc_enable,
- enable, &gr->engine);
+ return nvkm_engine_ctor(&nvkm_gr, device, index, enable, &gr->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index 56f392d3d..b02d8f50e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1181,20 +1181,20 @@ gf100_grctx_generate_r418bb8(struct gf100_gr *gr)
/* GPC_BROADCAST */
nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
- gr->magic_not_rop_nr);
+ gr->screen_tile_row_offset);
for (i = 0; i < 6; i++)
nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
/* GPC_BROADCAST.TP_BROADCAST */
nvkm_wr32(device, 0x419bd0, (gr->tpc_total << 8) |
- gr->magic_not_rop_nr | data2[0]);
+ gr->screen_tile_row_offset | data2[0]);
nvkm_wr32(device, 0x419be4, data2[1]);
for (i = 0; i < 6; i++)
nvkm_wr32(device, 0x419b00 + (i * 4), data[i]);
/* UNK78xx */
nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
- gr->magic_not_rop_nr);
+ gr->screen_tile_row_offset);
for (i = 0; i < 6; i++)
nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
}
@@ -1238,6 +1238,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_grctx_func *grctx = gr->func->grctx;
+ u32 idle_timeout;
nvkm_mc_unk260(device->mc, 0);
@@ -1247,7 +1248,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
- nvkm_wr32(device, 0x404154, 0x00000000);
+ idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
grctx->bundle(info);
grctx->pagepool(info);
@@ -1261,7 +1262,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_grctx_generate_r406800(gr);
gf100_gr_icmd(gr, grctx->icmd);
- nvkm_wr32(device, 0x404154, 0x00000400);
+ nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mc_unk260(device->mc, 1);
}
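
Note on the grctx hunks above: nvkm_mask() returns the register's previous contents, which the patch uses to save the 0x404154 idle timeout before zeroing it, and later to restore the saved value rather than a hard-coded 0x00000400. A self-contained toy of the idiom (toy_mask stands in for nvkm_mask; the pointer stands in for the MMIO register):

    #include <stdint.h>

    /* Apply (reg & ~mask) | (val & mask) and return the old value. */
    static uint32_t toy_mask(uint32_t *reg, uint32_t mask, uint32_t val)
    {
        uint32_t prev = *reg;
        *reg = (prev & ~mask) | (val & mask);
        return prev;
    }

    static void generate_main(uint32_t *reg_404154)
    {
        uint32_t idle_timeout = toy_mask(reg_404154, 0xffffffff, 0);
        /* ... context image generation runs with the timeout off ... */
        toy_mask(reg_404154, 0xffffffff, idle_timeout);     /* restore */
    }
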
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 3c8673958..ac895edce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -81,8 +81,6 @@ void gk104_grctx_generate_bundle(struct gf100_grctx *);
void gk104_grctx_generate_pagepool(struct gf100_grctx *);
void gk104_grctx_generate_unkn(struct gf100_gr *);
void gk104_grctx_generate_r418bb8(struct gf100_gr *);
-void gk104_grctx_generate_rop_active_fbps(struct gf100_gr *);
-
void gm107_grctx_generate_bundle(struct gf100_grctx *);
void gm107_grctx_generate_pagepool(struct gf100_grctx *);
@@ -98,7 +96,6 @@ void gm107_grctx_generate_pagepool(struct gf100_grctx *);
void gm107_grctx_generate_attrib(struct gf100_grctx *);
extern const struct gf100_grctx_func gm200_grctx;
-void gm200_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
void gm200_grctx_generate_tpcid(struct gf100_gr *);
void gm200_grctx_generate_405b60(struct gf100_gr *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index 74de7a96c..f521de11a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -223,6 +223,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_grctx_func *grctx = gr->func->grctx;
+ u32 idle_timeout;
int i;
nvkm_mc_unk260(device->mc, 0);
@@ -233,7 +234,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
- nvkm_wr32(device, 0x404154, 0x00000000);
+ idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
grctx->bundle(info);
grctx->pagepool(info);
@@ -250,7 +251,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
gf100_gr_icmd(gr, grctx->icmd);
- nvkm_wr32(device, 0x404154, 0x00000400);
+ nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mc_unk260(device->mc, 1);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index a843e3689..9ba337778 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -924,38 +924,30 @@ gk104_grctx_generate_r418bb8(struct gf100_gr *gr)
/* GPC_BROADCAST */
nvkm_wr32(device, 0x418bb8, (gr->tpc_total << 8) |
- gr->magic_not_rop_nr);
+ gr->screen_tile_row_offset);
for (i = 0; i < 6; i++)
nvkm_wr32(device, 0x418b08 + (i * 4), data[i]);
/* GPC_BROADCAST.TP_BROADCAST */
nvkm_wr32(device, 0x41bfd0, (gr->tpc_total << 8) |
- gr->magic_not_rop_nr | data2[0]);
+ gr->screen_tile_row_offset | data2[0]);
nvkm_wr32(device, 0x41bfe4, data2[1]);
for (i = 0; i < 6; i++)
nvkm_wr32(device, 0x41bf00 + (i * 4), data[i]);
/* UNK78xx */
nvkm_wr32(device, 0x4078bc, (gr->tpc_total << 8) |
- gr->magic_not_rop_nr);
+ gr->screen_tile_row_offset);
for (i = 0; i < 6; i++)
nvkm_wr32(device, 0x40780c + (i * 4), data[i]);
}
void
-gk104_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
-{
- struct nvkm_device *device = gr->base.engine.subdev.device;
- const u32 fbp_count = nvkm_rd32(device, 0x120074);
- nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
- nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
-}
-
-void
gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_grctx_func *grctx = gr->func->grctx;
+ u32 idle_timeout;
int i;
nvkm_mc_unk260(device->mc, 0);
@@ -966,7 +958,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
- nvkm_wr32(device, 0x404154, 0x00000000);
+ idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
grctx->bundle(info);
grctx->pagepool(info);
@@ -982,11 +974,10 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
- gk104_grctx_generate_rop_active_fbps(gr);
nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
gf100_gr_icmd(gr, grctx->icmd);
- nvkm_wr32(device, 0x404154, 0x00000400);
+ nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mc_unk260(device->mc, 1);
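
Note on ctxgk104.c: gk104_grctx_generate_rop_active_fbps() is deleted from context generation here, and its remaining callers in the gk20a, gm107 and gm20b files below drop it too (ctxgm200.c loses its own 0x12006c variant). For reference, a toy of what the removed helper did, with plain variables standing in for the three MMIO registers:

    #include <stdint.h>

    static uint32_t reg_120074, reg_408850, reg_408958;

    /* Read the active-FBP count and fold it into the low nibble of the
     * zrop/crop unit-count registers, as the deleted helper did. */
    static void rop_active_fbps(void)
    {
        uint32_t fbp_count = reg_120074;
        reg_408850 = (reg_408850 & ~0x0000000fu) | (fbp_count & 0xf);   /* zrop */
        reg_408958 = (reg_408958 & ~0x0000000fu) | (fbp_count & 0xf);   /* crop */
    }
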
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
index ad0a6cfe7..da7c35a6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk20a.c
@@ -29,15 +29,14 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_grctx_func *grctx = gr->func->grctx;
- int idle_timeout_save;
+ u32 idle_timeout;
int i;
gf100_gr_mmio(gr, gr->fuc_sw_ctx);
gf100_gr_wait_idle(gr);
- idle_timeout_save = nvkm_rd32(device, 0x404154);
- nvkm_wr32(device, 0x404154, 0x00000000);
+ idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
grctx->attrib(info);
@@ -53,13 +52,11 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
- gk104_grctx_generate_rop_active_fbps(gr);
-
nvkm_mask(device, 0x5044b0, 0x08000000, 0x08000000);
gf100_gr_wait_idle(gr);
- nvkm_wr32(device, 0x404154, idle_timeout_save);
+ nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_wait_idle(gr);
gf100_gr_mthd(gr, gr->fuc_method);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
index 95f59e316..6d3c5011e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c
@@ -920,13 +920,15 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
const u32 u = 0x418ea0 + (n * 0x04);
const u32 o = PPC_UNIT(gpc, ppc, 0);
+ if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+ continue;
mmio_wr32(info, o + 0xc0, bs);
mmio_wr32(info, o + 0xf4, bo);
bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
mmio_wr32(info, o + 0xe4, as);
mmio_wr32(info, o + 0xf8, ao);
ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
- mmio_wr32(info, u, ((bs / 3 /*XXX*/) << 16) | bs);
+ mmio_wr32(info, u, ((bs / 3) << 16) | bs);
}
}
}
@@ -957,6 +959,7 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_grctx_func *grctx = gr->func->grctx;
+ u32 idle_timeout;
int i;
gf100_gr_mmio(gr, grctx->hub);
@@ -965,7 +968,7 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
- nvkm_wr32(device, 0x404154, 0x00000000);
+ idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
grctx->bundle(info);
grctx->pagepool(info);
@@ -984,10 +987,8 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
- gk104_grctx_generate_rop_active_fbps(gr);
-
gf100_gr_icmd(gr, grctx->icmd);
- nvkm_wr32(device, 0x404154, 0x00000400);
+ nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mask(device, 0x419e00, 0x00808080, 0x00808080);
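
Note on gm107_grctx_generate_attrib(): the loop now skips PPC units absent from the per-GPC ppc_mask before touching any of their registers, and the /*XXX*/ marker on the bs/3 write is retired. A toy of the guard, with illustrative names:

    #include <stdint.h>

    static void program_ppc(int gpc, int ppc)
    {
        (void)gpc; (void)ppc;   /* attrib/alpha MMIO writes elided */
    }

    /* PPC units cleared in the per-GPC presence mask get no
     * circular-buffer programming at all. */
    static void program_gpc(int gpc, int ppc_nr, uint32_t ppc_mask)
    {
        for (int ppc = 0; ppc < ppc_nr; ppc++) {
            if (!(ppc_mask & (1u << ppc)))
                continue;       /* absent PPC */
            program_ppc(gpc, ppc);
        }
    }
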
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
index e586699fc..db209d33f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm200.c
@@ -33,7 +33,7 @@ gm200_grctx_generate_tpcid(struct gf100_gr *gr)
struct nvkm_device *device = gr->base.engine.subdev.device;
int gpc, tpc, id;
- for (tpc = 0, id = 0; tpc < 4; tpc++) {
+ for (tpc = 0, id = 0; tpc < TPC_MAX_PER_GPC; tpc++) {
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
if (tpc < gr->tpc_nr[gpc]) {
nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), id);
@@ -45,15 +45,6 @@ gm200_grctx_generate_tpcid(struct gf100_gr *gr)
}
}
-static void
-gm200_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
-{
- struct nvkm_device *device = gr->base.engine.subdev.device;
- const u32 fbp_count = nvkm_rd32(device, 0x12006c);
- nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
- nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
-}
-
void
gm200_grctx_generate_405b60(struct gf100_gr *gr)
{
@@ -86,17 +77,17 @@ gm200_grctx_generate_405b60(struct gf100_gr *gr)
nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
}
-void
+static void
gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_grctx_func *grctx = gr->func->grctx;
- u32 tmp;
+ u32 idle_timeout, tmp;
int i;
gf100_gr_mmio(gr, gr->fuc_sw_ctx);
- nvkm_wr32(device, 0x404154, 0x00000000);
+ idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
grctx->bundle(info);
grctx->pagepool(info);
@@ -113,8 +104,6 @@ gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
- gm200_grctx_generate_rop_active_fbps(gr);
-
for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 4);
nvkm_wr32(device, 0x4041c4, tmp);
@@ -122,7 +111,7 @@ gm200_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gm200_grctx_generate_405b60(gr);
gf100_gr_icmd(gr, gr->fuc_bundle);
- nvkm_wr32(device, 0x404154, 0x00000800);
+ nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_mthd(gr, gr->fuc_method);
nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
index a8827efa9..e5702e3e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm20b.c
@@ -40,15 +40,14 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
const struct gf100_grctx_func *grctx = gr->func->grctx;
- int idle_timeout_save;
+ u32 idle_timeout;
int i, tmp;
gf100_gr_mmio(gr, gr->fuc_sw_ctx);
gf100_gr_wait_idle(gr);
- idle_timeout_save = nvkm_rd32(device, 0x404154);
- nvkm_wr32(device, 0x404154, 0x00000000);
+ idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
grctx->attrib(info);
@@ -63,7 +62,6 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
- gk104_grctx_generate_rop_active_fbps(gr);
nvkm_wr32(device, 0x408908, nvkm_rd32(device, 0x410108) | 0x80000000);
for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
@@ -74,7 +72,7 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_wait_idle(gr);
- nvkm_wr32(device, 0x404154, idle_timeout_save);
+ nvkm_wr32(device, 0x404154, idle_timeout);
gf100_gr_wait_idle(gr);
gf100_gr_mthd(gr, gr->fuc_method);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
index dc60509f7..4984b0069 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc
@@ -291,12 +291,13 @@ init:
// Main program loop, very simple, sleeps until woken up by the interrupt
// handler, pulls a command from the queue and executes its handler
//
-main:
- bset $flags $p0
+wait:
sleep $p0
+ bset $flags $p0
+main:
mov $r13 #cmd_queue
call(queue_get)
- bra $p1 #main
+ bra $p1 #wait
// 0x0000-0x0003 are all context transfers
cmpu b32 $r14 0x04
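
The gpc.fuc hunk splits the old main label in two: wait now holds the sleep and flag set, main holds the queue pop, and the empty-queue branch targets wait rather than main. After a command is handled the loop re-checks the queue and only sleeps when it comes up empty, where the old order slept once before every queue check even with further work pending. A hedged C rendering of the steady-state loop (queue_get/handle/sleep_until_intr are invented stubs, and the loop is bounded only so the sketch terminates):

#include <stdbool.h>
#include <stdio.h>

static int queue[4] = { 7, 9 };                 /* two commands pending */
static int head;

static bool queue_get(int *cmd)
{
        if (!queue[head])
                return false;                   /* queue empty */
        *cmd = queue[head++];
        return true;
}

static void sleep_until_intr(void) { puts("sleep"); }
static void handle(int cmd)        { printf("handle %d\n", cmd); }

int main(void)
{
        /* new order: drain the queue first, sleep only when it is empty;
         * the pre-patch loop slept at the top of every iteration instead */
        for (int iter = 0, cmd; iter < 3; iter++) {
                if (!queue_get(&cmd)) {
                        sleep_until_intr();     /* the 'wait' label */
                        continue;
                }
                handle(cmd);                    /* the 'main' label onwards */
        }
        return 0;
}
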
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
index 5f4ddfee4..8cb240b65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
@@ -370,9 +370,10 @@ uint32_t gf100_grgpc_code[] = {
0xf11f29f0,
0xf0080007,
0x02d00203,
-/* 0x04bb: main */
+/* 0x04bb: wait */
0xf404bd00,
- 0x28f40031,
+ 0x31f40028,
+/* 0x04c1: main */
0x1cd7f000,
0xf43921f4,
0xe4b0f401,
@@ -384,10 +385,10 @@ uint32_t gf100_grgpc_code[] = {
0x0018fe05,
0x05b421f5,
/* 0x04eb: main_not_ctx_xfer */
- 0x94d30ef4,
+ 0x94d90ef4,
0xf5f010ef,
0x7e21f501,
- 0xc60ef403,
+ 0xcc0ef403,
/* 0x04f8: ih */
0x80f900f9,
0xf90188fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index 03381b163..550d6ba09 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -397,9 +397,10 @@ uint32_t gf117_grgpc_code[] = {
0x080007f1,
0xd00203f0,
0x04bd0002,
-/* 0x0508: main */
- 0xf40031f4,
- 0xd7f00028,
+/* 0x0508: wait */
+ 0xf40028f4,
+/* 0x050e: main */
+ 0xd7f00031,
0x3921f424,
0xb0f401f4,
0x18f404e4,
@@ -409,13 +410,13 @@ uint32_t gf117_grgpc_code[] = {
0xfd01e4b6,
0x18fe051e,
0x0121f500,
- 0xd30ef406,
+ 0xd90ef406,
/* 0x0538: main_not_ctx_xfer */
0xf010ef94,
0x21f501f5,
0x0ef4037e,
/* 0x0545: ih */
- 0xf900f9c6,
+ 0xf900f9cc,
0x0188fe80,
0x90f980f9,
0xb0f9a0f9,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 99d9b48a3..271b59d36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -397,9 +397,10 @@ uint32_t gk104_grgpc_code[] = {
0x080007f1,
0xd00203f0,
0x04bd0002,
-/* 0x0508: main */
- 0xf40031f4,
- 0xd7f00028,
+/* 0x0508: wait */
+ 0xf40028f4,
+/* 0x050e: main */
+ 0xd7f00031,
0x3921f424,
0xb0f401f4,
0x18f404e4,
@@ -409,13 +410,13 @@ uint32_t gk104_grgpc_code[] = {
0xfd01e4b6,
0x18fe051e,
0x0121f500,
- 0xd30ef406,
+ 0xd90ef406,
/* 0x0538: main_not_ctx_xfer */
0xf010ef94,
0x21f501f5,
0x0ef4037e,
/* 0x0545: ih */
- 0xf900f9c6,
+ 0xf900f9cc,
0x0188fe80,
0x90f980f9,
0xb0f9a0f9,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index f7267696c..73b4a32c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -397,9 +397,10 @@ uint32_t gk110_grgpc_code[] = {
0x300007f1,
0xd00203f0,
0x04bd0002,
-/* 0x0508: main */
- 0xf40031f4,
- 0xd7f00028,
+/* 0x0508: wait */
+ 0xf40028f4,
+/* 0x050e: main */
+ 0xd7f00031,
0x3921f424,
0xb0f401f4,
0x18f404e4,
@@ -409,13 +410,13 @@ uint32_t gk110_grgpc_code[] = {
0xfd01e4b6,
0x18fe051e,
0x0121f500,
- 0xd30ef406,
+ 0xd90ef406,
/* 0x0538: main_not_ctx_xfer */
0xf010ef94,
0x21f501f5,
0x0ef4037e,
/* 0x0545: ih */
- 0xf900f9c6,
+ 0xf900f9cc,
0x0188fe80,
0x90f980f9,
0xb0f9a0f9,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index 387d1fa3e..018169818 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -349,9 +349,10 @@ uint32_t gk208_grgpc_code[] = {
0x801f29f0,
0xf6023000,
0x04bd0002,
-/* 0x0448: main */
- 0xf40031f4,
- 0x240d0028,
+/* 0x0448: wait */
+ 0xf40028f4,
+/* 0x044e: main */
+ 0x240d0031,
0x0000377e,
0xb0f401f4,
0x18f404e4,
@@ -362,10 +363,10 @@ uint32_t gk208_grgpc_code[] = {
0x0018fe05,
0x00051f7e,
/* 0x0477: main_not_ctx_xfer */
- 0x94d40ef4,
+ 0x94da0ef4,
0xf5f010ef,
0x02f87e01,
- 0xc70ef400,
+ 0xcd0ef400,
/* 0x0484: ih */
0x80f900f9,
0xf90188fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index fa9f3c0c5..eca007f03 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -427,9 +427,10 @@ uint32_t gm107_grgpc_code[] = {
0x1f29f024,
0x02300080,
0xbd0002f6,
-/* 0x0571: main */
- 0x0031f404,
- 0x0d0028f4,
+/* 0x0571: wait */
+ 0x0028f404,
+/* 0x0577: main */
+ 0x0d0031f4,
0x00377e24,
0xf401f400,
0xf404e4b0,
@@ -439,13 +440,13 @@ uint32_t gm107_grgpc_code[] = {
0xfd01e4b6,
0x18fe051e,
0x06487e00,
- 0xd40ef400,
+ 0xda0ef400,
/* 0x05a0: main_not_ctx_xfer */
0xf010ef94,
0xf87e01f5,
0x0ef40002,
/* 0x05ad: ih */
- 0xf900f9c7,
+ 0xf900f9cd,
0x0188fe80,
0x90f980f9,
0xb0f9a0f9,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
index e3a2fb308..4d416d4f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hub.fuc
@@ -218,13 +218,14 @@ init:
// Main program loop, very simple, sleeps until woken up by the interrupt
// handler, pulls a command from the queue and executes its handler
//
-main:
+wait:
// sleep until we have something to do
- bset $flags $p0
sleep $p0
+ bset $flags $p0
+main:
mov $r13 #cmd_queue
call(queue_get)
- bra $p1 #main
+ bra $p1 #wait
// context switch, requested by GPU?
cmpu b32 $r14 0x4001
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
index 397921a9a..8015b40a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
@@ -584,9 +584,10 @@ uint32_t gf100_grhub_code[] = {
0x080007f1,
0xd00203f0,
0x04bd0001,
-/* 0x0564: main */
- 0xf40031f4,
- 0xd7f00028,
+/* 0x0564: wait */
+ 0xf40028f4,
+/* 0x056a: main */
+ 0xd7f00031,
0x3921f410,
0xb1f401f4,
0xf54001e4,
@@ -650,7 +651,7 @@ uint32_t gf100_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
- 0xff080ef5,
+ 0xff0e0ef5,
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
@@ -675,12 +676,12 @@ uint32_t gf100_grhub_code[] = {
0xf501f5f0,
0xf5037e21,
/* 0x06b3: main_done */
- 0xbdfeb50e,
+ 0xbdfebb0e,
0x1f29f024,
0x080007f1,
0xd00203f0,
0x04bd0002,
- 0xfea00ef5,
+ 0xfea60ef5,
/* 0x06c8: ih */
0x80f900f9,
0xf90188fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
index 50c97163d..2af90ec68 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
@@ -584,9 +584,10 @@ uint32_t gf117_grhub_code[] = {
0x080007f1,
0xd00203f0,
0x04bd0001,
-/* 0x0564: main */
- 0xf40031f4,
- 0xd7f00028,
+/* 0x0564: wait */
+ 0xf40028f4,
+/* 0x056a: main */
+ 0xd7f00031,
0x3921f410,
0xb1f401f4,
0xf54001e4,
@@ -650,7 +651,7 @@ uint32_t gf117_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
- 0xff080ef5,
+ 0xff0e0ef5,
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
@@ -675,12 +676,12 @@ uint32_t gf117_grhub_code[] = {
0xf501f5f0,
0xf5037e21,
/* 0x06b3: main_done */
- 0xbdfeb50e,
+ 0xbdfebb0e,
0x1f29f024,
0x080007f1,
0xd00203f0,
0x04bd0002,
- 0xfea00ef5,
+ 0xfea60ef5,
/* 0x06c8: ih */
0x80f900f9,
0xf90188fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
index 125824b39..e8b8c1c94 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
@@ -584,9 +584,10 @@ uint32_t gk104_grhub_code[] = {
0x080007f1,
0xd00203f0,
0x04bd0001,
-/* 0x0564: main */
- 0xf40031f4,
- 0xd7f00028,
+/* 0x0564: wait */
+ 0xf40028f4,
+/* 0x056a: main */
+ 0xd7f00031,
0x3921f410,
0xb1f401f4,
0xf54001e4,
@@ -650,7 +651,7 @@ uint32_t gk104_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
- 0xff080ef5,
+ 0xff0e0ef5,
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
@@ -675,12 +676,12 @@ uint32_t gk104_grhub_code[] = {
0xf501f5f0,
0xf5037e21,
/* 0x06b3: main_done */
- 0xbdfeb50e,
+ 0xbdfebb0e,
0x1f29f024,
0x080007f1,
0xd00203f0,
0x04bd0002,
- 0xfea00ef5,
+ 0xfea60ef5,
/* 0x06c8: ih */
0x80f900f9,
0xf90188fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
index 0a1b8c0b8..f4ed2fb6f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
@@ -584,9 +584,10 @@ uint32_t gk110_grhub_code[] = {
0x300007f1,
0xd00203f0,
0x04bd0001,
-/* 0x0564: main */
- 0xf40031f4,
- 0xd7f00028,
+/* 0x0564: wait */
+ 0xf40028f4,
+/* 0x056a: main */
+ 0xd7f00031,
0x3921f410,
0xb1f401f4,
0xf54001e4,
@@ -650,7 +651,7 @@ uint32_t gk110_grhub_code[] = {
0x170007f1,
0xd00203f0,
0x04bd0009,
- 0xff080ef5,
+ 0xff0e0ef5,
/* 0x0660: main_not_ctx_switch */
0xf401e4b0,
0xf2b90d1b,
@@ -675,12 +676,12 @@ uint32_t gk110_grhub_code[] = {
0xf501f5f0,
0xf5037e21,
/* 0x06b3: main_done */
- 0xbdfeb50e,
+ 0xbdfebb0e,
0x1f29f024,
0x300007f1,
0xd00203f0,
0x04bd0002,
- 0xfea00ef5,
+ 0xfea60ef5,
/* 0x06c8: ih */
0x80f900f9,
0xf90188fe,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index 16869d0b1..ed488973c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -531,9 +531,10 @@ uint32_t gk208_grhub_code[] = {
0x1f19f014,
0x02300080,
0xbd0001f6,
-/* 0x0491: main */
- 0x0031f404,
- 0x0d0028f4,
+/* 0x0491: wait */
+ 0x0028f404,
+/* 0x0497: main */
+ 0x0d0031f4,
0x00377e10,
0xf401f400,
0x4001e4b1,
@@ -590,7 +591,7 @@ uint32_t gk208_grhub_code[] = {
0x09f60217,
0xf504bd00,
/* 0x056b: main_not_ctx_switch */
- 0xb0ff2a0e,
+ 0xb0ff300e,
0x1bf401e4,
0x7ef2b20c,
0xf4000820,
@@ -612,11 +613,11 @@ uint32_t gk208_grhub_code[] = {
0x7e01f5f0,
0xf50002f8,
/* 0x05b7: main_done */
- 0xbdfede0e,
+ 0xbdfee40e,
0x1f29f024,
0x02300080,
0xbd0002f6,
- 0xcc0ef504,
+ 0xd20ef504,
/* 0x05c9: ih */
0xf900f9fe,
0x0188fe80,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index d6343d2a6..5c9051839 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -531,9 +531,10 @@ uint32_t gm107_grhub_code[] = {
0x1f19f014,
0x02300080,
0xbd0001f6,
-/* 0x0491: main */
- 0x0031f404,
- 0x0d0028f4,
+/* 0x0491: wait */
+ 0x0028f404,
+/* 0x0497: main */
+ 0x0d0031f4,
0x00377e10,
0xf401f400,
0x4001e4b1,
@@ -590,7 +591,7 @@ uint32_t gm107_grhub_code[] = {
0x09f60217,
0xf504bd00,
/* 0x056b: main_not_ctx_switch */
- 0xb0ff2a0e,
+ 0xb0ff300e,
0x1bf401e4,
0x7ef2b20c,
0xf4000820,
@@ -612,11 +613,11 @@ uint32_t gm107_grhub_code[] = {
0x7e01f5f0,
0xf50002f8,
/* 0x05b7: main_done */
- 0xbdfede0e,
+ 0xbdfee40e,
0x1f29f024,
0x02300080,
0xbd0002f6,
- 0xcc0ef504,
+ 0xd20ef504,
/* 0x05c9: ih */
0xf900f9fe,
0x0188fe80,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index b0c721616..ae9ab5b1a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -702,6 +702,13 @@ gf100_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
+int
+gf100_gr_rops(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ return (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16;
+}
+
void
gf100_gr_zbc_init(struct gf100_gr *gr)
{
@@ -1628,32 +1635,12 @@ gf100_gr_oneinit(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
struct nvkm_device *device = gr->base.engine.subdev.device;
- int ret, i, j;
+ int i, j;
nvkm_pmu_pgob(device->pmu, false);
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
- &gr->unk4188b4);
- if (ret)
- return ret;
-
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 256, false,
- &gr->unk4188b8);
- if (ret)
- return ret;
-
- nvkm_kmap(gr->unk4188b4);
- for (i = 0; i < 0x1000; i += 4)
- nvkm_wo32(gr->unk4188b4, i, 0x00000010);
- nvkm_done(gr->unk4188b4);
-
- nvkm_kmap(gr->unk4188b8);
- for (i = 0; i < 0x1000; i += 4)
- nvkm_wo32(gr->unk4188b8, i, 0x00000010);
- nvkm_done(gr->unk4188b8);
-
- gr->rop_nr = (nvkm_rd32(device, 0x409604) & 0x001f0000) >> 16;
- gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f;
+ gr->rop_nr = gr->func->rops(gr);
+ gr->gpc_nr = nvkm_rd32(device, 0x409604) & 0x0000001f;
for (i = 0; i < gr->gpc_nr; i++) {
gr->tpc_nr[i] = nvkm_rd32(device, GPC_UNIT(i, 0x2608));
gr->tpc_total += gr->tpc_nr[i];
@@ -1670,38 +1657,38 @@ gf100_gr_oneinit(struct nvkm_gr *base)
switch (device->chipset) {
case 0xc0:
if (gr->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
- gr->magic_not_rop_nr = 0x07;
+ gr->screen_tile_row_offset = 0x07;
} else
if (gr->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
- gr->magic_not_rop_nr = 0x05;
+ gr->screen_tile_row_offset = 0x05;
} else
if (gr->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
- gr->magic_not_rop_nr = 0x06;
+ gr->screen_tile_row_offset = 0x06;
}
break;
case 0xc3: /* 450, 4/0/0/0, 2 */
- gr->magic_not_rop_nr = 0x03;
+ gr->screen_tile_row_offset = 0x03;
break;
case 0xc4: /* 460, 3/4/0/0, 4 */
- gr->magic_not_rop_nr = 0x01;
+ gr->screen_tile_row_offset = 0x01;
break;
case 0xc1: /* 2/0/0/0, 1 */
- gr->magic_not_rop_nr = 0x01;
+ gr->screen_tile_row_offset = 0x01;
break;
case 0xc8: /* 4/4/3/4, 5 */
- gr->magic_not_rop_nr = 0x06;
+ gr->screen_tile_row_offset = 0x06;
break;
case 0xce: /* 4/4/0/0, 4 */
- gr->magic_not_rop_nr = 0x03;
+ gr->screen_tile_row_offset = 0x03;
break;
case 0xcf: /* 4/0/0/0, 3 */
- gr->magic_not_rop_nr = 0x03;
+ gr->screen_tile_row_offset = 0x03;
break;
case 0xd7:
case 0xd9: /* 1/0/0/0, 1 */
case 0xea: /* gk20a */
case 0x12b: /* gm20b */
- gr->magic_not_rop_nr = 0x01;
+ gr->screen_tile_row_offset = 0x01;
break;
}
@@ -1748,8 +1735,6 @@ gf100_gr_dtor(struct nvkm_gr *base)
gf100_gr_dtor_init(gr->fuc_sw_ctx);
gf100_gr_dtor_init(gr->fuc_sw_nonctx);
- nvkm_memory_del(&gr->unk4188b8);
- nvkm_memory_del(&gr->unk4188b4);
return gr;
}
@@ -1795,7 +1780,7 @@ gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
func->fecs.ucode == NULL);
- ret = nvkm_gr_ctor(&gf100_gr_, device, index, 0x08001000,
+ ret = nvkm_gr_ctor(&gf100_gr_, device, index,
gr->firmware || func->fecs.ucode != NULL,
&gr->base);
if (ret)
@@ -1834,6 +1819,7 @@ int
gf100_gr_init(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
+ struct nvkm_fb *fb = device->fb;
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
@@ -1846,8 +1832,8 @@ gf100_gr_init(struct gf100_gr *gr)
nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
- nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
gf100_gr_mmio(gr, gr->func->mmio);
@@ -1870,9 +1856,9 @@ gf100_gr_init(struct gf100_gr *gr)
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
- gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+ gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
- gr->tpc_total);
+ gr->tpc_total);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
@@ -1965,6 +1951,7 @@ gf100_gr = {
.mmio = gf100_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
+ .rops = gf100_gr_rops,
.grctx = &gf100_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
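
The new gf100_gr_rops() isolates the ROP count from bits 16..20 of register 0x409604 using the AND-then-shift idiom that recurs through this series (gr->gpc_nr takes bits 0..4 of the same register). A self-contained sketch of the extraction; the sample register value is made up:

#include <assert.h>
#include <stdint.h>

/* extract a field: mask selects the bits, shift normalizes them */
static uint32_t field(uint32_t reg, uint32_t mask, unsigned shift)
{
        return (reg & mask) >> shift;
}

int main(void)
{
        uint32_t r409604 = 0x00040004;          /* hypothetical: 4 ROPs, 4 GPCs */

        uint32_t rop_nr = field(r409604, 0x001f0000, 16);
        uint32_t gpc_nr = field(r409604, 0x0000001f, 0);

        assert(rop_nr == 4 && gpc_nr == 4);
        return 0;
}
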
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index f0c6acb0f..2b98abdb9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -31,7 +31,8 @@
#include <subdev/mmu.h>
#define GPC_MAX 32
-#define TPC_MAX (GPC_MAX * 8)
+#define TPC_MAX_PER_GPC 8
+#define TPC_MAX (GPC_MAX * TPC_MAX_PER_GPC)
#define ROP_BCAST(r) (0x408800 + (r))
#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
@@ -100,15 +101,12 @@ struct gf100_gr {
u8 ppc_mask[GPC_MAX];
u8 ppc_tpc_nr[GPC_MAX][4];
- struct nvkm_memory *unk4188b4;
- struct nvkm_memory *unk4188b8;
-
struct gf100_gr_data mmio_data[4];
struct gf100_gr_mmio mmio_list[4096/8];
u32 size;
u32 *data;
- u8 magic_not_rop_nr;
+ u8 screen_tile_row_offset;
};
int gf100_gr_ctor(const struct gf100_gr_func *, struct nvkm_device *,
@@ -121,6 +119,8 @@ struct gf100_gr_func {
void (*dtor)(struct gf100_gr *);
int (*init)(struct gf100_gr *);
void (*init_gpc_mmu)(struct gf100_gr *);
+ void (*init_rop_active_fbps)(struct gf100_gr *);
+ void (*init_ppc_exceptions)(struct gf100_gr *);
void (*set_hww_esr_report_mask)(struct gf100_gr *);
const struct gf100_gr_pack *mmio;
struct {
@@ -129,18 +129,23 @@ struct gf100_gr_func {
struct {
struct gf100_gr_ucode *ucode;
} gpccs;
+ int (*rops)(struct gf100_gr *);
int ppc_nr;
const struct gf100_grctx_func *grctx;
struct nvkm_sclass sclass[];
};
int gf100_gr_init(struct gf100_gr *);
+int gf100_gr_rops(struct gf100_gr *);
int gk104_gr_init(struct gf100_gr *);
+void gk104_gr_init_rop_active_fbps(struct gf100_gr *);
+void gk104_gr_init_ppc_exceptions(struct gf100_gr *);
int gk20a_gr_init(struct gf100_gr *);
int gm200_gr_init(struct gf100_gr *);
+int gm200_gr_rops(struct gf100_gr *);
#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
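
gf100.h grows rops, init_rop_active_fbps and init_ppc_exceptions hooks in gf100_gr_func, so chipset differences are expressed through the function table rather than open-coded in the shared init paths (compare the gk104/gk110/gm200 tables below, which mix inherited and chipset-specific entries). A minimal sketch of that ops-table pattern under invented names:

#include <stdio.h>

struct gr;                                      /* opaque device state */

struct gr_func {                                /* per-chipset hook table */
        int  (*rops)(struct gr *);
        void (*init_rop_active_fbps)(struct gr *);
};

struct gr { const struct gr_func *func; };

static int  gf100_rops(struct gr *gr)     { (void)gr; return 4; }
static void gk104_rop_fbps(struct gr *gr) { (void)gr; puts("gk104 fbps"); }

static const struct gr_func gk104 = {
        .rops                 = gf100_rops,     /* inherited from gf100 */
        .init_rop_active_fbps = gk104_rop_fbps, /* chipset-specific */
};

int main(void)
{
        struct gr gr = { .func = &gk104 };
        printf("rops=%d\n", gr.func->rops(&gr));
        gr.func->init_rop_active_fbps(&gr);
        return 0;
}
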
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
index 8f253e0a2..d736dcd55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf104.c
@@ -118,6 +118,7 @@ gf104_gr = {
.mmio = gf104_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
+ .rops = gf100_gr_rops,
.grctx = &gf104_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
index 815a5aafa..2f0d24498 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf108.c
@@ -109,6 +109,7 @@ gf108_gr = {
.mmio = gf108_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
+ .rops = gf100_gr_rops,
.grctx = &gf108_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
index d081ee41f..d1d942eb8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf110.c
@@ -90,6 +90,7 @@ gf110_gr = {
.mmio = gf110_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
+ .rops = gf100_gr_rops,
.grctx = &gf110_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
index d8e8af4d3..70335f65c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf117.c
@@ -126,6 +126,7 @@ gf117_gr = {
.mmio = gf117_gr_pack_mmio,
.fecs.ucode = &gf117_gr_fecs_ucode,
.gpccs.ucode = &gf117_gr_gpccs_ucode,
+ .rops = gf100_gr_rops,
.ppc_nr = 1,
.grctx = &gf117_grctx,
.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
index 01faf9a73..8d8e4cafe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf119.c
@@ -181,6 +181,7 @@ gf119_gr = {
.mmio = gf119_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,
+ .rops = gf100_gr_rops,
.grctx = &gf119_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index abf54928a..ec22da6c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -24,6 +24,8 @@
#include "gf100.h"
#include "ctxgf100.h"
+#include <subdev/fb.h>
+
#include <nvif/class.h>
/*******************************************************************************
@@ -177,10 +179,35 @@ gk104_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
+void
+gk104_gr_init_rop_active_fbps(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 fbp_count = nvkm_rd32(device, 0x120074);
+ nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+ nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
+void
+gk104_gr_init_ppc_exceptions(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ int gpc, ppc;
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++) {
+ if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+ continue;
+ nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
+ }
+ }
+}
+
int
gk104_gr_init(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
+ struct nvkm_fb *fb = device->fb;
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
@@ -193,8 +220,8 @@ gk104_gr_init(struct gf100_gr *gr)
nvkm_wr32(device, GPC_BCAST(0x088c), 0x00000000);
nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
- nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
gf100_gr_mmio(gr, gr->func->mmio);
@@ -218,15 +245,17 @@ gk104_gr_init(struct gf100_gr *gr)
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
- gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+ gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
- gr->tpc_total);
+ gr->tpc_total);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+ gr->func->init_rop_active_fbps(gr);
+
nvkm_wr32(device, 0x400500, 0x00010001);
nvkm_wr32(device, 0x400100, 0xffffffff);
@@ -246,8 +275,9 @@ gk104_gr_init(struct gf100_gr *gr)
nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
nvkm_mask(device, 0x419eb4, 0x00001000, 0x00001000);
+ gr->func->init_ppc_exceptions(gr);
+
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
- nvkm_wr32(device, GPC_UNIT(gpc, 0x3038), 0xc0000000);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
@@ -309,9 +339,12 @@ gk104_gr_gpccs_ucode = {
static const struct gf100_gr_func
gk104_gr = {
.init = gk104_gr_init,
+ .init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.mmio = gk104_gr_pack_mmio,
.fecs.ucode = &gk104_gr_fecs_ucode,
.gpccs.ucode = &gk104_gr_gpccs_ucode,
+ .rops = gf100_gr_rops,
.ppc_nr = 1,
.grctx = &gk104_grctx,
.sclass = {
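
gk104_gr_init_ppc_exceptions() above enables PPC exception reporting only for units whose bit is set in ppc_mask[gpc], replacing writes that assumed every PPC slot was populated; skipping the test would poke fused-off units. A sketch of the bitmask walk with hypothetical mask values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t ppc_mask[2] = { 0x3, 0x1 };     /* hypothetical fuse masks */
        int     ppc_nr[2]   = { 2, 2 };

        for (int gpc = 0; gpc < 2; gpc++) {
                for (int ppc = 0; ppc < ppc_nr[gpc]; ppc++) {
                        if (!(ppc_mask[gpc] & (1 << ppc)))
                                continue;       /* fused off, don't touch */
                        printf("enable exceptions: gpc %d ppc %d\n", gpc, ppc);
                }
        }
        return 0;
}
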
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index 32aa2946e..f31b171a4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -183,9 +183,12 @@ gk110_gr_gpccs_ucode = {
static const struct gf100_gr_func
gk110_gr = {
.init = gk104_gr_init,
+ .init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.mmio = gk110_gr_pack_mmio,
.fecs.ucode = &gk110_gr_fecs_ucode,
.gpccs.ucode = &gk110_gr_gpccs_ucode,
+ .rops = gf100_gr_rops,
.ppc_nr = 2,
.grctx = &gk110_grctx,
.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
index 22f88afbf..d76dd1780 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110b.c
@@ -103,9 +103,12 @@ gk110b_gr_pack_mmio[] = {
static const struct gf100_gr_func
gk110b_gr = {
.init = gk104_gr_init,
+ .init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.mmio = gk110b_gr_pack_mmio,
.fecs.ucode = &gk110_gr_fecs_ucode,
.gpccs.ucode = &gk110_gr_gpccs_ucode,
+ .rops = gf100_gr_rops,
.ppc_nr = 2,
.grctx = &gk110b_grctx,
.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
index ee7554fc8..14bbe6ed0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk208.c
@@ -162,9 +162,12 @@ gk208_gr_gpccs_ucode = {
static const struct gf100_gr_func
gk208_gr = {
.init = gk104_gr_init,
+ .init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.mmio = gk208_gr_pack_mmio,
.fecs.ucode = &gk208_gr_fecs_ucode,
.gpccs.ucode = &gk208_gr_gpccs_ucode,
+ .rops = gf100_gr_rops,
.ppc_nr = 1,
.grctx = &gk208_grctx,
.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 7ffb8a626..4ca8ed151 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -239,9 +239,6 @@ gk20a_gr_init(struct gf100_gr *gr)
return ret;
/* MMU debug buffer */
- nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
- nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
-
if (gr->func->init_gpc_mmu)
gr->func->init_gpc_mmu(gr);
@@ -267,7 +264,7 @@ gk20a_gr_init(struct gf100_gr *gr)
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
- gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+ gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
gr->tpc_total);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
@@ -275,6 +272,8 @@ gk20a_gr_init(struct gf100_gr *gr)
nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
+ gr->func->init_rop_active_fbps(gr);
+
/* Enable FIFO access */
nvkm_wr32(device, 0x400500, 0x00010001);
@@ -312,7 +311,9 @@ gk20a_gr_init(struct gf100_gr *gr)
static const struct gf100_gr_func
gk20a_gr = {
.init = gk20a_gr_init,
+ .init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
.set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,
+ .rops = gf100_gr_rops,
.ppc_nr = 1,
.grctx = &gk20a_grctx,
.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
index 56e960212..45f965f60 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm107.c
@@ -26,6 +26,7 @@
#include <subdev/bios.h>
#include <subdev/bios/P0260.h>
+#include <subdev/fb.h>
#include <nvif/class.h>
@@ -311,17 +312,18 @@ int
gm107_gr_init(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
+ struct nvkm_fb *fb = device->fb;
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
- int gpc, tpc, ppc, rop;
+ int gpc, tpc, rop;
int i;
nvkm_wr32(device, GPC_BCAST(0x0880), 0x00000000);
nvkm_wr32(device, GPC_BCAST(0x0890), 0x00000000);
nvkm_wr32(device, GPC_BCAST(0x0894), 0x00000000);
- nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(gr->unk4188b4) >> 8);
- nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(gr->unk4188b8) >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b4), nvkm_memory_addr(fb->mmu_wr) >> 8);
+ nvkm_wr32(device, GPC_BCAST(0x08b8), nvkm_memory_addr(fb->mmu_rd) >> 8);
gf100_gr_mmio(gr, gr->func->mmio);
@@ -347,15 +349,17 @@ gm107_gr_init(struct gf100_gr *gr)
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
- gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+ gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
- gr->tpc_total);
+ gr->tpc_total);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+ gr->func->init_rop_active_fbps(gr);
+
nvkm_wr32(device, 0x400500, 0x00010001);
nvkm_wr32(device, 0x400100, 0xffffffff);
@@ -373,9 +377,9 @@ gm107_gr_init(struct gf100_gr *gr)
nvkm_wr32(device, 0x405844, 0x00ffffff);
nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+ gr->func->init_ppc_exceptions(gr);
+
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
- for (ppc = 0; ppc < 2 /* gr->ppc_nr[gpc] */; ppc++)
- nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
@@ -438,9 +442,12 @@ gm107_gr_gpccs_ucode = {
static const struct gf100_gr_func
gm107_gr = {
.init = gm107_gr_init,
+ .init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.mmio = gm107_gr_pack_mmio,
.fecs.ucode = &gm107_gr_fecs_ucode,
.gpccs.ucode = &gm107_gr_gpccs_ucode,
+ .rops = gf100_gr_rops,
.ppc_nr = 2,
.grctx = &gm107_grctx,
.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 058fc1d22..4dfa4513b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -33,27 +33,45 @@
******************************************************************************/
int
+gm200_gr_rops(struct gf100_gr *gr)
+{
+ return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
+}
+
+static void
+gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, 0x418880, nvkm_rd32(device, 0x100c80) & 0xf0001fff);
+ nvkm_wr32(device, 0x418890, 0x00000000);
+ nvkm_wr32(device, 0x418894, 0x00000000);
+
+ nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
+ nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
+ nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
+}
+
+static void
+gm200_gr_init_rop_active_fbps(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ const u32 fbp_count = nvkm_rd32(device, 0x12006c);
+ nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
+ nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
+}
+
+int
gm200_gr_init(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
- u32 data[TPC_MAX / 8] = {}, tmp;
+ u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
- int gpc, tpc, ppc, rop;
+ int gpc, tpc, rop;
int i;
- tmp = nvkm_rd32(device, 0x100c80); /*XXX: mask? */
- nvkm_wr32(device, 0x418880, 0x00001000 | (tmp & 0x00000fff));
- nvkm_wr32(device, 0x418890, 0x00000000);
- nvkm_wr32(device, 0x418894, 0x00000000);
- nvkm_wr32(device, 0x4188b4, nvkm_memory_addr(gr->unk4188b4) >> 8);
- nvkm_wr32(device, 0x4188b8, nvkm_memory_addr(gr->unk4188b8) >> 8);
- nvkm_mask(device, 0x4188b0, 0x00040000, 0x00040000);
-
- /*XXX: belongs in fb */
- nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(gr->unk4188b4) >> 8);
- nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(gr->unk4188b8) >> 8);
- nvkm_mask(device, 0x100cc4, 0x00040000, 0x00040000);
+ gr->func->init_gpc_mmu(gr);
gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
@@ -79,9 +97,9 @@ gm200_gr_init(struct gf100_gr *gr)
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
- gr->magic_not_rop_nr << 8 | gr->tpc_nr[gpc]);
+ gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
- gr->tpc_total);
+ gr->tpc_total);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
@@ -89,6 +107,8 @@ gm200_gr_init(struct gf100_gr *gr)
nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+ gr->func->init_rop_active_fbps(gr);
+
nvkm_wr32(device, 0x400500, 0x00010001);
nvkm_wr32(device, 0x400100, 0xffffffff);
nvkm_wr32(device, 0x40013c, 0xffffffff);
@@ -106,9 +126,9 @@ gm200_gr_init(struct gf100_gr *gr)
nvkm_wr32(device, 0x405844, 0x00ffffff);
nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
+ gr->func->init_ppc_exceptions(gr);
+
for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
- for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++)
- nvkm_wr32(device, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
@@ -189,6 +209,10 @@ gm200_gr_new_(const struct gf100_gr_func *func, struct nvkm_device *device,
static const struct gf100_gr_func
gm200_gr = {
.init = gm200_gr_init,
+ .init_gpc_mmu = gm200_gr_init_gpc_mmu,
+ .init_rop_active_fbps = gm200_gr_init_rop_active_fbps,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+ .rops = gm200_gr_rops,
.ppc_nr = 2,
.grctx = &gm200_grctx,
.sclass = {
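
gm200_gr_init_gpc_mmu() now mirrors the fb-owned MMU debug registers (0x100cc4/0x100cc8/0x100ccc) into the GPC broadcast copies instead of pointing both register blocks at buffers gr allocated itself; the old "/*XXX: belongs in fb */" writes disappear on the assumption that fb programs its own registers at init, as the gk20a fb hunk further down does. A toy sketch of the mirroring idiom over a two-entry fake register file:

#include <stdint.h>
#include <stdio.h>

static struct reg { uint32_t addr, val; } regs[] = {
        { 0x100cc8, 0 },                        /* fb's MMU debug register */
        { 0x4188b4, 0 },                        /* gr's GPC broadcast copy */
};

static uint32_t *at(uint32_t addr)              /* tiny fake register file */
{
        for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
                if (regs[i].addr == addr)
                        return &regs[i].val;
        return NULL;                            /* unmodelled address */
}

static uint32_t rd32(uint32_t a)             { return *at(a); }
static void     wr32(uint32_t a, uint32_t v) { *at(a) = v; }

int main(void)
{
        wr32(0x100cc8, 0x00123400);             /* fb programs its register */
        wr32(0x4188b4, rd32(0x100cc8));         /* gr mirrors, owns nothing */
        printf("%#010x\n", rd32(0x4188b4));
        return 0;
}
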
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
index 29732bc14..69479af1d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm20b.c
@@ -42,7 +42,7 @@ gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
}
val = nvkm_rd32(device, 0x100c80);
- val &= 0xf000087f;
+ val &= 0xf000187f;
nvkm_wr32(device, 0x418880, val);
nvkm_wr32(device, 0x418890, 0);
nvkm_wr32(device, 0x418894, 0);
@@ -66,7 +66,9 @@ static const struct gf100_gr_func
gm20b_gr = {
.init = gk20a_gr_init,
.init_gpc_mmu = gm20b_gr_init_gpc_mmu,
+ .init_rop_active_fbps = gk104_gr_init_rop_active_fbps,
.set_hww_esr_report_mask = gm20b_gr_set_hww_esr_report_mask,
+ .rops = gm200_gr_rops,
.ppc_nr = 1,
.grctx = &gm20b_grctx,
.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
index 85c5b7fea..9c2e985dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv04.c
@@ -1422,6 +1422,5 @@ nv04_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
spin_lock_init(&gr->lock);
*pgr = &gr->base;
- return nvkm_gr_ctor(&nv04_gr, device, index, 0x00001000,
- true, &gr->base);
+ return nvkm_gr_ctor(&nv04_gr, device, index, true, &gr->base);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
index 4542867fa..4ebbfbdd8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
@@ -1182,7 +1182,7 @@ nv10_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
spin_lock_init(&gr->lock);
*pgr = &gr->base;
- return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
+ return nvkm_gr_ctor(func, device, index, true, &gr->base);
}
static const struct nvkm_gr_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index 5caef65d3..d1dc92999 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -337,7 +337,7 @@ nv20_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
return -ENOMEM;
*pgr = &gr->base;
- return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
+ return nvkm_gr_ctor(func, device, index, true, &gr->base);
}
static const struct nvkm_gr_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index 05a895496..5f1ad8344 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -438,7 +438,7 @@ nv40_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
*pgr = &gr->base;
INIT_LIST_HEAD(&gr->chan);
- return nvkm_gr_ctor(func, device, index, 0x00001000, true, &gr->base);
+ return nvkm_gr_ctor(func, device, index, true, &gr->base);
}
static const struct nvkm_gr_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
index b19b912d5..fca67de43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c
@@ -768,7 +768,7 @@ nv50_gr_new_(const struct nvkm_gr_func *func, struct nvkm_device *device,
spin_lock_init(&gr->lock);
*pgr = &gr->base;
- return nvkm_gr_ctor(func, device, index, 0x00201000, true, &gr->base);
+ return nvkm_gr_ctor(func, device, index, true, &gr->base);
}
static const struct nvkm_gr_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
index a234590be..d8adcdf69 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -7,8 +7,7 @@ struct nvkm_fb_tile;
struct nvkm_fifo_chan;
int nvkm_gr_ctor(const struct nvkm_gr_func *, struct nvkm_device *,
- int index, u32 pmc_enable, bool enable,
- struct nvkm_gr *);
+ int index, bool enable, struct nvkm_gr *);
bool nv04_gr_idle(struct nvkm_gr *);
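
nvkm_gr_ctor() loses its pmc_enable argument here, and every call site below (nv04 through nv50, the mpeg engines, the falcon and xtensa constructors) drops its magic constant to match. The diff itself only shows the parameter being removed; a plausible reading, hinted at by the new subdev/top Kbuild entry later in the patch, is that the PMC enable mask is now derived centrally from device topology. A speculative sketch of such a lookup, where pmc_enable_for() and its table are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* hypothetical central table standing in for whatever now supplies the
 * mask -- invented for illustration, not taken from the patch */
static uint32_t pmc_enable_for(int index)
{
        static const uint32_t map[] = { 0x00001000, 0x00000002 };
        return map[index];
}

static int engine_ctor(int index, int enable)   /* no pmc_enable argument */
{
        printf("engine %d: pmc_enable %#010x (%s)\n",
               index, pmc_enable_for(index), enable ? "on" : "off");
        return 0;
}

int main(void)
{
        return engine_ctor(0, 1) | engine_ctor(1, 1);
}
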
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
index 34ff0014a..c0e11a071 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/g84.c
@@ -39,6 +39,5 @@ g84_mpeg = {
int
g84_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
{
- return nvkm_engine_new_(&g84_mpeg, device, index, 0x00000002,
- true, pmpeg);
+ return nvkm_engine_new_(&g84_mpeg, device, index, true, pmpeg);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index d4d8942b1..003ac915e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -278,7 +278,7 @@ nv31_mpeg_new_(const struct nv31_mpeg_func *func, struct nvkm_device *device,
mpeg->func = func;
*pmpeg = &mpeg->engine;
- return nvkm_engine_ctor(&nv31_mpeg_, device, index, 0x00000002,
+ return nvkm_engine_ctor(&nv31_mpeg_, device, index,
true, &mpeg->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index d433cfa4a..e536f37e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -212,6 +212,5 @@ nv44_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
INIT_LIST_HEAD(&mpeg->chan);
*pmpeg = &mpeg->engine;
- return nvkm_engine_ctor(&nv44_mpeg, device, index, 0x00000002,
- true, &mpeg->engine);
+ return nvkm_engine_ctor(&nv44_mpeg, device, index, true, &mpeg->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
index c3a85dffc..4e528851e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv50.c
@@ -130,6 +130,5 @@ nv50_mpeg = {
int
nv50_mpeg_new(struct nvkm_device *device, int index, struct nvkm_engine **pmpeg)
{
- return nvkm_engine_new_(&nv50_mpeg, device, index, 0x00400002,
- true, pmpeg);
+ return nvkm_engine_new_(&nv50_mpeg, device, index, true, pmpeg);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
index 1f1a99e92..f30cf1dcf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/g98.c
@@ -35,7 +35,6 @@ g98_mspdec_init(struct nvkm_falcon *mspdec)
static const struct nvkm_falcon_func
g98_mspdec = {
- .pmc_enable = 0x01020000,
.init = g98_mspdec_init,
.sclass = {
{ -1, -1, G98_MSPDEC },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
index 371fd6c3c..cfe1aa81b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gf100.c
@@ -35,7 +35,6 @@ gf100_mspdec_init(struct nvkm_falcon *mspdec)
static const struct nvkm_falcon_func
gf100_mspdec = {
- .pmc_enable = 0x00020000,
.init = gf100_mspdec_init,
.sclass = {
{ -1, -1, GF100_MSPDEC },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
index de804a15b..24272b492 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gk104.c
@@ -27,7 +27,6 @@
static const struct nvkm_falcon_func
gk104_mspdec = {
- .pmc_enable = 0x00020000,
.init = gf100_mspdec_init,
.sclass = {
{ -1, -1, GK104_MSPDEC },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
index 835631713..cf6e59ad6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/gt215.c
@@ -27,7 +27,6 @@
static const struct nvkm_falcon_func
gt215_mspdec = {
- .pmc_enable = 0x01020000,
.init = g98_mspdec_init,
.sclass = {
{ -1, -1, GT212_MSPDEC },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
index 73f633ae2..c45dbf79d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/g98.c
@@ -35,7 +35,6 @@ g98_msppp_init(struct nvkm_falcon *msppp)
static const struct nvkm_falcon_func
g98_msppp = {
- .pmc_enable = 0x00400002,
.init = g98_msppp_init,
.sclass = {
{ -1, -1, G98_MSPPP },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
index c42c0c07e..803c62ab5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gf100.c
@@ -35,7 +35,6 @@ gf100_msppp_init(struct nvkm_falcon *msppp)
static const struct nvkm_falcon_func
gf100_msppp = {
- .pmc_enable = 0x00000002,
.init = gf100_msppp_init,
.sclass = {
{ -1, -1, GF100_MSPPP },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
index 00e7795f1..49cbf72ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/gt215.c
@@ -27,7 +27,6 @@
static const struct nvkm_falcon_func
gt215_msppp = {
- .pmc_enable = 0x00400002,
.init = g98_msppp_init,
.sclass = {
{ -1, -1, GT212_MSPPP },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
index 47e2929bf..4a2a9f049 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/g98.c
@@ -35,7 +35,6 @@ g98_msvld_init(struct nvkm_falcon *msvld)
static const struct nvkm_falcon_func
g98_msvld = {
- .pmc_enable = 0x04008000,
.init = g98_msvld_init,
.sclass = {
{ -1, -1, G98_MSVLD },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
index 1ac581ba9..1695e532c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gf100.c
@@ -35,7 +35,6 @@ gf100_msvld_init(struct nvkm_falcon *msvld)
static const struct nvkm_falcon_func
gf100_msvld = {
- .pmc_enable = 0x00008000,
.init = gf100_msvld_init,
.sclass = {
{ -1, -1, GF100_MSVLD },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
index 4bba16e0f..b640cd63e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gk104.c
@@ -27,7 +27,6 @@
static const struct nvkm_falcon_func
gk104_msvld = {
- .pmc_enable = 0x00008000,
.init = gf100_msvld_init,
.sclass = {
{ -1, -1, GK104_MSVLD },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
index e17cb5605..201e8ef35 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/gt215.c
@@ -27,7 +27,6 @@
static const struct nvkm_falcon_func
gt215_msvld = {
- .pmc_enable = 0x04008000,
.init = g98_msvld_init,
.sclass = {
{ -1, -1, GT212_MSVLD },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
index 511800f6a..a0f540ef2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/mcp89.c
@@ -27,7 +27,6 @@
static const struct nvkm_falcon_func
mcp89_msvld = {
- .pmc_enable = 0x04008000,
.init = g98_msvld_init,
.sclass = {
{ -1, -1, IGT21A_MSVLD },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index f19fabef8..8616636ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -863,5 +863,5 @@ nvkm_pm_ctor(const struct nvkm_pm_func *func, struct nvkm_device *device,
pm->func = func;
INIT_LIST_HEAD(&pm->domains);
INIT_LIST_HEAD(&pm->sources);
- return nvkm_engine_ctor(&nvkm_pm, device, index, 0, true, &pm->engine);
+ return nvkm_engine_ctor(&nvkm_pm, device, index, true, &pm->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
index 995c2c5ec..6d2a7f0af 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c
@@ -66,7 +66,6 @@ g98_sec = {
.code.size = sizeof(g98_sec_code),
.data.data = g98_sec_data,
.data.size = sizeof(g98_sec_data),
- .pmc_enable = 0x00004000,
.intr = g98_sec_intr,
.sclass = {
{ -1, -1, G98_SEC },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
index 53c1f7e75..7be3198e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
@@ -106,5 +106,5 @@ nvkm_sw_new_(const struct nvkm_sw_func *func, struct nvkm_device *device,
INIT_LIST_HEAD(&sw->chan);
sw->func = func;
- return nvkm_engine_ctor(&nvkm_sw, device, index, 0, true, &sw->engine);
+ return nvkm_engine_ctor(&nvkm_sw, device, index, true, &sw->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
index 4188c77ac..7a9617878 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/vp/g84.c
@@ -27,7 +27,6 @@
static const struct nvkm_xtensa_func
g84_vp = {
- .pmc_enable = 0x01020000,
.fifo_val = 0x111,
.unkd28 = 0x9c544,
.sclass = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
index 3692c1694..6a00ac0c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/xtensa.c
@@ -187,6 +187,6 @@ nvkm_xtensa_new_(const struct nvkm_xtensa_func *func,
xtensa->addr = addr;
*pengine = &xtensa->engine;
- return nvkm_engine_ctor(&nvkm_xtensa, device, index, func->pmc_enable,
+ return nvkm_engine_ctor(&nvkm_xtensa, device, index,
enable, &xtensa->engine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
index 642d27dc9..3f5d38d74 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/Kbuild
@@ -19,4 +19,5 @@ include $(src)/nvkm/subdev/pmu/Kbuild
include $(src)/nvkm/subdev/secboot/Kbuild
include $(src)/nvkm/subdev/therm/Kbuild
include $(src)/nvkm/subdev/timer/Kbuild
+include $(src)/nvkm/subdev/top/Kbuild
include $(src)/nvkm/subdev/volt/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index a9433ad45..c561d148c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -77,7 +77,7 @@ void
nvkm_bar_ctor(const struct nvkm_bar_func *func, struct nvkm_device *device,
int index, struct nvkm_bar *bar)
{
- nvkm_subdev_ctor(&nvkm_bar, device, index, 0, &bar->subdev);
+ nvkm_subdev_ctor(&nvkm_bar, device, index, &bar->subdev);
bar->func = func;
spin_lock_init(&bar->lock);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index 79536897e..e15b9627b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -105,7 +105,7 @@ nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_bios, device, index, 0, &bios->subdev);
+ nvkm_subdev_ctor(&nvkm_bios, device, index, &bios->subdev);
ret = nvbios_shadow(bios);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index 125ec2ed6..91a7dc56e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -81,9 +81,11 @@ static u16
pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
{
struct bit_entry bit_C;
+ u16 data = 0x0000;
- if (!bit_entry(bios, 'C', &bit_C) && bit_C.length >= 10) {
- u16 data = nvbios_rd16(bios, bit_C.offset + 8);
+ if (!bit_entry(bios, 'C', &bit_C)) {
+ if (bit_C.version == 1 && bit_C.length >= 10)
+ data = nvbios_rd16(bios, bit_C.offset + 8);
if (data) {
*ver = nvbios_rd08(bios, data + 0);
*hdr = nvbios_rd08(bios, data + 1);
@@ -94,7 +96,7 @@ pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
if (bmp_version(bios) >= 0x0524) {
- u16 data = nvbios_rd16(bios, bios->bmp_offset + 142);
+ data = nvbios_rd16(bios, bios->bmp_offset + 142);
if (data) {
*ver = nvbios_rd08(bios, data + 0);
*hdr = 1;
@@ -105,7 +107,7 @@ pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
}
*ver = 0x00;
- return 0x0000;
+ return data;
}
static struct pll_mapping *
@@ -156,7 +158,7 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
}
map = pll_map(bios);
- while (map->reg) {
+ while (map && map->reg) {
if (map->reg == reg && *ver >= 0x20) {
u16 addr = (data += hdr);
*type = map->type;
@@ -198,7 +200,7 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
}
map = pll_map(bios);
- while (map->reg) {
+ while (map && map->reg) {
if (map->type == type && *ver >= 0x20) {
u16 addr = (data += hdr);
*reg = map->reg;
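
Three defensive fixes land in bios/pll.c: pll_limits_table() trusts the BIT 'C' table pointer only when bit_C.version == 1, its fallthrough now returns the BMP-derived pointer held in data rather than a hard 0x0000, and both pll_map_reg() and pll_map_type() check pll_map() for NULL before dereferencing map->reg. A sketch of the NULL-safe table walk:

#include <stdio.h>

struct pll_mapping { unsigned reg, type; };

/* may legitimately return NULL on chipsets with no static mapping */
static struct pll_mapping *pll_map(void) { return NULL; }

int main(void)
{
        struct pll_mapping *map = pll_map();

        while (map && map->reg) {               /* no deref when map is NULL */
                printf("reg %#x type %u\n", map->reg, map->type);
                map++;
        }
        puts("no mapping; fell through safely");
        return 0;
}
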
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
index dc5a10f18..52ad73bce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/base.c
@@ -58,7 +58,7 @@ nvkm_bus_new_(const struct nvkm_bus_func *func, struct nvkm_device *device,
struct nvkm_bus *bus;
if (!(bus = *pbus = kzalloc(sizeof(*bus), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_bus, device, index, 0, &bus->subdev);
+ nvkm_subdev_ctor(&nvkm_bus, device, index, &bus->subdev);
bus->func = func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
index 889cce2eb..7102c2532 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/base.c
@@ -564,7 +564,7 @@ nvkm_clk_ctor(const struct nvkm_clk_func *func, struct nvkm_device *device,
int ret, idx, arglen;
const char *mode;
- nvkm_subdev_ctor(&nvkm_clk, device, index, 0, &clk->subdev);
+ nvkm_subdev_ctor(&nvkm_clk, device, index, &clk->subdev);
clk->func = func;
INIT_LIST_HEAD(&clk->states);
clk->domains = func->domains;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
index 5f25402f6..4756019dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
@@ -83,6 +83,12 @@ nvkm_devinit_preinit(struct nvkm_subdev *subdev)
if (init->func->preinit)
init->func->preinit(init);
+ /* Override the post flag during the first call if NvForcePost is set */
+ if (init->force_post) {
+ init->post = init->force_post;
+ init->force_post = false;
+ }
+
/* unlock the extended vga crtc regs */
nvkm_lockvgac(subdev->device, false);
return 0;
@@ -124,7 +130,7 @@ nvkm_devinit_ctor(const struct nvkm_devinit_func *func,
struct nvkm_device *device, int index,
struct nvkm_devinit *init)
{
- nvkm_subdev_ctor(&nvkm_devinit, device, index, 0, &init->subdev);
+ nvkm_subdev_ctor(&nvkm_devinit, device, index, &init->subdev);
init->func = func;
- init->post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
+ init->force_post = nvkm_boolopt(device->cfgopt, "NvForcePost", false);
}
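
NvForcePost now lands in init->force_post and is consumed exactly once in nvkm_devinit_preinit(), so the forced POST applies to the first initialization only and the flag self-clears; later resumes fall back to the detected state (the gf100 hunk below recomputes base->post from the hardware bit unconditionally). A sketch of the one-shot override:

#include <stdbool.h>
#include <stdio.h>

struct devinit { bool post, force_post; };

static void preinit(struct devinit *init)
{
        if (init->force_post) {                 /* consume the override once */
                init->post = init->force_post;
                init->force_post = false;
        }
        printf("post=%d\n", init->post);
}

int main(void)
{
        struct devinit init = { .post = false, .force_post = true };

        preinit(&init);                         /* forced: post=1 */
        init.post = false;                      /* e.g. recomputed on resume */
        preinit(&init);                         /* override spent: post=0 */
        return 0;
}
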
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index 2923598b5..8b1b34c3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -97,9 +97,11 @@ gf100_devinit_preinit(struct nvkm_devinit *base)
struct nvkm_subdev *subdev = &init->base.subdev;
struct nvkm_device *device = subdev->device;
- /* This bit is set by devinit, and flips back to 0 on suspend */
- if (!base->post)
- base->post = ((nvkm_rd32(device, 0x2240c) & BIT(1)) == 0);
+ /*
+ * This bit is set by devinit, and flips back to 0 on suspend. We
+ * can use it as a reliable way to know whether we should run devinit.
+ */
+ base->post = ((nvkm_rd32(device, 0x2240c) & BIT(1)) == 0);
}
static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 08105701a..842d5de96 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -23,6 +23,7 @@ nvkm-y += nvkm/subdev/fb/gf100.o
nvkm-y += nvkm/subdev/fb/gk104.o
nvkm-y += nvkm/subdev/fb/gk20a.o
nvkm-y += nvkm/subdev/fb/gm107.o
+nvkm-y += nvkm/subdev/fb/gm200.o
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index a719b9bec..ce90242b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -24,6 +24,7 @@
#include "priv.h"
#include "ram.h"
+#include <core/memory.h>
#include <subdev/bios.h>
#include <subdev/bios/M0203.h>
#include <engine/gr.h>
@@ -98,6 +99,7 @@ static int
nvkm_fb_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_fb *fb = nvkm_fb(subdev);
+
if (fb->func->ram_new) {
int ret = fb->func->ram_new(fb, &fb->ram);
if (ret) {
@@ -105,6 +107,13 @@ nvkm_fb_oneinit(struct nvkm_subdev *subdev)
return ret;
}
}
+
+ if (fb->func->oneinit) {
+ int ret = fb->func->oneinit(fb);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -134,6 +143,9 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
struct nvkm_fb *fb = nvkm_fb(subdev);
int i;
+ nvkm_memory_del(&fb->mmu_wr);
+ nvkm_memory_del(&fb->mmu_rd);
+
for (i = 0; i < fb->tile.regions; i++)
fb->func->tile.fini(fb, i, &fb->tile.region[i]);
@@ -156,7 +168,7 @@ void
nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
int index, struct nvkm_fb *fb)
{
- nvkm_subdev_ctor(&nvkm_fb, device, index, 0, &fb->subdev);
+ nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
fb->func = func;
fb->tile.regions = fb->func->tile.regions;
}
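
nvkm_fb_oneinit() gains an optional per-chipset oneinit hook, invoked after RAM probing, and the destructor now releases the fb-owned mmu_wr/mmu_rd buffers that replace gr's unk4188b4/unk4188b8. The NULL-check-before-call is the usual optional-hook idiom; a compact sketch:

#include <stdio.h>

struct fb;

struct fb_func {
        int (*oneinit)(struct fb *);            /* optional: may be NULL */
};

struct fb { const struct fb_func *func; };

static int fb_oneinit(struct fb *fb)
{
        if (fb->func->oneinit) {                /* only chipsets that opt in */
                int ret = fb->func->oneinit(fb);
                if (ret)
                        return ret;
        }
        return 0;
}

static int gf100_oneinit(struct fb *fb)
{
        (void)fb;
        puts("allocate mmu_rd/mmu_wr");
        return 0;
}

static const struct fb_func gf100 = { .oneinit = gf100_oneinit };
static const struct fb_func nv04  = { .oneinit = NULL };

int main(void)
{
        struct fb a = { &gf100 }, b = { &nv04 };
        return fb_oneinit(&a) | fb_oneinit(&b);
}
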
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 008bb9849..e649ead5c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -24,6 +24,9 @@
#include "gf100.h"
#include "ram.h"
+#include <core/memory.h>
+#include <core/option.h>
+
extern const u8 gf100_pte_storage_type_map[256];
bool
@@ -46,6 +49,28 @@ gf100_fb_intr(struct nvkm_fb *base)
nvkm_debug(subdev, "PBFB intr\n");
}
+int
+gf100_fb_oneinit(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ int ret, size = 0x1000;
+
+ size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
+ size = min(size, 0x1000);
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
+ false, &fb->mmu_rd);
+ if (ret)
+ return ret;
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000,
+ false, &fb->mmu_wr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
void
gf100_fb_init(struct nvkm_fb *base)
{
@@ -98,6 +123,7 @@ gf100_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
static const struct nvkm_fb_func
gf100_fb = {
.dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
.intr = gf100_fb_intr,
.ram_new = gf100_ram_new,
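For reference, the MmuDebugBufferSize handling introduced above can only shrink the allocation; a usage sketch (the nouveau.config= syntax is an assumption here, the clamp is as coded):

int size = 0x1000;

/* Optional user override, e.g. nouveau.config=MmuDebugBufferSize=0x800
 * (assumed syntax); the min() below silently caps anything above 0x1000.
 */
size = nvkm_longopt(device->cfgopt, "MmuDebugBufferSize", size);
size = min(size, 0x1000);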
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 0edb3c316..b41f0f700 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -27,6 +27,7 @@
static const struct nvkm_fb_func
gk104_fb = {
.dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
.intr = gf100_fb_intr,
.ram_new = gk104_ram_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index 81447eb4c..7306f7dfc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -21,15 +21,20 @@
*/
#include "priv.h"
+#include <core/memory.h>
+
static void
gk20a_fb_init(struct nvkm_fb *fb)
{
struct nvkm_device *device = fb->subdev.device;
nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
+ nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8);
+ nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8);
}
static const struct nvkm_fb_func
gk20a_fb = {
+ .oneinit = gf100_fb_oneinit,
.init = gk20a_fb_init,
.memtype_valid = gf100_fb_memtype_valid,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index 2a91df865..4869fdb75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -27,6 +27,7 @@
static const struct nvkm_fb_func
gm107_fb = {
.dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
.init = gf100_fb_init,
.intr = gf100_fb_intr,
.ram_new = gm107_ram_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
new file mode 100644
index 000000000..44f5716f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "gf100.h"
+#include "ram.h"
+
+#include <core/memory.h>
+
+static void
+gm200_fb_init(struct nvkm_fb *base)
+{
+ struct gf100_fb *fb = gf100_fb(base);
+ struct nvkm_device *device = fb->base.subdev.device;
+
+ if (fb->r100c10_page)
+ nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
+
+ nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
+
+ nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
+ nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
+ nvkm_mask(device, 0x100cc4, 0x00060000,
+ min(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17);
+}
+
+static const struct nvkm_fb_func
+gm200_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gm200_fb_init,
+ .intr = gf100_fb_intr,
+ .ram_new = gm107_ram_new,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gm200_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gm200_fb, device, index, pfb);
+}
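The gm200 init writes pack buffer addresses and size into register fields; a hedged reading of the encoding, inferred from the code rather than documentation (local names hypothetical):

/* 0x100cc8/0x100ccc take addr >> 8, i.e. a 256-byte-aligned base. */
u32 wr_val = (u32)(nvkm_memory_addr(fb->base.mmu_wr) >> 8);

/* The 0x00060000 field of 0x100cc4 (bits 17:18) appears to hold the
 * buffer size in 64KiB units, clamped to 2; with the default 0x1000
 * buffers (size >> 16 == 0) the field stays 0.
 */
u32 sz_val = min(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17;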
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 62b9feb53..d97d640e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -6,6 +6,7 @@ struct nvkm_bios;
struct nvkm_fb_func {
void *(*dtor)(struct nvkm_fb *);
+ int (*oneinit)(struct nvkm_fb *);
void (*init)(struct nvkm_fb *);
void (*intr)(struct nvkm_fb *);
@@ -58,5 +59,6 @@ void nv44_fb_tile_prog(struct nvkm_fb *, int, struct nvkm_fb_tile *);
void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nvkm_fb_tile *);
+int gf100_fb_oneinit(struct nvkm_fb *);
bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
index f4144979a..1c3c18ea8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/base.c
@@ -47,7 +47,7 @@ nvkm_fuse_new_(const struct nvkm_fuse_func *func, struct nvkm_device *device,
struct nvkm_fuse *fuse;
if (!(fuse = *pfuse = kzalloc(sizeof(*fuse), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_fuse, device, index, 0, &fuse->subdev);
+ nvkm_subdev_ctor(&nvkm_fuse, device, index, &fuse->subdev);
fuse->func = func;
spin_lock_init(&fuse->lock);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index d45ec99f0..77c649723 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -216,7 +216,7 @@ nvkm_gpio_new_(const struct nvkm_gpio_func *func, struct nvkm_device *device,
if (!(gpio = *pgpio = kzalloc(sizeof(*gpio), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_gpio, device, index, 0, &gpio->subdev);
+ nvkm_subdev_ctor(&nvkm_gpio, device, index, &gpio->subdev);
gpio->func = func;
return nvkm_event_init(&nvkm_gpio_intr_func, 2, func->lines,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 243a71ff0..4f197b15a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -254,7 +254,7 @@ nvkm_i2c_new_(const struct nvkm_i2c_func *func, struct nvkm_device *device,
if (!(i2c = *pi2c = kzalloc(sizeof(*i2c), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_i2c, device, index, 0, &i2c->subdev);
+ nvkm_subdev_ctor(&nvkm_i2c, device, index, &i2c->subdev);
i2c->func = func;
INIT_LIST_HEAD(&i2c->pad);
INIT_LIST_HEAD(&i2c->bus);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index 72d6330d2..2c6b374f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -117,6 +117,6 @@ gf100_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev *ibus;
if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&gf100_ibus, device, index, 0, ibus);
+ nvkm_subdev_ctor(&gf100_ibus, device, index, ibus);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
index f69f263c5..3905a80da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
@@ -46,6 +46,6 @@ gf117_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev *ibus;
if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&gf117_ibus, device, index, 0, ibus);
+ nvkm_subdev_ctor(&gf117_ibus, device, index, ibus);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index b5cee3f89..c673853f3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -120,6 +120,6 @@ gk104_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev *ibus;
if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&gk104_ibus, device, index, 0, ibus);
+ nvkm_subdev_ctor(&gk104_ibus, device, index, ibus);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index 3484079e8..b7159b338 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -84,6 +84,6 @@ gk20a_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev *ibus;
if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&gk20a_ibus, device, index, 0, ibus);
+ nvkm_subdev_ctor(&gk20a_ibus, device, index, ibus);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
index ef0b7f3b1..c63328152 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
@@ -35,6 +35,6 @@ gm200_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev *ibus;
if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&gm200_ibus, device, index, 0, ibus);
+ nvkm_subdev_ctor(&gm200_ibus, device, index, ibus);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
index c44a85228..41bd5d0f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
@@ -30,15 +30,14 @@
static bool
nvkm_iccsense_validate_device(struct i2c_adapter *i2c, u8 addr,
- enum nvbios_extdev_type type, u8 rail)
+ enum nvbios_extdev_type type)
{
switch (type) {
case NVBIOS_EXTDEV_INA209:
case NVBIOS_EXTDEV_INA219:
- return rail == 0 && nv_rd16i2cr(i2c, addr, 0x0) >= 0;
+ return nv_rd16i2cr(i2c, addr, 0x0) >= 0;
case NVBIOS_EXTDEV_INA3221:
- return rail <= 3 &&
- nv_rd16i2cr(i2c, addr, 0xff) == 0x3220 &&
+ return nv_rd16i2cr(i2c, addr, 0xff) == 0x3220 &&
nv_rd16i2cr(i2c, addr, 0xfe) == 0x5449;
default:
return false;
@@ -67,8 +66,9 @@ nvkm_iccsense_ina2x9_read(struct nvkm_iccsense *iccsense,
struct nvkm_iccsense_rail *rail,
u8 shunt_reg, u8 bus_reg)
{
- return nvkm_iccsense_poll_lane(rail->i2c, rail->addr, shunt_reg, 0,
- bus_reg, 3, rail->mohm, 10 * 4);
+ return nvkm_iccsense_poll_lane(rail->sensor->i2c, rail->sensor->addr,
+ shunt_reg, 0, bus_reg, 3, rail->mohm,
+ 10 * 4);
}
static int
@@ -89,37 +89,87 @@ static int
nvkm_iccsense_ina3221_read(struct nvkm_iccsense *iccsense,
struct nvkm_iccsense_rail *rail)
{
- return nvkm_iccsense_poll_lane(rail->i2c, rail->addr,
- 1 + (rail->rail * 2), 3,
- 2 + (rail->rail * 2), 3, rail->mohm,
+ return nvkm_iccsense_poll_lane(rail->sensor->i2c, rail->sensor->addr,
+ 1 + (rail->idx * 2), 3,
+ 2 + (rail->idx * 2), 3, rail->mohm,
40 * 8);
}
-int
-nvkm_iccsense_read(struct nvkm_iccsense *iccsense, u8 idx)
+static void
+nvkm_iccsense_ina209_config(struct nvkm_iccsense *iccsense,
+ struct nvkm_iccsense_sensor *sensor)
{
- struct nvkm_iccsense_rail *rail;
-
- if (!iccsense || idx >= iccsense->rail_count)
- return -EINVAL;
+ struct nvkm_subdev *subdev = &iccsense->subdev;
+ /* configuration:
+ * 0x0007: 0x0007 shunt and bus continuous
+ * 0x0078: 0x0078 128 samples shunt
+ * 0x0780: 0x0780 128 samples bus
+ * 0x1800: 0x0000 +-40 mV shunt range
+ * 0x2000: 0x0000 16V FSR
+ */
+ u16 value = 0x07ff;
+ nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
+ nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
+}
- rail = &iccsense->rails[idx];
- if (!rail->read)
- return -ENODEV;
+static void
+nvkm_iccsense_ina3221_config(struct nvkm_iccsense *iccsense,
+ struct nvkm_iccsense_sensor *sensor)
+{
+ struct nvkm_subdev *subdev = &iccsense->subdev;
+ /* configuration:
+ * 0x0007: 0x0007 shunt and bus continuous
+ * 0x0031: 0x0000 140 us conversion time shunt
+ * 0x01c0: 0x0000 140 us conversion time bus
+ * 0x0f00: 0x0f00 1024 samples
+ * 0x7000: 0x?000 channels
+ */
+ u16 value = 0x0e07;
+ if (sensor->rail_mask & 0x1)
+ value |= 0x1 << 14;
+ if (sensor->rail_mask & 0x2)
+ value |= 0x1 << 13;
+ if (sensor->rail_mask & 0x4)
+ value |= 0x1 << 12;
+ nvkm_debug(subdev, "config for sensor id %i: 0x%x\n", sensor->id, value);
+ nv_wr16i2cr(sensor->i2c, sensor->addr, 0x00, value);
+}
- return rail->read(iccsense, rail);
+static void
+nvkm_iccsense_sensor_config(struct nvkm_iccsense *iccsense,
+ struct nvkm_iccsense_sensor *sensor)
+{
+ switch (sensor->type) {
+ case NVBIOS_EXTDEV_INA209:
+ case NVBIOS_EXTDEV_INA219:
+ nvkm_iccsense_ina209_config(iccsense, sensor);
+ break;
+ case NVBIOS_EXTDEV_INA3221:
+ nvkm_iccsense_ina3221_config(iccsense, sensor);
+ break;
+ default:
+ break;
+ }
}
int
nvkm_iccsense_read_all(struct nvkm_iccsense *iccsense)
{
- int result = 0, i;
- for (i = 0; i < iccsense->rail_count; ++i) {
- int res = nvkm_iccsense_read(iccsense, i);
- if (res >= 0)
- result += res;
- else
+ int result = 0;
+ struct nvkm_iccsense_rail *rail;
+
+ if (!iccsense)
+ return -EINVAL;
+
+ list_for_each_entry(rail, &iccsense->rails, head) {
+ int res;
+ if (!rail->read)
+ return -ENODEV;
+
+ res = rail->read(iccsense, rail);
+ if (res < 0)
return res;
+ result += res;
}
return result;
}
@@ -128,89 +178,160 @@ static void *
nvkm_iccsense_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
+ struct nvkm_iccsense_sensor *sensor, *tmps;
+ struct nvkm_iccsense_rail *rail, *tmpr;
- if (iccsense->rails)
- kfree(iccsense->rails);
+ list_for_each_entry_safe(sensor, tmps, &iccsense->sensors, head) {
+ list_del(&sensor->head);
+ kfree(sensor);
+ }
+ list_for_each_entry_safe(rail, tmpr, &iccsense->rails, head) {
+ list_del(&rail->head);
+ kfree(rail);
+ }
return iccsense;
}
+static struct nvkm_iccsense_sensor*
+nvkm_iccsense_create_sensor(struct nvkm_iccsense *iccsense, u8 id)
+{
+ struct nvkm_subdev *subdev = &iccsense->subdev;
+ struct nvkm_bios *bios = subdev->device->bios;
+ struct nvkm_i2c *i2c = subdev->device->i2c;
+ struct nvbios_extdev_func extdev;
+ struct nvkm_i2c_bus *i2c_bus;
+ struct nvkm_iccsense_sensor *sensor;
+ u8 addr;
+
+ if (!i2c || !bios || nvbios_extdev_parse(bios, id, &extdev))
+ return NULL;
+
+ if (extdev.type == 0xff)
+ return NULL;
+
+ if (extdev.type != NVBIOS_EXTDEV_INA209 &&
+ extdev.type != NVBIOS_EXTDEV_INA219 &&
+ extdev.type != NVBIOS_EXTDEV_INA3221) {
+ iccsense->data_valid = false;
+ nvkm_error(subdev, "Unknown sensor type %x, power reading "
+ "disabled\n", extdev.type);
+ return NULL;
+ }
+
+ if (extdev.bus)
+ i2c_bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_SEC);
+ else
+ i2c_bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
+ if (!i2c_bus)
+ return NULL;
+
+ addr = extdev.addr >> 1;
+ if (!nvkm_iccsense_validate_device(&i2c_bus->i2c, addr,
+ extdev.type)) {
+ iccsense->data_valid = false;
+ nvkm_warn(subdev, "found invalid sensor id: %i, power reading"
+ "might be invalid\n", id);
+ return NULL;
+ }
+
+ sensor = kmalloc(sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return NULL;
+
+ list_add_tail(&sensor->head, &iccsense->sensors);
+ sensor->id = id;
+ sensor->type = extdev.type;
+ sensor->i2c = &i2c_bus->i2c;
+ sensor->addr = addr;
+ sensor->rail_mask = 0x0;
+ return sensor;
+}
+
+static struct nvkm_iccsense_sensor*
+nvkm_iccsense_get_sensor(struct nvkm_iccsense *iccsense, u8 id)
+{
+ struct nvkm_iccsense_sensor *sensor;
+ list_for_each_entry(sensor, &iccsense->sensors, head) {
+ if (sensor->id == id)
+ return sensor;
+ }
+ return nvkm_iccsense_create_sensor(iccsense, id);
+}
+
static int
nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
{
struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
struct nvkm_bios *bios = subdev->device->bios;
- struct nvkm_i2c *i2c = subdev->device->i2c;
struct nvbios_iccsense stbl;
int i;
- if (!i2c || !bios || nvbios_iccsense_parse(bios, &stbl)
- || !stbl.nr_entry)
+ if (!bios || nvbios_iccsense_parse(bios, &stbl) || !stbl.nr_entry)
return 0;
- iccsense->rails = kmalloc(sizeof(*iccsense->rails) * stbl.nr_entry,
- GFP_KERNEL);
- if (!iccsense->rails)
- return -ENOMEM;
-
iccsense->data_valid = true;
for (i = 0; i < stbl.nr_entry; ++i) {
struct pwr_rail_t *r = &stbl.rail[i];
- struct nvbios_extdev_func extdev;
struct nvkm_iccsense_rail *rail;
- struct nvkm_i2c_bus *i2c_bus;
- u8 addr;
+ struct nvkm_iccsense_sensor *sensor;
+ int (*read)(struct nvkm_iccsense *,
+ struct nvkm_iccsense_rail *);
if (!r->mode || r->resistor_mohm == 0)
continue;
- if (nvbios_extdev_parse(bios, r->extdev_id, &extdev))
- continue;
-
- if (extdev.type == 0xff)
+ sensor = nvkm_iccsense_get_sensor(iccsense, r->extdev_id);
+ if (!sensor)
continue;
- if (extdev.bus)
- i2c_bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_SEC);
- else
- i2c_bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
- if (!i2c_bus)
- continue;
-
- addr = extdev.addr >> 1;
- if (!nvkm_iccsense_validate_device(&i2c_bus->i2c, addr,
- extdev.type, r->rail)) {
- iccsense->data_valid = false;
- nvkm_warn(subdev, "found unknown or invalid rail entry"
- " type 0x%x rail %i, power reading might be"
- " invalid\n", extdev.type, r->rail);
- continue;
- }
-
- rail = &iccsense->rails[iccsense->rail_count];
- switch (extdev.type) {
+ switch (sensor->type) {
case NVBIOS_EXTDEV_INA209:
- rail->read = nvkm_iccsense_ina209_read;
+ if (r->rail != 0)
+ continue;
+ read = nvkm_iccsense_ina209_read;
break;
case NVBIOS_EXTDEV_INA219:
- rail->read = nvkm_iccsense_ina219_read;
+ if (r->rail != 0)
+ continue;
+ read = nvkm_iccsense_ina219_read;
break;
case NVBIOS_EXTDEV_INA3221:
- rail->read = nvkm_iccsense_ina3221_read;
+ if (r->rail >= 3)
+ continue;
+ read = nvkm_iccsense_ina3221_read;
break;
+ default:
+ continue;
}
- rail->addr = addr;
- rail->rail = r->rail;
+ rail = kmalloc(sizeof(*rail), GFP_KERNEL);
+ if (!rail)
+ return -ENOMEM;
+ sensor->rail_mask |= 1 << r->rail;
+ rail->read = read;
+ rail->sensor = sensor;
+ rail->idx = r->rail;
rail->mohm = r->resistor_mohm;
- rail->i2c = &i2c_bus->i2c;
- ++iccsense->rail_count;
+ list_add_tail(&rail->head, &iccsense->rails);
}
return 0;
}
+static int
+nvkm_iccsense_init(struct nvkm_subdev *subdev)
+{
+ struct nvkm_iccsense *iccsense = nvkm_iccsense(subdev);
+ struct nvkm_iccsense_sensor *sensor;
+ list_for_each_entry(sensor, &iccsense->sensors, head)
+ nvkm_iccsense_sensor_config(iccsense, sensor);
+ return 0;
+}
+
struct nvkm_subdev_func iccsense_func = {
.oneinit = nvkm_iccsense_oneinit,
+ .init = nvkm_iccsense_init,
.dtor = nvkm_iccsense_dtor,
};
@@ -218,7 +339,7 @@ void
nvkm_iccsense_ctor(struct nvkm_device *device, int index,
struct nvkm_iccsense *iccsense)
{
- nvkm_subdev_ctor(&iccsense_func, device, index, 0, &iccsense->subdev);
+ nvkm_subdev_ctor(&iccsense_func, device, index, &iccsense->subdev);
}
int
@@ -227,6 +348,8 @@ nvkm_iccsense_new_(struct nvkm_device *device, int index,
{
if (!(*iccsense = kzalloc(sizeof(**iccsense), GFP_KERNEL)))
return -ENOMEM;
+ INIT_LIST_HEAD(&(*iccsense)->sensors);
+ INIT_LIST_HEAD(&(*iccsense)->rails);
nvkm_iccsense_ctor(device, index, *iccsense);
return 0;
}
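As a worked check of the INA209/INA219 configuration word above, the field values listed in the comment OR together to exactly the constant written (register layout as stated there):

/* 0x0007 continuous shunt+bus | 0x0078 128-sample shunt average
 * | 0x0780 128-sample bus average | 0 (+-40 mV range) | 0 (16V FSR)
 */
u16 value = 0x0007 | 0x0078 | 0x0780;	/* == 0x07ff */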
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
index ed398b81e..b72c31d2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -2,12 +2,22 @@
#define __NVKM_ICCSENSE_PRIV_H__
#define nvkm_iccsense(p) container_of((p), struct nvkm_iccsense, subdev)
#include <subdev/iccsense.h>
+#include <subdev/bios/extdev.h>
-struct nvkm_iccsense_rail {
- int (*read)(struct nvkm_iccsense *, struct nvkm_iccsense_rail *);
+struct nvkm_iccsense_sensor {
+ struct list_head head;
+ int id;
+ enum nvbios_extdev_type type;
struct i2c_adapter *i2c;
u8 addr;
- u8 rail;
+ u8 rail_mask;
+};
+
+struct nvkm_iccsense_rail {
+ struct list_head head;
+ int (*read)(struct nvkm_iccsense *, struct nvkm_iccsense_rail *);
+ struct nvkm_iccsense_sensor *sensor;
+ u8 idx;
u8 mohm;
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index 1d7dd3829..8ed8f65ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -311,7 +311,7 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func,
struct nvkm_device *device, int index,
struct nvkm_instmem *imem)
{
- nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev);
+ nvkm_subdev_ctor(&nvkm_instmem, device, index, &imem->subdev);
imem->func = func;
spin_lock_init(&imem->lock);
INIT_LIST_HEAD(&imem->list);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
index 85b1464c0..39c2a38e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/base.c
@@ -138,7 +138,7 @@ nvkm_ltc_new_(const struct nvkm_ltc_func *func, struct nvkm_device *device,
if (!(ltc = *pltc = kzalloc(sizeof(*ltc), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_ltc, device, index, 0, &ltc->subdev);
+ nvkm_subdev_ctor(&nvkm_ltc, device, index, &ltc->subdev);
ltc->func = func;
ltc->zbc_min = 1; /* reserve 0 for disabled */
ltc->zbc_max = min(func->zbc, NVKM_LTC_MAX_ZBC_CNT) - 1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index bef325dcb..49695ac7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -1,7 +1,12 @@
nvkm-y += nvkm/subdev/mc/base.o
nvkm-y += nvkm/subdev/mc/nv04.o
+nvkm-y += nvkm/subdev/mc/nv11.o
+nvkm-y += nvkm/subdev/mc/nv17.o
nvkm-y += nvkm/subdev/mc/nv44.o
nvkm-y += nvkm/subdev/mc/nv50.o
+nvkm-y += nvkm/subdev/mc/g84.o
nvkm-y += nvkm/subdev/mc/g98.o
+nvkm-y += nvkm/subdev/mc/gt215.o
nvkm-y += nvkm/subdev/mc/gf100.o
+nvkm-y += nvkm/subdev/mc/gk104.o
nvkm-y += nvkm/subdev/mc/gk20a.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 954fbbe56..350a8caa8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -24,6 +24,7 @@
#include "priv.h"
#include <core/option.h>
+#include <subdev/top.h>
void
nvkm_mc_unk260(struct nvkm_mc *mc, u32 data)
@@ -58,10 +59,19 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
{
struct nvkm_device *device = mc->subdev.device;
struct nvkm_subdev *subdev;
- const struct nvkm_mc_intr *map = mc->func->intr;
- u32 stat, intr;
+ const struct nvkm_mc_map *map = mc->func->intr;
+ u32 stat, intr = nvkm_mc_intr_mask(mc);
+ u64 subdevs;
+
+ stat = nvkm_top_intr(device->top, intr, &subdevs);
+ while (subdevs) {
+ enum nvkm_devidx subidx = __ffs64(subdevs);
+ subdev = nvkm_device_subdev(device, subidx);
+ if (subdev)
+ nvkm_subdev_intr(subdev);
+ subdevs &= ~BIT_ULL(subidx);
+ }
- stat = intr = nvkm_mc_intr_mask(mc);
while (map->stat) {
if (intr & map->stat) {
subdev = nvkm_device_subdev(device, map->unit);
@@ -77,6 +87,36 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
*handled = intr != 0;
}
+static void
+nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ const struct nvkm_mc_map *map;
+ u64 pmc_enable;
+
+ if (!(pmc_enable = nvkm_top_reset(device->top, devidx))) {
+ for (map = mc->func->reset; map && map->stat; map++) {
+ if (map->unit == devidx) {
+ pmc_enable = map->stat;
+ break;
+ }
+ }
+ }
+
+ if (pmc_enable) {
+ nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
+ nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
+ nvkm_rd32(device, 0x000200);
+ }
+}
+
+void
+nvkm_mc_reset(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+{
+ if (likely(mc))
+ nvkm_mc_reset_(mc, devidx);
+}
+
static int
nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
{
@@ -117,7 +157,7 @@ nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_mc, device, index, 0, &mc->subdev);
+ nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
mc->func = func;
return 0;
}
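Taken together, the new reset path prefers a PTOP-provided mask and falls back to the per-chipset table before pulsing PMC_ENABLE; a condensed sketch mirroring the hunk above (the map-lookup helper is hypothetical):

u64 pmc_enable = nvkm_top_reset(device->top, devidx);
if (!pmc_enable)
	pmc_enable = mc_map_reset_mask(mc, devidx);	/* hypothetical helper */
if (pmc_enable) {
	/* Pulse low then high; the trailing read flushes posted writes. */
	nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
	nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
	nvkm_rd32(device, 0x000200);
}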
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
new file mode 100644
index 000000000..5c85b47f0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+static const struct nvkm_mc_map
+g84_mc_reset[] = {
+ { 0x04008000, NVKM_ENGINE_BSP },
+ { 0x02004000, NVKM_ENGINE_CIPHER },
+ { 0x01020000, NVKM_ENGINE_VP },
+ { 0x00400002, NVKM_ENGINE_MPEG },
+ { 0x00201000, NVKM_ENGINE_GR },
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ {}
+};
+
+const struct nvkm_mc_map
+g84_mc_intr[] = {
+ { 0x04000000, NVKM_ENGINE_DISP },
+ { 0x00020000, NVKM_ENGINE_VP },
+ { 0x00008000, NVKM_ENGINE_BSP },
+ { 0x00004000, NVKM_ENGINE_CIPHER },
+ { 0x00001000, NVKM_ENGINE_GR },
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00000001, NVKM_ENGINE_MPEG },
+ { 0x0002d101, NVKM_SUBDEV_FB },
+ { 0x10000000, NVKM_SUBDEV_BUS },
+ { 0x00200000, NVKM_SUBDEV_GPIO },
+ { 0x00200000, NVKM_SUBDEV_I2C },
+ { 0x00100000, NVKM_SUBDEV_TIMER },
+ {},
+};
+
+static const struct nvkm_mc_func
+g84_mc = {
+ .init = nv50_mc_init,
+ .intr = g84_mc_intr,
+ .intr_unarm = nv04_mc_intr_unarm,
+ .intr_rearm = nv04_mc_intr_rearm,
+ .intr_mask = nv04_mc_intr_mask,
+ .reset = g84_mc_reset,
+};
+
+int
+g84_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return nvkm_mc_new_(&g84_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
index 7344ad659..0280b43cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
@@ -23,24 +23,31 @@
*/
#include "priv.h"
-static const struct nvkm_mc_intr
-g98_mc_intr[] = {
- { 0x04000000, NVKM_ENGINE_DISP }, /* DISP first, so pageflip timestamps work */
- { 0x00000001, NVKM_ENGINE_MSPPP },
+static const struct nvkm_mc_map
+g98_mc_reset[] = {
+ { 0x04008000, NVKM_ENGINE_MSVLD },
+ { 0x02004000, NVKM_ENGINE_SEC },
+ { 0x01020000, NVKM_ENGINE_MSPDEC },
+ { 0x00400002, NVKM_ENGINE_MSPPP },
+ { 0x00201000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
- { 0x00001000, NVKM_ENGINE_GR },
- { 0x00004000, NVKM_ENGINE_SEC }, /* NV84:NVA3 */
- { 0x00008000, NVKM_ENGINE_MSVLD },
+ {}
+};
+
+static const struct nvkm_mc_map
+g98_mc_intr[] = {
+ { 0x04000000, NVKM_ENGINE_DISP },
{ 0x00020000, NVKM_ENGINE_MSPDEC },
- { 0x00040000, NVKM_SUBDEV_PMU }, /* NVA3:NVC0 */
- { 0x00080000, NVKM_SUBDEV_THERM }, /* NVA3:NVC0 */
- { 0x00100000, NVKM_SUBDEV_TIMER },
- { 0x00200000, NVKM_SUBDEV_GPIO }, /* PMGR->GPIO */
- { 0x00200000, NVKM_SUBDEV_I2C }, /* PMGR->I2C/AUX */
- { 0x00400000, NVKM_ENGINE_CE0 }, /* NVA3- */
+ { 0x00008000, NVKM_ENGINE_MSVLD },
+ { 0x00004000, NVKM_ENGINE_SEC },
+ { 0x00001000, NVKM_ENGINE_GR },
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00000001, NVKM_ENGINE_MSPPP },
+ { 0x0002d101, NVKM_SUBDEV_FB },
{ 0x10000000, NVKM_SUBDEV_BUS },
- { 0x80000000, NVKM_ENGINE_SW },
- { 0x0042d101, NVKM_SUBDEV_FB },
+ { 0x00200000, NVKM_SUBDEV_GPIO },
+ { 0x00200000, NVKM_SUBDEV_I2C },
+ { 0x00100000, NVKM_SUBDEV_TIMER },
{},
};
@@ -51,6 +58,7 @@ g98_mc = {
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
.intr_mask = nv04_mc_intr_mask,
+ .reset = g98_mc_reset,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index 122fe69e8..8397e223b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -23,28 +23,38 @@
*/
#include "priv.h"
-const struct nvkm_mc_intr
-gf100_mc_intr[] = {
- { 0x04000000, NVKM_ENGINE_DISP }, /* DISP first, so pageflip timestamps work. */
- { 0x00000001, NVKM_ENGINE_MSPPP },
- { 0x00000020, NVKM_ENGINE_CE0 },
- { 0x00000040, NVKM_ENGINE_CE1 },
- { 0x00000080, NVKM_ENGINE_CE2 },
- { 0x00000100, NVKM_ENGINE_FIFO },
- { 0x00001000, NVKM_ENGINE_GR },
- { 0x00002000, NVKM_SUBDEV_FB },
+static const struct nvkm_mc_map
+gf100_mc_reset[] = {
+ { 0x00020000, NVKM_ENGINE_MSPDEC },
{ 0x00008000, NVKM_ENGINE_MSVLD },
- { 0x00040000, NVKM_SUBDEV_THERM },
+ { 0x00001000, NVKM_ENGINE_GR },
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00000080, NVKM_ENGINE_CE1 },
+ { 0x00000040, NVKM_ENGINE_CE0 },
+ { 0x00000002, NVKM_ENGINE_MSPPP },
+ {}
+};
+
+static const struct nvkm_mc_map
+gf100_mc_intr[] = {
+ { 0x04000000, NVKM_ENGINE_DISP },
{ 0x00020000, NVKM_ENGINE_MSPDEC },
- { 0x00100000, NVKM_SUBDEV_TIMER },
- { 0x00200000, NVKM_SUBDEV_GPIO }, /* PMGR->GPIO */
- { 0x00200000, NVKM_SUBDEV_I2C }, /* PMGR->I2C/AUX */
- { 0x01000000, NVKM_SUBDEV_PMU },
- { 0x02000000, NVKM_SUBDEV_LTC },
- { 0x08000000, NVKM_SUBDEV_FB },
- { 0x10000000, NVKM_SUBDEV_BUS },
+ { 0x00008000, NVKM_ENGINE_MSVLD },
+ { 0x00001000, NVKM_ENGINE_GR },
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00000040, NVKM_ENGINE_CE1 },
+ { 0x00000020, NVKM_ENGINE_CE0 },
+ { 0x00000001, NVKM_ENGINE_MSPPP },
{ 0x40000000, NVKM_SUBDEV_IBUS },
- { 0x80000000, NVKM_ENGINE_SW },
+ { 0x10000000, NVKM_SUBDEV_BUS },
+ { 0x08000000, NVKM_SUBDEV_FB },
+ { 0x02000000, NVKM_SUBDEV_LTC },
+ { 0x01000000, NVKM_SUBDEV_PMU },
+ { 0x00200000, NVKM_SUBDEV_GPIO },
+ { 0x00200000, NVKM_SUBDEV_I2C },
+ { 0x00100000, NVKM_SUBDEV_TIMER },
+ { 0x00040000, NVKM_SUBDEV_THERM },
+ { 0x00002000, NVKM_SUBDEV_FB },
{},
};
@@ -87,6 +97,7 @@ gf100_mc = {
.intr_unarm = gf100_mc_intr_unarm,
.intr_rearm = gf100_mc_intr_rearm,
.intr_mask = gf100_mc_intr_mask,
+ .reset = gf100_mc_reset,
.unk260 = gf100_mc_unk260,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
new file mode 100644
index 000000000..317464212
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+const struct nvkm_mc_map
+gk104_mc_reset[] = {
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ {}
+};
+
+const struct nvkm_mc_map
+gk104_mc_intr[] = {
+ { 0x04000000, NVKM_ENGINE_DISP },
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x40000000, NVKM_SUBDEV_IBUS },
+ { 0x10000000, NVKM_SUBDEV_BUS },
+ { 0x08000000, NVKM_SUBDEV_FB },
+ { 0x02000000, NVKM_SUBDEV_LTC },
+ { 0x01000000, NVKM_SUBDEV_PMU },
+ { 0x00200000, NVKM_SUBDEV_GPIO },
+ { 0x00200000, NVKM_SUBDEV_I2C },
+ { 0x00100000, NVKM_SUBDEV_TIMER },
+ { 0x00040000, NVKM_SUBDEV_THERM },
+ { 0x00002000, NVKM_SUBDEV_FB },
+ {},
+};
+
+static const struct nvkm_mc_func
+gk104_mc = {
+ .init = nv50_mc_init,
+ .intr = gk104_mc_intr,
+ .intr_unarm = gf100_mc_intr_unarm,
+ .intr_rearm = gf100_mc_intr_rearm,
+ .intr_mask = gf100_mc_intr_mask,
+ .reset = gk104_mc_reset,
+ .unk260 = gf100_mc_unk260,
+};
+
+int
+gk104_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return nvkm_mc_new_(&gk104_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
index d92efb33b..60b044f51 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
@@ -26,10 +26,11 @@
static const struct nvkm_mc_func
gk20a_mc = {
.init = nv50_mc_init,
- .intr = gf100_mc_intr,
+ .intr = gk104_mc_intr,
.intr_unarm = gf100_mc_intr_unarm,
.intr_rearm = gf100_mc_intr_rearm,
.intr_mask = gf100_mc_intr_mask,
+ .reset = gk104_mc_reset,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
new file mode 100644
index 000000000..aad0ba95b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include "priv.h"
+
+static const struct nvkm_mc_map
+gt215_mc_reset[] = {
+ { 0x04008000, NVKM_ENGINE_MSVLD },
+ { 0x01020000, NVKM_ENGINE_MSPDEC },
+ { 0x00802000, NVKM_ENGINE_CE0 },
+ { 0x00400002, NVKM_ENGINE_MSPPP },
+ { 0x00201000, NVKM_ENGINE_GR },
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ {}
+};
+
+static const struct nvkm_mc_map
+gt215_mc_intr[] = {
+ { 0x04000000, NVKM_ENGINE_DISP },
+ { 0x00400000, NVKM_ENGINE_CE0 },
+ { 0x00020000, NVKM_ENGINE_MSPDEC },
+ { 0x00008000, NVKM_ENGINE_MSVLD },
+ { 0x00001000, NVKM_ENGINE_GR },
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00000001, NVKM_ENGINE_MSPPP },
+ { 0x00429101, NVKM_SUBDEV_FB },
+ { 0x10000000, NVKM_SUBDEV_BUS },
+ { 0x00200000, NVKM_SUBDEV_GPIO },
+ { 0x00200000, NVKM_SUBDEV_I2C },
+ { 0x00100000, NVKM_SUBDEV_TIMER },
+ { 0x00080000, NVKM_SUBDEV_THERM },
+ { 0x00040000, NVKM_SUBDEV_PMU },
+ {},
+};
+
+static const struct nvkm_mc_func
+gt215_mc = {
+ .init = nv50_mc_init,
+ .intr = gt215_mc_intr,
+ .intr_unarm = nv04_mc_intr_unarm,
+ .intr_rearm = nv04_mc_intr_rearm,
+ .intr_mask = nv04_mc_intr_mask,
+ .reset = gt215_mc_reset,
+};
+
+int
+gt215_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return nvkm_mc_new_(&gt215_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
index d282ec155..a062624e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
@@ -23,18 +23,20 @@
*/
#include "priv.h"
-const struct nvkm_mc_intr
-nv04_mc_intr[] = {
- { 0x00000001, NVKM_ENGINE_MPEG }, /* NV17- MPEG/ME */
+const struct nvkm_mc_map
+nv04_mc_reset[] = {
+ { 0x00001000, NVKM_ENGINE_GR },
{ 0x00000100, NVKM_ENGINE_FIFO },
+ {}
+};
+
+static const struct nvkm_mc_map
+nv04_mc_intr[] = {
+ { 0x01010000, NVKM_ENGINE_DISP },
{ 0x00001000, NVKM_ENGINE_GR },
- { 0x00010000, NVKM_ENGINE_DISP },
- { 0x00020000, NVKM_ENGINE_VP }, /* NV40- */
- { 0x00100000, NVKM_SUBDEV_TIMER },
- { 0x01000000, NVKM_ENGINE_DISP }, /* NV04- PCRTC0 */
- { 0x02000000, NVKM_ENGINE_DISP }, /* NV11- PCRTC1 */
+ { 0x00000100, NVKM_ENGINE_FIFO },
{ 0x10000000, NVKM_SUBDEV_BUS },
- { 0x80000000, NVKM_ENGINE_SW },
+ { 0x00100000, NVKM_SUBDEV_TIMER },
{}
};
@@ -74,6 +76,7 @@ nv04_mc = {
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
.intr_mask = nv04_mc_intr_mask,
+ .reset = nv04_mc_reset,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
new file mode 100644
index 000000000..55f0b9166
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static const struct nvkm_mc_map
+nv11_mc_intr[] = {
+ { 0x03010000, NVKM_ENGINE_DISP },
+ { 0x00001000, NVKM_ENGINE_GR },
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x10000000, NVKM_SUBDEV_BUS },
+ { 0x00100000, NVKM_SUBDEV_TIMER },
+ {}
+};
+
+static const struct nvkm_mc_func
+nv11_mc = {
+ .init = nv04_mc_init,
+ .intr = nv11_mc_intr,
+ .intr_unarm = nv04_mc_intr_unarm,
+ .intr_rearm = nv04_mc_intr_rearm,
+ .intr_mask = nv04_mc_intr_mask,
+ .reset = nv04_mc_reset,
+};
+
+int
+nv11_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return nvkm_mc_new_(&nv11_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
new file mode 100644
index 000000000..c40fa67f7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+const struct nvkm_mc_map
+nv17_mc_reset[] = {
+ { 0x00001000, NVKM_ENGINE_GR },
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00000002, NVKM_ENGINE_MPEG },
+ {}
+};
+
+const struct nvkm_mc_map
+nv17_mc_intr[] = {
+ { 0x03010000, NVKM_ENGINE_DISP },
+ { 0x00001000, NVKM_ENGINE_GR },
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00000001, NVKM_ENGINE_MPEG },
+ { 0x10000000, NVKM_SUBDEV_BUS },
+ { 0x00100000, NVKM_SUBDEV_TIMER },
+ {}
+};
+
+static const struct nvkm_mc_func
+nv17_mc = {
+ .init = nv04_mc_init,
+ .intr = nv17_mc_intr,
+ .intr_unarm = nv04_mc_intr_unarm,
+ .intr_rearm = nv04_mc_intr_rearm,
+ .intr_mask = nv04_mc_intr_mask,
+ .reset = nv17_mc_reset,
+};
+
+int
+nv17_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return nvkm_mc_new_(&nv17_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
index 9a3ac9965..cc56271db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
@@ -40,10 +40,11 @@ nv44_mc_init(struct nvkm_mc *mc)
static const struct nvkm_mc_func
nv44_mc = {
.init = nv44_mc_init,
- .intr = nv04_mc_intr,
+ .intr = nv17_mc_intr,
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
.intr_mask = nv04_mc_intr_mask,
+ .reset = nv17_mc_reset,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
index 5f27d7b8f..343b60785 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
@@ -23,21 +23,17 @@
*/
#include "priv.h"
-const struct nvkm_mc_intr
+static const struct nvkm_mc_map
nv50_mc_intr[] = {
- { 0x04000000, NVKM_ENGINE_DISP }, /* DISP before FIFO, so pageflip-timestamping works! */
- { 0x00000001, NVKM_ENGINE_MPEG },
- { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x04000000, NVKM_ENGINE_DISP },
{ 0x00001000, NVKM_ENGINE_GR },
- { 0x00004000, NVKM_ENGINE_CIPHER }, /* NV84- */
- { 0x00008000, NVKM_ENGINE_BSP }, /* NV84- */
- { 0x00020000, NVKM_ENGINE_VP }, /* NV84- */
- { 0x00100000, NVKM_SUBDEV_TIMER },
- { 0x00200000, NVKM_SUBDEV_GPIO }, /* PMGR->GPIO */
- { 0x00200000, NVKM_SUBDEV_I2C }, /* PMGR->I2C/AUX */
+ { 0x00000100, NVKM_ENGINE_FIFO },
+ { 0x00000001, NVKM_ENGINE_MPEG },
+ { 0x00001101, NVKM_SUBDEV_FB },
{ 0x10000000, NVKM_SUBDEV_BUS },
- { 0x80000000, NVKM_ENGINE_SW },
- { 0x0002d101, NVKM_SUBDEV_FB },
+ { 0x00200000, NVKM_SUBDEV_GPIO },
+ { 0x00200000, NVKM_SUBDEV_I2C },
+ { 0x00100000, NVKM_SUBDEV_TIMER },
{},
};
@@ -55,6 +51,7 @@ nv50_mc = {
.intr_unarm = nv04_mc_intr_unarm,
.intr_rearm = nv04_mc_intr_rearm,
.intr_mask = nv04_mc_intr_mask,
+ .reset = nv17_mc_reset,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index 307f6c692..a12038118 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -6,37 +6,42 @@
int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
int index, struct nvkm_mc **);
-struct nvkm_mc_intr {
+struct nvkm_mc_map {
u32 stat;
u32 unit;
};
struct nvkm_mc_func {
void (*init)(struct nvkm_mc *);
- const struct nvkm_mc_intr *intr;
+ const struct nvkm_mc_map *intr;
/* disable reporting of interrupts to host */
void (*intr_unarm)(struct nvkm_mc *);
/* enable reporting of interrupts to host */
void (*intr_rearm)(struct nvkm_mc *);
/* retrieve pending interrupt mask (NV_PMC_INTR) */
u32 (*intr_mask)(struct nvkm_mc *);
+ const struct nvkm_mc_map *reset;
void (*unk260)(struct nvkm_mc *, u32);
};
void nv04_mc_init(struct nvkm_mc *);
-extern const struct nvkm_mc_intr nv04_mc_intr[];
void nv04_mc_intr_unarm(struct nvkm_mc *);
void nv04_mc_intr_rearm(struct nvkm_mc *);
u32 nv04_mc_intr_mask(struct nvkm_mc *);
+extern const struct nvkm_mc_map nv04_mc_reset[];
+
+extern const struct nvkm_mc_map nv17_mc_intr[];
+extern const struct nvkm_mc_map nv17_mc_reset[];
void nv44_mc_init(struct nvkm_mc *);
void nv50_mc_init(struct nvkm_mc *);
-extern const struct nvkm_mc_intr nv50_mc_intr[];
-extern const struct nvkm_mc_intr gf100_mc_intr[];
void gf100_mc_intr_unarm(struct nvkm_mc *);
void gf100_mc_intr_rearm(struct nvkm_mc *);
u32 gf100_mc_intr_mask(struct nvkm_mc *);
void gf100_mc_unk260(struct nvkm_mc *, u32);
+
+extern const struct nvkm_mc_map gk104_mc_intr[];
+extern const struct nvkm_mc_map gk104_mc_reset[];
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index e04a2296e..5df9669ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -524,7 +524,7 @@ void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
int index, struct nvkm_mmu *mmu)
{
- nvkm_subdev_ctor(&nvkm_mmu, device, index, 0, &mmu->subdev);
+ nvkm_subdev_ctor(&nvkm_mmu, device, index, &mmu->subdev);
mmu->func = func;
mmu->limit = func->limit;
mmu->dma_bits = func->dma_bits;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
index 9700a7625..21b65ee25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/base.c
@@ -241,7 +241,7 @@ nvkm_mxm_new_(struct nvkm_device *device, int index, struct nvkm_mxm **pmxm)
if (!(mxm = *pmxm = kzalloc(sizeof(*mxm), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_mxm, device, index, 0, &mxm->subdev);
+ nvkm_subdev_ctor(&nvkm_mxm, device, index, &mxm->subdev);
data = mxm_table(bios, &ver, &len);
if (!data || !(ver = nvbios_rd08(bios, data))) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index 65057c831..6b0328bd7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -168,7 +168,7 @@ nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_pci_func, device, index, 0, &pci->subdev);
+ nvkm_subdev_ctor(&nvkm_pci_func, device, index, &pci->subdev);
pci->func = func;
pci->pdev = device->func->pci(device)->pdev;
pci->irq = -1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index d95eb8659..8dd164d13 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -40,21 +40,23 @@ nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
struct nvkm_device *device = subdev->device;
u32 addr;
+ mutex_lock(&subdev->mutex);
/* wait for a free slot in the fifo */
addr = nvkm_rd32(device, 0x10a4a0);
if (nvkm_msec(device, 2000,
u32 tmp = nvkm_rd32(device, 0x10a4b0);
if (tmp != (addr ^ 8))
break;
- ) < 0)
+ ) < 0) {
+ mutex_unlock(&subdev->mutex);
return -EBUSY;
+ }
/* we currently only support a single process at a time waiting
* on a synchronous reply, take the PMU mutex and tell the
* receive handler what we're waiting for
*/
if (reply) {
- mutex_lock(&subdev->mutex);
pmu->recv.message = message;
pmu->recv.process = process;
}
@@ -81,9 +83,9 @@ nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
wait_event(pmu->recv.wait, (pmu->recv.process == 0));
reply[0] = pmu->recv.data[0];
reply[1] = pmu->recv.data[1];
- mutex_unlock(&subdev->mutex);
}
+ mutex_unlock(&subdev->mutex);
return 0;
}
@@ -272,7 +274,7 @@ nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
struct nvkm_pmu *pmu;
if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_pmu, device, index, 0, &pmu->subdev);
+ nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
pmu->func = func;
INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
init_waitqueue_head(&pmu->recv.wait);
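The PMU hunk above widens the mutex so the free-slot poll is serialized too, not only the reply bookkeeping; a sketch of the resulting lock scope (the poll helper is hypothetical shorthand for the 0x10a4a0/0x10a4b0 wait):

mutex_lock(&subdev->mutex);
if (wait_for_free_slot(device) < 0) {	/* hypothetical helper */
	mutex_unlock(&subdev->mutex);	/* unlock now covers the early exit */
	return -EBUSY;
}
/* ... queue the message, optionally wait for the reply ... */
mutex_unlock(&subdev->mutex);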
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
index 6689d0290..f996d90c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk20a.c
@@ -220,7 +220,7 @@ gk20a_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
pmu->base.func = &func;
*ppmu = &pmu->base;
- nvkm_subdev_ctor(&gk20a_pmu, device, index, 0, &pmu->base.subdev);
+ nvkm_subdev_ctor(&gk20a_pmu, device, index, &pmu->base.subdev);
pmu->data = &gk20a_dvfs_data;
nvkm_alarm_init(&pmu->alarm, gk20a_pmu_dvfs_work);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
index 520facf9b..213fdba6c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
@@ -264,7 +264,7 @@ nvkm_secboot_ctor(const struct nvkm_secboot_func *func,
{
unsigned long fid;
- nvkm_subdev_ctor(&nvkm_secboot, device, index, 0, &sb->subdev);
+ nvkm_subdev_ctor(&nvkm_secboot, device, index, &sb->subdev);
sb->func = func;
/* setup the performing falcon's base address and masks */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 949dc6101..8894fee30 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -366,7 +366,7 @@ nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_therm, device, index, 0, &therm->subdev);
+ nvkm_subdev_ctor(&nvkm_therm, device, index, &therm->subdev);
therm->func = func;
nvkm_alarm_init(&therm->alarm, nvkm_therm_alarm);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index d4dae1f12..07dc82bfe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -143,7 +143,7 @@ nvkm_timer_new_(const struct nvkm_timer_func *func, struct nvkm_device *device,
if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL)))
return -ENOMEM;
- nvkm_subdev_ctor(&nvkm_timer, device, index, 0, &tmr->subdev);
+ nvkm_subdev_ctor(&nvkm_timer, device, index, &tmr->subdev);
tmr->func = func;
INIT_LIST_HEAD(&tmr->alarms);
spin_lock_init(&tmr->lock);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
new file mode 100644
index 000000000..1078401cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/Kbuild
@@ -0,0 +1,2 @@
+nvkm-y += nvkm/subdev/top/base.o
+nvkm-y += nvkm/subdev/top/gk104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
new file mode 100644
index 000000000..a1b264664
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+struct nvkm_top_device *
+nvkm_top_device_new(struct nvkm_top *top)
+{
+ struct nvkm_top_device *info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (info) {
+ info->index = NVKM_SUBDEV_NR;
+ info->addr = 0;
+ info->fault = -1;
+ info->engine = -1;
+ info->runlist = -1;
+ info->reset = -1;
+ info->intr = -1;
+ list_add_tail(&info->head, &top->device);
+ }
+ return info;
+}
+
+u32
+nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index)
+{
+ struct nvkm_top_device *info;
+
+ if (top) {
+ list_for_each_entry(info, &top->device, head) {
+ if (info->index == index && info->reset >= 0)
+ return BIT(info->reset);
+ }
+ }
+
+ return 0;
+}
+
+u32
+nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs)
+{
+ struct nvkm_top_device *info;
+ u64 subdevs = 0;
+ u32 handled = 0;
+
+ if (top) {
+ list_for_each_entry(info, &top->device, head) {
+ if (info->index != NVKM_SUBDEV_NR && info->intr >= 0) {
+ if (intr & BIT(info->intr)) {
+ subdevs |= BIT_ULL(info->index);
+ handled |= BIT(info->intr);
+ }
+ }
+ }
+ }
+
+ *psubdevs = subdevs;
+ return intr & ~handled;
+}
+
+enum nvkm_devidx
+nvkm_top_fault(struct nvkm_top *top, int fault)
+{
+ struct nvkm_top_device *info;
+
+ list_for_each_entry(info, &top->device, head) {
+ if (info->fault == fault)
+ return info->index;
+ }
+
+ return NVKM_SUBDEV_NR;
+}
+
+enum nvkm_devidx
+nvkm_top_engine(struct nvkm_top *top, int index, int *runl, int *engn)
+{
+ struct nvkm_top_device *info;
+ int n = 0;
+
+ list_for_each_entry(info, &top->device, head) {
+ if (info->engine >= 0 && info->runlist >= 0 && n++ == index) {
+ *runl = info->runlist;
+ *engn = info->engine;
+ return info->index;
+ }
+ }
+
+ return -ENODEV;
+}
+
+static int
+nvkm_top_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_top *top = nvkm_top(subdev);
+ return top->func->oneinit(top);
+}
+
+static void *
+nvkm_top_dtor(struct nvkm_subdev *subdev)
+{
+ struct nvkm_top *top = nvkm_top(subdev);
+ struct nvkm_top_device *info, *temp;
+
+ list_for_each_entry_safe(info, temp, &top->device, head) {
+ list_del(&info->head);
+ kfree(info);
+ }
+
+ return top;
+}
+
+static const struct nvkm_subdev_func
+nvkm_top = {
+ .dtor = nvkm_top_dtor,
+ .oneinit = nvkm_top_oneinit,
+};
+
+int
+nvkm_top_new_(const struct nvkm_top_func *func, struct nvkm_device *device,
+ int index, struct nvkm_top **ptop)
+{
+ struct nvkm_top *top;
+ if (!(top = *ptop = kzalloc(sizeof(*top), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&nvkm_top, device, index, &top->subdev);
+ top->func = func;
+ INIT_LIST_HEAD(&top->device);
+ return 0;
+}
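To make the split in nvkm_top_intr() concrete: it consumes the bits PTOP describes and returns the remainder for the legacy map scan. A small usage sketch with illustrative values:

u64 subdevs;
u32 intr = 0x00001100;	/* say, bit 12 (GR per PTOP) + bit 8 (FIFO) */
u32 rest = nvkm_top_intr(top, intr, &subdevs);
/* If PTOP lists GR with intr bit 12 and nothing for bit 8, then:
 *   subdevs == BIT_ULL(NVKM_ENGINE_GR)	-- handled via nvkm_subdev_intr()
 *   rest    == 0x00000100		-- left for the static intr map
 */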
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
new file mode 100644
index 000000000..e06acc340
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "priv.h"
+
+static int
+gk104_top_oneinit(struct nvkm_top *top)
+{
+ struct nvkm_subdev *subdev = &top->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_top_device *info = NULL;
+ u32 data, type;
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if (!info) {
+ if (!(info = nvkm_top_device_new(top)))
+ return -ENOMEM;
+ type = ~0;
+ }
+
+ data = nvkm_rd32(device, 0x022700 + (i * 0x04));
+ nvkm_trace(subdev, "%02x: %08x\n", i, data);
+ switch (data & 0x00000003) {
+ case 0x00000000: /* NOT_VALID */
+ continue;
+ case 0x00000001: /* DATA */
+ info->addr = (data & 0x00fff000);
+ info->fault = (data & 0x000000f8) >> 3;
+ break;
+ case 0x00000002: /* ENUM */
+ if (data & 0x00000020)
+ info->engine = (data & 0x3c000000) >> 26;
+ if (data & 0x00000010)
+ info->runlist = (data & 0x01e00000) >> 21;
+ if (data & 0x00000008)
+ info->intr = (data & 0x000f8000) >> 15;
+ if (data & 0x00000004)
+ info->reset = (data & 0x00003e00) >> 9;
+ break;
+ case 0x00000003: /* ENGINE_TYPE */
+ type = (data & 0x7ffffffc) >> 2;
+ break;
+ }
+
+ if (data & 0x80000000)
+ continue;
+
+ /* Translate engine type to NVKM engine identifier. */
+ switch (type) {
+ case 0x00000000: info->index = NVKM_ENGINE_GR; break;
+ case 0x00000001: info->index = NVKM_ENGINE_CE0; break;
+ case 0x00000002: info->index = NVKM_ENGINE_CE1; break;
+ case 0x00000003: info->index = NVKM_ENGINE_CE2; break;
+ case 0x00000008: info->index = NVKM_ENGINE_MSPDEC; break;
+ case 0x00000009: info->index = NVKM_ENGINE_MSPPP; break;
+ case 0x0000000a: info->index = NVKM_ENGINE_MSVLD; break;
+ case 0x0000000b: info->index = NVKM_ENGINE_MSENC; break;
+ case 0x0000000c: info->index = NVKM_ENGINE_VIC; break;
+ case 0x0000000d: info->index = NVKM_ENGINE_SEC; break;
+ case 0x0000000e: info->index = NVKM_ENGINE_NVENC0; break;
+ case 0x0000000f: info->index = NVKM_ENGINE_NVENC1; break;
+ case 0x00000010: info->index = NVKM_ENGINE_NVDEC; break;
+ default:
+ break;
+ }
+
+ nvkm_debug(subdev, "%02x (%8s): addr %06x fault %2d engine %2d "
+ "runlist %2d intr %2d reset %2d\n", type,
+ info->index == NVKM_SUBDEV_NR ? NULL :
+ nvkm_subdev_name[info->index],
+ info->addr, info->fault, info->engine, info->runlist,
+ info->intr, info->reset);
+ info = NULL;
+ }
+
+ return 0;
+}
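
For reference, here is a standalone decode of a single PTOP "ENUM" word using the same masks and shifts as the switch above. The sample value is invented (engine 1, runlist 2, intr 5, reset 3, all four presence bits set), an illustration rather than a dump from real hardware:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t data = 0x0442863e;	/* hypothetical 0x022700-range entry */

	if ((data & 0x00000003) == 0x00000002) {	/* ENUM */
		if (data & 0x00000020)
			printf("engine  %u\n", (data & 0x3c000000) >> 26);
		if (data & 0x00000010)
			printf("runlist %u\n", (data & 0x01e00000) >> 21);
		if (data & 0x00000008)
			printf("intr    %u\n", (data & 0x000f8000) >> 15);
		if (data & 0x00000004)
			printf("reset   %u\n", (data & 0x00003e00) >> 9);
	}
	if (!(data & 0x80000000))
		printf("last word of this device's description\n");
	return 0;
}

Bit 31 is the continuation flag tested by `data & 0x80000000` in the loop above: while it is set, the following words still describe the same device, so `info` is kept and only dropped to NULL once the final word has been parsed.
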
+
+static const struct nvkm_top_func
+gk104_top = {
+ .oneinit = gk104_top_oneinit,
+};
+
+int
+gk104_top_new(struct nvkm_device *device, int index, struct nvkm_top **ptop)
+{
+ return nvkm_top_new_(&gk104_top, device, index, ptop);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
new file mode 100644
index 000000000..adb3ed03d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
@@ -0,0 +1,25 @@
+#ifndef __NVKM_TOP_PRIV_H__
+#define __NVKM_TOP_PRIV_H__
+#define nvkm_top(p) container_of((p), struct nvkm_top, subdev)
+#include <subdev/top.h>
+
+struct nvkm_top_func {
+ int (*oneinit)(struct nvkm_top *);
+};
+
+int nvkm_top_new_(const struct nvkm_top_func *, struct nvkm_device *,
+ int, struct nvkm_top **);
+
+struct nvkm_top_device {
+ enum nvkm_devidx index;
+ u32 addr;
+ int fault;
+ int engine;
+ int runlist;
+ int reset;
+ int intr;
+ struct list_head head;
+};
+
+struct nvkm_top_device *nvkm_top_device_new(struct nvkm_top *);
+#endif
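
The nvkm_top() macro above is the usual container_of() upcast: given a pointer to the embedded nvkm_subdev, recover the enclosing nvkm_top. A standalone illustration with a userspace re-implementation of the macro and invented struct names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct subdev { int index; };
struct top { int magic; struct subdev subdev; };	/* member embedded by value */

int main(void)
{
	struct top t = { .magic = 1234, .subdev = { .index = 7 } };
	struct subdev *sd = &t.subdev;		/* what the callbacks receive */
	struct top *up = container_of(sd, struct top, subdev);

	printf("magic=%d index=%d\n", up->magic, sd->index);
	return 0;
}
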
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 50b5649ad..6b2d7531a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -177,7 +177,7 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
struct nvkm_bios *bios = device->bios;
int i;
- nvkm_subdev_ctor(&nvkm_volt, device, index, 0, &volt->subdev);
+ nvkm_subdev_ctor(&nvkm_volt, device, index, &volt->subdev);
volt->func = func;
/* Assuming the non-bios device should build the voltage table later */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
index b735173a1..420bd84d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c
@@ -56,7 +56,7 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv)
/* the blob uses this crystal frequency, let's use it too. */
div = 27648000 / bios->pwm_freq;
- duty = (uv - bios->base) * div / bios->pwm_range;
+ duty = DIV_ROUND_UP((uv - bios->base) * div, bios->pwm_range);
nvkm_wr32(device, 0x20340, div);
nvkm_wr32(device, 0x20344, 0x80000000 | duty);
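
The DIV_ROUND_UP change above matters because plain integer division truncates: a truncated PWM duty programs a voltage slightly below the one requested, whereas rounding up errs on the higher, safer side. A worked example with invented VBIOS numbers (the real pwm_freq/base/range come from the board's BIOS):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* as in the kernel */

int main(void)
{
	unsigned uv = 900000, base = 600000;		/* requested / base uV */
	unsigned pwm_freq = 2500000, range = 800000;	/* hypothetical values */
	unsigned div = 27648000 / pwm_freq;		/* = 11 */

	unsigned trunc = (uv - base) * div / range;			/* 4 */
	unsigned rounded = DIV_ROUND_UP((uv - base) * div, range);	/* 5 */

	printf("duty: truncated=%u rounded-up=%u\n", trunc, rounded);
	return 0;
}
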
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 73241c4eb..336ad4de9 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -2,6 +2,7 @@ config DRM_OMAP
tristate "OMAP DRM"
depends on DRM
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
+ select OMAP2_DSS
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 225fd8d6a..667ca4a24 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -9,6 +9,7 @@
* the Free Software Foundation.
*/
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index 8c246c213..9594ff7a2 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -14,7 +14,7 @@
* the Free Software Foundation.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index 2fd560288..671806ca7 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -9,7 +9,7 @@
* the Free Software Foundation.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index e780fd4f8..7c2331be8 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -9,7 +9,7 @@
* the Free Software Foundation.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 36485c213..2b118071b 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -14,7 +14,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 458f77bc4..ac680e1de 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -15,6 +15,7 @@
#include <linux/spi/spi.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <video/omapdss.h>
#include <video/omap-panel-data.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 780cb263a..38d2920a9 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -15,7 +15,7 @@
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_gpio.h>
#include <video/omapdss.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 529a01760..4363fffc8 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -10,7 +10,7 @@
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 31efcca80..deb416736 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -29,7 +29,7 @@
#include <linux/sched.h>
#include <linux/backlight.h>
#include <linux/fb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 03e2beb7b..d93175b03 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -14,7 +14,7 @@
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 8730646a0..56c43f355 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct regulator *vdds_dsi;
- int r;
if (dsi->vdds_dsi_reg != NULL)
return 0;
@@ -1180,15 +1179,6 @@ static int dsi_regulator_init(struct platform_device *dsidev)
return PTR_ERR(vdds_dsi);
}
- if (regulator_can_change_voltage(vdds_dsi)) {
- r = regulator_set_voltage(vdds_dsi, 1800000, 1800000);
- if (r) {
- devm_regulator_put(vdds_dsi);
- DSSERR("can't set the DSI regulator voltage\n");
- return r;
- }
- }
-
dsi->vdds_dsi_reg = vdds_dsi;
return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index f95ff319e..3303cfad4 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/gfp.h>
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index f892ae157..4d46cdf7a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -33,6 +33,7 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
+#include <linux/of.h>
#include <video/omapdss.h>
#include <sound/omap-hdmi-audio.h>
@@ -100,7 +101,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
static int hdmi_init_regulator(void)
{
- int r;
struct regulator *reg;
if (hdmi.vdda_reg != NULL)
@@ -114,15 +114,6 @@ static int hdmi_init_regulator(void)
return PTR_ERR(reg);
}
- if (regulator_can_change_voltage(reg)) {
- r = regulator_set_voltage(reg, 1800000, 1800000);
- if (r) {
- devm_regulator_put(reg);
- DSSWARN("can't set the regulator voltage\n");
- return r;
- }
- }
-
hdmi.vdda_reg = reg;
return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index fa72e735d..ef3afe99e 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -211,7 +211,7 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
{
DSSDBG("Enter hdmi_core_powerdown_disable\n");
- REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0);
+ REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0);
}
static void hdmi_core_swreset_release(struct hdmi_core_data *core)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index a43f7b10e..9255c0e1e 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -38,6 +38,7 @@
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
+#include <linux/of.h>
#include <video/omapdss.h>
#include <sound/omap-hdmi-audio.h>
@@ -119,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
static int hdmi_init_regulator(void)
{
- int r;
struct regulator *reg;
if (hdmi.vdda_reg != NULL)
@@ -131,15 +131,6 @@ static int hdmi_init_regulator(void)
return PTR_ERR(reg);
}
- if (regulator_can_change_voltage(reg)) {
- r = regulator_set_voltage(reg, 1800000, 1800000);
- if (r) {
- devm_regulator_put(reg);
- DSSWARN("can't set the regulator voltage\n");
- return r;
- }
- }
-
hdmi.vdda_reg = reg;
return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 6a397520c..8ab2093da 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
- const unsigned ss_scl_high = 4000; /* ns */
- const unsigned ss_scl_low = 4700; /* ns */
+ const unsigned ss_scl_high = 4600; /* ns */
+ const unsigned ss_scl_low = 5400; /* ns */
const unsigned fs_scl_high = 600; /* ns */
const unsigned fs_scl_low = 1300; /* ns */
const unsigned sda_hold = 1000; /* ns */
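
The old 4000/4700 ns values are exactly the I2C standard-mode minima for t_HIGH/t_LOW, leaving no margin for bus rise and fall times; the new values add a few hundred nanoseconds of slack. As a back-of-envelope check of what these periods mean in ICLK cycles (the ns-to-cycles conversion below is the obvious one and is assumed, not lifted from the driver):

#include <stdio.h>

int main(void)
{
	const unsigned long long iclk = 266000000;	/* Hz, DSS L3 ICLK */
	unsigned high_ns = 4600, low_ns = 5400;

	printf("t_HIGH ~ %llu cycles, t_LOW ~ %llu cycles\n",
	       iclk * high_ns / 1000000000ULL,
	       iclk * low_ns / 1000000000ULL);
	return 0;
}
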
@@ -458,7 +458,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
c = (ptr[1] >> 6) & 0x3;
m = (ptr[1] >> 4) & 0x3;
- r = (ptr[1] >> 0) & 0x3;
+ r = (ptr[1] >> 0) & 0xf;
itc = (ptr[2] >> 7) & 0x1;
ec = (ptr[2] >> 4) & 0x7;
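
The mask widening above is a real parsing fix: in a CEA-861 AVI InfoFrame this byte carries C1..C0 (colorimetry) in bits 7:6, M1..M0 (picture aspect) in bits 5:4 and R3..R0 (active-format aspect ratio) in bits 3:0, so R is four bits wide and the old 0x3 mask silently dropped R3..R2. Standalone decode with an invented sample byte:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t byte = 0x48;	/* hypothetical AVI InfoFrame byte */

	unsigned c = (byte >> 6) & 0x3;	/* colorimetry           */
	unsigned m = (byte >> 4) & 0x3;	/* picture aspect ratio  */
	unsigned r = (byte >> 0) & 0xf;	/* active format, 4 bits */

	printf("c=%u m=%u r=%u (old 0x3 mask would have given r=%u)\n",
	       c, m, r, byte & 0x3);
	return 0;
}
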
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
index 1f5d19c11..f98b750fc 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/seq_file.h>
#include <video/omapdss.h>
#include "dss.h"
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
index 06e23a7c4..f1015e8b8 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/seq_file.h>
#include <video/omapdss.h>
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 13442b905..055f62fca 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <video/omapdss.h>
#include "dss.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 6f5fc14fc..479bf2405 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -17,6 +17,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/seq_file.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index de275a5be..4ceed7a97 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/platform_device.h> /* platform_device() */
#include <linux/sched.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 80398a684..d86f54793 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -138,7 +138,7 @@ static bool omap_atomic_is_pending(struct omap_drm_private *priv,
}
static int omap_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state, bool async)
+ struct drm_atomic_state *state, bool nonblock)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_atomic_state_commit *commit;
@@ -177,7 +177,7 @@ static int omap_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(dev, state);
- if (async)
+ if (nonblock)
schedule_work(&commit->work);
else
omap_atomic_complete(commit);
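
The async-to-nonblock rename tracks the 4.7 DRM core API change; the driver logic is unchanged: a nonblocking commit queues the tail of the commit on a workqueue and returns, a blocking one applies it inline. A toy model of that split (invented names, plain C standing in for the kernel workqueue):

#include <stdio.h>

typedef void (*work_fn)(const char *);

static work_fn pending_work;		/* one-slot "workqueue" */
static const char *pending_arg;

static void commit_tail(const char *state)
{
	printf("applied %s\n", state);
}

static void commit(const char *state, int nonblock)
{
	if (nonblock) {			/* schedule_work() stand-in */
		pending_work = commit_tail;
		pending_arg = state;
		return;			/* caller does not wait */
	}
	commit_tail(state);		/* blocking path runs inline */
}

static void flush_workqueue(void)
{
	if (pending_work)
		pending_work(pending_arg);
	pending_work = NULL;
}

int main(void)
{
	commit("modeset A", 0);		/* applied immediately */
	commit("modeset B", 1);		/* deferred... */
	flush_workqueue();		/* ...applied here */
	return 0;
}
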
@@ -561,7 +561,7 @@ static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj)
return -ENOENT;
@@ -584,7 +584,7 @@ static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj)
return -ENOENT;
@@ -608,7 +608,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj)
return -ENOENT;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 0fbe17d0e..3f823c368 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -257,14 +257,14 @@ struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
/* should these be made into common util helpers?
*/
-static inline int objects_lookup(struct drm_device *dev,
+static inline int objects_lookup(
struct drm_file *filp, uint32_t pixel_format,
struct drm_gem_object **bos, const uint32_t *handles)
{
int i, n = drm_format_num_planes(pixel_format);
for (i = 0; i < n; i++) {
- bos[i] = drm_gem_object_lookup(dev, filp, handles[i]);
+ bos[i] = drm_gem_object_lookup(filp, handles[i]);
if (!bos[i])
goto fail;
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 610962396..f84570d16 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -17,6 +17,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/seq_file.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -378,7 +380,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *fb;
int ret;
- ret = objects_lookup(dev, file, mode_cmd->pixel_format,
+ ret = objects_lookup(file, mode_cmd->pixel_format,
bos, mode_cmd->handles);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 3cb16f0cf..89da41ac6 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -153,7 +153,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
- drm_gem_object_unreference(fbdev->bo);
+ drm_gem_object_unreference_unlocked(fbdev->bo);
ret = PTR_ERR(fb);
goto fail;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 907154f5b..03698b6c8 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -17,6 +17,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>
@@ -687,7 +688,7 @@ int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
int ret = 0;
/* GEM does all our handle to object mapping */
- obj = drm_gem_object_lookup(dev, file, handle);
+ obj = drm_gem_object_lookup(file, handle);
if (obj == NULL) {
ret = -ENOENT;
goto fail;
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 93ee538a9..5252ab720 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -245,7 +245,7 @@ omap_plane_atomic_duplicate_state(struct drm_plane *plane)
static void omap_plane_atomic_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- __drm_atomic_helper_plane_destroy_state(plane, state);
+ __drm_atomic_helper_plane_destroy_state(state);
kfree(to_omap_plane_state(state));
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index ceb20486d..3a7bdf1c8 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -72,6 +72,7 @@ struct panel_desc {
} delay;
u32 bus_format;
+ u32 bus_flags;
};
struct panel_simple {
@@ -116,7 +117,11 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
}
drm_display_mode_from_videomode(&vm, mode);
- drm_mode_set_name(mode);
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+	if (panel->desc->num_timings == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
num++;
@@ -132,6 +137,11 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
continue;
}
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (panel->desc->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
@@ -144,6 +154,7 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
if (panel->desc->bus_format)
drm_display_info_set_bus_formats(&connector->display_info,
&panel->desc->bus_format, 1);
+ connector->display_info.bus_flags = panel->desc->bus_flags;
return num;
}
@@ -813,6 +824,29 @@ static const struct panel_desc innolux_at043tn24 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode innolux_at070tn92_mode = {
+ .clock = 33333,
+ .hdisplay = 800,
+ .hsync_start = 800 + 210,
+ .hsync_end = 800 + 210 + 20,
+ .htotal = 800 + 210 + 20 + 46,
+ .vdisplay = 480,
+ .vsync_start = 480 + 22,
+ .vsync_end = 480 + 22 + 10,
+	.vtotal = 480 + 22 + 10 + 23,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc innolux_at070tn92 = {
+ .modes = &innolux_at070tn92_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct drm_display_mode innolux_g121i1_l01_mode = {
.clock = 71000,
.hdisplay = 1280,
@@ -1051,7 +1085,8 @@ static const struct panel_desc nec_nl4827hc19_05b = {
.width = 95,
.height = 54,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
};
static const struct display_timing okaya_rs800480t_7x0gp_timing = {
@@ -1084,6 +1119,63 @@ static const struct panel_desc okaya_rs800480t_7x0gp = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode olimex_lcd_olinuxino_43ts_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 5,
+ .hsync_end = 480 + 5 + 30,
+ .htotal = 480 + 5 + 30 + 10,
+ .vdisplay = 272,
+ .vsync_start = 272 + 8,
+ .vsync_end = 272 + 8 + 5,
+ .vtotal = 272 + 8 + 5 + 3,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc olimex_lcd_olinuxino_43ts = {
+ .modes = &olimex_lcd_olinuxino_43ts_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 105,
+ .height = 67,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
+/*
+ * 800x480 CVT. The panel appears to be quite accepting, at least as far as
+ * pixel clocks go, but this is the timing that was used in the Adafruit
+ * installation instructions.
+ */
+static const struct drm_display_mode ontat_yx700wv03_mode = {
+ .clock = 29500,
+ .hdisplay = 800,
+ .hsync_start = 824,
+ .hsync_end = 896,
+ .htotal = 992,
+ .vdisplay = 480,
+ .vsync_start = 483,
+ .vsync_end = 493,
+ .vtotal = 500,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+/*
+ * Specification at:
+ * https://www.adafruit.com/images/product-files/2406/c3163.pdf
+ */
+static const struct panel_desc ontat_yx700wv03 = {
+ .modes = &ontat_yx700wv03_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 154,
+ .height = 83,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
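
A quick sanity check on timings like the ones above: the nominal refresh rate follows from the pixel clock (which DRM stores in kHz) and the total raster size, e.g. for the ontat_yx700wv03 mode:

#include <stdio.h>

int main(void)
{
	unsigned clock = 29500;			/* kHz */
	unsigned htotal = 992, vtotal = 500;

	/* 29,500,000 / (992 * 500) = ~59.48 Hz, matching .vrefresh = 60 */
	printf("refresh = %.2f Hz\n",
	       (double)clock * 1000.0 / (htotal * vtotal));
	return 0;
}
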
+
static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
.clock = 25000,
.hdisplay = 480,
@@ -1201,6 +1293,51 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode tpk_f07a_0102_mode = {
+ .clock = 33260,
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 128,
+ .htotal = 800 + 40 + 128 + 88,
+ .vdisplay = 480,
+ .vsync_start = 480 + 10,
+ .vsync_end = 480 + 10 + 2,
+ .vtotal = 480 + 10 + 2 + 33,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc tpk_f07a_0102 = {
+ .modes = &tpk_f07a_0102_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
+static const struct drm_display_mode tpk_f10a_0102_mode = {
+ .clock = 45000,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 176,
+ .hsync_end = 1024 + 176 + 5,
+ .htotal = 1024 + 176 + 5 + 88,
+ .vdisplay = 600,
+ .vsync_start = 600 + 20,
+ .vsync_end = 600 + 20 + 5,
+ .vtotal = 600 + 20 + 5 + 25,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc tpk_f10a_0102 = {
+ .modes = &tpk_f10a_0102_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 223,
+ .height = 125,
+ },
+};
+
static const struct display_timing urt_umsh_8596md_timing = {
.pixelclock = { 33260000, 33260000, 33260000 },
.hactive = { 800, 800, 800 },
@@ -1296,6 +1433,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,at043tn24",
.data = &innolux_at043tn24,
}, {
+ .compatible = "innolux,at070tn92",
+ .data = &innolux_at070tn92,
+ }, {
.compatible ="innolux,g121i1-l01",
.data = &innolux_g121i1_l01
}, {
@@ -1329,6 +1469,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "okaya,rs800480t-7x0gp",
.data = &okaya_rs800480t_7x0gp,
}, {
+ .compatible = "olimex,lcd-olinuxino-43-ts",
+ .data = &olimex_lcd_olinuxino_43ts,
+ }, {
+ .compatible = "ontat,yx700wv03",
+ .data = &ontat_yx700wv03,
+ }, {
.compatible = "ortustech,com43h4m85ulc",
.data = &ortustech_com43h4m85ulc,
}, {
@@ -1344,6 +1490,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
}, {
+ .compatible = "tpk,f07a-0102",
+ .data = &tpk_f07a_0102,
+ }, {
+ .compatible = "tpk,f10a-0102",
+ .data = &tpk_f10a_0102,
+ }, {
.compatible = "urt,umsh-8596md-t",
.data = &urt_umsh_8596md_parallel,
}, {
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index fdc1833b1..b5d4b4136 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -624,7 +624,7 @@ static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stal
if (stall)
mutex_unlock(&qdev->surf_evict_mutex);
- ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
+ ret = ttm_bo_wait(&surf->tbo, true, !stall);
if (stall)
mutex_lock(&qdev->surf_evict_mutex);
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 030409a3e..8b5d54385 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -318,7 +318,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
if (!handle)
return qxl_hide_cursor(qdev);
- obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
DRM_ERROR("cannot find cursor object\n");
return -ENOENT;
@@ -465,7 +465,7 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
.page_flip = qxl_crtc_page_flip,
};
-static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
+void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
@@ -527,12 +527,13 @@ int
qxl_framebuffer_init(struct drm_device *dev,
struct qxl_framebuffer *qfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
+ struct drm_gem_object *obj,
+ const struct drm_framebuffer_funcs *funcs)
{
int ret;
qfb->obj = obj;
- ret = drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs);
+ ret = drm_framebuffer_init(dev, &qfb->base, funcs);
if (ret) {
qfb->obj = NULL;
return ret;
@@ -993,13 +994,15 @@ qxl_user_framebuffer_create(struct drm_device *dev,
struct qxl_framebuffer *qxl_fb;
int ret;
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
+ if (!obj)
+ return NULL;
qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL);
if (qxl_fb == NULL)
return NULL;
- ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj);
+ ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj, &qxl_fb_funcs);
if (ret) {
kfree(qxl_fb);
drm_gem_object_unreference_unlocked(obj);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 7307b07fe..dc9df5fe5 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -272,10 +272,8 @@ static struct drm_driver qxl_driver = {
static int __init qxl_init(void)
{
-#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && qxl_modeset == -1)
return -EINVAL;
-#endif
if (qxl_modeset == 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 3f3897eb4..3ad6604b3 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -324,8 +324,6 @@ struct qxl_device {
struct workqueue_struct *gc_queue;
struct work_struct gc_work;
- struct work_struct fb_work;
-
struct drm_property *hotplug_mode_update_property;
int monitors_config_width;
int monitors_config_height;
@@ -389,11 +387,13 @@ int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state);
/* qxl_display.c */
+void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb);
int
qxl_framebuffer_init(struct drm_device *dev,
struct qxl_framebuffer *rfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
+ struct drm_gem_object *obj,
+ const struct drm_framebuffer_funcs *funcs);
void qxl_display_read_client_monitors_config(struct qxl_device *qdev);
void qxl_send_monitors_config(struct qxl_device *qdev);
int qxl_create_monitors_object(struct qxl_device *qdev);
@@ -553,7 +553,6 @@ int qxl_irq_init(struct qxl_device *qdev);
irqreturn_t qxl_irq_handler(int irq, void *arg);
/* qxl_fb.c */
-int qxl_fb_init(struct qxl_device *qdev);
bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj);
int qxl_debugfs_add_files(struct qxl_device *qdev,
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index d34bb4130..5e65d5d2d 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -76,7 +76,7 @@ int qxl_mode_dumb_mmap(struct drm_file *file_priv,
struct qxl_bo *qobj;
BUG_ON(!offset_p);
- gobj = drm_gem_object_lookup(dev, file_priv, handle);
+ gobj = drm_gem_object_lookup(file_priv, handle);
if (gobj == NULL)
return -ENOENT;
qobj = gem_to_qxl_bo(gobj);
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 7136e521e..5ea57f632 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -46,15 +46,6 @@ struct qxl_fbdev {
struct list_head delayed_ops;
void *shadow;
int size;
-
- /* dirty memory logging */
- struct {
- spinlock_t lock;
- unsigned x1;
- unsigned y1;
- unsigned x2;
- unsigned y2;
- } dirty;
};
static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
@@ -82,169 +73,18 @@ static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image,
}
}
-static void qxl_fb_dirty_flush(struct fb_info *info)
-{
- struct qxl_fbdev *qfbdev = info->par;
- struct qxl_device *qdev = qfbdev->qdev;
- struct qxl_fb_image qxl_fb_image;
- struct fb_image *image = &qxl_fb_image.fb_image;
- unsigned long flags;
- u32 x1, x2, y1, y2;
-
- /* TODO: hard coding 32 bpp */
- int stride = qfbdev->qfb.base.pitches[0];
-
- spin_lock_irqsave(&qfbdev->dirty.lock, flags);
-
- x1 = qfbdev->dirty.x1;
- x2 = qfbdev->dirty.x2;
- y1 = qfbdev->dirty.y1;
- y2 = qfbdev->dirty.y2;
- qfbdev->dirty.x1 = 0;
- qfbdev->dirty.x2 = 0;
- qfbdev->dirty.y1 = 0;
- qfbdev->dirty.y2 = 0;
-
- spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);
-
- /*
- * we are using a shadow draw buffer, at qdev->surface0_shadow
- */
- qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
- image->dx = x1;
- image->dy = y1;
- image->width = x2 - x1 + 1;
- image->height = y2 - y1 + 1;
- image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
- warnings */
- image->bg_color = 0;
- image->depth = 32; /* TODO: take from somewhere? */
- image->cmap.start = 0;
- image->cmap.len = 0;
- image->cmap.red = NULL;
- image->cmap.green = NULL;
- image->cmap.blue = NULL;
- image->cmap.transp = NULL;
- image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);
-
- qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
- qxl_draw_opaque_fb(&qxl_fb_image, stride);
-}
-
-static void qxl_dirty_update(struct qxl_fbdev *qfbdev,
- int x, int y, int width, int height)
-{
- struct qxl_device *qdev = qfbdev->qdev;
- unsigned long flags;
- int x2, y2;
-
- x2 = x + width - 1;
- y2 = y + height - 1;
-
- spin_lock_irqsave(&qfbdev->dirty.lock, flags);
-
- if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) &&
- (qfbdev->dirty.x2 - qfbdev->dirty.x1)) {
- if (qfbdev->dirty.y1 < y)
- y = qfbdev->dirty.y1;
- if (qfbdev->dirty.y2 > y2)
- y2 = qfbdev->dirty.y2;
- if (qfbdev->dirty.x1 < x)
- x = qfbdev->dirty.x1;
- if (qfbdev->dirty.x2 > x2)
- x2 = qfbdev->dirty.x2;
- }
-
- qfbdev->dirty.x1 = x;
- qfbdev->dirty.x2 = x2;
- qfbdev->dirty.y1 = y;
- qfbdev->dirty.y2 = y2;
-
- spin_unlock_irqrestore(&qfbdev->dirty.lock, flags);
-
- schedule_work(&qdev->fb_work);
-}
-
-static void qxl_deferred_io(struct fb_info *info,
- struct list_head *pagelist)
-{
- struct qxl_fbdev *qfbdev = info->par;
- unsigned long start, end, min, max;
- struct page *page;
- int y1, y2;
-
- min = ULONG_MAX;
- max = 0;
- list_for_each_entry(page, pagelist, lru) {
- start = page->index << PAGE_SHIFT;
- end = start + PAGE_SIZE - 1;
- min = min(min, start);
- max = max(max, end);
- }
-
- if (min < max) {
- y1 = min / info->fix.line_length;
- y2 = (max / info->fix.line_length) + 1;
- qxl_dirty_update(qfbdev, 0, y1, info->var.xres, y2 - y1);
- }
-};
-
static struct fb_deferred_io qxl_defio = {
.delay = QXL_DIRTY_DELAY,
- .deferred_io = qxl_deferred_io,
+ .deferred_io = drm_fb_helper_deferred_io,
};
-static void qxl_fb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- struct qxl_fbdev *qfbdev = info->par;
-
- drm_fb_helper_sys_fillrect(info, rect);
- qxl_dirty_update(qfbdev, rect->dx, rect->dy, rect->width,
- rect->height);
-}
-
-static void qxl_fb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- struct qxl_fbdev *qfbdev = info->par;
-
- drm_fb_helper_sys_copyarea(info, area);
- qxl_dirty_update(qfbdev, area->dx, area->dy, area->width,
- area->height);
-}
-
-static void qxl_fb_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
- struct qxl_fbdev *qfbdev = info->par;
-
- drm_fb_helper_sys_imageblit(info, image);
- qxl_dirty_update(qfbdev, image->dx, image->dy, image->width,
- image->height);
-}
-
-static void qxl_fb_work(struct work_struct *work)
-{
- struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
- struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;
-
- qxl_fb_dirty_flush(qfbdev->helper.fbdev);
-}
-
-int qxl_fb_init(struct qxl_device *qdev)
-{
- INIT_WORK(&qdev->fb_work, qxl_fb_work);
- return 0;
-}
-
static struct fb_ops qxlfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
- .fb_fillrect = qxl_fb_fillrect,
- .fb_copyarea = qxl_fb_copyarea,
- .fb_imageblit = qxl_fb_imageblit,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -338,6 +178,57 @@ out_unref:
return ret;
}
+/*
+ * FIXME
+ * It should not be necessary to have a special dirty() callback for fbdev.
+ */
+static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips)
+{
+ struct qxl_device *qdev = fb->dev->dev_private;
+ struct fb_info *info = qdev->fbdev_info;
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_fb_image qxl_fb_image;
+ struct fb_image *image = &qxl_fb_image.fb_image;
+
+ /* TODO: hard coding 32 bpp */
+ int stride = qfbdev->qfb.base.pitches[0];
+
+ /*
+ * we are using a shadow draw buffer, at qdev->surface0_shadow
+ */
+ qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", clips->x1, clips->x2,
+ clips->y1, clips->y2);
+ image->dx = clips->x1;
+ image->dy = clips->y1;
+ image->width = clips->x2 - clips->x1;
+ image->height = clips->y2 - clips->y1;
+ image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
+ warnings */
+ image->bg_color = 0;
+ image->depth = 32; /* TODO: take from somewhere? */
+ image->cmap.start = 0;
+ image->cmap.len = 0;
+ image->cmap.red = NULL;
+ image->cmap.green = NULL;
+ image->cmap.blue = NULL;
+ image->cmap.transp = NULL;
+ image->data = qfbdev->shadow + (clips->x1 * 4) + (stride * clips->y1);
+
+ qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
+ qxl_draw_opaque_fb(&qxl_fb_image, stride);
+
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs qxlfb_fb_funcs = {
+ .destroy = qxl_user_framebuffer_destroy,
+ .dirty = qxlfb_framebuffer_dirty,
+};
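
The conversion above replaces qxl's hand-rolled dirty tracking with the generic fbdev deferred-io path: the fb helper collects the pages touched through the fbdev mmap, turns the byte span into a single full-width clip rectangle, and hands it to the driver's ->dirty() hook, which qxlfb_framebuffer_dirty() then draws from the shadow buffer. A simplified model of that span-to-clip step (an illustration of the idea, not the helper's actual code):

#include <stdio.h>

struct clip { int x1, y1, x2, y2; };

static struct clip span_to_clip(unsigned long min_off, unsigned long max_off,
				unsigned line_length, unsigned xres)
{
	struct clip c = {
		.x1 = 0,			/* always full width */
		.x2 = xres,
		.y1 = min_off / line_length,	/* first touched row */
		.y2 = max_off / line_length + 1,/* one past the last row */
	};
	return c;
}

int main(void)
{
	/* pages covering bytes 8192..20479 of a framebuffer with 4096 pitch */
	struct clip c = span_to_clip(8192, 20479, 4096, 1024);

	printf("dirty rows %d..%d, width %d\n", c.y1, c.y2, c.x2);
	return 0;
}
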
+
static int qxlfb_create(struct qxl_fbdev *qfbdev,
struct drm_fb_helper_surface_size *sizes)
{
@@ -360,6 +251,9 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
+ if (ret < 0)
+ return ret;
+
qbo = gem_to_qxl_bo(gobj);
QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
mode_cmd.height, mode_cmd.pitches[0]);
@@ -383,7 +277,8 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
info->par = qfbdev;
- qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj);
+ qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj,
+ &qxlfb_fb_funcs);
fb = &qfbdev->qfb.base;
@@ -443,11 +338,11 @@ out_unref:
}
}
if (fb && ret) {
- drm_gem_object_unreference(gobj);
+ drm_gem_object_unreference_unlocked(gobj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
- drm_gem_object_unreference(gobj);
+ drm_gem_object_unreference_unlocked(gobj);
return ret;
}
@@ -504,7 +399,6 @@ int qxl_fbdev_init(struct qxl_device *qdev)
qfbdev->qdev = qdev;
qdev->mode_info.qfbdev = qfbdev;
spin_lock_init(&qfbdev->delayed_ops_lock);
- spin_lock_init(&qfbdev->dirty.lock);
INIT_LIST_HEAD(&qfbdev->delayed_ops);
drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 7c2e78201..5a4c8c492 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -107,15 +107,14 @@ apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
}
/* return holding the reference to this object */
-static int qxlhw_handle_to_bo(struct qxl_device *qdev,
- struct drm_file *file_priv, uint64_t handle,
+static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
struct qxl_release *release, struct qxl_bo **qbo_p)
{
struct drm_gem_object *gobj;
struct qxl_bo *qobj;
int ret;
- gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
+ gobj = drm_gem_object_lookup(file_priv, handle);
if (!gobj)
return -EINVAL;
@@ -221,7 +220,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
reloc_info[i].type = reloc.reloc_type;
if (reloc.dst_handle) {
- ret = qxlhw_handle_to_bo(qdev, file_priv, reloc.dst_handle, release,
+ ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release,
&reloc_info[i].dst_bo);
if (ret)
goto out_free_bos;
@@ -234,7 +233,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
/* reserve and validate the reloc dst bo */
if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) {
- ret = qxlhw_handle_to_bo(qdev, file_priv, reloc.src_handle, release,
+ ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release,
&reloc_info[i].src_bo);
if (ret)
goto out_free_bos;
@@ -314,7 +313,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
update_area->top >= update_area->bottom)
return -EINVAL;
- gobj = drm_gem_object_lookup(dev, file, update_area->handle);
+ gobj = drm_gem_object_lookup(file, update_area->handle);
if (gobj == NULL)
return -ENOENT;
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index b2977a181..2319800b7 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -261,10 +261,6 @@ static int qxl_device_init(struct qxl_device *qdev,
qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
INIT_WORK(&qdev->gc_work, qxl_gc_work);
- r = qxl_fb_init(qdev);
- if (r)
- return r;
-
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 37af1bc0d..4d8311373 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -31,7 +31,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
@@ -67,7 +67,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
@@ -79,7 +79,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
- r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ r = ttm_bo_wait(&bo->tbo, true, no_wait);
ttm_bo_unreserve(&bo->tbo);
return r;
}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 953412766..0738d74c8 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -384,6 +384,8 @@ static struct ttm_bo_driver qxl_bo_driver = {
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
.move_notify = &qxl_bo_move_notify,
+ .lru_tail = &ttm_bo_default_lru_tail,
+ .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int qxl_ttm_init(struct qxl_device *qdev)
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 532127c55..259cd6e6d 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -589,7 +589,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
/* use frac fb div on RS780/RS880 */
- if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+ if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+ && !radeon_crtc->ss_enabled)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
@@ -626,7 +627,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (radeon_crtc->ss.refdiv) {
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
- if (ASIC_IS_AVIVO(rdev))
+ if (rdev->family >= CHIP_RV770)
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
}
}
@@ -1375,6 +1376,11 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
break;
}
+ /* Make sure surface address is updated at vertical blank rather than
+ * horizontal blank
+ */
+ WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, 0);
+
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
upper_32_bits(fb_location));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
@@ -1427,12 +1433,6 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* pageflip setup */
- /* make sure flip is at vb rather than hb */
- tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
- tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
- WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
-
/* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
@@ -1466,7 +1466,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
- u32 tmp, viewport_w, viewport_h;
+ u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
@@ -1581,6 +1581,11 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
else
WREG32(AVIVO_D2VGA_CONTROL, 0);
+	/* Make sure surface address is updated at vertical blank rather than
+ * horizontal blank
+ */
+ WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, 0);
+
if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id) {
WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location));
@@ -1627,12 +1632,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
- /* pageflip setup */
- /* make sure flip is at vb rather than hb */
- tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
- tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
- WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
-
/* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 9c897d1ed..807756e4a 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -5182,15 +5182,21 @@ static void cik_gpu_pci_config_reset(struct radeon_device *rdev)
* cik_asic_reset - soft reset GPU
*
* @rdev: radeon_device pointer
+ * @hard: force hard reset
*
* Look up which blocks are hung and attempt
* to reset them.
* Returns 0 for success.
*/
-int cik_asic_reset(struct radeon_device *rdev)
+int cik_asic_reset(struct radeon_device *rdev, bool hard)
{
u32 reset_mask;
+ if (hard) {
+ cik_gpu_pci_config_reset(rdev);
+ return 0;
+ }
+
reset_mask = cik_gpu_check_soft_reset(rdev);
if (reset_mask)
@@ -8058,6 +8064,164 @@ restart_ih:
/*
* startup/shutdown callbacks
*/
+static void cik_uvd_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_uvd)
+ return;
+
+ r = radeon_uvd_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+ /*
+	 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+	 * cik_uvd_start() fail early and do nothing there, so it is
+	 * pointless to try to go through that code path; hence we
+	 * disable UVD here.
+ */
+ rdev->has_uvd = 0;
+ return;
+ }
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void cik_uvd_start(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_uvd)
+ return;
+
+ r = radeon_uvd_resume(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+ goto error;
+ }
+ r = uvd_v4_2_resume(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed UVD 4.2 resume (%d).\n", r);
+ goto error;
+ }
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+ goto error;
+ }
+ return;
+
+error:
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void cik_uvd_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int r;
+
+ if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+ return;
+
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+ return;
+ }
+ r = uvd_v1_0_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+ return;
+ }
+}
+
+static void cik_vce_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_vce)
+ return;
+
+ r = radeon_vce_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
+ /*
+	 * At this point rdev->vce.vcpu_bo is NULL, which makes
+	 * cik_vce_start() fail early and do nothing there, so it is
+	 * pointless to try to go through that code path; hence we
+	 * disable VCE here.
+ */
+ rdev->has_vce = 0;
+ return;
+ }
+ rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
+ rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
+}
+
+static void cik_vce_start(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_vce)
+ return;
+
+ r = radeon_vce_resume(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+ goto error;
+ }
+ r = vce_v2_0_resume(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+ goto error;
+ }
+ r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
+ goto error;
+ }
+ r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
+ goto error;
+ }
+ return;
+
+error:
+ rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
+ rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
+}
+
+static void cik_vce_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int r;
+
+ if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
+ return;
+
+ ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
+ return;
+ }
+ ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
+ return;
+ }
+ r = vce_v1_0_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
+ return;
+ }
+}
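
The helpers above give both IP blocks the same three-stage shape: init() allocates and, on failure, clears the has_* flag; start() brings the block up and, on failure, zeroes ring_size; resume() silently skips anything so marked. Errors are recorded as "block disabled" instead of being propagated up through cik_startup(). A minimal model of that pattern (invented names):

#include <stdio.h>

struct dev { int has_block; unsigned ring_size; };

static void block_init(struct dev *d, int init_ok)
{
	if (!d->has_block)
		return;
	if (!init_ok) {
		d->has_block = 0;	/* record the failure, don't propagate */
		return;
	}
	d->ring_size = 4096;
}

static void block_resume(struct dev *d)
{
	if (!d->has_block || !d->ring_size)
		return;			/* disabled earlier: silent no-op */
	printf("block resumed\n");
}

int main(void)
{
	struct dev good = { .has_block = 1 }, bad = { .has_block = 1 };

	block_init(&good, 1);
	block_init(&bad, 0);
	block_resume(&good);		/* prints */
	block_resume(&bad);		/* skipped */
	return 0;
}
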
+
/**
* cik_startup - program the asic to a functional state
*
@@ -8160,34 +8324,8 @@ static int cik_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_uvd_resume(rdev);
- if (!r) {
- r = uvd_v4_2_resume(rdev);
- if (!r) {
- r = radeon_fence_driver_start_ring(rdev,
- R600_RING_TYPE_UVD_INDEX);
- if (r)
- dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
- }
- }
- if (r)
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
-
- r = radeon_vce_resume(rdev);
- if (!r) {
- r = vce_v2_0_resume(rdev);
- if (!r)
- r = radeon_fence_driver_start_ring(rdev,
- TN_RING_TYPE_VCE1_INDEX);
- if (!r)
- r = radeon_fence_driver_start_ring(rdev,
- TN_RING_TYPE_VCE2_INDEX);
- }
- if (r) {
- dev_err(rdev->dev, "VCE init error (%d).\n", r);
- rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
- rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
- }
+ cik_uvd_start(rdev);
+ cik_vce_start(rdev);
/* Enable IRQ */
if (!rdev->irq.installed) {
@@ -8263,32 +8401,8 @@ static int cik_startup(struct radeon_device *rdev)
if (r)
return r;
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- RADEON_CP_PACKET2);
- if (!r)
- r = uvd_v1_0_init(rdev);
- if (r)
- DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
- }
-
- r = -ENOENT;
-
- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- VCE_CMD_NO_OP);
-
- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- VCE_CMD_NO_OP);
-
- if (!r)
- r = vce_v1_0_init(rdev);
- else if (r != -ENOENT)
- DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+ cik_uvd_resume(rdev);
+ cik_vce_resume(rdev);
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -8364,9 +8478,12 @@ int cik_suspend(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
cik_cp_enable(rdev, false);
cik_sdma_enable(rdev, false);
- uvd_v1_0_fini(rdev);
- radeon_uvd_suspend(rdev);
- radeon_vce_suspend(rdev);
+ if (rdev->has_uvd) {
+ uvd_v1_0_fini(rdev);
+ radeon_uvd_suspend(rdev);
+ }
+ if (rdev->has_vce)
+ radeon_vce_suspend(rdev);
cik_fini_pg(rdev);
cik_fini_cg(rdev);
cik_irq_suspend(rdev);
@@ -8492,23 +8609,8 @@ int cik_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 256 * 1024);
- r = radeon_uvd_init(rdev);
- if (!r) {
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
- }
-
- r = radeon_vce_init(rdev);
- if (!r) {
- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
-
- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
- }
+ cik_uvd_init(rdev);
+ cik_vce_init(rdev);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 391ff9d5d..cead2284f 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2071,6 +2071,7 @@
#define UVD_UDEC_DBW_ADDR_CONFIG 0xef54
#define UVD_LMI_EXT40_ADDR 0xf498
+#define UVD_GP_SCRATCH4 0xf4e0
#define UVD_LMI_ADDR_EXT 0xf594
#define UVD_VCPU_CACHE_OFFSET0 0xf608
#define UVD_VCPU_CACHE_SIZE0 0xf60c
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 34f7a29d9..db275b7ed 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1407,11 +1407,14 @@ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
* Triggers the actual pageflip by updating the primary
* surface base address (evergreen+).
*/
-void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base,
+ bool async)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
/* update the scanout addresses */
+ WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
+ async ? EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
upper_32_bits(crtc_base));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -1864,7 +1867,8 @@ void evergreen_hpd_init(struct radeon_device *rdev)
break;
}
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
- enabled |= 1 << radeon_connector->hpd.hpd;
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ enabled |= 1 << radeon_connector->hpd.hpd;
}
radeon_irq_kms_enable_hpd(rdev, enabled);
}
@@ -1907,7 +1911,8 @@ void evergreen_hpd_fini(struct radeon_device *rdev)
default:
break;
}
- disabled |= 1 << radeon_connector->hpd.hpd;
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ disabled |= 1 << radeon_connector->hpd.hpd;
}
radeon_irq_kms_disable_hpd(rdev, disabled);
}
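Both HPD hunks add the same guard because RADEON_HPD_NONE is a large sentinel in the driver's hpd enum, and shifting a 32-bit mask by it is undefined behavior. Reduced to a standalone sketch (the sentinel's exact value is an assumption here):

#include <stdint.h>
#include <stdio.h>

enum hpd_id { HPD_1 = 0, HPD_2, HPD_NONE = 0xff };	/* 0xff assumed */

int main(void)
{
	uint32_t enable = 0;
	enum hpd_id hpd = HPD_NONE;	/* connector without an HPD pin */

	if (hpd != HPD_NONE)		/* 1u << 0xff would be undefined */
		enable |= 1u << hpd;

	printf("mask = 0x%x\n", enable);	/* stays 0 */
	return 0;
}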
@@ -4136,10 +4141,15 @@ void evergreen_gpu_pci_config_reset(struct radeon_device *rdev)
}
}
-int evergreen_asic_reset(struct radeon_device *rdev)
+int evergreen_asic_reset(struct radeon_device *rdev, bool hard)
{
u32 reset_mask;
+ if (hard) {
+ evergreen_gpu_pci_config_reset(rdev);
+ return 0;
+ }
+
reset_mask = evergreen_gpu_check_soft_reset(rdev);
if (reset_mask)
@@ -5515,6 +5525,73 @@ restart_ih:
return IRQ_HANDLED;
}
+static void evergreen_uvd_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_uvd)
+ return;
+
+ r = radeon_uvd_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+ /*
+ * At this point rdev->uvd.vcpu_bo is NULL which trickles down
+ * to early fails uvd_v2_2_resume() and thus nothing happens
+ * there. So it is pointless to try to go through that code
+ * hence why we disable uvd here.
+ */
+ rdev->has_uvd = 0;
+ return;
+ }
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void evergreen_uvd_start(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_uvd)
+ return;
+
+ r = uvd_v2_2_resume(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+ goto error;
+ }
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+ goto error;
+ }
+ return;
+
+error:
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void evergreen_uvd_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int r;
+
+ if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+ return;
+
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+ return;
+ }
+ r = uvd_v1_0_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+ return;
+ }
+}
+
static int evergreen_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
@@ -5579,16 +5656,7 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
- r = uvd_v2_2_resume(rdev);
- if (!r) {
- r = radeon_fence_driver_start_ring(rdev,
- R600_RING_TYPE_UVD_INDEX);
- if (r)
- dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
- }
-
- if (r)
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+ evergreen_uvd_start(rdev);
/* Enable IRQ */
if (!rdev->irq.installed) {
@@ -5627,16 +5695,7 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- RADEON_CP_PACKET2);
- if (!r)
- r = uvd_v1_0_init(rdev);
-
- if (r)
- DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
- }
+ evergreen_uvd_resume(rdev);
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -5691,8 +5750,10 @@ int evergreen_suspend(struct radeon_device *rdev)
{
radeon_pm_suspend(rdev);
radeon_audio_fini(rdev);
- uvd_v1_0_fini(rdev);
- radeon_uvd_suspend(rdev);
+ if (rdev->has_uvd) {
+ uvd_v1_0_fini(rdev);
+ radeon_uvd_suspend(rdev);
+ }
r700_cp_stop(rdev);
r600_dma_stop(rdev);
evergreen_irq_suspend(rdev);
@@ -5793,12 +5854,7 @@ int evergreen_init(struct radeon_device *rdev)
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
- r = radeon_uvd_init(rdev);
- if (!r) {
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
- r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
- 4096);
- }
+ evergreen_uvd_init(rdev);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 9e93205eb..0d3f744de 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -2608,6 +2608,51 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
}
break;
+ case PACKET3_SET_APPEND_CNT:
+ {
+ uint32_t areg;
+ uint32_t allowed_reg_base;
+ uint32_t source_sel;
+ if (pkt->count != 2) {
+ DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
+ return -EINVAL;
+ }
+
+ allowed_reg_base = GDS_APPEND_COUNT_0;
+ allowed_reg_base -= PACKET3_SET_CONTEXT_REG_START;
+ allowed_reg_base >>= 2;
+
+ areg = idx_value >> 16;
+ if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
+ dev_warn(p->dev, "forbidden register for append cnt 0x%08x at %d\n",
+ areg, idx);
+ return -EINVAL;
+ }
+
+ source_sel = G_PACKET3_SET_APPEND_CNT_SRC_SELECT(idx_value);
+ if (source_sel == PACKET3_SAC_SRC_SEL_MEM) {
+ uint64_t offset;
+ uint32_t swap;
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ DRM_ERROR("bad SET_APPEND_CNT (missing reloc)\n");
+ return -EINVAL;
+ }
+ offset = radeon_get_ib_value(p, idx + 1);
+ swap = offset & 0x3;
+ offset &= ~0x3;
+
+ offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32;
+
+ offset += reloc->gpu_offset;
+ ib[idx+1] = (offset & 0xfffffffc) | swap;
+ ib[idx+2] = upper_32_bits(offset) & 0xff;
+ } else {
+ DRM_ERROR("bad SET_APPEND_CNT (unsupported operation)\n");
+ return -EINVAL;
+ }
+ break;
+ }
case PACKET3_NOP:
break;
default:
@@ -3438,6 +3483,27 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
}
}
break;
+ case PACKET3_SET_APPEND_CNT: {
+ uint32_t areg;
+ uint32_t allowed_reg_base;
+
+ if (pkt->count != 2) {
+ DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
+ return -EINVAL;
+ }
+
+ allowed_reg_base = GDS_APPEND_COUNT_0;
+ allowed_reg_base -= PACKET3_SET_CONTEXT_REG_START;
+ allowed_reg_base >>= 2;
+
+ areg = idx_value >> 16;
+ if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
+ DRM_ERROR("forbidden register for append cnt 0x%08x at %d\n",
+ areg, idx);
+ return -EINVAL;
+ }
+ break;
+ }
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 13b6029d6..0b174e14e 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1689,6 +1689,36 @@
#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
#define PACKET3_SET_RESOURCE_INDIRECT 0x74
#define PACKET3_SET_APPEND_CNT 0x75
+/* SET_APPEND_CNT - documentation
+ * 1. header
+ * 2. COMMAND
+ * 1:0 - SOURCE SEL
+ * 15:2 - Reserved
+ * 31:16 - WR_REG_OFFSET - context register to write source data to.
+ * (one of R_02872C_GDS_APPEND_COUNT_0-11)
+ * 3. CONTROL
+ * (for source == mem)
+ * 31:2 SRC_ADDRESS_LO
+ * 1:0 SWAP
+ * (for source == GDS)
+ * 31:0 GDS offset
+ * (for source == DATA)
+ * 31:0 DATA
+ * (for source == REG)
+ * 31:0 REG
+ * 4. SRC_ADDRESS_HI[7:0]
+ * kernel driver 2.44 only supports SRC == MEM.
+ */
+#define PACKET3_SET_APPEND_CNT_SRC_SELECT(x) ((x) << 0)
+#define G_PACKET3_SET_APPEND_CNT_SRC_SELECT(x) (((x) >> 0) & 0x3)
+/* source is from the data in CONTROL */
+#define PACKET3_SAC_SRC_SEL_DATA 0x0
+/* source is from register */
+#define PACKET3_SAC_SRC_SEL_REG 0x1
+/* source is from GDS offset in CONTROL */
+#define PACKET3_SAC_SRC_SEL_GDS 0x2
+/* source is from memory address */
+#define PACKET3_SAC_SRC_SEL_MEM 0x3
#define SQ_RESOURCE_CONSTANT_WORD7_0 0x3001c
#define S__SQ_CONSTANT_TYPE(x) (((x) & 3) << 30)
@@ -2005,6 +2035,19 @@
#define GDS_ADDR_BASE 0x28720
+#define GDS_APPEND_COUNT_0 0x2872C
+#define GDS_APPEND_COUNT_1 0x28730
+#define GDS_APPEND_COUNT_2 0x28734
+#define GDS_APPEND_COUNT_3 0x28738
+#define GDS_APPEND_COUNT_4 0x2873C
+#define GDS_APPEND_COUNT_5 0x28740
+#define GDS_APPEND_COUNT_6 0x28744
+#define GDS_APPEND_COUNT_7 0x28748
+#define GDS_APPEND_COUNT_8 0x2874c
+#define GDS_APPEND_COUNT_9 0x28750
+#define GDS_APPEND_COUNT_10 0x28754
+#define GDS_APPEND_COUNT_11 0x28758
+
#define CB_IMMED0_BASE 0x28b9c
#define CB_IMMED1_BASE 0x28ba0
#define CB_IMMED2_BASE 0x28ba4
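Putting the documented layout together, a SRC == MEM packet is four dwords: header, COMMAND, CONTROL, SRC_ADDRESS_HI. A hedged encoder sketch follows; the type-3 header convention and the 0x28000 SET_CONTEXT_REG base are assumptions taken from the usual radeon definitions, and the register-index math mirrors the evergreen_cs.c checker above:

#include <stdint.h>
#include <stdio.h>

#define SET_APPEND_CNT		0x75
#define SET_CONTEXT_REG_START	0x28000		/* assumed evergreen base */
#define GDS_APPEND_COUNT_0	0x2872c
#define SAC_SRC_SEL_MEM		0x3

/* type-3 header, assumed convention: type<<30 | count<<16 | opcode<<8 */
static uint32_t packet3(uint32_t op, uint32_t count)
{
	return (3u << 30) | ((count & 0x3fff) << 16) | ((op & 0xff) << 8);
}

int main(void)
{
	uint64_t src = 0x100000ull;	/* dword-aligned source address */
	uint32_t reg = (GDS_APPEND_COUNT_0 - SET_CONTEXT_REG_START) >> 2;
	uint32_t pkt[4];

	pkt[0] = packet3(SET_APPEND_CNT, 2);	/* count 2: three payload dwords */
	pkt[1] = (reg << 16) | SAC_SRC_SEL_MEM;	/* COMMAND: wr_reg_offset | src_sel */
	pkt[2] = (uint32_t)src & 0xfffffffc;	/* CONTROL: SRC_ADDRESS_LO, swap = 0 */
	pkt[3] = (uint32_t)(src >> 32) & 0xff;	/* SRC_ADDRESS_HI[7:0] */

	for (int i = 0; i < 4; i++)
		printf("dw%d = 0x%08x\n", i, pkt[i]);
	return 0;
}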
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index d0240743a..a7e978677 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2164,7 +2164,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
if (pi->caps_stable_p_state) {
stable_p_state_sclk = (max_limits->sclk * 75) / 100;
- for (i = table->count - 1; i >= 0; i++) {
+ for (i = table->count - 1; i >= 0; i--) {
if (stable_p_state_sclk >= table->entries[i].clk) {
stable_p_state_sclk = table->entries[i].clk;
break;
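The one-character fix above is the whole point of the hunk: the loop is supposed to walk the clock table from its top entry downward and clamp to the highest clock at or below the 75% cap, and with i++ it walked off the end of the table instead. The corrected search, modeled standalone:

#include <stdio.h>

int main(void)
{
	unsigned clk[] = { 200, 300, 466, 600, 800 };	/* ascending table */
	int count = sizeof(clk) / sizeof(clk[0]);
	unsigned sclk = (800 * 75) / 100;		/* cap = 600 */

	for (int i = count - 1; i >= 0; i--) {		/* descend, not ascend */
		if (sclk >= clk[i]) {
			sclk = clk[i];			/* highest entry <= cap */
			break;
		}
	}
	printf("stable-p-state sclk = %u\n", sclk);	/* prints 600 */
	return 0;
}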
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index d0eb2b3a2..05920c4c9 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1939,10 +1939,15 @@ static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
evergreen_print_gpu_status_regs(rdev);
}
-int cayman_asic_reset(struct radeon_device *rdev)
+int cayman_asic_reset(struct radeon_device *rdev, bool hard)
{
u32 reset_mask;
+ if (hard) {
+ evergreen_gpu_pci_config_reset(rdev);
+ return 0;
+ }
+
reset_mask = cayman_gpu_check_soft_reset(rdev);
if (reset_mask)
@@ -1982,6 +1987,160 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
+static void cayman_uvd_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_uvd)
+ return;
+
+ r = radeon_uvd_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+ /*
+ * At this point rdev->uvd.vcpu_bo is NULL which trickles down
+ * to early fails uvd_v2_2_resume() and thus nothing happens
+ * there. So it is pointless to try to go through that code
+ * hence why we disable uvd here.
+ */
+ rdev->has_uvd = 0;
+ return;
+ }
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void cayman_uvd_start(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_uvd)
+ return;
+
+ r = uvd_v2_2_resume(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+ goto error;
+ }
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+ goto error;
+ }
+ return;
+
+error:
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void cayman_uvd_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int r;
+
+ if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+ return;
+
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+ return;
+ }
+ r = uvd_v1_0_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+ return;
+ }
+}
+
+static void cayman_vce_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Only set for CHIP_ARUBA */
+ if (!rdev->has_vce)
+ return;
+
+ r = radeon_vce_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
+ /*
+ * At this point rdev->vce.vcpu_bo is NULL which trickles down
+ * to early fails cayman_vce_start() and thus nothing happens
+ * there. So it is pointless to try to go through that code
+ * hence why we disable vce here.
+ */
+ rdev->has_vce = 0;
+ return;
+ }
+ rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
+ rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
+}
+
+static void cayman_vce_start(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_vce)
+ return;
+
+ r = radeon_vce_resume(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+ goto error;
+ }
+ r = vce_v1_0_resume(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+ goto error;
+ }
+ r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
+ goto error;
+ }
+ r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
+ goto error;
+ }
+ return;
+
+error:
+ rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
+ rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
+}
+
+static void cayman_vce_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int r;
+
+ if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
+ return;
+
+ ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
+ return;
+ }
+ ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
+ return;
+ }
+ r = vce_v1_0_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
+ return;
+ }
+}
+
static int cayman_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -2036,34 +2195,8 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
- r = uvd_v2_2_resume(rdev);
- if (!r) {
- r = radeon_fence_driver_start_ring(rdev,
- R600_RING_TYPE_UVD_INDEX);
- if (r)
- dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
- }
- if (r)
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
-
- if (rdev->family == CHIP_ARUBA) {
- r = radeon_vce_resume(rdev);
- if (!r)
- r = vce_v1_0_resume(rdev);
-
- if (!r)
- r = radeon_fence_driver_start_ring(rdev,
- TN_RING_TYPE_VCE1_INDEX);
- if (!r)
- r = radeon_fence_driver_start_ring(rdev,
- TN_RING_TYPE_VCE2_INDEX);
-
- if (r) {
- dev_err(rdev->dev, "VCE init error (%d).\n", r);
- rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
- rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
- }
- }
+ cayman_uvd_start(rdev);
+ cayman_vce_start(rdev);
r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
if (r) {
@@ -2132,30 +2265,8 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- RADEON_CP_PACKET2);
- if (!r)
- r = uvd_v1_0_init(rdev);
- if (r)
- DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
- }
-
- if (rdev->family == CHIP_ARUBA) {
- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
-
- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
-
- if (!r)
- r = vce_v1_0_init(rdev);
- if (r)
- DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
- }
+ cayman_uvd_resume(rdev);
+ cayman_vce_resume(rdev);
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -2210,8 +2321,10 @@ int cayman_suspend(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
cayman_cp_enable(rdev, false);
cayman_dma_stop(rdev);
- uvd_v1_0_fini(rdev);
- radeon_uvd_suspend(rdev);
+ if (rdev->has_uvd) {
+ uvd_v1_0_fini(rdev);
+ radeon_uvd_suspend(rdev);
+ }
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
@@ -2305,25 +2418,8 @@ int cayman_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 64 * 1024);
- r = radeon_uvd_init(rdev);
- if (!r) {
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
- }
-
- if (rdev->family == CHIP_ARUBA) {
- r = radeon_vce_init(rdev);
- if (!r) {
- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
-
- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
- }
- }
+ cayman_uvd_init(rdev);
+ cayman_vce_init(rdev);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -2378,7 +2474,7 @@ void cayman_fini(struct radeon_device *rdev)
radeon_irq_kms_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
- if (rdev->family == CHIP_ARUBA)
+ if (rdev->has_vce)
radeon_vce_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
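Keying cayman_fini() off rdev->has_vce instead of the ARUBA family check is what makes the new error handling safe: cayman_vce_init() clears has_vce when radeon_vce_init() fails, so teardown no longer calls radeon_vce_fini() on state that was never created.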
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 41336fa83..2b1a35d47 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -147,7 +147,7 @@ void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
* bit to go high, when it does, we release the lock, and allow the
* double buffered update to take place.
*/
-void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
@@ -586,7 +586,8 @@ void r100_hpd_init(struct radeon_device *rdev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- enable |= 1 << radeon_connector->hpd.hpd;
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
radeon_irq_kms_enable_hpd(rdev, enable);
@@ -608,7 +609,8 @@ void r100_hpd_fini(struct radeon_device *rdev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- disable |= 1 << radeon_connector->hpd.hpd;
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ disable |= 1 << radeon_connector->hpd.hpd;
}
radeon_irq_kms_disable_hpd(rdev, disable);
}
@@ -2549,7 +2551,7 @@ void r100_bm_disable(struct radeon_device *rdev)
mdelay(1);
}
-int r100_asic_reset(struct radeon_device *rdev)
+int r100_asic_reset(struct radeon_device *rdev, bool hard)
{
struct r100_mc_save save;
u32 status, tmp;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 718b12b03..7e417d8dc 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -410,7 +410,7 @@ static void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
-int r300_asic_reset(struct radeon_device *rdev)
+int r300_asic_reset(struct radeon_device *rdev, bool hard)
{
struct r100_mc_save save;
u32 status, tmp;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 78df3bd52..1a4002ed0 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -954,7 +954,8 @@ void r600_hpd_init(struct radeon_device *rdev)
break;
}
}
- enable |= 1 << radeon_connector->hpd.hpd;
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
radeon_irq_kms_enable_hpd(rdev, enable);
@@ -1007,7 +1008,8 @@ void r600_hpd_fini(struct radeon_device *rdev)
break;
}
}
- disable |= 1 << radeon_connector->hpd.hpd;
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ disable |= 1 << radeon_connector->hpd.hpd;
}
radeon_irq_kms_disable_hpd(rdev, disable);
}
@@ -1823,10 +1825,15 @@ static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
}
}
-int r600_asic_reset(struct radeon_device *rdev)
+int r600_asic_reset(struct radeon_device *rdev, bool hard)
{
u32 reset_mask;
+ if (hard) {
+ r600_gpu_pci_config_reset(rdev);
+ return 0;
+ }
+
reset_mask = r600_gpu_check_soft_reset(rdev);
if (reset_mask)
@@ -2987,6 +2994,73 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
/* FIXME: implement */
}
+static void r600_uvd_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_uvd)
+ return;
+
+ r = radeon_uvd_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+ /*
+ * At this point rdev->uvd.vcpu_bo is NULL which trickles down
+ * to early fails uvd_v1_0_resume() and thus nothing happens
+ * there. So it is pointless to try to go through that code
+ * hence why we disable uvd here.
+ */
+ rdev->has_uvd = 0;
+ return;
+ }
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void r600_uvd_start(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_uvd)
+ return;
+
+ r = uvd_v1_0_resume(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+ goto error;
+ }
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+ goto error;
+ }
+ return;
+
+error:
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void r600_uvd_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int r;
+
+ if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+ return;
+
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+ return;
+ }
+ r = uvd_v1_0_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+ return;
+ }
+}
+
static int r600_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
@@ -3022,17 +3096,7 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
- if (rdev->has_uvd) {
- r = uvd_v1_0_resume(rdev);
- if (!r) {
- r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
- }
- }
- if (r)
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
- }
+ r600_uvd_start(rdev);
/* Enable IRQ */
if (!rdev->irq.installed) {
@@ -3062,17 +3126,7 @@ static int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- if (rdev->has_uvd) {
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- RADEON_CP_PACKET2);
- if (!r)
- r = uvd_v1_0_init(rdev);
- if (r)
- DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
- }
- }
+ r600_uvd_resume(rdev);
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -3216,13 +3270,7 @@ int r600_init(struct radeon_device *rdev)
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
- if (rdev->has_uvd) {
- r = radeon_uvd_init(rdev);
- if (!r) {
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
- r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
- }
- }
+ r600_uvd_init(rdev);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 007be29a0..80b24a495 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -113,6 +113,8 @@ extern int radeon_bapm;
extern int radeon_backlight;
extern int radeon_auxch;
extern int radeon_mst;
+extern int radeon_uvd;
+extern int radeon_vce;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -744,6 +746,7 @@ struct radeon_flip_work {
struct drm_pending_vblank_event *event;
struct radeon_bo *old_rbo;
struct fence *fence;
+ bool async;
};
struct r500_irq_stat_regs {
@@ -1671,14 +1674,18 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
/*
* UVD
*/
-#define RADEON_MAX_UVD_HANDLES 10
-#define RADEON_UVD_STACK_SIZE (1024*1024)
-#define RADEON_UVD_HEAP_SIZE (1024*1024)
+#define RADEON_DEFAULT_UVD_HANDLES 10
+#define RADEON_MAX_UVD_HANDLES 30
+#define RADEON_UVD_STACK_SIZE (200*1024)
+#define RADEON_UVD_HEAP_SIZE (256*1024)
+#define RADEON_UVD_SESSION_SIZE (50*1024)
struct radeon_uvd {
+ bool fw_header_present;
struct radeon_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
+ unsigned max_handles;
atomic_t handles[RADEON_MAX_UVD_HANDLES];
struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
unsigned img_size[RADEON_MAX_UVD_HANDLES];
@@ -1852,7 +1859,7 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
- int (*asic_reset)(struct radeon_device *rdev);
+ int (*asic_reset)(struct radeon_device *rdev, bool hard);
/* Flush the HDP cache via MMIO */
void (*mmio_hdp_flush)(struct radeon_device *rdev);
/* check if 3D engine is idle */
@@ -1998,7 +2005,7 @@ struct radeon_asic {
} dpm;
/* pageflipping */
struct {
- void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base, bool async);
bool (*page_flip_pending)(struct radeon_device *rdev, int crtc);
} pflip;
};
@@ -2394,7 +2401,6 @@ struct radeon_device {
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
bool shutdown;
- bool suspend;
bool need_dma32;
bool accel_working;
bool fastfb_working; /* IGP feature*/
@@ -2423,6 +2429,7 @@ struct radeon_device {
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
bool has_uvd;
+ bool has_vce;
struct r600_audio audio; /* audio stuff */
struct notifier_block acpi_nb;
/* only one userspace can use Hyperz features or CMASK at a time */
@@ -2717,7 +2724,7 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
+#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev), false)
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
@@ -2775,7 +2782,7 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
-#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
+#define radeon_page_flip(rdev, crtc, base, async) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base), (async))
#define radeon_page_flip_pending(rdev, crtc) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
@@ -2832,7 +2839,8 @@ extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-extern int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
+extern int radeon_suspend_kms(struct drm_device *dev, bool suspend,
+ bool fbcon, bool freeze);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
extern void radeon_program_register_sequence(struct radeon_device *rdev,
const u32 *registers,
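Note the compatibility trick in the macro hunk above: radeon_asic_reset(rdev) now expands to ->asic_reset(rdev, false), so every existing caller keeps soft-reset semantics untouched, and only code that explicitly wants the PCI config reset passes hard = true through the function pointer.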
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 7d5a36dd5..bc5121d1a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2324,6 +2324,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->num_crtc = 2;
rdev->has_uvd = false;
+ rdev->has_vce = false;
switch (rdev->family) {
case CHIP_R100:
@@ -2454,6 +2455,7 @@ int radeon_asic_init(struct radeon_device *rdev)
/* set num crtcs */
rdev->num_crtc = 4;
rdev->has_uvd = true;
+ rdev->has_vce = true;
rdev->cg_flags =
RADEON_CG_SUPPORT_VCE_MGCG;
break;
@@ -2470,10 +2472,13 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->num_crtc = 2;
else
rdev->num_crtc = 6;
- if (rdev->family == CHIP_HAINAN)
+ if (rdev->family == CHIP_HAINAN) {
rdev->has_uvd = false;
- else
+ rdev->has_vce = false;
+ } else {
rdev->has_uvd = true;
+ rdev->has_vce = true;
+ }
switch (rdev->family) {
case CHIP_TAHITI:
rdev->cg_flags =
@@ -2578,6 +2583,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic = &ci_asic;
rdev->num_crtc = 6;
rdev->has_uvd = true;
+ rdev->has_vce = true;
if (rdev->family == CHIP_BONAIRE) {
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
@@ -2678,6 +2684,7 @@ int radeon_asic_init(struct radeon_device *rdev)
RADEON_PG_SUPPORT_SAMU;*/
}
rdev->has_uvd = true;
+ rdev->has_vce = true;
break;
default:
/* FIXME: not supported yet */
@@ -2689,6 +2696,11 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic->pm.set_memory_clock = NULL;
}
+ if (!radeon_uvd)
+ rdev->has_uvd = false;
+ if (!radeon_vce)
+ rdev->has_vce = false;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e0aa33262..e3f036c20 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -64,7 +64,7 @@ int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
-int r100_asic_reset(struct radeon_device *rdev);
+int r100_asic_reset(struct radeon_device *rdev, bool hard);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
@@ -138,7 +138,7 @@ extern void r100_pm_finish(struct radeon_device *rdev);
extern void r100_pm_init_profile(struct radeon_device *rdev);
extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r100_page_flip(struct radeon_device *rdev, int crtc,
- u64 crtc_base);
+ u64 crtc_base, bool async);
extern bool r100_page_flip_pending(struct radeon_device *rdev, int crtc);
extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
@@ -167,7 +167,7 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
-extern int r300_asic_reset(struct radeon_device *rdev);
+extern int r300_asic_reset(struct radeon_device *rdev, bool hard);
extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
@@ -225,7 +225,7 @@ extern int rs400_mc_wait_for_idle(struct radeon_device *rdev);
/*
* rs600.
*/
-extern int rs600_asic_reset(struct radeon_device *rdev);
+extern int rs600_asic_reset(struct radeon_device *rdev, bool hard);
extern int rs600_init(struct radeon_device *rdev);
extern void rs600_fini(struct radeon_device *rdev);
extern int rs600_suspend(struct radeon_device *rdev);
@@ -250,7 +250,7 @@ extern void rs600_pm_misc(struct radeon_device *rdev);
extern void rs600_pm_prepare(struct radeon_device *rdev);
extern void rs600_pm_finish(struct radeon_device *rdev);
extern void rs600_page_flip(struct radeon_device *rdev, int crtc,
- u64 crtc_base);
+ u64 crtc_base, bool async);
extern bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc);
void rs600_set_safe_registers(struct radeon_device *rdev);
extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
@@ -334,7 +334,7 @@ bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
-int r600_asic_reset(struct radeon_device *rdev);
+int r600_asic_reset(struct radeon_device *rdev, bool hard);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
@@ -464,7 +464,8 @@ void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
void rv770_pm_misc(struct radeon_device *rdev);
-void rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base,
+ bool async);
bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc);
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
void r700_cp_stop(struct radeon_device *rdev);
@@ -513,7 +514,7 @@ int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
-int evergreen_asic_reset(struct radeon_device *rdev);
+int evergreen_asic_reset(struct radeon_device *rdev, bool hard);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
void evergreen_hpd_init(struct radeon_device *rdev);
@@ -534,7 +535,7 @@ extern void btc_pm_init_profile(struct radeon_device *rdev);
int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
extern void evergreen_page_flip(struct radeon_device *rdev, int crtc,
- u64 crtc_base);
+ u64 crtc_base, bool async);
extern bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc);
extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
@@ -606,7 +607,7 @@ int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
int cayman_suspend(struct radeon_device *rdev);
int cayman_resume(struct radeon_device *rdev);
-int cayman_asic_reset(struct radeon_device *rdev);
+int cayman_asic_reset(struct radeon_device *rdev, bool hard);
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cayman_vm_init(struct radeon_device *rdev);
void cayman_vm_fini(struct radeon_device *rdev);
@@ -712,7 +713,7 @@ int si_suspend(struct radeon_device *rdev);
int si_resume(struct radeon_device *rdev);
bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
-int si_asic_reset(struct radeon_device *rdev);
+int si_asic_reset(struct radeon_device *rdev, bool hard);
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int si_irq_set(struct radeon_device *rdev);
int si_irq_process(struct radeon_device *rdev);
@@ -817,7 +818,7 @@ void cik_fini(struct radeon_device *rdev);
int cik_suspend(struct radeon_device *rdev);
int cik_resume(struct radeon_device *rdev);
bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
-int cik_asic_reset(struct radeon_device *rdev);
+int cik_asic_reset(struct radeon_device *rdev, bool hard);
void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ab39b85e0..510ea371d 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -74,7 +74,6 @@ static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
- struct drm_device *ddev = p->rdev->ddev;
struct radeon_cs_chunk *chunk;
struct radeon_cs_buckets buckets;
unsigned i;
@@ -101,7 +100,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
unsigned priority;
r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
- gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
+ gobj = drm_gem_object_lookup(p->filp, r->handle);
if (gobj == NULL) {
DRM_ERROR("gem object lookup failed 0x%x\n",
r->handle);
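This hunk, and the many lookups below, track a core DRM change in this kernel cycle: drm_gem_object_lookup() dropped its struct drm_device argument (GEM handles are per-file anyway), so every call site simply loses the dev/ddev parameter.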
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index afaf346bd..2a10e24b3 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -274,7 +274,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
return -EINVAL;
}
- obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
return -ENOENT;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index cb2986876..21c44b229 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1251,7 +1251,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
printk(KERN_INFO "radeon: switched off\n");
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- radeon_suspend_kms(dev, true, true);
+ radeon_suspend_kms(dev, true, true, false);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1576,7 +1576,8 @@ void radeon_device_fini(struct radeon_device *rdev)
* Returns 0 for success or an error on failure.
* Called at driver suspend.
*/
-int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
+int radeon_suspend_kms(struct drm_device *dev, bool suspend,
+ bool fbcon, bool freeze)
{
struct radeon_device *rdev;
struct drm_crtc *crtc;
@@ -1651,7 +1652,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
radeon_agp_suspend(rdev);
pci_save_state(dev->pdev);
- if (suspend) {
+ if (freeze && rdev->family >= CHIP_CEDAR) {
+ rdev->asic->asic_reset(rdev, true);
+ pci_restore_state(dev->pdev);
+ } else if (suspend) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
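The freeze branch gives hibernation on evergreen and newer a different contract: rather than powering the device down, it hard-resets it through PCI config space and restores config state, leaving quiesced hardware for the frozen image to resume against. The dispatch shape, as a standalone sketch (names and the family ordering are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct dev;
struct asic { int (*asic_reset)(struct dev *d, bool hard); };
struct dev  { const struct asic *asic; int family; };

enum { CHIP_CEDAR = 40 };	/* assumed: evergreen and newer >= CEDAR */

static int my_reset(struct dev *d, bool hard)
{
	(void)d;
	printf(hard ? "PCI config reset\n" : "soft reset\n");
	return 0;
}

static void suspend_kms(struct dev *d, bool suspend, bool freeze)
{
	if (freeze && d->family >= CHIP_CEDAR)
		d->asic->asic_reset(d, true);	/* then restore PCI state */
	else if (suspend)
		printf("disable device, enter D3hot\n");
}

int main(void)
{
	const struct asic a = { .asic_reset = my_reset };
	struct dev d = { .asic = &a, .family = CHIP_CEDAR };

	suspend_kms(&d, false, true);	/* hibernate: hard reset path */
	suspend_kms(&d, true, false);	/* suspend: power-down path */
	return 0;
}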
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index fcc7483d3..6a41b4982 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -377,7 +377,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
/* wakeup userspace */
if (work->event)
- drm_send_vblank_event(rdev->ddev, crtc_id, work->event);
+ drm_crtc_send_vblank_event(&radeon_crtc->base, work->event);
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
@@ -490,7 +490,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
vblank->linedur_ns / 1000, stat, vpos, hpos);
/* do the flip (mmio) */
- radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+ radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base, work->async);
radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
@@ -525,6 +525,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
work->rdev = rdev;
work->crtc_id = radeon_crtc->crtc_id;
work->event = event;
+ work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
/* schedule unpin of the old buffer */
old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
@@ -1367,7 +1368,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
struct radeon_framebuffer *radeon_fb;
int ret;
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
"can't create framebuffer\n", mode_cmd->handles[0]);
@@ -1630,6 +1631,9 @@ int radeon_modeset_init(struct radeon_device *rdev)
rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
+ if (radeon_use_pflipirq == 2 && rdev->family >= CHIP_R600)
+ rdev->ddev->mode_config.async_page_flip = true;
+
if (ASIC_IS_DCE5(rdev)) {
rdev->ddev->mode_config.max_width = 16384;
rdev->ddev->mode_config.max_height = 16384;
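With work->async plumbed through to the page_flip hook and mode_config.async_page_flip advertised, userspace can ask for immediate (tear-permitting) flips through the standard libdrm entry point. A minimal caller, assuming fd, crtc_id and fb_id come from an already configured KMS session (link with -ldrm):

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int async_flip(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	/* ASYNC: flip as soon as possible instead of waiting for vblank;
	 * EVENT: still deliver a completion event on the fd. */
	int r = drmModePageFlip(fd, crtc_id, fb_id,
				DRM_MODE_PAGE_FLIP_ASYNC |
				DRM_MODE_PAGE_FLIP_EVENT, NULL);
	if (r)
		fprintf(stderr, "async flip rejected (%d)\n", r);
	return r;
}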
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ccd4ad4ee..b55aa7401 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -93,9 +93,11 @@
* 2.41.0 - evergreen/cayman: Add SET_BASE/DRAW_INDIRECT command parsing support
* 2.42.0 - Add VCE/VUI (Video Usability Information) support
* 2.43.0 - RADEON_INFO_GPU_RESET_COUNTER
+ * 2.44.0 - SET_APPEND_CNT packet3 support
+ * 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 43
+#define KMS_DRIVER_MINOR 45
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -105,7 +107,8 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
void radeon_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
-int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
+int radeon_suspend_kms(struct drm_device *dev, bool suspend,
+ bool fbcon, bool freeze);
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
@@ -196,6 +199,8 @@ int radeon_bapm = -1;
int radeon_backlight = -1;
int radeon_auxch = -1;
int radeon_mst = 0;
+int radeon_uvd = 1;
+int radeon_vce = 1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -287,6 +292,12 @@ module_param_named(auxch, radeon_auxch, int, 0444);
MODULE_PARM_DESC(mst, "DisplayPort MST experimental support (1 = enable, 0 = disable)");
module_param_named(mst, radeon_mst, int, 0444);
+MODULE_PARM_DESC(uvd, "enable/disable UVD support (1 = enable, 0 = disable)");
+module_param_named(uvd, radeon_uvd, int, 0444);
+
+MODULE_PARM_DESC(vce, "enable/disable VCE support (1 = enable, 0 = disable)");
+module_param_named(vce, radeon_vce, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
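Together with the radeon_asic_init() hunk earlier, these parameters let UVD and VCE be masked off at load time (e.g. radeon.uvd=0 radeon.vce=0 on the kernel command line, or modprobe radeon uvd=0 vce=0), clearing has_uvd/has_vce before any firmware or ring state is set up.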
@@ -358,7 +369,7 @@ static int radeon_pmops_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return radeon_suspend_kms(drm_dev, true, true);
+ return radeon_suspend_kms(drm_dev, true, true, false);
}
static int radeon_pmops_resume(struct device *dev)
@@ -372,7 +383,7 @@ static int radeon_pmops_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return radeon_suspend_kms(drm_dev, false, true);
+ return radeon_suspend_kms(drm_dev, false, true, true);
}
static int radeon_pmops_thaw(struct device *dev)
@@ -397,7 +408,7 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
drm_kms_helper_poll_disable(drm_dev);
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
- ret = radeon_suspend_kms(drm_dev, false, false);
+ ret = radeon_suspend_kms(drm_dev, false, false, false);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
@@ -525,7 +536,7 @@ static struct drm_driver kms_driver = {
.irq_uninstall = radeon_driver_irq_uninstall_kms,
.irq_handler = radeon_driver_irq_handler_kms,
.ioctls = radeon_ioctls_kms,
- .gem_free_object = radeon_gem_object_free,
+ .gem_free_object_unlocked = radeon_gem_object_free,
.gem_open_object = radeon_gem_object_open,
.gem_close_object = radeon_gem_object_close,
.dumb_create = radeon_mode_dumb_create,
@@ -566,12 +577,10 @@ static struct pci_driver radeon_kms_pci_driver = {
static int __init radeon_init(void)
{
-#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && radeon_modeset == -1) {
DRM_INFO("VGACON disable radeon kernel modesetting.\n");
radeon_modeset = 0;
}
-#endif
/* set to modesetting by default if not nomodeset */
if (radeon_modeset == -1)
radeon_modeset = 1;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index e26c963f2..deb951172 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -382,7 +382,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
down_read(&rdev->exclusive_lock);
/* just do a BO wait for now */
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL) {
up_read(&rdev->exclusive_lock);
return -ENOENT;
@@ -404,7 +404,7 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
struct drm_gem_object *gobj;
struct radeon_bo *robj;
- gobj = drm_gem_object_lookup(dev, filp, handle);
+ gobj = drm_gem_object_lookup(filp, handle);
if (gobj == NULL) {
return -ENOENT;
}
@@ -435,7 +435,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
int r;
uint32_t cur_placement = 0;
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL) {
return -ENOENT;
}
@@ -464,7 +464,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
uint32_t cur_placement = 0;
long ret;
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL) {
return -ENOENT;
}
@@ -495,7 +495,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
int r = 0;
DRM_DEBUG("%d \n", args->handle);
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL)
return -ENOENT;
robj = gem_to_radeon_bo(gobj);
@@ -513,7 +513,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
int r = 0;
DRM_DEBUG("\n");
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL)
return -ENOENT;
rbo = gem_to_radeon_bo(gobj);
@@ -648,7 +648,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL) {
args->operation = RADEON_VA_RESULT_ERROR;
return -ENOENT;
@@ -703,7 +703,7 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
struct radeon_bo *robj;
int r;
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL) {
return -ENOENT;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 1e9304d1c..c084cadcb 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -291,7 +291,6 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
if (r) {
return r;
}
- rdev->ddev->vblank_disable_allowed = true;
/* enable msi */
rdev->msi_enabled = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index eef006c48..896f2cf51 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -186,7 +186,9 @@ static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
struct radeon_mn *rmn;
int r;
- down_write(&mm->mmap_sem);
+ if (down_write_killable(&mm->mmap_sem))
+ return ERR_PTR(-EINTR);
+
mutex_lock(&rdev->mn_lock);
hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
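Taking mmap_sem with down_write_killable() means a task blocked here can at least be killed; the ERR_PTR(-EINTR) return then propagates, so callers of radeon_mn_get() have to treat a fatal-signal bail-out distinctly from an allocation failure.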
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 2d901bf28..be30861af 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -832,13 +832,13 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0))
return r;
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
- r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ r = ttm_bo_wait(&bo->tbo, true, no_wait);
ttm_bo_unreserve(&bo->tbo);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d8d295ee7..a10bb3dee 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -65,7 +65,7 @@ static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
int r;
- r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, NULL);
+ r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 90f739478..590b0377f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -865,6 +865,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
.io_mem_free = &radeon_ttm_io_mem_free,
+ .lru_tail = &ttm_bo_default_lru_tail,
+ .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int radeon_ttm_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index ca7d2a840..e50963d4f 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -34,6 +34,7 @@
#include <drm/drm.h>
#include "radeon.h"
+#include "radeon_ucode.h"
#include "r600d.h"
/* 1 second timeout */
@@ -47,6 +48,7 @@
#define FIRMWARE_CYPRESS "/*(DEBLOBBED)*/"
#define FIRMWARE_SUMO "/*(DEBLOBBED)*/"
#define FIRMWARE_TAHITI "/*(DEBLOBBED)*/"
+#define FIRMWARE_BONAIRE_LEGACY "/*(DEBLOBBED)*/"
#define FIRMWARE_BONAIRE "/*(DEBLOBBED)*/"
/*(DEBLOBBED)*/
@@ -56,7 +58,7 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work);
int radeon_uvd_init(struct radeon_device *rdev)
{
unsigned long bo_size;
- const char *fw_name;
+ const char *fw_name = NULL, *legacy_fw_name = NULL;
int i, r;
INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
@@ -67,22 +69,22 @@ int radeon_uvd_init(struct radeon_device *rdev)
case CHIP_RV670:
case CHIP_RV620:
case CHIP_RV635:
- fw_name = FIRMWARE_R600;
+ legacy_fw_name = FIRMWARE_R600;
break;
case CHIP_RS780:
case CHIP_RS880:
- fw_name = FIRMWARE_RS780;
+ legacy_fw_name = FIRMWARE_RS780;
break;
case CHIP_RV770:
- fw_name = FIRMWARE_RV770;
+ legacy_fw_name = FIRMWARE_RV770;
break;
case CHIP_RV710:
case CHIP_RV730:
case CHIP_RV740:
- fw_name = FIRMWARE_RV710;
+ legacy_fw_name = FIRMWARE_RV710;
break;
case CHIP_CYPRESS:
@@ -90,7 +92,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
case CHIP_JUNIPER:
case CHIP_REDWOOD:
case CHIP_CEDAR:
- fw_name = FIRMWARE_CYPRESS;
+ legacy_fw_name = FIRMWARE_CYPRESS;
break;
case CHIP_SUMO:
@@ -100,7 +102,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
- fw_name = FIRMWARE_SUMO;
+ legacy_fw_name = FIRMWARE_SUMO;
break;
case CHIP_TAHITI:
@@ -108,7 +110,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
case CHIP_PITCAIRN:
case CHIP_ARUBA:
case CHIP_OLAND:
- fw_name = FIRMWARE_TAHITI;
+ legacy_fw_name = FIRMWARE_TAHITI;
break;
case CHIP_BONAIRE:
@@ -116,6 +118,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
case CHIP_KAVERI:
case CHIP_HAWAII:
case CHIP_MULLINS:
+ legacy_fw_name = FIRMWARE_BONAIRE_LEGACY;
fw_name = FIRMWARE_BONAIRE;
break;
@@ -123,16 +126,56 @@ int radeon_uvd_init(struct radeon_device *rdev)
return -EINVAL;
}
- r = reject_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
- if (r) {
- dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
- fw_name);
- return r;
+ rdev->uvd.fw_header_present = false;
+ rdev->uvd.max_handles = RADEON_DEFAULT_UVD_HANDLES;
+ if (fw_name) {
+ /* Let's try to load the newer firmware first */
+ r = reject_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
+ if (r) {
+ dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
+ fw_name);
+ } else {
+ struct common_firmware_header *hdr = (void *)rdev->uvd_fw->data;
+ unsigned version_major, version_minor, family_id;
+
+ r = radeon_ucode_validate(rdev->uvd_fw);
+ if (r)
+ return r;
+
+ rdev->uvd.fw_header_present = true;
+
+ family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+ version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+ version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+ DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+ version_major, version_minor, family_id);
+
+ /*
+ * Limit the number of UVD handles depending on
+ * microcode major and minor versions.
+ */
+ if ((version_major >= 0x01) && (version_minor >= 0x37))
+ rdev->uvd.max_handles = RADEON_MAX_UVD_HANDLES;
+ }
+ }
+
+ /*
+ * In case there is only legacy firmware, or we encounter an error
+ * while loading the new firmware, we fall back to loading the legacy
+ * firmware now.
+ */
+ if (!fw_name || r) {
+ r = reject_firmware(&rdev->uvd_fw, legacy_fw_name, rdev->dev);
+ if (r) {
+ dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
+ legacy_fw_name);
+ return r;
+ }
}
bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
- RADEON_GPU_PAGE_SIZE;
+ RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles;
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
NULL, &rdev->uvd.vcpu_bo);
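The new sizing replaces the two fixed 1 MiB regions with per-session accounting: with header-carrying firmware (30 handles) the tail is 200 KiB stack + 256 KiB heap + 30 x 50 KiB of session state = 1956 KiB on top of the page-aligned firmware image, while legacy firmware keeps the default 10 handles for 956 KiB.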
@@ -165,7 +208,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ for (i = 0; i < rdev->uvd.max_handles; ++i) {
atomic_set(&rdev->uvd.handles[i], 0);
rdev->uvd.filp[i] = NULL;
rdev->uvd.img_size[i] = 0;
@@ -202,7 +245,7 @@ int radeon_uvd_suspend(struct radeon_device *rdev)
if (rdev->uvd.vcpu_bo == NULL)
return 0;
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ for (i = 0; i < rdev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
if (handle != 0) {
struct radeon_fence *fence;
@@ -277,7 +320,7 @@ void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
int i, r;
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ for (i = 0; i < rdev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
if (handle != 0 && rdev->uvd.filp[i] == filp) {
struct radeon_fence *fence;
@@ -462,7 +505,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return r;
/* try to alloc a new handle */
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
DRM_ERROR("Handle 0x%x already in use!\n", handle);
return -EINVAL;
@@ -488,7 +531,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return r;
/* validate the handle */
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
if (p->rdev->uvd.filp[i] != p->filp) {
DRM_ERROR("UVD handle collision detected!\n");
@@ -503,7 +546,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
case 2:
/* it's a destroy msg, free the handle */
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+ for (i = 0; i < p->rdev->uvd.max_handles; ++i)
atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
radeon_bo_kunmap(bo);
return 0;
@@ -802,7 +845,7 @@ static void radeon_uvd_count_handles(struct radeon_device *rdev,
*sd = 0;
*hd = 0;
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ for (i = 0; i < rdev->uvd.max_handles; ++i) {
if (!atomic_read(&rdev->uvd.handles[i]))
continue;
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6244f4e44..f16af119c 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -110,7 +110,7 @@ void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
}
}
-void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -121,6 +121,8 @@ void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
/* update the scanout addresses */
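+	/* async flips latch the new surface address at the next horizontal retrace instead of waiting for vblank */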
+ WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
+ async ? AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
(u32)crtc_base);
WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
@@ -413,7 +415,8 @@ void rs600_hpd_init(struct radeon_device *rdev)
default:
break;
}
- enable |= 1 << radeon_connector->hpd.hpd;
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
radeon_irq_kms_enable_hpd(rdev, enable);
@@ -439,12 +442,13 @@ void rs600_hpd_fini(struct radeon_device *rdev)
default:
break;
}
- disable |= 1 << radeon_connector->hpd.hpd;
+ if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+ disable |= 1 << radeon_connector->hpd.hpd;
}
radeon_irq_kms_disable_hpd(rdev, disable);
}
-int rs600_asic_reset(struct radeon_device *rdev)
+int rs600_asic_reset(struct radeon_device *rdev, bool hard)
{
struct rv515_mc_save save;
u32 status, tmp;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 01ee96acb..1c120a4c3 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -801,7 +801,7 @@ u32 rv770_get_xclk(struct radeon_device *rdev)
return reference_clock;
}
-void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -812,6 +812,8 @@ void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
/* update the scanout addresses */
+ WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
+ async ? AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
if (radeon_crtc->crtc_id) {
WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
@@ -1681,6 +1683,73 @@ static int rv770_mc_init(struct radeon_device *rdev)
return 0;
}
+static void rv770_uvd_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_uvd)
+ return;
+
+ r = radeon_uvd_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+ /*
+	 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+	 * uvd_v2_2_resume() fail early and do nothing there. Going
+	 * through that code would be pointless, so disable UVD here.
+ */
+ rdev->has_uvd = 0;
+ return;
+ }
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void rv770_uvd_start(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_uvd)
+ return;
+
+ r = uvd_v2_2_resume(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+ goto error;
+ }
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+ goto error;
+ }
+ return;
+
+error:
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void rv770_uvd_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int r;
+
+ if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+ return;
+
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+ return;
+ }
+ r = uvd_v1_0_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+ return;
+ }
+}
+
static int rv770_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
@@ -1723,16 +1792,7 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
- r = uvd_v2_2_resume(rdev);
- if (!r) {
- r = radeon_fence_driver_start_ring(rdev,
- R600_RING_TYPE_UVD_INDEX);
- if (r)
- dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
- }
-
- if (r)
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+ rv770_uvd_start(rdev);
/* Enable IRQ */
if (!rdev->irq.installed) {
@@ -1772,16 +1832,7 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- RADEON_CP_PACKET2);
- if (!r)
- r = uvd_v1_0_init(rdev);
-
- if (r)
- DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
- }
+ rv770_uvd_resume(rdev);
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -1831,8 +1882,10 @@ int rv770_suspend(struct radeon_device *rdev)
{
radeon_pm_suspend(rdev);
radeon_audio_fini(rdev);
- uvd_v1_0_fini(rdev);
- radeon_uvd_suspend(rdev);
+ if (rdev->has_uvd) {
+ uvd_v1_0_fini(rdev);
+ radeon_uvd_suspend(rdev);
+ }
r700_cp_stop(rdev);
r600_dma_stop(rdev);
r600_irq_suspend(rdev);
@@ -1917,12 +1970,7 @@ int rv770_init(struct radeon_device *rdev)
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
- r = radeon_uvd_init(rdev);
- if (!r) {
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
- r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
- 4096);
- }
+ rv770_uvd_init(rdev);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 2ffb87051..073454a2f 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3961,10 +3961,15 @@ static void si_gpu_pci_config_reset(struct radeon_device *rdev)
}
}
-int si_asic_reset(struct radeon_device *rdev)
+int si_asic_reset(struct radeon_device *rdev, bool hard)
{
u32 reset_mask;
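+	/* a hard reset skips the soft-reset checks and resets the GPU through its PCI config registers */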
+ if (hard) {
+ si_gpu_pci_config_reset(rdev);
+ return 0;
+ }
+
reset_mask = si_gpu_check_soft_reset(rdev);
if (reset_mask)
@@ -4286,6 +4291,10 @@ static bool si_vm_reg_valid(u32 reg)
if (reg >= 0x28000)
return true;
+ /* shader regs are also fine */
+ if (reg >= 0xB000 && reg < 0xC000)
+ return true;
+
/* check config regs */
switch (reg) {
case GRBM_GFX_INDEX:
@@ -6748,6 +6757,159 @@ restart_ih:
/*
* startup/shutdown callbacks
*/
+static void si_uvd_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_uvd)
+ return;
+
+ r = radeon_uvd_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
+ /*
+	 * At this point rdev->uvd.vcpu_bo is NULL, which makes
+	 * uvd_v2_2_resume() fail early and do nothing there. Going
+	 * through that code would be pointless, so disable UVD here.
+ */
+ rdev->has_uvd = 0;
+ return;
+ }
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+}
+
+static void si_uvd_start(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_uvd)
+ return;
+
+ r = uvd_v2_2_resume(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
+ goto error;
+ }
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+ goto error;
+ }
+ return;
+
+error:
+ rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+}
+
+static void si_uvd_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int r;
+
+ if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
+ return;
+
+ ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, RADEON_CP_PACKET2);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
+ return;
+ }
+ r = uvd_v1_0_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
+ return;
+ }
+}
+
+static void si_vce_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_vce)
+ return;
+
+ r = radeon_vce_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
+ /*
+	 * At this point rdev->vce.vcpu_bo is NULL, which makes
+	 * si_vce_start() fail early and do nothing there. Going
+	 * through that code would be pointless, so disable VCE here.
+ */
+ rdev->has_vce = 0;
+ return;
+ }
+ rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
+ rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
+}
+
+static void si_vce_start(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->has_vce)
+ return;
+
+ r = radeon_vce_resume(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+ goto error;
+ }
+ r = vce_v1_0_resume(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
+ goto error;
+ }
+ r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
+ goto error;
+ }
+ r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
+ goto error;
+ }
+ return;
+
+error:
+ rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
+ rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
+}
+
+static void si_vce_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ int r;
+
+ if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
+ return;
+
+ ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
+ return;
+ }
+ ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
+ return;
+ }
+ r = vce_v1_0_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
+ return;
+ }
+}
+
static int si_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
@@ -6826,33 +6988,8 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
- if (rdev->has_uvd) {
- r = uvd_v2_2_resume(rdev);
- if (!r) {
- r = radeon_fence_driver_start_ring(rdev,
- R600_RING_TYPE_UVD_INDEX);
- if (r)
- dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
- }
- if (r)
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
- }
-
- r = radeon_vce_resume(rdev);
- if (!r) {
- r = vce_v1_0_resume(rdev);
- if (!r)
- r = radeon_fence_driver_start_ring(rdev,
- TN_RING_TYPE_VCE1_INDEX);
- if (!r)
- r = radeon_fence_driver_start_ring(rdev,
- TN_RING_TYPE_VCE2_INDEX);
- }
- if (r) {
- dev_err(rdev->dev, "VCE init error (%d).\n", r);
- rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
- rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
- }
+ si_uvd_start(rdev);
+ si_vce_start(rdev);
/* Enable IRQ */
if (!rdev->irq.installed) {
@@ -6910,34 +7047,8 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
- if (rdev->has_uvd) {
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- RADEON_CP_PACKET2);
- if (!r)
- r = uvd_v1_0_init(rdev);
- if (r)
- DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
- }
- }
-
- r = -ENOENT;
-
- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- VCE_CMD_NO_OP);
-
- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- VCE_CMD_NO_OP);
-
- if (!r)
- r = vce_v1_0_init(rdev);
- else if (r != -ENOENT)
- DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+ si_uvd_resume(rdev);
+ si_vce_resume(rdev);
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -6997,8 +7108,9 @@ int si_suspend(struct radeon_device *rdev)
if (rdev->has_uvd) {
uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
- radeon_vce_suspend(rdev);
}
+ if (rdev->has_vce)
+ radeon_vce_suspend(rdev);
si_fini_pg(rdev);
si_fini_cg(rdev);
si_irq_suspend(rdev);
@@ -7096,25 +7208,8 @@ int si_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 64 * 1024);
- if (rdev->has_uvd) {
- r = radeon_uvd_init(rdev);
- if (!r) {
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
- }
- }
-
- r = radeon_vce_init(rdev);
- if (!r) {
- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
-
- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
- }
+ si_uvd_init(rdev);
+ si_vce_init(rdev);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -7167,8 +7262,9 @@ void si_fini(struct radeon_device *rdev)
if (rdev->has_uvd) {
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
- radeon_vce_fini(rdev);
}
+ if (rdev->has_vce)
+ radeon_vce_fini(rdev);
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index 12ddcfa82..0dbeb504a 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -124,12 +124,13 @@ int uvd_v1_0_resume(struct radeon_device *rdev)
WREG32(UVD_VCPU_CACHE_SIZE0, size);
addr += size;
- size = RADEON_UVD_STACK_SIZE >> 3;
+ size = RADEON_UVD_HEAP_SIZE >> 3;
WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
WREG32(UVD_VCPU_CACHE_SIZE1, size);
addr += size;
- size = RADEON_UVD_HEAP_SIZE >> 3;
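+	/* the last VCPU cache window now covers the stack plus per-session data for each handle */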
+ size = (RADEON_UVD_STACK_SIZE +
+ (RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles)) >> 3;
WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
WREG32(UVD_VCPU_CACHE_SIZE2, size);
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 7ed778cec..9071e656a 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -116,12 +116,13 @@ int uvd_v2_2_resume(struct radeon_device *rdev)
WREG32(UVD_VCPU_CACHE_SIZE0, size);
addr += size;
- size = RADEON_UVD_STACK_SIZE >> 3;
+ size = RADEON_UVD_HEAP_SIZE >> 3;
WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
WREG32(UVD_VCPU_CACHE_SIZE1, size);
addr += size;
- size = RADEON_UVD_HEAP_SIZE >> 3;
+ size = (RADEON_UVD_STACK_SIZE +
+ (RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles)) >> 3;
WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
WREG32(UVD_VCPU_CACHE_SIZE2, size);
diff --git a/drivers/gpu/drm/radeon/uvd_v4_2.c b/drivers/gpu/drm/radeon/uvd_v4_2.c
index d04d5073e..91613b8a9 100644
--- a/drivers/gpu/drm/radeon/uvd_v4_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v4_2.c
@@ -41,18 +41,25 @@ int uvd_v4_2_resume(struct radeon_device *rdev)
uint32_t size;
 /* program the VCPU memory controller bits 0-27 */
- addr = rdev->uvd.gpu_addr >> 3;
+
+ /* skip over the header of the new firmware format */
+ if (rdev->uvd.fw_header_present)
+ addr = (rdev->uvd.gpu_addr + 0x200) >> 3;
+ else
+ addr = rdev->uvd.gpu_addr >> 3;
+
size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
WREG32(UVD_VCPU_CACHE_SIZE0, size);
addr += size;
- size = RADEON_UVD_STACK_SIZE >> 3;
+ size = RADEON_UVD_HEAP_SIZE >> 3;
WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
WREG32(UVD_VCPU_CACHE_SIZE1, size);
addr += size;
- size = RADEON_UVD_HEAP_SIZE >> 3;
+ size = (RADEON_UVD_STACK_SIZE +
+ (RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles)) >> 3;
WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
WREG32(UVD_VCPU_CACHE_SIZE2, size);
@@ -64,5 +71,8 @@ int uvd_v4_2_resume(struct radeon_device *rdev)
addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
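+	/* with the new firmware format, publish the handle limit through a GP scratch register */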
+ if (rdev->uvd.fw_header_present)
+ WREG32(UVD_GP_SCRATCH4, rdev->uvd.max_handles);
+
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 1f10fa092..7fc3ca5ce 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -2,7 +2,7 @@ config DRM_RCAR_DU
tristate "DRM Support for R-Car Display Unit"
depends on DRM && OF
depends on ARM || ARM64
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
@@ -27,6 +27,6 @@ config DRM_RCAR_LVDS
config DRM_RCAR_VSP
bool "R-Car DU VSP Compositor Support"
depends on DRM_RCAR_DU
- depends on VIDEO_RENESAS_VSP1
+ depends on VIDEO_RENESAS_VSP1=y || (VIDEO_RENESAS_VSP1 && DRM_RCAR_DU=m)
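+	# a built-in DU cannot call into a modular VSP1, so VSP1 must be built in unless the DU itself is a module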
help
Enable support to expose the R-Car VSP Compositor as KMS planes.
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index d9f06cc36..0d8bdda73 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -314,7 +314,7 @@ static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
return;
spin_lock_irqsave(&dev->event_lock, flags);
- drm_send_vblank_event(dev, rcrtc->index, event);
+ drm_crtc_send_vblank_event(&rcrtc->crtc, event);
wake_up(&rcrtc->flip_wait);
spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index ed6006bf6..fb9242d27 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -278,10 +278,7 @@ static int rcar_du_remove(struct platform_device *pdev)
struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
struct drm_device *ddev = rcdu->ddev;
- mutex_lock(&ddev->mode_config.mutex);
- drm_connector_unplug_all(ddev);
- mutex_unlock(&ddev->mode_config.mutex);
-
+ drm_connector_unregister_all(ddev);
drm_dev_unregister(ddev);
if (rcdu->fbdev)
@@ -300,7 +297,6 @@ static int rcar_du_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct rcar_du_device *rcdu;
- struct drm_connector *connector;
struct drm_device *ddev;
struct resource *mem;
int ret;
@@ -364,14 +360,7 @@ static int rcar_du_probe(struct platform_device *pdev)
if (ret)
goto error;
- mutex_lock(&ddev->mode_config.mutex);
- drm_for_each_connector(connector, ddev) {
- ret = drm_connector_register(connector);
- if (ret < 0)
- break;
- }
- mutex_unlock(&ddev->mode_config.mutex);
-
+ ret = drm_connector_register_all(ddev);
if (ret < 0)
goto error;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 24725bf85..e70a4f33d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -283,7 +283,8 @@ static void rcar_du_atomic_work(struct work_struct *work)
}
static int rcar_du_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state, bool async)
+ struct drm_atomic_state *state,
+ bool nonblock)
{
struct rcar_du_device *rcdu = dev->dev_private;
struct rcar_du_commit *commit;
@@ -328,7 +329,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(dev, state);
- if (async)
+ if (nonblock)
schedule_work(&commit->work);
else
rcar_du_atomic_complete(commit);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 8460ae1ff..d445e67f7 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -635,7 +635,7 @@ rcar_du_plane_atomic_duplicate_state(struct drm_plane *plane)
static void rcar_du_plane_atomic_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- __drm_atomic_helper_plane_destroy_state(plane, state);
+ __drm_atomic_helper_plane_destroy_state(state);
kfree(to_rcar_plane_state(state));
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index de7ef0411..e671a7cd3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -251,7 +251,7 @@ rcar_du_vsp_plane_atomic_duplicate_state(struct drm_plane *plane)
static void rcar_du_vsp_plane_atomic_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- __drm_atomic_helper_plane_destroy_state(plane, state);
+ __drm_atomic_helper_plane_destroy_state(state);
kfree(to_rcar_vsp_plane_state(state));
}
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 76b3362c5..d30bdc38a 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -16,6 +16,15 @@ config DRM_ROCKCHIP
2D or 3D acceleration; acceleration is performed by other
IP found on the SoC.
+config ROCKCHIP_ANALOGIX_DP
+ tristate "Rockchip specific extensions for Analogix DP driver"
+ depends on DRM_ROCKCHIP
+ select DRM_ANALOGIX_DP
+ help
+	  This selects support for Rockchip SoC specific extensions
+	  for the Analogix Core DP driver. If you want to enable DP
+	  on RK3288 based SoCs, you should select this option.
+
config ROCKCHIP_DW_HDMI
tristate "Rockchip specific extensions for Synopsys DW HDMI"
depends on DRM_ROCKCHIP
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index df8fbef17..05d07138a 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -6,6 +6,7 @@ rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
rockchip_drm_gem.o rockchip_drm_vop.o
rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
+obj-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
obj-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
new file mode 100644
index 000000000..7f6a55cae
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -0,0 +1,390 @@
+/*
+ * Rockchip SoC DP (Display Port) interface driver.
+ *
+ * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Author: Andy Yan <andy.yan@rock-chips.com>
+ * Yakir Yang <ykk@rock-chips.com>
+ * Jeff Chen <jeff.chen@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/component.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#include <drm/bridge/analogix_dp.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+#define to_dp(nm) container_of(nm, struct rockchip_dp_device, nm)
+
+/* dp grf register offset */
+#define GRF_SOC_CON6 0x025c
+#define GRF_EDP_LCD_SEL_MASK BIT(5)
+#define GRF_EDP_SEL_VOP_LIT BIT(5)
+#define GRF_EDP_SEL_VOP_BIG 0
+
+struct rockchip_dp_device {
+ struct drm_device *drm_dev;
+ struct device *dev;
+ struct drm_encoder encoder;
+ struct drm_display_mode mode;
+
+ struct clk *pclk;
+ struct regmap *grf;
+ struct reset_control *rst;
+
+ struct analogix_dp_plat_data plat_data;
+};
+
+static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
+{
+ reset_control_assert(dp->rst);
+ usleep_range(10, 20);
+ reset_control_deassert(dp->rst);
+
+ return 0;
+}
+
+static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
+{
+ struct rockchip_dp_device *dp = to_dp(plat_data);
+ int ret;
+
+ ret = clk_prepare_enable(dp->pclk);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to enable pclk %d\n", ret);
+ return ret;
+ }
+
+ ret = rockchip_dp_pre_init(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to dp pre init %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
+{
+ struct rockchip_dp_device *dp = to_dp(plat_data);
+
+ clk_disable_unprepare(dp->pclk);
+
+ return 0;
+}
+
+static bool
+rockchip_dp_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* do nothing */
+ return true;
+}
+
+static void rockchip_dp_drm_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted)
+{
+ /* do nothing */
+}
+
+static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
+{
+ struct rockchip_dp_device *dp = to_dp(encoder);
+ int ret;
+ u32 val;
+
+ ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
+ if (ret < 0)
+ return;
+
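+	/* GRF registers take a write-enable mask for the low 16 bits in the high 16 bits */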
+ if (ret)
+ val = GRF_EDP_SEL_VOP_LIT | (GRF_EDP_LCD_SEL_MASK << 16);
+ else
+ val = GRF_EDP_SEL_VOP_BIG | (GRF_EDP_LCD_SEL_MASK << 16);
+
+ dev_dbg(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
+
+ ret = regmap_write(dp->grf, GRF_SOC_CON6, val);
+ if (ret != 0) {
+ dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+ return;
+ }
+}
+
+static void rockchip_dp_drm_encoder_nop(struct drm_encoder *encoder)
+{
+ /* do nothing */
+}
+
+static int
+rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+ /*
+	 * FIXME(Yakir): the driver should configure the CRTC output video
+	 * mode from the display information, which indicates the
+	 * colorimetry the monitor supports.
+	 *
+	 * It is unclear why the CRTC driver only outputs RGBaaa correctly.
+	 * For example, with an "innolux,n116bge" eDP screen connected, the
+	 * EDID indicates that the screen only accepts the 6bpc mode. Yet
+	 * configuring the CRTC for RGB666 output makes the eDP screen show
+	 * a blue picture (RGB888 shows a green one), while configuring the
+	 * CRTC for RGBaaa with the eDP driver still using the RGB666 input
+	 * video mode makes the screen work perfectly.
+ */
+ s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+ s->output_type = DRM_MODE_CONNECTOR_eDP;
+
+ return 0;
+}
+
+static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
+ .mode_fixup = rockchip_dp_drm_encoder_mode_fixup,
+ .mode_set = rockchip_dp_drm_encoder_mode_set,
+ .enable = rockchip_dp_drm_encoder_enable,
+ .disable = rockchip_dp_drm_encoder_nop,
+ .atomic_check = rockchip_dp_drm_encoder_atomic_check,
+};
+
+static void rockchip_dp_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static struct drm_encoder_funcs rockchip_dp_encoder_funcs = {
+ .destroy = rockchip_dp_drm_encoder_destroy,
+};
+
+static int rockchip_dp_init(struct rockchip_dp_device *dp)
+{
+ struct device *dev = dp->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ if (IS_ERR(dp->grf)) {
+ dev_err(dev, "failed to get rockchip,grf property\n");
+ return PTR_ERR(dp->grf);
+ }
+
+ dp->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(dp->pclk)) {
+ dev_err(dev, "failed to get pclk property\n");
+ return PTR_ERR(dp->pclk);
+ }
+
+ dp->rst = devm_reset_control_get(dev, "dp");
+ if (IS_ERR(dp->rst)) {
+ dev_err(dev, "failed to get dp reset control\n");
+ return PTR_ERR(dp->rst);
+ }
+
+ ret = clk_prepare_enable(dp->pclk);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to enable pclk %d\n", ret);
+ return ret;
+ }
+
+ ret = rockchip_dp_pre_init(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to pre init %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rockchip_dp_drm_create_encoder(struct rockchip_dp_device *dp)
+{
+ struct drm_encoder *encoder = &dp->encoder;
+ struct drm_device *drm_dev = dp->drm_dev;
+ struct device *dev = dp->dev;
+ int ret;
+
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
+ dev->of_node);
+ DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+ ret = drm_encoder_init(drm_dev, encoder, &rockchip_dp_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret) {
+ DRM_ERROR("failed to initialize encoder with drm\n");
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &rockchip_dp_encoder_helper_funcs);
+
+ return 0;
+}
+
+static int rockchip_dp_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ int ret;
+
+ /*
+	 * As noted in the probe function, we no longer need the device
+	 * drvdata from here on; set it to NULL so that the analogix dp
+	 * driver can take charge of it.
+ */
+ dev_set_drvdata(dev, NULL);
+
+ ret = rockchip_dp_init(dp);
+ if (ret < 0)
+ return ret;
+
+ dp->drm_dev = drm_dev;
+
+ ret = rockchip_dp_drm_create_encoder(dp);
+ if (ret) {
+ DRM_ERROR("failed to create drm encoder\n");
+ return ret;
+ }
+
+ dp->plat_data.encoder = &dp->encoder;
+
+ dp->plat_data.dev_type = RK3288_DP;
+ dp->plat_data.power_on = rockchip_dp_poweron;
+ dp->plat_data.power_off = rockchip_dp_powerdown;
+
+ return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
+}
+
+static void rockchip_dp_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ return analogix_dp_unbind(dev, master, data);
+}
+
+static const struct component_ops rockchip_dp_component_ops = {
+ .bind = rockchip_dp_bind,
+ .unbind = rockchip_dp_unbind,
+};
+
+static int rockchip_dp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *panel_node, *port, *endpoint;
+ struct rockchip_dp_device *dp;
+ struct drm_panel *panel;
+
+ port = of_graph_get_port_by_id(dev->of_node, 1);
+ if (!port) {
+ dev_err(dev, "can't find output port\n");
+ return -EINVAL;
+ }
+
+ endpoint = of_get_child_by_name(port, "endpoint");
+ of_node_put(port);
+ if (!endpoint) {
+ dev_err(dev, "no output endpoint found\n");
+ return -EINVAL;
+ }
+
+ panel_node = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+ if (!panel_node) {
+ dev_err(dev, "no output node found\n");
+ return -EINVAL;
+ }
+
+ panel = of_drm_find_panel(panel_node);
+ if (!panel) {
+ DRM_ERROR("failed to find panel\n");
+ of_node_put(panel_node);
+ return -EPROBE_DEFER;
+ }
+
+ of_node_put(panel_node);
+
+ dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->dev = dev;
+
+ dp->plat_data.panel = panel;
+
+ /*
+	 * We only use the drvdata until the driver reaches the component
+	 * bind function, where we set it back to NULL so that the
+	 * analogix dp driver can take charge of the drvdata.
+ */
+ platform_set_drvdata(pdev, dp);
+
+ return component_add(dev, &rockchip_dp_component_ops);
+}
+
+static int rockchip_dp_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &rockchip_dp_component_ops);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rockchip_dp_suspend(struct device *dev)
+{
+ return analogix_dp_suspend(dev);
+}
+
+static int rockchip_dp_resume(struct device *dev)
+{
+ return analogix_dp_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops rockchip_dp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rockchip_dp_suspend, rockchip_dp_resume)
+};
+
+static const struct of_device_id rockchip_dp_dt_ids[] = {
+ {.compatible = "rockchip,rk3288-dp",},
+ {}
+};
+MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
+
+static struct platform_driver rockchip_dp_driver = {
+ .probe = rockchip_dp_probe,
+ .remove = rockchip_dp_remove,
+ .driver = {
+ .name = "rockchip-dp",
+ .owner = THIS_MODULE,
+ .pm = &rockchip_dp_pm_ops,
+ .of_match_table = of_match_ptr(rockchip_dp_dt_ids),
+ },
+};
+
+module_platform_driver(rockchip_dp_driver);
+
+MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
+MODULE_AUTHOR("Jeff chen <jeff.chen@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip Specific Analogix-DP Driver Extension");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index 797515806..dedc65b40 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -879,7 +879,6 @@ static void dw_mipi_dsi_encoder_commit(struct drm_encoder *encoder)
{
struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
int mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node, encoder);
- u32 interface_pix_fmt;
u32 val;
if (clk_prepare_enable(dsi->pclk)) {
@@ -895,31 +894,41 @@ static void dw_mipi_dsi_encoder_commit(struct drm_encoder *encoder)
clk_disable_unprepare(dsi->pclk);
+ if (mux)
+ val = DSI0_SEL_VOP_LIT | (DSI0_SEL_VOP_LIT << 16);
+ else
+ val = DSI0_SEL_VOP_LIT << 16;
+
+ regmap_write(dsi->grf_regmap, GRF_SOC_CON6, val);
+ dev_dbg(dsi->dev, "vop %s output to dsi0\n", (mux) ? "LIT" : "BIG");
+}
+
+static int
+dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
+
switch (dsi->format) {
case MIPI_DSI_FMT_RGB888:
- interface_pix_fmt = ROCKCHIP_OUT_MODE_P888;
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
break;
case MIPI_DSI_FMT_RGB666:
- interface_pix_fmt = ROCKCHIP_OUT_MODE_P666;
+ s->output_mode = ROCKCHIP_OUT_MODE_P666;
break;
case MIPI_DSI_FMT_RGB565:
- interface_pix_fmt = ROCKCHIP_OUT_MODE_P565;
+ s->output_mode = ROCKCHIP_OUT_MODE_P565;
break;
default:
WARN_ON(1);
- return;
+ return -EINVAL;
}
- rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_DSI,
- interface_pix_fmt);
+ s->output_type = DRM_MODE_CONNECTOR_DSI;
- if (mux)
- val = DSI0_SEL_VOP_LIT | (DSI0_SEL_VOP_LIT << 16);
- else
- val = DSI0_SEL_VOP_LIT << 16;
-
- regmap_write(dsi->grf_regmap, GRF_SOC_CON6, val);
- dev_dbg(dsi->dev, "vop %s output to dsi0\n", (mux) ? "LIT" : "BIG");
+ return 0;
}
static struct drm_encoder_helper_funcs
@@ -927,6 +936,7 @@ dw_mipi_dsi_encoder_helper_funcs = {
.commit = dw_mipi_dsi_encoder_commit,
.mode_set = dw_mipi_dsi_encoder_mode_set,
.disable = dw_mipi_dsi_encoder_disable,
+ .atomic_check = dw_mipi_dsi_encoder_atomic_check,
};
static struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index d5cfef75f..801110f65 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -201,9 +201,6 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
u32 val;
int mux;
- rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA,
- ROCKCHIP_OUT_MODE_AAAA);
-
mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
if (mux)
val = HDMI_SEL_VOP_LIT | (HDMI_SEL_VOP_LIT << 16);
@@ -215,11 +212,25 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
(mux) ? "LIT" : "BIG");
}
+static int
+dw_hdmi_rockchip_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
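+	/* record the routing in the CRTC state here; the VOP applies it when the CRTC is enabled */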
+ s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+ s->output_type = DRM_MODE_CONNECTOR_HDMIA;
+
+ return 0;
+}
+
static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_funcs = {
.mode_fixup = dw_hdmi_rockchip_encoder_mode_fixup,
.mode_set = dw_hdmi_rockchip_encoder_mode_set,
.enable = dw_hdmi_rockchip_encoder_enable,
.disable = dw_hdmi_rockchip_encoder_disable,
+ .atomic_check = dw_hdmi_rockchip_encoder_atomic_check,
};
static const struct dw_hdmi_plat_data rockchip_hdmi_drv_data = {
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 10d62fff2..f8b4feb60 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -500,9 +500,6 @@ static void inno_hdmi_encoder_enable(struct drm_encoder *encoder)
{
struct inno_hdmi *hdmi = to_inno_hdmi(encoder);
- rockchip_drm_crtc_mode_config(encoder->crtc, DRM_MODE_CONNECTOR_HDMIA,
- ROCKCHIP_OUT_MODE_P888);
-
inno_hdmi_set_pwr_mode(hdmi, NORMAL);
}
@@ -520,11 +517,25 @@ static bool inno_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
return true;
}
+static int
+inno_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ s->output_type = DRM_MODE_CONNECTOR_HDMIA;
+
+ return 0;
+}
+
static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
.enable = inno_hdmi_encoder_enable,
.disable = inno_hdmi_encoder_disable,
.mode_fixup = inno_hdmi_encoder_mode_fixup,
.mode_set = inno_hdmi_encoder_mode_set,
+ .atomic_check = inno_hdmi_encoder_atomic_check,
};
static struct drm_encoder_funcs inno_hdmi_encoder_funcs = {
@@ -855,8 +866,9 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
hdmi->ddc = inno_hdmi_i2c_adapter(hdmi);
if (IS_ERR(hdmi->ddc)) {
+ ret = PTR_ERR(hdmi->ddc);
hdmi->ddc = NULL;
- return PTR_ERR(hdmi->ddc);
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index f556a8f4f..a409d1f70 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -36,6 +36,8 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
+static bool is_support_iommu = true;
+
/*
* Attach a (component) device to the shared drm dma mapping from master drm
* device. This is used by the VOPs to map GEM buffers to a common DMA
@@ -47,6 +49,9 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping;
int ret;
+ if (!is_support_iommu)
+ return 0;
+
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (ret)
return ret;
@@ -59,6 +64,9 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
struct device *dev)
{
+ if (!is_support_iommu)
+ return;
+
arm_iommu_detach_device(dev);
}
@@ -127,7 +135,7 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev,
static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
{
struct rockchip_drm_private *private;
- struct dma_iommu_mapping *mapping;
+ struct dma_iommu_mapping *mapping = NULL;
struct device *dev = drm_dev->dev;
struct drm_connector *connector;
int ret;
@@ -152,23 +160,26 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
goto err_config_cleanup;
}
- /* TODO(djkurtz): fetch the mapping start/size from somewhere */
- mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000,
- SZ_2G);
- if (IS_ERR(mapping)) {
- ret = PTR_ERR(mapping);
- goto err_config_cleanup;
- }
+ if (is_support_iommu) {
+ /* TODO(djkurtz): fetch the mapping start/size from somewhere */
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ 0x00000000,
+ SZ_2G);
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ goto err_config_cleanup;
+ }
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (ret)
- goto err_release_mapping;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_release_mapping;
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
- ret = arm_iommu_attach_device(dev, mapping);
- if (ret)
- goto err_release_mapping;
+ ret = arm_iommu_attach_device(dev, mapping);
+ if (ret)
+ goto err_release_mapping;
+ }
/* Try to bind all sub drivers. */
ret = component_bind_all(dev, drm_dev);
@@ -205,19 +216,14 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
if (ret)
goto err_kms_helper_poll_fini;
- /*
- * with vblank_disable_allowed = true, vblank interrupt will be disabled
- * by drm timer once a current process gives up ownership of
- * vblank event.(after drm_vblank_put function is called)
- */
- drm_dev->vblank_disable_allowed = true;
-
drm_mode_config_reset(drm_dev);
ret = rockchip_drm_fbdev_init(drm_dev);
if (ret)
goto err_vblank_cleanup;
+ if (is_support_iommu)
+ arm_iommu_release_mapping(mapping);
return 0;
err_vblank_cleanup:
drm_vblank_cleanup(drm_dev);
@@ -226,9 +232,11 @@ err_kms_helper_poll_fini:
err_unbind:
component_unbind_all(dev, drm_dev);
err_detach_device:
- arm_iommu_detach_device(dev);
+ if (is_support_iommu)
+ arm_iommu_detach_device(dev);
err_release_mapping:
- arm_iommu_release_mapping(dev->archdata.mapping);
+ if (is_support_iommu)
+ arm_iommu_release_mapping(mapping);
err_config_cleanup:
drm_mode_config_cleanup(drm_dev);
drm_dev->dev_private = NULL;
@@ -243,8 +251,8 @@ static int rockchip_drm_unload(struct drm_device *drm_dev)
drm_vblank_cleanup(drm_dev);
drm_kms_helper_poll_fini(drm_dev);
component_unbind_all(dev, drm_dev);
- arm_iommu_detach_device(dev);
- arm_iommu_release_mapping(dev->archdata.mapping);
+ if (is_support_iommu)
+ arm_iommu_detach_device(dev);
drm_mode_config_cleanup(drm_dev);
drm_dev->dev_private = NULL;
@@ -488,6 +496,8 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
* works as expected.
*/
for (i = 0;; i++) {
+ struct device_node *iommu;
+
port = of_parse_phandle(np, "ports", i);
if (!port)
break;
@@ -497,6 +507,17 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
continue;
}
+ iommu = of_parse_phandle(port->parent, "iommus", 0);
+ if (!iommu || !of_device_is_available(iommu->parent)) {
+ dev_dbg(dev, "no iommu attached for %s, using non-iommu buffers\n",
+ port->parent->full_name);
+ /*
+			 * If any crtc does not support an iommu, force all
+			 * crtcs to use non-iommu buffers.
+ */
+ is_support_iommu = false;
+ }
+
component_match_add(dev, &match, compare_of, port->parent);
of_node_put(port);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 00d17d71a..56f43a364 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -50,6 +50,14 @@ struct rockchip_atomic_commit {
struct mutex lock;
};
+struct rockchip_crtc_state {
+ struct drm_crtc_state base;
+ int output_type;
+ int output_mode;
+};
+#define to_rockchip_crtc_state(s) \
+ container_of(s, struct rockchip_crtc_state, base)
+
/*
* Rockchip drm private structure.
*
@@ -68,8 +76,6 @@ void rockchip_drm_atomic_work(struct work_struct *work);
int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
const struct rockchip_crtc_funcs *crtc_funcs);
void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc);
-int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type,
- int out_mode);
int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
struct device *dev);
void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 3b8f65269..755cfdba6 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -123,8 +123,7 @@ rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
unsigned int height = mode_cmd->height / (i ? vsub : 1);
unsigned int min_size;
- obj = drm_gem_object_lookup(dev, file_priv,
- mode_cmd->handles[i]);
+ obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
if (!obj) {
dev_err(dev->dev, "Failed to lookup GEM object\n");
ret = -ENXIO;
@@ -276,7 +275,7 @@ void rockchip_drm_atomic_work(struct work_struct *work)
int rockchip_drm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
- bool async)
+ bool nonblock)
{
struct rockchip_drm_private *private = dev->dev_private;
struct rockchip_atomic_commit *commit = &private->commit;
@@ -286,7 +285,7 @@ int rockchip_drm_atomic_commit(struct drm_device *dev,
if (ret)
return ret;
- /* serialize outstanding asynchronous commits */
+ /* serialize outstanding nonblocking commits */
mutex_lock(&commit->lock);
flush_work(&commit->work);
@@ -295,7 +294,7 @@ int rockchip_drm_atomic_commit(struct drm_device *dev,
commit->dev = dev;
commit->state = state;
- if (async)
+ if (nonblock)
schedule_work(&commit->work);
else
rockchip_atomic_commit_complete(commit);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 18e07338c..9c2d8a894 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -198,7 +198,7 @@ int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_gem_object *obj;
int ret;
- obj = drm_gem_object_lookup(dev, file_priv, handle);
+ obj = drm_gem_object_lookup(file_priv, handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index a619f120f..1c4d5b5a7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -310,7 +310,7 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
uint16_t vsu_mode;
uint16_t lb_mode;
uint32_t val;
- int vskiplines;
+ int vskiplines = 0;
if (dst_w > 3840) {
DRM_ERROR("Maximum destination width (3840) exceeded\n");
@@ -560,6 +560,22 @@ static void vop_plane_destroy(struct drm_plane *plane)
drm_plane_cleanup(plane);
}
+static int vop_plane_prepare_fb(struct drm_plane *plane,
+ const struct drm_plane_state *new_state)
+{
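+	/* keep the current framebuffer alive across the flip; the reference is dropped in cleanup_fb() */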
+ if (plane->state->fb)
+ drm_framebuffer_reference(plane->state->fb);
+
+ return 0;
+}
+
+static void vop_plane_cleanup_fb(struct drm_plane *plane,
+ const struct drm_plane_state *old_state)
+{
+ if (old_state->fb)
+ drm_framebuffer_unreference(old_state->fb);
+}
+
static int vop_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -756,6 +772,8 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs plane_helper_funcs = {
+ .prepare_fb = vop_plane_prepare_fb,
+ .cleanup_fb = vop_plane_cleanup_fb,
.atomic_check = vop_plane_atomic_check,
.atomic_update = vop_plane_atomic_update,
.atomic_disable = vop_plane_atomic_disable,
@@ -804,7 +822,7 @@ static void vop_atomic_plane_destroy_state(struct drm_plane *plane,
{
struct vop_plane_state *vop_state = to_vop_plane_state(state);
- __drm_atomic_helper_plane_destroy_state(plane, state);
+ __drm_atomic_helper_plane_destroy_state(state);
kfree(vop_state);
}
@@ -818,38 +836,6 @@ static const struct drm_plane_funcs vop_plane_funcs = {
.atomic_destroy_state = vop_atomic_plane_destroy_state,
};
-int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
- int connector_type,
- int out_mode)
-{
- struct vop *vop = to_vop(crtc);
-
- if (WARN_ON(!vop->is_enabled))
- return -EINVAL;
-
- switch (connector_type) {
- case DRM_MODE_CONNECTOR_LVDS:
- VOP_CTRL_SET(vop, rgb_en, 1);
- break;
- case DRM_MODE_CONNECTOR_eDP:
- VOP_CTRL_SET(vop, edp_en, 1);
- break;
- case DRM_MODE_CONNECTOR_HDMIA:
- VOP_CTRL_SET(vop, hdmi_en, 1);
- break;
- case DRM_MODE_CONNECTOR_DSI:
- VOP_CTRL_SET(vop, mipi_en, 1);
- break;
- default:
- DRM_ERROR("unsupport connector_type[%d]\n", connector_type);
- return -EINVAL;
- };
- VOP_CTRL_SET(vop, out_mode, out_mode);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rockchip_drm_crtc_mode_config);
-
static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
@@ -931,6 +917,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
static void vop_crtc_enable(struct drm_crtc *crtc)
{
struct vop *vop = to_vop(crtc);
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
u16 hdisplay = adjusted_mode->hdisplay;
@@ -985,6 +972,23 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
VOP_CTRL_SET(vop, pin_pol, val);
+ switch (s->output_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ VOP_CTRL_SET(vop, rgb_en, 1);
+ break;
+ case DRM_MODE_CONNECTOR_eDP:
+ VOP_CTRL_SET(vop, edp_en, 1);
+ break;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ VOP_CTRL_SET(vop, hdmi_en, 1);
+ break;
+ case DRM_MODE_CONNECTOR_DSI:
+ VOP_CTRL_SET(vop, mipi_en, 1);
+ break;
+ default:
+ DRM_ERROR("unsupport connector_type[%d]\n", s->output_type);
+ }
+ VOP_CTRL_SET(vop, out_mode, s->output_mode);
VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
val = hact_st << 16;
@@ -1044,13 +1048,34 @@ static void vop_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
}
+static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct rockchip_crtc_state *rockchip_state;
+
+ rockchip_state = kzalloc(sizeof(*rockchip_state), GFP_KERNEL);
+ if (!rockchip_state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &rockchip_state->base);
+ return &rockchip_state->base;
+}
+
+static void vop_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(&s->base);
+ kfree(s);
+}
+
static const struct drm_crtc_funcs vop_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.destroy = vop_crtc_destroy,
.reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_duplicate_state = vop_crtc_duplicate_state,
+ .atomic_destroy_state = vop_crtc_destroy_state,
};
static bool vop_win_pending_is_complete(struct vop_win *vop_win)
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 88643ab16..1e154fc77 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -440,7 +440,7 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
event = scrtc->event;
scrtc->event = NULL;
if (event) {
- drm_send_vblank_event(dev, 0, event);
+ drm_crtc_send_vblank_event(&scrtc->crtc, event);
drm_vblank_put(dev, 0);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 505620c7c..e04deedab 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -51,15 +51,6 @@ static void sti_crtc_disabling(struct drm_crtc *crtc)
mixer->status = STI_MIXER_DISABLING;
}
-static bool sti_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* accept the provided drm_display_mode, do not fix it up */
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- return true;
-}
-
static int
sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
@@ -230,7 +221,6 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
.enable = sti_crtc_enable,
.disable = sti_crtc_disabling,
- .mode_fixup = sti_crtc_mode_fixup,
.mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = sti_crtc_mode_set_nofb,
.mode_set_base = drm_helper_crtc_mode_set_base,
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 3abb40015..4e9902997 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -6,6 +6,8 @@
* License terms: GNU General Public License (GPL), version 2
*/
+#include <linux/seq_file.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 6bd6abaa5..872495e72 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -202,7 +202,7 @@ static void sti_atomic_work(struct work_struct *work)
}
static int sti_atomic_commit(struct drm_device *drm,
- struct drm_atomic_state *state, bool async)
+ struct drm_atomic_state *state, bool nonblock)
{
struct sti_private *private = drm->dev_private;
int err;
@@ -211,7 +211,7 @@ static int sti_atomic_commit(struct drm_device *drm,
if (err)
return err;
- /* serialize outstanding asynchronous commits */
+ /* serialize outstanding nonblocking commits */
mutex_lock(&private->commit.lock);
flush_work(&private->commit.work);
@@ -223,7 +223,7 @@ static int sti_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(drm, state);
- if (async)
+ if (nonblock)
sti_atomic_schedule(private, state);
else
sti_atomic_complete(private, state);
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index ff3d3e7e7..ff33c38da 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -5,6 +5,7 @@
* for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
+#include <linux/seq_file.h>
#include <drm/drm_atomic.h>
#include <drm/drm_fb_cma_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index ec0d017ea..f7d3464cd 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -8,6 +8,7 @@
#include <linux/component.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 00bbbf862..52102045c 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -7,6 +7,7 @@
#include <linux/component.h>
#include <linux/firmware.h>
#include <linux/reset.h>
+#include <linux/seq_file.h>
#include <drm/drm_atomic.h>
#include <drm/drm_fb_cma_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index e7425c38f..aed7801b5 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -5,6 +5,7 @@
* for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
+#include <linux/seq_file.h>
#include "sti_compositor.h"
#include "sti_mixer.h"
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index 2c9901644..f983db5a5 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -12,6 +12,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 5a2c5dc36..523ed19f5 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -3,6 +3,7 @@
* Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*/
+#include <linux/seq_file.h>
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 32c7986b6..6bf4ce466 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -437,7 +437,7 @@ static int vtg_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
} else {
vtg->irq = platform_get_irq(pdev, 0);
- if (IS_ERR_VALUE(vtg->irq)) {
+ if (vtg->irq < 0) {
DRM_ERROR("Failed to get VTG interrupt\n");
return vtg->irq;
}
@@ -447,7 +447,7 @@ static int vtg_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq,
vtg_irq_thread, IRQF_ONESHOT,
dev_name(dev), vtg);
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
DRM_ERROR("Failed to register VTG interrupt\n");
return ret;
}
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
new file mode 100644
index 000000000..a4b357db8
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -0,0 +1,14 @@
+config DRM_SUN4I
+ tristate "DRM Support for Allwinner A10 Display Engine"
+ depends on DRM && ARM && COMMON_CLK
+ depends on ARCH_SUNXI || COMPILE_TEST
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_PANEL
+ select REGMAP_MMIO
+ select VIDEOMODE_HELPERS
+ help
+ Choose this option if you have an Allwinner SoC with a
+ Display Engine. If M is selected the module will be called
+ sun4i-drm.
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
new file mode 100644
index 000000000..58cd55149
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -0,0 +1,13 @@
+sun4i-drm-y += sun4i_crtc.o
+sun4i-drm-y += sun4i_drv.o
+sun4i-drm-y += sun4i_framebuffer.o
+sun4i-drm-y += sun4i_layer.o
+
+sun4i-tcon-y += sun4i_tcon.o
+sun4i-tcon-y += sun4i_rgb.o
+sun4i-tcon-y += sun4i_dotclock.o
+
+obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o sun4i-tcon.o
+obj-$(CONFIG_DRM_SUN4I) += sun4i_backend.o
+
+obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
new file mode 100644
index 000000000..3ab560450
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include <linux/component.h>
+#include <linux/reset.h>
+
+#include "sun4i_backend.h"
+#include "sun4i_drv.h"
+
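+/*
+ * One row per output component: three coefficients plus a constant
+ * term, written below to the output color correction registers
+ * (SUN4I_BACKEND_OCRCOEF_REG onwards). The values are assumed to be
+ * in the hardware's native fixed-point format.
+ */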
+static u32 sunxi_rgb2yuv_coef[12] = {
+ 0x00000107, 0x00000204, 0x00000064, 0x00000108,
+ 0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
+ 0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
+};
+
+void sun4i_backend_apply_color_correction(struct sun4i_backend *backend)
+{
+ int i;
+
+ DRM_DEBUG_DRIVER("Applying RGB to YUV color correction\n");
+
+ /* Set color correction */
+ regmap_write(backend->regs, SUN4I_BACKEND_OCCTL_REG,
+ SUN4I_BACKEND_OCCTL_ENABLE);
+
+ for (i = 0; i < 12; i++)
+ regmap_write(backend->regs, SUN4I_BACKEND_OCRCOEF_REG(i),
+ sunxi_rgb2yuv_coef[i]);
+}
+EXPORT_SYMBOL(sun4i_backend_apply_color_correction);
+
+void sun4i_backend_disable_color_correction(struct sun4i_backend *backend)
+{
+ DRM_DEBUG_DRIVER("Disabling color correction\n");
+
+ /* Disable color correction */
+ regmap_update_bits(backend->regs, SUN4I_BACKEND_OCCTL_REG,
+ SUN4I_BACKEND_OCCTL_ENABLE, 0);
+}
+EXPORT_SYMBOL(sun4i_backend_disable_color_correction);
+
+void sun4i_backend_commit(struct sun4i_backend *backend)
+{
+ DRM_DEBUG_DRIVER("Committing changes\n");
+
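+ /*
+ * Disable register auto-loading and request a manual load, so the
+ * shadowed values are latched into the hardware at a point we
+ * control.
+ */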
+ regmap_write(backend->regs, SUN4I_BACKEND_REGBUFFCTL_REG,
+ SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS |
+ SUN4I_BACKEND_REGBUFFCTL_LOADCTL);
+}
+EXPORT_SYMBOL(sun4i_backend_commit);
+
+void sun4i_backend_layer_enable(struct sun4i_backend *backend,
+ int layer, bool enable)
+{
+ u32 val;
+
+ DRM_DEBUG_DRIVER("%sabling layer %d\n", enable ? "En" : "Dis", layer);
+
+ if (enable)
+ val = SUN4I_BACKEND_MODCTL_LAY_EN(layer);
+ else
+ val = 0;
+
+ regmap_update_bits(backend->regs, SUN4I_BACKEND_MODCTL_REG,
+ SUN4I_BACKEND_MODCTL_LAY_EN(layer), val);
+}
+EXPORT_SYMBOL(sun4i_backend_layer_enable);
+
+static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
+ break;
+
+ case DRM_FORMAT_XRGB8888:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
+ break;
+
+ case DRM_FORMAT_RGB888:
+ *mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
+ int layer, struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+
+ DRM_DEBUG_DRIVER("Updating layer %d\n", layer);
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ DRM_DEBUG_DRIVER("Primary layer, updating global size W: %u H: %u\n",
+ state->crtc_w, state->crtc_h);
+ regmap_write(backend->regs, SUN4I_BACKEND_DISSIZE_REG,
+ SUN4I_BACKEND_DISSIZE(state->crtc_w,
+ state->crtc_h));
+ }
+
+ /* Set the line width */
+ DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
+ regmap_write(backend->regs, SUN4I_BACKEND_LAYLINEWIDTH_REG(layer),
+ fb->pitches[0] * 8);
+
+ /* Set height and width */
+ DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n",
+ state->crtc_w, state->crtc_h);
+ regmap_write(backend->regs, SUN4I_BACKEND_LAYSIZE_REG(layer),
+ SUN4I_BACKEND_LAYSIZE(state->crtc_w,
+ state->crtc_h));
+
+ /* Set base coordinates */
+ DRM_DEBUG_DRIVER("Layer coordinates X: %d Y: %d\n",
+ state->crtc_x, state->crtc_y);
+ regmap_write(backend->regs, SUN4I_BACKEND_LAYCOOR_REG(layer),
+ SUN4I_BACKEND_LAYCOOR(state->crtc_x,
+ state->crtc_y));
+
+ return 0;
+}
+EXPORT_SYMBOL(sun4i_backend_update_layer_coord);
+
+int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
+ int layer, struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ bool interlaced = false;
+ u32 val;
+ int ret;
+
+ if (plane->state->crtc)
+ interlaced = plane->state->crtc->state->adjusted_mode.flags
+ & DRM_MODE_FLAG_INTERLACE;
+
+ regmap_update_bits(backend->regs, SUN4I_BACKEND_MODCTL_REG,
+ SUN4I_BACKEND_MODCTL_ITLMOD_EN,
+ interlaced ? SUN4I_BACKEND_MODCTL_ITLMOD_EN : 0);
+
+ DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
+ interlaced ? "on" : "off");
+
+ ret = sun4i_backend_drm_format_to_layer(fb->pixel_format, &val);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Invalid format\n");
+ return ret;
+ }
+
+ regmap_update_bits(backend->regs, SUN4I_BACKEND_ATTCTL_REG1(layer),
+ SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(sun4i_backend_update_layer_formats);
+
+int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
+ int layer, struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *gem;
+ u32 lo_paddr, hi_paddr;
+ dma_addr_t paddr;
+ int bpp;
+
+ /* Get the physical address of the buffer in memory */
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+
+ DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
+
+ /* Compute the start of the displayed memory */
+ bpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ paddr = gem->paddr + fb->offsets[0];
+ paddr += (state->src_x >> 16) * bpp;
+ paddr += (state->src_y >> 16) * fb->pitches[0];
+
+ DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
+
+ /* Write the 32 lower bits of the address (in bits) */
+ lo_paddr = paddr << 3;
+ DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
+ regmap_write(backend->regs, SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
+ lo_paddr);
+
+ /* And the high 4 bits, i.e. bits [35:32] of the address expressed in bits */
+ hi_paddr = paddr >> 29;
+ DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
+ regmap_update_bits(backend->regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
+ SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
+ SUN4I_BACKEND_LAYFB_H4ADD(layer, hi_paddr));
+
+ return 0;
+}
+EXPORT_SYMBOL(sun4i_backend_update_layer_buffer);
+
+static struct regmap_config sun4i_backend_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x5800,
+};
+
+static int sun4i_backend_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct sun4i_drv *drv = drm->dev_private;
+ struct sun4i_backend *backend;
+ struct resource *res;
+ void __iomem *regs;
+ int i, ret;
+
+ backend = devm_kzalloc(dev, sizeof(*backend), GFP_KERNEL);
+ if (!backend)
+ return -ENOMEM;
+ dev_set_drvdata(dev, backend);
+ drv->backend = backend;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs)) {
+ dev_err(dev, "Couldn't map the backend registers\n");
+ return PTR_ERR(regs);
+ }
+
+ backend->regs = devm_regmap_init_mmio(dev, regs,
+ &sun4i_backend_regmap_config);
+ if (IS_ERR(backend->regs)) {
+ dev_err(dev, "Couldn't create the backend0 regmap\n");
+ return PTR_ERR(backend->regs);
+ }
+
+ backend->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(backend->reset)) {
+ dev_err(dev, "Couldn't get our reset line\n");
+ return PTR_ERR(backend->reset);
+ }
+
+ ret = reset_control_deassert(backend->reset);
+ if (ret) {
+ dev_err(dev, "Couldn't deassert our reset line\n");
+ return ret;
+ }
+
+ backend->bus_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(backend->bus_clk)) {
+ dev_err(dev, "Couldn't get the backend bus clock\n");
+ ret = PTR_ERR(backend->bus_clk);
+ goto err_assert_reset;
+ }
+ clk_prepare_enable(backend->bus_clk);
+
+ backend->mod_clk = devm_clk_get(dev, "mod");
+ if (IS_ERR(backend->mod_clk)) {
+ dev_err(dev, "Couldn't get the backend module clock\n");
+ ret = PTR_ERR(backend->mod_clk);
+ goto err_disable_bus_clk;
+ }
+ clk_prepare_enable(backend->mod_clk);
+
+ backend->ram_clk = devm_clk_get(dev, "ram");
+ if (IS_ERR(backend->ram_clk)) {
+ dev_err(dev, "Couldn't get the backend RAM clock\n");
+ ret = PTR_ERR(backend->ram_clk);
+ goto err_disable_mod_clk;
+ }
+ clk_prepare_enable(backend->ram_clk);
+
+ /* Reset the registers */
+ for (i = 0x800; i < 0x1000; i += 4)
+ regmap_write(backend->regs, i, 0);
+
+ /* Disable registers autoloading */
+ regmap_write(backend->regs, SUN4I_BACKEND_REGBUFFCTL_REG,
+ SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS);
+
+ /* Enable the backend */
+ regmap_write(backend->regs, SUN4I_BACKEND_MODCTL_REG,
+ SUN4I_BACKEND_MODCTL_DEBE_EN |
+ SUN4I_BACKEND_MODCTL_START_CTL);
+
+ return 0;
+
+err_disable_mod_clk:
+ clk_disable_unprepare(backend->mod_clk);
+err_disable_bus_clk:
+ clk_disable_unprepare(backend->bus_clk);
+err_assert_reset:
+ reset_control_assert(backend->reset);
+ return ret;
+}
+
+static void sun4i_backend_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sun4i_backend *backend = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(backend->ram_clk);
+ clk_disable_unprepare(backend->mod_clk);
+ clk_disable_unprepare(backend->bus_clk);
+ reset_control_assert(backend->reset);
+}
+
+static struct component_ops sun4i_backend_ops = {
+ .bind = sun4i_backend_bind,
+ .unbind = sun4i_backend_unbind,
+};
+
+static int sun4i_backend_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &sun4i_backend_ops);
+}
+
+static int sun4i_backend_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sun4i_backend_ops);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_backend_of_table[] = {
+ { .compatible = "allwinner,sun5i-a13-display-backend" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
+
+static struct platform_driver sun4i_backend_platform_driver = {
+ .probe = sun4i_backend_probe,
+ .remove = sun4i_backend_remove,
+ .driver = {
+ .name = "sun4i-backend",
+ .of_match_table = sun4i_backend_of_table,
+ },
+};
+module_platform_driver(sun4i_backend_platform_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A10 Display Backend Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
new file mode 100644
index 000000000..7070bb343
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN4I_BACKEND_H_
+#define _SUN4I_BACKEND_H_
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#define SUN4I_BACKEND_MODCTL_REG 0x800
+#define SUN4I_BACKEND_MODCTL_LINE_SEL BIT(29)
+#define SUN4I_BACKEND_MODCTL_ITLMOD_EN BIT(28)
+#define SUN4I_BACKEND_MODCTL_OUT_SEL GENMASK(22, 20)
+#define SUN4I_BACKEND_MODCTL_OUT_LCD (0 << 20)
+#define SUN4I_BACKEND_MODCTL_OUT_FE0 (6 << 20)
+#define SUN4I_BACKEND_MODCTL_OUT_FE1 (7 << 20)
+#define SUN4I_BACKEND_MODCTL_HWC_EN BIT(16)
+#define SUN4I_BACKEND_MODCTL_LAY_EN(l) BIT(8 + (l))
+#define SUN4I_BACKEND_MODCTL_OCSC_EN BIT(5)
+#define SUN4I_BACKEND_MODCTL_DFLK_EN BIT(4)
+#define SUN4I_BACKEND_MODCTL_DLP_START_CTL BIT(2)
+#define SUN4I_BACKEND_MODCTL_START_CTL BIT(1)
+#define SUN4I_BACKEND_MODCTL_DEBE_EN BIT(0)
+
+#define SUN4I_BACKEND_BACKCOLOR_REG 0x804
+#define SUN4I_BACKEND_BACKCOLOR(r, g, b) (((r) << 16) | ((g) << 8) | (b))
+
+#define SUN4I_BACKEND_DISSIZE_REG 0x808
+#define SUN4I_BACKEND_DISSIZE(w, h) (((((h) - 1) & 0xffff) << 16) | \
+ (((w) - 1) & 0xffff))
+
+#define SUN4I_BACKEND_LAYSIZE_REG(l) (0x810 + (0x4 * (l)))
+#define SUN4I_BACKEND_LAYSIZE(w, h) (((((h) - 1) & 0x1fff) << 16) | \
+ (((w) - 1) & 0x1fff))
+
+#define SUN4I_BACKEND_LAYCOOR_REG(l) (0x820 + (0x4 * (l)))
+#define SUN4I_BACKEND_LAYCOOR(x, y) ((((u32)(y) & 0xffff) << 16) | \
+ ((u32)(x) & 0xffff))
+
+#define SUN4I_BACKEND_LAYLINEWIDTH_REG(l) (0x840 + (0x4 * (l)))
+
+#define SUN4I_BACKEND_LAYFB_L32ADD_REG(l) (0x850 + (0x4 * (l)))
+
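+/* High 4 bits of each layer's framebuffer address, one byte per layer */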
+#define SUN4I_BACKEND_LAYFB_H4ADD_REG 0x860
+#define SUN4I_BACKEND_LAYFB_H4ADD_MSK(l) GENMASK(3 + ((l) * 8), 0)
+#define SUN4I_BACKEND_LAYFB_H4ADD(l, val) ((val) << ((l) * 8))
+
+#define SUN4I_BACKEND_REGBUFFCTL_REG 0x870
+#define SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS BIT(1)
+#define SUN4I_BACKEND_REGBUFFCTL_LOADCTL BIT(0)
+
+#define SUN4I_BACKEND_CKMAX_REG 0x880
+#define SUN4I_BACKEND_CKMIN_REG 0x884
+#define SUN4I_BACKEND_CKCFG_REG 0x888
+#define SUN4I_BACKEND_ATTCTL_REG0(l) (0x890 + (0x4 * (l)))
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK BIT(15)
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(x) ((x) << 15)
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK GENMASK(11, 10)
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(x) ((x) << 10)
+
+#define SUN4I_BACKEND_ATTCTL_REG1(l) (0x8a0 + (0x4 * (l)))
+#define SUN4I_BACKEND_ATTCTL_REG1_LAY_HSCAFCT GENMASK(15, 14)
+#define SUN4I_BACKEND_ATTCTL_REG1_LAY_WSCAFCT GENMASK(13, 12)
+#define SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT GENMASK(11, 8)
+#define SUN4I_BACKEND_LAY_FBFMT_1BPP (0 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_2BPP (1 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_4BPP (2 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_8BPP (3 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_RGB655 (4 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_RGB565 (5 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_RGB556 (6 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_ARGB1555 (7 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_RGBA5551 (8 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_XRGB8888 (9 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_ARGB8888 (10 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_RGB888 (11 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_ARGB4444 (12 << 8)
+#define SUN4I_BACKEND_LAY_FBFMT_RGBA4444 (13 << 8)
+
+#define SUN4I_BACKEND_DLCDPCTL_REG 0x8b0
+#define SUN4I_BACKEND_DLCDPFRMBUF_ADDRCTL_REG 0x8b4
+#define SUN4I_BACKEND_DLCDPCOOR_REG0 0x8b8
+#define SUN4I_BACKEND_DLCDPCOOR_REG1 0x8bc
+
+#define SUN4I_BACKEND_INT_EN_REG 0x8c0
+#define SUN4I_BACKEND_INT_FLAG_REG 0x8c4
+#define SUN4I_BACKEND_REG_LOAD_FINISHED BIT(1)
+
+#define SUN4I_BACKEND_HWCCTL_REG 0x8d8
+#define SUN4I_BACKEND_HWCFBCTL_REG 0x8e0
+#define SUN4I_BACKEND_WBCTL_REG 0x8f0
+#define SUN4I_BACKEND_WBADD_REG 0x8f4
+#define SUN4I_BACKEND_WBLINEWIDTH_REG 0x8f8
+#define SUN4I_BACKEND_SPREN_REG 0x900
+#define SUN4I_BACKEND_SPRFMTCTL_REG 0x908
+#define SUN4I_BACKEND_SPRALPHACTL_REG 0x90c
+#define SUN4I_BACKEND_IYUVCTL_REG 0x920
+#define SUN4I_BACKEND_IYUVADD_REG(c) (0x930 + (0x4 * (c)))
+#define SUN4I_BACKEND_IYUVLINEWITDTH_REG(c) (0x940 + (0x4 * (c)))
+#define SUN4I_BACKEND_YGCOEF_REG(c) (0x950 + (0x4 * (c)))
+#define SUN4I_BACKEND_YGCONS_REG 0x95c
+#define SUN4I_BACKEND_URCOEF_REG(c) (0x960 + (0x4 * (c)))
+#define SUN4I_BACKEND_URCONS_REG 0x96c
+#define SUN4I_BACKEND_VBCOEF_REG(c) (0x970 + (0x4 * (c)))
+#define SUN4I_BACKEND_VBCONS_REG 0x97c
+#define SUN4I_BACKEND_KSCTL_REG 0x980
+#define SUN4I_BACKEND_KSBKCOLOR_REG 0x984
+#define SUN4I_BACKEND_KSFSTLINEWIDTH_REG 0x988
+#define SUN4I_BACKEND_KSVSCAFCT_REG 0x98c
+#define SUN4I_BACKEND_KSHSCACOEF_REG(x) (0x9a0 + (0x4 * (x)))
+#define SUN4I_BACKEND_OCCTL_REG 0x9c0
+#define SUN4I_BACKEND_OCCTL_ENABLE BIT(0)
+
+#define SUN4I_BACKEND_OCRCOEF_REG(x) (0x9d0 + (0x4 * (x)))
+#define SUN4I_BACKEND_OCRCONS_REG 0x9dc
+#define SUN4I_BACKEND_OCGCOEF_REG(x) (0x9e0 + (0x4 * (x)))
+#define SUN4I_BACKEND_OCGCONS_REG 0x9ec
+#define SUN4I_BACKEND_OCBCOEF_REG(x) (0x9f0 + (0x4 * (x)))
+#define SUN4I_BACKEND_OCBCONS_REG 0x9fc
+#define SUN4I_BACKEND_SPRCOORCTL_REG(s) (0xa00 + (0x4 * (s)))
+#define SUN4I_BACKEND_SPRATTCTL_REG(s) (0xb00 + (0x4 * (s)))
+#define SUN4I_BACKEND_SPRADD_REG(s) (0xc00 + (0x4 * (s)))
+#define SUN4I_BACKEND_SPRLINEWIDTH_REG(s) (0xd00 + (0x4 * (s)))
+
+#define SUN4I_BACKEND_SPRPALTAB_OFF 0x4000
+#define SUN4I_BACKEND_GAMMATAB_OFF 0x4400
+#define SUN4I_BACKEND_HWCPATTERN_OFF 0x4800
+#define SUN4I_BACKEND_HWCCOLORTAB_OFF 0x4c00
+#define SUN4I_BACKEND_PIPE_OFF(p) (0x5000 + (0x400 * (p)))
+
+struct sun4i_backend {
+ struct regmap *regs;
+
+ struct reset_control *reset;
+
+ struct clk *bus_clk;
+ struct clk *mod_clk;
+ struct clk *ram_clk;
+};
+
+void sun4i_backend_apply_color_correction(struct sun4i_backend *backend);
+void sun4i_backend_disable_color_correction(struct sun4i_backend *backend);
+
+void sun4i_backend_commit(struct sun4i_backend *backend);
+
+void sun4i_backend_layer_enable(struct sun4i_backend *backend,
+ int layer, bool enable);
+int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
+ int layer, struct drm_plane *plane);
+int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
+ int layer, struct drm_plane *plane);
+int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
+ int layer, struct drm_plane *plane);
+
+#endif /* _SUN4I_BACKEND_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
new file mode 100644
index 000000000..41cacecbe
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modes.h>
+
+#include <linux/clk-provider.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+
+#include <video/videomode.h>
+
+#include "sun4i_backend.h"
+#include "sun4i_crtc.h"
+#include "sun4i_drv.h"
+#include "sun4i_tcon.h"
+
+static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
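+ /*
+ * Cache the pending event; it is presumably sent out from the
+ * vblank interrupt handler once the update has taken effect.
+ */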
+ if (crtc->state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ scrtc->event = crtc->state->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ crtc->state->event = NULL;
+ }
+}
+
+static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
+ struct sun4i_drv *drv = scrtc->drv;
+
+ DRM_DEBUG_DRIVER("Committing plane changes\n");
+
+ sun4i_backend_commit(drv->backend);
+}
+
+static void sun4i_crtc_disable(struct drm_crtc *crtc)
+{
+ struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
+ struct sun4i_drv *drv = scrtc->drv;
+
+ DRM_DEBUG_DRIVER("Disabling the CRTC\n");
+
+ sun4i_tcon_disable(drv->tcon);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
+}
+
+static void sun4i_crtc_enable(struct drm_crtc *crtc)
+{
+ struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
+ struct sun4i_drv *drv = scrtc->drv;
+
+ DRM_DEBUG_DRIVER("Enabling the CRTC\n");
+
+ sun4i_tcon_enable(drv->tcon);
+}
+
+static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
+ .atomic_begin = sun4i_crtc_atomic_begin,
+ .atomic_flush = sun4i_crtc_atomic_flush,
+ .disable = sun4i_crtc_disable,
+ .enable = sun4i_crtc_enable,
+};
+
+static const struct drm_crtc_funcs sun4i_crtc_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .set_config = drm_atomic_helper_set_config,
+};
+
+struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm)
+{
+ struct sun4i_drv *drv = drm->dev_private;
+ struct sun4i_crtc *scrtc;
+ int ret;
+
+ scrtc = devm_kzalloc(drm->dev, sizeof(*scrtc), GFP_KERNEL);
+ if (!scrtc)
+ return NULL;
+ scrtc->drv = drv;
+
+ ret = drm_crtc_init_with_planes(drm, &scrtc->crtc,
+ drv->primary,
+ NULL,
+ &sun4i_crtc_funcs,
+ NULL);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't init DRM CRTC\n");
+ return NULL;
+ }
+
+ drm_crtc_helper_add(&scrtc->crtc, &sun4i_crtc_helper_funcs);
+
+ return scrtc;
+}
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.h b/drivers/gpu/drm/sun4i/sun4i_crtc.h
new file mode 100644
index 000000000..dec8ce4d9
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN4I_CRTC_H_
+#define _SUN4I_CRTC_H_
+
+struct sun4i_crtc {
+ struct drm_crtc crtc;
+ struct drm_pending_vblank_event *event;
+
+ struct sun4i_drv *drv;
+};
+
+static inline struct sun4i_crtc *drm_crtc_to_sun4i_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct sun4i_crtc, crtc);
+}
+
+struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm);
+
+#endif /* _SUN4I_CRTC_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
new file mode 100644
index 000000000..5b3463197
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2016 Free Electrons
+ * Copyright (C) 2016 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include "sun4i_tcon.h"
+
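+/*
+ * The dot clock is modelled as a gate plus a 7-bit divider in the
+ * TCON0 dot clock register, with the TCON channel 0 clock (sclk0)
+ * as its parent.
+ */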
+struct sun4i_dclk {
+ struct clk_hw hw;
+ struct regmap *regmap;
+};
+
+static inline struct sun4i_dclk *hw_to_dclk(struct clk_hw *hw)
+{
+ return container_of(hw, struct sun4i_dclk, hw);
+}
+
+static void sun4i_dclk_disable(struct clk_hw *hw)
+{
+ struct sun4i_dclk *dclk = hw_to_dclk(hw);
+
+ regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
+ BIT(SUN4I_TCON0_DCLK_GATE_BIT), 0);
+}
+
+static int sun4i_dclk_enable(struct clk_hw *hw)
+{
+ struct sun4i_dclk *dclk = hw_to_dclk(hw);
+
+ return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
+ BIT(SUN4I_TCON0_DCLK_GATE_BIT),
+ BIT(SUN4I_TCON0_DCLK_GATE_BIT));
+}
+
+static int sun4i_dclk_is_enabled(struct clk_hw *hw)
+{
+ struct sun4i_dclk *dclk = hw_to_dclk(hw);
+ u32 val;
+
+ regmap_read(dclk->regmap, SUN4I_TCON0_DCLK_REG, &val);
+
+ return val & BIT(SUN4I_TCON0_DCLK_GATE_BIT);
+}
+
+static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct sun4i_dclk *dclk = hw_to_dclk(hw);
+ u32 val;
+
+ regmap_read(dclk->regmap, SUN4I_TCON0_DCLK_REG, &val);
+
+ val >>= SUN4I_TCON0_DCLK_DIV_SHIFT;
+ val &= SUN4I_TCON0_DCLK_DIV_WIDTH;
+
+ if (!val)
+ val = 1;
+
+ return parent_rate / val;
+}
+
+static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned long best_parent = 0;
+ u8 best_div = 1;
+ int i;
+
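+ /*
+ * The divider field is 7 bits wide; dividers below 6 are assumed
+ * to be unusable on this hardware, hence the scan range.
+ */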
+ for (i = 6; i < 127; i++) {
+ unsigned long ideal = rate * i;
+ unsigned long rounded;
+
+ rounded = clk_hw_round_rate(clk_hw_get_parent(hw),
+ ideal);
+
+ if (rounded == ideal) {
+ best_parent = rounded;
+ best_div = i;
+ goto out;
+ }
+
+ if ((rounded < ideal) && (rounded > best_parent)) {
+ best_parent = rounded;
+ best_div = i;
+ }
+ }
+
+out:
+ *parent_rate = best_parent;
+
+ return best_parent / best_div;
+}
+
+static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sun4i_dclk *dclk = hw_to_dclk(hw);
+ u8 div = parent_rate / rate;
+
+ return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
+ GENMASK(6, 0), div);
+}
+
+static int sun4i_dclk_get_phase(struct clk_hw *hw)
+{
+ struct sun4i_dclk *dclk = hw_to_dclk(hw);
+ u32 val;
+
+ regmap_read(dclk->regmap, SUN4I_TCON0_IO_POL_REG, &val);
+
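+ /* Bits [29:28] encode the phase in 120 degree steps */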
+ val >>= 28;
+ val &= 3;
+
+ return val * 120;
+}
+
+static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
+{
+ struct sun4i_dclk *dclk = hw_to_dclk(hw);
+
+ regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
+ GENMASK(29, 28),
+ degrees / 120);
+
+ return 0;
+}
+
+static const struct clk_ops sun4i_dclk_ops = {
+ .disable = sun4i_dclk_disable,
+ .enable = sun4i_dclk_enable,
+ .is_enabled = sun4i_dclk_is_enabled,
+
+ .recalc_rate = sun4i_dclk_recalc_rate,
+ .round_rate = sun4i_dclk_round_rate,
+ .set_rate = sun4i_dclk_set_rate,
+
+ .get_phase = sun4i_dclk_get_phase,
+ .set_phase = sun4i_dclk_set_phase,
+};
+
+int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
+{
+ const char *clk_name, *parent_name;
+ struct clk_init_data init;
+ struct sun4i_dclk *dclk;
+ int ret;
+
+ parent_name = __clk_get_name(tcon->sclk0);
+ ret = of_property_read_string_index(dev->of_node,
+ "clock-output-names", 0,
+ &clk_name);
+ if (ret)
+ return ret;
+
+ dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL);
+ if (!dclk)
+ return -ENOMEM;
+
+ init.name = clk_name;
+ init.ops = &sun4i_dclk_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_PARENT;
+
+ dclk->regmap = tcon->regs;
+ dclk->hw.init = &init;
+
+ tcon->dclk = clk_register(dev, &dclk->hw);
+ if (IS_ERR(tcon->dclk))
+ return PTR_ERR(tcon->dclk);
+
+ return 0;
+}
+EXPORT_SYMBOL(sun4i_dclk_create);
+
+int sun4i_dclk_free(struct sun4i_tcon *tcon)
+{
+ clk_unregister(tcon->dclk);
+ return 0;
+}
+EXPORT_SYMBOL(sun4i_dclk_free);
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.h b/drivers/gpu/drm/sun4i/sun4i_dotclock.h
new file mode 100644
index 000000000..d5e25fa9e
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN4I_DOTCLOCK_H_
+#define _SUN4I_DOTCLOCK_H_
+
+struct sun4i_tcon;
+
+int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon);
+int sun4i_dclk_free(struct sun4i_tcon *tcon);
+
+#endif /* _SUN4I_DOTCLOCK_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
new file mode 100644
index 000000000..937394cbc
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/component.h>
+#include <linux/of_graph.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+
+#include "sun4i_crtc.h"
+#include "sun4i_drv.h"
+#include "sun4i_framebuffer.h"
+#include "sun4i_layer.h"
+#include "sun4i_tcon.h"
+
+static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+ struct sun4i_drv *drv = drm->dev_private;
+ struct sun4i_tcon *tcon = drv->tcon;
+
+ DRM_DEBUG_DRIVER("Enabling VBLANK on pipe %d\n", pipe);
+
+ sun4i_tcon_enable_vblank(tcon, true);
+
+ return 0;
+}
+
+static void sun4i_drv_disable_vblank(struct drm_device *drm, unsigned int pipe)
+{
+ struct sun4i_drv *drv = drm->dev_private;
+ struct sun4i_tcon *tcon = drv->tcon;
+
+ DRM_DEBUG_DRIVER("Disabling VBLANK on pipe %d\n", pipe);
+
+ sun4i_tcon_enable_vblank(tcon, false);
+}
+
+static const struct file_operations sun4i_drv_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = drm_gem_cma_mmap,
+};
+
+static struct drm_driver sun4i_drv_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+
+ /* Generic Operations */
+ .fops = &sun4i_drv_fops,
+ .name = "sun4i-drm",
+ .desc = "Allwinner sun4i Display Engine",
+ .date = "20150629",
+ .major = 1,
+ .minor = 0,
+
+ /* GEM Operations */
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+
+ /* PRIME Operations */
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+ /* Frame Buffer Operations */
+
+ /* VBlank Operations */
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = sun4i_drv_enable_vblank,
+ .disable_vblank = sun4i_drv_disable_vblank,
+};
+
+static void sun4i_remove_framebuffers(void)
+{
+ struct apertures_struct *ap;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
+ /* The framebuffer can be located anywhere in RAM */
+ ap->ranges[0].base = 0;
+ ap->ranges[0].size = ~0;
+
+ remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
+ kfree(ap);
+}
+
+static int sun4i_drv_bind(struct device *dev)
+{
+ struct drm_device *drm;
+ struct sun4i_drv *drv;
+ int ret;
+
+ drm = drm_dev_alloc(&sun4i_drv_driver, dev);
+ if (!drm)
+ return -ENOMEM;
+
+ ret = drm_dev_set_unique(drm, dev_name(drm->dev));
+ if (ret)
+ goto free_drm;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv) {
+ ret = -ENOMEM;
+ goto free_drm;
+ }
+ drm->dev_private = drv;
+
+ drm_vblank_init(drm, 1);
+ drm_mode_config_init(drm);
+
+ ret = component_bind_all(drm->dev, drm);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't bind all pipelines components\n");
+ goto free_drm;
+ }
+
+ /* Create our layers */
+ drv->layers = sun4i_layers_init(drm);
+ if (!drv->layers) {
+ dev_err(drm->dev, "Couldn't create the planes\n");
+ ret = -EINVAL;
+ goto free_drm;
+ }
+
+ /* Create our CRTC */
+ drv->crtc = sun4i_crtc_init(drm);
+ if (!drv->crtc) {
+ dev_err(drm->dev, "Couldn't create the CRTC\n");
+ ret = -EINVAL;
+ goto free_drm;
+ }
+ drm->irq_enabled = true;
+
+ /* Remove early framebuffers (i.e. simplefb) */
+ sun4i_remove_framebuffers();
+
+ /* Create our framebuffer */
+ drv->fbdev = sun4i_framebuffer_init(drm);
+ if (IS_ERR(drv->fbdev)) {
+ dev_err(drm->dev, "Couldn't create our framebuffer\n");
+ ret = PTR_ERR(drv->fbdev);
+ goto free_drm;
+ }
+
+ /* Enable connectors polling */
+ drm_kms_helper_poll_init(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto free_drm;
+
+ ret = drm_connector_register_all(drm);
+ if (ret)
+ goto unregister_drm;
+
+ return 0;
+
+unregister_drm:
+ drm_dev_unregister(drm);
+free_drm:
+ drm_dev_unref(drm);
+ return ret;
+}
+
+static void sun4i_drv_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ drm_connector_unregister_all(drm);
+ drm_dev_unregister(drm);
+ drm_kms_helper_poll_fini(drm);
+ sun4i_framebuffer_free(drm);
+ drm_vblank_cleanup(drm);
+ drm_dev_unref(drm);
+}
+
+static const struct component_master_ops sun4i_drv_master_ops = {
+ .bind = sun4i_drv_bind,
+ .unbind = sun4i_drv_unbind,
+};
+
+static bool sun4i_drv_node_is_frontend(struct device_node *node)
+{
+ return of_device_is_compatible(node,
+ "allwinner,sun5i-a13-display-frontend");
+}
+
+static bool sun4i_drv_node_is_tcon(struct device_node *node)
+{
+ return of_device_is_compatible(node, "allwinner,sun5i-a13-tcon");
+}
+
+static int compare_of(struct device *dev, void *data)
+{
+ DRM_DEBUG_DRIVER("Comparing of node %s with %s\n",
+ of_node_full_name(dev->of_node),
+ of_node_full_name(data));
+
+ return dev->of_node == data;
+}
+
+static int sun4i_drv_add_endpoints(struct device *dev,
+ struct component_match **match,
+ struct device_node *node)
+{
+ struct device_node *port, *ep, *remote;
+ int count = 0;
+
+ /*
+ * We don't support the frontend for now, so we will never
+ * have a device bound. Just skip over it, but we still want
+ * the rest of our pipeline to be added.
+ */
+ if (!sun4i_drv_node_is_frontend(node) &&
+ !of_device_is_available(node))
+ return 0;
+
+ if (!sun4i_drv_node_is_frontend(node)) {
+ /* Add current component */
+ DRM_DEBUG_DRIVER("Adding component %s\n",
+ of_node_full_name(node));
+ component_match_add(dev, match, compare_of, node);
+ count++;
+ }
+
+ /* Inputs are listed first, then outputs */
+ port = of_graph_get_port_by_id(node, 1);
+ if (!port) {
+ DRM_DEBUG_DRIVER("No output to bind\n");
+ return count;
+ }
+
+ for_each_available_child_of_node(port, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote) {
+ DRM_DEBUG_DRIVER("Error retrieving the output node\n");
+ of_node_put(remote);
+ continue;
+ }
+
+ /*
+ * If the node is our TCON, the first port is used for our
+ * panel and will not be part of the component framework.
+ */
+ if (sun4i_drv_node_is_tcon(node)) {
+ struct of_endpoint endpoint;
+
+ if (of_graph_parse_endpoint(ep, &endpoint)) {
+ DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
+ continue;
+ }
+
+ if (!endpoint.id) {
+ DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
+ continue;
+ }
+ }
+
+ /* Walk down our tree */
+ count += sun4i_drv_add_endpoints(dev, match, remote);
+
+ of_node_put(remote);
+ }
+
+ return count;
+}
+
+static int sun4i_drv_probe(struct platform_device *pdev)
+{
+ struct component_match *match = NULL;
+ struct device_node *np = pdev->dev.of_node;
+ int i, count = 0;
+
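+ /* Walk every pipeline listed in the allwinner,pipelines property */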
+ for (i = 0;; i++) {
+ struct device_node *pipeline = of_parse_phandle(np,
+ "allwinner,pipelines",
+ i);
+ if (!pipeline)
+ break;
+
+ count += sun4i_drv_add_endpoints(&pdev->dev, &match,
+ pipeline);
+ of_node_put(pipeline);
+
+ DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n",
+ count, i);
+ }
+
+ if (count)
+ return component_master_add_with_match(&pdev->dev,
+ &sun4i_drv_master_ops,
+ match);
+ else
+ return 0;
+}
+
+static int sun4i_drv_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id sun4i_drv_of_table[] = {
+ { .compatible = "allwinner,sun5i-a13-display-engine" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
+
+static struct platform_driver sun4i_drv_platform_driver = {
+ .probe = sun4i_drv_probe,
+ .remove = sun4i_drv_remove,
+ .driver = {
+ .name = "sun4i-drm",
+ .of_match_table = sun4i_drv_of_table,
+ },
+};
+module_platform_driver(sun4i_drv_platform_driver);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A10 Display Engine DRM/KMS Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.h b/drivers/gpu/drm/sun4i/sun4i_drv.h
new file mode 100644
index 000000000..597353eab
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN4I_DRV_H_
+#define _SUN4I_DRV_H_
+
+#include <linux/clk.h>
+#include <linux/regmap.h>
+
+struct sun4i_drv {
+ struct sun4i_backend *backend;
+ struct sun4i_crtc *crtc;
+ struct sun4i_tcon *tcon;
+
+ struct drm_plane *primary;
+ struct drm_fbdev_cma *fbdev;
+
+ struct sun4i_layer **layers;
+};
+
+#endif /* _SUN4I_DRV_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
new file mode 100644
index 000000000..a0b30c216
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drmP.h>
+
+#include "sun4i_drv.h"
+
+static void sun4i_de_output_poll_changed(struct drm_device *drm)
+{
+ struct sun4i_drv *drv = drm->dev_private;
+
+ if (drv->fbdev)
+ drm_fbdev_cma_hotplug_event(drv->fbdev);
+}
+
+static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
+ .output_poll_changed = sun4i_de_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+ .fb_create = drm_fb_cma_create,
+};
+
+struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm)
+{
+ drm_mode_config_reset(drm);
+
+ drm->mode_config.max_width = 8192;
+ drm->mode_config.max_height = 8192;
+
+ drm->mode_config.funcs = &sun4i_de_mode_config_funcs;
+
+ return drm_fbdev_cma_init(drm, 32,
+ drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+}
+
+void sun4i_framebuffer_free(struct drm_device *drm)
+{
+ struct sun4i_drv *drv = drm->dev_private;
+
+ drm_fbdev_cma_fini(drv->fbdev);
+ drm_mode_config_cleanup(drm);
+}
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.h b/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
new file mode 100644
index 000000000..3afd65252
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN4I_FRAMEBUFFER_H_
+#define _SUN4I_FRAMEBUFFER_H_
+
+struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm);
+void sun4i_framebuffer_free(struct drm_device *drm);
+
+#endif /* _SUN4I_FRAMEBUFFER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
new file mode 100644
index 000000000..068ab8063
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drmP.h>
+
+#include "sun4i_backend.h"
+#include "sun4i_drv.h"
+#include "sun4i_layer.h"
+
+#define SUN4I_NUM_LAYERS 2
+
+static int sun4i_backend_layer_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ return 0;
+}
+
+static void sun4i_backend_layer_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
+ struct sun4i_drv *drv = layer->drv;
+ struct sun4i_backend *backend = drv->backend;
+
+ sun4i_backend_layer_enable(backend, layer->id, false);
+}
+
+static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
+ struct sun4i_drv *drv = layer->drv;
+ struct sun4i_backend *backend = drv->backend;
+
+ sun4i_backend_update_layer_coord(backend, layer->id, plane);
+ sun4i_backend_update_layer_formats(backend, layer->id, plane);
+ sun4i_backend_update_layer_buffer(backend, layer->id, plane);
+ sun4i_backend_layer_enable(backend, layer->id, true);
+}
+
+static struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = {
+ .atomic_check = sun4i_backend_layer_atomic_check,
+ .atomic_disable = sun4i_backend_layer_atomic_disable,
+ .atomic_update = sun4i_backend_layer_atomic_update,
+};
+
+static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .destroy = drm_plane_cleanup,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = drm_atomic_helper_plane_reset,
+ .update_plane = drm_atomic_helper_update_plane,
+};
+
+static const uint32_t sun4i_backend_layer_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+};
+
+static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
+ enum drm_plane_type type)
+{
+ struct sun4i_drv *drv = drm->dev_private;
+ struct sun4i_layer *layer;
+ int ret;
+
+ layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
+ if (!layer)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_universal_plane_init(drm, &layer->plane, BIT(0),
+ &sun4i_backend_layer_funcs,
+ sun4i_backend_layer_formats,
+ ARRAY_SIZE(sun4i_backend_layer_formats),
+ type,
+ NULL);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't initialize layer\n");
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(&layer->plane,
+ &sun4i_backend_layer_helper_funcs);
+ layer->drv = drv;
+
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ drv->primary = &layer->plane;
+
+ return layer;
+}
+
+struct sun4i_layer **sun4i_layers_init(struct drm_device *drm)
+{
+ struct sun4i_drv *drv = drm->dev_private;
+ struct sun4i_layer **layers;
+ int i;
+
+ layers = devm_kcalloc(drm->dev, SUN4I_NUM_LAYERS, sizeof(*layers),
+ GFP_KERNEL);
+ if (!layers)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * The hardware is a bit unusual here.
+ *
+ * Even though it supports 4 layers, it does the composition
+ * in two separate steps.
+ *
+ * The first one is assigning a layer to one of its two
+ * pipes. If more than one layer is assigned to the same pipe,
+ * and if their pixels overlap, the pipe will take the pixel from
+ * the layer with the highest priority.
+ *
+ * The second step is the actual alpha blending, that takes
+ * the two pipes as input, and uses the eventual alpha
+ * component to do the transparency between the two.
+ *
+ * This two-step scenario makes us unable to guarantee a
+ * robust alpha blending between the 4 layers in all
+ * situations. So we just expose two layers, one per pipe. On
+ * SoCs that support it, sprites could fill the need for more
+ * layers.
+ */
+ for (i = 0; i < SUN4I_NUM_LAYERS; i++) {
+ enum drm_plane_type type = (i == 0)
+ ? DRM_PLANE_TYPE_PRIMARY
+ : DRM_PLANE_TYPE_OVERLAY;
+ struct sun4i_layer *layer;
+
+ layer = sun4i_layer_init_one(drm, type);
+ if (IS_ERR(layer)) {
+ dev_err(drm->dev, "Couldn't initialize %s plane\n",
+ i ? "overlay" : "primary");
+ return ERR_CAST(layer);
+ }
+
+ DRM_DEBUG_DRIVER("Assigning %s plane to pipe %d\n",
+ i ? "overlay" : "primary", i);
+ regmap_update_bits(drv->backend->regs, SUN4I_BACKEND_ATTCTL_REG0(i),
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK,
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(i));
+
+ layer->id = i;
+ layers[i] = layer;
+ }
+
+ return layers;
+}
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.h b/drivers/gpu/drm/sun4i/sun4i_layer.h
new file mode 100644
index 000000000..a2f65d7a3
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN4I_LAYER_H_
+#define _SUN4I_LAYER_H_
+
+struct sun4i_layer {
+ struct drm_plane plane;
+ struct sun4i_drv *drv;
+ int id;
+};
+
+static inline struct sun4i_layer *
+plane_to_sun4i_layer(struct drm_plane *plane)
+{
+ return container_of(plane, struct sun4i_layer, plane);
+}
+
+struct sun4i_layer **sun4i_layers_init(struct drm_device *drm);
+
+#endif /* _SUN4I_LAYER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
new file mode 100644
index 000000000..aaffe9e64
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include "sun4i_drv.h"
+#include "sun4i_tcon.h"
+
+struct sun4i_rgb {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+
+ struct sun4i_drv *drv;
+};
+
+static inline struct sun4i_rgb *
+drm_connector_to_sun4i_rgb(struct drm_connector *connector)
+{
+ return container_of(connector, struct sun4i_rgb,
+ connector);
+}
+
+static inline struct sun4i_rgb *
+drm_encoder_to_sun4i_rgb(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct sun4i_rgb,
+ encoder);
+}
+
+static int sun4i_rgb_get_modes(struct drm_connector *connector)
+{
+ struct sun4i_rgb *rgb =
+ drm_connector_to_sun4i_rgb(connector);
+ struct sun4i_drv *drv = rgb->drv;
+ struct sun4i_tcon *tcon = drv->tcon;
+
+ return drm_panel_get_modes(tcon->panel);
+}
+
+static int sun4i_rgb_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
+ struct sun4i_drv *drv = rgb->drv;
+ struct sun4i_tcon *tcon = drv->tcon;
+ u32 hsync = mode->hsync_end - mode->hsync_start;
+ u32 vsync = mode->vsync_end - mode->vsync_start;
+ unsigned long rate = mode->clock * 1000;
+ long rounded_rate;
+
+ DRM_DEBUG_DRIVER("Validating modes...\n");
+
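+ /*
+ * The limits below presumably mirror the widths of the TCON0
+ * timing register fields.
+ */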
+ if (hsync < 1)
+ return MODE_HSYNC_NARROW;
+
+ if (hsync > 0x3ff)
+ return MODE_HSYNC_WIDE;
+
+ if ((mode->hdisplay < 1) || (mode->htotal < 1))
+ return MODE_H_ILLEGAL;
+
+ if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff))
+ return MODE_BAD_HVALUE;
+
+ DRM_DEBUG_DRIVER("Horizontal parameters OK\n");
+
+ if (vsync < 1)
+ return MODE_VSYNC_NARROW;
+
+ if (vsync > 0x3ff)
+ return MODE_VSYNC_WIDE;
+
+ if ((mode->vdisplay < 1) || (mode->vtotal < 1))
+ return MODE_V_ILLEGAL;
+
+ if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff))
+ return MODE_BAD_VVALUE;
+
+ DRM_DEBUG_DRIVER("Vertical parameters OK\n");
+
+ rounded_rate = clk_round_rate(tcon->dclk, rate);
+ if (rounded_rate < rate)
+ return MODE_CLOCK_LOW;
+
+ if (rounded_rate > rate)
+ return MODE_CLOCK_HIGH;
+
+ DRM_DEBUG_DRIVER("Clock rate OK\n");
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+sun4i_rgb_best_encoder(struct drm_connector *connector)
+{
+ struct sun4i_rgb *rgb =
+ drm_connector_to_sun4i_rgb(connector);
+
+ return &rgb->encoder;
+}
+
+static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
+ .get_modes = sun4i_rgb_get_modes,
+ .mode_valid = sun4i_rgb_mode_valid,
+ .best_encoder = sun4i_rgb_best_encoder,
+};
+
+static enum drm_connector_status
+sun4i_rgb_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void
+sun4i_rgb_connector_destroy(struct drm_connector *connector)
+{
+ struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
+ struct sun4i_drv *drv = rgb->drv;
+ struct sun4i_tcon *tcon = drv->tcon;
+
+ drm_panel_detach(tcon->panel);
+ drm_connector_cleanup(connector);
+}
+
+static struct drm_connector_funcs sun4i_rgb_con_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = sun4i_rgb_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = sun4i_rgb_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int sun4i_rgb_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ return 0;
+}
+
+static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
+{
+ struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
+ struct sun4i_drv *drv = rgb->drv;
+ struct sun4i_tcon *tcon = drv->tcon;
+
+ DRM_DEBUG_DRIVER("Enabling RGB output\n");
+
+ drm_panel_enable(tcon->panel);
+ sun4i_tcon_channel_enable(tcon, 0);
+}
+
+static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
+{
+ struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
+ struct sun4i_drv *drv = rgb->drv;
+ struct sun4i_tcon *tcon = drv->tcon;
+
+ DRM_DEBUG_DRIVER("Disabling RGB output\n");
+
+ sun4i_tcon_channel_disable(tcon, 0);
+ drm_panel_disable(tcon->panel);
+}
+
+static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
+ struct sun4i_drv *drv = rgb->drv;
+ struct sun4i_tcon *tcon = drv->tcon;
+
+ sun4i_tcon0_mode_set(tcon, mode);
+
+ clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
+
+ /* FIXME: This seems to be board specific */
+ clk_set_phase(tcon->dclk, 120);
+}
+
+static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
+ .atomic_check = sun4i_rgb_atomic_check,
+ .mode_set = sun4i_rgb_encoder_mode_set,
+ .disable = sun4i_rgb_encoder_disable,
+ .enable = sun4i_rgb_encoder_enable,
+};
+
+static void sun4i_rgb_enc_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static struct drm_encoder_funcs sun4i_rgb_enc_funcs = {
+ .destroy = sun4i_rgb_enc_destroy,
+};
+
+int sun4i_rgb_init(struct drm_device *drm)
+{
+ struct sun4i_drv *drv = drm->dev_private;
+ struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_rgb *rgb;
+ int ret;
+
+ /* If we don't have a panel, there's no point in going on */
+ if (IS_ERR(tcon->panel))
+ return -ENODEV;
+
+ rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
+ if (!rgb)
+ return -ENOMEM;
+ rgb->drv = drv;
+
+ (degrees / 120) << 28);
+ &sun4i_rgb_enc_helper_funcs);
+ ret = drm_encoder_init(drm,
+ &rgb->encoder,
+ &sun4i_rgb_enc_funcs,
+ DRM_MODE_ENCODER_NONE,
+ NULL);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't initialise the rgb encoder\n");
+ goto err_out;
+ }
+
+ /* The RGB encoder can only work with the TCON channel 0 */
+ rgb->encoder.possible_crtcs = BIT(0);
+
+ drm_connector_helper_add(&rgb->connector,
+ &sun4i_rgb_con_helper_funcs);
+ ret = drm_connector_init(drm, &rgb->connector,
+ &sun4i_rgb_con_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't initialise the rgb connector\n");
+ goto err_cleanup_connector;
+ }
+
+ drm_mode_connector_attach_encoder(&rgb->connector, &rgb->encoder);
+
+ drm_panel_attach(tcon->panel, &rgb->connector);
+
+ return 0;
+
+err_cleanup_connector:
+ drm_encoder_cleanup(&rgb->encoder);
+err_out:
+ return ret;
+}
+EXPORT_SYMBOL(sun4i_rgb_init);
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.h b/drivers/gpu/drm/sun4i/sun4i_rgb.h
new file mode 100644
index 000000000..7c4da4c8a
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef _SUN4I_RGB_H_
+#define _SUN4I_RGB_H_
+
+int sun4i_rgb_init(struct drm_device *drm);
+
+#endif /* _SUN4I_RGB_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
new file mode 100644
index 000000000..652385f09
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -0,0 +1,566 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <linux/component.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "sun4i_crtc.h"
+#include "sun4i_dotclock.h"
+#include "sun4i_drv.h"
+#include "sun4i_rgb.h"
+#include "sun4i_tcon.h"
+
+void sun4i_tcon_disable(struct sun4i_tcon *tcon)
+{
+ DRM_DEBUG_DRIVER("Disabling TCON\n");
+
+ /* Disable the TCON */
+ regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
+ SUN4I_TCON_GCTL_TCON_ENABLE, 0);
+}
+EXPORT_SYMBOL(sun4i_tcon_disable);
+
+void sun4i_tcon_enable(struct sun4i_tcon *tcon)
+{
+ DRM_DEBUG_DRIVER("Enabling TCON\n");
+
+ /* Enable the TCON */
+ regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
+ SUN4I_TCON_GCTL_TCON_ENABLE,
+ SUN4I_TCON_GCTL_TCON_ENABLE);
+}
+EXPORT_SYMBOL(sun4i_tcon_enable);
+
+void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel)
+{
+ /* Disable the TCON's channel */
+ if (channel == 0) {
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
+ SUN4I_TCON0_CTL_TCON_ENABLE, 0);
+ clk_disable_unprepare(tcon->dclk);
+ } else if (channel == 1) {
+ regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+ SUN4I_TCON1_CTL_TCON_ENABLE, 0);
+ clk_disable_unprepare(tcon->sclk1);
+ }
+}
+EXPORT_SYMBOL(sun4i_tcon_channel_disable);
+
+void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel)
+{
+ /* Enable the TCON's channel */
+ if (channel == 0) {
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
+ SUN4I_TCON0_CTL_TCON_ENABLE,
+ SUN4I_TCON0_CTL_TCON_ENABLE);
+ clk_prepare_enable(tcon->dclk);
+ } else if (channel == 1) {
+ regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+ SUN4I_TCON1_CTL_TCON_ENABLE,
+ SUN4I_TCON1_CTL_TCON_ENABLE);
+ clk_prepare_enable(tcon->sclk1);
+ }
+}
+EXPORT_SYMBOL(sun4i_tcon_channel_enable);
+
+void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable)
+{
+ u32 mask, val = 0;
+
+ DRM_DEBUG_DRIVER("%sabling VBLANK interrupt\n", enable ? "En" : "Dis");
+
+ mask = SUN4I_TCON_GINT0_VBLANK_ENABLE(0) |
+ SUN4I_TCON_GINT0_VBLANK_ENABLE(1);
+
+ if (enable)
+ val = mask;
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON_GINT0_REG, mask, val);
+}
+EXPORT_SYMBOL(sun4i_tcon_enable_vblank);
+
+static int sun4i_tcon_get_clk_delay(struct drm_display_mode *mode,
+ int channel)
+{
+ int delay = mode->vtotal - mode->vdisplay;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ delay /= 2;
+
+ if (channel == 1)
+ delay -= 2;
+
+ delay = min(delay, 30);
+
+	DRM_DEBUG_DRIVER("TCON %d clock delay %d\n", channel, delay);
+
+ return delay;
+}
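The delay is therefore the number of vertical blanking lines, halved for interlaced modes, reduced by two on channel 1, and capped at 30 (the register field is 5 bits wide). A quick worked sketch, not part of this commit, for an NTSC-style 720x480 interlaced mode on channel 1:

    /* Sketch: clock delay for a 525-line interlaced mode on channel 1. */
    static int example_ntsc_ch1_clk_delay(void)
    {
    	int delay = 525 - 480;	/* vtotal - vdisplay: 45 blanking lines */

    	delay /= 2;		/* DRM_MODE_FLAG_INTERLACE: 22 */
    	delay -= 2;		/* channel 1: 20 */

    	return min(delay, 30);	/* 20, under the cap */
    }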
+
+void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
+ struct drm_display_mode *mode)
+{
+ unsigned int bp, hsync, vsync;
+ u8 clk_delay;
+ u32 val = 0;
+
+ /* Adjust clock delay */
+ clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
+ SUN4I_TCON0_CTL_CLK_DELAY_MASK,
+ SUN4I_TCON0_CTL_CLK_DELAY(clk_delay));
+
+ /* Set the resolution */
+ regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
+ SUN4I_TCON0_BASIC0_X(mode->crtc_hdisplay) |
+ SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
+
+	/*
+	 * This is called a backporch in the register documentation,
+	 * but it really is the back porch + hsync
+	 */
+ bp = mode->crtc_htotal - mode->crtc_hsync_start;
+ DRM_DEBUG_DRIVER("Setting horizontal total %d, backporch %d\n",
+ mode->crtc_htotal, bp);
+
+ /* Set horizontal display timings */
+ regmap_write(tcon->regs, SUN4I_TCON0_BASIC1_REG,
+ SUN4I_TCON0_BASIC1_H_TOTAL(mode->crtc_htotal) |
+ SUN4I_TCON0_BASIC1_H_BACKPORCH(bp));
+
+	/*
+	 * This is called a backporch in the register documentation,
+	 * but it really is the back porch + vsync
+	 */
+ bp = mode->crtc_vtotal - mode->crtc_vsync_start;
+ DRM_DEBUG_DRIVER("Setting vertical total %d, backporch %d\n",
+ mode->crtc_vtotal, bp);
+
+ /* Set vertical display timings */
+ regmap_write(tcon->regs, SUN4I_TCON0_BASIC2_REG,
+ SUN4I_TCON0_BASIC2_V_TOTAL(mode->crtc_vtotal) |
+ SUN4I_TCON0_BASIC2_V_BACKPORCH(bp));
+
+ /* Set Hsync and Vsync length */
+ hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ DRM_DEBUG_DRIVER("Setting HSYNC %d, VSYNC %d\n", hsync, vsync);
+ regmap_write(tcon->regs, SUN4I_TCON0_BASIC3_REG,
+ SUN4I_TCON0_BASIC3_V_SYNC(vsync) |
+ SUN4I_TCON0_BASIC3_H_SYNC(hsync));
+
+ /* Setup the polarity of the various signals */
+ if (!(mode->flags & DRM_MODE_FLAG_PHSYNC))
+ val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
+
+ if (!(mode->flags & DRM_MODE_FLAG_PVSYNC))
+ val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
+ SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
+ val);
+
+ /* Map output pins to channel 0 */
+ regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
+ SUN4I_TCON_GCTL_IOMAP_MASK,
+ SUN4I_TCON_GCTL_IOMAP_TCON0);
+
+ /* Enable the output on the pins */
+ regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0);
+}
+EXPORT_SYMBOL(sun4i_tcon0_mode_set);
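To make the "backporch" register semantics concrete, here is a sketch with hypothetical 800x480 panel timings (the numbers are invented for illustration, not taken from this commit):

    /* Sketch: fields sun4i_tcon0_mode_set() derives from an 800x480 mode. */
    enum {
    	H_DISPLAY = 800, H_SYNC_START = 840, H_SYNC_END = 848, H_TOTAL = 936,
    	V_DISPLAY = 480, V_SYNC_START = 493, V_SYNC_END = 496, V_TOTAL = 525,

    	BASIC1_BP = H_TOTAL - H_SYNC_START,		/* 96: back porch + hsync */
    	BASIC2_BP = V_TOTAL - V_SYNC_START,		/* 32: back porch + vsync */
    	BASIC3_HSYNC = H_SYNC_END - H_SYNC_START,	/* 8 */
    	BASIC3_VSYNC = V_SYNC_END - V_SYNC_START,	/* 3 */
    };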
+
+void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
+ struct drm_display_mode *mode)
+{
+ unsigned int bp, hsync, vsync;
+ u8 clk_delay;
+ u32 val;
+
+ /* Adjust clock delay */
+ clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
+ regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+ SUN4I_TCON1_CTL_CLK_DELAY_MASK,
+ SUN4I_TCON1_CTL_CLK_DELAY(clk_delay));
+
+ /* Set interlaced mode */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ val = SUN4I_TCON1_CTL_INTERLACE_ENABLE;
+ else
+ val = 0;
+ regmap_update_bits(tcon->regs, SUN4I_TCON1_CTL_REG,
+ SUN4I_TCON1_CTL_INTERLACE_ENABLE,
+ val);
+
+ /* Set the input resolution */
+ regmap_write(tcon->regs, SUN4I_TCON1_BASIC0_REG,
+ SUN4I_TCON1_BASIC0_X(mode->crtc_hdisplay) |
+ SUN4I_TCON1_BASIC0_Y(mode->crtc_vdisplay));
+
+ /* Set the upscaling resolution */
+ regmap_write(tcon->regs, SUN4I_TCON1_BASIC1_REG,
+ SUN4I_TCON1_BASIC1_X(mode->crtc_hdisplay) |
+ SUN4I_TCON1_BASIC1_Y(mode->crtc_vdisplay));
+
+ /* Set the output resolution */
+ regmap_write(tcon->regs, SUN4I_TCON1_BASIC2_REG,
+ SUN4I_TCON1_BASIC2_X(mode->crtc_hdisplay) |
+ SUN4I_TCON1_BASIC2_Y(mode->crtc_vdisplay));
+
+ /* Set horizontal display timings */
+ bp = mode->crtc_htotal - mode->crtc_hsync_end;
+ DRM_DEBUG_DRIVER("Setting horizontal total %d, backporch %d\n",
+ mode->htotal, bp);
+ regmap_write(tcon->regs, SUN4I_TCON1_BASIC3_REG,
+ SUN4I_TCON1_BASIC3_H_TOTAL(mode->crtc_htotal) |
+ SUN4I_TCON1_BASIC3_H_BACKPORCH(bp));
+
+ /* Set vertical display timings */
+ bp = mode->crtc_vtotal - mode->crtc_vsync_end;
+ DRM_DEBUG_DRIVER("Setting vertical total %d, backporch %d\n",
+ mode->vtotal, bp);
+ regmap_write(tcon->regs, SUN4I_TCON1_BASIC4_REG,
+ SUN4I_TCON1_BASIC4_V_TOTAL(mode->vtotal) |
+ SUN4I_TCON1_BASIC4_V_BACKPORCH(bp));
+
+ /* Set Hsync and Vsync length */
+ hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ DRM_DEBUG_DRIVER("Setting HSYNC %d, VSYNC %d\n", hsync, vsync);
+ regmap_write(tcon->regs, SUN4I_TCON1_BASIC5_REG,
+ SUN4I_TCON1_BASIC5_V_SYNC(vsync) |
+ SUN4I_TCON1_BASIC5_H_SYNC(hsync));
+
+ /* Map output pins to channel 1 */
+ regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
+ SUN4I_TCON_GCTL_IOMAP_MASK,
+ SUN4I_TCON_GCTL_IOMAP_TCON1);
+
+ /*
+ * FIXME: Undocumented bits
+ */
+ if (tcon->has_mux)
+ regmap_write(tcon->regs, SUN4I_TCON_MUX_CTRL_REG, 1);
+}
+EXPORT_SYMBOL(sun4i_tcon1_mode_set);
+
+static void sun4i_tcon_finish_page_flip(struct drm_device *dev,
+ struct sun4i_crtc *scrtc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (scrtc->event) {
+ drm_crtc_send_vblank_event(&scrtc->crtc, scrtc->event);
+ drm_crtc_vblank_put(&scrtc->crtc);
+ scrtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
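The event consumed here is armed from the CRTC's atomic commit path. A minimal sketch of that producer side, assuming the usual atomic_begin pattern and a drm_crtc_to_sun4i_crtc() upcast helper (both assumptions, since sun4i_crtc.c is not part of this hunk):

    /* Sketch: where scrtc->event comes from on the CRTC side. */
    static void example_crtc_atomic_begin(struct drm_crtc *crtc,
    				      struct drm_crtc_state *old_state)
    {
    	struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
    	struct drm_device *dev = crtc->dev;
    	unsigned long flags;

    	if (crtc->state->event) {
    		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

    		spin_lock_irqsave(&dev->event_lock, flags);
    		scrtc->event = crtc->state->event;
    		spin_unlock_irqrestore(&dev->event_lock, flags);
    		crtc->state->event = NULL;
    	}
    }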
+static irqreturn_t sun4i_tcon_handler(int irq, void *private)
+{
+ struct sun4i_tcon *tcon = private;
+ struct drm_device *drm = tcon->drm;
+ struct sun4i_drv *drv = drm->dev_private;
+ struct sun4i_crtc *scrtc = drv->crtc;
+ unsigned int status;
+
+ regmap_read(tcon->regs, SUN4I_TCON_GINT0_REG, &status);
+
+ if (!(status & (SUN4I_TCON_GINT0_VBLANK_INT(0) |
+ SUN4I_TCON_GINT0_VBLANK_INT(1))))
+ return IRQ_NONE;
+
+ drm_crtc_handle_vblank(&scrtc->crtc);
+ sun4i_tcon_finish_page_flip(drm, scrtc);
+
+ /* Acknowledge the interrupt */
+ regmap_update_bits(tcon->regs, SUN4I_TCON_GINT0_REG,
+ SUN4I_TCON_GINT0_VBLANK_INT(0) |
+ SUN4I_TCON_GINT0_VBLANK_INT(1),
+ 0);
+
+ return IRQ_HANDLED;
+}
+
+static int sun4i_tcon_init_clocks(struct device *dev,
+ struct sun4i_tcon *tcon)
+{
+ tcon->clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(tcon->clk)) {
+ dev_err(dev, "Couldn't get the TCON bus clock\n");
+ return PTR_ERR(tcon->clk);
+ }
+ clk_prepare_enable(tcon->clk);
+
+ tcon->sclk0 = devm_clk_get(dev, "tcon-ch0");
+ if (IS_ERR(tcon->sclk0)) {
+ dev_err(dev, "Couldn't get the TCON channel 0 clock\n");
+ return PTR_ERR(tcon->sclk0);
+ }
+
+ tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
+ if (IS_ERR(tcon->sclk1)) {
+ dev_err(dev, "Couldn't get the TCON channel 1 clock\n");
+ return PTR_ERR(tcon->sclk1);
+ }
+
+ return sun4i_dclk_create(dev, tcon);
+}
+
+static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
+{
+ sun4i_dclk_free(tcon);
+ clk_disable_unprepare(tcon->clk);
+}
+
+static int sun4i_tcon_init_irq(struct device *dev,
+ struct sun4i_tcon *tcon)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Couldn't retrieve the TCON interrupt\n");
+ return irq;
+ }
+
+ ret = devm_request_irq(dev, irq, sun4i_tcon_handler, 0,
+ dev_name(dev), tcon);
+ if (ret) {
+ dev_err(dev, "Couldn't request the IRQ\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct regmap_config sun4i_tcon_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x800,
+};
+
+static int sun4i_tcon_init_regmap(struct device *dev,
+ struct sun4i_tcon *tcon)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+ void __iomem *regs;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs)) {
+ dev_err(dev, "Couldn't map the TCON registers\n");
+ return PTR_ERR(regs);
+ }
+
+ tcon->regs = devm_regmap_init_mmio(dev, regs,
+ &sun4i_tcon_regmap_config);
+ if (IS_ERR(tcon->regs)) {
+ dev_err(dev, "Couldn't create the TCON regmap\n");
+ return PTR_ERR(tcon->regs);
+ }
+
+ /* Make sure the TCON is disabled and all IRQs are off */
+ regmap_write(tcon->regs, SUN4I_TCON_GCTL_REG, 0);
+ regmap_write(tcon->regs, SUN4I_TCON_GINT0_REG, 0);
+ regmap_write(tcon->regs, SUN4I_TCON_GINT1_REG, 0);
+
+ /* Disable IO lines and set them to tristate */
+ regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, ~0);
+ regmap_write(tcon->regs, SUN4I_TCON1_IO_TRI_REG, ~0);
+
+ return 0;
+}
+
+static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
+{
+ struct device_node *port, *remote, *child;
+ struct device_node *end_node = NULL;
+
+ /* Inputs are listed first, then outputs */
+ port = of_graph_get_port_by_id(node, 1);
+
+ /*
+ * Our first output is the RGB interface where the panel will
+ * be connected.
+ */
+ for_each_child_of_node(port, child) {
+ u32 reg;
+
+		if (of_property_read_u32(child, "reg", &reg))
+			continue;
+		if (reg == 0)
+			end_node = child;
+ }
+
+ if (!end_node) {
+ DRM_DEBUG_DRIVER("Missing panel endpoint\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ remote = of_graph_get_remote_port_parent(end_node);
+ if (!remote) {
+ DRM_DEBUG_DRIVER("Unable to parse remote node\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
+}
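Note that in this kernel of_drm_find_panel() returns NULL (not an ERR_PTR) while the panel driver has yet to probe, so the GCC "?:" shorthand collapses that case into -EPROBE_DEFER. Open-coded, the final return is equivalent to this sketch:

    /* Sketch: open-coded form of the ?: shorthand above. */
    static struct drm_panel *example_panel_or_defer(struct device_node *remote)
    {
    	struct drm_panel *panel = of_drm_find_panel(remote);

    	return panel ? panel : ERR_PTR(-EPROBE_DEFER);
    }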
+
+static int sun4i_tcon_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = data;
+ struct sun4i_drv *drv = drm->dev_private;
+ struct sun4i_tcon *tcon;
+ int ret;
+
+ tcon = devm_kzalloc(dev, sizeof(*tcon), GFP_KERNEL);
+ if (!tcon)
+ return -ENOMEM;
+ dev_set_drvdata(dev, tcon);
+ drv->tcon = tcon;
+ tcon->drm = drm;
+
+ if (of_device_is_compatible(dev->of_node, "allwinner,sun5i-a13-tcon"))
+ tcon->has_mux = true;
+
+ tcon->lcd_rst = devm_reset_control_get(dev, "lcd");
+ if (IS_ERR(tcon->lcd_rst)) {
+ dev_err(dev, "Couldn't get our reset line\n");
+ return PTR_ERR(tcon->lcd_rst);
+ }
+
+ /* Make sure our TCON is reset */
+ if (!reset_control_status(tcon->lcd_rst))
+ reset_control_assert(tcon->lcd_rst);
+
+ ret = reset_control_deassert(tcon->lcd_rst);
+ if (ret) {
+ dev_err(dev, "Couldn't deassert our reset line\n");
+ return ret;
+ }
+
+ ret = sun4i_tcon_init_regmap(dev, tcon);
+ if (ret) {
+ dev_err(dev, "Couldn't init our TCON regmap\n");
+ goto err_assert_reset;
+ }
+
+ ret = sun4i_tcon_init_clocks(dev, tcon);
+ if (ret) {
+ dev_err(dev, "Couldn't init our TCON clocks\n");
+ goto err_assert_reset;
+ }
+
+ ret = sun4i_tcon_init_irq(dev, tcon);
+ if (ret) {
+ dev_err(dev, "Couldn't init our TCON interrupts\n");
+ goto err_free_clocks;
+ }
+
+ tcon->panel = sun4i_tcon_find_panel(dev->of_node);
+ if (IS_ERR(tcon->panel)) {
+ dev_info(dev, "No panel found... RGB output disabled\n");
+ return 0;
+ }
+
+ ret = sun4i_rgb_init(drm);
+ if (ret < 0)
+ goto err_free_clocks;
+
+ return 0;
+
+err_free_clocks:
+ sun4i_tcon_free_clocks(tcon);
+err_assert_reset:
+ reset_control_assert(tcon->lcd_rst);
+ return ret;
+}
+
+static void sun4i_tcon_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sun4i_tcon *tcon = dev_get_drvdata(dev);
+
+ sun4i_tcon_free_clocks(tcon);
+}
+
+static struct component_ops sun4i_tcon_ops = {
+ .bind = sun4i_tcon_bind,
+ .unbind = sun4i_tcon_unbind,
+};
+
+static int sun4i_tcon_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct drm_panel *panel;
+
+	/*
+	 * Look the panel up. If there is no panel endpoint at all, we
+	 * just go on; if the panel is there but has not been probed
+	 * yet, we defer until it has.
+	 */
+	panel = sun4i_tcon_find_panel(node);
+	if (PTR_ERR(panel) == -EPROBE_DEFER) {
+ DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n");
+ return -EPROBE_DEFER;
+ }
+
+ return component_add(&pdev->dev, &sun4i_tcon_ops);
+}
+
+static int sun4i_tcon_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sun4i_tcon_ops);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_tcon_of_table[] = {
+ { .compatible = "allwinner,sun5i-a13-tcon" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun4i_tcon_of_table);
+
+static struct platform_driver sun4i_tcon_platform_driver = {
+ .probe = sun4i_tcon_probe,
+ .remove = sun4i_tcon_remove,
+ .driver = {
+ .name = "sun4i-tcon",
+ .of_match_table = sun4i_tcon_of_table,
+ },
+};
+module_platform_driver(sun4i_tcon_platform_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A10 Timing Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
new file mode 100644
index 000000000..0e0b11db4
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#ifndef __SUN4I_TCON_H__
+#define __SUN4I_TCON_H__
+
+#include <drm/drm_crtc.h>
+
+#include <linux/kernel.h>
+#include <linux/reset.h>
+
+#define SUN4I_TCON_GCTL_REG 0x0
+#define SUN4I_TCON_GCTL_TCON_ENABLE BIT(31)
+#define SUN4I_TCON_GCTL_IOMAP_MASK BIT(0)
+#define SUN4I_TCON_GCTL_IOMAP_TCON1 (1 << 0)
+#define SUN4I_TCON_GCTL_IOMAP_TCON0 (0 << 0)
+
+#define SUN4I_TCON_GINT0_REG 0x4
+#define SUN4I_TCON_GINT0_VBLANK_ENABLE(pipe) BIT(31 - (pipe))
+#define SUN4I_TCON_GINT0_VBLANK_INT(pipe) BIT(15 - (pipe))
+
+#define SUN4I_TCON_GINT1_REG 0x8
+#define SUN4I_TCON_FRM_CTL_REG 0x10
+
+#define SUN4I_TCON0_CTL_REG 0x40
+#define SUN4I_TCON0_CTL_TCON_ENABLE BIT(31)
+#define SUN4I_TCON0_CTL_CLK_DELAY_MASK GENMASK(8, 4)
+#define SUN4I_TCON0_CTL_CLK_DELAY(delay)	(((delay) << 4) & SUN4I_TCON0_CTL_CLK_DELAY_MASK)
+
+#define SUN4I_TCON0_DCLK_REG 0x44
+#define SUN4I_TCON0_DCLK_GATE_BIT (31)
+#define SUN4I_TCON0_DCLK_DIV_SHIFT (0)
+#define SUN4I_TCON0_DCLK_DIV_WIDTH (7)
+
+#define SUN4I_TCON0_BASIC0_REG 0x48
+#define SUN4I_TCON0_BASIC0_X(width) ((((width) - 1) & 0xfff) << 16)
+#define SUN4I_TCON0_BASIC0_Y(height) (((height) - 1) & 0xfff)
+
+#define SUN4I_TCON0_BASIC1_REG 0x4c
+#define SUN4I_TCON0_BASIC1_H_TOTAL(total) ((((total) - 1) & 0x1fff) << 16)
+#define SUN4I_TCON0_BASIC1_H_BACKPORCH(bp) (((bp) - 1) & 0xfff)
+
+#define SUN4I_TCON0_BASIC2_REG 0x50
+#define SUN4I_TCON0_BASIC2_V_TOTAL(total) ((((total) * 2) & 0x1fff) << 16)
+#define SUN4I_TCON0_BASIC2_V_BACKPORCH(bp) (((bp) - 1) & 0xfff)
+
+#define SUN4I_TCON0_BASIC3_REG 0x54
+#define SUN4I_TCON0_BASIC3_H_SYNC(width) ((((width) - 1) & 0x7ff) << 16)
+#define SUN4I_TCON0_BASIC3_V_SYNC(height) (((height) - 1) & 0x7ff)
+
+#define SUN4I_TCON0_HV_IF_REG 0x58
+#define SUN4I_TCON0_CPU_IF_REG 0x60
+#define SUN4I_TCON0_CPU_WR_REG 0x64
+#define SUN4I_TCON0_CPU_RD0_REG 0x68
+#define SUN4I_TCON0_CPU_RDA_REG 0x6c
+#define SUN4I_TCON0_TTL0_REG 0x70
+#define SUN4I_TCON0_TTL1_REG 0x74
+#define SUN4I_TCON0_TTL2_REG 0x78
+#define SUN4I_TCON0_TTL3_REG 0x7c
+#define SUN4I_TCON0_TTL4_REG 0x80
+#define SUN4I_TCON0_LVDS_IF_REG 0x84
+#define SUN4I_TCON0_IO_POL_REG 0x88
+#define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase)	(((phase) & 3) << 28)
+#define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25)
+#define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24)
+
+#define SUN4I_TCON0_IO_TRI_REG 0x8c
+#define SUN4I_TCON0_IO_TRI_HSYNC_DISABLE BIT(25)
+#define SUN4I_TCON0_IO_TRI_VSYNC_DISABLE BIT(24)
+#define SUN4I_TCON0_IO_TRI_DATA_PINS_DISABLE(pins) GENMASK(pins, 0)
+
+#define SUN4I_TCON1_CTL_REG 0x90
+#define SUN4I_TCON1_CTL_TCON_ENABLE BIT(31)
+#define SUN4I_TCON1_CTL_INTERLACE_ENABLE BIT(20)
+#define SUN4I_TCON1_CTL_CLK_DELAY_MASK GENMASK(8, 4)
+#define SUN4I_TCON1_CTL_CLK_DELAY(delay)	(((delay) << 4) & SUN4I_TCON1_CTL_CLK_DELAY_MASK)
+
+#define SUN4I_TCON1_BASIC0_REG 0x94
+#define SUN4I_TCON1_BASIC0_X(width) ((((width) - 1) & 0xfff) << 16)
+#define SUN4I_TCON1_BASIC0_Y(height) (((height) - 1) & 0xfff)
+
+#define SUN4I_TCON1_BASIC1_REG 0x98
+#define SUN4I_TCON1_BASIC1_X(width) ((((width) - 1) & 0xfff) << 16)
+#define SUN4I_TCON1_BASIC1_Y(height) (((height) - 1) & 0xfff)
+
+#define SUN4I_TCON1_BASIC2_REG 0x9c
+#define SUN4I_TCON1_BASIC2_X(width) ((((width) - 1) & 0xfff) << 16)
+#define SUN4I_TCON1_BASIC2_Y(height) (((height) - 1) & 0xfff)
+
+#define SUN4I_TCON1_BASIC3_REG 0xa0
+#define SUN4I_TCON1_BASIC3_H_TOTAL(total) ((((total) - 1) & 0x1fff) << 16)
+#define SUN4I_TCON1_BASIC3_H_BACKPORCH(bp) (((bp) - 1) & 0xfff)
+
+#define SUN4I_TCON1_BASIC4_REG 0xa4
+#define SUN4I_TCON1_BASIC4_V_TOTAL(total) (((total) & 0x1fff) << 16)
+#define SUN4I_TCON1_BASIC4_V_BACKPORCH(bp) (((bp) - 1) & 0xfff)
+
+#define SUN4I_TCON1_BASIC5_REG 0xa8
+#define SUN4I_TCON1_BASIC5_H_SYNC(width) ((((width) - 1) & 0x3ff) << 16)
+#define SUN4I_TCON1_BASIC5_V_SYNC(height) (((height) - 1) & 0x3ff)
+
+#define SUN4I_TCON1_IO_POL_REG 0xf0
+#define SUN4I_TCON1_IO_TRI_REG 0xf4
+#define SUN4I_TCON_CEU_CTL_REG 0x100
+#define SUN4I_TCON_CEU_MUL_RR_REG 0x110
+#define SUN4I_TCON_CEU_MUL_RG_REG 0x114
+#define SUN4I_TCON_CEU_MUL_RB_REG 0x118
+#define SUN4I_TCON_CEU_ADD_RC_REG 0x11c
+#define SUN4I_TCON_CEU_MUL_GR_REG 0x120
+#define SUN4I_TCON_CEU_MUL_GG_REG 0x124
+#define SUN4I_TCON_CEU_MUL_GB_REG 0x128
+#define SUN4I_TCON_CEU_ADD_GC_REG 0x12c
+#define SUN4I_TCON_CEU_MUL_BR_REG 0x130
+#define SUN4I_TCON_CEU_MUL_BG_REG 0x134
+#define SUN4I_TCON_CEU_MUL_BB_REG 0x138
+#define SUN4I_TCON_CEU_ADD_BC_REG 0x13c
+#define SUN4I_TCON_CEU_RANGE_R_REG 0x140
+#define SUN4I_TCON_CEU_RANGE_G_REG 0x144
+#define SUN4I_TCON_CEU_RANGE_B_REG 0x148
+#define SUN4I_TCON_MUX_CTRL_REG 0x200
+#define SUN4I_TCON1_FILL_CTL_REG 0x300
+#define SUN4I_TCON1_FILL_BEG0_REG 0x304
+#define SUN4I_TCON1_FILL_END0_REG 0x308
+#define SUN4I_TCON1_FILL_DATA0_REG 0x30c
+#define SUN4I_TCON1_FILL_BEG1_REG 0x310
+#define SUN4I_TCON1_FILL_END1_REG 0x314
+#define SUN4I_TCON1_FILL_DATA1_REG 0x318
+#define SUN4I_TCON1_FILL_BEG2_REG 0x31c
+#define SUN4I_TCON1_FILL_END2_REG 0x320
+#define SUN4I_TCON1_FILL_DATA2_REG 0x324
+#define SUN4I_TCON1_GAMMA_TABLE_REG 0x400
+
+#define SUN4I_TCON_MAX_CHANNELS 2
+
+struct sun4i_tcon {
+ struct drm_device *drm;
+ struct regmap *regs;
+
+ /* Main bus clock */
+ struct clk *clk;
+
+ /* Clocks for the TCON channels */
+ struct clk *sclk0;
+ struct clk *sclk1;
+
+ /* Pixel clock */
+ struct clk *dclk;
+
+ /* Reset control */
+ struct reset_control *lcd_rst;
+
+ /* Platform adjustments */
+ bool has_mux;
+
+ struct drm_panel *panel;
+};
+
+/* Global Control */
+void sun4i_tcon_disable(struct sun4i_tcon *tcon);
+void sun4i_tcon_enable(struct sun4i_tcon *tcon);
+
+/* Channel Control */
+void sun4i_tcon_channel_disable(struct sun4i_tcon *tcon, int channel);
+void sun4i_tcon_channel_enable(struct sun4i_tcon *tcon, int channel);
+
+void sun4i_tcon_enable_vblank(struct sun4i_tcon *tcon, bool enable);
+
+/* Mode Related Controls */
+void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
+ struct drm_display_mode *mode);
+void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
+ struct drm_display_mode *mode);
+
+#endif /* __SUN4I_TCON_H__ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
new file mode 100644
index 000000000..bc047f923
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -0,0 +1,708 @@
+/*
+ * Copyright (C) 2015 Free Electrons
+ * Copyright (C) 2015 NextThing Co
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+
+#include "sun4i_backend.h"
+#include "sun4i_drv.h"
+#include "sun4i_tcon.h"
+
+#define SUN4I_TVE_EN_REG 0x000
+#define SUN4I_TVE_EN_DAC_MAP_MASK GENMASK(19, 4)
+#define SUN4I_TVE_EN_DAC_MAP(dac, out)		(((out) & 0xf) << (((dac) + 1) * 4))
+#define SUN4I_TVE_EN_ENABLE BIT(0)
+
+#define SUN4I_TVE_CFG0_REG 0x004
+#define SUN4I_TVE_CFG0_DAC_CONTROL_54M BIT(26)
+#define SUN4I_TVE_CFG0_CORE_DATAPATH_54M BIT(25)
+#define SUN4I_TVE_CFG0_CORE_CONTROL_54M BIT(24)
+#define SUN4I_TVE_CFG0_YC_EN BIT(17)
+#define SUN4I_TVE_CFG0_COMP_EN BIT(16)
+#define SUN4I_TVE_CFG0_RES(x) ((x) & 0xf)
+#define SUN4I_TVE_CFG0_RES_480i SUN4I_TVE_CFG0_RES(0)
+#define SUN4I_TVE_CFG0_RES_576i SUN4I_TVE_CFG0_RES(1)
+
+#define SUN4I_TVE_DAC0_REG 0x008
+#define SUN4I_TVE_DAC0_CLOCK_INVERT BIT(24)
+#define SUN4I_TVE_DAC0_LUMA(x) (((x) & 3) << 20)
+#define SUN4I_TVE_DAC0_LUMA_0_4 SUN4I_TVE_DAC0_LUMA(3)
+#define SUN4I_TVE_DAC0_CHROMA(x) (((x) & 3) << 18)
+#define SUN4I_TVE_DAC0_CHROMA_0_75 SUN4I_TVE_DAC0_CHROMA(3)
+#define SUN4I_TVE_DAC0_INTERNAL_DAC(x) (((x) & 3) << 16)
+#define SUN4I_TVE_DAC0_INTERNAL_DAC_37_5_OHMS SUN4I_TVE_DAC0_INTERNAL_DAC(3)
+#define SUN4I_TVE_DAC0_DAC_EN(dac) BIT(dac)
+
+#define SUN4I_TVE_NOTCH_REG 0x00c
+#define SUN4I_TVE_NOTCH_DAC0_TO_DAC_DLY(dac, x)	((4 - (x)) << ((dac) * 3))
+
+#define SUN4I_TVE_CHROMA_FREQ_REG 0x010
+
+#define SUN4I_TVE_PORCH_REG 0x014
+#define SUN4I_TVE_PORCH_BACK(x) ((x) << 16)
+#define SUN4I_TVE_PORCH_FRONT(x) (x)
+
+#define SUN4I_TVE_LINE_REG 0x01c
+#define SUN4I_TVE_LINE_FIRST(x) ((x) << 16)
+#define SUN4I_TVE_LINE_NUMBER(x) (x)
+
+#define SUN4I_TVE_LEVEL_REG 0x020
+#define SUN4I_TVE_LEVEL_BLANK(x) ((x) << 16)
+#define SUN4I_TVE_LEVEL_BLACK(x) (x)
+
+#define SUN4I_TVE_DAC1_REG 0x024
+#define SUN4I_TVE_DAC1_AMPLITUDE(dac, x)	((x) << ((dac) * 8))
+
+#define SUN4I_TVE_DETECT_STA_REG 0x038
+#define SUN4I_TVE_DETECT_STA_DAC(dac)		BIT((dac) * 8)
+#define SUN4I_TVE_DETECT_STA_UNCONNECTED 0
+#define SUN4I_TVE_DETECT_STA_CONNECTED 1
+#define SUN4I_TVE_DETECT_STA_GROUND 2
+
+#define SUN4I_TVE_CB_CR_LVL_REG 0x10c
+#define SUN4I_TVE_CB_CR_LVL_CR_BURST(x) ((x) << 8)
+#define SUN4I_TVE_CB_CR_LVL_CB_BURST(x) (x)
+
+#define SUN4I_TVE_TINT_BURST_PHASE_REG 0x110
+#define SUN4I_TVE_TINT_BURST_PHASE_CHROMA(x) (x)
+
+#define SUN4I_TVE_BURST_WIDTH_REG 0x114
+#define SUN4I_TVE_BURST_WIDTH_BREEZEWAY(x) ((x) << 16)
+#define SUN4I_TVE_BURST_WIDTH_BURST_WIDTH(x) ((x) << 8)
+#define SUN4I_TVE_BURST_WIDTH_HSYNC_WIDTH(x) (x)
+
+#define SUN4I_TVE_CB_CR_GAIN_REG 0x118
+#define SUN4I_TVE_CB_CR_GAIN_CR(x) ((x) << 8)
+#define SUN4I_TVE_CB_CR_GAIN_CB(x) (x)
+
+#define SUN4I_TVE_SYNC_VBI_REG 0x11c
+#define SUN4I_TVE_SYNC_VBI_SYNC(x) ((x) << 16)
+#define SUN4I_TVE_SYNC_VBI_VBLANK(x) (x)
+
+#define SUN4I_TVE_ACTIVE_LINE_REG 0x124
+#define SUN4I_TVE_ACTIVE_LINE(x) (x)
+
+#define SUN4I_TVE_CHROMA_REG 0x128
+#define SUN4I_TVE_CHROMA_COMP_GAIN(x) ((x) & 3)
+#define SUN4I_TVE_CHROMA_COMP_GAIN_50 SUN4I_TVE_CHROMA_COMP_GAIN(2)
+
+#define SUN4I_TVE_12C_REG 0x12c
+#define SUN4I_TVE_12C_NOTCH_WIDTH_WIDE BIT(8)
+#define SUN4I_TVE_12C_COMP_YUV_EN BIT(0)
+
+#define SUN4I_TVE_RESYNC_REG 0x130
+#define SUN4I_TVE_RESYNC_FIELD BIT(31)
+#define SUN4I_TVE_RESYNC_LINE(x) ((x) << 16)
+#define SUN4I_TVE_RESYNC_PIXEL(x) (x)
+
+#define SUN4I_TVE_SLAVE_REG 0x134
+
+#define SUN4I_TVE_WSS_DATA2_REG 0x244
+
+struct color_gains {
+ u16 cb;
+ u16 cr;
+};
+
+struct burst_levels {
+ u16 cb;
+ u16 cr;
+};
+
+struct video_levels {
+ u16 black;
+ u16 blank;
+};
+
+struct resync_parameters {
+ bool field;
+ u16 line;
+ u16 pixel;
+};
+
+struct tv_mode {
+ char *name;
+
+ u32 mode;
+ u32 chroma_freq;
+ u16 back_porch;
+ u16 front_porch;
+ u16 line_number;
+ u16 vblank_level;
+
+ u32 hdisplay;
+ u16 hfront_porch;
+ u16 hsync_len;
+ u16 hback_porch;
+
+ u32 vdisplay;
+ u16 vfront_porch;
+ u16 vsync_len;
+ u16 vback_porch;
+
+ bool yc_en;
+ bool dac3_en;
+ bool dac_bit25_en;
+
+ struct color_gains *color_gains;
+ struct burst_levels *burst_levels;
+ struct video_levels *video_levels;
+ struct resync_parameters *resync_params;
+};
+
+struct sun4i_tv {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+
+ struct clk *clk;
+ struct regmap *regs;
+ struct reset_control *reset;
+
+ struct sun4i_drv *drv;
+};
+
+static struct video_levels ntsc_video_levels = {
+	.black = 282, .blank = 240,
+};
+
+static struct video_levels pal_video_levels = {
+	.black = 252, .blank = 252,
+};
+
+static struct burst_levels ntsc_burst_levels = {
+	.cb = 79, .cr = 0,
+};
+
+static struct burst_levels pal_burst_levels = {
+	.cb = 40, .cr = 40,
+};
+
+static struct color_gains ntsc_color_gains = {
+	.cb = 160, .cr = 160,
+};
+
+static struct color_gains pal_color_gains = {
+	.cb = 224, .cr = 224,
+};
+
+static struct resync_parameters ntsc_resync_parameters = {
+	.field = false, .line = 14, .pixel = 12,
+};
+
+static struct resync_parameters pal_resync_parameters = {
+	.field = true, .line = 13, .pixel = 12,
+};
+
+static struct tv_mode tv_modes[] = {
+ {
+ .name = "NTSC",
+ .mode = SUN4I_TVE_CFG0_RES_480i,
+ .chroma_freq = 0x21f07c1f,
+ .yc_en = true,
+ .dac3_en = true,
+ .dac_bit25_en = true,
+
+ .back_porch = 118,
+ .front_porch = 32,
+ .line_number = 525,
+
+ .hdisplay = 720,
+ .hfront_porch = 18,
+ .hsync_len = 2,
+ .hback_porch = 118,
+
+ .vdisplay = 480,
+ .vfront_porch = 26,
+ .vsync_len = 2,
+ .vback_porch = 17,
+
+ .vblank_level = 240,
+
+ .color_gains = &ntsc_color_gains,
+ .burst_levels = &ntsc_burst_levels,
+ .video_levels = &ntsc_video_levels,
+ .resync_params = &ntsc_resync_parameters,
+ },
+ {
+ .name = "PAL",
+ .mode = SUN4I_TVE_CFG0_RES_576i,
+ .chroma_freq = 0x2a098acb,
+
+ .back_porch = 138,
+ .front_porch = 24,
+ .line_number = 625,
+
+ .hdisplay = 720,
+ .hfront_porch = 3,
+ .hsync_len = 2,
+ .hback_porch = 139,
+
+ .vdisplay = 576,
+ .vfront_porch = 28,
+ .vsync_len = 2,
+ .vback_porch = 19,
+
+ .vblank_level = 252,
+
+ .color_gains = &pal_color_gains,
+ .burst_levels = &pal_burst_levels,
+ .video_levels = &pal_video_levels,
+ .resync_params = &pal_resync_parameters,
+ },
+};
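The chroma_freq values read as 32-bit phase increments against the 27 MHz core clock, so that f_sc ~= chroma_freq / 2^32 * 27 MHz. This is an inference from the numbers themselves, not from documented register semantics, but it checks out:

    /* Sketch: sanity check of the phase-increment reading of chroma_freq. */
    static void example_chroma_freq_check(void)
    {
    	u64 ntsc = (0x21f07c1full * 27000000) >> 32;	/* ~3579545 Hz */
    	u64 pal  = (0x2a098acbull * 27000000) >> 32;	/* ~4433618 Hz */

    	pr_debug("NTSC %llu Hz, PAL %llu Hz\n", ntsc, pal);
    }

Both land on the standard NTSC (3.579545 MHz) and PAL (4.433619 MHz) colour subcarriers.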
+
+static inline struct sun4i_tv *
+drm_encoder_to_sun4i_tv(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct sun4i_tv,
+ encoder);
+}
+
+static inline struct sun4i_tv *
+drm_connector_to_sun4i_tv(struct drm_connector *connector)
+{
+ return container_of(connector, struct sun4i_tv,
+ connector);
+}
+
+/*
+ * FIXME: If only the drm_display_mode private field was usable, this
+ * could go away...
+ *
+ * So far, it doesn't seem to be preserved when the mode is passed to
+ * mode_set for some reason.
+ */
+static struct tv_mode *sun4i_tv_find_tv_by_mode(struct drm_display_mode *mode)
+{
+ int i;
+
+ /* First try to identify the mode by name */
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+ struct tv_mode *tv_mode = &tv_modes[i];
+
+		DRM_DEBUG_DRIVER("Comparing mode %s vs %s\n",
+				 mode->name, tv_mode->name);
+
+ if (!strcmp(mode->name, tv_mode->name))
+ return tv_mode;
+ }
+
+ /* Then by number of lines */
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+ struct tv_mode *tv_mode = &tv_modes[i];
+
+		DRM_DEBUG_DRIVER("Comparing mode %s vs %s (Y: %d vs %d)\n",
+				 mode->name, tv_mode->name,
+				 mode->vdisplay, tv_mode->vdisplay);
+
+ if (mode->vdisplay == tv_mode->vdisplay)
+ return tv_mode;
+ }
+
+ return NULL;
+}
+
+static void sun4i_tv_mode_to_drm_mode(struct tv_mode *tv_mode,
+ struct drm_display_mode *mode)
+{
+ DRM_DEBUG_DRIVER("Creating mode %s\n", mode->name);
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ mode->clock = 13500;
+ mode->flags = DRM_MODE_FLAG_INTERLACE;
+
+ mode->hdisplay = tv_mode->hdisplay;
+ mode->hsync_start = mode->hdisplay + tv_mode->hfront_porch;
+ mode->hsync_end = mode->hsync_start + tv_mode->hsync_len;
+ mode->htotal = mode->hsync_end + tv_mode->hback_porch;
+
+ mode->vdisplay = tv_mode->vdisplay;
+ mode->vsync_start = mode->vdisplay + tv_mode->vfront_porch;
+ mode->vsync_end = mode->vsync_start + tv_mode->vsync_len;
+ mode->vtotal = mode->vsync_end + tv_mode->vback_porch;
+}
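Running the NTSC entry through this gives the classic 525-line timing, which is a handy sanity check on the porch values above:

    /*
     * Sketch: the NTSC tv_mode above, expanded.
     *
     *   hsync_start = 720 + 18  = 738
     *   hsync_end   = 738 + 2   = 740
     *   htotal      = 740 + 118 = 858
     *   vsync_start = 480 + 26  = 506
     *   vsync_end   = 506 + 2   = 508
     *   vtotal      = 508 + 17  = 525
     *
     * Frame rate: 13.5 MHz / (858 * 525) ~= 29.97 Hz, i.e. NTSC.
     */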
+
+static int sun4i_tv_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ return 0;
+}
+
+static void sun4i_tv_disable(struct drm_encoder *encoder)
+{
+ struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
+ struct sun4i_drv *drv = tv->drv;
+ struct sun4i_tcon *tcon = drv->tcon;
+
+ DRM_DEBUG_DRIVER("Disabling the TV Output\n");
+
+ sun4i_tcon_channel_disable(tcon, 1);
+
+ regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
+ SUN4I_TVE_EN_ENABLE,
+ 0);
+ sun4i_backend_disable_color_correction(drv->backend);
+}
+
+static void sun4i_tv_enable(struct drm_encoder *encoder)
+{
+ struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
+ struct sun4i_drv *drv = tv->drv;
+ struct sun4i_tcon *tcon = drv->tcon;
+
+ DRM_DEBUG_DRIVER("Enabling the TV Output\n");
+
+ sun4i_backend_apply_color_correction(drv->backend);
+
+ regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
+ SUN4I_TVE_EN_ENABLE,
+ SUN4I_TVE_EN_ENABLE);
+
+ sun4i_tcon_channel_enable(tcon, 1);
+}
+
+static void sun4i_tv_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
+ struct sun4i_drv *drv = tv->drv;
+ struct sun4i_tcon *tcon = drv->tcon;
+ struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
+
+ sun4i_tcon1_mode_set(tcon, mode);
+
+ /* Enable and map the DAC to the output */
+ regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
+ SUN4I_TVE_EN_DAC_MAP_MASK,
+ SUN4I_TVE_EN_DAC_MAP(0, 1) |
+ SUN4I_TVE_EN_DAC_MAP(1, 2) |
+ SUN4I_TVE_EN_DAC_MAP(2, 3) |
+ SUN4I_TVE_EN_DAC_MAP(3, 4));
+
+ /* Set PAL settings */
+ regmap_write(tv->regs, SUN4I_TVE_CFG0_REG,
+ tv_mode->mode |
+ (tv_mode->yc_en ? SUN4I_TVE_CFG0_YC_EN : 0) |
+ SUN4I_TVE_CFG0_COMP_EN |
+ SUN4I_TVE_CFG0_DAC_CONTROL_54M |
+ SUN4I_TVE_CFG0_CORE_DATAPATH_54M |
+ SUN4I_TVE_CFG0_CORE_CONTROL_54M);
+
+ /* Configure the DAC for a composite output */
+ regmap_write(tv->regs, SUN4I_TVE_DAC0_REG,
+ SUN4I_TVE_DAC0_DAC_EN(0) |
+ (tv_mode->dac3_en ? SUN4I_TVE_DAC0_DAC_EN(3) : 0) |
+ SUN4I_TVE_DAC0_INTERNAL_DAC_37_5_OHMS |
+ SUN4I_TVE_DAC0_CHROMA_0_75 |
+ SUN4I_TVE_DAC0_LUMA_0_4 |
+ SUN4I_TVE_DAC0_CLOCK_INVERT |
+ (tv_mode->dac_bit25_en ? BIT(25) : 0) |
+ BIT(30));
+
+ /* Configure the sample delay between DAC0 and the other DAC */
+ regmap_write(tv->regs, SUN4I_TVE_NOTCH_REG,
+ SUN4I_TVE_NOTCH_DAC0_TO_DAC_DLY(1, 0) |
+ SUN4I_TVE_NOTCH_DAC0_TO_DAC_DLY(2, 0));
+
+ regmap_write(tv->regs, SUN4I_TVE_CHROMA_FREQ_REG,
+ tv_mode->chroma_freq);
+
+ /* Set the front and back porch */
+ regmap_write(tv->regs, SUN4I_TVE_PORCH_REG,
+ SUN4I_TVE_PORCH_BACK(tv_mode->back_porch) |
+ SUN4I_TVE_PORCH_FRONT(tv_mode->front_porch));
+
+ /* Set the lines setup */
+ regmap_write(tv->regs, SUN4I_TVE_LINE_REG,
+ SUN4I_TVE_LINE_FIRST(22) |
+ SUN4I_TVE_LINE_NUMBER(tv_mode->line_number));
+
+ regmap_write(tv->regs, SUN4I_TVE_LEVEL_REG,
+ SUN4I_TVE_LEVEL_BLANK(tv_mode->video_levels->blank) |
+ SUN4I_TVE_LEVEL_BLACK(tv_mode->video_levels->black));
+
+ regmap_write(tv->regs, SUN4I_TVE_DAC1_REG,
+ SUN4I_TVE_DAC1_AMPLITUDE(0, 0x18) |
+ SUN4I_TVE_DAC1_AMPLITUDE(1, 0x18) |
+ SUN4I_TVE_DAC1_AMPLITUDE(2, 0x18) |
+ SUN4I_TVE_DAC1_AMPLITUDE(3, 0x18));
+
+ regmap_write(tv->regs, SUN4I_TVE_CB_CR_LVL_REG,
+ SUN4I_TVE_CB_CR_LVL_CB_BURST(tv_mode->burst_levels->cb) |
+ SUN4I_TVE_CB_CR_LVL_CR_BURST(tv_mode->burst_levels->cr));
+
+ /* Set burst width for a composite output */
+ regmap_write(tv->regs, SUN4I_TVE_BURST_WIDTH_REG,
+ SUN4I_TVE_BURST_WIDTH_HSYNC_WIDTH(126) |
+ SUN4I_TVE_BURST_WIDTH_BURST_WIDTH(68) |
+ SUN4I_TVE_BURST_WIDTH_BREEZEWAY(22));
+
+ regmap_write(tv->regs, SUN4I_TVE_CB_CR_GAIN_REG,
+ SUN4I_TVE_CB_CR_GAIN_CB(tv_mode->color_gains->cb) |
+ SUN4I_TVE_CB_CR_GAIN_CR(tv_mode->color_gains->cr));
+
+ regmap_write(tv->regs, SUN4I_TVE_SYNC_VBI_REG,
+ SUN4I_TVE_SYNC_VBI_SYNC(0x10) |
+ SUN4I_TVE_SYNC_VBI_VBLANK(tv_mode->vblank_level));
+
+ regmap_write(tv->regs, SUN4I_TVE_ACTIVE_LINE_REG,
+ SUN4I_TVE_ACTIVE_LINE(1440));
+
+ /* Set composite chroma gain to 50 % */
+ regmap_write(tv->regs, SUN4I_TVE_CHROMA_REG,
+ SUN4I_TVE_CHROMA_COMP_GAIN_50);
+
+ regmap_write(tv->regs, SUN4I_TVE_12C_REG,
+ SUN4I_TVE_12C_COMP_YUV_EN |
+ SUN4I_TVE_12C_NOTCH_WIDTH_WIDE);
+
+ regmap_write(tv->regs, SUN4I_TVE_RESYNC_REG,
+ SUN4I_TVE_RESYNC_PIXEL(tv_mode->resync_params->pixel) |
+ SUN4I_TVE_RESYNC_LINE(tv_mode->resync_params->line) |
+ (tv_mode->resync_params->field ?
+ SUN4I_TVE_RESYNC_FIELD : 0));
+
+ regmap_write(tv->regs, SUN4I_TVE_SLAVE_REG, 0);
+
+ clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
+}
+
+static struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
+ .atomic_check = sun4i_tv_atomic_check,
+ .disable = sun4i_tv_disable,
+ .enable = sun4i_tv_enable,
+ .mode_set = sun4i_tv_mode_set,
+};
+
+static void sun4i_tv_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+}
+
+static struct drm_encoder_funcs sun4i_tv_funcs = {
+ .destroy = sun4i_tv_destroy,
+};
+
+static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+		struct drm_display_mode *mode = drm_mode_create(connector->dev);
+		struct tv_mode *tv_mode = &tv_modes[i];
+
+		if (!mode)
+			continue;
+
+		strcpy(mode->name, tv_mode->name);
+
+ sun4i_tv_mode_to_drm_mode(tv_mode, mode);
+ drm_mode_probed_add(connector, mode);
+ }
+
+ return i;
+}
+
+static int sun4i_tv_comp_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* TODO */
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+sun4i_tv_comp_best_encoder(struct drm_connector *connector)
+{
+ struct sun4i_tv *tv = drm_connector_to_sun4i_tv(connector);
+
+ return &tv->encoder;
+}
+
+static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
+ .get_modes = sun4i_tv_comp_get_modes,
+ .mode_valid = sun4i_tv_comp_mode_valid,
+ .best_encoder = sun4i_tv_comp_best_encoder,
+};
+
+static enum drm_connector_status
+sun4i_tv_comp_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void
+sun4i_tv_comp_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_cleanup(connector);
+}
+
+static struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = sun4i_tv_comp_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = sun4i_tv_comp_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static struct regmap_config sun4i_tv_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = SUN4I_TVE_WSS_DATA2_REG,
+ .name = "tv-encoder",
+};
+
+static int sun4i_tv_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct sun4i_drv *drv = drm->dev_private;
+ struct sun4i_tv *tv;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ tv = devm_kzalloc(dev, sizeof(*tv), GFP_KERNEL);
+ if (!tv)
+ return -ENOMEM;
+ tv->drv = drv;
+ dev_set_drvdata(dev, tv);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs)) {
+ dev_err(dev, "Couldn't map the TV encoder registers\n");
+ return PTR_ERR(regs);
+ }
+
+ tv->regs = devm_regmap_init_mmio(dev, regs,
+ &sun4i_tv_regmap_config);
+ if (IS_ERR(tv->regs)) {
+ dev_err(dev, "Couldn't create the TV encoder regmap\n");
+ return PTR_ERR(tv->regs);
+ }
+
+ tv->reset = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(tv->reset)) {
+ dev_err(dev, "Couldn't get our reset line\n");
+ return PTR_ERR(tv->reset);
+ }
+
+ ret = reset_control_deassert(tv->reset);
+ if (ret) {
+ dev_err(dev, "Couldn't deassert our reset line\n");
+ return ret;
+ }
+
+ tv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(tv->clk)) {
+ dev_err(dev, "Couldn't get the TV encoder clock\n");
+ ret = PTR_ERR(tv->clk);
+ goto err_assert_reset;
+ }
+ clk_prepare_enable(tv->clk);
+
+ drm_encoder_helper_add(&tv->encoder,
+ &sun4i_tv_helper_funcs);
+ ret = drm_encoder_init(drm,
+ &tv->encoder,
+ &sun4i_tv_funcs,
+ DRM_MODE_ENCODER_TVDAC,
+ NULL);
+ if (ret) {
+ dev_err(dev, "Couldn't initialise the TV encoder\n");
+ goto err_disable_clk;
+ }
+
+ tv->encoder.possible_crtcs = BIT(0);
+
+ drm_connector_helper_add(&tv->connector,
+ &sun4i_tv_comp_connector_helper_funcs);
+ ret = drm_connector_init(drm, &tv->connector,
+ &sun4i_tv_comp_connector_funcs,
+ DRM_MODE_CONNECTOR_Composite);
+ if (ret) {
+ dev_err(dev,
+ "Couldn't initialise the Composite connector\n");
+		goto err_cleanup_encoder;
+ }
+ tv->connector.interlace_allowed = true;
+
+ drm_mode_connector_attach_encoder(&tv->connector, &tv->encoder);
+
+ return 0;
+
+err_cleanup_encoder:
+ drm_encoder_cleanup(&tv->encoder);
+err_disable_clk:
+ clk_disable_unprepare(tv->clk);
+err_assert_reset:
+ reset_control_assert(tv->reset);
+ return ret;
+}
+
+static void sun4i_tv_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sun4i_tv *tv = dev_get_drvdata(dev);
+
+ drm_connector_cleanup(&tv->connector);
+ drm_encoder_cleanup(&tv->encoder);
+ clk_disable_unprepare(tv->clk);
+}
+
+static struct component_ops sun4i_tv_ops = {
+ .bind = sun4i_tv_bind,
+ .unbind = sun4i_tv_unbind,
+};
+
+static int sun4i_tv_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &sun4i_tv_ops);
+}
+
+static int sun4i_tv_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sun4i_tv_ops);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_tv_of_table[] = {
+ { .compatible = "allwinner,sun4i-a10-tv-encoder" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun4i_tv_of_table);
+
+static struct platform_driver sun4i_tv_platform_driver = {
+ .probe = sun4i_tv_probe,
+ .remove = sun4i_tv_remove,
+ .driver = {
+ .name = "sun4i-tve",
+ .of_match_table = sun4i_tv_of_table,
+ },
+};
+module_platform_driver(sun4i_tv_platform_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A10 TV Encoder Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index fb2b4b027..39940f5b7 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -434,7 +434,7 @@ static void tegra_plane_reset(struct drm_plane *plane)
struct tegra_plane_state *state;
if (plane->state)
- __drm_atomic_helper_plane_destroy_state(plane, plane->state);
+ __drm_atomic_helper_plane_destroy_state(plane->state);
kfree(plane->state);
plane->state = NULL;
@@ -466,7 +466,7 @@ static struct drm_plane_state *tegra_plane_atomic_duplicate_state(struct drm_pla
static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- __drm_atomic_helper_plane_destroy_state(plane, state);
+ __drm_atomic_helper_plane_destroy_state(state);
kfree(state);
}
@@ -998,7 +998,7 @@ static void tegra_crtc_reset(struct drm_crtc *crtc)
struct tegra_dc_state *state;
if (crtc->state)
- __drm_atomic_helper_crtc_destroy_state(crtc, crtc->state);
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
kfree(crtc->state);
crtc->state = NULL;
@@ -1034,7 +1034,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
static void tegra_crtc_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- __drm_atomic_helper_crtc_destroy_state(crtc, state);
+ __drm_atomic_helper_crtc_destroy_state(state);
kfree(state);
}
@@ -1722,7 +1722,6 @@ static int tegra_dc_init(struct host1x_client *client)
if (err < 0)
goto cleanup;
- drm_mode_crtc_set_gamma_size(&dc->base, 256);
drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
/*
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 8e6b18caa..b59c3bf0d 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -74,7 +74,7 @@ static void tegra_atomic_work(struct work_struct *work)
}
static int tegra_atomic_commit(struct drm_device *drm,
- struct drm_atomic_state *state, bool async)
+ struct drm_atomic_state *state, bool nonblock)
{
struct tegra_drm *tegra = drm->dev_private;
int err;
@@ -83,7 +83,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
if (err)
return err;
- /* serialize outstanding asynchronous commits */
+ /* serialize outstanding nonblocking commits */
mutex_lock(&tegra->commit.lock);
flush_work(&tegra->commit.work);
@@ -95,7 +95,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(drm, state);
- if (async)
+ if (nonblock)
tegra_atomic_schedule(tegra, state);
else
tegra_atomic_complete(tegra, state);
@@ -180,7 +180,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
/* syncpoints are used for full 32-bit hardware VBLANK counters */
drm->max_vblank_count = 0xffffffff;
- drm->vblank_disable_allowed = true;
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (err < 0)
@@ -268,12 +267,12 @@ static void tegra_drm_lastclose(struct drm_device *drm)
}
static struct host1x_bo *
-host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
+host1x_bo_lookup(struct drm_file *file, u32 handle)
{
struct drm_gem_object *gem;
struct tegra_bo *bo;
- gem = drm_gem_object_lookup(drm, file, handle);
+ gem = drm_gem_object_lookup(file, handle);
if (!gem)
return NULL;
@@ -311,11 +310,11 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
if (err < 0)
return err;
- dest->cmdbuf.bo = host1x_bo_lookup(drm, file, cmdbuf);
+ dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
if (!dest->cmdbuf.bo)
return -ENOENT;
- dest->target.bo = host1x_bo_lookup(drm, file, target);
+ dest->target.bo = host1x_bo_lookup(file, target);
if (!dest->target.bo)
return -ENOENT;
@@ -363,7 +362,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
goto fail;
}
- bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
+ bo = host1x_bo_lookup(file, cmdbuf.handle);
if (!bo) {
err = -ENOENT;
goto fail;
@@ -463,7 +462,7 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data,
struct drm_gem_object *gem;
struct tegra_bo *bo;
- gem = drm_gem_object_lookup(drm, file, args->handle);
+ gem = drm_gem_object_lookup(file, args->handle);
if (!gem)
return -EINVAL;
@@ -672,7 +671,7 @@ static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
return -EINVAL;
}
- gem = drm_gem_object_lookup(drm, file, args->handle);
+ gem = drm_gem_object_lookup(file, args->handle);
if (!gem)
return -ENOENT;
@@ -694,7 +693,7 @@ static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
struct tegra_bo *bo;
int err = 0;
- gem = drm_gem_object_lookup(drm, file, args->handle);
+ gem = drm_gem_object_lookup(file, args->handle);
if (!gem)
return -ENOENT;
@@ -736,7 +735,7 @@ static int tegra_gem_set_flags(struct drm_device *drm, void *data,
if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
return -EINVAL;
- gem = drm_gem_object_lookup(drm, file, args->handle);
+ gem = drm_gem_object_lookup(file, args->handle);
if (!gem)
return -ENOENT;
@@ -758,7 +757,7 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data,
struct drm_gem_object *gem;
struct tegra_bo *bo;
- gem = drm_gem_object_lookup(drm, file, args->handle);
+ gem = drm_gem_object_lookup(file, args->handle);
if (!gem)
return -ENOENT;
@@ -878,7 +877,7 @@ static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
fb->base.id, fb->width, fb->height, fb->depth,
fb->bits_per_pixel,
- atomic_read(&fb->refcount.refcount));
+ drm_framebuffer_read_refcount(fb));
}
mutex_unlock(&drm->mode_config.fb_lock);
@@ -932,7 +931,7 @@ static struct drm_driver tegra_drm_driver = {
.debugfs_cleanup = tegra_debugfs_cleanup,
#endif
- .gem_free_object = tegra_bo_free_object,
+ .gem_free_object_unlocked = tegra_bo_free_object,
.gem_vm_ops = &tegra_bo_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 8a10f5b7d..f52d6cb24 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -121,7 +121,7 @@ struct tegra_dc {
spinlock_t lock;
struct drm_crtc base;
- int powergate;
+ unsigned int powergate;
int pipe;
struct clk *clk;
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 44e102799..d1239ebc1 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -745,13 +745,17 @@ static void tegra_dsi_soft_reset(struct tegra_dsi *dsi)
static void tegra_dsi_connector_reset(struct drm_connector *connector)
{
- struct tegra_dsi_state *state =
- kzalloc(sizeof(*state), GFP_KERNEL);
+ struct tegra_dsi_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (state) {
+ if (!state)
+ return;
+
+ if (connector->state) {
+ __drm_atomic_helper_connector_destroy_state(connector->state);
kfree(connector->state);
- __drm_atomic_helper_connector_reset(connector, &state->base);
}
+
+ __drm_atomic_helper_connector_reset(connector, &state->base);
}
static struct drm_connector_state *
@@ -764,6 +768,9 @@ tegra_dsi_connector_duplicate_state(struct drm_connector *connector)
if (!copy)
return NULL;
+ __drm_atomic_helper_connector_duplicate_state(connector,
+ &copy->base);
+
return &copy->base;
}
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index ca84de9cc..1b12aa7a7 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -149,7 +149,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
unsigned int height = cmd->height / (i ? vsub : 1);
unsigned int size, bpp;
- gem = drm_gem_object_lookup(drm, file, cmd->handles[i]);
+ gem = drm_gem_object_lookup(file, cmd->handles[i]);
if (!gem) {
err = -ENXIO;
goto unreference;
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 3b0d8c392..aa60d9909 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -401,7 +401,7 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
struct drm_gem_object *gem;
struct tegra_bo *bo;
- gem = drm_gem_object_lookup(drm, file, handle);
+ gem = drm_gem_object_lookup(file, handle);
if (!gem) {
dev_err(drm->dev, "failed to lookup GEM object\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 051e5e1b7..79027b1c6 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -707,7 +707,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
event = tilcdc_crtc->event;
tilcdc_crtc->event = NULL;
if (event)
- drm_send_vblank_event(dev, 0, event);
+ drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
index 106679bca..f9c79dabc 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
@@ -157,7 +157,7 @@ struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
if (!overlay_data || kfree_table_add(kft, overlay_data))
return NULL;
- of_fdt_unflatten_tree(overlay_data, &overlay);
+ of_fdt_unflatten_tree(overlay_data, NULL, &overlay);
if (!overlay) {
pr_warn("%s: Unfattening overlay tree failed\n", __func__);
return NULL;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 7716f42f8..6b8c5b3bf 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -342,7 +342,7 @@ static int tfp410_probe(struct platform_device *pdev)
tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio",
0, NULL);
- if (IS_ERR_VALUE(tfp410_mod->gpio)) {
+ if (tfp410_mod->gpio < 0) {
dev_warn(&pdev->dev, "No power down GPIO\n");
} else {
ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b433b9f04..f92325800 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -2,9 +2,10 @@
# Makefile for the drm device driver. This driver provides support for the
ccflags-y := -Iinclude/drm
-ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
+ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
ttm_bo_manager.o ttm_page_alloc_dma.o
+ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 764be3639..028ab6007 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -34,7 +34,6 @@
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-#ifdef TTM_HAS_AGP
#include <drm/ttm/ttm_placement.h>
#include <linux/agp_backend.h>
#include <linux/module.h>
@@ -148,5 +147,3 @@ void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
ttm_pool_unpopulate(ttm);
}
EXPORT_SYMBOL(ttm_agp_tt_unpopulate);
-
-#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 3e7c9ac50..a71cf98c6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -164,7 +164,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man;
lockdep_assert_held(&bo->resv->lock.base);
@@ -172,12 +171,11 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
BUG_ON(!list_empty(&bo->lru));
- man = &bdev->man[bo->mem.mem_type];
- list_add_tail(&bo->lru, &man->lru);
+ list_add(&bo->lru, bdev->driver->lru_tail(bo));
kref_get(&bo->list_kref);
if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
- list_add_tail(&bo->swap, &bo->glob->swap_lru);
+ list_add(&bo->swap, bdev->driver->swap_lru_tail(bo));
kref_get(&bo->list_kref);
}
}
@@ -186,8 +184,12 @@ EXPORT_SYMBOL(ttm_bo_add_to_lru);
int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
+ struct ttm_bo_device *bdev = bo->bdev;
int put_count = 0;
+ if (bdev->driver->lru_removal)
+ bdev->driver->lru_removal(bo);
+
if (!list_empty(&bo->swap)) {
list_del_init(&bo->swap);
++put_count;
@@ -197,11 +199,6 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
++put_count;
}
- /*
- * TODO: Add a driver hook to delete from
- * driver-specific LRU's here.
- */
-
return put_count;
}
@@ -230,16 +227,32 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
+ struct ttm_bo_device *bdev = bo->bdev;
int put_count = 0;
lockdep_assert_held(&bo->resv->lock.base);
+ if (bdev->driver->lru_removal)
+ bdev->driver->lru_removal(bo);
+
put_count = ttm_bo_del_from_lru(bo);
ttm_bo_list_ref_sub(bo, put_count, true);
ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
+struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo)
+{
+ return bo->bdev->man[bo->mem.mem_type].lru.prev;
+}
+EXPORT_SYMBOL(ttm_bo_default_lru_tail);
+
+struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo)
+{
+ return bo->glob->swap_lru.prev;
+}
+EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail);
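With the new hooks, a driver that keeps its own LRU ordering overrides lru_tail/swap_lru_tail (and optionally lru_removal) in its ttm_bo_driver; everyone else points at the defaults, which reproduce the old list_add_tail behaviour. A sketch, with the struct name illustrative and the member names as introduced by this series:

    /* Sketch: opting a driver into the new TTM LRU hooks. */
    static struct ttm_bo_driver example_bo_driver = {
    	/* ... the driver's existing callbacks ... */
    	.lru_tail	= &ttm_bo_default_lru_tail,
    	.swap_lru_tail	= &ttm_bo_default_swap_lru_tail,
    	/* .lru_removal is only needed for driver-private lists */
    };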
+
/*
* Call bo->mutex locked.
*/
@@ -443,10 +456,10 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
int ret;
spin_lock(&glob->lru_lock);
- ret = __ttm_bo_reserve(bo, false, true, false, NULL);
+ ret = __ttm_bo_reserve(bo, false, true, NULL);
if (!ret) {
- if (!ttm_bo_wait(bo, false, false, true)) {
+ if (!ttm_bo_wait(bo, false, true)) {
put_count = ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
@@ -499,7 +512,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
int put_count;
int ret;
- ret = ttm_bo_wait(bo, false, false, true);
+ ret = ttm_bo_wait(bo, false, true);
if (ret && !no_wait_gpu) {
long lret;
@@ -517,7 +530,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
return -EBUSY;
spin_lock(&glob->lru_lock);
- ret = __ttm_bo_reserve(bo, false, true, false, NULL);
+ ret = __ttm_bo_reserve(bo, false, true, NULL);
/*
* We raced, and lost, someone else holds the reservation now,
@@ -536,7 +549,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
* remove sync_obj with ttm_bo_wait, the wait should be
* finished, and no new wait object should have been added.
*/
- ret = ttm_bo_wait(bo, false, false, true);
+ ret = ttm_bo_wait(bo, false, true);
WARN_ON(ret);
}
@@ -586,11 +599,10 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&nentry->list_kref);
}
- ret = __ttm_bo_reserve(entry, false, true, false, NULL);
+ ret = __ttm_bo_reserve(entry, false, true, NULL);
if (remove_all && ret) {
spin_unlock(&glob->lru_lock);
- ret = __ttm_bo_reserve(entry, false, false,
- false, NULL);
+ ret = __ttm_bo_reserve(entry, false, false, NULL);
spin_lock(&glob->lru_lock);
}
@@ -676,7 +688,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
struct ttm_placement placement;
int ret = 0;
- ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+ ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS) {
@@ -732,7 +744,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
- ret = __ttm_bo_reserve(bo, false, true, false, NULL);
+ ret = __ttm_bo_reserve(bo, false, true, NULL);
if (!ret) {
if (place && (place->fpfn || place->lpfn)) {
/* Don't evict this BO if it's outside of the
@@ -989,13 +1001,19 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
lockdep_assert_held(&bo->resv->lock.base);
/*
- * FIXME: It's possible to pipeline buffer moves.
- * Have the driver move function wait for idle when necessary,
- * instead of doing it here.
+ * Don't wait for the BO on initial allocation. This is important when
+ * the BO has an imported reservation object.
*/
- ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
- if (ret)
- return ret;
+ if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) {
+ /*
+ * FIXME: It's possible to pipeline buffer moves.
+ * Have the driver move function wait for idle when necessary,
+ * instead of doing it here.
+ */
+ ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ if (ret)
+ return ret;
+ }
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
mem.page_alignment = bo->mem.page_alignment;
@@ -1207,7 +1225,7 @@ size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
size_t size = 0;
size += ttm_round_pot(struct_size);
- size += PAGE_ALIGN(npages * sizeof(void *));
+ size += ttm_round_pot(npages * sizeof(void *));
size += ttm_round_pot(sizeof(struct ttm_tt));
return size;
}
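The PAGE_ALIGN() to ttm_round_pot() switch tightens the accounting for small page arrays, assuming ttm_round_pot() keeps its ttm_memory.h behaviour of rounding sub-page sizes up to the next power of two rather than a whole page. For example:

    /*
     * Sketch: size accounting for a 1 MiB BO (256 pages, 8-byte pointers).
     *
     *   page array          = 256 * sizeof(void *) = 2048 bytes
     *   PAGE_ALIGN(2048)    = 4096 (old estimate)
     *   ttm_round_pot(2048) = 2048 (new estimate, already a power of two)
     */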
@@ -1221,8 +1239,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
size_t size = 0;
size += ttm_round_pot(struct_size);
- size += PAGE_ALIGN(npages * sizeof(void *));
- size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
+ size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
size += ttm_round_pot(sizeof(struct ttm_dma_tt));
return size;
}
@@ -1501,7 +1518,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
bdev->dev_mapping = mapping;
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
- bdev->val_seq = 0;
mutex_lock(&glob->device_list_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&glob->device_list_mutex);
@@ -1555,7 +1571,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
int ttm_bo_wait(struct ttm_buffer_object *bo,
- bool lazy, bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait)
{
struct reservation_object_list *fobj;
struct reservation_object *resv;
@@ -1610,10 +1626,10 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
* Using ttm_bo_reserve makes sure the lru lists are updated.
*/
- ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
+ ret = ttm_bo_reserve(bo, true, no_wait, NULL);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_wait(bo, false, true, no_wait);
+ ret = ttm_bo_wait(bo, true, no_wait);
if (likely(ret == 0))
atomic_inc(&bo->cpu_writers);
ttm_bo_unreserve(bo);
@@ -1643,7 +1659,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &glob->swap_lru, swap) {
- ret = __ttm_bo_reserve(bo, false, true, false, NULL);
+ ret = __ttm_bo_reserve(bo, false, true, NULL);
if (!ret)
break;
}
@@ -1670,7 +1686,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
* Wait for GPU, then move to system cached.
*/
- ret = ttm_bo_wait(bo, false, false, false);
+ ret = ttm_bo_wait(bo, false, false);
if (unlikely(ret != 0))
goto out;
@@ -1742,7 +1758,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
return -ERESTARTSYS;
if (!ww_mutex_is_locked(&bo->resv->lock))
goto out_unlock;
- ret = __ttm_bo_reserve(bo, true, false, false, NULL);
+ ret = __ttm_bo_reserve(bo, true, false, NULL);
if (unlikely(ret != 0))
goto out_unlock;
__ttm_bo_unreserve(bo);
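
Stepping back from the individual ttm_bo.c hunks: two parameters disappear across the whole file. ttm_bo_wait() loses its never-used lazy flag, and the reserve helpers lose the separate use_ticket boolean, since passing a non-NULL ww_acquire_ctx already says everything. A hedged sketch of the resulting call pattern, with the prototypes paraphrased from the hunks rather than quoted from the headers:

    #include <drm/ttm/ttm_bo_api.h>
    #include <drm/ttm/ttm_bo_driver.h>

    /* Hypothetical helper showing the post-patch signatures in use. */
    static int example_reserve_and_idle(struct ttm_buffer_object *bo)
    {
            int ret;

            /* NULL ticket: plain, un-ticketed reservation. */
            ret = ttm_bo_reserve(bo, true /* interruptible */,
                                 false /* no_wait */, NULL);
            if (unlikely(ret != 0))
                    return ret;

            /* Wait (interruptibly) for the BO's fences; no lazy flag. */
            ret = ttm_bo_wait(bo, true, false);
            ttm_bo_unreserve(bo);
            return ret;
    }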
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ac6fe40b9..d98315597 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -645,7 +645,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
reservation_object_add_excl_fence(bo->resv, fence);
if (evict) {
- ret = ttm_bo_wait(bo, false, false, false);
+ ret = ttm_bo_wait(bo, false, false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 06d26dc43..3216878bc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -54,7 +54,7 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
/*
* Quick non-stalling check for idle.
*/
- ret = ttm_bo_wait(bo, false, false, true);
+ ret = ttm_bo_wait(bo, false, true);
if (likely(ret == 0))
goto out_unlock;
@@ -68,14 +68,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_unlock;
up_read(&vma->vm_mm->mmap_sem);
- (void) ttm_bo_wait(bo, false, true, false);
+ (void) ttm_bo_wait(bo, true, false);
goto out_unlock;
}
/*
* Ordinary wait.
*/
- ret = ttm_bo_wait(bo, false, true, false);
+ ret = ttm_bo_wait(bo, true, false);
if (unlikely(ret != 0))
ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
VM_FAULT_NOPAGE;
@@ -108,7 +108,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved.
*/
- ret = ttm_bo_reserve(bo, true, true, false, NULL);
+ ret = ttm_bo_reserve(bo, true, true, NULL);
if (unlikely(ret != 0)) {
if (ret != -EBUSY)
return VM_FAULT_NOPAGE;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 3820ae97a..a80717b35 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -112,8 +112,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
- ticket);
+ ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
__ttm_bo_unreserve(bo);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 025c42905..a37de5db5 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -48,7 +48,7 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif
@@ -219,7 +219,7 @@ static struct ttm_pool_manager *_manager;
#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
int i;
for (i = 0; i < addrinarray; i++)
@@ -230,7 +230,7 @@ static int set_pages_array_wb(struct page **pages, int addrinarray)
static int set_pages_array_wc(struct page **pages, int addrinarray)
{
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
int i;
for (i = 0; i < addrinarray; i++)
@@ -241,7 +241,7 @@ static int set_pages_array_wc(struct page **pages, int addrinarray)
static int set_pages_array_uc(struct page **pages, int addrinarray)
{
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
int i;
for (i = 0; i < addrinarray; i++)
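
A note on the TTM_HAS_AGP to IS_ENABLED(CONFIG_AGP) conversions in this file (and in ttm_page_alloc_dma.c below): a bare #ifdef CONFIG_AGP only fires when AGP is built in, because a modular AGP defines CONFIG_AGP_MODULE instead. IS_ENABLED() covers both states, which is what these cache-attribute fallbacks need. The idiom, sketched:

    #include <linux/kconfig.h>

    /* IS_ENABLED(CONFIG_AGP) evaluates to 1 for CONFIG_AGP=y *and*
     * CONFIG_AGP=m; "#ifdef CONFIG_AGP" would miss the modular case.
     * TTM_AGP_BACKED is a hypothetical local name for illustration. */
    #if IS_ENABLED(CONFIG_AGP)
    #include <asm/agp.h>
    #define TTM_AGP_BACKED 1
    #else
    #define TTM_AGP_BACKED 0
    #endif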
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 624d941aa..bef9f6feb 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -50,7 +50,7 @@
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif
@@ -271,7 +271,7 @@ static struct kobj_type ttm_pool_kobj_type = {
#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
int i;
for (i = 0; i < addrinarray; i++)
@@ -282,7 +282,7 @@ static int set_pages_array_wb(struct page **pages, int addrinarray)
static int set_pages_array_wc(struct page **pages, int addrinarray)
{
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
int i;
for (i = 0; i < addrinarray; i++)
@@ -293,7 +293,7 @@ static int set_pages_array_wc(struct page **pages, int addrinarray)
static int set_pages_array_uc(struct page **pages, int addrinarray)
{
-#ifdef TTM_HAS_AGP
+#if IS_ENABLED(CONFIG_AGP)
int i;
for (i = 0; i < addrinarray; i++)
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 772ec9e1f..c20408940 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -94,7 +94,7 @@ static void udl_usb_disconnect(struct usb_interface *interface)
struct drm_device *dev = usb_get_intfdata(interface);
drm_kms_helper_poll_disable(dev);
- drm_connector_unplug_all(dev);
+ drm_connector_unregister_all(dev);
udl_fbdev_unplug(dev);
udl_drop_usb(dev);
drm_unplug_dev(dev);
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 4a064efce..0b03d34ff 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -81,8 +81,6 @@ struct udl_framebuffer {
struct drm_framebuffer base;
struct udl_gem_object *obj;
bool active_16; /* active on the 16-bit channel */
- int x1, y1, x2, y2; /* dirty rect */
- spinlock_t dirty_lock;
};
#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index fd1eb9d03..d5df555ae 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -77,68 +77,6 @@ static uint16_t rgb16(uint32_t col)
}
#endif
-/*
- * NOTE: fb_defio.c is holding info->fbdefio.mutex
- * Touching ANY framebuffer memory that triggers a page fault
- * in fb_defio will cause a deadlock, when it also tries to
- * grab the same mutex.
- */
-static void udlfb_dpy_deferred_io(struct fb_info *info,
- struct list_head *pagelist)
-{
- struct page *cur;
- struct fb_deferred_io *fbdefio = info->fbdefio;
- struct udl_fbdev *ufbdev = info->par;
- struct drm_device *dev = ufbdev->ufb.base.dev;
- struct udl_device *udl = dev->dev_private;
- struct urb *urb;
- char *cmd;
- cycles_t start_cycles, end_cycles;
- int bytes_sent = 0;
- int bytes_identical = 0;
- int bytes_rendered = 0;
-
- if (!fb_defio)
- return;
-
- start_cycles = get_cycles();
-
- urb = udl_get_urb(dev);
- if (!urb)
- return;
-
- cmd = urb->transfer_buffer;
-
- /* walk the written page list and render each to device */
- list_for_each_entry(cur, &fbdefio->pagelist, lru) {
-
- if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
- &urb, (char *) info->fix.smem_start,
- &cmd, cur->index << PAGE_SHIFT,
- cur->index << PAGE_SHIFT,
- PAGE_SIZE, &bytes_identical, &bytes_sent))
- goto error;
- bytes_rendered += PAGE_SIZE;
- }
-
- if (cmd > (char *) urb->transfer_buffer) {
- /* Send partial buffer remaining before exiting */
- int len = cmd - (char *) urb->transfer_buffer;
- udl_submit_urb(dev, urb, len);
- bytes_sent += len;
- } else
- udl_urb_completion(urb);
-
-error:
- atomic_add(bytes_sent, &udl->bytes_sent);
- atomic_add(bytes_identical, &udl->bytes_identical);
- atomic_add(bytes_rendered, &udl->bytes_rendered);
- end_cycles = get_cycles();
- atomic_add(((unsigned int) ((end_cycles - start_cycles)
- >> 10)), /* Kcycles */
- &udl->cpu_kcycles_used);
-}
-
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int width, int height)
{
@@ -152,9 +90,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
struct urb *urb;
int aligned_x;
int bpp = (fb->base.bits_per_pixel / 8);
- int x2, y2;
- bool store_for_later = false;
- unsigned long flags;
if (!fb->active_16)
return 0;
@@ -180,38 +115,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
(y + height > fb->base.height))
return -EINVAL;
- /* if we are in atomic just store the info
- can't test inside spin lock */
- if (in_atomic())
- store_for_later = true;
-
- x2 = x + width - 1;
- y2 = y + height - 1;
-
- spin_lock_irqsave(&fb->dirty_lock, flags);
-
- if (fb->y1 < y)
- y = fb->y1;
- if (fb->y2 > y2)
- y2 = fb->y2;
- if (fb->x1 < x)
- x = fb->x1;
- if (fb->x2 > x2)
- x2 = fb->x2;
-
- if (store_for_later) {
- fb->x1 = x;
- fb->x2 = x2;
- fb->y1 = y;
- fb->y2 = y2;
- spin_unlock_irqrestore(&fb->dirty_lock, flags);
- return 0;
- }
-
- fb->x1 = fb->y1 = INT_MAX;
- fb->x2 = fb->y2 = 0;
-
- spin_unlock_irqrestore(&fb->dirty_lock, flags);
start_cycles = get_cycles();
urb = udl_get_urb(dev);
@@ -219,14 +122,14 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
return 0;
cmd = urb->transfer_buffer;
- for (i = y; i <= y2 ; i++) {
+ for (i = y; i < y + height ; i++) {
const int line_offset = fb->base.pitches[0] * i;
const int byte_offset = line_offset + (x * bpp);
const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
if (udl_render_hline(dev, bpp, &urb,
(char *) fb->obj->vmapping,
&cmd, byte_offset, dev_byte_offset,
- (x2 - x + 1) * bpp,
+ width * bpp,
&bytes_identical, &bytes_sent))
goto error;
}
@@ -283,36 +186,6 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
return 0;
}
-static void udl_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-{
- struct udl_fbdev *ufbdev = info->par;
-
- drm_fb_helper_sys_fillrect(info, rect);
-
- udl_handle_damage(&ufbdev->ufb, rect->dx, rect->dy, rect->width,
- rect->height);
-}
-
-static void udl_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
-{
- struct udl_fbdev *ufbdev = info->par;
-
- drm_fb_helper_sys_copyarea(info, region);
-
- udl_handle_damage(&ufbdev->ufb, region->dx, region->dy, region->width,
- region->height);
-}
-
-static void udl_fb_imageblit(struct fb_info *info, const struct fb_image *image)
-{
- struct udl_fbdev *ufbdev = info->par;
-
- drm_fb_helper_sys_imageblit(info, image);
-
- udl_handle_damage(&ufbdev->ufb, image->dx, image->dy, image->width,
- image->height);
-}
-
/*
* It's common for several clients to have the framebuffer open simultaneously,
* e.g. both fbcon and X. Makes things interesting.
@@ -339,7 +212,7 @@ static int udl_fb_open(struct fb_info *info, int user)
if (fbdefio) {
fbdefio->delay = DL_DEFIO_WRITE_DELAY;
- fbdefio->deferred_io = udlfb_dpy_deferred_io;
+ fbdefio->deferred_io = drm_fb_helper_deferred_io;
}
info->fbdefio = fbdefio;
@@ -379,9 +252,9 @@ static struct fb_ops udlfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = udl_fb_fillrect,
- .fb_copyarea = udl_fb_copyarea,
- .fb_imageblit = udl_fb_imageblit,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -458,7 +331,6 @@ udl_framebuffer_init(struct drm_device *dev,
{
int ret;
- spin_lock_init(&ufb->dirty_lock);
ufb->obj = obj;
drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
@@ -628,7 +500,7 @@ udl_fb_user_fb_create(struct drm_device *dev,
int ret;
uint32_t size;
- obj = drm_gem_object_lookup(dev, file, mode_cmd->handles[0]);
+ obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
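
The udl_fb.c changes above remove roughly 130 lines of driver-private dirty-rectangle tracking and deferred-I/O page walking in favour of the helpers that landed in drm_fb_helper for this release: drm_fb_helper_deferred_io() as the fbdefio callback and the drm_fb_helper_sys_*() wrappers for the drawing entry points. What the slimmed-down wiring amounts to, sketched with hypothetical example_* names (the actual flush ends up in the driver's fb_helper dirty() hook rather than a hand-rolled page-list walk):

    #include <drm/drm_fb_helper.h>
    #include <linux/fb.h>

    static struct fb_ops example_fb_ops = {
            .owner          = THIS_MODULE,
            /* Draw into the shadow buffer via the generic helpers... */
            .fb_fillrect    = drm_fb_helper_sys_fillrect,
            .fb_copyarea    = drm_fb_helper_sys_copyarea,
            .fb_imageblit   = drm_fb_helper_sys_imageblit,
    };

    static void example_enable_defio(struct fb_info *info,
                                     struct fb_deferred_io *fbdefio)
    {
            /* ...and let the core flush touched pages after a delay. */
            fbdefio->delay = HZ / 20;       /* arbitrary example delay */
            fbdefio->deferred_io = drm_fb_helper_deferred_io;
            info->fbdefio = fbdefio;
            fb_deferred_io_init(info);
    }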
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index d7528e0d8..818e70712 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -217,7 +217,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
int ret = 0;
mutex_lock(&dev->struct_mutex);
- obj = drm_gem_object_lookup(dev, file, handle);
+ obj = drm_gem_object_lookup(file, handle);
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
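
Both udl lookups above show the tree-wide drm_gem_object_lookup() signature change that recurs through the rest of this diff (vc4, vgem, virtio): the struct drm_device argument is gone, since the lookup only ever consulted the file's handle table and the device is reachable from the object anyway. The new shape, as a hypothetical ioctl fragment:

    #include <drm/drmP.h>

    static int example_lookup(struct drm_file *file_priv, u32 handle)
    {
            struct drm_gem_object *obj;

            obj = drm_gem_object_lookup(file_priv, handle); /* no dev arg */
            if (!obj)
                    return -ENOENT;

            /* The owning device is still available when needed. */
            WARN_ON(!obj->dev);

            drm_gem_object_unreference_unlocked(obj);
            return 0;
    }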
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 584810474..e53df59cb 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -5,6 +5,7 @@ config DRM_VC4
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
+ select DRM_PANEL
help
Choose this option if you have a system that has a Broadcom
VC4 GPU, such as the Raspberry Pi or other BCM2708/BCM2835.
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index 4c6a99f03..fb77db755 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -7,6 +7,7 @@ vc4-y := \
vc4_bo.o \
vc4_crtc.o \
vc4_drv.o \
+ vc4_dpi.o \
vc4_kms.o \
vc4_gem.o \
vc4_hdmi.o \
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 9807bc9d2..e5a9d3aaf 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -457,7 +457,7 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_vc4_mmap_bo *args = data;
struct drm_gem_object *gem_obj;
- gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 355ee4b09..0f18b76c7 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -49,6 +49,10 @@ struct vc4_crtc {
/* Which HVS channel we're using for our CRTC. */
int channel;
+ u8 lut_r[256];
+ u8 lut_g[256];
+ u8 lut_b[256];
+
struct drm_pending_vblank_event *event;
};
@@ -147,6 +151,46 @@ static void vc4_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
}
+static void
+vc4_crtc_lut_load(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ u32 i;
+
+ /* The LUT memory is laid out with each HVS channel in order,
+ * each of which takes 256 writes for R, 256 for G, then 256
+ * for B.
+ */
+ HVS_WRITE(SCALER_GAMADDR,
+ SCALER_GAMADDR_AUTOINC |
+ (vc4_crtc->channel * 3 * crtc->gamma_size));
+
+ for (i = 0; i < crtc->gamma_size; i++)
+ HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_r[i]);
+ for (i = 0; i < crtc->gamma_size; i++)
+ HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_g[i]);
+ for (i = 0; i < crtc->gamma_size; i++)
+ HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
+}
+
+static void
+vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+ uint32_t start, uint32_t size)
+{
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ u32 i;
+
+ for (i = start; i < start + size; i++) {
+ vc4_crtc->lut_r[i] = r[i] >> 8;
+ vc4_crtc->lut_g[i] = g[i] >> 8;
+ vc4_crtc->lut_b[i] = b[i] >> 8;
+ }
+
+ vc4_crtc_lut_load(crtc);
+}
+
static u32 vc4_get_fifo_full_level(u32 format)
{
static const u32 fifo_len_bytes = 64;
@@ -260,8 +304,14 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel),
SCALER_DISPBKGND_AUTOHS |
+ SCALER_DISPBKGND_GAMMA |
(interlace ? SCALER_DISPBKGND_INTERLACE : 0));
+ /* Reload the LUT, since the SRAMs would have been disabled if
+ * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once.
+ */
+ vc4_crtc_lut_load(crtc);
+
if (debug_dump_regs) {
DRM_INFO("CRTC %d regs after:\n", drm_crtc_index(crtc));
vc4_crtc_dump_regs(vc4_crtc);
@@ -406,14 +456,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
- HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
- vc4_state->mm.start);
-
- if (debug_dump_regs) {
- DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
- vc4_hvs_dump_state(dev);
- }
-
if (crtc->state->event) {
unsigned long flags;
@@ -423,8 +465,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
spin_lock_irqsave(&dev->event_lock, flags);
vc4_crtc->event = crtc->state->event;
- spin_unlock_irqrestore(&dev->event_lock, flags);
crtc->state->event = NULL;
+
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ } else {
+ HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
+ vc4_state->mm.start);
+ }
+
+ if (debug_dump_regs) {
+ DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
+ vc4_hvs_dump_state(dev);
}
}
@@ -450,12 +504,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
{
struct drm_crtc *crtc = &vc4_crtc->base;
struct drm_device *dev = crtc->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
+ u32 chan = vc4_crtc->channel;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
- if (vc4_crtc->event) {
+ if (vc4_crtc->event &&
+ (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) {
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
+ drm_crtc_vblank_put(crtc);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -506,6 +565,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+ drm_crtc_vblank_put(crtc);
drm_framebuffer_unreference(flip_state->fb);
kfree(flip_state);
@@ -548,6 +608,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
return ret;
}
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
/* Immediately update the plane's legacy fb pointer, so that later
* modeset prep sees the state that will be present when the semaphore
* is released.
@@ -600,7 +662,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
}
- __drm_atomic_helper_crtc_destroy_state(crtc, state);
+ __drm_atomic_helper_crtc_destroy_state(state);
}
static const struct drm_crtc_funcs vc4_crtc_funcs = {
@@ -613,6 +675,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = vc4_crtc_duplicate_state,
.atomic_destroy_state = vc4_crtc_destroy_state,
+ .gamma_set = vc4_crtc_gamma_set,
};
static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
@@ -711,6 +774,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
primary_plane->crtc = crtc;
vc4->crtc[drm_crtc_index(crtc)] = vc4_crtc;
vc4_crtc->channel = vc4_crtc->data->hvs_channel;
+ drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
/* Set up some arbitrary number of planes. We're not limited
* by a set number of physical registers, just the space in
@@ -751,6 +815,12 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
vc4_set_crtc_possible_masks(drm, crtc);
+ for (i = 0; i < crtc->gamma_size; i++) {
+ vc4_crtc->lut_r[i] = i;
+ vc4_crtc->lut_g[i] = i;
+ vc4_crtc->lut_b[i] = i;
+ }
+
platform_set_drvdata(pdev, vc4_crtc);
return 0;
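
One detail of the new vc4 gamma path is easy to miss: the KMS gamma_set hook hands the driver 16-bit ramps, while the HVS gamma SRAM stores 8 bits per channel, so vc4_crtc_gamma_set() keeps only the top byte of each entry. A userspace-runnable check of that truncation, using made-up values:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* 16-bit KMS gamma entry -> 8-bit HVS SRAM entry (top byte). */
            uint16_t kms_entry = 0xABCD;
            uint8_t hvs_entry = kms_entry >> 8;

            assert(hvs_entry == 0xAB);

            /* An identity ramp therefore maps i * 0x0101 -> i. */
            for (unsigned i = 0; i < 256; i++)
                    assert((uint8_t)((i * 0x0101) >> 8) == i);
            return 0;
    }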
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index d76ad10b0..245115d49 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -17,6 +17,7 @@
static const struct drm_info_list vc4_debugfs_list[] = {
{"bo_stats", vc4_bo_stats_debugfs, 0},
+ {"dpi_regs", vc4_dpi_debugfs_regs, 0},
{"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
{"hvs_regs", vc4_hvs_debugfs_regs, 0},
{"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
new file mode 100644
index 000000000..9817dbfa4
--- /dev/null
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -0,0 +1,520 @@
+/*
+ * Copyright (C) 2016 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ * DOC: VC4 DPI module
+ *
+ * The VC4 DPI hardware supports MIPI DPI type 4 and Nokia ViSSI
+ * signals, which are routed out to GPIO0-27 with the ALT2 function.
+ */
+
+#include "drm_atomic_helper.h"
+#include "drm_crtc_helper.h"
+#include "drm_edid.h"
+#include "drm_panel.h"
+#include "linux/clk.h"
+#include "linux/component.h"
+#include "linux/of_graph.h"
+#include "linux/of_platform.h"
+#include "vc4_drv.h"
+#include "vc4_regs.h"
+
+#define DPI_C 0x00
+# define DPI_OUTPUT_ENABLE_MODE BIT(16)
+
+/* The order field takes the incoming 24 bit RGB from the pixel valve
+ * and shuffles the 3 channels.
+ */
+# define DPI_ORDER_MASK VC4_MASK(15, 14)
+# define DPI_ORDER_SHIFT 14
+# define DPI_ORDER_RGB 0
+# define DPI_ORDER_BGR 1
+# define DPI_ORDER_GRB 2
+# define DPI_ORDER_BRG 3
+
+/* The format field takes the ORDER-shuffled pixel valve data and
+ * formats it onto the output lines.
+ */
+# define DPI_FORMAT_MASK VC4_MASK(13, 11)
+# define DPI_FORMAT_SHIFT 11
+/* This define is named in the hardware, but actually just outputs 0. */
+# define DPI_FORMAT_9BIT_666_RGB 0
+/* Outputs 00000000rrrrrggggggbbbbb */
+# define DPI_FORMAT_16BIT_565_RGB_1 1
+/* Outputs 000rrrrr00gggggg000bbbbb */
+# define DPI_FORMAT_16BIT_565_RGB_2 2
+/* Outputs 00rrrrr000gggggg00bbbbb0 */
+# define DPI_FORMAT_16BIT_565_RGB_3 3
+/* Outputs 000000rrrrrrggggggbbbbbb */
+# define DPI_FORMAT_18BIT_666_RGB_1 4
+/* Outputs 00rrrrrr00gggggg00bbbbbb */
+# define DPI_FORMAT_18BIT_666_RGB_2 5
+/* Outputs rrrrrrrrggggggggbbbbbbbb */
+# define DPI_FORMAT_24BIT_888_RGB 6
+
+/* Reverses the polarity of the corresponding signal */
+# define DPI_PIXEL_CLK_INVERT BIT(10)
+# define DPI_HSYNC_INVERT BIT(9)
+# define DPI_VSYNC_INVERT BIT(8)
+# define DPI_OUTPUT_ENABLE_INVERT BIT(7)
+
+/* Outputs the signal on the falling clock edge instead of rising. */
+# define DPI_HSYNC_NEGATE BIT(6)
+# define DPI_VSYNC_NEGATE BIT(5)
+# define DPI_OUTPUT_ENABLE_NEGATE BIT(4)
+
+/* Disables the signal */
+# define DPI_HSYNC_DISABLE BIT(3)
+# define DPI_VSYNC_DISABLE BIT(2)
+# define DPI_OUTPUT_ENABLE_DISABLE BIT(1)
+
+/* Power gate to the device, full reset at 0 -> 1 transition */
+# define DPI_ENABLE BIT(0)
+
+/* All other registers besides DPI_C return the ID */
+#define DPI_ID 0x04
+# define DPI_ID_VALUE 0x00647069
+
+/* General DPI hardware state. */
+struct vc4_dpi {
+ struct platform_device *pdev;
+
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct drm_panel *panel;
+
+ void __iomem *regs;
+
+ struct clk *pixel_clock;
+ struct clk *core_clock;
+};
+
+#define DPI_READ(offset) readl(dpi->regs + (offset))
+#define DPI_WRITE(offset, val) writel(val, dpi->regs + (offset))
+
+/* VC4 DPI encoder KMS struct */
+struct vc4_dpi_encoder {
+ struct vc4_encoder base;
+ struct vc4_dpi *dpi;
+};
+
+static inline struct vc4_dpi_encoder *
+to_vc4_dpi_encoder(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct vc4_dpi_encoder, base.base);
+}
+
+/* VC4 DPI connector KMS struct */
+struct vc4_dpi_connector {
+ struct drm_connector base;
+ struct vc4_dpi *dpi;
+
+ /* Since the connector is attached to just the one encoder,
+ * this is the reference to it so we can do the best_encoder()
+ * hook.
+ */
+ struct drm_encoder *encoder;
+};
+
+static inline struct vc4_dpi_connector *
+to_vc4_dpi_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct vc4_dpi_connector, base);
+}
+
+#define DPI_REG(reg) { reg, #reg }
+static const struct {
+ u32 reg;
+ const char *name;
+} dpi_regs[] = {
+ DPI_REG(DPI_C),
+ DPI_REG(DPI_ID),
+};
+
+static void vc4_dpi_dump_regs(struct vc4_dpi *dpi)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpi_regs); i++) {
+ DRM_INFO("0x%04x (%s): 0x%08x\n",
+ dpi_regs[i].reg, dpi_regs[i].name,
+ DPI_READ(dpi_regs[i].reg));
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_dpi *dpi = vc4->dpi;
+ int i;
+
+ if (!dpi)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(dpi_regs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ dpi_regs[i].name, dpi_regs[i].reg,
+ DPI_READ(dpi_regs[i].reg));
+ }
+
+ return 0;
+}
+#endif
+
+static enum drm_connector_status
+vc4_dpi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct vc4_dpi_connector *vc4_connector =
+ to_vc4_dpi_connector(connector);
+ struct vc4_dpi *dpi = vc4_connector->dpi;
+
+ if (dpi->panel)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static void vc4_dpi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static int vc4_dpi_connector_get_modes(struct drm_connector *connector)
+{
+ struct vc4_dpi_connector *vc4_connector =
+ to_vc4_dpi_connector(connector);
+ struct vc4_dpi *dpi = vc4_connector->dpi;
+
+ if (dpi->panel)
+ return drm_panel_get_modes(dpi->panel);
+
+ return 0;
+}
+
+static struct drm_encoder *
+vc4_dpi_connector_best_encoder(struct drm_connector *connector)
+{
+ struct vc4_dpi_connector *dpi_connector =
+ to_vc4_dpi_connector(connector);
+ return dpi_connector->encoder;
+}
+
+static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .detect = vc4_dpi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = vc4_dpi_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = {
+ .get_modes = vc4_dpi_connector_get_modes,
+ .best_encoder = vc4_dpi_connector_best_encoder,
+};
+
+static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
+ struct vc4_dpi *dpi)
+{
+ struct drm_connector *connector = NULL;
+ struct vc4_dpi_connector *dpi_connector;
+ int ret = 0;
+
+ dpi_connector = devm_kzalloc(dev->dev, sizeof(*dpi_connector),
+ GFP_KERNEL);
+ if (!dpi_connector) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ connector = &dpi_connector->base;
+
+ dpi_connector->encoder = dpi->encoder;
+ dpi_connector->dpi = dpi;
+
+ drm_connector_init(dev, connector, &vc4_dpi_connector_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ drm_connector_helper_add(connector, &vc4_dpi_connector_helper_funcs);
+
+ connector->polled = 0;
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_mode_connector_attach_encoder(connector, dpi->encoder);
+
+ return connector;
+
+ fail:
+ if (connector)
+ vc4_dpi_connector_destroy(connector);
+
+ return ERR_PTR(ret);
+}
+
+static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
+ struct vc4_dpi *dpi = vc4_encoder->dpi;
+
+ drm_panel_disable(dpi->panel);
+
+ clk_disable_unprepare(dpi->pixel_clock);
+
+ drm_panel_unprepare(dpi->panel);
+}
+
+static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct drm_display_mode *mode = &encoder->crtc->mode;
+ struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
+ struct vc4_dpi *dpi = vc4_encoder->dpi;
+ u32 dpi_c = DPI_ENABLE | DPI_OUTPUT_ENABLE_MODE;
+ int ret;
+
+ ret = drm_panel_prepare(dpi->panel);
+ if (ret) {
+ DRM_ERROR("Panel failed to prepare\n");
+ return;
+ }
+
+ if (dpi->connector->display_info.num_bus_formats) {
+ u32 bus_format = dpi->connector->display_info.bus_formats[0];
+
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
+ DPI_FORMAT);
+ break;
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB,
+ DPI_FORMAT);
+ dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR, DPI_ORDER);
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_2,
+ DPI_FORMAT);
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1,
+ DPI_FORMAT);
+ break;
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_3,
+ DPI_FORMAT);
+ break;
+ default:
+ DRM_ERROR("Unknown media bus format %d\n", bus_format);
+ break;
+ }
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ dpi_c |= DPI_HSYNC_INVERT;
+ else if (!(mode->flags & DRM_MODE_FLAG_PHSYNC))
+ dpi_c |= DPI_HSYNC_DISABLE;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ dpi_c |= DPI_VSYNC_INVERT;
+ else if (!(mode->flags & DRM_MODE_FLAG_PVSYNC))
+ dpi_c |= DPI_VSYNC_DISABLE;
+
+ DPI_WRITE(DPI_C, dpi_c);
+
+ ret = clk_set_rate(dpi->pixel_clock, mode->clock * 1000);
+ if (ret)
+ DRM_ERROR("Failed to set clock rate: %d\n", ret);
+
+ ret = clk_prepare_enable(dpi->pixel_clock);
+ if (ret)
+ DRM_ERROR("Failed to set clock rate: %d\n", ret);
+
+ ret = drm_panel_enable(dpi->panel);
+ if (ret) {
+ DRM_ERROR("Panel failed to enable\n");
+ drm_panel_unprepare(dpi->panel);
+ return;
+ }
+}
+
+static const struct drm_encoder_helper_funcs vc4_dpi_encoder_helper_funcs = {
+ .disable = vc4_dpi_encoder_disable,
+ .enable = vc4_dpi_encoder_enable,
+};
+
+static const struct of_device_id vc4_dpi_dt_match[] = {
+ { .compatible = "brcm,bcm2835-dpi", .data = NULL },
+ {}
+};
+
+/* Walks the OF graph to find the panel node and then asks DRM to look
+ * up the panel.
+ */
+static struct drm_panel *vc4_dpi_get_panel(struct device *dev)
+{
+ struct device_node *endpoint, *panel_node;
+ struct device_node *np = dev->of_node;
+ struct drm_panel *panel;
+
+ endpoint = of_graph_get_next_endpoint(np, NULL);
+ if (!endpoint) {
+ dev_err(dev, "no endpoint to fetch DPI panel\n");
+ return NULL;
+ }
+
+ /* don't proceed if we have an endpoint but no panel_node tied to it */
+ panel_node = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+ if (!panel_node) {
+ dev_err(dev, "no valid panel node\n");
+ return NULL;
+ }
+
+ panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+
+ return panel;
+}
+
+static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_dpi *dpi;
+ struct vc4_dpi_encoder *vc4_dpi_encoder;
+ int ret;
+
+ dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
+ if (!dpi)
+ return -ENOMEM;
+
+ vc4_dpi_encoder = devm_kzalloc(dev, sizeof(*vc4_dpi_encoder),
+ GFP_KERNEL);
+ if (!vc4_dpi_encoder)
+ return -ENOMEM;
+ vc4_dpi_encoder->base.type = VC4_ENCODER_TYPE_DPI;
+ vc4_dpi_encoder->dpi = dpi;
+ dpi->encoder = &vc4_dpi_encoder->base.base;
+
+ dpi->pdev = pdev;
+ dpi->regs = vc4_ioremap_regs(pdev, 0);
+ if (IS_ERR(dpi->regs))
+ return PTR_ERR(dpi->regs);
+
+ vc4_dpi_dump_regs(dpi);
+
+ if (DPI_READ(DPI_ID) != DPI_ID_VALUE) {
+ dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
+ DPI_READ(DPI_ID), DPI_ID_VALUE);
+ return -ENODEV;
+ }
+
+ dpi->core_clock = devm_clk_get(dev, "core");
+ if (IS_ERR(dpi->core_clock)) {
+ ret = PTR_ERR(dpi->core_clock);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("Failed to get core clock: %d\n", ret);
+ return ret;
+ }
+ dpi->pixel_clock = devm_clk_get(dev, "pixel");
+ if (IS_ERR(dpi->pixel_clock)) {
+ ret = PTR_ERR(dpi->pixel_clock);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("Failed to get pixel clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dpi->core_clock);
+ if (ret)
+ DRM_ERROR("Failed to turn on core clock: %d\n", ret);
+
+ dpi->panel = vc4_dpi_get_panel(dev);
+
+ drm_encoder_init(drm, dpi->encoder, &vc4_dpi_encoder_funcs,
+ DRM_MODE_ENCODER_DPI, NULL);
+ drm_encoder_helper_add(dpi->encoder, &vc4_dpi_encoder_helper_funcs);
+
+ dpi->connector = vc4_dpi_connector_init(drm, dpi);
+ if (IS_ERR(dpi->connector)) {
+ ret = PTR_ERR(dpi->connector);
+ goto err_destroy_encoder;
+ }
+
+ if (dpi->panel)
+ drm_panel_attach(dpi->panel, dpi->connector);
+
+ dev_set_drvdata(dev, dpi);
+
+ vc4->dpi = dpi;
+
+ return 0;
+
+err_destroy_encoder:
+ drm_encoder_cleanup(dpi->encoder);
+ clk_disable_unprepare(dpi->core_clock);
+ return ret;
+}
+
+static void vc4_dpi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_dev *vc4 = to_vc4_dev(drm);
+ struct vc4_dpi *dpi = dev_get_drvdata(dev);
+
+ if (dpi->panel)
+ drm_panel_detach(dpi->panel);
+
+ vc4_dpi_connector_destroy(dpi->connector);
+ drm_encoder_cleanup(dpi->encoder);
+
+ clk_disable_unprepare(dpi->core_clock);
+
+ vc4->dpi = NULL;
+}
+
+static const struct component_ops vc4_dpi_ops = {
+ .bind = vc4_dpi_bind,
+ .unbind = vc4_dpi_unbind,
+};
+
+static int vc4_dpi_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &vc4_dpi_ops);
+}
+
+static int vc4_dpi_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vc4_dpi_ops);
+ return 0;
+}
+
+struct platform_driver vc4_dpi_driver = {
+ .probe = vc4_dpi_dev_probe,
+ .remove = vc4_dpi_dev_remove,
+ .driver = {
+ .name = "vc4_dpi",
+ .of_match_table = vc4_dpi_dt_match,
+ },
+};
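
To make the new register layout concrete, here is one DPI_C value assembled by hand for a 24-bit RGB panel with active-low syncs, assuming VC4_SET_FIELD(value, FIELD) simply shifts the value into the field's mask (its apparent meaning, inferred from DPI_FORMAT_SHIFT above; this standalone sketch re-defines the constants locally):

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins for the vc4 register constants above. */
    #define BIT(n)                   (1u << (n))
    #define DPI_ENABLE               BIT(0)
    #define DPI_OUTPUT_ENABLE_MODE   BIT(16)
    #define DPI_HSYNC_INVERT         BIT(9)
    #define DPI_VSYNC_INVERT         BIT(8)
    #define DPI_FORMAT_SHIFT         11
    #define DPI_FORMAT_24BIT_888_RGB 6

    int main(void)
    {
            uint32_t dpi_c = DPI_ENABLE | DPI_OUTPUT_ENABLE_MODE;

            dpi_c |= DPI_FORMAT_24BIT_888_RGB << DPI_FORMAT_SHIFT;
            dpi_c |= DPI_HSYNC_INVERT | DPI_VSYNC_INVERT; /* NHSYNC/NVSYNC */

            printf("DPI_C = 0x%08x\n", dpi_c);    /* 0x00013301 */
            return 0;
    }

That corresponds to what vc4_dpi_encoder_enable() above would program for MEDIA_BUS_FMT_RGB888_1X24 with DRM_MODE_FLAG_NHSYNC and DRM_MODE_FLAG_NVSYNC set.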
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index b7d2ff0e6..250ed7e37 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -66,12 +66,12 @@ static const struct file_operations vc4_drm_fops = {
};
static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
- DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0),
- DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
DRM_ROOT_ONLY),
};
@@ -81,6 +81,7 @@ static struct drm_driver vc4_drm_driver = {
DRIVER_ATOMIC |
DRIVER_GEM |
DRIVER_HAVE_IRQ |
+ DRIVER_RENDER |
DRIVER_PRIME),
.lastclose = vc4_lastclose,
.irq_handler = vc4_irq,
@@ -90,7 +91,7 @@ static struct drm_driver vc4_drm_driver = {
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
- .get_vblank_counter = drm_vblank_count,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = vc4_debugfs_init,
@@ -153,6 +154,24 @@ static void vc4_match_add_drivers(struct device *dev,
}
}
+static void vc4_kick_out_firmware_fb(void)
+{
+ struct apertures_struct *ap;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
+ /* Since VC4 is a UMA device, the simplefb node may have been
+ * located anywhere in memory.
+ */
+ ap->ranges[0].base = 0;
+ ap->ranges[0].size = ~0;
+
+ remove_conflicting_framebuffers(ap, "vc4drmfb", false);
+ kfree(ap);
+}
+
static int vc4_drm_bind(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -186,6 +205,8 @@ static int vc4_drm_bind(struct device *dev)
if (ret)
goto gem_destroy;
+ vc4_kick_out_firmware_fb();
+
ret = drm_dev_register(drm, 0);
if (ret < 0)
goto unbind_all;
@@ -237,6 +258,7 @@ static const struct component_master_ops vc4_drm_ops = {
static struct platform_driver *const component_drivers[] = {
&vc4_hdmi_driver,
+ &vc4_dpi_driver,
&vc4_crtc_driver,
&vc4_hvs_driver,
&vc4_v3d_driver,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index fa2ad15d4..37cac5940 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -16,6 +16,7 @@ struct vc4_dev {
struct vc4_hvs *hvs;
struct vc4_crtc *crtc[3];
struct vc4_v3d *v3d;
+ struct vc4_dpi *dpi;
struct drm_fbdev_cma *fbdev;
@@ -422,6 +423,10 @@ void vc4_debugfs_cleanup(struct drm_minor *minor);
/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
+/* vc4_dpi.c */
+extern struct platform_driver vc4_dpi_driver;
+int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);
+
/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 8d4384f8b..46899d6de 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -822,7 +822,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
- gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
if (!gem_obj) {
DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
return -EINVAL;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index d8b864925..fd2644d23 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -573,7 +573,7 @@ err_unprepare_hsm:
err_unprepare_pix:
clk_disable_unprepare(hdmi->pixel_clock);
err_put_i2c:
- put_device(&vc4->hdmi->ddc->dev);
+ put_device(&hdmi->ddc->dev);
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 4718ae517..861a623bc 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -93,7 +93,7 @@ static struct vc4_commit *commit_init(struct drm_atomic_state *state)
* vc4_atomic_commit - commit validated state object
* @dev: DRM device
* @state: the driver state object
- * @async: asynchronous commit
+ * @nonblock: nonblocking commit
*
* This function commits a state object pre-validated with
* drm_atomic_helper_check(). This can still fail when e.g. the framebuffer reservation fails. For
@@ -104,7 +104,7 @@ static struct vc4_commit *commit_init(struct drm_atomic_state *state)
*/
static int vc4_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
- bool async)
+ bool nonblock)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
@@ -117,10 +117,18 @@ static int vc4_atomic_commit(struct drm_device *dev,
return -ENOMEM;
/* Make sure that any outstanding modesets have finished. */
- ret = down_interruptible(&vc4->async_modeset);
- if (ret) {
- kfree(c);
- return ret;
+ if (nonblock) {
+ ret = down_trylock(&vc4->async_modeset);
+ if (ret) {
+ kfree(c);
+ return -EBUSY;
+ }
+ } else {
+ ret = down_interruptible(&vc4->async_modeset);
+ if (ret) {
+ kfree(c);
+ return ret;
+ }
}
ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -170,7 +178,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
* current layout.
*/
- if (async) {
+ if (nonblock) {
vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
vc4_atomic_complete_commit_seqno_cb);
} else {
@@ -207,8 +215,6 @@ int vc4_kms_load(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
- dev->vblank_disable_allowed = true;
-
drm_mode_config_reset(dev);
vc4->fbdev = drm_fbdev_cma_init(dev, 32,
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 7b0c72ae0..4037b52fd 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -208,7 +208,7 @@ static void vc4_plane_destroy_state(struct drm_plane *plane,
}
kfree(vc4_state->dlist);
- __drm_atomic_helper_plane_destroy_state(plane, &vc4_state->base);
+ __drm_atomic_helper_plane_destroy_state(&vc4_state->base);
kfree(state);
}
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index bf42a8e87..f99eece4c 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -341,6 +341,10 @@
#define SCALER_DISPLACT0 0x00000030
#define SCALER_DISPLACT1 0x00000034
#define SCALER_DISPLACT2 0x00000038
+#define SCALER_DISPLACTX(x) (SCALER_DISPLACT0 + \
+ (x) * (SCALER_DISPLACT1 - \
+ SCALER_DISPLACT0))
+
#define SCALER_DISPCTRL0 0x00000040
# define SCALER_DISPCTRLX_ENABLE BIT(31)
# define SCALER_DISPCTRLX_RESET BIT(30)
@@ -390,6 +394,12 @@
#define SCALER_DISPBASE2 0x0000006c
#define SCALER_DISPALPHA2 0x00000070
#define SCALER_GAMADDR 0x00000078
+# define SCALER_GAMADDR_AUTOINC BIT(31)
+/* Enables all gamma ramp SRAMs, not just those of CRTCs with gamma
+ * enabled.
+ */
+# define SCALER_GAMADDR_SRAMENB BIT(30)
+
#define SCALER_GAMDATA 0x000000e0
#define SCALER_DLIST_START 0x00002000
#define SCALER_DLIST_SIZE 0x00004000
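
SCALER_DISPLACTX() above derives the register stride from the first two instances instead of hard-coding it, a common kernel idiom. Worked out: SCALER_DISPLACT1 - SCALER_DISPLACT0 = 0x34 - 0x30 = 4, so SCALER_DISPLACTX(2) = 0x30 + 2 * 4 = 0x38, matching SCALER_DISPLACT2. Hypothetical compile-time checks one could keep next to such a macro (C11 _Static_assert):

    #define SCALER_DISPLACT0        0x00000030
    #define SCALER_DISPLACT1        0x00000034
    #define SCALER_DISPLACT2        0x00000038
    #define SCALER_DISPLACTX(x)     (SCALER_DISPLACT0 + \
                                     (x) * (SCALER_DISPLACT1 - \
                                            SCALER_DISPLACT0))

    _Static_assert(SCALER_DISPLACTX(0) == SCALER_DISPLACT0, "stride");
    _Static_assert(SCALER_DISPLACTX(1) == SCALER_DISPLACT1, "stride");
    _Static_assert(SCALER_DISPLACTX(2) == SCALER_DISPLACT2, "stride");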
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index c503a840f..341f9be3d 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -89,7 +89,6 @@ int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_vgem_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->base.dev;
loff_t num_pages;
pgoff_t page_offset;
int ret;
@@ -103,12 +102,8 @@ static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (page_offset > num_pages)
return VM_FAULT_SIGBUS;
- mutex_lock(&dev->struct_mutex);
-
ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
obj->pages[page_offset]);
-
- mutex_unlock(&dev->struct_mutex);
switch (ret) {
case 0:
return VM_FAULT_NOPAGE;
@@ -154,6 +149,10 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
if (err)
goto out;
+ err = vgem_gem_get_pages(obj);
+ if (err)
+ goto out;
+
err = drm_gem_handle_create(file, gem_object, handle);
if (err)
goto handle_out;
@@ -201,37 +200,23 @@ int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
int ret = 0;
struct drm_gem_object *obj;
- mutex_lock(&dev->struct_mutex);
- obj = drm_gem_object_lookup(dev, file, handle);
- if (!obj) {
- ret = -ENOENT;
- goto unlock;
- }
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
- if (!drm_vma_node_has_offset(&obj->vma_node)) {
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto unref;
- }
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto unref;
BUG_ON(!obj->filp);
obj->filp->private_data = obj;
- ret = vgem_gem_get_pages(to_vgem_bo(obj));
- if (ret)
- goto fail_get_pages;
-
*offset = drm_vma_node_offset_addr(&obj->vma_node);
- goto unref;
-
-fail_get_pages:
- drm_gem_free_mmap_offset(obj);
unref:
- drm_gem_object_unreference(obj);
-unlock:
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
+
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 5fd1fd06e..d4305da88 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -38,13 +38,6 @@
#define XRES_MAX 8192
#define YRES_MAX 8192
-static void virtio_gpu_crtc_gamma_set(struct drm_crtc *crtc,
- u16 *red, u16 *green, u16 *blue,
- uint32_t start, uint32_t size)
-{
- /* TODO */
-}
-
static void
virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output)
@@ -75,7 +68,7 @@ static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc,
}
/* lookup the cursor */
- gobj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+ gobj = drm_gem_object_lookup(file_priv, handle);
if (gobj == NULL)
return -ENOENT;
@@ -173,7 +166,6 @@ static int virtio_gpu_page_flip(struct drm_crtc *crtc,
static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
.cursor_set2 = virtio_gpu_crtc_cursor_set,
.cursor_move = virtio_gpu_crtc_cursor_move,
- .gamma_set = virtio_gpu_crtc_gamma_set,
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
@@ -428,7 +420,6 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
return PTR_ERR(plane);
drm_crtc_init_with_planes(dev, crtc, plane, NULL,
&virtio_gpu_crtc_funcs, NULL);
- drm_mode_crtc_set_gamma_size(crtc, 256);
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
plane->crtc = crtc;
@@ -456,7 +447,7 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
int ret;
/* lookup object associated with res handle */
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (!obj)
return ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 7f898cfdc..3cc7afa77 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -42,10 +42,8 @@ module_param_named(modeset, virtio_gpu_modeset, int, 0400);
static int virtio_gpu_probe(struct virtio_device *vdev)
{
-#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && virtio_gpu_modeset == -1)
return -EINVAL;
-#endif
if (virtio_gpu_modeset == 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 8f486f4c7..0a54f43f8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -400,7 +400,7 @@ static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct virtio_gpu_device *qdev =
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 1feb7cee3..336a57fd6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -130,7 +130,7 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_gem_object *gobj;
struct virtio_gpu_object *obj;
BUG_ON(!offset_p);
- gobj = drm_gem_object_lookup(dev, file_priv, handle);
+ gobj = drm_gem_object_lookup(file_priv, handle);
if (gobj == NULL)
return -ENOENT;
obj = gem_to_virtio_gpu_obj(gobj);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index b4de18e65..c046903cb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -133,8 +133,7 @@ static int virtio_gpu_execbuffer(struct drm_device *dev,
}
for (i = 0; i < exbuf->num_bo_handles; i++) {
- gobj = drm_gem_object_lookup(dev,
- drm_file, bo_handles[i]);
+ gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
if (!gobj) {
drm_free_large(bo_handles);
drm_free_large(buflist);
@@ -345,7 +344,7 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gobj = NULL;
struct virtio_gpu_object *qobj = NULL;
- gobj = drm_gem_object_lookup(dev, file_priv, ri->bo_handle);
+ gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
if (gobj == NULL)
return -ENOENT;
@@ -374,7 +373,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
- gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
+ gobj = drm_gem_object_lookup(file, args->bo_handle);
if (gobj == NULL)
return -ENOENT;
@@ -418,7 +417,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
int ret;
u32 offset = args->offset;
- gobj = drm_gem_object_lookup(dev, file, args->bo_handle);
+ gobj = drm_gem_object_lookup(file, args->bo_handle);
if (gobj == NULL)
return -ENOENT;
@@ -464,7 +463,7 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
int ret;
bool nowait = false;
- gobj = drm_gem_object_lookup(dev, file, args->handle);
+ gobj = drm_gem_object_lookup(file, args->handle);
if (gobj == NULL)
return -ENOENT;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index f300eba95..1483daebe 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -155,10 +155,10 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait)
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0))
return r;
- r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ r = ttm_bo_wait(&bo->tbo, true, no_wait);
ttm_bo_unreserve(&bo->tbo);
return r;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 9fd924cd2..a05808156 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -426,6 +426,8 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.io_mem_free = &virtio_gpu_ttm_io_mem_free,
.move_notify = &virtio_gpu_bo_move_notify,
.swap_notify = &virtio_gpu_bo_swap_notify,
+ .lru_tail = &ttm_bo_default_lru_tail,
+ .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
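
virtio-gpu here, and vmwgfx just below, fill in two ttm_bo_driver hooks new in this kernel, lru_tail and swap_lru_tail, using the TTM-provided defaults. The hooks let a driver decide where a buffer is re-queued on the LRU lists; the defaults keep plain tail insertion, i.e. strict LRU. A hypothetical driver accepting the defaults (hook roles inferred from this diff, so verify against ttm_bo_driver.h):

    #include <drm/ttm/ttm_bo_driver.h>

    static struct ttm_bo_driver example_bo_driver = {
            /* ...the usual create/move/fault hooks elided... */

            /* Each hook returns the LRU position for (re)queueing the
             * BO; the defaults preserve the old strict-LRU behaviour. */
            .lru_tail       = &ttm_bo_default_lru_tail,
            .swap_lru_tail  = &ttm_bo_default_swap_lru_tail,
    };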
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index d281575bb..473d00451 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -8,6 +8,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
- vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o
+ vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 3329f623c..78b75ee3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -839,7 +839,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
- ttm_bo_wait(bo, false, false, false);
+ ttm_bo_wait(bo, false, false);
}
@@ -857,4 +857,6 @@ struct ttm_bo_driver vmw_bo_driver = {
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
+ .lru_tail = &ttm_bo_default_lru_tail,
+ .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 092ea81ee..265c81e6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -421,9 +421,9 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
}
bo = &buf->base;
- WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL));
+ WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));
- ret = ttm_bo_wait(old_bo, false, false, false);
+ ret = ttm_bo_wait(old_bo, false, false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed waiting for cotable unbind.\n");
goto out_wait;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index eadc981ee..0cd889015 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -57,7 +57,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err;
@@ -105,7 +105,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err;
@@ -188,7 +188,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
return ret;
vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err_unlock;
@@ -244,7 +244,7 @@ int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
+ ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2a505464c..8d528fcf6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,6 +1,6 @@
/**************************************************************************
*
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -44,6 +44,12 @@
#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600
+#ifndef VMWGFX_GIT_VERSION
+#define VMWGFX_GIT_VERSION "Unknown"
+#endif
+
+#define VMWGFX_REPO "In Tree"
+
/**
* Fully encoded drm commands. Might move to vmw_drm.h
@@ -329,7 +335,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
+ ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
BUG_ON(ret != 0);
vmw_bo_pin_reserved(vbo, true);
@@ -616,6 +622,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
uint32_t svga_id;
enum vmw_res_type i;
bool refuse_dma = false;
+ char host_log[100] = {0};
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (unlikely(dev_priv == NULL)) {
@@ -886,6 +893,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
+ snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
+ VMWGFX_REPO, VMWGFX_GIT_VERSION);
+ vmw_host_log(host_log);
+
+ memset(host_log, 0, sizeof(host_log));
+ snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
+ VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
+ VMWGFX_DRIVER_PATCHLEVEL);
+ vmw_host_log(host_log);
+
if (dev_priv->enable_fb) {
vmw_fifo_resource_inc(dev_priv);
vmw_svga_enable(dev_priv);
@@ -1543,10 +1560,8 @@ static int __init vmwgfx_init(void)
{
int ret;
-#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force())
return -EINVAL;
-#endif
ret = drm_pci_init(&driver, &vmw_pci_driver);
if (ret)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index cab0c54b4..89fb19443 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1236,4 +1236,10 @@ static inline void vmw_mmio_write(u32 value, u32 *addr)
{
WRITE_ONCE(*addr, value);
}
+
+/**
+ * vmw_host_log() - send a log message to the host; implemented in vmwgfx_msg.c
+ */
+extern int vmw_host_log(const char *log);
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 6ccd61d37..e29da45a2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -98,7 +98,7 @@ int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
kmap_offset = 0;
kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
+ ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return -EINVAL;
@@ -318,7 +318,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
kmap_num = (64*64*4) >> PAGE_SHIFT;
- ret = ttm_bo_reserve(bo, true, false, false, NULL);
+ ret = ttm_bo_reserve(bo, true, false, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return;
@@ -1855,7 +1855,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo = &buf->base;
int ret;
- ttm_bo_reserve(bo, false, false, interruptible, NULL);
+ ttm_bo_reserve(bo, false, false, NULL);
ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
validate_as_mob);
if (ret)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 23db16008..b6126a5f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -222,7 +222,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
if (bo) {
int ret;
- ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ ret = ttm_bo_reserve(bo, false, true, NULL);
BUG_ON(ret != 0);
vmw_fence_single_bo(bo, NULL);
@@ -262,7 +262,7 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto out_no_bo;
- ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);
+ ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
BUG_ON(ret != 0);
ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
if (unlikely(ret != 0))
@@ -357,7 +357,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
vmw_takedown_otable_base(dev_priv, i,
&batch->otables[i]);
- ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ ret = ttm_bo_reserve(bo, false, true, NULL);
BUG_ON(ret != 0);
vmw_fence_single_bo(bo, NULL);
@@ -440,7 +440,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(mob->pt_bo, false, true, false, NULL);
+ ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
BUG_ON(ret != 0);
ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
@@ -545,7 +545,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
const struct vmw_sg_table *vsgt;
int ret;
- ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ ret = ttm_bo_reserve(bo, false, true, NULL);
BUG_ON(ret != 0);
vsgt = vmw_bo_sg_table(bo);
@@ -595,7 +595,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo = mob->pt_bo;
if (bo) {
- ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ ret = ttm_bo_reserve(bo, false, true, NULL);
/*
* No one else should be using this buffer.
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
new file mode 100644
index 000000000..e57a0bad7
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright © 2016 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/frame.h>
+#include <asm/hypervisor.h>
+#include "drmP.h"
+#include "vmwgfx_msg.h"
+
+
+#define MESSAGE_STATUS_SUCCESS 0x0001
+#define MESSAGE_STATUS_DORECV 0x0002
+#define MESSAGE_STATUS_CPT 0x0010
+#define MESSAGE_STATUS_HB 0x0080
+
+#define RPCI_PROTOCOL_NUM 0x49435052
+#define GUESTMSG_FLAG_COOKIE 0x80000000
+
+#define RETRIES 3
+
+#define VMW_HYPERVISOR_MAGIC 0x564D5868
+#define VMW_HYPERVISOR_PORT 0x5658
+#define VMW_HYPERVISOR_HB_PORT 0x5659
+
+#define VMW_PORT_CMD_MSG 30
+#define VMW_PORT_CMD_HB_MSG 0
+#define VMW_PORT_CMD_OPEN_CHANNEL (MSG_TYPE_OPEN << 16 | VMW_PORT_CMD_MSG)
+#define VMW_PORT_CMD_CLOSE_CHANNEL (MSG_TYPE_CLOSE << 16 | VMW_PORT_CMD_MSG)
+#define VMW_PORT_CMD_SENDSIZE (MSG_TYPE_SENDSIZE << 16 | VMW_PORT_CMD_MSG)
+#define VMW_PORT_CMD_RECVSIZE (MSG_TYPE_RECVSIZE << 16 | VMW_PORT_CMD_MSG)
+#define VMW_PORT_CMD_RECVSTATUS (MSG_TYPE_RECVSTATUS << 16 | VMW_PORT_CMD_MSG)
+
+#define HIGH_WORD(X) ((X & 0xFFFF0000) >> 16)
+
+static u32 vmw_msg_enabled = 1;
+
+enum rpc_msg_type {
+ MSG_TYPE_OPEN,
+ MSG_TYPE_SENDSIZE,
+ MSG_TYPE_SENDPAYLOAD,
+ MSG_TYPE_RECVSIZE,
+ MSG_TYPE_RECVPAYLOAD,
+ MSG_TYPE_RECVSTATUS,
+ MSG_TYPE_CLOSE,
+};
+
+struct rpc_channel {
+ u16 channel_id;
+ u32 cookie_high;
+ u32 cookie_low;
+};
+
+
+
+/**
+ * vmw_open_channel
+ *
+ * @channel: RPC channel
+ * @protocol: RPC protocol number, e.g. RPCI_PROTOCOL_NUM
+ *
+ * Returns: 0 on success
+ */
+static int vmw_open_channel(struct rpc_channel *channel, unsigned int protocol)
+{
+ unsigned long eax, ebx, ecx, edx, si = 0, di = 0;
+
+ VMW_PORT(VMW_PORT_CMD_OPEN_CHANNEL,
+ (protocol | GUESTMSG_FLAG_COOKIE), si, di,
+ VMW_HYPERVISOR_PORT,
+ VMW_HYPERVISOR_MAGIC,
+ eax, ebx, ecx, edx, si, di);
+
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
+ return -EINVAL;
+
+ channel->channel_id = HIGH_WORD(edx);
+ channel->cookie_high = si;
+ channel->cookie_low = di;
+
+ return 0;
+}
+
+
+
+/**
+ * vmw_close_channel
+ *
+ * @channel: RPC channel
+ *
+ * Returns: 0 on success
+ */
+static int vmw_close_channel(struct rpc_channel *channel)
+{
+ unsigned long eax, ebx, ecx, edx, si, di;
+
+ /* Set up additional parameters */
+ si = channel->cookie_high;
+ di = channel->cookie_low;
+
+ VMW_PORT(VMW_PORT_CMD_CLOSE_CHANNEL,
+ 0, si, di,
+ (VMW_HYPERVISOR_PORT | (channel->channel_id << 16)),
+ VMW_HYPERVISOR_MAGIC,
+ eax, ebx, ecx, edx, si, di);
+
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+
+
+/**
+ * vmw_send_msg: Sends a message to the host
+ *
+ * @channel: RPC channel
+ * @msg: NULL-terminated message string
+ *
+ * Returns: 0 on success
+ */
+static int vmw_send_msg(struct rpc_channel *channel, const char *msg)
+{
+ unsigned long eax, ebx, ecx, edx, si, di, bp;
+ size_t msg_len = strlen(msg);
+ int retries = 0;
+
+
+ while (retries < RETRIES) {
+ retries++;
+
+ /* Set up additional parameters */
+ si = channel->cookie_high;
+ di = channel->cookie_low;
+
+ VMW_PORT(VMW_PORT_CMD_SENDSIZE,
+ msg_len, si, di,
+ VMW_HYPERVISOR_PORT | (channel->channel_id << 16),
+ VMW_HYPERVISOR_MAGIC,
+ eax, ebx, ecx, edx, si, di);
+
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
+ (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
+ /* Expected success + high-bandwidth. Give up. */
+ return -EINVAL;
+ }
+
+ /* Send msg */
+ si = (uintptr_t) msg;
+ di = channel->cookie_low;
+ bp = channel->cookie_high;
+
+ VMW_PORT_HB_OUT(
+ (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+ msg_len, si, di,
+ VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+ VMW_HYPERVISOR_MAGIC, bp,
+ eax, ebx, ecx, edx, si, di);
+
+ if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) != 0) {
+ return 0;
+ } else if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
+ /* A checkpoint occurred. Retry. */
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ return -EINVAL;
+}
+STACK_FRAME_NON_STANDARD(vmw_send_msg);
+
+
+/**
+ * vmw_recv_msg: Receives a message from the host
+ *
+ * Note: It is the caller's responsibility to call kfree() on msg.
+ *
+ * @channel: channel opened by vmw_open_channel
+ * @msg: [OUT] message received from the host
+ * @msg_len: [OUT] length of the received message
+ */
+static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
+ size_t *msg_len)
+{
+ unsigned long eax, ebx, ecx, edx, si, di, bp;
+ char *reply;
+ size_t reply_len;
+ int retries = 0;
+
+
+ *msg_len = 0;
+ *msg = NULL;
+
+ while (retries < RETRIES) {
+ retries++;
+
+ /* Set up additional parameters */
+ si = channel->cookie_high;
+ di = channel->cookie_low;
+
+ VMW_PORT(VMW_PORT_CMD_RECVSIZE,
+ 0, si, di,
+ (VMW_HYPERVISOR_PORT | (channel->channel_id << 16)),
+ VMW_HYPERVISOR_MAGIC,
+ eax, ebx, ecx, edx, si, di);
+
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0 ||
+ (HIGH_WORD(ecx) & MESSAGE_STATUS_HB) == 0) {
+ DRM_ERROR("Failed to get reply size\n");
+ return -EINVAL;
+ }
+
+ /* No reply available. This is okay. */
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_DORECV) == 0)
+ return 0;
+
+ reply_len = ebx;
+ reply = kzalloc(reply_len + 1, GFP_KERNEL);
+ if (reply == NULL) {
+ DRM_ERROR("Cannot allocate memory for reply\n");
+ return -ENOMEM;
+ }
+
+
+ /* Receive buffer */
+ si = channel->cookie_high;
+ di = (uintptr_t) reply;
+ bp = channel->cookie_low;
+
+ VMW_PORT_HB_IN(
+ (MESSAGE_STATUS_SUCCESS << 16) | VMW_PORT_CMD_HB_MSG,
+ reply_len, si, di,
+ VMW_HYPERVISOR_HB_PORT | (channel->channel_id << 16),
+ VMW_HYPERVISOR_MAGIC, bp,
+ eax, ebx, ecx, edx, si, di);
+
+ if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
+ kfree(reply);
+
+ if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
+ /* A checkpoint occurred. Retry. */
+ continue;
+ }
+
+ return -EINVAL;
+ }
+
+ reply[reply_len] = '\0';
+
+
+ /* Ack buffer */
+ si = channel->cookie_high;
+ di = channel->cookie_low;
+
+ VMW_PORT(VMW_PORT_CMD_RECVSTATUS,
+ MESSAGE_STATUS_SUCCESS, si, di,
+ (VMW_HYPERVISOR_PORT | (channel->channel_id << 16)),
+ VMW_HYPERVISOR_MAGIC,
+ eax, ebx, ecx, edx, si, di);
+
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
+ kfree(reply);
+
+ if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
+ /* A checkpoint occurred. Retry. */
+ continue;
+ }
+
+ return -EINVAL;
+ }
+
+ break;
+ }
+
+ if (retries == RETRIES)
+ return -EINVAL;
+
+ *msg_len = reply_len;
+ *msg = reply;
+
+ return 0;
+}
+STACK_FRAME_NON_STANDARD(vmw_recv_msg);
+
+
+/**
+ * vmw_host_get_guestinfo: Gets a GuestInfo parameter
+ *
+ * Gets the value of a GuestInfo.* parameter. The value is returned as a
+ * string, and it is up to the caller to post-process it.
+ *
+ * @guest_info_param: Parameter to get, e.g. GuestInfo.svga.gl3
+ * @buffer: if NULL, *length will contain the reply size
+ * @length: [IN/OUT] size of @buffer; set to the size of the reply on return
+ *
+ * Returns: 0 on success
+ */
+int vmw_host_get_guestinfo(const char *guest_info_param,
+ char *buffer, size_t *length)
+{
+ struct rpc_channel channel;
+ char *msg, *reply = NULL;
+ size_t msg_len, reply_len = 0;
+ int ret = 0;
+
+
+ if (!vmw_msg_enabled)
+ return -ENODEV;
+
+ if (!guest_info_param || !length)
+ return -EINVAL;
+
+ msg_len = strlen(guest_info_param) + strlen("info-get ") + 1;
+ msg = kzalloc(msg_len, GFP_KERNEL);
+ if (msg == NULL) {
+ DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
+ return -ENOMEM;
+ }
+
+ sprintf(msg, "info-get %s", guest_info_param);
+
+ if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
+ vmw_send_msg(&channel, msg) ||
+ vmw_recv_msg(&channel, (void *) &reply, &reply_len) ||
+ vmw_close_channel(&channel)) {
+ DRM_ERROR("Failed to get %s", guest_info_param);
+
+ ret = -EINVAL;
+ }
+
+ if (buffer && reply && reply_len > 0) {
+ /* Remove the reply code, which occupies the first two
+ * characters of the reply. Guard against underflow for
+ * replies shorter than the code itself.
+ */
+ reply_len = reply_len > 2 ? reply_len - 2 : 0;
+ reply_len = min(reply_len, *length);
+
+ if (reply_len > 0)
+ memcpy(buffer, reply + 2, reply_len);
+ }
+
+ *length = reply_len;
+
+ kfree(reply);
+ kfree(msg);
+
+ return ret;
+}
+
+
+
+/**
+ * vmw_host_log: Sends a log message to the host
+ *
+ * @log: NULL terminated string
+ *
+ * Returns: 0 on success
+ */
+int vmw_host_log(const char *log)
+{
+ struct rpc_channel channel;
+ char *msg;
+ int msg_len;
+ int ret = 0;
+
+
+ if (!vmw_msg_enabled)
+ return -ENODEV;
+
+ if (!log)
+ return ret;
+
+ msg_len = strlen(log) + strlen("log ") + 1;
+ msg = kzalloc(msg_len, GFP_KERNEL);
+ if (msg == NULL) {
+ DRM_ERROR("Cannot allocate memory for log message\n");
+ return -ENOMEM;
+ }
+
+ sprintf(msg, "log %s", log);
+
+ if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
+ vmw_send_msg(&channel, msg) ||
+ vmw_close_channel(&channel)) {
+ DRM_ERROR("Failed to send log\n");
+
+ ret = -EINVAL;
+ }
+
+ kfree(msg);
+
+ return ret;
+}
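The function above supports a two-call pattern: with a NULL buffer it only
reports the reply size through *length, letting the caller size an
allocation before fetching the value. A minimal usage sketch, not part of
the patch (the function name, the queried parameter and the abbreviated
error handling are illustrative):

static int example_query_gl3(void)
{
	size_t len = 0;
	char *buf;
	int ret;

	/* First call: NULL buffer, so len receives the reply size. */
	ret = vmw_host_get_guestinfo("GuestInfo.svga.gl3", NULL, &len);
	if (ret || len == 0)
		return ret ? ret : -ENODATA;

	buf = kzalloc(len + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Second call: copy up to len bytes of the value into buf. */
	ret = vmw_host_get_guestinfo("GuestInfo.svga.gl3", buf, &len);
	if (!ret)
		DRM_INFO("GuestInfo.svga.gl3 = %.*s\n", (int)len, buf);

	kfree(buf);
	return ret;
}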
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
new file mode 100644
index 000000000..557a033fb
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2016, VMware, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * Based on code from vmware.c and vmmouse.c.
+ * Author:
+ * Sinclair Yeh <syeh@vmware.com>
+ */
+#ifndef _VMWGFX_MSG_H
+#define _VMWGFX_MSG_H
+
+
+/**
+ * Hypervisor-specific bi-directional communication channel. Should never
+ * execute on bare-metal hardware. The caller must make sure to check for
+ * a supported hypervisor before using these macros.
+ *
+ * The last two parameters are both input and output and must be initialized.
+ *
+ * @cmd: [IN] Message Cmd
+ * @in_ebx: [IN] Message Len, through EBX
+ * @in_si: [IN] Input argument through SI, set to 0 if not used
+ * @in_di: [IN] Input argument through DI, set to 0 if not used
+ * @port_num: [IN] port number + [channel id]
+ * @magic: [IN] hypervisor magic value
+ * @eax: [OUT] value of EAX register
+ * @ebx: [OUT] e.g. status from an HB message status command
+ * @ecx: [OUT] e.g. status from a non-HB message status command
+ * @edx: [OUT] e.g. channel id
+ * @si: [OUT] value of the SI register
+ * @di: [OUT] value of the DI register
+ */
+#define VMW_PORT(cmd, in_ebx, in_si, in_di, \
+ port_num, magic, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile ("inl %%dx, %%eax;" : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(in_ebx), \
+ "c"(cmd), \
+ "d"(port_num), \
+ "S"(in_si), \
+ "D"(in_di) : \
+ "memory"); \
+})
+
+
+/**
+ * Hypervisor-specific bi-directional communication channel. Should never
+ * execute on bare-metal hardware. The caller must make sure to check for
+ * a supported hypervisor before using these macros.
+ *
+ * The last 3 parameters are both input and output and must be initialized.
+ *
+ * @cmd: [IN] Message Cmd
+ * @in_ecx: [IN] Message Len, through ECX
+ * @in_si: [IN] Input argument through SI, set to 0 if not used
+ * @in_di: [IN] Input argument through DI, set to 0 if not used
+ * @port_num: [IN] port number + [channel id]
+ * @magic: [IN] hypervisor magic value
+ * @bp: [IN] cookie value, passed through the BP register
+ * @eax: [OUT] value of EAX register
+ * @ebx: [OUT] e.g. status from an HB message status command
+ * @ecx: [OUT] e.g. status from a non-HB message status command
+ * @edx: [OUT] e.g. channel id
+ * @si: [OUT] value of the SI register
+ * @di: [OUT] value of the DI register
+ */
+#ifdef __x86_64__
+
+#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
+ port_num, magic, bp, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile ("push %%rbp;" \
+ "mov %12, %%rbp;" \
+ "rep outsb;" \
+ "pop %%rbp;" : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(cmd), \
+ "c"(in_ecx), \
+ "d"(port_num), \
+ "S"(in_si), \
+ "D"(in_di), \
+ "r"(bp) : \
+ "memory", "cc"); \
+})
+
+
+#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
+ port_num, magic, bp, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile ("push %%rbp;" \
+ "mov %12, %%rbp;" \
+ "rep insb;" \
+ "pop %%rbp" : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(cmd), \
+ "c"(in_ecx), \
+ "d"(port_num), \
+ "S"(in_si), \
+ "D"(in_di), \
+ "r"(bp) : \
+ "memory", "cc"); \
+})
+
+#else
+
+/* In the 32-bit version of this macro, we use an "m" constraint because
+ * there are no registers left for bp.
+ */
+#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
+ port_num, magic, bp, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile ("push %%ebp;" \
+ "mov %12, %%ebp;" \
+ "rep outsb;" \
+ "pop %%ebp;" : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(cmd), \
+ "c"(in_ecx), \
+ "d"(port_num), \
+ "S"(in_si), \
+ "D"(in_di), \
+ "m"(bp) : \
+ "memory", "cc"); \
+})
+
+
+#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
+ port_num, magic, bp, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile ("push %%ebp;" \
+ "mov %12, %%ebp;" \
+ "rep insb;" \
+ "pop %%ebp" : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(cmd), \
+ "c"(in_ecx), \
+ "d"(port_num), \
+ "S"(in_si), \
+ "D"(in_di), \
+ "m"(bp) : \
+ "memory", "cc"); \
+})
+#endif /* __x86_64__ */
+
+#endif
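For context on the convention these macros wrap: the guest loads the magic
value into EAX, the command into ECX and the port number into DX, then
executes inl (or rep outsb/insb for the high-bandwidth variants); the
hypervisor returns status flags in the high word of ECX (EBX for HB
transfers). A standalone model of the status decoding used throughout
vmwgfx_msg.c (the helper is hypothetical and only mirrors the checks above):

#include <stdio.h>

#define MESSAGE_STATUS_SUCCESS 0x0001
#define MESSAGE_STATUS_DORECV  0x0002
#define MESSAGE_STATUS_CPT     0x0010
#define MESSAGE_STATUS_HB      0x0080

#define HIGH_WORD(X) (((X) & 0xFFFF0000) >> 16)

static void decode_status(unsigned long ecx)
{
	unsigned int status = HIGH_WORD(ecx);

	printf("success: %d, reply pending: %d, checkpoint: %d, hb: %d\n",
	       !!(status & MESSAGE_STATUS_SUCCESS),
	       !!(status & MESSAGE_STATUS_DORECV),
	       !!(status & MESSAGE_STATUS_CPT),
	       !!(status & MESSAGE_STATUS_HB));
}

int main(void)
{
	/* e.g. a successful SENDSIZE reply with high-bandwidth support */
	decode_status((MESSAGE_STATUS_SUCCESS | MESSAGE_STATUS_HB) << 16);
	return 0;
}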
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index e57667ca7..6a328d507 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -129,7 +129,7 @@ static void vmw_resource_release(struct kref *kref)
if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base;
- ttm_bo_reserve(bo, false, false, false, NULL);
+ ttm_bo_reserve(bo, false, false, NULL);
if (!list_empty(&res->mob_head) &&
res->func->unbind != NULL) {
struct ttm_validate_buffer val_buf;
@@ -1512,7 +1512,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
list_del_init(&res->mob_head);
}
- (void) ttm_bo_wait(bo, false, false, false);
+ (void) ttm_bo_wait(bo, false, false);
}
}
@@ -1605,7 +1605,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
if (fence != NULL)
vmw_fence_obj_unreference(&fence);
- (void) ttm_bo_wait(bo, false, false, false);
+ (void) ttm_bo_wait(bo, false, false);
} else
mutex_unlock(&dev_priv->binding_mutex);
@@ -1717,8 +1717,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
if (res->backup) {
vbo = res->backup;
- ttm_bo_reserve(&vbo->base, interruptible, false, false,
- NULL);
+ ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
if (!vbo->pin_count) {
ret = ttm_bo_validate
(&vbo->base,
@@ -1773,7 +1772,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
if (--res->pin_count == 0 && res->backup) {
struct vmw_dma_buffer *vbo = res->backup;
- ttm_bo_reserve(&vbo->base, false, false, false, NULL);
+ ttm_bo_reserve(&vbo->base, false, false, NULL);
vmw_bo_pin_reserved(vbo, false);
ttm_bo_unreserve(&vbo->base);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index fd47547b0..92f8b1d04 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -988,7 +988,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto out;
- ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+ ret = ttm_bo_reserve(&buf->base, false, true, NULL);
if (unlikely(ret != 0))
goto no_reserve;
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index 498b37e39..e1e31e9e6 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -85,7 +85,7 @@ static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
err = devm_request_irq(host->dev, host->intr_syncpt_irq,
syncpt_thresh_isr, IRQF_SHARED,
"host1x_syncpt", host);
- if (IS_ERR_VALUE(err)) {
+ if (err < 0) {
WARN_ON(1);
return err;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 411722570..5646ca4b9 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -134,6 +134,16 @@ config HID_APPLEIR
Say Y here if you want support for Apple infrared remote control.
+config HID_ASUS
+ tristate "Asus"
+ depends on I2C_HID
+ ---help---
+ Support for the Asus notebook built-in keyboard via I2C.
+
+ Supported devices:
+ - EeeBook X205TA
+ - VivoBook E200HA
+
config HID_AUREAL
tristate "Aureal"
depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index be56ab6f7..a2fb562de 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
obj-$(CONFIG_HID_ACRUX) += hid-axff.o
obj-$(CONFIG_HID_APPLE) += hid-apple.o
obj-$(CONFIG_HID_APPLEIR) += hid-appleir.o
+obj-$(CONFIG_HID_ASUS) += hid-asus.o
obj-$(CONFIG_HID_AUREAL) += hid-aureal.o
obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
obj-$(CONFIG_HID_BETOP_FF) += hid-betopff.o
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
new file mode 100644
index 000000000..7a811ec4f
--- /dev/null
+++ b/drivers/hid/hid-asus.c
@@ -0,0 +1,52 @@
+/*
+ * HID driver for Asus notebook built-in keyboard.
+ * Fixes small logical maximum to match usage maximum.
+ *
+ * Currently supported devices are:
+ * EeeBook X205TA
+ * VivoBook E200HA
+ *
+ * Copyright (c) 2016 Yusuke Fujimaki <usk.fujimaki@gmail.com>
+ *
+ * This module is based on hid-ortek by
+ * Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
+ * Copyright (c) 2011 Jiri Kosina
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x65) {
+ hid_info(hdev, "Fixing up Asus notebook report descriptor\n");
+ rdesc[55] = 0xdd;
+ }
+ return rdesc;
+}
+
+static const struct hid_device_id asus_devices[] = {
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, asus_devices);
+
+static struct hid_driver asus_driver = {
+ .name = "asus",
+ .id_table = asus_devices,
+ .report_fixup = asus_report_fixup
+};
+module_hid_driver(asus_driver);
+
+MODULE_LICENSE("GPL");
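For context on the two bytes patched above: in a HID short item, the prefix
byte 0x25 encodes a Global item with tag Logical Maximum and one data byte,
so rdesc[54..55] == 25 65 declares a logical maximum of 101, too small for
the keyboard's usage maximum; the fixup raises it to 0xdd (221). A
standalone sketch of that decoding (illustrative only, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned char item[2] = { 0x25, 0x65 };	/* as found in the descriptor */

	/* HID short item prefix: bSize = bits 0-1, bType = bits 2-3,
	 * bTag = bits 4-7. 0x25 gives size=1, type=Global, tag=0x2
	 * (Logical Maximum).
	 */
	printf("size=%u type=%u tag=0x%x data=%u\n",
	       item[0] & 0x3, (item[0] >> 2) & 0x3, (item[0] >> 4) & 0xf,
	       item[1]);

	item[1] = 0xdd;	/* the fixup: logical maximum becomes 221 */
	printf("patched logical maximum = %u\n", item[1]);
	return 0;
}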
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 4f9c5c6de..8ea3a2636 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1129,49 +1129,46 @@ EXPORT_SYMBOL_GPL(hid_field_extract);
static void __implement(u8 *report, unsigned offset, int n, u32 value)
{
unsigned int idx = offset / 8;
- unsigned int size = offset + n;
unsigned int bit_shift = offset % 8;
int bits_to_set = 8 - bit_shift;
- u8 bit_mask = 0xff << bit_shift;
while (n - bits_to_set >= 0) {
- report[idx] &= ~bit_mask;
+ report[idx] &= ~(0xff << bit_shift);
report[idx] |= value << bit_shift;
value >>= bits_to_set;
n -= bits_to_set;
bits_to_set = 8;
- bit_mask = 0xff;
bit_shift = 0;
idx++;
}
/* trailing bits, fewer than a full byte */
if (n) {
- if (size % 8)
- bit_mask &= (1U << (size % 8)) - 1;
- report[idx] &= ~bit_mask;
- report[idx] |= (value << bit_shift) & bit_mask;
+ u8 bit_mask = ((1U << n) - 1);
+ report[idx] &= ~(bit_mask << bit_shift);
+ report[idx] |= value << bit_shift;
}
}
static void implement(const struct hid_device *hid, u8 *report,
unsigned offset, unsigned n, u32 value)
{
- u64 m;
-
- if (n > 32) {
+ if (unlikely(n > 32)) {
hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
__func__, n, current->comm);
n = 32;
+ } else if (n < 32) {
+ u32 m = (1U << n) - 1;
+
+ if (unlikely(value > m)) {
+ hid_warn(hid,
+ "%s() called with too large value %d (n: %d)! (%s)\n",
+ __func__, value, n, current->comm);
+ WARN_ON(1);
+ value &= m;
+ }
}
- m = (1ULL << n) - 1;
- if (value > m)
- hid_warn(hid, "%s() called with too large value %d! (%s)\n",
- __func__, value, current->comm);
- WARN_ON(value > m);
- value &= m;
-
__implement(report, offset, n, value);
}
@@ -1856,6 +1853,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
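The reworked __implement() above packs an n-bit value into a byte stream at
an arbitrary bit offset, masking only the bits of the field being written.
A self-contained userspace model of the same masking logic (put_bits and
the sample values are illustrative, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Write the low n bits of value into report starting at bit offset,
 * mirroring the masking scheme of the reworked __implement().
 */
static void put_bits(uint8_t *report, unsigned offset, int n, uint32_t value)
{
	unsigned idx = offset / 8;
	unsigned bit_shift = offset % 8;
	int bits_to_set = 8 - bit_shift;

	while (n - bits_to_set >= 0) {
		report[idx] &= ~(0xff << bit_shift);
		report[idx] |= value << bit_shift;
		value >>= bits_to_set;
		n -= bits_to_set;
		bits_to_set = 8;
		bit_shift = 0;
		idx++;
	}
	if (n) {	/* trailing partial byte */
		uint8_t mask = (1U << n) - 1;
		report[idx] &= ~(mask << bit_shift);
		report[idx] |= (value & mask) << bit_shift;
	}
}

int main(void)
{
	uint8_t report[2] = { 0xff, 0xff };

	put_bits(report, 3, 7, 0x2a);	/* 7-bit field straddling a byte */
	printf("%02x %02x\n", report[0], report[1]);	/* expect 57 fd */
	return 0;
}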
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0238f0169..3eec09a13 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -163,6 +163,7 @@
#define USB_VENDOR_ID_ASUSTEK 0x0b05
#define USB_DEVICE_ID_ASUSTEK_LCM 0x1726
#define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b
+#define USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD 0x8585
#define USB_VENDOR_ID_ATEN 0x0557
#define USB_DEVICE_ID_ATEN_UC100KM 0x2004
@@ -258,6 +259,13 @@
#define USB_VENDOR_ID_CORSAIR 0x1b1c
#define USB_DEVICE_ID_CORSAIR_K90 0x1b02
+#define USB_VENDOR_ID_CORSAIR 0x1b1c
+#define USB_DEVICE_ID_CORSAIR_K70R 0x1b09
+#define USB_DEVICE_ID_CORSAIR_K95RGB 0x1b11
+#define USB_DEVICE_ID_CORSAIR_M65RGB 0x1b12
+#define USB_DEVICE_ID_CORSAIR_K70RGB 0x1b13
+#define USB_DEVICE_ID_CORSAIR_K65RGB 0x1b17
+
#define USB_VENDOR_ID_CREATIVELABS 0x041e
#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c
#define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 0088979f7..fb6f1f447 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1408,6 +1408,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
USB_DEVICE_ID_NOVATEK_PCT) },
+ /* Ntrig Panel */
+ { .driver_data = MT_CLS_NSMU,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_NTRIG, 0x1b05) },
+
/* PixArt optical touch screen */
{ .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index 65c4ccfcb..76d06cf87 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -421,14 +421,13 @@ static int __init roccat_init(void)
retval = alloc_chrdev_region(&dev_id, ROCCAT_FIRST_MINOR,
ROCCAT_MAX_DEVICES, "roccat");
-
- roccat_major = MAJOR(dev_id);
-
if (retval < 0) {
pr_warn("can't get major number\n");
goto error;
}
+ roccat_major = MAJOR(dev_id);
+
cdev_init(&roccat_cdev, &roccat_ops);
retval = cdev_add(&roccat_cdev, dev_id, ROCCAT_MAX_DEVICES);
diff --git a/drivers/hid/hid-thingm.c b/drivers/hid/hid-thingm.c
index 847a497cd..9ad9c6ec5 100644
--- a/drivers/hid/hid-thingm.c
+++ b/drivers/hid/hid-thingm.c
@@ -148,13 +148,21 @@ static int thingm_led_set(struct led_classdev *ldev,
enum led_brightness brightness)
{
struct thingm_led *led = container_of(ldev, struct thingm_led, ldev);
- int ret;
- ret = thingm_write_color(led->rgb);
- if (ret)
- hid_err(led->rgb->tdev->hdev, "failed to write color\n");
+ return thingm_write_color(led->rgb);
+}
- return ret;
+static int thingm_init_led(struct thingm_led *led, const char *color_name,
+ struct thingm_rgb *rgb, int minor)
+{
+ snprintf(led->name, sizeof(led->name), "thingm%d:%s:led%d",
+ minor, color_name, rgb->num);
+ led->ldev.name = led->name;
+ led->ldev.max_brightness = 255;
+ led->ldev.brightness_set_blocking = thingm_led_set;
+ led->ldev.flags = LED_HW_PLUGGABLE;
+ led->rgb = rgb;
+ return devm_led_classdev_register(&rgb->tdev->hdev->dev, &led->ldev);
}
static int thingm_init_rgb(struct thingm_rgb *rgb)
@@ -163,42 +171,17 @@ static int thingm_init_rgb(struct thingm_rgb *rgb)
int err;
/* Register the red diode */
- snprintf(rgb->red.name, sizeof(rgb->red.name),
- "thingm%d:red:led%d", minor, rgb->num);
- rgb->red.ldev.name = rgb->red.name;
- rgb->red.ldev.max_brightness = 255;
- rgb->red.ldev.brightness_set_blocking = thingm_led_set;
- rgb->red.rgb = rgb;
-
- err = devm_led_classdev_register(&rgb->tdev->hdev->dev,
- &rgb->red.ldev);
+ err = thingm_init_led(&rgb->red, "red", rgb, minor);
if (err)
return err;
/* Register the green diode */
- snprintf(rgb->green.name, sizeof(rgb->green.name),
- "thingm%d:green:led%d", minor, rgb->num);
- rgb->green.ldev.name = rgb->green.name;
- rgb->green.ldev.max_brightness = 255;
- rgb->green.ldev.brightness_set_blocking = thingm_led_set;
- rgb->green.rgb = rgb;
-
- err = devm_led_classdev_register(&rgb->tdev->hdev->dev,
- &rgb->green.ldev);
+ err = thingm_init_led(&rgb->green, "green", rgb, minor);
if (err)
return err;
/* Register the blue diode */
- snprintf(rgb->blue.name, sizeof(rgb->blue.name),
- "thingm%d:blue:led%d", minor, rgb->num);
- rgb->blue.ldev.name = rgb->blue.name;
- rgb->blue.ldev.max_brightness = 255;
- rgb->blue.ldev.brightness_set_blocking = thingm_led_set;
- rgb->blue.rgb = rgb;
-
- err = devm_led_classdev_register(&rgb->tdev->hdev->dev,
- &rgb->blue.ldev);
- return err;
+ return thingm_init_led(&rgb->blue, "blue", rgb, minor);
}
static int thingm_probe(struct hid_device *hdev, const struct hid_device_id *id)
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 9c2d7c23f..f0e2757cb 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -34,6 +34,7 @@
#include <linux/hid.h>
#include <linux/mutex.h>
#include <linux/sched.h>
+#include <linux/string.h>
#include <linux/hidraw.h>
@@ -123,7 +124,6 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
dev = hidraw_table[minor]->hid;
-
if (count > HID_MAX_BUFFER_SIZE) {
hid_warn(dev, "pid %d passed too large report\n",
task_pid_nr(current));
@@ -138,17 +138,12 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
goto out;
}
- buf = kmalloc(count * sizeof(__u8), GFP_KERNEL);
- if (!buf) {
- ret = -ENOMEM;
+ buf = memdup_user(buffer, count);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
goto out;
}
- if (copy_from_user(buf, buffer, count)) {
- ret = -EFAULT;
- goto out_free;
- }
-
if ((report_type == HID_OUTPUT_REPORT) &&
!(dev->quirks & HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP)) {
ret = hid_hw_output_report(dev, buf, count);
@@ -587,14 +582,13 @@ int __init hidraw_init(void)
result = alloc_chrdev_region(&dev_id, HIDRAW_FIRST_MINOR,
HIDRAW_MAX_DEVICES, "hidraw");
-
- hidraw_major = MAJOR(dev_id);
-
if (result < 0) {
pr_warn("can't get major number\n");
goto out;
}
+ hidraw_major = MAJOR(dev_id);
+
hidraw_class = class_create(THIS_MODULE, "hidraw");
if (IS_ERR(hidraw_class)) {
result = PTR_ERR(hidraw_class);
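The hidraw change above collapses an open-coded kmalloc() plus
copy_from_user() pair into memdup_user(), which allocates, copies and
reports failure through ERR_PTR() in one step. A minimal sketch of the
idiom; the wrapper function is hypothetical:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Hypothetical helper showing the memdup_user() idiom adopted above. */
static ssize_t copy_report_from_user(const char __user *buffer, size_t count,
				     u8 **out)
{
	u8 *buf;

	/* One call allocates and copies; errors come back as ERR_PTR(). */
	buf = memdup_user(buffer, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	*out = buf;
	return count;
}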
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 53fc856d6..b4b8c6abb 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -71,6 +71,11 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70R, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_M65RGB, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K95RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index ccf188331..499cc8213 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -493,7 +493,8 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
features->x_fuzz = 4;
features->y_fuzz = 4;
features->pressure_fuzz = 0;
- features->distance_fuzz = 0;
+ features->distance_fuzz = 1;
+ features->tilt_fuzz = 1;
/*
* The wireless device HID is basic and layout conflicts with
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index cf2ba4345..1eae13cdc 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2344,12 +2344,13 @@ static void wacom_setup_basic_pro_pen(struct wacom_wac *wacom_wac)
__set_bit(BTN_STYLUS2, input_dev->keybit);
input_set_abs_params(input_dev, ABS_DISTANCE,
- 0, wacom_wac->features.distance_max, 0, 0);
+ 0, wacom_wac->features.distance_max, wacom_wac->features.distance_fuzz, 0);
}
static void wacom_setup_cintiq(struct wacom_wac *wacom_wac)
{
struct input_dev *input_dev = wacom_wac->pen_input;
+ struct wacom_features *features = &wacom_wac->features;
wacom_setup_basic_pro_pen(wacom_wac);
@@ -2359,9 +2360,9 @@ static void wacom_setup_cintiq(struct wacom_wac *wacom_wac)
__set_bit(BTN_TOOL_AIRBRUSH, input_dev->keybit);
input_set_abs_params(input_dev, ABS_WHEEL, 0, 1023, 0, 0);
- input_set_abs_params(input_dev, ABS_TILT_X, -64, 63, 0, 0);
+ input_set_abs_params(input_dev, ABS_TILT_X, -64, 63, features->tilt_fuzz, 0);
input_abs_set_res(input_dev, ABS_TILT_X, 57);
- input_set_abs_params(input_dev, ABS_TILT_Y, -64, 63, 0, 0);
+ input_set_abs_params(input_dev, ABS_TILT_Y, -64, 63, features->tilt_fuzz, 0);
input_abs_set_res(input_dev, ABS_TILT_Y, 57);
}
@@ -2507,7 +2508,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
case WACOM_G4:
input_set_abs_params(input_dev, ABS_DISTANCE, 0,
features->distance_max,
- 0, 0);
+ features->distance_fuzz, 0);
/* fall through */
case GRAPHIRE:
@@ -2569,7 +2570,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_DISTANCE, 0,
features->distance_max,
- 0, 0);
+ features->distance_fuzz, 0);
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
input_abs_set_res(input_dev, ABS_Z, 287);
@@ -2628,7 +2629,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_STYLUS2, input_dev->keybit);
input_set_abs_params(input_dev, ABS_DISTANCE, 0,
features->distance_max,
- 0, 0);
+ features->distance_fuzz, 0);
}
break;
case BAMBOO_PAD:
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index e2084d914..53d16537f 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -177,6 +177,7 @@ struct wacom_features {
int y_fuzz;
int pressure_fuzz;
int distance_fuzz;
+ int tilt_fuzz;
unsigned quirks;
unsigned touch_max;
int oVid;
diff --git a/drivers/hsi/controllers/Kconfig b/drivers/hsi/controllers/Kconfig
index 6aba27808..48e4eda18 100644
--- a/drivers/hsi/controllers/Kconfig
+++ b/drivers/hsi/controllers/Kconfig
@@ -5,15 +5,11 @@ comment "HSI controllers"
config OMAP_SSI
tristate "OMAP SSI hardware driver"
- depends on HSI && OF && (ARCH_OMAP3 || (ARM && COMPILE_TEST))
+ depends on HSI && OF && ARM && COMMON_CLK
+ depends on ARCH_OMAP3 || COMPILE_TEST
---help---
SSI is a legacy version of HSI. It is usually used to connect
an application engine with a cellular modem.
If you say Y here, you will enable the OMAP SSI hardware driver.
If unsure, say N.
-
-config OMAP_SSI_PORT
- tristate
- default m if OMAP_SSI=m
- default y if OMAP_SSI=y
diff --git a/drivers/hsi/controllers/Makefile b/drivers/hsi/controllers/Makefile
index d2665cf9c..7aba9c7f7 100644
--- a/drivers/hsi/controllers/Makefile
+++ b/drivers/hsi/controllers/Makefile
@@ -2,5 +2,5 @@
# Makefile for HSI controllers drivers
#
-obj-$(CONFIG_OMAP_SSI) += omap_ssi.o
-obj-$(CONFIG_OMAP_SSI_PORT) += omap_ssi_port.o
+omap_ssi-objs += omap_ssi_core.o omap_ssi_port.o
+obj-$(CONFIG_OMAP_SSI) += omap_ssi.o
diff --git a/drivers/hsi/controllers/omap_ssi.h b/drivers/hsi/controllers/omap_ssi.h
index f9aaf3726..7b4dec2c6 100644
--- a/drivers/hsi/controllers/omap_ssi.h
+++ b/drivers/hsi/controllers/omap_ssi.h
@@ -27,7 +27,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/hsi/hsi.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -97,7 +97,7 @@ struct omap_ssi_port {
struct list_head brkqueue;
unsigned int irq;
int wake_irq;
- int wake_gpio;
+ struct gpio_desc *wake_gpio;
struct tasklet_struct pio_tasklet;
struct tasklet_struct wake_tasklet;
bool wktest:1; /* FIXME: HACK to be removed */
@@ -134,6 +134,8 @@ struct gdd_trn {
* @gdd_tasklet: bottom half for DMA transfers
* @gdd_trn: Array of GDD transaction data for ongoing GDD transfers
* @lock: lock to serialize access to GDD
+ * @fck_nb: DVFS notifier block
+ * @fck_rate: clock rate
* @loss_count: To follow if we need to restore context or not
* @max_speed: Maximum TX speed (Kb/s) set by the clients.
* @sysconfig: SSI controller saved context
@@ -151,6 +153,7 @@ struct omap_ssi_controller {
struct tasklet_struct gdd_tasklet;
struct gdd_trn gdd_trn[SSI_MAX_GDD_LCH];
spinlock_t lock;
+ struct notifier_block fck_nb;
unsigned long fck_rate;
u32 loss_count;
u32 max_speed;
@@ -164,4 +167,9 @@ struct omap_ssi_controller {
#endif
};
+void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
+ struct omap_ssi_port *omap_port);
+
+extern struct platform_driver ssi_port_pdriver;
+
#endif /* __LINUX_HSI_OMAP_SSI_H__ */
diff --git a/drivers/hsi/controllers/omap_ssi.c b/drivers/hsi/controllers/omap_ssi_core.c
index 27b91f14b..a3e0febfb 100644
--- a/drivers/hsi/controllers/omap_ssi.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -24,7 +24,6 @@
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
-#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>
@@ -36,6 +35,7 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of_platform.h>
#include <linux/hsi/hsi.h>
@@ -141,7 +141,7 @@ static const struct file_operations ssi_gdd_regs_fops = {
.release = single_release,
};
-static int __init ssi_debug_add_ctrl(struct hsi_controller *ssi)
+static int ssi_debug_add_ctrl(struct hsi_controller *ssi)
{
struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
struct dentry *dir;
@@ -291,7 +291,65 @@ static unsigned long ssi_get_clk_rate(struct hsi_controller *ssi)
return rate;
}
-static int __init ssi_get_iomem(struct platform_device *pd,
+static int ssi_clk_event(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct omap_ssi_controller *omap_ssi = container_of(nb,
+ struct omap_ssi_controller, fck_nb);
+ struct hsi_controller *ssi = to_hsi_controller(omap_ssi->dev);
+ struct clk_notifier_data *clk_data = data;
+ struct omap_ssi_port *omap_port;
+ int i;
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ dev_dbg(&ssi->device, "pre rate change\n");
+
+ for (i = 0; i < ssi->num_ports; i++) {
+ omap_port = omap_ssi->port[i];
+
+ if (!omap_port)
+ continue;
+
+ /* Workaround for SWBREAK + CAwake down race in CMT */
+ tasklet_disable(&omap_port->wake_tasklet);
+
+ /* stop all ssi communication */
+ pinctrl_pm_select_idle_state(omap_port->pdev);
+ udelay(1); /* wait for racing frames */
+ }
+
+ break;
+ case ABORT_RATE_CHANGE:
+ dev_dbg(&ssi->device, "abort rate change\n");
+ /* Fall through */
+ case POST_RATE_CHANGE:
+ dev_dbg(&ssi->device, "post rate change (%lu -> %lu)\n",
+ clk_data->old_rate, clk_data->new_rate);
+ omap_ssi->fck_rate = DIV_ROUND_CLOSEST(clk_data->new_rate, 1000); /* kHz */
+
+ for (i = 0; i < ssi->num_ports; i++) {
+ omap_port = omap_ssi->port[i];
+
+ if (!omap_port)
+ continue;
+
+ omap_ssi_port_update_fclk(ssi, omap_port);
+
+ /* resume ssi communication */
+ pinctrl_pm_select_default_state(omap_port->pdev);
+ tasklet_enable(&omap_port->wake_tasklet);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int ssi_get_iomem(struct platform_device *pd,
const char *name, void __iomem **pbase, dma_addr_t *phy)
{
struct resource *mem;
@@ -311,7 +369,7 @@ static int __init ssi_get_iomem(struct platform_device *pd,
return 0;
}
-static int __init ssi_add_controller(struct hsi_controller *ssi,
+static int ssi_add_controller(struct hsi_controller *ssi,
struct platform_device *pd)
{
struct omap_ssi_controller *omap_ssi;
@@ -370,6 +428,10 @@ static int __init ssi_add_controller(struct hsi_controller *ssi,
goto out_err;
}
+ omap_ssi->fck_nb.notifier_call = ssi_clk_event;
+ omap_ssi->fck_nb.priority = INT_MAX;
+ clk_notifier_register(omap_ssi->fck, &omap_ssi->fck_nb);
+
/* TODO: find register, which can be used to detect context loss */
omap_ssi->get_loss = NULL;
@@ -387,7 +449,7 @@ out_err:
return err;
}
-static int __init ssi_hw_init(struct hsi_controller *ssi)
+static int ssi_hw_init(struct hsi_controller *ssi)
{
struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
unsigned int i;
@@ -433,6 +495,7 @@ static void ssi_remove_controller(struct hsi_controller *ssi)
int id = ssi->id;
tasklet_kill(&omap_ssi->gdd_tasklet);
hsi_unregister_controller(ssi);
+ clk_notifier_unregister(omap_ssi->fck, &omap_ssi->fck_nb);
ida_simple_remove(&platform_omap_ssi_ida, id);
}
@@ -452,12 +515,16 @@ static int ssi_remove_ports(struct device *dev, void *c)
{
struct platform_device *pdev = to_platform_device(dev);
+ if (!dev->of_node)
+ return 0;
+
+ of_node_clear_flag(dev->of_node, OF_POPULATED);
of_device_unregister(pdev);
return 0;
}
-static int __init ssi_probe(struct platform_device *pd)
+static int ssi_probe(struct platform_device *pd)
{
struct platform_device *childpdev;
struct device_node *np = pd->dev.of_node;
@@ -523,10 +590,13 @@ out1:
return err;
}
-static int __exit ssi_remove(struct platform_device *pd)
+static int ssi_remove(struct platform_device *pd)
{
struct hsi_controller *ssi = platform_get_drvdata(pd);
+ /* cleanup of of_platform_populate() call */
+ device_for_each_child(&pd->dev, NULL, ssi_remove_ports);
+
#ifdef CONFIG_DEBUG_FS
ssi_debug_remove_ctrl(ssi);
#endif
@@ -535,9 +605,6 @@ static int __exit ssi_remove(struct platform_device *pd)
pm_runtime_disable(&pd->dev);
- /* cleanup of of_platform_populate() call */
- device_for_each_child(&pd->dev, NULL, ssi_remove_ports);
-
return 0;
}
@@ -593,7 +660,8 @@ MODULE_DEVICE_TABLE(of, omap_ssi_of_match);
#endif
static struct platform_driver ssi_pdriver = {
- .remove = __exit_p(ssi_remove),
+ .probe = ssi_probe,
+ .remove = ssi_remove,
.driver = {
.name = "omap_ssi",
.pm = DEV_PM_OPS,
@@ -601,7 +669,22 @@ static struct platform_driver ssi_pdriver = {
},
};
-module_platform_driver_probe(ssi_pdriver, ssi_probe);
+static int __init ssi_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&ssi_pdriver);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&ssi_port_pdriver);
+}
+module_init(ssi_init);
+
+static void __exit ssi_exit(void)
+{
+ platform_driver_unregister(&ssi_port_pdriver);
+ platform_driver_unregister(&ssi_pdriver);
+}
+module_exit(ssi_exit);
MODULE_ALIAS("platform:omap_ssi");
MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index e80a66e20..6b8f77397 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -23,8 +23,10 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
+#include <linux/delay.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>
#include "omap_ssi_regs.h"
@@ -43,7 +45,7 @@ static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
static inline unsigned int ssi_wakein(struct hsi_port *port)
{
struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
- return gpio_get_value(omap_port->wake_gpio);
+ return gpiod_get_value(omap_port->wake_gpio);
}
#ifdef CONFIG_DEBUG_FS
@@ -171,7 +173,7 @@ static int ssi_div_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");
-static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port,
+static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
struct dentry *dir)
{
struct hsi_port *port = to_hsi_port(omap_port->dev);
@@ -514,6 +516,11 @@ static int ssi_flush(struct hsi_client *cl)
pm_runtime_get_sync(omap_port->pdev);
spin_lock_bh(&omap_port->lock);
+
+ /* stop all ssi communication */
+ pinctrl_pm_select_idle_state(omap_port->pdev);
+ udelay(1); /* wait for racing frames */
+
/* Stop all DMA transfers */
for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
msg = omap_ssi->gdd_trn[i].msg;
@@ -550,6 +557,10 @@ static int ssi_flush(struct hsi_client *cl)
ssi_flush_queue(&omap_port->rxqueue[i], NULL);
}
ssi_flush_queue(&omap_port->brkqueue, NULL);
+
+ /* Resume SSI communication */
+ pinctrl_pm_select_default_state(omap_port->pdev);
+
spin_unlock_bh(&omap_port->lock);
pm_runtime_put_sync(omap_port->pdev);
@@ -1007,7 +1018,7 @@ static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port)
return IRQ_HANDLED;
}
-static int __init ssi_port_irq(struct hsi_port *port,
+static int ssi_port_irq(struct hsi_port *port,
struct platform_device *pd)
{
struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
@@ -1029,19 +1040,19 @@ static int __init ssi_port_irq(struct hsi_port *port,
return err;
}
-static int __init ssi_wake_irq(struct hsi_port *port,
+static int ssi_wake_irq(struct hsi_port *port,
struct platform_device *pd)
{
struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
int cawake_irq;
int err;
- if (omap_port->wake_gpio == -1) {
+ if (!omap_port->wake_gpio) {
omap_port->wake_irq = -1;
return 0;
}
- cawake_irq = gpio_to_irq(omap_port->wake_gpio);
+ cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
omap_port->wake_irq = cawake_irq;
tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet,
@@ -1060,7 +1071,7 @@ static int __init ssi_wake_irq(struct hsi_port *port,
return err;
}
-static void __init ssi_queues_init(struct omap_ssi_port *omap_port)
+static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
unsigned int ch;
@@ -1071,7 +1082,7 @@ static void __init ssi_queues_init(struct omap_ssi_port *omap_port)
INIT_LIST_HEAD(&omap_port->brkqueue);
}
-static int __init ssi_port_get_iomem(struct platform_device *pd,
+static int ssi_port_get_iomem(struct platform_device *pd,
const char *name, void __iomem **pbase, dma_addr_t *phy)
{
struct hsi_port *port = platform_get_drvdata(pd);
@@ -1104,24 +1115,19 @@ static int __init ssi_port_get_iomem(struct platform_device *pd,
return 0;
}
-static int __init ssi_port_probe(struct platform_device *pd)
+static int ssi_port_probe(struct platform_device *pd)
{
struct device_node *np = pd->dev.of_node;
struct hsi_port *port;
struct omap_ssi_port *omap_port;
struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
- int cawake_gpio = 0;
+ struct gpio_desc *cawake_gpio = NULL;
u32 port_id;
int err;
dev_dbg(&pd->dev, "init ssi port...\n");
- if (!try_module_get(ssi->owner)) {
- dev_err(&pd->dev, "could not increment parent module refcount\n");
- return -ENODEV;
- }
-
if (!ssi->port || !omap_ssi->port) {
dev_err(&pd->dev, "ssi controller not initialized!\n");
err = -ENODEV;
@@ -1147,20 +1153,10 @@ static int __init ssi_port_probe(struct platform_device *pd)
goto error;
}
- err = of_get_named_gpio(np, "ti,ssi-cawake-gpio", 0);
- if (err < 0) {
- dev_err(&pd->dev, "DT data is missing cawake gpio (err=%d)\n",
- err);
- goto error;
- }
- cawake_gpio = err;
-
- err = devm_gpio_request_one(&port->device, cawake_gpio, GPIOF_DIR_IN,
- "cawake");
- if (err) {
- dev_err(&pd->dev, "could not request cawake gpio (err=%d)!\n",
- err);
- err = -ENXIO;
+ cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
+ if (IS_ERR(cawake_gpio)) {
+ err = PTR_ERR(cawake_gpio);
+ dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
goto error;
}
@@ -1219,8 +1215,7 @@ static int __init ssi_port_probe(struct platform_device *pd)
hsi_add_clients_from_dt(port, np);
- dev_info(&pd->dev, "ssi port %u successfully initialized (cawake=%d)\n",
- port_id, cawake_gpio);
+ dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);
return 0;
@@ -1228,7 +1223,7 @@ error:
return err;
}
-static int __exit ssi_port_remove(struct platform_device *pd)
+static int ssi_port_remove(struct platform_device *pd)
{
struct hsi_port *port = platform_get_drvdata(pd);
struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
@@ -1253,12 +1248,28 @@ static int __exit ssi_port_remove(struct platform_device *pd)
omap_ssi->port[omap_port->port_id] = NULL;
platform_set_drvdata(pd, NULL);
- module_put(ssi->owner);
pm_runtime_disable(&pd->dev);
return 0;
}
+static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
+{
+ writel_relaxed(omap_port->sst.divisor,
+ omap_port->sst_base + SSI_SST_DIVISOR_REG);
+
+ return 0;
+}
+
+void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
+ struct omap_ssi_port *omap_port)
+{
+ /* update divisor */
+ u32 div = ssi_calculate_div(ssi);
+ omap_port->sst.divisor = div;
+ ssi_restore_divisor(omap_port);
+}
+
#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
@@ -1311,14 +1322,6 @@ static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
return 0;
}
-static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
-{
- writel_relaxed(omap_port->sst.divisor,
- omap_port->sst_base + SSI_SST_DIVISOR_REG);
-
- return 0;
-}
-
static int omap_ssi_port_runtime_suspend(struct device *dev)
{
struct hsi_port *port = dev_get_drvdata(dev);
@@ -1380,19 +1383,12 @@ MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#define omap_ssi_port_of_match NULL
#endif
-static struct platform_driver ssi_port_pdriver = {
- .remove = __exit_p(ssi_port_remove),
+struct platform_driver ssi_port_pdriver = {
+ .probe = ssi_port_probe,
+ .remove = ssi_port_remove,
.driver = {
.name = "omap_ssi_port",
.of_match_table = omap_ssi_port_of_match,
.pm = DEV_PM_OPS,
},
};
-
-module_platform_driver_probe(ssi_port_pdriver, ssi_port_probe);
-
-MODULE_ALIAS("platform:omap_ssi_port");
-MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
-MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>");
-MODULE_DESCRIPTION("Synchronous Serial Interface Port Driver");
-MODULE_LICENSE("GPL v2");
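The port driver above also migrates from integer GPIO numbers to GPIO
descriptors. A condensed sketch of the descriptor-based pattern it adopts
(the demo_* function is hypothetical; the "ti,ssi-cawake" lookup name
matches the probe code above):

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

/* Descriptor-based GPIO lookup: devm_gpiod_get() resolves the DT property,
 * requests the line and sets its direction in one device-managed call,
 * replacing of_get_named_gpio() + devm_gpio_request_one().
 */
static int demo_get_wake_gpio(struct platform_device *pd,
			      struct gpio_desc **out)
{
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);

	/* the line's value and IRQ come from the descriptor itself */
	dev_dbg(&pd->dev, "cawake=%d irq=%d\n",
		gpiod_get_value(gpio), gpiod_to_irq(gpio));

	*out = gpio;
	return 0;
}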
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 38b682bab..b6c1211b4 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -597,27 +597,55 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
static void vmbus_wait_for_unload(void)
{
- int cpu = smp_processor_id();
- void *page_addr = hv_context.synic_message_page[cpu];
- struct hv_message *msg = (struct hv_message *)page_addr +
- VMBUS_MESSAGE_SINT;
+ int cpu;
+ void *page_addr;
+ struct hv_message *msg;
struct vmbus_channel_message_header *hdr;
- bool unloaded = false;
+ u32 message_type;
+ /*
+ * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
+ * used for initial contact or to CPU0, depending on the host version.
+ * When we're crashing on a different CPU, let's hope that the IRQ
+ * handler on the CPU which receives CHANNELMSG_UNLOAD_RESPONSE is
+ * still functional and that vmbus_unload_response() will complete
+ * vmbus_connection.unload_event. If not, the last thing we can do is
+ * read the message pages for all CPUs directly.
+ */
while (1) {
- if (READ_ONCE(msg->header.message_type) == HVMSG_NONE) {
- mdelay(10);
- continue;
- }
+ if (completion_done(&vmbus_connection.unload_event))
+ break;
- hdr = (struct vmbus_channel_message_header *)msg->u.payload;
- if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
- unloaded = true;
+ for_each_online_cpu(cpu) {
+ page_addr = hv_context.synic_message_page[cpu];
+ msg = (struct hv_message *)page_addr +
+ VMBUS_MESSAGE_SINT;
- vmbus_signal_eom(msg);
+ message_type = READ_ONCE(msg->header.message_type);
+ if (message_type == HVMSG_NONE)
+ continue;
- if (unloaded)
- break;
+ hdr = (struct vmbus_channel_message_header *)
+ msg->u.payload;
+
+ if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
+ complete(&vmbus_connection.unload_event);
+
+ vmbus_signal_eom(msg, message_type);
+ }
+
+ mdelay(10);
+ }
+
+ /*
+ * We're crashing and have already got the UNLOAD_RESPONSE; clean up
+ * any pending messages on all CPUs so that we can receive new
+ * messages after we reconnect.
+ */
+ for_each_online_cpu(cpu) {
+ page_addr = hv_context.synic_message_page[cpu];
+ msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
+ msg->header.message_type = HVMSG_NONE;
}
}
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index d02f1373d..fcf8a02dc 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -495,3 +495,4 @@ void vmbus_set_event(struct vmbus_channel *channel)
hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL);
}
+EXPORT_SYMBOL_GPL(vmbus_set_event);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b853b4b08..df35fb7ed 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -714,7 +714,7 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
* If the pfn range we are dealing with is not in the current
* "hot add block", move on.
*/
- if ((start_pfn >= has->end_pfn))
+ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
continue;
/*
* If the current hot add-request extends beyond
@@ -768,7 +768,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
* If the pfn range we are dealing with is not in the current
* "hot add block", move on.
*/
- if ((start_pfn >= has->end_pfn))
+ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
continue;
old_covered_state = has->covered_end_pfn;
@@ -1400,6 +1400,7 @@ static void balloon_onchannelcallback(void *context)
* This is a normal hot-add request specifying
* hot-add memory.
*/
+ dm->host_specified_ha_region = false;
ha_pg_range = &ha_msg->range;
dm->ha_wrk.ha_page_range = *ha_pg_range;
dm->ha_wrk.ha_region_range.page_range = 0;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 9b9b370fe..cb1a9160a 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -78,9 +78,11 @@ static void kvp_send_key(struct work_struct *dummy);
static void kvp_respond_to_host(struct hv_kvp_msg *msg, int error);
static void kvp_timeout_func(struct work_struct *dummy);
+static void kvp_host_handshake_func(struct work_struct *dummy);
static void kvp_register(int);
static DECLARE_DELAYED_WORK(kvp_timeout_work, kvp_timeout_func);
+static DECLARE_DELAYED_WORK(kvp_host_handshake_work, kvp_host_handshake_func);
static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
static const char kvp_devname[] = "vmbus/hv_kvp";
@@ -130,6 +132,11 @@ static void kvp_timeout_func(struct work_struct *dummy)
hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}
+static void kvp_host_handshake_func(struct work_struct *dummy)
+{
+ hv_poll_channel(kvp_transaction.recv_channel, hv_kvp_onchannelcallback);
+}
+
static int kvp_handle_handshake(struct hv_kvp_msg *msg)
{
switch (msg->kvp_hdr.operation) {
@@ -154,6 +161,12 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
pr_debug("KVP: userspace daemon ver. %d registered\n",
KVP_OP_REGISTER);
kvp_register(dm_reg_value);
+
+ /*
+ * If we're still negotiating with the host, cancel the timeout
+ * work so that we do not poll the channel twice.
+ */
+ cancel_delayed_work_sync(&kvp_host_handshake_work);
hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
return 0;
@@ -594,7 +607,22 @@ void hv_kvp_onchannelcallback(void *context)
struct icmsg_negotiate *negop = NULL;
int util_fw_version;
int kvp_srv_version;
+ static enum {NEGO_NOT_STARTED,
+ NEGO_IN_PROGRESS,
+ NEGO_FINISHED} host_negotiated = NEGO_NOT_STARTED;
+ if (host_negotiated == NEGO_NOT_STARTED &&
+ kvp_transaction.state < HVUTIL_READY) {
+ /*
+ * If the userspace daemon is not connected and the host is
+ * asking us to negotiate, we need to delay so as not to lose
+ * messages. This is important for the Failover IP setting.
+ */
+ host_negotiated = NEGO_IN_PROGRESS;
+ schedule_delayed_work(&kvp_host_handshake_work,
+ HV_UTIL_NEGO_TIMEOUT * HZ);
+ return;
+ }
if (kvp_transaction.state > HVUTIL_READY)
return;
@@ -672,6 +700,8 @@ void hv_kvp_onchannelcallback(void *context)
vmbus_sendpacket(channel, recv_buffer,
recvlen, requestid,
VM_PKT_DATA_INBAND, 0);
+
+ host_negotiated = NEGO_FINISHED;
}
}
@@ -708,6 +738,7 @@ hv_kvp_init(struct hv_util_service *srv)
void hv_kvp_deinit(void)
{
kvp_transaction.state = HVUTIL_DEVICE_DYING;
+ cancel_delayed_work_sync(&kvp_host_handshake_work);
cancel_delayed_work_sync(&kvp_timeout_work);
cancel_work_sync(&kvp_sendkey_work);
hvutil_transport_destroy(hvt);
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 12321b93a..718b5c72f 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -36,6 +36,11 @@
#define HV_UTIL_TIMEOUT 30
/*
+ * Timeout for guest-host handshake for services.
+ */
+#define HV_UTIL_NEGO_TIMEOUT 60
+
+/*
* The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
* is set by CPUID(HVCPUID_VERSION_FEATURES).
*/
@@ -620,9 +625,21 @@ extern struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT];
/* Free the message slot and signal end-of-message if required */
-static inline void vmbus_signal_eom(struct hv_message *msg)
+static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
- msg->header.message_type = HVMSG_NONE;
+ /*
+ * On crash we're reading some other CPU's message page and we need
+ * to be careful: this other CPU may already have cleared the header
+ * and the host may already have delivered some other message there.
+ * If we blindly write msg->header.message_type we're going
+ * to lose it. We can still lose a message of the same type but
+ * we count on the fact that there can only be one
+ * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
+ * on crash.
+ */
+ if (cmpxchg(&msg->header.message_type, old_msg_type,
+ HVMSG_NONE) != old_msg_type)
+ return;
/*
* Make sure the write to MessageType (ie set to
@@ -667,8 +684,6 @@ void vmbus_disconnect(void);
int vmbus_post_msg(void *buffer, size_t buflen);
-void vmbus_set_event(struct vmbus_channel *channel);
-
void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);
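
Editor's note: vmbus_signal_eom() now clears the message slot with cmpxchg() so that, when a crashing CPU reads another CPU's message page, it writes HVMSG_NONE only if the slot still holds the type it sampled. The same idiom with C11 atomics, as a minimal sketch; the HVMSG_* values are stand-ins.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define HVMSG_NONE          0x0          /* values are illustrative */
#define HVMSG_TIMER_EXPIRED 0x80000010

static _Atomic uint32_t slot = HVMSG_TIMER_EXPIRED;

/* Clear the slot only if it still holds the type we sampled earlier. */
static int signal_eom(uint32_t old_type)
{
	uint32_t expected = old_type;

	return atomic_compare_exchange_strong(&slot, &expected, HVMSG_NONE);
}

int main(void)
{
	printf("first clear:  %s\n",
	       signal_eom(HVMSG_TIMER_EXPIRED) ? "ok" : "lost race");
	printf("second clear: %s\n",           /* slot is HVMSG_NONE now */
	       signal_eom(HVMSG_TIMER_EXPIRED) ? "ok" : "lost race");
	return 0;
}
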
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index a40a73a7b..fe586bf74 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -33,25 +33,21 @@
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
rbi->ring_buffer->interrupt_mask = 1;
- mb();
+ virt_mb();
}
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
- u32 read;
- u32 write;
rbi->ring_buffer->interrupt_mask = 0;
- mb();
+ virt_mb();
/*
* Now check to see if the ring buffer is still empty.
* If it is not, we raced and we need to process new
* incoming messages.
*/
- hv_get_ringbuffer_availbytes(rbi, &read, &write);
-
- return read;
+ return hv_get_bytes_to_read(rbi);
}
/*
@@ -72,69 +68,17 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
- mb();
- if (rbi->ring_buffer->interrupt_mask)
+ virt_mb();
+ if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
return false;
/* check interrupt_mask before read_index */
- rmb();
+ virt_rmb();
/*
* This is the only case we need to signal when the
* ring transitions from being empty to non-empty.
*/
- if (old_write == rbi->ring_buffer->read_index)
- return true;
-
- return false;
-}
-
-/*
- * To optimize the flow management on the send-side,
- * when the sender is blocked because of lack of
- * sufficient space in the ring buffer, potential the
- * consumer of the ring buffer can signal the producer.
- * This is controlled by the following parameters:
- *
- * 1. pending_send_sz: This is the size in bytes that the
- * producer is trying to send.
- * 2. The feature bit feat_pending_send_sz set to indicate if
- * the consumer of the ring will signal when the ring
- * state transitions from being full to a state where
- * there is room for the producer to send the pending packet.
- */
-
-static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
-{
- u32 cur_write_sz;
- u32 r_size;
- u32 write_loc;
- u32 read_loc = rbi->ring_buffer->read_index;
- u32 pending_sz;
-
- /*
- * Issue a full memory barrier before making the signaling decision.
- * Here is the reason for having this barrier:
- * If the reading of the pend_sz (in this function)
- * were to be reordered and read before we commit the new read
- * index (in the calling function) we could
- * have a problem. If the host were to set the pending_sz after we
- * have sampled pending_sz and go to sleep before we commit the
- * read index, we could miss sending the interrupt. Issue a full
- * memory barrier to address this.
- */
- mb();
-
- pending_sz = rbi->ring_buffer->pending_send_sz;
- write_loc = rbi->ring_buffer->write_index;
- /* If the other end is not blocked on write don't bother. */
- if (pending_sz == 0)
- return false;
-
- r_size = rbi->ring_datasize;
- cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
- read_loc - write_loc;
-
- if (cur_write_sz >= pending_sz)
+ if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
return true;
return false;
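
Editor's note: the removed hv_need_to_signal_on_read() computed the free space by hand; the hv_get_bytes_to_read()/hv_get_bytes_to_write() helpers that replace it wrap the same circular-buffer arithmetic. That arithmetic in isolation, a sketch assuming the driver's size/read-index/write-index layout:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the hv ring helpers: distance between the indices. */
static uint32_t bytes_to_read(uint32_t size, uint32_t rd, uint32_t wr)
{
	return wr >= rd ? wr - rd : size - (rd - wr);
}

static uint32_t bytes_to_write(uint32_t size, uint32_t rd, uint32_t wr)
{
	return size - bytes_to_read(size, rd, wr);
}

int main(void)
{
	uint32_t size = 4096, rd = 3500, wr = 100;  /* write index wrapped */

	printf("to read:  %u\n", bytes_to_read(size, rd, wr));   /* 696  */
	printf("to write: %u\n", bytes_to_write(size, rd, wr));  /* 3400 */
	return 0;
}
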
@@ -188,17 +132,9 @@ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
u32 next_read_location)
{
ring_info->ring_buffer->read_index = next_read_location;
+ ring_info->priv_read_index = next_read_location;
}
-
-/* Get the start of the ring buffer. */
-static inline void *
-hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
-{
- return (void *)ring_info->ring_buffer->buffer;
-}
-
-
/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
@@ -332,7 +268,6 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
{
int i = 0;
u32 bytes_avail_towrite;
- u32 bytes_avail_toread;
u32 totalbytes_towrite = 0;
u32 next_write_location;
@@ -348,9 +283,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
if (lock)
spin_lock_irqsave(&outring_info->ring_lock, flags);
- hv_get_ringbuffer_availbytes(outring_info,
- &bytes_avail_toread,
- &bytes_avail_towrite);
+ bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
/*
* If there is only room for the packet, assume it is full.
@@ -384,7 +317,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
sizeof(u64));
/* Issue a full memory barrier before updating the write index */
- mb();
+ virt_mb();
/* Now, update the write location */
hv_set_next_write_location(outring_info, next_write_location);
@@ -401,7 +334,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
void *buffer, u32 buflen, u32 *buffer_actual_len,
u64 *requestid, bool *signal, bool raw)
{
- u32 bytes_avail_towrite;
u32 bytes_avail_toread;
u32 next_read_location = 0;
u64 prev_indices = 0;
@@ -417,10 +349,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
*buffer_actual_len = 0;
*requestid = 0;
- hv_get_ringbuffer_availbytes(inring_info,
- &bytes_avail_toread,
- &bytes_avail_towrite);
-
+ bytes_avail_toread = hv_get_bytes_to_read(inring_info);
/* Make sure there is something to read */
if (bytes_avail_toread < sizeof(desc)) {
/*
@@ -464,7 +393,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
* the writer may start writing to the read area once the read index
* is updated.
*/
- mb();
+ virt_mb();
/* Update the read index */
hv_set_next_read_location(inring_info, next_read_location);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 64713ff47..952f20fdc 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -41,6 +41,7 @@
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
+#include <linux/efi.h>
#include "hyperv_vmbus.h"
static struct acpi_device *hv_acpi_dev;
@@ -101,7 +102,10 @@ static struct notifier_block hyperv_panic_block = {
.notifier_call = hyperv_panic_event,
};
+static const char *fb_mmio_name = "fb_range";
+static struct resource *fb_mmio;
struct resource *hyperv_mmio;
+DEFINE_SEMAPHORE(hyperv_mmio_lock);
static int vmbus_exists(void)
{
@@ -708,7 +712,7 @@ static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
if (dev->event_handler)
dev->event_handler(dev);
- vmbus_signal_eom(msg);
+ vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
}
void vmbus_on_msg_dpc(unsigned long data)
@@ -720,8 +724,9 @@ void vmbus_on_msg_dpc(unsigned long data)
struct vmbus_channel_message_header *hdr;
struct vmbus_channel_message_table_entry *entry;
struct onmessage_work_context *ctx;
+ u32 message_type = msg->header.message_type;
- if (msg->header.message_type == HVMSG_NONE)
+ if (message_type == HVMSG_NONE)
/* no msg */
return;
@@ -746,7 +751,7 @@ void vmbus_on_msg_dpc(unsigned long data)
entry->message_handler(hdr);
msg_handled:
- vmbus_signal_eom(msg);
+ vmbus_signal_eom(msg, message_type);
}
static void vmbus_isr(void)
@@ -1048,7 +1053,6 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
new_res->end = end;
/*
- * Stick ranges from higher in address space at the front of the list.
* If two ranges are adjacent, merge them.
*/
do {
@@ -1069,7 +1073,7 @@ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
break;
}
- if ((*old_res)->end < new_res->start) {
+ if ((*old_res)->start > new_res->end) {
new_res->sibling = *old_res;
if (prev_res)
(*prev_res)->sibling = new_res;
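
Editor's note: the corrected comparison `(*old_res)->start > new_res->end` inserts the new range in front of the first existing range that lies entirely above it, keeping the sibling list address-sorted. A minimal sketch of that sorted insert on a singly linked list; struct and helper names are simplified stand-ins, and the driver's adjacent-range merge step is omitted.

#include <stdio.h>

struct res {                 /* simplified stand-in for struct resource */
	unsigned long start, end;
	struct res *sibling;
};

/* Insert r so the list stays sorted by start address (no merging). */
static void insert_sorted(struct res **head, struct res *r)
{
	struct res **pp = head;

	while (*pp && !((*pp)->start > r->end))   /* the corrected test */
		pp = &(*pp)->sibling;
	r->sibling = *pp;
	*pp = r;
}

int main(void)
{
	struct res a = { 0x1000, 0x1fff, NULL };
	struct res b = { 0x8000, 0x8fff, NULL };
	struct res c = { 0x4000, 0x4fff, NULL };
	struct res *head = NULL, *it;

	insert_sorted(&head, &a);
	insert_sorted(&head, &b);
	insert_sorted(&head, &c);    /* lands between a and b */

	for (it = head; it; it = it->sibling)
		printf("[%#lx-%#lx]\n", it->start, it->end);
	return 0;
}
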
@@ -1091,6 +1095,12 @@ static int vmbus_acpi_remove(struct acpi_device *device)
struct resource *next_res;
if (hyperv_mmio) {
+ if (fb_mmio) {
+ __release_region(hyperv_mmio, fb_mmio->start,
+ resource_size(fb_mmio));
+ fb_mmio = NULL;
+ }
+
for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
next_res = cur_res->sibling;
kfree(cur_res);
@@ -1100,6 +1110,30 @@ static int vmbus_acpi_remove(struct acpi_device *device)
return 0;
}
+static void vmbus_reserve_fb(void)
+{
+ int size;
+ /*
+ * Make a claim for the frame buffer in the resource tree under the
+ * first node, which will be the one below 4GB. The length seems to
+ * be underreported, particularly in a Generation 1 VM. So start out
+ * reserving a larger area and make it smaller until it succeeds.
+ */
+
+ if (screen_info.lfb_base) {
+ if (efi_enabled(EFI_BOOT))
+ size = max_t(__u32, screen_info.lfb_size, 0x800000);
+ else
+ size = max_t(__u32, screen_info.lfb_size, 0x4000000);
+
+ for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
+ fb_mmio = __request_region(hyperv_mmio,
+ screen_info.lfb_base, size,
+ fb_mmio_name, 0);
+ }
+ }
+}
+
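
Editor's note: vmbus_reserve_fb() starts from an oversized claim (at least 8 MB on EFI boot, 64 MB otherwise) and halves the size until __request_region() succeeds, with a 1 MB floor. The same halving-retry shape, sketched with a fake reservation predicate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pretend only claims up to 4 MB can actually be reserved. */
static bool try_reserve(uint64_t base, uint32_t size)
{
	(void)base;
	return size <= 0x400000;
}

int main(void)
{
	uint64_t base = 0xf8000000;  /* illustrative lfb_base */
	uint32_t size = 0x4000000;   /* legacy boot: start at 64 MB */
	bool ok = false;

	for (; !ok && size >= 0x100000; size >>= 1)  /* floor: 1 MB */
		ok = try_reserve(base, size);

	/* size was halved once more after the successful attempt */
	if (ok)
		printf("reserved %#x bytes at %#llx\n", size << 1,
		       (unsigned long long)base);
	return 0;
}
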
/**
* vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
* @new: If successful, supplied a pointer to the
@@ -1128,11 +1162,33 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
resource_size_t size, resource_size_t align,
bool fb_overlap_ok)
{
- struct resource *iter;
- resource_size_t range_min, range_max, start, local_min, local_max;
+ struct resource *iter, *shadow;
+ resource_size_t range_min, range_max, start;
const char *dev_n = dev_name(&device_obj->device);
- u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
- int i;
+ int retval;
+
+ retval = -ENXIO;
+ down(&hyperv_mmio_lock);
+
+ /*
+ * If overlaps with frame buffers are allowed, then first attempt to
+ * make the allocation from within the reserved region. Because it
+ * is already reserved, no shadow allocation is necessary.
+ */
+ if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
+ !(max < fb_mmio->start)) {
+
+ range_min = fb_mmio->start;
+ range_max = fb_mmio->end;
+ start = (range_min + align - 1) & ~(align - 1);
+ for (; start + size - 1 <= range_max; start += align) {
+ *new = request_mem_region_exclusive(start, size, dev_n);
+ if (*new) {
+ retval = 0;
+ goto exit;
+ }
+ }
+ }
for (iter = hyperv_mmio; iter; iter = iter->sibling) {
if ((iter->start >= max) || (iter->end <= min))
@@ -1140,46 +1196,56 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
range_min = iter->start;
range_max = iter->end;
-
- /* If this range overlaps the frame buffer, split it into
- two tries. */
- for (i = 0; i < 2; i++) {
- local_min = range_min;
- local_max = range_max;
- if (fb_overlap_ok || (range_min >= fb_end) ||
- (range_max <= screen_info.lfb_base)) {
- i++;
- } else {
- if ((range_min <= screen_info.lfb_base) &&
- (range_max >= screen_info.lfb_base)) {
- /*
- * The frame buffer is in this window,
- * so trim this into the part that
- * preceeds the frame buffer.
- */
- local_max = screen_info.lfb_base - 1;
- range_min = fb_end;
- } else {
- range_min = fb_end;
- continue;
- }
+ start = (range_min + align - 1) & ~(align - 1);
+ for (; start + size - 1 <= range_max; start += align) {
+ shadow = __request_region(iter, start, size, NULL,
+ IORESOURCE_BUSY);
+ if (!shadow)
+ continue;
+
+ *new = request_mem_region_exclusive(start, size, dev_n);
+ if (*new) {
+ shadow->name = (char *)*new;
+ retval = 0;
+ goto exit;
}
- start = (local_min + align - 1) & ~(align - 1);
- for (; start + size - 1 <= local_max; start += align) {
- *new = request_mem_region_exclusive(start, size,
- dev_n);
- if (*new)
- return 0;
- }
+ __release_region(iter, start, size);
}
}
- return -ENXIO;
+exit:
+ up(&hyperv_mmio_lock);
+ return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
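
Editor's note: both allocation paths above use the align-up idiom `start = (min + align - 1) & ~(align - 1)` and then step by `align` until a candidate of the requested size fits in the window. Isolated, with a stand-in claim function (the driver's shadow reservation is left out):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t rsize64_t;  /* stand-in for resource_size_t */

/* Pretend everything below 0x12000 is already taken. */
static bool claim(rsize64_t start, rsize64_t size)
{
	(void)size;
	return start >= 0x12000;
}

static int alloc_mmio(rsize64_t min, rsize64_t max, rsize64_t size,
		      rsize64_t align, rsize64_t *out)
{
	/* Round the first candidate up to the alignment boundary. */
	rsize64_t start = (min + align - 1) & ~(align - 1);

	for (; start + size - 1 <= max; start += align)
		if (claim(start, size)) {
			*out = start;
			return 0;
		}
	return -1;           /* -ENXIO in the driver */
}

int main(void)
{
	rsize64_t got;

	if (!alloc_mmio(0x10234, 0x20000, 0x1000, 0x1000, &got))
		printf("allocated at %#llx\n", (unsigned long long)got);
	return 0;
}
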
/**
+ * vmbus_free_mmio() - Free a memory-mapped I/O range.
+ * @start: Base address of region to release.
+ * @size: Size of the range to be released.
+ *
+ * This function releases anything requested by
+ * vmbus_allocate_mmio().
+ */
+void vmbus_free_mmio(resource_size_t start, resource_size_t size)
+{
+ struct resource *iter;
+
+ down(&hyperv_mmio_lock);
+ for (iter = hyperv_mmio; iter; iter = iter->sibling) {
+ if ((iter->start >= start + size) || (iter->end <= start))
+ continue;
+
+ __release_region(iter, start, size);
+ }
+ release_mem_region(start, size);
+ up(&hyperv_mmio_lock);
+}
+EXPORT_SYMBOL_GPL(vmbus_free_mmio);
+
+/**
* vmbus_cpu_number_to_vp_number() - Map CPU to VP.
* @cpu_number: CPU number in Linux terms
*
@@ -1219,8 +1285,10 @@ static int vmbus_acpi_add(struct acpi_device *device)
if (ACPI_FAILURE(result))
continue;
- if (hyperv_mmio)
+ if (hyperv_mmio) {
+ vmbus_reserve_fb();
break;
+ }
}
ret_val = 0;
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 5c2d13a68..ff940075b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -288,7 +288,7 @@ config SENSORS_K10TEMP
config SENSORS_FAM15H_POWER
tristate "AMD Family 15h processor power"
- depends on X86 && PCI
+ depends on X86 && PCI && CPU_SUP_AMD
help
If you say yes here you get support for processor power
information of your AMD family 15h CPU.
@@ -621,7 +621,8 @@ config SENSORS_IT87
If you say yes here you get support for ITE IT8705F, IT8712F, IT8716F,
IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8732F, IT8758E,
IT8771E, IT8772E, IT8781F, IT8782F, IT8783E/F, IT8786E, IT8790E,
- IT8603E, IT8620E, and IT8623E sensor chips, and the SiS950 clone.
+ IT8603E, IT8620E, IT8623E, and IT8628E sensor chips, and the SiS950
+ clone.
This driver can also be built as a module. If so, the module
will be called it87.
@@ -821,6 +822,16 @@ config SENSORS_MAX197
This driver can also be built as a module. If so, the module
will be called max197.
+config SENSORS_MAX31722
+tristate "MAX31722 temperature sensor"
+ depends on SPI
+ help
+ Support for the Maxim Integrated MAX31722/MAX31723 digital
+ thermometers/thermostats operating over an SPI interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called max31722.
+
config SENSORS_MAX6639
tristate "Maxim MAX6639 sensor chip"
depends on I2C
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 58cc3acba..2ef5b7c4c 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -112,6 +112,7 @@ obj-$(CONFIG_SENSORS_MAX16065) += max16065.o
obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX197) += max197.o
+obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index a9356a3de..2ac87d553 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -35,6 +35,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/sched.h>
+#include <linux/ctype.h>
#include <linux/i8k.h>
@@ -403,6 +404,10 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case I8K_BIOS_VERSION:
+ if (!isdigit(bios_version[0]) || !isdigit(bios_version[1]) ||
+ !isdigit(bios_version[2]))
+ return -EINVAL;
+
val = (bios_version[0] << 16) |
(bios_version[1] << 8) | bios_version[2];
break;
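
Editor's note: the I8K_BIOS_VERSION case packs three ASCII characters into one integer and now rejects anything that is not all digits (versions such as "A17" contain a letter). The check and packing as a stand-alone helper; the name is a hypothetical stand-in:

#include <ctype.h>
#include <stdio.h>

/*
 * Pack a three-byte BIOS version as the ioctl does; reject non-digit
 * bytes up front. Returns -1 on invalid input (-EINVAL in the driver).
 */
static int pack_bios_version(const char v[3])
{
	if (!isdigit((unsigned char)v[0]) || !isdigit((unsigned char)v[1]) ||
	    !isdigit((unsigned char)v[2]))
		return -1;
	return (v[0] << 16) | (v[1] << 8) | v[2];
}

int main(void)
{
	printf("%#x\n", pack_bios_version("123"));  /* 0x313233 */
	printf("%d\n",  pack_bios_version("A17"));  /* -1 */
	return 0;
}
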
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 952fe692d..24e395c59 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -58,7 +58,7 @@ static const u8 REG_TEMP_MAX[4] = { 0x34, 0x30, 0x31, 0x32 };
*/
static int apd = -1;
module_param(apd, bint, 0);
-MODULE_PARM_DESC(init, "Set to zero to disable anti-parallel diode mode");
+MODULE_PARM_DESC(apd, "Set to zero to disable anti-parallel diode mode");
struct temperature {
s8 degrees;
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 4f695d8fc..15aa49d08 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -1,7 +1,7 @@
/*
* fam15h_power.c - AMD Family 15h processor power monitoring
*
- * Copyright (c) 2011 Advanced Micro Devices, Inc.
+ * Copyright (c) 2011-2016 Advanced Micro Devices, Inc.
* Author: Andreas Herrmann <herrmann.der.user@googlemail.com>
*
*
@@ -25,6 +25,10 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/time.h>
+#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/msr.h>
@@ -44,8 +48,14 @@ MODULE_LICENSE("GPL");
#define FAM15H_MIN_NUM_ATTRS 2
#define FAM15H_NUM_GROUPS 2
+#define MAX_CUS 8
+/* set the maximum interval to 1 second */
+#define MAX_INTERVAL 1000
+
+#define MSR_F15H_CU_PWR_ACCUMULATOR 0xc001007a
#define MSR_F15H_CU_MAX_PWR_ACCUMULATOR 0xc001007b
+#define MSR_F15H_PTSC 0xc0010280
#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F4 0x15b4
@@ -59,8 +69,20 @@ struct fam15h_power_data {
struct attribute_group group;
/* maximum accumulated power of a compute unit */
u64 max_cu_acc_power;
+ /* accumulated power of the compute units */
+ u64 cu_acc_power[MAX_CUS];
+ /* performance timestamp counter */
+ u64 cpu_sw_pwr_ptsc[MAX_CUS];
+ /* online/offline status of current compute unit */
+ int cu_on[MAX_CUS];
+ unsigned long power_period;
};
+static bool is_carrizo_or_later(void)
+{
+ return boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60;
+}
+
static ssize_t show_power(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -77,7 +99,7 @@ static ssize_t show_power(struct device *dev,
* On Carrizo and later platforms, TdpRunAvgAccCap bit field
* is extended to 4:31 from 4:25.
*/
- if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60) {
+ if (is_carrizo_or_later()) {
running_avg_capture = val >> 4;
running_avg_capture = sign_extend32(running_avg_capture, 27);
} else {
@@ -94,7 +116,7 @@ static ssize_t show_power(struct device *dev,
* On Carrizo and later platforms, ApmTdpLimit bit field
* is extended to 16:31 from 16:28.
*/
- if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60)
+ if (is_carrizo_or_later())
tdp_limit = val >> 16;
else
tdp_limit = (val >> 16) & 0x1fff;
@@ -125,6 +147,163 @@ static ssize_t show_power_crit(struct device *dev,
}
static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL);
+static void do_read_registers_on_cu(void *_data)
+{
+ struct fam15h_power_data *data = _data;
+ int cpu, cu;
+
+ cpu = smp_processor_id();
+
+ /*
+ * With the new x86 topology modelling, the CPU core ID actually
+ * is the compute unit ID.
+ */
+ cu = cpu_data(cpu).cpu_core_id;
+
+ rdmsrl_safe(MSR_F15H_CU_PWR_ACCUMULATOR, &data->cu_acc_power[cu]);
+ rdmsrl_safe(MSR_F15H_PTSC, &data->cpu_sw_pwr_ptsc[cu]);
+
+ data->cu_on[cu] = 1;
+}
+
+/*
+ * This function is only able to be called when CPUID
+ * Fn8000_0007:EDX[12] is set.
+ */
+static int read_registers(struct fam15h_power_data *data)
+{
+ int core, this_core;
+ cpumask_var_t mask;
+ int ret, cpu;
+
+ ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
+ if (!ret)
+ return -ENOMEM;
+
+ memset(data->cu_on, 0, sizeof(int) * MAX_CUS);
+
+ get_online_cpus();
+
+ /*
+ * Choose the first online core of each compute unit, and then
+ * read their power and PTSC MSR values with a single IPI,
+ * because the MSR values of one CPU core represent the whole
+ * compute unit's.
+ */
+ core = -1;
+
+ for_each_online_cpu(cpu) {
+ this_core = topology_core_id(cpu);
+
+ if (this_core == core)
+ continue;
+
+ core = this_core;
+
+ /* get any CPU on this compute unit */
+ cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask);
+ }
+
+ on_each_cpu_mask(mask, do_read_registers_on_cu, data, true);
+
+ put_online_cpus();
+ free_cpumask_var(mask);
+
+ return 0;
+}
+
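
Editor's note: read_registers() picks one online CPU per compute unit and reads both MSRs on all of them with a single cross-call. The selection step on its own, with plain arrays standing in for topology_core_id() and the online mask:

#include <stdbool.h>
#include <stdio.h>

#define NCPUS 8

int main(void)
{
	/* cpu -> compute-unit id, as topology_core_id() would report */
	int core_id[NCPUS] = { 0, 0, 1, 1, 2, 2, 3, 3 };
	bool online[NCPUS] = { 1, 1, 1, 0, 0, 1, 1, 1 };
	bool mask[NCPUS] = { 0 };
	int cpu, core = -1;

	for (cpu = 0; cpu < NCPUS; cpu++) {
		if (!online[cpu] || core_id[cpu] == core)
			continue;
		core = core_id[cpu];
		mask[cpu] = true;  /* representative for this compute unit */
	}

	for (cpu = 0; cpu < NCPUS; cpu++)
		if (mask[cpu])
			printf("IPI target: cpu %d (cu %d)\n",
			       cpu, core_id[cpu]);
	return 0;
}
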
+static ssize_t acc_show_power(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fam15h_power_data *data = dev_get_drvdata(dev);
+ u64 prev_cu_acc_power[MAX_CUS], prev_ptsc[MAX_CUS],
+ jdelta[MAX_CUS];
+ u64 tdelta, avg_acc;
+ int cu, cu_num, ret;
+ signed long leftover;
+
+ /*
+ * With the new x86 topology modelling, x86_max_cores is the
+ * compute unit number.
+ */
+ cu_num = boot_cpu_data.x86_max_cores;
+
+ ret = read_registers(data);
+ if (ret)
+ return 0;
+
+ for (cu = 0; cu < cu_num; cu++) {
+ prev_cu_acc_power[cu] = data->cu_acc_power[cu];
+ prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
+ }
+
+ leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
+ if (leftover)
+ return 0;
+
+ ret = read_registers(data);
+ if (ret)
+ return 0;
+
+ for (cu = 0, avg_acc = 0; cu < cu_num; cu++) {
+ /* check if current compute unit is online */
+ if (data->cu_on[cu] == 0)
+ continue;
+
+ if (data->cu_acc_power[cu] < prev_cu_acc_power[cu]) {
+ jdelta[cu] = data->max_cu_acc_power + data->cu_acc_power[cu];
+ jdelta[cu] -= prev_cu_acc_power[cu];
+ } else {
+ jdelta[cu] = data->cu_acc_power[cu] - prev_cu_acc_power[cu];
+ }
+ tdelta = data->cpu_sw_pwr_ptsc[cu] - prev_ptsc[cu];
+ jdelta[cu] *= data->cpu_pwr_sample_ratio * 1000;
+ do_div(jdelta[cu], tdelta);
+
+ /* the unit is microWatt */
+ avg_acc += jdelta[cu];
+ }
+
+ return sprintf(buf, "%llu\n", (unsigned long long)avg_acc);
+}
+static DEVICE_ATTR(power1_average, S_IRUGO, acc_show_power, NULL);
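
Editor's note: acc_show_power() samples the accumulator and the PTSC twice, power_period milliseconds apart, and converts the deltas to microwatts, adding max_cu_acc_power back in when the accumulator wraps. The arithmetic in isolation; all numbers here are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/*
 * avg power [uW] = delta_acc * sample_ratio * 1000 / delta_ptsc,
 * with accumulator wraparound handled via the max accumulator value.
 */
static uint64_t avg_power_uw(uint64_t prev_acc, uint64_t cur_acc,
			     uint64_t max_acc, uint64_t prev_ptsc,
			     uint64_t cur_ptsc, uint64_t ratio)
{
	uint64_t dacc;

	if (cur_acc < prev_acc)          /* accumulator wrapped */
		dacc = max_acc + cur_acc - prev_acc;
	else
		dacc = cur_acc - prev_acc;

	return dacc * ratio * 1000 / (cur_ptsc - prev_ptsc);
}

int main(void)
{
	/* wrapped case: accumulator went 900 -> 100 with max 1000 */
	printf("%llu uW\n", (unsigned long long)
	       avg_power_uw(900, 100, 1000, 0, 4000, 16));  /* 800 */
	return 0;
}
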
+
+static ssize_t acc_show_power_period(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fam15h_power_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%lu\n", data->power_period);
+}
+
+static ssize_t acc_set_power_period(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fam15h_power_data *data = dev_get_drvdata(dev);
+ unsigned long temp;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &temp);
+ if (ret)
+ return ret;
+
+ if (temp > MAX_INTERVAL)
+ return -EINVAL;
+
+ /* the interval value should be greater than 0 */
+ if (temp <= 0)
+ return -EINVAL;
+
+ data->power_period = temp;
+
+ return count;
+}
+static DEVICE_ATTR(power1_average_interval, S_IRUGO | S_IWUSR,
+ acc_show_power_period, acc_set_power_period);
+
static int fam15h_power_init_attrs(struct pci_dev *pdev,
struct fam15h_power_data *data)
{
@@ -137,6 +316,10 @@ static int fam15h_power_init_attrs(struct pci_dev *pdev,
(c->x86_model >= 0x60 && c->x86_model <= 0x7f)))
n += 1;
+ /* check if processor supports accumulated power */
+ if (boot_cpu_has(X86_FEATURE_ACC_POWER))
+ n += 2;
+
fam15h_power_attrs = devm_kcalloc(&pdev->dev, n,
sizeof(*fam15h_power_attrs),
GFP_KERNEL);
@@ -151,6 +334,11 @@ static int fam15h_power_init_attrs(struct pci_dev *pdev,
(c->x86_model >= 0x60 && c->x86_model <= 0x7f)))
fam15h_power_attrs[n++] = &dev_attr_power1_input.attr;
+ if (boot_cpu_has(X86_FEATURE_ACC_POWER)) {
+ fam15h_power_attrs[n++] = &dev_attr_power1_average.attr;
+ fam15h_power_attrs[n++] = &dev_attr_power1_average_interval.attr;
+ }
+
data->group.attrs = fam15h_power_attrs;
return 0;
@@ -216,7 +404,7 @@ static int fam15h_power_resume(struct pci_dev *pdev)
static int fam15h_power_init_data(struct pci_dev *f4,
struct fam15h_power_data *data)
{
- u32 val, eax, ebx, ecx, edx;
+ u32 val;
u64 tmp;
int ret;
@@ -243,10 +431,9 @@ static int fam15h_power_init_data(struct pci_dev *f4,
if (ret)
return ret;
- cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
/* CPUID Fn8000_0007:EDX[12] indicates to support accumulated power */
- if (!(edx & BIT(12)))
+ if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
return 0;
/*
@@ -254,7 +441,7 @@ static int fam15h_power_init_data(struct pci_dev *f4,
* sample period to the PTSC counter period by executing CPUID
* Fn8000_0007:ECX
*/
- data->cpu_pwr_sample_ratio = ecx;
+ data->cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);
if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &tmp)) {
pr_err("Failed to read max compute unit power accumulator MSR\n");
@@ -263,7 +450,15 @@ static int fam15h_power_init_data(struct pci_dev *f4,
data->max_cu_acc_power = tmp;
- return 0;
+ /*
+ * Milliseconds are a reasonable interval for the measurement.
+ * But it shouldn't be set too long here, because several seconds
+ * would cause the read function to hang. So set the default
+ * interval to 10 ms.
+ */
+ data->power_period = 10;
+
+ return read_registers(data);
}
static int fam15h_power_probe(struct pci_dev *pdev,
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 1896e26df..730d84028 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -13,6 +13,7 @@
* Supports: IT8603E Super I/O chip w/LPC interface
* IT8620E Super I/O chip w/LPC interface
* IT8623E Super I/O chip w/LPC interface
+ * IT8628E Super I/O chip w/LPC interface
* IT8705F Super I/O chip w/LPC interface
* IT8712F Super I/O chip w/LPC interface
* IT8716F Super I/O chip w/LPC interface
@@ -44,14 +45,11 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -72,17 +70,18 @@
enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8732,
it8771, it8772, it8781, it8782, it8783, it8786, it8790, it8603,
- it8620 };
+ it8620, it8628 };
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-static struct platform_device *pdev;
+static struct platform_device *it87_pdev[2];
+
+#define REG_2E 0x2e /* The register to read/write */
+#define REG_4E 0x4e /* Secondary register to read/write */
-#define REG 0x2e /* The register to read/write */
#define DEV 0x07 /* Register: Logical device select */
-#define VAL 0x2f /* The value to read/write */
#define PME 0x04 /* The device with the fan registers in it */
/* The device with the IT8718F/IT8720F VID value in it */
@@ -91,54 +90,55 @@ static struct platform_device *pdev;
#define DEVID 0x20 /* Register: Device ID */
#define DEVREV 0x22 /* Register: Device Revision */
-static inline int superio_inb(int reg)
+static inline int superio_inb(int ioreg, int reg)
{
- outb(reg, REG);
- return inb(VAL);
+ outb(reg, ioreg);
+ return inb(ioreg + 1);
}
-static inline void superio_outb(int reg, int val)
+static inline void superio_outb(int ioreg, int reg, int val)
{
- outb(reg, REG);
- outb(val, VAL);
+ outb(reg, ioreg);
+ outb(val, ioreg + 1);
}
-static int superio_inw(int reg)
+static int superio_inw(int ioreg, int reg)
{
int val;
- outb(reg++, REG);
- val = inb(VAL) << 8;
- outb(reg, REG);
- val |= inb(VAL);
+
+ outb(reg++, ioreg);
+ val = inb(ioreg + 1) << 8;
+ outb(reg, ioreg);
+ val |= inb(ioreg + 1);
return val;
}
-static inline void superio_select(int ldn)
+static inline void superio_select(int ioreg, int ldn)
{
- outb(DEV, REG);
- outb(ldn, VAL);
+ outb(DEV, ioreg);
+ outb(ldn, ioreg + 1);
}
-static inline int superio_enter(void)
+static inline int superio_enter(int ioreg)
{
/*
- * Try to reserve REG and REG + 1 for exclusive access.
+ * Try to reserve ioreg and ioreg + 1 for exclusive access.
*/
- if (!request_muxed_region(REG, 2, DRVNAME))
+ if (!request_muxed_region(ioreg, 2, DRVNAME))
return -EBUSY;
- outb(0x87, REG);
- outb(0x01, REG);
- outb(0x55, REG);
- outb(0x55, REG);
+ outb(0x87, ioreg);
+ outb(0x01, ioreg);
+ outb(0x55, ioreg);
+ outb(ioreg == REG_4E ? 0xaa : 0x55, ioreg);
return 0;
}
-static inline void superio_exit(void)
+static inline void superio_exit(int ioreg)
{
- outb(0x02, REG);
- outb(0x02, VAL);
- release_region(REG, 2);
+ outb(0x02, ioreg);
+ outb(0x02, ioreg + 1);
+ release_region(ioreg, 2);
}
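
Editor's note: the reworked helpers parameterize the Super I/O address port (0x2e or 0x4e); the data port is always ioreg + 1, and configuration mode is entered with the 0x87/0x01/0x55 key, whose last byte becomes 0xaa on the secondary port. A user-space sketch using raw port I/O (x86 Linux, requires root and iopl(); poking Super I/O registers from user space is intrusive, so treat this strictly as illustration):

#include <stdio.h>
#include <sys/io.h>

static int superio_inb(int ioreg, int reg)
{
	outb(reg, ioreg);
	return inb(ioreg + 1);       /* data port = address port + 1 */
}

static void superio_enter(int ioreg)
{
	outb(0x87, ioreg);
	outb(0x01, ioreg);
	outb(0x55, ioreg);
	outb(ioreg == 0x4e ? 0xaa : 0x55, ioreg);  /* key differs on 0x4e */
}

static void superio_exit(int ioreg)
{
	outb(0x02, ioreg);
	outb(0x02, ioreg + 1);
}

int main(void)
{
	int ioreg = 0x2e, hi, lo;

	if (iopl(3)) {
		perror("iopl");
		return 1;
	}
	superio_enter(ioreg);
	hi = superio_inb(ioreg, 0x20);  /* DEVID, high then low byte */
	lo = superio_inb(ioreg, 0x21);
	printf("device id: %04x\n", hi << 8 | lo);
	superio_exit(ioreg);
	return 0;
}
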
/* Logical device 4 registers */
@@ -161,6 +161,7 @@ static inline void superio_exit(void)
#define IT8603E_DEVID 0x8603
#define IT8620E_DEVID 0x8620
#define IT8623E_DEVID 0x8623
+#define IT8628E_DEVID 0x8628
#define IT87_ACT_REG 0x30
#define IT87_BASE_REG 0x60
@@ -168,6 +169,7 @@ static inline void superio_exit(void)
#define IT87_SIO_GPIO1_REG 0x25
#define IT87_SIO_GPIO2_REG 0x26
#define IT87_SIO_GPIO3_REG 0x27
+#define IT87_SIO_GPIO4_REG 0x28
#define IT87_SIO_GPIO5_REG 0x29
#define IT87_SIO_PINX1_REG 0x2a /* Pin selection */
#define IT87_SIO_PINX2_REG 0x2c /* Pin selection */
@@ -217,7 +219,12 @@ static bool fix_pwm_polarity;
#define IT87_REG_FAN_DIV 0x0b
#define IT87_REG_FAN_16BIT 0x0c
-/* Monitors: 9 voltage (0 to 7, battery), 3 temp (1 to 3), 3 fan (1 to 3) */
+/*
+ * Monitors:
+ * - up to 13 voltage (0 to 7, battery, avcc, 10 to 12)
+ * - up to 6 temp (1 to 6)
+ * - up to 6 fan (1 to 6)
+ */
static const u8 IT87_REG_FAN[] = { 0x0d, 0x0e, 0x0f, 0x80, 0x82, 0x4c };
static const u8 IT87_REG_FAN_MIN[] = { 0x10, 0x11, 0x12, 0x84, 0x86, 0x4e };
@@ -227,10 +234,12 @@ static const u8 IT87_REG_TEMP_OFFSET[] = { 0x56, 0x57, 0x59 };
#define IT87_REG_FAN_MAIN_CTRL 0x13
#define IT87_REG_FAN_CTL 0x14
-#define IT87_REG_PWM(nr) (0x15 + (nr))
-#define IT87_REG_PWM_DUTY(nr) (0x63 + (nr) * 8)
+static const u8 IT87_REG_PWM[] = { 0x15, 0x16, 0x17, 0x7f, 0xa7, 0xaf };
+static const u8 IT87_REG_PWM_DUTY[] = { 0x63, 0x6b, 0x73, 0x7b, 0xa3, 0xab };
+
+static const u8 IT87_REG_VIN[] = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26,
+ 0x27, 0x28, 0x2f, 0x2c, 0x2d, 0x2e };
-#define IT87_REG_VIN(nr) (0x20 + (nr))
#define IT87_REG_TEMP(nr) (0x29 + (nr))
#define IT87_REG_VIN_MAX(nr) (0x30 + (nr) * 2)
@@ -245,30 +254,48 @@ static const u8 IT87_REG_TEMP_OFFSET[] = { 0x56, 0x57, 0x59 };
#define IT87_REG_CHIPID 0x58
-#define IT87_REG_AUTO_TEMP(nr, i) (0x60 + (nr) * 8 + (i))
-#define IT87_REG_AUTO_PWM(nr, i) (0x65 + (nr) * 8 + (i))
+static const u8 IT87_REG_AUTO_BASE[] = { 0x60, 0x68, 0x70, 0x78, 0xa0, 0xa8 };
+
+#define IT87_REG_AUTO_TEMP(nr, i) (IT87_REG_AUTO_BASE[nr] + (i))
+#define IT87_REG_AUTO_PWM(nr, i) (IT87_REG_AUTO_BASE[nr] + 5 + (i))
+
+#define IT87_REG_TEMP456_ENABLE 0x77
+
+#define NUM_VIN ARRAY_SIZE(IT87_REG_VIN)
+#define NUM_VIN_LIMIT 8
+#define NUM_TEMP 6
+#define NUM_TEMP_OFFSET ARRAY_SIZE(IT87_REG_TEMP_OFFSET)
+#define NUM_TEMP_LIMIT 3
+#define NUM_FAN ARRAY_SIZE(IT87_REG_FAN)
+#define NUM_FAN_DIV 3
+#define NUM_PWM ARRAY_SIZE(IT87_REG_PWM)
+#define NUM_AUTO_PWM ARRAY_SIZE(IT87_REG_PWM)
struct it87_devices {
const char *name;
const char * const suffix;
- u16 features;
+ u32 features;
u8 peci_mask;
u8 old_peci_mask;
};
-#define FEAT_12MV_ADC (1 << 0)
-#define FEAT_NEWER_AUTOPWM (1 << 1)
-#define FEAT_OLD_AUTOPWM (1 << 2)
-#define FEAT_16BIT_FANS (1 << 3)
-#define FEAT_TEMP_OFFSET (1 << 4)
-#define FEAT_TEMP_PECI (1 << 5)
-#define FEAT_TEMP_OLD_PECI (1 << 6)
-#define FEAT_FAN16_CONFIG (1 << 7) /* Need to enable 16-bit fans */
-#define FEAT_FIVE_FANS (1 << 8) /* Supports five fans */
-#define FEAT_VID (1 << 9) /* Set if chip supports VID */
-#define FEAT_IN7_INTERNAL (1 << 10) /* Set if in7 is internal */
-#define FEAT_SIX_FANS (1 << 11) /* Supports six fans */
-#define FEAT_10_9MV_ADC (1 << 12)
+#define FEAT_12MV_ADC BIT(0)
+#define FEAT_NEWER_AUTOPWM BIT(1)
+#define FEAT_OLD_AUTOPWM BIT(2)
+#define FEAT_16BIT_FANS BIT(3)
+#define FEAT_TEMP_OFFSET BIT(4)
+#define FEAT_TEMP_PECI BIT(5)
+#define FEAT_TEMP_OLD_PECI BIT(6)
+#define FEAT_FAN16_CONFIG BIT(7) /* Need to enable 16-bit fans */
+#define FEAT_FIVE_FANS BIT(8) /* Supports five fans */
+#define FEAT_VID BIT(9) /* Set if chip supports VID */
+#define FEAT_IN7_INTERNAL BIT(10) /* Set if in7 is internal */
+#define FEAT_SIX_FANS BIT(11) /* Supports six fans */
+#define FEAT_10_9MV_ADC BIT(12)
+#define FEAT_AVCC3 BIT(13) /* Chip supports in9/AVCC3 */
+#define FEAT_SIX_PWM BIT(14) /* Chip supports 6 pwm chn */
+#define FEAT_PWM_FREQ2 BIT(15) /* Separate pwm freq 2 */
+#define FEAT_SIX_TEMP BIT(16) /* Up to 6 temp sensors */
static const struct it87_devices it87_devices[] = {
[it87] = {
@@ -286,20 +313,22 @@ static const struct it87_devices it87_devices[] = {
.name = "it8716",
.suffix = "F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET | FEAT_VID
- | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS,
+ | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS | FEAT_PWM_FREQ2,
},
[it8718] = {
.name = "it8718",
.suffix = "F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET | FEAT_VID
- | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS,
+ | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS
+ | FEAT_PWM_FREQ2,
.old_peci_mask = 0x4,
},
[it8720] = {
.name = "it8720",
.suffix = "F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET | FEAT_VID
- | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS,
+ | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS
+ | FEAT_PWM_FREQ2,
.old_peci_mask = 0x4,
},
[it8721] = {
@@ -307,7 +336,8 @@ static const struct it87_devices it87_devices[] = {
.suffix = "F",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI
- | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS | FEAT_IN7_INTERNAL,
+ | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS | FEAT_IN7_INTERNAL
+ | FEAT_PWM_FREQ2,
.peci_mask = 0x05,
.old_peci_mask = 0x02, /* Actually reports PCH */
},
@@ -316,7 +346,7 @@ static const struct it87_devices it87_devices[] = {
.suffix = "F",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_FIVE_FANS
- | FEAT_IN7_INTERNAL,
+ | FEAT_IN7_INTERNAL | FEAT_PWM_FREQ2,
.peci_mask = 0x07,
},
[it8732] = {
@@ -332,7 +362,8 @@ static const struct it87_devices it87_devices[] = {
.name = "it8771",
.suffix = "E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
- | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+ | FEAT_PWM_FREQ2,
/* PECI: guesswork */
/* 12mV ADC (OHM) */
/* 16 bit fans (OHM) */
@@ -343,7 +374,8 @@ static const struct it87_devices it87_devices[] = {
.name = "it8772",
.suffix = "E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
- | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+ | FEAT_PWM_FREQ2,
/* PECI (coreboot) */
/* 12mV ADC (HWSensors4, OHM) */
/* 16 bit fans (HWSensors4, OHM) */
@@ -354,42 +386,45 @@ static const struct it87_devices it87_devices[] = {
.name = "it8781",
.suffix = "F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
- | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG,
+ | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_PWM_FREQ2,
.old_peci_mask = 0x4,
},
[it8782] = {
.name = "it8782",
.suffix = "F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
- | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG,
+ | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_PWM_FREQ2,
.old_peci_mask = 0x4,
},
[it8783] = {
.name = "it8783",
.suffix = "E/F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
- | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG,
+ | FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_PWM_FREQ2,
.old_peci_mask = 0x4,
},
[it8786] = {
.name = "it8786",
.suffix = "E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
- | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+ | FEAT_PWM_FREQ2,
.peci_mask = 0x07,
},
[it8790] = {
.name = "it8790",
.suffix = "E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
- | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+ | FEAT_PWM_FREQ2,
.peci_mask = 0x07,
},
[it8603] = {
.name = "it8603",
.suffix = "E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
- | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL,
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
+ | FEAT_AVCC3 | FEAT_PWM_FREQ2,
.peci_mask = 0x07,
},
[it8620] = {
@@ -397,7 +432,17 @@ static const struct it87_devices it87_devices[] = {
.suffix = "E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_SIX_FANS
- | FEAT_IN7_INTERNAL,
+ | FEAT_IN7_INTERNAL | FEAT_SIX_PWM | FEAT_PWM_FREQ2
+ | FEAT_SIX_TEMP,
+ .peci_mask = 0x07,
+ },
+ [it8628] = {
+ .name = "it8628",
+ .suffix = "E",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_SIX_FANS
+ | FEAT_IN7_INTERNAL | FEAT_SIX_PWM | FEAT_PWM_FREQ2
+ | FEAT_SIX_TEMP,
.peci_mask = 0x07,
},
};
@@ -409,16 +454,20 @@ static const struct it87_devices it87_devices[] = {
#define has_old_autopwm(data) ((data)->features & FEAT_OLD_AUTOPWM)
#define has_temp_offset(data) ((data)->features & FEAT_TEMP_OFFSET)
#define has_temp_peci(data, nr) (((data)->features & FEAT_TEMP_PECI) && \
- ((data)->peci_mask & (1 << nr)))
+ ((data)->peci_mask & BIT(nr)))
#define has_temp_old_peci(data, nr) \
(((data)->features & FEAT_TEMP_OLD_PECI) && \
- ((data)->old_peci_mask & (1 << nr)))
+ ((data)->old_peci_mask & BIT(nr)))
#define has_fan16_config(data) ((data)->features & FEAT_FAN16_CONFIG)
#define has_five_fans(data) ((data)->features & (FEAT_FIVE_FANS | \
FEAT_SIX_FANS))
#define has_vid(data) ((data)->features & FEAT_VID)
#define has_in7_internal(data) ((data)->features & FEAT_IN7_INTERNAL)
#define has_six_fans(data) ((data)->features & FEAT_SIX_FANS)
+#define has_avcc3(data) ((data)->features & FEAT_AVCC3)
+#define has_six_pwm(data) ((data)->features & FEAT_SIX_PWM)
+#define has_pwm_freq2(data) ((data)->features & FEAT_PWM_FREQ2)
+#define has_six_temp(data) ((data)->features & FEAT_SIX_TEMP)
struct it87_sio_data {
enum chips type;
@@ -440,7 +489,7 @@ struct it87_sio_data {
* The structure is dynamically allocated.
*/
struct it87_data {
- struct device *hwmon_dev;
+ const struct attribute_group *groups[7];
enum chips type;
u16 features;
u8 peci_mask;
@@ -453,17 +502,21 @@ struct it87_data {
unsigned long last_updated; /* In jiffies */
u16 in_scaled; /* Internal voltage sensors are scaled */
- u8 in[10][3]; /* [nr][0]=in, [1]=min, [2]=max */
+ u16 in_internal; /* Bitfield, internal sensors (for labels) */
+ u16 has_in; /* Bitfield, voltage sensors enabled */
+ u8 in[NUM_VIN][3]; /* [nr][0]=in, [1]=min, [2]=max */
u8 has_fan; /* Bitfield, fans enabled */
- u16 fan[6][2]; /* Register values, [nr][0]=fan, [1]=min */
+ u16 fan[NUM_FAN][2]; /* Register values, [nr][0]=fan, [1]=min */
u8 has_temp; /* Bitfield, temp sensors enabled */
- s8 temp[3][4]; /* [nr][0]=temp, [1]=min, [2]=max, [3]=offset */
+ s8 temp[NUM_TEMP][4]; /* [nr][0]=temp, [1]=min, [2]=max, [3]=offset */
u8 sensor; /* Register value (IT87_REG_TEMP_ENABLE) */
u8 extra; /* Register value (IT87_REG_TEMP_EXTRA) */
- u8 fan_div[3]; /* Register encoding, shifted right */
+ u8 fan_div[NUM_FAN_DIV];/* Register encoding, shifted right */
+ bool has_vid; /* True if VID supported */
u8 vid; /* Register encoding, combined */
u8 vrm;
u32 alarms; /* Register encoding, combined */
+ bool has_beep; /* true if beep supported */
u8 beeps; /* Register encoding */
u8 fan_main_ctrl; /* Register value */
u8 fan_ctl; /* Register value */
@@ -478,13 +531,14 @@ struct it87_data {
* is no longer needed, but it is still done to keep the driver
* simple.
*/
- u8 pwm_ctrl[3]; /* Register value */
- u8 pwm_duty[3]; /* Manual PWM value set by user */
- u8 pwm_temp_map[3]; /* PWM to temp. chan. mapping (bits 1-0) */
+ u8 has_pwm; /* Bitfield, pwm control enabled */
+ u8 pwm_ctrl[NUM_PWM]; /* Register value */
+ u8 pwm_duty[NUM_PWM]; /* Manual PWM value set by user */
+ u8 pwm_temp_map[NUM_PWM];/* PWM to temp. chan. mapping (bits 1-0) */
/* Automatic fan speed control registers */
- u8 auto_pwm[3][4]; /* [nr][3] is hard-coded */
- s8 auto_temp[3][5]; /* [nr][0] is point1_temp_hyst */
+ u8 auto_pwm[NUM_AUTO_PWM][4]; /* [nr][3] is hard-coded */
+ s8 auto_temp[NUM_AUTO_PWM][5]; /* [nr][0] is point1_temp_hyst */
};
static int adc_lsb(const struct it87_data *data, int nr)
@@ -497,7 +551,7 @@ static int adc_lsb(const struct it87_data *data, int nr)
lsb = 109;
else
lsb = 160;
- if (data->in_scaled & (1 << nr))
+ if (data->in_scaled & BIT(nr))
lsb <<= 1;
return lsb;
}
@@ -554,15 +608,16 @@ static int pwm_from_reg(const struct it87_data *data, u8 reg)
return (reg & 0x7f) << 1;
}
-
static int DIV_TO_REG(int val)
{
int answer = 0;
+
while (answer < 7 && (val >>= 1))
answer++;
return answer;
}
-#define DIV_FROM_REG(val) (1 << (val))
+
+#define DIV_FROM_REG(val) BIT(val)
/*
* PWM base frequencies. The frequency has to be divided by either 128 or 256,
@@ -585,32 +640,204 @@ static const unsigned int pwm_freq[8] = {
750000,
};
-static int it87_probe(struct platform_device *pdev);
-static int it87_remove(struct platform_device *pdev);
+/*
+ * Must be called with data->update_lock held, except during initialization.
+ * We ignore the IT87 BUSY flag at this moment - waiting for it could
+ * lead to deadlocks, would slow down IT87 accesses, and should not be
+ * necessary.
+ */
+static int it87_read_value(struct it87_data *data, u8 reg)
+{
+ outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
+ return inb_p(data->addr + IT87_DATA_REG_OFFSET);
+}
-static int it87_read_value(struct it87_data *data, u8 reg);
-static void it87_write_value(struct it87_data *data, u8 reg, u8 value);
-static struct it87_data *it87_update_device(struct device *dev);
-static int it87_check_pwm(struct device *dev);
-static void it87_init_device(struct platform_device *pdev);
+/*
+ * Must be called with data->update_lock held, except during initialization.
+ * We ignore the IT87 BUSY flag at this moment - waiting for it could
+ * lead to deadlocks, would slow down IT87 accesses, and should not be
+ * necessary.
+ */
+static void it87_write_value(struct it87_data *data, u8 reg, u8 value)
+{
+ outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
+ outb_p(value, data->addr + IT87_DATA_REG_OFFSET);
+}
+static void it87_update_pwm_ctrl(struct it87_data *data, int nr)
+{
+ data->pwm_ctrl[nr] = it87_read_value(data, IT87_REG_PWM[nr]);
+ if (has_newer_autopwm(data)) {
+ data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
+ data->pwm_duty[nr] = it87_read_value(data,
+ IT87_REG_PWM_DUTY[nr]);
+ } else {
+ if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */
+ data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
+ else /* Manual mode */
+ data->pwm_duty[nr] = data->pwm_ctrl[nr] & 0x7f;
+ }
-static struct platform_driver it87_driver = {
- .driver = {
- .name = DRVNAME,
- },
- .probe = it87_probe,
- .remove = it87_remove,
-};
+ if (has_old_autopwm(data)) {
+ int i;
+
+ for (i = 0; i < 5 ; i++)
+ data->auto_temp[nr][i] = it87_read_value(data,
+ IT87_REG_AUTO_TEMP(nr, i));
+ for (i = 0; i < 3 ; i++)
+ data->auto_pwm[nr][i] = it87_read_value(data,
+ IT87_REG_AUTO_PWM(nr, i));
+ } else if (has_newer_autopwm(data)) {
+ int i;
+
+ /*
+ * 0: temperature hysteresis (base + 5)
+ * 1: fan off temperature (base + 0)
+ * 2: fan start temperature (base + 1)
+ * 3: fan max temperature (base + 2)
+ */
+ data->auto_temp[nr][0] =
+ it87_read_value(data, IT87_REG_AUTO_TEMP(nr, 5));
+
+ for (i = 0; i < 3 ; i++)
+ data->auto_temp[nr][i + 1] =
+ it87_read_value(data,
+ IT87_REG_AUTO_TEMP(nr, i));
+ /*
+ * 0: start pwm value (base + 3)
+ * 1: pwm slope (base + 4, 1/8th pwm)
+ */
+ data->auto_pwm[nr][0] =
+ it87_read_value(data, IT87_REG_AUTO_TEMP(nr, 3));
+ data->auto_pwm[nr][1] =
+ it87_read_value(data, IT87_REG_AUTO_TEMP(nr, 4));
+ }
+}
+
+static struct it87_data *it87_update_device(struct device *dev)
+{
+ struct it87_data *data = dev_get_drvdata(dev);
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ + HZ / 2) ||
+ !data->valid) {
+ if (update_vbat) {
+ /*
+ * Cleared after each update, so reenable. Value
+ * returned by this read will be previous value
+ */
+ it87_write_value(data, IT87_REG_CONFIG,
+ it87_read_value(data, IT87_REG_CONFIG) | 0x40);
+ }
+ for (i = 0; i < NUM_VIN; i++) {
+ if (!(data->has_in & BIT(i)))
+ continue;
+
+ data->in[i][0] =
+ it87_read_value(data, IT87_REG_VIN[i]);
+
+ /* VBAT and AVCC don't have limit registers */
+ if (i >= NUM_VIN_LIMIT)
+ continue;
+
+ data->in[i][1] =
+ it87_read_value(data, IT87_REG_VIN_MIN(i));
+ data->in[i][2] =
+ it87_read_value(data, IT87_REG_VIN_MAX(i));
+ }
+
+ for (i = 0; i < NUM_FAN; i++) {
+ /* Skip disabled fans */
+ if (!(data->has_fan & BIT(i)))
+ continue;
+
+ data->fan[i][1] =
+ it87_read_value(data, IT87_REG_FAN_MIN[i]);
+ data->fan[i][0] = it87_read_value(data,
+ IT87_REG_FAN[i]);
+ /* Add high byte if in 16-bit mode */
+ if (has_16bit_fans(data)) {
+ data->fan[i][0] |= it87_read_value(data,
+ IT87_REG_FANX[i]) << 8;
+ data->fan[i][1] |= it87_read_value(data,
+ IT87_REG_FANX_MIN[i]) << 8;
+ }
+ }
+ for (i = 0; i < NUM_TEMP; i++) {
+ if (!(data->has_temp & BIT(i)))
+ continue;
+ data->temp[i][0] =
+ it87_read_value(data, IT87_REG_TEMP(i));
+
+ if (has_temp_offset(data) && i < NUM_TEMP_OFFSET)
+ data->temp[i][3] =
+ it87_read_value(data,
+ IT87_REG_TEMP_OFFSET[i]);
+
+ if (i >= NUM_TEMP_LIMIT)
+ continue;
+
+ data->temp[i][1] =
+ it87_read_value(data, IT87_REG_TEMP_LOW(i));
+ data->temp[i][2] =
+ it87_read_value(data, IT87_REG_TEMP_HIGH(i));
+ }
+
+ /* Newer chips don't have clock dividers */
+ if ((data->has_fan & 0x07) && !has_16bit_fans(data)) {
+ i = it87_read_value(data, IT87_REG_FAN_DIV);
+ data->fan_div[0] = i & 0x07;
+ data->fan_div[1] = (i >> 3) & 0x07;
+ data->fan_div[2] = (i & 0x40) ? 3 : 1;
+ }
+
+ data->alarms =
+ it87_read_value(data, IT87_REG_ALARM1) |
+ (it87_read_value(data, IT87_REG_ALARM2) << 8) |
+ (it87_read_value(data, IT87_REG_ALARM3) << 16);
+ data->beeps = it87_read_value(data, IT87_REG_BEEP_ENABLE);
+
+ data->fan_main_ctrl = it87_read_value(data,
+ IT87_REG_FAN_MAIN_CTRL);
+ data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL);
+ for (i = 0; i < NUM_PWM; i++) {
+ if (!(data->has_pwm & BIT(i)))
+ continue;
+ it87_update_pwm_ctrl(data, i);
+ }
+
+ data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
+ data->extra = it87_read_value(data, IT87_REG_TEMP_EXTRA);
+ /*
+ * The IT8705F does not have VID capability.
+ * The IT8718F and later don't use IT87_REG_VID for the
+ * same purpose.
+ */
+ if (data->type == it8712 || data->type == it8716) {
+ data->vid = it87_read_value(data, IT87_REG_VID);
+ /*
+ * The older IT8712F revisions had only 5 VID pins,
+ * but we assume it is always safe to read 6 bits.
+ */
+ data->vid &= 0x3f;
+ }
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
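
Editor's note: it87_update_device() is a read-through cache: under update_lock it refreshes the register shadow at most once per 1.5 s (HZ + HZ/2), and every sysfs show routine goes through it. The caching skeleton on its own, with CLOCK_MONOTONIC standing in for jiffies and one value standing in for the whole register set:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct cache {
	pthread_mutex_t lock;
	struct timespec last;
	bool valid;
	int value;              /* stands in for all the it87 registers */
};

static int slow_read(void) { return 42; }  /* pretend register read */

static int cached_read(struct cache *c)
{
	struct timespec now;
	int v;

	pthread_mutex_lock(&c->lock);
	clock_gettime(CLOCK_MONOTONIC, &now);
	/* coarse stand-in for time_after(jiffies, last + HZ + HZ/2) */
	if (!c->valid || now.tv_sec - c->last.tv_sec >= 2) {
		c->value = slow_read();
		c->last = now;
		c->valid = true;
	}
	v = c->value;
	pthread_mutex_unlock(&c->lock);
	return v;
}

int main(void)
{
	struct cache c = { PTHREAD_MUTEX_INITIALIZER, { 0, 0 }, false, 0 };

	printf("%d\n", cached_read(&c));  /* slow path */
	printf("%d\n", cached_read(&c));  /* served from the cache */
	return 0;
}
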
static ssize_t show_in(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- int nr = sattr->nr;
+ struct it87_data *data = it87_update_device(dev);
int index = sattr->index;
+ int nr = sattr->nr;
- struct it87_data *data = it87_update_device(dev);
return sprintf(buf, "%d\n", in_from_reg(data, nr, data->in[nr][index]));
}
@@ -618,10 +845,9 @@ static ssize_t set_in(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
- int nr = sattr->nr;
- int index = sattr->index;
-
struct it87_data *data = dev_get_drvdata(dev);
+ int index = sattr->index;
+ int nr = sattr->nr;
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
@@ -687,8 +913,11 @@ static SENSOR_DEVICE_ATTR_2(in7_max, S_IRUGO | S_IWUSR, show_in, set_in,
static SENSOR_DEVICE_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 8, 0);
static SENSOR_DEVICE_ATTR_2(in9_input, S_IRUGO, show_in, NULL, 9, 0);
+static SENSOR_DEVICE_ATTR_2(in10_input, S_IRUGO, show_in, NULL, 10, 0);
+static SENSOR_DEVICE_ATTR_2(in11_input, S_IRUGO, show_in, NULL, 11, 0);
+static SENSOR_DEVICE_ATTR_2(in12_input, S_IRUGO, show_in, NULL, 12, 0);
-/* 3 temperatures */
+/* Up to 6 temperatures */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -761,6 +990,9 @@ static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp,
2, 2);
static SENSOR_DEVICE_ATTR_2(temp3_offset, S_IRUGO | S_IWUSR, show_temp,
set_temp, 2, 3);
+static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 3, 0);
+static SENSOR_DEVICE_ATTR_2(temp5_input, S_IRUGO, show_temp, NULL, 4, 0);
+static SENSOR_DEVICE_ATTR_2(temp6_input, S_IRUGO, show_temp, NULL, 5, 0);
static ssize_t show_temp_type(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -771,8 +1003,8 @@ static ssize_t show_temp_type(struct device *dev, struct device_attribute *attr,
u8 reg = data->sensor; /* In case value is updated while used */
u8 extra = data->extra;
- if ((has_temp_peci(data, nr) && (reg >> 6 == nr + 1))
- || (has_temp_old_peci(data, nr) && (extra & 0x80)))
+ if ((has_temp_peci(data, nr) && (reg >> 6 == nr + 1)) ||
+ (has_temp_old_peci(data, nr) && (extra & 0x80)))
return sprintf(buf, "6\n"); /* Intel PECI */
if (reg & (1 << nr))
return sprintf(buf, "3\n"); /* thermal diode */
@@ -837,18 +1069,19 @@ static SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO | S_IWUSR, show_temp_type,
static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO | S_IWUSR, show_temp_type,
set_temp_type, 2);
-/* 3 Fans */
+/* 6 Fans */
static int pwm_mode(const struct it87_data *data, int nr)
{
- int ctrl = data->fan_main_ctrl & (1 << nr);
-
- if (ctrl == 0 && data->type != it8603) /* Full speed */
- return 0;
- if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */
- return 2;
- else /* Manual mode */
- return 1;
+ if (data->type != it8603 && nr < 3 && !(data->fan_main_ctrl & BIT(nr)))
+ return 0; /* Full speed */
+ if (data->pwm_ctrl[nr] & 0x80)
+ return 2; /* Automatic mode */
+ if ((data->type == it8603 || nr >= 3) &&
+ data->pwm_duty[nr] == pwm_to_reg(data, 0xff))
+ return 0; /* Full speed */
+
+ return 1; /* Manual mode */
}
static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
@@ -868,39 +1101,49 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
}
static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ struct it87_data *data = it87_update_device(dev);
int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
+ return sprintf(buf, "%lu\n", DIV_FROM_REG(data->fan_div[nr]));
}
+
static ssize_t show_pwm_enable(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ struct it87_data *data = it87_update_device(dev);
int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
return sprintf(buf, "%d\n", pwm_mode(data, nr));
}
+
static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ struct it87_data *data = it87_update_device(dev);
int nr = sensor_attr->index;
- struct it87_data *data = it87_update_device(dev);
return sprintf(buf, "%d\n",
pwm_from_reg(data, data->pwm_duty[nr]));
}
+
static ssize_t show_pwm_freq(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
struct it87_data *data = it87_update_device(dev);
- int index = (data->fan_ctl >> 4) & 0x07;
+ int nr = sensor_attr->index;
unsigned int freq;
+ int index;
+
+ if (has_pwm_freq2(data) && nr == 1)
+ index = (data->extra >> 4) & 0x07;
+ else
+ index = (data->fan_ctl >> 4) & 0x07;
freq = pwm_freq[index] / (has_newer_autopwm(data) ? 256 : 128);
@@ -953,12 +1196,11 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *attr,
}
static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
struct it87_data *data = dev_get_drvdata(dev);
+ int nr = sensor_attr->index;
unsigned long val;
int min;
u8 old;
@@ -1013,6 +1255,11 @@ static int check_trip_points(struct device *dev, int nr)
if (data->auto_pwm[nr][i] > data->auto_pwm[nr][i + 1])
err = -EINVAL;
}
+ } else if (has_newer_autopwm(data)) {
+ for (i = 1; i < 3; i++) {
+ if (data->auto_temp[nr][i] > data->auto_temp[nr][i + 1])
+ err = -EINVAL;
+ }
}
if (err) {
@@ -1023,13 +1270,12 @@ static int check_trip_points(struct device *dev, int nr)
return err;
}
-static ssize_t set_pwm_enable(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
struct it87_data *data = dev_get_drvdata(dev);
+ int nr = sensor_attr->index;
long val;
if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 2)
@@ -1041,21 +1287,30 @@ static ssize_t set_pwm_enable(struct device *dev,
return -EINVAL;
}
- /* IT8603E does not have on/off mode */
- if (val == 0 && data->type == it8603)
- return -EINVAL;
-
mutex_lock(&data->update_lock);
if (val == 0) {
- int tmp;
- /* make sure the fan is on when in on/off mode */
- tmp = it87_read_value(data, IT87_REG_FAN_CTL);
- it87_write_value(data, IT87_REG_FAN_CTL, tmp | (1 << nr));
- /* set on/off mode */
- data->fan_main_ctrl &= ~(1 << nr);
- it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
- data->fan_main_ctrl);
+ if (nr < 3 && data->type != it8603) {
+ int tmp;
+ /* make sure the fan is on when in on/off mode */
+ tmp = it87_read_value(data, IT87_REG_FAN_CTL);
+ it87_write_value(data, IT87_REG_FAN_CTL, tmp | BIT(nr));
+ /* set on/off mode */
+ data->fan_main_ctrl &= ~BIT(nr);
+ it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
+ data->fan_main_ctrl);
+ } else {
+ /* No on/off mode, set maximum pwm value */
+ data->pwm_duty[nr] = pwm_to_reg(data, 0xff);
+ it87_write_value(data, IT87_REG_PWM_DUTY[nr],
+ data->pwm_duty[nr]);
+ /* and set manual mode */
+ data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
+ data->pwm_temp_map[nr] :
+ data->pwm_duty[nr];
+ it87_write_value(data, IT87_REG_PWM[nr],
+ data->pwm_ctrl[nr]);
+ }
} else {
if (val == 1) /* Manual mode */
data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
@@ -1063,11 +1318,11 @@ static ssize_t set_pwm_enable(struct device *dev,
data->pwm_duty[nr];
else /* Automatic mode */
data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
- it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
+ it87_write_value(data, IT87_REG_PWM[nr], data->pwm_ctrl[nr]);
- if (data->type != it8603) {
+ if (data->type != it8603 && nr < 3) {
/* set SmartGuardian mode */
- data->fan_main_ctrl |= (1 << nr);
+ data->fan_main_ctrl |= BIT(nr);
it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
data->fan_main_ctrl);
}
@@ -1076,13 +1331,13 @@ static ssize_t set_pwm_enable(struct device *dev,
mutex_unlock(&data->update_lock);
return count;
}
+
static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
struct it87_data *data = dev_get_drvdata(dev);
+ int nr = sensor_attr->index;
long val;
if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 255)
@@ -1099,7 +1354,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
return -EBUSY;
}
data->pwm_duty[nr] = pwm_to_reg(data, val);
- it87_write_value(data, IT87_REG_PWM_DUTY(nr),
+ it87_write_value(data, IT87_REG_PWM_DUTY[nr],
data->pwm_duty[nr]);
} else {
data->pwm_duty[nr] = pwm_to_reg(data, val);
@@ -1109,17 +1364,20 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
*/
if (!(data->pwm_ctrl[nr] & 0x80)) {
data->pwm_ctrl[nr] = data->pwm_duty[nr];
- it87_write_value(data, IT87_REG_PWM(nr),
+ it87_write_value(data, IT87_REG_PWM[nr],
data->pwm_ctrl[nr]);
}
}
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t set_pwm_freq(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+
+static ssize_t set_pwm_freq(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
struct it87_data *data = dev_get_drvdata(dev);
+ int nr = sensor_attr->index;
unsigned long val;
int i;
@@ -1131,63 +1389,66 @@ static ssize_t set_pwm_freq(struct device *dev,
/* Search for the nearest available frequency */
for (i = 0; i < 7; i++) {
- if (val > (pwm_freq[i] + pwm_freq[i+1]) / 2)
+ if (val > (pwm_freq[i] + pwm_freq[i + 1]) / 2)
break;
}
mutex_lock(&data->update_lock);
- data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL) & 0x8f;
- data->fan_ctl |= i << 4;
- it87_write_value(data, IT87_REG_FAN_CTL, data->fan_ctl);
+ if (nr == 0) {
+ data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL) & 0x8f;
+ data->fan_ctl |= i << 4;
+ it87_write_value(data, IT87_REG_FAN_CTL, data->fan_ctl);
+ } else {
+ data->extra = it87_read_value(data, IT87_REG_TEMP_EXTRA) & 0x8f;
+ data->extra |= i << 4;
+ it87_write_value(data, IT87_REG_TEMP_EXTRA, data->extra);
+ }
mutex_unlock(&data->update_lock);
return count;
}
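
The midpoint search above relies on pwm_freq[] being sorted in descending order: the loop stops at the first entry whose midpoint with its successor lies below the requested value, which selects the nearest table entry. A minimal standalone sketch, with a made-up frequency table (the driver's actual pwm_freq[] values are outside this hunk):

	/* Sketch of the nearest-frequency search; table values are hypothetical. */
	static const unsigned int freq_table[8] = {
		750000, 375000, 187500, 93750, 46875, 23437, 11718, 5859
	};

	static int nearest_freq_index(unsigned long val)
	{
		int i;

		for (i = 0; i < 7; i++)
			if (val > (freq_table[i] + freq_table[i + 1]) / 2)
				break;
		return i;	/* 3-bit divider index, as programmed above */
	}
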
+
static ssize_t show_pwm_temp_map(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
struct it87_data *data = it87_update_device(dev);
+ int nr = sensor_attr->index;
int map;
- if (data->pwm_temp_map[nr] < 3)
- map = 1 << data->pwm_temp_map[nr];
- else
- map = 0; /* Should never happen */
- return sprintf(buf, "%d\n", map);
+ map = data->pwm_temp_map[nr];
+ if (map >= 3)
+ map = 0; /* Should never happen */
+ if (nr >= 3) /* pwm4..6 (nr 3..5) map to temp4..6 */
+ map += 3;
+
+ return sprintf(buf, "%d\n", (int)BIT(map));
}
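
show_pwm_temp_map() translates the chip's 2-bit temperature-source field into the sysfs bitmask convention, where bit k set means the fan follows temp(k+1); since pwm4..6 sense temp4..6, the bit number is shifted up by 3 for those channels. A sketch of the inverse conversion, mirroring set_pwm_temp_map() below (illustration only, not a separate driver function):

	/* e.g. writing 16 to pwm5_auto_channels_temp selects temp5 */
	static int channels_to_reg(int nr, unsigned long channels)
	{
		if (nr >= 3)
			channels >>= 3;	/* temp4..6 bits back to 0..2 */
		return __ffs(channels);	/* 2-bit register field; channels must be non-zero */
	}
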
+
static ssize_t set_pwm_temp_map(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- int nr = sensor_attr->index;
-
struct it87_data *data = dev_get_drvdata(dev);
+ int nr = sensor_attr->index;
long val;
u8 reg;
- /*
- * This check can go away if we ever support automatic fan speed
- * control on newer chips.
- */
- if (!has_old_autopwm(data)) {
- dev_notice(dev, "Mapping change disabled for safety reasons\n");
- return -EINVAL;
- }
-
if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
+ if (nr >= 3)
+ val >>= 3; /* temp4..6 -> bits 3..5, shift back to 0..2 */
+
switch (val) {
- case (1 << 0):
+ case BIT(0):
reg = 0x00;
break;
- case (1 << 1):
+ case BIT(1):
reg = 0x01;
break;
- case (1 << 2):
+ case BIT(2):
reg = 0x02;
break;
default:
@@ -1202,14 +1463,14 @@ static ssize_t set_pwm_temp_map(struct device *dev,
*/
if (data->pwm_ctrl[nr] & 0x80) {
data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
- it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
+ it87_write_value(data, IT87_REG_PWM[nr], data->pwm_ctrl[nr]);
}
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_auto_pwm(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t show_auto_pwm(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct it87_data *data = it87_update_device(dev);
struct sensor_device_attribute_2 *sensor_attr =
@@ -1221,14 +1482,15 @@ static ssize_t show_auto_pwm(struct device *dev,
pwm_from_reg(data, data->auto_pwm[nr][point]));
}
-static ssize_t set_auto_pwm(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t set_auto_pwm(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct it87_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int point = sensor_attr->index;
+ int regaddr;
long val;
if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 255)
@@ -1236,26 +1498,65 @@ static ssize_t set_auto_pwm(struct device *dev,
mutex_lock(&data->update_lock);
data->auto_pwm[nr][point] = pwm_to_reg(data, val);
- it87_write_value(data, IT87_REG_AUTO_PWM(nr, point),
- data->auto_pwm[nr][point]);
+ if (has_newer_autopwm(data))
+ regaddr = IT87_REG_AUTO_TEMP(nr, 3);
+ else
+ regaddr = IT87_REG_AUTO_PWM(nr, point);
+ it87_write_value(data, regaddr, data->auto_pwm[nr][point]);
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_auto_temp(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t show_auto_pwm_slope(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct it87_data *data = it87_update_device(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+
+ return sprintf(buf, "%d\n", data->auto_pwm[nr][1] & 0x7f);
+}
+
+static ssize_t set_auto_pwm_slope(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct it87_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ unsigned long val;
+
+ if (kstrtoul(buf, 10, &val) < 0 || val > 127)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->auto_pwm[nr][1] = (data->auto_pwm[nr][1] & 0x80) | val;
+ it87_write_value(data, IT87_REG_AUTO_TEMP(nr, 4),
+ data->auto_pwm[nr][1]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
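
Bit 7 of the slope register holds an unrelated flag, so set_auto_pwm_slope() masks it out of the write and keeps it intact. A worked example of the masking (a sketch, not driver code):

	static u8 pack_slope(u8 oldreg, unsigned long slope)	/* slope <= 127 */
	{
		return (oldreg & 0x80) | slope;	/* 0x85, 10 -> 0x8a: flag kept */
	}
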
+
+static ssize_t show_auto_temp(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct it87_data *data = it87_update_device(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int point = sensor_attr->index;
+ int reg;
+
+ if (has_old_autopwm(data) || point)
+ reg = data->auto_temp[nr][point];
+ else
+ reg = data->auto_temp[nr][1] - (data->auto_temp[nr][0] & 0x1f);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->auto_temp[nr][point]));
+ return sprintf(buf, "%d\n", TEMP_FROM_REG(reg));
}
-static ssize_t set_auto_temp(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t set_auto_temp(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct it87_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute_2 *sensor_attr =
@@ -1263,14 +1564,24 @@ static ssize_t set_auto_temp(struct device *dev,
int nr = sensor_attr->nr;
int point = sensor_attr->index;
long val;
+ int reg;
if (kstrtol(buf, 10, &val) < 0 || val < -128000 || val > 127000)
return -EINVAL;
mutex_lock(&data->update_lock);
- data->auto_temp[nr][point] = TEMP_TO_REG(val);
- it87_write_value(data, IT87_REG_AUTO_TEMP(nr, point),
- data->auto_temp[nr][point]);
+ if (has_newer_autopwm(data) && !point) {
+ reg = data->auto_temp[nr][1] - TEMP_TO_REG(val);
+ reg = clamp_val(reg, 0, 0x1f) | (data->auto_temp[nr][0] & 0xe0);
+ data->auto_temp[nr][0] = reg;
+ it87_write_value(data, IT87_REG_AUTO_TEMP(nr, 5), reg);
+ } else {
+ reg = TEMP_TO_REG(val);
+ data->auto_temp[nr][point] = reg;
+ if (has_newer_autopwm(data))
+ point--;
+ it87_write_value(data, IT87_REG_AUTO_TEMP(nr, point), reg);
+ }
mutex_unlock(&data->update_lock);
return count;
}
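
With the newer auto-pwm register layout, point 0 is not stored as an absolute temperature: the chip keeps a 5-bit hysteresis delta below point 1, which is what the clamp to 0x1f above implements. A condensed sketch of that encoding, assuming 1 degree C per register LSB:

	/* Sketch: encode auto point 0 as a delta below point 1. */
	static u8 encode_point0(u8 point1, long degc, u8 oldreg)
	{
		int delta = point1 - degc;		/* e.g. 60 - 55 = 5 */

		delta = clamp_val(delta, 0, 0x1f);	/* 5-bit field */
		return delta | (oldreg & 0xe0);		/* keep bits 5..7 */
	}
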
@@ -1308,8 +1619,9 @@ static SENSOR_DEVICE_ATTR_2(fan6_min, S_IRUGO | S_IWUSR, show_fan, set_fan,
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
show_pwm_enable, set_pwm_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 0);
-static DEVICE_ATTR(pwm1_freq, S_IRUGO | S_IWUSR, show_pwm_freq, set_pwm_freq);
-static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm1_freq, S_IRUGO | S_IWUSR, show_pwm_freq,
+ set_pwm_freq, 0);
+static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IRUGO,
show_pwm_temp_map, set_pwm_temp_map, 0);
static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR,
show_auto_pwm, set_auto_pwm, 0, 0);
@@ -1329,12 +1641,16 @@ static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_temp, S_IRUGO | S_IWUSR,
show_auto_temp, set_auto_temp, 0, 3);
static SENSOR_DEVICE_ATTR_2(pwm1_auto_point4_temp, S_IRUGO | S_IWUSR,
show_auto_temp, set_auto_temp, 0, 4);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_start, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 0, 0);
+static SENSOR_DEVICE_ATTR(pwm1_auto_slope, S_IRUGO | S_IWUSR,
+ show_auto_pwm_slope, set_auto_pwm_slope, 0);
static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
show_pwm_enable, set_pwm_enable, 1);
static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 1);
-static DEVICE_ATTR(pwm2_freq, S_IRUGO, show_pwm_freq, NULL);
-static SENSOR_DEVICE_ATTR(pwm2_auto_channels_temp, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm2_freq, S_IRUGO, show_pwm_freq, set_pwm_freq, 1);
+static SENSOR_DEVICE_ATTR(pwm2_auto_channels_temp, S_IRUGO,
show_pwm_temp_map, set_pwm_temp_map, 1);
static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR,
show_auto_pwm, set_auto_pwm, 1, 0);
@@ -1354,12 +1670,16 @@ static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_temp, S_IRUGO | S_IWUSR,
show_auto_temp, set_auto_temp, 1, 3);
static SENSOR_DEVICE_ATTR_2(pwm2_auto_point4_temp, S_IRUGO | S_IWUSR,
show_auto_temp, set_auto_temp, 1, 4);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_start, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 1, 0);
+static SENSOR_DEVICE_ATTR(pwm2_auto_slope, S_IRUGO | S_IWUSR,
+ show_auto_pwm_slope, set_auto_pwm_slope, 1);
static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
show_pwm_enable, set_pwm_enable, 2);
static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 2);
-static DEVICE_ATTR(pwm3_freq, S_IRUGO, show_pwm_freq, NULL);
-static SENSOR_DEVICE_ATTR(pwm3_auto_channels_temp, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(pwm3_freq, S_IRUGO, show_pwm_freq, NULL, 2);
+static SENSOR_DEVICE_ATTR(pwm3_auto_channels_temp, S_IRUGO,
show_pwm_temp_map, set_pwm_temp_map, 2);
static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR,
show_auto_pwm, set_auto_pwm, 2, 0);
@@ -1379,30 +1699,94 @@ static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_temp, S_IRUGO | S_IWUSR,
show_auto_temp, set_auto_temp, 2, 3);
static SENSOR_DEVICE_ATTR_2(pwm3_auto_point4_temp, S_IRUGO | S_IWUSR,
show_auto_temp, set_auto_temp, 2, 4);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_start, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 2, 0);
+static SENSOR_DEVICE_ATTR(pwm3_auto_slope, S_IRUGO | S_IWUSR,
+ show_auto_pwm_slope, set_auto_pwm_slope, 2);
+
+static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 3);
+static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 3);
+static SENSOR_DEVICE_ATTR(pwm4_freq, S_IRUGO, show_pwm_freq, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm4_auto_channels_temp, S_IRUGO,
+ show_pwm_temp_map, set_pwm_temp_map, 3);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 3, 1);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 3, 0);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 3, 2);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 3, 3);
+static SENSOR_DEVICE_ATTR_2(pwm4_auto_start, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 3, 0);
+static SENSOR_DEVICE_ATTR(pwm4_auto_slope, S_IRUGO | S_IWUSR,
+ show_auto_pwm_slope, set_auto_pwm_slope, 3);
+
+static SENSOR_DEVICE_ATTR(pwm5_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 4);
+static SENSOR_DEVICE_ATTR(pwm5, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 4);
+static SENSOR_DEVICE_ATTR(pwm5_freq, S_IRUGO, show_pwm_freq, NULL, 4);
+static SENSOR_DEVICE_ATTR(pwm5_auto_channels_temp, S_IRUGO,
+ show_pwm_temp_map, set_pwm_temp_map, 4);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 4, 1);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 4, 0);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 4, 2);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 4, 3);
+static SENSOR_DEVICE_ATTR_2(pwm5_auto_start, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 4, 0);
+static SENSOR_DEVICE_ATTR(pwm5_auto_slope, S_IRUGO | S_IWUSR,
+ show_auto_pwm_slope, set_auto_pwm_slope, 4);
+
+static SENSOR_DEVICE_ATTR(pwm6_enable, S_IRUGO | S_IWUSR,
+ show_pwm_enable, set_pwm_enable, 5);
+static SENSOR_DEVICE_ATTR(pwm6, S_IRUGO | S_IWUSR, show_pwm, set_pwm, 5);
+static SENSOR_DEVICE_ATTR(pwm6_freq, S_IRUGO, show_pwm_freq, NULL, 5);
+static SENSOR_DEVICE_ATTR(pwm6_auto_channels_temp, S_IRUGO,
+ show_pwm_temp_map, set_pwm_temp_map, 5);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_point1_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 5, 1);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 5, 0);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_point2_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 5, 2);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_point3_temp, S_IRUGO | S_IWUSR,
+ show_auto_temp, set_auto_temp, 5, 3);
+static SENSOR_DEVICE_ATTR_2(pwm6_auto_start, S_IRUGO | S_IWUSR,
+ show_auto_pwm, set_auto_pwm, 5, 0);
+static SENSOR_DEVICE_ATTR(pwm6_auto_slope, S_IRUGO | S_IWUSR,
+ show_auto_pwm_slope, set_auto_pwm_slope, 5);
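
The pwmX_freq files are now SENSOR_DEVICE_ATTR rather than DEVICE_ATTR so that each attribute carries its channel number; the callbacks recover it via to_sensor_dev_attr(). A minimal sketch of the pattern (the attribute name here is hypothetical):

	static ssize_t show_chan(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		int nr = to_sensor_dev_attr(attr)->index;	/* channel, 0-based */

		return sprintf(buf, "%d\n", nr);
	}
	static SENSOR_DEVICE_ATTR(chan_example, S_IRUGO, show_chan, NULL, 2);
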
/* Alarms */
static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
struct it87_data *data = it87_update_device(dev);
+
return sprintf(buf, "%u\n", data->alarms);
}
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- int bitnr = to_sensor_dev_attr(attr)->index;
struct it87_data *data = it87_update_device(dev);
+ int bitnr = to_sensor_dev_attr(attr)->index;
+
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
-static ssize_t clear_intrusion(struct device *dev, struct device_attribute
- *attr, const char *buf, size_t count)
+static ssize_t clear_intrusion(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct it87_data *data = dev_get_drvdata(dev);
- long val;
int config;
+ long val;
if (kstrtol(buf, 10, &val) < 0 || val != 0)
return -EINVAL;
@@ -1412,7 +1796,7 @@ static ssize_t clear_intrusion(struct device *dev, struct device_attribute
if (config < 0) {
count = config;
} else {
- config |= 1 << 5;
+ config |= BIT(5);
it87_write_value(data, IT87_REG_CONFIG, config);
/* Invalidate cache to force re-read */
data->valid = 0;
@@ -1443,29 +1827,30 @@ static SENSOR_DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR,
show_alarm, clear_intrusion, 4);
static ssize_t show_beep(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
- int bitnr = to_sensor_dev_attr(attr)->index;
struct it87_data *data = it87_update_device(dev);
+ int bitnr = to_sensor_dev_attr(attr)->index;
+
return sprintf(buf, "%u\n", (data->beeps >> bitnr) & 1);
}
+
static ssize_t set_beep(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (kstrtol(buf, 10, &val) < 0
- || (val != 0 && val != 1))
+ if (kstrtol(buf, 10, &val) < 0 || (val != 0 && val != 1))
return -EINVAL;
mutex_lock(&data->update_lock);
data->beeps = it87_read_value(data, IT87_REG_BEEP_ENABLE);
if (val)
- data->beeps |= (1 << bitnr);
+ data->beeps |= BIT(bitnr);
else
- data->beeps &= ~(1 << bitnr);
+ data->beeps &= ~BIT(bitnr);
it87_write_value(data, IT87_REG_BEEP_ENABLE, data->beeps);
mutex_unlock(&data->update_lock);
return count;
@@ -1493,13 +1878,15 @@ static SENSOR_DEVICE_ATTR(temp2_beep, S_IRUGO, show_beep, NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_beep, S_IRUGO, show_beep, NULL, 2);
static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
struct it87_data *data = dev_get_drvdata(dev);
+
return sprintf(buf, "%u\n", data->vrm);
}
+
static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
@@ -1514,15 +1901,16 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
+
+ return sprintf(buf, "%ld\n", (long)vid_from_reg(data->vid, data->vrm));
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid_reg, NULL);
static ssize_t show_label(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
static const char * const labels[] = {
"+5V",
@@ -1548,227 +1936,348 @@ static ssize_t show_label(struct device *dev, struct device_attribute *attr,
static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 2);
-/* special AVCC3 IT8603E in9 */
+/* AVCC3 */
static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 0);
-static ssize_t show_name(struct device *dev, struct device_attribute
- *devattr, char *buf)
+static umode_t it87_in_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
{
+ struct device *dev = container_of(kobj, struct device, kobj);
struct it87_data *data = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", data->name);
+ int i = index / 5; /* voltage index */
+ int a = index % 5; /* attribute index */
+
+ if (index >= 40) { /* in8 and higher only have input attributes */
+ i = index - 40 + 8;
+ a = 0;
+ }
+
+ if (!(data->has_in & BIT(i)))
+ return 0;
+
+ if (a == 4 && !data->has_beep)
+ return 0;
+
+ return attr->mode;
}
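
Because the voltage attributes now live in one flat group, is_visible receives a plain index into it87_attributes_in[] and must decode it: five files per input for in0..in7, then one input-only file per sensor from index 40 upward, so index 23 is in4's alarm and index 41 is in9_input. The decode on its own, as a sketch:

	static void decode_in_index(int index, int *in, int *a)
	{
		if (index >= 40) {	/* in8.. expose only _input */
			*in = index - 40 + 8;
			*a = 0;
		} else {
			*in = index / 5;
			*a = index % 5;	/* input, min, max, alarm, beep */
		}
	}
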
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static struct attribute *it87_attributes_in[10][5] = {
-{
+static struct attribute *it87_attributes_in[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_min.dev_attr.attr,
&sensor_dev_attr_in0_max.dev_attr.attr,
&sensor_dev_attr_in0_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_in0_beep.dev_attr.attr, /* 4 */
+
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_min.dev_attr.attr,
&sensor_dev_attr_in1_max.dev_attr.attr,
&sensor_dev_attr_in1_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_in1_beep.dev_attr.attr, /* 9 */
+
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in2_min.dev_attr.attr,
&sensor_dev_attr_in2_max.dev_attr.attr,
&sensor_dev_attr_in2_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_in2_beep.dev_attr.attr, /* 14 */
+
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in3_min.dev_attr.attr,
&sensor_dev_attr_in3_max.dev_attr.attr,
&sensor_dev_attr_in3_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_in3_beep.dev_attr.attr, /* 19 */
+
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in4_min.dev_attr.attr,
&sensor_dev_attr_in4_max.dev_attr.attr,
&sensor_dev_attr_in4_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_in4_beep.dev_attr.attr, /* 24 */
+
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in5_min.dev_attr.attr,
&sensor_dev_attr_in5_max.dev_attr.attr,
&sensor_dev_attr_in5_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_in5_beep.dev_attr.attr, /* 29 */
+
&sensor_dev_attr_in6_input.dev_attr.attr,
&sensor_dev_attr_in6_min.dev_attr.attr,
&sensor_dev_attr_in6_max.dev_attr.attr,
&sensor_dev_attr_in6_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_in6_beep.dev_attr.attr, /* 34 */
+
&sensor_dev_attr_in7_input.dev_attr.attr,
&sensor_dev_attr_in7_min.dev_attr.attr,
&sensor_dev_attr_in7_max.dev_attr.attr,
&sensor_dev_attr_in7_alarm.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_in8_input.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_in9_input.dev_attr.attr,
- NULL
-} };
-
-static const struct attribute_group it87_group_in[10] = {
- { .attrs = it87_attributes_in[0] },
- { .attrs = it87_attributes_in[1] },
- { .attrs = it87_attributes_in[2] },
- { .attrs = it87_attributes_in[3] },
- { .attrs = it87_attributes_in[4] },
- { .attrs = it87_attributes_in[5] },
- { .attrs = it87_attributes_in[6] },
- { .attrs = it87_attributes_in[7] },
- { .attrs = it87_attributes_in[8] },
- { .attrs = it87_attributes_in[9] },
+ &sensor_dev_attr_in7_beep.dev_attr.attr, /* 39 */
+
+ &sensor_dev_attr_in8_input.dev_attr.attr, /* 40 */
+ &sensor_dev_attr_in9_input.dev_attr.attr, /* 41 */
+ &sensor_dev_attr_in10_input.dev_attr.attr, /* 42 */
+ &sensor_dev_attr_in11_input.dev_attr.attr, /* 43 */
+ &sensor_dev_attr_in12_input.dev_attr.attr, /* 44 */
+ NULL
};
-static struct attribute *it87_attributes_temp[3][6] = {
+static const struct attribute_group it87_group_in = {
+ .attrs = it87_attributes_in,
+ .is_visible = it87_in_is_visible,
+};
+
+static umode_t it87_temp_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct it87_data *data = dev_get_drvdata(dev);
+ int i = index / 7; /* temperature index */
+ int a = index % 7; /* attribute index */
+
+ if (index >= 21) {
+ i = index - 21 + 3;
+ a = 0;
+ }
+
+ if (!(data->has_temp & BIT(i)))
+ return 0;
+
+ if (a == 5 && !has_temp_offset(data))
+ return 0;
+
+ if (a == 6 && !data->has_beep)
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute *it87_attributes_temp[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_type.dev_attr.attr,
&sensor_dev_attr_temp1_alarm.dev_attr.attr,
- NULL
-} , {
- &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_offset.dev_attr.attr, /* 5 */
+ &sensor_dev_attr_temp1_beep.dev_attr.attr, /* 6 */
+
+ &sensor_dev_attr_temp2_input.dev_attr.attr, /* 7 */
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp2_type.dev_attr.attr,
&sensor_dev_attr_temp2_alarm.dev_attr.attr,
- NULL
-} , {
- &sensor_dev_attr_temp3_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_offset.dev_attr.attr,
+ &sensor_dev_attr_temp2_beep.dev_attr.attr,
+
+ &sensor_dev_attr_temp3_input.dev_attr.attr, /* 14 */
&sensor_dev_attr_temp3_max.dev_attr.attr,
&sensor_dev_attr_temp3_min.dev_attr.attr,
&sensor_dev_attr_temp3_type.dev_attr.attr,
&sensor_dev_attr_temp3_alarm.dev_attr.attr,
- NULL
-} };
+ &sensor_dev_attr_temp3_offset.dev_attr.attr,
+ &sensor_dev_attr_temp3_beep.dev_attr.attr,
-static const struct attribute_group it87_group_temp[3] = {
- { .attrs = it87_attributes_temp[0] },
- { .attrs = it87_attributes_temp[1] },
- { .attrs = it87_attributes_temp[2] },
+ &sensor_dev_attr_temp4_input.dev_attr.attr, /* 21 */
+ &sensor_dev_attr_temp5_input.dev_attr.attr,
+ &sensor_dev_attr_temp6_input.dev_attr.attr,
+ NULL
};
-static struct attribute *it87_attributes_temp_offset[] = {
- &sensor_dev_attr_temp1_offset.dev_attr.attr,
- &sensor_dev_attr_temp2_offset.dev_attr.attr,
- &sensor_dev_attr_temp3_offset.dev_attr.attr,
+static const struct attribute_group it87_group_temp = {
+ .attrs = it87_attributes_temp,
+ .is_visible = it87_temp_is_visible,
};
+static umode_t it87_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct it87_data *data = dev_get_drvdata(dev);
+
+ if ((index == 2 || index == 3) && !data->has_vid)
+ return 0;
+
+ if (index > 3 && !(data->in_internal & BIT(index - 4)))
+ return 0;
+
+ return attr->mode;
+}
+
static struct attribute *it87_attributes[] = {
&dev_attr_alarms.attr,
&sensor_dev_attr_intrusion0_alarm.dev_attr.attr,
- &dev_attr_name.attr,
+ &dev_attr_vrm.attr, /* 2 */
+ &dev_attr_cpu0_vid.attr, /* 3 */
+ &sensor_dev_attr_in3_label.dev_attr.attr, /* 4 .. 7 */
+ &sensor_dev_attr_in7_label.dev_attr.attr,
+ &sensor_dev_attr_in8_label.dev_attr.attr,
+ &sensor_dev_attr_in9_label.dev_attr.attr,
NULL
};
static const struct attribute_group it87_group = {
.attrs = it87_attributes,
+ .is_visible = it87_is_visible,
};
-static struct attribute *it87_attributes_in_beep[] = {
- &sensor_dev_attr_in0_beep.dev_attr.attr,
- &sensor_dev_attr_in1_beep.dev_attr.attr,
- &sensor_dev_attr_in2_beep.dev_attr.attr,
- &sensor_dev_attr_in3_beep.dev_attr.attr,
- &sensor_dev_attr_in4_beep.dev_attr.attr,
- &sensor_dev_attr_in5_beep.dev_attr.attr,
- &sensor_dev_attr_in6_beep.dev_attr.attr,
- &sensor_dev_attr_in7_beep.dev_attr.attr,
- NULL,
- NULL,
-};
+static umode_t it87_fan_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct it87_data *data = dev_get_drvdata(dev);
+ int i = index / 5; /* fan index */
+ int a = index % 5; /* attribute index */
-static struct attribute *it87_attributes_temp_beep[] = {
- &sensor_dev_attr_temp1_beep.dev_attr.attr,
- &sensor_dev_attr_temp2_beep.dev_attr.attr,
- &sensor_dev_attr_temp3_beep.dev_attr.attr,
-};
+ if (index >= 15) { /* fan 4..6 don't have divisor attributes */
+ i = (index - 15) / 4 + 3;
+ a = (index - 15) % 4;
+ }
-static struct attribute *it87_attributes_fan[6][3+1] = { {
+ if (!(data->has_fan & BIT(i)))
+ return 0;
+
+ if (a == 3) { /* beep */
+ if (!data->has_beep)
+ return 0;
+ /* first fan beep attribute is writable */
+ if (i == __ffs(data->has_fan))
+ return attr->mode | S_IWUSR;
+ }
+
+ if (a == 4 && has_16bit_fans(data)) /* divisor */
+ return 0;
+
+ return attr->mode;
+}
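
These chips have a single beep-enable bit shared by all fans, so only the first populated fan receives a writable beep file; __ffs() returns the bit number of the lowest set bit in has_fan. For example:

	/* fans 2, 3, 5 and 6 present (has_fan == 0x36) */
	static int first_fan(u8 has_fan)
	{
		return __ffs(has_fan);	/* 0x36 -> 1, so fan2_beep is writable */
	}
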
+
+static struct attribute *it87_attributes_fan[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan1_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_fan1_beep.dev_attr.attr, /* 3 */
+ &sensor_dev_attr_fan1_div.dev_attr.attr, /* 4 */
+
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan2_alarm.dev_attr.attr,
- NULL
-}, {
+ &sensor_dev_attr_fan2_beep.dev_attr.attr,
+ &sensor_dev_attr_fan2_div.dev_attr.attr, /* 9 */
+
&sensor_dev_attr_fan3_input.dev_attr.attr,
&sensor_dev_attr_fan3_min.dev_attr.attr,
&sensor_dev_attr_fan3_alarm.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_fan4_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_beep.dev_attr.attr,
+ &sensor_dev_attr_fan3_div.dev_attr.attr, /* 14 */
+
+ &sensor_dev_attr_fan4_input.dev_attr.attr, /* 15 */
&sensor_dev_attr_fan4_min.dev_attr.attr,
&sensor_dev_attr_fan4_alarm.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_fan5_input.dev_attr.attr,
+ &sensor_dev_attr_fan4_beep.dev_attr.attr,
+
+ &sensor_dev_attr_fan5_input.dev_attr.attr, /* 19 */
&sensor_dev_attr_fan5_min.dev_attr.attr,
&sensor_dev_attr_fan5_alarm.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_fan6_input.dev_attr.attr,
+ &sensor_dev_attr_fan5_beep.dev_attr.attr,
+
+ &sensor_dev_attr_fan6_input.dev_attr.attr, /* 23 */
&sensor_dev_attr_fan6_min.dev_attr.attr,
&sensor_dev_attr_fan6_alarm.dev_attr.attr,
+ &sensor_dev_attr_fan6_beep.dev_attr.attr,
NULL
-} };
-
-static const struct attribute_group it87_group_fan[6] = {
- { .attrs = it87_attributes_fan[0] },
- { .attrs = it87_attributes_fan[1] },
- { .attrs = it87_attributes_fan[2] },
- { .attrs = it87_attributes_fan[3] },
- { .attrs = it87_attributes_fan[4] },
- { .attrs = it87_attributes_fan[5] },
};
-static const struct attribute *it87_attributes_fan_div[] = {
- &sensor_dev_attr_fan1_div.dev_attr.attr,
- &sensor_dev_attr_fan2_div.dev_attr.attr,
- &sensor_dev_attr_fan3_div.dev_attr.attr,
+static const struct attribute_group it87_group_fan = {
+ .attrs = it87_attributes_fan,
+ .is_visible = it87_fan_is_visible,
};
-static struct attribute *it87_attributes_pwm[3][4+1] = { {
+static umode_t it87_pwm_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct it87_data *data = dev_get_drvdata(dev);
+ int i = index / 4; /* pwm index */
+ int a = index % 4; /* attribute index */
+
+ if (!(data->has_pwm & BIT(i)))
+ return 0;
+
+ /* pwmX_auto_channels_temp is only writable if auto pwm is supported */
+ if (a == 3 && (has_old_autopwm(data) || has_newer_autopwm(data)))
+ return attr->mode | S_IWUSR;
+
+ /* pwm2_freq is writable if there are two pwm frequency selects */
+ if (has_pwm_freq2(data) && i == 1 && a == 2)
+ return attr->mode | S_IWUSR;
+
+ return attr->mode;
+}
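
This callback is why several pwm attributes above are declared S_IRUGO yet keep their store functions: the declared mode is only a base value, and is_visible may widen it, or return 0 to suppress the file entirely, when the group is registered. The pattern in isolation, as a sketch:

	static umode_t rw_if_supported(struct kobject *kobj,
				       struct attribute *attr, int index)
	{
		struct device *dev = container_of(kobj, struct device, kobj);
		struct it87_data *data = dev_get_drvdata(dev);

		if (has_old_autopwm(data) || has_newer_autopwm(data))
			return attr->mode | S_IWUSR;	/* 0444 -> 0644 */
		return attr->mode;
	}
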
+
+static struct attribute *it87_attributes_pwm[] = {
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
- &dev_attr_pwm1_freq.attr,
+ &sensor_dev_attr_pwm1_freq.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
- NULL
-}, {
+
&sensor_dev_attr_pwm2_enable.dev_attr.attr,
&sensor_dev_attr_pwm2.dev_attr.attr,
- &dev_attr_pwm2_freq.attr,
+ &sensor_dev_attr_pwm2_freq.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_channels_temp.dev_attr.attr,
- NULL
-}, {
+
&sensor_dev_attr_pwm3_enable.dev_attr.attr,
&sensor_dev_attr_pwm3.dev_attr.attr,
- &dev_attr_pwm3_freq.attr,
+ &sensor_dev_attr_pwm3_freq.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_channels_temp.dev_attr.attr,
+
+ &sensor_dev_attr_pwm4_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm4.dev_attr.attr,
+ &sensor_dev_attr_pwm4_freq.dev_attr.attr,
+ &sensor_dev_attr_pwm4_auto_channels_temp.dev_attr.attr,
+
+ &sensor_dev_attr_pwm5_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm5.dev_attr.attr,
+ &sensor_dev_attr_pwm5_freq.dev_attr.attr,
+ &sensor_dev_attr_pwm5_auto_channels_temp.dev_attr.attr,
+
+ &sensor_dev_attr_pwm6_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm6.dev_attr.attr,
+ &sensor_dev_attr_pwm6_freq.dev_attr.attr,
+ &sensor_dev_attr_pwm6_auto_channels_temp.dev_attr.attr,
+
NULL
-} };
+};
-static const struct attribute_group it87_group_pwm[3] = {
- { .attrs = it87_attributes_pwm[0] },
- { .attrs = it87_attributes_pwm[1] },
- { .attrs = it87_attributes_pwm[2] },
+static const struct attribute_group it87_group_pwm = {
+ .attrs = it87_attributes_pwm,
+ .is_visible = it87_pwm_is_visible,
};
-static struct attribute *it87_attributes_autopwm[3][9+1] = { {
+static umode_t it87_auto_pwm_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct it87_data *data = dev_get_drvdata(dev);
+ int i = index / 11; /* pwm index */
+ int a = index % 11; /* attribute index */
+
+ if (index >= 33) { /* pwm 4..6 */
+ i = (index - 33) / 6 + 3;
+ a = (index - 33) % 6 + 4;
+ if (a > 7) /* skip a == 8: pwm4..6 have no auto_point4_temp */
+ a++; /* realign auto_start/auto_slope with pwm1..3 */
+ }
+
+ if (!(data->has_pwm & BIT(i)))
+ return 0;
+
+ if (has_newer_autopwm(data)) {
+ if (a < 4) /* no auto point pwm */
+ return 0;
+ if (a == 8) /* no auto_point4 */
+ return 0;
+ }
+ if (has_old_autopwm(data)) {
+ if (a >= 9) /* no pwm_auto_start, pwm_auto_slope */
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static struct attribute *it87_attributes_auto_pwm[] = {
&sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
@@ -1778,9 +2287,10 @@ static struct attribute *it87_attributes_autopwm[3][9+1] = { {
&sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_start.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_slope.dev_attr.attr,
+
+ &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr, /* 11 */
&sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point3_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point4_pwm.dev_attr.attr,
@@ -1789,9 +2299,10 @@ static struct attribute *it87_attributes_autopwm[3][9+1] = { {
&sensor_dev_attr_pwm2_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point3_temp.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point4_temp.dev_attr.attr,
- NULL
-}, {
- &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_start.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_slope.dev_attr.attr,
+
+ &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr, /* 22 */
&sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point3_pwm.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point4_pwm.dev_attr.attr,
@@ -1800,61 +2311,53 @@ static struct attribute *it87_attributes_autopwm[3][9+1] = { {
&sensor_dev_attr_pwm3_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point3_temp.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_point4_temp.dev_attr.attr,
- NULL
-} };
-
-static const struct attribute_group it87_group_autopwm[3] = {
- { .attrs = it87_attributes_autopwm[0] },
- { .attrs = it87_attributes_autopwm[1] },
- { .attrs = it87_attributes_autopwm[2] },
-};
+ &sensor_dev_attr_pwm3_auto_start.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_slope.dev_attr.attr,
+
+ &sensor_dev_attr_pwm4_auto_point1_temp.dev_attr.attr, /* 33 */
+ &sensor_dev_attr_pwm4_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm4_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm4_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm4_auto_start.dev_attr.attr,
+ &sensor_dev_attr_pwm4_auto_slope.dev_attr.attr,
+
+ &sensor_dev_attr_pwm5_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm5_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm5_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm5_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm5_auto_start.dev_attr.attr,
+ &sensor_dev_attr_pwm5_auto_slope.dev_attr.attr,
+
+ &sensor_dev_attr_pwm6_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm6_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm6_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm6_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm6_auto_start.dev_attr.attr,
+ &sensor_dev_attr_pwm6_auto_slope.dev_attr.attr,
-static struct attribute *it87_attributes_fan_beep[] = {
- &sensor_dev_attr_fan1_beep.dev_attr.attr,
- &sensor_dev_attr_fan2_beep.dev_attr.attr,
- &sensor_dev_attr_fan3_beep.dev_attr.attr,
- &sensor_dev_attr_fan4_beep.dev_attr.attr,
- &sensor_dev_attr_fan5_beep.dev_attr.attr,
- &sensor_dev_attr_fan6_beep.dev_attr.attr,
-};
-
-static struct attribute *it87_attributes_vid[] = {
- &dev_attr_vrm.attr,
- &dev_attr_cpu0_vid.attr,
- NULL
-};
-
-static const struct attribute_group it87_group_vid = {
- .attrs = it87_attributes_vid,
-};
-
-static struct attribute *it87_attributes_label[] = {
- &sensor_dev_attr_in3_label.dev_attr.attr,
- &sensor_dev_attr_in7_label.dev_attr.attr,
- &sensor_dev_attr_in8_label.dev_attr.attr,
- &sensor_dev_attr_in9_label.dev_attr.attr,
- NULL
+ NULL,
};
-static const struct attribute_group it87_group_label = {
- .attrs = it87_attributes_label,
+static const struct attribute_group it87_group_auto_pwm = {
+ .attrs = it87_attributes_auto_pwm,
+ .is_visible = it87_auto_pwm_is_visible,
};
/* SuperIO detection - will change isa_address if a chip is found */
-static int __init it87_find(unsigned short *address,
- struct it87_sio_data *sio_data)
+static int __init it87_find(int sioaddr, unsigned short *address,
+ struct it87_sio_data *sio_data)
{
int err;
u16 chip_type;
const char *board_vendor, *board_name;
const struct it87_devices *config;
- err = superio_enter();
+ err = superio_enter(sioaddr);
if (err)
return err;
err = -ENODEV;
- chip_type = force_id ? force_id : superio_inw(DEVID);
+ chip_type = force_id ? force_id : superio_inw(sioaddr, DEVID);
switch (chip_type) {
case IT8705F_DEVID:
@@ -1910,6 +2413,9 @@ static int __init it87_find(unsigned short *address,
case IT8620E_DEVID:
sio_data->type = it8620;
break;
+ case IT8628E_DEVID:
+ sio_data->type = it8628;
+ break;
case 0xffff: /* No device at all */
goto exit;
default:
@@ -1917,20 +2423,20 @@ static int __init it87_find(unsigned short *address,
goto exit;
}
- superio_select(PME);
- if (!(superio_inb(IT87_ACT_REG) & 0x01)) {
+ superio_select(sioaddr, PME);
+ if (!(superio_inb(sioaddr, IT87_ACT_REG) & 0x01)) {
pr_info("Device not activated, skipping\n");
goto exit;
}
- *address = superio_inw(IT87_BASE_REG) & ~(IT87_EXTENT - 1);
+ *address = superio_inw(sioaddr, IT87_BASE_REG) & ~(IT87_EXTENT - 1);
if (*address == 0) {
pr_info("Base address not set, skipping\n");
goto exit;
}
err = 0;
- sio_data->revision = superio_inb(DEVREV) & 0x0f;
+ sio_data->revision = superio_inb(sioaddr, DEVREV) & 0x0f;
pr_info("Found IT%04x%s chip at 0x%x, revision %d\n", chip_type,
it87_devices[sio_data->type].suffix,
*address, sio_data->revision);
@@ -1939,14 +2445,19 @@ static int __init it87_find(unsigned short *address,
/* in7 (VSB or VCCH5V) is always internal on some chips */
if (has_in7_internal(config))
- sio_data->internal |= (1 << 1);
+ sio_data->internal |= BIT(1);
/* in8 (Vbat) is always internal */
- sio_data->internal |= (1 << 2);
+ sio_data->internal |= BIT(2);
- /* Only the IT8603E has in9 */
- if (sio_data->type != it8603)
- sio_data->skip_in |= (1 << 9);
+ /* in9 (AVCC3), always internal if supported */
+ if (has_avcc3(config))
+ sio_data->internal |= BIT(3); /* in9 is AVCC */
+ else
+ sio_data->skip_in |= BIT(9);
+
+ if (!has_six_pwm(config))
+ sio_data->skip_pwm |= BIT(3) | BIT(4) | BIT(5);
if (!has_vid(config))
sio_data->skip_vid = 1;
@@ -1954,45 +2465,46 @@ static int __init it87_find(unsigned short *address,
/* Read GPIO config and VID value from LDN 7 (GPIO) */
if (sio_data->type == it87) {
/* The IT8705F has a different LD number for GPIO */
- superio_select(5);
- sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ superio_select(sioaddr, 5);
+ sio_data->beep_pin = superio_inb(sioaddr,
+ IT87_SIO_BEEP_PIN_REG) & 0x3f;
} else if (sio_data->type == it8783) {
int reg25, reg27, reg2a, reg2c, regef;
- superio_select(GPIO);
+ superio_select(sioaddr, GPIO);
- reg25 = superio_inb(IT87_SIO_GPIO1_REG);
- reg27 = superio_inb(IT87_SIO_GPIO3_REG);
- reg2a = superio_inb(IT87_SIO_PINX1_REG);
- reg2c = superio_inb(IT87_SIO_PINX2_REG);
- regef = superio_inb(IT87_SIO_SPI_REG);
+ reg25 = superio_inb(sioaddr, IT87_SIO_GPIO1_REG);
+ reg27 = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
+ reg2a = superio_inb(sioaddr, IT87_SIO_PINX1_REG);
+ reg2c = superio_inb(sioaddr, IT87_SIO_PINX2_REG);
+ regef = superio_inb(sioaddr, IT87_SIO_SPI_REG);
/* Check if fan3 is there or not */
- if ((reg27 & (1 << 0)) || !(reg2c & (1 << 2)))
- sio_data->skip_fan |= (1 << 2);
- if ((reg25 & (1 << 4))
- || (!(reg2a & (1 << 1)) && (regef & (1 << 0))))
- sio_data->skip_pwm |= (1 << 2);
+ if ((reg27 & BIT(0)) || !(reg2c & BIT(2)))
+ sio_data->skip_fan |= BIT(2);
+ if ((reg25 & BIT(4)) ||
+ (!(reg2a & BIT(1)) && (regef & BIT(0))))
+ sio_data->skip_pwm |= BIT(2);
/* Check if fan2 is there or not */
- if (reg27 & (1 << 7))
- sio_data->skip_fan |= (1 << 1);
- if (reg27 & (1 << 3))
- sio_data->skip_pwm |= (1 << 1);
+ if (reg27 & BIT(7))
+ sio_data->skip_fan |= BIT(1);
+ if (reg27 & BIT(3))
+ sio_data->skip_pwm |= BIT(1);
/* VIN5 */
- if ((reg27 & (1 << 0)) || (reg2c & (1 << 2)))
- sio_data->skip_in |= (1 << 5); /* No VIN5 */
+ if ((reg27 & BIT(0)) || (reg2c & BIT(2)))
+ sio_data->skip_in |= BIT(5); /* No VIN5 */
/* VIN6 */
- if (reg27 & (1 << 1))
- sio_data->skip_in |= (1 << 6); /* No VIN6 */
+ if (reg27 & BIT(1))
+ sio_data->skip_in |= BIT(6); /* No VIN6 */
/*
* VIN7
* Does not depend on bit 2 of Reg2C, contrary to datasheet.
*/
- if (reg27 & (1 << 2)) {
+ if (reg27 & BIT(2)) {
/*
* The data sheet is a bit unclear regarding the
* internal voltage divider for VCCH5V. It says
@@ -2006,81 +2518,121 @@ static int __init it87_find(unsigned short *address,
* not the case, and ask the user to report if the
* resulting voltage is sane.
*/
- if (!(reg2c & (1 << 1))) {
- reg2c |= (1 << 1);
- superio_outb(IT87_SIO_PINX2_REG, reg2c);
+ if (!(reg2c & BIT(1))) {
+ reg2c |= BIT(1);
+ superio_outb(sioaddr, IT87_SIO_PINX2_REG,
+ reg2c);
pr_notice("Routing internal VCCH5V to in7.\n");
}
pr_notice("in7 routed to internal voltage divider, with external pin disabled.\n");
pr_notice("Please report if it displays a reasonable voltage.\n");
}
- if (reg2c & (1 << 0))
- sio_data->internal |= (1 << 0);
- if (reg2c & (1 << 1))
- sio_data->internal |= (1 << 1);
+ if (reg2c & BIT(0))
+ sio_data->internal |= BIT(0);
+ if (reg2c & BIT(1))
+ sio_data->internal |= BIT(1);
- sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ sio_data->beep_pin = superio_inb(sioaddr,
+ IT87_SIO_BEEP_PIN_REG) & 0x3f;
} else if (sio_data->type == it8603) {
int reg27, reg29;
- superio_select(GPIO);
+ superio_select(sioaddr, GPIO);
- reg27 = superio_inb(IT87_SIO_GPIO3_REG);
+ reg27 = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
/* Check if fan3 is there or not */
- if (reg27 & (1 << 6))
- sio_data->skip_pwm |= (1 << 2);
- if (reg27 & (1 << 7))
- sio_data->skip_fan |= (1 << 2);
+ if (reg27 & BIT(6))
+ sio_data->skip_pwm |= BIT(2);
+ if (reg27 & BIT(7))
+ sio_data->skip_fan |= BIT(2);
/* Check if fan2 is there or not */
- reg29 = superio_inb(IT87_SIO_GPIO5_REG);
- if (reg29 & (1 << 1))
- sio_data->skip_pwm |= (1 << 1);
- if (reg29 & (1 << 2))
- sio_data->skip_fan |= (1 << 1);
-
- sio_data->skip_in |= (1 << 5); /* No VIN5 */
- sio_data->skip_in |= (1 << 6); /* No VIN6 */
-
- sio_data->internal |= (1 << 3); /* in9 is AVCC */
-
- sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
- } else if (sio_data->type == it8620) {
+ reg29 = superio_inb(sioaddr, IT87_SIO_GPIO5_REG);
+ if (reg29 & BIT(1))
+ sio_data->skip_pwm |= BIT(1);
+ if (reg29 & BIT(2))
+ sio_data->skip_fan |= BIT(1);
+
+ sio_data->skip_in |= BIT(5); /* No VIN5 */
+ sio_data->skip_in |= BIT(6); /* No VIN6 */
+
+ sio_data->beep_pin = superio_inb(sioaddr,
+ IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ } else if (sio_data->type == it8620 || sio_data->type == it8628) {
int reg;
- superio_select(GPIO);
+ superio_select(sioaddr, GPIO);
+
+ /* Check for pwm5 */
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO1_REG);
+ if (reg & BIT(6))
+ sio_data->skip_pwm |= BIT(4);
/* Check for fan4, fan5 */
- reg = superio_inb(IT87_SIO_GPIO2_REG);
- if (!(reg & (1 << 5)))
- sio_data->skip_fan |= (1 << 3);
- if (!(reg & (1 << 4)))
- sio_data->skip_fan |= (1 << 4);
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO2_REG);
+ if (!(reg & BIT(5)))
+ sio_data->skip_fan |= BIT(3);
+ if (!(reg & BIT(4)))
+ sio_data->skip_fan |= BIT(4);
/* Check for pwm3, fan3 */
- reg = superio_inb(IT87_SIO_GPIO3_REG);
- if (reg & (1 << 6))
- sio_data->skip_pwm |= (1 << 2);
- if (reg & (1 << 7))
- sio_data->skip_fan |= (1 << 2);
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
+ if (reg & BIT(6))
+ sio_data->skip_pwm |= BIT(2);
+ if (reg & BIT(7))
+ sio_data->skip_fan |= BIT(2);
+
+ /* Check for pwm4 */
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO4_REG);
+ if (!(reg & BIT(2)))
+ sio_data->skip_pwm |= BIT(3);
/* Check for pwm2, fan2 */
- reg = superio_inb(IT87_SIO_GPIO5_REG);
- if (reg & (1 << 1))
- sio_data->skip_pwm |= (1 << 1);
- if (reg & (1 << 2))
- sio_data->skip_fan |= (1 << 1);
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO5_REG);
+ if (reg & BIT(1))
+ sio_data->skip_pwm |= BIT(1);
+ if (reg & BIT(2))
+ sio_data->skip_fan |= BIT(1);
+ /* Check for pwm6, fan6 */
+ if (!(reg & BIT(7))) {
+ sio_data->skip_pwm |= BIT(5);
+ sio_data->skip_fan |= BIT(5);
+ }
- sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ sio_data->beep_pin = superio_inb(sioaddr,
+ IT87_SIO_BEEP_PIN_REG) & 0x3f;
} else {
int reg;
bool uart6;
- superio_select(GPIO);
+ superio_select(sioaddr, GPIO);
+
+ /* Check for fan4, fan5 */
+ if (has_five_fans(config)) {
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO2_REG);
+ switch (sio_data->type) {
+ case it8718:
+ if (reg & BIT(5))
+ sio_data->skip_fan |= BIT(3);
+ if (reg & BIT(4))
+ sio_data->skip_fan |= BIT(4);
+ break;
+ case it8720:
+ case it8721:
+ case it8728:
+ if (!(reg & BIT(5)))
+ sio_data->skip_fan |= BIT(3);
+ if (!(reg & BIT(4)))
+ sio_data->skip_fan |= BIT(4);
+ break;
+ default:
+ break;
+ }
+ }
- reg = superio_inb(IT87_SIO_GPIO3_REG);
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO3_REG);
if (!sio_data->skip_vid) {
/* We need at least 4 VID pins */
if (reg & 0x0f) {
@@ -2090,25 +2642,26 @@ static int __init it87_find(unsigned short *address,
}
/* Check if fan3 is there or not */
- if (reg & (1 << 6))
- sio_data->skip_pwm |= (1 << 2);
- if (reg & (1 << 7))
- sio_data->skip_fan |= (1 << 2);
+ if (reg & BIT(6))
+ sio_data->skip_pwm |= BIT(2);
+ if (reg & BIT(7))
+ sio_data->skip_fan |= BIT(2);
/* Check if fan2 is there or not */
- reg = superio_inb(IT87_SIO_GPIO5_REG);
- if (reg & (1 << 1))
- sio_data->skip_pwm |= (1 << 1);
- if (reg & (1 << 2))
- sio_data->skip_fan |= (1 << 1);
+ reg = superio_inb(sioaddr, IT87_SIO_GPIO5_REG);
+ if (reg & BIT(1))
+ sio_data->skip_pwm |= BIT(1);
+ if (reg & BIT(2))
+ sio_data->skip_fan |= BIT(1);
- if ((sio_data->type == it8718 || sio_data->type == it8720)
- && !(sio_data->skip_vid))
- sio_data->vid_value = superio_inb(IT87_SIO_VID_REG);
+ if ((sio_data->type == it8718 || sio_data->type == it8720) &&
+ !(sio_data->skip_vid))
+ sio_data->vid_value = superio_inb(sioaddr,
+ IT87_SIO_VID_REG);
- reg = superio_inb(IT87_SIO_PINX2_REG);
+ reg = superio_inb(sioaddr, IT87_SIO_PINX2_REG);
- uart6 = sio_data->type == it8782 && (reg & (1 << 2));
+ uart6 = sio_data->type == it8782 && (reg & BIT(2));
/*
* The IT8720F has no VIN7 pin, so VCCH should always be
@@ -2124,15 +2677,15 @@ static int __init it87_find(unsigned short *address,
* If UART6 is enabled, re-route VIN7 to the internal divider
* if that is not already the case.
*/
- if ((sio_data->type == it8720 || uart6) && !(reg & (1 << 1))) {
- reg |= (1 << 1);
- superio_outb(IT87_SIO_PINX2_REG, reg);
+ if ((sio_data->type == it8720 || uart6) && !(reg & BIT(1))) {
+ reg |= BIT(1);
+ superio_outb(sioaddr, IT87_SIO_PINX2_REG, reg);
pr_notice("Routing internal VCCH to in7\n");
}
- if (reg & (1 << 0))
- sio_data->internal |= (1 << 0);
- if (reg & (1 << 1))
- sio_data->internal |= (1 << 1);
+ if (reg & BIT(0))
+ sio_data->internal |= BIT(0);
+ if (reg & BIT(1))
+ sio_data->internal |= BIT(1);
/*
* On IT8782F, UART6 pins overlap with VIN5, VIN6, and VIN7.
@@ -2144,11 +2697,12 @@ static int __init it87_find(unsigned short *address,
* temperature source here, skip_temp is preliminary.
*/
if (uart6) {
- sio_data->skip_in |= (1 << 5) | (1 << 6);
- sio_data->skip_temp |= (1 << 2);
+ sio_data->skip_in |= BIT(5) | BIT(6);
+ sio_data->skip_temp |= BIT(2);
}
- sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
+ sio_data->beep_pin = superio_inb(sioaddr,
+ IT87_SIO_BEEP_PIN_REG) & 0x3f;
}
if (sio_data->beep_pin)
pr_info("Beeping is supported\n");
@@ -2157,8 +2711,8 @@ static int __init it87_find(unsigned short *address,
board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
board_name = dmi_get_system_info(DMI_BOARD_NAME);
if (board_vendor && board_name) {
- if (strcmp(board_vendor, "nVIDIA") == 0
- && strcmp(board_name, "FN68PT") == 0) {
+ if (strcmp(board_vendor, "nVIDIA") == 0 &&
+ strcmp(board_name, "FN68PT") == 0) {
/*
* On the Shuttle SN68PT, FAN_CTL2 is apparently not
* connected to a fan, but to something else. One user
@@ -2168,373 +2722,15 @@ static int __init it87_find(unsigned short *address,
* the same board is ever used in other systems.
*/
pr_info("Disabling pwm2 due to hardware constraints\n");
- sio_data->skip_pwm = (1 << 1);
+ sio_data->skip_pwm = BIT(1);
}
}
exit:
- superio_exit();
+ superio_exit(sioaddr);
return err;
}
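
Passing the Super-I/O base into it87_find() removes the hard-coded global address and lets the init code probe boards that carry two chips. A plausible caller, sketched under the assumption that the two conventional bases 0x2e and 0x4e are tried (the real init logic lies outside this hunk):

	/* Sketch only: probe both conventional Super-I/O base addresses. */
	static int __init it87_probe_all(void)
	{
		static const int sio_addrs[] = { 0x2e, 0x4e };
		unsigned short isa_address;
		struct it87_sio_data sio_data;
		int i, found = 0;

		for (i = 0; i < ARRAY_SIZE(sio_addrs); i++) {
			memset(&sio_data, 0, sizeof(sio_data));
			if (!it87_find(sio_addrs[i], &isa_address, &sio_data))
				found++;	/* register a platform device here */
		}
		return found ? 0 : -ENODEV;
	}
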
-static void it87_remove_files(struct device *dev)
-{
- struct it87_data *data = platform_get_drvdata(pdev);
- struct it87_sio_data *sio_data = dev_get_platdata(dev);
- int i;
-
- sysfs_remove_group(&dev->kobj, &it87_group);
- for (i = 0; i < 10; i++) {
- if (sio_data->skip_in & (1 << i))
- continue;
- sysfs_remove_group(&dev->kobj, &it87_group_in[i]);
- if (it87_attributes_in_beep[i])
- sysfs_remove_file(&dev->kobj,
- it87_attributes_in_beep[i]);
- }
- for (i = 0; i < 3; i++) {
- if (!(data->has_temp & (1 << i)))
- continue;
- sysfs_remove_group(&dev->kobj, &it87_group_temp[i]);
- if (has_temp_offset(data))
- sysfs_remove_file(&dev->kobj,
- it87_attributes_temp_offset[i]);
- if (sio_data->beep_pin)
- sysfs_remove_file(&dev->kobj,
- it87_attributes_temp_beep[i]);
- }
- for (i = 0; i < 6; i++) {
- if (!(data->has_fan & (1 << i)))
- continue;
- sysfs_remove_group(&dev->kobj, &it87_group_fan[i]);
- if (sio_data->beep_pin)
- sysfs_remove_file(&dev->kobj,
- it87_attributes_fan_beep[i]);
- if (i < 3 && !has_16bit_fans(data))
- sysfs_remove_file(&dev->kobj,
- it87_attributes_fan_div[i]);
- }
- for (i = 0; i < 3; i++) {
- if (sio_data->skip_pwm & (1 << i))
- continue;
- sysfs_remove_group(&dev->kobj, &it87_group_pwm[i]);
- if (has_old_autopwm(data))
- sysfs_remove_group(&dev->kobj,
- &it87_group_autopwm[i]);
- }
- if (!sio_data->skip_vid)
- sysfs_remove_group(&dev->kobj, &it87_group_vid);
- sysfs_remove_group(&dev->kobj, &it87_group_label);
-}
-
-static int it87_probe(struct platform_device *pdev)
-{
- struct it87_data *data;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct it87_sio_data *sio_data = dev_get_platdata(dev);
- int err = 0, i;
- int enable_pwm_interface;
- int fan_beep_need_rw;
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!devm_request_region(&pdev->dev, res->start, IT87_EC_EXTENT,
- DRVNAME)) {
- dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
- (unsigned long)res->start,
- (unsigned long)(res->start + IT87_EC_EXTENT - 1));
- return -EBUSY;
- }
-
- data = devm_kzalloc(&pdev->dev, sizeof(struct it87_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->addr = res->start;
- data->type = sio_data->type;
- data->features = it87_devices[sio_data->type].features;
- data->peci_mask = it87_devices[sio_data->type].peci_mask;
- data->old_peci_mask = it87_devices[sio_data->type].old_peci_mask;
- data->name = it87_devices[sio_data->type].name;
- /*
- * IT8705F Datasheet 0.4.1, 3h == Version G.
- * IT8712F Datasheet 0.9.1, section 8.3.5 indicates 8h == Version J.
- * These are the first revisions with 16-bit tachometer support.
- */
- switch (data->type) {
- case it87:
- if (sio_data->revision >= 0x03) {
- data->features &= ~FEAT_OLD_AUTOPWM;
- data->features |= FEAT_FAN16_CONFIG | FEAT_16BIT_FANS;
- }
- break;
- case it8712:
- if (sio_data->revision >= 0x08) {
- data->features &= ~FEAT_OLD_AUTOPWM;
- data->features |= FEAT_FAN16_CONFIG | FEAT_16BIT_FANS |
- FEAT_FIVE_FANS;
- }
- break;
- default:
- break;
- }
-
- /* Now, we do the remaining detection. */
- if ((it87_read_value(data, IT87_REG_CONFIG) & 0x80)
- || it87_read_value(data, IT87_REG_CHIPID) != 0x90)
- return -ENODEV;
-
- platform_set_drvdata(pdev, data);
-
- mutex_init(&data->update_lock);
-
- /* Check PWM configuration */
- enable_pwm_interface = it87_check_pwm(dev);
-
- /* Starting with IT8721F, we handle scaling of internal voltages */
- if (has_12mv_adc(data)) {
- if (sio_data->internal & (1 << 0))
- data->in_scaled |= (1 << 3); /* in3 is AVCC */
- if (sio_data->internal & (1 << 1))
- data->in_scaled |= (1 << 7); /* in7 is VSB */
- if (sio_data->internal & (1 << 2))
- data->in_scaled |= (1 << 8); /* in8 is Vbat */
- if (sio_data->internal & (1 << 3))
- data->in_scaled |= (1 << 9); /* in9 is AVCC */
- } else if (sio_data->type == it8781 || sio_data->type == it8782 ||
- sio_data->type == it8783) {
- if (sio_data->internal & (1 << 0))
- data->in_scaled |= (1 << 3); /* in3 is VCC5V */
- if (sio_data->internal & (1 << 1))
- data->in_scaled |= (1 << 7); /* in7 is VCCH5V */
- }
-
- data->has_temp = 0x07;
- if (sio_data->skip_temp & (1 << 2)) {
- if (sio_data->type == it8782
- && !(it87_read_value(data, IT87_REG_TEMP_EXTRA) & 0x80))
- data->has_temp &= ~(1 << 2);
- }
-
- /* Initialize the IT87 chip */
- it87_init_device(pdev);
-
- /* Register sysfs hooks */
- err = sysfs_create_group(&dev->kobj, &it87_group);
- if (err)
- return err;
-
- for (i = 0; i < 10; i++) {
- if (sio_data->skip_in & (1 << i))
- continue;
- err = sysfs_create_group(&dev->kobj, &it87_group_in[i]);
- if (err)
- goto error;
- if (sio_data->beep_pin && it87_attributes_in_beep[i]) {
- err = sysfs_create_file(&dev->kobj,
- it87_attributes_in_beep[i]);
- if (err)
- goto error;
- }
- }
-
- for (i = 0; i < 3; i++) {
- if (!(data->has_temp & (1 << i)))
- continue;
- err = sysfs_create_group(&dev->kobj, &it87_group_temp[i]);
- if (err)
- goto error;
- if (has_temp_offset(data)) {
- err = sysfs_create_file(&dev->kobj,
- it87_attributes_temp_offset[i]);
- if (err)
- goto error;
- }
- if (sio_data->beep_pin) {
- err = sysfs_create_file(&dev->kobj,
- it87_attributes_temp_beep[i]);
- if (err)
- goto error;
- }
- }
-
- /* Do not create fan files for disabled fans */
- fan_beep_need_rw = 1;
- for (i = 0; i < 6; i++) {
- if (!(data->has_fan & (1 << i)))
- continue;
- err = sysfs_create_group(&dev->kobj, &it87_group_fan[i]);
- if (err)
- goto error;
-
- if (i < 3 && !has_16bit_fans(data)) {
- err = sysfs_create_file(&dev->kobj,
- it87_attributes_fan_div[i]);
- if (err)
- goto error;
- }
-
- if (sio_data->beep_pin) {
- err = sysfs_create_file(&dev->kobj,
- it87_attributes_fan_beep[i]);
- if (err)
- goto error;
- if (!fan_beep_need_rw)
- continue;
-
- /*
- * As we have a single beep enable bit for all fans,
- * only the first enabled fan has a writable attribute
- * for it.
- */
- if (sysfs_chmod_file(&dev->kobj,
- it87_attributes_fan_beep[i],
- S_IRUGO | S_IWUSR))
- dev_dbg(dev, "chmod +w fan%d_beep failed\n",
- i + 1);
- fan_beep_need_rw = 0;
- }
- }
-
- if (enable_pwm_interface) {
- for (i = 0; i < 3; i++) {
- if (sio_data->skip_pwm & (1 << i))
- continue;
- err = sysfs_create_group(&dev->kobj,
- &it87_group_pwm[i]);
- if (err)
- goto error;
-
- if (!has_old_autopwm(data))
- continue;
- err = sysfs_create_group(&dev->kobj,
- &it87_group_autopwm[i]);
- if (err)
- goto error;
- }
- }
-
- if (!sio_data->skip_vid) {
- data->vrm = vid_which_vrm();
- /* VID reading from Super-I/O config space if available */
- data->vid = sio_data->vid_value;
- err = sysfs_create_group(&dev->kobj, &it87_group_vid);
- if (err)
- goto error;
- }
-
- /* Export labels for internal sensors */
- for (i = 0; i < 4; i++) {
- if (!(sio_data->internal & (1 << i)))
- continue;
- err = sysfs_create_file(&dev->kobj,
- it87_attributes_label[i]);
- if (err)
- goto error;
- }
-
- data->hwmon_dev = hwmon_device_register(dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto error;
- }
-
- return 0;
-
-error:
- it87_remove_files(dev);
- return err;
-}
-
-static int it87_remove(struct platform_device *pdev)
-{
- struct it87_data *data = platform_get_drvdata(pdev);
-
- hwmon_device_unregister(data->hwmon_dev);
- it87_remove_files(&pdev->dev);
-
- return 0;
-}
-
-/*
- * Must be called with data->update_lock held, except during initialization.
- * We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
- * would slow down the IT87 access and should not be necessary.
- */
-static int it87_read_value(struct it87_data *data, u8 reg)
-{
- outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
- return inb_p(data->addr + IT87_DATA_REG_OFFSET);
-}
-
-/*
- * Must be called with data->update_lock held, except during initialization.
- * We ignore the IT87 BUSY flag at this moment - it could lead to deadlocks,
- * would slow down the IT87 access and should not be necessary.
- */
-static void it87_write_value(struct it87_data *data, u8 reg, u8 value)
-{
- outb_p(reg, data->addr + IT87_ADDR_REG_OFFSET);
- outb_p(value, data->addr + IT87_DATA_REG_OFFSET);
-}
-
-/* Return 1 if and only if the PWM interface is safe to use */
-static int it87_check_pwm(struct device *dev)
-{
- struct it87_data *data = dev_get_drvdata(dev);
- /*
- * Some BIOSes fail to correctly configure the IT87 fans. All fans off
- * and polarity set to active low is sign that this is the case so we
- * disable pwm control to protect the user.
- */
- int tmp = it87_read_value(data, IT87_REG_FAN_CTL);
- if ((tmp & 0x87) == 0) {
- if (fix_pwm_polarity) {
- /*
- * The user asks us to attempt a chip reconfiguration.
- * This means switching to active high polarity and
- * inverting all fan speed values.
- */
- int i;
- u8 pwm[3];
-
- for (i = 0; i < 3; i++)
- pwm[i] = it87_read_value(data,
- IT87_REG_PWM(i));
-
- /*
- * If any fan is in automatic pwm mode, the polarity
- * might be correct, as suspicious as it seems, so we
- * better don't change anything (but still disable the
- * PWM interface).
- */
- if (!((pwm[0] | pwm[1] | pwm[2]) & 0x80)) {
- dev_info(dev,
- "Reconfiguring PWM to active high polarity\n");
- it87_write_value(data, IT87_REG_FAN_CTL,
- tmp | 0x87);
- for (i = 0; i < 3; i++)
- it87_write_value(data,
- IT87_REG_PWM(i),
- 0x7f & ~pwm[i]);
- return 1;
- }
-
- dev_info(dev,
- "PWM configuration is too broken to be fixed\n");
- }
-
- dev_info(dev,
- "Detected broken BIOS defaults, disabling PWM interface\n");
- return 0;
- } else if (fix_pwm_polarity) {
- dev_info(dev,
- "PWM configuration looks sane, won't touch\n");
- }
-
- return 1;
-}
-
/* Called when we have found a new IT87. */
static void it87_init_device(struct platform_device *pdev)
{
@@ -2556,7 +2752,7 @@ static void it87_init_device(struct platform_device *pdev)
* these have separate registers for the temperature mapping and the
* manual duty cycle.
*/
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < NUM_AUTO_PWM; i++) {
data->pwm_temp_map[i] = i;
data->pwm_duty[i] = 0x7f; /* Full speed */
data->auto_pwm[i][3] = 0x7f; /* Full speed, hard-coded */
@@ -2569,12 +2765,12 @@ static void it87_init_device(struct platform_device *pdev)
* means -1 degree C, which surprisingly doesn't trigger an alarm,
* but is still confusing, so change to 127 degrees C.
*/
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < NUM_VIN_LIMIT; i++) {
tmp = it87_read_value(data, IT87_REG_VIN_MIN(i));
if (tmp == 0xff)
it87_write_value(data, IT87_REG_VIN_MIN(i), 0);
}
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < NUM_TEMP_LIMIT; i++) {
tmp = it87_read_value(data, IT87_REG_TEMP_HIGH(i));
if (tmp == 0xff)
it87_write_value(data, IT87_REG_TEMP_HIGH(i), 127);
@@ -2619,158 +2815,245 @@ static void it87_init_device(struct platform_device *pdev)
/* Check for additional fans */
if (has_five_fans(data)) {
- if (tmp & (1 << 4))
- data->has_fan |= (1 << 3); /* fan4 enabled */
- if (tmp & (1 << 5))
- data->has_fan |= (1 << 4); /* fan5 enabled */
- if (has_six_fans(data) && (tmp & (1 << 2)))
- data->has_fan |= (1 << 5); /* fan6 enabled */
+ if (tmp & BIT(4))
+ data->has_fan |= BIT(3); /* fan4 enabled */
+ if (tmp & BIT(5))
+ data->has_fan |= BIT(4); /* fan5 enabled */
+ if (has_six_fans(data) && (tmp & BIT(2)))
+ data->has_fan |= BIT(5); /* fan6 enabled */
}
/* Fan input pins may be used for alternative functions */
data->has_fan &= ~sio_data->skip_fan;
+ /* Check if pwm5, pwm6 are enabled */
+ if (has_six_pwm(data)) {
+ /* The following code may be IT8620E specific */
+ tmp = it87_read_value(data, IT87_REG_FAN_DIV);
+ if ((tmp & 0xc0) == 0xc0)
+ sio_data->skip_pwm |= BIT(4);
+ if (!(tmp & BIT(3)))
+ sio_data->skip_pwm |= BIT(5);
+ }
+
/* Start monitoring */
it87_write_value(data, IT87_REG_CONFIG,
(it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
| (update_vbat ? 0x41 : 0x01));
}
-static void it87_update_pwm_ctrl(struct it87_data *data, int nr)
+/* Return 1 if and only if the PWM interface is safe to use */
+static int it87_check_pwm(struct device *dev)
{
- data->pwm_ctrl[nr] = it87_read_value(data, IT87_REG_PWM(nr));
- if (has_newer_autopwm(data)) {
- data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
- data->pwm_duty[nr] = it87_read_value(data,
- IT87_REG_PWM_DUTY(nr));
- } else {
- if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */
- data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
- else /* Manual mode */
- data->pwm_duty[nr] = data->pwm_ctrl[nr] & 0x7f;
- }
+ struct it87_data *data = dev_get_drvdata(dev);
+ /*
+ * Some BIOSes fail to correctly configure the IT87 fans. All fans off
+	 * and polarity set to active low is a sign that this is the case, so we
+ * disable pwm control to protect the user.
+ */
+ int tmp = it87_read_value(data, IT87_REG_FAN_CTL);
- if (has_old_autopwm(data)) {
- int i;
+ if ((tmp & 0x87) == 0) {
+ if (fix_pwm_polarity) {
+ /*
+ * The user asks us to attempt a chip reconfiguration.
+ * This means switching to active high polarity and
+ * inverting all fan speed values.
+ */
+ int i;
+ u8 pwm[3];
- for (i = 0; i < 5 ; i++)
- data->auto_temp[nr][i] = it87_read_value(data,
- IT87_REG_AUTO_TEMP(nr, i));
- for (i = 0; i < 3 ; i++)
- data->auto_pwm[nr][i] = it87_read_value(data,
- IT87_REG_AUTO_PWM(nr, i));
+ for (i = 0; i < ARRAY_SIZE(pwm); i++)
+ pwm[i] = it87_read_value(data,
+ IT87_REG_PWM[i]);
+
+ /*
+ * If any fan is in automatic pwm mode, the polarity
+			 * might be correct, as suspicious as it seems, so we
+			 * had better not change anything (but still disable the
+ * PWM interface).
+ */
+ if (!((pwm[0] | pwm[1] | pwm[2]) & 0x80)) {
+ dev_info(dev,
+ "Reconfiguring PWM to active high polarity\n");
+ it87_write_value(data, IT87_REG_FAN_CTL,
+ tmp | 0x87);
+ for (i = 0; i < 3; i++)
+ it87_write_value(data,
+ IT87_REG_PWM[i],
+ 0x7f & ~pwm[i]);
+ return 1;
+ }
+
+ dev_info(dev,
+ "PWM configuration is too broken to be fixed\n");
+ }
+
+ dev_info(dev,
+ "Detected broken BIOS defaults, disabling PWM interface\n");
+ return 0;
+ } else if (fix_pwm_polarity) {
+ dev_info(dev,
+ "PWM configuration looks sane, won't touch\n");
}
+
+ return 1;
}
-static struct it87_data *it87_update_device(struct device *dev)
+static int it87_probe(struct platform_device *pdev)
{
- struct it87_data *data = dev_get_drvdata(dev);
- int i;
+ struct it87_data *data;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct it87_sio_data *sio_data = dev_get_platdata(dev);
+ int enable_pwm_interface;
+ struct device *hwmon_dev;
- mutex_lock(&data->update_lock);
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!devm_request_region(&pdev->dev, res->start, IT87_EC_EXTENT,
+ DRVNAME)) {
+ dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
+ (unsigned long)res->start,
+ (unsigned long)(res->start + IT87_EC_EXTENT - 1));
+ return -EBUSY;
+ }
- if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
- || !data->valid) {
- if (update_vbat) {
- /*
- * Cleared after each update, so reenable. Value
- * returned by this read will be previous value
- */
- it87_write_value(data, IT87_REG_CONFIG,
- it87_read_value(data, IT87_REG_CONFIG) | 0x40);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct it87_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->addr = res->start;
+ data->type = sio_data->type;
+ data->features = it87_devices[sio_data->type].features;
+ data->peci_mask = it87_devices[sio_data->type].peci_mask;
+ data->old_peci_mask = it87_devices[sio_data->type].old_peci_mask;
+ /*
+ * IT8705F Datasheet 0.4.1, 3h == Version G.
+ * IT8712F Datasheet 0.9.1, section 8.3.5 indicates 8h == Version J.
+ * These are the first revisions with 16-bit tachometer support.
+ */
+ switch (data->type) {
+ case it87:
+ if (sio_data->revision >= 0x03) {
+ data->features &= ~FEAT_OLD_AUTOPWM;
+ data->features |= FEAT_FAN16_CONFIG | FEAT_16BIT_FANS;
}
- for (i = 0; i <= 7; i++) {
- data->in[i][0] =
- it87_read_value(data, IT87_REG_VIN(i));
- data->in[i][1] =
- it87_read_value(data, IT87_REG_VIN_MIN(i));
- data->in[i][2] =
- it87_read_value(data, IT87_REG_VIN_MAX(i));
+ break;
+ case it8712:
+ if (sio_data->revision >= 0x08) {
+ data->features &= ~FEAT_OLD_AUTOPWM;
+ data->features |= FEAT_FAN16_CONFIG | FEAT_16BIT_FANS |
+ FEAT_FIVE_FANS;
}
- /* in8 (battery) has no limit registers */
- data->in[8][0] = it87_read_value(data, IT87_REG_VIN(8));
- if (data->type == it8603)
- data->in[9][0] = it87_read_value(data, 0x2f);
+ break;
+ default:
+ break;
+ }
- for (i = 0; i < 6; i++) {
- /* Skip disabled fans */
- if (!(data->has_fan & (1 << i)))
- continue;
+ /* Now, we do the remaining detection. */
+ if ((it87_read_value(data, IT87_REG_CONFIG) & 0x80) ||
+ it87_read_value(data, IT87_REG_CHIPID) != 0x90)
+ return -ENODEV;
- data->fan[i][1] =
- it87_read_value(data, IT87_REG_FAN_MIN[i]);
- data->fan[i][0] = it87_read_value(data,
- IT87_REG_FAN[i]);
- /* Add high byte if in 16-bit mode */
- if (has_16bit_fans(data)) {
- data->fan[i][0] |= it87_read_value(data,
- IT87_REG_FANX[i]) << 8;
- data->fan[i][1] |= it87_read_value(data,
- IT87_REG_FANX_MIN[i]) << 8;
- }
- }
- for (i = 0; i < 3; i++) {
- if (!(data->has_temp & (1 << i)))
- continue;
- data->temp[i][0] =
- it87_read_value(data, IT87_REG_TEMP(i));
- data->temp[i][1] =
- it87_read_value(data, IT87_REG_TEMP_LOW(i));
- data->temp[i][2] =
- it87_read_value(data, IT87_REG_TEMP_HIGH(i));
- if (has_temp_offset(data))
- data->temp[i][3] =
- it87_read_value(data,
- IT87_REG_TEMP_OFFSET[i]);
- }
+ platform_set_drvdata(pdev, data);
- /* Newer chips don't have clock dividers */
- if ((data->has_fan & 0x07) && !has_16bit_fans(data)) {
- i = it87_read_value(data, IT87_REG_FAN_DIV);
- data->fan_div[0] = i & 0x07;
- data->fan_div[1] = (i >> 3) & 0x07;
- data->fan_div[2] = (i & 0x40) ? 3 : 1;
- }
+ mutex_init(&data->update_lock);
- data->alarms =
- it87_read_value(data, IT87_REG_ALARM1) |
- (it87_read_value(data, IT87_REG_ALARM2) << 8) |
- (it87_read_value(data, IT87_REG_ALARM3) << 16);
- data->beeps = it87_read_value(data, IT87_REG_BEEP_ENABLE);
+ /* Check PWM configuration */
+ enable_pwm_interface = it87_check_pwm(dev);
- data->fan_main_ctrl = it87_read_value(data,
- IT87_REG_FAN_MAIN_CTRL);
- data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL);
- for (i = 0; i < 3; i++)
- it87_update_pwm_ctrl(data, i);
+ /* Starting with IT8721F, we handle scaling of internal voltages */
+ if (has_12mv_adc(data)) {
+ if (sio_data->internal & BIT(0))
+ data->in_scaled |= BIT(3); /* in3 is AVCC */
+ if (sio_data->internal & BIT(1))
+ data->in_scaled |= BIT(7); /* in7 is VSB */
+ if (sio_data->internal & BIT(2))
+ data->in_scaled |= BIT(8); /* in8 is Vbat */
+ if (sio_data->internal & BIT(3))
+ data->in_scaled |= BIT(9); /* in9 is AVCC */
+ } else if (sio_data->type == it8781 || sio_data->type == it8782 ||
+ sio_data->type == it8783) {
+ if (sio_data->internal & BIT(0))
+ data->in_scaled |= BIT(3); /* in3 is VCC5V */
+ if (sio_data->internal & BIT(1))
+ data->in_scaled |= BIT(7); /* in7 is VCCH5V */
+ }
- data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
- data->extra = it87_read_value(data, IT87_REG_TEMP_EXTRA);
- /*
- * The IT8705F does not have VID capability.
- * The IT8718F and later don't use IT87_REG_VID for the
- * same purpose.
- */
- if (data->type == it8712 || data->type == it8716) {
- data->vid = it87_read_value(data, IT87_REG_VID);
- /*
- * The older IT8712F revisions had only 5 VID pins,
- * but we assume it is always safe to read 6 bits.
- */
- data->vid &= 0x3f;
- }
- data->last_updated = jiffies;
- data->valid = 1;
+ data->has_temp = 0x07;
+ if (sio_data->skip_temp & BIT(2)) {
+ if (sio_data->type == it8782 &&
+ !(it87_read_value(data, IT87_REG_TEMP_EXTRA) & 0x80))
+ data->has_temp &= ~BIT(2);
}
- mutex_unlock(&data->update_lock);
+ data->in_internal = sio_data->internal;
+ data->has_in = 0x3ff & ~sio_data->skip_in;
+
+ if (has_six_temp(data)) {
+ u8 reg = it87_read_value(data, IT87_REG_TEMP456_ENABLE);
+
+ /* Check for additional temperature sensors */
+ if ((reg & 0x03) >= 0x02)
+ data->has_temp |= BIT(3);
+ if (((reg >> 2) & 0x03) >= 0x02)
+ data->has_temp |= BIT(4);
+ if (((reg >> 4) & 0x03) >= 0x02)
+ data->has_temp |= BIT(5);
+
+ /* Check for additional voltage sensors */
+ if ((reg & 0x03) == 0x01)
+ data->has_in |= BIT(10);
+ if (((reg >> 2) & 0x03) == 0x01)
+ data->has_in |= BIT(11);
+ if (((reg >> 4) & 0x03) == 0x01)
+ data->has_in |= BIT(12);
+ }
- return data;
+ data->has_beep = !!sio_data->beep_pin;
+
+ /* Initialize the IT87 chip */
+ it87_init_device(pdev);
+
+ if (!sio_data->skip_vid) {
+ data->has_vid = true;
+ data->vrm = vid_which_vrm();
+ /* VID reading from Super-I/O config space if available */
+ data->vid = sio_data->vid_value;
+ }
+
+ /* Prepare for sysfs hooks */
+ data->groups[0] = &it87_group;
+ data->groups[1] = &it87_group_in;
+ data->groups[2] = &it87_group_temp;
+ data->groups[3] = &it87_group_fan;
+
+ if (enable_pwm_interface) {
+ data->has_pwm = BIT(ARRAY_SIZE(IT87_REG_PWM)) - 1;
+ data->has_pwm &= ~sio_data->skip_pwm;
+
+ data->groups[4] = &it87_group_pwm;
+ if (has_old_autopwm(data) || has_newer_autopwm(data))
+ data->groups[5] = &it87_group_auto_pwm;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev,
+ it87_devices[sio_data->type].name,
+ data, data->groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static int __init it87_device_add(unsigned short address,
+static struct platform_driver it87_driver = {
+ .driver = {
+ .name = DRVNAME,
+ },
+ .probe = it87_probe,
+};
+
+static int __init it87_device_add(int index, unsigned short address,
const struct it87_sio_data *sio_data)
{
+ struct platform_device *pdev;
struct resource res = {
.start = address + IT87_EC_OFFSET,
.end = address + IT87_EC_OFFSET + IT87_EC_EXTENT - 1,
@@ -2781,14 +3064,11 @@ static int __init it87_device_add(unsigned short address,
err = acpi_check_resource_conflict(&res);
if (err)
- goto exit;
+ return err;
pdev = platform_device_alloc(DRVNAME, address);
- if (!pdev) {
- err = -ENOMEM;
- pr_err("Device allocation failed\n");
- goto exit;
- }
+ if (!pdev)
+ return -ENOMEM;
err = platform_device_add_resources(pdev, &res, 1);
if (err) {
@@ -2809,44 +3089,61 @@ static int __init it87_device_add(unsigned short address,
goto exit_device_put;
}
+ it87_pdev[index] = pdev;
return 0;
exit_device_put:
platform_device_put(pdev);
-exit:
return err;
}
static int __init sm_it87_init(void)
{
- int err;
- unsigned short isa_address = 0;
+ int sioaddr[2] = { REG_2E, REG_4E };
struct it87_sio_data sio_data;
+ unsigned short isa_address;
+ bool found = false;
+ int i, err;
- memset(&sio_data, 0, sizeof(struct it87_sio_data));
- err = it87_find(&isa_address, &sio_data);
- if (err)
- return err;
err = platform_driver_register(&it87_driver);
if (err)
return err;
- err = it87_device_add(isa_address, &sio_data);
- if (err) {
- platform_driver_unregister(&it87_driver);
- return err;
+ for (i = 0; i < ARRAY_SIZE(sioaddr); i++) {
+ memset(&sio_data, 0, sizeof(struct it87_sio_data));
+ isa_address = 0;
+ err = it87_find(sioaddr[i], &isa_address, &sio_data);
+ if (err || isa_address == 0)
+ continue;
+
+ err = it87_device_add(i, isa_address, &sio_data);
+ if (err)
+ goto exit_dev_unregister;
+ found = true;
}
+ if (!found) {
+ err = -ENODEV;
+ goto exit_unregister;
+ }
return 0;
+
+exit_dev_unregister:
+ /* NULL check handled by platform_device_unregister */
+ platform_device_unregister(it87_pdev[0]);
+exit_unregister:
+ platform_driver_unregister(&it87_driver);
+ return err;
}
static void __exit sm_it87_exit(void)
{
- platform_device_unregister(pdev);
+ /* NULL check handled by platform_device_unregister */
+ platform_device_unregister(it87_pdev[1]);
+ platform_device_unregister(it87_pdev[0]);
platform_driver_unregister(&it87_driver);
}
-
MODULE_AUTHOR("Chris Gauthron, Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("IT8705F/IT871xF/IT872xF hardware monitoring driver");
module_param(update_vbat, bool, 0);
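
The it87 hunks above funnel every chip access through it87_read_value()/it87_write_value(), the ITE parts' indexed (address port / data port) interface. A minimal standalone sketch of that pattern, assuming the driver's usual 0x05/0x06 offsets (IT87_ADDR_REG_OFFSET/IT87_DATA_REG_OFFSET) and an I/O base already reserved via devm_request_region():

#include <linux/io.h>
#include <linux/types.h>

/* Sketch only; mirrors the helpers above, BUSY flag deliberately ignored. */
static u8 it87_style_read(unsigned short base, u8 reg)
{
	outb_p(reg, base + 0x05);	/* select the register... */
	return inb_p(base + 0x06);	/* ...then fetch its value */
}

static void it87_style_write(unsigned short base, u8 reg, u8 val)
{
	outb_p(reg, base + 0x05);	/* select the register... */
	outb_p(val, base + 0x06);	/* ...then store the new value */
}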
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 0addc84ba..69166ab31 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -77,7 +77,6 @@ static const u8 LM75_REG_TEMP[3] = {
struct lm75_data {
struct i2c_client *client;
struct device *hwmon_dev;
- struct thermal_zone_device *tz;
struct mutex update_lock;
u8 orig_conf;
u8 resolution; /* In bits, between 9 and 12 */
@@ -306,11 +305,9 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (IS_ERR(data->hwmon_dev))
return PTR_ERR(data->hwmon_dev);
- data->tz = thermal_zone_of_sensor_register(data->hwmon_dev, 0,
- data->hwmon_dev,
- &lm75_of_thermal_ops);
- if (IS_ERR(data->tz))
- data->tz = NULL;
+ devm_thermal_zone_of_sensor_register(data->hwmon_dev, 0,
+ data->hwmon_dev,
+ &lm75_of_thermal_ops);
dev_info(dev, "%s: sensor '%s'\n",
dev_name(data->hwmon_dev), client->name);
@@ -322,7 +319,6 @@ static int lm75_remove(struct i2c_client *client)
{
struct lm75_data *data = i2c_get_clientdata(client);
- thermal_zone_of_sensor_unregister(data->hwmon_dev, data->tz);
hwmon_device_unregister(data->hwmon_dev);
lm75_write_value(client, LM75_REG_CONF, data->orig_conf);
return 0;
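
This lm75 change, like the ntc_thermistor, scpi-hwmon and tmp102 hunks further down, moves to devm_thermal_zone_of_sensor_register(), so the driver no longer keeps the zone pointer or unregisters it on remove. A minimal sketch of the pattern (demo_register() is a hypothetical caller; the ops table comes from the driver):

#include <linux/device.h>
#include <linux/thermal.h>

static int demo_register(struct device *hwmon_dev,
			 const struct thermal_zone_of_device_ops *ops)
{
	struct thermal_zone_device *tz;

	/* Device-managed: torn down automatically when hwmon_dev goes away. */
	tz = devm_thermal_zone_of_sensor_register(hwmon_dev, 0,
						  hwmon_dev, ops);
	if (IS_ERR(tz))
		/* No DT thermal zone uses this sensor; deliberately non-fatal. */
		dev_dbg(hwmon_dev, "not bound to a thermal zone\n");

	return 0;
}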
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index c9ff08dbe..e30a5939d 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -375,7 +375,7 @@ struct lm90_data {
int kind;
u32 flags;
- int update_interval; /* in milliseconds */
+ unsigned int update_interval; /* in milliseconds */
u8 config_orig; /* Original configuration register value */
u8 convrate_orig; /* Original conversion rate register value */
diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
new file mode 100644
index 000000000..30a100e70
--- /dev/null
+++ b/drivers/hwmon/max31722.c
@@ -0,0 +1,165 @@
+/*
+ * max31722 - hwmon driver for Maxim Integrated MAX31722/MAX31723 SPI
+ * digital thermometer and thermostats.
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#define MAX31722_REG_CFG 0x00
+#define MAX31722_REG_TEMP_LSB 0x01
+
+#define MAX31722_MODE_CONTINUOUS 0x00
+#define MAX31722_MODE_STANDBY 0x01
+#define MAX31722_MODE_MASK 0xFE
+#define MAX31722_RESOLUTION_12BIT 0x06
+#define MAX31722_WRITE_MASK 0x80
+
+struct max31722_data {
+ struct device *hwmon_dev;
+ struct spi_device *spi_device;
+ u8 mode;
+};
+
+static int max31722_set_mode(struct max31722_data *data, u8 mode)
+{
+ int ret;
+ struct spi_device *spi = data->spi_device;
+ u8 buf[2] = {
+ MAX31722_REG_CFG | MAX31722_WRITE_MASK,
+ (data->mode & MAX31722_MODE_MASK) | mode
+ };
+
+ ret = spi_write(spi, &buf, sizeof(buf));
+ if (ret < 0) {
+ dev_err(&spi->dev, "failed to set sensor mode.\n");
+ return ret;
+ }
+ data->mode = (data->mode & MAX31722_MODE_MASK) | mode;
+
+ return 0;
+}
+
+static ssize_t max31722_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret;
+ struct max31722_data *data = dev_get_drvdata(dev);
+
+ ret = spi_w8r16(data->spi_device, MAX31722_REG_TEMP_LSB);
+ if (ret < 0)
+ return ret;
+ /* Keep 12 bits and multiply by the scale of 62.5 millidegrees/bit. */
+ return sprintf(buf, "%d\n", (s16)le16_to_cpu(ret) * 125 / 32);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ max31722_show_temp, NULL, 0);
+
+static struct attribute *max31722_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(max31722);
+
+static int max31722_probe(struct spi_device *spi)
+{
+ int ret;
+ struct max31722_data *data;
+
+ data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, data);
+ data->spi_device = spi;
+ /*
+ * Set SD bit to 0 so we can have continuous measurements.
+ * Set resolution to 12 bits for maximum precision.
+ */
+ data->mode = MAX31722_MODE_CONTINUOUS | MAX31722_RESOLUTION_12BIT;
+ ret = max31722_set_mode(data, MAX31722_MODE_CONTINUOUS);
+ if (ret < 0)
+ return ret;
+
+ data->hwmon_dev = hwmon_device_register_with_groups(&spi->dev,
+ spi->modalias,
+ data,
+ max31722_groups);
+ if (IS_ERR(data->hwmon_dev)) {
+ max31722_set_mode(data, MAX31722_MODE_STANDBY);
+ return PTR_ERR(data->hwmon_dev);
+ }
+
+ return 0;
+}
+
+static int max31722_remove(struct spi_device *spi)
+{
+ struct max31722_data *data = spi_get_drvdata(spi);
+
+ hwmon_device_unregister(data->hwmon_dev);
+
+ return max31722_set_mode(data, MAX31722_MODE_STANDBY);
+}
+
+static int __maybe_unused max31722_suspend(struct device *dev)
+{
+ struct spi_device *spi_device = to_spi_device(dev);
+ struct max31722_data *data = spi_get_drvdata(spi_device);
+
+ return max31722_set_mode(data, MAX31722_MODE_STANDBY);
+}
+
+static int __maybe_unused max31722_resume(struct device *dev)
+{
+ struct spi_device *spi_device = to_spi_device(dev);
+ struct max31722_data *data = spi_get_drvdata(spi_device);
+
+ return max31722_set_mode(data, MAX31722_MODE_CONTINUOUS);
+}
+
+static SIMPLE_DEV_PM_OPS(max31722_pm_ops, max31722_suspend, max31722_resume);
+
+static const struct spi_device_id max31722_spi_id[] = {
+ {"max31722", 0},
+ {"max31723", 0},
+ {}
+};
+
+static const struct acpi_device_id __maybe_unused max31722_acpi_id[] = {
+ {"MAX31722", 0},
+ {"MAX31723", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(spi, max31722_spi_id);
+
+static struct spi_driver max31722_driver = {
+ .driver = {
+ .name = "max31722",
+ .pm = &max31722_pm_ops,
+ .acpi_match_table = ACPI_PTR(max31722_acpi_id),
+ },
+ .probe = max31722_probe,
+ .remove = max31722_remove,
+ .id_table = max31722_spi_id,
+};
+
+module_spi_driver(max31722_driver);
+
+MODULE_AUTHOR("Tiberiu Breana <tiberiu.a.breana@intel.com>");
+MODULE_DESCRIPTION("max31722 sensor driver");
+MODULE_LICENSE("GPL v2");
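
The raw * 125 / 32 conversion in max31722_show_temp() above is raw * 1000 / 256 in lowest terms: the 12-bit result sits left-justified in a 16-bit word, so one LSB of the raw value is 1/256 degC (62.5 mdegC per 12-bit step). A standalone check of the arithmetic, with hypothetical raw readings:

#include <stdint.h>
#include <stdio.h>

static int raw_to_mdegc(int16_t raw)
{
	return (int)raw * 125 / 32;	/* same as raw * 1000 / 256 */
}

int main(void)
{
	printf("%d\n", raw_to_mdegc(0x1900));		/*  25 degC ->  25000 */
	printf("%d\n", raw_to_mdegc((int16_t)0xE700));	/* -25 degC -> -25000 */
	return 0;
}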
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index faa6e8dfb..8ef7b713c 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -259,7 +259,6 @@ struct ntc_data {
struct device *dev;
int n_comp;
char name[PLATFORM_NAME_SIZE];
- struct thermal_zone_device *tz;
};
#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
@@ -579,6 +578,7 @@ static const struct thermal_zone_of_device_ops ntc_of_thermal_ops = {
static int ntc_thermistor_probe(struct platform_device *pdev)
{
+ struct thermal_zone_device *tz;
const struct of_device_id *of_id =
of_match_device(of_match_ptr(ntc_match), &pdev->dev);
const struct platform_device_id *pdev_id;
@@ -677,12 +677,10 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n",
pdev_id->name);
- data->tz = thermal_zone_of_sensor_register(data->dev, 0, data->dev,
- &ntc_of_thermal_ops);
- if (IS_ERR(data->tz)) {
+ tz = devm_thermal_zone_of_sensor_register(data->dev, 0, data->dev,
+ &ntc_of_thermal_ops);
+ if (IS_ERR(tz))
dev_dbg(&pdev->dev, "Failed to register to thermal fw.\n");
- data->tz = NULL;
- }
return 0;
err_after_sysfs:
@@ -700,8 +698,6 @@ static int ntc_thermistor_remove(struct platform_device *pdev)
sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
ntc_iio_channel_release(pdata);
- thermal_zone_of_sensor_unregister(data->dev, data->tz);
-
return 0;
}
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 3e23003f7..f9af3935b 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -40,15 +40,18 @@ struct pwm_fan_ctx {
static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm)
{
+ struct pwm_args pargs;
unsigned long duty;
int ret = 0;
+ pwm_get_args(ctx->pwm, &pargs);
+
mutex_lock(&ctx->lock);
if (ctx->pwm_value == pwm)
goto exit_set_pwm_err;
- duty = DIV_ROUND_UP(pwm * (ctx->pwm->period - 1), MAX_PWM);
- ret = pwm_config(ctx->pwm, duty, ctx->pwm->period);
+ duty = DIV_ROUND_UP(pwm * (pargs.period - 1), MAX_PWM);
+ ret = pwm_config(ctx->pwm, duty, pargs.period);
if (ret)
goto exit_set_pwm_err;
@@ -215,6 +218,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
{
struct thermal_cooling_device *cdev;
struct pwm_fan_ctx *ctx;
+ struct pwm_args pargs;
struct device *hwmon;
int duty_cycle;
int ret;
@@ -233,11 +237,19 @@ static int pwm_fan_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to the
+ * atomic PWM API.
+ */
+ pwm_apply_args(ctx->pwm);
+
/* Set duty cycle to maximum allowed */
- duty_cycle = ctx->pwm->period - 1;
+ pwm_get_args(ctx->pwm, &pargs);
+
+ duty_cycle = pargs.period - 1;
ctx->pwm_value = MAX_PWM;
- ret = pwm_config(ctx->pwm, duty_cycle, ctx->pwm->period);
+ ret = pwm_config(ctx->pwm, duty_cycle, pargs.period);
if (ret) {
dev_err(&pdev->dev, "Failed to configure PWM\n");
return ret;
@@ -303,14 +315,16 @@ static int pwm_fan_suspend(struct device *dev)
static int pwm_fan_resume(struct device *dev)
{
struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
+ struct pwm_args pargs;
unsigned long duty;
int ret;
if (ctx->pwm_value == 0)
return 0;
- duty = DIV_ROUND_UP(ctx->pwm_value * (ctx->pwm->period - 1), MAX_PWM);
- ret = pwm_config(ctx->pwm, duty, ctx->pwm->period);
+ pwm_get_args(ctx->pwm, &pargs);
+ duty = DIV_ROUND_UP(ctx->pwm_value * (pargs.period - 1), MAX_PWM);
+ ret = pwm_config(ctx->pwm, duty, pargs.period);
if (ret)
return ret;
return pwm_enable(ctx->pwm);
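
All three pwm-fan hunks above compute the same duty cycle, now taking the period from pwm_get_args() instead of dereferencing ctx->pwm->period directly. The scaling maps 0..MAX_PWM onto 0..period-1, rounding up; a standalone check with a hypothetical 10000 ns period:

#include <stdio.h>

#define MAX_PWM 255

static unsigned long pwm_to_duty(unsigned long pwm, unsigned long period)
{
	/* DIV_ROUND_UP(pwm * (period - 1), MAX_PWM), written out */
	return (pwm * (period - 1) + MAX_PWM - 1) / MAX_PWM;
}

int main(void)
{
	printf("%lu\n", pwm_to_duty(255, 10000));	/* full speed -> 9999 */
	printf("%lu\n", pwm_to_duty(128, 10000));	/* ~half      -> 5020 */
	printf("%lu\n", pwm_to_duty(0, 10000));		/* off        -> 0    */
	return 0;
}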
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 131a2815d..d24d7b604 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -449,7 +449,7 @@ static int sch5636_probe(struct platform_device *pdev)
}
revision[i] = val;
}
- pr_info("Found %s chip at %#hx, revison: %d.%02d\n", DEVNAME,
+ pr_info("Found %s chip at %#hx, revision: %d.%02d\n", DEVNAME,
data->addr, revision[0], revision[1]);
/* Read all temp + fan ctrl registers to determine which are active */
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 912b449c8..25b44e689 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -31,10 +31,8 @@ struct sensor_data {
};
struct scpi_thermal_zone {
- struct list_head list;
int sensor_id;
struct scpi_sensors *scpi_sensors;
- struct thermal_zone_device *tzd;
};
struct scpi_sensors {
@@ -92,20 +90,6 @@ scpi_show_label(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%s\n", sensor->info.name);
}
-static void
-unregister_thermal_zones(struct platform_device *pdev,
- struct scpi_sensors *scpi_sensors)
-{
- struct list_head *pos;
-
- list_for_each(pos, &scpi_sensors->thermal_zones) {
- struct scpi_thermal_zone *zone;
-
- zone = list_entry(pos, struct scpi_thermal_zone, list);
- thermal_zone_of_sensor_unregister(&pdev->dev, zone->tzd);
- }
-}
-
static struct thermal_zone_of_device_ops scpi_sensor_ops = {
.get_temp = scpi_read_temp,
};
@@ -118,7 +102,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
struct scpi_ops *scpi_ops;
struct device *hwdev, *dev = &pdev->dev;
struct scpi_sensors *scpi_sensors;
- int ret, idx;
+ int idx, ret;
scpi_ops = get_scpi_ops();
if (!scpi_ops)
@@ -232,48 +216,35 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&scpi_sensors->thermal_zones);
for (i = 0; i < nr_sensors; i++) {
struct sensor_data *sensor = &scpi_sensors->data[i];
+ struct thermal_zone_device *z;
struct scpi_thermal_zone *zone;
if (sensor->info.class != TEMPERATURE)
continue;
zone = devm_kzalloc(dev, sizeof(*zone), GFP_KERNEL);
- if (!zone) {
- ret = -ENOMEM;
- goto unregister_tzd;
- }
+ if (!zone)
+ return -ENOMEM;
zone->sensor_id = i;
zone->scpi_sensors = scpi_sensors;
- zone->tzd = thermal_zone_of_sensor_register(dev,
- sensor->info.sensor_id, zone, &scpi_sensor_ops);
+ z = devm_thermal_zone_of_sensor_register(dev,
+ sensor->info.sensor_id,
+ zone,
+ &scpi_sensor_ops);
/*
* The call to thermal_zone_of_sensor_register returns
* an error for sensors that are not associated with
* any thermal zones or if the thermal subsystem is
* not configured.
*/
- if (IS_ERR(zone->tzd)) {
+ if (IS_ERR(z)) {
devm_kfree(dev, zone);
continue;
}
- list_add(&zone->list, &scpi_sensors->thermal_zones);
}
return 0;
-
-unregister_tzd:
- unregister_thermal_zones(pdev, scpi_sensors);
- return ret;
-}
-
-static int scpi_hwmon_remove(struct platform_device *pdev)
-{
- struct scpi_sensors *scpi_sensors = platform_get_drvdata(pdev);
-
- unregister_thermal_zones(pdev, scpi_sensors);
-
- return 0;
}
static const struct of_device_id scpi_of_match[] = {
@@ -288,7 +259,6 @@ static struct platform_driver scpi_hwmon_platdrv = {
.of_match_table = scpi_of_match,
},
.probe = scpi_hwmon_probe,
- .remove = scpi_hwmon_remove,
};
module_platform_driver(scpi_hwmon_platdrv);
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 5289aa098..f1e96fd7f 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -53,7 +53,6 @@
struct tmp102 {
struct i2c_client *client;
struct device *hwmon_dev;
- struct thermal_zone_device *tz;
struct mutex lock;
u16 config_orig;
unsigned long last_update;
@@ -232,10 +231,8 @@ static int tmp102_probe(struct i2c_client *client,
goto fail_restore_config;
}
tmp102->hwmon_dev = hwmon_dev;
- tmp102->tz = thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
- &tmp102_of_thermal_ops);
- if (IS_ERR(tmp102->tz))
- tmp102->tz = NULL;
+ devm_thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
+ &tmp102_of_thermal_ops);
dev_info(dev, "initialized\n");
@@ -251,7 +248,6 @@ static int tmp102_remove(struct i2c_client *client)
{
struct tmp102 *tmp102 = i2c_get_clientdata(client);
- thermal_zone_of_sensor_unregister(tmp102->hwmon_dev, tmp102->tz);
hwmon_device_unregister(tmp102->hwmon_dev);
/* Stop monitoring if device was stopped originally */
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index d50c701b1..407444144 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -313,7 +313,7 @@ int of_hwspin_lock_get_id(struct device_node *np, int index)
hwlock = radix_tree_deref_slot(slot);
if (unlikely(!hwlock))
continue;
- if (radix_tree_is_indirect_ptr(hwlock)) {
+ if (radix_tree_deref_retry(hwlock)) {
slot = radix_tree_iter_retry(&iter);
continue;
}
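
The hwspinlock one-liner above swaps in the helper meant for this idiom: under RCU a radix-tree slot can move mid-walk, and radix_tree_deref_retry() detects such an entry so the iterator can restart rather than dereference it. A sketch of the full lookup pattern, assuming the caller holds rcu_read_lock():

#include <linux/radix-tree.h>

static void *first_entry(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void **slot;

	radix_tree_for_each_slot(slot, root, &iter, 0) {
		void *entry = radix_tree_deref_slot(slot);

		if (unlikely(!entry))
			continue;
		if (radix_tree_deref_retry(entry)) {
			/* entry moved under us: restart the walk here */
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		return entry;		/* first live entry found */
	}
	return NULL;
}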
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index db0541031..130cb2114 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -78,4 +78,15 @@ config CORESIGHT_QCOM_REPLICATOR
programmable ATB replicator sends the ATB trace stream from the
	  ETB/ETF to the TPIU and ETR.
+config CORESIGHT_STM
+ bool "CoreSight System Trace Macrocell driver"
+ depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64
+ select CORESIGHT_LINKS_AND_SINKS
+ select STM
+ help
+	  This driver provides support for hardware-assisted software
+	  instrumentation based tracing. This is primarily used for
+	  logging useful software events or data coming from various entities
+	  in the system, possibly running different OSs.
+
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index cf8c6d689..af480d9c1 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -1,15 +1,18 @@
#
# Makefile for CoreSight drivers.
#
-obj-$(CONFIG_CORESIGHT) += coresight.o
+obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o
obj-$(CONFIG_OF) += of_coresight.o
-obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
+obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o \
+ coresight-tmc-etf.o \
+ coresight-tmc-etr.o
obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
coresight-replicator.o
obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
- coresight-etm3x-sysfs.o \
- coresight-etm-perf.o
-obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o
+ coresight-etm3x-sysfs.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
+ coresight-etm4x-sysfs.o
obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
+obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index acbce7993..4d20b0be0 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -71,26 +71,6 @@
#define ETB_FRAME_SIZE_WORDS 4
/**
- * struct cs_buffer - keep track of a recording session' specifics
- * @cur: index of the current buffer
- * @nr_pages: max number of pages granted to us
- * @offset: offset within the current buffer
- * @data_size: how much we collected in this run
- * @lost: other than zero if we had a HW buffer wrap around
- * @snapshot: is this run in snapshot mode
- * @data_pages: a handle the ring buffer
- */
-struct cs_buffers {
- unsigned int cur;
- unsigned int nr_pages;
- unsigned long offset;
- local_t data_size;
- local_t lost;
- bool snapshot;
- void **data_pages;
-};
-
-/**
* struct etb_drvdata - specifics associated to an ETB component
* @base: memory mapped base address for this component.
* @dev: the device entity associated to this component.
@@ -440,7 +420,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);
/* The new read pointer must be frame size aligned */
- to_read -= handle->size & mask;
+ to_read = handle->size & mask;
/*
* Move the RAM read pointer up, keeping in mind that
* everything is in frame size units.
@@ -448,7 +428,8 @@ static void etb_update_buffer(struct coresight_device *csdev,
read_ptr = (write_ptr + drvdata->buffer_depth) -
to_read / ETB_FRAME_SIZE_WORDS;
		/* Wrap around if need be */
- read_ptr &= ~(drvdata->buffer_depth - 1);
+ if (read_ptr > (drvdata->buffer_depth - 1))
+ read_ptr -= drvdata->buffer_depth;
/* let the decoder know we've skipped ahead */
local_inc(&buf->lost);
}
@@ -579,47 +560,29 @@ static const struct file_operations etb_fops = {
.llseek = no_llseek,
};
-static ssize_t status_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- unsigned long flags;
- u32 etb_rdr, etb_sr, etb_rrp, etb_rwp;
- u32 etb_trg, etb_cr, etb_ffsr, etb_ffcr;
- struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- pm_runtime_get_sync(drvdata->dev);
- spin_lock_irqsave(&drvdata->spinlock, flags);
- CS_UNLOCK(drvdata->base);
-
- etb_rdr = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
- etb_sr = readl_relaxed(drvdata->base + ETB_STATUS_REG);
- etb_rrp = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
- etb_rwp = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
- etb_trg = readl_relaxed(drvdata->base + ETB_TRG);
- etb_cr = readl_relaxed(drvdata->base + ETB_CTL_REG);
- etb_ffsr = readl_relaxed(drvdata->base + ETB_FFSR);
- etb_ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
-
- CS_LOCK(drvdata->base);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
- pm_runtime_put(drvdata->dev);
-
- return sprintf(buf,
- "Depth:\t\t0x%x\n"
- "Status:\t\t0x%x\n"
- "RAM read ptr:\t0x%x\n"
- "RAM wrt ptr:\t0x%x\n"
- "Trigger cnt:\t0x%x\n"
- "Control:\t0x%x\n"
- "Flush status:\t0x%x\n"
- "Flush ctrl:\t0x%x\n",
- etb_rdr, etb_sr, etb_rrp, etb_rwp,
- etb_trg, etb_cr, etb_ffsr, etb_ffcr);
-
- return -EINVAL;
-}
-static DEVICE_ATTR_RO(status);
+#define coresight_etb10_simple_func(name, offset) \
+ coresight_simple_func(struct etb_drvdata, name, offset)
+
+coresight_etb10_simple_func(rdp, ETB_RAM_DEPTH_REG);
+coresight_etb10_simple_func(sts, ETB_STATUS_REG);
+coresight_etb10_simple_func(rrp, ETB_RAM_READ_POINTER);
+coresight_etb10_simple_func(rwp, ETB_RAM_WRITE_POINTER);
+coresight_etb10_simple_func(trg, ETB_TRG);
+coresight_etb10_simple_func(ctl, ETB_CTL_REG);
+coresight_etb10_simple_func(ffsr, ETB_FFSR);
+coresight_etb10_simple_func(ffcr, ETB_FFCR);
+
+static struct attribute *coresight_etb_mgmt_attrs[] = {
+ &dev_attr_rdp.attr,
+ &dev_attr_sts.attr,
+ &dev_attr_rrp.attr,
+ &dev_attr_rwp.attr,
+ &dev_attr_trg.attr,
+ &dev_attr_ctl.attr,
+ &dev_attr_ffsr.attr,
+ &dev_attr_ffcr.attr,
+ NULL,
+};
static ssize_t trigger_cntr_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -649,10 +612,23 @@ static DEVICE_ATTR_RW(trigger_cntr);
static struct attribute *coresight_etb_attrs[] = {
&dev_attr_trigger_cntr.attr,
- &dev_attr_status.attr,
NULL,
};
-ATTRIBUTE_GROUPS(coresight_etb);
+
+static const struct attribute_group coresight_etb_group = {
+ .attrs = coresight_etb_attrs,
+};
+
+static const struct attribute_group coresight_etb_mgmt_group = {
+ .attrs = coresight_etb_mgmt_attrs,
+ .name = "mgmt",
+};
+
+const struct attribute_group *coresight_etb_groups[] = {
+ &coresight_etb_group,
+ &coresight_etb_mgmt_group,
+ NULL,
+};
static int etb_probe(struct amba_device *adev, const struct amba_id *id)
{
@@ -729,7 +705,6 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
if (ret)
goto err_misc_register;
- dev_info(dev, "ETB initialized\n");
return 0;
err_misc_register:
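
The etb_update_buffer() hunk above stops assuming the ETB depth is a power of two: to_read is now set (not decremented) to the frame-aligned handle size, and the read pointer wraps by subtraction instead of masking. A standalone check of the new wrap arithmetic with hypothetical values (depth 24 words, which no mask could handle):

#include <stdio.h>

#define ETB_FRAME_SIZE_WORDS 4

int main(void)
{
	unsigned int depth = 24, write_ptr = 10, to_read = 32;
	unsigned int read_ptr;

	read_ptr = (write_ptr + depth) - to_read / ETB_FRAME_SIZE_WORDS;
	if (read_ptr > depth - 1)	/* wrap by subtraction, not masking */
		read_ptr -= depth;

	printf("%u\n", read_ptr);	/* prints 2 */
	return 0;
}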
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index cbb4046c1..02d4b6298 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -1221,26 +1221,19 @@ static struct attribute *coresight_etm_attrs[] = {
NULL,
};
-#define coresight_simple_func(name, offset) \
-static ssize_t name##_show(struct device *_dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
- readl_relaxed(drvdata->base + offset)); \
-} \
-DEVICE_ATTR_RO(name)
-
-coresight_simple_func(etmccr, ETMCCR);
-coresight_simple_func(etmccer, ETMCCER);
-coresight_simple_func(etmscr, ETMSCR);
-coresight_simple_func(etmidr, ETMIDR);
-coresight_simple_func(etmcr, ETMCR);
-coresight_simple_func(etmtraceidr, ETMTRACEIDR);
-coresight_simple_func(etmteevr, ETMTEEVR);
-coresight_simple_func(etmtssvr, ETMTSSCR);
-coresight_simple_func(etmtecr1, ETMTECR1);
-coresight_simple_func(etmtecr2, ETMTECR2);
+#define coresight_etm3x_simple_func(name, offset) \
+ coresight_simple_func(struct etm_drvdata, name, offset)
+
+coresight_etm3x_simple_func(etmccr, ETMCCR);
+coresight_etm3x_simple_func(etmccer, ETMCCER);
+coresight_etm3x_simple_func(etmscr, ETMSCR);
+coresight_etm3x_simple_func(etmidr, ETMIDR);
+coresight_etm3x_simple_func(etmcr, ETMCR);
+coresight_etm3x_simple_func(etmtraceidr, ETMTRACEIDR);
+coresight_etm3x_simple_func(etmteevr, ETMTEEVR);
+coresight_etm3x_simple_func(etmtssvr, ETMTSSCR);
+coresight_etm3x_simple_func(etmtecr1, ETMTECR1);
+coresight_etm3x_simple_func(etmtecr2, ETMTECR2);
static struct attribute *coresight_etm_mgmt_attrs[] = {
&dev_attr_etmccr.attr,
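
The etb10 and etm3x hunks above both generate their read-only management attributes from coresight_simple_func(), now parameterised on the drvdata type; the removed etm3x-local macro shows the original body. Its assumed generalised shape, reconstructed from that body:

#define coresight_simple_func(type, name, offset)			\
static ssize_t name##_show(struct device *_dev,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	type *drvdata = dev_get_drvdata(_dev->parent);			\
	return scnprintf(buf, PAGE_SIZE, "0x%x\n",			\
			 readl_relaxed(drvdata->base + offset));	\
}									\
static DEVICE_ATTR_RO(name)

Each driver then wraps it once (coresight_etb10_simple_func(), coresight_etm3x_simple_func()) and collects the generated dev_attr_* entries into a "mgmt" attribute group.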
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
new file mode 100644
index 000000000..7c84308c5
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -0,0 +1,2126 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/sysfs.h>
+#include "coresight-etm4x.h"
+
+static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
+{
+ u8 idx;
+ struct etmv4_config *config = &drvdata->config;
+
+ idx = config->addr_idx;
+
+ /*
+ * TRCACATRn.TYPE bit[1:0]: type of comparison
+ * the trace unit performs
+ */
+ if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
+ if (idx % 2 != 0)
+ return -EINVAL;
+
+ /*
+ * We are performing instruction address comparison. Set the
+ * relevant bit of ViewInst Include/Exclude Control register
+ * for corresponding address comparator pair.
+ */
+ if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
+ config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
+ return -EINVAL;
+
+ if (exclude == true) {
+ /*
+ * Set exclude bit and unset the include bit
+ * corresponding to comparator pair
+ */
+ config->viiectlr |= BIT(idx / 2 + 16);
+ config->viiectlr &= ~BIT(idx / 2);
+ } else {
+ /*
+ * Set include bit and unset exclude bit
+ * corresponding to comparator pair
+ */
+ config->viiectlr |= BIT(idx / 2);
+ config->viiectlr &= ~BIT(idx / 2 + 16);
+ }
+ }
+ return 0;
+}
+
+static ssize_t nr_pe_cmp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_pe_cmp;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_pe_cmp);
+
+static ssize_t nr_addr_cmp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_addr_cmp;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_addr_cmp);
+
+static ssize_t nr_cntr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_cntr;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_cntr);
+
+static ssize_t nr_ext_inp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_ext_inp;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ext_inp);
+
+static ssize_t numcidc_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->numcidc;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(numcidc);
+
+static ssize_t numvmidc_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->numvmidc;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(numvmidc);
+
+static ssize_t nrseqstate_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nrseqstate;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nrseqstate);
+
+static ssize_t nr_resource_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_resource;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_resource);
+
+static ssize_t nr_ss_cmp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->nr_ss_cmp;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ss_cmp);
+
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int i;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ if (val)
+ config->mode = 0x0;
+
+ /* Disable data tracing: do not trace load and store data transfers */
+ config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
+ config->cfg &= ~(BIT(1) | BIT(2));
+
+ /* Disable data value and data address tracing */
+ config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
+ ETM_MODE_DATA_TRACE_VAL);
+ config->cfg &= ~(BIT(16) | BIT(17));
+
+ /* Disable all events tracing */
+ config->eventctrl0 = 0x0;
+ config->eventctrl1 = 0x0;
+
+ /* Disable timestamp event */
+ config->ts_ctrl = 0x0;
+
+ /* Disable stalling */
+ config->stall_ctrl = 0x0;
+
+	/* Reset trace synchronization period to 2^8 = 256 bytes */
+ if (drvdata->syncpr == false)
+ config->syncfreq = 0x8;
+
+ /*
+ * Enable ViewInst to trace everything with start-stop logic in
+ * started state. ARM recommends start-stop logic is set before
+ * each trace run.
+ */
+ config->vinst_ctrl |= BIT(0);
+	if (drvdata->nr_addr_cmp > 0) {
+ config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
+ /* SSSTATUS, bit[9] */
+ config->vinst_ctrl |= BIT(9);
+ }
+
+ /* No address range filtering for ViewInst */
+ config->viiectlr = 0x0;
+
+ /* No start-stop filtering for ViewInst */
+ config->vissctlr = 0x0;
+
+ /* Disable seq events */
+	for (i = 0; i < drvdata->nrseqstate - 1; i++)
+ config->seq_ctrl[i] = 0x0;
+ config->seq_rst = 0x0;
+ config->seq_state = 0x0;
+
+ /* Disable external input events */
+ config->ext_inp = 0x0;
+
+ config->cntr_idx = 0x0;
+ for (i = 0; i < drvdata->nr_cntr; i++) {
+ config->cntrldvr[i] = 0x0;
+ config->cntr_ctrl[i] = 0x0;
+ config->cntr_val[i] = 0x0;
+ }
+
+ config->res_idx = 0x0;
+ for (i = 0; i < drvdata->nr_resource; i++)
+ config->res_ctrl[i] = 0x0;
+
+ for (i = 0; i < drvdata->nr_ss_cmp; i++) {
+ config->ss_ctrl[i] = 0x0;
+ config->ss_pe_cmp[i] = 0x0;
+ }
+
+ config->addr_idx = 0x0;
+ for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
+ config->addr_val[i] = 0x0;
+ config->addr_acc[i] = 0x0;
+ config->addr_type[i] = ETM_ADDR_TYPE_NONE;
+ }
+
+ config->ctxid_idx = 0x0;
+ for (i = 0; i < drvdata->numcidc; i++) {
+ config->ctxid_pid[i] = 0x0;
+ config->ctxid_vpid[i] = 0x0;
+ }
+
+ config->ctxid_mask0 = 0x0;
+ config->ctxid_mask1 = 0x0;
+
+ config->vmid_idx = 0x0;
+ for (i = 0; i < drvdata->numvmidc; i++)
+ config->vmid_val[i] = 0x0;
+ config->vmid_mask0 = 0x0;
+ config->vmid_mask1 = 0x0;
+
+ drvdata->trcid = drvdata->cpu + 1;
+
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_WO(reset);
+
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->mode;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val, mode;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ config->mode = val & ETMv4_MODE_ALL;
+
+ if (config->mode & ETM_MODE_EXCLUDE)
+ etm4_set_mode_exclude(drvdata, true);
+ else
+ etm4_set_mode_exclude(drvdata, false);
+
+ if (drvdata->instrp0 == true) {
+ /* start by clearing instruction P0 field */
+ config->cfg &= ~(BIT(1) | BIT(2));
+ if (config->mode & ETM_MODE_LOAD)
+ /* 0b01 Trace load instructions as P0 instructions */
+ config->cfg |= BIT(1);
+ if (config->mode & ETM_MODE_STORE)
+ /* 0b10 Trace store instructions as P0 instructions */
+ config->cfg |= BIT(2);
+ if (config->mode & ETM_MODE_LOAD_STORE)
+ /*
+ * 0b11 Trace load and store instructions
+ * as P0 instructions
+ */
+ config->cfg |= BIT(1) | BIT(2);
+ }
+
+ /* bit[3], Branch broadcast mode */
+ if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
+ config->cfg |= BIT(3);
+ else
+ config->cfg &= ~BIT(3);
+
+ /* bit[4], Cycle counting instruction trace bit */
+ if ((config->mode & ETMv4_MODE_CYCACC) &&
+ (drvdata->trccci == true))
+ config->cfg |= BIT(4);
+ else
+ config->cfg &= ~BIT(4);
+
+ /* bit[6], Context ID tracing bit */
+ if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
+ config->cfg |= BIT(6);
+ else
+ config->cfg &= ~BIT(6);
+
+ if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
+ config->cfg |= BIT(7);
+ else
+ config->cfg &= ~BIT(7);
+
+ /* bits[10:8], Conditional instruction tracing bit */
+ mode = ETM_MODE_COND(config->mode);
+ if (drvdata->trccond == true) {
+ config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
+ config->cfg |= mode << 8;
+ }
+
+ /* bit[11], Global timestamp tracing bit */
+ if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
+ config->cfg |= BIT(11);
+ else
+ config->cfg &= ~BIT(11);
+
+ /* bit[12], Return stack enable bit */
+ if ((config->mode & ETM_MODE_RETURNSTACK) &&
+ (drvdata->retstack == true))
+ config->cfg |= BIT(12);
+ else
+ config->cfg &= ~BIT(12);
+
+ /* bits[14:13], Q element enable field */
+ mode = ETM_MODE_QELEM(config->mode);
+ /* start by clearing QE bits */
+ config->cfg &= ~(BIT(13) | BIT(14));
+ /* if supported, Q elements with instruction counts are enabled */
+ if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
+ config->cfg |= BIT(13);
+ /*
+ * if supported, Q elements with and without instruction
+ * counts are enabled
+ */
+ if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
+ config->cfg |= BIT(14);
+
+ /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
+ if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
+ (drvdata->atbtrig == true))
+ config->eventctrl1 |= BIT(11);
+ else
+ config->eventctrl1 &= ~BIT(11);
+
+ /* bit[12], Low-power state behavior override bit */
+ if ((config->mode & ETM_MODE_LPOVERRIDE) &&
+ (drvdata->lpoverride == true))
+ config->eventctrl1 |= BIT(12);
+ else
+ config->eventctrl1 &= ~BIT(12);
+
+ /* bit[8], Instruction stall bit */
+ if (config->mode & ETM_MODE_ISTALL_EN)
+ config->stall_ctrl |= BIT(8);
+ else
+ config->stall_ctrl &= ~BIT(8);
+
+ /* bit[10], Prioritize instruction trace bit */
+ if (config->mode & ETM_MODE_INSTPRIO)
+ config->stall_ctrl |= BIT(10);
+ else
+ config->stall_ctrl &= ~BIT(10);
+
+ /* bit[13], Trace overflow prevention bit */
+ if ((config->mode & ETM_MODE_NOOVERFLOW) &&
+ (drvdata->nooverflow == true))
+ config->stall_ctrl |= BIT(13);
+ else
+ config->stall_ctrl &= ~BIT(13);
+
+ /* bit[9] Start/stop logic control bit */
+ if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
+ config->vinst_ctrl |= BIT(9);
+ else
+ config->vinst_ctrl &= ~BIT(9);
+
+ /* bit[10], Whether a trace unit must trace a Reset exception */
+ if (config->mode & ETM_MODE_TRACE_RESET)
+ config->vinst_ctrl |= BIT(10);
+ else
+ config->vinst_ctrl &= ~BIT(10);
+
+ /* bit[11], Whether a trace unit must trace a system error exception */
+ if ((config->mode & ETM_MODE_TRACE_ERR) &&
+ (drvdata->trc_error == true))
+ config->vinst_ctrl |= BIT(11);
+ else
+ config->vinst_ctrl &= ~BIT(11);
+
+ if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
+ etm4_config_trace_mode(config);
+
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(mode);
+
+static ssize_t pe_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->pe_sel;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t pe_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ if (val > drvdata->nr_pe) {
+ spin_unlock(&drvdata->spinlock);
+ return -EINVAL;
+ }
+
+ config->pe_sel = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(pe);
+
+static ssize_t event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->eventctrl0;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ switch (drvdata->nr_event) {
+ case 0x0:
+ /* EVENT0, bits[7:0] */
+ config->eventctrl0 = val & 0xFF;
+ break;
+ case 0x1:
+ /* EVENT1, bits[15:8] */
+ config->eventctrl0 = val & 0xFFFF;
+ break;
+ case 0x2:
+ /* EVENT2, bits[23:16] */
+ config->eventctrl0 = val & 0xFFFFFF;
+ break;
+ case 0x3:
+ /* EVENT3, bits[31:24] */
+ config->eventctrl0 = val;
+ break;
+ default:
+ break;
+ }
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(event);
+
+static ssize_t event_instren_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = BMVAL(config->eventctrl1, 0, 3);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_instren_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ /* start by clearing all instruction event enable bits */
+ config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
+ switch (drvdata->nr_event) {
+ case 0x0:
+ /* generate Event element for event 1 */
+ config->eventctrl1 |= val & BIT(1);
+ break;
+ case 0x1:
+ /* generate Event element for event 1 and 2 */
+ config->eventctrl1 |= val & (BIT(0) | BIT(1));
+ break;
+ case 0x2:
+ /* generate Event element for event 1, 2 and 3 */
+ config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
+ break;
+ case 0x3:
+ /* generate Event element for all 4 events */
+ config->eventctrl1 |= val & 0xF;
+ break;
+ default:
+ break;
+ }
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(event_instren);
+
+static ssize_t event_ts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->ts_ctrl;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_ts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (!drvdata->ts_size)
+ return -EINVAL;
+
+ config->ts_ctrl = val & ETMv4_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(event_ts);
+
+static ssize_t syncfreq_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->syncfreq;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t syncfreq_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (drvdata->syncpr == true)
+ return -EINVAL;
+
+ config->syncfreq = val & ETMv4_SYNC_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(syncfreq);
+
+static ssize_t cyc_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->ccctlr;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cyc_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val < drvdata->ccitmin)
+ return -EINVAL;
+
+ config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(cyc_threshold);
+
+static ssize_t bb_ctrl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->bb_ctrl;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t bb_ctrl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (drvdata->trcbb == false)
+ return -EINVAL;
+ if (!drvdata->nr_addr_cmp)
+ return -EINVAL;
+ /*
+ * Bit[7:0] selects which address range comparator is used for
+ * branch broadcast control.
+ */
+ if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
+ return -EINVAL;
+
+ config->bb_ctrl = val;
+ return size;
+}
+static DEVICE_ATTR_RW(bb_ctrl);
+
+static ssize_t event_vinst_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->vinst_ctrl & ETMv4_EVENT_MASK;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t event_vinst_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ val &= ETMv4_EVENT_MASK;
+ config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
+ config->vinst_ctrl |= val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(event_vinst);
+
+static ssize_t s_exlevel_vinst_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = BMVAL(config->vinst_ctrl, 16, 19);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t s_exlevel_vinst_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
+ config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
+ /* enable instruction tracing for corresponding exception level */
+ val &= drvdata->s_ex_level;
+ config->vinst_ctrl |= (val << 16);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(s_exlevel_vinst);
+
+static ssize_t ns_exlevel_vinst_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ /* EXLEVEL_NS, bits[23:20] */
+ val = BMVAL(config->vinst_ctrl, 20, 23);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t ns_exlevel_vinst_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ /* clear all EXLEVEL_NS bits (bit[23] is never implemented) */
+ config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
+ /* enable instruction tracing for corresponding exception level */
+ val &= drvdata->ns_ex_level;
+ config->vinst_ctrl |= (val << 20);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(ns_exlevel_vinst);
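
The two stores above pack per-exception-level bits into TRCVICTLR; a compact sketch of that packing, keeping the never-implemented bits (bit[18] secure, bit[23] non-secure) clear and masking against what the part implements:

	/* sketch of the EXLEVEL packing; masks mirror the stores above */
	static u32 pack_exlevel(u32 vinst_ctrl, u32 s_val, u32 ns_val,
				u8 s_ex_level, u8 ns_ex_level)
	{
		/* EXLEVEL_S, bits[19:16]; bit[18] is never implemented */
		vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
		vinst_ctrl |= (s_val & s_ex_level) << 16;
		/* EXLEVEL_NS, bits[23:20]; bit[23] is never implemented */
		vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
		vinst_ctrl |= (ns_val & ns_ex_level) << 20;
		return vinst_ctrl;
	}
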
+
+static ssize_t addr_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->addr_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->nr_addr_cmp * 2)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ config->addr_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_idx);
+
+static ssize_t addr_instdatatype_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len;
+ u8 val, idx;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ val = BMVAL(config->addr_acc[idx], 0, 1);
+ len = scnprintf(buf, PAGE_SIZE, "%s\n",
+ val == ETM_INSTR_ADDR ? "instr" :
+ (val == ETM_DATA_LOAD_ADDR ? "data_load" :
+ (val == ETM_DATA_STORE_ADDR ? "data_store" :
+ "data_load_store")));
+ spin_unlock(&drvdata->spinlock);
+ return len;
+}
+
+static ssize_t addr_instdatatype_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ char str[20] = "";
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (strlen(buf) >= 20)
+ return -EINVAL;
+ if (sscanf(buf, "%s", str) != 1)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ /*
+ * Only the instruction address type can be selected here; any
+ * other string leaves TYPE, bits[1:0], unchanged.
+ */
+ if (!strcmp(str, "instr"))
+ config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
+
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_instdatatype);
+
+static ssize_t addr_single_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+ val = (unsigned long)config->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_single_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ config->addr_val[idx] = (u64)val;
+ config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_single);
+
+static ssize_t addr_range_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val1, val2;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (idx % 2 != 0) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+ if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val1 = (unsigned long)config->addr_val[idx];
+ val2 = (unsigned long)config->addr_val[idx + 1];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t addr_range_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val1, val2;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+ /* lower address comparator cannot have a higher address value */
+ if (val1 > val2)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (idx % 2 != 0) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+ config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+ (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+ config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ config->addr_val[idx] = (u64)val1;
+ config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+ config->addr_val[idx + 1] = (u64)val2;
+ config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
+ /*
+ * Program include or exclude control bits for vinst or vdata
+ * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
+ */
+ if (config->mode & ETM_MODE_EXCLUDE)
+ etm4_set_mode_exclude(drvdata, true);
+ else
+ etm4_set_mode_exclude(drvdata, false);
+
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_range);
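
Range comparators come in even/odd pairs, so programming one takes two writes: select the even base index, then write both addresses. A hypothetical user-space sequence (sysfs paths assumed):

	#include <stdio.h>

	/* sketch: program address-range comparator pair 0 with [start, end] */
	static int set_range(const char *dir, unsigned long start, unsigned long end)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), "%s/addr_idx", dir);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "0\n");			/* pair base: must be even */
		fclose(f);

		snprintf(path, sizeof(path), "%s/addr_range", dir);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%lx %lx\n", start, end);	/* store enforces start <= end */
		fclose(f);
		return 0;
	}
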
+
+static ssize_t addr_start_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val = (unsigned long)config->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_start_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!drvdata->nr_addr_cmp) {
+ spin_unlock(&drvdata->spinlock);
+ return -EINVAL;
+ }
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ config->addr_val[idx] = (u64)val;
+ config->addr_type[idx] = ETM_ADDR_TYPE_START;
+ config->vissctlr |= BIT(idx);
+ /* SSSTATUS, bit[9] - turn on start/stop logic */
+ config->vinst_ctrl |= BIT(9);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_start);
+
+static ssize_t addr_stop_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ val = (unsigned long)config->addr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!drvdata->nr_addr_cmp) {
+ spin_unlock(&drvdata->spinlock);
+ return -EINVAL;
+ }
+ if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+ config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+ spin_unlock(&drvdata->spinlock);
+ return -EPERM;
+ }
+
+ config->addr_val[idx] = (u64)val;
+ config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
+ config->vissctlr |= BIT(idx + 16);
+ /* SSSTATUS, bit[9] - turn on start/stop logic */
+ config->vinst_ctrl |= BIT(9);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_stop);
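
Both stores feed TRCVISSCTLR: one start bit per comparator in the low half, one stop bit per comparator in the high half, plus SSSTATUS in TRCVICTLR to arm the logic. A sketch of the bit layout the code above builds up:

	/* sketch: start on comparator 0, stop on comparator 2 */
	u32 vissctlr = 0, vinst_ctrl = 0;

	vissctlr |= BIT(0);		/* start bits live in bits[15:0]  */
	vissctlr |= BIT(2 + 16);	/* stop bits live in bits[31:16]  */
	vinst_ctrl |= BIT(9);		/* SSSTATUS: start/stop logic on  */
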
+
+static ssize_t addr_ctxtype_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len;
+ u8 idx, val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ /* CONTEXTTYPE, bits[3:2] */
+ val = BMVAL(config->addr_acc[idx], 2, 3);
+ len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
+ (val == ETM_CTX_CTXID ? "ctxid" :
+ (val == ETM_CTX_VMID ? "vmid" : "all")));
+ spin_unlock(&drvdata->spinlock);
+ return len;
+}
+
+static ssize_t addr_ctxtype_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ char str[10] = "";
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (strlen(buf) >= 10)
+ return -EINVAL;
+ if (sscanf(buf, "%s", str) != 1)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ if (!strcmp(str, "none"))
+ /* start by clearing context type bits */
+ config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
+ else if (!strcmp(str, "ctxid")) {
+ /* 0b01 The trace unit performs a Context ID comparison */
+ if (drvdata->numcidc) {
+ config->addr_acc[idx] |= BIT(2);
+ config->addr_acc[idx] &= ~BIT(3);
+ }
+ } else if (!strcmp(str, "vmid")) {
+ /* 0b10 The trace unit performs a VMID comparison */
+ if (drvdata->numvmidc) {
+ config->addr_acc[idx] &= ~BIT(2);
+ config->addr_acc[idx] |= BIT(3);
+ }
+ } else if (!strcmp(str, "all")) {
+ /*
+ * 0b11 The trace unit performs a Context ID comparison
+ * and a VMID comparison
+ */
+ if (drvdata->numcidc)
+ config->addr_acc[idx] |= BIT(2);
+ if (drvdata->numvmidc)
+ config->addr_acc[idx] |= BIT(3);
+ }
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_ctxtype);
+
+static ssize_t addr_context_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ /* context ID comparator bits[6:4] */
+ val = BMVAL(config->addr_acc[idx], 4, 6);
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t addr_context_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
+ return -EINVAL;
+ if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
+ drvdata->numcidc : drvdata->numvmidc))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->addr_idx;
+ /* clear context ID comparator bits[6:4] */
+ config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
+ config->addr_acc[idx] |= (val << 4);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(addr_context);
+
+static ssize_t seq_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->seq_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t seq_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->nrseqstate - 1)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ config->seq_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(seq_idx);
+
+static ssize_t seq_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->seq_state;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t seq_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->nrseqstate)
+ return -EINVAL;
+
+ config->seq_state = val;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_state);
+
+static ssize_t seq_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->seq_idx;
+ val = config->seq_ctrl[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t seq_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->seq_idx;
+ /* RST, bits[7:0] */
+ config->seq_ctrl[idx] = val & 0xFF;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(seq_event);
+
+static ssize_t seq_reset_event_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->seq_rst;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t seq_reset_event_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (!(drvdata->nrseqstate))
+ return -EINVAL;
+
+ config->seq_rst = val & ETMv4_EVENT_MASK;
+ return size;
+}
+static DEVICE_ATTR_RW(seq_reset_event);
+
+static ssize_t cntr_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->cntr_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cntr_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->nr_cntr)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ config->cntr_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_idx);
+
+static ssize_t cntrldvr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->cntr_idx;
+ val = config->cntrldvr[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cntrldvr_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val > ETM_CNTR_MAX_VAL)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->cntr_idx;
+ config->cntrldvr[idx] = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(cntrldvr);
+
+static ssize_t cntr_val_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->cntr_idx;
+ val = config->cntr_val[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cntr_val_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val > ETM_CNTR_MAX_VAL)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->cntr_idx;
+ config->cntr_val[idx] = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_val);
+
+static ssize_t cntr_ctrl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->cntr_idx;
+ val = config->cntr_ctrl[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t cntr_ctrl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->cntr_idx;
+ config->cntr_ctrl[idx] = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(cntr_ctrl);
+
+static ssize_t res_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->res_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t res_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ /* Resource selector pair 0 is always implemented and reserved */
+ if ((val == 0) || (val >= drvdata->nr_resource))
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ config->res_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(res_idx);
+
+static ssize_t res_ctrl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->res_idx;
+ val = config->res_ctrl[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t res_ctrl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->res_idx;
+ /* For an odd idx, the pair inversion bit is RES0 */
+ if (idx % 2 != 0)
+ /* PAIRINV, bit[21] */
+ val &= ~BIT(21);
+ config->res_ctrl[idx] = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(res_ctrl);
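
A one-line sketch of the sanitisation above: the pair inversion bit only exists on the even selector of a pair, so it is stripped from odd indices:

	/* sketch: PAIRINV (bit[21]) is RES0 on odd selector indices */
	static u32 sanitize_res_ctrl(u8 idx, u32 val)
	{
		return (idx % 2) ? val & ~BIT(21) : val;
	}
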
+
+static ssize_t ctxid_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->ctxid_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t ctxid_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->numcidc)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ config->ctxid_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_idx);
+
+static ssize_t ctxid_pid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 idx;
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->ctxid_idx;
+ val = (unsigned long)config->ctxid_vpid[idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t ctxid_pid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 idx;
+ unsigned long vpid, pid;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ /*
+ * Only implemented when ctxid tracing is enabled, i.e. at least one
+ * ctxid comparator is implemented and the ctxid size is greater
+ * than 0 bits.
+ */
+ if (!drvdata->ctxid_size || !drvdata->numcidc)
+ return -EINVAL;
+ if (kstrtoul(buf, 16, &vpid))
+ return -EINVAL;
+
+ pid = coresight_vpid_to_pid(vpid);
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->ctxid_idx;
+ config->ctxid_pid[idx] = (u64)pid;
+ config->ctxid_vpid[idx] = (u64)vpid;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_pid);
+
+static ssize_t ctxid_masks_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val1, val2;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val1 = config->ctxid_mask0;
+ val2 = config->ctxid_mask1;
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t ctxid_masks_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 i, j, maskbyte;
+ unsigned long val1, val2, mask;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ /*
+ * Only implemented when ctxid tracing is enabled, i.e. at least one
+ * ctxid comparator is implemented and the ctxid size is greater
+ * than 0 bits.
+ */
+ if (!drvdata->ctxid_size || !drvdata->numcidc)
+ return -EINVAL;
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ /*
+ * each byte[0..3] controls mask value applied to ctxid
+ * comparator[0..3]
+ */
+ switch (drvdata->numcidc) {
+ case 0x1:
+ /* COMP0, bits[7:0] */
+ config->ctxid_mask0 = val1 & 0xFF;
+ break;
+ case 0x2:
+ /* COMP1, bits[15:8] */
+ config->ctxid_mask0 = val1 & 0xFFFF;
+ break;
+ case 0x3:
+ /* COMP2, bits[23:16] */
+ config->ctxid_mask0 = val1 & 0xFFFFFF;
+ break;
+ case 0x4:
+ /* COMP3, bits[31:24] */
+ config->ctxid_mask0 = val1;
+ break;
+ case 0x5:
+ /* COMP4, bits[7:0] */
+ config->ctxid_mask0 = val1;
+ config->ctxid_mask1 = val2 & 0xFF;
+ break;
+ case 0x6:
+ /* COMP5, bits[15:8] */
+ config->ctxid_mask0 = val1;
+ config->ctxid_mask1 = val2 & 0xFFFF;
+ break;
+ case 0x7:
+ /* COMP6, bits[23:16] */
+ config->ctxid_mask0 = val1;
+ config->ctxid_mask1 = val2 & 0xFFFFFF;
+ break;
+ case 0x8:
+ /* COMP7, bits[31:24] */
+ config->ctxid_mask0 = val1;
+ config->ctxid_mask1 = val2;
+ break;
+ default:
+ break;
+ }
+ /*
+ * If software sets a mask bit to 1, it must program the relevant byte
+ * of the ctxid comparator value to 0x0, otherwise the behavior is
+ * unpredictable. For example, if bit[3] of ctxid_mask0 is 1, we must
+ * clear bits[31:24] (byte 3) of the ctxid comparator 0 value register.
+ */
+ mask = config->ctxid_mask0;
+ for (i = 0; i < drvdata->numcidc; i++) {
+ /* mask value of corresponding ctxid comparator */
+ maskbyte = mask & ETMv4_EVENT_MASK;
+ /*
+ * each bit corresponds to a byte of respective ctxid comparator
+ * value register
+ */
+ for (j = 0; j < 8; j++) {
+ if (maskbyte & 1)
+ config->ctxid_pid[i] &= ~(0xFFULL << (j * 8));
+ maskbyte >>= 1;
+ }
+ /* Select the next ctxid comparator mask value */
+ if (i == 3)
+ /* ctxid comparators[4-7] */
+ mask = config->ctxid_mask1;
+ else
+ mask >>= 0x8;
+ }
+
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(ctxid_masks);
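
Concretely, each comparator owns one byte of ctxid_mask0/ctxid_mask1, and each bit of that byte blanks one byte of the 64-bit comparator value; a sketch of the inner loop's effect:

	/* sketch: apply one comparator's mask byte to its 64-bit value */
	static u64 apply_mask_byte(u64 cmp_val, u8 maskbyte)
	{
		int j;

		for (j = 0; j < 8; j++)
			if (maskbyte & BIT(j))
				/* mask bit j blanks value byte j */
				cmp_val &= ~(0xFFULL << (j * 8));
		return cmp_val;
	}
	/* e.g. maskbyte 0x08 clears bits[31:24] (byte 3) of the value */
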
+
+static ssize_t vmid_idx_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ val = config->vmid_idx;
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t vmid_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+ if (val >= drvdata->numvmidc)
+ return -EINVAL;
+
+ /*
+ * Use spinlock to ensure index doesn't change while it gets
+ * dereferenced multiple times within a spinlock block elsewhere.
+ */
+ spin_lock(&drvdata->spinlock);
+ config->vmid_idx = val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(vmid_idx);
+
+static ssize_t vmid_val_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val = (unsigned long)config->vmid_val[config->vmid_idx];
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t vmid_val_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ unsigned long val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ /*
+ * Only implemented when vmid tracing is enabled, i.e. at least one
+ * vmid comparator is implemented and the vmid size is at least 8 bits.
+ */
+ if (!drvdata->vmid_size || !drvdata->numvmidc)
+ return -EINVAL;
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ config->vmid_val[config->vmid_idx] = (u64)val;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(vmid_val);
+
+static ssize_t vmid_masks_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val1, val2;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ spin_lock(&drvdata->spinlock);
+ val1 = config->vmid_mask0;
+ val2 = config->vmid_mask1;
+ spin_unlock(&drvdata->spinlock);
+ return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t vmid_masks_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ u8 i, j, maskbyte;
+ unsigned long val1, val2, mask;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_config *config = &drvdata->config;
+
+ /*
+ * Only implemented when vmid tracing is enabled, i.e. at least one
+ * vmid comparator is implemented and the vmid size is at least 8 bits.
+ */
+ if (!drvdata->vmid_size || !drvdata->numvmidc)
+ return -EINVAL;
+ if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+
+ /*
+ * each byte[0..3] controls mask value applied to vmid
+ * comparator[0..3]
+ */
+ switch (drvdata->numvmidc) {
+ case 0x1:
+ /* COMP0, bits[7:0] */
+ config->vmid_mask0 = val1 & 0xFF;
+ break;
+ case 0x2:
+ /* COMP1, bits[15:8] */
+ config->vmid_mask0 = val1 & 0xFFFF;
+ break;
+ case 0x3:
+ /* COMP2, bits[23:16] */
+ config->vmid_mask0 = val1 & 0xFFFFFF;
+ break;
+ case 0x4:
+ /* COMP3, bits[31:24] */
+ config->vmid_mask0 = val1;
+ break;
+ case 0x5:
+ /* COMP4, bits[7:0] */
+ config->vmid_mask0 = val1;
+ config->vmid_mask1 = val2 & 0xFF;
+ break;
+ case 0x6:
+ /* COMP5, bits[15:8] */
+ config->vmid_mask0 = val1;
+ config->vmid_mask1 = val2 & 0xFFFF;
+ break;
+ case 0x7:
+ /* COMP6, bits[23:16] */
+ config->vmid_mask0 = val1;
+ config->vmid_mask1 = val2 & 0xFFFFFF;
+ break;
+ case 0x8:
+ /* COMP7, bits[31:24] */
+ config->vmid_mask0 = val1;
+ config->vmid_mask1 = val2;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * If software sets a mask bit to 1, it must program the relevant byte
+ * of the vmid comparator value to 0x0, otherwise the behavior is
+ * unpredictable. For example, if bit[3] of vmid_mask0 is 1, we must
+ * clear bits[31:24] (byte 3) of the vmid comparator 0 value register.
+ */
+ mask = config->vmid_mask0;
+ for (i = 0; i < drvdata->numvmidc; i++) {
+ /* mask value of corresponding vmid comparator */
+ maskbyte = mask & ETMv4_EVENT_MASK;
+ /*
+ * each bit corresponds to a byte of respective vmid comparator
+ * value register
+ */
+ for (j = 0; j < 8; j++) {
+ if (maskbyte & 1)
+ config->vmid_val[i] &= ~(0xFFULL << (j * 8));
+ maskbyte >>= 1;
+ }
+ /* Select the next vmid comparator mask value */
+ if (i == 3)
+ /* vmid comparators[4-7] */
+ mask = config->vmid_mask1;
+ else
+ mask >>= 0x8;
+ }
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(vmid_masks);
+
+static ssize_t cpu_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->cpu;
+ return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+static DEVICE_ATTR_RO(cpu);
+
+static struct attribute *coresight_etmv4_attrs[] = {
+ &dev_attr_nr_pe_cmp.attr,
+ &dev_attr_nr_addr_cmp.attr,
+ &dev_attr_nr_cntr.attr,
+ &dev_attr_nr_ext_inp.attr,
+ &dev_attr_numcidc.attr,
+ &dev_attr_numvmidc.attr,
+ &dev_attr_nrseqstate.attr,
+ &dev_attr_nr_resource.attr,
+ &dev_attr_nr_ss_cmp.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_pe.attr,
+ &dev_attr_event.attr,
+ &dev_attr_event_instren.attr,
+ &dev_attr_event_ts.attr,
+ &dev_attr_syncfreq.attr,
+ &dev_attr_cyc_threshold.attr,
+ &dev_attr_bb_ctrl.attr,
+ &dev_attr_event_vinst.attr,
+ &dev_attr_s_exlevel_vinst.attr,
+ &dev_attr_ns_exlevel_vinst.attr,
+ &dev_attr_addr_idx.attr,
+ &dev_attr_addr_instdatatype.attr,
+ &dev_attr_addr_single.attr,
+ &dev_attr_addr_range.attr,
+ &dev_attr_addr_start.attr,
+ &dev_attr_addr_stop.attr,
+ &dev_attr_addr_ctxtype.attr,
+ &dev_attr_addr_context.attr,
+ &dev_attr_seq_idx.attr,
+ &dev_attr_seq_state.attr,
+ &dev_attr_seq_event.attr,
+ &dev_attr_seq_reset_event.attr,
+ &dev_attr_cntr_idx.attr,
+ &dev_attr_cntrldvr.attr,
+ &dev_attr_cntr_val.attr,
+ &dev_attr_cntr_ctrl.attr,
+ &dev_attr_res_idx.attr,
+ &dev_attr_res_ctrl.attr,
+ &dev_attr_ctxid_idx.attr,
+ &dev_attr_ctxid_pid.attr,
+ &dev_attr_ctxid_masks.attr,
+ &dev_attr_vmid_idx.attr,
+ &dev_attr_vmid_val.attr,
+ &dev_attr_vmid_masks.attr,
+ &dev_attr_cpu.attr,
+ NULL,
+};
+
+#define coresight_etm4x_simple_func(name, offset) \
+ coresight_simple_func(struct etmv4_drvdata, name, offset)
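
For reference, each invocation below is assumed to expand to a read-only attribute that dumps the raw register at the given offset; a sketch of the presumed expansion for one register (coresight_simple_func is defined in coresight-priv.h, so the exact body may differ):

	static ssize_t trcpdcr_show(struct device *_dev,
				    struct device_attribute *attr, char *buf)
	{
		struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent);

		return scnprintf(buf, PAGE_SIZE, "0x%x\n",
				 readl_relaxed(drvdata->base + TRCPDCR));
	}
	static DEVICE_ATTR_RO(trcpdcr);
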
+
+coresight_etm4x_simple_func(trcoslsr, TRCOSLSR);
+coresight_etm4x_simple_func(trcpdcr, TRCPDCR);
+coresight_etm4x_simple_func(trcpdsr, TRCPDSR);
+coresight_etm4x_simple_func(trclsr, TRCLSR);
+coresight_etm4x_simple_func(trcconfig, TRCCONFIGR);
+coresight_etm4x_simple_func(trctraceid, TRCTRACEIDR);
+coresight_etm4x_simple_func(trcauthstatus, TRCAUTHSTATUS);
+coresight_etm4x_simple_func(trcdevid, TRCDEVID);
+coresight_etm4x_simple_func(trcdevtype, TRCDEVTYPE);
+coresight_etm4x_simple_func(trcpidr0, TRCPIDR0);
+coresight_etm4x_simple_func(trcpidr1, TRCPIDR1);
+coresight_etm4x_simple_func(trcpidr2, TRCPIDR2);
+coresight_etm4x_simple_func(trcpidr3, TRCPIDR3);
+
+static struct attribute *coresight_etmv4_mgmt_attrs[] = {
+ &dev_attr_trcoslsr.attr,
+ &dev_attr_trcpdcr.attr,
+ &dev_attr_trcpdsr.attr,
+ &dev_attr_trclsr.attr,
+ &dev_attr_trcconfig.attr,
+ &dev_attr_trctraceid.attr,
+ &dev_attr_trcauthstatus.attr,
+ &dev_attr_trcdevid.attr,
+ &dev_attr_trcdevtype.attr,
+ &dev_attr_trcpidr0.attr,
+ &dev_attr_trcpidr1.attr,
+ &dev_attr_trcpidr2.attr,
+ &dev_attr_trcpidr3.attr,
+ NULL,
+};
+
+coresight_etm4x_simple_func(trcidr0, TRCIDR0);
+coresight_etm4x_simple_func(trcidr1, TRCIDR1);
+coresight_etm4x_simple_func(trcidr2, TRCIDR2);
+coresight_etm4x_simple_func(trcidr3, TRCIDR3);
+coresight_etm4x_simple_func(trcidr4, TRCIDR4);
+coresight_etm4x_simple_func(trcidr5, TRCIDR5);
+/* trcidr[6,7] are reserved */
+coresight_etm4x_simple_func(trcidr8, TRCIDR8);
+coresight_etm4x_simple_func(trcidr9, TRCIDR9);
+coresight_etm4x_simple_func(trcidr10, TRCIDR10);
+coresight_etm4x_simple_func(trcidr11, TRCIDR11);
+coresight_etm4x_simple_func(trcidr12, TRCIDR12);
+coresight_etm4x_simple_func(trcidr13, TRCIDR13);
+
+static struct attribute *coresight_etmv4_trcidr_attrs[] = {
+ &dev_attr_trcidr0.attr,
+ &dev_attr_trcidr1.attr,
+ &dev_attr_trcidr2.attr,
+ &dev_attr_trcidr3.attr,
+ &dev_attr_trcidr4.attr,
+ &dev_attr_trcidr5.attr,
+ /* trcidr[6,7] are reserved */
+ &dev_attr_trcidr8.attr,
+ &dev_attr_trcidr9.attr,
+ &dev_attr_trcidr10.attr,
+ &dev_attr_trcidr11.attr,
+ &dev_attr_trcidr12.attr,
+ &dev_attr_trcidr13.attr,
+ NULL,
+};
+
+static const struct attribute_group coresight_etmv4_group = {
+ .attrs = coresight_etmv4_attrs,
+};
+
+static const struct attribute_group coresight_etmv4_mgmt_group = {
+ .attrs = coresight_etmv4_mgmt_attrs,
+ .name = "mgmt",
+};
+
+static const struct attribute_group coresight_etmv4_trcidr_group = {
+ .attrs = coresight_etmv4_trcidr_attrs,
+ .name = "trcidr",
+};
+
+const struct attribute_group *coresight_etmv4_groups[] = {
+ &coresight_etmv4_group,
+ &coresight_etmv4_mgmt_group,
+ &coresight_etmv4_trcidr_group,
+ NULL,
+};
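
Named attribute groups become sysfs subdirectories, so registering these three groups is assumed to yield a layout along these lines (device name hypothetical):

	/*
	 *   /sys/bus/coresight/devices/etm0/syncfreq        (anonymous group)
	 *   /sys/bus/coresight/devices/etm0/mgmt/trcpdcr    (.name = "mgmt")
	 *   /sys/bus/coresight/devices/etm0/trcidr/trcidr0  (.name = "trcidr")
	 */
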
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 1c59bd368..462f0dc15 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -26,15 +26,19 @@
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
#include <linux/pm_wakeup.h>
#include <linux/amba/bus.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
+#include <linux/perf_event.h>
#include <linux/pm_runtime.h>
#include <linux/perf_event.h>
#include <asm/sections.h>
+#include <asm/local.h>
#include "coresight-etm4x.h"
+#include "coresight-etm-perf.h"
static int boot_enable;
module_param_named(boot_enable, boot_enable, int, S_IRUGO);
@@ -42,13 +46,13 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO);
/* The number of ETMv4 currently registered */
static int etm4_count;
static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
+static void etm4_set_default(struct etmv4_config *config);
-static void etm4_os_unlock(void *info)
+static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
{
- struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;
-
/* Writing any value to ETMOSLAR unlocks the trace registers */
writel_relaxed(0x0, drvdata->base + TRCOSLAR);
+ drvdata->os_unlock = true;
isb();
}
@@ -76,7 +80,7 @@ static int etm4_trace_id(struct coresight_device *csdev)
unsigned long flags;
int trace_id = -1;
- if (!drvdata->enable)
+ if (!local_read(&drvdata->mode))
return drvdata->trcid;
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -95,6 +99,7 @@ static void etm4_enable_hw(void *info)
{
int i;
struct etmv4_drvdata *drvdata = info;
+ struct etmv4_config *config = &drvdata->config;
CS_UNLOCK(drvdata->base);
@@ -109,69 +114,69 @@ static void etm4_enable_hw(void *info)
"timeout observed when probing at offset %#x\n",
TRCSTATR);
- writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR);
- writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR);
+ writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
+ writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR);
/* nothing specific implemented */
writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
- writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R);
- writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R);
- writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR);
- writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR);
- writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR);
- writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR);
- writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR);
+ writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R);
+ writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R);
+ writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
+ writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR);
+ writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR);
+ writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR);
+ writel_relaxed(config->bb_ctrl, drvdata->base + TRCBBCTLR);
writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
- writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR);
- writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR);
- writel_relaxed(drvdata->vissctlr,
+ writel_relaxed(config->vinst_ctrl, drvdata->base + TRCVICTLR);
+ writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR);
+ writel_relaxed(config->vissctlr,
drvdata->base + TRCVISSCTLR);
- writel_relaxed(drvdata->vipcssctlr,
+ writel_relaxed(config->vipcssctlr,
drvdata->base + TRCVIPCSSCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
- writel_relaxed(drvdata->seq_ctrl[i],
+ writel_relaxed(config->seq_ctrl[i],
drvdata->base + TRCSEQEVRn(i));
- writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR);
- writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR);
- writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR);
+ writel_relaxed(config->seq_rst, drvdata->base + TRCSEQRSTEVR);
+ writel_relaxed(config->seq_state, drvdata->base + TRCSEQSTR);
+ writel_relaxed(config->ext_inp, drvdata->base + TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
- writel_relaxed(drvdata->cntrldvr[i],
+ writel_relaxed(config->cntrldvr[i],
drvdata->base + TRCCNTRLDVRn(i));
- writel_relaxed(drvdata->cntr_ctrl[i],
+ writel_relaxed(config->cntr_ctrl[i],
drvdata->base + TRCCNTCTLRn(i));
- writel_relaxed(drvdata->cntr_val[i],
+ writel_relaxed(config->cntr_val[i],
drvdata->base + TRCCNTVRn(i));
}
/* Resource selector pair 0 is always implemented and reserved */
- for (i = 2; i < drvdata->nr_resource * 2; i++)
- writel_relaxed(drvdata->res_ctrl[i],
+ for (i = 0; i < drvdata->nr_resource * 2; i++)
+ writel_relaxed(config->res_ctrl[i],
drvdata->base + TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
- writel_relaxed(drvdata->ss_ctrl[i],
+ writel_relaxed(config->ss_ctrl[i],
drvdata->base + TRCSSCCRn(i));
- writel_relaxed(drvdata->ss_status[i],
+ writel_relaxed(config->ss_status[i],
drvdata->base + TRCSSCSRn(i));
- writel_relaxed(drvdata->ss_pe_cmp[i],
+ writel_relaxed(config->ss_pe_cmp[i],
drvdata->base + TRCSSPCICRn(i));
}
for (i = 0; i < drvdata->nr_addr_cmp; i++) {
- writeq_relaxed(drvdata->addr_val[i],
+ writeq_relaxed(config->addr_val[i],
drvdata->base + TRCACVRn(i));
- writeq_relaxed(drvdata->addr_acc[i],
+ writeq_relaxed(config->addr_acc[i],
drvdata->base + TRCACATRn(i));
}
for (i = 0; i < drvdata->numcidc; i++)
- writeq_relaxed(drvdata->ctxid_pid[i],
+ writeq_relaxed(config->ctxid_pid[i],
drvdata->base + TRCCIDCVRn(i));
- writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
- writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
+ writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
+ writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
for (i = 0; i < drvdata->numvmidc; i++)
- writeq_relaxed(drvdata->vmid_val[i],
+ writeq_relaxed(config->vmid_val[i],
drvdata->base + TRCVMIDCVRn(i));
- writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
- writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
+ writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
+ writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
/* Enable the trace unit */
writel_relaxed(1, drvdata->base + TRCPRGCTLR);
@@ -187,2120 +192,210 @@ static void etm4_enable_hw(void *info)
dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}
-static int etm4_enable(struct coresight_device *csdev,
- struct perf_event_attr *attr, u32 mode)
+static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
+ struct perf_event_attr *attr)
{
- struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- int ret;
-
- spin_lock(&drvdata->spinlock);
-
- /*
- * Executing etm4_enable_hw on the cpu whose ETM is being enabled
- * ensures that register writes occur when cpu is powered.
- */
- ret = smp_call_function_single(drvdata->cpu,
- etm4_enable_hw, drvdata, 1);
- if (ret)
- goto err;
- drvdata->enable = true;
- drvdata->sticky_enable = true;
-
- spin_unlock(&drvdata->spinlock);
+ struct etmv4_config *config = &drvdata->config;
- dev_info(drvdata->dev, "ETM tracing enabled\n");
- return 0;
-err:
- spin_unlock(&drvdata->spinlock);
- return ret;
-}
+ if (!attr)
+ return -EINVAL;
-static void etm4_disable_hw(void *info)
-{
- u32 control;
- struct etmv4_drvdata *drvdata = info;
+ /* Clear configuration from previous run */
+ memset(config, 0, sizeof(struct etmv4_config));
- CS_UNLOCK(drvdata->base);
+ if (attr->exclude_kernel)
+ config->mode = ETM_MODE_EXCL_KERN;
- control = readl_relaxed(drvdata->base + TRCPRGCTLR);
+ if (attr->exclude_user)
+ config->mode = ETM_MODE_EXCL_USER;
- /* EN, bit[0] Trace unit enable bit */
- control &= ~0x1;
-
- /* make sure everything completes before disabling */
- mb();
- isb();
- writel_relaxed(control, drvdata->base + TRCPRGCTLR);
-
- CS_LOCK(drvdata->base);
-
- dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
-}
-
-static void etm4_disable(struct coresight_device *csdev)
-{
- struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ /* Always start from the default config */
+ etm4_set_default(config);
/*
- * Taking hotplug lock here protects from clocks getting disabled
- * with tracing being left on (crash scenario) if user disable occurs
- * after cpu online mask indicates the cpu is offline but before the
- * DYING hotplug callback is serviced by the ETM driver.
+ * By default the tracers are configured to trace the whole address
+ * range. Narrow the field only if requested by user space.
*/
- get_online_cpus();
- spin_lock(&drvdata->spinlock);
-
- /*
- * Executing etm4_disable_hw on the cpu whose ETM is being disabled
- * ensures that register writes occur when cpu is powered.
- */
- smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
- drvdata->enable = false;
-
- spin_unlock(&drvdata->spinlock);
- put_online_cpus();
-
- dev_info(drvdata->dev, "ETM tracing disabled\n");
-}
-
-static const struct coresight_ops_source etm4_source_ops = {
- .cpu_id = etm4_cpu_id,
- .trace_id = etm4_trace_id,
- .enable = etm4_enable,
- .disable = etm4_disable,
-};
-
-static const struct coresight_ops etm4_cs_ops = {
- .source_ops = &etm4_source_ops,
-};
+ if (config->mode)
+ etm4_config_trace_mode(config);
-static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
-{
- u8 idx = drvdata->addr_idx;
+ /* Go from generic option to ETMv4 specifics */
+ if (attr->config & BIT(ETM_OPT_CYCACC))
+ config->cfg |= ETMv4_MODE_CYCACC;
+ if (attr->config & BIT(ETM_OPT_TS))
+ config->cfg |= ETMv4_MODE_TIMESTAMP;
- /*
- * TRCACATRn.TYPE bit[1:0]: type of comparison
- * the trace unit performs
- */
- if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
- if (idx % 2 != 0)
- return -EINVAL;
-
- /*
- * We are performing instruction address comparison. Set the
- * relevant bit of ViewInst Include/Exclude Control register
- * for corresponding address comparator pair.
- */
- if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
- drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
- return -EINVAL;
-
- if (exclude == true) {
- /*
- * Set exclude bit and unset the include bit
- * corresponding to comparator pair
- */
- drvdata->viiectlr |= BIT(idx / 2 + 16);
- drvdata->viiectlr &= ~BIT(idx / 2);
- } else {
- /*
- * Set include bit and unset exclude bit
- * corresponding to comparator pair
- */
- drvdata->viiectlr |= BIT(idx / 2);
- drvdata->viiectlr &= ~BIT(idx / 2 + 16);
- }
- }
return 0;
}
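
A user-space sketch of a perf_event_attr this parser would see; the ETM_OPT_* bit positions are assumptions mirroring linux/coresight-pmu.h, and the PMU type must be read from sysfs at runtime:

	#include <string.h>
	#include <linux/perf_event.h>

	#define ETM_OPT_CYCACC	12	/* assumed, mirrors coresight-pmu.h */
	#define ETM_OPT_TS	28	/* assumed, mirrors coresight-pmu.h */

	static void fill_etm_attr(struct perf_event_attr *attr, __u32 cs_etm_type)
	{
		memset(attr, 0, sizeof(*attr));
		attr->size = sizeof(*attr);
		attr->type = cs_etm_type;  /* from .../event_source/devices/cs_etm/type */
		attr->exclude_kernel = 1;  /* the parser maps this to ETM_MODE_EXCL_KERN */
		attr->config = (1ULL << ETM_OPT_CYCACC) | (1ULL << ETM_OPT_TS);
	}
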
-static ssize_t nr_pe_cmp_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int etm4_enable_perf(struct coresight_device *csdev,
+ struct perf_event_attr *attr)
{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->nr_pe_cmp;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_pe_cmp);
-
-static ssize_t nr_addr_cmp_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->nr_addr_cmp;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_addr_cmp);
-
-static ssize_t nr_cntr_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->nr_cntr;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_cntr);
-
-static ssize_t nr_ext_inp_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->nr_ext_inp;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_ext_inp);
-
-static ssize_t numcidc_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->numcidc;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(numcidc);
-
-static ssize_t numvmidc_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->numvmidc;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(numvmidc);
-
-static ssize_t nrseqstate_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->nrseqstate;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nrseqstate);
-
-static ssize_t nr_resource_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->nr_resource;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_resource);
-
-static ssize_t nr_ss_cmp_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->nr_ss_cmp;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static DEVICE_ATTR_RO(nr_ss_cmp);
-
-static ssize_t reset_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int i;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- if (kstrtoul(buf, 16, &val))
+ if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
- if (val)
- drvdata->mode = 0x0;
-
- /* Disable data tracing: do not trace load and store data transfers */
- drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
- drvdata->cfg &= ~(BIT(1) | BIT(2));
-
- /* Disable data value and data address tracing */
- drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
- ETM_MODE_DATA_TRACE_VAL);
- drvdata->cfg &= ~(BIT(16) | BIT(17));
-
- /* Disable all events tracing */
- drvdata->eventctrl0 = 0x0;
- drvdata->eventctrl1 = 0x0;
-
- /* Disable timestamp event */
- drvdata->ts_ctrl = 0x0;
-
- /* Disable stalling */
- drvdata->stall_ctrl = 0x0;
-
- /* Reset trace synchronization period to 2^8 = 256 bytes*/
- if (drvdata->syncpr == false)
- drvdata->syncfreq = 0x8;
-
- /*
- * Enable ViewInst to trace everything with start-stop logic in
- * started state. ARM recommends start-stop logic is set before
- * each trace run.
- */
- drvdata->vinst_ctrl |= BIT(0);
- if (drvdata->nr_addr_cmp == true) {
- drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP;
- /* SSSTATUS, bit[9] */
- drvdata->vinst_ctrl |= BIT(9);
- }
-
- /* No address range filtering for ViewInst */
- drvdata->viiectlr = 0x0;
-
- /* No start-stop filtering for ViewInst */
- drvdata->vissctlr = 0x0;
-
- /* Disable seq events */
- for (i = 0; i < drvdata->nrseqstate-1; i++)
- drvdata->seq_ctrl[i] = 0x0;
- drvdata->seq_rst = 0x0;
- drvdata->seq_state = 0x0;
-
- /* Disable external input events */
- drvdata->ext_inp = 0x0;
-
- drvdata->cntr_idx = 0x0;
- for (i = 0; i < drvdata->nr_cntr; i++) {
- drvdata->cntrldvr[i] = 0x0;
- drvdata->cntr_ctrl[i] = 0x0;
- drvdata->cntr_val[i] = 0x0;
- }
-
- /* Resource selector pair 0 is always implemented and reserved */
- drvdata->res_idx = 0x2;
- for (i = 2; i < drvdata->nr_resource * 2; i++)
- drvdata->res_ctrl[i] = 0x0;
-
- for (i = 0; i < drvdata->nr_ss_cmp; i++) {
- drvdata->ss_ctrl[i] = 0x0;
- drvdata->ss_pe_cmp[i] = 0x0;
- }
-
- drvdata->addr_idx = 0x0;
- for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
- drvdata->addr_val[i] = 0x0;
- drvdata->addr_acc[i] = 0x0;
- drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
- }
-
- drvdata->ctxid_idx = 0x0;
- for (i = 0; i < drvdata->numcidc; i++) {
- drvdata->ctxid_pid[i] = 0x0;
- drvdata->ctxid_vpid[i] = 0x0;
- }
-
- drvdata->ctxid_mask0 = 0x0;
- drvdata->ctxid_mask1 = 0x0;
-
- drvdata->vmid_idx = 0x0;
- for (i = 0; i < drvdata->numvmidc; i++)
- drvdata->vmid_val[i] = 0x0;
- drvdata->vmid_mask0 = 0x0;
- drvdata->vmid_mask1 = 0x0;
-
- drvdata->trcid = drvdata->cpu + 1;
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_WO(reset);
+ /* Configure the tracer based on the session's specifics */
+ etm4_parse_event_config(drvdata, attr);
+ /* And enable it */
+ etm4_enable_hw(drvdata);
-static ssize_t mode_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->mode;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+ return 0;
}
-static ssize_t mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
+static int etm4_enable_sysfs(struct coresight_device *csdev)
{
- unsigned long val, mode;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret;
spin_lock(&drvdata->spinlock);
- drvdata->mode = val & ETMv4_MODE_ALL;
-
- if (drvdata->mode & ETM_MODE_EXCLUDE)
- etm4_set_mode_exclude(drvdata, true);
- else
- etm4_set_mode_exclude(drvdata, false);
-
- if (drvdata->instrp0 == true) {
- /* start by clearing instruction P0 field */
- drvdata->cfg &= ~(BIT(1) | BIT(2));
- if (drvdata->mode & ETM_MODE_LOAD)
- /* 0b01 Trace load instructions as P0 instructions */
- drvdata->cfg |= BIT(1);
- if (drvdata->mode & ETM_MODE_STORE)
- /* 0b10 Trace store instructions as P0 instructions */
- drvdata->cfg |= BIT(2);
- if (drvdata->mode & ETM_MODE_LOAD_STORE)
- /*
- * 0b11 Trace load and store instructions
- * as P0 instructions
- */
- drvdata->cfg |= BIT(1) | BIT(2);
- }
-
- /* bit[3], Branch broadcast mode */
- if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
- drvdata->cfg |= BIT(3);
- else
- drvdata->cfg &= ~BIT(3);
-
- /* bit[4], Cycle counting instruction trace bit */
- if ((drvdata->mode & ETMv4_MODE_CYCACC) &&
- (drvdata->trccci == true))
- drvdata->cfg |= BIT(4);
- else
- drvdata->cfg &= ~BIT(4);
-
- /* bit[6], Context ID tracing bit */
- if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
- drvdata->cfg |= BIT(6);
- else
- drvdata->cfg &= ~BIT(6);
-
- if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
- drvdata->cfg |= BIT(7);
- else
- drvdata->cfg &= ~BIT(7);
-
- /* bits[10:8], Conditional instruction tracing bit */
- mode = ETM_MODE_COND(drvdata->mode);
- if (drvdata->trccond == true) {
- drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
- drvdata->cfg |= mode << 8;
- }
-
- /* bit[11], Global timestamp tracing bit */
- if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
- drvdata->cfg |= BIT(11);
- else
- drvdata->cfg &= ~BIT(11);
- /* bit[12], Return stack enable bit */
- if ((drvdata->mode & ETM_MODE_RETURNSTACK) &&
- (drvdata->retstack == true))
- drvdata->cfg |= BIT(12);
- else
- drvdata->cfg &= ~BIT(12);
-
- /* bits[14:13], Q element enable field */
- mode = ETM_MODE_QELEM(drvdata->mode);
- /* start by clearing QE bits */
- drvdata->cfg &= ~(BIT(13) | BIT(14));
- /* if supported, Q elements with instruction counts are enabled */
- if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
- drvdata->cfg |= BIT(13);
/*
- * if supported, Q elements with and without instruction
- * counts are enabled
+ * Executing etm4_enable_hw on the cpu whose ETM is being enabled
+	 * ensures that register writes occur when the cpu is powered.
*/
- if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
- drvdata->cfg |= BIT(14);
-
- /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
- if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) &&
- (drvdata->atbtrig == true))
- drvdata->eventctrl1 |= BIT(11);
- else
- drvdata->eventctrl1 &= ~BIT(11);
-
- /* bit[12], Low-power state behavior override bit */
- if ((drvdata->mode & ETM_MODE_LPOVERRIDE) &&
- (drvdata->lpoverride == true))
- drvdata->eventctrl1 |= BIT(12);
- else
- drvdata->eventctrl1 &= ~BIT(12);
-
- /* bit[8], Instruction stall bit */
- if (drvdata->mode & ETM_MODE_ISTALL_EN)
- drvdata->stall_ctrl |= BIT(8);
- else
- drvdata->stall_ctrl &= ~BIT(8);
-
- /* bit[10], Prioritize instruction trace bit */
- if (drvdata->mode & ETM_MODE_INSTPRIO)
- drvdata->stall_ctrl |= BIT(10);
- else
- drvdata->stall_ctrl &= ~BIT(10);
-
- /* bit[13], Trace overflow prevention bit */
- if ((drvdata->mode & ETM_MODE_NOOVERFLOW) &&
- (drvdata->nooverflow == true))
- drvdata->stall_ctrl |= BIT(13);
- else
- drvdata->stall_ctrl &= ~BIT(13);
-
- /* bit[9] Start/stop logic control bit */
- if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP)
- drvdata->vinst_ctrl |= BIT(9);
- else
- drvdata->vinst_ctrl &= ~BIT(9);
-
- /* bit[10], Whether a trace unit must trace a Reset exception */
- if (drvdata->mode & ETM_MODE_TRACE_RESET)
- drvdata->vinst_ctrl |= BIT(10);
- else
- drvdata->vinst_ctrl &= ~BIT(10);
-
- /* bit[11], Whether a trace unit must trace a system error exception */
- if ((drvdata->mode & ETM_MODE_TRACE_ERR) &&
- (drvdata->trc_error == true))
- drvdata->vinst_ctrl |= BIT(11);
- else
- drvdata->vinst_ctrl &= ~BIT(11);
-
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(mode);
-
-static ssize_t pe_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->pe_sel;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t pe_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- if (val > drvdata->nr_pe) {
- spin_unlock(&drvdata->spinlock);
- return -EINVAL;
- }
+ ret = smp_call_function_single(drvdata->cpu,
+ etm4_enable_hw, drvdata, 1);
+ if (ret)
+ goto err;
- drvdata->pe_sel = val;
+ drvdata->sticky_enable = true;
spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(pe);
-
-static ssize_t event_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->eventctrl0;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-static ssize_t event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
+ dev_info(drvdata->dev, "ETM tracing enabled\n");
+ return 0;
- spin_lock(&drvdata->spinlock);
- switch (drvdata->nr_event) {
- case 0x0:
- /* EVENT0, bits[7:0] */
- drvdata->eventctrl0 = val & 0xFF;
- break;
- case 0x1:
- /* EVENT1, bits[15:8] */
- drvdata->eventctrl0 = val & 0xFFFF;
- break;
- case 0x2:
- /* EVENT2, bits[23:16] */
- drvdata->eventctrl0 = val & 0xFFFFFF;
- break;
- case 0x3:
- /* EVENT3, bits[31:24] */
- drvdata->eventctrl0 = val;
- break;
- default:
- break;
- }
+err:
spin_unlock(&drvdata->spinlock);
- return size;
+ return ret;
}
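
The cross-call above is the canonical way to run the register programming on the tracer's own CPU. A minimal sketch of the pattern (kernel context assumed; do_program is a placeholder, not a driver function):

	#include <linux/smp.h>

	static void do_program(void *info)
	{
		/* runs on the target CPU, in IPI context */
	}

	static int program_on(int cpu, void *data)
	{
		/* wait == 1: return only once do_program(data) has run on 'cpu' */
		return smp_call_function_single(cpu, do_program, data, 1);
	}
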
-static DEVICE_ATTR_RW(event);
-static ssize_t event_instren_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int etm4_enable(struct coresight_device *csdev,
+ struct perf_event_attr *attr, u32 mode)
{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = BMVAL(drvdata->eventctrl1, 0, 3);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
+ int ret;
+ u32 val;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-static ssize_t event_instren_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
+ /* Someone is already using the tracer */
+ if (val)
+ return -EBUSY;
- spin_lock(&drvdata->spinlock);
- /* start by clearing all instruction event enable bits */
- drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
- switch (drvdata->nr_event) {
- case 0x0:
- /* generate Event element for event 1 */
- drvdata->eventctrl1 |= val & BIT(1);
+ switch (mode) {
+ case CS_MODE_SYSFS:
+ ret = etm4_enable_sysfs(csdev);
break;
- case 0x1:
- /* generate Event element for event 1 and 2 */
- drvdata->eventctrl1 |= val & (BIT(0) | BIT(1));
- break;
- case 0x2:
- /* generate Event element for event 1, 2 and 3 */
- drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
- break;
- case 0x3:
- /* generate Event element for all 4 events */
- drvdata->eventctrl1 |= val & 0xF;
+ case CS_MODE_PERF:
+ ret = etm4_enable_perf(csdev, attr);
break;
default:
- break;
- }
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(event_instren);
-
-static ssize_t event_ts_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->ts_ctrl;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t event_ts_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (!drvdata->ts_size)
- return -EINVAL;
-
- drvdata->ts_ctrl = val & ETMv4_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(event_ts);
-
-static ssize_t syncfreq_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->syncfreq;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t syncfreq_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (drvdata->syncpr == true)
- return -EINVAL;
-
- drvdata->syncfreq = val & ETMv4_SYNC_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(syncfreq);
-
-static ssize_t cyc_threshold_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->ccctlr;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t cyc_threshold_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (val < drvdata->ccitmin)
- return -EINVAL;
-
- drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(cyc_threshold);
-
-static ssize_t bb_ctrl_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->bb_ctrl;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t bb_ctrl_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (drvdata->trcbb == false)
- return -EINVAL;
- if (!drvdata->nr_addr_cmp)
- return -EINVAL;
- /*
- * Bit[7:0] selects which address range comparator is used for
- * branch broadcast control.
- */
- if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
- return -EINVAL;
-
- drvdata->bb_ctrl = val;
- return size;
-}
-static DEVICE_ATTR_RW(bb_ctrl);
-
-static ssize_t event_vinst_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t event_vinst_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- val &= ETMv4_EVENT_MASK;
- drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK;
- drvdata->vinst_ctrl |= val;
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(event_vinst);
-
-static ssize_t s_exlevel_vinst_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = BMVAL(drvdata->vinst_ctrl, 16, 19);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t s_exlevel_vinst_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
- drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
- /* enable instruction tracing for corresponding exception level */
- val &= drvdata->s_ex_level;
- drvdata->vinst_ctrl |= (val << 16);
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(s_exlevel_vinst);
-
-static ssize_t ns_exlevel_vinst_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- /* EXLEVEL_NS, bits[23:20] */
- val = BMVAL(drvdata->vinst_ctrl, 20, 23);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t ns_exlevel_vinst_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- /* clear EXLEVEL_NS bits (bit[23] is never implemented */
- drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
- /* enable instruction tracing for corresponding exception level */
- val &= drvdata->ns_ex_level;
- drvdata->vinst_ctrl |= (val << 20);
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(ns_exlevel_vinst);
-
-static ssize_t addr_idx_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->addr_idx;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t addr_idx_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (val >= drvdata->nr_addr_cmp * 2)
- return -EINVAL;
-
- /*
- * Use spinlock to ensure index doesn't change while it gets
- * dereferenced multiple times within a spinlock block elsewhere.
- */
- spin_lock(&drvdata->spinlock);
- drvdata->addr_idx = val;
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(addr_idx);
-
-static ssize_t addr_instdatatype_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- ssize_t len;
- u8 val, idx;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- val = BMVAL(drvdata->addr_acc[idx], 0, 1);
- len = scnprintf(buf, PAGE_SIZE, "%s\n",
- val == ETM_INSTR_ADDR ? "instr" :
- (val == ETM_DATA_LOAD_ADDR ? "data_load" :
- (val == ETM_DATA_STORE_ADDR ? "data_store" :
- "data_load_store")));
- spin_unlock(&drvdata->spinlock);
- return len;
-}
-
-static ssize_t addr_instdatatype_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- char str[20] = "";
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (strlen(buf) >= 20)
- return -EINVAL;
- if (sscanf(buf, "%s", str) != 1)
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!strcmp(str, "instr"))
- /* TYPE, bits[1:0] */
- drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1));
-
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(addr_instdatatype);
-
-static ssize_t addr_single_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- idx = drvdata->addr_idx;
- spin_lock(&drvdata->spinlock);
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
- val = (unsigned long)drvdata->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t addr_single_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- drvdata->addr_val[idx] = (u64)val;
- drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(addr_single);
-
-static ssize_t addr_range_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- u8 idx;
- unsigned long val1, val2;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (idx % 2 != 0) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
- if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
- drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
- (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
- drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- val1 = (unsigned long)drvdata->addr_val[idx];
- val2 = (unsigned long)drvdata->addr_val[idx + 1];
- spin_unlock(&drvdata->spinlock);
- return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
-}
-
-static ssize_t addr_range_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- unsigned long val1, val2;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
- return -EINVAL;
- /* lower address comparator cannot have a higher address value */
- if (val1 > val2)
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (idx % 2 != 0) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
- drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
- (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
- drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- drvdata->addr_val[idx] = (u64)val1;
- drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
- drvdata->addr_val[idx + 1] = (u64)val2;
- drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
- /*
- * Program include or exclude control bits for vinst or vdata
- * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
- */
- if (drvdata->mode & ETM_MODE_EXCLUDE)
- etm4_set_mode_exclude(drvdata, true);
- else
- etm4_set_mode_exclude(drvdata, false);
-
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(addr_range);
-
-static ssize_t addr_start_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
-
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- val = (unsigned long)drvdata->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t addr_start_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!drvdata->nr_addr_cmp) {
- spin_unlock(&drvdata->spinlock);
- return -EINVAL;
- }
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- drvdata->addr_val[idx] = (u64)val;
- drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
- drvdata->vissctlr |= BIT(idx);
- /* SSSTATUS, bit[9] - turn on start/stop logic */
- drvdata->vinst_ctrl |= BIT(9);
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(addr_start);
-
-static ssize_t addr_stop_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
-
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- val = (unsigned long)drvdata->addr_val[idx];
- spin_unlock(&drvdata->spinlock);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t addr_stop_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!drvdata->nr_addr_cmp) {
- spin_unlock(&drvdata->spinlock);
- return -EINVAL;
- }
- if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
- drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
- spin_unlock(&drvdata->spinlock);
- return -EPERM;
- }
-
- drvdata->addr_val[idx] = (u64)val;
- drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
- drvdata->vissctlr |= BIT(idx + 16);
- /* SSSTATUS, bit[9] - turn on start/stop logic */
- drvdata->vinst_ctrl |= BIT(9);
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(addr_stop);
-
-static ssize_t addr_ctxtype_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- ssize_t len;
- u8 idx, val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- /* CONTEXTTYPE, bits[3:2] */
- val = BMVAL(drvdata->addr_acc[idx], 2, 3);
- len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
- (val == ETM_CTX_CTXID ? "ctxid" :
- (val == ETM_CTX_VMID ? "vmid" : "all")));
- spin_unlock(&drvdata->spinlock);
- return len;
-}
-
-static ssize_t addr_ctxtype_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- char str[10] = "";
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (strlen(buf) >= 10)
- return -EINVAL;
- if (sscanf(buf, "%s", str) != 1)
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- if (!strcmp(str, "none"))
- /* start by clearing context type bits */
- drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3));
- else if (!strcmp(str, "ctxid")) {
-		/* 0b01 The trace unit performs a Context ID comparison */
- if (drvdata->numcidc) {
- drvdata->addr_acc[idx] |= BIT(2);
- drvdata->addr_acc[idx] &= ~BIT(3);
- }
- } else if (!strcmp(str, "vmid")) {
-		/* 0b10 The trace unit performs a VMID comparison */
- if (drvdata->numvmidc) {
- drvdata->addr_acc[idx] &= ~BIT(2);
- drvdata->addr_acc[idx] |= BIT(3);
- }
- } else if (!strcmp(str, "all")) {
- /*
- * 0b11 The trace unit performs a Context ID
-		 * comparison and a VMID comparison
- */
- if (drvdata->numcidc)
- drvdata->addr_acc[idx] |= BIT(2);
- if (drvdata->numvmidc)
- drvdata->addr_acc[idx] |= BIT(3);
+ ret = -EINVAL;
}
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(addr_ctxtype);
-
-static ssize_t addr_context_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- /* context ID comparator bits[6:4] */
- val = BMVAL(drvdata->addr_acc[idx], 4, 6);
- spin_unlock(&drvdata->spinlock);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t addr_context_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
- return -EINVAL;
- if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
- drvdata->numcidc : drvdata->numvmidc))
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->addr_idx;
- /* clear context ID comparator bits[6:4] */
- drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
- drvdata->addr_acc[idx] |= (val << 4);
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(addr_context);
-
-static ssize_t seq_idx_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->seq_idx;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t seq_idx_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (val >= drvdata->nrseqstate - 1)
- return -EINVAL;
-
- /*
- * Use spinlock to ensure index doesn't change while it gets
- * dereferenced multiple times within a spinlock block elsewhere.
- */
- spin_lock(&drvdata->spinlock);
- drvdata->seq_idx = val;
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(seq_idx);
-
-static ssize_t seq_state_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
- val = drvdata->seq_state;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t seq_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (val >= drvdata->nrseqstate)
- return -EINVAL;
-
- drvdata->seq_state = val;
- return size;
-}
-static DEVICE_ATTR_RW(seq_state);
-
-static ssize_t seq_event_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->seq_idx;
- val = drvdata->seq_ctrl[idx];
- spin_unlock(&drvdata->spinlock);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t seq_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->seq_idx;
- /* RST, bits[7:0] */
- drvdata->seq_ctrl[idx] = val & 0xFF;
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(seq_event);
-
-static ssize_t seq_reset_event_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->seq_rst;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t seq_reset_event_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (!(drvdata->nrseqstate))
- return -EINVAL;
-
- drvdata->seq_rst = val & ETMv4_EVENT_MASK;
- return size;
-}
-static DEVICE_ATTR_RW(seq_reset_event);
-
-static ssize_t cntr_idx_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->cntr_idx;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t cntr_idx_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (val >= drvdata->nr_cntr)
- return -EINVAL;
-
- /*
- * Use spinlock to ensure index doesn't change while it gets
- * dereferenced multiple times within a spinlock block elsewhere.
- */
- spin_lock(&drvdata->spinlock);
- drvdata->cntr_idx = val;
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(cntr_idx);
-
-static ssize_t cntrldvr_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->cntr_idx;
- val = drvdata->cntrldvr[idx];
- spin_unlock(&drvdata->spinlock);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t cntrldvr_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (val > ETM_CNTR_MAX_VAL)
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->cntr_idx;
- drvdata->cntrldvr[idx] = val;
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(cntrldvr);
-
-static ssize_t cntr_val_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->cntr_idx;
- val = drvdata->cntr_val[idx];
- spin_unlock(&drvdata->spinlock);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t cntr_val_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (val > ETM_CNTR_MAX_VAL)
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->cntr_idx;
- drvdata->cntr_val[idx] = val;
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(cntr_val);
-
-static ssize_t cntr_ctrl_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->cntr_idx;
- val = drvdata->cntr_ctrl[idx];
- spin_unlock(&drvdata->spinlock);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t cntr_ctrl_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->cntr_idx;
- drvdata->cntr_ctrl[idx] = val;
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(cntr_ctrl);
-
-static ssize_t res_idx_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->res_idx;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t res_idx_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- /* Resource selector pair 0 is always implemented and reserved */
- if (val < 2 || val >= drvdata->nr_resource * 2)
- return -EINVAL;
+ /* The tracer didn't start */
+ if (ret)
+ local_set(&drvdata->mode, CS_MODE_DISABLED);
- /*
- * Use spinlock to ensure index doesn't change while it gets
- * dereferenced multiple times within a spinlock block elsewhere.
- */
- spin_lock(&drvdata->spinlock);
- drvdata->res_idx = val;
- spin_unlock(&drvdata->spinlock);
- return size;
+ return ret;
}
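
etm4_enable() hands the tracer to exactly one owner by atomically moving mode out of CS_MODE_DISABLED. A user-space sketch of the same claim/release discipline, using C11 atomics in place of the kernel's local_cmpxchg (all names here are illustrative, not part of the driver):

	#include <stdatomic.h>
	#include <stdio.h>

	enum { MODE_DISABLED = 0, MODE_SYSFS = 1, MODE_PERF = 2 };

	static atomic_int tracer_mode = MODE_DISABLED;

	/* Claim the tracer for 'mode'; returns 0 on success, -1 if busy. */
	static int tracer_claim(int mode)
	{
		int expected = MODE_DISABLED;

		/* Only one entity may move the tracer out of DISABLED. */
		if (!atomic_compare_exchange_strong(&tracer_mode, &expected, mode))
			return -1;	/* someone else got there first */
		return 0;
	}

	static void tracer_release(void)
	{
		atomic_store(&tracer_mode, MODE_DISABLED);
	}

	int main(void)
	{
		printf("first claim:   %d\n", tracer_claim(MODE_SYSFS));	/* 0 */
		printf("second claim:  %d\n", tracer_claim(MODE_PERF));	/* -1, busy */
		tracer_release();
		printf("after release: %d\n", tracer_claim(MODE_PERF));	/* 0 */
		return 0;
	}
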
-static DEVICE_ATTR_RW(res_idx);
-static ssize_t res_ctrl_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static void etm4_disable_hw(void *info)
{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ u32 control;
+ struct etmv4_drvdata *drvdata = info;
- spin_lock(&drvdata->spinlock);
- idx = drvdata->res_idx;
- val = drvdata->res_ctrl[idx];
- spin_unlock(&drvdata->spinlock);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
+ CS_UNLOCK(drvdata->base);
-static ssize_t res_ctrl_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ control = readl_relaxed(drvdata->base + TRCPRGCTLR);
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
+ /* EN, bit[0] Trace unit enable bit */
+ control &= ~0x1;
- spin_lock(&drvdata->spinlock);
- idx = drvdata->res_idx;
-	/* For an odd idx, the pair inversion bit is RES0 */
- if (idx % 2 != 0)
- /* PAIRINV, bit[21] */
- val &= ~BIT(21);
- drvdata->res_ctrl[idx] = val;
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(res_ctrl);
+ /* make sure everything completes before disabling */
+ mb();
+ isb();
+ writel_relaxed(control, drvdata->base + TRCPRGCTLR);
-static ssize_t ctxid_idx_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ CS_LOCK(drvdata->base);
- val = drvdata->ctxid_idx;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+ dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}
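
The disable sequence reads TRCPRGCTLR, clears EN, and writes the value back only after a full barrier plus isb(), so earlier memory accesses and instructions have completed before the trace unit stops. A compact stand-alone sketch of that drain-then-disable MMIO pattern (the register pointer and fence are stand-ins, not the driver's helpers):

	#include <stdatomic.h>

	/* Hypothetical control register; bit[0] is the enable bit. */
	static void unit_disable(volatile unsigned int *ctrl)
	{
		unsigned int val = *ctrl;

		val &= ~1u;				/* clear EN, bit[0] */
		atomic_thread_fence(memory_order_seq_cst);	/* ~ mb() + isb() */
		*ctrl = val;				/* stop the unit last */
	}
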
-static ssize_t ctxid_idx_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
+static int etm4_disable_perf(struct coresight_device *csdev)
{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (val >= drvdata->numcidc)
+ if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
return -EINVAL;
- /*
- * Use spinlock to ensure index doesn't change while it gets
- * dereferenced multiple times within a spinlock block elsewhere.
- */
- spin_lock(&drvdata->spinlock);
- drvdata->ctxid_idx = val;
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(ctxid_idx);
-
-static ssize_t ctxid_pid_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- u8 idx;
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->ctxid_idx;
- val = (unsigned long)drvdata->ctxid_vpid[idx];
- spin_unlock(&drvdata->spinlock);
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+ etm4_disable_hw(drvdata);
+ return 0;
}
-static ssize_t ctxid_pid_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
+static void etm4_disable_sysfs(struct coresight_device *csdev)
{
- u8 idx;
- unsigned long vpid, pid;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
/*
- * only implemented when ctxid tracing is enabled, i.e. at least one
- * ctxid comparator is implemented and ctxid is greater than 0 bits
- * in length
+	 * Taking the hotplug lock here protects against the clocks being
+	 * disabled while tracing is left on (a crash scenario) if a user
+	 * disable occurs after the cpu online mask indicates the cpu is
+	 * offline but before the DYING hotplug callback is serviced by
+	 * the ETM driver.
*/
- if (!drvdata->ctxid_size || !drvdata->numcidc)
- return -EINVAL;
- if (kstrtoul(buf, 16, &vpid))
- return -EINVAL;
-
- pid = coresight_vpid_to_pid(vpid);
-
- spin_lock(&drvdata->spinlock);
- idx = drvdata->ctxid_idx;
- drvdata->ctxid_pid[idx] = (u64)pid;
- drvdata->ctxid_vpid[idx] = (u64)vpid;
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(ctxid_pid);
-
-static ssize_t ctxid_masks_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val1, val2;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
+ get_online_cpus();
spin_lock(&drvdata->spinlock);
- val1 = drvdata->ctxid_mask0;
- val2 = drvdata->ctxid_mask1;
- spin_unlock(&drvdata->spinlock);
- return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
-}
-static ssize_t ctxid_masks_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 i, j, maskbyte;
- unsigned long val1, val2, mask;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- /*
- * only implemented when ctxid tracing is enabled, i.e. at least one
- * ctxid comparator is implemented and ctxid is greater than 0 bits
- * in length
- */
- if (!drvdata->ctxid_size || !drvdata->numcidc)
- return -EINVAL;
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
- /*
- * each byte[0..3] controls mask value applied to ctxid
- * comparator[0..3]
- */
- switch (drvdata->numcidc) {
- case 0x1:
- /* COMP0, bits[7:0] */
- drvdata->ctxid_mask0 = val1 & 0xFF;
- break;
- case 0x2:
- /* COMP1, bits[15:8] */
- drvdata->ctxid_mask0 = val1 & 0xFFFF;
- break;
- case 0x3:
- /* COMP2, bits[23:16] */
- drvdata->ctxid_mask0 = val1 & 0xFFFFFF;
- break;
- case 0x4:
- /* COMP3, bits[31:24] */
- drvdata->ctxid_mask0 = val1;
- break;
- case 0x5:
- /* COMP4, bits[7:0] */
- drvdata->ctxid_mask0 = val1;
- drvdata->ctxid_mask1 = val2 & 0xFF;
- break;
- case 0x6:
- /* COMP5, bits[15:8] */
- drvdata->ctxid_mask0 = val1;
- drvdata->ctxid_mask1 = val2 & 0xFFFF;
- break;
- case 0x7:
- /* COMP6, bits[23:16] */
- drvdata->ctxid_mask0 = val1;
- drvdata->ctxid_mask1 = val2 & 0xFFFFFF;
- break;
- case 0x8:
- /* COMP7, bits[31:24] */
- drvdata->ctxid_mask0 = val1;
- drvdata->ctxid_mask1 = val2;
- break;
- default:
- break;
- }
/*
- * If software sets a mask bit to 1, it must program relevant byte
- * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
- * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
- * of ctxid comparator0 value (corresponding to byte 0) register.
+ * Executing etm4_disable_hw on the cpu whose ETM is being disabled
+	 * ensures that register writes occur when the cpu is powered.
*/
- mask = drvdata->ctxid_mask0;
- for (i = 0; i < drvdata->numcidc; i++) {
- /* mask value of corresponding ctxid comparator */
- maskbyte = mask & ETMv4_EVENT_MASK;
- /*
- * each bit corresponds to a byte of respective ctxid comparator
- * value register
- */
- for (j = 0; j < 8; j++) {
- if (maskbyte & 1)
- drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8));
- maskbyte >>= 1;
- }
- /* Select the next ctxid comparator mask value */
- if (i == 3)
- /* ctxid comparators[4-7] */
- mask = drvdata->ctxid_mask1;
- else
- mask >>= 0x8;
- }
+ smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(ctxid_masks);
-
-static ssize_t vmid_idx_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->vmid_idx;
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t vmid_idx_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
- if (val >= drvdata->numvmidc)
- return -EINVAL;
-
- /*
- * Use spinlock to ensure index doesn't change while it gets
- * dereferenced multiple times within a spinlock block elsewhere.
- */
- spin_lock(&drvdata->spinlock);
- drvdata->vmid_idx = val;
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(vmid_idx);
-
-static ssize_t vmid_val_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx];
- return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
-}
-
-static ssize_t vmid_val_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned long val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- /*
- * only implemented when vmid tracing is enabled, i.e. at least one
- * vmid comparator is implemented and at least 8 bit vmid size
- */
- if (!drvdata->vmid_size || !drvdata->numvmidc)
- return -EINVAL;
- if (kstrtoul(buf, 16, &val))
- return -EINVAL;
+ put_online_cpus();
- spin_lock(&drvdata->spinlock);
- drvdata->vmid_val[drvdata->vmid_idx] = (u64)val;
- spin_unlock(&drvdata->spinlock);
- return size;
+ dev_info(drvdata->dev, "ETM tracing disabled\n");
}
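
The get/put_online_cpus() pair is what makes the sysfs disable race-free against hotplug: a CPU cannot transition offline while the cross-call is in flight. A rough user-space analogue of that reader/writer discipline (the pthread names are illustrative):

	#include <pthread.h>

	static pthread_rwlock_t hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;

	/* Disable path: hold the lock shared so no CPU can go offline meanwhile. */
	void tracer_disable(void)
	{
		pthread_rwlock_rdlock(&hotplug_lock);	/* ~ get_online_cpus() */
		/* ... cross-CPU call to program the hardware ... */
		pthread_rwlock_unlock(&hotplug_lock);	/* ~ put_online_cpus() */
	}

	/* Hotplug path: takes the lock exclusively before offlining a CPU. */
	void cpu_offline(void)
	{
		pthread_rwlock_wrlock(&hotplug_lock);
		/* ... tear the CPU down ... */
		pthread_rwlock_unlock(&hotplug_lock);
	}
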
-static DEVICE_ATTR_RW(vmid_val);
-static ssize_t vmid_masks_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static void etm4_disable(struct coresight_device *csdev)
{
- unsigned long val1, val2;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- spin_lock(&drvdata->spinlock);
- val1 = drvdata->vmid_mask0;
- val2 = drvdata->vmid_mask1;
- spin_unlock(&drvdata->spinlock);
- return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
-}
+ u32 mode;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-static ssize_t vmid_masks_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- u8 i, j, maskbyte;
- unsigned long val1, val2, mask;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
/*
- * only implemented when vmid tracing is enabled, i.e. at least one
- * vmid comparator is implemented and at least 8 bit vmid size
+	 * For as long as the tracer isn't disabled, another entity can't
+ * change its status. As such we can read the status here without
+ * fearing it will change under us.
*/
- if (!drvdata->vmid_size || !drvdata->numvmidc)
- return -EINVAL;
- if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
- return -EINVAL;
-
- spin_lock(&drvdata->spinlock);
+ mode = local_read(&drvdata->mode);
- /*
- * each byte[0..3] controls mask value applied to vmid
- * comparator[0..3]
- */
- switch (drvdata->numvmidc) {
- case 0x1:
- /* COMP0, bits[7:0] */
- drvdata->vmid_mask0 = val1 & 0xFF;
- break;
- case 0x2:
- /* COMP1, bits[15:8] */
- drvdata->vmid_mask0 = val1 & 0xFFFF;
- break;
- case 0x3:
- /* COMP2, bits[23:16] */
- drvdata->vmid_mask0 = val1 & 0xFFFFFF;
+ switch (mode) {
+ case CS_MODE_DISABLED:
break;
- case 0x4:
- /* COMP3, bits[31:24] */
- drvdata->vmid_mask0 = val1;
+ case CS_MODE_SYSFS:
+ etm4_disable_sysfs(csdev);
break;
- case 0x5:
- /* COMP4, bits[7:0] */
- drvdata->vmid_mask0 = val1;
- drvdata->vmid_mask1 = val2 & 0xFF;
- break;
- case 0x6:
- /* COMP5, bits[15:8] */
- drvdata->vmid_mask0 = val1;
- drvdata->vmid_mask1 = val2 & 0xFFFF;
- break;
- case 0x7:
- /* COMP6, bits[23:16] */
- drvdata->vmid_mask0 = val1;
- drvdata->vmid_mask1 = val2 & 0xFFFFFF;
- break;
- case 0x8:
- /* COMP7, bits[31:24] */
- drvdata->vmid_mask0 = val1;
- drvdata->vmid_mask1 = val2;
- break;
- default:
+ case CS_MODE_PERF:
+ etm4_disable_perf(csdev);
break;
}
- /*
- * If software sets a mask bit to 1, it must program relevant byte
- * of vmid comparator value 0x0, otherwise behavior is unpredictable.
- * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
- * of vmid comparator0 value (corresponding to byte 0) register.
- */
- mask = drvdata->vmid_mask0;
- for (i = 0; i < drvdata->numvmidc; i++) {
- /* mask value of corresponding vmid comparator */
- maskbyte = mask & ETMv4_EVENT_MASK;
- /*
- * each bit corresponds to a byte of respective vmid comparator
- * value register
- */
- for (j = 0; j < 8; j++) {
- if (maskbyte & 1)
- drvdata->vmid_val[i] &= ~(0xFF << (j * 8));
- maskbyte >>= 1;
- }
- /* Select the next vmid comparator mask value */
- if (i == 3)
- /* vmid comparators[4-7] */
- mask = drvdata->vmid_mask1;
- else
- mask >>= 0x8;
- }
- spin_unlock(&drvdata->spinlock);
- return size;
-}
-static DEVICE_ATTR_RW(vmid_masks);
-
-static ssize_t cpu_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- int val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = drvdata->cpu;
- return scnprintf(buf, PAGE_SIZE, "%d\n", val);
-
+ if (mode)
+ local_set(&drvdata->mode, CS_MODE_DISABLED);
}
-static DEVICE_ATTR_RO(cpu);
-
-static struct attribute *coresight_etmv4_attrs[] = {
- &dev_attr_nr_pe_cmp.attr,
- &dev_attr_nr_addr_cmp.attr,
- &dev_attr_nr_cntr.attr,
- &dev_attr_nr_ext_inp.attr,
- &dev_attr_numcidc.attr,
- &dev_attr_numvmidc.attr,
- &dev_attr_nrseqstate.attr,
- &dev_attr_nr_resource.attr,
- &dev_attr_nr_ss_cmp.attr,
- &dev_attr_reset.attr,
- &dev_attr_mode.attr,
- &dev_attr_pe.attr,
- &dev_attr_event.attr,
- &dev_attr_event_instren.attr,
- &dev_attr_event_ts.attr,
- &dev_attr_syncfreq.attr,
- &dev_attr_cyc_threshold.attr,
- &dev_attr_bb_ctrl.attr,
- &dev_attr_event_vinst.attr,
- &dev_attr_s_exlevel_vinst.attr,
- &dev_attr_ns_exlevel_vinst.attr,
- &dev_attr_addr_idx.attr,
- &dev_attr_addr_instdatatype.attr,
- &dev_attr_addr_single.attr,
- &dev_attr_addr_range.attr,
- &dev_attr_addr_start.attr,
- &dev_attr_addr_stop.attr,
- &dev_attr_addr_ctxtype.attr,
- &dev_attr_addr_context.attr,
- &dev_attr_seq_idx.attr,
- &dev_attr_seq_state.attr,
- &dev_attr_seq_event.attr,
- &dev_attr_seq_reset_event.attr,
- &dev_attr_cntr_idx.attr,
- &dev_attr_cntrldvr.attr,
- &dev_attr_cntr_val.attr,
- &dev_attr_cntr_ctrl.attr,
- &dev_attr_res_idx.attr,
- &dev_attr_res_ctrl.attr,
- &dev_attr_ctxid_idx.attr,
- &dev_attr_ctxid_pid.attr,
- &dev_attr_ctxid_masks.attr,
- &dev_attr_vmid_idx.attr,
- &dev_attr_vmid_val.attr,
- &dev_attr_vmid_masks.attr,
- &dev_attr_cpu.attr,
- NULL,
-};
-
-#define coresight_simple_func(name, offset) \
-static ssize_t name##_show(struct device *_dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
- readl_relaxed(drvdata->base + offset)); \
-} \
-static DEVICE_ATTR_RO(name)
-
-coresight_simple_func(trcoslsr, TRCOSLSR);
-coresight_simple_func(trcpdcr, TRCPDCR);
-coresight_simple_func(trcpdsr, TRCPDSR);
-coresight_simple_func(trclsr, TRCLSR);
-coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
-coresight_simple_func(trcdevid, TRCDEVID);
-coresight_simple_func(trcdevtype, TRCDEVTYPE);
-coresight_simple_func(trcpidr0, TRCPIDR0);
-coresight_simple_func(trcpidr1, TRCPIDR1);
-coresight_simple_func(trcpidr2, TRCPIDR2);
-coresight_simple_func(trcpidr3, TRCPIDR3);
-
-static struct attribute *coresight_etmv4_mgmt_attrs[] = {
- &dev_attr_trcoslsr.attr,
- &dev_attr_trcpdcr.attr,
- &dev_attr_trcpdsr.attr,
- &dev_attr_trclsr.attr,
- &dev_attr_trcauthstatus.attr,
- &dev_attr_trcdevid.attr,
- &dev_attr_trcdevtype.attr,
- &dev_attr_trcpidr0.attr,
- &dev_attr_trcpidr1.attr,
- &dev_attr_trcpidr2.attr,
- &dev_attr_trcpidr3.attr,
- NULL,
-};
-coresight_simple_func(trcidr0, TRCIDR0);
-coresight_simple_func(trcidr1, TRCIDR1);
-coresight_simple_func(trcidr2, TRCIDR2);
-coresight_simple_func(trcidr3, TRCIDR3);
-coresight_simple_func(trcidr4, TRCIDR4);
-coresight_simple_func(trcidr5, TRCIDR5);
-/* trcidr[6,7] are reserved */
-coresight_simple_func(trcidr8, TRCIDR8);
-coresight_simple_func(trcidr9, TRCIDR9);
-coresight_simple_func(trcidr10, TRCIDR10);
-coresight_simple_func(trcidr11, TRCIDR11);
-coresight_simple_func(trcidr12, TRCIDR12);
-coresight_simple_func(trcidr13, TRCIDR13);
-
-static struct attribute *coresight_etmv4_trcidr_attrs[] = {
- &dev_attr_trcidr0.attr,
- &dev_attr_trcidr1.attr,
- &dev_attr_trcidr2.attr,
- &dev_attr_trcidr3.attr,
- &dev_attr_trcidr4.attr,
- &dev_attr_trcidr5.attr,
- /* trcidr[6,7] are reserved */
- &dev_attr_trcidr8.attr,
- &dev_attr_trcidr9.attr,
- &dev_attr_trcidr10.attr,
- &dev_attr_trcidr11.attr,
- &dev_attr_trcidr12.attr,
- &dev_attr_trcidr13.attr,
- NULL,
-};
-
-static const struct attribute_group coresight_etmv4_group = {
- .attrs = coresight_etmv4_attrs,
-};
-
-static const struct attribute_group coresight_etmv4_mgmt_group = {
- .attrs = coresight_etmv4_mgmt_attrs,
- .name = "mgmt",
-};
-
-static const struct attribute_group coresight_etmv4_trcidr_group = {
- .attrs = coresight_etmv4_trcidr_attrs,
- .name = "trcidr",
+static const struct coresight_ops_source etm4_source_ops = {
+ .cpu_id = etm4_cpu_id,
+ .trace_id = etm4_trace_id,
+ .enable = etm4_enable,
+ .disable = etm4_disable,
};
-static const struct attribute_group *coresight_etmv4_groups[] = {
- &coresight_etmv4_group,
- &coresight_etmv4_mgmt_group,
- &coresight_etmv4_trcidr_group,
- NULL,
+static const struct coresight_ops etm4_cs_ops = {
+ .source_ops = &etm4_source_ops,
};
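
For orientation, the coresight core reaches these callbacks through the device's ops table; the sysfs enable path ends up doing roughly the following (a simplified sketch assuming the usual source_ops() accessor from coresight-priv.h, not the framework's exact code):

	#include <linux/coresight.h>

	static int enable_source(struct coresight_device *csdev, u32 mode)
	{
		int ret = 0;

		/* dispatches into etm4_enable() for an ETMv4 source */
		if (source_ops(csdev)->enable)
			ret = source_ops(csdev)->enable(csdev, NULL, mode);

		return ret;
	}
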
static void etm4_init_arch_data(void *info)
@@ -2313,6 +408,9 @@ static void etm4_init_arch_data(void *info)
u32 etmidr5;
struct etmv4_drvdata *drvdata = info;
+ /* Make sure all registers are accessible */
+ etm4_os_unlock(drvdata);
+
CS_UNLOCK(drvdata->base);
/* find all capabilities of the tracing unit */
@@ -2464,93 +562,115 @@ static void etm4_init_arch_data(void *info)
CS_LOCK(drvdata->base);
}
-static void etm4_init_default_data(struct etmv4_drvdata *drvdata)
+static void etm4_set_default(struct etmv4_config *config)
{
- int i;
+ if (WARN_ON_ONCE(!config))
+ return;
- drvdata->pe_sel = 0x0;
- drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID |
- ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK);
+ /*
+ * Make default initialisation trace everything
+ *
+ * Select the "always true" resource selector on the
+ * "Enablign Event" line and configure address range comparator
+ * '0' to trace all the possible address range. From there
+ * configure the "include/exclude" engine to include address
+ * range comparator '0'.
+ */
/* disable all events tracing */
- drvdata->eventctrl0 = 0x0;
- drvdata->eventctrl1 = 0x0;
+ config->eventctrl0 = 0x0;
+ config->eventctrl1 = 0x0;
/* disable stalling */
- drvdata->stall_ctrl = 0x0;
+ config->stall_ctrl = 0x0;
+
+ /* enable trace synchronization every 4096 bytes, if available */
+ config->syncfreq = 0xC;
/* disable timestamp event */
- drvdata->ts_ctrl = 0x0;
+ config->ts_ctrl = 0x0;
- /* enable trace synchronization every 4096 bytes for trace */
- if (drvdata->syncpr == false)
- drvdata->syncfreq = 0xC;
+ /* TRCVICTLR::EVENT = 0x01, select the always on logic */
+ config->vinst_ctrl |= BIT(0);
/*
- * enable viewInst to trace everything with start-stop logic in
- * started state
+ * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
+ * in the started state
*/
- drvdata->vinst_ctrl |= BIT(0);
- /* set initial state of start-stop logic */
- if (drvdata->nr_addr_cmp)
- drvdata->vinst_ctrl |= BIT(9);
+ config->vinst_ctrl |= BIT(9);
- /* no address range filtering for ViewInst */
- drvdata->viiectlr = 0x0;
- /* no start-stop filtering for ViewInst */
- drvdata->vissctlr = 0x0;
+ /*
+ * Configure address range comparator '0' to encompass all
+ * possible addresses.
+ */
- /* disable seq events */
- for (i = 0; i < drvdata->nrseqstate-1; i++)
- drvdata->seq_ctrl[i] = 0x0;
- drvdata->seq_rst = 0x0;
- drvdata->seq_state = 0x0;
+ /* First half of default address comparator: start at address 0 */
+ config->addr_val[ETM_DEFAULT_ADDR_COMP] = 0x0;
+ /* trace instruction addresses */
+ config->addr_acc[ETM_DEFAULT_ADDR_COMP] &= ~(BIT(0) | BIT(1));
+	/* EXLEVEL_NS, bits[15:12], only trace application and kernel space */
+ config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= ETM_EXLEVEL_NS_HYP;
+ /* EXLEVEL_S, bits[11:8], don't trace anything in secure state */
+ config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= (ETM_EXLEVEL_S_APP |
+ ETM_EXLEVEL_S_OS |
+ ETM_EXLEVEL_S_HYP);
+ config->addr_type[ETM_DEFAULT_ADDR_COMP] = ETM_ADDR_TYPE_RANGE;
- /* disable external input events */
- drvdata->ext_inp = 0x0;
+ /*
+ * Second half of default address comparator: go all
+ * the way to the top.
+ */
+ config->addr_val[ETM_DEFAULT_ADDR_COMP + 1] = ~0x0;
+ /* trace instruction addresses */
+ config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] &= ~(BIT(0) | BIT(1));
+ /* Address comparator type must be equal for both halves */
+ config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] =
+ config->addr_acc[ETM_DEFAULT_ADDR_COMP];
+ config->addr_type[ETM_DEFAULT_ADDR_COMP + 1] = ETM_ADDR_TYPE_RANGE;
- for (i = 0; i < drvdata->nr_cntr; i++) {
- drvdata->cntrldvr[i] = 0x0;
- drvdata->cntr_ctrl[i] = 0x0;
- drvdata->cntr_val[i] = 0x0;
- }
+ /*
+ * Configure the ViewInst function to filter on address range
+ * comparator '0'.
+ */
+ config->viiectlr = BIT(0);
- /* Resource selector pair 0 is always implemented and reserved */
- drvdata->res_idx = 0x2;
- for (i = 2; i < drvdata->nr_resource * 2; i++)
- drvdata->res_ctrl[i] = 0x0;
+ /* no start-stop filtering for ViewInst */
+ config->vissctlr = 0x0;
+}
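
Put together, the default configuration reduces to a handful of field values. A condensed sketch of the result (field names mirror struct etmv4_config; the struct below is a stand-alone stand-in, not the driver's type):

	struct cfg {
		unsigned int       vinst_ctrl, viiectlr, syncfreq;
		unsigned long long addr_val[2];
	};

	static void set_default(struct cfg *c)
	{
		c->vinst_ctrl  = (1u << 0) | (1u << 9);	/* always-on event, SSSTATUS started */
		c->syncfreq    = 0xC;			/* sync every 4096 bytes */
		c->addr_val[0] = 0x0;			/* range comparator 0: from 0... */
		c->addr_val[1] = ~0x0ULL;		/* ...to the top of memory */
		c->viiectlr    = 1u << 0;		/* ViewInst includes comparator 0 */
	}
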
- for (i = 0; i < drvdata->nr_ss_cmp; i++) {
- drvdata->ss_ctrl[i] = 0x0;
- drvdata->ss_pe_cmp[i] = 0x0;
- }
+void etm4_config_trace_mode(struct etmv4_config *config)
+{
+ u32 addr_acc, mode;
- if (drvdata->nr_addr_cmp >= 1) {
- drvdata->addr_val[0] = (unsigned long)_stext;
- drvdata->addr_val[1] = (unsigned long)_etext;
- drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
- drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
- }
+ mode = config->mode;
+ mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
- for (i = 0; i < drvdata->numcidc; i++) {
- drvdata->ctxid_pid[i] = 0x0;
- drvdata->ctxid_vpid[i] = 0x0;
- }
+ /* excluding kernel AND user space doesn't make sense */
+ WARN_ON_ONCE(mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER));
- drvdata->ctxid_mask0 = 0x0;
- drvdata->ctxid_mask1 = 0x0;
+	/* nothing to do if neither flag is set */
+ if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
+ return;
- for (i = 0; i < drvdata->numvmidc; i++)
- drvdata->vmid_val[i] = 0x0;
- drvdata->vmid_mask0 = 0x0;
- drvdata->vmid_mask1 = 0x0;
+ addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
+ /* clear default config */
+ addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS);
/*
- * A trace ID value of 0 is invalid, so let's start at some
- * random value that fits in 7 bits. ETMv3.x has 0x10 so let's
- * start at 0x20.
+ * EXLEVEL_NS, bits[15:12]
+ * The Exception levels are:
+ * Bit[12] Exception level 0 - Application
+ * Bit[13] Exception level 1 - OS
+ * Bit[14] Exception level 2 - Hypervisor
+ * Bit[15] Never implemented
*/
- drvdata->trcid = 0x20 + drvdata->cpu;
+ if (mode & ETM_MODE_EXCL_KERN)
+ addr_acc |= ETM_EXLEVEL_NS_OS;
+ else
+ addr_acc |= ETM_EXLEVEL_NS_APP;
+
+ config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
+ config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
}
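
To make the EXLEVEL_NS selection concrete: a set bit excludes that exception level from the trace. A small sketch of the decision above (the mask values match the new defines added to coresight-etm4x.h; the helper name is illustrative):

	#define EXLEVEL_NS_APP	(1u << 12)	/* EL0 - application */
	#define EXLEVEL_NS_OS	(1u << 13)	/* EL1 - OS/kernel */

	/* Returns the EXLEVEL_NS bits to set; a set bit means "don't trace". */
	static unsigned int excl_bits(int excl_kern, int excl_user)
	{
		if (excl_kern)
			return EXLEVEL_NS_OS;	/* keep EL0, drop EL1 */
		if (excl_user)
			return EXLEVEL_NS_APP;	/* keep EL1, drop EL0 */
		return 0;			/* trace both */
	}
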
static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
@@ -2569,7 +689,7 @@ static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
etmdrvdata[cpu]->os_unlock = true;
}
- if (etmdrvdata[cpu]->enable)
+ if (local_read(&etmdrvdata[cpu]->mode))
etm4_enable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
break;
@@ -2582,7 +702,7 @@ static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
case CPU_DYING:
spin_lock(&etmdrvdata[cpu]->spinlock);
- if (etmdrvdata[cpu]->enable)
+ if (local_read(&etmdrvdata[cpu]->mode))
etm4_disable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
break;
@@ -2595,6 +715,11 @@ static struct notifier_block etm4_cpu_notifier = {
.notifier_call = etm4_cpu_callback,
};
+static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
+{
+ drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
+}
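
This replaces the driver-private "0x20 + cpu" scheme (removed above) with the framework-wide helper. Assuming the usual coresight-pmu.h definition, the IDs come out of a seed-and-stride scheme along these lines (a sketch, not the verbatim helper):

	/* Assumed shape of coresight_get_trace_id(): trace IDs must be
	 * non-zero and fit in 7 bits; a common seed plus a stride of two
	 * keeps per-cpu IDs unique across source drivers.
	 */
	static inline int get_trace_id(int cpu)
	{
		return 0x10 + cpu * 2;
	}
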
+
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
@@ -2638,9 +763,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
get_online_cpus();
etmdrvdata[drvdata->cpu] = drvdata;
- if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
- drvdata->os_unlock = true;
-
if (smp_call_function_single(drvdata->cpu,
etm4_init_arch_data, drvdata, 1))
dev_err(dev, "ETM arch init failed\n");
@@ -2654,9 +776,9 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
ret = -EINVAL;
goto err_arch_supported;
}
- etm4_init_default_data(drvdata);
- pm_runtime_put(&adev->dev);
+ etm4_init_trace_id(drvdata);
+ etm4_set_default(&drvdata->config);
desc->type = CORESIGHT_DEV_TYPE_SOURCE;
desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
@@ -2667,9 +789,16 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
- goto err_coresight_register;
+ goto err_arch_supported;
}
+ ret = etm_perf_symlink(drvdata->csdev, true);
+ if (ret) {
+ coresight_unregister(drvdata->csdev);
+ goto err_arch_supported;
+ }
+
+ pm_runtime_put(&adev->dev);
dev_info(dev, "%s initialized\n", (char *)id->data);
if (boot_enable) {
@@ -2680,8 +809,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
err_arch_supported:
- pm_runtime_put(&adev->dev);
-err_coresight_register:
if (--etm4_count == 0)
unregister_hotcpu_notifier(&etm4_cpu_notifier);
return ret;
@@ -2698,6 +825,11 @@ static struct amba_id etm4_ids[] = {
.mask = 0x000fffff,
.data = "ETM 4.0",
},
+ { /* ETM 4.0 - A72, Maia, HiSilicon */
+ .id = 0x000bb95a,
+ .mask = 0x000fffff,
+ .data = "ETM 4.0",
+ },
{ 0, 0},
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index c34100205..5359c5197 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -13,6 +13,7 @@
#ifndef _CORESIGHT_CORESIGHT_ETM_H
#define _CORESIGHT_CORESIGHT_ETM_H
+#include <asm/local.h>
#include <linux/spinlock.h>
#include "coresight-priv.h"
@@ -175,71 +176,38 @@
#define ETM_MODE_TRACE_RESET BIT(25)
#define ETM_MODE_TRACE_ERR BIT(26)
#define ETM_MODE_VIEWINST_STARTSTOP BIT(27)
-#define ETMv4_MODE_ALL 0xFFFFFFF
+#define ETMv4_MODE_ALL (GENMASK(27, 0) | \
+ ETM_MODE_EXCL_KERN | \
+ ETM_MODE_EXCL_USER)
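
The GENMASK form is value-equivalent to the old constant for bits 27..0; only the two exclude flags are new. A quick stand-alone check (a local re-derivation of GENMASK for 32-bit values, not the kernel macro):

	#define MY_GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

	_Static_assert(MY_GENMASK(27, 0) == 0xFFFFFFF,
		       "GENMASK(27, 0) spans the same bits as the old mask");
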
#define TRCSTATR_IDLE_BIT 0
+#define ETM_DEFAULT_ADDR_COMP 0
+
+/* secure state access levels */
+#define ETM_EXLEVEL_S_APP BIT(8)
+#define ETM_EXLEVEL_S_OS BIT(9)
+#define ETM_EXLEVEL_S_NA BIT(10)
+#define ETM_EXLEVEL_S_HYP BIT(11)
+/* non-secure state access levels */
+#define ETM_EXLEVEL_NS_APP BIT(12)
+#define ETM_EXLEVEL_NS_OS BIT(13)
+#define ETM_EXLEVEL_NS_HYP BIT(14)
+#define ETM_EXLEVEL_NS_NA BIT(15)
/**
- * struct etm4_drvdata - specifics associated to an ETM component
- * @base: Memory mapped base address for this component.
- * @dev: The device entity associated to this component.
- * @csdev: Component vitals needed by the framework.
- * @spinlock: Only one at a time pls.
- * @cpu: The cpu this component is affined to.
- * @arch: ETM version number.
- * @enable: Is this ETM currently tracing.
- * @sticky_enable: true if ETM base configuration has been done.
- * @boot_enable:True if we should start tracing at boot time.
- * @os_unlock: True if access to management registers is allowed.
- * @nr_pe: The number of processing entity available for tracing.
- * @nr_pe_cmp: The number of processing entity comparator inputs that are
- * available for tracing.
- * @nr_addr_cmp:Number of pairs of address comparators available
- * as found in ETMIDR4 0-3.
- * @nr_cntr: Number of counters as found in ETMIDR5 bit 28-30.
- * @nr_ext_inp: Number of external input.
- * @numcidc: Number of contextID comparators.
- * @numvmidc: Number of VMID comparators.
- * @nrseqstate: The number of sequencer states that are implemented.
- * @nr_event: Indicates how many events the trace unit support.
- * @nr_resource:The number of resource selection pairs available for tracing.
- * @nr_ss_cmp: Number of single-shot comparator controls that are available.
+ * struct etmv4_config - configuration information related to an ETMv4
* @mode: Controls various modes supported by this ETM.
- * @trcid: value of the current ID for this component.
- * @trcid_size: Indicates the trace ID width.
- * @instrp0: Tracing of load and store instructions
- * as P0 elements is supported.
- * @trccond: If the trace unit supports conditional
- * instruction tracing.
- * @retstack: Indicates if the implementation supports a return stack.
- * @trc_error: Whether a trace unit can trace a system
- * error exception.
- * @atbtrig: If the implementation can support ATB triggers
- * @lpoverride: If the implementation can support low-power state over.
* @pe_sel: Controls which PE to trace.
* @cfg: Controls the tracing options.
* @eventctrl0: Controls the tracing of arbitrary events.
* @eventctrl1: Controls the behavior of the events that @eventctrl0 selects.
* @stall_ctrl: Enables trace unit functionality that prevents trace
* unit buffer overflows.
- * @sysstall: Does the system support stall control of the PE?
- * @nooverflow: Indicate if overflow prevention is supported.
- * @stall_ctrl: Enables trace unit functionality that prevents trace
- * unit buffer overflows.
- * @ts_size: Global timestamp size field.
* @ts_ctrl: Controls the insertion of global timestamps in the
* trace streams.
- * @syncpr: Indicates if an implementation has a fixed
- * synchronization period.
* @syncfreq: Controls how often trace synchronization requests occur.
- * @trccci: Indicates if the trace unit supports cycle counting
- * for instruction.
- * @ccsize: Indicates the size of the cycle counter in bits.
- * @ccitmin: minimum value that can be programmed in
- * the TRCCCCTLR register.
* @ccctlr: Sets the threshold value for cycle counting.
- * @trcbb: Indicates if the trace unit supports branch broadcast tracing.
- * @q_support: Q element support characteristics.
* @vinst_ctrl: Controls instruction trace filtering.
* @viiectlr: Set or read, the address range comparators.
* @vissctlr: Set, or read, the single address comparators that control the
@@ -264,73 +232,28 @@
* @addr_acc: Address comparator access type.
* @addr_type: Current status of the comparator register.
* @ctxid_idx: Context ID index selector.
- * @ctxid_size: Size of the context ID field to consider.
* @ctxid_pid: Value of the context ID comparator.
* @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise
* the same value as ctxid_pid.
* @ctxid_mask0:Context ID comparator mask for comparator 0-3.
* @ctxid_mask1:Context ID comparator mask for comparator 4-7.
* @vmid_idx: VM ID index selector.
- * @vmid_size: Size of the VM ID comparator to consider.
* @vmid_val: Value of the VM ID comparator.
* @vmid_mask0: VM ID comparator mask for comparator 0-3.
* @vmid_mask1: VM ID comparator mask for comparator 4-7.
- * @s_ex_level: In secure state, indicates whether instruction tracing is
- * supported for the corresponding Exception level.
- * @ns_ex_level:In non-secure state, indicates whether instruction tracing is
- * supported for the corresponding Exception level.
* @ext_inp: External input selection.
*/
-struct etmv4_drvdata {
- void __iomem *base;
- struct device *dev;
- struct coresight_device *csdev;
- spinlock_t spinlock;
- int cpu;
- u8 arch;
- bool enable;
- bool sticky_enable;
- bool boot_enable;
- bool os_unlock;
- u8 nr_pe;
- u8 nr_pe_cmp;
- u8 nr_addr_cmp;
- u8 nr_cntr;
- u8 nr_ext_inp;
- u8 numcidc;
- u8 numvmidc;
- u8 nrseqstate;
- u8 nr_event;
- u8 nr_resource;
- u8 nr_ss_cmp;
+struct etmv4_config {
u32 mode;
- u8 trcid;
- u8 trcid_size;
- bool instrp0;
- bool trccond;
- bool retstack;
- bool trc_error;
- bool atbtrig;
- bool lpoverride;
u32 pe_sel;
u32 cfg;
u32 eventctrl0;
u32 eventctrl1;
- bool stallctl;
- bool sysstall;
- bool nooverflow;
u32 stall_ctrl;
- u8 ts_size;
u32 ts_ctrl;
- bool syncpr;
u32 syncfreq;
- bool trccci;
- u8 ccsize;
- u8 ccitmin;
u32 ccctlr;
- bool trcbb;
u32 bb_ctrl;
- bool q_support;
u32 vinst_ctrl;
u32 viiectlr;
u32 vissctlr;
@@ -353,19 +276,119 @@ struct etmv4_drvdata {
u64 addr_acc[ETM_MAX_SINGLE_ADDR_CMP];
u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP];
u8 ctxid_idx;
- u8 ctxid_size;
u64 ctxid_pid[ETMv4_MAX_CTXID_CMP];
u64 ctxid_vpid[ETMv4_MAX_CTXID_CMP];
u32 ctxid_mask0;
u32 ctxid_mask1;
u8 vmid_idx;
- u8 vmid_size;
u64 vmid_val[ETM_MAX_VMID_CMP];
u32 vmid_mask0;
u32 vmid_mask1;
+ u32 ext_inp;
+};
+
+/**
+ * struct etm4_drvdata - specifics associated to an ETM component
+ * @base: Memory mapped base address for this component.
+ * @dev: The device entity associated to this component.
+ * @csdev: Component vitals needed by the framework.
+ * @spinlock: Only one at a time please.
+ * @mode: This tracer's mode, i.e. sysFS, Perf or disabled.
+ * @cpu: The cpu this component is affined to.
+ * @arch: ETM version number.
+ * @nr_pe: The number of processing entities available for tracing.
+ * @nr_pe_cmp: The number of processing entity comparator inputs that are
+ * available for tracing.
+ * @nr_addr_cmp:Number of pairs of address comparators available
+ * as found in ETMIDR4 0-3.
+ * @nr_cntr: Number of counters as found in ETMIDR5 bit 28-30.
+ * @nr_ext_inp: Number of external inputs.
+ * @numcidc: Number of contextID comparators.
+ * @numvmidc: Number of VMID comparators.
+ * @nrseqstate: The number of sequencer states that are implemented.
+ * @nr_event: Indicates how many events the trace unit supports.
+ * @nr_resource:The number of resource selection pairs available for tracing.
+ * @nr_ss_cmp: Number of single-shot comparator controls that are available.
+ * @trcid: value of the current ID for this component.
+ * @trcid_size: Indicates the trace ID width.
+ * @ts_size: Global timestamp size field.
+ * @ctxid_size: Size of the context ID field to consider.
+ * @vmid_size: Size of the VM ID comparator to consider.
+ * @ccsize: Indicates the size of the cycle counter in bits.
+ * @ccitmin: minimum value that can be programmed in the TRCCCCTLR register.
+ * @s_ex_level: In secure state, indicates whether instruction tracing is
+ * supported for the corresponding Exception level.
+ * @ns_ex_level:In non-secure state, indicates whether instruction tracing is
+ * supported for the corresponding Exception level.
+ * @sticky_enable: true if ETM base configuration has been done.
+ * @boot_enable:True if we should start tracing at boot time.
+ * @os_unlock: True if access to management registers is allowed.
+ * @instrp0: Tracing of load and store instructions
+ * as P0 elements is supported.
+ * @trcbb: Indicates if the trace unit supports branch broadcast tracing.
+ * @trccond: If the trace unit supports conditional
+ * instruction tracing.
+ * @retstack: Indicates if the implementation supports a return stack.
+ * @trccci: Indicates if the trace unit supports cycle counting
+ * for instructions.
+ * @q_support: Q element support characteristics.
+ * @trc_error: Whether a trace unit can trace a system
+ * error exception.
+ * @syncpr: Indicates if an implementation has a fixed
+ * synchronization period.
+ * @stallctl: If functionality that prevents trace unit buffer overflows
+ * is available.
+ * @sysstall: Does the system support stall control of the PE?
+ * @nooverflow: Indicates if overflow prevention is supported.
+ * @atbtrig: If the implementation can support ATB triggers
+ * @lpoverride: If the implementation can support low-power state override.
+ * @config: structure holding configuration parameters.
+ */
+struct etmv4_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ spinlock_t spinlock;
+ local_t mode;
+ int cpu;
+ u8 arch;
+ u8 nr_pe;
+ u8 nr_pe_cmp;
+ u8 nr_addr_cmp;
+ u8 nr_cntr;
+ u8 nr_ext_inp;
+ u8 numcidc;
+ u8 numvmidc;
+ u8 nrseqstate;
+ u8 nr_event;
+ u8 nr_resource;
+ u8 nr_ss_cmp;
+ u8 trcid;
+ u8 trcid_size;
+ u8 ts_size;
+ u8 ctxid_size;
+ u8 vmid_size;
+ u8 ccsize;
+ u8 ccitmin;
u8 s_ex_level;
u8 ns_ex_level;
- u32 ext_inp;
+ u8 q_support;
+ bool sticky_enable;
+ bool boot_enable;
+ bool os_unlock;
+ bool instrp0;
+ bool trcbb;
+ bool trccond;
+ bool retstack;
+ bool trccci;
+ bool trc_error;
+ bool syncpr;
+ bool stallctl;
+ bool sysstall;
+ bool nooverflow;
+ bool atbtrig;
+ bool lpoverride;
+ struct etmv4_config config;
};
/* Address comparator access types */
@@ -391,4 +414,7 @@ enum etm_addr_type {
ETM_ADDR_TYPE_START,
ETM_ADDR_TYPE_STOP,
};
+
+extern const struct attribute_group *coresight_etmv4_groups[];
+void etm4_config_trace_mode(struct etmv4_config *config);
#endif
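
With the user-tunable state split out into struct etmv4_config, a sysFS handler reaches it through drvdata->config while the existing spinlock still serialises updates. A minimal sketch, assuming a hypothetical mode_store handler rather than the driver's actual code:

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->mode = val & ETMv4_MODE_ALL;
	/* translate EXCL_KERN/EXCL_USER into exception level settings */
	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm4_config_trace_mode(config);
	spin_unlock(&drvdata->spinlock);

	return size;
}
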
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 0600ca306..05df78905 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -221,7 +221,6 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
- dev_info(dev, "FUNNEL initialized\n");
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 333eddaed..ad975c580 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -37,12 +37,42 @@
#define ETM_MODE_EXCL_KERN BIT(30)
#define ETM_MODE_EXCL_USER BIT(31)
+#define coresight_simple_func(type, name, offset) \
+static ssize_t name##_show(struct device *_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ type *drvdata = dev_get_drvdata(_dev->parent); \
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
+ readl_relaxed(drvdata->base + offset)); \
+} \
+static DEVICE_ATTR_RO(name)
+
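
For reference, an instantiation such as coresight_simple_func(struct stm_drvdata, tcsr, STMTCSR), which the STM driver below wraps, expands to the following read-only sysfs attribute:

static ssize_t tcsr_show(struct device *_dev,
			 struct device_attribute *attr, char *buf)
{
	struct stm_drvdata *drvdata = dev_get_drvdata(_dev->parent);
	return scnprintf(buf, PAGE_SIZE, "0x%x\n",
			 readl_relaxed(drvdata->base + STMTCSR));
}
static DEVICE_ATTR_RO(tcsr);
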
enum cs_mode {
CS_MODE_DISABLED,
CS_MODE_SYSFS,
CS_MODE_PERF,
};
+/**
+ * struct cs_buffers - keep track of a recording session's specifics
+ * @cur: index of the current buffer
+ * @nr_pages: max number of pages granted to us
+ * @offset: offset within the current buffer
+ * @data_size: how much we collected in this run
+ * @lost: non-zero if we had a HW buffer wrap around
+ * @snapshot: is this run in snapshot mode
+ * @data_pages: a handle to the ring buffer pages
+ */
+struct cs_buffers {
+ unsigned int cur;
+ unsigned int nr_pages;
+ unsigned long offset;
+ local_t data_size;
+ local_t lost;
+ bool snapshot;
+ void **data_pages;
+};
+
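
The two local_t members carry per-session accounting between a sink's update and reset callbacks; a hedged sketch of the pattern, with an illustrative helper name:

static void cs_buffers_account(struct cs_buffers *buf, u32 bytes, bool wrapped)
{
	if (wrapped)
		local_inc(&buf->lost);		/* reported to perf on reset */
	local_add(bytes, &buf->data_size);	/* drained with local_xchg() later */
}
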
static inline void CS_LOCK(void __iomem *addr)
{
do {
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 4299c0569..c6982e312 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -114,7 +114,6 @@ static int replicator_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
- dev_info(dev, "REPLICATOR initialized\n");
return 0;
out_disable_pm:
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
new file mode 100644
index 000000000..73be58a11
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -0,0 +1,920 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * Description: CoreSight System Trace Macrocell driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Initial implementation by Pratik Patel
+ * (C) 2014-2015 Pratik Patel <pratikp@codeaurora.org>
+ *
+ * Serious refactoring, code cleanup and upgrading to the Coresight upstream
+ * framework by Mathieu Poirier
+ * (C) 2015-2016 Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * Guaranteed timing and support for various packet types coming from the
+ * generic STM API by Chunyan Zhang
+ * (C) 2015-2016 Chunyan Zhang <zhang.chunyan@linaro.org>
+ */
+#include <asm/local.h>
+#include <linux/amba/bus.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <linux/coresight-stm.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/of_address.h>
+#include <linux/perf_event.h>
+#include <linux/pm_runtime.h>
+#include <linux/stm.h>
+
+#include "coresight-priv.h"
+
+#define STMDMASTARTR 0xc04
+#define STMDMASTOPR 0xc08
+#define STMDMASTATR 0xc0c
+#define STMDMACTLR 0xc10
+#define STMDMAIDR 0xcfc
+#define STMHEER 0xd00
+#define STMHETER 0xd20
+#define STMHEBSR 0xd60
+#define STMHEMCR 0xd64
+#define STMHEMASTR 0xdf4
+#define STMHEFEAT1R 0xdf8
+#define STMHEIDR 0xdfc
+#define STMSPER 0xe00
+#define STMSPTER 0xe20
+#define STMPRIVMASKR 0xe40
+#define STMSPSCR 0xe60
+#define STMSPMSCR 0xe64
+#define STMSPOVERRIDER 0xe68
+#define STMSPMOVERRIDER 0xe6c
+#define STMSPTRIGCSR 0xe70
+#define STMTCSR 0xe80
+#define STMTSSTIMR 0xe84
+#define STMTSFREQR 0xe8c
+#define STMSYNCR 0xe90
+#define STMAUXCR 0xe94
+#define STMSPFEAT1R 0xea0
+#define STMSPFEAT2R 0xea4
+#define STMSPFEAT3R 0xea8
+#define STMITTRIGGER 0xee8
+#define STMITATBDATA0 0xeec
+#define STMITATBCTR2 0xef0
+#define STMITATBID 0xef4
+#define STMITATBCTR0 0xef8
+
+#define STM_32_CHANNEL 32
+#define BYTES_PER_CHANNEL 256
+#define STM_TRACE_BUF_SIZE 4096
+#define STM_SW_MASTER_END 127
+
+/* Register bit definition */
+#define STMTCSR_BUSY_BIT 23
+/* Offset for reserved kernel channels; currently none are reserved */
+#define STM_CHANNEL_OFFSET 0
+
+enum stm_pkt_type {
+ STM_PKT_TYPE_DATA = 0x98,
+ STM_PKT_TYPE_FLAG = 0xE8,
+ STM_PKT_TYPE_TRIG = 0xF8,
+};
+
+#define stm_channel_addr(drvdata, ch) (drvdata->chs.base + \
+ (ch * BYTES_PER_CHANNEL))
+#define stm_channel_off(type, opts) (type & ~opts)
+
+static int boot_nr_channel;
+
+/*
+ * Not really modular but using module_param is the easiest way to
+ * remain consistent with existing use cases for now.
+ */
+module_param_named(
+ boot_nr_channel, boot_nr_channel, int, S_IRUGO
+);
+
+/**
+ * struct channel_space - central management entity for extended ports
+ * @base: memory mapped base address where channels start.
+ * @guaranteed: is the channel delivery guaranteed.
+ */
+struct channel_space {
+ void __iomem *base;
+ unsigned long *guaranteed;
+};
+
+/**
+ * struct stm_drvdata - specifics associated to an STM component
+ * @base: memory mapped base address for this component.
+ * @dev: the device entity associated to this component.
+ * @atclk: optional clock for the core parts of the STM.
+ * @csdev: component vitals needed by the framework.
+ * @spinlock: only one at a time please.
+ * @chs: the channels associated to this STM.
+ * @stm: structure associated to the generic STM interface.
+ * @mode: this tracer's mode, i.e. sysFS, or disabled.
+ * @traceid: value of the current ID for this component.
+ * @write_bytes: Maximum number of bytes this STM can write at a time.
+ * @stmsper: settings for register STMSPER.
+ * @stmspscr: settings for register STMSPSCR.
+ * @numsp: the total number of stimulus ports supported by this STM.
+ * @stmheer: settings for register STMHEER.
+ * @stmheter: settings for register STMHETER.
+ * @stmhebsr: settings for register STMHEBSR.
+ */
+struct stm_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *atclk;
+ struct coresight_device *csdev;
+ spinlock_t spinlock;
+ struct channel_space chs;
+ struct stm_data stm;
+ local_t mode;
+ u8 traceid;
+ u32 write_bytes;
+ u32 stmsper;
+ u32 stmspscr;
+ u32 numsp;
+ u32 stmheer;
+ u32 stmheter;
+ u32 stmhebsr;
+};
+
+static void stm_hwevent_enable_hw(struct stm_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ writel_relaxed(drvdata->stmhebsr, drvdata->base + STMHEBSR);
+ writel_relaxed(drvdata->stmheter, drvdata->base + STMHETER);
+ writel_relaxed(drvdata->stmheer, drvdata->base + STMHEER);
+ writel_relaxed(0x01 | /* Enable HW event tracing */
+ 0x04, /* Error detection on event tracing */
+ drvdata->base + STMHEMCR);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void stm_port_enable_hw(struct stm_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+ /* ATB trigger enable on direct writes to TRIG locations */
+ writel_relaxed(0x10,
+ drvdata->base + STMSPTRIGCSR);
+ writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR);
+ writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void stm_enable_hw(struct stm_drvdata *drvdata)
+{
+ if (drvdata->stmheer)
+ stm_hwevent_enable_hw(drvdata);
+
+ stm_port_enable_hw(drvdata);
+
+ CS_UNLOCK(drvdata->base);
+
+ /* 4096 bytes between synchronisation packets */
+ writel_relaxed(0xFFF, drvdata->base + STMSYNCR);
+ writel_relaxed((drvdata->traceid << 16 | /* trace id */
+ 0x02 | /* timestamp enable */
+ 0x01), /* global STM enable */
+ drvdata->base + STMTCSR);
+
+ CS_LOCK(drvdata->base);
+}
+
+static int stm_enable(struct coresight_device *csdev,
+ struct perf_event_attr *attr, u32 mode)
+{
+ u32 val;
+ struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ if (mode != CS_MODE_SYSFS)
+ return -EINVAL;
+
+ val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
+
+ /* Someone is already using the tracer */
+ if (val)
+ return -EBUSY;
+
+ pm_runtime_get_sync(drvdata->dev);
+
+ spin_lock(&drvdata->spinlock);
+ stm_enable_hw(drvdata);
+ spin_unlock(&drvdata->spinlock);
+
+ dev_info(drvdata->dev, "STM tracing enabled\n");
+ return 0;
+}
+
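
The local_cmpxchg() above tests and claims the tracer in one atomic step, which is why no lock is needed before touching the hardware; reduced to a sketch with an illustrative helper name:

static int stm_claim(local_t *mode, u32 new_mode)
{
	/* local_cmpxchg() returns the old value: 0 (CS_MODE_DISABLED) means we won */
	if (local_cmpxchg(mode, CS_MODE_DISABLED, new_mode))
		return -EBUSY;
	return 0;
}
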
+static void stm_hwevent_disable_hw(struct stm_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ writel_relaxed(0x0, drvdata->base + STMHEMCR);
+ writel_relaxed(0x0, drvdata->base + STMHEER);
+ writel_relaxed(0x0, drvdata->base + STMHETER);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void stm_port_disable_hw(struct stm_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ writel_relaxed(0x0, drvdata->base + STMSPER);
+ writel_relaxed(0x0, drvdata->base + STMSPTRIGCSR);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void stm_disable_hw(struct stm_drvdata *drvdata)
+{
+ u32 val;
+
+ CS_UNLOCK(drvdata->base);
+
+ val = readl_relaxed(drvdata->base + STMTCSR);
+ val &= ~0x1; /* clear global STM enable [0] */
+ writel_relaxed(val, drvdata->base + STMTCSR);
+
+ CS_LOCK(drvdata->base);
+
+ stm_port_disable_hw(drvdata);
+ if (drvdata->stmheer)
+ stm_hwevent_disable_hw(drvdata);
+}
+
+static void stm_disable(struct coresight_device *csdev)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ /*
+ * For as long as the tracer isn't disabled another entity can't
+ * change its status. As such we can read the status here without
+ * fearing it will change under us.
+ */
+ if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+ spin_lock(&drvdata->spinlock);
+ stm_disable_hw(drvdata);
+ spin_unlock(&drvdata->spinlock);
+
+ /* Wait until the engine has completely stopped */
+ coresight_timeout(drvdata, STMTCSR, STMTCSR_BUSY_BIT, 0);
+
+ pm_runtime_put(drvdata->dev);
+
+ local_set(&drvdata->mode, CS_MODE_DISABLED);
+ dev_info(drvdata->dev, "STM tracing disabled\n");
+ }
+}
+
+static int stm_trace_id(struct coresight_device *csdev)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return drvdata->traceid;
+}
+
+static const struct coresight_ops_source stm_source_ops = {
+ .trace_id = stm_trace_id,
+ .enable = stm_enable,
+ .disable = stm_disable,
+};
+
+static const struct coresight_ops stm_cs_ops = {
+ .source_ops = &stm_source_ops,
+};
+
+static inline bool stm_addr_unaligned(const void *addr, u8 write_bytes)
+{
+ return ((unsigned long)addr & (write_bytes - 1));
+}
+
+static void stm_send(void *addr, const void *data, u32 size, u8 write_bytes)
+{
+ u8 payload[8];
+
+ if (stm_addr_unaligned(data, write_bytes)) {
+ memcpy(payload, data, size);
+ data = payload;
+ }
+
+ /* now we are 64bit/32bit aligned */
+ switch (size) {
+#ifdef CONFIG_64BIT
+ case 8:
+ writeq_relaxed(*(u64 *)data, addr);
+ break;
+#endif
+ case 4:
+ writel_relaxed(*(u32 *)data, addr);
+ break;
+ case 2:
+ writew_relaxed(*(u16 *)data, addr);
+ break;
+ case 1:
+ writeb_relaxed(*(u8 *)data, addr);
+ break;
+ default:
+ break;
+ }
+}
+
+static int stm_generic_link(struct stm_data *stm_data,
+ unsigned int master, unsigned int channel)
+{
+ struct stm_drvdata *drvdata = container_of(stm_data,
+ struct stm_drvdata, stm);
+ if (!drvdata || !drvdata->csdev)
+ return -EINVAL;
+
+ return coresight_enable(drvdata->csdev);
+}
+
+static void stm_generic_unlink(struct stm_data *stm_data,
+ unsigned int master, unsigned int channel)
+{
+ struct stm_drvdata *drvdata = container_of(stm_data,
+ struct stm_drvdata, stm);
+ if (!drvdata || !drvdata->csdev)
+ return;
+
+ stm_disable(drvdata->csdev);
+}
+
+static long stm_generic_set_options(struct stm_data *stm_data,
+ unsigned int master,
+ unsigned int channel,
+ unsigned int nr_chans,
+ unsigned long options)
+{
+ struct stm_drvdata *drvdata = container_of(stm_data,
+ struct stm_drvdata, stm);
+ if (!(drvdata && local_read(&drvdata->mode)))
+ return -EINVAL;
+
+ if (channel >= drvdata->numsp)
+ return -EINVAL;
+
+ switch (options) {
+ case STM_OPTION_GUARANTEED:
+ set_bit(channel, drvdata->chs.guaranteed);
+ break;
+
+ case STM_OPTION_INVARIANT:
+ clear_bit(channel, drvdata->chs.guaranteed);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t stm_generic_packet(struct stm_data *stm_data,
+ unsigned int master,
+ unsigned int channel,
+ unsigned int packet,
+ unsigned int flags,
+ unsigned int size,
+ const unsigned char *payload)
+{
+ unsigned long ch_addr;
+ struct stm_drvdata *drvdata = container_of(stm_data,
+ struct stm_drvdata, stm);
+
+ if (!(drvdata && local_read(&drvdata->mode)))
+ return 0;
+
+ if (channel >= drvdata->numsp)
+ return 0;
+
+ ch_addr = (unsigned long)stm_channel_addr(drvdata, channel);
+
+ flags = (flags == STP_PACKET_TIMESTAMPED) ? STM_FLAG_TIMESTAMPED : 0;
+ flags |= test_bit(channel, drvdata->chs.guaranteed) ?
+ STM_FLAG_GUARANTEED : 0;
+
+ if (size > drvdata->write_bytes)
+ size = drvdata->write_bytes;
+ else
+ size = rounddown_pow_of_two(size);
+
+ switch (packet) {
+ case STP_PACKET_FLAG:
+ ch_addr |= stm_channel_off(STM_PKT_TYPE_FLAG, flags);
+
+ /*
+ * The generic STM core sets a size of '0' on flag packets.
+ * As such send a flag packet of size '1' and tell the
+ * core we did so.
+ */
+ stm_send((void *)ch_addr, payload, 1, drvdata->write_bytes);
+ size = 1;
+ break;
+
+ case STP_PACKET_DATA:
+ ch_addr |= stm_channel_off(STM_PKT_TYPE_DATA, flags);
+ stm_send((void *)ch_addr, payload, size,
+ drvdata->write_bytes);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ return size;
+}
+
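
The size clamp in stm_generic_packet() guarantees each write maps onto a single naturally-sized MMIO store in stm_send(); isolated for illustration (assuming size >= 1):

static u32 stm_clamp_size(u32 size, u32 write_bytes)
{
	/* e.g. with write_bytes == 8: 16 -> 8, 7 -> 4, 3 -> 2, 1 -> 1 */
	if (size > write_bytes)
		return write_bytes;
	return rounddown_pow_of_two(size);
}
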
+static ssize_t hwevent_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val = drvdata->stmheer;
+
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t hwevent_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+ int ret = 0;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return -EINVAL;
+
+ drvdata->stmheer = val;
+ /* HW event enable and trigger go hand in hand */
+ drvdata->stmheter = val;
+
+ return size;
+}
+static DEVICE_ATTR_RW(hwevent_enable);
+
+static ssize_t hwevent_select_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val = drvdata->stmhebsr;
+
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t hwevent_select_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+ int ret = 0;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return -EINVAL;
+
+ drvdata->stmhebsr = val;
+
+ return size;
+}
+static DEVICE_ATTR_RW(hwevent_select);
+
+static ssize_t port_select_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if (!local_read(&drvdata->mode)) {
+ val = drvdata->stmspscr;
+ } else {
+ spin_lock(&drvdata->spinlock);
+ val = readl_relaxed(drvdata->base + STMSPSCR);
+ spin_unlock(&drvdata->spinlock);
+ }
+
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t port_select_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val, stmsper;
+ int ret = 0;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->stmspscr = val;
+
+ if (local_read(&drvdata->mode)) {
+ CS_UNLOCK(drvdata->base);
+ /* Process as per ARM's TRM recommendation */
+ stmsper = readl_relaxed(drvdata->base + STMSPER);
+ writel_relaxed(0x0, drvdata->base + STMSPER);
+ writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR);
+ writel_relaxed(stmsper, drvdata->base + STMSPER);
+ CS_LOCK(drvdata->base);
+ }
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(port_select);
+
+static ssize_t port_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if (!local_read(&drvdata->mode)) {
+ val = drvdata->stmsper;
+ } else {
+ spin_lock(&drvdata->spinlock);
+ val = readl_relaxed(drvdata->base + STMSPER);
+ spin_unlock(&drvdata->spinlock);
+ }
+
+ return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t port_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+ int ret = 0;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->stmsper = val;
+
+ if (local_read(&drvdata->mode)) {
+ CS_UNLOCK(drvdata->base);
+ writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER);
+ CS_LOCK(drvdata->base);
+ }
+ spin_unlock(&drvdata->spinlock);
+
+ return size;
+}
+static DEVICE_ATTR_RW(port_enable);
+
+static ssize_t traceid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->traceid;
+ return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t traceid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret)
+ return ret;
+
+ /* traceid field is 7 bits wide on STM32 */
+ drvdata->traceid = val & 0x7f;
+ return size;
+}
+static DEVICE_ATTR_RW(traceid);
+
+#define coresight_stm_simple_func(name, offset) \
+ coresight_simple_func(struct stm_drvdata, name, offset)
+
+coresight_stm_simple_func(tcsr, STMTCSR);
+coresight_stm_simple_func(tsfreqr, STMTSFREQR);
+coresight_stm_simple_func(syncr, STMSYNCR);
+coresight_stm_simple_func(sper, STMSPER);
+coresight_stm_simple_func(spter, STMSPTER);
+coresight_stm_simple_func(privmaskr, STMPRIVMASKR);
+coresight_stm_simple_func(spscr, STMSPSCR);
+coresight_stm_simple_func(spmscr, STMSPMSCR);
+coresight_stm_simple_func(spfeat1r, STMSPFEAT1R);
+coresight_stm_simple_func(spfeat2r, STMSPFEAT2R);
+coresight_stm_simple_func(spfeat3r, STMSPFEAT3R);
+coresight_stm_simple_func(devid, CORESIGHT_DEVID);
+
+static struct attribute *coresight_stm_attrs[] = {
+ &dev_attr_hwevent_enable.attr,
+ &dev_attr_hwevent_select.attr,
+ &dev_attr_port_enable.attr,
+ &dev_attr_port_select.attr,
+ &dev_attr_traceid.attr,
+ NULL,
+};
+
+static struct attribute *coresight_stm_mgmt_attrs[] = {
+ &dev_attr_tcsr.attr,
+ &dev_attr_tsfreqr.attr,
+ &dev_attr_syncr.attr,
+ &dev_attr_sper.attr,
+ &dev_attr_spter.attr,
+ &dev_attr_privmaskr.attr,
+ &dev_attr_spscr.attr,
+ &dev_attr_spmscr.attr,
+ &dev_attr_spfeat1r.attr,
+ &dev_attr_spfeat2r.attr,
+ &dev_attr_spfeat3r.attr,
+ &dev_attr_devid.attr,
+ NULL,
+};
+
+static const struct attribute_group coresight_stm_group = {
+ .attrs = coresight_stm_attrs,
+};
+
+static const struct attribute_group coresight_stm_mgmt_group = {
+ .attrs = coresight_stm_mgmt_attrs,
+ .name = "mgmt",
+};
+
+static const struct attribute_group *coresight_stm_groups[] = {
+ &coresight_stm_group,
+ &coresight_stm_mgmt_group,
+ NULL,
+};
+
+static int stm_get_resource_byname(struct device_node *np,
+ char *ch_base, struct resource *res)
+{
+ const char *name = NULL;
+ int index = 0, found = 0;
+
+ while (!of_property_read_string_index(np, "reg-names", index, &name)) {
+ if (strcmp(ch_base, name)) {
+ index++;
+ continue;
+ }
+
+ /* We have a match and @index is where it's at */
+ found = 1;
+ break;
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return of_address_to_resource(np, index, res);
+}
+
+static u32 stm_fundamental_data_size(struct stm_drvdata *drvdata)
+{
+ u32 stmspfeat2r;
+
+ if (!IS_ENABLED(CONFIG_64BIT))
+ return 4;
+
+ stmspfeat2r = readl_relaxed(drvdata->base + STMSPFEAT2R);
+
+ /*
+ * bit[15:12] represents the fundamental data size
+ * 0 - 32-bit data
+ * 1 - 64-bit data
+ */
+ return BMVAL(stmspfeat2r, 12, 15) ? 8 : 4;
+}
+
+static u32 stm_num_stimulus_port(struct stm_drvdata *drvdata)
+{
+ u32 numsp;
+
+ numsp = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
+ /*
+ * NUMSP in STMDEVID is 17 bits long and if equal to 0x0,
+ * 32 stimulus ports are supported.
+ */
+ numsp &= 0x1ffff;
+ if (!numsp)
+ numsp = STM_32_CHANNEL;
+ return numsp;
+}
+
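
Worked examples of the decoding above, with hypothetical STMDEVID values:

/*
 *   STMDEVID & 0x1ffff == 0x00000 -> 32 ports (0 means the full 32)
 *   STMDEVID & 0x1ffff == 0x00041 -> 65 ports
 *   STMDEVID & 0x1ffff == 0x1ffff -> 131071 ports (17-bit field maximum)
 */
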
+static void stm_init_default_data(struct stm_drvdata *drvdata)
+{
+ /* Don't use port selection */
+ drvdata->stmspscr = 0x0;
+ /*
+ * Enable all channels regardless of their number. When port
+ * selection isn't used (see above) STMSPER applies to the whole
+ * group of 32 channels, hence setting all 32 bits to 1
+ */
+ drvdata->stmsper = ~0x0;
+
+ /*
+ * The trace ID value for *ETM* tracers starts at CPU_ID * 2 + 0x10 and
+ * anything equal to or higher than 0x70 is reserved. Since 0x00 is
+ * also reserved the STM trace ID needs to be higher than 0x00 and
+ * lower than 0x10.
+ */
+ drvdata->traceid = 0x1;
+
+ /* Set invariant transaction timing on all channels */
+ bitmap_clear(drvdata->chs.guaranteed, 0, drvdata->numsp);
+}
+
+static void stm_init_generic_data(struct stm_drvdata *drvdata)
+{
+ drvdata->stm.name = dev_name(drvdata->dev);
+
+ /*
+ * MasterIDs are assigned at HW design phase. As such the core is
+ * using a single master for interaction with this device.
+ */
+ drvdata->stm.sw_start = 1;
+ drvdata->stm.sw_end = 1;
+ drvdata->stm.hw_override = true;
+ drvdata->stm.sw_nchannels = drvdata->numsp;
+ drvdata->stm.packet = stm_generic_packet;
+ drvdata->stm.link = stm_generic_link;
+ drvdata->stm.unlink = stm_generic_unlink;
+ drvdata->stm.set_options = stm_generic_set_options;
+}
+
+static int stm_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret;
+ void __iomem *base;
+ unsigned long *guaranteed;
+ struct device *dev = &adev->dev;
+ struct coresight_platform_data *pdata = NULL;
+ struct stm_drvdata *drvdata;
+ struct resource *res = &adev->res;
+ struct resource ch_res;
+ size_t res_size, bitmap_size;
+ struct coresight_desc *desc;
+ struct device_node *np = adev->dev.of_node;
+
+ if (np) {
+ pdata = of_get_coresight_platform_data(dev, np);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ adev->dev.platform_data = pdata;
+ }
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = &adev->dev;
+ drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
+ if (!IS_ERR(drvdata->atclk)) {
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ return ret;
+ }
+ dev_set_drvdata(dev, drvdata);
+
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+ drvdata->base = base;
+
+ ret = stm_get_resource_byname(np, "stm-stimulus-base", &ch_res);
+ if (ret)
+ return ret;
+
+ base = devm_ioremap_resource(dev, &ch_res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+ drvdata->chs.base = base;
+
+ drvdata->write_bytes = stm_fundamental_data_size(drvdata);
+
+ if (boot_nr_channel) {
+ drvdata->numsp = boot_nr_channel;
+ res_size = min((resource_size_t)(boot_nr_channel *
+ BYTES_PER_CHANNEL), resource_size(res));
+ } else {
+ drvdata->numsp = stm_num_stimulus_port(drvdata);
+ res_size = min((resource_size_t)(drvdata->numsp *
+ BYTES_PER_CHANNEL), resource_size(res));
+ }
+ bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);
+
+ guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
+ if (!guaranteed)
+ return -ENOMEM;
+ drvdata->chs.guaranteed = guaranteed;
+
+ spin_lock_init(&drvdata->spinlock);
+
+ stm_init_default_data(drvdata);
+ stm_init_generic_data(drvdata);
+
+ if (stm_register_device(dev, &drvdata->stm, THIS_MODULE)) {
+ dev_info(dev,
+ "stm_register_device failed, probing deferred\n");
+ return -EPROBE_DEFER;
+ }
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto stm_unregister;
+ }
+
+ desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+ desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE;
+ desc->ops = &stm_cs_ops;
+ desc->pdata = pdata;
+ desc->dev = dev;
+ desc->groups = coresight_stm_groups;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev)) {
+ ret = PTR_ERR(drvdata->csdev);
+ goto stm_unregister;
+ }
+
+ pm_runtime_put(&adev->dev);
+
+ dev_info(dev, "%s initialized\n", (char *)id->data);
+ return 0;
+
+stm_unregister:
+ stm_unregister_device(&drvdata->stm);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int stm_runtime_suspend(struct device *dev)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR(drvdata->atclk))
+ clk_disable_unprepare(drvdata->atclk);
+
+ return 0;
+}
+
+static int stm_runtime_resume(struct device *dev)
+{
+ struct stm_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata && !IS_ERR(drvdata->atclk))
+ clk_prepare_enable(drvdata->atclk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops stm_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(stm_runtime_suspend, stm_runtime_resume, NULL)
+};
+
+static struct amba_id stm_ids[] = {
+ {
+ .id = 0x0003b962,
+ .mask = 0x0003ffff,
+ .data = "STM32",
+ },
+ { 0, 0},
+};
+
+static struct amba_driver stm_driver = {
+ .drv = {
+ .name = "coresight-stm",
+ .owner = THIS_MODULE,
+ .pm = &stm_dev_pm_ops,
+ .suppress_bind_attrs = true,
+ },
+ .probe = stm_probe,
+ .id_table = stm_ids,
+};
+
+builtin_amba_driver(stm_driver);
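
A kernel client can feed this STM through the generic stm_source interface declared in linux/stm.h; a minimal, hedged sketch in the style of drivers/hwtracing/stm/console.c, with illustrative "demo" names and error handling trimmed:

#include <linux/module.h>
#include <linux/stm.h>

static struct stm_source_data demo_data = {
	.name		= "demo_source",
	.nr_chans	= 1,
};

static int __init demo_init(void)
{
	/* the source is linked to an STM device later, e.g. via sysfs */
	return stm_source_register_device(NULL, &demo_data);
}

static void __exit demo_exit(void)
{
	stm_source_unregister_device(&demo_data);
}

module_init(demo_init);
module_exit(demo_exit);

Once linked, a call like stm_source_write(&demo_data, 0, buf, len) ends up in stm_generic_packet() above.
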
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
new file mode 100644
index 000000000..466af86fd
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -0,0 +1,604 @@
+/*
+ * Copyright(C) 2016 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/coresight.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include "coresight-priv.h"
+#include "coresight-tmc.h"
+
+void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ /* Wait for TMCSReady bit to be set */
+ tmc_wait_for_tmcready(drvdata);
+
+ writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
+ writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
+ TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
+ TMC_FFCR_TRIGON_TRIGIN,
+ drvdata->base + TMC_FFCR);
+
+ writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
+ tmc_enable_hw(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
+{
+ char *bufp;
+ u32 read_data;
+ int i;
+
+ bufp = drvdata->buf;
+ while (1) {
+ for (i = 0; i < drvdata->memwidth; i++) {
+ read_data = readl_relaxed(drvdata->base + TMC_RRD);
+ if (read_data == 0xFFFFFFFF)
+ return;
+ memcpy(bufp, &read_data, 4);
+ bufp += 4;
+ }
+ }
+}
+
+static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ tmc_flush_and_stop(drvdata);
+ /*
+ * When operating in sysFS mode the content of the buffer needs to be
+ * read before the TMC is disabled.
+ */
+ if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
+ tmc_etb_dump_hw(drvdata);
+ tmc_disable_hw(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ /* Wait for TMCSReady bit to be set */
+ tmc_wait_for_tmcready(drvdata);
+
+ writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
+ writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
+ drvdata->base + TMC_FFCR);
+ writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
+ tmc_enable_hw(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ tmc_flush_and_stop(drvdata);
+ tmc_disable_hw(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
+{
+ int ret = 0;
+ bool used = false;
+ char *buf = NULL;
+ long val;
+ unsigned long flags;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ /* This shouldn't be happening */
+ if (WARN_ON(mode != CS_MODE_SYSFS))
+ return -EINVAL;
+
+ /*
+ * If we don't have a buffer release the lock and allocate memory.
+ * Otherwise keep the lock and move along.
+ */
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (!drvdata->buf) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ /* Allocating the memory here while outside of the spinlock */
+ buf = kzalloc(drvdata->size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Let's try again */
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ }
+
+ if (drvdata->reading) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ val = local_xchg(&drvdata->mode, mode);
+ /*
+ * In sysFS mode we can have multiple writers per sink. Since this
+ * sink is already enabled no memory is needed and the HW need not be
+ * touched.
+ */
+ if (val == CS_MODE_SYSFS)
+ goto out;
+
+ /*
+ * If drvdata::buf isn't NULL, memory was allocated for a previous
+ * trace run but wasn't read. If so simply zero-out the memory.
+ * Otherwise use the memory allocated above.
+ *
+ * The memory is freed when users read the buffer using the
+ * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etf() for
+ * details.
+ */
+ if (drvdata->buf) {
+ memset(drvdata->buf, 0, drvdata->size);
+ } else {
+ used = true;
+ drvdata->buf = buf;
+ }
+
+ tmc_etb_enable_hw(drvdata);
+out:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ /* Free memory outside the spinlock if need be */
+ if (!used && buf)
+ kfree(buf);
+
+ if (!ret)
+ dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
+
+ return ret;
+}
+
+static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, u32 mode)
+{
+ int ret = 0;
+ long val;
+ unsigned long flags;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ /* This shouldn't be happening */
+ if (WARN_ON(mode != CS_MODE_PERF))
+ return -EINVAL;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->reading) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ val = local_xchg(&drvdata->mode, mode);
+ /*
+ * In Perf mode there can be only one writer per sink. There
+ * is also no need to continue if the ETB/ETR is already operated
+ * from sysFS.
+ */
+ if (val != CS_MODE_DISABLED) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ tmc_etb_enable_hw(drvdata);
+out:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ return ret;
+}
+
+static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
+{
+ switch (mode) {
+ case CS_MODE_SYSFS:
+ return tmc_enable_etf_sink_sysfs(csdev, mode);
+ case CS_MODE_PERF:
+ return tmc_enable_etf_sink_perf(csdev, mode);
+ }
+
+ /* We shouldn't be here */
+ return -EINVAL;
+}
+
+static void tmc_disable_etf_sink(struct coresight_device *csdev)
+{
+ long val;
+ unsigned long flags;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->reading) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return;
+ }
+
+ val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
+ /* Disable the TMC only if it needs to */
+ if (val != CS_MODE_DISABLED)
+ tmc_etb_disable_hw(drvdata);
+
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
+}
+
+static int tmc_enable_etf_link(struct coresight_device *csdev,
+ int inport, int outport)
+{
+ unsigned long flags;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->reading) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return -EBUSY;
+ }
+
+ tmc_etf_enable_hw(drvdata);
+ local_set(&drvdata->mode, CS_MODE_SYSFS);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ dev_info(drvdata->dev, "TMC-ETF enabled\n");
+ return 0;
+}
+
+static void tmc_disable_etf_link(struct coresight_device *csdev,
+ int inport, int outport)
+{
+ unsigned long flags;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->reading) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return;
+ }
+
+ tmc_etf_disable_hw(drvdata);
+ local_set(&drvdata->mode, CS_MODE_DISABLED);
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ dev_info(drvdata->dev, "TMC-ETF disabled\n");
+}
+
+static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
+ void **pages, int nr_pages, bool overwrite)
+{
+ int node;
+ struct cs_buffers *buf;
+
+ if (cpu == -1)
+ cpu = smp_processor_id();
+ node = cpu_to_node(cpu);
+
+ /* Allocate memory structure for interaction with Perf */
+ buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+ if (!buf)
+ return NULL;
+
+ buf->snapshot = overwrite;
+ buf->nr_pages = nr_pages;
+ buf->data_pages = pages;
+
+ return buf;
+}
+
+static void tmc_free_etf_buffer(void *config)
+{
+ struct cs_buffers *buf = config;
+
+ kfree(buf);
+}
+
+static int tmc_set_etf_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *sink_config)
+{
+ int ret = 0;
+ unsigned long head;
+ struct cs_buffers *buf = sink_config;
+
+ /* wrap head around to the amount of space we have */
+ head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
+
+ /* find the page to write to */
+ buf->cur = head / PAGE_SIZE;
+
+ /* and offset within that page */
+ buf->offset = head % PAGE_SIZE;
+
+ local_set(&buf->data_size, 0);
+
+ return ret;
+}
+
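
A worked example of the head arithmetic above, assuming 4KB pages, nr_pages == 4 and handle->head == 0x5234:

/*
 *   head   = 0x5234 & (4 * 4096 - 1) = 0x1234
 *   cur    = 0x1234 / 4096 = page 1
 *   offset = 0x1234 % 4096 = 0x234
 */
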
+static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *sink_config, bool *lost)
+{
+ long size = 0;
+ struct cs_buffers *buf = sink_config;
+
+ if (buf) {
+ /*
+ * In snapshot mode ->data_size holds the new address of the
+ * ring buffer's head. The size itself is the whole address
+ * range since we want the latest information.
+ */
+ if (buf->snapshot)
+ handle->head = local_xchg(&buf->data_size,
+ buf->nr_pages << PAGE_SHIFT);
+ /*
+ * Tell the tracer PMU how much we got in this run and if
+ * something went wrong along the way. Nobody else can use
+ * this cs_buffers instance until we are done. As such
+ * resetting parameters here and squaring off with the ring
+ * buffer API in the tracer PMU is fine.
+ */
+ *lost = !!local_xchg(&buf->lost, 0);
+ size = local_xchg(&buf->data_size, 0);
+ }
+
+ return size;
+}
+
+static void tmc_update_etf_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *sink_config)
+{
+ int i, cur;
+ u32 *buf_ptr;
+ u32 read_ptr, write_ptr;
+ u32 status, to_read;
+ unsigned long offset;
+ struct cs_buffers *buf = sink_config;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ if (!buf)
+ return;
+
+ /* This shouldn't happen */
+ if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF))
+ return;
+
+ CS_UNLOCK(drvdata->base);
+
+ tmc_flush_and_stop(drvdata);
+
+ read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
+ write_ptr = readl_relaxed(drvdata->base + TMC_RWP);
+
+ /*
+ * Get a hold of the status register and see if a wrap around
+ * has occurred. If so adjust things accordingly.
+ */
+ status = readl_relaxed(drvdata->base + TMC_STS);
+ if (status & TMC_STS_FULL) {
+ local_inc(&buf->lost);
+ to_read = drvdata->size;
+ } else {
+ to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
+ }
+
+ /*
+ * The TMC RAM buffer may be bigger than the space available in the
+ * perf ring buffer (handle->size). If so advance the RRP so that we
+ * get the latest trace data.
+ */
+ if (to_read > handle->size) {
+ u32 mask = 0;
+
+ /*
+ * The value written to RRP must be byte-address aligned to
+ * the width of the trace memory databus _and_ to a frame
+ * boundary (16 byte), whichever is the biggest. For example,
+ * for 32-bit, 64-bit and 128-bit wide trace memory, the four
+ * LSBs must be 0s. For 256-bit wide trace memory, the five
+ * LSBs must be 0s.
+ */
+ switch (drvdata->memwidth) {
+ case TMC_MEM_INTF_WIDTH_32BITS:
+ case TMC_MEM_INTF_WIDTH_64BITS:
+ case TMC_MEM_INTF_WIDTH_128BITS:
+ mask = GENMASK(31, 5);
+ break;
+ case TMC_MEM_INTF_WIDTH_256BITS:
+ mask = GENMASK(31, 6);
+ break;
+ }
+
+ /*
+ * Make sure the new size is aligned in accordance with the
+ * requirement explained above.
+ */
+ to_read = handle->size & mask;
+ /* Move the RAM read pointer up */
+ read_ptr = (write_ptr + drvdata->size) - to_read;
+ /* Make sure we are still within our limits */
+ if (read_ptr > (drvdata->size - 1))
+ read_ptr -= drvdata->size;
+ /* Tell the HW */
+ writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
+ local_inc(&buf->lost);
+ }
+
+ cur = buf->cur;
+ offset = buf->offset;
+
+ /* for every byte to read */
+ for (i = 0; i < to_read; i += 4) {
+ buf_ptr = buf->data_pages[cur] + offset;
+ *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);
+
+ offset += 4;
+ if (offset >= PAGE_SIZE) {
+ offset = 0;
+ cur++;
+ /* wrap around at the end of the buffer */
+ cur &= buf->nr_pages - 1;
+ }
+ }
+
+ /*
+ * In snapshot mode all we have to do is communicate to
+ * perf_aux_output_end() the address of the current head. In full
+ * trace mode the same function expects a size to move rb->aux_head
+ * forward.
+ */
+ if (buf->snapshot)
+ local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
+ else
+ local_add(to_read, &buf->data_size);
+
+ CS_LOCK(drvdata->base);
+}
+
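
For illustration, with 64-bit wide trace memory the mask is GENMASK(31, 5), so a handle->size of 0x1015 bytes is trimmed and the RRP advanced accordingly:

/*
 *   to_read  = 0x1015 & 0xffffffe0 = 0x1000
 *   read_ptr = (write_ptr + drvdata->size) - 0x1000, wrapped back into
 *              the buffer if it runs past the end
 */
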
+static const struct coresight_ops_sink tmc_etf_sink_ops = {
+ .enable = tmc_enable_etf_sink,
+ .disable = tmc_disable_etf_sink,
+ .alloc_buffer = tmc_alloc_etf_buffer,
+ .free_buffer = tmc_free_etf_buffer,
+ .set_buffer = tmc_set_etf_buffer,
+ .reset_buffer = tmc_reset_etf_buffer,
+ .update_buffer = tmc_update_etf_buffer,
+};
+
+static const struct coresight_ops_link tmc_etf_link_ops = {
+ .enable = tmc_enable_etf_link,
+ .disable = tmc_disable_etf_link,
+};
+
+const struct coresight_ops tmc_etb_cs_ops = {
+ .sink_ops = &tmc_etf_sink_ops,
+};
+
+const struct coresight_ops tmc_etf_cs_ops = {
+ .sink_ops = &tmc_etf_sink_ops,
+ .link_ops = &tmc_etf_link_ops,
+};
+
+int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
+{
+ long val;
+ enum tmc_mode mode;
+ int ret = 0;
+ unsigned long flags;
+
+ /* config types are set at boot time and never change */
+ if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
+ drvdata->config_type != TMC_CONFIG_TYPE_ETF))
+ return -EINVAL;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ if (drvdata->reading) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* There is no point in reading a TMC in HW FIFO mode */
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ val = local_read(&drvdata->mode);
+ /* Don't interfere if operated from Perf */
+ if (val == CS_MODE_PERF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* If drvdata::buf is NULL the trace data has been read already */
+ if (drvdata->buf == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Disable the TMC if need be */
+ if (val == CS_MODE_SYSFS)
+ tmc_etb_disable_hw(drvdata);
+
+ drvdata->reading = true;
+out:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ return ret;
+}
+
+int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
+{
+ char *buf = NULL;
+ enum tmc_mode mode;
+ unsigned long flags;
+
+ /* config types are set at boot time and never change */
+ if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
+ drvdata->config_type != TMC_CONFIG_TYPE_ETF))
+ return -EINVAL;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ /* There is no point in reading a TMC in HW FIFO mode */
+ mode = readl_relaxed(drvdata->base + TMC_MODE);
+ if (mode != TMC_MODE_CIRCULAR_BUFFER) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return -EINVAL;
+ }
+
+ /* Re-enable the TMC if need be */
+ if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+ /*
+ * The trace run will continue with the same allocated trace
+ * buffer. As such zero-out the buffer so that we don't end
+ * up with stale data.
+ *
+ * Since the tracer is still enabled drvdata::buf
+ * can't be NULL.
+ */
+ memset(drvdata->buf, 0, drvdata->size);
+ tmc_etb_enable_hw(drvdata);
+ } else {
+ /*
+ * The ETB/ETF is not tracing and the buffer was just read.
+ * As such prepare to free the trace buffer.
+ */
+ buf = drvdata->buf;
+ drvdata->buf = NULL;
+ }
+
+ drvdata->reading = false;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ /*
+ * Free allocated memory outside of the spinlock. There is no need
+ * to assert the validity of 'buf' since calling kfree(NULL) is safe.
+ */
+ kfree(buf);
+
+ return 0;
+}
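
Taken together, the prepare/unprepare pair brackets a userspace read of the captured trace; a hedged sketch of the sequence the TMC character device is expected to drive, with error handling omitted:

/*
 * tmc_read_prepare_etb(drvdata);    stop the HW, set drvdata->reading
 * ...copy_to_user() from drvdata->buf...
 * tmc_read_unprepare_etb(drvdata);  re-arm the HW or free the buffer
 */
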
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
new file mode 100644
index 000000000..688be9e06
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright(C) 2016 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/coresight.h>
+#include <linux/dma-mapping.h>
+#include "coresight-priv.h"
+#include "coresight-tmc.h"
+
+void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
+{
+ u32 axictl;
+
+ /* Zero out the memory to help with debug */
+ memset(drvdata->vaddr, 0, drvdata->size);
+
+ CS_UNLOCK(drvdata->base);
+
+ /* Wait for TMCSReady bit to be set */
+ tmc_wait_for_tmcready(drvdata);
+
+ writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
+ writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
+
+ axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
+ axictl |= TMC_AXICTL_WR_BURST_16;
+ writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+ axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
+ writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+ axictl = (axictl &
+ ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
+ TMC_AXICTL_PROT_CTL_B1;
+ writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+
+ writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
+ writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
+ writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
+ TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
+ TMC_FFCR_TRIGON_TRIGIN,
+ drvdata->base + TMC_FFCR);
+ writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
+ tmc_enable_hw(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
+{
+ u32 rwp, val;
+
+ rwp = readl_relaxed(drvdata->base + TMC_RWP);
+ val = readl_relaxed(drvdata->base + TMC_STS);
+
+ /* How much memory do we still have */
+ if (val & BIT(0))
+ drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
+ else
+ drvdata->buf = drvdata->vaddr;
+}
+
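
A worked example of the dump math above, with hypothetical values:

/*
 * paddr = 0x80000000, size = 1MB, RWP = 0x80000400, full bit set:
 *   buf = vaddr + (0x80000400 - 0x80000000) = vaddr + 0x400
 * so the oldest data starts 1KB into the buffer; without a wrap,
 * reading simply starts at vaddr.
 */
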
+static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ tmc_flush_and_stop(drvdata);
+ /*
+ * When operating in sysFS mode the content of the buffer needs to be
+ * read before the TMC is disabled.
+ */
+ if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
+ tmc_etr_dump_hw(drvdata);
+ tmc_disable_hw(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
+{
+ int ret = 0;
+ bool used = false;
+ long val;
+ unsigned long flags;
+ void __iomem *vaddr = NULL;
+ dma_addr_t paddr;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ /* This shouldn't be happening */
+ if (WARN_ON(mode != CS_MODE_SYSFS))
+ return -EINVAL;
+
+ /*
+ * If we don't have a buffer release the lock and allocate memory.
+ * Otherwise keep the lock and move along.
+ */
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (!drvdata->vaddr) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ /*
+ * Contiguous memory can't be allocated while a spinlock is
+ * held. As such allocate memory here and free it if a buffer
+ * has already been allocated (from a previous session).
+ */
+ vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
+ &paddr, GFP_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+
+ /* Let's try again */
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ }
+
+ if (drvdata->reading) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ val = local_xchg(&drvdata->mode, mode);
+ /*
+ * In sysFS mode we can have multiple writers per sink. Since this
+ * sink is already enabled no memory is needed and the HW need not be
+ * touched.
+ */
+ if (val == CS_MODE_SYSFS)
+ goto out;
+
+ /*
+ * If drvdata::buf == NULL, use the memory allocated above.
+ * Otherwise a buffer still exists from a previous session, so
+ * simply use that.
+ */
+ if (drvdata->buf == NULL) {
+ used = true;
+ drvdata->vaddr = vaddr;
+ drvdata->paddr = paddr;
+ drvdata->buf = drvdata->vaddr;
+ }
+
+ memset(drvdata->vaddr, 0, drvdata->size);
+
+ tmc_etr_enable_hw(drvdata);
+out:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ /* Free memory outside the spinlock if need be */
+ if (!used && vaddr)
+ dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
+
+ if (!ret)
+ dev_info(drvdata->dev, "TMC-ETR enabled\n");
+
+ return ret;
+}
+
+static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, u32 mode)
+{
+ int ret = 0;
+ long val;
+ unsigned long flags;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ /* This shouldn't be happening */
+ if (WARN_ON(mode != CS_MODE_PERF))
+ return -EINVAL;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->reading) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ val = local_xchg(&drvdata->mode, mode);
+ /*
+ * In Perf mode there can be only one writer per sink. There
+ * is also no need to continue if the ETR is already operated
+ * from sysFS.
+ */
+ if (val != CS_MODE_DISABLED) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ tmc_etr_enable_hw(drvdata);
+out:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ return ret;
+}
+
+static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
+{
+ switch (mode) {
+ case CS_MODE_SYSFS:
+ return tmc_enable_etr_sink_sysfs(csdev, mode);
+ case CS_MODE_PERF:
+ return tmc_enable_etr_sink_perf(csdev, mode);
+ }
+
+ /* We shouldn't be here */
+ return -EINVAL;
+}
+
+static void tmc_disable_etr_sink(struct coresight_device *csdev)
+{
+ long val;
+ unsigned long flags;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->reading) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return;
+ }
+
+ val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
+ /* Disable the TMC only if it needs to be */
+ if (val != CS_MODE_DISABLED)
+ tmc_etr_disable_hw(drvdata);
+
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ dev_info(drvdata->dev, "TMC-ETR disabled\n");
+}
+
+static const struct coresight_ops_sink tmc_etr_sink_ops = {
+ .enable = tmc_enable_etr_sink,
+ .disable = tmc_disable_etr_sink,
+};
+
+const struct coresight_ops tmc_etr_cs_ops = {
+ .sink_ops = &tmc_etr_sink_ops,
+};
+
+int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
+{
+ int ret = 0;
+ long val;
+ unsigned long flags;
+
+ /* config types are set at boot time and never change */
+ if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
+ return -EINVAL;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->reading) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ val = local_read(&drvdata->mode);
+ /* Don't interfere if operated from Perf */
+ if (val == CS_MODE_PERF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* If drvdata::buf is NULL, the trace data has already been read */
+ if (drvdata->buf == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Disable the TMC if need be */
+ if (val == CS_MODE_SYSFS)
+ tmc_etr_disable_hw(drvdata);
+
+ drvdata->reading = true;
+out:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ return ret;
+}
+
+int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
+{
+ unsigned long flags;
+ dma_addr_t paddr;
+ void __iomem *vaddr = NULL;
+
+ /* config types are set at boot time and never change */
+ if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
+ return -EINVAL;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ /* Re-enable the TMC if need be */
+ if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+ /*
+ * The trace run will continue with the same allocated trace
+ * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
+ * so we don't have to explicitly clear it. Also, since the
+ * tracer is still enabled, drvdata::buf can't be NULL.
+ */
+ tmc_etr_enable_hw(drvdata);
+ } else {
+ /*
+ * The ETR is not tracing and the buffer was just read.
+ * As such, prepare to free the trace buffer.
+ */
+ vaddr = drvdata->vaddr;
+ paddr = drvdata->paddr;
+ drvdata->buf = drvdata->vaddr = NULL;
+ }
+
+ drvdata->reading = false;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ /* Free allocated memory outside of the spinlock */
+ if (vaddr)
+ dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
+
+ return 0;
+}
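
The prepare/unprepare pair above is driven purely by open() and close() on the TMC misc device, as the tmc_open()/tmc_release() changes below show. A minimal user-space sketch of draining the buffer; the device node name here is a placeholder — the real one follows the "/dev/xyz.tmc" pattern and depends on the platform:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/20070000.tmc", O_RDONLY);	/* hypothetical node */

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* open() ran tmc_read_prepare(); read until the buffer is drained */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	close(fd);	/* runs tmc_read_unprepare() */
	return EXIT_SUCCESS;
}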
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 1be191f5d..9e02ac963 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -30,127 +30,27 @@
#include <linux/amba/bus.h>
#include "coresight-priv.h"
+#include "coresight-tmc.h"
-#define TMC_RSZ 0x004
-#define TMC_STS 0x00c
-#define TMC_RRD 0x010
-#define TMC_RRP 0x014
-#define TMC_RWP 0x018
-#define TMC_TRG 0x01c
-#define TMC_CTL 0x020
-#define TMC_RWD 0x024
-#define TMC_MODE 0x028
-#define TMC_LBUFLEVEL 0x02c
-#define TMC_CBUFLEVEL 0x030
-#define TMC_BUFWM 0x034
-#define TMC_RRPHI 0x038
-#define TMC_RWPHI 0x03c
-#define TMC_AXICTL 0x110
-#define TMC_DBALO 0x118
-#define TMC_DBAHI 0x11c
-#define TMC_FFSR 0x300
-#define TMC_FFCR 0x304
-#define TMC_PSCR 0x308
-#define TMC_ITMISCOP0 0xee0
-#define TMC_ITTRFLIN 0xee8
-#define TMC_ITATBDATA0 0xeec
-#define TMC_ITATBCTR2 0xef0
-#define TMC_ITATBCTR1 0xef4
-#define TMC_ITATBCTR0 0xef8
-
-/* register description */
-/* TMC_CTL - 0x020 */
-#define TMC_CTL_CAPT_EN BIT(0)
-/* TMC_STS - 0x00C */
-#define TMC_STS_TRIGGERED BIT(1)
-/* TMC_AXICTL - 0x110 */
-#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
-#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
-#define TMC_AXICTL_SCT_GAT_MODE BIT(7)
-#define TMC_AXICTL_WR_BURST_LEN 0xF00
-/* TMC_FFCR - 0x304 */
-#define TMC_FFCR_EN_FMT BIT(0)
-#define TMC_FFCR_EN_TI BIT(1)
-#define TMC_FFCR_FON_FLIN BIT(4)
-#define TMC_FFCR_FON_TRIG_EVT BIT(5)
-#define TMC_FFCR_FLUSHMAN BIT(6)
-#define TMC_FFCR_TRIGON_TRIGIN BIT(8)
-#define TMC_FFCR_STOP_ON_FLUSH BIT(12)
-
-#define TMC_STS_TRIGGERED_BIT 2
-#define TMC_FFCR_FLUSHMAN_BIT 6
-
-enum tmc_config_type {
- TMC_CONFIG_TYPE_ETB,
- TMC_CONFIG_TYPE_ETR,
- TMC_CONFIG_TYPE_ETF,
-};
-
-enum tmc_mode {
- TMC_MODE_CIRCULAR_BUFFER,
- TMC_MODE_SOFTWARE_FIFO,
- TMC_MODE_HARDWARE_FIFO,
-};
-
-enum tmc_mem_intf_width {
- TMC_MEM_INTF_WIDTH_32BITS = 0x2,
- TMC_MEM_INTF_WIDTH_64BITS = 0x3,
- TMC_MEM_INTF_WIDTH_128BITS = 0x4,
- TMC_MEM_INTF_WIDTH_256BITS = 0x5,
-};
-
-/**
- * struct tmc_drvdata - specifics associated to an TMC component
- * @base: memory mapped base address for this component.
- * @dev: the device entity associated to this component.
- * @csdev: component vitals needed by the framework.
- * @miscdev: specifics to handle "/dev/xyz.tmc" entry.
- * @spinlock: only one at a time pls.
- * @read_count: manages preparation of buffer for reading.
- * @buf: area of memory where trace data get sent.
- * @paddr: DMA start location in RAM.
- * @vaddr: virtual representation of @paddr.
- * @size: @buf size.
- * @enable: this TMC is being used.
- * @config_type: TMC variant, must be of type @tmc_config_type.
- * @trigger_cntr: amount of words to store after a trigger.
- */
-struct tmc_drvdata {
- void __iomem *base;
- struct device *dev;
- struct coresight_device *csdev;
- struct miscdevice miscdev;
- spinlock_t spinlock;
- int read_count;
- bool reading;
- char *buf;
- dma_addr_t paddr;
- void *vaddr;
- u32 size;
- bool enable;
- enum tmc_config_type config_type;
- u32 trigger_cntr;
-};
-
-static void tmc_wait_for_ready(struct tmc_drvdata *drvdata)
+void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
/* Ensure formatter, unformatter and hardware fifo are empty */
if (coresight_timeout(drvdata->base,
- TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) {
+ TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
dev_err(drvdata->dev,
"timeout observed when probing at offset %#x\n",
TMC_STS);
}
}
-static void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
+void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
u32 ffcr;
ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
ffcr |= TMC_FFCR_STOP_ON_FLUSH;
writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
- ffcr |= TMC_FFCR_FLUSHMAN;
+ ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
/* Ensure flush completes */
if (coresight_timeout(drvdata->base,
@@ -160,338 +60,73 @@ static void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
TMC_FFCR);
}
- tmc_wait_for_ready(drvdata);
+ tmc_wait_for_tmcready(drvdata);
}
-static void tmc_enable_hw(struct tmc_drvdata *drvdata)
+void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}
-static void tmc_disable_hw(struct tmc_drvdata *drvdata)
+void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
writel_relaxed(0x0, drvdata->base + TMC_CTL);
}
-static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
-{
- /* Zero out the memory to help with debug */
- memset(drvdata->buf, 0, drvdata->size);
-
- CS_UNLOCK(drvdata->base);
-
- writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
- writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
- TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
- TMC_FFCR_TRIGON_TRIGIN,
- drvdata->base + TMC_FFCR);
-
- writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
- tmc_enable_hw(drvdata);
-
- CS_LOCK(drvdata->base);
-}
-
-static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
-{
- u32 axictl;
-
- /* Zero out the memory to help with debug */
- memset(drvdata->vaddr, 0, drvdata->size);
-
- CS_UNLOCK(drvdata->base);
-
- writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
- writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
-
- axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
- axictl |= TMC_AXICTL_WR_BURST_LEN;
- writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
- axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
- writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
- axictl = (axictl &
- ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
- TMC_AXICTL_PROT_CTL_B1;
- writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
-
- writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
- writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
- writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
- TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
- TMC_FFCR_TRIGON_TRIGIN,
- drvdata->base + TMC_FFCR);
- writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
- tmc_enable_hw(drvdata);
-
- CS_LOCK(drvdata->base);
-}
-
-static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
-{
- CS_UNLOCK(drvdata->base);
-
- writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
- writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
- drvdata->base + TMC_FFCR);
- writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
- tmc_enable_hw(drvdata);
-
- CS_LOCK(drvdata->base);
-}
-
-static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->reading) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- return -EBUSY;
- }
-
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
- tmc_etb_enable_hw(drvdata);
- } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- tmc_etr_enable_hw(drvdata);
- } else {
- if (mode == TMC_MODE_CIRCULAR_BUFFER)
- tmc_etb_enable_hw(drvdata);
- else
- tmc_etf_enable_hw(drvdata);
- }
- drvdata->enable = true;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
- dev_info(drvdata->dev, "TMC enabled\n");
- return 0;
-}
-
-static int tmc_enable_sink(struct coresight_device *csdev, u32 mode)
-{
- struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
-}
-
-static int tmc_enable_link(struct coresight_device *csdev, int inport,
- int outport)
+static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
- struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO);
-}
+ int ret = 0;
-static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
-{
- enum tmc_mem_intf_width memwidth;
- u8 memwords;
- char *bufp;
- u32 read_data;
- int i;
-
- memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10);
- if (memwidth == TMC_MEM_INTF_WIDTH_32BITS)
- memwords = 1;
- else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS)
- memwords = 2;
- else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS)
- memwords = 4;
- else
- memwords = 8;
-
- bufp = drvdata->buf;
- while (1) {
- for (i = 0; i < memwords; i++) {
- read_data = readl_relaxed(drvdata->base + TMC_RRD);
- if (read_data == 0xFFFFFFFF)
- return;
- memcpy(bufp, &read_data, 4);
- bufp += 4;
- }
+ switch (drvdata->config_type) {
+ case TMC_CONFIG_TYPE_ETB:
+ case TMC_CONFIG_TYPE_ETF:
+ ret = tmc_read_prepare_etb(drvdata);
+ break;
+ case TMC_CONFIG_TYPE_ETR:
+ ret = tmc_read_prepare_etr(drvdata);
+ break;
+ default:
+ ret = -EINVAL;
}
-}
-
-static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
-{
- CS_UNLOCK(drvdata->base);
-
- tmc_flush_and_stop(drvdata);
- tmc_etb_dump_hw(drvdata);
- tmc_disable_hw(drvdata);
-
- CS_LOCK(drvdata->base);
-}
-
-static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
-{
- u32 rwp, val;
- rwp = readl_relaxed(drvdata->base + TMC_RWP);
- val = readl_relaxed(drvdata->base + TMC_STS);
-
- /* How much memory do we still have */
- if (val & BIT(0))
- drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
- else
- drvdata->buf = drvdata->vaddr;
-}
+ if (!ret)
+ dev_info(drvdata->dev, "TMC read start\n");
-static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
-{
- CS_UNLOCK(drvdata->base);
-
- tmc_flush_and_stop(drvdata);
- tmc_etr_dump_hw(drvdata);
- tmc_disable_hw(drvdata);
-
- CS_LOCK(drvdata->base);
-}
-
-static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
-{
- CS_UNLOCK(drvdata->base);
-
- tmc_flush_and_stop(drvdata);
- tmc_disable_hw(drvdata);
-
- CS_LOCK(drvdata->base);
+ return ret;
}
-static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
+static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
{
- unsigned long flags;
-
- spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->reading)
- goto out;
+ int ret = 0;
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
- tmc_etb_disable_hw(drvdata);
- } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- tmc_etr_disable_hw(drvdata);
- } else {
- if (mode == TMC_MODE_CIRCULAR_BUFFER)
- tmc_etb_disable_hw(drvdata);
- else
- tmc_etf_disable_hw(drvdata);
+ switch (drvdata->config_type) {
+ case TMC_CONFIG_TYPE_ETB:
+ case TMC_CONFIG_TYPE_ETF:
+ ret = tmc_read_unprepare_etb(drvdata);
+ break;
+ case TMC_CONFIG_TYPE_ETR:
+ ret = tmc_read_unprepare_etr(drvdata);
+ break;
+ default:
+ ret = -EINVAL;
}
-out:
- drvdata->enable = false;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
- dev_info(drvdata->dev, "TMC disabled\n");
-}
-
-static void tmc_disable_sink(struct coresight_device *csdev)
-{
- struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
-}
-
-static void tmc_disable_link(struct coresight_device *csdev, int inport,
- int outport)
-{
- struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO);
-}
-
-static const struct coresight_ops_sink tmc_sink_ops = {
- .enable = tmc_enable_sink,
- .disable = tmc_disable_sink,
-};
-
-static const struct coresight_ops_link tmc_link_ops = {
- .enable = tmc_enable_link,
- .disable = tmc_disable_link,
-};
-
-static const struct coresight_ops tmc_etb_cs_ops = {
- .sink_ops = &tmc_sink_ops,
-};
-
-static const struct coresight_ops tmc_etr_cs_ops = {
- .sink_ops = &tmc_sink_ops,
-};
-
-static const struct coresight_ops tmc_etf_cs_ops = {
- .sink_ops = &tmc_sink_ops,
- .link_ops = &tmc_link_ops,
-};
-
-static int tmc_read_prepare(struct tmc_drvdata *drvdata)
-{
- int ret;
- unsigned long flags;
- enum tmc_mode mode;
- spin_lock_irqsave(&drvdata->spinlock, flags);
- if (!drvdata->enable)
- goto out;
+ if (!ret)
+ dev_info(drvdata->dev, "TMC read end\n");
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
- tmc_etb_disable_hw(drvdata);
- } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- tmc_etr_disable_hw(drvdata);
- } else {
- mode = readl_relaxed(drvdata->base + TMC_MODE);
- if (mode == TMC_MODE_CIRCULAR_BUFFER) {
- tmc_etb_disable_hw(drvdata);
- } else {
- ret = -ENODEV;
- goto err;
- }
- }
-out:
- drvdata->reading = true;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
- dev_info(drvdata->dev, "TMC read start\n");
- return 0;
-err:
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
}
-static void tmc_read_unprepare(struct tmc_drvdata *drvdata)
-{
- unsigned long flags;
- enum tmc_mode mode;
-
- spin_lock_irqsave(&drvdata->spinlock, flags);
- if (!drvdata->enable)
- goto out;
-
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
- tmc_etb_enable_hw(drvdata);
- } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- tmc_etr_enable_hw(drvdata);
- } else {
- mode = readl_relaxed(drvdata->base + TMC_MODE);
- if (mode == TMC_MODE_CIRCULAR_BUFFER)
- tmc_etb_enable_hw(drvdata);
- }
-out:
- drvdata->reading = false;
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
- dev_info(drvdata->dev, "TMC read end\n");
-}
-
static int tmc_open(struct inode *inode, struct file *file)
{
+ int ret;
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
- int ret = 0;
-
- if (drvdata->read_count++)
- goto out;
ret = tmc_read_prepare(drvdata);
if (ret)
return ret;
-out:
+
nonseekable_open(inode, file);
dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
@@ -531,19 +166,14 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
static int tmc_release(struct inode *inode, struct file *file)
{
+ int ret;
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
- if (--drvdata->read_count) {
- if (drvdata->read_count < 0) {
- dev_err(drvdata->dev, "mismatched close\n");
- drvdata->read_count = 0;
- }
- goto out;
- }
+ ret = tmc_read_unprepare(drvdata);
+ if (ret)
+ return ret;
- tmc_read_unprepare(drvdata);
-out:
dev_dbg(drvdata->dev, "%s: released\n", __func__);
return 0;
}
@@ -556,56 +186,71 @@ static const struct file_operations tmc_fops = {
.llseek = no_llseek,
};
-static ssize_t status_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
{
- unsigned long flags;
- u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg;
- u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr;
- u32 devid;
- struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ enum tmc_mem_intf_width memwidth;
- pm_runtime_get_sync(drvdata->dev);
- spin_lock_irqsave(&drvdata->spinlock, flags);
- CS_UNLOCK(drvdata->base);
-
- tmc_rsz = readl_relaxed(drvdata->base + TMC_RSZ);
- tmc_sts = readl_relaxed(drvdata->base + TMC_STS);
- tmc_rrp = readl_relaxed(drvdata->base + TMC_RRP);
- tmc_rwp = readl_relaxed(drvdata->base + TMC_RWP);
- tmc_trg = readl_relaxed(drvdata->base + TMC_TRG);
- tmc_ctl = readl_relaxed(drvdata->base + TMC_CTL);
- tmc_ffsr = readl_relaxed(drvdata->base + TMC_FFSR);
- tmc_ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
- tmc_mode = readl_relaxed(drvdata->base + TMC_MODE);
- tmc_pscr = readl_relaxed(drvdata->base + TMC_PSCR);
- devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
+ /*
+ * Excerpt from the TRM:
+ *
+ * DEVID::MEMWIDTH[10:8]
+ * 0x2 Memory interface databus is 32 bits wide.
+ * 0x3 Memory interface databus is 64 bits wide.
+ * 0x4 Memory interface databus is 128 bits wide.
+ * 0x5 Memory interface databus is 256 bits wide.
+ */
+ switch (BMVAL(devid, 8, 10)) {
+ case 0x2:
+ memwidth = TMC_MEM_INTF_WIDTH_32BITS;
+ break;
+ case 0x3:
+ memwidth = TMC_MEM_INTF_WIDTH_64BITS;
+ break;
+ case 0x4:
+ memwidth = TMC_MEM_INTF_WIDTH_128BITS;
+ break;
+ case 0x5:
+ memwidth = TMC_MEM_INTF_WIDTH_256BITS;
+ break;
+ default:
+ memwidth = 0;
+ }
- CS_LOCK(drvdata->base);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(drvdata->dev);
-
- return sprintf(buf,
- "Depth:\t\t0x%x\n"
- "Status:\t\t0x%x\n"
- "RAM read ptr:\t0x%x\n"
- "RAM wrt ptr:\t0x%x\n"
- "Trigger cnt:\t0x%x\n"
- "Control:\t0x%x\n"
- "Flush status:\t0x%x\n"
- "Flush ctrl:\t0x%x\n"
- "Mode:\t\t0x%x\n"
- "PSRC:\t\t0x%x\n"
- "DEVID:\t\t0x%x\n",
- tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg,
- tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid);
-
- return -EINVAL;
+ return memwidth;
}
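
For reference, BMVAL() is the bit-field extraction helper from coresight-priv.h; as best I can tell it expands to ((val) & GENMASK(msb, lsb)) >> lsb, so the switch above keys on bits [10:8] of DEVID. A worked example with a made-up DEVID value:

static enum tmc_mem_intf_width example(void)
{
	u32 devid = 0x300;		/* hypothetical: MEMWIDTH[10:8] = 0x3 */

	return tmc_get_memwidth(devid);	/* -> TMC_MEM_INTF_WIDTH_64BITS (2) */
}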
-static DEVICE_ATTR_RO(status);
-static ssize_t trigger_cntr_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+#define coresight_tmc_simple_func(name, offset) \
+ coresight_simple_func(struct tmc_drvdata, name, offset)
+
+coresight_tmc_simple_func(rsz, TMC_RSZ);
+coresight_tmc_simple_func(sts, TMC_STS);
+coresight_tmc_simple_func(rrp, TMC_RRP);
+coresight_tmc_simple_func(rwp, TMC_RWP);
+coresight_tmc_simple_func(trg, TMC_TRG);
+coresight_tmc_simple_func(ctl, TMC_CTL);
+coresight_tmc_simple_func(ffsr, TMC_FFSR);
+coresight_tmc_simple_func(ffcr, TMC_FFCR);
+coresight_tmc_simple_func(mode, TMC_MODE);
+coresight_tmc_simple_func(pscr, TMC_PSCR);
+coresight_tmc_simple_func(devid, CORESIGHT_DEVID);
+
+static struct attribute *coresight_tmc_mgmt_attrs[] = {
+ &dev_attr_rsz.attr,
+ &dev_attr_sts.attr,
+ &dev_attr_rrp.attr,
+ &dev_attr_rwp.attr,
+ &dev_attr_trg.attr,
+ &dev_attr_ctl.attr,
+ &dev_attr_ffsr.attr,
+ &dev_attr_ffcr.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_pscr.attr,
+ &dev_attr_devid.attr,
+ NULL,
+};
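
Each coresight_tmc_simple_func() line above generates a read-only sysfs attribute that dumps the raw register. Based on the coresight_simple_func() definition in coresight-priv.h, the first line expands roughly to:

static ssize_t rsz_show(struct device *_dev,
			struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(_dev->parent);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n",
			 readl_relaxed(drvdata->base + TMC_RSZ));
}
static DEVICE_ATTR_RO(rsz);

With the "mgmt" attribute group defined further down, these registers end up under .../<device>/mgmt/rsz and friends.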
+
+ssize_t trigger_cntr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->trigger_cntr;
@@ -630,26 +275,25 @@ static ssize_t trigger_cntr_store(struct device *dev,
}
static DEVICE_ATTR_RW(trigger_cntr);
-static struct attribute *coresight_etb_attrs[] = {
+static struct attribute *coresight_tmc_attrs[] = {
&dev_attr_trigger_cntr.attr,
- &dev_attr_status.attr,
NULL,
};
-ATTRIBUTE_GROUPS(coresight_etb);
-static struct attribute *coresight_etr_attrs[] = {
- &dev_attr_trigger_cntr.attr,
- &dev_attr_status.attr,
- NULL,
+static const struct attribute_group coresight_tmc_group = {
+ .attrs = coresight_tmc_attrs,
};
-ATTRIBUTE_GROUPS(coresight_etr);
-static struct attribute *coresight_etf_attrs[] = {
- &dev_attr_trigger_cntr.attr,
- &dev_attr_status.attr,
+static const struct attribute_group coresight_tmc_mgmt_group = {
+ .attrs = coresight_tmc_mgmt_attrs,
+ .name = "mgmt",
+};
+
+const struct attribute_group *coresight_tmc_groups[] = {
+ &coresight_tmc_group,
+ &coresight_tmc_mgmt_group,
NULL,
};
-ATTRIBUTE_GROUPS(coresight_etf);
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
@@ -688,6 +332,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
drvdata->config_type = BMVAL(devid, 6, 7);
+ drvdata->memwidth = tmc_get_memwidth(devid);
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
if (np)
@@ -702,20 +347,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
pm_runtime_put(&adev->dev);
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
- drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
- &drvdata->paddr, GFP_KERNEL);
- if (!drvdata->vaddr)
- return -ENOMEM;
-
- memset(drvdata->vaddr, 0, drvdata->size);
- drvdata->buf = drvdata->vaddr;
- } else {
- drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL);
- if (!drvdata->buf)
- return -ENOMEM;
- }
-
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
if (!desc) {
ret = -ENOMEM;
@@ -725,20 +356,18 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
desc->pdata = pdata;
desc->dev = dev;
desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+ desc->groups = coresight_tmc_groups;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
desc->type = CORESIGHT_DEV_TYPE_SINK;
desc->ops = &tmc_etb_cs_ops;
- desc->groups = coresight_etb_groups;
} else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
desc->type = CORESIGHT_DEV_TYPE_SINK;
desc->ops = &tmc_etr_cs_ops;
- desc->groups = coresight_etr_groups;
} else {
desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
desc->ops = &tmc_etf_cs_ops;
- desc->groups = coresight_etf_groups;
}
drvdata->csdev = coresight_register(desc);
@@ -754,7 +383,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
if (ret)
goto err_misc_register;
- dev_info(dev, "TMC initialized\n");
return 0;
err_misc_register:
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
new file mode 100644
index 000000000..5c5fe2ad2
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright(C) 2015 Linaro Limited. All rights reserved.
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CORESIGHT_TMC_H
+#define _CORESIGHT_TMC_H
+
+#include <linux/miscdevice.h>
+
+#define TMC_RSZ 0x004
+#define TMC_STS 0x00c
+#define TMC_RRD 0x010
+#define TMC_RRP 0x014
+#define TMC_RWP 0x018
+#define TMC_TRG 0x01c
+#define TMC_CTL 0x020
+#define TMC_RWD 0x024
+#define TMC_MODE 0x028
+#define TMC_LBUFLEVEL 0x02c
+#define TMC_CBUFLEVEL 0x030
+#define TMC_BUFWM 0x034
+#define TMC_RRPHI 0x038
+#define TMC_RWPHI 0x03c
+#define TMC_AXICTL 0x110
+#define TMC_DBALO 0x118
+#define TMC_DBAHI 0x11c
+#define TMC_FFSR 0x300
+#define TMC_FFCR 0x304
+#define TMC_PSCR 0x308
+#define TMC_ITMISCOP0 0xee0
+#define TMC_ITTRFLIN 0xee8
+#define TMC_ITATBDATA0 0xeec
+#define TMC_ITATBCTR2 0xef0
+#define TMC_ITATBCTR1 0xef4
+#define TMC_ITATBCTR0 0xef8
+
+/* register description */
+/* TMC_CTL - 0x020 */
+#define TMC_CTL_CAPT_EN BIT(0)
+/* TMC_STS - 0x00C */
+#define TMC_STS_TMCREADY_BIT 2
+#define TMC_STS_FULL BIT(0)
+#define TMC_STS_TRIGGERED BIT(1)
+/* TMC_AXICTL - 0x110 */
+#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
+#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
+#define TMC_AXICTL_SCT_GAT_MODE BIT(7)
+#define TMC_AXICTL_WR_BURST_16 0xF00
+/* TMC_FFCR - 0x304 */
+#define TMC_FFCR_FLUSHMAN_BIT 6
+#define TMC_FFCR_EN_FMT BIT(0)
+#define TMC_FFCR_EN_TI BIT(1)
+#define TMC_FFCR_FON_FLIN BIT(4)
+#define TMC_FFCR_FON_TRIG_EVT BIT(5)
+#define TMC_FFCR_TRIGON_TRIGIN BIT(8)
+#define TMC_FFCR_STOP_ON_FLUSH BIT(12)
+
+
+enum tmc_config_type {
+ TMC_CONFIG_TYPE_ETB,
+ TMC_CONFIG_TYPE_ETR,
+ TMC_CONFIG_TYPE_ETF,
+};
+
+enum tmc_mode {
+ TMC_MODE_CIRCULAR_BUFFER,
+ TMC_MODE_SOFTWARE_FIFO,
+ TMC_MODE_HARDWARE_FIFO,
+};
+
+enum tmc_mem_intf_width {
+ TMC_MEM_INTF_WIDTH_32BITS = 1,
+ TMC_MEM_INTF_WIDTH_64BITS = 2,
+ TMC_MEM_INTF_WIDTH_128BITS = 4,
+ TMC_MEM_INTF_WIDTH_256BITS = 8,
+};
+
+/**
+ * struct tmc_drvdata - specifics associated with a TMC component
+ * @base: memory mapped base address for this component.
+ * @dev: the device entity associated with this component.
+ * @csdev: component vitals needed by the framework.
+ * @miscdev: specifics to handle "/dev/xyz.tmc" entry.
+ * @spinlock: serializes accesses to this TMC; only one user at a time.
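+ * @reading: TMC is being read.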
+ * @buf: area of memory where trace data get sent.
+ * @paddr: DMA start location in RAM.
+ * @vaddr: virtual representation of @paddr.
+ * @size: @buf size.
+ * @mode: how this TMC is being used.
+ * @config_type: TMC variant, must be of type @tmc_config_type.
+ * @memwidth: width of the memory interface databus, in 32-bit words.
+ * @trigger_cntr: amount of words to store after a trigger.
+ */
+struct tmc_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct miscdevice miscdev;
+ spinlock_t spinlock;
+ bool reading;
+ char *buf;
+ dma_addr_t paddr;
+ void __iomem *vaddr;
+ u32 size;
+ local_t mode;
+ enum tmc_config_type config_type;
+ enum tmc_mem_intf_width memwidth;
+ u32 trigger_cntr;
+};
+
+/* Generic functions */
+void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
+void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
+void tmc_enable_hw(struct tmc_drvdata *drvdata);
+void tmc_disable_hw(struct tmc_drvdata *drvdata);
+
+/* ETB/ETF functions */
+int tmc_read_prepare_etb(struct tmc_drvdata *drvdata);
+int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata);
+extern const struct coresight_ops tmc_etb_cs_ops;
+extern const struct coresight_ops tmc_etf_cs_ops;
+
+/* ETR functions */
+int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
+int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
+extern const struct coresight_ops tmc_etr_cs_ops;
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 8fb09d923..4e471e2e9 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -167,7 +167,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
- dev_info(dev, "TPIU initialized\n");
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 2ea596109..d08d1ab9b 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -43,7 +43,15 @@ struct coresight_node {
* When operating Coresight drivers from the sysFS interface, only a single
* path can exist from a tracer (associated to a CPU) to a sink.
*/
-static DEFINE_PER_CPU(struct list_head *, sysfs_path);
+static DEFINE_PER_CPU(struct list_head *, tracer_path);
+
+/*
+ * As of this writing only a single STM can be found in CS topologies. Since
+ * there is no way to know if we'll ever see more and what kind of
+ * configuration they will enact, for the time being only define a single path
+ * for STM.
+ */
+static struct list_head *stm_path;
static int coresight_id_match(struct device *dev, void *data)
{
@@ -257,15 +265,27 @@ static void coresight_disable_source(struct coresight_device *csdev)
void coresight_disable_path(struct list_head *path)
{
+ u32 type;
struct coresight_node *nd;
struct coresight_device *csdev, *parent, *child;
list_for_each_entry(nd, path, link) {
csdev = nd->csdev;
+ type = csdev->type;
+
+ /*
+ * ETF devices are tricky... They can be a link or a sink,
+ * depending on how they are configured. If an ETF has been
+ * "activated" it will be configured as a sink, otherwise
+ * go ahead with the link configuration.
+ */
+ if (type == CORESIGHT_DEV_TYPE_LINKSINK)
+ type = (csdev == coresight_get_sink(path)) ?
+ CORESIGHT_DEV_TYPE_SINK :
+ CORESIGHT_DEV_TYPE_LINK;
- switch (csdev->type) {
+ switch (type) {
case CORESIGHT_DEV_TYPE_SINK:
- case CORESIGHT_DEV_TYPE_LINKSINK:
coresight_disable_sink(csdev);
break;
case CORESIGHT_DEV_TYPE_SOURCE:
@@ -286,15 +306,27 @@ int coresight_enable_path(struct list_head *path, u32 mode)
{
int ret = 0;
+ u32 type;
struct coresight_node *nd;
struct coresight_device *csdev, *parent, *child;
list_for_each_entry_reverse(nd, path, link) {
csdev = nd->csdev;
+ type = csdev->type;
- switch (csdev->type) {
+ /*
+ * ETF devices are tricky... They can be a link or a sink,
+ * depending on how they are configured. If an ETF has been
+ * "activated" it will be configured as a sink, otherwise
+ * go ahead with the link configuration.
+ */
+ if (type == CORESIGHT_DEV_TYPE_LINKSINK)
+ type = (csdev == coresight_get_sink(path)) ?
+ CORESIGHT_DEV_TYPE_SINK :
+ CORESIGHT_DEV_TYPE_LINK;
+
+ switch (type) {
case CORESIGHT_DEV_TYPE_SINK:
- case CORESIGHT_DEV_TYPE_LINKSINK:
ret = coresight_enable_sink(csdev, mode);
if (ret)
goto err;
@@ -353,7 +385,6 @@ static int _coresight_build_path(struct coresight_device *csdev,
int i;
bool found = false;
struct coresight_node *node;
- struct coresight_connection *conn;
/* An activated sink has been found. Enqueue the element */
if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
@@ -362,8 +393,9 @@ static int _coresight_build_path(struct coresight_device *csdev,
/* Not a sink - recursively explore each port found on this element */
for (i = 0; i < csdev->nr_outport; i++) {
- conn = &csdev->conns[i];
- if (_coresight_build_path(conn->child_dev, path) == 0) {
+ struct coresight_device *child_dev = csdev->conns[i].child_dev;
+
+ if (child_dev && _coresight_build_path(child_dev, path) == 0) {
found = true;
break;
}
@@ -393,6 +425,7 @@ out:
struct list_head *coresight_build_path(struct coresight_device *csdev)
{
struct list_head *path;
+ int rc;
path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
if (!path)
@@ -400,9 +433,10 @@ struct list_head *coresight_build_path(struct coresight_device *csdev)
INIT_LIST_HEAD(path);
- if (_coresight_build_path(csdev, path)) {
+ rc = _coresight_build_path(csdev, path);
+ if (rc) {
kfree(path);
- path = NULL;
+ return ERR_PTR(rc);
}
return path;
@@ -432,24 +466,52 @@ void coresight_release_path(struct list_head *path)
path = NULL;
}
+/**
+ * coresight_validate_source - make sure a source has the right credentials
+ * @csdev: the device structure for a source.
+ * @function: the function this was called from.
+ *
+ * Assumes the coresight_mutex is held.
+ */
+static int coresight_validate_source(struct coresight_device *csdev,
+ const char *function)
+{
+ u32 type, subtype;
+
+ type = csdev->type;
+ subtype = csdev->subtype.source_subtype;
+
+ if (type != CORESIGHT_DEV_TYPE_SOURCE) {
+ dev_err(&csdev->dev, "wrong device type in %s\n", function);
+ return -EINVAL;
+ }
+
+ if (subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_PROC &&
+ subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE) {
+ dev_err(&csdev->dev, "wrong device subtype in %s\n", function);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int coresight_enable(struct coresight_device *csdev)
{
- int ret = 0;
- int cpu;
+ int cpu, ret = 0;
struct list_head *path;
mutex_lock(&coresight_mutex);
- if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
- ret = -EINVAL;
- dev_err(&csdev->dev, "wrong device type in %s\n", __func__);
+
+ ret = coresight_validate_source(csdev, __func__);
+ if (ret)
goto out;
- }
+
if (csdev->enable)
goto out;
path = coresight_build_path(csdev);
- if (!path) {
+ if (IS_ERR(path)) {
pr_err("building path(s) failed\n");
+ ret = PTR_ERR(path);
goto out;
}
@@ -461,15 +523,25 @@ int coresight_enable(struct coresight_device *csdev)
if (ret)
goto err_source;
- /*
- * When working from sysFS it is important to keep track
- * of the paths that were created so that they can be
- * undone in 'coresight_disable()'. Since there can only
- * be a single session per tracer (when working from sysFS)
- * a per-cpu variable will do just fine.
- */
- cpu = source_ops(csdev)->cpu_id(csdev);
- per_cpu(sysfs_path, cpu) = path;
+ switch (csdev->subtype.source_subtype) {
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
+ /*
+ * When working from sysFS it is important to keep track
+ * of the paths that were created so that they can be
+ * undone in 'coresight_disable()'. Since there can only
+ * be a single session per tracer (when working from sysFS)
+ * a per-cpu variable will do just fine.
+ */
+ cpu = source_ops(csdev)->cpu_id(csdev);
+ per_cpu(tracer_path, cpu) = path;
+ break;
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
+ stm_path = path;
+ break;
+ default:
+ /* We can't be here */
+ break;
+ }
out:
mutex_unlock(&coresight_mutex);
@@ -486,23 +558,36 @@ EXPORT_SYMBOL_GPL(coresight_enable);
void coresight_disable(struct coresight_device *csdev)
{
- int cpu;
- struct list_head *path;
+ int cpu, ret;
+ struct list_head *path = NULL;
mutex_lock(&coresight_mutex);
- if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
- dev_err(&csdev->dev, "wrong device type in %s\n", __func__);
+
+ ret = coresight_validate_source(csdev, __func__);
+ if (ret)
goto out;
- }
+
if (!csdev->enable)
goto out;
- cpu = source_ops(csdev)->cpu_id(csdev);
- path = per_cpu(sysfs_path, cpu);
+ switch (csdev->subtype.source_subtype) {
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
+ cpu = source_ops(csdev)->cpu_id(csdev);
+ path = per_cpu(tracer_path, cpu);
+ per_cpu(tracer_path, cpu) = NULL;
+ break;
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
+ path = stm_path;
+ stm_path = NULL;
+ break;
+ default:
+ /* We can't be here */
+ break;
+ }
+
coresight_disable_source(csdev);
coresight_disable_path(path);
coresight_release_path(path);
- per_cpu(sysfs_path, cpu) = NULL;
out:
mutex_unlock(&coresight_mutex);
@@ -514,7 +599,7 @@ static ssize_t enable_sink_show(struct device *dev,
{
struct coresight_device *csdev = to_coresight_device(dev);
- return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->activated);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->activated);
}
static ssize_t enable_sink_store(struct device *dev,
@@ -544,7 +629,7 @@ static ssize_t enable_source_show(struct device *dev,
{
struct coresight_device *csdev = to_coresight_device(dev);
- return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->enable);
}
static ssize_t enable_source_store(struct device *dev,
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 4272f2ce5..1be543e8e 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -71,6 +71,15 @@ static int intel_th_probe(struct device *dev)
if (ret)
return ret;
+ if (thdrv->attr_group) {
+ ret = sysfs_create_group(&thdev->dev.kobj, thdrv->attr_group);
+ if (ret) {
+ thdrv->remove(thdev);
+
+ return ret;
+ }
+ }
+
if (thdev->type == INTEL_TH_OUTPUT &&
!intel_th_output_assigned(thdev))
ret = hubdrv->assign(hub, thdev);
@@ -91,6 +100,9 @@ static int intel_th_remove(struct device *dev)
return err;
}
+ if (thdrv->attr_group)
+ sysfs_remove_group(&thdev->dev.kobj, thdrv->attr_group);
+
thdrv->remove(thdev);
if (intel_th_output_assigned(thdev)) {
@@ -171,7 +183,14 @@ static DEVICE_ATTR_RO(port);
static int intel_th_output_activate(struct intel_th_device *thdev)
{
- struct intel_th_driver *thdrv = to_intel_th_driver(thdev->dev.driver);
+ struct intel_th_driver *thdrv =
+ to_intel_th_driver_or_null(thdev->dev.driver);
+
+ if (!thdrv)
+ return -ENODEV;
+
+ if (!try_module_get(thdrv->driver.owner))
+ return -ENODEV;
if (thdrv->activate)
return thdrv->activate(thdev);
@@ -183,12 +202,18 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
static void intel_th_output_deactivate(struct intel_th_device *thdev)
{
- struct intel_th_driver *thdrv = to_intel_th_driver(thdev->dev.driver);
+ struct intel_th_driver *thdrv =
+ to_intel_th_driver_or_null(thdev->dev.driver);
+
+ if (!thdrv)
+ return;
if (thdrv->deactivate)
thdrv->deactivate(thdev);
else
intel_th_trace_disable(thdev);
+
+ module_put(thdrv->driver.owner);
}
static ssize_t active_show(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index eedd09332..0df22e306 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -115,6 +115,7 @@ intel_th_output_assigned(struct intel_th_device *thdev)
* @enable: enable tracing for a given output device
* @disable: disable tracing for a given output device
* @fops: file operations for device nodes
+ * @attr_group: attributes provided by the driver
*
* Callbacks @probe and @remove are required for all device types.
* Switch device driver needs to fill in @assign, @enable and @disable
@@ -139,6 +140,8 @@ struct intel_th_driver {
void (*deactivate)(struct intel_th_device *thdev);
/* file_operations for those who want a device node */
const struct file_operations *fops;
+ /* optional attributes */
+ struct attribute_group *attr_group;
/* source ops */
int (*set_output)(struct intel_th_device *thdev,
@@ -148,6 +151,9 @@ struct intel_th_driver {
#define to_intel_th_driver(_d) \
container_of((_d), struct intel_th_driver, driver)
+#define to_intel_th_driver_or_null(_d) \
+ ((_d) ? to_intel_th_driver(_d) : NULL)
+
static inline struct intel_th_device *
to_intel_th_hub(struct intel_th_device *thdev)
{
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index d9d6022c5..e8d55a153 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -122,7 +122,6 @@ struct msc {
atomic_t mmap_count;
struct mutex buf_mutex;
- struct mutex iter_mutex;
struct list_head iter_list;
/* config */
@@ -257,23 +256,37 @@ static struct msc_iter *msc_iter_install(struct msc *msc)
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
- return NULL;
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&msc->buf_mutex);
+
+ /*
+ * Reading and tracing are mutually exclusive; if msc is
+ * enabled, open() will fail; otherwise existing readers
+ * will prevent enabling the msc and the rest of fops don't
+ * need to worry about it.
+ */
+ if (msc->enabled) {
+ kfree(iter);
+ iter = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
msc_iter_init(iter);
iter->msc = msc;
- mutex_lock(&msc->iter_mutex);
list_add_tail(&iter->entry, &msc->iter_list);
- mutex_unlock(&msc->iter_mutex);
+unlock:
+ mutex_unlock(&msc->buf_mutex);
return iter;
}
static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
{
- mutex_lock(&msc->iter_mutex);
+ mutex_lock(&msc->buf_mutex);
list_del(&iter->entry);
- mutex_unlock(&msc->iter_mutex);
+ mutex_unlock(&msc->buf_mutex);
kfree(iter);
}
@@ -454,7 +467,6 @@ static void msc_buffer_clear_hw_header(struct msc *msc)
{
struct msc_window *win;
- mutex_lock(&msc->buf_mutex);
list_for_each_entry(win, &msc->win_list, entry) {
unsigned int blk;
size_t hw_sz = sizeof(struct msc_block_desc) -
@@ -466,7 +478,6 @@ static void msc_buffer_clear_hw_header(struct msc *msc)
memset(&bdesc->hw_tag, 0, hw_sz);
}
}
- mutex_unlock(&msc->buf_mutex);
}
/**
@@ -474,12 +485,15 @@ static void msc_buffer_clear_hw_header(struct msc *msc)
* @msc: the MSC device to configure
*
* Program storage mode, wrapping, burst length and trace buffer address
- * into a given MSC. If msc::enabled is set, enable the trace, too.
+ * into a given MSC. Then, enable tracing and set msc::enabled.
+ * The latter is serialized on msc::buf_mutex, so make sure to hold it.
*/
static int msc_configure(struct msc *msc)
{
u32 reg;
+ lockdep_assert_held(&msc->buf_mutex);
+
if (msc->mode > MSC_MODE_MULTI)
return -ENOTSUPP;
@@ -497,21 +511,19 @@ static int msc_configure(struct msc *msc)
reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);
+ reg |= MSC_EN;
reg |= msc->mode << __ffs(MSC_MODE);
reg |= msc->burst_len << __ffs(MSC_LEN);
- /*if (msc->mode == MSC_MODE_MULTI)
- reg |= MSC_RD_HDR_OVRD; */
+
if (msc->wrap)
reg |= MSC_WRAPEN;
- if (msc->enabled)
- reg |= MSC_EN;
iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
- if (msc->enabled) {
- msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
- intel_th_trace_enable(msc->thdev);
- }
+ msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
+ intel_th_trace_enable(msc->thdev);
+ msc->enabled = 1;
+
return 0;
}
@@ -521,15 +533,14 @@ static int msc_configure(struct msc *msc)
* @msc: MSC device to disable
*
* If @msc is enabled, disable tracing on the switch and then disable MSC
- * storage.
+ * storage. Caller must hold msc::buf_mutex.
*/
static void msc_disable(struct msc *msc)
{
unsigned long count;
u32 reg;
- if (!msc->enabled)
- return;
+ lockdep_assert_held(&msc->buf_mutex);
intel_th_trace_disable(msc->thdev);
@@ -569,33 +580,35 @@ static void msc_disable(struct msc *msc)
static int intel_th_msc_activate(struct intel_th_device *thdev)
{
struct msc *msc = dev_get_drvdata(&thdev->dev);
- int ret = 0;
+ int ret = -EBUSY;
if (!atomic_inc_unless_negative(&msc->user_count))
return -ENODEV;
- mutex_lock(&msc->iter_mutex);
- if (!list_empty(&msc->iter_list))
- ret = -EBUSY;
- mutex_unlock(&msc->iter_mutex);
+ mutex_lock(&msc->buf_mutex);
- if (ret) {
- atomic_dec(&msc->user_count);
- return ret;
- }
+ /* if there are readers, refuse */
+ if (list_empty(&msc->iter_list))
+ ret = msc_configure(msc);
- msc->enabled = 1;
+ mutex_unlock(&msc->buf_mutex);
+
+ if (ret)
+ atomic_dec(&msc->user_count);
- return msc_configure(msc);
+ return ret;
}
static void intel_th_msc_deactivate(struct intel_th_device *thdev)
{
struct msc *msc = dev_get_drvdata(&thdev->dev);
- msc_disable(msc);
-
- atomic_dec(&msc->user_count);
+ mutex_lock(&msc->buf_mutex);
+ if (msc->enabled) {
+ msc_disable(msc);
+ atomic_dec(&msc->user_count);
+ }
+ mutex_unlock(&msc->buf_mutex);
}
/**
@@ -1035,8 +1048,8 @@ static int intel_th_msc_open(struct inode *inode, struct file *file)
return -EPERM;
iter = msc_iter_install(msc);
- if (!iter)
- return -ENOMEM;
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
file->private_data = iter;
@@ -1101,11 +1114,6 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
if (!atomic_inc_unless_negative(&msc->user_count))
return 0;
- if (msc->enabled) {
- ret = -EBUSY;
- goto put_count;
- }
-
if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
size = msc->single_sz;
else
@@ -1164,7 +1172,7 @@ static void msc_mmap_close(struct vm_area_struct *vma)
if (!atomic_dec_and_mutex_lock(&msc->mmap_count, &msc->buf_mutex))
return;
- /* drop page _counts */
+ /* drop page _refcounts */
for (pg = 0; pg < msc->nr_pages; pg++) {
struct page *page = msc_buffer_get_page(msc, pg);
@@ -1245,6 +1253,7 @@ static const struct file_operations intel_th_msc_fops = {
.read = intel_th_msc_read,
.mmap = intel_th_msc_mmap,
.llseek = no_llseek,
+ .owner = THIS_MODULE,
};
static int intel_th_msc_init(struct msc *msc)
@@ -1254,8 +1263,6 @@ static int intel_th_msc_init(struct msc *msc)
msc->mode = MSC_MODE_MULTI;
mutex_init(&msc->buf_mutex);
INIT_LIST_HEAD(&msc->win_list);
-
- mutex_init(&msc->iter_mutex);
INIT_LIST_HEAD(&msc->iter_list);
msc->burst_len =
@@ -1393,6 +1400,11 @@ nr_pages_store(struct device *dev, struct device_attribute *attr,
do {
end = memchr(p, ',', len);
s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
+ if (!s) {
+ ret = -ENOMEM;
+ goto free_win;
+ }
+
ret = kstrtoul(s, 10, &val);
kfree(s);
@@ -1473,10 +1485,6 @@ static int intel_th_msc_probe(struct intel_th_device *thdev)
if (err)
return err;
- err = sysfs_create_group(&dev->kobj, &msc_output_group);
- if (err)
- return err;
-
dev_set_drvdata(dev, msc);
return 0;
@@ -1484,7 +1492,18 @@ static int intel_th_msc_probe(struct intel_th_device *thdev)
static void intel_th_msc_remove(struct intel_th_device *thdev)
{
- sysfs_remove_group(&thdev->dev.kobj, &msc_output_group);
+ struct msc *msc = dev_get_drvdata(&thdev->dev);
+ int ret;
+
+ intel_th_msc_deactivate(thdev);
+
+ /*
+ * Buffers should not be used at this point unless the
+ * output character device is still open and the parent
+ * device gets detached from its bus, which is a FIXME.
+ */
+ ret = msc_buffer_free_unless_used(msc);
+ WARN_ON_ONCE(ret);
}
static struct intel_th_driver intel_th_msc_driver = {
@@ -1493,6 +1512,7 @@ static struct intel_th_driver intel_th_msc_driver = {
.activate = intel_th_msc_activate,
.deactivate = intel_th_msc_deactivate,
.fops = &intel_th_msc_fops,
+ .attr_group = &msc_output_group,
.driver = {
.name = "msc",
.owner = THIS_MODULE,
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index bca7a2ac0..5e25c7eb3 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -75,6 +75,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0a80),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Broxton B-step */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1a8e),
+ .driver_data = (kernel_ulong_t)0,
+ },
{ 0 },
};
diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c
index 57cbfdcc7..35738b5bf 100644
--- a/drivers/hwtracing/intel_th/pti.c
+++ b/drivers/hwtracing/intel_th/pti.c
@@ -200,7 +200,6 @@ static int intel_th_pti_probe(struct intel_th_device *thdev)
struct resource *res;
struct pti_device *pti;
void __iomem *base;
- int ret;
res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
if (!res)
@@ -219,10 +218,6 @@ static int intel_th_pti_probe(struct intel_th_device *thdev)
read_hw_config(pti);
- ret = sysfs_create_group(&dev->kobj, &pti_output_group);
- if (ret)
- return ret;
-
dev_set_drvdata(dev, pti);
return 0;
@@ -237,6 +232,7 @@ static struct intel_th_driver intel_th_pti_driver = {
.remove = intel_th_pti_remove,
.activate = intel_th_pti_activate,
.deactivate = intel_th_pti_deactivate,
+ .attr_group = &pti_output_group,
.driver = {
.name = "pti",
.owner = THIS_MODULE,
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index de80d45d8..ff31108b0 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -67,9 +67,24 @@ static ssize_t channels_show(struct device *dev,
static DEVICE_ATTR_RO(channels);
+static ssize_t hw_override_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct stm_device *stm = to_stm_device(dev);
+ int ret;
+
+ ret = sprintf(buf, "%u\n", stm->data->hw_override);
+
+ return ret;
+}
+
+static DEVICE_ATTR_RO(hw_override);
+
static struct attribute *stm_attrs[] = {
&dev_attr_masters.attr,
&dev_attr_channels.attr,
+ &dev_attr_hw_override.attr,
NULL,
};
@@ -546,8 +561,6 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
if (ret)
goto err_free;
- ret = 0;
-
if (stm->data->link)
ret = stm->data->link(stm->data, stmf->output.master,
stmf->output.channel);
@@ -668,18 +681,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
stm->dev.parent = parent;
stm->dev.release = stm_device_release;
- err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
- if (err)
- goto err_device;
-
- err = device_add(&stm->dev);
- if (err)
- goto err_device;
-
mutex_init(&stm->link_mutex);
spin_lock_init(&stm->link_lock);
INIT_LIST_HEAD(&stm->link_list);
+ /* initialize the object before it is accessible via sysfs */
spin_lock_init(&stm->mc_lock);
mutex_init(&stm->policy_mutex);
stm->sw_nmasters = nmasters;
@@ -687,9 +693,19 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
stm->data = stm_data;
stm_data->stm = stm;
+ err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
+ if (err)
+ goto err_device;
+
+ err = device_add(&stm->dev);
+ if (err)
+ goto err_device;
+
return 0;
err_device:
+ unregister_chrdev(stm->major, stm_data->name);
+
/* matches device_initialize() above */
put_device(&stm->dev);
err_free:
diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c
index 310adf57e..a86612d98 100644
--- a/drivers/hwtracing/stm/dummy_stm.c
+++ b/drivers/hwtracing/stm/dummy_stm.c
@@ -46,9 +46,7 @@ static struct stm_data dummy_stm[DUMMY_STM_MAX];
static int nr_dummies = 4;
-module_param(nr_dummies, int, 0600);
-
-static unsigned int dummy_stm_nr;
+module_param(nr_dummies, int, 0400);
static unsigned int fail_mode;
@@ -65,12 +63,12 @@ static int dummy_stm_link(struct stm_data *data, unsigned int master,
static int dummy_stm_init(void)
{
- int i, ret = -ENOMEM, __nr_dummies = ACCESS_ONCE(nr_dummies);
+ int i, ret = -ENOMEM;
- if (__nr_dummies < 0 || __nr_dummies > DUMMY_STM_MAX)
+ if (nr_dummies < 0 || nr_dummies > DUMMY_STM_MAX)
return -EINVAL;
- for (i = 0; i < __nr_dummies; i++) {
+ for (i = 0; i < nr_dummies; i++) {
dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i);
if (!dummy_stm[i].name)
goto fail_unregister;
@@ -86,8 +84,6 @@ static int dummy_stm_init(void)
goto fail_free;
}
- dummy_stm_nr = __nr_dummies;
-
return 0;
fail_unregister:
@@ -105,7 +101,7 @@ static void dummy_stm_exit(void)
{
int i;
- for (i = 0; i < dummy_stm_nr; i++) {
+ for (i = 0; i < nr_dummies; i++) {
stm_unregister_device(&dummy_stm[i]);
kfree(dummy_stm[i].name);
}
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
index 0133571b5..3da7b673a 100644
--- a/drivers/hwtracing/stm/heartbeat.c
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -26,7 +26,7 @@
static int nr_devs = 4;
static int interval_ms = 10;
-module_param(nr_devs, int, 0600);
+module_param(nr_devs, int, 0400);
module_param(interval_ms, int, 0600);
static struct stm_heartbeat {
@@ -35,8 +35,6 @@ static struct stm_heartbeat {
unsigned int active;
} stm_heartbeat[STM_HEARTBEAT_MAX];
-static unsigned int nr_instances;
-
static const char str[] = "heartbeat stm source driver is here to serve you";
static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr)
@@ -74,12 +72,12 @@ static void stm_heartbeat_unlink(struct stm_source_data *data)
static int stm_heartbeat_init(void)
{
- int i, ret = -ENOMEM, __nr_instances = ACCESS_ONCE(nr_devs);
+ int i, ret = -ENOMEM;
- if (__nr_instances < 0 || __nr_instances > STM_HEARTBEAT_MAX)
+ if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
return -EINVAL;
- for (i = 0; i < __nr_instances; i++) {
+ for (i = 0; i < nr_devs; i++) {
stm_heartbeat[i].data.name =
kasprintf(GFP_KERNEL, "heartbeat.%d", i);
if (!stm_heartbeat[i].data.name)
@@ -98,8 +96,6 @@ static int stm_heartbeat_init(void)
goto fail_free;
}
- nr_instances = __nr_instances;
-
return 0;
fail_unregister:
@@ -116,7 +112,7 @@ static void stm_heartbeat_exit(void)
{
int i;
- for (i = 0; i < nr_instances; i++) {
+ for (i = 0; i < nr_devs; i++) {
stm_source_unregister_device(&stm_heartbeat[i].data);
kfree(stm_heartbeat[i].data.name);
}
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 1db189657..6c0ae2996 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -107,8 +107,7 @@ stp_policy_node_masters_store(struct config_item *item, const char *page,
goto unlock;
/* must be within [sw_start..sw_end], which is an inclusive range */
- if (first > INT_MAX || last > INT_MAX || first > last ||
- first < stm->data->sw_start ||
+ if (first > last || first < stm->data->sw_start ||
last > stm->data->sw_end) {
ret = -ERANGE;
goto unlock;
@@ -342,7 +341,7 @@ stp_policies_make(struct config_group *group, const char *name)
return ERR_PTR(-EINVAL);
}
- *p++ = '\0';
+ *p = '\0';
stm = stm_find_device(devname);
kfree(devname);
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 9d233bbde..a8e89df66 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -617,7 +617,7 @@ const struct i2c_algorithm i2c_bit_algo = {
};
EXPORT_SYMBOL(i2c_bit_algo);
-const struct i2c_adapter_quirks i2c_bit_quirk_no_clk_stretch = {
+static const struct i2c_adapter_quirks i2c_bit_quirk_no_clk_stretch = {
.flags = I2C_AQ_NO_CLK_STRETCH,
};
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 0967e1a5b..f167021b8 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -663,7 +663,7 @@ config I2C_MT65XX
config I2C_MV64XXX
tristate "Marvell mv64xxx I2C Controller"
- depends on MV64X60 || PLAT_ORION || ARCH_SUNXI
+ depends on MV64X60 || PLAT_ORION || ARCH_SUNXI || ARCH_MVEBU
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Marvell 64xxx line of host bridges.
@@ -965,7 +965,7 @@ config I2C_XILINX
config I2C_XLR
tristate "Netlogic XLR and Sigma Designs I2C support"
- depends on CPU_XLR || ARCH_TANGOX
+ depends on CPU_XLR || ARCH_TANGO
help
This driver enables support for the on-chip I2C interface of
the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs.
@@ -985,6 +985,7 @@ config I2C_XLP9XX
config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
+ depends on HAS_DMA
depends on ARCH_RENESAS || COMPILE_TEST
select I2C_SLAVE
help
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 921d32bfc..f23372669 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -1013,7 +1013,7 @@ static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
error:
if (ret != -EPROBE_DEFER)
- dev_info(dev->dev, "can't use DMA, error %d\n", ret);
+ dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
if (dma->chan_rx)
dma_release_channel(dma->chan_rx);
if (dma->chan_tx)
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index b9f0fff4e..19c843828 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -267,7 +267,7 @@ static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
iproc_i2c->msg = msg;
/* format and load slave address into the TX FIFO */
- addr = msg->addr << 1 | (msg->flags & I2C_M_RD ? 1 : 0);
+ addr = i2c_8bit_addr_from_msg(msg);
writel(addr, iproc_i2c->base + M_TX_OFFSET);
/*
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c
index 2c9d9b1c8..ac9f47679 100644
--- a/drivers/i2c/busses/i2c-bcm-kona.c
+++ b/drivers/i2c/busses/i2c-bcm-kona.c
@@ -501,10 +501,7 @@ static int bcm_kona_i2c_do_addr(struct bcm_kona_i2c_dev *dev,
return -EREMOTEIO;
}
} else {
- addr = msg->addr << 1;
-
- if (msg->flags & I2C_M_RD)
- addr |= 1;
+ addr = i2c_8bit_addr_from_msg(msg);
if (bcm_kona_i2c_write_byte(dev, addr, 0) < 0)
return -EREMOTEIO;
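
This conversion, here and in the neighboring i2c bus drivers, switches the open-coded address formatting to the i2c_8bit_addr_from_msg() helper added to include/linux/i2c.h in this kernel cycle; to the best of my knowledge it is simply:

static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
{
	return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0);
}

i.e. exactly the shift-and-OR it replaces, centralized in one place.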
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 4a45408dd..6a8cfc134 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -446,9 +446,7 @@ static int brcmstb_i2c_do_addr(struct brcmstb_i2c_dev *dev,
}
} else {
- addr = msg->addr << 1;
- if (msg->flags & I2C_M_RD)
- addr |= 1;
+ addr = i2c_8bit_addr_from_msg(msg);
bsc_writel(dev, addr, chip_address);
}
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index b167ab253..ee57c1e86 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -197,9 +197,7 @@ static void cpm_i2c_parse_message(struct i2c_adapter *adap,
tbdf = cpm->tbase + tx;
rbdf = cpm->rbase + rx;
- addr = pmsg->addr << 1;
- if (pmsg->flags & I2C_M_RD)
- addr |= 1;
+ addr = i2c_8bit_addr_from_msg(pmsg);
tb = cpm->txbuf[tx];
rb = cpm->rxbuf[rx];
diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c
index 1600edd57..f2eb4f765 100644
--- a/drivers/i2c/busses/i2c-dln2.c
+++ b/drivers/i2c/busses/i2c-dln2.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/mfd/dln2.h>
+#include <linux/acpi.h>
#define DLN2_I2C_MODULE_ID 0x03
#define DLN2_I2C_CMD(cmd) DLN2_CMD(cmd, DLN2_I2C_MODULE_ID)
@@ -210,6 +211,7 @@ static int dln2_i2c_probe(struct platform_device *pdev)
dln2->adapter.algo = &dln2_i2c_usb_algorithm;
dln2->adapter.quirks = &dln2_i2c_quirks;
dln2->adapter.dev.parent = dev;
+ ACPI_COMPANION_SET(&dln2->adapter.dev, ACPI_COMPANION(&pdev->dev));
dln2->adapter.dev.of_node = dev->of_node;
i2c_set_adapdata(&dln2->adapter, dln2);
snprintf(dln2->adapter.name, sizeof(dln2->adapter.name), "%s-%s-%d",
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index f54ece8fc..c0e3ada02 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -861,14 +861,8 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
#endif
static const struct dev_pm_ops exynos5_i2c_dev_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .suspend_noirq = exynos5_i2c_suspend_noirq,
- .resume_noirq = exynos5_i2c_resume_noirq,
- .freeze_noirq = exynos5_i2c_suspend_noirq,
- .thaw_noirq = exynos5_i2c_resume_noirq,
- .poweroff_noirq = exynos5_i2c_suspend_noirq,
- .restore_noirq = exynos5_i2c_resume_noirq,
-#endif
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos5_i2c_suspend_noirq,
+ exynos5_i2c_resume_noirq)
};
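
[Editor's note] SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) expands, when CONFIG_PM_SLEEP is enabled, to roughly the six assignments the hunk deletes — a sketch of the expansion:

	.suspend_noirq  = suspend_fn,  .resume_noirq  = resume_fn,
	.freeze_noirq   = suspend_fn,  .thaw_noirq    = resume_fn,
	.poweroff_noirq = suspend_fn,  .restore_noirq = resume_fn,

and to nothing otherwise, which is why the hand-rolled #ifdef block can go.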
static struct platform_driver exynos5_i2c_driver = {
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 585a3b791..4a60ad214 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -94,6 +94,7 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/platform_data/itco_wdt.h>
+#include <linux/pm_runtime.h>
#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
defined CONFIG_DMI
@@ -244,6 +245,13 @@ struct i801_priv {
struct platform_device *mux_pdev;
#endif
struct platform_device *tco_pdev;
+
+ /*
+ * If set to true the host controller registers are reserved for
+ * ACPI AML use. Protected by acpi_lock.
+ */
+ bool acpi_reserved;
+ struct mutex acpi_lock;
};
#define FEATURE_SMBUS_PEC (1 << 0)
@@ -714,9 +722,17 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
{
int hwpec;
int block = 0;
- int ret, xact = 0;
+ int ret = 0, xact = 0;
struct i801_priv *priv = i2c_get_adapdata(adap);
+ mutex_lock(&priv->acpi_lock);
+ if (priv->acpi_reserved) {
+ mutex_unlock(&priv->acpi_lock);
+ return -EBUSY;
+ }
+
+ pm_runtime_get_sync(&priv->pci_dev->dev);
+
hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
&& size != I2C_SMBUS_QUICK
&& size != I2C_SMBUS_I2C_BLOCK_DATA;
@@ -773,7 +789,8 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
default:
dev_err(&priv->pci_dev->dev, "Unsupported transaction %d\n",
size);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto out;
}
if (hwpec) /* enable/disable hardware PEC */
@@ -796,11 +813,11 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
if (block)
- return ret;
+ goto out;
if (ret)
- return ret;
+ goto out;
if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK))
- return 0;
+ goto out;
switch (xact & 0x7f) {
case I801_BYTE: /* Result put in SMBHSTDAT0 */
@@ -812,7 +829,12 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
(inb_p(SMBHSTDAT1(priv)) << 8);
break;
}
- return 0;
+
+out:
+ pm_runtime_mark_last_busy(&priv->pci_dev->dev);
+ pm_runtime_put_autosuspend(&priv->pci_dev->dev);
+ mutex_unlock(&priv->acpi_lock);
+ return ret;
}
@@ -1249,6 +1271,83 @@ static void i801_add_tco(struct i801_priv *priv)
priv->tco_pdev = pdev;
}
+#ifdef CONFIG_ACPI
+static acpi_status
+i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+ u64 *value, void *handler_context, void *region_context)
+{
+ struct i801_priv *priv = handler_context;
+ struct pci_dev *pdev = priv->pci_dev;
+ acpi_status status;
+
+ /*
+ * Once BIOS AML code touches the OpRegion we warn and inhibit any
+ * further access from the driver itself. This device is now owned
+ * by the system firmware.
+ */
+ mutex_lock(&priv->acpi_lock);
+
+ if (!priv->acpi_reserved) {
+ priv->acpi_reserved = true;
+
+ dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
+ dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n");
+
+ /*
+ * BIOS is accessing the host controller so prevent it from
+ * suspending automatically from now on.
+ */
+ pm_runtime_get_sync(&pdev->dev);
+ }
+
+ if ((function & ACPI_IO_MASK) == ACPI_READ)
+ status = acpi_os_read_port(address, (u32 *)value, bits);
+ else
+ status = acpi_os_write_port(address, (u32)*value, bits);
+
+ mutex_unlock(&priv->acpi_lock);
+
+ return status;
+}
+
+static int i801_acpi_probe(struct i801_priv *priv)
+{
+ struct acpi_device *adev;
+ acpi_status status;
+
+ adev = ACPI_COMPANION(&priv->pci_dev->dev);
+ if (adev) {
+ status = acpi_install_address_space_handler(adev->handle,
+ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler,
+ NULL, priv);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ }
+
+ return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]);
+}
+
+static void i801_acpi_remove(struct i801_priv *priv)
+{
+ struct acpi_device *adev;
+
+ adev = ACPI_COMPANION(&priv->pci_dev->dev);
+ if (!adev)
+ return;
+
+ acpi_remove_address_space_handler(adev->handle,
+ ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler);
+
+ mutex_lock(&priv->acpi_lock);
+ if (priv->acpi_reserved)
+ pm_runtime_put(&priv->pci_dev->dev);
+ mutex_unlock(&priv->acpi_lock);
+}
+#else
+static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; }
+static inline void i801_acpi_remove(struct i801_priv *priv) { }
+#endif
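
[Editor's note] The handler/driver handshake above reduces to a mutex-protected flag checked on both sides. A minimal sketch of the transfer-side guard, with hypothetical names (my_priv, my_xfer):

	static int my_xfer(struct my_priv *priv)
	{
		mutex_lock(&priv->acpi_lock);
		if (priv->acpi_reserved) {	/* firmware owns the controller */
			mutex_unlock(&priv->acpi_lock);
			return -EBUSY;
		}
		/* ... perform the SMBus transaction ... */
		mutex_unlock(&priv->acpi_lock);
		return 0;
	}

Once AML touches the OpRegion the flag is never cleared, so the driver permanently yields the hardware to firmware.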
+
static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
unsigned char temp;
@@ -1266,6 +1365,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
priv->adapter.dev.parent = &dev->dev;
ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev));
priv->adapter.retries = 3;
+ mutex_init(&priv->acpi_lock);
priv->pci_dev = dev;
switch (dev->device) {
@@ -1328,10 +1428,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
}
- err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
- if (err) {
+ if (i801_acpi_probe(priv))
return -ENODEV;
- }
err = pcim_iomap_regions(dev, 1 << SMBBAR,
dev_driver_string(&dev->dev));
@@ -1340,6 +1438,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
"Failed to request SMBus region 0x%lx-0x%Lx\n",
priv->smba,
(unsigned long long)pci_resource_end(dev, SMBBAR));
+ i801_acpi_remove(priv);
return err;
}
@@ -1404,6 +1503,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
err = i2c_add_adapter(&priv->adapter);
if (err) {
dev_err(&dev->dev, "Failed to add SMBus adapter\n");
+ i801_acpi_remove(priv);
return err;
}
@@ -1413,6 +1513,11 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
pci_set_drvdata(dev, priv);
+ pm_runtime_set_autosuspend_delay(&dev->dev, 1000);
+ pm_runtime_use_autosuspend(&dev->dev);
+ pm_runtime_put_autosuspend(&dev->dev);
+ pm_runtime_allow(&dev->dev);
+
return 0;
}
@@ -1420,8 +1525,12 @@ static void i801_remove(struct pci_dev *dev)
{
struct i801_priv *priv = pci_get_drvdata(dev);
+ pm_runtime_forbid(&dev->dev);
+ pm_runtime_get_noresume(&dev->dev);
+
i801_del_mux(priv);
i2c_del_adapter(&priv->adapter);
+ i801_acpi_remove(priv);
pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
platform_device_unregister(priv->tco_pdev);
@@ -1433,34 +1542,32 @@ static void i801_remove(struct pci_dev *dev)
}
#ifdef CONFIG_PM
-static int i801_suspend(struct pci_dev *dev, pm_message_t mesg)
+static int i801_suspend(struct device *dev)
{
- struct i801_priv *priv = pci_get_drvdata(dev);
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct i801_priv *priv = pci_get_drvdata(pci_dev);
- pci_save_state(dev);
- pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
- pci_set_power_state(dev, pci_choose_state(dev, mesg));
+ pci_write_config_byte(pci_dev, SMBHSTCFG, priv->original_hstcfg);
return 0;
}
-static int i801_resume(struct pci_dev *dev)
+static int i801_resume(struct device *dev)
{
- pci_set_power_state(dev, PCI_D0);
- pci_restore_state(dev);
return 0;
}
-#else
-#define i801_suspend NULL
-#define i801_resume NULL
#endif
+static UNIVERSAL_DEV_PM_OPS(i801_pm_ops, i801_suspend,
+ i801_resume, NULL);
+
static struct pci_driver i801_driver = {
.name = "i801_smbus",
.id_table = i801_ids,
.probe = i801_probe,
.remove = i801_remove,
- .suspend = i801_suspend,
- .resume = i801_resume,
+ .driver = {
+ .pm = &i801_pm_ops,
+ },
};
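
[Editor's note] The conversion above moves from the legacy pci_driver .suspend/.resume hooks to dev_pm_ops, letting the PCI core handle state save/restore and power-state changes. A minimal sketch for a hypothetical driver (my_suspend/my_resume supplied by the driver; probe and id_table omitted):

	static UNIVERSAL_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume, NULL);

	static struct pci_driver my_driver = {
		.name   = "my_pci_dev",
		.driver = {
			.pm = &my_pm_ops,
		},
	};

UNIVERSAL_DEV_PM_OPS wires the same callback pair into both the system-sleep and the runtime-PM slots, which is what lets the new autosuspend calls in probe take effect.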
static int __init i2c_i801_init(void)
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index b6c080334..cdaa7be2c 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -269,7 +269,7 @@ static int iic_smbus_quick(struct ibm_iic_private* dev, const struct i2c_msg* p)
ndelay(t->hd_sta);
/* Send address */
- v = (u8)((p->addr << 1) | ((p->flags & I2C_M_RD) ? 1 : 0));
+ v = i2c_8bit_addr_from_msg(p);
for (i = 0, mask = 0x80; i < 8; ++i, mask >>= 1){
out_8(&iic->directcntl, sda);
ndelay(t->low / 2);
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 379ef9c31..ea20425b6 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -751,9 +751,7 @@ static unsigned int img_i2c_atomic(struct img_i2c *i2c,
switch (i2c->at_cur_cmd) {
case CMD_GEN_START:
next_cmd = CMD_GEN_DATA;
- next_data = (i2c->msg.addr << 1);
- if (i2c->msg.flags & I2C_M_RD)
- next_data |= 0x1;
+ next_data = i2c_8bit_addr_from_msg(&i2c->msg);
break;
case CMD_GEN_DATA:
if (i2c->line_status & LINESTAT_INPUT_HELD_V)
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 1ca7ef231..1844bc9f7 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -525,7 +525,7 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode, i2c_imx, IMX_I2C_I2CR);
/* Wait controller to be stable */
- udelay(50);
+ usleep_range(50, 150);
/* Start I2C transaction */
temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index 72d6161cf..85cbe4b55 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -50,10 +50,7 @@ iic_cook_addr(struct i2c_msg *msg)
{
unsigned char addr;
- addr = (msg->addr << 1);
-
- if (msg->flags & I2C_M_RD)
- addr |= 1;
+ addr = i2c_8bit_addr_from_msg(msg);
return addr;
}
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 8560a13bf..586a15205 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -133,9 +133,7 @@ static void i2c_lpc2k_pump_msg(struct lpc2k_i2c *i2c)
case M_START:
case M_REPSTART:
/* Start bit was just sent out, send out addr and dir */
- data = i2c->msg->addr << 1;
- if (i2c->msg->flags & I2C_M_RD)
- data |= 1;
+ data = i2c_8bit_addr_from_msg(i2c->msg);
writel(data, i2c->base + LPC24XX_I2DAT);
writel(LPC24XX_STA, i2c->base + LPC24XX_I2CONCLR);
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 453358b4d..d9373e60b 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -413,10 +413,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
else
writew(I2C_FS_START_CON, i2c->base + OFFSET_EXT_CONF);
- addr_reg = msgs->addr << 1;
- if (i2c->op == I2C_MASTER_RD)
- addr_reg |= 0x1;
-
+ addr_reg = i2c_8bit_addr_from_msg(msgs);
writew(addr_reg, i2c->base + OFFSET_SLAVE_ADDR);
/* Clear interrupt status */
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 43207f52e..b4dec0841 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -134,9 +134,7 @@ struct mv64xxx_i2c_data {
int rc;
u32 freq_m;
u32 freq_n;
-#if defined(CONFIG_HAVE_CLK)
struct clk *clk;
-#endif
wait_queue_head_t waitq;
spinlock_t lock;
struct i2c_msg *msg;
@@ -757,7 +755,6 @@ static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
#ifdef CONFIG_OF
-#ifdef CONFIG_HAVE_CLK
static int
mv64xxx_calc_freq(struct mv64xxx_i2c_data *drv_data,
const int tclk, const int n, const int m)
@@ -791,25 +788,20 @@ mv64xxx_find_baud_factors(struct mv64xxx_i2c_data *drv_data,
return false;
return true;
}
-#endif /* CONFIG_HAVE_CLK */
static int
mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
struct device *dev)
{
- /* CLK is mandatory when using DT to describe the i2c bus. We
- * need to know tclk in order to calculate bus clock
- * factors.
- */
-#if !defined(CONFIG_HAVE_CLK)
- /* Have OF but no CLK */
- return -ENODEV;
-#else
const struct of_device_id *device;
struct device_node *np = dev->of_node;
u32 bus_freq, tclk;
int rc = 0;
+ /* CLK is mandatory when using DT to describe the i2c bus. We
+ * need to know tclk in order to calculate bus clock
+ * factors.
+ */
if (IS_ERR(drv_data->clk)) {
rc = -ENODEV;
goto out;
@@ -869,7 +861,6 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
out:
return rc;
-#endif
}
#else /* CONFIG_OF */
static int
@@ -907,14 +898,13 @@ mv64xxx_i2c_probe(struct platform_device *pd)
init_waitqueue_head(&drv_data->waitq);
spin_lock_init(&drv_data->lock);
-#if defined(CONFIG_HAVE_CLK)
/* Not all platforms have a clk */
drv_data->clk = devm_clk_get(&pd->dev, NULL);
- if (!IS_ERR(drv_data->clk)) {
- clk_prepare(drv_data->clk);
- clk_enable(drv_data->clk);
- }
-#endif
+ if (IS_ERR(drv_data->clk) && PTR_ERR(drv_data->clk) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (!IS_ERR(drv_data->clk))
+ clk_prepare_enable(drv_data->clk);
+
if (pdata) {
drv_data->freq_m = pdata->freq_m;
drv_data->freq_n = pdata->freq_n;
@@ -964,13 +954,10 @@ exit_reset:
if (!IS_ERR_OR_NULL(drv_data->rstc))
reset_control_assert(drv_data->rstc);
exit_clk:
-#if defined(CONFIG_HAVE_CLK)
/* Not all platforms have a clk */
- if (!IS_ERR(drv_data->clk)) {
- clk_disable(drv_data->clk);
- clk_unprepare(drv_data->clk);
- }
-#endif
+ if (!IS_ERR(drv_data->clk))
+ clk_disable_unprepare(drv_data->clk);
+
return rc;
}
@@ -983,13 +970,9 @@ mv64xxx_i2c_remove(struct platform_device *dev)
free_irq(drv_data->irq, drv_data);
if (!IS_ERR_OR_NULL(drv_data->rstc))
reset_control_assert(drv_data->rstc);
-#if defined(CONFIG_HAVE_CLK)
/* Not all platforms have a clk */
- if (!IS_ERR(drv_data->clk)) {
- clk_disable(drv_data->clk);
- clk_unprepare(drv_data->clk);
- }
-#endif
+ if (!IS_ERR(drv_data->clk))
+ clk_disable_unprepare(drv_data->clk);
return 0;
}
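
[Editor's note] The clk hunks above drop the CONFIG_HAVE_CLK guards — the clk API compiles to no-op stubs when clock support is off — and add -EPROBE_DEFER handling. A minimal sketch of the optional-clock probe pattern, with a hypothetical helper name:

	static int my_get_optional_clk(struct device *dev, struct clk **out)
	{
		struct clk *clk = devm_clk_get(dev, NULL);

		if (PTR_ERR(clk) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* provider not ready yet */
		if (!IS_ERR(clk))
			clk_prepare_enable(clk);	/* not all platforms have a clk */
		*out = clk;
		return 0;
	}

Error pointers other than -EPROBE_DEFER are kept in *out so later IS_ERR() checks keep skipping the clock, mirroring the driver's own logic.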
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 70b3c9158..42fcc9458 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -127,7 +127,7 @@ static struct pci_driver nforce2_driver;
/* For multiplexing support, we need a global reference to the 1st
SMBus channel */
-#if defined CONFIG_I2C_NFORCE2_S4985 || defined CONFIG_I2C_NFORCE2_S4985_MODULE
+#if IS_ENABLED(CONFIG_I2C_NFORCE2_S4985)
struct i2c_adapter *nforce2_smbus;
EXPORT_SYMBOL_GPL(nforce2_smbus);
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 11b7b8731..dfa7a4b4a 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -178,10 +178,7 @@ static void ocores_process(struct ocores_i2c *i2c)
if (i2c->nmsgs) { /* end? */
/* send start? */
if (!(msg->flags & I2C_M_NOSTART)) {
- u8 addr = (msg->addr << 1);
-
- if (msg->flags & I2C_M_RD)
- addr |= 1;
+ u8 addr = i2c_8bit_addr_from_msg(msg);
i2c->state = STATE_START;
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 46fb6c429..30ae35146 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -11,6 +11,7 @@
* warranty of any kind, whether express or implied.
*/
+#include <linux/atomic.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -29,13 +30,23 @@
/* Register offsets */
#define SW_TWSI 0x00
#define TWSI_INT 0x10
+#define SW_TWSI_EXT 0x18
/* Controller command patterns */
#define SW_TWSI_V BIT_ULL(63) /* Valid bit */
+#define SW_TWSI_EIA BIT_ULL(61) /* Extended internal address */
#define SW_TWSI_R BIT_ULL(56) /* Result or read bit */
+#define SW_TWSI_SOVR BIT_ULL(55) /* Size override */
+#define SW_TWSI_SIZE_SHIFT 52
+#define SW_TWSI_ADDR_SHIFT 40
+#define SW_TWSI_IA_SHIFT 32 /* Internal address */
/* Controller opcode word (bits 60:57) */
#define SW_TWSI_OP_SHIFT 57
+#define SW_TWSI_OP_7 (0ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_7_IA (1ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_10 (2ULL << SW_TWSI_OP_SHIFT)
+#define SW_TWSI_OP_10_IA (3ULL << SW_TWSI_OP_SHIFT)
#define SW_TWSI_OP_TWSI_CLK (4ULL << SW_TWSI_OP_SHIFT)
#define SW_TWSI_OP_EOP (6ULL << SW_TWSI_OP_SHIFT) /* Extended opcode */
@@ -48,46 +59,93 @@
#define SW_TWSI_EOP_TWSI_RST (SW_TWSI_OP_EOP | 7ULL << SW_TWSI_EOP_SHIFT)
/* Controller command and status bits */
-#define TWSI_CTL_CE 0x80
+#define TWSI_CTL_CE 0x80 /* High level controller enable */
#define TWSI_CTL_ENAB 0x40 /* Bus enable */
#define TWSI_CTL_STA 0x20 /* Master-mode start, HW clears when done */
#define TWSI_CTL_STP 0x10 /* Master-mode stop, HW clears when done */
#define TWSI_CTL_IFLG 0x08 /* HW event, SW writes 0 to ACK */
#define TWSI_CTL_AAK 0x04 /* Assert ACK */
-/* Some status values */
+/* Status values */
+#define STAT_ERROR 0x00
#define STAT_START 0x08
-#define STAT_RSTART 0x10
+#define STAT_REP_START 0x10
#define STAT_TXADDR_ACK 0x18
+#define STAT_TXADDR_NAK 0x20
#define STAT_TXDATA_ACK 0x28
+#define STAT_TXDATA_NAK 0x30
+#define STAT_LOST_ARB_38 0x38
#define STAT_RXADDR_ACK 0x40
+#define STAT_RXADDR_NAK 0x48
#define STAT_RXDATA_ACK 0x50
+#define STAT_RXDATA_NAK 0x58
+#define STAT_SLAVE_60 0x60
+#define STAT_LOST_ARB_68 0x68
+#define STAT_SLAVE_70 0x70
+#define STAT_LOST_ARB_78 0x78
+#define STAT_SLAVE_80 0x80
+#define STAT_SLAVE_88 0x88
+#define STAT_GENDATA_ACK 0x90
+#define STAT_GENDATA_NAK 0x98
+#define STAT_SLAVE_A0 0xA0
+#define STAT_SLAVE_A8 0xA8
+#define STAT_LOST_ARB_B0 0xB0
+#define STAT_SLAVE_LOST 0xB8
+#define STAT_SLAVE_NAK 0xC0
+#define STAT_SLAVE_ACK 0xC8
+#define STAT_AD2W_ACK 0xD0
+#define STAT_AD2W_NAK 0xD8
#define STAT_IDLE 0xF8
/* TWSI_INT values */
+#define TWSI_INT_ST_INT BIT_ULL(0)
+#define TWSI_INT_TS_INT BIT_ULL(1)
+#define TWSI_INT_CORE_INT BIT_ULL(2)
+#define TWSI_INT_ST_EN BIT_ULL(4)
+#define TWSI_INT_TS_EN BIT_ULL(5)
#define TWSI_INT_CORE_EN BIT_ULL(6)
#define TWSI_INT_SDA_OVR BIT_ULL(8)
#define TWSI_INT_SCL_OVR BIT_ULL(9)
+#define TWSI_INT_SDA BIT_ULL(10)
+#define TWSI_INT_SCL BIT_ULL(11)
+
+#define I2C_OCTEON_EVENT_WAIT 80 /* microseconds */
struct octeon_i2c {
wait_queue_head_t queue;
struct i2c_adapter adap;
int irq;
+ int hlc_irq; /* For cn7890 only */
u32 twsi_freq;
int sys_freq;
void __iomem *twsi_base;
struct device *dev;
+ bool hlc_enabled;
+ bool broken_irq_mode;
+ bool broken_irq_check;
+ void (*int_enable)(struct octeon_i2c *);
+ void (*int_disable)(struct octeon_i2c *);
+ void (*hlc_int_enable)(struct octeon_i2c *);
+ void (*hlc_int_disable)(struct octeon_i2c *);
+ atomic_t int_enable_cnt;
+ atomic_t hlc_int_enable_cnt;
};
+static void octeon_i2c_writeq_flush(u64 val, void __iomem *addr)
+{
+ __raw_writeq(val, addr);
+ __raw_readq(addr); /* wait for write to land */
+}
+
/**
- * octeon_i2c_write_sw - write an I2C core register
+ * octeon_i2c_reg_write - write an I2C core register
* @i2c: The struct octeon_i2c
* @eop_reg: Register selector
* @data: Value to be written
*
* The I2C core registers are accessed indirectly via the SW_TWSI CSR.
*/
-static void octeon_i2c_write_sw(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
+static void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
{
u64 tmp;
@@ -97,8 +155,13 @@ static void octeon_i2c_write_sw(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
} while ((tmp & SW_TWSI_V) != 0);
}
+#define octeon_i2c_ctl_write(i2c, val) \
+ octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_CTL, val)
+#define octeon_i2c_data_write(i2c, val) \
+ octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_DATA, val)
+
/**
- * octeon_i2c_read_sw - read lower bits of an I2C core register
+ * octeon_i2c_reg_read - read lower bits of an I2C core register
* @i2c: The struct octeon_i2c
* @eop_reg: Register selector
*
@@ -106,7 +169,7 @@ static void octeon_i2c_write_sw(struct octeon_i2c *i2c, u64 eop_reg, u8 data)
*
* The I2C core registers are accessed indirectly via the SW_TWSI CSR.
*/
-static u8 octeon_i2c_read_sw(struct octeon_i2c *i2c, u64 eop_reg)
+static u8 octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg)
{
u64 tmp;
@@ -118,6 +181,24 @@ static u8 octeon_i2c_read_sw(struct octeon_i2c *i2c, u64 eop_reg)
return tmp & 0xFF;
}
+#define octeon_i2c_ctl_read(i2c) \
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL)
+#define octeon_i2c_data_read(i2c) \
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA)
+#define octeon_i2c_stat_read(i2c) \
+ octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT)
+
+/**
+ * octeon_i2c_read_int - read the TWSI_INT register
+ * @i2c: The struct octeon_i2c
+ *
+ * Returns the value of the register.
+ */
+static u64 octeon_i2c_read_int(struct octeon_i2c *i2c)
+{
+ return __raw_readq(i2c->twsi_base + TWSI_INT);
+}
+
/**
* octeon_i2c_write_int - write the TWSI_INT register
* @i2c: The struct octeon_i2c
@@ -125,8 +206,7 @@ static u8 octeon_i2c_read_sw(struct octeon_i2c *i2c, u64 eop_reg)
*/
static void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data)
{
- __raw_writeq(data, i2c->twsi_base + TWSI_INT);
- __raw_readq(i2c->twsi_base + TWSI_INT);
+ octeon_i2c_writeq_flush(data, i2c->twsi_base + TWSI_INT);
}
/**
@@ -149,30 +229,96 @@ static void octeon_i2c_int_disable(struct octeon_i2c *i2c)
}
/**
- * octeon_i2c_unblock - unblock the bus
+ * octeon_i2c_int_enable78 - enable the CORE interrupt
+ * @i2c: The struct octeon_i2c
+ *
+ * The interrupt will be asserted when there is a non-STAT_IDLE state in the
+ * SW_TWSI_EOP_TWSI_STAT register.
+ */
+static void octeon_i2c_int_enable78(struct octeon_i2c *i2c)
+{
+ atomic_inc_return(&i2c->int_enable_cnt);
+ enable_irq(i2c->irq);
+}
+
+static void __octeon_i2c_irq_disable(atomic_t *cnt, int irq)
+{
+ int count;
+
+ /*
+ * The interrupt can be disabled in two places, but we only
+ * want to make the disable_irq_nosync() call once, so keep
+ * track with the atomic variable.
+ */
+ count = atomic_dec_if_positive(cnt);
+ if (count >= 0)
+ disable_irq_nosync(irq);
+}
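
[Editor's note] atomic_dec_if_positive() decrements only while the counter is positive and returns the decremented value (negative when nothing was decremented), so the enable/disable pair above races safely — the disable path runs at most once per enable. In sketch form:

	atomic_inc(&cnt);			/* enable side */
	enable_irq(irq);
	...
	if (atomic_dec_if_positive(&cnt) >= 0)	/* disable side */
		disable_irq_nosync(irq);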
+
+/* disable the CORE interrupt */
+static void octeon_i2c_int_disable78(struct octeon_i2c *i2c)
+{
+ __octeon_i2c_irq_disable(&i2c->int_enable_cnt, i2c->irq);
+}
+
+/**
+ * octeon_i2c_hlc_int_enable78 - enable the ST interrupt
* @i2c: The struct octeon_i2c
*
- * If there was a reset while a device was driving 0 to bus, bus is blocked.
- * We toggle it free manually by some clock cycles and send a stop.
+ * The interrupt will be asserted when there is a non-STAT_IDLE state in
+ * the SW_TWSI_EOP_TWSI_STAT register.
+ */
+static void octeon_i2c_hlc_int_enable78(struct octeon_i2c *i2c)
+{
+ atomic_inc_return(&i2c->hlc_int_enable_cnt);
+ enable_irq(i2c->hlc_irq);
+}
+
+/* disable the ST interrupt */
+static void octeon_i2c_hlc_int_disable78(struct octeon_i2c *i2c)
+{
+ __octeon_i2c_irq_disable(&i2c->hlc_int_enable_cnt, i2c->hlc_irq);
+}
+
+/*
+ * Cleanup low-level state & enable high-level controller.
*/
-static void octeon_i2c_unblock(struct octeon_i2c *i2c)
+static void octeon_i2c_hlc_enable(struct octeon_i2c *i2c)
{
- int i;
+ int try = 0;
+ u64 val;
+
+ if (i2c->hlc_enabled)
+ return;
+ i2c->hlc_enabled = true;
+
+ while (1) {
+ val = octeon_i2c_ctl_read(i2c);
+ if (!(val & (TWSI_CTL_STA | TWSI_CTL_STP)))
+ break;
- dev_dbg(i2c->dev, "%s\n", __func__);
+ /* clear IFLG event */
+ if (val & TWSI_CTL_IFLG)
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
+
+ if (try++ > 100) {
+ pr_err("%s: giving up\n", __func__);
+ break;
+ }
- for (i = 0; i < 9; i++) {
- octeon_i2c_write_int(i2c, 0);
- udelay(5);
- octeon_i2c_write_int(i2c, TWSI_INT_SCL_OVR);
- udelay(5);
+ /* spin until any start/stop has finished */
+ udelay(10);
}
- /* hand-crank a STOP */
- octeon_i2c_write_int(i2c, TWSI_INT_SDA_OVR | TWSI_INT_SCL_OVR);
- udelay(5);
- octeon_i2c_write_int(i2c, TWSI_INT_SDA_OVR);
- udelay(5);
- octeon_i2c_write_int(i2c, 0);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_CE | TWSI_CTL_AAK | TWSI_CTL_ENAB);
+}
+
+static void octeon_i2c_hlc_disable(struct octeon_i2c *i2c)
+{
+ if (!i2c->hlc_enabled)
+ return;
+
+ i2c->hlc_enabled = false;
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
}
/* interrupt service routine */
@@ -180,16 +326,44 @@ static irqreturn_t octeon_i2c_isr(int irq, void *dev_id)
{
struct octeon_i2c *i2c = dev_id;
- octeon_i2c_int_disable(i2c);
+ i2c->int_disable(i2c);
+ wake_up(&i2c->queue);
+
+ return IRQ_HANDLED;
+}
+
+/* HLC interrupt service routine */
+static irqreturn_t octeon_i2c_hlc_isr78(int irq, void *dev_id)
+{
+ struct octeon_i2c *i2c = dev_id;
+
+ i2c->hlc_int_disable(i2c);
wake_up(&i2c->queue);
return IRQ_HANDLED;
}
+static bool octeon_i2c_test_iflg(struct octeon_i2c *i2c)
+{
+ return (octeon_i2c_ctl_read(i2c) & TWSI_CTL_IFLG);
+}
-static int octeon_i2c_test_iflg(struct octeon_i2c *i2c)
+static bool octeon_i2c_test_ready(struct octeon_i2c *i2c, bool *first)
{
- return (octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_CTL) & TWSI_CTL_IFLG) != 0;
+ if (octeon_i2c_test_iflg(i2c))
+ return true;
+
+ if (*first) {
+ *first = false;
+ return false;
+ }
+
+ /*
+ * IRQ has signaled an event but IFLG hasn't changed.
+ * Sleep and retry once.
+ */
+ usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
+ return octeon_i2c_test_iflg(i2c);
}
/**
@@ -201,64 +375,493 @@ static int octeon_i2c_test_iflg(struct octeon_i2c *i2c)
static int octeon_i2c_wait(struct octeon_i2c *i2c)
{
long time_left;
+	bool first = true;
- octeon_i2c_int_enable(i2c);
- time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_iflg(i2c),
+ /*
+ * Some chip revisions don't assert the irq in the interrupt
+ * controller. So we must poll for the IFLG change.
+ */
+ if (i2c->broken_irq_mode) {
+ u64 end = get_jiffies_64() + i2c->adap.timeout;
+
+ while (!octeon_i2c_test_iflg(i2c) &&
+ time_before64(get_jiffies_64(), end))
+ usleep_range(I2C_OCTEON_EVENT_WAIT / 2, I2C_OCTEON_EVENT_WAIT);
+
+ return octeon_i2c_test_iflg(i2c) ? 0 : -ETIMEDOUT;
+ }
+
+ i2c->int_enable(i2c);
+ time_left = wait_event_timeout(i2c->queue, octeon_i2c_test_ready(i2c, &first),
i2c->adap.timeout);
- octeon_i2c_int_disable(i2c);
- if (!time_left) {
- dev_dbg(i2c->dev, "%s: timeout\n", __func__);
- return -ETIMEDOUT;
+ i2c->int_disable(i2c);
+
+ if (i2c->broken_irq_check && !time_left &&
+ octeon_i2c_test_iflg(i2c)) {
+ dev_err(i2c->dev, "broken irq connection detected, switching to polling mode.\n");
+ i2c->broken_irq_mode = true;
+ return 0;
}
+ if (!time_left)
+ return -ETIMEDOUT;
+
return 0;
}
+static int octeon_i2c_check_status(struct octeon_i2c *i2c, int final_read)
+{
+ u8 stat = octeon_i2c_stat_read(i2c);
+
+ switch (stat) {
+ /* Everything is fine */
+ case STAT_IDLE:
+ case STAT_AD2W_ACK:
+ case STAT_RXADDR_ACK:
+ case STAT_TXADDR_ACK:
+ case STAT_TXDATA_ACK:
+ return 0;
+
+ /* ACK allowed on pre-terminal bytes only */
+ case STAT_RXDATA_ACK:
+ if (!final_read)
+ return 0;
+ return -EIO;
+
+ /* NAK allowed on terminal byte only */
+ case STAT_RXDATA_NAK:
+ if (final_read)
+ return 0;
+ return -EIO;
+
+ /* Arbitration lost */
+ case STAT_LOST_ARB_38:
+ case STAT_LOST_ARB_68:
+ case STAT_LOST_ARB_78:
+ case STAT_LOST_ARB_B0:
+ return -EAGAIN;
+
+ /* Being addressed as slave, should back off & listen */
+ case STAT_SLAVE_60:
+ case STAT_SLAVE_70:
+ case STAT_GENDATA_ACK:
+ case STAT_GENDATA_NAK:
+ return -EOPNOTSUPP;
+
+ /* Core busy as slave */
+ case STAT_SLAVE_80:
+ case STAT_SLAVE_88:
+ case STAT_SLAVE_A0:
+ case STAT_SLAVE_A8:
+ case STAT_SLAVE_LOST:
+ case STAT_SLAVE_NAK:
+ case STAT_SLAVE_ACK:
+ return -EOPNOTSUPP;
+
+ case STAT_TXDATA_NAK:
+ return -EIO;
+ case STAT_TXADDR_NAK:
+ case STAT_RXADDR_NAK:
+ case STAT_AD2W_NAK:
+ return -ENXIO;
+ default:
+ dev_err(i2c->dev, "unhandled state: %d\n", stat);
+ return -EIO;
+ }
+}
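
[Editor's note] octeon_i2c_check_status() folds the TWSI status byte into standard errno conventions: ACK states map to 0, lost arbitration to -EAGAIN (retryable), an address NAK to -ENXIO (no device), a data NAK to -EIO, and slave-mode states to -EOPNOTSUPP. Callers can then simply propagate the value:

	ret = octeon_i2c_check_status(i2c, false);
	if (ret)
		return ret;	/* the I2C core retries on -EAGAIN */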
+
+static bool octeon_i2c_hlc_test_valid(struct octeon_i2c *i2c)
+{
+ return (__raw_readq(i2c->twsi_base + SW_TWSI) & SW_TWSI_V) == 0;
+}
+
+static bool octeon_i2c_hlc_test_ready(struct octeon_i2c *i2c, bool *first)
+{
+ /* check if valid bit is cleared */
+ if (octeon_i2c_hlc_test_valid(i2c))
+ return true;
+
+ if (*first) {
+ *first = false;
+ return false;
+ }
+
+ /*
+ * IRQ has signaled an event but valid bit isn't cleared.
+ * Sleep and retry once.
+ */
+ usleep_range(I2C_OCTEON_EVENT_WAIT, 2 * I2C_OCTEON_EVENT_WAIT);
+ return octeon_i2c_hlc_test_valid(i2c);
+}
+
+static void octeon_i2c_hlc_int_enable(struct octeon_i2c *i2c)
+{
+ octeon_i2c_write_int(i2c, TWSI_INT_ST_EN);
+}
+
+static void octeon_i2c_hlc_int_clear(struct octeon_i2c *i2c)
+{
+ /* clear ST/TS events, listen for neither */
+ octeon_i2c_write_int(i2c, TWSI_INT_ST_INT | TWSI_INT_TS_INT);
+}
+
/**
- * octeon_i2c_start - send START to the bus
+ * octeon_i2c_hlc_wait - wait for an HLC operation to complete
* @i2c: The struct octeon_i2c
*
- * Returns 0 on success, otherwise a negative errno.
+ * Returns 0 on success, otherwise -ETIMEDOUT.
*/
-static int octeon_i2c_start(struct octeon_i2c *i2c)
+static int octeon_i2c_hlc_wait(struct octeon_i2c *i2c)
{
- int result;
- u8 data;
+	bool first = true;
+ int time_left;
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
- TWSI_CTL_ENAB | TWSI_CTL_STA);
+ /*
+ * Some cn38xx boards don't assert the irq in the interrupt
+ * controller. So we must poll for the valid bit change.
+ */
+ if (i2c->broken_irq_mode) {
+ u64 end = get_jiffies_64() + i2c->adap.timeout;
- result = octeon_i2c_wait(i2c);
- if (result) {
- if (octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT) == STAT_IDLE) {
+ while (!octeon_i2c_hlc_test_valid(i2c) &&
+ time_before64(get_jiffies_64(), end))
+ usleep_range(I2C_OCTEON_EVENT_WAIT / 2, I2C_OCTEON_EVENT_WAIT);
+
+ return octeon_i2c_hlc_test_valid(i2c) ? 0 : -ETIMEDOUT;
+ }
+
+ i2c->hlc_int_enable(i2c);
+ time_left = wait_event_timeout(i2c->queue,
+ octeon_i2c_hlc_test_ready(i2c, &first),
+ i2c->adap.timeout);
+ i2c->hlc_int_disable(i2c);
+ if (!time_left)
+ octeon_i2c_hlc_int_clear(i2c);
+
+ if (i2c->broken_irq_check && !time_left &&
+ octeon_i2c_hlc_test_valid(i2c)) {
+ dev_err(i2c->dev, "broken irq connection detected, switching to polling mode.\n");
+ i2c->broken_irq_mode = true;
+ return 0;
+ }
+
+ if (!time_left)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+/* high-level-controller pure read of up to 8 bytes */
+static int octeon_i2c_hlc_read(struct octeon_i2c *i2c, struct i2c_msg *msgs)
+{
+ int i, j, ret = 0;
+ u64 cmd;
+
+ octeon_i2c_hlc_enable(i2c);
+ octeon_i2c_hlc_int_clear(i2c);
+
+ cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR;
+ /* SIZE */
+ cmd |= (u64)(msgs[0].len - 1) << SW_TWSI_SIZE_SHIFT;
+ /* A */
+ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+ if (msgs[0].flags & I2C_M_TEN)
+ cmd |= SW_TWSI_OP_10;
+ else
+ cmd |= SW_TWSI_OP_7;
+
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+ ret = octeon_i2c_hlc_wait(i2c);
+ if (ret)
+ goto err;
+
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+ if ((cmd & SW_TWSI_R) == 0)
+ return -EAGAIN;
+
+ for (i = 0, j = msgs[0].len - 1; i < msgs[0].len && i < 4; i++, j--)
+ msgs[0].buf[j] = (cmd >> (8 * i)) & 0xff;
+
+ if (msgs[0].len > 4) {
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT);
+ for (i = 0; i < msgs[0].len - 4 && i < 4; i++, j--)
+ msgs[0].buf[j] = (cmd >> (8 * i)) & 0xff;
+ }
+
+err:
+ return ret;
+}
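
[Editor's note] The HLC result register packs the last byte read into bits 7:0, the previous one into bits 15:8, and so on; bytes five through eight land in SW_TWSI_EXT the same way. A worked example of the unpacking loop for a 3-byte read, assuming cmd holds the SW_TWSI value:

	buf[2] = cmd & 0xff;		/* i = 0, j = len - 1 */
	buf[1] = (cmd >> 8) & 0xff;	/* i = 1 */
	buf[0] = (cmd >> 16) & 0xff;	/* i = 2 */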
+
+/* high-level-controller pure write of up to 8 bytes */
+static int octeon_i2c_hlc_write(struct octeon_i2c *i2c, struct i2c_msg *msgs)
+{
+ int i, j, ret = 0;
+ u64 cmd;
+
+ octeon_i2c_hlc_enable(i2c);
+ octeon_i2c_hlc_int_clear(i2c);
+
+ cmd = SW_TWSI_V | SW_TWSI_SOVR;
+ /* SIZE */
+ cmd |= (u64)(msgs[0].len - 1) << SW_TWSI_SIZE_SHIFT;
+ /* A */
+ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+ if (msgs[0].flags & I2C_M_TEN)
+ cmd |= SW_TWSI_OP_10;
+ else
+ cmd |= SW_TWSI_OP_7;
+
+ for (i = 0, j = msgs[0].len - 1; i < msgs[0].len && i < 4; i++, j--)
+ cmd |= (u64)msgs[0].buf[j] << (8 * i);
+
+ if (msgs[0].len > 4) {
+ u64 ext = 0;
+
+ for (i = 0; i < msgs[0].len - 4 && i < 4; i++, j--)
+ ext |= (u64)msgs[0].buf[j] << (8 * i);
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT);
+ }
+
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+ ret = octeon_i2c_hlc_wait(i2c);
+ if (ret)
+ goto err;
+
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+ if ((cmd & SW_TWSI_R) == 0)
+ return -EAGAIN;
+
+ ret = octeon_i2c_check_status(i2c, false);
+
+err:
+ return ret;
+}
+
+/* high-level-controller composite write+read, msg0=addr, msg1=data */
+static int octeon_i2c_hlc_comp_read(struct octeon_i2c *i2c, struct i2c_msg *msgs)
+{
+ int i, j, ret = 0;
+ u64 cmd;
+
+ octeon_i2c_hlc_enable(i2c);
+
+ cmd = SW_TWSI_V | SW_TWSI_R | SW_TWSI_SOVR;
+ /* SIZE */
+ cmd |= (u64)(msgs[1].len - 1) << SW_TWSI_SIZE_SHIFT;
+ /* A */
+ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+ if (msgs[0].flags & I2C_M_TEN)
+ cmd |= SW_TWSI_OP_10_IA;
+ else
+ cmd |= SW_TWSI_OP_7_IA;
+
+ if (msgs[0].len == 2) {
+ u64 ext = 0;
+
+ cmd |= SW_TWSI_EIA;
+ ext = (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
+ cmd |= (u64)msgs[0].buf[1] << SW_TWSI_IA_SHIFT;
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT);
+ } else {
+ cmd |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
+ }
+
+ octeon_i2c_hlc_int_clear(i2c);
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+
+ ret = octeon_i2c_hlc_wait(i2c);
+ if (ret)
+ goto err;
+
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+ if ((cmd & SW_TWSI_R) == 0)
+ return -EAGAIN;
+
+ for (i = 0, j = msgs[1].len - 1; i < msgs[1].len && i < 4; i++, j--)
+ msgs[1].buf[j] = (cmd >> (8 * i)) & 0xff;
+
+ if (msgs[1].len > 4) {
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI_EXT);
+ for (i = 0; i < msgs[1].len - 4 && i < 4; i++, j--)
+ msgs[1].buf[j] = (cmd >> (8 * i)) & 0xff;
+ }
+
+err:
+ return ret;
+}
+
+/* high-level-controller composite write+write, m[0]len<=2, m[1]len<=8 */
+static int octeon_i2c_hlc_comp_write(struct octeon_i2c *i2c, struct i2c_msg *msgs)
+{
+ bool set_ext = false;
+ int i, j, ret = 0;
+ u64 cmd, ext = 0;
+
+ octeon_i2c_hlc_enable(i2c);
+
+ cmd = SW_TWSI_V | SW_TWSI_SOVR;
+ /* SIZE */
+ cmd |= (u64)(msgs[1].len - 1) << SW_TWSI_SIZE_SHIFT;
+ /* A */
+ cmd |= (u64)(msgs[0].addr & 0x7full) << SW_TWSI_ADDR_SHIFT;
+
+ if (msgs[0].flags & I2C_M_TEN)
+ cmd |= SW_TWSI_OP_10_IA;
+ else
+ cmd |= SW_TWSI_OP_7_IA;
+
+ if (msgs[0].len == 2) {
+ cmd |= SW_TWSI_EIA;
+ ext |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
+ set_ext = true;
+ cmd |= (u64)msgs[0].buf[1] << SW_TWSI_IA_SHIFT;
+ } else {
+ cmd |= (u64)msgs[0].buf[0] << SW_TWSI_IA_SHIFT;
+ }
+
+ for (i = 0, j = msgs[1].len - 1; i < msgs[1].len && i < 4; i++, j--)
+ cmd |= (u64)msgs[1].buf[j] << (8 * i);
+
+ if (msgs[1].len > 4) {
+ for (i = 0; i < msgs[1].len - 4 && i < 4; i++, j--)
+ ext |= (u64)msgs[1].buf[j] << (8 * i);
+ set_ext = true;
+ }
+ if (set_ext)
+ octeon_i2c_writeq_flush(ext, i2c->twsi_base + SW_TWSI_EXT);
+
+ octeon_i2c_hlc_int_clear(i2c);
+ octeon_i2c_writeq_flush(cmd, i2c->twsi_base + SW_TWSI);
+
+ ret = octeon_i2c_hlc_wait(i2c);
+ if (ret)
+ goto err;
+
+ cmd = __raw_readq(i2c->twsi_base + SW_TWSI);
+ if ((cmd & SW_TWSI_R) == 0)
+ return -EAGAIN;
+
+ ret = octeon_i2c_check_status(i2c, false);
+
+err:
+ return ret;
+}
+
+/* calculate and set clock divisors */
+static void octeon_i2c_set_clock(struct octeon_i2c *i2c)
+{
+ int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
+ int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
+
+ for (ndiv_idx = 0; ndiv_idx < 8 && delta_hz != 0; ndiv_idx++) {
+ /*
+ * An mdiv value of less than 2 seems to not work well
+ * with ds1337 RTCs, so we constrain it to larger values.
+ */
+ for (mdiv_idx = 15; mdiv_idx >= 2 && delta_hz != 0; mdiv_idx--) {
/*
- * Controller refused to send start flag May
- * be a client is holding SDA low - let's try
- * to free it.
+ * For given ndiv and mdiv values check the
+ * two closest thp values.
*/
- octeon_i2c_unblock(i2c);
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
- TWSI_CTL_ENAB | TWSI_CTL_STA);
- result = octeon_i2c_wait(i2c);
+ tclk = i2c->twsi_freq * (mdiv_idx + 1) * 10;
+ tclk *= (1 << ndiv_idx);
+ thp_base = (i2c->sys_freq / (tclk * 2)) - 1;
+
+ for (inc = 0; inc <= 1; inc++) {
+ thp_idx = thp_base + inc;
+ if (thp_idx < 5 || thp_idx > 0xff)
+ continue;
+
+ foscl = i2c->sys_freq / (2 * (thp_idx + 1));
+ foscl = foscl / (1 << ndiv_idx);
+ foscl = foscl / (mdiv_idx + 1) / 10;
+ diff = abs(foscl - i2c->twsi_freq);
+ if (diff < delta_hz) {
+ delta_hz = diff;
+ thp = thp_idx;
+ mdiv = mdiv_idx;
+ ndiv = ndiv_idx;
+ }
+ }
}
- if (result)
- return result;
+ }
+ octeon_i2c_reg_write(i2c, SW_TWSI_OP_TWSI_CLK, thp);
+ octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_CLKCTL, (mdiv << 3) | ndiv);
+}
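
[Editor's note] The nested search above minimizes |foscl - twsi_freq| over (thp, mdiv, ndiv), where the loop arithmetic amounts to

	foscl = sys_freq / (2 * (thp + 1) * (1 << ndiv) * (mdiv + 1) * 10)

with the integer divisions applied stepwise, so small rounding differences are possible. A quick arithmetic check with sys_freq = 800 MHz, thp = 0x18 (24), mdiv = 2, ndiv = 0: 800000000 / (2 * 25 * 1 * 3 * 10) = 533333 Hz, i.e. roughly 533 kHz before the search refines the divisors toward the requested twsi_freq.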
+
+static int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c)
+{
+ u8 status = 0;
+ int tries;
+
+ /* reset controller */
+ octeon_i2c_reg_write(i2c, SW_TWSI_EOP_TWSI_RST, 0);
+
+ for (tries = 10; tries && status != STAT_IDLE; tries--) {
+ udelay(1);
+ status = octeon_i2c_stat_read(i2c);
+ if (status == STAT_IDLE)
+ break;
}
- data = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
- if ((data != STAT_START) && (data != STAT_RSTART)) {
- dev_err(i2c->dev, "%s: bad status (0x%x)\n", __func__, data);
+ if (status != STAT_IDLE) {
+ dev_err(i2c->dev, "%s: TWSI_RST failed! (0x%x)\n",
+ __func__, status);
return -EIO;
}
+ /* toggle twice to force both teardowns */
+ octeon_i2c_hlc_enable(i2c);
+ octeon_i2c_hlc_disable(i2c);
return 0;
}
+static int octeon_i2c_recovery(struct octeon_i2c *i2c)
+{
+ int ret;
+
+ ret = i2c_recover_bus(&i2c->adap);
+ if (ret)
+ /* recover failed, try hardware re-init */
+ ret = octeon_i2c_init_lowlevel(i2c);
+ return ret;
+}
+
+/**
+ * octeon_i2c_start - send START to the bus
+ * @i2c: The struct octeon_i2c
+ *
+ * Returns 0 on success, otherwise a negative errno.
+ */
+static int octeon_i2c_start(struct octeon_i2c *i2c)
+{
+ int ret;
+ u8 stat;
+
+ octeon_i2c_hlc_disable(i2c);
+
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STA);
+ ret = octeon_i2c_wait(i2c);
+ if (ret)
+ goto error;
+
+ stat = octeon_i2c_stat_read(i2c);
+ if (stat == STAT_START || stat == STAT_REP_START)
+ /* START successful, bail out */
+ return 0;
+
+error:
+ /* START failed, try to recover */
+ ret = octeon_i2c_recovery(i2c);
+ return (ret) ? ret : -EAGAIN;
+}
+
/* send STOP to the bus */
static void octeon_i2c_stop(struct octeon_i2c *i2c)
{
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
- TWSI_CTL_ENAB | TWSI_CTL_STP);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_STP);
}
/**
@@ -276,31 +879,21 @@ static int octeon_i2c_write(struct octeon_i2c *i2c, int target,
const u8 *data, int length)
{
int i, result;
- u8 tmp;
-
- result = octeon_i2c_start(i2c);
- if (result)
- return result;
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, target << 1);
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
+ octeon_i2c_data_write(i2c, target << 1);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
result = octeon_i2c_wait(i2c);
if (result)
return result;
for (i = 0; i < length; i++) {
- tmp = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
-
- if ((tmp != STAT_TXADDR_ACK) && (tmp != STAT_TXDATA_ACK)) {
- dev_err(i2c->dev,
- "%s: bad status before write (0x%x)\n",
- __func__, tmp);
- return -EIO;
- }
+ result = octeon_i2c_check_status(i2c, false);
+ if (result)
+ return result;
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, data[i]);
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
+ octeon_i2c_data_write(i2c, data[i]);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
result = octeon_i2c_wait(i2c);
if (result)
@@ -326,53 +919,52 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
u8 *data, u16 *rlength, bool recv_len)
{
int i, result, length = *rlength;
- u8 tmp;
+ bool final_read = false;
- if (length < 1)
- return -EINVAL;
+ octeon_i2c_data_write(i2c, (target << 1) | 1);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
- result = octeon_i2c_start(i2c);
+ result = octeon_i2c_wait(i2c);
if (result)
return result;
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, (target << 1) | 1);
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
-
- result = octeon_i2c_wait(i2c);
+ /* address OK ? */
+ result = octeon_i2c_check_status(i2c, false);
if (result)
return result;
for (i = 0; i < length; i++) {
- tmp = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
-
- if ((tmp != STAT_RXDATA_ACK) && (tmp != STAT_RXADDR_ACK)) {
- dev_err(i2c->dev,
- "%s: bad status before read (0x%x)\n",
- __func__, tmp);
- return -EIO;
- }
+ /*
+ * For the last byte to receive TWSI_CTL_AAK must not be set.
+ *
+ * A special case is I2C_M_RECV_LEN where we don't know the
+ * additional length yet. If recv_len is set we assume we're
+ * not reading the final byte and therefore need to set
+ * TWSI_CTL_AAK.
+ */
+ if ((i + 1 == length) && !(recv_len && i == 0))
+ final_read = true;
- if (i + 1 < length)
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
- TWSI_CTL_ENAB | TWSI_CTL_AAK);
+ /* clear iflg to allow next event */
+ if (final_read)
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB);
else
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL,
- TWSI_CTL_ENAB);
+ octeon_i2c_ctl_write(i2c, TWSI_CTL_ENAB | TWSI_CTL_AAK);
result = octeon_i2c_wait(i2c);
if (result)
return result;
- data[i] = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_DATA);
+ data[i] = octeon_i2c_data_read(i2c);
if (recv_len && i == 0) {
- if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) {
- dev_err(i2c->dev,
- "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n",
- __func__, data[i]);
+ if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
return -EPROTO;
- }
length += data[i];
}
+
+ result = octeon_i2c_check_status(i2c, final_read);
+ if (result)
+ return result;
}
*rlength = length;
return 0;
@@ -392,13 +984,41 @@ static int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
struct octeon_i2c *i2c = i2c_get_adapdata(adap);
int i, ret = 0;
+ if (num == 1) {
+ if (msgs[0].len > 0 && msgs[0].len <= 8) {
+ if (msgs[0].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_write(i2c, msgs);
+ goto out;
+ }
+ } else if (num == 2) {
+ if ((msgs[0].flags & I2C_M_RD) == 0 &&
+ (msgs[1].flags & I2C_M_RECV_LEN) == 0 &&
+ msgs[0].len > 0 && msgs[0].len <= 2 &&
+ msgs[1].len > 0 && msgs[1].len <= 8 &&
+ msgs[0].addr == msgs[1].addr) {
+ if (msgs[1].flags & I2C_M_RD)
+ ret = octeon_i2c_hlc_comp_read(i2c, msgs);
+ else
+ ret = octeon_i2c_hlc_comp_write(i2c, msgs);
+ goto out;
+ }
+ }
+
for (i = 0; ret == 0 && i < num; i++) {
struct i2c_msg *pmsg = &msgs[i];
- dev_dbg(i2c->dev,
- "Doing %s %d byte(s) to/from 0x%02x - %d of %d messages\n",
- pmsg->flags & I2C_M_RD ? "read" : "write",
- pmsg->len, pmsg->addr, i + 1, num);
+ /* zero-length messages are not supported */
+ if (!pmsg->len) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ ret = octeon_i2c_start(i2c);
+ if (ret)
+ return ret;
+
if (pmsg->flags & I2C_M_RD)
ret = octeon_i2c_read(i2c, pmsg->addr, pmsg->buf,
&pmsg->len, pmsg->flags & I2C_M_RECV_LEN);
@@ -407,102 +1027,105 @@ static int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
pmsg->len);
}
octeon_i2c_stop(i2c);
-
+out:
return (ret != 0) ? ret : num;
}
-static u32 octeon_i2c_functionality(struct i2c_adapter *adap)
+static int octeon_i2c_get_scl(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
- I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_SMBUS_BLOCK_PROC_CALL;
+ struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+ u64 state;
+
+ state = octeon_i2c_read_int(i2c);
+ return state & TWSI_INT_SCL;
}
-static const struct i2c_algorithm octeon_i2c_algo = {
- .master_xfer = octeon_i2c_xfer,
- .functionality = octeon_i2c_functionality,
-};
+static void octeon_i2c_set_scl(struct i2c_adapter *adap, int val)
+{
+ struct octeon_i2c *i2c = i2c_get_adapdata(adap);
-static struct i2c_adapter octeon_i2c_ops = {
- .owner = THIS_MODULE,
- .name = "OCTEON adapter",
- .algo = &octeon_i2c_algo,
- .timeout = HZ / 50,
-};
+ octeon_i2c_write_int(i2c, TWSI_INT_SCL_OVR);
+}
-/* calculate and set clock divisors */
-static void octeon_i2c_set_clock(struct octeon_i2c *i2c)
+static int octeon_i2c_get_sda(struct i2c_adapter *adap)
{
- int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff;
- int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000;
+ struct octeon_i2c *i2c = i2c_get_adapdata(adap);
+ u64 state;
- for (ndiv_idx = 0; ndiv_idx < 8 && delta_hz != 0; ndiv_idx++) {
- /*
- * An mdiv value of less than 2 seems to not work well
- * with ds1337 RTCs, so we constrain it to larger values.
- */
- for (mdiv_idx = 15; mdiv_idx >= 2 && delta_hz != 0; mdiv_idx--) {
- /*
- * For given ndiv and mdiv values check the
- * two closest thp values.
- */
- tclk = i2c->twsi_freq * (mdiv_idx + 1) * 10;
- tclk *= (1 << ndiv_idx);
- thp_base = (i2c->sys_freq / (tclk * 2)) - 1;
+ state = octeon_i2c_read_int(i2c);
+ return state & TWSI_INT_SDA;
+}
- for (inc = 0; inc <= 1; inc++) {
- thp_idx = thp_base + inc;
- if (thp_idx < 5 || thp_idx > 0xff)
- continue;
+static void octeon_i2c_prepare_recovery(struct i2c_adapter *adap)
+{
+ struct octeon_i2c *i2c = i2c_get_adapdata(adap);
- foscl = i2c->sys_freq / (2 * (thp_idx + 1));
- foscl = foscl / (1 << ndiv_idx);
- foscl = foscl / (mdiv_idx + 1) / 10;
- diff = abs(foscl - i2c->twsi_freq);
- if (diff < delta_hz) {
- delta_hz = diff;
- thp = thp_idx;
- mdiv = mdiv_idx;
- ndiv = ndiv_idx;
- }
- }
- }
- }
- octeon_i2c_write_sw(i2c, SW_TWSI_OP_TWSI_CLK, thp);
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CLKCTL, (mdiv << 3) | ndiv);
+ /*
+	 * The stop resets the state machine but does not _transmit_ STOP
+	 * unless the engine was active.
+ */
+ octeon_i2c_stop(i2c);
+
+ octeon_i2c_hlc_disable(i2c);
+ octeon_i2c_write_int(i2c, 0);
}
-static int octeon_i2c_init_lowlevel(struct octeon_i2c *i2c)
+static void octeon_i2c_unprepare_recovery(struct i2c_adapter *adap)
{
- u8 status;
- int tries;
+ struct octeon_i2c *i2c = i2c_get_adapdata(adap);
- /* disable high level controller, enable bus access */
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB);
+ octeon_i2c_write_int(i2c, 0);
+}
- /* reset controller */
- octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_RST, 0);
+static struct i2c_bus_recovery_info octeon_i2c_recovery_info = {
+ .recover_bus = i2c_generic_scl_recovery,
+ .get_scl = octeon_i2c_get_scl,
+ .set_scl = octeon_i2c_set_scl,
+ .get_sda = octeon_i2c_get_sda,
+ .prepare_recovery = octeon_i2c_prepare_recovery,
+ .unprepare_recovery = octeon_i2c_unprepare_recovery,
+};
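
[Editor's note] With .recover_bus = i2c_generic_scl_recovery the I2C core pulses SCL (conventionally up to nine times) through the get_scl/set_scl callbacks until the stuck slave releases SDA, bracketed by the driver's prepare/unprepare hooks. Hooking it up is just:

	adap->bus_recovery_info = &octeon_i2c_recovery_info;
	...
	ret = i2c_recover_bus(adap);	/* e.g. after a failed START */

which is exactly what octeon_i2c_recovery() does before falling back to a hardware re-init.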
- for (tries = 10; tries; tries--) {
- udelay(1);
- status = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT);
- if (status == STAT_IDLE)
- return 0;
- }
- dev_err(i2c->dev, "%s: TWSI_RST failed! (0x%x)\n", __func__, status);
- return -EIO;
+static u32 octeon_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_SMBUS_BLOCK_PROC_CALL;
}
+static const struct i2c_algorithm octeon_i2c_algo = {
+ .master_xfer = octeon_i2c_xfer,
+ .functionality = octeon_i2c_functionality,
+};
+
+static struct i2c_adapter octeon_i2c_ops = {
+ .owner = THIS_MODULE,
+ .name = "OCTEON adapter",
+ .algo = &octeon_i2c_algo,
+};
+
static int octeon_i2c_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
+ int irq, result = 0, hlc_irq = 0;
struct resource *res_mem;
struct octeon_i2c *i2c;
- int irq, result = 0;
-
- /* All adaptors have an irq. */
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ bool cn78xx_style;
+
+ cn78xx_style = of_device_is_compatible(node, "cavium,octeon-7890-twsi");
+ if (cn78xx_style) {
+ hlc_irq = platform_get_irq(pdev, 0);
+ if (hlc_irq < 0)
+ return hlc_irq;
+
+ irq = platform_get_irq(pdev, 2);
+ if (irq < 0)
+ return irq;
+ } else {
+ /* All adaptors have an irq. */
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ }
i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c) {
@@ -537,6 +1160,31 @@ static int octeon_i2c_probe(struct platform_device *pdev)
i2c->irq = irq;
+ if (cn78xx_style) {
+ i2c->hlc_irq = hlc_irq;
+
+ i2c->int_enable = octeon_i2c_int_enable78;
+ i2c->int_disable = octeon_i2c_int_disable78;
+ i2c->hlc_int_enable = octeon_i2c_hlc_int_enable78;
+ i2c->hlc_int_disable = octeon_i2c_hlc_int_disable78;
+
+ irq_set_status_flags(i2c->irq, IRQ_NOAUTOEN);
+ irq_set_status_flags(i2c->hlc_irq, IRQ_NOAUTOEN);
+
+ result = devm_request_irq(&pdev->dev, i2c->hlc_irq,
+ octeon_i2c_hlc_isr78, 0,
+ DRV_NAME, i2c);
+ if (result < 0) {
+ dev_err(i2c->dev, "failed to attach interrupt\n");
+ goto out;
+ }
+ } else {
+ i2c->int_enable = octeon_i2c_int_enable;
+ i2c->int_disable = octeon_i2c_int_disable;
+ i2c->hlc_int_enable = octeon_i2c_hlc_int_enable;
+ i2c->hlc_int_disable = octeon_i2c_int_disable;
+ }
+
result = devm_request_irq(&pdev->dev, i2c->irq,
octeon_i2c_isr, 0, DRV_NAME, i2c);
if (result < 0) {
@@ -544,6 +1192,9 @@ static int octeon_i2c_probe(struct platform_device *pdev)
goto out;
}
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX))
+ i2c->broken_irq_check = true;
+
result = octeon_i2c_init_lowlevel(i2c);
if (result) {
dev_err(i2c->dev, "init low level failed\n");
@@ -553,6 +1204,9 @@ static int octeon_i2c_probe(struct platform_device *pdev)
octeon_i2c_set_clock(i2c);
i2c->adap = octeon_i2c_ops;
+ i2c->adap.timeout = msecs_to_jiffies(2);
+ i2c->adap.retries = 5;
+ i2c->adap.bus_recovery_info = &octeon_i2c_recovery_info;
i2c->adap.dev.parent = &pdev->dev;
i2c->adap.dev.of_node = node;
i2c_set_adapdata(&i2c->adap, i2c);
@@ -580,6 +1234,7 @@ static int octeon_i2c_remove(struct platform_device *pdev)
static const struct of_device_id octeon_i2c_match[] = {
{ .compatible = "cavium,octeon-3860-twsi", },
+ { .compatible = "cavium,octeon-7890-twsi", },
{},
};
MODULE_DEVICE_TABLE(of, octeon_i2c_match);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 13c45296c..ab1279b8e 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -185,7 +185,6 @@ enum {
#define OMAP_I2C_IP_V2_INTERRUPTS_MASK 0x6FFF
struct omap_i2c_dev {
- spinlock_t lock; /* IRQ synchronization */
struct device *dev;
void __iomem *base; /* virtual */
int irq;
@@ -995,15 +994,12 @@ omap_i2c_isr(int irq, void *dev_id)
u16 mask;
u16 stat;
- spin_lock(&omap->lock);
- mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
+ mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
if (stat & mask)
ret = IRQ_WAKE_THREAD;
- spin_unlock(&omap->lock);
-
return ret;
}
@@ -1011,12 +1007,10 @@ static irqreturn_t
omap_i2c_isr_thread(int this_irq, void *dev_id)
{
struct omap_i2c_dev *omap = dev_id;
- unsigned long flags;
u16 bits;
u16 stat;
int err = 0, count = 0;
- spin_lock_irqsave(&omap->lock, flags);
do {
bits = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
@@ -1142,8 +1136,6 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
omap_i2c_complete_cmd(omap, err);
out:
- spin_unlock_irqrestore(&omap->lock, flags);
-
return IRQ_HANDLED;
}
@@ -1330,8 +1322,6 @@ omap_i2c_probe(struct platform_device *pdev)
omap->dev = &pdev->dev;
omap->irq = irq;
- spin_lock_init(&omap->lock);
-
platform_set_drvdata(pdev, omap);
init_completion(&omap->cmd_complete);
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 6abcf696e..b0d9dee14 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -150,13 +150,11 @@ static int i2c_powermac_master_xfer( struct i2c_adapter *adap,
{
struct pmac_i2c_bus *bus = i2c_get_adapdata(adap);
int rc = 0;
- int read;
int addrdir;
if (msgs->flags & I2C_M_TEN)
return -EINVAL;
- read = (msgs->flags & I2C_M_RD) != 0;
- addrdir = (msgs->addr << 1) | read;
+ addrdir = i2c_8bit_addr_from_msg(msgs);
rc = pmac_i2c_open(bus, 0);
if (rc) {
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 23eaabb19..041050edd 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -515,7 +515,7 @@ static int qup_i2c_get_data_len(struct qup_i2c_dev *qup)
static int qup_i2c_set_tags(u8 *tags, struct qup_i2c_dev *qup,
struct i2c_msg *msg, int is_dma)
{
- u16 addr = (msg->addr << 1) | ((msg->flags & I2C_M_RD) == I2C_M_RD);
+ u16 addr = i2c_8bit_addr_from_msg(msg);
int len = 0;
int data_len;
@@ -1268,6 +1268,8 @@ static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
}
}
+ idx = 0;
+
do {
if (msgs[idx].len == 0) {
ret = -EINVAL;
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 68ecb5630..52407f3c9 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -21,6 +21,8 @@
*/
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -43,6 +45,8 @@
#define ICSAR 0x1C /* slave address */
#define ICMAR 0x20 /* master address */
#define ICRXTX 0x24 /* data port */
+#define ICDMAER 0x3c /* DMA enable */
+#define ICFBSCR 0x38 /* first bit setup cycle */
/* ICSCR */
#define SDBS (1 << 3) /* slave data buffer select */
@@ -78,6 +82,16 @@
#define MDR (1 << 1)
#define MAT (1 << 0) /* slave addr xfer done */
+/* ICDMAER */
+#define RSDMAE (1 << 3) /* DMA Slave Received Enable */
+#define TSDMAE (1 << 2) /* DMA Slave Transmitted Enable */
+#define RMDMAE (1 << 1) /* DMA Master Received Enable */
+#define TMDMAE (1 << 0) /* DMA Master Transmitted Enable */
+
+/* ICFBSCR */
+#define TCYC06 0x04 /* 6*Tcyc delay 1st bit between SDA and SCL */
+#define TCYC17 0x0f /* 17*Tcyc delay 1st bit between SDA and SCL */
+
#define RCAR_BUS_PHASE_START (MDBS | MIE | ESG)
#define RCAR_BUS_PHASE_DATA (MDBS | MIE)
@@ -120,6 +134,12 @@ struct rcar_i2c_priv {
u32 flags;
enum rcar_i2c_type devtype;
struct i2c_client *slave;
+
+ struct resource *res;
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
+ struct scatterlist sg;
+ enum dma_data_direction dma_direction;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -287,6 +307,118 @@ static void rcar_i2c_next_msg(struct rcar_i2c_priv *priv)
/*
* interrupt functions
*/
+static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
+{
+ struct dma_chan *chan = priv->dma_direction == DMA_FROM_DEVICE
+ ? priv->dma_rx : priv->dma_tx;
+
+ /* Disable DMA Master Received/Transmitted */
+ rcar_i2c_write(priv, ICDMAER, 0);
+
+ /* Reset default delay */
+ rcar_i2c_write(priv, ICFBSCR, TCYC06);
+
+ dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
+ priv->msg->len, priv->dma_direction);
+
+ priv->dma_direction = DMA_NONE;
+}
+
+static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv)
+{
+ if (priv->dma_direction == DMA_NONE)
+ return;
+ else if (priv->dma_direction == DMA_FROM_DEVICE)
+ dmaengine_terminate_all(priv->dma_rx);
+ else if (priv->dma_direction == DMA_TO_DEVICE)
+ dmaengine_terminate_all(priv->dma_tx);
+
+ rcar_i2c_dma_unmap(priv);
+}
+
+static void rcar_i2c_dma_callback(void *data)
+{
+ struct rcar_i2c_priv *priv = data;
+
+ priv->pos += sg_dma_len(&priv->sg);
+
+ rcar_i2c_dma_unmap(priv);
+}
+
+static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
+{
+ struct device *dev = rcar_i2c_priv_to_dev(priv);
+ struct i2c_msg *msg = priv->msg;
+ bool read = msg->flags & I2C_M_RD;
+ enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ struct dma_chan *chan = read ? priv->dma_rx : priv->dma_tx;
+ struct dma_async_tx_descriptor *txdesc;
+ dma_addr_t dma_addr;
+ dma_cookie_t cookie;
+ unsigned char *buf;
+ int len;
+
+ /* Do not use DMA if it's not available or for messages < 8 bytes */
+ if (IS_ERR(chan) || msg->len < 8)
+ return;
+
+ if (read) {
+ /*
+	 * The last two bytes need to be fetched using PIO in
+ * order for the STOP phase to work.
+ */
+ buf = priv->msg->buf;
+ len = priv->msg->len - 2;
+ } else {
+ /*
+ * First byte in message was sent using PIO.
+ */
+ buf = priv->msg->buf + 1;
+ len = priv->msg->len - 1;
+ }
+
+ dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_dbg(dev, "dma map failed, using PIO\n");
+ return;
+ }
+
+ sg_dma_len(&priv->sg) = len;
+ sg_dma_address(&priv->sg) = dma_addr;
+
+ priv->dma_direction = dir;
+
+ txdesc = dmaengine_prep_slave_sg(chan, &priv->sg, 1,
+ read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc) {
+ dev_dbg(dev, "dma prep slave sg failed, using PIO\n");
+ rcar_i2c_cleanup_dma(priv);
+ return;
+ }
+
+ txdesc->callback = rcar_i2c_dma_callback;
+ txdesc->callback_param = priv;
+
+ cookie = dmaengine_submit(txdesc);
+ if (dma_submit_error(cookie)) {
+ dev_dbg(dev, "submitting dma failed, using PIO\n");
+ rcar_i2c_cleanup_dma(priv);
+ return;
+ }
+
+ /* Set delay for DMA operations */
+ rcar_i2c_write(priv, ICFBSCR, TCYC17);
+
+ /* Enable DMA Master Received/Transmitted */
+ if (read)
+ rcar_i2c_write(priv, ICDMAER, RMDMAE);
+ else
+ rcar_i2c_write(priv, ICDMAER, TMDMAE);
+
+ dma_async_issue_pending(chan);
+}
+
static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
{
struct i2c_msg *msg = priv->msg;
@@ -306,6 +438,12 @@ static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
rcar_i2c_write(priv, ICRXTX, msg->buf[priv->pos]);
priv->pos++;
+ /*
+	 * Try to use DMA to transmit the rest of the data if the
+	 * address transfer phase has just finished.
+ */
+ if (msr & MAT)
+ rcar_i2c_dma(priv);
} else {
/*
* The last data was pushed to ICRXTX on _PREV_ empty irq.
@@ -340,7 +478,11 @@ static void rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
return;
if (msr & MAT) {
- /* Address transfer phase finished, but no data at this point. */
+ /*
+ * Address transfer phase finished, but no data at this point.
+ * Try to use DMA to receive data.
+ */
+ rcar_i2c_dma(priv);
} else if (priv->pos < msg->len) {
/* get received data */
msg->buf[priv->pos] = rcar_i2c_read(priv, ICRXTX);
@@ -472,6 +614,81 @@ out:
return IRQ_HANDLED;
}
+static struct dma_chan *rcar_i2c_request_dma_chan(struct device *dev,
+ enum dma_transfer_direction dir,
+ dma_addr_t port_addr)
+{
+ struct dma_chan *chan;
+ struct dma_slave_config cfg;
+ char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
+ int ret;
+
+ chan = dma_request_chan(dev, chan_name);
+ if (IS_ERR(chan)) {
+ ret = PTR_ERR(chan);
+ dev_dbg(dev, "request_channel failed for %s (%d)\n",
+ chan_name, ret);
+ return chan;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.direction = dir;
+ if (dir == DMA_MEM_TO_DEV) {
+ cfg.dst_addr = port_addr;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ } else {
+ cfg.src_addr = port_addr;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ }
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret) {
+ dev_dbg(dev, "slave_config failed for %s (%d)\n",
+ chan_name, ret);
+ dma_release_channel(chan);
+ return ERR_PTR(ret);
+ }
+
+ dev_dbg(dev, "got DMA channel for %s\n", chan_name);
+ return chan;
+}
+
+static void rcar_i2c_request_dma(struct rcar_i2c_priv *priv,
+ struct i2c_msg *msg)
+{
+ struct device *dev = rcar_i2c_priv_to_dev(priv);
+ bool read;
+ struct dma_chan *chan;
+ enum dma_transfer_direction dir;
+
+ read = msg->flags & I2C_M_RD;
+
+ chan = read ? priv->dma_rx : priv->dma_tx;
+ if (PTR_ERR(chan) != -EPROBE_DEFER)
+ return;
+
+ dir = read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+ chan = rcar_i2c_request_dma_chan(dev, dir, priv->res->start + ICRXTX);
+
+ if (read)
+ priv->dma_rx = chan;
+ else
+ priv->dma_tx = chan;
+}
+
+static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
+{
+ if (!IS_ERR(priv->dma_tx)) {
+ dma_release_channel(priv->dma_tx);
+ priv->dma_tx = ERR_PTR(-EPROBE_DEFER);
+ }
+
+ if (!IS_ERR(priv->dma_rx)) {
+ dma_release_channel(priv->dma_rx);
+ priv->dma_rx = ERR_PTR(-EPROBE_DEFER);
+ }
+}
+
static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs,
int num)
@@ -493,6 +710,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
goto out;
}
+ rcar_i2c_request_dma(priv, msgs + i);
}
/* init first message */
@@ -504,6 +722,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
time_left = wait_event_timeout(priv->wait, priv->flags & ID_DONE,
num * adap->timeout);
if (!time_left) {
+ rcar_i2c_cleanup_dma(priv);
rcar_i2c_init(priv);
ret = -ETIMEDOUT;
} else if (priv->flags & ID_NACK) {
@@ -591,7 +810,6 @@ static int rcar_i2c_probe(struct platform_device *pdev)
{
struct rcar_i2c_priv *priv;
struct i2c_adapter *adap;
- struct resource *res;
struct device *dev = &pdev->dev;
struct i2c_timings i2c_t;
int irq, ret;
@@ -606,8 +824,9 @@ static int rcar_i2c_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->io = devm_ioremap_resource(dev, res);
+ priv->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ priv->io = devm_ioremap_resource(dev, priv->res);
if (IS_ERR(priv->io))
return PTR_ERR(priv->io);
@@ -626,6 +845,11 @@ static int rcar_i2c_probe(struct platform_device *pdev)
i2c_parse_fw_timings(dev, &i2c_t, false);
+ /* Init DMA */
+ sg_init_table(&priv->sg, 1);
+ priv->dma_direction = DMA_NONE;
+ priv->dma_rx = priv->dma_tx = ERR_PTR(-EPROBE_DEFER);
+
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
ret = rcar_i2c_clock_calculate(priv, &i2c_t);
@@ -673,6 +897,7 @@ static int rcar_i2c_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
i2c_del_adapter(&priv->adap);
+ rcar_i2c_release_dma(priv);
if (priv->flags & ID_P_PM_BLOCKED)
pm_runtime_put(dev);
pm_runtime_disable(dev);
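
The DMA support added above follows the standard dmaengine slave pattern: map the buffer, prepare a single-entry scatterlist descriptor, install a completion callback, submit, then kick the engine. A condensed sketch of that sequence, assuming the channel was already obtained with dma_request_chan() and configured via dmaengine_slave_config() (the driver itself keeps the scatterlist in its priv structure; error unwinding is abbreviated here):

/*
 * Illustrative sketch of the dmaengine slave-transfer sequence; on any
 * failure the caller unmaps the buffer and falls back to PIO, which is
 * why the driver only logs at dev_dbg() level.
 */
static int dma_slave_xfer_sketch(struct dma_chan *chan, void *buf, size_t len,
				 enum dma_transfer_direction dir,
				 dma_async_tx_callback done, void *arg)
{
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;
	dma_addr_t addr;

	/* Map the buffer for device access */
	addr = dma_map_single(chan->device->dev, buf, len,
			      dir == DMA_MEM_TO_DEV ? DMA_TO_DEVICE
						    : DMA_FROM_DEVICE);
	if (dma_mapping_error(chan->device->dev, addr))
		return -EIO;

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = addr;
	sg_dma_len(&sg) = len;

	desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	desc->callback = done;		/* runs on transfer completion */
	desc->callback_param = arg;

	if (dma_submit_error(dmaengine_submit(desc)))
		return -EIO;

	dma_async_issue_pending(chan);	/* actually start the transfer */
	return 0;
}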
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 3dcc5f3f2..80bed02cd 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -101,10 +101,7 @@ struct rk3x_i2c {
struct notifier_block clk_rate_nb;
/* Settings */
- unsigned int scl_frequency;
- unsigned int scl_rise_ns;
- unsigned int scl_fall_ns;
- unsigned int sda_fall_ns;
+ struct i2c_timings t;
/* Synchronization & notification */
spinlock_t lock;
@@ -437,10 +434,7 @@ out:
* Calculate divider values for desired SCL frequency
*
* @clk_rate: I2C input clock rate
- * @scl_rate: Desired SCL rate
- * @scl_rise_ns: How many ns it takes for SCL to rise.
- * @scl_fall_ns: How many ns it takes for SCL to fall.
- * @sda_fall_ns: How many ns it takes for SDA to fall.
+ * @t: Known I2C timing information.
* @div_low: Divider output for low
* @div_high: Divider output for high
*
@@ -448,11 +442,10 @@ out:
* a best-effort divider value is returned in divs. If the target rate is
* too high, we silently use the highest possible rate.
*/
-static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
- unsigned long scl_rise_ns,
- unsigned long scl_fall_ns,
- unsigned long sda_fall_ns,
- unsigned long *div_low, unsigned long *div_high)
+static int rk3x_i2c_calc_divs(unsigned long clk_rate,
+ struct i2c_timings *t,
+ unsigned long *div_low,
+ unsigned long *div_high)
{
unsigned long spec_min_low_ns, spec_min_high_ns;
unsigned long spec_setup_start, spec_max_data_hold_ns;
@@ -472,12 +465,12 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
int ret = 0;
/* Only support standard-mode and fast-mode */
- if (WARN_ON(scl_rate > 400000))
- scl_rate = 400000;
+ if (WARN_ON(t->bus_freq_hz > 400000))
+ t->bus_freq_hz = 400000;
/* prevent scl_rate_khz from becoming 0 */
- if (WARN_ON(scl_rate < 1000))
- scl_rate = 1000;
+ if (WARN_ON(t->bus_freq_hz < 1000))
+ t->bus_freq_hz = 1000;
/*
* min_low_ns: The minimum number of ns we need to hold low to
@@ -491,7 +484,7 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
* This is because the i2c host on Rockchip holds the data line
* for half the low time.
*/
- if (scl_rate <= 100000) {
+ if (t->bus_freq_hz <= 100000) {
/* Standard-mode */
spec_min_low_ns = 4700;
spec_setup_start = 4700;
@@ -506,7 +499,7 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
spec_max_data_hold_ns = 900;
data_hold_buffer_ns = 50;
}
- min_high_ns = scl_rise_ns + spec_min_high_ns;
+ min_high_ns = t->scl_rise_ns + spec_min_high_ns;
/*
* Timings for repeated start:
@@ -517,18 +510,18 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
* we meet tSU;STA and tHD;STA times.
*/
min_high_ns = max(min_high_ns,
- DIV_ROUND_UP((scl_rise_ns + spec_setup_start) * 1000, 875));
+ DIV_ROUND_UP((t->scl_rise_ns + spec_setup_start) * 1000, 875));
min_high_ns = max(min_high_ns,
- DIV_ROUND_UP((scl_rise_ns + spec_setup_start +
- sda_fall_ns + spec_min_high_ns), 2));
+ DIV_ROUND_UP((t->scl_rise_ns + spec_setup_start +
+ t->sda_fall_ns + spec_min_high_ns), 2));
- min_low_ns = scl_fall_ns + spec_min_low_ns;
+ min_low_ns = t->scl_fall_ns + spec_min_low_ns;
max_low_ns = spec_max_data_hold_ns * 2 - data_hold_buffer_ns;
min_total_ns = min_low_ns + min_high_ns;
/* Adjust to avoid overflow */
clk_rate_khz = DIV_ROUND_UP(clk_rate, 1000);
- scl_rate_khz = scl_rate / 1000;
+ scl_rate_khz = t->bus_freq_hz / 1000;
/*
* We need the total div to be >= this number
@@ -616,14 +609,13 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
{
+ struct i2c_timings *t = &i2c->t;
unsigned long div_low, div_high;
u64 t_low_ns, t_high_ns;
int ret;
- ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, i2c->scl_rise_ns,
- i2c->scl_fall_ns, i2c->sda_fall_ns,
- &div_low, &div_high);
- WARN_ONCE(ret != 0, "Could not reach SCL freq %u", i2c->scl_frequency);
+ ret = rk3x_i2c_calc_divs(clk_rate, t, &div_low, &div_high);
+ WARN_ONCE(ret != 0, "Could not reach SCL freq %u", t->bus_freq_hz);
clk_enable(i2c->clk);
i2c_writel(i2c, (div_high << 16) | (div_low & 0xffff), REG_CLKDIV);
@@ -634,7 +626,7 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
dev_dbg(i2c->dev,
"CLK %lukhz, Req %uns, Act low %lluns high %lluns\n",
clk_rate / 1000,
- 1000000000 / i2c->scl_frequency,
+ 1000000000 / t->bus_freq_hz,
t_low_ns, t_high_ns);
}
@@ -664,9 +656,7 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
switch (event) {
case PRE_RATE_CHANGE:
- if (rk3x_i2c_calc_divs(ndata->new_rate, i2c->scl_frequency,
- i2c->scl_rise_ns, i2c->scl_fall_ns,
- i2c->sda_fall_ns,
+ if (rk3x_i2c_calc_divs(ndata->new_rate, &i2c->t,
&div_low, &div_high) != 0)
return NOTIFY_STOP;
@@ -880,37 +870,8 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
match = of_match_node(rk3x_i2c_match, np);
i2c->soc_data = (struct rk3x_i2c_soc_data *)match->data;
- if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &i2c->scl_frequency)) {
- dev_info(&pdev->dev, "using default SCL frequency: %d\n",
- DEFAULT_SCL_RATE);
- i2c->scl_frequency = DEFAULT_SCL_RATE;
- }
-
- if (i2c->scl_frequency == 0 || i2c->scl_frequency > 400 * 1000) {
- dev_warn(&pdev->dev, "invalid SCL frequency specified.\n");
- dev_warn(&pdev->dev, "using default SCL frequency: %d\n",
- DEFAULT_SCL_RATE);
- i2c->scl_frequency = DEFAULT_SCL_RATE;
- }
-
- /*
- * Read rise and fall time from device tree. If not available use
- * the default maximum timing from the specification.
- */
- if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-rising-time-ns",
- &i2c->scl_rise_ns)) {
- if (i2c->scl_frequency <= 100000)
- i2c->scl_rise_ns = 1000;
- else
- i2c->scl_rise_ns = 300;
- }
- if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-falling-time-ns",
- &i2c->scl_fall_ns))
- i2c->scl_fall_ns = 300;
- if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns",
- &i2c->sda_fall_ns))
- i2c->sda_fall_ns = i2c->scl_fall_ns;
+ /* use common interface to get I2C timing properties */
+ i2c_parse_fw_timings(&pdev->dev, &i2c->t, true);
strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
i2c->adap.owner = THIS_MODULE;
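
The conversion above collapses four ad-hoc of_property_read_u32() calls into one i2c_parse_fw_timings() call, which fills a struct i2c_timings and, when its last argument is true, falls back to the I2C specification's default rise/fall times. A hedged usage sketch (field names per struct i2c_timings):

/* Sketch: fetch timing properties once, then derive dividers from the
 * filled structure instead of individual DT reads. */
struct i2c_timings t;

i2c_parse_fw_timings(&pdev->dev, &t, true);	/* true = apply spec defaults */

dev_dbg(&pdev->dev, "SCL %u Hz, rise %u ns, fall %u ns, SDA fall %u ns\n",
	t.bus_freq_hz, t.scl_rise_ns, t.scl_fall_ns, t.sda_fall_ns);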
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 362a6de54..38dc1cacf 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -163,15 +163,14 @@ static const struct of_device_id s3c24xx_i2c_match[] = {
MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
#endif
-/* s3c24xx_get_device_quirks
- *
+/*
* Get controller type either from device tree or platform device variant.
-*/
-
+ */
static inline kernel_ulong_t s3c24xx_get_device_quirks(struct platform_device *pdev)
{
if (pdev->dev.of_node) {
const struct of_device_id *match;
+
match = of_match_node(s3c24xx_i2c_match, pdev->dev.of_node);
return (kernel_ulong_t)match->data;
}
@@ -179,12 +178,10 @@ static inline kernel_ulong_t s3c24xx_get_device_quirks(struct platform_device *p
return platform_get_device_id(pdev)->driver_data;
}
-/* s3c24xx_i2c_master_complete
- *
- * complete the message and wake up the caller, using the given return code,
+/*
+ * Complete the message and wake up the caller, using the given return code,
* or zero to mean ok.
-*/
-
+ */
static inline void s3c24xx_i2c_master_complete(struct s3c24xx_i2c *i2c, int ret)
{
dev_dbg(i2c->dev, "master_complete %d\n", ret);
@@ -217,7 +214,6 @@ static inline void s3c24xx_i2c_enable_ack(struct s3c24xx_i2c *i2c)
}
/* irq enable/disable functions */
-
static inline void s3c24xx_i2c_disable_irq(struct s3c24xx_i2c *i2c)
{
unsigned long tmp;
@@ -251,11 +247,9 @@ static bool is_ack(struct s3c24xx_i2c *i2c)
return false;
}
-/* s3c24xx_i2c_message_start
- *
+/*
* put the start of a message onto the bus
-*/
-
+ */
static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,
struct i2c_msg *msg)
{
@@ -284,9 +278,10 @@ static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c,
dev_dbg(i2c->dev, "START: %08lx to IICSTAT, %02x to DS\n", stat, addr);
writeb(addr, i2c->regs + S3C2410_IICDS);
- /* delay here to ensure the data byte has gotten onto the bus
- * before the transaction is started */
-
+ /*
+ * delay here to ensure the data byte has gotten onto the bus
+ * before the transaction is started
+ */
ndelay(i2c->tx_setup);
dev_dbg(i2c->dev, "iiccon, %08lx\n", iiccon);
@@ -361,50 +356,46 @@ static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret)
s3c24xx_i2c_disable_irq(i2c);
}
-/* helper functions to determine the current state in the set of
- * messages we are sending */
+/*
+ * helper functions to determine the current state in the set of
+ * messages we are sending
+ */
-/* is_lastmsg()
- *
+/*
* returns TRUE if the current message is the last in the set
-*/
-
+ */
static inline int is_lastmsg(struct s3c24xx_i2c *i2c)
{
return i2c->msg_idx >= (i2c->msg_num - 1);
}
-/* is_msglast
- *
+/*
 * returns TRUE if this is the last byte in the current message
-*/
-
+ */
static inline int is_msglast(struct s3c24xx_i2c *i2c)
{
- /* msg->len is always 1 for the first byte of smbus block read.
+ /*
+ * msg->len is always 1 for the first byte of smbus block read.
* Actual length will be read from slave. More bytes will be
- * read according to the length then. */
+ * read according to the length then.
+ */
if (i2c->msg->flags & I2C_M_RECV_LEN && i2c->msg->len == 1)
return 0;
return i2c->msg_ptr == i2c->msg->len-1;
}
-/* is_msgend
- *
+/*
* returns TRUE if we reached the end of the current message
-*/
-
+ */
static inline int is_msgend(struct s3c24xx_i2c *i2c)
{
return i2c->msg_ptr >= i2c->msg->len;
}
-/* i2c_s3c_irq_nextbyte
- *
+/*
* process an interrupt and work out what to do
*/
-
static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
{
unsigned long tmp;
@@ -423,14 +414,13 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
goto out_ack;
case STATE_START:
- /* last thing we did was send a start condition on the
+ /*
+ * last thing we did was send a start condition on the
* bus, or started a new i2c message
*/
-
if (iicstat & S3C2410_IICSTAT_LASTBIT &&
!(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
/* ack was not received... */
-
dev_dbg(i2c->dev, "ack was not received\n");
s3c24xx_i2c_stop(i2c, -ENXIO);
goto out_ack;
@@ -441,9 +431,10 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
else
i2c->state = STATE_WRITE;
- /* terminate the transfer if there is nothing to do
- * as this is used by the i2c probe to find devices. */
-
+ /*
+ * Terminate the transfer if there is nothing to do
+ * as this is used by the i2c probe to find devices.
+ */
if (is_lastmsg(i2c) && i2c->msg->len == 0) {
s3c24xx_i2c_stop(i2c, 0);
goto out_ack;
@@ -452,14 +443,16 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
if (i2c->state == STATE_READ)
goto prepare_read;
- /* fall through to the write state, as we will need to
- * send a byte as well */
+ /*
+ * fall through to the write state, as we will need to
+ * send a byte as well
+ */
case STATE_WRITE:
- /* we are writing data to the device... check for the
+ /*
+ * we are writing data to the device... check for the
* end of the message, and if so, work out what to do
*/
-
if (!(i2c->msg->flags & I2C_M_IGNORE_NAK)) {
if (iicstat & S3C2410_IICSTAT_LASTBIT) {
dev_dbg(i2c->dev, "WRITE: No Ack\n");
@@ -475,12 +468,13 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
byte = i2c->msg->buf[i2c->msg_ptr++];
writeb(byte, i2c->regs + S3C2410_IICDS);
- /* delay after writing the byte to allow the
+ /*
+ * delay after writing the byte to allow the
* data setup time on the bus, as writing the
* data to the register causes the first bit
* to appear on SDA, and SCL will change as
- * soon as the interrupt is acknowledged */
-
+ * soon as the interrupt is acknowledged
+ */
ndelay(i2c->tx_setup);
} else if (!is_lastmsg(i2c)) {
@@ -496,10 +490,11 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
if (i2c->msg->flags & I2C_M_NOSTART) {
if (i2c->msg->flags & I2C_M_RD) {
- /* cannot do this, the controller
+ /*
+ * cannot do this, the controller
* forces us to send a new START
- * when we change direction */
-
+ * when we change direction
+ */
s3c24xx_i2c_stop(i2c, -EINVAL);
}
@@ -512,17 +507,16 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
} else {
/* send stop */
-
s3c24xx_i2c_stop(i2c, 0);
}
break;
case STATE_READ:
- /* we have a byte of data in the data register, do
+ /*
+ * we have a byte of data in the data register, do
* something with it, and then work out whether we are
* going to do any more read/write
*/
-
byte = readb(i2c->regs + S3C2410_IICDS);
i2c->msg->buf[i2c->msg_ptr++] = byte;
@@ -537,9 +531,10 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
s3c24xx_i2c_disable_ack(i2c);
} else if (is_msgend(i2c)) {
- /* ok, we've read the entire buffer, see if there
- * is anything else we need to do */
-
+ /*
+ * ok, we've read the entire buffer, see if there
+ * is anything else we need to do
+ */
if (is_lastmsg(i2c)) {
/* last message, send stop and complete */
dev_dbg(i2c->dev, "READ: Send Stop\n");
@@ -568,11 +563,9 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
return ret;
}
-/* s3c24xx_i2c_irq
- *
+/*
* top level IRQ servicing routine
-*/
-
+ */
static irqreturn_t s3c24xx_i2c_irq(int irqno, void *dev_id)
{
struct s3c24xx_i2c *i2c = dev_id;
@@ -595,9 +588,10 @@ static irqreturn_t s3c24xx_i2c_irq(int irqno, void *dev_id)
goto out;
}
- /* pretty much this leaves us with the fact that we've
- * transmitted or received whatever byte we last sent */
-
+ /*
+ * pretty much this leaves us with the fact that we've
+ * transmitted or received whatever byte we last sent
+ */
i2c_s3c_irq_nextbyte(i2c, status);
out:
@@ -630,11 +624,9 @@ static inline void s3c24xx_i2c_disable_bus(struct s3c24xx_i2c *i2c)
}
-/* s3c24xx_i2c_set_master
- *
+/*
* get the i2c bus for a master transaction
-*/
-
+ */
static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
{
unsigned long iicstat;
@@ -652,11 +644,9 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
return -ETIMEDOUT;
}
-/* s3c24xx_i2c_wait_idle
- *
+/*
* wait for the i2c bus to become idle.
-*/
-
+ */
static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
{
unsigned long iicstat;
@@ -706,11 +696,9 @@ static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c)
dev_warn(i2c->dev, "timeout waiting for bus idle\n");
}
-/* s3c24xx_i2c_doxfer
- *
+/*
* this starts an i2c transfer
-*/
-
+ */
static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
struct i2c_msg *msgs, int num)
{
@@ -749,9 +737,10 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
ret = i2c->msg_idx;
- /* having these next two as dev_err() makes life very
- * noisy when doing an i2cdetect */
-
+ /*
+ * Having these next two as dev_err() makes life very
+ * noisy when doing an i2cdetect
+ */
if (timeout == 0)
dev_dbg(i2c->dev, "timeout\n");
else if (ret != num)
@@ -771,12 +760,10 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
return ret;
}
-/* s3c24xx_i2c_xfer
- *
+/*
 * first port of call from the i2c bus code when a message needs
* transferring across the i2c bus.
-*/
-
+ */
static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs, int num)
{
@@ -814,17 +801,14 @@ static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
}
/* i2c bus registration info */
-
static const struct i2c_algorithm s3c24xx_i2c_algorithm = {
.master_xfer = s3c24xx_i2c_xfer,
.functionality = s3c24xx_i2c_func,
};
-/* s3c24xx_i2c_calcdivisor
- *
+/*
* return the divisor settings for a given frequency
-*/
-
+ */
static int s3c24xx_i2c_calcdivisor(unsigned long clkin, unsigned int wanted,
unsigned int *div1, unsigned int *divs)
{
@@ -850,13 +834,11 @@ static int s3c24xx_i2c_calcdivisor(unsigned long clkin, unsigned int wanted,
return clkin / (calc_divs * calc_div1);
}
-/* s3c24xx_i2c_clockrate
- *
+/*
* work out a divisor for the user requested frequency setting,
* either by the requested frequency, or scanning the acceptable
* range of frequencies until something is found
-*/
-
+ */
static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
{
struct s3c2410_platform_i2c *pdata = i2c->pdata;
@@ -944,7 +926,7 @@ static int s3c24xx_i2c_cpufreq_transition(struct notifier_block *nb,
i2c_unlock_adapter(&i2c->adap);
if (ret < 0)
- dev_err(i2c->dev, "cannot find frequency\n");
+ dev_err(i2c->dev, "cannot find frequency (%d)\n", ret);
else
dev_info(i2c->dev, "setting freq %d\n", got);
}
@@ -995,7 +977,8 @@ static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
ret = gpio_request(gpio, "i2c-bus");
if (ret) {
- dev_err(i2c->dev, "gpio [%d] request failed\n", gpio);
+ dev_err(i2c->dev, "gpio [%d] request failed (%d)\n",
+ gpio, ret);
goto free_gpio;
}
}
@@ -1028,11 +1011,9 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
}
#endif
-/* s3c24xx_i2c_init
- *
+/*
* initialise the controller, set the IO lines and frequency
-*/
-
+ */
static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
{
struct s3c2410_platform_i2c *pdata;
@@ -1068,11 +1049,9 @@ static int s3c24xx_i2c_init(struct s3c24xx_i2c *i2c)
}
#ifdef CONFIG_OF
-/* s3c24xx_i2c_parse_dt
- *
+/*
 * Parse the device tree node and retrieve the platform data.
-*/
-
+ */
static void
s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c)
{
@@ -1105,17 +1084,9 @@ s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c)
}
#else
static void
-s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c)
-{
- return;
-}
+s3c24xx_i2c_parse_dt(struct device_node *np, struct s3c24xx_i2c *i2c) { }
#endif
-/* s3c24xx_i2c_probe
- *
- * called by the bus driver when a suitable device is found
-*/
-
static int s3c24xx_i2c_probe(struct platform_device *pdev)
{
struct s3c24xx_i2c *i2c;
@@ -1156,7 +1127,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
init_waitqueue_head(&i2c->wait);
/* find the clock and enable it */
-
i2c->dev = &pdev->dev;
i2c->clk = devm_clk_get(&pdev->dev, "i2c");
if (IS_ERR(i2c->clk)) {
@@ -1166,9 +1136,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);
-
/* map the registers */
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2c->regs = devm_ioremap_resource(&pdev->dev, res);
@@ -1179,33 +1147,35 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->regs, res);
/* setup info block for the i2c core */
-
i2c->adap.algo_data = i2c;
i2c->adap.dev.parent = &pdev->dev;
-
i2c->pctrl = devm_pinctrl_get_select_default(i2c->dev);
	/* initialise the i2c gpio lines */
-
- if (i2c->pdata->cfg_gpio) {
+ if (i2c->pdata->cfg_gpio)
i2c->pdata->cfg_gpio(to_platform_device(i2c->dev));
- } else if (IS_ERR(i2c->pctrl) && s3c24xx_i2c_parse_dt_gpio(i2c)) {
+ else if (IS_ERR(i2c->pctrl) && s3c24xx_i2c_parse_dt_gpio(i2c))
return -EINVAL;
- }
/* initialise the i2c controller */
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "I2C clock enable failed\n");
+ return ret;
+ }
- clk_prepare_enable(i2c->clk);
ret = s3c24xx_i2c_init(i2c);
clk_disable(i2c->clk);
if (ret != 0) {
dev_err(&pdev->dev, "I2C controller init failed\n");
+ clk_unprepare(i2c->clk);
return ret;
}
- /* find the IRQ for this unit (note, this relies on the init call to
+
+ /*
+ * find the IRQ for this unit (note, this relies on the init call to
* ensure no current IRQs pending
*/
-
if (!(i2c->quirks & QUIRK_POLL)) {
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
@@ -1214,9 +1184,8 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_request_irq(&pdev->dev, i2c->irq, s3c24xx_i2c_irq, 0,
- dev_name(&pdev->dev), i2c);
-
+ ret = devm_request_irq(&pdev->dev, i2c->irq, s3c24xx_i2c_irq,
+ 0, dev_name(&pdev->dev), i2c);
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
clk_unprepare(i2c->clk);
@@ -1231,12 +1200,12 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
return ret;
}
- /* Note, previous versions of the driver used i2c_add_adapter()
+ /*
+ * Note, previous versions of the driver used i2c_add_adapter()
* to add the bus at any number. We now pass the bus number via
* the platform data, so if unset it will now default to always
* being bus 0.
*/
-
i2c->adap.nr = i2c->pdata->bus_num;
i2c->adap.dev.of_node = pdev->dev.of_node;
@@ -1257,11 +1226,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
return 0;
}
-/* s3c24xx_i2c_remove
- *
- * called when device is removed from the bus
-*/
-
static int s3c24xx_i2c_remove(struct platform_device *pdev)
{
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
@@ -1316,14 +1280,8 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
#ifdef CONFIG_PM
static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .suspend_noirq = s3c24xx_i2c_suspend_noirq,
- .resume_noirq = s3c24xx_i2c_resume_noirq,
- .freeze_noirq = s3c24xx_i2c_suspend_noirq,
- .thaw_noirq = s3c24xx_i2c_resume_noirq,
- .poweroff_noirq = s3c24xx_i2c_suspend_noirq,
- .restore_noirq = s3c24xx_i2c_resume_noirq,
-#endif
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(s3c24xx_i2c_suspend_noirq,
+ s3c24xx_i2c_resume_noirq)
};
#define S3C24XX_DEV_PM_OPS (&s3c24xx_i2c_dev_pm_ops)
@@ -1331,8 +1289,6 @@ static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
#define S3C24XX_DEV_PM_OPS NULL
#endif
-/* device driver for platform bus bits */
-
static struct platform_driver s3c24xx_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove = s3c24xx_i2c_remove,
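
The dev_pm_ops cleanup works because SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() expands to the same six _noirq assignments that were previously open-coded, and to nothing when CONFIG_PM_SLEEP is disabled, making the inner #ifdef redundant. Roughly, under CONFIG_PM_SLEEP the macro behaves like this sketch (see <linux/pm.h> for the authoritative definition):

/* Approximate expansion under CONFIG_PM_SLEEP; without it the macro
 * expands to nothing, so the struct initializer stays valid. */
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS_SKETCH(suspend_fn, resume_fn) \
	.suspend_noirq = suspend_fn, \
	.resume_noirq = resume_fn, \
	.freeze_noirq = suspend_fn, \
	.thaw_noirq = resume_fn, \
	.poweroff_noirq = suspend_fn, \
	.restore_noirq = resume_fn,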
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 7d2bd3ec2..6fb3e2645 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -398,8 +398,7 @@ static void sh_mobile_i2c_get_data(struct sh_mobile_i2c_data *pd,
{
switch (pd->pos) {
case -1:
- *buf = (pd->msg->addr & 0x7f) << 1;
- *buf |= (pd->msg->flags & I2C_M_RD) ? 1 : 0;
+ *buf = i2c_8bit_addr_from_msg(pd->msg);
break;
default:
*buf = pd->msg->buf[pd->pos];
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index 13e51ef6a..792a42bdd 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -190,9 +190,7 @@ static void i2c_sirfsoc_set_address(struct sirfsoc_i2c *siic,
writel(regval, siic->base + SIRFSOC_I2C_CMD(siic->cmd_ptr++));
- addr = msg->addr << 1; /* Generate address */
- if (msg->flags & I2C_M_RD)
- addr |= 1;
+ addr = i2c_8bit_addr_from_msg(msg);
/* Reverse direction bit */
if (msg->flags & I2C_M_REV_DIR_ADDR)
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index 6ee77159a..944ec4205 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -337,10 +337,42 @@ static void st_i2c_hw_config(struct st_i2c_dev *i2c_dev)
writel_relaxed(val, i2c_dev->base + SSC_NOISE_SUPP_WIDTH_DATAOUT);
}
+static int st_i2c_recover_bus(struct i2c_adapter *i2c_adap)
+{
+ struct st_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
+ u32 ctl;
+
+ dev_dbg(i2c_dev->dev, "Trying to recover bus\n");
+
+ /*
+	 * The SSP IP is dual-role SPI/I2C. To generate 9 clock pulses
+	 * we switch to SPI mode, use 9-bit words and write a 0. This
+	 * has been validated with an oscilloscope and is easier
+	 * than switching to GPIO mode.
+ */
+
+ /* Disable interrupts */
+ writel_relaxed(0, i2c_dev->base + SSC_IEN);
+
+ st_i2c_hw_config(i2c_dev);
+
+ ctl = SSC_CTL_EN | SSC_CTL_MS | SSC_CTL_EN_RX_FIFO | SSC_CTL_EN_TX_FIFO;
+ st_i2c_set_bits(i2c_dev->base + SSC_CTL, ctl);
+
+ st_i2c_clr_bits(i2c_dev->base + SSC_I2C, SSC_I2C_I2CM);
+ usleep_range(8000, 10000);
+
+ writel_relaxed(0, i2c_dev->base + SSC_TBUF);
+ usleep_range(2000, 4000);
+ st_i2c_set_bits(i2c_dev->base + SSC_I2C, SSC_I2C_I2CM);
+
+ return 0;
+}
+
static int st_i2c_wait_free_bus(struct st_i2c_dev *i2c_dev)
{
u32 sta;
- int i;
+ int i, ret;
for (i = 0; i < 10; i++) {
sta = readl_relaxed(i2c_dev->base + SSC_STA);
@@ -352,6 +384,12 @@ static int st_i2c_wait_free_bus(struct st_i2c_dev *i2c_dev)
dev_err(i2c_dev->dev, "bus not free (status = 0x%08x)\n", sta);
+ ret = i2c_recover_bus(&i2c_dev->adap);
+ if (ret) {
+ dev_err(i2c_dev->dev, "Failed to recover the bus (%d)\n", ret);
+ return ret;
+ }
+
return -EBUSY;
}
@@ -614,8 +652,7 @@ static int st_i2c_xfer_msg(struct st_i2c_dev *i2c_dev, struct i2c_msg *msg,
unsigned long timeout;
int ret;
- c->addr = (u8)(msg->addr << 1);
- c->addr |= (msg->flags & I2C_M_RD);
+ c->addr = i2c_8bit_addr_from_msg(msg);
c->buf = msg->buf;
c->count = msg->len;
c->xfered = 0;
@@ -744,6 +781,10 @@ static struct i2c_algorithm st_i2c_algo = {
.functionality = st_i2c_func,
};
+static struct i2c_bus_recovery_info st_i2c_recovery_info = {
+ .recover_bus = st_i2c_recover_bus,
+};
+
static int st_i2c_of_get_deglitch(struct device_node *np,
struct st_i2c_dev *i2c_dev)
{
@@ -826,6 +867,7 @@ static int st_i2c_probe(struct platform_device *pdev)
adap->timeout = 2 * HZ;
adap->retries = 0;
adap->algo = &st_i2c_algo;
+ adap->bus_recovery_info = &st_i2c_recovery_info;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
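
Once an adapter registers an i2c_bus_recovery_info, the core's i2c_recover_bus() dispatches to the driver's recover_bus hook, which is how st_i2c_wait_free_bus() above triggers the nine-pulse sequence. A minimal wiring sketch, assuming only a custom recover_bus callback is supplied (my_recover_bus is a placeholder, not part of this driver):

/* Driver-specific recovery hook plugged into the i2c core. */
static int my_recover_bus(struct i2c_adapter *adap)
{
	/* e.g. clock out 9 SCL pulses so a stuck slave releases SDA */
	return 0;	/* 0 on success, negative errno on failure */
}

static struct i2c_bus_recovery_info my_recovery_info = {
	.recover_bus = my_recover_bus,
};

/* in probe(), before registering the adapter: */
/* adap->bus_recovery_info = &my_recovery_info; */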
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 929185a72..b126dbaa4 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -38,6 +38,7 @@
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
#define I2C_CNFG_PACKET_MODE_EN (1<<10)
#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
+#define I2C_CNFG_MULTI_MASTER_MODE (1<<17)
#define I2C_STATUS 0x01C
#define I2C_SL_CNFG 0x020
#define I2C_SL_CNFG_NACK (1<<1)
@@ -106,6 +107,9 @@
#define I2C_SLV_CONFIG_LOAD (1 << 1)
#define I2C_TIMEOUT_CONFIG_LOAD (1 << 2)
+#define I2C_CLKEN_OVERRIDE 0x090
+#define I2C_MST_CORE_CLKEN_OVR (1 << 0)
+
/*
 * msg_end_type: The bus control which needs to be sent at end of transfer.
* @MSG_END_STOP: Send stop pulse at end of transfer.
@@ -143,6 +147,8 @@ struct tegra_i2c_hw_feature {
int clk_divisor_hs_mode;
int clk_divisor_std_fast_mode;
u16 clk_divisor_fast_plus_mode;
+ bool has_multi_master_mode;
+ bool has_slcg_override_reg;
};
/**
@@ -184,6 +190,7 @@ struct tegra_i2c_dev {
u32 bus_clk_rate;
u16 clk_divisor_non_hs_mode;
bool is_suspended;
+ bool is_multimaster_mode;
};
static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg)
@@ -438,6 +445,10 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN |
(0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
+
+ if (i2c_dev->hw->has_multi_master_mode)
+ val |= I2C_CNFG_MULTI_MASTER_MODE;
+
i2c_writel(i2c_dev, val, I2C_CNFG);
i2c_writel(i2c_dev, 0, I2C_INT_MASK);
@@ -463,25 +474,29 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
if (tegra_i2c_flush_fifos(i2c_dev))
err = -ETIMEDOUT;
+ if (i2c_dev->is_multimaster_mode && i2c_dev->hw->has_slcg_override_reg)
+ i2c_writel(i2c_dev, I2C_MST_CORE_CLKEN_OVR, I2C_CLKEN_OVERRIDE);
+
if (i2c_dev->hw->has_config_load_reg) {
i2c_writel(i2c_dev, I2C_MSTR_CONFIG_LOAD, I2C_CONFIG_LOAD);
while (i2c_readl(i2c_dev, I2C_CONFIG_LOAD) != 0) {
if (time_after(jiffies, timeout)) {
dev_warn(i2c_dev->dev,
"timeout waiting for config load\n");
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto err;
}
msleep(1);
}
}
- tegra_i2c_clock_disable(i2c_dev);
-
if (i2c_dev->irq_disabled) {
i2c_dev->irq_disabled = 0;
enable_irq(i2c_dev->irq);
}
+err:
+ tegra_i2c_clock_disable(i2c_dev);
return err;
}
@@ -688,6 +703,20 @@ static u32 tegra_i2c_func(struct i2c_adapter *adap)
return ret;
}
+static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
+{
+ struct device_node *np = i2c_dev->dev->of_node;
+ int ret;
+
+ ret = of_property_read_u32(np, "clock-frequency",
+ &i2c_dev->bus_clk_rate);
+ if (ret)
+ i2c_dev->bus_clk_rate = 100000; /* default clock rate */
+
+ i2c_dev->is_multimaster_mode = of_property_read_bool(np,
+ "multi-master");
+}
+
static const struct i2c_algorithm tegra_i2c_algo = {
.master_xfer = tegra_i2c_xfer,
.functionality = tegra_i2c_func,
@@ -707,6 +736,8 @@ static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
.clk_divisor_std_fast_mode = 0,
.clk_divisor_fast_plus_mode = 0,
.has_config_load_reg = false,
+ .has_multi_master_mode = false,
+ .has_slcg_override_reg = false,
};
static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
@@ -717,6 +748,8 @@ static const struct tegra_i2c_hw_feature tegra30_i2c_hw = {
.clk_divisor_std_fast_mode = 0,
.clk_divisor_fast_plus_mode = 0,
.has_config_load_reg = false,
+ .has_multi_master_mode = false,
+ .has_slcg_override_reg = false,
};
static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
@@ -727,6 +760,8 @@ static const struct tegra_i2c_hw_feature tegra114_i2c_hw = {
.clk_divisor_std_fast_mode = 0x19,
.clk_divisor_fast_plus_mode = 0x10,
.has_config_load_reg = false,
+ .has_multi_master_mode = false,
+ .has_slcg_override_reg = false,
};
static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
@@ -737,10 +772,25 @@ static const struct tegra_i2c_hw_feature tegra124_i2c_hw = {
.clk_divisor_std_fast_mode = 0x19,
.clk_divisor_fast_plus_mode = 0x10,
.has_config_load_reg = true,
+ .has_multi_master_mode = false,
+ .has_slcg_override_reg = true,
+};
+
+static const struct tegra_i2c_hw_feature tegra210_i2c_hw = {
+ .has_continue_xfer_support = true,
+ .has_per_pkt_xfer_complete_irq = true,
+ .has_single_clk_source = true,
+ .clk_divisor_hs_mode = 1,
+ .clk_divisor_std_fast_mode = 0x19,
+ .clk_divisor_fast_plus_mode = 0x10,
+ .has_config_load_reg = true,
+ .has_multi_master_mode = true,
+ .has_slcg_override_reg = true,
};
/* Match table for of_platform binding */
static const struct of_device_id tegra_i2c_of_match[] = {
+ { .compatible = "nvidia,tegra210-i2c", .data = &tegra210_i2c_hw, },
{ .compatible = "nvidia,tegra124-i2c", .data = &tegra124_i2c_hw, },
{ .compatible = "nvidia,tegra114-i2c", .data = &tegra114_i2c_hw, },
{ .compatible = "nvidia,tegra30-i2c", .data = &tegra30_i2c_hw, },
@@ -797,10 +847,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
return PTR_ERR(i2c_dev->rst);
}
- ret = of_property_read_u32(i2c_dev->dev->of_node, "clock-frequency",
- &i2c_dev->bus_clk_rate);
- if (ret)
- i2c_dev->bus_clk_rate = 100000; /* default clock rate */
+ tegra_i2c_parse_dt(i2c_dev);
i2c_dev->hw = &tegra20_i2c_hw;
@@ -853,17 +900,26 @@ static int tegra_i2c_probe(struct platform_device *pdev)
goto unprepare_fast_clk;
}
+ if (i2c_dev->is_multimaster_mode) {
+ ret = clk_enable(i2c_dev->div_clk);
+ if (ret < 0) {
+ dev_err(i2c_dev->dev, "div_clk enable failed %d\n",
+ ret);
+ goto unprepare_div_clk;
+ }
+ }
+
ret = tegra_i2c_init(i2c_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize i2c controller");
- goto unprepare_div_clk;
+ goto disable_div_clk;
}
ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
tegra_i2c_isr, 0, dev_name(&pdev->dev), i2c_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
- goto unprepare_div_clk;
+ goto disable_div_clk;
}
i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
@@ -878,11 +934,15 @@ static int tegra_i2c_probe(struct platform_device *pdev)
ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
if (ret) {
dev_err(&pdev->dev, "Failed to add I2C adapter\n");
- goto unprepare_div_clk;
+ goto disable_div_clk;
}
return 0;
+disable_div_clk:
+ if (i2c_dev->is_multimaster_mode)
+ clk_disable(i2c_dev->div_clk);
+
unprepare_div_clk:
clk_unprepare(i2c_dev->div_clk);
@@ -898,6 +958,9 @@ static int tegra_i2c_remove(struct platform_device *pdev)
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c_dev->adapter);
+ if (i2c_dev->is_multimaster_mode)
+ clk_disable(i2c_dev->div_clk);
+
clk_unprepare(i2c_dev->div_clk);
if (!i2c_dev->hw->has_single_clk_source)
clk_unprepare(i2c_dev->fast_clk);
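
The reworked tegra error paths rest on the clk API's pairing rules: clk_enable() is undone by clk_disable() and clk_prepare() by clk_unprepare(), in reverse order of acquisition, which is why the new disable_div_clk label sits above unprepare_div_clk. A schematic sketch of that unwind (do_controller_init() is hypothetical):

/* Sketch of balanced error unwinding as used in the probe path above. */
static int probe_clocks_sketch(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* tegra: only in multi-master mode */
	if (ret)
		goto unprepare;

	ret = do_controller_init();	/* hypothetical follow-on step */
	if (ret)
		goto disable;

	return 0;

disable:
	clk_disable(clk);		/* undo clk_enable() first */
unprepare:
	clk_unprepare(clk);		/* then undo clk_prepare() */
	return ret;
}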
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index 213ba55e1..aeead0d27 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -524,7 +524,7 @@ static int uniphier_fi2c_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "failed to get IRQ number");
+ dev_err(dev, "failed to get IRQ number\n");
return irq;
}
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 89eaa8a7e..475a5eb51 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -381,7 +381,7 @@ static int uniphier_i2c_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "failed to get IRQ number");
+ dev_err(dev, "failed to get IRQ number\n");
return irq;
}
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index e33022e2d..6e5fac6a5 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -56,9 +56,7 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num);
* The board info passed can safely be __initdata, but be careful of embedded
* pointers (for platform_data, functions, etc) since that won't be copied.
*/
-int __init
-i2c_register_board_info(int busnum,
- struct i2c_board_info const *info, unsigned len)
+int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len)
{
int status;
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index e584d88ee..af11b6589 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -954,48 +954,40 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
}
/**
- * i2c_lock_adapter - Get exclusive access to an I2C bus segment
+ * i2c_adapter_lock_bus - Get exclusive access to an I2C bus segment
* @adapter: Target I2C bus segment
+ * @flags: I2C_LOCK_ROOT_ADAPTER locks the root i2c adapter, I2C_LOCK_SEGMENT
+ * locks only this branch in the adapter tree
*/
-void i2c_lock_adapter(struct i2c_adapter *adapter)
+static void i2c_adapter_lock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
{
- struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
-
- if (parent)
- i2c_lock_adapter(parent);
- else
- rt_mutex_lock(&adapter->bus_lock);
+ rt_mutex_lock(&adapter->bus_lock);
}
-EXPORT_SYMBOL_GPL(i2c_lock_adapter);
/**
- * i2c_trylock_adapter - Try to get exclusive access to an I2C bus segment
+ * i2c_adapter_trylock_bus - Try to get exclusive access to an I2C bus segment
* @adapter: Target I2C bus segment
+ * @flags: I2C_LOCK_ROOT_ADAPTER trylocks the root i2c adapter, I2C_LOCK_SEGMENT
+ * trylocks only this branch in the adapter tree
*/
-static int i2c_trylock_adapter(struct i2c_adapter *adapter)
+static int i2c_adapter_trylock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
{
- struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
-
- if (parent)
- return i2c_trylock_adapter(parent);
- else
- return rt_mutex_trylock(&adapter->bus_lock);
+ return rt_mutex_trylock(&adapter->bus_lock);
}
/**
- * i2c_unlock_adapter - Release exclusive access to an I2C bus segment
+ * i2c_adapter_unlock_bus - Release exclusive access to an I2C bus segment
* @adapter: Target I2C bus segment
+ * @flags: I2C_LOCK_ROOT_ADAPTER unlocks the root i2c adapter, I2C_LOCK_SEGMENT
+ * unlocks only this branch in the adapter tree
*/
-void i2c_unlock_adapter(struct i2c_adapter *adapter)
+static void i2c_adapter_unlock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
{
- struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
-
- if (parent)
- i2c_unlock_adapter(parent);
- else
- rt_mutex_unlock(&adapter->bus_lock);
+ rt_mutex_unlock(&adapter->bus_lock);
}
-EXPORT_SYMBOL_GPL(i2c_unlock_adapter);
static void i2c_dev_set_name(struct i2c_adapter *adap,
struct i2c_client *client)
@@ -1541,7 +1533,14 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
return -EINVAL;
}
+ if (!adap->lock_bus) {
+ adap->lock_bus = i2c_adapter_lock_bus;
+ adap->trylock_bus = i2c_adapter_trylock_bus;
+ adap->unlock_bus = i2c_adapter_unlock_bus;
+ }
+
rt_mutex_init(&adap->bus_lock);
+ rt_mutex_init(&adap->mux_lock);
mutex_init(&adap->userspace_clients_lock);
INIT_LIST_HEAD(&adap->userspace_clients);
@@ -1559,6 +1558,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);
pm_runtime_no_callbacks(&adap->dev);
+ pm_suspend_ignore_children(&adap->dev, true);
pm_runtime_enable(&adap->dev);
#ifdef CONFIG_I2C_COMPAT
@@ -1594,10 +1594,12 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
bri->get_scl = get_scl_gpio_value;
bri->set_scl = set_scl_gpio_value;
- } else if (!bri->set_scl || !bri->get_scl) {
+ } else if (bri->recover_bus == i2c_generic_scl_recovery) {
/* Generic SCL recovery */
- dev_err(&adap->dev, "No {get|set}_gpio() found, not using recovery\n");
- adap->bus_recovery_info = NULL;
+ if (!bri->set_scl || !bri->get_scl) {
+ dev_err(&adap->dev, "No {get|set}_scl() found, not using recovery\n");
+ adap->bus_recovery_info = NULL;
+ }
}
}
@@ -2309,16 +2311,16 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
#endif
if (in_atomic() || irqs_disabled()) {
- ret = i2c_trylock_adapter(adap);
+ ret = adap->trylock_bus(adap, I2C_LOCK_SEGMENT);
if (!ret)
/* I2C activity is ongoing. */
return -EAGAIN;
} else {
- i2c_lock_adapter(adap);
+ i2c_lock_bus(adap, I2C_LOCK_SEGMENT);
}
ret = __i2c_transfer(adap, msgs, num);
- i2c_unlock_adapter(adap);
+ i2c_unlock_bus(adap, I2C_LOCK_SEGMENT);
return ret;
} else {
@@ -2646,7 +2648,7 @@ static u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count)
static u8 i2c_smbus_msg_pec(u8 pec, struct i2c_msg *msg)
{
/* The address will be sent first */
- u8 addr = (msg->addr << 1) | !!(msg->flags & I2C_M_RD);
+ u8 addr = i2c_8bit_addr_from_msg(msg);
pec = i2c_smbus_pec(pec, &addr, 1);
/* The data buffer follows */
@@ -3093,7 +3095,7 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
flags &= I2C_M_TEN | I2C_CLIENT_PEC | I2C_CLIENT_SCCB;
if (adapter->algo->smbus_xfer) {
- i2c_lock_adapter(adapter);
+ i2c_lock_bus(adapter, I2C_LOCK_SEGMENT);
/* Retry automatically on arbitration loss */
orig_jiffies = jiffies;
@@ -3107,7 +3109,7 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
orig_jiffies + adapter->timeout))
break;
}
- i2c_unlock_adapter(adapter);
+ i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);
if (res != -EOPNOTSUPP || !adapter->algo->master_xfer)
goto trace;
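
With per-adapter callbacks installed at registration time, i2c_lock_bus() and friends become thin dispatchers; the flags argument (I2C_LOCK_ROOT_ADAPTER vs. I2C_LOCK_SEGMENT) tells the callback how far up the mux tree to lock. A sketch of the dispatch, which the real inlines in <linux/i2c.h> should resemble:

/* Adapters that install no callbacks fall back to the
 * i2c_adapter_{lock,trylock,unlock}_bus defaults set above. */
static inline void i2c_lock_bus_sketch(struct i2c_adapter *adap,
				       unsigned int flags)
{
	adap->lock_bus(adap, flags);	/* root adapter or just this segment */
}

static inline void i2c_unlock_bus_sketch(struct i2c_adapter *adap,
					 unsigned int flags)
{
	adap->unlock_bus(adap, flags);
}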
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 0b1108d3c..6ecfd7627 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -22,6 +22,7 @@
/* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */
+#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/i2c-dev.h>
@@ -47,9 +48,10 @@ struct i2c_dev {
struct list_head list;
struct i2c_adapter *adap;
struct device *dev;
+ struct cdev cdev;
};
-#define I2C_MINORS 256
+#define I2C_MINORS MINORMASK
static LIST_HEAD(i2c_dev_list);
static DEFINE_SPINLOCK(i2c_dev_list_lock);
@@ -89,7 +91,7 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap)
return i2c_dev;
}
-static void return_i2c_dev(struct i2c_dev *i2c_dev)
+static void put_i2c_dev(struct i2c_dev *i2c_dev)
{
spin_lock(&i2c_dev_list_lock);
list_del(&i2c_dev->list);
@@ -552,6 +554,12 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
if (IS_ERR(i2c_dev))
return PTR_ERR(i2c_dev);
+ cdev_init(&i2c_dev->cdev, &i2cdev_fops);
+ i2c_dev->cdev.owner = THIS_MODULE;
+ res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1);
+ if (res)
+ goto error_cdev;
+
/* register this i2c device with the driver core */
i2c_dev->dev = device_create(i2c_dev_class, &adap->dev,
MKDEV(I2C_MAJOR, adap->nr), NULL,
@@ -565,7 +573,9 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
adap->name, adap->nr);
return 0;
error:
- return_i2c_dev(i2c_dev);
+ cdev_del(&i2c_dev->cdev);
+error_cdev:
+ put_i2c_dev(i2c_dev);
return res;
}
@@ -582,7 +592,8 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
if (!i2c_dev) /* attach_adapter must have failed */
return 0;
- return_i2c_dev(i2c_dev);
+ cdev_del(&i2c_dev->cdev);
+ put_i2c_dev(i2c_dev);
device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
@@ -620,7 +631,7 @@ static int __init i2c_dev_init(void)
printk(KERN_INFO "i2c /dev entries driver\n");
- res = register_chrdev(I2C_MAJOR, "i2c", &i2cdev_fops);
+ res = register_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS, "i2c");
if (res)
goto out;
@@ -644,7 +655,7 @@ static int __init i2c_dev_init(void)
out_unreg_class:
class_destroy(i2c_dev_class);
out_unreg_chrdev:
- unregister_chrdev(I2C_MAJOR, "i2c");
+ unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
out:
printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__);
return res;
@@ -655,7 +666,7 @@ static void __exit i2c_dev_exit(void)
bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
i2c_for_each_dev(NULL, i2cdev_detach_adapter);
class_destroy(i2c_dev_class);
- unregister_chrdev(I2C_MAJOR, "i2c");
+ unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
}
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
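
The i2c-dev conversion trades the all-in-one register_chrdev() for an explicitly reserved minor region plus one struct cdev per adapter, so minor numbers map directly to adapter numbers. The attach-path ordering matters: the cdev must be live before the device node appears, and teardown runs in reverse. A sketch of that sequence (my_fops, my_class and minor are placeholders):

/* Per-device char-device registration, as in the attach path above. */
cdev_init(&i2c_dev->cdev, &my_fops);
i2c_dev->cdev.owner = THIS_MODULE;
res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, minor), 1);
if (res)
	return res;			/* nothing to unwind yet */

/* create the device node only after the cdev is live */
i2c_dev->dev = device_create(my_class, parent, MKDEV(I2C_MAJOR, minor),
			     NULL, "i2c-%d", minor);
if (IS_ERR(i2c_dev->dev)) {
	cdev_del(&i2c_dev->cdev);	/* unwind in reverse order */
	return PTR_ERR(i2c_dev->dev);
}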
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index d4022878b..8eee98634 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -31,30 +31,66 @@
struct i2c_mux_priv {
struct i2c_adapter adap;
struct i2c_algorithm algo;
-
- struct i2c_adapter *parent;
- struct device *mux_dev;
- void *mux_priv;
+ struct i2c_mux_core *muxc;
u32 chan_id;
-
- int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
- int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
};
+static int __i2c_mux_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg msgs[], int num)
+{
+ struct i2c_mux_priv *priv = adap->algo_data;
+ struct i2c_mux_core *muxc = priv->muxc;
+ struct i2c_adapter *parent = muxc->parent;
+ int ret;
+
+ /* Switch to the right mux port and perform the transfer. */
+
+ ret = muxc->select(muxc, priv->chan_id);
+ if (ret >= 0)
+ ret = __i2c_transfer(parent, msgs, num);
+ if (muxc->deselect)
+ muxc->deselect(muxc, priv->chan_id);
+
+ return ret;
+}
+
static int i2c_mux_master_xfer(struct i2c_adapter *adap,
struct i2c_msg msgs[], int num)
{
struct i2c_mux_priv *priv = adap->algo_data;
- struct i2c_adapter *parent = priv->parent;
+ struct i2c_mux_core *muxc = priv->muxc;
+ struct i2c_adapter *parent = muxc->parent;
int ret;
/* Switch to the right mux port and perform the transfer. */
- ret = priv->select(parent, priv->mux_priv, priv->chan_id);
+ ret = muxc->select(muxc, priv->chan_id);
if (ret >= 0)
- ret = __i2c_transfer(parent, msgs, num);
- if (priv->deselect)
- priv->deselect(parent, priv->mux_priv, priv->chan_id);
+ ret = i2c_transfer(parent, msgs, num);
+ if (muxc->deselect)
+ muxc->deselect(muxc, priv->chan_id);
+
+ return ret;
+}
+
+static int __i2c_mux_smbus_xfer(struct i2c_adapter *adap,
+ u16 addr, unsigned short flags,
+ char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ struct i2c_mux_priv *priv = adap->algo_data;
+ struct i2c_mux_core *muxc = priv->muxc;
+ struct i2c_adapter *parent = muxc->parent;
+ int ret;
+
+ /* Select the right mux port and perform the transfer. */
+
+ ret = muxc->select(muxc, priv->chan_id);
+ if (ret >= 0)
+ ret = parent->algo->smbus_xfer(parent, addr, flags,
+ read_write, command, size, data);
+ if (muxc->deselect)
+ muxc->deselect(muxc, priv->chan_id);
return ret;
}
@@ -65,17 +101,18 @@ static int i2c_mux_smbus_xfer(struct i2c_adapter *adap,
int size, union i2c_smbus_data *data)
{
struct i2c_mux_priv *priv = adap->algo_data;
- struct i2c_adapter *parent = priv->parent;
+ struct i2c_mux_core *muxc = priv->muxc;
+ struct i2c_adapter *parent = muxc->parent;
int ret;
/* Select the right mux port and perform the transfer. */
- ret = priv->select(parent, priv->mux_priv, priv->chan_id);
+ ret = muxc->select(muxc, priv->chan_id);
if (ret >= 0)
- ret = parent->algo->smbus_xfer(parent, addr, flags,
- read_write, command, size, data);
- if (priv->deselect)
- priv->deselect(parent, priv->mux_priv, priv->chan_id);
+ ret = i2c_smbus_xfer(parent, addr, flags,
+ read_write, command, size, data);
+ if (muxc->deselect)
+ muxc->deselect(muxc, priv->chan_id);
return ret;
}
@@ -84,7 +121,7 @@ static int i2c_mux_smbus_xfer(struct i2c_adapter *adap,
static u32 i2c_mux_functionality(struct i2c_adapter *adap)
{
struct i2c_mux_priv *priv = adap->algo_data;
- struct i2c_adapter *parent = priv->parent;
+ struct i2c_adapter *parent = priv->muxc->parent;
return parent->algo->functionality(parent);
}
@@ -102,38 +139,167 @@ static unsigned int i2c_mux_parent_classes(struct i2c_adapter *parent)
return class;
}
-struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
- struct device *mux_dev,
- void *mux_priv, u32 force_nr, u32 chan_id,
- unsigned int class,
- int (*select) (struct i2c_adapter *,
- void *, u32),
- int (*deselect) (struct i2c_adapter *,
- void *, u32))
+static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ struct i2c_mux_priv *priv = adapter->algo_data;
+ struct i2c_adapter *parent = priv->muxc->parent;
+
+ rt_mutex_lock(&parent->mux_lock);
+ if (!(flags & I2C_LOCK_ROOT_ADAPTER))
+ return;
+ i2c_lock_bus(parent, flags);
+}
+
+static int i2c_mux_trylock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ struct i2c_mux_priv *priv = adapter->algo_data;
+ struct i2c_adapter *parent = priv->muxc->parent;
+
+ if (!rt_mutex_trylock(&parent->mux_lock))
+ return 0; /* mux_lock not locked, failure */
+ if (!(flags & I2C_LOCK_ROOT_ADAPTER))
+ return 1; /* we only want mux_lock, success */
+ if (parent->trylock_bus(parent, flags))
+ return 1; /* parent locked too, success */
+ rt_mutex_unlock(&parent->mux_lock);
+ return 0; /* parent not locked, failure */
+}
+
+static void i2c_mux_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ struct i2c_mux_priv *priv = adapter->algo_data;
+ struct i2c_adapter *parent = priv->muxc->parent;
+
+ if (flags & I2C_LOCK_ROOT_ADAPTER)
+ i2c_unlock_bus(parent, flags);
+ rt_mutex_unlock(&parent->mux_lock);
+}
+
+static void i2c_parent_lock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ struct i2c_mux_priv *priv = adapter->algo_data;
+ struct i2c_adapter *parent = priv->muxc->parent;
+
+ rt_mutex_lock(&parent->mux_lock);
+ i2c_lock_bus(parent, flags);
+}
+
+static int i2c_parent_trylock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ struct i2c_mux_priv *priv = adapter->algo_data;
+ struct i2c_adapter *parent = priv->muxc->parent;
+
+ if (!rt_mutex_trylock(&parent->mux_lock))
+ return 0; /* mux_lock not locked, failure */
+ if (parent->trylock_bus(parent, flags))
+ return 1; /* parent locked too, success */
+ rt_mutex_unlock(&parent->mux_lock);
+ return 0; /* parent not locked, failure */
+}
+
+static void i2c_parent_unlock_bus(struct i2c_adapter *adapter,
+ unsigned int flags)
+{
+ struct i2c_mux_priv *priv = adapter->algo_data;
+ struct i2c_adapter *parent = priv->muxc->parent;
+
+ i2c_unlock_bus(parent, flags);
+ rt_mutex_unlock(&parent->mux_lock);
+}
+
+struct i2c_adapter *i2c_root_adapter(struct device *dev)
+{
+ struct device *i2c;
+ struct i2c_adapter *i2c_root;
+
+ /*
+ * Walk up the device tree to find an i2c adapter, indicating
+ * that this is an i2c client device. Check all ancestors to
+ * handle mfd devices etc.
+ */
+ for (i2c = dev; i2c; i2c = i2c->parent) {
+ if (i2c->type == &i2c_adapter_type)
+ break;
+ }
+ if (!i2c)
+ return NULL;
+
+ /* Continue up the tree to find the root i2c adapter */
+ i2c_root = to_i2c_adapter(i2c);
+ while (i2c_parent_is_i2c_adapter(i2c_root))
+ i2c_root = i2c_parent_is_i2c_adapter(i2c_root);
+
+ return i2c_root;
+}
+EXPORT_SYMBOL_GPL(i2c_root_adapter);
+
+struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
+ struct device *dev, int max_adapters,
+ int sizeof_priv, u32 flags,
+ int (*select)(struct i2c_mux_core *, u32),
+ int (*deselect)(struct i2c_mux_core *, u32))
+{
+ struct i2c_mux_core *muxc;
+
+ muxc = devm_kzalloc(dev, sizeof(*muxc)
+ + max_adapters * sizeof(muxc->adapter[0])
+ + sizeof_priv, GFP_KERNEL);
+ if (!muxc)
+ return NULL;
+ if (sizeof_priv)
+ muxc->priv = &muxc->adapter[max_adapters];
+
+ muxc->parent = parent;
+ muxc->dev = dev;
+ if (flags & I2C_MUX_LOCKED)
+ muxc->mux_locked = true;
+ muxc->select = select;
+ muxc->deselect = deselect;
+ muxc->max_adapters = max_adapters;
+
+ return muxc;
+}
+EXPORT_SYMBOL_GPL(i2c_mux_alloc);
+
+int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
+ u32 force_nr, u32 chan_id,
+ unsigned int class)
{
+ struct i2c_adapter *parent = muxc->parent;
struct i2c_mux_priv *priv;
char symlink_name[20];
int ret;
- priv = kzalloc(sizeof(struct i2c_mux_priv), GFP_KERNEL);
+ if (muxc->num_adapters >= muxc->max_adapters) {
+ dev_err(muxc->dev, "No room for more i2c-mux adapters\n");
+ return -EINVAL;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
- return NULL;
+ return -ENOMEM;
/* Set up private adapter data */
- priv->parent = parent;
- priv->mux_dev = mux_dev;
- priv->mux_priv = mux_priv;
+ priv->muxc = muxc;
priv->chan_id = chan_id;
- priv->select = select;
- priv->deselect = deselect;
/* Need to do algo dynamically because we don't know ahead
* of time what sort of physical adapter we'll be dealing with.
*/
- if (parent->algo->master_xfer)
- priv->algo.master_xfer = i2c_mux_master_xfer;
- if (parent->algo->smbus_xfer)
- priv->algo.smbus_xfer = i2c_mux_smbus_xfer;
+ if (parent->algo->master_xfer) {
+ if (muxc->mux_locked)
+ priv->algo.master_xfer = i2c_mux_master_xfer;
+ else
+ priv->algo.master_xfer = __i2c_mux_master_xfer;
+ }
+ if (parent->algo->smbus_xfer) {
+ if (muxc->mux_locked)
+ priv->algo.smbus_xfer = i2c_mux_smbus_xfer;
+ else
+ priv->algo.smbus_xfer = __i2c_mux_smbus_xfer;
+ }
priv->algo.functionality = i2c_mux_functionality;
/* Now fill out new adapter structure */
@@ -146,6 +312,15 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
priv->adap.retries = parent->retries;
priv->adap.timeout = parent->timeout;
priv->adap.quirks = parent->quirks;
+ if (muxc->mux_locked) {
+ priv->adap.lock_bus = i2c_mux_lock_bus;
+ priv->adap.trylock_bus = i2c_mux_trylock_bus;
+ priv->adap.unlock_bus = i2c_mux_unlock_bus;
+ } else {
+ priv->adap.lock_bus = i2c_parent_lock_bus;
+ priv->adap.trylock_bus = i2c_parent_trylock_bus;
+ priv->adap.unlock_bus = i2c_parent_unlock_bus;
+ }
/* Sanity check on class */
if (i2c_mux_parent_classes(parent) & class)
@@ -159,11 +334,11 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
* Try to populate the mux adapter's of_node, expands to
* nothing if !CONFIG_OF.
*/
- if (mux_dev->of_node) {
+ if (muxc->dev->of_node) {
struct device_node *child;
u32 reg;
- for_each_child_of_node(mux_dev->of_node, child) {
+ for_each_child_of_node(muxc->dev->of_node, child) {
ret = of_property_read_u32(child, "reg", &reg);
if (ret)
continue;
@@ -177,8 +352,9 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
/*
* Associate the mux channel with an ACPI node.
*/
- if (has_acpi_companion(mux_dev))
- acpi_preset_companion(&priv->adap.dev, ACPI_COMPANION(mux_dev),
+ if (has_acpi_companion(muxc->dev))
+ acpi_preset_companion(&priv->adap.dev,
+ ACPI_COMPANION(muxc->dev),
chan_id);
if (force_nr) {
@@ -192,35 +368,45 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
"failed to add mux-adapter (error=%d)\n",
ret);
kfree(priv);
- return NULL;
+ return ret;
}
- WARN(sysfs_create_link(&priv->adap.dev.kobj, &mux_dev->kobj, "mux_device"),
- "can't create symlink to mux device\n");
+ WARN(sysfs_create_link(&priv->adap.dev.kobj, &muxc->dev->kobj,
+ "mux_device"),
+ "can't create symlink to mux device\n");
snprintf(symlink_name, sizeof(symlink_name), "channel-%u", chan_id);
- WARN(sysfs_create_link(&mux_dev->kobj, &priv->adap.dev.kobj, symlink_name),
- "can't create symlink for channel %u\n", chan_id);
+ WARN(sysfs_create_link(&muxc->dev->kobj, &priv->adap.dev.kobj,
+ symlink_name),
+ "can't create symlink for channel %u\n", chan_id);
dev_info(&parent->dev, "Added multiplexed i2c bus %d\n",
i2c_adapter_id(&priv->adap));
- return &priv->adap;
+ muxc->adapter[muxc->num_adapters++] = &priv->adap;
+ return 0;
}
-EXPORT_SYMBOL_GPL(i2c_add_mux_adapter);
+EXPORT_SYMBOL_GPL(i2c_mux_add_adapter);
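The lock_bus/trylock_bus/unlock_bus trio wired up in the function above is the heart of the new locking model: a parent-locked mux (the classic kind) holds the entire parent bus across select + transfer + deselect, while a mux-locked mux (I2C_MUX_LOCKED) holds only itself, letting unrelated traffic on the parent interleave. The practical rule for select/deselect callbacks, sketched with a hypothetical register-controlled mux: under parent-locked operation the parent is already held, so the callback must use the unlocked __i2c_transfer(), whereas a mux-locked callback may use plain i2c_transfer().

    /* Hypothetical select callback for a parent-locked mux */
    static int demo_select(struct i2c_mux_core *muxc, u32 chan)
    {
            struct demo_mux *mux = i2c_mux_priv(muxc);
            u8 buf[1] = { chan };
            struct i2c_msg msg = {
                    .addr = mux->client->addr,
                    .len = sizeof(buf),
                    .buf = buf,
            };

            /* plain i2c_transfer() would deadlock on the held parent lock */
            return __i2c_transfer(muxc->parent, &msg, 1) == 1 ? 0 : -EIO;
    }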
-void i2c_del_mux_adapter(struct i2c_adapter *adap)
+void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
{
- struct i2c_mux_priv *priv = adap->algo_data;
char symlink_name[20];
- snprintf(symlink_name, sizeof(symlink_name), "channel-%u", priv->chan_id);
- sysfs_remove_link(&priv->mux_dev->kobj, symlink_name);
+ while (muxc->num_adapters) {
+ struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters];
+ struct i2c_mux_priv *priv = adap->algo_data;
+
+ muxc->adapter[muxc->num_adapters] = NULL;
+
+ snprintf(symlink_name, sizeof(symlink_name),
+ "channel-%u", priv->chan_id);
+ sysfs_remove_link(&muxc->dev->kobj, symlink_name);
- sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
- i2c_del_adapter(adap);
- kfree(priv);
+ sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
+ i2c_del_adapter(adap);
+ kfree(priv);
+ }
}
-EXPORT_SYMBOL_GPL(i2c_del_mux_adapter);
+EXPORT_SYMBOL_GPL(i2c_mux_del_adapters);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_DESCRIPTION("I2C driver for multiplexed I2C busses");
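Taken together, the replacement API collapses the old one-adapter-at-a-time calls into one alloc, N adds and a single del, which is the shape every driver conversion below follows. A minimal sketch (parent, NCHANS, demo_priv and the callbacks are placeholders):

    static int demo_probe(struct platform_device *pdev)
    {
            struct i2c_mux_core *muxc;
            int i, ret;

            muxc = i2c_mux_alloc(parent, &pdev->dev, NCHANS,
                                 sizeof(struct demo_priv), 0,
                                 demo_select, demo_deselect);
            if (!muxc)
                    return -ENOMEM;
            platform_set_drvdata(pdev, muxc);

            for (i = 0; i < NCHANS; i++) {
                    ret = i2c_mux_add_adapter(muxc, 0, i, 0);
                    if (ret) {
                            i2c_mux_del_adapters(muxc); /* undoes 0..i-1 */
                            return ret;
                    }
            }
            return 0;
    }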
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 402e3a6c6..a90bbc403 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -28,8 +28,6 @@
/**
* struct i2c_arbitrator_data - Driver data for I2C arbitrator
*
- * @parent: Parent adapter
- * @child: Child bus
* @our_gpio: GPIO we'll use to claim.
* @our_gpio_release: 0 if active high; 1 if active low; that is, when the
* GPIO reads this value, our claim is considered released.
@@ -42,8 +40,6 @@
*/
struct i2c_arbitrator_data {
- struct i2c_adapter *parent;
- struct i2c_adapter *child;
int our_gpio;
int our_gpio_release;
int their_gpio;
@@ -59,9 +55,9 @@ struct i2c_arbitrator_data {
*
* Use the GPIO-based signalling protocol; return -EBUSY if we fail.
*/
-static int i2c_arbitrator_select(struct i2c_adapter *adap, void *data, u32 chan)
+static int i2c_arbitrator_select(struct i2c_mux_core *muxc, u32 chan)
{
- const struct i2c_arbitrator_data *arb = data;
+ const struct i2c_arbitrator_data *arb = i2c_mux_priv(muxc);
unsigned long stop_retry, stop_time;
/* Start a round of trying to claim the bus */
@@ -93,7 +89,7 @@ static int i2c_arbitrator_select(struct i2c_adapter *adap, void *data, u32 chan)
/* Give up, release our claim */
gpio_set_value(arb->our_gpio, arb->our_gpio_release);
udelay(arb->slew_delay_us);
- dev_err(&adap->dev, "Could not claim bus, timeout\n");
+ dev_err(muxc->dev, "Could not claim bus, timeout\n");
return -EBUSY;
}
@@ -102,10 +98,9 @@ static int i2c_arbitrator_select(struct i2c_adapter *adap, void *data, u32 chan)
*
* Release the I2C bus using the GPIO-based signalling protocol.
*/
-static int i2c_arbitrator_deselect(struct i2c_adapter *adap, void *data,
- u32 chan)
+static int i2c_arbitrator_deselect(struct i2c_mux_core *muxc, u32 chan)
{
- const struct i2c_arbitrator_data *arb = data;
+ const struct i2c_arbitrator_data *arb = i2c_mux_priv(muxc);
/* Release the bus and wait for the other master to notice */
gpio_set_value(arb->our_gpio, arb->our_gpio_release);
@@ -119,6 +114,7 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *parent_np;
+ struct i2c_mux_core *muxc;
struct i2c_arbitrator_data *arb;
enum of_gpio_flags gpio_flags;
unsigned long out_init;
@@ -134,12 +130,13 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
return -EINVAL;
}
- arb = devm_kzalloc(dev, sizeof(*arb), GFP_KERNEL);
- if (!arb) {
- dev_err(dev, "Cannot allocate i2c_arbitrator_data\n");
+ muxc = i2c_mux_alloc(NULL, dev, 1, sizeof(*arb), 0,
+ i2c_arbitrator_select, i2c_arbitrator_deselect);
+ if (!muxc)
return -ENOMEM;
- }
- platform_set_drvdata(pdev, arb);
+ arb = i2c_mux_priv(muxc);
+
+ platform_set_drvdata(pdev, muxc);
/* Request GPIOs */
ret = of_get_named_gpio_flags(np, "our-claim-gpio", 0, &gpio_flags);
@@ -196,21 +193,18 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
dev_err(dev, "Cannot parse i2c-parent\n");
return -EINVAL;
}
- arb->parent = of_get_i2c_adapter_by_node(parent_np);
+ muxc->parent = of_get_i2c_adapter_by_node(parent_np);
of_node_put(parent_np);
- if (!arb->parent) {
+ if (!muxc->parent) {
dev_err(dev, "Cannot find parent bus\n");
return -EPROBE_DEFER;
}
/* Actually add the mux adapter */
- arb->child = i2c_add_mux_adapter(arb->parent, dev, arb, 0, 0, 0,
- i2c_arbitrator_select,
- i2c_arbitrator_deselect);
- if (!arb->child) {
+ ret = i2c_mux_add_adapter(muxc, 0, 0, 0);
+ if (ret) {
dev_err(dev, "Failed to add adapter\n");
- ret = -ENODEV;
- i2c_put_adapter(arb->parent);
+ i2c_put_adapter(muxc->parent);
}
return ret;
@@ -218,11 +212,10 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
static int i2c_arbitrator_remove(struct platform_device *pdev)
{
- struct i2c_arbitrator_data *arb = platform_get_drvdata(pdev);
-
- i2c_del_mux_adapter(arb->child);
- i2c_put_adapter(arb->parent);
+ struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
+ i2c_mux_del_adapters(muxc);
+ i2c_put_adapter(muxc->parent);
return 0;
}
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index b8e11c16d..e5cf26eef 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -15,11 +15,10 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gpio.h>
+#include "../../gpio/gpiolib.h"
#include <linux/of_gpio.h>
struct gpiomux {
- struct i2c_adapter *parent;
- struct i2c_adapter **adap; /* child busses */
struct i2c_mux_gpio_platform_data data;
unsigned gpio_base;
};
@@ -33,18 +32,18 @@ static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
val & (1 << i));
}
-static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan)
+static int i2c_mux_gpio_select(struct i2c_mux_core *muxc, u32 chan)
{
- struct gpiomux *mux = data;
+ struct gpiomux *mux = i2c_mux_priv(muxc);
i2c_mux_gpio_set(mux, chan);
return 0;
}
-static int i2c_mux_gpio_deselect(struct i2c_adapter *adap, void *data, u32 chan)
+static int i2c_mux_gpio_deselect(struct i2c_mux_core *muxc, u32 chan)
{
- struct gpiomux *mux = data;
+ struct gpiomux *mux = i2c_mux_priv(muxc);
i2c_mux_gpio_set(mux, mux->data.idle);
@@ -136,19 +135,16 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
static int i2c_mux_gpio_probe(struct platform_device *pdev)
{
+ struct i2c_mux_core *muxc;
struct gpiomux *mux;
struct i2c_adapter *parent;
- int (*deselect) (struct i2c_adapter *, void *, u32);
+ struct i2c_adapter *root;
unsigned initial_state, gpio_base;
int i, ret;
mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
- if (!mux) {
- dev_err(&pdev->dev, "Cannot allocate gpiomux structure");
+ if (!mux)
return -ENOMEM;
- }
-
- platform_set_drvdata(pdev, mux);
if (!dev_get_platdata(&pdev->dev)) {
ret = i2c_mux_gpio_probe_dt(mux, pdev);
@@ -180,27 +176,32 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
if (!parent)
return -EPROBE_DEFER;
- mux->parent = parent;
- mux->gpio_base = gpio_base;
-
- mux->adap = devm_kzalloc(&pdev->dev,
- sizeof(*mux->adap) * mux->data.n_values,
- GFP_KERNEL);
- if (!mux->adap) {
- dev_err(&pdev->dev, "Cannot allocate i2c_adapter structure");
+ muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0,
+ i2c_mux_gpio_select, NULL);
+ if (!muxc) {
ret = -ENOMEM;
goto alloc_failed;
}
+ muxc->priv = mux;
+
+ platform_set_drvdata(pdev, muxc);
+
+ root = i2c_root_adapter(&parent->dev);
+
+ muxc->mux_locked = true;
+ mux->gpio_base = gpio_base;
if (mux->data.idle != I2C_MUX_GPIO_NO_IDLE) {
initial_state = mux->data.idle;
- deselect = i2c_mux_gpio_deselect;
+ muxc->deselect = i2c_mux_gpio_deselect;
} else {
initial_state = mux->data.values[0];
- deselect = NULL;
}
for (i = 0; i < mux->data.n_gpios; i++) {
+ struct device *gpio_dev;
+ struct gpio_desc *gpio_desc;
+
ret = gpio_request(gpio_base + mux->data.gpios[i], "i2c-mux-gpio");
if (ret) {
dev_err(&pdev->dev, "Failed to request GPIO %d\n",
@@ -217,17 +218,24 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
i++; /* gpio_request above succeeded, so must free */
goto err_request_gpio;
}
+
+ if (!muxc->mux_locked)
+ continue;
+
+ gpio_desc = gpio_to_desc(gpio_base + mux->data.gpios[i]);
+ gpio_dev = &gpio_desc->gdev->dev;
+ muxc->mux_locked = i2c_root_adapter(gpio_dev) == root;
}
+ if (muxc->mux_locked)
+ dev_info(&pdev->dev, "mux-locked i2c mux\n");
+
for (i = 0; i < mux->data.n_values; i++) {
u32 nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0;
unsigned int class = mux->data.classes ? mux->data.classes[i] : 0;
- mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr,
- mux->data.values[i], class,
- i2c_mux_gpio_select, deselect);
- if (!mux->adap[i]) {
- ret = -ENODEV;
+ ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class);
+ if (ret) {
dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
goto add_adapter_failed;
}
@@ -239,8 +247,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
return 0;
add_adapter_failed:
- for (; i > 0; i--)
- i2c_del_mux_adapter(mux->adap[i - 1]);
+ i2c_mux_del_adapters(muxc);
i = mux->data.n_gpios;
err_request_gpio:
for (; i > 0; i--)
@@ -253,16 +260,16 @@ alloc_failed:
static int i2c_mux_gpio_remove(struct platform_device *pdev)
{
- struct gpiomux *mux = platform_get_drvdata(pdev);
+ struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
+ struct gpiomux *mux = i2c_mux_priv(muxc);
int i;
- for (i = 0; i < mux->data.n_values; i++)
- i2c_del_mux_adapter(mux->adap[i]);
+ i2c_mux_del_adapters(muxc);
for (i = 0; i < mux->data.n_gpios; i++)
gpio_free(mux->gpio_base + mux->data.gpios[i]);
- i2c_put_adapter(mux->parent);
+ i2c_put_adapter(muxc->parent);
return 0;
}
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index d0ba424ad..3cb8af635 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -73,7 +73,7 @@
#define SELECT_DELAY_LONG 1000
struct pca9541 {
- struct i2c_adapter *mux_adap;
+ struct i2c_client *client;
unsigned long select_timeout;
unsigned long arb_timeout;
};
@@ -217,7 +217,8 @@ static const u8 pca9541_control[16] = {
*/
static int pca9541_arbitrate(struct i2c_client *client)
{
- struct pca9541 *data = i2c_get_clientdata(client);
+ struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+ struct pca9541 *data = i2c_mux_priv(muxc);
int reg;
reg = pca9541_reg_read(client, PCA9541_CONTROL);
@@ -285,9 +286,10 @@ static int pca9541_arbitrate(struct i2c_client *client)
return 0;
}
-static int pca9541_select_chan(struct i2c_adapter *adap, void *client, u32 chan)
+static int pca9541_select_chan(struct i2c_mux_core *muxc, u32 chan)
{
- struct pca9541 *data = i2c_get_clientdata(client);
+ struct pca9541 *data = i2c_mux_priv(muxc);
+ struct i2c_client *client = data->client;
int ret;
unsigned long timeout = jiffies + ARB2_TIMEOUT;
/* give up after this time */
@@ -309,9 +311,11 @@ static int pca9541_select_chan(struct i2c_adapter *adap, void *client, u32 chan)
return -ETIMEDOUT;
}
-static int pca9541_release_chan(struct i2c_adapter *adap,
- void *client, u32 chan)
+static int pca9541_release_chan(struct i2c_mux_core *muxc, u32 chan)
{
+ struct pca9541 *data = i2c_mux_priv(muxc);
+ struct i2c_client *client = data->client;
+
pca9541_release_bus(client);
return 0;
}
@@ -324,20 +328,13 @@ static int pca9541_probe(struct i2c_client *client,
{
struct i2c_adapter *adap = client->adapter;
struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
+ struct i2c_mux_core *muxc;
struct pca9541 *data;
int force;
- int ret = -ENODEV;
+ int ret;
if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE_DATA))
- goto err;
-
- data = kzalloc(sizeof(struct pca9541), GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
- goto err;
- }
-
- i2c_set_clientdata(client, data);
+ return -ENODEV;
/*
* I2C accesses are unprotected here.
@@ -352,34 +349,33 @@ static int pca9541_probe(struct i2c_client *client,
force = 0;
if (pdata)
force = pdata->modes[0].adap_id;
- data->mux_adap = i2c_add_mux_adapter(adap, &client->dev, client,
- force, 0, 0,
- pca9541_select_chan,
- pca9541_release_chan);
+ muxc = i2c_mux_alloc(adap, &client->dev, 1, sizeof(*data), 0,
+ pca9541_select_chan, pca9541_release_chan);
+ if (!muxc)
+ return -ENOMEM;
- if (data->mux_adap == NULL) {
+ data = i2c_mux_priv(muxc);
+ data->client = client;
+
+ i2c_set_clientdata(client, muxc);
+
+ ret = i2c_mux_add_adapter(muxc, force, 0, 0);
+ if (ret) {
dev_err(&client->dev, "failed to register master selector\n");
- goto exit_free;
+ return ret;
}
dev_info(&client->dev, "registered master selector for I2C %s\n",
client->name);
return 0;
-
-exit_free:
- kfree(data);
-err:
- return ret;
}
static int pca9541_remove(struct i2c_client *client)
{
- struct pca9541 *data = i2c_get_clientdata(client);
-
- i2c_del_mux_adapter(data->mux_adap);
+ struct i2c_mux_core *muxc = i2c_get_clientdata(client);
- kfree(data);
+ i2c_mux_del_adapters(muxc);
return 0;
}
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index acfcef3d4..528e755c4 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -60,9 +60,10 @@ enum pca_type {
struct pca954x {
enum pca_type type;
- struct i2c_adapter *virt_adaps[PCA954X_MAX_NCHANS];
u8 last_chan; /* last register value */
+ u8 deselect;
+ struct i2c_client *client;
};
struct chip_desc {
@@ -146,10 +147,10 @@ static int pca954x_reg_write(struct i2c_adapter *adap,
return ret;
}
-static int pca954x_select_chan(struct i2c_adapter *adap,
- void *client, u32 chan)
+static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
{
- struct pca954x *data = i2c_get_clientdata(client);
+ struct pca954x *data = i2c_mux_priv(muxc);
+ struct i2c_client *client = data->client;
const struct chip_desc *chip = &chips[data->type];
u8 regval;
int ret = 0;
@@ -162,21 +163,24 @@ static int pca954x_select_chan(struct i2c_adapter *adap,
/* Only select the channel if it's different from the last channel */
if (data->last_chan != regval) {
- ret = pca954x_reg_write(adap, client, regval);
+ ret = pca954x_reg_write(muxc->parent, client, regval);
data->last_chan = regval;
}
return ret;
}
-static int pca954x_deselect_mux(struct i2c_adapter *adap,
- void *client, u32 chan)
+static int pca954x_deselect_mux(struct i2c_mux_core *muxc, u32 chan)
{
- struct pca954x *data = i2c_get_clientdata(client);
+ struct pca954x *data = i2c_mux_priv(muxc);
+ struct i2c_client *client = data->client;
+
+ if (!(data->deselect & (1 << chan)))
+ return 0;
/* Deselect active channel */
data->last_chan = 0;
- return pca954x_reg_write(adap, client, data->last_chan);
+ return pca954x_reg_write(muxc->parent, client, data->last_chan);
}
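With one shared deselect callback, the per-channel "disconnect on exit" choice that used to be expressed by passing a NULL callback moves into data->deselect, one bit per channel; probe (further down in this file's hunks) accumulates the bits and the early return above skips the register write when the channel's bit is clear. A fragment showing the round trip, values illustrative:

    u8 deselect = 0;

    deselect |= true << 0;          /* chan 0: disconnect when done */
    deselect |= false << 1;         /* chan 1: stay selected */

    /* deselect == 0x01, so for chan 1 the callback is a no-op: */
    if (!(deselect & (1 << 1)))
            return 0;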
/*
@@ -191,17 +195,22 @@ static int pca954x_probe(struct i2c_client *client,
bool idle_disconnect_dt;
struct gpio_desc *gpio;
int num, force, class;
+ struct i2c_mux_core *muxc;
struct pca954x *data;
int ret;
if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_BYTE))
return -ENODEV;
- data = devm_kzalloc(&client->dev, sizeof(struct pca954x), GFP_KERNEL);
- if (!data)
+ muxc = i2c_mux_alloc(adap, &client->dev,
+ PCA954X_MAX_NCHANS, sizeof(*data), 0,
+ pca954x_select_chan, pca954x_deselect_mux);
+ if (!muxc)
return -ENOMEM;
+ data = i2c_mux_priv(muxc);
- i2c_set_clientdata(client, data);
+ i2c_set_clientdata(client, muxc);
+ data->client = client;
/* Get the mux out of reset if a reset GPIO is specified. */
gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_LOW);
@@ -238,16 +247,13 @@ static int pca954x_probe(struct i2c_client *client,
/* discard unconfigured channels */
break;
idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
+ data->deselect |= (idle_disconnect_pd
+ || idle_disconnect_dt) << num;
}
- data->virt_adaps[num] =
- i2c_add_mux_adapter(adap, &client->dev, client,
- force, num, class, pca954x_select_chan,
- (idle_disconnect_pd || idle_disconnect_dt)
- ? pca954x_deselect_mux : NULL);
+ ret = i2c_mux_add_adapter(muxc, force, num, class);
- if (data->virt_adaps[num] == NULL) {
- ret = -ENODEV;
+ if (ret) {
dev_err(&client->dev,
"failed to register multiplexed adapter"
" %d as bus %d\n", num, force);
@@ -263,23 +269,15 @@ static int pca954x_probe(struct i2c_client *client,
return 0;
virt_reg_failed:
- for (num--; num >= 0; num--)
- i2c_del_mux_adapter(data->virt_adaps[num]);
+ i2c_mux_del_adapters(muxc);
return ret;
}
static int pca954x_remove(struct i2c_client *client)
{
- struct pca954x *data = i2c_get_clientdata(client);
- const struct chip_desc *chip = &chips[data->type];
- int i;
-
- for (i = 0; i < chip->nchans; ++i)
- if (data->virt_adaps[i]) {
- i2c_del_mux_adapter(data->virt_adaps[i]);
- data->virt_adaps[i] = NULL;
- }
+ struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+ i2c_mux_del_adapters(muxc);
return 0;
}
@@ -287,7 +285,8 @@ static int pca954x_remove(struct i2c_client *client)
static int pca954x_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct pca954x *data = i2c_get_clientdata(client);
+ struct i2c_mux_core *muxc = i2c_get_clientdata(client);
+ struct pca954x *data = i2c_mux_priv(muxc);
data->last_chan = 0;
return i2c_smbus_write_byte(client, 0);
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index b5a982ba8..35bb775e1 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -24,36 +24,32 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include "../../pinctrl/core.h"
struct i2c_mux_pinctrl {
- struct device *dev;
struct i2c_mux_pinctrl_platform_data *pdata;
struct pinctrl *pinctrl;
struct pinctrl_state **states;
struct pinctrl_state *state_idle;
- struct i2c_adapter *parent;
- struct i2c_adapter **busses;
};
-static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data,
- u32 chan)
+static int i2c_mux_pinctrl_select(struct i2c_mux_core *muxc, u32 chan)
{
- struct i2c_mux_pinctrl *mux = data;
+ struct i2c_mux_pinctrl *mux = i2c_mux_priv(muxc);
return pinctrl_select_state(mux->pinctrl, mux->states[chan]);
}
-static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data,
- u32 chan)
+static int i2c_mux_pinctrl_deselect(struct i2c_mux_core *muxc, u32 chan)
{
- struct i2c_mux_pinctrl *mux = data;
+ struct i2c_mux_pinctrl *mux = i2c_mux_priv(muxc);
return pinctrl_select_state(mux->pinctrl, mux->state_idle);
}
#ifdef CONFIG_OF
static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
- struct platform_device *pdev)
+ struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
int num_names, i, ret;
@@ -64,15 +60,12 @@ static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
return 0;
mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL);
- if (!mux->pdata) {
- dev_err(mux->dev,
- "Cannot allocate i2c_mux_pinctrl_platform_data\n");
+ if (!mux->pdata)
return -ENOMEM;
- }
num_names = of_property_count_strings(np, "pinctrl-names");
if (num_names < 0) {
- dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+ dev_err(&pdev->dev, "Cannot parse pinctrl-names: %d\n",
num_names);
return num_names;
}
@@ -80,23 +73,22 @@ static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev,
sizeof(*mux->pdata->pinctrl_states) * num_names,
GFP_KERNEL);
- if (!mux->pdata->pinctrl_states) {
- dev_err(mux->dev, "Cannot allocate pinctrl_states\n");
+ if (!mux->pdata->pinctrl_states)
return -ENOMEM;
- }
for (i = 0; i < num_names; i++) {
ret = of_property_read_string_index(np, "pinctrl-names", i,
&mux->pdata->pinctrl_states[mux->pdata->bus_count]);
if (ret < 0) {
- dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+ dev_err(&pdev->dev, "Cannot parse pinctrl-names: %d\n",
ret);
return ret;
}
if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count],
"idle")) {
if (i != num_names - 1) {
- dev_err(mux->dev, "idle state must be last\n");
+ dev_err(&pdev->dev,
+ "idle state must be last\n");
return -EINVAL;
}
mux->pdata->pinctrl_state_idle = "idle";
@@ -107,13 +99,13 @@ static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
adapter_np = of_parse_phandle(np, "i2c-parent", 0);
if (!adapter_np) {
- dev_err(mux->dev, "Cannot parse i2c-parent\n");
+ dev_err(&pdev->dev, "Cannot parse i2c-parent\n");
return -ENODEV;
}
adapter = of_find_i2c_adapter_by_node(adapter_np);
of_node_put(adapter_np);
if (!adapter) {
- dev_err(mux->dev, "Cannot find parent bus\n");
+ dev_err(&pdev->dev, "Cannot find parent bus\n");
return -EPROBE_DEFER;
}
mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
@@ -129,21 +121,38 @@ static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
}
#endif
+static struct i2c_adapter *i2c_mux_pinctrl_root_adapter(
+ struct pinctrl_state *state)
+{
+ struct i2c_adapter *root = NULL;
+ struct pinctrl_setting *setting;
+ struct i2c_adapter *pin_root;
+
+ list_for_each_entry(setting, &state->settings, node) {
+ pin_root = i2c_root_adapter(setting->pctldev->dev);
+ if (!pin_root)
+ return NULL;
+ if (!root)
+ root = pin_root;
+ else if (root != pin_root)
+ return NULL;
+ }
+
+ return root;
+}
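This helper feeds the mux-locked auto-detection in probe below. The topology question it answers: if the pin controller that switches the mux is itself an i2c client on the same root adapter as the mux's parent, then selecting a channel generates a transfer on that very bus, and holding the root lock across select (parent-locked behaviour) would deadlock, so such a mux must be mux-locked. Every setting of every state has to agree on one root for the answer to be usable. In sketch form (topology hypothetical):

    /*
     *   i2c-root ---- i2c mux (parent) ---- channels...
     *        \
     *         --- i2c-attached pin controller (switches the mux)
     *
     * select writes to the pin controller, i.e. transfers on i2c-root,
     * so the mux must not hold the root lock while selecting:
     */
    mux_locked = (i2c_mux_pinctrl_root_adapter(state) == root);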
+
static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
{
+ struct i2c_mux_core *muxc;
struct i2c_mux_pinctrl *mux;
- int (*deselect)(struct i2c_adapter *, void *, u32);
+ struct i2c_adapter *root;
int i, ret;
mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
if (!mux) {
- dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n");
ret = -ENOMEM;
goto err;
}
- platform_set_drvdata(pdev, mux);
-
- mux->dev = &pdev->dev;
mux->pdata = dev_get_platdata(&pdev->dev);
if (!mux->pdata) {
@@ -166,14 +175,15 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
goto err;
}
- mux->busses = devm_kzalloc(&pdev->dev,
- sizeof(*mux->busses) * mux->pdata->bus_count,
- GFP_KERNEL);
- if (!mux->busses) {
- dev_err(&pdev->dev, "Cannot allocate busses\n");
+ muxc = i2c_mux_alloc(NULL, &pdev->dev, mux->pdata->bus_count, 0, 0,
+ i2c_mux_pinctrl_select, NULL);
+ if (!muxc) {
ret = -ENOMEM;
goto err;
}
+ muxc->priv = mux;
+
+ platform_set_drvdata(pdev, muxc);
mux->pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR(mux->pinctrl)) {
@@ -184,13 +194,13 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
for (i = 0; i < mux->pdata->bus_count; i++) {
mux->states[i] = pinctrl_lookup_state(mux->pinctrl,
mux->pdata->pinctrl_states[i]);
- if (IS_ERR(mux->states[i])) {
- ret = PTR_ERR(mux->states[i]);
- dev_err(&pdev->dev,
- "Cannot look up pinctrl state %s: %d\n",
- mux->pdata->pinctrl_states[i], ret);
- goto err;
- }
+ if (IS_ERR(mux->states[i])) {
+ ret = PTR_ERR(mux->states[i]);
+ dev_err(&pdev->dev,
+ "Cannot look up pinctrl state %s: %d\n",
+ mux->pdata->pinctrl_states[i], ret);
+ goto err;
+ }
}
if (mux->pdata->pinctrl_state_idle) {
mux->state_idle = pinctrl_lookup_state(mux->pinctrl,
@@ -203,29 +213,39 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
goto err;
}
- deselect = i2c_mux_pinctrl_deselect;
- } else {
- deselect = NULL;
+ muxc->deselect = i2c_mux_pinctrl_deselect;
}
- mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num);
- if (!mux->parent) {
+ muxc->parent = i2c_get_adapter(mux->pdata->parent_bus_num);
+ if (!muxc->parent) {
dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
mux->pdata->parent_bus_num);
ret = -EPROBE_DEFER;
goto err;
}
+ root = i2c_root_adapter(&muxc->parent->dev);
+
+ muxc->mux_locked = true;
+ for (i = 0; i < mux->pdata->bus_count; i++) {
+ if (root != i2c_mux_pinctrl_root_adapter(mux->states[i])) {
+ muxc->mux_locked = false;
+ break;
+ }
+ }
+ if (muxc->mux_locked && mux->pdata->pinctrl_state_idle &&
+ root != i2c_mux_pinctrl_root_adapter(mux->state_idle))
+ muxc->mux_locked = false;
+
+ if (muxc->mux_locked)
+ dev_info(&pdev->dev, "mux-locked i2c mux\n");
+
for (i = 0; i < mux->pdata->bus_count; i++) {
u32 bus = mux->pdata->base_bus_num ?
(mux->pdata->base_bus_num + i) : 0;
- mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev,
- mux, bus, i, 0,
- i2c_mux_pinctrl_select,
- deselect);
- if (!mux->busses[i]) {
- ret = -ENODEV;
+ ret = i2c_mux_add_adapter(muxc, bus, i, 0);
+ if (ret) {
dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
goto err_del_adapter;
}
@@ -234,23 +254,18 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
return 0;
err_del_adapter:
- for (; i > 0; i--)
- i2c_del_mux_adapter(mux->busses[i - 1]);
- i2c_put_adapter(mux->parent);
+ i2c_mux_del_adapters(muxc);
+ i2c_put_adapter(muxc->parent);
err:
return ret;
}
static int i2c_mux_pinctrl_remove(struct platform_device *pdev)
{
- struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < mux->pdata->bus_count; i++)
- i2c_del_mux_adapter(mux->busses[i]);
-
- i2c_put_adapter(mux->parent);
+ struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
+ i2c_mux_del_adapters(muxc);
+ i2c_put_adapter(muxc->parent);
return 0;
}
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c
index 5fbd5bd08..c6a90b4a9 100644
--- a/drivers/i2c/muxes/i2c-mux-reg.c
+++ b/drivers/i2c/muxes/i2c-mux-reg.c
@@ -21,8 +21,6 @@
#include <linux/slab.h>
struct regmux {
- struct i2c_adapter *parent;
- struct i2c_adapter **adap; /* child busses */
struct i2c_mux_reg_platform_data data;
};
@@ -64,18 +62,16 @@ static int i2c_mux_reg_set(const struct regmux *mux, unsigned int chan_id)
return 0;
}
-static int i2c_mux_reg_select(struct i2c_adapter *adap, void *data,
- unsigned int chan)
+static int i2c_mux_reg_select(struct i2c_mux_core *muxc, u32 chan)
{
- struct regmux *mux = data;
+ struct regmux *mux = i2c_mux_priv(muxc);
return i2c_mux_reg_set(mux, chan);
}
-static int i2c_mux_reg_deselect(struct i2c_adapter *adap, void *data,
- unsigned int chan)
+static int i2c_mux_reg_deselect(struct i2c_mux_core *muxc, u32 chan)
{
- struct regmux *mux = data;
+ struct regmux *mux = i2c_mux_priv(muxc);
if (mux->data.idle_in_use)
return i2c_mux_reg_set(mux, mux->data.idle);
@@ -85,7 +81,7 @@ static int i2c_mux_reg_deselect(struct i2c_adapter *adap, void *data,
#ifdef CONFIG_OF
static int i2c_mux_reg_probe_dt(struct regmux *mux,
- struct platform_device *pdev)
+ struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *adapter_np, *child;
@@ -107,7 +103,6 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
if (!adapter)
return -EPROBE_DEFER;
- mux->parent = adapter;
mux->data.parent = i2c_adapter_id(adapter);
put_device(&adapter->dev);
@@ -150,7 +145,7 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
mux->data.idle_in_use = true;
/* map address from "reg" if exists */
- if (of_address_to_resource(np, 0, &res)) {
+ if (of_address_to_resource(np, 0, &res) == 0) {
mux->data.reg_size = resource_size(&res);
mux->data.reg = devm_ioremap_resource(&pdev->dev, &res);
if (IS_ERR(mux->data.reg))
@@ -161,7 +156,7 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
}
#else
static int i2c_mux_reg_probe_dt(struct regmux *mux,
- struct platform_device *pdev)
+ struct platform_device *pdev)
{
return 0;
}
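The `== 0` change a few hunks up is a genuine fix, not churn: of_address_to_resource() returns 0 on success and a negative errno on failure, so the old test only took the mapping branch when no "reg" property could be resolved, and a DT-supplied register address was never actually ioremapped. The corrected idiom in isolation (np, pdev and base are placeholders):

    struct resource res;

    /* 0 means "reg" was found and translated; only then map it */
    if (of_address_to_resource(np, 0, &res) == 0)
            base = devm_ioremap_resource(&pdev->dev, &res);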
@@ -169,10 +164,10 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
static int i2c_mux_reg_probe(struct platform_device *pdev)
{
+ struct i2c_mux_core *muxc;
struct regmux *mux;
struct i2c_adapter *parent;
struct resource *res;
- int (*deselect)(struct i2c_adapter *, void *, u32);
unsigned int class;
int i, ret, nr;
@@ -180,17 +175,9 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
if (!mux)
return -ENOMEM;
- platform_set_drvdata(pdev, mux);
-
if (dev_get_platdata(&pdev->dev)) {
memcpy(&mux->data, dev_get_platdata(&pdev->dev),
sizeof(mux->data));
-
- parent = i2c_get_adapter(mux->data.parent);
- if (!parent)
- return -EPROBE_DEFER;
-
- mux->parent = parent;
} else {
ret = i2c_mux_reg_probe_dt(mux, pdev);
if (ret < 0) {
@@ -199,6 +186,10 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
}
}
+ parent = i2c_get_adapter(mux->data.parent);
+ if (!parent)
+ return -EPROBE_DEFER;
+
if (!mux->data.reg) {
dev_info(&pdev->dev,
"Register not set, using platform resource\n");
@@ -215,55 +206,45 @@ static int i2c_mux_reg_probe(struct platform_device *pdev)
return -EINVAL;
}
- mux->adap = devm_kzalloc(&pdev->dev,
- sizeof(*mux->adap) * mux->data.n_values,
- GFP_KERNEL);
- if (!mux->adap) {
- dev_err(&pdev->dev, "Cannot allocate i2c_adapter structure");
+ muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0,
+ i2c_mux_reg_select, NULL);
+ if (!muxc)
return -ENOMEM;
- }
+ muxc->priv = mux;
+
+ platform_set_drvdata(pdev, muxc);
if (mux->data.idle_in_use)
- deselect = i2c_mux_reg_deselect;
- else
- deselect = NULL;
+ muxc->deselect = i2c_mux_reg_deselect;
for (i = 0; i < mux->data.n_values; i++) {
nr = mux->data.base_nr ? (mux->data.base_nr + i) : 0;
class = mux->data.classes ? mux->data.classes[i] : 0;
- mux->adap[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev, mux,
- nr, mux->data.values[i],
- class, i2c_mux_reg_select,
- deselect);
- if (!mux->adap[i]) {
- ret = -ENODEV;
+ ret = i2c_mux_add_adapter(muxc, nr, mux->data.values[i], class);
+ if (ret) {
dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
goto add_adapter_failed;
}
}
dev_dbg(&pdev->dev, "%d port mux on %s adapter\n",
- mux->data.n_values, mux->parent->name);
+ mux->data.n_values, muxc->parent->name);
return 0;
add_adapter_failed:
- for (; i > 0; i--)
- i2c_del_mux_adapter(mux->adap[i - 1]);
+ i2c_mux_del_adapters(muxc);
return ret;
}
static int i2c_mux_reg_remove(struct platform_device *pdev)
{
- struct regmux *mux = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < mux->data.n_values; i++)
- i2c_del_mux_adapter(mux->adap[i]);
+ struct i2c_mux_core *muxc = platform_get_drvdata(pdev);
- i2c_put_adapter(mux->parent);
+ i2c_mux_del_adapters(muxc);
+ i2c_put_adapter(muxc->parent);
return 0;
}
@@ -279,6 +260,7 @@ static struct platform_driver i2c_mux_reg_driver = {
.remove = i2c_mux_reg_remove,
.driver = {
.name = "i2c-mux-reg",
+ .of_match_table = of_match_ptr(i2c_mux_reg_of_match),
},
};
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 37a8a907f..05dbcce70 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -522,7 +522,7 @@ static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
static void update_flush(ide_drive_t *drive)
{
u16 *id = drive->id;
- unsigned flush = 0;
+ bool wc = false;
if (drive->dev_flags & IDE_DFLAG_WCACHE) {
unsigned long long capacity;
@@ -546,12 +546,12 @@ static void update_flush(ide_drive_t *drive)
drive->name, barrier ? "" : "not ");
if (barrier) {
- flush = REQ_FLUSH;
+ wc = true;
blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
}
}
- blk_queue_flush(drive->queue, flush);
+ blk_queue_write_cache(drive->queue, wc, false);
}
ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
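This hunk tracks a 4.7 block-layer API change: capability flag bits passed to blk_queue_flush() are replaced by two booleans passed to blk_queue_write_cache(), one for a volatile write cache and one for FUA. ide-disk only ever advertised a cache, never FUA, hence the (wc, false) call. Side by side, for contrast rather than compilation:

    /* old style: flag bits */
    blk_queue_flush(q, REQ_FLUSH);          /* cache present, no FUA */

    /* new style: one boolean per capability */
    blk_queue_write_cache(q, true, false);  /* write cache yes, FUA no */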
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index c6935de42..c96649292 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -766,6 +766,67 @@ static struct cpuidle_state knl_cstates[] = {
.enter = NULL }
};
+static struct cpuidle_state bxt_cstates[] = {
+ {
+ .name = "C1-BXT",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C1E-BXT",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01),
+ .exit_latency = 10,
+ .target_residency = 20,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C6-BXT",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 133,
+ .target_residency = 133,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C7s-BXT",
+ .desc = "MWAIT 0x31",
+ .flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 155,
+ .target_residency = 155,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C8-BXT",
+ .desc = "MWAIT 0x40",
+ .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 1000,
+ .target_residency = 1000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C9-BXT",
+ .desc = "MWAIT 0x50",
+ .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 2000,
+ .target_residency = 2000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .name = "C10-BXT",
+ .desc = "MWAIT 0x60",
+ .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 10000,
+ .target_residency = 10000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
+ .enter = NULL }
+};
+
/**
* intel_idle
* @dev: cpuidle_device
@@ -950,6 +1011,11 @@ static const struct idle_cpu idle_cpu_knl = {
.state_table = knl_cstates,
};
+static const struct idle_cpu idle_cpu_bxt = {
+ .state_table = bxt_cstates,
+ .disable_promotion_to_c1e = true,
+};
+
#define ICPU(model, cpu) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
@@ -985,6 +1051,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(0x9e, idle_cpu_skl),
ICPU(0x55, idle_cpu_skx),
ICPU(0x57, idle_cpu_knl),
+ ICPU(0x5c, idle_cpu_bxt),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -1075,6 +1142,73 @@ static void ivt_idle_state_table_update(void)
/* else, 1 and 2 socket systems use default ivt_cstates */
}
+
+/*
+ * Translate IRTL (Interrupt Response Time Limit) MSR to usec
+ */
+
+static unsigned int irtl_ns_units[] = {
+ 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };
+
+static unsigned long long irtl_2_usec(unsigned long long irtl)
+{
+ unsigned long long ns;
+
+ ns = irtl_ns_units[(irtl >> 10) & 0x7]; /* 3-bit unit field, bits 12:10 */
+
+ return div64_u64((irtl & 0x3FF) * ns, 1000);
+}
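Each IRTL MSR packs a 10-bit time value (bits 9:0) and a 3-bit unit selector (bits 12:10) indexing irtl_ns_units[] in nanoseconds; the full 3-bit mask matters, since the two largest units sit behind the top bit. Worked example: 0x0C84 selects unit 3 (32768 ns) with value 0x084 = 132, i.e. 132 * 32768 / 1000 = 4325 usec. A stand-alone sanity check of the conversion (plain userspace C, illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    static const uint64_t irtl_ns_units[] = {
            1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };

    static uint64_t irtl_2_usec(uint64_t irtl)
    {
            uint64_t ns = irtl_ns_units[(irtl >> 10) & 0x7];

            return (irtl & 0x3FF) * ns / 1000;
    }

    int main(void)
    {
            printf("%llu\n", (unsigned long long)irtl_2_usec(0x0C84));
            return 0;   /* prints 4325 */
    }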
+/*
+ * bxt_idle_state_table_update(void)
+ *
+ * On BXT, we trust the IRTL to show the definitive maximum latency.
+ * We use the same value for target_residency.
+ */
+static void bxt_idle_state_table_update(void)
+{
+ unsigned long long msr;
+
+ rdmsrl(MSR_PKGC6_IRTL, msr);
+ if (msr) {
+ unsigned int usec = irtl_2_usec(msr);
+
+ bxt_cstates[2].exit_latency = usec;
+ bxt_cstates[2].target_residency = usec;
+ }
+
+ rdmsrl(MSR_PKGC7_IRTL, msr);
+ if (msr) {
+ unsigned int usec = irtl_2_usec(msr);
+
+ bxt_cstates[3].exit_latency = usec;
+ bxt_cstates[3].target_residency = usec;
+ }
+
+ rdmsrl(MSR_PKGC8_IRTL, msr);
+ if (msr) {
+ unsigned int usec = irtl_2_usec(msr);
+
+ bxt_cstates[4].exit_latency = usec;
+ bxt_cstates[4].target_residency = usec;
+ }
+
+ rdmsrl(MSR_PKGC9_IRTL, msr);
+ if (msr) {
+ unsigned int usec = irtl_2_usec(msr);
+
+ bxt_cstates[5].exit_latency = usec;
+ bxt_cstates[5].target_residency = usec;
+ }
+
+ rdmsrl(MSR_PKGC10_IRTL, msr);
+ if (msr) {
+ unsigned int usec = irtl_2_usec(msr);
+
+ bxt_cstates[6].exit_latency = usec;
+ bxt_cstates[6].target_residency = usec;
+ }
+}
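The five rdmsrl() blocks above differ only in MSR number and table index; a purely illustrative, more compact variant (not what the driver does) would be table-driven:

    static const struct { u32 msr; int idx; } irtl_map[] = {
            { MSR_PKGC6_IRTL, 2 }, { MSR_PKGC7_IRTL, 3 },
            { MSR_PKGC8_IRTL, 4 }, { MSR_PKGC9_IRTL, 5 },
            { MSR_PKGC10_IRTL, 6 },
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(irtl_map); i++) {
            rdmsrl(irtl_map[i].msr, msr);
            if (!msr)
                    continue;
            bxt_cstates[irtl_map[i].idx].exit_latency = irtl_2_usec(msr);
            bxt_cstates[irtl_map[i].idx].target_residency = irtl_2_usec(msr);
    }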
/*
* sklh_idle_state_table_update(void)
*
@@ -1130,6 +1264,9 @@ static void intel_idle_state_table_update(void)
case 0x3e: /* IVT */
ivt_idle_state_table_update();
break;
+ case 0x5c: /* BXT */
+ bxt_idle_state_table_update();
+ break;
case 0x5e: /* SKL-H */
sklh_idle_state_table_update();
break;
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index b0d3ecf33..e4a758cd7 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -64,7 +64,7 @@ config IIO_ST_ACCEL_3AXIS
help
Say yes here to build support for STMicroelectronics accelerometers:
LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
- LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12.
+ LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL.
This driver can also be built as a module. If so, these modules
will be created:
@@ -143,7 +143,8 @@ config MMA8452
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for the following Freescale 3-axis
- accelerometers: MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC.
+ accelerometers: MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC,
+ FXLS8471Q.
To compile this driver as a module, choose M here: the module
will be called mma8452.
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 2072a31e8..197e693e7 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -25,7 +25,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
@@ -138,6 +137,7 @@ enum bmc150_accel_axis {
AXIS_X,
AXIS_Y,
AXIS_Z,
+ AXIS_MAX,
};
enum bmc150_power_modes {
@@ -188,7 +188,6 @@ enum bmc150_accel_trigger_id {
struct bmc150_accel_data {
struct regmap *regmap;
- struct device *dev;
int irq;
struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
atomic_t active_intr;
@@ -246,16 +245,18 @@ static const struct {
{500000, BMC150_ACCEL_SLEEP_500_MS},
{1000000, BMC150_ACCEL_SLEEP_1_SEC} };
-static const struct regmap_config bmc150_i2c_regmap_conf = {
+const struct regmap_config bmc150_regmap_conf = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x3f,
};
+EXPORT_SYMBOL_GPL(bmc150_regmap_conf);
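Un-static-ing and exporting the config lets the i2c and spi front ends (patched further down) share one definition instead of two diverging copies; the same rework drops the cached struct device pointer from the driver state, recovering it where needed via regmap_get_device(). The pattern, reduced to its three calls:

    /* i2c front end */
    regmap = devm_regmap_init_i2c(client, &bmc150_regmap_conf);

    /* spi front end */
    regmap = devm_regmap_init_spi(spi, &bmc150_regmap_conf);

    /* anywhere a struct device is needed for logging or runtime PM */
    struct device *dev = regmap_get_device(data->regmap);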
static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
enum bmc150_power_modes mode,
int dur_us)
{
+ struct device *dev = regmap_get_device(data->regmap);
int i;
int ret;
u8 lpw_bits;
@@ -279,11 +280,11 @@ static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
lpw_bits = mode << BMC150_ACCEL_PMU_MODE_SHIFT;
lpw_bits |= (dur_val << BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT);
- dev_dbg(data->dev, "Set Mode bits %x\n", lpw_bits);
+ dev_dbg(dev, "Set Mode bits %x\n", lpw_bits);
ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_LPW, lpw_bits);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_pmu_lpw\n");
+ dev_err(dev, "Error writing reg_pmu_lpw\n");
return ret;
}
@@ -316,23 +317,24 @@ static int bmc150_accel_set_bw(struct bmc150_accel_data *data, int val,
static int bmc150_accel_update_slope(struct bmc150_accel_data *data)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_6,
data->slope_thres);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_int_6\n");
+ dev_err(dev, "Error writing reg_int_6\n");
return ret;
}
ret = regmap_update_bits(data->regmap, BMC150_ACCEL_REG_INT_5,
BMC150_ACCEL_SLOPE_DUR_MASK, data->slope_dur);
if (ret < 0) {
- dev_err(data->dev, "Error updating reg_int_5\n");
+ dev_err(dev, "Error updating reg_int_5\n");
return ret;
}
- dev_dbg(data->dev, "%s: %x %x\n", __func__, data->slope_thres,
+ dev_dbg(dev, "%s: %x %x\n", __func__, data->slope_thres,
data->slope_dur);
return ret;
@@ -378,20 +380,21 @@ static int bmc150_accel_get_startup_times(struct bmc150_accel_data *data)
static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
if (on) {
- ret = pm_runtime_get_sync(data->dev);
+ ret = pm_runtime_get_sync(dev);
} else {
- pm_runtime_mark_last_busy(data->dev);
- ret = pm_runtime_put_autosuspend(data->dev);
+ pm_runtime_mark_last_busy(dev);
+ ret = pm_runtime_put_autosuspend(dev);
}
if (ret < 0) {
- dev_err(data->dev,
+ dev_err(dev,
"Failed: bmc150_accel_set_power_state for %d\n", on);
if (on)
- pm_runtime_put_noidle(data->dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
@@ -445,6 +448,7 @@ static void bmc150_accel_interrupts_setup(struct iio_dev *indio_dev,
static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
bool state)
{
+ struct device *dev = regmap_get_device(data->regmap);
struct bmc150_accel_interrupt *intr = &data->interrupts[i];
const struct bmc150_accel_interrupt_info *info = intr->info;
int ret;
@@ -474,7 +478,7 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
ret = regmap_update_bits(data->regmap, info->map_reg, info->map_bitmask,
(state ? info->map_bitmask : 0));
if (ret < 0) {
- dev_err(data->dev, "Error updating reg_int_map\n");
+ dev_err(dev, "Error updating reg_int_map\n");
goto out_fix_power_state;
}
@@ -482,7 +486,7 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
ret = regmap_update_bits(data->regmap, info->en_reg, info->en_bitmask,
(state ? info->en_bitmask : 0));
if (ret < 0) {
- dev_err(data->dev, "Error updating reg_int_en\n");
+ dev_err(dev, "Error updating reg_int_en\n");
goto out_fix_power_state;
}
@@ -500,6 +504,7 @@ out_fix_power_state:
static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret, i;
for (i = 0; i < ARRAY_SIZE(data->chip_info->scale_table); ++i) {
@@ -508,8 +513,7 @@ static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
BMC150_ACCEL_REG_PMU_RANGE,
data->chip_info->scale_table[i].reg_range);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing pmu_range\n");
+ dev_err(dev, "Error writing pmu_range\n");
return ret;
}
@@ -523,6 +527,7 @@ static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
static int bmc150_accel_get_temp(struct bmc150_accel_data *data, int *val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
unsigned int value;
@@ -530,7 +535,7 @@ static int bmc150_accel_get_temp(struct bmc150_accel_data *data, int *val)
ret = regmap_read(data->regmap, BMC150_ACCEL_REG_TEMP, &value);
if (ret < 0) {
- dev_err(data->dev, "Error reading reg_temp\n");
+ dev_err(dev, "Error reading reg_temp\n");
mutex_unlock(&data->mutex);
return ret;
}
@@ -545,6 +550,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
struct iio_chan_spec const *chan,
int *val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
int axis = chan->scan_index;
__le16 raw_val;
@@ -559,7 +565,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
&raw_val, sizeof(raw_val));
if (ret < 0) {
- dev_err(data->dev, "Error reading axis %d\n", axis);
+ dev_err(dev, "Error reading axis %d\n", axis);
bmc150_accel_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
@@ -831,6 +837,7 @@ static int bmc150_accel_set_watermark(struct iio_dev *indio_dev, unsigned val)
static int bmc150_accel_fifo_transfer(struct bmc150_accel_data *data,
char *buffer, int samples)
{
+ struct device *dev = regmap_get_device(data->regmap);
int sample_length = 3 * 2;
int ret;
int total_length = samples * sample_length;
@@ -854,7 +861,8 @@ static int bmc150_accel_fifo_transfer(struct bmc150_accel_data *data,
}
if (ret)
- dev_err(data->dev, "Error transferring data from fifo in single steps of %zu\n",
+ dev_err(dev,
+ "Error transferring data from fifo in single steps of %zu\n",
step);
return ret;
@@ -864,6 +872,7 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
unsigned samples, bool irq)
{
struct bmc150_accel_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
int ret, i;
u8 count;
u16 buffer[BMC150_ACCEL_FIFO_LENGTH * 3];
@@ -873,7 +882,7 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
ret = regmap_read(data->regmap, BMC150_ACCEL_REG_FIFO_STATUS, &val);
if (ret < 0) {
- dev_err(data->dev, "Error reading reg_fifo_status\n");
+ dev_err(dev, "Error reading reg_fifo_status\n");
return ret;
}
@@ -1105,27 +1114,23 @@ static const struct iio_info bmc150_accel_info_fifo = {
.driver_module = THIS_MODULE,
};
+static const unsigned long bmc150_accel_scan_masks[] = {
+ BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
+ 0};
+
static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmc150_accel_data *data = iio_priv(indio_dev);
- int bit, ret, i = 0;
- unsigned int raw_val;
+ int ret;
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
- ret = regmap_bulk_read(data->regmap,
- BMC150_ACCEL_AXIS_TO_REG(bit), &raw_val,
- 2);
- if (ret < 0) {
- mutex_unlock(&data->mutex);
- goto err_read;
- }
- data->buffer[i++] = raw_val;
- }
+ ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_REG_XOUT_L,
+ data->buffer, AXIS_MAX * 2);
mutex_unlock(&data->mutex);
+ if (ret < 0)
+ goto err_read;
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
pf->timestamp);
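Pinning available_scan_masks to the single X|Y|Z mask (the array added just above the handler) is what makes this simplification safe: the IIO core can no longer hand the driver a partial mask, so one regmap_bulk_read() of all six data bytes always matches the scan layout, and demultiplexing for consumers wanting fewer axes happens in the core. A hypothetical view of one pushed scan:

    /* layout illustrative; the core helper appends the timestamp */
    struct demo_scan {
            __le16 chan[3];                 /* X, Y, Z from one bulk read */
            s64 timestamp __aligned(8);
    };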
@@ -1139,6 +1144,7 @@ static int bmc150_accel_trig_try_reen(struct iio_trigger *trig)
{
struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
struct bmc150_accel_data *data = t->data;
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
/* new data interrupts don't need ack */
@@ -1152,8 +1158,7 @@ static int bmc150_accel_trig_try_reen(struct iio_trigger *trig)
BMC150_ACCEL_INT_MODE_LATCH_RESET);
mutex_unlock(&data->mutex);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_int_rst_latch\n");
+ dev_err(dev, "Error writing reg_int_rst_latch\n");
return ret;
}
@@ -1204,13 +1209,14 @@ static const struct iio_trigger_ops bmc150_accel_trigger_ops = {
static int bmc150_accel_handle_roc_event(struct iio_dev *indio_dev)
{
struct bmc150_accel_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
int dir;
int ret;
unsigned int val;
ret = regmap_read(data->regmap, BMC150_ACCEL_REG_INT_STATUS_2, &val);
if (ret < 0) {
- dev_err(data->dev, "Error reading reg_int_status_2\n");
+ dev_err(dev, "Error reading reg_int_status_2\n");
return ret;
}
@@ -1253,6 +1259,7 @@ static irqreturn_t bmc150_accel_irq_thread_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct bmc150_accel_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
bool ack = false;
int ret;
@@ -1276,7 +1283,7 @@ static irqreturn_t bmc150_accel_irq_thread_handler(int irq, void *private)
BMC150_ACCEL_INT_MODE_LATCH_INT |
BMC150_ACCEL_INT_MODE_LATCH_RESET);
if (ret)
- dev_err(data->dev, "Error writing reg_int_rst_latch\n");
+ dev_err(dev, "Error writing reg_int_rst_latch\n");
ret = IRQ_HANDLED;
} else {
@@ -1347,13 +1354,14 @@ static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
struct bmc150_accel_data *data)
{
+ struct device *dev = regmap_get_device(data->regmap);
int i, ret;
for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
struct bmc150_accel_trigger *t = &data->triggers[i];
- t->indio_trig = devm_iio_trigger_alloc(data->dev,
- bmc150_accel_triggers[i].name,
+ t->indio_trig = devm_iio_trigger_alloc(dev,
+ bmc150_accel_triggers[i].name,
indio_dev->name,
indio_dev->id);
if (!t->indio_trig) {
@@ -1361,7 +1369,7 @@ static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
break;
}
- t->indio_trig->dev.parent = data->dev;
+ t->indio_trig->dev.parent = dev;
t->indio_trig->ops = &bmc150_accel_trigger_ops;
t->intr = bmc150_accel_triggers[i].intr;
t->data = data;
@@ -1385,12 +1393,13 @@ static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
static int bmc150_accel_fifo_set_mode(struct bmc150_accel_data *data)
{
+ struct device *dev = regmap_get_device(data->regmap);
u8 reg = BMC150_ACCEL_REG_FIFO_CONFIG1;
int ret;
ret = regmap_write(data->regmap, reg, data->fifo_mode);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_fifo_config1\n");
+ dev_err(dev, "Error writing reg_fifo_config1\n");
return ret;
}
@@ -1400,7 +1409,7 @@ static int bmc150_accel_fifo_set_mode(struct bmc150_accel_data *data)
ret = regmap_write(data->regmap, BMC150_ACCEL_REG_FIFO_CONFIG0,
data->watermark);
if (ret < 0)
- dev_err(data->dev, "Error writing reg_fifo_config0\n");
+ dev_err(dev, "Error writing reg_fifo_config0\n");
return ret;
}
@@ -1484,17 +1493,17 @@ static const struct iio_buffer_setup_ops bmc150_accel_buffer_ops = {
static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret, i;
unsigned int val;
ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
if (ret < 0) {
- dev_err(data->dev,
- "Error: Reading chip id\n");
+ dev_err(dev, "Error: Reading chip id\n");
return ret;
}
- dev_dbg(data->dev, "Chip Id %x\n", val);
+ dev_dbg(dev, "Chip Id %x\n", val);
for (i = 0; i < ARRAY_SIZE(bmc150_accel_chip_info_tbl); i++) {
if (bmc150_accel_chip_info_tbl[i].chip_id == val) {
data->chip_info = &bmc150_accel_chip_info_tbl[i];
@@ -1503,7 +1512,7 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
}
if (!data->chip_info) {
- dev_err(data->dev, "Invalid chip %x\n", val);
+ dev_err(dev, "Invalid chip %x\n", val);
return -ENODEV;
}
@@ -1520,8 +1529,7 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_RANGE,
BMC150_ACCEL_DEF_RANGE_4G);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_pmu_range\n");
+ dev_err(dev, "Error writing reg_pmu_range\n");
return ret;
}
@@ -1539,8 +1547,7 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
BMC150_ACCEL_INT_MODE_LATCH_INT |
BMC150_ACCEL_INT_MODE_LATCH_RESET);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_int_rst_latch\n");
+ dev_err(dev, "Error writing reg_int_rst_latch\n");
return ret;
}
@@ -1560,7 +1567,6 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
data = iio_priv(indio_dev);
dev_set_drvdata(dev, indio_dev);
- data->dev = dev;
data->irq = irq;
data->regmap = regmap;
@@ -1575,6 +1581,7 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
indio_dev->channels = data->chip_info->channels;
indio_dev->num_channels = data->chip_info->num_channels;
indio_dev->name = name ? name : data->chip_info->name;
+ indio_dev->available_scan_masks = bmc150_accel_scan_masks;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &bmc150_accel_info;
@@ -1583,13 +1590,13 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
bmc150_accel_trigger_handler,
&bmc150_accel_buffer_ops);
if (ret < 0) {
- dev_err(data->dev, "Failed: iio triggered buffer setup\n");
+ dev_err(dev, "Failed: iio triggered buffer setup\n");
return ret;
}
if (data->irq > 0) {
ret = devm_request_threaded_irq(
- data->dev, data->irq,
+ dev, data->irq,
bmc150_accel_irq_handler,
bmc150_accel_irq_thread_handler,
IRQF_TRIGGER_RISING,
@@ -1607,7 +1614,7 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
BMC150_ACCEL_INT_MODE_LATCH_RESET);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_int_rst_latch\n");
+ dev_err(dev, "Error writing reg_int_rst_latch\n");
goto err_buffer_cleanup;
}
@@ -1656,9 +1663,9 @@ int bmc150_accel_core_remove(struct device *dev)
iio_device_unregister(indio_dev);
- pm_runtime_disable(data->dev);
- pm_runtime_set_suspended(data->dev);
- pm_runtime_put_noidle(data->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
@@ -1707,7 +1714,7 @@ static int bmc150_accel_runtime_suspend(struct device *dev)
struct bmc150_accel_data *data = iio_priv(indio_dev);
int ret;
- dev_dbg(data->dev, __func__);
+ dev_dbg(dev, "%s\n", __func__);
ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
if (ret < 0)
return -EAGAIN;
@@ -1722,7 +1729,7 @@ static int bmc150_accel_runtime_resume(struct device *dev)
int ret;
int sleep_val;
- dev_dbg(data->dev, __func__);
+ dev_dbg(dev, "%s\n", __func__);
ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
if (ret < 0)
diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
index b41404ba3..8ca804126 100644
--- a/drivers/iio/accel/bmc150-accel-i2c.c
+++ b/drivers/iio/accel/bmc150-accel-i2c.c
@@ -28,11 +28,6 @@
#include "bmc150-accel.h"
-static const struct regmap_config bmc150_i2c_regmap_conf = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
static int bmc150_accel_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -43,7 +38,7 @@ static int bmc150_accel_probe(struct i2c_client *client,
i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_I2C_BLOCK);
- regmap = devm_regmap_init_i2c(client, &bmc150_i2c_regmap_conf);
+ regmap = devm_regmap_init_i2c(client, &bmc150_regmap_conf);
if (IS_ERR(regmap)) {
dev_err(&client->dev, "Failed to initialize i2c regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/iio/accel/bmc150-accel-spi.c b/drivers/iio/accel/bmc150-accel-spi.c
index 16b66f2a7..006794a70 100644
--- a/drivers/iio/accel/bmc150-accel-spi.c
+++ b/drivers/iio/accel/bmc150-accel-spi.c
@@ -25,18 +25,12 @@
#include "bmc150-accel.h"
-static const struct regmap_config bmc150_spi_regmap_conf = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x3f,
-};
-
static int bmc150_accel_probe(struct spi_device *spi)
{
struct regmap *regmap;
const struct spi_device_id *id = spi_get_device_id(spi);
- regmap = devm_regmap_init_spi(spi, &bmc150_spi_regmap_conf);
+ regmap = devm_regmap_init_spi(spi, &bmc150_regmap_conf);
if (IS_ERR(regmap)) {
dev_err(&spi->dev, "Failed to initialize spi regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/iio/accel/bmc150-accel.h b/drivers/iio/accel/bmc150-accel.h
index ba0335987..38a8b11f8 100644
--- a/drivers/iio/accel/bmc150-accel.h
+++ b/drivers/iio/accel/bmc150-accel.h
@@ -16,5 +16,6 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
const char *name, bool block_supported);
int bmc150_accel_core_remove(struct device *dev);
extern const struct dev_pm_ops bmc150_accel_pm_ops;
+extern const struct regmap_config bmc150_regmap_conf;
#endif /* _BMC150_ACCEL_H_ */
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index edec1d099..bfe219a8b 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
@@ -115,6 +114,7 @@ enum kxcjk1013_axis {
AXIS_X,
AXIS_Y,
AXIS_Z,
+ AXIS_MAX,
};
enum kxcjk1013_mode {
@@ -922,7 +922,7 @@ static const struct iio_event_spec kxcjk1013_event = {
.realbits = 12, \
.storagebits = 16, \
.shift = 4, \
- .endianness = IIO_CPU, \
+ .endianness = IIO_LE, \
}, \
.event_spec = &kxcjk1013_event, \
.num_event_specs = 1 \
@@ -953,25 +953,23 @@ static const struct iio_info kxcjk1013_info = {
.driver_module = THIS_MODULE,
};
+static const unsigned long kxcjk1013_scan_masks[] = {0x7, 0};
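The IIO_CPU -> IIO_LE change above pairs with the block read below: i2c_smbus_read_i2c_block_data_or_emulated() copies the output registers byte for byte, and the KXCJK-1013 stores samples little-endian, left-justified in 16 bits with 12 significant bits, so the channel spec must say IIO_LE for consumers to convert correctly on big-endian hosts too. What a consumer does with one raw sample, as a sketch (helper name hypothetical):

    #include <linux/bitops.h>

    /* little-endian, 12 bits significant, left-justified by 4 */
    static inline int kxcjk1013_sample_to_int(__le16 raw)
    {
            return sign_extend32(le16_to_cpu(raw) >> 4, 11);
    }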
+
static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct kxcjk1013_data *data = iio_priv(indio_dev);
- int bit, ret, i = 0;
+ int ret;
mutex_lock(&data->mutex);
-
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
- ret = kxcjk1013_get_acc_reg(data, bit);
- if (ret < 0) {
- mutex_unlock(&data->mutex);
- goto err;
- }
- data->buffer[i++] = ret;
- }
+ ret = i2c_smbus_read_i2c_block_data_or_emulated(data->client,
+ KXCJK1013_REG_XOUT_L,
+ AXIS_MAX * 2,
+ (u8 *)data->buffer);
mutex_unlock(&data->mutex);
+ if (ret < 0)
+ goto err;
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
data->timestamp);
@@ -1204,6 +1202,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
indio_dev->dev.parent = &client->dev;
indio_dev->channels = kxcjk1013_channels;
indio_dev->num_channels = ARRAY_SIZE(kxcjk1013_channels);
+ indio_dev->available_scan_masks = kxcjk1013_scan_masks;
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &kxcjk1013_info;
diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
index c633cc2c0..c902f54c2 100644
--- a/drivers/iio/accel/mma7455_core.c
+++ b/drivers/iio/accel/mma7455_core.c
@@ -55,11 +55,11 @@
struct mma7455_data {
struct regmap *regmap;
- struct device *dev;
};
static int mma7455_drdy(struct mma7455_data *mma7455)
{
+ struct device *dev = regmap_get_device(mma7455->regmap);
unsigned int reg;
int tries = 3;
int ret;
@@ -75,7 +75,7 @@ static int mma7455_drdy(struct mma7455_data *mma7455)
msleep(20);
}
- dev_warn(mma7455->dev, "data not ready\n");
+ dev_warn(dev, "data not ready\n");
return -EIO;
}
@@ -260,7 +260,6 @@ int mma7455_core_probe(struct device *dev, struct regmap *regmap,
dev_set_drvdata(dev, indio_dev);
mma7455 = iio_priv(indio_dev);
mma7455->regmap = regmap;
- mma7455->dev = dev;
indio_dev->info = &mma7455_info;
indio_dev->name = name;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 7f4994f32..e225d3c53 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -6,6 +6,7 @@
* MMA8453Q (10 bit)
* MMA8652FC (12 bit)
* MMA8653FC (10 bit)
+ * FXLS8471Q (14 bit)
*
* Copyright 2015 Martin Kepplinger <martin.kepplinger@theobroma-systems.com>
* Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
@@ -16,7 +17,7 @@
*
* 7-bit I2C slave address 0x1c/0x1d (pin selectable)
*
- * TODO: orientation events, autosleep
+ * TODO: orientation events
*/
#include <linux/module.h>
@@ -31,6 +32,7 @@
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
+#include <linux/pm_runtime.h>
#define MMA8452_STATUS 0x00
#define MMA8452_STATUS_DRDY (BIT(2) | BIT(1) | BIT(0))
@@ -91,6 +93,9 @@
#define MMA8453_DEVICE_ID 0x3a
#define MMA8652_DEVICE_ID 0x4a
#define MMA8653_DEVICE_ID 0x5a
+#define FXLS8471_DEVICE_ID 0x6a
+
+#define MMA8452_AUTO_SUSPEND_DELAY_MS 2000
struct mma8452_data {
struct i2c_client *client;
@@ -172,6 +177,31 @@ static int mma8452_drdy(struct mma8452_data *data)
return -EIO;
}
+static int mma8452_set_runtime_pm_state(struct i2c_client *client, bool on)
+{
+#ifdef CONFIG_PM
+ int ret;
+
+ if (on) {
+ ret = pm_runtime_get_sync(&client->dev);
+ } else {
+ pm_runtime_mark_last_busy(&client->dev);
+ ret = pm_runtime_put_autosuspend(&client->dev);
+ }
+
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "failed to change power state to %d\n", on);
+ if (on)
+ pm_runtime_put_noidle(&client->dev);
+
+ return ret;
+ }
+#endif
+
+ return 0;
+}
+
static int mma8452_read(struct mma8452_data *data, __be16 buf[3])
{
int ret = mma8452_drdy(data);
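
mma8452_set_runtime_pm_state() wraps the usual pm_runtime_get_sync() /
pm_runtime_put_autosuspend() pairing and compiles down to "return 0" when
CONFIG_PM is off. A minimal caller sketch (hypothetical helper; mma8452_read()
in the next hunk uses the same wake-transfer-release pattern):

static int mma8452_read_reg_powered(struct mma8452_data *data, u8 reg)
{
	int ret = mma8452_set_runtime_pm_state(data->client, true);

	if (ret)
		return ret;

	ret = i2c_smbus_read_byte_data(data->client, reg);

	/* drop the reference even if the read failed */
	mma8452_set_runtime_pm_state(data->client, false);
	return ret;
}
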
@@ -179,8 +209,16 @@ static int mma8452_read(struct mma8452_data *data, __be16 buf[3])
if (ret < 0)
return ret;
- return i2c_smbus_read_i2c_block_data(data->client, MMA8452_OUT_X,
- 3 * sizeof(__be16), (u8 *)buf);
+ ret = mma8452_set_runtime_pm_state(data->client, true);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_read_i2c_block_data(data->client, MMA8452_OUT_X,
+ 3 * sizeof(__be16), (u8 *)buf);
+
+ ret = mma8452_set_runtime_pm_state(data->client, false);
+
+ return ret;
}
static ssize_t mma8452_show_int_plus_micros(char *buf, const int (*vals)[2],
@@ -357,7 +395,8 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_CALIBBIAS:
ret = i2c_smbus_read_byte_data(data->client,
- MMA8452_OFF_X + chan->scan_index);
+ MMA8452_OFF_X +
+ chan->scan_index);
if (ret < 0)
return ret;
@@ -392,24 +431,47 @@ static int mma8452_active(struct mma8452_data *data)
data->ctrl_reg1);
}
+/* returns >0 if active, 0 if in standby and <0 on error */
+static int mma8452_is_active(struct mma8452_data *data)
+{
+ int reg;
+
+ reg = i2c_smbus_read_byte_data(data->client, MMA8452_CTRL_REG1);
+ if (reg < 0)
+ return reg;
+
+ return reg & MMA8452_CTRL_ACTIVE;
+}
+
static int mma8452_change_config(struct mma8452_data *data, u8 reg, u8 val)
{
int ret;
+ int is_active;
mutex_lock(&data->lock);
- /* config can only be changed when in standby */
- ret = mma8452_standby(data);
- if (ret < 0)
+ is_active = mma8452_is_active(data);
+ if (is_active < 0) {
+ ret = is_active;
goto fail;
+ }
+
+ /* config can only be changed when in standby */
+ if (is_active > 0) {
+ ret = mma8452_standby(data);
+ if (ret < 0)
+ goto fail;
+ }
ret = i2c_smbus_write_byte_data(data->client, reg, val);
if (ret < 0)
goto fail;
- ret = mma8452_active(data);
- if (ret < 0)
- goto fail;
+ if (is_active > 0) {
+ ret = mma8452_active(data);
+ if (ret < 0)
+ goto fail;
+ }
ret = 0;
fail:
@@ -418,7 +480,7 @@ fail:
return ret;
}
-/* returns >0 if in freefall mode, 0 if not or <0 if an error occured */
+/* returns >0 if in freefall mode, 0 if not or <0 if an error occurred */
static int mma8452_freefall_mode_enabled(struct mma8452_data *data)
{
int val;
@@ -668,7 +730,8 @@ static int mma8452_read_event_config(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- return !!(ret & BIT(chan->scan_index + chip->ev_cfg_chan_shift));
+ return !!(ret & BIT(chan->scan_index +
+ chip->ev_cfg_chan_shift));
default:
return -EINVAL;
}
@@ -682,7 +745,11 @@ static int mma8452_write_event_config(struct iio_dev *indio_dev,
{
struct mma8452_data *data = iio_priv(indio_dev);
const struct mma_chip_info *chip = data->chip_info;
- int val;
+ int val, ret;
+
+ ret = mma8452_set_runtime_pm_state(data->client, state);
+ if (ret)
+ return ret;
switch (dir) {
case IIO_EV_DIR_FALLING:
@@ -990,6 +1057,7 @@ enum {
mma8453,
mma8652,
mma8653,
+ fxls8471,
};
static const struct mma_chip_info mma_chip_info_table[] = {
@@ -1003,7 +1071,7 @@ static const struct mma_chip_info mma_chip_info_table[] = {
* bit.
* The userspace interface uses m/s^2 and we declare micro units
* So scale factor for 12 bit here is given by:
- * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
+ * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
*/
.mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} },
.ev_cfg = MMA8452_TRANSIENT_CFG,
@@ -1081,6 +1149,22 @@ static const struct mma_chip_info mma_chip_info_table[] = {
.ev_ths_mask = MMA8452_FF_MT_THS_MASK,
.ev_count = MMA8452_FF_MT_COUNT,
},
+ [fxls8471] = {
+ .chip_id = FXLS8471_DEVICE_ID,
+ .channels = mma8451_channels,
+ .num_channels = ARRAY_SIZE(mma8451_channels),
+ .mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} },
+ .ev_cfg = MMA8452_TRANSIENT_CFG,
+ .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
+ .ev_cfg_chan_shift = 1,
+ .ev_src = MMA8452_TRANSIENT_SRC,
+ .ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE,
+ .ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE,
+ .ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE,
+ .ev_ths = MMA8452_TRANSIENT_THS,
+ .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
+ .ev_count = MMA8452_TRANSIENT_COUNT,
+ },
};
static struct attribute *mma8452_attributes[] = {
@@ -1114,7 +1198,11 @@ static int mma8452_data_rdy_trigger_set_state(struct iio_trigger *trig,
{
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct mma8452_data *data = iio_priv(indio_dev);
- int reg;
+ int reg, ret;
+
+ ret = mma8452_set_runtime_pm_state(data->client, state);
+ if (ret)
+ return ret;
reg = i2c_smbus_read_byte_data(data->client, MMA8452_CTRL_REG4);
if (reg < 0)
@@ -1206,6 +1294,7 @@ static const struct of_device_id mma8452_dt_ids[] = {
{ .compatible = "fsl,mma8453", .data = &mma_chip_info_table[mma8453] },
{ .compatible = "fsl,mma8652", .data = &mma_chip_info_table[mma8652] },
{ .compatible = "fsl,mma8653", .data = &mma_chip_info_table[mma8653] },
+ { .compatible = "fsl,fxls8471", .data = &mma_chip_info_table[fxls8471] },
{ }
};
MODULE_DEVICE_TABLE(of, mma8452_dt_ids);
@@ -1243,6 +1332,7 @@ static int mma8452_probe(struct i2c_client *client,
case MMA8453_DEVICE_ID:
case MMA8652_DEVICE_ID:
case MMA8653_DEVICE_ID:
+ case FXLS8471_DEVICE_ID:
if (ret == data->chip_info->chip_id)
break;
default:
@@ -1340,6 +1430,15 @@ static int mma8452_probe(struct i2c_client *client,
goto buffer_cleanup;
}
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret < 0)
+ goto buffer_cleanup;
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev,
+ MMA8452_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&client->dev);
+
ret = iio_device_register(indio_dev);
if (ret < 0)
goto buffer_cleanup;
@@ -1364,6 +1463,11 @@ static int mma8452_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
iio_triggered_buffer_cleanup(indio_dev);
mma8452_trigger_cleanup(indio_dev);
mma8452_standby(iio_priv(indio_dev));
@@ -1371,6 +1475,45 @@ static int mma8452_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int mma8452_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct mma8452_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = mma8452_standby(data);
+ mutex_unlock(&data->lock);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "powering off device failed\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int mma8452_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct mma8452_data *data = iio_priv(indio_dev);
+ int ret, sleep_val;
+
+ ret = mma8452_active(data);
+ if (ret < 0)
+ return ret;
+
+ ret = mma8452_get_odr_index(data);
+ sleep_val = 1000 / mma8452_samp_freq[ret][0];
+ if (sleep_val < 20)
+ usleep_range(sleep_val * 1000, 20000);
+ else
+ msleep_interruptible(sleep_val);
+
+ return 0;
+}
+#endif
+
#ifdef CONFIG_PM_SLEEP
static int mma8452_suspend(struct device *dev)
{
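
The resume delay above is one sample period derived from the integer part of
the sampling-frequency table: assuming the chip's 800 Hz rate, sleep_val =
1000 / 800 = 1 ms and usleep_range(1000, 20000) is used; assuming the 6.25 Hz
rate (integer part 6), sleep_val = 1000 / 6 = 166 ms and
msleep_interruptible(166) is used instead.
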
@@ -1383,18 +1526,21 @@ static int mma8452_resume(struct device *dev)
return mma8452_active(iio_priv(i2c_get_clientdata(
to_i2c_client(dev))));
}
-
-static SIMPLE_DEV_PM_OPS(mma8452_pm_ops, mma8452_suspend, mma8452_resume);
-#define MMA8452_PM_OPS (&mma8452_pm_ops)
-#else
-#define MMA8452_PM_OPS NULL
#endif
+static const struct dev_pm_ops mma8452_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mma8452_suspend, mma8452_resume)
+ SET_RUNTIME_PM_OPS(mma8452_runtime_suspend,
+ mma8452_runtime_resume, NULL)
+};
+
static const struct i2c_device_id mma8452_id[] = {
+ { "mma8451", mma8451 },
{ "mma8452", mma8452 },
{ "mma8453", mma8453 },
{ "mma8652", mma8652 },
{ "mma8653", mma8653 },
+ { "fxls8471", fxls8471 },
{ }
};
MODULE_DEVICE_TABLE(i2c, mma8452_id);
@@ -1403,7 +1549,7 @@ static struct i2c_driver mma8452_driver = {
.driver = {
.name = "mma8452",
.of_match_table = of_match_ptr(mma8452_dt_ids),
- .pm = MMA8452_PM_OPS,
+ .pm = &mma8452_pm_ops,
},
.probe = mma8452_probe,
.remove = mma8452_remove,
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index fa7d36217..bb05f3efd 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -17,7 +17,6 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index e72e218c2..c23f47af7 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -17,7 +17,6 @@
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
@@ -380,31 +379,6 @@ static const struct iio_trigger_ops mxc4005_trigger_ops = {
.owner = THIS_MODULE,
};
-static int mxc4005_gpio_probe(struct i2c_client *client,
- struct mxc4005_data *data)
-{
- struct device *dev;
- struct gpio_desc *gpio;
- int ret;
-
- if (!client)
- return -EINVAL;
-
- dev = &client->dev;
-
- gpio = devm_gpiod_get_index(dev, "mxc4005_int", 0, GPIOD_IN);
- if (IS_ERR(gpio)) {
- dev_err(dev, "failed to get acpi gpio index\n");
- return PTR_ERR(gpio);
- }
-
- ret = gpiod_to_irq(gpio);
-
- dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
-
- return ret;
-}
-
static int mxc4005_chip_init(struct mxc4005_data *data)
{
int ret;
@@ -470,9 +444,6 @@ static int mxc4005_probe(struct i2c_client *client,
return ret;
}
- if (client->irq < 0)
- client->irq = mxc4005_gpio_probe(client, data);
-
if (client->irq > 0) {
data->dready_trig = devm_iio_trigger_alloc(&client->dev,
"%s-dev%d",
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 5d4a1897b..57f83a679 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/iio/common/st_sensors.h>
+#define H3LIS331DL_DRIVER_NAME "h3lis331dl_accel"
#define LIS3LV02DL_ACCEL_DEV_NAME "lis3lv02dl_accel"
#define LSM303DLHC_ACCEL_DEV_NAME "lsm303dlhc_accel"
#define LIS3DH_ACCEL_DEV_NAME "lis3dh"
diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c
index a1e642ee1..7fddc137e 100644
--- a/drivers/iio/accel/st_accel_buffer.c
+++ b/drivers/iio/accel/st_accel_buffer.c
@@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = {
int st_accel_allocate_ring(struct iio_dev *indio_dev)
{
- return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ return iio_triggered_buffer_setup(indio_dev, NULL,
&st_sensors_trigger_handler, &st_accel_buffer_setup_ops);
}
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index a03a1417d..4d95bfc47 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -39,6 +39,9 @@
#define ST_ACCEL_FS_AVL_6G 6
#define ST_ACCEL_FS_AVL_8G 8
#define ST_ACCEL_FS_AVL_16G 16
+#define ST_ACCEL_FS_AVL_100G 100
+#define ST_ACCEL_FS_AVL_200G 200
+#define ST_ACCEL_FS_AVL_400G 400
/* CUSTOM VALUES FOR SENSOR 1 */
#define ST_ACCEL_1_WAI_EXP 0x33
@@ -96,6 +99,8 @@
#define ST_ACCEL_2_DRDY_IRQ_INT2_MASK 0x10
#define ST_ACCEL_2_IHL_IRQ_ADDR 0x22
#define ST_ACCEL_2_IHL_IRQ_MASK 0x80
+#define ST_ACCEL_2_OD_IRQ_ADDR 0x22
+#define ST_ACCEL_2_OD_IRQ_MASK 0x40
#define ST_ACCEL_2_MULTIREAD_BIT true
/* CUSTOM VALUES FOR SENSOR 3 */
@@ -177,10 +182,39 @@
#define ST_ACCEL_5_DRDY_IRQ_INT2_MASK 0x20
#define ST_ACCEL_5_IHL_IRQ_ADDR 0x22
#define ST_ACCEL_5_IHL_IRQ_MASK 0x80
+#define ST_ACCEL_5_OD_IRQ_ADDR 0x22
+#define ST_ACCEL_5_OD_IRQ_MASK 0x40
#define ST_ACCEL_5_IG1_EN_ADDR 0x21
#define ST_ACCEL_5_IG1_EN_MASK 0x08
#define ST_ACCEL_5_MULTIREAD_BIT false
+/* CUSTOM VALUES FOR SENSOR 6 */
+#define ST_ACCEL_6_WAI_EXP 0x32
+#define ST_ACCEL_6_ODR_ADDR 0x20
+#define ST_ACCEL_6_ODR_MASK 0x18
+#define ST_ACCEL_6_ODR_AVL_50HZ_VAL 0x00
+#define ST_ACCEL_6_ODR_AVL_100HZ_VAL 0x01
+#define ST_ACCEL_6_ODR_AVL_400HZ_VAL 0x02
+#define ST_ACCEL_6_ODR_AVL_1000HZ_VAL 0x03
+#define ST_ACCEL_6_PW_ADDR 0x20
+#define ST_ACCEL_6_PW_MASK 0x20
+#define ST_ACCEL_6_FS_ADDR 0x23
+#define ST_ACCEL_6_FS_MASK 0x30
+#define ST_ACCEL_6_FS_AVL_100_VAL 0x00
+#define ST_ACCEL_6_FS_AVL_200_VAL 0x01
+#define ST_ACCEL_6_FS_AVL_400_VAL 0x03
+#define ST_ACCEL_6_FS_AVL_100_GAIN IIO_G_TO_M_S_2(49000)
+#define ST_ACCEL_6_FS_AVL_200_GAIN IIO_G_TO_M_S_2(98000)
+#define ST_ACCEL_6_FS_AVL_400_GAIN IIO_G_TO_M_S_2(195000)
+#define ST_ACCEL_6_BDU_ADDR 0x23
+#define ST_ACCEL_6_BDU_MASK 0x80
+#define ST_ACCEL_6_DRDY_IRQ_ADDR 0x22
+#define ST_ACCEL_6_DRDY_IRQ_INT1_MASK 0x02
+#define ST_ACCEL_6_DRDY_IRQ_INT2_MASK 0x10
+#define ST_ACCEL_6_IHL_IRQ_ADDR 0x22
+#define ST_ACCEL_6_IHL_IRQ_MASK 0x80
+#define ST_ACCEL_6_MULTIREAD_BIT true
+
static const struct iio_chan_spec st_accel_8bit_channels[] = {
ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -302,6 +336,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK,
.addr_ihl = ST_ACCEL_1_IHL_IRQ_ADDR,
.mask_ihl = ST_ACCEL_1_IHL_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT,
.bootime = 2,
@@ -367,6 +402,9 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK,
.addr_ihl = ST_ACCEL_2_IHL_IRQ_ADDR,
.mask_ihl = ST_ACCEL_2_IHL_IRQ_MASK,
+ .addr_od = ST_ACCEL_2_OD_IRQ_ADDR,
+ .mask_od = ST_ACCEL_2_OD_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT,
.bootime = 2,
@@ -444,6 +482,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK,
.addr_ihl = ST_ACCEL_3_IHL_IRQ_ADDR,
.mask_ihl = ST_ACCEL_3_IHL_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
.ig1 = {
.en_addr = ST_ACCEL_3_IG1_EN_ADDR,
.en_mask = ST_ACCEL_3_IG1_EN_MASK,
@@ -502,6 +541,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.drdy_irq = {
.addr = ST_ACCEL_4_DRDY_IRQ_ADDR,
.mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT,
.bootime = 2, /* guess */
@@ -553,10 +593,75 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int2 = ST_ACCEL_5_DRDY_IRQ_INT2_MASK,
.addr_ihl = ST_ACCEL_5_IHL_IRQ_ADDR,
.mask_ihl = ST_ACCEL_5_IHL_IRQ_MASK,
+ .addr_od = ST_ACCEL_5_OD_IRQ_ADDR,
+ .mask_od = ST_ACCEL_5_OD_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_ACCEL_5_MULTIREAD_BIT,
.bootime = 2, /* guess */
},
+ {
+ .wai = ST_ACCEL_6_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = H3LIS331DL_DRIVER_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
+ .odr = {
+ .addr = ST_ACCEL_6_ODR_ADDR,
+ .mask = ST_ACCEL_6_ODR_MASK,
+ .odr_avl = {
+ { 50, ST_ACCEL_6_ODR_AVL_50HZ_VAL },
+ { 100, ST_ACCEL_6_ODR_AVL_100HZ_VAL },
+ { 400, ST_ACCEL_6_ODR_AVL_400HZ_VAL },
+ { 1000, ST_ACCEL_6_ODR_AVL_1000HZ_VAL },
+ },
+ },
+ .pw = {
+ .addr = ST_ACCEL_6_PW_ADDR,
+ .mask = ST_ACCEL_6_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_ACCEL_6_FS_ADDR,
+ .mask = ST_ACCEL_6_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_100G,
+ .value = ST_ACCEL_6_FS_AVL_100_VAL,
+ .gain = ST_ACCEL_6_FS_AVL_100_GAIN,
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_200G,
+ .value = ST_ACCEL_6_FS_AVL_200_VAL,
+ .gain = ST_ACCEL_6_FS_AVL_200_GAIN,
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_400G,
+ .value = ST_ACCEL_6_FS_AVL_400_VAL,
+ .gain = ST_ACCEL_6_FS_AVL_400_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_ACCEL_6_BDU_ADDR,
+ .mask = ST_ACCEL_6_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_ACCEL_6_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_ACCEL_6_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_ACCEL_6_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_ACCEL_6_IHL_IRQ_ADDR,
+ .mask_ihl = ST_ACCEL_6_IHL_IRQ_MASK,
+ },
+ .multi_read_bit = ST_ACCEL_6_MULTIREAD_BIT,
+ .bootime = 2,
+ },
};
static int st_accel_read_raw(struct iio_dev *indio_dev,
@@ -636,6 +741,7 @@ static const struct iio_info accel_info = {
static const struct iio_trigger_ops st_accel_trigger_ops = {
.owner = THIS_MODULE,
.set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE,
+ .validate_device = st_sensors_validate_device,
};
#define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops)
#else
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 294a32f89..7333ee9fb 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -76,6 +76,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis2dh12-accel",
.data = LIS2DH12_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,h3lis331dl-accel",
+ .data = H3LIS331DL_DRIVER_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index 85fe7f724..e31023dc5 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -11,7 +11,6 @@
*/
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 5709d9eb8..300d955ba 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -11,7 +11,6 @@
*/
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 82c718c51..25378c588 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -242,6 +242,16 @@ config LP8788_ADC
To compile this driver as a module, choose M here: the module will be
called lp8788_adc.
+config LPC18XX_ADC
+ tristate "NXP LPC18xx ADC driver"
+ depends on ARCH_LPC18XX || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ help
+ Say yes here to build support for NXP LPC18XX ADC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called lpc18xx_adc.
+
config MAX1027
tristate "Maxim max1027 ADC driver"
depends on SPI
@@ -375,11 +385,11 @@ config ROCKCHIP_SARADC
module will be called rockchip_saradc.
config TI_ADC081C
- tristate "Texas Instruments ADC081C021/027"
+ tristate "Texas Instruments ADC081C/ADC101C/ADC121C family"
depends on I2C
help
- If you say yes here you get support for Texas Instruments ADC081C021
- and ADC081C027 ADC chips.
+ If you say yes here you get support for Texas Instruments ADC081C,
+ ADC101C and ADC121C ADC chips.
This driver can also be built as a module. If so, the module will be
called ti-adc081c.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 0cb79210a..38638d46f 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_HI8435) += hi8435.o
obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o
obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
+obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
obj-$(CONFIG_MAX1027) += max1027.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_MCP320X) += mcp320x.o
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 01d71588d..a3f5254f4 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -477,7 +477,7 @@ static int ad799x_read_event_value(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
*val = (ret >> chan->scan_type.shift) &
- GENMASK(chan->scan_type.realbits - 1 , 0);
+ GENMASK(chan->scan_type.realbits - 1, 0);
return IIO_VAL_INT;
}
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 2e154cb51..e10dca3ed 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -66,8 +66,10 @@
#define AT91_SAMA5D2_MR_PRESCAL(v) ((v) << AT91_SAMA5D2_MR_PRESCAL_OFFSET)
#define AT91_SAMA5D2_MR_PRESCAL_OFFSET 8
#define AT91_SAMA5D2_MR_PRESCAL_MAX 0xff
+#define AT91_SAMA5D2_MR_PRESCAL_MASK GENMASK(15, 8)
/* Startup Time */
#define AT91_SAMA5D2_MR_STARTUP(v) ((v) << 16)
+#define AT91_SAMA5D2_MR_STARTUP_MASK GENMASK(19, 16)
/* Analog Change */
#define AT91_SAMA5D2_MR_ANACH BIT(23)
/* Tracking Time */
@@ -92,13 +94,13 @@
/* Last Converted Data Register */
#define AT91_SAMA5D2_LCDR 0x20
/* Interrupt Enable Register */
-#define AT91_SAMA5D2_IER 0x24
+#define AT91_SAMA5D2_IER 0x24
/* Interrupt Disable Register */
-#define AT91_SAMA5D2_IDR 0x28
+#define AT91_SAMA5D2_IDR 0x28
/* Interrupt Mask Register */
-#define AT91_SAMA5D2_IMR 0x2c
+#define AT91_SAMA5D2_IMR 0x2c
/* Interrupt Status Register */
-#define AT91_SAMA5D2_ISR 0x30
+#define AT91_SAMA5D2_ISR 0x30
/* Last Channel Trigger Mode Register */
#define AT91_SAMA5D2_LCTMR 0x34
/* Last Channel Compare Window Register */
@@ -106,17 +108,20 @@
/* Overrun Status Register */
#define AT91_SAMA5D2_OVER 0x3c
/* Extended Mode Register */
-#define AT91_SAMA5D2_EMR 0x40
+#define AT91_SAMA5D2_EMR 0x40
/* Compare Window Register */
-#define AT91_SAMA5D2_CWR 0x44
+#define AT91_SAMA5D2_CWR 0x44
/* Channel Gain Register */
-#define AT91_SAMA5D2_CGR 0x48
+#define AT91_SAMA5D2_CGR 0x48
+
/* Channel Offset Register */
-#define AT91_SAMA5D2_COR 0x4c
+#define AT91_SAMA5D2_COR 0x4c
+#define AT91_SAMA5D2_COR_DIFF_OFFSET 16
+
/* Channel Data Register 0 */
#define AT91_SAMA5D2_CDR0 0x50
/* Analog Control Register */
-#define AT91_SAMA5D2_ACR 0x94
+#define AT91_SAMA5D2_ACR 0x94
/* Touchscreen Mode Register */
#define AT91_SAMA5D2_TSMR 0xb0
/* Touchscreen X Position Register */
@@ -130,7 +135,7 @@
/* Correction Select Register */
#define AT91_SAMA5D2_COSR 0xd0
/* Correction Value Register */
-#define AT91_SAMA5D2_CVR 0xd4
+#define AT91_SAMA5D2_CVR 0xd4
/* Channel Error Correction Register */
#define AT91_SAMA5D2_CECR 0xd8
/* Write Protection Mode Register */
@@ -140,7 +145,7 @@
/* Version Register */
#define AT91_SAMA5D2_VERSION 0xfc
-#define AT91_AT91_SAMA5D2_CHAN(num, addr) \
+#define AT91_SAMA5D2_CHAN_SINGLE(num, addr) \
{ \
.type = IIO_VOLTAGE, \
.channel = num, \
@@ -156,6 +161,24 @@
.indexed = 1, \
}
+#define AT91_SAMA5D2_CHAN_DIFF(num, num2, addr) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .differential = 1, \
+ .channel = num, \
+ .channel2 = num2, \
+ .address = addr, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ }, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .datasheet_name = "CH"#num"-CH"#num2, \
+ .indexed = 1, \
+ }
+
#define at91_adc_readl(st, reg) readl_relaxed(st->base + reg)
#define at91_adc_writel(st, reg, val) writel_relaxed(val, st->base + reg)
@@ -185,18 +208,24 @@ struct at91_adc_state {
};
static const struct iio_chan_spec at91_adc_channels[] = {
- AT91_AT91_SAMA5D2_CHAN(0, 0x50),
- AT91_AT91_SAMA5D2_CHAN(1, 0x54),
- AT91_AT91_SAMA5D2_CHAN(2, 0x58),
- AT91_AT91_SAMA5D2_CHAN(3, 0x5c),
- AT91_AT91_SAMA5D2_CHAN(4, 0x60),
- AT91_AT91_SAMA5D2_CHAN(5, 0x64),
- AT91_AT91_SAMA5D2_CHAN(6, 0x68),
- AT91_AT91_SAMA5D2_CHAN(7, 0x6c),
- AT91_AT91_SAMA5D2_CHAN(8, 0x70),
- AT91_AT91_SAMA5D2_CHAN(9, 0x74),
- AT91_AT91_SAMA5D2_CHAN(10, 0x78),
- AT91_AT91_SAMA5D2_CHAN(11, 0x7c),
+ AT91_SAMA5D2_CHAN_SINGLE(0, 0x50),
+ AT91_SAMA5D2_CHAN_SINGLE(1, 0x54),
+ AT91_SAMA5D2_CHAN_SINGLE(2, 0x58),
+ AT91_SAMA5D2_CHAN_SINGLE(3, 0x5c),
+ AT91_SAMA5D2_CHAN_SINGLE(4, 0x60),
+ AT91_SAMA5D2_CHAN_SINGLE(5, 0x64),
+ AT91_SAMA5D2_CHAN_SINGLE(6, 0x68),
+ AT91_SAMA5D2_CHAN_SINGLE(7, 0x6c),
+ AT91_SAMA5D2_CHAN_SINGLE(8, 0x70),
+ AT91_SAMA5D2_CHAN_SINGLE(9, 0x74),
+ AT91_SAMA5D2_CHAN_SINGLE(10, 0x78),
+ AT91_SAMA5D2_CHAN_SINGLE(11, 0x7c),
+ AT91_SAMA5D2_CHAN_DIFF(0, 1, 0x50),
+ AT91_SAMA5D2_CHAN_DIFF(2, 3, 0x58),
+ AT91_SAMA5D2_CHAN_DIFF(4, 5, 0x60),
+ AT91_SAMA5D2_CHAN_DIFF(6, 7, 0x68),
+ AT91_SAMA5D2_CHAN_DIFF(8, 9, 0x70),
+ AT91_SAMA5D2_CHAN_DIFF(10, 11, 0x78),
};
static unsigned at91_adc_startup_time(unsigned startup_time_min,
@@ -226,7 +255,7 @@ static unsigned at91_adc_startup_time(unsigned startup_time_min,
static void at91_adc_setup_samp_freq(struct at91_adc_state *st, unsigned freq)
{
struct iio_dev *indio_dev = iio_priv_to_dev(st);
- unsigned f_per, prescal, startup;
+ unsigned f_per, prescal, startup, mr;
f_per = clk_get_rate(st->per_clk);
prescal = (f_per / (2 * freq)) - 1;
@@ -234,10 +263,11 @@ static void at91_adc_setup_samp_freq(struct at91_adc_state *st, unsigned freq)
startup = at91_adc_startup_time(st->soc_info.startup_time,
freq / 1000);
- at91_adc_writel(st, AT91_SAMA5D2_MR,
- AT91_SAMA5D2_MR_TRANSFER(2)
- | AT91_SAMA5D2_MR_STARTUP(startup)
- | AT91_SAMA5D2_MR_PRESCAL(prescal));
+ mr = at91_adc_readl(st, AT91_SAMA5D2_MR);
+ mr &= ~(AT91_SAMA5D2_MR_STARTUP_MASK | AT91_SAMA5D2_MR_PRESCAL_MASK);
+ mr |= AT91_SAMA5D2_MR_STARTUP(startup);
+ mr |= AT91_SAMA5D2_MR_PRESCAL(prescal);
+ at91_adc_writel(st, AT91_SAMA5D2_MR, mr);
dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u\n",
freq, startup, prescal);
@@ -278,6 +308,7 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct at91_adc_state *st = iio_priv(indio_dev);
+ u32 cor = 0;
int ret;
switch (mask) {
@@ -286,6 +317,11 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
st->chan = chan;
+ if (chan->differential)
+ cor = (BIT(chan->channel) | BIT(chan->channel2)) <<
+ AT91_SAMA5D2_COR_DIFF_OFFSET;
+
+ at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
at91_adc_writel(st, AT91_SAMA5D2_IER, BIT(chan->channel));
at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
@@ -298,6 +334,8 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
if (ret > 0) {
*val = st->conversion_value;
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(*val, 11);
ret = IIO_VAL_INT;
st->conversion_done = false;
}
@@ -310,6 +348,8 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
*val = st->vref_uv / 1000;
+ if (chan->differential)
+ *val *= 2;
*val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
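
Worked example for the differential path, assuming a 3.3 V reference:
*val = 3300 * 2 = 6600 and *val2 = 12, so IIO reports 6600 / 2^12, roughly
1.611 mV per LSB, and the raw code is sign-extended from bit 11. The sign
extension is equivalent to this standalone helper:

#include <stdint.h>

/* same result as sign_extend32(val, 11) for the 12-bit conversion */
static int32_t sign_extend12(uint32_t val)
{
	return (int32_t)(val << 20) >> 20;	/* 0xfff -> -1, 0x7ff -> 2047 */
}
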
@@ -444,6 +484,12 @@ static int at91_adc_probe(struct platform_device *pdev)
at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST);
at91_adc_writel(st, AT91_SAMA5D2_IDR, 0xffffffff);
+ /*
+ * The transfer field must be set to 2 according to the datasheet;
+ * ANACH allows different analog settings for each channel.
+ */
+ at91_adc_writel(st, AT91_SAMA5D2_MR,
+ AT91_SAMA5D2_MR_TRANSFER(2) | AT91_SAMA5D2_MR_ANACH);
at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate);
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index f284cd6a9..52430ba17 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -797,8 +797,8 @@ static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz)
* Startup Time = <lookup_table_value> / ADC Clock
*/
const int startup_lookup[] = {
- 0 , 8 , 16 , 24 ,
- 64 , 80 , 96 , 112,
+ 0, 8, 16, 24,
+ 64, 80, 96, 112,
512, 576, 640, 704,
768, 832, 896, 960
};
@@ -924,14 +924,14 @@ static int at91_adc_probe_dt(struct at91_adc_state *st,
ret = -EINVAL;
goto error_ret;
}
- trig->name = name;
+ trig->name = name;
if (of_property_read_u32(trig_node, "trigger-value", &prop)) {
dev_err(&idev->dev, "Missing trigger-value property in the DT.\n");
ret = -EINVAL;
goto error_ret;
}
- trig->value = prop;
+ trig->value = prop;
trig->is_external = of_property_read_bool(trig_node, "trigger-external");
i++;
}
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 65909d585..502f2fbe8 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -185,9 +185,9 @@ static int ina2xx_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->address) {
case INA2XX_SHUNT_VOLTAGE:
- /* processed (mV) = raw*1000/shunt_div */
+ /* processed (mV) = raw/shunt_div */
*val2 = chip->config->shunt_div;
- *val = 1000;
+ *val = 1;
return IIO_VAL_FRACTIONAL;
case INA2XX_BUS_VOLTAGE:
@@ -350,6 +350,23 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
return len;
}
+/*
+ * Set current LSB to 1mA, shunt is in uOhms
+ * (equation 13 in datasheet). We hardcode a Current_LSB
+ * of 1.0 x 10^-6. The only remaining parameter is RShunt.
+ * There is no need to expose the CALIBRATION register
+ * to the user for now. But we need to reset this register
+ * if the user updates RShunt after driver init, e.g. upon
+ * reading an EEPROM/Probe-type value.
+ */
+static int ina2xx_set_calibration(struct ina2xx_chip_info *chip)
+{
+ u16 regval = DIV_ROUND_CLOSEST(chip->config->calibration_factor,
+ chip->shunt_resistor);
+
+ return regmap_write(chip->regmap, INA2XX_CALIBRATION, regval);
+}
+
static int set_shunt_resistor(struct ina2xx_chip_info *chip, unsigned int val)
{
if (val <= 0 || val > chip->config->calibration_factor)
@@ -385,6 +402,11 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
if (ret)
return ret;
+ /* Update the Calibration register */
+ ret = ina2xx_set_calibration(chip);
+ if (ret)
+ return ret;
+
return len;
}
@@ -602,24 +624,11 @@ static const struct iio_info ina2xx_info = {
/* Initialize the configuration and calibration registers. */
static int ina2xx_init(struct ina2xx_chip_info *chip, unsigned int config)
{
- u16 regval;
- int ret;
-
- ret = regmap_write(chip->regmap, INA2XX_CONFIG, config);
+ int ret = regmap_write(chip->regmap, INA2XX_CONFIG, config);
if (ret)
return ret;
- /*
- * Set current LSB to 1mA, shunt is in uOhms
- * (equation 13 in datasheet). We hardcode a Current_LSB
- * of 1.0 x10-6. The only remaining parameter is RShunt.
- * There is no need to expose the CALIBRATION register
- * to the user for now.
- */
- regval = DIV_ROUND_CLOSEST(chip->config->calibration_factor,
- chip->shunt_resistor);
-
- return regmap_write(chip->regmap, INA2XX_CALIBRATION, regval);
+ return ina2xx_set_calibration(chip);
}
static int ina2xx_probe(struct i2c_client *client,
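
The calibration write is pure integer math; a standalone check of the register
value (the 5120000 factor and 10000 uOhm shunt are assumed example values):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, divisor) (((x) + ((divisor) / 2)) / (divisor))

int main(void)
{
	unsigned int calibration_factor = 5120000;	/* assumed chip factor */
	unsigned int shunt_uohm = 10000;		/* assumed 10 mOhm shunt */

	/* mirrors ina2xx_set_calibration(): 5120000 / 10000 = 512 */
	printf("CALIBRATION = %u\n",
	       DIV_ROUND_CLOSEST(calibration_factor, shunt_uohm));
	return 0;
}
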
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
new file mode 100644
index 000000000..3ef18f4b2
--- /dev/null
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -0,0 +1,231 @@
+/*
+ * IIO ADC driver for NXP LPC18xx ADC
+ *
+ * Copyright (C) 2016 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * UNSUPPORTED hardware features:
+ * - Hardware triggers
+ * - Burst mode
+ * - Interrupts
+ * - DMA
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+/* LPC18XX ADC registers and bits */
+#define LPC18XX_ADC_CR 0x000
+#define LPC18XX_ADC_CR_CLKDIV_SHIFT 8
+#define LPC18XX_ADC_CR_PDN BIT(21)
+#define LPC18XX_ADC_CR_START_NOW (0x1 << 24)
+#define LPC18XX_ADC_GDR 0x004
+
+/* Data register bits */
+#define LPC18XX_ADC_SAMPLE_SHIFT 6
+#define LPC18XX_ADC_SAMPLE_MASK 0x3ff
+#define LPC18XX_ADC_CONV_DONE BIT(31)
+
+/* Clock should be 4.5 MHz or less */
+#define LPC18XX_ADC_CLK_TARGET 4500000
+
+struct lpc18xx_adc {
+ struct regulator *vref;
+ void __iomem *base;
+ struct device *dev;
+ struct mutex lock;
+ struct clk *clk;
+ u32 cr_reg;
+};
+
+#define LPC18XX_ADC_CHAN(_idx) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _idx, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec lpc18xx_adc_iio_channels[] = {
+ LPC18XX_ADC_CHAN(0),
+ LPC18XX_ADC_CHAN(1),
+ LPC18XX_ADC_CHAN(2),
+ LPC18XX_ADC_CHAN(3),
+ LPC18XX_ADC_CHAN(4),
+ LPC18XX_ADC_CHAN(5),
+ LPC18XX_ADC_CHAN(6),
+ LPC18XX_ADC_CHAN(7),
+};
+
+static int lpc18xx_adc_read_chan(struct lpc18xx_adc *adc, unsigned int ch)
+{
+ int ret;
+ u32 reg;
+
+ reg = adc->cr_reg | BIT(ch) | LPC18XX_ADC_CR_START_NOW;
+ writel(reg, adc->base + LPC18XX_ADC_CR);
+
+ ret = readl_poll_timeout(adc->base + LPC18XX_ADC_GDR, reg,
+ reg & LPC18XX_ADC_CONV_DONE, 3, 9);
+ if (ret) {
+ dev_warn(adc->dev, "adc read timed out\n");
+ return ret;
+ }
+
+ return (reg >> LPC18XX_ADC_SAMPLE_SHIFT) & LPC18XX_ADC_SAMPLE_MASK;
+}
+
+static int lpc18xx_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct lpc18xx_adc *adc = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&adc->lock);
+ *val = lpc18xx_adc_read_chan(adc, chan->channel);
+ mutex_unlock(&adc->lock);
+ if (*val < 0)
+ return *val;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = regulator_get_voltage(adc->vref) / 1000;
+ *val2 = 10;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info lpc18xx_adc_info = {
+ .read_raw = lpc18xx_adc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int lpc18xx_adc_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct lpc18xx_adc *adc;
+ struct resource *res;
+ unsigned int clkdiv;
+ unsigned long rate;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ adc = iio_priv(indio_dev);
+ adc->dev = &pdev->dev;
+ mutex_init(&adc->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(adc->base))
+ return PTR_ERR(adc->base);
+
+ adc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(adc->clk)) {
+ dev_err(&pdev->dev, "error getting clock\n");
+ return PTR_ERR(adc->clk);
+ }
+
+ rate = clk_get_rate(adc->clk);
+ clkdiv = DIV_ROUND_UP(rate, LPC18XX_ADC_CLK_TARGET);
+
+ adc->vref = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(adc->vref)) {
+ dev_err(&pdev->dev, "error getting regulator\n");
+ return PTR_ERR(adc->vref);
+ }
+
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &lpc18xx_adc_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = lpc18xx_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(lpc18xx_adc_iio_channels);
+
+ ret = regulator_enable(adc->vref);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable regulator\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(adc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable clock\n");
+ goto dis_reg;
+ }
+
+ adc->cr_reg = (clkdiv << LPC18XX_ADC_CR_CLKDIV_SHIFT) |
+ LPC18XX_ADC_CR_PDN;
+ writel(adc->cr_reg, adc->base + LPC18XX_ADC_CR);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register device\n");
+ goto dis_clk;
+ }
+
+ return 0;
+
+dis_clk:
+ writel(0, adc->base + LPC18XX_ADC_CR);
+ clk_disable_unprepare(adc->clk);
+dis_reg:
+ regulator_disable(adc->vref);
+ return ret;
+}
+
+static int lpc18xx_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct lpc18xx_adc *adc = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ writel(0, adc->base + LPC18XX_ADC_CR);
+ clk_disable_unprepare(adc->clk);
+ regulator_disable(adc->vref);
+
+ return 0;
+}
+
+static const struct of_device_id lpc18xx_adc_match[] = {
+ { .compatible = "nxp,lpc1850-adc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_adc_match);
+
+static struct platform_driver lpc18xx_adc_driver = {
+ .probe = lpc18xx_adc_probe,
+ .remove = lpc18xx_adc_remove,
+ .driver = {
+ .name = "lpc18xx-adc",
+ .of_match_table = lpc18xx_adc_match,
+ },
+};
+module_platform_driver(lpc18xx_adc_driver);
+
+MODULE_DESCRIPTION("LPC18xx ADC driver");
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_LICENSE("GPL v2");
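
The divider is rounded up so the ADC clock never exceeds the 4.5 MHz target:
assuming a 204 MHz peripheral clock, clkdiv = DIV_ROUND_UP(204000000, 4500000)
= 46, which puts the ADC clock at roughly 204 / 46 or about 4.43 MHz.
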
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index d7b36efd2..d1172dc1e 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -61,9 +61,9 @@
static const int mcp3422_scales[4][4] = {
{ 1000000, 500000, 250000, 125000 },
- { 250000 , 125000, 62500 , 31250 },
- { 62500 , 31250 , 15625 , 7812 },
- { 15625 , 7812 , 3906 , 1953 } };
+ { 250000, 125000, 62500, 31250 },
+ { 62500, 31250, 15625, 7812 },
+ { 15625, 7812, 3906, 1953 } };
/* Constant msleep times for data acquisitions */
static const int mcp3422_read_times[4] = {
diff --git a/drivers/iio/adc/mxs-lradc.c b/drivers/iio/adc/mxs-lradc.c
index 33051b87a..ad26da1ed 100644
--- a/drivers/iio/adc/mxs-lradc.c
+++ b/drivers/iio/adc/mxs-lradc.c
@@ -686,6 +686,17 @@ static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc)
static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
{
+ /* Configure the touchscreen type */
+ if (lradc->soc == IMX28_LRADC) {
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
+ LRADC_CTRL0);
+
+ if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE)
+ mxs_lradc_reg_set(lradc,
+ LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
+ LRADC_CTRL0);
+ }
+
mxs_lradc_setup_touch_detection(lradc);
lradc->cur_plate = LRADC_TOUCH;
@@ -1127,6 +1138,7 @@ static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
__set_bit(EV_ABS, input->evbit);
__set_bit(EV_KEY, input->evbit);
__set_bit(BTN_TOUCH, input->keybit);
+ __set_bit(INPUT_PROP_DIRECT, input->propbit);
input_set_abs_params(input, ABS_X, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0);
input_set_abs_params(input, ABS_Y, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0);
input_set_abs_params(input, ABS_PRESSURE, 0, LRADC_SINGLE_SAMPLE_MASK,
@@ -1475,18 +1487,13 @@ static const struct iio_chan_spec mx28_lradc_chan_spec[] = {
MXS_ADC_CHAN(15, IIO_VOLTAGE, "VDD5V"),
};
-static int mxs_lradc_hw_init(struct mxs_lradc *lradc)
+static void mxs_lradc_hw_init(struct mxs_lradc *lradc)
{
/* The ADC always uses DELAY CHANNEL 0. */
const u32 adc_cfg =
(1 << (LRADC_DELAY_TRIGGER_DELAYS_OFFSET + 0)) |
(LRADC_DELAY_TIMER_PER << LRADC_DELAY_DELAY_OFFSET);
- int ret = stmp_reset_block(lradc->base);
-
- if (ret)
- return ret;
-
/* Configure DELAY CHANNEL 0 for generic ADC sampling. */
mxs_lradc_reg_wrt(lradc, adc_cfg, LRADC_DELAY(0));
@@ -1495,20 +1502,8 @@ static int mxs_lradc_hw_init(struct mxs_lradc *lradc)
mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
- /* Configure the touchscreen type */
- if (lradc->soc == IMX28_LRADC) {
- mxs_lradc_reg_clear(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
- LRADC_CTRL0);
-
- if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE)
- mxs_lradc_reg_set(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
- LRADC_CTRL0);
- }
-
/* Start internal temperature sensing. */
mxs_lradc_reg_wrt(lradc, 0, LRADC_CTRL2);
-
- return 0;
}
static void mxs_lradc_hw_stop(struct mxs_lradc *lradc)
@@ -1708,11 +1703,13 @@ static int mxs_lradc_probe(struct platform_device *pdev)
}
}
- /* Configure the hardware. */
- ret = mxs_lradc_hw_init(lradc);
+ ret = stmp_reset_block(lradc->base);
if (ret)
goto err_dev;
+ /* Configure the hardware. */
+ mxs_lradc_hw_init(lradc);
+
/* Register the touchscreen input device. */
if (touch_ret == 0) {
ret = mxs_lradc_ts_register(lradc);
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 9c311c1e1..f9ad6c2d6 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -159,6 +159,22 @@ static const struct rockchip_saradc_data rk3066_tsadc_data = {
.clk_rate = 50000,
};
+static const struct iio_chan_spec rockchip_rk3399_saradc_iio_channels[] = {
+ ADC_CHANNEL(0, "adc0"),
+ ADC_CHANNEL(1, "adc1"),
+ ADC_CHANNEL(2, "adc2"),
+ ADC_CHANNEL(3, "adc3"),
+ ADC_CHANNEL(4, "adc4"),
+ ADC_CHANNEL(5, "adc5"),
+};
+
+static const struct rockchip_saradc_data rk3399_saradc_data = {
+ .num_bits = 10,
+ .channels = rockchip_rk3399_saradc_iio_channels,
+ .num_channels = ARRAY_SIZE(rockchip_rk3399_saradc_iio_channels),
+ .clk_rate = 1000000,
+};
+
static const struct of_device_id rockchip_saradc_match[] = {
{
.compatible = "rockchip,saradc",
@@ -166,6 +182,9 @@ static const struct of_device_id rockchip_saradc_match[] = {
}, {
.compatible = "rockchip,rk3066-tsadc",
.data = &rk3066_tsadc_data,
+ }, {
+ .compatible = "rockchip,rk3399-saradc",
+ .data = &rk3399_saradc_data,
},
{},
};
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index ecbc12138..9fd032d9f 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -1,9 +1,21 @@
/*
+ * TI ADC081C/ADC101C/ADC121C 8/10/12-bit ADC driver
+ *
* Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2016 Intel
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
+ *
+ * Datasheets:
+ * http://www.ti.com/lit/ds/symlink/adc081c021.pdf
+ * http://www.ti.com/lit/ds/symlink/adc101c021.pdf
+ * http://www.ti.com/lit/ds/symlink/adc121c021.pdf
+ *
+ * The devices have a very similar interface and differ mostly in the number of
+ * bits handled. For the 8-bit and 10-bit models, the least-significant 4 or 2
+ * bits of the value registers are reserved.
*/
#include <linux/err.h>
@@ -12,11 +24,17 @@
#include <linux/of.h>
#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include <linux/regulator/consumer.h>
struct adc081c {
struct i2c_client *i2c;
struct regulator *ref;
+
+ /* 8, 10 or 12 */
+ int bits;
};
#define REG_CONV_RES 0x00
@@ -34,7 +52,7 @@ static int adc081c_read_raw(struct iio_dev *iio,
if (err < 0)
return err;
- *value = (err >> 4) & 0xff;
+ *value = (err & 0xFFF) >> (12 - adc->bits);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
@@ -43,7 +61,7 @@ static int adc081c_read_raw(struct iio_dev *iio,
return err;
*value = err / 1000;
- *shift = 8;
+ *shift = adc->bits;
return IIO_VAL_FRACTIONAL_LOG2;
@@ -54,10 +72,53 @@ static int adc081c_read_raw(struct iio_dev *iio,
return -EINVAL;
}
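
All three parts place a left-justified result in the low 12 bits of the word
read, so one expression covers the family. A standalone check of the
extraction (register values chosen arbitrarily):

#include <assert.h>
#include <stdint.h>

/* mirrors adc081c_read_raw(): 12-bit field, result left-justified */
static uint16_t adcxx1c_raw(uint16_t reg, int bits)
{
	return (reg & 0xFFF) >> (12 - bits);
}

int main(void)
{
	assert(adcxx1c_raw(0x0ab0, 8) == 0xab);		/* ADC081C: bits 11:4 */
	assert(adcxx1c_raw(0x0abc, 10) == 0x2af);	/* ADC101C: bits 11:2 */
	assert(adcxx1c_raw(0x0abc, 12) == 0xabc);	/* ADC121C: bits 11:0 */
	return 0;
}
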
-static const struct iio_chan_spec adc081c_channel = {
- .type = IIO_VOLTAGE,
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+#define ADCxx1C_CHAN(_bits) { \
+ .type = IIO_VOLTAGE, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 12 - (_bits), \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+#define DEFINE_ADCxx1C_CHANNELS(_name, _bits) \
+ static const struct iio_chan_spec _name ## _channels[] = { \
+ ADCxx1C_CHAN((_bits)), \
+ IIO_CHAN_SOFT_TIMESTAMP(1), \
+ }; \
+
+#define ADC081C_NUM_CHANNELS 2
+
+struct adcxx1c_model {
+ const struct iio_chan_spec *channels;
+ int bits;
+};
+
+#define ADCxx1C_MODEL(_name, _bits) \
+ { \
+ .channels = _name ## _channels, \
+ .bits = (_bits), \
+ }
+
+DEFINE_ADCxx1C_CHANNELS(adc081c, 8);
+DEFINE_ADCxx1C_CHANNELS(adc101c, 10);
+DEFINE_ADCxx1C_CHANNELS(adc121c, 12);
+
+/* Model ids are indexes into the adcxx1c_models array */
+enum adcxx1c_model_id {
+ ADC081C = 0,
+ ADC101C = 1,
+ ADC121C = 2,
+};
+
+static struct adcxx1c_model adcxx1c_models[] = {
+ ADCxx1C_MODEL(adc081c, 8),
+ ADCxx1C_MODEL(adc101c, 10),
+ ADCxx1C_MODEL(adc121c, 12),
};
static const struct iio_info adc081c_info = {
@@ -65,11 +126,30 @@ static const struct iio_info adc081c_info = {
.driver_module = THIS_MODULE,
};
+static irqreturn_t adc081c_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adc081c *data = iio_priv(indio_dev);
+ u16 buf[8]; /* 2 bytes data + 6 bytes padding + 8 bytes timestamp */
+ int ret;
+
+ ret = i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES);
+ if (ret < 0)
+ goto out;
+ buf[0] = ret;
+ iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
static int adc081c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct iio_dev *iio;
struct adc081c *adc;
+ struct adcxx1c_model *model = &adcxx1c_models[id->driver_data];
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
@@ -81,6 +161,7 @@ static int adc081c_probe(struct i2c_client *client,
adc = iio_priv(iio);
adc->i2c = client;
+ adc->bits = model->bits;
adc->ref = devm_regulator_get(&client->dev, "vref");
if (IS_ERR(adc->ref))
@@ -95,18 +176,26 @@ static int adc081c_probe(struct i2c_client *client,
iio->modes = INDIO_DIRECT_MODE;
iio->info = &adc081c_info;
- iio->channels = &adc081c_channel;
- iio->num_channels = 1;
+ iio->channels = model->channels;
+ iio->num_channels = ADC081C_NUM_CHANNELS;
+
+ err = iio_triggered_buffer_setup(iio, NULL, adc081c_trigger_handler, NULL);
+ if (err < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ goto err_regulator_disable;
+ }
err = iio_device_register(iio);
if (err < 0)
- goto regulator_disable;
+ goto err_buffer_cleanup;
i2c_set_clientdata(client, iio);
return 0;
-regulator_disable:
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(iio);
+err_regulator_disable:
regulator_disable(adc->ref);
return err;
@@ -118,13 +207,16 @@ static int adc081c_remove(struct i2c_client *client)
struct adc081c *adc = iio_priv(iio);
iio_device_unregister(iio);
+ iio_triggered_buffer_cleanup(iio);
regulator_disable(adc->ref);
return 0;
}
static const struct i2c_device_id adc081c_id[] = {
- { "adc081c", 0 },
+ { "adc081c", ADC081C },
+ { "adc101c", ADC101C },
+ { "adc121c", ADC121C },
{ }
};
MODULE_DEVICE_TABLE(i2c, adc081c_id);
@@ -132,6 +224,8 @@ MODULE_DEVICE_TABLE(i2c, adc081c_id);
#ifdef CONFIG_OF
static const struct of_device_id adc081c_of_match[] = {
{ .compatible = "ti,adc081c" },
+ { .compatible = "ti,adc101c" },
+ { .compatible = "ti,adc121c" },
{ }
};
MODULE_DEVICE_TABLE(of, adc081c_of_match);
@@ -149,5 +243,5 @@ static struct i2c_driver adc081c_driver = {
module_i2c_driver(adc081c_driver);
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
-MODULE_DESCRIPTION("Texas Instruments ADC081C021/027 driver");
+MODULE_DESCRIPTION("Texas Instruments ADC081C/ADC101C/ADC121C driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index b10f629cc..653bf1379 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -714,19 +714,19 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
int i;
switch (mask) {
- case IIO_CHAN_INFO_SAMP_FREQ:
- for (i = 0;
- i < ARRAY_SIZE(info->sample_freq_avail);
- i++)
- if (val == info->sample_freq_avail[i]) {
- info->adc_feature.sample_rate = i;
- vf610_adc_sample_set(info);
- return 0;
- }
- break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ for (i = 0;
+ i < ARRAY_SIZE(info->sample_freq_avail);
+ i++)
+ if (val == info->sample_freq_avail[i]) {
+ info->adc_feature.sample_rate = i;
+ vf610_adc_sample_set(info);
+ return 0;
+ }
+ break;
- default:
- break;
+ default:
+ break;
}
return -EINVAL;
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 595511022..5b41f9d0d 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -115,7 +115,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
return ret;
}
- return 0;
+ return 0;
#else
atomic_set(&st->user_requested_state, state);
return _hid_sensor_power_state(st, state);
diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
index 669dc7c27..ecf7721ec 100644
--- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
+++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
@@ -106,7 +106,7 @@ int ms_sensors_convert_and_read(void *cli, u8 conv, u8 rd,
unsigned int delay, u32 *adc)
{
int ret;
- __be32 buf = 0;
+ __be32 buf = 0;
struct i2c_client *client = (struct i2c_client *)cli;
/* Trigger conversion */
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
index e18bc6782..f1693dbeb 100644
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -24,81 +24,30 @@
int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
{
- u8 *addr;
- int i, n = 0, len;
+ int i, len;
+ int total = 0;
struct st_sensor_data *sdata = iio_priv(indio_dev);
unsigned int num_data_channels = sdata->num_data_channels;
- unsigned int byte_for_channel =
- indio_dev->channels[0].scan_type.storagebits >> 3;
-
- addr = kmalloc(num_data_channels, GFP_KERNEL);
- if (!addr) {
- len = -ENOMEM;
- goto st_sensors_get_buffer_element_error;
- }
for (i = 0; i < num_data_channels; i++) {
+ unsigned int bytes_to_read;
+
if (test_bit(i, indio_dev->active_scan_mask)) {
- addr[n] = indio_dev->channels[i].address;
- n++;
- }
- }
- switch (n) {
- case 1:
- len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
- addr[0], byte_for_channel, buf, sdata->multiread_bit);
- break;
- case 2:
- if ((addr[1] - addr[0]) == byte_for_channel) {
+ bytes_to_read = indio_dev->channels[i].scan_type.storagebits >> 3;
len = sdata->tf->read_multiple_byte(&sdata->tb,
- sdata->dev, addr[0], byte_for_channel * n,
- buf, sdata->multiread_bit);
- } else {
- u8 *rx_array;
- rx_array = kmalloc(byte_for_channel * num_data_channels,
- GFP_KERNEL);
- if (!rx_array) {
- len = -ENOMEM;
- goto st_sensors_free_memory;
- }
+ sdata->dev, indio_dev->channels[i].address,
+ bytes_to_read,
+ buf + total, sdata->multiread_bit);
- len = sdata->tf->read_multiple_byte(&sdata->tb,
- sdata->dev, addr[0],
- byte_for_channel * num_data_channels,
- rx_array, sdata->multiread_bit);
- if (len < 0) {
- kfree(rx_array);
- goto st_sensors_free_memory;
- }
-
- for (i = 0; i < n * byte_for_channel; i++) {
- if (i < n)
- buf[i] = rx_array[i];
- else
- buf[i] = rx_array[n + i];
- }
- kfree(rx_array);
- len = byte_for_channel * n;
+ if (len < bytes_to_read)
+ return -EIO;
+
+ /* Advance the buffer pointer */
+ total += len;
}
- break;
- case 3:
- len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
- addr[0], byte_for_channel * num_data_channels,
- buf, sdata->multiread_bit);
- break;
- default:
- len = -EINVAL;
- goto st_sensors_free_memory;
- }
- if (len != byte_for_channel * n) {
- len = -EIO;
- goto st_sensors_free_memory;
}
-st_sensors_free_memory:
- kfree(addr);
-st_sensors_get_buffer_element_error:
- return len;
+ return total;
}
EXPORT_SYMBOL(st_sensors_get_buffer_element);
@@ -108,13 +57,20 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ s64 timestamp;
+
+ /* If we do timestamping here, do it before reading the values */
+ if (sdata->hw_irq_trigger)
+ timestamp = sdata->hw_timestamp;
+ else
+ timestamp = iio_get_time_ns();
len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
if (len < 0)
goto st_sensors_get_buffer_element_error;
iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data,
- pf->timestamp);
+ timestamp);
st_sensors_get_buffer_element_error:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index f5a2d445d..9e59c90f6 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -301,6 +301,14 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
return -EINVAL;
}
+ if (pdata->open_drain) {
+ if (!sdata->sensor_settings->drdy_irq.addr_od)
+ dev_err(&indio_dev->dev,
+ "open drain requested but unsupported.\n");
+ else
+ sdata->int_pin_open_drain = true;
+ }
+
return 0;
}
@@ -321,6 +329,8 @@ static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev,
else
pdata->drdy_int_pin = defdata ? defdata->drdy_int_pin : 0;
+ pdata->open_drain = of_property_read_bool(np, "drive-open-drain");
+
return pdata;
}
#else
@@ -353,6 +363,11 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
if (err < 0)
return err;
+ /* Disable DRDY; this might still be enabled after reboot. */
+ err = st_sensors_set_dataready_irq(indio_dev, false);
+ if (err < 0)
+ return err;
+
if (sdata->current_fullscale) {
err = st_sensors_set_fullscale(indio_dev,
sdata->current_fullscale->num);
@@ -374,6 +389,16 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
return err;
}
+ if (sdata->int_pin_open_drain) {
+ dev_info(&indio_dev->dev,
+ "set interrupt line to open drain mode\n");
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->drdy_irq.addr_od,
+ sdata->sensor_settings->drdy_irq.mask_od, 1);
+ if (err < 0)
+ return err;
+ }
+
err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
return err;
@@ -404,6 +429,9 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
else
drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2;
+ /* Flag to the poll function that the hardware trigger is in use */
+ sdata->hw_irq_trigger = enable;
+
/* Enable/Disable the interrupt generator for data ready. */
err = st_sensors_write_data_with_mask(indio_dev,
sdata->sensor_settings->drdy_irq.addr,
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 6a8c98327..296e4ff19 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -17,6 +17,73 @@
#include <linux/iio/common/st_sensors.h>
#include "st_sensors_core.h"
+/**
+ * st_sensors_irq_handler() - top half of the IRQ-based triggers
+ * @irq: irq number
+ * @p: private handler data
+ */
+irqreturn_t st_sensors_irq_handler(int irq, void *p)
+{
+ struct iio_trigger *trig = p;
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+ /* Grab the timestamp as close to the interrupt event as possible */
+ sdata->hw_timestamp = iio_get_time_ns();
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * st_sensors_irq_thread() - bottom half of the IRQ-based triggers
+ * @irq: irq number
+ * @p: private handler data
+ */
+irqreturn_t st_sensors_irq_thread(int irq, void *p)
+{
+ struct iio_trigger *trig = p;
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct st_sensor_data *sdata = iio_priv(indio_dev);
+ int ret;
+
+ /*
+ * If this trigger is backed by a hardware interrupt and we have a
+ * status register, check if this IRQ came from us
+ */
+ if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
+ u8 status;
+
+ ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+ sdata->sensor_settings->drdy_irq.addr_stat_drdy,
+ &status);
+ if (ret < 0) {
+ dev_err(sdata->dev, "could not read channel status\n");
+ goto out_poll;
+ }
+ /*
+ * The lower bits of .active_scan_mask[0] map directly to the
+ * channels on the sensor: either bit 0 for
+ * one-dimensional sensors, or e.g. x, y, z for accelerometers,
+ * gyroscopes or magnetometers. No sensor uses more than 3
+ * channels, so mask off the other status bits here.
+ */
+ status &= 0x07;
+
+ /*
+ * If this was not caused by any channels on this sensor,
+ * return IRQ_NONE
+ */
+ if (!indio_dev->active_scan_mask)
+ return IRQ_NONE;
+ if (!(status & (u8)indio_dev->active_scan_mask[0]))
+ return IRQ_NONE;
+ }
+
+out_poll:
+ /* It's our IRQ: proceed to handle the register polling */
+ iio_trigger_poll_chained(p);
+ return IRQ_HANDLED;
+}
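+
+/*
+ * Worked example of the status check above, with illustrative values:
+ * an accelerometer sampling X and Z has active_scan_mask[0] =
+ * BIT(0) | BIT(2) = 0x05; if the status register flags new Z data
+ * only, status & 0x07 = 0x04, and 0x04 & 0x05 is non-zero, so the
+ * IRQ is ours and we go on to poll. A status that matches none of
+ * the enabled channels (e.g. another device sharing the line fired)
+ * returns IRQ_NONE instead.
+ */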
+
int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
const struct iio_trigger_ops *trigger_ops)
{
@@ -30,6 +97,10 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
return -ENOMEM;
}
+ iio_trigger_set_drvdata(sdata->trig, indio_dev);
+ sdata->trig->ops = trigger_ops;
+ sdata->trig->dev.parent = sdata->dev;
+
irq = sdata->get_irq_data_ready(indio_dev);
irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
/*
@@ -64,9 +135,25 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
"rising edge\n", irq_trig);
irq_trig = IRQF_TRIGGER_RISING;
}
- err = request_threaded_irq(irq,
- iio_trigger_generic_data_rdy_poll,
- NULL,
+
+ /*
+ * If the interrupt pin is open drain, by definition the
+ * interrupt line may be shared with other peripherals. To
+ * support this we also need a status register and mask to
+ * figure out whether this sensor fired the IRQ, so we can
+ * tell the interrupt handler that it was "our" interrupt.
+ */
+ if (sdata->int_pin_open_drain &&
+ sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+ irq_trig |= IRQF_SHARED;
+
+ /* Threaded handler; IRQF_ONESHOT masks the hard IRQ until it completes */
+ irq_trig |= IRQF_ONESHOT;
+
+ err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
+ st_sensors_irq_handler,
+ st_sensors_irq_thread,
irq_trig,
sdata->trig->name,
sdata->trig);
@@ -75,10 +162,6 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
goto iio_trigger_free;
}
- iio_trigger_set_drvdata(sdata->trig, indio_dev);
- sdata->trig->ops = trigger_ops;
- sdata->trig->dev.parent = sdata->dev;
-
err = iio_trigger_register(sdata->trig);
if (err < 0) {
dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
@@ -106,6 +189,18 @@ void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
}
EXPORT_SYMBOL(st_sensors_deallocate_trigger);
+int st_sensors_validate_device(struct iio_trigger *trig,
+ struct iio_dev *indio_dev)
+{
+ struct iio_dev *indio = iio_trigger_get_drvdata(trig);
+
+ if (indio != indio_dev)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(st_sensors_validate_device);
+
MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index a995139f9..f7c71da42 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -74,6 +74,33 @@ config AD5449
To compile this driver as a module, choose M here: the
module will be called ad5449.
+config AD5592R_BASE
+ tristate
+
+config AD5592R
+ tristate "Analog Devices AD5592R ADC/DAC driver"
+ depends on SPI_MASTER
+ select GPIOLIB
+ select AD5592R_BASE
+ help
+ Say yes here to build support for Analog Devices AD5592R
+ Digital to Analog / Analog to Digital Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5592r.
+
+config AD5593R
+ tristate "Analog Devices AD5593R ADC/DAC driver"
+ depends on I2C
+ select GPIOLIB
+ select AD5592R_BASE
+ help
+ Say yes here to build support for Analog Devices AD5593R
+ Digital to Analog / Analog to Digital Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5593r.
+
config AD5504
tristate "Analog Devices AD5504/AD5501 DAC SPI driver"
depends on SPI
@@ -154,6 +181,16 @@ config AD7303
To compile this driver as module choose M here: the module will be called
ad7303.
+config LPC18XX_DAC
+ tristate "NXP LPC18xx DAC driver"
+ depends on ARCH_LPC18XX || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ help
+ Say yes here to build support for NXP LPC18XX DAC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called lpc18xx_dac.
+
config M62332
tristate "Mitsubishi M62332 DAC driver"
depends on I2C
@@ -210,7 +247,7 @@ config MCP4922
config STX104
tristate "Apex Embedded Systems STX104 DAC driver"
- depends on ISA
+ depends on X86 && ISA_BUS_API
help
Say yes here to build support for the 2-channel DAC on the Apex
Embedded Systems STX104 integrated analog PC/104 card. The base port
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 67b484296..8b78d5ca9 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -11,12 +11,16 @@ obj-$(CONFIG_AD5064) += ad5064.o
obj-$(CONFIG_AD5504) += ad5504.o
obj-$(CONFIG_AD5446) += ad5446.o
obj-$(CONFIG_AD5449) += ad5449.o
+obj-$(CONFIG_AD5592R_BASE) += ad5592r-base.o
+obj-$(CONFIG_AD5592R) += ad5592r.o
+obj-$(CONFIG_AD5593R) += ad5593r.o
obj-$(CONFIG_AD5755) += ad5755.o
obj-$(CONFIG_AD5761) += ad5761.o
obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD5686) += ad5686.o
obj-$(CONFIG_AD7303) += ad7303.o
+obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o
obj-$(CONFIG_M62332) += m62332.o
obj-$(CONFIG_MAX517) += max517.o
obj-$(CONFIG_MAX5821) += max5821.o
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
new file mode 100644
index 000000000..69bde5909
--- /dev/null
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -0,0 +1,691 @@
+/*
+ * AD5592R Digital <-> Analog converters driver
+ *
+ * Copyright 2014-2016 Analog Devices Inc.
+ * Author: Paul Cercueil <paul.cercueil@analog.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio.h>
+#include <linux/property.h>
+
+#include <dt-bindings/iio/adi,ad5592r.h>
+
+#include "ad5592r-base.h"
+
+static int ad5592r_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct ad5592r_state *st = gpiochip_get_data(chip);
+ int ret = 0;
+ u8 val;
+
+ mutex_lock(&st->gpio_lock);
+
+ if (st->gpio_out & BIT(offset))
+ val = st->gpio_val;
+ else
+ ret = st->ops->gpio_read(st, &val);
+
+ mutex_unlock(&st->gpio_lock);
+
+ if (ret < 0)
+ return ret;
+
+ return !!(val & BIT(offset));
+}
+
+static void ad5592r_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct ad5592r_state *st = gpiochip_get_data(chip);
+
+ mutex_lock(&st->gpio_lock);
+
+ if (value)
+ st->gpio_val |= BIT(offset);
+ else
+ st->gpio_val &= ~BIT(offset);
+
+ st->ops->reg_write(st, AD5592R_REG_GPIO_SET, st->gpio_val);
+
+ mutex_unlock(&st->gpio_lock);
+}
+
+static int ad5592r_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct ad5592r_state *st = gpiochip_get_data(chip);
+ int ret;
+
+ mutex_lock(&st->gpio_lock);
+
+ st->gpio_out &= ~BIT(offset);
+ st->gpio_in |= BIT(offset);
+
+ ret = st->ops->reg_write(st, AD5592R_REG_GPIO_OUT_EN, st->gpio_out);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = st->ops->reg_write(st, AD5592R_REG_GPIO_IN_EN, st->gpio_in);
+
+err_unlock:
+ mutex_unlock(&st->gpio_lock);
+
+ return ret;
+}
+
+static int ad5592r_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct ad5592r_state *st = gpiochip_get_data(chip);
+ int ret;
+
+ mutex_lock(&st->gpio_lock);
+
+ if (value)
+ st->gpio_val |= BIT(offset);
+ else
+ st->gpio_val &= ~BIT(offset);
+
+ st->gpio_in &= ~BIT(offset);
+ st->gpio_out |= BIT(offset);
+
+ ret = st->ops->reg_write(st, AD5592R_REG_GPIO_SET, st->gpio_val);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = st->ops->reg_write(st, AD5592R_REG_GPIO_OUT_EN, st->gpio_out);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = st->ops->reg_write(st, AD5592R_REG_GPIO_IN_EN, st->gpio_in);
+
+err_unlock:
+ mutex_unlock(&st->gpio_lock);
+
+ return ret;
+}
+
+static int ad5592r_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct ad5592r_state *st = gpiochip_get_data(chip);
+
+ if (!(st->gpio_map & BIT(offset))) {
+ dev_err(st->dev, "GPIO %d is reserved by alternate function\n",
+ offset);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int ad5592r_gpio_init(struct ad5592r_state *st)
+{
+ if (!st->gpio_map)
+ return 0;
+
+ st->gpiochip.label = dev_name(st->dev);
+ st->gpiochip.base = -1;
+ st->gpiochip.ngpio = 8;
+ st->gpiochip.parent = st->dev;
+ st->gpiochip.can_sleep = true;
+ st->gpiochip.direction_input = ad5592r_gpio_direction_input;
+ st->gpiochip.direction_output = ad5592r_gpio_direction_output;
+ st->gpiochip.get = ad5592r_gpio_get;
+ st->gpiochip.set = ad5592r_gpio_set;
+ st->gpiochip.request = ad5592r_gpio_request;
+ st->gpiochip.owner = THIS_MODULE;
+
+ mutex_init(&st->gpio_lock);
+
+ return gpiochip_add_data(&st->gpiochip, st);
+}
+
+static void ad5592r_gpio_cleanup(struct ad5592r_state *st)
+{
+ if (st->gpio_map)
+ gpiochip_remove(&st->gpiochip);
+}
+
+static int ad5592r_reset(struct ad5592r_state *st)
+{
+ struct gpio_desc *gpio;
+ struct iio_dev *iio_dev = iio_priv_to_dev(st);
+
+ gpio = devm_gpiod_get_optional(st->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (gpio) {
+ udelay(1);
+ gpiod_set_value(gpio, 1);
+ } else {
+ mutex_lock(&iio_dev->mlock);
+ /* Writing this magic value resets the device */
+ st->ops->reg_write(st, AD5592R_REG_RESET, 0xdac);
+ mutex_unlock(&iio_dev->mlock);
+ }
+
+ udelay(250);
+
+ return 0;
+}
+
+static int ad5592r_get_vref(struct ad5592r_state *st)
+{
+ int ret;
+
+ if (st->reg) {
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ return ret;
+
+ return ret / 1000;
+ } else {
+ return 2500;
+ }
+}
+
+static int ad5592r_set_channel_modes(struct ad5592r_state *st)
+{
+ const struct ad5592r_rw_ops *ops = st->ops;
+ int ret;
+ unsigned i;
+ struct iio_dev *iio_dev = iio_priv_to_dev(st);
+ u8 pulldown = 0, tristate = 0, dac = 0, adc = 0;
+ u16 read_back;
+
+ for (i = 0; i < st->num_channels; i++) {
+ switch (st->channel_modes[i]) {
+ case CH_MODE_DAC:
+ dac |= BIT(i);
+ break;
+
+ case CH_MODE_ADC:
+ adc |= BIT(i);
+ break;
+
+ case CH_MODE_DAC_AND_ADC:
+ dac |= BIT(i);
+ adc |= BIT(i);
+ break;
+
+ case CH_MODE_GPIO:
+ st->gpio_map |= BIT(i);
+ st->gpio_in |= BIT(i); /* Default to input */
+ break;
+
+ case CH_MODE_UNUSED:
+ /* fall-through */
+ default:
+ switch (st->channel_offstate[i]) {
+ case CH_OFFSTATE_OUT_TRISTATE:
+ tristate |= BIT(i);
+ break;
+
+ case CH_OFFSTATE_OUT_LOW:
+ st->gpio_out |= BIT(i);
+ break;
+
+ case CH_OFFSTATE_OUT_HIGH:
+ st->gpio_out |= BIT(i);
+ st->gpio_val |= BIT(i);
+ break;
+
+ case CH_OFFSTATE_PULLDOWN:
+ /* fall-through */
+ default:
+ pulldown |= BIT(i);
+ break;
+ }
+ }
+ }
+
+ mutex_lock(&iio_dev->mlock);
+
+ /* Pull down unused pins to GND */
+ ret = ops->reg_write(st, AD5592R_REG_PULLDOWN, pulldown);
+ if (ret)
+ goto err_unlock;
+
+ ret = ops->reg_write(st, AD5592R_REG_TRISTATE, tristate);
+ if (ret)
+ goto err_unlock;
+
+ /* Configure pins that we use */
+ ret = ops->reg_write(st, AD5592R_REG_DAC_EN, dac);
+ if (ret)
+ goto err_unlock;
+
+ ret = ops->reg_write(st, AD5592R_REG_ADC_EN, adc);
+ if (ret)
+ goto err_unlock;
+
+ ret = ops->reg_write(st, AD5592R_REG_GPIO_SET, st->gpio_val);
+ if (ret)
+ goto err_unlock;
+
+ ret = ops->reg_write(st, AD5592R_REG_GPIO_OUT_EN, st->gpio_out);
+ if (ret)
+ goto err_unlock;
+
+ ret = ops->reg_write(st, AD5592R_REG_GPIO_IN_EN, st->gpio_in);
+ if (ret)
+ goto err_unlock;
+
+ /* Verify that we can read back at least one register */
+ ret = ops->reg_read(st, AD5592R_REG_ADC_EN, &read_back);
+ if (!ret && (read_back & 0xff) != adc)
+ ret = -EIO;
+
+err_unlock:
+ mutex_unlock(&iio_dev->mlock);
+ return ret;
+}
+
+static int ad5592r_reset_channel_modes(struct ad5592r_state *st)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(st->channel_modes); i++)
+ st->channel_modes[i] = CH_MODE_UNUSED;
+
+ return ad5592r_set_channel_modes(st);
+}
+
+static int ad5592r_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct ad5592r_state *st = iio_priv(iio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ if (!chan->output)
+ return -EINVAL;
+
+ mutex_lock(&iio_dev->mlock);
+ ret = st->ops->write_dac(st, chan->channel, val);
+ if (!ret)
+ st->cached_dac[chan->channel] = val;
+ mutex_unlock(&iio_dev->mlock);
+ return ret;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_VOLTAGE) {
+ bool gain;
+
+ if (val == st->scale_avail[0][0] &&
+ val2 == st->scale_avail[0][1])
+ gain = false;
+ else if (val == st->scale_avail[1][0] &&
+ val2 == st->scale_avail[1][1])
+ gain = true;
+ else
+ return -EINVAL;
+
+ mutex_lock(&iio_dev->mlock);
+
+ ret = st->ops->reg_read(st, AD5592R_REG_CTRL,
+ &st->cached_gp_ctrl);
+ if (ret < 0) {
+ mutex_unlock(&iio_dev->mlock);
+ return ret;
+ }
+
+ if (chan->output) {
+ if (gain)
+ st->cached_gp_ctrl |=
+ AD5592R_REG_CTRL_DAC_RANGE;
+ else
+ st->cached_gp_ctrl &=
+ ~AD5592R_REG_CTRL_DAC_RANGE;
+ } else {
+ if (gain)
+ st->cached_gp_ctrl |=
+ AD5592R_REG_CTRL_ADC_RANGE;
+ else
+ st->cached_gp_ctrl &=
+ ~AD5592R_REG_CTRL_ADC_RANGE;
+ }
+
+ ret = st->ops->reg_write(st, AD5592R_REG_CTRL,
+ st->cached_gp_ctrl);
+ mutex_unlock(&iio_dev->mlock);
+
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ad5592r_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long m)
+{
+ struct ad5592r_state *st = iio_priv(iio_dev);
+ u16 read_val;
+ int ret;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&iio_dev->mlock);
+
+ if (!chan->output) {
+ ret = st->ops->read_adc(st, chan->channel, &read_val);
+ if (ret)
+ goto unlock;
+
+ if ((read_val >> 12 & 0x7) != (chan->channel & 0x7)) {
+ dev_err(st->dev, "Error while reading channel %u\n",
+ chan->channel);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ read_val &= GENMASK(11, 0);
+
+ } else {
+ read_val = st->cached_dac[chan->channel];
+ }
+
+ dev_dbg(st->dev, "Channel %u read: 0x%04hX\n",
+ chan->channel, read_val);
+
+ *val = (int) read_val;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = ad5592r_get_vref(st);
+
+ if (chan->type == IIO_TEMP) {
+ s64 tmp = *val * (3767897513LL / 25LL);
+ *val = div_s64_rem(tmp, 1000000000LL, val2);
+
+ return IIO_VAL_INT_PLUS_MICRO; /* mlock not taken on this path */
+ } else {
+ int mult;
+
+ mutex_lock(&iio_dev->mlock);
+
+ if (chan->output)
+ mult = !!(st->cached_gp_ctrl &
+ AD5592R_REG_CTRL_DAC_RANGE);
+ else
+ mult = !!(st->cached_gp_ctrl &
+ AD5592R_REG_CTRL_ADC_RANGE);
+
+ *val *= ++mult;
+
+ *val2 = chan->scan_type.realbits;
+ ret = IIO_VAL_FRACTIONAL_LOG2;
+ }
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ ret = ad5592r_get_vref(st);
+
+ mutex_lock(&iio_dev->mlock);
+
+ if (st->cached_gp_ctrl & AD5592R_REG_CTRL_ADC_RANGE)
+ *val = (-34365 * 25) / ret;
+ else
+ *val = (-75365 * 25) / ret;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ return -EINVAL; /* mlock not taken on this path */
+ }
+
+unlock:
+ mutex_unlock(&iio_dev->mlock);
+ return ret;
+}
+
+static int ad5592r_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ad5592r_info = {
+ .read_raw = ad5592r_read_raw,
+ .write_raw = ad5592r_write_raw,
+ .write_raw_get_fmt = ad5592r_write_raw_get_fmt,
+ .driver_module = THIS_MODULE,
+};
+
+static ssize_t ad5592r_show_scale_available(struct iio_dev *iio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ad5592r_state *st = iio_priv(iio_dev);
+
+ return sprintf(buf, "%d.%09u %d.%09u\n",
+ st->scale_avail[0][0], st->scale_avail[0][1],
+ st->scale_avail[1][0], st->scale_avail[1][1]);
+}
+
+static struct iio_chan_spec_ext_info ad5592r_ext_info[] = {
+ {
+ .name = "scale_available",
+ .read = ad5592r_show_scale_available,
+ .shared = true,
+ },
+ {},
+};
+
+static void ad5592r_setup_channel(struct iio_dev *iio_dev,
+ struct iio_chan_spec *chan, bool output, unsigned id)
+{
+ chan->type = IIO_VOLTAGE;
+ chan->indexed = 1;
+ chan->output = output;
+ chan->channel = id;
+ chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
+ chan->scan_type.sign = 'u';
+ chan->scan_type.realbits = 12;
+ chan->scan_type.storagebits = 16;
+ chan->ext_info = ad5592r_ext_info;
+}
+
+static int ad5592r_alloc_channels(struct ad5592r_state *st)
+{
+ unsigned i, curr_channel = 0,
+ num_channels = st->num_channels;
+ struct iio_dev *iio_dev = iio_priv_to_dev(st);
+ struct iio_chan_spec *channels;
+ struct fwnode_handle *child;
+ u32 reg, tmp;
+ int ret;
+
+ device_for_each_child_node(st->dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret || reg >= ARRAY_SIZE(st->channel_modes))
+ continue;
+
+ ret = fwnode_property_read_u32(child, "adi,mode", &tmp);
+ if (!ret)
+ st->channel_modes[reg] = tmp;
+
+ fwnode_property_read_u32(child, "adi,off-state", &tmp);
+ if (!ret)
+ st->channel_offstate[reg] = tmp;
+ }
+
+ channels = devm_kzalloc(st->dev,
+ (1 + 2 * num_channels) * sizeof(*channels), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ for (i = 0; i < num_channels; i++) {
+ switch (st->channel_modes[i]) {
+ case CH_MODE_DAC:
+ ad5592r_setup_channel(iio_dev, &channels[curr_channel],
+ true, i);
+ curr_channel++;
+ break;
+
+ case CH_MODE_ADC:
+ ad5592r_setup_channel(iio_dev, &channels[curr_channel],
+ false, i);
+ curr_channel++;
+ break;
+
+ case CH_MODE_DAC_AND_ADC:
+ ad5592r_setup_channel(iio_dev, &channels[curr_channel],
+ true, i);
+ curr_channel++;
+ ad5592r_setup_channel(iio_dev, &channels[curr_channel],
+ false, i);
+ curr_channel++;
+ break;
+
+ default:
+ continue;
+ }
+ }
+
+ channels[curr_channel].type = IIO_TEMP;
+ channels[curr_channel].channel = 8;
+ channels[curr_channel].info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET);
+ curr_channel++;
+
+ iio_dev->num_channels = curr_channel;
+ iio_dev->channels = channels;
+
+ return 0;
+}
+
+static void ad5592r_init_scales(struct ad5592r_state *st, int vref_mV)
+{
+ s64 tmp = (s64)vref_mV * 1000000000LL >> 12;
+
+ st->scale_avail[0][0] =
+ div_s64_rem(tmp, 1000000000LL, &st->scale_avail[0][1]);
+ st->scale_avail[1][0] =
+ div_s64_rem(tmp * 2, 1000000000LL, &st->scale_avail[1][1]);
+}
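+
+/*
+ * Worked example, assuming the internal 2.5 V reference (vref_mV = 2500):
+ * tmp = 2500 * 10^9 >> 12 = 610351562, so the two selectable scales come
+ * out as roughly 0.610351562 and 1.220703124 (mV per LSB at gain 1 and
+ * gain 2).
+ */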
+
+int ad5592r_probe(struct device *dev, const char *name,
+ const struct ad5592r_rw_ops *ops)
+{
+ struct iio_dev *iio_dev;
+ struct ad5592r_state *st;
+ int ret;
+
+ iio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(iio_dev);
+ st->dev = dev;
+ st->ops = ops;
+ st->num_channels = 8;
+ dev_set_drvdata(dev, iio_dev);
+
+ st->reg = devm_regulator_get_optional(dev, "vref");
+ if (IS_ERR(st->reg)) {
+ if ((PTR_ERR(st->reg) != -ENODEV) && dev->of_node)
+ return PTR_ERR(st->reg);
+
+ st->reg = NULL;
+ } else {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+ }
+
+ iio_dev->dev.parent = dev;
+ iio_dev->name = name;
+ iio_dev->info = &ad5592r_info;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+
+ ad5592r_init_scales(st, ad5592r_get_vref(st));
+
+ ret = ad5592r_reset(st);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = ops->reg_write(st, AD5592R_REG_PD,
+ (st->reg == NULL) ? AD5592R_REG_PD_EN_REF : 0);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = ad5592r_alloc_channels(st);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = ad5592r_set_channel_modes(st);
+ if (ret)
+ goto error_reset_ch_modes;
+
+ ret = iio_device_register(iio_dev);
+ if (ret)
+ goto error_reset_ch_modes;
+
+ ret = ad5592r_gpio_init(st);
+ if (ret)
+ goto error_dev_unregister;
+
+ return 0;
+
+error_dev_unregister:
+ iio_device_unregister(iio_dev);
+
+error_reset_ch_modes:
+ ad5592r_reset_channel_modes(st);
+
+error_disable_reg:
+ if (st->reg)
+ regulator_disable(st->reg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ad5592r_probe);
+
+int ad5592r_remove(struct device *dev)
+{
+ struct iio_dev *iio_dev = dev_get_drvdata(dev);
+ struct ad5592r_state *st = iio_priv(iio_dev);
+
+ iio_device_unregister(iio_dev);
+ ad5592r_reset_channel_modes(st);
+ ad5592r_gpio_cleanup(st);
+
+ if (st->reg)
+ regulator_disable(st->reg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ad5592r_remove);
+
+MODULE_AUTHOR("Paul Cercueil <paul.cercueil@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5592R multi-channel converters");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5592r-base.h b/drivers/iio/dac/ad5592r-base.h
new file mode 100644
index 000000000..841457e93
--- /dev/null
+++ b/drivers/iio/dac/ad5592r-base.h
@@ -0,0 +1,76 @@
+/*
+ * AD5592R / AD5593R Digital <-> Analog converters driver
+ *
+ * Copyright 2015-2016 Analog Devices Inc.
+ * Author: Paul Cercueil <paul.cercueil@analog.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __DRIVERS_IIO_DAC_AD5592R_BASE_H__
+#define __DRIVERS_IIO_DAC_AD5592R_BASE_H__
+
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/mutex.h>
+#include <linux/gpio/driver.h>
+
+struct device;
+struct ad5592r_state;
+
+enum ad5592r_registers {
+ AD5592R_REG_NOOP = 0x0,
+ AD5592R_REG_DAC_READBACK = 0x1,
+ AD5592R_REG_ADC_SEQ = 0x2,
+ AD5592R_REG_CTRL = 0x3,
+ AD5592R_REG_ADC_EN = 0x4,
+ AD5592R_REG_DAC_EN = 0x5,
+ AD5592R_REG_PULLDOWN = 0x6,
+ AD5592R_REG_LDAC = 0x7,
+ AD5592R_REG_GPIO_OUT_EN = 0x8,
+ AD5592R_REG_GPIO_SET = 0x9,
+ AD5592R_REG_GPIO_IN_EN = 0xA,
+ AD5592R_REG_PD = 0xB,
+ AD5592R_REG_OPEN_DRAIN = 0xC,
+ AD5592R_REG_TRISTATE = 0xD,
+ AD5592R_REG_RESET = 0xF,
+};
+
+#define AD5592R_REG_PD_EN_REF BIT(9)
+#define AD5592R_REG_CTRL_ADC_RANGE BIT(5)
+#define AD5592R_REG_CTRL_DAC_RANGE BIT(4)
+
+struct ad5592r_rw_ops {
+ int (*write_dac)(struct ad5592r_state *st, unsigned chan, u16 value);
+ int (*read_adc)(struct ad5592r_state *st, unsigned chan, u16 *value);
+ int (*reg_write)(struct ad5592r_state *st, u8 reg, u16 value);
+ int (*reg_read)(struct ad5592r_state *st, u8 reg, u16 *value);
+ int (*gpio_read)(struct ad5592r_state *st, u8 *value);
+};
+
+struct ad5592r_state {
+ struct device *dev;
+ struct regulator *reg;
+ struct gpio_chip gpiochip;
+ struct mutex gpio_lock; /* Protect cached gpio_out, gpio_val, etc. */
+ unsigned int num_channels;
+ const struct ad5592r_rw_ops *ops;
+ int scale_avail[2][2];
+ u16 cached_dac[8];
+ u16 cached_gp_ctrl;
+ u8 channel_modes[8];
+ u8 channel_offstate[8];
+ u8 gpio_map;
+ u8 gpio_out;
+ u8 gpio_in;
+ u8 gpio_val;
+
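+ /*
+ * ____cacheline_aligned starts a fresh cacheline here so that DMA on
+ * the SPI transfer buffers below never shares a line with the cached
+ * state above (the usual idiom for SPI TX/RX buffers).
+ */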
+ __be16 spi_msg ____cacheline_aligned;
+ __be16 spi_msg_nop;
+};
+
+int ad5592r_probe(struct device *dev, const char *name,
+ const struct ad5592r_rw_ops *ops);
+int ad5592r_remove(struct device *dev);
+
+#endif /* __DRIVERS_IIO_DAC_AD5592R_BASE_H__ */
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
new file mode 100644
index 000000000..0b235a2c7
--- /dev/null
+++ b/drivers/iio/dac/ad5592r.c
@@ -0,0 +1,164 @@
+/*
+ * AD5592R Digital <-> Analog converters driver
+ *
+ * Copyright 2015-2016 Analog Devices Inc.
+ * Author: Paul Cercueil <paul.cercueil@analog.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include "ad5592r-base.h"
+
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+#define AD5592R_GPIO_READBACK_EN BIT(10)
+#define AD5592R_LDAC_READBACK_EN BIT(6)
+
+static int ad5592r_spi_wnop_r16(struct ad5592r_state *st, u16 *buf)
+{
+ struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
+ struct spi_transfer t = {
+ .tx_buf = &st->spi_msg_nop,
+ .rx_buf = buf,
+ .len = 2
+ };
+
+ st->spi_msg_nop = 0; /* NOP */
+
+ return spi_sync_transfer(spi, &t, 1);
+}
+
+static int ad5592r_write_dac(struct ad5592r_state *st, unsigned chan, u16 value)
+{
+ struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
+
+ st->spi_msg = cpu_to_be16(BIT(15) | (chan << 12) | value);
+
+ return spi_write(spi, &st->spi_msg, sizeof(st->spi_msg));
+}
+
+static int ad5592r_read_adc(struct ad5592r_state *st, unsigned chan, u16 *value)
+{
+ struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
+ int ret;
+
+ st->spi_msg = cpu_to_be16((AD5592R_REG_ADC_SEQ << 11) | BIT(chan));
+
+ ret = spi_write(spi, &st->spi_msg, sizeof(st->spi_msg));
+ if (ret)
+ return ret;
+
+ /*
+ * The first word clocked back is invalid data and is discarded;
+ * see Figure 40, "Single-Channel ADC Conversion Sequence", in the
+ * datasheet.
+ */
+ ret = ad5592r_spi_wnop_r16(st, &st->spi_msg);
+ if (ret)
+ return ret;
+
+ ret = ad5592r_spi_wnop_r16(st, &st->spi_msg);
+ if (ret)
+ return ret;
+
+ *value = be16_to_cpu(st->spi_msg);
+
+ return 0;
+}
+
+static int ad5592r_reg_write(struct ad5592r_state *st, u8 reg, u16 value)
+{
+ struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
+
+ st->spi_msg = cpu_to_be16((reg << 11) | value);
+
+ return spi_write(spi, &st->spi_msg, sizeof(st->spi_msg));
+}
+
+static int ad5592r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value)
+{
+ struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
+ int ret;
+
+ st->spi_msg = cpu_to_be16((AD5592R_REG_LDAC << 11) |
+ AD5592R_LDAC_READBACK_EN | (reg << 2));
+
+ ret = spi_write(spi, &st->spi_msg, sizeof(st->spi_msg));
+ if (ret)
+ return ret;
+
+ ret = ad5592r_spi_wnop_r16(st, &st->spi_msg);
+ if (ret)
+ return ret;
+
+ *value = be16_to_cpu(st->spi_msg);
+
+ return 0;
+}
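+
+/*
+ * Worked example of the readback command built above: for
+ * reg = AD5592R_REG_CTRL (0x3) the word is (AD5592R_REG_LDAC << 11) |
+ * AD5592R_LDAC_READBACK_EN | (0x3 << 2) = 0x3800 | 0x0040 | 0x000c =
+ * 0x384c, sent big-endian; the NOP that follows clocks the value back out.
+ */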
+
+static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value)
+{
+ int ret;
+
+ ret = ad5592r_reg_write(st, AD5592R_REG_GPIO_IN_EN,
+ AD5592R_GPIO_READBACK_EN | st->gpio_in);
+ if (ret)
+ return ret;
+
+ ret = ad5592r_spi_wnop_r16(st, &st->spi_msg);
+ if (ret)
+ return ret;
+
+ *value = (u8) be16_to_cpu(st->spi_msg);
+
+ return 0;
+}
+
+static const struct ad5592r_rw_ops ad5592r_rw_ops = {
+ .write_dac = ad5592r_write_dac,
+ .read_adc = ad5592r_read_adc,
+ .reg_write = ad5592r_reg_write,
+ .reg_read = ad5592r_reg_read,
+ .gpio_read = ad5593r_gpio_read,
+};
+
+static int ad5592r_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ return ad5592r_probe(&spi->dev, id->name, &ad5592r_rw_ops);
+}
+
+static int ad5592r_spi_remove(struct spi_device *spi)
+{
+ return ad5592r_remove(&spi->dev);
+}
+
+static const struct spi_device_id ad5592r_spi_ids[] = {
+ { .name = "ad5592r", },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5592r_spi_ids);
+
+static const struct of_device_id ad5592r_of_match[] = {
+ { .compatible = "adi,ad5592r", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ad5592r_of_match);
+
+static struct spi_driver ad5592r_spi_driver = {
+ .driver = {
+ .name = "ad5592r",
+ .of_match_table = of_match_ptr(ad5592r_of_match),
+ },
+ .probe = ad5592r_spi_probe,
+ .remove = ad5592r_spi_remove,
+ .id_table = ad5592r_spi_ids,
+};
+module_spi_driver(ad5592r_spi_driver);
+
+MODULE_AUTHOR("Paul Cercueil <paul.cercueil@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5592R multi-channel converters");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
new file mode 100644
index 000000000..dca158a88
--- /dev/null
+++ b/drivers/iio/dac/ad5593r.c
@@ -0,0 +1,131 @@
+/*
+ * AD5593R Digital <-> Analog converters driver
+ *
+ * Copyright 2015-2016 Analog Devices Inc.
+ * Author: Paul Cercueil <paul.cercueil@analog.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include "ad5592r-base.h"
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#define AD5593R_MODE_CONF (0 << 4)
+#define AD5593R_MODE_DAC_WRITE (1 << 4)
+#define AD5593R_MODE_ADC_READBACK (4 << 4)
+#define AD5593R_MODE_DAC_READBACK (5 << 4)
+#define AD5593R_MODE_GPIO_READBACK (6 << 4)
+#define AD5593R_MODE_REG_READBACK (7 << 4)
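+
+/*
+ * The upper nibble of the I2C pointer byte selects the mode, the lower
+ * nibble the target. Example: writing DAC channel 2 uses pointer byte
+ * AD5593R_MODE_DAC_WRITE | 2 = 0x12, followed by the big-endian 16-bit
+ * value (hence the i2c_smbus_*_word_swapped helpers below).
+ */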
+
+static int ad5593r_write_dac(struct ad5592r_state *st, unsigned chan, u16 value)
+{
+ struct i2c_client *i2c = to_i2c_client(st->dev);
+
+ return i2c_smbus_write_word_swapped(i2c,
+ AD5593R_MODE_DAC_WRITE | chan, value);
+}
+
+static int ad5593r_read_adc(struct ad5592r_state *st, unsigned chan, u16 *value)
+{
+ struct i2c_client *i2c = to_i2c_client(st->dev);
+ s32 val;
+
+ val = i2c_smbus_write_word_swapped(i2c,
+ AD5593R_MODE_CONF | AD5592R_REG_ADC_SEQ, BIT(chan));
+ if (val < 0)
+ return (int) val;
+
+ val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_ADC_READBACK);
+ if (val < 0)
+ return (int) val;
+
+ *value = (u16) val;
+
+ return 0;
+}
+
+static int ad5593r_reg_write(struct ad5592r_state *st, u8 reg, u16 value)
+{
+ struct i2c_client *i2c = to_i2c_client(st->dev);
+
+ return i2c_smbus_write_word_swapped(i2c,
+ AD5593R_MODE_CONF | reg, value);
+}
+
+static int ad5593r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value)
+{
+ struct i2c_client *i2c = to_i2c_client(st->dev);
+ s32 val;
+
+ val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_REG_READBACK | reg);
+ if (val < 0)
+ return (int) val;
+
+ *value = (u16) val;
+
+ return 0;
+}
+
+static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value)
+{
+ struct i2c_client *i2c = to_i2c_client(st->dev);
+ s32 val;
+
+ val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_GPIO_READBACK);
+ if (val < 0)
+ return (int) val;
+
+ *value = (u8) val;
+
+ return 0;
+}
+
+static const struct ad5592r_rw_ops ad5593r_rw_ops = {
+ .write_dac = ad5593r_write_dac,
+ .read_adc = ad5593r_read_adc,
+ .reg_write = ad5593r_reg_write,
+ .reg_read = ad5593r_reg_read,
+ .gpio_read = ad5593r_gpio_read,
+};
+
+static int ad5593r_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ return ad5592r_probe(&i2c->dev, id->name, &ad5593r_rw_ops);
+}
+
+static int ad5593r_i2c_remove(struct i2c_client *i2c)
+{
+ return ad5592r_remove(&i2c->dev);
+}
+
+static const struct i2c_device_id ad5593r_i2c_ids[] = {
+ { .name = "ad5593r", },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ad5593r_i2c_ids);
+
+static const struct of_device_id ad5593r_of_match[] = {
+ { .compatible = "adi,ad5593r", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ad5593r_of_match);
+
+static struct i2c_driver ad5593r_driver = {
+ .driver = {
+ .name = "ad5593r",
+ .of_match_table = of_match_ptr(ad5593r_of_match),
+ },
+ .probe = ad5593r_i2c_probe,
+ .remove = ad5593r_i2c_remove,
+ .id_table = ad5593r_i2c_ids,
+};
+module_i2c_driver(ad5593r_driver);
+
+MODULE_AUTHOR("Paul Cercueil <paul.cercueil@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5592R multi-channel converters");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/lpc18xx_dac.c b/drivers/iio/dac/lpc18xx_dac.c
new file mode 100644
index 000000000..55d1456a0
--- /dev/null
+++ b/drivers/iio/dac/lpc18xx_dac.c
@@ -0,0 +1,210 @@
+/*
+ * IIO DAC driver for NXP LPC18xx DAC
+ *
+ * Copyright (C) 2016 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * UNSUPPORTED hardware features:
+ * - Interrupts
+ * - DMA
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+/* LPC18XX DAC registers and bits */
+#define LPC18XX_DAC_CR 0x000
+#define LPC18XX_DAC_CR_VALUE_SHIFT 6
+#define LPC18XX_DAC_CR_VALUE_MASK 0x3ff
+#define LPC18XX_DAC_CR_BIAS BIT(16)
+#define LPC18XX_DAC_CTRL 0x004
+#define LPC18XX_DAC_CTRL_DMA_ENA BIT(3)
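+
+/*
+ * Example CR encoding: a mid-scale sample of 512 is written as
+ * LPC18XX_DAC_CR_BIAS | (512 << LPC18XX_DAC_CR_VALUE_SHIFT) = 0x18000.
+ */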
+
+struct lpc18xx_dac {
+ struct regulator *vref;
+ void __iomem *base;
+ struct mutex lock;
+ struct clk *clk;
+};
+
+static const struct iio_chan_spec lpc18xx_dac_iio_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .output = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int lpc18xx_dac_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct lpc18xx_dac *dac = iio_priv(indio_dev);
+ u32 reg;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ reg = readl(dac->base + LPC18XX_DAC_CR);
+ *val = reg >> LPC18XX_DAC_CR_VALUE_SHIFT;
+ *val &= LPC18XX_DAC_CR_VALUE_MASK;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = regulator_get_voltage(dac->vref) / 1000;
+ *val2 = 10;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ return -EINVAL;
+}
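+
+/*
+ * With IIO_VAL_FRACTIONAL_LOG2 the reported scale is val / 2^val2.
+ * Worked example, assuming a 3.3 V vref: 3300 / 2^10 = 3.22265625,
+ * i.e. roughly 3.223 mV per LSB.
+ */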
+
+static int lpc18xx_dac_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct lpc18xx_dac *dac = iio_priv(indio_dev);
+ u32 reg;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (val < 0 || val > LPC18XX_DAC_CR_VALUE_MASK)
+ return -EINVAL;
+
+ reg = LPC18XX_DAC_CR_BIAS;
+ reg |= val << LPC18XX_DAC_CR_VALUE_SHIFT;
+
+ mutex_lock(&dac->lock);
+ writel(reg, dac->base + LPC18XX_DAC_CR);
+ writel(LPC18XX_DAC_CTRL_DMA_ENA, dac->base + LPC18XX_DAC_CTRL);
+ mutex_unlock(&dac->lock);
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info lpc18xx_dac_info = {
+ .read_raw = lpc18xx_dac_read_raw,
+ .write_raw = lpc18xx_dac_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int lpc18xx_dac_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct lpc18xx_dac *dac;
+ struct resource *res;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*dac));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ dac = iio_priv(indio_dev);
+ mutex_init(&dac->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dac->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dac->base))
+ return PTR_ERR(dac->base);
+
+ dac->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dac->clk)) {
+ dev_err(&pdev->dev, "error getting clock\n");
+ return PTR_ERR(dac->clk);
+ }
+
+ dac->vref = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(dac->vref)) {
+ dev_err(&pdev->dev, "error getting regulator\n");
+ return PTR_ERR(dac->vref);
+ }
+
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &lpc18xx_dac_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = lpc18xx_dac_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(lpc18xx_dac_iio_channels);
+
+ ret = regulator_enable(dac->vref);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable regulator\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dac->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable clock\n");
+ goto dis_reg;
+ }
+
+ writel(0, dac->base + LPC18XX_DAC_CTRL);
+ writel(0, dac->base + LPC18XX_DAC_CR);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register device\n");
+ goto dis_clk;
+ }
+
+ return 0;
+
+dis_clk:
+ clk_disable_unprepare(dac->clk);
+dis_reg:
+ regulator_disable(dac->vref);
+ return ret;
+}
+
+static int lpc18xx_dac_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct lpc18xx_dac *dac = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ writel(0, dac->base + LPC18XX_DAC_CTRL);
+ clk_disable_unprepare(dac->clk);
+ regulator_disable(dac->vref);
+
+ return 0;
+}
+
+static const struct of_device_id lpc18xx_dac_match[] = {
+ { .compatible = "nxp,lpc1850-dac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_dac_match);
+
+static struct platform_driver lpc18xx_dac_driver = {
+ .probe = lpc18xx_dac_probe,
+ .remove = lpc18xx_dac_remove,
+ .driver = {
+ .name = "lpc18xx-dac",
+ .of_match_table = lpc18xx_dac_match,
+ },
+};
+module_platform_driver(lpc18xx_dac_driver);
+
+MODULE_DESCRIPTION("LPC18xx DAC driver");
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/stx104.c b/drivers/iio/dac/stx104.c
index 174f4b75c..279412208 100644
--- a/drivers/iio/dac/stx104.c
+++ b/drivers/iio/dac/stx104.c
@@ -33,16 +33,9 @@
}
#define STX104_EXTENT 16
-/**
- * The highest base address possible for an ISA device is 0x3FF; this results in
- * 1024 possible base addresses. Dividing the number of possible base addresses
- * by the address extent taken by each device results in the maximum number of
- * devices on a system.
- */
-#define MAX_NUM_STX104 (1024 / STX104_EXTENT)
-static unsigned base[MAX_NUM_STX104];
-static unsigned num_stx104;
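+/*
+ * base[] is sized for the worst case: 1024 possible ISA base addresses
+ * divided by the 16-byte extent of each card gives 64 devices; this is
+ * the computation the removed MAX_NUM_STX104 macro spelled out, now
+ * provided by the generic max_num_isa_dev() helper.
+ */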
+static unsigned int base[max_num_isa_dev(STX104_EXTENT)];
+static unsigned int num_stx104;
module_param_array(base, uint, &num_stx104, 0);
MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses");
@@ -134,18 +127,7 @@ static struct isa_driver stx104_driver = {
}
};
-static void __exit stx104_exit(void)
-{
- isa_unregister_driver(&stx104_driver);
-}
-
-static int __init stx104_init(void)
-{
- return isa_register_driver(&stx104_driver, num_stx104);
-}
-
-module_init(stx104_init);
-module_exit(stx104_exit);
+module_isa_driver(stx104_driver, num_stx104);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("Apex Embedded Systems STX104 DAC driver");
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 44a30f286..99eba524f 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -284,7 +284,7 @@ struct ad9523_state {
} data[2] ____cacheline_aligned;
};
-static int ad9523_read(struct iio_dev *indio_dev, unsigned addr)
+static int ad9523_read(struct iio_dev *indio_dev, unsigned int addr)
{
struct ad9523_state *st = iio_priv(indio_dev);
int ret;
@@ -318,7 +318,8 @@ static int ad9523_read(struct iio_dev *indio_dev, unsigned addr)
return ret;
};
-static int ad9523_write(struct iio_dev *indio_dev, unsigned addr, unsigned val)
+static int ad9523_write(struct iio_dev *indio_dev,
+ unsigned int addr, unsigned int val)
{
struct ad9523_state *st = iio_priv(indio_dev);
int ret;
@@ -351,11 +352,11 @@ static int ad9523_io_update(struct iio_dev *indio_dev)
}
static int ad9523_vco_out_map(struct iio_dev *indio_dev,
- unsigned ch, unsigned out)
+ unsigned int ch, unsigned int out)
{
struct ad9523_state *st = iio_priv(indio_dev);
int ret;
- unsigned mask;
+ unsigned int mask;
switch (ch) {
case 0 ... 3:
@@ -405,7 +406,7 @@ static int ad9523_vco_out_map(struct iio_dev *indio_dev,
}
static int ad9523_set_clock_provider(struct iio_dev *indio_dev,
- unsigned ch, unsigned long freq)
+ unsigned int ch, unsigned long freq)
{
struct ad9523_state *st = iio_priv(indio_dev);
long tmp1, tmp2;
@@ -619,7 +620,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad9523_state *st = iio_priv(indio_dev);
- unsigned code;
+ unsigned int code;
int ret;
mutex_lock(&indio_dev->mlock);
@@ -655,7 +656,7 @@ static int ad9523_write_raw(struct iio_dev *indio_dev,
long mask)
{
struct ad9523_state *st = iio_priv(indio_dev);
- unsigned reg;
+ unsigned int reg;
int ret, tmp, code;
mutex_lock(&indio_dev->mlock);
@@ -709,8 +710,8 @@ out:
}
static int ad9523_reg_access(struct iio_dev *indio_dev,
- unsigned reg, unsigned writeval,
- unsigned *readval)
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
{
int ret;
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index e816d29d6..205a84420 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -93,7 +93,7 @@ config IIO_ST_GYRO_3AXIS
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics gyroscopes:
- L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330.
+ L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330, LSM9DS0.
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 4dac567e7..7ccc04406 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -17,7 +17,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
@@ -31,7 +30,6 @@
#include "bmg160.h"
#define BMG160_IRQ_NAME "bmg160_event"
-#define BMG160_GPIO_NAME "gpio_int"
#define BMG160_REG_CHIP_ID 0x00
#define BMG160_CHIP_ID_VAL 0x0F
@@ -97,7 +95,6 @@
#define BMG160_AUTO_SUSPEND_DELAY_MS 2000
struct bmg160_data {
- struct device *dev;
struct regmap *regmap;
struct iio_trigger *dready_trig;
struct iio_trigger *motion_trig;
@@ -116,6 +113,7 @@ enum bmg160_axis {
AXIS_X,
AXIS_Y,
AXIS_Z,
+ AXIS_MAX,
};
static const struct {
@@ -138,11 +136,12 @@ static const struct {
static int bmg160_set_mode(struct bmg160_data *data, u8 mode)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
ret = regmap_write(data->regmap, BMG160_REG_PMU_LPW, mode);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_pmu_lpw\n");
+ dev_err(dev, "Error writing reg_pmu_lpw\n");
return ret;
}
@@ -163,6 +162,7 @@ static int bmg160_convert_freq_to_bit(int val)
static int bmg160_set_bw(struct bmg160_data *data, int val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
int bw_bits;
@@ -172,7 +172,7 @@ static int bmg160_set_bw(struct bmg160_data *data, int val)
ret = regmap_write(data->regmap, BMG160_REG_PMU_BW, bw_bits);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_pmu_bw\n");
+ dev_err(dev, "Error writing reg_pmu_bw\n");
return ret;
}
@@ -183,18 +183,19 @@ static int bmg160_set_bw(struct bmg160_data *data, int val)
static int bmg160_chip_init(struct bmg160_data *data)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
unsigned int val;
ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
if (ret < 0) {
- dev_err(data->dev, "Error reading reg_chip_id\n");
+ dev_err(dev, "Error reading reg_chip_id\n");
return ret;
}
- dev_dbg(data->dev, "Chip Id %x\n", val);
+ dev_dbg(dev, "Chip Id %x\n", val);
if (val != BMG160_CHIP_ID_VAL) {
- dev_err(data->dev, "invalid chip %x\n", val);
+ dev_err(dev, "invalid chip %x\n", val);
return -ENODEV;
}
@@ -213,14 +214,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
/* Set Default Range */
ret = regmap_write(data->regmap, BMG160_REG_RANGE, BMG160_RANGE_500DPS);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_range\n");
+ dev_err(dev, "Error writing reg_range\n");
return ret;
}
data->dps_range = BMG160_RANGE_500DPS;
ret = regmap_read(data->regmap, BMG160_REG_SLOPE_THRES, &val);
if (ret < 0) {
- dev_err(data->dev, "Error reading reg_slope_thres\n");
+ dev_err(dev, "Error reading reg_slope_thres\n");
return ret;
}
data->slope_thres = val;
@@ -229,7 +230,7 @@ static int bmg160_chip_init(struct bmg160_data *data)
ret = regmap_update_bits(data->regmap, BMG160_REG_INT_EN_1,
BMG160_INT1_BIT_OD, 0);
if (ret < 0) {
- dev_err(data->dev, "Error updating bits in reg_int_en_1\n");
+ dev_err(dev, "Error updating bits in reg_int_en_1\n");
return ret;
}
@@ -237,7 +238,7 @@ static int bmg160_chip_init(struct bmg160_data *data)
BMG160_INT_MODE_LATCH_INT |
BMG160_INT_MODE_LATCH_RESET);
if (ret < 0) {
- dev_err(data->dev,
+ dev_err(dev,
"Error writing reg_motion_intr\n");
return ret;
}
@@ -248,20 +249,21 @@ static int bmg160_chip_init(struct bmg160_data *data)
static int bmg160_set_power_state(struct bmg160_data *data, bool on)
{
#ifdef CONFIG_PM
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
if (on)
- ret = pm_runtime_get_sync(data->dev);
+ ret = pm_runtime_get_sync(dev);
else {
- pm_runtime_mark_last_busy(data->dev);
- ret = pm_runtime_put_autosuspend(data->dev);
+ pm_runtime_mark_last_busy(dev);
+ ret = pm_runtime_put_autosuspend(dev);
}
if (ret < 0) {
- dev_err(data->dev,
- "Failed: bmg160_set_power_state for %d\n", on);
+ dev_err(dev, "Failed: bmg160_set_power_state for %d\n", on);
+
if (on)
- pm_runtime_put_noidle(data->dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
@@ -273,6 +275,7 @@ static int bmg160_set_power_state(struct bmg160_data *data, bool on)
static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
bool status)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
/* Enable/Disable INT_MAP0 mapping */
@@ -280,7 +283,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
BMG160_INT_MAP_0_BIT_ANY,
(status ? BMG160_INT_MAP_0_BIT_ANY : 0));
if (ret < 0) {
- dev_err(data->dev, "Error updating bits reg_int_map0\n");
+ dev_err(dev, "Error updating bits reg_int_map0\n");
return ret;
}
@@ -290,8 +293,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
ret = regmap_write(data->regmap, BMG160_REG_SLOPE_THRES,
data->slope_thres);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_slope_thres\n");
+ dev_err(dev, "Error writing reg_slope_thres\n");
return ret;
}
@@ -299,8 +301,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
BMG160_INT_MOTION_X | BMG160_INT_MOTION_Y |
BMG160_INT_MOTION_Z);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_motion_intr\n");
+ dev_err(dev, "Error writing reg_motion_intr\n");
return ret;
}
@@ -315,8 +316,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
BMG160_INT_MODE_LATCH_INT |
BMG160_INT_MODE_LATCH_RESET);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_rst_latch\n");
+ dev_err(dev, "Error writing reg_rst_latch\n");
return ret;
}
}
@@ -329,7 +329,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
}
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_int_en0\n");
+ dev_err(dev, "Error writing reg_int_en0\n");
return ret;
}
@@ -339,6 +339,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
bool status)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
/* Enable/Disable INT_MAP1 mapping */
@@ -346,7 +347,7 @@ static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
BMG160_INT_MAP_1_BIT_NEW_DATA,
(status ? BMG160_INT_MAP_1_BIT_NEW_DATA : 0));
if (ret < 0) {
- dev_err(data->dev, "Error updating bits in reg_int_map1\n");
+ dev_err(dev, "Error updating bits in reg_int_map1\n");
return ret;
}
@@ -355,9 +356,8 @@ static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
BMG160_INT_MODE_NON_LATCH_INT |
BMG160_INT_MODE_LATCH_RESET);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_rst_latch\n");
- return ret;
+ dev_err(dev, "Error writing reg_rst_latch\n");
+ return ret;
}
ret = regmap_write(data->regmap, BMG160_REG_INT_EN_0,
@@ -369,16 +369,15 @@ static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
BMG160_INT_MODE_LATCH_INT |
BMG160_INT_MODE_LATCH_RESET);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_rst_latch\n");
- return ret;
+ dev_err(dev, "Error writing reg_rst_latch\n");
+ return ret;
}
ret = regmap_write(data->regmap, BMG160_REG_INT_EN_0, 0);
}
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_int_en0\n");
+ dev_err(dev, "Error writing reg_int_en0\n");
return ret;
}
@@ -401,6 +400,7 @@ static int bmg160_get_bw(struct bmg160_data *data, int *val)
static int bmg160_set_scale(struct bmg160_data *data, int val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret, i;
for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
@@ -408,8 +408,7 @@ static int bmg160_set_scale(struct bmg160_data *data, int val)
ret = regmap_write(data->regmap, BMG160_REG_RANGE,
bmg160_scale_table[i].dps_range);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_range\n");
+ dev_err(dev, "Error writing reg_range\n");
return ret;
}
data->dps_range = bmg160_scale_table[i].dps_range;
@@ -422,6 +421,7 @@ static int bmg160_set_scale(struct bmg160_data *data, int val)
static int bmg160_get_temp(struct bmg160_data *data, int *val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
unsigned int raw_val;
@@ -434,7 +434,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
ret = regmap_read(data->regmap, BMG160_REG_TEMP, &raw_val);
if (ret < 0) {
- dev_err(data->dev, "Error reading reg_temp\n");
+ dev_err(dev, "Error reading reg_temp\n");
bmg160_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
@@ -451,6 +451,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
__le16 raw_val;
@@ -464,7 +465,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
sizeof(raw_val));
if (ret < 0) {
- dev_err(data->dev, "Error reading axis %d\n", axis);
+ dev_err(dev, "Error reading axis %d\n", axis);
bmg160_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
@@ -764,26 +765,23 @@ static const struct iio_info bmg160_info = {
.driver_module = THIS_MODULE,
};
+static const unsigned long bmg160_accel_scan_masks[] = {
+ BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
+ 0};
+
static irqreturn_t bmg160_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmg160_data *data = iio_priv(indio_dev);
- int bit, ret, i = 0;
- unsigned int val;
+ int ret;
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
- ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(bit),
- &val, 2);
- if (ret < 0) {
- mutex_unlock(&data->mutex);
- goto err;
- }
- data->buffer[i++] = val;
- }
+ ret = regmap_bulk_read(data->regmap, BMG160_REG_XOUT_L,
+ data->buffer, AXIS_MAX * 2);
mutex_unlock(&data->mutex);
+ if (ret < 0)
+ goto err;
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
pf->timestamp);
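/*
 * Note: because bmg160_accel_scan_masks pins the device to X|Y|Z, a
 * client enabling only a subset (say, just Z) still takes the single
 * bulk read above; the IIO core demultiplexes the requested channels
 * out of data->buffer when pushing them to the buffers.
 */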
@@ -797,6 +795,7 @@ static int bmg160_trig_try_reen(struct iio_trigger *trig)
{
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct bmg160_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
/* new data interrupts don't need ack */
@@ -808,7 +807,7 @@ static int bmg160_trig_try_reen(struct iio_trigger *trig)
BMG160_INT_MODE_LATCH_INT |
BMG160_INT_MODE_LATCH_RESET);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_rst_latch\n");
+ dev_err(dev, "Error writing reg_rst_latch\n");
return ret;
}
@@ -868,13 +867,14 @@ static irqreturn_t bmg160_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct bmg160_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
int dir;
unsigned int val;
ret = regmap_read(data->regmap, BMG160_REG_INT_STATUS_2, &val);
if (ret < 0) {
- dev_err(data->dev, "Error reading reg_int_status2\n");
+ dev_err(dev, "Error reading reg_int_status2\n");
goto ack_intr_status;
}
@@ -911,8 +911,7 @@ ack_intr_status:
BMG160_INT_MODE_LATCH_INT |
BMG160_INT_MODE_LATCH_RESET);
if (ret < 0)
- dev_err(data->dev,
- "Error writing reg_rst_latch\n");
+ dev_err(dev, "Error writing reg_rst_latch\n");
}
return IRQ_HANDLED;
@@ -956,29 +955,6 @@ static const struct iio_buffer_setup_ops bmg160_buffer_setup_ops = {
.postdisable = bmg160_buffer_postdisable,
};
-static int bmg160_gpio_probe(struct bmg160_data *data)
-
-{
- struct device *dev;
- struct gpio_desc *gpio;
-
- dev = data->dev;
-
- /* data ready gpio interrupt pin */
- gpio = devm_gpiod_get_index(dev, BMG160_GPIO_NAME, 0, GPIOD_IN);
- if (IS_ERR(gpio)) {
- dev_err(dev, "acpi gpio get index failed\n");
- return PTR_ERR(gpio);
- }
-
- data->irq = gpiod_to_irq(gpio);
-
- dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio),
- data->irq);
-
- return 0;
-}
-
static const char *bmg160_match_acpi_device(struct device *dev)
{
const struct acpi_device_id *id;
@@ -1003,7 +979,6 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
data = iio_priv(indio_dev);
dev_set_drvdata(dev, indio_dev);
- data->dev = dev;
data->irq = irq;
data->regmap = regmap;
@@ -1020,12 +995,10 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
indio_dev->channels = bmg160_channels;
indio_dev->num_channels = ARRAY_SIZE(bmg160_channels);
indio_dev->name = name;
+ indio_dev->available_scan_masks = bmg160_accel_scan_masks;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &bmg160_info;
- if (data->irq <= 0)
- bmg160_gpio_probe(data);
-
if (data->irq > 0) {
ret = devm_request_threaded_irq(dev,
data->irq,
@@ -1168,7 +1141,7 @@ static int bmg160_runtime_suspend(struct device *dev)
ret = bmg160_set_mode(data, BMG160_MODE_SUSPEND);
if (ret < 0) {
- dev_err(data->dev, "set mode failed\n");
+ dev_err(dev, "set mode failed\n");
return -EAGAIN;
}
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index 5353d6328..a5c5c4e29 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -21,6 +21,7 @@
#define L3GD20_GYRO_DEV_NAME "l3gd20"
#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
#define LSM330_GYRO_DEV_NAME "lsm330_gyro"
+#define LSM9DS0_GYRO_DEV_NAME "lsm9ds0_gyro"
/**
* struct st_sensors_platform_data - gyro platform data
diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c
index d67b17b6a..a5377044e 100644
--- a/drivers/iio/gyro/st_gyro_buffer.c
+++ b/drivers/iio/gyro/st_gyro_buffer.c
@@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = {
int st_gyro_allocate_ring(struct iio_dev *indio_dev)
{
- return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ return iio_triggered_buffer_setup(indio_dev, NULL,
&st_sensors_trigger_handler, &st_gyro_buffer_setup_ops);
}
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 110f95b6e..a8012955a 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -190,6 +190,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
* drain settings, but only for INT1 and not
* for the DRDY line on INT2.
*/
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
.bootime = 2,
@@ -203,6 +204,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
[2] = LSM330DLC_GYRO_DEV_NAME,
[3] = L3G4IS_GYRO_DEV_NAME,
[4] = LSM330_GYRO_DEV_NAME,
+ [5] = LSM9DS0_GYRO_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
@@ -258,6 +260,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
* drain settings, but only for INT1 and not
* for the DRDY line on INT2.
*/
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
.bootime = 2,
@@ -322,6 +325,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
* drain settings, but only for INT1 and not
* for the DRDY line on INT2.
*/
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_GYRO_3_MULTIREAD_BIT,
.bootime = 2,
@@ -405,6 +409,7 @@ static const struct iio_info gyro_info = {
static const struct iio_trigger_ops st_gyro_trigger_ops = {
.owner = THIS_MODULE,
.set_trigger_state = ST_GYRO_TRIGGER_SET_STATE,
+ .validate_device = st_sensors_validate_device,
};
#define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops)
#else
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 6848451f8..40056b821 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -48,6 +48,10 @@ static const struct of_device_id st_gyro_of_match[] = {
.compatible = "st,lsm330-gyro",
.data = LSM330_GYRO_DEV_NAME,
},
+ {
+ .compatible = "st,lsm9ds0-gyro",
+ .data = LSM9DS0_GYRO_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_gyro_of_match);
@@ -93,6 +97,7 @@ static const struct i2c_device_id st_gyro_id_table[] = {
{ L3GD20_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
+ { LSM9DS0_GYRO_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_gyro_id_table);
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index d2b7a5fa3..fbf2faed5 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -54,6 +54,7 @@ static const struct spi_device_id st_gyro_id_table[] = {
{ L3GD20_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
+ { LSM9DS0_GYRO_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_gyro_id_table);
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 866dda133..738a86d9e 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -3,6 +3,16 @@
#
menu "Humidity sensors"
+config AM2315
+ tristate "Aosong AM2315 relative humidity and temperature sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ If you say yes here you get support for the Aosong AM2315
+ relative humidity and ambient temperature sensor.
+
+ This driver can also be built as a module. If so, the module will
+ be called am2315.
+
config DHT11
tristate "DHT11 (and compatible sensors) driver"
depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index c9f089a9a..4a73442fc 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -2,6 +2,7 @@
# Makefile for IIO humidity sensor drivers
#
+obj-$(CONFIG_AM2315) += am2315.o
obj-$(CONFIG_DHT11) += dht11.o
obj-$(CONFIG_HDC100X) += hdc100x.o
obj-$(CONFIG_HTU21) += htu21.o
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
new file mode 100644
index 000000000..11535911a
--- /dev/null
+++ b/drivers/iio/humidity/am2315.c
@@ -0,0 +1,301 @@
+/*
+ * Aosong AM2315 relative humidity and temperature sensor driver
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * 7-bit I2C address: 0x5C.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define AM2315_REG_HUM_MSB 0x00
+#define AM2315_REG_HUM_LSB 0x01
+#define AM2315_REG_TEMP_MSB 0x02
+#define AM2315_REG_TEMP_LSB 0x03
+
+#define AM2315_FUNCTION_READ 0x03
+#define AM2315_HUM_OFFSET 2
+#define AM2315_TEMP_OFFSET 4
+#define AM2315_ALL_CHANNEL_MASK GENMASK(1, 0)
+
+#define AM2315_DRIVER_NAME "am2315"
+
+struct am2315_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ s16 buffer[8]; /* 2x16-bit channels + 2x16 padding + 4x16 timestamp */
+};
+
+struct am2315_sensor_data {
+ s16 hum_data;
+ s16 temp_data;
+};
+
+static const struct iio_chan_spec am2315_channels[] = {
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+/* CRC calculation algorithm, as specified in the datasheet (page 13). */
+static u16 am2315_crc(u8 *data, u8 nr_bytes)
+{
+ int i;
+ u16 crc = 0xffff;
+
+ while (nr_bytes--) {
+ crc ^= *data++;
+ for (i = 0; i < 8; i++) {
+ if (crc & 0x01) {
+ crc >>= 1;
+ crc ^= 0xA001;
+ } else {
+ crc >>= 1;
+ }
+ }
+ }
+
+ return crc;
+}
+
+/* Wake the device up by issuing a dummy read; the result is ignored. */
+static void am2315_ping(struct i2c_client *client)
+{
+ i2c_smbus_read_byte_data(client, AM2315_REG_HUM_MSB);
+}
+
+static int am2315_read_data(struct am2315_data *data,
+ struct am2315_sensor_data *sensor_data)
+{
+ int ret;
+ /* tx_buf format: <function code> <start addr> <nr of regs to read> */
+ u8 tx_buf[3] = { AM2315_FUNCTION_READ, AM2315_REG_HUM_MSB, 4 };
+ /*
+ * rx_buf format:
+ * <function code> <number of registers read>
+ * <humidity MSB> <humidity LSB> <temp MSB> <temp LSB>
+ * <CRC LSB> <CRC MSB>
+ */
+ u8 rx_buf[8];
+ u16 crc;
+
+ /* First wake up the device. */
+ am2315_ping(data->client);
+
+ mutex_lock(&data->lock);
+ ret = i2c_master_send(data->client, tx_buf, sizeof(tx_buf));
+ if (ret < 0) {
+ dev_err(&data->client->dev, "failed to send read request\n");
+ goto exit_unlock;
+ }
+ /* Wait 2-3 ms, then read back the data sent by the device. */
+ usleep_range(2000, 3000);
+ /* Do a bulk data read, then pick out what we need. */
+ ret = i2c_master_recv(data->client, rx_buf, sizeof(rx_buf));
+ if (ret < 0) {
+ dev_err(&data->client->dev, "failed to read sensor data\n");
+ goto exit_unlock;
+ }
+ mutex_unlock(&data->lock);
+ /*
+ * Do a CRC check on the data and compare it to the value
+ * calculated by the device.
+ */
+ crc = am2315_crc(rx_buf, sizeof(rx_buf) - 2);
+ if ((crc & 0xff) != rx_buf[6] || (crc >> 8) != rx_buf[7]) {
+ dev_err(&data->client->dev, "failed to verify sensor data\n");
+ return -EIO;
+ }
+
+ sensor_data->hum_data = (rx_buf[AM2315_HUM_OFFSET] << 8) |
+ rx_buf[AM2315_HUM_OFFSET + 1];
+ sensor_data->temp_data = (rx_buf[AM2315_TEMP_OFFSET] << 8) |
+ rx_buf[AM2315_TEMP_OFFSET + 1];
+
+ return ret;
+
+exit_unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static irqreturn_t am2315_trigger_handler(int irq, void *p)
+{
+ int i;
+ int ret;
+ int bit;
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct am2315_data *data = iio_priv(indio_dev);
+ struct am2315_sensor_data sensor_data;
+
+ ret = am2315_read_data(data, &sensor_data);
+ if (ret < 0)
+ goto err;
+
+ mutex_lock(&data->lock);
+ if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
+ data->buffer[0] = sensor_data.hum_data;
+ data->buffer[1] = sensor_data.temp_data;
+ } else {
+ i = 0;
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ data->buffer[i] = (bit ? sensor_data.temp_data :
+ sensor_data.hum_data);
+ i++;
+ }
+ }
+ mutex_unlock(&data->lock);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ pf->timestamp);
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static int am2315_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct am2315_sensor_data sensor_data;
+ struct am2315_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = am2315_read_data(data, &sensor_data);
+ if (ret < 0)
+ return ret;
+ *val = (chan->type == IIO_HUMIDITYRELATIVE) ?
+ sensor_data.hum_data : sensor_data.temp_data;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 100;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info am2315_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = am2315_read_raw,
+};
+
+static int am2315_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct am2315_data *data;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio allocation failed!\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &am2315_info;
+ indio_dev->name = AM2315_DRIVER_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = am2315_channels;
+ indio_dev->num_channels = ARRAY_SIZE(am2315_channels);
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ am2315_trigger_handler, NULL);
+ if (ret < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ return ret;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto err_buffer_cleanup;
+
+ return 0;
+
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+
+static int am2315_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id am2315_i2c_id[] = {
+ {"am2315", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, am2315_i2c_id);
+
+static const struct acpi_device_id am2315_acpi_id[] = {
+ {"AOS2315", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, am2315_acpi_id);
+
+static struct i2c_driver am2315_driver = {
+ .driver = {
+ .name = "am2315",
+ .acpi_match_table = ACPI_PTR(am2315_acpi_id),
+ },
+ .probe = am2315_probe,
+ .remove = am2315_remove,
+ .id_table = am2315_i2c_id,
+};
+
+module_i2c_driver(am2315_driver);
+
+MODULE_AUTHOR("Tiberiu Breana <tiberiu.a.breana@intel.com>");
+MODULE_DESCRIPTION("Aosong AM2315 relative humidity and temperature");
+MODULE_LICENSE("GPL v2");
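For reference, the CRC routine above is the standard CRC-16/MODBUS algorithm (init 0xffff, reflected polynomial 0xA001). A minimal userspace sketch of the same check follows; the frame bytes are made-up sample values, not taken from a real sensor:

    #include <stdint.h>
    #include <stdio.h>

    /* Same algorithm as am2315_crc() above. */
    static uint16_t crc16_modbus(const uint8_t *data, uint8_t nr_bytes)
    {
            uint16_t crc = 0xffff;
            int i;

            while (nr_bytes--) {
                    crc ^= *data++;
                    for (i = 0; i < 8; i++) {
                            if (crc & 0x01) {
                                    crc >>= 1;
                                    crc ^= 0xA001;
                            } else {
                                    crc >>= 1;
                            }
                    }
            }
            return crc;
    }

    int main(void)
    {
            /* <function code> <byte count> <hum MSB/LSB> <temp MSB/LSB> */
            uint8_t frame[6] = { 0x03, 0x04, 0x01, 0xf4, 0x00, 0xfa };
            uint16_t crc = crc16_modbus(frame, sizeof(frame));

            /* The sensor appends the CRC LSB first, then the MSB. */
            printf("expected trailer: 0x%02x 0x%02x\n", crc & 0xff, crc >> 8);
            return 0;
    }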
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 20b500da9..9c47bc98f 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -96,6 +96,24 @@ struct dht11 {
struct {s64 ts; int value; } edges[DHT11_EDGES_PER_READ];
};
+#ifdef CONFIG_DYNAMIC_DEBUG
+/*
+ * dht11_edges_print: show the data as actually received by the
+ * driver.
+ */
+static void dht11_edges_print(struct dht11 *dht11)
+{
+ int i;
+
+ dev_dbg(dht11->dev, "%d edges detected:\n", dht11->num_edges);
+ for (i = 1; i < dht11->num_edges; ++i) {
+ dev_dbg(dht11->dev, "%d: %lld ns %s\n", i,
+ dht11->edges[i].ts - dht11->edges[i - 1].ts,
+ dht11->edges[i - 1].value ? "high" : "low");
+ }
+}
+#endif /* CONFIG_DYNAMIC_DEBUG */
+
static unsigned char dht11_decode_byte(char *bits)
{
unsigned char ret = 0;
@@ -119,8 +137,12 @@ static int dht11_decode(struct dht11 *dht11, int offset)
for (i = 0; i < DHT11_BITS_PER_READ; ++i) {
t = dht11->edges[offset + 2 * i + 2].ts -
dht11->edges[offset + 2 * i + 1].ts;
- if (!dht11->edges[offset + 2 * i + 1].value)
- return -EIO; /* lost synchronisation */
+ if (!dht11->edges[offset + 2 * i + 1].value) {
+ dev_dbg(dht11->dev,
+ "lost synchronisation at edge %d\n",
+ offset + 2 * i + 1);
+ return -EIO;
+ }
bits[i] = t > DHT11_THRESHOLD;
}
@@ -130,8 +152,10 @@ static int dht11_decode(struct dht11 *dht11, int offset)
temp_dec = dht11_decode_byte(&bits[24]);
checksum = dht11_decode_byte(&bits[32]);
- if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
+ if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum) {
+ dev_dbg(dht11->dev, "invalid checksum\n");
return -EIO;
+ }
dht11->timestamp = ktime_get_boot_ns();
if (hum_int < 20) { /* DHT22 */
@@ -182,6 +206,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
mutex_lock(&dht11->lock);
if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) {
timeres = ktime_get_resolution_ns();
+ dev_dbg(dht11->dev, "current timeresolution: %dns\n", timeres);
if (timeres > DHT11_MIN_TIMERES) {
dev_err(dht11->dev, "timeresolution %dns too low\n",
timeres);
@@ -219,10 +244,13 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
free_irq(dht11->irq, iio_dev);
+#ifdef CONFIG_DYNAMIC_DEBUG
+ dht11_edges_print(dht11);
+#endif
+
if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
- dev_err(&iio_dev->dev,
- "Only %d signal edges detected\n",
- dht11->num_edges);
+ dev_err(dht11->dev, "Only %d signal edges detected\n",
+ dht11->num_edges);
ret = -ETIMEDOUT;
}
if (ret < 0)
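For reference, a minimal userspace sketch of the decode path exercised above: demodulated bits are assembled MSB-first into bytes, and the checksum is the low byte of the sum of the four data bytes. The 40 sample bits are hypothetical:

    #include <stdio.h>

    /* Same bit assembly as dht11_decode_byte() above. */
    static unsigned char decode_byte(const char *bits)
    {
            unsigned char ret = 0;
            int i;

            for (i = 0; i < 8; ++i) {
                    ret <<= 1;
                    if (bits[i])
                            ret++;
            }
            return ret;
    }

    int main(void)
    {
            /* 40 bits: hum_int, hum_dec, temp_int, temp_dec, checksum */
            char bits[40] = {
                    0, 0, 1, 1, 0, 1, 0, 0,  /* 0x34 */
                    0, 0, 0, 0, 0, 0, 0, 0,  /* 0x00 */
                    0, 0, 0, 1, 0, 1, 1, 0,  /* 0x16 */
                    0, 0, 0, 0, 0, 0, 0, 0,  /* 0x00 */
                    0, 1, 0, 0, 1, 0, 1, 0,  /* 0x4a = 0x34 + 0x16 */
            };
            unsigned char hum = decode_byte(bits);
            unsigned char temp = decode_byte(bits + 16);
            unsigned char csum = decode_byte(bits + 32);
            unsigned char sum = (hum + decode_byte(bits + 8) +
                                 temp + decode_byte(bits + 24)) & 0xff;

            printf("checksum %s\n", sum == csum ? "ok" : "bad");
            return 0;
    }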
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 5e610f7de..1f1ad41ef 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -25,6 +25,8 @@ config ADIS16480
Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
ADIS16485, ADIS16488 inertial sensors.
+source "drivers/iio/imu/bmi160/Kconfig"
+
config KMX61
tristate "Kionix KMX61 6-axis accelerometer and magnetometer"
depends on I2C
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index e1e6e3d70..c71bcd30d 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -13,6 +13,7 @@ adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_trigger.o
adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
+obj-y += bmi160/
obj-y += inv_mpu6050/
obj-$(CONFIG_KMX61) += kmx61.o
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index 911255d41..ad6f91d06 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -324,7 +324,12 @@ static int adis_self_test(struct adis *adis)
msleep(adis->data->startup_delay);
- return adis_check_status(adis);
+ ret = adis_check_status(adis);
+
+ if (adis->data->self_test_no_autoclear)
+ adis_write_reg_16(adis, adis->data->msc_ctrl_reg, 0x00);
+
+ return ret;
}
/**
diff --git a/drivers/iio/imu/bmi160/Kconfig b/drivers/iio/imu/bmi160/Kconfig
new file mode 100644
index 000000000..005c17ccc
--- /dev/null
+++ b/drivers/iio/imu/bmi160/Kconfig
@@ -0,0 +1,32 @@
+#
+# BMI160 IMU driver
+#
+
+config BMI160
+ tristate
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+
+config BMI160_I2C
+ tristate "Bosch BMI160 I2C driver"
+ depends on I2C
+ select BMI160
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the BMI160 IMU on I2C, with
+ accelerometer, gyroscope and external BMM150 magnetometer.
+
+ This driver can also be built as a module. If so, the module will be
+ called bmi160_i2c.
+
+config BMI160_SPI
+ tristate "Bosch BMI160 SPI driver"
+ depends on SPI
+ select BMI160
+ select REGMAP_SPI
+ help
+ If you say yes here you get support for the BMI160 IMU on SPI, with
+ accelerometer, gyroscope and external BMM150 magnetometer.
+
+ This driver can also be built as a module. If so, the module will be
+ called bmi160_spi.
diff --git a/drivers/iio/imu/bmi160/Makefile b/drivers/iio/imu/bmi160/Makefile
new file mode 100644
index 000000000..10365e493
--- /dev/null
+++ b/drivers/iio/imu/bmi160/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Bosch BMI160 IMU
+#
+obj-$(CONFIG_BMI160) += bmi160_core.o
+obj-$(CONFIG_BMI160_I2C) += bmi160_i2c.o
+obj-$(CONFIG_BMI160_SPI) += bmi160_spi.o
diff --git a/drivers/iio/imu/bmi160/bmi160.h b/drivers/iio/imu/bmi160/bmi160.h
new file mode 100644
index 000000000..d2ae6ed70
--- /dev/null
+++ b/drivers/iio/imu/bmi160/bmi160.h
@@ -0,0 +1,10 @@
+#ifndef BMI160_H_
+#define BMI160_H_
+
+extern const struct regmap_config bmi160_regmap_config;
+
+int bmi160_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name, bool use_spi);
+void bmi160_core_remove(struct device *dev);
+
+#endif /* BMI160_H_ */
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
new file mode 100644
index 000000000..b8a290ec9
--- /dev/null
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -0,0 +1,596 @@
+/*
+ * BMI160 - Bosch IMU (accel, gyro plus external magnetometer)
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO core driver for BMI160, with support for I2C/SPI busses
+ *
+ * TODO: magnetometer, interrupts, hardware FIFO
+ */
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+
+#include "bmi160.h"
+
+#define BMI160_REG_CHIP_ID 0x00
+#define BMI160_CHIP_ID_VAL 0xD1
+
+#define BMI160_REG_PMU_STATUS 0x03
+
+/* X axis data low byte address, the rest can be obtained using axis offset */
+#define BMI160_REG_DATA_MAGN_XOUT_L 0x04
+#define BMI160_REG_DATA_GYRO_XOUT_L 0x0C
+#define BMI160_REG_DATA_ACCEL_XOUT_L 0x12
+
+#define BMI160_REG_ACCEL_CONFIG 0x40
+#define BMI160_ACCEL_CONFIG_ODR_MASK GENMASK(3, 0)
+#define BMI160_ACCEL_CONFIG_BWP_MASK GENMASK(6, 4)
+
+#define BMI160_REG_ACCEL_RANGE 0x41
+#define BMI160_ACCEL_RANGE_2G 0x03
+#define BMI160_ACCEL_RANGE_4G 0x05
+#define BMI160_ACCEL_RANGE_8G 0x08
+#define BMI160_ACCEL_RANGE_16G 0x0C
+
+#define BMI160_REG_GYRO_CONFIG 0x42
+#define BMI160_GYRO_CONFIG_ODR_MASK GENMASK(3, 0)
+#define BMI160_GYRO_CONFIG_BWP_MASK GENMASK(5, 4)
+
+#define BMI160_REG_GYRO_RANGE 0x43
+#define BMI160_GYRO_RANGE_2000DPS 0x00
+#define BMI160_GYRO_RANGE_1000DPS 0x01
+#define BMI160_GYRO_RANGE_500DPS 0x02
+#define BMI160_GYRO_RANGE_250DPS 0x03
+#define BMI160_GYRO_RANGE_125DPS 0x04
+
+#define BMI160_REG_CMD 0x7E
+#define BMI160_CMD_ACCEL_PM_SUSPEND 0x10
+#define BMI160_CMD_ACCEL_PM_NORMAL 0x11
+#define BMI160_CMD_ACCEL_PM_LOW_POWER 0x12
+#define BMI160_CMD_GYRO_PM_SUSPEND 0x14
+#define BMI160_CMD_GYRO_PM_NORMAL 0x15
+#define BMI160_CMD_GYRO_PM_FAST_STARTUP 0x17
+#define BMI160_CMD_SOFTRESET 0xB6
+
+#define BMI160_REG_DUMMY 0x7F
+
+#define BMI160_ACCEL_PMU_MIN_USLEEP 3200
+#define BMI160_ACCEL_PMU_MAX_USLEEP 3800
+#define BMI160_GYRO_PMU_MIN_USLEEP 55000
+#define BMI160_GYRO_PMU_MAX_USLEEP 80000
+#define BMI160_SOFTRESET_USLEEP 1000
+
+#define BMI160_CHANNEL(_type, _axis, _index) { \
+ .type = _type, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+}
+
+/* scan indexes follow DATA register order */
+enum bmi160_scan_axis {
+ BMI160_SCAN_EXT_MAGN_X = 0,
+ BMI160_SCAN_EXT_MAGN_Y,
+ BMI160_SCAN_EXT_MAGN_Z,
+ BMI160_SCAN_RHALL,
+ BMI160_SCAN_GYRO_X,
+ BMI160_SCAN_GYRO_Y,
+ BMI160_SCAN_GYRO_Z,
+ BMI160_SCAN_ACCEL_X,
+ BMI160_SCAN_ACCEL_Y,
+ BMI160_SCAN_ACCEL_Z,
+ BMI160_SCAN_TIMESTAMP,
+};
+
+enum bmi160_sensor_type {
+ BMI160_ACCEL = 0,
+ BMI160_GYRO,
+ BMI160_EXT_MAGN,
+ BMI160_NUM_SENSORS /* must be last */
+};
+
+struct bmi160_data {
+ struct regmap *regmap;
+};
+
+const struct regmap_config bmi160_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+EXPORT_SYMBOL(bmi160_regmap_config);
+
+struct bmi160_regs {
+ u8 data; /* LSB byte register for X-axis */
+ u8 config;
+ u8 config_odr_mask;
+ u8 config_bwp_mask;
+ u8 range;
+ u8 pmu_cmd_normal;
+ u8 pmu_cmd_suspend;
+};
+
+static struct bmi160_regs bmi160_regs[] = {
+ [BMI160_ACCEL] = {
+ .data = BMI160_REG_DATA_ACCEL_XOUT_L,
+ .config = BMI160_REG_ACCEL_CONFIG,
+ .config_odr_mask = BMI160_ACCEL_CONFIG_ODR_MASK,
+ .config_bwp_mask = BMI160_ACCEL_CONFIG_BWP_MASK,
+ .range = BMI160_REG_ACCEL_RANGE,
+ .pmu_cmd_normal = BMI160_CMD_ACCEL_PM_NORMAL,
+ .pmu_cmd_suspend = BMI160_CMD_ACCEL_PM_SUSPEND,
+ },
+ [BMI160_GYRO] = {
+ .data = BMI160_REG_DATA_GYRO_XOUT_L,
+ .config = BMI160_REG_GYRO_CONFIG,
+ .config_odr_mask = BMI160_GYRO_CONFIG_ODR_MASK,
+ .config_bwp_mask = BMI160_GYRO_CONFIG_BWP_MASK,
+ .range = BMI160_REG_GYRO_RANGE,
+ .pmu_cmd_normal = BMI160_CMD_GYRO_PM_NORMAL,
+ .pmu_cmd_suspend = BMI160_CMD_GYRO_PM_SUSPEND,
+ },
+};
+
+struct bmi160_pmu_time {
+ unsigned long min;
+ unsigned long max;
+};
+
+static struct bmi160_pmu_time bmi160_pmu_time[] = {
+ [BMI160_ACCEL] = {
+ .min = BMI160_ACCEL_PMU_MIN_USLEEP,
+ .max = BMI160_ACCEL_PMU_MAX_USLEEP
+ },
+ [BMI160_GYRO] = {
+ .min = BMI160_GYRO_PMU_MIN_USLEEP,
+ .max = BMI160_GYRO_PMU_MAX_USLEEP,
+ },
+};
+
+struct bmi160_scale {
+ u8 bits;
+ int uscale;
+};
+
+struct bmi160_odr {
+ u8 bits;
+ int odr;
+ int uodr;
+};
+
+static const struct bmi160_scale bmi160_accel_scale[] = {
+ { BMI160_ACCEL_RANGE_2G, 598},
+ { BMI160_ACCEL_RANGE_4G, 1197},
+ { BMI160_ACCEL_RANGE_8G, 2394},
+ { BMI160_ACCEL_RANGE_16G, 4788},
+};
+
+static const struct bmi160_scale bmi160_gyro_scale[] = {
+ { BMI160_GYRO_RANGE_2000DPS, 1065},
+ { BMI160_GYRO_RANGE_1000DPS, 532},
+ { BMI160_GYRO_RANGE_500DPS, 266},
+ { BMI160_GYRO_RANGE_250DPS, 133},
+ { BMI160_GYRO_RANGE_125DPS, 66},
+};
+
+struct bmi160_scale_item {
+ const struct bmi160_scale *tbl;
+ int num;
+};
+
+static const struct bmi160_scale_item bmi160_scale_table[] = {
+ [BMI160_ACCEL] = {
+ .tbl = bmi160_accel_scale,
+ .num = ARRAY_SIZE(bmi160_accel_scale),
+ },
+ [BMI160_GYRO] = {
+ .tbl = bmi160_gyro_scale,
+ .num = ARRAY_SIZE(bmi160_gyro_scale),
+ },
+};
+
+static const struct bmi160_odr bmi160_accel_odr[] = {
+ {0x01, 0, 781250},
+ {0x02, 1, 562500},
+ {0x03, 3, 125000},
+ {0x04, 6, 250000},
+ {0x05, 12, 500000},
+ {0x06, 25, 0},
+ {0x07, 50, 0},
+ {0x08, 100, 0},
+ {0x09, 200, 0},
+ {0x0A, 400, 0},
+ {0x0B, 800, 0},
+ {0x0C, 1600, 0},
+};
+
+static const struct bmi160_odr bmi160_gyro_odr[] = {
+ {0x06, 25, 0},
+ {0x07, 50, 0},
+ {0x08, 100, 0},
+ {0x09, 200, 0},
+ {0x0A, 400, 0},
+ {0x0B, 800, 0},
+ {0x0C, 1600, 0},
+ {0x0D, 3200, 0},
+};
+
+struct bmi160_odr_item {
+ const struct bmi160_odr *tbl;
+ int num;
+};
+
+static const struct bmi160_odr_item bmi160_odr_table[] = {
+ [BMI160_ACCEL] = {
+ .tbl = bmi160_accel_odr,
+ .num = ARRAY_SIZE(bmi160_accel_odr),
+ },
+ [BMI160_GYRO] = {
+ .tbl = bmi160_gyro_odr,
+ .num = ARRAY_SIZE(bmi160_gyro_odr),
+ },
+};
+
+static const struct iio_chan_spec bmi160_channels[] = {
+ BMI160_CHANNEL(IIO_ACCEL, X, BMI160_SCAN_ACCEL_X),
+ BMI160_CHANNEL(IIO_ACCEL, Y, BMI160_SCAN_ACCEL_Y),
+ BMI160_CHANNEL(IIO_ACCEL, Z, BMI160_SCAN_ACCEL_Z),
+ BMI160_CHANNEL(IIO_ANGL_VEL, X, BMI160_SCAN_GYRO_X),
+ BMI160_CHANNEL(IIO_ANGL_VEL, Y, BMI160_SCAN_GYRO_Y),
+ BMI160_CHANNEL(IIO_ANGL_VEL, Z, BMI160_SCAN_GYRO_Z),
+ IIO_CHAN_SOFT_TIMESTAMP(BMI160_SCAN_TIMESTAMP),
+};
+
+static enum bmi160_sensor_type bmi160_to_sensor(enum iio_chan_type iio_type)
+{
+ switch (iio_type) {
+ case IIO_ACCEL:
+ return BMI160_ACCEL;
+ case IIO_ANGL_VEL:
+ return BMI160_GYRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int bmi160_set_mode(struct bmi160_data *data, enum bmi160_sensor_type t,
+ bool mode)
+{
+ int ret;
+ u8 cmd;
+
+ if (mode)
+ cmd = bmi160_regs[t].pmu_cmd_normal;
+ else
+ cmd = bmi160_regs[t].pmu_cmd_suspend;
+
+ ret = regmap_write(data->regmap, BMI160_REG_CMD, cmd);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(bmi160_pmu_time[t].min, bmi160_pmu_time[t].max);
+
+ return 0;
+}
+
+static
+int bmi160_set_scale(struct bmi160_data *data, enum bmi160_sensor_type t,
+ int uscale)
+{
+ int i;
+
+ for (i = 0; i < bmi160_scale_table[t].num; i++)
+ if (bmi160_scale_table[t].tbl[i].uscale == uscale)
+ break;
+
+ if (i == bmi160_scale_table[t].num)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, bmi160_regs[t].range,
+ bmi160_scale_table[t].tbl[i].bits);
+}
+
+static
+int bmi160_get_scale(struct bmi160_data *data, enum bmi160_sensor_type t,
+ int *uscale)
+{
+ int i, ret, val;
+
+ ret = regmap_read(data->regmap, bmi160_regs[t].range, &val);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < bmi160_scale_table[t].num; i++)
+ if (bmi160_scale_table[t].tbl[i].bits == val) {
+ *uscale = bmi160_scale_table[t].tbl[i].uscale;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int bmi160_get_data(struct bmi160_data *data, int chan_type,
+ int axis, int *val)
+{
+ u8 reg;
+ int ret;
+ __le16 sample;
+ enum bmi160_sensor_type t = bmi160_to_sensor(chan_type);
+
+ reg = bmi160_regs[t].data + (axis - IIO_MOD_X) * sizeof(__le16);
+
+ ret = regmap_bulk_read(data->regmap, reg, &sample, sizeof(__le16));
+ if (ret < 0)
+ return ret;
+
+ *val = sign_extend32(le16_to_cpu(sample), 15);
+
+ return 0;
+}
+
+static
+int bmi160_set_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
+ int odr, int uodr)
+{
+ int i;
+
+ for (i = 0; i < bmi160_odr_table[t].num; i++)
+ if (bmi160_odr_table[t].tbl[i].odr == odr &&
+ bmi160_odr_table[t].tbl[i].uodr == uodr)
+ break;
+
+ if (i >= bmi160_odr_table[t].num)
+ return -EINVAL;
+
+ return regmap_update_bits(data->regmap,
+ bmi160_regs[t].config,
+ bmi160_regs[t].config_odr_mask,
+ bmi160_odr_table[t].tbl[i].bits);
+}
+
+static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
+ int *odr, int *uodr)
+{
+ int i, val, ret;
+
+ ret = regmap_read(data->regmap, bmi160_regs[t].config, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= bmi160_regs[t].config_odr_mask;
+
+ for (i = 0; i < bmi160_odr_table[t].num; i++)
+ if (val == bmi160_odr_table[t].tbl[i].bits)
+ break;
+
+ if (i >= bmi160_odr_table[t].num)
+ return -EINVAL;
+
+ *odr = bmi160_odr_table[t].tbl[i].odr;
+ *uodr = bmi160_odr_table[t].tbl[i].uodr;
+
+ return 0;
+}
+
+static irqreturn_t bmi160_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmi160_data *data = iio_priv(indio_dev);
+ s16 buf[16]; /* 3 sens x 3 axis x s16 + 3 x s16 pad + 4 x s16 tstamp */
+ int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
+ __le16 sample;
+
+ for_each_set_bit(i, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = regmap_bulk_read(data->regmap, base + i * sizeof(__le16),
+ &sample, sizeof(__le16));
+ if (ret < 0)
+ goto done;
+ buf[j++] = sample;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static int bmi160_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct bmi160_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = bmi160_get_data(data, chan->type, chan->channel2, val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ ret = bmi160_get_scale(data,
+ bmi160_to_sensor(chan->type), val2);
+ return ret < 0 ? ret : IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = bmi160_get_odr(data, bmi160_to_sensor(chan->type),
+ val, val2);
+ return ret < 0 ? ret : IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int bmi160_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bmi160_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return bmi160_set_scale(data,
+ bmi160_to_sensor(chan->type), val2);
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return bmi160_set_odr(data, bmi160_to_sensor(chan->type),
+ val, val2);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct iio_info bmi160_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = bmi160_read_raw,
+ .write_raw = bmi160_write_raw,
+};
+
+static const char *bmi160_match_acpi_device(struct device *dev)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return NULL;
+
+ return dev_name(dev);
+}
+
+static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
+{
+ int ret;
+ unsigned int val;
+ struct device *dev = regmap_get_device(data->regmap);
+
+ ret = regmap_write(data->regmap, BMI160_REG_CMD, BMI160_CMD_SOFTRESET);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(BMI160_SOFTRESET_USLEEP, BMI160_SOFTRESET_USLEEP + 1);
+
+ /*
+ * CS rising edge is needed before starting SPI, so do a dummy read
+ * See Section 3.2.1, page 86 of the datasheet
+ */
+ if (use_spi) {
+ ret = regmap_read(data->regmap, BMI160_REG_DUMMY, &val);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = regmap_read(data->regmap, BMI160_REG_CHIP_ID, &val);
+ if (ret < 0) {
+ dev_err(dev, "Error reading chip id\n");
+ return ret;
+ }
+ if (val != BMI160_CHIP_ID_VAL) {
+ dev_err(dev, "Wrong chip id, got %x expected %x\n",
+ val, BMI160_CHIP_ID_VAL);
+ return -ENODEV;
+ }
+
+ ret = bmi160_set_mode(data, BMI160_ACCEL, true);
+ if (ret < 0)
+ return ret;
+
+ ret = bmi160_set_mode(data, BMI160_GYRO, true);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void bmi160_chip_uninit(struct bmi160_data *data)
+{
+ bmi160_set_mode(data, BMI160_GYRO, false);
+ bmi160_set_mode(data, BMI160_ACCEL, false);
+}
+
+int bmi160_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name, bool use_spi)
+{
+ struct iio_dev *indio_dev;
+ struct bmi160_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+ data->regmap = regmap;
+
+ ret = bmi160_chip_init(data, use_spi);
+ if (ret < 0)
+ return ret;
+
+ if (!name && ACPI_HANDLE(dev))
+ name = bmi160_match_acpi_device(dev);
+
+ indio_dev->dev.parent = dev;
+ indio_dev->channels = bmi160_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bmi160_channels);
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &bmi160_info;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ bmi160_trigger_handler, NULL);
+ if (ret < 0)
+ goto uninit;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto buffer_cleanup;
+
+ return 0;
+buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+uninit:
+ bmi160_chip_uninit(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bmi160_core_probe);
+
+void bmi160_core_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bmi160_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ bmi160_chip_uninit(data);
+}
+EXPORT_SYMBOL_GPL(bmi160_core_remove);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com");
+MODULE_DESCRIPTION("Bosch BMI160 driver");
+MODULE_LICENSE("GPL v2");
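For reference, a minimal userspace sketch of the raw-sample conversion done in bmi160_get_data() above: the two bus bytes are combined little-endian and sign-extended from bit 15. The sample bytes are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    /* Same operation as the kernel's sign_extend32(value, 15). */
    static int32_t sign_extend32(uint32_t value, int index)
    {
            uint8_t shift = 31 - index;

            return (int32_t)(value << shift) >> shift;
    }

    int main(void)
    {
            uint8_t raw[2] = { 0x18, 0xfc };          /* LSB first on the wire */
            uint16_t sample = raw[0] | (raw[1] << 8); /* le16_to_cpu() */

            printf("%d\n", sign_extend32(sample, 15)); /* prints -1000 */
            return 0;
    }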
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
new file mode 100644
index 000000000..07a179d8f
--- /dev/null
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -0,0 +1,72 @@
+/*
+ * BMI160 - Bosch IMU, I2C bits
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * 7-bit I2C slave address is:
+ * - 0x68 if SDO is pulled to GND
+ * - 0x69 if SDO is pulled to VDDIO
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/acpi.h>
+
+#include "bmi160.h"
+
+static int bmi160_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ const char *name = NULL;
+
+ regmap = devm_regmap_init_i2c(client, &bmi160_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ if (id)
+ name = id->name;
+
+ return bmi160_core_probe(&client->dev, regmap, name, false);
+}
+
+static int bmi160_i2c_remove(struct i2c_client *client)
+{
+ bmi160_core_remove(&client->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id bmi160_i2c_id[] = {
+ {"bmi160", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, bmi160_i2c_id);
+
+static const struct acpi_device_id bmi160_acpi_match[] = {
+ {"BMI0160", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
+
+static struct i2c_driver bmi160_i2c_driver = {
+ .driver = {
+ .name = "bmi160_i2c",
+ .acpi_match_table = ACPI_PTR(bmi160_acpi_match),
+ },
+ .probe = bmi160_i2c_probe,
+ .remove = bmi160_i2c_remove,
+ .id_table = bmi160_i2c_id,
+};
+module_i2c_driver(bmi160_i2c_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
+MODULE_DESCRIPTION("BMI160 I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/bmi160/bmi160_spi.c b/drivers/iio/imu/bmi160/bmi160_spi.c
new file mode 100644
index 000000000..1ec8b12bd
--- /dev/null
+++ b/drivers/iio/imu/bmi160/bmi160_spi.c
@@ -0,0 +1,63 @@
+/*
+ * BMI160 - Bosch IMU, SPI bits
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+#include <linux/acpi.h>
+
+#include "bmi160.h"
+
+static int bmi160_spi_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ regmap = devm_regmap_init_spi(spi, &bmi160_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+ return bmi160_core_probe(&spi->dev, regmap, id->name, true);
+}
+
+static int bmi160_spi_remove(struct spi_device *spi)
+{
+ bmi160_core_remove(&spi->dev);
+
+ return 0;
+}
+
+static const struct spi_device_id bmi160_spi_id[] = {
+ {"bmi160", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, bmi160_spi_id);
+
+static const struct acpi_device_id bmi160_acpi_match[] = {
+ {"BMI0160", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
+
+static struct spi_driver bmi160_spi_driver = {
+ .probe = bmi160_spi_probe,
+ .remove = bmi160_spi_remove,
+ .id_table = bmi160_spi_id,
+ .driver = {
+ .acpi_match_table = ACPI_PTR(bmi160_acpi_match),
+ .name = "bmi160_spi",
+ },
+};
+module_spi_driver(bmi160_spi_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com");
+MODULE_DESCRIPTION("Bosch BMI160 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 847455a2d..f756feecf 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -13,10 +13,8 @@ config INV_MPU6050_I2C
select INV_MPU6050_IIO
select REGMAP_I2C
help
- This driver supports the Invensense MPU6050 devices.
- This driver can also support MPU6500 in MPU6050 compatibility mode
- and also in MPU6500 mode with some limitations.
- It is a gyroscope/accelerometer combo device.
+ This driver supports the Invensense MPU6050/6500/9150 motion tracking
+ devices over I2C.
This driver can be built as a module. The module will be called
inv-mpu6050-i2c.
@@ -26,7 +24,7 @@ config INV_MPU6050_SPI
select INV_MPU6050_IIO
select REGMAP_SPI
help
- This driver supports the Invensense MPU6050 devices.
- It is a gyroscope/accelerometer combo device.
+ This driver supports the Invensense MPU6000/6500/9150 motion tracking
+ devices over SPI.
This driver can be built as a module. The module will be called
inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index 2771106fd..dd6fc6d21 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -56,6 +56,7 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
int i;
acpi_status status;
union acpi_object *cpm;
+ int ret;
status = acpi_evaluate_object(adev->handle, "CNF0", NULL, &buffer);
if (ACPI_FAILURE(status))
@@ -82,10 +83,10 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
}
}
}
-
+ ret = cpm->package.count;
kfree(buffer.pointer);
- return cpm->package.count;
+ return ret;
}
static int acpi_i2c_check_resource(struct acpi_resource *ares, void *data)
@@ -183,7 +184,7 @@ int inv_mpu_acpi_create_mux_client(struct i2c_client *client)
} else
return 0; /* no secondary addr, which is OK */
}
- st->mux_client = i2c_new_device(st->mux_adapter, &info);
+ st->mux_client = i2c_new_device(st->muxc->adapter[0], &info);
if (!st->mux_client)
return -ENODEV;
}
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index d192953e9..ee40dae5a 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -23,7 +23,6 @@
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/iio/iio.h>
-#include <linux/i2c-mux.h>
#include <linux/acpi.h>
#include "inv_mpu_iio.h"
@@ -88,16 +87,29 @@ static const struct inv_mpu6050_chip_config chip_config_6050 = {
.accl_fs = INV_MPU6050_FS_02G,
};
+/* Indexed by enum inv_devices */
static const struct inv_mpu6050_hw hw_info[] = {
{
- .num_reg = 117,
+ .whoami = INV_MPU6050_WHOAMI_VALUE,
+ .name = "MPU6050",
+ .reg = &reg_set_6050,
+ .config = &chip_config_6050,
+ },
+ {
+ .whoami = INV_MPU6500_WHOAMI_VALUE,
.name = "MPU6500",
.reg = &reg_set_6500,
.config = &chip_config_6050,
},
{
- .num_reg = 117,
- .name = "MPU6050",
+ .whoami = INV_MPU6000_WHOAMI_VALUE,
+ .name = "MPU6000",
+ .reg = &reg_set_6050,
+ .config = &chip_config_6050,
+ },
+ {
+ .whoami = INV_MPU9150_WHOAMI_VALUE,
+ .name = "MPU9150",
.reg = &reg_set_6050,
.config = &chip_config_6050,
},
@@ -600,6 +612,10 @@ inv_fifo_rate_show(struct device *dev, struct device_attribute *attr,
/**
* inv_attr_show() - calling this function will show current
* parameters.
+ *
+ * Deprecated in favor of IIO mounting matrix API.
+ *
+ * See inv_get_mount_matrix()
*/
static ssize_t inv_attr_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -644,6 +660,18 @@ static int inv_mpu6050_validate_trigger(struct iio_dev *indio_dev,
return 0;
}
+static const struct iio_mount_matrix *
+inv_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ return &((struct inv_mpu6050_state *)iio_priv(indio_dev))->orientation;
+}
+
+static const struct iio_chan_spec_ext_info inv_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, inv_get_mount_matrix),
+ { },
+};
+
#define INV_MPU6050_CHAN(_type, _channel2, _index) \
{ \
.type = _type, \
@@ -660,6 +688,7 @@ static int inv_mpu6050_validate_trigger(struct iio_dev *indio_dev,
.shift = 0, \
.endianness = IIO_BE, \
}, \
+ .ext_info = inv_ext_info, \
}
static const struct iio_chan_spec inv_mpu_channels[] = {
@@ -692,14 +721,16 @@ static IIO_CONST_ATTR(in_accel_scale_available,
"0.000598 0.001196 0.002392 0.004785");
static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR, inv_fifo_rate_show,
inv_mpu6050_fifo_rate_store);
+
+/* Deprecated: kept for userspace backward compatibility. */
static IIO_DEVICE_ATTR(in_gyro_matrix, S_IRUGO, inv_attr_show, NULL,
ATTR_GYRO_MATRIX);
static IIO_DEVICE_ATTR(in_accel_matrix, S_IRUGO, inv_attr_show, NULL,
ATTR_ACCL_MATRIX);
static struct attribute *inv_attributes[] = {
- &iio_dev_attr_in_gyro_matrix.dev_attr.attr,
- &iio_dev_attr_in_accel_matrix.dev_attr.attr,
+ &iio_dev_attr_in_gyro_matrix.dev_attr.attr, /* deprecated */
+ &iio_dev_attr_in_accel_matrix.dev_attr.attr, /* deprecated */
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_const_attr_in_accel_scale_available.dev_attr.attr,
@@ -726,6 +757,7 @@ static const struct iio_info mpu_info = {
static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
{
int result;
+ unsigned int regval;
st->hw = &hw_info[st->chip_type];
st->reg = hw_info[st->chip_type].reg;
@@ -736,6 +768,17 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
if (result)
return result;
msleep(INV_MPU6050_POWER_UP_TIME);
+
+ /* check chip self-identification */
+ result = regmap_read(st->map, INV_MPU6050_REG_WHOAMI, &regval);
+ if (result)
+ return result;
+ if (regval != st->hw->whoami) {
+ dev_warn(regmap_get_device(st->map),
+ "whoami mismatch got %#02x expected %#02hhx for %s\n",
+ regval, st->hw->whoami, st->hw->name);
+ }
+
/*
* toggle power state. After reset, the sleep bit could be on
* or off depending on the OTP settings. Toggling power would
@@ -774,14 +817,31 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
if (!indio_dev)
return -ENOMEM;
+ BUILD_BUG_ON(ARRAY_SIZE(hw_info) != INV_NUM_PARTS);
+ if (chip_type < 0 || chip_type >= INV_NUM_PARTS) {
+ dev_err(dev, "Bad invensense chip_type=%d name=%s\n",
+ chip_type, name);
+ return -ENODEV;
+ }
st = iio_priv(indio_dev);
st->chip_type = chip_type;
st->powerup_count = 0;
st->irq = irq;
st->map = regmap;
+
pdata = dev_get_platdata(dev);
- if (pdata)
+ if (!pdata) {
+ result = of_iio_read_mount_matrix(dev, "mount-matrix",
+ &st->orientation);
+ if (result) {
+ dev_err(dev, "Failed to retrieve mounting matrix %d\n",
+ result);
+ return result;
+ }
+ } else {
st->plat_data = *pdata;
+ }
+
/* power is turned on inside check chip type */
result = inv_check_and_setup_chip(st);
if (result)
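For reference, a hypothetical device-tree fragment that the of_iio_read_mount_matrix() call above would consume; the nine strings are the row-major entries of the 3x3 rotation matrix, and the node name and values are illustrative only:

    mpu6050@68 {
            compatible = "invensense,mpu6050";
            reg = <0x68>;
            mount-matrix = "-1", "0", "0",
                           "0", "1", "0",
                           "0", "0", "-1";
    };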
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 5ee4e0dc0..e1fd7fa53 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -15,7 +15,6 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>
-#include <linux/i2c-mux.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
#include "inv_mpu_iio.h"
@@ -25,46 +24,16 @@ static const struct regmap_config inv_mpu_regmap_config = {
.val_bits = 8,
};
-/*
- * The i2c read/write needs to happen in unlocked mode. As the parent
- * adapter is common. If we use locked versions, it will fail as
- * the mux adapter will lock the parent i2c adapter, while calling
- * select/deselect functions.
- */
-static int inv_mpu6050_write_reg_unlocked(struct i2c_client *client,
- u8 reg, u8 d)
+static int inv_mpu6050_select_bypass(struct i2c_mux_core *muxc, u32 chan_id)
{
- int ret;
- u8 buf[2] = {reg, d};
- struct i2c_msg msg[1] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = sizeof(buf),
- .buf = buf,
- }
- };
-
- ret = __i2c_transfer(client->adapter, msg, 1);
- if (ret != 1)
- return ret;
-
- return 0;
-}
-
-static int inv_mpu6050_select_bypass(struct i2c_adapter *adap, void *mux_priv,
- u32 chan_id)
-{
- struct i2c_client *client = mux_priv;
- struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
+ struct iio_dev *indio_dev = i2c_mux_priv(muxc);
struct inv_mpu6050_state *st = iio_priv(indio_dev);
int ret = 0;
/* Use the same mutex which was used everywhere to protect power-op */
mutex_lock(&indio_dev->mlock);
if (!st->powerup_count) {
- ret = inv_mpu6050_write_reg_unlocked(client,
- st->reg->pwr_mgmt_1, 0);
+ ret = regmap_write(st->map, st->reg->pwr_mgmt_1, 0);
if (ret)
goto write_error;
@@ -73,10 +42,9 @@ static int inv_mpu6050_select_bypass(struct i2c_adapter *adap, void *mux_priv,
}
if (!ret) {
st->powerup_count++;
- ret = inv_mpu6050_write_reg_unlocked(client,
- st->reg->int_pin_cfg,
- INV_MPU6050_INT_PIN_CFG |
- INV_MPU6050_BIT_BYPASS_EN);
+ ret = regmap_write(st->map, st->reg->int_pin_cfg,
+ INV_MPU6050_INT_PIN_CFG |
+ INV_MPU6050_BIT_BYPASS_EN);
}
write_error:
mutex_unlock(&indio_dev->mlock);
@@ -84,21 +52,18 @@ write_error:
return ret;
}
-static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
- void *mux_priv, u32 chan_id)
+static int inv_mpu6050_deselect_bypass(struct i2c_mux_core *muxc, u32 chan_id)
{
- struct i2c_client *client = mux_priv;
- struct iio_dev *indio_dev = dev_get_drvdata(&client->dev);
+ struct iio_dev *indio_dev = i2c_mux_priv(muxc);
struct inv_mpu6050_state *st = iio_priv(indio_dev);
mutex_lock(&indio_dev->mlock);
/* It doesn't really matter if any of the calls fails */
- inv_mpu6050_write_reg_unlocked(client, st->reg->int_pin_cfg,
- INV_MPU6050_INT_PIN_CFG);
+ regmap_write(st->map, st->reg->int_pin_cfg, INV_MPU6050_INT_PIN_CFG);
st->powerup_count--;
if (!st->powerup_count)
- inv_mpu6050_write_reg_unlocked(client, st->reg->pwr_mgmt_1,
- INV_MPU6050_BIT_SLEEP);
+ regmap_write(st->map, st->reg->pwr_mgmt_1,
+ INV_MPU6050_BIT_SLEEP);
mutex_unlock(&indio_dev->mlock);
return 0;
@@ -160,16 +125,18 @@ static int inv_mpu_probe(struct i2c_client *client,
return result;
st = iio_priv(dev_get_drvdata(&client->dev));
- st->mux_adapter = i2c_add_mux_adapter(client->adapter,
- &client->dev,
- client,
- 0, 0, 0,
- inv_mpu6050_select_bypass,
- inv_mpu6050_deselect_bypass);
- if (!st->mux_adapter) {
- result = -ENODEV;
+ st->muxc = i2c_mux_alloc(client->adapter, &client->dev,
+ 1, 0, I2C_MUX_LOCKED,
+ inv_mpu6050_select_bypass,
+ inv_mpu6050_deselect_bypass);
+ if (!st->muxc) {
+ result = -ENOMEM;
goto out_unreg_device;
}
+ st->muxc->priv = dev_get_drvdata(&client->dev);
+ result = i2c_mux_add_adapter(st->muxc, 0, 0, 0);
+ if (result)
+ goto out_unreg_device;
result = inv_mpu_acpi_create_mux_client(client);
if (result)
@@ -178,7 +145,7 @@ static int inv_mpu_probe(struct i2c_client *client,
return 0;
out_del_mux:
- i2c_del_mux_adapter(st->mux_adapter);
+ i2c_mux_del_adapters(st->muxc);
out_unreg_device:
inv_mpu_core_remove(&client->dev);
return result;
@@ -190,7 +157,7 @@ static int inv_mpu_remove(struct i2c_client *client)
struct inv_mpu6050_state *st = iio_priv(indio_dev);
inv_mpu_acpi_delete_mux_client(client);
- i2c_del_mux_adapter(st->mux_adapter);
+ i2c_mux_del_adapters(st->muxc);
return inv_mpu_core_remove(&client->dev);
}
@@ -202,13 +169,14 @@ static int inv_mpu_remove(struct i2c_client *client)
static const struct i2c_device_id inv_mpu_id[] = {
{"mpu6050", INV_MPU6050},
{"mpu6500", INV_MPU6500},
+ {"mpu9150", INV_MPU9150},
{}
};
MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
static const struct acpi_device_id inv_acpi_match[] = {
- {"INVN6500", 0},
+ {"INVN6500", INV_MPU6500},
{ },
};
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index e302a4970..3bf8544cc 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -11,6 +11,7 @@
* GNU General Public License for more details.
*/
#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/iio/iio.h>
@@ -68,6 +69,7 @@ enum inv_devices {
INV_MPU6050,
INV_MPU6500,
INV_MPU6000,
+ INV_MPU9150,
INV_NUM_PARTS
};
@@ -93,13 +95,13 @@ struct inv_mpu6050_chip_config {
/**
* struct inv_mpu6050_hw - Other important hardware information.
- * @num_reg: Number of registers on device.
+ * @whoami: Self identification byte from WHO_AM_I register
* @name: name of the chip.
* @reg: register map of the chip.
* @config: configuration of the chip.
*/
struct inv_mpu6050_hw {
- u8 num_reg;
+ u8 whoami;
u8 *name;
const struct inv_mpu6050_reg_map *reg;
const struct inv_mpu6050_chip_config *config;
@@ -114,7 +116,8 @@ struct inv_mpu6050_hw {
* @hw: Other hardware-specific information.
* @chip_type: chip type.
* @time_stamp_lock: spin lock to time stamp.
- * @plat_data: platform data.
+ * @plat_data: platform data (deprecated in favor of @orientation).
+ * @orientation: sensor chip orientation relative to main hardware.
* @timestamps: kfifo queue to store time stamp.
* @map regmap pointer.
* @irq interrupt number.
@@ -127,10 +130,11 @@ struct inv_mpu6050_state {
const struct inv_mpu6050_hw *hw;
enum inv_devices chip_type;
spinlock_t time_stamp_lock;
- struct i2c_adapter *mux_adapter;
+ struct i2c_mux_core *muxc;
struct i2c_client *mux_client;
unsigned int powerup_count;
struct inv_mpu6050_platform_data plat_data;
+ struct iio_mount_matrix orientation;
DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
struct regmap *map;
int irq;
@@ -215,6 +219,13 @@ struct inv_mpu6050_state {
#define INV_MPU6050_MIN_FIFO_RATE 4
#define INV_MPU6050_ONE_K_HZ 1000
+#define INV_MPU6050_REG_WHOAMI 117
+
+#define INV_MPU6000_WHOAMI_VALUE 0x68
+#define INV_MPU6050_WHOAMI_VALUE 0x68
+#define INV_MPU6500_WHOAMI_VALUE 0x70
+#define INV_MPU9150_WHOAMI_VALUE 0x68
+
/* scan element definition */
enum inv_mpu6050_scan {
INV_MPU6050_SCAN_ACCL_X,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 7bcb8d839..190a4a51c 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -44,9 +44,19 @@ static int inv_mpu_i2c_disable(struct iio_dev *indio_dev)
static int inv_mpu_probe(struct spi_device *spi)
{
struct regmap *regmap;
- const struct spi_device_id *id = spi_get_device_id(spi);
- const char *name = id ? id->name : NULL;
- const int chip_type = id ? id->driver_data : 0;
+ const struct spi_device_id *spi_id = spi_get_device_id(spi);
+ const struct acpi_device_id *acpi_id;
+ const char *name = NULL;
+ enum inv_devices chip_type;
+
+ if (spi_id) {
+ chip_type = (enum inv_devices)spi_id->driver_data;
+ name = spi_id->name;
+ } else {
+ acpi_id = acpi_match_device(spi->dev.driver->acpi_match_table,
+ &spi->dev);
+ if (!acpi_id)
+ return -ENODEV;
+ chip_type = (enum inv_devices)acpi_id->driver_data;
+ }
regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
if (IS_ERR(regmap)) {
@@ -70,13 +80,15 @@ static int inv_mpu_remove(struct spi_device *spi)
*/
static const struct spi_device_id inv_mpu_id[] = {
{"mpu6000", INV_MPU6000},
+ {"mpu6500", INV_MPU6500},
+ {"mpu9150", INV_MPU9150},
{}
};
MODULE_DEVICE_TABLE(spi, inv_mpu_id);
static const struct acpi_device_id inv_acpi_match[] = {
- {"INVN6000", 0},
+ {"INVN6000", INV_MPU6000},
{ },
};
MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index e5306b4e0..2e7dd5754 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 70cb7eb0a..e6319a934 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
+#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
@@ -78,6 +79,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_CONCENTRATION] = "concentration",
[IIO_RESISTANCE] = "resistance",
[IIO_PH] = "ph",
+ [IIO_UVINDEX] = "uvindex",
};
static const char * const iio_modifier_names[] = {
@@ -100,6 +102,7 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_LIGHT_RED] = "red",
[IIO_MOD_LIGHT_GREEN] = "green",
[IIO_MOD_LIGHT_BLUE] = "blue",
+ [IIO_MOD_LIGHT_UV] = "uv",
[IIO_MOD_QUATERNION] = "quaternion",
[IIO_MOD_TEMP_AMBIENT] = "ambient",
[IIO_MOD_TEMP_OBJECT] = "object",
@@ -409,6 +412,88 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL_GPL(iio_enum_write);
+static const struct iio_mount_matrix iio_mount_idmatrix = {
+ .rotation = {
+ "1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "1"
+ }
+};
+
+static int iio_setup_mount_idmatrix(const struct device *dev,
+ struct iio_mount_matrix *matrix)
+{
+ *matrix = iio_mount_idmatrix;
+ dev_info(dev, "mounting matrix not found: using identity...\n");
+ return 0;
+}
+
+ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
+ const struct iio_chan_spec *chan, char *buf)
+{
+ const struct iio_mount_matrix *mtx = ((iio_get_mount_matrix_t *)
+ priv)(indio_dev, chan);
+
+ if (IS_ERR(mtx))
+ return PTR_ERR(mtx);
+
+ if (!mtx)
+ mtx = &iio_mount_idmatrix;
+
+ return snprintf(buf, PAGE_SIZE, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
+ mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
+ mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
+ mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
+}
+EXPORT_SYMBOL_GPL(iio_show_mount_matrix);
+
+/**
+ * of_iio_read_mount_matrix() - retrieve iio device mounting matrix from
+ * device-tree "mount-matrix" property
+ * @dev: device the mounting matrix property is assigned to
+ * @propname: device specific mounting matrix property name
+ * @matrix: where to store retrieved matrix
+ *
+ * If the device has no mounting matrix property, a default 3x3 identity
+ * matrix is filled in.
+ *
+ * Return: 0 if success, or a negative error code on failure.
+ */
+#ifdef CONFIG_OF
+int of_iio_read_mount_matrix(const struct device *dev,
+ const char *propname,
+ struct iio_mount_matrix *matrix)
+{
+ if (dev->of_node) {
+ int err = of_property_read_string_array(dev->of_node,
+ propname, matrix->rotation,
+ ARRAY_SIZE(iio_mount_idmatrix.rotation));
+
+ if (err == ARRAY_SIZE(iio_mount_idmatrix.rotation))
+ return 0;
+
+ if (err >= 0)
+ /* Invalid number of matrix entries. */
+ return -EINVAL;
+
+ if (err != -EINVAL)
+ /* Invalid matrix declaration format. */
+ return err;
+ }
+
+ /* Matrix was not declared at all: fallback to identity. */
+ return iio_setup_mount_idmatrix(dev, matrix);
+}
+#else
+int of_iio_read_mount_matrix(const struct device *dev,
+ const char *propname,
+ struct iio_mount_matrix *matrix)
+{
+ return iio_setup_mount_idmatrix(dev, matrix);
+}
+#endif
+EXPORT_SYMBOL(of_iio_read_mount_matrix);
+
/**
* iio_format_value() - Formats a IIO value into its string representation
* @buf: The buffer to which the formatted value gets written
@@ -1375,6 +1460,44 @@ void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev)
}
EXPORT_SYMBOL_GPL(devm_iio_device_unregister);
+/**
+ * iio_device_claim_direct_mode - Keep device in direct mode
+ * @indio_dev: the iio_dev associated with the device
+ *
+ * If the device is in direct mode it is guaranteed to stay
+ * that way until iio_device_release_direct_mode() is called.
+ *
+ * Use with iio_device_release_direct_mode()
+ *
+ * Returns: 0 on success, -EBUSY on failure
+ */
+int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
+{
+ mutex_lock(&indio_dev->mlock);
+
+ if (iio_buffer_enabled(indio_dev)) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EBUSY;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);
+
+/**
+ * iio_device_release_direct_mode - releases claim on direct mode
+ * @indio_dev: the iio_dev associated with the device
+ *
+ * Release the claim. Device is no longer guaranteed to stay
+ * in direct mode.
+ *
+ * Use with iio_device_claim_direct_mode()
+ */
+void iio_device_release_direct_mode(struct iio_dev *indio_dev)
+{
+ mutex_unlock(&indio_dev->mlock);
+}
+EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);
+
subsys_initcall(iio_init);
module_exit(iio_exit);
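For reference, a sketch of the intended calling pattern for the new claim/release helpers in a driver's read_raw() path; everything except the two helpers (the foo_* names and the channel read) is hypothetical:

    #include <linux/iio/iio.h>

    static int foo_read_raw(struct iio_dev *indio_dev,
                            struct iio_chan_spec const *chan,
                            int *val, int *val2, long mask)
    {
            int ret;

            switch (mask) {
            case IIO_CHAN_INFO_RAW:
                    /* Fails with -EBUSY while the buffer is enabled. */
                    ret = iio_device_claim_direct_mode(indio_dev);
                    if (ret)
                            return ret;
                    ret = foo_read_channel(indio_dev, chan, val); /* hypothetical */
                    iio_device_release_direct_mode(indio_dev);
                    return ret < 0 ? ret : IIO_VAL_INT;
            default:
                    return -EINVAL;
            }
    }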
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 734a0042d..c4757e636 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -356,6 +356,54 @@ void iio_channel_release(struct iio_channel *channel)
}
EXPORT_SYMBOL_GPL(iio_channel_release);
+static void devm_iio_channel_free(struct device *dev, void *res)
+{
+ struct iio_channel *channel = *(struct iio_channel **)res;
+
+ iio_channel_release(channel);
+}
+
+static int devm_iio_channel_match(struct device *dev, void *res, void *data)
+{
+ struct iio_channel **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+
+ return *r == data;
+}
+
+struct iio_channel *devm_iio_channel_get(struct device *dev,
+ const char *channel_name)
+{
+ struct iio_channel **ptr, *channel;
+
+ ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ channel = iio_channel_get(dev, channel_name);
+ if (IS_ERR(channel)) {
+ devres_free(ptr);
+ return channel;
+ }
+
+ *ptr = channel;
+ devres_add(dev, ptr);
+
+ return channel;
+}
+EXPORT_SYMBOL_GPL(devm_iio_channel_get);
+
+void devm_iio_channel_release(struct device *dev, struct iio_channel *channel)
+{
+ WARN_ON(devres_release(dev, devm_iio_channel_free,
+ devm_iio_channel_match, channel));
+}
+EXPORT_SYMBOL_GPL(devm_iio_channel_release);
+
struct iio_channel *iio_channel_get_all(struct device *dev)
{
const char *name;
@@ -441,6 +489,42 @@ void iio_channel_release_all(struct iio_channel *channels)
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);
+static void devm_iio_channel_free_all(struct device *dev, void *res)
+{
+ struct iio_channel *channels = *(struct iio_channel **)res;
+
+ iio_channel_release_all(channels);
+}
+
+struct iio_channel *devm_iio_channel_get_all(struct device *dev)
+{
+ struct iio_channel **ptr, *channels;
+
+ ptr = devres_alloc(devm_iio_channel_free_all, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ channels = iio_channel_get_all(dev);
+ if (IS_ERR(channels)) {
+ devres_free(ptr);
+ return channels;
+ }
+
+ *ptr = channels;
+ devres_add(dev, ptr);
+
+ return channels;
+}
+EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
+
+void devm_iio_channel_release_all(struct device *dev,
+ struct iio_channel *channels)
+{
+ WARN_ON(devres_release(dev, devm_iio_channel_free_all,
+ devm_iio_channel_match, channels));
+}
+EXPORT_SYMBOL_GPL(devm_iio_channel_release_all);
+
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
enum iio_chan_info_enum info)
{
@@ -452,7 +536,7 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
if (val2 == NULL)
val2 = &unused;
- if(!iio_channel_has_info(chan->channel, info))
+ if (!iio_channel_has_info(chan->channel, info))
return -EINVAL;
if (chan->indio_dev->info->read_raw_multi) {
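For reference, a sketch of consumer-side usage of the new devm_iio_channel_get() helper; the probe function and the "battery-voltage" channel name are hypothetical:

    #include <linux/iio/consumer.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct iio_channel *chan;
            int raw, ret;

            /* Managed lookup: released automatically on driver detach. */
            chan = devm_iio_channel_get(&pdev->dev, "battery-voltage");
            if (IS_ERR(chan))
                    return PTR_ERR(chan);

            ret = iio_read_channel_raw(chan, &raw);
            if (ret < 0)
                    return ret;

            dev_info(&pdev->dev, "raw reading: %d\n", raw);
            return 0;
    }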
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index cfd3df841..7c566f516 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -73,6 +73,17 @@ config BH1750
To compile this driver as a module, choose M here: the module will
be called bh1750.
+config BH1780
+ tristate "ROHM BH1780 ambient light sensor"
+ depends on I2C
+ depends on !SENSORS_BH1780
+ help
+ Say Y here to build support for the ROHM BH1780GLI ambient
+ light sensor.
+
+ To compile this driver as a module, choose M here: the module will
+ be called bh1780.
+
config CM32181
depends on I2C
tristate "CM32181 driver"
@@ -223,6 +234,17 @@ config LTR501
This driver can also be built as a module. If so, the module
will be called ltr501.
+config MAX44000
+ tristate "MAX44000 Ambient and Infrared Proximity Sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here if you want to build support for Maxim Integrated's
+ MAX44000 ambient and infrared proximity sensor device.
+
+ To compile this driver as a module, choose M here:
+ the module will be called max44000.
+
config OPT3001
tristate "Texas Instruments OPT3001 Light Sensor"
depends on I2C
@@ -320,4 +342,14 @@ config VCNL4000
To compile this driver as a module, choose M here: the
module will be called vcnl4000.
+config VEML6070
+ tristate "VEML6070 UV A light sensor"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Vishay VEML6070 UV A
+ light sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called veml6070.
+
endmenu
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index b2c31053d..6f2a3c62d 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_AL3320A) += al3320a.o
obj-$(CONFIG_APDS9300) += apds9300.o
obj-$(CONFIG_APDS9960) += apds9960.o
obj-$(CONFIG_BH1750) += bh1750.o
+obj-$(CONFIG_BH1780) += bh1780.o
obj-$(CONFIG_CM32181) += cm32181.o
obj-$(CONFIG_CM3232) += cm3232.o
obj-$(CONFIG_CM3323) += cm3323.o
@@ -20,6 +21,7 @@ obj-$(CONFIG_ISL29125) += isl29125.o
obj-$(CONFIG_JSA1212) += jsa1212.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
obj-$(CONFIG_LTR501) += ltr501.o
+obj-$(CONFIG_MAX44000) += max44000.o
obj-$(CONFIG_OPT3001) += opt3001.o
obj-$(CONFIG_PA12203001) += pa12203001.o
obj-$(CONFIG_RPR0521) += rpr0521.o
@@ -30,3 +32,4 @@ obj-$(CONFIG_TCS3472) += tcs3472.o
obj-$(CONFIG_TSL4531) += tsl4531.o
obj-$(CONFIG_US5182D) += us5182d.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
+obj-$(CONFIG_VEML6070) += veml6070.o
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index 6443aad80..651d57b8a 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -321,8 +321,12 @@ static const struct iio_chan_spec apds9960_channels[] = {
};
/* integration time in us */
-static const int apds9960_int_time[][2] =
- { {28000, 246}, {100000, 219}, {200000, 182}, {700000, 0} };
+static const int apds9960_int_time[][2] = {
+ { 28000, 246},
+ {100000, 219},
+ {200000, 182},
+ {700000, 0}
+};
/* gain mapping */
static const int apds9960_pxs_gain_map[] = {1, 2, 4, 8};
@@ -491,9 +495,10 @@ static int apds9960_read_raw(struct iio_dev *indio_dev,
case IIO_INTENSITY:
ret = regmap_bulk_read(data->regmap, chan->address,
&buf, 2);
- if (!ret)
+ if (!ret) {
ret = IIO_VAL_INT;
- *val = le16_to_cpu(buf);
+ *val = le16_to_cpu(buf);
+ }
break;
default:
ret = -EINVAL;
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
new file mode 100644
index 000000000..b54dcba05
--- /dev/null
+++ b/drivers/iio/light/bh1780.c
@@ -0,0 +1,299 @@
+/*
+ * ROHM BH1780GLI Ambient Light Sensor Driver
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ * Loosely based on the previous BH1780 ALS misc driver
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Hemanth V <hemanthv@ti.com>
+ */
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/bitops.h>
+
+#define BH1780_CMD_BIT BIT(7)
+#define BH1780_REG_CONTROL 0x00
+#define BH1780_REG_PARTID 0x0A
+#define BH1780_REG_MANFID 0x0B
+#define BH1780_REG_DLOW 0x0C
+#define BH1780_REG_DHIGH 0x0D
+
+#define BH1780_REVMASK GENMASK(3, 0)
+#define BH1780_POWMASK GENMASK(1, 0)
+#define BH1780_POFF (0x0)
+#define BH1780_PON (0x3)
+
+/* power on settling time in ms */
+#define BH1780_PON_DELAY 2
+/* max time before value available in ms */
+#define BH1780_INTERVAL 250
+
+struct bh1780_data {
+ struct i2c_client *client;
+};
+
+static int bh1780_write(struct bh1780_data *bh1780, u8 reg, u8 val)
+{
+ int ret = i2c_smbus_write_byte_data(bh1780->client,
+ BH1780_CMD_BIT | reg,
+ val);
+ if (ret < 0)
+ dev_err(&bh1780->client->dev,
+ "i2c_smbus_write_byte_data failed error "
+ "%d, register %01x\n",
+ ret, reg);
+ return ret;
+}
+
+static int bh1780_read(struct bh1780_data *bh1780, u8 reg)
+{
+ int ret = i2c_smbus_read_byte_data(bh1780->client,
+ BH1780_CMD_BIT | reg);
+ if (ret < 0)
+ dev_err(&bh1780->client->dev,
+ "i2c_smbus_read_byte_data failed error "
+ "%d, register %01x\n",
+ ret, reg);
+ return ret;
+}
+
+static int bh1780_read_word(struct bh1780_data *bh1780, u8 reg)
+{
+ int ret = i2c_smbus_read_word_data(bh1780->client,
+ BH1780_CMD_BIT | reg);
+ if (ret < 0)
+ dev_err(&bh1780->client->dev,
+ "i2c_smbus_read_word_data failed error "
+ "%d, register %01x\n",
+ ret, reg);
+ return ret;
+}
+
+static int bh1780_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
+{
+ struct bh1780_data *bh1780 = iio_priv(indio_dev);
+ int ret;
+
+ if (!readval)
+ return bh1780_write(bh1780, (u8)reg, (u8)writeval);
+
+ ret = bh1780_read(bh1780, (u8)reg);
+ if (ret < 0)
+ return ret;
+
+ *readval = ret;
+
+ return 0;
+}
+
+static int bh1780_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct bh1780_data *bh1780 = iio_priv(indio_dev);
+ int value;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ pm_runtime_get_sync(&bh1780->client->dev);
+ value = bh1780_read_word(bh1780, BH1780_REG_DLOW);
+ if (value < 0)
+ return value;
+ pm_runtime_mark_last_busy(&bh1780->client->dev);
+ pm_runtime_put_autosuspend(&bh1780->client->dev);
+ *val = value;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = 0;
+ *val2 = BH1780_INTERVAL * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info bh1780_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = bh1780_read_raw,
+ .debugfs_reg_access = bh1780_debugfs_reg_access,
+};
+
+static const struct iio_chan_spec bh1780_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_INT_TIME)
+ }
+};
+
+static int bh1780_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct bh1780_data *bh1780;
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct iio_dev *indio_dev;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
+ return -EIO;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*bh1780));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ bh1780 = iio_priv(indio_dev);
+ bh1780->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ /* Power up the device */
+ ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON);
+ if (ret < 0)
+ return ret;
+ msleep(BH1780_PON_DELAY);
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ ret = bh1780_read(bh1780, BH1780_REG_PARTID);
+ if (ret < 0)
+ goto out_disable_pm;
+ dev_info(&client->dev,
+ "Ambient Light Sensor, Rev : %lu\n",
+ (ret & BH1780_REVMASK));
+
+ /*
+ * As the device takes 250 ms to even come up with a fresh
+ * measurement after power-on, do not shut it down unnecessarily.
+ * Set autosuspend to five seconds.
+ */
+ pm_runtime_set_autosuspend_delay(&client->dev, 5000);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_put(&client->dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &bh1780_info;
+ indio_dev->name = "bh1780";
+ indio_dev->channels = bh1780_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bh1780_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto out_disable_pm;
+ return 0;
+
+out_disable_pm:
+ pm_runtime_put_noidle(&client->dev);
+ pm_runtime_disable(&client->dev);
+ return ret;
+}
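The probe above relies on a common runtime-PM bring-up idiom that is easy to misread; spelled out in isolation (a sketch of the sequence, not additional patch code):

/*
 * pm_runtime_get_noresume(dev);  take a reference without invoking the
 *                                resume callback (power-on was done manually)
 * pm_runtime_set_active(dev);    tell the PM core the device is on
 * pm_runtime_enable(dev);        hand control to runtime PM
 *   ... setup while the sensor is guaranteed powered ...
 * pm_runtime_put(dev);           drop the reference; autosuspend may now
 *                                power the sensor back off
 *
 * On failure the reference must instead be dropped with
 * pm_runtime_put_noidle() before pm_runtime_disable(), which is what the
 * out_disable_pm label does.
 */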
+
+static int bh1780_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct bh1780_data *bh1780 = iio_priv(indio_dev);
+ int ret;
+
+ iio_device_unregister(indio_dev);
+ pm_runtime_get_sync(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+ pm_runtime_disable(&client->dev);
+ ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to power off\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bh1780_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct bh1780_data *bh1780 = iio_priv(indio_dev);
+ int ret;
+
+ ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF);
+ if (ret < 0) {
+ dev_err(dev, "failed to runtime suspend\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int bh1780_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct bh1780_data *bh1780 = iio_priv(indio_dev);
+ int ret;
+
+ ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON);
+ if (ret < 0) {
+ dev_err(dev, "failed to runtime resume\n");
+ return ret;
+ }
+
+ /* Wait for power on, then for a value to be available */
+ msleep(BH1780_PON_DELAY + BH1780_INTERVAL);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops bh1780_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(bh1780_runtime_suspend,
+ bh1780_runtime_resume, NULL)
+};
+
+static const struct i2c_device_id bh1780_id[] = {
+ { "bh1780", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, bh1780_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_bh1780_match[] = {
+ { .compatible = "rohm,bh1780gli", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_bh1780_match);
+#endif
+
+static struct i2c_driver bh1780_driver = {
+ .probe = bh1780_probe,
+ .remove = bh1780_remove,
+ .id_table = bh1780_id,
+ .driver = {
+ .name = "bh1780",
+ .pm = &bh1780_dev_pm_ops,
+ .of_match_table = of_match_ptr(of_bh1780_match),
+ },
+};
+
+module_i2c_driver(bh1780_driver);
+
+MODULE_DESCRIPTION("ROHM BH1780GLI Ambient Light Sensor Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
new file mode 100644
index 000000000..f17cb2ea1
--- /dev/null
+++ b/drivers/iio/light/max44000.c
@@ -0,0 +1,638 @@
+/*
+ * MAX44000 Ambient and Infrared Proximity Sensor
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Data sheet: https://datasheets.maximintegrated.com/en/ds/MAX44000.pdf
+ *
+ * 7-bit I2C slave address 0x4a
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/util_macros.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/acpi.h>
+
+#define MAX44000_DRV_NAME "max44000"
+
+/* Registers in datasheet order */
+#define MAX44000_REG_STATUS 0x00
+#define MAX44000_REG_CFG_MAIN 0x01
+#define MAX44000_REG_CFG_RX 0x02
+#define MAX44000_REG_CFG_TX 0x03
+#define MAX44000_REG_ALS_DATA_HI 0x04
+#define MAX44000_REG_ALS_DATA_LO 0x05
+#define MAX44000_REG_PRX_DATA 0x16
+#define MAX44000_REG_ALS_UPTHR_HI 0x06
+#define MAX44000_REG_ALS_UPTHR_LO 0x07
+#define MAX44000_REG_ALS_LOTHR_HI 0x08
+#define MAX44000_REG_ALS_LOTHR_LO 0x09
+#define MAX44000_REG_PST 0x0a
+#define MAX44000_REG_PRX_IND 0x0b
+#define MAX44000_REG_PRX_THR 0x0c
+#define MAX44000_REG_TRIM_GAIN_GREEN 0x0f
+#define MAX44000_REG_TRIM_GAIN_IR 0x10
+
+/* REG_CFG bits */
+#define MAX44000_CFG_ALSINTE 0x01
+#define MAX44000_CFG_PRXINTE 0x02
+#define MAX44000_CFG_MASK 0x1c
+#define MAX44000_CFG_MODE_SHUTDOWN 0x00
+#define MAX44000_CFG_MODE_ALS_GIR 0x04
+#define MAX44000_CFG_MODE_ALS_G 0x08
+#define MAX44000_CFG_MODE_ALS_IR 0x0c
+#define MAX44000_CFG_MODE_ALS_PRX 0x10
+#define MAX44000_CFG_MODE_PRX 0x14
+#define MAX44000_CFG_TRIM 0x20
+
+/*
+ * Upper 4 bits are not documented but start as 1 on power-up.
+ * Setting them to 0 causes proximity to misbehave, so set them to 1.
+ */
+#define MAX44000_REG_CFG_RX_DEFAULT 0xf0
+
+/* REG_RX bits */
+#define MAX44000_CFG_RX_ALSTIM_MASK 0x0c
+#define MAX44000_CFG_RX_ALSTIM_SHIFT 2
+#define MAX44000_CFG_RX_ALSPGA_MASK 0x03
+#define MAX44000_CFG_RX_ALSPGA_SHIFT 0
+
+/* REG_TX bits */
+#define MAX44000_LED_CURRENT_MASK 0xf
+#define MAX44000_LED_CURRENT_MAX 11
+#define MAX44000_LED_CURRENT_DEFAULT 6
+
+#define MAX44000_ALSDATA_OVERFLOW 0x4000
+
+struct max44000_data {
+ struct mutex lock;
+ struct regmap *regmap;
+};
+
+/* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */
+#define MAX44000_ALS_TO_LUX_DEFAULT_FRACTION_LOG2 5
+
+/* Scale can be multiplied by up to 128x via ALSPGA for measurement gain */
+static const int max44000_alspga_shift[] = {0, 2, 4, 7};
+#define MAX44000_ALSPGA_MAX_SHIFT 7
+
+/*
+ * Scale can be multiplied by up to 64x via ALSTIM because of lost resolution
+ *
+ * This scaling factor is hidden from userspace and instead accounted for when
+ * reading raw values from the device.
+ *
+ * This makes it possible to cleanly expose ALSPGA as IIO_CHAN_INFO_SCALE and
+ * ALSTIM as IIO_CHAN_INFO_INT_TIME without the values affecting each other.
+ *
+ * Handling this internally is also required for buffer support because the
+ * channel's scan_type can't be modified dynamically.
+ */
+static const int max44000_alstim_shift[] = {0, 2, 4, 6};
+#define MAX44000_ALSTIM_SHIFT(alstim) (2 * (alstim))
+
+/* Available integration times, manually aligned for readability: */
+static const int max44000_int_time_avail_ns_array[] = {
+ 100000000,
+ 25000000,
+ 6250000,
+ 1562500,
+};
+static const char max44000_int_time_avail_str[] =
+ "0.100 "
+ "0.025 "
+ "0.00625 "
+ "0.001625";
+
+/* Available scales (stored internally in ulux), manually aligned for readability: */
+static const int max44000_scale_avail_ulux_array[] = {
+ 31250,
+ 125000,
+ 500000,
+ 4000000,
+};
+static const char max44000_scale_avail_str[] =
+ "0.03125 "
+ "0.125 "
+ "0.5 "
+ "4";
+
+#define MAX44000_SCAN_INDEX_ALS 0
+#define MAX44000_SCAN_INDEX_PRX 1
+
+static const struct iio_chan_spec max44000_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ .scan_index = MAX44000_SCAN_INDEX_ALS,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 14,
+ .storagebits = 16,
+ }
+ },
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .scan_index = MAX44000_SCAN_INDEX_PRX,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 8,
+ .storagebits = 16,
+ }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+ {
+ .type = IIO_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .extend_name = "led",
+ .output = 1,
+ .scan_index = -1,
+ },
+};
+
+static int max44000_read_alstim(struct max44000_data *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, MAX44000_REG_CFG_RX, &val);
+ if (ret < 0)
+ return ret;
+ return (val & MAX44000_CFG_RX_ALSTIM_MASK) >> MAX44000_CFG_RX_ALSTIM_SHIFT;
+}
+
+static int max44000_write_alstim(struct max44000_data *data, int val)
+{
+ return regmap_write_bits(data->regmap, MAX44000_REG_CFG_RX,
+ MAX44000_CFG_RX_ALSTIM_MASK,
+ val << MAX44000_CFG_RX_ALSTIM_SHIFT);
+}
+
+static int max44000_read_alspga(struct max44000_data *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, MAX44000_REG_CFG_RX, &val);
+ if (ret < 0)
+ return ret;
+ return (val & MAX44000_CFG_RX_ALSPGA_MASK) >> MAX44000_CFG_RX_ALSPGA_SHIFT;
+}
+
+static int max44000_write_alspga(struct max44000_data *data, int val)
+{
+ return regmap_write_bits(data->regmap, MAX44000_REG_CFG_RX,
+ MAX44000_CFG_RX_ALSPGA_MASK,
+ val << MAX44000_CFG_RX_ALSPGA_SHIFT);
+}
+
+static int max44000_read_alsval(struct max44000_data *data)
+{
+ u16 regval;
+ int alstim, ret;
+
+ ret = regmap_bulk_read(data->regmap, MAX44000_REG_ALS_DATA_HI,
+ &regval, sizeof(regval));
+ if (ret < 0)
+ return ret;
+ alstim = ret = max44000_read_alstim(data);
+ if (ret < 0)
+ return ret;
+
+ regval = be16_to_cpu(regval);
+
+ /*
+ * Overflow is explained on datasheet page 17.
+ *
+ * It's a warning that either the G or IR channel has become saturated
+ * and that the value in the register is likely incorrect.
+ *
+ * The recommendation is to change the scale (ALSPGA).
+ * The driver just returns the max representable value.
+ */
+ if (regval & MAX44000_ALSDATA_OVERFLOW)
+ return 0x3FFF;
+
+ return regval << MAX44000_ALSTIM_SHIFT(alstim);
+}
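Worked numbers for the ALSTIM compensation described above, using values from this file's own tables:

/*
 * alstim = 3 selects the shortest integration time (1.5625 ms), which
 * costs 6 bits of resolution, and MAX44000_ALSTIM_SHIFT(3) = 6:
 *
 *   raw register value 0x00ff  ->  returned value 0x00ff << 6 = 0x3fc0
 *
 * so the same light level reads back the same at every ALSTIM setting,
 * and IIO_CHAN_INFO_SCALE only has to track ALSPGA.
 */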
+
+static int max44000_write_led_current_raw(struct max44000_data *data, int val)
+{
+ /* Maybe we should clamp the value instead? */
+ if (val < 0 || val > MAX44000_LED_CURRENT_MAX)
+ return -ERANGE;
+ if (val >= 8)
+ val += 4;
+ return regmap_write_bits(data->regmap, MAX44000_REG_CFG_TX,
+ MAX44000_LED_CURRENT_MASK, val);
+}
+
+static int max44000_read_led_current_raw(struct max44000_data *data)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(data->regmap, MAX44000_REG_CFG_TX, &regval);
+ if (ret < 0)
+ return ret;
+ regval &= MAX44000_LED_CURRENT_MASK;
+ if (regval >= 8)
+ regval -= 4;
+ return regval;
+}
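The +4/-4 adjustment around register code 8 deserves a worked example (derived from the two functions above):

/*
 * Write: raw 10  ->  10 >= 8, so register code 10 + 4 = 14
 * Read:  code 14 ->  14 >= 8, so raw value 14 - 4 = 10
 *
 * Register codes 8..11 are therefore never produced by this mapping;
 * valid raw values 0..11 land on codes 0..7 and 12..15.
 */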
+
+static int max44000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct max44000_data *data = iio_priv(indio_dev);
+ int alstim, alspga;
+ unsigned int regval;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ mutex_lock(&data->lock);
+ ret = max44000_read_alsval(data);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+
+ case IIO_PROXIMITY:
+ mutex_lock(&data->lock);
+ ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, &regval);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = regval;
+ return IIO_VAL_INT;
+
+ case IIO_CURRENT:
+ mutex_lock(&data->lock);
+ ret = max44000_read_led_current_raw(data);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_CURRENT:
+ /* Output register is in 10s of milliamps */
+ *val = 10;
+ return IIO_VAL_INT;
+
+ case IIO_LIGHT:
+ mutex_lock(&data->lock);
+ alspga = ret = max44000_read_alspga(data);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+
+ /* Avoid negative shifts */
+ *val = (1 << MAX44000_ALSPGA_MAX_SHIFT);
+ *val2 = MAX44000_ALS_TO_LUX_DEFAULT_FRACTION_LOG2
+ + MAX44000_ALSPGA_MAX_SHIFT
+ - max44000_alspga_shift[alspga];
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_CHAN_INFO_INT_TIME:
+ mutex_lock(&data->lock);
+ alstim = ret = max44000_read_alstim(data);
+ mutex_unlock(&data->lock);
+
+ if (ret < 0)
+ return ret;
+ *val = 0;
+ *val2 = max44000_int_time_avail_ns_array[alstim];
+ return IIO_VAL_INT_PLUS_NANO;
+
+ default:
+ return -EINVAL;
+ }
+}
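The IIO_VAL_FRACTIONAL_LOG2 return above means userspace sees val / 2^val2; plugging in the boundary ALSPGA settings shows the result matches the advertised scale list:

/*
 * ALSPGA = 0: val = 128, val2 = 5 + 7 - 0 = 12  ->  128 / 4096 = 0.03125
 * ALSPGA = 3: val = 128, val2 = 5 + 7 - 7 = 5   ->  128 / 32   = 4
 *
 * i.e. the first and last entries of max44000_scale_avail_str.
 */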
+
+static int max44000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct max44000_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (mask == IIO_CHAN_INFO_RAW && chan->type == IIO_CURRENT) {
+ mutex_lock(&data->lock);
+ ret = max44000_write_led_current_raw(data, val);
+ mutex_unlock(&data->lock);
+ return ret;
+ } else if (mask == IIO_CHAN_INFO_INT_TIME && chan->type == IIO_LIGHT) {
+ s64 valns = val * NSEC_PER_SEC + val2;
+ int alstim = find_closest_descending(valns,
+ max44000_int_time_avail_ns_array,
+ ARRAY_SIZE(max44000_int_time_avail_ns_array));
+ mutex_lock(&data->lock);
+ ret = max44000_write_alstim(data, alstim);
+ mutex_unlock(&data->lock);
+ return ret;
+ } else if (mask == IIO_CHAN_INFO_SCALE && chan->type == IIO_LIGHT) {
+ s64 valus = val * USEC_PER_SEC + val2;
+ int alspga = find_closest(valus,
+ max44000_scale_avail_ulux_array,
+ ARRAY_SIZE(max44000_scale_avail_ulux_array));
+ mutex_lock(&data->lock);
+ ret = max44000_write_alspga(data, alspga);
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+static int max44000_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ if (mask == IIO_CHAN_INFO_INT_TIME && chan->type == IIO_LIGHT)
+ return IIO_VAL_INT_PLUS_NANO;
+ else if (mask == IIO_CHAN_INFO_SCALE && chan->type == IIO_LIGHT)
+ return IIO_VAL_INT_PLUS_MICRO;
+ else
+ return IIO_VAL_INT;
+}
+
+static IIO_CONST_ATTR(illuminance_integration_time_available, max44000_int_time_avail_str);
+static IIO_CONST_ATTR(illuminance_scale_available, max44000_scale_avail_str);
+
+static struct attribute *max44000_attributes[] = {
+ &iio_const_attr_illuminance_integration_time_available.dev_attr.attr,
+ &iio_const_attr_illuminance_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group max44000_attribute_group = {
+ .attrs = max44000_attributes,
+};
+
+static const struct iio_info max44000_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = max44000_read_raw,
+ .write_raw = max44000_write_raw,
+ .write_raw_get_fmt = max44000_write_raw_get_fmt,
+ .attrs = &max44000_attribute_group,
+};
+
+static bool max44000_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX44000_REG_STATUS:
+ case MAX44000_REG_CFG_MAIN:
+ case MAX44000_REG_CFG_RX:
+ case MAX44000_REG_CFG_TX:
+ case MAX44000_REG_ALS_DATA_HI:
+ case MAX44000_REG_ALS_DATA_LO:
+ case MAX44000_REG_PRX_DATA:
+ case MAX44000_REG_ALS_UPTHR_HI:
+ case MAX44000_REG_ALS_UPTHR_LO:
+ case MAX44000_REG_ALS_LOTHR_HI:
+ case MAX44000_REG_ALS_LOTHR_LO:
+ case MAX44000_REG_PST:
+ case MAX44000_REG_PRX_IND:
+ case MAX44000_REG_PRX_THR:
+ case MAX44000_REG_TRIM_GAIN_GREEN:
+ case MAX44000_REG_TRIM_GAIN_IR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool max44000_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX44000_REG_CFG_MAIN:
+ case MAX44000_REG_CFG_RX:
+ case MAX44000_REG_CFG_TX:
+ case MAX44000_REG_ALS_UPTHR_HI:
+ case MAX44000_REG_ALS_UPTHR_LO:
+ case MAX44000_REG_ALS_LOTHR_HI:
+ case MAX44000_REG_ALS_LOTHR_LO:
+ case MAX44000_REG_PST:
+ case MAX44000_REG_PRX_IND:
+ case MAX44000_REG_PRX_THR:
+ case MAX44000_REG_TRIM_GAIN_GREEN:
+ case MAX44000_REG_TRIM_GAIN_IR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool max44000_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX44000_REG_STATUS:
+ case MAX44000_REG_ALS_DATA_HI:
+ case MAX44000_REG_ALS_DATA_LO:
+ case MAX44000_REG_PRX_DATA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool max44000_precious_reg(struct device *dev, unsigned int reg)
+{
+ return reg == MAX44000_REG_STATUS;
+}
+
+static const struct regmap_config max44000_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = MAX44000_REG_PRX_DATA,
+ .readable_reg = max44000_readable_reg,
+ .writeable_reg = max44000_writeable_reg,
+ .volatile_reg = max44000_volatile_reg,
+ .precious_reg = max44000_precious_reg,
+
+ .use_single_rw = 1,
+ .cache_type = REGCACHE_RBTREE,
+};
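A small sketch of what this regmap configuration buys (the demo helper is hypothetical, not part of the patch): non-volatile registers are answered from the RBTREE cache, while the volatile data registers always hit the I2C bus:

static int max44000_cache_demo(struct max44000_data *data)
{
	unsigned int val;
	int ret;

	ret = regmap_read(data->regmap, MAX44000_REG_CFG_RX, &val); /* bus, fills cache */
	if (ret < 0)
		return ret;
	ret = regmap_read(data->regmap, MAX44000_REG_CFG_RX, &val); /* served from cache */
	if (ret < 0)
		return ret;
	/* volatile register: every read is a real bus transfer */
	return regmap_read(data->regmap, MAX44000_REG_PRX_DATA, &val);
}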
+
+static irqreturn_t max44000_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct max44000_data *data = iio_priv(indio_dev);
+ u16 buf[8]; /* 2x u16 + padding + 8 bytes timestamp */
+ int index = 0;
+ unsigned int regval;
+ int ret;
+
+ mutex_lock(&data->lock);
+ if (test_bit(MAX44000_SCAN_INDEX_ALS, indio_dev->active_scan_mask)) {
+ ret = max44000_read_alsval(data);
+ if (ret < 0)
+ goto out_unlock;
+ buf[index++] = ret;
+ }
+ if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) {
+ ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, &regval);
+ if (ret < 0)
+ goto out_unlock;
+ buf[index] = regval;
+ }
+ mutex_unlock(&data->lock);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+
+out_unlock:
+ mutex_unlock(&data->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static int max44000_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max44000_data *data;
+ struct iio_dev *indio_dev;
+ int ret, reg;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+ data = iio_priv(indio_dev);
+ data->regmap = devm_regmap_init_i2c(client, &max44000_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "regmap_init failed!\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ i2c_set_clientdata(client, indio_dev);
+ mutex_init(&data->lock);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &max44000_info;
+ indio_dev->name = MAX44000_DRV_NAME;
+ indio_dev->channels = max44000_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max44000_channels);
+
+ /*
+ * The device doesn't have a reset function so we just clear some
+ * important bits at probe time to ensure sane operation.
+ *
+ * Since we don't support interrupts/events the threshold values are
+ * not important. We also don't touch trim values.
+ */
+
+ /* Reset ALS scaling bits */
+ ret = regmap_write(data->regmap, MAX44000_REG_CFG_RX,
+ MAX44000_REG_CFG_RX_DEFAULT);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to write default CFG_RX: %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * By default the LED pulse used for the proximity sensor is disabled.
+ * Set a middle value so that we get some sort of valid data by default.
+ */
+ ret = max44000_write_led_current_raw(data, MAX44000_LED_CURRENT_DEFAULT);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to write init config: %d\n", ret);
+ return ret;
+ }
+
+ /* Reset CFG bits to ALS_PRX mode which allows easy reading of both values. */
+ reg = MAX44000_CFG_TRIM | MAX44000_CFG_MODE_ALS_PRX;
+ ret = regmap_write(data->regmap, MAX44000_REG_CFG_MAIN, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to write init config: %d\n", ret);
+ return ret;
+ }
+
+ /* Read status at least once to clear any stale interrupt bits. */
+ ret = regmap_read(data->regmap, MAX44000_REG_STATUS, &reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read init status: %d\n", ret);
+ return ret;
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL, max44000_trigger_handler, NULL);
+ if (ret < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ return ret;
+ }
+
+ return iio_device_register(indio_dev);
+}
+
+static int max44000_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id max44000_id[] = {
+ {"max44000", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max44000_id);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id max44000_acpi_match[] = {
+ {"MAX44000", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, max44000_acpi_match);
+#endif
+
+static struct i2c_driver max44000_driver = {
+ .driver = {
+ .name = MAX44000_DRV_NAME,
+ .acpi_match_table = ACPI_PTR(max44000_acpi_match),
+ },
+ .probe = max44000_probe,
+ .remove = max44000_remove,
+ .id_table = max44000_id,
+};
+
+module_i2c_driver(max44000_driver);
+
+MODULE_AUTHOR("Crestez Dan Leonard <leonard.crestez@intel.com>");
+MODULE_DESCRIPTION("MAX44000 Ambient and Infrared Proximity Sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 42d334ba6..9e847f8f4 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
-#include <linux/gpio/consumer.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 12731d6b8..57b108c30 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -806,8 +806,7 @@ static int tsl2563_probe(struct i2c_client *client,
return 0;
fail:
- cancel_delayed_work(&chip->poweroff_work);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&chip->poweroff_work);
return err;
}
diff --git a/drivers/iio/light/veml6070.c b/drivers/iio/light/veml6070.c
new file mode 100644
index 000000000..bc1c4cb78
--- /dev/null
+++ b/drivers/iio/light/veml6070.c
@@ -0,0 +1,218 @@
+/*
+ * veml6070.c - Support for Vishay VEML6070 UV A light sensor
+ *
+ * Copyright 2016 Peter Meerwald-Stadler <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for VEML6070 (7-bit I2C slave addresses 0x38 and 0x39)
+ *
+ * TODO: integration time, ACK signal
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define VEML6070_DRV_NAME "veml6070"
+
+#define VEML6070_ADDR_CONFIG_DATA_MSB 0x38 /* read: MSB data, write: config */
+#define VEML6070_ADDR_DATA_LSB 0x39 /* LSB data */
+
+#define VEML6070_COMMAND_ACK BIT(5) /* raise interrupt when over threshold */
+#define VEML6070_COMMAND_IT GENMASK(3, 2) /* bit mask integration time */
+#define VEML6070_COMMAND_RSRVD BIT(1) /* reserved, set to 1 */
+#define VEML6070_COMMAND_SD BIT(0) /* shutdown mode when set */
+
+#define VEML6070_IT_10 0x04 /* integration time 1x */
+
+struct veml6070_data {
+ struct i2c_client *client1;
+ struct i2c_client *client2;
+ u8 config;
+ struct mutex lock;
+};
+
+static int veml6070_read(struct veml6070_data *data)
+{
+ int ret;
+ u8 msb, lsb;
+
+ mutex_lock(&data->lock);
+
+ /* disable shutdown */
+ ret = i2c_smbus_write_byte(data->client1,
+ data->config & ~VEML6070_COMMAND_SD);
+ if (ret < 0)
+ goto out;
+
+ msleep(125 + 10); /* measurement takes up to 125 ms for IT 1x */
+
+ ret = i2c_smbus_read_byte(data->client2); /* read MSB, address 0x39 */
+ if (ret < 0)
+ goto out;
+ msb = ret;
+
+ ret = i2c_smbus_read_byte(data->client1); /* read LSB, address 0x38 */
+ if (ret < 0)
+ goto out;
+ lsb = ret;
+
+ /* shutdown again */
+ ret = i2c_smbus_write_byte(data->client1, data->config);
+ if (ret < 0)
+ goto out;
+
+ ret = (msb << 8) | lsb;
+
+out:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static const struct iio_chan_spec veml6070_channels[] = {
+ {
+ .type = IIO_INTENSITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_UV,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+ {
+ .type = IIO_UVINDEX,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }
+};
+
+static int veml6070_to_uv_index(unsigned val)
+{
+ /*
+ * conversion of raw UV intensity values to UV index depends on
+ * integration time (IT) and value of the resistor connected to
+ * the RSET pin (default: 270 kOhm)
+ */
+ unsigned uvi[11] = {
+ 187, 373, 560, /* low */
+ 746, 933, 1120, /* moderate */
+ 1308, 1494, /* high */
+ 1681, 1868, 2054}; /* very high */
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(uvi); i++)
+ if (val <= uvi[i])
+ return i;
+
+ return 11; /* extreme */
+}
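Tracing a few inputs through the table above:

/*
 * val = 150  -> 150 <= 187 at i = 0               -> UV index 0  (low)
 * val = 900  -> first entry >= 900 is 933 at i = 4 -> UV index 4  (moderate)
 * val = 2500 -> larger than every entry            -> UV index 11 (extreme)
 */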
+
+static int veml6070_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct veml6070_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = veml6070_read(data);
+ if (ret < 0)
+ return ret;
+ if (mask == IIO_CHAN_INFO_PROCESSED)
+ *val = veml6070_to_uv_index(ret);
+ else
+ *val = ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info veml6070_info = {
+ .read_raw = veml6070_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int veml6070_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct veml6070_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client1 = client;
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &veml6070_info;
+ indio_dev->channels = veml6070_channels;
+ indio_dev->num_channels = ARRAY_SIZE(veml6070_channels);
+ indio_dev->name = VEML6070_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ data->client2 = i2c_new_dummy(client->adapter, VEML6070_ADDR_DATA_LSB);
+ if (!data->client2) {
+ dev_err(&client->dev, "i2c device for second chip address failed\n");
+ return -ENODEV;
+ }
+
+ data->config = VEML6070_IT_10 | VEML6070_COMMAND_RSRVD |
+ VEML6070_COMMAND_SD;
+ ret = i2c_smbus_write_byte(data->client1, data->config);
+ if (ret < 0)
+ goto fail;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto fail;
+
+ return ret;
+
+fail:
+ i2c_unregister_device(data->client2);
+ return ret;
+}
+
+static int veml6070_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct veml6070_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ i2c_unregister_device(data->client2);
+
+ return 0;
+}
+
+static const struct i2c_device_id veml6070_id[] = {
+ { "veml6070", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, veml6070_id);
+
+static struct i2c_driver veml6070_driver = {
+ .driver = {
+ .name = VEML6070_DRV_NAME,
+ },
+ .probe = veml6070_probe,
+ .remove = veml6070_remove,
+ .id_table = veml6070_id,
+};
+
+module_i2c_driver(veml6070_driver);
+
+MODULE_AUTHOR("Peter Meerwald-Stadler <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Vishay VEML6070 UV A light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 021dc5361..84e6559cc 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -9,6 +9,8 @@ config AK8975
tristate "Asahi Kasei AK 3-Axis Magnetometer"
depends on I2C
depends on GPIOLIB || COMPILE_TEST
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Asahi Kasei AK8975, AK8963,
AK09911 or AK09912 3-Axis Magnetometer.
@@ -25,22 +27,41 @@ config AK09911
Deprecated: AK09911 is now supported by AK8975 driver.
config BMC150_MAGN
- tristate "Bosch BMC150 Magnetometer Driver"
- depends on I2C
- select REGMAP_I2C
+ tristate
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+
+config BMC150_MAGN_I2C
+ tristate "Bosch BMC150 I2C Magnetometer Driver"
+ depends on I2C
+ select BMC150_MAGN
+ select REGMAP_I2C
help
- Say yes here to build support for the BMC150 magnetometer.
+ Say yes here to build support for the BMC150 magnetometer with
+ I2C interface.
- Currently this only supports the device via an i2c interface.
+ This is a combo module with both accelerometer and magnetometer.
+ This driver is only implementing magnetometer part, which has
+ its own address and register map.
+
+ To compile this driver as a module, choose M here: the module will be
+ called bmc150_magn_i2c.
+
+config BMC150_MAGN_SPI
+ tristate "Bosch BMC150 SPI Magnetometer Driver"
+ depends on SPI
+ select BMC150_MAGN
+ select REGMAP_SPI
+ help
+ Say yes here to build support for the BMC150 magnetometer with
+ SPI interface.
This is a combo module with both accelerometer and magnetometer.
This driver is only implementing magnetometer part, which has
its own address and register map.
To compile this driver as a module, choose M here: the module will be
- called bmc150_magn.
+ called bmc150_magn_spi.
config MAG3110
tristate "Freescale MAG3110 3-Axis Magnetometer"
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index dd03fe524..92a745c9a 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -5,6 +5,9 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AK8975) += ak8975.o
obj-$(CONFIG_BMC150_MAGN) += bmc150_magn.o
+obj-$(CONFIG_BMC150_MAGN_I2C) += bmc150_magn_i2c.o
+obj-$(CONFIG_BMC150_MAGN_SPI) += bmc150_magn_spi.o
+
obj-$(CONFIG_MAG3110) += mag3110.o
obj-$(CONFIG_HID_SENSOR_MAGNETOMETER_3D) += hid-sensor-magn-3d.o
obj-$(CONFIG_MMC35240) += mmc35240.o
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 0e931a9a1..609a2c401 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -32,9 +32,17 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/acpi.h>
+#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/iio/magnetometer/ak8975.h>
+
/*
* Register definitions, as well as various shifts and masks to get at the
* individual fields of the registers.
@@ -361,7 +369,6 @@ static const struct ak_def ak_def_array[AK_MAX_TYPE] = {
struct ak8975_data {
struct i2c_client *client;
const struct ak_def *def;
- struct attribute_group attrs;
struct mutex lock;
u8 asa[3];
long raw_to_gauss[3];
@@ -370,8 +377,41 @@ struct ak8975_data {
wait_queue_head_t data_ready_queue;
unsigned long flags;
u8 cntl_cache;
+ struct iio_mount_matrix orientation;
+ struct regulator *vdd;
};
+/* Enable attached power regulator if any. */
+static int ak8975_power_on(struct i2c_client *client)
+{
+ const struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak8975_data *data = iio_priv(indio_dev);
+ int ret;
+
+ data->vdd = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR_OR_NULL(data->vdd)) {
+ ret = PTR_ERR(data->vdd);
+ if (ret == -ENODEV)
+ ret = 0;
+ } else {
+ ret = regulator_enable(data->vdd);
+ }
+
+ if (ret)
+ dev_err(&client->dev, "failed to enable Vdd supply: %d\n", ret);
+ return ret;
+}
+
+/* Disable attached power regulator if any. */
+static void ak8975_power_off(const struct i2c_client *client)
+{
+ const struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ const struct ak8975_data *data = iio_priv(indio_dev);
+
+ if (!IS_ERR_OR_NULL(data->vdd))
+ regulator_disable(data->vdd);
+}
+
/*
* Return 0 if the i2c device is the one we expect.
* return a negative error number otherwise
@@ -601,22 +641,15 @@ static int wait_conversion_complete_interrupt(struct ak8975_data *data)
return ret > 0 ? 0 : -ETIME;
}
-/*
- * Emits the raw flux value for the x, y, or z axis.
- */
-static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
+static int ak8975_start_read_axis(struct ak8975_data *data,
+ const struct i2c_client *client)
{
- struct ak8975_data *data = iio_priv(indio_dev);
- struct i2c_client *client = data->client;
- int ret;
-
- mutex_lock(&data->lock);
-
/* Set up the device for taking a sample. */
- ret = ak8975_set_mode(data, MODE_ONCE);
+ int ret = ak8975_set_mode(data, MODE_ONCE);
+
if (ret < 0) {
dev_err(&client->dev, "Error in setting operating mode\n");
- goto exit;
+ return ret;
}
/* Wait for the conversion to complete. */
@@ -627,7 +660,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
else
ret = wait_conversion_complete_polled(data);
if (ret < 0)
- goto exit;
+ return ret;
/* This will be executed only for non-interrupt based waiting case */
if (ret & data->def->ctrl_masks[ST1_DRDY]) {
@@ -635,32 +668,45 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
data->def->ctrl_regs[ST2]);
if (ret < 0) {
dev_err(&client->dev, "Error in reading ST2\n");
- goto exit;
+ return ret;
}
if (ret & (data->def->ctrl_masks[ST2_DERR] |
data->def->ctrl_masks[ST2_HOFL])) {
dev_err(&client->dev, "ST2 status error 0x%x\n", ret);
- ret = -EINVAL;
- goto exit;
+ return -EINVAL;
}
}
- /* Read the flux value from the appropriate register
- (the register is specified in the iio device attributes). */
- ret = i2c_smbus_read_word_data(client, data->def->data_regs[index]);
- if (ret < 0) {
- dev_err(&client->dev, "Read axis data fails\n");
+ return 0;
+}
+
+/* Retrieve raw flux value for one of the x, y, or z axis. */
+static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
+{
+ struct ak8975_data *data = iio_priv(indio_dev);
+ const struct i2c_client *client = data->client;
+ const struct ak_def *def = data->def;
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ ret = ak8975_start_read_axis(data, client);
+ if (ret)
+ goto exit;
+
+ ret = i2c_smbus_read_word_data(client, def->data_regs[index]);
+ if (ret < 0)
goto exit;
- }
mutex_unlock(&data->lock);
/* Clamp to valid range. */
- *val = clamp_t(s16, ret, -data->def->range, data->def->range);
+ *val = clamp_t(s16, ret, -def->range, def->range);
return IIO_VAL_INT;
exit:
mutex_unlock(&data->lock);
+ dev_err(&client->dev, "Error in reading axis\n");
return ret;
}
@@ -682,6 +728,18 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static const struct iio_mount_matrix *
+ak8975_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ return &((struct ak8975_data *)iio_priv(indio_dev))->orientation;
+}
+
+static const struct iio_chan_spec_ext_info ak8975_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, ak8975_get_mount_matrix),
+ { },
+};
+
#define AK8975_CHANNEL(axis, index) \
{ \
.type = IIO_MAGN, \
@@ -690,12 +748,23 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \
.address = index, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU \
+ }, \
+ .ext_info = ak8975_ext_info, \
}
static const struct iio_chan_spec ak8975_channels[] = {
AK8975_CHANNEL(X, 0), AK8975_CHANNEL(Y, 1), AK8975_CHANNEL(Z, 2),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
+static const unsigned long ak8975_scan_masks[] = { 0x7, 0 };
+
static const struct iio_info ak8975_info = {
.read_raw = &ak8975_read_raw,
.driver_module = THIS_MODULE,
@@ -724,6 +793,56 @@ static const char *ak8975_match_acpi_device(struct device *dev,
return dev_name(dev);
}
+static void ak8975_fill_buffer(struct iio_dev *indio_dev)
+{
+ struct ak8975_data *data = iio_priv(indio_dev);
+ const struct i2c_client *client = data->client;
+ const struct ak_def *def = data->def;
+ int ret;
+ s16 buff[8]; /* 3 x 16-bit axis values + padding + 64-bit timestamp */
+
+ mutex_lock(&data->lock);
+
+ ret = ak8975_start_read_axis(data, client);
+ if (ret)
+ goto unlock;
+
+ /*
+ * For each axis, read the flux value from the appropriate register
+ * (the register is specified in the iio device attributes).
+ */
+ ret = i2c_smbus_read_i2c_block_data_or_emulated(client,
+ def->data_regs[0],
+ 3 * sizeof(buff[0]),
+ (u8 *)buff);
+ if (ret < 0)
+ goto unlock;
+
+ mutex_unlock(&data->lock);
+
+ /* Clamp to valid range. */
+ buff[0] = clamp_t(s16, le16_to_cpu(buff[0]), -def->range, def->range);
+ buff[1] = clamp_t(s16, le16_to_cpu(buff[1]), -def->range, def->range);
+ buff[2] = clamp_t(s16, le16_to_cpu(buff[2]), -def->range, def->range);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buff, iio_get_time_ns());
+ return;
+
+unlock:
+ mutex_unlock(&data->lock);
+ dev_err(&client->dev, "Error in reading axes block\n");
+}
+
+static irqreturn_t ak8975_handle_trigger(int irq, void *p)
+{
+ const struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+
+ ak8975_fill_buffer(indio_dev);
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
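For reference, the exact layout of the 16-byte scan pushed by ak8975_fill_buffer():

/*
 * buff[0..2]  X, Y, Z flux values (3 x s16 = 6 bytes)
 * buff[3]     padding so the timestamp lands on an 8-byte boundary
 * buff[4..7]  64-bit timestamp, filled in by
 *             iio_push_to_buffers_with_timestamp()
 */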
+
static int ak8975_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -733,10 +852,12 @@ static int ak8975_probe(struct i2c_client *client,
int err;
const char *name = NULL;
enum asahi_compass_chipset chipset = AK_MAX_TYPE;
+ const struct ak8975_platform_data *pdata =
+ dev_get_platdata(&client->dev);
/* Grab and set up the supplied GPIO. */
- if (client->dev.platform_data)
- eoc_gpio = *(int *)(client->dev.platform_data);
+ if (pdata)
+ eoc_gpio = pdata->eoc_gpio;
else if (client->dev.of_node)
eoc_gpio = of_get_gpio(client->dev.of_node, 0);
else
@@ -770,13 +891,24 @@ static int ak8975_probe(struct i2c_client *client,
data->eoc_gpio = eoc_gpio;
data->eoc_irq = 0;
+ if (!pdata) {
+ err = of_iio_read_mount_matrix(&client->dev,
+ "mount-matrix",
+ &data->orientation);
+ if (err)
+ return err;
+ } else
+ data->orientation = pdata->orientation;
+
/* id will be NULL when enumerated via ACPI */
if (id) {
chipset = (enum asahi_compass_chipset)(id->driver_data);
name = id->name;
- } else if (ACPI_HANDLE(&client->dev))
+ } else if (ACPI_HANDLE(&client->dev)) {
name = ak8975_match_acpi_device(&client->dev, &chipset);
- else
+ if (!name)
+ return -ENODEV;
+ } else
return -ENOSYS;
if (chipset >= AK_MAX_TYPE) {
@@ -786,10 +918,15 @@ static int ak8975_probe(struct i2c_client *client,
}
data->def = &ak_def_array[chipset];
+
+ err = ak8975_power_on(client);
+ if (err)
+ return err;
+
err = ak8975_who_i_am(client, data->def->type);
if (err < 0) {
dev_err(&client->dev, "Unexpected device\n");
- return err;
+ goto power_off;
}
dev_dbg(&client->dev, "Asahi compass chip %s\n", name);
@@ -797,7 +934,7 @@ static int ak8975_probe(struct i2c_client *client,
err = ak8975_setup(client);
if (err < 0) {
dev_err(&client->dev, "%s initialization fails\n", name);
- return err;
+ goto power_off;
}
mutex_init(&data->lock);
@@ -805,9 +942,41 @@ static int ak8975_probe(struct i2c_client *client,
indio_dev->channels = ak8975_channels;
indio_dev->num_channels = ARRAY_SIZE(ak8975_channels);
indio_dev->info = &ak8975_info;
+ indio_dev->available_scan_masks = ak8975_scan_masks;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->name = name;
- return devm_iio_device_register(&client->dev, indio_dev);
+
+ err = iio_triggered_buffer_setup(indio_dev, NULL, ak8975_handle_trigger,
+ NULL);
+ if (err) {
+ dev_err(&client->dev, "triggered buffer setup failed\n");
+ goto power_off;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err) {
+ dev_err(&client->dev, "device register failed\n");
+ goto cleanup_buffer;
+ }
+
+ return 0;
+
+cleanup_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+power_off:
+ ak8975_power_off(client);
+ return err;
+}
+
+static int ak8975_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ ak8975_power_off(client);
+
+ return 0;
}
static const struct i2c_device_id ak8975_id[] = {
@@ -841,6 +1010,7 @@ static struct i2c_driver ak8975_driver = {
.acpi_match_table = ACPI_PTR(ak_acpi_match),
},
.probe = ak8975_probe,
+ .remove = ak8975_remove,
.id_table = ak8975_id,
};
module_i2c_driver(ak8975_driver);
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index ffcb75ea6..d104fb8d9 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
@@ -35,6 +34,8 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>
+#include "bmc150_magn.h"
+
#define BMC150_MAGN_DRV_NAME "bmc150_magn"
#define BMC150_MAGN_IRQ_NAME "bmc150_magn_event"
@@ -135,7 +136,7 @@ struct bmc150_magn_trim_regs {
} __packed;
struct bmc150_magn_data {
- struct i2c_client *client;
+ struct device *dev;
/*
* 1. Protect this structure.
* 2. Serialize sequences that power on/off the device and access HW.
@@ -147,6 +148,7 @@ struct bmc150_magn_data {
struct iio_trigger *dready_trig;
bool dready_trigger_on;
int max_odr;
+ int irq;
};
static const struct {
@@ -216,7 +218,7 @@ static bool bmc150_magn_is_volatile_reg(struct device *dev, unsigned int reg)
}
}
-static const struct regmap_config bmc150_magn_regmap_config = {
+const struct regmap_config bmc150_magn_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -226,6 +228,7 @@ static const struct regmap_config bmc150_magn_regmap_config = {
.writeable_reg = bmc150_magn_is_writeable_reg,
.volatile_reg = bmc150_magn_is_volatile_reg,
};
+EXPORT_SYMBOL(bmc150_magn_regmap_config);
static int bmc150_magn_set_power_mode(struct bmc150_magn_data *data,
enum bmc150_magn_power_modes mode,
@@ -264,17 +267,17 @@ static int bmc150_magn_set_power_state(struct bmc150_magn_data *data, bool on)
int ret;
if (on) {
- ret = pm_runtime_get_sync(&data->client->dev);
+ ret = pm_runtime_get_sync(data->dev);
} else {
- pm_runtime_mark_last_busy(&data->client->dev);
- ret = pm_runtime_put_autosuspend(&data->client->dev);
+ pm_runtime_mark_last_busy(data->dev);
+ ret = pm_runtime_put_autosuspend(data->dev);
}
if (ret < 0) {
- dev_err(&data->client->dev,
+ dev_err(data->dev,
"failed to change power state to %d\n", on);
if (on)
- pm_runtime_put_noidle(&data->client->dev);
+ pm_runtime_put_noidle(data->dev);
return ret;
}
@@ -351,7 +354,7 @@ static int bmc150_magn_set_max_odr(struct bmc150_magn_data *data, int rep_xy,
/* the maximum selectable read-out frequency from datasheet */
max_odr = 1000000 / (145 * rep_xy + 500 * rep_z + 980);
if (odr > max_odr) {
- dev_err(&data->client->dev,
+ dev_err(data->dev,
"Can't set oversampling with sampling freq %d\n",
odr);
return -EINVAL;
@@ -685,27 +688,27 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND,
false);
if (ret < 0) {
- dev_err(&data->client->dev,
+ dev_err(data->dev,
"Failed to bring up device from suspend mode\n");
return ret;
}
ret = regmap_read(data->regmap, BMC150_MAGN_REG_CHIP_ID, &chip_id);
if (ret < 0) {
- dev_err(&data->client->dev, "Failed reading chip id\n");
+ dev_err(data->dev, "Failed reading chip id\n");
goto err_poweroff;
}
if (chip_id != BMC150_MAGN_CHIP_ID_VAL) {
- dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id);
+ dev_err(data->dev, "Invalid chip id 0x%x\n", chip_id);
ret = -ENODEV;
goto err_poweroff;
}
- dev_dbg(&data->client->dev, "Chip id %x\n", chip_id);
+ dev_dbg(data->dev, "Chip id %x\n", chip_id);
preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET];
ret = bmc150_magn_set_odr(data, preset.odr);
if (ret < 0) {
- dev_err(&data->client->dev, "Failed to set ODR to %d\n",
+ dev_err(data->dev, "Failed to set ODR to %d\n",
preset.odr);
goto err_poweroff;
}
@@ -713,7 +716,7 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
ret = regmap_write(data->regmap, BMC150_MAGN_REG_REP_XY,
BMC150_MAGN_REPXY_TO_REGVAL(preset.rep_xy));
if (ret < 0) {
- dev_err(&data->client->dev, "Failed to set REP XY to %d\n",
+ dev_err(data->dev, "Failed to set REP XY to %d\n",
preset.rep_xy);
goto err_poweroff;
}
@@ -721,7 +724,7 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
ret = regmap_write(data->regmap, BMC150_MAGN_REG_REP_Z,
BMC150_MAGN_REPZ_TO_REGVAL(preset.rep_z));
if (ret < 0) {
- dev_err(&data->client->dev, "Failed to set REP Z to %d\n",
+ dev_err(data->dev, "Failed to set REP Z to %d\n",
preset.rep_z);
goto err_poweroff;
}
@@ -734,7 +737,7 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_NORMAL,
true);
if (ret < 0) {
- dev_err(&data->client->dev, "Failed to power on device\n");
+ dev_err(data->dev, "Failed to power on device\n");
goto err_poweroff;
}
@@ -843,41 +846,33 @@ static const char *bmc150_magn_match_acpi_device(struct device *dev)
return dev_name(dev);
}
-static int bmc150_magn_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
+ int irq, const char *name)
{
struct bmc150_magn_data *data;
struct iio_dev *indio_dev;
- const char *name = NULL;
int ret;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
- i2c_set_clientdata(client, indio_dev);
- data->client = client;
+ dev_set_drvdata(dev, indio_dev);
+ data->regmap = regmap;
+ data->irq = irq;
+ data->dev = dev;
- if (id)
- name = id->name;
- else if (ACPI_HANDLE(&client->dev))
- name = bmc150_magn_match_acpi_device(&client->dev);
- else
- return -ENOSYS;
+ if (!name && ACPI_HANDLE(dev))
+ name = bmc150_magn_match_acpi_device(dev);
mutex_init(&data->mutex);
- data->regmap = devm_regmap_init_i2c(client, &bmc150_magn_regmap_config);
- if (IS_ERR(data->regmap)) {
- dev_err(&client->dev, "Failed to allocate register map\n");
- return PTR_ERR(data->regmap);
- }
ret = bmc150_magn_init(data);
if (ret < 0)
return ret;
- indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.parent = dev;
indio_dev->channels = bmc150_magn_channels;
indio_dev->num_channels = ARRAY_SIZE(bmc150_magn_channels);
indio_dev->available_scan_masks = bmc150_magn_scan_masks;
@@ -885,35 +880,34 @@ static int bmc150_magn_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &bmc150_magn_info;
- if (client->irq > 0) {
- data->dready_trig = devm_iio_trigger_alloc(&client->dev,
+ if (irq > 0) {
+ data->dready_trig = devm_iio_trigger_alloc(dev,
"%s-dev%d",
indio_dev->name,
indio_dev->id);
if (!data->dready_trig) {
ret = -ENOMEM;
- dev_err(&client->dev, "iio trigger alloc failed\n");
+ dev_err(dev, "iio trigger alloc failed\n");
goto err_poweroff;
}
- data->dready_trig->dev.parent = &client->dev;
+ data->dready_trig->dev.parent = dev;
data->dready_trig->ops = &bmc150_magn_trigger_ops;
iio_trigger_set_drvdata(data->dready_trig, indio_dev);
ret = iio_trigger_register(data->dready_trig);
if (ret) {
- dev_err(&client->dev, "iio trigger register failed\n");
+ dev_err(dev, "iio trigger register failed\n");
goto err_poweroff;
}
- ret = request_threaded_irq(client->irq,
+ ret = request_threaded_irq(irq,
iio_trigger_generic_data_rdy_poll,
NULL,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
BMC150_MAGN_IRQ_NAME,
data->dready_trig);
if (ret < 0) {
- dev_err(&client->dev, "request irq %d failed\n",
- client->irq);
+ dev_err(dev, "request irq %d failed\n", irq);
goto err_trigger_unregister;
}
}
@@ -923,34 +917,33 @@ static int bmc150_magn_probe(struct i2c_client *client,
bmc150_magn_trigger_handler,
&bmc150_magn_buffer_setup_ops);
if (ret < 0) {
- dev_err(&client->dev,
- "iio triggered buffer setup failed\n");
+ dev_err(dev, "iio triggered buffer setup failed\n");
goto err_free_irq;
}
- ret = pm_runtime_set_active(&client->dev);
+ ret = pm_runtime_set_active(dev);
if (ret)
goto err_buffer_cleanup;
- pm_runtime_enable(&client->dev);
- pm_runtime_set_autosuspend_delay(&client->dev,
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev,
BMC150_MAGN_AUTO_SUSPEND_DELAY_MS);
- pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_use_autosuspend(dev);
ret = iio_device_register(indio_dev);
if (ret < 0) {
- dev_err(&client->dev, "unable to register iio device\n");
+ dev_err(dev, "unable to register iio device\n");
goto err_buffer_cleanup;
}
- dev_dbg(&indio_dev->dev, "Registered device %s\n", name);
+ dev_dbg(dev, "Registered device %s\n", name);
return 0;
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
err_free_irq:
- if (client->irq > 0)
- free_irq(client->irq, data->dready_trig);
+ if (irq > 0)
+ free_irq(irq, data->dready_trig);
err_trigger_unregister:
if (data->dready_trig)
iio_trigger_unregister(data->dready_trig);
@@ -958,22 +951,23 @@ err_poweroff:
bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
return ret;
}
+EXPORT_SYMBOL(bmc150_magn_probe);
-static int bmc150_magn_remove(struct i2c_client *client)
+int bmc150_magn_remove(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmc150_magn_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
iio_triggered_buffer_cleanup(indio_dev);
- if (client->irq > 0)
- free_irq(data->client->irq, data->dready_trig);
+ if (data->irq > 0)
+ free_irq(data->irq, data->dready_trig);
if (data->dready_trig)
iio_trigger_unregister(data->dready_trig);
@@ -984,11 +978,12 @@ static int bmc150_magn_remove(struct i2c_client *client)
return 0;
}
+EXPORT_SYMBOL(bmc150_magn_remove);
#ifdef CONFIG_PM
static int bmc150_magn_runtime_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmc150_magn_data *data = iio_priv(indio_dev);
int ret;
@@ -997,7 +992,7 @@ static int bmc150_magn_runtime_suspend(struct device *dev)
true);
mutex_unlock(&data->mutex);
if (ret < 0) {
- dev_err(&data->client->dev, "powering off device failed\n");
+ dev_err(dev, "powering off device failed\n");
return ret;
}
return 0;
@@ -1008,7 +1003,7 @@ static int bmc150_magn_runtime_suspend(struct device *dev)
*/
static int bmc150_magn_runtime_resume(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmc150_magn_data *data = iio_priv(indio_dev);
return bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_NORMAL,
@@ -1019,7 +1014,7 @@ static int bmc150_magn_runtime_resume(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static int bmc150_magn_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmc150_magn_data *data = iio_priv(indio_dev);
int ret;
@@ -1033,7 +1028,7 @@ static int bmc150_magn_suspend(struct device *dev)
static int bmc150_magn_resume(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmc150_magn_data *data = iio_priv(indio_dev);
int ret;
@@ -1046,38 +1041,13 @@ static int bmc150_magn_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops bmc150_magn_pm_ops = {
+const struct dev_pm_ops bmc150_magn_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(bmc150_magn_suspend, bmc150_magn_resume)
SET_RUNTIME_PM_OPS(bmc150_magn_runtime_suspend,
bmc150_magn_runtime_resume, NULL)
};
-
-static const struct acpi_device_id bmc150_magn_acpi_match[] = {
- {"BMC150B", 0},
- {"BMC156B", 0},
- {},
-};
-MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
-
-static const struct i2c_device_id bmc150_magn_id[] = {
- {"bmc150_magn", 0},
- {"bmc156_magn", 0},
- {},
-};
-MODULE_DEVICE_TABLE(i2c, bmc150_magn_id);
-
-static struct i2c_driver bmc150_magn_driver = {
- .driver = {
- .name = BMC150_MAGN_DRV_NAME,
- .acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
- .pm = &bmc150_magn_pm_ops,
- },
- .probe = bmc150_magn_probe,
- .remove = bmc150_magn_remove,
- .id_table = bmc150_magn_id,
-};
-module_i2c_driver(bmc150_magn_driver);
+EXPORT_SYMBOL(bmc150_magn_pm_ops);
MODULE_AUTHOR("Irina Tirdea <irina.tirdea@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("BMC150 magnetometer driver");
+MODULE_DESCRIPTION("BMC150 magnetometer core driver");
diff --git a/drivers/iio/magnetometer/bmc150_magn.h b/drivers/iio/magnetometer/bmc150_magn.h
new file mode 100644
index 000000000..9a8e26812
--- /dev/null
+++ b/drivers/iio/magnetometer/bmc150_magn.h
@@ -0,0 +1,11 @@
+#ifndef _BMC150_MAGN_H_
+#define _BMC150_MAGN_H_
+
+extern const struct regmap_config bmc150_magn_regmap_config;
+extern const struct dev_pm_ops bmc150_magn_pm_ops;
+
+int bmc150_magn_probe(struct device *dev, struct regmap *regmap, int irq,
+ const char *name);
+int bmc150_magn_remove(struct device *dev);
+
+#endif /* _BMC150_MAGN_H_ */
diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c
new file mode 100644
index 000000000..eddc7f0d0
--- /dev/null
+++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c
@@ -0,0 +1,77 @@
+/*
+ * 3-axis magnetometer driver supporting the following I2C Bosch-Sensortec chips:
+ * - BMC150
+ * - BMC156
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/regmap.h>
+
+#include "bmc150_magn.h"
+
+static int bmc150_magn_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ const char *name = NULL;
+
+ regmap = devm_regmap_init_i2c(client, &bmc150_magn_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to initialize i2c regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ if (id)
+ name = id->name;
+
+ return bmc150_magn_probe(&client->dev, regmap, client->irq, name);
+}
+
+static int bmc150_magn_i2c_remove(struct i2c_client *client)
+{
+ return bmc150_magn_remove(&client->dev);
+}
+
+static const struct acpi_device_id bmc150_magn_acpi_match[] = {
+ {"BMC150B", 0},
+ {"BMC156B", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
+
+static const struct i2c_device_id bmc150_magn_i2c_id[] = {
+ {"bmc150_magn", 0},
+ {"bmc156_magn", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, bmc150_magn_i2c_id);
+
+static struct i2c_driver bmc150_magn_driver = {
+ .driver = {
+ .name = "bmc150_magn_i2c",
+ .acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
+ .pm = &bmc150_magn_pm_ops,
+ },
+ .probe = bmc150_magn_i2c_probe,
+ .remove = bmc150_magn_i2c_remove,
+ .id_table = bmc150_magn_i2c_id,
+};
+module_i2c_driver(bmc150_magn_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("BMC150 I2C magnetometer driver");
diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c
new file mode 100644
index 000000000..c4c738a07
--- /dev/null
+++ b/drivers/iio/magnetometer/bmc150_magn_spi.c
@@ -0,0 +1,68 @@
+/*
+ * 3-axis magnetometer driver supporting the following SPI Bosch-Sensortec chips:
+ * - BMC150
+ * - BMC156
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <linux/regmap.h>
+
+#include "bmc150_magn.h"
+
+static int bmc150_magn_spi_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ regmap = devm_regmap_init_spi(spi, &bmc150_magn_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+ return bmc150_magn_probe(&spi->dev, regmap, spi->irq, id->name);
+}
+
+static int bmc150_magn_spi_remove(struct spi_device *spi)
+{
+ bmc150_magn_remove(&spi->dev);
+
+ return 0;
+}
+
+static const struct spi_device_id bmc150_magn_spi_id[] = {
+ {"bmc150_magn", 0},
+ {"bmc156_magn", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, bmc150_magn_spi_id);
+
+static const struct acpi_device_id bmc150_magn_acpi_match[] = {
+ {"BMC150B", 0},
+ {"BMC156B", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
+
+static struct spi_driver bmc150_magn_spi_driver = {
+ .probe = bmc150_magn_spi_probe,
+ .remove = bmc150_magn_spi_remove,
+ .id_table = bmc150_magn_spi_id,
+ .driver = {
+ .acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
+ .name = "bmc150_magn_spi",
+ },
+};
+module_spi_driver(bmc150_magn_spi_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com");
+MODULE_DESCRIPTION("BMC150 magnetometer SPI driver");
+MODULE_LICENSE("GPL v2");
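SPI devices cannot be instantiated from sysfs the way I2C devices can, so a board wiring the BMC150 over SPI registers it before the controller probes. A hedged sketch, with bus number, chip select, clock rate and IRQ as placeholders rather than values from this patch:

	static struct spi_board_info bmc150_magn_spi_info[] __initdata = {
		{
			.modalias	= "bmc150_magn",
			.max_speed_hz	= 1000000,	/* assumed safe rate */
			.bus_num	= 0,		/* hypothetical */
			.chip_select	= 0,
			.irq		= 42,		/* hypothetical */
		},
	};

	/* from the board init path */
	spi_register_board_info(bmc150_magn_spi_info,
				ARRAY_SIZE(bmc150_magn_spi_info));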
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
index ecd3bd0a9..0a9e8fadf 100644
--- a/drivers/iio/magnetometer/st_magn_buffer.c
+++ b/drivers/iio/magnetometer/st_magn_buffer.c
@@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
int st_magn_allocate_ring(struct iio_dev *indio_dev)
{
- return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ return iio_triggered_buffer_setup(indio_dev, NULL,
&st_sensors_trigger_handler, &st_magn_buffer_setup_ops);
}
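With NULL passed as the pollfunc top half, nothing records a timestamp at interrupt time any more, so the shared st_sensors_trigger_handler is expected to take its own. A minimal sketch of that pattern (not the st_sensors code itself), using the iio_get_time_ns() of this kernel, which takes no argument:

	/* at the end of a threaded trigger handler, once data is read */
	iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;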
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 501f858df..8250fc322 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -484,6 +484,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.mask_int1 = ST_MAGN_3_DRDY_INT_MASK,
.addr_ihl = ST_MAGN_3_IHL_IRQ_ADDR,
.mask_ihl = ST_MAGN_3_IHL_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_MAGN_3_MULTIREAD_BIT,
.bootime = 2,
@@ -571,6 +572,7 @@ static const struct iio_info magn_info = {
static const struct iio_trigger_ops st_magn_trigger_ops = {
.owner = THIS_MODULE,
.set_trigger_state = ST_MAGN_TRIGGER_SET_STATE,
+ .validate_device = st_sensors_validate_device,
};
#define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops)
#else
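The validate_device hook keeps a trigger owned by one ST sensor from being attached to a different IIO device. Its logic is essentially an identity check; a sketch of what st_sensors_validate_device is expected to do (the real helper lives in the st_sensors core):

	static int st_sensors_validate_device(struct iio_trigger *trig,
					      struct iio_dev *indio_dev)
	{
		struct iio_dev *indio = iio_trigger_get_drvdata(trig);

		if (indio != indio_dev)
			return -EINVAL;

		return 0;
	}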
diff --git a/drivers/iio/potentiometer/Kconfig b/drivers/iio/potentiometer/Kconfig
index ffc735c16..6acb23810 100644
--- a/drivers/iio/potentiometer/Kconfig
+++ b/drivers/iio/potentiometer/Kconfig
@@ -5,6 +5,34 @@
menu "Digital potentiometers"
+config DS1803
+ tristate "Maxim Integrated DS1803 Digital Potentiometer driver"
+ depends on I2C
+ help
+ Say yes here to build support for the Maxim Integrated DS1803
+ digital potentiometer chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ds1803.
+
+config MCP4131
+ tristate "Microchip MCP413X/414X/415X/416X/423X/424X/425X/426X Digital Potentiometer driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Microchip
+ MCP4131, MCP4132,
+ MCP4141, MCP4142,
+ MCP4151, MCP4152,
+ MCP4161, MCP4162,
+ MCP4231, MCP4232,
+ MCP4241, MCP4242,
+ MCP4251, MCP4252,
+ MCP4261, MCP4262,
+ digital potentiometer chips.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mcp4131.
+
config MCP4531
tristate "Microchip MCP45xx/MCP46xx Digital Potentiometer driver"
depends on I2C
diff --git a/drivers/iio/potentiometer/Makefile b/drivers/iio/potentiometer/Makefile
index b563b492b..6007faa2f 100644
--- a/drivers/iio/potentiometer/Makefile
+++ b/drivers/iio/potentiometer/Makefile
@@ -3,5 +3,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_DS1803) += ds1803.o
+obj-$(CONFIG_MCP4131) += mcp4131.o
obj-$(CONFIG_MCP4531) += mcp4531.o
obj-$(CONFIG_TPL0102) += tpl0102.o
diff --git a/drivers/iio/potentiometer/ds1803.c b/drivers/iio/potentiometer/ds1803.c
new file mode 100644
index 000000000..fb9e2a337
--- /dev/null
+++ b/drivers/iio/potentiometer/ds1803.c
@@ -0,0 +1,173 @@
+/*
+ * Maxim Integrated DS1803 digital potentiometer driver
+ * Copyright (c) 2016 Slawomir Stepien
+ *
+ * Datasheet: https://datasheets.maximintegrated.com/en/ds/DS1803.pdf
+ *
+ * DEVID #Wipers #Positions Resistor Opts (kOhm) i2c address
+ * ds1803 2 256 10, 50, 100 0101xxx
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#define DS1803_MAX_POS 255
+#define DS1803_WRITE(chan) (0xa8 | ((chan) + 1))
+
+enum ds1803_type {
+ DS1803_010,
+ DS1803_050,
+ DS1803_100,
+};
+
+struct ds1803_cfg {
+ int kohms;
+};
+
+static const struct ds1803_cfg ds1803_cfg[] = {
+ [DS1803_010] = { .kohms = 10, },
+ [DS1803_050] = { .kohms = 50, },
+ [DS1803_100] = { .kohms = 100, },
+};
+
+struct ds1803_data {
+ struct i2c_client *client;
+ const struct ds1803_cfg *cfg;
+};
+
+#define DS1803_CHANNEL(ch) { \
+ .type = IIO_RESISTANCE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (ch), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec ds1803_channels[] = {
+ DS1803_CHANNEL(0),
+ DS1803_CHANNEL(1),
+};
+
+static int ds1803_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ds1803_data *data = iio_priv(indio_dev);
+ int pot = chan->channel;
+ int ret;
+ u8 result[indio_dev->num_channels];
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = i2c_master_recv(data->client, result,
+ indio_dev->num_channels);
+ if (ret < 0)
+ return ret;
+
+ *val = result[pot];
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1000 * data->cfg->kohms;
+ *val2 = DS1803_MAX_POS;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return -EINVAL;
+}
+
+static int ds1803_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct ds1803_data *data = iio_priv(indio_dev);
+ int pot = chan->channel;
+
+ if (val2 != 0)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (val > DS1803_MAX_POS || val < 0)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return i2c_smbus_write_byte_data(data->client, DS1803_WRITE(pot), val);
+}
+
+static const struct iio_info ds1803_info = {
+ .read_raw = ds1803_read_raw,
+ .write_raw = ds1803_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int ds1803_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ds1803_data *data;
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, indio_dev);
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ data->cfg = &ds1803_cfg[id->driver_data];
+
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &ds1803_info;
+ indio_dev->channels = ds1803_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ds1803_channels);
+ indio_dev->name = client->name;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id ds1803_dt_ids[] = {
+ { .compatible = "maxim,ds1803-010", .data = &ds1803_cfg[DS1803_010] },
+ { .compatible = "maxim,ds1803-050", .data = &ds1803_cfg[DS1803_050] },
+ { .compatible = "maxim,ds1803-100", .data = &ds1803_cfg[DS1803_100] },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ds1803_dt_ids);
+#endif /* CONFIG_OF */
+
+static const struct i2c_device_id ds1803_id[] = {
+ { "ds1803-010", DS1803_010 },
+ { "ds1803-050", DS1803_050 },
+ { "ds1803-100", DS1803_100 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ds1803_id);
+
+static struct i2c_driver ds1803_driver = {
+ .driver = {
+ .name = "ds1803",
+ .of_match_table = of_match_ptr(ds1803_dt_ids),
+ },
+ .probe = ds1803_probe,
+ .id_table = ds1803_id,
+};
+
+module_i2c_driver(ds1803_driver);
+
+MODULE_AUTHOR("Slawomir Stepien <sst@poczta.fm>");
+MODULE_DESCRIPTION("DS1803 digital potentiometer");
+MODULE_LICENSE("GPL v2");
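With IIO_VAL_FRACTIONAL the reported scale is (1000 * kohms) / 255, so userspace recovers the wiper resistance in ohms as raw * scale. A worked sketch of the arithmetic for the 10 kOhm variant:

	/* mirrors the SCALE case in ds1803_read_raw() */
	static unsigned int ds1803_raw_to_ohms(unsigned int raw,
					       unsigned int kohms)
	{
		return raw * kohms * 1000 / DS1803_MAX_POS;
	}

	/* e.g. raw = 128 on a ds1803-010: 128 * 10000 / 255 = 5019 ohms */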
diff --git a/drivers/iio/potentiometer/mcp4131.c b/drivers/iio/potentiometer/mcp4131.c
new file mode 100644
index 000000000..4e7e2c6c5
--- /dev/null
+++ b/drivers/iio/potentiometer/mcp4131.c
@@ -0,0 +1,494 @@
+/*
+ * Industrial I/O driver for Microchip digital potentiometers
+ *
+ * Copyright (c) 2016 Slawomir Stepien
+ * Based on: Peter Rosin's code from mcp4531.c
+ *
+ * Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/22060b.pdf
+ *
+ * DEVID #Wipers #Positions Resistor Opts (kOhm)
+ * mcp4131 1 129 5, 10, 50, 100
+ * mcp4132 1 129 5, 10, 50, 100
+ * mcp4141 1 129 5, 10, 50, 100
+ * mcp4142 1 129 5, 10, 50, 100
+ * mcp4151 1 257 5, 10, 50, 100
+ * mcp4152 1 257 5, 10, 50, 100
+ * mcp4161 1 257 5, 10, 50, 100
+ * mcp4162 1 257 5, 10, 50, 100
+ * mcp4231 2 129 5, 10, 50, 100
+ * mcp4232 2 129 5, 10, 50, 100
+ * mcp4241 2 129 5, 10, 50, 100
+ * mcp4242 2 129 5, 10, 50, 100
+ * mcp4251 2 257 5, 10, 50, 100
+ * mcp4252 2 257 5, 10, 50, 100
+ * mcp4261 2 257 5, 10, 50, 100
+ * mcp4262 2 257 5, 10, 50, 100
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+/*
+ * TODO:
+ * 1. Write wiper setting to EEPROM for EEPROM capable models.
+ */
+
+#include <linux/cache.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+#define MCP4131_WRITE (0x00 << 2)
+#define MCP4131_READ (0x03 << 2)
+
+#define MCP4131_WIPER_SHIFT 4
+#define MCP4131_CMDERR(r) ((r[0]) & 0x02)
+#define MCP4131_RAW(r) ((r[0]) == 0xff ? 0x100 : (r[1]))
+
+struct mcp4131_cfg {
+ int wipers;
+ int max_pos;
+ int kohms;
+};
+
+enum mcp4131_type {
+ MCP413x_502 = 0,
+ MCP413x_103,
+ MCP413x_503,
+ MCP413x_104,
+ MCP414x_502,
+ MCP414x_103,
+ MCP414x_503,
+ MCP414x_104,
+ MCP415x_502,
+ MCP415x_103,
+ MCP415x_503,
+ MCP415x_104,
+ MCP416x_502,
+ MCP416x_103,
+ MCP416x_503,
+ MCP416x_104,
+ MCP423x_502,
+ MCP423x_103,
+ MCP423x_503,
+ MCP423x_104,
+ MCP424x_502,
+ MCP424x_103,
+ MCP424x_503,
+ MCP424x_104,
+ MCP425x_502,
+ MCP425x_103,
+ MCP425x_503,
+ MCP425x_104,
+ MCP426x_502,
+ MCP426x_103,
+ MCP426x_503,
+ MCP426x_104,
+};
+
+static const struct mcp4131_cfg mcp4131_cfg[] = {
+ [MCP413x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
+ [MCP413x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
+ [MCP413x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
+ [MCP413x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
+ [MCP414x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
+ [MCP414x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
+ [MCP414x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
+ [MCP414x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
+ [MCP415x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
+ [MCP415x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
+ [MCP415x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
+ [MCP415x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
+ [MCP416x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
+ [MCP416x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
+ [MCP416x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
+ [MCP416x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
+ [MCP423x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
+ [MCP423x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
+ [MCP423x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
+ [MCP423x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
+ [MCP424x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
+ [MCP424x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
+ [MCP424x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
+ [MCP424x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
+ [MCP425x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
+ [MCP425x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
+ [MCP425x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
+ [MCP425x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
+ [MCP426x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
+ [MCP426x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
+ [MCP426x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
+ [MCP426x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
+};
+
+struct mcp4131_data {
+ struct spi_device *spi;
+ const struct mcp4131_cfg *cfg;
+ struct mutex lock;
+ u8 buf[2] ____cacheline_aligned;
+};
+
+#define MCP4131_CHANNEL(ch) { \
+ .type = IIO_RESISTANCE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (ch), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec mcp4131_channels[] = {
+ MCP4131_CHANNEL(0),
+ MCP4131_CHANNEL(1),
+};
+
+static int mcp4131_read(struct spi_device *spi, void *buf, size_t len)
+{
+ struct spi_transfer t = {
+ .tx_buf = buf, /* Send the 4-bit address, 2-bit command and 10 data bits */
+ .rx_buf = buf,
+ .len = len,
+ };
+ struct spi_message m;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+
+ return spi_sync(spi, &m);
+}
+
+static int mcp4131_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int err;
+ struct mcp4131_data *data = iio_priv(indio_dev);
+ int address = chan->channel;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->lock);
+
+ data->buf[0] = (address << MCP4131_WIPER_SHIFT) | MCP4131_READ;
+ data->buf[1] = 0;
+
+ err = mcp4131_read(data->spi, data->buf, 2);
+ if (err) {
+ mutex_unlock(&data->lock);
+ return err;
+ }
+
+ /* Error, bad address/command combination */
+ if (!MCP4131_CMDERR(data->buf)) {
+ mutex_unlock(&data->lock);
+ return -EIO;
+ }
+
+ *val = MCP4131_RAW(data->buf);
+ mutex_unlock(&data->lock);
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1000 * data->cfg->kohms;
+ *val2 = data->cfg->max_pos;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return -EINVAL;
+}
+
+static int mcp4131_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int err;
+ struct mcp4131_data *data = iio_priv(indio_dev);
+ int address = chan->channel << MCP4131_WIPER_SHIFT;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (val > data->cfg->max_pos || val < 0)
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&data->lock);
+
+ /* address already carries the wiper shift from above */
+ data->buf[0] = address | MCP4131_WRITE | (val >> 8);
+ data->buf[1] = val & 0xFF; /* 8 bits here */
+
+ err = spi_write(data->spi, data->buf, 2);
+ mutex_unlock(&data->lock);
+
+ return err;
+}
+
+static const struct iio_info mcp4131_info = {
+ .read_raw = mcp4131_read_raw,
+ .write_raw = mcp4131_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int mcp4131_probe(struct spi_device *spi)
+{
+ int err;
+ struct device *dev = &spi->dev;
+ unsigned long devid = spi_get_device_id(spi)->driver_data;
+ struct mcp4131_data *data;
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+ data->spi = spi;
+ data->cfg = &mcp4131_cfg[devid];
+
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &mcp4131_info;
+ indio_dev->channels = mcp4131_channels;
+ indio_dev->num_channels = data->cfg->wipers;
+ indio_dev->name = spi_get_device_id(spi)->name;
+
+ err = devm_iio_device_register(dev, indio_dev);
+ if (err) {
+ dev_err(&spi->dev, "Unable to register %s\n", indio_dev->name);
+ return err;
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id mcp4131_dt_ids[] = {
+ { .compatible = "microchip,mcp4131-502",
+ .data = &mcp4131_cfg[MCP413x_502] },
+ { .compatible = "microchip,mcp4131-103",
+ .data = &mcp4131_cfg[MCP413x_103] },
+ { .compatible = "microchip,mcp4131-503",
+ .data = &mcp4131_cfg[MCP413x_503] },
+ { .compatible = "microchip,mcp4131-104",
+ .data = &mcp4131_cfg[MCP413x_104] },
+ { .compatible = "microchip,mcp4132-502",
+ .data = &mcp4131_cfg[MCP413x_502] },
+ { .compatible = "microchip,mcp4132-103",
+ .data = &mcp4131_cfg[MCP413x_103] },
+ { .compatible = "microchip,mcp4132-503",
+ .data = &mcp4131_cfg[MCP413x_503] },
+ { .compatible = "microchip,mcp4132-104",
+ .data = &mcp4131_cfg[MCP413x_104] },
+ { .compatible = "microchip,mcp4141-502",
+ .data = &mcp4131_cfg[MCP414x_502] },
+ { .compatible = "microchip,mcp4141-103",
+ .data = &mcp4131_cfg[MCP414x_103] },
+ { .compatible = "microchip,mcp4141-503",
+ .data = &mcp4131_cfg[MCP414x_503] },
+ { .compatible = "microchip,mcp4141-104",
+ .data = &mcp4131_cfg[MCP414x_104] },
+ { .compatible = "microchip,mcp4142-502",
+ .data = &mcp4131_cfg[MCP414x_502] },
+ { .compatible = "microchip,mcp4142-103",
+ .data = &mcp4131_cfg[MCP414x_103] },
+ { .compatible = "microchip,mcp4142-503",
+ .data = &mcp4131_cfg[MCP414x_503] },
+ { .compatible = "microchip,mcp4142-104",
+ .data = &mcp4131_cfg[MCP414x_104] },
+ { .compatible = "microchip,mcp4151-502",
+ .data = &mcp4131_cfg[MCP415x_502] },
+ { .compatible = "microchip,mcp4151-103",
+ .data = &mcp4131_cfg[MCP415x_103] },
+ { .compatible = "microchip,mcp4151-503",
+ .data = &mcp4131_cfg[MCP415x_503] },
+ { .compatible = "microchip,mcp4151-104",
+ .data = &mcp4131_cfg[MCP415x_104] },
+ { .compatible = "microchip,mcp4152-502",
+ .data = &mcp4131_cfg[MCP415x_502] },
+ { .compatible = "microchip,mcp4152-103",
+ .data = &mcp4131_cfg[MCP415x_103] },
+ { .compatible = "microchip,mcp4152-503",
+ .data = &mcp4131_cfg[MCP415x_503] },
+ { .compatible = "microchip,mcp4152-104",
+ .data = &mcp4131_cfg[MCP415x_104] },
+ { .compatible = "microchip,mcp4161-502",
+ .data = &mcp4131_cfg[MCP416x_502] },
+ { .compatible = "microchip,mcp4161-103",
+ .data = &mcp4131_cfg[MCP416x_103] },
+ { .compatible = "microchip,mcp4161-503",
+ .data = &mcp4131_cfg[MCP416x_503] },
+ { .compatible = "microchip,mcp4161-104",
+ .data = &mcp4131_cfg[MCP416x_104] },
+ { .compatible = "microchip,mcp4162-502",
+ .data = &mcp4131_cfg[MCP416x_502] },
+ { .compatible = "microchip,mcp4162-103",
+ .data = &mcp4131_cfg[MCP416x_103] },
+ { .compatible = "microchip,mcp4162-503",
+ .data = &mcp4131_cfg[MCP416x_503] },
+ { .compatible = "microchip,mcp4162-104",
+ .data = &mcp4131_cfg[MCP416x_104] },
+ { .compatible = "microchip,mcp4231-502",
+ .data = &mcp4131_cfg[MCP423x_502] },
+ { .compatible = "microchip,mcp4231-103",
+ .data = &mcp4131_cfg[MCP423x_103] },
+ { .compatible = "microchip,mcp4231-503",
+ .data = &mcp4131_cfg[MCP423x_503] },
+ { .compatible = "microchip,mcp4231-104",
+ .data = &mcp4131_cfg[MCP423x_104] },
+ { .compatible = "microchip,mcp4232-502",
+ .data = &mcp4131_cfg[MCP423x_502] },
+ { .compatible = "microchip,mcp4232-103",
+ .data = &mcp4131_cfg[MCP423x_103] },
+ { .compatible = "microchip,mcp4232-503",
+ .data = &mcp4131_cfg[MCP423x_503] },
+ { .compatible = "microchip,mcp4232-104",
+ .data = &mcp4131_cfg[MCP423x_104] },
+ { .compatible = "microchip,mcp4241-502",
+ .data = &mcp4131_cfg[MCP424x_502] },
+ { .compatible = "microchip,mcp4241-103",
+ .data = &mcp4131_cfg[MCP424x_103] },
+ { .compatible = "microchip,mcp4241-503",
+ .data = &mcp4131_cfg[MCP424x_503] },
+ { .compatible = "microchip,mcp4241-104",
+ .data = &mcp4131_cfg[MCP424x_104] },
+ { .compatible = "microchip,mcp4242-502",
+ .data = &mcp4131_cfg[MCP424x_502] },
+ { .compatible = "microchip,mcp4242-103",
+ .data = &mcp4131_cfg[MCP424x_103] },
+ { .compatible = "microchip,mcp4242-503",
+ .data = &mcp4131_cfg[MCP424x_503] },
+ { .compatible = "microchip,mcp4242-104",
+ .data = &mcp4131_cfg[MCP424x_104] },
+ { .compatible = "microchip,mcp4251-502",
+ .data = &mcp4131_cfg[MCP425x_502] },
+ { .compatible = "microchip,mcp4251-103",
+ .data = &mcp4131_cfg[MCP425x_103] },
+ { .compatible = "microchip,mcp4251-503",
+ .data = &mcp4131_cfg[MCP425x_503] },
+ { .compatible = "microchip,mcp4251-104",
+ .data = &mcp4131_cfg[MCP425x_104] },
+ { .compatible = "microchip,mcp4252-502",
+ .data = &mcp4131_cfg[MCP425x_502] },
+ { .compatible = "microchip,mcp4252-103",
+ .data = &mcp4131_cfg[MCP425x_103] },
+ { .compatible = "microchip,mcp4252-503",
+ .data = &mcp4131_cfg[MCP425x_503] },
+ { .compatible = "microchip,mcp4252-104",
+ .data = &mcp4131_cfg[MCP425x_104] },
+ { .compatible = "microchip,mcp4261-502",
+ .data = &mcp4131_cfg[MCP426x_502] },
+ { .compatible = "microchip,mcp4261-103",
+ .data = &mcp4131_cfg[MCP426x_103] },
+ { .compatible = "microchip,mcp4261-503",
+ .data = &mcp4131_cfg[MCP426x_503] },
+ { .compatible = "microchip,mcp4261-104",
+ .data = &mcp4131_cfg[MCP426x_104] },
+ { .compatible = "microchip,mcp4262-502",
+ .data = &mcp4131_cfg[MCP426x_502] },
+ { .compatible = "microchip,mcp4262-103",
+ .data = &mcp4131_cfg[MCP426x_103] },
+ { .compatible = "microchip,mcp4262-503",
+ .data = &mcp4131_cfg[MCP426x_503] },
+ { .compatible = "microchip,mcp4262-104",
+ .data = &mcp4131_cfg[MCP426x_104] },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mcp4131_dt_ids);
+#endif /* CONFIG_OF */
+
+static const struct spi_device_id mcp4131_id[] = {
+ { "mcp4131-502", MCP413x_502 },
+ { "mcp4131-103", MCP413x_103 },
+ { "mcp4131-503", MCP413x_503 },
+ { "mcp4131-104", MCP413x_104 },
+ { "mcp4132-502", MCP413x_502 },
+ { "mcp4132-103", MCP413x_103 },
+ { "mcp4132-503", MCP413x_503 },
+ { "mcp4132-104", MCP413x_104 },
+ { "mcp4141-502", MCP414x_502 },
+ { "mcp4141-103", MCP414x_103 },
+ { "mcp4141-503", MCP414x_503 },
+ { "mcp4141-104", MCP414x_104 },
+ { "mcp4142-502", MCP414x_502 },
+ { "mcp4142-103", MCP414x_103 },
+ { "mcp4142-503", MCP414x_503 },
+ { "mcp4142-104", MCP414x_104 },
+ { "mcp4151-502", MCP415x_502 },
+ { "mcp4151-103", MCP415x_103 },
+ { "mcp4151-503", MCP415x_503 },
+ { "mcp4151-104", MCP415x_104 },
+ { "mcp4152-502", MCP415x_502 },
+ { "mcp4152-103", MCP415x_103 },
+ { "mcp4152-503", MCP415x_503 },
+ { "mcp4152-104", MCP415x_104 },
+ { "mcp4161-502", MCP416x_502 },
+ { "mcp4161-103", MCP416x_103 },
+ { "mcp4161-503", MCP416x_503 },
+ { "mcp4161-104", MCP416x_104 },
+ { "mcp4162-502", MCP416x_502 },
+ { "mcp4162-103", MCP416x_103 },
+ { "mcp4162-503", MCP416x_503 },
+ { "mcp4162-104", MCP416x_104 },
+ { "mcp4231-502", MCP423x_502 },
+ { "mcp4231-103", MCP423x_103 },
+ { "mcp4231-503", MCP423x_503 },
+ { "mcp4231-104", MCP423x_104 },
+ { "mcp4232-502", MCP423x_502 },
+ { "mcp4232-103", MCP423x_103 },
+ { "mcp4232-503", MCP423x_503 },
+ { "mcp4232-104", MCP423x_104 },
+ { "mcp4241-502", MCP424x_502 },
+ { "mcp4241-103", MCP424x_103 },
+ { "mcp4241-503", MCP424x_503 },
+ { "mcp4241-104", MCP424x_104 },
+ { "mcp4242-502", MCP424x_502 },
+ { "mcp4242-103", MCP424x_103 },
+ { "mcp4242-503", MCP424x_503 },
+ { "mcp4242-104", MCP424x_104 },
+ { "mcp4251-502", MCP425x_502 },
+ { "mcp4251-103", MCP425x_103 },
+ { "mcp4251-503", MCP425x_503 },
+ { "mcp4251-104", MCP425x_104 },
+ { "mcp4252-502", MCP425x_502 },
+ { "mcp4252-103", MCP425x_103 },
+ { "mcp4252-503", MCP425x_503 },
+ { "mcp4252-104", MCP425x_104 },
+ { "mcp4261-502", MCP426x_502 },
+ { "mcp4261-103", MCP426x_103 },
+ { "mcp4261-503", MCP426x_503 },
+ { "mcp4261-104", MCP426x_104 },
+ { "mcp4262-502", MCP426x_502 },
+ { "mcp4262-103", MCP426x_103 },
+ { "mcp4262-503", MCP426x_503 },
+ { "mcp4262-104", MCP426x_104 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, mcp4131_id);
+
+static struct spi_driver mcp4131_driver = {
+ .driver = {
+ .name = "mcp4131",
+ .of_match_table = of_match_ptr(mcp4131_dt_ids),
+ },
+ .probe = mcp4131_probe,
+ .id_table = mcp4131_id,
+};
+
+module_spi_driver(mcp4131_driver);
+
+MODULE_AUTHOR("Slawomir Stepien <sst@poczta.fm>");
+MODULE_DESCRIPTION("MCP4131 digital potentiometer");
+MODULE_LICENSE("GPL v2");
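Reads and writes both move exactly two bytes because the MCP413x command frame packs a 4-bit wiper address, a 2-bit command and 10 data bits into one 16-bit word. A sketch of the write framing, using the macros defined above:

	/* buf[0] = AAAA CC D9 D8, buf[1] = D7..D0 */
	static void mcp4131_fill_write_frame(u8 *buf, unsigned int wiper,
					     unsigned int val)
	{
		buf[0] = (wiper << MCP4131_WIPER_SHIFT) | MCP4131_WRITE |
			 (val >> 8);
		buf[1] = val & 0xff;
	}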
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index 0db67fe14..3b72e1a59 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -79,7 +79,7 @@ static const struct mcp4531_cfg mcp4531_cfg[] = {
struct mcp4531_data {
struct i2c_client *client;
- unsigned long devid;
+ const struct mcp4531_cfg *cfg;
};
#define MCP4531_CHANNEL(ch) { \
@@ -113,8 +113,8 @@ static int mcp4531_read_raw(struct iio_dev *indio_dev,
*val = ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = 1000 * mcp4531_cfg[data->devid].kohms;
- *val2 = mcp4531_cfg[data->devid].max_pos;
+ *val = 1000 * data->cfg->kohms;
+ *val2 = data->cfg->max_pos;
return IIO_VAL_FRACTIONAL;
}
@@ -130,7 +130,7 @@ static int mcp4531_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (val > mcp4531_cfg[data->devid].max_pos || val < 0)
+ if (val > data->cfg->max_pos || val < 0)
return -EINVAL;
break;
default:
@@ -152,7 +152,6 @@ static int mcp4531_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- unsigned long devid = id->driver_data;
struct mcp4531_data *data;
struct iio_dev *indio_dev;
@@ -168,12 +167,12 @@ static int mcp4531_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->devid = devid;
+ data->cfg = &mcp4531_cfg[id->driver_data];
indio_dev->dev.parent = dev;
indio_dev->info = &mcp4531_info;
indio_dev->channels = mcp4531_channels;
- indio_dev->num_channels = mcp4531_cfg[devid].wipers;
+ indio_dev->num_channels = data->cfg->wipers;
indio_dev->name = client->name;
return devm_iio_device_register(dev, indio_dev);
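Caching a cfg pointer instead of a table index matches how the OF tables in the new ds1803 and mcp4131 drivers already carry .data = &cfg[...], even though all three probes still resolve the cfg through the bus device id. A hedged sketch of the devicetree-first lookup such a probe could grow, using of_device_get_match_data() from <linux/of_device.h>:

	const struct mcp4531_cfg *cfg;

	cfg = of_device_get_match_data(dev);	/* NULL without an OF match */
	if (!cfg)
		cfg = &mcp4531_cfg[id->driver_data];
	data->cfg = cfg;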
diff --git a/drivers/iio/potentiometer/tpl0102.c b/drivers/iio/potentiometer/tpl0102.c
index 313124b6f..5c304d42d 100644
--- a/drivers/iio/potentiometer/tpl0102.c
+++ b/drivers/iio/potentiometer/tpl0102.c
@@ -118,7 +118,7 @@ static int tpl0102_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 31c0e1fd2..cda9f128f 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -6,12 +6,13 @@
menu "Pressure sensors"
config BMP280
- tristate "Bosch Sensortec BMP280 pressure sensor driver"
+ tristate "Bosch Sensortec BMP180 and BMP280 pressure sensor driver"
depends on I2C
+ depends on !(BMP085_I2C=y || BMP085_I2C=m)
select REGMAP_I2C
help
- Say yes here to build support for Bosch Sensortec BMP280
- pressure and temperature sensor.
+ Say yes here to build support for Bosch Sensortec BMP180 and BMP280
+ pressure and temperature sensors.
To compile this driver as a module, choose M here: the module
will be called bmp280.
@@ -30,6 +31,17 @@ config HID_SENSOR_PRESS
To compile this driver as a module, choose M here: the module
will be called hid-sensor-press.
+config HP03
+ tristate "Hope RF HP03 temperature and pressure sensor driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say yes here to build support for the Hope RF HP03 pressure and
+ temperature sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hp03.
+
config MPL115
tristate
@@ -148,4 +160,14 @@ config T5403
To compile this driver as a module, choose M here: the module
will be called t5403.
+config HP206C
+ tristate "HOPERF HP206C precision barometer and altimeter sensor"
+ depends on I2C
+ help
+ Say yes here to build support for the HOPERF HP206C precision
+ barometer and altimeter sensor.
+
+ This driver can also be built as a module. If so, the module will
+ be called hp206c.
+
endmenu
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index d336af14f..17d6e7afa 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -5,6 +5,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_BMP280) += bmp280.o
obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
+obj-$(CONFIG_HP03) += hp03.o
obj-$(CONFIG_MPL115) += mpl115.o
obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o
obj-$(CONFIG_MPL115_SPI) += mpl115_spi.o
@@ -17,6 +18,7 @@ obj-$(CONFIG_IIO_ST_PRESS) += st_pressure.o
st_pressure-y := st_pressure_core.o
st_pressure-$(CONFIG_IIO_BUFFER) += st_pressure_buffer.o
obj-$(CONFIG_T5403) += t5403.o
+obj-$(CONFIG_HP206C) += hp206c.o
obj-$(CONFIG_IIO_ST_PRESS_I2C) += st_pressure_i2c.o
obj-$(CONFIG_IIO_ST_PRESS_SPI) += st_pressure_spi.o
diff --git a/drivers/iio/pressure/bmp280.c b/drivers/iio/pressure/bmp280.c
index a2602d8dd..724452d61 100644
--- a/drivers/iio/pressure/bmp280.c
+++ b/drivers/iio/pressure/bmp280.c
@@ -1,12 +1,15 @@
/*
* Copyright (c) 2014 Intel Corporation
*
- * Driver for Bosch Sensortec BMP280 digital pressure sensor.
+ * Driver for Bosch Sensortec BMP180 and BMP280 digital pressure sensor.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
+ * Datasheet:
+ * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP180-DS000-121.pdf
+ * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP280-DS001-12.pdf
*/
#define pr_fmt(fmt) "bmp280: " fmt
@@ -15,9 +18,11 @@
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/regmap.h>
+#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+/* BMP280 specific registers */
#define BMP280_REG_TEMP_XLSB 0xFC
#define BMP280_REG_TEMP_LSB 0xFB
#define BMP280_REG_TEMP_MSB 0xFA
@@ -26,10 +31,7 @@
#define BMP280_REG_PRESS_MSB 0xF7
#define BMP280_REG_CONFIG 0xF5
-#define BMP280_REG_CTRL_MEAS 0xF4
#define BMP280_REG_STATUS 0xF3
-#define BMP280_REG_RESET 0xE0
-#define BMP280_REG_ID 0xD0
#define BMP280_REG_COMP_TEMP_START 0x88
#define BMP280_COMP_TEMP_REG_COUNT 6
@@ -46,25 +48,49 @@
#define BMP280_OSRS_TEMP_MASK (BIT(7) | BIT(6) | BIT(5))
#define BMP280_OSRS_TEMP_SKIP 0
-#define BMP280_OSRS_TEMP_1X BIT(5)
-#define BMP280_OSRS_TEMP_2X BIT(6)
-#define BMP280_OSRS_TEMP_4X (BIT(6) | BIT(5))
-#define BMP280_OSRS_TEMP_8X BIT(7)
-#define BMP280_OSRS_TEMP_16X (BIT(7) | BIT(5))
+#define BMP280_OSRS_TEMP_X(osrs_t) ((osrs_t) << 5)
+#define BMP280_OSRS_TEMP_1X BMP280_OSRS_TEMP_X(1)
+#define BMP280_OSRS_TEMP_2X BMP280_OSRS_TEMP_X(2)
+#define BMP280_OSRS_TEMP_4X BMP280_OSRS_TEMP_X(3)
+#define BMP280_OSRS_TEMP_8X BMP280_OSRS_TEMP_X(4)
+#define BMP280_OSRS_TEMP_16X BMP280_OSRS_TEMP_X(5)
#define BMP280_OSRS_PRESS_MASK (BIT(4) | BIT(3) | BIT(2))
#define BMP280_OSRS_PRESS_SKIP 0
-#define BMP280_OSRS_PRESS_1X BIT(2)
-#define BMP280_OSRS_PRESS_2X BIT(3)
-#define BMP280_OSRS_PRESS_4X (BIT(3) | BIT(2))
-#define BMP280_OSRS_PRESS_8X BIT(4)
-#define BMP280_OSRS_PRESS_16X (BIT(4) | BIT(2))
+#define BMP280_OSRS_PRESS_X(osrs_p) ((osrs_p) << 2)
+#define BMP280_OSRS_PRESS_1X BMP280_OSRS_PRESS_X(1)
+#define BMP280_OSRS_PRESS_2X BMP280_OSRS_PRESS_X(2)
+#define BMP280_OSRS_PRESS_4X BMP280_OSRS_PRESS_X(3)
+#define BMP280_OSRS_PRESS_8X BMP280_OSRS_PRESS_X(4)
+#define BMP280_OSRS_PRESS_16X BMP280_OSRS_PRESS_X(5)
#define BMP280_MODE_MASK (BIT(1) | BIT(0))
#define BMP280_MODE_SLEEP 0
#define BMP280_MODE_FORCED BIT(0)
#define BMP280_MODE_NORMAL (BIT(1) | BIT(0))
+/* BMP180 specific registers */
+#define BMP180_REG_OUT_XLSB 0xF8
+#define BMP180_REG_OUT_LSB 0xF7
+#define BMP180_REG_OUT_MSB 0xF6
+
+#define BMP180_REG_CALIB_START 0xAA
+#define BMP180_REG_CALIB_COUNT 22
+
+#define BMP180_MEAS_SCO BIT(5)
+#define BMP180_MEAS_TEMP (0x0E | BMP180_MEAS_SCO)
+#define BMP180_MEAS_PRESS_X(oss) ((oss) << 6 | 0x14 | BMP180_MEAS_SCO)
+#define BMP180_MEAS_PRESS_1X BMP180_MEAS_PRESS_X(0)
+#define BMP180_MEAS_PRESS_2X BMP180_MEAS_PRESS_X(1)
+#define BMP180_MEAS_PRESS_4X BMP180_MEAS_PRESS_X(2)
+#define BMP180_MEAS_PRESS_8X BMP180_MEAS_PRESS_X(3)
+
+/* BMP180 and BMP280 common registers */
+#define BMP280_REG_CTRL_MEAS 0xF4
+#define BMP280_REG_RESET 0xE0
+#define BMP280_REG_ID 0xD0
+
+#define BMP180_CHIP_ID 0x55
#define BMP280_CHIP_ID 0x58
#define BMP280_SOFT_RESET_VAL 0xB6
@@ -72,6 +98,11 @@ struct bmp280_data {
struct i2c_client *client;
struct mutex lock;
struct regmap *regmap;
+ const struct bmp280_chip_info *chip_info;
+
+ /* log of base 2 of oversampling rate */
+ u8 oversampling_press;
+ u8 oversampling_temp;
/*
* Carryover value from temperature conversion, used in pressure
@@ -80,9 +111,23 @@ struct bmp280_data {
s32 t_fine;
};
+struct bmp280_chip_info {
+ const struct regmap_config *regmap_config;
+
+ const int *oversampling_temp_avail;
+ int num_oversampling_temp_avail;
+
+ const int *oversampling_press_avail;
+ int num_oversampling_press_avail;
+
+ int (*chip_config)(struct bmp280_data *);
+ int (*read_temp)(struct bmp280_data *, int *);
+ int (*read_press)(struct bmp280_data *, int *, int *);
+};
+
/*
* These enums are used for indexing into the array of compensation
- * parameters.
+ * parameters for BMP280.
*/
enum { T1, T2, T3 };
enum { P1, P2, P3, P4, P5, P6, P7, P8, P9 };
@@ -90,11 +135,13 @@ enum { P1, P2, P3, P4, P5, P6, P7, P8, P9 };
static const struct iio_chan_spec bmp280_channels[] = {
{
.type = IIO_PRESSURE,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
},
{
.type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
},
};
@@ -290,10 +337,25 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
case IIO_PRESSURE:
- ret = bmp280_read_press(data, val, val2);
+ ret = data->chip_info->read_press(data, val, val2);
break;
case IIO_TEMP:
- ret = bmp280_read_temp(data, val);
+ ret = data->chip_info->read_temp(data, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ *val = 1 << data->oversampling_press;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_TEMP:
+ *val = 1 << data->oversampling_temp;
+ ret = IIO_VAL_INT;
break;
default:
ret = -EINVAL;
@@ -310,22 +372,135 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
return ret;
}
+static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
+ int val)
+{
+ int i;
+ const int *avail = data->chip_info->oversampling_temp_avail;
+ const int n = data->chip_info->num_oversampling_temp_avail;
+
+ for (i = 0; i < n; i++) {
+ if (avail[i] == val) {
+ data->oversampling_temp = ilog2(val);
+
+ return data->chip_info->chip_config(data);
+ }
+ }
+ return -EINVAL;
+}
+
+static int bmp280_write_oversampling_ratio_press(struct bmp280_data *data,
+ int val)
+{
+ int i;
+ const int *avail = data->chip_info->oversampling_press_avail;
+ const int n = data->chip_info->num_oversampling_press_avail;
+
+ for (i = 0; i < n; i++) {
+ if (avail[i] == val) {
+ data->oversampling_press = ilog2(val);
+
+ return data->chip_info->chip_config(data);
+ }
+ }
+ return -EINVAL;
+}
+
+static int bmp280_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret = 0;
+ struct bmp280_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ mutex_lock(&data->lock);
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ ret = bmp280_write_oversampling_ratio_press(data, val);
+ break;
+ case IIO_TEMP:
+ ret = bmp280_write_oversampling_ratio_temp(data, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&data->lock);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t bmp280_show_avail(char *buf, const int *vals, const int n)
+{
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < n; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", vals[i]);
+
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t bmp280_show_temp_oversampling_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bmp280_data *data = iio_priv(dev_to_iio_dev(dev));
+
+ return bmp280_show_avail(buf, data->chip_info->oversampling_temp_avail,
+ data->chip_info->num_oversampling_temp_avail);
+}
+
+static ssize_t bmp280_show_press_oversampling_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bmp280_data *data = iio_priv(dev_to_iio_dev(dev));
+
+ return bmp280_show_avail(buf, data->chip_info->oversampling_press_avail,
+ data->chip_info->num_oversampling_press_avail);
+}
+
+static IIO_DEVICE_ATTR(in_temp_oversampling_ratio_available,
+ S_IRUGO, bmp280_show_temp_oversampling_avail, NULL, 0);
+
+static IIO_DEVICE_ATTR(in_pressure_oversampling_ratio_available,
+ S_IRUGO, bmp280_show_press_oversampling_avail, NULL, 0);
+
+static struct attribute *bmp280_attributes[] = {
+ &iio_dev_attr_in_temp_oversampling_ratio_available.dev_attr.attr,
+ &iio_dev_attr_in_pressure_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group bmp280_attrs_group = {
+ .attrs = bmp280_attributes,
+};
+
static const struct iio_info bmp280_info = {
.driver_module = THIS_MODULE,
.read_raw = &bmp280_read_raw,
+ .write_raw = &bmp280_write_raw,
+ .attrs = &bmp280_attrs_group,
};
-static int bmp280_chip_init(struct bmp280_data *data)
+static int bmp280_chip_config(struct bmp280_data *data)
{
int ret;
+ u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) |
+ BMP280_OSRS_PRESS_X(data->oversampling_press + 1);
ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS,
BMP280_OSRS_TEMP_MASK |
BMP280_OSRS_PRESS_MASK |
BMP280_MODE_MASK,
- BMP280_OSRS_TEMP_2X |
- BMP280_OSRS_PRESS_16X |
- BMP280_MODE_NORMAL);
+ osrs | BMP280_MODE_NORMAL);
if (ret < 0) {
dev_err(&data->client->dev,
"failed to write ctrl_meas register\n");
@@ -344,6 +519,317 @@ static int bmp280_chip_init(struct bmp280_data *data)
return ret;
}
+static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 };
+
+static const struct bmp280_chip_info bmp280_chip_info = {
+ .regmap_config = &bmp280_regmap_config,
+
+ .oversampling_temp_avail = bmp280_oversampling_avail,
+ .num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+
+ .oversampling_press_avail = bmp280_oversampling_avail,
+ .num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+
+ .chip_config = bmp280_chip_config,
+ .read_temp = bmp280_read_temp,
+ .read_press = bmp280_read_press,
+};
+
+static bool bmp180_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP280_REG_CTRL_MEAS:
+ case BMP280_REG_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool bmp180_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP180_REG_OUT_XLSB:
+ case BMP180_REG_OUT_LSB:
+ case BMP180_REG_OUT_MSB:
+ case BMP280_REG_CTRL_MEAS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config bmp180_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BMP180_REG_OUT_XLSB,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bmp180_is_writeable_reg,
+ .volatile_reg = bmp180_is_volatile_reg,
+};
+
+static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
+{
+ int ret;
+ static const int conversion_time_max[] = { 4500, 7500, 13500, 25500 };
+ unsigned int delay_us;
+ unsigned int ctrl;
+
+ ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas);
+ if (ret)
+ return ret;
+
+ if (ctrl_meas == BMP180_MEAS_TEMP)
+ delay_us = 4500;
+ else
+ delay_us = conversion_time_max[data->oversampling_press];
+
+ usleep_range(delay_us, delay_us + 1000);
+
+ ret = regmap_read(data->regmap, BMP280_REG_CTRL_MEAS, &ctrl);
+ if (ret)
+ return ret;
+
+ /* The value of this bit resets to "0" after the conversion is complete */
+ if (ctrl & BMP180_MEAS_SCO)
+ return -EIO;
+
+ return 0;
+}
+
+static int bmp180_read_adc_temp(struct bmp280_data *data, int *val)
+{
+ int ret;
+ __be16 tmp = 0;
+
+ ret = bmp180_measure(data, BMP180_MEAS_TEMP);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, (u8 *)&tmp, 2);
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(tmp);
+
+ return 0;
+}
+
+/*
+ * These enums are used for indexing into the array of calibration
+ * coefficients for BMP180.
+ */
+enum { AC1, AC2, AC3, AC4, AC5, AC6, B1, B2, MB, MC, MD };
+
+struct bmp180_calib {
+ s16 AC1;
+ s16 AC2;
+ s16 AC3;
+ u16 AC4;
+ u16 AC5;
+ u16 AC6;
+ s16 B1;
+ s16 B2;
+ s16 MB;
+ s16 MC;
+ s16 MD;
+};
+
+static int bmp180_read_calib(struct bmp280_data *data,
+ struct bmp180_calib *calib)
+{
+ int ret;
+ int i;
+ __be16 buf[BMP180_REG_CALIB_COUNT / 2];
+
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_CALIB_START, buf,
+ sizeof(buf));
+
+ if (ret < 0)
+ return ret;
+
+ /* None of the words has the value 0 or 0xFFFF */
+ for (i = 0; i < ARRAY_SIZE(buf); i++) {
+ if (buf[i] == cpu_to_be16(0) || buf[i] == cpu_to_be16(0xffff))
+ return -EIO;
+ }
+
+ calib->AC1 = be16_to_cpu(buf[AC1]);
+ calib->AC2 = be16_to_cpu(buf[AC2]);
+ calib->AC3 = be16_to_cpu(buf[AC3]);
+ calib->AC4 = be16_to_cpu(buf[AC4]);
+ calib->AC5 = be16_to_cpu(buf[AC5]);
+ calib->AC6 = be16_to_cpu(buf[AC6]);
+ calib->B1 = be16_to_cpu(buf[B1]);
+ calib->B2 = be16_to_cpu(buf[B2]);
+ calib->MB = be16_to_cpu(buf[MB]);
+ calib->MC = be16_to_cpu(buf[MC]);
+ calib->MD = be16_to_cpu(buf[MD]);
+
+ return 0;
+}
+
+/*
+ * Returns temperature in DegC, resolution is 0.1 DegC.
+ * t_fine carries fine temperature as global value.
+ *
+ * Taken from datasheet, Section 3.5, "Calculating pressure and temperature".
+ */
+static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp)
+{
+ int ret;
+ s32 x1, x2;
+ struct bmp180_calib calib;
+
+ ret = bmp180_read_calib(data, &calib);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to read calibration coefficients\n");
+ return ret;
+ }
+
+ x1 = ((adc_temp - calib.AC6) * calib.AC5) >> 15;
+ x2 = (calib.MC << 11) / (x1 + calib.MD);
+ data->t_fine = x1 + x2;
+
+ return (data->t_fine + 8) >> 4;
+}
+
+static int bmp180_read_temp(struct bmp280_data *data, int *val)
+{
+ int ret;
+ s32 adc_temp, comp_temp;
+
+ ret = bmp180_read_adc_temp(data, &adc_temp);
+ if (ret)
+ return ret;
+
+ comp_temp = bmp180_compensate_temp(data, adc_temp);
+
+ /*
+ * val might be NULL if we're called by the read_press routine,
+ * which only cares about the carried-over t_fine value.
+ */
+ if (val) {
+ *val = comp_temp * 100;
+ return IIO_VAL_INT;
+ }
+
+ return 0;
+}
+
+static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
+{
+ int ret;
+ __be32 tmp = 0;
+ u8 oss = data->oversampling_press;
+
+ ret = bmp180_measure(data, BMP180_MEAS_PRESS_X(oss));
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, (u8 *)&tmp, 3);
+ if (ret)
+ return ret;
+
+ *val = (be32_to_cpu(tmp) >> 8) >> (8 - oss);
+
+ return 0;
+}
+
+/*
+ * Returns pressure in Pa, resolution is 1 Pa.
+ *
+ * Taken from datasheet, Section 3.5, "Calculating pressure and temperature".
+ */
+static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
+{
+ int ret;
+ s32 x1, x2, x3, p;
+ s32 b3, b6;
+ u32 b4, b7;
+ s32 oss = data->oversampling_press;
+ struct bmp180_calib calib;
+
+ ret = bmp180_read_calib(data, &calib);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to read calibration coefficients\n");
+ return ret;
+ }
+
+ b6 = data->t_fine - 4000;
+ x1 = (calib.B2 * (b6 * b6 >> 12)) >> 11;
+ x2 = calib.AC2 * b6 >> 11;
+ x3 = x1 + x2;
+ b3 = ((((s32)calib.AC1 * 4 + x3) << oss) + 2) / 4;
+ x1 = calib.AC3 * b6 >> 13;
+ x2 = (calib.B1 * ((b6 * b6) >> 12)) >> 16;
+ x3 = (x1 + x2 + 2) >> 2;
+ b4 = calib.AC4 * (u32)(x3 + 32768) >> 15;
+ b7 = ((u32)adc_press - b3) * (50000 >> oss);
+ if (b7 < 0x80000000)
+ p = (b7 * 2) / b4;
+ else
+ p = (b7 / b4) * 2;
+
+ x1 = (p >> 8) * (p >> 8);
+ x1 = (x1 * 3038) >> 16;
+ x2 = (-7357 * p) >> 16;
+
+ return p + ((x1 + x2 + 3791) >> 4);
+}
+
+static int bmp180_read_press(struct bmp280_data *data,
+ int *val, int *val2)
+{
+ int ret;
+ s32 adc_press;
+ u32 comp_press;
+
+ /* Read and compensate temperature so we get a reading of t_fine. */
+ ret = bmp180_read_temp(data, NULL);
+ if (ret)
+ return ret;
+
+ ret = bmp180_read_adc_press(data, &adc_press);
+ if (ret)
+ return ret;
+
+ comp_press = bmp180_compensate_press(data, adc_press);
+
+ *val = comp_press;
+ *val2 = 1000;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int bmp180_chip_config(struct bmp280_data *data)
+{
+ return 0;
+}
+
+static const int bmp180_oversampling_temp_avail[] = { 1 };
+static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 };
+
+static const struct bmp280_chip_info bmp180_chip_info = {
+ .regmap_config = &bmp180_regmap_config,
+
+ .oversampling_temp_avail = bmp180_oversampling_temp_avail,
+ .num_oversampling_temp_avail =
+ ARRAY_SIZE(bmp180_oversampling_temp_avail),
+
+ .oversampling_press_avail = bmp180_oversampling_press_avail,
+ .num_oversampling_press_avail =
+ ARRAY_SIZE(bmp180_oversampling_press_avail),
+
+ .chip_config = bmp180_chip_config,
+ .read_temp = bmp180_read_temp,
+ .read_press = bmp180_read_press,
+};
+
static int bmp280_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -367,7 +853,23 @@ static int bmp280_probe(struct i2c_client *client,
indio_dev->info = &bmp280_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- data->regmap = devm_regmap_init_i2c(client, &bmp280_regmap_config);
+ switch (id->driver_data) {
+ case BMP180_CHIP_ID:
+ data->chip_info = &bmp180_chip_info;
+ data->oversampling_press = ilog2(8);
+ data->oversampling_temp = ilog2(1);
+ break;
+ case BMP280_CHIP_ID:
+ data->chip_info = &bmp280_chip_info;
+ data->oversampling_press = ilog2(16);
+ data->oversampling_temp = ilog2(2);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ data->regmap = devm_regmap_init_i2c(client,
+ data->chip_info->regmap_config);
if (IS_ERR(data->regmap)) {
dev_err(&client->dev, "failed to allocate register map\n");
return PTR_ERR(data->regmap);
@@ -376,13 +878,13 @@ static int bmp280_probe(struct i2c_client *client,
ret = regmap_read(data->regmap, BMP280_REG_ID, &chip_id);
if (ret < 0)
return ret;
- if (chip_id != BMP280_CHIP_ID) {
- dev_err(&client->dev, "bad chip id. expected %x got %x\n",
- BMP280_CHIP_ID, chip_id);
+ if (chip_id != id->driver_data) {
+ dev_err(&client->dev, "bad chip id. expected %lx got %x\n",
+ id->driver_data, chip_id);
return -EINVAL;
}
- ret = bmp280_chip_init(data);
+ ret = data->chip_info->chip_config(data);
if (ret < 0)
return ret;
@@ -390,13 +892,17 @@ static int bmp280_probe(struct i2c_client *client,
}
static const struct acpi_device_id bmp280_acpi_match[] = {
- {"BMP0280", 0},
+ {"BMP0280", BMP280_CHIP_ID },
+ {"BMP0180", BMP180_CHIP_ID },
+ {"BMP0085", BMP180_CHIP_ID },
{ },
};
MODULE_DEVICE_TABLE(acpi, bmp280_acpi_match);
static const struct i2c_device_id bmp280_id[] = {
- {"bmp280", 0},
+ {"bmp280", BMP280_CHIP_ID },
+ {"bmp180", BMP180_CHIP_ID },
+ {"bmp085", BMP180_CHIP_ID },
{ },
};
MODULE_DEVICE_TABLE(i2c, bmp280_id);
@@ -412,5 +918,5 @@ static struct i2c_driver bmp280_driver = {
module_i2c_driver(bmp280_driver);
MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
-MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP280 pressure and temperature sensor");
+MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor");
MODULE_LICENSE("GPL v2");
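The BMP180 temperature path can be checked against the worked example in the Bosch BMP085/BMP180 datasheet (calibration values assumed from that appendix, not from this patch): AC5 = 32757, AC6 = 23153, MC = -8711, MD = 2868 and a raw reading of 27898 should come out at 15.0 degC. Tracing bmp180_compensate_temp() with those numbers:

	x1 = ((27898 - 23153) * 32757) >> 15;	/* = 4743 */
	x2 = (-8711 << 11) / (4743 + 2868);	/* = -2343 with C truncation */
	t_fine = 4743 + (-2343);		/* = 2400 */
	temp = (2400 + 8) >> 4;			/* = 150, i.e. 15.0 degC */

	/* bmp180_read_temp() then reports 150 * 100 = 15000 milli-degC */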
diff --git a/drivers/iio/pressure/hp03.c b/drivers/iio/pressure/hp03.c
new file mode 100644
index 000000000..ac76515d5
--- /dev/null
+++ b/drivers/iio/pressure/hp03.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2016 Marek Vasut <marex@denx.de>
+ *
+ * Driver for Hope RF HP03 digital temperature and pressure sensor.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "hp03: " fmt
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/*
+ * The HP03 sensor occupies two fixed I2C addresses:
+ * 0x50 ... read-only EEPROM with calibration data
+ * 0x77 ... read-write ADC for pressure and temperature
+ */
+#define HP03_EEPROM_ADDR 0x50
+#define HP03_ADC_ADDR 0x77
+
+#define HP03_EEPROM_CX_OFFSET 0x10
+#define HP03_EEPROM_AB_OFFSET 0x1e
+#define HP03_EEPROM_CD_OFFSET 0x20
+
+#define HP03_ADC_WRITE_REG 0xff
+#define HP03_ADC_READ_REG 0xfd
+#define HP03_ADC_READ_PRESSURE 0xf0 /* D1 in datasheet */
+#define HP03_ADC_READ_TEMP 0xe8 /* D2 in datasheet */
+
+struct hp03_priv {
+ struct i2c_client *client;
+ struct mutex lock;
+ struct gpio_desc *xclr_gpio;
+
+ struct i2c_client *eeprom_client;
+ struct regmap *eeprom_regmap;
+
+ s32 pressure; /* kPa */
+ s32 temp; /* Deg. C */
+};
+
+static const struct iio_chan_spec hp03_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static bool hp03_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return false;
+}
+
+static bool hp03_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return false;
+}
+
+static const struct regmap_config hp03_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = HP03_EEPROM_CD_OFFSET + 1,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = hp03_is_writeable_reg,
+ .volatile_reg = hp03_is_volatile_reg,
+};
+
+static int hp03_get_temp_pressure(struct hp03_priv *priv, const u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(priv->client, HP03_ADC_WRITE_REG, reg);
+ if (ret < 0)
+ return ret;
+
+ msleep(50); /* Wait for conversion to finish */
+
+ return i2c_smbus_read_word_data(priv->client, HP03_ADC_READ_REG);
+}
+
+static int hp03_update_temp_pressure(struct hp03_priv *priv)
+{
+ struct device *dev = &priv->client->dev;
+ u8 coefs[18];
+ u16 cx_val[7];
+ int ab_val, d1_val, d2_val, diff_val, dut, off, sens, x;
+ int i, ret;
+
+ /* Sample coefficients from EEPROM */
+ ret = regmap_bulk_read(priv->eeprom_regmap, HP03_EEPROM_CX_OFFSET,
+ coefs, sizeof(coefs));
+ if (ret < 0) {
+ dev_err(dev, "Failed to read EEPROM (reg=%02x)\n",
+ HP03_EEPROM_CX_OFFSET);
+ return ret;
+ }
+
+ /* Sample Temperature and Pressure */
+ gpiod_set_value_cansleep(priv->xclr_gpio, 1);
+
+ ret = hp03_get_temp_pressure(priv, HP03_ADC_READ_PRESSURE);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read pressure\n");
+ goto err_adc;
+ }
+ d1_val = ret;
+
+ ret = hp03_get_temp_pressure(priv, HP03_ADC_READ_TEMP);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read temperature\n");
+ goto err_adc;
+ }
+ d2_val = ret;
+
+ gpiod_set_value_cansleep(priv->xclr_gpio, 0);
+
+ /* The Cx coefficients and Temp/Pressure values are MSB first. */
+ for (i = 0; i < 7; i++)
+ cx_val[i] = (coefs[2 * i] << 8) | (coefs[(2 * i) + 1] << 0);
+ d1_val = ((d1_val >> 8) & 0xff) | ((d1_val & 0xff) << 8);
+ d2_val = ((d2_val >> 8) & 0xff) | ((d2_val & 0xff) << 8);
+
+ /* Coefficient voodoo from the HP03 datasheet. */
+ if (d2_val >= cx_val[4])
+ ab_val = coefs[14]; /* A-value */
+ else
+ ab_val = coefs[15]; /* B-value */
+
+ diff_val = d2_val - cx_val[4];
+ dut = (ab_val * (diff_val >> 7) * (diff_val >> 7)) >> coefs[16];
+ dut = diff_val - dut;
+
+ off = (cx_val[1] + (((cx_val[3] - 1024) * dut) >> 14)) * 4;
+ sens = cx_val[0] + ((cx_val[2] * dut) >> 10);
+ x = ((sens * (d1_val - 7168)) >> 14) - off;
+
+ priv->pressure = ((x * 100) >> 5) + (cx_val[6] * 10);
+ priv->temp = 250 + ((dut * cx_val[5]) >> 16) - (dut >> coefs[17]);
+
+ return 0;
+
+err_adc:
+ gpiod_set_value_cansleep(priv->xclr_gpio, 0);
+ return ret;
+}
+
+static int hp03_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct hp03_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = hp03_update_temp_pressure(priv);
+ mutex_unlock(&priv->lock);
+
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ *val = priv->pressure;
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ *val = priv->temp;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = 10;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info hp03_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &hp03_read_raw,
+};
+
+static int hp03_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct iio_dev *indio_dev;
+ struct hp03_priv *priv;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+ priv->client = client;
+ mutex_init(&priv->lock);
+
+ indio_dev->dev.parent = dev;
+ indio_dev->name = id->name;
+ indio_dev->channels = hp03_channels;
+ indio_dev->num_channels = ARRAY_SIZE(hp03_channels);
+ indio_dev->info = &hp03_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ priv->xclr_gpio = devm_gpiod_get_index(dev, "xclr", 0, GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->xclr_gpio)) {
+ dev_err(dev, "Failed to claim XCLR GPIO\n");
+ ret = PTR_ERR(priv->xclr_gpio);
+ return ret;
+ }
+
+ /*
+ * Allocate another device for the on-sensor EEPROM,
+ * which has its own dedicated I2C address and contains
+ * the calibration constants for the sensor.
+ */
+ priv->eeprom_client = i2c_new_dummy(client->adapter, HP03_EEPROM_ADDR);
+ if (!priv->eeprom_client) {
+ dev_err(dev, "New EEPROM I2C device failed\n");
+ return -ENODEV;
+ }
+
+ priv->eeprom_regmap = regmap_init_i2c(priv->eeprom_client,
+ &hp03_regmap_config);
+ if (IS_ERR(priv->eeprom_regmap)) {
+ dev_err(dev, "Failed to allocate EEPROM regmap\n");
+ ret = PTR_ERR(priv->eeprom_regmap);
+ goto err_cleanup_eeprom_client;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register IIO device\n");
+ goto err_cleanup_eeprom_regmap;
+ }
+
+ i2c_set_clientdata(client, indio_dev);
+
+ return 0;
+
+err_cleanup_eeprom_regmap:
+ regmap_exit(priv->eeprom_regmap);
+
+err_cleanup_eeprom_client:
+ i2c_unregister_device(priv->eeprom_client);
+ return ret;
+}
+
+static int hp03_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct hp03_priv *priv = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regmap_exit(priv->eeprom_regmap);
+ i2c_unregister_device(priv->eeprom_client);
+
+ return 0;
+}
+
+static const struct i2c_device_id hp03_id[] = {
+ { "hp03", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, hp03_id);
+
+static struct i2c_driver hp03_driver = {
+ .driver = {
+ .name = "hp03",
+ },
+ .probe = hp03_probe,
+ .remove = hp03_remove,
+ .id_table = hp03_id,
+};
+module_i2c_driver(hp03_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("Driver for Hope RF HP03 pressure and temperature sensor");
+MODULE_LICENSE("GPL v2");
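The hp03 driver added above has to reach two fixed I2C addresses from one bound device: the ADC it probed against and the calibration EEPROM at 0x50. It does so with i2c_new_dummy() plus a dedicated regmap. A condensed sketch of the technique, reusing HP03_EEPROM_ADDR and hp03_regmap_config from the file (error paths trimmed):

struct i2c_client *eeprom;
struct regmap *map;

eeprom = i2c_new_dummy(client->adapter, HP03_EEPROM_ADDR);
if (!eeprom)
	return -ENODEV;		/* address already claimed or invalid */

map = regmap_init_i2c(eeprom, &hp03_regmap_config);
if (IS_ERR(map)) {
	i2c_unregister_device(eeprom);	/* give the dummy client back */
	return PTR_ERR(map);
}

Note the non-devm regmap_init_i2c(): the dummy client and its regmap must be released by hand, which is why remove() and the probe error path above call regmap_exit() and i2c_unregister_device() explicitly.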
diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c
new file mode 100644
index 000000000..90f2b6e4a
--- /dev/null
+++ b/drivers/iio/pressure/hp206c.c
@@ -0,0 +1,426 @@
+/*
+ * hp206c.c - HOPERF HP206C precision barometer and altimeter sensor
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x76)
+ *
+ * Datasheet:
+ * http://www.hoperf.com/upload/sensor/HP206C_DataSheet_EN_V2.0.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/delay.h>
+#include <linux/util_macros.h>
+#include <linux/acpi.h>
+
+/* I2C commands: */
+#define HP206C_CMD_SOFT_RST 0x06
+
+#define HP206C_CMD_ADC_CVT 0x40
+
+#define HP206C_CMD_ADC_CVT_OSR_4096 0x00
+#define HP206C_CMD_ADC_CVT_OSR_2048 0x04
+#define HP206C_CMD_ADC_CVT_OSR_1024 0x08
+#define HP206C_CMD_ADC_CVT_OSR_512 0x0c
+#define HP206C_CMD_ADC_CVT_OSR_256 0x10
+#define HP206C_CMD_ADC_CVT_OSR_128 0x14
+
+#define HP206C_CMD_ADC_CVT_CHNL_PT 0x00
+#define HP206C_CMD_ADC_CVT_CHNL_T 0x02
+
+#define HP206C_CMD_READ_P 0x30
+#define HP206C_CMD_READ_T 0x32
+
+#define HP206C_CMD_READ_REG 0x80
+#define HP206C_CMD_WRITE_REG 0xc0
+
+#define HP206C_REG_INT_EN 0x0b
+#define HP206C_REG_INT_CFG 0x0c
+
+#define HP206C_REG_INT_SRC 0x0d
+#define HP206C_FLAG_DEV_RDY 0x40
+
+#define HP206C_REG_PARA 0x0f
+#define HP206C_FLAG_CMPS_EN 0x80
+
+/* Maximum spin for DEV_RDY */
+#define HP206C_MAX_DEV_RDY_WAIT_COUNT 20
+#define HP206C_DEV_RDY_WAIT_US 20000
+
+struct hp206c_data {
+ struct mutex mutex;
+ struct i2c_client *client;
+ int temp_osr_index;
+ int pres_osr_index;
+};
+
+struct hp206c_osr_setting {
+ u8 osr_mask;
+ unsigned int temp_conv_time_us;
+ unsigned int pres_conv_time_us;
+};
+
+/* Data from Table 5 in datasheet. */
+static const struct hp206c_osr_setting hp206c_osr_settings[] = {
+ { HP206C_CMD_ADC_CVT_OSR_4096, 65600, 131100 },
+ { HP206C_CMD_ADC_CVT_OSR_2048, 32800, 65600 },
+ { HP206C_CMD_ADC_CVT_OSR_1024, 16400, 32800 },
+ { HP206C_CMD_ADC_CVT_OSR_512, 8200, 16400 },
+ { HP206C_CMD_ADC_CVT_OSR_256, 4100, 8200 },
+ { HP206C_CMD_ADC_CVT_OSR_128, 2100, 4100 },
+};
+static const int hp206c_osr_rates[] = { 4096, 2048, 1024, 512, 256, 128 };
+static const char hp206c_osr_rates_str[] = "4096 2048 1024 512 256 128";
+
+static inline int hp206c_read_reg(struct i2c_client *client, u8 reg)
+{
+ return i2c_smbus_read_byte_data(client, HP206C_CMD_READ_REG | reg);
+}
+
+static inline int hp206c_write_reg(struct i2c_client *client, u8 reg, u8 val)
+{
+ return i2c_smbus_write_byte_data(client,
+ HP206C_CMD_WRITE_REG | reg, val);
+}
+
+static int hp206c_read_20bit(struct i2c_client *client, u8 cmd)
+{
+ int ret;
+ u8 values[3];
+
+ ret = i2c_smbus_read_i2c_block_data(client, cmd, 3, values);
+ if (ret < 0)
+ return ret;
+ if (ret != 3)
+ return -EIO;
+ return ((values[0] & 0xF) << 16) | (values[1] << 8) | (values[2]);
+}
+
+/* Poll DEV_RDY up to 20 times, 20-30 ms apart (~400-600 ms max), or return an error. */
+static int hp206c_wait_dev_rdy(struct iio_dev *indio_dev)
+{
+ int ret;
+ int count = 0;
+ struct hp206c_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+
+ while (++count <= HP206C_MAX_DEV_RDY_WAIT_COUNT) {
+ ret = hp206c_read_reg(client, HP206C_REG_INT_SRC);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed READ_REG INT_SRC: %d\n", ret);
+ return ret;
+ }
+ if (ret & HP206C_FLAG_DEV_RDY)
+ return 0;
+ usleep_range(HP206C_DEV_RDY_WAIT_US, HP206C_DEV_RDY_WAIT_US * 3 / 2);
+ }
+ return -ETIMEDOUT;
+}
+
+static int hp206c_set_compensation(struct i2c_client *client, bool enabled)
+{
+ int val;
+
+ val = hp206c_read_reg(client, HP206C_REG_PARA);
+ if (val < 0)
+ return val;
+ if (enabled)
+ val |= HP206C_FLAG_CMPS_EN;
+ else
+ val &= ~HP206C_FLAG_CMPS_EN;
+
+ return hp206c_write_reg(client, HP206C_REG_PARA, val);
+}
+
+/* Do a soft reset */
+static int hp206c_soft_reset(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct hp206c_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+
+ ret = i2c_smbus_write_byte(client, HP206C_CMD_SOFT_RST);
+ if (ret) {
+ dev_err(&client->dev, "Failed to reset device: %d\n", ret);
+ return ret;
+ }
+
+ usleep_range(400, 600);
+
+ ret = hp206c_wait_dev_rdy(indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "Device not ready after soft reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = hp206c_set_compensation(client, true);
+ if (ret)
+ dev_err(&client->dev, "Failed to enable compensation: %d\n", ret);
+ return ret;
+}
+
+static int hp206c_conv_and_read(struct iio_dev *indio_dev,
+ u8 conv_cmd, u8 read_cmd,
+ unsigned int sleep_us)
+{
+ int ret;
+ struct hp206c_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+
+ ret = hp206c_wait_dev_rdy(indio_dev);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Device not ready: %d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte(client, conv_cmd);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed convert: %d\n", ret);
+ return ret;
+ }
+
+ usleep_range(sleep_us, sleep_us * 3 / 2);
+
+ ret = hp206c_wait_dev_rdy(indio_dev);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Device not ready: %d\n", ret);
+ return ret;
+ }
+
+ ret = hp206c_read_20bit(client, read_cmd);
+ if (ret < 0)
+ dev_err(&indio_dev->dev, "Failed read: %d\n", ret);
+
+ return ret;
+}
+
+static int hp206c_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret;
+ struct hp206c_data *data = iio_priv(indio_dev);
+ const struct hp206c_osr_setting *osr_setting;
+ u8 conv_cmd;
+
+ mutex_lock(&data->mutex);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = hp206c_osr_rates[data->temp_osr_index];
+ ret = IIO_VAL_INT;
+ break;
+
+ case IIO_PRESSURE:
+ *val = hp206c_osr_rates[data->pres_osr_index];
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_TEMP:
+ osr_setting = &hp206c_osr_settings[data->temp_osr_index];
+ conv_cmd = HP206C_CMD_ADC_CVT |
+ osr_setting->osr_mask |
+ HP206C_CMD_ADC_CVT_CHNL_T;
+ ret = hp206c_conv_and_read(indio_dev,
+ conv_cmd,
+ HP206C_CMD_READ_T,
+ osr_setting->temp_conv_time_us);
+ if (ret >= 0) {
+ /* 20 significant bits are provided.
+ * Extend sign over the rest.
+ */
+ *val = sign_extend32(ret, 19);
+ ret = IIO_VAL_INT;
+ }
+ break;
+
+ case IIO_PRESSURE:
+ osr_setting = &hp206c_osr_settings[data->pres_osr_index];
+ conv_cmd = HP206C_CMD_ADC_CVT |
+ osr_setting->osr_mask |
+ HP206C_CMD_ADC_CVT_CHNL_PT;
+ ret = hp206c_conv_and_read(indio_dev,
+ conv_cmd,
+ HP206C_CMD_READ_P,
+ osr_setting->pres_conv_time_us);
+ if (ret >= 0) {
+ *val = ret;
+ ret = IIO_VAL_INT;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = 0;
+ *val2 = 10000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+
+ case IIO_PRESSURE:
+ *val = 0;
+ *val2 = 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int hp206c_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret = 0;
+ struct hp206c_data *data = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_OVERSAMPLING_RATIO)
+ return -EINVAL;
+ mutex_lock(&data->mutex);
+ switch (chan->type) {
+ case IIO_TEMP:
+ data->temp_osr_index = find_closest_descending(val,
+ hp206c_osr_rates, ARRAY_SIZE(hp206c_osr_rates));
+ break;
+ case IIO_PRESSURE:
+ data->pres_osr_index = find_closest_descending(val,
+ hp206c_osr_rates, ARRAY_SIZE(hp206c_osr_rates));
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static const struct iio_chan_spec hp206c_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ },
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ }
+};
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(hp206c_osr_rates_str);
+
+static struct attribute *hp206c_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group hp206c_attribute_group = {
+ .attrs = hp206c_attributes,
+};
+
+static const struct iio_info hp206c_info = {
+ .attrs = &hp206c_attribute_group,
+ .read_raw = hp206c_read_raw,
+ .write_raw = hp206c_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int hp206c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct hp206c_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ dev_err(&client->dev, "Adapter does not support "
+ "all required i2c functionality\n");
+ return -ENODEV;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->mutex);
+
+ indio_dev->info = &hp206c_info;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = hp206c_channels;
+ indio_dev->num_channels = ARRAY_SIZE(hp206c_channels);
+
+ i2c_set_clientdata(client, indio_dev);
+
+ /* Do a soft reset on probe */
+ ret = hp206c_soft_reset(indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "Failed to reset on startup: %d\n", ret);
+ return -ENODEV;
+ }
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id hp206c_id[] = {
+ {"hp206c"},
+ {}
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id hp206c_acpi_match[] = {
+ {"HOP206C", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hp206c_acpi_match);
+#endif
+
+static struct i2c_driver hp206c_driver = {
+ .probe = hp206c_probe,
+ .id_table = hp206c_id,
+ .driver = {
+ .name = "hp206c",
+ .acpi_match_table = ACPI_PTR(hp206c_acpi_match),
+ },
+};
+
+module_i2c_driver(hp206c_driver);
+
+MODULE_DESCRIPTION("HOPERF HP206C precision barometer and altimeter sensor");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@intel.com>");
+MODULE_LICENSE("GPL v2");
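hp206c_read_20bit() above assembles a 20-bit big-endian sample from three bytes, keeping only the low four bits of the first byte, and the temperature path then sign-extends the result from bit 19 with sign_extend32(). A small worked sketch of both steps, with illustrative byte values:

u8 b[3] = { 0xF8, 0x00, 0x01 };	/* example raw bytes from the sensor */
int raw, temp;

raw = ((b[0] & 0xF) << 16) | (b[1] << 8) | b[2];	/* = 0x80001 */
temp = sign_extend32(raw, 19);	/* bit 19 is set, so this yields -524287 */

The pressure path stores the 20-bit value as-is, without sign extension.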
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index 8b08e4b7e..ccda63c5b 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -16,15 +16,11 @@
#include <linux/iio/iio.h>
#include <linux/mutex.h>
+struct regulator;
+
#define MS5611_RESET 0x1e
#define MS5611_READ_ADC 0x00
#define MS5611_READ_PROM_WORD 0xA0
-#define MS5611_START_TEMP_CONV 0x58
-#define MS5611_START_PRESSURE_CONV 0x48
-
-#define MS5611_CONV_TIME_MIN 9040
-#define MS5611_CONV_TIME_MAX 10000
-
#define MS5611_PROM_WORDS_NB 8
enum {
@@ -39,16 +35,31 @@ struct ms5611_chip_info {
s32 *temp, s32 *pressure);
};
+/*
+ * OverSampling Rate descriptor.
+ * Warning: cmd MUST be kept aligned on a word boundary (see
+ * ms5611_spi_read_adc_temp_and_pressure() in ms5611_spi.c).
+ */
+struct ms5611_osr {
+ unsigned long conv_usec;
+ u8 cmd;
+ unsigned short rate;
+};
+
struct ms5611_state {
void *client;
struct mutex lock;
+ const struct ms5611_osr *pressure_osr;
+ const struct ms5611_osr *temp_osr;
+
int (*reset)(struct device *dev);
int (*read_prom_word)(struct device *dev, int index, u16 *word);
int (*read_adc_temp_and_pressure)(struct device *dev,
s32 *temp, s32 *pressure);
struct ms5611_chip_info *chip_info;
+ struct regulator *vdd;
};
int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
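The alignment warning in the comment above is easy to break silently if the ms5611_osr fields are ever reordered. A hypothetical compile-time guard (not part of this patch) that would catch such a change, assuming "word boundary" means the native machine word; it would live inside any function built with the driver, e.g. ms5611_probe():

/* Fail the build if 'cmd' ever loses its word alignment. */
BUILD_BUG_ON(offsetof(struct ms5611_osr, cmd) % sizeof(unsigned long));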
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 992ad8d3b..76578b07b 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -18,11 +18,44 @@
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
+#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include "ms5611.h"
+#define MS5611_INIT_OSR(_cmd, _conv_usec, _rate) \
+ { .cmd = _cmd, .conv_usec = _conv_usec, .rate = _rate }
+
+static const struct ms5611_osr ms5611_avail_pressure_osr[] = {
+ MS5611_INIT_OSR(0x40, 600, 256),
+ MS5611_INIT_OSR(0x42, 1170, 512),
+ MS5611_INIT_OSR(0x44, 2280, 1024),
+ MS5611_INIT_OSR(0x46, 4540, 2048),
+ MS5611_INIT_OSR(0x48, 9040, 4096)
+};
+
+static const struct ms5611_osr ms5611_avail_temp_osr[] = {
+ MS5611_INIT_OSR(0x50, 600, 256),
+ MS5611_INIT_OSR(0x52, 1170, 512),
+ MS5611_INIT_OSR(0x54, 2280, 1024),
+ MS5611_INIT_OSR(0x56, 4540, 2048),
+ MS5611_INIT_OSR(0x58, 9040, 4096)
+};
+
+static const char ms5611_show_osr[] = "256 512 1024 2048 4096";
+
+static IIO_CONST_ATTR(oversampling_ratio_available, ms5611_show_osr);
+
+static struct attribute *ms5611_attributes[] = {
+ &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ms5611_attribute_group = {
+ .attrs = ms5611_attributes,
+};
+
static bool ms5611_prom_is_valid(u16 *prom, size_t len)
{
int i, j;
@@ -239,11 +272,70 @@ static int ms5611_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ if (chan->type != IIO_TEMP && chan->type != IIO_PRESSURE)
+ break;
+ mutex_lock(&st->lock);
+ if (chan->type == IIO_TEMP)
+ *val = (int)st->temp_osr->rate;
+ else
+ *val = (int)st->pressure_osr->rate;
+ mutex_unlock(&st->lock);
+ return IIO_VAL_INT;
}
return -EINVAL;
}
+static const struct ms5611_osr *ms5611_find_osr(int rate,
+ const struct ms5611_osr *osr,
+ size_t count)
+{
+ unsigned int r;
+
+ for (r = 0; r < count; r++)
+ if ((unsigned short)rate == osr[r].rate)
+ break;
+ if (r >= count)
+ return NULL;
+ return &osr[r];
+}
+
+static int ms5611_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct ms5611_state *st = iio_priv(indio_dev);
+ const struct ms5611_osr *osr = NULL;
+
+ if (mask != IIO_CHAN_INFO_OVERSAMPLING_RATIO)
+ return -EINVAL;
+
+ if (chan->type == IIO_TEMP)
+ osr = ms5611_find_osr(val, ms5611_avail_temp_osr,
+ ARRAY_SIZE(ms5611_avail_temp_osr));
+ else if (chan->type == IIO_PRESSURE)
+ osr = ms5611_find_osr(val, ms5611_avail_pressure_osr,
+ ARRAY_SIZE(ms5611_avail_pressure_osr));
+ if (!osr)
+ return -EINVAL;
+
+ mutex_lock(&st->lock);
+
+ if (iio_buffer_enabled(indio_dev)) {
+ mutex_unlock(&st->lock);
+ return -EBUSY;
+ }
+
+ if (chan->type == IIO_TEMP)
+ st->temp_osr = osr;
+ else
+ st->pressure_osr = osr;
+
+ mutex_unlock(&st->lock);
+ return 0;
+}
+
static const unsigned long ms5611_scan_masks[] = {0x3, 0};
static struct ms5611_chip_info chip_info_tbl[] = {
@@ -259,7 +351,8 @@ static const struct iio_chan_spec ms5611_channels[] = {
{
.type = IIO_PRESSURE,
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
- BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
.scan_index = 0,
.scan_type = {
.sign = 's',
@@ -271,7 +364,8 @@ static const struct iio_chan_spec ms5611_channels[] = {
{
.type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
- BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
.scan_index = 1,
.scan_type = {
.sign = 's',
@@ -285,40 +379,68 @@ static const struct iio_chan_spec ms5611_channels[] = {
static const struct iio_info ms5611_info = {
.read_raw = &ms5611_read_raw,
+ .write_raw = &ms5611_write_raw,
+ .attrs = &ms5611_attribute_group,
.driver_module = THIS_MODULE,
};
static int ms5611_init(struct iio_dev *indio_dev)
{
int ret;
- struct regulator *vdd = devm_regulator_get(indio_dev->dev.parent,
- "vdd");
+ struct ms5611_state *st = iio_priv(indio_dev);
/* Enable attached regulator if any. */
- if (!IS_ERR(vdd)) {
- ret = regulator_enable(vdd);
+ st->vdd = devm_regulator_get(indio_dev->dev.parent, "vdd");
+ if (!IS_ERR(st->vdd)) {
+ ret = regulator_enable(st->vdd);
if (ret) {
dev_err(indio_dev->dev.parent,
- "failed to enable Vdd supply: %d\n", ret);
+ "failed to enable Vdd supply: %d\n", ret);
return ret;
}
+ } else {
+ ret = PTR_ERR(st->vdd);
+ if (ret != -ENODEV)
+ return ret;
}
ret = ms5611_reset(indio_dev);
if (ret < 0)
- return ret;
+ goto err_regulator_disable;
- return ms5611_read_prom(indio_dev);
+ ret = ms5611_read_prom(indio_dev);
+ if (ret < 0)
+ goto err_regulator_disable;
+
+ return 0;
+
+err_regulator_disable:
+ if (!IS_ERR_OR_NULL(st->vdd))
+ regulator_disable(st->vdd);
+ return ret;
+}
+
+static void ms5611_fini(const struct iio_dev *indio_dev)
+{
+ const struct ms5611_state *st = iio_priv(indio_dev);
+
+ if (!IS_ERR_OR_NULL(st->vdd))
+ regulator_disable(st->vdd);
}
int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
- const char *name, int type)
+ const char *name, int type)
{
int ret;
struct ms5611_state *st = iio_priv(indio_dev);
mutex_init(&st->lock);
st->chip_info = &chip_info_tbl[type];
+ st->temp_osr =
+ &ms5611_avail_temp_osr[ARRAY_SIZE(ms5611_avail_temp_osr) - 1];
+ st->pressure_osr =
+ &ms5611_avail_pressure_osr[ARRAY_SIZE(ms5611_avail_pressure_osr)
+ - 1];
indio_dev->dev.parent = dev;
indio_dev->name = name;
indio_dev->info = &ms5611_info;
@@ -335,7 +457,7 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
ms5611_trigger_handler, NULL);
if (ret < 0) {
dev_err(dev, "iio triggered buffer setup failed\n");
- return ret;
+ goto err_fini;
}
ret = iio_device_register(indio_dev);
@@ -348,7 +470,8 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
-
+err_fini:
+ ms5611_fini(indio_dev);
return ret;
}
EXPORT_SYMBOL(ms5611_probe);
@@ -357,6 +480,7 @@ int ms5611_remove(struct iio_dev *indio_dev)
{
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
+ ms5611_fini(indio_dev);
return 0;
}
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 7f6fc8eee..55fb5fc0b 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include "ms5611.h"
@@ -62,23 +63,23 @@ static int ms5611_i2c_read_adc_temp_and_pressure(struct device *dev,
{
int ret;
struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
+ const struct ms5611_osr *osr = st->temp_osr;
- ret = i2c_smbus_write_byte(st->client, MS5611_START_TEMP_CONV);
+ ret = i2c_smbus_write_byte(st->client, osr->cmd);
if (ret < 0)
return ret;
- usleep_range(MS5611_CONV_TIME_MIN, MS5611_CONV_TIME_MAX);
-
+ usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
ret = ms5611_i2c_read_adc(st, temp);
if (ret < 0)
return ret;
- ret = i2c_smbus_write_byte(st->client, MS5611_START_PRESSURE_CONV);
+ osr = st->pressure_osr;
+ ret = i2c_smbus_write_byte(st->client, osr->cmd);
if (ret < 0)
return ret;
- usleep_range(MS5611_CONV_TIME_MIN, MS5611_CONV_TIME_MAX);
-
+ usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
return ms5611_i2c_read_adc(st, pressure);
}
@@ -113,6 +114,17 @@ static int ms5611_i2c_remove(struct i2c_client *client)
return ms5611_remove(i2c_get_clientdata(client));
}
+#if defined(CONFIG_OF)
+static const struct of_device_id ms5611_i2c_matches[] = {
+ { .compatible = "meas,ms5611" },
+ { .compatible = "ms5611" },
+ { .compatible = "meas,ms5607" },
+ { .compatible = "ms5607" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ms5611_i2c_matches);
+#endif
+
static const struct i2c_device_id ms5611_id[] = {
{ "ms5611", MS5611 },
{ "ms5607", MS5607 },
@@ -123,6 +135,7 @@ MODULE_DEVICE_TABLE(i2c, ms5611_id);
static struct i2c_driver ms5611_driver = {
.driver = {
.name = "ms5611",
+ .of_match_table = of_match_ptr(ms5611_i2c_matches)
},
.id_table = ms5611_id,
.probe = ms5611_i2c_probe,
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index 5cc009e85..932e05001 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
+#include <linux/of_device.h>
#include "ms5611.h"
@@ -55,28 +56,29 @@ static int ms5611_spi_read_adc(struct device *dev, s32 *val)
static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
s32 *temp, s32 *pressure)
{
- u8 cmd;
int ret;
struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
+ const struct ms5611_osr *osr = st->temp_osr;
- cmd = MS5611_START_TEMP_CONV;
- ret = spi_write_then_read(st->client, &cmd, 1, NULL, 0);
+ /*
+ * Warning: &osr->cmd MUST be aligned on a word boundary since it is
+ * passed as the second (const void *) argument of spi_write_then_read().
+ */
+ ret = spi_write_then_read(st->client, &osr->cmd, 1, NULL, 0);
if (ret < 0)
return ret;
- usleep_range(MS5611_CONV_TIME_MIN, MS5611_CONV_TIME_MAX);
-
+ usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
ret = ms5611_spi_read_adc(dev, temp);
if (ret < 0)
return ret;
- cmd = MS5611_START_PRESSURE_CONV;
- ret = spi_write_then_read(st->client, &cmd, 1, NULL, 0);
+ osr = st->pressure_osr;
+ ret = spi_write_then_read(st->client, &osr->cmd, 1, NULL, 0);
if (ret < 0)
return ret;
- usleep_range(MS5611_CONV_TIME_MIN, MS5611_CONV_TIME_MAX);
-
+ usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
return ms5611_spi_read_adc(dev, pressure);
}
@@ -106,7 +108,7 @@ static int ms5611_spi_probe(struct spi_device *spi)
st->client = spi;
return ms5611_probe(indio_dev, &spi->dev, spi_get_device_id(spi)->name,
- spi_get_device_id(spi)->driver_data);
+ spi_get_device_id(spi)->driver_data);
}
static int ms5611_spi_remove(struct spi_device *spi)
@@ -114,6 +116,17 @@ static int ms5611_spi_remove(struct spi_device *spi)
return ms5611_remove(spi_get_drvdata(spi));
}
+#if defined(CONFIG_OF)
+static const struct of_device_id ms5611_spi_matches[] = {
+ { .compatible = "meas,ms5611" },
+ { .compatible = "ms5611" },
+ { .compatible = "meas,ms5607" },
+ { .compatible = "ms5607" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ms5611_spi_matches);
+#endif
+
static const struct spi_device_id ms5611_id[] = {
{ "ms5611", MS5611 },
{ "ms5607", MS5607 },
@@ -124,6 +137,7 @@ MODULE_DEVICE_TABLE(spi, ms5611_id);
static struct spi_driver ms5611_driver = {
.driver = {
.name = "ms5611",
+ .of_match_table = of_match_ptr(ms5611_spi_matches)
},
.id_table = ms5611_id,
.probe = ms5611_spi_probe,
diff --git a/drivers/iio/pressure/st_pressure_buffer.c b/drivers/iio/pressure/st_pressure_buffer.c
index 2ff53f222..99468d0a6 100644
--- a/drivers/iio/pressure/st_pressure_buffer.c
+++ b/drivers/iio/pressure/st_pressure_buffer.c
@@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_press_buffer_setup_ops = {
int st_press_allocate_ring(struct iio_dev *indio_dev)
{
- return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+ return iio_triggered_buffer_setup(indio_dev, NULL,
&st_sensors_trigger_handler, &st_press_buffer_setup_ops);
}
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index d3ca32079..92a118c3c 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -67,6 +67,8 @@
#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
#define ST_PRESS_LPS331AP_IHL_IRQ_ADDR 0x22
#define ST_PRESS_LPS331AP_IHL_IRQ_MASK 0x80
+#define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22
+#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40
#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
/* CUSTOM VALUES FOR LPS001WP SENSOR */
@@ -109,6 +111,8 @@
#define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10
#define ST_PRESS_LPS25H_IHL_IRQ_ADDR 0x22
#define ST_PRESS_LPS25H_IHL_IRQ_MASK 0x80
+#define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22
+#define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40
#define ST_PRESS_LPS25H_MULTIREAD_BIT true
#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
#define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
@@ -235,6 +239,9 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK,
.addr_ihl = ST_PRESS_LPS331AP_IHL_IRQ_ADDR,
.mask_ihl = ST_PRESS_LPS331AP_IHL_IRQ_MASK,
+ .addr_od = ST_PRESS_LPS331AP_OD_IRQ_ADDR,
+ .mask_od = ST_PRESS_LPS331AP_OD_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT,
.bootime = 2,
@@ -332,6 +339,9 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK,
.addr_ihl = ST_PRESS_LPS25H_IHL_IRQ_ADDR,
.mask_ihl = ST_PRESS_LPS25H_IHL_IRQ_MASK,
+ .addr_od = ST_PRESS_LPS25H_OD_IRQ_ADDR,
+ .mask_od = ST_PRESS_LPS25H_OD_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
.bootime = 2,
@@ -435,6 +445,7 @@ static const struct iio_info press_info = {
static const struct iio_trigger_ops st_press_trigger_ops = {
.owner = THIS_MODULE,
.set_trigger_state = ST_PRESS_TRIGGER_SET_STATE,
+ .validate_device = st_sensors_validate_device,
};
#define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops)
#else
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 6425c0e5d..2137adfbd 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -85,4 +85,6 @@ source "drivers/infiniband/ulp/isert/Kconfig"
source "drivers/infiniband/sw/rdmavt/Kconfig"
+source "drivers/infiniband/hw/hfi1/Kconfig"
+
endif # INFINIBAND
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index f818538a7..edaae9f98 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,23 +1,19 @@
infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_cm.o
user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
-obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
- ib_cm.o iw_cm.o ib_addr.o \
+obj-$(CONFIG_INFINIBAND) += ib_core.o ib_cm.o iw_cm.o \
$(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
$(user_access-y)
-ib_core-y := packer.o ud_header.o verbs.o cq.o sysfs.o \
+ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
- roce_gid_mgmt.o
+ roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
+ multicast.o mad.o smi.o agent.o mad_rmpp.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
-ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
-
-ib_sa-y := sa_query.o multicast.o
-
ib_cm-y := cm.o
iw_cm-y := iwcm.o iwpm_util.o iwpm_msg.o
@@ -28,8 +24,6 @@ rdma_cm-$(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) += cma_configfs.o
rdma_ucm-y := ucma.o
-ib_addr-y := addr.o
-
ib_umad-y := user_mad.o
ib_ucm-y := ucm.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 337353d86..1374541a4 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -46,10 +46,10 @@
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>
+#include <rdma/rdma_netlink.h>
+#include <net/netlink.h>
-MODULE_AUTHOR("Sean Hefty");
-MODULE_DESCRIPTION("IB Address Translation");
-MODULE_LICENSE("Dual BSD/GPL");
+#include "core_priv.h"
struct addr_req {
struct list_head list;
@@ -62,8 +62,11 @@ struct addr_req {
struct rdma_dev_addr *addr, void *context);
unsigned long timeout;
int status;
+ u32 seq;
};
+static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);
+
static void process_req(struct work_struct *work);
static DEFINE_MUTEX(lock);
@@ -71,6 +74,126 @@ static LIST_HEAD(req_list);
static DECLARE_DELAYED_WORK(work, process_req);
static struct workqueue_struct *addr_wq;
+static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
+ [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
+ .len = sizeof(struct rdma_nla_ls_gid)},
+};
+
+static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
+{
+ struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
+ int ret;
+
+ if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
+ return false;
+
+ ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_addr_policy);
+ if (ret)
+ return false;
+
+ return true;
+}
+
+static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
+{
+ const struct nlattr *head, *curr;
+ union ib_gid gid;
+ struct addr_req *req;
+ int len, rem;
+ int found = 0;
+
+ head = (const struct nlattr *)nlmsg_data(nlh);
+ len = nlmsg_len(nlh);
+
+ nla_for_each_attr(curr, head, len, rem) {
+ if (curr->nla_type == LS_NLA_TYPE_DGID)
+ memcpy(&gid, nla_data(curr), nla_len(curr));
+ }
+
+ mutex_lock(&lock);
+ list_for_each_entry(req, &req_list, list) {
+ if (nlh->nlmsg_seq != req->seq)
+ continue;
+ /* We set the DGID part, the rest was set earlier */
+ rdma_addr_set_dgid(req->addr, &gid);
+ req->status = 0;
+ found = 1;
+ break;
+ }
+ mutex_unlock(&lock);
+
+ if (!found)
+ pr_info("Couldn't find request waiting for DGID: %pI6\n",
+ &gid);
+}
+
+int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
+
+ if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
+ !(NETLINK_CB(skb).sk) ||
+ !netlink_capable(skb, CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (ib_nl_is_good_ip_resp(nlh))
+ ib_nl_process_good_ip_rsep(nlh);
+
+ return skb->len;
+}
+
+static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
+ const void *daddr,
+ u32 seq, u16 family)
+{
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ struct rdma_ls_ip_resolve_header *header;
+ void *data;
+ size_t size;
+ int attrtype;
+ int len;
+
+ if (family == AF_INET) {
+ size = sizeof(struct in_addr);
+ attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4;
+ } else {
+ size = sizeof(struct in6_addr);
+ attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6;
+ }
+
+ len = nla_total_size(sizeof(size));
+ len += NLMSG_ALIGN(sizeof(*header));
+
+ skb = nlmsg_new(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS,
+ RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST);
+ if (!data) {
+ nlmsg_free(skb);
+ return -ENODATA;
+ }
+
+ /* Construct the family header first */
+ header = (struct rdma_ls_ip_resolve_header *)
+ skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
+ header->ifindex = dev_addr->bound_dev_if;
+ nla_put(skb, attrtype, size, daddr);
+
+ /* Repair the nlmsg header length */
+ nlmsg_end(skb, nlh);
+ ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
+
+ /* Make the request retry, so that when the response arrives from
+ * userspace we will have something to work with.
+ */
+ return -ENODATA;
+}
+
int rdma_addr_size(struct sockaddr *addr)
{
switch (addr->sa_family) {
@@ -199,6 +322,17 @@ static void queue_req(struct addr_req *req)
mutex_unlock(&lock);
}
+static int ib_nl_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
+ const void *daddr, u32 seq, u16 family)
+{
+ if (ibnl_chk_listeners(RDMA_NL_GROUP_LS))
+ return -EADDRNOTAVAIL;
+
+ /* We fill in what we can, the response will fill the rest */
+ rdma_copy_addr(dev_addr, dst->dev, NULL);
+ return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
+}
+
static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
const void *daddr)
{
@@ -223,6 +357,39 @@ static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
return ret;
}
+static bool has_gateway(struct dst_entry *dst, sa_family_t family)
+{
+ struct rtable *rt;
+ struct rt6_info *rt6;
+
+ if (family == AF_INET) {
+ rt = container_of(dst, struct rtable, dst);
+ return rt->rt_uses_gateway;
+ }
+
+ rt6 = container_of(dst, struct rt6_info, dst);
+ return rt6->rt6i_flags & RTF_GATEWAY;
+}
+
+static int fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
+ const struct sockaddr *dst_in, u32 seq)
+{
+ const struct sockaddr_in *dst_in4 =
+ (const struct sockaddr_in *)dst_in;
+ const struct sockaddr_in6 *dst_in6 =
+ (const struct sockaddr_in6 *)dst_in;
+ const void *daddr = (dst_in->sa_family == AF_INET) ?
+ (const void *)&dst_in4->sin_addr.s_addr :
+ (const void *)&dst_in6->sin6_addr;
+ sa_family_t family = dst_in->sa_family;
+
+ /* Gateway + ARPHRD_INFINIBAND -> IB router */
+ if (has_gateway(dst, family) && dst->dev->type == ARPHRD_INFINIBAND)
+ return ib_nl_fetch_ha(dst, dev_addr, daddr, seq, family);
+ else
+ return dst_fetch_ha(dst, dev_addr, daddr);
+}
+
static int addr4_resolve(struct sockaddr_in *src_in,
const struct sockaddr_in *dst_in,
struct rdma_dev_addr *addr,
@@ -246,10 +413,11 @@ static int addr4_resolve(struct sockaddr_in *src_in,
src_in->sin_family = AF_INET;
src_in->sin_addr.s_addr = fl4.saddr;
- /* If there's a gateway, we're definitely in RoCE v2 (as RoCE v1 isn't
- * routable) and we could set the network type accordingly.
+ /* If there's a gateway and the device type is not ARPHRD_INFINIBAND,
+ * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set the
+ * network type accordingly.
*/
- if (rt->rt_uses_gateway)
+ if (rt->rt_uses_gateway && rt->dst.dev->type != ARPHRD_INFINIBAND)
addr->network = RDMA_NETWORK_IPV4;
addr->hoplimit = ip4_dst_hoplimit(&rt->dst);
@@ -291,10 +459,12 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
src_in->sin6_addr = fl6.saddr;
}
- /* If there's a gateway, we're definitely in RoCE v2 (as RoCE v1 isn't
- * routable) and we could set the network type accordingly.
+ /* If there's a gateway and the device type is not ARPHRD_INFINIBAND,
+ * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set the
+ * network type accordingly.
*/
- if (rt->rt6i_flags & RTF_GATEWAY)
+ if (rt->rt6i_flags & RTF_GATEWAY &&
+ ip6_dst_idev(dst)->dev->type != ARPHRD_INFINIBAND)
addr->network = RDMA_NETWORK_IPV6;
addr->hoplimit = ip6_dst_hoplimit(dst);
@@ -317,7 +487,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
static int addr_resolve_neigh(struct dst_entry *dst,
const struct sockaddr *dst_in,
- struct rdma_dev_addr *addr)
+ struct rdma_dev_addr *addr,
+ u32 seq)
{
if (dst->dev->flags & IFF_LOOPBACK) {
int ret;
@@ -331,17 +502,8 @@ static int addr_resolve_neigh(struct dst_entry *dst,
}
/* If the device doesn't do ARP internally */
- if (!(dst->dev->flags & IFF_NOARP)) {
- const struct sockaddr_in *dst_in4 =
- (const struct sockaddr_in *)dst_in;
- const struct sockaddr_in6 *dst_in6 =
- (const struct sockaddr_in6 *)dst_in;
-
- return dst_fetch_ha(dst, addr,
- dst_in->sa_family == AF_INET ?
- (const void *)&dst_in4->sin_addr.s_addr :
- (const void *)&dst_in6->sin6_addr);
- }
+ if (!(dst->dev->flags & IFF_NOARP))
+ return fetch_ha(dst, addr, dst_in, seq);
return rdma_copy_addr(addr, dst->dev, NULL);
}
@@ -349,7 +511,8 @@ static int addr_resolve_neigh(struct dst_entry *dst,
static int addr_resolve(struct sockaddr *src_in,
const struct sockaddr *dst_in,
struct rdma_dev_addr *addr,
- bool resolve_neigh)
+ bool resolve_neigh,
+ u32 seq)
{
struct net_device *ndev;
struct dst_entry *dst;
@@ -366,7 +529,7 @@ static int addr_resolve(struct sockaddr *src_in,
return ret;
if (resolve_neigh)
- ret = addr_resolve_neigh(&rt->dst, dst_in, addr);
+ ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);
ndev = rt->dst.dev;
dev_hold(ndev);
@@ -383,7 +546,7 @@ static int addr_resolve(struct sockaddr *src_in,
return ret;
if (resolve_neigh)
- ret = addr_resolve_neigh(dst, dst_in, addr);
+ ret = addr_resolve_neigh(dst, dst_in, addr, seq);
ndev = dst->dev;
dev_hold(ndev);
@@ -412,7 +575,7 @@ static void process_req(struct work_struct *work)
src_in = (struct sockaddr *) &req->src_addr;
dst_in = (struct sockaddr *) &req->dst_addr;
req->status = addr_resolve(src_in, dst_in, req->addr,
- true);
+ true, req->seq);
if (req->status && time_after_eq(jiffies, req->timeout))
req->status = -ETIMEDOUT;
else if (req->status == -ENODATA)
@@ -471,8 +634,9 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
req->context = context;
req->client = client;
atomic_inc(&client->refcount);
+ req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
- req->status = addr_resolve(src_in, dst_in, addr, true);
+ req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
switch (req->status) {
case 0:
req->timeout = jiffies;
@@ -510,7 +674,7 @@ int rdma_resolve_ip_route(struct sockaddr *src_addr,
src_in->sa_family = dst_addr->sa_family;
}
- return addr_resolve(src_in, dst_addr, addr, false);
+ return addr_resolve(src_in, dst_addr, addr, false, 0);
}
EXPORT_SYMBOL(rdma_resolve_ip_route);
@@ -634,7 +798,7 @@ static struct notifier_block nb = {
.notifier_call = netevent_callback
};
-static int __init addr_init(void)
+int addr_init(void)
{
addr_wq = create_singlethread_workqueue("ib_addr");
if (!addr_wq)
@@ -642,15 +806,13 @@ static int __init addr_init(void)
register_netevent_notifier(&nb);
rdma_addr_register_client(&self);
+
return 0;
}
-static void __exit addr_cleanup(void)
+void addr_cleanup(void)
{
rdma_addr_unregister_client(&self);
unregister_netevent_notifier(&nb);
destroy_workqueue(addr_wq);
}
-
-module_init(addr_init);
-module_exit(addr_cleanup);
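The netlink resolution added above correlates asynchronous userspace replies with pending requests purely by sequence number: rdma_resolve_ip() draws a u32 from the global atomic counter, the value travels in nlmsg_seq, and the response handler scans req_list for it. In outline:

/* submit side */
req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);

/* response side */
list_for_each_entry(req, &req_list, list) {
	if (nlh->nlmsg_seq != req->seq)
		continue;
	rdma_addr_set_dgid(req->addr, &gid);	/* only the DGID was missing */
	req->status = 0;
	break;
}

The counter wraps at 2^32, so a match is probabilistic in theory, but entries live on req_list only briefly, making collisions a non-issue in practice.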
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index c2e257d97..1a2984c28 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -178,6 +178,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
{
int ret = 0;
struct net_device *old_net_dev;
+ enum ib_gid_type old_gid_type;
 /* in rdma_cap_roce_gid_table, this function should be protected by a
* sleep-able lock.
@@ -199,6 +200,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
}
old_net_dev = table->data_vec[ix].attr.ndev;
+ old_gid_type = table->data_vec[ix].attr.gid_type;
if (old_net_dev && old_net_dev != attr->ndev)
dev_put(old_net_dev);
/* if modify_gid failed, just delete the old gid */
@@ -207,10 +209,14 @@ static int write_gid(struct ib_device *ib_dev, u8 port,
attr = &zattr;
table->data_vec[ix].context = NULL;
}
- if (default_gid)
- table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+
memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
+ if (default_gid) {
+ table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
+ if (action == GID_TABLE_WRITE_ACTION_DEL)
+ table->data_vec[ix].attr.gid_type = old_gid_type;
+ }
if (table->data_vec[ix].attr.ndev &&
table->data_vec[ix].attr.ndev != old_net_dev)
dev_hold(table->data_vec[ix].attr.ndev);
@@ -405,7 +411,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
for (ix = 0; ix < table->sz; ix++)
if (table->data_vec[ix].attr.ndev == ndev)
- if (!del_gid(ib_dev, port, table, ix, false))
+ if (!del_gid(ib_dev, port, table, ix,
+ !!(table->data_vec[ix].props &
+ GID_TABLE_ENTRY_DEFAULT)))
deleted = true;
write_unlock_irq(&table->rwlock);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 93ab0ae97..ad1b1adcf 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -708,17 +708,6 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
complete(&id_priv->comp);
}
-static int cma_disable_callback(struct rdma_id_private *id_priv,
- enum rdma_cm_state state)
-{
- mutex_lock(&id_priv->handler_mutex);
- if (id_priv->state != state) {
- mutex_unlock(&id_priv->handler_mutex);
- return -EINVAL;
- }
- return 0;
-}
-
struct rdma_cm_id *rdma_create_id(struct net *net,
rdma_cm_event_handler event_handler,
void *context, enum rdma_port_space ps,
@@ -800,6 +789,7 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
if (id->device != pd->device)
return -EINVAL;
+ qp_init_attr->port_num = id->port_num;
qp = ib_create_qp(pd, qp_init_attr);
if (IS_ERR(qp))
return PTR_ERR(qp);
@@ -1670,11 +1660,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
struct rdma_cm_event event;
int ret = 0;
+ mutex_lock(&id_priv->handler_mutex);
if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
- cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
+ id_priv->state != RDMA_CM_CONNECT) ||
(ib_event->event == IB_CM_TIMEWAIT_EXIT &&
- cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
- return 0;
+ id_priv->state != RDMA_CM_DISCONNECT))
+ goto out;
memset(&event, 0, sizeof event);
switch (ib_event->event) {
@@ -1869,7 +1860,7 @@ static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_e
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
- struct rdma_id_private *listen_id, *conn_id;
+ struct rdma_id_private *listen_id, *conn_id = NULL;
struct rdma_cm_event event;
struct net_device *net_dev;
int offset, ret;
@@ -1883,9 +1874,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
goto net_dev_put;
}
- if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) {
+ mutex_lock(&listen_id->handler_mutex);
+ if (listen_id->state != RDMA_CM_LISTEN) {
ret = -ECONNABORTED;
- goto net_dev_put;
+ goto err1;
}
memset(&event, 0, sizeof event);
@@ -1975,8 +1967,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
- if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
- return 0;
+ mutex_lock(&id_priv->handler_mutex);
+ if (id_priv->state != RDMA_CM_CONNECT)
+ goto out;
memset(&event, 0, sizeof event);
switch (iw_event->event) {
@@ -2028,6 +2021,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
return ret;
}
+out:
mutex_unlock(&id_priv->handler_mutex);
return ret;
}
@@ -2038,13 +2032,15 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
struct rdma_cm_id *new_cm_id;
struct rdma_id_private *listen_id, *conn_id;
struct rdma_cm_event event;
- int ret;
+ int ret = -ECONNABORTED;
struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
listen_id = cm_id->context;
- if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
- return -ECONNABORTED;
+
+ mutex_lock(&listen_id->handler_mutex);
+ if (listen_id->state != RDMA_CM_LISTEN)
+ goto out;
/* Create a new RDMA id for the new IW CM ID */
new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
@@ -3215,8 +3211,9 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
int ret = 0;
- if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
- return 0;
+ mutex_lock(&id_priv->handler_mutex);
+ if (id_priv->state != RDMA_CM_CONNECT)
+ goto out;
memset(&event, 0, sizeof event);
switch (ib_event->event) {
@@ -3672,12 +3669,13 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
struct rdma_id_private *id_priv;
struct cma_multicast *mc = multicast->context;
struct rdma_cm_event event;
- int ret;
+ int ret = 0;
id_priv = mc->id_priv;
- if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
- cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
- return 0;
+ mutex_lock(&id_priv->handler_mutex);
+ if (id_priv->state != RDMA_CM_ADDR_BOUND &&
+ id_priv->state != RDMA_CM_ADDR_RESOLVED)
+ goto out;
if (!status)
status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
@@ -3719,6 +3717,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
return 0;
}
+out:
mutex_unlock(&id_priv->handler_mutex);
return 0;
}
@@ -3877,12 +3876,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
if (addr->sa_family == AF_INET) {
- if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+ mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
true);
- if (!err) {
- mc->igmp_joined = true;
- mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
+ if (!err)
+ mc->igmp_joined = true;
}
} else {
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
@@ -4294,7 +4293,8 @@ static int __init cma_init(void)
if (ret)
goto err;
- if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
+ if (ibnl_add_client(RDMA_NL_RDMA_CM, ARRAY_SIZE(cma_cb_table),
+ cma_cb_table))
pr_warn("RDMA CMA: failed to add netlink callback\n");
cma_configfs_init();
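The cma.c hunks above all make the same transformation: instead of cma_disable_callback(), which took the handler mutex only around a state test, each event handler now holds id_priv->handler_mutex across the whole callback and bails out through a common label when the id has already moved on. Schematically (names abridged, not literal code):

static int some_event_handler(struct rdma_id_private *id_priv)
{
	int ret = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != EXPECTED_STATE)
		goto out;	/* stale event for a departing id: ignore it */

	/* ... handle the event; the state cannot change underneath us ... */

out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}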
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index eab322157..19d499dca 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -137,4 +137,20 @@ static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
return _upper == upper;
}
+int addr_init(void);
+void addr_cleanup(void);
+
+int ib_mad_init(void);
+void ib_mad_cleanup(void);
+
+int ib_sa_init(void);
+void ib_sa_cleanup(void);
+
+int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int ib_nl_handle_set_timeout(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
+ struct netlink_callback *cb);
+
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 109798440..5c155fa91 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -661,6 +661,9 @@ int ib_query_port(struct ib_device *device,
if (err || port_attr->subnet_prefix)
return err;
+ if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
+ return 0;
+
err = ib_query_gid(device, port_num, 0, &gid, NULL);
if (err)
return err;
@@ -955,6 +958,29 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);
+static struct ibnl_client_cbs ibnl_ls_cb_table[] = {
+ [RDMA_NL_LS_OP_RESOLVE] = {
+ .dump = ib_nl_handle_resolve_resp,
+ .module = THIS_MODULE },
+ [RDMA_NL_LS_OP_SET_TIMEOUT] = {
+ .dump = ib_nl_handle_set_timeout,
+ .module = THIS_MODULE },
+ [RDMA_NL_LS_OP_IP_RESOLVE] = {
+ .dump = ib_nl_handle_ip_res_resp,
+ .module = THIS_MODULE },
+};
+
+static int ib_add_ibnl_clients(void)
+{
+ return ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ibnl_ls_cb_table),
+ ibnl_ls_cb_table);
+}
+
+static void ib_remove_ibnl_clients(void)
+{
+ ibnl_remove_client(RDMA_NL_LS);
+}
+
static int __init ib_core_init(void)
{
int ret;
@@ -983,10 +1009,42 @@ static int __init ib_core_init(void)
goto err_sysfs;
}
+ ret = addr_init();
+ if (ret) {
+ pr_warn("Could't init IB address resolution\n");
+ goto err_ibnl;
+ }
+
+ ret = ib_mad_init();
+ if (ret) {
+ pr_warn("Couldn't init IB MAD\n");
+ goto err_addr;
+ }
+
+ ret = ib_sa_init();
+ if (ret) {
+ pr_warn("Couldn't init SA\n");
+ goto err_mad;
+ }
+
+ ret = ib_add_ibnl_clients();
+ if (ret) {
+ pr_warn("Couldn't register ibnl clients\n");
+ goto err_sa;
+ }
+
ib_cache_setup();
return 0;
+err_sa:
+ ib_sa_cleanup();
+err_mad:
+ ib_mad_cleanup();
+err_addr:
+ addr_cleanup();
+err_ibnl:
+ ibnl_cleanup();
err_sysfs:
class_unregister(&ib_class);
err_comp:
@@ -999,6 +1057,10 @@ err:
static void __exit ib_core_cleanup(void)
{
ib_cache_cleanup();
+ ib_remove_ibnl_clients();
+ ib_sa_cleanup();
+ ib_mad_cleanup();
+ addr_cleanup();
ibnl_cleanup();
class_unregister(&ib_class);
destroy_workqueue(ib_comp_wq);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index e28a160cd..f0572049d 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -459,7 +459,7 @@ static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
if (pm_addr->ss_family == AF_INET) {
struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;
- if (pm4_addr->sin_addr.s_addr == INADDR_ANY) {
+ if (pm4_addr->sin_addr.s_addr == htonl(INADDR_ANY)) {
struct sockaddr_in *cm4_addr =
(struct sockaddr_in *)cm_addr;
struct sockaddr_in *cm4_outaddr =
@@ -1175,7 +1175,7 @@ static int __init iw_cm_init(void)
if (ret)
pr_err("iw_cm: couldn't init iwpm\n");
- ret = ibnl_add_client(RDMA_NL_IWCM, RDMA_NL_IWPM_NUM_OPS,
+ ret = ibnl_add_client(RDMA_NL_IWCM, ARRAY_SIZE(iwcm_nl_cb_table),
iwcm_nl_cb_table);
if (ret)
pr_err("iw_cm: couldn't register netlink callbacks\n");
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 43e3fa271..1c41b95ce 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -506,7 +506,7 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb,
if (!nlmsg_request) {
pr_info("%s: Could not find a matching request (seq = %u)\n",
__func__, msg_seq);
- return -EINVAL;
+ return -EINVAL;
}
pm_msg = nlmsg_request->req_buffer;
local_sockaddr = (struct sockaddr_storage *)
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 9b2bf2fb2..b65e06c56 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -634,6 +634,7 @@ static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {
pr_warn("%s Unable to put NLMSG_DONE\n", __func__);
+ dev_kfree_skb(skb);
return -ENOMEM;
}
nlh->nlmsg_type = NLMSG_DONE;
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 9fa5bf33f..2d49228f2 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -47,11 +47,7 @@
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("kernel IB MAD API");
-MODULE_AUTHOR("Hal Rosenstock");
-MODULE_AUTHOR("Sean Hefty");
+#include "core_priv.h"
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
@@ -1642,9 +1638,9 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
/* Now, check to see if there are any methods still in use */
if (!check_method_table(method)) {
/* If not, release management method table */
- kfree(method);
- class->method_table[mgmt_class] = NULL;
- /* Any management classes left ? */
+ kfree(method);
+ class->method_table[mgmt_class] = NULL;
+ /* Any management classes left ? */
if (!check_class_table(class)) {
/* If not, release management class table */
kfree(class);
@@ -3316,7 +3312,7 @@ static struct ib_client mad_client = {
.remove = ib_mad_remove_device
};
-static int __init ib_mad_init_module(void)
+int ib_mad_init(void)
{
mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
@@ -3334,10 +3330,7 @@ static int __init ib_mad_init_module(void)
return 0;
}
-static void __exit ib_mad_cleanup_module(void)
+void ib_mad_cleanup(void)
{
ib_unregister_client(&mad_client);
}
-
-module_init(ib_mad_init_module);
-module_exit(ib_mad_cleanup_module);
diff --git a/drivers/infiniband/core/mr_pool.c b/drivers/infiniband/core/mr_pool.c
new file mode 100644
index 000000000..49d478b2e
--- /dev/null
+++ b/drivers/infiniband/core/mr_pool.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <rdma/ib_verbs.h>
+#include <rdma/mr_pool.h>
+
+struct ib_mr *ib_mr_pool_get(struct ib_qp *qp, struct list_head *list)
+{
+ struct ib_mr *mr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->mr_lock, flags);
+ mr = list_first_entry_or_null(list, struct ib_mr, qp_entry);
+ if (mr) {
+ list_del(&mr->qp_entry);
+ qp->mrs_used++;
+ }
+ spin_unlock_irqrestore(&qp->mr_lock, flags);
+
+ return mr;
+}
+EXPORT_SYMBOL(ib_mr_pool_get);
+
+void ib_mr_pool_put(struct ib_qp *qp, struct list_head *list, struct ib_mr *mr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->mr_lock, flags);
+ list_add(&mr->qp_entry, list);
+ qp->mrs_used--;
+ spin_unlock_irqrestore(&qp->mr_lock, flags);
+}
+EXPORT_SYMBOL(ib_mr_pool_put);
+
+int ib_mr_pool_init(struct ib_qp *qp, struct list_head *list, int nr,
+ enum ib_mr_type type, u32 max_num_sg)
+{
+ struct ib_mr *mr;
+ unsigned long flags;
+ int ret, i;
+
+ for (i = 0; i < nr; i++) {
+ mr = ib_alloc_mr(qp->pd, type, max_num_sg);
+ if (IS_ERR(mr)) {
+ ret = PTR_ERR(mr);
+ goto out;
+ }
+
+ spin_lock_irqsave(&qp->mr_lock, flags);
+ list_add_tail(&mr->qp_entry, list);
+ spin_unlock_irqrestore(&qp->mr_lock, flags);
+ }
+
+ return 0;
+out:
+ ib_mr_pool_destroy(qp, list);
+ return ret;
+}
+EXPORT_SYMBOL(ib_mr_pool_init);
+
+void ib_mr_pool_destroy(struct ib_qp *qp, struct list_head *list)
+{
+ struct ib_mr *mr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->mr_lock, flags);
+ while (!list_empty(list)) {
+ mr = list_first_entry(list, struct ib_mr, qp_entry);
+ list_del(&mr->qp_entry);
+
+ spin_unlock_irqrestore(&qp->mr_lock, flags);
+ ib_dereg_mr(mr);
+ spin_lock_irqsave(&qp->mr_lock, flags);
+ }
+ spin_unlock_irqrestore(&qp->mr_lock, flags);
+}
+EXPORT_SYMBOL(ib_mr_pool_destroy);
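
The pool functions above are consumed by the new rw.c code later in this
patch. As a hedged illustration (the caller, the pool size of 8 and the
max_num_sg of 32 are all hypothetical), a ULP-side sketch might look like:

	/* Minimal sketch, assuming a fully created QP with qp->pd set. */
	#include <rdma/ib_verbs.h>
	#include <rdma/mr_pool.h>

	static int example_use_mr_pool(struct ib_qp *qp)
	{
		struct ib_mr *mr;
		int ret;

		/* Pre-allocate 8 REG MRs, each covering up to 32 SG entries. */
		ret = ib_mr_pool_init(qp, &qp->rdma_mrs, 8,
				      IB_MR_TYPE_MEM_REG, 32);
		if (ret)
			return ret;

		/* Check one MR out of the pool and hand it straight back. */
		mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
		if (mr)
			ib_mr_pool_put(qp, &qp->rdma_mrs, mr);

		ib_mr_pool_destroy(qp, &qp->rdma_mrs);
		return 0;
	}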
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 250937cb9..a83ec28a1 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -93,6 +93,18 @@ enum {
struct mcast_member;
+/*
+ * There are 4 types of join states:
+ * FullMember, NonMember, SendOnlyNonMember, SendOnlyFullMember.
+ */
+enum {
+ FULLMEMBER_JOIN,
+ NONMEMBER_JOIN,
+ SENDONLY_NONMEBER_JOIN,
+ SENDONLY_FULLMEMBER_JOIN,
+ NUM_JOIN_MEMBERSHIP_TYPES,
+};
+
struct mcast_group {
struct ib_sa_mcmember_rec rec;
struct rb_node node;
@@ -102,7 +114,7 @@ struct mcast_group {
struct list_head pending_list;
struct list_head active_list;
struct mcast_member *last_join;
- int members[3];
+ int members[NUM_JOIN_MEMBERSHIP_TYPES];
atomic_t refcount;
enum mcast_group_state state;
struct ib_sa_query *query;
@@ -220,8 +232,9 @@ static void queue_join(struct mcast_member *member)
}
/*
- * A multicast group has three types of members: full member, non member, and
- * send only member. We need to keep track of the number of members of each
+ * A multicast group has four types of members: full member, non member,
+ * send-only non member and send-only full member.
+ * We need to keep track of the number of members of each
 * type based on their join state. Adjust the number of members that belong to
 * the specified join states.
*/
@@ -229,7 +242,7 @@ static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
int i;
- for (i = 0; i < 3; i++, join_state >>= 1)
+ for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1)
if (join_state & 0x1)
group->members[i] += inc;
}
@@ -245,7 +258,7 @@ static u8 get_leave_state(struct mcast_group *group)
u8 leave_state = 0;
int i;
- for (i = 0; i < 3; i++)
+ for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++)
if (!group->members[i])
leave_state |= (0x1 << i);
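
The join_state byte carried in the MCMemberRecord is a bitmask whose bit i
corresponds to entry i of the enum introduced above, which is why the loops
now iterate NUM_JOIN_MEMBERSHIP_TYPES times. A hedged illustration (the
helper name is hypothetical):

	/* Illustration only: SendOnlyFullMember joins set bit 3. */
	static inline bool example_is_sendonly_full_join(u8 join_state)
	{
		return join_state & (1 << SENDONLY_FULLMEMBER_JOIN);
	}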
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index d47df9356..9b8c20c82 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -151,12 +151,11 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
struct ibnl_client *client;
int type = nlh->nlmsg_type;
int index = RDMA_NL_GET_CLIENT(type);
- int op = RDMA_NL_GET_OP(type);
+ unsigned int op = RDMA_NL_GET_OP(type);
list_for_each_entry(client, &client_list, list) {
if (client->index == index) {
- if (op < 0 || op >= client->nops ||
- !client->cb_table[op].dump)
+ if (op >= client->nops || !client->cb_table[op].dump)
return -EINVAL;
/*
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
new file mode 100644
index 000000000..1eb9b1294
--- /dev/null
+++ b/drivers/infiniband/core/rw.c
@@ -0,0 +1,727 @@
+/*
+ * Copyright (c) 2016 HGST, a Western Digital Company.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <rdma/mr_pool.h>
+#include <rdma/rw.h>
+
+enum {
+ RDMA_RW_SINGLE_WR,
+ RDMA_RW_MULTI_WR,
+ RDMA_RW_MR,
+ RDMA_RW_SIG_MR,
+};
+
+static bool rdma_rw_force_mr;
+module_param_named(force_mr, rdma_rw_force_mr, bool, 0);
+MODULE_PARM_DESC(force_mr, "Force usage of MRs for RDMA READ/WRITE operations");
+
+/*
+ * Check if the device might use memory registration. This is currently only
+ * true for iWARP devices. In the future we can hopefully fine-tune this based
+ * on HCA driver input.
+ */
+static inline bool rdma_rw_can_use_mr(struct ib_device *dev, u8 port_num)
+{
+ if (rdma_protocol_iwarp(dev, port_num))
+ return true;
+ if (unlikely(rdma_rw_force_mr))
+ return true;
+ return false;
+}
+
+/*
+ * Check if the device will use memory registration for this RW operation.
+ * We currently always use memory registrations for iWARP RDMA READs, and
+ * have a debug option to force usage of MRs.
+ *
+ * XXX: In the future we can hopefully fine-tune this based on HCA driver
+ * input.
+ */
+static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
+ enum dma_data_direction dir, int dma_nents)
+{
+ if (rdma_protocol_iwarp(dev, port_num) && dir == DMA_FROM_DEVICE)
+ return true;
+ if (unlikely(rdma_rw_force_mr))
+ return true;
+ return false;
+}
+
+static inline u32 rdma_rw_max_sge(struct ib_device *dev,
+ enum dma_data_direction dir)
+{
+ return dir == DMA_TO_DEVICE ?
+ dev->attrs.max_sge : dev->attrs.max_sge_rd;
+}
+
+static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
+{
+ /* arbitrary limit to avoid allocating gigantic resources */
+ return min_t(u32, dev->attrs.max_fast_reg_page_list_len, 256);
+}
+
+static int rdma_rw_init_one_mr(struct ib_qp *qp, u8 port_num,
+ struct rdma_rw_reg_ctx *reg, struct scatterlist *sg,
+ u32 sg_cnt, u32 offset)
+{
+ u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
+ u32 nents = min(sg_cnt, pages_per_mr);
+ int count = 0, ret;
+
+ reg->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
+ if (!reg->mr)
+ return -EAGAIN;
+
+ if (reg->mr->need_inval) {
+ reg->inv_wr.opcode = IB_WR_LOCAL_INV;
+ reg->inv_wr.ex.invalidate_rkey = reg->mr->lkey;
+ reg->inv_wr.next = &reg->reg_wr.wr;
+ count++;
+ } else {
+ reg->inv_wr.next = NULL;
+ }
+
+ ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
+ if (ret < nents) {
+ ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
+ return -EINVAL;
+ }
+
+ reg->reg_wr.wr.opcode = IB_WR_REG_MR;
+ reg->reg_wr.mr = reg->mr;
+ reg->reg_wr.access = IB_ACCESS_LOCAL_WRITE;
+ if (rdma_protocol_iwarp(qp->device, port_num))
+ reg->reg_wr.access |= IB_ACCESS_REMOTE_WRITE;
+ count++;
+
+ reg->sge.addr = reg->mr->iova;
+ reg->sge.length = reg->mr->length;
+ return count;
+}
+
+static int rdma_rw_init_mr_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u8 port_num, struct scatterlist *sg, u32 sg_cnt, u32 offset,
+ u64 remote_addr, u32 rkey, enum dma_data_direction dir)
+{
+ u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
+ int i, j, ret = 0, count = 0;
+
+ ctx->nr_ops = (sg_cnt + pages_per_mr - 1) / pages_per_mr;
+ ctx->reg = kcalloc(ctx->nr_ops, sizeof(*ctx->reg), GFP_KERNEL);
+ if (!ctx->reg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < ctx->nr_ops; i++) {
+ struct rdma_rw_reg_ctx *prev = i ? &ctx->reg[i - 1] : NULL;
+ struct rdma_rw_reg_ctx *reg = &ctx->reg[i];
+ u32 nents = min(sg_cnt, pages_per_mr);
+
+ ret = rdma_rw_init_one_mr(qp, port_num, reg, sg, sg_cnt,
+ offset);
+ if (ret < 0)
+ goto out_free;
+ count += ret;
+
+ if (prev) {
+ if (reg->mr->need_inval)
+ prev->wr.wr.next = &reg->inv_wr;
+ else
+ prev->wr.wr.next = &reg->reg_wr.wr;
+ }
+
+ reg->reg_wr.wr.next = &reg->wr.wr;
+
+ reg->wr.wr.sg_list = &reg->sge;
+ reg->wr.wr.num_sge = 1;
+ reg->wr.remote_addr = remote_addr;
+ reg->wr.rkey = rkey;
+ if (dir == DMA_TO_DEVICE) {
+ reg->wr.wr.opcode = IB_WR_RDMA_WRITE;
+ } else if (!rdma_cap_read_inv(qp->device, port_num)) {
+ reg->wr.wr.opcode = IB_WR_RDMA_READ;
+ } else {
+ reg->wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
+ reg->wr.wr.ex.invalidate_rkey = reg->mr->lkey;
+ }
+ count++;
+
+ remote_addr += reg->sge.length;
+ sg_cnt -= nents;
+ for (j = 0; j < nents; j++)
+ sg = sg_next(sg);
+ offset = 0;
+ }
+
+ ctx->type = RDMA_RW_MR;
+ return count;
+
+out_free:
+ while (--i >= 0)
+ ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
+ kfree(ctx->reg);
+out:
+ return ret;
+}
+
+static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ struct scatterlist *sg, u32 sg_cnt, u32 offset,
+ u64 remote_addr, u32 rkey, enum dma_data_direction dir)
+{
+ struct ib_device *dev = qp->pd->device;
+ u32 max_sge = rdma_rw_max_sge(dev, dir);
+ struct ib_sge *sge;
+ u32 total_len = 0, i, j;
+
+ ctx->nr_ops = DIV_ROUND_UP(sg_cnt, max_sge);
+
+ ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL);
+ if (!ctx->map.sges)
+ goto out;
+
+ ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL);
+ if (!ctx->map.wrs)
+ goto out_free_sges;
+
+ for (i = 0; i < ctx->nr_ops; i++) {
+ struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i];
+ u32 nr_sge = min(sg_cnt, max_sge);
+
+ if (dir == DMA_TO_DEVICE)
+ rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
+ else
+ rdma_wr->wr.opcode = IB_WR_RDMA_READ;
+ rdma_wr->remote_addr = remote_addr + total_len;
+ rdma_wr->rkey = rkey;
+ rdma_wr->wr.sg_list = sge;
+
+ for (j = 0; j < nr_sge; j++, sg = sg_next(sg)) {
+ rdma_wr->wr.num_sge++;
+
+ sge->addr = ib_sg_dma_address(dev, sg) + offset;
+ sge->length = ib_sg_dma_len(dev, sg) - offset;
+ sge->lkey = qp->pd->local_dma_lkey;
+
+ total_len += sge->length;
+ sge++;
+ sg_cnt--;
+ offset = 0;
+ }
+
+ if (i + 1 < ctx->nr_ops)
+ rdma_wr->wr.next = &ctx->map.wrs[i + 1].wr;
+ }
+
+ ctx->type = RDMA_RW_MULTI_WR;
+ return ctx->nr_ops;
+
+out_free_sges:
+ kfree(ctx->map.sges);
+out:
+ return -ENOMEM;
+}
+
+static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ struct scatterlist *sg, u32 offset, u64 remote_addr, u32 rkey,
+ enum dma_data_direction dir)
+{
+ struct ib_device *dev = qp->pd->device;
+ struct ib_rdma_wr *rdma_wr = &ctx->single.wr;
+
+ ctx->nr_ops = 1;
+
+ ctx->single.sge.lkey = qp->pd->local_dma_lkey;
+ ctx->single.sge.addr = ib_sg_dma_address(dev, sg) + offset;
+ ctx->single.sge.length = ib_sg_dma_len(dev, sg) - offset;
+
+ memset(rdma_wr, 0, sizeof(*rdma_wr));
+ if (dir == DMA_TO_DEVICE)
+ rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
+ else
+ rdma_wr->wr.opcode = IB_WR_RDMA_READ;
+ rdma_wr->wr.sg_list = &ctx->single.sge;
+ rdma_wr->wr.num_sge = 1;
+ rdma_wr->remote_addr = remote_addr;
+ rdma_wr->rkey = rkey;
+
+ ctx->type = RDMA_RW_SINGLE_WR;
+ return 1;
+}
+
+/**
+ * rdma_rw_ctx_init - initialize an RDMA READ/WRITE context
+ * @ctx: context to initialize
+ * @qp: queue pair to operate on
+ * @port_num: port num to which the connection is bound
+ * @sg: scatterlist to READ/WRITE from/to
+ * @sg_cnt: number of entries in @sg
+ * @sg_offset: current byte offset into @sg
+ * @remote_addr: remote address to read/write (relative to @rkey)
+ * @rkey: remote key to operate on
+ * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
+ *
+ * Returns the number of WQEs that will be needed on the work queue if
+ * successful, or a negative error code.
+ */
+int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
+ u64 remote_addr, u32 rkey, enum dma_data_direction dir)
+{
+ struct ib_device *dev = qp->pd->device;
+ int ret;
+
+ ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+ if (!ret)
+ return -ENOMEM;
+ sg_cnt = ret;
+
+ /*
+ * Skip to the S/G entry that sg_offset falls into:
+ */
+ for (;;) {
+ u32 len = ib_sg_dma_len(dev, sg);
+
+ if (sg_offset < len)
+ break;
+
+ sg = sg_next(sg);
+ sg_offset -= len;
+ sg_cnt--;
+ }
+
+ ret = -EIO;
+ if (WARN_ON_ONCE(sg_cnt == 0))
+ goto out_unmap_sg;
+
+ if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
+ ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
+ sg_offset, remote_addr, rkey, dir);
+ } else if (sg_cnt > 1) {
+ ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
+ remote_addr, rkey, dir);
+ } else {
+ ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
+ remote_addr, rkey, dir);
+ }
+
+ if (ret < 0)
+ goto out_unmap_sg;
+ return ret;
+
+out_unmap_sg:
+ ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_rw_ctx_init);
+
+/**
+ * rdma_rw_ctx_signature_init - initialize a RW context with signature offload
+ * @ctx: context to initialize
+ * @qp: queue pair to operate on
+ * @port_num: port num to which the connection is bound
+ * @sg: scatterlist to READ/WRITE from/to
+ * @sg_cnt: number of entries in @sg
+ * @prot_sg: scatterlist to READ/WRITE protection information from/to
+ * @prot_sg_cnt: number of entries in @prot_sg
+ * @sig_attrs: signature offloading algorithms
+ * @remote_addr: remote address to read/write (relative to @rkey)
+ * @rkey: remote key to operate on
+ * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
+ *
+ * Returns the number of WQEs that will be needed on the work queue if
+ * successful, or a negative error code.
+ */
+int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ struct scatterlist *prot_sg, u32 prot_sg_cnt,
+ struct ib_sig_attrs *sig_attrs,
+ u64 remote_addr, u32 rkey, enum dma_data_direction dir)
+{
+ struct ib_device *dev = qp->pd->device;
+ u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device);
+ struct ib_rdma_wr *rdma_wr;
+ struct ib_send_wr *prev_wr = NULL;
+ int count = 0, ret;
+
+ if (sg_cnt > pages_per_mr || prot_sg_cnt > pages_per_mr) {
+ pr_err("SG count too large\n");
+ return -EINVAL;
+ }
+
+ ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+ if (!ret)
+ return -ENOMEM;
+ sg_cnt = ret;
+
+ ret = ib_dma_map_sg(dev, prot_sg, prot_sg_cnt, dir);
+ if (!ret) {
+ ret = -ENOMEM;
+ goto out_unmap_sg;
+ }
+ prot_sg_cnt = ret;
+
+ ctx->type = RDMA_RW_SIG_MR;
+ ctx->nr_ops = 1;
+ ctx->sig = kcalloc(1, sizeof(*ctx->sig), GFP_KERNEL);
+ if (!ctx->sig) {
+ ret = -ENOMEM;
+ goto out_unmap_prot_sg;
+ }
+
+ ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->data, sg, sg_cnt, 0);
+ if (ret < 0)
+ goto out_free_ctx;
+ count += ret;
+ prev_wr = &ctx->sig->data.reg_wr.wr;
+
+ if (prot_sg_cnt) {
+ ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
+ prot_sg, prot_sg_cnt, 0);
+ if (ret < 0)
+ goto out_destroy_data_mr;
+ count += ret;
+
+ if (ctx->sig->prot.inv_wr.next)
+ prev_wr->next = &ctx->sig->prot.inv_wr;
+ else
+ prev_wr->next = &ctx->sig->prot.reg_wr.wr;
+ prev_wr = &ctx->sig->prot.reg_wr.wr;
+ } else {
+ ctx->sig->prot.mr = NULL;
+ }
+
+ ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
+ if (!ctx->sig->sig_mr) {
+ ret = -EAGAIN;
+ goto out_destroy_prot_mr;
+ }
+
+ if (ctx->sig->sig_mr->need_inval) {
+ memset(&ctx->sig->sig_inv_wr, 0, sizeof(ctx->sig->sig_inv_wr));
+
+ ctx->sig->sig_inv_wr.opcode = IB_WR_LOCAL_INV;
+ ctx->sig->sig_inv_wr.ex.invalidate_rkey = ctx->sig->sig_mr->rkey;
+
+ prev_wr->next = &ctx->sig->sig_inv_wr;
+ prev_wr = &ctx->sig->sig_inv_wr;
+ }
+
+ ctx->sig->sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
+ ctx->sig->sig_wr.wr.wr_cqe = NULL;
+ ctx->sig->sig_wr.wr.sg_list = &ctx->sig->data.sge;
+ ctx->sig->sig_wr.wr.num_sge = 1;
+ ctx->sig->sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
+ ctx->sig->sig_wr.sig_attrs = sig_attrs;
+ ctx->sig->sig_wr.sig_mr = ctx->sig->sig_mr;
+ if (prot_sg_cnt)
+ ctx->sig->sig_wr.prot = &ctx->sig->prot.sge;
+ prev_wr->next = &ctx->sig->sig_wr.wr;
+ prev_wr = &ctx->sig->sig_wr.wr;
+ count++;
+
+ ctx->sig->sig_sge.addr = 0;
+ ctx->sig->sig_sge.length = ctx->sig->data.sge.length;
+ if (sig_attrs->wire.sig_type != IB_SIG_TYPE_NONE)
+ ctx->sig->sig_sge.length += ctx->sig->prot.sge.length;
+
+ rdma_wr = &ctx->sig->data.wr;
+ rdma_wr->wr.sg_list = &ctx->sig->sig_sge;
+ rdma_wr->wr.num_sge = 1;
+ rdma_wr->remote_addr = remote_addr;
+ rdma_wr->rkey = rkey;
+ if (dir == DMA_TO_DEVICE)
+ rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
+ else
+ rdma_wr->wr.opcode = IB_WR_RDMA_READ;
+ prev_wr->next = &rdma_wr->wr;
+ prev_wr = &rdma_wr->wr;
+ count++;
+
+ return count;
+
+out_destroy_prot_mr:
+ if (prot_sg_cnt)
+ ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
+out_destroy_data_mr:
+ ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
+out_free_ctx:
+ kfree(ctx->sig);
+out_unmap_prot_sg:
+ ib_dma_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
+out_unmap_sg:
+ ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
+
+/*
+ * Now that we are going to post the WRs we can update the lkey and need_inval
+ * state on the MRs. If we were doing this at init time, we would get double
+ * or missing invalidations if a context was initialized but not actually
+ * posted.
+ */
+static void rdma_rw_update_lkey(struct rdma_rw_reg_ctx *reg, bool need_inval)
+{
+ reg->mr->need_inval = need_inval;
+ ib_update_fast_reg_key(reg->mr, ib_inc_rkey(reg->mr->lkey));
+ reg->reg_wr.key = reg->mr->lkey;
+ reg->sge.lkey = reg->mr->lkey;
+}
+
+/**
+ * rdma_rw_ctx_wrs - return chain of WRs for an RDMA READ or WRITE operation
+ * @ctx: context to operate on
+ * @qp: queue pair to operate on
+ * @port_num: port num to which the connection is bound
+ * @cqe: completion queue entry for the last WR
+ * @chain_wr: WR to append to the posted chain
+ *
+ * Return the WR chain for the set of RDMA READ/WRITE operations described by
+ * @ctx, as well as any memory registration operations needed. If @chain_wr
+ * is non-NULL the WR it points to will be appended to the chain of WRs posted.
+ * If @chain_wr is not set @cqe must be set so that the caller gets a
+ * completion notification.
+ */
+struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
+{
+ struct ib_send_wr *first_wr, *last_wr;
+ int i;
+
+ switch (ctx->type) {
+ case RDMA_RW_SIG_MR:
+ rdma_rw_update_lkey(&ctx->sig->data, true);
+ if (ctx->sig->prot.mr)
+ rdma_rw_update_lkey(&ctx->sig->prot, true);
+
+ ctx->sig->sig_mr->need_inval = true;
+ ib_update_fast_reg_key(ctx->sig->sig_mr,
+ ib_inc_rkey(ctx->sig->sig_mr->lkey));
+ ctx->sig->sig_sge.lkey = ctx->sig->sig_mr->lkey;
+
+ if (ctx->sig->data.inv_wr.next)
+ first_wr = &ctx->sig->data.inv_wr;
+ else
+ first_wr = &ctx->sig->data.reg_wr.wr;
+ last_wr = &ctx->sig->data.wr.wr;
+ break;
+ case RDMA_RW_MR:
+ for (i = 0; i < ctx->nr_ops; i++) {
+ rdma_rw_update_lkey(&ctx->reg[i],
+ ctx->reg[i].wr.wr.opcode !=
+ IB_WR_RDMA_READ_WITH_INV);
+ }
+
+ if (ctx->reg[0].inv_wr.next)
+ first_wr = &ctx->reg[0].inv_wr;
+ else
+ first_wr = &ctx->reg[0].reg_wr.wr;
+ last_wr = &ctx->reg[ctx->nr_ops - 1].wr.wr;
+ break;
+ case RDMA_RW_MULTI_WR:
+ first_wr = &ctx->map.wrs[0].wr;
+ last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr;
+ break;
+ case RDMA_RW_SINGLE_WR:
+ first_wr = &ctx->single.wr.wr;
+ last_wr = &ctx->single.wr.wr;
+ break;
+ default:
+ BUG();
+ }
+
+ if (chain_wr) {
+ last_wr->next = chain_wr;
+ } else {
+ last_wr->wr_cqe = cqe;
+ last_wr->send_flags |= IB_SEND_SIGNALED;
+ }
+
+ return first_wr;
+}
+EXPORT_SYMBOL(rdma_rw_ctx_wrs);
+
+/**
+ * rdma_rw_ctx_post - post an RDMA READ or RDMA WRITE operation
+ * @ctx: context to operate on
+ * @qp: queue pair to operate on
+ * @port_num: port num to which the connection is bound
+ * @cqe: completion queue entry for the last WR
+ * @chain_wr: WR to append to the posted chain
+ *
+ * Post the set of RDMA READ/WRITE operations described by @ctx, as well as
+ * any memory registration operations needed. If @chain_wr is non-NULL the
+ * WR it points to will be appended to the chain of WRs posted. If @chain_wr
+ * is not set @cqe must be set so that the caller gets a completion
+ * notification.
+ */
+int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
+{
+ struct ib_send_wr *first_wr, *bad_wr;
+
+ first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
+ return ib_post_send(qp, first_wr, &bad_wr);
+}
+EXPORT_SYMBOL(rdma_rw_ctx_post);
+
+/**
+ * rdma_rw_ctx_destroy - release all resources allocated by rdma_rw_ctx_init
+ * @ctx: context to release
+ * @qp: queue pair to operate on
+ * @port_num: port num to which the connection is bound
+ * @sg: scatterlist that was used for the READ/WRITE
+ * @sg_cnt: number of entries in @sg
+ * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
+ */
+void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
+ struct scatterlist *sg, u32 sg_cnt, enum dma_data_direction dir)
+{
+ int i;
+
+ switch (ctx->type) {
+ case RDMA_RW_MR:
+ for (i = 0; i < ctx->nr_ops; i++)
+ ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->reg[i].mr);
+ kfree(ctx->reg);
+ break;
+ case RDMA_RW_MULTI_WR:
+ kfree(ctx->map.wrs);
+ kfree(ctx->map.sges);
+ break;
+ case RDMA_RW_SINGLE_WR:
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+}
+EXPORT_SYMBOL(rdma_rw_ctx_destroy);
+
+/**
+ * rdma_rw_ctx_destroy_signature - release all resources allocated by
+ *	rdma_rw_ctx_signature_init
+ * @ctx: context to release
+ * @qp: queue pair to operate on
+ * @port_num: port num to which the connection is bound
+ * @sg: scatterlist that was used for the READ/WRITE
+ * @sg_cnt: number of entries in @sg
+ * @prot_sg: scatterlist that was used for the READ/WRITE of the PI
+ * @prot_sg_cnt: number of entries in @prot_sg
+ * @dir: %DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
+ */
+void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
+ u8 port_num, struct scatterlist *sg, u32 sg_cnt,
+ struct scatterlist *prot_sg, u32 prot_sg_cnt,
+ enum dma_data_direction dir)
+{
+ if (WARN_ON_ONCE(ctx->type != RDMA_RW_SIG_MR))
+ return;
+
+ ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->data.mr);
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+
+ if (ctx->sig->prot.mr) {
+ ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);
+ ib_dma_unmap_sg(qp->pd->device, prot_sg, prot_sg_cnt, dir);
+ }
+
+ ib_mr_pool_put(qp, &qp->sig_mrs, ctx->sig->sig_mr);
+ kfree(ctx->sig);
+}
+EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);
+
+void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
+{
+ u32 factor;
+
+ WARN_ON_ONCE(attr->port_num == 0);
+
+ /*
+ * Each context needs at least one RDMA READ or WRITE WR.
+ *
+ * For some hardware we might need more, eventually we should ask the
+ * HCA driver for a multiplier here.
+ */
+ factor = 1;
+
+ /*
+ * If the device needs MRs to perform RDMA READ or WRITE operations,
+ * each context also needs two additional WRs, one for the MR
+ * registration and one for its invalidation.
+ */
+ if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
+ factor += 6; /* (inv + reg) * (data + prot + sig) */
+ else if (rdma_rw_can_use_mr(dev, attr->port_num))
+ factor += 2; /* inv + reg */
+
+ attr->cap.max_send_wr += factor * attr->cap.max_rdma_ctxs;
+
+ /*
+ * The device may not actually support everything we asked for above, so
+ * clamp max_send_wr to the device limit and live with what we get.
+ */
+ attr->cap.max_send_wr =
+ min_t(u32, attr->cap.max_send_wr, dev->attrs.max_qp_wr);
+}
+
+int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr)
+{
+ struct ib_device *dev = qp->pd->device;
+ u32 nr_mrs = 0, nr_sig_mrs = 0;
+ int ret = 0;
+
+ if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN) {
+ nr_sig_mrs = attr->cap.max_rdma_ctxs;
+ nr_mrs = attr->cap.max_rdma_ctxs * 2;
+ } else if (rdma_rw_can_use_mr(dev, attr->port_num)) {
+ nr_mrs = attr->cap.max_rdma_ctxs;
+ }
+
+ if (nr_mrs) {
+ ret = ib_mr_pool_init(qp, &qp->rdma_mrs, nr_mrs,
+ IB_MR_TYPE_MEM_REG,
+ rdma_rw_fr_page_list_len(dev));
+ if (ret) {
+ pr_err("%s: failed to allocated %d MRs\n",
+ __func__, nr_mrs);
+ return ret;
+ }
+ }
+
+ if (nr_sig_mrs) {
+ ret = ib_mr_pool_init(qp, &qp->sig_mrs, nr_sig_mrs,
+ IB_MR_TYPE_SIGNATURE, 2);
+ if (ret) {
+ pr_err("%s: failed to allocated %d SIG MRs\n",
+ __func__, nr_mrs);
+ goto out_free_rdma_mrs;
+ }
+ }
+
+ return 0;
+
+out_free_rdma_mrs:
+ ib_mr_pool_destroy(qp, &qp->rdma_mrs);
+ return ret;
+}
+
+void rdma_rw_cleanup_mrs(struct ib_qp *qp)
+{
+ ib_mr_pool_destroy(qp, &qp->sig_mrs);
+ ib_mr_pool_destroy(qp, &qp->rdma_mrs);
+}
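
Taken together, the functions above give ULPs a single API for issuing RDMA
READ/WRITE work: initialize a context, post it, and destroy it once the
completion arrives. A hedged usage sketch (all names hypothetical; error
unwinding trimmed; the caller must supply a struct ib_cqe with .done set):

	#include <rdma/rw.h>

	static int example_rdma_read(struct ib_qp *qp, u8 port_num,
				     struct scatterlist *sg, u32 sg_cnt,
				     u64 remote_addr, u32 rkey,
				     struct ib_cqe *cqe)
	{
		struct rdma_rw_ctx ctx;
		int ret;

		/* Map the scatterlist and build the WR chain. */
		ret = rdma_rw_ctx_init(&ctx, qp, port_num, sg, sg_cnt, 0,
				       remote_addr, rkey, DMA_FROM_DEVICE);
		if (ret < 0)
			return ret;

		/* Post the chain; cqe->done fires when the READ completes. */
		ret = rdma_rw_ctx_post(&ctx, qp, port_num, cqe, NULL);
		if (ret)
			rdma_rw_ctx_destroy(&ctx, qp, port_num, sg, sg_cnt,
					    DMA_FROM_DEVICE);
		return ret;
	}

On success the ULP would call rdma_rw_ctx_destroy() from its completion
handler to return the MRs to the pool and unmap the scatterlist.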
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 8a09c0fb2..e95538650 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -53,10 +53,6 @@
#include "sa.h"
#include "core_priv.h"
-MODULE_AUTHOR("Roland Dreier");
-MODULE_DESCRIPTION("InfiniBand subnet administration query support");
-MODULE_LICENSE("Dual BSD/GPL");
-
#define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000
@@ -119,6 +115,12 @@ struct ib_sa_guidinfo_query {
struct ib_sa_query sa_query;
};
+struct ib_sa_classport_info_query {
+ void (*callback)(int, struct ib_class_port_info *, void *);
+ void *context;
+ struct ib_sa_query sa_query;
+};
+
struct ib_sa_mcmember_query {
void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
void *context;
@@ -392,6 +394,82 @@ static const struct ib_field service_rec_table[] = {
.size_bits = 2*64 },
};
+#define CLASSPORTINFO_REC_FIELD(field) \
+ .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
+ .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \
+ .field_name = "ib_class_port_info:" #field
+
+static const struct ib_field classport_info_rec_table[] = {
+ { CLASSPORTINFO_REC_FIELD(base_version),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 8 },
+ { CLASSPORTINFO_REC_FIELD(class_version),
+ .offset_words = 0,
+ .offset_bits = 8,
+ .size_bits = 8 },
+ { CLASSPORTINFO_REC_FIELD(capability_mask),
+ .offset_words = 0,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
+ .offset_words = 1,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { CLASSPORTINFO_REC_FIELD(redirect_gid),
+ .offset_words = 2,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
+ .offset_words = 6,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { CLASSPORTINFO_REC_FIELD(redirect_lid),
+ .offset_words = 7,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { CLASSPORTINFO_REC_FIELD(redirect_pkey),
+ .offset_words = 7,
+ .offset_bits = 16,
+ .size_bits = 16 },
+
+ { CLASSPORTINFO_REC_FIELD(redirect_qp),
+ .offset_words = 8,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { CLASSPORTINFO_REC_FIELD(redirect_qkey),
+ .offset_words = 9,
+ .offset_bits = 0,
+ .size_bits = 32 },
+
+ { CLASSPORTINFO_REC_FIELD(trap_gid),
+ .offset_words = 10,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { CLASSPORTINFO_REC_FIELD(trap_tcslfl),
+ .offset_words = 14,
+ .offset_bits = 0,
+ .size_bits = 32 },
+
+ { CLASSPORTINFO_REC_FIELD(trap_lid),
+ .offset_words = 15,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { CLASSPORTINFO_REC_FIELD(trap_pkey),
+ .offset_words = 15,
+ .offset_bits = 16,
+ .size_bits = 16 },
+
+ { CLASSPORTINFO_REC_FIELD(trap_hlqp),
+ .offset_words = 16,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { CLASSPORTINFO_REC_FIELD(trap_qkey),
+ .offset_words = 17,
+ .offset_bits = 0,
+ .size_bits = 32 },
+};
+
#define GUIDINFO_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
.struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
@@ -536,7 +614,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
if (!data) {
- kfree_skb(skb);
+ nlmsg_free(skb);
return -EMSGSIZE;
}
@@ -705,8 +783,8 @@ static void ib_nl_request_timeout(struct work_struct *work)
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
-static int ib_nl_handle_set_timeout(struct sk_buff *skb,
- struct netlink_callback *cb)
+int ib_nl_handle_set_timeout(struct sk_buff *skb,
+ struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
int timeout, delta, abs_delta;
@@ -782,8 +860,8 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
return 1;
}
-static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
- struct netlink_callback *cb)
+int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+ struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
unsigned long flags;
@@ -838,15 +916,6 @@ resp_out:
return skb->len;
}
-static struct ibnl_client_cbs ib_sa_cb_table[] = {
- [RDMA_NL_LS_OP_RESOLVE] = {
- .dump = ib_nl_handle_resolve_resp,
- .module = THIS_MODULE },
- [RDMA_NL_LS_OP_SET_TIMEOUT] = {
- .dump = ib_nl_handle_set_timeout,
- .module = THIS_MODULE },
-};
-
static void free_sm_ah(struct kref *kref)
{
struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
@@ -1645,6 +1714,97 @@ err1:
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
+/* Support GET of the SA ClassPortInfo attribute */
+static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
+ int status,
+ struct ib_sa_mad *mad)
+{
+ struct ib_sa_classport_info_query *query =
+ container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
+
+ if (mad) {
+ struct ib_class_port_info rec;
+
+ ib_unpack(classport_info_rec_table,
+ ARRAY_SIZE(classport_info_rec_table),
+ mad->data, &rec);
+ query->callback(status, &rec, query->context);
+ } else {
+ query->callback(status, NULL, query->context);
+ }
+}
+
+static void ib_sa_portclass_info_rec_release(struct ib_sa_query *sa_query)
+{
+ kfree(container_of(sa_query, struct ib_sa_classport_info_query,
+ sa_query));
+}
+
+int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
+ struct ib_device *device, u8 port_num,
+ int timeout_ms, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct ib_class_port_info *resp,
+ void *context),
+ void *context,
+ struct ib_sa_query **sa_query)
+{
+ struct ib_sa_classport_info_query *query;
+ struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
+ struct ib_sa_port *port;
+ struct ib_mad_agent *agent;
+ struct ib_sa_mad *mad;
+ int ret;
+
+ if (!sa_dev)
+ return -ENODEV;
+
+ port = &sa_dev->port[port_num - sa_dev->start_port];
+ agent = port->agent;
+
+ query = kzalloc(sizeof(*query), gfp_mask);
+ if (!query)
+ return -ENOMEM;
+
+ query->sa_query.port = port;
+ ret = alloc_mad(&query->sa_query, gfp_mask);
+ if (ret)
+ goto err1;
+
+ ib_sa_client_get(client);
+ query->sa_query.client = client;
+ query->callback = callback;
+ query->context = context;
+
+ mad = query->sa_query.mad_buf->mad;
+ init_mad(mad, agent);
+
+ query->sa_query.callback = callback ? ib_sa_classport_info_rec_callback : NULL;
+
+ query->sa_query.release = ib_sa_portclass_info_rec_release;
+ /* support GET only */
+ mad->mad_hdr.method = IB_MGMT_METHOD_GET;
+ mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
+ mad->sa_hdr.comp_mask = 0;
+ *sa_query = &query->sa_query;
+
+ ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
+ if (ret < 0)
+ goto err2;
+
+ return ret;
+
+err2:
+ *sa_query = NULL;
+ ib_sa_client_put(query->sa_query.client);
+ free_mad(&query->sa_query);
+
+err1:
+ kfree(query);
+ return ret;
+}
+EXPORT_SYMBOL(ib_sa_classport_info_rec_query);
+
static void send_handler(struct ib_mad_agent *agent,
struct ib_mad_send_wc *mad_send_wc)
{
@@ -1794,7 +1954,7 @@ static void ib_sa_remove_one(struct ib_device *device, void *client_data)
kfree(sa_dev);
}
-static int __init ib_sa_init(void)
+int ib_sa_init(void)
{
int ret;
@@ -1820,17 +1980,10 @@ static int __init ib_sa_init(void)
goto err3;
}
- if (ibnl_add_client(RDMA_NL_LS, RDMA_NL_LS_NUM_OPS,
- ib_sa_cb_table)) {
- pr_err("Failed to add netlink callback\n");
- ret = -EINVAL;
- goto err4;
- }
INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);
return 0;
-err4:
- destroy_workqueue(ib_nl_wq);
+
err3:
mcast_cleanup();
err2:
@@ -1839,9 +1992,8 @@ err1:
return ret;
}
-static void __exit ib_sa_cleanup(void)
+void ib_sa_cleanup(void)
{
- ibnl_remove_client(RDMA_NL_LS);
cancel_delayed_work(&ib_nl_timed_work);
flush_workqueue(ib_nl_wq);
destroy_workqueue(ib_nl_wq);
@@ -1849,6 +2001,3 @@ static void __exit ib_sa_cleanup(void)
ib_unregister_client(&sa_client);
idr_destroy(&query_idr);
}
-
-module_init(ib_sa_init);
-module_exit(ib_sa_cleanup);
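
A hedged sketch of a consumer of the new ClassPortInfo query (callback and
wrapper are hypothetical; a registered struct ib_sa_client is assumed):

	static void example_cpi_cb(int status, struct ib_class_port_info *rec,
				   void *context)
	{
		if (status || !rec)
			pr_warn("classport info query failed: %d\n", status);
		else
			pr_info("SA class version %u\n", rec->class_version);
	}

	static int example_query_cpi(struct ib_sa_client *client,
				     struct ib_device *dev, u8 port_num,
				     struct ib_sa_query **query)
	{
		return ib_sa_classport_info_rec_query(client, dev, port_num,
						      1000 /* ms */, GFP_KERNEL,
						      example_cpi_cb, NULL,
						      query);
	}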
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 14606afbf..60df4f8e8 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -56,8 +56,10 @@ struct ib_port {
struct gid_attr_group *gid_attr_group;
struct attribute_group gid_group;
struct attribute_group pkey_group;
- u8 port_num;
struct attribute_group *pma_table;
+ struct attribute_group *hw_stats_ag;
+ struct rdma_hw_stats *hw_stats;
+ u8 port_num;
};
struct port_attribute {
@@ -80,6 +82,18 @@ struct port_table_attribute {
__be16 attr_id;
};
+struct hw_stats_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+ ssize_t (*store)(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t count);
+ int index;
+ u8 port_num;
+};
+
static ssize_t port_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -516,6 +530,7 @@ static PORT_PMA_ATTR(port_xmit_data , 12, 32, 192);
static PORT_PMA_ATTR(port_rcv_data , 13, 32, 224);
static PORT_PMA_ATTR(port_xmit_packets , 14, 32, 256);
static PORT_PMA_ATTR(port_rcv_packets , 15, 32, 288);
+static PORT_PMA_ATTR(port_xmit_wait , 0, 32, 320);
/*
* Counters added by extended set
@@ -546,6 +561,7 @@ static struct attribute *pma_attrs[] = {
&port_pma_attr_port_rcv_data.attr.attr,
&port_pma_attr_port_xmit_packets.attr.attr,
&port_pma_attr_port_rcv_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
NULL
};
@@ -565,6 +581,7 @@ static struct attribute *pma_attrs_ext[] = {
&port_pma_attr_ext_port_xmit_data.attr.attr,
&port_pma_attr_ext_port_rcv_data.attr.attr,
&port_pma_attr_ext_port_xmit_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
&port_pma_attr_ext_port_rcv_packets.attr.attr,
&port_pma_attr_ext_unicast_rcv_packets.attr.attr,
&port_pma_attr_ext_unicast_xmit_packets.attr.attr,
@@ -590,6 +607,7 @@ static struct attribute *pma_attrs_noietf[] = {
&port_pma_attr_ext_port_rcv_data.attr.attr,
&port_pma_attr_ext_port_xmit_packets.attr.attr,
&port_pma_attr_ext_port_rcv_packets.attr.attr,
+ &port_pma_attr_port_xmit_wait.attr.attr,
NULL
};
@@ -733,6 +751,220 @@ static struct attribute_group *get_counter_table(struct ib_device *dev,
return &pma_group;
}
+static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
+ u8 port_num, int index)
+{
+ int ret;
+
+ if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan))
+ return 0;
+ ret = dev->get_hw_stats(dev, stats, port_num, index);
+ if (ret < 0)
+ return ret;
+ if (ret == stats->num_counters)
+ stats->timestamp = jiffies;
+
+ return 0;
+}
+
+static ssize_t print_hw_stat(struct rdma_hw_stats *stats, int index, char *buf)
+{
+ return sprintf(buf, "%llu\n", stats->value[index]);
+}
+
+static ssize_t show_hw_stats(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct ib_device *dev;
+ struct ib_port *port;
+ struct hw_stats_attribute *hsa;
+ struct rdma_hw_stats *stats;
+ int ret;
+
+ hsa = container_of(attr, struct hw_stats_attribute, attr);
+ if (!hsa->port_num) {
+ dev = container_of((struct device *)kobj,
+ struct ib_device, dev);
+ stats = dev->hw_stats;
+ } else {
+ port = container_of(kobj, struct ib_port, kobj);
+ dev = port->ibdev;
+ stats = port->hw_stats;
+ }
+ ret = update_hw_stats(dev, stats, hsa->port_num, hsa->index);
+ if (ret)
+ return ret;
+ return print_hw_stat(stats, hsa->index, buf);
+}
+
+static ssize_t show_stats_lifespan(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct hw_stats_attribute *hsa;
+ int msecs;
+
+ hsa = container_of(attr, struct hw_stats_attribute, attr);
+ if (!hsa->port_num) {
+ struct ib_device *dev = container_of((struct device *)kobj,
+ struct ib_device, dev);
+ msecs = jiffies_to_msecs(dev->hw_stats->lifespan);
+ } else {
+ struct ib_port *p = container_of(kobj, struct ib_port, kobj);
+ msecs = jiffies_to_msecs(p->hw_stats->lifespan);
+ }
+ return sprintf(buf, "%d\n", msecs);
+}
+
+static ssize_t set_stats_lifespan(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hw_stats_attribute *hsa;
+ int msecs;
+ int jiffies;
+ int ret;
+
+ ret = kstrtoint(buf, 10, &msecs);
+ if (ret)
+ return ret;
+ if (msecs < 0 || msecs > 10000)
+ return -EINVAL;
+ jiffies = msecs_to_jiffies(msecs);
+ hsa = container_of(attr, struct hw_stats_attribute, attr);
+ if (!hsa->port_num) {
+ struct ib_device *dev = container_of((struct device *)kobj,
+ struct ib_device, dev);
+ dev->hw_stats->lifespan = jiffies;
+ } else {
+ struct ib_port *p = container_of(kobj, struct ib_port, kobj);
+ p->hw_stats->lifespan = jiffies;
+ }
+ return count;
+}
+
+static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group)
+{
+ struct attribute **attr;
+
+ sysfs_remove_group(kobj, attr_group);
+
+ for (attr = attr_group->attrs; *attr; attr++)
+ kfree(*attr);
+ kfree(attr_group);
+}
+
+static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
+{
+ struct hw_stats_attribute *hsa;
+
+ hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
+ if (!hsa)
+ return NULL;
+
+ hsa->attr.name = (char *)name;
+ hsa->attr.mode = S_IRUGO;
+ hsa->show = show_hw_stats;
+ hsa->store = NULL;
+ hsa->index = index;
+ hsa->port_num = port_num;
+
+ return &hsa->attr;
+}
+
+static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
+{
+ struct hw_stats_attribute *hsa;
+
+ hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
+ if (!hsa)
+ return NULL;
+
+ hsa->attr.name = name;
+ hsa->attr.mode = S_IWUSR | S_IRUGO;
+ hsa->show = show_stats_lifespan;
+ hsa->store = set_stats_lifespan;
+ hsa->index = 0;
+ hsa->port_num = port_num;
+
+ return &hsa->attr;
+}
+
+static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
+ u8 port_num)
+{
+ struct attribute_group *hsag;
+ struct rdma_hw_stats *stats;
+ int i, ret;
+
+ stats = device->alloc_hw_stats(device, port_num);
+
+ if (!stats)
+ return;
+
+ if (!stats->names || stats->num_counters <= 0)
+ goto err_free_stats;
+
+ /*
+ * Two extra attribute elements here, one for the lifespan entry and
+ * one to NULL terminate the list for the sysfs core code
+ */
+ hsag = kzalloc(sizeof(*hsag) +
+ sizeof(void *) * (stats->num_counters + 2),
+ GFP_KERNEL);
+ if (!hsag)
+ goto err_free_stats;
+
+ ret = device->get_hw_stats(device, stats, port_num,
+ stats->num_counters);
+ if (ret != stats->num_counters)
+ goto err_free_hsag;
+
+ stats->timestamp = jiffies;
+
+ hsag->name = "hw_counters";
+ hsag->attrs = (void *)hsag + sizeof(*hsag);
+
+ for (i = 0; i < stats->num_counters; i++) {
+ hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
+ if (!hsag->attrs[i])
+ goto err;
+ sysfs_attr_init(hsag->attrs[i]);
+ }
+
+ /* treat an error here as non-fatal */
+ hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
+ if (hsag->attrs[i])
+ sysfs_attr_init(hsag->attrs[i]);
+
+ if (port) {
+ struct kobject *kobj = &port->kobj;
+ ret = sysfs_create_group(kobj, hsag);
+ if (ret)
+ goto err;
+ port->hw_stats_ag = hsag;
+ port->hw_stats = stats;
+ } else {
+ struct kobject *kobj = &device->dev.kobj;
+ ret = sysfs_create_group(kobj, hsag);
+ if (ret)
+ goto err;
+ device->hw_stats_ag = hsag;
+ device->hw_stats = stats;
+ }
+
+ return;
+
+err:
+ for (; i >= 0; i--)
+ kfree(hsag->attrs[i]);
+err_free_hsag:
+ kfree(hsag);
+err_free_stats:
+ kfree(stats);
+ return;
+}
+
static int add_port(struct ib_device *device, int port_num,
int (*port_callback)(struct ib_device *,
u8, struct kobject *))
@@ -835,6 +1067,14 @@ static int add_port(struct ib_device *device, int port_num,
goto err_remove_pkey;
}
+ /*
+ * If port == 0, it means we have only one port and the parent
+ * device, not this port device, should be the holder of the
+ * hw_counters
+ */
+ if (device->alloc_hw_stats && port_num)
+ setup_hw_stats(device, p, port_num);
+
list_add_tail(&p->kobj.entry, &device->port_list);
kobject_uevent(&p->kobj, KOBJ_ADD);
@@ -972,120 +1212,6 @@ static struct device_attribute *ib_class_attributes[] = {
&dev_attr_node_desc
};
-/* Show a given an attribute in the statistics group */
-static ssize_t show_protocol_stat(const struct device *device,
- struct device_attribute *attr, char *buf,
- unsigned offset)
-{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
- union rdma_protocol_stats stats;
- ssize_t ret;
-
- ret = dev->get_protocol_stats(dev, &stats);
- if (ret)
- return ret;
-
- return sprintf(buf, "%llu\n",
- (unsigned long long) ((u64 *) &stats)[offset]);
-}
-
-/* generate a read-only iwarp statistics attribute */
-#define IW_STATS_ENTRY(name) \
-static ssize_t show_##name(struct device *device, \
- struct device_attribute *attr, char *buf) \
-{ \
- return show_protocol_stat(device, attr, buf, \
- offsetof(struct iw_protocol_stats, name) / \
- sizeof (u64)); \
-} \
-static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
-
-IW_STATS_ENTRY(ipInReceives);
-IW_STATS_ENTRY(ipInHdrErrors);
-IW_STATS_ENTRY(ipInTooBigErrors);
-IW_STATS_ENTRY(ipInNoRoutes);
-IW_STATS_ENTRY(ipInAddrErrors);
-IW_STATS_ENTRY(ipInUnknownProtos);
-IW_STATS_ENTRY(ipInTruncatedPkts);
-IW_STATS_ENTRY(ipInDiscards);
-IW_STATS_ENTRY(ipInDelivers);
-IW_STATS_ENTRY(ipOutForwDatagrams);
-IW_STATS_ENTRY(ipOutRequests);
-IW_STATS_ENTRY(ipOutDiscards);
-IW_STATS_ENTRY(ipOutNoRoutes);
-IW_STATS_ENTRY(ipReasmTimeout);
-IW_STATS_ENTRY(ipReasmReqds);
-IW_STATS_ENTRY(ipReasmOKs);
-IW_STATS_ENTRY(ipReasmFails);
-IW_STATS_ENTRY(ipFragOKs);
-IW_STATS_ENTRY(ipFragFails);
-IW_STATS_ENTRY(ipFragCreates);
-IW_STATS_ENTRY(ipInMcastPkts);
-IW_STATS_ENTRY(ipOutMcastPkts);
-IW_STATS_ENTRY(ipInBcastPkts);
-IW_STATS_ENTRY(ipOutBcastPkts);
-IW_STATS_ENTRY(tcpRtoAlgorithm);
-IW_STATS_ENTRY(tcpRtoMin);
-IW_STATS_ENTRY(tcpRtoMax);
-IW_STATS_ENTRY(tcpMaxConn);
-IW_STATS_ENTRY(tcpActiveOpens);
-IW_STATS_ENTRY(tcpPassiveOpens);
-IW_STATS_ENTRY(tcpAttemptFails);
-IW_STATS_ENTRY(tcpEstabResets);
-IW_STATS_ENTRY(tcpCurrEstab);
-IW_STATS_ENTRY(tcpInSegs);
-IW_STATS_ENTRY(tcpOutSegs);
-IW_STATS_ENTRY(tcpRetransSegs);
-IW_STATS_ENTRY(tcpInErrs);
-IW_STATS_ENTRY(tcpOutRsts);
-
-static struct attribute *iw_proto_stats_attrs[] = {
- &dev_attr_ipInReceives.attr,
- &dev_attr_ipInHdrErrors.attr,
- &dev_attr_ipInTooBigErrors.attr,
- &dev_attr_ipInNoRoutes.attr,
- &dev_attr_ipInAddrErrors.attr,
- &dev_attr_ipInUnknownProtos.attr,
- &dev_attr_ipInTruncatedPkts.attr,
- &dev_attr_ipInDiscards.attr,
- &dev_attr_ipInDelivers.attr,
- &dev_attr_ipOutForwDatagrams.attr,
- &dev_attr_ipOutRequests.attr,
- &dev_attr_ipOutDiscards.attr,
- &dev_attr_ipOutNoRoutes.attr,
- &dev_attr_ipReasmTimeout.attr,
- &dev_attr_ipReasmReqds.attr,
- &dev_attr_ipReasmOKs.attr,
- &dev_attr_ipReasmFails.attr,
- &dev_attr_ipFragOKs.attr,
- &dev_attr_ipFragFails.attr,
- &dev_attr_ipFragCreates.attr,
- &dev_attr_ipInMcastPkts.attr,
- &dev_attr_ipOutMcastPkts.attr,
- &dev_attr_ipInBcastPkts.attr,
- &dev_attr_ipOutBcastPkts.attr,
- &dev_attr_tcpRtoAlgorithm.attr,
- &dev_attr_tcpRtoMin.attr,
- &dev_attr_tcpRtoMax.attr,
- &dev_attr_tcpMaxConn.attr,
- &dev_attr_tcpActiveOpens.attr,
- &dev_attr_tcpPassiveOpens.attr,
- &dev_attr_tcpAttemptFails.attr,
- &dev_attr_tcpEstabResets.attr,
- &dev_attr_tcpCurrEstab.attr,
- &dev_attr_tcpInSegs.attr,
- &dev_attr_tcpOutSegs.attr,
- &dev_attr_tcpRetransSegs.attr,
- &dev_attr_tcpInErrs.attr,
- &dev_attr_tcpOutRsts.attr,
- NULL
-};
-
-static struct attribute_group iw_stats_group = {
- .name = "proto_stats",
- .attrs = iw_proto_stats_attrs,
-};
-
static void free_port_list_attributes(struct ib_device *device)
{
struct kobject *p, *t;
@@ -1093,6 +1219,10 @@ static void free_port_list_attributes(struct ib_device *device)
list_for_each_entry_safe(p, t, &device->port_list, entry) {
struct ib_port *port = container_of(p, struct ib_port, kobj);
list_del(&p->entry);
+ if (port->hw_stats) {
+ kfree(port->hw_stats);
+ free_hsag(&port->kobj, port->hw_stats_ag);
+ }
sysfs_remove_group(p, port->pma_table);
sysfs_remove_group(p, &port->pkey_group);
sysfs_remove_group(p, &port->gid_group);
@@ -1149,11 +1279,8 @@ int ib_device_register_sysfs(struct ib_device *device,
}
}
- if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats) {
- ret = sysfs_create_group(&class_dev->kobj, &iw_stats_group);
- if (ret)
- goto err_put;
- }
+ if (device->alloc_hw_stats)
+ setup_hw_stats(device, NULL, 0);
return 0;
@@ -1169,15 +1296,18 @@ err:
void ib_device_unregister_sysfs(struct ib_device *device)
{
- /* Hold kobject until ib_dealloc_device() */
- struct kobject *kobj_dev = kobject_get(&device->dev.kobj);
int i;
- if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats)
- sysfs_remove_group(kobj_dev, &iw_stats_group);
+ /* Hold kobject until ib_dealloc_device() */
+ kobject_get(&device->dev.kobj);
free_port_list_attributes(device);
+ if (device->hw_stats) {
+ kfree(device->hw_stats);
+ free_hsag(&device->dev.kobj, device->hw_stats_ag);
+ }
+
for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i)
device_remove_file(&device->dev, ib_class_attributes[i]);
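
On the driver side, opting in to the new interface only requires the two
callbacks consumed by setup_hw_stats() above. A hedged sketch (counter names
are hypothetical; this assumes the rdma_alloc_hw_stats_struct() helper added
alongside this sysfs code):

	static const char * const example_counter_names[] = {
		"rx_pkts",	/* hypothetical hardware counters */
		"tx_pkts",
	};

	static struct rdma_hw_stats *example_alloc_hw_stats(struct ib_device *ibdev,
							    u8 port_num)
	{
		/* One stats struct per port, refreshed at most every lifespan. */
		return rdma_alloc_hw_stats_struct(example_counter_names,
						  ARRAY_SIZE(example_counter_names),
						  RDMA_HW_STATS_DEFAULT_LIFESPAN);
	}

	static int example_get_hw_stats(struct ib_device *ibdev,
					struct rdma_hw_stats *stats,
					u8 port_num, int index)
	{
		stats->value[0] = 0;	/* read real values from hardware */
		stats->value[1] = 0;
		return ARRAY_SIZE(example_counter_names);
	}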
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6fdc7ecda..825021d10 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1747,7 +1747,7 @@ static int create_qp(struct ib_uverbs_file *file,
struct ib_srq *srq = NULL;
struct ib_qp *qp;
char *buf;
- struct ib_qp_init_attr attr;
+ struct ib_qp_init_attr attr = {};
struct ib_uverbs_ex_create_qp_resp resp;
int ret;
@@ -1833,7 +1833,8 @@ static int create_qp(struct ib_uverbs_file *file,
if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
IB_QP_CREATE_CROSS_CHANNEL |
IB_QP_CREATE_MANAGED_SEND |
- IB_QP_CREATE_MANAGED_RECV)) {
+ IB_QP_CREATE_MANAGED_RECV |
+ IB_QP_CREATE_SCATTER_FCS)) {
ret = -EINVAL;
goto err_put;
}
@@ -3088,8 +3089,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
if (cmd.comp_mask)
return -EINVAL;
- if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
- !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
+ if (!capable(CAP_NET_RAW))
return -EPERM;
if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
@@ -3655,6 +3655,11 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
resp.hca_core_clock = attr.hca_core_clock;
resp.response_length += sizeof(resp.hca_core_clock);
+ if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
+ goto end;
+
+ resp.device_cap_flags_ex = attr.device_cap_flags;
+ resp.response_length += sizeof(resp.device_cap_flags_ex);
end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
return err;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index b65b3541e..6298f54b4 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -48,6 +48,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
+#include <rdma/rw.h>
#include "core_priv.h"
@@ -510,12 +511,16 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
ah_attr->grh.dgid = sgid;
if (!rdma_cap_eth_ah(device, port_num)) {
- ret = ib_find_cached_gid_by_port(device, &dgid,
- IB_GID_TYPE_IB,
- port_num, NULL,
- &gid_index);
- if (ret)
- return ret;
+ if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
+ ret = ib_find_cached_gid_by_port(device, &dgid,
+ IB_GID_TYPE_IB,
+ port_num, NULL,
+ &gid_index);
+ if (ret)
+ return ret;
+ } else {
+ gid_index = 0;
+ }
}
ah_attr->grh.sgid_index = (u8) gid_index;
@@ -723,59 +728,89 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
}
EXPORT_SYMBOL(ib_open_qp);
+static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct ib_qp *real_qp = qp;
+
+ qp->event_handler = __ib_shared_qp_event_handler;
+ qp->qp_context = qp;
+ qp->pd = NULL;
+ qp->send_cq = qp->recv_cq = NULL;
+ qp->srq = NULL;
+ qp->xrcd = qp_init_attr->xrcd;
+ atomic_inc(&qp_init_attr->xrcd->usecnt);
+ INIT_LIST_HEAD(&qp->open_list);
+
+ qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
+ qp_init_attr->qp_context);
+ if (!IS_ERR(qp))
+ __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
+ else
+ real_qp->device->destroy_qp(real_qp);
+ return qp;
+}
+
struct ib_qp *ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr)
{
- struct ib_qp *qp, *real_qp;
- struct ib_device *device;
+ struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
+ struct ib_qp *qp;
+ int ret;
+
+ /*
+ * If the caller is using the RDMA API, calculate the resources
+ * needed for the RDMA READ/WRITE operations.
+ *
+ * Note that these callers need to pass in a port number.
+ */
+ if (qp_init_attr->cap.max_rdma_ctxs)
+ rdma_rw_init_qp(device, qp_init_attr);
- device = pd ? pd->device : qp_init_attr->xrcd->device;
qp = device->create_qp(pd, qp_init_attr, NULL);
+ if (IS_ERR(qp))
+ return qp;
+
+ qp->device = device;
+ qp->real_qp = qp;
+ qp->uobject = NULL;
+ qp->qp_type = qp_init_attr->qp_type;
+
+ atomic_set(&qp->usecnt, 0);
+ qp->mrs_used = 0;
+ spin_lock_init(&qp->mr_lock);
+ INIT_LIST_HEAD(&qp->rdma_mrs);
+ INIT_LIST_HEAD(&qp->sig_mrs);
+
+ if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
+ return ib_create_xrc_qp(qp, qp_init_attr);
+
+ qp->event_handler = qp_init_attr->event_handler;
+ qp->qp_context = qp_init_attr->qp_context;
+ if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
+ qp->recv_cq = NULL;
+ qp->srq = NULL;
+ } else {
+ qp->recv_cq = qp_init_attr->recv_cq;
+ atomic_inc(&qp_init_attr->recv_cq->usecnt);
+ qp->srq = qp_init_attr->srq;
+ if (qp->srq)
+ atomic_inc(&qp_init_attr->srq->usecnt);
+ }
- if (!IS_ERR(qp)) {
- qp->device = device;
- qp->real_qp = qp;
- qp->uobject = NULL;
- qp->qp_type = qp_init_attr->qp_type;
-
- atomic_set(&qp->usecnt, 0);
- if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
- qp->event_handler = __ib_shared_qp_event_handler;
- qp->qp_context = qp;
- qp->pd = NULL;
- qp->send_cq = qp->recv_cq = NULL;
- qp->srq = NULL;
- qp->xrcd = qp_init_attr->xrcd;
- atomic_inc(&qp_init_attr->xrcd->usecnt);
- INIT_LIST_HEAD(&qp->open_list);
-
- real_qp = qp;
- qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
- qp_init_attr->qp_context);
- if (!IS_ERR(qp))
- __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
- else
- real_qp->device->destroy_qp(real_qp);
- } else {
- qp->event_handler = qp_init_attr->event_handler;
- qp->qp_context = qp_init_attr->qp_context;
- if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
- qp->recv_cq = NULL;
- qp->srq = NULL;
- } else {
- qp->recv_cq = qp_init_attr->recv_cq;
- atomic_inc(&qp_init_attr->recv_cq->usecnt);
- qp->srq = qp_init_attr->srq;
- if (qp->srq)
- atomic_inc(&qp_init_attr->srq->usecnt);
- }
+ qp->pd = pd;
+ qp->send_cq = qp_init_attr->send_cq;
+ qp->xrcd = NULL;
- qp->pd = pd;
- qp->send_cq = qp_init_attr->send_cq;
- qp->xrcd = NULL;
+ atomic_inc(&pd->usecnt);
+ atomic_inc(&qp_init_attr->send_cq->usecnt);
- atomic_inc(&pd->usecnt);
- atomic_inc(&qp_init_attr->send_cq->usecnt);
+ if (qp_init_attr->cap.max_rdma_ctxs) {
+ ret = rdma_rw_init_mrs(qp, qp_init_attr);
+ if (ret) {
+ pr_err("failed to init MR pool ret= %d\n", ret);
+ ib_destroy_qp(qp);
+ qp = ERR_PTR(ret);
}
}
@@ -1250,6 +1285,8 @@ int ib_destroy_qp(struct ib_qp *qp)
struct ib_srq *srq;
int ret;
+ WARN_ON_ONCE(qp->mrs_used > 0);
+
if (atomic_read(&qp->usecnt))
return -EBUSY;
@@ -1261,6 +1298,9 @@ int ib_destroy_qp(struct ib_qp *qp)
rcq = qp->recv_cq;
srq = qp->srq;
+ if (!qp->uobject)
+ rdma_rw_cleanup_mrs(qp);
+
ret = qp->device->destroy_qp(qp);
if (!ret) {
if (pd)
@@ -1343,6 +1383,7 @@ struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
mr->pd = pd;
mr->uobject = NULL;
atomic_inc(&pd->usecnt);
+ mr->need_inval = false;
}
return mr;
@@ -1389,6 +1430,7 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
mr->pd = pd;
mr->uobject = NULL;
atomic_inc(&pd->usecnt);
+ mr->need_inval = false;
}
return mr;
@@ -1597,6 +1639,7 @@ EXPORT_SYMBOL(ib_set_vf_guid);
* @mr: memory region
* @sg: dma mapped scatterlist
* @sg_nents: number of entries in sg
+ * @sg_offset: offset in bytes into sg
* @page_size: page vector desired page size
*
* Constraints:
@@ -1615,17 +1658,15 @@ EXPORT_SYMBOL(ib_set_vf_guid);
* After this completes successfully, the memory region
* is ready for registration.
*/
-int ib_map_mr_sg(struct ib_mr *mr,
- struct scatterlist *sg,
- int sg_nents,
- unsigned int page_size)
+int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset, unsigned int page_size)
{
if (unlikely(!mr->device->map_mr_sg))
return -ENOSYS;
mr->page_size = page_size;
- return mr->device->map_mr_sg(mr, sg, sg_nents);
+ return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);
@@ -1635,6 +1676,10 @@ EXPORT_SYMBOL(ib_map_mr_sg);
* @mr: memory region
* @sgl: dma mapped scatterlist
* @sg_nents: number of entries in sg
+ * @sg_offset_p: IN: start offset in bytes into sg
+ * OUT: offset in bytes for element n of the sg of the first
+ * byte that has not been processed where n is the return
+ * value of this function.
* @set_page: driver page assignment function pointer
*
* Core service helper for drivers to convert the largest
@@ -1645,23 +1690,26 @@ EXPORT_SYMBOL(ib_map_mr_sg);
* Returns the number of sg elements that were assigned to
* a page vector.
*/
-int ib_sg_to_pages(struct ib_mr *mr,
- struct scatterlist *sgl,
- int sg_nents,
- int (*set_page)(struct ib_mr *, u64))
+int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
+ unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
struct scatterlist *sg;
u64 last_end_dma_addr = 0;
+ unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
unsigned int last_page_off = 0;
u64 page_mask = ~((u64)mr->page_size - 1);
int i, ret;
- mr->iova = sg_dma_address(&sgl[0]);
+ if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
+ return -EINVAL;
+
+ mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
mr->length = 0;
for_each_sg(sgl, sg, sg_nents, i) {
- u64 dma_addr = sg_dma_address(sg);
- unsigned int dma_len = sg_dma_len(sg);
+ u64 dma_addr = sg_dma_address(sg) + sg_offset;
+ u64 prev_addr = dma_addr;
+ unsigned int dma_len = sg_dma_len(sg) - sg_offset;
u64 end_dma_addr = dma_addr + dma_len;
u64 page_addr = dma_addr & page_mask;
@@ -1685,8 +1733,14 @@ int ib_sg_to_pages(struct ib_mr *mr,
do {
ret = set_page(mr, page_addr);
- if (unlikely(ret < 0))
- return i ? : ret;
+ if (unlikely(ret < 0)) {
+ sg_offset = prev_addr - sg_dma_address(sg);
+ mr->length += prev_addr - dma_addr;
+ if (sg_offset_p)
+ *sg_offset_p = sg_offset;
+ return i || sg_offset ? i : ret;
+ }
+ prev_addr = page_addr;
next_page:
page_addr += mr->page_size;
} while (page_addr < end_dma_addr);
@@ -1694,8 +1748,12 @@ next_page:
mr->length += dma_len;
last_end_dma_addr = end_dma_addr;
last_page_off = end_dma_addr & ~page_mask;
+
+ sg_offset = 0;
}
+ if (sg_offset_p)
+ *sg_offset_p = 0;
return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
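
With the reworked entry point, callers now pass a pointer to the starting
byte offset, which ib_sg_to_pages() also updates on a partial mapping. A
hedged sketch of the new calling convention (the offset value is arbitrary
and the helper is hypothetical):

	/* Map an MR starting 512 bytes into the first S/G element. */
	static int example_map_with_offset(struct ib_mr *mr,
					   struct scatterlist *sg, int sg_nents)
	{
		unsigned int sg_offset = 512;
		int n;

		n = ib_map_mr_sg(mr, sg, sg_nents, &sg_offset, PAGE_SIZE);
		if (n < sg_nents)
			return -EINVAL;	/* fewer entries mapped than asked */
		return 0;
	}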
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index c7ad0a4c8..c0c7cf8af 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
obj-$(CONFIG_INFINIBAND_NES) += nes/
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
+obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index de1c61b41..ada2e5009 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -327,7 +327,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
kfree(cq->sw_queue);
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << (cq->size_log2))
- * sizeof(struct t3_cqe), cq->queue,
+ * sizeof(struct t3_cqe) + 1, cq->queue,
dma_unmap_addr(cq, mapping));
cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
return err;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index d403231a4..3e8431b5c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -367,7 +367,7 @@ static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
*/
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
- printk(KERN_ERR MOD "ARP failure duing connect\n");
+ printk(KERN_ERR MOD "ARP failure during connect\n");
kfree_skb(skb);
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 3234a8be1..bb1a839d4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -783,15 +783,14 @@ static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-static int iwch_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents)
+static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
{
struct iwch_mr *mhp = to_iwch_mr(ibmr);
mhp->npages = 0;
- return ib_sg_to_pages(ibmr, sg, sg_nents, iwch_set_page);
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
}
static int iwch_destroy_qp(struct ib_qp *ib_qp)
@@ -1219,59 +1218,119 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
iwch_dev->rdev.rnic_info.pdev->device);
}
-static int iwch_get_mib(struct ib_device *ibdev,
- union rdma_protocol_stats *stats)
+enum counters {
+ IPINRECEIVES,
+ IPINHDRERRORS,
+ IPINADDRERRORS,
+ IPINUNKNOWNPROTOS,
+ IPINDISCARDS,
+ IPINDELIVERS,
+ IPOUTREQUESTS,
+ IPOUTDISCARDS,
+ IPOUTNOROUTES,
+ IPREASMTIMEOUT,
+ IPREASMREQDS,
+ IPREASMOKS,
+ IPREASMFAILS,
+ TCPACTIVEOPENS,
+ TCPPASSIVEOPENS,
+ TCPATTEMPTFAILS,
+ TCPESTABRESETS,
+ TCPCURRESTAB,
+ TCPINSEGS,
+ TCPOUTSEGS,
+ TCPRETRANSSEGS,
+ TCPINERRS,
+ TCPOUTRSTS,
+ TCPRTOMIN,
+ TCPRTOMAX,
+ NR_COUNTERS
+};
+
+static const char * const names[] = {
+ [IPINRECEIVES] = "ipInReceives",
+ [IPINHDRERRORS] = "ipInHdrErrors",
+ [IPINADDRERRORS] = "ipInAddrErrors",
+ [IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
+ [IPINDISCARDS] = "ipInDiscards",
+ [IPINDELIVERS] = "ipInDelivers",
+ [IPOUTREQUESTS] = "ipOutRequests",
+ [IPOUTDISCARDS] = "ipOutDiscards",
+ [IPOUTNOROUTES] = "ipOutNoRoutes",
+ [IPREASMTIMEOUT] = "ipReasmTimeout",
+ [IPREASMREQDS] = "ipReasmReqds",
+ [IPREASMOKS] = "ipReasmOKs",
+ [IPREASMFAILS] = "ipReasmFails",
+ [TCPACTIVEOPENS] = "tcpActiveOpens",
+ [TCPPASSIVEOPENS] = "tcpPassiveOpens",
+ [TCPATTEMPTFAILS] = "tcpAttemptFails",
+ [TCPESTABRESETS] = "tcpEstabResets",
+ [TCPCURRESTAB] = "tcpCurrEstab",
+ [TCPINSEGS] = "tcpInSegs",
+ [TCPOUTSEGS] = "tcpOutSegs",
+ [TCPRETRANSSEGS] = "tcpRetransSegs",
+ [TCPINERRS] = "tcpInErrs",
+ [TCPOUTRSTS] = "tcpOutRsts",
+ [TCPRTOMIN] = "tcpRtoMin",
+ [TCPRTOMAX] = "tcpRtoMax",
+};
+
+static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
+
+ /* Our driver only supports device level stats */
+ if (port_num != 0)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u8 port, int index)
{
struct iwch_dev *dev;
struct tp_mib_stats m;
int ret;
+ if (port != 0 || !stats)
+ return -ENOSYS;
+
PDBG("%s ibdev %p\n", __func__, ibdev);
dev = to_iwch_dev(ibdev);
ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
if (ret)
return -ENOSYS;
- memset(stats, 0, sizeof *stats);
- stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
- m.ipInReceive_lo;
- stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
- m.ipInHdrErrors_lo;
- stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
- m.ipInAddrErrors_lo;
- stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
- m.ipInUnknownProtos_lo;
- stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
- m.ipInDiscards_lo;
- stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
- m.ipInDelivers_lo;
- stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
- m.ipOutRequests_lo;
- stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
- m.ipOutDiscards_lo;
- stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
- m.ipOutNoRoutes_lo;
- stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
- stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
- stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
- stats->iw.ipReasmFails = (u64) m.ipReasmFails;
- stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
- stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
- stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
- stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
- stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
- stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
- stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
- m.tcpInSegs_lo;
- stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
- m.tcpOutSegs_lo;
- stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
- m.tcpRetransSeg_lo;
- stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
- m.tcpInErrs_lo;
- stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
- stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
- return 0;
+ stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
+ stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
+ stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
+ stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
+ stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
+ stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
+ stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
+ stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
+ stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
+ stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
+ stats->value[IPREASMREQDS] = m.ipReasmReqds;
+ stats->value[IPREASMOKS] = m.ipReasmOKs;
+ stats->value[IPREASMFAILS] = m.ipReasmFails;
+ stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
+ stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
+ stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
+ stats->value[TCPESTABRESETS] = m.tcpEstabResets;
+ stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
+ stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
+ stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
+ stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
+ stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
+ stats->value[TCPOUTRSTS] = m.tcpOutRsts;
+ stats->value[TCPRTOMIN] = m.tcpRtoMin;
+ stats->value[TCPRTOMAX] = m.tcpRtoMax;
+
+ return stats->num_counters;
}
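With the protocol-stats union gone, consumers address counters purely by
index against the allocated rdma_hw_stats buffer. A hedged sketch of the
consuming side (illustrative only; the real sysfs plumbing lives in
ib_core, and the helper below is hypothetical):

/* Hypothetical consumer: allocate the stats buffer once, then refresh
 * and read a single counter by index. */
static u64 sample_hw_counter(struct ib_device *ibdev, int index)
{
	struct rdma_hw_stats *stats;
	u64 val = 0;

	stats = ibdev->alloc_hw_stats(ibdev, 0);	/* 0 = device-level */
	if (!stats)
		return 0;
	if (ibdev->get_hw_stats(ibdev, stats, 0, index) > 0)
		val = stats->value[index];
	kfree(stats);
	return val;
}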
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -1374,7 +1433,8 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.req_notify_cq = iwch_arm_cq;
dev->ibdev.post_send = iwch_post_send;
dev->ibdev.post_recv = iwch_post_receive;
- dev->ibdev.get_protocol_stats = iwch_get_mib;
+ dev->ibdev.alloc_hw_stats = iwch_alloc_stats;
+ dev->ibdev.get_hw_stats = iwch_get_mib;
dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = iwch_port_immutable;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 651711370..a3a67216b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -119,7 +119,7 @@ MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
- "1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft"
+ "1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
" compliant (default=2)");
static int markers_enabled;
@@ -145,19 +145,35 @@ static struct sk_buff_head rxq;
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
+static int sched(struct c4iw_dev *dev, struct sk_buff *skb);
static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
+static void deref_cm_id(struct c4iw_ep_common *epc)
+{
+ epc->cm_id->rem_ref(epc->cm_id);
+ epc->cm_id = NULL;
+ set_bit(CM_ID_DEREFED, &epc->history);
+}
+
+static void ref_cm_id(struct c4iw_ep_common *epc)
+{
+ set_bit(CM_ID_REFED, &epc->history);
+ epc->cm_id->add_ref(epc->cm_id);
+}
+
static void deref_qp(struct c4iw_ep *ep)
{
c4iw_qp_rem_ref(&ep->com.qp->ibqp);
clear_bit(QP_REFERENCED, &ep->com.flags);
+ set_bit(QP_DEREFED, &ep->com.history);
}
static void ref_qp(struct c4iw_ep *ep)
{
set_bit(QP_REFERENCED, &ep->com.flags);
+ set_bit(QP_REFED, &ep->com.history);
c4iw_qp_add_ref(&ep->com.qp->ibqp);
}
@@ -201,6 +217,8 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
if (error < 0)
kfree_skb(skb);
+ else if (error == NET_XMIT_DROP)
+ return -ENOMEM;
return error < 0 ? error : 0;
}
@@ -290,12 +308,63 @@ static void *alloc_ep(int size, gfp_t gfp)
return epc;
}
+static void remove_ep_tid(struct c4iw_ep *ep)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->com.dev->lock, flags);
+ _remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
+ spin_unlock_irqrestore(&ep->com.dev->lock, flags);
+}
+
+static void insert_ep_tid(struct c4iw_ep *ep)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->com.dev->lock, flags);
+ _insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
+ spin_unlock_irqrestore(&ep->com.dev->lock, flags);
+}
+
+/*
+ * Atomically look up the ep ptr given the tid and grab a reference on the ep.
+ */
+static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
+{
+ struct c4iw_ep *ep;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ep = idr_find(&dev->hwtid_idr, tid);
+ if (ep)
+ c4iw_get_ep(&ep->com);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ep;
+}
+
+/*
+ * Atomically look up the ep ptr given the stid and grab a reference on the ep.
+ */
+static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
+ unsigned int stid)
+{
+ struct c4iw_listen_ep *ep;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ ep = idr_find(&dev->stid_idr, stid);
+ if (ep)
+ c4iw_get_ep(&ep->com);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ep;
+}
+
void _c4iw_free_ep(struct kref *kref)
{
struct c4iw_ep *ep;
ep = container_of(kref, struct c4iw_ep, com.kref);
- PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
+ PDBG("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
if (test_bit(QP_REFERENCED, &ep->com.flags))
deref_qp(ep);
if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
@@ -309,10 +378,11 @@ void _c4iw_free_ep(struct kref *kref)
(const u32 *)&sin6->sin6_addr.s6_addr,
1);
}
- remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
+ if (ep->mpa_skb)
+ kfree_skb(ep->mpa_skb);
}
kfree(ep);
}
@@ -320,6 +390,15 @@ void _c4iw_free_ep(struct kref *kref)
static void release_ep_resources(struct c4iw_ep *ep)
{
set_bit(RELEASE_RESOURCES, &ep->com.flags);
+
+ /*
+ * If we have a hwtid, then remove it from the idr table
+ * so lookups will no longer find this endpoint. Otherwise
+ * we have a race where one thread finds the ep ptr just
+ * before the other thread is freeing the ep memory.
+ */
+ if (ep->hwtid != -1)
+ remove_ep_tid(ep);
c4iw_put_ep(&ep->com);
}
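Each handler that looks an ep up through these helpers now owns a
reference and must drop it on every exit path; the conversions in the
hunks below all follow this shape. A minimal sketch of the pattern
(hypothetical handler, shown only to make the get/put pairing explicit):

/* Hypothetical handler: get_ep_from_tid() pins the ep so it cannot be
 * freed while we run; the final put releases our reference. */
static int example_cpl_handler(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_rx_data *hdr = cplhdr(skb);
	struct c4iw_ep *ep = get_ep_from_tid(dev, GET_TID(hdr));

	if (!ep)
		return 0;	/* ep already released; drop the message */
	/* ... act on ep; the reference keeps it alive ... */
	c4iw_put_ep(&ep->com);
	return 0;
}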
@@ -432,10 +511,74 @@ static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
- PDBG("%s c4iw_dev %p\n", __func__, handle);
+ pr_err(MOD "ARP failure\n");
kfree_skb(skb);
}
+static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
+{
+ pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
+}
+
+enum {
+ NUM_FAKE_CPLS = 2,
+ FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
+ FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
+};
+
+static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+
+ ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
+ release_ep_resources(ep);
+ return 0;
+}
+
+static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep;
+
+ ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
+ c4iw_put_ep(&ep->parent_ep->com);
+ release_ep_resources(ep);
+ return 0;
+}
+
+/*
+ * Fake up a special CPL opcode and call sched() so process_work() will call
+ * _put_ep_safe() in a safe context to free the ep resources. This is needed
+ * because ARP error handlers are called in an ATOMIC context, and
+ * _c4iw_free_ep() needs to block.
+ */
+static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
+ int cpl)
+{
+ struct cpl_act_establish *rpl = cplhdr(skb);
+
+ /* Set our special ARP_FAILURE opcode */
+ rpl->ot.opcode = cpl;
+
+ /*
+ * Save ep in the skb->cb area, after where sched() will save the dev
+ * ptr.
+ */
+ *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
+ sched(ep->com.dev, skb);
+}
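The fake opcodes work because they index the same handler table as real
CPLs once sched() defers the skb to the workqueue. A simplified sketch of
that dispatch loop (modeled on the driver's process_work(); details are
illustrative):

/* Simplified dispatch sketch: the opcode stamped by
 * queue_arp_failure_cpl() selects a work_handlers[] slot, including
 * the two FAKE_CPL_* entries registered further below. */
static void process_work_sketch(void)
{
	struct sk_buff *skb;
	struct c4iw_dev *dev;
	u8 opcode;

	while ((skb = skb_dequeue(&rxq)) != NULL) {
		dev = *((struct c4iw_dev **)(skb->cb + sizeof(void *)));
		opcode = ((struct cpl_act_establish *)cplhdr(skb))->ot.opcode;
		if (!work_handlers[opcode](dev, skb))
			kfree_skb(skb);
	}
}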
+
+/* Handle an ARP failure for an accept */
+static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
+{
+ struct c4iw_ep *ep = handle;
+
+ pr_err(MOD "ARP failure during accept - tid %u -dropping connection\n",
+ ep->hwtid);
+
+ __state_set(&ep->com, DEAD);
+ queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
+}
+
/*
* Handle an ARP failure for an active open.
*/
@@ -444,9 +587,8 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
struct c4iw_ep *ep = handle;
printk(KERN_ERR MOD "ARP failure during connect\n");
- kfree_skb(skb);
connect_reply_upcall(ep, -EHOSTUNREACH);
- state_set(&ep->com, DEAD);
+ __state_set(&ep->com, DEAD);
if (ep->com.remote_addr.ss_family == AF_INET6) {
struct sockaddr_in6 *sin6 =
(struct sockaddr_in6 *)&ep->com.local_addr;
@@ -455,9 +597,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
}
remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
- dst_release(ep->dst);
- cxgb4_l2t_release(ep->l2t);
- c4iw_put_ep(&ep->com);
+ queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}
/*
@@ -466,15 +606,21 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
*/
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
- struct c4iw_rdev *rdev = handle;
+ int ret;
+ struct c4iw_ep *ep = handle;
+ struct c4iw_rdev *rdev = &ep->com.dev->rdev;
struct cpl_abort_req *req = cplhdr(skb);
PDBG("%s rdev %p\n", __func__, rdev);
req->cmd = CPL_ABORT_NO_RST;
- c4iw_ofld_send(rdev, skb);
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret) {
+ __state_set(&ep->com, DEAD);
+ queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
+ }
}
-static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
+static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
unsigned int flowclen = 80;
struct fw_flowc_wr *flowc;
@@ -530,7 +676,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- c4iw_ofld_send(&ep->com.dev->rdev, skb);
+ return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
@@ -568,7 +714,7 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
return -ENOMEM;
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
+ t4_set_arp_err_handler(skb, ep, abort_arp_failure);
req = (struct cpl_abort_req *) skb_put(skb, wrlen);
memset(req, 0, wrlen);
INIT_TP_WR(req, ep->hwtid);
@@ -807,10 +953,10 @@ clip_release:
return ret;
}
-static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
- u8 mpa_rev_to_use)
+static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
+ u8 mpa_rev_to_use)
{
- int mpalen, wrlen;
+ int mpalen, wrlen, ret;
struct fw_ofld_tx_data_wr *req;
struct mpa_message *mpa;
struct mpa_v2_conn_params mpa_v2_params;
@@ -826,7 +972,7 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
skb = get_skb(skb, wrlen, GFP_KERNEL);
if (!skb) {
connect_reply_upcall(ep, -ENOMEM);
- return;
+ return -ENOMEM;
}
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
@@ -894,12 +1040,14 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
BUG_ON(ep->mpa_skb);
ep->mpa_skb = skb;
- c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ if (ret)
+ return ret;
start_ep_timer(ep);
__state_set(&ep->com, MPA_REQ_SENT);
ep->mpa_attr.initiator = 1;
ep->snd_seq += mpalen;
- return;
+ return ret;
}
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
@@ -975,7 +1123,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
*/
skb_get(skb);
set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
BUG_ON(ep->mpa_skb);
ep->mpa_skb = skb;
ep->snd_seq += mpalen;
@@ -1060,7 +1208,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
* Function fw4_ack() will deref it.
*/
skb_get(skb);
- t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+ t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
ep->mpa_skb = skb;
__state_set(&ep->com, MPA_REP_SENT);
ep->snd_seq += mpalen;
@@ -1074,6 +1222,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int tid = GET_TID(req);
unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
struct tid_info *t = dev->rdev.lldi.tids;
+ int ret;
ep = lookup_atid(t, atid);
@@ -1086,7 +1235,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
/* setup the hwtid for this connection */
ep->hwtid = tid;
cxgb4_insert_tid(t, ep, tid);
- insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
+ insert_ep_tid(ep);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
@@ -1099,13 +1248,22 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
set_bit(ACT_ESTAB, &ep->com.history);
/* start MPA negotiation */
- send_flowc(ep, NULL);
+ ret = send_flowc(ep, NULL);
+ if (ret)
+ goto err;
if (ep->retry_with_mpa_v1)
- send_mpa_req(ep, skb, 1);
+ ret = send_mpa_req(ep, skb, 1);
else
- send_mpa_req(ep, skb, mpa_rev);
+ ret = send_mpa_req(ep, skb, mpa_rev);
+ if (ret)
+ goto err;
mutex_unlock(&ep->com.mutex);
return 0;
+err:
+ mutex_unlock(&ep->com.mutex);
+ connect_reply_upcall(ep, -ENOMEM);
+ c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+ return 0;
}
static void close_complete_upcall(struct c4iw_ep *ep, int status)
@@ -1120,20 +1278,11 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
PDBG("close complete delivered ep %p cm_id %p tid %u\n",
ep, ep->com.cm_id, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
+ deref_cm_id(&ep->com);
set_bit(CLOSE_UPCALL, &ep->com.history);
}
}
-static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
-{
- PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- __state_set(&ep->com, ABORTING);
- set_bit(ABORT_CONN, &ep->com.history);
- return send_abort(ep, skb, gfp);
-}
-
static void peer_close_upcall(struct c4iw_ep *ep)
{
struct iw_cm_event event;
@@ -1161,8 +1310,7 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
ep->com.cm_id, ep->hwtid);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
+ deref_cm_id(&ep->com);
set_bit(ABORT_UPCALL, &ep->com.history);
}
}
@@ -1205,10 +1353,8 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
set_bit(CONN_RPL_UPCALL, &ep->com.history);
ep->com.cm_id->event_handler(ep->com.cm_id, &event);
- if (status < 0) {
- ep->com.cm_id->rem_ref(ep->com.cm_id);
- ep->com.cm_id = NULL;
- }
+ if (status < 0)
+ deref_cm_id(&ep->com);
}
static int connect_request_upcall(struct c4iw_ep *ep)
@@ -1301,6 +1447,18 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
#define RELAXED_IRD_NEGOTIATION 1
+/*
+ * process_mpa_reply - process streaming mode MPA reply
+ *
+ * Returns:
+ *
+ * 0 upon success, indicating a connect reply was delivered to the ULP
+ * or the MPA reply is incomplete but valid so far.
+ *
+ * 1 if a failure requires the caller to close the connection.
+ *
+ * 2 if a failure requires the caller to abort the connection.
+ */
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
struct mpa_message *mpa;
@@ -1316,20 +1474,12 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
/*
- * Stop mpa timer. If it expired, then
- * we ignore the MPA reply. process_timeout()
- * will abort the connection.
- */
- if (stop_ep_timer(ep))
- return 0;
-
- /*
* If we get more than the supported amount of private data
* then we must fail this connection.
*/
if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
err = -EINVAL;
- goto err;
+ goto err_stop_timer;
}
/*
@@ -1351,11 +1501,11 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
" Received = %d\n", __func__, mpa_rev, mpa->revision);
err = -EPROTO;
- goto err;
+ goto err_stop_timer;
}
if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
err = -EPROTO;
- goto err;
+ goto err_stop_timer;
}
plen = ntohs(mpa->private_data_size);
@@ -1365,7 +1515,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
*/
if (plen > MPA_MAX_PRIVATE_DATA) {
err = -EPROTO;
- goto err;
+ goto err_stop_timer;
}
/*
@@ -1373,7 +1523,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
*/
if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
err = -EPROTO;
- goto err;
+ goto err_stop_timer;
}
ep->plen = (u8) plen;
@@ -1387,10 +1537,18 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
if (mpa->flags & MPA_REJECT) {
err = -ECONNREFUSED;
- goto err;
+ goto err_stop_timer;
}
/*
+ * Stop mpa timer. If it expired, then
+ * we ignore the MPA reply. process_timeout()
+ * will abort the connection.
+ */
+ if (stop_ep_timer(ep))
+ return 0;
+
+ /*
* If we get here we have accumulated the entire mpa
* start reply message including private data. And
* the MPA header is valid.
@@ -1529,15 +1687,28 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
goto out;
}
goto out;
+err_stop_timer:
+ stop_ep_timer(ep);
err:
- __state_set(&ep->com, ABORTING);
- send_abort(ep, skb, GFP_KERNEL);
+ disconnect = 2;
out:
connect_reply_upcall(ep, err);
return disconnect;
}
-static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
+/*
+ * process_mpa_request - process streaming mode MPA request
+ *
+ * Returns:
+ *
+ * 0 upon success indicating a connect request was delivered to the ULP
+ * or the mpa request is incomplete but valid so far.
+ *
+ * 1 if a failure requires the caller to close the connection.
+ *
+ * 2 if a failure requires the caller to abort the connection.
+ */
+static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
struct mpa_message *mpa;
struct mpa_v2_conn_params *mpa_v2_params;
@@ -1549,11 +1720,8 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
* If we get more than the supported amount of private data
* then we must fail this connection.
*/
- if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
- (void)stop_ep_timer(ep);
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
+ if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
+ goto err_stop_timer;
PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
@@ -1569,7 +1737,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
* We'll continue process when more data arrives.
*/
if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
+ return 0;
PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
mpa = (struct mpa_message *) ep->mpa_pkt;
@@ -1580,43 +1748,32 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
if (mpa->revision > mpa_rev) {
printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
" Received = %d\n", __func__, mpa_rev, mpa->revision);
- (void)stop_ep_timer(ep);
- abort_connection(ep, skb, GFP_KERNEL);
- return;
+ goto err_stop_timer;
}
- if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
- (void)stop_ep_timer(ep);
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
+ if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
+ goto err_stop_timer;
plen = ntohs(mpa->private_data_size);
/*
* Fail if there's too much private data.
*/
- if (plen > MPA_MAX_PRIVATE_DATA) {
- (void)stop_ep_timer(ep);
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
+ if (plen > MPA_MAX_PRIVATE_DATA)
+ goto err_stop_timer;
/*
* If plen does not account for pkt size
*/
- if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
- (void)stop_ep_timer(ep);
- abort_connection(ep, skb, GFP_KERNEL);
- return;
- }
+ if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
+ goto err_stop_timer;
ep->plen = (u8) plen;
/*
* If we don't have all the pdata yet, then bail.
*/
if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
+ return 0;
/*
* If we get here we have accumulated the entire mpa
@@ -1665,26 +1822,26 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
ep->mpa_attr.p2p_type);
- /*
- * If the endpoint timer already expired, then we ignore
- * the start request. process_timeout() will abort
- * the connection.
- */
- if (!stop_ep_timer(ep)) {
- __state_set(&ep->com, MPA_REQ_RCVD);
-
- /* drive upcall */
- mutex_lock_nested(&ep->parent_ep->com.mutex,
- SINGLE_DEPTH_NESTING);
- if (ep->parent_ep->com.state != DEAD) {
- if (connect_request_upcall(ep))
- abort_connection(ep, skb, GFP_KERNEL);
- } else {
- abort_connection(ep, skb, GFP_KERNEL);
- }
- mutex_unlock(&ep->parent_ep->com.mutex);
+ __state_set(&ep->com, MPA_REQ_RCVD);
+
+ /* drive upcall */
+ mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
+ if (ep->parent_ep->com.state != DEAD) {
+ if (connect_request_upcall(ep))
+ goto err_unlock_parent;
+ } else {
+ goto err_unlock_parent;
}
- return;
+ mutex_unlock(&ep->parent_ep->com.mutex);
+ return 0;
+
+err_unlock_parent:
+ mutex_unlock(&ep->parent_ep->com.mutex);
+ goto err_out;
+err_stop_timer:
+ (void)stop_ep_timer(ep);
+err_out:
+ return 2;
}
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
@@ -1693,11 +1850,10 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_rx_data *hdr = cplhdr(skb);
unsigned int dlen = ntohs(hdr->len);
unsigned int tid = GET_TID(hdr);
- struct tid_info *t = dev->rdev.lldi.tids;
__u8 status = hdr->status;
int disconnect = 0;
- ep = lookup_tid(t, tid);
+ ep = get_ep_from_tid(dev, tid);
if (!ep)
return 0;
PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
@@ -1715,7 +1871,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
break;
case MPA_REQ_WAIT:
ep->rcv_seq += dlen;
- process_mpa_request(ep, skb);
+ disconnect = process_mpa_request(ep, skb);
break;
case FPDU_MODE: {
struct c4iw_qp_attributes attrs;
@@ -1736,7 +1892,8 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
}
mutex_unlock(&ep->com.mutex);
if (disconnect)
- c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+ c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
+ c4iw_put_ep(&ep->com);
return 0;
}
@@ -1746,9 +1903,8 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
int release = 0;
unsigned int tid = GET_TID(rpl);
- struct tid_info *t = dev->rdev.lldi.tids;
- ep = lookup_tid(t, tid);
+ ep = get_ep_from_tid(dev, tid);
if (!ep) {
printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
return 0;
@@ -1770,10 +1926,11 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
if (release)
release_ep_resources(ep);
+ c4iw_put_ep(&ep->com);
return 0;
}
-static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
+static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
struct sk_buff *skb;
struct fw_ofld_connection_wr *req;
@@ -1843,7 +2000,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
set_bit(ACT_OFLD_CONN, &ep->com.history);
- c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
/*
@@ -1986,6 +2143,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
init_timer(&ep->timer);
+ c4iw_init_wr_wait(&ep->com.wr_wait);
/*
* Allocate an active TID to initiate a TCP connection.
@@ -2069,6 +2227,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct sockaddr_in *ra;
struct sockaddr_in6 *la6;
struct sockaddr_in6 *ra6;
+ int ret = 0;
ep = lookup_atid(t, atid);
la = (struct sockaddr_in *)&ep->com.local_addr;
@@ -2104,9 +2263,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
mutex_unlock(&dev->rdev.stats.lock);
if (ep->com.local_addr.ss_family == AF_INET &&
dev->rdev.lldi.enable_fw_ofld_conn) {
- send_fw_act_open_req(ep,
- TID_TID_G(AOPEN_ATID_G(
- ntohl(rpl->atid_status))));
+ ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
+ ntohl(rpl->atid_status))));
+ if (ret)
+ goto fail;
return 0;
}
break;
@@ -2146,6 +2306,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
break;
}
+fail:
connect_reply_upcall(ep, status2errno(status));
state_set(&ep->com, DEAD);
@@ -2170,9 +2331,8 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_pass_open_rpl *rpl = cplhdr(skb);
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int stid = GET_TID(rpl);
- struct c4iw_listen_ep *ep = lookup_stid(t, stid);
+ struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
if (!ep) {
PDBG("%s stid %d lookup failure!\n", __func__, stid);
@@ -2181,7 +2341,7 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s ep %p status %d error %d\n", __func__, ep,
rpl->status, status2errno(rpl->status));
c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
-
+ c4iw_put_ep(&ep->com);
out:
return 0;
}
@@ -2189,17 +2349,17 @@ out:
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int stid = GET_TID(rpl);
- struct c4iw_listen_ep *ep = lookup_stid(t, stid);
+ struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
PDBG("%s ep %p\n", __func__, ep);
c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+ c4iw_put_ep(&ep->com);
return 0;
}
-static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
- struct cpl_pass_accept_req *req)
+static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
+ struct cpl_pass_accept_req *req)
{
struct cpl_pass_accept_rpl *rpl;
unsigned int mtu_idx;
@@ -2287,10 +2447,9 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
rpl->opt0 = cpu_to_be64(opt0);
rpl->opt2 = cpu_to_be32(opt2);
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
- t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
- c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+ t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);
- return;
+ return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
@@ -2355,7 +2514,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned short hdrs;
u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
- parent_ep = lookup_stid(t, stid);
+ parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!parent_ep) {
PDBG("%s connect request on invalid stid %d\n", __func__, stid);
goto reject;
@@ -2468,9 +2627,13 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
init_timer(&child_ep->timer);
cxgb4_insert_tid(t, child_ep, hwtid);
- insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
- accept_cr(child_ep, skb, req);
- set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
+ insert_ep_tid(child_ep);
+ if (accept_cr(child_ep, skb, req)) {
+ c4iw_put_ep(&parent_ep->com);
+ release_ep_resources(child_ep);
+ } else {
+ set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
+ }
if (iptype == 6) {
sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
@@ -2479,6 +2642,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto out;
reject:
reject_cr(dev, hwtid, skb);
+ if (parent_ep)
+ c4iw_put_ep(&parent_ep->com);
out:
return 0;
}
@@ -2487,10 +2652,10 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *ep;
struct cpl_pass_establish *req = cplhdr(skb);
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(req);
+ int ret;
- ep = lookup_tid(t, tid);
+ ep = get_ep_from_tid(dev, tid);
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
@@ -2501,10 +2666,15 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
set_emss(ep, ntohs(req->tcp_opt));
dst_confirm(ep->dst);
- state_set(&ep->com, MPA_REQ_WAIT);
+ mutex_lock(&ep->com.mutex);
+ ep->com.state = MPA_REQ_WAIT;
start_ep_timer(ep);
- send_flowc(ep, skb);
set_bit(PASS_ESTAB, &ep->com.history);
+ ret = send_flowc(ep, skb);
+ mutex_unlock(&ep->com.mutex);
+ if (ret)
+ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
+ c4iw_put_ep(&ep->com);
return 0;
}
@@ -2516,11 +2686,13 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
int disconnect = 1;
int release = 0;
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(hdr);
int ret;
- ep = lookup_tid(t, tid);
+ ep = get_ep_from_tid(dev, tid);
+ if (!ep)
+ return 0;
+
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
dst_confirm(ep->dst);
@@ -2592,6 +2764,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
if (release)
release_ep_resources(ep);
+ c4iw_put_ep(&ep->com);
return 0;
}
@@ -2604,10 +2777,12 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
int ret;
int release = 0;
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(req);
- ep = lookup_tid(t, tid);
+ ep = get_ep_from_tid(dev, tid);
+ if (!ep)
+ return 0;
+
if (is_neg_adv(req->status)) {
PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
__func__, ep->hwtid, req->status,
@@ -2616,7 +2791,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.neg_adv++;
mutex_unlock(&dev->rdev.stats.lock);
- return 0;
+ goto deref_ep;
}
PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
ep->com.state);
@@ -2633,6 +2808,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
mutex_lock(&ep->com.mutex);
switch (ep->com.state) {
case CONNECTING:
+ c4iw_put_ep(&ep->parent_ep->com);
break;
case MPA_REQ_WAIT:
(void)stop_ep_timer(ep);
@@ -2681,7 +2857,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
case DEAD:
PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
mutex_unlock(&ep->com.mutex);
- return 0;
+ goto deref_ep;
default:
BUG_ON(1);
break;
@@ -2728,6 +2904,10 @@ out:
c4iw_reconnect(ep);
}
+deref_ep:
+ c4iw_put_ep(&ep->com);
+ /* Dereferencing ep, referenced in peer_abort_intr() */
+ c4iw_put_ep(&ep->com);
return 0;
}
@@ -2737,16 +2917,18 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
struct cpl_close_con_rpl *rpl = cplhdr(skb);
int release = 0;
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(rpl);
- ep = lookup_tid(t, tid);
+ ep = get_ep_from_tid(dev, tid);
+ if (!ep)
+ return 0;
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
BUG_ON(!ep);
/* The cm_id may be null if we failed to connect */
mutex_lock(&ep->com.mutex);
+ set_bit(CLOSE_CON_RPL, &ep->com.history);
switch (ep->com.state) {
case CLOSING:
__state_set(&ep->com, MORIBUND);
@@ -2774,18 +2956,18 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
mutex_unlock(&ep->com.mutex);
if (release)
release_ep_resources(ep);
+ c4iw_put_ep(&ep->com);
return 0;
}
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_rdma_terminate *rpl = cplhdr(skb);
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(rpl);
struct c4iw_ep *ep;
struct c4iw_qp_attributes attrs;
- ep = lookup_tid(t, tid);
+ ep = get_ep_from_tid(dev, tid);
BUG_ON(!ep);
if (ep && ep->com.qp) {
@@ -2796,6 +2978,7 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
} else
printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
+ c4iw_put_ep(&ep->com);
return 0;
}
@@ -2811,15 +2994,16 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_fw4_ack *hdr = cplhdr(skb);
u8 credits = hdr->credits;
unsigned int tid = GET_TID(hdr);
- struct tid_info *t = dev->rdev.lldi.tids;
- ep = lookup_tid(t, tid);
+ ep = get_ep_from_tid(dev, tid);
+ if (!ep)
+ return 0;
PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
if (credits == 0) {
PDBG("%s 0 credit ack ep %p tid %u state %u\n",
__func__, ep, ep->hwtid, state_read(&ep->com));
- return 0;
+ goto out;
}
dst_confirm(ep->dst);
@@ -2829,7 +3013,13 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
kfree_skb(ep->mpa_skb);
ep->mpa_skb = NULL;
+ mutex_lock(&ep->com.mutex);
+ if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
+ stop_ep_timer(ep);
+ mutex_unlock(&ep->com.mutex);
}
+out:
+ c4iw_put_ep(&ep->com);
return 0;
}
@@ -2841,22 +3031,23 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
- if (ep->com.state == DEAD) {
+ if (ep->com.state != MPA_REQ_RCVD) {
mutex_unlock(&ep->com.mutex);
c4iw_put_ep(&ep->com);
return -ECONNRESET;
}
set_bit(ULP_REJECT, &ep->com.history);
- BUG_ON(ep->com.state != MPA_REQ_RCVD);
if (mpa_rev == 0)
- abort_connection(ep, NULL, GFP_KERNEL);
+ disconnect = 2;
else {
err = send_mpa_reject(ep, pdata, pdata_len);
disconnect = 1;
}
mutex_unlock(&ep->com.mutex);
- if (disconnect)
- err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+ if (disconnect) {
+ stop_ep_timer(ep);
+ err = c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
+ }
c4iw_put_ep(&ep->com);
return 0;
}
@@ -2869,24 +3060,23 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct c4iw_ep *ep = to_ep(cm_id);
struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
+ int abort = 0;
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
mutex_lock(&ep->com.mutex);
- if (ep->com.state == DEAD) {
+ if (ep->com.state != MPA_REQ_RCVD) {
err = -ECONNRESET;
- goto err;
+ goto err_out;
}
- BUG_ON(ep->com.state != MPA_REQ_RCVD);
BUG_ON(!qp);
set_bit(ULP_ACCEPT, &ep->com.history);
if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
(conn_param->ird > cur_max_read_depth(ep->com.dev))) {
- abort_connection(ep, NULL, GFP_KERNEL);
err = -EINVAL;
- goto err;
+ goto err_abort;
}
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
@@ -2898,9 +3088,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->ord = conn_param->ord;
send_mpa_reject(ep, conn_param->private_data,
conn_param->private_data_len);
- abort_connection(ep, NULL, GFP_KERNEL);
err = -ENOMEM;
- goto err;
+ goto err_abort;
}
}
if (conn_param->ird < ep->ord) {
@@ -2908,9 +3097,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->ord <= h->rdev.lldi.max_ordird_qp) {
conn_param->ird = ep->ord;
} else {
- abort_connection(ep, NULL, GFP_KERNEL);
err = -ENOMEM;
- goto err;
+ goto err_abort;
}
}
}
@@ -2929,8 +3117,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
- cm_id->add_ref(cm_id);
ep->com.cm_id = cm_id;
+ ref_cm_id(&ep->com);
ep->com.qp = qp;
ref_qp(ep);
@@ -2951,23 +3139,27 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
err = c4iw_modify_qp(ep->com.qp->rhp,
ep->com.qp, mask, &attrs, 1);
if (err)
- goto err1;
+ goto err_deref_cm_id;
+
+ set_bit(STOP_MPA_TIMER, &ep->com.flags);
err = send_mpa_reply(ep, conn_param->private_data,
conn_param->private_data_len);
if (err)
- goto err1;
+ goto err_deref_cm_id;
__state_set(&ep->com, FPDU_MODE);
established_upcall(ep);
mutex_unlock(&ep->com.mutex);
c4iw_put_ep(&ep->com);
return 0;
-err1:
- ep->com.cm_id = NULL;
- abort_connection(ep, NULL, GFP_KERNEL);
- cm_id->rem_ref(cm_id);
-err:
+err_deref_cm_id:
+ deref_cm_id(&ep->com);
+err_abort:
+ abort = 1;
+err_out:
mutex_unlock(&ep->com.mutex);
+ if (abort)
+ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
c4iw_put_ep(&ep->com);
return err;
}
@@ -3067,9 +3259,9 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (peer2peer && ep->ord == 0)
ep->ord = 1;
- cm_id->add_ref(cm_id);
- ep->com.dev = dev;
ep->com.cm_id = cm_id;
+ ref_cm_id(&ep->com);
+ ep->com.dev = dev;
ep->com.qp = get_qhp(dev, conn_param->qpn);
if (!ep->com.qp) {
PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
@@ -3108,7 +3300,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
/*
* Handle loopback requests to INADDR_ANY.
*/
- if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
+ if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
err = pick_local_ipaddrs(dev, cm_id);
if (err)
goto fail1;
@@ -3176,7 +3368,7 @@ fail2:
remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail1:
- cm_id->rem_ref(cm_id);
+ deref_cm_id(&ep->com);
c4iw_put_ep(&ep->com);
out:
return err;
@@ -3270,8 +3462,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
goto fail1;
}
PDBG("%s ep %p\n", __func__, ep);
- cm_id->add_ref(cm_id);
ep->com.cm_id = cm_id;
+ ref_cm_id(&ep->com);
ep->com.dev = dev;
ep->backlog = backlog;
memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
@@ -3311,7 +3503,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family);
fail2:
- cm_id->rem_ref(cm_id);
+ deref_cm_id(&ep->com);
c4iw_put_ep(&ep->com);
fail1:
out:
@@ -3350,7 +3542,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family);
done:
- cm_id->rem_ref(cm_id);
+ deref_cm_id(&ep->com);
c4iw_put_ep(&ep->com);
return err;
}
@@ -3367,6 +3559,12 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
states[ep->com.state], abrupt);
+ /*
+ * Ref the ep here in case we have fatal errors causing the
+ * ep to be released and freed.
+ */
+ c4iw_get_ep(&ep->com);
+
rdev = &ep->com.dev->rdev;
if (c4iw_fatal_error(rdev)) {
fatal = 1;
@@ -3418,10 +3616,30 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
set_bit(EP_DISC_CLOSE, &ep->com.history);
ret = send_halfclose(ep, gfp);
}
- if (ret)
+ if (ret) {
+ set_bit(EP_DISC_FAIL, &ep->com.history);
+ if (!abrupt) {
+ stop_ep_timer(ep);
+ close_complete_upcall(ep, -EIO);
+ }
+ if (ep->com.qp) {
+ struct c4iw_qp_attributes attrs;
+
+ attrs.next_state = C4IW_QP_STATE_ERROR;
+ ret = c4iw_modify_qp(ep->com.qp->rhp,
+ ep->com.qp,
+ C4IW_QP_ATTR_NEXT_STATE,
+ &attrs, 1);
+ if (ret)
+ pr_err(MOD
+ "%s - qp <- error failed!\n",
+ __func__);
+ }
fatal = 1;
+ }
}
mutex_unlock(&ep->com.mutex);
+ c4iw_put_ep(&ep->com);
if (fatal)
release_ep_resources(ep);
return ret;
@@ -3676,7 +3894,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_pass_accept_req *req = (void *)(rss + 1);
struct l2t_entry *e;
struct dst_entry *dst;
- struct c4iw_ep *lep;
+ struct c4iw_ep *lep = NULL;
u16 window;
struct port_info *pi;
struct net_device *pdev;
@@ -3701,7 +3919,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
*/
stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
- lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
+ lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
if (!lep) {
PDBG("%s connect request on invalid stid %d\n", __func__, stid);
goto reject;
@@ -3802,6 +4020,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
free_dst:
dst_release(dst);
reject:
+ if (lep)
+ c4iw_put_ep(&lep->com);
return 0;
}
@@ -3809,7 +4029,7 @@ reject:
* These are the real handlers that are called from a
* work queue.
*/
-static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
+static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
[CPL_ACT_ESTABLISH] = act_establish,
[CPL_ACT_OPEN_RPL] = act_open_rpl,
[CPL_RX_DATA] = rx_data,
@@ -3825,7 +4045,9 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_RDMA_TERMINATE] = terminate,
[CPL_FW4_ACK] = fw4_ack,
[CPL_FW6_MSG] = deferred_fw6_msg,
- [CPL_RX_PKT] = rx_pkt
+ [CPL_RX_PKT] = rx_pkt,
+ [FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
+ [FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
};
static void process_timeout(struct c4iw_ep *ep)
@@ -3839,11 +4061,12 @@ static void process_timeout(struct c4iw_ep *ep)
set_bit(TIMEDOUT, &ep->com.history);
switch (ep->com.state) {
case MPA_REQ_SENT:
- __state_set(&ep->com, ABORTING);
connect_reply_upcall(ep, -ETIMEDOUT);
break;
case MPA_REQ_WAIT:
- __state_set(&ep->com, ABORTING);
+ case MPA_REQ_RCVD:
+ case MPA_REP_SENT:
+ case FPDU_MODE:
break;
case CLOSING:
case MORIBUND:
@@ -3853,7 +4076,6 @@ static void process_timeout(struct c4iw_ep *ep)
ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
&attrs, 1);
}
- __state_set(&ep->com, ABORTING);
close_complete_upcall(ep, -ETIMEDOUT);
break;
case ABORTING:
@@ -3871,9 +4093,9 @@ static void process_timeout(struct c4iw_ep *ep)
__func__, ep, ep->hwtid, ep->com.state);
abort = 0;
}
- if (abort)
- abort_connection(ep, NULL, GFP_KERNEL);
mutex_unlock(&ep->com.mutex);
+ if (abort)
+ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
c4iw_put_ep(&ep->com);
}
@@ -4006,10 +4228,10 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_abort_req_rss *req = cplhdr(skb);
struct c4iw_ep *ep;
- struct tid_info *t = dev->rdev.lldi.tids;
unsigned int tid = GET_TID(req);
- ep = lookup_tid(t, tid);
+ ep = get_ep_from_tid(dev, tid);
+ /* This EP will be dereferenced in peer_abort() */
if (!ep) {
printk(KERN_WARNING MOD
"Abort on non-existent endpoint, tid %d\n", tid);
@@ -4020,24 +4242,13 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
__func__, ep->hwtid, req->status,
neg_adv_str(req->status));
- ep->stats.abort_neg_adv++;
- dev->rdev.stats.neg_adv++;
- kfree_skb(skb);
- return 0;
+ goto out;
}
PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
ep->com.state);
- /*
- * Wake up any threads in rdma_init() or rdma_fini().
- * However, if we are on MPAv2 and want to retry with MPAv1
- * then, don't wake up yet.
- */
- if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
- if (ep->com.state != MPA_REQ_SENT)
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
- } else
- c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+ c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+out:
sched(dev, skb);
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index df43f871a..f6f34a75a 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -755,6 +755,7 @@ enum c4iw_ep_flags {
CLOSE_SENT = 3,
TIMEOUT = 4,
QP_REFERENCED = 5,
+ STOP_MPA_TIMER = 7,
};
enum c4iw_ep_history {
@@ -779,7 +780,13 @@ enum c4iw_ep_history {
EP_DISC_ABORT = 18,
CONN_RPL_UPCALL = 19,
ACT_RETRY_NOMEM = 20,
- ACT_RETRY_INUSE = 21
+ ACT_RETRY_INUSE = 21,
+ CLOSE_CON_RPL = 22,
+ EP_DISC_FAIL = 24,
+ QP_REFED = 25,
+ QP_DEREFED = 26,
+ CM_ID_REFED = 27,
+ CM_ID_DEREFED = 28,
};
struct c4iw_ep_common {
@@ -917,9 +924,8 @@ void c4iw_qp_rem_ref(struct ib_qp *qp);
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
-int c4iw_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents);
+int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 008be07d5..55d0651ee 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -86,8 +86,9 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
(wait ? FW_WR_COMPL_F : 0));
req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
- req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
- req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
+ req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+ T5_ULP_MEMIO_ORDER_V(1) |
+ T5_ULP_MEMIO_FID_V(rdev->lldi.rxq_ids[0]));
req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
@@ -690,15 +691,14 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-int c4iw_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents)
+int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
{
struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
mhp->mpl_len = 0;
- return ib_sg_to_pages(ibmr, sg, sg_nents, c4iw_set_page);
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
}
int c4iw_dereg_mr(struct ib_mr *ib_mr)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 7574f394f..dd8a86b72 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -446,20 +446,59 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
c4iw_dev->rdev.lldi.pdev->device);
}
+enum counters {
+ IP4INSEGS,
+ IP4OUTSEGS,
+ IP4RETRANSSEGS,
+ IP4OUTRSTS,
+ IP6INSEGS,
+ IP6OUTSEGS,
+ IP6RETRANSSEGS,
+ IP6OUTRSTS,
+ NR_COUNTERS
+};
+
+static const char * const names[] = {
+ [IP4INSEGS] = "ip4InSegs",
+ [IP4OUTSEGS] = "ip4OutSegs",
+ [IP4RETRANSSEGS] = "ip4RetransSegs",
+ [IP4OUTRSTS] = "ip4OutRsts",
+ [IP6INSEGS] = "ip6InSegs",
+ [IP6OUTSEGS] = "ip6OutSegs",
+ [IP6RETRANSSEGS] = "ip6RetransSegs",
+ [IP6OUTRSTS] = "ip6OutRsts"
+};
+
+static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
+
+ if (port_num != 0)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
static int c4iw_get_mib(struct ib_device *ibdev,
- union rdma_protocol_stats *stats)
+ struct rdma_hw_stats *stats,
+ u8 port, int index)
{
struct tp_tcp_stats v4, v6;
struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);
cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
- memset(stats, 0, sizeof *stats);
- stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs;
- stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs;
- stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs;
- stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts;
-
- return 0;
+ stats->value[IP4INSEGS] = v4.tcp_in_segs;
+ stats->value[IP4OUTSEGS] = v4.tcp_out_segs;
+ stats->value[IP4RETRANSSEGS] = v4.tcp_retrans_segs;
+ stats->value[IP4OUTRSTS] = v4.tcp_out_rsts;
+ stats->value[IP6INSEGS] = v6.tcp_in_segs;
+ stats->value[IP6OUTSEGS] = v6.tcp_out_segs;
+ stats->value[IP6RETRANSSEGS] = v6.tcp_retrans_segs;
+ stats->value[IP6OUTRSTS] = v6.tcp_out_rsts;
+
+ return stats->num_counters;
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -562,7 +601,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.req_notify_cq = c4iw_arm_cq;
dev->ibdev.post_send = c4iw_post_send;
dev->ibdev.post_recv = c4iw_post_receive;
- dev->ibdev.get_protocol_stats = c4iw_get_mib;
+ dev->ibdev.alloc_hw_stats = c4iw_alloc_stats;
+ dev->ibdev.get_hw_stats = c4iw_get_mib;
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = c4iw_port_immutable;
dev->ibdev.drain_sq = c4iw_drain_sq;
diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
index a925fb0db..f846fd51b 100644
--- a/drivers/staging/rdma/hfi1/Kconfig
+++ b/drivers/infiniband/hw/hfi1/Kconfig
@@ -3,7 +3,6 @@ config INFINIBAND_HFI1
depends on X86_64 && INFINIBAND_RDMAVT
select MMU_NOTIFIER
select CRC32
- default m
---help---
This is a low-level driver for the Intel OPA Gen1 adapter.
config HFI1_DEBUG_SDMA_ORDER
diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index 8dc59382e..9b5382c94 100644
--- a/drivers/staging/rdma/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -7,7 +7,7 @@
#
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
-hfi1-y := affinity.o chip.o device.o diag.o driver.o efivar.o \
+hfi1-y := affinity.o chip.o device.o driver.o efivar.o \
eprom.o file_ops.o firmware.o \
init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \
qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \
diff --git a/drivers/staging/rdma/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 2cb8ca77f..14d7eeb09 100644
--- a/drivers/staging/rdma/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -53,20 +53,6 @@
#include "sdma.h"
#include "trace.h"
-struct cpu_mask_set {
- struct cpumask mask;
- struct cpumask used;
- uint gen;
-};
-
-struct hfi1_affinity {
- struct cpu_mask_set def_intr;
- struct cpu_mask_set rcv_intr;
- struct cpu_mask_set proc;
- /* spin lock to protect affinity struct */
- spinlock_t lock;
-};
-
/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
"SDMA",
@@ -82,6 +68,48 @@ static inline void init_cpu_mask_set(struct cpu_mask_set *set)
set->gen = 0;
}
+/* Initialize non-HT cpu cores mask */
+int init_real_cpu_mask(struct hfi1_devdata *dd)
+{
+ struct hfi1_affinity *info;
+ int possible, curr_cpu, i, ht;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ cpumask_clear(&info->real_cpu_mask);
+
+ /* Start with cpu online mask as the real cpu mask */
+ cpumask_copy(&info->real_cpu_mask, cpu_online_mask);
+
+ /*
+ * Remove HT cores from the real cpu mask. Do this in two steps below.
+ */
+ possible = cpumask_weight(&info->real_cpu_mask);
+ ht = cpumask_weight(topology_sibling_cpumask(
+ cpumask_first(&info->real_cpu_mask)));
+ /*
+ * Step 1. Skip over the first N HT siblings and use them as the
+ * "real" cores. Assumes that HT cores are not enumerated in
+ * succession (except in the single core case).
+ */
+ curr_cpu = cpumask_first(&info->real_cpu_mask);
+ for (i = 0; i < possible / ht; i++)
+ curr_cpu = cpumask_next(curr_cpu, &info->real_cpu_mask);
+ /*
+ * Step 2. Remove the remaining HT siblings. Use cpumask_next() to
+ * skip any gaps.
+ */
+ for (; i < possible; i++) {
+ cpumask_clear_cpu(curr_cpu, &info->real_cpu_mask);
+ curr_cpu = cpumask_next(curr_cpu, &info->real_cpu_mask);
+ }
+
+ dd->affinity = info;
+ return 0;
+}
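
[editor's note] A minimal userspace sketch of the two-step HT-sibling removal implemented above, assuming (as the driver comment does) that the first possible/ht enumerated CPUs are the physical cores. The 64-bit mask and helper name are illustrative, not driver API.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative analogue of init_real_cpu_mask(): given a mask of
	 * online CPUs where each physical core has `ht` siblings, keep the
	 * first possible/ht set bits ("real" cores) and clear the rest
	 * (HT siblings). */
	static uint64_t strip_ht_siblings(uint64_t online, int ht)
	{
		int possible = __builtin_popcountll(online);
		uint64_t mask = online;
		int kept = 0;

		for (int cpu = 0; cpu < 64; cpu++) {
			if (!(mask & (1ull << cpu)))
				continue;
			if (kept < possible / ht)
				kept++;			/* step 1: keep a "real" core */
			else
				mask &= ~(1ull << cpu);	/* step 2: clear a sibling */
		}
		return mask;
	}

	int main(void)
	{
		/* 4 cores, 2-way HT: CPUs 0-3 physical, 4-7 their siblings */
		printf("0x%llx\n", (unsigned long long)strip_ht_siblings(0xffull, 2));
		/* prints 0xf */
		return 0;
	}
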
+
/*
* Interrupt affinity.
*
@@ -93,20 +121,17 @@ static inline void init_cpu_mask_set(struct cpu_mask_set *set)
* to the node relative 1 as necessary.
*
*/
-int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
+void hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
int node = pcibus_to_node(dd->pcidev->bus);
- struct hfi1_affinity *info;
+ struct hfi1_affinity *info = dd->affinity;
const struct cpumask *local_mask;
- int curr_cpu, possible, i, ht;
+ int curr_cpu, possible, i;
if (node < 0)
node = numa_node_id();
dd->node = node;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
spin_lock_init(&info->lock);
init_cpu_mask_set(&info->def_intr);
@@ -116,30 +141,8 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
local_mask = cpumask_of_node(dd->node);
if (cpumask_first(local_mask) >= nr_cpu_ids)
local_mask = topology_core_cpumask(0);
- /* use local mask as default */
- cpumask_copy(&info->def_intr.mask, local_mask);
- /*
- * Remove HT cores from the default mask. Do this in two steps below.
- */
- possible = cpumask_weight(&info->def_intr.mask);
- ht = cpumask_weight(topology_sibling_cpumask(
- cpumask_first(&info->def_intr.mask)));
- /*
- * Step 1. Skip over the first N HT siblings and use them as the
- * "real" cores. Assumes that HT cores are not enumerated in
- * succession (except in the single core case).
- */
- curr_cpu = cpumask_first(&info->def_intr.mask);
- for (i = 0; i < possible / ht; i++)
- curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask);
- /*
- * Step 2. Remove the remaining HT siblings. Use cpumask_next() to
- * skip any gaps.
- */
- for (; i < possible; i++) {
- cpumask_clear_cpu(curr_cpu, &info->def_intr.mask);
- curr_cpu = cpumask_next(curr_cpu, &info->def_intr.mask);
- }
+ /* Use the "real" cpu mask of this node as the default */
+ cpumask_and(&info->def_intr.mask, &info->real_cpu_mask, local_mask);
/* fill in the receive list */
possible = cpumask_weight(&info->def_intr.mask);
@@ -167,8 +170,6 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
}
cpumask_copy(&info->proc.mask, cpu_online_mask);
- dd->affinity = info;
- return 0;
}
void hfi1_dev_affinity_free(struct hfi1_devdata *dd)
@@ -299,16 +300,15 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
const struct cpumask *node_mask,
*proc_mask = tsk_cpus_allowed(current);
struct cpu_mask_set *set = &dd->affinity->proc;
- char buf[1024];
/*
* check whether process/context affinity has already
* been set
*/
if (cpumask_weight(proc_mask) == 1) {
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
- hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s",
- current->pid, current->comm, buf);
+ hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
+ current->pid, current->comm,
+ cpumask_pr_args(proc_mask));
/*
* Mark the pre-set CPU as used. This is atomic so we don't
* need the lock
@@ -317,9 +317,9 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
cpumask_set_cpu(cpu, &set->used);
goto done;
} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask));
- hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s",
- current->pid, current->comm, buf);
+ hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
+ current->pid, current->comm,
+ cpumask_pr_args(proc_mask));
goto done;
}
@@ -355,8 +355,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ?
&dd->affinity->rcv_intr.mask :
&dd->affinity->rcv_intr.used));
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs));
- hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf);
+ hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
+ cpumask_pr_args(intrs));
/*
* If we don't have a NUMA node requested, preference is towards
@@ -365,18 +365,16 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
if (node == -1)
node = dd->node;
node_mask = cpumask_of_node(node);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask));
- hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf);
+ hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
+ cpumask_pr_args(node_mask));
/* diff will hold all unused cpus */
cpumask_andnot(diff, &set->mask, &set->used);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff));
- hfi1_cdbg(PROC, "unused CPUs (all) %s", buf);
+ hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));
/* get cpumask of available CPUs on preferred NUMA */
cpumask_and(mask, diff, node_mask);
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
- hfi1_cdbg(PROC, "available cpus on NUMA %s", buf);
+ hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));
/*
* At first, we don't want to place processes on the same
@@ -394,8 +392,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
cpumask_andnot(diff, &set->mask, &set->used);
cpumask_andnot(mask, diff, node_mask);
}
- scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask));
- hfi1_cdbg(PROC, "possible CPUs for process %s", buf);
+ hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
+ cpumask_pr_args(mask));
cpu = cpumask_first(mask);
if (cpu >= nr_cpu_ids) /* empty */
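
[editor's note] The hunks above drop the intermediate scnprintf() buffers in favor of the kernel's native "%*pbl" bitmap-list conversion, fed by cpumask_pr_args(). A userspace sketch of the output format that specifier produces (the formatting function itself is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	/* Mimics what "%*pbl" prints for a cpumask: a comma-separated
	 * list of ranges, e.g. 0x1b -> "0-1,3-4". */
	static void print_cpulist(uint64_t mask)
	{
		int first = 1;

		for (int cpu = 0; cpu < 64; cpu++) {
			if (!(mask & (1ull << cpu)))
				continue;
			int end = cpu;
			while (end + 1 < 64 && (mask & (1ull << (end + 1))))
				end++;
			printf("%s%d", first ? "" : ",", cpu);
			if (end > cpu)
				printf("-%d", end);
			first = 0;
			cpu = end;
		}
		putchar('\n');
	}

	int main(void)
	{
		print_cpulist(0x1b);	/* prints 0-1,3-4 */
		return 0;
	}
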
diff --git a/drivers/staging/rdma/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index b287e4963..20f52fe74 100644
--- a/drivers/staging/rdma/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
@@ -64,10 +64,27 @@ enum affinity_flags {
AFF_IRQ_LOCAL
};
+struct cpu_mask_set {
+ struct cpumask mask;
+ struct cpumask used;
+ uint gen;
+};
+
+struct hfi1_affinity {
+ struct cpu_mask_set def_intr;
+ struct cpu_mask_set rcv_intr;
+ struct cpu_mask_set proc;
+ struct cpumask real_cpu_mask;
+ /* spin lock to protect affinity struct */
+ spinlock_t lock;
+};
+
struct hfi1_msix_entry;
+/* Initialize non-HT cpu cores mask */
+int init_real_cpu_mask(struct hfi1_devdata *);
/* Initialize driver affinity data */
-int hfi1_dev_affinity_init(struct hfi1_devdata *);
+void hfi1_dev_affinity_init(struct hfi1_devdata *);
/* Free driver affinity data */
void hfi1_dev_affinity_free(struct hfi1_devdata *);
/*
diff --git a/drivers/staging/rdma/hfi1/aspm.h b/drivers/infiniband/hw/hfi1/aspm.h
index 0d58fe3b4..0d58fe3b4 100644
--- a/drivers/staging/rdma/hfi1/aspm.h
+++ b/drivers/infiniband/hw/hfi1/aspm.h
diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 16eb65390..dad4d0ebb 100644
--- a/drivers/staging/rdma/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -123,6 +123,8 @@ struct flag_table {
#define MIN_KERNEL_KCTXTS 2
#define FIRST_KERNEL_KCTXT 1
+/* sizes for both the QP and RSM map tables */
+#define NUM_MAP_ENTRIES 256
#define NUM_MAP_REGS 32
/* Bit offset into the GUID which carries HFI id information */
@@ -1029,9 +1031,13 @@ static int thermal_init(struct hfi1_devdata *dd);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
+static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
+static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
+ unsigned int *np);
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
/*
* Error interrupt table entry. This is used as input to the interrupt
@@ -5661,7 +5667,7 @@ static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
sci = &dd->send_contexts[sw_index];
/* there is no information for user (PSM) and ack contexts */
- if (sci->type != SC_KERNEL)
+ if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
return -1;
sc = sci->sc;
@@ -6100,7 +6106,7 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
}
/* this access is valid only when the link is up */
- if ((ppd->host_link_state & HLS_UP) == 0) {
+ if (ppd->host_link_state & HLS_DOWN) {
dd_dev_info(dd, "%s: link state %s not up\n",
__func__, link_state_name(ppd->host_link_state));
ret = -EBUSY;
@@ -6199,18 +6205,13 @@ static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
/*
* Handle host requests from the 8051.
- *
- * This is a work-queue function outside of the interrupt.
*/
-void handle_8051_request(struct work_struct *work)
+static void handle_8051_request(struct hfi1_pportdata *ppd)
{
- struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
- dc_host_req_work);
struct hfi1_devdata *dd = ppd->dd;
u64 reg;
u16 data = 0;
- u8 type, i, lanes, *cache = ppd->qsfp_info.cache;
- u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
+ u8 type;
reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
@@ -6231,46 +6232,11 @@ void handle_8051_request(struct work_struct *work)
case HREQ_READ_CONFIG:
case HREQ_SET_TX_EQ_ABS:
case HREQ_SET_TX_EQ_REL:
+ case HREQ_ENABLE:
dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
type);
hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
break;
-
- case HREQ_ENABLE:
- lanes = data & 0xF;
- for (i = 0; lanes; lanes >>= 1, i++) {
- if (!(lanes & 1))
- continue;
- if (data & 0x200) {
- /* enable TX CDR */
- if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
- cache[QSFP_CDR_INFO_OFFS] & 0x80)
- cdr_ctrl_byte |= (1 << (i + 4));
- } else {
- /* disable TX CDR */
- if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
- cache[QSFP_CDR_INFO_OFFS] & 0x80)
- cdr_ctrl_byte &= ~(1 << (i + 4));
- }
-
- if (data & 0x800) {
- /* enable RX CDR */
- if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
- cache[QSFP_CDR_INFO_OFFS] & 0x40)
- cdr_ctrl_byte |= (1 << i);
- } else {
- /* disable RX CDR */
- if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
- cache[QSFP_CDR_INFO_OFFS] & 0x40)
- cdr_ctrl_byte &= ~(1 << i);
- }
- }
- one_qsfp_write(ppd, dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
- &cdr_ctrl_byte, 1);
- hreq_response(dd, HREQ_SUCCESS, data);
- refresh_qsfp_cache(ppd, &ppd->qsfp_info);
- break;
-
case HREQ_CONFIG_DONE:
hreq_response(dd, HREQ_SUCCESS, 0);
break;
@@ -6278,7 +6244,6 @@ void handle_8051_request(struct work_struct *work)
case HREQ_INTERFACE_TEST:
hreq_response(dd, HREQ_SUCCESS, data);
break;
-
default:
dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
@@ -6849,6 +6814,75 @@ static void reset_neighbor_info(struct hfi1_pportdata *ppd)
ppd->neighbor_fm_security = 0;
}
+static const char * const link_down_reason_strs[] = {
+ [OPA_LINKDOWN_REASON_NONE] = "None",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
+ [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
+ [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
+ [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
+ [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
+ [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
+ [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
+ [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
+ [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
+ [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
+ [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
+ [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
+ [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
+ [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
+ [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
+ [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
+ [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
+ [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
+ [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
+ [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
+ [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
+ [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
+ "Excessive buffer overrun",
+ [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
+ [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
+ [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
+ [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
+ [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
+ [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
+ [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
+ [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
+ "Local media not installed",
+ [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
+ [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
+ [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
+ "End to end not installed",
+ [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
+ [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
+ [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
+ [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
+ [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
+ [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
+};
+
+/* return the neighbor link down reason string */
+static const char *link_down_reason_str(u8 reason)
+{
+ const char *str = NULL;
+
+ if (reason < ARRAY_SIZE(link_down_reason_strs))
+ str = link_down_reason_strs[reason];
+ if (!str)
+ str = "(invalid)";
+
+ return str;
+}
+
/*
* Handle a link down interrupt from the 8051.
*
@@ -6857,8 +6891,11 @@ static void reset_neighbor_info(struct hfi1_pportdata *ppd)
void handle_link_down(struct work_struct *work)
{
u8 lcl_reason, neigh_reason = 0;
+ u8 link_down_reason;
struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
- link_down_work);
+ link_down_work);
+ int was_up;
+ static const char ldr_str[] = "Link down reason: ";
if ((ppd->host_link_state &
(HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
@@ -6867,20 +6904,63 @@ void handle_link_down(struct work_struct *work)
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
/* Go offline first, then deal with reading/writing through 8051 */
+ was_up = !!(ppd->host_link_state & HLS_UP);
set_link_state(ppd, HLS_DN_OFFLINE);
- lcl_reason = 0;
- read_planned_down_reason_code(ppd->dd, &neigh_reason);
+ if (was_up) {
+ lcl_reason = 0;
+ /* link down reason is only valid if the link was up */
+ read_link_down_reason(ppd->dd, &link_down_reason);
+ switch (link_down_reason) {
+ case LDR_LINK_TRANSFER_ACTIVE_LOW:
+ /* the link went down, no idle message reason */
+ dd_dev_info(ppd->dd, "%sUnexpected link down\n",
+ ldr_str);
+ break;
+ case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
+ /*
+ * The neighbor reason is only valid if an idle message
+ * was received for it.
+ */
+ read_planned_down_reason_code(ppd->dd, &neigh_reason);
+ dd_dev_info(ppd->dd,
+ "%sNeighbor link down message %d, %s\n",
+ ldr_str, neigh_reason,
+ link_down_reason_str(neigh_reason));
+ break;
+ case LDR_RECEIVED_HOST_OFFLINE_REQ:
+ dd_dev_info(ppd->dd,
+ "%sHost requested link to go offline\n",
+ ldr_str);
+ break;
+ default:
+ dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
+ ldr_str, link_down_reason);
+ break;
+ }
- /*
- * If no reason, assume peer-initiated but missed
- * LinkGoingDown idle flits.
- */
- if (neigh_reason == 0)
- lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
+ /*
+ * If no reason, assume peer-initiated but missed
+ * LinkGoingDown idle flits.
+ */
+ if (neigh_reason == 0)
+ lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
+ } else {
+ /* went down while polling or going up */
+ lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
+ }
set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
+ /* inform the SMA when the link transitions from up to down */
+ if (was_up && ppd->local_link_down_reason.sma == 0 &&
+ ppd->neigh_link_down_reason.sma == 0) {
+ ppd->local_link_down_reason.sma =
+ ppd->local_link_down_reason.latest;
+ ppd->neigh_link_down_reason.sma =
+ ppd->neigh_link_down_reason.latest;
+ }
+
reset_neighbor_info(ppd);
/* disable the port */
@@ -6890,7 +6970,7 @@ void handle_link_down(struct work_struct *work)
* If there is no cable attached, turn the DC off. Otherwise,
* start the link bring up.
*/
- if (!qsfp_mod_present(ppd)) {
+ if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) {
dc_shutdown(ppd->dd);
} else {
tune_serdes(ppd);
@@ -6988,6 +7068,16 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
__func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
ppd->pkeys[2] = FULL_MGMT_P_KEY;
(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+ hfi1_event_pkey_change(ppd->dd, ppd->port);
+}
+
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
+{
+ if (ppd->pkeys[2] != 0) {
+ ppd->pkeys[2] = 0;
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+ hfi1_event_pkey_change(ppd->dd, ppd->port);
+ }
}
/*
@@ -7350,7 +7440,7 @@ void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
retry:
mutex_lock(&ppd->hls_lock);
/* only apply if the link is up */
- if (!(ppd->host_link_state & HLS_UP)) {
+ if (ppd->host_link_state & HLS_DOWN) {
/* still going up..wait and retry */
if (ppd->host_link_state & HLS_GOING_UP) {
if (++tries < 1000) {
@@ -7373,7 +7463,11 @@ retry:
ppd->link_width_downgrade_rx_active = rx;
}
- if (lwde == 0) {
+ if (ppd->link_width_downgrade_tx_active == 0 ||
+ ppd->link_width_downgrade_rx_active == 0) {
+ /* the 8051 reported a dead link as a downgrade */
+ dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
+ } else if (lwde == 0) {
/* downgrade is disabled */
/* bounce if not at starting active width */
@@ -7534,7 +7628,7 @@ static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
host_msg &= ~(u64)LINKUP_ACHIEVED;
}
if (host_msg & EXT_DEVICE_CFG_REQ) {
- queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work);
+ handle_8051_request(ppd);
host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
}
if (host_msg & VERIFY_CAP_FRAME) {
@@ -7740,8 +7834,8 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
* save first 2 flits in the packet that caused
* the error
*/
- dd->err_info_rcvport.packet_flit1 = hdr0;
- dd->err_info_rcvport.packet_flit2 = hdr1;
+ dd->err_info_rcvport.packet_flit1 = hdr0;
+ dd->err_info_rcvport.packet_flit2 = hdr1;
}
switch (info) {
case 1:
@@ -8660,6 +8754,14 @@ static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
*pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
}
+static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
+{
+ u32 frame;
+
+ read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
+ *ldr = (frame & 0xff);
+}
+
static int read_tx_settings(struct hfi1_devdata *dd,
u8 *enable_lane_tx,
u8 *tx_polarity_inversion,
@@ -9049,9 +9151,9 @@ set_local_link_attributes_fail:
}
/*
- * Call this to start the link. Schedule a retry if the cable is not
- * present or if unable to start polling. Do not do anything if the
- * link is disabled. Returns 0 if link is disabled or moved to polling
+ * Call this to start the link.
+ * Do not do anything if the link is disabled.
+ * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
*/
int start_link(struct hfi1_pportdata *ppd)
{
@@ -9068,15 +9170,14 @@ int start_link(struct hfi1_pportdata *ppd)
return 0;
}
- if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
- loopback == LOOPBACK_LCB ||
- ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
- return set_link_state(ppd, HLS_DN_POLL);
+ /*
+ * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
+ * pkey table can be configured properly if the HFI unit is connected
+ * to switch port with MgmtAllowed=NO
+ */
+ clear_full_mgmt_pkey(ppd);
- dd_dev_info(ppd->dd,
- "%s: stopping link start because no cable is present\n",
- __func__);
- return -EAGAIN;
+ return set_link_state(ppd, HLS_DN_POLL);
}
static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
@@ -9129,9 +9230,6 @@ void reset_qsfp(struct hfi1_pportdata *ppd)
/* Reset the QSFP */
mask = (u64)QSFP_HFI0_RESET_N;
- qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
- qsfp_mask |= mask;
- write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
qsfp_mask = read_csr(dd,
dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
@@ -9169,6 +9267,12 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
__func__);
+ /*
+ * The remaining alarms/warnings don't matter if the link is down.
+ */
+ if (ppd->host_link_state & HLS_DOWN)
+ return 0;
+
if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
(qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
@@ -9247,7 +9351,7 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
return 0;
}
-/* This routine will only be scheduled if the QSFP module is present */
+/* This routine will only be scheduled if the QSFP module-present signal is asserted */
void qsfp_event(struct work_struct *work)
{
struct qsfp_data *qd;
@@ -9263,9 +9367,8 @@ void qsfp_event(struct work_struct *work)
return;
/*
- * Turn DC back on after cables has been
- * re-inserted. Up until now, the DC has been in
- * reset to save power.
+ * Turn DC back on after cable has been re-inserted. Up until
+ * now, the DC has been in reset to save power.
*/
dc_start(dd);
@@ -9397,7 +9500,15 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
return ret;
}
- /* tune the SERDES to a ballpark setting for
+ get_port_type(ppd);
+ if (ppd->port_type == PORT_TYPE_QSFP) {
+ set_qsfp_int_n(ppd, 0);
+ wait_for_qsfp_init(ppd);
+ set_qsfp_int_n(ppd, 1);
+ }
+
+ /*
+ * Tune the SerDes to a ballpark setting for
* optimal signal and bit error rate
* Needs to be done before starting the link
*/
@@ -9675,7 +9786,8 @@ static void set_send_length(struct hfi1_pportdata *ppd)
u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
& SEND_LEN_CHECK1_LEN_VL15_MASK) <<
SEND_LEN_CHECK1_LEN_VL15_SHIFT;
- int i;
+ int i, j;
+ u32 thres;
for (i = 0; i < ppd->vls_supported; i++) {
if (dd->vld[i].mtu > maxvlmtu)
@@ -9694,16 +9806,20 @@ static void set_send_length(struct hfi1_pportdata *ppd)
/* adjust kernel credit return thresholds based on new MTUs */
/* all kernel receive contexts have the same hdrqentsize */
for (i = 0; i < ppd->vls_supported; i++) {
- sc_set_cr_threshold(dd->vld[i].sc,
- sc_mtu_to_threshold(dd->vld[i].sc,
- dd->vld[i].mtu,
- dd->rcd[0]->
- rcvhdrqentsize));
- }
- sc_set_cr_threshold(dd->vld[15].sc,
- sc_mtu_to_threshold(dd->vld[15].sc,
- dd->vld[15].mtu,
+ thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
+ sc_mtu_to_threshold(dd->vld[i].sc,
+ dd->vld[i].mtu,
dd->rcd[0]->rcvhdrqentsize));
+ for (j = 0; j < INIT_SC_PER_VL; j++)
+ sc_set_cr_threshold(
+ pio_select_send_context_vl(dd, j, i),
+ thres);
+ }
+ thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
+ sc_mtu_to_threshold(dd->vld[15].sc,
+ dd->vld[15].mtu,
+ dd->rcd[0]->rcvhdrqentsize));
+ sc_set_cr_threshold(dd->vld[15].sc, thres);
/* Adjust maximum MTU for the port in DC */
dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
@@ -9989,7 +10105,7 @@ u32 driver_physical_state(struct hfi1_pportdata *ppd)
*/
u32 driver_logical_state(struct hfi1_pportdata *ppd)
{
- if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
+ if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
return IB_PORT_DOWN;
switch (ppd->host_link_state & HLS_UP) {
@@ -10030,7 +10146,6 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
struct hfi1_devdata *dd = ppd->dd;
struct ib_event event = {.device = NULL};
int ret1, ret = 0;
- int was_up, is_down;
int orig_new_state, poll_bounce;
mutex_lock(&ppd->hls_lock);
@@ -10049,8 +10164,6 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
poll_bounce ? "(bounce) " : "",
link_state_reason_name(ppd, state));
- was_up = !!(ppd->host_link_state & HLS_UP);
-
/*
* If we're going to a (HLS_*) link state that implies the logical
* link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
@@ -10261,17 +10374,6 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
break;
}
- is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
- HLS_DN_DISABLE | HLS_DN_OFFLINE));
-
- if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
- ppd->neigh_link_down_reason.sma == 0) {
- ppd->local_link_down_reason.sma =
- ppd->local_link_down_reason.latest;
- ppd->neigh_link_down_reason.sma =
- ppd->neigh_link_down_reason.latest;
- }
-
goto done;
unexpected:
@@ -11816,7 +11918,7 @@ static void update_synth_timer(unsigned long opaque)
hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
}
-mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
+ mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
}
#define C_MAX_NAME 13 /* 12 chars + one for /0 */
@@ -12673,22 +12775,24 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
int total_contexts;
int ret;
unsigned ngroups;
+ int qos_rmt_count;
+ int user_rmt_reduced;
/*
- * Kernel contexts: (to be fixed later):
- * - min or 2 or 1 context/numa
+ * Kernel receive contexts:
+ * - min of 2 or 1 context/numa (excluding control context)
* - Context 0 - control context (VL15/multicast/error)
- * - Context 1 - default context
+ * - Context 1 - first kernel context
+ * - Context 2 - second kernel context
+ * ...
*/
if (n_krcvqs)
/*
- * Don't count context 0 in n_krcvqs since
- * is isn't used for normal verbs traffic.
- *
- * krcvqs will reflect number of kernel
- * receive contexts above 0.
+ * n_krcvqs is the sum of module parameter kernel receive
+ * contexts, krcvqs[]. It does not include the control
+ * context, so add that.
*/
- num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
+ num_kernel_contexts = n_krcvqs + 1;
else
num_kernel_contexts = num_online_nodes() + 1;
num_kernel_contexts =
@@ -12705,12 +12809,13 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
}
/*
- * User contexts: (to be fixed later)
- * - default to 1 user context per CPU if num_user_contexts is
- * negative
+ * User contexts:
+ * - default to 1 user context per real (non-HT) CPU core if
+ * num_user_contexts is negative
*/
if (num_user_contexts < 0)
- num_user_contexts = num_online_cpus();
+ num_user_contexts =
+ cpumask_weight(&dd->affinity->real_cpu_mask);
total_contexts = num_kernel_contexts + num_user_contexts;
@@ -12727,6 +12832,19 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
total_contexts = num_kernel_contexts + num_user_contexts;
}
+ /* each user context requires an entry in the RMT */
+ qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
+ if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
+ user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
+ dd_dev_err(dd,
+ "RMT size is reducing the number of user receive contexts from %d to %d\n",
+ (int)num_user_contexts,
+ user_rmt_reduced);
+ /* recalculate */
+ num_user_contexts = user_rmt_reduced;
+ total_contexts = num_kernel_contexts + num_user_contexts;
+ }
+
/* the first N are kernel contexts, the rest are user contexts */
dd->num_rcv_contexts = total_contexts;
dd->n_krcv_queues = num_kernel_contexts;
@@ -12776,12 +12894,13 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
dd->num_send_contexts = ret;
dd_dev_info(
dd,
- "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
+ "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
dd->chip_send_contexts,
dd->num_send_contexts,
dd->sc_sizes[SC_KERNEL].count,
dd->sc_sizes[SC_ACK].count,
- dd->sc_sizes[SC_USER].count);
+ dd->sc_sizes[SC_USER].count,
+ dd->sc_sizes[SC_VL15].count);
ret = 0; /* success */
}
@@ -13451,122 +13570,224 @@ static void init_qpmap_table(struct hfi1_devdata *dd,
int i;
u64 ctxt = first_ctxt;
- for (i = 0; i < 256;) {
+ for (i = 0; i < 256; i++) {
reg |= ctxt << (8 * (i % 8));
- i++;
ctxt++;
if (ctxt > last_ctxt)
ctxt = first_ctxt;
- if (i % 8 == 0) {
+ if (i % 8 == 7) {
write_csr(dd, regno, reg);
reg = 0;
regno += 8;
}
}
- if (i % 8)
- write_csr(dd, regno, reg);
add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
| RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
}
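
[editor's note] The corrected init_qpmap_table() loop above now flushes a register when its 8th byte is placed (i % 8 == 7) instead of one iteration late. A runnable userspace sketch of the packing, with assumed context numbers:

	#include <stdint.h>
	#include <stdio.h>

	/* Pack 256 one-byte context numbers into 32 little-endian u64
	 * "registers", flushing each register once its 8th byte lands.
	 * Wrapping the context number mirrors first_ctxt..last_ctxt reuse. */
	static void fill_qpmap(uint64_t regs[32], uint8_t first, uint8_t last)
	{
		uint64_t reg = 0;
		uint8_t ctxt = first;

		for (int i = 0; i < 256; i++) {
			reg |= (uint64_t)ctxt << (8 * (i % 8));
			if (++ctxt > last)
				ctxt = first;
			if (i % 8 == 7) {
				regs[i / 8] = reg;	/* write_csr() in the driver */
				reg = 0;
			}
		}
	}

	int main(void)
	{
		uint64_t regs[32];

		fill_qpmap(regs, 1, 3);
		printf("reg0 = 0x%016llx\n", (unsigned long long)regs[0]);
		/* bytes 1,2,3,1,2,3,1,2 -> 0x0201030201030201 */
		return 0;
	}
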
-/**
- * init_qos - init RX qos
- * @dd - device data
- * @first_context
- *
- * This routine initializes Rule 0 and the
- * RSM map table to implement qos.
- *
- * If all of the limit tests succeed,
- * qos is applied based on the array
- * interpretation of krcvqs where
- * entry 0 is VL0.
- *
- * The number of vl bits (n) and the number of qpn
- * bits (m) are computed to feed both the RSM map table
- * and the single rule.
- *
+struct rsm_map_table {
+ u64 map[NUM_MAP_REGS];
+ unsigned int used;
+};
+
+struct rsm_rule_data {
+ u8 offset;
+ u8 pkt_type;
+ u32 field1_off;
+ u32 field2_off;
+ u32 index1_off;
+ u32 index1_width;
+ u32 index2_off;
+ u32 index2_width;
+ u32 mask1;
+ u32 value1;
+ u32 mask2;
+ u32 value2;
+};
+
+/*
+ * Return an initialized RMT map table for users to fill in. OK if it
+ * returns NULL, indicating no table.
*/
-static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
+static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
{
+ struct rsm_map_table *rmt;
+ u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
+
+ rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
+ if (rmt) {
+ memset(rmt->map, rxcontext, sizeof(rmt->map));
+ rmt->used = 0;
+ }
+
+ return rmt;
+}
+
+/*
+ * Write the final RMT map table to the chip and free the table. OK if
+ * table is NULL.
+ */
+static void complete_rsm_map_table(struct hfi1_devdata *dd,
+ struct rsm_map_table *rmt)
+{
+ int i;
+
+ if (rmt) {
+ /* write table to chip */
+ for (i = 0; i < NUM_MAP_REGS; i++)
+ write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
+
+ /* enable RSM */
+ add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
+ }
+}
+
+/*
+ * Add a receive side mapping rule.
+ */
+static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
+ struct rsm_rule_data *rrd)
+{
+ write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
+ (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
+ 1ull << rule_index | /* enable bit */
+ (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
+ write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
+ (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
+ (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
+ (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
+ (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
+ (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
+ (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
+ write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
+ (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
+ (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
+ (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
+ (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
+}
+
+/* return the number of RSM map table entries that will be used for QOS */
+static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
+ unsigned int *np)
+{
+ int i;
+ unsigned int m, n;
u8 max_by_vl = 0;
- unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
- u64 *rsmmap;
- u64 reg;
- u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
- /* validate */
+ /* is QOS active at all? */
if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
num_vls == 1 ||
krcvqsset <= 1)
- goto bail;
- for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
+ goto no_qos;
+
+ /* determine bits for qpn */
+ for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
if (krcvqs[i] > max_by_vl)
max_by_vl = krcvqs[i];
if (max_by_vl > 32)
- goto bail;
- qpns_per_vl = __roundup_pow_of_two(max_by_vl);
- /* determine bits vl */
- n = ilog2(num_vls);
- /* determine bits for qpn */
- m = ilog2(qpns_per_vl);
+ goto no_qos;
+ m = ilog2(__roundup_pow_of_two(max_by_vl));
+
+ /* determine bits for vl */
+ n = ilog2(__roundup_pow_of_two(num_vls));
+
+ /* reject if too much is used */
if ((m + n) > 7)
+ goto no_qos;
+
+ if (mp)
+ *mp = m;
+ if (np)
+ *np = n;
+
+ return 1 << (m + n);
+
+no_qos:
+ if (mp)
+ *mp = 0;
+ if (np)
+ *np = 0;
+ return 0;
+}
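
[editor's note] A worked instance of the qos_rmt_entries() bit math, under assumed module parameters; the helpers replicate ilog2()/roundup_pow_of_two() for illustration only.

	#include <stdio.h>

	static unsigned ilog2_u(unsigned v)	/* floor(log2(v)), v > 0 */
	{
		unsigned r = 0;
		while (v >>= 1)
			r++;
		return r;
	}

	static unsigned roundup_pow2(unsigned v)
	{
		unsigned p = 1;
		while (p < v)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		unsigned max_by_vl = 6, num_vls = 8;	/* assumed krcvqs/num_vls */
		unsigned m = ilog2_u(roundup_pow2(max_by_vl));	/* qpn bits: 3 */
		unsigned n = ilog2_u(roundup_pow2(num_vls));	/* vl bits: 3 */

		if (m + n > 7)
			puts("no qos");
		else
			printf("m=%u n=%u entries=%u\n", m, n, 1u << (m + n));
		/* prints m=3 n=3 entries=64, well under the 256-entry table */
		return 0;
	}
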
+
+/**
+ * init_qos - init RX qos
+ * @dd - device data
+ * @rmt - RSM map table
+ *
+ * This routine initializes Rule 0 and the RSM map table to implement
+ * quality of service (qos).
+ *
+ * If all of the limit tests succeed, qos is applied based on the array
+ * interpretation of krcvqs where entry 0 is VL0.
+ *
+ * The number of vl bits (n) and the number of qpn bits (m) are computed to
+ * feed both the RSM map table and the single rule.
+ */
+static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
+{
+ struct rsm_rule_data rrd;
+ unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
+ unsigned int rmt_entries;
+ u64 reg;
+
+ if (!rmt)
goto bail;
- if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
+ rmt_entries = qos_rmt_entries(dd, &m, &n);
+ if (rmt_entries == 0)
goto bail;
- rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
- if (!rsmmap)
+ qpns_per_vl = 1 << m;
+
+ /* enough room in the map table? */
+ rmt_entries = 1 << (m + n);
+ if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
goto bail;
- memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
- /* init the local copy of the table */
- for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
+
+ /* add qos entries to the RSM map table */
+ for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
unsigned tctxt;
for (qpn = 0, tctxt = ctxt;
krcvqs[i] && qpn < qpns_per_vl; qpn++) {
unsigned idx, regoff, regidx;
- /* generate index <= 128 */
- idx = (qpn << n) ^ i;
+ /* generate the index the hardware will produce */
+ idx = rmt->used + ((qpn << n) ^ i);
regoff = (idx % 8) * 8;
regidx = idx / 8;
- reg = rsmmap[regidx];
- /* replace 0xff with context number */
+ /* replace default with context number */
+ reg = rmt->map[regidx];
reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
<< regoff);
reg |= (u64)(tctxt++) << regoff;
- rsmmap[regidx] = reg;
+ rmt->map[regidx] = reg;
if (tctxt == ctxt + krcvqs[i])
tctxt = ctxt;
}
ctxt += krcvqs[i];
}
- /* flush cached copies to chip */
- for (i = 0; i < NUM_MAP_REGS; i++)
- write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
- /* add rule0 */
- write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
- RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK <<
- RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
- 2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
- write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
- LRH_BTH_MATCH_OFFSET << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
- LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
- LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
- ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
- QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
- ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
- write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
- LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
- LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
- LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
- LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
- /* Enable RSM */
- add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
- kfree(rsmmap);
- /* map everything else to first context */
- init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
+
+ rrd.offset = rmt->used;
+ rrd.pkt_type = 2;
+ rrd.field1_off = LRH_BTH_MATCH_OFFSET;
+ rrd.field2_off = LRH_SC_MATCH_OFFSET;
+ rrd.index1_off = LRH_SC_SELECT_OFFSET;
+ rrd.index1_width = n;
+ rrd.index2_off = QPN_SELECT_OFFSET;
+ rrd.index2_width = m + n;
+ rrd.mask1 = LRH_BTH_MASK;
+ rrd.value1 = LRH_BTH_VALUE;
+ rrd.mask2 = LRH_SC_MASK;
+ rrd.value2 = LRH_SC_VALUE;
+
+ /* add rule 0 */
+ add_rsm_rule(dd, 0, &rrd);
+
+ /* mark RSM map entries as used */
+ rmt->used += rmt_entries;
+ /* map everything else to the mcast/err/vl15 context */
+ init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
dd->qos_shift = n + 1;
return;
bail:
@@ -13574,13 +13795,86 @@ bail:
init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
}
+static void init_user_fecn_handling(struct hfi1_devdata *dd,
+ struct rsm_map_table *rmt)
+{
+ struct rsm_rule_data rrd;
+ u64 reg;
+ int i, idx, regoff, regidx;
+ u8 offset;
+
+ /* there needs to be enough room in the map table */
+ if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
+ dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
+ return;
+ }
+
+ /*
+ * RSM will extract the destination context as an index into the
+ * map table. The destination contexts are a sequential block
+ * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
+ * Map entries are accessed as offset + extracted value. Adjust
+ * the added offset so this sequence can be placed anywhere in
+ * the table - as long as the entries themselves do not wrap.
+ * There are only enough bits in offset for the table size, so
+ * start with that to allow for a "negative" offset.
+ */
+ offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
+ (int)dd->first_user_ctxt);
+
+ for (i = dd->first_user_ctxt, idx = rmt->used;
+ i < dd->num_rcv_contexts; i++, idx++) {
+ /* replace with identity mapping */
+ regoff = (idx % 8) * 8;
+ regidx = idx / 8;
+ reg = rmt->map[regidx];
+ reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
+ reg |= (u64)i << regoff;
+ rmt->map[regidx] = reg;
+ }
+
+ /*
+ * For RSM intercept of Expected FECN packets:
+ * o packet type 0 - expected
+ * o match on F (bit 95), using select/match 1, and
+ * o match on SH (bit 133), using select/match 2.
+ *
+ * Use index 1 to extract the 8-bit receive context from DestQP
+ * (start at bit 64). Use that as the RSM map table index.
+ */
+ rrd.offset = offset;
+ rrd.pkt_type = 0;
+ rrd.field1_off = 95;
+ rrd.field2_off = 133;
+ rrd.index1_off = 64;
+ rrd.index1_width = 8;
+ rrd.index2_off = 0;
+ rrd.index2_width = 0;
+ rrd.mask1 = 1;
+ rrd.value1 = 1;
+ rrd.mask2 = 1;
+ rrd.value2 = 1;
+
+ /* add rule 1 */
+ add_rsm_rule(dd, 1, &rrd);
+
+ rmt->used += dd->num_user_contexts;
+}
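
[editor's note] The "negative offset" comment above relies on the RSM adding the 8-bit rule offset to the extracted destination context modulo the 256-entry table. A short demonstration that the identity entries written at rmt->used line up even when used < first_user_ctxt; the numbers are assumptions for illustration.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int used = 80, first_user_ctxt = 96, num_rcv_contexts = 112;
		uint8_t offset = (uint8_t)(256 + used - first_user_ctxt); /* 240 */

		for (int ctxt = first_user_ctxt; ctxt < num_rcv_contexts; ctxt++) {
			uint8_t idx = (uint8_t)(offset + ctxt);	/* hardware lookup */
			int expect = used + (ctxt - first_user_ctxt);
			printf("ctxt %d -> map[%u] (expected %d)\n",
			       ctxt, idx, expect);
		}
		return 0;
	}
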
+
static void init_rxe(struct hfi1_devdata *dd)
{
+ struct rsm_map_table *rmt;
+
/* enable all receive errors */
write_csr(dd, RCV_ERR_MASK, ~0ull);
- /* setup QPN map table - start where VL15 context leaves off */
- init_qos(dd, dd->n_krcv_queues > MIN_KERNEL_KCTXTS ?
- MIN_KERNEL_KCTXTS : 0);
+
+ rmt = alloc_rsm_map_table(dd);
+ /* set up QOS, including the QPN map table */
+ init_qos(dd, rmt);
+ init_user_fecn_handling(dd, rmt);
+ complete_rsm_map_table(dd, rmt);
+ kfree(rmt);
+
/*
* make sure RcvCtrl.RcvWcb <= PCIe Device Control
* Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
@@ -13762,6 +14056,7 @@ int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
+ reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
done:
return ret;
@@ -13818,8 +14113,14 @@ static int init_asic_data(struct hfi1_devdata *dd)
{
unsigned long flags;
struct hfi1_devdata *tmp, *peer = NULL;
+ struct hfi1_asic_data *asic_data;
int ret = 0;
+ /* pre-allocate the asic structure in case we are the first device */
+ asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
+ if (!asic_data)
+ return -ENOMEM;
+
spin_lock_irqsave(&hfi1_devs_lock, flags);
/* Find our peer device */
list_for_each_entry(tmp, &hfi1_dev_list, list) {
@@ -13831,18 +14132,14 @@ static int init_asic_data(struct hfi1_devdata *dd)
}
if (peer) {
+ /* use already allocated structure */
dd->asic_data = peer->asic_data;
+ kfree(asic_data);
} else {
- dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
- if (!dd->asic_data) {
- ret = -ENOMEM;
- goto done;
- }
+ dd->asic_data = asic_data;
mutex_init(&dd->asic_data->asic_resource_mutex);
}
dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
-
-done:
spin_unlock_irqrestore(&hfi1_devs_lock, flags);
return ret;
}
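
[editor's note] The init_asic_data() hunk above moves the kzalloc(GFP_KERNEL) outside the spinlock because the allocation may sleep while spin_lock_irqsave() sections must stay atomic: allocate first, then either adopt the peer's structure and free the preallocation, or install it. A userspace analogue of the pattern with pthreads (types and lookup are illustrative):

	#include <pthread.h>
	#include <stdlib.h>

	struct shared { int refs; };
	static struct shared *registry;	/* set by the first caller */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static struct shared *attach_shared(void)
	{
		struct shared *pre = calloc(1, sizeof(*pre));	/* may block */
		struct shared *ret;

		if (!pre)
			return NULL;

		pthread_mutex_lock(&lock);
		if (registry) {
			ret = registry;		/* peer already allocated it */
			free(pre);		/* drop our preallocation */
		} else {
			registry = ret = pre;	/* we are first: install ours */
		}
		ret->refs++;
		pthread_mutex_unlock(&lock);
		return ret;
	}

	int main(void)
	{
		struct shared *a = attach_shared();
		struct shared *b = attach_shared();
		return !(a && a == b);	/* both callers share one structure */
	}
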
@@ -14148,6 +14445,19 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
(dd->revision >> CCE_REVISION_SW_SHIFT)
& CCE_REVISION_SW_MASK);
+ /*
+ * The real cpu mask is part of the affinity struct but has to be
+ * initialized earlier than the rest of the affinity struct because it
+ * is needed to calculate the number of user contexts in
+ * set_up_context_variables(). However, hfi1_dev_affinity_init(),
+ * which initializes the rest of the affinity struct members,
+ * depends on set_up_context_variables() for the number of kernel
+ * contexts, so it cannot be called before set_up_context_variables().
+ */
+ ret = init_real_cpu_mask(dd);
+ if (ret)
+ goto bail_cleanup;
+
ret = set_up_context_variables(dd);
if (ret)
goto bail_cleanup;
@@ -14161,9 +14471,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
/* set up KDETH QP prefix in both RX and TX CSRs */
init_kdeth_qp(dd);
- ret = hfi1_dev_affinity_init(dd);
- if (ret)
- goto bail_cleanup;
+ hfi1_dev_affinity_init(dd);
/* send contexts must be set up before receive contexts */
ret = init_send_contexts(dd);
@@ -14303,7 +14611,7 @@ u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
(reason), (ret))
/*
- * Initialize the Avago Thermal sensor.
+ * Initialize the thermal sensor.
*
* After initialization, enable polling of thermal sensor through
* SBus interface. In order for this to work, the SBus Master
diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 4f3b878e4..66a327978 100644
--- a/drivers/staging/rdma/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -389,6 +389,7 @@
#define LAST_REMOTE_STATE_COMPLETE 0x13
#define LINK_QUALITY_INFO 0x14
#define REMOTE_DEVICE_ID 0x15
+#define LINK_DOWN_REASON 0x16
/* 8051 lane specific register field IDs */
#define TX_EQ_SETTINGS 0x00
@@ -397,6 +398,12 @@
/* Lane ID for general configuration registers */
#define GENERAL_CONFIG 4
+/* LINK_TUNING_PARAMETERS fields */
+#define TUNING_METHOD_SHIFT 24
+
+/* LINK_OPTIMIZATION_SETTINGS fields */
+#define ENABLE_EXT_DEV_CONFIG_SHIFT 24
+
/* LOAD_DATA 8051 command shifts and fields */
#define LOAD_DATA_FIELD_ID_SHIFT 40
#define LOAD_DATA_FIELD_ID_MASK 0xfull
@@ -497,6 +504,11 @@
#define PWRM_BER_CONTROL 0x1
#define PWRM_BANDWIDTH_CONTROL 0x2
+/* 8051 link down reasons */
+#define LDR_LINK_TRANSFER_ACTIVE_LOW 0xa
+#define LDR_RECEIVED_LINKDOWN_IDLE_MSG 0xb
+#define LDR_RECEIVED_HOST_OFFLINE_REQ 0xc
+
/* verify capability fabric CRC size bits */
enum {
CAP_CRC_14B = (1 << 0), /* 14b CRC */
@@ -691,7 +703,6 @@ void handle_verify_cap(struct work_struct *work);
void handle_freeze(struct work_struct *work);
void handle_link_up(struct work_struct *work);
void handle_link_down(struct work_struct *work);
-void handle_8051_request(struct work_struct *work);
void handle_link_downgrade(struct work_struct *work);
void handle_link_bounce(struct work_struct *work);
void handle_sma_message(struct work_struct *work);
diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index 770f05c9b..8744de666 100644
--- a/drivers/staging/rdma/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -771,6 +771,7 @@
#define RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK 0x1ull
#define RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT 0
#define RCV_RSM_CFG_PACKET_TYPE_SHIFT 60
+#define RCV_RSM_CFG_OFFSET_SHIFT 32
#define RCV_RSM_MAP_TABLE (RXE + 0x000000000900)
#define RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK 0xFFull
#define RCV_RSM_MATCH (RXE + 0x000000000800)
diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index e9b6bb322..fcc9c217a 100644
--- a/drivers/staging/rdma/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -178,7 +178,8 @@
HFI1_CAP_PKEY_CHECK | \
HFI1_CAP_NO_INTEGRITY)
-#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << 16) | HFI1_USER_SWMINOR)
+#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << HFI1_SWMAJOR_SHIFT) | \
+ HFI1_USER_SWMINOR)
#ifndef HFI1_KERN_TYPE
#define HFI1_KERN_TYPE 0
@@ -349,6 +350,8 @@ struct hfi1_message_header {
#define HFI1_BECN_MASK 1
#define HFI1_BECN_SMASK BIT(HFI1_BECN_SHIFT)
+#define HFI1_PSM_IOC_BASE_SEQ 0x0
+
static inline __u64 rhf_to_cpu(const __le32 *rbuf)
{
return __le64_to_cpu(*((__le64 *)rbuf));
diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index dbab9d9cc..dbab9d9cc 100644
--- a/drivers/staging/rdma/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
diff --git a/drivers/staging/rdma/hfi1/debugfs.h b/drivers/infiniband/hw/hfi1/debugfs.h
index b6fb6814f..b6fb6814f 100644
--- a/drivers/staging/rdma/hfi1/debugfs.h
+++ b/drivers/infiniband/hw/hfi1/debugfs.h
diff --git a/drivers/staging/rdma/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c
index c05c39da8..bf64b5a7b 100644
--- a/drivers/staging/rdma/hfi1/device.c
+++ b/drivers/infiniband/hw/hfi1/device.c
@@ -60,7 +60,8 @@ static dev_t hfi1_dev;
int hfi1_cdev_init(int minor, const char *name,
const struct file_operations *fops,
struct cdev *cdev, struct device **devp,
- bool user_accessible)
+ bool user_accessible,
+ struct kobject *parent)
{
const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor);
struct device *device = NULL;
@@ -68,6 +69,7 @@ int hfi1_cdev_init(int minor, const char *name,
cdev_init(cdev, fops);
cdev->owner = THIS_MODULE;
+ cdev->kobj.parent = parent;
kobject_set_name(&cdev->kobj, name);
ret = cdev_add(cdev, dev, 1);
@@ -82,13 +84,13 @@ int hfi1_cdev_init(int minor, const char *name,
else
device = device_create(class, NULL, dev, NULL, "%s", name);
- if (!IS_ERR(device))
- goto done;
- ret = PTR_ERR(device);
- device = NULL;
- pr_err("Could not create device for minor %d, %s (err %d)\n",
- minor, name, -ret);
- cdev_del(cdev);
+ if (IS_ERR(device)) {
+ ret = PTR_ERR(device);
+ device = NULL;
+ pr_err("Could not create device for minor %d, %s (err %d)\n",
+ minor, name, -ret);
+ cdev_del(cdev);
+ }
done:
*devp = device;
return ret;
diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/infiniband/hw/hfi1/device.h
index 5bb3e83cf..c3ec19cb0 100644
--- a/drivers/staging/rdma/hfi1/device.h
+++ b/drivers/infiniband/hw/hfi1/device.h
@@ -50,7 +50,8 @@
int hfi1_cdev_init(int minor, const char *name,
const struct file_operations *fops,
struct cdev *cdev, struct device **devp,
- bool user_accessible);
+ bool user_accessible,
+ struct kobject *parent);
void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp);
const char *class_name(void);
int __init dev_init(void);
diff --git a/drivers/staging/rdma/hfi1/dma.c b/drivers/infiniband/hw/hfi1/dma.c
index 7e8dab892..7e8dab892 100644
--- a/drivers/staging/rdma/hfi1/dma.c
+++ b/drivers/infiniband/hw/hfi1/dma.c
diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 34511e5df..c75b0ae68 100644
--- a/drivers/staging/rdma/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -75,7 +75,8 @@ DEFINE_MUTEX(hfi1_mutex); /* general driver use */
unsigned int hfi1_max_mtu = HFI1_DEFAULT_MAX_MTU;
module_param_named(max_mtu, hfi1_max_mtu, uint, S_IRUGO);
-MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is 8192");
+MODULE_PARM_DESC(max_mtu, "Set max MTU bytes, default is " __stringify(
+ HFI1_DEFAULT_MAX_MTU));
unsigned int hfi1_cu = 1;
module_param_named(cu, hfi1_cu, uint, S_IRUGO);
@@ -1160,7 +1161,7 @@ int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
ppd->lmc = lmc;
hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);
- dd_dev_info(dd, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid);
+ dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);
return 0;
}
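
[editor's note] The MODULE_PARM_DESC change above uses __stringify() so the documented default tracks HFI1_DEFAULT_MAX_MTU instead of a hard-coded "8192". The kernel macro is a two-level stringification; a userspace replica (the MTU value shown is assumed for illustration):

	#include <stdio.h>

	/* The extra expansion level turns the *value* of the macro into a
	 * string; plain #x would yield "HFI1_DEFAULT_MAX_MTU" instead. */
	#define __stringify_1(x) #x
	#define __stringify(x)   __stringify_1(x)

	#define HFI1_DEFAULT_MAX_MTU 8192	/* assumed value */

	int main(void)
	{
		puts("Set max MTU bytes, default is "
		     __stringify(HFI1_DEFAULT_MAX_MTU));
		/* prints: Set max MTU bytes, default is 8192 */
		return 0;
	}
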
diff --git a/drivers/staging/rdma/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
index 106349fc1..106349fc1 100644
--- a/drivers/staging/rdma/hfi1/efivar.c
+++ b/drivers/infiniband/hw/hfi1/efivar.c
diff --git a/drivers/staging/rdma/hfi1/efivar.h b/drivers/infiniband/hw/hfi1/efivar.h
index 94e9e70de..94e9e70de 100644
--- a/drivers/staging/rdma/hfi1/efivar.h
+++ b/drivers/infiniband/hw/hfi1/efivar.h
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c
new file mode 100644
index 000000000..36b77943c
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/delay.h>
+#include "hfi.h"
+#include "common.h"
+#include "eprom.h"
+
+#define CMD_SHIFT 24
+#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT))
+
+/* controller interface speeds */
+#define EP_SPEED_FULL 0x2 /* full speed */
+
+/*
+ * How long to wait for the EPROM to become available, in ms.
+ * The spec 32 Mb EPROM takes around 40s to erase then write.
+ * Double it for safety.
+ */
+#define EPROM_TIMEOUT 80000 /* ms */
+/*
+ * Initialize the EPROM handler.
+ */
+int eprom_init(struct hfi1_devdata *dd)
+{
+ int ret = 0;
+
+ /* only the discrete chip has an EPROM */
+ if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
+ return 0;
+
+ /*
+ * It is OK if both HFIs reset the EPROM as long as they don't
+ * do it at the same time.
+ */
+ ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: unable to acquire EPROM resource, no EPROM support\n",
+ __func__);
+ goto done_asic;
+ }
+
+ /* reset EPROM to be sure it is in a good state */
+
+ /* set reset */
+ write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
+ /* clear reset, set speed */
+ write_csr(dd, ASIC_EEP_CTL_STAT,
+ EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
+
+ /* wake the device with command "release powerdown NoID" */
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
+
+ dd->eprom_available = true;
+ release_chip_resource(dd, CR_EPROM);
+done_asic:
+ return ret;
+}
diff --git a/drivers/staging/rdma/hfi1/eprom.h b/drivers/infiniband/hw/hfi1/eprom.h
index d41f0b1af..d41f0b1af 100644
--- a/drivers/staging/rdma/hfi1/eprom.h
+++ b/drivers/infiniband/hw/hfi1/eprom.h
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index c1c5bf82a..c702a0096 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -72,8 +72,6 @@
*/
static int hfi1_file_open(struct inode *, struct file *);
static int hfi1_file_close(struct inode *, struct file *);
-static ssize_t hfi1_file_write(struct file *, const char __user *,
- size_t, loff_t *);
static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
static int hfi1_file_mmap(struct file *, struct vm_area_struct *);
@@ -86,8 +84,7 @@ static int get_ctxt_info(struct file *, void __user *, __u32);
static int get_base_info(struct file *, void __user *, __u32);
static int setup_ctxt(struct file *);
static int setup_subctxt(struct hfi1_ctxtdata *);
-static int get_user_context(struct file *, struct hfi1_user_info *,
- int, unsigned);
+static int get_user_context(struct file *, struct hfi1_user_info *, int);
static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
static int allocate_ctxt(struct file *, struct hfi1_devdata *,
struct hfi1_user_info *);
@@ -97,13 +94,15 @@ static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
static int vma_fault(struct vm_area_struct *, struct vm_fault *);
+static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
+ unsigned long arg);
static const struct file_operations hfi1_file_ops = {
.owner = THIS_MODULE,
- .write = hfi1_file_write,
.write_iter = hfi1_write_iter,
.open = hfi1_file_open,
.release = hfi1_file_close,
+ .unlocked_ioctl = hfi1_file_ioctl,
.poll = hfi1_poll,
.mmap = hfi1_file_mmap,
.llseek = noop_llseek,
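
[editor's note] The file_ops.c conversion below replaces the write()-framed struct hfi1_cmd interface with standard unlocked_ioctl dispatch, so userspace now drives the driver with conventional ioctl request codes. A sketch of how such a code is constructed from userspace; the magic character, command number, and payload struct are illustrative, not the real hfi1 uapi values.

	#include <stdio.h>
	#include <linux/ioctl.h>

	#define EX_IOCTL_BASE 'H'	/* assumed magic */
	struct ex_user_info { unsigned long long val; };
	#define EX_IOCTL_ASSIGN_CTXT _IOW(EX_IOCTL_BASE, 1, struct ex_user_info)

	int main(void)
	{
		/* The request code encodes direction, size, magic and number,
		 * which the driver's switch (cmd) dispatches on. */
		printf("request code: 0x%lx\n",
		       (unsigned long)EX_IOCTL_ASSIGN_CTXT);
		return 0;
	}
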
@@ -169,6 +168,13 @@ static inline int is_valid_mmap(u64 token)
static int hfi1_file_open(struct inode *inode, struct file *fp)
{
+ struct hfi1_devdata *dd = container_of(inode->i_cdev,
+ struct hfi1_devdata,
+ user_cdev);
+
+ /* Just take a ref now. Not all opens result in a context assign */
+ kobject_get(&dd->kobj);
+
/* The real work is performed later in assign_ctxt() */
fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
if (fp->private_data) /* no cpu affinity by default */
@@ -176,127 +182,62 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
return fp->private_data ? 0 : -ENOMEM;
}
-static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
- size_t count, loff_t *offset)
+static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
+ unsigned long arg)
{
- const struct hfi1_cmd __user *ucmd;
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_cmd cmd;
struct hfi1_user_info uinfo;
struct hfi1_tid_info tinfo;
+ int ret = 0;
unsigned long addr;
- ssize_t consumed = 0, copy = 0, ret = 0;
- void *dest = NULL;
- __u64 user_val = 0;
- int uctxt_required = 1;
- int must_be_root = 0;
-
- /* FIXME: This interface cannot continue out of staging */
- if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
- return -EACCES;
-
- if (count < sizeof(cmd)) {
- ret = -EINVAL;
- goto bail;
- }
-
- ucmd = (const struct hfi1_cmd __user *)data;
- if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
- ret = -EFAULT;
- goto bail;
- }
-
- consumed = sizeof(cmd);
-
- switch (cmd.type) {
- case HFI1_CMD_ASSIGN_CTXT:
- uctxt_required = 0; /* assigned user context not required */
- copy = sizeof(uinfo);
- dest = &uinfo;
- break;
- case HFI1_CMD_SDMA_STATUS_UPD:
- case HFI1_CMD_CREDIT_UPD:
- copy = 0;
- break;
- case HFI1_CMD_TID_UPDATE:
- case HFI1_CMD_TID_FREE:
- case HFI1_CMD_TID_INVAL_READ:
- copy = sizeof(tinfo);
- dest = &tinfo;
- break;
- case HFI1_CMD_USER_INFO:
- case HFI1_CMD_RECV_CTRL:
- case HFI1_CMD_POLL_TYPE:
- case HFI1_CMD_ACK_EVENT:
- case HFI1_CMD_CTXT_INFO:
- case HFI1_CMD_SET_PKEY:
- case HFI1_CMD_CTXT_RESET:
- copy = 0;
- user_val = cmd.addr;
- break;
- case HFI1_CMD_EP_INFO:
- case HFI1_CMD_EP_ERASE_CHIP:
- case HFI1_CMD_EP_ERASE_RANGE:
- case HFI1_CMD_EP_READ_RANGE:
- case HFI1_CMD_EP_WRITE_RANGE:
- uctxt_required = 0; /* assigned user context not required */
- must_be_root = 1; /* validate user */
- copy = 0;
- break;
- default:
- ret = -EINVAL;
- goto bail;
- }
+ int uval = 0;
+ unsigned long ul_uval = 0;
+ u16 uval16 = 0;
+
+ hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
+ if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
+ cmd != HFI1_IOCTL_GET_VERS &&
+ !uctxt)
+ return -EINVAL;
- /* If the command comes with user data, copy it. */
- if (copy) {
- if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
- ret = -EFAULT;
- goto bail;
- }
- consumed += copy;
- }
+ switch (cmd) {
+ case HFI1_IOCTL_ASSIGN_CTXT:
+ if (uctxt)
+ return -EINVAL;
- /*
- * Make sure there is a uctxt when needed.
- */
- if (uctxt_required && !uctxt) {
- ret = -EINVAL;
- goto bail;
- }
-
- /* only root can do these operations */
- if (must_be_root && !capable(CAP_SYS_ADMIN)) {
- ret = -EPERM;
- goto bail;
- }
+ if (copy_from_user(&uinfo,
+ (struct hfi1_user_info __user *)arg,
+ sizeof(uinfo)))
+ return -EFAULT;
- switch (cmd.type) {
- case HFI1_CMD_ASSIGN_CTXT:
ret = assign_ctxt(fp, &uinfo);
if (ret < 0)
- goto bail;
- ret = setup_ctxt(fp);
+ return ret;
+ ret = setup_ctxt(fp);
if (ret)
- goto bail;
+ return ret;
ret = user_init(fp);
break;
- case HFI1_CMD_CTXT_INFO:
- ret = get_ctxt_info(fp, (void __user *)(unsigned long)
- user_val, cmd.len);
+ case HFI1_IOCTL_CTXT_INFO:
+ ret = get_ctxt_info(fp, (void __user *)(unsigned long)arg,
+ sizeof(struct hfi1_ctxt_info));
break;
- case HFI1_CMD_USER_INFO:
- ret = get_base_info(fp, (void __user *)(unsigned long)
- user_val, cmd.len);
+ case HFI1_IOCTL_USER_INFO:
+ ret = get_base_info(fp, (void __user *)(unsigned long)arg,
+ sizeof(struct hfi1_base_info));
break;
- case HFI1_CMD_SDMA_STATUS_UPD:
- break;
- case HFI1_CMD_CREDIT_UPD:
+ case HFI1_IOCTL_CREDIT_UPD:
if (uctxt && uctxt->sc)
sc_return_credits(uctxt->sc);
break;
- case HFI1_CMD_TID_UPDATE:
+
+ case HFI1_IOCTL_TID_UPDATE:
+ if (copy_from_user(&tinfo,
+ (struct hfi1_tid_info __user *)arg,
+ sizeof(tinfo)))
+ return -EFAULT;
+
ret = hfi1_user_exp_rcv_setup(fp, &tinfo);
if (!ret) {
/*
@@ -305,57 +246,82 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
* These fields are adjacent in the structure so
* we can copy them at the same time.
*/
- addr = (unsigned long)cmd.addr +
- offsetof(struct hfi1_tid_info, tidcnt);
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt) +
sizeof(tinfo.length)))
ret = -EFAULT;
}
break;
- case HFI1_CMD_TID_INVAL_READ:
- ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
+
+ case HFI1_IOCTL_TID_FREE:
+ if (copy_from_user(&tinfo,
+ (struct hfi1_tid_info __user *)arg,
+ sizeof(tinfo)))
+ return -EFAULT;
+
+ ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
if (ret)
break;
- addr = (unsigned long)cmd.addr +
- offsetof(struct hfi1_tid_info, tidcnt);
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
ret = -EFAULT;
break;
- case HFI1_CMD_TID_FREE:
- ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
+
+ case HFI1_IOCTL_TID_INVAL_READ:
+ if (copy_from_user(&tinfo,
+ (struct hfi1_tid_info __user *)arg,
+ sizeof(tinfo)))
+ return -EFAULT;
+
+ ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
if (ret)
break;
- addr = (unsigned long)cmd.addr +
- offsetof(struct hfi1_tid_info, tidcnt);
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
ret = -EFAULT;
break;
- case HFI1_CMD_RECV_CTRL:
- ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val);
+
+ case HFI1_IOCTL_RECV_CTRL:
+ ret = get_user(uval, (int __user *)arg);
+ if (ret != 0)
+ return -EFAULT;
+ ret = manage_rcvq(uctxt, fd->subctxt, uval);
break;
- case HFI1_CMD_POLL_TYPE:
- uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
+
+ case HFI1_IOCTL_POLL_TYPE:
+ ret = get_user(uval, (int __user *)arg);
+ if (ret != 0)
+ return -EFAULT;
+ uctxt->poll_type = (typeof(uctxt->poll_type))uval;
break;
- case HFI1_CMD_ACK_EVENT:
- ret = user_event_ack(uctxt, fd->subctxt, user_val);
+
+ case HFI1_IOCTL_ACK_EVENT:
+ ret = get_user(ul_uval, (unsigned long __user *)arg);
+ if (ret != 0)
+ return -EFAULT;
+ ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
break;
- case HFI1_CMD_SET_PKEY:
+
+ case HFI1_IOCTL_SET_PKEY:
+ ret = get_user(uval16, (u16 __user *)arg);
+ if (ret != 0)
+ return -EFAULT;
if (HFI1_CAP_IS_USET(PKEY_CHECK))
- ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val);
+ ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
else
- ret = -EPERM;
+ return -EPERM;
break;
- case HFI1_CMD_CTXT_RESET: {
+
+ case HFI1_IOCTL_CTXT_RESET: {
struct send_context *sc;
struct hfi1_devdata *dd;
- if (!uctxt || !uctxt->dd || !uctxt->sc) {
- ret = -EINVAL;
- break;
- }
+ if (!uctxt || !uctxt->dd || !uctxt->sc)
+ return -EINVAL;
+
/*
* There is no protection here. User level has to
* guarantee that no one will be writing to the send
@@ -373,10 +339,9 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
wait_event_interruptible_timeout(
sc->halt_wait, (sc->flags & SCF_HALTED),
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
- if (!(sc->flags & SCF_HALTED)) {
- ret = -ENOLCK;
- break;
- }
+ if (!(sc->flags & SCF_HALTED))
+ return -ENOLCK;
+
/*
* If the send context was halted due to a Freeze,
* wait until the device has been "unfrozen" before
@@ -387,18 +352,16 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
dd->event_queue,
!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
- if (dd->flags & HFI1_FROZEN) {
- ret = -ENOLCK;
- break;
- }
- if (dd->flags & HFI1_FORCED_FREEZE) {
+ if (dd->flags & HFI1_FROZEN)
+ return -ENOLCK;
+
+ if (dd->flags & HFI1_FORCED_FREEZE)
/*
* Don't allow context reset if we are into
* forced freeze
*/
- ret = -ENODEV;
- break;
- }
+ return -ENODEV;
+
sc_disable(sc);
ret = sc_enable(sc);
hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
@@ -410,18 +373,17 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
sc_return_credits(sc);
break;
}
- case HFI1_CMD_EP_INFO:
- case HFI1_CMD_EP_ERASE_CHIP:
- case HFI1_CMD_EP_ERASE_RANGE:
- case HFI1_CMD_EP_READ_RANGE:
- case HFI1_CMD_EP_WRITE_RANGE:
- ret = handle_eprom_command(fp, &cmd);
+
+ case HFI1_IOCTL_GET_VERS:
+ uval = HFI1_USER_SWVERSION;
+ if (put_user(uval, (int __user *)arg))
+ return -EFAULT;
break;
+
+ default:
+ return -EINVAL;
}
- if (ret >= 0)
- ret = consumed;
-bail:
return ret;
}
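
The conversion above replaces the old write()-based command parser with a
single unlocked_ioctl handler: each command is its own ioctl number, struct
payloads are fetched from arg with copy_from_user(), and scalar payloads
with get_user(). A minimal sketch of the same dispatch pattern, using
hypothetical EXAMPLE_* command numbers and types rather than the hfi1 ABI:

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_info { __u32 flags; __u32 count; };

#define EXAMPLE_IOCTL_SET_INFO _IOW('x', 1, struct example_info)
#define EXAMPLE_IOCTL_SET_MODE _IOW('x', 2, int)

static long example_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	struct example_info info;
	int mode;

	switch (cmd) {
	case EXAMPLE_IOCTL_SET_INFO:
		/* struct payloads come in via copy_from_user() */
		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;
		return 0;
	case EXAMPLE_IOCTL_SET_MODE:
		/* scalar payloads use get_user() on the user pointer */
		if (get_user(mode, (int __user *)arg))
			return -EFAULT;
		return 0;
	default:
		return -EINVAL;
	}
}
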
@@ -738,7 +700,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
{
struct hfi1_filedata *fdata = fp->private_data;
struct hfi1_ctxtdata *uctxt = fdata->uctxt;
- struct hfi1_devdata *dd;
+ struct hfi1_devdata *dd = container_of(inode->i_cdev,
+ struct hfi1_devdata,
+ user_cdev);
unsigned long flags, *ev;
fp->private_data = NULL;
@@ -747,7 +711,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
goto done;
hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
- dd = uctxt->dd;
mutex_lock(&hfi1_mutex);
flush_wc();
@@ -813,6 +776,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
mutex_unlock(&hfi1_mutex);
hfi1_free_ctxtdata(dd, uctxt);
done:
+ kobject_put(&dd->kobj);
kfree(fdata);
return 0;
}
@@ -836,7 +800,7 @@ static u64 kvirt_to_phys(void *addr)
static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
{
int i_minor, ret = 0;
- unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;
+ unsigned int swmajor, swminor;
swmajor = uinfo->userversion >> 16;
if (swmajor != HFI1_USER_SWMAJOR) {
@@ -846,9 +810,6 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
swminor = uinfo->userversion & 0xffff;
- if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
- alg = uinfo->hfi1_alg;
-
mutex_lock(&hfi1_mutex);
/* First, let's check if we need to set up a shared context */
if (uinfo->subctxt_cnt) {
@@ -868,7 +829,7 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
*/
if (!ret) {
i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
- ret = get_user_context(fp, uinfo, i_minor - 1, alg);
+ ret = get_user_context(fp, uinfo, i_minor);
}
done_unlock:
mutex_unlock(&hfi1_mutex);
@@ -876,71 +837,26 @@ done:
return ret;
}
-/* return true if the device available for general use */
-static int usable_device(struct hfi1_devdata *dd)
-{
- struct hfi1_pportdata *ppd = dd->pport;
-
- return driver_lstate(ppd) == IB_PORT_ACTIVE;
-}
-
static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
- int devno, unsigned alg)
+ int devno)
{
struct hfi1_devdata *dd = NULL;
- int ret = 0, devmax, npresent, nup, dev;
+ int devmax, npresent, nup;
devmax = hfi1_count_units(&npresent, &nup);
- if (!npresent) {
- ret = -ENXIO;
- goto done;
- }
- if (!nup) {
- ret = -ENETDOWN;
- goto done;
- }
- if (devno >= 0) {
- dd = hfi1_lookup(devno);
- if (!dd)
- ret = -ENODEV;
- else if (!dd->freectxts)
- ret = -EBUSY;
- } else {
- struct hfi1_devdata *pdd;
-
- if (alg == HFI1_ALG_ACROSS) {
- unsigned free = 0U;
-
- for (dev = 0; dev < devmax; dev++) {
- pdd = hfi1_lookup(dev);
- if (!pdd)
- continue;
- if (!usable_device(pdd))
- continue;
- if (pdd->freectxts &&
- pdd->freectxts > free) {
- dd = pdd;
- free = pdd->freectxts;
- }
- }
- } else {
- for (dev = 0; dev < devmax; dev++) {
- pdd = hfi1_lookup(dev);
- if (!pdd)
- continue;
- if (!usable_device(pdd))
- continue;
- if (pdd->freectxts) {
- dd = pdd;
- break;
- }
- }
- }
- if (!dd)
- ret = -EBUSY;
- }
-done:
- return ret ? ret : allocate_ctxt(fp, dd, uinfo);
+ if (!npresent)
+ return -ENXIO;
+
+ if (!nup)
+ return -ENETDOWN;
+
+ dd = hfi1_lookup(devno);
+ if (!dd)
+ return -ENODEV;
+ else if (!dd->freectxts)
+ return -EBUSY;
+
+ return allocate_ctxt(fp, dd, uinfo);
}
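
With the wildcard device and the HFI1_ALG_* selection loops removed, a user
context always comes from the unit encoded in the device minor. A small
sketch of that minor-to-unit mapping; example_lookup() is a stand-in for the
driver's per-unit lookup, not a real hfi1 helper:

#include <linux/fs.h>

/* one cdev per unit: the minor alone names the device */
static struct hfi1_devdata *example_dev_from_inode(struct inode *inode)
{
	int unit = iminor(inode) - HFI1_USER_MINOR_BASE;

	return unit < 0 ? NULL : example_lookup(unit);
}
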
static int find_shared_ctxt(struct file *fp,
@@ -1546,170 +1462,10 @@ done:
return ret;
}
-static int ui_open(struct inode *inode, struct file *filp)
-{
- struct hfi1_devdata *dd;
-
- dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
- filp->private_data = dd; /* for other methods */
- return 0;
-}
-
-static int ui_release(struct inode *inode, struct file *filp)
-{
- /* nothing to do */
- return 0;
-}
-
-static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
-{
- struct hfi1_devdata *dd = filp->private_data;
-
- return fixed_size_llseek(filp, offset, whence,
- (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
-}
-
-/* NOTE: assumes unsigned long is 8 bytes */
-static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
- loff_t *f_pos)
-{
- struct hfi1_devdata *dd = filp->private_data;
- void __iomem *base = dd->kregbase;
- unsigned long total, csr_off,
- barlen = (dd->kregend - dd->kregbase);
- u64 data;
-
- /* only read 8 byte quantities */
- if ((count % 8) != 0)
- return -EINVAL;
- /* offset must be 8-byte aligned */
- if ((*f_pos % 8) != 0)
- return -EINVAL;
- /* destination buffer must be 8-byte aligned */
- if ((unsigned long)buf % 8 != 0)
- return -EINVAL;
- /* must be in range */
- if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
- return -EINVAL;
- /* only set the base if we are not starting past the BAR */
- if (*f_pos < barlen)
- base += *f_pos;
- csr_off = *f_pos;
- for (total = 0; total < count; total += 8, csr_off += 8) {
- /* accessing LCB CSRs requires more checks */
- if (is_lcb_offset(csr_off)) {
- if (read_lcb_csr(dd, csr_off, (u64 *)&data))
- break; /* failed */
- }
- /*
- * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
- * false parity error. Avoid the whole issue by not reading
- * them. These registers are defined as having a read value
- * of 0.
- */
- else if (csr_off == ASIC_GPIO_CLEAR ||
- csr_off == ASIC_GPIO_FORCE ||
- csr_off == ASIC_QSFP1_CLEAR ||
- csr_off == ASIC_QSFP1_FORCE ||
- csr_off == ASIC_QSFP2_CLEAR ||
- csr_off == ASIC_QSFP2_FORCE)
- data = 0;
- else if (csr_off >= barlen) {
- /*
- * read_8051_data can read more than just 8 bytes at
- * a time. However, folding this into the loop and
- * handling the reads in 8 byte increments allows us
- * to smoothly transition from chip memory to 8051
- * memory.
- */
- if (read_8051_data(dd,
- (u32)(csr_off - barlen),
- sizeof(data), &data))
- break; /* failed */
- } else
- data = readq(base + total);
- if (put_user(data, (unsigned long __user *)(buf + total)))
- break;
- }
- *f_pos += total;
- return total;
-}
-
-/* NOTE: assumes unsigned long is 8 bytes */
-static ssize_t ui_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *f_pos)
-{
- struct hfi1_devdata *dd = filp->private_data;
- void __iomem *base;
- unsigned long total, data, csr_off;
- int in_lcb;
-
- /* only write 8 byte quantities */
- if ((count % 8) != 0)
- return -EINVAL;
- /* offset must be 8-byte aligned */
- if ((*f_pos % 8) != 0)
- return -EINVAL;
- /* source buffer must be 8-byte aligned */
- if ((unsigned long)buf % 8 != 0)
- return -EINVAL;
- /* must be in range */
- if (*f_pos + count > dd->kregend - dd->kregbase)
- return -EINVAL;
-
- base = (void __iomem *)dd->kregbase + *f_pos;
- csr_off = *f_pos;
- in_lcb = 0;
- for (total = 0; total < count; total += 8, csr_off += 8) {
- if (get_user(data, (unsigned long __user *)(buf + total)))
- break;
- /* accessing LCB CSRs requires a special procedure */
- if (is_lcb_offset(csr_off)) {
- if (!in_lcb) {
- int ret = acquire_lcb_access(dd, 1);
-
- if (ret)
- break;
- in_lcb = 1;
- }
- } else {
- if (in_lcb) {
- release_lcb_access(dd, 1);
- in_lcb = 0;
- }
- }
- writeq(data, base + total);
- }
- if (in_lcb)
- release_lcb_access(dd, 1);
- *f_pos += total;
- return total;
-}
-
-static const struct file_operations ui_file_ops = {
- .owner = THIS_MODULE,
- .llseek = ui_lseek,
- .read = ui_read,
- .write = ui_write,
- .open = ui_open,
- .release = ui_release,
-};
-
-#define UI_OFFSET 192 /* device minor offset for UI devices */
-static int create_ui = 1;
-
-static struct cdev wildcard_cdev;
-static struct device *wildcard_device;
-
-static atomic_t user_count = ATOMIC_INIT(0);
-
static void user_remove(struct hfi1_devdata *dd)
{
- if (atomic_dec_return(&user_count) == 0)
- hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);
hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
- hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
}
static int user_add(struct hfi1_devdata *dd)
@@ -1717,34 +1473,13 @@ static int user_add(struct hfi1_devdata *dd)
char name[10];
int ret;
- if (atomic_inc_return(&user_count) == 1) {
- ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
- &wildcard_cdev, &wildcard_device,
- true);
- if (ret)
- goto done;
- }
-
snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
- ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
+ ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
&dd->user_cdev, &dd->user_device,
- true);
+ true, &dd->kobj);
if (ret)
- goto done;
+ user_remove(dd);
- if (create_ui) {
- snprintf(name, sizeof(name),
- "%s_ui%d", class_name(), dd->unit);
- ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
- &dd->ui_cdev, &dd->ui_device,
- false);
- if (ret)
- goto done;
- }
-
- return 0;
-done:
- user_remove(dd);
return ret;
}
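
user_add() now registers one char device per unit, at minor dd->unit, and
parents the cdev's kobject on the devdata kobject; cdev_add() takes a
reference on that parent, so an open device node keeps the devdata alive.
A sketch of the parenting pattern, with illustrative example_* names:

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/module.h>

struct example_dev {
	struct kobject kobj;
	struct cdev cdev;
};

static int example_cdev_add(struct example_dev *dev, dev_t devt,
			    const struct file_operations *fops)
{
	cdev_init(&dev->cdev, fops);
	dev->cdev.owner = THIS_MODULE;
	/* cdev_add() grabs a ref on kobj.parent, pinning the device */
	dev->cdev.kobj.parent = &dev->kobj;
	return cdev_add(&dev->cdev, devt, 1);
}
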
@@ -1753,13 +1488,7 @@ done:
*/
int hfi1_device_create(struct hfi1_devdata *dd)
{
- int r, ret;
-
- r = user_add(dd);
- ret = hfi1_diag_add(dd);
- if (r && !ret)
- ret = r;
- return ret;
+ return user_add(dd);
}
/*
@@ -1769,5 +1498,4 @@ int hfi1_device_create(struct hfi1_devdata *dd)
void hfi1_device_remove(struct hfi1_devdata *dd)
{
user_remove(dd);
- hfi1_diag_remove(dd);
}
diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index f51570e8f..cbd965cfa 100644
--- a/drivers/staging/rdma/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -1413,8 +1413,15 @@ static int __acquire_chip_resource(struct hfi1_devdata *dd, u32 resource)
if (resource & CR_DYN_MASK) {
/* a dynamic resource is in use if either HFI has set the bit */
- all_bits = resource_mask(0, resource) |
+ if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0 &&
+ (resource & (CR_I2C1 | CR_I2C2))) {
+ /* discrete devices must serialize across both chains */
+ all_bits = resource_mask(0, CR_I2C1 | CR_I2C2) |
+ resource_mask(1, CR_I2C1 | CR_I2C2);
+ } else {
+ all_bits = resource_mask(0, resource) |
resource_mask(1, resource);
+ }
my_bit = resource_mask(dd->hfi1_id, resource);
} else {
/* non-dynamic resources are not split between HFIs */
diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 16cbdc407..4417a0fd3 100644
--- a/drivers/staging/rdma/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -453,11 +453,12 @@ struct rvt_sge_state;
#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)
#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
+#define HLS_DOWN ~(HLS_UP)
/* use this MTU size if none other is given */
-#define HFI1_DEFAULT_ACTIVE_MTU 8192
+#define HFI1_DEFAULT_ACTIVE_MTU 10240
/* use this MTU size as the default maximum */
-#define HFI1_DEFAULT_MAX_MTU 8192
+#define HFI1_DEFAULT_MAX_MTU 10240
/* default partition key */
#define DEFAULT_PKEY 0xffff
@@ -606,7 +607,6 @@ struct hfi1_pportdata {
struct work_struct link_vc_work;
struct work_struct link_up_work;
struct work_struct link_down_work;
- struct work_struct dc_host_req_work;
struct work_struct sma_message_work;
struct work_struct freeze_work;
struct work_struct link_downgrade_work;
@@ -1169,6 +1169,7 @@ struct hfi1_devdata {
atomic_t aspm_disabled_cnt;
struct hfi1_affinity *affinity;
+ struct kobject kobj;
};
/* 8051 firmware version helper */
@@ -1258,7 +1259,7 @@ void receive_interrupt_work(struct work_struct *work);
static inline int hdr2sc(struct hfi1_message_header *hdr, u64 rhf)
{
return ((be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf) |
- ((!!(rhf & RHF_DC_INFO_MASK)) << 4);
+ ((!!(rhf & RHF_DC_INFO_SMASK)) << 4);
}
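
The hdr2sc() fix swaps the unshifted field mask for the shifted one: rhf is
tested in place, so the mask must sit at the field's bit position. A sketch
of the MASK/SMASK convention assumed here, with illustrative values:

#include <linux/types.h>

#define EXAMPLE_DC_INFO_SHIFT 63
#define EXAMPLE_DC_INFO_MASK  0x1ull	/* field-width mask at bit 0 */
#define EXAMPLE_DC_INFO_SMASK (EXAMPLE_DC_INFO_MASK << EXAMPLE_DC_INFO_SHIFT)

static inline int example_sc4_bit(u64 rhf)
{
	/* the shifted mask tests the bit where it actually lives */
	return !!(rhf & EXAMPLE_DC_INFO_SMASK);
}
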
static inline u16 generate_jkey(kuid_t uid)
@@ -1333,6 +1334,9 @@ void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
u32 pkey, u32 slid, u32 dlid, u8 sc5,
const struct ib_grh *old_grh);
+#define PKEY_CHECK_INVALID -1
+int egress_pkey_check(struct hfi1_pportdata *ppd, __be16 *lrh, __be32 *bth,
+ u8 sc5, int8_t s_pkey_index);
#define PACKET_EGRESS_TIMEOUT 350
static inline void pause_for_credit_return(struct hfi1_devdata *dd)
@@ -1776,6 +1780,7 @@ extern struct mutex hfi1_mutex;
#define HFI1_PKT_USER_SC_INTEGRITY \
(SEND_CTXT_CHECK_ENABLE_DISALLOW_NON_KDETH_PACKETS_SMASK \
+ | SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK \
| SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_SMASK \
| SEND_CTXT_CHECK_ENABLE_DISALLOW_GRH_SMASK)
@@ -1879,9 +1884,8 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
get_unit_name((dd)->unit), ##__VA_ARGS__)
#define hfi1_dev_porterr(dd, port, fmt, ...) \
- dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
- get_unit_name((dd)->unit), (dd)->unit, (port), \
- ##__VA_ARGS__)
+ dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
+ get_unit_name((dd)->unit), (port), ##__VA_ARGS__)
/*
* this is used for formatting hw error messages...
diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index cfcdc16b4..eed971ccd 100644
--- a/drivers/staging/rdma/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -422,9 +422,10 @@ static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
struct cca_timer *cca_timer;
struct hfi1_pportdata *ppd;
int sl;
- u16 ccti, ccti_timer, ccti_min;
+ u16 ccti_timer, ccti_min;
struct cc_state *cc_state;
unsigned long flags;
+ enum hrtimer_restart ret = HRTIMER_NORESTART;
cca_timer = container_of(t, struct cca_timer, hrtimer);
ppd = cca_timer->ppd;
@@ -450,24 +451,21 @@ static enum hrtimer_restart cca_timer_fn(struct hrtimer *t)
spin_lock_irqsave(&ppd->cca_timer_lock, flags);
- ccti = cca_timer->ccti;
-
- if (ccti > ccti_min) {
+ if (cca_timer->ccti > ccti_min) {
cca_timer->ccti--;
set_link_ipg(ppd);
}
- spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
-
- rcu_read_unlock();
-
- if (ccti > ccti_min) {
+ if (cca_timer->ccti > ccti_min) {
unsigned long nsec = 1024 * ccti_timer;
/* ccti_timer is in units of 1.024 usec */
hrtimer_forward_now(t, ns_to_ktime(nsec));
- return HRTIMER_RESTART;
+ ret = HRTIMER_RESTART;
}
- return HRTIMER_NORESTART;
+
+ spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
+ rcu_read_unlock();
+ return ret;
}
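
The cca_timer_fn() rework closes a race: ccti was read, the lock dropped,
and the restart decision then made on a stale value that another CPU could
have changed. Now the decrement and the restart decision both happen under
cca_timer_lock. A sketch of the decide-under-lock hrtimer pattern, using
generic example_ state rather than the CCA fields:

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/spinlock.h>

struct example_state {
	struct hrtimer timer;
	spinlock_t lock;
	u16 value, floor;
	u64 period_ns;
};

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	struct example_state *s = container_of(t, struct example_state, timer);
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	if (s->value > s->floor) {
		s->value--;
		hrtimer_forward_now(t, ns_to_ktime(s->period_ns));
		ret = HRTIMER_RESTART;	/* decided while state is stable */
	}
	spin_unlock_irqrestore(&s->lock, flags);
	return ret;
}
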
/*
@@ -496,7 +494,6 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
INIT_WORK(&ppd->link_vc_work, handle_verify_cap);
INIT_WORK(&ppd->link_up_work, handle_link_up);
INIT_WORK(&ppd->link_down_work, handle_link_down);
- INIT_WORK(&ppd->dc_host_req_work, handle_8051_request);
INIT_WORK(&ppd->freeze_work, handle_freeze);
INIT_WORK(&ppd->link_downgrade_work, handle_link_downgrade);
INIT_WORK(&ppd->sma_message_work, handle_sma_message);
@@ -735,12 +732,12 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
lastfail = hfi1_create_rcvhdrq(dd, rcd);
if (!lastfail)
lastfail = hfi1_setup_eagerbufs(rcd);
- if (lastfail)
+ if (lastfail) {
dd_dev_err(dd,
"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
+ ret = lastfail;
+ }
}
- if (lastfail)
- ret = lastfail;
/* Allocate enough memory for user event notification. */
len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
@@ -992,8 +989,10 @@ static void release_asic_data(struct hfi1_devdata *dd)
dd->asic_data = NULL;
}
-void hfi1_free_devdata(struct hfi1_devdata *dd)
+static void __hfi1_free_devdata(struct kobject *kobj)
{
+ struct hfi1_devdata *dd =
+ container_of(kobj, struct hfi1_devdata, kobj);
unsigned long flags;
spin_lock_irqsave(&hfi1_devs_lock, flags);
@@ -1007,7 +1006,16 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
free_percpu(dd->rcv_limit);
hfi1_dev_affinity_free(dd);
free_percpu(dd->send_schedule);
- ib_dealloc_device(&dd->verbs_dev.rdi.ibdev);
+ rvt_dealloc_device(&dd->verbs_dev.rdi);
+}
+
+static struct kobj_type hfi1_devdata_type = {
+ .release = __hfi1_free_devdata,
+};
+
+void hfi1_free_devdata(struct hfi1_devdata *dd)
+{
+ kobject_put(&dd->kobj);
}
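
Freeing the devdata now goes through a kobject release callback, pairing
with the kobject_get()/kobject_put() added to open/close above: whichever
reference drops last, device teardown or a lingering open file, runs the
release and frees the structure. The pattern in miniature:

#include <linux/kobject.h>
#include <linux/slab.h>

struct example_dev {
	struct kobject kobj;
	/* ... device state ... */
};

static void example_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct example_dev, kobj));
}

static struct kobj_type example_ktype = {
	.release = example_release,
};

/*
 * alloc:  kobject_init(&dev->kobj, &example_ktype);
 * open:   kobject_get(&dev->kobj);
 * close:  kobject_put(&dev->kobj);
 * remove: kobject_put(&dev->kobj);  -- the last put frees
 */
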
/*
@@ -1105,12 +1113,13 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
&pdev->dev,
"Could not alloc cpulist info, cpu affinity might be wrong\n");
}
+ kobject_init(&dd->kobj, &hfi1_devdata_type);
return dd;
bail:
if (!list_empty(&dd->list))
list_del_init(&dd->list);
- ib_dealloc_device(&dd->verbs_dev.rdi.ibdev);
+ rvt_dealloc_device(&dd->verbs_dev.rdi);
return ERR_PTR(ret);
}
@@ -1303,7 +1312,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
spin_lock(&ppd->cc_state_lock);
cc_state = get_cc_state(ppd);
- rcu_assign_pointer(ppd->cc_state, NULL);
+ RCU_INIT_POINTER(ppd->cc_state, NULL);
spin_unlock(&ppd->cc_state_lock);
if (cc_state)
@@ -1328,7 +1337,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
(void *)dd->rcvhdrtail_dummy_kvaddr,
dd->rcvhdrtail_dummy_physaddr);
- dd->rcvhdrtail_dummy_kvaddr = NULL;
+ dd->rcvhdrtail_dummy_kvaddr = NULL;
}
for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) {
@@ -1374,7 +1383,7 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret = 0, j, pidx, initfail;
- struct hfi1_devdata *dd = NULL;
+ struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
struct hfi1_pportdata *ppd;
/* First, lock the non-writable module parameters */
diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index 65348d16a..65348d16a 100644
--- a/drivers/staging/rdma/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
diff --git a/drivers/staging/rdma/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 2ec6ef38d..2ec6ef38d 100644
--- a/drivers/staging/rdma/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index d1e7f4d7c..fca07a1d6 100644
--- a/drivers/staging/rdma/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -78,6 +78,16 @@ static inline void clear_opa_smp_data(struct opa_smp *smp)
memset(data, 0, size);
}
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
+{
+ struct ib_event event;
+
+ event.event = IB_EVENT_PKEY_CHANGE;
+ event.device = &dd->verbs_dev.rdi.ibdev;
+ event.element.port_num = port;
+ ib_dispatch_event(&event);
+}
+
static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
{
struct ib_mad_send_buf *send_buf;
@@ -999,7 +1009,21 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
break;
}
- set_link_state(ppd, link_state);
+ if ((link_state == HLS_DN_POLL ||
+ link_state == HLS_DN_DOWNDEF)) {
+ /*
+ * Going to poll. No matter what the current state,
+ * always move offline first, then tune and start the
+ * link. This correctly handles a FM link bounce and
+ * a link enable. Going offline is a no-op if already
+ * offline.
+ */
+ set_link_state(ppd, HLS_DN_OFFLINE);
+ tune_serdes(ppd);
+ start_link(ppd);
+ } else {
+ set_link_state(ppd, link_state);
+ }
if (link_state == HLS_DN_DISABLE &&
(ppd->offline_disabled_reason >
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED) ||
@@ -1389,6 +1413,12 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
if (key == okey)
continue;
/*
+ * Don't update pkeys[2] when the HFI port lacks MgmtAllowed and
+ * the neighbor is a switch.
+ */
+ if (i == 2 && !ppd->mgmt_allowed && ppd->neighbor_type == 1)
+ continue;
+ /*
* The SM gives us the complete PKey table. We have
* to ensure that we put the PKeys in the matching
* slots.
@@ -1398,15 +1428,10 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
}
if (changed) {
- struct ib_event event;
-
(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
-
- event.event = IB_EVENT_PKEY_CHANGE;
- event.device = &dd->verbs_dev.rdi.ibdev;
- event.element.port_num = port;
- ib_dispatch_event(&event);
+ hfi1_event_pkey_change(dd, port);
}
+
return 0;
}
@@ -3349,6 +3374,50 @@ static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
return reply((struct ib_mad_hdr *)smp);
}
+/*
+ * Apply congestion control information stored in the ppd to the
+ * active structure.
+ */
+static void apply_cc_state(struct hfi1_pportdata *ppd)
+{
+ struct cc_state *old_cc_state, *new_cc_state;
+
+ new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
+ if (!new_cc_state)
+ return;
+
+ /*
+ * Hold the lock for updating *and* to prevent ppd information
+ * from changing during the update.
+ */
+ spin_lock(&ppd->cc_state_lock);
+
+ old_cc_state = get_cc_state(ppd);
+ if (!old_cc_state) {
+ /* never active, or shutting down */
+ spin_unlock(&ppd->cc_state_lock);
+ kfree(new_cc_state);
+ return;
+ }
+
+ *new_cc_state = *old_cc_state;
+
+ new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
+ memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
+ ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
+
+ new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
+ new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
+ memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
+ OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
+
+ rcu_assign_pointer(ppd->cc_state, new_cc_state);
+
+ spin_unlock(&ppd->cc_state_lock);
+
+ call_rcu(&old_cc_state->rcu, cc_state_reclaim);
+}
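
apply_cc_state() centralizes the classic RCU publish sequence: copy the
live state under the lock, update the copy, rcu_assign_pointer() the new
version into place, and reclaim the old one with call_rcu() once readers
drain. The same sequence reduced to a sketch (example_* names are
illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_cfg {
	struct rcu_head rcu;
	int value;
};

static void example_cfg_reclaim(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct example_cfg, rcu));
}

static int example_update(struct example_cfg __rcu **slot, spinlock_t *lock,
			  int new_value)
{
	struct example_cfg *old, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	spin_lock(lock);
	old = rcu_dereference_protected(*slot, lockdep_is_held(lock));
	if (old)
		*new = *old;		/* start from the current state */
	new->value = new_value;
	rcu_assign_pointer(*slot, new);	/* publish */
	spin_unlock(lock);

	if (old)
		call_rcu(&old->rcu, example_cfg_reclaim);
	return 0;
}
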
+
static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
u32 *resp_len)
@@ -3360,6 +3429,11 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
struct opa_congestion_setting_entry_shadow *entries;
int i;
+ /*
+ * Save details from packet into the ppd. Hold the cc_state_lock so
+ * our information is consistent with anyone trying to apply the state.
+ */
+ spin_lock(&ppd->cc_state_lock);
ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
entries = ppd->congestion_entries;
@@ -3370,6 +3444,10 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
p->entries[i].trigger_threshold;
entries[i].ccti_min = p->entries[i].ccti_min;
}
+ spin_unlock(&ppd->cc_state_lock);
+
+ /* now apply the information */
+ apply_cc_state(ppd);
return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
resp_len);
@@ -3512,7 +3590,6 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
int i, j;
u32 sentry, eentry;
u16 ccti_limit;
- struct cc_state *old_cc_state, *new_cc_state;
/* sanity check n_blocks, start_block */
if (n_blocks == 0 ||
@@ -3532,45 +3609,20 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp);
}
- new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
- if (!new_cc_state)
- goto getit;
-
+ /*
+ * Save details from packet into the ppd. Hold the cc_state_lock so
+ * our information is consistent with anyone trying to apply the state.
+ */
spin_lock(&ppd->cc_state_lock);
-
- old_cc_state = get_cc_state(ppd);
-
- if (!old_cc_state) {
- spin_unlock(&ppd->cc_state_lock);
- kfree(new_cc_state);
- return reply((struct ib_mad_hdr *)smp);
- }
-
- *new_cc_state = *old_cc_state;
-
- new_cc_state->cct.ccti_limit = ccti_limit;
-
- entries = ppd->ccti_entries;
ppd->total_cct_entry = ccti_limit + 1;
-
+ entries = ppd->ccti_entries;
for (j = 0, i = sentry; i < eentry; j++, i++)
entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
-
- memcpy(new_cc_state->cct.entries, entries,
- eentry * sizeof(struct ib_cc_table_entry));
-
- new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
- new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
- memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
- OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
-
- rcu_assign_pointer(ppd->cc_state, new_cc_state);
-
spin_unlock(&ppd->cc_state_lock);
- call_rcu(&old_cc_state->rcu, cc_state_reclaim);
+ /* now apply the information */
+ apply_cc_state(ppd);
-getit:
return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len);
}
diff --git a/drivers/staging/rdma/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 55ee08675..8b734aaae 100644
--- a/drivers/staging/rdma/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
@@ -434,4 +434,6 @@ struct sc2vlnt {
COUNTER_MASK(1, 3) | \
COUNTER_MASK(1, 4))
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
+
#endif /* _HFI1_MAD_H */
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index b3f0682a3..b7a80aa1a 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -45,6 +45,7 @@
*
*/
#include <linux/list.h>
+#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
@@ -91,13 +92,12 @@ static unsigned long mmu_node_start(struct mmu_rb_node *node)
static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
- return PAGE_ALIGN((node->addr & PAGE_MASK) + node->len) - 1;
+ return PAGE_ALIGN(node->addr + node->len) - 1;
}
int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
{
struct mmu_rb_handler *handlr;
- unsigned long flags;
if (!ops->invalidate)
return -EINVAL;
@@ -111,9 +111,9 @@ int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
INIT_HLIST_NODE(&handlr->mn.hlist);
spin_lock_init(&handlr->lock);
handlr->mn.ops = &mn_opts;
- spin_lock_irqsave(&mmu_rb_lock, flags);
- list_add_tail(&handlr->list, &mmu_rb_handlers);
- spin_unlock_irqrestore(&mmu_rb_lock, flags);
+ spin_lock(&mmu_rb_lock);
+ list_add_tail_rcu(&handlr->list, &mmu_rb_handlers);
+ spin_unlock(&mmu_rb_lock);
return mmu_notifier_register(&handlr->mn, current->mm);
}
@@ -126,10 +126,16 @@ void hfi1_mmu_rb_unregister(struct rb_root *root)
if (!handler)
return;
- spin_lock_irqsave(&mmu_rb_lock, flags);
- list_del(&handler->list);
- spin_unlock_irqrestore(&mmu_rb_lock, flags);
+ /* Unregister first so we don't get any more notifications. */
+ if (current->mm)
+ mmu_notifier_unregister(&handler->mn, current->mm);
+
+ spin_lock(&mmu_rb_lock);
+ list_del_rcu(&handler->list);
+ spin_unlock(&mmu_rb_lock);
+ synchronize_rcu();
+ spin_lock_irqsave(&handler->lock, flags);
if (!RB_EMPTY_ROOT(root)) {
struct rb_node *node;
struct mmu_rb_node *rbnode;
@@ -141,9 +147,8 @@ void hfi1_mmu_rb_unregister(struct rb_root *root)
handler->ops->remove(root, rbnode, NULL);
}
}
+ spin_unlock_irqrestore(&handler->lock, flags);
- if (current->mm)
- mmu_notifier_unregister(&handler->mn, current->mm);
kfree(handler);
}
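
The handler list moves from a spinlock-only list to an RCU-protected one:
writers still serialize on mmu_rb_lock with list_add_tail_rcu() and
list_del_rcu(), lookups run lockless under rcu_read_lock(), and unregister
waits out readers with synchronize_rcu() before freeing. In miniature:

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static LIST_HEAD(example_handlers);
static DEFINE_SPINLOCK(example_lock);

struct example_handler {
	struct list_head list;
	void *key;
};

static struct example_handler *example_find(void *key)
{
	struct example_handler *h, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(h, &example_handlers, list) {
		if (h->key == key) {
			found = h;
			break;
		}
	}
	rcu_read_unlock();
	return found;	/* valid because free waits for synchronize_rcu() */
}

static void example_remove(struct example_handler *h)
{
	spin_lock(&example_lock);
	list_del_rcu(&h->list);
	spin_unlock(&example_lock);
	synchronize_rcu();	/* let current readers finish */
	kfree(h);
}
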
@@ -235,6 +240,25 @@ struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
return node;
}
+struct mmu_rb_node *hfi1_mmu_rb_extract(struct rb_root *root,
+ unsigned long addr, unsigned long len)
+{
+ struct mmu_rb_handler *handler = find_mmu_handler(root);
+ struct mmu_rb_node *node;
+ unsigned long flags;
+
+ if (!handler)
+ return ERR_PTR(-EINVAL);
+
+ spin_lock_irqsave(&handler->lock, flags);
+ node = __mmu_rb_search(handler, addr, len);
+ if (node)
+ __mmu_int_rb_remove(node, handler->root);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+ return node;
+}
+
void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
{
struct mmu_rb_handler *handler = find_mmu_handler(root);
@@ -248,16 +272,15 @@ void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
{
struct mmu_rb_handler *handler;
- unsigned long flags;
- spin_lock_irqsave(&mmu_rb_lock, flags);
- list_for_each_entry(handler, &mmu_rb_handlers, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(handler, &mmu_rb_handlers, list) {
if (handler->root == root)
goto unlock;
}
handler = NULL;
unlock:
- spin_unlock_irqrestore(&mmu_rb_lock, flags);
+ rcu_read_unlock();
return handler;
}
@@ -293,9 +316,9 @@ static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
node->addr, node->len);
if (handler->ops->invalidate(root, node)) {
- spin_unlock_irqrestore(&handler->lock, flags);
- __mmu_rb_remove(handler, node, mm);
- spin_lock_irqsave(&handler->lock, flags);
+ __mmu_int_rb_remove(node, root);
+ if (handler->ops->remove)
+ handler->ops->remove(root, node, mm);
}
}
spin_unlock_irqrestore(&handler->lock, flags);
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
index 19a306e83..7a57b9c49 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.h
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
@@ -70,5 +70,7 @@ int hfi1_mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
void hfi1_mmu_rb_remove(struct rb_root *, struct mmu_rb_node *);
struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *, unsigned long,
unsigned long);
+struct mmu_rb_node *hfi1_mmu_rb_extract(struct rb_root *, unsigned long,
+ unsigned long);
#endif /* _HFI1_MMU_RB_H */
diff --git a/drivers/staging/rdma/hfi1/opa_compat.h b/drivers/infiniband/hw/hfi1/opa_compat.h
index 6ef3c1cbd..6ef3c1cbd 100644
--- a/drivers/staging/rdma/hfi1/opa_compat.h
+++ b/drivers/infiniband/hw/hfi1/opa_compat.h
diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 0bac21e6a..0bac21e6a 100644
--- a/drivers/staging/rdma/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index c6849ce9e..d4022450b 100644
--- a/drivers/staging/rdma/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -139,23 +139,30 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
/* Send Context Size (SCS) wildcards */
#define SCS_POOL_0 -1
#define SCS_POOL_1 -2
+
/* Send Context Count (SCC) wildcards */
#define SCC_PER_VL -1
#define SCC_PER_CPU -2
-
#define SCC_PER_KRCVQ -3
-#define SCC_ACK_CREDITS 32
+
+/* Send Context Size (SCS) constants */
+#define SCS_ACK_CREDITS 32
+#define SCS_VL15_CREDITS 102 /* 3 pkts of 2048B data + 128B header */
+
+#define PIO_THRESHOLD_CEILING 4096
#define PIO_WAIT_BATCH_SIZE 5
/* default send context sizes */
static struct sc_config_sizes sc_config_sizes[SC_MAX] = {
[SC_KERNEL] = { .size = SCS_POOL_0, /* even divide, pool 0 */
- .count = SCC_PER_VL },/* one per NUMA */
- [SC_ACK] = { .size = SCC_ACK_CREDITS,
+ .count = SCC_PER_VL }, /* one per NUMA */
+ [SC_ACK] = { .size = SCS_ACK_CREDITS,
.count = SCC_PER_KRCVQ },
[SC_USER] = { .size = SCS_POOL_0, /* even divide, pool 0 */
.count = SCC_PER_CPU }, /* one per CPU */
+ [SC_VL15] = { .size = SCS_VL15_CREDITS,
+ .count = 1 },
};
@@ -202,7 +209,8 @@ static int wildcard_to_pool(int wc)
static const char *sc_type_names[SC_MAX] = {
"kernel",
"ack",
- "user"
+ "user",
+ "vl15"
};
static const char *sc_type_name(int index)
@@ -231,6 +239,22 @@ int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
int i;
/*
+ * When SDMA is enabled, kernel context pio packet size is capped by
+ * "piothreshold". Reduce pio buffer allocation for kernel context by
+ * setting it to a fixed size. The allocation allows 3-deep buffering
+ * of the largest pio packets plus up to 128 bytes header, sufficient
+ * to maintain verbs performance.
+ *
+ * When SDMA is disabled, keep the default pooling allocation.
+ */
+ if (HFI1_CAP_IS_KSET(SDMA)) {
+ u16 max_pkt_size = (piothreshold < PIO_THRESHOLD_CEILING) ?
+ piothreshold : PIO_THRESHOLD_CEILING;
+ sc_config_sizes[SC_KERNEL].size =
+ 3 * (max_pkt_size + 128) / PIO_BLOCK_SIZE;
+ }
+
+ /*
* Step 0:
* - copy the centipercents/absolute sizes from the pool config
* - sanity check these values
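
For scale, assuming PIO_BLOCK_SIZE is 64 bytes: with piothreshold capped at
the 4096-byte ceiling, the kernel context size above works out to
3 * (4096 + 128) / 64 = 12672 / 64 = 198 blocks, i.e. room for three
maximum-size pio packets plus their headers.
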
@@ -311,7 +335,7 @@ int init_sc_pools_and_sizes(struct hfi1_devdata *dd)
if (i == SC_ACK) {
count = dd->n_krcv_queues;
} else if (i == SC_KERNEL) {
- count = (INIT_SC_PER_VL * num_vls) + 1 /* VL15 */;
+ count = INIT_SC_PER_VL * num_vls;
} else if (count == SCC_PER_CPU) {
count = dd->num_rcv_contexts - dd->n_krcv_queues;
} else if (count < 0) {
@@ -596,7 +620,7 @@ u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize)
* Return value is what to write into the CSR: trigger return when
* unreturned credits pass this count.
*/
-static u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
+u32 sc_percent_to_threshold(struct send_context *sc, u32 percent)
{
return (sc->credits * percent) / 100;
}
@@ -790,7 +814,10 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
* For Ack contexts, set a threshold for half the credits.
* For User contexts use the given percentage. This has been
* sanitized on driver start-up.
- * For Kernel contexts, use the default MTU plus a header.
+ * For Kernel contexts, use the default MTU plus a header
+ * or half the credits, whichever is smaller. This should
+ * work for both the 3-deep buffering allocation and the
+ * pooling allocation.
*/
if (type == SC_ACK) {
thresh = sc_percent_to_threshold(sc, 50);
@@ -798,7 +825,9 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
thresh = sc_percent_to_threshold(sc,
user_credit_return_threshold);
} else { /* kernel */
- thresh = sc_mtu_to_threshold(sc, hfi1_max_mtu, hdrqentsize);
+ thresh = min(sc_percent_to_threshold(sc, 50),
+ sc_mtu_to_threshold(sc, hfi1_max_mtu,
+ hdrqentsize));
}
reg = thresh << SC(CREDIT_CTRL_THRESHOLD_SHIFT);
/* add in early return */
@@ -966,7 +995,7 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
/* counter is reset if occupancy count changes */
if (reg != reg_prev)
loop = 0;
- if (loop > 500) {
+ if (loop > 50000) {
/* timed out - bounce the link */
dd_dev_err(dd,
"%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
@@ -1531,7 +1560,8 @@ static void sc_piobufavail(struct send_context *sc)
unsigned long flags;
unsigned i, n = 0;
- if (dd->send_contexts[sc->sw_index].type != SC_KERNEL)
+ if (dd->send_contexts[sc->sw_index].type != SC_KERNEL &&
+ dd->send_contexts[sc->sw_index].type != SC_VL15)
return;
list = &sc->piowait;
/*
@@ -1768,6 +1798,21 @@ static void pio_map_rcu_callback(struct rcu_head *list)
}
/*
+ * Set credit return threshold for the kernel send context
+ */
+static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
+{
+ u32 thres;
+
+ thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
+ 50),
+ sc_mtu_to_threshold(dd->kernel_send_context[scontext],
+ dd->vld[i].mtu,
+ dd->rcd[0]->rcvhdrqentsize));
+ sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
+}
+
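
A worked illustration of the min() threshold policy, with assumed numbers:
a kernel context of 198 credits and a 10240-byte VL MTU gives a
percent-path threshold of 198 * 50 / 100 = 99 credits, while the MTU path
would ask for roughly (10240 + header) / 64 > 160 credits; min() picks 99,
so credit return still triggers on a context too small for the MTU-based
figure.
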
+/*
* pio_map_init - called when #vls change
* @dd: hfi1_devdata
* @port: port number
@@ -1805,8 +1850,7 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
struct pio_vl_map *oldmap, *newmap;
if (!vl_scontexts) {
- /* send context 0 reserved for VL15 */
- for (i = 1; i < dd->num_send_contexts; i++)
+ for (i = 0; i < dd->num_send_contexts; i++)
if (dd->send_contexts[i].type == SC_KERNEL)
num_kernel_send_contexts++;
/* truncate divide */
@@ -1843,11 +1887,16 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
if (!newmap->map[i])
goto bail;
newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
- /* assign send contexts */
+ /*
+ * assign send contexts and
+ * adjust credit return threshold
+ */
for (j = 0; j < sz; j++) {
- if (dd->kernel_send_context[scontext])
+ if (dd->kernel_send_context[scontext]) {
newmap->map[i]->ksc[j] =
dd->kernel_send_context[scontext];
+ set_threshold(dd, scontext, i);
+ }
if (++scontext >= first_scontext +
vl_scontexts[i])
/* wrap back to first send context */
@@ -1900,7 +1949,7 @@ int init_pervl_scs(struct hfi1_devdata *dd)
u32 ctxt;
struct hfi1_pportdata *ppd = dd->pport;
- dd->vld[15].sc = sc_alloc(dd, SC_KERNEL,
+ dd->vld[15].sc = sc_alloc(dd, SC_VL15,
dd->rcd[0]->rcvhdrqentsize, dd->node);
if (!dd->vld[15].sc)
goto nomem;
diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index 0026976ce..464cbd27b 100644
--- a/drivers/staging/rdma/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -49,9 +49,10 @@
/* send context types */
#define SC_KERNEL 0
-#define SC_ACK 1
-#define SC_USER 2
-#define SC_MAX 3
+#define SC_VL15 1
+#define SC_ACK 2
+#define SC_USER 3 /* must be the last one: it may take all remaining contexts */
+#define SC_MAX 4 /* count of send context types */
/* invalid send context index */
#define INVALID_SCI 0xff
@@ -293,6 +294,7 @@ void sc_group_release_update(struct hfi1_devdata *dd, u32 hw_context);
void sc_add_credit_return_intr(struct send_context *sc);
void sc_del_credit_return_intr(struct send_context *sc);
void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold);
+u32 sc_percent_to_threshold(struct send_context *sc, u32 percent);
u32 sc_mtu_to_threshold(struct send_context *sc, u32 mtu, u32 hdrqentsize);
void hfi1_sc_wantpiobuf_intr(struct send_context *sc, u32 needint);
void sc_wait(struct hfi1_devdata *dd);
diff --git a/drivers/staging/rdma/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 8c25e1b58..8c25e1b58 100644
--- a/drivers/staging/rdma/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index 0a1d07458..03df9322f 100644
--- a/drivers/staging/rdma/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -87,6 +87,17 @@ void free_platform_config(struct hfi1_devdata *dd)
*/
}
+void get_port_type(struct hfi1_pportdata *ppd)
+{
+ int ret;
+
+ ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_PORT_TYPE, &ppd->port_type,
+ 4);
+ if (ret)
+ ppd->port_type = PORT_TYPE_UNKNOWN;
+}
+
int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
{
u8 tx_ctrl_byte = on ? 0x0 : 0xF;
@@ -114,21 +125,11 @@ static int qual_power(struct hfi1_pportdata *ppd)
if (ret)
return ret;
- if (QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]) != 4)
- cable_power_class = QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]);
- else
- cable_power_class = QSFP_PWR(cache[QSFP_MOD_PWR_OFFS]);
+ cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
- if (cable_power_class <= 3 && cable_power_class > (power_class_max - 1))
- ppd->offline_disabled_reason =
- HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
- else if (cable_power_class > 4 && cable_power_class > (power_class_max))
+ if (cable_power_class > power_class_max)
ppd->offline_disabled_reason =
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
- /*
- * cable_power_class will never have value 4 as this simply
- * means the high power settings are unused
- */
if (ppd->offline_disabled_reason ==
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
@@ -173,12 +174,9 @@ static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
u8 *cache = ppd->qsfp_info.cache;
int ret;
- if (QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]) != 4)
- cable_power_class = QSFP_HIGH_PWR(cache[QSFP_MOD_PWR_OFFS]);
- else
- cable_power_class = QSFP_PWR(cache[QSFP_MOD_PWR_OFFS]);
+ cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
- if (cable_power_class) {
+ if (cable_power_class > QSFP_POWER_CLASS_1) {
power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];
power_ctrl_byte |= 1;
@@ -190,8 +188,7 @@ static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
if (ret != 1)
return -EIO;
- if (cable_power_class > 3) {
- /* > power class 4*/
+ if (cable_power_class > QSFP_POWER_CLASS_4) {
power_ctrl_byte |= (1 << 2);
ret = qsfp_write(ppd, ppd->dd->hfi1_id,
QSFP_PWR_CTRL_BYTE_OFFS,
@@ -212,12 +209,21 @@ static void apply_rx_cdr(struct hfi1_pportdata *ppd,
{
u32 rx_preset;
u8 *cache = ppd->qsfp_info.cache;
+ int cable_power_class;
if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
(cache[QSFP_CDR_INFO_OFFS] & 0x40)))
return;
- /* rx_preset preset to zero to catch error */
+ /* RX CDR present, bypass supported */
+ cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
+
+ if (cable_power_class <= QSFP_POWER_CLASS_3) {
+ /* Power class <= 3, ignore config & turn RX CDR on */
+ *cdr_ctrl_byte |= 0xF;
+ return;
+ }
+
get_platform_config_field(
ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
@@ -250,15 +256,25 @@ static void apply_rx_cdr(struct hfi1_pportdata *ppd,
static void apply_tx_cdr(struct hfi1_pportdata *ppd,
u32 tx_preset_index,
- u8 *ctr_ctrl_byte)
+ u8 *cdr_ctrl_byte)
{
u32 tx_preset;
u8 *cache = ppd->qsfp_info.cache;
+ int cable_power_class;
if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
(cache[QSFP_CDR_INFO_OFFS] & 0x80)))
return;
+ /* TX CDR present, bypass supported */
+ cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
+
+ if (cable_power_class <= QSFP_POWER_CLASS_3) {
+ /* Power class <= 3, ignore config & turn TX CDR on */
+ *cdr_ctrl_byte |= 0xF0;
+ return;
+ }
+
get_platform_config_field(
ppd->dd,
PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
@@ -282,10 +298,10 @@ static void apply_tx_cdr(struct hfi1_pportdata *ppd,
(tx_preset << 2) | (tx_preset << 3));
if (tx_preset)
- *ctr_ctrl_byte |= (tx_preset << 4);
+ *cdr_ctrl_byte |= (tx_preset << 4);
else
/* Preserve current/determined RX CDR status */
- *ctr_ctrl_byte &= ((tx_preset << 4) | 0xF);
+ *cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
}
static void apply_cdr_settings(
@@ -524,7 +540,8 @@ static void apply_tunings(
/* Enable external device config if channel is limiting active */
read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
GENERAL_CONFIG, &config_data);
- config_data |= limiting_active;
+ config_data &= ~(0xff << ENABLE_EXT_DEV_CONFIG_SHIFT);
+ config_data |= ((u32)limiting_active << ENABLE_EXT_DEV_CONFIG_SHIFT);
ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
GENERAL_CONFIG, config_data);
if (ret != HCMD_SUCCESS)
@@ -537,7 +554,8 @@ static void apply_tunings(
/* Pass tuning method to 8051 */
read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
&config_data);
- config_data |= tuning_method;
+ config_data &= ~(0xff << TUNING_METHOD_SHIFT);
+ config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
config_data);
if (ret != HCMD_SUCCESS)
@@ -559,8 +577,8 @@ static void apply_tunings(
ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
GENERAL_CONFIG, &config_data);
/* Clear, then set the external device config field */
- config_data &= ~(0xFF << 24);
- config_data |= (external_device_config << 24);
+ config_data &= ~(u32)0xFF;
+ config_data |= external_device_config;
ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
GENERAL_CONFIG, config_data);
if (ret != HCMD_SUCCESS)
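
Both fixes above replace a bare OR, which leaves stale bits from the value
read back, with a clear-then-set update of the field. The idiom, sketched
for an 8-bit field at an arbitrary shift:

#include <linux/types.h>

static inline u32 example_set_field8(u32 reg, u32 val, int shift)
{
	reg &= ~(0xffu << shift);	/* clear the old field contents */
	reg |= (val & 0xffu) << shift;	/* install the new value */
	return reg;
}
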
@@ -598,6 +616,7 @@ static void apply_tunings(
"Applying TX settings");
}
+/* Must be holding the QSFP i2c resource */
static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
u32 *ptr_rx_preset, u32 *ptr_total_atten)
{
@@ -605,26 +624,19 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
u8 *cache = ppd->qsfp_info.cache;
- ret = acquire_chip_resource(ppd->dd, qsfp_resource(ppd->dd), QSFP_WAIT);
- if (ret) {
- dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
- __func__, (int)ppd->dd->hfi1_id);
- return ret;
- }
-
ppd->qsfp_info.limiting_active = 1;
ret = set_qsfp_tx(ppd, 0);
if (ret)
- goto bail_unlock;
+ return ret;
ret = qual_power(ppd);
if (ret)
- goto bail_unlock;
+ return ret;
ret = qual_bitrate(ppd);
if (ret)
- goto bail_unlock;
+ return ret;
if (ppd->qsfp_info.reset_needed) {
reset_qsfp(ppd);
@@ -636,7 +648,7 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
ret = set_qsfp_high_power(ppd);
if (ret)
- goto bail_unlock;
+ return ret;
if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
ret = get_platform_config_field(
@@ -646,7 +658,7 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
ptr_tx_preset, 4);
if (ret) {
*ptr_tx_preset = OPA_INVALID_INDEX;
- goto bail_unlock;
+ return ret;
}
} else {
ret = get_platform_config_field(
@@ -656,7 +668,7 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
ptr_tx_preset, 4);
if (ret) {
*ptr_tx_preset = OPA_INVALID_INDEX;
- goto bail_unlock;
+ return ret;
}
}
@@ -665,7 +677,7 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
if (ret) {
*ptr_rx_preset = OPA_INVALID_INDEX;
- goto bail_unlock;
+ return ret;
}
if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
@@ -685,8 +697,6 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
ret = set_qsfp_tx(ppd, 1);
-bail_unlock:
- release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
return ret;
}
@@ -787,12 +797,6 @@ void tune_serdes(struct hfi1_pportdata *ppd)
return;
}
- ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_PORT_TYPE, &ppd->port_type,
- 4);
- if (ret)
- ppd->port_type = PORT_TYPE_UNKNOWN;
-
switch (ppd->port_type) {
case PORT_TYPE_DISCONNECTED:
ppd->offline_disabled_reason =
@@ -833,12 +837,22 @@ void tune_serdes(struct hfi1_pportdata *ppd)
total_atten = platform_atten + remote_atten;
tuning_method = OPA_PASSIVE_TUNING;
- } else
+ } else {
ppd->offline_disabled_reason =
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
+ goto bail;
+ }
break;
case PORT_TYPE_QSFP:
if (qsfp_mod_present(ppd)) {
+ ret = acquire_chip_resource(ppd->dd,
+ qsfp_resource(ppd->dd),
+ QSFP_WAIT);
+ if (ret) {
+ dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
+ __func__, (int)ppd->dd->hfi1_id);
+ goto bail;
+ }
refresh_qsfp_cache(ppd, &ppd->qsfp_info);
if (ppd->qsfp_info.cache_valid) {
@@ -853,21 +867,23 @@ void tune_serdes(struct hfi1_pportdata *ppd)
* update the cache to reflect the changes
*/
refresh_qsfp_cache(ppd, &ppd->qsfp_info);
- if (ret)
- goto bail;
-
limiting_active =
ppd->qsfp_info.limiting_active;
} else {
dd_dev_err(dd,
"%s: Reading QSFP memory failed\n",
__func__);
- goto bail;
+ ret = -EINVAL; /* a fail indication */
}
- } else
+ release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
+ if (ret)
+ goto bail;
+ } else {
ppd->offline_disabled_reason =
HFI1_ODR_MASK(
OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
+ goto bail;
+ }
break;
default:
dd_dev_info(ppd->dd, "%s: Unknown port type\n", __func__);
diff --git a/drivers/staging/rdma/hfi1/platform.h b/drivers/infiniband/hw/hfi1/platform.h
index 19620cf54..e2c21613c 100644
--- a/drivers/staging/rdma/hfi1/platform.h
+++ b/drivers/infiniband/hw/hfi1/platform.h
@@ -298,6 +298,7 @@ enum link_tuning_encoding {
/* platform.c */
void get_platform_config(struct hfi1_devdata *dd);
void free_platform_config(struct hfi1_devdata *dd);
+void get_port_type(struct hfi1_pportdata *ppd);
int set_qsfp_tx(struct hfi1_pportdata *ppd, int on);
void tune_serdes(struct hfi1_pportdata *ppd);
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 0401955b3..1a942ffba 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -49,7 +49,6 @@
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
-#include <linux/random.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
@@ -161,14 +160,15 @@ static inline int opa_mtu_enum_to_int(int mtu)
* This function is what we would push to the core layer if we wanted to be a
* "first class citizen". Instead we hide this here and rely on Verbs ULPs
* to blindly pass the MTU enum value from the PathRecord to us.
- *
- * The actual flag used to determine "8k MTU" will change and is currently
- * unknown.
*/
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
- int val = opa_mtu_enum_to_int((int)mtu);
+ int val;
+ /* Constraining 10KB packets to 8KB packets */
+ if (mtu == (enum ib_mtu)OPA_MTU_10240)
+ mtu = OPA_MTU_8192;
+ val = opa_mtu_enum_to_int((int)mtu);
if (val > 0)
return val;
return ib_mtu_enum_to_int(mtu);
diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index e7bc8d6cf..e7bc8d6cf 100644
--- a/drivers/staging/rdma/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index 9ed196301..9fb561682 100644
--- a/drivers/staging/rdma/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
@@ -96,7 +96,7 @@ int i2c_write(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
{
int ret;
- if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__))
+ if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
return -EACCES;
/* make sure the TWSI bus is in a sane state */
@@ -162,7 +162,7 @@ int i2c_read(struct hfi1_pportdata *ppd, u32 target, int i2c_addr, int offset,
{
int ret;
- if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__))
+ if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
return -EACCES;
/* make sure the TWSI bus is in a sane state */
@@ -192,7 +192,7 @@ int qsfp_write(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int ret;
u8 page;
- if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__))
+ if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
return -EACCES;
/* make sure the TWSI bus is in a sane state */
@@ -276,7 +276,7 @@ int qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
int ret;
u8 page;
- if (!check_chip_resource(ppd->dd, qsfp_resource(ppd->dd), __func__))
+ if (!check_chip_resource(ppd->dd, i2c_target(target), __func__))
return -EACCES;
/* make sure the TWSI bus is in a sane state */
@@ -355,6 +355,8 @@ int one_qsfp_read(struct hfi1_pportdata *ppd, u32 target, int addr, void *bp,
* The calls to qsfp_{read,write} in this function correctly handle the
* address map difference between this mapping and the mapping implemented
* by those functions
+ *
+ * The caller must be holding the QSFP i2c chain resource.
*/
int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp)
{
@@ -371,13 +373,9 @@ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp)
if (!qsfp_mod_present(ppd)) {
ret = -ENODEV;
- goto bail_no_release;
+ goto bail;
}
- ret = acquire_chip_resource(ppd->dd, qsfp_resource(ppd->dd), QSFP_WAIT);
- if (ret)
- goto bail_no_release;
-
ret = qsfp_read(ppd, target, 0, cache, QSFP_PAGESIZE);
if (ret != QSFP_PAGESIZE) {
dd_dev_info(ppd->dd,
@@ -440,8 +438,6 @@ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp)
}
}
- release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
-
spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
ppd->qsfp_info.cache_valid = 1;
ppd->qsfp_info.cache_refresh_required = 0;
@@ -450,8 +446,6 @@ int refresh_qsfp_cache(struct hfi1_pportdata *ppd, struct qsfp_data *cp)
return 0;
bail:
- release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
-bail_no_release:
memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128));
return ret;
}
@@ -466,7 +460,28 @@ const char * const hfi1_qsfp_devtech[16] = {
#define QSFP_DUMP_CHUNK 16 /* Holds longest string */
#define QSFP_DEFAULT_HDR_CNT 224
-static const char *pwr_codes = "1.5W2.0W2.5W3.5W";
+#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
+#define QSFP_HIGH_PWR(pbyte) ((pbyte) & 3)
+/* For use with QSFP_HIGH_PWR macro */
+#define QSFP_HIGH_PWR_UNUSED 0 /* Bits [1:0] = 00 implies low power module */
+
+/*
+ * Takes power class byte [Page 00 Byte 129] in SFF 8636
+ * Returns power class as integer (1 through 7, per SFF 8636 rev 2.4)
+ */
+int get_qsfp_power_class(u8 power_byte)
+{
+ if (QSFP_HIGH_PWR(power_byte) == QSFP_HIGH_PWR_UNUSED)
+ /* power classes count from 1, their bit encodings from 0 */
+ return (QSFP_PWR(power_byte) + 1);
+ /*
+	 * 00 in the high power classes stands for unused. Together with
+	 * the off-by-1 offset above, adding 4 here accounts for the
+	 * difference between the low and high power groups.
+ */
+ return (QSFP_HIGH_PWR(power_byte) + 4);
+}
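To make the decode above concrete, here is a minimal user-space sketch of the SFF-8636 Byte 129 mapping implemented by get_qsfp_power_class(); the sample bytes are hypothetical:

#include <stdio.h>

#define QSFP_PWR(pbyte)      (((pbyte) >> 6) & 3)  /* bits [7:6]: classes 1-4 */
#define QSFP_HIGH_PWR(pbyte) ((pbyte) & 3)         /* bits [1:0]: classes 5-7 */

static int power_class(unsigned char b)
{
	if (QSFP_HIGH_PWR(b) == 0)          /* 00 here means "unused" */
		return QSFP_PWR(b) + 1;     /* encodings 0-3 -> classes 1-4 */
	return QSFP_HIGH_PWR(b) + 4;        /* encodings 1-3 -> classes 5-7 */
}

int main(void)
{
	printf("0x40 -> class %d\n", power_class(0x40)); /* bits[7:6]=01 -> 2 (2.0W) */
	printf("0x02 -> class %d\n", power_class(0x02)); /* bits[1:0]=10 -> 6 (4.5W) */
	return 0;
}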
int qsfp_mod_present(struct hfi1_pportdata *ppd)
{
@@ -537,6 +552,16 @@ set_zeroes:
return ret;
}
+static const char *pwr_codes[8] = {"N/AW",
+ "1.5W",
+ "2.0W",
+ "2.5W",
+ "3.5W",
+ "4.0W",
+ "4.5W",
+ "5.0W"
+ };
+
int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
{
u8 *cache = &ppd->qsfp_info.cache[0];
@@ -546,6 +571,7 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
int bidx = 0;
u8 *atten = &cache[QSFP_ATTEN_OFFS];
u8 *vendor_oui = &cache[QSFP_VOUI_OFFS];
+ u8 power_byte = 0;
sofar = 0;
lenstr[0] = ' ';
@@ -553,11 +579,12 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
if (ppd->qsfp_info.cache_valid) {
if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
- sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]);
+ snprintf(lenstr, sizeof(lenstr), "%dM ",
+ cache[QSFP_MOD_LEN_OFFS]);
+ power_byte = cache[QSFP_MOD_PWR_OFFS];
sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
- pwr_codes +
- (QSFP_PWR(cache[QSFP_MOD_PWR_OFFS]) * 4));
+ pwr_codes[get_qsfp_power_class(power_byte)]);
sofar += scnprintf(buf + sofar, len - sofar, "TECH:%s%s\n",
lenstr,
diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h
index 831fe4cf1..dadc66c44 100644
--- a/drivers/staging/rdma/hfi1/qsfp.h
+++ b/drivers/infiniband/hw/hfi1/qsfp.h
@@ -82,8 +82,9 @@
/* Byte 128 is Identifier: must be 0x0c for QSFP, or 0x0d for QSFP+ */
#define QSFP_MOD_ID_OFFS 128
/*
- * Byte 129 is "Extended Identifier". We only care about D7,D6: Power class
- * 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
+ * Byte 129 is "Extended Identifier".
+ * For bits [7:6]: 0:1.5W, 1:2.0W, 2:2.5W, 3:3.5W
+ * For bits [1:0]: 0:Unused, 1:4W, 2:4.5W, 3:5W
*/
#define QSFP_MOD_PWR_OFFS 129
/* Byte 130 is Connector type. Not Intel req'd */
@@ -190,6 +191,9 @@ extern const char *const hfi1_qsfp_devtech[16];
#define QSFP_HIGH_BIAS_WARNING 0x22
#define QSFP_LOW_BIAS_WARNING 0x11
+#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
+#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
+
/*
* struct qsfp_data encapsulates state of QSFP device for one port.
* it will be part of port-specific data if a board supports QSFP.
@@ -201,12 +205,6 @@ extern const char *const hfi1_qsfp_devtech[16];
* and let the qsfp_lock arbitrate access to common resources.
*
*/
-
-#define QSFP_PWR(pbyte) (((pbyte) >> 6) & 3)
-#define QSFP_HIGH_PWR(pbyte) (((pbyte) & 3) | 4)
-#define QSFP_ATTEN_SDR(attenarray) (attenarray[0])
-#define QSFP_ATTEN_DDR(attenarray) (attenarray[1])
-
struct qsfp_data {
/* Helps to find our way */
struct hfi1_pportdata *ppd;
@@ -223,6 +221,7 @@ struct qsfp_data {
int refresh_qsfp_cache(struct hfi1_pportdata *ppd,
struct qsfp_data *cp);
+int get_qsfp_power_class(u8 power_byte);
int qsfp_mod_present(struct hfi1_pportdata *ppd);
int get_cable_info(struct hfi1_devdata *dd, u32 port_num, u32 addr,
u32 len, u8 *data);
diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 0d7e1017f..792f15eb8 100644
--- a/drivers/staging/rdma/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1497,7 +1497,7 @@ reserved:
/* Ignore reserved NAK codes. */
goto bail_stop;
}
- return ret;
+ /* cannot be reached */
bail_stop:
hfi1_stop_rc_timers(qp);
return ret;
@@ -2021,8 +2021,6 @@ void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
if (sl >= OPA_MAX_SLS)
return;
- cca_timer = &ppd->cca_timer[sl];
-
cc_state = get_cc_state(ppd);
if (!cc_state)
@@ -2041,6 +2039,7 @@ void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
spin_lock_irqsave(&ppd->cca_timer_lock, flags);
+ cca_timer = &ppd->cca_timer[sl];
if (cca_timer->ccti < ccti_limit) {
if (cca_timer->ccti + ccti_incr <= ccti_limit)
cca_timer->ccti += ccti_incr;
@@ -2049,8 +2048,6 @@ void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
set_link_ipg(ppd);
}
- spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
-
ccti = cca_timer->ccti;
if (!hrtimer_active(&cca_timer->hrtimer)) {
@@ -2061,6 +2058,8 @@ void process_becn(struct hfi1_pportdata *ppd, u8 sl, u16 rlid, u32 lqpn,
HRTIMER_MODE_REL);
}
+ spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
+
if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
}
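The hunks above widen the cca_timer_lock critical section so that cca_timer->ccti is both updated and read under the same lock. A minimal pthread-based sketch of the before/after shape; the names are hypothetical and not the driver's API:

#include <pthread.h>

struct cca_timer_sketch {
	pthread_mutex_t lock;
	unsigned int ccti;
};

/* Racy shape (before): ccti is read after the lock is dropped, so a
 * concurrent update can slip in between the increment and the read. */
unsigned int ccti_racy(struct cca_timer_sketch *t, unsigned int incr)
{
	pthread_mutex_lock(&t->lock);
	t->ccti += incr;
	pthread_mutex_unlock(&t->lock);
	return t->ccti;			/* unlocked read */
}

/* Fixed shape (after): update and snapshot in one critical section. */
unsigned int ccti_fixed(struct cca_timer_sketch *t, unsigned int incr)
{
	unsigned int snapshot;

	pthread_mutex_lock(&t->lock);
	t->ccti += incr;
	snapshot = t->ccti;
	pthread_mutex_unlock(&t->lock);
	return snapshot;
}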
diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 08813cdbd..a659aec3c 100644
--- a/drivers/staging/rdma/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -831,7 +831,6 @@ void hfi1_do_send(struct rvt_qp *qp)
struct hfi1_pkt_state ps;
struct hfi1_qp_priv *priv = qp->priv;
int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
- unsigned long flags;
unsigned long timeout;
unsigned long timeout_int;
int cpu;
@@ -866,11 +865,11 @@ void hfi1_do_send(struct rvt_qp *qp)
timeout_int = SEND_RESCHED_TIMEOUT;
}
- spin_lock_irqsave(&qp->s_lock, flags);
+ spin_lock_irqsave(&qp->s_lock, ps.flags);
/* Return if we are already busy processing a work request. */
if (!hfi1_send_ok(qp)) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ spin_unlock_irqrestore(&qp->s_lock, ps.flags);
return;
}
@@ -884,7 +883,7 @@ void hfi1_do_send(struct rvt_qp *qp)
do {
/* Check for a constructed packet to be sent. */
if (qp->s_hdrwords != 0) {
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ spin_unlock_irqrestore(&qp->s_lock, ps.flags);
/*
* If the packet cannot be sent now, return and
* the send tasklet will be woken up later.
@@ -897,11 +896,14 @@ void hfi1_do_send(struct rvt_qp *qp)
if (unlikely(time_after(jiffies, timeout))) {
if (workqueue_congested(cpu,
ps.ppd->hfi1_wq)) {
- spin_lock_irqsave(&qp->s_lock, flags);
+ spin_lock_irqsave(
+ &qp->s_lock,
+ ps.flags);
qp->s_flags &= ~RVT_S_BUSY;
hfi1_schedule_send(qp);
- spin_unlock_irqrestore(&qp->s_lock,
- flags);
+ spin_unlock_irqrestore(
+ &qp->s_lock,
+ ps.flags);
this_cpu_inc(
*ps.ppd->dd->send_schedule);
return;
@@ -913,11 +915,11 @@ void hfi1_do_send(struct rvt_qp *qp)
}
timeout = jiffies + (timeout_int) / 8;
}
- spin_lock_irqsave(&qp->s_lock, flags);
+ spin_lock_irqsave(&qp->s_lock, ps.flags);
}
} while (make_req(qp, &ps));
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}
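Storing the saved IRQ flags in struct hfi1_pkt_state rather than in a stack local lets a callee drop and re-take qp->s_lock with the caller's flags, as the ud.c hunks later in this patch do. A condensed kernel-style sketch, not a standalone program; qp, do_loopback() and the lock helpers are stand-ins:

struct pkt_state {
	unsigned long flags;		/* IRQ flags travel with the state */
};

static int make_request(struct qp *qp, struct pkt_state *ps)
{
	unsigned long tflags = ps->flags;

	spin_unlock_irqrestore(&qp->s_lock, tflags);	/* caller's flags */
	do_loopback(qp);
	spin_lock_irqsave(&qp->s_lock, tflags);
	ps->flags = tflags;		/* hand the fresh flags back */
	return 0;
}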
/*
diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index abb8ebc1f..f9befc05b 100644
--- a/drivers/staging/rdma/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -134,6 +134,7 @@ static const char * const sdma_state_names[] = {
[sdma_state_s99_running] = "s99_Running",
};
+#ifdef CONFIG_SDMA_VERBOSITY
static const char * const sdma_event_names[] = {
[sdma_event_e00_go_hw_down] = "e00_GoHwDown",
[sdma_event_e10_go_hw_start] = "e10_GoHwStart",
@@ -150,6 +151,7 @@ static const char * const sdma_event_names[] = {
[sdma_event_e85_link_down] = "e85_LinkDown",
[sdma_event_e90_sw_halted] = "e90_SwHalted",
};
+#endif
static const struct sdma_set_state_action sdma_action_table[] = {
[sdma_state_s00_hw_down] = {
@@ -376,7 +378,7 @@ static inline void complete_tx(struct sdma_engine *sde,
sdma_txclean(sde->dd, tx);
if (complete)
(*complete)(tx, res);
- if (iowait_sdma_dec(wait) && wait)
+ if (wait && iowait_sdma_dec(wait))
iowait_drain_wakeup(wait);
}
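The reordered test relies on C's short-circuit evaluation: wait must be checked before it is dereferenced. A small illustrative sketch with hypothetical stand-ins for the iowait helpers:

#include <stddef.h>

struct iowait_sketch { int sdma_busy; };

static int sdma_dec(struct iowait_sketch *w)
{
	return --w->sdma_busy == 0;	/* dereferences w unconditionally */
}

static void complete_tx_sketch(struct iowait_sketch *wait)
{
	/* "sdma_dec(wait) && wait" would dereference a possibly-NULL
	 * pointer before testing it; this order is safe. */
	if (wait && sdma_dec(wait)) {
		/* last reference: wake up anyone draining this engine */
	}
}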
diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 8f50c99fe..8f50c99fe 100644
--- a/drivers/staging/rdma/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
diff --git a/drivers/staging/rdma/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h
index bf7d777d7..bf7d777d7 100644
--- a/drivers/staging/rdma/hfi1/sdma_txreq.h
+++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h
diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index c7f127119..91fc2aed6 100644
--- a/drivers/staging/rdma/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -84,7 +84,7 @@ static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
rcu_read_unlock();
return -EINVAL;
}
- memcpy(buf, &cc_state->cct, count);
+ memcpy(buf, (void *)&cc_state->cct + pos, count);
rcu_read_unlock();
return count;
@@ -131,7 +131,7 @@ static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
rcu_read_unlock();
return -EINVAL;
}
- memcpy(buf, &cc_state->cong_setting, count);
+ memcpy(buf, (void *)&cc_state->cong_setting + pos, count);
rcu_read_unlock();
return count;
@@ -721,8 +721,8 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
}
dd_dev_info(dd,
- "IB%u: Congestion Control Agent enabled for port %d\n",
- dd->unit, port_num);
+ "Congestion Control Agent enabled for port %d\n",
+ port_num);
return 0;
diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 8b62fefcf..4cfb13771 100644
--- a/drivers/staging/rdma/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -66,6 +66,7 @@ u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr)
#define RETH_PRN "reth vaddr 0x%.16llx rkey 0x%.8x dlen 0x%.8x"
#define AETH_PRN "aeth syn 0x%.2x %s msn 0x%.8x"
#define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x"
+#define IETH_PRN "ieth rkey 0x%.8x"
#define ATOMICACKETH_PRN "origdata %lld"
#define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %lld cdata %lld"
@@ -166,6 +167,12 @@ const char *parse_everbs_hdrs(
be32_to_cpu(eh->ud.deth[0]),
be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK);
break;
+ /* ieth */
+ case OP(RC, SEND_LAST_WITH_INVALIDATE):
+ case OP(RC, SEND_ONLY_WITH_INVALIDATE):
+ trace_seq_printf(p, IETH_PRN,
+ be32_to_cpu(eh->ieth));
+ break;
}
trace_seq_putc(p, 0);
return ret;
@@ -207,19 +214,6 @@ const char *print_u32_array(
return ret;
}
-const char *print_u64_array(
- struct trace_seq *p,
- u64 *arr, int len)
-{
- int i;
- const char *ret = trace_seq_buffer_ptr(p);
-
- for (i = 0; i < len; i++)
- trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]);
- trace_seq_putc(p, 0);
- return ret;
-}
-
__hfi1_trace_fn(PKT);
__hfi1_trace_fn(PROC);
__hfi1_trace_fn(SDMA);
@@ -233,3 +227,4 @@ __hfi1_trace_fn(FIRMWARE);
__hfi1_trace_fn(RCVCTRL);
__hfi1_trace_fn(TID);
__hfi1_trace_fn(MMU);
+__hfi1_trace_fn(IOCTL);
diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
index 963dc948c..28c1d0832 100644
--- a/drivers/staging/rdma/hfi1/trace.h
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -74,8 +74,8 @@ __print_symbolic(etype, \
TRACE_EVENT(hfi1_rcvhdr,
TP_PROTO(struct hfi1_devdata *dd,
- u64 eflags,
u32 ctxt,
+ u64 eflags,
u32 etype,
u32 hlen,
u32 tlen,
@@ -392,6 +392,8 @@ __print_symbolic(opcode, \
ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
ib_opcode_name(RC_COMPARE_SWAP), \
ib_opcode_name(RC_FETCH_ADD), \
+ ib_opcode_name(RC_SEND_LAST_WITH_INVALIDATE), \
+ ib_opcode_name(RC_SEND_ONLY_WITH_INVALIDATE), \
ib_opcode_name(UC_SEND_FIRST), \
ib_opcode_name(UC_SEND_MIDDLE), \
ib_opcode_name(UC_SEND_LAST), \
@@ -1341,6 +1343,7 @@ __hfi1_trace_def(FIRMWARE);
__hfi1_trace_def(RCVCTRL);
__hfi1_trace_def(TID);
__hfi1_trace_def(MMU);
+__hfi1_trace_def(IOCTL);
#define hfi1_cdbg(which, fmt, ...) \
__hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
diff --git a/drivers/staging/rdma/hfi1/twsi.c b/drivers/infiniband/hw/hfi1/twsi.c
index e82e52a63..e82e52a63 100644
--- a/drivers/staging/rdma/hfi1/twsi.c
+++ b/drivers/infiniband/hw/hfi1/twsi.c
diff --git a/drivers/staging/rdma/hfi1/twsi.h b/drivers/infiniband/hw/hfi1/twsi.h
index 5b8a5b5e7..5b8a5b5e7 100644
--- a/drivers/staging/rdma/hfi1/twsi.h
+++ b/drivers/infiniband/hw/hfi1/twsi.h
diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index df773d433..df773d433 100644
--- a/drivers/staging/rdma/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index ae8a70f70..be91f6fa1 100644
--- a/drivers/staging/rdma/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -322,7 +322,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
(lid == ppd->lid ||
(lid == be16_to_cpu(IB_LID_PERMISSIVE) &&
qp->ibqp.qp_type == IB_QPT_GSI)))) {
- unsigned long flags;
+ unsigned long tflags = ps->flags;
/*
* If DMAs are in progress, we can't generate
* a completion for the loopback packet since
@@ -335,10 +335,10 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
}
qp->s_cur = next_cur;
- local_irq_save(flags);
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ spin_unlock_irqrestore(&qp->s_lock, tflags);
ud_loopback(qp, wqe);
- spin_lock_irqsave(&qp->s_lock, flags);
+ spin_lock_irqsave(&qp->s_lock, tflags);
+ ps->flags = tflags;
hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
goto done_free_tx;
}
@@ -678,8 +678,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
u32 tlen = packet->tlen;
struct rvt_qp *qp = packet->qp;
bool has_grh = rcv_flags & HFI1_HAS_GRH;
- bool sc4_bit = has_sc4_bit(packet);
- u8 sc;
+ u8 sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
u32 bth1;
int is_mcast;
struct ib_grh *grh = NULL;
@@ -697,10 +696,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
*/
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
- u8 sl, sc5;
+ u8 sl;
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
sl = ibp->sc_to_sl[sc5];
process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
@@ -717,10 +714,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
u16 slid = be16_to_cpu(hdr->lrh[3]);
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
}
@@ -745,10 +738,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
if (qp->ibqp.qp_num > 1) {
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
u16 slid;
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
slid = be16_to_cpu(hdr->lrh[3]);
if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
@@ -790,10 +779,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
/* Received on QP0, and so by definition, this is an SMP */
struct opa_smp *smp = (struct opa_smp *)data;
u16 slid = be16_to_cpu(hdr->lrh[3]);
- u8 sc5;
-
- sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc5 |= sc4_bit;
if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
goto drop;
@@ -890,9 +875,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
}
wc.slid = be16_to_cpu(hdr->lrh[3]);
- sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
- sc |= sc4_bit;
- wc.sl = ibp->sc_to_sl[sc];
+ wc.sl = ibp->sc_to_sl[sc5];
/*
* Save the LMC lower bits if the destination LID is a unicast LID.
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 8bd56d5c7..1b640a35b 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -399,8 +399,11 @@ int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo)
* pages, accept the amount pinned so far and program only that.
* User space knows how to deal with partially programmed buffers.
*/
- if (!hfi1_can_pin_pages(dd, fd->tid_n_pinned, npages))
- return -ENOMEM;
+ if (!hfi1_can_pin_pages(dd, fd->tid_n_pinned, npages)) {
+ ret = -ENOMEM;
+ goto bail;
+ }
+
pinned = hfi1_acquire_user_pages(vaddr, npages, true, pages);
if (pinned <= 0) {
ret = pinned;
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index 9bc8d9fba..9bc8d9fba 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 88e10b5f5..88e10b5f5 100644
--- a/drivers/staging/rdma/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index d53a65954..47ffd273e 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -166,6 +166,8 @@ static unsigned initial_pkt_count = 8;
#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
+struct sdma_mmu_node;
+
struct user_sdma_iovec {
struct list_head list;
struct iovec iov;
@@ -178,8 +180,11 @@ struct user_sdma_iovec {
* which we last left off.
*/
u64 offset;
+ struct sdma_mmu_node *node;
};
+#define SDMA_CACHE_NODE_EVICT 0
+
struct sdma_mmu_node {
struct mmu_rb_node rb;
struct list_head list;
@@ -187,6 +192,7 @@ struct sdma_mmu_node {
atomic_t refcount;
struct page **pages;
unsigned npages;
+ unsigned long flags;
};
struct user_sdma_request {
@@ -504,6 +510,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
struct sdma_req_info info;
struct user_sdma_request *req;
u8 opcode, sc, vl;
+ int req_queued = 0;
if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
hfi1_cdbg(
@@ -597,6 +604,13 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
goto free_req;
}
+ /* Checking P_KEY for requests from user-space */
+ if (egress_pkey_check(dd->pport, req->hdr.lrh, req->hdr.bth, sc,
+ PKEY_CHECK_INVALID)) {
+ ret = -EINVAL;
+ goto free_req;
+ }
+
/*
* Also should check the BTH.lnh. If it says the next header is GRH then
* the RXE parsing will be off and will land in the middle of the KDETH
@@ -693,6 +707,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
atomic_inc(&pq->n_reqs);
+ req_queued = 1;
/* Send the first N packets in the request to buy us some time */
ret = user_sdma_send_pkts(req, pcount);
if (unlikely(ret < 0 && ret != -EBUSY)) {
@@ -737,7 +752,8 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
return 0;
free_req:
user_sdma_free_request(req, true);
- pq_update(pq);
+ if (req_queued)
+ pq_update(pq);
set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
return ret;
}
@@ -1030,27 +1046,29 @@ static inline int num_user_pages(const struct iovec *iov)
return 1 + ((epage - spage) >> PAGE_SHIFT);
}
-/* Caller must hold pq->evict_lock */
static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
{
u32 cleared = 0;
struct sdma_mmu_node *node, *ptr;
+ struct list_head to_evict = LIST_HEAD_INIT(to_evict);
+ spin_lock(&pq->evict_lock);
list_for_each_entry_safe_reverse(node, ptr, &pq->evict, list) {
/* Make sure that no one is still using the node. */
if (!atomic_read(&node->refcount)) {
- /*
- * Need to use the page count now as the remove callback
- * will free the node.
- */
+ set_bit(SDMA_CACHE_NODE_EVICT, &node->flags);
+ list_del_init(&node->list);
+ list_add(&node->list, &to_evict);
cleared += node->npages;
- spin_unlock(&pq->evict_lock);
- hfi1_mmu_rb_remove(&pq->sdma_rb_root, &node->rb);
- spin_lock(&pq->evict_lock);
if (cleared >= npages)
break;
}
}
+ spin_unlock(&pq->evict_lock);
+
+ list_for_each_entry_safe(node, ptr, &to_evict, list)
+ hfi1_mmu_rb_remove(&pq->sdma_rb_root, &node->rb);
+
return cleared;
}
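The rewrite above replaces the drop/re-take-inside-the-loop dance with a two-phase scheme: unlink idle nodes onto a private list under the lock, then tear them down lock-free. A minimal runnable sketch of the same pattern with a pthread mutex; the types are hypothetical and free() stands in for hfi1_mmu_rb_remove():

#include <pthread.h>
#include <stdlib.h>

struct node_sketch {
	struct node_sketch *next;
	int refcount;
};

static pthread_mutex_t evict_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node_sketch *evict_list;

static void cache_evict_sketch(void)
{
	struct node_sketch *victims = NULL, *n, **pp;

	/* phase 1: under the lock, unlink idle nodes onto a private list */
	pthread_mutex_lock(&evict_lock);
	pp = &evict_list;
	while ((n = *pp) != NULL) {
		if (n->refcount == 0) {
			*pp = n->next;		/* unlink */
			n->next = victims;	/* collect */
			victims = n;
		} else {
			pp = &n->next;
		}
	}
	pthread_mutex_unlock(&evict_lock);

	/* phase 2: heavyweight teardown happens without the lock held */
	while ((n = victims) != NULL) {
		victims = n->next;
		free(n);	/* stands in for hfi1_mmu_rb_remove() */
	}
}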
@@ -1062,9 +1080,9 @@ static int pin_vector_pages(struct user_sdma_request *req,
struct sdma_mmu_node *node = NULL;
struct mmu_rb_node *rb_node;
- rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root,
- (unsigned long)iovec->iov.iov_base,
- iovec->iov.iov_len);
+ rb_node = hfi1_mmu_rb_extract(&pq->sdma_rb_root,
+ (unsigned long)iovec->iov.iov_base,
+ iovec->iov.iov_len);
if (rb_node && !IS_ERR(rb_node))
node = container_of(rb_node, struct sdma_mmu_node, rb);
else
@@ -1076,7 +1094,6 @@ static int pin_vector_pages(struct user_sdma_request *req,
return -ENOMEM;
node->rb.addr = (unsigned long)iovec->iov.iov_base;
- node->rb.len = iovec->iov.iov_len;
node->pq = pq;
atomic_set(&node->refcount, 0);
INIT_LIST_HEAD(&node->list);
@@ -1093,11 +1110,25 @@ static int pin_vector_pages(struct user_sdma_request *req,
memcpy(pages, node->pages, node->npages * sizeof(*pages));
npages -= node->npages;
+
+ /*
+	 * If rb_node is NULL, it means that this is a brand new node
+	 * and, therefore, not on the eviction list.
+	 * If, however, rb_node is non-NULL, it means that the node is
+	 * already in the RB tree and, therefore, on the eviction list
+	 * (nodes are unconditionally inserted in the eviction list).
+	 * In that case, we have to remove the node prior to calling
+	 * the eviction function in order to prevent it from freeing
+	 * this node.
+ */
+ if (rb_node) {
+ spin_lock(&pq->evict_lock);
+ list_del_init(&node->list);
+ spin_unlock(&pq->evict_lock);
+ }
retry:
if (!hfi1_can_pin_pages(pq->dd, pq->n_locked, npages)) {
- spin_lock(&pq->evict_lock);
cleared = sdma_cache_evict(pq, npages);
- spin_unlock(&pq->evict_lock);
if (cleared >= npages)
goto retry;
}
@@ -1117,37 +1148,33 @@ retry:
goto bail;
}
kfree(node->pages);
+ node->rb.len = iovec->iov.iov_len;
node->pages = pages;
node->npages += pinned;
npages = node->npages;
spin_lock(&pq->evict_lock);
- if (!rb_node)
- list_add(&node->list, &pq->evict);
- else
- list_move(&node->list, &pq->evict);
+ list_add(&node->list, &pq->evict);
pq->n_locked += pinned;
spin_unlock(&pq->evict_lock);
}
iovec->pages = node->pages;
iovec->npages = npages;
+ iovec->node = node;
- if (!rb_node) {
- ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb);
- if (ret) {
- spin_lock(&pq->evict_lock);
+ ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb);
+ if (ret) {
+ spin_lock(&pq->evict_lock);
+ if (!list_empty(&node->list))
list_del(&node->list);
- pq->n_locked -= node->npages;
- spin_unlock(&pq->evict_lock);
- ret = 0;
- goto bail;
- }
- } else {
- atomic_inc(&node->refcount);
+ pq->n_locked -= node->npages;
+ spin_unlock(&pq->evict_lock);
+ goto bail;
}
return 0;
bail:
- if (!rb_node)
- kfree(node);
+ if (rb_node)
+ unpin_vector_pages(current->mm, node->pages, 0, node->npages);
+ kfree(node);
return ret;
}
@@ -1328,11 +1355,11 @@ static int set_txreq_header(struct user_sdma_request *req,
*/
SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
req->tidoffset, req->tidoffset / req->omfactor,
- !!(req->omfactor - KDETH_OM_SMALL));
+ req->omfactor != KDETH_OM_SMALL);
KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
req->tidoffset / req->omfactor);
KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
- !!(req->omfactor - KDETH_OM_SMALL));
+ req->omfactor != KDETH_OM_SMALL);
}
done:
trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
@@ -1499,18 +1526,13 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
}
if (req->data_iovs) {
struct sdma_mmu_node *node;
- struct mmu_rb_node *mnode;
int i;
for (i = 0; i < req->data_iovs; i++) {
- mnode = hfi1_mmu_rb_search(
- &req->pq->sdma_rb_root,
- (unsigned long)req->iovs[i].iov.iov_base,
- req->iovs[i].iov.iov_len);
- if (!mnode || IS_ERR(mnode))
+ node = req->iovs[i].node;
+ if (!node)
continue;
- node = container_of(mnode, struct sdma_mmu_node, rb);
if (unpin)
hfi1_mmu_rb_remove(&req->pq->sdma_rb_root,
&node->rb);
@@ -1558,7 +1580,20 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
container_of(mnode, struct sdma_mmu_node, rb);
spin_lock(&node->pq->evict_lock);
- list_del(&node->list);
+ /*
+	 * We've been called by the MMU notifier but this node has
+	 * already been scheduled for eviction. The eviction function
+	 * will take care of freeing this node.
+	 * We have to take the lock above first because we are racing
+	 * against the setting of the bit in the eviction function.
+ */
+ if (mm && test_bit(SDMA_CACHE_NODE_EVICT, &node->flags)) {
+ spin_unlock(&node->pq->evict_lock);
+ return;
+ }
+
+ if (!list_empty(&node->list))
+ list_del(&node->list);
node->pq->n_locked -= node->npages;
spin_unlock(&node->pq->evict_lock);
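Condensed from the hunks above and the eviction path earlier (not standalone code): both sides hold pq->evict_lock, so whoever acts first wins and the other backs off.

/* eviction path: claim the node before removing it from the tree */
set_bit(SDMA_CACHE_NODE_EVICT, &node->flags);

/* MMU-notifier path: leave claimed nodes to the evictor */
spin_lock(&node->pq->evict_lock);
if (test_bit(SDMA_CACHE_NODE_EVICT, &node->flags)) {
	spin_unlock(&node->pq->evict_lock);
	return;
}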
diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index b9240e351..b9240e351 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 89f2aad45..849c4b939 100644
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -52,7 +52,6 @@
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
-#include <linux/random.h>
#include <linux/vmalloc.h>
#include "hfi.h"
@@ -336,6 +335,8 @@ const u8 hdr_len_by_opcode[256] = {
[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4,
[IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
[IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
+ [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4,
+ [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4,
/* UC */
[IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
[IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
@@ -545,7 +546,7 @@ static inline int qp_ok(int opcode, struct hfi1_packet *packet)
if (!(ib_rvt_state_ops[packet->qp->state] & RVT_PROCESS_RECV_OK))
goto dropit;
- if (((opcode & OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
+ if (((opcode & RVT_OPCODE_QP_MASK) == packet->qp->allowed_ops) ||
(opcode == IB_OPCODE_CNP))
return 1;
dropit:
@@ -946,7 +947,6 @@ static int pio_wait(struct rvt_qp *qp,
dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
- dev->n_piowait++;
qp->s_flags |= flag;
was_empty = list_empty(&sc->piowait);
list_add_tail(&priv->s_iowait.list, &sc->piowait);
@@ -1089,16 +1089,16 @@ bail:
/*
* egress_pkey_matches_entry - return 1 if the pkey matches ent (ent
- * being an entry from the ingress partition key table), return 0
+ * being an entry from the partition key table), return 0
* otherwise. Use the matching criteria for egress partition keys
* specified in the OPAv1 spec., section 9.11.7.
*/
static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
{
u16 mkey = pkey & PKEY_LOW_15_MASK;
- u16 ment = ent & PKEY_LOW_15_MASK;
+ u16 mentry = ent & PKEY_LOW_15_MASK;
- if (mkey == ment) {
+ if (mkey == mentry) {
/*
* If pkey[15] is set (full partition member),
* is bit 15 in the corresponding table element
@@ -1111,32 +1111,32 @@ static inline int egress_pkey_matches_entry(u16 pkey, u16 ent)
return 0;
}
-/*
- * egress_pkey_check - return 0 if hdr's pkey matches according to the
- * criteria in the OPAv1 spec., section 9.11.7.
+/**
+ * egress_pkey_check - check P_KEY of a packet
+ * @ppd: Physical IB port data
+ * @lrh: Local route header
+ * @bth: Base transport header
+ * @sc5: SC for packet
+ * @s_pkey_index: used as a lookup optimization for kernel contexts only;
+ * a negative value means a user context is calling this function
+ *
+ * Checks whether the header's pkey is valid.
+ *
+ * Return: 0 on success, 1 otherwise
*/
-static inline int egress_pkey_check(struct hfi1_pportdata *ppd,
- struct hfi1_ib_header *hdr,
- struct rvt_qp *qp)
+int egress_pkey_check(struct hfi1_pportdata *ppd, __be16 *lrh, __be32 *bth,
+ u8 sc5, int8_t s_pkey_index)
{
- struct hfi1_qp_priv *priv = qp->priv;
- struct hfi1_other_headers *ohdr;
struct hfi1_devdata *dd;
- int i = 0;
+ int i;
u16 pkey;
- u8 lnh, sc5 = priv->s_sc;
+ int is_user_ctxt_mechanism = (s_pkey_index < 0);
if (!(ppd->part_enforce & HFI1_PART_ENFORCE_OUT))
return 0;
- /* locate the pkey within the headers */
- lnh = be16_to_cpu(hdr->lrh[0]) & 3;
- if (lnh == HFI1_LRH_GRH)
- ohdr = &hdr->u.l.oth;
- else
- ohdr = &hdr->u.oth;
-
- pkey = (u16)be32_to_cpu(ohdr->bth[0]);
+ pkey = (u16)be32_to_cpu(bth[0]);
/* If SC15, pkey[0:14] must be 0x7fff */
if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
@@ -1146,28 +1146,37 @@ static inline int egress_pkey_check(struct hfi1_pportdata *ppd,
if ((pkey & PKEY_LOW_15_MASK) == 0)
goto bad;
- /* The most likely matching pkey has index qp->s_pkey_index */
- if (unlikely(!egress_pkey_matches_entry(pkey,
- ppd->pkeys
- [qp->s_pkey_index]))) {
- /* no match - try the entire table */
- for (; i < MAX_PKEY_VALUES; i++) {
- if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
- break;
- }
+ /*
+	 * For kernel contexts only, the most likely matching pkey has
+	 * index s_pkey_index, so try that entry first
+ */
+ if (!is_user_ctxt_mechanism &&
+ egress_pkey_matches_entry(pkey, ppd->pkeys[s_pkey_index])) {
+ return 0;
}
- if (i < MAX_PKEY_VALUES)
- return 0;
+ for (i = 0; i < MAX_PKEY_VALUES; i++) {
+ if (egress_pkey_matches_entry(pkey, ppd->pkeys[i]))
+ return 0;
+ }
bad:
- incr_cntr64(&ppd->port_xmit_constraint_errors);
- dd = ppd->dd;
- if (!(dd->err_info_xmit_constraint.status & OPA_EI_STATUS_SMASK)) {
- u16 slid = be16_to_cpu(hdr->lrh[3]);
-
- dd->err_info_xmit_constraint.status |= OPA_EI_STATUS_SMASK;
- dd->err_info_xmit_constraint.slid = slid;
- dd->err_info_xmit_constraint.pkey = pkey;
+ /*
+ * For the user-context mechanism, the P_KEY check would only happen
+ * once per SDMA request, not once per packet. Therefore, there's no
+ * need to increment the counter for the user-context mechanism.
+ */
+ if (!is_user_ctxt_mechanism) {
+ incr_cntr64(&ppd->port_xmit_constraint_errors);
+ dd = ppd->dd;
+ if (!(dd->err_info_xmit_constraint.status &
+ OPA_EI_STATUS_SMASK)) {
+ u16 slid = be16_to_cpu(lrh[3]);
+
+ dd->err_info_xmit_constraint.status |=
+ OPA_EI_STATUS_SMASK;
+ dd->err_info_xmit_constraint.slid = slid;
+ dd->err_info_xmit_constraint.pkey = pkey;
+ }
}
return 1;
}
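The per-entry matching criteria used above reduce to: the low 15 bits must be equal, and a full-member pkey (bit 15 set) only matches a full-member table entry. A small runnable check, with hypothetical sample values:

#include <stdio.h>

#define PKEY_LOW_15_MASK 0x7fff
#define PKEY_MEMBER_MASK 0x8000

static int pkey_matches(unsigned short pkey, unsigned short ent)
{
	if ((pkey & PKEY_LOW_15_MASK) != (ent & PKEY_LOW_15_MASK))
		return 0;
	if (pkey & PKEY_MEMBER_MASK)		   /* full member pkey... */
		return !!(ent & PKEY_MEMBER_MASK); /* ...needs full entry */
	return 1;				   /* limited member matches either */
}

int main(void)
{
	printf("%d\n", pkey_matches(0x8001, 0x8001));	/* 1: full vs full */
	printf("%d\n", pkey_matches(0x8001, 0x0001));	/* 0: full vs limited */
	printf("%d\n", pkey_matches(0x0001, 0x8001));	/* 1: limited vs full */
	return 0;
}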
@@ -1227,11 +1236,26 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_other_headers *ohdr;
+ struct hfi1_ib_header *hdr;
send_routine sr;
int ret;
+ u8 lnh;
+
+ hdr = &ps->s_txreq->phdr.hdr;
+ /* locate the pkey within the headers */
+ lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+ if (lnh == HFI1_LRH_GRH)
+ ohdr = &hdr->u.l.oth;
+ else
+ ohdr = &hdr->u.oth;
sr = get_send_routine(qp, ps->s_txreq);
- ret = egress_pkey_check(dd->pport, &ps->s_txreq->phdr.hdr, qp);
+ ret = egress_pkey_check(dd->pport,
+ hdr->lrh,
+ ohdr->bth,
+ priv->s_sc,
+ qp->s_pkey_index);
if (unlikely(ret)) {
/*
* The value we are returning here does not get propagated to
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 6c4670fff..488356775 100644
--- a/drivers/staging/rdma/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -152,6 +152,7 @@ union ib_ehdrs {
} at;
__be32 imm_data;
__be32 aeth;
+ __be32 ieth;
struct ib_atomic_eth atomic_eth;
} __packed;
@@ -215,6 +216,7 @@ struct hfi1_pkt_state {
struct hfi1_ibport *ibp;
struct hfi1_pportdata *ppd;
struct verbs_txreq *s_txreq;
+ unsigned long flags;
};
#define HFI1_PSN_CREDIT 16
@@ -334,9 +336,6 @@ int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
#endif
#define PSN_MODIFY_MASK 0xFFFFFF
-/* Number of bits to pay attention to in the opcode for checking qp type */
-#define OPCODE_QP_MASK 0xE0
-
/*
* Compare the lower 24 bits of the msn values.
* Returns an integer <, ==, or > than zero.
diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index bc95c4112..d8fb05652 100644
--- a/drivers/staging/rdma/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -92,11 +92,10 @@ void hfi1_put_txreq(struct verbs_txreq *tx)
struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
struct rvt_qp *qp)
+ __must_hold(&qp->s_lock)
{
struct verbs_txreq *tx = ERR_PTR(-EBUSY);
- unsigned long flags;
- spin_lock_irqsave(&qp->s_lock, flags);
write_seqlock(&dev->iowait_lock);
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
struct hfi1_qp_priv *priv;
@@ -116,7 +115,6 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
}
out:
write_sequnlock(&dev->iowait_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
return tx;
}
diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 1cf69b2fe..a1d6e0807 100644
--- a/drivers/staging/rdma/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -73,6 +73,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
struct rvt_qp *qp)
+ __must_hold(&qp->slock)
{
struct verbs_txreq *tx;
struct hfi1_qp_priv *priv = qp->priv;
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 819767681..b738acdb9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -50,8 +50,6 @@
#include <rdma/ib_pack.h>
#include <rdma/rdma_cm.h>
#include <rdma/iw_cm.h>
-#include <rdma/iw_portmap.h>
-#include <rdma/rdma_netlink.h>
#include <crypto/hash.h>
#include "i40iw_status.h"
@@ -115,6 +113,8 @@
#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
#define IW_CFG_FPM_QP_COUNT 32768
+#define I40IW_MAX_PAGES_PER_FMR 512
+#define I40IW_MIN_PAGES_PER_FMR 1
#define I40IW_MTU_TO_MSS 40
#define I40IW_DEFAULT_MSS 1460
@@ -254,6 +254,7 @@ struct i40iw_device {
u32 arp_table_size;
u32 next_arp_index;
spinlock_t resource_lock; /* hw resource access */
+ spinlock_t qptable_lock;
u32 vendor_id;
u32 vendor_part_id;
u32 of_device_registered;
@@ -392,7 +393,7 @@ void i40iw_flush_wqes(struct i40iw_device *iwdev,
void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
unsigned char *mac_addr,
- __be32 *ip_addr,
+ u32 *ip_addr,
bool ipv4,
u32 action);
@@ -550,7 +551,7 @@ enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
struct i40iw_qp_flush_info *info,
bool wait);
-void i40iw_copy_ip_ntohl(u32 *dst, u32 *src);
+void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src);
struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *ib_pd,
u64 addr,
u64 size,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 38f917a6c..d2fa72516 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -771,6 +771,7 @@ static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
{
struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+ u16 ctrl_ird, ctrl_ord;
/* initialize the upper 5 bytes of the frame */
i40iw_build_mpa_v1(cm_node, start_addr, mpa_key);
@@ -779,38 +780,38 @@ static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
/* initialize RTR msg */
if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
- rtr_msg->ctrl_ird = IETF_NO_IRD_ORD;
- rtr_msg->ctrl_ord = IETF_NO_IRD_ORD;
+ ctrl_ird = IETF_NO_IRD_ORD;
+ ctrl_ord = IETF_NO_IRD_ORD;
} else {
- rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+ ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
IETF_NO_IRD_ORD : cm_node->ird_size;
- rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+ ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
IETF_NO_IRD_ORD : cm_node->ord_size;
}
- rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER;
- rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN;
+ ctrl_ird |= IETF_PEER_TO_PEER;
+ ctrl_ird |= IETF_FLPDU_ZERO_LEN;
switch (mpa_key) {
case MPA_KEY_REQUEST:
- rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
- rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+ ctrl_ord |= IETF_RDMA0_WRITE;
+ ctrl_ord |= IETF_RDMA0_READ;
break;
case MPA_KEY_REPLY:
switch (cm_node->send_rdma0_op) {
case SEND_RDMA_WRITE_ZERO:
- rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+ ctrl_ord |= IETF_RDMA0_WRITE;
break;
case SEND_RDMA_READ_ZERO:
- rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+ ctrl_ord |= IETF_RDMA0_READ;
break;
}
break;
default:
break;
}
- rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird);
- rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord);
+ rtr_msg->ctrl_ird = htons(ctrl_ird);
+ rtr_msg->ctrl_ord = htons(ctrl_ord);
}
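Building ctrl_ird/ctrl_ord in host-order locals and byte-swapping once at the end, as above, avoids OR-ing host-order flag bits into an already-swapped wire field. A minimal sketch; the flag value is hypothetical:

#include <arpa/inet.h>
#include <stdint.h>

struct rtr_sketch { uint16_t ctrl_ird; };	/* big-endian on the wire */

void build_ctrl(struct rtr_sketch *m, uint16_t ird_size)
{
	uint16_t ctrl_ird = ird_size;	/* compose in host order */

	ctrl_ird |= 0x4000;		/* hypothetical flag bit */
	m->ctrl_ird = htons(ctrl_ird);	/* swap exactly once, at the end */
}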
/**
@@ -2107,7 +2108,7 @@ static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr)
struct in6_addr raddr6;
i40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
- return (!memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6));
+ return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6);
}
/**
@@ -2160,7 +2161,7 @@ static struct i40iw_cm_node *i40iw_make_cm_node(
cm_node->tcp_cntxt.rcv_wnd =
I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
ts = current_kernel_time();
- cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec);
+ cm_node->tcp_cntxt.loc_seq_num = ts.tv_nsec;
cm_node->tcp_cntxt.mss = iwdev->mss;
cm_node->iwdev = iwdev;
@@ -2234,7 +2235,7 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
if (cm_node->listener) {
i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
} else {
- if (!i40iw_listen_port_in_use(cm_core, htons(cm_node->loc_port)) &&
+ if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
cm_node->apbvt_set && cm_node->iwdev) {
i40iw_manage_apbvt(cm_node->iwdev,
cm_node->loc_port,
@@ -2852,7 +2853,6 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
void *private_data,
struct i40iw_cm_info *cm_info)
{
- int ret;
struct i40iw_cm_node *cm_node;
struct i40iw_cm_listener *loopback_remotelistener;
struct i40iw_cm_node *loopback_remotenode;
@@ -2922,30 +2922,6 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
memcpy(cm_node->pdata_buf, private_data, private_data_len);
cm_node->state = I40IW_CM_STATE_SYN_SENT;
- ret = i40iw_send_syn(cm_node, 0);
-
- if (ret) {
- if (cm_node->ipv4)
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "Api - connect() FAILED: dest addr=%pI4",
- cm_node->rem_addr);
- else
- i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
- "Api - connect() FAILED: dest addr=%pI6",
- cm_node->rem_addr);
- i40iw_rem_ref_cm_node(cm_node);
- cm_node = NULL;
- }
-
- if (cm_node)
- i40iw_debug(cm_node->dev,
- I40IW_DEBUG_CM,
- "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
- cm_node->rem_port,
- cm_node,
- cm_node->cm_id);
-
return cm_node;
}
@@ -3266,11 +3242,13 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]);
tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]);
- tcp_info->arp_idx = cpu_to_le32(i40iw_arp_table(iwqp->iwdev,
- &tcp_info->dest_ip_addr3,
- true,
- NULL,
- I40IW_ARP_RESOLVE));
+ tcp_info->arp_idx =
+ cpu_to_le16((u16)i40iw_arp_table(
+ iwqp->iwdev,
+ &tcp_info->dest_ip_addr3,
+ true,
+ NULL,
+ I40IW_ARP_RESOLVE));
} else {
tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
@@ -3282,12 +3260,13 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
tcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]);
tcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]);
tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]);
- tcp_info->arp_idx = cpu_to_le32(i40iw_arp_table(
- iwqp->iwdev,
- &tcp_info->dest_ip_addr0,
- false,
- NULL,
- I40IW_ARP_RESOLVE));
+ tcp_info->arp_idx =
+ cpu_to_le16((u16)i40iw_arp_table(
+ iwqp->iwdev,
+ &tcp_info->dest_ip_addr0,
+ false,
+ NULL,
+ I40IW_ARP_RESOLVE));
}
}
@@ -3564,7 +3543,6 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct i40iw_cm_node *cm_node;
struct ib_qp_attr attr;
int passive_state;
- struct i40iw_ib_device *iwibdev;
struct ib_mr *ibmr;
struct i40iw_pd *iwpd;
u16 buf_len = 0;
@@ -3627,7 +3605,6 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
!i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
(!cm_node->ipv4 &&
!i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) {
- iwibdev = iwdev->iwibdev;
iwpd = iwqp->iwpd;
tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
ibmr = i40iw_reg_phys_mr(&iwpd->ibpd,
@@ -3752,6 +3729,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct sockaddr_in *raddr;
struct sockaddr_in6 *laddr6;
struct sockaddr_in6 *raddr6;
+ bool qhash_set = false;
int apbvt_set = 0;
enum i40iw_status_code status;
@@ -3810,6 +3788,7 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
true);
if (status)
return -EINVAL;
+ qhash_set = true;
}
status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD);
if (status) {
@@ -3828,23 +3807,8 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
conn_param->private_data_len,
(void *)conn_param->private_data,
&cm_info);
- if (!cm_node) {
- i40iw_manage_qhash(iwdev,
- &cm_info,
- I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_DELETE,
- NULL,
- false);
-
- if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
- cm_info.loc_port))
- i40iw_manage_apbvt(iwdev,
- cm_info.loc_port,
- I40IW_MANAGE_APBVT_DEL);
- cm_id->rem_ref(cm_id);
- iwdev->cm_core.stats_connect_errs++;
- return -ENOMEM;
- }
+ if (!cm_node)
+ goto err;
i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
@@ -3852,12 +3816,54 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->ord_size = 1;
cm_node->apbvt_set = apbvt_set;
- cm_node->qhash_set = true;
+ cm_node->qhash_set = qhash_set;
iwqp->cm_node = cm_node;
cm_node->iwqp = iwqp;
iwqp->cm_id = cm_id;
i40iw_add_ref(&iwqp->ibqp);
+
+ if (cm_node->state == I40IW_CM_STATE_SYN_SENT) {
+ if (i40iw_send_syn(cm_node, 0)) {
+ i40iw_rem_ref_cm_node(cm_node);
+ goto err;
+ }
+ }
+
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
+ cm_node->rem_port,
+ cm_node,
+ cm_node->cm_id);
return 0;
+
+err:
+ if (cm_node) {
+ if (cm_node->ipv4)
+ i40iw_debug(cm_node->dev,
+ I40IW_DEBUG_CM,
+ "Api - connect() FAILED: dest addr=%pI4",
+ cm_node->rem_addr);
+ else
+ i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
+ "Api - connect() FAILED: dest addr=%pI6",
+ cm_node->rem_addr);
+ }
+ i40iw_manage_qhash(iwdev,
+ &cm_info,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
+
+ if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
+ cm_info.loc_port))
+ i40iw_manage_apbvt(iwdev,
+ cm_info.loc_port,
+ I40IW_MANAGE_APBVT_DEL);
+ cm_id->rem_ref(cm_id);
+ iwdev->cm_core.stats_connect_errs++;
+ return -ENOMEM;
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 5f8ceb4a8..e9046d9f9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -1,6 +1,6 @@
/*******************************************************************************
*
-* Copyright (c) 2015 Intel Corporation. All rights reserved.
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -291,8 +291,6 @@ struct i40iw_cm_listener {
u8 loc_mac[ETH_ALEN];
u32 loc_addr[4];
u16 loc_port;
- u32 map_loc_addr[4];
- u16 map_loc_port;
struct iw_cm_id *cm_id;
atomic_t ref_count;
struct i40iw_device *iwdev;
@@ -317,8 +315,6 @@ struct i40iw_kmem_info {
struct i40iw_cm_node {
u32 loc_addr[4], rem_addr[4];
u16 loc_port, rem_port;
- u32 map_loc_addr[4], map_rem_addr[4];
- u16 map_loc_port, map_rem_port;
u16 vlan_id;
enum i40iw_cm_node_state state;
u8 loc_mac[ETH_ALEN];
@@ -370,10 +366,6 @@ struct i40iw_cm_info {
u16 rem_port;
u32 loc_addr[4];
u32 rem_addr[4];
- u16 map_loc_port;
- u16 map_rem_port;
- u32 map_loc_addr[4];
- u32 map_rem_addr[4];
u16 vlan_id;
int backlog;
u16 user_pri;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index f05802bf6..2c4b4d072 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -114,16 +114,21 @@ static enum i40iw_status_code i40iw_cqp_poll_registers(
* i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
* @buf: ptr to fpm commit buffer
* @info: ptr to i40iw_hmc_obj_info struct
+ * @sd: number of SDs for HMC objects
*
* parses fpm commit info and copies base values
* of hmc objects into hmc_info
*/
static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
u64 *buf,
- struct i40iw_hmc_obj_info *info)
+ struct i40iw_hmc_obj_info *info,
+ u32 *sd)
{
u64 temp;
+ u64 size;
+ u64 base = 0;
u32 i, j;
+ u32 k = 0;
u32 low;
/* copy base values in obj_info */
@@ -131,10 +136,20 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
get_64bit_val(buf, j, &temp);
info[i].base = RS_64_1(temp, 32) * 512;
+ if (info[i].base > base) {
+ base = info[i].base;
+ k = i;
+ }
low = (u32)(temp);
if (low)
info[i].cnt = low;
}
+ size = info[k].cnt * info[k].size + info[k].base;
+ if (size & 0x1FFFFF)
+ *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
+ else
+ *sd = (u32)(size >> 21);
+
return 0;
}
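An SD covers 2 MB (2^21 bytes), so the mask-and-shift above (0x1FFFFF is (1 << 21) - 1) is just a ceiling division. A small runnable check:

#include <stdio.h>
#include <stdint.h>

static uint32_t sd_count(uint64_t size)
{
	/* ceil(size / 2MB) */
	return (uint32_t)((size >> 21) + ((size & 0x1FFFFF) ? 1 : 0));
}

int main(void)
{
	printf("%u\n", sd_count(2097152));   /* exactly 2 MB  -> 1 SD  */
	printf("%u\n", sd_count(2097153));   /* 2 MB + 1 byte -> 2 SDs */
	printf("%u\n", sd_count(5242880));   /* 5 MB          -> 3 SDs */
	return 0;
}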
@@ -2909,6 +2924,65 @@ static enum i40iw_status_code i40iw_sc_mw_alloc(
}
/**
+ * i40iw_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
+ * @qp: sc qp struct
+ * @info: fast mr info
+ * @post_sq: flag for cqp db to ring
+ */
+enum i40iw_status_code i40iw_sc_mr_fast_register(
+ struct i40iw_sc_qp *qp,
+ struct i40iw_fast_reg_stag_info *info,
+ bool post_sq)
+{
+ u64 temp, header;
+ u64 *wqe;
+ u32 wqe_idx;
+
+ wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
+ 0, info->wr_id);
+ if (!wqe)
+ return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+ i40iw_debug(qp->dev, I40IW_DEBUG_MR, "%s: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
+ __func__, info->wr_id, wqe_idx,
+ &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
+ temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
+ set_64bit_val(wqe, 0, temp);
+
+ temp = RS_64(info->first_pm_pbl_index >> 16, I40IWQPSQ_FIRSTPMPBLIDXHI);
+ set_64bit_val(wqe,
+ 8,
+ LS_64(temp, I40IWQPSQ_FIRSTPMPBLIDXHI) |
+ LS_64(info->reg_addr_pa >> I40IWQPSQ_PBLADDR_SHIFT, I40IWQPSQ_PBLADDR));
+
+ set_64bit_val(wqe,
+ 16,
+ info->total_len |
+ LS_64(info->first_pm_pbl_index, I40IWQPSQ_FIRSTPMPBLIDXLO));
+
+ header = LS_64(info->stag_key, I40IWQPSQ_STAGKEY) |
+ LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
+ LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
+ LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
+ LS_64(info->page_size, I40IWQPSQ_HPAGESIZE) |
+ LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
+ LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
+ LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
+ LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
+ LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+ LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "FAST_REG WQE",
+ wqe, I40IW_QP_WQE_MIN_SIZE);
+
+ if (post_sq)
+ i40iw_qp_post_wr(&qp->qp_uk);
+ return 0;
+}
+
+/**
* i40iw_sc_send_lsmm - send last streaming mode message
* @qp: sc qp struct
* @lsmm_buf: buffer with lsmm message
@@ -3147,7 +3221,7 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_
i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
/* parse the fpm_commit_buf and fill hmc obj info */
- i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj);
+ i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj, &hmc_info->sd_table.sd_cnt);
mem_size = sizeof(struct i40iw_hmc_sd_entry) *
(hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
@@ -3221,7 +3295,9 @@ static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev
/* parse the fpm_commit_buf and fill hmc obj info */
if (!ret_code)
- ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf, hmc_info->hmc_obj);
+ ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf,
+ hmc_info->hmc_obj,
+ &hmc_info->sd_table.sd_cnt);
i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);
@@ -3469,6 +3545,40 @@ static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
}
/**
+ * i40iw_est_sd - returns approximate number of SDs for HMC
+ * @dev: sc device struct
+ * @hmc_info: hmc structure, size and count for HMC objects
+ */
+static u64 i40iw_est_sd(struct i40iw_sc_dev *dev, struct i40iw_hmc_info *hmc_info)
+{
+ int i;
+ u64 size = 0;
+ u64 sd;
+
+ for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_PBLE; i++)
+ size += hmc_info->hmc_obj[i].cnt * hmc_info->hmc_obj[i].size;
+
+ if (dev->is_pf)
+ size += hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
+
+ if (size & 0x1FFFFF)
+ sd = (size >> 21) + 1; /* add 1 for remainder */
+ else
+ sd = size >> 21;
+
+ if (!dev->is_pf) {
+ /* 2MB alignment for VF PBLE HMC */
+ size = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
+ if (size & 0x1FFFFF)
+ sd += (size >> 21) + 1; /* add 1 for remainder */
+ else
+ sd += size >> 21;
+ }
+
+ return sd;
+}
+
+/**
* i40iw_config_fpm_values - configure HMC objects
* @dev: sc device struct
* @qp_count: desired qp count
@@ -3479,7 +3589,7 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
u32 i, mem_size;
u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
u32 powerof2;
- u64 sd_needed, bytes_needed;
+ u64 sd_needed;
u32 loop_count = 0;
struct i40iw_hmc_info *hmc_info;
@@ -3497,23 +3607,15 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
return ret_code;
}
- bytes_needed = 0;
- for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
+ for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
- bytes_needed +=
- (hmc_info->hmc_obj[i].max_cnt) * (hmc_info->hmc_obj[i].size);
- i40iw_debug(dev, I40IW_DEBUG_HMC,
- "%s i[%04d] max_cnt[0x%04X] size[0x%04llx]\n",
- __func__, i, hmc_info->hmc_obj[i].max_cnt,
- hmc_info->hmc_obj[i].size);
- }
- sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1; /* round up */
+ sd_needed = i40iw_est_sd(dev, hmc_info);
i40iw_debug(dev, I40IW_DEBUG_HMC,
"%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
__func__, sd_needed, hmc_info->first_sd_index);
i40iw_debug(dev, I40IW_DEBUG_HMC,
- "%s: bytes_needed=0x%llx sd count %d where max sd is %d\n",
- __func__, bytes_needed, hmc_info->sd_table.sd_cnt,
+ "%s: sd count %d where max sd is %d\n",
+ __func__, hmc_info->sd_table.sd_cnt,
hmc_fpm_misc->max_sds);
qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
@@ -3555,11 +3657,7 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;
/* How much memory is needed for all the objects. */
- bytes_needed = 0;
- for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
- bytes_needed +=
- (hmc_info->hmc_obj[i].cnt) * (hmc_info->hmc_obj[i].size);
- sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1;
+ sd_needed = i40iw_est_sd(dev, hmc_info);
if ((loop_count > 1000) ||
((!(loop_count % 10)) &&
(qpwanted > qpwantedoriginal * 2 / 3))) {
@@ -3580,15 +3678,7 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
pblewanted -= FPM_MULTIPLIER * 1000;
} while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);
- bytes_needed = 0;
- for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
- bytes_needed += (hmc_info->hmc_obj[i].cnt) * (hmc_info->hmc_obj[i].size);
- i40iw_debug(dev, I40IW_DEBUG_HMC,
- "%s i[%04d] cnt[0x%04x] size[0x%04llx]\n",
- __func__, i, hmc_info->hmc_obj[i].cnt,
- hmc_info->hmc_obj[i].size);
- }
- sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1; /* round up not truncate. */
+ sd_needed = i40iw_est_sd(dev, hmc_info);
i40iw_debug(dev, I40IW_DEBUG_HMC,
"loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
@@ -3606,8 +3696,6 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_
return ret_code;
}
- hmc_info->sd_table.sd_cnt = (u32)sd_needed;
-
mem_size = sizeof(struct i40iw_hmc_sd_entry) *
(hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
@@ -3911,11 +3999,11 @@ enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
*/
static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
{
- u16 *mpa;
+ __be16 *mpa;
u32 opcode = 0xffffffff;
if (info->q2_data_written) {
- mpa = (u16 *)pkt;
+ mpa = (__be16 *)pkt;
opcode = ntohs(mpa[1]) & 0xf;
}
return opcode;
@@ -3977,7 +4065,7 @@ static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
if (info->q2_data_written) {
/* Use data from offending packet to fill in ddp & rdma hdrs */
pkt = i40iw_locate_mpa(pkt);
- ddp_seg_len = ntohs(*(u16 *)pkt);
+ ddp_seg_len = ntohs(*(__be16 *)pkt);
if (ddp_seg_len) {
copy_len = 2;
termhdr->hdrct = DDP_LEN_FLAG;
@@ -4188,13 +4276,13 @@ void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *
void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
{
u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
- u32 *mpa;
+ __be32 *mpa;
u8 ddp_ctl;
u8 rdma_ctl;
u16 aeq_id = 0;
struct i40iw_terminate_hdr *termhdr;
- mpa = (u32 *)i40iw_locate_mpa(pkt);
+ mpa = (__be32 *)i40iw_locate_mpa(pkt);
if (info->q2_data_written) {
/* did not validate the frame - do it now */
ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
@@ -4559,17 +4647,18 @@ static struct i40iw_pd_ops iw_pd_ops = {
};
static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
- i40iw_sc_qp_init,
- i40iw_sc_qp_create,
- i40iw_sc_qp_modify,
- i40iw_sc_qp_destroy,
- i40iw_sc_qp_flush_wqes,
- i40iw_sc_qp_upload_context,
- i40iw_sc_qp_setctx,
- i40iw_sc_send_lsmm,
- i40iw_sc_send_lsmm_nostag,
- i40iw_sc_send_rtt,
- i40iw_sc_post_wqe0,
+ .qp_init = i40iw_sc_qp_init,
+ .qp_create = i40iw_sc_qp_create,
+ .qp_modify = i40iw_sc_qp_modify,
+ .qp_destroy = i40iw_sc_qp_destroy,
+ .qp_flush_wqes = i40iw_sc_qp_flush_wqes,
+ .qp_upload_context = i40iw_sc_qp_upload_context,
+ .qp_setctx = i40iw_sc_qp_setctx,
+ .qp_send_lsmm = i40iw_sc_send_lsmm,
+ .qp_send_lsmm_nostag = i40iw_sc_send_lsmm_nostag,
+ .qp_send_rtt = i40iw_sc_send_rtt,
+ .qp_post_wqe0 = i40iw_sc_post_wqe0,
+ .iw_mr_fast_register = i40iw_sc_mr_fast_register
};
static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index aab88d65f..bd942da91 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -1290,7 +1290,7 @@
/* wqe size considering 32 bytes per wqe*/
#define I40IWQP_SW_MIN_WQSIZE 4 /* 128 bytes */
-#define I40IWQP_SW_MAX_WQSIZE 16384 /* 524288 bytes */
+#define I40IWQP_SW_MAX_WQSIZE 2048 /* 65536 bytes */
#define I40IWQP_OP_RDMA_WRITE 0
#define I40IWQP_OP_RDMA_READ 1
@@ -1512,6 +1512,8 @@ enum i40iw_alignment {
I40IW_SD_BUF_ALIGNMENT = 0x100
};
+#define I40IW_WQE_SIZE_64 64
+
#define I40IW_QP_WQE_MIN_SIZE 32
#define I40IW_QP_WQE_MAX_SIZE 128
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
index 9fd302425..3ee0cad96 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
@@ -106,7 +106,9 @@ u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
set_bit(2, iwdev->allocated_pds);
spin_lock_init(&iwdev->resource_lock);
- mrdrvbits = 24 - get_count_order(iwdev->max_mr);
+ spin_lock_init(&iwdev->qptable_lock);
+ /* stag index mask has a minimum of 14 bits */
+ mrdrvbits = 24 - max(get_count_order(iwdev->max_mr), 14);
iwdev->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
return 0;
}
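With the new 14-bit floor on the stag index, a hypothetical max_mr of 4096 gives get_count_order() = 12, so mrdrvbits = 24 - max(12, 14) = 10 and the mask keeps the low 22 bits. A quick arithmetic check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int order = 12;		/* e.g. get_count_order(4096) */
	unsigned int bits = order > 14 ? order : 14;
	unsigned int mrdrvbits = 24 - bits;	/* 10 driver bits on top */
	uint32_t mask = ~(((1u << mrdrvbits) - 1) << (32 - mrdrvbits));

	printf("mrdrvbits=%u mask=0x%08x\n", mrdrvbits, mask); /* 0x003fffff */
	return 0;
}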
@@ -301,11 +303,15 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
"%s ae_id = 0x%x bool qp=%d qp_id = %d\n",
__func__, info->ae_id, info->qp, info->qp_cq_id);
if (info->qp) {
+ spin_lock_irqsave(&iwdev->qptable_lock, flags);
iwqp = iwdev->qp_table[info->qp_cq_id];
if (!iwqp) {
+ spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
i40iw_pr_err("qp_id %d is already freed\n", info->qp_cq_id);
continue;
}
+ i40iw_add_ref(&iwqp->ibqp);
+ spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
qp = &iwqp->sc_qp;
spin_lock_irqsave(&iwqp->lock, flags);
iwqp->hw_tcp_state = info->tcp_state;
@@ -411,6 +417,8 @@ void i40iw_process_aeq(struct i40iw_device *iwdev)
i40iw_terminate_connection(qp, info);
break;
}
+ if (info->qp)
+ i40iw_rem_ref(&iwqp->ibqp);
} while (1);
if (aeqcnt)
@@ -460,7 +468,7 @@ int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool ad
*/
void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
unsigned char *mac_addr,
- __be32 *ip_addr,
+ u32 *ip_addr,
bool ipv4,
u32 action)
{
@@ -481,7 +489,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
cqp_info->cqp_cmd = OP_ADD_ARP_CACHE_ENTRY;
info = &cqp_info->in.u.add_arp_cache_entry.info;
memset(info, 0, sizeof(*info));
- info->arp_index = cpu_to_le32(arp_index);
+ info->arp_index = cpu_to_le16((u16)arp_index);
info->permanent = true;
ether_addr_copy(info->mac_addr, mac_addr);
cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 90e5af217..6e9081380 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -270,7 +270,6 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
else
i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
- synchronize_irq(msix_vec->irq);
free_irq(msix_vec->irq, dev_id);
}
@@ -601,8 +600,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
cqp_init_info.scratch_array = cqp->scratch_array;
status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
if (status) {
- i40iw_pr_err("cqp init status %d maj_err %d min_err %d\n",
- status, maj_err, min_err);
+ i40iw_pr_err("cqp init status %d\n", status);
goto exit;
}
status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
@@ -1147,10 +1145,7 @@ static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iw
if (!status) {
status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
(u8)iwdev->mac_ip_table_idx);
- if (!status)
- status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
- (u8)iwdev->mac_ip_table_idx);
- else
+ if (status)
i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
}
return status;
@@ -1165,7 +1160,7 @@ static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
struct net_device *ip_dev;
struct inet6_dev *idev;
struct inet6_ifaddr *ifp;
- __be32 local_ipaddr6[4];
+ u32 local_ipaddr6[4];
rcu_read_lock();
for_each_netdev_rcu(&init_net, ip_dev) {
@@ -1512,6 +1507,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
I40IW_HMC_PROFILE_DEFAULT;
iwdev->max_rdma_vfs =
(iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
+ iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
iwdev->netdev = ldev->netdev;
hdl->client = client;
iwdev->mss = (!ldev->params.mtu) ? I40IW_DEFAULT_MSS : ldev->params.mtu - I40IW_MTU_TO_MSS;
@@ -1531,7 +1527,10 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
goto exit;
iwdev->obj_next = iwdev->obj_mem;
iwdev->push_mode = push_mode;
+
init_waitqueue_head(&iwdev->vchnl_waitq);
+ init_waitqueue_head(&dev->vf_reqs);
+
status = i40iw_initialize_dev(iwdev, ldev);
exit:
if (status) {
@@ -1710,7 +1709,6 @@ static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u
for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
continue;
-
/* free all resources allocated on behalf of vf */
tmp_vfdev = dev->vf_dev[i];
spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
@@ -1819,8 +1817,6 @@ static int i40iw_virtchnl_receive(struct i40e_info *ldev,
dev = &hdl->device.sc_dev;
iwdev = dev->back_dev;
- i40iw_debug(dev, I40IW_DEBUG_VIRT, "msg %p, message length %u\n", msg, len);
-
if (dev->vchnl_if.vchnl_recv) {
ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
if (!dev->is_pf) {
@@ -1832,6 +1828,39 @@ static int i40iw_virtchnl_receive(struct i40e_info *ldev,
}
/**
+ * i40iw_vf_clear_to_send - wait to send virtual channel message
+ * @dev: iwarp device
+ *
+ * Wait until the virtual channel is clear
+ * before sending the next message
+ *
+ * Returns false on error
+ * Returns true if clear to send
+ */
+bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
+{
+ struct i40iw_device *iwdev;
+ wait_queue_t wait;
+
+ iwdev = dev->back_dev;
+
+ if (!wq_has_sleeper(&dev->vf_reqs) &&
+ (atomic_read(&iwdev->vchnl_msgs) == 0))
+ return true; /* virtual channel is clear */
+
+ init_wait(&wait);
+ add_wait_queue_exclusive(&dev->vf_reqs, &wait);
+
+ if (!wait_event_timeout(dev->vf_reqs,
+ (atomic_read(&iwdev->vchnl_msgs) == 0),
+ I40IW_VCHNL_EVENT_TIMEOUT))
+ dev->vchnl_up = false;
+
+ remove_wait_queue(&dev->vf_reqs, &wait);
+
+ return dev->vchnl_up;
+}
+
+/**
* i40iw_virtchnl_send - send a message through the virtual channel
* @dev: iwarp device
* @vf_id: virtual function id associated with the message
@@ -1848,22 +1877,20 @@ static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
{
struct i40iw_device *iwdev;
struct i40e_info *ldev;
- enum i40iw_status_code ret_code = I40IW_ERR_BAD_PTR;
if (!dev || !dev->back_dev)
- return ret_code;
+ return I40IW_ERR_BAD_PTR;
iwdev = dev->back_dev;
ldev = iwdev->ldev;
if (ldev && ldev->ops && ldev->ops->virtchnl_send)
- ret_code = ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
-
- return ret_code;
+ return ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
+ return I40IW_ERR_BAD_PTR;
}
/* client interface functions */
-static struct i40e_client_ops i40e_ops = {
+static const struct i40e_client_ops i40e_ops = {
.open = i40iw_open,
.close = i40iw_close,
.l2_param_change = i40iw_l2param_change,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
index 7e2049351..80f422bf3 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
@@ -172,6 +172,7 @@ struct i40iw_hw;
u8 __iomem *i40iw_get_hw_addr(void *dev);
void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev);
+bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev);
enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc, void *addr,
u32 length, u32 value);
struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *buf);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
index ded853d2f..85993dc44 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
@@ -404,13 +404,14 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
if (sd_entry->valid)
return 0;
- if (dev->is_pf)
+ if (dev->is_pf) {
ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
sd_reg_val, idx->sd_idx,
sd_entry->entry_type, true);
- if (ret_code) {
- i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
- goto error;
+ if (ret_code) {
+ i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
+ goto error;
+ }
}
sd_entry->valid = true;
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index 8eb400d8a..e9c6e82af 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -1194,7 +1194,7 @@ static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq,
ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
while (datalen) {
- fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(u16 *)datap));
+ fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(__be16 *)datap));
if (fpdu_len > pfpdu->max_fpdu_data) {
i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
"%s: error bad fpdu_len\n", __func__);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h
index b0110c15e..91c421762 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_status.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_status.h
@@ -95,6 +95,7 @@ enum i40iw_status_code {
I40IW_ERR_INVALID_MAC_ADDR = -65,
I40IW_ERR_BAD_STAG = -66,
I40IW_ERR_CQ_COMPL_ERROR = -67,
+ I40IW_ERR_QUEUE_DESTROYED = -68
};
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
index edb3a8c82..16cc61720 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_type.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_type.h
@@ -479,16 +479,17 @@ struct i40iw_sc_dev {
struct i40iw_virt_mem ieq_mem;
struct i40iw_puda_rsrc *ieq;
- struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;
+ const struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;
struct i40iw_hmc_fpm_misc hmc_fpm_misc;
u16 qs_handle;
- u32 debug_mask;
+ u32 debug_mask;
u16 exception_lan_queue;
u8 hmc_fn_id;
bool is_pf;
bool vchnl_up;
u8 vf_id;
+ wait_queue_head_t vf_reqs;
u64 cqp_cmd_stats[OP_SIZE_CQP_STAT_ARRAY];
struct i40iw_vchnl_vf_msg_buffer vchnl_vf_msg_buf;
u8 hw_rev;
@@ -889,8 +890,8 @@ struct i40iw_qhash_table_info {
u32 qp_num;
u32 dest_ip[4];
u32 src_ip[4];
- u32 dest_port;
- u32 src_port;
+ u16 dest_port;
+ u16 src_port;
};
struct i40iw_local_mac_ipaddr_entry_info {
@@ -1040,6 +1041,9 @@ struct i40iw_priv_qp_ops {
void (*qp_send_lsmm_nostag)(struct i40iw_sc_qp *, void *, u32);
void (*qp_send_rtt)(struct i40iw_sc_qp *, bool);
enum i40iw_status_code (*qp_post_wqe0)(struct i40iw_sc_qp *, u8);
+ enum i40iw_status_code (*iw_mr_fast_register)(struct i40iw_sc_qp *,
+ struct i40iw_fast_reg_stag_info *,
+ bool);
};
struct i40iw_priv_cq_ops {
@@ -1108,7 +1112,7 @@ struct i40iw_hmc_ops {
enum i40iw_status_code (*parse_fpm_query_buf)(u64 *, struct i40iw_hmc_info *,
struct i40iw_hmc_fpm_misc *);
enum i40iw_status_code (*configure_iw_fpm)(struct i40iw_sc_dev *, u8);
- enum i40iw_status_code (*parse_fpm_commit_buf)(u64 *, struct i40iw_hmc_obj_info *);
+ enum i40iw_status_code (*parse_fpm_commit_buf)(u64 *, struct i40iw_hmc_obj_info *, u32 *sd);
enum i40iw_status_code (*create_hmc_object)(struct i40iw_sc_dev *dev,
struct i40iw_hmc_create_obj_info *);
enum i40iw_status_code (*del_hmc_object)(struct i40iw_sc_dev *dev,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
index f78c3dc8b..e35faea88 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_uk.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c
@@ -56,6 +56,9 @@ static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
wqe = qp->sq_base[wqe_idx].elem;
+
+ qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE;
+
peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
wqe_0 = qp->sq_base[peek_head].elem;
if (peek_head)
@@ -130,7 +133,10 @@ static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)
*/
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
u32 *wqe_idx,
- u8 wqe_size)
+ u8 wqe_size,
+ u32 total_size,
+ u64 wr_id)
{
u64 *wqe = NULL;
u64 wqe_ptr;
@@ -159,6 +165,17 @@ u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
if (!*wqe_idx)
qp->swqe_polarity = !qp->swqe_polarity;
}
+
+ if (((*wqe_idx & 3) == 1) && (wqe_size == I40IW_WQE_SIZE_64)) {
+ i40iw_nop_1(qp);
+ I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+ if (ret_code)
+ return NULL;
+ *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+ if (!*wqe_idx)
+ qp->swqe_polarity = !qp->swqe_polarity;
+ }
+
for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
if (ret_code)
@@ -169,8 +186,15 @@ u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
wqe_0 = qp->sq_base[peek_head].elem;
- if (peek_head & 0x3)
- wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
+
+ if (((peek_head & 3) == 1) || ((peek_head & 3) == 3)) {
+ if (RS_64(wqe_0[3], I40IWQPSQ_VALID) != !qp->swqe_polarity)
+ wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
+ }
+
+ qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id;
+ qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
+ qp->sq_wrtrk_array[*wqe_idx].wqe_size = wqe_size;
return wqe;
}
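
The reworked head logic above does two things: it records wrid, wr_len and wqe_size in the tracking array at allocation time, and it keeps a 64-byte WQE from starting on quantum 1 of a 128-byte line by burning that slot with a NOP. A standalone sketch of the placement rule, mirroring the (*wqe_idx & 3) == 1 check:

#include <stdio.h>

/* four 32-byte quanta per 128-byte line; a 64-byte WQE must not start
 * at slot 1 of a line, so a NOP consumes the odd slot (rule sketched
 * from the hunk above) */
static unsigned int place_64byte_wqe(unsigned int head)
{
	if ((head & 3) == 1)
		head++;
	return head;
}

int main(void)
{
	for (unsigned int head = 0; head < 8; head++)
		printf("head %u -> 64-byte WQE starts at %u\n",
		       head, place_64byte_wqe(head));
	return 0;
}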
@@ -249,12 +273,9 @@ static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
if (ret_code)
return ret_code;
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
if (!wqe)
return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
-
- qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
- qp->sq_wrtrk_array[wqe_idx].wr_len = total_size;
set_64bit_val(wqe, 16,
LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
if (!op_info->rem_addr.stag)
@@ -309,12 +330,9 @@ static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,
ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);
if (ret_code)
return ret_code;
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id);
if (!wqe)
return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
-
- qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
- qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->lo_addr.len;
local_fence |= info->local_fence;
set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
@@ -366,13 +384,11 @@ static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
if (ret_code)
return ret_code;
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);
if (!wqe)
return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
read_fence |= info->read_fence;
- qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
- qp->sq_wrtrk_array[wqe_idx].wr_len = total_size;
set_64bit_val(wqe, 16, 0);
header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
LS_64(info->op_type, I40IWQPSQ_OPCODE) |
@@ -427,13 +443,11 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
if (ret_code)
return ret_code;
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
if (!wqe)
return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
read_fence |= info->read_fence;
- qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
- qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len;
set_64bit_val(wqe, 16,
LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
@@ -507,14 +521,11 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
if (ret_code)
return ret_code;
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);
if (!wqe)
return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
read_fence |= info->read_fence;
-
- qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
- qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len;
header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
LS_64(info->op_type, I40IWQPSQ_OPCODE) |
LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
@@ -574,12 +585,9 @@ static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp
op_info = &info->op.inv_local_stag;
local_fence = info->local_fence;
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
if (!wqe)
return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
-
- qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
- qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
set_64bit_val(wqe, 0, 0);
set_64bit_val(wqe, 8,
LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));
@@ -619,12 +627,9 @@ static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,
op_info = &info->op.bind_window;
local_fence |= info->local_fence;
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);
if (!wqe)
return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
-
- qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
- qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
set_64bit_val(wqe, 8,
LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |
@@ -760,7 +765,7 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
enum i40iw_status_code ret_code2 = 0;
bool move_cq_head = true;
u8 polarity;
- u8 addl_frag_cnt, addl_wqes = 0;
+ u8 addl_wqes = 0;
if (cq->avoid_mem_cflct)
cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
@@ -797,6 +802,10 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);
qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
+ if (!qp) {
+ ret_code = I40IW_ERR_QUEUE_DESTROYED;
+ goto exit;
+ }
wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
info->qp_handle = (i40iw_qp_handle)(unsigned long)qp;
@@ -827,11 +836,8 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
info->op_type = (u8)RS_64(qword3, I40IWCQ_OP);
sw_wqe = qp->sq_base[wqe_idx].elem;
get_64bit_val(sw_wqe, 24, &wqe_qword);
- addl_frag_cnt =
- (u8)RS_64(wqe_qword, I40IWQPSQ_ADDFRAGCNT);
- i40iw_fragcnt_to_wqesize_sq(addl_frag_cnt + 1, &addl_wqes);
- addl_wqes = (addl_wqes / I40IW_QP_WQE_MIN_SIZE);
+ addl_wqes = qp->sq_wrtrk_array[wqe_idx].wqe_size / I40IW_QP_WQE_MIN_SIZE;
I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));
} else {
do {
@@ -843,9 +849,7 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
get_64bit_val(sw_wqe, 24, &wqe_qword);
op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);
info->op_type = op_type;
- addl_frag_cnt = (u8)RS_64(wqe_qword, I40IWQPSQ_ADDFRAGCNT);
- i40iw_fragcnt_to_wqesize_sq(addl_frag_cnt + 1, &addl_wqes);
- addl_wqes = (addl_wqes / I40IW_QP_WQE_MIN_SIZE);
+ addl_wqes = qp->sq_wrtrk_array[tail].wqe_size / I40IW_QP_WQE_MIN_SIZE;
I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));
if (op_type != I40IWQP_OP_NOP) {
info->wr_id = qp->sq_wrtrk_array[tail].wrid;
@@ -859,6 +863,7 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
ret_code = 0;
+exit:
if (!ret_code &&
(info->comp_status == I40IW_COMPL_STATUS_FLUSHED))
if (pring && (I40IW_RING_MORE_WORK(*pring)))
@@ -893,19 +898,21 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
* i40iw_get_wqe_shift - get shift count for maximum wqe size
* @wqdepth: depth of wq required.
* @sge: Maximum Scatter Gather Elements wqe
+ * @inline_data: Maximum inline data size
* @shift: Returns the shift needed based on sge
*
- * Shift can be used to left shift the wqe size based on sge.
- * If sge, == 1, shift =0 (wqe_size of 32 bytes), for sge=2 and 3, shift =1
- * (64 bytes wqes) and 2 otherwise (128 bytes wqe).
+ * Shift can be used to left shift the wqe size based on number of SGEs and inline data size.
+ * For 1 SGE or inline data <= 16, shift = 0 (wqe size of 32 bytes).
+ * For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes).
+ * Shift of 2 otherwise (wqe size of 128 bytes).
*/
-enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u8 sge, u8 *shift)
+enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data, u8 *shift)
{
u32 size;
*shift = 0;
- if (sge > 1)
- *shift = (sge < 4) ? 1 : 2;
+ if (sge > 1 || inline_data > 16)
+ *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
/* check if wqdepth is multiple of 2 or not */
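
With inline data in play, the shift now encodes both the SGE count and the inline payload size. A sketch of the selection, written as a decision table equivalent to the rewritten branch above:

#include <stdint.h>
#include <stdio.h>

/* 0 -> 32-byte WQE, 1 -> 64-byte WQE, 2 -> 128-byte WQE */
static uint8_t wqe_shift(uint32_t sge, uint32_t inline_data)
{
	if (sge <= 1 && inline_data <= 16)
		return 0;
	if (sge < 4 && inline_data <= 48)
		return 1;
	return 2;
}

int main(void)
{
	printf("%u %u %u\n",
	       wqe_shift(1, 0),		/* one SGE, no inline -> 0 */
	       wqe_shift(3, 32),	/* three SGEs, 32B inline -> 1 */
	       wqe_shift(1, 49));	/* inline past 48 bytes -> 2 */
	return 0;
}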
@@ -968,11 +975,11 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
return I40IW_ERR_INVALID_FRAG_COUNT;
- ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, &sqshift);
+ ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, info->max_inline_data, &sqshift);
if (ret_code)
return ret_code;
- ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, &rqshift);
+ ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, 0, &rqshift);
if (ret_code)
return ret_code;
@@ -1097,12 +1104,9 @@ enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
u64 header, *wqe;
u32 wqe_idx;
- wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
+ wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, wr_id);
if (!wqe)
return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
-
- qp->sq_wrtrk_array[wqe_idx].wrid = wr_id;
- qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
set_64bit_val(wqe, 0, 0);
set_64bit_val(wqe, 8, 0);
set_64bit_val(wqe, 16, 0);
@@ -1125,7 +1129,7 @@ enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
* @frag_cnt: number of fragments
* @wqe_size: size of sq wqe returned
*/
-enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u8 frag_cnt, u8 *wqe_size)
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size)
{
switch (frag_cnt) {
case 0:
@@ -1156,7 +1160,7 @@ enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u8 frag_cnt, u8 *wqe_size)
* @frag_cnt: number of fragments
* @wqe_size: size of rq wqe returned
*/
-enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u8 frag_cnt, u8 *wqe_size)
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size)
{
switch (frag_cnt) {
case 0:
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
index 5cd971bb8..4627646fe 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_user.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_user.h
@@ -61,7 +61,7 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_CQ_SIZE = 1048575,
I40IW_MAX_AEQ_ALLOCATE_COUNT = 255,
I40IW_DB_ID_ZERO = 0,
- I40IW_MAX_WQ_FRAGMENT_COUNT = 6,
+ I40IW_MAX_WQ_FRAGMENT_COUNT = 3,
I40IW_MAX_SGE_RD = 1,
I40IW_MAX_OUTBOUND_MESSAGE_SIZE = 2147483647,
I40IW_MAX_INBOUND_MESSAGE_SIZE = 2147483647,
@@ -70,8 +70,8 @@ enum i40iw_device_capabilities_const {
I40IW_MAX_VF_FPM_ID = 47,
I40IW_MAX_VF_PER_PF = 127,
I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496,
- I40IW_MAX_INLINE_DATA_SIZE = 112,
- I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 112,
+ I40IW_MAX_INLINE_DATA_SIZE = 48,
+ I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 48,
I40IW_MAX_IRD_SIZE = 32,
I40IW_QPCTX_ENCD_MAXIRD = 3,
I40IW_MAX_WQ_ENTRIES = 2048,
@@ -102,6 +102,8 @@ enum i40iw_device_capabilities_const {
#define I40IW_STAG_INDEX_FROM_STAG(stag) (((stag) && 0xFFFFFF00) >> 8)
+#define I40IW_MAX_MR_SIZE 0x10000000000L
+
struct i40iw_qp_uk;
struct i40iw_cq_uk;
struct i40iw_srq_uk;
@@ -198,7 +200,7 @@ enum i40iw_completion_notify {
struct i40iw_post_send {
i40iw_sgl sg_list;
- u8 num_sges;
+ u32 num_sges;
};
struct i40iw_post_inline_send {
@@ -220,7 +222,7 @@ struct i40iw_post_inline_send_w_inv {
struct i40iw_rdma_write {
i40iw_sgl lo_sg_list;
- u8 num_lo_sges;
+ u32 num_lo_sges;
struct i40iw_sge rem_addr;
};
@@ -345,7 +347,9 @@ struct i40iw_dev_uk {
struct i40iw_sq_uk_wr_trk_info {
u64 wrid;
- u64 wr_len;
+ u32 wr_len;
+ u8 wqe_size;
+ u8 reserved[3];
};
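
Narrowing wr_len to 32 bits and recording wqe_size at post time keeps the per-WR tracking entry at 16 bytes while letting the poll path read the quanta count back directly (see the addl_wqes changes earlier in this diff) instead of re-deriving it from fragment counts. A C11 sketch of the layout:

#include <assert.h>
#include <stdint.h>

struct sq_wr_trk_info {		/* mirrors the reworked entry above */
	uint64_t wrid;		/* work request id reported in the CQE */
	uint32_t wr_len;	/* total payload length */
	uint8_t  wqe_size;	/* WQE size in bytes, recorded at post time */
	uint8_t  reserved[3];	/* pads the entry back to 16 bytes */
};

static_assert(sizeof(struct sq_wr_trk_info) == 16,
	      "tracking entry should stay a 16-byte record");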
struct i40iw_qp_quanta {
@@ -367,6 +371,8 @@ struct i40iw_qp_uk {
u32 qp_id;
u32 sq_size;
u32 rq_size;
+ u32 max_sq_frag_cnt;
+ u32 max_rq_frag_cnt;
struct i40iw_qp_uk_ops ops;
bool use_srq;
u8 swqe_polarity;
@@ -374,8 +380,6 @@ struct i40iw_qp_uk {
u8 rwqe_polarity;
u8 rq_wqe_size;
u8 rq_wqe_size_multiplier;
- u8 max_sq_frag_cnt;
- u8 max_rq_frag_cnt;
bool deferred_flag;
};
@@ -404,8 +408,9 @@ struct i40iw_qp_uk_init_info {
u32 qp_id;
u32 sq_size;
u32 rq_size;
- u8 max_sq_frag_cnt;
- u8 max_rq_frag_cnt;
+ u32 max_sq_frag_cnt;
+ u32 max_rq_frag_cnt;
+ u32 max_inline_data;
};
@@ -422,7 +427,10 @@ void i40iw_device_init_uk(struct i40iw_dev_uk *dev);
void i40iw_qp_post_wr(struct i40iw_qp_uk *qp);
u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx,
- u8 wqe_size);
+ u8 wqe_size,
+ u32 total_size,
+ u64 wr_id);
u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx);
u64 *i40iw_qp_get_next_srq_wqe(struct i40iw_srq_uk *srq, u32 *wqe_idx);
@@ -434,9 +442,9 @@ enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq);
enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, u64 wr_id,
bool signaled, bool post_sq);
-enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u8 frag_cnt, u8 *wqe_size);
-enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u8 frag_cnt, u8 *wqe_size);
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size);
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size);
enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
u8 *wqe_size);
-enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u8 sge, u8 *shift);
+enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u32 sge, u32 inline_data, u8 *shift);
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 1ceec81bd..0e8db0a35 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -59,7 +59,7 @@
* @action: modify, delete or add
*/
int i40iw_arp_table(struct i40iw_device *iwdev,
- __be32 *ip_addr,
+ u32 *ip_addr,
bool ipv4,
u8 *mac_addr,
u32 action)
@@ -152,7 +152,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
struct net_device *upper_dev;
struct i40iw_device *iwdev;
struct i40iw_handler *hdl;
- __be32 local_ipaddr;
+ u32 local_ipaddr;
hdl = i40iw_find_netdev(event_netdev);
if (!hdl)
@@ -167,11 +167,10 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
switch (event) {
case NETDEV_DOWN:
if (upper_dev)
- local_ipaddr =
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
+ local_ipaddr = ntohl(
+ ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
else
- local_ipaddr = ifa->ifa_address;
- local_ipaddr = ntohl(local_ipaddr);
+ local_ipaddr = ntohl(ifa->ifa_address);
i40iw_manage_arp_cache(iwdev,
netdev->dev_addr,
&local_ipaddr,
@@ -180,11 +179,10 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
return NOTIFY_OK;
case NETDEV_UP:
if (upper_dev)
- local_ipaddr =
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
+ local_ipaddr = ntohl(
+ ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
else
- local_ipaddr = ifa->ifa_address;
- local_ipaddr = ntohl(local_ipaddr);
+ local_ipaddr = ntohl(ifa->ifa_address);
i40iw_manage_arp_cache(iwdev,
netdev->dev_addr,
&local_ipaddr,
@@ -194,12 +192,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
case NETDEV_CHANGEADDR:
/* Add the address to the IP table */
if (upper_dev)
- local_ipaddr =
- ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
+ local_ipaddr = ntohl(
+ ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address);
else
- local_ipaddr = ifa->ifa_address;
+ local_ipaddr = ntohl(ifa->ifa_address);
- local_ipaddr = ntohl(local_ipaddr);
i40iw_manage_arp_cache(iwdev,
netdev->dev_addr,
&local_ipaddr,
@@ -227,7 +224,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
struct net_device *netdev;
struct i40iw_device *iwdev;
struct i40iw_handler *hdl;
- __be32 local_ipaddr6[4];
+ u32 local_ipaddr6[4];
hdl = i40iw_find_netdev(event_netdev);
if (!hdl)
@@ -506,14 +503,19 @@ void i40iw_rem_ref(struct ib_qp *ibqp)
struct cqp_commands_info *cqp_info;
struct i40iw_device *iwdev;
u32 qp_num;
+ unsigned long flags;
iwqp = to_iwqp(ibqp);
- if (!atomic_dec_and_test(&iwqp->refcount))
+ iwdev = iwqp->iwdev;
+ spin_lock_irqsave(&iwdev->qptable_lock, flags);
+ if (!atomic_dec_and_test(&iwqp->refcount)) {
+ spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
return;
+ }
- iwdev = iwqp->iwdev;
qp_num = iwqp->ibqp.qp_num;
iwdev->qp_table[qp_num] = NULL;
+ spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
if (!cqp_request)
return;
@@ -985,21 +987,24 @@ enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
{
struct i40iw_device *iwdev = dev->back_dev;
- enum i40iw_status_code err_code = 0;
int timeout_ret;
i40iw_debug(dev, I40IW_DEBUG_VIRT, "%s[%u] dev %p, iwdev %p\n",
__func__, __LINE__, dev, iwdev);
- atomic_add(2, &iwdev->vchnl_msgs);
+
+ atomic_set(&iwdev->vchnl_msgs, 2);
timeout_ret = wait_event_timeout(iwdev->vchnl_waitq,
(atomic_read(&iwdev->vchnl_msgs) == 1),
I40IW_VCHNL_EVENT_TIMEOUT);
atomic_dec(&iwdev->vchnl_msgs);
if (!timeout_ret) {
i40iw_pr_err("virt channel completion timeout = 0x%x\n", timeout_ret);
- err_code = I40IW_ERR_TIMEOUT;
+ atomic_set(&iwdev->vchnl_msgs, 0);
+ dev->vchnl_up = false;
+ return I40IW_ERR_TIMEOUT;
}
- return err_code;
+ wake_up(&dev->vf_reqs);
+ return 0;
}
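
Seeding vchnl_msgs with atomic_set(2) gives the exchange three observable states: 2 while the request is in flight, 1 once the response handler decrements and wakes vchnl_waitq, and 0 after the waiter's own decrement, at which point vf_reqs is woken and i40iw_vf_clear_to_send admits the next sender. A C11 sketch of the counter states (the response-side decrement is implied by the wait condition, not shown in this hunk):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int vchnl_msgs;

int main(void)
{
	atomic_store(&vchnl_msgs, 2);		/* request posted */
	atomic_fetch_sub(&vchnl_msgs, 1);	/* response handler ran */
	printf("waiter may proceed: %d\n", atomic_load(&vchnl_msgs));
	atomic_fetch_sub(&vchnl_msgs, 1);	/* waiter consumed the reply */
	printf("channel clear: %d\n", atomic_load(&vchnl_msgs));
	return 0;
}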
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 1fe3b84a0..283b64c94 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -63,8 +63,8 @@ static int i40iw_query_device(struct ib_device *ibdev,
ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
props->fw_ver = I40IW_FW_VERSION;
props->device_cap_flags = iwdev->device_cap_flags;
- props->vendor_id = iwdev->vendor_id;
- props->vendor_part_id = iwdev->vendor_part_id;
+ props->vendor_id = iwdev->ldev->pcidev->vendor;
+ props->vendor_part_id = iwdev->ldev->pcidev->device;
props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
props->max_qp = iwdev->max_qp;
@@ -74,11 +74,12 @@ static int i40iw_query_device(struct ib_device *ibdev,
props->max_cqe = iwdev->max_cqe;
props->max_mr = iwdev->max_mr;
props->max_pd = iwdev->max_pd;
- props->max_sge_rd = 1;
+ props->max_sge_rd = I40IW_MAX_SGE_RD;
props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
props->max_qp_init_rd_atom = props->max_qp_rd_atom;
props->atomic_cap = IB_ATOMIC_NONE;
props->max_map_per_fmr = 1;
+ props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
return 0;
}
@@ -120,7 +121,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
props->pkey_tbl_len = 1;
props->active_width = IB_WIDTH_4X;
props->active_speed = 1;
- props->max_msg_sz = 0x80000000;
+ props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
return 0;
}
@@ -437,7 +438,6 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
kfree(iwqp->kqp.wrid_mem);
iwqp->kqp.wrid_mem = NULL;
kfree(iwqp->allocated_buffer);
- iwqp->allocated_buffer = NULL;
}
/**
@@ -521,14 +521,12 @@ static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
enum i40iw_status_code status;
struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
- ukinfo->max_sq_frag_cnt = I40IW_MAX_WQ_FRAGMENT_COUNT;
-
sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);
- status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, &sqshift);
+ status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
if (!status)
- status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, &rqshift);
+ status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, 0, &rqshift);
if (status)
return -ENOSYS;
@@ -609,6 +607,9 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
+ if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
+ init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+
memset(&init_info, 0, sizeof(init_info));
sq_size = init_attr->cap.max_send_wr;
@@ -618,6 +619,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
init_info.qp_uk_init_info.rq_size = rq_size;
init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
+ init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
if (!mem)
@@ -722,8 +724,10 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
iwarp_info = &iwqp->iwarp_info;
iwarp_info->rd_enable = true;
iwarp_info->wr_rdresp_en = true;
- if (!iwqp->user_mode)
+ if (!iwqp->user_mode) {
+ iwarp_info->fast_reg_en = true;
iwarp_info->priv_mode_en = true;
+ }
iwarp_info->ddp_ver = 1;
iwarp_info->rdmap_ver = 1;
@@ -784,6 +788,8 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
return ERR_PTR(err_code);
}
}
+ init_completion(&iwqp->sq_drained);
+ init_completion(&iwqp->rq_drained);
return &iwqp->ibqp;
error:
@@ -1444,6 +1450,167 @@ static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
}
/**
+ * i40iw_hw_alloc_stag - cqp command to allocate stag
+ * @iwdev: iwarp device
+ * @iwmr: iwarp mr pointer
+ */
+static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
+{
+ struct i40iw_allocate_stag_info *info;
+ struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ enum i40iw_status_code status;
+ int err = 0;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.alloc_stag.info;
+ memset(info, 0, sizeof(*info));
+ info->page_size = PAGE_SIZE;
+ info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
+ info->pd_id = iwpd->sc_pd.pd_id;
+ info->total_len = iwmr->length;
+ info->remote_access = true;
+ cqp_info->cqp_cmd = OP_ALLOC_STAG;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
+ cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
+
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status) {
+ err = -ENOMEM;
+ i40iw_pr_err("CQP-OP MR Reg fail");
+ }
+ return err;
+}
+
+/**
+ * i40iw_alloc_mr - register stag for fast memory registration
+ * @pd: ibpd pointer
+ * @mr_type: memory for stag registration
+ * @max_num_sg: max number of pages
+ */
+static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct i40iw_pd *iwpd = to_iwpd(pd);
+ struct i40iw_device *iwdev = to_iwdev(pd->device);
+ struct i40iw_pble_alloc *palloc;
+ struct i40iw_pbl *iwpbl;
+ struct i40iw_mr *iwmr;
+ enum i40iw_status_code status;
+ u32 stag;
+ int err_code = -ENOMEM;
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr)
+ return ERR_PTR(-ENOMEM);
+
+ stag = i40iw_create_stag(iwdev);
+ if (!stag) {
+ err_code = -EOVERFLOW;
+ goto err;
+ }
+ iwmr->stag = stag;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->type = IW_MEMREG_TYPE_MEM;
+ palloc = &iwpbl->pble_alloc;
+ iwmr->page_cnt = max_num_sg;
+ mutex_lock(&iwdev->pbl_mutex);
+ status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
+ mutex_unlock(&iwdev->pbl_mutex);
+ if (status)
+ goto err1;
+
+ if (palloc->level != I40IW_LEVEL_1)
+ goto err2;
+ err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
+ if (err_code)
+ goto err2;
+ iwpbl->pbl_allocated = true;
+ i40iw_add_pdusecount(iwpd);
+ return &iwmr->ibmr;
+err2:
+ i40iw_free_pble(iwdev->pble_rsrc, palloc);
+err1:
+ i40iw_free_stag(iwdev, stag);
+err:
+ kfree(iwmr);
+ return ERR_PTR(err_code);
+}
+
+/**
+ * i40iw_set_page - populate pbl list for fmr
+ * @ibmr: ib mem to access iwarp mr pointer
+ * @addr: page dma address for pbl list
+ */
+static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct i40iw_mr *iwmr = to_iwmr(ibmr);
+ struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+ struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+ u64 *pbl;
+
+ if (unlikely(iwmr->npages == iwmr->page_cnt))
+ return -ENOMEM;
+
+ pbl = (u64 *)palloc->level1.addr;
+ pbl[iwmr->npages++] = cpu_to_le64(addr);
+ return 0;
+}
+
+/**
+ * i40iw_map_mr_sg - map sg list for fmr
+ * @ibmr: ib mem to access iwarp mr pointer
+ * @sg: scatter gather list for fmr
+ * @sg_nents: number of sg pages
+ */
+static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+{
+ struct i40iw_mr *iwmr = to_iwmr(ibmr);
+
+ iwmr->npages = 0;
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
+}
+
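
i40iw_map_mr_sg resets the page cursor and hands the walk to the core's ib_sg_to_pages, which calls i40iw_set_page once per page until page_cnt entries are filled. The callback contract is easy to exercise standalone; a toy walker over a contiguous region, with hypothetical names and addresses:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ   4096u
#define MAX_PAGES 4

static uint64_t pbl[MAX_PAGES];
static unsigned int npages;

/* same contract as the driver callback: nonzero when the PBL is full */
static int set_page(uint64_t addr)
{
	if (npages == MAX_PAGES)
		return -1;
	pbl[npages++] = addr;	/* the driver additionally does cpu_to_le64() */
	return 0;
}

int main(void)
{
	uint64_t base = 0x100000;	/* hypothetical DMA address */

	for (uint64_t off = 0; off < 3 * PAGE_SZ; off += PAGE_SZ)
		if (set_page(base + off))
			break;
	printf("mapped %u pages\n", npages);
	return 0;
}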
+/**
+ * i40iw_drain_sq - drain the send queue
+ * @ibqp: ib qp pointer
+ */
+static void i40iw_drain_sq(struct ib_qp *ibqp)
+{
+ struct i40iw_qp *iwqp = to_iwqp(ibqp);
+ struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+
+ if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
+ wait_for_completion(&iwqp->sq_drained);
+}
+
+/**
+ * i40iw_drain_rq - drain the receive queue
+ * @ibqp: ib qp pointer
+ */
+static void i40iw_drain_rq(struct ib_qp *ibqp)
+{
+ struct i40iw_qp *iwqp = to_iwqp(ibqp);
+ struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+
+ if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
+ wait_for_completion(&iwqp->rq_drained);
+}
+
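
Both drain helpers block only when the ring still reports outstanding work; the matching complete() calls appear in the poll path later in this diff, once the QP has left RTS and its rings empty. The pairing is the standard completion pattern, sketched here with pthreads (initialize the instance before use):

#include <pthread.h>
#include <stdbool.h>

struct drain_completion {
	pthread_mutex_t lock;	/* PTHREAD_MUTEX_INITIALIZER */
	pthread_cond_t cond;	/* PTHREAD_COND_INITIALIZER */
	bool done;
};

/* drain side: wait_for_completion() analogue */
static void drain_wait(struct drain_completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* poll side: complete() analogue, fired once the ring is empty */
static void drain_complete(struct drain_completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}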
+/**
* i40iw_hwreg_mr - send cqp command for memory registration
* @iwdev: iwarp device
* @iwmr: iwarp mr pointer
@@ -1526,14 +1693,16 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
struct i40iw_mr *iwmr;
struct ib_umem *region;
struct i40iw_mem_reg_req req;
- u32 pbl_depth = 0;
+ u64 pbl_depth = 0;
u32 stag = 0;
u16 access;
- u32 region_length;
+ u64 region_length;
bool use_pbles = false;
unsigned long flags;
int err = -ENOSYS;
+ if (length > I40IW_MAX_MR_SIZE)
+ return ERR_PTR(-EINVAL);
region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
if (IS_ERR(region))
return (struct ib_mr *)region;
@@ -1564,7 +1733,7 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
palloc = &iwpbl->pble_alloc;
iwmr->type = req.reg_type;
- iwmr->page_cnt = pbl_depth;
+ iwmr->page_cnt = (u32)pbl_depth;
switch (req.reg_type) {
case IW_MEMREG_TYPE_QP:
@@ -1881,12 +2050,14 @@ static int i40iw_post_send(struct ib_qp *ibqp,
enum i40iw_status_code ret;
int err = 0;
unsigned long flags;
+ bool inv_stag;
iwqp = (struct i40iw_qp *)ibqp;
ukqp = &iwqp->sc_qp.qp_uk;
spin_lock_irqsave(&iwqp->lock, flags);
while (ib_wr) {
+ inv_stag = false;
memset(&info, 0, sizeof(info));
info.wr_id = (u64)(ib_wr->wr_id);
if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
@@ -1896,19 +2067,28 @@ static int i40iw_post_send(struct ib_qp *ibqp,
switch (ib_wr->opcode) {
case IB_WR_SEND:
- if (ib_wr->send_flags & IB_SEND_SOLICITED)
- info.op_type = I40IW_OP_TYPE_SEND_SOL;
- else
- info.op_type = I40IW_OP_TYPE_SEND;
+ /* fall-through */
+ case IB_WR_SEND_WITH_INV:
+ if (ib_wr->opcode == IB_WR_SEND) {
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ info.op_type = I40IW_OP_TYPE_SEND_SOL;
+ else
+ info.op_type = I40IW_OP_TYPE_SEND;
+ } else {
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ info.op_type = I40IW_OP_TYPE_SEND_SOL_INV;
+ else
+ info.op_type = I40IW_OP_TYPE_SEND_INV;
+ }
if (ib_wr->send_flags & IB_SEND_INLINE) {
info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
info.op.inline_send.len = ib_wr->sg_list[0].length;
- ret = ukqp->ops.iw_inline_send(ukqp, &info, rdma_wr(ib_wr)->rkey, false);
+ ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
} else {
info.op.send.num_sges = ib_wr->num_sge;
info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
- ret = ukqp->ops.iw_send(ukqp, &info, rdma_wr(ib_wr)->rkey, false);
+ ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
}
if (ret)
@@ -1936,7 +2116,14 @@ static int i40iw_post_send(struct ib_qp *ibqp,
if (ret)
err = -EIO;
break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ inv_stag = true;
+ /* fall-through */
case IB_WR_RDMA_READ:
+ if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
+ err = -EINVAL;
+ break;
+ }
info.op_type = I40IW_OP_TYPE_RDMA_READ;
info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
@@ -1944,10 +2131,52 @@ static int i40iw_post_send(struct ib_qp *ibqp,
info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
- ret = ukqp->ops.iw_rdma_read(ukqp, &info, false, false);
+ ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
if (ret)
err = -EIO;
break;
+ case IB_WR_LOCAL_INV:
+ info.op_type = I40IW_OP_TYPE_INV_STAG;
+ info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
+ ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
+ if (ret)
+ err = -EIO;
+ break;
+ case IB_WR_REG_MR:
+ {
+ struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
+ int page_shift = ilog2(reg_wr(ib_wr)->mr->page_size);
+ int flags = reg_wr(ib_wr)->access;
+ struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
+ struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
+ struct i40iw_fast_reg_stag_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
+ info.access_rights |= i40iw_get_user_access(flags);
+ info.stag_key = reg_wr(ib_wr)->key & 0xff;
+ info.stag_idx = reg_wr(ib_wr)->key >> 8;
+ info.wr_id = ib_wr->wr_id;
+
+ info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
+ info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
+ info.total_len = iwmr->ibmr.length;
+ info.reg_addr_pa = *(u64 *)palloc->level1.addr;
+ info.first_pm_pbl_index = palloc->level1.idx;
+ info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
+ info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
+
+ if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
+ info.chunk_size = 1;
+
+ if (page_shift == 21)
+ info.page_size = 1; /* 2M page */
+
+ ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
+ if (ret)
+ err = -EIO;
+ break;
+ }
default:
err = -EINVAL;
i40iw_pr_err(" upost_send bad opcode = 0x%x\n",
@@ -2027,6 +2256,7 @@ static int i40iw_poll_cq(struct ib_cq *ibcq,
enum i40iw_status_code ret;
struct i40iw_cq_uk *ukcq;
struct i40iw_sc_qp *qp;
+ struct i40iw_qp *iwqp;
unsigned long flags;
iwcq = (struct i40iw_cq *)ibcq;
@@ -2037,6 +2267,8 @@ static int i40iw_poll_cq(struct ib_cq *ibcq,
ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info, true);
if (ret == I40IW_ERR_QUEUE_EMPTY) {
break;
+ } else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
+ continue;
} else if (ret) {
if (!cqe_count)
cqe_count = -1;
@@ -2044,10 +2276,12 @@ static int i40iw_poll_cq(struct ib_cq *ibcq,
}
entry->wc_flags = 0;
entry->wr_id = cq_poll_info.wr_id;
- if (!cq_poll_info.error)
- entry->status = IB_WC_SUCCESS;
- else
+ if (cq_poll_info.error) {
entry->status = IB_WC_WR_FLUSH_ERR;
+ entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
+ } else {
+ entry->status = IB_WC_SUCCESS;
+ }
switch (cq_poll_info.op_type) {
case I40IW_OP_TYPE_RDMA_WRITE:
@@ -2071,12 +2305,17 @@ static int i40iw_poll_cq(struct ib_cq *ibcq,
break;
}
- entry->vendor_err =
- cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
entry->ex.imm_data = 0;
qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
entry->qp = (struct ib_qp *)qp->back_qp;
entry->src_qp = cq_poll_info.qp_id;
+ iwqp = (struct i40iw_qp *)qp->back_qp;
+ if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
+ if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
+ complete(&iwqp->sq_drained);
+ if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
+ complete(&iwqp->rq_drained);
+ }
entry->byte_len = cq_poll_info.bytes_xfered;
entry++;
cqe_count++;
@@ -2095,13 +2334,16 @@ static int i40iw_req_notify_cq(struct ib_cq *ibcq,
{
struct i40iw_cq *iwcq;
struct i40iw_cq_uk *ukcq;
- enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED;
+ unsigned long flags;
+ enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
iwcq = (struct i40iw_cq *)ibcq;
ukcq = &iwcq->sc_cq.cq_uk;
- if (notify_flags == IB_CQ_NEXT_COMP)
- cq_notify = IW_CQ_COMPL_EVENT;
+ if (notify_flags == IB_CQ_SOLICITED)
+ cq_notify = IW_CQ_COMPL_SOLICITED;
+ spin_lock_irqsave(&iwcq->lock, flags);
ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
+ spin_unlock_irqrestore(&iwcq->lock, flags);
return 0;
}
@@ -2129,62 +2371,130 @@ static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static const char * const i40iw_hw_stat_names[] = {
+ /* 32-bit names */
+ [I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
+ [I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
+ [I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
+ [I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
+ [I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
+ [I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
+ [I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
+ [I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
+ [I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
+ /* 64-bit names */
+ [I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4InOctets",
+ [I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4InPkts",
+ [I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4InReasmRqd",
+ [I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4InMcastPkts",
+ [I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4OutOctets",
+ [I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4OutPkts",
+ [I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4OutSegRqd",
+ [I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4OutMcastPkts",
+ [I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6InOctets",
+ [I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6InPkts",
+ [I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6InReasmRqd",
+ [I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6InMcastPkts",
+ [I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6OutOctets",
+ [I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6OutPkts",
+ [I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6OutSegRqd",
+ [I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6OutMcastPkts",
+ [I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "tcpInSegs",
+ [I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
+ "tcpOutSegs",
+ [I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwInRdmaReads",
+ [I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwInRdmaSends",
+ [I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwInRdmaWrites",
+ [I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwOutRdmaReads",
+ [I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwOutRdmaSends",
+ [I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwOutRdmaWrites",
+ [I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwRdmaBnd",
+ [I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwRdmaInv"
+};
+
+/**
+ * i40iw_alloc_hw_stats - Allocate a hw stats structure
+ * @ibdev: device pointer from stack
+ * @port_num: port number
+ */
+static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
+ I40IW_HW_STAT_INDEX_MAX_64;
+ unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
+
+ BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
+ (I40IW_HW_STAT_INDEX_MAX_32 +
+ I40IW_HW_STAT_INDEX_MAX_64));
+
+ /*
+ * PFs get the default update lifespan, but VFs only update once
+ * per second
+ */
+ if (!dev->is_pf)
+ lifespan = 1000;
+ return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
+ lifespan);
+}
+
/**
- * i40iw_get_protocol_stats - Populates the rdma_stats structure
- * @ibdev: ib dev struct
- * @stats: iw protocol stats struct
+ * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
+ * @ibdev: device pointer from stack
+ * @stats: stats pointer from stack
+ * @port_num: port number
+ * @index: which hw counter the stack is requesting we update
*/
-static int i40iw_get_protocol_stats(struct ib_device *ibdev,
- union rdma_protocol_stats *stats)
+static int i40iw_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u8 port_num, int index)
{
struct i40iw_device *iwdev = to_iwdev(ibdev);
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
- struct timespec curr_time;
- static struct timespec last_rd_time = {0, 0};
- enum i40iw_status_code status = 0;
unsigned long flags;
- curr_time = current_kernel_time();
- memset(stats, 0, sizeof(*stats));
-
if (dev->is_pf) {
spin_lock_irqsave(&devstat->stats_lock, flags);
devstat->ops.iw_hw_stat_read_all(devstat,
&devstat->hw_stats);
spin_unlock_irqrestore(&devstat->stats_lock, flags);
} else {
- if (((u64)curr_time.tv_sec - (u64)last_rd_time.tv_sec) > 1)
- status = i40iw_vchnl_vf_get_pe_stats(dev,
- &devstat->hw_stats);
-
- if (status)
+ if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
return -ENOSYS;
}
- stats->iw.ipInReceives = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] +
- hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXPKTS];
- stats->iw.ipInTruncatedPkts = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] +
- hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC];
- stats->iw.ipInDiscards = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] +
- hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD];
- stats->iw.ipOutNoRoutes = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] +
- hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE];
- stats->iw.ipReasmReqds = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] +
- hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS];
- stats->iw.ipFragCreates = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] +
- hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS];
- stats->iw.ipInMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] +
- hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS];
- stats->iw.ipOutMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] +
- hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS];
- stats->iw.tcpOutSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPTXSEG];
- stats->iw.tcpInSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPRXSEGS];
- stats->iw.tcpRetransSegs = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_TCPRTXSEG];
-
- last_rd_time = curr_time;
- return 0;
+ memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
+
+ return stats->num_counters;
}
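
rdma_alloc_hw_stats_struct lays the counters out in one flat value array, so the name table above places each 64-bit counter at its hardware index plus I40IW_HW_STAT_INDEX_MAX_32, after the whole 32-bit block. A sketch of the offset arithmetic with an illustrative bound:

#include <stdio.h>

#define MAX_32 9	/* illustrative count of 32-bit counters */

/* 64-bit counters follow the 32-bit block in the flat array */
static int stat64_slot(int hw_index)
{
	return hw_index + MAX_32;
}

int main(void)
{
	printf("first 64-bit counter lands at slot %d\n", stat64_slot(0));
	return 0;
}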
/**
@@ -2323,10 +2633,15 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
- iwibdev->ibdev.get_protocol_stats = i40iw_get_protocol_stats;
+ iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
+ iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
iwibdev->ibdev.query_device = i40iw_query_device;
iwibdev->ibdev.create_ah = i40iw_create_ah;
iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
+ iwibdev->ibdev.drain_sq = i40iw_drain_sq;
+ iwibdev->ibdev.drain_rq = i40iw_drain_rq;
+ iwibdev->ibdev.alloc_mr = i40iw_alloc_mr;
+ iwibdev->ibdev.map_mr_sg = i40iw_map_mr_sg;
iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
if (!iwibdev->ibdev.iwcm) {
ib_dealloc_device(&iwibdev->ibdev);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
index 1101f7708..0069be8a5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
@@ -92,6 +92,7 @@ struct i40iw_mr {
struct ib_umem *region;
u16 type;
u32 page_cnt;
+ u32 npages;
u32 stag;
u64 length;
u64 pgaddrmem[MAX_SAVE_PAGE_ADDRS];
@@ -169,5 +170,7 @@ struct i40iw_qp {
struct i40iw_pbl *iwpbl;
struct i40iw_dma_mem q2_ctx_mem;
struct i40iw_dma_mem ietf_mem;
+ struct completion sq_drained;
+ struct completion rq_drained;
};
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.c b/drivers/infiniband/hw/i40iw/i40iw_vf.c
index cb0f18340..e33d48109 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_vf.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_vf.c
@@ -80,6 +80,6 @@ enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
return 0;
}
-struct i40iw_vf_cqp_ops iw_vf_cqp_ops = {
+const struct i40iw_vf_cqp_ops iw_vf_cqp_ops = {
i40iw_manage_vf_pble_bp
};
diff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.h b/drivers/infiniband/hw/i40iw/i40iw_vf.h
index f649f3a62..4359559ec 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_vf.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_vf.h
@@ -57,6 +57,6 @@ enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
u64 scratch,
bool post_sq);
-extern struct i40iw_vf_cqp_ops iw_vf_cqp_ops;
+extern const struct i40iw_vf_cqp_ops iw_vf_cqp_ops;
#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
index 6b68f7890..3041003c9 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
@@ -254,7 +254,7 @@ static void vchnl_pf_send_get_hmc_fcn_resp(struct i40iw_sc_dev *dev,
static void vchnl_pf_send_get_pe_stats_resp(struct i40iw_sc_dev *dev,
u32 vf_id,
struct i40iw_virtchnl_op_buf *vchnl_msg,
- struct i40iw_dev_hw_stats hw_stats)
+ struct i40iw_dev_hw_stats *hw_stats)
{
enum i40iw_status_code ret_code;
u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(struct i40iw_dev_hw_stats) - 1];
@@ -264,7 +264,7 @@ static void vchnl_pf_send_get_pe_stats_resp(struct i40iw_sc_dev *dev,
vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
- *((struct i40iw_dev_hw_stats *)vchnl_msg_resp->iw_chnl_buf) = hw_stats;
+ *((struct i40iw_dev_hw_stats *)vchnl_msg_resp->iw_chnl_buf) = *hw_stats;
ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
if (ret_code)
i40iw_debug(dev, I40IW_DEBUG_VIRT,
@@ -437,11 +437,9 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
return I40IW_SUCCESS;
}
- for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT;
- iw_vf_idx++) {
+ for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
if (!dev->vf_dev[iw_vf_idx]) {
- if (first_avail_iw_vf ==
- I40IW_MAX_PE_ENABLED_VF_COUNT)
+ if (first_avail_iw_vf == I40IW_MAX_PE_ENABLED_VF_COUNT)
first_avail_iw_vf = iw_vf_idx;
continue;
}
@@ -541,7 +539,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
devstat->ops.iw_hw_stat_read_all(devstat, &devstat->hw_stats);
spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
vf_dev->msg_count--;
- vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, devstat->hw_stats);
+ vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &devstat->hw_stats);
break;
default:
i40iw_debug(dev, I40IW_DEBUG_VIRT,
@@ -596,23 +594,25 @@ enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req vchnl_req;
enum i40iw_status_code ret_code;
+ if (!i40iw_vf_clear_to_send(dev))
+ return I40IW_ERR_TIMEOUT;
memset(&vchnl_req, 0, sizeof(vchnl_req));
vchnl_req.dev = dev;
vchnl_req.parm = vchnl_ver;
vchnl_req.parm_len = sizeof(*vchnl_ver);
vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+
ret_code = vchnl_vf_send_get_ver_req(dev, &vchnl_req);
- if (!ret_code) {
- ret_code = i40iw_vf_wait_vchnl_resp(dev);
- if (!ret_code)
- ret_code = vchnl_req.ret_code;
- else
- dev->vchnl_up = false;
- } else {
+ if (ret_code) {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s Send message failed 0x%0x\n", __func__, ret_code);
+ return ret_code;
}
- return ret_code;
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (ret_code)
+ return ret_code;
+ return vchnl_req.ret_code;
}
/**
@@ -626,23 +626,25 @@ enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req vchnl_req;
enum i40iw_status_code ret_code;
+ if (!i40iw_vf_clear_to_send(dev))
+ return I40IW_ERR_TIMEOUT;
memset(&vchnl_req, 0, sizeof(vchnl_req));
vchnl_req.dev = dev;
vchnl_req.parm = hmc_fcn;
vchnl_req.parm_len = sizeof(*hmc_fcn);
vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+
ret_code = vchnl_vf_send_get_hmc_fcn_req(dev, &vchnl_req);
- if (!ret_code) {
- ret_code = i40iw_vf_wait_vchnl_resp(dev);
- if (!ret_code)
- ret_code = vchnl_req.ret_code;
- else
- dev->vchnl_up = false;
- } else {
+ if (ret_code) {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s Send message failed 0x%0x\n", __func__, ret_code);
+ return ret_code;
}
- return ret_code;
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (ret_code)
+ return ret_code;
+ return vchnl_req.ret_code;
}
/**
@@ -660,25 +662,27 @@ enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req vchnl_req;
enum i40iw_status_code ret_code;
+ if (!i40iw_vf_clear_to_send(dev))
+ return I40IW_ERR_TIMEOUT;
memset(&vchnl_req, 0, sizeof(vchnl_req));
vchnl_req.dev = dev;
vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+
ret_code = vchnl_vf_send_add_hmc_objs_req(dev,
&vchnl_req,
rsrc_type,
start_index,
rsrc_count);
- if (!ret_code) {
- ret_code = i40iw_vf_wait_vchnl_resp(dev);
- if (!ret_code)
- ret_code = vchnl_req.ret_code;
- else
- dev->vchnl_up = false;
- } else {
+ if (ret_code) {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s Send message failed 0x%0x\n", __func__, ret_code);
+ return ret_code;
}
- return ret_code;
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (ret_code)
+ return ret_code;
+ return vchnl_req.ret_code;
}
/**
@@ -696,25 +700,27 @@ enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req vchnl_req;
enum i40iw_status_code ret_code;
+ if (!i40iw_vf_clear_to_send(dev))
+ return I40IW_ERR_TIMEOUT;
memset(&vchnl_req, 0, sizeof(vchnl_req));
vchnl_req.dev = dev;
vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+
ret_code = vchnl_vf_send_del_hmc_objs_req(dev,
&vchnl_req,
rsrc_type,
start_index,
rsrc_count);
- if (!ret_code) {
- ret_code = i40iw_vf_wait_vchnl_resp(dev);
- if (!ret_code)
- ret_code = vchnl_req.ret_code;
- else
- dev->vchnl_up = false;
- } else {
+ if (ret_code) {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s Send message failed 0x%0x\n", __func__, ret_code);
+ return ret_code;
}
- return ret_code;
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (ret_code)
+ return ret_code;
+ else
+ return vchnl_req.ret_code;
}
/**
@@ -728,21 +734,23 @@ enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
struct i40iw_virtchnl_req vchnl_req;
enum i40iw_status_code ret_code;
+ if (!i40iw_vf_clear_to_send(dev))
+ return I40IW_ERR_TIMEOUT;
memset(&vchnl_req, 0, sizeof(vchnl_req));
vchnl_req.dev = dev;
vchnl_req.parm = hw_stats;
vchnl_req.parm_len = sizeof(*hw_stats);
vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+
ret_code = vchnl_vf_send_get_pe_stats_req(dev, &vchnl_req);
- if (!ret_code) {
- ret_code = i40iw_vf_wait_vchnl_resp(dev);
- if (!ret_code)
- ret_code = vchnl_req.ret_code;
- else
- dev->vchnl_up = false;
- } else {
+ if (ret_code) {
i40iw_debug(dev, I40IW_DEBUG_VIRT,
"%s Send message failed 0x%0x\n", __func__, ret_code);
+ return ret_code;
}
- return ret_code;
+ ret_code = i40iw_vf_wait_vchnl_resp(dev);
+ if (ret_code)
+ return ret_code;
+ else
+ return vchnl_req.ret_code;
}
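
The five i40iw virtchnl hunks above all apply the same transformation: a clear-to-send guard is added at the top, and the nested send/wait/result ladder is flattened into early returns so each failure path exits immediately. A minimal userspace C sketch of the resulting shape, with hypothetical stand-in names for the i40iw helpers:

#include <stdio.h>

/* Hypothetical status codes standing in for enum i40iw_status_code. */
enum status { OK = 0, ERR_TIMEOUT = 1, ERR_SEND = 2 };

static int clear_to_send(void)        { return 1; }  /* guard stand-in */
static enum status send_req(void)     { return OK; }
static enum status wait_resp(void)    { return OK; }
static enum status req_result(void)   { return OK; }

/* Flattened control flow: every failure returns immediately, so the
 * happy path reads top to bottom without nesting. */
static enum status do_request(void)
{
	enum status ret;

	if (!clear_to_send())
		return ERR_TIMEOUT;

	ret = send_req();
	if (ret) {
		fprintf(stderr, "send failed: %d\n", ret);
		return ret;
	}

	ret = wait_resp();
	if (ret)
		return ret;

	return req_result();
}

int main(void)
{
	printf("do_request -> %d\n", do_request());
	return 0;
}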
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index d68f506c1..9c2e53d28 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -527,7 +527,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
spin_unlock(&tun_qp->tx_lock);
if (ret)
- goto out;
+ goto end;
tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
if (tun_qp->tx_ring[tun_tx_ix].ah)
@@ -596,9 +596,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
wr.wr.send_flags = IB_SEND_SIGNALED;
ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
-out:
- if (ret)
- ib_destroy_ah(ah);
+ if (!ret)
+ return 0;
+ out:
+ spin_lock(&tun_qp->tx_lock);
+ tun_qp->tx_ix_tail++;
+ spin_unlock(&tun_qp->tx_lock);
+ tun_qp->tx_ring[tun_tx_ix].ah = NULL;
+end:
+ ib_destroy_ah(ah);
return ret;
}
@@ -1326,9 +1332,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
+ if (!ret)
+ return 0;
+
+ spin_lock(&sqp->tx_lock);
+ sqp->tx_ix_tail++;
+ spin_unlock(&sqp->tx_lock);
+ sqp->tx_ring[wire_tx_ix].ah = NULL;
out:
- if (ret)
- ib_destroy_ah(ah);
+ ib_destroy_ah(ah);
return ret;
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f014eaf59..42a46078d 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -505,9 +505,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
else
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
- if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
- props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
}
+ if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+ props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
@@ -1601,7 +1601,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
else if (ret == -ENXIO)
pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
else if (ret)
- pr_err("Invalid argumant. Fail to register network rule.\n");
+ pr_err("Invalid argument. Fail to register network rule.\n");
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return ret;
@@ -1704,6 +1704,9 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
int is_bonded = mlx4_is_bonded(dev);
+ if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
+ return ERR_PTR(-EINVAL);
+
if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
(flow_attr->type != IB_FLOW_ATTR_NORMAL))
return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 99451d887..8f7ad0791 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -96,7 +96,7 @@ struct ib_sa_mcmember_data {
u8 scope_join_state;
u8 proxy_join;
u8 reserved[2];
-};
+} __packed __aligned(4);
struct mcast_group {
struct ib_sa_mcmember_data rec;
@@ -747,14 +747,11 @@ static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx
__be64 tid,
union ib_gid *new_mgid)
{
- struct mcast_group *group = NULL, *cur_group;
+ struct mcast_group *group = NULL, *cur_group, *n;
struct mcast_req *req;
- struct list_head *pos;
- struct list_head *n;
mutex_lock(&ctx->mcg_table_lock);
- list_for_each_safe(pos, n, &ctx->mcg_mgid0_list) {
- group = list_entry(pos, struct mcast_group, mgid0_list);
+ list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) {
mutex_lock(&group->lock);
if (group->last_req_tid == tid) {
if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 1eca01ceb..29acda249 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -139,7 +139,7 @@ struct mlx4_ib_mr {
u32 max_pages;
struct mlx4_mr mmr;
struct ib_umem *umem;
- void *pages_alloc;
+ size_t page_map_size;
};
struct mlx4_ib_mw {
@@ -717,9 +717,8 @@ int mlx4_ib_dealloc_mw(struct ib_mw *mw);
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
-int mlx4_ib_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents);
+int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index ce0b5aa8e..5d73989d9 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device,
struct mlx4_ib_mr *mr,
int max_pages)
{
- int size = max_pages * sizeof(u64);
- int add_size;
int ret;
- add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+ /* Ensure that size is aligned to DMA cacheline
+ * requirements.
+ * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+ * so page_map_size will never cross PAGE_SIZE.
+ */
+ mr->page_map_size = roundup(max_pages * sizeof(u64),
+ MLX4_MR_PAGES_ALIGN);
- mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
- if (!mr->pages_alloc)
+ /* Prevent cross page boundary allocation. */
+ mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+ if (!mr->pages)
return -ENOMEM;
- mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
mr->page_map = dma_map_single(device->dma_device, mr->pages,
- size, DMA_TO_DEVICE);
+ mr->page_map_size, DMA_TO_DEVICE);
if (dma_mapping_error(device->dma_device, mr->page_map)) {
ret = -ENOMEM;
@@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device,
}
return 0;
-err:
- kfree(mr->pages_alloc);
+err:
+ free_page((unsigned long)mr->pages);
return ret;
}
@@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
if (mr->pages) {
struct ib_device *device = mr->ibmr.device;
- int size = mr->max_pages * sizeof(u64);
dma_unmap_single(device->dma_device, mr->page_map,
- size, DMA_TO_DEVICE);
- kfree(mr->pages_alloc);
+ mr->page_map_size, DMA_TO_DEVICE);
+ free_page((unsigned long)mr->pages);
mr->pages = NULL;
}
}
@@ -528,9 +530,8 @@ static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-int mlx4_ib_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents)
+int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
{
struct mlx4_ib_mr *mr = to_mmr(ibmr);
int rc;
@@ -538,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr,
mr->npages = 0;
ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
- sizeof(u64) * mr->max_pages,
- DMA_TO_DEVICE);
+ mr->page_map_size, DMA_TO_DEVICE);
- rc = ib_sg_to_pages(ibmr, sg, sg_nents, mlx4_set_page);
+ rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
- sizeof(u64) * mr->max_pages,
- DMA_TO_DEVICE);
+ mr->page_map_size, DMA_TO_DEVICE);
return rc;
}
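
The mlx4 mr.c hunks replace the over-allocated kzalloc()/PTR_ALIGN() pair with a single zeroed page, which is page-aligned by construction, and record the rounded-up byte length in page_map_size for every later dma_map_single()/sync call. A userspace sketch of the sizing arithmetic; the alignment and page-count values are assumptions:

#include <stdint.h>
#include <stdio.h>

#define PAGES_ALIGN 64   /* assumed stand-in for MLX4_MR_PAGES_ALIGN */
#define PAGE_SZ 4096

/* Round a byte count up to the next multiple of a power-of-two alignment. */
static size_t roundup_align(size_t n, size_t align)
{
	return (n + align - 1) & ~(align - 1);
}

int main(void)
{
	int max_pages = 511;   /* assumed MLX4_MAX_FAST_REG_PAGES-style cap */
	size_t page_map_size = roundup_align(max_pages * sizeof(uint64_t),
					     PAGES_ALIGN);

	/* A zeroed page never straddles a page boundary, so this check is
	 * what makes the single-page allocation safe. */
	printf("page_map_size = %zu, fits in one page: %s\n", page_map_size,
	       page_map_size <= PAGE_SZ ? "yes" : "no");
	return 0;
}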
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index fd9753476..8db8405c1 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -362,7 +362,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_RC:
return sizeof (struct mlx4_wqe_ctrl_seg) +
- sizeof (struct mlx4_wqe_atomic_seg) +
+ sizeof (struct mlx4_wqe_masked_atomic_seg) +
sizeof (struct mlx4_wqe_raddr_seg);
case MLX4_IB_QPT_SMI:
case MLX4_IB_QPT_GSI:
@@ -419,7 +419,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
}
static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
- enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp)
+ enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp,
+ bool shrink_wqe)
{
int s;
@@ -477,7 +478,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
* We set WQE size to at least 64 bytes, this way stamping
* invalidates each WQE.
*/
- if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
+ if (shrink_wqe && dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
qp->sq_signal_bits && BITS_PER_LONG == 64 &&
type != MLX4_IB_QPT_SMI && type != MLX4_IB_QPT_GSI &&
!(type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_SMI |
@@ -642,6 +643,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
{
int qpn;
int err;
+ struct ib_qp_cap backup_cap;
struct mlx4_ib_sqp *sqp;
struct mlx4_ib_qp *qp;
enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type;
@@ -775,7 +777,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
goto err;
}
- err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
+ memcpy(&backup_cap, &init_attr->cap, sizeof(backup_cap));
+ err = set_kernel_sq_size(dev, &init_attr->cap,
+ qp_type, qp, true);
if (err)
goto err;
@@ -787,9 +791,20 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
*qp->db.db = 0;
}
- if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, &qp->buf, gfp)) {
- err = -ENOMEM;
- goto err_db;
+ if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
+ &qp->buf, gfp)) {
+ memcpy(&init_attr->cap, &backup_cap,
+ sizeof(backup_cap));
+ err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
+ qp, false);
+ if (err)
+ goto err_db;
+
+ if (mlx4_buf_alloc(dev->dev, qp->buf_size,
+ PAGE_SIZE * 2, &qp->buf, gfp)) {
+ err = -ENOMEM;
+ goto err_db;
+ }
}
err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift,
@@ -1176,8 +1191,10 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
{
err = create_qp_common(to_mdev(pd->device), pd, init_attr,
udata, 0, &qp, gfp);
- if (err)
+ if (err) {
+ kfree(qp);
return ERR_PTR(err);
+ }
qp->ibqp.qp_num = qp->mqp.qpn;
qp->xrcdn = xrcdn;
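
create_qp_common() now asks mlx4_buf_alloc() for the whole buffer contiguously (alignment equal to qp->buf_size) and, only if that fails, recomputes a smaller SQ with shrink_wqe disabled and retries with the old PAGE_SIZE * 2 alignment; the _mlx4_ib_create_qp() hunk also frees the qp on the error path. A generic prefer-large-then-fall-back sketch, with plain malloc standing in for mlx4_buf_alloc():

#include <stdlib.h>
#include <stdio.h>

/* Hypothetical two-step allocator: prefer the large contiguous layout,
 * fall back to a recomputed smaller layout if the first attempt fails. */
static void *alloc_buf(size_t preferred, size_t fallback, size_t *chosen)
{
	void *p = malloc(preferred);   /* first attempt at full size */
	if (p) {
		*chosen = preferred;
		return p;
	}
	p = malloc(fallback);          /* recompute + retry */
	if (p)
		*chosen = fallback;
	return p;
}

int main(void)
{
	size_t got = 0;
	void *buf = alloc_buf(1 << 20, 1 << 16, &got);

	if (!buf) {
		fprintf(stderr, "both attempts failed\n");
		return 1;
	}
	printf("allocated %zu bytes\n", got);
	free(buf);
	return 0;
}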
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index a00ba4418..9c0e67bd2 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -822,7 +822,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
int eqn;
int err;
- if (entries < 0)
+ if (entries < 0 ||
+ (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
return ERR_PTR(-EINVAL);
if (check_cq_create_flags(attr->flags))
@@ -879,7 +880,10 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
cq->mcq.irqn = irqn;
- cq->mcq.comp = mlx5_ib_cq_comp;
+ if (context)
+ cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
+ else
+ cq->mcq.comp = mlx5_ib_cq_comp;
cq->mcq.event = mlx5_ib_cq_event;
INIT_LIST_HEAD(&cq->wc_list);
@@ -1165,11 +1169,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
return -ENOSYS;
}
- if (entries < 1)
+ if (entries < 1 ||
+ entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+ mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
+ entries,
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
return -EINVAL;
+ }
entries = roundup_pow_of_two(entries + 1);
- if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
+ if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
return -EINVAL;
if (entries == ibcq->cqe + 1)
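
Both mlx5 CQ paths now bound the requested entry count by the device's log_max_cq_sz capability before rounding, and the post-roundup_pow_of_two(entries + 1) check tolerates exactly one extra slot. A sketch of the arithmetic, with an assumed capability value:

#include <stdio.h>

/* Smallest power of two >= n (n > 0), standing in for roundup_pow_of_two(). */
static unsigned roundup_pow_of_two(unsigned n)
{
	unsigned p = 1;
	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned log_max_cq_sz = 16;   /* assumed device capability */
	unsigned max = 1u << log_max_cq_sz;
	unsigned entries = 60000;

	if (entries < 1 || entries > max) {
		fprintf(stderr, "wrong entries number %u, max %u\n",
			entries, max);
		return 1;
	}
	entries = roundup_pow_of_two(entries + 1);
	if (entries > max + 1)         /* one extra slot is tolerated */
		return 1;
	printf("using %u CQEs\n", entries);
	return 0;
}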
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 1534af113..364aab9f3 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -121,7 +121,7 @@ static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
pma_cnt_ext->port_xmit_data =
cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
transmitted_ib_multicast.octets) >> 2);
- pma_cnt_ext->port_xmit_data =
+ pma_cnt_ext->port_rcv_data =
cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
received_ib_multicast.octets) >> 2);
pma_cnt_ext->port_xmit_packets =
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 6ad0489cb..b48ad8531 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -38,6 +38,9 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
+#if defined(CONFIG_X86)
+#include <asm/pat.h>
+#endif
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
@@ -517,6 +520,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_UD_TSO;
}
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(dev->mdev, scatter_fcs))
+ props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
+
+ if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
+ props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+
props->vendor_part_id = mdev->pdev->device;
props->hw_ver = mdev->pdev->revision;
@@ -908,7 +918,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
- resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+ if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+ resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
resp.cache_line_size = L1_CACHE_BYTES;
resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
@@ -981,7 +992,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (field_avail(typeof(resp), cqe_version, udata->outlen))
resp.response_length += sizeof(resp.cqe_version);
- if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+ /*
+ * We don't want to expose information from the PCI bar that is located
+ * after 4096 bytes, so if the arch only supports larger pages, let's
+ * pretend we don't support reading the HCA's core clock. This is also
+ * forced by mmap function.
+ */
+ if (PAGE_SIZE <= 4096 &&
+ field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
resp.comp_mask |=
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
resp.hca_core_clock_offset =
@@ -1068,38 +1086,89 @@ static int get_index(unsigned long offset)
return get_arg(offset);
}
+static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
+{
+ switch (cmd) {
+ case MLX5_IB_MMAP_WC_PAGE:
+ return "WC";
+ case MLX5_IB_MMAP_REGULAR_PAGE:
+ return "best effort WC";
+ case MLX5_IB_MMAP_NC_PAGE:
+ return "NC";
+ default:
+ return NULL;
+ }
+}
+
+static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
+ struct vm_area_struct *vma, struct mlx5_uuar_info *uuari)
+{
+ int err;
+ unsigned long idx;
+ phys_addr_t pfn, pa;
+ pgprot_t prot;
+
+ switch (cmd) {
+ case MLX5_IB_MMAP_WC_PAGE:
+/* Some architectures don't support WC memory */
+#if defined(CONFIG_X86)
+ if (!pat_enabled())
+ return -EPERM;
+#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
+ return -EPERM;
+#endif
+ /* fall through */
+ case MLX5_IB_MMAP_REGULAR_PAGE:
+ /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
+ prot = pgprot_writecombine(vma->vm_page_prot);
+ break;
+ case MLX5_IB_MMAP_NC_PAGE:
+ prot = pgprot_noncached(vma->vm_page_prot);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ idx = get_index(vma->vm_pgoff);
+ if (idx >= uuari->num_uars)
+ return -EINVAL;
+
+ pfn = uar_index2pfn(dev, uuari->uars[idx].index);
+ mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
+
+ vma->vm_page_prot = prot;
+ err = io_remap_pfn_range(vma, vma->vm_start, pfn,
+ PAGE_SIZE, vma->vm_page_prot);
+ if (err) {
+ mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
+ err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
+ return -EAGAIN;
+ }
+
+ pa = pfn << PAGE_SHIFT;
+ mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
+ vma->vm_start, &pa);
+
+ return 0;
+}
+
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
struct mlx5_uuar_info *uuari = &context->uuari;
unsigned long command;
- unsigned long idx;
phys_addr_t pfn;
command = get_command(vma->vm_pgoff);
switch (command) {
+ case MLX5_IB_MMAP_WC_PAGE:
+ case MLX5_IB_MMAP_NC_PAGE:
case MLX5_IB_MMAP_REGULAR_PAGE:
- if (vma->vm_end - vma->vm_start != PAGE_SIZE)
- return -EINVAL;
-
- idx = get_index(vma->vm_pgoff);
- if (idx >= uuari->num_uars)
- return -EINVAL;
-
- pfn = uar_index2pfn(dev, uuari->uars[idx].index);
- mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
- (unsigned long long)pfn);
-
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start, pfn,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
- vma->vm_start,
- (unsigned long long)pfn << PAGE_SHIFT);
- break;
+ return uar_mmap(dev, command, vma, uuari);
case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
return -ENOSYS;
@@ -1108,7 +1177,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
- if (vma->vm_flags & (VM_WRITE | VM_EXEC))
+ if (vma->vm_flags & VM_WRITE)
return -EPERM;
/* Don't expose to user-space information it shouldn't have */
@@ -1438,7 +1507,8 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
if (!ft) {
ft = mlx5_create_auto_grouped_flow_table(ns, priority,
num_entries,
- num_groups);
+ num_groups,
+ 0);
if (!IS_ERR(ft)) {
prio->refcount = 0;
@@ -1739,7 +1809,7 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+ return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}
@@ -1807,14 +1877,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
break;
case MLX5_DEV_EVENT_PORT_DOWN:
+ case MLX5_DEV_EVENT_PORT_INITIALIZED:
ibev.event = IB_EVENT_PORT_ERR;
port = (u8)param;
break;
- case MLX5_DEV_EVENT_PORT_INITIALIZED:
- /* not used by ULPs */
- return;
-
case MLX5_DEV_EVENT_LID_CHANGE:
ibev.event = IB_EVENT_LID_CHANGE;
port = (u8)param;
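
The mmap rework dispatches on a command packed into vm_pgoff: get_command() takes the high bits, get_index() the low bits, and uar_mmap() then picks write-combining, best-effort write-combining, or non-cached page protection per command. A sketch of the packing, assuming the 8-bit split used by MLX5_IB_MMAP_CMD_SHIFT:

#include <stdio.h>

#define CMD_SHIFT 8                      /* assumed MLX5_IB_MMAP_CMD_SHIFT */
#define CMD_MASK  ((1u << CMD_SHIFT) - 1)

/* Pack/unpack the (command, index) pair carried in mmap()'s pgoff. */
static unsigned long pack(unsigned cmd, unsigned idx)
{
	return ((unsigned long)cmd << CMD_SHIFT) | (idx & CMD_MASK);
}
static unsigned get_command(unsigned long pgoff) { return pgoff >> CMD_SHIFT; }
static unsigned get_index(unsigned long pgoff)   { return pgoff & CMD_MASK; }

int main(void)
{
	unsigned long pgoff = pack(2 /* e.g. MLX5_IB_MMAP_WC_PAGE */, 5);

	printf("cmd=%u idx=%u\n", get_command(pgoff), get_index(pgoff));
	return 0;
}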
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index b46c25542..c4a982582 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -70,6 +70,8 @@ enum {
enum mlx5_ib_mmap_cmd {
MLX5_IB_MMAP_REGULAR_PAGE = 0,
MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
+ MLX5_IB_MMAP_WC_PAGE = 2,
+ MLX5_IB_MMAP_NC_PAGE = 3,
/* 5 is chosen in order to be compatible with old versions of libmlx5 */
MLX5_IB_MMAP_CORE_CLOCK = 5,
};
@@ -356,6 +358,7 @@ enum mlx5_ib_qp_flags {
MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5,
/* QP uses 1 as its source QP number */
MLX5_IB_QP_SQPN_QP1 = 1 << 6,
+ MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
};
struct mlx5_umr_wr {
@@ -712,9 +715,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
-int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents);
+int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad_hdr *in, size_t in_mad_size,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 4d5bff151..8cf2ce505 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1751,26 +1751,33 @@ done:
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
struct scatterlist *sgl,
- unsigned short sg_nents)
+ unsigned short sg_nents,
+ unsigned int *sg_offset_p)
{
struct scatterlist *sg = sgl;
struct mlx5_klm *klms = mr->descs;
+ unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
u32 lkey = mr->ibmr.pd->local_dma_lkey;
int i;
- mr->ibmr.iova = sg_dma_address(sg);
+ mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
mr->ibmr.length = 0;
mr->ndescs = sg_nents;
for_each_sg(sgl, sg, sg_nents, i) {
if (unlikely(i > mr->max_descs))
break;
- klms[i].va = cpu_to_be64(sg_dma_address(sg));
- klms[i].bcount = cpu_to_be32(sg_dma_len(sg));
+ klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
+ klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
klms[i].key = cpu_to_be32(lkey);
mr->ibmr.length += sg_dma_len(sg);
+
+ sg_offset = 0;
}
+ if (sg_offset_p)
+ *sg_offset_p = sg_offset;
+
return i;
}
@@ -1788,9 +1795,8 @@ static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents)
+int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
{
struct mlx5_ib_mr *mr = to_mmr(ibmr);
int n;
@@ -1802,9 +1808,10 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
DMA_TO_DEVICE);
if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
- n = mlx5_ib_sg_to_klms(mr, sg, sg_nents);
+ n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
else
- n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);
+ n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
+ mlx5_set_page);
ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
mr->desc_size * mr->max_descs,
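
mlx5_ib_sg_to_klms() now honors an optional byte offset into the first scatterlist element: the offset shifts the first VA, shrinks the first byte count, and is then zeroed so later entries are unaffected, with the final value written back through sg_offset_p. A standalone sketch of that first-element-only adjustment; the struct layout here is hypothetical:

#include <stdio.h>

struct sg { unsigned long addr; unsigned len; };

/* Apply 'offset' to the first element only, as the KLM builder does:
 * every later element starts at offset 0. */
static void build(const struct sg *sgl, int n, unsigned offset)
{
	for (int i = 0; i < n; i++) {
		printf("klm[%d]: va=%#lx bcount=%u\n",
		       i, sgl[i].addr + offset, sgl[i].len - offset);
		offset = 0;   /* consumed by the first entry */
	}
}

int main(void)
{
	struct sg sgl[] = { { 0x1000, 4096 }, { 0x3000, 4096 } };

	build(sgl, 2, 512);
	return 0;
}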
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8dee8bc1e..ce0a7ab35 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -235,6 +235,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
qp->rq.max_gs = 0;
qp->rq.wqe_cnt = 0;
qp->rq.wqe_shift = 0;
+ cap->max_recv_wr = 0;
+ cap->max_recv_sge = 0;
} else {
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
@@ -1028,6 +1030,7 @@ static int get_rq_pas_size(void *qpc)
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, void *qpin)
{
+ struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
__be64 *pas;
__be64 *qp_pas;
void *in;
@@ -1051,6 +1054,9 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
+ if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
+ MLX5_SET(rqc, rqc, scatter_fcs, 1);
+
wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, end_padding_mode,
@@ -1136,11 +1142,12 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
if (qp->rq.wqe_cnt) {
+ rq->base.container_mibqp = qp;
+
err = create_raw_packet_qp_rq(dev, rq, in);
if (err)
goto err_destroy_sq;
- rq->base.container_mibqp = qp;
err = create_raw_packet_qp_tir(dev, rq, tdn);
if (err)
@@ -1252,6 +1259,19 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return -EOPNOTSUPP;
}
+ if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs\n");
+ return -EOPNOTSUPP;
+ }
+ if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
+ !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
+ mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+ qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
+ }
+
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
@@ -1833,13 +1853,15 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
const struct ib_ah_attr *ah,
struct mlx5_qp_path *path, u8 port, int attr_mask,
- u32 path_flags, const struct ib_qp_attr *attr)
+ u32 path_flags, const struct ib_qp_attr *attr,
+ bool alt)
{
enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port);
int err;
if (attr_mask & IB_QP_PKEY_INDEX)
- path->pkey_index = attr->pkey_index;
+ path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
+ attr->pkey_index);
if (ah->ah_flags & IB_AH_GRH) {
if (ah->grh.sgid_index >=
@@ -1859,9 +1881,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
ah->grh.sgid_index);
path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4;
} else {
- path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
- path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 :
- 0;
+ path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
+ path->fl_free_ar |=
+ (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
path->rlid = cpu_to_be16(ah->dlid);
path->grh_mlid = ah->src_path_bits & 0x7f;
if (ah->ah_flags & IB_AH_GRH)
@@ -1885,7 +1907,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
path->port = port;
if (attr_mask & IB_QP_TIMEOUT)
- path->ackto_lt = attr->timeout << 3;
+ path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
return modify_raw_packet_eth_prio(dev->mdev,
@@ -2246,7 +2268,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
if (attr_mask & IB_QP_PKEY_INDEX)
- context->pri_path.pkey_index = attr->pkey_index;
+ context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
/* todo implement counter_index functionality */
@@ -2259,7 +2281,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_AV) {
err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
- attr_mask, 0, attr);
+ attr_mask, 0, attr, false);
if (err)
goto out;
}
@@ -2270,7 +2292,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (attr_mask & IB_QP_ALT_PATH) {
err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
&context->alt_path,
- attr->alt_port_num, attr_mask, 0, attr);
+ attr->alt_port_num,
+ attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
+ 0, attr, true);
if (err)
goto out;
}
@@ -3308,10 +3332,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
return MLX5_FENCE_MODE_SMALL_AND_FENCE;
else
return fence;
-
- } else {
- return 0;
+ } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
+ return MLX5_FENCE_MODE_FENCE;
}
+
+ return 0;
}
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
@@ -3995,11 +4020,12 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
- qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
+ qp_attr->alt_pkey_index =
+ be16_to_cpu(context->alt_path.pkey_index);
qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
}
- qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
+ qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
qp_attr->port_num = context->pri_path.port;
/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
@@ -4061,17 +4087,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
qp_attr->cap.max_recv_sge = qp->rq.max_gs;
if (!ibqp->uobject) {
- qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
+ qp_attr->cap.max_send_wr = qp->sq.max_post;
qp_attr->cap.max_send_sge = qp->sq.max_gs;
+ qp_init_attr->qp_context = ibqp->qp_context;
} else {
qp_attr->cap.max_send_wr = 0;
qp_attr->cap.max_send_sge = 0;
}
- /* We don't support inline sends for kernel QPs (yet), and we
- * don't know what userspace's value should be.
- */
- qp_attr->cap.max_inline_data = 0;
+ qp_init_attr->qp_type = ibqp->qp_type;
+ qp_init_attr->recv_cq = ibqp->recv_cq;
+ qp_init_attr->send_cq = ibqp->send_cq;
+ qp_init_attr->srq = ibqp->srq;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
qp_init_attr->cap = qp_attr->cap;
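
The pkey hunks change pri_path.pkey_index from a raw byte to a big-endian 16-bit field, so the query side must byte-swap with be16_to_cpu() instead of masking with 0x7f, which would truncate any index above 127. A userspace round-trip sketch using htons()/ntohs() as stand-ins for cpu_to_be16()/be16_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* htons/ntohs */

int main(void)
{
	uint16_t pkey_index = 0x0123;
	uint16_t wire = htons(pkey_index);   /* what lands in pri_path */

	/* Reading it back must also byte-swap; masking with 0x7f, as the
	 * old query path did, would keep only the low 7 bits. */
	printf("stored=%#06x restored=%#06x\n", wire, ntohs(wire));
	return 0;
}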
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 92914539e..2b27d1351 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -356,7 +356,7 @@ static int nes_netdev_stop(struct net_device *netdev)
/**
* nes_nic_send
*/
-static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
+static bool nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
@@ -413,7 +413,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb));
kfree_skb(skb);
nesvnic->tx_sw_dropped++;
- return NETDEV_TX_LOCKED;
+ return false;
}
set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
bus_address = pci_map_single(nesdev->pcidev, skb->data + NES_FIRST_FRAG_SIZE,
@@ -454,8 +454,7 @@ static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc);
nesnic->sq_head++;
nesnic->sq_head &= nesnic->sq_size - 1;
-
- return NETDEV_TX_OK;
+ return true;
}
@@ -479,7 +478,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
u32 tso_wqe_length;
u32 curr_tcp_seq;
u32 wqe_count=1;
- u32 send_rc;
struct iphdr *iph;
__le16 *wqe_fragment_length;
u32 nr_frags;
@@ -670,13 +668,11 @@ tso_sq_no_longer_full:
skb_linearize(skb);
skb_set_transport_header(skb, hoffset);
skb_set_network_header(skb, nhoffset);
- send_rc = nes_nic_send(skb, netdev);
- if (send_rc != NETDEV_TX_OK)
+ if (!nes_nic_send(skb, netdev))
return NETDEV_TX_OK;
}
} else {
- send_rc = nes_nic_send(skb, netdev);
- if (send_rc != NETDEV_TX_OK)
+ if (!nes_nic_send(skb, netdev))
return NETDEV_TX_OK;
}
@@ -686,7 +682,7 @@ tso_sq_no_longer_full:
nes_write32(nesdev->regs+NES_WQE_ALLOC,
(wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
return NETDEV_TX_OK;
}
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index 6d3a169c0..37331e2fd 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -44,6 +44,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -903,70 +904,15 @@ void nes_clc(unsigned long parm)
*/
void nes_dump_mem(unsigned int dump_debug_level, void *addr, int length)
{
- char xlate[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
- 'a', 'b', 'c', 'd', 'e', 'f'};
- char *ptr;
- char hex_buf[80];
- char ascii_buf[20];
- int num_char;
- int num_ascii;
- int num_hex;
-
if (!(nes_debug_level & dump_debug_level)) {
return;
}
- ptr = addr;
if (length > 0x100) {
nes_debug(dump_debug_level, "Length truncated from %x to %x\n", length, 0x100);
length = 0x100;
}
- nes_debug(dump_debug_level, "Address=0x%p, length=0x%x (%d)\n", ptr, length, length);
-
- memset(ascii_buf, 0, 20);
- memset(hex_buf, 0, 80);
-
- num_ascii = 0;
- num_hex = 0;
- for (num_char = 0; num_char < length; num_char++) {
- if (num_ascii == 8) {
- ascii_buf[num_ascii++] = ' ';
- hex_buf[num_hex++] = '-';
- hex_buf[num_hex++] = ' ';
- }
-
- if (*ptr < 0x20 || *ptr > 0x7e)
- ascii_buf[num_ascii++] = '.';
- else
- ascii_buf[num_ascii++] = *ptr;
- hex_buf[num_hex++] = xlate[((*ptr & 0xf0) >> 4)];
- hex_buf[num_hex++] = xlate[*ptr & 0x0f];
- hex_buf[num_hex++] = ' ';
- ptr++;
-
- if (num_ascii >= 17) {
- /* output line and reset */
- nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf);
- memset(ascii_buf, 0, 20);
- memset(hex_buf, 0, 80);
- num_ascii = 0;
- num_hex = 0;
- }
- }
+ nes_debug(dump_debug_level, "Address=0x%p, length=0x%x (%d)\n", addr, length, length);
- /* output the rest */
- if (num_ascii) {
- while (num_ascii < 17) {
- if (num_ascii == 8) {
- hex_buf[num_hex++] = ' ';
- hex_buf[num_hex++] = ' ';
- }
- hex_buf[num_hex++] = ' ';
- hex_buf[num_hex++] = ' ';
- hex_buf[num_hex++] = ' ';
- num_ascii++;
- }
-
- nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf);
- }
+ print_hex_dump(KERN_ERR, PFX, DUMP_PREFIX_NONE, 16, 1, addr, length, true);
}
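
nes_dump_mem() drops its hand-rolled hex/ASCII formatter in favor of the kernel's print_hex_dump() helper, called here with 16 one-byte groups per row plus an ASCII column. A userspace approximation of that output format:

#include <ctype.h>
#include <stdio.h>

/* Rough userspace analogue of print_hex_dump(..., 16, 1, buf, len, true):
 * 16 single-byte hex groups per row, followed by the ASCII rendering. */
static void hex_dump(const unsigned char *buf, size_t len)
{
	for (size_t i = 0; i < len; i += 16) {
		for (size_t j = 0; j < 16; j++)
			if (i + j < len)
				printf("%02x ", buf[i + j]);
			else
				printf("   ");
		printf(" ");
		for (size_t j = 0; j < 16 && i + j < len; j++)
			putchar(isprint(buf[i + j]) ? buf[i + j] : '.');
		putchar('\n');
	}
}

int main(void)
{
	unsigned char data[] = "nes_dump_mem via print_hex_dump";

	hex_dump(data, sizeof(data));
	return 0;
}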
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index fba69a39a..464d6da5f 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -402,15 +402,14 @@ static int nes_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-static int nes_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents)
+static int nes_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
{
struct nes_mr *nesmr = to_nesmr(ibmr);
nesmr->npages = 0;
- return ib_sg_to_pages(ibmr, sg, sg_nents, nes_set_page);
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, nes_set_page);
}
/**
@@ -981,7 +980,7 @@ static int nes_setup_mmap_qp(struct nes_qp *nesqp, struct nes_vnic *nesvnic,
/**
* nes_free_qp_mem() is to free up the qp's pci_alloc_consistent() memory.
*/
-static inline void nes_free_qp_mem(struct nes_device *nesdev,
+static void nes_free_qp_mem(struct nes_device *nesdev,
struct nes_qp *nesqp, int virt_wqs)
{
unsigned long flags;
@@ -1315,6 +1314,8 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
nes_debug(NES_DBG_QP, "Invalid QP type: %d\n", init_attr->qp_type);
return ERR_PTR(-EINVAL);
}
+ init_completion(&nesqp->sq_drained);
+ init_completion(&nesqp->rq_drained);
nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
init_timer(&nesqp->terminate_timer);
@@ -3452,6 +3453,29 @@ out:
return err;
}
+/**
+ * nes_drain_sq - drain sq
+ * @ibqp: pointer to ibqp
+ */
+static void nes_drain_sq(struct ib_qp *ibqp)
+{
+ struct nes_qp *nesqp = to_nesqp(ibqp);
+
+ if (nesqp->hwqp.sq_tail != nesqp->hwqp.sq_head)
+ wait_for_completion(&nesqp->sq_drained);
+}
+
+/**
+ * nes_drain_rq - drain rq
+ * @ibqp: pointer to ibqp
+ */
+static void nes_drain_rq(struct ib_qp *ibqp)
+{
+ struct nes_qp *nesqp = to_nesqp(ibqp);
+
+ if (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)
+ wait_for_completion(&nesqp->rq_drained);
+}
/**
* nes_poll_cq
@@ -3582,6 +3606,13 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
}
}
+ if (nesqp->iwarp_state > NES_CQP_QP_IWARP_STATE_RTS) {
+ if (nesqp->hwqp.sq_tail == nesqp->hwqp.sq_head)
+ complete(&nesqp->sq_drained);
+ if (nesqp->hwqp.rq_tail == nesqp->hwqp.rq_head)
+ complete(&nesqp->rq_drained);
+ }
+
entry->wr_id = wrid;
entry++;
cqe_count++;
@@ -3754,6 +3785,8 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.req_notify_cq = nes_req_notify_cq;
nesibdev->ibdev.post_send = nes_post_send;
nesibdev->ibdev.post_recv = nes_post_recv;
+ nesibdev->ibdev.drain_sq = nes_drain_sq;
+ nesibdev->ibdev.drain_rq = nes_drain_rq;
nesibdev->ibdev.iwcm = kzalloc(sizeof(*nesibdev->ibdev.iwcm), GFP_KERNEL);
if (nesibdev->ibdev.iwcm == NULL) {
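
The new nes drain_sq/drain_rq verbs block on a completion that nes_poll_cq() signals once the queue's tail catches up with its head after the QP has left RTS. A pthread sketch of the same handshake, with a condition variable standing in for the kernel completion:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static unsigned head = 4, tail;

/* Consumer side: retire work until the queue is empty, then signal --
 * the role the complete() calls in nes_poll_cq() play. */
static void *poller(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (tail != head)
		tail++;
	pthread_cond_signal(&drained);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, poller, NULL);

	/* drain side: wait only if work is still outstanding. */
	pthread_mutex_lock(&lock);
	while (tail != head)
		pthread_cond_wait(&drained, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("queue drained (head=%u tail=%u)\n", head, tail);
	return 0;
}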
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index 70290883d..e02a5662d 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -189,6 +189,8 @@ struct nes_qp {
u8 pau_pending;
u8 pau_state;
__u64 nesuqp_addr;
+ struct completion sq_drained;
+ struct completion rq_drained;
};
struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index a8496a18e..b1a3d91fe 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -3081,13 +3081,12 @@ static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-int ocrdma_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents)
+int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
{
struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);
mr->npages = 0;
- return ib_sg_to_pages(ibmr, sg, sg_nents, ocrdma_set_page);
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 8b517fd36..704ef1e92 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -122,8 +122,7 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
-int ocrdma_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents);
+int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
#endif /* __OCRDMA_VERBS_H__ */
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 24f4a782e..382466a90 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -824,10 +824,7 @@ static int mmap_piobufs(struct vm_area_struct *vma,
phys = dd->physaddr + piobufs;
#if defined(__powerpc__)
- /* There isn't a generic way to specify writethrough mappings */
- pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
- pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
- pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
/*
@@ -2181,6 +2178,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
switch (cmd.type) {
case QIB_CMD_ASSIGN_CTXT:
+ if (rcd) {
+ ret = -EINVAL;
+ goto bail;
+ }
+
ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
if (ret)
goto bail;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 82d7c4bf5..ce4034071 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -1308,21 +1308,6 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
SYM_LSB(IntMask, fldname##17IntMask)), \
.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
-static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
- INTR_AUTO_P(SDmaInt),
- INTR_AUTO_P(SDmaProgressInt),
- INTR_AUTO_P(SDmaIdleInt),
- INTR_AUTO_P(SDmaCleanupDone),
- INTR_AUTO_C(RcvUrg),
- INTR_AUTO_P(ErrInt),
- INTR_AUTO(ErrInt), /* non-port-specific errs */
- INTR_AUTO(AssertGPIOInt),
- INTR_AUTO_P(SendDoneInt),
- INTR_AUTO(SendBufAvailInt),
- INTR_AUTO_C(RcvAvail),
- { .mask = 0, .sz = 0 }
-};
-
#define TXSYMPTOM_AUTO_P(fldname) \
{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
.msg = #fldname, .sz = sizeof(#fldname) }
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 3f062f0dd..f253111e6 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1090,7 +1090,7 @@ void qib_free_devdata(struct qib_devdata *dd)
qib_dbg_ibdev_exit(&dd->verbs_dev);
#endif
free_percpu(dd->int_counter);
- ib_dealloc_device(&dd->verbs_dev.rdi.ibdev);
+ rvt_dealloc_device(&dd->verbs_dev.rdi);
}
u64 qib_int_counter(struct qib_devdata *dd)
@@ -1183,7 +1183,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
bail:
if (!list_empty(&dd->list))
list_del_init(&dd->list);
- ib_dealloc_device(&dd->verbs_dev.rdi.ibdev);
+ rvt_dealloc_device(&dd->verbs_dev.rdi);
return ERR_PTR(ret);
}
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 0bd18375d..d2ac29861 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -1172,11 +1172,13 @@ static int pma_get_classportinfo(struct ib_pma_mad *pmp,
* Set the most significant bit of CM2 to indicate support for
* congestion statistics
*/
- p->reserved[0] = dd->psxmitwait_supported << 7;
+ ib_set_cpi_capmask2(p,
+ dd->psxmitwait_supported <<
+ (31 - IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE));
/*
* Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
*/
- p->resp_time_value = 18;
+ ib_set_cpi_resp_time(p, 18);
return reply((struct ib_smp *) pmp);
}
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 4758a3801..6abe1c621 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -144,13 +144,7 @@ int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
addr = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
-#if defined(__powerpc__)
- /* There isn't a generic way to specify writethrough mappings */
- dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU);
-#else
dd->kregbase = ioremap_nocache(addr, len);
-#endif
-
if (!dd->kregbase)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 9088e26d3..444028a35 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -230,7 +230,7 @@ bail:
*
* Return 1 if constructed; otherwise, return 0.
*/
-int qib_make_rc_req(struct rvt_qp *qp)
+int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
{
struct qib_qp_priv *priv = qp->priv;
struct qib_ibdev *dev = to_idev(qp->ibqp.device);
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index a5f07a64b..b67779256 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -739,7 +739,7 @@ void qib_do_send(struct rvt_qp *qp)
struct qib_qp_priv *priv = qp->priv;
struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- int (*make_req)(struct rvt_qp *qp);
+ int (*make_req)(struct rvt_qp *qp, unsigned long *flags);
unsigned long flags;
if ((qp->ibqp.qp_type == IB_QPT_RC ||
@@ -781,7 +781,7 @@ void qib_do_send(struct rvt_qp *qp)
qp->s_hdrwords = 0;
spin_lock_irqsave(&qp->s_lock, flags);
}
- } while (make_req(qp));
+ } while (make_req(qp, &flags));
spin_unlock_irqrestore(&qp->s_lock, flags);
}
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 7bdbc79ce..1d61bd04f 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -45,7 +45,7 @@
*
* Return 1 if constructed; otherwise, return 0.
*/
-int qib_make_uc_req(struct rvt_qp *qp)
+int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
{
struct qib_qp_priv *priv = qp->priv;
struct qib_other_headers *ohdr;
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index d9502137d..846e6c726 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -238,7 +238,7 @@ drop:
*
* Return 1 if constructed; otherwise, return 0.
*/
-int qib_make_ud_req(struct rvt_qp *qp)
+int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
{
struct qib_qp_priv *priv = qp->priv;
struct qib_other_headers *ohdr;
@@ -294,7 +294,7 @@ int qib_make_ud_req(struct rvt_qp *qp)
this_cpu_inc(ibp->pmastats->n_unicast_xmit);
lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
if (unlikely(lid == ppd->lid)) {
- unsigned long flags;
+ unsigned long tflags = *flags;
/*
* If DMAs are in progress, we can't generate
* a completion for the loopback packet since
@@ -307,10 +307,10 @@ int qib_make_ud_req(struct rvt_qp *qp)
goto bail;
}
qp->s_cur = next_cur;
- local_irq_save(flags);
- spin_unlock_irqrestore(&qp->s_lock, flags);
+ spin_unlock_irqrestore(&qp->s_lock, tflags);
qib_ud_loopback(qp, wqe);
- spin_lock_irqsave(&qp->s_lock, flags);
+ spin_lock_irqsave(&qp->s_lock, tflags);
+ *flags = tflags;
qib_send_complete(qp, wqe, IB_WC_SUCCESS);
goto done;
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 4b76a8d59..4f878151f 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -159,6 +159,7 @@ struct qib_other_headers {
} at;
__be32 imm_data;
__be32 aeth;
+ __be32 ieth;
struct ib_atomic_eth atomic_eth;
} u;
} __packed;
@@ -430,11 +431,11 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
void qib_send_rc_ack(struct rvt_qp *qp);
-int qib_make_rc_req(struct rvt_qp *qp);
+int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags);
-int qib_make_uc_req(struct rvt_qp *qp);
+int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags);
-int qib_make_ud_req(struct rvt_qp *qp);
+int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags);
int qib_register_ib_device(struct qib_devdata *);
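
The qib make_req callbacks now receive the caller's saved IRQ flags by pointer, so qib_make_ud_req() can drop qp->s_lock around the loopback send using the caller's real flags and then hand the reacquired state back, instead of saving a second, local copy with local_irq_save(). A sketch of the drop-and-restore-through-a-pointer pattern; the lock helpers are stand-ins:

#include <stdio.h>

/* Stand-ins for spin_lock_irqsave()/spin_unlock_irqrestore(). */
static void lock_irqsave(unsigned long *flags)     { *flags = 0xabc; }
static void unlock_irqrestore(unsigned long flags) { (void)flags; }

/* The callee temporarily drops the caller's lock: it must use (and
 * update) the caller's flags, which is why they arrive by pointer. */
static int make_req(unsigned long *flags)
{
	unsigned long tflags = *flags;

	unlock_irqrestore(tflags);     /* work done outside the lock */
	printf("loopback send outside the lock\n");
	lock_irqsave(&tflags);
	*flags = tflags;               /* hand the new state back */
	return 0;
}

int main(void)
{
	unsigned long flags;

	lock_irqsave(&flags);
	make_req(&flags);
	unlock_irqrestore(flags);
	return 0;
}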
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 7209fbc03..a0b6ebee4 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -36,7 +36,6 @@
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
@@ -112,10 +111,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int i;
int flags;
dma_addr_t pa;
- DEFINE_DMA_ATTRS(attrs);
-
- if (dmasync)
- dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
if (!can_do_mlock())
return -EPERM;
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index b1ffc8b4a..6ca6fa80d 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -525,6 +525,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
return PTR_ERR(task);
}
+ set_user_nice(task, MIN_NICE);
cpu = cpumask_first(cpumask_of_node(rdi->dparms.node));
kthread_bind(task, cpu);
wake_up_process(task);
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 0ff765bfd..0f4d4500f 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -124,11 +124,13 @@ static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
int count)
{
int m, i = 0;
+ struct rvt_dev_info *dev = ib_to_rvt(pd->device);
mr->mapsz = 0;
m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
for (; i < m; i++) {
- mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
+ mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
+ dev->dparms.node);
if (!mr->map[i]) {
rvt_deinit_mregion(mr);
return -ENOMEM;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index a0ecf08b2..41ba7e9ca 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -369,8 +369,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
/* wrap to first map page, invert bit 0 */
offset = qpt->incr | ((offset & 1) ^ 1);
}
- /* there can be no bits at shift and below */
- WARN_ON(offset & (rdi->dparms.qos_shift - 1));
+ /* there can be no set bits in low-order QoS bits */
+ WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
qpn = mk_qpn(qpt, map, offset);
}
@@ -397,6 +397,7 @@ static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
unsigned n;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
rvt_put_ss(&qp->s_rdma_read_sge);
@@ -431,7 +432,7 @@ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
if (qp->ibqp.qp_type != IB_QPT_RC)
return;
- for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
+ for (n = 0; n < rvt_max_atomic(rdi); n++) {
struct rvt_ack_entry *e = &qp->s_ack_queue[n];
if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
@@ -501,6 +502,12 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
*/
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
enum ib_qp_type type)
+ __releases(&qp->s_lock)
+ __releases(&qp->s_hlock)
+ __releases(&qp->r_lock)
+ __acquires(&qp->r_lock)
+ __acquires(&qp->s_hlock)
+ __acquires(&qp->s_lock)
{
if (qp->state != IB_QPS_RESET) {
qp->state = IB_QPS_RESET;
@@ -569,7 +576,6 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
qp->s_ssn = 1;
qp->s_lsn = 0;
qp->s_mig_state = IB_MIG_MIGRATED;
- memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
qp->r_head_ack_queue = 0;
qp->s_tail_ack_queue = 0;
qp->s_num_rd_atomic = 0;
@@ -653,9 +659,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (gfp == GFP_NOIO)
swq = __vmalloc(
(init_attr->cap.max_send_wr + 1) * sz,
- gfp, PAGE_KERNEL);
+ gfp | __GFP_ZERO, PAGE_KERNEL);
else
- swq = vmalloc_node(
+ swq = vzalloc_node(
(init_attr->cap.max_send_wr + 1) * sz,
rdi->dparms.node);
if (!swq)
@@ -677,6 +683,16 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
goto bail_swq;
RCU_INIT_POINTER(qp->next, NULL);
+ if (init_attr->qp_type == IB_QPT_RC) {
+ qp->s_ack_queue =
+ kzalloc_node(
+ sizeof(*qp->s_ack_queue) *
+ rvt_max_atomic(rdi),
+ gfp,
+ rdi->dparms.node);
+ if (!qp->s_ack_queue)
+ goto bail_qp;
+ }
/*
* Driver needs to set up its private QP structure and do any
@@ -706,9 +722,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
qp->r_rq.wq = __vmalloc(
sizeof(struct rvt_rwq) +
qp->r_rq.size * sz,
- gfp, PAGE_KERNEL);
+ gfp | __GFP_ZERO, PAGE_KERNEL);
else
- qp->r_rq.wq = vmalloc_node(
+ qp->r_rq.wq = vzalloc_node(
sizeof(struct rvt_rwq) +
qp->r_rq.size * sz,
rdi->dparms.node);
@@ -831,13 +847,13 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
case IB_QPT_SMI:
case IB_QPT_GSI:
case IB_QPT_UD:
- qp->allowed_ops = IB_OPCODE_UD_SEND_ONLY & RVT_OPCODE_QP_MASK;
+ qp->allowed_ops = IB_OPCODE_UD;
break;
case IB_QPT_RC:
- qp->allowed_ops = IB_OPCODE_RC_SEND_ONLY & RVT_OPCODE_QP_MASK;
+ qp->allowed_ops = IB_OPCODE_RC;
break;
case IB_QPT_UC:
- qp->allowed_ops = IB_OPCODE_UC_SEND_ONLY & RVT_OPCODE_QP_MASK;
+ qp->allowed_ops = IB_OPCODE_UC;
break;
default:
ret = ERR_PTR(-EINVAL);
@@ -859,6 +875,7 @@ bail_driver_priv:
rdi->driver_f.qp_priv_free(rdi, qp);
bail_qp:
+ kfree(qp->s_ack_queue);
kfree(qp);
bail_swq:
@@ -1286,6 +1303,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp)
vfree(qp->r_rq.wq);
vfree(qp->s_wq);
rdi->driver_f.qp_priv_free(rdi, qp);
+ kfree(qp->s_ack_queue);
kfree(qp);
return 0;
}
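
rdmavt stops reserving a worst-case s_ack_queue inside every QP: only RC QPs allocate it, sized by rvt_max_atomic(rdi) with kzalloc_node() on the device's NUMA node, and both the create-error unwind and rvt_destroy_qp() free it. A userspace sketch of the conditional, sized allocation, with calloc standing in for kzalloc_node and an assumed max_atomic value:

#include <stdlib.h>
#include <stdio.h>

struct ack_entry { int opcode; unsigned psn; };

struct qp {
	struct ack_entry *s_ack_queue;   /* NULL for non-RC QPs */
};

/* Allocate the queue only for QP types that need it, sized by the
 * device's maximum outstanding RDMA-atomic operations. */
static int qp_init(struct qp *qp, int is_rc, unsigned max_atomic)
{
	qp->s_ack_queue = NULL;
	if (!is_rc)
		return 0;
	qp->s_ack_queue = calloc(max_atomic, sizeof(*qp->s_ack_queue));
	return qp->s_ack_queue ? 0 : -1;
}

static void qp_destroy(struct qp *qp)
{
	free(qp->s_ack_queue);           /* free(NULL) is a no-op */
	qp->s_ack_queue = NULL;
}

int main(void)
{
	struct qp qp;

	if (qp_init(&qp, 1, 16))
		return 1;
	printf("RC ack queue allocated\n");
	qp_destroy(&qp);
	return 0;
}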
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 6caf5272b..30c4fda7a 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -106,6 +106,19 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports)
}
EXPORT_SYMBOL(rvt_alloc_device);
+/**
+ * rvt_dealloc_device - deallocate rdi
+ * @rdi: structure to free
+ *
+ * Free a structure allocated with rvt_alloc_device()
+ */
+void rvt_dealloc_device(struct rvt_dev_info *rdi)
+{
+ kfree(rdi->ports);
+ ib_dealloc_device(&rdi->ibdev);
+}
+EXPORT_SYMBOL(rvt_dealloc_device);
+
static int rvt_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw)
@@ -488,9 +501,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
!rdi->driver_f.quiesce_qp ||
!rdi->driver_f.notify_error_qp ||
!rdi->driver_f.mtu_from_qp ||
- !rdi->driver_f.mtu_to_path_mtu ||
- !rdi->driver_f.shut_down_port ||
- !rdi->driver_f.cap_mask_chg)
+ !rdi->driver_f.mtu_to_path_mtu)
return -EINVAL;
break;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index caec8e9c4..4f7d9b48d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -92,6 +92,9 @@ enum {
IPOIB_FLAG_UMCAST = 10,
IPOIB_STOP_NEIGH_GC = 11,
IPOIB_NEIGH_TBL_FLUSH = 12,
+ IPOIB_FLAG_DEV_ADDR_SET = 13,
+ IPOIB_FLAG_DEV_ADDR_CTRL = 14,
+ IPOIB_FLAG_GOING_DOWN = 15,
IPOIB_MAX_BACKOFF_SECONDS = 16,
@@ -392,6 +395,7 @@ struct ipoib_dev_priv {
struct ipoib_ethtool_st ethtool;
struct timer_list poll_timer;
unsigned max_send_sge;
+ bool sm_fullmember_sendonly_support;
};
struct ipoib_ah {
@@ -476,6 +480,7 @@ void ipoib_reap_ah(struct work_struct *work);
void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
+int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index c8ed53562..951d9abcc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -766,7 +766,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
} else {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
++tx->tx_head;
if (++priv->tx_outstanding == ipoib_sendq_size) {
@@ -1486,6 +1486,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
{
struct net_device *dev = to_net_dev(d);
int ret;
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
+ return -EPERM;
if (!rtnl_trylock())
return restart_syscall();
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index a53fa5fc0..1502199c8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -36,6 +36,27 @@
#include "ipoib.h"
+struct ipoib_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+};
+
+#define IPOIB_NETDEV_STAT(m) { \
+ .stat_string = #m, \
+ .stat_offset = offsetof(struct rtnl_link_stats64, m) }
+
+static const struct ipoib_stats ipoib_gstrings_stats[] = {
+ IPOIB_NETDEV_STAT(rx_packets),
+ IPOIB_NETDEV_STAT(tx_packets),
+ IPOIB_NETDEV_STAT(rx_bytes),
+ IPOIB_NETDEV_STAT(tx_bytes),
+ IPOIB_NETDEV_STAT(tx_errors),
+ IPOIB_NETDEV_STAT(rx_dropped),
+ IPOIB_NETDEV_STAT(tx_dropped)
+};
+
+#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
+
static void ipoib_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
@@ -92,11 +113,57 @@ static int ipoib_set_coalesce(struct net_device *dev,
return 0;
}
+static void ipoib_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ int i;
+ struct net_device_stats *net_stats = &dev->stats;
+ u8 *p = (u8 *)net_stats;
+
+ for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++)
+ data[i] = *(u64 *)(p + ipoib_gstrings_stats[i].stat_offset);
+
+}
+static void ipoib_get_strings(struct net_device __always_unused *dev,
+ u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ipoib_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ case ETH_SS_TEST:
+ default:
+ break;
+ }
+}
+static int ipoib_get_sset_count(struct net_device __always_unused *dev,
+ int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return IPOIB_GLOBAL_STATS_LEN;
+ case ETH_SS_TEST:
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
static const struct ethtool_ops ipoib_ethtool_ops = {
.get_drvinfo = ipoib_get_drvinfo,
.get_coalesce = ipoib_get_coalesce,
.set_coalesce = ipoib_set_coalesce,
+ .get_strings = ipoib_get_strings,
+ .get_ethtool_stats = ipoib_get_ethtool_stats,
+ .get_sset_count = ipoib_get_sset_count,
};
void ipoib_set_ethtool_ops(struct net_device *dev)
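
The new ethtool trio publishes the counters through ethtool -S: get_sset_count() reports the table length, get_strings() copies the names, and get_ethtool_stats() walks a name/offsetof() table over the stats structure. A self-contained sketch of that offset-table technique:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stats { uint64_t rx_packets, tx_packets, rx_bytes, tx_bytes; };

struct stat_desc { const char *name; size_t offset; };

#define STAT(m) { #m, offsetof(struct stats, m) }

static const struct stat_desc descs[] = {
	STAT(rx_packets), STAT(tx_packets), STAT(rx_bytes), STAT(tx_bytes),
};

int main(void)
{
	struct stats s = { 10, 7, 5120, 3584 };
	const uint8_t *base = (const uint8_t *)&s;

	/* Same walk ipoib_get_ethtool_stats() performs over its table. */
	for (size_t i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
		printf("%-12s %llu\n", descs[i].name,
		       (unsigned long long)*(const uint64_t *)(base + descs[i].offset));
	return 0;
}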
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index f0e55e47e..dc6d241b9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -51,8 +51,6 @@ MODULE_PARM_DESC(data_debug_level,
"Enable data path debug tracing if > 0");
#endif
-static DEFINE_MUTEX(pkey_mutex);
-
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct ib_ah_attr *attr)
{
@@ -637,7 +635,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
} else {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
address->last_send = priv->tx_head;
++priv->tx_head;
@@ -999,6 +997,106 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv)
return 0;
}
+/*
+ * Returns true if the device address of the ipoib interface has changed and the
+ * new address is a valid one (i.e. present in the gid table); returns false otherwise.
+ */
+static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
+{
+ union ib_gid search_gid;
+ union ib_gid gid0;
+ union ib_gid *netdev_gid;
+ int err;
+ u16 index;
+ u8 port;
+ bool ret = false;
+
+ netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
+ if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
+ return false;
+
+ netif_addr_lock_bh(priv->dev);
+
+ /* The subnet prefix may have changed, update it now so we won't have
+ * to do it later
+ */
+ priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
+ netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
+ search_gid.global.subnet_prefix = gid0.global.subnet_prefix;
+
+ search_gid.global.interface_id = priv->local_gid.global.interface_id;
+
+ netif_addr_unlock_bh(priv->dev);
+
+ err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
+ priv->dev, &port, &index);
+
+ netif_addr_lock_bh(priv->dev);
+
+ if (search_gid.global.interface_id !=
+ priv->local_gid.global.interface_id)
+ /* There was a change while we were looking up the gid, bail
+ * here and let the next work item sort this out
+ */
+ goto out;
+
+ /* The next section of code needs some background:
+ * Per IB spec the port GUID can't change if the HCA is powered on.
+ * The port GUID is the basis for GID at index 0, which is the basis for
+ * the default device address of an ipoib interface.
+ *
+ * so it seems the flow should be:
+ * if user_changed_dev_addr && gid in gid tbl
+ * set bit dev_addr_set
+ * return true
+ * else
+ * return false
+ *
+ * The issue is that there are devices that don't follow the spec
+ * and change the port GUID when the HCA is powered, so in order
+ * not to break userspace applications, we need to check whether the
+ * user wanted to control the device address, and we assume that if
+ * the user sets the device address back to be based on GID index 0,
+ * they no longer wish to control it.
+ *
+ * If the user doesn't control the device address,
+ * IPOIB_FLAG_DEV_ADDR_SET is set, and ib_find_gid failed, it means
+ * the port GUID has changed and the GID at index 0 has changed,
+ * so we need to change priv->local_gid and priv->dev->dev_addr
+ * to reflect the new GID.
+ */
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ if (!err && port == priv->port) {
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+ if (index == 0)
+ clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
+ &priv->flags);
+ else
+ set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
+ ret = true;
+ } else {
+ ret = false;
+ }
+ } else {
+ if (!err && port == priv->port) {
+ ret = true;
+ } else {
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
+ memcpy(&priv->local_gid, &gid0,
+ sizeof(priv->local_gid));
+ memcpy(priv->dev->dev_addr + 4, &gid0,
+ sizeof(priv->local_gid));
+ ret = true;
+ }
+ }
+ }
+
+out:
+ netif_addr_unlock_bh(priv->dev);
+
+ return ret;
+}
+
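The (union ib_gid *)(priv->dev->dev_addr + 4) cast above relies on the 20-byte IPoIB hardware address layout (INFINIBAND_ALEN), sketched informally here for reference:

    /* IPoIB link-layer address, 20 bytes (informal sketch, not a kernel struct):
     *   bytes  0..3   flags + queue pair number (QPN)
     *   bytes  4..11  GID subnet prefix
     *   bytes 12..19  GID interface id
     * so dev_addr + 4 is exactly the 16-byte port GID.
     */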
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
enum ipoib_flush_level level,
int nesting)
@@ -1020,6 +1118,9 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
level != IPOIB_FLUSH_HEAVY) {
+ /* Make sure the dev_addr is set even if not flushing */
+ if (level == IPOIB_FLUSH_LIGHT)
+ ipoib_dev_addr_changed_valid(priv);
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
return;
}
@@ -1031,7 +1132,8 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
update_parent_pkey(priv);
else
update_child_pkey(priv);
- }
+ } else if (level == IPOIB_FLUSH_LIGHT)
+ ipoib_dev_addr_changed_valid(priv);
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
return;
}
@@ -1083,7 +1185,8 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
if (level >= IPOIB_FLUSH_NORMAL)
ipoib_ib_dev_up(dev);
- ipoib_mcast_restart_task(&priv->restart_task);
+ if (ipoib_dev_addr_changed_valid(priv))
+ ipoib_mcast_restart_task(&priv->restart_task);
}
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 80807d6e5..5f58c41ef 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -99,6 +99,7 @@ static struct net_device *ipoib_get_net_dev_by_params(
struct ib_device *dev, u8 port, u16 pkey,
const union ib_gid *gid, const struct sockaddr *addr,
void *client_data);
+static int ipoib_set_mac(struct net_device *dev, void *addr);
static struct ib_client ipoib_client = {
.name = "ipoib",
@@ -117,6 +118,8 @@ int ipoib_open(struct net_device *dev)
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+ priv->sm_fullmember_sendonly_support = false;
+
if (ipoib_ib_dev_open(dev)) {
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
@@ -629,6 +632,77 @@ void ipoib_mark_paths_invalid(struct net_device *dev)
spin_unlock_irq(&priv->lock);
}
+struct classport_info_context {
+ struct ipoib_dev_priv *priv;
+ struct completion done;
+ struct ib_sa_query *sa_query;
+};
+
+static void classport_info_query_cb(int status, struct ib_class_port_info *rec,
+ void *context)
+{
+ struct classport_info_context *cb_ctx = context;
+ struct ipoib_dev_priv *priv;
+
+ WARN_ON(!context);
+
+ priv = cb_ctx->priv;
+
+ if (status || !rec) {
+ pr_debug("device: %s failed query classport_info status: %d\n",
+ priv->dev->name, status);
+ /* keeps the default, will try next mcast_restart */
+ priv->sm_fullmember_sendonly_support = false;
+ goto out;
+ }
+
+ if (ib_get_cpi_capmask2(rec) &
+ IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT) {
+ pr_debug("device: %s enabled fullmember-sendonly for sendonly MCG\n",
+ priv->dev->name);
+ priv->sm_fullmember_sendonly_support = true;
+ } else {
+ pr_debug("device: %s disabled fullmember-sendonly for sendonly MCG\n",
+ priv->dev->name);
+ priv->sm_fullmember_sendonly_support = false;
+ }
+
+out:
+ complete(&cb_ctx->done);
+}
+
+int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv)
+{
+ struct classport_info_context *callback_context;
+ int ret;
+
+ callback_context = kmalloc(sizeof(*callback_context), GFP_KERNEL);
+ if (!callback_context)
+ return -ENOMEM;
+
+ callback_context->priv = priv;
+ init_completion(&callback_context->done);
+
+ ret = ib_sa_classport_info_rec_query(&ipoib_sa_client,
+ priv->ca, priv->port, 3000,
+ GFP_KERNEL,
+ classport_info_query_cb,
+ callback_context,
+ &callback_context->sa_query);
+ if (ret < 0) {
+ pr_info("%s failed to send ib_sa_classport_info query, ret: %d\n",
+ priv->dev->name, ret);
+ kfree(callback_context);
+ return ret;
+ }
+
+ /* wait for the callback to finish before returning */
+ wait_for_completion(&callback_context->done);
+ kfree(callback_context);
+
+ return ret;
+}
+
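ipoib_check_sm_sendonly_fullmember_support() turns the asynchronous SA classport_info query into a synchronous call by blocking on a completion that the callback signals. A generic sketch of that pattern, with hypothetical names that are not part of the patch:

    struct sync_ctx {
        struct completion done;
        int status;
    };

    static void async_done_cb(int status, void *context)
    {
        struct sync_ctx *ctx = context;

        ctx->status = status;
        complete(&ctx->done);   /* wake the waiting caller */
    }

    /* caller: init_completion(&ctx->done), start the async operation,
     * then wait_for_completion(&ctx->done) and inspect ctx->status. */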
void ipoib_flush_paths(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -1036,7 +1110,7 @@ static void ipoib_timeout(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev);
ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
- jiffies_to_msecs(jiffies - dev->trans_start));
+ jiffies_to_msecs(jiffies - dev_trans_start(dev)));
ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
netif_queue_stopped(dev),
priv->tx_head, priv->tx_tail);
@@ -1132,7 +1206,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
neigh = NULL;
goto out_unlock;
}
- neigh->alive = jiffies;
+
+ if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
+ neigh->alive = jiffies;
goto out_unlock;
}
}
@@ -1649,6 +1725,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
.ndo_get_vf_config = ipoib_get_vf_config,
.ndo_get_vf_stats = ipoib_get_vf_stats,
.ndo_set_vf_guid = ipoib_set_vf_guid,
+ .ndo_set_mac_address = ipoib_set_mac,
};
static const struct net_device_ops ipoib_netdev_ops_vf = {
@@ -1771,6 +1848,70 @@ int ipoib_add_umcast_attr(struct net_device *dev)
return device_create_file(&dev->dev, &dev_attr_umcast);
}
+static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
+{
+ struct ipoib_dev_priv *child_priv;
+ struct net_device *netdev = priv->dev;
+
+ netif_addr_lock_bh(netdev);
+
+ memcpy(&priv->local_gid.global.interface_id,
+ &gid->global.interface_id,
+ sizeof(gid->global.interface_id));
+ memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
+ clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+
+ netif_addr_unlock_bh(netdev);
+
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ down_read(&priv->vlan_rwsem);
+ list_for_each_entry(child_priv, &priv->child_intfs, list)
+ set_base_guid(child_priv, gid);
+ up_read(&priv->vlan_rwsem);
+ }
+}
+
+static int ipoib_check_lladdr(struct net_device *dev,
+ struct sockaddr_storage *ss)
+{
+ union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
+ int ret = 0;
+
+ netif_addr_lock_bh(dev);
+
+ /* Make sure the QPN, reserved and subnet prefix match the current
+ * lladdr, it also makes sure the lladdr is unicast.
+ */
+ if (memcmp(dev->dev_addr, ss->__data,
+ 4 + sizeof(gid->global.subnet_prefix)) ||
+ gid->global.interface_id == 0)
+ ret = -EINVAL;
+
+ netif_addr_unlock_bh(dev);
+
+ return ret;
+}
+
+static int ipoib_set_mac(struct net_device *dev, void *addr)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct sockaddr_storage *ss = addr;
+ int ret;
+
+ if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
+ return -EBUSY;
+
+ ret = ipoib_check_lladdr(dev, ss);
+ if (ret)
+ return ret;
+
+ set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
+
+ queue_work(ipoib_workqueue, &priv->flush_light);
+
+ return 0;
+}
+
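Taken together, ipoib_check_lladdr() and set_base_guid() let userspace rewrite only the interface-id half of the GID: the new link-layer address is accepted only if its first 12 bytes (QPN, reserved bytes and subnet prefix) match the current address and its interface id is non-zero, after which the new GUID is propagated to all child interfaces and a light flush is queued to re-validate the address against the GID table.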
static ssize_t create_child(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -1894,6 +2035,7 @@ static struct net_device *ipoib_add_port(const char *format,
goto device_init_failed;
} else
memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
result = ipoib_dev_init(priv->dev, hca, port);
if (result < 0) {
@@ -2001,6 +2143,9 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
ib_unregister_event_handler(&priv->event_handler);
flush_workqueue(ipoib_workqueue);
+ /* mark interface in the middle of destruction */
+ set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);
+
rtnl_lock();
dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
rtnl_unlock();
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 25889311b..d3394b6ad 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -64,6 +64,9 @@ struct ipoib_mcast_iter {
unsigned int send_only;
};
+/* join state that allows creating an MCG with a sendonly member request */
+#define SENDONLY_FULLMEMBER_JOIN 8
+
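For context, the MCMemberRecord JoinState field is a bitmask in the InfiniBand spec; the names below are illustrative, not kernel symbols:

    enum {
        JOIN_FULL_MEMBER          = 1 << 0, /* 1 */
        JOIN_NON_MEMBER           = 1 << 1, /* 2 */
        JOIN_SENDONLY_NON_MEMBER  = 1 << 2, /* 4, used by the old #if 0 code */
        JOIN_SENDONLY_FULL_MEMBER = 1 << 3, /* 8 = SENDONLY_FULLMEMBER_JOIN */
    };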
/*
* This should be called with the priv->lock held
*/
@@ -326,12 +329,23 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
carrier_on_task);
struct ib_port_attr attr;
+ int ret;
if (ib_query_port(priv->ca, priv->port, &attr) ||
attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
return;
}
+ /*
+ * Check if we can create sendonly MCGs with the sendonly-fullmember
+ * join state. This is done here, after successfully joining the
+ * broadcast group, because the broadcast group must always be joined
+ * first and is always re-joined if the SM changes substantially.
+ */
+ ret = ipoib_check_sm_sendonly_fullmember_support(priv);
+ if (ret < 0)
+ pr_debug("%s failed query sm support for sendonly-fullmember (ret: %d)\n",
+ priv->dev->name, ret);
/*
* Take rtnl_lock to avoid racing with ipoib_stop() and
@@ -515,22 +529,20 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
/*
- * Send-only IB Multicast joins do not work at the core
- * IB layer yet, so we can't use them here. However,
- * we are emulating an Ethernet multicast send, which
- * does not require a multicast subscription and will
- * still send properly. The most appropriate thing to
+ * Send-only IB Multicast joins work at the core IB layer but
+ * require specific SM support; we can use them here only if the
+ * current SM supports the feature. If it does not, we emulate an
+ * Ethernet multicast send, which does not require a multicast
+ * subscription and will still send properly. The most appropriate thing to
* do is to create the group if it doesn't exist as that
* most closely emulates the behavior, from a user space
- * application perspecitive, of Ethernet multicast
- * operation. For now, we do a full join, maybe later
- * when the core IB layers support send only joins we
- * will use them.
+ * application perspective, of Ethernet multicast operation.
*/
-#if 0
- if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- rec.join_state = 4;
-#endif
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
+ priv->sm_fullmember_sendonly_support)
+ /* SM supports sendonly-fullmember, otherwise fall back to a full-member join */
+ rec.join_state = SENDONLY_FULLMEMBER_JOIN;
}
spin_unlock_irq(&priv->lock);
@@ -570,11 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
return;
}
priv->local_lid = port_attr.lid;
+ netif_addr_lock_bh(dev);
- if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid, NULL))
- ipoib_warn(priv, "ib_query_gid() failed\n");
- else
- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ netif_addr_unlock_bh(dev);
+ return;
+ }
+ netif_addr_unlock_bh(dev);
spin_lock_irq(&priv->lock);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b809c373e..1e7cbbaa1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -307,5 +307,8 @@ void ipoib_event(struct ib_event_handler *handler,
queue_work(ipoib_workqueue, &priv->flush_normal);
} else if (record->event == IB_EVENT_PKEY_CHANGE) {
queue_work(ipoib_workqueue, &priv->flush_heavy);
+ } else if (record->event == IB_EVENT_GID_CHANGE &&
+ !test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ queue_work(ipoib_workqueue, &priv->flush_light);
}
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index fca1a882d..a2f9f29c6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -68,6 +68,8 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
priv->pkey = pkey;
memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
+ memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid));
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
priv->dev->broadcast[8] = pkey >> 8;
priv->dev->broadcast[9] = pkey & 0xff;
@@ -129,6 +131,9 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
ppriv = netdev_priv(pdev);
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+ return -EPERM;
+
snprintf(intf_name, sizeof intf_name, "%s.%04x",
ppriv->dev->name, pkey);
priv = ipoib_intf_alloc(intf_name);
@@ -181,6 +186,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
ppriv = netdev_priv(pdev);
+ if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
+ return -EPERM;
+
if (!rtnl_trylock())
return restart_syscall();
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 9a391cc5b..90be56893 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -236,7 +236,7 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
page_vec->npages = 0;
page_vec->fake_mr.page_size = SIZE_4K;
plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
- mem->size, iser_set_page);
+ mem->size, NULL, iser_set_page);
if (unlikely(plen < mem->size)) {
iser_err("page vec too short to hold this SG\n");
iser_data_buf_dump(mem, device->ib_device);
@@ -446,7 +446,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
- n = ib_map_mr_sg(mr, mem->sg, mem->size, SIZE_4K);
+ n = ib_map_mr_sg(mr, mem->sg, mem->size, NULL, SIZE_4K);
if (unlikely(n != mem->size)) {
iser_err("failed to map sg (%d/%d)\n",
n, mem->size);
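Both hunks above track the 4.7 memory-registration API change that threads a scatterlist offset through the mapping helpers; passing NULL keeps the old start-at-offset-zero behaviour. For reference, the updated signatures in this tree are:

    int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
                     unsigned int *sg_offset, unsigned int page_size);
    int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
                       unsigned int *sg_offset,
                       int (*set_page)(struct ib_mr *, u64));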
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 411e4464c..a990c0420 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -33,7 +33,8 @@
#define ISERT_MAX_CONN 8
#define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
-#define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
+#define ISER_MAX_TX_CQ_LEN \
+ ((ISERT_QP_MAX_REQ_DTOS + ISCSI_DEF_XMIT_CMDS_MAX) * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
ISERT_MAX_CONN)
@@ -46,14 +47,6 @@ static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;
-static void
-isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
-static int
-isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
-static void
-isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
-static int
-isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
@@ -142,6 +135,7 @@ isert_create_qp(struct isert_conn *isert_conn,
attr.recv_cq = comp->cq;
attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
+ attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX;
attr.cap.max_send_sge = device->ib_device->attrs.max_sge;
isert_conn->max_sge = min(device->ib_device->attrs.max_sge,
device->ib_device->attrs.max_sge_rd);
@@ -270,9 +264,9 @@ isert_alloc_comps(struct isert_device *device)
device->ib_device->num_comp_vectors));
isert_info("Using %d CQs, %s supports %d vectors support "
- "Fast registration %d pi_capable %d\n",
+ "pi_capable %d\n",
device->comps_used, device->ib_device->name,
- device->ib_device->num_comp_vectors, device->use_fastreg,
+ device->ib_device->num_comp_vectors,
device->pi_capable);
device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
@@ -313,18 +307,6 @@ isert_create_device_ib_res(struct isert_device *device)
isert_dbg("devattr->max_sge: %d\n", ib_dev->attrs.max_sge);
isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
- /* asign function handlers */
- if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
- ib_dev->attrs.device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
- device->use_fastreg = 1;
- device->reg_rdma_mem = isert_reg_rdma;
- device->unreg_rdma_mem = isert_unreg_rdma;
- } else {
- device->use_fastreg = 0;
- device->reg_rdma_mem = isert_map_rdma;
- device->unreg_rdma_mem = isert_unmap_cmd;
- }
-
ret = isert_alloc_comps(device);
if (ret)
goto out;
@@ -417,146 +399,6 @@ isert_device_get(struct rdma_cm_id *cma_id)
}
static void
-isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
-{
- struct fast_reg_descriptor *fr_desc, *tmp;
- int i = 0;
-
- if (list_empty(&isert_conn->fr_pool))
- return;
-
- isert_info("Freeing conn %p fastreg pool", isert_conn);
-
- list_for_each_entry_safe(fr_desc, tmp,
- &isert_conn->fr_pool, list) {
- list_del(&fr_desc->list);
- ib_dereg_mr(fr_desc->data_mr);
- if (fr_desc->pi_ctx) {
- ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
- ib_dereg_mr(fr_desc->pi_ctx->sig_mr);
- kfree(fr_desc->pi_ctx);
- }
- kfree(fr_desc);
- ++i;
- }
-
- if (i < isert_conn->fr_pool_size)
- isert_warn("Pool still has %d regions registered\n",
- isert_conn->fr_pool_size - i);
-}
-
-static int
-isert_create_pi_ctx(struct fast_reg_descriptor *desc,
- struct ib_device *device,
- struct ib_pd *pd)
-{
- struct pi_context *pi_ctx;
- int ret;
-
- pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
- if (!pi_ctx) {
- isert_err("Failed to allocate pi context\n");
- return -ENOMEM;
- }
-
- pi_ctx->prot_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(pi_ctx->prot_mr)) {
- isert_err("Failed to allocate prot frmr err=%ld\n",
- PTR_ERR(pi_ctx->prot_mr));
- ret = PTR_ERR(pi_ctx->prot_mr);
- goto err_pi_ctx;
- }
- desc->ind |= ISERT_PROT_KEY_VALID;
-
- pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
- if (IS_ERR(pi_ctx->sig_mr)) {
- isert_err("Failed to allocate signature enabled mr err=%ld\n",
- PTR_ERR(pi_ctx->sig_mr));
- ret = PTR_ERR(pi_ctx->sig_mr);
- goto err_prot_mr;
- }
-
- desc->pi_ctx = pi_ctx;
- desc->ind |= ISERT_SIG_KEY_VALID;
- desc->ind &= ~ISERT_PROTECTED;
-
- return 0;
-
-err_prot_mr:
- ib_dereg_mr(pi_ctx->prot_mr);
-err_pi_ctx:
- kfree(pi_ctx);
-
- return ret;
-}
-
-static int
-isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
- struct fast_reg_descriptor *fr_desc)
-{
- fr_desc->data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
- ISCSI_ISER_SG_TABLESIZE);
- if (IS_ERR(fr_desc->data_mr)) {
- isert_err("Failed to allocate data frmr err=%ld\n",
- PTR_ERR(fr_desc->data_mr));
- return PTR_ERR(fr_desc->data_mr);
- }
- fr_desc->ind |= ISERT_DATA_KEY_VALID;
-
- isert_dbg("Created fr_desc %p\n", fr_desc);
-
- return 0;
-}
-
-static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
-{
- struct fast_reg_descriptor *fr_desc;
- struct isert_device *device = isert_conn->device;
- struct se_session *se_sess = isert_conn->conn->sess->se_sess;
- struct se_node_acl *se_nacl = se_sess->se_node_acl;
- int i, ret, tag_num;
- /*
- * Setup the number of FRMRs based upon the number of tags
- * available to session in iscsi_target_locate_portal().
- */
- tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
- tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
-
- isert_conn->fr_pool_size = 0;
- for (i = 0; i < tag_num; i++) {
- fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
- if (!fr_desc) {
- isert_err("Failed to allocate fast_reg descriptor\n");
- ret = -ENOMEM;
- goto err;
- }
-
- ret = isert_create_fr_desc(device->ib_device,
- device->pd, fr_desc);
- if (ret) {
- isert_err("Failed to create fastreg descriptor err=%d\n",
- ret);
- kfree(fr_desc);
- goto err;
- }
-
- list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
- isert_conn->fr_pool_size++;
- }
-
- isert_dbg("Creating conn %p fastreg pool size=%d",
- isert_conn, isert_conn->fr_pool_size);
-
- return 0;
-
-err:
- isert_conn_free_fastreg_pool(isert_conn);
- return ret;
-}
-
-static void
isert_init_conn(struct isert_conn *isert_conn)
{
isert_conn->state = ISER_CONN_INIT;
@@ -565,8 +407,6 @@ isert_init_conn(struct isert_conn *isert_conn)
init_completion(&isert_conn->login_req_comp);
kref_init(&isert_conn->kref);
mutex_init(&isert_conn->mutex);
- spin_lock_init(&isert_conn->pool_lock);
- INIT_LIST_HEAD(&isert_conn->fr_pool);
INIT_WORK(&isert_conn->release_work, isert_release_work);
}
@@ -739,9 +579,6 @@ isert_connect_release(struct isert_conn *isert_conn)
BUG_ON(!device);
- if (device->use_fastreg)
- isert_conn_free_fastreg_pool(isert_conn);
-
isert_free_rx_descriptors(isert_conn);
if (isert_conn->cm_id)
rdma_destroy_id(isert_conn->cm_id);
@@ -1080,7 +917,6 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
{
struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
- isert_cmd->iser_ib_op = ISER_IB_SEND;
tx_desc->tx_cqe.done = isert_send_done;
send_wr->wr_cqe = &tx_desc->tx_cqe;
@@ -1160,16 +996,6 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
}
if (!login->login_failed) {
if (login->login_complete) {
- if (!conn->sess->sess_ops->SessionType &&
- isert_conn->device->use_fastreg) {
- ret = isert_conn_create_fastreg_pool(isert_conn);
- if (ret) {
- isert_err("Conn: %p failed to create"
- " fastreg pool\n", isert_conn);
- return ret;
- }
- }
-
ret = isert_alloc_rx_descriptors(isert_conn);
if (ret)
return ret;
@@ -1633,97 +1459,26 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
}
-static int
-isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
- struct scatterlist *sg, u32 nents, u32 length, u32 offset,
- enum iser_ib_op_code op, struct isert_data_buf *data)
-{
- struct ib_device *ib_dev = isert_conn->cm_id->device;
-
- data->dma_dir = op == ISER_IB_RDMA_WRITE ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
- data->len = length - offset;
- data->offset = offset;
- data->sg_off = data->offset / PAGE_SIZE;
-
- data->sg = &sg[data->sg_off];
- data->nents = min_t(unsigned int, nents - data->sg_off,
- ISCSI_ISER_SG_TABLESIZE);
- data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
- PAGE_SIZE);
-
- data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
- data->dma_dir);
- if (unlikely(!data->dma_nents)) {
- isert_err("Cmd: unable to dma map SGs %p\n", sg);
- return -EINVAL;
- }
-
- isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
- isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
-
- return 0;
-}
-
-static void
-isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
-{
- struct ib_device *ib_dev = isert_conn->cm_id->device;
-
- ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
- memset(data, 0, sizeof(*data));
-}
-
-
-
static void
-isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
{
- isert_dbg("Cmd %p\n", isert_cmd);
+ struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
+ enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
- if (isert_cmd->data.sg) {
- isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
- isert_unmap_data_buf(isert_conn, &isert_cmd->data);
- }
-
- if (isert_cmd->rdma_wr) {
- isert_dbg("Cmd %p free send_wr\n", isert_cmd);
- kfree(isert_cmd->rdma_wr);
- isert_cmd->rdma_wr = NULL;
- }
-
- if (isert_cmd->ib_sge) {
- isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
- kfree(isert_cmd->ib_sge);
- isert_cmd->ib_sge = NULL;
- }
-}
-
-static void
-isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
-{
- isert_dbg("Cmd %p\n", isert_cmd);
-
- if (isert_cmd->fr_desc) {
- isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, isert_cmd->fr_desc);
- if (isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
- isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
- isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
- }
- spin_lock_bh(&isert_conn->pool_lock);
- list_add_tail(&isert_cmd->fr_desc->list, &isert_conn->fr_pool);
- spin_unlock_bh(&isert_conn->pool_lock);
- isert_cmd->fr_desc = NULL;
- }
+ if (!cmd->rw.nr_ops)
+ return;
- if (isert_cmd->data.sg) {
- isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
- isert_unmap_data_buf(isert_conn, &isert_cmd->data);
+ if (isert_prot_cmd(conn, se_cmd)) {
+ rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
+ conn->cm_id->port_num, se_cmd->t_data_sg,
+ se_cmd->t_data_nents, se_cmd->t_prot_sg,
+ se_cmd->t_prot_nents, dir);
+ } else {
+ rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
+ se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
}
- isert_cmd->ib_sge = NULL;
- isert_cmd->rdma_wr = NULL;
+ cmd->rw.nr_ops = 0;
}
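This destroy helper is one half of the 4.7 rdma_rw API (rdma/rw.h) that replaces isert's hand-rolled registration logic removed elsewhere in this patch. The calls pair up as follows (argument lists abridged to match the usage above):

    /* data-only transfers */
    rdma_rw_ctx_init(ctx, qp, port_num, sg, sg_cnt, sg_offset,
                     remote_addr, rkey, dir);
    rdma_rw_ctx_destroy(ctx, qp, port_num, sg, sg_cnt, dir);

    /* transfers protected by a T10-PI signature MR */
    rdma_rw_ctx_signature_init(ctx, qp, port_num, sg, sg_cnt, prot_sg,
                               prot_sg_cnt, sig_attrs, remote_addr, rkey, dir);
    rdma_rw_ctx_destroy_signature(ctx, qp, port_num, sg, sg_cnt, prot_sg,
                                  prot_sg_cnt, dir);

    /* rdma_rw_ctx_post() then chains an optional chain_wr behind the
     * generated RDMA work requests and posts the whole chain to the QP. */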
static void
@@ -1732,7 +1487,6 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
struct iscsi_conn *conn = isert_conn->conn;
- struct isert_device *device = isert_conn->device;
struct iscsi_text_rsp *hdr;
isert_dbg("Cmd %p\n", isert_cmd);
@@ -1760,7 +1514,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
}
}
- device->unreg_rdma_mem(isert_cmd, isert_conn);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
case ISCSI_OP_SCSI_TMFUNC:
@@ -1894,14 +1648,9 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
isert_dbg("Cmd %p\n", isert_cmd);
- if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
- ret = isert_check_pi_status(cmd,
- isert_cmd->fr_desc->pi_ctx->sig_mr);
- isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
- }
+ ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
- device->unreg_rdma_mem(isert_cmd, isert_conn);
- isert_cmd->rdma_wr_num = 0;
if (ret)
transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
else
@@ -1929,16 +1678,12 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
isert_dbg("Cmd %p\n", isert_cmd);
- if (isert_cmd->fr_desc && isert_cmd->fr_desc->ind & ISERT_PROTECTED) {
- ret = isert_check_pi_status(se_cmd,
- isert_cmd->fr_desc->pi_ctx->sig_mr);
- isert_cmd->fr_desc->ind &= ~ISERT_PROTECTED;
- }
-
iscsit_stop_dataout_timer(cmd);
- device->unreg_rdma_mem(isert_cmd, isert_conn);
- cmd->write_data_done = isert_cmd->data.len;
- isert_cmd->rdma_wr_num = 0;
+
+ if (isert_prot_cmd(isert_conn, se_cmd))
+ ret = isert_check_pi_status(se_cmd, isert_cmd->rw.sig->sig_mr);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
+ cmd->write_data_done = 0;
isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
@@ -2111,7 +1856,6 @@ isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
- struct isert_device *device = isert_conn->device;
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
@@ -2120,8 +1864,7 @@ isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
if (cmd->data_direction == DMA_TO_DEVICE)
iscsit_stop_dataout_timer(cmd);
-
- device->unreg_rdma_mem(isert_cmd, isert_conn);
+ isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
}
static enum target_prot_op
@@ -2274,234 +2017,6 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
return isert_post_response(isert_conn, isert_cmd);
}
-static int
-isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
- struct ib_sge *ib_sge, struct ib_rdma_wr *rdma_wr,
- u32 data_left, u32 offset)
-{
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
- struct scatterlist *sg_start, *tmp_sg;
- struct isert_device *device = isert_conn->device;
- struct ib_device *ib_dev = device->ib_device;
- u32 sg_off, page_off;
- int i = 0, sg_nents;
-
- sg_off = offset / PAGE_SIZE;
- sg_start = &cmd->se_cmd.t_data_sg[sg_off];
- sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
- page_off = offset % PAGE_SIZE;
-
- rdma_wr->wr.sg_list = ib_sge;
- rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
-
- /*
- * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
- */
- for_each_sg(sg_start, tmp_sg, sg_nents, i) {
- isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
- "page_off: %u\n",
- (unsigned long long)tmp_sg->dma_address,
- tmp_sg->length, page_off);
-
- ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
- ib_sge->length = min_t(u32, data_left,
- ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
- ib_sge->lkey = device->pd->local_dma_lkey;
-
- isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
- ib_sge->addr, ib_sge->length, ib_sge->lkey);
- page_off = 0;
- data_left -= ib_sge->length;
- if (!data_left)
- break;
- ib_sge++;
- isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
- }
-
- rdma_wr->wr.num_sge = ++i;
- isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
- rdma_wr->wr.sg_list, rdma_wr->wr.num_sge);
-
- return rdma_wr->wr.num_sge;
-}
-
-static int
-isert_map_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
-{
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_conn *isert_conn = conn->context;
- struct isert_data_buf *data = &isert_cmd->data;
- struct ib_rdma_wr *rdma_wr;
- struct ib_sge *ib_sge;
- u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
- int ret = 0, i, ib_sge_cnt;
-
- offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
- cmd->write_data_done : 0;
- ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
- se_cmd->t_data_nents, se_cmd->data_length,
- offset, isert_cmd->iser_ib_op,
- &isert_cmd->data);
- if (ret)
- return ret;
-
- data_left = data->len;
- offset = data->offset;
-
- ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
- if (!ib_sge) {
- isert_warn("Unable to allocate ib_sge\n");
- ret = -ENOMEM;
- goto unmap_cmd;
- }
- isert_cmd->ib_sge = ib_sge;
-
- isert_cmd->rdma_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
- isert_cmd->rdma_wr = kzalloc(sizeof(struct ib_rdma_wr) *
- isert_cmd->rdma_wr_num, GFP_KERNEL);
- if (!isert_cmd->rdma_wr) {
- isert_dbg("Unable to allocate isert_cmd->rdma_wr\n");
- ret = -ENOMEM;
- goto unmap_cmd;
- }
-
- rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
-
- for (i = 0; i < isert_cmd->rdma_wr_num; i++) {
- rdma_wr = &isert_cmd->rdma_wr[i];
- data_len = min(data_left, rdma_write_max);
-
- rdma_wr->wr.send_flags = 0;
- if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
- isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
-
- rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
- rdma_wr->remote_addr = isert_cmd->read_va + offset;
- rdma_wr->rkey = isert_cmd->read_stag;
- if (i + 1 == isert_cmd->rdma_wr_num)
- rdma_wr->wr.next = &isert_cmd->tx_desc.send_wr;
- else
- rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
- } else {
- isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
-
- rdma_wr->wr.opcode = IB_WR_RDMA_READ;
- rdma_wr->remote_addr = isert_cmd->write_va + va_offset;
- rdma_wr->rkey = isert_cmd->write_stag;
- if (i + 1 == isert_cmd->rdma_wr_num)
- rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
- else
- rdma_wr->wr.next = &isert_cmd->rdma_wr[i + 1].wr;
- }
-
- ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
- rdma_wr, data_len, offset);
- ib_sge += ib_sge_cnt;
-
- offset += data_len;
- va_offset += data_len;
- data_left -= data_len;
- }
-
- return 0;
-unmap_cmd:
- isert_unmap_data_buf(isert_conn, data);
-
- return ret;
-}
-
-static inline void
-isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
-{
- u32 rkey;
-
- memset(inv_wr, 0, sizeof(*inv_wr));
- inv_wr->wr_cqe = NULL;
- inv_wr->opcode = IB_WR_LOCAL_INV;
- inv_wr->ex.invalidate_rkey = mr->rkey;
-
- /* Bump the key */
- rkey = ib_inc_rkey(mr->rkey);
- ib_update_fast_reg_key(mr, rkey);
-}
-
-static int
-isert_fast_reg_mr(struct isert_conn *isert_conn,
- struct fast_reg_descriptor *fr_desc,
- struct isert_data_buf *mem,
- enum isert_indicator ind,
- struct ib_sge *sge)
-{
- struct isert_device *device = isert_conn->device;
- struct ib_device *ib_dev = device->ib_device;
- struct ib_mr *mr;
- struct ib_reg_wr reg_wr;
- struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
- int ret, n;
-
- if (mem->dma_nents == 1) {
- sge->lkey = device->pd->local_dma_lkey;
- sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
- sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
- isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
- sge->addr, sge->length, sge->lkey);
- return 0;
- }
-
- if (ind == ISERT_DATA_KEY_VALID)
- /* Registering data buffer */
- mr = fr_desc->data_mr;
- else
- /* Registering protection buffer */
- mr = fr_desc->pi_ctx->prot_mr;
-
- if (!(fr_desc->ind & ind)) {
- isert_inv_rkey(&inv_wr, mr);
- wr = &inv_wr;
- }
-
- n = ib_map_mr_sg(mr, mem->sg, mem->nents, PAGE_SIZE);
- if (unlikely(n != mem->nents)) {
- isert_err("failed to map mr sg (%d/%d)\n",
- n, mem->nents);
- return n < 0 ? n : -EINVAL;
- }
-
- isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
- fr_desc, mem->nents, mem->offset);
-
- reg_wr.wr.next = NULL;
- reg_wr.wr.opcode = IB_WR_REG_MR;
- reg_wr.wr.wr_cqe = NULL;
- reg_wr.wr.send_flags = 0;
- reg_wr.wr.num_sge = 0;
- reg_wr.mr = mr;
- reg_wr.key = mr->lkey;
- reg_wr.access = IB_ACCESS_LOCAL_WRITE;
-
- if (!wr)
- wr = &reg_wr.wr;
- else
- wr->next = &reg_wr.wr;
-
- ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
- if (ret) {
- isert_err("fast registration failed, ret:%d\n", ret);
- return ret;
- }
- fr_desc->ind &= ~ind;
-
- sge->lkey = mr->lkey;
- sge->addr = mr->iova;
- sge->length = mr->length;
-
- isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
- sge->addr, sge->length, sge->lkey);
-
- return ret;
-}
-
static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
struct ib_sig_domain *domain)
@@ -2526,6 +2041,8 @@ isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
+ memset(sig_attrs, 0, sizeof(*sig_attrs));
+
switch (se_cmd->prot_op) {
case TARGET_PROT_DIN_INSERT:
case TARGET_PROT_DOUT_STRIP:
@@ -2547,228 +2064,59 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
return -EINVAL;
}
+ sig_attrs->check_mask =
+ (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
+ (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
+ (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
return 0;
}
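The check_mask nibbles select which T10-DIF tags the HCA verifies: 0xc0 covers the CRC guard, 0x30 the application tag and 0x0f the reference tag. Note that TARGET_DIF_CHECK_REFTAG is mapped to both the application-tag and reference-tag nibbles here, since the target core exposes no separate application-tag check.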
-static inline u8
-isert_set_prot_checks(u8 prot_checks)
-{
- return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
- (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
- (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
-}
-
static int
-isert_reg_sig_mr(struct isert_conn *isert_conn,
- struct isert_cmd *isert_cmd,
- struct fast_reg_descriptor *fr_desc)
-{
- struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
- struct ib_sig_handover_wr sig_wr;
- struct ib_send_wr inv_wr, *bad_wr, *wr = NULL;
- struct pi_context *pi_ctx = fr_desc->pi_ctx;
- struct ib_sig_attrs sig_attrs;
+isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
+ struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
+{
+ struct se_cmd *se_cmd = &cmd->iscsi_cmd->se_cmd;
+ enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
+ u8 port_num = conn->cm_id->port_num;
+ u64 addr;
+ u32 rkey, offset;
int ret;
- memset(&sig_attrs, 0, sizeof(sig_attrs));
- ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
- if (ret)
- goto err;
-
- sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
-
- if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
- isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
- wr = &inv_wr;
- }
-
- memset(&sig_wr, 0, sizeof(sig_wr));
- sig_wr.wr.opcode = IB_WR_REG_SIG_MR;
- sig_wr.wr.wr_cqe = NULL;
- sig_wr.wr.sg_list = &isert_cmd->ib_sg[DATA];
- sig_wr.wr.num_sge = 1;
- sig_wr.access_flags = IB_ACCESS_LOCAL_WRITE;
- sig_wr.sig_attrs = &sig_attrs;
- sig_wr.sig_mr = pi_ctx->sig_mr;
- if (se_cmd->t_prot_sg)
- sig_wr.prot = &isert_cmd->ib_sg[PROT];
-
- if (!wr)
- wr = &sig_wr.wr;
- else
- wr->next = &sig_wr.wr;
-
- ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
- if (ret) {
- isert_err("fast registration failed, ret:%d\n", ret);
- goto err;
- }
- fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
-
- isert_cmd->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
- isert_cmd->ib_sg[SIG].addr = 0;
- isert_cmd->ib_sg[SIG].length = se_cmd->data_length;
- if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
- se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
- /*
- * We have protection guards on the wire
- * so we need to set a larget transfer
- */
- isert_cmd->ib_sg[SIG].length += se_cmd->prot_length;
-
- isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
- isert_cmd->ib_sg[SIG].addr, isert_cmd->ib_sg[SIG].length,
- isert_cmd->ib_sg[SIG].lkey);
-err:
- return ret;
-}
-
-static int
-isert_handle_prot_cmd(struct isert_conn *isert_conn,
- struct isert_cmd *isert_cmd)
-{
- struct isert_device *device = isert_conn->device;
- struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
- int ret;
-
- if (!isert_cmd->fr_desc->pi_ctx) {
- ret = isert_create_pi_ctx(isert_cmd->fr_desc,
- device->ib_device,
- device->pd);
- if (ret) {
- isert_err("conn %p failed to allocate pi_ctx\n",
- isert_conn);
- return ret;
- }
- }
-
- if (se_cmd->t_prot_sg) {
- ret = isert_map_data_buf(isert_conn, isert_cmd,
- se_cmd->t_prot_sg,
- se_cmd->t_prot_nents,
- se_cmd->prot_length,
- 0,
- isert_cmd->iser_ib_op,
- &isert_cmd->prot);
- if (ret) {
- isert_err("conn %p failed to map protection buffer\n",
- isert_conn);
- return ret;
- }
-
- memset(&isert_cmd->ib_sg[PROT], 0, sizeof(isert_cmd->ib_sg[PROT]));
- ret = isert_fast_reg_mr(isert_conn, isert_cmd->fr_desc,
- &isert_cmd->prot,
- ISERT_PROT_KEY_VALID,
- &isert_cmd->ib_sg[PROT]);
- if (ret) {
- isert_err("conn %p failed to fast reg mr\n",
- isert_conn);
- goto unmap_prot_cmd;
- }
- }
-
- ret = isert_reg_sig_mr(isert_conn, isert_cmd, isert_cmd->fr_desc);
- if (ret) {
- isert_err("conn %p failed to fast reg mr\n",
- isert_conn);
- goto unmap_prot_cmd;
- }
- isert_cmd->fr_desc->ind |= ISERT_PROTECTED;
-
- return 0;
-
-unmap_prot_cmd:
- if (se_cmd->t_prot_sg)
- isert_unmap_data_buf(isert_conn, &isert_cmd->prot);
-
- return ret;
-}
-
-static int
-isert_reg_rdma(struct isert_cmd *isert_cmd, struct iscsi_conn *conn)
-{
- struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- struct isert_conn *isert_conn = conn->context;
- struct fast_reg_descriptor *fr_desc = NULL;
- struct ib_rdma_wr *rdma_wr;
- struct ib_sge *ib_sg;
- u32 offset;
- int ret = 0;
- unsigned long flags;
-
- offset = isert_cmd->iser_ib_op == ISER_IB_RDMA_READ ?
- cmd->write_data_done : 0;
- ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
- se_cmd->t_data_nents, se_cmd->data_length,
- offset, isert_cmd->iser_ib_op,
- &isert_cmd->data);
- if (ret)
- return ret;
-
- if (isert_cmd->data.dma_nents != 1 ||
- isert_prot_cmd(isert_conn, se_cmd)) {
- spin_lock_irqsave(&isert_conn->pool_lock, flags);
- fr_desc = list_first_entry(&isert_conn->fr_pool,
- struct fast_reg_descriptor, list);
- list_del(&fr_desc->list);
- spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
- isert_cmd->fr_desc = fr_desc;
- }
-
- ret = isert_fast_reg_mr(isert_conn, fr_desc, &isert_cmd->data,
- ISERT_DATA_KEY_VALID, &isert_cmd->ib_sg[DATA]);
- if (ret)
- goto unmap_cmd;
-
- if (isert_prot_cmd(isert_conn, se_cmd)) {
- ret = isert_handle_prot_cmd(isert_conn, isert_cmd);
- if (ret)
- goto unmap_cmd;
-
- ib_sg = &isert_cmd->ib_sg[SIG];
+ if (dir == DMA_FROM_DEVICE) {
+ addr = cmd->write_va;
+ rkey = cmd->write_stag;
+ offset = cmd->iscsi_cmd->write_data_done;
} else {
- ib_sg = &isert_cmd->ib_sg[DATA];
+ addr = cmd->read_va;
+ rkey = cmd->read_stag;
+ offset = 0;
}
- memcpy(&isert_cmd->s_ib_sge, ib_sg, sizeof(*ib_sg));
- isert_cmd->ib_sge = &isert_cmd->s_ib_sge;
- isert_cmd->rdma_wr_num = 1;
- memset(&isert_cmd->s_rdma_wr, 0, sizeof(isert_cmd->s_rdma_wr));
- isert_cmd->rdma_wr = &isert_cmd->s_rdma_wr;
+ if (isert_prot_cmd(conn, se_cmd)) {
+ struct ib_sig_attrs sig_attrs;
- rdma_wr = &isert_cmd->s_rdma_wr;
- rdma_wr->wr.sg_list = &isert_cmd->s_ib_sge;
- rdma_wr->wr.num_sge = 1;
- rdma_wr->wr.wr_cqe = &isert_cmd->tx_desc.tx_cqe;
- if (isert_cmd->iser_ib_op == ISER_IB_RDMA_WRITE) {
- isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
+ ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
+ if (ret)
+ return ret;
- rdma_wr->wr.opcode = IB_WR_RDMA_WRITE;
- rdma_wr->remote_addr = isert_cmd->read_va;
- rdma_wr->rkey = isert_cmd->read_stag;
- rdma_wr->wr.send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
- 0 : IB_SEND_SIGNALED;
+ WARN_ON_ONCE(offset);
+ ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
+ se_cmd->t_data_sg, se_cmd->t_data_nents,
+ se_cmd->t_prot_sg, se_cmd->t_prot_nents,
+ &sig_attrs, addr, rkey, dir);
} else {
- isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
-
- rdma_wr->wr.opcode = IB_WR_RDMA_READ;
- rdma_wr->remote_addr = isert_cmd->write_va;
- rdma_wr->rkey = isert_cmd->write_stag;
- rdma_wr->wr.send_flags = IB_SEND_SIGNALED;
+ ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
+ se_cmd->t_data_sg, se_cmd->t_data_nents,
+ offset, addr, rkey, dir);
}
-
- return 0;
-
-unmap_cmd:
- if (fr_desc) {
- spin_lock_irqsave(&isert_conn->pool_lock, flags);
- list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
- spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
+ if (ret < 0) {
+ isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
+ return ret;
}
- isert_unmap_data_buf(isert_conn, &isert_cmd->data);
+ ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
+ if (ret < 0)
+ isert_err("Cmd: %p failed to post RDMA res\n", cmd);
return ret;
}
@@ -2778,21 +2126,17 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
- struct isert_device *device = isert_conn->device;
- struct ib_send_wr *wr_failed;
+ struct ib_cqe *cqe = NULL;
+ struct ib_send_wr *chain_wr = NULL;
int rc;
isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
isert_cmd, se_cmd->data_length);
- isert_cmd->iser_ib_op = ISER_IB_RDMA_WRITE;
- rc = device->reg_rdma_mem(isert_cmd, conn);
- if (rc) {
- isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
- return rc;
- }
-
- if (!isert_prot_cmd(isert_conn, se_cmd)) {
+ if (isert_prot_cmd(isert_conn, se_cmd)) {
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
+ cqe = &isert_cmd->tx_desc.tx_cqe;
+ } else {
/*
* Build isert_conn->tx_desc for iSCSI response PDU and attach
*/
@@ -2803,56 +2147,35 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_conn, isert_cmd,
&isert_cmd->tx_desc.send_wr);
- isert_cmd->s_rdma_wr.wr.next = &isert_cmd->tx_desc.send_wr;
- isert_cmd->rdma_wr_num += 1;
rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
if (rc) {
isert_err("ib_post_recv failed with %d\n", rc);
return rc;
}
- }
- rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
- if (rc)
- isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-
- if (!isert_prot_cmd(isert_conn, se_cmd))
- isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
- "READ\n", isert_cmd);
- else
- isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
- isert_cmd);
+ chain_wr = &isert_cmd->tx_desc.send_wr;
+ }
+ isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
+ isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
return 1;
}
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
- struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
- struct isert_conn *isert_conn = conn->context;
- struct isert_device *device = isert_conn->device;
- struct ib_send_wr *wr_failed;
- int rc;
isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
- isert_cmd, se_cmd->data_length, cmd->write_data_done);
- isert_cmd->iser_ib_op = ISER_IB_RDMA_READ;
- rc = device->reg_rdma_mem(isert_cmd, conn);
- if (rc) {
- isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
- return rc;
- }
+ isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
- rc = ib_post_send(isert_conn->qp, &isert_cmd->rdma_wr->wr, &wr_failed);
- if (rc)
- isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
+ isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
+ isert_rdma_rw_ctx_post(isert_cmd, conn->context,
+ &isert_cmd->tx_desc.tx_cqe, NULL);
isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
isert_cmd);
-
return 0;
}
@@ -3273,9 +2596,19 @@ static void isert_free_conn(struct iscsi_conn *conn)
isert_put_conn(isert_conn);
}
+static void isert_get_rx_pdu(struct iscsi_conn *conn)
+{
+ struct completion comp;
+
+ init_completion(&comp);
+
+ wait_for_completion_interruptible(&comp);
+}
+
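isert_get_rx_pdu() parks the iscsit RX thread: iSER receive processing is driven entirely by CQ completions rather than the iSCSI RX loop, so the handler sleeps interruptibly on a local completion that is never completed and returns only when the thread is signalled during connection teardown (the new rdma_shutdown flag below marks the transport as handling its own shutdown).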
static struct iscsit_transport iser_target_transport = {
.name = "IB/iSER",
.transport_type = ISCSI_INFINIBAND,
+ .rdma_shutdown = true,
.priv_size = sizeof(struct isert_cmd),
.owner = THIS_MODULE,
.iscsit_setup_np = isert_setup_np,
@@ -3291,6 +2624,7 @@ static struct iscsit_transport iser_target_transport = {
.iscsit_queue_data_in = isert_put_datain,
.iscsit_queue_status = isert_put_response,
.iscsit_aborted_task = isert_aborted_task,
+ .iscsit_get_rx_pdu = isert_get_rx_pdu,
.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 147900cbb..e512ba941 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -3,6 +3,7 @@
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
+#include <rdma/rw.h>
#include <scsi/iser.h>
@@ -53,10 +54,7 @@
#define ISERT_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
-#define ISERT_INFLIGHT_DATAOUTS 8
-
-#define ISERT_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \
- (1 + ISERT_INFLIGHT_DATAOUTS) + \
+#define ISERT_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \
ISERT_MAX_TX_MISC_PDUS + \
ISERT_MAX_RX_MISC_PDUS)
@@ -71,13 +69,6 @@ enum isert_desc_type {
ISCSI_TX_DATAIN
};
-enum iser_ib_op_code {
- ISER_IB_RECV,
- ISER_IB_SEND,
- ISER_IB_RDMA_WRITE,
- ISER_IB_RDMA_READ,
-};
-
enum iser_conn_state {
ISER_CONN_INIT,
ISER_CONN_UP,
@@ -118,42 +109,6 @@ static inline struct iser_tx_desc *cqe_to_tx_desc(struct ib_cqe *cqe)
return container_of(cqe, struct iser_tx_desc, tx_cqe);
}
-
-enum isert_indicator {
- ISERT_PROTECTED = 1 << 0,
- ISERT_DATA_KEY_VALID = 1 << 1,
- ISERT_PROT_KEY_VALID = 1 << 2,
- ISERT_SIG_KEY_VALID = 1 << 3,
-};
-
-struct pi_context {
- struct ib_mr *prot_mr;
- struct ib_mr *sig_mr;
-};
-
-struct fast_reg_descriptor {
- struct list_head list;
- struct ib_mr *data_mr;
- u8 ind;
- struct pi_context *pi_ctx;
-};
-
-struct isert_data_buf {
- struct scatterlist *sg;
- int nents;
- u32 sg_off;
- u32 len; /* cur_rdma_length */
- u32 offset;
- unsigned int dma_nents;
- enum dma_data_direction dma_dir;
-};
-
-enum {
- DATA = 0,
- PROT = 1,
- SIG = 2,
-};
-
struct isert_cmd {
uint32_t read_stag;
uint32_t write_stag;
@@ -166,16 +121,7 @@ struct isert_cmd {
struct iscsi_cmd *iscsi_cmd;
struct iser_tx_desc tx_desc;
struct iser_rx_desc *rx_desc;
- enum iser_ib_op_code iser_ib_op;
- struct ib_sge *ib_sge;
- struct ib_sge s_ib_sge;
- int rdma_wr_num;
- struct ib_rdma_wr *rdma_wr;
- struct ib_rdma_wr s_rdma_wr;
- struct ib_sge ib_sg[3];
- struct isert_data_buf data;
- struct isert_data_buf prot;
- struct fast_reg_descriptor *fr_desc;
+ struct rdma_rw_ctx rw;
struct work_struct comp_work;
struct scatterlist sg;
};
@@ -210,10 +156,6 @@ struct isert_conn {
struct isert_device *device;
struct mutex mutex;
struct kref kref;
- struct list_head fr_pool;
- int fr_pool_size;
- /* lock to protect fastreg pool */
- spinlock_t pool_lock;
struct work_struct release_work;
bool logout_posted;
bool snd_w_inv;
@@ -236,7 +178,6 @@ struct isert_comp {
};
struct isert_device {
- int use_fastreg;
bool pi_capable;
int refcount;
struct ib_device *ib_device;
@@ -244,10 +185,6 @@ struct isert_device {
struct isert_comp *comps;
int comps_used;
struct list_head dev_node;
- int (*reg_rdma_mem)(struct isert_cmd *isert_cmd,
- struct iscsi_conn *conn);
- void (*unreg_rdma_mem)(struct isert_cmd *isert_cmd,
- struct isert_conn *isert_conn);
};
struct isert_np {
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 845ce90c2..3322ed750 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -70,6 +70,7 @@ static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
+static bool never_register;
static int topspin_workarounds = 1;
module_param(srp_sg_tablesize, uint, 0444);
@@ -81,7 +82,7 @@ MODULE_PARM_DESC(cmd_sg_entries,
module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
- "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
+ "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
@@ -99,6 +100,9 @@ module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
"Use memory registration even for contiguous memory regions");
+module_param(never_register, bool, 0444);
+MODULE_PARM_DESC(never_register, "Never register memory");
+
static const struct kernel_param_ops srp_tmo_ops;
static int srp_reconnect_delay = 10;
@@ -316,7 +320,7 @@ static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
struct ib_fmr_pool_param fmr_param;
memset(&fmr_param, 0, sizeof(fmr_param));
- fmr_param.pool_size = target->scsi_host->can_queue;
+ fmr_param.pool_size = target->mr_pool_size;
fmr_param.dirty_watermark = fmr_param.pool_size / 4;
fmr_param.cache = 1;
fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
@@ -441,8 +445,7 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
struct srp_device *dev = target->srp_host->srp_dev;
- return srp_create_fr_pool(dev->dev, dev->pd,
- target->scsi_host->can_queue,
+ return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
dev->max_pages_per_mr);
}
@@ -469,7 +472,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
struct ib_qp *qp;
struct ib_fmr_pool *fmr_pool = NULL;
struct srp_fr_pool *fr_pool = NULL;
- const int m = dev->use_fast_reg ? 3 : 1;
+ const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
int ret;
init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
@@ -850,7 +853,7 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
for (i = 0; i < target->req_ring_size; ++i) {
req = &ch->req_ring[i];
- mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
+ mr_list = kmalloc(target->mr_per_cmd * sizeof(void *),
GFP_KERNEL);
if (!mr_list)
goto out;
@@ -1112,7 +1115,7 @@ static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
}
/**
- * srp_free_req() - Unmap data and add request to the free request list.
+ * srp_free_req() - Unmap data and adjust ch->req_lim.
* @ch: SRP RDMA channel.
* @req: Request to be freed.
* @scmnd: SCSI command associated with @req.
@@ -1299,9 +1302,16 @@ static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
srp_handle_qp_err(cq, wc, "FAST REG");
}
+/*
+ * Map up to sg_nents elements of state->sg, where *sg_offset_p is the offset
+ * at which to start in the first element. If sg_offset_p != NULL then
+ * *sg_offset_p is updated to the offset in state->sg[retval] of the first
+ * byte that has not yet been mapped.
+ */
static int srp_map_finish_fr(struct srp_map_state *state,
struct srp_request *req,
- struct srp_rdma_ch *ch, int sg_nents)
+ struct srp_rdma_ch *ch, int sg_nents,
+ unsigned int *sg_offset_p)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
@@ -1316,13 +1326,14 @@ static int srp_map_finish_fr(struct srp_map_state *state,
WARN_ON_ONCE(!dev->use_fast_reg);
- if (sg_nents == 0)
- return 0;
-
if (sg_nents == 1 && target->global_mr) {
- srp_map_desc(state, sg_dma_address(state->sg),
- sg_dma_len(state->sg),
+ unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
+
+ srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
+ sg_dma_len(state->sg) - sg_offset,
target->global_mr->rkey);
+ if (sg_offset_p)
+ *sg_offset_p = 0;
return 1;
}
@@ -1333,9 +1344,17 @@ static int srp_map_finish_fr(struct srp_map_state *state,
rkey = ib_inc_rkey(desc->mr->rkey);
ib_update_fast_reg_key(desc->mr, rkey);
- n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, dev->mr_page_size);
- if (unlikely(n < 0))
+ n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
+ dev->mr_page_size);
+ if (unlikely(n < 0)) {
+ srp_fr_pool_put(ch->fr_pool, &desc, 1);
+ pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
+ dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
+ sg_offset_p ? *sg_offset_p : -1, n);
return n;
+ }
+
+ WARN_ON_ONCE(desc->mr->length == 0);
req->reg_cqe.done = srp_reg_mr_err_done;
@@ -1357,8 +1376,10 @@ static int srp_map_finish_fr(struct srp_map_state *state,
desc->mr->length, desc->mr->rkey);
err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
- if (unlikely(err))
+ if (unlikely(err)) {
+ WARN_ON_ONCE(err == -ENOMEM);
return err;
+ }
return n;
}
@@ -1398,7 +1419,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
/*
* If the last entry of the MR wasn't a full page, then we need to
* close it out and start a new one -- we can only merge at page
- * boundries.
+ * boundaries.
*/
ret = 0;
if (len != dev->mr_page_size)
@@ -1413,10 +1434,9 @@ static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct scatterlist *sg;
int i, ret;
- state->desc = req->indirect_desc;
state->pages = req->map_page;
state->fmr.next = req->fmr_list;
- state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
+ state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;
for_each_sg(scat, sg, count, i) {
ret = srp_map_sg_entry(state, ch, sg, i);
@@ -1428,8 +1448,6 @@ static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
if (ret)
return ret;
- req->nmdesc = state->nmdesc;
-
return 0;
}
@@ -1437,15 +1455,19 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct srp_request *req, struct scatterlist *scat,
int count)
{
- state->desc = req->indirect_desc;
+ unsigned int sg_offset = 0;
+
state->fr.next = req->fr_list;
- state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
+ state->fr.end = req->fr_list + ch->target->mr_per_cmd;
state->sg = scat;
+ if (count == 0)
+ return 0;
+
while (count) {
int i, n;
- n = srp_map_finish_fr(state, req, ch, count);
+ n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
if (unlikely(n < 0))
return n;
@@ -1454,8 +1476,6 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
state->sg = sg_next(state->sg);
}
- req->nmdesc = state->nmdesc;
-
return 0;
}
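The loop above leans on the contract documented before srp_map_finish_fr(): the callee reports how many scatterlist elements it consumed and leaves *sg_offset_p at the first unmapped byte of the element where it stopped. A self-contained sketch of a caller honouring that contract, with a hypothetical map_some() standing in for srp_map_finish_fr():

	#include <linux/scatterlist.h>

	/*
	 * Hypothetical stand-in for srp_map_finish_fr(): consumes up to
	 * 'count' elements and updates *offset to the first unmapped byte.
	 */
	int map_some(struct scatterlist *sg, int count, unsigned int *offset);

	static int drain_sg(struct scatterlist *sg, int count)
	{
		unsigned int offset = 0;	/* first unmapped byte in *sg */

		while (count) {
			int n = map_some(sg, count, &offset);

			if (n < 0)
				return n;	/* mapping error, e.g. -ENOMEM */
			count -= n;
			while (n-- > 0)		/* skip the consumed elements */
				sg = sg_next(sg);
		}
		return 0;
	}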
@@ -1468,15 +1488,12 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct scatterlist *sg;
int i;
- state->desc = req->indirect_desc;
for_each_sg(scat, sg, count, i) {
srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
ib_sg_dma_len(dev->dev, sg),
target->global_mr->rkey);
}
- req->nmdesc = state->nmdesc;
-
return 0;
}
@@ -1514,9 +1531,10 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
#ifdef CONFIG_NEED_SG_DMA_LENGTH
idb_sg->dma_length = idb_sg->length; /* hack^2 */
#endif
- ret = srp_map_finish_fr(&state, req, ch, 1);
+ ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
if (ret < 0)
return ret;
+ WARN_ON_ONCE(ret < 1);
} else if (dev->use_fmr) {
state.pages = idb_pages;
state.pages[0] = (req->indirect_dma_addr &
@@ -1534,6 +1552,41 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
return 0;
}
+#if defined(DYNAMIC_DEBUG)
+static void srp_check_mapping(struct srp_map_state *state,
+ struct srp_rdma_ch *ch, struct srp_request *req,
+ struct scatterlist *scat, int count)
+{
+ struct srp_device *dev = ch->target->srp_host->srp_dev;
+ struct srp_fr_desc **pfr;
+ u64 desc_len = 0, mr_len = 0;
+ int i;
+
+ for (i = 0; i < state->ndesc; i++)
+ desc_len += be32_to_cpu(req->indirect_desc[i].len);
+ if (dev->use_fast_reg)
+ for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
+ mr_len += (*pfr)->mr->length;
+ else if (dev->use_fmr)
+ for (i = 0; i < state->nmdesc; i++)
+ mr_len += be32_to_cpu(req->indirect_desc[i].len);
+ if (desc_len != scsi_bufflen(req->scmnd) ||
+ mr_len > scsi_bufflen(req->scmnd))
+ pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc = %d; nmdesc = %d\n",
+ scsi_bufflen(req->scmnd), desc_len, mr_len,
+ state->ndesc, state->nmdesc);
+}
+#endif
+
+/**
+ * srp_map_data() - map SCSI data buffer onto an SRP request
+ * @scmnd: SCSI command to map
+ * @ch: SRP RDMA channel
+ * @req: SRP request
+ *
+ * Returns the length in bytes of the SRP_CMD IU or a negative value if
+ * mapping failed.
+ */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_request *req)
{
@@ -1600,12 +1653,25 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
target->indirect_size, DMA_TO_DEVICE);
memset(&state, 0, sizeof(state));
+ state.desc = req->indirect_desc;
if (dev->use_fast_reg)
- srp_map_sg_fr(&state, ch, req, scat, count);
+ ret = srp_map_sg_fr(&state, ch, req, scat, count);
else if (dev->use_fmr)
- srp_map_sg_fmr(&state, ch, req, scat, count);
+ ret = srp_map_sg_fmr(&state, ch, req, scat, count);
else
- srp_map_sg_dma(&state, ch, req, scat, count);
+ ret = srp_map_sg_dma(&state, ch, req, scat, count);
+ req->nmdesc = state.nmdesc;
+ if (ret < 0)
+ goto unmap;
+
+#if defined(DYNAMIC_DEBUG)
+ {
+ DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
+ "Memory mapping consistency check");
+ if (unlikely(ddm.flags & _DPRINTK_FLAGS_PRINT))
+ srp_check_mapping(&state, ch, req, scat, count);
+ }
+#endif
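The consistency check above stays dormant until its dynamic-debug site is enabled at run time; on a kernel with CONFIG_DYNAMIC_DEBUG the usual control path would be something like the following (assumed, standard debugfs location):

	/*
	 * echo 'format "Memory mapping consistency check" +p' \
	 *     > /sys/kernel/debug/dynamic_debug/control
	 */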
/* We've mapped the request, now pull as much of the indirect
* descriptor table as we can into the command buffer. If this
@@ -1628,7 +1694,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
!target->allow_ext_sg)) {
shost_printk(KERN_ERR, target->scsi_host,
"Could not fit S/G list into SRP_CMD\n");
- return -EIO;
+ ret = -EIO;
+ goto unmap;
}
count = min(state.ndesc, target->cmd_sg_cnt);
@@ -1646,7 +1713,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
idb_len, &idb_rkey);
if (ret < 0)
- return ret;
+ goto unmap;
req->nmdesc++;
} else {
idb_rkey = cpu_to_be32(target->global_mr->rkey);
@@ -1672,6 +1739,12 @@ map_complete:
cmd->buf_fmt = fmt;
return len;
+
+unmap:
+ srp_unmap_data(scmnd, ch, req);
+ if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
+ ret = -E2BIG;
+ return ret;
}
/*
@@ -2564,6 +2637,20 @@ static int srp_reset_host(struct scsi_cmnd *scmnd)
return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}
+static int srp_slave_alloc(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct srp_target_port *target = host_to_target(shost);
+ struct srp_device *srp_dev = target->srp_host->srp_dev;
+ struct ib_device *ibdev = srp_dev->dev;
+
+ if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+ blk_queue_virt_boundary(sdev->request_queue,
+ ~srp_dev->mr_page_mask);
+
+ return 0;
+}
+
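The mask passed to blk_queue_virt_boundary() is the bitwise complement of mr_page_mask; with an assumed 4 KiB MR page size that comes out as 0xfff, which forbids segments that straddle a 4 KiB virtual address boundary -- exactly the one-HCA-page-per-entry constraint FMR/FR impose. A sketch under that assumption:

	#include <linux/blkdev.h>

	static void example_virt_boundary(struct request_queue *q)
	{
		u64 mr_page_size = 4096;		/* assumed HCA page size */
		u64 mr_page_mask = ~(mr_page_size - 1);	/* ...fffff000 */

		/* 0xfff: no segment may cross a 4 KiB virtual boundary */
		blk_queue_virt_boundary(q, ~mr_page_mask);
	}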
static int srp_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
@@ -2755,6 +2842,7 @@ static struct scsi_host_template srp_template = {
.module = THIS_MODULE,
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
+ .slave_alloc = srp_slave_alloc,
.slave_configure = srp_slave_configure,
.info = srp_target_info,
.queuecommand = srp_queuecommand,
@@ -2819,7 +2907,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
spin_unlock(&host->target_lock);
scsi_scan_target(&target->scsi_host->shost_gendev,
- 0, target->scsi_id, SCAN_WILD_CARD, 0);
+ 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
if (srp_connected_ch(target) < target->ch_count ||
target->qp_in_error) {
@@ -2829,7 +2917,7 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
goto out;
}
- pr_debug(PFX "%s: SCSI scan succeeded - detected %d LUNs\n",
+ pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
dev_name(&target->scsi_host->shost_gendev),
srp_sdev_count(target->scsi_host));
@@ -3097,7 +3185,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
case SRP_OPT_SG_TABLESIZE:
if (match_int(args, &token) || token < 1 ||
- token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
+ token > SG_MAX_SEGMENTS) {
pr_warn("bad max sg_tablesize parameter '%s'\n",
p);
goto out;
@@ -3161,6 +3249,7 @@ static ssize_t srp_create_target(struct device *dev,
struct srp_device *srp_dev = host->srp_dev;
struct ib_device *ibdev = srp_dev->dev;
int ret, node_idx, node, cpu, i;
+ unsigned int max_sectors_per_mr, mr_per_cmd = 0;
bool multich = false;
target_host = scsi_host_alloc(&srp_template,
@@ -3217,7 +3306,33 @@ static ssize_t srp_create_target(struct device *dev,
target->sg_tablesize = target->cmd_sg_cnt;
}
+ if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
+ /*
+ * FR and FMR can only map one HCA page per entry. If the
+ * start address is not aligned on a HCA page boundary two
+ * entries will be used for the head and the tail although
+ * these two entries combined contain at most one HCA page of
+ * data. Hence the "+ 1" in the calculation below.
+ *
+ * The indirect data buffer descriptor is contiguous so the
+ * memory for that buffer will only be registered if
+ * register_always is true. Hence add one to mr_per_cmd if
+ * register_always has been set.
+ */
+ max_sectors_per_mr = srp_dev->max_pages_per_mr <<
+ (ilog2(srp_dev->mr_page_size) - 9);
+ mr_per_cmd = register_always +
+ (target->scsi_host->max_sectors + 1 +
+ max_sectors_per_mr - 1) / max_sectors_per_mr;
+ pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
+ target->scsi_host->max_sectors,
+ srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
+ max_sectors_per_mr, mr_per_cmd);
+ }
+
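Plugging assumed numbers into the formula above makes the rounding visible (values are illustrative, not from the patch):

	/*
	 * mr_page_size = 4096, max_pages_per_mr = 256, max_sectors = 1024,
	 * register_always = 1:
	 *
	 *   max_sectors_per_mr = 256 << (ilog2(4096) - 9) = 256 << 3 = 2048
	 *   mr_per_cmd = 1 + (1024 + 1 + 2048 - 1) / 2048 = 1 + 1 = 2
	 *
	 * i.e. one MR covers the largest request, the "+ 1" absorbs the
	 * unaligned head/tail case, and the extra MR registers the indirect
	 * descriptor buffer because register_always is set.
	 */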
target_host->sg_tablesize = target->sg_tablesize;
+ target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
+ target->mr_per_cmd = mr_per_cmd;
target->indirect_size = target->sg_tablesize *
sizeof (struct srp_direct_buf);
target->max_iu_len = sizeof (struct srp_cmd) +
@@ -3410,21 +3525,10 @@ static void srp_add_one(struct ib_device *device)
int mr_page_shift, p;
u64 max_pages_per_mr;
- srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
+ srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
if (!srp_dev)
return;
- srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
- device->map_phys_fmr && device->unmap_fmr);
- srp_dev->has_fr = (device->attrs.device_cap_flags &
- IB_DEVICE_MEM_MGT_EXTENSIONS);
- if (!srp_dev->has_fmr && !srp_dev->has_fr)
- dev_warn(&device->dev, "neither FMR nor FR is supported\n");
-
- srp_dev->use_fast_reg = (srp_dev->has_fr &&
- (!srp_dev->has_fmr || prefer_fr));
- srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
-
/*
* Use the smallest page size supported by the HCA, down to a
* minimum of 4096 bytes. We're unlikely to build large sglists
@@ -3435,8 +3539,25 @@ static void srp_add_one(struct ib_device *device)
srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
max_pages_per_mr = device->attrs.max_mr_size;
do_div(max_pages_per_mr, srp_dev->mr_page_size);
+ pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
+ device->attrs.max_mr_size, srp_dev->mr_page_size,
+ max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
max_pages_per_mr);
+
+ srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
+ device->map_phys_fmr && device->unmap_fmr);
+ srp_dev->has_fr = (device->attrs.device_cap_flags &
+ IB_DEVICE_MEM_MGT_EXTENSIONS);
+ if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
+ dev_warn(&device->dev, "neither FMR nor FR is supported\n");
+ } else if (!never_register &&
+ device->attrs.max_mr_size >= 2 * srp_dev->mr_page_size) {
+ srp_dev->use_fast_reg = (srp_dev->has_fr &&
+ (!srp_dev->has_fmr || prefer_fr));
+ srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
+ }
+
if (srp_dev->use_fast_reg) {
srp_dev->max_pages_per_mr =
min_t(u32, srp_dev->max_pages_per_mr,
@@ -3456,15 +3577,14 @@ static void srp_add_one(struct ib_device *device)
if (IS_ERR(srp_dev->pd))
goto free_dev;
- if (!register_always || (!srp_dev->has_fmr && !srp_dev->has_fr)) {
+ if (never_register || !register_always ||
+ (!srp_dev->has_fmr && !srp_dev->has_fr)) {
srp_dev->global_mr = ib_get_dma_mr(srp_dev->pd,
IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE);
if (IS_ERR(srp_dev->global_mr))
goto err_pd;
- } else {
- srp_dev->global_mr = NULL;
}
for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 9e05ce4a0..26bb9b0a7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -202,6 +202,8 @@ struct srp_target_port {
char target_name[32];
unsigned int scsi_id;
unsigned int sg_tablesize;
+ int mr_pool_size;
+ int mr_per_cmd;
int queue_size;
int req_ring_size;
int comp_vector;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 8b42401d4..4a4155640 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -254,8 +254,8 @@ static void srpt_get_class_port_info(struct ib_dm_mad *mad)
memset(cif, 0, sizeof(*cif));
cif->base_version = 1;
cif->class_version = 1;
- cif->resp_time_value = 20;
+ ib_set_cpi_resp_time(cif, 20);
mad->mad_hdr.status = 0;
}
@@ -765,52 +765,6 @@ static int srpt_post_recv(struct srpt_device *sdev,
}
/**
- * srpt_post_send() - Post an IB send request.
- *
- * Returns zero upon success and a non-zero value upon failure.
- */
-static int srpt_post_send(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx, int len)
-{
- struct ib_sge list;
- struct ib_send_wr wr, *bad_wr;
- struct srpt_device *sdev = ch->sport->sdev;
- int ret;
-
- atomic_inc(&ch->req_lim);
-
- ret = -ENOMEM;
- if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
- pr_warn("IB send queue full (needed 1)\n");
- goto out;
- }
-
- ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
- DMA_TO_DEVICE);
-
- list.addr = ioctx->ioctx.dma;
- list.length = len;
- list.lkey = sdev->pd->local_dma_lkey;
-
- ioctx->ioctx.cqe.done = srpt_send_done;
- wr.next = NULL;
- wr.wr_cqe = &ioctx->ioctx.cqe;
- wr.sg_list = &list;
- wr.num_sge = 1;
- wr.opcode = IB_WR_SEND;
- wr.send_flags = IB_SEND_SIGNALED;
-
- ret = ib_post_send(ch->qp, &wr, &bad_wr);
-
-out:
- if (ret < 0) {
- atomic_inc(&ch->sq_wr_avail);
- atomic_dec(&ch->req_lim);
- }
- return ret;
-}
-
-/**
* srpt_zerolength_write() - Perform a zero-length RDMA write.
*
* A quote from the InfiniBand specification: C9-88: For an HCA responder
@@ -843,6 +797,110 @@ static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
}
}
+static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
+ struct srp_direct_buf *db, int nbufs, struct scatterlist **sg,
+ unsigned *sg_cnt)
+{
+ enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
+ struct scatterlist *prev = NULL;
+ unsigned prev_nents;
+ int ret, i;
+
+ if (nbufs == 1) {
+ ioctx->rw_ctxs = &ioctx->s_rw_ctx;
+ } else {
+ ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
+ GFP_KERNEL);
+ if (!ioctx->rw_ctxs)
+ return -ENOMEM;
+ }
+
+ for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+ u64 remote_addr = be64_to_cpu(db->va);
+ u32 size = be32_to_cpu(db->len);
+ u32 rkey = be32_to_cpu(db->key);
+
+ ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false,
+ i < nbufs - 1);
+ if (ret)
+ goto unwind;
+
+ ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
+ ctx->sg, ctx->nents, 0, remote_addr, rkey, dir);
+ if (ret < 0) {
+ target_free_sgl(ctx->sg, ctx->nents);
+ goto unwind;
+ }
+
+ ioctx->n_rdma += ret;
+ ioctx->n_rw_ctx++;
+
+ if (prev) {
+ sg_unmark_end(&prev[prev_nents - 1]);
+ sg_chain(prev, prev_nents + 1, ctx->sg);
+ } else {
+ *sg = ctx->sg;
+ }
+
+ prev = ctx->sg;
+ prev_nents = ctx->nents;
+
+ *sg_cnt += ctx->nents;
+ }
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+
+ rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
+ ctx->sg, ctx->nents, dir);
+ target_free_sgl(ctx->sg, ctx->nents);
+ }
+ if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
+ kfree(ioctx->rw_ctxs);
+ return ret;
+}
+
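The "prev_nents + 1" above is not an off-by-one: for every descriptor except the last, target_alloc_sgl() is asked (final argument true) to allocate one spare scatterlist slot, and sg_chain() converts that spare slot into the chain link. A reduced sketch of the pattern, assuming the 4.7-era target_alloc_sgl() signature and header:

	#include <linux/scatterlist.h>
	#include <target/target_core_fabric.h>	/* target_alloc_sgl(), assumed */

	static int chain_two_sgls(struct scatterlist **head, u32 len_a, u32 len_b)
	{
		struct scatterlist *sgl_a, *sgl_b;
		unsigned int nents_a, nents_b;
		int ret;

		/* last argument true: reserve one extra entry for the link */
		ret = target_alloc_sgl(&sgl_a, &nents_a, len_a, false, true);
		if (ret)
			return ret;
		ret = target_alloc_sgl(&sgl_b, &nents_b, len_b, false, false);
		if (ret) {
			target_free_sgl(sgl_a, nents_a);
			return ret;
		}

		sg_unmark_end(&sgl_a[nents_a - 1]);	/* no longer the end */
		sg_chain(sgl_a, nents_a + 1, sgl_b);	/* spare slot -> link */

		*head = sgl_a;
		return 0;
	}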
+static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
+ int i;
+
+ for (i = 0; i < ioctx->n_rw_ctx; i++) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+
+ rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
+ ctx->sg, ctx->nents, dir);
+ target_free_sgl(ctx->sg, ctx->nents);
+ }
+
+ if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
+ kfree(ioctx->rw_ctxs);
+}
+
+static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
+{
+ /*
+ * The pointer computations below will only be compiled correctly
+ * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
+ * whether srp_cmd::add_data has been declared as a byte pointer.
+ */
+ BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) &&
+ !__same_type(srp_cmd->add_data[0], (u8)0));
+
+ /*
+ * According to the SRP spec, the lower two bits of the 'ADDITIONAL
+ * CDB LENGTH' field are reserved and the size in bytes of this field
+ * is four times the value specified in bits 2..7. Hence the "& ~3".
+ */
+ return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3);
+}
+
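A worked example of the masking, with a hypothetical add_cdb_len value:

	/*
	 * add_cdb_len = 0x13: "& ~3" drops the two reserved low-order bits,
	 * giving 0x10 -- the additional CDB occupies 16 bytes (the 6-bit
	 * length field holds 0x13 >> 2 = 4, in units of four bytes), so the
	 * data descriptors start 16 bytes into add_data.
	 */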
/**
* srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
* @ioctx: Pointer to the I/O context associated with the request.
@@ -858,94 +916,59 @@ static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
* -ENOMEM when memory allocation fails and zero upon success.
*/
static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
- struct srp_cmd *srp_cmd,
- enum dma_data_direction *dir, u64 *data_len)
+ struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
+ struct scatterlist **sg, unsigned *sg_cnt, u64 *data_len)
{
- struct srp_indirect_buf *idb;
- struct srp_direct_buf *db;
- unsigned add_cdb_offset;
- int ret;
-
- /*
- * The pointer computations below will only be compiled correctly
- * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
- * whether srp_cmd::add_data has been declared as a byte pointer.
- */
- BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
- && !__same_type(srp_cmd->add_data[0], (u8)0));
-
BUG_ON(!dir);
BUG_ON(!data_len);
- ret = 0;
- *data_len = 0;
-
/*
* The lower four bits of the buffer format field contain the DATA-IN
* buffer descriptor format, and the highest four bits contain the
* DATA-OUT buffer descriptor format.
*/
- *dir = DMA_NONE;
if (srp_cmd->buf_fmt & 0xf)
/* DATA-IN: transfer data from target to initiator (read). */
*dir = DMA_FROM_DEVICE;
else if (srp_cmd->buf_fmt >> 4)
/* DATA-OUT: transfer data from initiator to target (write). */
*dir = DMA_TO_DEVICE;
+ else
+ *dir = DMA_NONE;
+
+ /* initialize data_direction early as srpt_alloc_rw_ctxs needs it */
+ ioctx->cmd.data_direction = *dir;
- /*
- * According to the SRP spec, the lower two bits of the 'ADDITIONAL
- * CDB LENGTH' field are reserved and the size in bytes of this field
- * is four times the value specified in bits 3..7. Hence the "& ~3".
- */
- add_cdb_offset = srp_cmd->add_cdb_len & ~3;
if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
- ioctx->n_rbuf = 1;
- ioctx->rbufs = &ioctx->single_rbuf;
+ struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);
- db = (struct srp_direct_buf *)(srp_cmd->add_data
- + add_cdb_offset);
- memcpy(ioctx->rbufs, db, sizeof(*db));
*data_len = be32_to_cpu(db->len);
+ return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
- idb = (struct srp_indirect_buf *)(srp_cmd->add_data
- + add_cdb_offset);
-
- ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof(*db);
+ struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd);
+ int nbufs = be32_to_cpu(idb->table_desc.len) /
+ sizeof(struct srp_direct_buf);
- if (ioctx->n_rbuf >
+ if (nbufs >
(srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
pr_err("received unsupported SRP_CMD request"
" type (%u out + %u in != %u / %zu)\n",
srp_cmd->data_out_desc_cnt,
srp_cmd->data_in_desc_cnt,
be32_to_cpu(idb->table_desc.len),
- sizeof(*db));
- ioctx->n_rbuf = 0;
- ret = -EINVAL;
- goto out;
+ sizeof(struct srp_direct_buf));
+ return -EINVAL;
}
- if (ioctx->n_rbuf == 1)
- ioctx->rbufs = &ioctx->single_rbuf;
- else {
- ioctx->rbufs =
- kmalloc(ioctx->n_rbuf * sizeof(*db), GFP_ATOMIC);
- if (!ioctx->rbufs) {
- ioctx->n_rbuf = 0;
- ret = -ENOMEM;
- goto out;
- }
- }
-
- db = idb->desc_list;
- memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof(*db));
*data_len = be32_to_cpu(idb->len);
+ return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
+ sg, sg_cnt);
+ } else {
+ *data_len = 0;
+ return 0;
}
-out:
- return ret;
}
/**
@@ -1049,217 +1072,6 @@ static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
}
/**
- * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
- */
-static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx)
-{
- struct scatterlist *sg;
- enum dma_data_direction dir;
-
- BUG_ON(!ch);
- BUG_ON(!ioctx);
- BUG_ON(ioctx->n_rdma && !ioctx->rdma_wrs);
-
- while (ioctx->n_rdma)
- kfree(ioctx->rdma_wrs[--ioctx->n_rdma].wr.sg_list);
-
- kfree(ioctx->rdma_wrs);
- ioctx->rdma_wrs = NULL;
-
- if (ioctx->mapped_sg_count) {
- sg = ioctx->sg;
- WARN_ON(!sg);
- dir = ioctx->cmd.data_direction;
- BUG_ON(dir == DMA_NONE);
- ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
- target_reverse_dma_direction(&ioctx->cmd));
- ioctx->mapped_sg_count = 0;
- }
-}
-
-/**
- * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
- */
-static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx)
-{
- struct ib_device *dev = ch->sport->sdev->device;
- struct se_cmd *cmd;
- struct scatterlist *sg, *sg_orig;
- int sg_cnt;
- enum dma_data_direction dir;
- struct ib_rdma_wr *riu;
- struct srp_direct_buf *db;
- dma_addr_t dma_addr;
- struct ib_sge *sge;
- u64 raddr;
- u32 rsize;
- u32 tsize;
- u32 dma_len;
- int count, nrdma;
- int i, j, k;
-
- BUG_ON(!ch);
- BUG_ON(!ioctx);
- cmd = &ioctx->cmd;
- dir = cmd->data_direction;
- BUG_ON(dir == DMA_NONE);
-
- ioctx->sg = sg = sg_orig = cmd->t_data_sg;
- ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;
-
- count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
- target_reverse_dma_direction(cmd));
- if (unlikely(!count))
- return -EAGAIN;
-
- ioctx->mapped_sg_count = count;
-
- if (ioctx->rdma_wrs && ioctx->n_rdma_wrs)
- nrdma = ioctx->n_rdma_wrs;
- else {
- nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
- + ioctx->n_rbuf;
-
- ioctx->rdma_wrs = kcalloc(nrdma, sizeof(*ioctx->rdma_wrs),
- GFP_KERNEL);
- if (!ioctx->rdma_wrs)
- goto free_mem;
-
- ioctx->n_rdma_wrs = nrdma;
- }
-
- db = ioctx->rbufs;
- tsize = cmd->data_length;
- dma_len = ib_sg_dma_len(dev, &sg[0]);
- riu = ioctx->rdma_wrs;
-
- /*
- * For each remote desc - calculate the #ib_sge.
- * If #ib_sge < SRPT_DEF_SG_PER_WQE per rdma operation then
- * each remote desc rdma_iu is required a rdma wr;
- * else
- * we need to allocate extra rdma_iu to carry extra #ib_sge in
- * another rdma wr
- */
- for (i = 0, j = 0;
- j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
- rsize = be32_to_cpu(db->len);
- raddr = be64_to_cpu(db->va);
- riu->remote_addr = raddr;
- riu->rkey = be32_to_cpu(db->key);
- riu->wr.num_sge = 0;
-
- /* calculate how many sge required for this remote_buf */
- while (rsize > 0 && tsize > 0) {
-
- if (rsize >= dma_len) {
- tsize -= dma_len;
- rsize -= dma_len;
- raddr += dma_len;
-
- if (tsize > 0) {
- ++j;
- if (j < count) {
- sg = sg_next(sg);
- dma_len = ib_sg_dma_len(
- dev, sg);
- }
- }
- } else {
- tsize -= rsize;
- dma_len -= rsize;
- rsize = 0;
- }
-
- ++riu->wr.num_sge;
-
- if (rsize > 0 &&
- riu->wr.num_sge == SRPT_DEF_SG_PER_WQE) {
- ++ioctx->n_rdma;
- riu->wr.sg_list = kmalloc_array(riu->wr.num_sge,
- sizeof(*riu->wr.sg_list),
- GFP_KERNEL);
- if (!riu->wr.sg_list)
- goto free_mem;
-
- ++riu;
- riu->wr.num_sge = 0;
- riu->remote_addr = raddr;
- riu->rkey = be32_to_cpu(db->key);
- }
- }
-
- ++ioctx->n_rdma;
- riu->wr.sg_list = kmalloc_array(riu->wr.num_sge,
- sizeof(*riu->wr.sg_list),
- GFP_KERNEL);
- if (!riu->wr.sg_list)
- goto free_mem;
- }
-
- db = ioctx->rbufs;
- tsize = cmd->data_length;
- riu = ioctx->rdma_wrs;
- sg = sg_orig;
- dma_len = ib_sg_dma_len(dev, &sg[0]);
- dma_addr = ib_sg_dma_address(dev, &sg[0]);
-
- /* this second loop is really mapped sg_addres to rdma_iu->ib_sge */
- for (i = 0, j = 0;
- j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
- rsize = be32_to_cpu(db->len);
- sge = riu->wr.sg_list;
- k = 0;
-
- while (rsize > 0 && tsize > 0) {
- sge->addr = dma_addr;
- sge->lkey = ch->sport->sdev->pd->local_dma_lkey;
-
- if (rsize >= dma_len) {
- sge->length =
- (tsize < dma_len) ? tsize : dma_len;
- tsize -= dma_len;
- rsize -= dma_len;
-
- if (tsize > 0) {
- ++j;
- if (j < count) {
- sg = sg_next(sg);
- dma_len = ib_sg_dma_len(
- dev, sg);
- dma_addr = ib_sg_dma_address(
- dev, sg);
- }
- }
- } else {
- sge->length = (tsize < rsize) ? tsize : rsize;
- tsize -= rsize;
- dma_len -= rsize;
- dma_addr += rsize;
- rsize = 0;
- }
-
- ++k;
- if (k == riu->wr.num_sge && rsize > 0 && tsize > 0) {
- ++riu;
- sge = riu->wr.sg_list;
- k = 0;
- } else if (rsize > 0 && tsize > 0)
- ++sge;
- }
- }
-
- return 0;
-
-free_mem:
- srpt_unmap_sg_to_ib_sge(ch, ioctx);
-
- return -ENOMEM;
-}
-
-/**
* srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
*/
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
@@ -1284,12 +1096,8 @@ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
BUG_ON(ioctx->ch != ch);
spin_lock_init(&ioctx->spinlock);
ioctx->state = SRPT_STATE_NEW;
- ioctx->n_rbuf = 0;
- ioctx->rbufs = NULL;
ioctx->n_rdma = 0;
- ioctx->n_rdma_wrs = 0;
- ioctx->rdma_wrs = NULL;
- ioctx->mapped_sg_count = 0;
+ ioctx->n_rw_ctx = 0;
init_completion(&ioctx->tx_done);
ioctx->queue_status_only = false;
/*
@@ -1359,7 +1167,6 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
* SRP_RSP sending failed or the SRP_RSP send completion has
* not been received in time.
*/
- srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
transport_generic_free_cmd(&ioctx->cmd, 0);
break;
case SRPT_STATE_MGMT_RSP_SENT:
@@ -1387,6 +1194,7 @@ static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(ioctx->n_rdma <= 0);
atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+ ioctx->n_rdma = 0;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
@@ -1403,23 +1211,6 @@ static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
__LINE__, srpt_get_cmd_state(ioctx));
}
-static void srpt_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- struct srpt_send_ioctx *ioctx =
- container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
-
- if (unlikely(wc->status != IB_WC_SUCCESS)) {
- /*
- * Note: if an RDMA write error completion is received that
- * means that a SEND also has been posted. Defer further
- * processing of the associated command until the send error
- * completion has been received.
- */
- pr_info("RDMA_WRITE for ioctx 0x%p failed with status %d\n",
- ioctx, wc->status);
- }
-}
-
/**
* srpt_build_cmd_rsp() - Build an SRP_RSP response.
* @ch: RDMA channel through which the request has been received.
@@ -1537,6 +1328,8 @@ static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
{
struct se_cmd *cmd;
struct srp_cmd *srp_cmd;
+ struct scatterlist *sg = NULL;
+ unsigned sg_cnt = 0;
u64 data_len;
enum dma_data_direction dir;
int rc;
@@ -1563,16 +1356,21 @@ static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
break;
}
- if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
- pr_err("0x%llx: parsing SRP descriptor table failed.\n",
- srp_cmd->tag);
+ rc = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &sg, &sg_cnt,
+ &data_len);
+ if (rc) {
+ if (rc != -EAGAIN) {
+ pr_err("0x%llx: parsing SRP descriptor table failed.\n",
+ srp_cmd->tag);
+ }
goto release_ioctx;
}
- rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
+ rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
&send_ioctx->sense_data[0],
scsilun_to_int(&srp_cmd->lun), data_len,
- TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+ TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
+ sg, sg_cnt, NULL, 0, NULL, 0);
if (rc != 0) {
pr_debug("target_submit_cmd_map_sgls() returned %d for tag %#llx\n", rc,
srp_cmd->tag);
@@ -1664,23 +1462,21 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
recv_ioctx->ioctx.dma, srp_max_req_size,
DMA_FROM_DEVICE);
- if (unlikely(ch->state == CH_CONNECTING)) {
- list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
- goto out;
- }
+ if (unlikely(ch->state == CH_CONNECTING))
+ goto out_wait;
if (unlikely(ch->state != CH_LIVE))
- goto out;
+ return;
srp_cmd = recv_ioctx->ioctx.buf;
if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
- if (!send_ioctx)
+ if (!send_ioctx) {
+ if (!list_empty(&ch->cmd_wait_list))
+ goto out_wait;
send_ioctx = srpt_get_send_ioctx(ch);
- if (unlikely(!send_ioctx)) {
- list_add_tail(&recv_ioctx->wait_list,
- &ch->cmd_wait_list);
- goto out;
}
+ if (unlikely(!send_ioctx))
+ goto out_wait;
}
switch (srp_cmd->opcode) {
@@ -1709,8 +1505,10 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
}
srpt_post_recv(ch->sport->sdev, recv_ioctx);
-out:
return;
+
+out_wait:
+ list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
}
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1779,14 +1577,13 @@ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
state != SRPT_STATE_MGMT_RSP_SENT);
- atomic_inc(&ch->sq_wr_avail);
+ atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
if (wc->status != IB_WC_SUCCESS)
pr_info("sending response for ioctx 0x%p failed"
" with status %d\n", ioctx, wc->status);
if (state != SRPT_STATE_DONE) {
- srpt_unmap_sg_to_ib_sge(ch, ioctx);
transport_generic_free_cmd(&ioctx->cmd, 0);
} else {
pr_err("IB completion has been received too late for"
@@ -1832,8 +1629,17 @@ retry:
qp_init->srq = sdev->srq;
qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
qp_init->qp_type = IB_QPT_RC;
- qp_init->cap.max_send_wr = srp_sq_size;
+ /*
+ * We divide up our send queue size into half SEND WRs to send the
+ * completions, and half R/W contexts to actually do the RDMA
+ * READ/WRITE transfers. Note that we need to allocate CQ slots for
+ * both, as RDMA contexts will also post completions for the
+ * RDMA READ case.
+ */
+ qp_init->cap.max_send_wr = srp_sq_size / 2;
+ qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
+ qp_init->port_num = ch->sport->port;
ch->qp = ib_create_qp(sdev->pd, qp_init);
if (IS_ERR(ch->qp)) {
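With the driver's default srp_sq_size (4096, if memory serves for this module parameter) the split gives each half 2048 entries:

	/*
	 * srp_sq_size = 4096 (assumed default):
	 *   qp_init->cap.max_send_wr   = 2048  -- SRP response SEND WRs
	 *   qp_init->cap.max_rdma_ctxs = 2048  -- rdma_rw read/write contexts
	 */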
@@ -1960,14 +1766,6 @@ static void __srpt_close_all_ch(struct srpt_device *sdev)
}
}
-/**
- * srpt_shutdown_session() - Whether or not a session may be shut down.
- */
-static int srpt_shutdown_session(struct se_session *se_sess)
-{
- return 1;
-}
-
static void srpt_free_ch(struct kref *kref)
{
struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
@@ -2386,95 +2184,6 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
return ret;
}
-/**
- * srpt_perform_rdmas() - Perform IB RDMA.
- *
- * Returns zero upon success or a negative number upon failure.
- */
-static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx)
-{
- struct ib_send_wr *bad_wr;
- int sq_wr_avail, ret, i;
- enum dma_data_direction dir;
- const int n_rdma = ioctx->n_rdma;
-
- dir = ioctx->cmd.data_direction;
- if (dir == DMA_TO_DEVICE) {
- /* write */
- ret = -ENOMEM;
- sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
- if (sq_wr_avail < 0) {
- pr_warn("IB send queue full (needed %d)\n",
- n_rdma);
- goto out;
- }
- }
-
- for (i = 0; i < n_rdma; i++) {
- struct ib_send_wr *wr = &ioctx->rdma_wrs[i].wr;
-
- wr->opcode = (dir == DMA_FROM_DEVICE) ?
- IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
-
- if (i == n_rdma - 1) {
- /* only get completion event for the last rdma read */
- if (dir == DMA_TO_DEVICE) {
- wr->send_flags = IB_SEND_SIGNALED;
- ioctx->rdma_cqe.done = srpt_rdma_read_done;
- } else {
- ioctx->rdma_cqe.done = srpt_rdma_write_done;
- }
- wr->wr_cqe = &ioctx->rdma_cqe;
- wr->next = NULL;
- } else {
- wr->wr_cqe = NULL;
- wr->next = &ioctx->rdma_wrs[i + 1].wr;
- }
- }
-
- ret = ib_post_send(ch->qp, &ioctx->rdma_wrs->wr, &bad_wr);
- if (ret)
- pr_err("%s[%d]: ib_post_send() returned %d for %d/%d\n",
- __func__, __LINE__, ret, i, n_rdma);
-out:
- if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
- atomic_add(n_rdma, &ch->sq_wr_avail);
- return ret;
-}
-
-/**
- * srpt_xfer_data() - Start data transfer from initiator to target.
- */
-static int srpt_xfer_data(struct srpt_rdma_ch *ch,
- struct srpt_send_ioctx *ioctx)
-{
- int ret;
-
- ret = srpt_map_sg_to_ib_sge(ch, ioctx);
- if (ret) {
- pr_err("%s[%d] ret=%d\n", __func__, __LINE__, ret);
- goto out;
- }
-
- ret = srpt_perform_rdmas(ch, ioctx);
- if (ret) {
- if (ret == -EAGAIN || ret == -ENOMEM)
- pr_info("%s[%d] queue full -- ret=%d\n",
- __func__, __LINE__, ret);
- else
- pr_err("%s[%d] fatal error -- ret=%d\n",
- __func__, __LINE__, ret);
- goto out_unmap;
- }
-
-out:
- return ret;
-out_unmap:
- srpt_unmap_sg_to_ib_sge(ch, ioctx);
- goto out;
-}
-
static int srpt_write_pending_status(struct se_cmd *se_cmd)
{
struct srpt_send_ioctx *ioctx;
@@ -2491,11 +2200,42 @@ static int srpt_write_pending(struct se_cmd *se_cmd)
struct srpt_send_ioctx *ioctx =
container_of(se_cmd, struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
+ struct ib_send_wr *first_wr = NULL, *bad_wr;
+ struct ib_cqe *cqe = &ioctx->rdma_cqe;
enum srpt_command_state new_state;
+ int ret, i;
new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
WARN_ON(new_state == SRPT_STATE_DONE);
- return srpt_xfer_data(ch, ioctx);
+
+ if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
+ pr_warn("%s: IB send queue full (needed %d)\n",
+ __func__, ioctx->n_rdma);
+ ret = -ENOMEM;
+ goto out_undo;
+ }
+
+ cqe->done = srpt_rdma_read_done;
+ for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+
+ first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
+ cqe, first_wr);
+ cqe = NULL;
+ }
+
+ ret = ib_post_send(ch->qp, first_wr, &bad_wr);
+ if (ret) {
+ pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
+ __func__, ret, ioctx->n_rdma,
+ atomic_read(&ch->sq_wr_avail));
+ goto out_undo;
+ }
+
+ return 0;
+out_undo:
+ atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+ return ret;
}
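Walking rw_ctxs from the highest index down builds one forward-ordered WR chain: every rdma_rw_ctx_wrs() call prepends its context's WRs to first_wr, and only the first call -- the context that ends up last in the chain -- carries the cqe, so a single completion, srpt_rdma_read_done(), fires once all RDMA READs are done. With an assumed n_rw_ctx of 3 the posted chain looks like:

	/*
	 * first_wr -> ctx[0] WRs -> ctx[1] WRs -> ctx[2] WRs
	 *                                          ^ last WR signals
	 *                                            ioctx->rdma_cqe
	 */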
static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
@@ -2517,17 +2257,17 @@ static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
*/
static void srpt_queue_response(struct se_cmd *cmd)
{
- struct srpt_rdma_ch *ch;
- struct srpt_send_ioctx *ioctx;
+ struct srpt_send_ioctx *ioctx =
+ container_of(cmd, struct srpt_send_ioctx, cmd);
+ struct srpt_rdma_ch *ch = ioctx->ch;
+ struct srpt_device *sdev = ch->sport->sdev;
+ struct ib_send_wr send_wr, *first_wr = NULL, *bad_wr;
+ struct ib_sge sge;
enum srpt_command_state state;
unsigned long flags;
- int ret;
- enum dma_data_direction dir;
- int resp_len;
+ int resp_len, ret, i;
u8 srp_tm_status;
- ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
- ch = ioctx->ch;
BUG_ON(!ch);
spin_lock_irqsave(&ioctx->spinlock, flags);
@@ -2554,17 +2294,19 @@ static void srpt_queue_response(struct se_cmd *cmd)
return;
}
- dir = ioctx->cmd.data_direction;
-
/* For read commands, transfer the data to the initiator. */
- if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
+ if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
+ ioctx->cmd.data_length &&
!ioctx->queue_status_only) {
- ret = srpt_xfer_data(ch, ioctx);
- if (ret) {
- pr_err("xfer_data failed for tag %llu\n",
- ioctx->cmd.tag);
- return;
+ for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
+ struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
+
+ first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
+ ch->sport->port, NULL,
+ first_wr ? first_wr : &send_wr);
}
+ } else {
+ first_wr = &send_wr;
}
if (state != SRPT_STATE_MGMT)
@@ -2576,14 +2318,46 @@ static void srpt_queue_response(struct se_cmd *cmd)
resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
ioctx->cmd.tag);
}
- ret = srpt_post_send(ch, ioctx, resp_len);
- if (ret) {
- pr_err("sending cmd response failed for tag %llu\n",
- ioctx->cmd.tag);
- srpt_unmap_sg_to_ib_sge(ch, ioctx);
- srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
- target_put_sess_cmd(&ioctx->cmd);
+
+ atomic_inc(&ch->req_lim);
+
+ if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
+ &ch->sq_wr_avail) < 0)) {
+ pr_warn("%s: IB send queue full (needed %d)\n",
+ __func__, ioctx->n_rdma);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
+ DMA_TO_DEVICE);
+
+ sge.addr = ioctx->ioctx.dma;
+ sge.length = resp_len;
+ sge.lkey = sdev->pd->local_dma_lkey;
+
+ ioctx->ioctx.cqe.done = srpt_send_done;
+ send_wr.next = NULL;
+ send_wr.wr_cqe = &ioctx->ioctx.cqe;
+ send_wr.sg_list = &sge;
+ send_wr.num_sge = 1;
+ send_wr.opcode = IB_WR_SEND;
+ send_wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(ch->qp, first_wr, &bad_wr);
+ if (ret < 0) {
+ pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
+ __func__, ioctx->cmd.tag, ret);
+ goto out;
}
+
+ return;
+
+out:
+ atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
+ atomic_dec(&ch->req_lim);
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+ target_put_sess_cmd(&ioctx->cmd);
}
static int srpt_queue_data_in(struct se_cmd *cmd)
@@ -2599,10 +2373,6 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd)
static void srpt_aborted_task(struct se_cmd *cmd)
{
- struct srpt_send_ioctx *ioctx = container_of(cmd,
- struct srpt_send_ioctx, cmd);
-
- srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
}
static int srpt_queue_status(struct se_cmd *cmd)
@@ -2903,12 +2673,10 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
unsigned long flags;
WARN_ON(ioctx->state != SRPT_STATE_DONE);
- WARN_ON(ioctx->mapped_sg_count != 0);
- if (ioctx->n_rbuf > 1) {
- kfree(ioctx->rbufs);
- ioctx->rbufs = NULL;
- ioctx->n_rbuf = 0;
+ if (ioctx->n_rw_ctx) {
+ srpt_free_rw_ctxs(ch, ioctx);
+ ioctx->n_rw_ctx = 0;
}
spin_lock_irqsave(&ch->spinlock, flags);
@@ -3287,7 +3055,6 @@ static const struct target_core_fabric_ops srpt_template = {
.tpg_get_inst_index = srpt_tpg_get_inst_index,
.release_cmd = srpt_release_cmd,
.check_stop_free = srpt_check_stop_free,
- .shutdown_session = srpt_shutdown_session,
.close_session = srpt_close_session,
.sess_get_index = srpt_sess_get_index,
.sess_get_initiator_sid = NULL,
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index af9b8b527..389030487 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -42,6 +42,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cm.h>
+#include <rdma/rw.h>
#include <scsi/srp.h>
@@ -174,21 +175,17 @@ struct srpt_recv_ioctx {
struct srpt_ioctx ioctx;
struct list_head wait_list;
};
+
+struct srpt_rw_ctx {
+ struct rdma_rw_ctx rw;
+ struct scatterlist *sg;
+ unsigned int nents;
+};
/**
* struct srpt_send_ioctx - SRPT send I/O context.
* @ioctx: See above.
* @ch: Channel pointer.
- * @free_list: Node in srpt_rdma_ch.free_list.
- * @n_rbuf: Number of data buffers in the received SRP command.
- * @rbufs: Pointer to SRP data buffer array.
- * @single_rbuf: SRP data buffer if the command has only a single buffer.
- * @sg: Pointer to sg-list associated with this I/O context.
- * @sg_cnt: SG-list size.
- * @mapped_sg_count: ib_dma_map_sg() return value.
- * @n_rdma_wrs: Number of elements in the rdma_wrs array.
- * @rdma_wrs: Array with information about the RDMA mapping.
- * @tag: Tag of the received SRP information unit.
* @spinlock: Protects 'state'.
* @state: I/O context state.
* @cmd: Target core command data structure.
@@ -197,21 +194,18 @@ struct srpt_recv_ioctx {
struct srpt_send_ioctx {
struct srpt_ioctx ioctx;
struct srpt_rdma_ch *ch;
- struct ib_rdma_wr *rdma_wrs;
+
+ struct srpt_rw_ctx s_rw_ctx;
+ struct srpt_rw_ctx *rw_ctxs;
+
struct ib_cqe rdma_cqe;
- struct srp_direct_buf *rbufs;
- struct srp_direct_buf single_rbuf;
- struct scatterlist *sg;
struct list_head free_list;
spinlock_t spinlock;
enum srpt_command_state state;
struct se_cmd cmd;
struct completion tx_done;
- int sg_cnt;
- int mapped_sg_count;
- u16 n_rdma_wrs;
u8 n_rdma;
- u8 n_rbuf;
+ u8 n_rw_ctx;
bool queue_status_only;
u8 sense_data[TRANSPORT_SENSE_BUFFER];
};
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 6f8b084e1..3d8ff09eb 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -143,9 +143,9 @@ struct analog_port {
#include <linux/i8253.h>
-#define GET_TIME(x) do { if (cpu_has_tsc) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
-#define DELTA(x,y) (cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
-#define TIME_NAME (cpu_has_tsc?"TSC":"PIT")
+#define GET_TIME(x) do { if (boot_cpu_has(X86_FEATURE_TSC)) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
+#define DELTA(x,y) (boot_cpu_has(X86_FEATURE_TSC) ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
+#define TIME_NAME (boot_cpu_has(X86_FEATURE_TSC)?"TSC":"PIT")
static unsigned int get_time_pit(void)
{
unsigned long flags;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index ca62a6e11..a529a4535 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -87,7 +87,7 @@
#define DRIVER_AUTHOR "Marko Friedemann <mfr@bmx-chemnitz.de>"
#define DRIVER_DESC "X-Box pad driver"
-#define XPAD_PKT_LEN 32
+#define XPAD_PKT_LEN 64
/* xbox d-pads should map to buttons, as is required for DDR pads
but we map them to axes when possible to simplify things */
@@ -129,6 +129,7 @@ static const struct xpad_device {
{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
+ { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -173,9 +174,11 @@ static const struct xpad_device {
{ 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0139, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
@@ -183,6 +186,7 @@ static const struct xpad_device {
{ 0x0f0d, 0x000a, "Hori Co. DOA4 FightStick", 0, XTYPE_XBOX360 },
{ 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
@@ -199,6 +203,7 @@ static const struct xpad_device {
{ 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x542a, "Xbox ONE spectra", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -212,6 +217,8 @@ static const struct xpad_device {
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x541a, "PowerA Xbox One Mini Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x543a, "PowerA Xbox One wired controller", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 },
@@ -307,13 +314,16 @@ static struct usb_device_id xpad_table[] = {
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
+ XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band Guitar and Drums */
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
@@ -1021,17 +1031,17 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
case XTYPE_XBOXONE:
packet->data[0] = 0x09; /* activate rumble */
- packet->data[1] = 0x08;
+ packet->data[1] = 0x00;
packet->data[2] = xpad->odata_serial++;
- packet->data[3] = 0x08; /* continuous effect */
- packet->data[4] = 0x00; /* simple rumble mode */
- packet->data[5] = 0x03; /* L and R actuator only */
- packet->data[6] = 0x00; /* TODO: LT actuator */
- packet->data[7] = 0x00; /* TODO: RT actuator */
+ packet->data[3] = 0x09;
+ packet->data[4] = 0x00;
+ packet->data[5] = 0x0F;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
packet->data[8] = strong / 512; /* left actuator */
packet->data[9] = weak / 512; /* right actuator */
- packet->data[10] = 0x80; /* length of pulse */
- packet->data[11] = 0x00; /* stop period of pulse */
+ packet->data[10] = 0xFF;
+ packet->data[11] = 0x00;
packet->data[12] = 0x00;
packet->len = 13;
packet->pending = true;
@@ -1421,22 +1431,15 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
int ep_irq_in_idx;
int i, error;
+ if (intf->cur_altsetting->desc.bNumEndpoints != 2)
+ return -ENODEV;
+
for (i = 0; xpad_device[i].idVendor; i++) {
if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
(le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
break;
}
- if (xpad_device[i].xtype == XTYPE_XBOXONE &&
- intf->cur_altsetting->desc.bInterfaceNumber != 0) {
- /*
- * The Xbox One controller lists three interfaces all with the
- * same interface class, subclass and protocol. Differentiate by
- * interface number.
- */
- return -ENODEV;
- }
-
xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
if (!xpad)
return -ENOMEM;
@@ -1468,6 +1471,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
if (intf->cur_altsetting->desc.bInterfaceProtocol == 129)
xpad->xtype = XTYPE_XBOX360W;
+ else if (intf->cur_altsetting->desc.bInterfaceProtocol == 208)
+ xpad->xtype = XTYPE_XBOXONE;
else
xpad->xtype = XTYPE_XBOX360;
} else {
@@ -1482,6 +1487,17 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
xpad->mapping |= MAP_STICKS_TO_NULL;
}
+ if (xpad->xtype == XTYPE_XBOXONE &&
+ intf->cur_altsetting->desc.bInterfaceNumber != 0) {
+ /*
+ * The Xbox One controller lists three interfaces all with the
+ * same interface class, subclass and protocol. Differentiate by
+ * interface number.
+ */
+ error = -ENODEV;
+ goto err_free_in_urb;
+ }
+
error = xpad_init_output(intf, xpad);
if (error)
goto err_free_in_urb;
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 21a62d0fa..53fe9a3fb 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -73,7 +73,7 @@ static int adp5588_write(struct i2c_client *client, u8 reg, u8 val)
#ifdef CONFIG_GPIOLIB
static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
{
- struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
+ struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
int val;
@@ -93,7 +93,7 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
static void adp5588_gpio_set_value(struct gpio_chip *chip,
unsigned off, int val)
{
- struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
+ struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
@@ -112,7 +112,7 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip,
static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
{
- struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
+ struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
int ret;
@@ -130,7 +130,7 @@ static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
static int adp5588_gpio_direction_output(struct gpio_chip *chip,
unsigned off, int val)
{
- struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
+ struct adp5588_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
int ret;
@@ -210,7 +210,7 @@ static int adp5588_gpio_add(struct adp5588_kpad *kpad)
mutex_init(&kpad->gpio_lock);
- error = gpiochip_add(&kpad->gc);
+ error = gpiochip_add_data(&kpad->gc, kpad);
if (error) {
dev_err(dev, "gpiochip_add failed, err: %d\n", error);
return error;
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index c01a1d648..32d94c63d 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -387,7 +387,7 @@ static int adp5589_write(struct i2c_client *client, u8 reg, u8 val)
#ifdef CONFIG_GPIOLIB
static int adp5589_gpio_get_value(struct gpio_chip *chip, unsigned off)
{
- struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
+ struct adp5589_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
@@ -399,7 +399,7 @@ static int adp5589_gpio_get_value(struct gpio_chip *chip, unsigned off)
static void adp5589_gpio_set_value(struct gpio_chip *chip,
unsigned off, int val)
{
- struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
+ struct adp5589_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
@@ -418,7 +418,7 @@ static void adp5589_gpio_set_value(struct gpio_chip *chip,
static int adp5589_gpio_direction_input(struct gpio_chip *chip, unsigned off)
{
- struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
+ struct adp5589_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
int ret;
@@ -438,7 +438,7 @@ static int adp5589_gpio_direction_input(struct gpio_chip *chip, unsigned off)
static int adp5589_gpio_direction_output(struct gpio_chip *chip,
unsigned off, int val)
{
- struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
+ struct adp5589_kpad *kpad = gpiochip_get_data(chip);
unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
int ret;
@@ -525,9 +525,9 @@ static int adp5589_gpio_add(struct adp5589_kpad *kpad)
mutex_init(&kpad->gpio_lock);
- error = gpiochip_add(&kpad->gc);
+ error = gpiochip_add_data(&kpad->gc, kpad);
if (error) {
- dev_err(dev, "gpiochip_add failed, err: %d\n", error);
+ dev_err(dev, "gpiochip_add_data() failed, err: %d\n", error);
return error;
}
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index e0d72c8c0..146b26f66 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -64,31 +64,6 @@ static DECLARE_TASKLET_DISABLED(kp_tasklet, omap_kp_tasklet, 0);
static unsigned int *row_gpios;
static unsigned int *col_gpios;
-#ifdef CONFIG_ARCH_OMAP2
-static void set_col_gpio_val(struct omap_kp *omap_kp, u8 value)
-{
- int col;
-
- for (col = 0; col < omap_kp->cols; col++)
- gpio_set_value(col_gpios[col], value & (1 << col));
-}
-
-static u8 get_row_gpio_val(struct omap_kp *omap_kp)
-{
- int row;
- u8 value = 0;
-
- for (row = 0; row < omap_kp->rows; row++) {
- if (gpio_get_value(row_gpios[row]))
- value |= (1 << row);
- }
- return value;
-}
-#else
-#define set_col_gpio_val(x, y) do {} while (0)
-#define get_row_gpio_val(x) 0
-#endif
-
static irqreturn_t omap_kp_interrupt(int irq, void *dev_id)
{
/* disable keyboard interrupt and schedule for handling */
@@ -133,7 +108,6 @@ static void omap_kp_tasklet(unsigned long data)
unsigned int row_shift = get_count_order(omap_kp_data->cols);
unsigned char new_state[8], changed, key_down = 0;
int col, row;
- int spurious = 0;
/* check for any changes */
omap_kp_scan_keypad(omap_kp_data, new_state);
@@ -170,12 +144,9 @@ static void omap_kp_tasklet(unsigned long data)
memcpy(keypad_state, new_state, sizeof(keypad_state));
if (key_down) {
- int delay = HZ / 20;
/* some key is pressed - keep irq disabled and use timer
* to poll the keypad */
- if (spurious)
- delay = 2 * HZ;
- mod_timer(&omap_kp_data->timer, jiffies + delay);
+ mod_timer(&omap_kp_data->timer, jiffies + HZ / 20);
} else {
/* enable interrupts */
omap_writew(0, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
@@ -216,25 +187,6 @@ static ssize_t omap_kp_enable_store(struct device *dev, struct device_attribute
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, omap_kp_enable_show, omap_kp_enable_store);
-#ifdef CONFIG_PM
-static int omap_kp_suspend(struct platform_device *dev, pm_message_t state)
-{
- /* Nothing yet */
-
- return 0;
-}
-
-static int omap_kp_resume(struct platform_device *dev)
-{
- /* Nothing yet */
-
- return 0;
-}
-#else
-#define omap_kp_suspend NULL
-#define omap_kp_resume NULL
-#endif
-
static int omap_kp_probe(struct platform_device *pdev)
{
struct omap_kp *omap_kp;
@@ -371,8 +323,6 @@ static int omap_kp_remove(struct platform_device *pdev)
static struct platform_driver omap_kp_driver = {
.probe = omap_kp_probe,
.remove = omap_kp_remove,
- .suspend = omap_kp_suspend,
- .resume = omap_kp_resume,
.driver = {
.name = "omap-keypad",
},
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index bbcccd672..323a0fb57 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -61,9 +61,9 @@ struct twl4030_keypad {
unsigned short keymap[TWL4030_KEYMAP_SIZE];
u16 kp_state[TWL4030_MAX_ROWS];
bool autorepeat;
- unsigned n_rows;
- unsigned n_cols;
- unsigned irq;
+ unsigned int n_rows;
+ unsigned int n_cols;
+ unsigned int irq;
struct device *dbg_dev;
struct input_dev *input;
@@ -110,7 +110,7 @@ struct twl4030_keypad {
#define KEYP_CTRL_KBD_ON BIT(6)
/* KEYP_DEB, KEYP_LONG_KEY, KEYP_TIMEOUT_x*/
-#define KEYP_PERIOD_US(t, prescale) ((t) / (31 << (prescale + 1)) - 1)
+#define KEYP_PERIOD_US(t, prescale) ((t) / (31 << ((prescale) + 1)) - 1)
/* KEYP_LK_PTV_REG Fields */
#define KEYP_LK_PTV_PTV_SHIFT 5
@@ -162,9 +162,10 @@ static int twl4030_kpwrite_u8(struct twl4030_keypad *kp, u8 data, u32 reg)
static inline u16 twl4030_col_xlate(struct twl4030_keypad *kp, u8 col)
{
- /* If all bits in a row are active for all coloumns then
+ /*
+ * If all bits in a row are active for all columns then
* we have that row line connected to gnd. Mark this
- * key on as if it was on matrix position n_cols (ie
+ * key on as if it was on matrix position n_cols (i.e.
* one higher than the size of the matrix).
*/
if (col == 0xFF)
@@ -209,9 +210,9 @@ static void twl4030_kp_scan(struct twl4030_keypad *kp, bool release_all)
u16 new_state[TWL4030_MAX_ROWS];
int col, row;
- if (release_all)
+ if (release_all) {
memset(new_state, 0, sizeof(new_state));
- else {
+ } else {
/* check for any changes */
int ret = twl4030_read_kp_matrix_state(kp, new_state);
@@ -262,8 +263,10 @@ static irqreturn_t do_kp_irq(int irq, void *_kp)
/* Read & Clear TWL4030 pending interrupt */
ret = twl4030_kpread(kp, &reg, KEYP_ISR1, 1);
- /* Release all keys if I2C has gone bad or
- * the KEYP has gone to idle state */
+ /*
+ * Release all keys if I2C has gone bad or
+ * the KEYP has gone to idle state.
+ */
if (ret >= 0 && (reg & KEYP_IMR1_KP))
twl4030_kp_scan(kp, false);
else
@@ -283,7 +286,8 @@ static int twl4030_kp_program(struct twl4030_keypad *kp)
if (twl4030_kpwrite_u8(kp, reg, KEYP_CTRL) < 0)
return -EIO;
- /* NOTE: we could use sih_setup() here to package keypad
+ /*
+ * NOTE: we could use sih_setup() here to package keypad
* event sources as four different IRQs ... but we don't.
*/
@@ -312,7 +316,7 @@ static int twl4030_kp_program(struct twl4030_keypad *kp)
/*
* Enable Clear-on-Read; disable remembering events that fire
- * after the IRQ but before our handler acks (reads) them,
+ * after the IRQ but before our handler acks (reads) them.
*/
reg = TWL4030_SIH_CTRL_COR_MASK | TWL4030_SIH_CTRL_PENDDIS_MASK;
if (twl4030_kpwrite_u8(kp, reg, KEYP_SIH_CTRL) < 0)
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 9365535ba..9cc6d057c 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -76,8 +76,8 @@ enum {
BUZZER_ON = 1 << 5,
- /* up to 256 normal keys, up to 16 special keys */
- KEYMAP_SIZE = 256 + 16,
+ /* up to 256 normal keys, up to 15 special key combinations */
+ KEYMAP_SIZE = 256 + 15,
};
/* CM109 protocol packet */
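Worked check for the new bound: the special keys arrive in a four-bit field (HID_IR0 bits 0-3, see the scan handling below), so there are 2^4 - 1 = 15 distinct non-zero combinations. They are mapped at keymap indices 0xff + code, i.e. 0x100 through 0x10e, which is exactly what 256 + 15 entries cover; the old 256 + 16 size allowed for a sixteenth combination that a four-bit field can never produce.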
@@ -139,7 +139,7 @@ static unsigned short special_keymap(int code)
{
if (code > 0xff) {
switch (code - 0xff) {
- case RECORD_MUTE: return KEY_MUTE;
+ case RECORD_MUTE: return KEY_MICMUTE;
case PLAYBACK_MUTE: return KEY_MUTE;
case VOLUME_DOWN: return KEY_VOLUMEDOWN;
case VOLUME_UP: return KEY_VOLUMEUP;
@@ -312,6 +312,32 @@ static void report_key(struct cm109_dev *dev, int key)
input_sync(idev);
}
+/*
+ * Converts data of special key presses (volume, mute) into events
+ * for the input subsystem, sends press-n-release for mute keys.
+ */
+static void cm109_report_special(struct cm109_dev *dev)
+{
+ static const u8 autorelease = RECORD_MUTE | PLAYBACK_MUTE;
+ struct input_dev *idev = dev->idev;
+ u8 data = dev->irq_data->byte[HID_IR0];
+ unsigned short keycode;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ keycode = dev->keymap[0xff + BIT(i)];
+ if (keycode == KEY_RESERVED)
+ continue;
+
+ input_report_key(idev, keycode, data & BIT(i));
+ if (data & autorelease & BIT(i)) {
+ input_sync(idev);
+ input_report_key(idev, keycode, 0);
+ }
+ }
+ input_sync(idev);
+}
+
/******************************************************************************
* CM109 usb communication interface
*****************************************************************************/
@@ -340,6 +366,7 @@ static void cm109_urb_irq_callback(struct urb *urb)
struct cm109_dev *dev = urb->context;
const int status = urb->status;
int error;
+ unsigned long flags;
dev_dbg(&dev->intf->dev, "### URB IRQ: [0x%02x 0x%02x 0x%02x 0x%02x] keybit=0x%02x\n",
dev->irq_data->byte[0],
@@ -357,10 +384,7 @@ static void cm109_urb_irq_callback(struct urb *urb)
}
/* Special keys */
- if (dev->irq_data->byte[HID_IR0] & 0x0f) {
- const int code = (dev->irq_data->byte[HID_IR0] & 0x0f);
- report_key(dev, dev->keymap[0xff + code]);
- }
+ cm109_report_special(dev);
/* Scan key column */
if (dev->keybit == 0xf) {
@@ -381,7 +405,7 @@ static void cm109_urb_irq_callback(struct urb *urb)
out:
- spin_lock(&dev->ctl_submit_lock);
+ spin_lock_irqsave(&dev->ctl_submit_lock, flags);
dev->irq_urb_pending = 0;
@@ -405,7 +429,7 @@ static void cm109_urb_irq_callback(struct urb *urb)
__func__, error);
}
- spin_unlock(&dev->ctl_submit_lock);
+ spin_unlock_irqrestore(&dev->ctl_submit_lock, flags);
}
static void cm109_urb_ctl_callback(struct urb *urb)
@@ -413,6 +437,7 @@ static void cm109_urb_ctl_callback(struct urb *urb)
struct cm109_dev *dev = urb->context;
const int status = urb->status;
int error;
+ unsigned long flags;
dev_dbg(&dev->intf->dev, "### URB CTL: [0x%02x 0x%02x 0x%02x 0x%02x]\n",
dev->ctl_data->byte[0],
@@ -427,7 +452,7 @@ static void cm109_urb_ctl_callback(struct urb *urb)
__func__, status);
}
- spin_lock(&dev->ctl_submit_lock);
+ spin_lock_irqsave(&dev->ctl_submit_lock, flags);
dev->ctl_urb_pending = 0;
@@ -448,7 +473,7 @@ static void cm109_urb_ctl_callback(struct urb *urb)
}
}
- spin_unlock(&dev->ctl_submit_lock);
+ spin_unlock_irqrestore(&dev->ctl_submit_lock, flags);
}
static void cm109_toggle_buzzer_async(struct cm109_dev *dev)
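Both URB completion callbacks above move from spin_lock() to the irqsave variant: depending on the host controller a completion handler may run with interrupts disabled or enabled, and the same lock is taken from other contexts in the driver, so the caller's interrupt state must be saved and restored rather than assumed. A minimal sketch of the pattern, with a hypothetical demo_dev structure:

#include <linux/spinlock.h>

struct demo_dev {
        spinlock_t lock;
        int urb_pending;
};

/* Safe from any context: irqsave records whether interrupts were
 * already disabled and irqrestore puts that state back. */
static void demo_complete(struct demo_dev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        dev->urb_pending = 0;
        spin_unlock_irqrestore(&dev->lock, flags);
}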
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 6d96bff32..29ddeb7be 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -70,10 +70,13 @@ struct max77693_haptic {
static int max77693_haptic_set_duty_cycle(struct max77693_haptic *haptic)
{
- int delta = (haptic->pwm_dev->period + haptic->pwm_duty) / 2;
+ struct pwm_args pargs;
+ int delta;
int error;
- error = pwm_config(haptic->pwm_dev, delta, haptic->pwm_dev->period);
+ pwm_get_args(haptic->pwm_dev, &pargs);
+ delta = (pargs.period + haptic->pwm_duty) / 2;
+ error = pwm_config(haptic->pwm_dev, delta, pargs.period);
if (error) {
dev_err(haptic->dev, "failed to configure pwm: %d\n", error);
return error;
@@ -234,6 +237,7 @@ static int max77693_haptic_play_effect(struct input_dev *dev, void *data,
struct ff_effect *effect)
{
struct max77693_haptic *haptic = input_get_drvdata(dev);
+ struct pwm_args pargs;
u64 period_mag_multi;
haptic->magnitude = effect->u.rumble.strong_magnitude;
@@ -245,7 +249,8 @@ static int max77693_haptic_play_effect(struct input_dev *dev, void *data,
* The formula to convert magnitude to pwm_duty as follows:
* - pwm_duty = (magnitude * pwm_period) / MAX_MAGNITUDE(0xFFFF)
*/
- period_mag_multi = (u64)haptic->pwm_dev->period * haptic->magnitude;
+ pwm_get_args(haptic->pwm_dev, &pargs);
+ period_mag_multi = (u64)pargs.period * haptic->magnitude;
haptic->pwm_duty = (unsigned int)(period_mag_multi >>
MAX_MAGNITUDE_SHIFT);
@@ -329,6 +334,12 @@ static int max77693_haptic_probe(struct platform_device *pdev)
return PTR_ERR(haptic->pwm_dev);
}
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to the
+ * atomic PWM API.
+ */
+ pwm_apply_args(haptic->pwm_dev);
+
haptic->motor_reg = devm_regulator_get(&pdev->dev, "haptic");
if (IS_ERR(haptic->motor_reg)) {
dev_err(&pdev->dev, "failed to get regulator\n");
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index 8d6326d7e..99bc76288 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -306,6 +306,12 @@ static int max8997_haptic_probe(struct platform_device *pdev)
error);
goto err_free_mem;
}
+
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to
+ * the atomic PWM API.
+ */
+ pwm_apply_args(chip->pwm);
break;
default:
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 18663d4ed..5f9655d49 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -115,6 +115,12 @@ static int pwm_beeper_probe(struct platform_device *pdev)
goto err_free;
}
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to
+ * the atomic PWM API.
+ */
+ pwm_apply_args(beeper->pwm);
+
INIT_WORK(&beeper->work, pwm_beeper_work);
beeper->input = input_allocate_device();
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 96c486de4..c7fc8d4fb 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -47,13 +47,13 @@ struct rotary_encoder {
bool armed;
signed char dir; /* 1 - clockwise, -1 - CCW */
- unsigned last_stable;
+ unsigned int last_stable;
};
-static unsigned rotary_encoder_get_state(struct rotary_encoder *encoder)
+static unsigned int rotary_encoder_get_state(struct rotary_encoder *encoder)
{
int i;
- unsigned ret = 0;
+ unsigned int ret = 0;
for (i = 0; i < encoder->gpios->ndescs; ++i) {
int val = gpiod_get_value_cansleep(encoder->gpios->desc[i]);
@@ -100,7 +100,7 @@ static void rotary_encoder_report_event(struct rotary_encoder *encoder)
static irqreturn_t rotary_encoder_irq(int irq, void *dev_id)
{
struct rotary_encoder *encoder = dev_id;
- unsigned state;
+ unsigned int state;
mutex_lock(&encoder->access_mutex);
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index 42de34b92..5690eb7ff 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -46,7 +46,7 @@ struct vibra_info {
struct device *dev;
struct input_dev *input_dev;
struct work_struct play_work;
- struct mutex mutex;
+
int irq;
bool enabled;
@@ -190,8 +190,6 @@ static void vibra_play_work(struct work_struct *work)
return;
}
- mutex_lock(&info->mutex);
-
if (info->weak_speed || info->strong_speed) {
if (!info->enabled)
twl6040_vibra_enable(info);
@@ -200,7 +198,6 @@ static void vibra_play_work(struct work_struct *work)
} else if (info->enabled)
twl6040_vibra_disable(info);
- mutex_unlock(&info->mutex);
}
static int vibra_play(struct input_dev *input, void *data,
@@ -223,12 +220,8 @@ static void twl6040_vibra_close(struct input_dev *input)
cancel_work_sync(&info->play_work);
- mutex_lock(&info->mutex);
-
if (info->enabled)
twl6040_vibra_disable(info);
-
- mutex_unlock(&info->mutex);
}
static int __maybe_unused twl6040_vibra_suspend(struct device *dev)
@@ -236,13 +229,11 @@ static int __maybe_unused twl6040_vibra_suspend(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct vibra_info *info = platform_get_drvdata(pdev);
- mutex_lock(&info->mutex);
+ cancel_work_sync(&info->play_work);
if (info->enabled)
twl6040_vibra_disable(info);
- mutex_unlock(&info->mutex);
-
return 0;
}
@@ -301,8 +292,6 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
return -EINVAL;
}
- mutex_init(&info->mutex);
-
error = devm_request_threaded_irq(&pdev->dev, info->irq, NULL,
twl6040_vib_irq_handler,
IRQF_ONESHOT,
diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c
index e583f8b50..b27aa637f 100644
--- a/drivers/input/mouse/byd.c
+++ b/drivers/input/mouse/byd.c
@@ -478,7 +478,6 @@ int byd_init(struct psmouse *psmouse)
if (!priv)
return -ENOMEM;
- memset(priv, 0, sizeof(*priv));
setup_timer(&priv->timer, byd_clear_touch, (unsigned long) psmouse);
psmouse->private = priv;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 78f93cf68..be5b399da 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1568,13 +1568,7 @@ static int elantech_set_properties(struct elantech_data *etd)
case 5:
etd->hw_version = 3;
break;
- case 6:
- case 7:
- case 8:
- case 9:
- case 10:
- case 13:
- case 14:
+ case 6 ... 14:
etd->hw_version = 4;
break;
default:
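The collapsed label uses the GCC/Clang case-range extension ("case low ... high:"), which the kernel relies on throughout. Note the range is wider than the old list: it also matches firmware versions 11 and 12, which the explicit cases skipped, so those now map to hw_version 4 as well. A compact illustration (hypothetical version numbers):

/* "case 6 ... 14" matches every value from 6 to 14 inclusive. */
static int demo_hw_version(int fw)
{
        switch (fw) {
        case 5:
                return 3;
        case 6 ... 14:          /* covers 11 and 12 too */
                return 4;
        default:
                return -1;
        }
}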
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index a3f0f5a47..0f586780c 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -355,18 +355,11 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
return -ENXIO;
}
- if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
- psmouse_dbg(psmouse, "VMMouse port in use.\n");
- return -EBUSY;
- }
-
/* Check if the device is present */
response = ~VMMOUSE_PROTO_MAGIC;
VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2);
- if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) {
- release_region(VMMOUSE_PROTO_PORT, 4);
+ if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU)
return -ENXIO;
- }
if (set_properties) {
psmouse->vendor = VMMOUSE_VENDOR;
@@ -374,8 +367,6 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
psmouse->model = version;
}
- release_region(VMMOUSE_PROTO_PORT, 4);
-
return 0;
}
@@ -394,7 +385,6 @@ static void vmmouse_disconnect(struct psmouse *psmouse)
psmouse_reset(psmouse);
input_unregister_device(priv->abs_dev);
kfree(priv);
- release_region(VMMOUSE_PROTO_PORT, 4);
}
/**
@@ -438,15 +428,10 @@ int vmmouse_init(struct psmouse *psmouse)
struct input_dev *rel_dev = psmouse->dev, *abs_dev;
int error;
- if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
- psmouse_dbg(psmouse, "VMMouse port in use.\n");
- return -EBUSY;
- }
-
psmouse_reset(psmouse);
error = vmmouse_enable(psmouse);
if (error)
- goto release_region;
+ return error;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
abs_dev = input_allocate_device();
@@ -502,8 +487,5 @@ init_fail:
kfree(priv);
psmouse->private = NULL;
-release_region:
- release_region(VMMOUSE_PROTO_PORT, 4);
-
return error;
}
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index b368b0515..253df96be 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -157,11 +157,11 @@ static int rmi_function_match(struct device *dev, struct device_driver *drv)
static void rmi_function_of_probe(struct rmi_function *fn)
{
char of_name[9];
+ struct device_node *node = fn->rmi_dev->xport->dev->of_node;
snprintf(of_name, sizeof(of_name), "rmi4-f%02x",
fn->fd.function_number);
- fn->dev.of_node = of_find_node_by_name(
- fn->rmi_dev->xport->dev->of_node, of_name);
+ fn->dev.of_node = of_get_child_by_name(node, of_name);
}
#else
static inline void rmi_function_of_probe(struct rmi_function *fn)
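of_find_node_by_name() searches the whole device tree starting at the given node (and drops the passed-in reference), so it could match an unrelated node elsewhere; of_get_child_by_name() inspects only direct children, which is what the RMI4 binding describes. A sketch of the lookup, including the reference the call returns:

#include <linux/of.h>

static bool demo_has_child(struct device_node *parent, const char *name)
{
        struct device_node *child = of_get_child_by_name(parent, name);

        if (!child)
                return false;
        of_node_put(child);     /* drop the reference the lookup took */
        return true;
}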
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index 8dd3fb5e1..88e91559c 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -66,7 +66,7 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
struct rmi_device *rmi_dev = fn->rmi_dev;
int ret;
int offset;
- u8 buf[14];
+ u8 buf[15];
int pitch_x = 0;
int pitch_y = 0;
int clip_x_low = 0;
@@ -86,9 +86,10 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
offset = rmi_register_desc_calc_reg_offset(&f12->control_reg_desc, 8);
- if (item->reg_size > 14) {
- dev_err(&fn->dev, "F12 control8 should be 14 bytes, not: %ld\n",
- item->reg_size);
+ if (item->reg_size > sizeof(buf)) {
+ dev_err(&fn->dev,
+ "F12 control8 should be no bigger than %zd bytes, not: %ld\n",
+ sizeof(buf), item->reg_size);
return -ENODEV;
}
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index 889f6b77e..e86e377a9 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -49,7 +49,6 @@ MODULE_LICENSE(DRIVER_LICENSE);
struct usb_acecad {
char name[128];
char phys[64];
- struct usb_device *usbdev;
struct usb_interface *intf;
struct input_dev *input;
struct urb *irq;
@@ -64,6 +63,7 @@ static void usb_acecad_irq(struct urb *urb)
unsigned char *data = acecad->data;
struct input_dev *dev = acecad->input;
struct usb_interface *intf = acecad->intf;
+ struct usb_device *udev = interface_to_usbdev(intf);
int prox, status;
switch (urb->status) {
@@ -110,15 +110,15 @@ resubmit:
if (status)
dev_err(&intf->dev,
"can't resubmit intr, %s-%s/input0, status %d\n",
- acecad->usbdev->bus->bus_name,
- acecad->usbdev->devpath, status);
+ udev->bus->bus_name,
+ udev->devpath, status);
}
static int usb_acecad_open(struct input_dev *dev)
{
struct usb_acecad *acecad = input_get_drvdata(dev);
- acecad->irq->dev = acecad->usbdev;
+ acecad->irq->dev = interface_to_usbdev(acecad->intf);
if (usb_submit_urb(acecad->irq, GFP_KERNEL))
return -EIO;
@@ -172,7 +172,6 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
goto fail2;
}
- acecad->usbdev = dev;
acecad->intf = intf;
acecad->input = input_dev;
@@ -251,12 +250,13 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
static void usb_acecad_disconnect(struct usb_interface *intf)
{
struct usb_acecad *acecad = usb_get_intfdata(intf);
+ struct usb_device *udev = interface_to_usbdev(intf);
usb_set_intfdata(intf, NULL);
input_unregister_device(acecad->input);
usb_free_urb(acecad->irq);
- usb_free_coherent(acecad->usbdev, 8, acecad->data, acecad->data_dma);
+ usb_free_coherent(udev, 8, acecad->data, acecad->data_dma);
kfree(acecad);
}
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 78ca44840..4613f0aef 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -307,7 +307,6 @@ struct aiptek_settings {
struct aiptek {
struct input_dev *inputdev; /* input device struct */
- struct usb_device *usbdev; /* usb device struct */
struct usb_interface *intf; /* usb interface struct */
struct urb *urb; /* urb for incoming reports */
dma_addr_t data_dma; /* our dma stuffage */
@@ -847,7 +846,7 @@ static int aiptek_open(struct input_dev *inputdev)
{
struct aiptek *aiptek = input_get_drvdata(inputdev);
- aiptek->urb->dev = aiptek->usbdev;
+ aiptek->urb->dev = interface_to_usbdev(aiptek->intf);
if (usb_submit_urb(aiptek->urb, GFP_KERNEL) != 0)
return -EIO;
@@ -873,8 +872,10 @@ aiptek_set_report(struct aiptek *aiptek,
unsigned char report_type,
unsigned char report_id, void *buffer, int size)
{
- return usb_control_msg(aiptek->usbdev,
- usb_sndctrlpipe(aiptek->usbdev, 0),
+ struct usb_device *udev = interface_to_usbdev(aiptek->intf);
+
+ return usb_control_msg(udev,
+ usb_sndctrlpipe(udev, 0),
USB_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE |
USB_DIR_OUT, (report_type << 8) + report_id,
@@ -886,8 +887,10 @@ aiptek_get_report(struct aiptek *aiptek,
unsigned char report_type,
unsigned char report_id, void *buffer, int size)
{
- return usb_control_msg(aiptek->usbdev,
- usb_rcvctrlpipe(aiptek->usbdev, 0),
+ struct usb_device *udev = interface_to_usbdev(aiptek->intf);
+
+ return usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
USB_REQ_GET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE |
USB_DIR_IN, (report_type << 8) + report_id,
@@ -1729,7 +1732,6 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
}
aiptek->inputdev = inputdev;
- aiptek->usbdev = usbdev;
aiptek->intf = intf;
aiptek->ifnum = intf->altsetting[0].desc.bInterfaceNumber;
aiptek->inDelay = 0;
@@ -1833,8 +1835,8 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
* input.
*/
usb_fill_int_urb(aiptek->urb,
- aiptek->usbdev,
- usb_rcvintpipe(aiptek->usbdev,
+ usbdev,
+ usb_rcvintpipe(usbdev,
endpoint->bEndpointAddress),
aiptek->data, 8, aiptek_irq, aiptek,
endpoint->bInterval);
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 7c18249d6..abf09ac42 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -104,7 +104,6 @@ MODULE_DEVICE_TABLE (usb, gtco_usbid_table);
struct gtco {
struct input_dev *inputdevice; /* input device struct pointer */
- struct usb_device *usbdev; /* the usb device for this device */
struct usb_interface *intf; /* the usb interface for this device */
struct urb *urbinfo; /* urb for incoming reports */
dma_addr_t buf_dma; /* dma addr of the data buffer*/
@@ -540,7 +539,7 @@ static int gtco_input_open(struct input_dev *inputdev)
{
struct gtco *device = input_get_drvdata(inputdev);
- device->urbinfo->dev = device->usbdev;
+ device->urbinfo->dev = interface_to_usbdev(device->intf);
if (usb_submit_urb(device->urbinfo, GFP_KERNEL))
return -EIO;
@@ -824,6 +823,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
int result = 0, retry;
int error;
struct usb_endpoint_descriptor *endpoint;
+ struct usb_device *udev = interface_to_usbdev(usbinterface);
/* Allocate memory for device structure */
gtco = kzalloc(sizeof(struct gtco), GFP_KERNEL);
@@ -838,11 +838,10 @@ static int gtco_probe(struct usb_interface *usbinterface,
gtco->inputdevice = input_dev;
/* Save interface information */
- gtco->usbdev = interface_to_usbdev(usbinterface);
gtco->intf = usbinterface;
/* Allocate some data for incoming reports */
- gtco->buffer = usb_alloc_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+ gtco->buffer = usb_alloc_coherent(udev, REPORT_MAX_SIZE,
GFP_KERNEL, &gtco->buf_dma);
if (!gtco->buffer) {
dev_err(&usbinterface->dev, "No more memory for us buffers\n");
@@ -907,8 +906,8 @@ static int gtco_probe(struct usb_interface *usbinterface,
/* Couple of tries to get reply */
for (retry = 0; retry < 3; retry++) {
- result = usb_control_msg(gtco->usbdev,
- usb_rcvctrlpipe(gtco->usbdev, 0),
+ result = usb_control_msg(udev,
+ usb_rcvctrlpipe(udev, 0),
USB_REQ_GET_DESCRIPTOR,
USB_RECIP_INTERFACE | USB_DIR_IN,
REPORT_DEVICE_TYPE << 8,
@@ -936,7 +935,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
}
/* Create a device file node */
- usb_make_path(gtco->usbdev, gtco->usbpath, sizeof(gtco->usbpath));
+ usb_make_path(udev, gtco->usbpath, sizeof(gtco->usbpath));
strlcat(gtco->usbpath, "/input0", sizeof(gtco->usbpath));
/* Set Input device functions */
@@ -953,15 +952,15 @@ static int gtco_probe(struct usb_interface *usbinterface,
gtco_setup_caps(input_dev);
/* Set input device required ID information */
- usb_to_input_id(gtco->usbdev, &input_dev->id);
+ usb_to_input_id(udev, &input_dev->id);
input_dev->dev.parent = &usbinterface->dev;
/* Setup the URB, it will be posted later on open of input device */
endpoint = &usbinterface->altsetting[0].endpoint[0].desc;
usb_fill_int_urb(gtco->urbinfo,
- gtco->usbdev,
- usb_rcvintpipe(gtco->usbdev,
+ udev,
+ usb_rcvintpipe(udev,
endpoint->bEndpointAddress),
gtco->buffer,
REPORT_MAX_SIZE,
@@ -985,7 +984,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
err_free_urb:
usb_free_urb(gtco->urbinfo);
err_free_buf:
- usb_free_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+ usb_free_coherent(udev, REPORT_MAX_SIZE,
gtco->buffer, gtco->buf_dma);
err_free_devs:
input_free_device(input_dev);
@@ -1002,13 +1001,14 @@ static void gtco_disconnect(struct usb_interface *interface)
{
/* Grab private device ptr */
struct gtco *gtco = usb_get_intfdata(interface);
+ struct usb_device *udev = interface_to_usbdev(interface);
/* Now reverse all the registration stuff */
if (gtco) {
input_unregister_device(gtco->inputdevice);
usb_kill_urb(gtco->urbinfo);
usb_free_urb(gtco->urbinfo);
- usb_free_coherent(gtco->usbdev, REPORT_MAX_SIZE,
+ usb_free_coherent(udev, REPORT_MAX_SIZE,
gtco->buffer, gtco->buf_dma);
kfree(gtco);
}
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index d2ac7c2b5..e850d7e8a 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -31,7 +31,6 @@ struct kbtab {
unsigned char *data;
dma_addr_t data_dma;
struct input_dev *dev;
- struct usb_device *usbdev;
struct usb_interface *intf;
struct urb *irq;
char phys[32];
@@ -99,8 +98,9 @@ MODULE_DEVICE_TABLE(usb, kbtab_ids);
static int kbtab_open(struct input_dev *dev)
{
struct kbtab *kbtab = input_get_drvdata(dev);
+ struct usb_device *udev = interface_to_usbdev(kbtab->intf);
- kbtab->irq->dev = kbtab->usbdev;
+ kbtab->irq->dev = udev;
if (usb_submit_urb(kbtab->irq, GFP_KERNEL))
return -EIO;
@@ -135,7 +135,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
if (!kbtab->irq)
goto fail2;
- kbtab->usbdev = dev;
kbtab->intf = intf;
kbtab->dev = input_dev;
@@ -188,12 +187,13 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
static void kbtab_disconnect(struct usb_interface *intf)
{
struct kbtab *kbtab = usb_get_intfdata(intf);
+ struct usb_device *udev = interface_to_usbdev(intf);
usb_set_intfdata(intf, NULL);
input_unregister_device(kbtab->dev);
usb_free_urb(kbtab->irq);
- usb_free_coherent(kbtab->usbdev, 8, kbtab->data, kbtab->data_dma);
+ usb_free_coherent(udev, 8, kbtab->data, kbtab->data_dma);
kfree(kbtab);
}
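The four tablet drivers above (acecad, aiptek, gtco, kbtab) all drop the cached struct usb_device pointer from their private data and derive it on demand: interface_to_usbdev() is just a container_of() walk from the interface to its parent device, so caching the result buys nothing and leaves a second pointer to keep consistent. The pattern in isolation (hypothetical open helper):

#include <linux/usb.h>

/* Recover the usb_device from the interface when needed instead of
 * keeping a duplicate pointer in the driver's private struct. */
static int demo_open(struct usb_interface *intf, struct urb *urb)
{
        urb->dev = interface_to_usbdev(intf);
        return usb_submit_urb(urb, GFP_KERNEL);
}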
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 69d299d5d..e4bf1103e 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -379,7 +379,7 @@ static const struct attribute_group ad7879_attr_group = {
static int ad7879_gpio_direction_input(struct gpio_chip *chip,
unsigned gpio)
{
- struct ad7879 *ts = container_of(chip, struct ad7879, gc);
+ struct ad7879 *ts = gpiochip_get_data(chip);
int err;
mutex_lock(&ts->mutex);
@@ -393,7 +393,7 @@ static int ad7879_gpio_direction_input(struct gpio_chip *chip,
static int ad7879_gpio_direction_output(struct gpio_chip *chip,
unsigned gpio, int level)
{
- struct ad7879 *ts = container_of(chip, struct ad7879, gc);
+ struct ad7879 *ts = gpiochip_get_data(chip);
int err;
mutex_lock(&ts->mutex);
@@ -412,7 +412,7 @@ static int ad7879_gpio_direction_output(struct gpio_chip *chip,
static int ad7879_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
- struct ad7879 *ts = container_of(chip, struct ad7879, gc);
+ struct ad7879 *ts = gpiochip_get_data(chip);
u16 val;
mutex_lock(&ts->mutex);
@@ -425,7 +425,7 @@ static int ad7879_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
static void ad7879_gpio_set_value(struct gpio_chip *chip,
unsigned gpio, int value)
{
- struct ad7879 *ts = container_of(chip, struct ad7879, gc);
+ struct ad7879 *ts = gpiochip_get_data(chip);
mutex_lock(&ts->mutex);
if (value)
@@ -456,7 +456,7 @@ static int ad7879_gpio_add(struct ad7879 *ts,
ts->gc.owner = THIS_MODULE;
ts->gc.parent = ts->dev;
- ret = gpiochip_add(&ts->gc);
+ ret = gpiochip_add_data(&ts->gc, ts);
if (ret)
dev_err(ts->dev, "failed to register gpio %d\n",
ts->gc.base);
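ad7879 moves from container_of() on the embedded gpio_chip to the data pointer registered at gpiochip_add_data() time; the two are equivalent here, but the data-pointer form keeps working even if the chip stops being embedded in the private struct. The registration/lookup pairing, assuming a hypothetical demo_chip:

#include <linux/gpio/driver.h>
#include <linux/bitops.h>

struct demo_chip {
        struct gpio_chip gc;
        u16 shadow;
};

static int demo_get(struct gpio_chip *chip, unsigned offset)
{
        struct demo_chip *dc = gpiochip_get_data(chip);

        return !!(dc->shadow & BIT(offset));
}

static int demo_register(struct demo_chip *dc)
{
        dc->gc.get = demo_get;
        /* Associate dc with the chip; gpiochip_get_data() returns it. */
        return gpiochip_add_data(&dc->gc, dc);
}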
diff --git a/drivers/input/touchscreen/bcm_iproc_tsc.c b/drivers/input/touchscreen/bcm_iproc_tsc.c
index ae460a5c9..4d11b27c7 100644
--- a/drivers/input/touchscreen/bcm_iproc_tsc.c
+++ b/drivers/input/touchscreen/bcm_iproc_tsc.c
@@ -23,6 +23,8 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/serio.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#define IPROC_TS_NAME "iproc-ts"
@@ -88,7 +90,11 @@
#define TS_WIRE_MODE_BIT BIT(1)
#define dbg_reg(dev, priv, reg) \
- dev_dbg(dev, "%20s= 0x%08x\n", #reg, readl((priv)->regs + reg))
+do { \
+ u32 val; \
+ regmap_read((priv)->regmap, reg, &val); \
+ dev_dbg(dev, "%20s= 0x%08x\n", #reg, val); \
+} while (0)
struct tsc_param {
/* Each step is 1024 us. Valid 1-256 */
@@ -141,7 +147,7 @@ struct iproc_ts_priv {
struct platform_device *pdev;
struct input_dev *idev;
- void __iomem *regs;
+ struct regmap *regmap;
struct clk *tsc_clk;
int pen_status;
@@ -196,22 +202,22 @@ static irqreturn_t iproc_touchscreen_interrupt(int irq, void *data)
int i;
bool needs_sync = false;
- intr_status = readl(priv->regs + INTERRUPT_STATUS);
+ regmap_read(priv->regmap, INTERRUPT_STATUS, &intr_status);
intr_status &= TS_PEN_INTR_MASK | TS_FIFO_INTR_MASK;
if (intr_status == 0)
return IRQ_NONE;
/* Clear all interrupt status bits, write-1-clear */
- writel(intr_status, priv->regs + INTERRUPT_STATUS);
-
+ regmap_write(priv->regmap, INTERRUPT_STATUS, intr_status);
/* Pen up/down */
if (intr_status & TS_PEN_INTR_MASK) {
- if (readl(priv->regs + CONTROLLER_STATUS) & TS_PEN_DOWN)
+ regmap_read(priv->regmap, CONTROLLER_STATUS, &priv->pen_status);
+ if (priv->pen_status & TS_PEN_DOWN)
priv->pen_status = PEN_DOWN_STATUS;
else
priv->pen_status = PEN_UP_STATUS;
- input_report_key(priv->idev, BTN_TOUCH, priv->pen_status);
+ input_report_key(priv->idev, BTN_TOUCH, priv->pen_status);
needs_sync = true;
dev_dbg(&priv->pdev->dev,
@@ -221,7 +227,7 @@ static irqreturn_t iproc_touchscreen_interrupt(int irq, void *data)
/* coordinates in FIFO exceed the threshold */
if (intr_status & TS_FIFO_INTR_MASK) {
for (i = 0; i < priv->cfg_params.fifo_threshold; i++) {
- raw_coordinate = readl(priv->regs + FIFO_DATA);
+ regmap_read(priv->regmap, FIFO_DATA, &raw_coordinate);
if (raw_coordinate == INVALID_COORD)
continue;
@@ -239,7 +245,7 @@ static irqreturn_t iproc_touchscreen_interrupt(int irq, void *data)
x = (x >> 4) & 0x0FFF;
y = (y >> 4) & 0x0FFF;
- /* adjust x y according to lcd tsc mount angle */
+ /* Adjust x y according to LCD tsc mount angle. */
if (priv->cfg_params.invert_x)
x = priv->cfg_params.max_x - x;
@@ -262,9 +268,10 @@ static irqreturn_t iproc_touchscreen_interrupt(int irq, void *data)
static int iproc_ts_start(struct input_dev *idev)
{
- struct iproc_ts_priv *priv = input_get_drvdata(idev);
u32 val;
+ u32 mask;
int error;
+ struct iproc_ts_priv *priv = input_get_drvdata(idev);
/* Enable clock */
error = clk_prepare_enable(priv->tsc_clk);
@@ -279,9 +286,10 @@ static int iproc_ts_start(struct input_dev *idev)
* FIFO reaches the int_th value, and pen event(up/down)
*/
val = TS_PEN_INTR_MASK | TS_FIFO_INTR_MASK;
- writel(val, priv->regs + INTERRUPT_MASK);
+ regmap_update_bits(priv->regmap, INTERRUPT_MASK, val, val);
- writel(priv->cfg_params.fifo_threshold, priv->regs + INTERRUPT_THRES);
+ val = priv->cfg_params.fifo_threshold;
+ regmap_write(priv->regmap, INTERRUPT_THRES, val);
/* Initialize control reg1 */
val = 0;
@@ -289,26 +297,23 @@ static int iproc_ts_start(struct input_dev *idev)
val |= priv->cfg_params.debounce_timeout << DEBOUNCE_TIMEOUT_SHIFT;
val |= priv->cfg_params.settling_timeout << SETTLING_TIMEOUT_SHIFT;
val |= priv->cfg_params.touch_timeout << TOUCH_TIMEOUT_SHIFT;
- writel(val, priv->regs + REGCTL1);
+ regmap_write(priv->regmap, REGCTL1, val);
/* Try to clear all interrupt status */
- val = readl(priv->regs + INTERRUPT_STATUS);
- val |= TS_FIFO_INTR_MASK | TS_PEN_INTR_MASK;
- writel(val, priv->regs + INTERRUPT_STATUS);
+ val = TS_FIFO_INTR_MASK | TS_PEN_INTR_MASK;
+ regmap_update_bits(priv->regmap, INTERRUPT_STATUS, val, val);
/* Initialize control reg2 */
- val = readl(priv->regs + REGCTL2);
- val |= TS_CONTROLLER_EN_BIT | TS_WIRE_MODE_BIT;
-
- val &= ~TS_CONTROLLER_AVGDATA_MASK;
+ val = TS_CONTROLLER_EN_BIT | TS_WIRE_MODE_BIT;
val |= priv->cfg_params.average_data << TS_CONTROLLER_AVGDATA_SHIFT;
- val &= ~(TS_CONTROLLER_PWR_LDO | /* PWR up LDO */
+ mask = (TS_CONTROLLER_AVGDATA_MASK);
+ mask |= (TS_CONTROLLER_PWR_LDO | /* PWR up LDO */
TS_CONTROLLER_PWR_ADC | /* PWR up ADC */
TS_CONTROLLER_PWR_BGP | /* PWR up BGP */
TS_CONTROLLER_PWR_TS); /* PWR up TS */
-
- writel(val, priv->regs + REGCTL2);
+ mask |= val;
+ regmap_update_bits(priv->regmap, REGCTL2, mask, val);
ts_reg_dump(priv);
@@ -320,12 +325,17 @@ static void iproc_ts_stop(struct input_dev *dev)
u32 val;
struct iproc_ts_priv *priv = input_get_drvdata(dev);
- writel(0, priv->regs + INTERRUPT_MASK); /* Disable all interrupts */
+ /*
+ * Disable FIFO int_th and pen event (up/down) interrupts only,
+ * as the interrupt mask register is shared between ADC, TS and
+ * flextimer.
+ */
+ val = TS_PEN_INTR_MASK | TS_FIFO_INTR_MASK;
+ regmap_update_bits(priv->regmap, INTERRUPT_MASK, val, 0);
/* Only power down touch screen controller */
- val = readl(priv->regs + REGCTL2);
- val |= TS_CONTROLLER_PWR_TS;
- writel(val, priv->regs + REGCTL2);
+ val = TS_CONTROLLER_PWR_TS;
+ regmap_update_bits(priv->regmap, REGCTL2, val, val);
clk_disable(priv->tsc_clk);
}
@@ -414,7 +424,6 @@ static int iproc_ts_probe(struct platform_device *pdev)
{
struct iproc_ts_priv *priv;
struct input_dev *idev;
- struct resource *res;
int irq;
int error;
@@ -422,12 +431,12 @@ static int iproc_ts_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- /* touchscreen controller memory mapped regs */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->regs)) {
- error = PTR_ERR(priv->regs);
- dev_err(&pdev->dev, "unable to map I/O memory: %d\n", error);
+ /* touchscreen controller memory-mapped regs via syscon */
+ priv->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "ts_syscon");
+ if (IS_ERR(priv->regmap)) {
+ error = PTR_ERR(priv->regmap);
+ dev_err(&pdev->dev, "unable to map I/O memory:%d\n", error);
return error;
}
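The conversion above replaces direct readl()/writel() on a private mapping with a regmap obtained from a syscon, because the register block is shared with the ADC and flextimer; every read-modify-write therefore becomes regmap_update_bits(), which touches only the masked bits under the regmap's own locking. Reduced to its core (hypothetical register and bit names):

#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/bitops.h>
#include <linux/err.h>

#define DEMO_CTL        0x04
#define DEMO_TS_EN      BIT(0)

static int demo_enable(struct device_node *np)
{
        struct regmap *map =
                syscon_regmap_lookup_by_phandle(np, "ts_syscon");

        if (IS_ERR(map))
                return PTR_ERR(map);
        /* Shared block: set only our bit, leave the ADC's bits alone. */
        return regmap_update_bits(map, DEMO_CTL, DEMO_TS_EN, DEMO_TS_EN);
}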
diff --git a/drivers/input/touchscreen/cyttsp4_core.c b/drivers/input/touchscreen/cyttsp4_core.c
index 5ed310574..44deca88c 100644
--- a/drivers/input/touchscreen/cyttsp4_core.c
+++ b/drivers/input/touchscreen/cyttsp4_core.c
@@ -1499,7 +1499,7 @@ static int cyttsp4_core_sleep_(struct cyttsp4 *cd)
if (IS_BOOTLOADER(mode[0], mode[1])) {
mutex_unlock(&cd->system_lock);
- dev_err(cd->dev, "%s: Device in BOOTLADER mode.\n", __func__);
+ dev_err(cd->dev, "%s: Device in BOOTLOADER mode.\n", __func__);
rc = -EINVAL;
goto error;
}
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index 485794376..d07dd29d4 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -115,7 +115,6 @@
struct sun4i_ts_data {
struct device *dev;
struct input_dev *input;
- struct thermal_zone_device *tz;
void __iomem *base;
unsigned int irq;
bool ignore_fifo_data;
@@ -366,10 +365,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
- ts->tz = thermal_zone_of_sensor_register(ts->dev, 0, ts,
- &sun4i_ts_tz_ops);
- if (IS_ERR(ts->tz))
- ts->tz = NULL;
+ devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
@@ -377,7 +373,6 @@ static int sun4i_ts_probe(struct platform_device *pdev)
error = input_register_device(ts->input);
if (error) {
writel(0, ts->base + TP_INT_FIFOC);
- thermal_zone_of_sensor_unregister(ts->dev, ts->tz);
return error;
}
}
@@ -394,8 +389,6 @@ static int sun4i_ts_remove(struct platform_device *pdev)
if (ts->input)
input_unregister_device(ts->input);
- thermal_zone_of_sensor_unregister(ts->dev, ts->tz);
-
/* Deactivate all IRQs */
writel(0, ts->base + TP_INT_FIFOC);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index a21a07c3a..8b3f15ca7 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -487,8 +487,7 @@ static int titsc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int titsc_suspend(struct device *dev)
+static int __maybe_unused titsc_suspend(struct device *dev)
{
struct titsc *ts_dev = dev_get_drvdata(dev);
struct ti_tscadc_dev *tscadc_dev;
@@ -504,7 +503,7 @@ static int titsc_suspend(struct device *dev)
return 0;
}
-static int titsc_resume(struct device *dev)
+static int __maybe_unused titsc_resume(struct device *dev)
{
struct titsc *ts_dev = dev_get_drvdata(dev);
struct ti_tscadc_dev *tscadc_dev;
@@ -521,14 +520,7 @@ static int titsc_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops titsc_pm_ops = {
- .suspend = titsc_suspend,
- .resume = titsc_resume,
-};
-#define TITSC_PM_OPS (&titsc_pm_ops)
-#else
-#define TITSC_PM_OPS NULL
-#endif
+static SIMPLE_DEV_PM_OPS(titsc_pm_ops, titsc_suspend, titsc_resume);
static const struct of_device_id ti_tsc_dt_ids[] = {
{ .compatible = "ti,am3359-tsc", },
@@ -541,7 +533,7 @@ static struct platform_driver ti_tsc_driver = {
.remove = titsc_remove,
.driver = {
.name = "TI-am335x-tsc",
- .pm = TITSC_PM_OPS,
+ .pm = &titsc_pm_ops,
.of_match_table = ti_tsc_dt_ids,
},
};
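SIMPLE_DEV_PM_OPS() plus __maybe_unused replaces the hand-rolled #ifdef CONFIG_PM block: the macro emits an empty ops table when sleep support is compiled out, and the attribute silences the resulting unused-function warnings, so the callbacks need no preprocessor guards at all. The whole idiom:

#include <linux/pm.h>

static int __maybe_unused demo_suspend(struct device *dev)
{
        /* quiesce the hardware here */
        return 0;
}

static int __maybe_unused demo_resume(struct device *dev)
{
        /* re-arm the hardware here */
        return 0;
}

/* Expands to an empty table when CONFIG_PM_SLEEP=n; no #ifdef needed. */
static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);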
diff --git a/drivers/input/touchscreen/ts4800-ts.c b/drivers/input/touchscreen/ts4800-ts.c
index 3c3dd7830..fed73eeb4 100644
--- a/drivers/input/touchscreen/ts4800-ts.c
+++ b/drivers/input/touchscreen/ts4800-ts.c
@@ -118,6 +118,13 @@ static int ts4800_parse_dt(struct platform_device *pdev,
return -ENODEV;
}
+ ts->regmap = syscon_node_to_regmap(syscon_np);
+ of_node_put(syscon_np);
+ if (IS_ERR(ts->regmap)) {
+ dev_err(dev, "cannot get parent's regmap\n");
+ return PTR_ERR(ts->regmap);
+ }
+
error = of_property_read_u32_index(np, "syscon", 1, &reg);
if (error < 0) {
dev_err(dev, "no offset in syscon\n");
@@ -134,12 +141,6 @@ static int ts4800_parse_dt(struct platform_device *pdev,
ts->bit = BIT(bit);
- ts->regmap = syscon_node_to_regmap(syscon_np);
- if (IS_ERR(ts->regmap)) {
- dev_err(dev, "cannot get parent's regmap\n");
- return PTR_ERR(ts->regmap);
- }
-
return 0;
}
diff --git a/drivers/input/touchscreen/tsc2004.c b/drivers/input/touchscreen/tsc2004.c
index 7295c198a..6fe55d598 100644
--- a/drivers/input/touchscreen/tsc2004.c
+++ b/drivers/input/touchscreen/tsc2004.c
@@ -22,6 +22,11 @@
#include <linux/regmap.h>
#include "tsc200x-core.h"
+static const struct input_id tsc2004_input_id = {
+ .bustype = BUS_I2C,
+ .product = 2004,
+};
+
static int tsc2004_cmd(struct device *dev, u8 cmd)
{
u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -42,7 +47,7 @@ static int tsc2004_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
- return tsc200x_probe(&i2c->dev, i2c->irq, BUS_I2C,
+ return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id,
devm_regmap_init_i2c(i2c, &tsc200x_regmap_config),
tsc2004_cmd);
}
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index b9f593dfd..f2c5f0e47 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -24,6 +24,11 @@
#include <linux/regmap.h>
#include "tsc200x-core.h"
+static const struct input_id tsc2005_input_id = {
+ .bustype = BUS_SPI,
+ .product = 2005,
+};
+
static int tsc2005_cmd(struct device *dev, u8 cmd)
{
u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -62,7 +67,7 @@ static int tsc2005_probe(struct spi_device *spi)
if (error)
return error;
- return tsc200x_probe(&spi->dev, spi->irq, BUS_SPI,
+ return tsc200x_probe(&spi->dev, spi->irq, &tsc2005_input_id,
devm_regmap_init_spi(spi, &tsc200x_regmap_config),
tsc2005_cmd);
}
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index 15240c1ee..dfa7f1c4f 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -450,7 +450,7 @@ static void tsc200x_close(struct input_dev *input)
mutex_unlock(&ts->mutex);
}
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd))
{
@@ -547,9 +547,18 @@ int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
snprintf(ts->phys, sizeof(ts->phys),
"%s/input-ts", dev_name(dev));
- input_dev->name = "TSC200X touchscreen";
+ if (tsc_id->product == 2004) {
+ input_dev->name = "TSC200X touchscreen";
+ } else {
+ input_dev->name = devm_kasprintf(dev, GFP_KERNEL,
+ "TSC%04d touchscreen",
+ tsc_id->product);
+ if (!input_dev->name)
+ return -ENOMEM;
+ }
+
input_dev->phys = ts->phys;
- input_dev->id.bustype = bustype;
+ input_dev->id = *tsc_id;
input_dev->dev.parent = dev;
input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
diff --git a/drivers/input/touchscreen/tsc200x-core.h b/drivers/input/touchscreen/tsc200x-core.h
index 7a482d102..49a63a3c6 100644
--- a/drivers/input/touchscreen/tsc200x-core.h
+++ b/drivers/input/touchscreen/tsc200x-core.h
@@ -70,7 +70,7 @@
extern const struct regmap_config tsc200x_regmap_config;
extern const struct dev_pm_ops tsc200x_pm_ops;
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
struct regmap *regmap,
int (*tsc200x_cmd)(struct device *dev, u8 cmd));
int tsc200x_remove(struct device *dev);
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index bab3c6acf..b6fc4bde7 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -27,7 +27,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@gmail.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-#define W8001_MAX_LENGTH 11
+#define W8001_MAX_LENGTH 13
#define W8001_LEAD_MASK 0x80
#define W8001_LEAD_BYTE 0x80
#define W8001_TAB_MASK 0x40
@@ -155,6 +155,7 @@ static void parse_multi_touch(struct w8001 *w8001)
bool touch = data[0] & (1 << i);
input_mt_slot(dev, i);
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch);
if (touch) {
x = (data[6 * i + 1] << 7) | data[6 * i + 2];
y = (data[6 * i + 3] << 7) | data[6 * i + 4];
@@ -339,6 +340,15 @@ static irqreturn_t w8001_interrupt(struct serio *serio,
w8001->idx = 0;
parse_multi_touch(w8001);
break;
+
+ default:
+ /*
+ * ThinkPad X60 Tablet PC (pen only device) sometimes
+ * sends invalid data packets that are larger than
+ * W8001_PKTLEN_TPCPEN. Let's start over again.
+ */
+ if (!w8001->touch_dev && w8001->idx > W8001_PKTLEN_TPCPEN - 1)
+ w8001->idx = 0;
}
return IRQ_HANDLED;
@@ -513,6 +523,8 @@ static int w8001_setup_touch(struct w8001 *w8001, char *basename,
0, touch.x, 0, 0);
input_set_abs_params(dev, ABS_MT_POSITION_Y,
0, touch.y, 0, 0);
+ input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
+ 0, MT_TOOL_MAX, 0, 0);
strlcat(basename, " 2FG", basename_sz);
if (w8001->max_pen_x && w8001->max_pen_y)
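The W8001 change reports MT_TOOL_FINGER for every slot on every scan, not just for active touches, so releases flow through the slot state machine and the MT core emits the matching events. The type-B reporting loop in isolation (hypothetical scan data):

#include <linux/input/mt.h>
#include <linux/bitops.h>

static void demo_report(struct input_dev *dev, unsigned long down_mask,
                        const int *x, const int *y, int nslots)
{
        int i;

        for (i = 0; i < nslots; i++) {
                bool touch = down_mask & BIT(i);

                input_mt_slot(dev, i);
                /* false -> the MT core synthesizes the release */
                input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch);
                if (touch) {
                        input_report_abs(dev, ABS_MT_POSITION_X, x[i]);
                        input_report_abs(dev, ABS_MT_POSITION_Y, y[i]);
                }
        }
        input_sync(dev);
}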
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index dd1dc39f8..ad0860383 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -76,8 +76,7 @@ config IOMMU_DMA
config FSL_PAMU
bool "Freescale IOMMU support"
- depends on PPC32
- depends on PPC_E500MC || COMPILE_TEST
+ depends on PPC_E500MC || (COMPILE_TEST && PPC)
select IOMMU_API
select GENERIC_ALLOCATOR
help
@@ -124,16 +123,6 @@ config AMD_IOMMU
your BIOS for an option to enable it or if you have an IVRS ACPI
table.
-config AMD_IOMMU_STATS
- bool "Export AMD IOMMU statistics to debugfs"
- depends on AMD_IOMMU
- select DEBUG_FS
- ---help---
- This option enables code in the AMD IOMMU driver to collect various
- statistics about whats happening in the driver and exports that
- information to userspace via debugfs.
- If unsure, say N.
-
config AMD_IOMMU_V2
tristate "AMD IOMMU Version 2 driver"
depends on AMD_IOMMU
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 5efadad46..634f63639 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -19,6 +19,8 @@
#include <linux/ratelimit.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/amba/bus.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
@@ -72,6 +74,7 @@ static DEFINE_SPINLOCK(dev_data_list_lock);
LIST_HEAD(ioapic_map);
LIST_HEAD(hpet_map);
+LIST_HEAD(acpihid_map);
/*
* Domain for untranslated devices - only allocated
@@ -162,18 +165,65 @@ struct dma_ops_domain {
*
****************************************************************************/
-static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+static inline int match_hid_uid(struct device *dev,
+ struct acpihid_map_entry *entry)
{
- return container_of(dom, struct protection_domain, domain);
+ const char *hid, *uid;
+
+ hid = acpi_device_hid(ACPI_COMPANION(dev));
+ uid = acpi_device_uid(ACPI_COMPANION(dev));
+
+ if (!hid || !(*hid))
+ return -ENODEV;
+
+ if (!uid || !(*uid))
+ return strcmp(hid, entry->hid);
+
+ if (!(*entry->uid))
+ return strcmp(hid, entry->hid);
+
+ return (strcmp(hid, entry->hid) || strcmp(uid, entry->uid));
}
-static inline u16 get_device_id(struct device *dev)
+static inline u16 get_pci_device_id(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
return PCI_DEVID(pdev->bus->number, pdev->devfn);
}
+static inline int get_acpihid_device_id(struct device *dev,
+ struct acpihid_map_entry **entry)
+{
+ struct acpihid_map_entry *p;
+
+ list_for_each_entry(p, &acpihid_map, list) {
+ if (!match_hid_uid(dev, p)) {
+ if (entry)
+ *entry = p;
+ return p->devid;
+ }
+ }
+ return -EINVAL;
+}
+
+static inline int get_device_id(struct device *dev)
+{
+ int devid;
+
+ if (dev_is_pci(dev))
+ devid = get_pci_device_id(dev);
+ else
+ devid = get_acpihid_device_id(dev, NULL);
+
+ return devid;
+}
+
+static struct protection_domain *to_pdomain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct protection_domain, domain);
+}
+
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
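Throughout this patch get_device_id() changes from u16 to int so the ACPI-HID path can report a failed lookup as a negative errno; every caller must now check the sign before using the value as an index into the per-devid tables. The calling convention, reduced to a sketch:

/* Device ids fit in u16 on success; a negative return is an errno
 * (-EINVAL from the acpihid lookup), never a valid table index. */
static int demo_devid_to_entry(struct device *dev, u16 *out)
{
        int devid = get_device_id(dev);

        if (devid < 0)
                return devid;
        *out = (u16)devid;
        return 0;
}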
@@ -222,6 +272,7 @@ static u16 get_alias(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
u16 devid, ivrs_alias, pci_alias;
+ /* The callers make sure that get_device_id() does not fail here */
devid = get_device_id(dev);
ivrs_alias = amd_iommu_alias_table[devid];
pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
@@ -263,8 +314,7 @@ static u16 get_alias(struct device *dev)
*/
if (pci_alias == devid &&
PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
- pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
- pdev->dma_alias_devfn = ivrs_alias & 0xff;
+ pci_add_dma_alias(pdev, ivrs_alias & 0xff);
pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
dev_name(dev));
@@ -290,6 +340,29 @@ static struct iommu_dev_data *get_dev_data(struct device *dev)
return dev->archdata.iommu;
}
+/*
+ * Find or create an IOMMU group for an acpihid device.
+ */
+static struct iommu_group *acpihid_device_group(struct device *dev)
+{
+ struct acpihid_map_entry *p, *entry = NULL;
+ int devid;
+
+ devid = get_acpihid_device_id(dev, &entry);
+ if (devid < 0)
+ return ERR_PTR(devid);
+
+ list_for_each_entry(p, &acpihid_map, list) {
+ if ((devid == p->devid) && p->group)
+ entry->group = p->group;
+ }
+
+ if (!entry->group)
+ entry->group = generic_device_group(dev);
+
+ return entry->group;
+}
+
static bool pci_iommuv2_capable(struct pci_dev *pdev)
{
static const int caps[] = {
@@ -341,9 +414,11 @@ static void init_unity_mappings_for_device(struct device *dev,
struct dma_ops_domain *dma_dom)
{
struct unity_map_entry *e;
- u16 devid;
+ int devid;
devid = get_device_id(dev);
+ if (devid < 0)
+ return;
list_for_each_entry(e, &amd_iommu_unity_map, list) {
if (!(devid >= e->devid_start && devid <= e->devid_end))
@@ -358,16 +433,14 @@ static void init_unity_mappings_for_device(struct device *dev,
*/
static bool check_device(struct device *dev)
{
- u16 devid;
+ int devid;
if (!dev || !dev->dma_mask)
return false;
- /* No PCI device */
- if (!dev_is_pci(dev))
- return false;
-
devid = get_device_id(dev);
+ if (devid < 0)
+ return false;
/* Out of our scope? */
if (devid > amd_iommu_last_bdf)
@@ -402,22 +475,26 @@ out:
static int iommu_init_device(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
struct iommu_dev_data *dev_data;
+ int devid;
if (dev->archdata.iommu)
return 0;
- dev_data = find_dev_data(get_device_id(dev));
+ devid = get_device_id(dev);
+ if (devid < 0)
+ return devid;
+
+ dev_data = find_dev_data(devid);
if (!dev_data)
return -ENOMEM;
dev_data->alias = get_alias(dev);
- if (pci_iommuv2_capable(pdev)) {
+ if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
struct amd_iommu *iommu;
- iommu = amd_iommu_rlookup_table[dev_data->devid];
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
dev_data->iommu_v2 = iommu->is_iommu_v2;
}
@@ -431,9 +508,13 @@ static int iommu_init_device(struct device *dev)
static void iommu_ignore_device(struct device *dev)
{
- u16 devid, alias;
+ u16 alias;
+ int devid;
devid = get_device_id(dev);
+ if (devid < 0)
+ return;
+
alias = get_alias(dev);
memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
@@ -445,8 +526,14 @@ static void iommu_ignore_device(struct device *dev)
static void iommu_uninit_device(struct device *dev)
{
- struct iommu_dev_data *dev_data = search_dev_data(get_device_id(dev));
+ int devid;
+ struct iommu_dev_data *dev_data;
+ devid = get_device_id(dev);
+ if (devid < 0)
+ return;
+
+ dev_data = search_dev_data(devid);
if (!dev_data)
return;
@@ -467,70 +554,6 @@ static void iommu_uninit_device(struct device *dev)
*/
}
-#ifdef CONFIG_AMD_IOMMU_STATS
-
-/*
- * Initialization code for statistics collection
- */
-
-DECLARE_STATS_COUNTER(compl_wait);
-DECLARE_STATS_COUNTER(cnt_map_single);
-DECLARE_STATS_COUNTER(cnt_unmap_single);
-DECLARE_STATS_COUNTER(cnt_map_sg);
-DECLARE_STATS_COUNTER(cnt_unmap_sg);
-DECLARE_STATS_COUNTER(cnt_alloc_coherent);
-DECLARE_STATS_COUNTER(cnt_free_coherent);
-DECLARE_STATS_COUNTER(cross_page);
-DECLARE_STATS_COUNTER(domain_flush_single);
-DECLARE_STATS_COUNTER(domain_flush_all);
-DECLARE_STATS_COUNTER(alloced_io_mem);
-DECLARE_STATS_COUNTER(total_map_requests);
-DECLARE_STATS_COUNTER(complete_ppr);
-DECLARE_STATS_COUNTER(invalidate_iotlb);
-DECLARE_STATS_COUNTER(invalidate_iotlb_all);
-DECLARE_STATS_COUNTER(pri_requests);
-
-static struct dentry *stats_dir;
-static struct dentry *de_fflush;
-
-static void amd_iommu_stats_add(struct __iommu_counter *cnt)
-{
- if (stats_dir == NULL)
- return;
-
- cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
- &cnt->value);
-}
-
-static void amd_iommu_stats_init(void)
-{
- stats_dir = debugfs_create_dir("amd-iommu", NULL);
- if (stats_dir == NULL)
- return;
-
- de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
- &amd_iommu_unmap_flush);
-
- amd_iommu_stats_add(&compl_wait);
- amd_iommu_stats_add(&cnt_map_single);
- amd_iommu_stats_add(&cnt_unmap_single);
- amd_iommu_stats_add(&cnt_map_sg);
- amd_iommu_stats_add(&cnt_unmap_sg);
- amd_iommu_stats_add(&cnt_alloc_coherent);
- amd_iommu_stats_add(&cnt_free_coherent);
- amd_iommu_stats_add(&cross_page);
- amd_iommu_stats_add(&domain_flush_single);
- amd_iommu_stats_add(&domain_flush_all);
- amd_iommu_stats_add(&alloced_io_mem);
- amd_iommu_stats_add(&total_map_requests);
- amd_iommu_stats_add(&complete_ppr);
- amd_iommu_stats_add(&invalidate_iotlb);
- amd_iommu_stats_add(&invalidate_iotlb_all);
- amd_iommu_stats_add(&pri_requests);
-}
-
-#endif
-
/****************************************************************************
*
* Interrupt handling functions
@@ -653,8 +676,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
{
struct amd_iommu_fault fault;
- INC_STATS_COUNTER(pri_requests);
-
if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
return;
@@ -2284,13 +2305,17 @@ static bool pci_pri_tlp_required(struct pci_dev *pdev)
static int attach_device(struct device *dev,
struct protection_domain *domain)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *pdev;
struct iommu_dev_data *dev_data;
unsigned long flags;
int ret;
dev_data = get_dev_data(dev);
+ if (!dev_is_pci(dev))
+ goto skip_ats_check;
+
+ pdev = to_pci_dev(dev);
if (domain->flags & PD_IOMMUV2_MASK) {
if (!dev_data->passthrough)
return -EINVAL;
@@ -2309,6 +2334,7 @@ static int attach_device(struct device *dev,
dev_data->ats.qdep = pci_ats_queue_depth(pdev);
}
+skip_ats_check:
write_lock_irqsave(&amd_iommu_devtable_lock, flags);
ret = __attach_device(dev_data, domain);
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
@@ -2365,6 +2391,9 @@ static void detach_device(struct device *dev)
__detach_device(dev_data);
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+ if (!dev_is_pci(dev))
+ return;
+
if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
pdev_iommuv2_disable(to_pci_dev(dev));
else if (dev_data->ats.enabled)
@@ -2378,13 +2407,15 @@ static int amd_iommu_add_device(struct device *dev)
struct iommu_dev_data *dev_data;
struct iommu_domain *domain;
struct amd_iommu *iommu;
- u16 devid;
- int ret;
+ int ret, devid;
if (!check_device(dev) || get_dev_data(dev))
return 0;
devid = get_device_id(dev);
+ if (devid < 0)
+ return devid;
+
iommu = amd_iommu_rlookup_table[devid];
ret = iommu_init_device(dev);
@@ -2422,18 +2453,29 @@ out:
static void amd_iommu_remove_device(struct device *dev)
{
struct amd_iommu *iommu;
- u16 devid;
+ int devid;
if (!check_device(dev))
return;
devid = get_device_id(dev);
+ if (devid < 0)
+ return;
+
iommu = amd_iommu_rlookup_table[devid];
iommu_uninit_device(dev);
iommu_completion_wait(iommu);
}
+static struct iommu_group *amd_iommu_device_group(struct device *dev)
+{
+ if (dev_is_pci(dev))
+ return pci_device_group(dev);
+
+ return acpihid_device_group(dev);
+}
+
/*****************************************************************************
*
* The next functions belong to the dma_ops mapping/unmapping code.
@@ -2598,11 +2640,6 @@ static dma_addr_t __map_single(struct device *dev,
pages = iommu_num_pages(paddr, size, PAGE_SIZE);
paddr &= PAGE_MASK;
- INC_STATS_COUNTER(total_map_requests);
-
- if (pages > 1)
- INC_STATS_COUNTER(cross_page);
-
if (align)
align_mask = (1UL << get_order(size)) - 1;
@@ -2623,8 +2660,6 @@ static dma_addr_t __map_single(struct device *dev,
}
address += offset;
- ADD_STATS_COUNTER(alloced_io_mem, size);
-
if (unlikely(amd_iommu_np_cache)) {
domain_flush_pages(&dma_dom->domain, address, size);
domain_flush_complete(&dma_dom->domain);
@@ -2672,8 +2707,6 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
start += PAGE_SIZE;
}
- SUB_STATS_COUNTER(alloced_io_mem, size);
-
dma_ops_free_addresses(dma_dom, dma_addr, pages);
}
@@ -2689,8 +2722,6 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
struct protection_domain *domain;
u64 dma_mask;
- INC_STATS_COUNTER(cnt_map_single);
-
domain = get_domain(dev);
if (PTR_ERR(domain) == -EINVAL)
return (dma_addr_t)paddr;
@@ -2711,8 +2742,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
{
struct protection_domain *domain;
- INC_STATS_COUNTER(cnt_unmap_single);
-
domain = get_domain(dev);
if (IS_ERR(domain))
return;
@@ -2735,8 +2764,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
int mapped_elems = 0;
u64 dma_mask;
- INC_STATS_COUNTER(cnt_map_sg);
-
domain = get_domain(dev);
if (IS_ERR(domain))
return 0;
@@ -2782,8 +2809,6 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *s;
int i;
- INC_STATS_COUNTER(cnt_unmap_sg);
-
domain = get_domain(dev);
if (IS_ERR(domain))
return;
@@ -2806,8 +2831,6 @@ static void *alloc_coherent(struct device *dev, size_t size,
struct protection_domain *domain;
struct page *page;
- INC_STATS_COUNTER(cnt_alloc_coherent);
-
domain = get_domain(dev);
if (PTR_ERR(domain) == -EINVAL) {
page = alloc_pages(flag, get_order(size));
@@ -2861,8 +2884,6 @@ static void free_coherent(struct device *dev, size_t size,
struct protection_domain *domain;
struct page *page;
- INC_STATS_COUNTER(cnt_free_coherent);
-
page = virt_to_page(virt_addr);
size = PAGE_ALIGN(size);
@@ -2927,7 +2948,17 @@ static struct dma_map_ops amd_iommu_dma_ops = {
int __init amd_iommu_init_api(void)
{
- return bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
+ int err = 0;
+
+ err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
+ if (err)
+ return err;
+#ifdef CONFIG_ARM_AMBA
+ err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
+ if (err)
+ return err;
+#endif
+ return 0;
}
int __init amd_iommu_init_dma_ops(void)
@@ -2944,8 +2975,6 @@ int __init amd_iommu_init_dma_ops(void)
if (!swiotlb)
dma_ops = &nommu_dma_ops;
- amd_iommu_stats_init();
-
if (amd_iommu_unmap_flush)
pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
else
@@ -3099,12 +3128,14 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
{
struct iommu_dev_data *dev_data = dev->archdata.iommu;
struct amd_iommu *iommu;
- u16 devid;
+ int devid;
if (!check_device(dev))
return;
devid = get_device_id(dev);
+ if (devid < 0)
+ return;
if (dev_data->domain != NULL)
detach_device(dev);
@@ -3222,9 +3253,11 @@ static void amd_iommu_get_dm_regions(struct device *dev,
struct list_head *head)
{
struct unity_map_entry *entry;
- u16 devid;
+ int devid;
devid = get_device_id(dev);
+ if (devid < 0)
+ return;
list_for_each_entry(entry, &amd_iommu_unity_map, list) {
struct iommu_dm_region *region;
@@ -3271,7 +3304,7 @@ static const struct iommu_ops amd_iommu_ops = {
.iova_to_phys = amd_iommu_iova_to_phys,
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
- .device_group = pci_device_group,
+ .device_group = amd_iommu_device_group,
.get_dm_regions = amd_iommu_get_dm_regions,
.put_dm_regions = amd_iommu_put_dm_regions,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
@@ -3432,8 +3465,6 @@ out:
static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
u64 address)
{
- INC_STATS_COUNTER(invalidate_iotlb);
-
return __flush_pasid(domain, pasid, address, false);
}
@@ -3454,8 +3485,6 @@ EXPORT_SYMBOL(amd_iommu_flush_page);
static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
{
- INC_STATS_COUNTER(invalidate_iotlb_all);
-
return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
true);
}
@@ -3575,8 +3604,6 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
struct amd_iommu *iommu;
struct iommu_cmd cmd;
- INC_STATS_COUNTER(complete_ppr);
-
dev_data = get_dev_data(&pdev->dev);
iommu = amd_iommu_rlookup_table[dev_data->devid];
@@ -3926,6 +3953,9 @@ static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
case X86_IRQ_ALLOC_TYPE_MSI:
case X86_IRQ_ALLOC_TYPE_MSIX:
devid = get_device_id(&info->msi_dev->dev);
+ if (devid < 0)
+ return NULL;
+
iommu = amd_iommu_rlookup_table[devid];
if (iommu)
return iommu->msi_domain;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 94f1bf772..59741ead7 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -44,7 +44,7 @@
*/
#define IVRS_HEADER_LENGTH 48
-#define ACPI_IVHD_TYPE 0x10
+#define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
#define ACPI_IVMD_TYPE_ALL 0x20
#define ACPI_IVMD_TYPE 0x21
#define ACPI_IVMD_TYPE_RANGE 0x22
@@ -58,6 +58,11 @@
#define IVHD_DEV_EXT_SELECT 0x46
#define IVHD_DEV_EXT_SELECT_RANGE 0x47
#define IVHD_DEV_SPECIAL 0x48
+#define IVHD_DEV_ACPI_HID 0xf0
+
+#define UID_NOT_PRESENT 0
+#define UID_IS_INTEGER 1
+#define UID_IS_CHARACTER 2
#define IVHD_SPECIAL_IOAPIC 1
#define IVHD_SPECIAL_HPET 2
@@ -99,7 +104,11 @@ struct ivhd_header {
u64 mmio_phys;
u16 pci_seg;
u16 info;
- u32 efr;
+ u32 efr_attr;
+
+ /* Following only valid on IVHD type 11h and 40h */
+ u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
+ u64 res;
} __attribute__((packed));
/*
@@ -111,6 +120,11 @@ struct ivhd_entry {
u16 devid;
u8 flags;
u32 ext;
+ u32 hidh;
+ u64 cid;
+ u8 uidf;
+ u8 uidl;
+ u8 uid;
} __attribute__((packed));
/*
@@ -133,6 +147,7 @@ bool amd_iommu_irq_remap __read_mostly;
static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
+static int amd_iommu_target_ivhd_type;
u16 amd_iommu_last_bdf; /* largest PCI device id we have
to handle */
@@ -218,8 +233,12 @@ enum iommu_init_state {
#define EARLY_MAP_SIZE 4
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
+static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
+
static int __initdata early_ioapic_map_size;
static int __initdata early_hpet_map_size;
+static int __initdata early_acpihid_map_size;
+
static bool __initdata cmdline_maps;
static enum iommu_init_state init_state = IOMMU_START_STATE;
@@ -394,6 +413,22 @@ static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
}
+static inline u32 get_ivhd_header_size(struct ivhd_header *h)
+{
+ u32 size = 0;
+
+ switch (h->type) {
+ case 0x10:
+ size = 24;
+ break;
+ case 0x11:
+ case 0x40:
+ size = 40;
+ break;
+ }
+ return size;
+}
+
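As a quick cross-check of the 24/40 split above (not part of the patch): assuming the standard IVHD leading fields ahead of the tail shown in struct ivhd_header, the offsets add up as follows.

    /* sketch: where 24 and 40 come from, given the usual leading fields
     * u8 type, u8 flags, u16 length, u16 devid, u16 cap_ptr (assumed here)
     */
    enum {
        IVHD_HDR_LEGACY = 1 + 1 + 2 + 2 + 2 + 8 + 2 + 2 + 4, /* through efr_attr = 24 */
        IVHD_HDR_EXT    = IVHD_HDR_LEGACY + 8 + 8,           /* + efr_reg + res = 40 */
    };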
/****************************************************************************
*
* The functions below belong to the first pass of AMD IOMMU ACPI table
@@ -408,7 +443,15 @@ static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
*/
static inline int ivhd_entry_length(u8 *ivhd)
{
- return 0x04 << (*ivhd >> 6);
+ u32 type = ((struct ivhd_entry *)ivhd)->type;
+
+ if (type < 0x80) {
+ return 0x04 << (*ivhd >> 6);
+ } else if (type == IVHD_DEV_ACPI_HID) {
+ /* For ACPI_HID, offset 21 is uid len */
+ return *((u8 *)ivhd + 21) + 22;
+ }
+ return 0;
}
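Restated as a standalone sketch (assumptions mine, not the patch's): fixed-size entries encode their length in the top two bits of the type byte, while ACPI_HID entries are variable-length; e.g. an entry carrying a 2-byte UID is 2 + 22 = 24 bytes.

    /* sketch of the two length encodings */
    static inline int ivhd_entry_len_sketch(const unsigned char *e)
    {
        if (e[0] < 0x80)                /* types 0x00-0x3f -> 4, 0x40-0x7f -> 8 */
            return 0x04 << (e[0] >> 6);
        if (e[0] == 0xf0)               /* IVHD_DEV_ACPI_HID */
            return e[21] + 22;          /* 22 fixed bytes + UID length */
        return 0;                       /* unknown: caller bails out */
    }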
/*
@@ -420,7 +463,14 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
u8 *p = (void *)h, *end = (void *)h;
struct ivhd_entry *dev;
- p += sizeof(*h);
+ u32 ivhd_size = get_ivhd_header_size(h);
+
+ if (!ivhd_size) {
+ pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
+ return -EINVAL;
+ }
+
+ p += ivhd_size;
end += h->length;
while (p < end) {
@@ -448,6 +498,22 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
return 0;
}
+static int __init check_ivrs_checksum(struct acpi_table_header *table)
+{
+ int i;
+ u8 checksum = 0, *p = (u8 *)table;
+
+ for (i = 0; i < table->length; ++i)
+ checksum += p[i];
+ if (checksum != 0) {
+ /* ACPI table corrupt */
+ pr_err(FW_BUG "AMD-Vi: IVRS invalid checksum\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
/*
* Iterate over all IVHD entries in the ACPI table and find the highest device
* id which we need to handle. This is the first of three functions which parse
@@ -455,31 +521,19 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
*/
static int __init find_last_devid_acpi(struct acpi_table_header *table)
{
- int i;
- u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
+ u8 *p = (u8 *)table, *end = (u8 *)table;
struct ivhd_header *h;
- /*
- * Validate checksum here so we don't need to do it when
- * we actually parse the table
- */
- for (i = 0; i < table->length; ++i)
- checksum += p[i];
- if (checksum != 0)
- /* ACPI table corrupt */
- return -ENODEV;
-
p += IVRS_HEADER_LENGTH;
end += table->length;
while (p < end) {
h = (struct ivhd_header *)p;
- switch (h->type) {
- case ACPI_IVHD_TYPE:
- find_last_devid_from_ivhd(h);
- break;
- default:
- break;
+ if (h->type == amd_iommu_target_ivhd_type) {
+ int ret = find_last_devid_from_ivhd(h);
+
+ if (ret)
+ return ret;
}
p += h->length;
}
@@ -724,6 +778,42 @@ static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
return 0;
}
+static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
+ bool cmd_line)
+{
+ struct acpihid_map_entry *entry;
+ struct list_head *list = &acpihid_map;
+
+ list_for_each_entry(entry, list, list) {
+ if (strcmp(entry->hid, hid) ||
+ (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
+ !entry->cmd_line)
+ continue;
+
+ pr_info("AMD-Vi: Command-line override for hid:%s uid:%s\n",
+ hid, uid);
+ *devid = entry->devid;
+ return 0;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ memcpy(entry->uid, uid, strlen(uid));
+ memcpy(entry->hid, hid, strlen(hid));
+ entry->devid = *devid;
+ entry->cmd_line = cmd_line;
+ entry->root_devid = (entry->devid & (~0x7));
+
+ pr_info("AMD-Vi:%s, add hid:%s, uid:%s, rdevid:%d\n",
+ entry->cmd_line ? "cmd" : "ivrs",
+ entry->hid, entry->uid, entry->root_devid);
+
+ list_add_tail(&entry->list, list);
+ return 0;
+}
+
static int __init add_early_maps(void)
{
int i, ret;
@@ -746,6 +836,15 @@ static int __init add_early_maps(void)
return ret;
}
+ for (i = 0; i < early_acpihid_map_size; ++i) {
+ ret = add_acpi_hid_device(early_acpihid_map[i].hid,
+ early_acpihid_map[i].uid,
+ &early_acpihid_map[i].devid,
+ early_acpihid_map[i].cmd_line);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -785,6 +884,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
u32 dev_i, ext_flags = 0;
bool alias = false;
struct ivhd_entry *e;
+ u32 ivhd_size;
int ret;
@@ -800,7 +900,14 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
/*
* Done. Now parse the device entries
*/
- p += sizeof(struct ivhd_header);
+ ivhd_size = get_ivhd_header_size(h);
+ if (!ivhd_size) {
+ pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
+ return -EINVAL;
+ }
+
+ p += ivhd_size;
+
end += h->length;
@@ -958,6 +1065,70 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
+ case IVHD_DEV_ACPI_HID: {
+ u16 devid;
+ u8 hid[ACPIHID_HID_LEN] = {0};
+ u8 uid[ACPIHID_UID_LEN] = {0};
+ int ret;
+
+ if (h->type != 0x40) {
+ pr_err(FW_BUG "Invalid IVHD device type %#x\n",
+ e->type);
+ break;
+ }
+
+ memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
+ hid[ACPIHID_HID_LEN - 1] = '\0';
+
+ if (!(*hid)) {
+ pr_err(FW_BUG "Invalid HID.\n");
+ break;
+ }
+
+ switch (e->uidf) {
+ case UID_NOT_PRESENT:
+
+ if (e->uidl != 0)
+ pr_warn(FW_BUG "Invalid UID length.\n");
+
+ break;
+ case UID_IS_INTEGER:
+
+ sprintf(uid, "%d", e->uid);
+
+ break;
+ case UID_IS_CHARACTER:
+
+ memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
+ uid[ACPIHID_UID_LEN - 1] = '\0';
+
+ break;
+ default:
+ break;
+ }
+
+ devid = e->devid;
+ DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
+ hid, uid,
+ PCI_BUS_NUM(devid),
+ PCI_SLOT(devid),
+ PCI_FUNC(devid));
+
+ flags = e->flags;
+
+ ret = add_acpi_hid_device(hid, uid, &devid, false);
+ if (ret)
+ return ret;
+
+ /*
+ * add_acpi_hid_device() might update the devid in case a
+ * command-line override is present. So call
+ * set_dev_entry_from_acpi() after add_acpi_hid_device().
+ */
+ set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
+
+ break;
+ }
default:
break;
}
@@ -1078,13 +1249,25 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->pci_seg = h->pci_seg;
iommu->mmio_phys = h->mmio_phys;
- /* Check if IVHD EFR contains proper max banks/counters */
- if ((h->efr != 0) &&
- ((h->efr & (0xF << 13)) != 0) &&
- ((h->efr & (0x3F << 17)) != 0)) {
- iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
- } else {
- iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+ switch (h->type) {
+ case 0x10:
+ /* Check if IVHD EFR contains proper max banks/counters */
+ if ((h->efr_attr != 0) &&
+ ((h->efr_attr & (0xF << 13)) != 0) &&
+ ((h->efr_attr & (0x3F << 17)) != 0))
+ iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
+ else
+ iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+ break;
+ case 0x11:
+ case 0x40:
+ if (h->efr_reg & (1 << 9))
+ iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
+ else
+ iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
+ break;
+ default:
+ return -EINVAL;
}
iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
@@ -1117,6 +1300,32 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
return 0;
}
+/**
+ * get_highest_supported_ivhd_type - Look up the appropriate IVHD type
+ * @ivrs: Pointer to the IVRS header
+ *
+ * This function searches through all IVHDs and returns the highest supported IVHD type
+ */
+static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
+{
+ u8 *base = (u8 *)ivrs;
+ struct ivhd_header *ivhd = (struct ivhd_header *)
+ (base + IVRS_HEADER_LENGTH);
+ u8 last_type = ivhd->type;
+ u16 devid = ivhd->devid;
+
+ while (((u8 *)ivhd - base < ivrs->length) &&
+ (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
+ u8 *p = (u8 *) ivhd;
+
+ if (ivhd->devid == devid)
+ last_type = ivhd->type;
+ ivhd = (struct ivhd_header *)(p + ivhd->length);
+ }
+
+ return last_type;
+}
+
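A worked example of the selection (hypothetical table layout):

    /* IVHDs all carrying the first IOMMU's devid, in firmware order:
     *
     *   type 0x10  -> last_type = 0x10
     *   type 0x11  -> last_type = 0x11
     *   type 0x40  -> last_type = 0x40   <- chosen (most comprehensive)
     *
     * an IVHD with a different devid is skipped; a type above
     * ACPI_IVHD_TYPE_MAX_SUPPORTED stops the walk.
     */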
/*
* Iterates over all IOMMU entries in the ACPI table, allocates the
* IOMMU structure and initializes it with init_iommu_one()
@@ -1133,8 +1342,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
while (p < end) {
h = (struct ivhd_header *)p;
- switch (*p) {
- case ACPI_IVHD_TYPE:
+ if (*p == amd_iommu_target_ivhd_type) {
DUMP_printk("device: %02x:%02x.%01x cap: %04x "
"seg: %d flags: %01x info %04x\n",
@@ -1151,9 +1359,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
ret = init_iommu_one(iommu, h);
if (ret)
return ret;
- break;
- default:
- break;
}
p += h->length;
@@ -1828,18 +2033,20 @@ static void __init free_dma_resources(void)
* remapping setup code.
*
* This function basically parses the ACPI table for AMD IOMMU (IVRS)
- * three times:
+ * four times:
*
- * 1 pass) Find the highest PCI device id the driver has to handle.
+ * 1 pass) Discover the most comprehensive IVHD type to use.
+ *
+ * 2 pass) Find the highest PCI device id the driver has to handle.
* Upon this information the size of the data structures is
* determined that needs to be allocated.
*
- * 2 pass) Initialize the data structures just allocated with the
+ * 3 pass) Initialize the data structures just allocated with the
* information in the ACPI table about available AMD IOMMUs
* in the system. It also maps the PCI devices in the
* system to specific IOMMUs
*
- * 3 pass) After the basic data structures are allocated and
+ * 4 pass) After the basic data structures are allocated and
* initialized we update them with information about memory
* remapping requirements parsed out of the ACPI table in
* this last pass.
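Condensed, the resulting init flow reads roughly as below (a sketch of call order only; see early_amd_iommu_init() for the real sequence and error handling):

    ret = check_ivrs_checksum(ivrs_base);                 /* validate once, up front */
    amd_iommu_target_ivhd_type =
            get_highest_supported_ivhd_type(ivrs_base);   /* pass 1 */
    find_last_devid_acpi(ivrs_base);                      /* pass 2: size structures */
    init_iommu_all(ivrs_base);                            /* pass 3: per-IOMMU setup */
    init_memory_definitions(ivrs_base);                   /* pass 4: unity mappings */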
@@ -1867,6 +2074,17 @@ static int __init early_amd_iommu_init(void)
}
/*
+ * Validate checksum here so we don't need to do it when
+ * we actually parse the table
+ */
+ ret = check_ivrs_checksum(ivrs_base);
+ if (ret)
+ return ret;
+
+ amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
+ DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
+
+ /*
* First parse ACPI tables to find the largest Bus/Dev/Func
* we need to handle. Upon this information the shared data
* structures for the IOMMUs in the system will be allocated
@@ -2269,10 +2487,43 @@ static int __init parse_ivrs_hpet(char *str)
return 1;
}
+static int __init parse_ivrs_acpihid(char *str)
+{
+ u32 bus, dev, fn;
+ char *hid, *uid, *p;
+ char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
+ int ret, i;
+
+ ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
+ if (ret != 4) {
+ pr_err("AMD-Vi: Invalid command line: ivrs_acpihid(%s)\n", str);
+ return 1;
+ }
+
+ p = acpiid;
+ hid = strsep(&p, ":");
+ uid = p;
+
+ if (!hid || !(*hid) || !uid) {
+ pr_err("AMD-Vi: Invalid command line: hid or uid\n");
+ return 1;
+ }
+
+ i = early_acpihid_map_size++;
+ memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
+ memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
+ early_acpihid_map[i].devid =
+ ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
+ early_acpihid_map[i].cmd_line = true;
+
+ return 1;
+}
+
__setup("amd_iommu_dump", parse_amd_iommu_dump);
__setup("amd_iommu=", parse_amd_iommu_options);
__setup("ivrs_ioapic", parse_ivrs_ioapic);
__setup("ivrs_hpet", parse_ivrs_hpet);
+__setup("ivrs_acpihid", parse_ivrs_acpihid);
IOMMU_INIT_FINISH(amd_iommu_detect,
gart_iommu_hole_init,
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 9d32b20a5..590956ac7 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -527,6 +527,19 @@ struct amd_iommu {
#endif
};
+#define ACPIHID_UID_LEN 256
+#define ACPIHID_HID_LEN 9
+
+struct acpihid_map_entry {
+ struct list_head list;
+ u8 uid[ACPIHID_UID_LEN];
+ u8 hid[ACPIHID_HID_LEN];
+ u16 devid;
+ u16 root_devid;
+ bool cmd_line;
+ struct iommu_group *group;
+};
+
struct devid_map {
struct list_head list;
u8 id;
@@ -537,6 +550,7 @@ struct devid_map {
/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
extern struct list_head ioapic_map;
extern struct list_head hpet_map;
+extern struct list_head acpihid_map;
/*
* List with all IOMMUs in the system. This list is not locked because it is
@@ -668,30 +682,4 @@ static inline int get_hpet_devid(int id)
return -EINVAL;
}
-#ifdef CONFIG_AMD_IOMMU_STATS
-
-struct __iommu_counter {
- char *name;
- struct dentry *dent;
- u64 value;
-};
-
-#define DECLARE_STATS_COUNTER(nm) \
- static struct __iommu_counter nm = { \
- .name = #nm, \
- }
-
-#define INC_STATS_COUNTER(name) name.value += 1
-#define ADD_STATS_COUNTER(name, x) name.value += (x)
-#define SUB_STATS_COUNTER(name, x) name.value -= (x)
-
-#else /* CONFIG_AMD_IOMMU_STATS */
-
-#define DECLARE_STATS_COUNTER(name)
-#define INC_STATS_COUNTER(name)
-#define ADD_STATS_COUNTER(name, x)
-#define SUB_STATS_COUNTER(name, x)
-
-#endif /* CONFIG_AMD_IOMMU_STATS */
-
#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 3e20208d6..5f6b3bcab 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -590,6 +590,7 @@ struct arm_smmu_device {
unsigned long ias; /* IPA */
unsigned long oas; /* PA */
+ unsigned long pgsize_bitmap;
#define ARM_SMMU_MAX_ASIDS (1 << 16)
unsigned int asid_bits;
@@ -1476,7 +1477,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
- if (IS_ERR_VALUE(asid))
+ if (asid < 0)
return asid;
cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
@@ -1507,7 +1508,7 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
- if (IS_ERR_VALUE(vmid))
+ if (vmid < 0)
return vmid;
cfg->vmid = (u16)vmid;
@@ -1516,8 +1517,6 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
return 0;
}
-static struct iommu_ops arm_smmu_ops;
-
static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
int ret;
@@ -1555,7 +1554,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
}
pgtbl_cfg = (struct io_pgtable_cfg) {
- .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
+ .pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
.tlb = &arm_smmu_gather_ops,
@@ -1566,11 +1565,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
if (!pgtbl_ops)
return -ENOMEM;
- arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
smmu_domain->pgtbl_ops = pgtbl_ops;
ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
free_io_pgtable_ops(pgtbl_ops);
return ret;
@@ -1643,7 +1642,7 @@ static void arm_smmu_detach_dev(struct device *dev)
struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
smmu_group->ste.bypass = true;
- if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
+ if (arm_smmu_install_ste_for_group(smmu_group) < 0)
dev_warn(dev, "failed to install bypass STE\n");
smmu_group->domain = NULL;
@@ -1695,7 +1694,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA;
ret = arm_smmu_install_ste_for_group(smmu_group);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
smmu_group->domain = NULL;
out_unlock:
@@ -2237,7 +2236,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
arm_smmu_evtq_handler,
arm_smmu_evtq_thread,
0, "arm-smmu-v3-evtq", smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
dev_warn(smmu->dev, "failed to enable evtq irq\n");
}
@@ -2246,7 +2245,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
ret = devm_request_irq(smmu->dev, irq,
arm_smmu_cmdq_sync_handler, 0,
"arm-smmu-v3-cmdq-sync", smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
}
@@ -2254,7 +2253,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
if (irq) {
ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
0, "arm-smmu-v3-gerror", smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
dev_warn(smmu->dev, "failed to enable gerror irq\n");
}
@@ -2266,7 +2265,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
arm_smmu_priq_thread,
0, "arm-smmu-v3-priq",
smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
dev_warn(smmu->dev,
"failed to enable priq irq\n");
else
@@ -2411,7 +2410,6 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
{
u32 reg;
bool coherent;
- unsigned long pgsize_bitmap = 0;
/* IDR0 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
@@ -2542,13 +2540,16 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
/* Page sizes */
if (reg & IDR5_GRAN64K)
- pgsize_bitmap |= SZ_64K | SZ_512M;
+ smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
if (reg & IDR5_GRAN16K)
- pgsize_bitmap |= SZ_16K | SZ_32M;
+ smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
if (reg & IDR5_GRAN4K)
- pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
+ smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
- arm_smmu_ops.pgsize_bitmap &= pgsize_bitmap;
+ if (arm_smmu_ops.pgsize_bitmap == -1UL)
+ arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
+ else
+ arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
/* Output address size */
switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 7c39ac4b9..9345a3fcb 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -34,6 +34,7 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -49,7 +50,7 @@
#include "io-pgtable.h"
/* Maximum number of stream IDs assigned to a single device */
-#define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS
+#define MAX_MASTER_STREAMIDS 128
/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS 128
@@ -71,16 +72,15 @@
((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
? 0x400 : 0))
+/*
+ * Some 64-bit registers only make sense to write atomically, but in such
+ * cases all the data relevant to AArch32 formats lies within the lower word,
+ * therefore this actually makes more sense than it might first appear.
+ */
#ifdef CONFIG_64BIT
-#define smmu_writeq writeq_relaxed
+#define smmu_write_atomic_lq writeq_relaxed
#else
-#define smmu_writeq(reg64, addr) \
- do { \
- u64 __val = (reg64); \
- void __iomem *__addr = (addr); \
- writel_relaxed(__val >> 32, __addr + 4); \
- writel_relaxed(__val, __addr); \
- } while (0)
+#define smmu_write_atomic_lq writel_relaxed
#endif
/* Configuration registers */
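Note the distinction introduced here: full 64-bit writes (e.g. the TTBRs) now go through writeq_relaxed(), which the new include emulates on 32-bit, while smmu_write_atomic_lq deliberately writes only the low word. For reference, the 32-bit helper the header supplies matches the removed macro body (hi word first, then lo); reproduced as a sketch:

    /* sketch of <linux/io-64-nonatomic-hi-lo.h> behaviour on !CONFIG_64BIT */
    static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
    {
        writel_relaxed(val >> 32, addr + 4);
        writel_relaxed(val, addr);
    }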
@@ -94,9 +94,13 @@
#define sCR0_VMIDPNE (1 << 11)
#define sCR0_PTM (1 << 12)
#define sCR0_FB (1 << 13)
+#define sCR0_VMID16EN (1 << 31)
#define sCR0_BSU_SHIFT 14
#define sCR0_BSU_MASK 0x3
+/* Auxiliary Configuration register */
+#define ARM_SMMU_GR0_sACR 0x10
+
/* Identification registers */
#define ARM_SMMU_GR0_ID0 0x20
#define ARM_SMMU_GR0_ID1 0x24
@@ -116,6 +120,8 @@
#define ID0_NTS (1 << 28)
#define ID0_SMS (1 << 27)
#define ID0_ATOSNS (1 << 26)
+#define ID0_PTFS_NO_AARCH32 (1 << 25)
+#define ID0_PTFS_NO_AARCH32S (1 << 24)
#define ID0_CTTW (1 << 14)
#define ID0_NUMIRPT_SHIFT 16
#define ID0_NUMIRPT_MASK 0xff
@@ -141,6 +147,10 @@
#define ID2_PTFS_4K (1 << 12)
#define ID2_PTFS_16K (1 << 13)
#define ID2_PTFS_64K (1 << 14)
+#define ID2_VMID16 (1 << 15)
+
+#define ID7_MAJOR_SHIFT 4
+#define ID7_MAJOR_MASK 0xf
/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID 0x64
@@ -193,12 +203,15 @@
#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT (0 << 0)
#define CBA2R_RW64_64BIT (1 << 0)
+#define CBA2R_VMID_SHIFT 16
+#define CBA2R_VMID_MASK 0xffff
/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
#define ARM_SMMU_CB_SCTLR 0x0
+#define ARM_SMMU_CB_ACTLR 0x4
#define ARM_SMMU_CB_RESUME 0x8
#define ARM_SMMU_CB_TTBCR2 0x10
#define ARM_SMMU_CB_TTBR0 0x20
@@ -206,11 +219,9 @@
#define ARM_SMMU_CB_TTBCR 0x30
#define ARM_SMMU_CB_S1_MAIR0 0x38
#define ARM_SMMU_CB_S1_MAIR1 0x3c
-#define ARM_SMMU_CB_PAR_LO 0x50
-#define ARM_SMMU_CB_PAR_HI 0x54
+#define ARM_SMMU_CB_PAR 0x50
#define ARM_SMMU_CB_FSR 0x58
-#define ARM_SMMU_CB_FAR_LO 0x60
-#define ARM_SMMU_CB_FAR_HI 0x64
+#define ARM_SMMU_CB_FAR 0x60
#define ARM_SMMU_CB_FSYNR0 0x68
#define ARM_SMMU_CB_S1_TLBIVA 0x600
#define ARM_SMMU_CB_S1_TLBIASID 0x610
@@ -230,6 +241,10 @@
#define SCTLR_M (1 << 0)
#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)
+#define ARM_MMU500_ACTLR_CPRE (1 << 1)
+
+#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
+
#define CB_PAR_F (1 << 0)
#define ATSR_ACTIVE (1 << 0)
@@ -270,10 +285,17 @@ MODULE_PARM_DESC(disable_bypass,
"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
enum arm_smmu_arch_version {
- ARM_SMMU_V1 = 1,
+ ARM_SMMU_V1,
+ ARM_SMMU_V1_64K,
ARM_SMMU_V2,
};
+enum arm_smmu_implementation {
+ GENERIC_SMMU,
+ ARM_MMU500,
+ CAVIUM_SMMUV2,
+};
+
struct arm_smmu_smr {
u8 idx;
u16 mask;
@@ -305,11 +327,18 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
+#define ARM_SMMU_FEAT_VMID16 (1 << 6)
+#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
+#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
+#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
+#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
+#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
u32 features;
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
u32 options;
enum arm_smmu_arch_version version;
+ enum arm_smmu_implementation model;
u32 num_context_banks;
u32 num_s2_context_banks;
@@ -322,6 +351,7 @@ struct arm_smmu_device {
unsigned long va_size;
unsigned long ipa_size;
unsigned long pa_size;
+ unsigned long pgsize_bitmap;
u32 num_global_irqs;
u32 num_context_irqs;
@@ -329,17 +359,27 @@ struct arm_smmu_device {
struct list_head list;
struct rb_root masters;
+
+ u32 cavium_id_base; /* Specific to Cavium */
+};
+
+enum arm_smmu_context_fmt {
+ ARM_SMMU_CTX_FMT_NONE,
+ ARM_SMMU_CTX_FMT_AARCH64,
+ ARM_SMMU_CTX_FMT_AARCH32_L,
+ ARM_SMMU_CTX_FMT_AARCH32_S,
};
struct arm_smmu_cfg {
u8 cbndx;
u8 irptndx;
u32 cbar;
+ enum arm_smmu_context_fmt fmt;
};
#define INVALID_IRPTNDX 0xff
-#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
-#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
+#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
+#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
@@ -357,7 +397,11 @@ struct arm_smmu_domain {
struct iommu_domain domain;
};
-static struct iommu_ops arm_smmu_ops;
+struct arm_smmu_phandle_args {
+ struct device_node *np;
+ int args_count;
+ uint32_t args[MAX_MASTER_STREAMIDS];
+};
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);
@@ -367,6 +411,8 @@ struct arm_smmu_option_prop {
const char *prop;
};
+static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
+
static struct arm_smmu_option_prop arm_smmu_options[] = {
{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
{ 0, NULL},
@@ -466,7 +512,7 @@ static int insert_smmu_master(struct arm_smmu_device *smmu,
static int register_smmu_master(struct arm_smmu_device *smmu,
struct device *dev,
- struct of_phandle_args *masterspec)
+ struct arm_smmu_phandle_args *masterspec)
{
int i;
struct arm_smmu_master *master;
@@ -578,11 +624,11 @@ static void arm_smmu_tlb_inv_context(void *cookie)
if (stage1) {
base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
- writel_relaxed(ARM_SMMU_CB_ASID(cfg),
+ writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
base + ARM_SMMU_CB_S1_TLBIASID);
} else {
base = ARM_SMMU_GR0(smmu);
- writel_relaxed(ARM_SMMU_CB_VMID(cfg),
+ writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
base + ARM_SMMU_GR0_TLBIVMID);
}
@@ -602,37 +648,33 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
- if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
+ if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
iova &= ~12UL;
- iova |= ARM_SMMU_CB_ASID(cfg);
+ iova |= ARM_SMMU_CB_ASID(smmu, cfg);
do {
writel_relaxed(iova, reg);
iova += granule;
} while (size -= granule);
-#ifdef CONFIG_64BIT
} else {
iova >>= 12;
- iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
+ iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
do {
writeq_relaxed(iova, reg);
iova += granule >> 12;
} while (size -= granule);
-#endif
}
-#ifdef CONFIG_64BIT
} else if (smmu->version == ARM_SMMU_V2) {
reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
ARM_SMMU_CB_S2_TLBIIPAS2;
iova >>= 12;
do {
- writeq_relaxed(iova, reg);
+ smmu_write_atomic_lq(iova, reg);
iova += granule >> 12;
} while (size -= granule);
-#endif
} else {
reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
- writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
+ writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
}
}
@@ -645,7 +687,7 @@ static struct iommu_gather_ops arm_smmu_gather_ops = {
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
int flags, ret;
- u32 fsr, far, fsynr, resume;
+ u32 fsr, fsynr, resume;
unsigned long iova;
struct iommu_domain *domain = dev;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -667,13 +709,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
- far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
- iova = far;
-#ifdef CONFIG_64BIT
- far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
- iova |= ((unsigned long)far << 32);
-#endif
-
+ iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
ret = IRQ_HANDLED;
resume = RESUME_RETRY;
@@ -734,22 +770,20 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
if (smmu->version > ARM_SMMU_V1) {
- /*
- * CBA2R.
- * *Must* be initialised before CBAR thanks to VMID16
- * architectural oversight affected some implementations.
- */
-#ifdef CONFIG_64BIT
- reg = CBA2R_RW64_64BIT;
-#else
- reg = CBA2R_RW64_32BIT;
-#endif
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+ reg = CBA2R_RW64_64BIT;
+ else
+ reg = CBA2R_RW64_32BIT;
+ /* 16-bit VMIDs live in CBA2R */
+ if (smmu->features & ARM_SMMU_FEAT_VMID16)
+ reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
+
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
}
/* CBAR */
reg = cfg->cbar;
- if (smmu->version == ARM_SMMU_V1)
+ if (smmu->version < ARM_SMMU_V2)
reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
/*
@@ -759,8 +793,9 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
if (stage1) {
reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
- } else {
- reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
+ } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
+ /* 8-bit VMIDs live in CBAR */
+ reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
}
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
@@ -768,15 +803,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
if (stage1) {
reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
- reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
- smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+ reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
+ writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
- reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
- smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
+ reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
+ writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
} else {
reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
- smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+ writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
}
/* TTBCR */
@@ -855,16 +890,40 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+ /*
+ * Choosing a suitable context format is even more fiddly. Until we
+ * grow some way for the caller to express a preference, and/or move
+ * the decision into the io-pgtable code where it arguably belongs,
+ * just aim for the closest thing to the rest of the system, and hope
+ * that the hardware isn't esoteric enough that we can't assume AArch64
+ * support to be a superset of AArch32 support...
+ */
+ if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
+ cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
+ if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
+ (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
+ ARM_SMMU_FEAT_FMT_AARCH64_16K |
+ ARM_SMMU_FEAT_FMT_AARCH64_4K)))
+ cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
+
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
switch (smmu_domain->stage) {
case ARM_SMMU_DOMAIN_S1:
cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
start = smmu->num_s2_context_banks;
ias = smmu->va_size;
oas = smmu->ipa_size;
- if (IS_ENABLED(CONFIG_64BIT))
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
fmt = ARM_64_LPAE_S1;
- else
+ } else {
fmt = ARM_32_LPAE_S1;
+ ias = min(ias, 32UL);
+ oas = min(oas, 40UL);
+ }
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
@@ -876,10 +935,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
start = 0;
ias = smmu->ipa_size;
oas = smmu->pa_size;
- if (IS_ENABLED(CONFIG_64BIT))
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
fmt = ARM_64_LPAE_S2;
- else
+ } else {
fmt = ARM_32_LPAE_S2;
+ ias = min(ias, 40UL);
+ oas = min(oas, 40UL);
+ }
break;
default:
ret = -EINVAL;
@@ -888,11 +950,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
smmu->num_context_banks);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto out_unlock;
cfg->cbndx = ret;
- if (smmu->version == ARM_SMMU_V1) {
+ if (smmu->version < ARM_SMMU_V2) {
cfg->irptndx = atomic_inc_return(&smmu->irptndx);
cfg->irptndx %= smmu->num_context_irqs;
} else {
@@ -900,7 +962,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
}
pgtbl_cfg = (struct io_pgtable_cfg) {
- .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
+ .pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
.tlb = &arm_smmu_gather_ops,
@@ -914,8 +976,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
goto out_clear_smmu;
}
- /* Update our support page sizes to reflect the page table format */
- arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+ /* Update the domain's page sizes to reflect the page table format */
+ domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
/* Initialise the context bank with our page table cfg */
arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
@@ -927,7 +989,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
"arm-smmu-context-fault", domain);
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
cfg->irptndx, irq);
cfg->irptndx = INVALID_IRPTNDX;
@@ -1037,7 +1099,7 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
for (i = 0; i < cfg->num_streamids; ++i) {
int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
smmu->num_mapping_groups);
- if (IS_ERR_VALUE(idx)) {
+ if (idx < 0) {
dev_err(smmu->dev, "failed to allocate free SMR\n");
goto err_free_smrs;
}
@@ -1171,7 +1233,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/*
@@ -1252,8 +1314,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
/* ATS1 registers can only be written atomically */
va = iova & ~0xfffUL;
if (smmu->version == ARM_SMMU_V2)
- smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
- else
+ smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
+ else /* Register is only 32-bit in v1 */
writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
@@ -1264,9 +1326,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
return ops->iova_to_phys(ops, iova);
}
- phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
- phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
-
+ phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
if (phys & CB_PAR_F) {
dev_err(dev, "translation fault!\n");
dev_err(dev, "PAR = 0x%llx\n", phys);
@@ -1492,7 +1552,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
void __iomem *cb_base;
int i = 0;
- u32 reg;
+ u32 reg, major;
/* clear global FSR */
reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
@@ -1505,11 +1565,33 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
}
+ /*
+ * Before clearing ARM_MMU500_ACTLR_CPRE, we need to clear the
+ * CACHE_LOCK bit of ACR first, and CACHE_LOCK is only present
+ * in MMU-500r2 onwards.
+ */
+ reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
+ major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
+ if ((smmu->model == ARM_MMU500) && (major >= 2)) {
+ reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
+ reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+ writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
+ }
+
/* Make sure all context banks are disabled and clear CB_FSR */
for (i = 0; i < smmu->num_context_banks; ++i) {
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
+ /*
+ * Disable MMU-500's not-particularly-beneficial next-page
+ * prefetcher for the sake of errata #841119 and #826419.
+ */
+ if (smmu->model == ARM_MMU500) {
+ reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
+ reg &= ~ARM_MMU500_ACTLR_CPRE;
+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
+ }
}
/* Invalidate the TLB, just in case */
@@ -1537,6 +1619,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
/* Don't upgrade barriers */
reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
+ if (smmu->features & ARM_SMMU_FEAT_VMID16)
+ reg |= sCR0_VMID16EN;
+
/* Push the button */
__arm_smmu_tlb_sync(smmu);
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
@@ -1569,7 +1654,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
bool cttw_dt, cttw_reg;
dev_notice(smmu->dev, "probing hardware configuration...\n");
- dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
+ dev_notice(smmu->dev, "SMMUv%d with:\n",
+ smmu->version == ARM_SMMU_V2 ? 2 : 1);
/* ID0 */
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
@@ -1601,7 +1687,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENODEV;
}
- if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
+ if ((id & ID0_S1TS) &&
+ ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
dev_notice(smmu->dev, "\taddress translation ops\n");
}
@@ -1657,6 +1744,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
ID0_NUMSIDB_MASK;
}
+ if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
+ smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
+ if (!(id & ID0_PTFS_NO_AARCH32S))
+ smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
+ }
+
/* ID1 */
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
@@ -1677,6 +1770,17 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
}
dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
smmu->num_context_banks, smmu->num_s2_context_banks);
+ /*
+ * Cavium CN88xx erratum #27704.
+ * Ensure ASID and VMID allocation is unique across all SMMUs in
+ * the system.
+ */
+ if (smmu->model == CAVIUM_SMMUV2) {
+ smmu->cavium_id_base =
+ atomic_add_return(smmu->num_context_banks,
+ &cavium_smmu_context_count);
+ smmu->cavium_id_base -= smmu->num_context_banks;
+ }
/* ID2 */
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
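A worked example of the base allocation (hypothetical bank counts):

    /* two CAVIUM_SMMUV2 instances, 128 context banks each:
     *
     *   SMMU A: atomic_add_return(128, &cavium_smmu_context_count) == 128
     *           -> cavium_id_base = 128 - 128 = 0
     *   SMMU B: atomic_add_return(128, ...) == 256
     *           -> cavium_id_base = 256 - 128 = 128
     *
     * so the ARM_SMMU_CB_ASID()/ARM_SMMU_CB_VMID() ranges never collide
     * across the two SMMUs.
     */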
@@ -1687,6 +1791,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
smmu->pa_size = size;
+ if (id & ID2_VMID16)
+ smmu->features |= ARM_SMMU_FEAT_VMID16;
+
/*
* What the page table walker can address actually depends on which
* descriptor format is in use, but since a) we don't know that yet,
@@ -1696,26 +1803,39 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
dev_warn(smmu->dev,
"failed to set DMA mask for table walker\n");
- if (smmu->version == ARM_SMMU_V1) {
+ if (smmu->version < ARM_SMMU_V2) {
smmu->va_size = smmu->ipa_size;
- size = SZ_4K | SZ_2M | SZ_1G;
+ if (smmu->version == ARM_SMMU_V1_64K)
+ smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
} else {
size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
smmu->va_size = arm_smmu_id_size_to_bits(size);
-#ifndef CONFIG_64BIT
- smmu->va_size = min(32UL, smmu->va_size);
-#endif
- size = 0;
if (id & ID2_PTFS_4K)
- size |= SZ_4K | SZ_2M | SZ_1G;
+ smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
if (id & ID2_PTFS_16K)
- size |= SZ_16K | SZ_32M;
+ smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
if (id & ID2_PTFS_64K)
- size |= SZ_64K | SZ_512M;
+ smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
}
- arm_smmu_ops.pgsize_bitmap &= size;
- dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
+ /* Now we've corralled the various formats, what'll it do? */
+ if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
+ smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
+ if (smmu->features &
+ (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
+ smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
+ if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
+ smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
+ if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
+ smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
+
+ if (arm_smmu_ops.pgsize_bitmap == -1UL)
+ arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
+ else
+ arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
+ dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
+ smmu->pgsize_bitmap);
+
if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
@@ -1728,12 +1848,27 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return 0;
}
+struct arm_smmu_match_data {
+ enum arm_smmu_arch_version version;
+ enum arm_smmu_implementation model;
+};
+
+#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
+static struct arm_smmu_match_data name = { .version = ver, .model = imp }
+
+ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
+ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
+ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
+ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
+ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
+
static const struct of_device_id arm_smmu_of_match[] = {
- { .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
- { .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
- { .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
- { .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
- { .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
+ { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
+ { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
+ { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
+ { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
+ { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
+ { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
@@ -1741,11 +1876,13 @@ MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
+ const struct arm_smmu_match_data *data;
struct resource *res;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
struct rb_node *node;
- struct of_phandle_args masterspec;
+ struct of_phandle_iterator it;
+ struct arm_smmu_phandle_args *masterspec;
int num_irqs, i, err;
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
@@ -1756,7 +1893,9 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
smmu->dev = dev;
of_id = of_match_node(arm_smmu_of_match, dev->of_node);
- smmu->version = (enum arm_smmu_arch_version)of_id->data;
+ data = of_id->data;
+ smmu->version = data->version;
+ smmu->model = data->model;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
smmu->base = devm_ioremap_resource(dev, res);
@@ -1806,23 +1945,38 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
i = 0;
smmu->masters = RB_ROOT;
- while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
- "#stream-id-cells", i,
- &masterspec)) {
- err = register_smmu_master(smmu, dev, &masterspec);
+
+ err = -ENOMEM;
+ /* No need to zero the memory for masterspec */
+ masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
+ if (!masterspec)
+ goto out_put_masters;
+
+ of_for_each_phandle(&it, err, dev->of_node,
+ "mmu-masters", "#stream-id-cells", 0) {
+ int count = of_phandle_iterator_args(&it, masterspec->args,
+ MAX_MASTER_STREAMIDS);
+ masterspec->np = of_node_get(it.node);
+ masterspec->args_count = count;
+
+ err = register_smmu_master(smmu, dev, masterspec);
if (err) {
dev_err(dev, "failed to add master %s\n",
- masterspec.np->name);
+ masterspec->np->name);
+ kfree(masterspec);
goto out_put_masters;
}
i++;
}
+
dev_notice(dev, "registered %d master devices\n", i);
+ kfree(masterspec);
+
parse_driver_options(smmu);
- if (smmu->version > ARM_SMMU_V1 &&
+ if (smmu->version == ARM_SMMU_V2 &&
smmu->num_context_banks != smmu->num_context_irqs) {
dev_err(dev,
"found only %d context interrupt(s) but %d required\n",
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 58f2fe687..ea5a9ebf0 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -94,7 +94,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size
return -ENODEV;
/* Use the smallest supported page size for IOVA granularity */
- order = __ffs(domain->ops->pgsize_bitmap);
+ order = __ffs(domain->pgsize_bitmap);
base_pfn = max_t(unsigned long, 1, base >> order);
end_pfn = (base + size - 1) >> order;
@@ -190,11 +190,15 @@ static void __iommu_dma_free_pages(struct page **pages, int count)
kvfree(pages);
}
-static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
+static struct page **__iommu_dma_alloc_pages(unsigned int count,
+ unsigned long order_mask, gfp_t gfp)
{
struct page **pages;
unsigned int i = 0, array_size = count * sizeof(*pages);
- unsigned int order = MAX_ORDER;
+
+ order_mask &= (2U << MAX_ORDER) - 1;
+ if (!order_mask)
+ return NULL;
if (array_size <= PAGE_SIZE)
pages = kzalloc(array_size, GFP_KERNEL);
@@ -208,36 +212,38 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
while (count) {
struct page *page = NULL;
- int j;
+ unsigned int order_size;
/*
* Higher-order allocations are a convenience rather
* than a necessity, hence using __GFP_NORETRY until
- * falling back to single-page allocations.
+ * falling back to minimum-order allocations.
*/
- for (order = min_t(unsigned int, order, __fls(count));
- order > 0; order--) {
- page = alloc_pages(gfp | __GFP_NORETRY, order);
+ for (order_mask &= (2U << __fls(count)) - 1;
+ order_mask; order_mask &= ~order_size) {
+ unsigned int order = __fls(order_mask);
+
+ order_size = 1U << order;
+ page = alloc_pages((order_mask - order_size) ?
+ gfp | __GFP_NORETRY : gfp, order);
if (!page)
continue;
- if (PageCompound(page)) {
- if (!split_huge_page(page))
- break;
- __free_pages(page, order);
- } else {
+ if (!order)
+ break;
+ if (!PageCompound(page)) {
split_page(page, order);
break;
+ } else if (!split_huge_page(page)) {
+ break;
}
+ __free_pages(page, order);
}
- if (!page)
- page = alloc_page(gfp);
if (!page) {
__iommu_dma_free_pages(pages, i);
return NULL;
}
- j = 1 << order;
- count -= j;
- while (j--)
+ count -= order_size;
+ while (order_size--)
pages[i++] = page++;
}
return pages;
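A worked pass through the loop (hypothetical numbers):

    /* count = 0x300 pages (3 MiB), domain page sizes 4K|2M|1G, 4K PAGE_SIZE:
     *
     *   order_mask arrives as bits {0, 9, 18}
     *   order_mask &= (2U << __fls(0x300)) - 1  -> bits {0, 9}
     *   order 9 (512 pages) is tried with __GFP_NORETRY, since smaller
     *   orders remain as fallback ((order_mask - order_size) != 0);
     *   order 0 mops up the remainder without __GFP_NORETRY, so it may
     *   block or retry as usual.
     */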
@@ -267,6 +273,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
* attached to an iommu_dma_domain
* @size: Size of buffer in bytes
* @gfp: Allocation flags
+ * @attrs: DMA attributes for this allocation
* @prot: IOMMU mapping flags
* @handle: Out argument for allocated DMA handle
* @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
@@ -278,8 +285,8 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
* Return: Array of struct page pointers describing the buffer,
* or NULL on failure.
*/
-struct page **iommu_dma_alloc(struct device *dev, size_t size,
- gfp_t gfp, int prot, dma_addr_t *handle,
+struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
+ struct dma_attrs *attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t))
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -288,11 +295,22 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size,
struct page **pages;
struct sg_table sgt;
dma_addr_t dma_addr;
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
*handle = DMA_ERROR_CODE;
- pages = __iommu_dma_alloc_pages(count, gfp);
+ min_size = alloc_sizes & -alloc_sizes;
+ if (min_size < PAGE_SIZE) {
+ min_size = PAGE_SIZE;
+ alloc_sizes |= PAGE_SIZE;
+ } else {
+ size = ALIGN(size, min_size);
+ }
+ if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
+ alloc_sizes = min_size;
+
+ count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
if (!pages)
return NULL;
@@ -389,26 +407,58 @@ void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
/*
* Prepare a successfully-mapped scatterlist to give back to the caller.
- * Handling IOVA concatenation can come later, if needed
+ *
+ * At this point the segments are already laid out by iommu_dma_map_sg() to
+ * avoid individually crossing any boundaries, so we merely need to check a
+ * segment's start address to avoid concatenating across one.
*/
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
dma_addr_t dma_addr)
{
- struct scatterlist *s;
- int i;
+ struct scatterlist *s, *cur = sg;
+ unsigned long seg_mask = dma_get_seg_boundary(dev);
+ unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
+ int i, count = 0;
for_each_sg(sg, s, nents, i) {
- /* Un-swizzling the fields here, hence the naming mismatch */
- unsigned int s_offset = sg_dma_address(s);
+ /* Restore this segment's original unaligned fields first */
+ unsigned int s_iova_off = sg_dma_address(s);
unsigned int s_length = sg_dma_len(s);
- unsigned int s_dma_len = s->length;
+ unsigned int s_iova_len = s->length;
- s->offset += s_offset;
+ s->offset += s_iova_off;
s->length = s_length;
- sg_dma_address(s) = dma_addr + s_offset;
- dma_addr += s_dma_len;
+ sg_dma_address(s) = DMA_ERROR_CODE;
+ sg_dma_len(s) = 0;
+
+ /*
+ * Now fill in the real DMA data. If...
+ * - there is a valid output segment to append to
+ * - and this segment starts on an IOVA page boundary
+ * - but doesn't fall at a segment boundary
+ * - and wouldn't make the resulting output segment too long
+ */
+ if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
+ (cur_len + s_length <= max_len)) {
+ /* ...then concatenate it with the previous one */
+ cur_len += s_length;
+ } else {
+ /* Otherwise start the next output segment */
+ if (i > 0)
+ cur = sg_next(cur);
+ cur_len = s_length;
+ count++;
+
+ sg_dma_address(cur) = dma_addr + s_iova_off;
+ }
+
+ sg_dma_len(cur) = cur_len;
+ dma_addr += s_iova_len;
+
+ if (s_length + s_iova_off < s_iova_len)
+ cur_len = 0;
}
- return i;
+ return count;
}
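For instance (hypothetical values), three 4 KiB chunks laid out back-to-back in IOVA space, with max_len = 64 KiB and the running dma_addr never landing exactly on a seg_mask boundary, collapse into one DMA segment:

    /*   s0: starts the output segment       cur_len = 4096, count = 1
     *   s1: !s_iova_off, fits max_len   ->  cur_len = 8192   (merged)
     *   s2: likewise                    ->  cur_len = 12288  (merged)
     *
     * __finalise_sg() then returns count = 1 rather than nents = 3.
     */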
/*
@@ -446,34 +496,40 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
struct scatterlist *s, *prev = NULL;
dma_addr_t dma_addr;
size_t iova_len = 0;
+ unsigned long mask = dma_get_seg_boundary(dev);
int i;
/*
* Work out how much IOVA space we need, and align the segments to
* IOVA granules for the IOMMU driver to handle. With some clever
* trickery we can modify the list in-place, but reversibly, by
- * hiding the original data in the as-yet-unused DMA fields.
+ * stashing the unaligned parts in the as-yet-unused DMA fields.
*/
for_each_sg(sg, s, nents, i) {
- size_t s_offset = iova_offset(iovad, s->offset);
+ size_t s_iova_off = iova_offset(iovad, s->offset);
size_t s_length = s->length;
+ size_t pad_len = (mask - iova_len + 1) & mask;
- sg_dma_address(s) = s_offset;
+ sg_dma_address(s) = s_iova_off;
sg_dma_len(s) = s_length;
- s->offset -= s_offset;
- s_length = iova_align(iovad, s_length + s_offset);
+ s->offset -= s_iova_off;
+ s_length = iova_align(iovad, s_length + s_iova_off);
s->length = s_length;
/*
- * The simple way to avoid the rare case of a segment
- * crossing the boundary mask is to pad the previous one
- * to end at a naturally-aligned IOVA for this one's size,
- * at the cost of potentially over-allocating a little.
+ * Due to the alignment of our single IOVA allocation, we can
+ * depend on these assumptions about the segment boundary mask:
+ * - If mask size >= IOVA size, then the IOVA range cannot
+ * possibly fall across a boundary, so we don't care.
+ * - If mask size < IOVA size, then the IOVA range must start
+ * exactly on a boundary, therefore we can lay things out
+ * based purely on segment lengths without needing to know
+ * the actual addresses beforehand.
+ * - The mask must be a power of 2, so pad_len == 0 if
+ * iova_len == 0, thus we cannot dereference prev the first
+ * time through here (i.e. before it has a meaningful value).
*/
- if (prev) {
- size_t pad_len = roundup_pow_of_two(s_length);
-
- pad_len = (pad_len - iova_len) & (pad_len - 1);
+ if (pad_len && pad_len < s_length - 1) {
prev->length += pad_len;
iova_len += pad_len;
}
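A worked example of the padding arithmetic (hypothetical mask):

    /* segment boundary mask = 0xffff (64 KiB):
     *
     *   iova_len so far = 0x9000
     *   pad_len = (mask - iova_len + 1) & mask = 0x7000
     *
     * if the next segment is long enough to cross the boundary
     * (pad_len < s_length - 1), the previous segment grows by 0x7000 so
     * this one starts exactly on the 64 KiB boundary (iova_len = 0x10000).
     */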
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8ffd7568f..6a86b5d1d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1579,18 +1579,14 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
reason = dmar_get_fault_reason(fault_reason, &fault_type);
if (fault_type == INTR_REMAP)
- pr_err("INTR-REMAP: Request device [[%02x:%02x.%d] "
- "fault index %llx\n"
- "INTR-REMAP:[fault reason %02d] %s\n",
- (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+ pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
+ source_id >> 8, PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr >> 48,
fault_reason, reason);
else
- pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
- "fault addr %llx \n"
- "DMAR:[fault reason %02d] %s\n",
- (type ? "DMA Read" : "DMA Write"),
- (source_id >> 8), PCI_SLOT(source_id & 0xFF),
+ pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
+ type ? "DMA Read" : "DMA Write",
+ source_id >> 8, PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
return 0;
}
@@ -1602,10 +1598,17 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
int reg, fault_index;
u32 fault_status;
unsigned long flag;
+ bool ratelimited;
+ static DEFINE_RATELIMIT_STATE(rs,
+ DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+
+ /* Disable printing, simply clear the fault when ratelimited */
+ ratelimited = !__ratelimit(&rs);
raw_spin_lock_irqsave(&iommu->register_lock, flag);
fault_status = readl(iommu->reg + DMAR_FSTS_REG);
- if (fault_status)
+ if (fault_status && !ratelimited)
pr_err("DRHD: handling fault status reg %x\n", fault_status);
/* TBD: ignore advanced fault log currently */
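For reference, __ratelimit() returns nonzero while output is still allowed in the current interval, so the flag above flips once the burst is exhausted:

    /*   ratelimited = !__ratelimit(&rs);
     *
     * false for the first DEFAULT_RATELIMIT_BURST faults per
     * DEFAULT_RATELIMIT_INTERVAL, true afterwards -- later faults are
     * still cleared in hardware, just not printed.
     */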
@@ -1627,24 +1630,28 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
if (!(data & DMA_FRCD_F))
break;
- fault_reason = dma_frcd_fault_reason(data);
- type = dma_frcd_type(data);
+ if (!ratelimited) {
+ fault_reason = dma_frcd_fault_reason(data);
+ type = dma_frcd_type(data);
- data = readl(iommu->reg + reg +
- fault_index * PRIMARY_FAULT_REG_LEN + 8);
- source_id = dma_frcd_source_id(data);
+ data = readl(iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN + 8);
+ source_id = dma_frcd_source_id(data);
+
+ guest_addr = dmar_readq(iommu->reg + reg +
+ fault_index * PRIMARY_FAULT_REG_LEN);
+ guest_addr = dma_frcd_page_addr(guest_addr);
+ }
- guest_addr = dmar_readq(iommu->reg + reg +
- fault_index * PRIMARY_FAULT_REG_LEN);
- guest_addr = dma_frcd_page_addr(guest_addr);
/* clear the fault */
writel(DMA_FRCD_F, iommu->reg + reg +
fault_index * PRIMARY_FAULT_REG_LEN + 12);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
- dmar_fault_do_one(iommu, type, fault_reason,
- source_id, guest_addr);
+ if (!ratelimited)
+ dmar_fault_do_one(iommu, type, fault_reason,
+ source_id, guest_addr);
fault_index++;
if (fault_index >= cap_num_fault_regs(iommu->cap))
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ae364e078..323dac990 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -33,6 +33,7 @@
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
+#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
@@ -390,6 +391,7 @@ struct dmar_domain {
* domain ids are 16 bit wide according
* to VT-d spec, section 9.3 */
+ bool has_iotlb_device;
struct list_head devices; /* all devices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
@@ -456,27 +458,32 @@ static LIST_HEAD(dmar_rmrr_units);
static void flush_unmaps_timeout(unsigned long data);
-static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
+struct deferred_flush_entry {
+ unsigned long iova_pfn;
+ unsigned long nrpages;
+ struct dmar_domain *domain;
+ struct page *freelist;
+};
#define HIGH_WATER_MARK 250
-struct deferred_flush_tables {
+struct deferred_flush_table {
int next;
- struct iova *iova[HIGH_WATER_MARK];
- struct dmar_domain *domain[HIGH_WATER_MARK];
- struct page *freelist[HIGH_WATER_MARK];
+ struct deferred_flush_entry entries[HIGH_WATER_MARK];
+};
+
+struct deferred_flush_data {
+ spinlock_t lock;
+ int timer_on;
+ struct timer_list timer;
+ long size;
+ struct deferred_flush_table *tables;
};
-static struct deferred_flush_tables *deferred_flush;
+DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;
-static DEFINE_SPINLOCK(async_umap_flush_lock);
-static LIST_HEAD(unmaps_to_do);
-
-static int timer_on;
-static long list_size;
-
static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct dmar_domain *domain,
@@ -1143,7 +1150,7 @@ next:
} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}
-/* free page table pages. last level pte should already be cleared */
+/* clear last level (leaf) ptes and free page table pages. */
static void dma_pte_free_pagetable(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
@@ -1458,10 +1465,35 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
return NULL;
}
+static void domain_update_iotlb(struct dmar_domain *domain)
+{
+ struct device_domain_info *info;
+ bool has_iotlb_device = false;
+
+ assert_spin_locked(&device_domain_lock);
+
+ list_for_each_entry(info, &domain->devices, link) {
+ struct pci_dev *pdev;
+
+ if (!info->dev || !dev_is_pci(info->dev))
+ continue;
+
+ pdev = to_pci_dev(info->dev);
+ if (pdev->ats_enabled) {
+ has_iotlb_device = true;
+ break;
+ }
+ }
+
+ domain->has_iotlb_device = has_iotlb_device;
+}
+
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
+ assert_spin_locked(&device_domain_lock);
+
if (!info || !dev_is_pci(info->dev))
return;
@@ -1481,6 +1513,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
#endif
if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
info->ats_enabled = 1;
+ domain_update_iotlb(info->domain);
info->ats_qdep = pci_ats_queue_depth(pdev);
}
}
@@ -1489,6 +1522,8 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
+ assert_spin_locked(&device_domain_lock);
+
if (!dev_is_pci(info->dev))
return;
@@ -1497,6 +1532,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
if (info->ats_enabled) {
pci_disable_ats(pdev);
info->ats_enabled = 0;
+ domain_update_iotlb(info->domain);
}
#ifdef CONFIG_INTEL_IOMMU_SVM
if (info->pri_enabled) {
@@ -1517,6 +1553,9 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
unsigned long flags;
struct device_domain_info *info;
+ if (!domain->has_iotlb_device)
+ return;
+
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (!info->ats_enabled)
@@ -1734,6 +1773,7 @@ static struct dmar_domain *alloc_domain(int flags)
memset(domain, 0, sizeof(*domain));
domain->nid = -1;
domain->flags = flags;
+ domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
return domain;
@@ -1918,8 +1958,12 @@ static void domain_exit(struct dmar_domain *domain)
return;
/* Flush any lazy unmaps that may reference this domain */
- if (!intel_iommu_strict)
- flush_unmaps_timeout(0);
+ if (!intel_iommu_strict) {
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ flush_unmaps_timeout(cpu);
+ }
/* Remove associated devices and clear attached or cached domains */
rcu_read_lock();
@@ -3077,7 +3121,7 @@ static int __init init_dmars(void)
bool copied_tables = false;
struct device *dev;
struct intel_iommu *iommu;
- int i, ret;
+ int i, ret, cpu;
/*
* for each drhd
@@ -3110,11 +3154,20 @@ static int __init init_dmars(void)
goto error;
}
- deferred_flush = kzalloc(g_num_of_iommus *
- sizeof(struct deferred_flush_tables), GFP_KERNEL);
- if (!deferred_flush) {
- ret = -ENOMEM;
- goto free_g_iommus;
+ for_each_possible_cpu(cpu) {
+ struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
+ cpu);
+
+ dfd->tables = kzalloc(g_num_of_iommus *
+ sizeof(struct deferred_flush_table),
+ GFP_KERNEL);
+ if (!dfd->tables) {
+ ret = -ENOMEM;
+ goto free_g_iommus;
+ }
+
+ spin_lock_init(&dfd->lock);
+ setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
}
for_each_active_iommu(iommu, drhd) {
@@ -3298,19 +3351,20 @@ free_iommu:
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
}
- kfree(deferred_flush);
free_g_iommus:
+ for_each_possible_cpu(cpu)
+ kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
kfree(g_iommus);
error:
return ret;
}
/* This takes a number of _MM_ pages, not VTD pages */
-static struct iova *intel_alloc_iova(struct device *dev,
+static unsigned long intel_alloc_iova(struct device *dev,
struct dmar_domain *domain,
unsigned long nrpages, uint64_t dma_mask)
{
- struct iova *iova = NULL;
+ unsigned long iova_pfn = 0;
/* Restrict dma_mask to the width that the iommu can handle */
dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
@@ -3323,19 +3377,19 @@ static struct iova *intel_alloc_iova(struct device *dev,
* DMA_BIT_MASK(32) and if that fails then try allocating
* from higher range
*/
- iova = alloc_iova(&domain->iovad, nrpages,
- IOVA_PFN(DMA_BIT_MASK(32)), 1);
- if (iova)
- return iova;
+ iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
+ IOVA_PFN(DMA_BIT_MASK(32)));
+ if (iova_pfn)
+ return iova_pfn;
}
- iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
- if (unlikely(!iova)) {
+ iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
+ if (unlikely(!iova_pfn)) {
pr_err("Allocating %ld-page iova for %s failed",
nrpages, dev_name(dev));
- return NULL;
+ return 0;
}
- return iova;
+ return iova_pfn;
}
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
@@ -3433,7 +3487,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
{
struct dmar_domain *domain;
phys_addr_t start_paddr;
- struct iova *iova;
+ unsigned long iova_pfn;
int prot = 0;
int ret;
struct intel_iommu *iommu;
@@ -3451,8 +3505,8 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
iommu = domain_get_iommu(domain);
size = aligned_nrpages(paddr, size);
- iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
- if (!iova)
+ iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
+ if (!iova_pfn)
goto error;
/*
@@ -3470,7 +3524,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
* might have two guest_addr mapping to the same host paddr, but this
* is not a big problem
*/
- ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
+ ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
mm_to_dma_pfn(paddr_pfn), size, prot);
if (ret)
goto error;
@@ -3478,18 +3532,18 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
iommu_flush_iotlb_psi(iommu, domain,
- mm_to_dma_pfn(iova->pfn_lo),
+ mm_to_dma_pfn(iova_pfn),
size, 0, 1);
else
iommu_flush_write_buffer(iommu);
- start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
+ start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
start_paddr += paddr & ~PAGE_MASK;
return start_paddr;
error:
- if (iova)
- __free_iova(&domain->iovad, iova);
+ if (iova_pfn)
+ free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
dev_name(dev), size, (unsigned long long)paddr, dir);
return 0;
@@ -3504,91 +3558,120 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
dir, *dev->dma_mask);
}
-static void flush_unmaps(void)
+static void flush_unmaps(struct deferred_flush_data *flush_data)
{
int i, j;
- timer_on = 0;
+ flush_data->timer_on = 0;
/* just flush them all */
for (i = 0; i < g_num_of_iommus; i++) {
struct intel_iommu *iommu = g_iommus[i];
+ struct deferred_flush_table *flush_table =
+ &flush_data->tables[i];
if (!iommu)
continue;
- if (!deferred_flush[i].next)
+ if (!flush_table->next)
continue;
/* In caching mode, global flushes turn emulation expensive */
if (!cap_caching_mode(iommu->cap))
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
- for (j = 0; j < deferred_flush[i].next; j++) {
+ for (j = 0; j < flush_table->next; j++) {
unsigned long mask;
- struct iova *iova = deferred_flush[i].iova[j];
- struct dmar_domain *domain = deferred_flush[i].domain[j];
+ struct deferred_flush_entry *entry =
+ &flush_table->entries[j];
+ unsigned long iova_pfn = entry->iova_pfn;
+ unsigned long nrpages = entry->nrpages;
+ struct dmar_domain *domain = entry->domain;
+ struct page *freelist = entry->freelist;
/* On real hardware multiple invalidations are expensive */
if (cap_caching_mode(iommu->cap))
iommu_flush_iotlb_psi(iommu, domain,
- iova->pfn_lo, iova_size(iova),
- !deferred_flush[i].freelist[j], 0);
+ mm_to_dma_pfn(iova_pfn),
+ nrpages, !freelist, 0);
else {
- mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
- iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
- (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+ mask = ilog2(nrpages);
+ iommu_flush_dev_iotlb(domain,
+ (uint64_t)iova_pfn << PAGE_SHIFT, mask);
}
- __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
- if (deferred_flush[i].freelist[j])
- dma_free_pagelist(deferred_flush[i].freelist[j]);
+ free_iova_fast(&domain->iovad, iova_pfn, nrpages);
+ if (freelist)
+ dma_free_pagelist(freelist);
}
- deferred_flush[i].next = 0;
+ flush_table->next = 0;
}
- list_size = 0;
+ flush_data->size = 0;
}
-static void flush_unmaps_timeout(unsigned long data)
+static void flush_unmaps_timeout(unsigned long cpuid)
{
+ struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
unsigned long flags;
- spin_lock_irqsave(&async_umap_flush_lock, flags);
- flush_unmaps();
- spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+ spin_lock_irqsave(&flush_data->lock, flags);
+ flush_unmaps(flush_data);
+ spin_unlock_irqrestore(&flush_data->lock, flags);
}
-static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
+static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
+ unsigned long nrpages, struct page *freelist)
{
unsigned long flags;
- int next, iommu_id;
+ int entry_id, iommu_id;
struct intel_iommu *iommu;
+ struct deferred_flush_entry *entry;
+ struct deferred_flush_data *flush_data;
+ unsigned int cpuid;
- spin_lock_irqsave(&async_umap_flush_lock, flags);
- if (list_size == HIGH_WATER_MARK)
- flush_unmaps();
+ cpuid = get_cpu();
+ flush_data = per_cpu_ptr(&deferred_flush, cpuid);
+
+	/* Flush all CPUs' entries to avoid deferring too much. If
+	 * this becomes a bottleneck, we could flush only this CPU and
+	 * rely on the flush timer for the rest.
+	 */
+ if (flush_data->size == HIGH_WATER_MARK) {
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ flush_unmaps_timeout(cpu);
+ }
+
+ spin_lock_irqsave(&flush_data->lock, flags);
iommu = domain_get_iommu(dom);
iommu_id = iommu->seq_id;
- next = deferred_flush[iommu_id].next;
- deferred_flush[iommu_id].domain[next] = dom;
- deferred_flush[iommu_id].iova[next] = iova;
- deferred_flush[iommu_id].freelist[next] = freelist;
- deferred_flush[iommu_id].next++;
+ entry_id = flush_data->tables[iommu_id].next;
+ ++(flush_data->tables[iommu_id].next);
- if (!timer_on) {
- mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
- timer_on = 1;
+ entry = &flush_data->tables[iommu_id].entries[entry_id];
+ entry->domain = dom;
+ entry->iova_pfn = iova_pfn;
+ entry->nrpages = nrpages;
+ entry->freelist = freelist;
+
+ if (!flush_data->timer_on) {
+ mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
+ flush_data->timer_on = 1;
}
- list_size++;
- spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+ flush_data->size++;
+ spin_unlock_irqrestore(&flush_data->lock, flags);
+
+ put_cpu();
}
-static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
+static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
{
struct dmar_domain *domain;
unsigned long start_pfn, last_pfn;
- struct iova *iova;
+ unsigned long nrpages;
+ unsigned long iova_pfn;
struct intel_iommu *iommu;
struct page *freelist;
@@ -3600,13 +3683,11 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
iommu = domain_get_iommu(domain);
- iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
- if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
- (unsigned long long)dev_addr))
- return;
+ iova_pfn = IOVA_PFN(dev_addr);
- start_pfn = mm_to_dma_pfn(iova->pfn_lo);
- last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
+ nrpages = aligned_nrpages(dev_addr, size);
+ start_pfn = mm_to_dma_pfn(iova_pfn);
+ last_pfn = start_pfn + nrpages - 1;
pr_debug("Device %s unmapping: pfn %lx-%lx\n",
dev_name(dev), start_pfn, last_pfn);
@@ -3615,12 +3696,12 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
if (intel_iommu_strict) {
iommu_flush_iotlb_psi(iommu, domain, start_pfn,
- last_pfn - start_pfn + 1, !freelist, 0);
+ nrpages, !freelist, 0);
/* free iova */
- __free_iova(&domain->iovad, iova);
+ free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
dma_free_pagelist(freelist);
} else {
- add_unmap(domain, iova, freelist);
+ add_unmap(domain, iova_pfn, nrpages, freelist);
/*
* queue up the release of the unmap to save the 1/6th of the
* cpu used up by the iotlb flush operation...
@@ -3632,7 +3713,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- intel_unmap(dev, dev_addr);
+ intel_unmap(dev, dev_addr, size);
}
static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3691,7 +3772,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
size = PAGE_ALIGN(size);
order = get_order(size);
- intel_unmap(dev, dma_handle);
+ intel_unmap(dev, dma_handle, size);
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
__free_pages(page, order);
}
@@ -3700,7 +3781,16 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- intel_unmap(dev, sglist[0].dma_address);
+ dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
+ unsigned long nrpages = 0;
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nelems, i) {
+ nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
+ }
+
+ intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
}
static int intel_nontranslate_map_sg(struct device *hddev,
@@ -3724,7 +3814,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
struct dmar_domain *domain;
size_t size = 0;
int prot = 0;
- struct iova *iova = NULL;
+ unsigned long iova_pfn;
int ret;
struct scatterlist *sg;
unsigned long start_vpfn;
@@ -3743,9 +3833,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
for_each_sg(sglist, sg, nelems, i)
size += aligned_nrpages(sg->offset, sg->length);
- iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
+ iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
*dev->dma_mask);
- if (!iova) {
+ if (!iova_pfn) {
sglist->dma_length = 0;
return 0;
}
@@ -3760,13 +3850,13 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
prot |= DMA_PTE_WRITE;
- start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
+ start_vpfn = mm_to_dma_pfn(iova_pfn);
ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
if (unlikely(ret)) {
dma_pte_free_pagetable(domain, start_vpfn,
start_vpfn + size - 1);
- __free_iova(&domain->iovad, iova);
+ free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
return 0;
}
@@ -4505,6 +4595,46 @@ static struct notifier_block intel_iommu_memory_nb = {
.priority = 0
};
+static void free_all_cpu_cached_iovas(unsigned int cpu)
+{
+ int i;
+
+ for (i = 0; i < g_num_of_iommus; i++) {
+ struct intel_iommu *iommu = g_iommus[i];
+ struct dmar_domain *domain;
+ int did;
+
+ if (!iommu)
+ continue;
+
+ for (did = 0; did < cap_ndoms(iommu->cap); did++) {
+ domain = get_iommu_domain(iommu, (u16)did);
+
+ if (!domain)
+ continue;
+ free_cpu_cached_iovas(cpu, &domain->iovad);
+ }
+ }
+}
+
+static int intel_iommu_cpu_notifier(struct notifier_block *nfb,
+ unsigned long action, void *v)
+{
+ unsigned int cpu = (unsigned long)v;
+
+ switch (action) {
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ free_all_cpu_cached_iovas(cpu);
+ flush_unmaps_timeout(cpu);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block intel_iommu_cpu_nb = {
+ .notifier_call = intel_iommu_cpu_notifier,
+};
static ssize_t intel_iommu_show_version(struct device *dev,
struct device_attribute *attr,
@@ -4638,7 +4768,6 @@ int __init intel_iommu_init(void)
up_write(&dmar_global_lock);
pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
- init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
swiotlb = 0;
#endif
@@ -4655,6 +4784,7 @@ int __init intel_iommu_init(void)
bus_register_notifier(&pci_bus_type, &device_nb);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
+ register_hotcpu_notifier(&intel_iommu_cpu_nb);
intel_iommu_enabled = 1;
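
The intel-iommu changes above replace the single global deferred-flush table, timer, and async_umap_flush_lock with one instance per CPU, so concurrent unmaps on different CPUs no longer contend on a shared spinlock. A minimal sketch of the per-CPU access pattern, with hypothetical names (the real entries record iova_pfn, nrpages, domain, and freelist):

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

struct flush_queue {
	spinlock_t lock;	/* spin_lock_init()ed at startup, as
				 * init_dmars() does above */
	long size;
};

static DEFINE_PER_CPU(struct flush_queue, flush_queue);

static void queue_deferred_unmap(void)
{
	struct flush_queue *fq;
	unsigned long flags;
	unsigned int cpu;

	cpu = get_cpu();	/* pin to this CPU, disable preemption */
	fq = per_cpu_ptr(&flush_queue, cpu);

	spin_lock_irqsave(&fq->lock, flags);
	fq->size++;		/* a real entry would be stored here */
	spin_unlock_irqrestore(&fq->lock, flags);

	put_cpu();		/* re-enable preemption */
}
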
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 9488e3c97..8c6139986 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -121,6 +121,8 @@
#define ARM_V7S_TEX_MASK 0x7
#define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)
+#define ARM_V7S_ATTR_MTK_4GB BIT(9) /* MTK extend it for 4GB mode */
+
/* *well, except for TEX on level 2 large pages, of course :( */
#define ARM_V7S_CONT_PAGE_TEX_SHIFT 6
#define ARM_V7S_CONT_PAGE_TEX_MASK (ARM_V7S_TEX_MASK << ARM_V7S_CONT_PAGE_TEX_SHIFT)
@@ -258,9 +260,10 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
struct io_pgtable_cfg *cfg)
{
bool ap = !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS);
- arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S |
- ARM_V7S_ATTR_TEX(1);
+ arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S;
+ if (!(prot & IOMMU_MMIO))
+ pte |= ARM_V7S_ATTR_TEX(1);
if (ap) {
pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV;
if (!(prot & IOMMU_WRITE))
@@ -270,7 +273,9 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
if ((prot & IOMMU_NOEXEC) && ap)
pte |= ARM_V7S_ATTR_XN(lvl);
- if (prot & IOMMU_CACHE)
+ if (prot & IOMMU_MMIO)
+ pte |= ARM_V7S_ATTR_B;
+ else if (prot & IOMMU_CACHE)
pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C;
return pte;
@@ -279,10 +284,13 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
{
int prot = IOMMU_READ;
+ arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);
- if (pte & (ARM_V7S_PTE_AP_RDONLY << ARM_V7S_ATTR_SHIFT(lvl)))
+ if (attr & ARM_V7S_PTE_AP_RDONLY)
prot |= IOMMU_WRITE;
- if (pte & ARM_V7S_ATTR_C)
+ if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
+ prot |= IOMMU_MMIO;
+ else if (pte & ARM_V7S_ATTR_C)
prot |= IOMMU_CACHE;
return prot;
@@ -364,6 +372,9 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
pte |= ARM_V7S_ATTR_NS_SECTION;
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB)
+ pte |= ARM_V7S_ATTR_MTK_4GB;
+
if (num_entries > 1)
pte = arm_v7s_pte_to_cont(pte, lvl);
@@ -625,9 +636,15 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NO_PERMS |
- IO_PGTABLE_QUIRK_TLBI_ON_MAP))
+ IO_PGTABLE_QUIRK_TLBI_ON_MAP |
+ IO_PGTABLE_QUIRK_ARM_MTK_4GB))
return NULL;
+ /* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB &&
+ !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
+ return NULL;
+
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
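
arm_v7s_alloc_pgtable() above accepts the new MTK_4GB quirk only together with NO_PERMS, since the quirk repurposes PTE bit 9, which normally carries a permission bit in the short-descriptor format. A standalone sketch of that validation, using hypothetical QUIRK_* constants that mirror the IO_PGTABLE_QUIRK_* bits:

#include <linux/bitops.h>
#include <linux/types.h>

#define QUIRK_ARM_NS		BIT(0)
#define QUIRK_NO_PERMS		BIT(1)
#define QUIRK_TLBI_ON_MAP	BIT(2)
#define QUIRK_ARM_MTK_4GB	BIT(3)

/* Reject quirk combinations the format cannot honour: anything outside
 * the supported mask, or MTK_4GB without NO_PERMS. */
static bool quirks_valid(unsigned long quirks)
{
	if (quirks & ~(QUIRK_ARM_NS | QUIRK_NO_PERMS |
		       QUIRK_TLBI_ON_MAP | QUIRK_ARM_MTK_4GB))
		return false;

	if ((quirks & QUIRK_ARM_MTK_4GB) && !(quirks & QUIRK_NO_PERMS))
		return false;

	return true;
}
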
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f433b5160..a1ed1b73f 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -355,7 +355,10 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
pte |= ARM_LPAE_PTE_AP_RDONLY;
- if (prot & IOMMU_CACHE)
+ if (prot & IOMMU_MMIO)
+ pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
+ << ARM_LPAE_PTE_ATTRINDX_SHIFT);
+ else if (prot & IOMMU_CACHE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
} else {
@@ -364,7 +367,9 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
pte |= ARM_LPAE_PTE_HAP_READ;
if (prot & IOMMU_WRITE)
pte |= ARM_LPAE_PTE_HAP_WRITE;
- if (prot & IOMMU_CACHE)
+ if (prot & IOMMU_MMIO)
+ pte |= ARM_LPAE_PTE_MEMATTR_DEV;
+ else if (prot & IOMMU_CACHE)
pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
else
pte |= ARM_LPAE_PTE_MEMATTR_NC;
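
In both the stage-1 and stage-2 hunks above, IOMMU_MMIO is tested before IOMMU_CACHE, so MMIO mappings get Device memory attributes instead of falling through to Normal memory. A reduced sketch of that precedence, with hypothetical PROT_*/ATTR_* names standing in for the IOMMU_* flags and MAIR indices:

enum memattr {
	ATTR_DEVICE,	/* Device memory, for MMIO regions */
	ATTR_CACHED,	/* Normal, outer/inner write-back */
	ATTR_NONCACHED,	/* Normal, non-cacheable */
};

#define PROT_CACHE	(1 << 2)
#define PROT_MMIO	(1 << 4)

static enum memattr prot_to_memattr(int prot)
{
	if (prot & PROT_MMIO)		/* MMIO wins over CACHE */
		return ATTR_DEVICE;
	if (prot & PROT_CACHE)
		return ATTR_CACHED;
	return ATTR_NONCACHED;
}
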
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 876f6a76d..127558d83 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -25,8 +25,7 @@
#include "io-pgtable.h"
static const struct io_pgtable_init_fns *
-io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
-{
+io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
[ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns,
[ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index d4f502742..969d82cc9 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -60,10 +60,16 @@ struct io_pgtable_cfg {
* IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
* (unmapped) entries but the hardware might do so anyway, perform
* TLB maintenance when mapping as well as when unmapping.
+ *
+ * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
+ * PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
+ * when the SoC is in "4GB mode" and they can only access the high
+ * remap of DRAM (0x1_00000000 to 0x1_ffffffff).
*/
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
#define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
+ #define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b9df1411c..3000051f4 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -337,9 +337,9 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
if (!domain || domain->type != IOMMU_DOMAIN_DMA)
return 0;
- BUG_ON(!domain->ops->pgsize_bitmap);
+ BUG_ON(!domain->pgsize_bitmap);
- pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap);
+ pg_size = 1UL << __ffs(domain->pgsize_bitmap);
INIT_LIST_HEAD(&mappings);
iommu_get_dm_regions(dev, &mappings);
@@ -660,8 +660,8 @@ static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
}
/*
- * Look for aliases to or from the given device for exisiting groups. The
- * dma_alias_devfn only supports aliases on the same bus, therefore the search
+ * Look for aliases to or from the given device for existing groups. DMA
+ * aliases are only supported on the same bus, therefore the search
* space is quite small (especially since we're really only looking at pcie
* device, and therefore only expect multiple slots on the root complex or
* downstream switch ports). It's conceivable though that a pair of
@@ -686,11 +686,7 @@ static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
continue;
/* We alias them or they alias us */
- if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
- pdev->dma_alias_devfn == tmp->devfn) ||
- ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
- tmp->dma_alias_devfn == pdev->devfn)) {
-
+ if (pci_devs_are_dma_aliases(pdev, tmp)) {
group = get_pci_alias_group(tmp, devfns);
if (group) {
pci_dev_put(tmp);
@@ -1073,6 +1069,8 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
domain->ops = bus->iommu_ops;
domain->type = type;
+ /* Assume all sizes by default; the driver may override this later */
+ domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
return domain;
}
@@ -1297,7 +1295,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
pgsize = (1UL << (pgsize_idx + 1)) - 1;
/* throw away page sizes not supported by the hardware */
- pgsize &= domain->ops->pgsize_bitmap;
+ pgsize &= domain->pgsize_bitmap;
/* make sure we're still sane */
BUG_ON(!pgsize);
@@ -1319,14 +1317,14 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
int ret = 0;
if (unlikely(domain->ops->map == NULL ||
- domain->ops->pgsize_bitmap == 0UL))
+ domain->pgsize_bitmap == 0UL))
return -ENODEV;
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;
/* find out the minimum page size supported */
- min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+ min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
/*
* both the virtual address and the physical one, as well as
@@ -1373,14 +1371,14 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
unsigned long orig_iova = iova;
if (unlikely(domain->ops->unmap == NULL ||
- domain->ops->pgsize_bitmap == 0UL))
+ domain->pgsize_bitmap == 0UL))
return -ENODEV;
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;
/* find out the minimum page size supported */
- min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+ min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
/*
* The virtual address, as well as the size of the mapping, must be
@@ -1426,10 +1424,10 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
unsigned int i, min_pagesz;
int ret;
- if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+ if (unlikely(domain->pgsize_bitmap == 0UL))
return 0;
- min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+ min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
for_each_sg(sg, s, nents, i) {
phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
@@ -1510,7 +1508,7 @@ int iommu_domain_get_attr(struct iommu_domain *domain,
break;
case DOMAIN_ATTR_PAGING:
paging = data;
- *paging = (domain->ops->pgsize_bitmap != 0UL);
+ *paging = (domain->pgsize_bitmap != 0UL);
break;
case DOMAIN_ATTR_WINDOWS:
count = data;
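
With pgsize_bitmap moved from the shared iommu_ops into each iommu_domain, the map/unmap paths above derive the minimum supported page size per domain, and a driver (as mtk_iommu does later in this patch) can set it per domain instead of mutating the global ops. A sketch of the bit arithmetic, with an illustrative bitmap value:

#include <linux/bitops.h>

/* Bit N set in the bitmap means a page size of 2^N bytes is valid,
 * e.g. (SZ_4K | SZ_2M | SZ_1G) for a typical LPAE stage-1 format. */
static unsigned long min_pagesz(unsigned long pgsize_bitmap)
{
	return 1UL << __ffs(pgsize_bitmap);	/* lowest set bit */
}

static unsigned long max_pagesz(unsigned long pgsize_bitmap)
{
	return 1UL << __fls(pgsize_bitmap);	/* highest set bit */
}
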
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index fa0adef32..e23001bfc 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -20,6 +20,17 @@
#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/bitops.h>
+
+static bool iova_rcache_insert(struct iova_domain *iovad,
+ unsigned long pfn,
+ unsigned long size);
+static unsigned long iova_rcache_get(struct iova_domain *iovad,
+ unsigned long size,
+ unsigned long limit_pfn);
+static void init_iova_rcaches(struct iova_domain *iovad);
+static void free_iova_rcaches(struct iova_domain *iovad);
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -38,6 +49,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->granule = granule;
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = pfn_32bit;
+ init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -291,33 +303,18 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
}
EXPORT_SYMBOL_GPL(alloc_iova);
-/**
- * find_iova - find's an iova for a given pfn
- * @iovad: - iova domain in question.
- * @pfn: - page frame number
- * This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
- */
-struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
+static struct iova *
+private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
- unsigned long flags;
- struct rb_node *node;
+ struct rb_node *node = iovad->rbroot.rb_node;
+
+ assert_spin_locked(&iovad->iova_rbtree_lock);
- /* Take the lock so that no other thread is manipulating the rbtree */
- spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- node = iovad->rbroot.rb_node;
while (node) {
struct iova *iova = container_of(node, struct iova, node);
/* If pfn falls within iova's range, return iova */
if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- /* We are not holding the lock while this iova
- * is referenced by the caller as the same thread
- * which called this function also calls __free_iova()
- * and it is by design that only one thread can possibly
- * reference a particular iova and hence no conflict.
- */
return iova;
}
@@ -327,9 +324,35 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
node = node->rb_right;
}
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return NULL;
}
+
+static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
+{
+ assert_spin_locked(&iovad->iova_rbtree_lock);
+ __cached_rbnode_delete_update(iovad, iova);
+ rb_erase(&iova->node, &iovad->rbroot);
+ free_iova_mem(iova);
+}
+
+/**
+ * find_iova - finds an iova for a given pfn
+ * @iovad: - iova domain in question.
+ * @pfn: - page frame number
+ * This function finds and returns an iova belonging to the
+ * given domain which matches the given pfn.
+ */
+struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
+{
+ unsigned long flags;
+ struct iova *iova;
+
+ /* Take the lock so that no other thread is manipulating the rbtree */
+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+ iova = private_find_iova(iovad, pfn);
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return iova;
+}
EXPORT_SYMBOL_GPL(find_iova);
/**
@@ -344,10 +367,8 @@ __free_iova(struct iova_domain *iovad, struct iova *iova)
unsigned long flags;
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- __cached_rbnode_delete_update(iovad, iova);
- rb_erase(&iova->node, &iovad->rbroot);
+ private_free_iova(iovad, iova);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(__free_iova);
@@ -370,6 +391,65 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
EXPORT_SYMBOL_GPL(free_iova);
/**
+ * alloc_iova_fast - allocates an iova from rcache
+ * @iovad: - iova domain in question
+ * @size: - size of page frames to allocate
+ * @limit_pfn: - max limit address
+ * This function tries to satisfy an iova allocation from the rcache,
+ * and falls back to regular allocation on failure.
+ */
+unsigned long
+alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
+ unsigned long limit_pfn)
+{
+ bool flushed_rcache = false;
+ unsigned long iova_pfn;
+ struct iova *new_iova;
+
+ iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
+ if (iova_pfn)
+ return iova_pfn;
+
+retry:
+ new_iova = alloc_iova(iovad, size, limit_pfn, true);
+ if (!new_iova) {
+ unsigned int cpu;
+
+ if (flushed_rcache)
+ return 0;
+
+ /* Try replenishing IOVAs by flushing rcache. */
+ flushed_rcache = true;
+ preempt_disable();
+ for_each_online_cpu(cpu)
+ free_cpu_cached_iovas(cpu, iovad);
+ preempt_enable();
+ goto retry;
+ }
+
+ return new_iova->pfn_lo;
+}
+EXPORT_SYMBOL_GPL(alloc_iova_fast);
+
+/**
+ * free_iova_fast - free iova pfn range into rcache
+ * @iovad: - iova domain in question.
+ * @pfn: - pfn that is allocated previously
+ * @size: - # of pages in range
+ * This function frees an iova range by trying to put it into the rcache,
+ * falling back to regular iova deallocation via free_iova() if this fails.
+ */
+void
+free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
+{
+ if (iova_rcache_insert(iovad, pfn, size))
+ return;
+
+ free_iova(iovad, pfn);
+}
+EXPORT_SYMBOL_GPL(free_iova_fast);
+
+/**
 * put_iova_domain - destroys the iova domain
* @iovad: - iova domain in question.
* All the iova's in that domain are destroyed.
@@ -379,6 +459,7 @@ void put_iova_domain(struct iova_domain *iovad)
struct rb_node *node;
unsigned long flags;
+ free_iova_rcaches(iovad);
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
node = rb_first(&iovad->rbroot);
while (node) {
@@ -550,5 +631,297 @@ error:
return NULL;
}
+/*
+ * Magazine caches for IOVA ranges. For an introduction to magazines,
+ * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
+ * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
+ * For simplicity, we use a static magazine size and don't implement the
+ * dynamic size tuning described in the paper.
+ */
+
+#define IOVA_MAG_SIZE 128
+
+struct iova_magazine {
+ unsigned long size;
+ unsigned long pfns[IOVA_MAG_SIZE];
+};
+
+struct iova_cpu_rcache {
+ spinlock_t lock;
+ struct iova_magazine *loaded;
+ struct iova_magazine *prev;
+};
+
+static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
+{
+ return kzalloc(sizeof(struct iova_magazine), flags);
+}
+
+static void iova_magazine_free(struct iova_magazine *mag)
+{
+ kfree(mag);
+}
+
+static void
+iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
+{
+ unsigned long flags;
+ int i;
+
+ if (!mag)
+ return;
+
+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+
+ for (i = 0 ; i < mag->size; ++i) {
+ struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
+
+ BUG_ON(!iova);
+ private_free_iova(iovad, iova);
+ }
+
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+ mag->size = 0;
+}
+
+static bool iova_magazine_full(struct iova_magazine *mag)
+{
+ return (mag && mag->size == IOVA_MAG_SIZE);
+}
+
+static bool iova_magazine_empty(struct iova_magazine *mag)
+{
+ return (!mag || mag->size == 0);
+}
+
+static unsigned long iova_magazine_pop(struct iova_magazine *mag,
+ unsigned long limit_pfn)
+{
+ BUG_ON(iova_magazine_empty(mag));
+
+ if (mag->pfns[mag->size - 1] >= limit_pfn)
+ return 0;
+
+ return mag->pfns[--mag->size];
+}
+
+static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
+{
+ BUG_ON(iova_magazine_full(mag));
+
+ mag->pfns[mag->size++] = pfn;
+}
+
+static void init_iova_rcaches(struct iova_domain *iovad)
+{
+ struct iova_cpu_rcache *cpu_rcache;
+ struct iova_rcache *rcache;
+ unsigned int cpu;
+ int i;
+
+ for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ rcache = &iovad->rcaches[i];
+ spin_lock_init(&rcache->lock);
+ rcache->depot_size = 0;
+ rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
+ if (WARN_ON(!rcache->cpu_rcaches))
+ continue;
+ for_each_possible_cpu(cpu) {
+ cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+ spin_lock_init(&cpu_rcache->lock);
+ cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
+ cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
+ }
+ }
+}
+
+/*
+ * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
+ * return true on success. Can fail if rcache is full and we can't free
+ * space, and free_iova() (our only caller) will then return the IOVA
+ * range to the rbtree instead.
+ */
+static bool __iova_rcache_insert(struct iova_domain *iovad,
+ struct iova_rcache *rcache,
+ unsigned long iova_pfn)
+{
+ struct iova_magazine *mag_to_free = NULL;
+ struct iova_cpu_rcache *cpu_rcache;
+ bool can_insert = false;
+ unsigned long flags;
+
+ cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+ if (!iova_magazine_full(cpu_rcache->loaded)) {
+ can_insert = true;
+ } else if (!iova_magazine_full(cpu_rcache->prev)) {
+ swap(cpu_rcache->prev, cpu_rcache->loaded);
+ can_insert = true;
+ } else {
+ struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);
+
+ if (new_mag) {
+ spin_lock(&rcache->lock);
+ if (rcache->depot_size < MAX_GLOBAL_MAGS) {
+ rcache->depot[rcache->depot_size++] =
+ cpu_rcache->loaded;
+ } else {
+ mag_to_free = cpu_rcache->loaded;
+ }
+ spin_unlock(&rcache->lock);
+
+ cpu_rcache->loaded = new_mag;
+ can_insert = true;
+ }
+ }
+
+ if (can_insert)
+ iova_magazine_push(cpu_rcache->loaded, iova_pfn);
+
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+ put_cpu_ptr(rcache->cpu_rcaches);
+
+ if (mag_to_free) {
+ iova_magazine_free_pfns(mag_to_free, iovad);
+ iova_magazine_free(mag_to_free);
+ }
+
+ return can_insert;
+}
+
+static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
+ unsigned long size)
+{
+ unsigned int log_size = order_base_2(size);
+
+ if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
+ return false;
+
+ return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
+}
+
+/*
+ * Caller wants to allocate a new IOVA range from 'rcache'. If we can
+ * satisfy the request, return a matching non-NULL range and remove
+ * it from the 'rcache'.
+ */
+static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
+ unsigned long limit_pfn)
+{
+ struct iova_cpu_rcache *cpu_rcache;
+ unsigned long iova_pfn = 0;
+ bool has_pfn = false;
+ unsigned long flags;
+
+ cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches);
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+ if (!iova_magazine_empty(cpu_rcache->loaded)) {
+ has_pfn = true;
+ } else if (!iova_magazine_empty(cpu_rcache->prev)) {
+ swap(cpu_rcache->prev, cpu_rcache->loaded);
+ has_pfn = true;
+ } else {
+ spin_lock(&rcache->lock);
+ if (rcache->depot_size > 0) {
+ iova_magazine_free(cpu_rcache->loaded);
+ cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
+ has_pfn = true;
+ }
+ spin_unlock(&rcache->lock);
+ }
+
+ if (has_pfn)
+ iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
+
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+ put_cpu_ptr(rcache->cpu_rcaches);
+
+ return iova_pfn;
+}
+
+/*
+ * Try to satisfy an IOVA allocation from the rcache. Fail if the requested
+ * size is too big or the DMA limit we are given isn't satisfied by the
+ * top element in the magazine.
+ */
+static unsigned long iova_rcache_get(struct iova_domain *iovad,
+ unsigned long size,
+ unsigned long limit_pfn)
+{
+ unsigned int log_size = order_base_2(size);
+
+ if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
+ return 0;
+
+ return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
+}
+
+/*
+ * Free a cpu's rcache.
+ */
+static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
+ struct iova_rcache *rcache)
+{
+ struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+ iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
+ iova_magazine_free(cpu_rcache->loaded);
+
+ iova_magazine_free_pfns(cpu_rcache->prev, iovad);
+ iova_magazine_free(cpu_rcache->prev);
+
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+}
+
+/*
+ * free rcache data structures.
+ */
+static void free_iova_rcaches(struct iova_domain *iovad)
+{
+ struct iova_rcache *rcache;
+ unsigned long flags;
+ unsigned int cpu;
+ int i, j;
+
+ for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ rcache = &iovad->rcaches[i];
+ for_each_possible_cpu(cpu)
+ free_cpu_iova_rcache(cpu, iovad, rcache);
+ spin_lock_irqsave(&rcache->lock, flags);
+ free_percpu(rcache->cpu_rcaches);
+ for (j = 0; j < rcache->depot_size; ++j) {
+ iova_magazine_free_pfns(rcache->depot[j], iovad);
+ iova_magazine_free(rcache->depot[j]);
+ }
+ spin_unlock_irqrestore(&rcache->lock, flags);
+ }
+}
+
+/*
+ * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
+ */
+void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
+{
+ struct iova_cpu_rcache *cpu_rcache;
+ struct iova_rcache *rcache;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ rcache = &iovad->rcaches[i];
+ cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+ iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
+ iova_magazine_free_pfns(cpu_rcache->prev, iovad);
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+ }
+}
+
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 8adaaeae3..49721b4e1 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -36,7 +36,7 @@ static void irq_remapping_disable_io_apic(void)
* As this gets called during crash dump, keep this simple for
* now.
*/
- if (cpu_has_apic || apic_from_smp_config())
+ if (boot_cpu_has(X86_FEATURE_APIC) || apic_from_smp_config())
disconnect_bsp_APIC(0);
}
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 929a66a81..c3043d875 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -11,6 +11,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+#include <linux/bootmem.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
@@ -56,7 +57,7 @@
#define F_MMU_TF_PROTECT_SEL(prot) (((prot) & 0x3) << 5)
#define REG_MMU_IVRP_PADDR 0x114
-#define F_MMU_IVRP_PA_SET(pa) ((pa) >> 1)
+#define F_MMU_IVRP_PA_SET(pa, ext) (((pa) >> 1) | ((!!(ext)) << 31))
#define REG_MMU_INT_CONTROL0 0x120
#define F_L2_MULIT_HIT_EN BIT(0)
@@ -125,6 +126,7 @@ struct mtk_iommu_data {
struct mtk_iommu_domain *m4u_dom;
struct iommu_group *m4u_group;
struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
+ bool enable_4GB;
};
static struct iommu_ops mtk_iommu_ops;
@@ -257,6 +259,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
.iommu_dev = data->dev,
};
+ if (data->enable_4GB)
+ dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;
+
dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
if (!dom->iop) {
dev_err(data->dev, "Failed to alloc io pgtable\n");
@@ -264,7 +269,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
}
/* Update our support page sizes bitmap */
- mtk_iommu_ops.pgsize_bitmap = dom->cfg.pgsize_bitmap;
+ dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
data->base + REG_MMU_PT_BASE_ADDR);
@@ -530,7 +535,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
- writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base),
+ writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
data->base + REG_MMU_IVRP_PADDR);
writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
@@ -591,6 +596,9 @@ static int mtk_iommu_probe(struct platform_device *pdev)
return -ENOMEM;
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
+	/* Whether any DRAM lies above the 32-bit (4GB) boundary */
+ data->enable_4GB = !!(max_pfn > (0xffffffffUL >> PAGE_SHIFT));
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->base = devm_ioremap_resource(dev, res);
if (IS_ERR(data->base))
@@ -690,7 +698,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
- writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base),
+ writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
base + REG_MMU_IVRP_PADDR);
return 0;
}
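
F_MMU_IVRP_PA_SET() above grows an 'ext' argument: when the SoC runs in 4GB mode, bit 31 of the IVRP register must be set alongside the protect-buffer address, which is programmed shifted right by one. A hedged sketch of that encoding as a plain function:

#include <linux/types.h>

static u32 ivrp_pa_set(phys_addr_t pa, bool enable_4gb)
{
	/* address stored >> 1; bit 31 flags the 4GB (high-remap) mode */
	return (u32)(pa >> 1) | ((u32)enable_4gb << 31);
}
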
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 5fea665af..af499aea0 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -98,12 +98,12 @@ EXPORT_SYMBOL_GPL(of_get_dma_window);
struct of_iommu_node {
struct list_head list;
struct device_node *np;
- struct iommu_ops *ops;
+ const struct iommu_ops *ops;
};
static LIST_HEAD(of_iommu_list);
static DEFINE_SPINLOCK(of_iommu_lock);
-void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
+void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops)
{
struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
@@ -119,10 +119,10 @@ void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
spin_unlock(&of_iommu_lock);
}
-struct iommu_ops *of_iommu_get_ops(struct device_node *np)
+const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
{
struct of_iommu_node *node;
- struct iommu_ops *ops = NULL;
+ const struct iommu_ops *ops = NULL;
spin_lock(&of_iommu_lock);
list_for_each_entry(node, &of_iommu_list, list)
@@ -134,12 +134,12 @@ struct iommu_ops *of_iommu_get_ops(struct device_node *np)
return ops;
}
-struct iommu_ops *of_iommu_configure(struct device *dev,
- struct device_node *master_np)
+const struct iommu_ops *of_iommu_configure(struct device *dev,
+ struct device_node *master_np)
{
struct of_phandle_args iommu_spec;
struct device_node *np;
- struct iommu_ops *ops = NULL;
+ const struct iommu_ops *ops = NULL;
int idx = 0;
/*
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 9bc20e211..505548aaf 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -136,7 +136,7 @@ static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
struct seq_file *s)
{
seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram,
- (cr->cam & MMU_CAM_P) ? 1 : 0);
+ (cr->cam & MMU_CAM_P) ? 1 : 0);
return 0;
}
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 3dc5b65f3..e2583cce2 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -628,10 +628,12 @@ iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
break;
default:
fn = NULL;
- BUG();
break;
}
+ if (WARN_ON(!fn))
+ return -EINVAL;
+
prot = get_iopte_attr(e);
spin_lock(&obj->page_table_lock);
@@ -987,7 +989,6 @@ static int omap_iommu_remove(struct platform_device *pdev)
{
struct omap_iommu *obj = platform_get_drvdata(pdev);
- iopgtable_clear_entry_all(obj);
omap_iommu_debugfs_remove(obj);
pm_runtime_disable(obj->dev);
@@ -1161,7 +1162,8 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
* should never fail, but please keep this around to ensure
* we keep the hardware happy
*/
- BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
+ if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)))
+ goto fail_align;
clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
spin_lock_init(&omap_domain->lock);
@@ -1172,6 +1174,8 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
return &omap_domain->domain;
+fail_align:
+ kfree(omap_domain->pgtable);
fail_nomem:
kfree(omap_domain);
out:
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 0ea8d9a24..25b4627cb 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1049,6 +1049,8 @@ static int rk_iommu_probe(struct platform_device *pdev)
for (i = 0; i < pdev->num_resources; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
+ continue;
iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(iommu->bases[i]))
continue;
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index 035d54492..75dd15d66 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -629,8 +629,7 @@ static void ipoctal_hangup(struct tty_struct *tty)
tty_port_hangup(&channel->tty_port);
ipoctal_reset_channel(channel);
-
- clear_bit(ASYNCB_INITIALIZED, &channel->tty_port.flags);
+ tty_port_set_initialized(&channel->tty_port, 0);
wake_up_interruptible(&channel->tty_port.open_wait);
}
@@ -642,7 +641,7 @@ static void ipoctal_shutdown(struct tty_struct *tty)
return;
ipoctal_reset_channel(channel);
- clear_bit(ASYNCB_INITIALIZED, &channel->tty_port.flags);
+ tty_port_set_initialized(&channel->tty_port, 0);
}
static void ipoctal_cleanup(struct tty_struct *tty)
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 3e124793e..fa33c50b0 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -27,6 +27,7 @@ config ARM_GIC_V3
select IRQ_DOMAIN
select MULTI_IRQ_HANDLER
select IRQ_DOMAIN_HIERARCHY
+ select PARTITION_PERCPU
config ARM_GIC_V3_ITS
bool
@@ -244,3 +245,18 @@ config IRQ_MXS
config MVEBU_ODMI
bool
select GENERIC_MSI_IRQ_DOMAIN
+
+config LS_SCFG_MSI
+ def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
+ depends on PCI && PCI_MSI
+ select PCI_MSI_IRQ_DOMAIN
+
+config PARTITION_PERCPU
+ bool
+
+config EZNPS_GIC
+ bool "NPS400 Global Interrupt Manager (GIM)"
+ depends on ARC || (COMPILE_TEST && !64BIT)
+ select IRQ_DOMAIN
+ help
+ Support the EZchip NPS400 global interrupt controller
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b03cfcbba..38853a187 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
+obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o
obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
obj-$(CONFIG_IRQ_MXS) += irq-mxs.o
obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o
@@ -27,6 +28,7 @@ obj-$(CONFIG_REALVIEW_DT) += irq-gic-realview.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
+obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
obj-$(CONFIG_ARM_VIC) += irq-vic.o
@@ -65,3 +67,5 @@ obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o
obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o
obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o
obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
+obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
+obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
index 25384255b..63d980995 100644
--- a/drivers/irqchip/irq-alpine-msi.c
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <asm/irq.h>
-#include <asm-generic/msi.h>
+#include <asm/msi.h>
/* MSIX message address format: local GIC target */
#define ALPINE_MSIX_SPI_TARGET_CLUSTER0 BIT(16)
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index b6e950d47..72ff1d5c5 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -195,7 +195,7 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
* Ensure that stores to normal memory are visible to the
* other CPUs before issuing the IPI.
*/
- dsb();
+ smp_wmb();
for_each_cpu(cpu, mask) {
writel(1 << ipi, mailbox0_base + 16 * cpu);
@@ -223,6 +223,7 @@ static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
.priority = 100,
};
+#ifdef CONFIG_ARM
int __init bcm2836_smp_boot_secondary(unsigned int cpu,
struct task_struct *idle)
{
@@ -238,7 +239,7 @@ int __init bcm2836_smp_boot_secondary(unsigned int cpu,
static const struct smp_operations bcm2836_smp_ops __initconst = {
.smp_boot_secondary = bcm2836_smp_boot_secondary,
};
-
+#endif
#endif
static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
@@ -252,12 +253,15 @@ bcm2836_arm_irqchip_smp_init(void)
/* Unmask IPIs to the boot CPU. */
bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier,
CPU_STARTING,
- (void *)smp_processor_id());
+ (void *)(uintptr_t)smp_processor_id());
register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier);
set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
+
+#ifdef CONFIG_ARM
smp_set_ops(&bcm2836_smp_ops);
#endif
+#endif
}
/*
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index eb5eb0cd4..2223b3f15 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -182,7 +182,7 @@ static int __init _clps711x_intc_init(struct device_node *np,
writel_relaxed(0, clps711x_intc->intmr[2]);
err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id());
- if (IS_ERR_VALUE(err))
+ if (err < 0)
goto out_iounmap;
clps711x_intc->ops.map = clps711x_intc_irq_map;
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 75573fa43..1eef56a89 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -183,7 +183,7 @@ static int crossbar_domain_translate(struct irq_domain *d,
return -EINVAL;
*hwirq = fwspec->param[1];
- *type = fwspec->param[2];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
return 0;
}
diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c
new file mode 100644
index 000000000..efbf0e430
--- /dev/null
+++ b/drivers/irqchip/irq-eznps.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <soc/nps/common.h>
+
+#define NPS_NR_CPU_IRQS 8 /* number of interrupt lines of NPS400 CPU */
+#define NPS_TIMER0_IRQ 3
+
+/*
+ * The NPS400 core includes Interrupt Controller (IC) support.
+ * All cores can deactivate level irqs at the first level of
+ * control, in the cores' mesh layer, called MTM.
+ * For devices outside the chip, e.g. uart or network, there is a
+ * second level called the Global Interrupt Manager (GIM), which
+ * can control both level and edge interrupts.
+ *
+ * NOTE: AUX_IENABLE and CTOP_AUX_IACK are auxiliary registers,
+ * each with a private HW copy per CPU.
+ */
+
+static void nps400_irq_mask(struct irq_data *irqd)
+{
+ unsigned int ienb;
+ unsigned int irq = irqd_to_hwirq(irqd);
+
+ ienb = read_aux_reg(AUX_IENABLE);
+ ienb &= ~(1 << irq);
+ write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static void nps400_irq_unmask(struct irq_data *irqd)
+{
+ unsigned int ienb;
+ unsigned int irq = irqd_to_hwirq(irqd);
+
+ ienb = read_aux_reg(AUX_IENABLE);
+ ienb |= (1 << irq);
+ write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static void nps400_irq_eoi_global(struct irq_data *irqd)
+{
+ unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
+
+ write_aux_reg(CTOP_AUX_IACK, 1 << irq);
+
+ /* Don't ack GIC before all device access attempts are done */
+ mb();
+
+ nps_ack_gic();
+}
+
+static void nps400_irq_eoi(struct irq_data *irqd)
+{
+ unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
+
+ write_aux_reg(CTOP_AUX_IACK, 1 << irq);
+}
+
+static struct irq_chip nps400_irq_chip_fasteoi = {
+ .name = "NPS400 IC Global",
+ .irq_mask = nps400_irq_mask,
+ .irq_unmask = nps400_irq_unmask,
+ .irq_eoi = nps400_irq_eoi_global,
+};
+
+static struct irq_chip nps400_irq_chip_percpu = {
+ .name = "NPS400 IC",
+ .irq_mask = nps400_irq_mask,
+ .irq_unmask = nps400_irq_unmask,
+ .irq_eoi = nps400_irq_eoi,
+};
+
+static int nps400_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ switch (hw) {
+ case NPS_TIMER0_IRQ:
+#ifdef CONFIG_SMP
+ case NPS_IPI_IRQ:
+#endif
+ irq_set_percpu_devid(virq);
+ irq_set_chip_and_handler(virq, &nps400_irq_chip_percpu,
+ handle_percpu_devid_irq);
+ break;
+ default:
+ irq_set_chip_and_handler(virq, &nps400_irq_chip_fasteoi,
+ handle_fasteoi_irq);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct irq_domain_ops nps400_irq_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = nps400_irq_map,
+};
+
+static int __init nps400_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ static struct irq_domain *nps400_root_domain;
+
+ if (parent) {
+ pr_err("DeviceTree incore ic not a root irq controller\n");
+ return -EINVAL;
+ }
+
+ nps400_root_domain = irq_domain_add_linear(node, NPS_NR_CPU_IRQS,
+ &nps400_irq_ops, NULL);
+
+ if (!nps400_root_domain) {
+ pr_err("nps400 root irq domain not avail\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Needed for primary domain lookup to succeed
+ * This is a primary irqchip, and can never have a parent
+ */
+ irq_set_default_host(nps400_root_domain);
+
+#ifdef CONFIG_SMP
+ irq_create_mapping(nps400_root_domain, NPS_IPI_IRQ);
+#endif
+
+ return 0;
+}
+IRQCHIP_DECLARE(ezchip_nps400_ic, "ezchip,nps400-ic", nps400_of_init);
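
nps400_irq_map() above marks the timer and (on SMP) IPI lines per-CPU and routes them to handle_percpu_devid_irq; a consumer of such a line must claim it with the per-CPU request API rather than request_irq(). A minimal sketch with hypothetical names:

#include <linux/interrupt.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, nps_timer_evt);	/* hypothetical dev_id */

static irqreturn_t nps_timer_isr(int irq, void *dev_id)
{
	/* runs on the CPU that took the interrupt */
	return IRQ_HANDLED;
}

static int nps_claim_timer(unsigned int virq)
{
	/* per-CPU mappings take a __percpu dev_id and no IRQF_ flags */
	return request_percpu_irq(virq, nps_timer_isr, "nps400-timer",
				  &nps_timer_evt);
}
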
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index f174ce0ca..89e7423f0 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -21,6 +21,19 @@
#include "irq-gic-common.h"
+static const struct gic_kvm_info *gic_kvm_info;
+
+const struct gic_kvm_info *gic_get_kvm_info(void)
+{
+ return gic_kvm_info;
+}
+
+void gic_set_kvm_info(const struct gic_kvm_info *info)
+{
+ BUG_ON(gic_kvm_info != NULL);
+ gic_kvm_info = info;
+}
+
void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void *data)
{
@@ -50,14 +63,26 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
else if (type & IRQ_TYPE_EDGE_BOTH)
val |= confmask;
+ /* If the current configuration is the same, then we are done */
+ if (val == oldval)
+ return 0;
+
/*
* Write back the new configuration, and possibly re-enable
- * the interrupt. If we tried to write a new configuration and failed,
- * return an error.
+ * the interrupt. If we fail to write a new configuration for
+ * an SPI then WARN and return an error. If we fail to write the
+ * configuration for a PPI this is most likely because the GIC
+ * does not allow us to set the configuration or we are in a
+ * non-secure mode, and hence it may not be catastrophic.
*/
writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
- if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val && val != oldval)
- ret = -EINVAL;
+ if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val) {
+ if (WARN_ON(irq >= 32))
+ ret = -EINVAL;
+ else
+ pr_warn("GIC: PPI%d is secure or misconfigured\n",
+ irq - 16);
+ }
if (sync_access)
sync_access();
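
The gic_set_kvm_info()/gic_get_kvm_info() pair added above is a one-shot hand-off: the irqchip records what it learned from DT or ACPI, and the KVM/vGIC side later retrieves it. A minimal sketch of the consumer side, assuming the struct gic_kvm_info layout from <linux/irqchip/arm-gic-common.h> (the probe function name is hypothetical):

    #include <linux/irqchip/arm-gic-common.h>

    static int vgic_probe_sketch(void)
    {
            const struct gic_kvm_info *info = gic_get_kvm_info();

            if (!info)
                    return -ENODEV;    /* no GIC registered its info */

            switch (info->type) {
            case GIC_V2:
                    /* info->vctrl and info->vcpu hold the GICH/GICV windows */
                    break;
            case GIC_V3:
                    /* info->vcpu may be empty if firmware gave no GICV base */
                    break;
            default:
                    return -ENXIO;
            }

            return info->maint_irq ? 0 : -ENXIO;
    }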
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index fff697db8..205e5fddf 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/arm-gic-common.h>
struct gic_quirk {
const char *desc;
@@ -35,4 +36,6 @@ void gic_cpu_config(void __iomem *base, void (*sync_access)(void));
void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void *data);
+void gic_set_kvm_info(const struct gic_kvm_info *info);
+
#endif /* _IRQ_GIC_COMMON_H */
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 28f047c61..ad0d2960b 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -49,6 +49,9 @@
/* APM X-Gene with GICv2m MSI_IIDR register value */
#define XGENE_GICV2M_MSI_IIDR 0x06000170
+/* Broadcom NS2 GICv2m MSI_IIDR register value */
+#define BCM_NS2_GICV2M_MSI_IIDR 0x0000013f
+
/* List of flags for specific v2m implementation */
#define GICV2M_NEEDS_SPI_OFFSET 0x00000001
@@ -62,6 +65,7 @@ struct v2m_data {
void __iomem *base; /* GICv2m virt address */
u32 spi_start; /* The SPI number that MSIs start */
u32 nr_spis; /* The number of SPIs for MSIs */
+ u32 spi_offset; /* offset to be subtracted from SPI number */
unsigned long *bm; /* MSI vector bitmap */
u32 flags; /* v2m flags for specific implementation */
};
@@ -102,7 +106,7 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->data = data->hwirq;
if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
- msg->data -= v2m->spi_start;
+ msg->data -= v2m->spi_offset;
}
static struct irq_chip gicv2m_irq_chip = {
@@ -340,9 +344,20 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
* different from the standard GICv2m implementation where
* the MSI data is the absolute value within the range from
* spi_start to (spi_start + num_spis).
+ *
+ * The Broadcom NS2 GICv2m implementation has an erratum where the MSI data
+ * is 'spi_number - 32'
*/
- if (readl_relaxed(v2m->base + V2M_MSI_IIDR) == XGENE_GICV2M_MSI_IIDR)
+ switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
+ case XGENE_GICV2M_MSI_IIDR:
+ v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
+ v2m->spi_offset = v2m->spi_start;
+ break;
+ case BCM_NS2_GICV2M_MSI_IIDR:
v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
+ v2m->spi_offset = 32;
+ break;
+ }
v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
GFP_KERNEL);
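
The new spi_offset field makes the two quirks explicit. For a frame with spi_start = 64, SPI 70 encodes as MSI data 70 on a standard v2m, 6 on X-Gene (offset = spi_start), and 38 on Broadcom NS2 (offset = 32). A standalone sketch of the computation done in gicv2m_compose_msi_msg(), assuming the flag name from this file:

    /* mirror of the msg->data computation above */
    static u32 v2m_msi_data(u32 hwirq, u32 flags, u32 spi_offset)
    {
            u32 data = hwirq;               /* absolute SPI number */

            if (flags & GICV2M_NEEDS_SPI_OFFSET)
                    data -= spi_offset;     /* X-Gene: spi_start; NS2: 32 */

            return data;
    }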
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 39261798c..5eb1f9e17 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -41,6 +41,7 @@
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
@@ -55,6 +56,16 @@ struct its_collection {
};
/*
+ * The ITS_BASER structure - contains memory information and the cached
+ * value of the BASER register configuration.
+ */
+struct its_baser {
+ void *base;
+ u64 val;
+ u32 order;
+};
+
+/*
* The ITS structure - contains most of the infrastructure, with the
* top-level MSI domain, the command queue, the collections, and the
* list of devices writing to it.
@@ -66,14 +77,13 @@ struct its_node {
unsigned long phys_base;
struct its_cmd_block *cmd_base;
struct its_cmd_block *cmd_write;
- struct {
- void *base;
- u32 order;
- } tables[GITS_BASER_NR_REGS];
+ struct its_baser tables[GITS_BASER_NR_REGS];
struct its_collection *collections;
struct list_head its_device_list;
u64 flags;
u32 ite_size;
+ u32 device_ids;
+ int numa_node;
};
#define ITS_ITT_ALIGN SZ_256
@@ -605,11 +615,23 @@ static void its_unmask_irq(struct irq_data *d)
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ unsigned int cpu;
+ const struct cpumask *cpu_mask = cpu_online_mask;
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_collection *target_col;
u32 id = its_get_event_id(d);
+ /* LPIs cannot be routed to a redistributor on a foreign node */
+ if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ if (its_dev->its->numa_node >= 0) {
+ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+ if (!cpumask_intersects(mask_val, cpu_mask))
+ return -EINVAL;
+ }
+ }
+
+ cpu = cpumask_any_and(mask_val, cpu_mask);
+
if (cpu >= nr_cpu_ids)
return -EINVAL;
@@ -838,6 +860,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
ids = GITS_TYPER_DEVBITS(typer);
}
+ its->device_ids = ids;
+
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
u64 type = GITS_BASER_TYPE(val);
@@ -913,6 +937,7 @@ retry_baser:
}
val |= alloc_pages - 1;
+ its->tables[i].val = val;
writeq_relaxed(val, its->base + GITS_BASER + i * 8);
tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
@@ -1090,6 +1115,16 @@ static void its_cpu_init_collection(void)
list_for_each_entry(its, &its_nodes, entry) {
u64 target;
+ /* avoid cross-node collections and their mapping */
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+ struct device_node *cpu_node;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (its->numa_node != NUMA_NO_NODE &&
+ its->numa_node != of_node_to_nid(cpu_node))
+ continue;
+ }
+
/*
* We now have to bind each collection to its target
* redistributor.
@@ -1138,9 +1173,22 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
return its_dev;
}
+static struct its_baser *its_get_baser(struct its_node *its, u32 type)
+{
+ int i;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ if (GITS_BASER_TYPE(its->tables[i].val) == type)
+ return &its->tables[i];
+ }
+
+ return NULL;
+}
+
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
int nvecs)
{
+ struct its_baser *baser;
struct its_device *dev;
unsigned long *lpi_map;
unsigned long flags;
@@ -1151,6 +1199,16 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
int nr_ites;
int sz;
+ baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
+
+ /* Don't allow a 'dev_id' that exceeds the single, flat table limit */
+ if (baser) {
+ if (dev_id >= (PAGE_ORDER_TO_SIZE(baser->order) /
+ GITS_BASER_ENTRY_SIZE(baser->val)))
+ return NULL;
+ } else if (ilog2(dev_id) >= its->device_ids)
+ return NULL;
+
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
/*
* At least one bit of EventID is being used, hence a minimum
@@ -1317,9 +1375,14 @@ static void its_irq_domain_activate(struct irq_domain *domain,
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ const struct cpumask *cpu_mask = cpu_online_mask;
+
+ /* get the cpu_mask of the local node */
+ if (its_dev->its->numa_node >= 0)
+ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
/* Bind the LPI to the first possible CPU */
- its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+ its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
/* Map the GIC IRQ and event to the device */
its_send_mapvi(its_dev, d->hwirq, event);
@@ -1409,6 +1472,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
}
+static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+ struct its_node *its = data;
+
+ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@@ -1418,6 +1488,14 @@ static const struct gic_quirk its_quirks[] = {
.init = its_enable_quirk_cavium_22375,
},
#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23144
+ {
+ .desc = "ITS: Cavium erratum 23144",
+ .iidr = 0xa100034c, /* ThunderX pass 1.x */
+ .mask = 0xffff0fff,
+ .init = its_enable_quirk_cavium_23144,
+ },
+#endif
{
}
};
@@ -1480,6 +1558,7 @@ static int __init its_probe(struct device_node *node,
its->base = its_base;
its->phys_base = res.start;
its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+ its->numa_node = of_node_to_nid(node);
its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
if (!its->cmd_base) {
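
The dev_id bound introduced in its_create_device() is simple arithmetic over the cached BASER value: a flat device table holds PAGE_ORDER_TO_SIZE(order) / entry_size entries. A worked sketch, assuming 4 KiB pages, a DEVICE baser of order 2, and 8-byte entries (the numbers are illustrative, not from this patch):

    static bool its_dev_id_fits(u32 dev_id, u32 order, u32 entry_size)
    {
            /* e.g. (4096 << 2) / 8 = 2048 entries, so dev_id must be < 2048 */
            u64 entries = ((u64)PAGE_SIZE << order) / entry_size;

            return dev_id < entries;
    }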
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 390e0ece2..2c5ba0e70 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -15,6 +15,8 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#define pr_fmt(fmt) "GICv3: " fmt
+
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
@@ -28,7 +30,9 @@
#include <linux/slab.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irqchip/irq-partition-percpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
@@ -44,6 +48,7 @@ struct redist_region {
};
struct gic_chip_data {
+ struct fwnode_handle *fwnode;
void __iomem *dist_base;
struct redist_region *redist_regions;
struct rdists rdists;
@@ -51,11 +56,14 @@ struct gic_chip_data {
u64 redist_stride;
u32 nr_redist_regions;
unsigned int irq_nr;
+ struct partition_desc *ppi_descs[16];
};
static struct gic_chip_data gic_data __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
+static struct gic_kvm_info gic_v3_kvm_info;
+
#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
@@ -147,7 +155,7 @@ static void gic_enable_redist(bool enable)
while (count--) {
val = readl_relaxed(rbase + GICR_WAKER);
- if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
+ if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
break;
cpu_relax();
udelay(1);
@@ -831,10 +839,62 @@ static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
}
}
+static int gic_irq_domain_select(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ /* Not for us */
+ if (fwspec->fwnode != d->fwnode)
+ return 0;
+
+ /* If this is not DT, then we have a single domain */
+ if (!is_of_node(fwspec->fwnode))
+ return 1;
+
+ /*
+ * If this is a PPI and we have a 4th (non-null) parameter,
+ * then we need to match the partition domain.
+ */
+ if (fwspec->param_count >= 4 &&
+ fwspec->param[0] == 1 && fwspec->param[3] != 0)
+ return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
+
+ return d == gic_data.domain;
+}
+
static const struct irq_domain_ops gic_irq_domain_ops = {
.translate = gic_irq_domain_translate,
.alloc = gic_irq_domain_alloc,
.free = gic_irq_domain_free,
+ .select = gic_irq_domain_select,
+};
+
+static int partition_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct device_node *np;
+ int ret;
+
+ np = of_find_node_by_phandle(fwspec->param[3]);
+ if (WARN_ON(!np))
+ return -EINVAL;
+
+ ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
+ of_node_to_fwnode(np));
+ if (ret < 0)
+ return ret;
+
+ *hwirq = ret;
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+
+ return 0;
+}
+
+static const struct irq_domain_ops partition_domain_ops = {
+ .translate = partition_domain_translate,
+ .select = gic_irq_domain_select,
};
static void gicv3_enable_quirks(void)
@@ -862,6 +922,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
if (static_key_true(&supports_deactivate))
pr_info("GIC: Using split EOI/Deactivate mode\n");
+ gic_data.fwnode = handle;
gic_data.dist_base = dist_base;
gic_data.redist_regions = rdist_regs;
gic_data.nr_redist_regions = nr_redist_regions;
@@ -920,6 +981,143 @@ static int __init gic_validate_dist_version(void __iomem *dist_base)
return 0;
}
+static int get_cpu_number(struct device_node *dn)
+{
+ const __be32 *cell;
+ u64 hwid;
+ int i;
+
+ cell = of_get_property(dn, "reg", NULL);
+ if (!cell)
+ return -1;
+
+ hwid = of_read_number(cell, of_n_addr_cells(dn));
+
+ /*
+ * Non-affinity bits must be set to 0 in the DT
+ */
+ if (hwid & ~MPIDR_HWID_BITMASK)
+ return -1;
+
+ for (i = 0; i < num_possible_cpus(); i++)
+ if (cpu_logical_map(i) == hwid)
+ return i;
+
+ return -1;
+}
+
+/* Create all possible partitions at boot time */
+static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
+{
+ struct device_node *parts_node, *child_part;
+ int part_idx = 0, i;
+ int nr_parts;
+ struct partition_affinity *parts;
+
+ parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
+ if (!parts_node)
+ return;
+
+ nr_parts = of_get_child_count(parts_node);
+
+ if (!nr_parts)
+ return;
+
+ parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
+ if (WARN_ON(!parts))
+ return;
+
+ for_each_child_of_node(parts_node, child_part) {
+ struct partition_affinity *part;
+ int n;
+
+ part = &parts[part_idx];
+
+ part->partition_id = of_node_to_fwnode(child_part);
+
+ pr_info("GIC: PPI partition %s[%d] { ",
+ child_part->name, part_idx);
+
+ n = of_property_count_elems_of_size(child_part, "affinity",
+ sizeof(u32));
+ WARN_ON(n <= 0);
+
+ for (i = 0; i < n; i++) {
+ int err, cpu;
+ u32 cpu_phandle;
+ struct device_node *cpu_node;
+
+ err = of_property_read_u32_index(child_part, "affinity",
+ i, &cpu_phandle);
+ if (WARN_ON(err))
+ continue;
+
+ cpu_node = of_find_node_by_phandle(cpu_phandle);
+ if (WARN_ON(!cpu_node))
+ continue;
+
+ cpu = get_cpu_number(cpu_node);
+ if (WARN_ON(cpu == -1))
+ continue;
+
+ pr_cont("%s[%d] ", cpu_node->full_name, cpu);
+
+ cpumask_set_cpu(cpu, &part->mask);
+ }
+
+ pr_cont("}\n");
+ part_idx++;
+ }
+
+ for (i = 0; i < 16; i++) {
+ unsigned int irq;
+ struct partition_desc *desc;
+ struct irq_fwspec ppi_fwspec = {
+ .fwnode = gic_data.fwnode,
+ .param_count = 3,
+ .param = {
+ [0] = 1,
+ [1] = i,
+ [2] = IRQ_TYPE_NONE,
+ },
+ };
+
+ irq = irq_create_fwspec_mapping(&ppi_fwspec);
+ if (WARN_ON(!irq))
+ continue;
+ desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
+ irq, &partition_domain_ops);
+ if (WARN_ON(!desc))
+ continue;
+
+ gic_data.ppi_descs[i] = desc;
+ }
+}
+
+static void __init gic_of_setup_kvm_info(struct device_node *node)
+{
+ int ret;
+ struct resource r;
+ u32 gicv_idx;
+
+ gic_v3_kvm_info.type = GIC_V3;
+
+ gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
+ if (!gic_v3_kvm_info.maint_irq)
+ return;
+
+ if (of_property_read_u32(node, "#redistributor-regions",
+ &gicv_idx))
+ gicv_idx = 1;
+
+ gicv_idx += 3; /* Also skip GICD, GICC, GICH */
+ ret = of_address_to_resource(node, gicv_idx, &r);
+ if (!ret)
+ gic_v3_kvm_info.vcpu = r;
+
+ gic_set_kvm_info(&gic_v3_kvm_info);
+}
+
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *dist_base;
@@ -971,8 +1169,12 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
redist_stride, &node->fwnode);
- if (!err)
- return 0;
+ if (err)
+ goto out_unmap_rdist;
+
+ gic_populate_ppi_partitions(node);
+ gic_of_setup_kvm_info(node);
+ return 0;
out_unmap_rdist:
for (i = 0; i < nr_redist_regions; i++)
@@ -987,19 +1189,25 @@ out_unmap_dist:
IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
#ifdef CONFIG_ACPI
-static void __iomem *dist_base;
-static struct redist_region *redist_regs __initdata;
-static u32 nr_redist_regions __initdata;
-static bool single_redist;
+static struct
+{
+ void __iomem *dist_base;
+ struct redist_region *redist_regs;
+ u32 nr_redist_regions;
+ bool single_redist;
+ u32 maint_irq;
+ int maint_irq_mode;
+ phys_addr_t vcpu_base;
+} acpi_data __initdata;
static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
static int count = 0;
- redist_regs[count].phys_base = phys_base;
- redist_regs[count].redist_base = redist_base;
- redist_regs[count].single_redist = single_redist;
+ acpi_data.redist_regs[count].phys_base = phys_base;
+ acpi_data.redist_regs[count].redist_base = redist_base;
+ acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
count++;
}
@@ -1027,7 +1235,7 @@ gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
{
struct acpi_madt_generic_interrupt *gicc =
(struct acpi_madt_generic_interrupt *)header;
- u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+ u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
void __iomem *redist_base;
@@ -1044,7 +1252,7 @@ static int __init gic_acpi_collect_gicr_base(void)
acpi_tbl_entry_handler redist_parser;
enum acpi_madt_type type;
- if (single_redist) {
+ if (acpi_data.single_redist) {
type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
redist_parser = gic_acpi_parse_madt_gicc;
} else {
@@ -1095,14 +1303,14 @@ static int __init gic_acpi_count_gicr_regions(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
gic_acpi_match_gicr, 0);
if (count > 0) {
- single_redist = false;
+ acpi_data.single_redist = false;
return count;
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
gic_acpi_match_gicc, 0);
if (count > 0)
- single_redist = true;
+ acpi_data.single_redist = true;
return count;
}
@@ -1122,36 +1330,117 @@ static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
if (count <= 0)
return false;
- nr_redist_regions = count;
+ acpi_data.nr_redist_regions = count;
return true;
}
+static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_interrupt *gicc =
+ (struct acpi_madt_generic_interrupt *)header;
+ int maint_irq_mode;
+ static int first_madt = true;
+
+ /* Skip unusable CPUs */
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
+ return 0;
+
+ maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
+ ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
+
+ if (first_madt) {
+ first_madt = false;
+
+ acpi_data.maint_irq = gicc->vgic_interrupt;
+ acpi_data.maint_irq_mode = maint_irq_mode;
+ acpi_data.vcpu_base = gicc->gicv_base_address;
+
+ return 0;
+ }
+
+ /*
+ * The maintenance interrupt and GICV should be the same for every CPU
+ */
+ if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
+ (acpi_data.maint_irq_mode != maint_irq_mode) ||
+ (acpi_data.vcpu_base != gicc->gicv_base_address))
+ return -EINVAL;
+
+ return 0;
+}
+
+static bool __init gic_acpi_collect_virt_info(void)
+{
+ int count;
+
+ count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
+ gic_acpi_parse_virt_madt_gicc, 0);
+
+ return (count > 0);
+}
+
#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
+#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
+#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)
+
+static void __init gic_acpi_setup_kvm_info(void)
+{
+ int irq;
+
+ if (!gic_acpi_collect_virt_info()) {
+ pr_warn("Unable to get hardware information used for virtualization\n");
+ return;
+ }
+
+ gic_v3_kvm_info.type = GIC_V3;
+
+ irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
+ acpi_data.maint_irq_mode,
+ ACPI_ACTIVE_HIGH);
+ if (irq <= 0)
+ return;
+
+ gic_v3_kvm_info.maint_irq = irq;
+
+ if (acpi_data.vcpu_base) {
+ struct resource *vcpu = &gic_v3_kvm_info.vcpu;
+
+ vcpu->flags = IORESOURCE_MEM;
+ vcpu->start = acpi_data.vcpu_base;
+ vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
+ }
+
+ gic_set_kvm_info(&gic_v3_kvm_info);
+}
static int __init
gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
{
struct acpi_madt_generic_distributor *dist;
struct fwnode_handle *domain_handle;
+ size_t size;
int i, err;
/* Get distributor base address */
dist = (struct acpi_madt_generic_distributor *)header;
- dist_base = ioremap(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE);
- if (!dist_base) {
+ acpi_data.dist_base = ioremap(dist->base_address,
+ ACPI_GICV3_DIST_MEM_SIZE);
+ if (!acpi_data.dist_base) {
pr_err("Unable to map GICD registers\n");
return -ENOMEM;
}
- err = gic_validate_dist_version(dist_base);
+ err = gic_validate_dist_version(acpi_data.dist_base);
if (err) {
- pr_err("No distributor detected at @%p, giving up", dist_base);
+ pr_err("No distributor detected at @%p, giving up",
+ acpi_data.dist_base);
goto out_dist_unmap;
}
- redist_regs = kzalloc(sizeof(*redist_regs) * nr_redist_regions,
- GFP_KERNEL);
- if (!redist_regs) {
+ size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
+ acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
+ if (!acpi_data.redist_regs) {
err = -ENOMEM;
goto out_dist_unmap;
}
@@ -1160,29 +1449,31 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
if (err)
goto out_redist_unmap;
- domain_handle = irq_domain_alloc_fwnode(dist_base);
+ domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base);
if (!domain_handle) {
err = -ENOMEM;
goto out_redist_unmap;
}
- err = gic_init_bases(dist_base, redist_regs, nr_redist_regions, 0,
- domain_handle);
+ err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
+ acpi_data.nr_redist_regions, 0, domain_handle);
if (err)
goto out_fwhandle_free;
acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
+ gic_acpi_setup_kvm_info();
+
return 0;
out_fwhandle_free:
irq_domain_free_fwnode(domain_handle);
out_redist_unmap:
- for (i = 0; i < nr_redist_regions; i++)
- if (redist_regs[i].redist_base)
- iounmap(redist_regs[i].redist_base);
- kfree(redist_regs);
+ for (i = 0; i < acpi_data.nr_redist_regions; i++)
+ if (acpi_data.redist_regs[i].redist_base)
+ iounmap(acpi_data.redist_regs[i].redist_base);
+ kfree(acpi_data.redist_regs);
out_dist_unmap:
- iounmap(dist_base);
+ iounmap(acpi_data.dist_base);
return err;
}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
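
Tying the new select/translate callbacks together: a four-cell DT specifier such as <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW &part0> is steered by gic_irq_domain_select() to partition_get_domain(gic_data.ppi_descs[9]), and partition_domain_translate() then resolves the phandle in the fourth cell to that partition's hwirq. A sketch of the predicate the select callback implements:

    /* true when a fwspec names a partitioned PPI (4th cell is a phandle) */
    static bool is_partitioned_ppi(const struct irq_fwspec *fwspec)
    {
            return fwspec->param_count >= 4 &&
                   fwspec->param[0] == 1 &&    /* GIC_PPI */
                   fwspec->param[3] != 0;      /* non-null partition */
    }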
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 5c4da5808..fbc4ae2af 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -55,7 +55,7 @@
static void gic_check_cpu_features(void)
{
- WARN_TAINT_ONCE(cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
+ WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
TAINT_CPU_OUT_OF_SPEC,
"GICv3 system registers enabled, broken firmware!\n");
}
@@ -72,6 +72,9 @@ struct gic_chip_data {
struct irq_chip chip;
union gic_base dist_base;
union gic_base cpu_base;
+ void __iomem *raw_dist_base;
+ void __iomem *raw_cpu_base;
+ u32 percpu_offset;
#ifdef CONFIG_CPU_PM
u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
@@ -102,6 +105,8 @@ static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;
+static struct gic_kvm_info gic_v2_kvm_info;
+
#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
@@ -399,20 +404,6 @@ static struct irq_chip gic_chip = {
IRQCHIP_MASK_ON_SUSPEND,
};
-static struct irq_chip gic_eoimode1_chip = {
- .name = "GICv2",
- .irq_mask = gic_eoimode1_mask_irq,
- .irq_unmask = gic_unmask_irq,
- .irq_eoi = gic_eoimode1_eoi_irq,
- .irq_set_type = gic_set_type,
- .irq_get_irqchip_state = gic_irq_get_irqchip_state,
- .irq_set_irqchip_state = gic_irq_set_irqchip_state,
- .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
- .flags = IRQCHIP_SET_TYPE_MASKED |
- IRQCHIP_SKIP_SET_WAKE |
- IRQCHIP_MASK_ON_SUSPEND,
-};
-
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
@@ -481,7 +472,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}
-static void gic_cpu_init(struct gic_chip_data *gic)
+static int gic_cpu_init(struct gic_chip_data *gic)
{
void __iomem *dist_base = gic_data_dist_base(gic);
void __iomem *base = gic_data_cpu_base(gic);
@@ -497,7 +488,10 @@ static void gic_cpu_init(struct gic_chip_data *gic)
/*
* Get what the GIC says our CPU mask is.
*/
- BUG_ON(cpu >= NR_GIC_CPU_IF);
+ if (WARN_ON(cpu >= NR_GIC_CPU_IF))
+ return -EINVAL;
+
+ gic_check_cpu_features();
cpu_mask = gic_get_cpumask(gic);
gic_cpu_map[cpu] = cpu_mask;
@@ -514,6 +508,8 @@ static void gic_cpu_init(struct gic_chip_data *gic)
writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
gic_cpu_if_up(gic);
+
+ return 0;
}
int gic_cpu_if_down(unsigned int gic_nr)
@@ -539,34 +535,35 @@ int gic_cpu_if_down(unsigned int gic_nr)
* this function, no interrupts will be delivered by the GIC, and another
* platform-specific wakeup source must be enabled.
*/
-static void gic_dist_save(unsigned int gic_nr)
+static void gic_dist_save(struct gic_chip_data *gic)
{
unsigned int gic_irqs;
void __iomem *dist_base;
int i;
- BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+ if (WARN_ON(!gic))
+ return;
- gic_irqs = gic_data[gic_nr].gic_irqs;
- dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ gic_irqs = gic->gic_irqs;
+ dist_base = gic_data_dist_base(gic);
if (!dist_base)
return;
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
- gic_data[gic_nr].saved_spi_conf[i] =
+ gic->saved_spi_conf[i] =
readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
- gic_data[gic_nr].saved_spi_target[i] =
+ gic->saved_spi_target[i] =
readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
- gic_data[gic_nr].saved_spi_enable[i] =
+ gic->saved_spi_enable[i] =
readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
- gic_data[gic_nr].saved_spi_active[i] =
+ gic->saved_spi_active[i] =
readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}
@@ -577,16 +574,17 @@ static void gic_dist_save(unsigned int gic_nr)
* handled normally, but any edge interrupts that occurred will not be seen by
* the GIC and need to be handled by the platform-specific wakeup source.
*/
-static void gic_dist_restore(unsigned int gic_nr)
+static void gic_dist_restore(struct gic_chip_data *gic)
{
unsigned int gic_irqs;
unsigned int i;
void __iomem *dist_base;
- BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+ if (WARN_ON(!gic))
+ return;
- gic_irqs = gic_data[gic_nr].gic_irqs;
- dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+ gic_irqs = gic->gic_irqs;
+ dist_base = gic_data_dist_base(gic);
if (!dist_base)
return;
@@ -594,7 +592,7 @@ static void gic_dist_restore(unsigned int gic_nr)
writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
- writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
+ writel_relaxed(gic->saved_spi_conf[i],
dist_base + GIC_DIST_CONFIG + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
@@ -602,85 +600,87 @@ static void gic_dist_restore(unsigned int gic_nr)
dist_base + GIC_DIST_PRI + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
- writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
+ writel_relaxed(gic->saved_spi_target[i],
dist_base + GIC_DIST_TARGET + i * 4);
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
writel_relaxed(GICD_INT_EN_CLR_X32,
dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
- writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
+ writel_relaxed(gic->saved_spi_enable[i],
dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
writel_relaxed(GICD_INT_EN_CLR_X32,
dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
- writel_relaxed(gic_data[gic_nr].saved_spi_active[i],
+ writel_relaxed(gic->saved_spi_active[i],
dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}
writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}
-static void gic_cpu_save(unsigned int gic_nr)
+static void gic_cpu_save(struct gic_chip_data *gic)
{
int i;
u32 *ptr;
void __iomem *dist_base;
void __iomem *cpu_base;
- BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+ if (WARN_ON(!gic))
+ return;
- dist_base = gic_data_dist_base(&gic_data[gic_nr]);
- cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+ dist_base = gic_data_dist_base(gic);
+ cpu_base = gic_data_cpu_base(gic);
if (!dist_base || !cpu_base)
return;
- ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ ptr = raw_cpu_ptr(gic->saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
- ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
+ ptr = raw_cpu_ptr(gic->saved_ppi_active);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
- ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ ptr = raw_cpu_ptr(gic->saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
-static void gic_cpu_restore(unsigned int gic_nr)
+static void gic_cpu_restore(struct gic_chip_data *gic)
{
int i;
u32 *ptr;
void __iomem *dist_base;
void __iomem *cpu_base;
- BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+ if (WARN_ON(!gic))
+ return;
- dist_base = gic_data_dist_base(&gic_data[gic_nr]);
- cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
+ dist_base = gic_data_dist_base(gic);
+ cpu_base = gic_data_cpu_base(gic);
if (!dist_base || !cpu_base)
return;
- ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ ptr = raw_cpu_ptr(gic->saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
writel_relaxed(GICD_INT_EN_CLR_X32,
dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
- ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
+ ptr = raw_cpu_ptr(gic->saved_ppi_active);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
writel_relaxed(GICD_INT_EN_CLR_X32,
dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}
- ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ ptr = raw_cpu_ptr(gic->saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
@@ -689,7 +689,7 @@ static void gic_cpu_restore(unsigned int gic_nr)
dist_base + GIC_DIST_PRI + i * 4);
writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
- gic_cpu_if_up(&gic_data[gic_nr]);
+ gic_cpu_if_up(gic);
}
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
@@ -704,18 +704,18 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
#endif
switch (cmd) {
case CPU_PM_ENTER:
- gic_cpu_save(i);
+ gic_cpu_save(&gic_data[i]);
break;
case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
- gic_cpu_restore(i);
+ gic_cpu_restore(&gic_data[i]);
break;
case CPU_CLUSTER_PM_ENTER:
- gic_dist_save(i);
+ gic_dist_save(&gic_data[i]);
break;
case CPU_CLUSTER_PM_ENTER_FAILED:
case CPU_CLUSTER_PM_EXIT:
- gic_dist_restore(i);
+ gic_dist_restore(&gic_data[i]);
break;
}
}
@@ -727,26 +727,39 @@ static struct notifier_block gic_notifier_block = {
.notifier_call = gic_notifier,
};
-static void __init gic_pm_init(struct gic_chip_data *gic)
+static int __init gic_pm_init(struct gic_chip_data *gic)
{
gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
sizeof(u32));
- BUG_ON(!gic->saved_ppi_enable);
+ if (WARN_ON(!gic->saved_ppi_enable))
+ return -ENOMEM;
gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
sizeof(u32));
- BUG_ON(!gic->saved_ppi_active);
+ if (WARN_ON(!gic->saved_ppi_active))
+ goto free_ppi_enable;
gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
sizeof(u32));
- BUG_ON(!gic->saved_ppi_conf);
+ if (WARN_ON(!gic->saved_ppi_conf))
+ goto free_ppi_active;
if (gic == &gic_data[0])
cpu_pm_register_notifier(&gic_notifier_block);
+
+ return 0;
+
+free_ppi_active:
+ free_percpu(gic->saved_ppi_active);
+free_ppi_enable:
+ free_percpu(gic->saved_ppi_enable);
+
+ return -ENOMEM;
}
#else
-static void __init gic_pm_init(struct gic_chip_data *gic)
+static int __init gic_pm_init(struct gic_chip_data *gic)
{
+ return 0;
}
#endif
@@ -1019,63 +1032,63 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
.unmap = gic_irq_domain_unmap,
};
-static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
- void __iomem *dist_base, void __iomem *cpu_base,
- u32 percpu_offset, struct fwnode_handle *handle)
+static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
+ struct fwnode_handle *handle)
{
irq_hw_number_t hwirq_base;
- struct gic_chip_data *gic;
- int gic_irqs, irq_base, i;
-
- BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
+ int gic_irqs, irq_base, i, ret;
- gic_check_cpu_features();
-
- gic = &gic_data[gic_nr];
+ if (WARN_ON(!gic || gic->domain))
+ return -EINVAL;
/* Initialize irq_chip */
- if (static_key_true(&supports_deactivate) && gic_nr == 0) {
- gic->chip = gic_eoimode1_chip;
+ gic->chip = gic_chip;
+
+ if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
+ gic->chip.irq_mask = gic_eoimode1_mask_irq;
+ gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
+ gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
+ gic->chip.name = kasprintf(GFP_KERNEL, "GICv2");
} else {
- gic->chip = gic_chip;
- gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr);
+ gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d",
+ (int)(gic - &gic_data[0]));
}
#ifdef CONFIG_SMP
- if (gic_nr == 0)
+ if (gic == &gic_data[0])
gic->chip.irq_set_affinity = gic_set_affinity;
#endif
-#ifdef CONFIG_GIC_NON_BANKED
- if (percpu_offset) { /* Frankein-GIC without banked registers... */
+ if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
+ /* Franken-GIC without banked registers... */
unsigned int cpu;
gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
if (WARN_ON(!gic->dist_base.percpu_base ||
!gic->cpu_base.percpu_base)) {
- free_percpu(gic->dist_base.percpu_base);
- free_percpu(gic->cpu_base.percpu_base);
- return;
+ ret = -ENOMEM;
+ goto error;
}
for_each_possible_cpu(cpu) {
u32 mpidr = cpu_logical_map(cpu);
u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- unsigned long offset = percpu_offset * core_id;
- *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
- *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
+ unsigned long offset = gic->percpu_offset * core_id;
+ *per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
+ gic->raw_dist_base + offset;
+ *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
+ gic->raw_cpu_base + offset;
}
gic_set_base_accessor(gic, gic_get_percpu_base);
- } else
-#endif
- { /* Normal, sane GIC... */
- WARN(percpu_offset,
+ } else {
+ /* Normal, sane GIC... */
+ WARN(gic->percpu_offset,
"GIC_NON_BANKED not enabled, ignoring %08x offset!",
- percpu_offset);
- gic->dist_base.common_base = dist_base;
- gic->cpu_base.common_base = cpu_base;
+ gic->percpu_offset);
+ gic->dist_base.common_base = gic->raw_dist_base;
+ gic->cpu_base.common_base = gic->raw_cpu_base;
gic_set_base_accessor(gic, gic_get_common_base);
}
@@ -1098,7 +1111,7 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
* For primary GICs, skip over SGIs.
* For secondary GICs, skip over PPIs, too.
*/
- if (gic_nr == 0 && (irq_start & 31) > 0) {
+ if (gic == &gic_data[0] && (irq_start & 31) > 0) {
hwirq_base = 16;
if (irq_start != -1)
irq_start = (irq_start & ~31) + 16;
@@ -1110,7 +1123,7 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
numa_node_id());
- if (IS_ERR_VALUE(irq_base)) {
+ if (irq_base < 0) {
WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
irq_start);
irq_base = irq_start;
@@ -1120,10 +1133,12 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
hwirq_base, &gic_irq_domain_ops, gic);
}
- if (WARN_ON(!gic->domain))
- return;
+ if (WARN_ON(!gic->domain)) {
+ ret = -ENODEV;
+ goto error;
+ }
- if (gic_nr == 0) {
+ if (gic == &gic_data[0]) {
/*
* Initialize the CPU interface map to all CPUs.
* It will be refined as each CPU probes its ID.
@@ -1141,19 +1156,57 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
}
gic_dist_init(gic);
- gic_cpu_init(gic);
- gic_pm_init(gic);
+ ret = gic_cpu_init(gic);
+ if (ret)
+ goto error;
+
+ ret = gic_pm_init(gic);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
+ free_percpu(gic->dist_base.percpu_base);
+ free_percpu(gic->cpu_base.percpu_base);
+ }
+
+ kfree(gic->chip.name);
+
+ return ret;
}
void __init gic_init(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base)
{
+ struct gic_chip_data *gic;
+
+ if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
+ return;
+
/*
* Non-DT/ACPI systems won't run a hypervisor, so let's not
* bother with these...
*/
static_key_slow_dec(&supports_deactivate);
- __gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, 0, NULL);
+
+ gic = &gic_data[gic_nr];
+ gic->raw_dist_base = dist_base;
+ gic->raw_cpu_base = cpu_base;
+
+ __gic_init_bases(gic, irq_start, NULL);
+}
+
+static void gic_teardown(struct gic_chip_data *gic)
+{
+ if (WARN_ON(!gic))
+ return;
+
+ if (gic->raw_dist_base)
+ iounmap(gic->raw_dist_base);
+ if (gic->raw_cpu_base)
+ iounmap(gic->raw_cpu_base);
}
#ifdef CONFIG_OF
@@ -1197,37 +1250,88 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
return true;
}
+static int __init gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
+{
+ if (!gic || !node)
+ return -EINVAL;
+
+ gic->raw_dist_base = of_iomap(node, 0);
+ if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
+ goto error;
+
+ gic->raw_cpu_base = of_iomap(node, 1);
+ if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
+ goto error;
+
+ if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
+ gic->percpu_offset = 0;
+
+ return 0;
+
+error:
+ gic_teardown(gic);
+
+ return -ENOMEM;
+}
+
+static void __init gic_of_setup_kvm_info(struct device_node *node)
+{
+ int ret;
+ struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
+ struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;
+
+ gic_v2_kvm_info.type = GIC_V2;
+
+ gic_v2_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
+ if (!gic_v2_kvm_info.maint_irq)
+ return;
+
+ ret = of_address_to_resource(node, 2, vctrl_res);
+ if (ret)
+ return;
+
+ ret = of_address_to_resource(node, 3, vcpu_res);
+ if (ret)
+ return;
+
+ gic_set_kvm_info(&gic_v2_kvm_info);
+}
+
int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
- void __iomem *cpu_base;
- void __iomem *dist_base;
- u32 percpu_offset;
- int irq;
+ struct gic_chip_data *gic;
+ int irq, ret;
if (WARN_ON(!node))
return -ENODEV;
- dist_base = of_iomap(node, 0);
- WARN(!dist_base, "unable to map gic dist registers\n");
+ if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
+ return -EINVAL;
+
+ gic = &gic_data[gic_cnt];
- cpu_base = of_iomap(node, 1);
- WARN(!cpu_base, "unable to map gic cpu registers\n");
+ ret = gic_of_setup(gic, node);
+ if (ret)
+ return ret;
/*
* Disable split EOI/Deactivate if either HYP is not available
* or the CPU interface is too small.
*/
- if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base))
+ if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
static_key_slow_dec(&supports_deactivate);
- if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
- percpu_offset = 0;
+ ret = __gic_init_bases(gic, -1, &node->fwnode);
+ if (ret) {
+ gic_teardown(gic);
+ return ret;
+ }
- __gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset,
- &node->fwnode);
- if (!gic_cnt)
+ if (!gic_cnt) {
gic_init_physaddr(node);
+ gic_of_setup_kvm_info(node);
+ }
if (parent) {
irq = irq_of_parse_and_map(node, 0);
@@ -1253,7 +1357,14 @@ IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
#endif
#ifdef CONFIG_ACPI
-static phys_addr_t cpu_phy_base __initdata;
+static struct
+{
+ phys_addr_t cpu_phys_base;
+ u32 maint_irq;
+ int maint_irq_mode;
+ phys_addr_t vctrl_base;
+ phys_addr_t vcpu_base;
+} acpi_data __initdata;
static int __init
gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
@@ -1273,10 +1384,16 @@ gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
* All CPU interface addresses have to be the same.
*/
gic_cpu_base = processor->base_address;
- if (cpu_base_assigned && gic_cpu_base != cpu_phy_base)
+ if (cpu_base_assigned && gic_cpu_base != acpi_data.cpu_phys_base)
return -EINVAL;
- cpu_phy_base = gic_cpu_base;
+ acpi_data.cpu_phys_base = gic_cpu_base;
+ acpi_data.maint_irq = processor->vgic_interrupt;
+ acpi_data.maint_irq_mode = (processor->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
+ ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
+ acpi_data.vctrl_base = processor->gich_base_address;
+ acpi_data.vcpu_base = processor->gicv_base_address;
+
cpu_base_assigned = 1;
return 0;
}
@@ -1307,14 +1424,49 @@ static bool __init gic_validate_dist(struct acpi_subtable_header *header,
#define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K)
#define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K)
+#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
+#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)
+
+static void __init gic_acpi_setup_kvm_info(void)
+{
+ int irq;
+ struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
+ struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;
+
+ gic_v2_kvm_info.type = GIC_V2;
+
+ if (!acpi_data.vctrl_base)
+ return;
+
+ vctrl_res->flags = IORESOURCE_MEM;
+ vctrl_res->start = acpi_data.vctrl_base;
+ vctrl_res->end = vctrl_res->start + ACPI_GICV2_VCTRL_MEM_SIZE - 1;
+
+ if (!acpi_data.vcpu_base)
+ return;
+
+ vcpu_res->flags = IORESOURCE_MEM;
+ vcpu_res->start = acpi_data.vcpu_base;
+ vcpu_res->end = vcpu_res->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
+
+ irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
+ acpi_data.maint_irq_mode,
+ ACPI_ACTIVE_HIGH);
+ if (irq <= 0)
+ return;
+
+ gic_v2_kvm_info.maint_irq = irq;
+
+ gic_set_kvm_info(&gic_v2_kvm_info);
+}
static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
const unsigned long end)
{
struct acpi_madt_generic_distributor *dist;
- void __iomem *cpu_base, *dist_base;
struct fwnode_handle *domain_handle;
- int count;
+ struct gic_chip_data *gic = &gic_data[0];
+ int count, ret;
/* Collect CPU base addresses */
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
@@ -1324,17 +1476,18 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
return -EINVAL;
}
- cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
- if (!cpu_base) {
+ gic->raw_cpu_base = ioremap(acpi_data.cpu_phys_base, ACPI_GIC_CPU_IF_MEM_SIZE);
+ if (!gic->raw_cpu_base) {
pr_err("Unable to map GICC registers\n");
return -ENOMEM;
}
dist = (struct acpi_madt_generic_distributor *)header;
- dist_base = ioremap(dist->base_address, ACPI_GICV2_DIST_MEM_SIZE);
- if (!dist_base) {
+ gic->raw_dist_base = ioremap(dist->base_address,
+ ACPI_GICV2_DIST_MEM_SIZE);
+ if (!gic->raw_dist_base) {
pr_err("Unable to map GICD registers\n");
- iounmap(cpu_base);
+ gic_teardown(gic);
return -ENOMEM;
}
@@ -1349,21 +1502,28 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
/*
* Initialize GIC instance zero (no multi-GIC support).
*/
- domain_handle = irq_domain_alloc_fwnode(dist_base);
+ domain_handle = irq_domain_alloc_fwnode(gic->raw_dist_base);
if (!domain_handle) {
pr_err("Unable to allocate domain handle\n");
- iounmap(cpu_base);
- iounmap(dist_base);
+ gic_teardown(gic);
return -ENOMEM;
}
- __gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle);
+ ret = __gic_init_bases(gic, -1, domain_handle);
+ if (ret) {
+ pr_err("Failed to initialise GIC\n");
+ irq_domain_free_fwnode(domain_handle);
+ gic_teardown(gic);
+ return ret;
+ }
acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
gicv2m_init(NULL, gic_data[0].domain);
+ gic_acpi_setup_kvm_info();
+
return 0;
}
IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
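
On the non-banked ("Franken-GIC") path above, each core reaches the distributor and CPU interface through its own alias window: the alias is raw_base + percpu_offset * core_id, so with cpu-offset = 0x8000, core 2 uses raw_base + 0x10000. A sketch of that computation, using the field names introduced by this patch:

    /* per-CPU alias window used when GIC_NON_BANKED is in effect */
    static void __iomem *gic_percpu_alias(void __iomem *raw_base,
                                          u32 percpu_offset, u32 core_id)
    {
            return raw_base + (unsigned long)percpu_offset * core_id;
    }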
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 9688d2e2a..9e25d8ce0 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -402,7 +402,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */
irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id());
- if (IS_ERR_VALUE(irq_base)) {
+ if (irq_base < 0) {
pr_err("failed to allocate IRQ numbers\n");
return -EINVAL;
}
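
The IS_ERR_VALUE() removals here and in irq-gic.c above are correctness fixes rather than style: the macro is meant for unsigned long values in the top errno range, and feeding it a plain int relies on implicit conversions that do not always produce the intended test. For the int returned by irq_alloc_descs(), the direct sign check is unambiguous:

    int irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id());

    if (irq_base < 0)           /* a negative errno, e.g. -ENOMEM */
            return -EINVAL;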
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
new file mode 100644
index 000000000..1034aeb2e
--- /dev/null
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <asm/exception.h>
+
+#define LPC32XX_INTC_MASK 0x00
+#define LPC32XX_INTC_RAW 0x04
+#define LPC32XX_INTC_STAT 0x08
+#define LPC32XX_INTC_POL 0x0C
+#define LPC32XX_INTC_TYPE 0x10
+#define LPC32XX_INTC_FIQ 0x14
+
+#define NR_LPC32XX_IC_IRQS 32
+
+struct lpc32xx_irq_chip {
+ void __iomem *base;
+ struct irq_domain *domain;
+ struct irq_chip chip;
+};
+
+static struct lpc32xx_irq_chip *lpc32xx_mic_irqc;
+
+static inline u32 lpc32xx_ic_read(struct lpc32xx_irq_chip *ic, u32 reg)
+{
+ return readl_relaxed(ic->base + reg);
+}
+
+static inline void lpc32xx_ic_write(struct lpc32xx_irq_chip *ic,
+ u32 reg, u32 val)
+{
+ writel_relaxed(val, ic->base + reg);
+}
+
+static void lpc32xx_irq_mask(struct irq_data *d)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ u32 val, mask = BIT(d->hwirq);
+
+ val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) & ~mask;
+ lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
+}
+
+static void lpc32xx_irq_unmask(struct irq_data *d)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ u32 val, mask = BIT(d->hwirq);
+
+ val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) | mask;
+ lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
+}
+
+static void lpc32xx_irq_ack(struct irq_data *d)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq);
+
+ lpc32xx_ic_write(ic, LPC32XX_INTC_RAW, mask);
+}
+
+static int lpc32xx_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
+ u32 val, mask = BIT(d->hwirq);
+ bool high, edge;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ edge = true;
+ high = true;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge = true;
+ high = false;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ edge = false;
+ high = true;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ edge = false;
+ high = false;
+ break;
+ default:
+ pr_info("unsupported irq type %d\n", type);
+ return -EINVAL;
+ }
+
+ irqd_set_trigger_type(d, type);
+
+ val = lpc32xx_ic_read(ic, LPC32XX_INTC_POL);
+ if (high)
+ val |= mask;
+ else
+ val &= ~mask;
+ lpc32xx_ic_write(ic, LPC32XX_INTC_POL, val);
+
+ val = lpc32xx_ic_read(ic, LPC32XX_INTC_TYPE);
+ if (edge) {
+ val |= mask;
+ irq_set_handler_locked(d, handle_edge_irq);
+ } else {
+ val &= ~mask;
+ irq_set_handler_locked(d, handle_level_irq);
+ }
+ lpc32xx_ic_write(ic, LPC32XX_INTC_TYPE, val);
+
+ return 0;
+}
+
+static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs)
+{
+ struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc;
+ u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
+
+ while (hwirq) {
+ irq = __ffs(hwirq);
+ hwirq &= ~BIT(irq);
+ handle_domain_irq(lpc32xx_mic_irqc->domain, irq, regs);
+ }
+}
+
+static void lpc32xx_sic_handler(struct irq_desc *desc)
+{
+ struct lpc32xx_irq_chip *ic = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
+
+ chained_irq_enter(chip, desc);
+
+ while (hwirq) {
+ irq = __ffs(hwirq);
+ hwirq &= ~BIT(irq);
+ generic_handle_irq(irq_find_mapping(ic->domain, irq));
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int lpc32xx_irq_domain_map(struct irq_domain *id, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct lpc32xx_irq_chip *ic = id->host_data;
+
+ irq_set_chip_data(virq, ic);
+ irq_set_chip_and_handler(virq, &ic->chip, handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_noprobe(virq);
+
+ return 0;
+}
+
+static void lpc32xx_irq_domain_unmap(struct irq_domain *id, unsigned int virq)
+{
+ irq_set_chip_and_handler(virq, NULL, NULL);
+}
+
+static const struct irq_domain_ops lpc32xx_irq_domain_ops = {
+ .map = lpc32xx_irq_domain_map,
+ .unmap = lpc32xx_irq_domain_unmap,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int __init lpc32xx_of_ic_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct lpc32xx_irq_chip *irqc;
+ bool is_mic = of_device_is_compatible(node, "nxp,lpc3220-mic");
+ const __be32 *reg = of_get_property(node, "reg", NULL);
+ u32 parent_irq, i, addr = reg ? be32_to_cpu(*reg) : 0;
+
+ irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+
+ irqc->base = of_iomap(node, 0);
+ if (!irqc->base) {
+ pr_err("%s: unable to map registers\n", node->full_name);
+ kfree(irqc);
+ return -EINVAL;
+ }
+
+ irqc->chip.irq_ack = lpc32xx_irq_ack;
+ irqc->chip.irq_mask = lpc32xx_irq_mask;
+ irqc->chip.irq_unmask = lpc32xx_irq_unmask;
+ irqc->chip.irq_set_type = lpc32xx_irq_set_type;
+ if (is_mic)
+ irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.mic", addr);
+ else
+ irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.sic", addr);
+
+ irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS,
+ &lpc32xx_irq_domain_ops, irqc);
+ if (!irqc->domain) {
+ pr_err("unable to add irq domain\n");
+ iounmap(irqc->base);
+ kfree(irqc->chip.name);
+ kfree(irqc);
+ return -ENODEV;
+ }
+
+ if (is_mic) {
+ lpc32xx_mic_irqc = irqc;
+ set_handle_irq(lpc32xx_handle_irq);
+ } else {
+ for (i = 0; i < of_irq_count(node); i++) {
+ parent_irq = irq_of_parse_and_map(node, i);
+ if (parent_irq)
+ irq_set_chained_handler_and_data(parent_irq,
+ lpc32xx_sic_handler, irqc);
+ }
+ }
+
+ lpc32xx_ic_write(irqc, LPC32XX_INTC_MASK, 0x00);
+ lpc32xx_ic_write(irqc, LPC32XX_INTC_POL, 0x00);
+ lpc32xx_ic_write(irqc, LPC32XX_INTC_TYPE, 0x00);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(nxp_lpc32xx_mic, "nxp,lpc3220-mic", lpc32xx_of_ic_init);
+IRQCHIP_DECLARE(nxp_lpc32xx_sic, "nxp,lpc3220-sic", lpc32xx_of_ic_init);
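
Both entry points above drain the 32-bit STAT word lowest-bit-first, so one pass can dispatch several pending interrupts; e.g. STAT = 0x12 dispatches hwirq 1 and then hwirq 4. A standalone sketch of the loop shape shared by lpc32xx_handle_irq() and lpc32xx_sic_handler():

    /* drain a pending-status word, lowest set bit first */
    static void dispatch_stat_sketch(struct irq_domain *domain, u32 stat)
    {
            while (stat) {
                    unsigned int hwirq = __ffs(stat);

                    stat &= ~BIT(hwirq);
                    generic_handle_irq(irq_find_mapping(domain, hwirq));
            }
    }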
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
new file mode 100644
index 000000000..02cca74ca
--- /dev/null
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -0,0 +1,240 @@
+/*
+ * Freescale SCFG MSI(-X) support
+ *
+ * Copyright (C) 2016 Freescale Semiconductor.
+ *
+ * Author: Minghuan Lian <Minghuan.Lian@nxp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+
+#define MSI_MAX_IRQS 32
+#define MSI_IBS_SHIFT 3
+#define MSIR 4
+
+struct ls_scfg_msi {
+ spinlock_t lock;
+ struct platform_device *pdev;
+ struct irq_domain *parent;
+ struct irq_domain *msi_domain;
+ void __iomem *regs;
+ phys_addr_t msiir_addr;
+ int irq;
+ DECLARE_BITMAP(used, MSI_MAX_IRQS);
+};
+
+static struct irq_chip ls_scfg_msi_irq_chip = {
+ .name = "MSI",
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static struct msi_domain_info ls_scfg_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS |
+ MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_PCI_MSIX),
+ .chip = &ls_scfg_msi_irq_chip,
+};
+
+static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
+
+ msg->address_hi = upper_32_bits(msi_data->msiir_addr);
+ msg->address_lo = lower_32_bits(msi_data->msiir_addr);
+ msg->data = data->hwirq << MSI_IBS_SHIFT;
+}
+
+static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip ls_scfg_msi_parent_chip = {
+ .name = "SCFG",
+ .irq_compose_msi_msg = ls_scfg_msi_compose_msg,
+ .irq_set_affinity = ls_scfg_msi_set_affinity,
+};
+
+static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *args)
+{
+ struct ls_scfg_msi *msi_data = domain->host_data;
+ int pos, err = 0;
+
+ WARN_ON(nr_irqs != 1);
+
+ spin_lock(&msi_data->lock);
+ pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS);
+ if (pos < MSI_MAX_IRQS)
+ __set_bit(pos, msi_data->used);
+ else
+ err = -ENOSPC;
+ spin_unlock(&msi_data->lock);
+
+ if (err)
+ return err;
+
+ irq_domain_set_info(domain, virq, pos,
+ &ls_scfg_msi_parent_chip, msi_data,
+ handle_simple_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
+ int pos;
+
+ pos = d->hwirq;
+ if (pos < 0 || pos >= MSI_MAX_IRQS) {
+ pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
+ return;
+ }
+
+ spin_lock(&msi_data->lock);
+ __clear_bit(pos, msi_data->used);
+ spin_unlock(&msi_data->lock);
+}
+
+static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
+ .alloc = ls_scfg_msi_domain_irq_alloc,
+ .free = ls_scfg_msi_domain_irq_free,
+};
+
+static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
+{
+ struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc);
+ unsigned long val;
+ int pos, virq;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
+ val = ioread32be(msi_data->regs + MSIR);
+ for_each_set_bit(pos, &val, MSI_MAX_IRQS) {
+ virq = irq_find_mapping(msi_data->parent, (31 - pos));
+ if (virq)
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
+static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
+{
+ /* Initialize MSI domain parent */
+ msi_data->parent = irq_domain_add_linear(NULL,
+ MSI_MAX_IRQS,
+ &ls_scfg_msi_domain_ops,
+ msi_data);
+ if (!msi_data->parent) {
+ dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ msi_data->msi_domain = pci_msi_create_irq_domain(
+ of_node_to_fwnode(msi_data->pdev->dev.of_node),
+ &ls_scfg_msi_domain_info,
+ msi_data->parent);
+ if (!msi_data->msi_domain) {
+ dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
+ irq_domain_remove(msi_data->parent);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ls_scfg_msi_probe(struct platform_device *pdev)
+{
+ struct ls_scfg_msi *msi_data;
+ struct resource *res;
+ int ret;
+
+ msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
+ if (!msi_data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(msi_data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize 'regs'\n");
+ return PTR_ERR(msi_data->regs);
+ }
+ msi_data->msiir_addr = res->start;
+
+ msi_data->irq = platform_get_irq(pdev, 0);
+ if (msi_data->irq <= 0) {
+ dev_err(&pdev->dev, "failed to get MSI irq\n");
+ return -ENODEV;
+ }
+
+ msi_data->pdev = pdev;
+ spin_lock_init(&msi_data->lock);
+
+ ret = ls_scfg_msi_domains_init(msi_data);
+ if (ret)
+ return ret;
+
+ irq_set_chained_handler_and_data(msi_data->irq,
+ ls_scfg_msi_irq_handler,
+ msi_data);
+
+ platform_set_drvdata(pdev, msi_data);
+
+ return 0;
+}
+
+static int ls_scfg_msi_remove(struct platform_device *pdev)
+{
+ struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
+
+ irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL);
+
+ irq_domain_remove(msi_data->msi_domain);
+ irq_domain_remove(msi_data->parent);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id ls_scfg_msi_id[] = {
+ { .compatible = "fsl,1s1021a-msi", },
+ { .compatible = "fsl,1s1043a-msi", },
+ {},
+};
+
+static struct platform_driver ls_scfg_msi_driver = {
+ .driver = {
+ .name = "ls-scfg-msi",
+ .of_match_table = ls_scfg_msi_id,
+ },
+ .probe = ls_scfg_msi_probe,
+ .remove = ls_scfg_msi_remove,
+};
+
+module_platform_driver(ls_scfg_msi_driver);
+
+MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
+MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");
+MODULE_LICENSE("GPL v2");
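For reference, the hwirq encoding above round-trips as follows: compose_msg() places the hwirq in the MSI data shifted by MSI_IBS_SHIFT, and the chained handler recovers it from MSIR, whose bit numbering runs opposite to the hwirq index — hence the (31 - pos) lookup. A worked example, assuming that reversed bit order (the constant values themselves are not shown in this patch):

	/*
	 * hwirq = 5
	 * msg->data = 5 << MSI_IBS_SHIFT      (programmed into the endpoint)
	 * on delivery, MSIR bit 26 (= 31 - 5) is set
	 * handler: for_each_set_bit() yields pos = 26,
	 *          irq_find_mapping(parent, 31 - 26) -> hwirq 5 -> virq
	 */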
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index d67baa231..03b79b061 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -263,8 +263,8 @@ static int mbigen_device_probe(struct platform_device *pdev)
parent = platform_bus_type.dev_root;
child = of_platform_device_create(np, NULL, parent);
- if (IS_ERR(child))
- return PTR_ERR(child);
+ if (!child)
+ return -ENOMEM;
if (of_property_read_u32(child->dev.of_node, "num-pins",
&num_pins) < 0) {
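The hunk above reflects that of_platform_device_create() returns NULL on failure rather than an ERR_PTR-encoded pointer, so the old IS_ERR()/PTR_ERR() pair could never report the error. The corrected calling pattern, as a minimal sketch:

	child = of_platform_device_create(np, NULL, parent);
	if (!child)		/* NULL, not ERR_PTR, signals failure */
		return -ENOMEM;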
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 40fb1209d..70ed1d015 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -197,7 +197,7 @@ void gic_write_cpu_compare(cycle_t cnt, int cpu)
local_irq_save(flags);
- gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu));
if (mips_cm_is64) {
gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
@@ -246,6 +246,14 @@ void gic_stop_count(void)
#endif
+unsigned gic_read_local_vp_id(void)
+{
+ unsigned long ident;
+
+ ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT));
+ return ident & GIC_VP_IDENT_VCNUM_MSK;
+}
+
static bool gic_local_irq_is_routable(int intr)
{
u32 vpe_ctl;
@@ -553,7 +561,8 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
spin_lock_irqsave(&gic_lock, flags);
for (i = 0; i < gic_vpes; i++) {
- gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+ mips_cm_vp_id(i));
gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
}
spin_unlock_irqrestore(&gic_lock, flags);
@@ -567,7 +576,8 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
spin_lock_irqsave(&gic_lock, flags);
for (i = 0; i < gic_vpes; i++) {
- gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+ mips_cm_vp_id(i));
gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
}
spin_unlock_irqrestore(&gic_lock, flags);
@@ -607,7 +617,8 @@ static void __init gic_basic_init(void)
for (i = 0; i < gic_vpes; i++) {
unsigned int j;
- gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+ mips_cm_vp_id(i));
for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
if (!gic_local_irq_is_routable(j))
continue;
@@ -652,7 +663,8 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
for (i = 0; i < gic_vpes; i++) {
u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
- gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+ mips_cm_vp_id(i));
switch (intr) {
case GIC_LOCAL_INT_WD:
@@ -706,7 +718,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
spin_lock_irqsave(&gic_lock, flags);
gic_map_to_pin(intr, gic_cpu_pin);
- gic_map_to_vpe(intr, vpe);
+ gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(intr, pcpu_masks[i].pcpu_mask);
set_bit(intr, pcpu_masks[vpe].pcpu_mask);
@@ -947,7 +959,7 @@ int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
switch (bus_token) {
case DOMAIN_BUS_IPI:
is_ipi = d->bus_token == bus_token;
- return to_of_node(d->fwnode) == node && is_ipi;
+ return (!node || to_of_node(d->fwnode) == node) && is_ipi;
break;
default:
return 0;
@@ -966,7 +978,7 @@ static void __init __gic_init(unsigned long gic_base_addr,
unsigned int cpu_vec, unsigned int irqbase,
struct device_node *node)
{
- unsigned int gicconfig;
+ unsigned int gicconfig, cpu;
unsigned int v[2];
__gic_base_addr = gic_base_addr;
@@ -983,6 +995,14 @@ static void __init __gic_init(unsigned long gic_base_addr,
gic_vpes = gic_vpes + 1;
if (cpu_has_veic) {
+ /* Set EIC mode for all VPEs */
+ for_each_present_cpu(cpu) {
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+ mips_cm_vp_id(cpu));
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
+ GIC_VPE_CTL_EIC_MODE_MSK);
+ }
+
/* Always use vector 1 in EIC mode */
gic_cpu_pin = 0;
timer_cpu_pin = gic_cpu_pin;
diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
new file mode 100644
index 000000000..ccd72c2cb
--- /dev/null
+++ b/drivers/irqchip/irq-partition-percpu.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2016 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-partition-percpu.h>
+#include <linux/irqdomain.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+struct partition_desc {
+ int nr_parts;
+ struct partition_affinity *parts;
+ struct irq_domain *domain;
+ struct irq_desc *chained_desc;
+ unsigned long *bitmap;
+ struct irq_domain_ops ops;
+};
+
+static bool partition_check_cpu(struct partition_desc *part,
+ unsigned int cpu, unsigned int hwirq)
+{
+ return cpumask_test_cpu(cpu, &part->parts[hwirq].mask);
+}
+
+static void partition_irq_mask(struct irq_data *d)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+ chip->irq_mask)
+ chip->irq_mask(data);
+}
+
+static void partition_irq_unmask(struct irq_data *d)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+ chip->irq_unmask)
+ chip->irq_unmask(data);
+}
+
+static int partition_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool val)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+ chip->irq_set_irqchip_state)
+ return chip->irq_set_irqchip_state(data, which, val);
+
+ return -EINVAL;
+}
+
+static int partition_irq_get_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool *val)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
+ chip->irq_get_irqchip_state)
+ return chip->irq_get_irqchip_state(data, which, val);
+
+ return -EINVAL;
+}
+
+static int partition_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ if (chip->irq_set_type)
+ return chip->irq_set_type(data, type);
+
+ return -EINVAL;
+}
+
+static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct partition_desc *part = irq_data_get_irq_chip_data(d);
+ struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
+ struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
+
+ seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
+}
+
+static struct irq_chip partition_irq_chip = {
+ .irq_mask = partition_irq_mask,
+ .irq_unmask = partition_irq_unmask,
+ .irq_set_type = partition_irq_set_type,
+ .irq_get_irqchip_state = partition_irq_get_irqchip_state,
+ .irq_set_irqchip_state = partition_irq_set_irqchip_state,
+ .irq_print_chip = partition_irq_print_chip,
+};
+
+static void partition_handle_irq(struct irq_desc *desc)
+{
+ struct partition_desc *part = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ int cpu = smp_processor_id();
+ int hwirq;
+
+ chained_irq_enter(chip, desc);
+
+ for_each_set_bit(hwirq, part->bitmap, part->nr_parts) {
+ if (partition_check_cpu(part, cpu, hwirq))
+ break;
+ }
+
+ if (unlikely(hwirq == part->nr_parts)) {
+ handle_bad_irq(desc);
+ } else {
+ unsigned int irq;
+ irq = irq_find_mapping(part->domain, hwirq);
+ generic_handle_irq(irq);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static int partition_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ int ret;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ struct irq_fwspec *fwspec = arg;
+ struct partition_desc *part;
+
+ BUG_ON(nr_irqs != 1);
+ ret = domain->ops->translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ part = domain->host_data;
+
+ set_bit(hwirq, part->bitmap);
+ irq_set_chained_handler_and_data(irq_desc_get_irq(part->chained_desc),
+ partition_handle_irq, part);
+ irq_set_percpu_devid_partition(virq, &part->parts[hwirq].mask);
+ irq_domain_set_info(domain, virq, hwirq, &partition_irq_chip, part,
+ handle_percpu_devid_irq, NULL, NULL);
+ irq_set_status_flags(virq, IRQ_NOAUTOEN);
+
+ return 0;
+}
+
+static void partition_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d;
+
+ BUG_ON(nr_irqs != 1);
+
+ d = irq_domain_get_irq_data(domain, virq);
+ irq_set_handler(virq, NULL);
+ irq_domain_reset_irq_data(d);
+}
+
+int partition_translate_id(struct partition_desc *desc, void *partition_id)
+{
+ struct partition_affinity *part = NULL;
+ int i;
+
+ for (i = 0; i < desc->nr_parts; i++) {
+ if (desc->parts[i].partition_id == partition_id) {
+ part = &desc->parts[i];
+ break;
+ }
+ }
+
+ if (WARN_ON(!part)) {
+ pr_err("Failed to find partition\n");
+ return -EINVAL;
+ }
+
+ return i;
+}
+
+struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
+ struct partition_affinity *parts,
+ int nr_parts,
+ int chained_irq,
+ const struct irq_domain_ops *ops)
+{
+ struct partition_desc *desc;
+ struct irq_domain *d;
+
+ BUG_ON(!ops->select || !ops->translate);
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ desc->ops = *ops;
+ desc->ops.free = partition_domain_free;
+ desc->ops.alloc = partition_domain_alloc;
+
+ d = irq_domain_create_linear(fwnode, nr_parts, &desc->ops, desc);
+ if (!d)
+ goto out;
+ desc->domain = d;
+
+ desc->bitmap = kzalloc(sizeof(long) * BITS_TO_LONGS(nr_parts),
+ GFP_KERNEL);
+ if (WARN_ON(!desc->bitmap))
+ goto out;
+
+ desc->chained_desc = irq_to_desc(chained_irq);
+ desc->nr_parts = nr_parts;
+ desc->parts = parts;
+
+ return desc;
+out:
+ if (d)
+ irq_domain_remove(d);
+ kfree(desc);
+
+ return NULL;
+}
+
+struct irq_domain *partition_get_domain(struct partition_desc *dsc)
+{
+ if (dsc)
+ return dsc->domain;
+
+ return NULL;
+}
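A rough sketch of how a client irqchip might instantiate a partitioned per-cpu domain with this API — the partition IDs, masks, and ops are illustrative, not taken from an in-tree caller:

	static struct partition_affinity parts[2];

	static struct partition_desc *example_partition_setup(
			struct fwnode_handle *fwnode,
			const struct irq_domain_ops *ops, int chained_irq)
	{
		parts[0].partition_id = (void *)0x1;	/* hypothetical IDs */
		parts[1].partition_id = (void *)0x2;
		cpumask_copy(&parts[0].mask, cpumask_of(0));
		cpumask_copy(&parts[1].mask, cpumask_of(1));

		/* ops must provide .select and .translate (see BUG_ON above) */
		return partition_create_desc(fwnode, parts, ARRAY_SIZE(parts),
					     chained_irq, ops);
	}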
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
index e7155db01..73addb4b6 100644
--- a/drivers/irqchip/irq-pic32-evic.c
+++ b/drivers/irqchip/irq-pic32-evic.c
@@ -91,7 +91,7 @@ static int pic32_set_type_edge(struct irq_data *data,
/* set polarity for external interrupts only */
for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) {
if (priv->ext_irqs[i] == data->hwirq) {
- ret = pic32_set_ext_polarity(i + 1, flow_type);
+ ret = pic32_set_ext_polarity(i, flow_type);
if (ret)
return ret;
}
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index 50be9639e..e902f081e 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -235,7 +235,7 @@ static int tegra_ictlr_domain_translate(struct irq_domain *d,
return -EINVAL;
*hwirq = fwspec->param[1];
- *type = fwspec->param[2];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
return 0;
}
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 598ab3f0e..37dd4645b 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -227,4 +227,5 @@ int __init fpga_irq_of_init(struct device_node *node,
}
IRQCHIP_DECLARE(arm_fpga, "arm,versatile-fpga-irq", fpga_irq_of_init);
IRQCHIP_DECLARE(arm_fpga_sic, "arm,versatile-sic", fpga_irq_of_init);
+IRQCHIP_DECLARE(ox810se_rps, "oxsemi,ox810se-rps-irq", fpga_irq_of_init);
#endif
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 1ccd2abed..1518ba31a 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -232,7 +232,7 @@ static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
nr_irqs += shirq_blocks[i]->nr_irqs;
virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
- if (IS_ERR_VALUE(virq_base)) {
+ if (virq_base < 0) {
pr_err("%s: irq desc alloc failed\n", __func__);
goto err_unmap;
}
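IS_ERR_VALUE() is meant for unsigned long values that encode an errno in pointer form; irq_alloc_descs() simply returns a negative errno (or the allocated descriptor base) in a plain int, so the idiomatic test is a sign check:

	virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
	if (virq_base < 0)	/* negative errno on failure */
		goto err_unmap;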
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index d7c286656..1a1d99704 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -1147,8 +1147,6 @@ static byte test_c_ind_mask_bit(PLCI *plci, word b)
static void dump_c_ind_mask(PLCI *plci)
{
- static char hex_digit_table[0x10] =
- {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
word i, j, k;
dword d;
char *p;
@@ -1165,7 +1163,7 @@ static void dump_c_ind_mask(PLCI *plci)
d = plci->c_ind_mask_table[i + j];
for (k = 0; k < 8; k++)
{
- *(--p) = hex_digit_table[d & 0xf];
+ *(--p) = hex_asc_lo(d);
d >>= 4;
}
}
@@ -10507,7 +10505,6 @@ static void mixer_set_bchannel_id(PLCI *plci, byte *chi)
static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
{
- static char hex_digit_table[0x10] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
word n, i, j;
char *p;
char hex_line[2 * MIXER_MAX_DUMP_CHANNELS + MIXER_MAX_DUMP_CHANNELS / 8 + 4];
@@ -10690,13 +10687,13 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
n = li_total_channels;
if (n > MIXER_MAX_DUMP_CHANNELS)
n = MIXER_MAX_DUMP_CHANNELS;
+
p = hex_line;
for (j = 0; j < n; j++)
{
if ((j & 0x7) == 0)
*(p++) = ' ';
- *(p++) = hex_digit_table[li_config_table[j].curchnl >> 4];
- *(p++) = hex_digit_table[li_config_table[j].curchnl & 0xf];
+ p = hex_byte_pack(p, li_config_table[j].curchnl);
}
*p = '\0';
dbug(1, dprintf("[%06lx] CURRENT %s",
@@ -10706,8 +10703,7 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
{
if ((j & 0x7) == 0)
*(p++) = ' ';
- *(p++) = hex_digit_table[li_config_table[j].channel >> 4];
- *(p++) = hex_digit_table[li_config_table[j].channel & 0xf];
+ p = hex_byte_pack(p, li_config_table[j].channel);
}
*p = '\0';
dbug(1, dprintf("[%06lx] CHANNEL %s",
@@ -10717,8 +10713,7 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
{
if ((j & 0x7) == 0)
*(p++) = ' ';
- *(p++) = hex_digit_table[li_config_table[j].chflags >> 4];
- *(p++) = hex_digit_table[li_config_table[j].chflags & 0xf];
+ p = hex_byte_pack(p, li_config_table[j].chflags);
}
*p = '\0';
dbug(1, dprintf("[%06lx] CHFLAG %s",
@@ -10730,8 +10725,7 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
{
if ((j & 0x7) == 0)
*(p++) = ' ';
- *(p++) = hex_digit_table[li_config_table[i].flag_table[j] >> 4];
- *(p++) = hex_digit_table[li_config_table[i].flag_table[j] & 0xf];
+ p = hex_byte_pack(p, li_config_table[i].flag_table[j]);
}
*p = '\0';
dbug(1, dprintf("[%06lx] FLAG[%02x]%s",
@@ -10744,8 +10738,7 @@ static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
{
if ((j & 0x7) == 0)
*(p++) = ' ';
- *(p++) = hex_digit_table[li_config_table[i].coef_table[j] >> 4];
- *(p++) = hex_digit_table[li_config_table[i].coef_table[j] & 0xf];
+ p = hex_byte_pack(p, li_config_table[i].coef_table[j]);
}
*p = '\0';
dbug(1, dprintf("[%06lx] COEF[%02x]%s",
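The replacement helpers come from <linux/kernel.h>: hex_asc_lo(x) yields the ASCII digit for the low nibble of x, and hex_byte_pack(p, b) emits both digits of byte b at p and returns the advanced pointer, making the per-file hex_digit_table copies redundant. A minimal sketch:

	char buf[3], *p = buf;

	p = hex_byte_pack(p, 0x4f);	/* buf now holds "4f" */
	*p = '\0';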
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index a0efb4cef..5609deee7 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -127,7 +127,7 @@ net_send_packet(struct sk_buff *skb, struct net_device *dev)
if (lp->in_idx >= MAX_SKB_BUFFERS)
lp->in_idx = 0; /* wrap around */
lp->sk_count++; /* adjust counter */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* If we just used up the very last entry in the
* TX ring on this device, tell the queueing
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index aa5dd5668..c151c6daa 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1153,7 +1153,7 @@ static void isdn_net_tx_timeout(struct net_device *ndev)
* ever called --KG
*/
}
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
netif_wake_queue(ndev);
}
@@ -1291,7 +1291,7 @@ isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
} else {
/* Device is connected to an ISDN channel */
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
if (!lp->dialstate) {
/* ISDN connection is established, try sending */
int ret;
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 947d5c978..63eaa0a9f 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1043,17 +1043,13 @@ isdn_tty_change_speed(modem_info *info)
if (!(cflag & PARODD))
cval |= UART_LCR_EPAR;
- if (cflag & CLOCAL)
- port->flags &= ~ASYNC_CHECK_CD;
- else {
- port->flags |= ASYNC_CHECK_CD;
- }
+ tty_port_set_check_carrier(port, ~cflag & CLOCAL);
}
static int
isdn_tty_startup(modem_info *info)
{
- if (info->port.flags & ASYNC_INITIALIZED)
+ if (tty_port_initialized(&info->port))
return 0;
isdn_lock_drivers();
#ifdef ISDN_DEBUG_MODEM_OPEN
@@ -1070,7 +1066,7 @@ isdn_tty_startup(modem_info *info)
*/
isdn_tty_change_speed(info);
- info->port.flags |= ASYNC_INITIALIZED;
+ tty_port_set_initialized(&info->port, 1);
info->msr |= (UART_MSR_DSR | UART_MSR_CTS);
info->send_outstanding = 0;
return 0;
@@ -1083,7 +1079,7 @@ isdn_tty_startup(modem_info *info)
static void
isdn_tty_shutdown(modem_info *info)
{
- if (!(info->port.flags & ASYNC_INITIALIZED))
+ if (!tty_port_initialized(&info->port))
return;
#ifdef ISDN_DEBUG_MODEM_OPEN
printk(KERN_DEBUG "Shutting down isdnmodem port %d ....\n", info->line);
@@ -1103,7 +1099,7 @@ isdn_tty_shutdown(modem_info *info)
if (info->port.tty)
set_bit(TTY_IO_ERROR, &info->port.tty->flags);
- info->port.flags &= ~ASYNC_INITIALIZED;
+ tty_port_set_initialized(&info->port, 0);
}
/* isdn_tty_write() is the main send-routine. It is called from the upper
@@ -1351,7 +1347,7 @@ isdn_tty_tiocmget(struct tty_struct *tty)
if (isdn_tty_paranoia_check(info, tty->name, __func__))
return -ENODEV;
- if (tty->flags & (1 << TTY_IO_ERROR))
+ if (tty_io_error(tty))
return -EIO;
mutex_lock(&modem_info_mutex);
@@ -1378,7 +1374,7 @@ isdn_tty_tiocmset(struct tty_struct *tty,
if (isdn_tty_paranoia_check(info, tty->name, __func__))
return -ENODEV;
- if (tty->flags & (1 << TTY_IO_ERROR))
+ if (tty_io_error(tty))
return -EIO;
#ifdef ISDN_DEBUG_MODEM_IOCTL
@@ -1419,7 +1415,7 @@ isdn_tty_ioctl(struct tty_struct *tty, uint cmd, ulong arg)
if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_ioctl"))
return -ENODEV;
- if (tty->flags & (1 << TTY_IO_ERROR))
+ if (tty_io_error(tty))
return -EIO;
switch (cmd) {
case TCSBRK: /* SVID version: non-zero arg --> no break */
@@ -1581,7 +1577,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
* interrupt driver to stop checking the data ready bit in the
* line status register.
*/
- if (port->flags & ASYNC_INITIALIZED) {
+ if (tty_port_initialized(port)) {
tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */
/*
* Before we drop DTR, make sure the UART transmitter
@@ -1622,7 +1618,7 @@ isdn_tty_hangup(struct tty_struct *tty)
return;
isdn_tty_shutdown(info);
port->count = 0;
- port->flags &= ~ASYNC_NORMAL_ACTIVE;
+ tty_port_set_active(port, 0);
port->tty = NULL;
wake_up_interruptible(&port->open_wait);
}
@@ -1979,7 +1975,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
#endif
if (
#ifndef FIX_FILE_TRANSFER
- (info->port.flags & ASYNC_NORMAL_ACTIVE) &&
+ tty_port_active(&info->port) &&
#endif
(info->isdn_driver == -1) &&
(info->isdn_channel == -1) &&
@@ -2018,8 +2014,6 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
return (wret == 2) ? 3 : 0;
}
-#define TTY_IS_ACTIVE(info) (info->port.flags & ASYNC_NORMAL_ACTIVE)
-
int
isdn_tty_stat_callback(int i, isdn_ctrl *c)
{
@@ -2077,7 +2071,7 @@ isdn_tty_stat_callback(int i, isdn_ctrl *c)
#ifdef ISDN_TTY_STAT_DEBUG
printk(KERN_DEBUG "tty_STAT_DCONN ttyI%d\n", info->line);
#endif
- if (TTY_IS_ACTIVE(info)) {
+ if (tty_port_active(&info->port)) {
if (info->dialing == 1) {
info->dialing = 2;
return 1;
@@ -2088,7 +2082,7 @@ isdn_tty_stat_callback(int i, isdn_ctrl *c)
#ifdef ISDN_TTY_STAT_DEBUG
printk(KERN_DEBUG "tty_STAT_DHUP ttyI%d\n", info->line);
#endif
- if (TTY_IS_ACTIVE(info)) {
+ if (tty_port_active(&info->port)) {
if (info->dialing == 1)
isdn_tty_modem_result(RESULT_BUSY, info);
if (info->dialing > 1)
@@ -2118,7 +2112,7 @@ isdn_tty_stat_callback(int i, isdn_ctrl *c)
* waiting for it and
* set DCD-bit of its modem-status.
*/
- if (TTY_IS_ACTIVE(info) ||
+ if (tty_port_active(&info->port) ||
(info->port.blocked_open &&
(info->emu.mdmreg[REG_DCD] & BIT_DCD))) {
info->msr |= UART_MSR_DCD;
@@ -2145,7 +2139,7 @@ isdn_tty_stat_callback(int i, isdn_ctrl *c)
#ifdef ISDN_TTY_STAT_DEBUG
printk(KERN_DEBUG "tty_STAT_BHUP ttyI%d\n", info->line);
#endif
- if (TTY_IS_ACTIVE(info)) {
+ if (tty_port_active(&info->port)) {
#ifdef ISDN_DEBUG_MODEM_HUP
printk(KERN_DEBUG "Mhup in ISDN_STAT_BHUP\n");
#endif
@@ -2157,7 +2151,7 @@ isdn_tty_stat_callback(int i, isdn_ctrl *c)
#ifdef ISDN_TTY_STAT_DEBUG
printk(KERN_DEBUG "tty_STAT_NODCH ttyI%d\n", info->line);
#endif
- if (TTY_IS_ACTIVE(info)) {
+ if (tty_port_active(&info->port)) {
if (info->dialing) {
info->dialing = 0;
info->last_l2 = -1;
@@ -2183,14 +2177,14 @@ isdn_tty_stat_callback(int i, isdn_ctrl *c)
return 1;
#ifdef CONFIG_ISDN_TTY_FAX
case ISDN_STAT_FAXIND:
- if (TTY_IS_ACTIVE(info)) {
+ if (tty_port_active(&info->port)) {
isdn_tty_fax_command(info, c);
}
break;
#endif
#ifdef CONFIG_ISDN_AUDIO
case ISDN_STAT_AUDIO:
- if (TTY_IS_ACTIVE(info)) {
+ if (tty_port_active(&info->port)) {
switch (c->parm.num[0]) {
case ISDN_AUDIO_DTMF:
if (info->vonline) {
@@ -2528,7 +2522,7 @@ isdn_tty_modem_result(int code, modem_info *info)
if (info->closing || (!info->port.tty))
return;
- if (info->port.flags & ASYNC_CHECK_CD)
+ if (tty_port_check_carrier(&info->port))
tty_hangup(info->port.tty);
}
}
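The conversions above replace open-coded ASYNC_* flag manipulation with the tty_port accessors: tty_port_initialized()/tty_port_set_initialized(), tty_port_active()/tty_port_set_active(), tty_port_check_carrier()/tty_port_set_check_carrier(), and tty_io_error() for the TTY_IO_ERROR bit. The before/after shape, as a sketch:

	/* before: open-coded flag access */
	if (info->port.flags & ASYNC_INITIALIZED)
		return 0;
	info->port.flags |= ASYNC_INITIALIZED;

	/* after: accessor pair from the tty core */
	if (tty_port_initialized(&info->port))
		return 0;
	tty_port_set_initialized(&info->port, 1);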
diff --git a/drivers/isdn/i4l/isdn_x25iface.c b/drivers/isdn/i4l/isdn_x25iface.c
index e2d4e5823..0c5d8de41 100644
--- a/drivers/isdn/i4l/isdn_x25iface.c
+++ b/drivers/isdn/i4l/isdn_x25iface.c
@@ -278,7 +278,7 @@ static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
case X25_IFACE_DATA:
if (*state == WAN_CONNECTED) {
skb_pull(skb, 1);
- cprot->net_dev->trans_start = jiffies;
+ netif_trans_update(cprot->net_dev);
ret = (cprot->dops->data_req(cprot, skb));
/* prepare for future retransmissions */
if (ret) skb_push(skb, 1);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 225147863..5ae28340a 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -413,10 +413,11 @@ config LEDS_INTEL_SS4200
tristate "LED driver for Intel NAS SS4200 series"
depends on LEDS_CLASS
depends on PCI && DMI
+ depends on X86
help
This option enables support for the Intel SS4200 series of
- Network Attached Storage servers. You may control the hard
- drive or power LEDs on the front panel. Using this driver
+ Network Attached Storage servers. You may control the hard
+ drive or power LEDs on the front panel. Using this driver
can stop the front LED from blinking after startup.
config LEDS_LT3593
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 3495d5d65..3bce44893 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -53,11 +53,12 @@ static void led_timer_function(unsigned long data)
if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
led_set_brightness_nosleep(led_cdev, LED_OFF);
+ led_cdev->flags &= ~LED_BLINK_SW;
return;
}
if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
- led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+ led_cdev->flags &= ~(LED_BLINK_ONESHOT_STOP | LED_BLINK_SW);
return;
}
@@ -151,6 +152,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
return;
}
+ led_cdev->flags |= LED_BLINK_SW;
mod_timer(&led_cdev->blink_timer, jiffies + 1);
}
@@ -219,6 +221,7 @@ void led_stop_software_blink(struct led_classdev *led_cdev)
del_timer_sync(&led_cdev->blink_timer);
led_cdev->blink_delay_on = 0;
led_cdev->blink_delay_off = 0;
+ led_cdev->flags &= ~LED_BLINK_SW;
}
EXPORT_SYMBOL_GPL(led_stop_software_blink);
@@ -226,10 +229,10 @@ void led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
/*
- * In case blinking is on delay brightness setting
+ * If software blink is active, delay brightness setting
* until the next timer tick.
*/
- if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
+ if (led_cdev->flags & LED_BLINK_SW) {
/*
* If we need to disable soft blinking delegate this to the
* work queue task to avoid problems in case we are called
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 218158179..55fa65e1a 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -26,7 +26,7 @@
* Nests outside led_cdev->trigger_lock
*/
static DECLARE_RWSEM(triggers_list_lock);
-static LIST_HEAD(trigger_list);
+LIST_HEAD(trigger_list);
/* Used by LED Class */
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 61143f555..8229f063b 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -127,6 +127,8 @@ static int create_gpio_led(const struct gpio_led *template,
led_dat->cdev.brightness = state ? LED_FULL : LED_OFF;
if (!template->retain_state_suspended)
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ if (template->panic_indicator)
+ led_dat->cdev.flags |= LED_PANIC_INDICATOR;
ret = gpiod_direction_output(led_dat->gpiod, state);
if (ret < 0)
@@ -200,6 +202,8 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
if (fwnode_property_present(child, "retain-state-suspended"))
led.retain_state_suspended = 1;
+ if (fwnode_property_present(child, "panic-indicator"))
+ led.panic_indicator = 1;
ret = create_gpio_led(&led, &priv->leds[priv->num_leds],
dev, NULL);
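With the parsing above, boards can flag an LED as a panic indicator either through the "panic-indicator" firmware property or through platform data. An illustrative platform-data fragment — the LED name is made up:

	static const struct gpio_led board_leds[] = {
		{
			.name		 = "status:red:panic",
			.panic_indicator = 1,	/* new field wired up above */
		},
	};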
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 4783bacb2..a9145aa7f 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -91,6 +91,7 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
struct led_pwm *led, struct device_node *child)
{
struct led_pwm_data *led_data = &priv->leds[priv->num_leds];
+ struct pwm_args pargs;
int ret;
led_data->active_low = led->active_low;
@@ -117,7 +118,15 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
else
led_data->cdev.brightness_set_blocking = led_pwm_set_blocking;
- led_data->period = pwm_get_period(led_data->pwm);
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to the
+ * atomic PWM API.
+ */
+ pwm_apply_args(led_data->pwm);
+
+ pwm_get_args(led_data->pwm, &pargs);
+
+ led_data->period = pargs.period;
if (!led_data->period && (led->pwm_period_ns > 0))
led_data->period = led->pwm_period_ns;
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 046cb7008..732eb86bc 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -101,6 +101,19 @@ static struct dmi_system_id nas_led_whitelist[] __initdata = {
DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
}
},
+ {
+ /*
+ * FUJITSU SIEMENS SCALEO Home Server/SS4200-E
+ * BIOS V090L 12/19/2007
+ */
+ .callback = ss4200_led_dmi_callback,
+ .ident = "Fujitsu Siemens SCALEO Home Server",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SCALEO Home Server"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
+ }
+ },
{}
};
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index c548ea10f..45222a7f4 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -327,6 +327,8 @@ static void set_times(struct tca6507_chip *tca, int bank)
int result;
result = choose_times(tca->bank[bank].ontime, &c1, &c2);
+ if (result < 0)
+ return;
dev_dbg(&tca->client->dev,
"Chose on times %d(%d) %d(%d) for %dms\n",
c1, time_codes[c1],
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index db3f20da7..7d38e6b9a 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -30,5 +30,6 @@ void led_set_brightness_nosleep(struct led_classdev *led_cdev,
extern struct rw_semaphore leds_list_lock;
extern struct list_head leds_list;
+extern struct list_head trigger_list;
#endif /* __LEDS_H_INCLUDED */
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 5bda6a9b5..9893d9113 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -41,6 +41,14 @@ config LEDS_TRIGGER_IDE_DISK
This allows LEDs to be controlled by IDE disk activity.
If unsure, say Y.
+config LEDS_TRIGGER_MTD
+ bool "LED MTD (NAND/NOR) Trigger"
+ depends on MTD
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be controlled by MTD activity.
+ If unsure, say N.
+
config LEDS_TRIGGER_HEARTBEAT
tristate "LED Heartbeat Trigger"
depends on LEDS_TRIGGERS
@@ -108,4 +116,14 @@ config LEDS_TRIGGER_CAMERA
This enables direct flash/torch on/off by the driver, kernel space.
If unsure, say Y.
+config LEDS_TRIGGER_PANIC
+ bool "LED Panic Trigger"
+ depends on LEDS_TRIGGERS
+ help
+ This allows LEDs to be configured to blink on a kernel panic.
+ Enabling this option will allow to mark certain LEDs as panic indicators,
+ allowing to blink them on a kernel panic, even if they are set to
+ a different trigger.
+ If unsure, say Y.
+
endif # LEDS_TRIGGERS
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index 1abf48dac..8cc64a4f4 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
obj-$(CONFIG_LEDS_TRIGGER_ONESHOT) += ledtrig-oneshot.o
obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
+obj-$(CONFIG_LEDS_TRIGGER_MTD) += ledtrig-mtd.o
obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
@@ -8,3 +9,4 @@ obj-$(CONFIG_LEDS_TRIGGER_CPU) += ledtrig-cpu.o
obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
obj-$(CONFIG_LEDS_TRIGGER_CAMERA) += ledtrig-camera.o
+obj-$(CONFIG_LEDS_TRIGGER_PANIC) += ledtrig-panic.o
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index 410c39c62..c9f386213 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -19,6 +19,7 @@
#include <linux/sched.h>
#include <linux/leds.h>
#include <linux/reboot.h>
+#include <linux/suspend.h>
#include "../leds.h"
static int panic_heartbeats;
@@ -154,6 +155,30 @@ static struct led_trigger heartbeat_led_trigger = {
.deactivate = heartbeat_trig_deactivate,
};
+static int heartbeat_pm_notifier(struct notifier_block *nb,
+ unsigned long pm_event, void *unused)
+{
+ int rc;
+
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ led_trigger_unregister(&heartbeat_led_trigger);
+ break;
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ rc = led_trigger_register(&heartbeat_led_trigger);
+ if (rc)
+ pr_err("could not re-register heartbeat trigger\n");
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
static int heartbeat_reboot_notifier(struct notifier_block *nb,
unsigned long code, void *unused)
{
@@ -168,6 +193,10 @@ static int heartbeat_panic_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static struct notifier_block heartbeat_pm_nb = {
+ .notifier_call = heartbeat_pm_notifier,
+};
+
static struct notifier_block heartbeat_reboot_nb = {
.notifier_call = heartbeat_reboot_notifier,
};
@@ -184,12 +213,14 @@ static int __init heartbeat_trig_init(void)
atomic_notifier_chain_register(&panic_notifier_list,
&heartbeat_panic_nb);
register_reboot_notifier(&heartbeat_reboot_nb);
+ register_pm_notifier(&heartbeat_pm_nb);
}
return rc;
}
static void __exit heartbeat_trig_exit(void)
{
+ unregister_pm_notifier(&heartbeat_pm_nb);
unregister_reboot_notifier(&heartbeat_reboot_nb);
atomic_notifier_chain_unregister(&panic_notifier_list,
&heartbeat_panic_nb);
diff --git a/drivers/leds/trigger/ledtrig-ide-disk.c b/drivers/leds/trigger/ledtrig-ide-disk.c
index c02a3ac3c..15123d389 100644
--- a/drivers/leds/trigger/ledtrig-ide-disk.c
+++ b/drivers/leds/trigger/ledtrig-ide-disk.c
@@ -18,10 +18,11 @@
#define BLINK_DELAY 30
DEFINE_LED_TRIGGER(ledtrig_ide);
-static unsigned long ide_blink_delay = BLINK_DELAY;
void ledtrig_ide_activity(void)
{
+ unsigned long ide_blink_delay = BLINK_DELAY;
+
led_trigger_blink_oneshot(ledtrig_ide,
&ide_blink_delay, &ide_blink_delay, 0);
}
diff --git a/drivers/leds/trigger/ledtrig-mtd.c b/drivers/leds/trigger/ledtrig-mtd.c
new file mode 100644
index 000000000..99b5b0a4d
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-mtd.c
@@ -0,0 +1,45 @@
+/*
+ * LED MTD trigger
+ *
+ * Copyright 2016 Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+ *
+ * Based on LED IDE-Disk Activity Trigger
+ *
+ * Copyright 2006 Openedhand Ltd.
+ *
+ * Author: Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+
+#define BLINK_DELAY 30
+
+DEFINE_LED_TRIGGER(ledtrig_mtd);
+DEFINE_LED_TRIGGER(ledtrig_nand);
+
+void ledtrig_mtd_activity(void)
+{
+ unsigned long blink_delay = BLINK_DELAY;
+
+ led_trigger_blink_oneshot(ledtrig_mtd,
+ &blink_delay, &blink_delay, 0);
+ led_trigger_blink_oneshot(ledtrig_nand,
+ &blink_delay, &blink_delay, 0);
+}
+EXPORT_SYMBOL(ledtrig_mtd_activity);
+
+static int __init ledtrig_mtd_init(void)
+{
+ led_trigger_register_simple("mtd", &ledtrig_mtd);
+ led_trigger_register_simple("nand-disk", &ledtrig_nand);
+
+ return 0;
+}
+device_initcall(ledtrig_mtd_init);
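An MTD driver would then pulse the trigger from its I/O path. A minimal sketch — the hook placement is illustrative, since the mtd core call sites are not part of this hunk:

	#include <linux/leds.h>

	static int example_flash_read(void)
	{
		/* blinks both the "mtd" and "nand-disk" LEDs once */
		ledtrig_mtd_activity();
		/* ... perform the actual flash read ... */
		return 0;
	}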
diff --git a/drivers/leds/trigger/ledtrig-panic.c b/drivers/leds/trigger/ledtrig-panic.c
new file mode 100644
index 000000000..d735526b9
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-panic.c
@@ -0,0 +1,77 @@
+/*
+ * Kernel Panic LED Trigger
+ *
+ * Copyright 2016 Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/leds.h>
+#include "../leds.h"
+
+static struct led_trigger *trigger;
+
+/*
+ * This is called in a special context by the atomic panic
+ * notifier. This means the trigger can be changed without
+ * worrying about locking.
+ */
+static void led_trigger_set_panic(struct led_classdev *led_cdev)
+{
+ struct led_trigger *trig;
+
+ list_for_each_entry(trig, &trigger_list, next_trig) {
+ if (strcmp("panic", trig->name))
+ continue;
+ if (led_cdev->trigger)
+ list_del(&led_cdev->trig_list);
+ list_add_tail(&led_cdev->trig_list, &trig->led_cdevs);
+
+ /* Avoid the delayed blink path */
+ led_cdev->blink_delay_on = 0;
+ led_cdev->blink_delay_off = 0;
+
+ led_cdev->trigger = trig;
+ if (trig->activate)
+ trig->activate(led_cdev);
+ break;
+ }
+}
+
+static int led_trigger_panic_notifier(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ struct led_classdev *led_cdev;
+
+ list_for_each_entry(led_cdev, &leds_list, node)
+ if (led_cdev->flags & LED_PANIC_INDICATOR)
+ led_trigger_set_panic(led_cdev);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block led_trigger_panic_nb = {
+ .notifier_call = led_trigger_panic_notifier,
+};
+
+static long led_panic_blink(int state)
+{
+ led_trigger_event(trigger, state ? LED_FULL : LED_OFF);
+ return 0;
+}
+
+static int __init ledtrig_panic_init(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &led_trigger_panic_nb);
+
+ led_trigger_register_simple("panic", &trigger);
+ panic_blink = led_panic_blink;
+ return 0;
+}
+device_initcall(ledtrig_panic_init);
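Drivers opt an LED into this behaviour by setting LED_PANIC_INDICATOR before registration (leds-gpio does so above for the "panic-indicator" property). A minimal sketch with a made-up LED:

	static struct led_classdev example_led = {
		.name	= "status:red:panic",
		.flags	= LED_PANIC_INDICATOR,	/* picked up by the notifier above */
	};

	/* led_classdev_register(dev, &example_led); */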
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index adc162c70..6e9042e3d 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -603,7 +603,7 @@ void __init lguest_arch_host_init(void)
* doing this.
*/
get_online_cpus();
- if (cpu_has_pge) { /* We have a broader idea of "global". */
+ if (boot_cpu_has(X86_FEATURE_PGE)) { /* We have a broader idea of "global". */
/* Remember that this was originally set (for cleanup). */
cpu_had_pge = 1;
/*
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 0dc9a80ad..160c1a683 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -30,23 +30,35 @@
#include <linux/sched/sysctl.h>
#include <uapi/linux/lightnvm.h>
-static LIST_HEAD(nvm_targets);
+static LIST_HEAD(nvm_tgt_types);
static LIST_HEAD(nvm_mgrs);
static LIST_HEAD(nvm_devices);
+static LIST_HEAD(nvm_targets);
static DECLARE_RWSEM(nvm_lock);
+static struct nvm_target *nvm_find_target(const char *name)
+{
+ struct nvm_target *tgt;
+
+ list_for_each_entry(tgt, &nvm_targets, list)
+ if (!strcmp(name, tgt->disk->disk_name))
+ return tgt;
+
+ return NULL;
+}
+
static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
struct nvm_tgt_type *tt;
- list_for_each_entry(tt, &nvm_targets, list)
+ list_for_each_entry(tt, &nvm_tgt_types, list)
if (!strcmp(name, tt->name))
return tt;
return NULL;
}
-int nvm_register_target(struct nvm_tgt_type *tt)
+int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
int ret = 0;
@@ -54,14 +66,14 @@ int nvm_register_target(struct nvm_tgt_type *tt)
if (nvm_find_target_type(tt->name))
ret = -EEXIST;
else
- list_add(&tt->list, &nvm_targets);
+ list_add(&tt->list, &nvm_tgt_types);
up_write(&nvm_lock);
return ret;
}
-EXPORT_SYMBOL(nvm_register_target);
+EXPORT_SYMBOL(nvm_register_tgt_type);
-void nvm_unregister_target(struct nvm_tgt_type *tt)
+void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
if (!tt)
return;
@@ -70,20 +82,20 @@ void nvm_unregister_target(struct nvm_tgt_type *tt)
list_del(&tt->list);
up_write(&nvm_lock);
}
-EXPORT_SYMBOL(nvm_unregister_target);
+EXPORT_SYMBOL(nvm_unregister_tgt_type);
void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
dma_addr_t *dma_handler)
{
- return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags,
+ return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);
-void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list,
+void nvm_dev_dma_free(struct nvm_dev *dev, void *addr,
dma_addr_t dma_handler)
{
- dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler);
+ dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);
@@ -214,8 +226,8 @@ void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
int i;
- if (rqd->nr_pages > 1) {
- for (i = 0; i < rqd->nr_pages; i++)
+ if (rqd->nr_ppas > 1) {
+ for (i = 0; i < rqd->nr_ppas; i++)
rqd->ppa_list[i] = dev_to_generic_addr(dev,
rqd->ppa_list[i]);
} else {
@@ -228,8 +240,8 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
{
int i;
- if (rqd->nr_pages > 1) {
- for (i = 0; i < rqd->nr_pages; i++)
+ if (rqd->nr_ppas > 1) {
+ for (i = 0; i < rqd->nr_ppas; i++)
rqd->ppa_list[i] = generic_to_dev_addr(dev,
rqd->ppa_list[i]);
} else {
@@ -239,33 +251,36 @@ void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
EXPORT_SYMBOL(nvm_generic_to_addr_mode);
int nvm_set_rqd_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd,
- struct ppa_addr *ppas, int nr_ppas)
+ struct ppa_addr *ppas, int nr_ppas, int vblk)
{
int i, plane_cnt, pl_idx;
- if (dev->plane_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
- rqd->nr_pages = 1;
+ if ((!vblk || dev->plane_mode == NVM_PLANE_SINGLE) && nr_ppas == 1) {
+ rqd->nr_ppas = nr_ppas;
rqd->ppa_addr = ppas[0];
return 0;
}
- plane_cnt = dev->plane_mode;
- rqd->nr_pages = plane_cnt * nr_ppas;
-
- if (dev->ops->max_phys_sect < rqd->nr_pages)
- return -EINVAL;
-
+ rqd->nr_ppas = nr_ppas;
rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
if (!rqd->ppa_list) {
pr_err("nvm: failed to allocate dma memory\n");
return -ENOMEM;
}
- for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+ if (!vblk) {
+ for (i = 0; i < nr_ppas; i++)
+ rqd->ppa_list[i] = ppas[i];
+ } else {
+ plane_cnt = dev->plane_mode;
+ rqd->nr_ppas *= plane_cnt;
+
for (i = 0; i < nr_ppas; i++) {
- ppas[i].g.pl = pl_idx;
- rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+ for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+ ppas[i].g.pl = pl_idx;
+ rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppas[i];
+ }
}
}
@@ -292,7 +307,7 @@ int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr *ppas, int nr_ppas)
memset(&rqd, 0, sizeof(struct nvm_rq));
- ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas);
+ ret = nvm_set_rqd_ppalist(dev, &rqd, ppas, nr_ppas, 1);
if (ret)
return ret;
@@ -322,11 +337,10 @@ static void nvm_end_io_sync(struct nvm_rq *rqd)
complete(waiting);
}
-int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
- int opcode, int flags, void *buf, int len)
+int __nvm_submit_ppa(struct nvm_dev *dev, struct nvm_rq *rqd, int opcode,
+ int flags, void *buf, int len)
{
DECLARE_COMPLETION_ONSTACK(wait);
- struct nvm_rq rqd;
struct bio *bio;
int ret;
unsigned long hang_check;
@@ -335,23 +349,21 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
if (IS_ERR_OR_NULL(bio))
return -ENOMEM;
- memset(&rqd, 0, sizeof(struct nvm_rq));
- ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas);
+ nvm_generic_to_addr_mode(dev, rqd);
+
+ rqd->dev = dev;
+ rqd->opcode = opcode;
+ rqd->flags = flags;
+ rqd->bio = bio;
+ rqd->wait = &wait;
+ rqd->end_io = nvm_end_io_sync;
+
+ ret = dev->ops->submit_io(dev, rqd);
if (ret) {
bio_put(bio);
return ret;
}
- rqd.opcode = opcode;
- rqd.bio = bio;
- rqd.wait = &wait;
- rqd.dev = dev;
- rqd.end_io = nvm_end_io_sync;
- rqd.flags = flags;
- nvm_generic_to_addr_mode(dev, &rqd);
-
- ret = dev->ops->submit_io(dev, &rqd);
-
/* Prevent hang_check timer from firing at us during very long I/O */
hang_check = sysctl_hung_task_timeout_secs;
if (hang_check)
@@ -359,12 +371,113 @@ int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
else
wait_for_completion_io(&wait);
+ return rqd->error;
+}
+
+/**
+ * nvm_submit_ppa_list - submit user-defined ppa list to device. The caller
+ * is responsible for freeing the ppa list if necessary.
+ * @dev: device
+ * @ppa_list: user created ppa_list
+ * @nr_ppas: length of ppa_list
+ * @opcode: device opcode
+ * @flags: device flags
+ * @buf: data buffer
+ * @len: data buffer length
+ */
+int nvm_submit_ppa_list(struct nvm_dev *dev, struct ppa_addr *ppa_list,
+ int nr_ppas, int opcode, int flags, void *buf, int len)
+{
+ struct nvm_rq rqd;
+
+ if (dev->ops->max_phys_sect < nr_ppas)
+ return -EINVAL;
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ rqd.nr_ppas = nr_ppas;
+ if (nr_ppas > 1)
+ rqd.ppa_list = ppa_list;
+ else
+ rqd.ppa_addr = ppa_list[0];
+
+ return __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
+}
+EXPORT_SYMBOL(nvm_submit_ppa_list);
+
+/**
+ * nvm_submit_ppa - submit PPAs to device. PPAs will automatically be unfolded
+ * as single, dual, or quad plane PPAs depending on the device type.
+ * @dev: device
+ * @ppa: user created ppa_list
+ * @nr_ppas: length of ppa_list
+ * @opcode: device opcode
+ * @flags: device flags
+ * @buf: data buffer
+ * @len: data buffer length
+ */
+int nvm_submit_ppa(struct nvm_dev *dev, struct ppa_addr *ppa, int nr_ppas,
+ int opcode, int flags, void *buf, int len)
+{
+ struct nvm_rq rqd;
+ int ret;
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+ ret = nvm_set_rqd_ppalist(dev, &rqd, ppa, nr_ppas, 1);
+ if (ret)
+ return ret;
+
+ ret = __nvm_submit_ppa(dev, &rqd, opcode, flags, buf, len);
+
nvm_free_rqd_ppalist(dev, &rqd);
- return rqd.error;
+ return ret;
}
EXPORT_SYMBOL(nvm_submit_ppa);
+/*
+ * folds a bad block list from its plane representation to its virtual
+ * block representation. The fold is done in place and the reduced size
+ * is returned.
+ *
+ * If any plane's status is bad or grown bad, the virtual block is
+ * marked bad. Otherwise, the first plane's state acts as the block state.
+ */
+int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
+{
+ int blk, offset, pl, blktype;
+
+ if (nr_blks != dev->blks_per_lun * dev->plane_mode)
+ return -EINVAL;
+
+ for (blk = 0; blk < dev->blks_per_lun; blk++) {
+ offset = blk * dev->plane_mode;
+ blktype = blks[offset];
+
+ /* Bad blocks on any plane take precedence over other types */
+ for (pl = 0; pl < dev->plane_mode; pl++) {
+ if (blks[offset + pl] &
+ (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
+ blktype = blks[offset + pl];
+ break;
+ }
+ }
+
+ blks[blk] = blktype;
+ }
+
+ return dev->blks_per_lun;
+}
+EXPORT_SYMBOL(nvm_bb_tbl_fold);
+
+int nvm_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa, u8 *blks)
+{
+ ppa = generic_to_dev_addr(dev, ppa);
+
+ return dev->ops->get_bb_tbl(dev, ppa, blks);
+}
+EXPORT_SYMBOL(nvm_get_bb_tbl);
+
static int nvm_init_slc_tbl(struct nvm_dev *dev, struct nvm_id_group *grp)
{
int i;
@@ -414,6 +527,7 @@ static int nvm_core_init(struct nvm_dev *dev)
{
struct nvm_id *id = &dev->identity;
struct nvm_id_group *grp = &id->groups[0];
+ int ret;
/* device values */
dev->nr_chnls = grp->num_ch;
@@ -421,6 +535,8 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->pgs_per_blk = grp->num_pg;
dev->blks_per_lun = grp->num_blk;
dev->nr_planes = grp->num_pln;
+ dev->fpg_size = grp->fpg_sz;
+ dev->pfpg_size = grp->fpg_sz * grp->num_pln;
dev->sec_size = grp->csecs;
dev->oob_size = grp->sos;
dev->sec_per_pg = grp->fpg_sz / grp->csecs;
@@ -430,33 +546,16 @@ static int nvm_core_init(struct nvm_dev *dev)
dev->plane_mode = NVM_PLANE_SINGLE;
dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size;
- if (grp->mtype != 0) {
- pr_err("nvm: memory type not supported\n");
- return -EINVAL;
- }
-
- switch (grp->fmtype) {
- case NVM_ID_FMTYPE_SLC:
- if (nvm_init_slc_tbl(dev, grp))
- return -ENOMEM;
- break;
- case NVM_ID_FMTYPE_MLC:
- if (nvm_init_mlc_tbl(dev, grp))
- return -ENOMEM;
- break;
- default:
- pr_err("nvm: flash type not supported\n");
- return -EINVAL;
- }
-
- if (!dev->lps_per_blk)
- pr_info("nvm: lower page programming table missing\n");
-
if (grp->mpos & 0x020202)
dev->plane_mode = NVM_PLANE_DOUBLE;
if (grp->mpos & 0x040404)
dev->plane_mode = NVM_PLANE_QUAD;
+ if (grp->mtype != 0) {
+ pr_err("nvm: memory type not supported\n");
+ return -EINVAL;
+ }
+
/* calculated values */
dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
@@ -468,11 +567,73 @@ static int nvm_core_init(struct nvm_dev *dev)
sizeof(unsigned long), GFP_KERNEL);
if (!dev->lun_map)
return -ENOMEM;
- INIT_LIST_HEAD(&dev->online_targets);
+
+ switch (grp->fmtype) {
+ case NVM_ID_FMTYPE_SLC:
+ if (nvm_init_slc_tbl(dev, grp)) {
+ ret = -ENOMEM;
+ goto err_fmtype;
+ }
+ break;
+ case NVM_ID_FMTYPE_MLC:
+ if (nvm_init_mlc_tbl(dev, grp)) {
+ ret = -ENOMEM;
+ goto err_fmtype;
+ }
+ break;
+ default:
+ pr_err("nvm: flash type not supported\n");
+ ret = -EINVAL;
+ goto err_fmtype;
+ }
+
mutex_init(&dev->mlock);
spin_lock_init(&dev->lock);
return 0;
+err_fmtype:
+ kfree(dev->lun_map);
+ return ret;
+}
+
+static void nvm_remove_target(struct nvm_target *t)
+{
+ struct nvm_tgt_type *tt = t->type;
+ struct gendisk *tdisk = t->disk;
+ struct request_queue *q = tdisk->queue;
+
+ lockdep_assert_held(&nvm_lock);
+
+ del_gendisk(tdisk);
+ blk_cleanup_queue(q);
+
+ if (tt->exit)
+ tt->exit(tdisk->private_data);
+
+ put_disk(tdisk);
+
+ list_del(&t->list);
+ kfree(t);
+}
+
+static void nvm_free_mgr(struct nvm_dev *dev)
+{
+ struct nvm_target *tgt, *tmp;
+
+ if (!dev->mt)
+ return;
+
+ down_write(&nvm_lock);
+ list_for_each_entry_safe(tgt, tmp, &nvm_targets, list) {
+ if (tgt->dev != dev)
+ continue;
+
+ nvm_remove_target(tgt);
+ }
+ up_write(&nvm_lock);
+
+ dev->mt->unregister_mgr(dev);
+ dev->mt = NULL;
}
static void nvm_free(struct nvm_dev *dev)
@@ -480,10 +641,10 @@ static void nvm_free(struct nvm_dev *dev)
if (!dev)
return;
- if (dev->mt)
- dev->mt->unregister_mgr(dev);
+ nvm_free_mgr(dev);
kfree(dev->lptbl);
+ kfree(dev->lun_map);
}
static int nvm_init(struct nvm_dev *dev)
@@ -530,8 +691,8 @@ err:
static void nvm_exit(struct nvm_dev *dev)
{
- if (dev->ppalist_pool)
- dev->ops->destroy_dma_pool(dev->ppalist_pool);
+ if (dev->dma_pool)
+ dev->ops->destroy_dma_pool(dev->dma_pool);
nvm_free(dev);
pr_info("nvm: successfully unloaded\n");
@@ -565,9 +726,9 @@ int nvm_register(struct request_queue *q, char *disk_name,
}
if (dev->ops->max_phys_sect > 1) {
- dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist");
- if (!dev->ppalist_pool) {
- pr_err("nvm: could not create ppa pool\n");
+ dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
+ if (!dev->dma_pool) {
+ pr_err("nvm: could not create dma pool\n");
ret = -ENOMEM;
goto err_init;
}
@@ -613,7 +774,6 @@ void nvm_unregister(char *disk_name)
up_write(&nvm_lock);
nvm_exit(dev);
- kfree(dev->lun_map);
kfree(dev);
}
EXPORT_SYMBOL(nvm_unregister);
@@ -645,12 +805,11 @@ static int nvm_create_target(struct nvm_dev *dev,
return -EINVAL;
}
- list_for_each_entry(t, &dev->online_targets, list) {
- if (!strcmp(create->tgtname, t->disk->disk_name)) {
- pr_err("nvm: target name already exists.\n");
- up_write(&nvm_lock);
- return -EINVAL;
- }
+ t = nvm_find_target(create->tgtname);
+ if (t) {
+ pr_err("nvm: target name already exists.\n");
+ up_write(&nvm_lock);
+ return -EINVAL;
}
up_write(&nvm_lock);
@@ -688,9 +847,10 @@ static int nvm_create_target(struct nvm_dev *dev,
t->type = tt;
t->disk = tdisk;
+ t->dev = dev;
down_write(&nvm_lock);
- list_add_tail(&t->list, &dev->online_targets);
+ list_add_tail(&t->list, &nvm_targets);
up_write(&nvm_lock);
return 0;
@@ -703,26 +863,6 @@ err_t:
return -ENOMEM;
}
-static void nvm_remove_target(struct nvm_target *t)
-{
- struct nvm_tgt_type *tt = t->type;
- struct gendisk *tdisk = t->disk;
- struct request_queue *q = tdisk->queue;
-
- lockdep_assert_held(&nvm_lock);
-
- del_gendisk(tdisk);
- blk_cleanup_queue(q);
-
- if (tt->exit)
- tt->exit(tdisk->private_data);
-
- put_disk(tdisk);
-
- list_del(&t->list);
- kfree(t);
-}
-
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
struct nvm_dev *dev;
@@ -753,26 +893,19 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
static int __nvm_configure_remove(struct nvm_ioctl_remove *remove)
{
- struct nvm_target *t = NULL;
- struct nvm_dev *dev;
- int ret = -1;
+ struct nvm_target *t;
down_write(&nvm_lock);
- list_for_each_entry(dev, &nvm_devices, devices)
- list_for_each_entry(t, &dev->online_targets, list) {
- if (!strcmp(remove->tgtname, t->disk->disk_name)) {
- nvm_remove_target(t);
- ret = 0;
- break;
- }
- }
- up_write(&nvm_lock);
-
- if (ret) {
+ t = nvm_find_target(remove->tgtname);
+ if (!t) {
pr_err("nvm: target \"%s\" doesn't exist.\n", remove->tgtname);
+ up_write(&nvm_lock);
return -EINVAL;
}
+ nvm_remove_target(t);
+ up_write(&nvm_lock);
+
return 0;
}
@@ -921,7 +1054,7 @@ static long nvm_ioctl_info(struct file *file, void __user *arg)
info->version[2] = NVM_VERSION_PATCH;
down_write(&nvm_lock);
- list_for_each_entry(tt, &nvm_targets, list) {
+ list_for_each_entry(tt, &nvm_tgt_types, list) {
struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];
tgt->version[0] = tt->version[0];
@@ -1118,10 +1251,7 @@ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
return -EINVAL;
}
- if (dev->mt) {
- dev->mt->unregister_mgr(dev);
- dev->mt = NULL;
- }
+ nvm_free_mgr(dev);
if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
return nvm_dev_factory(dev, fact.flags);
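The two new helpers pair up: nvm_get_bb_tbl() fetches the per-plane bad-block table for a LUN and nvm_bb_tbl_fold() collapses it in place to one state per virtual block. A caller sketch modeled on the gennvm changes below (buffer sizing follows the fold's own precondition):

	int example_load_bb(struct nvm_dev *dev, struct ppa_addr ppa)
	{
		int nr_blks = dev->blks_per_lun * dev->plane_mode;
		u8 *blks;
		int ret;

		blks = kmalloc(nr_blks, GFP_KERNEL);
		if (!blks)
			return -ENOMEM;

		ret = nvm_get_bb_tbl(dev, ppa, blks);	/* per-plane states */
		if (ret)
			goto out;

		/* on success, returns dev->blks_per_lun folded entries */
		ret = nvm_bb_tbl_fold(dev, blks, nr_blks);
	out:
		kfree(blks);
		return ret;
	}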
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 72e124a39..ec9fb6876 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -129,27 +129,25 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
return 0;
}
-static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
- void *private)
+static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa,
+ u8 *blks, int nr_blks)
{
- struct gen_nvm *gn = private;
struct nvm_dev *dev = gn->dev;
struct gen_lun *lun;
struct nvm_block *blk;
int i;
+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
- for (i = 0; i < nr_blocks; i++) {
+ for (i = 0; i < nr_blks; i++) {
if (blks[i] == 0)
continue;
blk = &lun->vlun.blocks[i];
- if (!blk) {
- pr_err("gennvm: BB data is out of bounds.\n");
- return -EINVAL;
- }
-
list_move_tail(&blk->list, &lun->bb_list);
lun->vlun.nr_bad_blocks++;
lun->vlun.nr_free_blocks--;
@@ -216,13 +214,21 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
struct gen_lun *lun;
struct nvm_block *block;
sector_t lun_iter, blk_iter, cur_block_id = 0;
- int ret;
+ int ret, nr_blks;
+ u8 *blks;
+
+ nr_blks = dev->blks_per_lun * dev->plane_mode;
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;
gennvm_for_each_lun(gn, lun, lun_iter) {
lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
dev->blks_per_lun);
- if (!lun->vlun.blocks)
+ if (!lun->vlun.blocks) {
+ kfree(blks);
return -ENOMEM;
+ }
for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
block = &lun->vlun.blocks[blk_iter];
@@ -246,14 +252,15 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
ppa.ppa = 0;
ppa.g.ch = lun->vlun.chnl_id;
- ppa.g.lun = lun->vlun.id;
- ppa = generic_to_dev_addr(dev, ppa);
+ ppa.g.lun = lun->vlun.lun_id;
+
+ ret = nvm_get_bb_tbl(dev, ppa, blks);
+ if (ret)
+ pr_err("gennvm: could not get BB table\n");
- ret = dev->ops->get_bb_tbl(dev, ppa,
- dev->blks_per_lun,
- gennvm_block_bb, gn);
+ ret = gennvm_block_bb(gn, ppa, blks, nr_blks);
if (ret)
- pr_err("gennvm: could not read BB table\n");
+ pr_err("gennvm: BB table map failed\n");
}
}
@@ -266,6 +273,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
}
}
+ kfree(blks);
return 0;
}
@@ -399,64 +407,60 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
spin_unlock(&vlun->lock);
}
-static void gennvm_blk_set_type(struct nvm_dev *dev, struct ppa_addr *ppa,
- int type)
+static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
{
struct gen_nvm *gn = dev->mp;
struct gen_lun *lun;
struct nvm_block *blk;
- if (unlikely(ppa->g.ch > dev->nr_chnls ||
- ppa->g.lun > dev->luns_per_chnl ||
- ppa->g.blk > dev->blks_per_lun)) {
+ pr_debug("gennvm: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
+ ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type);
+
+ if (unlikely(ppa.g.ch > dev->nr_chnls ||
+ ppa.g.lun > dev->luns_per_chnl ||
+ ppa.g.blk > dev->blks_per_lun)) {
WARN_ON_ONCE(1);
pr_err("gennvm: ppa broken (ch: %u > %u lun: %u > %u blk: %u > %u",
- ppa->g.ch, dev->nr_chnls,
- ppa->g.lun, dev->luns_per_chnl,
- ppa->g.blk, dev->blks_per_lun);
+ ppa.g.ch, dev->nr_chnls,
+ ppa.g.lun, dev->luns_per_chnl,
+ ppa.g.blk, dev->blks_per_lun);
return;
}
- lun = &gn->luns[ppa->g.lun * ppa->g.ch];
- blk = &lun->vlun.blocks[ppa->g.blk];
+ lun = &gn->luns[ppa.g.lun * ppa.g.ch];
+ blk = &lun->vlun.blocks[ppa.g.blk];
/* will be moved to bb list on put_blk from target */
blk->state = type;
}
-/* mark block bad. It is expected the target recover from the error. */
+/*
+ * mark block bad in gennvm. It is expected that the target recovers separately
+ */
static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
- int i;
-
- if (!dev->ops->set_bb_tbl)
- return;
-
- if (dev->ops->set_bb_tbl(dev, rqd, 1))
- return;
+ int bit = -1;
+ int max_secs = dev->ops->max_phys_sect;
+ void *comp_bits = &rqd->ppa_status;
nvm_addr_to_generic_mode(dev, rqd);
/* look up blocks and mark them as bad */
- if (rqd->nr_pages > 1)
- for (i = 0; i < rqd->nr_pages; i++)
- gennvm_blk_set_type(dev, &rqd->ppa_list[i],
- NVM_BLK_ST_BAD);
- else
- gennvm_blk_set_type(dev, &rqd->ppa_addr, NVM_BLK_ST_BAD);
+ if (rqd->nr_ppas == 1) {
+ gennvm_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
+ return;
+ }
+
+ while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
+ gennvm_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
}
static void gennvm_end_io(struct nvm_rq *rqd)
{
struct nvm_tgt_instance *ins = rqd->ins;
- switch (rqd->error) {
- case NVM_RSP_SUCCESS:
- case NVM_RSP_ERR_EMPTYPAGE:
- break;
- case NVM_RSP_ERR_FAILWRITE:
+ if (rqd->error == NVM_RSP_ERR_FAILWRITE)
gennvm_mark_blk_bad(rqd->dev, rqd);
- }
ins->tt->end_io(rqd);
}
@@ -539,6 +543,8 @@ static struct nvmm_type gennvm = {
.submit_io = gennvm_submit_io,
.erase_blk = gennvm_erase_blk,
+ .mark_blk = gennvm_mark_blk,
+
.get_lun = gennvm_get_lun,
.reserve_lun = gennvm_reserve_lun,
.release_lun = gennvm_release_lun,
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 3ab6495c3..2103e97a9 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -405,9 +405,8 @@ static void rrpc_block_gc(struct work_struct *work)
ws_gc);
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
+ struct rrpc_lun *rlun = rblk->rlun;
struct nvm_dev *dev = rrpc->dev;
- struct nvm_lun *lun = rblk->parent->lun;
- struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
mempool_free(gcb, rrpc->gcb_pool);
pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
@@ -508,9 +507,9 @@ static void rrpc_gc_queue(struct work_struct *work)
ws_gc);
struct rrpc *rrpc = gcb->rrpc;
struct rrpc_block *rblk = gcb->rblk;
+ struct rrpc_lun *rlun = rblk->rlun;
struct nvm_lun *lun = rblk->parent->lun;
struct nvm_block *blk = rblk->parent;
- struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
spin_lock(&rlun->lock);
list_add_tail(&rblk->prio, &rlun->prio_list);
@@ -696,7 +695,7 @@ static void rrpc_end_io(struct nvm_rq *rqd)
{
struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
- uint8_t npages = rqd->nr_pages;
+ uint8_t npages = rqd->nr_ppas;
sector_t laddr = rrpc_get_laddr(rqd->bio) - npages;
if (bio_data_dir(rqd->bio) == WRITE)
@@ -711,8 +710,6 @@ static void rrpc_end_io(struct nvm_rq *rqd)
if (npages > 1)
nvm_dev_dma_free(rrpc->dev, rqd->ppa_list, rqd->dma_ppa_list);
- if (rqd->metadata)
- nvm_dev_dma_free(rrpc->dev, rqd->metadata, rqd->dma_metadata);
mempool_free(rqd, rrpc->rq_pool);
}
@@ -886,7 +883,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
bio_get(bio);
rqd->bio = bio;
rqd->ins = &rrpc->instance;
- rqd->nr_pages = nr_pages;
+ rqd->nr_ppas = nr_pages;
rrq->flags = flags;
err = nvm_submit_io(rrpc->dev, rqd);
@@ -895,7 +892,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
bio_put(bio);
if (!(flags & NVM_IOTYPE_GC)) {
rrpc_unlock_rq(rrpc, rqd);
- if (rqd->nr_pages > 1)
+ if (rqd->nr_ppas > 1)
nvm_dev_dma_free(rrpc->dev,
rqd->ppa_list, rqd->dma_ppa_list);
}
@@ -1039,11 +1036,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
{
struct nvm_dev *dev = rrpc->dev;
sector_t i;
- u64 slba;
int ret;
- slba = rrpc->soffset >> (ilog2(dev->sec_size) - 9);
-
rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
if (!rrpc->trans_map)
return -ENOMEM;
@@ -1065,8 +1059,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
return 0;
/* Bring up the mapping table from device */
- ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_sects, rrpc_l2p_update,
- rrpc);
+ ret = dev->ops->get_l2p_tbl(dev, rrpc->soffset, rrpc->nr_sects,
+ rrpc_l2p_update, rrpc);
if (ret) {
pr_err("nvm: rrpc: could not read L2P table.\n");
return -EINVAL;
@@ -1207,10 +1201,6 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
spin_lock_init(&rlun->lock);
-
- rrpc->total_blocks += dev->blks_per_lun;
- rrpc->nr_sects += dev->sec_per_lun;
-
}
return 0;
@@ -1224,18 +1214,24 @@ static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
struct nvm_dev *dev = rrpc->dev;
struct nvmm_type *mt = dev->mt;
sector_t size = rrpc->nr_sects * dev->sec_size;
+ int ret;
size >>= 9;
- return mt->get_area(dev, begin, size);
+ ret = mt->get_area(dev, begin, size);
+ if (!ret)
+ *begin >>= (ilog2(dev->sec_size) - 9);
+
+ return ret;
}
static void rrpc_area_free(struct rrpc *rrpc)
{
struct nvm_dev *dev = rrpc->dev;
struct nvmm_type *mt = dev->mt;
+ sector_t begin = rrpc->soffset << (ilog2(dev->sec_size) - 9);
- mt->put_area(dev, rrpc->soffset);
+ mt->put_area(dev, begin);
}
static void rrpc_free(struct rrpc *rrpc)
@@ -1268,7 +1264,7 @@ static sector_t rrpc_capacity(void *private)
sector_t reserved, provisioned;
/* cur, gc, and two emergency blocks for each lun */
- reserved = rrpc->nr_luns * dev->max_pages_per_blk * 4;
+ reserved = rrpc->nr_luns * dev->sec_per_blk * 4;
provisioned = rrpc->nr_sects - reserved;
if (reserved > rrpc->nr_sects) {
@@ -1388,6 +1384,8 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
rrpc->nr_luns = lun_end - lun_begin + 1;
+ rrpc->total_blocks = (unsigned long)dev->blks_per_lun * rrpc->nr_luns;
+ rrpc->nr_sects = (unsigned long long)dev->sec_per_lun * rrpc->nr_luns;
/* simple round-robin strategy */
atomic_set(&rrpc->next_lun, -1);
@@ -1468,12 +1466,12 @@ static struct nvm_tgt_type tt_rrpc = {
static int __init rrpc_module_init(void)
{
- return nvm_register_target(&tt_rrpc);
+ return nvm_register_tgt_type(&tt_rrpc);
}
static void rrpc_module_exit(void)
{
- nvm_unregister_target(&tt_rrpc);
+ nvm_unregister_tgt_type(&tt_rrpc);
}
module_init(rrpc_module_init);
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 2653484a3..87e84b5fc 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -251,7 +251,7 @@ static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
- uint8_t pages = rqd->nr_pages;
+ uint8_t pages = rqd->nr_ppas;
BUG_ON((r->l_start + pages) > rrpc->nr_sects);
diff --git a/drivers/lightnvm/sysblk.c b/drivers/lightnvm/sysblk.c
index 321de1f15..994697ac7 100644
--- a/drivers/lightnvm/sysblk.c
+++ b/drivers/lightnvm/sysblk.c
@@ -93,12 +93,51 @@ void nvm_setup_sysblk_scan(struct nvm_dev *dev, struct sysblk_scan *s,
s->nr_rows = nvm_setup_sysblks(dev, sysblk_ppas);
}
-static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
- void *private)
+static int sysblk_get_free_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks,
+ struct sysblk_scan *s)
+{
+ struct ppa_addr *sppa;
+ int i, blkid = 0;
+
+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
+ for (i = 0; i < nr_blks; i++) {
+ if (blks[i] == NVM_BLK_T_HOST)
+ return -EEXIST;
+
+ if (blks[i] != NVM_BLK_T_FREE)
+ continue;
+
+ sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
+ sppa->g.ch = ppa.g.ch;
+ sppa->g.lun = ppa.g.lun;
+ sppa->g.blk = i;
+ s->nr_ppas++;
+ blkid++;
+
+ pr_debug("nvm: use (%u %u %u) as sysblk\n",
+ sppa->g.ch, sppa->g.lun, sppa->g.blk);
+ if (blkid > MAX_BLKS_PR_SYSBLK - 1)
+ return 0;
+ }
+
+ pr_err("nvm: sysblk failed get sysblk\n");
+ return -EINVAL;
+}
+
+static int sysblk_get_host_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks,
+ struct sysblk_scan *s)
{
- struct sysblk_scan *s = private;
int i, nr_sysblk = 0;
+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
for (i = 0; i < nr_blks; i++) {
if (blks[i] != NVM_BLK_T_HOST)
continue;
@@ -119,26 +158,42 @@ static int sysblk_get_host_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
}
static int nvm_get_all_sysblks(struct nvm_dev *dev, struct sysblk_scan *s,
- struct ppa_addr *ppas, nvm_bb_update_fn *fn)
+ struct ppa_addr *ppas, int get_free)
{
- struct ppa_addr dppa;
- int i, ret;
+ int i, nr_blks, ret = 0;
+ u8 *blks;
s->nr_ppas = 0;
+ nr_blks = dev->blks_per_lun * dev->plane_mode;
+
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;
for (i = 0; i < s->nr_rows; i++) {
- dppa = generic_to_dev_addr(dev, ppas[i]);
s->row = i;
- ret = dev->ops->get_bb_tbl(dev, dppa, dev->blks_per_lun, fn, s);
+ ret = nvm_get_bb_tbl(dev, ppas[i], blks);
if (ret) {
pr_err("nvm: failed bb tbl for ppa (%u %u)\n",
ppas[i].g.ch,
ppas[i].g.blk);
- return ret;
+ goto err_get;
}
+
+ if (get_free)
+ ret = sysblk_get_free_blks(dev, ppas[i], blks, nr_blks,
+ s);
+ else
+ ret = sysblk_get_host_blks(dev, ppas[i], blks, nr_blks,
+ s);
+
+ if (ret)
+ goto err_get;
}
+err_get:
+ kfree(blks);
return ret;
}
@@ -154,13 +209,12 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
struct nvm_system_block *sblk)
{
struct nvm_system_block *cur;
- int pg, cursz, ret, found = 0;
+ int pg, ret, found = 0;
/* the full buffer for a flash page is allocated. Only the first
* part of it contains the system block information
*/
- cursz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
- cur = kmalloc(cursz, GFP_KERNEL);
+ cur = kmalloc(dev->pfpg_size, GFP_KERNEL);
if (!cur)
return -ENOMEM;
@@ -169,7 +223,7 @@ static int nvm_scan_block(struct nvm_dev *dev, struct ppa_addr *ppa,
ppa->g.pg = ppa_to_slc(dev, pg);
ret = nvm_submit_ppa(dev, ppa, 1, NVM_OP_PREAD, NVM_IO_SLC_MODE,
- cur, cursz);
+ cur, dev->pfpg_size);
if (ret) {
if (ret == NVM_RSP_ERR_EMPTYPAGE) {
pr_debug("nvm: sysblk scan empty ppa (%u %u %u %u)\n",
@@ -223,10 +277,10 @@ static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
memset(&rqd, 0, sizeof(struct nvm_rq));
- nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas);
+ nvm_set_rqd_ppalist(dev, &rqd, s->ppas, s->nr_ppas, 1);
nvm_generic_to_addr_mode(dev, &rqd);
- ret = dev->ops->set_bb_tbl(dev, &rqd, type);
+ ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
nvm_free_rqd_ppalist(dev, &rqd);
if (ret) {
pr_err("nvm: sysblk failed bb mark\n");
@@ -236,50 +290,17 @@ static int nvm_set_bb_tbl(struct nvm_dev *dev, struct sysblk_scan *s, int type)
return 0;
}
-static int sysblk_get_free_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
- void *private)
-{
- struct sysblk_scan *s = private;
- struct ppa_addr *sppa;
- int i, blkid = 0;
-
- for (i = 0; i < nr_blks; i++) {
- if (blks[i] == NVM_BLK_T_HOST)
- return -EEXIST;
-
- if (blks[i] != NVM_BLK_T_FREE)
- continue;
-
- sppa = &s->ppas[scan_ppa_idx(s->row, blkid)];
- sppa->g.ch = ppa.g.ch;
- sppa->g.lun = ppa.g.lun;
- sppa->g.blk = i;
- s->nr_ppas++;
- blkid++;
-
- pr_debug("nvm: use (%u %u %u) as sysblk\n",
- sppa->g.ch, sppa->g.lun, sppa->g.blk);
- if (blkid > MAX_BLKS_PR_SYSBLK - 1)
- return 0;
- }
-
- pr_err("nvm: sysblk failed get sysblk\n");
- return -EINVAL;
-}
-
static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
struct sysblk_scan *s)
{
struct nvm_system_block nvmsb;
void *buf;
- int i, sect, ret, bufsz;
+ int i, sect, ret = 0;
struct ppa_addr *ppas;
nvm_cpu_to_sysblk(&nvmsb, info);
- /* buffer for flash page */
- bufsz = dev->sec_size * dev->sec_per_pg * dev->nr_planes;
- buf = kzalloc(bufsz, GFP_KERNEL);
+ buf = kzalloc(dev->pfpg_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, &nvmsb, sizeof(struct nvm_system_block));
@@ -309,7 +330,7 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
}
ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PWRITE,
- NVM_IO_SLC_MODE, buf, bufsz);
+ NVM_IO_SLC_MODE, buf, dev->pfpg_size);
if (ret) {
pr_err("nvm: sysblk failed program (%u %u %u)\n",
ppas[0].g.ch,
@@ -319,7 +340,7 @@ static int nvm_write_and_verify(struct nvm_dev *dev, struct nvm_sb_info *info,
}
ret = nvm_submit_ppa(dev, ppas, dev->sec_per_pg, NVM_OP_PREAD,
- NVM_IO_SLC_MODE, buf, bufsz);
+ NVM_IO_SLC_MODE, buf, dev->pfpg_size);
if (ret) {
pr_err("nvm: sysblk failed read (%u %u %u)\n",
ppas[0].g.ch,
@@ -388,7 +409,7 @@ int nvm_get_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
if (ret)
goto err_sysblk;
@@ -448,7 +469,7 @@ int nvm_update_sysblock(struct nvm_dev *dev, struct nvm_sb_info *new)
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_host_blks);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
if (ret)
goto err_sysblk;
@@ -546,7 +567,7 @@ int nvm_init_sysblock(struct nvm_dev *dev, struct nvm_sb_info *info)
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, sysblk_get_free_blks);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 1);
if (ret)
goto err_mark;
@@ -561,52 +582,49 @@ err_mark:
return ret;
}
-struct factory_blks {
- struct nvm_dev *dev;
- int flags;
- unsigned long *blks;
-};
-
static int factory_nblks(int nblks)
{
/* Round up to nearest BITS_PER_LONG */
return (nblks + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
}
-static unsigned int factory_blk_offset(struct nvm_dev *dev, int ch, int lun)
+static unsigned int factory_blk_offset(struct nvm_dev *dev, struct ppa_addr ppa)
{
int nblks = factory_nblks(dev->blks_per_lun);
- return ((ch * dev->luns_per_chnl * nblks) + (lun * nblks)) /
+ return ((ppa.g.ch * dev->luns_per_chnl * nblks) + (ppa.g.lun * nblks)) /
BITS_PER_LONG;
}
-static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
- void *private)
+static int nvm_factory_blks(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks,
+ unsigned long *blk_bitmap, int flags)
{
- struct factory_blks *f = private;
- struct nvm_dev *dev = f->dev;
int i, lunoff;
- lunoff = factory_blk_offset(dev, ppa.g.ch, ppa.g.lun);
+ nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
+ if (nr_blks < 0)
+ return nr_blks;
+
+ lunoff = factory_blk_offset(dev, ppa);
/* unset bits correspond to blocks that must be erased */
for (i = 0; i < nr_blks; i++) {
switch (blks[i]) {
case NVM_BLK_T_FREE:
- if (f->flags & NVM_FACTORY_ERASE_ONLY_USER)
- set_bit(i, &f->blks[lunoff]);
+ if (flags & NVM_FACTORY_ERASE_ONLY_USER)
+ set_bit(i, &blk_bitmap[lunoff]);
break;
case NVM_BLK_T_HOST:
- if (!(f->flags & NVM_FACTORY_RESET_HOST_BLKS))
- set_bit(i, &f->blks[lunoff]);
+ if (!(flags & NVM_FACTORY_RESET_HOST_BLKS))
+ set_bit(i, &blk_bitmap[lunoff]);
break;
case NVM_BLK_T_GRWN_BAD:
- if (!(f->flags & NVM_FACTORY_RESET_GRWN_BBLKS))
- set_bit(i, &f->blks[lunoff]);
+ if (!(flags & NVM_FACTORY_RESET_GRWN_BBLKS))
+ set_bit(i, &blk_bitmap[lunoff]);
break;
default:
- set_bit(i, &f->blks[lunoff]);
+ set_bit(i, &blk_bitmap[lunoff]);
break;
}
}
@@ -615,7 +633,7 @@ static int nvm_factory_blks(struct ppa_addr ppa, int nr_blks, u8 *blks,
}
static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
- int max_ppas, struct factory_blks *f)
+ int max_ppas, unsigned long *blk_bitmap)
{
struct ppa_addr ppa;
int ch, lun, blkid, idx, done = 0, ppa_cnt = 0;
@@ -623,111 +641,95 @@ static int nvm_fact_get_blks(struct nvm_dev *dev, struct ppa_addr *erase_list,
while (!done) {
done = 1;
- for (ch = 0; ch < dev->nr_chnls; ch++) {
- for (lun = 0; lun < dev->luns_per_chnl; lun++) {
- idx = factory_blk_offset(dev, ch, lun);
- offset = &f->blks[idx];
-
- blkid = find_first_zero_bit(offset,
- dev->blks_per_lun);
- if (blkid >= dev->blks_per_lun)
- continue;
- set_bit(blkid, offset);
-
- ppa.ppa = 0;
- ppa.g.ch = ch;
- ppa.g.lun = lun;
- ppa.g.blk = blkid;
- pr_debug("nvm: erase ppa (%u %u %u)\n",
- ppa.g.ch,
- ppa.g.lun,
- ppa.g.blk);
-
- erase_list[ppa_cnt] = ppa;
- ppa_cnt++;
- done = 0;
-
- if (ppa_cnt == max_ppas)
- return ppa_cnt;
- }
+ nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
+ idx = factory_blk_offset(dev, ppa);
+ offset = &blk_bitmap[idx];
+
+ blkid = find_first_zero_bit(offset,
+ dev->blks_per_lun);
+ if (blkid >= dev->blks_per_lun)
+ continue;
+ set_bit(blkid, offset);
+
+ ppa.g.blk = blkid;
+ pr_debug("nvm: erase ppa (%u %u %u)\n",
+ ppa.g.ch,
+ ppa.g.lun,
+ ppa.g.blk);
+
+ erase_list[ppa_cnt] = ppa;
+ ppa_cnt++;
+ done = 0;
+
+ if (ppa_cnt == max_ppas)
+ return ppa_cnt;
}
}
return ppa_cnt;
}
-static int nvm_fact_get_bb_tbl(struct nvm_dev *dev, struct ppa_addr ppa,
- nvm_bb_update_fn *fn, void *priv)
+static int nvm_fact_select_blks(struct nvm_dev *dev, unsigned long *blk_bitmap,
+ int flags)
{
- struct ppa_addr dev_ppa;
- int ret;
+ struct ppa_addr ppa;
+ int ch, lun, nr_blks, ret = 0;
+ u8 *blks;
- dev_ppa = generic_to_dev_addr(dev, ppa);
+ nr_blks = dev->blks_per_lun * dev->plane_mode;
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;
- ret = dev->ops->get_bb_tbl(dev, dev_ppa, dev->blks_per_lun, fn, priv);
- if (ret)
- pr_err("nvm: failed bb tbl for ch%u lun%u\n",
+ nvm_for_each_lun_ppa(dev, ppa, ch, lun) {
+ ret = nvm_get_bb_tbl(dev, ppa, blks);
+ if (ret)
+ pr_err("nvm: failed bb tbl for ch%u lun%u\n",
ppa.g.ch, ppa.g.blk);
- return ret;
-}
-static int nvm_fact_select_blks(struct nvm_dev *dev, struct factory_blks *f)
-{
- int ch, lun, ret;
- struct ppa_addr ppa;
-
- ppa.ppa = 0;
- for (ch = 0; ch < dev->nr_chnls; ch++) {
- for (lun = 0; lun < dev->luns_per_chnl; lun++) {
- ppa.g.ch = ch;
- ppa.g.lun = lun;
-
- ret = nvm_fact_get_bb_tbl(dev, ppa, nvm_factory_blks,
- f);
- if (ret)
- return ret;
- }
+ ret = nvm_factory_blks(dev, ppa, blks, nr_blks, blk_bitmap,
+ flags);
+ if (ret)
+ break;
}
- return 0;
+ kfree(blks);
+ return ret;
}
int nvm_dev_factory(struct nvm_dev *dev, int flags)
{
- struct factory_blks f;
struct ppa_addr *ppas;
int ppa_cnt, ret = -ENOMEM;
int max_ppas = dev->ops->max_phys_sect / dev->nr_planes;
struct ppa_addr sysblk_ppas[MAX_SYSBLKS];
struct sysblk_scan s;
+ unsigned long *blk_bitmap;
- f.blks = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
+ blk_bitmap = kzalloc(factory_nblks(dev->blks_per_lun) * dev->nr_luns,
GFP_KERNEL);
- if (!f.blks)
+ if (!blk_bitmap)
return ret;
ppas = kcalloc(max_ppas, sizeof(struct ppa_addr), GFP_KERNEL);
if (!ppas)
goto err_blks;
- f.dev = dev;
- f.flags = flags;
-
/* create list of blks to be erased */
- ret = nvm_fact_select_blks(dev, &f);
+ ret = nvm_fact_select_blks(dev, blk_bitmap, flags);
if (ret)
goto err_ppas;
/* continue to erase until the list of blks is empty */
- while ((ppa_cnt = nvm_fact_get_blks(dev, ppas, max_ppas, &f)) > 0)
+ while ((ppa_cnt =
+ nvm_fact_get_blks(dev, ppas, max_ppas, blk_bitmap)) > 0)
nvm_erase_ppa(dev, ppas, ppa_cnt);
/* mark host reserved blocks free */
if (flags & NVM_FACTORY_RESET_HOST_BLKS) {
nvm_setup_sysblk_scan(dev, &s, sysblk_ppas);
mutex_lock(&dev->mlock);
- ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas,
- sysblk_get_host_blks);
+ ret = nvm_get_all_sysblks(dev, &s, sysblk_ppas, 0);
if (!ret)
ret = nvm_set_bb_tbl(dev, &s, NVM_BLK_T_FREE);
mutex_unlock(&dev->mlock);
@@ -735,7 +737,7 @@ int nvm_dev_factory(struct nvm_dev *dev, int flags)
err_ppas:
kfree(ppas);
err_blks:
- kfree(f.blks);
+ kfree(blk_bitmap);
return ret;
}
EXPORT_SYMBOL(nvm_dev_factory);
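
nvm_fact_get_blks() above drains the per-LUN bitmaps round-robin: on each pass it claims the first clear bit of every LUN, so erases are interleaved across LUNs rather than sweeping one LUN at a time. A standalone sketch of that loop follows, with a simplified stand-in for the kernel's find_first_zero_bit()/set_bit(); the sizes and the bitmap contents are invented for illustration.

#include <stdio.h>

#define NR_LUNS		2
#define BLKS_PER_LUN	8

/* simplified stand-in for the kernel's find_first_zero_bit() */
static int first_zero_bit(unsigned long map, int nbits)
{
	int i;

	for (i = 0; i < nbits; i++)
		if (!(map & (1UL << i)))
			return i;
	return nbits;
}

int main(void)
{
	/* a set bit means "keep"; clear bits are erase candidates */
	unsigned long bitmap[NR_LUNS] = { 0x0f, 0xf0 };
	int lun, blk, done = 0;

	while (!done) {
		done = 1;
		for (lun = 0; lun < NR_LUNS; lun++) {
			blk = first_zero_bit(bitmap[lun], BLKS_PER_LUN);
			if (blk >= BLKS_PER_LUN)
				continue;	/* this LUN is exhausted */
			bitmap[lun] |= 1UL << blk;	/* claim the block */
			printf("erase lun %d blk %d\n", lun, blk);
			done = 0;	/* found work, take another pass */
		}
	}
	return 0;
}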
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index caaec654d..465c52219 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -154,8 +154,8 @@ static void rackmeter_do_pause(struct rackmeter *rm, int pause)
DBDMA_DO_STOP(rm->dma_regs);
return;
}
- memset(rdma->buf1, 0, SAMPLE_COUNT & sizeof(u32));
- memset(rdma->buf2, 0, SAMPLE_COUNT & sizeof(u32));
+ memset(rdma->buf1, 0, ARRAY_SIZE(rdma->buf1));
+ memset(rdma->buf2, 0, ARRAY_SIZE(rdma->buf2));
rm->dma_buf_v->mark = 0;
@@ -227,6 +227,7 @@ static void rackmeter_do_timer(struct work_struct *work)
total_idle_ticks = get_cpu_idle_time(cpu);
idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
+ idle_ticks = min(idle_ticks, total_ticks);
rcpu->prev_idle = total_idle_ticks;
/* We do a very dumb calculation to update the LEDs for now,
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 01ee736fe..f8b6d1403 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -1851,7 +1851,7 @@ static int powerbook_sleep_grackle(void)
_set_L2CR(save_l2cr);
/* Restore userland MMU context */
- switch_mmu_context(NULL, current->active_mm);
+ switch_mmu_context(NULL, current->active_mm, NULL);
/* Power things up */
pmu_unlock();
@@ -1940,7 +1940,7 @@ powerbook_sleep_Core99(void)
_set_L3CR(save_l3cr);
/* Restore userland MMU context */
- switch_mmu_context(NULL, current->active_mm);
+ switch_mmu_context(NULL, current->active_mm, NULL);
/* Tell PMU we are ready */
pmu_unlock();
diff --git a/drivers/mailbox/mailbox-sti.c b/drivers/mailbox/mailbox-sti.c
index 2394cfe89..a334db5c9 100644
--- a/drivers/mailbox/mailbox-sti.c
+++ b/drivers/mailbox/mailbox-sti.c
@@ -430,8 +430,8 @@ static int sti_mbox_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdev->base = devm_ioremap_resource(&pdev->dev, res);
- if (!mdev->base)
- return -ENOMEM;
+ if (IS_ERR(mdev->base))
+ return PTR_ERR(mdev->base);
ret = of_property_read_string(np, "mbox-name", &mdev->name);
if (ret)
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index b7f636f15..c5e8b9cb1 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -2,7 +2,7 @@
* OMAP mailbox driver
*
* Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
- * Copyright (C) 2013-2014 Texas Instruments Inc.
+ * Copyright (C) 2013-2016 Texas Instruments Incorporated - http://www.ti.com
*
* Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
* Suman Anna <s-anna@ti.com>
@@ -15,12 +15,6 @@
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
*/
#include <linux/interrupt.h>
@@ -33,7 +27,6 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/platform_data/mailbox-omap.h>
#include <linux/omap-mailbox.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
@@ -62,12 +55,9 @@
#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
-#define MBOX_REG_SIZE 0x120
-
-#define OMAP4_MBOX_REG_SIZE 0x130
-
-#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32))
-#define OMAP4_MBOX_NR_REGS (OMAP4_MBOX_REG_SIZE / sizeof(u32))
+/* Interrupt register configuration types */
+#define MBOX_INTR_CFG_TYPE1 0
+#define MBOX_INTR_CFG_TYPE2 1
struct omap_mbox_fifo {
unsigned long msg;
@@ -91,8 +81,10 @@ struct omap_mbox_device {
struct device *dev;
struct mutex cfg_lock;
void __iomem *mbox_base;
+ u32 *irq_ctx;
u32 num_users;
u32 num_fifos;
+ u32 intr_type;
struct omap_mbox **mboxes;
struct mbox_controller controller;
struct list_head elem;
@@ -119,7 +111,6 @@ struct omap_mbox {
struct omap_mbox_device *parent;
struct omap_mbox_fifo tx_fifo;
struct omap_mbox_fifo rx_fifo;
- u32 ctx[OMAP4_MBOX_NR_REGS];
u32 intr_type;
struct mbox_chan *chan;
bool send_no_irq;
@@ -157,24 +148,28 @@ void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs)
static mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox)
{
struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
- return (mbox_msg_t) mbox_read_reg(mbox->parent, fifo->msg);
+
+ return (mbox_msg_t)mbox_read_reg(mbox->parent, fifo->msg);
}
static void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
{
struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
+
mbox_write_reg(mbox->parent, msg, fifo->msg);
}
static int mbox_fifo_empty(struct omap_mbox *mbox)
{
struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
+
return (mbox_read_reg(mbox->parent, fifo->msg_stat) == 0);
}
static int mbox_fifo_full(struct omap_mbox *mbox)
{
struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
+
return mbox_read_reg(mbox->parent, fifo->fifo_stat);
}
@@ -206,49 +201,6 @@ static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
return (int)(enable & status & bit);
}
-void omap_mbox_save_ctx(struct mbox_chan *chan)
-{
- int i;
- int nr_regs;
- struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
-
- if (WARN_ON(!mbox))
- return;
-
- if (mbox->intr_type)
- nr_regs = OMAP4_MBOX_NR_REGS;
- else
- nr_regs = MBOX_NR_REGS;
- for (i = 0; i < nr_regs; i++) {
- mbox->ctx[i] = mbox_read_reg(mbox->parent, i * sizeof(u32));
-
- dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
- i, mbox->ctx[i]);
- }
-}
-EXPORT_SYMBOL(omap_mbox_save_ctx);
-
-void omap_mbox_restore_ctx(struct mbox_chan *chan)
-{
- int i;
- int nr_regs;
- struct omap_mbox *mbox = mbox_chan_to_omap_mbox(chan);
-
- if (WARN_ON(!mbox))
- return;
-
- if (mbox->intr_type)
- nr_regs = OMAP4_MBOX_NR_REGS;
- else
- nr_regs = MBOX_NR_REGS;
- for (i = 0; i < nr_regs; i++) {
- mbox_write_reg(mbox->parent, mbox->ctx[i], i * sizeof(u32));
- dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
- i, mbox->ctx[i]);
- }
-}
-EXPORT_SYMBOL(omap_mbox_restore_ctx);
-
static void _omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
u32 l;
@@ -381,7 +333,7 @@ static struct omap_mbox_queue *mbox_queue_alloc(struct omap_mbox *mbox,
if (!work)
return NULL;
- mq = kzalloc(sizeof(struct omap_mbox_queue), GFP_KERNEL);
+ mq = kzalloc(sizeof(*mq), GFP_KERNEL);
if (!mq)
return NULL;
@@ -525,6 +477,7 @@ static int omap_mbox_register(struct omap_mbox_device *mdev)
mboxes = mdev->mboxes;
for (i = 0; mboxes[i]; i++) {
struct omap_mbox *mbox = mboxes[i];
+
mbox->dev = device_create(&omap_mbox_class, mdev->dev,
0, mbox, "%s", mbox->name);
if (IS_ERR(mbox->dev)) {
@@ -647,6 +600,52 @@ static const struct mbox_chan_ops omap_mbox_chan_ops = {
.shutdown = omap_mbox_chan_shutdown,
};
+#ifdef CONFIG_PM_SLEEP
+static int omap_mbox_suspend(struct device *dev)
+{
+ struct omap_mbox_device *mdev = dev_get_drvdata(dev);
+ u32 usr, fifo, reg;
+
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
+ if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
+ dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
+ fifo);
+ return -EBUSY;
+ }
+ }
+
+ for (usr = 0; usr < mdev->num_users; usr++) {
+ reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
+ mdev->irq_ctx[usr] = mbox_read_reg(mdev, reg);
+ }
+
+ return 0;
+}
+
+static int omap_mbox_resume(struct device *dev)
+{
+ struct omap_mbox_device *mdev = dev_get_drvdata(dev);
+ u32 usr, reg;
+
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ for (usr = 0; usr < mdev->num_users; usr++) {
+ reg = MAILBOX_IRQENABLE(mdev->intr_type, usr);
+ mbox_write_reg(mdev, mdev->irq_ctx[usr], reg);
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops omap_mbox_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
+};
+
static const struct of_device_id omap_mailbox_of_match[] = {
{
.compatible = "ti,omap2-mailbox",
@@ -696,8 +695,6 @@ static int omap_mbox_probe(struct platform_device *pdev)
int ret;
struct mbox_chan *chnls;
struct omap_mbox **list, *mbox, *mboxblk;
- struct omap_mbox_pdata *pdata = pdev->dev.platform_data;
- struct omap_mbox_dev_info *info = NULL;
struct omap_mbox_fifo_info *finfo, *finfoblk;
struct omap_mbox_device *mdev;
struct omap_mbox_fifo *fifo;
@@ -710,36 +707,26 @@ static int omap_mbox_probe(struct platform_device *pdev)
u32 l;
int i;
- if (!node && (!pdata || !pdata->info_cnt || !pdata->info)) {
- pr_err("%s: platform not supported\n", __func__);
+ if (!node) {
+ pr_err("%s: only DT-based devices are supported\n", __func__);
return -ENODEV;
}
- if (node) {
- match = of_match_device(omap_mailbox_of_match, &pdev->dev);
- if (!match)
- return -ENODEV;
- intr_type = (u32)match->data;
+ match = of_match_device(omap_mailbox_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+ intr_type = (u32)match->data;
- if (of_property_read_u32(node, "ti,mbox-num-users",
- &num_users))
- return -ENODEV;
+ if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
+ return -ENODEV;
- if (of_property_read_u32(node, "ti,mbox-num-fifos",
- &num_fifos))
- return -ENODEV;
+ if (of_property_read_u32(node, "ti,mbox-num-fifos", &num_fifos))
+ return -ENODEV;
- info_count = of_get_available_child_count(node);
- if (!info_count) {
- dev_err(&pdev->dev, "no available mbox devices found\n");
- return -ENODEV;
- }
- } else { /* non-DT device creation */
- info_count = pdata->info_cnt;
- info = pdata->info;
- intr_type = pdata->intr_type;
- num_users = pdata->num_users;
- num_fifos = pdata->num_fifos;
+ info_count = of_get_available_child_count(node);
+ if (!info_count) {
+ dev_err(&pdev->dev, "no available mbox devices found\n");
+ return -ENODEV;
}
finfoblk = devm_kzalloc(&pdev->dev, info_count * sizeof(*finfoblk),
@@ -750,38 +737,28 @@ static int omap_mbox_probe(struct platform_device *pdev)
finfo = finfoblk;
child = NULL;
for (i = 0; i < info_count; i++, finfo++) {
- if (node) {
- child = of_get_next_available_child(node, child);
- ret = of_property_read_u32_array(child, "ti,mbox-tx",
- tmp, ARRAY_SIZE(tmp));
- if (ret)
- return ret;
- finfo->tx_id = tmp[0];
- finfo->tx_irq = tmp[1];
- finfo->tx_usr = tmp[2];
-
- ret = of_property_read_u32_array(child, "ti,mbox-rx",
- tmp, ARRAY_SIZE(tmp));
- if (ret)
- return ret;
- finfo->rx_id = tmp[0];
- finfo->rx_irq = tmp[1];
- finfo->rx_usr = tmp[2];
-
- finfo->name = child->name;
-
- if (of_find_property(child, "ti,mbox-send-noirq", NULL))
- finfo->send_no_irq = true;
- } else {
- finfo->tx_id = info->tx_id;
- finfo->rx_id = info->rx_id;
- finfo->tx_usr = info->usr_id;
- finfo->tx_irq = info->irq_id;
- finfo->rx_usr = info->usr_id;
- finfo->rx_irq = info->irq_id;
- finfo->name = info->name;
- info++;
- }
+ child = of_get_next_available_child(node, child);
+ ret = of_property_read_u32_array(child, "ti,mbox-tx", tmp,
+ ARRAY_SIZE(tmp));
+ if (ret)
+ return ret;
+ finfo->tx_id = tmp[0];
+ finfo->tx_irq = tmp[1];
+ finfo->tx_usr = tmp[2];
+
+ ret = of_property_read_u32_array(child, "ti,mbox-rx", tmp,
+ ARRAY_SIZE(tmp));
+ if (ret)
+ return ret;
+ finfo->rx_id = tmp[0];
+ finfo->rx_irq = tmp[1];
+ finfo->rx_usr = tmp[2];
+
+ finfo->name = child->name;
+
+ if (of_find_property(child, "ti,mbox-send-noirq", NULL))
+ finfo->send_no_irq = true;
+
if (finfo->tx_id >= num_fifos || finfo->rx_id >= num_fifos ||
finfo->tx_usr >= num_users || finfo->rx_usr >= num_users)
return -EINVAL;
@@ -796,6 +773,11 @@ static int omap_mbox_probe(struct platform_device *pdev)
if (IS_ERR(mdev->mbox_base))
return PTR_ERR(mdev->mbox_base);
+ mdev->irq_ctx = devm_kzalloc(&pdev->dev, num_users * sizeof(u32),
+ GFP_KERNEL);
+ if (!mdev->irq_ctx)
+ return -ENOMEM;
+
/* allocate one extra for marking end of list */
list = devm_kzalloc(&pdev->dev, (info_count + 1) * sizeof(*list),
GFP_KERNEL);
@@ -848,6 +830,7 @@ static int omap_mbox_probe(struct platform_device *pdev)
mdev->dev = &pdev->dev;
mdev->num_users = num_users;
mdev->num_fifos = num_fifos;
+ mdev->intr_type = intr_type;
mdev->mboxes = list;
/* OMAP does not have a Tx-Done IRQ, but rather a Tx-Ready IRQ */
@@ -905,6 +888,7 @@ static struct platform_driver omap_mbox_driver = {
.remove = omap_mbox_remove,
.driver = {
.name = "omap-mailbox",
+ .pm = &omap_mbox_pm_ops,
.of_match_table = of_match_ptr(omap_mailbox_of_match),
},
};
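
The omap-mailbox hunks drop the exported omap_mbox_save_ctx()/omap_mbox_restore_ctx() helpers, which dumped every register, in favour of dev_pm_ops handlers that save only the per-user IRQ-enable registers and refuse to suspend while any FIFO still holds unread messages. Below is a minimal sketch of that shape, with a plain array standing in for the MMIO registers; all names here are illustrative, not the driver's API.

#include <stdio.h>
#include <stdint.h>

#define NUM_USERS	4
#define NUM_FIFOS	2

static uint32_t irqenable[NUM_USERS];	/* stand-in for the MMIO registers */
static uint32_t msgstatus[NUM_FIFOS];	/* pending-message counts per fifo */
static uint32_t irq_ctx[NUM_USERS];	/* saved context, like mdev->irq_ctx */

static int mbox_suspend(void)
{
	int f, u;

	for (f = 0; f < NUM_FIFOS; f++)
		if (msgstatus[f])
			return -1;	/* unread messages would be lost: -EBUSY */

	for (u = 0; u < NUM_USERS; u++)
		irq_ctx[u] = irqenable[u];	/* snapshot IRQ enables only */
	return 0;
}

static void mbox_resume(void)
{
	int u;

	for (u = 0; u < NUM_USERS; u++)
		irqenable[u] = irq_ctx[u];	/* write the snapshot back */
}

int main(void)
{
	irqenable[0] = 0x3;
	if (mbox_suspend() == 0) {
		irqenable[0] = 0;	/* register contents lost across suspend */
		mbox_resume();
	}
	printf("irqenable[0] = 0x%x\n", irqenable[0]);	/* 0x3 again */
	return 0;
}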
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index a4be45107..6f2c8522e 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -61,21 +61,36 @@ static int mcb_probe(struct device *dev)
struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
struct mcb_device *mdev = to_mcb_device(dev);
const struct mcb_device_id *found_id;
+ struct module *carrier_mod;
+ int ret;
found_id = mcb_match_id(mdrv->id_table, mdev);
if (!found_id)
return -ENODEV;
- return mdrv->probe(mdev, found_id);
+ carrier_mod = mdev->dev.parent->driver->owner;
+ if (!try_module_get(carrier_mod))
+ return -EINVAL;
+
+ get_device(dev);
+ ret = mdrv->probe(mdev, found_id);
+ if (ret)
+ module_put(carrier_mod);
+
+ return ret;
}
static int mcb_remove(struct device *dev)
{
struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
struct mcb_device *mdev = to_mcb_device(dev);
+ struct module *carrier_mod;
mdrv->remove(mdev);
+ carrier_mod = mdev->dev.parent->driver->owner;
+ module_put(carrier_mod);
+
put_device(&mdev->dev);
return 0;
@@ -83,13 +98,67 @@ static int mcb_remove(struct device *dev)
static void mcb_shutdown(struct device *dev)
{
+ struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
struct mcb_device *mdev = to_mcb_device(dev);
- struct mcb_driver *mdrv = mdev->driver;
if (mdrv && mdrv->shutdown)
mdrv->shutdown(mdev);
}
+static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mcb_bus *bus = to_mcb_bus(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", bus->revision);
+}
+static DEVICE_ATTR_RO(revision);
+
+static ssize_t model_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mcb_bus *bus = to_mcb_bus(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%c\n", bus->model);
+}
+static DEVICE_ATTR_RO(model);
+
+static ssize_t minor_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mcb_bus *bus = to_mcb_bus(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", bus->minor);
+}
+static DEVICE_ATTR_RO(minor);
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mcb_bus *bus = to_mcb_bus(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", bus->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *mcb_bus_attrs[] = {
+ &dev_attr_revision.attr,
+ &dev_attr_model.attr,
+ &dev_attr_minor.attr,
+ &dev_attr_name.attr,
+ NULL,
+};
+
+static const struct attribute_group mcb_carrier_group = {
+ .attrs = mcb_bus_attrs,
+};
+
+static const struct attribute_group *mcb_carrier_groups[] = {
+ &mcb_carrier_group,
+ NULL,
+};
+
+
static struct bus_type mcb_bus_type = {
.name = "mcb",
.match = mcb_match,
@@ -99,6 +168,11 @@ static struct bus_type mcb_bus_type = {
.shutdown = mcb_shutdown,
};
+static struct device_type mcb_carrier_device_type = {
+ .name = "mcb-carrier",
+ .groups = mcb_carrier_groups,
+};
+
/**
* __mcb_register_driver() - Register a @mcb_driver at the system
* @drv: The @mcb_driver
@@ -155,6 +229,7 @@ int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
int device_id;
device_initialize(&dev->dev);
+ mcb_bus_get(bus);
dev->dev.bus = &mcb_bus_type;
dev->dev.parent = bus->dev.parent;
dev->dev.release = mcb_release_dev;
@@ -178,6 +253,15 @@ out:
}
EXPORT_SYMBOL_GPL(mcb_device_register);
+static void mcb_free_bus(struct device *dev)
+{
+ struct mcb_bus *bus = to_mcb_bus(dev);
+
+ put_device(bus->carrier);
+ ida_simple_remove(&mcb_ida, bus->bus_nr);
+ kfree(bus);
+}
+
/**
* mcb_alloc_bus() - Allocate a new @mcb_bus
*
@@ -187,6 +271,7 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
{
struct mcb_bus *bus;
int bus_nr;
+ int rc;
bus = kzalloc(sizeof(struct mcb_bus), GFP_KERNEL);
if (!bus)
@@ -194,14 +279,29 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
if (bus_nr < 0) {
- kfree(bus);
- return ERR_PTR(bus_nr);
+ rc = bus_nr;
+ goto err_free;
}
- INIT_LIST_HEAD(&bus->children);
bus->bus_nr = bus_nr;
- bus->carrier = carrier;
+ bus->carrier = get_device(carrier);
+
+ device_initialize(&bus->dev);
+ bus->dev.parent = carrier;
+ bus->dev.bus = &mcb_bus_type;
+ bus->dev.type = &mcb_carrier_device_type;
+ bus->dev.release = &mcb_free_bus;
+
+ dev_set_name(&bus->dev, "mcb:%d", bus_nr);
+ rc = device_add(&bus->dev);
+ if (rc)
+ goto err_free;
+
return bus;
+err_free:
+ put_device(carrier);
+ kfree(bus);
+ return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(mcb_alloc_bus);
@@ -224,10 +324,6 @@ static void mcb_devices_unregister(struct mcb_bus *bus)
void mcb_release_bus(struct mcb_bus *bus)
{
mcb_devices_unregister(bus);
-
- ida_simple_remove(&mcb_ida, bus->bus_nr);
-
- kfree(bus);
}
EXPORT_SYMBOL_GPL(mcb_release_bus);
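
The mcb-core changes pin the carrier module for as long as a device driver is bound to one of its devices: mcb_probe() takes a reference with try_module_get(), drops it again if the driver's probe fails, and mcb_remove() releases it on unbind. A toy sketch of that discipline follows, with a plain counter standing in for the module refcount and invented function names.

#include <stdio.h>

static int carrier_refs;	/* stand-in for the carrier module's refcount */

static int driver_probe(int should_fail)
{
	carrier_refs++;			/* try_module_get(carrier_mod) */
	if (should_fail) {
		carrier_refs--;		/* module_put() on probe error */
		return -1;
	}
	return 0;			/* stay pinned while the driver is bound */
}

static void driver_remove(void)
{
	carrier_refs--;			/* module_put() when the driver unbinds */
}

int main(void)
{
	driver_probe(1);			/* a failed probe leaves no pin behind */
	driver_probe(0);
	printf("pinned: %d\n", carrier_refs);	/* 1 while bound */
	driver_remove();
	printf("pinned: %d\n", carrier_refs);	/* back to 0 */
	return 0;
}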
diff --git a/drivers/mcb/mcb-internal.h b/drivers/mcb/mcb-internal.h
index fb7493dcf..5254e0285 100644
--- a/drivers/mcb/mcb-internal.h
+++ b/drivers/mcb/mcb-internal.h
@@ -5,7 +5,6 @@
#define PCI_VENDOR_ID_MEN 0x1a88
#define PCI_DEVICE_ID_MEN_CHAMELEON 0x4d45
-#define CHAMELEON_FILENAME_LEN 12
#define CHAMELEONV2_MAGIC 0xabce
#define CHAM_HEADER_SIZE 0x200
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index b0155b05c..dbecbed0d 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -113,16 +113,11 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
}
p += hsize;
- pr_debug("header->revision = %d\n", header->revision);
- pr_debug("header->model = 0x%x ('%c')\n", header->model,
- header->model);
- pr_debug("header->minor = %d\n", header->minor);
- pr_debug("header->bus_type = 0x%x\n", header->bus_type);
-
-
- pr_debug("header->magic = 0x%x\n", header->magic);
- pr_debug("header->filename = \"%.*s\"\n", CHAMELEON_FILENAME_LEN,
- header->filename);
+ bus->revision = header->revision;
+ bus->model = header->model;
+ bus->minor = header->minor;
+ snprintf(bus->name, CHAMELEON_FILENAME_LEN + 1, "%s",
+ header->filename);
for_each_chameleon_cell(dtype, p) {
switch (dtype) {
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index 67d5e7d08..b15a0349c 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -35,7 +35,6 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct resource *res;
struct priv *priv;
int ret;
- int num_cells;
unsigned long flags;
priv = devm_kzalloc(&pdev->dev, sizeof(struct priv), GFP_KERNEL);
@@ -55,19 +54,20 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_disable;
}
- res = request_mem_region(priv->mapbase, CHAM_HEADER_SIZE,
- KBUILD_MODNAME);
+ res = devm_request_mem_region(&pdev->dev, priv->mapbase,
+ CHAM_HEADER_SIZE,
+ KBUILD_MODNAME);
if (!res) {
dev_err(&pdev->dev, "Failed to request PCI memory\n");
ret = -EBUSY;
goto out_disable;
}
- priv->base = ioremap(priv->mapbase, CHAM_HEADER_SIZE);
+ priv->base = devm_ioremap(&pdev->dev, priv->mapbase, CHAM_HEADER_SIZE);
if (!priv->base) {
dev_err(&pdev->dev, "Cannot ioremap\n");
ret = -ENOMEM;
- goto out_release;
+ goto out_disable;
}
flags = pci_resource_flags(pdev, 0);
@@ -75,7 +75,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = -ENOTSUPP;
dev_err(&pdev->dev,
"IO mapped PCI devices are not supported\n");
- goto out_iounmap;
+ goto out_disable;
}
pci_set_drvdata(pdev, priv);
@@ -83,7 +83,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
priv->bus = mcb_alloc_bus(&pdev->dev);
if (IS_ERR(priv->bus)) {
ret = PTR_ERR(priv->bus);
- goto out_iounmap;
+ goto out_disable;
}
priv->bus->get_irq = mcb_pci_get_irq;
@@ -91,9 +91,8 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base);
if (ret < 0)
goto out_mcb_bus;
- num_cells = ret;
- dev_dbg(&pdev->dev, "Found %d cells\n", num_cells);
+ dev_dbg(&pdev->dev, "Found %d cells\n", ret);
mcb_bus_add_devices(priv->bus);
@@ -101,10 +100,6 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
out_mcb_bus:
mcb_release_bus(priv->bus);
-out_iounmap:
- iounmap(priv->base);
-out_release:
- pci_release_region(pdev, 0);
out_disable:
pci_disable_device(pdev);
return ret;
@@ -116,8 +111,6 @@ static void mcb_pci_remove(struct pci_dev *pdev)
mcb_release_bus(priv->bus);
- iounmap(priv->base);
- release_region(priv->mapbase, CHAM_HEADER_SIZE);
pci_disable_device(pdev);
}
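
mcb-pci's error handling shrinks because devm_request_mem_region() and devm_ioremap() register their own teardown with the device, so the out_iounmap/out_release labels collapse into out_disable and the remove path drops its manual unmapping. The sketch below models the idea with a tiny LIFO cleanup stack; devm_alloc() and device_release() are invented stand-ins, not kernel API.

#include <stdio.h>
#include <stdlib.h>

typedef void (*cleanup_fn)(void *);

static struct devres { cleanup_fn fn; void *arg; } stack[8];
static int depth;

/* invented stand-in: allocate and register the matching cleanup */
static void *devm_alloc(size_t n)
{
	void *p = malloc(n);

	if (p && depth < 8) {
		stack[depth].fn = free;
		stack[depth].arg = p;
		depth++;
	}
	return p;
}

/* invented stand-in: run registered cleanups LIFO, like real devres */
static void device_release(void)
{
	while (depth > 0) {
		depth--;
		stack[depth].fn(stack[depth].arg);
	}
}

int main(void)
{
	if (!devm_alloc(32) || !devm_alloc(64)) {
		device_release();	/* one error path undoes everything */
		return 1;
	}
	device_release();		/* normal teardown uses the same path */
	return 0;
}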
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 8eeab72b9..ca4abe1cc 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -64,7 +64,6 @@
#include "btree.h"
#include <linux/blkdev.h>
-#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>
@@ -288,7 +287,6 @@ do { \
if (kthread_should_stop()) \
return 0; \
\
- try_to_freeze(); \
schedule(); \
mutex_lock(&(ca)->set->bucket_lock); \
} \
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 22b9e34ce..eab505ee0 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/bitops.h>
-#include <linux/freezer.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
@@ -1787,7 +1786,6 @@ again:
mutex_unlock(&c->bucket_lock);
- try_to_freeze();
schedule();
}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index a296425a7..f5dbb4e88 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -816,7 +816,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);
- blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
+ blk_queue_write_cache(q, true, true);
return 0;
}
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index b9346cd9c..60123677b 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -12,7 +12,6 @@
#include "writeback.h"
#include <linux/delay.h>
-#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>
@@ -228,7 +227,6 @@ static void read_dirty(struct cached_dev *dc)
*/
while (!kthread_should_stop()) {
- try_to_freeze();
w = bch_keybuf_next(&dc->writeback_keys);
if (!w)
@@ -433,7 +431,6 @@ static int bch_writeback_thread(void *arg)
if (kthread_should_stop())
return 0;
- try_to_freeze();
schedule();
continue;
}
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 3fe86b54d..d8129ec93 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -46,7 +46,7 @@ static inline char *bmname(struct bitmap *bitmap)
* allocated while we're using it
*/
static int bitmap_checkpage(struct bitmap_counts *bitmap,
- unsigned long page, int create)
+ unsigned long page, int create, int no_hijack)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
@@ -90,6 +90,9 @@ __acquires(bitmap->lock)
if (mappage == NULL) {
pr_debug("md/bitmap: map page allocation failed, hijacking\n");
+ /* We don't support hijack for cluster raid */
+ if (no_hijack)
+ return -ENOMEM;
/* failed - set the hijacked flag so that we can use the
* pointer as a counter */
if (!bitmap->bp[page].map)
@@ -756,7 +759,7 @@ static int bitmap_storage_alloc(struct bitmap_storage *store,
bytes += sizeof(bitmap_super_t);
num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
- offset = slot_number * (num_pages - 1);
+ offset = slot_number * num_pages;
store->filemap = kmalloc(sizeof(struct page *)
* num_pages, GFP_KERNEL);
@@ -900,6 +903,11 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
struct page *page;
void *kaddr;
unsigned long chunk = block >> bitmap->counts.chunkshift;
+ struct bitmap_storage *store = &bitmap->storage;
+ unsigned long node_offset = 0;
+
+ if (mddev_is_clustered(bitmap->mddev))
+ node_offset = bitmap->cluster_slot * store->file_pages;
page = filemap_get_page(&bitmap->storage, chunk);
if (!page)
@@ -915,7 +923,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
kunmap_atomic(kaddr);
pr_debug("set file bit %lu page %lu\n", bit, page->index);
/* record page number so it gets flushed to disk when unplug occurs */
- set_page_attr(bitmap, page->index, BITMAP_PAGE_DIRTY);
+ set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
}
static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
@@ -924,6 +932,11 @@ static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
struct page *page;
void *paddr;
unsigned long chunk = block >> bitmap->counts.chunkshift;
+ struct bitmap_storage *store = &bitmap->storage;
+ unsigned long node_offset = 0;
+
+ if (mddev_is_clustered(bitmap->mddev))
+ node_offset = bitmap->cluster_slot * store->file_pages;
page = filemap_get_page(&bitmap->storage, chunk);
if (!page)
@@ -935,8 +948,8 @@ static void bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
else
clear_bit_le(bit, paddr);
kunmap_atomic(paddr);
- if (!test_page_attr(bitmap, page->index, BITMAP_PAGE_NEEDWRITE)) {
- set_page_attr(bitmap, page->index, BITMAP_PAGE_PENDING);
+ if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
+ set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
bitmap->allclean = 0;
}
}
@@ -1321,7 +1334,7 @@ __acquires(bitmap->lock)
sector_t csize;
int err;
- err = bitmap_checkpage(bitmap, page, create);
+ err = bitmap_checkpage(bitmap, page, create, 0);
if (bitmap->bp[page].hijacked ||
bitmap->bp[page].map == NULL)
@@ -1594,6 +1607,27 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
}
EXPORT_SYMBOL(bitmap_cond_end_sync);
+void bitmap_sync_with_cluster(struct mddev *mddev,
+ sector_t old_lo, sector_t old_hi,
+ sector_t new_lo, sector_t new_hi)
+{
+ struct bitmap *bitmap = mddev->bitmap;
+ sector_t sector, blocks = 0;
+
+ for (sector = old_lo; sector < new_lo; ) {
+ bitmap_end_sync(bitmap, sector, &blocks, 0);
+ sector += blocks;
+ }
+ WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");
+
+ for (sector = old_hi; sector < new_hi; ) {
+ bitmap_start_sync(bitmap, sector, &blocks, 0);
+ sector += blocks;
+ }
+ WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
+}
+EXPORT_SYMBOL(bitmap_sync_with_cluster);
+
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
/* For each chunk covered by any of these sectors, set the
@@ -1814,6 +1848,9 @@ int bitmap_load(struct mddev *mddev)
if (!bitmap)
goto out;
+ if (mddev_is_clustered(mddev))
+ md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);
+
/* Clear out old bitmap info first: Either there is none, or we
* are resuming after someone else has possibly changed things,
* so we should forget old cached info.
@@ -1890,14 +1927,14 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
if (clear_bits) {
bitmap_update_sb(bitmap);
- /* Setting this for the ev_page should be enough.
- * And we do not require both write_all and PAGE_DIRT either
- */
+ /* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
+ * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
for (i = 0; i < bitmap->storage.file_pages; i++)
- set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
- bitmap_write_all(bitmap);
+ if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
+ set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
bitmap_unplug(bitmap);
}
+ bitmap_unplug(mddev->bitmap);
*low = lo;
*high = hi;
err:
@@ -2032,6 +2069,35 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
chunks << chunkshift);
spin_lock_irq(&bitmap->counts.lock);
+ /* For cluster raid, need to pre-allocate bitmap */
+ if (mddev_is_clustered(bitmap->mddev)) {
+ unsigned long page;
+ for (page = 0; page < pages; page++) {
+ ret = bitmap_checkpage(&bitmap->counts, page, 1, 1);
+ if (ret) {
+ unsigned long k;
+
+ /* deallocate the page memory */
+ for (k = 0; k < page; k++) {
+ kfree(new_bp[k].map);
+ }
+
+ /* restore some fields from old_counts */
+ bitmap->counts.bp = old_counts.bp;
+ bitmap->counts.pages = old_counts.pages;
+ bitmap->counts.missing_pages = old_counts.pages;
+ bitmap->counts.chunkshift = old_counts.chunkshift;
+ bitmap->counts.chunks = old_counts.chunks;
+ bitmap->mddev->bitmap_info.chunksize = 1 << (old_counts.chunkshift +
+ BITMAP_BLOCK_SHIFT);
+ blocks = old_counts.chunks << old_counts.chunkshift;
+ pr_err("Could not pre-allocate in-memory bitmap for cluster raid\n");
+ break;
+ } else
+ bitmap->counts.bp[page].count += 1;
+ }
+ }
+
for (block = 0; block < blocks; ) {
bitmap_counter_t *bmc_old, *bmc_new;
int set;
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 5e3fcd6ec..5b6dd63dd 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -258,6 +258,9 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted);
void bitmap_close_sync(struct bitmap *bitmap);
void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force);
+void bitmap_sync_with_cluster(struct mddev *mddev,
+ sector_t old_lo, sector_t old_hi,
+ sector_t new_lo, sector_t new_hi);
void bitmap_unplug(struct bitmap *bitmap);
void bitmap_daemon_work(struct mddev *mddev);
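
bitmap_sync_with_cluster(), declared above, slides the resync window from [old_lo, old_hi) to [new_lo, new_hi): sectors that fell out of the window get bitmap_end_sync() and sectors that entered it get bitmap_start_sync(). A sketch of that two-range walk follows; the fixed STEP is an assumption standing in for the per-call blocks count the real helper gets back from the bitmap code.

#include <stdio.h>

#define STEP 8	/* assumed fixed chunk granularity, for illustration */

int main(void)
{
	unsigned long old_lo = 0, old_hi = 16, new_lo = 24, new_hi = 40;
	unsigned long s;

	for (s = old_lo; s < new_lo; s += STEP)
		printf("end_sync   at %lu\n", s);	/* sectors that left the window */
	for (s = old_hi; s < new_hi; s += STEP)
		printf("start_sync at %lu\n", s);	/* sectors that entered it */
	return 0;
}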
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 2adf81d81..2c7ca258c 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1723,7 +1723,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
if (!dmi) {
unsigned noio_flag;
noio_flag = memalloc_noio_save();
- dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH | __GFP_HIGHMEM, PAGE_KERNEL);
+ dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_HIGH | __GFP_HIGHMEM, PAGE_KERNEL);
memalloc_noio_restore(noio_flag);
if (dmi)
*param_flags |= DM_PARAMS_VMALLOC;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 677ba223e..52baf8a5b 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -76,26 +76,18 @@ struct multipath {
wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
- unsigned pg_init_in_progress; /* Only one pg_init allowed at once */
-
- unsigned nr_valid_paths; /* Total number of usable paths */
struct pgpath *current_pgpath;
struct priority_group *current_pg;
struct priority_group *next_pg; /* Switch to this PG if set */
- bool queue_io:1; /* Must we queue all I/O? */
- bool queue_if_no_path:1; /* Queue I/O if last path fails? */
- bool saved_queue_if_no_path:1; /* Saved state during suspension */
- bool retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
- bool pg_init_disabled:1; /* pg_init is not currently allowed */
- bool pg_init_required:1; /* pg_init needs calling? */
- bool pg_init_delay_retry:1; /* Delay pg_init retry? */
+ unsigned long flags; /* Multipath state flags */
unsigned pg_init_retries; /* Number of times to retry pg_init */
- unsigned pg_init_count; /* Number of times pg_init called */
unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
- struct work_struct trigger_event;
+ atomic_t nr_valid_paths; /* Total number of usable paths */
+ atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
+ atomic_t pg_init_count; /* Number of times pg_init called */
/*
* We must use a mempool of dm_mpath_io structs so that we
@@ -104,6 +96,7 @@ struct multipath {
mempool_t *mpio_pool;
struct mutex work_mutex;
+ struct work_struct trigger_event;
};
/*
@@ -122,6 +115,17 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);
+/*-----------------------------------------------
+ * Multipath state flags.
+ *-----------------------------------------------*/
+
+#define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */
+#define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */
+#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */
+#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3 /* If there's already a hw_handler present, don't change it. */
+#define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */
+#define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */
+#define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */
/*-----------------------------------------------
* Allocation routines
@@ -189,7 +193,10 @@ static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
if (m) {
INIT_LIST_HEAD(&m->priority_groups);
spin_lock_init(&m->lock);
- m->queue_io = true;
+ set_bit(MPATHF_QUEUE_IO, &m->flags);
+ atomic_set(&m->nr_valid_paths, 0);
+ atomic_set(&m->pg_init_in_progress, 0);
+ atomic_set(&m->pg_init_count, 0);
m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
INIT_WORK(&m->trigger_event, trigger_event);
init_waitqueue_head(&m->pg_init_wait);
@@ -274,17 +281,17 @@ static int __pg_init_all_paths(struct multipath *m)
struct pgpath *pgpath;
unsigned long pg_init_delay = 0;
- if (m->pg_init_in_progress || m->pg_init_disabled)
+ if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
return 0;
- m->pg_init_count++;
- m->pg_init_required = false;
+ atomic_inc(&m->pg_init_count);
+ clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
/* Check here to reset pg_init_required */
if (!m->current_pg)
return 0;
- if (m->pg_init_delay_retry)
+ if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
@@ -293,65 +300,99 @@ static int __pg_init_all_paths(struct multipath *m)
continue;
if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
pg_init_delay))
- m->pg_init_in_progress++;
+ atomic_inc(&m->pg_init_in_progress);
}
- return m->pg_init_in_progress;
+ return atomic_read(&m->pg_init_in_progress);
+}
+
+static int pg_init_all_paths(struct multipath *m)
+{
+ int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&m->lock, flags);
+ r = __pg_init_all_paths(m);
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ return r;
}
-static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
+static void __switch_pg(struct multipath *m, struct priority_group *pg)
{
- m->current_pg = pgpath->pg;
+ m->current_pg = pg;
/* Must we initialise the PG first, and queue I/O till it's ready? */
if (m->hw_handler_name) {
- m->pg_init_required = true;
- m->queue_io = true;
+ set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
+ set_bit(MPATHF_QUEUE_IO, &m->flags);
} else {
- m->pg_init_required = false;
- m->queue_io = false;
+ clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
+ clear_bit(MPATHF_QUEUE_IO, &m->flags);
}
- m->pg_init_count = 0;
+ atomic_set(&m->pg_init_count, 0);
}
-static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
- size_t nr_bytes)
+static struct pgpath *choose_path_in_pg(struct multipath *m,
+ struct priority_group *pg,
+ size_t nr_bytes)
{
+ unsigned long flags;
struct dm_path *path;
+ struct pgpath *pgpath;
path = pg->ps.type->select_path(&pg->ps, nr_bytes);
if (!path)
- return -ENXIO;
+ return ERR_PTR(-ENXIO);
- m->current_pgpath = path_to_pgpath(path);
+ pgpath = path_to_pgpath(path);
- if (m->current_pg != pg)
- __switch_pg(m, m->current_pgpath);
+ if (unlikely(lockless_dereference(m->current_pg) != pg)) {
+ /* Only update current_pgpath if pg changed */
+ spin_lock_irqsave(&m->lock, flags);
+ m->current_pgpath = pgpath;
+ __switch_pg(m, pg);
+ spin_unlock_irqrestore(&m->lock, flags);
+ }
- return 0;
+ return pgpath;
}
-static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
+static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
{
+ unsigned long flags;
struct priority_group *pg;
+ struct pgpath *pgpath;
bool bypassed = true;
- if (!m->nr_valid_paths) {
- m->queue_io = false;
+ if (!atomic_read(&m->nr_valid_paths)) {
+ clear_bit(MPATHF_QUEUE_IO, &m->flags);
goto failed;
}
/* Were we instructed to switch PG? */
- if (m->next_pg) {
+ if (lockless_dereference(m->next_pg)) {
+ spin_lock_irqsave(&m->lock, flags);
pg = m->next_pg;
+ if (!pg) {
+ spin_unlock_irqrestore(&m->lock, flags);
+ goto check_current_pg;
+ }
m->next_pg = NULL;
- if (!__choose_path_in_pg(m, pg, nr_bytes))
- return;
+ spin_unlock_irqrestore(&m->lock, flags);
+ pgpath = choose_path_in_pg(m, pg, nr_bytes);
+ if (!IS_ERR_OR_NULL(pgpath))
+ return pgpath;
}
/* Don't change PG until it has no remaining paths */
- if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
- return;
+check_current_pg:
+ pg = lockless_dereference(m->current_pg);
+ if (pg) {
+ pgpath = choose_path_in_pg(m, pg, nr_bytes);
+ if (!IS_ERR_OR_NULL(pgpath))
+ return pgpath;
+ }
/*
* Loop through priority groups until we find a valid path.
@@ -363,34 +404,38 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
list_for_each_entry(pg, &m->priority_groups, list) {
if (pg->bypassed == bypassed)
continue;
- if (!__choose_path_in_pg(m, pg, nr_bytes)) {
+ pgpath = choose_path_in_pg(m, pg, nr_bytes);
+ if (!IS_ERR_OR_NULL(pgpath)) {
if (!bypassed)
- m->pg_init_delay_retry = true;
- return;
+ set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
+ return pgpath;
}
}
} while (bypassed--);
failed:
+ spin_lock_irqsave(&m->lock, flags);
m->current_pgpath = NULL;
m->current_pg = NULL;
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ return NULL;
}
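/*
 * Sketch of the discipline choose_pgpath() now follows, assuming the
 * struct multipath layout from this file: pointer fields are sampled
 * without the lock via lockless_dereference() (a dependency-ordered
 * load), while updates of the paired fields still serialize on m->lock.
 */
static struct priority_group *pg_reader_sketch(struct multipath *m)
{
	/* lock-free sample; the result may be immediately stale */
	return lockless_dereference(m->current_pg);
}

static void pg_writer_sketch(struct multipath *m, struct priority_group *pg)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);	/* writers still serialize */
	m->current_pgpath = NULL;
	m->current_pg = pg;
	spin_unlock_irqrestore(&m->lock, flags);
}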
/*
* Check whether bios must be queued in the device-mapper core rather
* than here in the target.
*
- * m->lock must be held on entry.
- *
* If m->queue_if_no_path and m->saved_queue_if_no_path hold the
* same value then we are not between multipath_presuspend()
* and multipath_resume() calls and we have no need to check
* for the DMF_NOFLUSH_SUSPENDING flag.
*/
-static int __must_push_back(struct multipath *m)
+static int must_push_back(struct multipath *m)
{
- return (m->queue_if_no_path ||
- (m->queue_if_no_path != m->saved_queue_if_no_path &&
+ return (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
+ ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
+ test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
dm_noflush_suspending(m->ti)));
}
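/*
 * Boolean shape of must_push_back(), with the flag reads factored out
 * (an illustration only): push back when queueing is enabled, or when
 * the live and saved queue_if_no_path bits differ while a no-flush
 * suspend is in progress.
 */
static bool must_push_back_sketch(bool queue, bool saved, bool noflush)
{
	return queue || (queue != saved && noflush);
}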
@@ -408,35 +453,31 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
struct block_device *bdev;
struct dm_mpath_io *mpio;
- spin_lock_irq(&m->lock);
-
/* Do we need to select a new pgpath? */
- if (!m->current_pgpath || !m->queue_io)
- __choose_pgpath(m, nr_bytes);
-
- pgpath = m->current_pgpath;
+ pgpath = lockless_dereference(m->current_pgpath);
+ if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
+ pgpath = choose_pgpath(m, nr_bytes);
if (!pgpath) {
- if (!__must_push_back(m))
+ if (!must_push_back(m))
r = -EIO; /* Failed */
- goto out_unlock;
- } else if (m->queue_io || m->pg_init_required) {
- __pg_init_all_paths(m);
- goto out_unlock;
+ return r;
+ } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
+ test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
+ pg_init_all_paths(m);
+ return r;
}
mpio = set_mpio(m, map_context);
if (!mpio)
/* ENOMEM, requeue */
- goto out_unlock;
+ return r;
mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes;
bdev = pgpath->path.dev->bdev;
- spin_unlock_irq(&m->lock);
-
if (clone) {
/*
* Old request-based interface: allocated clone is passed in.
@@ -468,11 +509,6 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
&pgpath->path,
nr_bytes);
return DM_MAPIO_REMAPPED;
-
-out_unlock:
- spin_unlock_irq(&m->lock);
-
- return r;
}
static int multipath_map(struct dm_target *ti, struct request *clone,
@@ -503,11 +539,22 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
spin_lock_irqsave(&m->lock, flags);
- if (save_old_value)
- m->saved_queue_if_no_path = m->queue_if_no_path;
+ if (save_old_value) {
+ if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+ set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ else
+ clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ } else {
+ if (queue_if_no_path)
+ set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ else
+ clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+ }
+ if (queue_if_no_path)
+ set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
else
- m->saved_queue_if_no_path = queue_if_no_path;
- m->queue_if_no_path = queue_if_no_path;
+ clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+
spin_unlock_irqrestore(&m->lock, flags);
if (!queue_if_no_path)
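/*
 * The expanded if/else ladder above exists because bitops have no
 * single "copy this bool into that bit" operation; each assignment
 * becomes an explicit set_bit()/clear_bit() pair.  Later kernels add
 * assign_bit() for exactly this idiom; a sketch of the equivalent:
 */
static void assign_bit_sketch(int nr, unsigned long *flags, bool value)
{
	if (value)
		set_bit(nr, flags);
	else
		clear_bit(nr, flags);
}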
@@ -600,10 +647,10 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
goto bad;
}
- if (m->retain_attached_hw_handler || m->hw_handler_name)
+ if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
q = bdev_get_queue(p->path.dev->bdev);
- if (m->retain_attached_hw_handler) {
+ if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
if (attached_handler_name) {
@@ -808,7 +855,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
}
if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
- m->retain_attached_hw_handler = true;
+ set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
continue;
}
@@ -884,6 +931,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
/* parse the priority groups */
while (as.argc) {
struct priority_group *pg;
+ unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
pg = parse_priority_group(&as, m);
if (IS_ERR(pg)) {
@@ -891,7 +939,9 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
goto bad;
}
- m->nr_valid_paths += pg->nr_pgpaths;
+ nr_valid_paths += pg->nr_pgpaths;
+ atomic_set(&m->nr_valid_paths, nr_valid_paths);
+
list_add_tail(&pg->list, &m->priority_groups);
pg_count++;
pg->pg_num = pg_count;
@@ -921,19 +971,14 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
DECLARE_WAITQUEUE(wait, current);
- unsigned long flags;
add_wait_queue(&m->pg_init_wait, &wait);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
- spin_lock_irqsave(&m->lock, flags);
- if (!m->pg_init_in_progress) {
- spin_unlock_irqrestore(&m->lock, flags);
+ if (!atomic_read(&m->pg_init_in_progress))
break;
- }
- spin_unlock_irqrestore(&m->lock, flags);
io_schedule();
}
@@ -944,20 +989,16 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
static void flush_multipath_work(struct multipath *m)
{
- unsigned long flags;
-
- spin_lock_irqsave(&m->lock, flags);
- m->pg_init_disabled = true;
- spin_unlock_irqrestore(&m->lock, flags);
+ set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ smp_mb__after_atomic();
flush_workqueue(kmpath_handlerd);
multipath_wait_for_pg_init_completion(m);
flush_workqueue(kmultipathd);
flush_work(&m->trigger_event);
- spin_lock_irqsave(&m->lock, flags);
- m->pg_init_disabled = false;
- spin_unlock_irqrestore(&m->lock, flags);
+ clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ smp_mb__after_atomic();
}
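/*
 * Why the barriers: set_bit()/clear_bit() do not order later loads and
 * stores, so flush_multipath_work() pairs each flag flip with
 * smp_mb__after_atomic().  Sketch of the two sides, assuming the
 * MPATHF_PG_INIT_DISABLED bit defined earlier in this patch:
 */
static void pg_init_disable_sketch(struct multipath *m)
{
	set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
	smp_mb__after_atomic();	/* flag visible before the flushes below */
	/* ... flush work that re-checks the flag ... */
}

static bool pg_init_disabled_sketch(struct multipath *m)
{
	return test_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
}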
static void multipath_dtr(struct dm_target *ti)
@@ -987,13 +1028,13 @@ static int fail_path(struct pgpath *pgpath)
pgpath->is_active = false;
pgpath->fail_count++;
- m->nr_valid_paths--;
+ atomic_dec(&m->nr_valid_paths);
if (pgpath == m->current_pgpath)
m->current_pgpath = NULL;
dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
- pgpath->path.dev->name, m->nr_valid_paths);
+ pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
schedule_work(&m->trigger_event);
@@ -1011,6 +1052,7 @@ static int reinstate_path(struct pgpath *pgpath)
int r = 0, run_queue = 0;
unsigned long flags;
struct multipath *m = pgpath->pg->m;
+ unsigned nr_valid_paths;
spin_lock_irqsave(&m->lock, flags);
@@ -1025,16 +1067,17 @@ static int reinstate_path(struct pgpath *pgpath)
pgpath->is_active = true;
- if (!m->nr_valid_paths++) {
+ nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
+ if (nr_valid_paths == 1) {
m->current_pgpath = NULL;
run_queue = 1;
} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
- m->pg_init_in_progress++;
+ atomic_inc(&m->pg_init_in_progress);
}
dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
- pgpath->path.dev->name, m->nr_valid_paths);
+ pgpath->path.dev->name, nr_valid_paths);
schedule_work(&m->trigger_event);
@@ -1152,8 +1195,9 @@ static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
spin_lock_irqsave(&m->lock, flags);
- if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
- m->pg_init_required = true;
+ if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
+ !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
+ set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
else
limit_reached = true;
@@ -1219,19 +1263,23 @@ static void pg_init_done(void *data, int errors)
m->current_pgpath = NULL;
m->current_pg = NULL;
}
- } else if (!m->pg_init_required)
+ } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
pg->bypassed = false;
- if (--m->pg_init_in_progress)
+ if (atomic_dec_return(&m->pg_init_in_progress) > 0)
/* Activations of other paths are still ongoing */
goto out;
- if (m->pg_init_required) {
- m->pg_init_delay_retry = delay_retry;
+ if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
+ if (delay_retry)
+ set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
+ else
+ clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
+
if (__pg_init_all_paths(m))
goto out;
}
- m->queue_io = false;
+ clear_bit(MPATHF_QUEUE_IO, &m->flags);
/*
* Wake up any thread waiting to suspend.
@@ -1287,7 +1335,6 @@ static int do_end_io(struct multipath *m, struct request *clone,
* clone bios for it and resubmit it later.
*/
int r = DM_ENDIO_REQUEUE;
- unsigned long flags;
if (!error && !clone->errors)
return 0; /* I/O complete */
@@ -1298,17 +1345,15 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (mpio->pgpath)
fail_path(mpio->pgpath);
- spin_lock_irqsave(&m->lock, flags);
- if (!m->nr_valid_paths) {
- if (!m->queue_if_no_path) {
- if (!__must_push_back(m))
+ if (!atomic_read(&m->nr_valid_paths)) {
+ if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+ if (!must_push_back(m))
r = -EIO;
} else {
if (error == -EBADE)
r = error;
}
}
- spin_unlock_irqrestore(&m->lock, flags);
return r;
}
@@ -1364,11 +1409,12 @@ static void multipath_postsuspend(struct dm_target *ti)
static void multipath_resume(struct dm_target *ti)
{
struct multipath *m = ti->private;
- unsigned long flags;
- spin_lock_irqsave(&m->lock, flags);
- m->queue_if_no_path = m->saved_queue_if_no_path;
- spin_unlock_irqrestore(&m->lock, flags);
+ if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
+ set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ else
+ clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+ smp_mb__after_atomic();
}
/*
@@ -1402,19 +1448,20 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
/* Features */
if (type == STATUSTYPE_INFO)
- DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
+ DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
+ atomic_read(&m->pg_init_count));
else {
- DMEMIT("%u ", m->queue_if_no_path +
+ DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
(m->pg_init_retries > 0) * 2 +
(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
- m->retain_attached_hw_handler);
- if (m->queue_if_no_path)
+ test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags));
+ if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
DMEMIT("queue_if_no_path ");
if (m->pg_init_retries)
DMEMIT("pg_init_retries %u ", m->pg_init_retries);
if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
- if (m->retain_attached_hw_handler)
+ if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
DMEMIT("retain_attached_hw_handler ");
}
@@ -1563,18 +1610,17 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
struct block_device **bdev, fmode_t *mode)
{
struct multipath *m = ti->private;
- unsigned long flags;
+ struct pgpath *current_pgpath;
int r;
- spin_lock_irqsave(&m->lock, flags);
+ current_pgpath = lockless_dereference(m->current_pgpath);
+ if (!current_pgpath)
+ current_pgpath = choose_pgpath(m, 0);
- if (!m->current_pgpath)
- __choose_pgpath(m, 0);
-
- if (m->current_pgpath) {
- if (!m->queue_io) {
- *bdev = m->current_pgpath->path.dev->bdev;
- *mode = m->current_pgpath->path.dev->mode;
+ if (current_pgpath) {
+ if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
+ *bdev = current_pgpath->path.dev->bdev;
+ *mode = current_pgpath->path.dev->mode;
r = 0;
} else {
/* pg_init has not started or completed */
@@ -1582,23 +1628,19 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
}
} else {
/* No path is available */
- if (m->queue_if_no_path)
+ if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
r = -ENOTCONN;
else
r = -EIO;
}
- spin_unlock_irqrestore(&m->lock, flags);
-
if (r == -ENOTCONN) {
- spin_lock_irqsave(&m->lock, flags);
- if (!m->current_pg) {
+ if (!lockless_dereference(m->current_pg)) {
/* Path status changed, redo selection */
- __choose_pgpath(m, 0);
+ (void) choose_pgpath(m, 0);
}
- if (m->pg_init_required)
- __pg_init_all_paths(m);
- spin_unlock_irqrestore(&m->lock, flags);
+ if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
+ pg_init_all_paths(m);
dm_table_run_md_queue_async(m->ti->table);
}
@@ -1649,39 +1691,37 @@ static int multipath_busy(struct dm_target *ti)
{
bool busy = false, has_active = false;
struct multipath *m = ti->private;
- struct priority_group *pg;
+ struct priority_group *pg, *next_pg;
struct pgpath *pgpath;
- unsigned long flags;
-
- spin_lock_irqsave(&m->lock, flags);
/* pg_init in progress or no paths available */
- if (m->pg_init_in_progress ||
- (!m->nr_valid_paths && m->queue_if_no_path)) {
- busy = true;
- goto out;
- }
+ if (atomic_read(&m->pg_init_in_progress) ||
+ (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)))
+ return true;
+
/* Guess which priority_group will be used at next mapping time */
- if (unlikely(!m->current_pgpath && m->next_pg))
- pg = m->next_pg;
- else if (likely(m->current_pg))
- pg = m->current_pg;
- else
+ pg = lockless_dereference(m->current_pg);
+ next_pg = lockless_dereference(m->next_pg);
+ if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg))
+ pg = next_pg;
+
+ if (!pg) {
/*
* We don't know which pg will be used at next mapping time.
- * We don't call __choose_pgpath() here to avoid to trigger
+ * We don't call choose_pgpath() here to avoid triggering
* pg_init just by busy checking.
* So we don't know whether underlying devices we will be using
* at next mapping time are busy or not. Just try mapping.
*/
- goto out;
+ return busy;
+ }
/*
* If there is one non-busy active path at least, the path selector
* will be able to select it. So we consider such a pg as not busy.
*/
busy = true;
- list_for_each_entry(pgpath, &pg->pgpaths, list)
+ list_for_each_entry(pgpath, &pg->pgpaths, list) {
if (pgpath->is_active) {
has_active = true;
if (!pgpath_busy(pgpath)) {
@@ -1689,17 +1729,16 @@ static int multipath_busy(struct dm_target *ti)
break;
}
}
+ }
- if (!has_active)
+ if (!has_active) {
/*
* No active path in this pg, so this pg won't be used and
* the current_pg will be changed at next mapping time.
* We need to try mapping to determine it.
*/
busy = false;
-
-out:
- spin_unlock_irqrestore(&m->lock, flags);
+ }
return busy;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index a0901214a..52532745a 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1037,6 +1037,11 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
if (!mddev->events && super_init_validation(mddev, rdev))
return -EINVAL;
+ if (le32_to_cpu(sb->features)) {
+ rs->ti->error = "Unable to assemble array: No feature flags supported yet";
+ return -EINVAL;
+ }
+
/* Enable bitmap creation for RAID levels != 0 */
mddev->bitmap_info.offset = (rs->raid_type->level) ? to_sector(4096) : 0;
rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
@@ -1718,7 +1723,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 7, 0},
+ .version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index f9e8f0bef..626a5ec04 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1348,13 +1348,13 @@ static void dm_table_verify_integrity(struct dm_table *t)
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- unsigned flush = (*(unsigned *)data);
+ unsigned long flush = (unsigned long) data;
struct request_queue *q = bdev_get_queue(dev->bdev);
- return q && (q->flush_flags & flush);
+ return q && (q->queue_flags & flush);
}
-static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
+static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
{
struct dm_target *ti;
unsigned i = 0;
@@ -1375,7 +1375,7 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
return true;
if (ti->type->iterate_devices &&
- ti->type->iterate_devices(ti, device_flush_capable, &flush))
+ ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
return true;
}
@@ -1506,7 +1506,7 @@ static bool dm_table_supports_discards(struct dm_table *t)
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
- unsigned flush = 0;
+ bool wc = false, fua = false;
/*
* Copy table's limits to the DM device's request_queue
@@ -1518,12 +1518,12 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
else
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
- if (dm_table_supports_flush(t, REQ_FLUSH)) {
- flush |= REQ_FLUSH;
- if (dm_table_supports_flush(t, REQ_FUA))
- flush |= REQ_FUA;
+ if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
+ wc = true;
+ if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
+ fua = true;
}
- blk_queue_flush(q, flush);
+ blk_queue_write_cache(q, wc, fua);
if (!dm_table_discard_zeroes_data(t))
q->limits.discard_zeroes_data = 0;
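/*
 * Interface change tracked by this hunk, as a usage sketch: the old
 * q->flush_flags (REQ_FLUSH/REQ_FUA) become the queue flags
 * QUEUE_FLAG_WC/QUEUE_FLAG_FUA, programmed with blk_queue_write_cache().
 */
static void write_cache_sketch(struct request_queue *q, bool wc, bool fua)
{
	/* old API: blk_queue_flush(q, REQ_FLUSH | (fua ? REQ_FUA : 0)); */
	blk_queue_write_cache(q, wc, fua);

	/* capability checks now read the queue flags directly */
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		;	/* the device reports a volatile write cache */
}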
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 92237b6fa..fc803d50f 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -322,56 +322,6 @@ struct thin_c {
/*----------------------------------------------------------------*/
-/**
- * __blkdev_issue_discard_async - queue a discard with async completion
- * @bdev: blockdev to issue discard for
- * @sector: start sector
- * @nr_sects: number of sectors to discard
- * @gfp_mask: memory allocation flags (for bio_alloc)
- * @flags: BLKDEV_IFL_* flags to control behaviour
- * @parent_bio: parent discard bio that all sub discards get chained to
- *
- * Description:
- * Asynchronously issue a discard request for the sectors in question.
- */
-static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
- struct bio *parent_bio)
-{
- struct request_queue *q = bdev_get_queue(bdev);
- int type = REQ_WRITE | REQ_DISCARD;
- struct bio *bio;
-
- if (!q || !nr_sects)
- return -ENXIO;
-
- if (!blk_queue_discard(q))
- return -EOPNOTSUPP;
-
- if (flags & BLKDEV_DISCARD_SECURE) {
- if (!blk_queue_secdiscard(q))
- return -EOPNOTSUPP;
- type |= REQ_SECURE;
- }
-
- /*
- * Required bio_put occurs in bio_endio thanks to bio_chain below
- */
- bio = bio_alloc(gfp_mask, 1);
- if (!bio)
- return -ENOMEM;
-
- bio_chain(bio, parent_bio);
-
- bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = bdev;
- bio->bi_iter.bi_size = nr_sects << 9;
-
- submit_bio(type, bio);
-
- return 0;
-}
-
static bool block_size_is_power_of_two(struct pool *pool)
{
return pool->sectors_per_block_shift >= 0;
@@ -384,14 +334,55 @@ static sector_t block_to_sectors(struct pool *pool, dm_block_t b)
(b * pool->sectors_per_block);
}
-static int issue_discard(struct thin_c *tc, dm_block_t data_b, dm_block_t data_e,
- struct bio *parent_bio)
+/*----------------------------------------------------------------*/
+
+struct discard_op {
+ struct thin_c *tc;
+ struct blk_plug plug;
+ struct bio *parent_bio;
+ struct bio *bio;
+};
+
+static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
+{
+ BUG_ON(!parent);
+
+ op->tc = tc;
+ blk_start_plug(&op->plug);
+ op->parent_bio = parent;
+ op->bio = NULL;
+}
+
+static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
{
+ struct thin_c *tc = op->tc;
sector_t s = block_to_sectors(tc->pool, data_b);
sector_t len = block_to_sectors(tc->pool, data_e - data_b);
- return __blkdev_issue_discard_async(tc->pool_dev->bdev, s, len,
- GFP_NOWAIT, 0, parent_bio);
+ return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
+ GFP_NOWAIT, REQ_WRITE | REQ_DISCARD, &op->bio);
+}
+
+static void end_discard(struct discard_op *op, int r)
+{
+ if (op->bio) {
+ /*
+ * Even if one of the calls to issue_discard failed, we
+ * need to wait for the chain to complete.
+ */
+ bio_chain(op->bio, op->parent_bio);
+ submit_bio(REQ_WRITE | REQ_DISCARD, op->bio);
+ }
+
+ blk_finish_plug(&op->plug);
+
+ /*
+ * Even if r is set, there could be sub discards in flight that we
+ * need to wait for.
+ */
+ if (r && !op->parent_bio->bi_error)
+ op->parent_bio->bi_error = r;
+ bio_endio(op->parent_bio);
}
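/*
 * Usage sketch for the discard_op helpers above: callers bracket any
 * number of issue_discard() calls with begin_discard()/end_discard();
 * the parent bio then completes only after every chained sub-discard
 * has finished.
 */
static void discard_range_sketch(struct thin_c *tc, struct bio *parent,
				 dm_block_t b, dm_block_t e)
{
	struct discard_op op;
	int r;

	begin_discard(&op, tc, parent);	/* opens a plug, no bio yet */
	r = issue_discard(&op, b, e);	/* may chain several bios */
	end_discard(&op, r);		/* chains to parent, ends it */
}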
/*----------------------------------------------------------------*/
@@ -632,7 +623,7 @@ static void error_retry_list(struct pool *pool)
{
int error = get_pool_io_error_code(pool);
- return error_retry_list_with_code(pool, error);
+ error_retry_list_with_code(pool, error);
}
/*
@@ -1006,24 +997,28 @@ static void process_prepared_discard_no_passdown(struct dm_thin_new_mapping *m)
mempool_free(m, tc->pool->mapping_pool);
}
-static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
+/*----------------------------------------------------------------*/
+
+static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
{
/*
* We've already unmapped this range of blocks, but before we
* passdown we have to check that these blocks are now unused.
*/
- int r;
+ int r = 0;
bool used = true;
struct thin_c *tc = m->tc;
struct pool *pool = tc->pool;
dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
+ struct discard_op op;
+ begin_discard(&op, tc, m->bio);
while (b != end) {
/* find start of unmapped run */
for (; b < end; b++) {
r = dm_pool_block_is_used(pool->pmd, b, &used);
if (r)
- return r;
+ goto out;
if (!used)
break;
@@ -1036,20 +1031,20 @@ static int passdown_double_checking_shared_status(struct dm_thin_new_mapping *m)
for (e = b + 1; e != end; e++) {
r = dm_pool_block_is_used(pool->pmd, e, &used);
if (r)
- return r;
+ goto out;
if (used)
break;
}
- r = issue_discard(tc, b, e, m->bio);
+ r = issue_discard(&op, b, e);
if (r)
- return r;
+ goto out;
b = e;
}
-
- return 0;
+out:
+ end_discard(&op, r);
}
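/*
 * Shape of the scan above, simplified: walk [b, end) looking for
 * maximal runs of unused blocks and discard each run with one call.
 * is_used() stands in for dm_pool_block_is_used() minus error handling.
 */
static void unused_run_scan_sketch(dm_block_t b, dm_block_t end,
				   bool (*is_used)(dm_block_t))
{
	dm_block_t e;

	while (b != end) {
		while (b < end && is_used(b))	/* skip used blocks */
			b++;
		if (b == end)
			break;
		for (e = b + 1; e != end && !is_used(e); e++)
			;			/* extend the unused run */
		/* issue one discard for [b, e) here */
		b = e;
	}
}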
static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
@@ -1059,20 +1054,21 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
struct pool *pool = tc->pool;
r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
- if (r)
+ if (r) {
metadata_operation_failed(pool, "dm_thin_remove_range", r);
+ bio_io_error(m->bio);
- else if (m->maybe_shared)
- r = passdown_double_checking_shared_status(m);
- else
- r = issue_discard(tc, m->data_block, m->data_block + (m->virt_end - m->virt_begin), m->bio);
+ } else if (m->maybe_shared) {
+ passdown_double_checking_shared_status(m);
+
+ } else {
+ struct discard_op op;
+ begin_discard(&op, tc, m->bio);
+ r = issue_discard(&op, m->data_block,
+ m->data_block + (m->virt_end - m->virt_begin));
+ end_discard(&op, r);
+ }
- /*
- * Even if r is set, there could be sub discards in flight that we
- * need to wait for.
- */
- m->bio->bi_error = r;
- bio_endio(m->bio);
cell_defer_no_holder(tc, m->cell);
mempool_free(m, pool->mapping_pool);
}
@@ -1494,17 +1490,6 @@ static void process_discard_cell_no_passdown(struct thin_c *tc,
pool->process_prepared_discard(m);
}
-/*
- * __bio_inc_remaining() is used to defer parent bios's end_io until
- * we _know_ all chained sub range discard bios have completed.
- */
-static inline void __bio_inc_remaining(struct bio *bio)
-{
- bio->bi_flags |= (1 << BIO_CHAIN);
- smp_mb__before_atomic();
- atomic_inc(&bio->__bi_remaining);
-}
-
static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end,
struct bio *bio)
{
@@ -1554,13 +1539,13 @@ static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t
/*
* The parent bio must not complete before sub discard bios are
- * chained to it (see __blkdev_issue_discard_async's bio_chain)!
+ * chained to it (see end_discard's bio_chain)!
*
* This per-mapping bi_remaining increment is paired with
* the implicit decrement that occurs via bio_endio() in
- * process_prepared_discard_{passdown,no_passdown}.
+ * end_discard().
*/
- __bio_inc_remaining(bio);
+ bio_inc_remaining(bio);
if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
pool->process_prepared_discard(m);
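/*
 * Reference-count sketch: bio_inc_remaining(parent) bumps the parent's
 * __bi_remaining so bio_endio() on it defers completion; the matching
 * drop is the bio_endio(parent) in end_discard(), which completes the
 * parent once the count reaches zero.
 */
static void parent_completion_sketch(struct bio *parent)
{
	bio_inc_remaining(parent);	/* +1: hold the parent open */
	/* ... sub-discards are prepared and chained meanwhile ... */
	bio_endio(parent);		/* -1: completes at zero */
}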
@@ -3899,7 +3884,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 18, 0},
+ .version = {1, 19, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -4273,7 +4258,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 18, 0},
+ .version = {1, 19, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3d3ac1328..1b2f96205 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -674,7 +674,7 @@ static void free_io(struct mapped_device *md, struct dm_io *io)
mempool_free(io, md->io_pool);
}
-static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
+static void free_tio(struct dm_target_io *tio)
{
bio_put(&tio->clone);
}
@@ -1055,7 +1055,7 @@ static void clone_endio(struct bio *bio)
!bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
disable_write_same(md);
- free_tio(md, tio);
+ free_tio(tio);
dec_pending(io, error);
}
@@ -1517,7 +1517,6 @@ static void __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
- struct mapped_device *md;
struct bio *clone = &tio->clone;
struct dm_target *ti = tio->ti;
@@ -1540,9 +1539,8 @@ static void __map_bio(struct dm_target_io *tio)
generic_make_request(clone);
} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
/* error the io and bail out, or requeue it if needed */
- md = tio->io->md;
dec_pending(tio->io, r);
- free_tio(md, tio);
+ free_tio(tio);
} else if (r != DM_MAPIO_SUBMITTED) {
DMWARN("unimplemented target map return value: %d", r);
BUG();
@@ -1663,7 +1661,7 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
tio->len_ptr = len;
r = clone_bio(tio, bio, sector, *len);
if (r < 0) {
- free_tio(ci->md, tio);
+ free_tio(tio);
break;
}
__map_bio(tio);
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index dd97d4245..41573f1f6 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -61,6 +61,10 @@ struct resync_info {
* the lock.
*/
#define MD_CLUSTER_SEND_LOCKED_ALREADY 5
+/* We should only receive messages after the node has joined the cluster
+ * and set up all the related info such as bitmap and personality */
+#define MD_CLUSTER_ALREADY_IN_CLUSTER 6
+#define MD_CLUSTER_PENDING_RECV_EVENT 7
+#define MD_CLUSTER_PENDING_RECV_EVENT 7
struct md_cluster_info {
@@ -85,6 +89,9 @@ struct md_cluster_info {
struct completion newdisk_completion;
wait_queue_head_t wait;
unsigned long state;
+ /* record the region in RESYNCING message */
+ sector_t sync_low;
+ sector_t sync_hi;
};
enum msg_type {
@@ -284,11 +291,14 @@ static void recover_bitmaps(struct md_thread *thread)
goto dlm_unlock;
}
if (hi > 0) {
- /* TODO:Wait for current resync to get over */
- set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
if (lo < mddev->recovery_cp)
mddev->recovery_cp = lo;
- md_check_recovery(mddev);
+ /* wake up the thread to continue resync in case
+ * it is not finished */
+ if (mddev->recovery_cp != MaxSector) {
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
}
dlm_unlock:
dlm_unlock_sync(bm_lockres);
@@ -370,8 +380,12 @@ static void ack_bast(void *arg, int mode)
struct dlm_lock_resource *res = arg;
struct md_cluster_info *cinfo = res->mddev->cluster_info;
- if (mode == DLM_LOCK_EX)
- md_wakeup_thread(cinfo->recv_thread);
+ if (mode == DLM_LOCK_EX) {
+ if (test_bit(MD_CLUSTER_ALREADY_IN_CLUSTER, &cinfo->state))
+ md_wakeup_thread(cinfo->recv_thread);
+ else
+ set_bit(MD_CLUSTER_PENDING_RECV_EVENT, &cinfo->state);
+ }
}
static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
@@ -408,6 +422,30 @@ static void process_suspend_info(struct mddev *mddev,
md_wakeup_thread(mddev->thread);
return;
}
+
+ /*
+ * The bitmaps are not same for different nodes
+ * if RESYNCING is happening in one node, then
+ * the node which received the RESYNCING message
+ * probably will perform resync with the region
+ * [lo, hi] again, so we could reduce resync time
+ * a lot if we can ensure that the bitmaps among
+ * different nodes are match up well.
+ *
+ * sync_low/hi is used to record the region which
+ * arrived in the previous RESYNCING message,
+ *
+ * Call bitmap_sync_with_cluster to clear
+ * NEEDED_MASK and set RESYNC_MASK since
+ * resync thread is running in another node,
+ * so we don't need to do the resync again
+ * with the same section */
+ bitmap_sync_with_cluster(mddev, cinfo->sync_low,
+ cinfo->sync_hi,
+ lo, hi);
+ cinfo->sync_low = lo;
+ cinfo->sync_hi = hi;
+
s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
if (!s)
return;
@@ -482,11 +520,13 @@ static void process_readd_disk(struct mddev *mddev, struct cluster_msg *msg)
__func__, __LINE__, le32_to_cpu(msg->raid_slot));
}
-static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
+static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
{
+ int ret = 0;
+
if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
"node %d received it's own msg\n", le32_to_cpu(msg->slot)))
- return;
+ return -1;
switch (le32_to_cpu(msg->type)) {
case METADATA_UPDATED:
process_metadata_update(mddev, msg);
@@ -509,9 +549,11 @@ static void process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
__recover_slot(mddev, le32_to_cpu(msg->slot));
break;
default:
+ ret = -1;
pr_warn("%s:%d Received unknown message from %d\n",
__func__, __LINE__, msg->slot);
}
+ return ret;
}
/*
@@ -535,7 +577,9 @@ static void recv_daemon(struct md_thread *thread)
/* read lvb and wake up thread to process this message_lockres */
memcpy(&msg, message_lockres->lksb.sb_lvbptr, sizeof(struct cluster_msg));
- process_recvd_msg(thread->mddev, &msg);
+ ret = process_recvd_msg(thread->mddev, &msg);
+ if (ret)
+ goto out;
/*release CR on ack_lockres*/
ret = dlm_unlock_sync(ack_lockres);
@@ -549,6 +593,7 @@ static void recv_daemon(struct md_thread *thread)
ret = dlm_lock_sync(ack_lockres, DLM_LOCK_CR);
if (unlikely(ret != 0))
pr_info("lock CR on ack failed return %d\n", ret);
+out:
/*release CR on message_lockres*/
ret = dlm_unlock_sync(message_lockres);
if (unlikely(ret != 0))
@@ -778,17 +823,24 @@ static int join(struct mddev *mddev, int nodes)
cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0);
if (!cinfo->token_lockres)
goto err;
- cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
- if (!cinfo->ack_lockres)
- goto err;
cinfo->no_new_dev_lockres = lockres_init(mddev, "no-new-dev", NULL, 0);
if (!cinfo->no_new_dev_lockres)
goto err;
+ ret = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
+ if (ret) {
+ ret = -EAGAIN;
+ pr_err("md-cluster: can't join cluster to avoid lock issue\n");
+ goto err;
+ }
+ cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
+ if (!cinfo->ack_lockres)
+ goto err;
/* get sync CR lock on ACK. */
if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
pr_err("md-cluster: failed to get a sync CR lock on ACK!(%d)\n",
ret);
+ dlm_unlock_sync(cinfo->token_lockres);
/* get sync CR lock on no-new-dev. */
if (dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR))
pr_err("md-cluster: failed to get a sync CR lock on no-new-dev!(%d)\n", ret);
@@ -809,12 +861,10 @@ static int join(struct mddev *mddev, int nodes)
if (!cinfo->resync_lockres)
goto err;
- ret = gather_all_resync_info(mddev, nodes);
- if (ret)
- goto err;
-
return 0;
err:
+ md_unregister_thread(&cinfo->recovery_thread);
+ md_unregister_thread(&cinfo->recv_thread);
lockres_free(cinfo->message_lockres);
lockres_free(cinfo->token_lockres);
lockres_free(cinfo->ack_lockres);
@@ -828,6 +878,19 @@ err:
return ret;
}
+static void load_bitmaps(struct mddev *mddev, int total_slots)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ /* load all the node's bitmap info for resync */
+ if (gather_all_resync_info(mddev, total_slots))
+ pr_err("md-cluster: failed to gather all resync info\n");
+ set_bit(MD_CLUSTER_ALREADY_IN_CLUSTER, &cinfo->state);
+ /* wake up recv thread in case something needs to be handled */
+ if (test_and_clear_bit(MD_CLUSTER_PENDING_RECV_EVENT, &cinfo->state))
+ md_wakeup_thread(cinfo->recv_thread);
+}
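/*
 * Handshake sketch between ack_bast() and load_bitmaps(), pulling the
 * two hunks above together: before the node has finished joining, bast
 * callbacks only record a pending event; join-time setup then consumes
 * the pending bit exactly once and wakes the receive thread.
 */
static void bast_side_sketch(struct md_cluster_info *cinfo)
{
	if (test_bit(MD_CLUSTER_ALREADY_IN_CLUSTER, &cinfo->state))
		md_wakeup_thread(cinfo->recv_thread);
	else
		set_bit(MD_CLUSTER_PENDING_RECV_EVENT, &cinfo->state);
}

static void join_side_sketch(struct md_cluster_info *cinfo)
{
	set_bit(MD_CLUSTER_ALREADY_IN_CLUSTER, &cinfo->state);
	if (test_and_clear_bit(MD_CLUSTER_PENDING_RECV_EVENT, &cinfo->state))
		md_wakeup_thread(cinfo->recv_thread);
}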
+
static void resync_bitmap(struct mddev *mddev)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
@@ -937,7 +1000,6 @@ static void metadata_update_cancel(struct mddev *mddev)
static int resync_start(struct mddev *mddev)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
- cinfo->resync_lockres->flags |= DLM_LKF_NOQUEUE;
return dlm_lock_sync(cinfo->resync_lockres, DLM_LOCK_EX);
}
@@ -967,7 +1029,6 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
static int resync_finish(struct mddev *mddev)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
- cinfo->resync_lockres->flags &= ~DLM_LKF_NOQUEUE;
dlm_unlock_sync(cinfo->resync_lockres);
return resync_info_update(mddev, 0, 0);
}
@@ -1171,6 +1232,7 @@ static struct md_cluster_operations cluster_ops = {
.add_new_disk_cancel = add_new_disk_cancel,
.new_disk_ack = new_disk_ack,
.remove_disk = remove_disk,
+ .load_bitmaps = load_bitmaps,
.gather_bitmaps = gather_bitmaps,
.lock_all_bitmaps = lock_all_bitmaps,
.unlock_all_bitmaps = unlock_all_bitmaps,
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index 45ce6c97d..e765499ba 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -23,6 +23,7 @@ struct md_cluster_operations {
void (*add_new_disk_cancel)(struct mddev *mddev);
int (*new_disk_ack)(struct mddev *mddev, bool ack);
int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
+ void (*load_bitmaps)(struct mddev *mddev, int total_slots);
int (*gather_bitmaps)(struct md_rdev *rdev);
int (*lock_all_bitmaps)(struct mddev *mddev);
void (*unlock_all_bitmaps)(struct mddev *mddev);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 85b16aadd..866825f10 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2291,19 +2291,24 @@ void md_update_sb(struct mddev *mddev, int force_change)
return;
}
+repeat:
if (mddev_is_clustered(mddev)) {
if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
force_change = 1;
+ if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ nospares = 1;
ret = md_cluster_ops->metadata_update_start(mddev);
/* Has someone else updated the sb? */
if (!does_sb_need_changing(mddev)) {
if (ret == 0)
md_cluster_ops->metadata_update_cancel(mddev);
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
+ BIT(MD_CHANGE_DEVS) |
+ BIT(MD_CHANGE_CLEAN));
return;
}
}
-repeat:
+
/* First make sure individual recovery_offsets are correct */
rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 &&
@@ -2430,15 +2435,14 @@ repeat:
md_super_wait(mddev);
/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
- spin_lock(&mddev->lock);
+ if (mddev_is_clustered(mddev) && ret == 0)
+ md_cluster_ops->metadata_update_finish(mddev);
+
if (mddev->in_sync != sync_req ||
- test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
+ !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING),
+ BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN)))
/* have to write it out again */
- spin_unlock(&mddev->lock);
goto repeat;
- }
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
- spin_unlock(&mddev->lock);
wake_up(&mddev->sb_wait);
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
@@ -2452,9 +2456,6 @@ repeat:
clear_bit(BlockedBadBlocks, &rdev->flags);
wake_up(&rdev->blocked_wait);
}
-
- if (mddev_is_clustered(mddev) && ret == 0)
- md_cluster_ops->metadata_update_finish(mddev);
}
EXPORT_SYMBOL(md_update_sb);
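/*
 * Semantics sketch for bit_clear_unless(), which md_update_sb() now
 * uses to drop MD_CHANGE_PENDING only when no new change bits raced in
 * (assuming the cmpxchg-based helper added alongside this series in
 * <linux/bitops.h>): clear the bits unless any test bit is set, and
 * report whether the clear happened.
 */
static bool bit_clear_unless_sketch(unsigned long *ptr,
				    unsigned long clear, unsigned long test)
{
	unsigned long old, new;

	do {
		old = READ_ONCE(*ptr);
		if (old & test)
			return false;	/* raced: caller rewrites the sb */
		new = old & ~clear;
	} while (cmpxchg(ptr, old, new) != old);

	return true;
}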
@@ -4816,6 +4817,10 @@ array_size_store(struct mddev *mddev, const char *buf, size_t len)
if (err)
return err;
+ /* cluster raid doesn't support changing array_sectors */
+ if (mddev_is_clustered(mddev))
+ return -EINVAL;
+
if (strncmp(buf, "default", 7) == 0) {
if (mddev->pers)
sectors = mddev->pers->size(mddev, 0, 0);
@@ -5039,7 +5044,7 @@ static int md_alloc(dev_t dev, char *name)
disk->fops = &md_fops;
disk->private_data = mddev;
disk->queue = mddev->queue;
- blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
+ blk_queue_write_cache(mddev->queue, true, true);
/* Allow extended partitions. This makes the
* 'mdp' device redundant, but we can't really
* remove it now.
@@ -6437,6 +6442,10 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
int rv;
int fit = (num_sectors == 0);
+ /* cluster raid doesn't support updating the size */
+ if (mddev_is_clustered(mddev))
+ return -EINVAL;
+
if (mddev->pers->resize == NULL)
return -EINVAL;
/* The "num_sectors" is the number of sectors of each device that
@@ -7785,7 +7794,7 @@ void md_do_sync(struct md_thread *thread)
struct md_rdev *rdev;
char *desc, *action = NULL;
struct blk_plug plug;
- bool cluster_resync_finished = false;
+ int ret;
/* just in case the thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7795,6 +7804,19 @@ void md_do_sync(struct md_thread *thread)
return;
}
+ if (mddev_is_clustered(mddev)) {
+ ret = md_cluster_ops->resync_start(mddev);
+ if (ret)
+ goto skip;
+
+ if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
+ && ((unsigned long long)mddev->curr_resync_completed
+ < (unsigned long long)mddev->resync_max_sectors))
+ goto skip;
+ }
+
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
desc = "data-check";
@@ -8089,11 +8111,6 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync_completed = mddev->curr_resync;
sysfs_notify(&mddev->kobj, NULL, "sync_completed");
}
- /* tell personality and other nodes that we are finished */
- if (mddev_is_clustered(mddev)) {
- md_cluster_ops->resync_finish(mddev);
- cluster_resync_finished = true;
- }
mddev->pers->sync_request(mddev, max_sectors, &skipped);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
@@ -8130,12 +8147,18 @@ void md_do_sync(struct md_thread *thread)
}
}
skip:
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
-
if (mddev_is_clustered(mddev) &&
- test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
- !cluster_resync_finished)
+ ret == 0) {
+ /* set CHANGE_PENDING here since another update may be
+ * needed, so that other nodes are informed */
+ set_mask_bits(&mddev->flags, 0,
+ BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS));
+ md_wakeup_thread(mddev->thread);
+ wait_event(mddev->sb_wait,
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags));
md_cluster_ops->resync_finish(mddev);
+ } else
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
spin_lock(&mddev->lock);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
@@ -8226,18 +8249,9 @@ static void md_start_sync(struct work_struct *ws)
struct mddev *mddev = container_of(ws, struct mddev, del_work);
int ret = 0;
- if (mddev_is_clustered(mddev)) {
- ret = md_cluster_ops->resync_start(mddev);
- if (ret) {
- mddev->sync_thread = NULL;
- goto out;
- }
- }
-
mddev->sync_thread = md_register_thread(md_do_sync,
mddev,
"resync");
-out:
if (!mddev->sync_thread) {
if (!(mddev_is_clustered(mddev) && ret == -EAGAIN))
printk(KERN_ERR "%s: could not start resync"
@@ -8536,6 +8550,7 @@ EXPORT_SYMBOL(md_finish_reshape);
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new)
{
+ struct mddev *mddev = rdev->mddev;
int rv;
if (is_new)
s += rdev->new_data_offset;
@@ -8545,8 +8560,8 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
if (rv == 0) {
/* Make sure they get written out promptly */
sysfs_notify_dirent_safe(rdev->sysfs_state);
- set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
- set_bit(MD_CHANGE_PENDING, &rdev->mddev->flags);
+ set_mask_bits(&mddev->flags, 0,
+ BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING));
md_wakeup_thread(rdev->mddev->thread);
return 1;
} else
@@ -8680,6 +8695,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
ret = remove_and_add_spares(mddev, rdev2);
pr_info("Activated spare: %s\n",
bdevname(rdev2->bdev,b));
+ /* wake up mddev->thread here, so the array can
+ * perform resync with the newly activated disk */
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+
}
/* device faulty
* We just want to do the minimum to mark the disk
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a7f2b9c9f..c7c8cde0a 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1474,8 +1474,8 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
* if recovery is running, make sure it aborts.
*/
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ set_mask_bits(&mddev->flags, 0,
+ BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
printk(KERN_ALERT
"md/raid1:%s: Disk failure on %s, disabling device.\n"
"md/raid1:%s: Operation continuing on %d devices.\n",
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e3fd725d5..c7de2a53e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1102,8 +1102,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
bio->bi_iter.bi_sector < conf->reshape_progress))) {
/* Need to update reshape_position in metadata */
mddev->reshape_position = conf->reshape_progress;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ set_mask_bits(&mddev->flags, 0,
+ BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_PENDING, &mddev->flags));
@@ -1591,8 +1591,8 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ set_mask_bits(&mddev->flags, 0,
+ BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
spin_unlock_irqrestore(&conf->device_lock, flags);
printk(KERN_ALERT
"md/raid10:%s: Disk failure on %s, disabling device.\n"
@@ -3782,8 +3782,10 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
return ret;
}
md_set_array_sectors(mddev, size);
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ if (mddev->queue) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ }
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp > oldsize) {
mddev->recovery_cp = oldsize;
@@ -4593,8 +4595,10 @@ static void raid10_finish_reshape(struct mddev *mddev)
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
mddev->resync_max_sectors = size;
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ if (mddev->queue) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ }
} else {
int d;
for (d = conf->geo.raid_disks ;
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 9531f5f05..e889e2deb 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -712,8 +712,8 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log,
* in_teardown check workaround this issue.
*/
if (!log->in_teardown) {
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ set_mask_bits(&mddev->flags, 0,
+ BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
md_wakeup_thread(mddev->thread);
wait_event(mddev->sb_wait,
!test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
@@ -1188,6 +1188,7 @@ ioerr:
int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
+ struct request_queue *q = bdev_get_queue(rdev->bdev);
struct r5l_log *log;
if (PAGE_SIZE != 4096)
@@ -1197,7 +1198,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
return -ENOMEM;
log->rdev = rdev;
- log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);
+ log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
sizeof(rdev->mddev->uuid));
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e48c262ce..8959e6dd3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2514,8 +2514,8 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- set_bit(MD_CHANGE_PENDING, &mddev->flags);
+ set_mask_bits(&mddev->flags, 0,
+ BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING));
printk(KERN_ALERT
"md/raid:%s: Disk failure on %s, disabling device.\n"
"md/raid:%s: Operation continuing on %d devices.\n",
@@ -7572,8 +7572,10 @@ static void raid5_finish_reshape(struct mddev *mddev)
if (mddev->delta_disks > 0) {
md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ if (mddev->queue) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ }
} else {
int d;
spin_lock_irq(&conf->device_lock);
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index 21154dd87..326df0ad7 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -19,3 +19,4 @@ config CYPRESS_FIRMWARE
source "drivers/media/common/b2c2/Kconfig"
source "drivers/media/common/saa7146/Kconfig"
source "drivers/media/common/siano/Kconfig"
+source "drivers/media/common/v4l2-tpg/Kconfig"
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index 89b795df2..2d1b0a025 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -1,4 +1,4 @@
-obj-y += b2c2/ saa7146/ siano/
+obj-y += b2c2/ saa7146/ siano/ v4l2-tpg/
obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
obj-$(CONFIG_CYPRESS_FIRMWARE) += cypress_firmware.o
diff --git a/drivers/media/common/v4l2-tpg/Kconfig b/drivers/media/common/v4l2-tpg/Kconfig
new file mode 100644
index 000000000..7456fc1c4
--- /dev/null
+++ b/drivers/media/common/v4l2-tpg/Kconfig
@@ -0,0 +1,2 @@
+config VIDEO_V4L2_TPG
+ tristate
diff --git a/drivers/media/common/v4l2-tpg/Makefile b/drivers/media/common/v4l2-tpg/Makefile
new file mode 100644
index 000000000..f588df466
--- /dev/null
+++ b/drivers/media/common/v4l2-tpg/Makefile
@@ -0,0 +1,3 @@
+v4l2-tpg-objs := v4l2-tpg-core.o v4l2-tpg-colors.o
+
+obj-$(CONFIG_VIDEO_V4L2_TPG) += v4l2-tpg.o
diff --git a/drivers/media/platform/vivid/vivid-tpg-colors.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c
index 2299f0ce4..9bcbd3184 100644
--- a/drivers/media/platform/vivid/vivid-tpg-colors.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c
@@ -1,5 +1,5 @@
/*
- * vivid-color.c - A table that converts colors to various colorspaces
+ * v4l2-tpg-colors.c - A table that converts colors to various colorspaces
*
* The test pattern generator uses the tpg_colors for its test patterns.
* For testing colorspaces the first 8 colors of that table need to be
@@ -12,7 +12,7 @@
* This source also contains the code used to generate the tpg_csc_colors
* table. Run the following command to compile it:
*
- * gcc vivid-tpg-colors.c -DCOMPILE_APP -o gen-colors -lm
+ * gcc v4l2-tpg-colors.c -DCOMPILE_APP -o gen-colors -lm
*
* and run the utility.
*
@@ -36,8 +36,7 @@
*/
#include <linux/videodev2.h>
-
-#include "vivid-tpg-colors.h"
+#include <media/v4l2-tpg-colors.h>
/* sRGB colors with range [0-255] */
const struct color tpg_colors[TPG_COLOR_MAX] = {
diff --git a/drivers/media/platform/vivid/vivid-tpg.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index da862bb2e..cf1dadd0b 100644
--- a/drivers/media/platform/vivid/vivid-tpg.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -1,5 +1,5 @@
/*
- * vivid-tpg.c - Test Pattern Generator
+ * v4l2-tpg-core.c - Test Pattern Generator
*
* Note: gen_twopix and tpg_gen_text are based on code from vivi.c. See the
* vivi.c source for the copyright information of those functions.
@@ -20,7 +20,8 @@
* SOFTWARE.
*/
-#include "vivid-tpg.h"
+#include <linux/module.h>
+#include <media/v4l2-tpg.h>
/* Must remain in sync with enum tpg_pattern */
const char * const tpg_pattern_strings[] = {
@@ -48,6 +49,7 @@ const char * const tpg_pattern_strings[] = {
"Noise",
NULL
};
+EXPORT_SYMBOL_GPL(tpg_pattern_strings);
/* Must remain in sync with enum tpg_aspect */
const char * const tpg_aspect_strings[] = {
@@ -58,6 +60,7 @@ const char * const tpg_aspect_strings[] = {
"16x9 Anamorphic",
NULL
};
+EXPORT_SYMBOL_GPL(tpg_aspect_strings);
/*
* Sine table: sin[0] = 127 * sin(-180 degrees)
@@ -93,6 +96,7 @@ void tpg_set_font(const u8 *f)
{
font8x16 = f;
}
+EXPORT_SYMBOL_GPL(tpg_set_font);
void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h)
{
@@ -114,6 +118,7 @@ void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h)
tpg->colorspace = V4L2_COLORSPACE_SRGB;
tpg->perc_fill = 100;
}
+EXPORT_SYMBOL_GPL(tpg_init);
int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
{
@@ -150,6 +155,7 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
}
return 0;
}
+EXPORT_SYMBOL_GPL(tpg_alloc);
void tpg_free(struct tpg_data *tpg)
{
@@ -174,6 +180,7 @@ void tpg_free(struct tpg_data *tpg)
tpg->random_line[plane] = NULL;
}
}
+EXPORT_SYMBOL_GPL(tpg_free);
bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
{
@@ -403,6 +410,7 @@ bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc)
}
return true;
}
+EXPORT_SYMBOL_GPL(tpg_s_fourcc);
void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
const struct v4l2_rect *compose)
@@ -418,6 +426,7 @@ void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
tpg->scaled_width = 2;
tpg->recalc_lines = true;
}
+EXPORT_SYMBOL_GPL(tpg_s_crop_compose);
void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height,
u32 field)
@@ -442,6 +451,7 @@ void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height,
(2 * tpg->hdownsampling[p]);
tpg->recalc_square_border = true;
}
+EXPORT_SYMBOL_GPL(tpg_reset_source);
static enum tpg_color tpg_get_textbg_color(struct tpg_data *tpg)
{
@@ -1250,6 +1260,7 @@ unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line)
return 0;
}
}
+EXPORT_SYMBOL_GPL(tpg_g_interleaved_plane);
/* Return how many pattern lines are used by the current pattern. */
static unsigned tpg_get_pat_lines(const struct tpg_data *tpg)
@@ -1725,6 +1736,7 @@ void tpg_gen_text(const struct tpg_data *tpg, u8 *basep[TPG_MAX_PLANES][2],
}
}
}
+EXPORT_SYMBOL_GPL(tpg_gen_text);
void tpg_update_mv_step(struct tpg_data *tpg)
{
@@ -1773,6 +1785,7 @@ void tpg_update_mv_step(struct tpg_data *tpg)
if (factor < 0)
tpg->mv_vert_step = tpg->src_height - tpg->mv_vert_step;
}
+EXPORT_SYMBOL_GPL(tpg_update_mv_step);
/* Map the line number relative to the crop rectangle to a frame line number */
static unsigned tpg_calc_frameline(const struct tpg_data *tpg, unsigned src_y,
@@ -1862,6 +1875,7 @@ void tpg_calc_text_basep(struct tpg_data *tpg,
if (p == 0 && tpg->interleaved)
tpg_calc_text_basep(tpg, basep, 1, vbuf);
}
+EXPORT_SYMBOL_GPL(tpg_calc_text_basep);
static int tpg_pattern_avg(const struct tpg_data *tpg,
unsigned pat1, unsigned pat2)
@@ -1891,6 +1905,7 @@ void tpg_log_status(struct tpg_data *tpg)
pr_info("tpg quantization: %d/%d\n", tpg->quantization, tpg->real_quantization);
pr_info("tpg RGB range: %d/%d\n", tpg->rgb_range, tpg->real_rgb_range);
}
+EXPORT_SYMBOL_GPL(tpg_log_status);
/*
* This struct contains common parameters used by both the drawing of the
@@ -2296,6 +2311,7 @@ void tpg_fill_plane_buffer(struct tpg_data *tpg, v4l2_std_id std,
vbuf + buf_line * params.stride);
}
}
+EXPORT_SYMBOL_GPL(tpg_fill_plane_buffer);
void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std, unsigned p, u8 *vbuf)
{
@@ -2312,3 +2328,8 @@ void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std, unsigned p, u8 *vbuf)
offset += tpg_calc_plane_size(tpg, i);
}
}
+EXPORT_SYMBOL_GPL(tpg_fillbuffer);
+
+MODULE_DESCRIPTION("V4L2 Test Pattern Generator");
+MODULE_AUTHOR("Hans Verkuil");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 0afad395e..a7a4674cc 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -58,6 +58,14 @@
#define USB_VID_TELESTAR 0x10b9
#define USB_VID_VISIONPLUS 0x13d3
#define USB_VID_SONY 0x1415
+#define USB_PID_TEVII_S421 0xd421
+#define USB_PID_TEVII_S480_1 0xd481
+#define USB_PID_TEVII_S480_2 0xd482
+#define USB_PID_TEVII_S630 0xd630
+#define USB_PID_TEVII_S632 0xd632
+#define USB_PID_TEVII_S650 0xd650
+#define USB_PID_TEVII_S660 0xd660
+#define USB_PID_TEVII_S662 0xd662
#define USB_VID_TWINHAN 0x1822
#define USB_VID_ULTIMA_ELECTRONIC 0x05d8
#define USB_VID_UNIWILL 0x1584
@@ -141,6 +149,7 @@
#define USB_PID_GENIUS_TVGO_DVB_T03 0x4012
#define USB_PID_GRANDTEC_DVBT_USB_COLD 0x0fa0
#define USB_PID_GRANDTEC_DVBT_USB_WARM 0x0fa1
+#define USB_PID_GOTVIEW_SAT_HD 0x5456
#define USB_PID_INTEL_CE9500 0x9500
#define USB_PID_ITETECH_IT9135 0x9135
#define USB_PID_ITETECH_IT9135_9005 0x9005
@@ -159,6 +168,8 @@
#define USB_PID_KWORLD_UB499_2T_T09 0xe409
#define USB_PID_KWORLD_VSTREAM_COLD 0x17de
#define USB_PID_KWORLD_VSTREAM_WARM 0x17df
+#define USB_PID_PROF_1100 0xb012
+#define USB_PID_TERRATEC_CINERGY_S 0x0064
#define USB_PID_TERRATEC_CINERGY_T_USB_XE 0x0055
#define USB_PID_TERRATEC_CINERGY_T_USB_XE_REV2 0x0069
#define USB_PID_TERRATEC_CINERGY_T_STICK 0x0093
@@ -361,6 +372,8 @@
#define USB_PID_YUAN_STK7700D 0x1efc
#define USB_PID_YUAN_STK7700D_2 0x1e8c
#define USB_PID_DW2102 0x2102
+#define USB_PID_DW2104 0x2104
+#define USB_PID_DW3101 0x3101
#define USB_PID_XTENSIONS_XD_380 0x0381
#define USB_PID_TELESTAR_STARSTICK_2 0x8000
#define USB_PID_MSI_DIGI_VOX_MINI_III 0x8807
@@ -373,6 +386,7 @@
#define USB_PID_ELGATO_EYETV_DTT_Dlx 0x0020
#define USB_PID_ELGATO_EYETV_SAT 0x002a
#define USB_PID_ELGATO_EYETV_SAT_V2 0x0025
+#define USB_PID_ELGATO_EYETV_SAT_V3 0x0036
#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_COLD 0x5000
#define USB_PID_DVB_T_USB_STICK_HIGH_SPEED_WARM 0x5001
#define USB_PID_FRIIO_WHITE 0x0001
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index e1684c570..75a3f4b57 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -676,13 +676,13 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
demux, 0, MEDIA_LNK_FL_ENABLED,
false);
if (ret)
- return -ENOMEM;
+ return ret;
}
if (demux && ca) {
ret = media_create_pad_link(demux, 1, ca,
0, MEDIA_LNK_FL_ENABLED);
if (ret)
- return -ENOMEM;
+ return ret;
}
/* Create demux links for each ringbuffer/pad */
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index dc2d41e14..d879dc060 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -1121,7 +1121,7 @@ void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
(state->current_band == BAND_CBAND) ? "CBAND" : "NOT CBAND",
state->identity.version & 0x1f);
- if (rf_ramp && ((state->rf_ramp[0] == 0) ||
+ if (rf_ramp && ((state->rf_ramp && state->rf_ramp[0] == 0) ||
(state->current_band == BAND_CBAND &&
(state->identity.version & 0x1f) <= P1D_E_F))) {
dprintk("DE-Engage mux for direct gain reg control");
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index d329c0cf0..cae93c393 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -456,7 +456,7 @@ static int ds3000_read_status(struct dvb_frontend *fe, enum fe_status *status)
break;
default:
- return 1;
+ return -EINVAL;
}
if (state->config->set_lock_led)
@@ -526,7 +526,7 @@ static int ds3000_read_ber(struct dvb_frontend *fe, u32* ber)
*ber = 0xffffffff;
break;
default:
- return 1;
+ return -EINVAL;
}
return 0;
@@ -621,7 +621,7 @@ static int ds3000_read_snr(struct dvb_frontend *fe, u16 *snr)
snr_reading, *snr);
break;
default:
- return 1;
+ return -EINVAL;
}
return 0;
@@ -659,7 +659,7 @@ static int ds3000_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
state->prevUCBS2 = _ucblocks;
break;
default:
- return 1;
+ return -EINVAL;
}
return 0;
@@ -752,7 +752,7 @@ static int ds3000_send_diseqc_msg(struct dvb_frontend *fe,
data |= 0x80;
ds3000_writereg(state, 0xa2, data);
- return 1;
+ return -ETIMEDOUT;
}
data = ds3000_readreg(state, 0xa2);
@@ -806,7 +806,7 @@ static int ds3000_diseqc_send_burst(struct dvb_frontend *fe,
data |= 0x80;
ds3000_writereg(state, 0xa2, data);
- return 1;
+ return -ETIMEDOUT;
}
data = ds3000_readreg(state, 0xa2);
@@ -949,7 +949,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
ds3000_writereg(state, 0xfe, 0x98);
break;
default:
- return 1;
+ return -EINVAL;
}
/* enable 27MHz clock output */
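All of the ds3000 hunks apply the same rule: dvb_frontend callbacks return 0 on success and a negative errno on failure, so the bare "return 1" (which callers would not treat as an error) becomes -EINVAL for an unhandled case and -ETIMEDOUT when the DiSEqC engine never signals completion. A hedged sketch of the convention (the switch expression is approximated from context):

    switch (c->delivery_system) {   /* 'c' assumed: cached frontend props */
    case SYS_DVBS:
            /* ... program the demod ... */
            break;
    default:
            return -EINVAL;         /* unsupported delivery system */
    }
    return 0;
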
diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
index 6fe10c7f9..1c312302c 100644
--- a/drivers/media/dvb-frontends/m88ds3103.c
+++ b/drivers/media/dvb-frontends/m88ds3103.c
@@ -1251,9 +1251,9 @@ static void m88ds3103_release(struct dvb_frontend *fe)
i2c_unregister_device(client);
}
-static int m88ds3103_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
+static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan)
{
- struct m88ds3103_dev *dev = mux_priv;
+ struct m88ds3103_dev *dev = i2c_mux_priv(muxc);
struct i2c_client *client = dev->client;
int ret;
struct i2c_msg msg = {
@@ -1374,7 +1374,7 @@ static struct i2c_adapter *m88ds3103_get_i2c_adapter(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
- return dev->i2c_adapter;
+ return dev->muxc->adapter[0];
}
static int m88ds3103_probe(struct i2c_client *client,
@@ -1467,13 +1467,16 @@ static int m88ds3103_probe(struct i2c_client *client,
goto err_kfree;
/* create mux i2c adapter for tuner */
- dev->i2c_adapter = i2c_add_mux_adapter(client->adapter, &client->dev,
- dev, 0, 0, 0, m88ds3103_select,
- NULL);
- if (dev->i2c_adapter == NULL) {
+ dev->muxc = i2c_mux_alloc(client->adapter, &client->dev, 1, 0, 0,
+ m88ds3103_select, NULL);
+ if (!dev->muxc) {
ret = -ENOMEM;
goto err_kfree;
}
+ dev->muxc->priv = dev;
+ ret = i2c_mux_add_adapter(dev->muxc, 0, 0, 0);
+ if (ret)
+ goto err_kfree;
/* create dvb_frontend */
memcpy(&dev->fe.ops, &m88ds3103_ops, sizeof(struct dvb_frontend_ops));
@@ -1502,7 +1505,7 @@ static int m88ds3103_remove(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
- i2c_del_mux_adapter(dev->i2c_adapter);
+ i2c_mux_del_adapters(dev->muxc);
kfree(dev);
return 0;
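m88ds3103 is the first of several demods in this series converted from the old i2c_add_mux_adapter() call to the i2c_mux_core API introduced in 4.7: the core object is allocated once, the driver's state is attached through muxc->priv (legal here because i2c_mux_alloc() was called with sizeof_priv == 0), and the select callback recovers it with i2c_mux_priv(). A condensed sketch of the pattern, error handling trimmed:

    static int demod_select(struct i2c_mux_core *muxc, u32 chan)
    {
            struct m88ds3103_dev *dev = i2c_mux_priv(muxc);

            /* ... open the tuner I2C gate ... */
            return 0;
    }

    dev->muxc = i2c_mux_alloc(client->adapter, &client->dev,
                              1 /* max adapters */, 0 /* sizeof_priv */,
                              0 /* flags */, demod_select, NULL);
    if (!dev->muxc)
            return -ENOMEM;
    dev->muxc->priv = dev;
    ret = i2c_mux_add_adapter(dev->muxc, 0, 0, 0);

The tuner then sits on dev->muxc->adapter[0], which is what m88ds3103_get_i2c_adapter() now hands out, and i2c_mux_del_adapters() tears everything down on remove.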
diff --git a/drivers/media/dvb-frontends/m88ds3103_priv.h b/drivers/media/dvb-frontends/m88ds3103_priv.h
index 4c60e894b..fcf3a7b95 100644
--- a/drivers/media/dvb-frontends/m88ds3103_priv.h
+++ b/drivers/media/dvb-frontends/m88ds3103_priv.h
@@ -42,11 +42,11 @@ struct m88ds3103_dev {
enum fe_status fe_status;
u32 dvbv3_ber; /* for old DVBv3 API read_ber */
bool warm; /* FW running */
- struct i2c_adapter *i2c_adapter;
+ struct i2c_mux_core *muxc;
/* auto detect chip id to do different config */
u8 chip_id;
/* main mclk is calculated for M88RS6000 dynamically */
- u32 mclk_khz;
+ s32 mclk_khz;
u64 post_bit_error;
u64 post_bit_count;
};
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 3f96429af..d25d1e0cd 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -677,9 +677,9 @@ err:
* adapter lock is already taken by tuner driver.
* Gate is closed automatically after single I2C transfer.
*/
-static int rtl2830_select(struct i2c_adapter *adap, void *mux_priv, u32 chan_id)
+static int rtl2830_select(struct i2c_mux_core *muxc, u32 chan_id)
{
- struct i2c_client *client = mux_priv;
+ struct i2c_client *client = i2c_mux_priv(muxc);
struct rtl2830_dev *dev = i2c_get_clientdata(client);
int ret;
@@ -712,7 +712,7 @@ static struct i2c_adapter *rtl2830_get_i2c_adapter(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
- return dev->adapter;
+ return dev->muxc->adapter[0];
}
/*
@@ -865,12 +865,16 @@ static int rtl2830_probe(struct i2c_client *client,
goto err_regmap_exit;
/* create muxed i2c adapter for tuner */
- dev->adapter = i2c_add_mux_adapter(client->adapter, &client->dev,
- client, 0, 0, 0, rtl2830_select, NULL);
- if (dev->adapter == NULL) {
- ret = -ENODEV;
+ dev->muxc = i2c_mux_alloc(client->adapter, &client->dev, 1, 0, 0,
+ rtl2830_select, NULL);
+ if (!dev->muxc) {
+ ret = -ENOMEM;
goto err_regmap_exit;
}
+ dev->muxc->priv = client;
+ ret = i2c_mux_add_adapter(dev->muxc, 0, 0, 0);
+ if (ret)
+ goto err_regmap_exit;
/* create dvb frontend */
memcpy(&dev->fe.ops, &rtl2830_ops, sizeof(dev->fe.ops));
@@ -903,7 +907,7 @@ static int rtl2830_remove(struct i2c_client *client)
/* stop statistics polling */
cancel_delayed_work_sync(&dev->stat_work);
- i2c_del_mux_adapter(dev->adapter);
+ i2c_mux_del_adapters(dev->muxc);
regmap_exit(dev->regmap);
kfree(dev);
diff --git a/drivers/media/dvb-frontends/rtl2830_priv.h b/drivers/media/dvb-frontends/rtl2830_priv.h
index cf793f39a..da4909543 100644
--- a/drivers/media/dvb-frontends/rtl2830_priv.h
+++ b/drivers/media/dvb-frontends/rtl2830_priv.h
@@ -29,7 +29,7 @@ struct rtl2830_dev {
struct rtl2830_platform_data *pdata;
struct i2c_client *client;
struct regmap *regmap;
- struct i2c_adapter *adapter;
+ struct i2c_mux_core *muxc;
struct dvb_frontend fe;
bool sleeping;
unsigned long filters;
diff --git a/drivers/media/dvb-frontends/rtl2832.c b/drivers/media/dvb-frontends/rtl2832.c
index 7c96f7679..bfb6beedd 100644
--- a/drivers/media/dvb-frontends/rtl2832.c
+++ b/drivers/media/dvb-frontends/rtl2832.c
@@ -153,43 +153,6 @@ static const struct rtl2832_reg_entry registers[] = {
[DVBT_REG_4MSEL] = {0x013, 0, 0},
};
-/* Our regmap is bypassing I2C adapter lock, thus we do it! */
-static int rtl2832_bulk_write(struct i2c_client *client, unsigned int reg,
- const void *val, size_t val_count)
-{
- struct rtl2832_dev *dev = i2c_get_clientdata(client);
- int ret;
-
- i2c_lock_adapter(client->adapter);
- ret = regmap_bulk_write(dev->regmap, reg, val, val_count);
- i2c_unlock_adapter(client->adapter);
- return ret;
-}
-
-static int rtl2832_update_bits(struct i2c_client *client, unsigned int reg,
- unsigned int mask, unsigned int val)
-{
- struct rtl2832_dev *dev = i2c_get_clientdata(client);
- int ret;
-
- i2c_lock_adapter(client->adapter);
- ret = regmap_update_bits(dev->regmap, reg, mask, val);
- i2c_unlock_adapter(client->adapter);
- return ret;
-}
-
-static int rtl2832_bulk_read(struct i2c_client *client, unsigned int reg,
- void *val, size_t val_count)
-{
- struct rtl2832_dev *dev = i2c_get_clientdata(client);
- int ret;
-
- i2c_lock_adapter(client->adapter);
- ret = regmap_bulk_read(dev->regmap, reg, val, val_count);
- i2c_unlock_adapter(client->adapter);
- return ret;
-}
-
static int rtl2832_rd_demod_reg(struct rtl2832_dev *dev, int reg, u32 *val)
{
struct i2c_client *client = dev->client;
@@ -204,7 +167,7 @@ static int rtl2832_rd_demod_reg(struct rtl2832_dev *dev, int reg, u32 *val)
len = (msb >> 3) + 1;
mask = REG_MASK(msb - lsb);
- ret = rtl2832_bulk_read(client, reg_start_addr, reading, len);
+ ret = regmap_bulk_read(dev->regmap, reg_start_addr, reading, len);
if (ret)
goto err;
@@ -234,7 +197,7 @@ static int rtl2832_wr_demod_reg(struct rtl2832_dev *dev, int reg, u32 val)
len = (msb >> 3) + 1;
mask = REG_MASK(msb - lsb);
- ret = rtl2832_bulk_read(client, reg_start_addr, reading, len);
+ ret = regmap_bulk_read(dev->regmap, reg_start_addr, reading, len);
if (ret)
goto err;
@@ -248,7 +211,7 @@ static int rtl2832_wr_demod_reg(struct rtl2832_dev *dev, int reg, u32 val)
for (i = 0; i < len; i++)
writing[i] = (writing_tmp >> ((len - 1 - i) * 8)) & 0xff;
- ret = rtl2832_bulk_write(client, reg_start_addr, writing, len);
+ ret = regmap_bulk_write(dev->regmap, reg_start_addr, writing, len);
if (ret)
goto err;
@@ -525,7 +488,8 @@ static int rtl2832_set_frontend(struct dvb_frontend *fe)
}
for (j = 0; j < sizeof(bw_params[0]); j++) {
- ret = rtl2832_bulk_write(client, 0x11c + j, &bw_params[i][j], 1);
+ ret = regmap_bulk_write(dev->regmap,
+ 0x11c + j, &bw_params[i][j], 1);
if (ret)
goto err;
}
@@ -581,11 +545,11 @@ static int rtl2832_get_frontend(struct dvb_frontend *fe,
if (dev->sleeping)
return 0;
- ret = rtl2832_bulk_read(client, 0x33c, buf, 2);
+ ret = regmap_bulk_read(dev->regmap, 0x33c, buf, 2);
if (ret)
goto err;
- ret = rtl2832_bulk_read(client, 0x351, &buf[2], 1);
+ ret = regmap_bulk_read(dev->regmap, 0x351, &buf[2], 1);
if (ret)
goto err;
@@ -716,7 +680,7 @@ static int rtl2832_read_status(struct dvb_frontend *fe, enum fe_status *status)
/* signal strength */
if (dev->fe_status & FE_HAS_SIGNAL) {
/* read digital AGC */
- ret = rtl2832_bulk_read(client, 0x305, &u8tmp, 1);
+ ret = regmap_bulk_read(dev->regmap, 0x305, &u8tmp, 1);
if (ret)
goto err;
@@ -742,7 +706,7 @@ static int rtl2832_read_status(struct dvb_frontend *fe, enum fe_status *status)
{87659938, 87659938, 87885178, 88241743},
};
- ret = rtl2832_bulk_read(client, 0x33c, &u8tmp, 1);
+ ret = regmap_bulk_read(dev->regmap, 0x33c, &u8tmp, 1);
if (ret)
goto err;
@@ -754,7 +718,7 @@ static int rtl2832_read_status(struct dvb_frontend *fe, enum fe_status *status)
if (hierarchy > HIERARCHY_NUM - 1)
goto err;
- ret = rtl2832_bulk_read(client, 0x40c, buf, 2);
+ ret = regmap_bulk_read(dev->regmap, 0x40c, buf, 2);
if (ret)
goto err;
@@ -775,7 +739,7 @@ static int rtl2832_read_status(struct dvb_frontend *fe, enum fe_status *status)
/* BER */
if (dev->fe_status & FE_HAS_LOCK) {
- ret = rtl2832_bulk_read(client, 0x34e, buf, 2);
+ ret = regmap_bulk_read(dev->regmap, 0x34e, buf, 2);
if (ret)
goto err;
@@ -825,8 +789,6 @@ static int rtl2832_read_ber(struct dvb_frontend *fe, u32 *ber)
/*
* I2C gate/mux/repeater logic
- * We must use unlocked __i2c_transfer() here (through regmap) because of I2C
- * adapter lock is already taken by tuner driver.
* There is delay mechanism to avoid unneeded I2C gate open / close. Gate close
* is delayed here a little bit in order to see if there is sequence of I2C
* messages sent to same I2C bus.
@@ -838,7 +800,7 @@ static void rtl2832_i2c_gate_work(struct work_struct *work)
int ret;
/* close gate */
- ret = rtl2832_update_bits(dev->client, 0x101, 0x08, 0x00);
+ ret = regmap_update_bits(dev->regmap, 0x101, 0x08, 0x00);
if (ret)
goto err;
@@ -847,19 +809,16 @@ err:
dev_dbg(&client->dev, "failed=%d\n", ret);
}
-static int rtl2832_select(struct i2c_adapter *adap, void *mux_priv, u32 chan_id)
+static int rtl2832_select(struct i2c_mux_core *muxc, u32 chan_id)
{
- struct rtl2832_dev *dev = mux_priv;
+ struct rtl2832_dev *dev = i2c_mux_priv(muxc);
struct i2c_client *client = dev->client;
int ret;
/* terminate possible gate closing */
cancel_delayed_work(&dev->i2c_gate_work);
- /*
- * I2C adapter lock is already taken and due to that we will use
- * regmap_update_bits() which does not lock again I2C adapter.
- */
+ /* open gate */
ret = regmap_update_bits(dev->regmap, 0x101, 0x08, 0x08);
if (ret)
goto err;
@@ -870,10 +829,9 @@ err:
return ret;
}
-static int rtl2832_deselect(struct i2c_adapter *adap, void *mux_priv,
- u32 chan_id)
+static int rtl2832_deselect(struct i2c_mux_core *muxc, u32 chan_id)
{
- struct rtl2832_dev *dev = mux_priv;
+ struct rtl2832_dev *dev = i2c_mux_priv(muxc);
schedule_delayed_work(&dev->i2c_gate_work, usecs_to_jiffies(100));
return 0;
@@ -932,120 +890,6 @@ static bool rtl2832_volatile_reg(struct device *dev, unsigned int reg)
return false;
}
-/*
- * We implement own I2C access routines for regmap in order to get manual access
- * to I2C adapter lock, which is needed for I2C mux adapter.
- */
-static int rtl2832_regmap_read(void *context, const void *reg_buf,
- size_t reg_size, void *val_buf, size_t val_size)
-{
- struct i2c_client *client = context;
- int ret;
- struct i2c_msg msg[2] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = reg_size,
- .buf = (u8 *)reg_buf,
- }, {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = val_size,
- .buf = val_buf,
- }
- };
-
- ret = __i2c_transfer(client->adapter, msg, 2);
- if (ret != 2) {
- dev_warn(&client->dev, "i2c reg read failed %d reg %02x\n",
- ret, *(u8 *)reg_buf);
- if (ret >= 0)
- ret = -EREMOTEIO;
- return ret;
- }
- return 0;
-}
-
-static int rtl2832_regmap_write(void *context, const void *data, size_t count)
-{
- struct i2c_client *client = context;
- int ret;
- struct i2c_msg msg[1] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = count,
- .buf = (u8 *)data,
- }
- };
-
- ret = __i2c_transfer(client->adapter, msg, 1);
- if (ret != 1) {
- dev_warn(&client->dev, "i2c reg write failed %d reg %02x\n",
- ret, *(u8 *)data);
- if (ret >= 0)
- ret = -EREMOTEIO;
- return ret;
- }
- return 0;
-}
-
-static int rtl2832_regmap_gather_write(void *context, const void *reg,
- size_t reg_len, const void *val,
- size_t val_len)
-{
- struct i2c_client *client = context;
- int ret;
- u8 buf[256];
- struct i2c_msg msg[1] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = 1 + val_len,
- .buf = buf,
- }
- };
-
- buf[0] = *(u8 const *)reg;
- memcpy(&buf[1], val, val_len);
-
- ret = __i2c_transfer(client->adapter, msg, 1);
- if (ret != 1) {
- dev_warn(&client->dev, "i2c reg write failed %d reg %02x\n",
- ret, *(u8 const *)reg);
- if (ret >= 0)
- ret = -EREMOTEIO;
- return ret;
- }
- return 0;
-}
-
-/*
- * FIXME: Hack. Implement own regmap locking in order to silence lockdep
- * recursive lock warning. That happens when regmap I2C client calls I2C mux
- * adapter, which leads demod I2C repeater enable via demod regmap. Operation
- * takes two regmap locks recursively - but those are different regmap instances
- * in a two different I2C drivers, so it is not deadlock. Proper fix is to make
- * regmap aware of lockdep.
- */
-static void rtl2832_regmap_lock(void *__dev)
-{
- struct rtl2832_dev *dev = __dev;
- struct i2c_client *client = dev->client;
-
- dev_dbg(&client->dev, "\n");
- mutex_lock(&dev->regmap_mutex);
-}
-
-static void rtl2832_regmap_unlock(void *__dev)
-{
- struct rtl2832_dev *dev = __dev;
- struct i2c_client *client = dev->client;
-
- dev_dbg(&client->dev, "\n");
- mutex_unlock(&dev->regmap_mutex);
-}
-
static struct dvb_frontend *rtl2832_get_dvb_frontend(struct i2c_client *client)
{
struct rtl2832_dev *dev = i2c_get_clientdata(client);
@@ -1059,7 +903,7 @@ static struct i2c_adapter *rtl2832_get_i2c_adapter(struct i2c_client *client)
struct rtl2832_dev *dev = i2c_get_clientdata(client);
dev_dbg(&client->dev, "\n");
- return dev->i2c_adapter_tuner;
+ return dev->muxc->adapter[0];
}
static int rtl2832_slave_ts_ctrl(struct i2c_client *client, bool enable)
@@ -1073,29 +917,29 @@ static int rtl2832_slave_ts_ctrl(struct i2c_client *client, bool enable)
ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x0);
if (ret)
goto err;
- ret = rtl2832_bulk_write(client, 0x10c, "\x5f\xff", 2);
+ ret = regmap_bulk_write(dev->regmap, 0x10c, "\x5f\xff", 2);
if (ret)
goto err;
ret = rtl2832_wr_demod_reg(dev, DVBT_PIP_ON, 0x1);
if (ret)
goto err;
- ret = rtl2832_bulk_write(client, 0x0bc, "\x18", 1);
+ ret = regmap_bulk_write(dev->regmap, 0x0bc, "\x18", 1);
if (ret)
goto err;
- ret = rtl2832_bulk_write(client, 0x192, "\x7f\xf7\xff", 3);
+ ret = regmap_bulk_write(dev->regmap, 0x192, "\x7f\xf7\xff", 3);
if (ret)
goto err;
} else {
- ret = rtl2832_bulk_write(client, 0x192, "\x00\x0f\xff", 3);
+ ret = regmap_bulk_write(dev->regmap, 0x192, "\x00\x0f\xff", 3);
if (ret)
goto err;
- ret = rtl2832_bulk_write(client, 0x0bc, "\x08", 1);
+ ret = regmap_bulk_write(dev->regmap, 0x0bc, "\x08", 1);
if (ret)
goto err;
ret = rtl2832_wr_demod_reg(dev, DVBT_PIP_ON, 0x0);
if (ret)
goto err;
- ret = rtl2832_bulk_write(client, 0x10c, "\x00\x00", 2);
+ ret = regmap_bulk_write(dev->regmap, 0x10c, "\x00\x00", 2);
if (ret)
goto err;
ret = rtl2832_wr_demod_reg(dev, DVBT_SOFT_RST, 0x1);
@@ -1124,7 +968,7 @@ static int rtl2832_pid_filter_ctrl(struct dvb_frontend *fe, int onoff)
else
u8tmp = 0x00;
- ret = rtl2832_update_bits(client, 0x061, 0xc0, u8tmp);
+ ret = regmap_update_bits(dev->regmap, 0x061, 0xc0, u8tmp);
if (ret)
goto err;
@@ -1159,14 +1003,14 @@ static int rtl2832_pid_filter(struct dvb_frontend *fe, u8 index, u16 pid,
buf[1] = (dev->filters >> 8) & 0xff;
buf[2] = (dev->filters >> 16) & 0xff;
buf[3] = (dev->filters >> 24) & 0xff;
- ret = rtl2832_bulk_write(client, 0x062, buf, 4);
+ ret = regmap_bulk_write(dev->regmap, 0x062, buf, 4);
if (ret)
goto err;
/* add PID */
buf[0] = (pid >> 8) & 0xff;
buf[1] = (pid >> 0) & 0xff;
- ret = rtl2832_bulk_write(client, 0x066 + 2 * index, buf, 2);
+ ret = regmap_bulk_write(dev->regmap, 0x066 + 2 * index, buf, 2);
if (ret)
goto err;
@@ -1184,12 +1028,6 @@ static int rtl2832_probe(struct i2c_client *client,
struct rtl2832_dev *dev;
int ret;
u8 tmp;
- static const struct regmap_bus regmap_bus = {
- .read = rtl2832_regmap_read,
- .write = rtl2832_regmap_write,
- .gather_write = rtl2832_regmap_gather_write,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
- };
static const struct regmap_range_cfg regmap_range_cfg[] = {
{
.selector_reg = 0x00,
@@ -1218,36 +1056,35 @@ static int rtl2832_probe(struct i2c_client *client,
dev->sleeping = true;
INIT_DELAYED_WORK(&dev->i2c_gate_work, rtl2832_i2c_gate_work);
/* create regmap */
- mutex_init(&dev->regmap_mutex);
dev->regmap_config.reg_bits = 8,
dev->regmap_config.val_bits = 8,
- dev->regmap_config.lock = rtl2832_regmap_lock,
- dev->regmap_config.unlock = rtl2832_regmap_unlock,
- dev->regmap_config.lock_arg = dev,
dev->regmap_config.volatile_reg = rtl2832_volatile_reg,
dev->regmap_config.max_register = 5 * 0x100,
dev->regmap_config.ranges = regmap_range_cfg,
dev->regmap_config.num_ranges = ARRAY_SIZE(regmap_range_cfg),
dev->regmap_config.cache_type = REGCACHE_NONE,
- dev->regmap = regmap_init(&client->dev, &regmap_bus, client,
- &dev->regmap_config);
+ dev->regmap = regmap_init_i2c(client, &dev->regmap_config);
if (IS_ERR(dev->regmap)) {
ret = PTR_ERR(dev->regmap);
goto err_kfree;
}
/* check if the demod is there */
- ret = rtl2832_bulk_read(client, 0x000, &tmp, 1);
+ ret = regmap_bulk_read(dev->regmap, 0x000, &tmp, 1);
if (ret)
goto err_regmap_exit;
/* create muxed i2c adapter for demod tuner bus */
- dev->i2c_adapter_tuner = i2c_add_mux_adapter(i2c, &i2c->dev, dev,
- 0, 0, 0, rtl2832_select, rtl2832_deselect);
- if (dev->i2c_adapter_tuner == NULL) {
- ret = -ENODEV;
+ dev->muxc = i2c_mux_alloc(i2c, &i2c->dev, 1, 0, I2C_MUX_LOCKED,
+ rtl2832_select, rtl2832_deselect);
+ if (!dev->muxc) {
+ ret = -ENOMEM;
goto err_regmap_exit;
}
+ dev->muxc->priv = dev;
+ ret = i2c_mux_add_adapter(dev->muxc, 0, 0, 0);
+ if (ret)
+ goto err_regmap_exit;
/* create dvb_frontend */
memcpy(&dev->fe.ops, &rtl2832_ops, sizeof(struct dvb_frontend_ops));
@@ -1259,9 +1096,7 @@ static int rtl2832_probe(struct i2c_client *client,
pdata->slave_ts_ctrl = rtl2832_slave_ts_ctrl;
pdata->pid_filter = rtl2832_pid_filter;
pdata->pid_filter_ctrl = rtl2832_pid_filter_ctrl;
- pdata->bulk_read = rtl2832_bulk_read;
- pdata->bulk_write = rtl2832_bulk_write;
- pdata->update_bits = rtl2832_update_bits;
+ pdata->regmap = dev->regmap;
dev_info(&client->dev, "Realtek RTL2832 successfully attached\n");
return 0;
@@ -1282,7 +1117,7 @@ static int rtl2832_remove(struct i2c_client *client)
cancel_delayed_work_sync(&dev->i2c_gate_work);
- i2c_del_mux_adapter(dev->i2c_adapter_tuner);
+ i2c_mux_del_adapters(dev->muxc);
regmap_exit(dev->regmap);
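The main payoff of the conversion shows up in rtl2832: the driver used to carry a private regmap bus built on __i2c_transfer() plus its own regmap lock, purely to dodge the recursive parent-adapter locking that the old mux implementation forced on it. Registering the mux with I2C_MUX_LOCKED makes it "mux-locked": the core locks only the mux segment, not the parent adapter, so select/deselect and the demod's own I/O can all go through a stock regmap again. A sketch of the slimmed-down probe path:

    /* Stock I2C regmap; no custom bus, no private lock. */
    dev->regmap = regmap_init_i2c(client, &dev->regmap_config);
    if (IS_ERR(dev->regmap))
            return PTR_ERR(dev->regmap);

    dev->muxc = i2c_mux_alloc(i2c, &i2c->dev, 1, 0,
                              I2C_MUX_LOCKED, /* mux-locked, not parent-locked */
                              rtl2832_select, rtl2832_deselect);
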
diff --git a/drivers/media/dvb-frontends/rtl2832.h b/drivers/media/dvb-frontends/rtl2832.h
index 6390af64c..03c0de039 100644
--- a/drivers/media/dvb-frontends/rtl2832.h
+++ b/drivers/media/dvb-frontends/rtl2832.h
@@ -57,9 +57,7 @@ struct rtl2832_platform_data {
int (*pid_filter)(struct dvb_frontend *, u8, u16, int);
int (*pid_filter_ctrl)(struct dvb_frontend *, int);
/* private: Register access for SDR module use only */
- int (*bulk_read)(struct i2c_client *, unsigned int, void *, size_t);
- int (*bulk_write)(struct i2c_client *, unsigned int, const void *, size_t);
- int (*update_bits)(struct i2c_client *, unsigned int, unsigned int, unsigned int);
+ struct regmap *regmap;
};
#endif /* RTL2832_H */
diff --git a/drivers/media/dvb-frontends/rtl2832_priv.h b/drivers/media/dvb-frontends/rtl2832_priv.h
index 6b875f462..c1a8a69e9 100644
--- a/drivers/media/dvb-frontends/rtl2832_priv.h
+++ b/drivers/media/dvb-frontends/rtl2832_priv.h
@@ -33,10 +33,9 @@
struct rtl2832_dev {
struct rtl2832_platform_data *pdata;
struct i2c_client *client;
- struct mutex regmap_mutex;
struct regmap_config regmap_config;
struct regmap *regmap;
- struct i2c_adapter *i2c_adapter_tuner;
+ struct i2c_mux_core *muxc;
struct dvb_frontend fe;
enum fe_status fe_status;
u64 post_bit_error_prev; /* for old DVBv3 read_ber() calculation */
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index b860f02a4..47a480a7d 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -35,6 +35,7 @@
#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/math64.h>
+#include <linux/regmap.h>
static bool rtl2832_sdr_emulated_fmt;
module_param_named(emulated_formats, rtl2832_sdr_emulated_fmt, bool, 0644);
@@ -119,6 +120,7 @@ struct rtl2832_sdr_dev {
unsigned long flags;
struct platform_device *pdev;
+ struct regmap *regmap;
struct video_device vdev;
struct v4l2_device v4l2_dev;
@@ -163,47 +165,6 @@ struct rtl2832_sdr_dev {
unsigned long jiffies_next;
};
-/* write multiple registers */
-static int rtl2832_sdr_wr_regs(struct rtl2832_sdr_dev *dev, u16 reg,
- const u8 *val, int len)
-{
- struct platform_device *pdev = dev->pdev;
- struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
- struct i2c_client *client = pdata->i2c_client;
-
- return pdata->bulk_write(client, reg, val, len);
-}
-
-#if 0
-/* read multiple registers */
-static int rtl2832_sdr_rd_regs(struct rtl2832_sdr_dev *dev, u16 reg, u8 *val,
- int len)
-{
- struct platform_device *pdev = dev->pdev;
- struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
- struct i2c_client *client = pdata->i2c_client;
-
- return pdata->bulk_read(client, reg, val, len);
-}
-#endif
-
-/* write single register */
-static int rtl2832_sdr_wr_reg(struct rtl2832_sdr_dev *dev, u16 reg, u8 val)
-{
- return rtl2832_sdr_wr_regs(dev, reg, &val, 1);
-}
-
-/* write single register with mask */
-static int rtl2832_sdr_wr_reg_mask(struct rtl2832_sdr_dev *dev, u16 reg,
- u8 val, u8 mask)
-{
- struct platform_device *pdev = dev->pdev;
- struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
- struct i2c_client *client = pdata->i2c_client;
-
- return pdata->update_bits(client, reg, mask, val);
-}
-
/* Private functions */
static struct rtl2832_sdr_frame_buf *rtl2832_sdr_get_next_fill_buf(
struct rtl2832_sdr_dev *dev)
@@ -558,11 +519,11 @@ static int rtl2832_sdr_set_adc(struct rtl2832_sdr_dev *dev)
f_sr = dev->f_adc;
- ret = rtl2832_sdr_wr_regs(dev, 0x13e, "\x00\x00", 2);
+ ret = regmap_bulk_write(dev->regmap, 0x13e, "\x00\x00", 2);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_regs(dev, 0x115, "\x00\x00\x00\x00", 4);
+ ret = regmap_bulk_write(dev->regmap, 0x115, "\x00\x00\x00\x00", 4);
if (ret)
goto err;
@@ -588,7 +549,7 @@ static int rtl2832_sdr_set_adc(struct rtl2832_sdr_dev *dev)
buf[1] = (u32tmp >> 8) & 0xff;
buf[2] = (u32tmp >> 0) & 0xff;
- ret = rtl2832_sdr_wr_regs(dev, 0x119, buf, 3);
+ ret = regmap_bulk_write(dev->regmap, 0x119, buf, 3);
if (ret)
goto err;
@@ -602,15 +563,15 @@ static int rtl2832_sdr_set_adc(struct rtl2832_sdr_dev *dev)
u8tmp2 = 0xcd; /* enable ADC I, ADC Q */
}
- ret = rtl2832_sdr_wr_reg(dev, 0x1b1, u8tmp1);
+ ret = regmap_write(dev->regmap, 0x1b1, u8tmp1);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_reg(dev, 0x008, u8tmp2);
+ ret = regmap_write(dev->regmap, 0x008, u8tmp2);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_reg(dev, 0x006, 0x80);
+ ret = regmap_write(dev->regmap, 0x006, 0x80);
if (ret)
goto err;
@@ -621,168 +582,169 @@ static int rtl2832_sdr_set_adc(struct rtl2832_sdr_dev *dev)
buf[1] = (u32tmp >> 16) & 0xff;
buf[2] = (u32tmp >> 8) & 0xff;
buf[3] = (u32tmp >> 0) & 0xff;
- ret = rtl2832_sdr_wr_regs(dev, 0x19f, buf, 4);
+ ret = regmap_bulk_write(dev->regmap, 0x19f, buf, 4);
if (ret)
goto err;
/* low-pass filter */
- ret = rtl2832_sdr_wr_regs(dev, 0x11c,
- "\xca\xdc\xd7\xd8\xe0\xf2\x0e\x35\x06\x50\x9c\x0d\x71\x11\x14\x71\x74\x19\x41\xa5",
- 20);
+ ret = regmap_bulk_write(dev->regmap, 0x11c,
+ "\xca\xdc\xd7\xd8\xe0\xf2\x0e\x35\x06\x50\x9c\x0d\x71\x11\x14\x71\x74\x19\x41\xa5",
+ 20);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_regs(dev, 0x017, "\x11\x10", 2);
+ ret = regmap_bulk_write(dev->regmap, 0x017, "\x11\x10", 2);
if (ret)
goto err;
/* mode */
- ret = rtl2832_sdr_wr_regs(dev, 0x019, "\x05", 1);
+ ret = regmap_write(dev->regmap, 0x019, 0x05);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_regs(dev, 0x01a, "\x1b\x16\x0d\x06\x01\xff", 6);
+ ret = regmap_bulk_write(dev->regmap, 0x01a,
+ "\x1b\x16\x0d\x06\x01\xff", 6);
if (ret)
goto err;
/* FSM */
- ret = rtl2832_sdr_wr_regs(dev, 0x192, "\x00\xf0\x0f", 3);
+ ret = regmap_bulk_write(dev->regmap, 0x192, "\x00\xf0\x0f", 3);
if (ret)
goto err;
/* PID filter */
- ret = rtl2832_sdr_wr_regs(dev, 0x061, "\x60", 1);
+ ret = regmap_write(dev->regmap, 0x061, 0x60);
if (ret)
goto err;
/* used RF tuner based settings */
switch (pdata->tuner) {
case RTL2832_SDR_TUNER_E4000:
- ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x5a", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x5a", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x30", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xd0", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x18", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xd4", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1e5, "\xf0", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1d9, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1db, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1dd, "\x14", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1de, "\xec", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1d8, "\x0c", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1e6, "\x02", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1d7, "\x09", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x00d, "\x83", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x010, "\x49", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x00d, "\x87", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x00d, "\x85", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x013, "\x02", 1);
+ ret = regmap_write(dev->regmap, 0x112, 0x5a);
+ ret = regmap_write(dev->regmap, 0x102, 0x40);
+ ret = regmap_write(dev->regmap, 0x103, 0x5a);
+ ret = regmap_write(dev->regmap, 0x1c7, 0x30);
+ ret = regmap_write(dev->regmap, 0x104, 0xd0);
+ ret = regmap_write(dev->regmap, 0x105, 0xbe);
+ ret = regmap_write(dev->regmap, 0x1c8, 0x18);
+ ret = regmap_write(dev->regmap, 0x106, 0x35);
+ ret = regmap_write(dev->regmap, 0x1c9, 0x21);
+ ret = regmap_write(dev->regmap, 0x1ca, 0x21);
+ ret = regmap_write(dev->regmap, 0x1cb, 0x00);
+ ret = regmap_write(dev->regmap, 0x107, 0x40);
+ ret = regmap_write(dev->regmap, 0x1cd, 0x10);
+ ret = regmap_write(dev->regmap, 0x1ce, 0x10);
+ ret = regmap_write(dev->regmap, 0x108, 0x80);
+ ret = regmap_write(dev->regmap, 0x109, 0x7f);
+ ret = regmap_write(dev->regmap, 0x10a, 0x80);
+ ret = regmap_write(dev->regmap, 0x10b, 0x7f);
+ ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+ ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+ ret = regmap_write(dev->regmap, 0x011, 0xd4);
+ ret = regmap_write(dev->regmap, 0x1e5, 0xf0);
+ ret = regmap_write(dev->regmap, 0x1d9, 0x00);
+ ret = regmap_write(dev->regmap, 0x1db, 0x00);
+ ret = regmap_write(dev->regmap, 0x1dd, 0x14);
+ ret = regmap_write(dev->regmap, 0x1de, 0xec);
+ ret = regmap_write(dev->regmap, 0x1d8, 0x0c);
+ ret = regmap_write(dev->regmap, 0x1e6, 0x02);
+ ret = regmap_write(dev->regmap, 0x1d7, 0x09);
+ ret = regmap_write(dev->regmap, 0x00d, 0x83);
+ ret = regmap_write(dev->regmap, 0x010, 0x49);
+ ret = regmap_write(dev->regmap, 0x00d, 0x87);
+ ret = regmap_write(dev->regmap, 0x00d, 0x85);
+ ret = regmap_write(dev->regmap, 0x013, 0x02);
break;
case RTL2832_SDR_TUNER_FC0012:
case RTL2832_SDR_TUNER_FC0013:
- ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x5a", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x5a", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x2c", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xcc", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x16", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xe9\xbf", 2);
- ret = rtl2832_sdr_wr_regs(dev, 0x1e5, "\xf0", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1d9, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1db, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1dd, "\x11", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1de, "\xef", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1d8, "\x0c", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1e6, "\x02", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1d7, "\x09", 1);
+ ret = regmap_write(dev->regmap, 0x112, 0x5a);
+ ret = regmap_write(dev->regmap, 0x102, 0x40);
+ ret = regmap_write(dev->regmap, 0x103, 0x5a);
+ ret = regmap_write(dev->regmap, 0x1c7, 0x2c);
+ ret = regmap_write(dev->regmap, 0x104, 0xcc);
+ ret = regmap_write(dev->regmap, 0x105, 0xbe);
+ ret = regmap_write(dev->regmap, 0x1c8, 0x16);
+ ret = regmap_write(dev->regmap, 0x106, 0x35);
+ ret = regmap_write(dev->regmap, 0x1c9, 0x21);
+ ret = regmap_write(dev->regmap, 0x1ca, 0x21);
+ ret = regmap_write(dev->regmap, 0x1cb, 0x00);
+ ret = regmap_write(dev->regmap, 0x107, 0x40);
+ ret = regmap_write(dev->regmap, 0x1cd, 0x10);
+ ret = regmap_write(dev->regmap, 0x1ce, 0x10);
+ ret = regmap_write(dev->regmap, 0x108, 0x80);
+ ret = regmap_write(dev->regmap, 0x109, 0x7f);
+ ret = regmap_write(dev->regmap, 0x10a, 0x80);
+ ret = regmap_write(dev->regmap, 0x10b, 0x7f);
+ ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+ ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+ ret = regmap_bulk_write(dev->regmap, 0x011, "\xe9\xbf", 2);
+ ret = regmap_write(dev->regmap, 0x1e5, 0xf0);
+ ret = regmap_write(dev->regmap, 0x1d9, 0x00);
+ ret = regmap_write(dev->regmap, 0x1db, 0x00);
+ ret = regmap_write(dev->regmap, 0x1dd, 0x11);
+ ret = regmap_write(dev->regmap, 0x1de, 0xef);
+ ret = regmap_write(dev->regmap, 0x1d8, 0x0c);
+ ret = regmap_write(dev->regmap, 0x1e6, 0x02);
+ ret = regmap_write(dev->regmap, 0x1d7, 0x09);
break;
case RTL2832_SDR_TUNER_R820T:
case RTL2832_SDR_TUNER_R828D:
- ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x5a", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x115, "\x01", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x24", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xcc", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x14", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xf4", 1);
+ ret = regmap_write(dev->regmap, 0x112, 0x5a);
+ ret = regmap_write(dev->regmap, 0x102, 0x40);
+ ret = regmap_write(dev->regmap, 0x115, 0x01);
+ ret = regmap_write(dev->regmap, 0x103, 0x80);
+ ret = regmap_write(dev->regmap, 0x1c7, 0x24);
+ ret = regmap_write(dev->regmap, 0x104, 0xcc);
+ ret = regmap_write(dev->regmap, 0x105, 0xbe);
+ ret = regmap_write(dev->regmap, 0x1c8, 0x14);
+ ret = regmap_write(dev->regmap, 0x106, 0x35);
+ ret = regmap_write(dev->regmap, 0x1c9, 0x21);
+ ret = regmap_write(dev->regmap, 0x1ca, 0x21);
+ ret = regmap_write(dev->regmap, 0x1cb, 0x00);
+ ret = regmap_write(dev->regmap, 0x107, 0x40);
+ ret = regmap_write(dev->regmap, 0x1cd, 0x10);
+ ret = regmap_write(dev->regmap, 0x1ce, 0x10);
+ ret = regmap_write(dev->regmap, 0x108, 0x80);
+ ret = regmap_write(dev->regmap, 0x109, 0x7f);
+ ret = regmap_write(dev->regmap, 0x10a, 0x80);
+ ret = regmap_write(dev->regmap, 0x10b, 0x7f);
+ ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+ ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+ ret = regmap_write(dev->regmap, 0x011, 0xf4);
break;
case RTL2832_SDR_TUNER_FC2580:
- ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x39", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x5a", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x2c", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xcc", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x16", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x9c", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
- ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xe9\xf4", 2);
+ ret = regmap_write(dev->regmap, 0x112, 0x39);
+ ret = regmap_write(dev->regmap, 0x102, 0x40);
+ ret = regmap_write(dev->regmap, 0x103, 0x5a);
+ ret = regmap_write(dev->regmap, 0x1c7, 0x2c);
+ ret = regmap_write(dev->regmap, 0x104, 0xcc);
+ ret = regmap_write(dev->regmap, 0x105, 0xbe);
+ ret = regmap_write(dev->regmap, 0x1c8, 0x16);
+ ret = regmap_write(dev->regmap, 0x106, 0x35);
+ ret = regmap_write(dev->regmap, 0x1c9, 0x21);
+ ret = regmap_write(dev->regmap, 0x1ca, 0x21);
+ ret = regmap_write(dev->regmap, 0x1cb, 0x00);
+ ret = regmap_write(dev->regmap, 0x107, 0x40);
+ ret = regmap_write(dev->regmap, 0x1cd, 0x10);
+ ret = regmap_write(dev->regmap, 0x1ce, 0x10);
+ ret = regmap_write(dev->regmap, 0x108, 0x80);
+ ret = regmap_write(dev->regmap, 0x109, 0x7f);
+ ret = regmap_write(dev->regmap, 0x10a, 0x9c);
+ ret = regmap_write(dev->regmap, 0x10b, 0x7f);
+ ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+ ret = regmap_write(dev->regmap, 0x00e, 0xfc);
+ ret = regmap_bulk_write(dev->regmap, 0x011, "\xe9\xf4", 2);
break;
default:
dev_notice(&pdev->dev, "Unsupported tuner\n");
}
/* software reset */
- ret = rtl2832_sdr_wr_reg_mask(dev, 0x101, 0x04, 0x04);
+ ret = regmap_update_bits(dev->regmap, 0x101, 0x04, 0x04);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_reg_mask(dev, 0x101, 0x00, 0x04);
+ ret = regmap_update_bits(dev->regmap, 0x101, 0x04, 0x00);
if (ret)
goto err;
err:
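One detail worth pausing on in the hunk above: the removed helper took its arguments as (reg, val, mask), while regmap_update_bits() takes (reg, mask, val). The software-reset pair is therefore not just a rename; the last two arguments swap places:

    /* Old helper (removed):  rtl2832_sdr_wr_reg_mask(dev, 0x101, 0x00, 0x04);
     * regmap equivalent, note mask before value: */
    ret = regmap_update_bits(dev->regmap, 0x101, 0x04, 0x00);
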
@@ -797,29 +759,29 @@ static void rtl2832_sdr_unset_adc(struct rtl2832_sdr_dev *dev)
dev_dbg(&pdev->dev, "\n");
/* PID filter */
- ret = rtl2832_sdr_wr_regs(dev, 0x061, "\xe0", 1);
+ ret = regmap_write(dev->regmap, 0x061, 0xe0);
if (ret)
goto err;
/* mode */
- ret = rtl2832_sdr_wr_regs(dev, 0x019, "\x20", 1);
+ ret = regmap_write(dev->regmap, 0x019, 0x20);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_regs(dev, 0x017, "\x11\x10", 2);
+ ret = regmap_bulk_write(dev->regmap, 0x017, "\x11\x10", 2);
if (ret)
goto err;
/* FSM */
- ret = rtl2832_sdr_wr_regs(dev, 0x192, "\x00\x0f\xff", 3);
+ ret = regmap_bulk_write(dev->regmap, 0x192, "\x00\x0f\xff", 3);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_regs(dev, 0x13e, "\x40\x00", 2);
+ ret = regmap_bulk_write(dev->regmap, 0x13e, "\x40\x00", 2);
if (ret)
goto err;
- ret = rtl2832_sdr_wr_regs(dev, 0x115, "\x06\x3f\xce\xcc", 4);
+ ret = regmap_bulk_write(dev->regmap, 0x115, "\x06\x3f\xce\xcc", 4);
if (ret)
goto err;
err:
@@ -1399,6 +1361,7 @@ static int rtl2832_sdr_probe(struct platform_device *pdev)
subdev = pdata->v4l2_subdev;
dev->v4l2_subdev = pdata->v4l2_subdev;
dev->pdev = pdev;
+ dev->regmap = pdata->regmap;
dev->udev = pdata->dvb_usb_device->udev;
dev->f_adc = bands_adc[0].rangelow;
dev->f_tuner = bands_fm[0].rangelow;
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.h b/drivers/media/dvb-frontends/rtl2832_sdr.h
index 342ea8486..d8fc7e721 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.h
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.h
@@ -56,10 +56,7 @@ struct rtl2832_sdr_platform_data {
#define RTL2832_SDR_TUNER_R828D 0x2b
u8 tuner;
- struct i2c_client *i2c_client;
- int (*bulk_read)(struct i2c_client *, unsigned int, void *, size_t);
- int (*bulk_write)(struct i2c_client *, unsigned int, const void *, size_t);
- int (*update_bits)(struct i2c_client *, unsigned int, unsigned int, unsigned int);
+ struct regmap *regmap;
struct dvb_frontend *dvb_frontend;
struct v4l2_subdev *v4l2_subdev;
struct dvb_usb_device *dvb_usb_device;
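Instead of exporting three register-access callbacks through platform data, the demod now hands its struct regmap pointer straight to the SDR module, which caches it at probe time (the @@ -1399 hunk above) and calls regmap_* directly. A sketch of the handoff, producer and consumer side:

    /* rtl2832 probe (producer): */
    pdata->regmap = dev->regmap;

    /* rtl2832_sdr probe (consumer): */
    struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;

    dev->regmap = pdata->regmap;
    ret = regmap_write(dev->regmap, 0x019, 0x05);   /* then use it directly */
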
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index f82aa141a..11489e1d5 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -18,53 +18,23 @@
static const struct dvb_frontend_ops si2168_ops;
-/* Own I2C adapter locking is needed because of I2C gate logic. */
-static int si2168_i2c_master_send_unlocked(const struct i2c_client *client,
- const char *buf, int count)
-{
- int ret;
- struct i2c_msg msg = {
- .addr = client->addr,
- .flags = 0,
- .len = count,
- .buf = (char *)buf,
- };
-
- ret = __i2c_transfer(client->adapter, &msg, 1);
- return (ret == 1) ? count : ret;
-}
-
-static int si2168_i2c_master_recv_unlocked(const struct i2c_client *client,
- char *buf, int count)
-{
- int ret;
- struct i2c_msg msg = {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = count,
- .buf = buf,
- };
-
- ret = __i2c_transfer(client->adapter, &msg, 1);
- return (ret == 1) ? count : ret;
-}
-
/* execute firmware command */
-static int si2168_cmd_execute_unlocked(struct i2c_client *client,
- struct si2168_cmd *cmd)
+static int si2168_cmd_execute(struct i2c_client *client, struct si2168_cmd *cmd)
{
+ struct si2168_dev *dev = i2c_get_clientdata(client);
int ret;
unsigned long timeout;
+ mutex_lock(&dev->i2c_mutex);
+
if (cmd->wlen) {
/* write cmd and args for firmware */
- ret = si2168_i2c_master_send_unlocked(client, cmd->args,
- cmd->wlen);
+ ret = i2c_master_send(client, cmd->args, cmd->wlen);
if (ret < 0) {
- goto err;
+ goto err_mutex_unlock;
} else if (ret != cmd->wlen) {
ret = -EREMOTEIO;
- goto err;
+ goto err_mutex_unlock;
}
}
@@ -73,13 +43,12 @@ static int si2168_cmd_execute_unlocked(struct i2c_client *client,
#define TIMEOUT 70
timeout = jiffies + msecs_to_jiffies(TIMEOUT);
while (!time_after(jiffies, timeout)) {
- ret = si2168_i2c_master_recv_unlocked(client, cmd->args,
- cmd->rlen);
+ ret = i2c_master_recv(client, cmd->args, cmd->rlen);
if (ret < 0) {
- goto err;
+ goto err_mutex_unlock;
} else if (ret != cmd->rlen) {
ret = -EREMOTEIO;
- goto err;
+ goto err_mutex_unlock;
}
/* firmware ready? */
@@ -94,32 +63,23 @@ static int si2168_cmd_execute_unlocked(struct i2c_client *client,
/* error bit set? */
if ((cmd->args[0] >> 6) & 0x01) {
ret = -EREMOTEIO;
- goto err;
+ goto err_mutex_unlock;
}
if (!((cmd->args[0] >> 7) & 0x01)) {
ret = -ETIMEDOUT;
- goto err;
+ goto err_mutex_unlock;
}
}
+ mutex_unlock(&dev->i2c_mutex);
return 0;
-err:
+err_mutex_unlock:
+ mutex_unlock(&dev->i2c_mutex);
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
-static int si2168_cmd_execute(struct i2c_client *client, struct si2168_cmd *cmd)
-{
- int ret;
-
- i2c_lock_adapter(client->adapter);
- ret = si2168_cmd_execute_unlocked(client, cmd);
- i2c_unlock_adapter(client->adapter);
-
- return ret;
-}
-
static int si2168_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
struct i2c_client *client = fe->demodulator_priv;
@@ -610,14 +570,9 @@ static int si2168_get_tune_settings(struct dvb_frontend *fe,
return 0;
}
-/*
- * I2C gate logic
- * We must use unlocked I2C I/O because I2C adapter lock is already taken
- * by the caller (usually tuner driver).
- */
-static int si2168_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
+static int si2168_select(struct i2c_mux_core *muxc, u32 chan)
{
- struct i2c_client *client = mux_priv;
+ struct i2c_client *client = i2c_mux_priv(muxc);
int ret;
struct si2168_cmd cmd;
@@ -625,7 +580,7 @@ static int si2168_select(struct i2c_adapter *adap, void *mux_priv, u32 chan)
memcpy(cmd.args, "\xc0\x0d\x01", 3);
cmd.wlen = 3;
cmd.rlen = 0;
- ret = si2168_cmd_execute_unlocked(client, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -635,9 +590,9 @@ err:
return ret;
}
-static int si2168_deselect(struct i2c_adapter *adap, void *mux_priv, u32 chan)
+static int si2168_deselect(struct i2c_mux_core *muxc, u32 chan)
{
- struct i2c_client *client = mux_priv;
+ struct i2c_client *client = i2c_mux_priv(muxc);
int ret;
struct si2168_cmd cmd;
@@ -645,7 +600,7 @@ static int si2168_deselect(struct i2c_adapter *adap, void *mux_priv, u32 chan)
memcpy(cmd.args, "\xc0\x0d\x00", 3);
cmd.wlen = 3;
cmd.rlen = 0;
- ret = si2168_cmd_execute_unlocked(client, &cmd);
+ ret = si2168_cmd_execute(client, &cmd);
if (ret)
goto err;
@@ -708,18 +663,25 @@ static int si2168_probe(struct i2c_client *client,
goto err;
}
+ mutex_init(&dev->i2c_mutex);
+
/* create mux i2c adapter for tuner */
- dev->adapter = i2c_add_mux_adapter(client->adapter, &client->dev,
- client, 0, 0, 0, si2168_select, si2168_deselect);
- if (dev->adapter == NULL) {
- ret = -ENODEV;
+ dev->muxc = i2c_mux_alloc(client->adapter, &client->dev,
+ 1, 0, I2C_MUX_LOCKED,
+ si2168_select, si2168_deselect);
+ if (!dev->muxc) {
+ ret = -ENOMEM;
goto err_kfree;
}
+ dev->muxc->priv = client;
+ ret = i2c_mux_add_adapter(dev->muxc, 0, 0, 0);
+ if (ret)
+ goto err_kfree;
/* create dvb_frontend */
memcpy(&dev->fe.ops, &si2168_ops, sizeof(struct dvb_frontend_ops));
dev->fe.demodulator_priv = client;
- *config->i2c_adapter = dev->adapter;
+ *config->i2c_adapter = dev->muxc->adapter[0];
*config->fe = &dev->fe;
dev->ts_mode = config->ts_mode;
dev->ts_clock_inv = config->ts_clock_inv;
@@ -743,7 +705,7 @@ static int si2168_remove(struct i2c_client *client)
dev_dbg(&client->dev, "\n");
- i2c_del_mux_adapter(dev->adapter);
+ i2c_mux_del_adapters(dev->muxc);
dev->fe.ops.release = NULL;
dev->fe.demodulator_priv = NULL;
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index d06b5bc6d..5ac036229 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -29,7 +29,8 @@
/* state struct */
struct si2168_dev {
- struct i2c_adapter *adapter;
+ struct mutex i2c_mutex;
+ struct i2c_mux_core *muxc;
struct dvb_frontend fe;
enum fe_delivery_system delivery_system;
enum fe_status fe_status;
diff --git a/drivers/media/dvb-frontends/zl10353.c b/drivers/media/dvb-frontends/zl10353.c
index 1832c2f76..3b08176d7 100644
--- a/drivers/media/dvb-frontends/zl10353.c
+++ b/drivers/media/dvb-frontends/zl10353.c
@@ -135,8 +135,7 @@ static void zl10353_calc_nominal_rate(struct dvb_frontend *fe,
value = (u64)10 * (1 << 23) / 7 * 125;
value = (bw * value) + adc_clock / 2;
- do_div(value, adc_clock);
- *nominal_rate = value;
+ *nominal_rate = div_u64(value, adc_clock);
dprintk("%s: bw %d, adc_clock %d => 0x%x\n",
__func__, bw, adc_clock, *nominal_rate);
@@ -163,8 +162,7 @@ static void zl10353_calc_input_freq(struct dvb_frontend *fe,
if (ife > adc_clock / 2)
ife = adc_clock - ife;
}
- value = (u64)65536 * ife + adc_clock / 2;
- do_div(value, adc_clock);
+ value = div_u64((u64)65536 * ife + adc_clock / 2, adc_clock);
*input_freq = -value;
dprintk("%s: if2 %d, ife %d, adc_clock %d => %d / 0x%x\n",
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index 788967dad..0462f461e 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -1130,8 +1130,6 @@ static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *
hdl = &state->hdl;
v4l2_ctrl_handler_init(hdl, 5);
- /* private controls */
-
state->hdmi_mode_ctrl = v4l2_ctrl_new_std_menu(hdl, &ad9389b_ctrl_ops,
V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI,
0, V4L2_DV_TX_MODE_DVI_D);
@@ -1151,12 +1149,6 @@ static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *
goto err_hdl;
}
- state->hdmi_mode_ctrl->is_private = true;
- state->hotplug_ctrl->is_private = true;
- state->rx_sense_ctrl->is_private = true;
- state->have_edid0_ctrl->is_private = true;
- state->rgb_quantization_range_ctrl->is_private = true;
-
state->pad.flags = MEDIA_PAD_FL_SINK;
err = media_entity_pads_init(&sd->entity, 1, &state->pad);
if (err)
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index fb7ed730d..e191e295c 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -95,7 +95,7 @@ static int adp1653_get_fault(struct adp1653_flash *flash)
int rval;
fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT);
- if (IS_ERR_VALUE(fault))
+ if (fault < 0)
return fault;
flash->fault |= fault;
@@ -105,13 +105,13 @@ static int adp1653_get_fault(struct adp1653_flash *flash)
/* Clear faults. */
rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0);
- if (IS_ERR_VALUE(rval))
+ if (rval < 0)
return rval;
flash->led_mode->val = V4L2_FLASH_LED_MODE_NONE;
rval = adp1653_update_hw(flash);
- if (IS_ERR_VALUE(rval))
+ if (rval)
return rval;
return flash->fault;
@@ -158,7 +158,7 @@ static int adp1653_get_ctrl(struct v4l2_ctrl *ctrl)
int rval;
rval = adp1653_get_fault(flash);
- if (IS_ERR_VALUE(rval))
+ if (rval)
return rval;
ctrl->cur.val = 0;
@@ -184,7 +184,7 @@ static int adp1653_set_ctrl(struct v4l2_ctrl *ctrl)
int rval;
rval = adp1653_get_fault(flash);
- if (IS_ERR_VALUE(rval))
+ if (rval)
return rval;
if ((rval & (ADP1653_REG_FAULT_FLT_SCP |
ADP1653_REG_FAULT_FLT_OT |
@@ -466,9 +466,9 @@ static int adp1653_of_init(struct i2c_client *client,
of_node_put(child);
pd->enable_gpio = devm_gpiod_get(&client->dev, "enable", GPIOD_OUT_LOW);
- if (!pd->enable_gpio) {
+ if (IS_ERR(pd->enable_gpio)) {
dev_err(&client->dev, "Error getting GPIO\n");
- return -EINVAL;
+ return PTR_ERR(pd->enable_gpio);
}
return 0;
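Two separate error-handling bugs are fixed in adp1653. First, IS_ERR_VALUE() is meant for unsigned long values that may encode an errno, and applying it to a plain int is unreliable; a simple sign test (or truthiness, where any non-zero return is a failure) is the right check. Second, devm_gpiod_get() never returns NULL; it reports failure through ERR_PTR(), so the old NULL check could never fire. The corrected idioms:

    fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT);
    if (fault < 0)                          /* int errno: test the sign */
            return fault;

    pd->enable_gpio = devm_gpiod_get(&client->dev, "enable", GPIOD_OUT_LOW);
    if (IS_ERR(pd->enable_gpio))            /* ERR_PTR, never NULL */
            return PTR_ERR(pd->enable_gpio);
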
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index ff57c1dcb..b77b0a4db 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -26,8 +26,9 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include <linux/mutex.h>
@@ -192,8 +193,8 @@ struct adv7180_state {
struct mutex mutex; /* mutual excl. when accessing chip */
int irq;
v4l2_std_id curr_norm;
- bool autodetect;
bool powered;
+ bool streaming;
u8 input;
struct i2c_client *client;
@@ -338,12 +339,26 @@ static int adv7180_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
if (err)
return err;
- /* when we are interrupt driven we know the state */
- if (!state->autodetect || state->irq > 0)
- *std = state->curr_norm;
- else
- err = __adv7180_status(state, NULL, std);
+ if (state->streaming) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ err = adv7180_set_video_standard(state,
+ ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM);
+ if (err)
+ goto unlock;
+
+ msleep(100);
+ __adv7180_status(state, NULL, std);
+
+ err = v4l2_std_to_adv7180(state->curr_norm);
+ if (err < 0)
+ goto unlock;
+ err = adv7180_set_video_standard(state, err);
+
+unlock:
mutex_unlock(&state->mutex);
return err;
}
@@ -387,23 +402,13 @@ static int adv7180_program_std(struct adv7180_state *state)
{
int ret;
- if (state->autodetect) {
- ret = adv7180_set_video_standard(state,
- ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM);
- if (ret < 0)
- return ret;
-
- __adv7180_status(state, NULL, &state->curr_norm);
- } else {
- ret = v4l2_std_to_adv7180(state->curr_norm);
- if (ret < 0)
- return ret;
-
- ret = adv7180_set_video_standard(state, ret);
- if (ret < 0)
- return ret;
- }
+ ret = v4l2_std_to_adv7180(state->curr_norm);
+ if (ret < 0)
+ return ret;
+ ret = adv7180_set_video_standard(state, ret);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -415,18 +420,12 @@ static int adv7180_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
if (ret)
return ret;
- /* all standards -> autodetect */
- if (std == V4L2_STD_ALL) {
- state->autodetect = true;
- } else {
- /* Make sure we can support this std */
- ret = v4l2_std_to_adv7180(std);
- if (ret < 0)
- goto out;
+ /* Make sure we can support this std */
+ ret = v4l2_std_to_adv7180(std);
+ if (ret < 0)
+ goto out;
- state->curr_norm = std;
- state->autodetect = false;
- }
+ state->curr_norm = std;
ret = adv7180_program_std(state);
out:
@@ -434,6 +433,15 @@ out:
return ret;
}
+static int adv7180_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
+{
+ struct adv7180_state *state = to_state(sd);
+
+ *norm = state->curr_norm;
+
+ return 0;
+}
+
static int adv7180_set_power(struct adv7180_state *state, bool on)
{
u8 val;
@@ -717,17 +725,77 @@ static int adv7180_g_mbus_config(struct v4l2_subdev *sd,
return 0;
}
+static int adv7180_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *cropcap)
+{
+ struct adv7180_state *state = to_state(sd);
+
+ if (state->curr_norm & V4L2_STD_525_60) {
+ cropcap->pixelaspect.numerator = 11;
+ cropcap->pixelaspect.denominator = 10;
+ } else {
+ cropcap->pixelaspect.numerator = 54;
+ cropcap->pixelaspect.denominator = 59;
+ }
+
+ return 0;
+}
+
+static int adv7180_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *norm)
+{
+ *norm = V4L2_STD_ALL;
+ return 0;
+}
+
+static int adv7180_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct adv7180_state *state = to_state(sd);
+ int ret;
+
+ /* It's always safe to stop streaming, no need to take the lock */
+ if (!enable) {
+ state->streaming = enable;
+ return 0;
+ }
+
+ /* Must wait until querystd released the lock */
+ ret = mutex_lock_interruptible(&state->mutex);
+ if (ret)
+ return ret;
+ state->streaming = enable;
+ mutex_unlock(&state->mutex);
+ return 0;
+}
+
+static int adv7180_subscribe_event(struct v4l2_subdev *sd,
+ struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subdev_subscribe(sd, fh, sub);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct v4l2_subdev_video_ops adv7180_video_ops = {
.s_std = adv7180_s_std,
+ .g_std = adv7180_g_std,
.querystd = adv7180_querystd,
.g_input_status = adv7180_g_input_status,
.s_routing = adv7180_s_routing,
.g_mbus_config = adv7180_g_mbus_config,
+ .cropcap = adv7180_cropcap,
+ .g_tvnorms = adv7180_g_tvnorms,
+ .s_stream = adv7180_s_stream,
};
-
static const struct v4l2_subdev_core_ops adv7180_core_ops = {
.s_power = adv7180_s_power,
+ .subscribe_event = adv7180_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_pad_ops adv7180_pad_ops = {
@@ -752,8 +820,14 @@ static irqreturn_t adv7180_irq(int irq, void *devid)
/* clear */
adv7180_write(state, ADV7180_REG_ICR3, isr3);
- if (isr3 & ADV7180_IRQ3_AD_CHANGE && state->autodetect)
- __adv7180_status(state, NULL, &state->curr_norm);
+ if (isr3 & ADV7180_IRQ3_AD_CHANGE) {
+ static const struct v4l2_event src_ch = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ v4l2_subdev_notify_event(&state->sd, &src_ch);
+ }
mutex_unlock(&state->mutex);
return IRQ_HANDLED;
@@ -1198,7 +1272,7 @@ static int adv7180_probe(struct i2c_client *client,
state->irq = client->irq;
mutex_init(&state->mutex);
- state->autodetect = true;
+ state->curr_norm = V4L2_STD_NTSC;
if (state->chip_info->flags & ADV7180_FLAG_RESET_POWERED)
state->powered = true;
else
@@ -1206,7 +1280,7 @@ static int adv7180_probe(struct i2c_client *client,
state->input = 0;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
- sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
ret = adv7180_init_controls(state);
if (ret)
@@ -1328,6 +1402,14 @@ static SIMPLE_DEV_PM_OPS(adv7180_pm_ops, adv7180_suspend, adv7180_resume);
#ifdef CONFIG_OF
static const struct of_device_id adv7180_of_id[] = {
{ .compatible = "adi,adv7180", },
+ { .compatible = "adi,adv7182", },
+ { .compatible = "adi,adv7280", },
+ { .compatible = "adi,adv7280-m", },
+ { .compatible = "adi,adv7281", },
+ { .compatible = "adi,adv7281-m", },
+ { .compatible = "adi,adv7281-ma", },
+ { .compatible = "adi,adv7282", },
+ { .compatible = "adi,adv7282-m", },
{ },
};
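The adv7180 rework replaces the old free-running autodetect mode with an explicit model: the driver always runs a fixed standard (defaulting to NTSC at probe), querystd() performs a one-shot detection while the stream is stopped (it returns -EBUSY when the streaming flag is set), and the IRQ handler reports lock changes as V4L2_EVENT_SOURCE_CHANGE instead of silently rewriting curr_norm. Userspace is expected to listen for the event and re-query; a hypothetical sketch (fd on the video node and std as a v4l2_std_id are assumed):

    struct v4l2_event_subscription sub = { .type = V4L2_EVENT_SOURCE_CHANGE };
    struct v4l2_event ev;
    struct pollfd pfd = { .fd = fd, .events = POLLPRI };

    ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
    poll(&pfd, 1, -1);                      /* events arrive as POLLPRI */
    ioctl(fd, VIDIOC_DQEVENT, &ev);
    if (ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION)
            ioctl(fd, VIDIOC_QUERYSTD, &std);   /* re-detect before streaming */
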
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index bd822f032..39271c35d 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -1502,12 +1502,6 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
err = hdl->error;
goto err_hdl;
}
- state->hdmi_mode_ctrl->is_private = true;
- state->hotplug_ctrl->is_private = true;
- state->rx_sense_ctrl->is_private = true;
- state->have_edid0_ctrl->is_private = true;
- state->rgb_quantization_range_ctrl->is_private = true;
-
state->pad.flags = MEDIA_PAD_FL_SINK;
err = media_entity_pads_init(&sd->entity, 1, &state->pad);
if (err)
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 41a1bfc5e..3f1ab4986 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -779,11 +779,31 @@ static const struct v4l2_dv_timings_cap adv76xx_timings_cap_digital = {
V4L2_DV_BT_CAP_CUSTOM)
};
-static inline const struct v4l2_dv_timings_cap *
-adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd)
+/*
+ * Return the DV timings capabilities for the requested sink pad. As a special
+ * case, pad value -1 returns the capabilities for the currently selected input.
+ */
+static const struct v4l2_dv_timings_cap *
+adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd, int pad)
{
- return is_digital_input(sd) ? &adv76xx_timings_cap_digital :
- &adv7604_timings_cap_analog;
+ if (pad == -1) {
+ struct adv76xx_state *state = to_state(sd);
+
+ pad = state->selected_input;
+ }
+
+ switch (pad) {
+ case ADV76XX_PAD_HDMI_PORT_A:
+ case ADV7604_PAD_HDMI_PORT_B:
+ case ADV7604_PAD_HDMI_PORT_C:
+ case ADV7604_PAD_HDMI_PORT_D:
+ return &adv76xx_timings_cap_digital;
+
+ case ADV7604_PAD_VGA_RGB:
+ case ADV7604_PAD_VGA_COMP:
+ default:
+ return &adv7604_timings_cap_analog;
+ }
}
@@ -1329,7 +1349,7 @@ static int stdi2dv_timings(struct v4l2_subdev *sd,
const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
if (!v4l2_valid_dv_timings(&v4l2_dv_timings_presets[i],
- adv76xx_get_dv_timings_cap(sd),
+ adv76xx_get_dv_timings_cap(sd, -1),
adv76xx_check_dv_timings, NULL))
continue;
if (vtotal(bt) != stdi->lcf + 1)
@@ -1430,18 +1450,22 @@ static int adv76xx_enum_dv_timings(struct v4l2_subdev *sd,
return -EINVAL;
return v4l2_enum_dv_timings_cap(timings,
- adv76xx_get_dv_timings_cap(sd), adv76xx_check_dv_timings, NULL);
+ adv76xx_get_dv_timings_cap(sd, timings->pad),
+ adv76xx_check_dv_timings, NULL);
}
static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
struct v4l2_dv_timings_cap *cap)
{
struct adv76xx_state *state = to_state(sd);
+ unsigned int pad = cap->pad;
if (cap->pad >= state->source_pad)
return -EINVAL;
- *cap = *adv76xx_get_dv_timings_cap(sd);
+ *cap = *adv76xx_get_dv_timings_cap(sd, pad);
+ cap->pad = pad;
+
return 0;
}
@@ -1450,9 +1474,9 @@ static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
static void adv76xx_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
- v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd),
- is_digital_input(sd) ? 250000 : 1000000,
- adv76xx_check_dv_timings, NULL);
+ v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd, -1),
+ is_digital_input(sd) ? 250000 : 1000000,
+ adv76xx_check_dv_timings, NULL);
}
static unsigned int adv7604_read_hdmi_pixelclock(struct v4l2_subdev *sd)
@@ -1620,7 +1644,7 @@ static int adv76xx_s_dv_timings(struct v4l2_subdev *sd,
bt = &timings->bt;
- if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd),
+ if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd, -1),
adv76xx_check_dv_timings, NULL))
return -ERANGE;
@@ -3141,7 +3165,6 @@ static int adv76xx_probe(struct i2c_client *client,
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
- /* private controls */
state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl, NULL,
V4L2_CID_DV_RX_POWER_PRESENT, 0,
(1 << state->info->num_dv_ports) - 1, 0, 0);
@@ -3164,13 +3187,6 @@ static int adv76xx_probe(struct i2c_client *client,
err = hdl->error;
goto err_hdl;
}
- state->detect_tx_5v_ctrl->is_private = true;
- state->rgb_quantization_range_ctrl->is_private = true;
- if (adv76xx_has_afe(state))
- state->analog_sampling_phase_ctrl->is_private = true;
- state->free_run_color_manual_ctrl->is_private = true;
- state->free_run_color_ctrl->is_private = true;
-
if (adv76xx_s_detect_tx_5v_ctrl(sd)) {
err = -ENODEV;
goto err_hdl;
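The adv7604 change above replaces a single digital/analog test with a per-pad capability lookup, where pad -1 means "use the currently selected input". A minimal userspace C model of that dispatch (pad names simplified for illustration; not the kernel enum):

/* Userspace model (not kernel code) of the per-pad lookup: pad -1
 * falls back to the currently selected input. Pad numbering below is
 * a simplified assumption for the sketch. */
#include <stdio.h>

enum pad { PAD_HDMI_A, PAD_HDMI_B, PAD_HDMI_C, PAD_HDMI_D,
	   PAD_VGA_RGB, PAD_VGA_COMP };

static const char *get_timings_cap(int selected_input, int pad)
{
	if (pad == -1)			/* special case: active input */
		pad = selected_input;

	switch (pad) {
	case PAD_HDMI_A:
	case PAD_HDMI_B:
	case PAD_HDMI_C:
	case PAD_HDMI_D:
		return "digital timings caps";
	default:			/* VGA pads -> analog caps */
		return "analog timings caps";
	}
}

int main(void)
{
	printf("%s\n", get_timings_cap(PAD_VGA_RGB, PAD_HDMI_B)); /* digital */
	printf("%s\n", get_timings_cap(PAD_VGA_RGB, -1));         /* analog  */
	return 0;
}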
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 7ccb85d45..ecaacb0a6 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -3300,12 +3300,6 @@ static int adv7842_probe(struct i2c_client *client,
err = hdl->error;
goto err_hdl;
}
- state->detect_tx_5v_ctrl->is_private = true;
- state->rgb_quantization_range_ctrl->is_private = true;
- state->analog_sampling_phase_ctrl->is_private = true;
- state->free_run_color_ctrl_manual->is_private = true;
- state->free_run_color_ctrl->is_private = true;
-
if (adv7842_s_detect_tx_5v_ctrl(sd)) {
err = -ENODEV;
goto err_hdl;
diff --git a/drivers/media/i2c/m5mols/m5mols_controls.c b/drivers/media/i2c/m5mols/m5mols_controls.c
index a60931e66..c2218c0a9 100644
--- a/drivers/media/i2c/m5mols/m5mols_controls.c
+++ b/drivers/media/i2c/m5mols/m5mols_controls.c
@@ -405,7 +405,7 @@ static int m5mols_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
struct v4l2_subdev *sd = to_sd(ctrl);
struct m5mols_info *info = to_m5mols(sd);
int ret = 0;
- u8 status;
+ u8 status = REG_ISO_AUTO;
v4l2_dbg(1, m5mols_debug, sd, "%s: ctrl: %s (%d)\n",
__func__, ctrl->name, info->isp_ready);
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index d2a1ce2bc..bd3526bdd 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1798,6 +1798,21 @@ static int saa711x_detect_chip(struct i2c_client *client,
return GM7113C;
}
+ /* Check if it is a CJC7113 */
+ if (!memcmp(name, "1111111111111111", CHIP_VER_SIZE)) {
+ strlcpy(name, "cjc7113", CHIP_VER_SIZE);
+
+ if (!autodetect && strcmp(name, id->name))
+ return -EINVAL;
+
+ v4l_dbg(1, debug, client,
+ "It seems to be a %s chip (%*ph) @ 0x%x.\n",
+ name, 16, chip_ver, client->addr << 1);
+
+ /* CJC7113 seems to be SAA7113-compatible */
+ return SAA7113;
+ }
+
/* Chip was not discovered. Return its ID and don't bind */
v4l_dbg(1, debug, client, "chip %*ph @ 0x%x is unknown.\n",
16, chip_ver, client->addr << 1);
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index a215efe7a..3dfe387ab 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -188,6 +188,8 @@ static int smiapp_read_frame_fmt(struct smiapp_sensor *sensor)
embedded_end = 0;
}
+ sensor->image_start = image_start;
+
dev_dbg(&client->dev, "embedded data from lines %d to %d\n",
embedded_start, embedded_end);
dev_dbg(&client->dev, "image data starts at line %d\n", image_start);
@@ -2280,6 +2282,15 @@ static int smiapp_get_skip_frames(struct v4l2_subdev *subdev, u32 *frames)
return 0;
}
+static int smiapp_get_skip_top_lines(struct v4l2_subdev *subdev, u32 *lines)
+{
+ struct smiapp_sensor *sensor = to_smiapp_sensor(subdev);
+
+ *lines = sensor->image_start;
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* sysfs attributes
*/
@@ -2890,6 +2901,7 @@ static const struct v4l2_subdev_pad_ops smiapp_pad_ops = {
static const struct v4l2_subdev_sensor_ops smiapp_sensor_ops = {
.g_skip_frames = smiapp_get_skip_frames,
+ .g_skip_top_lines = smiapp_get_skip_top_lines,
};
static const struct v4l2_subdev_ops smiapp_ops = {
diff --git a/drivers/media/i2c/smiapp/smiapp.h b/drivers/media/i2c/smiapp/smiapp.h
index f6af0cc4a..2174f89a0 100644
--- a/drivers/media/i2c/smiapp/smiapp.h
+++ b/drivers/media/i2c/smiapp/smiapp.h
@@ -217,6 +217,7 @@ struct smiapp_sensor {
u8 hvflip_inv_mask; /* H/VFLIP inversion due to sensor orientation */
u8 frame_skip;
+ u16 image_start; /* Offset to first line after metadata lines */
int power_count;
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 972e0d472..6cf6d0673 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -1551,6 +1551,8 @@ static int tc358743_g_edid(struct v4l2_subdev *sd,
{
struct tc358743_state *state = to_state(sd);
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+
if (edid->pad != 0)
return -EINVAL;
@@ -1585,6 +1587,8 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
v4l2_dbg(2, debug, sd, "%s, pad %d, start block %d, blocks %d\n",
__func__, edid->pad, edid->start_block, edid->blocks);
+ memset(edid->reserved, 0, sizeof(edid->reserved));
+
if (edid->pad != 0)
return -EINVAL;
@@ -1859,7 +1863,6 @@ static int tc358743_probe(struct i2c_client *client,
/* control handlers */
v4l2_ctrl_handler_init(&state->hdl, 3);
- /* private controls */
state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(&state->hdl, NULL,
V4L2_CID_DV_RX_POWER_PRESENT, 0, 1, 0, 0);
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index 5bbfcab01..71a313521 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -285,7 +285,7 @@ static int ths7303_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "stream %s\n", state->stream_on ? "On" : "Off");
if (state->bt.pixelclock) {
- struct v4l2_bt_timings *bt = bt = &state->bt;
+ struct v4l2_bt_timings *bt = &state->bt;
u32 frame_width, frame_height;
frame_width = V4L2_DV_BT_FRAME_WIDTH(bt);
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index ff18444e1..0b6d46c45 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -83,7 +83,7 @@ static int tvp5150_read(struct v4l2_subdev *sd, unsigned char addr)
return rc;
}
-static inline void tvp5150_write(struct v4l2_subdev *sd, unsigned char addr,
+static int tvp5150_write(struct v4l2_subdev *sd, unsigned char addr,
unsigned char value)
{
struct i2c_client *c = v4l2_get_subdevdata(sd);
@@ -92,7 +92,9 @@ static inline void tvp5150_write(struct v4l2_subdev *sd, unsigned char addr,
v4l2_dbg(2, debug, sd, "tvp5150: writing 0x%02x 0x%02x\n", addr, value);
rc = i2c_smbus_write_byte_data(c, addr, value);
if (rc < 0)
- v4l2_dbg(0, debug, sd, "i2c i/o error: rc == %d\n", rc);
+ v4l2_err(sd, "i2c i/o error: rc == %d\n", rc);
+
+ return rc;
}
static void dump_reg_range(struct v4l2_subdev *sd, char *s, u8 init,
@@ -1159,8 +1161,7 @@ static int tvp5150_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *
static int tvp5150_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
{
- tvp5150_write(sd, reg->reg & 0xff, reg->val & 0xff);
- return 0;
+ return tvp5150_write(sd, reg->reg & 0xff, reg->val & 0xff);
}
#endif
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 3cfd7af8c..a1cd50f33 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -90,18 +90,13 @@ static struct media_entity *find_entity(struct media_device *mdev, u32 id)
id &= ~MEDIA_ENT_ID_FLAG_NEXT;
- spin_lock(&mdev->lock);
-
media_device_for_each_entity(entity, mdev) {
if (((media_entity_id(entity) == id) && !next) ||
((media_entity_id(entity) > id) && next)) {
- spin_unlock(&mdev->lock);
return entity;
}
}
- spin_unlock(&mdev->lock);
-
return NULL;
}
@@ -431,6 +426,7 @@ static long media_device_ioctl(struct file *filp, unsigned int cmd,
struct media_device *dev = to_media_device(devnode);
long ret;
+ mutex_lock(&dev->graph_mutex);
switch (cmd) {
case MEDIA_IOC_DEVICE_INFO:
ret = media_device_get_info(dev,
@@ -443,29 +439,24 @@ static long media_device_ioctl(struct file *filp, unsigned int cmd,
break;
case MEDIA_IOC_ENUM_LINKS:
- mutex_lock(&dev->graph_mutex);
ret = media_device_enum_links(dev,
(struct media_links_enum __user *)arg);
- mutex_unlock(&dev->graph_mutex);
break;
case MEDIA_IOC_SETUP_LINK:
- mutex_lock(&dev->graph_mutex);
ret = media_device_setup_link(dev,
(struct media_link_desc __user *)arg);
- mutex_unlock(&dev->graph_mutex);
break;
case MEDIA_IOC_G_TOPOLOGY:
- mutex_lock(&dev->graph_mutex);
ret = media_device_get_topology(dev,
(struct media_v2_topology __user *)arg);
- mutex_unlock(&dev->graph_mutex);
break;
default:
ret = -ENOIOCTLCMD;
}
+ mutex_unlock(&dev->graph_mutex);
return ret;
}
@@ -508,12 +499,6 @@ static long media_device_compat_ioctl(struct file *filp, unsigned int cmd,
long ret;
switch (cmd) {
- case MEDIA_IOC_DEVICE_INFO:
- case MEDIA_IOC_ENUM_ENTITIES:
- case MEDIA_IOC_SETUP_LINK:
- case MEDIA_IOC_G_TOPOLOGY:
- return media_device_ioctl(filp, cmd, arg);
-
case MEDIA_IOC_ENUM_LINKS32:
mutex_lock(&dev->graph_mutex);
ret = media_device_enum_links32(dev,
@@ -522,7 +507,7 @@ static long media_device_compat_ioctl(struct file *filp, unsigned int cmd,
break;
default:
- ret = -ENOIOCTLCMD;
+ return media_device_ioctl(filp, cmd, arg);
}
return ret;
@@ -590,12 +575,12 @@ int __must_check media_device_register_entity(struct media_device *mdev,
if (!ida_pre_get(&mdev->entity_internal_idx, GFP_KERNEL))
return -ENOMEM;
- spin_lock(&mdev->lock);
+ mutex_lock(&mdev->graph_mutex);
ret = ida_get_new_above(&mdev->entity_internal_idx, 1,
&entity->internal_idx);
if (ret < 0) {
- spin_unlock(&mdev->lock);
+ mutex_unlock(&mdev->graph_mutex);
return ret;
}
@@ -615,9 +600,6 @@ int __must_check media_device_register_entity(struct media_device *mdev,
(notify)->notify(entity, notify->notify_data);
}
- spin_unlock(&mdev->lock);
-
- mutex_lock(&mdev->graph_mutex);
if (mdev->entity_internal_idx_max
>= mdev->pm_count_walk.ent_enum.idx_max) {
struct media_entity_graph new = { .top = 0 };
@@ -680,9 +662,9 @@ void media_device_unregister_entity(struct media_entity *entity)
if (mdev == NULL)
return;
- spin_lock(&mdev->lock);
+ mutex_lock(&mdev->graph_mutex);
__media_device_unregister_entity(entity);
- spin_unlock(&mdev->lock);
+ mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_device_unregister_entity);
@@ -703,7 +685,6 @@ void media_device_init(struct media_device *mdev)
INIT_LIST_HEAD(&mdev->pads);
INIT_LIST_HEAD(&mdev->links);
INIT_LIST_HEAD(&mdev->entity_notify);
- spin_lock_init(&mdev->lock);
mutex_init(&mdev->graph_mutex);
ida_init(&mdev->entity_internal_idx);
@@ -752,9 +733,9 @@ EXPORT_SYMBOL_GPL(__media_device_register);
int __must_check media_device_register_entity_notify(struct media_device *mdev,
struct media_entity_notify *nptr)
{
- spin_lock(&mdev->lock);
+ mutex_lock(&mdev->graph_mutex);
list_add_tail(&nptr->list, &mdev->entity_notify);
- spin_unlock(&mdev->lock);
+ mutex_unlock(&mdev->graph_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(media_device_register_entity_notify);
@@ -771,9 +752,9 @@ static void __media_device_unregister_entity_notify(struct media_device *mdev,
void media_device_unregister_entity_notify(struct media_device *mdev,
struct media_entity_notify *nptr)
{
- spin_lock(&mdev->lock);
+ mutex_lock(&mdev->graph_mutex);
__media_device_unregister_entity_notify(mdev, nptr);
- spin_unlock(&mdev->lock);
+ mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_device_unregister_entity_notify);
@@ -787,11 +768,11 @@ void media_device_unregister(struct media_device *mdev)
if (mdev == NULL)
return;
- spin_lock(&mdev->lock);
+ mutex_lock(&mdev->graph_mutex);
/* Check if mdev was ever registered at all */
if (!media_devnode_is_registered(&mdev->devnode)) {
- spin_unlock(&mdev->lock);
+ mutex_unlock(&mdev->graph_mutex);
return;
}
@@ -811,12 +792,11 @@ void media_device_unregister(struct media_device *mdev)
kfree(intf);
}
- spin_unlock(&mdev->lock);
+ mutex_unlock(&mdev->graph_mutex);
device_remove_file(&mdev->devnode.dev, &dev_attr_model);
+ dev_dbg(mdev->dev, "Media device unregistering\n");
media_devnode_unregister(&mdev->devnode);
-
- dev_dbg(mdev->dev, "Media device unregistered\n");
}
EXPORT_SYMBOL_GPL(media_device_unregister);
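The media-device changes above retire the per-device spinlock in favor of the graph mutex and hoist the locking out of the individual ioctl cases; the compat handler now forwards any command it does not translate itself. A condensed userspace model of that pattern (a pthread mutex stands in for graph_mutex; the command numbers are placeholders, not the real MEDIA_IOC_* values):

/* Userspace model: one mutex held across the whole ioctl dispatch
 * instead of per-case lock/unlock pairs; the compat path handles only
 * its 32-bit variants and forwards the rest. */
#include <stdio.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t graph_mutex = PTHREAD_MUTEX_INITIALIZER;

static long do_ioctl(unsigned int cmd)
{
	long ret;

	pthread_mutex_lock(&graph_mutex);	/* covers every command */
	switch (cmd) {
	case 1:	ret = 0; break;			/* DEVICE_INFO          */
	case 2:	ret = 0; break;			/* ENUM_LINKS           */
	default: ret = -ENOTTY;			/* unknown command      */
	}
	pthread_mutex_unlock(&graph_mutex);
	return ret;
}

static long do_compat_ioctl(unsigned int cmd)
{
	if (cmd == 3)				/* ENUM_LINKS32         */
		return 0;			/* translate, then lock */
	return do_ioctl(cmd);			/* forward the rest     */
}

int main(void)
{
	printf("native: %ld, compat: %ld\n", do_ioctl(1), do_compat_ioctl(9));
	return 0;
}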
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index 29409f440..b66dc9d07 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -197,10 +197,11 @@ static int media_release(struct inode *inode, struct file *filp)
if (mdev->fops->release)
mdev->fops->release(filp);
+ filp->private_data = NULL;
+
/* decrease the refcount unconditionally since the release()
return value is ignored. */
put_device(&mdev->dev);
- filp->private_data = NULL;
return 0;
}
@@ -267,8 +268,11 @@ int __must_check media_devnode_register(struct media_devnode *mdev,
return 0;
error:
+ mutex_lock(&media_devnode_lock);
cdev_del(&mdev->cdev);
clear_bit(mdev->minor, media_devnode_nums);
+ mutex_unlock(&media_devnode_lock);
+
return ret;
}
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index e95070b3a..d8a2299f0 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -219,7 +219,7 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
entity->pads = pads;
if (mdev)
- spin_lock(&mdev->lock);
+ mutex_lock(&mdev->graph_mutex);
for (i = 0; i < num_pads; i++) {
pads[i].entity = entity;
@@ -230,7 +230,7 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
}
if (mdev)
- spin_unlock(&mdev->lock);
+ mutex_unlock(&mdev->graph_mutex);
return 0;
}
@@ -445,7 +445,7 @@ __must_check int __media_entity_pipeline_start(struct media_entity *entity,
bitmap_or(active, active, has_no_links, entity->num_pads);
if (!bitmap_full(active, entity->num_pads)) {
- ret = -EPIPE;
+ ret = -ENOLINK;
dev_dbg(entity->graph_obj.mdev->dev,
"\"%s\":%u must be connected by an enabled link\n",
entity->name,
@@ -747,9 +747,9 @@ void media_entity_remove_links(struct media_entity *entity)
if (mdev == NULL)
return;
- spin_lock(&mdev->lock);
+ mutex_lock(&mdev->graph_mutex);
__media_entity_remove_links(entity);
- spin_unlock(&mdev->lock);
+ mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_entity_remove_links);
@@ -951,9 +951,9 @@ void media_remove_intf_link(struct media_link *link)
if (mdev == NULL)
return;
- spin_lock(&mdev->lock);
+ mutex_lock(&mdev->graph_mutex);
__media_remove_intf_link(link);
- spin_unlock(&mdev->lock);
+ mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_remove_intf_link);
@@ -975,8 +975,8 @@ void media_remove_intf_links(struct media_interface *intf)
if (mdev == NULL)
return;
- spin_lock(&mdev->lock);
+ mutex_lock(&mdev->graph_mutex);
__media_remove_intf_links(intf);
- spin_unlock(&mdev->lock);
+ mutex_unlock(&mdev->graph_mutex);
}
EXPORT_SYMBOL_GPL(media_remove_intf_links);
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index 48a611bc3..4f6467fba 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -14,6 +14,7 @@ source "drivers/media/pci/meye/Kconfig"
source "drivers/media/pci/solo6x10/Kconfig"
source "drivers/media/pci/sta2x11/Kconfig"
source "drivers/media/pci/tw68/Kconfig"
+source "drivers/media/pci/tw686x/Kconfig"
source "drivers/media/pci/zoran/Kconfig"
endif
diff --git a/drivers/media/pci/Makefile b/drivers/media/pci/Makefile
index 5f8aacb8b..2e54c3644 100644
--- a/drivers/media/pci/Makefile
+++ b/drivers/media/pci/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_VIDEO_BT848) += bt8xx/
obj-$(CONFIG_VIDEO_SAA7134) += saa7134/
obj-$(CONFIG_VIDEO_SAA7164) += saa7164/
obj-$(CONFIG_VIDEO_TW68) += tw68/
+obj-$(CONFIG_VIDEO_TW686X) += tw686x/
obj-$(CONFIG_VIDEO_DT3155) += dt3155/
obj-$(CONFIG_VIDEO_MEYE) += meye/
obj-$(CONFIG_STA2X11_VIP) += sta2x11/
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index a01f0cc74..70343829a 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -4,6 +4,7 @@ config VIDEO_COBALT
depends on PCI_MSI && MTD_COMPLEX_MAPPINGS
depends on GPIOLIB || COMPILE_TEST
depends on SND
+ depends on MTD
select I2C_ALGOBIT
select VIDEO_ADV7604
select VIDEO_ADV7511
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index 7e31f2a2e..47ce80fa7 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -707,11 +707,7 @@ static inline int cx18_raw_vbi(const struct cx18 *cx)
/* Call the specified callback for all subdevs with a grp_id bit matching the
* mask in hw (if 0, then match them all). Ignore any errors. */
#define cx18_call_hw(cx, hw, o, f, args...) \
- do { \
- struct v4l2_subdev *__sd; \
- __v4l2_device_call_subdevs_p(&(cx)->v4l2_dev, __sd, \
- !(hw) || (__sd->grp_id & (hw)), o, f , ##args); \
- } while (0)
+ v4l2_device_mask_call_all(&(cx)->v4l2_dev, hw, o, f, ##args)
#define cx18_call_all(cx, o, f, args...) cx18_call_hw(cx, 0, o, f , ##args)
@@ -719,12 +715,7 @@ static inline int cx18_raw_vbi(const struct cx18 *cx)
* mask in hw (if 0, then match them all). If the callback returns an error
* other than 0 or -ENOIOCTLCMD, then return with that error code. */
#define cx18_call_hw_err(cx, hw, o, f, args...) \
-({ \
- struct v4l2_subdev *__sd; \
- __v4l2_device_call_subdevs_until_err_p(&(cx)->v4l2_dev, \
- __sd, !(hw) || (__sd->grp_id & (hw)), o, f, \
- ##args); \
-})
+ v4l2_device_mask_call_until_err(&(cx)->v4l2_dev, hw, o, f, ##args)
#define cx18_call_all_err(cx, o, f, args...) \
cx18_call_hw_err(cx, 0, o, f , ##args)
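The two hunks above collapse open-coded subdev walks into the v4l2_device_mask_call_all()/v4l2_device_mask_call_until_err() helpers. The matching rule they keep, !(hw) || (grp_id & hw), is easy to model in isolation:

/* Illustrative model of the group-mask matching used by the helpers:
 * mask 0 matches every subdev, otherwise any overlapping bit matches. */
#include <stdio.h>
#include <stdbool.h>

static bool matches(unsigned int grp_id, unsigned int hw_mask)
{
	return !hw_mask || (grp_id & hw_mask);
}

int main(void)
{
	unsigned int subdevs[] = { 0x1, 0x2, 0x4 };	/* grp_id per subdev */
	unsigned int hw = 0x6;				/* caller's mask     */

	for (unsigned int i = 0; i < 3; i++)
		if (matches(subdevs[i], hw))
			printf("calling op on subdev %u\n", i);
	/* hw == 0 would call the op on all three */
	return 0;
}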
diff --git a/drivers/media/pci/cx23885/cx23885-av.c b/drivers/media/pci/cx23885/cx23885-av.c
index 877dad891..e7d4406f9 100644
--- a/drivers/media/pci/cx23885/cx23885-av.c
+++ b/drivers/media/pci/cx23885/cx23885-av.c
@@ -24,7 +24,7 @@ void cx23885_av_work_handler(struct work_struct *work)
{
struct cx23885_dev *dev =
container_of(work, struct cx23885_dev, cx25840_work);
- bool handled;
+ bool handled = false;
v4l2_subdev_call(dev->sd_cx25840, core, interrupt_service_routine,
PCI_MSK_AV_CORE, &handled);
diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
index 6c08dae67..10cba305d 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.h
+++ b/drivers/media/pci/ivtv/ivtv-driver.h
@@ -827,12 +827,7 @@ static inline int ivtv_raw_vbi(const struct ivtv *itv)
/* Call the specified callback for all subdevs matching hw (if 0, then
match them all). Ignore any errors. */
#define ivtv_call_hw(itv, hw, o, f, args...) \
- do { \
- struct v4l2_subdev *__sd; \
- __v4l2_device_call_subdevs_p(&(itv)->v4l2_dev, __sd, \
- !(hw) ? true : (__sd->grp_id & (hw)), \
- o, f, ##args); \
- } while (0)
+ v4l2_device_mask_call_all(&(itv)->v4l2_dev, hw, o, f, ##args)
#define ivtv_call_all(itv, o, f, args...) ivtv_call_hw(itv, 0, o, f , ##args)
@@ -840,11 +835,7 @@ static inline int ivtv_raw_vbi(const struct ivtv *itv)
match them all). If the callback returns an error other than 0 or
-ENOIOCTLCMD, then return with that error code. */
#define ivtv_call_hw_err(itv, hw, o, f, args...) \
-({ \
- struct v4l2_subdev *__sd; \
- __v4l2_device_call_subdevs_until_err_p(&(itv)->v4l2_dev, __sd, \
- !(hw) || (__sd->grp_id & (hw)), o, f , ##args); \
-})
+ v4l2_device_mask_call_until_err(&(itv)->v4l2_dev, hw, o, f, ##args)
#define ivtv_call_all_err(itv, o, f, args...) ivtv_call_hw_err(itv, 0, o, f , ##args)
diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c
index d018673c7..826c7c75e 100644
--- a/drivers/media/pci/smipcie/smipcie-ir.c
+++ b/drivers/media/pci/smipcie/smipcie-ir.c
@@ -203,7 +203,7 @@ int smi_ir_init(struct smi_dev *dev)
rc_dev->dev.parent = &dev->pci_dev->dev;
rc_dev->driver_type = RC_DRIVER_SCANCODE;
- rc_dev->map_name = RC_MAP_DVBSKY;
+ rc_dev->map_name = dev->info->rc_map;
ir->rc_dev = rc_dev;
ir->dev = dev;
diff --git a/drivers/media/pci/smipcie/smipcie-main.c b/drivers/media/pci/smipcie/smipcie-main.c
index b039a229b..83981d611 100644
--- a/drivers/media/pci/smipcie/smipcie-main.c
+++ b/drivers/media/pci/smipcie/smipcie-main.c
@@ -716,7 +716,8 @@ static int smi_fe_init(struct smi_port *port)
/* init MAC.*/
ret = smi_read_eeprom(&dev->i2c_bus[0], 0xc0, mac_ee, 16);
dev_info(&port->dev->pci_dev->dev,
- "DVBSky SMI PCIe MAC= %pM\n", mac_ee + (port->idx)*8);
+ "%s port %d MAC: %pM\n", dev->info->name,
+ port->idx, mac_ee + (port->idx)*8);
memcpy(adap->proposed_mac, mac_ee + (port->idx)*8, 6);
return ret;
}
@@ -1066,6 +1067,7 @@ static struct smi_cfg_info dvbsky_s950_cfg = {
.ts_1 = SMI_TS_DMA_BOTH,
.fe_0 = DVBSKY_FE_NULL,
.fe_1 = DVBSKY_FE_M88DS3103,
+ .rc_map = RC_MAP_DVBSKY,
};
static struct smi_cfg_info dvbsky_s952_cfg = {
@@ -1075,6 +1077,7 @@ static struct smi_cfg_info dvbsky_s952_cfg = {
.ts_1 = SMI_TS_DMA_BOTH,
.fe_0 = DVBSKY_FE_M88RS6000,
.fe_1 = DVBSKY_FE_M88RS6000,
+ .rc_map = RC_MAP_DVBSKY,
};
static struct smi_cfg_info dvbsky_t9580_cfg = {
@@ -1084,6 +1087,17 @@ static struct smi_cfg_info dvbsky_t9580_cfg = {
.ts_1 = SMI_TS_DMA_BOTH,
.fe_0 = DVBSKY_FE_SIT2,
.fe_1 = DVBSKY_FE_M88DS3103,
+ .rc_map = RC_MAP_DVBSKY,
+};
+
+static struct smi_cfg_info technotrend_s2_4200_cfg = {
+ .type = SMI_TECHNOTREND_S2_4200,
+ .name = "TechnoTrend TT-budget S2-4200 Twin",
+ .ts_0 = SMI_TS_DMA_BOTH,
+ .ts_1 = SMI_TS_DMA_BOTH,
+ .fe_0 = DVBSKY_FE_M88RS6000,
+ .fe_1 = DVBSKY_FE_M88RS6000,
+ .rc_map = RC_MAP_TT_1500,
};
/* PCI IDs */
@@ -1096,6 +1110,7 @@ static const struct pci_device_id smi_id_table[] = {
SMI_ID(0x4254, 0x0550, dvbsky_s950_cfg),
SMI_ID(0x4254, 0x0552, dvbsky_s952_cfg),
SMI_ID(0x4254, 0x5580, dvbsky_t9580_cfg),
+ SMI_ID(0x13c2, 0x3016, technotrend_s2_4200_cfg),
{0}
};
MODULE_DEVICE_TABLE(pci, smi_id_table);
diff --git a/drivers/media/pci/smipcie/smipcie.h b/drivers/media/pci/smipcie/smipcie.h
index 68cdda28f..611e4f02c 100644
--- a/drivers/media/pci/smipcie/smipcie.h
+++ b/drivers/media/pci/smipcie/smipcie.h
@@ -216,6 +216,7 @@ struct smi_cfg_info {
#define SMI_DVBSKY_S950 1
#define SMI_DVBSKY_T9580 2
#define SMI_DVBSKY_T982 3
+#define SMI_TECHNOTREND_S2_4200 4
int type;
char *name;
#define SMI_TS_NULL 0
@@ -232,6 +233,7 @@ struct smi_cfg_info {
#define DVBSKY_FE_SIT2 3
int fe_0;
int fe_1;
+ char *rc_map;
};
struct smi_rc {
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 753411cbb..1fc195f89 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -444,27 +444,19 @@ static int vidioc_querycap(struct file *file, void *priv,
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id std)
{
struct sta2x11_vip *vip = video_drvdata(file);
- v4l2_std_id oldstd = vip->std, newstd;
- int status;
-
- if (V4L2_STD_ALL == std) {
- v4l2_subdev_call(vip->decoder, video, s_std, std);
- ssleep(2);
- v4l2_subdev_call(vip->decoder, video, querystd, &newstd);
- v4l2_subdev_call(vip->decoder, video, g_input_status, &status);
- if (status & V4L2_IN_ST_NO_SIGNAL)
+
+ /*
+ * This is here for backwards compatibility only.
+ * The use of V4L2_STD_ALL to trigger a querystd is non-standard.
+ */
+ if (std == V4L2_STD_ALL) {
+ v4l2_subdev_call(vip->decoder, video, querystd, &std);
+ if (std == V4L2_STD_UNKNOWN)
return -EIO;
- std = vip->std = newstd;
- if (oldstd != std) {
- if (V4L2_STD_525_60 & std)
- vip->format = formats_60[0];
- else
- vip->format = formats_50[0];
- }
- return 0;
}
- if (oldstd != std) {
+ if (vip->std != std) {
+ vip->std = std;
if (V4L2_STD_525_60 & std)
vip->format = formats_60[0];
else
diff --git a/drivers/media/pci/tw686x/Kconfig b/drivers/media/pci/tw686x/Kconfig
new file mode 100644
index 000000000..fb8536974
--- /dev/null
+++ b/drivers/media/pci/tw686x/Kconfig
@@ -0,0 +1,18 @@
+config VIDEO_TW686X
+ tristate "Intersil/Techwell TW686x video capture cards"
+ depends on PCI && VIDEO_DEV && VIDEO_V4L2 && SND
+ depends on HAS_DMA
+ select VIDEOBUF2_VMALLOC
+ select SND_PCM
+ help
+ Support for Intersil/Techwell TW686x-based frame grabber cards.
+
+ Currently supported chips:
+ - TW6864 (4 video channels),
+ - TW6865 (4 video channels, not tested, second generation chip),
+ - TW6868 (8 video channels, but only the first 4, which use the
+ built-in video decoder, are supported; not tested),
+ - TW6869 (8 video channels, second generation chip).
+
+ To compile this driver as a module, choose M here: the module
+ will be named tw686x.
diff --git a/drivers/media/pci/tw686x/Makefile b/drivers/media/pci/tw686x/Makefile
new file mode 100644
index 000000000..99819542b
--- /dev/null
+++ b/drivers/media/pci/tw686x/Makefile
@@ -0,0 +1,3 @@
+tw686x-objs := tw686x-core.o tw686x-video.o tw686x-audio.o
+
+obj-$(CONFIG_VIDEO_TW686X) += tw686x.o
diff --git a/drivers/media/pci/tw686x/tw686x-audio.c b/drivers/media/pci/tw686x/tw686x-audio.c
new file mode 100644
index 000000000..91459ab71
--- /dev/null
+++ b/drivers/media/pci/tw686x/tw686x-audio.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (C) 2015 VanguardiaSur - www.vanguardiasur.com.ar
+ *
+ * Based on the audio support from the tw6869 driver:
+ * Copyright 2015 www.starterkit.ru <info@starterkit.ru>
+ *
+ * Based on:
+ * Driver for Intersil|Techwell TW6869 based DVR cards
+ * (c) 2011-12 liran <jli11@intersil.com> [Intersil|Techwell China]
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/control.h>
+#include "tw686x.h"
+#include "tw686x-regs.h"
+
+#define AUDIO_CHANNEL_OFFSET 8
+
+void tw686x_audio_irq(struct tw686x_dev *dev, unsigned long requests,
+ unsigned int pb_status)
+{
+ unsigned long flags;
+ unsigned int ch, pb;
+
+ for_each_set_bit(ch, &requests, max_channels(dev)) {
+ struct tw686x_audio_channel *ac = &dev->audio_channels[ch];
+ struct tw686x_audio_buf *done = NULL;
+ struct tw686x_audio_buf *next = NULL;
+ struct tw686x_dma_desc *desc;
+
+ pb = !!(pb_status & BIT(AUDIO_CHANNEL_OFFSET + ch));
+
+ spin_lock_irqsave(&ac->lock, flags);
+
+ /* Sanity check */
+ if (!ac->ss || !ac->curr_bufs[0] || !ac->curr_bufs[1]) {
+ spin_unlock_irqrestore(&ac->lock, flags);
+ continue;
+ }
+
+ if (!list_empty(&ac->buf_list)) {
+ next = list_first_entry(&ac->buf_list,
+ struct tw686x_audio_buf, list);
+ list_move_tail(&next->list, &ac->buf_list);
+ done = ac->curr_bufs[!pb];
+ ac->curr_bufs[pb] = next;
+ }
+ spin_unlock_irqrestore(&ac->lock, flags);
+
+ desc = &ac->dma_descs[pb];
+ if (done && next && desc->virt) {
+ memcpy(done->virt, desc->virt, desc->size);
+ ac->ptr = done->dma - ac->buf[0].dma;
+ snd_pcm_period_elapsed(ac->ss);
+ }
+ }
+}
+
+static int tw686x_pcm_hw_params(struct snd_pcm_substream *ss,
+ struct snd_pcm_hw_params *hw_params)
+{
+ return snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(hw_params));
+}
+
+static int tw686x_pcm_hw_free(struct snd_pcm_substream *ss)
+{
+ return snd_pcm_lib_free_pages(ss);
+}
+
+/*
+ * The audio device rate is global and shared among all
+ * capture channels. The driver makes no effort to prevent
+ * rate modifications. The user is free to change the rate, but it
+ * means changing the rate for all capture sub-devices.
+ */
+static const struct snd_pcm_hardware tw686x_capture_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .channels_min = 1,
+ .channels_max = 1,
+ .buffer_bytes_max = TW686X_AUDIO_PAGE_MAX * TW686X_AUDIO_PAGE_SZ,
+ .period_bytes_min = TW686X_AUDIO_PAGE_SZ,
+ .period_bytes_max = TW686X_AUDIO_PAGE_SZ,
+ .periods_min = TW686X_AUDIO_PERIODS_MIN,
+ .periods_max = TW686X_AUDIO_PERIODS_MAX,
+};
+
+static int tw686x_pcm_open(struct snd_pcm_substream *ss)
+{
+ struct tw686x_dev *dev = snd_pcm_substream_chip(ss);
+ struct tw686x_audio_channel *ac = &dev->audio_channels[ss->number];
+ struct snd_pcm_runtime *rt = ss->runtime;
+ int err;
+
+ ac->ss = ss;
+ rt->hw = tw686x_capture_hw;
+
+ err = snd_pcm_hw_constraint_integer(rt, SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int tw686x_pcm_close(struct snd_pcm_substream *ss)
+{
+ struct tw686x_dev *dev = snd_pcm_substream_chip(ss);
+ struct tw686x_audio_channel *ac = &dev->audio_channels[ss->number];
+
+ ac->ss = NULL;
+ return 0;
+}
+
+static int tw686x_pcm_prepare(struct snd_pcm_substream *ss)
+{
+ struct tw686x_dev *dev = snd_pcm_substream_chip(ss);
+ struct tw686x_audio_channel *ac = &dev->audio_channels[ss->number];
+ struct snd_pcm_runtime *rt = ss->runtime;
+ unsigned int period_size = snd_pcm_lib_period_bytes(ss);
+ struct tw686x_audio_buf *p_buf, *b_buf;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ tw686x_disable_channel(dev, AUDIO_CHANNEL_OFFSET + ac->ch);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (dev->audio_rate != rt->rate) {
+ u32 reg;
+
+ dev->audio_rate = rt->rate;
+ reg = ((125000000 / rt->rate) << 16) +
+ ((125000000 % rt->rate) << 16) / rt->rate;
+
+ reg_write(dev, AUDIO_CONTROL2, reg);
+ }
+
+ if (period_size != TW686X_AUDIO_PAGE_SZ ||
+ rt->periods < TW686X_AUDIO_PERIODS_MIN ||
+ rt->periods > TW686X_AUDIO_PERIODS_MAX) {
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ac->lock, flags);
+ INIT_LIST_HEAD(&ac->buf_list);
+
+ for (i = 0; i < rt->periods; i++) {
+ ac->buf[i].dma = rt->dma_addr + period_size * i;
+ ac->buf[i].virt = rt->dma_area + period_size * i;
+ INIT_LIST_HEAD(&ac->buf[i].list);
+ list_add_tail(&ac->buf[i].list, &ac->buf_list);
+ }
+
+ p_buf = list_first_entry(&ac->buf_list, struct tw686x_audio_buf, list);
+ list_move_tail(&p_buf->list, &ac->buf_list);
+
+ b_buf = list_first_entry(&ac->buf_list, struct tw686x_audio_buf, list);
+ list_move_tail(&b_buf->list, &ac->buf_list);
+
+ ac->curr_bufs[0] = p_buf;
+ ac->curr_bufs[1] = b_buf;
+ ac->ptr = 0;
+ spin_unlock_irqrestore(&ac->lock, flags);
+
+ return 0;
+}
+
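The AUDIO_CONTROL2 value written in tw686x_pcm_prepare() above encodes 125 MHz divided by the sample rate as 16.16 fixed point: the integer quotient in the high half, the scaled remainder in the low half. A standalone check of that arithmetic, reusing the constant from the code above:

/* Illustrative check of the 16.16 fixed-point divider computed in
 * tw686x_pcm_prepare(): integer part << 16 plus scaled remainder. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t rate = 48000;
	uint32_t reg = ((125000000 / rate) << 16) +
		       ((125000000 % rate) << 16) / rate;

	/* 125000000 / 48000 = 2604.1666..., i.e. 0x0a2c.2aaa */
	printf("0x%08x\n", reg);	/* prints 0x0a2c2aaa */
	return 0;
}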
+static int tw686x_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
+{
+ struct tw686x_dev *dev = snd_pcm_substream_chip(ss);
+ struct tw686x_audio_channel *ac = &dev->audio_channels[ss->number];
+ unsigned long flags;
+ int err = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ if (ac->curr_bufs[0] && ac->curr_bufs[1]) {
+ spin_lock_irqsave(&dev->lock, flags);
+ tw686x_enable_channel(dev,
+ AUDIO_CHANNEL_OFFSET + ac->ch);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ mod_timer(&dev->dma_delay_timer,
+ jiffies + msecs_to_jiffies(100));
+ } else {
+ err = -EIO;
+ }
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ spin_lock_irqsave(&dev->lock, flags);
+ tw686x_disable_channel(dev, AUDIO_CHANNEL_OFFSET + ac->ch);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ spin_lock_irqsave(&ac->lock, flags);
+ ac->curr_bufs[0] = NULL;
+ ac->curr_bufs[1] = NULL;
+ spin_unlock_irqrestore(&ac->lock, flags);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static snd_pcm_uframes_t tw686x_pcm_pointer(struct snd_pcm_substream *ss)
+{
+ struct tw686x_dev *dev = snd_pcm_substream_chip(ss);
+ struct tw686x_audio_channel *ac = &dev->audio_channels[ss->number];
+
+ return bytes_to_frames(ss->runtime, ac->ptr);
+}
+
+static struct snd_pcm_ops tw686x_pcm_ops = {
+ .open = tw686x_pcm_open,
+ .close = tw686x_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = tw686x_pcm_hw_params,
+ .hw_free = tw686x_pcm_hw_free,
+ .prepare = tw686x_pcm_prepare,
+ .trigger = tw686x_pcm_trigger,
+ .pointer = tw686x_pcm_pointer,
+};
+
+static int tw686x_snd_pcm_init(struct tw686x_dev *dev)
+{
+ struct snd_card *card = dev->snd_card;
+ struct snd_pcm *pcm;
+ struct snd_pcm_substream *ss;
+ unsigned int i;
+ int err;
+
+ err = snd_pcm_new(card, card->driver, 0, 0, max_channels(dev), &pcm);
+ if (err < 0)
+ return err;
+
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &tw686x_pcm_ops);
+ snd_pcm_chip(pcm) = dev;
+ pcm->info_flags = 0;
+ strlcpy(pcm->name, "tw686x PCM", sizeof(pcm->name));
+
+ for (i = 0, ss = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
+ ss; ss = ss->next, i++)
+ snprintf(ss->name, sizeof(ss->name), "vch%u audio", i);
+
+ return snd_pcm_lib_preallocate_pages_for_all(pcm,
+ SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(dev->pci_dev),
+ TW686X_AUDIO_PAGE_MAX * TW686X_AUDIO_PAGE_SZ,
+ TW686X_AUDIO_PAGE_MAX * TW686X_AUDIO_PAGE_SZ);
+}
+
+static void tw686x_audio_dma_free(struct tw686x_dev *dev,
+ struct tw686x_audio_channel *ac)
+{
+ int pb;
+
+ for (pb = 0; pb < 2; pb++) {
+ if (!ac->dma_descs[pb].virt)
+ continue;
+ pci_free_consistent(dev->pci_dev, ac->dma_descs[pb].size,
+ ac->dma_descs[pb].virt,
+ ac->dma_descs[pb].phys);
+ ac->dma_descs[pb].virt = NULL;
+ }
+}
+
+static int tw686x_audio_dma_alloc(struct tw686x_dev *dev,
+ struct tw686x_audio_channel *ac)
+{
+ int pb;
+
+ for (pb = 0; pb < 2; pb++) {
+ u32 reg = pb ? ADMA_B_ADDR[ac->ch] : ADMA_P_ADDR[ac->ch];
+ void *virt;
+
+ virt = pci_alloc_consistent(dev->pci_dev, TW686X_AUDIO_PAGE_SZ,
+ &ac->dma_descs[pb].phys);
+ if (!virt) {
+ dev_err(&dev->pci_dev->dev,
+ "dma%d: unable to allocate audio DMA %s-buffer\n",
+ ac->ch, pb ? "B" : "P");
+ return -ENOMEM;
+ }
+ ac->dma_descs[pb].virt = virt;
+ ac->dma_descs[pb].size = TW686X_AUDIO_PAGE_SZ;
+ reg_write(dev, reg, ac->dma_descs[pb].phys);
+ }
+ return 0;
+}
+
+void tw686x_audio_free(struct tw686x_dev *dev)
+{
+ unsigned long flags;
+ u32 dma_ch_mask;
+ u32 dma_cmd;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dma_cmd = reg_read(dev, DMA_CMD);
+ dma_ch_mask = reg_read(dev, DMA_CHANNEL_ENABLE);
+ reg_write(dev, DMA_CMD, dma_cmd & ~0xff00);
+ reg_write(dev, DMA_CHANNEL_ENABLE, dma_ch_mask & ~0xff00);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (!dev->snd_card)
+ return;
+ snd_card_free(dev->snd_card);
+ dev->snd_card = NULL;
+}
+
+int tw686x_audio_init(struct tw686x_dev *dev)
+{
+ struct pci_dev *pci_dev = dev->pci_dev;
+ struct snd_card *card;
+ int err, ch;
+
+ /*
+ * AUDIO_CONTROL1
+ * DMA byte length [31:19] = 4096 (i.e. ALSA period)
+ * External audio enable [0] = enabled
+ */
+ reg_write(dev, AUDIO_CONTROL1, 0x80000001);
+
+ err = snd_card_new(&pci_dev->dev, SNDRV_DEFAULT_IDX1,
+ SNDRV_DEFAULT_STR1,
+ THIS_MODULE, 0, &card);
+ if (err < 0)
+ return err;
+
+ dev->snd_card = card;
+ strlcpy(card->driver, "tw686x", sizeof(card->driver));
+ strlcpy(card->shortname, "tw686x", sizeof(card->shortname));
+ strlcpy(card->longname, pci_name(pci_dev), sizeof(card->longname));
+ snd_card_set_dev(card, &pci_dev->dev);
+
+ for (ch = 0; ch < max_channels(dev); ch++) {
+ struct tw686x_audio_channel *ac;
+
+ ac = &dev->audio_channels[ch];
+ spin_lock_init(&ac->lock);
+ ac->dev = dev;
+ ac->ch = ch;
+
+ err = tw686x_audio_dma_alloc(dev, ac);
+ if (err < 0)
+ goto err_cleanup;
+ }
+
+ err = tw686x_snd_pcm_init(dev);
+ if (err < 0)
+ goto err_cleanup;
+
+ err = snd_card_register(card);
+ if (!err)
+ return 0;
+
+err_cleanup:
+ for (ch = 0; ch < max_channels(dev); ch++) {
+ if (!dev->audio_channels[ch].dev)
+ continue;
+ tw686x_audio_dma_free(dev, &dev->audio_channels[ch]);
+ }
+ snd_card_free(card);
+ dev->snd_card = NULL;
+ return err;
+}
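tw686x_audio_irq() above rotates buffers in a ping-pong fashion: two buffers are always assigned to the chip's P and B DMA pages, and each interrupt retires one of them while refilling a slot from the queued list. A reduced model of that index logic (no locking or ALSA plumbing; buffer numbers are arbitrary):

/* Illustrative model of the buffer rotation in tw686x_audio_irq():
 * curr[0]/curr[1] are the buffers assigned to the P and B DMA pages,
 * 'pb' says which page completed; the driver copies that page into
 * the buffer it retires and refills the slot from the queued list. */
#include <stdio.h>

int main(void)
{
	int curr[2] = { 100, 101 };	/* buffers held for pages P and B */
	int queue_head = 102;		/* stand-in for ac->buf_list      */

	for (int irq = 0; irq < 4; irq++) {
		int pb = irq & 1;	/* page flag from PB_STATUS       */
		int next = queue_head++;
		int done = curr[!pb];	/* buffer handed back to ALSA     */

		curr[pb] = next;	/* hardware keeps streaming       */
		printf("irq %d: page %c -> complete buffer %d, queue %d\n",
		       irq, pb ? 'B' : 'P', done, next);
	}
	return 0;
}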
diff --git a/drivers/media/pci/tw686x/tw686x-core.c b/drivers/media/pci/tw686x/tw686x-core.c
new file mode 100644
index 000000000..cf53b0e97
--- /dev/null
+++ b/drivers/media/pci/tw686x/tw686x-core.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (C) 2015 VanguardiaSur - www.vanguardiasur.com.ar
+ *
+ * Based on original driver by Krzysztof Hałasa:
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Notes
+ * -----
+ *
+ * 1. Under stress-testing, it has been observed that the PCIe link
+ * goes down for no apparent reason. Therefore, the driver takes special care
+ * to allow device hot-unplugging.
+ *
+ * 2. TW686X devices are capable of setting a few different DMA modes,
+ * including scatter-gather, field, and frame modes. However,
+ * under stress testing it has been found that the machine can
+ * freeze completely if DMA registers are programmed while streaming
+ * is active.
+ * This driver tries to access hardware registers as infrequently
+ * as possible by:
+ * i. allocating fixed DMA buffers and memcpy'ing into
+ * vmalloc'ed buffers
+ * ii. using a timer to mitigate the rate of DMA reset operations,
+ * on DMA channels error.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "tw686x.h"
+#include "tw686x-regs.h"
+
+/*
+ * This module parameter controls the DMA_TIMER_INTERVAL value.
+ * The DMA_TIMER_INTERVAL register controls the minimum DMA interrupt
+ * time span (i.e. the maximum DMA interrupt rate), thus allowing for
+ * IRQ coalescing.
+ *
+ * The chip datasheet does not mention a time unit for this value, so
+ * users wanting fine-grain control over the interrupt rate should
+ * determine the desired value through testing.
+ */
+static u32 dma_interval = 0x00098968;
+module_param(dma_interval, int, 0444);
+MODULE_PARM_DESC(dma_interval, "Minimum time span for DMA interrupting host");
+
+void tw686x_disable_channel(struct tw686x_dev *dev, unsigned int channel)
+{
+ u32 dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
+ u32 dma_cmd = reg_read(dev, DMA_CMD);
+
+ dma_en &= ~BIT(channel);
+ dma_cmd &= ~BIT(channel);
+
+ /* Must remove it from pending too */
+ dev->pending_dma_en &= ~BIT(channel);
+ dev->pending_dma_cmd &= ~BIT(channel);
+
+ /* Stop DMA if no channels are enabled */
+ if (!dma_en)
+ dma_cmd = 0;
+ reg_write(dev, DMA_CHANNEL_ENABLE, dma_en);
+ reg_write(dev, DMA_CMD, dma_cmd);
+}
+
+void tw686x_enable_channel(struct tw686x_dev *dev, unsigned int channel)
+{
+ u32 dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
+ u32 dma_cmd = reg_read(dev, DMA_CMD);
+
+ dev->pending_dma_en |= dma_en | BIT(channel);
+ dev->pending_dma_cmd |= dma_cmd | DMA_CMD_ENABLE | BIT(channel);
+}
+
+/*
+ * The purpose of this awful hack is to avoid enabling the DMA
+ * channels "too fast" which makes some TW686x devices very
+ * angry and freeze the CPU (see note 1).
+ */
+static void tw686x_dma_delay(unsigned long data)
+{
+ struct tw686x_dev *dev = (struct tw686x_dev *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ reg_write(dev, DMA_CHANNEL_ENABLE, dev->pending_dma_en);
+ reg_write(dev, DMA_CMD, dev->pending_dma_cmd);
+ dev->pending_dma_en = 0;
+ dev->pending_dma_cmd = 0;
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+static void tw686x_reset_channels(struct tw686x_dev *dev, unsigned int ch_mask)
+{
+ u32 dma_en, dma_cmd;
+
+ dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
+ dma_cmd = reg_read(dev, DMA_CMD);
+
+ /*
+ * Save the pending register state; the timer
+ * will restore it.
+ */
+ dev->pending_dma_en |= dma_en;
+ dev->pending_dma_cmd |= dma_cmd;
+
+ /* Disable the reset channels */
+ reg_write(dev, DMA_CHANNEL_ENABLE, dma_en & ~ch_mask);
+
+ if ((dma_en & ~ch_mask) == 0) {
+ dev_dbg(&dev->pci_dev->dev, "reset: stopping DMA\n");
+ dma_cmd &= ~DMA_CMD_ENABLE;
+ }
+ reg_write(dev, DMA_CMD, dma_cmd & ~ch_mask);
+}
+
+static irqreturn_t tw686x_irq(int irq, void *dev_id)
+{
+ struct tw686x_dev *dev = (struct tw686x_dev *)dev_id;
+ unsigned int video_requests, audio_requests, reset_ch;
+ u32 fifo_status, fifo_signal, fifo_ov, fifo_bad, fifo_errors;
+ u32 int_status, dma_en, video_en, pb_status;
+ unsigned long flags;
+
+ int_status = reg_read(dev, INT_STATUS); /* cleared on read */
+ fifo_status = reg_read(dev, VIDEO_FIFO_STATUS);
+
+ /* INT_STATUS does not include FIFO_STATUS errors! */
+ if (!int_status && !TW686X_FIFO_ERROR(fifo_status))
+ return IRQ_NONE;
+
+ if (int_status & INT_STATUS_DMA_TOUT) {
+ dev_dbg(&dev->pci_dev->dev,
+ "DMA timeout. Resetting DMA for all channels\n");
+ reset_ch = ~0;
+ goto reset_channels;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ dma_en = reg_read(dev, DMA_CHANNEL_ENABLE);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ video_en = dma_en & 0xff;
+ fifo_signal = ~(fifo_status & 0xff) & video_en;
+ fifo_ov = fifo_status >> 24;
+ fifo_bad = fifo_status >> 16;
+
+ /* Mask of channels with signal and FIFO errors */
+ fifo_errors = fifo_signal & (fifo_ov | fifo_bad);
+
+ reset_ch = 0;
+ pb_status = reg_read(dev, PB_STATUS);
+
+ /* Coalesce video frame/error events */
+ video_requests = (int_status & video_en) | fifo_errors;
+ audio_requests = (int_status & dma_en) >> 8;
+
+ if (video_requests)
+ tw686x_video_irq(dev, video_requests, pb_status,
+ fifo_status, &reset_ch);
+ if (audio_requests)
+ tw686x_audio_irq(dev, audio_requests, pb_status);
+
+reset_channels:
+ if (reset_ch) {
+ spin_lock_irqsave(&dev->lock, flags);
+ tw686x_reset_channels(dev, reset_ch);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ mod_timer(&dev->dma_delay_timer,
+ jiffies + msecs_to_jiffies(100));
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void tw686x_dev_release(struct v4l2_device *v4l2_dev)
+{
+ struct tw686x_dev *dev = container_of(v4l2_dev, struct tw686x_dev,
+ v4l2_dev);
+ unsigned int ch;
+
+ for (ch = 0; ch < max_channels(dev); ch++)
+ v4l2_ctrl_handler_free(&dev->video_channels[ch].ctrl_handler);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+ kfree(dev->audio_channels);
+ kfree(dev->video_channels);
+ kfree(dev);
+}
+
+static int tw686x_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
+{
+ struct tw686x_dev *dev;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->type = pci_id->driver_data;
+ sprintf(dev->name, "tw%04X", pci_dev->device);
+
+ dev->video_channels = kcalloc(max_channels(dev),
+ sizeof(*dev->video_channels), GFP_KERNEL);
+ if (!dev->video_channels) {
+ err = -ENOMEM;
+ goto free_dev;
+ }
+
+ dev->audio_channels = kcalloc(max_channels(dev),
+ sizeof(*dev->audio_channels), GFP_KERNEL);
+ if (!dev->audio_channels) {
+ err = -ENOMEM;
+ goto free_video;
+ }
+
+ pr_info("%s: PCI %s, IRQ %d, MMIO 0x%lx\n", dev->name,
+ pci_name(pci_dev), pci_dev->irq,
+ (unsigned long)pci_resource_start(pci_dev, 0));
+
+ dev->pci_dev = pci_dev;
+ if (pci_enable_device(pci_dev)) {
+ err = -EIO;
+ goto free_audio;
+ }
+
+ pci_set_master(pci_dev);
+ err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pci_dev->dev, "32-bit PCI DMA not supported\n");
+ err = -EIO;
+ goto disable_pci;
+ }
+
+ err = pci_request_regions(pci_dev, dev->name);
+ if (err) {
+ dev_err(&pci_dev->dev, "unable to request PCI region\n");
+ goto disable_pci;
+ }
+
+ dev->mmio = pci_ioremap_bar(pci_dev, 0);
+ if (!dev->mmio) {
+ dev_err(&pci_dev->dev, "unable to remap PCI region\n");
+ err = -ENOMEM;
+ goto free_region;
+ }
+
+ /* Reset all subsystems */
+ reg_write(dev, SYS_SOFT_RST, 0x0f);
+ mdelay(1);
+
+ reg_write(dev, SRST[0], 0x3f);
+ if (max_channels(dev) > 4)
+ reg_write(dev, SRST[1], 0x3f);
+
+ /* Disable the DMA engine */
+ reg_write(dev, DMA_CMD, 0);
+ reg_write(dev, DMA_CHANNEL_ENABLE, 0);
+
+ /* Enable DMA FIFO overflow and pointer check */
+ reg_write(dev, DMA_CONFIG, 0xffffff04);
+ reg_write(dev, DMA_CHANNEL_TIMEOUT, 0x140c8584);
+ reg_write(dev, DMA_TIMER_INTERVAL, dma_interval);
+
+ spin_lock_init(&dev->lock);
+
+ err = request_irq(pci_dev->irq, tw686x_irq, IRQF_SHARED,
+ dev->name, dev);
+ if (err < 0) {
+ dev_err(&pci_dev->dev, "unable to request interrupt\n");
+ goto iounmap;
+ }
+
+ setup_timer(&dev->dma_delay_timer,
+ tw686x_dma_delay, (unsigned long) dev);
+
+ /*
+ * This must be set right before initializing v4l2_dev.
+ * It's used to release resources after the last handle
+ * held is released.
+ */
+ dev->v4l2_dev.release = tw686x_dev_release;
+ err = tw686x_video_init(dev);
+ if (err) {
+ dev_err(&pci_dev->dev, "can't register video\n");
+ goto free_irq;
+ }
+
+ err = tw686x_audio_init(dev);
+ if (err)
+ dev_warn(&pci_dev->dev, "can't register audio\n");
+
+ pci_set_drvdata(pci_dev, dev);
+ return 0;
+
+free_irq:
+ free_irq(pci_dev->irq, dev);
+iounmap:
+ pci_iounmap(pci_dev, dev->mmio);
+free_region:
+ pci_release_regions(pci_dev);
+disable_pci:
+ pci_disable_device(pci_dev);
+free_audio:
+ kfree(dev->audio_channels);
+free_video:
+ kfree(dev->video_channels);
+free_dev:
+ kfree(dev);
+ return err;
+}
+
+static void tw686x_remove(struct pci_dev *pci_dev)
+{
+ struct tw686x_dev *dev = pci_get_drvdata(pci_dev);
+ unsigned long flags;
+
+ /* This guarantees the IRQ handler is no longer running,
+ * which means we can kiss some resources good-bye.
+ */
+ free_irq(pci_dev->irq, dev);
+
+ tw686x_video_free(dev);
+ tw686x_audio_free(dev);
+ del_timer_sync(&dev->dma_delay_timer);
+
+ pci_iounmap(pci_dev, dev->mmio);
+ pci_release_regions(pci_dev);
+ pci_disable_device(pci_dev);
+
+ /*
+ * Setting pci_dev to NULL lets the vb2_ops detect that the hardware
+ * is no longer available. This is required because
+ * the device sometimes hot-unplugs itself as the result of a PCIe
+ * link down.
+ * The lock is really important here.
+ */
+ spin_lock_irqsave(&dev->lock, flags);
+ dev->pci_dev = NULL;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /*
+ * This calls tw686x_dev_release if it's the last reference.
+ * Otherwise, release is postponed until there are no users left.
+ */
+ v4l2_device_put(&dev->v4l2_dev);
+}
+
+/*
+ * On TW6864 and TW6868, all channels share the pair of video DMA SG tables,
+ * with 10-bit start_idx and end_idx determining start and end of frame buffer
+ * for a particular channel.
+ * TW6868 with all its 8 channels would be problematic (only 127 SG entries per
+ * channel) but we support only 4 channels on this chip anyway (the first
+ * 4 channels are driven by the internal video decoder; the other 4 would require
+ * an external TW286x part).
+ *
+ * On TW6865 and TW6869, each channel has its own DMA SG table, with indexes
+ * starting with 0. Both chips have complete sets of internal video decoders
+ * (respectively 4 or 8-channel).
+ *
+ * All chips have separate SG tables for two video frames.
+ */
+
+/* driver_data is number of A/V channels */
+static const struct pci_device_id tw686x_pci_tbl[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6864),
+ .driver_data = 4
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6865), /* not tested */
+ .driver_data = 4 | TYPE_SECOND_GEN
+ },
+ /*
+ * TW6868 supports 8 A/V channels with an external TW2865 chip;
+ * not supported by the driver.
+ */
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6868), /* not tested */
+ .driver_data = 4
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_TECHWELL, 0x6869),
+ .driver_data = 8 | TYPE_SECOND_GEN},
+ {}
+};
+MODULE_DEVICE_TABLE(pci, tw686x_pci_tbl);
+
+static struct pci_driver tw686x_pci_driver = {
+ .name = "tw686x",
+ .id_table = tw686x_pci_tbl,
+ .probe = tw686x_probe,
+ .remove = tw686x_remove,
+};
+module_pci_driver(tw686x_pci_driver);
+
+MODULE_DESCRIPTION("Driver for video frame grabber cards based on Intersil/Techwell TW686[4589]");
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>");
+MODULE_AUTHOR("Krzysztof Ha?asa <khalasa@piap.pl>");
+MODULE_LICENSE("GPL v2");
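Note how tw686x_enable_channel() above never writes the DMA registers directly: it only accumulates state in pending_dma_en/pending_dma_cmd, which tw686x_dma_delay() commits from a timer roughly 100 ms later. A reduced model of that defer-and-commit pattern (plain variables instead of registers, no locking):

/* Illustrative model of the deferred DMA-enable pattern: enables are
 * accumulated into 'pending' state and only written to the (mock)
 * register by a delayed commit, mirroring tw686x_dma_delay(). */
#include <stdio.h>
#include <stdint.h>

static uint32_t reg_dma_en;		/* stands in for DMA_CHANNEL_ENABLE */
static uint32_t pending_dma_en;

static void enable_channel(unsigned int ch)
{
	pending_dma_en |= reg_dma_en | (1u << ch);	/* no reg write here */
}

static void dma_delay_timer(void)	/* runs ~100 ms later */
{
	reg_dma_en = pending_dma_en;	/* single committed write */
	pending_dma_en = 0;
}

int main(void)
{
	enable_channel(0);
	enable_channel(3);		/* coalesced into one commit */
	dma_delay_timer();
	printf("DMA_CHANNEL_ENABLE = 0x%02x\n", reg_dma_en);	/* 0x09 */
	return 0;
}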
diff --git a/drivers/media/pci/tw686x/tw686x-regs.h b/drivers/media/pci/tw686x/tw686x-regs.h
new file mode 100644
index 000000000..fcef586a4
--- /dev/null
+++ b/drivers/media/pci/tw686x/tw686x-regs.h
@@ -0,0 +1,122 @@
+/* DMA controller registers */
+#define REG8_1(a0) ((const u16[8]) { a0, a0 + 1, a0 + 2, a0 + 3, \
+ a0 + 4, a0 + 5, a0 + 6, a0 + 7})
+#define REG8_2(a0) ((const u16[8]) { a0, a0 + 2, a0 + 4, a0 + 6, \
+ a0 + 8, a0 + 0xa, a0 + 0xc, a0 + 0xe})
+#define REG8_8(a0) ((const u16[8]) { a0, a0 + 8, a0 + 0x10, a0 + 0x18, \
+ a0 + 0x20, a0 + 0x28, a0 + 0x30, \
+ a0 + 0x38})
+#define INT_STATUS 0x00
+#define PB_STATUS 0x01
+#define DMA_CMD 0x02
+#define VIDEO_FIFO_STATUS 0x03
+#define VIDEO_CHANNEL_ID 0x04
+#define VIDEO_PARSER_STATUS 0x05
+#define SYS_SOFT_RST 0x06
+#define DMA_PAGE_TABLE0_ADDR ((const u16[8]) { 0x08, 0xd0, 0xd2, 0xd4, \
+ 0xd6, 0xd8, 0xda, 0xdc })
+#define DMA_PAGE_TABLE1_ADDR ((const u16[8]) { 0x09, 0xd1, 0xd3, 0xd5, \
+ 0xd7, 0xd9, 0xdb, 0xdd })
+#define DMA_CHANNEL_ENABLE 0x0a
+#define DMA_CONFIG 0x0b
+#define DMA_TIMER_INTERVAL 0x0c
+#define DMA_CHANNEL_TIMEOUT 0x0d
+#define VDMA_CHANNEL_CONFIG REG8_1(0x10)
+#define ADMA_P_ADDR REG8_2(0x18)
+#define ADMA_B_ADDR REG8_2(0x19)
+#define DMA10_P_ADDR 0x28
+#define DMA10_B_ADDR 0x29
+#define VIDEO_CONTROL1 0x2a
+#define VIDEO_CONTROL2 0x2b
+#define AUDIO_CONTROL1 0x2c
+#define AUDIO_CONTROL2 0x2d
+#define PHASE_REF 0x2e
+#define GPIO_REG 0x2f
+#define INTL_HBAR_CTRL REG8_1(0x30)
+#define AUDIO_CONTROL3 0x38
+#define VIDEO_FIELD_CTRL REG8_1(0x39)
+#define HSCALER_CTRL REG8_1(0x42)
+#define VIDEO_SIZE REG8_1(0x4A)
+#define VIDEO_SIZE_F2 REG8_1(0x52)
+#define MD_CONF REG8_1(0x60)
+#define MD_INIT REG8_1(0x68)
+#define MD_MAP0 REG8_1(0x70)
+#define VDMA_P_ADDR REG8_8(0x80) /* not used in DMA SG mode */
+#define VDMA_WHP REG8_8(0x81)
+#define VDMA_B_ADDR REG8_8(0x82)
+#define VDMA_F2_P_ADDR REG8_8(0x84)
+#define VDMA_F2_WHP REG8_8(0x85)
+#define VDMA_F2_B_ADDR REG8_8(0x86)
+#define EP_REG_ADDR 0xfe
+#define EP_REG_DATA 0xff
+
+/* Video decoder registers */
+#define VDREG8(a0) ((const u16[8]) { \
+ a0 + 0x000, a0 + 0x010, a0 + 0x020, a0 + 0x030, \
+ a0 + 0x100, a0 + 0x110, a0 + 0x120, a0 + 0x130})
+#define VIDSTAT VDREG8(0x100)
+#define BRIGHT VDREG8(0x101)
+#define CONTRAST VDREG8(0x102)
+#define SHARPNESS VDREG8(0x103)
+#define SAT_U VDREG8(0x104)
+#define SAT_V VDREG8(0x105)
+#define HUE VDREG8(0x106)
+#define CROP_HI VDREG8(0x107)
+#define VDELAY_LO VDREG8(0x108)
+#define VACTIVE_LO VDREG8(0x109)
+#define HDELAY_LO VDREG8(0x10a)
+#define HACTIVE_LO VDREG8(0x10b)
+#define MVSN VDREG8(0x10c)
+#define STATUS2 VDREG8(0x10d)
+#define SDT VDREG8(0x10e)
+#define SDT_EN VDREG8(0x10f)
+
+#define VSCALE_LO VDREG8(0x144)
+#define SCALE_HI VDREG8(0x145)
+#define HSCALE_LO VDREG8(0x146)
+#define F2CROP_HI VDREG8(0x147)
+#define F2VDELAY_LO VDREG8(0x148)
+#define F2VACTIVE_LO VDREG8(0x149)
+#define F2HDELAY_LO VDREG8(0x14a)
+#define F2HACTIVE_LO VDREG8(0x14b)
+#define F2VSCALE_LO VDREG8(0x14c)
+#define F2SCALE_HI VDREG8(0x14d)
+#define F2HSCALE_LO VDREG8(0x14e)
+#define F2CNT VDREG8(0x14f)
+
+#define VDREG2(a0) ((const u16[2]) { a0, a0 + 0x100 })
+#define SRST VDREG2(0x180)
+#define ACNTL VDREG2(0x181)
+#define ACNTL2 VDREG2(0x182)
+#define CNTRL1 VDREG2(0x183)
+#define CKHY VDREG2(0x184)
+#define SHCOR VDREG2(0x185)
+#define CORING VDREG2(0x186)
+#define CLMPG VDREG2(0x187)
+#define IAGC VDREG2(0x188)
+#define VCTRL1 VDREG2(0x18f)
+#define MISC1 VDREG2(0x194)
+#define LOOP VDREG2(0x195)
+#define MISC2 VDREG2(0x196)
+
+#define CLMD VDREG2(0x197)
+#define ANPWRDOWN VDREG2(0x1ce)
+#define AIGAIN ((const u16[8]) { 0x1d0, 0x1d1, 0x1d2, 0x1d3, \
+ 0x2d0, 0x2d1, 0x2d2, 0x2d3 })
+
+#define SYS_MODE_DMA_SHIFT 13
+
+#define DMA_CMD_ENABLE BIT(31)
+#define INT_STATUS_DMA_TOUT BIT(17)
+#define TW686X_VIDSTAT_HLOCK BIT(6)
+#define TW686X_VIDSTAT_VDLOSS BIT(7)
+
+#define TW686X_STD_NTSC_M 0
+#define TW686X_STD_PAL 1
+#define TW686X_STD_SECAM 2
+#define TW686X_STD_NTSC_443 3
+#define TW686X_STD_PAL_M 4
+#define TW686X_STD_PAL_CN 5
+#define TW686X_STD_PAL_60 6
+
+#define TW686X_FIFO_ERROR(x) (x & ~(0xff))
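The REG8_*() macros in this header build per-channel register tables as C99 compound literals, so ADMA_P_ADDR[ch], for instance, indexes a stride-2 table starting at 0x18. A standalone expansion of the idiom:

/* Illustrative expansion of the REG8_2() compound-literal idiom used
 * by the header above: eight register addresses with a stride of 2. */
#include <stdio.h>
#include <stdint.h>

#define REG8_2(a0) ((const uint16_t[8]) { a0, a0 + 2, a0 + 4, a0 + 6, \
					  a0 + 8, a0 + 0xa, a0 + 0xc, a0 + 0xe })
#define ADMA_P_ADDR REG8_2(0x18)

int main(void)
{
	for (int ch = 0; ch < 8; ch++)
		printf("ADMA_P_ADDR[%d] = 0x%02x\n", ch, ADMA_P_ADDR[ch]);
	/* prints 0x18, 0x1a, 0x1c ... 0x26 */
	return 0;
}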
diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c
new file mode 100644
index 000000000..253e10823
--- /dev/null
+++ b/drivers/media/pci/tw686x/tw686x-video.c
@@ -0,0 +1,937 @@
+/*
+ * Copyright (C) 2015 VanguardiaSur - www.vanguardiasur.com.ar
+ *
+ * Based on original driver by Krzysztof Hałasa:
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-vmalloc.h>
+#include "tw686x.h"
+#include "tw686x-regs.h"
+
+#define TW686X_INPUTS_PER_CH 4
+#define TW686X_VIDEO_WIDTH 720
+#define TW686X_VIDEO_HEIGHT(id) ((id & V4L2_STD_525_60) ? 480 : 576)
+
+static const struct tw686x_format formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .mode = 0,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mode = 5,
+ .depth = 16,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mode = 6,
+ .depth = 16,
+ }
+};
+
+static unsigned int tw686x_fields_map(v4l2_std_id std, unsigned int fps)
+{
+ static const unsigned int map[15] = {
+ 0x00000000, 0x00000001, 0x00004001, 0x00104001, 0x00404041,
+ 0x01041041, 0x01104411, 0x01111111, 0x04444445, 0x04511445,
+ 0x05145145, 0x05151515, 0x05515455, 0x05551555, 0x05555555
+ };
+
+ static const unsigned int std_625_50[26] = {
+ 0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7,
+ 8, 8, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 0
+ };
+
+ static const unsigned int std_525_60[31] = {
+ 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
+ 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 0, 0
+ };
+
+ unsigned int i;
+
+ if (std & V4L2_STD_525_60) {
+ if (fps >= ARRAY_SIZE(std_525_60))
+ fps = 30;
+ i = std_525_60[fps];
+ } else {
+ if (fps >= ARRAY_SIZE(std_625_50))
+ fps = 25;
+ i = std_625_50[fps];
+ }
+
+ return map[i];
+}
+
+static void tw686x_set_framerate(struct tw686x_video_channel *vc,
+ unsigned int fps)
+{
+ unsigned int map;
+
+ if (vc->fps == fps)
+ return;
+
+ map = tw686x_fields_map(vc->video_standard, fps) << 1;
+ map |= map << 1;
+ if (map > 0)
+ map |= BIT(31);
+ reg_write(vc->dev, VIDEO_FIELD_CTRL[vc->ch], map);
+ vc->fps = fps;
+}
+
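tw686x_fields_map() above selects a precomputed field-sampling bitmap approximating the requested frame rate, and tw686x_set_framerate() shifts and duplicates it for both fields before setting the enable bit. A standalone trace of the resulting register value (bitmap taken from the tables above, for a 525/60 standard at 15 fps):

/* Illustrative trace of the VIDEO_FIELD_CTRL value: the selected
 * bitmap marks which fields to capture; it is shifted, replicated for
 * the second field, and the enable bit (bit 31) is set. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t v = 0x01111111;	/* map[std_525_60[15]]            */
	uint32_t reg = v << 1;

	reg |= reg << 1;		/* replicate for both fields      */
	if (reg > 0)
		reg |= 1u << 31;	/* enable field-rate control      */
	printf("VIDEO_FIELD_CTRL = 0x%08x\n", reg);	/* 0x86666666 */
	return 0;
}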
+static const struct tw686x_format *format_by_fourcc(unsigned int fourcc)
+{
+ unsigned int cnt;
+
+ for (cnt = 0; cnt < ARRAY_SIZE(formats); cnt++)
+ if (formats[cnt].fourcc == fourcc)
+ return &formats[cnt];
+ return NULL;
+}
+
+static int tw686x_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+ unsigned int szimage =
+ (vc->width * vc->height * vc->format->depth) >> 3;
+
+ /*
+ * Let's request at least three buffers: two for the
+ * DMA engine and one for userspace.
+ */
+ if (vq->num_buffers + *nbuffers < 3)
+ *nbuffers = 3 - vq->num_buffers;
+
+ if (*nplanes) {
+ if (*nplanes != 1 || sizes[0] < szimage)
+ return -EINVAL;
+ return 0;
+ }
+
+ sizes[0] = szimage;
+ *nplanes = 1;
+ return 0;
+}
+
+static void tw686x_buf_queue(struct vb2_buffer *vb)
+{
+ struct tw686x_video_channel *vc = vb2_get_drv_priv(vb->vb2_queue);
+ struct tw686x_dev *dev = vc->dev;
+ struct pci_dev *pci_dev;
+ unsigned long flags;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct tw686x_v4l2_buf *buf =
+ container_of(vbuf, struct tw686x_v4l2_buf, vb);
+
+ /* Check device presence */
+ spin_lock_irqsave(&dev->lock, flags);
+ pci_dev = dev->pci_dev;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (!pci_dev) {
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ spin_lock_irqsave(&vc->qlock, flags);
+ list_add_tail(&buf->list, &vc->vidq_queued);
+ spin_unlock_irqrestore(&vc->qlock, flags);
+}
+
+/*
+ * We can call this even when alloc_dma failed for the given channel
+ */
+static void tw686x_free_dma(struct tw686x_video_channel *vc, unsigned int pb)
+{
+ struct tw686x_dma_desc *desc = &vc->dma_descs[pb];
+ struct tw686x_dev *dev = vc->dev;
+ struct pci_dev *pci_dev;
+ unsigned long flags;
+
+ /* Check device presence. Shouldn't really happen! */
+ spin_lock_irqsave(&dev->lock, flags);
+ pci_dev = dev->pci_dev;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (!pci_dev) {
+ WARN(1, "trying to deallocate on missing device\n");
+ return;
+ }
+
+ if (desc->virt) {
+ pci_free_consistent(dev->pci_dev, desc->size,
+ desc->virt, desc->phys);
+ desc->virt = NULL;
+ }
+}
+
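+/*
+ * Allocate a coherent DMA buffer for the P or B slot of a channel and
+ * program its bus address into the matching VDMA address register.
+ */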
+static int tw686x_alloc_dma(struct tw686x_video_channel *vc, unsigned int pb)
+{
+ struct tw686x_dev *dev = vc->dev;
+ u32 reg = pb ? VDMA_B_ADDR[vc->ch] : VDMA_P_ADDR[vc->ch];
+ unsigned int len;
+ void *virt;
+
+ WARN(vc->dma_descs[pb].virt,
+ "Allocating a DMA buffer while the previous one is still allocated\n");
+
+ len = (vc->width * vc->height * vc->format->depth) >> 3;
+ virt = pci_alloc_consistent(dev->pci_dev, len,
+ &vc->dma_descs[pb].phys);
+ if (!virt) {
+ v4l2_err(&dev->v4l2_dev,
+ "dma%d: unable to allocate %s-buffer\n",
+ vc->ch, pb ? "B" : "P");
+ return -ENOMEM;
+ }
+ vc->dma_descs[pb].size = len;
+ vc->dma_descs[pb].virt = virt;
+ reg_write(dev, reg, vc->dma_descs[pb].phys);
+
+ return 0;
+}
+
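+/*
+ * Make the next queued buffer, if any, the active one for the given
+ * P or B DMA slot; if the queue is empty the slot is left unused and
+ * the corresponding frame is dropped.
+ */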
+static void tw686x_buffer_refill(struct tw686x_video_channel *vc,
+ unsigned int pb)
+{
+ struct tw686x_v4l2_buf *buf;
+
+ while (!list_empty(&vc->vidq_queued)) {
+
+ buf = list_first_entry(&vc->vidq_queued,
+ struct tw686x_v4l2_buf, list);
+ list_del(&buf->list);
+
+ vc->curr_bufs[pb] = buf;
+ return;
+ }
+ vc->curr_bufs[pb] = NULL;
+}
+
+static void tw686x_clear_queue(struct tw686x_video_channel *vc,
+ enum vb2_buffer_state state)
+{
+ unsigned int pb;
+
+ while (!list_empty(&vc->vidq_queued)) {
+ struct tw686x_v4l2_buf *buf;
+
+ buf = list_first_entry(&vc->vidq_queued,
+ struct tw686x_v4l2_buf, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+
+ for (pb = 0; pb < 2; pb++) {
+ if (vc->curr_bufs[pb])
+ vb2_buffer_done(&vc->curr_bufs[pb]->vb.vb2_buf, state);
+ vc->curr_bufs[pb] = NULL;
+ }
+}
+
+static int tw686x_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+ struct tw686x_dev *dev = vc->dev;
+ struct pci_dev *pci_dev;
+ unsigned long flags;
+ int pb, err;
+
+ /* Check device presence */
+ spin_lock_irqsave(&dev->lock, flags);
+ pci_dev = dev->pci_dev;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (!pci_dev) {
+ err = -ENODEV;
+ goto err_clear_queue;
+ }
+
+ spin_lock_irqsave(&vc->qlock, flags);
+
+ /* Sanity check */
+ if (!vc->dma_descs[0].virt || !vc->dma_descs[1].virt) {
+ spin_unlock_irqrestore(&vc->qlock, flags);
+ v4l2_err(&dev->v4l2_dev,
+ "video%d: refusing to start without DMA buffers\n",
+ vc->num);
+ err = -ENOMEM;
+ goto err_clear_queue;
+ }
+
+ for (pb = 0; pb < 2; pb++)
+ tw686x_buffer_refill(vc, pb);
+ spin_unlock_irqrestore(&vc->qlock, flags);
+
+ vc->sequence = 0;
+ vc->pb = 0;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ tw686x_enable_channel(dev, vc->ch);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ mod_timer(&dev->dma_delay_timer, jiffies + msecs_to_jiffies(100));
+
+ return 0;
+
+err_clear_queue:
+ spin_lock_irqsave(&vc->qlock, flags);
+ tw686x_clear_queue(vc, VB2_BUF_STATE_QUEUED);
+ spin_unlock_irqrestore(&vc->qlock, flags);
+ return err;
+}
+
+static void tw686x_stop_streaming(struct vb2_queue *vq)
+{
+ struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+ struct tw686x_dev *dev = vc->dev;
+ struct pci_dev *pci_dev;
+ unsigned long flags;
+
+ /* Check device presence */
+ spin_lock_irqsave(&dev->lock, flags);
+ pci_dev = dev->pci_dev;
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (pci_dev)
+ tw686x_disable_channel(dev, vc->ch);
+
+ spin_lock_irqsave(&vc->qlock, flags);
+ tw686x_clear_queue(vc, VB2_BUF_STATE_ERROR);
+ spin_unlock_irqrestore(&vc->qlock, flags);
+}
+
+static int tw686x_buf_prepare(struct vb2_buffer *vb)
+{
+ struct tw686x_video_channel *vc = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned int size =
+ (vc->width * vc->height * vc->format->depth) >> 3;
+
+ if (vb2_plane_size(vb, 0) < size)
+ return -EINVAL;
+ vb2_set_plane_payload(vb, 0, size);
+ return 0;
+}
+
+static struct vb2_ops tw686x_video_qops = {
+ .queue_setup = tw686x_queue_setup,
+ .buf_queue = tw686x_buf_queue,
+ .buf_prepare = tw686x_buf_prepare,
+ .start_streaming = tw686x_start_streaming,
+ .stop_streaming = tw686x_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int tw686x_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct tw686x_video_channel *vc;
+ struct tw686x_dev *dev;
+ unsigned int ch;
+
+ vc = container_of(ctrl->handler, struct tw686x_video_channel,
+ ctrl_handler);
+ dev = vc->dev;
+ ch = vc->ch;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ reg_write(dev, BRIGHT[ch], ctrl->val & 0xff);
+ return 0;
+
+ case V4L2_CID_CONTRAST:
+ reg_write(dev, CONTRAST[ch], ctrl->val);
+ return 0;
+
+ case V4L2_CID_SATURATION:
+ reg_write(dev, SAT_U[ch], ctrl->val);
+ reg_write(dev, SAT_V[ch], ctrl->val);
+ return 0;
+
+ case V4L2_CID_HUE:
+ reg_write(dev, HUE[ch], ctrl->val & 0xff);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = tw686x_s_ctrl,
+};
+
+static int tw686x_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+
+ f->fmt.pix.width = vc->width;
+ f->fmt.pix.height = vc->height;
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ f->fmt.pix.pixelformat = vc->format->fourcc;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * vc->format->depth) / 8;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ return 0;
+}
+
+static int tw686x_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ unsigned int video_height = TW686X_VIDEO_HEIGHT(vc->video_standard);
+ const struct tw686x_format *format;
+
+ format = format_by_fourcc(f->fmt.pix.pixelformat);
+ if (!format) {
+ format = &formats[0];
+ f->fmt.pix.pixelformat = format->fourcc;
+ }
+
+ if (f->fmt.pix.width <= TW686X_VIDEO_WIDTH / 2)
+ f->fmt.pix.width = TW686X_VIDEO_WIDTH / 2;
+ else
+ f->fmt.pix.width = TW686X_VIDEO_WIDTH;
+
+ if (f->fmt.pix.height <= video_height / 2)
+ f->fmt.pix.height = video_height / 2;
+ else
+ f->fmt.pix.height = video_height;
+
+ f->fmt.pix.bytesperline = (f->fmt.pix.width * format->depth) / 8;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+
+ return 0;
+}
+
+static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ u32 val, width, line_width, height;
+ unsigned long bitsperframe;
+ int err, pb;
+
+ if (vb2_is_busy(&vc->vidq))
+ return -EBUSY;
+
+ bitsperframe = vc->width * vc->height * vc->format->depth;
+ err = tw686x_try_fmt_vid_cap(file, priv, f);
+ if (err)
+ return err;
+
+ vc->format = format_by_fourcc(f->fmt.pix.pixelformat);
+ vc->width = f->fmt.pix.width;
+ vc->height = f->fmt.pix.height;
+
+ /* We need new DMA buffers if the framesize has changed */
+ if (bitsperframe != vc->width * vc->height * vc->format->depth) {
+ for (pb = 0; pb < 2; pb++)
+ tw686x_free_dma(vc, pb);
+
+ for (pb = 0; pb < 2; pb++) {
+ err = tw686x_alloc_dma(vc, pb);
+ if (err) {
+ if (pb > 0)
+ tw686x_free_dma(vc, 0);
+ return err;
+ }
+ }
+ }
+
+ val = reg_read(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch]);
+
+ if (vc->width <= TW686X_VIDEO_WIDTH / 2)
+ val |= BIT(23);
+ else
+ val &= ~BIT(23);
+
+ if (vc->height <= TW686X_VIDEO_HEIGHT(vc->video_standard) / 2)
+ val |= BIT(24);
+ else
+ val &= ~BIT(24);
+
+ val &= ~(0x7 << 20);
+ val |= vc->format->mode << 20;
+ reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], val);
+
+ /* Program the DMA frame size */
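+ /*
+ * All supported formats are 16 bits per pixel, so the line width in
+ * bytes is twice the pixel width; capture is interlaced, so the
+ * height is programmed per field.
+ */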
+ width = (vc->width * 2) & 0x7ff;
+ height = vc->height / 2;
+ line_width = (vc->width * 2) & 0x7ff;
+ val = (height << 22) | (line_width << 11) | width;
+ reg_write(vc->dev, VDMA_WHP[vc->ch], val);
+ return 0;
+}
+
+static int tw686x_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ struct tw686x_dev *dev = vc->dev;
+
+ strlcpy(cap->driver, "tw686x", sizeof(cap->driver));
+ strlcpy(cap->card, dev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "PCI:%s", pci_name(dev->pci_dev));
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int tw686x_s_std(struct file *file, void *priv, v4l2_std_id id)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ struct v4l2_format f;
+ u32 val;
+ int ret;
+
+ if (vc->video_standard == id)
+ return 0;
+
+ if (vb2_is_busy(&vc->vidq))
+ return -EBUSY;
+
+ if (id & V4L2_STD_NTSC)
+ val = 0;
+ else if (id & V4L2_STD_PAL)
+ val = 1;
+ else if (id & V4L2_STD_SECAM)
+ val = 2;
+ else if (id & V4L2_STD_NTSC_443)
+ val = 3;
+ else if (id & V4L2_STD_PAL_M)
+ val = 4;
+ else if (id & V4L2_STD_PAL_Nc)
+ val = 5;
+ else if (id & V4L2_STD_PAL_60)
+ val = 6;
+ else
+ return -EINVAL;
+
+ vc->video_standard = id;
+ reg_write(vc->dev, SDT[vc->ch], val);
+
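+ /*
+ * Each channel has a system-mode bit in VIDEO_CONTROL1: cleared for
+ * 60Hz (525-line) standards, set for 50Hz (625-line) ones.
+ */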
+ val = reg_read(vc->dev, VIDEO_CONTROL1);
+ if (id & V4L2_STD_525_60)
+ val &= ~(1 << (SYS_MODE_DMA_SHIFT + vc->ch));
+ else
+ val |= (1 << (SYS_MODE_DMA_SHIFT + vc->ch));
+ reg_write(vc->dev, VIDEO_CONTROL1, val);
+
+ /*
+ * Adjust the format after a V4L2_STD_525_60/V4L2_STD_625_50 change:
+ * calling g_fmt and then s_fmt sanitizes the height according to
+ * the new standard.
+ */
+ ret = tw686x_g_fmt_vid_cap(file, priv, &f);
+ if (!ret)
+ tw686x_s_fmt_vid_cap(file, priv, &f);
+ return 0;
+}
+
+static int tw686x_querystd(struct file *file, void *priv, v4l2_std_id *std)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ struct tw686x_dev *dev = vc->dev;
+ unsigned int old_std, detected_std = 0;
+ unsigned long end;
+
+ if (vb2_is_streaming(&vc->vidq))
+ return -EBUSY;
+
+ /* Enable and start standard detection */
+ old_std = reg_read(dev, SDT[vc->ch]);
+ reg_write(dev, SDT[vc->ch], 0x7);
+ reg_write(dev, SDT_EN[vc->ch], 0xff);
+
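+ /* Poll for up to 500ms; BIT(7) stays set while detection is busy */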
+ end = jiffies + msecs_to_jiffies(500);
+ while (time_is_after_jiffies(end)) {
+
+ detected_std = reg_read(dev, SDT[vc->ch]);
+ if (!(detected_std & BIT(7)))
+ break;
+ msleep(100);
+ }
+ reg_write(dev, SDT[vc->ch], old_std);
+
+ /* Exit if still busy */
+ if (detected_std & BIT(7))
+ return 0;
+
+ detected_std = (detected_std >> 4) & 0x7;
+ switch (detected_std) {
+ case TW686X_STD_NTSC_M:
+ *std &= V4L2_STD_NTSC;
+ break;
+ case TW686X_STD_NTSC_443:
+ *std &= V4L2_STD_NTSC_443;
+ break;
+ case TW686X_STD_PAL_M:
+ *std &= V4L2_STD_PAL_M;
+ break;
+ case TW686X_STD_PAL_60:
+ *std &= V4L2_STD_PAL_60;
+ break;
+ case TW686X_STD_PAL:
+ *std &= V4L2_STD_PAL;
+ break;
+ case TW686X_STD_PAL_CN:
+ *std &= V4L2_STD_PAL_Nc;
+ break;
+ case TW686X_STD_SECAM:
+ *std &= V4L2_STD_SECAM;
+ break;
+ default:
+ *std = 0;
+ }
+ return 0;
+}
+
+static int tw686x_g_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+
+ *id = vc->video_standard;
+ return 0;
+}
+
+static int tw686x_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+ f->pixelformat = formats[f->index].fourcc;
+ return 0;
+}
+
+static int tw686x_s_input(struct file *file, void *priv, unsigned int i)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ u32 val;
+
+ if (i >= TW686X_INPUTS_PER_CH)
+ return -EINVAL;
+ if (i == vc->input)
+ return 0;
+ /*
+ * It is unclear whether on-the-fly input changes can be supported,
+ * so refuse them while the queue is busy.
+ */
+ if (vb2_is_busy(&vc->vidq))
+ return -EBUSY;
+
+ vc->input = i;
+
+ val = reg_read(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch]);
+ val &= ~(0x3 << 30);
+ val |= i << 30;
+ reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], val);
+ return 0;
+}
+
+static int tw686x_g_input(struct file *file, void *priv, unsigned int *i)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+
+ *i = vc->input;
+ return 0;
+}
+
+static int tw686x_enum_input(struct file *file, void *priv,
+ struct v4l2_input *i)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ unsigned int vidstat;
+
+ if (i->index >= TW686X_INPUTS_PER_CH)
+ return -EINVAL;
+
+ snprintf(i->name, sizeof(i->name), "Composite%d", i->index);
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+ i->std = vc->device->tvnorms;
+ i->capabilities = V4L2_IN_CAP_STD;
+
+ vidstat = reg_read(vc->dev, VIDSTAT[vc->ch]);
+ i->status = 0;
+ if (vidstat & TW686X_VIDSTAT_VDLOSS)
+ i->status |= V4L2_IN_ST_NO_SIGNAL;
+ if (!(vidstat & TW686X_VIDSTAT_HLOCK))
+ i->status |= V4L2_IN_ST_NO_H_LOCK;
+
+ return 0;
+}
+
+static const struct v4l2_file_operations tw686x_video_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .unlocked_ioctl = video_ioctl2,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops tw686x_video_ioctl_ops = {
+ .vidioc_querycap = tw686x_querycap,
+ .vidioc_g_fmt_vid_cap = tw686x_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = tw686x_s_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_cap = tw686x_enum_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = tw686x_try_fmt_vid_cap,
+
+ .vidioc_querystd = tw686x_querystd,
+ .vidioc_g_std = tw686x_g_std,
+ .vidioc_s_std = tw686x_s_std,
+
+ .vidioc_enum_input = tw686x_enum_input,
+ .vidioc_g_input = tw686x_g_input,
+ .vidioc_s_input = tw686x_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
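+/*
+ * Hand a completed frame to vb2: copy the finished DMA buffer into the
+ * vmalloc'ed vb2 buffer, timestamp it and mark it done.
+ */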
+static void tw686x_buffer_copy(struct tw686x_video_channel *vc,
+ unsigned int pb, struct vb2_v4l2_buffer *vb)
+{
+ struct tw686x_dma_desc *desc = &vc->dma_descs[pb];
+ struct vb2_buffer *vb2_buf = &vb->vb2_buf;
+
+ vb->field = V4L2_FIELD_INTERLACED;
+ vb->sequence = vc->sequence++;
+
+ memcpy(vb2_plane_vaddr(vb2_buf, 0), desc->virt, desc->size);
+ vb2_buf->timestamp = ktime_get_ns();
+ vb2_buffer_done(vb2_buf, VB2_BUF_STATE_DONE);
+}
+
+void tw686x_video_irq(struct tw686x_dev *dev, unsigned long requests,
+ unsigned int pb_status, unsigned int fifo_status,
+ unsigned int *reset_ch)
+{
+ struct tw686x_video_channel *vc;
+ struct vb2_v4l2_buffer *vb;
+ unsigned long flags;
+ unsigned int ch, pb;
+
+ for_each_set_bit(ch, &requests, max_channels(dev)) {
+ vc = &dev->video_channels[ch];
+
+ /*
+ * This can either be a blue frame (with the signal-lost bit set)
+ * or a good frame (with the signal-lost bit clear). If the signal
+ * has just been regained, the channel needs resetting.
+ */
+ if (vc->no_signal && !(fifo_status & BIT(ch))) {
+ v4l2_printk(KERN_DEBUG, &dev->v4l2_dev,
+ "video%d: signal recovered\n", vc->num);
+ vc->no_signal = false;
+ *reset_ch |= BIT(ch);
+ vc->pb = 0;
+ continue;
+ }
+ vc->no_signal = !!(fifo_status & BIT(ch));
+
+ /* Check FIFO errors only if there's signal */
+ if (!vc->no_signal) {
+ u32 fifo_ov, fifo_bad;
+
+ fifo_ov = (fifo_status >> 24) & BIT(ch);
+ fifo_bad = (fifo_status >> 16) & BIT(ch);
+ if (fifo_ov || fifo_bad) {
+ /* Mark this channel for reset */
+ v4l2_printk(KERN_DEBUG, &dev->v4l2_dev,
+ "video%d: FIFO error\n", vc->num);
+ *reset_ch |= BIT(ch);
+ vc->pb = 0;
+ continue;
+ }
+ }
+
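+ /*
+ * The DMA engine ping-pongs between the P and B buffers; pb_status
+ * reports which one just completed, and it has to match our own
+ * bookkeeping, otherwise we have lost sync with the hardware.
+ */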
+ pb = !!(pb_status & BIT(ch));
+ if (vc->pb != pb) {
+ /* Mark this channel for reset */
+ v4l2_printk(KERN_DEBUG, &dev->v4l2_dev,
+ "video%d: unexpected p-b buffer!\n",
+ vc->num);
+ *reset_ch |= BIT(ch);
+ vc->pb = 0;
+ continue;
+ }
+
+ /* handle video stream */
+ spin_lock_irqsave(&vc->qlock, flags);
+ if (vc->curr_bufs[pb]) {
+ vb = &vc->curr_bufs[pb]->vb;
+ tw686x_buffer_copy(vc, pb, vb);
+ }
+ vc->pb = !pb;
+ tw686x_buffer_refill(vc, pb);
+ spin_unlock_irqrestore(&vc->qlock, flags);
+ }
+}
+
+void tw686x_video_free(struct tw686x_dev *dev)
+{
+ unsigned int ch, pb;
+
+ for (ch = 0; ch < max_channels(dev); ch++) {
+ struct tw686x_video_channel *vc = &dev->video_channels[ch];
+
+ if (vc->device)
+ video_unregister_device(vc->device);
+
+ for (pb = 0; pb < 2; pb++)
+ tw686x_free_dma(vc, pb);
+ }
+}
+
+int tw686x_video_init(struct tw686x_dev *dev)
+{
+ unsigned int ch, val, pb;
+ int err;
+
+ err = v4l2_device_register(&dev->pci_dev->dev, &dev->v4l2_dev);
+ if (err)
+ return err;
+
+ for (ch = 0; ch < max_channels(dev); ch++) {
+ struct tw686x_video_channel *vc = &dev->video_channels[ch];
+ struct video_device *vdev;
+
+ mutex_init(&vc->vb_mutex);
+ spin_lock_init(&vc->qlock);
+ INIT_LIST_HEAD(&vc->vidq_queued);
+
+ vc->dev = dev;
+ vc->ch = ch;
+
+ /* default settings */
+ vc->format = &formats[0];
+ vc->video_standard = V4L2_STD_NTSC;
+ vc->width = TW686X_VIDEO_WIDTH;
+ vc->height = TW686X_VIDEO_HEIGHT(vc->video_standard);
+ vc->input = 0;
+
+ reg_write(vc->dev, SDT[ch], 0);
+ tw686x_set_framerate(vc, 30);
+
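+ /*
+ * Conservative capture window defaults; 0xd0 is presumably the
+ * low byte of the 0x2d0 (720 pixel) active width.
+ */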
+ reg_write(dev, VDELAY_LO[ch], 0x14);
+ reg_write(dev, HACTIVE_LO[ch], 0xd0);
+ reg_write(dev, VIDEO_SIZE[ch], 0);
+
+ for (pb = 0; pb < 2; pb++) {
+ err = tw686x_alloc_dma(vc, pb);
+ if (err)
+ goto error;
+ }
+
+ vc->vidq.io_modes = VB2_READ | VB2_MMAP | VB2_DMABUF;
+ vc->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vc->vidq.drv_priv = vc;
+ vc->vidq.buf_struct_size = sizeof(struct tw686x_v4l2_buf);
+ vc->vidq.ops = &tw686x_video_qops;
+ vc->vidq.mem_ops = &vb2_vmalloc_memops;
+ vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vc->vidq.min_buffers_needed = 2;
+ vc->vidq.lock = &vc->vb_mutex;
+ vc->vidq.gfp_flags = GFP_DMA32;
+
+ err = vb2_queue_init(&vc->vidq);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev,
+ "dma%d: cannot init vb2 queue\n", ch);
+ goto error;
+ }
+
+ err = v4l2_ctrl_handler_init(&vc->ctrl_handler, 4);
+ if (err) {
+ v4l2_err(&dev->v4l2_dev,
+ "dma%d: cannot init ctrl handler\n", ch);
+ goto error;
+ }
+ v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+ V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 100);
+ v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+ V4L2_CID_HUE, -128, 127, 1, 0);
+ err = vc->ctrl_handler.error;
+ if (err)
+ goto error;
+
+ err = v4l2_ctrl_handler_setup(&vc->ctrl_handler);
+ if (err)
+ goto error;
+
+ vdev = video_device_alloc();
+ if (!vdev) {
+ v4l2_err(&dev->v4l2_dev,
+ "dma%d: unable to allocate device\n", ch);
+ err = -ENOMEM;
+ goto error;
+ }
+
+ snprintf(vdev->name, sizeof(vdev->name), "%s video", dev->name);
+ vdev->fops = &tw686x_video_fops;
+ vdev->ioctl_ops = &tw686x_video_ioctl_ops;
+ vdev->release = video_device_release;
+ vdev->v4l2_dev = &dev->v4l2_dev;
+ vdev->queue = &vc->vidq;
+ vdev->tvnorms = V4L2_STD_525_60 | V4L2_STD_625_50;
+ vdev->minor = -1;
+ vdev->lock = &vc->vb_mutex;
+ vdev->ctrl_handler = &vc->ctrl_handler;
+ vc->device = vdev;
+ video_set_drvdata(vdev, vc);
+
+ err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (err < 0)
+ goto error;
+ vc->num = vdev->num;
+ }
+
+ /* Set DMA frame mode on all channels. Only supported mode for now. */
+ val = TW686X_DEF_PHASE_REF;
+ for (ch = 0; ch < max_channels(dev); ch++)
+ val |= TW686X_FRAME_MODE << (16 + ch * 2);
+ reg_write(dev, PHASE_REF, val);
+
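+ /*
+ * Miscellaneous per-group initialization; the second register bank
+ * only exists on 8-channel devices.
+ */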
+ reg_write(dev, MISC2[0], 0xe7);
+ reg_write(dev, VCTRL1[0], 0xcc);
+ reg_write(dev, LOOP[0], 0xa5);
+ if (max_channels(dev) > 4) {
+ reg_write(dev, VCTRL1[1], 0xcc);
+ reg_write(dev, LOOP[1], 0xa5);
+ reg_write(dev, MISC2[1], 0xe7);
+ }
+ return 0;
+
+error:
+ tw686x_video_free(dev);
+ return err;
+}
diff --git a/drivers/media/pci/tw686x/tw686x.h b/drivers/media/pci/tw686x/tw686x.h
new file mode 100644
index 000000000..44b5755ac
--- /dev/null
+++ b/drivers/media/pci/tw686x/tw686x.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2015 VanguardiaSur - www.vanguardiasur.com.ar
+ *
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ * Written by Krzysztof Hałasa
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/timer.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <sound/pcm.h>
+
+#include "tw686x-regs.h"
+
+#define TYPE_MAX_CHANNELS 0x0f
+#define TYPE_SECOND_GEN 0x10
+#define TW686X_DEF_PHASE_REF 0x1518
+
+#define TW686X_FIELD_MODE 0x3
+#define TW686X_FRAME_MODE 0x2
+/* 0x1 is reserved */
+#define TW686X_SG_MODE 0x0
+
+#define TW686X_AUDIO_PAGE_SZ 4096
+#define TW686X_AUDIO_PAGE_MAX 16
+#define TW686X_AUDIO_PERIODS_MIN 2
+#define TW686X_AUDIO_PERIODS_MAX TW686X_AUDIO_PAGE_MAX
+
+struct tw686x_format {
+ char *name;
+ unsigned int fourcc;
+ unsigned int depth;
+ unsigned int mode;
+};
+
+struct tw686x_dma_desc {
+ dma_addr_t phys;
+ void *virt;
+ unsigned int size;
+};
+
+struct tw686x_audio_buf {
+ dma_addr_t dma;
+ void *virt;
+ struct list_head list;
+};
+
+struct tw686x_v4l2_buf {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+struct tw686x_audio_channel {
+ struct tw686x_dev *dev;
+ struct snd_pcm_substream *ss;
+ unsigned int ch;
+ struct tw686x_audio_buf *curr_bufs[2];
+ struct tw686x_dma_desc dma_descs[2];
+ dma_addr_t ptr;
+
+ struct tw686x_audio_buf buf[TW686X_AUDIO_PAGE_MAX];
+ struct list_head buf_list;
+ spinlock_t lock;
+};
+
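+/* Per-channel capture state: vb2 queue, P/B DMA buffers, current format */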
+struct tw686x_video_channel {
+ struct tw686x_dev *dev;
+
+ struct vb2_queue vidq;
+ struct list_head vidq_queued;
+ struct video_device *device;
+ struct tw686x_v4l2_buf *curr_bufs[2];
+ struct tw686x_dma_desc dma_descs[2];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ const struct tw686x_format *format;
+ struct mutex vb_mutex;
+ spinlock_t qlock;
+ v4l2_std_id video_standard;
+ unsigned int width, height;
+ unsigned int h_halve, v_halve;
+ unsigned int ch;
+ unsigned int num;
+ unsigned int fps;
+ unsigned int input;
+ unsigned int sequence;
+ unsigned int pb;
+ bool no_signal;
+};
+
+/**
+ * struct tw686x_dev - global device status
+ * @lock: spinlock controlling access to the
+ * shared device registers (DMA enable/disable).
+ */
+struct tw686x_dev {
+ spinlock_t lock;
+
+ struct v4l2_device v4l2_dev;
+ struct snd_card *snd_card;
+
+ char name[32];
+ unsigned int type;
+ struct pci_dev *pci_dev;
+ __u32 __iomem *mmio;
+
+ void *alloc_ctx;
+
+ struct tw686x_video_channel *video_channels;
+ struct tw686x_audio_channel *audio_channels;
+
+ int audio_rate; /* per-device value */
+
+ struct timer_list dma_delay_timer;
+ u32 pending_dma_en; /* must be protected by lock */
+ u32 pending_dma_cmd; /* must be protected by lock */
+};
+
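+/* Register offsets are in 32-bit words: mmio is declared as a u32 pointer */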
+static inline uint32_t reg_read(struct tw686x_dev *dev, unsigned int reg)
+{
+ return readl(dev->mmio + reg);
+}
+
+static inline void reg_write(struct tw686x_dev *dev, unsigned int reg,
+ uint32_t value)
+{
+ writel(value, dev->mmio + reg);
+}
+
+static inline unsigned int max_channels(struct tw686x_dev *dev)
+{
+ return dev->type & TYPE_MAX_CHANNELS; /* 4 or 8 channels */
+}
+
+void tw686x_enable_channel(struct tw686x_dev *dev, unsigned int channel);
+void tw686x_disable_channel(struct tw686x_dev *dev, unsigned int channel);
+
+int tw686x_video_init(struct tw686x_dev *dev);
+void tw686x_video_free(struct tw686x_dev *dev);
+void tw686x_video_irq(struct tw686x_dev *dev, unsigned long requests,
+ unsigned int pb_status, unsigned int fifo_status,
+ unsigned int *reset_ch);
+
+int tw686x_audio_init(struct tw686x_dev *dev);
+void tw686x_audio_free(struct tw686x_dev *dev);
+void tw686x_audio_irq(struct tw686x_dev *dev, unsigned long requests,
+ unsigned int pb_status);
diff --git a/drivers/media/pci/zoran/videocodec.c b/drivers/media/pci/zoran/videocodec.c
index c01071635..13a3c07cd 100644
--- a/drivers/media/pci/zoran/videocodec.c
+++ b/drivers/media/pci/zoran/videocodec.c
@@ -116,8 +116,9 @@ videocodec_attach (struct videocodec_master *master)
goto out_module_put;
}
- snprintf(codec->name, sizeof(codec->name),
- "%s[%d]", codec->name, h->attached);
+ res = strlen(codec->name);
+ snprintf(codec->name + res, sizeof(codec->name) - res,
+ "[%d]", h->attached);
codec->master_data = master;
res = codec->setup(codec);
if (res == 0) {
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 201f5c296..84e041c0a 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -238,7 +238,7 @@ config VIDEO_SH_VEU
config VIDEO_RENESAS_JPU
tristate "Renesas JPEG Processing Unit"
depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
---help---
@@ -250,7 +250,7 @@ config VIDEO_RENESAS_JPU
config VIDEO_RENESAS_VSP1
tristate "Renesas VSP1 Video Processing Engine"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
- depends on (ARCH_SHMOBILE && OF) || COMPILE_TEST
+ depends on (ARCH_RENESAS && OF) || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
---help---
This is a V4L2 driver for the Renesas VSP1 video processing engine.
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index de32e3a3d..e749eb7c3 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -1047,7 +1047,7 @@ static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe,
static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
{
enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
- int ret;
+ int ret = 0;
vpfe_dbg(2, vpfe, "vpfe_config_ccdc_image_format\n");
@@ -1706,7 +1706,7 @@ static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
sdinfo = &cfg->sub_devs[i];
client = v4l2_get_subdevdata(sdinfo->sd);
if (client->addr == curr_client->addr &&
- client->adapter->nr == client->adapter->nr) {
+ client->adapter->nr == curr_client->adapter->nr) {
if (vpfe->current_input >= 1)
return -1;
*app_input_index = j + vpfe->current_input;
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 9b9e423e4..c04973669 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -967,15 +967,6 @@ static struct gsc_driverdata gsc_v_100_drvdata = {
.lclk_frequency = 266000000UL,
};
-static const struct platform_device_id gsc_driver_ids[] = {
- {
- .name = "exynos-gsc",
- .driver_data = (unsigned long)&gsc_v_100_drvdata,
- },
- {},
-};
-MODULE_DEVICE_TABLE(platform, gsc_driver_ids);
-
static const struct of_device_id exynos_gsc_match[] = {
{
.compatible = "samsung,exynos5-gsc",
@@ -988,17 +979,11 @@ MODULE_DEVICE_TABLE(of, exynos_gsc_match);
static void *gsc_get_drv_data(struct platform_device *pdev)
{
struct gsc_driverdata *driver_data = NULL;
+ const struct of_device_id *match;
- if (pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_node(exynos_gsc_match,
- pdev->dev.of_node);
- if (match)
- driver_data = (struct gsc_driverdata *)match->data;
- } else {
- driver_data = (struct gsc_driverdata *)
- platform_get_device_id(pdev)->driver_data;
- }
+ match = of_match_node(exynos_gsc_match, pdev->dev.of_node);
+ if (match)
+ driver_data = (struct gsc_driverdata *)match->data;
return driver_data;
}
@@ -1078,17 +1063,17 @@ static int gsc_probe(struct platform_device *pdev)
struct resource *res;
struct gsc_driverdata *drv_data = gsc_get_drv_data(pdev);
struct device *dev = &pdev->dev;
- int ret = 0;
+ int ret;
gsc = devm_kzalloc(dev, sizeof(struct gsc_dev), GFP_KERNEL);
if (!gsc)
return -ENOMEM;
- if (dev->of_node)
- gsc->id = of_alias_get_id(pdev->dev.of_node, "gsc");
- else
- gsc->id = pdev->id;
+ ret = of_alias_get_id(pdev->dev.of_node, "gsc");
+ if (ret < 0)
+ return ret;
+ gsc->id = ret;
if (gsc->id >= drv_data->num_entities) {
dev_err(dev, "Invalid platform device id: %d\n", gsc->id);
return -EINVAL;
@@ -1096,7 +1081,6 @@ static int gsc_probe(struct platform_device *pdev)
gsc->variant = drv_data->variant[gsc->id];
gsc->pdev = pdev;
- gsc->pdata = dev->platform_data;
init_waitqueue_head(&gsc->irq_queue);
spin_lock_init(&gsc->slock);
@@ -1253,7 +1237,6 @@ static const struct dev_pm_ops gsc_pm_ops = {
static struct platform_driver gsc_driver = {
.probe = gsc_probe,
.remove = gsc_remove,
- .id_table = gsc_driver_ids,
.driver = {
.name = GSC_MODULE_NAME,
.pm = &gsc_pm_ops,
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index e93a2336c..ec4000c72 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -340,7 +340,6 @@ struct gsc_dev {
void __iomem *regs;
wait_queue_head_t irq_queue;
struct gsc_m2m_device m2m;
- struct exynos_platform_gscaler *pdata;
unsigned long state;
struct vb2_alloc_ctx *alloc_ctx;
struct video_device vdev;
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index cef2a7f07..b1c1cea82 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -1154,26 +1154,6 @@ static const struct fimc_pix_limit s5p_pix_limit[4] = {
},
};
-static const struct fimc_variant fimc0_variant_s5p = {
- .has_inp_rot = 1,
- .has_out_rot = 1,
- .has_cam_if = 1,
- .min_inp_pixsize = 16,
- .min_out_pixsize = 16,
- .hor_offs_align = 8,
- .min_vsize_align = 16,
- .pix_limit = &s5p_pix_limit[0],
-};
-
-static const struct fimc_variant fimc2_variant_s5p = {
- .has_cam_if = 1,
- .min_inp_pixsize = 16,
- .min_out_pixsize = 16,
- .hor_offs_align = 8,
- .min_vsize_align = 16,
- .pix_limit = &s5p_pix_limit[1],
-};
-
static const struct fimc_variant fimc0_variant_s5pv210 = {
.has_inp_rot = 1,
.has_out_rot = 1,
@@ -1206,18 +1186,6 @@ static const struct fimc_variant fimc2_variant_s5pv210 = {
.pix_limit = &s5p_pix_limit[2],
};
-/* S5PC100 */
-static const struct fimc_drvdata fimc_drvdata_s5p = {
- .variant = {
- [0] = &fimc0_variant_s5p,
- [1] = &fimc0_variant_s5p,
- [2] = &fimc2_variant_s5p,
- },
- .num_entities = 3,
- .lclk_frequency = 133000000UL,
- .out_buf_count = 4,
-};
-
/* S5PV210, S5PC110 */
static const struct fimc_drvdata fimc_drvdata_s5pv210 = {
.variant = {
@@ -1251,23 +1219,6 @@ static const struct fimc_drvdata fimc_drvdata_exynos4x12 = {
.out_buf_count = 32,
};
-static const struct platform_device_id fimc_driver_ids[] = {
- {
- .name = "s5p-fimc",
- .driver_data = (unsigned long)&fimc_drvdata_s5p,
- }, {
- .name = "s5pv210-fimc",
- .driver_data = (unsigned long)&fimc_drvdata_s5pv210,
- }, {
- .name = "exynos4-fimc",
- .driver_data = (unsigned long)&fimc_drvdata_exynos4210,
- }, {
- .name = "exynos4x12-fimc",
- .driver_data = (unsigned long)&fimc_drvdata_exynos4x12,
- },
- { },
-};
-
static const struct of_device_id fimc_of_match[] = {
{
.compatible = "samsung,s5pv210-fimc",
@@ -1290,7 +1241,6 @@ static const struct dev_pm_ops fimc_pm_ops = {
static struct platform_driver fimc_driver = {
.probe = fimc_probe,
.remove = fimc_remove,
- .id_table = fimc_driver_ids,
.driver = {
.of_match_table = fimc_of_match,
.name = FIMC_DRIVER_NAME,
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 4f494acd8..891625e77 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -446,8 +446,10 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
else
pd->fimc_bus_type = pd->sensor_bus_type;
- if (WARN_ON(index >= ARRAY_SIZE(fmd->sensor)))
+ if (WARN_ON(index >= ARRAY_SIZE(fmd->sensor))) {
+ of_node_put(rem);
return -EINVAL;
+ }
fmd->sensor[index].asd.match_type = V4L2_ASYNC_MATCH_OF;
fmd->sensor[index].asd.match.of.node = rem;
@@ -1130,7 +1132,7 @@ static int __fimc_md_modify_pipelines(struct media_entity *entity, bool enable,
media_entity_graph_walk_start(graph, entity);
while ((entity = media_entity_graph_walk_next(graph))) {
- if (!is_media_entity_v4l2_io(entity))
+ if (!is_media_entity_v4l2_video_device(entity))
continue;
ret = __fimc_md_modify_pipeline(entity, enable);
@@ -1145,7 +1147,7 @@ err:
media_entity_graph_walk_start(graph, entity_err);
while ((entity_err = media_entity_graph_walk_next(graph))) {
- if (!is_media_entity_v4l2_io(entity_err))
+ if (!is_media_entity_v4l2_video_device(entity_err))
continue;
__fimc_md_modify_pipeline(entity_err, !enable);
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index bd5c46c3d..bf954424e 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -757,8 +757,10 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
goto err;
state->index = endpoint.base.port - FIMC_INPUT_MIPI_CSI2_0;
- if (state->index >= CSIS_MAX_ENTITIES)
- return -ENXIO;
+ if (state->index >= CSIS_MAX_ENTITIES) {
+ ret = -ENXIO;
+ goto err;
+ }
/* Get MIPI CSI-2 bus configuration from the endpoint node. */
of_property_read_u32(node, "samsung,csis-hs-settle",
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index ac76d2901..1b1a95d54 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -251,7 +251,7 @@ static int isp_video_get_graph_data(struct isp_video *video,
if (entity == &video->video.entity)
continue;
- if (!is_media_entity_v4l2_io(entity))
+ if (!is_media_entity_v4l2_video_device(entity))
continue;
__video = to_isp_video(media_entity_to_video_device(entity));
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 74bd46ca7..612d1ea51 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -719,16 +719,12 @@ static int g2d_probe(struct platform_device *pdev)
def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
- if (!pdev->dev.of_node) {
- dev->variant = g2d_get_drv_data(pdev);
- } else {
- of_id = of_match_node(exynos_g2d_match, pdev->dev.of_node);
- if (!of_id) {
- ret = -ENODEV;
- goto unreg_video_dev;
- }
- dev->variant = (struct g2d_variant *)of_id->data;
+ of_id = of_match_node(exynos_g2d_match, pdev->dev.of_node);
+ if (!of_id) {
+ ret = -ENODEV;
+ goto unreg_video_dev;
}
+ dev->variant = (struct g2d_variant *)of_id->data;
return 0;
@@ -788,22 +784,9 @@ static const struct of_device_id exynos_g2d_match[] = {
};
MODULE_DEVICE_TABLE(of, exynos_g2d_match);
-static const struct platform_device_id g2d_driver_ids[] = {
- {
- .name = "s5p-g2d",
- .driver_data = (unsigned long)&g2d_drvdata_v3x,
- }, {
- .name = "s5p-g2d-v4x",
- .driver_data = (unsigned long)&g2d_drvdata_v4x,
- },
- {},
-};
-MODULE_DEVICE_TABLE(platform, g2d_driver_ids);
-
static struct platform_driver g2d_pdrv = {
.probe = g2d_probe,
.remove = g2d_remove,
- .id_table = g2d_driver_ids,
.driver = {
.name = G2D_NAME,
.of_match_table = exynos_g2d_match,
diff --git a/drivers/media/platform/s5p-g2d/g2d.h b/drivers/media/platform/s5p-g2d/g2d.h
index b0e52ab7e..e31df541a 100644
--- a/drivers/media/platform/s5p-g2d/g2d.h
+++ b/drivers/media/platform/s5p-g2d/g2d.h
@@ -89,8 +89,3 @@ void g2d_set_flip(struct g2d_dev *d, u32 r);
void g2d_set_v41_stretch(struct g2d_dev *d,
struct g2d_frame *src, struct g2d_frame *dst);
void g2d_set_cmd(struct g2d_dev *d, u32 c);
-
-static inline struct g2d_variant *g2d_get_drv_data(struct platform_device *pdev)
-{
- return (struct g2d_variant *)platform_get_device_id(pdev)->driver_data;
-}
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index c3b13a630..caa19b408 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1548,8 +1548,10 @@ static int exynos4_jpeg_get_output_buffer_size(struct s5p_jpeg_ctx *ctx,
struct v4l2_pix_format *pix = &f->fmt.pix;
u32 pix_fmt = f->fmt.pix.pixelformat;
int w = pix->width, h = pix->height, wh_align;
+ int padding = 0;
if (pix_fmt == V4L2_PIX_FMT_RGB32 ||
+ pix_fmt == V4L2_PIX_FMT_RGB565 ||
pix_fmt == V4L2_PIX_FMT_NV24 ||
pix_fmt == V4L2_PIX_FMT_NV42 ||
pix_fmt == V4L2_PIX_FMT_NV12 ||
@@ -1564,7 +1566,10 @@ static int exynos4_jpeg_get_output_buffer_size(struct s5p_jpeg_ctx *ctx,
&h, S5P_JPEG_MIN_HEIGHT,
S5P_JPEG_MAX_HEIGHT, wh_align);
- return w * h * fmt_depth >> 3;
+ if (ctx->jpeg->variant->version == SJPEG_EXYNOS4)
+ padding = PAGE_SIZE;
+
+ return (w * h * fmt_depth >> 3) + padding;
}
static int exynos3250_jpeg_try_downscale(struct s5p_jpeg_ctx *ctx,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index d83bbf529..9c5c0f7c1 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1489,27 +1489,6 @@ static struct s5p_mfc_variant mfc_drvdata_v8 = {
.fw_name[0] = "/*(DEBLOBBED)*/",
};
-static const struct platform_device_id mfc_driver_ids[] = {
- {
- .name = "s5p-mfc",
- .driver_data = (unsigned long)&mfc_drvdata_v5,
- }, {
- .name = "s5p-mfc-v5",
- .driver_data = (unsigned long)&mfc_drvdata_v5,
- }, {
- .name = "s5p-mfc-v6",
- .driver_data = (unsigned long)&mfc_drvdata_v6,
- }, {
- .name = "s5p-mfc-v7",
- .driver_data = (unsigned long)&mfc_drvdata_v7,
- }, {
- .name = "s5p-mfc-v8",
- .driver_data = (unsigned long)&mfc_drvdata_v8,
- },
- {},
-};
-MODULE_DEVICE_TABLE(platform, mfc_driver_ids);
-
static const struct of_device_id exynos_mfc_match[] = {
{
.compatible = "samsung,mfc-v5",
@@ -1531,24 +1510,18 @@ MODULE_DEVICE_TABLE(of, exynos_mfc_match);
static void *mfc_get_drv_data(struct platform_device *pdev)
{
struct s5p_mfc_variant *driver_data = NULL;
+ const struct of_device_id *match;
+
+ match = of_match_node(exynos_mfc_match, pdev->dev.of_node);
+ if (match)
+ driver_data = (struct s5p_mfc_variant *)match->data;
- if (pdev->dev.of_node) {
- const struct of_device_id *match;
- match = of_match_node(exynos_mfc_match,
- pdev->dev.of_node);
- if (match)
- driver_data = (struct s5p_mfc_variant *)match->data;
- } else {
- driver_data = (struct s5p_mfc_variant *)
- platform_get_device_id(pdev)->driver_data;
- }
return driver_data;
}
static struct platform_driver s5p_mfc_driver = {
.probe = s5p_mfc_probe,
.remove = s5p_mfc_remove,
- .id_table = mfc_driver_ids,
.driver = {
.name = S5P_MFC_NAME,
.pm = &s5p_mfc_pm_ops,
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
index 42cd2709c..4dd62a918 100644
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ b/drivers/media/platform/s5p-tv/mixer.h
@@ -300,7 +300,7 @@ void mxr_release_video(struct mxr_device *mdev);
struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx);
struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx);
struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
- int idx, char *name, struct mxr_layer_ops *ops);
+ int idx, char *name, const struct mxr_layer_ops *ops);
void mxr_base_layer_release(struct mxr_layer *layer);
void mxr_layer_release(struct mxr_layer *layer);
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
index 5ef677749..8a5d19469 100644
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ b/drivers/media/platform/s5p-tv/mixer_drv.c
@@ -146,7 +146,7 @@ int mxr_power_get(struct mxr_device *mdev)
/* returning 1 means that power is already enabled,
 * so zero (success) should be returned */
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
return 0;
}
diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
index db3163b23..d4d2564f7 100644
--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
{
struct mxr_layer *layer;
int ret;
- struct mxr_layer_ops ops = {
+ const struct mxr_layer_ops ops = {
.release = mxr_graph_layer_release,
.buffer_set = mxr_graph_buffer_set,
.stream_set = mxr_graph_stream_set,
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index d9e7f0302..7ab5578a0 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -1070,7 +1070,7 @@ static void mxr_vfd_release(struct video_device *vdev)
}
struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
- int idx, char *name, struct mxr_layer_ops *ops)
+ int idx, char *name, const struct mxr_layer_ops *ops)
{
struct mxr_layer *layer;
diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
index dd002a497..6fa6f673f 100644
--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
@@ -207,7 +207,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
{
struct mxr_layer *layer;
int ret;
- struct mxr_layer_ops ops = {
+ const struct mxr_layer_ops ops = {
.release = mxr_vp_layer_release,
.buffer_set = mxr_vp_buffer_set,
.stream_set = mxr_vp_stream_set,
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index 355298989..83029a485 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -28,7 +28,7 @@ config VIDEO_PXA27x
config VIDEO_RCAR_VIN
tristate "R-Car Video Input (VIN) support"
depends on VIDEO_DEV && SOC_CAMERA
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select SOC_CAMERA_SCALE_CROP
@@ -45,7 +45,7 @@ config VIDEO_SH_MOBILE_CSI2
config VIDEO_SH_MOBILE_CEU
tristate "SuperH Mobile CEU Interface driver"
depends on VIDEO_DEV && SOC_CAMERA && HAS_DMA && HAVE_CLK
- depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
+ depends on ARCH_SHMOBILE || COMPILE_TEST
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select SOC_CAMERA_SCALE_CROP
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 3b8edf458..3f9c1b845 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -1845,6 +1845,8 @@ static const struct of_device_id rcar_vin_of_table[] = {
{ .compatible = "renesas,vin-r8a7790", .data = (void *)RCAR_GEN2 },
{ .compatible = "renesas,vin-r8a7779", .data = (void *)RCAR_H1 },
{ .compatible = "renesas,vin-r8a7778", .data = (void *)RCAR_M1 },
+ { .compatible = "renesas,rcar-gen3-vin", .data = (void *)RCAR_GEN3 },
+ { .compatible = "renesas,rcar-gen2-vin", .data = (void *)RCAR_GEN2 },
{ },
};
MODULE_DEVICE_TABLE(of, rcar_vin_of_table);
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index d9d57645e..ca0082b5e 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -49,7 +49,7 @@
#define PID_TABLE_SIZE 1024
#define POLL_MSECS 50
-static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei);
+static int load_c8sectpfe_fw(struct c8sectpfei *fei);
#define TS_PKT_SIZE 188
#define HEADER_SIZE (4)
@@ -130,7 +130,7 @@ static void channel_swdemux_tsklet(unsigned long data)
writel(channel->back_buffer_busaddr, channel->irec +
DMA_PRDS_BUSRP_TP(0));
else
- writel(wp, channel->irec + DMA_PRDS_BUSWP_TP(0));
+ writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
}
static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
@@ -141,6 +141,7 @@ static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
struct channel_info *channel;
u32 tmp;
unsigned long *bitmap;
+ int ret;
switch (dvbdmxfeed->type) {
case DMX_TYPE_TS:
@@ -169,8 +170,9 @@ static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
}
if (!atomic_read(&fei->fw_loaded)) {
- dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__);
- return -EINVAL;
+ ret = load_c8sectpfe_fw(fei);
+ if (ret)
+ return ret;
}
mutex_lock(&fei->lock);
@@ -265,8 +267,9 @@ static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
unsigned long *bitmap;
if (!atomic_read(&fei->fw_loaded)) {
- dev_err(fei->dev, "%s: c8sectpfe fw not loaded\n", __func__);
- return -EINVAL;
+ ret = load_c8sectpfe_fw(fei);
+ if (ret)
+ return ret;
}
mutex_lock(&fei->lock);
@@ -585,7 +588,7 @@ static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
writel(tsin->pid_buffer_busaddr,
fei->io + PIDF_BASE(tsin->tsin_id));
- dev_info(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
+ dev_dbg(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
&tsin->pid_buffer_busaddr);
@@ -880,13 +883,6 @@ static int c8sectpfe_probe(struct platform_device *pdev)
goto err_clk_disable;
}
- /* ensure all other init has been done before requesting firmware */
- ret = load_c8sectpfe_fw_step1(fei);
- if (ret) {
- dev_err(dev, "Couldn't load slim core firmware\n");
- goto err_clk_disable;
- }
-
c8sectpfe_debugfs_init(fei);
return 0;
@@ -1091,15 +1087,14 @@ static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
phdr->p_memsz - phdr->p_filesz);
}
-static int load_slim_core_fw(const struct firmware *fw, void *context)
+static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
{
- struct c8sectpfei *fei = context;
Elf32_Ehdr *ehdr;
Elf32_Phdr *phdr;
u8 __iomem *dst;
int err = 0, i;
- if (!fw || !context)
+ if (!fw || !fei)
return -EINVAL;
ehdr = (Elf32_Ehdr *)fw->data;
@@ -1151,29 +1146,35 @@ static int load_slim_core_fw(const struct firmware *fw, void *context)
return err;
}
-static void load_c8sectpfe_fw_cb(const struct firmware *fw, void *context)
+static int load_c8sectpfe_fw(struct c8sectpfei *fei)
{
- struct c8sectpfei *fei = context;
+ const struct firmware *fw;
int err;
+ dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
+
+ err = reject_firmware(&fw, FIRMWARE_MEMDMA, fei->dev);
+ if (err)
+ return err;
+
err = c8sectpfe_elf_sanity_check(fei, fw);
if (err) {
dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
, err);
- goto err;
+ return err;
}
- err = load_slim_core_fw(fw, context);
+ err = load_slim_core_fw(fw, fei);
if (err) {
dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
- goto err;
+ return err;
}
/* now the firmware is loaded configure the input blocks */
err = configure_channels(fei);
if (err) {
dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
- goto err;
+ return err;
}
/*
@@ -1186,28 +1187,6 @@ static void load_c8sectpfe_fw_cb(const struct firmware *fw, void *context)
writel(0x1, fei->io + DMA_CPU_RUN);
atomic_set(&fei->fw_loaded, 1);
-err:
- complete_all(&fei->fw_ack);
-}
-
-static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei)
-{
- int err;
-
- dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
-
- init_completion(&fei->fw_ack);
- atomic_set(&fei->fw_loaded, 0);
-
- err = reject_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
- FIRMWARE_MEMDMA, fei->dev, GFP_KERNEL, fei,
- load_c8sectpfe_fw_cb);
-
- if (err) {
- dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err);
- complete_all(&fei->fw_ack);
- return err;
- }
return 0;
}
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index 0885e93ad..f535f5769 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -7,6 +7,7 @@ config VIDEO_VIVID
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select VIDEOBUF2_VMALLOC
+ select VIDEO_V4L2_TPG
default n
---help---
Enables a virtual video driver. This driver emulates a webcam,
diff --git a/drivers/media/platform/vivid/Makefile b/drivers/media/platform/vivid/Makefile
index 756fc1285..633c8a1b2 100644
--- a/drivers/media/platform/vivid/Makefile
+++ b/drivers/media/platform/vivid/Makefile
@@ -2,5 +2,5 @@ vivid-objs := vivid-core.o vivid-ctrls.o vivid-vid-common.o vivid-vbi-gen.o \
vivid-vid-cap.o vivid-vid-out.o vivid-kthread-cap.o vivid-kthread-out.o \
vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
vivid-rds-gen.o vivid-sdr-cap.o vivid-vbi-cap.o vivid-vbi-out.o \
- vivid-osd.o vivid-tpg.o vivid-tpg-colors.o
+ vivid-osd.o
obj-$(CONFIG_VIDEO_VIVID) += vivid.o
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index ec125becb..c14da84af 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -200,27 +200,12 @@ static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct vivid_dev *dev = video_drvdata(file);
- struct video_device *vdev = video_devdata(file);
strcpy(cap->driver, "vivid");
strcpy(cap->card, "vivid");
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", dev->v4l2_dev.name);
- if (vdev->vfl_type == VFL_TYPE_GRABBER && vdev->vfl_dir == VFL_DIR_RX)
- cap->device_caps = dev->vid_cap_caps;
- if (vdev->vfl_type == VFL_TYPE_GRABBER && vdev->vfl_dir == VFL_DIR_TX)
- cap->device_caps = dev->vid_out_caps;
- else if (vdev->vfl_type == VFL_TYPE_VBI && vdev->vfl_dir == VFL_DIR_RX)
- cap->device_caps = dev->vbi_cap_caps;
- else if (vdev->vfl_type == VFL_TYPE_VBI && vdev->vfl_dir == VFL_DIR_TX)
- cap->device_caps = dev->vbi_out_caps;
- else if (vdev->vfl_type == VFL_TYPE_SDR)
- cap->device_caps = dev->sdr_cap_caps;
- else if (vdev->vfl_type == VFL_TYPE_RADIO && vdev->vfl_dir == VFL_DIR_RX)
- cap->device_caps = dev->radio_rx_caps;
- else if (vdev->vfl_type == VFL_TYPE_RADIO && vdev->vfl_dir == VFL_DIR_TX)
- cap->device_caps = dev->radio_tx_caps;
cap->capabilities = dev->vid_cap_caps | dev->vid_out_caps |
dev->vbi_cap_caps | dev->vbi_out_caps |
dev->radio_rx_caps | dev->radio_tx_caps |
@@ -1135,6 +1120,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
strlcpy(vfd->name, "vivid-vid-cap", sizeof(vfd->name));
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->vid_cap_caps;
vfd->release = video_device_release_empty;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->queue = &dev->vb_vid_cap_q;
@@ -1160,6 +1146,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->vfl_dir = VFL_DIR_TX;
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->vid_out_caps;
vfd->release = video_device_release_empty;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->queue = &dev->vb_vid_out_q;
@@ -1184,6 +1171,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
strlcpy(vfd->name, "vivid-vbi-cap", sizeof(vfd->name));
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->vbi_cap_caps;
vfd->release = video_device_release_empty;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->queue = &dev->vb_vbi_cap_q;
@@ -1207,6 +1195,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->vfl_dir = VFL_DIR_TX;
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->vbi_out_caps;
vfd->release = video_device_release_empty;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->queue = &dev->vb_vbi_out_q;
@@ -1229,6 +1218,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
strlcpy(vfd->name, "vivid-sdr-cap", sizeof(vfd->name));
vfd->fops = &vivid_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->sdr_cap_caps;
vfd->release = video_device_release_empty;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->queue = &dev->vb_sdr_cap_q;
@@ -1247,6 +1237,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
strlcpy(vfd->name, "vivid-rad-rx", sizeof(vfd->name));
vfd->fops = &vivid_radio_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->radio_rx_caps;
vfd->release = video_device_release_empty;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->lock = &dev->mutex;
@@ -1265,6 +1256,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
vfd->vfl_dir = VFL_DIR_TX;
vfd->fops = &vivid_radio_fops;
vfd->ioctl_ops = &vivid_ioctl_ops;
+ vfd->device_caps = dev->radio_tx_caps;
vfd->release = video_device_release_empty;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->lock = &dev->mutex;
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 751c1ba39..776783bec 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -25,7 +25,7 @@
#include <media/v4l2-device.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ctrls.h>
-#include "vivid-tpg.h"
+#include <media/v4l2-tpg.h>
#include "vivid-rds-gen.h"
#include "vivid-vbi-gen.h"
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 903428194..3b8c10108 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -36,6 +36,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
+#include <media/v4l2-rect.h>
#include "vivid-core.h"
#include "vivid-vid-common.h"
@@ -184,15 +185,15 @@ static void vivid_precalc_copy_rects(struct vivid_dev *dev)
dev->compose_out.width, dev->compose_out.height
};
- dev->loop_vid_copy = rect_intersect(&dev->crop_cap, &dev->compose_out);
+ v4l2_rect_intersect(&dev->loop_vid_copy, &dev->crop_cap, &dev->compose_out);
dev->loop_vid_out = dev->loop_vid_copy;
- rect_scale(&dev->loop_vid_out, &dev->compose_out, &dev->crop_out);
+ v4l2_rect_scale(&dev->loop_vid_out, &dev->compose_out, &dev->crop_out);
dev->loop_vid_out.left += dev->crop_out.left;
dev->loop_vid_out.top += dev->crop_out.top;
dev->loop_vid_cap = dev->loop_vid_copy;
- rect_scale(&dev->loop_vid_cap, &dev->crop_cap, &dev->compose_cap);
+ v4l2_rect_scale(&dev->loop_vid_cap, &dev->crop_cap, &dev->compose_cap);
dprintk(dev, 1,
"loop_vid_copy: %dx%d@%dx%d loop_vid_out: %dx%d@%dx%d loop_vid_cap: %dx%d@%dx%d\n",
@@ -203,13 +204,13 @@ static void vivid_precalc_copy_rects(struct vivid_dev *dev)
dev->loop_vid_cap.width, dev->loop_vid_cap.height,
dev->loop_vid_cap.left, dev->loop_vid_cap.top);
- r_overlay = rect_intersect(&r_fb, &r_overlay);
+ v4l2_rect_intersect(&r_overlay, &r_fb, &r_overlay);
/* shift r_overlay to the same origin as compose_out */
r_overlay.left += dev->compose_out.left - dev->overlay_out_left;
r_overlay.top += dev->compose_out.top - dev->overlay_out_top;
- dev->loop_vid_overlay = rect_intersect(&r_overlay, &dev->loop_vid_copy);
+ v4l2_rect_intersect(&dev->loop_vid_overlay, &r_overlay, &dev->loop_vid_copy);
dev->loop_fb_copy = dev->loop_vid_overlay;
/* shift dev->loop_fb_copy back again to the fb origin */
@@ -217,7 +218,7 @@ static void vivid_precalc_copy_rects(struct vivid_dev *dev)
dev->loop_fb_copy.top -= dev->compose_out.top - dev->overlay_out_top;
dev->loop_vid_overlay_cap = dev->loop_vid_overlay;
- rect_scale(&dev->loop_vid_overlay_cap, &dev->crop_cap, &dev->compose_cap);
+ v4l2_rect_scale(&dev->loop_vid_overlay_cap, &dev->crop_cap, &dev->compose_cap);
dprintk(dev, 1,
"loop_fb_copy: %dx%d@%dx%d loop_vid_overlay: %dx%d@%dx%d loop_vid_overlay_cap: %dx%d@%dx%d\n",
diff --git a/drivers/media/platform/vivid/vivid-rds-gen.c b/drivers/media/platform/vivid/vivid-rds-gen.c
index c382343fd..53c7777dc 100644
--- a/drivers/media/platform/vivid/vivid-rds-gen.c
+++ b/drivers/media/platform/vivid/vivid-rds-gen.c
@@ -55,6 +55,7 @@ void vivid_rds_generate(struct vivid_rds_gen *rds)
{
struct v4l2_rds_data *data = rds->data;
unsigned grp;
+ unsigned idx;
struct tm tm;
unsigned date;
unsigned time;
@@ -73,24 +74,26 @@ void vivid_rds_generate(struct vivid_rds_gen *rds)
case 0 ... 3:
case 22 ... 25:
case 44 ... 47: /* Group 0B */
+ idx = (grp % 22) % 4;
data[1].lsb |= (rds->ta << 4) | (rds->ms << 3);
- data[1].lsb |= vivid_get_di(rds, grp % 22);
+ data[1].lsb |= vivid_get_di(rds, idx);
data[1].msb |= 1 << 3;
data[2].lsb = rds->picode & 0xff;
data[2].msb = rds->picode >> 8;
data[2].block = V4L2_RDS_BLOCK_C_ALT | (V4L2_RDS_BLOCK_C_ALT << 3);
- data[3].lsb = rds->psname[2 * (grp % 22) + 1];
- data[3].msb = rds->psname[2 * (grp % 22)];
+ data[3].lsb = rds->psname[2 * idx + 1];
+ data[3].msb = rds->psname[2 * idx];
break;
case 4 ... 19:
case 26 ... 41: /* Group 2A */
- data[1].lsb |= (grp - 4) % 22;
+ idx = ((grp - 4) % 22) % 16;
+ data[1].lsb |= idx;
data[1].msb |= 4 << 3;
- data[2].msb = rds->radiotext[4 * ((grp - 4) % 22)];
- data[2].lsb = rds->radiotext[4 * ((grp - 4) % 22) + 1];
+ data[2].msb = rds->radiotext[4 * idx];
+ data[2].lsb = rds->radiotext[4 * idx + 1];
data[2].block = V4L2_RDS_BLOCK_C | (V4L2_RDS_BLOCK_C << 3);
- data[3].msb = rds->radiotext[4 * ((grp - 4) % 22) + 2];
- data[3].lsb = rds->radiotext[4 * ((grp - 4) % 22) + 3];
+ data[3].msb = rds->radiotext[4 * idx + 2];
+ data[3].lsb = rds->radiotext[4 * idx + 3];
break;
case 56:
/*
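A side note on the index arithmetic above, as a standalone sketch (not part of the patch): for the Group 0B cases 0..3, 22..25 and 44..47, grp % 22 already falls in 0..3, and for the Group 2A cases (grp - 4) % 22 already falls in 0..15, so the extra "% 4" and "% 16" do not change any value; they only make the upper bounds of the psname[] and radiotext[] indices explicit to static checkers.

	static void check_rds_index_bounds(void)
	{
		unsigned grp;

		for (grp = 0; grp < 56; grp++) {
			if (grp <= 3 || (grp >= 22 && grp <= 25) ||
			    (grp >= 44 && grp <= 47))
				/* psname[2 * idx + 1]: idx must stay <= 3 */
				BUG_ON((grp % 22) % 4 != grp % 22);
			else if (grp <= 19 || (grp >= 26 && grp <= 41))
				/* radiotext[4 * idx + 3]: idx must stay <= 15 */
				BUG_ON(((grp - 4) % 22) % 16 != (grp - 4) % 22);
		}
	}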
diff --git a/drivers/media/platform/vivid/vivid-tpg-colors.h b/drivers/media/platform/vivid/vivid-tpg-colors.h
deleted file mode 100644
index 4e5a76a1e..000000000
--- a/drivers/media/platform/vivid/vivid-tpg-colors.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * vivid-color.h - Color definitions for the test pattern generator
- *
- * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _VIVID_COLORS_H_
-#define _VIVID_COLORS_H_
-
-struct color {
- unsigned char r, g, b;
-};
-
-struct color16 {
- int r, g, b;
-};
-
-enum tpg_color {
- TPG_COLOR_CSC_WHITE,
- TPG_COLOR_CSC_YELLOW,
- TPG_COLOR_CSC_CYAN,
- TPG_COLOR_CSC_GREEN,
- TPG_COLOR_CSC_MAGENTA,
- TPG_COLOR_CSC_RED,
- TPG_COLOR_CSC_BLUE,
- TPG_COLOR_CSC_BLACK,
- TPG_COLOR_75_YELLOW,
- TPG_COLOR_75_CYAN,
- TPG_COLOR_75_GREEN,
- TPG_COLOR_75_MAGENTA,
- TPG_COLOR_75_RED,
- TPG_COLOR_75_BLUE,
- TPG_COLOR_100_WHITE,
- TPG_COLOR_100_YELLOW,
- TPG_COLOR_100_CYAN,
- TPG_COLOR_100_GREEN,
- TPG_COLOR_100_MAGENTA,
- TPG_COLOR_100_RED,
- TPG_COLOR_100_BLUE,
- TPG_COLOR_100_BLACK,
- TPG_COLOR_TEXTFG,
- TPG_COLOR_TEXTBG,
- TPG_COLOR_RANDOM,
- TPG_COLOR_RAMP,
- TPG_COLOR_MAX = TPG_COLOR_RAMP + 256
-};
-
-extern const struct color tpg_colors[TPG_COLOR_MAX];
-extern const unsigned short tpg_rec709_to_linear[255 * 16 + 1];
-extern const unsigned short tpg_linear_to_rec709[255 * 16 + 1];
-extern const struct color16 tpg_csc_colors[V4L2_COLORSPACE_DCI_P3 + 1]
- [V4L2_XFER_FUNC_SMPTE2084 + 1]
- [TPG_COLOR_CSC_BLACK + 1];
-
-#endif
diff --git a/drivers/media/platform/vivid/vivid-tpg.h b/drivers/media/platform/vivid/vivid-tpg.h
deleted file mode 100644
index 93fbaee69..000000000
--- a/drivers/media/platform/vivid/vivid-tpg.h
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * vivid-tpg.h - Test Pattern Generator
- *
- * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _VIVID_TPG_H_
-#define _VIVID_TPG_H_
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/videodev2.h>
-
-#include "vivid-tpg-colors.h"
-
-enum tpg_pattern {
- TPG_PAT_75_COLORBAR,
- TPG_PAT_100_COLORBAR,
- TPG_PAT_CSC_COLORBAR,
- TPG_PAT_100_HCOLORBAR,
- TPG_PAT_100_COLORSQUARES,
- TPG_PAT_BLACK,
- TPG_PAT_WHITE,
- TPG_PAT_RED,
- TPG_PAT_GREEN,
- TPG_PAT_BLUE,
- TPG_PAT_CHECKERS_16X16,
- TPG_PAT_CHECKERS_2X2,
- TPG_PAT_CHECKERS_1X1,
- TPG_PAT_COLOR_CHECKERS_2X2,
- TPG_PAT_COLOR_CHECKERS_1X1,
- TPG_PAT_ALTERNATING_HLINES,
- TPG_PAT_ALTERNATING_VLINES,
- TPG_PAT_CROSS_1_PIXEL,
- TPG_PAT_CROSS_2_PIXELS,
- TPG_PAT_CROSS_10_PIXELS,
- TPG_PAT_GRAY_RAMP,
-
- /* Must be the last pattern */
- TPG_PAT_NOISE,
-};
-
-extern const char * const tpg_pattern_strings[];
-
-enum tpg_quality {
- TPG_QUAL_COLOR,
- TPG_QUAL_GRAY,
- TPG_QUAL_NOISE
-};
-
-enum tpg_video_aspect {
- TPG_VIDEO_ASPECT_IMAGE,
- TPG_VIDEO_ASPECT_4X3,
- TPG_VIDEO_ASPECT_14X9_CENTRE,
- TPG_VIDEO_ASPECT_16X9_CENTRE,
- TPG_VIDEO_ASPECT_16X9_ANAMORPHIC,
-};
-
-enum tpg_pixel_aspect {
- TPG_PIXEL_ASPECT_SQUARE,
- TPG_PIXEL_ASPECT_NTSC,
- TPG_PIXEL_ASPECT_PAL,
-};
-
-enum tpg_move_mode {
- TPG_MOVE_NEG_FAST,
- TPG_MOVE_NEG,
- TPG_MOVE_NEG_SLOW,
- TPG_MOVE_NONE,
- TPG_MOVE_POS_SLOW,
- TPG_MOVE_POS,
- TPG_MOVE_POS_FAST,
-};
-
-extern const char * const tpg_aspect_strings[];
-
-#define TPG_MAX_PLANES 3
-#define TPG_MAX_PAT_LINES 8
-
-struct tpg_data {
- /* Source frame size */
- unsigned src_width, src_height;
- /* Buffer height */
- unsigned buf_height;
- /* Scaled output frame size */
- unsigned scaled_width;
- u32 field;
- bool field_alternate;
- /* crop coordinates are frame-based */
- struct v4l2_rect crop;
- /* compose coordinates are format-based */
- struct v4l2_rect compose;
- /* border and square coordinates are frame-based */
- struct v4l2_rect border;
- struct v4l2_rect square;
-
- /* Color-related fields */
- enum tpg_quality qual;
- unsigned qual_offset;
- u8 alpha_component;
- bool alpha_red_only;
- u8 brightness;
- u8 contrast;
- u8 saturation;
- s16 hue;
- u32 fourcc;
- bool is_yuv;
- u32 colorspace;
- u32 xfer_func;
- u32 ycbcr_enc;
- /*
- * Stores the actual transfer function, i.e. will never be
- * V4L2_XFER_FUNC_DEFAULT.
- */
- u32 real_xfer_func;
- /*
- * Stores the actual Y'CbCr encoding, i.e. will never be
- * V4L2_YCBCR_ENC_DEFAULT.
- */
- u32 real_ycbcr_enc;
- u32 quantization;
- /*
- * Stores the actual quantization, i.e. will never be
- * V4L2_QUANTIZATION_DEFAULT.
- */
- u32 real_quantization;
- enum tpg_video_aspect vid_aspect;
- enum tpg_pixel_aspect pix_aspect;
- unsigned rgb_range;
- unsigned real_rgb_range;
- unsigned buffers;
- unsigned planes;
- bool interleaved;
- u8 vdownsampling[TPG_MAX_PLANES];
- u8 hdownsampling[TPG_MAX_PLANES];
- /*
- * horizontal positions must be ANDed with this value to enforce
- * correct boundaries for packed YUYV values.
- */
- unsigned hmask[TPG_MAX_PLANES];
- /* Used to store the colors in native format, either RGB or YUV */
- u8 colors[TPG_COLOR_MAX][3];
- u8 textfg[TPG_MAX_PLANES][8], textbg[TPG_MAX_PLANES][8];
- /* size in bytes for two pixels in each plane */
- unsigned twopixelsize[TPG_MAX_PLANES];
- unsigned bytesperline[TPG_MAX_PLANES];
-
- /* Configuration */
- enum tpg_pattern pattern;
- bool hflip;
- bool vflip;
- unsigned perc_fill;
- bool perc_fill_blank;
- bool show_border;
- bool show_square;
- bool insert_sav;
- bool insert_eav;
-
- /* Test pattern movement */
- enum tpg_move_mode mv_hor_mode;
- int mv_hor_count;
- int mv_hor_step;
- enum tpg_move_mode mv_vert_mode;
- int mv_vert_count;
- int mv_vert_step;
-
- bool recalc_colors;
- bool recalc_lines;
- bool recalc_square_border;
-
- /* Used to store TPG_MAX_PAT_LINES lines, each with up to two planes */
- unsigned max_line_width;
- u8 *lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
- u8 *downsampled_lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
- u8 *random_line[TPG_MAX_PLANES];
- u8 *contrast_line[TPG_MAX_PLANES];
- u8 *black_line[TPG_MAX_PLANES];
-};
-
-void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h);
-int tpg_alloc(struct tpg_data *tpg, unsigned max_w);
-void tpg_free(struct tpg_data *tpg);
-void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height,
- u32 field);
-void tpg_log_status(struct tpg_data *tpg);
-
-void tpg_set_font(const u8 *f);
-void tpg_gen_text(const struct tpg_data *tpg,
- u8 *basep[TPG_MAX_PLANES][2], int y, int x, char *text);
-void tpg_calc_text_basep(struct tpg_data *tpg,
- u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf);
-unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line);
-void tpg_fill_plane_buffer(struct tpg_data *tpg, v4l2_std_id std,
- unsigned p, u8 *vbuf);
-void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std,
- unsigned p, u8 *vbuf);
-bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc);
-void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
- const struct v4l2_rect *compose);
-
-static inline void tpg_s_pattern(struct tpg_data *tpg, enum tpg_pattern pattern)
-{
- if (tpg->pattern == pattern)
- return;
- tpg->pattern = pattern;
- tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_quality(struct tpg_data *tpg,
- enum tpg_quality qual, unsigned qual_offset)
-{
- if (tpg->qual == qual && tpg->qual_offset == qual_offset)
- return;
- tpg->qual = qual;
- tpg->qual_offset = qual_offset;
- tpg->recalc_colors = true;
-}
-
-static inline enum tpg_quality tpg_g_quality(const struct tpg_data *tpg)
-{
- return tpg->qual;
-}
-
-static inline void tpg_s_alpha_component(struct tpg_data *tpg,
- u8 alpha_component)
-{
- if (tpg->alpha_component == alpha_component)
- return;
- tpg->alpha_component = alpha_component;
- tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_alpha_mode(struct tpg_data *tpg,
- bool red_only)
-{
- if (tpg->alpha_red_only == red_only)
- return;
- tpg->alpha_red_only = red_only;
- tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_brightness(struct tpg_data *tpg,
- u8 brightness)
-{
- if (tpg->brightness == brightness)
- return;
- tpg->brightness = brightness;
- tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_contrast(struct tpg_data *tpg,
- u8 contrast)
-{
- if (tpg->contrast == contrast)
- return;
- tpg->contrast = contrast;
- tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_saturation(struct tpg_data *tpg,
- u8 saturation)
-{
- if (tpg->saturation == saturation)
- return;
- tpg->saturation = saturation;
- tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_hue(struct tpg_data *tpg,
- s16 hue)
-{
- if (tpg->hue == hue)
- return;
- tpg->hue = hue;
- tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_rgb_range(struct tpg_data *tpg,
- unsigned rgb_range)
-{
- if (tpg->rgb_range == rgb_range)
- return;
- tpg->rgb_range = rgb_range;
- tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_real_rgb_range(struct tpg_data *tpg,
- unsigned rgb_range)
-{
- if (tpg->real_rgb_range == rgb_range)
- return;
- tpg->real_rgb_range = rgb_range;
- tpg->recalc_colors = true;
-}
-
-static inline void tpg_s_colorspace(struct tpg_data *tpg, u32 colorspace)
-{
- if (tpg->colorspace == colorspace)
- return;
- tpg->colorspace = colorspace;
- tpg->recalc_colors = true;
-}
-
-static inline u32 tpg_g_colorspace(const struct tpg_data *tpg)
-{
- return tpg->colorspace;
-}
-
-static inline void tpg_s_ycbcr_enc(struct tpg_data *tpg, u32 ycbcr_enc)
-{
- if (tpg->ycbcr_enc == ycbcr_enc)
- return;
- tpg->ycbcr_enc = ycbcr_enc;
- tpg->recalc_colors = true;
-}
-
-static inline u32 tpg_g_ycbcr_enc(const struct tpg_data *tpg)
-{
- return tpg->ycbcr_enc;
-}
-
-static inline void tpg_s_xfer_func(struct tpg_data *tpg, u32 xfer_func)
-{
- if (tpg->xfer_func == xfer_func)
- return;
- tpg->xfer_func = xfer_func;
- tpg->recalc_colors = true;
-}
-
-static inline u32 tpg_g_xfer_func(const struct tpg_data *tpg)
-{
- return tpg->xfer_func;
-}
-
-static inline void tpg_s_quantization(struct tpg_data *tpg, u32 quantization)
-{
- if (tpg->quantization == quantization)
- return;
- tpg->quantization = quantization;
- tpg->recalc_colors = true;
-}
-
-static inline u32 tpg_g_quantization(const struct tpg_data *tpg)
-{
- return tpg->quantization;
-}
-
-static inline unsigned tpg_g_buffers(const struct tpg_data *tpg)
-{
- return tpg->buffers;
-}
-
-static inline unsigned tpg_g_planes(const struct tpg_data *tpg)
-{
- return tpg->interleaved ? 1 : tpg->planes;
-}
-
-static inline bool tpg_g_interleaved(const struct tpg_data *tpg)
-{
- return tpg->interleaved;
-}
-
-static inline unsigned tpg_g_twopixelsize(const struct tpg_data *tpg, unsigned plane)
-{
- return tpg->twopixelsize[plane];
-}
-
-static inline unsigned tpg_hdiv(const struct tpg_data *tpg,
- unsigned plane, unsigned x)
-{
- return ((x / tpg->hdownsampling[plane]) & tpg->hmask[plane]) *
- tpg->twopixelsize[plane] / 2;
-}
-
-static inline unsigned tpg_hscale(const struct tpg_data *tpg, unsigned x)
-{
- return (x * tpg->scaled_width) / tpg->src_width;
-}
-
-static inline unsigned tpg_hscale_div(const struct tpg_data *tpg,
- unsigned plane, unsigned x)
-{
- return tpg_hdiv(tpg, plane, tpg_hscale(tpg, x));
-}
-
-static inline unsigned tpg_g_bytesperline(const struct tpg_data *tpg, unsigned plane)
-{
- return tpg->bytesperline[plane];
-}
-
-static inline void tpg_s_bytesperline(struct tpg_data *tpg, unsigned plane, unsigned bpl)
-{
- unsigned p;
-
- if (tpg->buffers > 1) {
- tpg->bytesperline[plane] = bpl;
- return;
- }
-
- for (p = 0; p < tpg_g_planes(tpg); p++) {
- unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];
-
- tpg->bytesperline[p] = plane_w / tpg->hdownsampling[p];
- }
- if (tpg_g_interleaved(tpg))
- tpg->bytesperline[1] = tpg->bytesperline[0];
-}
-
-
-static inline unsigned tpg_g_line_width(const struct tpg_data *tpg, unsigned plane)
-{
- unsigned w = 0;
- unsigned p;
-
- if (tpg->buffers > 1)
- return tpg_g_bytesperline(tpg, plane);
- for (p = 0; p < tpg_g_planes(tpg); p++) {
- unsigned plane_w = tpg_g_bytesperline(tpg, p);
-
- w += plane_w / tpg->vdownsampling[p];
- }
- return w;
-}
-
-static inline unsigned tpg_calc_line_width(const struct tpg_data *tpg,
- unsigned plane, unsigned bpl)
-{
- unsigned w = 0;
- unsigned p;
-
- if (tpg->buffers > 1)
- return bpl;
- for (p = 0; p < tpg_g_planes(tpg); p++) {
- unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];
-
- plane_w /= tpg->hdownsampling[p];
- w += plane_w / tpg->vdownsampling[p];
- }
- return w;
-}
-
-static inline unsigned tpg_calc_plane_size(const struct tpg_data *tpg, unsigned plane)
-{
- if (plane >= tpg_g_planes(tpg))
- return 0;
-
- return tpg_g_bytesperline(tpg, plane) * tpg->buf_height /
- tpg->vdownsampling[plane];
-}
-
-static inline void tpg_s_buf_height(struct tpg_data *tpg, unsigned h)
-{
- tpg->buf_height = h;
-}
-
-static inline void tpg_s_field(struct tpg_data *tpg, unsigned field, bool alternate)
-{
- tpg->field = field;
- tpg->field_alternate = alternate;
-}
-
-static inline void tpg_s_perc_fill(struct tpg_data *tpg,
- unsigned perc_fill)
-{
- tpg->perc_fill = perc_fill;
-}
-
-static inline unsigned tpg_g_perc_fill(const struct tpg_data *tpg)
-{
- return tpg->perc_fill;
-}
-
-static inline void tpg_s_perc_fill_blank(struct tpg_data *tpg,
- bool perc_fill_blank)
-{
- tpg->perc_fill_blank = perc_fill_blank;
-}
-
-static inline void tpg_s_video_aspect(struct tpg_data *tpg,
- enum tpg_video_aspect vid_aspect)
-{
- if (tpg->vid_aspect == vid_aspect)
- return;
- tpg->vid_aspect = vid_aspect;
- tpg->recalc_square_border = true;
-}
-
-static inline enum tpg_video_aspect tpg_g_video_aspect(const struct tpg_data *tpg)
-{
- return tpg->vid_aspect;
-}
-
-static inline void tpg_s_pixel_aspect(struct tpg_data *tpg,
- enum tpg_pixel_aspect pix_aspect)
-{
- if (tpg->pix_aspect == pix_aspect)
- return;
- tpg->pix_aspect = pix_aspect;
- tpg->recalc_square_border = true;
-}
-
-static inline void tpg_s_show_border(struct tpg_data *tpg,
- bool show_border)
-{
- tpg->show_border = show_border;
-}
-
-static inline void tpg_s_show_square(struct tpg_data *tpg,
- bool show_square)
-{
- tpg->show_square = show_square;
-}
-
-static inline void tpg_s_insert_sav(struct tpg_data *tpg, bool insert_sav)
-{
- tpg->insert_sav = insert_sav;
-}
-
-static inline void tpg_s_insert_eav(struct tpg_data *tpg, bool insert_eav)
-{
- tpg->insert_eav = insert_eav;
-}
-
-void tpg_update_mv_step(struct tpg_data *tpg);
-
-static inline void tpg_s_mv_hor_mode(struct tpg_data *tpg,
- enum tpg_move_mode mv_hor_mode)
-{
- tpg->mv_hor_mode = mv_hor_mode;
- tpg_update_mv_step(tpg);
-}
-
-static inline void tpg_s_mv_vert_mode(struct tpg_data *tpg,
- enum tpg_move_mode mv_vert_mode)
-{
- tpg->mv_vert_mode = mv_vert_mode;
- tpg_update_mv_step(tpg);
-}
-
-static inline void tpg_init_mv_count(struct tpg_data *tpg)
-{
- tpg->mv_hor_count = tpg->mv_vert_count = 0;
-}
-
-static inline void tpg_update_mv_count(struct tpg_data *tpg, bool frame_is_field)
-{
- tpg->mv_hor_count += tpg->mv_hor_step * (frame_is_field ? 1 : 2);
- tpg->mv_vert_count += tpg->mv_vert_step * (frame_is_field ? 1 : 2);
-}
-
-static inline void tpg_s_hflip(struct tpg_data *tpg, bool hflip)
-{
- if (tpg->hflip == hflip)
- return;
- tpg->hflip = hflip;
- tpg_update_mv_step(tpg);
- tpg->recalc_lines = true;
-}
-
-static inline bool tpg_g_hflip(const struct tpg_data *tpg)
-{
- return tpg->hflip;
-}
-
-static inline void tpg_s_vflip(struct tpg_data *tpg, bool vflip)
-{
- tpg->vflip = vflip;
-}
-
-static inline bool tpg_g_vflip(const struct tpg_data *tpg)
-{
- return tpg->vflip;
-}
-
-static inline bool tpg_pattern_is_static(const struct tpg_data *tpg)
-{
- return tpg->pattern != TPG_PAT_NOISE &&
- tpg->mv_hor_mode == TPG_MOVE_NONE &&
- tpg->mv_vert_mode == TPG_MOVE_NONE;
-}
-
-#endif
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index b84f081c1..4f730f355 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -26,6 +26,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-rect.h>
#include "vivid-core.h"
#include "vivid-vid-common.h"
@@ -590,16 +591,16 @@ int vivid_try_fmt_vid_cap(struct file *file, void *priv,
} else {
struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };
- rect_set_min_size(&r, &vivid_min_rect);
- rect_set_max_size(&r, &vivid_max_rect);
+ v4l2_rect_set_min_size(&r, &vivid_min_rect);
+ v4l2_rect_set_max_size(&r, &vivid_max_rect);
if (dev->has_scaler_cap && !dev->has_compose_cap) {
struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };
- rect_set_max_size(&r, &max_r);
+ v4l2_rect_set_max_size(&r, &max_r);
} else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
- rect_set_max_size(&r, &dev->src_rect);
+ v4l2_rect_set_max_size(&r, &dev->src_rect);
} else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
- rect_set_min_size(&r, &dev->src_rect);
+ v4l2_rect_set_min_size(&r, &dev->src_rect);
}
mp->width = r.width;
mp->height = r.height / factor;
@@ -668,7 +669,7 @@ int vivid_s_fmt_vid_cap(struct file *file, void *priv,
if (dev->has_scaler_cap) {
if (dev->has_compose_cap)
- rect_map_inside(compose, &r);
+ v4l2_rect_map_inside(compose, &r);
else
*compose = r;
if (dev->has_crop_cap && !dev->has_compose_cap) {
@@ -683,9 +684,9 @@ int vivid_s_fmt_vid_cap(struct file *file, void *priv,
factor * r.height * MAX_ZOOM
};
- rect_set_min_size(crop, &min_r);
- rect_set_max_size(crop, &max_r);
- rect_map_inside(crop, &dev->crop_bounds_cap);
+ v4l2_rect_set_min_size(crop, &min_r);
+ v4l2_rect_set_max_size(crop, &max_r);
+ v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
} else if (dev->has_crop_cap) {
struct v4l2_rect min_r = {
0, 0,
@@ -698,27 +699,27 @@ int vivid_s_fmt_vid_cap(struct file *file, void *priv,
factor * compose->height * MAX_ZOOM
};
- rect_set_min_size(crop, &min_r);
- rect_set_max_size(crop, &max_r);
- rect_map_inside(crop, &dev->crop_bounds_cap);
+ v4l2_rect_set_min_size(crop, &min_r);
+ v4l2_rect_set_max_size(crop, &max_r);
+ v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
}
} else if (dev->has_crop_cap && !dev->has_compose_cap) {
r.height *= factor;
- rect_set_size_to(crop, &r);
- rect_map_inside(crop, &dev->crop_bounds_cap);
+ v4l2_rect_set_size_to(crop, &r);
+ v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
r = *crop;
r.height /= factor;
- rect_set_size_to(compose, &r);
+ v4l2_rect_set_size_to(compose, &r);
} else if (!dev->has_crop_cap) {
- rect_map_inside(compose, &r);
+ v4l2_rect_map_inside(compose, &r);
} else {
r.height *= factor;
- rect_set_max_size(crop, &r);
- rect_map_inside(crop, &dev->crop_bounds_cap);
+ v4l2_rect_set_max_size(crop, &r);
+ v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
compose->top *= factor;
compose->height *= factor;
- rect_set_size_to(compose, crop);
- rect_map_inside(compose, &r);
+ v4l2_rect_set_size_to(compose, crop);
+ v4l2_rect_map_inside(compose, &r);
compose->top /= factor;
compose->height /= factor;
}
@@ -735,9 +736,9 @@ int vivid_s_fmt_vid_cap(struct file *file, void *priv,
} else {
struct v4l2_rect r = { 0, 0, mp->width, mp->height };
- rect_set_size_to(compose, &r);
+ v4l2_rect_set_size_to(compose, &r);
r.height *= factor;
- rect_set_size_to(crop, &r);
+ v4l2_rect_set_size_to(crop, &r);
}
dev->fmt_cap_rect.width = mp->width;
@@ -886,9 +887,9 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
ret = vivid_vid_adjust_sel(s->flags, &s->r);
if (ret)
return ret;
- rect_set_min_size(&s->r, &vivid_min_rect);
- rect_set_max_size(&s->r, &dev->src_rect);
- rect_map_inside(&s->r, &dev->crop_bounds_cap);
+ v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
+ v4l2_rect_set_max_size(&s->r, &dev->src_rect);
+ v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
s->r.top /= factor;
s->r.height /= factor;
if (dev->has_scaler_cap) {
@@ -904,36 +905,36 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
s->r.height / MAX_ZOOM
};
- rect_set_min_size(&fmt, &min_rect);
+ v4l2_rect_set_min_size(&fmt, &min_rect);
if (!dev->has_compose_cap)
- rect_set_max_size(&fmt, &max_rect);
- if (!rect_same_size(&dev->fmt_cap_rect, &fmt) &&
+ v4l2_rect_set_max_size(&fmt, &max_rect);
+ if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
vb2_is_busy(&dev->vb_vid_cap_q))
return -EBUSY;
if (dev->has_compose_cap) {
- rect_set_min_size(compose, &min_rect);
- rect_set_max_size(compose, &max_rect);
+ v4l2_rect_set_min_size(compose, &min_rect);
+ v4l2_rect_set_max_size(compose, &max_rect);
}
dev->fmt_cap_rect = fmt;
tpg_s_buf_height(&dev->tpg, fmt.height);
} else if (dev->has_compose_cap) {
struct v4l2_rect fmt = dev->fmt_cap_rect;
- rect_set_min_size(&fmt, &s->r);
- if (!rect_same_size(&dev->fmt_cap_rect, &fmt) &&
+ v4l2_rect_set_min_size(&fmt, &s->r);
+ if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
vb2_is_busy(&dev->vb_vid_cap_q))
return -EBUSY;
dev->fmt_cap_rect = fmt;
tpg_s_buf_height(&dev->tpg, fmt.height);
- rect_set_size_to(compose, &s->r);
- rect_map_inside(compose, &dev->fmt_cap_rect);
+ v4l2_rect_set_size_to(compose, &s->r);
+ v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
} else {
- if (!rect_same_size(&s->r, &dev->fmt_cap_rect) &&
+ if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
vb2_is_busy(&dev->vb_vid_cap_q))
return -EBUSY;
- rect_set_size_to(&dev->fmt_cap_rect, &s->r);
- rect_set_size_to(compose, &s->r);
- rect_map_inside(compose, &dev->fmt_cap_rect);
+ v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
+ v4l2_rect_set_size_to(compose, &s->r);
+ v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
}
s->r.top *= factor;
@@ -946,8 +947,8 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
ret = vivid_vid_adjust_sel(s->flags, &s->r);
if (ret)
return ret;
- rect_set_min_size(&s->r, &vivid_min_rect);
- rect_set_max_size(&s->r, &dev->fmt_cap_rect);
+ v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
+ v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
if (dev->has_scaler_cap) {
struct v4l2_rect max_rect = {
0, 0,
@@ -955,7 +956,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
(dev->src_rect.height / factor) * MAX_ZOOM
};
- rect_set_max_size(&s->r, &max_rect);
+ v4l2_rect_set_max_size(&s->r, &max_rect);
if (dev->has_crop_cap) {
struct v4l2_rect min_rect = {
0, 0,
@@ -968,23 +969,23 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
(s->r.height * factor) * MAX_ZOOM
};
- rect_set_min_size(crop, &min_rect);
- rect_set_max_size(crop, &max_rect);
- rect_map_inside(crop, &dev->crop_bounds_cap);
+ v4l2_rect_set_min_size(crop, &min_rect);
+ v4l2_rect_set_max_size(crop, &max_rect);
+ v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
}
} else if (dev->has_crop_cap) {
s->r.top *= factor;
s->r.height *= factor;
- rect_set_max_size(&s->r, &dev->src_rect);
- rect_set_size_to(crop, &s->r);
- rect_map_inside(crop, &dev->crop_bounds_cap);
+ v4l2_rect_set_max_size(&s->r, &dev->src_rect);
+ v4l2_rect_set_size_to(crop, &s->r);
+ v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
s->r.top /= factor;
s->r.height /= factor;
} else {
- rect_set_size_to(&s->r, &dev->src_rect);
+ v4l2_rect_set_size_to(&s->r, &dev->src_rect);
s->r.height /= factor;
}
- rect_map_inside(&s->r, &dev->fmt_cap_rect);
+ v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
if (dev->bitmap_cap && (compose->width != s->r.width ||
compose->height != s->r.height)) {
kfree(dev->bitmap_cap);
@@ -1124,7 +1125,7 @@ int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
for (j = i + 1; j < win->clipcount; j++) {
struct v4l2_rect *r2 = &dev->try_clips_cap[j].c;
- if (rect_overlap(r1, r2))
+ if (v4l2_rect_overlap(r1, r2))
return -EINVAL;
}
}
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index b0d4e3a0a..39ea22847 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -653,103 +653,6 @@ int fmt_sp2mp_func(struct file *file, void *priv,
return ret;
}
-/* v4l2_rect helper function: copy the width/height values */
-void rect_set_size_to(struct v4l2_rect *r, const struct v4l2_rect *size)
-{
- r->width = size->width;
- r->height = size->height;
-}
-
-/* v4l2_rect helper function: width and height of r should be >= min_size */
-void rect_set_min_size(struct v4l2_rect *r, const struct v4l2_rect *min_size)
-{
- if (r->width < min_size->width)
- r->width = min_size->width;
- if (r->height < min_size->height)
- r->height = min_size->height;
-}
-
-/* v4l2_rect helper function: width and height of r should be <= max_size */
-void rect_set_max_size(struct v4l2_rect *r, const struct v4l2_rect *max_size)
-{
- if (r->width > max_size->width)
- r->width = max_size->width;
- if (r->height > max_size->height)
- r->height = max_size->height;
-}
-
-/* v4l2_rect helper function: r should be inside boundary */
-void rect_map_inside(struct v4l2_rect *r, const struct v4l2_rect *boundary)
-{
- rect_set_max_size(r, boundary);
- if (r->left < boundary->left)
- r->left = boundary->left;
- if (r->top < boundary->top)
- r->top = boundary->top;
- if (r->left + r->width > boundary->width)
- r->left = boundary->width - r->width;
- if (r->top + r->height > boundary->height)
- r->top = boundary->height - r->height;
-}
-
-/* v4l2_rect helper function: return true if r1 has the same size as r2 */
-bool rect_same_size(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
-{
- return r1->width == r2->width && r1->height == r2->height;
-}
-
-/* v4l2_rect helper function: calculate the intersection of two rects */
-struct v4l2_rect rect_intersect(const struct v4l2_rect *a, const struct v4l2_rect *b)
-{
- struct v4l2_rect r;
- int right, bottom;
-
- r.top = max(a->top, b->top);
- r.left = max(a->left, b->left);
- bottom = min(a->top + a->height, b->top + b->height);
- right = min(a->left + a->width, b->left + b->width);
- r.height = max(0, bottom - r.top);
- r.width = max(0, right - r.left);
- return r;
-}
-
-/*
- * v4l2_rect helper function: scale rect r by to->width / from->width and
- * to->height / from->height.
- */
-void rect_scale(struct v4l2_rect *r, const struct v4l2_rect *from,
- const struct v4l2_rect *to)
-{
- if (from->width == 0 || from->height == 0) {
- r->left = r->top = r->width = r->height = 0;
- return;
- }
- r->left = (((r->left - from->left) * to->width) / from->width) & ~1;
- r->width = ((r->width * to->width) / from->width) & ~1;
- r->top = ((r->top - from->top) * to->height) / from->height;
- r->height = (r->height * to->height) / from->height;
-}
-
-bool rect_overlap(const struct v4l2_rect *r1, const struct v4l2_rect *r2)
-{
- /*
- * IF the left side of r1 is to the right of the right side of r2 OR
- * the left side of r2 is to the right of the right side of r1 THEN
- * they do not overlap.
- */
- if (r1->left >= r2->left + r2->width ||
- r2->left >= r1->left + r1->width)
- return false;
- /*
- * IF the top side of r1 is below the bottom of r2 OR
- * the top side of r2 is below the bottom of r1 THEN
- * they do not overlap.
- */
- if (r1->top >= r2->top + r2->height ||
- r2->top >= r1->top + r1->height)
- return false;
- return true;
-}
int vivid_vid_adjust_sel(unsigned flags, struct v4l2_rect *r)
{
unsigned w = r->width;
diff --git a/drivers/media/platform/vivid/vivid-vid-common.h b/drivers/media/platform/vivid/vivid-vid-common.h
index 3ec4fa85c..4b6175eab 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.h
+++ b/drivers/media/platform/vivid/vivid-vid-common.h
@@ -37,15 +37,6 @@ const struct vivid_fmt *vivid_get_format(struct vivid_dev *dev, u32 pixelformat)
bool vivid_vid_can_loop(struct vivid_dev *dev);
void vivid_send_source_change(struct vivid_dev *dev, unsigned type);
-bool rect_overlap(const struct v4l2_rect *r1, const struct v4l2_rect *r2);
-void rect_set_size_to(struct v4l2_rect *r, const struct v4l2_rect *size);
-void rect_set_min_size(struct v4l2_rect *r, const struct v4l2_rect *min_size);
-void rect_set_max_size(struct v4l2_rect *r, const struct v4l2_rect *max_size);
-void rect_map_inside(struct v4l2_rect *r, const struct v4l2_rect *boundary);
-bool rect_same_size(const struct v4l2_rect *r1, const struct v4l2_rect *r2);
-struct v4l2_rect rect_intersect(const struct v4l2_rect *a, const struct v4l2_rect *b);
-void rect_scale(struct v4l2_rect *r, const struct v4l2_rect *from,
- const struct v4l2_rect *to);
int vivid_vid_adjust_sel(unsigned flags, struct v4l2_rect *r);
int vivid_enum_fmt_vid(struct file *file, void *priv, struct v4l2_fmtdesc *f);
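With the driver-local prototypes gone, callers include media/v4l2-rect.h, where the same helpers live as static inlines under a v4l2_rect_ prefix (signatures as used by the call sites in this patch). A minimal usage sketch with made-up rectangle names:

	#include <media/v4l2-rect.h>

	static void example_fit_rect(struct v4l2_rect *r,
				     const struct v4l2_rect *min_size,
				     const struct v4l2_rect *max_size,
				     const struct v4l2_rect *bounds)
	{
		v4l2_rect_set_min_size(r, min_size);	/* grow to at least min_size */
		v4l2_rect_set_max_size(r, max_size);	/* shrink to at most max_size */
		v4l2_rect_map_inside(r, bounds);	/* shift the result inside bounds */
	}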
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index 64e4d6648..f92f4496d 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -25,6 +25,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-rect.h>
#include "vivid-core.h"
#include "vivid-vid-common.h"
@@ -376,16 +377,16 @@ int vivid_try_fmt_vid_out(struct file *file, void *priv,
} else {
struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };
- rect_set_min_size(&r, &vivid_min_rect);
- rect_set_max_size(&r, &vivid_max_rect);
+ v4l2_rect_set_min_size(&r, &vivid_min_rect);
+ v4l2_rect_set_max_size(&r, &vivid_max_rect);
if (dev->has_scaler_out && !dev->has_crop_out) {
struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };
- rect_set_max_size(&r, &max_r);
+ v4l2_rect_set_max_size(&r, &max_r);
} else if (!dev->has_scaler_out && dev->has_compose_out && !dev->has_crop_out) {
- rect_set_max_size(&r, &dev->sink_rect);
+ v4l2_rect_set_max_size(&r, &dev->sink_rect);
} else if (!dev->has_scaler_out && !dev->has_compose_out) {
- rect_set_min_size(&r, &dev->sink_rect);
+ v4l2_rect_set_min_size(&r, &dev->sink_rect);
}
mp->width = r.width;
mp->height = r.height / factor;
@@ -473,7 +474,7 @@ int vivid_s_fmt_vid_out(struct file *file, void *priv,
if (dev->has_scaler_out) {
if (dev->has_crop_out)
- rect_map_inside(crop, &r);
+ v4l2_rect_map_inside(crop, &r);
else
*crop = r;
if (dev->has_compose_out && !dev->has_crop_out) {
@@ -488,9 +489,9 @@ int vivid_s_fmt_vid_out(struct file *file, void *priv,
factor * r.height * MAX_ZOOM
};
- rect_set_min_size(compose, &min_r);
- rect_set_max_size(compose, &max_r);
- rect_map_inside(compose, &dev->compose_bounds_out);
+ v4l2_rect_set_min_size(compose, &min_r);
+ v4l2_rect_set_max_size(compose, &max_r);
+ v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
} else if (dev->has_compose_out) {
struct v4l2_rect min_r = {
0, 0,
@@ -503,36 +504,36 @@ int vivid_s_fmt_vid_out(struct file *file, void *priv,
factor * crop->height * MAX_ZOOM
};
- rect_set_min_size(compose, &min_r);
- rect_set_max_size(compose, &max_r);
- rect_map_inside(compose, &dev->compose_bounds_out);
+ v4l2_rect_set_min_size(compose, &min_r);
+ v4l2_rect_set_max_size(compose, &max_r);
+ v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
}
} else if (dev->has_compose_out && !dev->has_crop_out) {
- rect_set_size_to(crop, &r);
+ v4l2_rect_set_size_to(crop, &r);
r.height *= factor;
- rect_set_size_to(compose, &r);
- rect_map_inside(compose, &dev->compose_bounds_out);
+ v4l2_rect_set_size_to(compose, &r);
+ v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
} else if (!dev->has_compose_out) {
- rect_map_inside(crop, &r);
+ v4l2_rect_map_inside(crop, &r);
r.height /= factor;
- rect_set_size_to(compose, &r);
+ v4l2_rect_set_size_to(compose, &r);
} else {
r.height *= factor;
- rect_set_max_size(compose, &r);
- rect_map_inside(compose, &dev->compose_bounds_out);
+ v4l2_rect_set_max_size(compose, &r);
+ v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
crop->top *= factor;
crop->height *= factor;
- rect_set_size_to(crop, compose);
- rect_map_inside(crop, &r);
+ v4l2_rect_set_size_to(crop, compose);
+ v4l2_rect_map_inside(crop, &r);
crop->top /= factor;
crop->height /= factor;
}
} else {
struct v4l2_rect r = { 0, 0, mp->width, mp->height };
- rect_set_size_to(crop, &r);
+ v4l2_rect_set_size_to(crop, &r);
r.height /= factor;
- rect_set_size_to(compose, &r);
+ v4l2_rect_set_size_to(compose, &r);
}
dev->fmt_out_rect.width = mp->width;
@@ -683,8 +684,8 @@ int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection
ret = vivid_vid_adjust_sel(s->flags, &s->r);
if (ret)
return ret;
- rect_set_min_size(&s->r, &vivid_min_rect);
- rect_set_max_size(&s->r, &dev->fmt_out_rect);
+ v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
+ v4l2_rect_set_max_size(&s->r, &dev->fmt_out_rect);
if (dev->has_scaler_out) {
struct v4l2_rect max_rect = {
0, 0,
@@ -692,7 +693,7 @@ int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection
(dev->sink_rect.height / factor) * MAX_ZOOM
};
- rect_set_max_size(&s->r, &max_rect);
+ v4l2_rect_set_max_size(&s->r, &max_rect);
if (dev->has_compose_out) {
struct v4l2_rect min_rect = {
0, 0,
@@ -705,23 +706,23 @@ int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection
(s->r.height * factor) * MAX_ZOOM
};
- rect_set_min_size(compose, &min_rect);
- rect_set_max_size(compose, &max_rect);
- rect_map_inside(compose, &dev->compose_bounds_out);
+ v4l2_rect_set_min_size(compose, &min_rect);
+ v4l2_rect_set_max_size(compose, &max_rect);
+ v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
}
} else if (dev->has_compose_out) {
s->r.top *= factor;
s->r.height *= factor;
- rect_set_max_size(&s->r, &dev->sink_rect);
- rect_set_size_to(compose, &s->r);
- rect_map_inside(compose, &dev->compose_bounds_out);
+ v4l2_rect_set_max_size(&s->r, &dev->sink_rect);
+ v4l2_rect_set_size_to(compose, &s->r);
+ v4l2_rect_map_inside(compose, &dev->compose_bounds_out);
s->r.top /= factor;
s->r.height /= factor;
} else {
- rect_set_size_to(&s->r, &dev->sink_rect);
+ v4l2_rect_set_size_to(&s->r, &dev->sink_rect);
s->r.height /= factor;
}
- rect_map_inside(&s->r, &dev->fmt_out_rect);
+ v4l2_rect_map_inside(&s->r, &dev->fmt_out_rect);
*crop = s->r;
break;
case V4L2_SEL_TGT_COMPOSE:
@@ -730,9 +731,9 @@ int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection
ret = vivid_vid_adjust_sel(s->flags, &s->r);
if (ret)
return ret;
- rect_set_min_size(&s->r, &vivid_min_rect);
- rect_set_max_size(&s->r, &dev->sink_rect);
- rect_map_inside(&s->r, &dev->compose_bounds_out);
+ v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
+ v4l2_rect_set_max_size(&s->r, &dev->sink_rect);
+ v4l2_rect_map_inside(&s->r, &dev->compose_bounds_out);
s->r.top /= factor;
s->r.height /= factor;
if (dev->has_scaler_out) {
@@ -748,35 +749,35 @@ int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection
s->r.height / MAX_ZOOM
};
- rect_set_min_size(&fmt, &min_rect);
+ v4l2_rect_set_min_size(&fmt, &min_rect);
if (!dev->has_crop_out)
- rect_set_max_size(&fmt, &max_rect);
- if (!rect_same_size(&dev->fmt_out_rect, &fmt) &&
+ v4l2_rect_set_max_size(&fmt, &max_rect);
+ if (!v4l2_rect_same_size(&dev->fmt_out_rect, &fmt) &&
vb2_is_busy(&dev->vb_vid_out_q))
return -EBUSY;
if (dev->has_crop_out) {
- rect_set_min_size(crop, &min_rect);
- rect_set_max_size(crop, &max_rect);
+ v4l2_rect_set_min_size(crop, &min_rect);
+ v4l2_rect_set_max_size(crop, &max_rect);
}
dev->fmt_out_rect = fmt;
} else if (dev->has_crop_out) {
struct v4l2_rect fmt = dev->fmt_out_rect;
- rect_set_min_size(&fmt, &s->r);
- if (!rect_same_size(&dev->fmt_out_rect, &fmt) &&
+ v4l2_rect_set_min_size(&fmt, &s->r);
+ if (!v4l2_rect_same_size(&dev->fmt_out_rect, &fmt) &&
vb2_is_busy(&dev->vb_vid_out_q))
return -EBUSY;
dev->fmt_out_rect = fmt;
- rect_set_size_to(crop, &s->r);
- rect_map_inside(crop, &dev->fmt_out_rect);
+ v4l2_rect_set_size_to(crop, &s->r);
+ v4l2_rect_map_inside(crop, &dev->fmt_out_rect);
} else {
- if (!rect_same_size(&s->r, &dev->fmt_out_rect) &&
+ if (!v4l2_rect_same_size(&s->r, &dev->fmt_out_rect) &&
vb2_is_busy(&dev->vb_vid_out_q))
return -EBUSY;
- rect_set_size_to(&dev->fmt_out_rect, &s->r);
- rect_set_size_to(crop, &s->r);
+ v4l2_rect_set_size_to(&dev->fmt_out_rect, &s->r);
+ v4l2_rect_set_size_to(crop, &s->r);
crop->height /= factor;
- rect_map_inside(crop, &dev->fmt_out_rect);
+ v4l2_rect_map_inside(crop, &dev->fmt_out_rect);
}
s->r.top *= factor;
s->r.height *= factor;
@@ -901,7 +902,7 @@ int vidioc_try_fmt_vid_out_overlay(struct file *file, void *priv,
for (j = i + 1; j < win->clipcount; j++) {
struct v4l2_rect *r2 = &dev->try_clips_out[j].c;
- if (rect_overlap(r1, r2))
+ if (v4l2_rect_overlap(r1, r2))
return -EINVAL;
}
}
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index 910d6b8e8..46738b6c5 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -26,7 +26,6 @@
struct clk;
struct device;
-struct vsp1_dl;
struct vsp1_drm;
struct vsp1_entity;
struct vsp1_platform_data;
@@ -49,6 +48,7 @@ struct vsp1_uds;
struct vsp1_device_info {
u32 version;
+ unsigned int gen;
unsigned int features;
unsigned int rpf_count;
unsigned int uds_count;
@@ -85,8 +85,6 @@ struct vsp1_device {
struct media_entity_operations media_ops;
struct vsp1_drm *drm;
-
- bool use_dl;
};
int vsp1_device_get(struct vsp1_device *vsp1);
@@ -104,14 +102,4 @@ static inline void vsp1_write(struct vsp1_device *vsp1, u32 reg, u32 data)
iowrite32(data, vsp1->mmio + reg);
}
-#include "vsp1_dl.h"
-
-static inline void vsp1_mod_write(struct vsp1_entity *e, u32 reg, u32 data)
-{
- if (e->vsp1->use_dl)
- vsp1_dl_add(e, reg, data);
- else
- vsp1_write(e->vsp1, reg, data);
-}
-
#endif /* __VSP1_H__ */
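With vsp1_mod_write() and the use_dl flag removed, the choice of write path becomes explicit at each call site: immediate MMIO writes keep using vsp1_write(), while per-frame configuration is recorded into a display list with vsp1_dl_list_write() (introduced in vsp1_dl.c below). A sketch:

	static void example_writes(struct vsp1_device *vsp1,
				   struct vsp1_dl_list *dl, u32 reg, u32 data)
	{
		vsp1_write(vsp1, reg, data);		/* takes effect immediately */
		vsp1_dl_list_write(dl, reg, data);	/* applied when the list is
							 * committed to the hardware */
	}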
diff --git a/drivers/media/platform/vsp1/vsp1_bru.c b/drivers/media/platform/vsp1/vsp1_bru.c
index cb0dbc15d..b1068c018 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.c
+++ b/drivers/media/platform/vsp1/vsp1_bru.c
@@ -18,6 +18,8 @@
#include "vsp1.h"
#include "vsp1_bru.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_video.h"
@@ -28,9 +30,10 @@
* Device Access
*/
-static inline void vsp1_bru_write(struct vsp1_bru *bru, u32 reg, u32 data)
+static inline void vsp1_bru_write(struct vsp1_bru *bru, struct vsp1_dl_list *dl,
+ u32 reg, u32 data)
{
- vsp1_mod_write(&bru->entity, reg, data);
+ vsp1_dl_list_write(dl, reg, data);
}
/* -----------------------------------------------------------------------------
@@ -42,13 +45,9 @@ static int bru_s_ctrl(struct v4l2_ctrl *ctrl)
struct vsp1_bru *bru =
container_of(ctrl->handler, struct vsp1_bru, ctrls);
- if (!vsp1_entity_is_streaming(&bru->entity))
- return 0;
-
switch (ctrl->id) {
case V4L2_CID_BG_COLOR:
- vsp1_bru_write(bru, VI6_BRU_VIRRPF_COL, ctrl->val |
- (0xff << VI6_BRU_VIRRPF_COL_A_SHIFT));
+ bru->bgcolor = ctrl->val;
break;
}
@@ -60,116 +59,7 @@ static const struct v4l2_ctrl_ops bru_ctrl_ops = {
};
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
- */
-
-static int bru_s_stream(struct v4l2_subdev *subdev, int enable)
-{
- struct vsp1_pipeline *pipe = to_vsp1_pipeline(&subdev->entity);
- struct vsp1_bru *bru = to_bru(subdev);
- struct v4l2_mbus_framefmt *format;
- unsigned int flags;
- unsigned int i;
- int ret;
-
- ret = vsp1_entity_set_streaming(&bru->entity, enable);
- if (ret < 0)
- return ret;
-
- if (!enable)
- return 0;
-
- format = &bru->entity.formats[bru->entity.source_pad];
-
- /* The hardware is extremely flexible but we have no userspace API to
- * expose all the parameters, nor is it clear whether we would have use
- * cases for all the supported modes. Let's just harcode the parameters
- * to sane default values for now.
- */
-
- /* Disable dithering and enable color data normalization unless the
- * format at the pipeline output is premultiplied.
- */
- flags = pipe->output ? pipe->output->format.flags : 0;
- vsp1_bru_write(bru, VI6_BRU_INCTRL,
- flags & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA ?
- 0 : VI6_BRU_INCTRL_NRM);
-
- /* Set the background position to cover the whole output image. */
- vsp1_bru_write(bru, VI6_BRU_VIRRPF_SIZE,
- (format->width << VI6_BRU_VIRRPF_SIZE_HSIZE_SHIFT) |
- (format->height << VI6_BRU_VIRRPF_SIZE_VSIZE_SHIFT));
- vsp1_bru_write(bru, VI6_BRU_VIRRPF_LOC, 0);
-
- /* Route BRU input 1 as SRC input to the ROP unit and configure the ROP
- * unit with a NOP operation to make BRU input 1 available as the
- * Blend/ROP unit B SRC input.
- */
- vsp1_bru_write(bru, VI6_BRU_ROP, VI6_BRU_ROP_DSTSEL_BRUIN(1) |
- VI6_BRU_ROP_CROP(VI6_ROP_NOP) |
- VI6_BRU_ROP_AROP(VI6_ROP_NOP));
-
- for (i = 0; i < bru->entity.source_pad; ++i) {
- bool premultiplied = false;
- u32 ctrl = 0;
-
- /* Configure all Blend/ROP units corresponding to an enabled BRU
- * input for alpha blending. Blend/ROP units corresponding to
- * disabled BRU inputs are used in ROP NOP mode to ignore the
- * SRC input.
- */
- if (bru->inputs[i].rpf) {
- ctrl |= VI6_BRU_CTRL_RBC;
-
- premultiplied = bru->inputs[i].rpf->format.flags
- & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA;
- } else {
- ctrl |= VI6_BRU_CTRL_CROP(VI6_ROP_NOP)
- | VI6_BRU_CTRL_AROP(VI6_ROP_NOP);
- }
-
- /* Select the virtual RPF as the Blend/ROP unit A DST input to
- * serve as a background color.
- */
- if (i == 0)
- ctrl |= VI6_BRU_CTRL_DSTSEL_VRPF;
-
- /* Route BRU inputs 0 to 3 as SRC inputs to Blend/ROP units A to
- * D in that order. The Blend/ROP unit B SRC is hardwired to the
- * ROP unit output, the corresponding register bits must be set
- * to 0.
- */
- if (i != 1)
- ctrl |= VI6_BRU_CTRL_SRCSEL_BRUIN(i);
-
- vsp1_bru_write(bru, VI6_BRU_CTRL(i), ctrl);
-
- /* Harcode the blending formula to
- *
- * DSTc = DSTc * (1 - SRCa) + SRCc * SRCa
- * DSTa = DSTa * (1 - SRCa) + SRCa
- *
- * when the SRC input isn't premultiplied, and to
- *
- * DSTc = DSTc * (1 - SRCa) + SRCc
- * DSTa = DSTa * (1 - SRCa) + SRCa
- *
- * otherwise.
- */
- vsp1_bru_write(bru, VI6_BRU_BLD(i),
- VI6_BRU_BLD_CCMDX_255_SRC_A |
- (premultiplied ? VI6_BRU_BLD_CCMDY_COEFY :
- VI6_BRU_BLD_CCMDY_SRC_A) |
- VI6_BRU_BLD_ACMDX_255_SRC_A |
- VI6_BRU_BLD_ACMDY_COEFY |
- (0xff << VI6_BRU_BLD_COEFY_SHIFT));
- }
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Pad Operations
+ * V4L2 Subdevice Operations
*/
/*
@@ -186,24 +76,9 @@ static int bru_enum_mbus_code(struct v4l2_subdev *subdev,
MEDIA_BUS_FMT_ARGB8888_1X32,
MEDIA_BUS_FMT_AYUV8_1X32,
};
- struct vsp1_bru *bru = to_bru(subdev);
- struct v4l2_mbus_framefmt *format;
-
- if (code->pad == BRU_PAD_SINK(0)) {
- if (code->index >= ARRAY_SIZE(codes))
- return -EINVAL;
-
- code->code = codes[code->index];
- } else {
- if (code->index)
- return -EINVAL;
-
- format = vsp1_entity_get_pad_format(&bru->entity, cfg,
- BRU_PAD_SINK(0), code->which);
- code->code = format->code;
- }
- return 0;
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ ARRAY_SIZE(codes));
}
static int bru_enum_frame_size(struct v4l2_subdev *subdev,
@@ -227,32 +102,14 @@ static int bru_enum_frame_size(struct v4l2_subdev *subdev,
static struct v4l2_rect *bru_get_compose(struct vsp1_bru *bru,
struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, u32 which)
-{
- switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&bru->entity.subdev, cfg, pad);
- case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &bru->inputs[pad].compose;
- default:
- return NULL;
- }
-}
-
-static int bru_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+ unsigned int pad)
{
- struct vsp1_bru *bru = to_bru(subdev);
-
- fmt->format = *vsp1_entity_get_pad_format(&bru->entity, cfg, fmt->pad,
- fmt->which);
-
- return 0;
+ return v4l2_subdev_get_try_compose(&bru->entity.subdev, cfg, pad);
}
-static void bru_try_format(struct vsp1_bru *bru, struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, struct v4l2_mbus_framefmt *fmt,
- enum v4l2_subdev_format_whence which)
+static void bru_try_format(struct vsp1_bru *bru,
+ struct v4l2_subdev_pad_config *config,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt)
{
struct v4l2_mbus_framefmt *format;
@@ -266,8 +123,8 @@ static void bru_try_format(struct vsp1_bru *bru, struct v4l2_subdev_pad_config *
default:
/* The BRU can't perform format conversion. */
- format = vsp1_entity_get_pad_format(&bru->entity, cfg,
- BRU_PAD_SINK(0), which);
+ format = vsp1_entity_get_pad_format(&bru->entity, config,
+ BRU_PAD_SINK(0));
fmt->code = format->code;
break;
}
@@ -278,23 +135,28 @@ static void bru_try_format(struct vsp1_bru *bru, struct v4l2_subdev_pad_config *
fmt->colorspace = V4L2_COLORSPACE_SRGB;
}
-static int bru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
+static int bru_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_bru *bru = to_bru(subdev);
+ struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
- bru_try_format(bru, cfg, fmt->pad, &fmt->format, fmt->which);
+ config = vsp1_entity_get_pad_config(&bru->entity, cfg, fmt->which);
+ if (!config)
+ return -EINVAL;
+
+ bru_try_format(bru, config, fmt->pad, &fmt->format);
- format = vsp1_entity_get_pad_format(&bru->entity, cfg, fmt->pad,
- fmt->which);
+ format = vsp1_entity_get_pad_format(&bru->entity, config, fmt->pad);
*format = fmt->format;
/* Reset the compose rectangle */
if (fmt->pad != bru->entity.source_pad) {
struct v4l2_rect *compose;
- compose = bru_get_compose(bru, cfg, fmt->pad, fmt->which);
+ compose = bru_get_compose(bru, config, fmt->pad);
compose->left = 0;
compose->top = 0;
compose->width = format->width;
@@ -306,8 +168,8 @@ static int bru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_con
unsigned int i;
for (i = 0; i <= bru->entity.source_pad; ++i) {
- format = vsp1_entity_get_pad_format(&bru->entity, cfg,
- i, fmt->which);
+ format = vsp1_entity_get_pad_format(&bru->entity,
+ config, i);
format->code = fmt->format.code;
}
}
@@ -320,6 +182,7 @@ static int bru_get_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_selection *sel)
{
struct vsp1_bru *bru = to_bru(subdev);
+ struct v4l2_subdev_pad_config *config;
if (sel->pad == bru->entity.source_pad)
return -EINVAL;
@@ -333,7 +196,12 @@ static int bru_get_selection(struct v4l2_subdev *subdev,
return 0;
case V4L2_SEL_TGT_COMPOSE:
- sel->r = *bru_get_compose(bru, cfg, sel->pad, sel->which);
+ config = vsp1_entity_get_pad_config(&bru->entity, cfg,
+ sel->which);
+ if (!config)
+ return -EINVAL;
+
+ sel->r = *bru_get_compose(bru, config, sel->pad);
return 0;
default:
@@ -346,6 +214,7 @@ static int bru_set_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_selection *sel)
{
struct vsp1_bru *bru = to_bru(subdev);
+ struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *compose;
@@ -355,57 +224,161 @@ static int bru_set_selection(struct v4l2_subdev *subdev,
if (sel->target != V4L2_SEL_TGT_COMPOSE)
return -EINVAL;
+ config = vsp1_entity_get_pad_config(&bru->entity, cfg, sel->which);
+ if (!config)
+ return -EINVAL;
+
/* The compose rectangle top left corner must be inside the output
* frame.
*/
- format = vsp1_entity_get_pad_format(&bru->entity, cfg,
- bru->entity.source_pad, sel->which);
+ format = vsp1_entity_get_pad_format(&bru->entity, config,
+ bru->entity.source_pad);
sel->r.left = clamp_t(unsigned int, sel->r.left, 0, format->width - 1);
sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
/* Scaling isn't supported, the compose rectangle size must be identical
* to the sink format size.
*/
- format = vsp1_entity_get_pad_format(&bru->entity, cfg, sel->pad,
- sel->which);
+ format = vsp1_entity_get_pad_format(&bru->entity, config, sel->pad);
sel->r.width = format->width;
sel->r.height = format->height;
- compose = bru_get_compose(bru, cfg, sel->pad, sel->which);
+ compose = bru_get_compose(bru, config, sel->pad);
*compose = sel->r;
return 0;
}
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static struct v4l2_subdev_video_ops bru_video_ops = {
- .s_stream = bru_s_stream,
-};
-
static struct v4l2_subdev_pad_ops bru_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = bru_enum_mbus_code,
.enum_frame_size = bru_enum_frame_size,
- .get_fmt = bru_get_format,
+ .get_fmt = vsp1_subdev_get_pad_format,
.set_fmt = bru_set_format,
.get_selection = bru_get_selection,
.set_selection = bru_set_selection,
};
static struct v4l2_subdev_ops bru_ops = {
- .video = &bru_video_ops,
.pad = &bru_pad_ops,
};
/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void bru_configure(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl)
+{
+ struct vsp1_bru *bru = to_bru(&entity->subdev);
+ struct v4l2_mbus_framefmt *format;
+ unsigned int flags;
+ unsigned int i;
+
+ format = vsp1_entity_get_pad_format(&bru->entity, bru->entity.config,
+ bru->entity.source_pad);
+
+ /* The hardware is extremely flexible but we have no userspace API to
+ * expose all the parameters, nor is it clear whether we would have use
+ * cases for all the supported modes. Let's just hardcode the parameters
+ * to sane default values for now.
+ */
+
+ /* Disable dithering and enable color data normalization unless the
+ * format at the pipeline output is premultiplied.
+ */
+ flags = pipe->output ? pipe->output->format.flags : 0;
+ vsp1_bru_write(bru, dl, VI6_BRU_INCTRL,
+ flags & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA ?
+ 0 : VI6_BRU_INCTRL_NRM);
+
+ /* Set the background position to cover the whole output image and
+ * configure its color.
+ */
+ vsp1_bru_write(bru, dl, VI6_BRU_VIRRPF_SIZE,
+ (format->width << VI6_BRU_VIRRPF_SIZE_HSIZE_SHIFT) |
+ (format->height << VI6_BRU_VIRRPF_SIZE_VSIZE_SHIFT));
+ vsp1_bru_write(bru, dl, VI6_BRU_VIRRPF_LOC, 0);
+
+ vsp1_bru_write(bru, dl, VI6_BRU_VIRRPF_COL, bru->bgcolor |
+ (0xff << VI6_BRU_VIRRPF_COL_A_SHIFT));
+
+ /* Route BRU input 1 as SRC input to the ROP unit and configure the ROP
+ * unit with a NOP operation to make BRU input 1 available as the
+ * Blend/ROP unit B SRC input.
+ */
+ vsp1_bru_write(bru, dl, VI6_BRU_ROP, VI6_BRU_ROP_DSTSEL_BRUIN(1) |
+ VI6_BRU_ROP_CROP(VI6_ROP_NOP) |
+ VI6_BRU_ROP_AROP(VI6_ROP_NOP));
+
+ for (i = 0; i < bru->entity.source_pad; ++i) {
+ bool premultiplied = false;
+ u32 ctrl = 0;
+
+ /* Configure all Blend/ROP units corresponding to an enabled BRU
+ * input for alpha blending. Blend/ROP units corresponding to
+ * disabled BRU inputs are used in ROP NOP mode to ignore the
+ * SRC input.
+ */
+ if (bru->inputs[i].rpf) {
+ ctrl |= VI6_BRU_CTRL_RBC;
+
+ premultiplied = bru->inputs[i].rpf->format.flags
+ & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA;
+ } else {
+ ctrl |= VI6_BRU_CTRL_CROP(VI6_ROP_NOP)
+ | VI6_BRU_CTRL_AROP(VI6_ROP_NOP);
+ }
+
+ /* Select the virtual RPF as the Blend/ROP unit A DST input to
+ * serve as a background color.
+ */
+ if (i == 0)
+ ctrl |= VI6_BRU_CTRL_DSTSEL_VRPF;
+
+ /* Route BRU inputs 0 to 3 as SRC inputs to Blend/ROP units A to
+ * D in that order. The Blend/ROP unit B SRC is hardwired to the
+ * ROP unit output, the corresponding register bits must be set
+ * to 0.
+ */
+ if (i != 1)
+ ctrl |= VI6_BRU_CTRL_SRCSEL_BRUIN(i);
+
+ vsp1_bru_write(bru, dl, VI6_BRU_CTRL(i), ctrl);
+
+ /* Hardcode the blending formula to
+ *
+ * DSTc = DSTc * (1 - SRCa) + SRCc * SRCa
+ * DSTa = DSTa * (1 - SRCa) + SRCa
+ *
+ * when the SRC input isn't premultiplied, and to
+ *
+ * DSTc = DSTc * (1 - SRCa) + SRCc
+ * DSTa = DSTa * (1 - SRCa) + SRCa
+ *
+ * otherwise.
+ */
+ vsp1_bru_write(bru, dl, VI6_BRU_BLD(i),
+ VI6_BRU_BLD_CCMDX_255_SRC_A |
+ (premultiplied ? VI6_BRU_BLD_CCMDY_COEFY :
+ VI6_BRU_BLD_CCMDY_SRC_A) |
+ VI6_BRU_BLD_ACMDX_255_SRC_A |
+ VI6_BRU_BLD_ACMDY_COEFY |
+ (0xff << VI6_BRU_BLD_COEFY_SHIFT));
+ }
+}
+
+static const struct vsp1_entity_operations bru_entity_ops = {
+ .configure = bru_configure,
+};
+
+/* -----------------------------------------------------------------------------
* Initialization and Cleanup
*/
struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1)
{
- struct v4l2_subdev *subdev;
struct vsp1_bru *bru;
int ret;
@@ -413,31 +386,21 @@ struct vsp1_bru *vsp1_bru_create(struct vsp1_device *vsp1)
if (bru == NULL)
return ERR_PTR(-ENOMEM);
+ bru->entity.ops = &bru_entity_ops;
bru->entity.type = VSP1_ENTITY_BRU;
- ret = vsp1_entity_init(vsp1, &bru->entity,
- vsp1->info->num_bru_inputs + 1);
+ ret = vsp1_entity_init(vsp1, &bru->entity, "bru",
+ vsp1->info->num_bru_inputs + 1, &bru_ops);
if (ret < 0)
return ERR_PTR(ret);
- /* Initialize the V4L2 subdev. */
- subdev = &bru->entity.subdev;
- v4l2_subdev_init(subdev, &bru_ops);
-
- subdev->entity.ops = &vsp1->media_ops;
- subdev->internal_ops = &vsp1_subdev_internal_ops;
- snprintf(subdev->name, sizeof(subdev->name), "%s bru",
- dev_name(vsp1->dev));
- v4l2_set_subdevdata(subdev, bru);
- subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- vsp1_entity_init_formats(subdev, NULL);
-
/* Initialize the control handler. */
v4l2_ctrl_handler_init(&bru->ctrls, 1);
v4l2_ctrl_new_std(&bru->ctrls, &bru_ctrl_ops, V4L2_CID_BG_COLOR,
0, 0xffffff, 1, 0);
+ bru->bgcolor = 0;
+
bru->entity.subdev.ctrl_handler = &bru->ctrls;
if (bru->ctrls.error) {
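The net effect of the BRU rework, sketched below (the pipeline-side caller is an assumption based on the vsp1_entity_operations structure added above): register setup moves out of .s_stream and into a configure() entity operation that receives the display list being built, and bru_s_ctrl() now only caches the background color for the next configure() pass.

	static void example_configure_entity(struct vsp1_entity *entity,
					     struct vsp1_pipeline *pipe,
					     struct vsp1_dl_list *dl)
	{
		if (entity->ops && entity->ops->configure)
			entity->ops->configure(entity, pipe, dl);
	}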
diff --git a/drivers/media/platform/vsp1/vsp1_bru.h b/drivers/media/platform/vsp1/vsp1_bru.h
index dbac9686e..828a3fcad 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.h
+++ b/drivers/media/platform/vsp1/vsp1_bru.h
@@ -31,8 +31,9 @@ struct vsp1_bru {
struct {
struct vsp1_rwpf *rpf;
- struct v4l2_rect compose;
} inputs[VSP1_MAX_RPF];
+
+ u32 bgcolor;
};
static inline struct vsp1_bru *to_bru(struct v4l2_subdev *subdev)
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index 1a9a58588..e238d9b93 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -18,139 +18,406 @@
#include "vsp1.h"
#include "vsp1_dl.h"
-#include "vsp1_pipe.h"
-/*
- * Global resources
- *
- * - Display-related interrupts (can be used for vblank evasion ?)
- * - Display-list enable
- * - Header-less for WPF0
- * - DL swap
- */
-
-#define VSP1_DL_BODY_SIZE (2 * 4 * 256)
+#define VSP1_DL_NUM_ENTRIES 256
#define VSP1_DL_NUM_LISTS 3
+#define VSP1_DLH_INT_ENABLE (1 << 1)
+#define VSP1_DLH_AUTO_START (1 << 0)
+
+struct vsp1_dl_header_list {
+ u32 num_bytes;
+ u32 addr;
+} __attribute__((__packed__));
+
+struct vsp1_dl_header {
+ u32 num_lists;
+ struct vsp1_dl_header_list lists[8];
+ u32 next_header;
+ u32 flags;
+} __attribute__((__packed__));
+
struct vsp1_dl_entry {
u32 addr;
u32 data;
} __attribute__((__packed__));
-struct vsp1_dl_list {
+/**
+ * struct vsp1_dl_body - Display list body
+ * @list: entry in the display list's list of bodies
+ * @vsp1: the VSP1 device
+ * @entries: array of entries
+ * @dma: DMA address of the entries
+ * @size: size of the DMA memory in bytes
+ * @num_entries: number of stored entries
+ */
+struct vsp1_dl_body {
+ struct list_head list;
+ struct vsp1_device *vsp1;
+
+ struct vsp1_dl_entry *entries;
+ dma_addr_t dma;
size_t size;
- int reg_count;
- bool in_use;
+ unsigned int num_entries;
+};
- struct vsp1_dl_entry *body;
+/**
+ * struct vsp1_dl_list - Display list
+ * @list: entry in the display list manager lists
+ * @dlm: the display list manager
+ * @header: display list header, NULL for headerless lists
+ * @dma: DMA address for the header
+ * @body0: first display list body
+ * @fragments: list of extra display list bodies
+ */
+struct vsp1_dl_list {
+ struct list_head list;
+ struct vsp1_dl_manager *dlm;
+
+ struct vsp1_dl_header *header;
dma_addr_t dma;
+
+ struct vsp1_dl_body body0;
+ struct list_head fragments;
+};
+
+enum vsp1_dl_mode {
+ VSP1_DL_MODE_HEADER,
+ VSP1_DL_MODE_HEADERLESS,
};
/**
- * struct vsp1_dl - Display List manager
+ * struct vsp1_dl_manager - Display List manager
+ * @index: index of the related WPF
+ * @mode: display list operation mode (header or headerless)
* @vsp1: the VSP1 device
* @lock: protects the active, queued and pending lists
- * @lists.all: array of all allocate display lists
- * @lists.active: list currently being processed (loaded) by hardware
- * @lists.queued: list queued to the hardware (written to the DL registers)
- * @lists.pending: list waiting to be queued to the hardware
- * @lists.write: list being written to by software
+ * @free: list of all free display lists
+ * @active: list currently being processed (loaded) by hardware
+ * @queued: list queued to the hardware (written to the DL registers)
+ * @pending: list waiting to be queued to the hardware
*/
-struct vsp1_dl {
+struct vsp1_dl_manager {
+ unsigned int index;
+ enum vsp1_dl_mode mode;
struct vsp1_device *vsp1;
spinlock_t lock;
+ struct list_head free;
+ struct vsp1_dl_list *active;
+ struct vsp1_dl_list *queued;
+ struct vsp1_dl_list *pending;
+};
- size_t size;
- dma_addr_t dma;
- void *mem;
+/* -----------------------------------------------------------------------------
+ * Display List Body Management
+ */
+
+/*
+ * Initialize a display list body object and allocate DMA memory for the body
+ * data. The display list body object is expected to have been initialized to
+ * 0 when allocated.
+ */
+static int vsp1_dl_body_init(struct vsp1_device *vsp1,
+ struct vsp1_dl_body *dlb, unsigned int num_entries,
+ size_t extra_size)
+{
+ size_t size = num_entries * sizeof(*dlb->entries) + extra_size;
- struct {
- struct vsp1_dl_list all[VSP1_DL_NUM_LISTS];
+ dlb->vsp1 = vsp1;
+ dlb->size = size;
- struct vsp1_dl_list *active;
- struct vsp1_dl_list *queued;
- struct vsp1_dl_list *pending;
- struct vsp1_dl_list *write;
- } lists;
-};
+ dlb->entries = dma_alloc_wc(vsp1->dev, dlb->size, &dlb->dma,
+ GFP_KERNEL);
+ if (!dlb->entries)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * Cleanup a display list body and free the allocated DMA memory.
+ */
+static void vsp1_dl_body_cleanup(struct vsp1_dl_body *dlb)
+{
+ dma_free_wc(dlb->vsp1->dev, dlb->size, dlb->entries, dlb->dma);
+}
+
+/**
+ * vsp1_dl_fragment_alloc - Allocate a display list fragment
+ * @vsp1: The VSP1 device
+ * @num_entries: The maximum number of entries that the fragment can contain
+ *
+ * Allocate a display list fragment with enough memory to contain the requested
+ * number of entries.
+ *
+ * Return a pointer to a fragment on success or NULL if memory can't be
+ * allocated.
+ */
+struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
+ unsigned int num_entries)
+{
+ struct vsp1_dl_body *dlb;
+ int ret;
+
+ dlb = kzalloc(sizeof(*dlb), GFP_KERNEL);
+ if (!dlb)
+ return NULL;
+
+ ret = vsp1_dl_body_init(vsp1, dlb, num_entries, 0);
+ if (ret < 0) {
+ kfree(dlb);
+ return NULL;
+ }
+
+ return dlb;
+}
+
+/**
+ * vsp1_dl_fragment_free - Free a display list fragment
+ * @dlb: The fragment
+ *
+ * Free the given display list fragment and the associated DMA memory.
+ *
+ * Fragments must only be freed explicitly if they are not added to a display
+ * list, as the display list will take ownership of them and free them
+ * otherwise. Manual freeing typically happens at cleanup time for fragments
+ * that have been allocated but not used.
+ *
+ * Passing a NULL pointer to this function is safe, in that case no operation
+ * will be performed.
+ */
+void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb)
+{
+ if (!dlb)
+ return;
+
+ vsp1_dl_body_cleanup(dlb);
+ kfree(dlb);
+}
+
+/**
+ * vsp1_dl_fragment_write - Write a register to a display list fragment
+ * @dlb: The fragment
+ * @reg: The register address
+ * @data: The register value
+ *
+ * Write the given register and value to the display list fragment. The maximum
+ * number of entries that can be written in a fragment is specified when the
+ * fragment is allocated by vsp1_dl_fragment_alloc().
+ */
+void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
+{
+ dlb->entries[dlb->num_entries].addr = reg;
+ dlb->entries[dlb->num_entries].data = data;
+ dlb->num_entries++;
+}
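+
+/* Fragment API usage sketch (illustrative only, not part of this change).
+ * The vsp1 device, the display list "dl" and the register/value pair are
+ * assumed to come from the caller's context:
+ *
+ *	struct vsp1_dl_body *dlb;
+ *	int ret;
+ *
+ *	dlb = vsp1_dl_fragment_alloc(vsp1, 1);
+ *	if (!dlb)
+ *		return -ENOMEM;
+ *
+ *	vsp1_dl_fragment_write(dlb, reg, data);
+ *
+ *	ret = vsp1_dl_list_add_fragment(dl, dlb);
+ *	if (ret < 0)
+ *		vsp1_dl_fragment_free(dlb);
+ */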
/* -----------------------------------------------------------------------------
* Display List Transaction Management
*/
-static void vsp1_dl_free_list(struct vsp1_dl_list *list)
+static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
- if (!list)
- return;
+ struct vsp1_dl_list *dl;
+ size_t header_size;
+ int ret;
+
+ dl = kzalloc(sizeof(*dl), GFP_KERNEL);
+ if (!dl)
+ return NULL;
- list->in_use = false;
+ INIT_LIST_HEAD(&dl->fragments);
+ dl->dlm = dlm;
+
+ /* Initialize the display list body and allocate DMA memory for the body
+ * and the optional header. Both are allocated together to avoid memory
+ * fragmentation, with the header located right after the body in
+ * memory.
+ */
+ header_size = dlm->mode == VSP1_DL_MODE_HEADER
+ ? ALIGN(sizeof(struct vsp1_dl_header), 8)
+ : 0;
+
+ ret = vsp1_dl_body_init(dlm->vsp1, &dl->body0, VSP1_DL_NUM_ENTRIES,
+ header_size);
+ if (ret < 0) {
+ kfree(dl);
+ return NULL;
+ }
+
+ if (dlm->mode == VSP1_DL_MODE_HEADER) {
+ size_t header_offset = VSP1_DL_NUM_ENTRIES
+ * sizeof(*dl->body0.entries);
+
+ dl->header = ((void *)dl->body0.entries) + header_offset;
+ dl->dma = dl->body0.dma + header_offset;
+
+ memset(dl->header, 0, sizeof(*dl->header));
+ dl->header->lists[0].addr = dl->body0.dma;
+ dl->header->flags = VSP1_DLH_INT_ENABLE;
+ }
+
+ return dl;
}
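+
+/* Resulting memory layout in header mode (informal sketch): the 256 body
+ * entries and the header share a single DMA allocation, with the header
+ * located right after the entries:
+ *
+ *	body0.dma -> | entry 0 | entry 1 | ... | entry 255 | header |
+ *	                                                    ^ dl->dma
+ */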
-void vsp1_dl_reset(struct vsp1_dl *dl)
+static void vsp1_dl_list_free_fragments(struct vsp1_dl_list *dl)
{
- unsigned int i;
+ struct vsp1_dl_body *dlb, *next;
- dl->lists.active = NULL;
- dl->lists.queued = NULL;
- dl->lists.pending = NULL;
- dl->lists.write = NULL;
+ list_for_each_entry_safe(dlb, next, &dl->fragments, list) {
+ list_del(&dlb->list);
+ vsp1_dl_body_cleanup(dlb);
+ kfree(dlb);
+ }
+}
- for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i)
- dl->lists.all[i].in_use = false;
+static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
+{
+ vsp1_dl_body_cleanup(&dl->body0);
+ vsp1_dl_list_free_fragments(dl);
+ kfree(dl);
}
-void vsp1_dl_begin(struct vsp1_dl *dl)
+/**
+ * vsp1_dl_list_get - Get a free display list
+ * @dlm: The display list manager
+ *
+ * Get a display list from the pool of free lists and return it.
+ *
+ * This function must be called without the display list manager lock held.
+ */
+struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
- struct vsp1_dl_list *list = NULL;
+ struct vsp1_dl_list *dl = NULL;
unsigned long flags;
- unsigned int i;
- spin_lock_irqsave(&dl->lock, flags);
+ spin_lock_irqsave(&dlm->lock, flags);
- for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i) {
- if (!dl->lists.all[i].in_use) {
- list = &dl->lists.all[i];
- break;
- }
+ if (!list_empty(&dlm->free)) {
+ dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
+ list_del(&dl->list);
}
- if (!list) {
- list = dl->lists.pending;
- dl->lists.pending = NULL;
- }
+ spin_unlock_irqrestore(&dlm->lock, flags);
+
+ return dl;
+}
+
+/* This function must be called with the display list manager lock held. */
+static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
+{
+ if (!dl)
+ return;
+
+ vsp1_dl_list_free_fragments(dl);
+ dl->body0.num_entries = 0;
+
+ list_add_tail(&dl->list, &dl->dlm->free);
+}
+
+/**
+ * vsp1_dl_list_put - Release a display list
+ * @dl: The display list
+ *
+ * Release the display list and return it to the pool of free lists.
+ *
+ * Passing a NULL pointer to this function is safe, in that case no operation
+ * will be performed.
+ */
+void vsp1_dl_list_put(struct vsp1_dl_list *dl)
+{
+ unsigned long flags;
- spin_unlock_irqrestore(&dl->lock, flags);
+ if (!dl)
+ return;
- dl->lists.write = list;
+ spin_lock_irqsave(&dl->dlm->lock, flags);
+ __vsp1_dl_list_put(dl);
+ spin_unlock_irqrestore(&dl->dlm->lock, flags);
+}
- list->in_use = true;
- list->reg_count = 0;
+/**
+ * vsp1_dl_list_write - Write a register to the display list
+ * @dl: The display list
+ * @reg: The register address
+ * @data: The register value
+ *
+ * Write the given register and value to the display list. Up to 256 registers
+ * can be written per display list.
+ */
+void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data)
+{
+ vsp1_dl_fragment_write(&dl->body0, reg, data);
}
-void vsp1_dl_add(struct vsp1_entity *e, u32 reg, u32 data)
+/**
+ * vsp1_dl_list_add_fragment - Add a fragment to the display list
+ * @dl: The display list
+ * @dlb: The fragment
+ *
+ * Add a display list body as a fragment to a display list. Registers contained
+ * in fragments are processed after registers contained in the main display
+ * list, in the order in which fragments are added.
+ *
+ * Adding a fragment to a display list passes ownership of the fragment to the
+ * list. The caller must not touch the fragment after this call, and must not
+ * free it explicitly with vsp1_dl_fragment_free().
+ *
+ * Fragments are only usable for display lists in header mode. Attempting to
+ * add a fragment to a header-less display list will return an error.
+ */
+int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb)
{
- struct vsp1_pipeline *pipe = to_vsp1_pipeline(&e->subdev.entity);
- struct vsp1_dl *dl = pipe->dl;
- struct vsp1_dl_list *list = dl->lists.write;
+ /* Multi-body lists are only available in header mode. */
+ if (dl->dlm->mode != VSP1_DL_MODE_HEADER)
+ return -EINVAL;
- list->body[list->reg_count].addr = reg;
- list->body[list->reg_count].data = data;
- list->reg_count++;
+ list_add_tail(&dlb->list, &dl->fragments);
+ return 0;
}
-void vsp1_dl_commit(struct vsp1_dl *dl)
+void vsp1_dl_list_commit(struct vsp1_dl_list *dl)
{
- struct vsp1_device *vsp1 = dl->vsp1;
- struct vsp1_dl_list *list;
+ struct vsp1_dl_manager *dlm = dl->dlm;
+ struct vsp1_device *vsp1 = dlm->vsp1;
unsigned long flags;
bool update;
- list = dl->lists.write;
- dl->lists.write = NULL;
+ spin_lock_irqsave(&dlm->lock, flags);
+
+ if (dl->dlm->mode == VSP1_DL_MODE_HEADER) {
+ struct vsp1_dl_header_list *hdr = dl->header->lists;
+ struct vsp1_dl_body *dlb;
+ unsigned int num_lists = 0;
+
+ /* Fill the header with the display list bodies' addresses and
+ * sizes. The address of the first body has already been filled
+ * when the display list was allocated.
+ *
+ * In header mode the caller guarantees that the hardware is
+ * idle at this point.
+ */
+ hdr->num_bytes = dl->body0.num_entries
+ * sizeof(*dl->header->lists);
+
+ list_for_each_entry(dlb, &dl->fragments, list) {
+ num_lists++;
+ hdr++;
+
+ hdr->addr = dlb->dma;
+ hdr->num_bytes = dlb->num_entries
+ * sizeof(*dl->header->lists);
+ }
+
+ dl->header->num_lists = num_lists;
+ vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
- spin_lock_irqsave(&dl->lock, flags);
+ dlm->active = dl;
+ goto done;
+ }
/* Once the UPD bit has been set the hardware can start processing the
* display list at any time and we can't touch the address and size
@@ -159,8 +426,8 @@ void vsp1_dl_commit(struct vsp1_dl *dl)
*/
update = !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE) & VI6_DL_BODY_SIZE_UPD);
if (update) {
- vsp1_dl_free_list(dl->lists.pending);
- dl->lists.pending = list;
+ __vsp1_dl_list_put(dlm->pending);
+ dlm->pending = dl;
goto done;
}
@@ -168,42 +435,51 @@ void vsp1_dl_commit(struct vsp1_dl *dl)
* The UPD bit will be cleared by the device when the display list is
* processed.
*/
- vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), list->dma);
+ vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
- (list->reg_count * 8));
+ (dl->body0.num_entries * sizeof(*dl->header->lists)));
- vsp1_dl_free_list(dl->lists.queued);
- dl->lists.queued = list;
+ __vsp1_dl_list_put(dlm->queued);
+ dlm->queued = dl;
done:
- spin_unlock_irqrestore(&dl->lock, flags);
+ spin_unlock_irqrestore(&dlm->lock, flags);
}
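+
+/* Typical display list life cycle as seen from a pipeline (sketch; "dlm" is
+ * assumed to be the display list manager of the pipeline's WPF):
+ *
+ *	struct vsp1_dl_list *dl;
+ *
+ *	dl = vsp1_dl_list_get(dlm);
+ *	if (!dl)
+ *		return;
+ *
+ *	vsp1_dl_list_write(dl, reg, data);
+ *	vsp1_dl_list_commit(dl);
+ *
+ * Committed lists are recycled to the free pool by the interrupt handlers
+ * below; a list that ends up not being committed must be released with
+ * vsp1_dl_list_put().
+ */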
/* -----------------------------------------------------------------------------
- * Interrupt Handling
+ * Display List Manager
*/
-void vsp1_dl_irq_display_start(struct vsp1_dl *dl)
+/* Interrupt Handling */
+void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm)
{
- spin_lock(&dl->lock);
+ spin_lock(&dlm->lock);
/* The display start interrupt signals the end of the display list
* processing by the device. The active display list, if any, won't be
* accessed anymore and can be reused.
*/
- if (dl->lists.active) {
- vsp1_dl_free_list(dl->lists.active);
- dl->lists.active = NULL;
- }
+ __vsp1_dl_list_put(dlm->active);
+ dlm->active = NULL;
- spin_unlock(&dl->lock);
+ spin_unlock(&dlm->lock);
}
-void vsp1_dl_irq_frame_end(struct vsp1_dl *dl)
+void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
- struct vsp1_device *vsp1 = dl->vsp1;
+ struct vsp1_device *vsp1 = dlm->vsp1;
+
+ spin_lock(&dlm->lock);
+
+ __vsp1_dl_list_put(dlm->active);
+ dlm->active = NULL;
- spin_lock(&dl->lock);
+ /* Header mode is used for mem-to-mem pipelines only. We don't need to
+ * perform any operation as there can't be any new display list queued
+ * in that case.
+ */
+ if (dlm->mode == VSP1_DL_MODE_HEADER)
+ goto done;
/* The UPD bit set indicates that the commit operation raced with the
* interrupt and occurred after the frame end event and UPD clear but
@@ -216,42 +492,39 @@ void vsp1_dl_irq_frame_end(struct vsp1_dl *dl)
/* The device starts processing the queued display list right after the
* frame end interrupt. The display list thus becomes active.
*/
- if (dl->lists.queued) {
- WARN_ON(dl->lists.active);
- dl->lists.active = dl->lists.queued;
- dl->lists.queued = NULL;
+ if (dlm->queued) {
+ dlm->active = dlm->queued;
+ dlm->queued = NULL;
}
/* Now that the UPD bit has been cleared we can queue the next display
* list to the hardware if one has been prepared.
*/
- if (dl->lists.pending) {
- struct vsp1_dl_list *list = dl->lists.pending;
+ if (dlm->pending) {
+ struct vsp1_dl_list *dl = dlm->pending;
- vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), list->dma);
+ vsp1_write(vsp1, VI6_DL_HDR_ADDR(0), dl->body0.dma);
vsp1_write(vsp1, VI6_DL_BODY_SIZE, VI6_DL_BODY_SIZE_UPD |
- (list->reg_count * 8));
+ (dl->body0.num_entries *
+ sizeof(*dl->header->lists)));
- dl->lists.queued = list;
- dl->lists.pending = NULL;
+ dlm->queued = dl;
+ dlm->pending = NULL;
}
done:
- spin_unlock(&dl->lock);
+ spin_unlock(&dlm->lock);
}
-/* -----------------------------------------------------------------------------
- * Hardware Setup
- */
-
-void vsp1_dl_setup(struct vsp1_device *vsp1)
+/* Hardware Setup */
+void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT)
| VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
| VI6_DL_CTRL_DLE;
- /* The DRM pipeline operates with header-less display lists in
- * Continuous Frame Mode.
+ /* The DRM pipeline operates with display lists in Continuous Frame
+ * Mode, while all other pipelines use manual start.
*/
if (vsp1->drm)
ctrl |= VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0;
@@ -260,46 +533,64 @@ void vsp1_dl_setup(struct vsp1_device *vsp1)
vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}
-/* -----------------------------------------------------------------------------
- * Initialization and Cleanup
- */
+void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dlm->lock, flags);
-struct vsp1_dl *vsp1_dl_create(struct vsp1_device *vsp1)
+ __vsp1_dl_list_put(dlm->active);
+ __vsp1_dl_list_put(dlm->queued);
+ __vsp1_dl_list_put(dlm->pending);
+
+ spin_unlock_irqrestore(&dlm->lock, flags);
+
+ dlm->active = NULL;
+ dlm->queued = NULL;
+ dlm->pending = NULL;
+}
+
+struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
+ unsigned int index,
+ unsigned int prealloc)
{
- struct vsp1_dl *dl;
+ struct vsp1_dl_manager *dlm;
unsigned int i;
- dl = kzalloc(sizeof(*dl), GFP_KERNEL);
- if (!dl)
+ dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
+ if (!dlm)
return NULL;
- spin_lock_init(&dl->lock);
+ dlm->index = index;
+ dlm->mode = index == 0 && !vsp1->info->uapi
+ ? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER;
+ dlm->vsp1 = vsp1;
- dl->vsp1 = vsp1;
- dl->size = VSP1_DL_BODY_SIZE * ARRAY_SIZE(dl->lists.all);
+ spin_lock_init(&dlm->lock);
+ INIT_LIST_HEAD(&dlm->free);
- dl->mem = dma_alloc_wc(vsp1->dev, dl->size, &dl->dma,
- GFP_KERNEL);
- if (!dl->mem) {
- kfree(dl);
- return NULL;
- }
+ for (i = 0; i < prealloc; ++i) {
+ struct vsp1_dl_list *dl;
- for (i = 0; i < ARRAY_SIZE(dl->lists.all); ++i) {
- struct vsp1_dl_list *list = &dl->lists.all[i];
+ dl = vsp1_dl_list_alloc(dlm);
+ if (!dl)
+ return NULL;
- list->size = VSP1_DL_BODY_SIZE;
- list->reg_count = 0;
- list->in_use = false;
- list->dma = dl->dma + VSP1_DL_BODY_SIZE * i;
- list->body = dl->mem + VSP1_DL_BODY_SIZE * i;
+ list_add_tail(&dl->list, &dlm->free);
}
- return dl;
+ return dlm;
}
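+
+/* A WPF would typically create its manager at initialization time (sketch;
+ * "wpf" and the prealloc count of 3 are hypothetical example values chosen
+ * by the caller):
+ *
+ *	wpf->dlm = vsp1_dlm_create(vsp1, wpf->entity.index, 3);
+ *	if (!wpf->dlm)
+ *		return -ENOMEM;
+ */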
-void vsp1_dl_destroy(struct vsp1_dl *dl)
+void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
- dma_free_wc(dl->vsp1->dev, dl->size, dl->mem, dl->dma);
- kfree(dl);
+ struct vsp1_dl_list *dl, *next;
+
+ if (!dlm)
+ return;
+
+ list_for_each_entry_safe(dl, next, &dlm->free, list) {
+ list_del(&dl->list);
+ vsp1_dl_list_free(dl);
+ }
}
diff --git a/drivers/media/platform/vsp1/vsp1_dl.h b/drivers/media/platform/vsp1/vsp1_dl.h
index 448c4250e..de387cd4d 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.h
+++ b/drivers/media/platform/vsp1/vsp1_dl.h
@@ -13,30 +13,33 @@
#ifndef __VSP1_DL_H__
#define __VSP1_DL_H__
-#include "vsp1_entity.h"
+#include <linux/types.h>
struct vsp1_device;
-struct vsp1_dl;
-
-struct vsp1_dl *vsp1_dl_create(struct vsp1_device *vsp1);
-void vsp1_dl_destroy(struct vsp1_dl *dl);
-
-void vsp1_dl_setup(struct vsp1_device *vsp1);
-
-void vsp1_dl_reset(struct vsp1_dl *dl);
-void vsp1_dl_begin(struct vsp1_dl *dl);
-void vsp1_dl_add(struct vsp1_entity *e, u32 reg, u32 data);
-void vsp1_dl_commit(struct vsp1_dl *dl);
-
-void vsp1_dl_irq_display_start(struct vsp1_dl *dl);
-void vsp1_dl_irq_frame_end(struct vsp1_dl *dl);
-
-static inline void vsp1_dl_mod_write(struct vsp1_entity *e, u32 reg, u32 data)
-{
- if (e->vsp1->use_dl)
- vsp1_dl_add(e, reg, data);
- else
- vsp1_write(e->vsp1, reg, data);
-}
+struct vsp1_dl_fragment;
+struct vsp1_dl_list;
+struct vsp1_dl_manager;
+
+void vsp1_dlm_setup(struct vsp1_device *vsp1);
+
+struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
+ unsigned int index,
+ unsigned int prealloc);
+void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm);
+void vsp1_dlm_reset(struct vsp1_dl_manager *dlm);
+void vsp1_dlm_irq_display_start(struct vsp1_dl_manager *dlm);
+void vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm);
+
+struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm);
+void vsp1_dl_list_put(struct vsp1_dl_list *dl);
+void vsp1_dl_list_write(struct vsp1_dl_list *dl, u32 reg, u32 data);
+void vsp1_dl_list_commit(struct vsp1_dl_list *dl);
+
+struct vsp1_dl_body *vsp1_dl_fragment_alloc(struct vsp1_device *vsp1,
+ unsigned int num_entries);
+void vsp1_dl_fragment_free(struct vsp1_dl_body *dlb);
+void vsp1_dl_fragment_write(struct vsp1_dl_body *dlb, u32 reg, u32 data);
+int vsp1_dl_list_add_fragment(struct vsp1_dl_list *dl,
+ struct vsp1_dl_body *dlb);
#endif /* __VSP1_DL_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index 021fe5778..fc4bbc401 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -13,10 +13,10 @@
#include <linux/device.h>
#include <linux/slab.h>
-#include <linux/vsp1.h>
#include <media/media-entity.h>
#include <media/v4l2-subdev.h>
+#include <media/vsp1.h>
#include "vsp1.h"
#include "vsp1_bru.h"
@@ -26,18 +26,14 @@
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
+
/* -----------------------------------------------------------------------------
- * Runtime Handling
+ * Interrupt Handling
*/
-static void vsp1_drm_pipeline_frame_end(struct vsp1_pipeline *pipe)
+void vsp1_drm_display_start(struct vsp1_device *vsp1)
{
- unsigned long flags;
-
- spin_lock_irqsave(&pipe->irqlock, flags);
- if (pipe->num_inputs)
- vsp1_pipeline_run(pipe);
- spin_unlock_irqrestore(&pipe->irqlock, flags);
+ vsp1_dlm_irq_display_start(vsp1->drm->pipe.output->dlm);
}
/* -----------------------------------------------------------------------------
@@ -97,12 +93,14 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
media_entity_pipeline_stop(&pipe->output->entity.subdev.entity);
for (i = 0; i < bru->entity.source_pad; ++i) {
+ vsp1->drm->inputs[i].enabled = false;
bru->inputs[i].rpf = NULL;
pipe->inputs[i] = NULL;
}
pipe->num_inputs = 0;
+ vsp1_dlm_reset(pipe->output->dlm);
vsp1_device_put(vsp1);
dev_dbg(vsp1->dev, "%s: pipeline disabled\n", __func__);
@@ -110,8 +108,6 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
return 0;
}
- vsp1_dl_reset(vsp1->drm->dl);
-
/* Configure the format at the BRU sinks and propagate it through the
* pipeline.
*/
@@ -222,16 +218,11 @@ void vsp1_du_atomic_begin(struct device *dev)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
- unsigned long flags;
-
- spin_lock_irqsave(&pipe->irqlock, flags);
vsp1->drm->num_inputs = pipe->num_inputs;
- spin_unlock_irqrestore(&pipe->irqlock, flags);
-
/* Prepare the display list. */
- vsp1_dl_begin(vsp1->drm->dl);
+ pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
}
EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
@@ -244,10 +235,13 @@ EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
* @mem: DMA addresses of the memory buffers (one per plane)
* @src: the source crop rectangle for the RPF
* @dst: the destination compose rectangle for the BRU input
+ * @alpha: global alpha value for the input
+ * @zpos: the Z-order position of the input
*
* Configure the VSP to perform composition of the image referenced by @mem
* through RPF @rpf_index, using the @src crop rectangle and the @dst
- * composition rectangle. The Z-order is fixed with RPF 0 at the bottom.
+ * composition rectangle. The Z-order is configurable with higher @zpos values
+ * displayed on top.
*
* Image format as stored in memory is expressed as a V4L2 @pixelformat value.
* As a special case, setting the pixel format to 0 will disable the RPF. The
@@ -265,25 +259,17 @@ EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin);
*
* This function isn't reentrant; the caller needs to serialize calls.
*
- * TODO: Implement Z-order control by decoupling the RPF index from the BRU
- * input index.
- *
* Return 0 on success or a negative error code on failure.
*/
-int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
- u32 pixelformat, unsigned int pitch,
- dma_addr_t mem[2], const struct v4l2_rect *src,
- const struct v4l2_rect *dst)
+int vsp1_du_atomic_update_ext(struct device *dev, unsigned int rpf_index,
+ u32 pixelformat, unsigned int pitch,
+ dma_addr_t mem[2], const struct v4l2_rect *src,
+ const struct v4l2_rect *dst, unsigned int alpha,
+ unsigned int zpos)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
- struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
const struct vsp1_format_info *fmtinfo;
- struct v4l2_subdev_selection sel;
- struct v4l2_subdev_format format;
- struct vsp1_rwpf_memory memory;
struct vsp1_rwpf *rpf;
- unsigned long flags;
- int ret;
if (rpf_index >= vsp1->info->rpf_count)
return -EINVAL;
@@ -294,31 +280,20 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
dev_dbg(vsp1->dev, "%s: RPF%u: disable requested\n", __func__,
rpf_index);
- spin_lock_irqsave(&pipe->irqlock, flags);
-
- if (pipe->inputs[rpf_index]) {
- /* Remove the RPF from the pipeline if it was previously
- * enabled.
- */
- vsp1->bru->inputs[rpf_index].rpf = NULL;
- pipe->inputs[rpf_index] = NULL;
-
- pipe->num_inputs--;
- }
-
- spin_unlock_irqrestore(&pipe->irqlock, flags);
-
+ vsp1->drm->inputs[rpf_index].enabled = false;
return 0;
}
dev_dbg(vsp1->dev,
- "%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad }\n",
+ "%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad } zpos %u\n",
__func__, rpf_index,
src->left, src->top, src->width, src->height,
dst->left, dst->top, dst->width, dst->height,
- pixelformat, pitch, &mem[0], &mem[1]);
+ pixelformat, pitch, &mem[0], &mem[1], zpos);
- /* Set the stride at the RPF input. */
+ /* Store the format, stride, memory buffer address, crop and compose
+ * rectangles and Z-order position for the input.
+ */
fmtinfo = vsp1_get_format_info(pixelformat);
if (!fmtinfo) {
dev_dbg(vsp1->dev, "Unsupport pixel format %08x for RPF\n",
@@ -330,16 +305,40 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
rpf->format.num_planes = fmtinfo->planes;
rpf->format.plane_fmt[0].bytesperline = pitch;
rpf->format.plane_fmt[1].bytesperline = pitch;
+ rpf->alpha = alpha;
+
+ rpf->mem.addr[0] = mem[0];
+ rpf->mem.addr[1] = mem[1];
+ rpf->mem.addr[2] = 0;
+
+ vsp1->drm->inputs[rpf_index].crop = *src;
+ vsp1->drm->inputs[rpf_index].compose = *dst;
+ vsp1->drm->inputs[rpf_index].zpos = zpos;
+ vsp1->drm->inputs[rpf_index].enabled = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vsp1_du_atomic_update_ext);
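+
+/* From the DU side an atomic update would roughly look as follows (sketch;
+ * the pitch, memory addresses and rectangles come from the DRM plane state,
+ * and the alpha of 255 and zpos of 0 are arbitrary example values):
+ *
+ *	vsp1_du_atomic_begin(du_dev);
+ *	vsp1_du_atomic_update_ext(du_dev, 0, V4L2_PIX_FMT_ARGB32, pitch,
+ *				  mem, &src, &dst, 255, 0);
+ *	vsp1_du_atomic_flush(du_dev);
+ */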
+
+static int vsp1_du_setup_rpf_pipe(struct vsp1_device *vsp1,
+ struct vsp1_rwpf *rpf, unsigned int bru_input)
+{
+ struct v4l2_subdev_selection sel;
+ struct v4l2_subdev_format format;
+ const struct v4l2_rect *crop;
+ int ret;
/* Configure the format on the RPF sink pad and propagate it up to the
* BRU sink pad.
*/
+ crop = &vsp1->drm->inputs[rpf->entity.index].crop;
+
memset(&format, 0, sizeof(format));
format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
format.pad = RWPF_PAD_SINK;
- format.format.width = src->width + src->left;
- format.format.height = src->height + src->top;
- format.format.code = fmtinfo->mbus;
+ format.format.width = crop->width + crop->left;
+ format.format.height = crop->height + crop->top;
+ format.format.code = rpf->fmtinfo->mbus;
format.format.field = V4L2_FIELD_NONE;
ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_fmt, NULL,
@@ -356,7 +355,7 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
sel.pad = RWPF_PAD_SINK;
sel.target = V4L2_SEL_TGT_CROP;
- sel.r = *src;
+ sel.r = *crop;
ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_selection, NULL,
&sel);
@@ -391,7 +390,7 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
return ret;
/* BRU sink, propagate the format from the RPF source. */
- format.pad = rpf->entity.index;
+ format.pad = bru_input;
ret = v4l2_subdev_call(&vsp1->bru->entity.subdev, pad, set_fmt, NULL,
&format);
@@ -402,9 +401,9 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
__func__, format.format.width, format.format.height,
format.format.code, format.pad);
- sel.pad = rpf->entity.index;
+ sel.pad = bru_input;
sel.target = V4L2_SEL_TGT_COMPOSE;
- sel.r = *dst;
+ sel.r = vsp1->drm->inputs[rpf->entity.index].compose;
ret = v4l2_subdev_call(&vsp1->bru->entity.subdev, pad, set_selection,
NULL, &sel);
@@ -416,33 +415,13 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
__func__, sel.r.left, sel.r.top, sel.r.width, sel.r.height,
sel.pad);
- /* Store the compose rectangle coordinates in the RPF. */
- rpf->location.left = dst->left;
- rpf->location.top = dst->top;
-
- /* Set the memory buffer address. */
- memory.num_planes = fmtinfo->planes;
- memory.addr[0] = mem[0];
- memory.addr[1] = mem[1];
-
- rpf->ops->set_memory(rpf, &memory);
-
- spin_lock_irqsave(&pipe->irqlock, flags);
-
- /* If the RPF was previously stopped set the BRU input to the RPF and
- * store the RPF in the pipeline inputs array.
- */
- if (!pipe->inputs[rpf->entity.index]) {
- vsp1->bru->inputs[rpf_index].rpf = rpf;
- pipe->inputs[rpf->entity.index] = rpf;
- pipe->num_inputs++;
- }
-
- spin_unlock_irqrestore(&pipe->irqlock, flags);
-
return 0;
}
-EXPORT_SYMBOL_GPL(vsp1_du_atomic_update);
+
+static unsigned int rpf_zpos(struct vsp1_device *vsp1, struct vsp1_rwpf *rpf)
+{
+ return vsp1->drm->inputs[rpf->entity.index].zpos;
+}
/**
* vsp1_du_atomic_flush - Commit an atomic update
@@ -452,51 +431,96 @@ void vsp1_du_atomic_flush(struct device *dev)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
+ struct vsp1_rwpf *inputs[VSP1_MAX_RPF] = { NULL, };
struct vsp1_entity *entity;
unsigned long flags;
- bool stop = false;
+ unsigned int i;
int ret;
+ /* Count the number of enabled inputs and sort them by Z-order. */
+ pipe->num_inputs = 0;
+
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *rpf = vsp1->rpf[i];
+ unsigned int j;
+
+ if (!vsp1->drm->inputs[i].enabled) {
+ pipe->inputs[i] = NULL;
+ continue;
+ }
+
+ pipe->inputs[i] = rpf;
+
+ /* Insert the RPF in the sorted RPFs array. */
+ for (j = pipe->num_inputs++; j > 0; --j) {
+ if (rpf_zpos(vsp1, inputs[j-1]) <= rpf_zpos(vsp1, rpf))
+ break;
+ inputs[j] = inputs[j-1];
+ }
+
+ inputs[j] = rpf;
+ }
+
+ /* Setup the RPF input pipeline for every enabled input. */
+ for (i = 0; i < vsp1->info->num_bru_inputs; ++i) {
+ struct vsp1_rwpf *rpf = inputs[i];
+
+ if (!rpf) {
+ vsp1->bru->inputs[i].rpf = NULL;
+ continue;
+ }
+
+ vsp1->bru->inputs[i].rpf = rpf;
+ rpf->bru_input = i;
+ rpf->entity.sink_pad = i;
+
+ dev_dbg(vsp1->dev, "%s: connecting RPF.%u to BRU:%u\n",
+ __func__, rpf->entity.index, i);
+
+ ret = vsp1_du_setup_rpf_pipe(vsp1, rpf, i);
+ if (ret < 0)
+ dev_err(vsp1->dev,
+ "%s: failed to setup RPF.%u\n",
+ __func__, rpf->entity.index);
+ }
+
+ /* Configure all entities in the pipeline. */
list_for_each_entry(entity, &pipe->entities, list_pipe) {
/* Disconnect unused RPFs from the pipeline. */
if (entity->type == VSP1_ENTITY_RPF) {
struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
if (!pipe->inputs[rpf->entity.index]) {
- vsp1_mod_write(entity, entity->route->reg,
- VI6_DPR_NODE_UNUSED);
+ vsp1_dl_list_write(pipe->dl, entity->route->reg,
+ VI6_DPR_NODE_UNUSED);
continue;
}
}
- vsp1_entity_route_setup(entity);
+ vsp1_entity_route_setup(entity, pipe->dl);
- ret = v4l2_subdev_call(&entity->subdev, video,
- s_stream, 1);
- if (ret < 0) {
- dev_err(vsp1->dev,
- "DRM pipeline start failure on entity %s\n",
- entity->subdev.name);
- return;
- }
- }
+ if (entity->ops->configure)
+ entity->ops->configure(entity, pipe, pipe->dl);
- vsp1_dl_commit(vsp1->drm->dl);
+ /* The memory buffer address must be applied after configuring
+ * the RPF to make sure the crop offsets are computed.
+ */
+ if (entity->type == VSP1_ENTITY_RPF)
+ vsp1_rwpf_set_memory(to_rwpf(&entity->subdev),
+ pipe->dl);
+ }
- spin_lock_irqsave(&pipe->irqlock, flags);
+ vsp1_dl_list_commit(pipe->dl);
+ pipe->dl = NULL;
/* Start or stop the pipeline if needed. */
if (!vsp1->drm->num_inputs && pipe->num_inputs) {
vsp1_write(vsp1, VI6_DISP_IRQ_STA, 0);
vsp1_write(vsp1, VI6_DISP_IRQ_ENB, VI6_DISP_IRQ_ENB_DSTE);
+ spin_lock_irqsave(&pipe->irqlock, flags);
vsp1_pipeline_run(pipe);
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
} else if (vsp1->drm->num_inputs && !pipe->num_inputs) {
- stop = true;
- }
-
- spin_unlock_irqrestore(&pipe->irqlock, flags);
-
- if (stop) {
vsp1_write(vsp1, VI6_DISP_IRQ_ENB, 0);
vsp1_pipeline_stop(pipe);
}
@@ -562,14 +586,9 @@ int vsp1_drm_init(struct vsp1_device *vsp1)
if (!vsp1->drm)
return -ENOMEM;
- vsp1->drm->dl = vsp1_dl_create(vsp1);
- if (!vsp1->drm->dl)
- return -ENOMEM;
-
pipe = &vsp1->drm->pipe;
vsp1_pipeline_init(pipe);
- pipe->frame_end = vsp1_drm_pipeline_frame_end;
/* The DRM pipeline is static, add entities manually. */
for (i = 0; i < vsp1->info->rpf_count; ++i) {
@@ -586,12 +605,9 @@ int vsp1_drm_init(struct vsp1_device *vsp1)
pipe->lif = &vsp1->lif->entity;
pipe->output = vsp1->wpf[0];
- pipe->dl = vsp1->drm->dl;
-
return 0;
}
void vsp1_drm_cleanup(struct vsp1_device *vsp1)
{
- vsp1_dl_destroy(vsp1->drm->dl);
}
diff --git a/drivers/media/platform/vsp1/vsp1_drm.h b/drivers/media/platform/vsp1/vsp1_drm.h
index f68056838..9e28ab925 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.h
+++ b/drivers/media/platform/vsp1/vsp1_drm.h
@@ -13,37 +13,32 @@
#ifndef __VSP1_DRM_H__
#define __VSP1_DRM_H__
-#include "vsp1_pipe.h"
+#include <linux/videodev2.h>
-struct vsp1_dl;
+#include "vsp1_pipe.h"
/**
* vsp1_drm - State for the API exposed to the DRM driver
- * @dl: display list for DRM pipeline operation
* @pipe: the VSP1 pipeline used for display
* @num_inputs: number of active pipeline inputs at the beginning of an update
- * @update: the pipeline configuration has been updated
+ * @inputs: source crop rectangle, destination compose rectangle and Z-order
+ * position for every input
*/
struct vsp1_drm {
- struct vsp1_dl *dl;
struct vsp1_pipeline pipe;
unsigned int num_inputs;
- bool update;
+ struct {
+ bool enabled;
+ struct v4l2_rect crop;
+ struct v4l2_rect compose;
+ unsigned int zpos;
+ } inputs[VSP1_MAX_RPF];
};
int vsp1_drm_init(struct vsp1_device *vsp1);
void vsp1_drm_cleanup(struct vsp1_device *vsp1);
int vsp1_drm_create_links(struct vsp1_device *vsp1);
-int vsp1_du_init(struct device *dev);
-int vsp1_du_setup_lif(struct device *dev, unsigned int width,
- unsigned int height);
-void vsp1_du_atomic_begin(struct device *dev);
-int vsp1_du_atomic_update(struct device *dev, unsigned int rpf_index,
- u32 pixelformat, unsigned int pitch,
- dma_addr_t mem[2], const struct v4l2_rect *src,
- const struct v4l2_rect *dst);
-void vsp1_du_atomic_flush(struct device *dev);
-
+void vsp1_drm_display_start(struct vsp1_device *vsp1);
#endif /* __VSP1_DRM_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 25750a0e4..e2d779fac 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -30,6 +30,7 @@
#include "vsp1_hsit.h"
#include "vsp1_lif.h"
#include "vsp1_lut.h"
+#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_sru.h"
#include "vsp1_uds.h"
@@ -49,17 +50,15 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data)
for (i = 0; i < vsp1->info->wpf_count; ++i) {
struct vsp1_rwpf *wpf = vsp1->wpf[i];
- struct vsp1_pipeline *pipe;
if (wpf == NULL)
continue;
- pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
status = vsp1_read(vsp1, VI6_WPF_IRQ_STA(i));
vsp1_write(vsp1, VI6_WPF_IRQ_STA(i), ~status & mask);
if (status & VI6_WFP_IRQ_STA_FRE) {
- vsp1_pipeline_frame_end(pipe);
+ vsp1_pipeline_frame_end(wpf->pipe);
ret = IRQ_HANDLED;
}
}
@@ -68,14 +67,7 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data)
vsp1_write(vsp1, VI6_DISP_IRQ_STA, ~status & VI6_DISP_IRQ_STA_DST);
if (status & VI6_DISP_IRQ_STA_DST) {
- struct vsp1_rwpf *wpf = vsp1->wpf[0];
- struct vsp1_pipeline *pipe;
-
- if (wpf) {
- pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
- vsp1_pipeline_display_start(pipe);
- }
-
+ vsp1_drm_display_start(vsp1);
ret = IRQ_HANDLED;
}
@@ -387,13 +379,10 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
/* Register subdev nodes if the userspace API is enabled or initialize
* the DRM pipeline otherwise.
*/
- if (vsp1->info->uapi) {
- vsp1->use_dl = false;
+ if (vsp1->info->uapi)
ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev);
- } else {
- vsp1->use_dl = true;
+ else
ret = vsp1_drm_init(vsp1);
- }
if (ret < 0)
goto done;
@@ -465,8 +454,7 @@ static int vsp1_device_init(struct vsp1_device *vsp1)
vsp1_write(vsp1, VI6_DPR_HGT_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
(VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
- if (vsp1->use_dl)
- vsp1_dl_setup(vsp1);
+ vsp1_dlm_setup(vsp1);
return 0;
}
@@ -570,6 +558,7 @@ static const struct dev_pm_ops vsp1_pm_ops = {
static const struct vsp1_device_info vsp1_device_infos[] = {
{
.version = VI6_IP_VERSION_MODEL_VSPS_H2,
+ .gen = 2,
.features = VSP1_HAS_BRU | VSP1_HAS_LUT | VSP1_HAS_SRU,
.rpf_count = 5,
.uds_count = 3,
@@ -578,6 +567,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPR_H2,
+ .gen = 2,
.features = VSP1_HAS_BRU | VSP1_HAS_SRU,
.rpf_count = 5,
.uds_count = 1,
@@ -586,6 +576,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPD_GEN2,
+ .gen = 2,
.features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_LUT,
.rpf_count = 4,
.uds_count = 1,
@@ -594,6 +585,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPS_M2,
+ .gen = 2,
.features = VSP1_HAS_BRU | VSP1_HAS_LUT | VSP1_HAS_SRU,
.rpf_count = 5,
.uds_count = 3,
@@ -602,6 +594,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPI_GEN3,
+ .gen = 3,
.features = VSP1_HAS_LUT | VSP1_HAS_SRU,
.rpf_count = 1,
.uds_count = 1,
@@ -609,6 +602,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPBD_GEN3,
+ .gen = 3,
.features = VSP1_HAS_BRU,
.rpf_count = 5,
.wpf_count = 1,
@@ -616,6 +610,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPBC_GEN3,
+ .gen = 3,
.features = VSP1_HAS_BRU | VSP1_HAS_LUT,
.rpf_count = 5,
.wpf_count = 1,
@@ -623,7 +618,8 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uapi = true,
}, {
.version = VI6_IP_VERSION_MODEL_VSPD_GEN3,
- .features = VSP1_HAS_BRU | VSP1_HAS_LIF | VSP1_HAS_LUT,
+ .gen = 3,
+ .features = VSP1_HAS_BRU | VSP1_HAS_LIF,
.rpf_count = 5,
.wpf_count = 2,
.num_bru_inputs = 5,
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
index 20a78fbd3..3d070bcc6 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.c
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -19,46 +19,11 @@
#include <media/v4l2-subdev.h>
#include "vsp1.h"
+#include "vsp1_dl.h"
#include "vsp1_entity.h"
-bool vsp1_entity_is_streaming(struct vsp1_entity *entity)
-{
- unsigned long flags;
- bool streaming;
-
- spin_lock_irqsave(&entity->lock, flags);
- streaming = entity->streaming;
- spin_unlock_irqrestore(&entity->lock, flags);
-
- return streaming;
-}
-
-int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&entity->lock, flags);
- entity->streaming = streaming;
- spin_unlock_irqrestore(&entity->lock, flags);
-
- if (!streaming)
- return 0;
-
- if (!entity->vsp1->info->uapi || !entity->subdev.ctrl_handler)
- return 0;
-
- ret = v4l2_ctrl_handler_setup(entity->subdev.ctrl_handler);
- if (ret < 0) {
- spin_lock_irqsave(&entity->lock, flags);
- entity->streaming = false;
- spin_unlock_irqrestore(&entity->lock, flags);
- }
-
- return ret;
-}
-
-void vsp1_entity_route_setup(struct vsp1_entity *source)
+void vsp1_entity_route_setup(struct vsp1_entity *source,
+ struct vsp1_dl_list *dl)
{
struct vsp1_entity *sink;
@@ -66,40 +31,74 @@ void vsp1_entity_route_setup(struct vsp1_entity *source)
return;
sink = container_of(source->sink, struct vsp1_entity, subdev.entity);
- vsp1_mod_write(source, source->route->reg,
- sink->route->inputs[source->sink_pad]);
+ vsp1_dl_list_write(dl, source->route->reg,
+ sink->route->inputs[source->sink_pad]);
}
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Operations
*/
-struct v4l2_mbus_framefmt *
-vsp1_entity_get_pad_format(struct vsp1_entity *entity,
+/**
+ * vsp1_entity_get_pad_config - Get the pad configuration for an entity
+ * @entity: the entity
+ * @cfg: the TRY pad configuration
+ * @which: configuration selector (ACTIVE or TRY)
+ *
+ * Return the pad configuration requested by the which argument. The TRY
+ * configuration is passed explicitly to the function through the cfg argument
+ * and simply returned when requested. The ACTIVE configuration comes from the
+ * entity structure.
+ */
+struct v4l2_subdev_pad_config *
+vsp1_entity_get_pad_config(struct vsp1_entity *entity,
struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, u32 which)
+ enum v4l2_subdev_format_whence which)
{
switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&entity->subdev, cfg, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &entity->formats[pad];
+ return entity->config;
+ case V4L2_SUBDEV_FORMAT_TRY:
default:
- return NULL;
+ return cfg;
}
}
+/**
+ * vsp1_entity_get_pad_format - Get a pad format from storage for an entity
+ * @entity: the entity
+ * @cfg: the configuration storage
+ * @pad: the pad number
+ *
+ * Return the format stored in the given configuration for an entity's pad. The
+ * configuration can be an ACTIVE or TRY configuration.
+ */
+struct v4l2_mbus_framefmt *
+vsp1_entity_get_pad_format(struct vsp1_entity *entity,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad)
+{
+ return v4l2_subdev_get_try_format(&entity->subdev, cfg, pad);
+}
+
+struct v4l2_rect *
+vsp1_entity_get_pad_compose(struct vsp1_entity *entity,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad)
+{
+ return v4l2_subdev_get_try_compose(&entity->subdev, cfg, pad);
+}
+
/*
- * vsp1_entity_init_formats - Initialize formats on all pads
+ * vsp1_entity_init_cfg - Initialize formats on all pads
* @subdev: V4L2 subdevice
* @cfg: V4L2 subdev pad configuration
*
- * Initialize all pad formats with default values. If cfg is not NULL, try
- * formats are initialized on the file handle. Otherwise active formats are
- * initialized on the device.
+ * Initialize all pad formats with default values in the given pad config. This
+ * function can be used as a handler for the subdev pad::init_cfg operation.
*/
-void vsp1_entity_init_formats(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg)
+int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg)
{
struct v4l2_subdev_format format;
unsigned int pad;
@@ -113,19 +112,132 @@ void vsp1_entity_init_formats(struct v4l2_subdev *subdev,
v4l2_subdev_call(subdev, pad, set_fmt, cfg, &format);
}
+
+ return 0;
}
-static int vsp1_entity_open(struct v4l2_subdev *subdev,
- struct v4l2_subdev_fh *fh)
+/*
+ * vsp1_subdev_get_pad_format - Subdev pad get_fmt handler
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fmt: V4L2 subdev format
+ *
+ * This function implements the subdev get_fmt pad operation. It can be used as
+ * a direct drop-in for the operation handler.
+ */
+int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
{
- vsp1_entity_init_formats(subdev, fh->pad);
+ struct vsp1_entity *entity = to_vsp1_entity(subdev);
+ struct v4l2_subdev_pad_config *config;
+
+ config = vsp1_entity_get_pad_config(entity, cfg, fmt->which);
+ if (!config)
+ return -EINVAL;
+
+ fmt->format = *vsp1_entity_get_pad_format(entity, config, fmt->pad);
return 0;
}
-const struct v4l2_subdev_internal_ops vsp1_subdev_internal_ops = {
- .open = vsp1_entity_open,
-};
+/*
+ * vsp1_subdev_enum_mbus_code - Subdev pad enum_mbus_code handler
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @code: Media bus code enumeration
+ * @codes: Array of supported media bus codes
+ * @ncodes: Number of supported media bus codes
+ *
+ * This function implements the subdev enum_mbus_code pad operation for entities
+ * that do not support format conversion. It enumerates the given supported
+ * media bus codes on the sink pad and reports a source pad format identical to
+ * the sink pad.
+ */
+int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code,
+ const unsigned int *codes, unsigned int ncodes)
+{
+ struct vsp1_entity *entity = to_vsp1_entity(subdev);
+
+ if (code->pad == 0) {
+ if (code->index >= ncodes)
+ return -EINVAL;
+
+ code->code = codes[code->index];
+ } else {
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+
+ /* The entity can't perform format conversion, the sink format
+ * is always identical to the source format.
+ */
+ if (code->index)
+ return -EINVAL;
+
+ config = vsp1_entity_get_pad_config(entity, cfg, code->which);
+ if (!config)
+ return -EINVAL;
+
+ format = vsp1_entity_get_pad_format(entity, config, 0);
+ code->code = format->code;
+ }
+
+ return 0;
+}
+
+/*
+ * vsp1_subdev_enum_frame_size - Subdev pad enum_frame_size handler
+ * @subdev: V4L2 subdevice
+ * @cfg: V4L2 subdev pad configuration
+ * @fse: Frame size enumeration
+ * @min_width: Minimum image width
+ * @min_height: Minimum image height
+ * @max_width: Maximum image width
+ * @max_height: Maximum image height
+ *
+ * This function implements the subdev enum_frame_size pad operation for
+ * entities that do not support scaling or cropping. It reports the given
+ * minimum and maximum frame width and height on the sink pad, and a fixed
+ * source pad size identical to the sink pad.
+ */
+int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse,
+ unsigned int min_width, unsigned int min_height,
+ unsigned int max_width, unsigned int max_height)
+{
+ struct vsp1_entity *entity = to_vsp1_entity(subdev);
+ struct v4l2_subdev_pad_config *config;
+ struct v4l2_mbus_framefmt *format;
+
+ config = vsp1_entity_get_pad_config(entity, cfg, fse->which);
+ if (!config)
+ return -EINVAL;
+
+ format = vsp1_entity_get_pad_format(entity, config, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == 0) {
+ fse->min_width = min_width;
+ fse->max_width = max_width;
+ fse->min_height = min_height;
+ fse->max_height = max_height;
+ } else {
+ /* The size on the source pad is fixed and always identical to
+ * the size on the sink pad.
+ */
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+ return 0;
+}
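+
+/* Entities can plug these helpers directly into their pad operations
+ * (sketch; "foo" is a hypothetical entity, mirroring the HSIT conversion
+ * below):
+ *
+ *	static struct v4l2_subdev_pad_ops foo_pad_ops = {
+ *		.init_cfg = vsp1_entity_init_cfg,
+ *		.enum_mbus_code = foo_enum_mbus_code,
+ *		.enum_frame_size = foo_enum_frame_size,
+ *		.get_fmt = vsp1_subdev_get_pad_format,
+ *		.set_fmt = foo_set_format,
+ *	};
+ */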
/* -----------------------------------------------------------------------------
* Media Operations
@@ -171,11 +283,11 @@ static const struct vsp1_route vsp1_routes[] = {
{ VSP1_ENTITY_HST, 0, VI6_DPR_HST_ROUTE, { VI6_DPR_NODE_HST, } },
{ VSP1_ENTITY_LIF, 0, 0, { VI6_DPR_NODE_LIF, } },
{ VSP1_ENTITY_LUT, 0, VI6_DPR_LUT_ROUTE, { VI6_DPR_NODE_LUT, } },
- { VSP1_ENTITY_RPF, 0, VI6_DPR_RPF_ROUTE(0), { VI6_DPR_NODE_RPF(0), } },
- { VSP1_ENTITY_RPF, 1, VI6_DPR_RPF_ROUTE(1), { VI6_DPR_NODE_RPF(1), } },
- { VSP1_ENTITY_RPF, 2, VI6_DPR_RPF_ROUTE(2), { VI6_DPR_NODE_RPF(2), } },
- { VSP1_ENTITY_RPF, 3, VI6_DPR_RPF_ROUTE(3), { VI6_DPR_NODE_RPF(3), } },
- { VSP1_ENTITY_RPF, 4, VI6_DPR_RPF_ROUTE(4), { VI6_DPR_NODE_RPF(4), } },
+ { VSP1_ENTITY_RPF, 0, VI6_DPR_RPF_ROUTE(0), { 0, } },
+ { VSP1_ENTITY_RPF, 1, VI6_DPR_RPF_ROUTE(1), { 0, } },
+ { VSP1_ENTITY_RPF, 2, VI6_DPR_RPF_ROUTE(2), { 0, } },
+ { VSP1_ENTITY_RPF, 3, VI6_DPR_RPF_ROUTE(3), { 0, } },
+ { VSP1_ENTITY_RPF, 4, VI6_DPR_RPF_ROUTE(4), { 0, } },
{ VSP1_ENTITY_SRU, 0, VI6_DPR_SRU_ROUTE, { VI6_DPR_NODE_SRU, } },
{ VSP1_ENTITY_UDS, 0, VI6_DPR_UDS_ROUTE(0), { VI6_DPR_NODE_UDS(0), } },
{ VSP1_ENTITY_UDS, 1, VI6_DPR_UDS_ROUTE(1), { VI6_DPR_NODE_UDS(1), } },
@@ -187,9 +299,12 @@ static const struct vsp1_route vsp1_routes[] = {
};
int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
- unsigned int num_pads)
+ const char *name, unsigned int num_pads,
+ const struct v4l2_subdev_ops *ops)
{
+ struct v4l2_subdev *subdev;
unsigned int i;
+ int ret;
for (i = 0; i < ARRAY_SIZE(vsp1_routes); ++i) {
if (vsp1_routes[i].type == entity->type &&
@@ -202,37 +317,56 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
if (i == ARRAY_SIZE(vsp1_routes))
return -EINVAL;
- spin_lock_init(&entity->lock);
-
entity->vsp1 = vsp1;
entity->source_pad = num_pads - 1;
- /* Allocate formats and pads. */
- entity->formats = devm_kzalloc(vsp1->dev,
- num_pads * sizeof(*entity->formats),
- GFP_KERNEL);
- if (entity->formats == NULL)
- return -ENOMEM;
-
+ /* Allocate and initialize pads. */
entity->pads = devm_kzalloc(vsp1->dev, num_pads * sizeof(*entity->pads),
GFP_KERNEL);
if (entity->pads == NULL)
return -ENOMEM;
- /* Initialize pads. */
for (i = 0; i < num_pads - 1; ++i)
entity->pads[i].flags = MEDIA_PAD_FL_SINK;
entity->pads[num_pads - 1].flags = MEDIA_PAD_FL_SOURCE;
/* Initialize the media entity. */
- return media_entity_pads_init(&entity->subdev.entity, num_pads,
- entity->pads);
+ ret = media_entity_pads_init(&entity->subdev.entity, num_pads,
+ entity->pads);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize the V4L2 subdev. */
+ subdev = &entity->subdev;
+ v4l2_subdev_init(subdev, ops);
+
+ subdev->entity.ops = &vsp1->media_ops;
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ snprintf(subdev->name, sizeof(subdev->name), "%s %s",
+ dev_name(vsp1->dev), name);
+
+ vsp1_entity_init_cfg(subdev, NULL);
+
+ /* Allocate the pad configuration to store formats and selection
+ * rectangles.
+ */
+ entity->config = v4l2_subdev_alloc_pad_config(&entity->subdev);
+ if (entity->config == NULL) {
+ media_entity_cleanup(&entity->subdev.entity);
+ return -ENOMEM;
+ }
+
+ return 0;
}
void vsp1_entity_destroy(struct vsp1_entity *entity)
{
+ if (entity->ops && entity->ops->destroy)
+ entity->ops->destroy(entity);
if (entity->subdev.ctrl_handler)
v4l2_ctrl_handler_free(entity->subdev.ctrl_handler);
+ v4l2_subdev_free_pad_config(entity->config);
media_entity_cleanup(&entity->subdev.entity);
}
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
index 83570dfde..69eff4e17 100644
--- a/drivers/media/platform/vsp1/vsp1_entity.h
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -19,6 +19,8 @@
#include <media/v4l2-subdev.h>
struct vsp1_device;
+struct vsp1_dl_list;
+struct vsp1_pipeline;
enum vsp1_entity_type {
VSP1_ENTITY_BRU,
@@ -53,9 +55,27 @@ struct vsp1_route {
unsigned int inputs[VSP1_ENTITY_MAX_INPUTS];
};
+/**
+ * struct vsp1_entity_operations - Entity operations
+ * @destroy: Destroy the entity.
+ * @set_memory: Setup memory buffer access. This operation applies the settings
+ * stored in the rwpf mem field to the display list. Valid for RPF
+ * and WPF only.
+ * @configure: Setup the hardware based on the entity state (pipeline, formats,
+ * selection rectangles, ...)
+ */
+struct vsp1_entity_operations {
+ void (*destroy)(struct vsp1_entity *);
+ void (*set_memory)(struct vsp1_entity *, struct vsp1_dl_list *dl);
+ void (*configure)(struct vsp1_entity *, struct vsp1_pipeline *,
+ struct vsp1_dl_list *);
+};
+
struct vsp1_entity {
struct vsp1_device *vsp1;
+ const struct vsp1_entity_operations *ops;
+
enum vsp1_entity_type type;
unsigned int index;
const struct vsp1_route *route;
@@ -70,10 +90,7 @@ struct vsp1_entity {
unsigned int sink_pad;
struct v4l2_subdev subdev;
- struct v4l2_mbus_framefmt *formats;
-
- spinlock_t lock; /* Protects the streaming field */
- bool streaming;
+ struct v4l2_subdev_pad_config *config;
};
static inline struct vsp1_entity *to_vsp1_entity(struct v4l2_subdev *subdev)
@@ -82,7 +99,8 @@ static inline struct vsp1_entity *to_vsp1_entity(struct v4l2_subdev *subdev)
}
int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
- unsigned int num_pads);
+ const char *name, unsigned int num_pads,
+ const struct v4l2_subdev_ops *ops);
void vsp1_entity_destroy(struct vsp1_entity *entity);
extern const struct v4l2_subdev_internal_ops vsp1_subdev_internal_ops;
@@ -91,16 +109,35 @@ int vsp1_entity_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags);
+struct v4l2_subdev_pad_config *
+vsp1_entity_get_pad_config(struct vsp1_entity *entity,
+ struct v4l2_subdev_pad_config *cfg,
+ enum v4l2_subdev_format_whence which);
struct v4l2_mbus_framefmt *
vsp1_entity_get_pad_format(struct vsp1_entity *entity,
struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, u32 which);
-void vsp1_entity_init_formats(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg);
-
-bool vsp1_entity_is_streaming(struct vsp1_entity *entity);
-int vsp1_entity_set_streaming(struct vsp1_entity *entity, bool streaming);
-
-void vsp1_entity_route_setup(struct vsp1_entity *source);
+ unsigned int pad);
+struct v4l2_rect *
+vsp1_entity_get_pad_compose(struct vsp1_entity *entity,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad);
+int vsp1_entity_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg);
+
+void vsp1_entity_route_setup(struct vsp1_entity *source,
+ struct vsp1_dl_list *dl);
+
+int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt);
+int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code,
+ const unsigned int *codes, unsigned int ncodes);
+int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse,
+ unsigned int min_w, unsigned int min_h,
+ unsigned int max_w, unsigned int max_h);
#endif /* __VSP1_ENTITY_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index c1087cff3..68b8567b3 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -17,6 +17,7 @@
#include <media/v4l2-subdev.h>
#include "vsp1.h"
+#include "vsp1_dl.h"
#include "vsp1_hsit.h"
#define HSIT_MIN_SIZE 4U
@@ -26,32 +27,14 @@
* Device Access
*/
-static inline void vsp1_hsit_write(struct vsp1_hsit *hsit, u32 reg, u32 data)
+static inline void vsp1_hsit_write(struct vsp1_hsit *hsit,
+ struct vsp1_dl_list *dl, u32 reg, u32 data)
{
- vsp1_write(hsit->entity.vsp1, reg, data);
+ vsp1_dl_list_write(dl, reg, data);
}
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
- */
-
-static int hsit_s_stream(struct v4l2_subdev *subdev, int enable)
-{
- struct vsp1_hsit *hsit = to_hsit(subdev);
-
- if (!enable)
- return 0;
-
- if (hsit->inverse)
- vsp1_hsit_write(hsit, VI6_HSI_CTRL, VI6_HSI_CTRL_EN);
- else
- vsp1_hsit_write(hsit, VI6_HST_CTRL, VI6_HST_CTRL_EN);
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Pad Operations
+ * V4L2 Subdevice Operations
*/
static int hsit_enum_mbus_code(struct v4l2_subdev *subdev,
@@ -76,43 +59,9 @@ static int hsit_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
- struct vsp1_hsit *hsit = to_hsit(subdev);
- struct v4l2_mbus_framefmt *format;
-
- format = vsp1_entity_get_pad_format(&hsit->entity, cfg, fse->pad,
- fse->which);
-
- if (fse->index || fse->code != format->code)
- return -EINVAL;
-
- if (fse->pad == HSIT_PAD_SINK) {
- fse->min_width = HSIT_MIN_SIZE;
- fse->max_width = HSIT_MAX_SIZE;
- fse->min_height = HSIT_MIN_SIZE;
- fse->max_height = HSIT_MAX_SIZE;
- } else {
- /* The size on the source pad are fixed and always identical to
- * the size on the sink pad.
- */
- fse->min_width = format->width;
- fse->max_width = format->width;
- fse->min_height = format->height;
- fse->max_height = format->height;
- }
-
- return 0;
-}
-
-static int hsit_get_format(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vsp1_hsit *hsit = to_hsit(subdev);
-
- fmt->format = *vsp1_entity_get_pad_format(&hsit->entity, cfg, fmt->pad,
- fmt->which);
-
- return 0;
+ return vsp1_subdev_enum_frame_size(subdev, cfg, fse, HSIT_MIN_SIZE,
+ HSIT_MIN_SIZE, HSIT_MAX_SIZE,
+ HSIT_MAX_SIZE);
}
static int hsit_set_format(struct v4l2_subdev *subdev,
@@ -120,10 +69,14 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_format *fmt)
{
struct vsp1_hsit *hsit = to_hsit(subdev);
+ struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
- format = vsp1_entity_get_pad_format(&hsit->entity, cfg, fmt->pad,
- fmt->which);
+ config = vsp1_entity_get_pad_config(&hsit->entity, cfg, fmt->which);
+ if (!config)
+ return -EINVAL;
+
+ format = vsp1_entity_get_pad_format(&hsit->entity, config, fmt->pad);
if (fmt->pad == HSIT_PAD_SOURCE) {
/* The HST and HSI output format code and resolution can't be
@@ -145,8 +98,8 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
fmt->format = *format;
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&hsit->entity, cfg, HSIT_PAD_SOURCE,
- fmt->which);
+ format = vsp1_entity_get_pad_format(&hsit->entity, config,
+ HSIT_PAD_SOURCE);
*format = fmt->format;
format->code = hsit->inverse ? MEDIA_BUS_FMT_ARGB8888_1X32
: MEDIA_BUS_FMT_AHSV8888_1X32;
@@ -154,33 +107,44 @@ static int hsit_set_format(struct v4l2_subdev *subdev,
return 0;
}
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static struct v4l2_subdev_video_ops hsit_video_ops = {
- .s_stream = hsit_s_stream,
-};
-
static struct v4l2_subdev_pad_ops hsit_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = hsit_enum_mbus_code,
.enum_frame_size = hsit_enum_frame_size,
- .get_fmt = hsit_get_format,
+ .get_fmt = vsp1_subdev_get_pad_format,
.set_fmt = hsit_set_format,
};
static struct v4l2_subdev_ops hsit_ops = {
- .video = &hsit_video_ops,
.pad = &hsit_pad_ops,
};
/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void hsit_configure(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl)
+{
+ struct vsp1_hsit *hsit = to_hsit(&entity->subdev);
+
+ if (hsit->inverse)
+ vsp1_hsit_write(hsit, dl, VI6_HSI_CTRL, VI6_HSI_CTRL_EN);
+ else
+ vsp1_hsit_write(hsit, dl, VI6_HST_CTRL, VI6_HST_CTRL_EN);
+}
+
+static const struct vsp1_entity_operations hsit_entity_ops = {
+ .configure = hsit_configure,
+};
+
+/* -----------------------------------------------------------------------------
* Initialization and Cleanup
*/
struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse)
{
- struct v4l2_subdev *subdev;
struct vsp1_hsit *hsit;
int ret;
@@ -190,27 +154,17 @@ struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse)
hsit->inverse = inverse;
+ hsit->entity.ops = &hsit_entity_ops;
+
if (inverse)
hsit->entity.type = VSP1_ENTITY_HSI;
else
hsit->entity.type = VSP1_ENTITY_HST;
- ret = vsp1_entity_init(vsp1, &hsit->entity, 2);
+ ret = vsp1_entity_init(vsp1, &hsit->entity, inverse ? "hsi" : "hst", 2,
+ &hsit_ops);
if (ret < 0)
return ERR_PTR(ret);
- /* Initialize the V4L2 subdev. */
- subdev = &hsit->entity.subdev;
- v4l2_subdev_init(subdev, &hsit_ops);
-
- subdev->entity.ops = &vsp1->media_ops;
- subdev->internal_ops = &vsp1_subdev_internal_ops;
- snprintf(subdev->name, sizeof(subdev->name), "%s %s",
- dev_name(vsp1->dev), inverse ? "hsi" : "hst");
- v4l2_set_subdevdata(subdev, hsit);
- subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- vsp1_entity_init_formats(subdev, NULL);
-
return hsit;
}
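
The same conversion repeats for every entity in this series: register writes move out of the subdev s_stream handler into a .configure entity operation that queues them on a display list through vsp1_dl_list_write(). A minimal sketch of the pattern, using hypothetical foo_* names in place of the per-entity code:

/* Sketch only: the foo_* identifiers are placeholders, not driver code. */
static inline void vsp1_foo_write(struct vsp1_foo *foo,
				  struct vsp1_dl_list *dl, u32 reg, u32 data)
{
	/* Queue the write on the display list instead of writing MMIO. */
	vsp1_dl_list_write(dl, reg, data);
}

static void foo_configure(struct vsp1_entity *entity,
			  struct vsp1_pipeline *pipe,
			  struct vsp1_dl_list *dl)
{
	struct vsp1_foo *foo = to_foo(&entity->subdev);

	/* Register setup that previously lived in the s_stream handler. */
	vsp1_foo_write(foo, dl, FOO_CTRL, FOO_CTRL_EN);
}

static const struct vsp1_entity_operations foo_entity_ops = {
	.configure = foo_configure,
};
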
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index 433853ce8..0217393f2 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -17,55 +17,24 @@
#include <media/v4l2-subdev.h>
#include "vsp1.h"
+#include "vsp1_dl.h"
#include "vsp1_lif.h"
#define LIF_MIN_SIZE 2U
-#define LIF_MAX_SIZE 2048U
+#define LIF_MAX_SIZE 8190U
/* -----------------------------------------------------------------------------
* Device Access
*/
-static inline void vsp1_lif_write(struct vsp1_lif *lif, u32 reg, u32 data)
+static inline void vsp1_lif_write(struct vsp1_lif *lif, struct vsp1_dl_list *dl,
+ u32 reg, u32 data)
{
- vsp1_mod_write(&lif->entity, reg, data);
+ vsp1_dl_list_write(dl, reg, data);
}
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
- */
-
-static int lif_s_stream(struct v4l2_subdev *subdev, int enable)
-{
- const struct v4l2_mbus_framefmt *format;
- struct vsp1_lif *lif = to_lif(subdev);
- unsigned int hbth = 1300;
- unsigned int obth = 400;
- unsigned int lbth = 200;
-
- if (!enable) {
- vsp1_write(lif->entity.vsp1, VI6_LIF_CTRL, 0);
- return 0;
- }
-
- format = &lif->entity.formats[LIF_PAD_SOURCE];
-
- obth = min(obth, (format->width + 1) / 2 * format->height - 4);
-
- vsp1_lif_write(lif, VI6_LIF_CSBTH,
- (hbth << VI6_LIF_CSBTH_HBTH_SHIFT) |
- (lbth << VI6_LIF_CSBTH_LBTH_SHIFT));
-
- vsp1_lif_write(lif, VI6_LIF_CTRL,
- (obth << VI6_LIF_CTRL_OBTH_SHIFT) |
- (format->code == 0 ? VI6_LIF_CTRL_CFMT : 0) |
- VI6_LIF_CTRL_REQSEL | VI6_LIF_CTRL_LIF_EN);
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Pad Operations
+ * V4L2 Subdevice Operations
*/
static int lif_enum_mbus_code(struct v4l2_subdev *subdev,
@@ -76,82 +45,38 @@ static int lif_enum_mbus_code(struct v4l2_subdev *subdev,
MEDIA_BUS_FMT_ARGB8888_1X32,
MEDIA_BUS_FMT_AYUV8_1X32,
};
- struct vsp1_lif *lif = to_lif(subdev);
- if (code->pad == LIF_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(codes))
- return -EINVAL;
-
- code->code = codes[code->index];
- } else {
- struct v4l2_mbus_framefmt *format;
-
- /* The LIF can't perform format conversion, the sink format is
- * always identical to the source format.
- */
- if (code->index)
- return -EINVAL;
-
- format = vsp1_entity_get_pad_format(&lif->entity, cfg,
- LIF_PAD_SINK, code->which);
- code->code = format->code;
- }
-
- return 0;
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ ARRAY_SIZE(codes));
}
static int lif_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
- struct vsp1_lif *lif = to_lif(subdev);
- struct v4l2_mbus_framefmt *format;
-
- format = vsp1_entity_get_pad_format(&lif->entity, cfg, LIF_PAD_SINK,
- fse->which);
-
- if (fse->index || fse->code != format->code)
- return -EINVAL;
-
- if (fse->pad == LIF_PAD_SINK) {
- fse->min_width = LIF_MIN_SIZE;
- fse->max_width = LIF_MAX_SIZE;
- fse->min_height = LIF_MIN_SIZE;
- fse->max_height = LIF_MAX_SIZE;
- } else {
- fse->min_width = format->width;
- fse->max_width = format->width;
- fse->min_height = format->height;
- fse->max_height = format->height;
- }
-
- return 0;
+ return vsp1_subdev_enum_frame_size(subdev, cfg, fse, LIF_MIN_SIZE,
+ LIF_MIN_SIZE, LIF_MAX_SIZE,
+ LIF_MAX_SIZE);
}
-static int lif_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vsp1_lif *lif = to_lif(subdev);
-
- fmt->format = *vsp1_entity_get_pad_format(&lif->entity, cfg, fmt->pad,
- fmt->which);
-
- return 0;
-}
-
-static int lif_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
+static int lif_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_lif *lif = to_lif(subdev);
+ struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
+ config = vsp1_entity_get_pad_config(&lif->entity, cfg, fmt->which);
+ if (!config)
+ return -EINVAL;
+
/* Default to YUV if the requested format is not supported. */
if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
- format = vsp1_entity_get_pad_format(&lif->entity, cfg, fmt->pad,
- fmt->which);
+ format = vsp1_entity_get_pad_format(&lif->entity, config, fmt->pad);
if (fmt->pad == LIF_PAD_SOURCE) {
/* The LIF source format is always identical to its sink
@@ -172,40 +97,64 @@ static int lif_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_con
fmt->format = *format;
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&lif->entity, cfg, LIF_PAD_SOURCE,
- fmt->which);
+ format = vsp1_entity_get_pad_format(&lif->entity, config,
+ LIF_PAD_SOURCE);
*format = fmt->format;
return 0;
}
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static struct v4l2_subdev_video_ops lif_video_ops = {
- .s_stream = lif_s_stream,
-};
-
static struct v4l2_subdev_pad_ops lif_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = lif_enum_mbus_code,
.enum_frame_size = lif_enum_frame_size,
- .get_fmt = lif_get_format,
+ .get_fmt = vsp1_subdev_get_pad_format,
.set_fmt = lif_set_format,
};
static struct v4l2_subdev_ops lif_ops = {
- .video = &lif_video_ops,
.pad = &lif_pad_ops,
};
/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void lif_configure(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl)
+{
+ const struct v4l2_mbus_framefmt *format;
+ struct vsp1_lif *lif = to_lif(&entity->subdev);
+ unsigned int hbth = 1300;
+ unsigned int obth = 400;
+ unsigned int lbth = 200;
+
+ format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config,
+ LIF_PAD_SOURCE);
+
+ obth = min(obth, (format->width + 1) / 2 * format->height - 4);
+
+ vsp1_lif_write(lif, dl, VI6_LIF_CSBTH,
+ (hbth << VI6_LIF_CSBTH_HBTH_SHIFT) |
+ (lbth << VI6_LIF_CSBTH_LBTH_SHIFT));
+
+ vsp1_lif_write(lif, dl, VI6_LIF_CTRL,
+ (obth << VI6_LIF_CTRL_OBTH_SHIFT) |
+ (format->code == 0 ? VI6_LIF_CTRL_CFMT : 0) |
+ VI6_LIF_CTRL_REQSEL | VI6_LIF_CTRL_LIF_EN);
+}
+
+static const struct vsp1_entity_operations lif_entity_ops = {
+ .configure = lif_configure,
+};
+
+/* -----------------------------------------------------------------------------
* Initialization and Cleanup
*/
struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1)
{
- struct v4l2_subdev *subdev;
struct vsp1_lif *lif;
int ret;
@@ -213,24 +162,12 @@ struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1)
if (lif == NULL)
return ERR_PTR(-ENOMEM);
+ lif->entity.ops = &lif_entity_ops;
lif->entity.type = VSP1_ENTITY_LIF;
- ret = vsp1_entity_init(vsp1, &lif->entity, 2);
+ ret = vsp1_entity_init(vsp1, &lif->entity, "lif", 2, &lif_ops);
if (ret < 0)
return ERR_PTR(ret);
- /* Initialize the V4L2 subdev. */
- subdev = &lif->entity.subdev;
- v4l2_subdev_init(subdev, &lif_ops);
-
- subdev->entity.ops = &vsp1->media_ops;
- subdev->internal_ops = &vsp1_subdev_internal_ops;
- snprintf(subdev->name, sizeof(subdev->name), "%s lif",
- dev_name(vsp1->dev));
- v4l2_set_subdevdata(subdev, lif);
- subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- vsp1_entity_init_formats(subdev, NULL);
-
return lif;
}
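
lif_configure() above clamps the output buffer threshold to the frame size with obth = min(obth, (width + 1) / 2 * height - 4). A quick restatement as a helper, with worked numbers showing when the clamp actually bites (illustrative only):

static unsigned int lif_obth(unsigned int width, unsigned int height)
{
	unsigned int obth = 400;

	/* (1920 + 1) / 2 * 1080 - 4 = 960 * 1080 - 4 = 1036796, so a
	 * 1080p frame keeps the 400 default; (16 + 1) / 2 * 16 - 4 =
	 * 8 * 16 - 4 = 124, so a 16x16 frame clamps obth down to 124.
	 */
	return min(obth, (width + 1) / 2 * height - 4);
}
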
diff --git a/drivers/media/platform/vsp1/vsp1_lut.c b/drivers/media/platform/vsp1/vsp1_lut.c
index 4b89095e7..aa09e59f0 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.c
+++ b/drivers/media/platform/vsp1/vsp1_lut.c
@@ -18,6 +18,7 @@
#include <media/v4l2-subdev.h>
#include "vsp1.h"
+#include "vsp1_dl.h"
#include "vsp1_lut.h"
#define LUT_MIN_SIZE 4U
@@ -27,19 +28,35 @@
* Device Access
*/
-static inline void vsp1_lut_write(struct vsp1_lut *lut, u32 reg, u32 data)
+static inline void vsp1_lut_write(struct vsp1_lut *lut, struct vsp1_dl_list *dl,
+ u32 reg, u32 data)
{
- vsp1_write(lut->entity.vsp1, reg, data);
+ vsp1_dl_list_write(dl, reg, data);
}
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Core Operations
*/
-static void lut_configure(struct vsp1_lut *lut, struct vsp1_lut_config *config)
+static int lut_set_table(struct vsp1_lut *lut, struct vsp1_lut_config *config)
{
- memcpy_toio(lut->entity.vsp1->mmio + VI6_LUT_TABLE, config->lut,
- sizeof(config->lut));
+ struct vsp1_dl_body *dlb;
+ unsigned int i;
+
+ dlb = vsp1_dl_fragment_alloc(lut->entity.vsp1, ARRAY_SIZE(config->lut));
+ if (!dlb)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(config->lut); ++i)
+ vsp1_dl_fragment_write(dlb, VI6_LUT_TABLE + 4 * i,
+ config->lut[i]);
+
+ mutex_lock(&lut->lock);
+ swap(lut->lut, dlb);
+ mutex_unlock(&lut->lock);
+
+ vsp1_dl_fragment_free(dlb);
+ return 0;
}
static long lut_ioctl(struct v4l2_subdev *subdev, unsigned int cmd, void *arg)
@@ -48,8 +65,7 @@ static long lut_ioctl(struct v4l2_subdev *subdev, unsigned int cmd, void *arg)
switch (cmd) {
case VIDIOC_VSP1_LUT_CONFIG:
- lut_configure(lut, arg);
- return 0;
+ return lut_set_table(lut, arg);
default:
return -ENOIOCTLCMD;
@@ -57,22 +73,6 @@ static long lut_ioctl(struct v4l2_subdev *subdev, unsigned int cmd, void *arg)
}
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Video Operations
- */
-
-static int lut_s_stream(struct v4l2_subdev *subdev, int enable)
-{
- struct vsp1_lut *lut = to_lut(subdev);
-
- if (!enable)
- return 0;
-
- vsp1_lut_write(lut, VI6_LUT_CTRL, VI6_LUT_CTRL_EN);
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
* V4L2 Subdevice Pad Operations
*/
@@ -85,85 +85,39 @@ static int lut_enum_mbus_code(struct v4l2_subdev *subdev,
MEDIA_BUS_FMT_AHSV8888_1X32,
MEDIA_BUS_FMT_AYUV8_1X32,
};
- struct vsp1_lut *lut = to_lut(subdev);
- struct v4l2_mbus_framefmt *format;
-
- if (code->pad == LUT_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(codes))
- return -EINVAL;
-
- code->code = codes[code->index];
- } else {
- /* The LUT can't perform format conversion, the sink format is
- * always identical to the source format.
- */
- if (code->index)
- return -EINVAL;
-
- format = vsp1_entity_get_pad_format(&lut->entity, cfg,
- LUT_PAD_SINK, code->which);
- code->code = format->code;
- }
- return 0;
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ ARRAY_SIZE(codes));
}
static int lut_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_frame_size_enum *fse)
{
- struct vsp1_lut *lut = to_lut(subdev);
- struct v4l2_mbus_framefmt *format;
-
- format = vsp1_entity_get_pad_format(&lut->entity, cfg,
- fse->pad, fse->which);
-
- if (fse->index || fse->code != format->code)
- return -EINVAL;
-
- if (fse->pad == LUT_PAD_SINK) {
- fse->min_width = LUT_MIN_SIZE;
- fse->max_width = LUT_MAX_SIZE;
- fse->min_height = LUT_MIN_SIZE;
- fse->max_height = LUT_MAX_SIZE;
- } else {
- /* The size on the source pad are fixed and always identical to
- * the size on the sink pad.
- */
- fse->min_width = format->width;
- fse->max_width = format->width;
- fse->min_height = format->height;
- fse->max_height = format->height;
- }
-
- return 0;
-}
-
-static int lut_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vsp1_lut *lut = to_lut(subdev);
-
- fmt->format = *vsp1_entity_get_pad_format(&lut->entity, cfg, fmt->pad,
- fmt->which);
-
- return 0;
+ return vsp1_subdev_enum_frame_size(subdev, cfg, fse, LUT_MIN_SIZE,
+ LUT_MIN_SIZE, LUT_MAX_SIZE,
+ LUT_MAX_SIZE);
}
-static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
+static int lut_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_lut *lut = to_lut(subdev);
+ struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
+ config = vsp1_entity_get_pad_config(&lut->entity, cfg, fmt->which);
+ if (!config)
+ return -EINVAL;
+
/* Default to YUV if the requested format is not supported. */
if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
fmt->format.code != MEDIA_BUS_FMT_AHSV8888_1X32 &&
fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
- format = vsp1_entity_get_pad_format(&lut->entity, cfg, fmt->pad,
- fmt->which);
+ format = vsp1_entity_get_pad_format(&lut->entity, config, fmt->pad);
if (fmt->pad == LUT_PAD_SOURCE) {
/* The LUT output format can't be modified. */
@@ -171,6 +125,7 @@ static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_con
return 0;
}
+ format->code = fmt->format.code;
format->width = clamp_t(unsigned int, fmt->format.width,
LUT_MIN_SIZE, LUT_MAX_SIZE);
format->height = clamp_t(unsigned int, fmt->format.height,
@@ -181,8 +136,8 @@ static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_con
fmt->format = *format;
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&lut->entity, cfg, LUT_PAD_SOURCE,
- fmt->which);
+ format = vsp1_entity_get_pad_format(&lut->entity, config,
+ LUT_PAD_SOURCE);
*format = fmt->format;
return 0;
@@ -196,30 +151,49 @@ static struct v4l2_subdev_core_ops lut_core_ops = {
.ioctl = lut_ioctl,
};
-static struct v4l2_subdev_video_ops lut_video_ops = {
- .s_stream = lut_s_stream,
-};
-
static struct v4l2_subdev_pad_ops lut_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = lut_enum_mbus_code,
.enum_frame_size = lut_enum_frame_size,
- .get_fmt = lut_get_format,
+ .get_fmt = vsp1_subdev_get_pad_format,
.set_fmt = lut_set_format,
};
static struct v4l2_subdev_ops lut_ops = {
.core = &lut_core_ops,
- .video = &lut_video_ops,
.pad = &lut_pad_ops,
};
/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void lut_configure(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl)
+{
+ struct vsp1_lut *lut = to_lut(&entity->subdev);
+
+ vsp1_lut_write(lut, dl, VI6_LUT_CTRL, VI6_LUT_CTRL_EN);
+
+ mutex_lock(&lut->lock);
+ if (lut->lut) {
+ vsp1_dl_list_add_fragment(dl, lut->lut);
+ lut->lut = NULL;
+ }
+ mutex_unlock(&lut->lock);
+}
+
+static const struct vsp1_entity_operations lut_entity_ops = {
+ .configure = lut_configure,
+};
+
+/* -----------------------------------------------------------------------------
* Initialization and Cleanup
*/
struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1)
{
- struct v4l2_subdev *subdev;
struct vsp1_lut *lut;
int ret;
@@ -227,24 +201,12 @@ struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1)
if (lut == NULL)
return ERR_PTR(-ENOMEM);
+ lut->entity.ops = &lut_entity_ops;
lut->entity.type = VSP1_ENTITY_LUT;
- ret = vsp1_entity_init(vsp1, &lut->entity, 2);
+ ret = vsp1_entity_init(vsp1, &lut->entity, "lut", 2, &lut_ops);
if (ret < 0)
return ERR_PTR(ret);
- /* Initialize the V4L2 subdev. */
- subdev = &lut->entity.subdev;
- v4l2_subdev_init(subdev, &lut_ops);
-
- subdev->entity.ops = &vsp1->media_ops;
- subdev->internal_ops = &vsp1_subdev_internal_ops;
- snprintf(subdev->name, sizeof(subdev->name), "%s lut",
- dev_name(vsp1->dev));
- v4l2_set_subdevdata(subdev, lut);
- subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- vsp1_entity_init_formats(subdev, NULL);
-
return lut;
}
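
lut_set_table() and lut_configure() above form a producer/consumer handoff: the ioctl path builds a complete display-list fragment off-line and publishes it under lut->lock with swap(), while the configure path detaches it under the same lock and links it into the frame's display list. A hedged sketch of the idiom with a stand-in payload type (not the driver's vsp1_dl_body):

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct pending_update {
	struct mutex lock;
	void *payload;			/* stand-in for a prepared fragment */
};

/* Producer: publish a fully built replacement; free the superseded one. */
static void pending_publish(struct pending_update *p, void *fresh)
{
	mutex_lock(&p->lock);
	swap(p->payload, fresh);
	mutex_unlock(&p->lock);
	kfree(fresh);			/* now the old payload, NULL-safe */
}

/* Consumer: detach the pending payload, if any, for the current frame. */
static void *pending_take(struct pending_update *p)
{
	void *payload;

	mutex_lock(&p->lock);
	payload = p->payload;
	p->payload = NULL;
	mutex_unlock(&p->lock);
	return payload;
}
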
diff --git a/drivers/media/platform/vsp1/vsp1_lut.h b/drivers/media/platform/vsp1/vsp1_lut.h
index f92ffb867..cef874f22 100644
--- a/drivers/media/platform/vsp1/vsp1_lut.h
+++ b/drivers/media/platform/vsp1/vsp1_lut.h
@@ -13,6 +13,8 @@
#ifndef __VSP1_LUT_H__
#define __VSP1_LUT_H__
+#include <linux/mutex.h>
+
#include <media/media-entity.h>
#include <media/v4l2-subdev.h>
@@ -25,7 +27,9 @@ struct vsp1_device;
struct vsp1_lut {
struct vsp1_entity entity;
- u32 lut[256];
+
+ struct mutex lock;
+ struct vsp1_dl_body *lut;
};
static inline struct vsp1_lut *to_lut(struct v4l2_subdev *subdev)
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.c b/drivers/media/platform/vsp1/vsp1_pipe.c
index 6659f06b1..4f3b4a1d0 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/vsp1/vsp1_pipe.c
@@ -43,7 +43,7 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
{ V4L2_PIX_FMT_XRGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_XRGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS,
- 1, { 16, 0, 0 }, false, false, 1, 1, true },
+ 1, { 16, 0, 0 }, false, false, 1, 1, false },
{ V4L2_PIX_FMT_ARGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
VI6_FMT_ARGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS,
@@ -172,14 +172,18 @@ void vsp1_pipeline_reset(struct vsp1_pipeline *pipe)
bru->inputs[i].rpf = NULL;
}
- for (i = 0; i < ARRAY_SIZE(pipe->inputs); ++i)
+ for (i = 0; i < pipe->num_inputs; ++i) {
+ pipe->inputs[i]->pipe = NULL;
pipe->inputs[i] = NULL;
+ }
+
+ pipe->output->pipe = NULL;
+ pipe->output = NULL;
INIT_LIST_HEAD(&pipe->entities);
pipe->state = VSP1_PIPELINE_STOPPED;
pipe->buffers_ready = 0;
pipe->num_inputs = 0;
- pipe->output = NULL;
pipe->bru = NULL;
pipe->lif = NULL;
pipe->uds = NULL;
@@ -190,11 +194,13 @@ void vsp1_pipeline_init(struct vsp1_pipeline *pipe)
mutex_init(&pipe->lock);
spin_lock_init(&pipe->irqlock);
init_waitqueue_head(&pipe->wq);
+ kref_init(&pipe->kref);
INIT_LIST_HEAD(&pipe->entities);
pipe->state = VSP1_PIPELINE_STOPPED;
}
+/* Must be called with the pipe irqlock held. */
void vsp1_pipeline_run(struct vsp1_pipeline *pipe)
{
struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
@@ -226,7 +232,7 @@ int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
unsigned long flags;
int ret;
- if (pipe->dl) {
+ if (pipe->lif) {
/* When using display lists in continuous frame mode the only
* way to stop the pipeline is to reset the hardware.
*/
@@ -253,10 +259,10 @@ int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
if (entity->route && entity->route->reg)
vsp1_write(entity->vsp1, entity->route->reg,
VI6_DPR_NODE_UNUSED);
-
- v4l2_subdev_call(&entity->subdev, video, s_stream, 0);
}
+ v4l2_subdev_call(&pipe->output->entity.subdev, video, s_stream, 0);
+
return ret;
}
@@ -271,50 +277,15 @@ bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe)
return pipe->buffers_ready == mask;
}
-void vsp1_pipeline_display_start(struct vsp1_pipeline *pipe)
-{
- if (pipe->dl)
- vsp1_dl_irq_display_start(pipe->dl);
-}
-
void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
{
- enum vsp1_pipeline_state state;
- unsigned long flags;
-
if (pipe == NULL)
return;
- if (pipe->dl)
- vsp1_dl_irq_frame_end(pipe->dl);
+ vsp1_dlm_irq_frame_end(pipe->output->dlm);
- /* Signal frame end to the pipeline handler. */
- pipe->frame_end(pipe);
-
- spin_lock_irqsave(&pipe->irqlock, flags);
-
- state = pipe->state;
-
- /* When using display lists in continuous frame mode the pipeline is
- * automatically restarted by the hardware.
- */
- if (!pipe->dl)
- pipe->state = VSP1_PIPELINE_STOPPED;
-
- /* If a stop has been requested, mark the pipeline as stopped and
- * return.
- */
- if (state == VSP1_PIPELINE_STOPPING) {
- wake_up(&pipe->wq);
- goto done;
- }
-
- /* Restart the pipeline if ready. */
- if (vsp1_pipeline_ready(pipe))
- vsp1_pipeline_run(pipe);
-
-done:
- spin_unlock_irqrestore(&pipe->irqlock, flags);
+ if (pipe->frame_end)
+ pipe->frame_end(pipe);
}
/*
@@ -324,9 +295,13 @@ done:
* to be scaled, we disable alpha scaling when the UDS input has a fixed alpha
* value. The UDS then outputs a fixed alpha value which needs to be programmed
* from the input RPF alpha.
+ *
+ * This function can only be called from a subdev s_stream handler as it
+ * requires a valid display list context.
*/
void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
struct vsp1_entity *input,
+ struct vsp1_dl_list *dl,
unsigned int alpha)
{
struct vsp1_entity *entity;
@@ -349,7 +324,7 @@ void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
if (entity->type == VSP1_ENTITY_UDS) {
struct vsp1_uds *uds = to_uds(&entity->subdev);
- vsp1_uds_set_alpha(uds, alpha);
+ vsp1_uds_set_alpha(uds, dl, alpha);
break;
}
@@ -375,7 +350,7 @@ void vsp1_pipelines_suspend(struct vsp1_device *vsp1)
if (wpf == NULL)
continue;
- pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+ pipe = wpf->pipe;
if (pipe == NULL)
continue;
@@ -392,7 +367,7 @@ void vsp1_pipelines_suspend(struct vsp1_device *vsp1)
if (wpf == NULL)
continue;
- pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+ pipe = wpf->pipe;
if (pipe == NULL)
continue;
@@ -416,7 +391,7 @@ void vsp1_pipelines_resume(struct vsp1_device *vsp1)
if (wpf == NULL)
continue;
- pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+ pipe = wpf->pipe;
if (pipe == NULL)
continue;
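
vsp1_pipe.h below swaps the use_count integer for a struct kref, moving pipeline lifetime to the standard kernel refcounting idiom. An illustrative sketch of that idiom (the real users of pipe->kref live outside this excerpt, so the helper names here are assumptions):

#include <linux/kref.h>
#include <linux/slab.h>

static void vsp1_pipeline_release(struct kref *kref)
{
	struct vsp1_pipeline *pipe =
		container_of(kref, struct vsp1_pipeline, kref);

	/* Illustrative: real cleanup would also reset pipeline state. */
	kfree(pipe);
}

static struct vsp1_pipeline *vsp1_pipeline_get(struct vsp1_pipeline *pipe)
{
	kref_get(&pipe->kref);
	return pipe;
}

static void vsp1_pipeline_put(struct vsp1_pipeline *pipe)
{
	/* The last put invokes the release callback exactly once. */
	kref_put(&pipe->kref, vsp1_pipeline_release);
}
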
diff --git a/drivers/media/platform/vsp1/vsp1_pipe.h b/drivers/media/platform/vsp1/vsp1_pipe.h
index b2f3a8a89..7b5611351 100644
--- a/drivers/media/platform/vsp1/vsp1_pipe.h
+++ b/drivers/media/platform/vsp1/vsp1_pipe.h
@@ -13,13 +13,14 @@
#ifndef __VSP1_PIPE_H__
#define __VSP1_PIPE_H__
+#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <media/media-entity.h>
-struct vsp1_dl;
+struct vsp1_dl_list;
struct vsp1_rwpf;
/*
@@ -63,7 +64,7 @@ enum vsp1_pipeline_state {
* @wq: work queue to wait for state change completion
* @frame_end: frame end interrupt handler
* @lock: protects the pipeline use count and stream count
- * @use_count: number of video nodes using the pipeline
+ * @kref: pipeline reference count
* @stream_count: number of streaming video nodes
* @buffers_ready: bitmask of RPFs and WPFs with at least one buffer available
* @num_inputs: number of RPFs
@@ -86,7 +87,7 @@ struct vsp1_pipeline {
void (*frame_end)(struct vsp1_pipeline *pipe);
struct mutex lock;
- unsigned int use_count;
+ struct kref kref;
unsigned int stream_count;
unsigned int buffers_ready;
@@ -100,17 +101,9 @@ struct vsp1_pipeline {
struct list_head entities;
- struct vsp1_dl *dl;
+ struct vsp1_dl_list *dl;
};
-static inline struct vsp1_pipeline *to_vsp1_pipeline(struct media_entity *e)
-{
- if (likely(e->pipe))
- return container_of(e->pipe, struct vsp1_pipeline, pipe);
- else
- return NULL;
-}
-
void vsp1_pipeline_reset(struct vsp1_pipeline *pipe);
void vsp1_pipeline_init(struct vsp1_pipeline *pipe);
@@ -119,11 +112,11 @@ bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe);
int vsp1_pipeline_stop(struct vsp1_pipeline *pipe);
bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe);
-void vsp1_pipeline_display_start(struct vsp1_pipeline *pipe);
void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe);
void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
struct vsp1_entity *input,
+ struct vsp1_dl_list *dl,
unsigned int alpha);
void vsp1_pipelines_suspend(struct vsp1_device *vsp1);
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 069216f0e..927b5fb94 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -217,6 +217,16 @@
#define VI6_RPF_SRCM_ADDR_C1 0x0344
#define VI6_RPF_SRCM_ADDR_AI 0x0348
+#define VI6_RPF_MULT_ALPHA 0x036c
+#define VI6_RPF_MULT_ALPHA_A_MMD_NONE (0 << 12)
+#define VI6_RPF_MULT_ALPHA_A_MMD_RATIO (1 << 12)
+#define VI6_RPF_MULT_ALPHA_P_MMD_NONE (0 << 8)
+#define VI6_RPF_MULT_ALPHA_P_MMD_RATIO (1 << 8)
+#define VI6_RPF_MULT_ALPHA_P_MMD_IMAGE (2 << 8)
+#define VI6_RPF_MULT_ALPHA_P_MMD_BOTH (3 << 8)
+#define VI6_RPF_MULT_ALPHA_RATIO_MASK (0xff << 0)
+#define VI6_RPF_MULT_ALPHA_RATIO_SHIFT 0
+
/* -----------------------------------------------------------------------------
* WPF Control Registers
*/
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index 5bc1d1574..49168db3f 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -16,6 +16,8 @@
#include <media/v4l2-subdev.h>
#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_video.h"
@@ -26,64 +28,50 @@
* Device Access
*/
-static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf, u32 reg, u32 data)
+static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf,
+ struct vsp1_dl_list *dl, u32 reg, u32 data)
{
- vsp1_mod_write(&rpf->entity, reg + rpf->entity.index * VI6_RPF_OFFSET,
- data);
+ vsp1_dl_list_write(dl, reg + rpf->entity.index * VI6_RPF_OFFSET, data);
}
/* -----------------------------------------------------------------------------
- * Controls
+ * V4L2 Subdevice Operations
*/
-static int rpf_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct vsp1_rwpf *rpf =
- container_of(ctrl->handler, struct vsp1_rwpf, ctrls);
- struct vsp1_pipeline *pipe;
-
- if (!vsp1_entity_is_streaming(&rpf->entity))
- return 0;
-
- switch (ctrl->id) {
- case V4L2_CID_ALPHA_COMPONENT:
- vsp1_rpf_write(rpf, VI6_RPF_VRTCOL_SET,
- ctrl->val << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
-
- pipe = to_vsp1_pipeline(&rpf->entity.subdev.entity);
- vsp1_pipeline_propagate_alpha(pipe, &rpf->entity, ctrl->val);
- break;
- }
-
- return 0;
-}
-
-static const struct v4l2_ctrl_ops rpf_ctrl_ops = {
- .s_ctrl = rpf_s_ctrl,
+static struct v4l2_subdev_ops rpf_ops = {
+ .pad = &vsp1_rwpf_pad_ops,
};
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
+ * VSP1 Entity Operations
*/
-static int rpf_s_stream(struct v4l2_subdev *subdev, int enable)
+static void rpf_set_memory(struct vsp1_entity *entity, struct vsp1_dl_list *dl)
+{
+ struct vsp1_rwpf *rpf = entity_to_rwpf(entity);
+
+ vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_Y,
+ rpf->mem.addr[0] + rpf->offsets[0]);
+ vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C0,
+ rpf->mem.addr[1] + rpf->offsets[1]);
+ vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_ADDR_C1,
+ rpf->mem.addr[2] + rpf->offsets[1]);
+}
+
+static void rpf_configure(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl)
{
- struct vsp1_pipeline *pipe = to_vsp1_pipeline(&subdev->entity);
- struct vsp1_rwpf *rpf = to_rwpf(subdev);
- struct vsp1_device *vsp1 = rpf->entity.vsp1;
+ struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
const struct vsp1_format_info *fmtinfo = rpf->fmtinfo;
const struct v4l2_pix_format_mplane *format = &rpf->format;
- const struct v4l2_rect *crop = &rpf->crop;
+ const struct v4l2_mbus_framefmt *source_format;
+ const struct v4l2_mbus_framefmt *sink_format;
+ const struct v4l2_rect *crop;
+ unsigned int left = 0;
+ unsigned int top = 0;
u32 pstride;
u32 infmt;
- int ret;
-
- ret = vsp1_entity_set_streaming(&rpf->entity, enable);
- if (ret < 0)
- return ret;
-
- if (!enable)
- return 0;
/* Source size, stride and crop offsets.
*
@@ -91,10 +79,12 @@ static int rpf_s_stream(struct v4l2_subdev *subdev, int enable)
* left corner in the plane buffer. Only two offsets are needed, as
* planes 2 and 3 always have identical strides.
*/
- vsp1_rpf_write(rpf, VI6_RPF_SRC_BSIZE,
+ crop = vsp1_rwpf_get_crop(rpf, rpf->entity.config);
+
+ vsp1_rpf_write(rpf, dl, VI6_RPF_SRC_BSIZE,
(crop->width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
(crop->height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
- vsp1_rpf_write(rpf, VI6_RPF_SRC_ESIZE,
+ vsp1_rpf_write(rpf, dl, VI6_RPF_SRC_ESIZE,
(crop->width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
(crop->height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
@@ -103,26 +93,25 @@ static int rpf_s_stream(struct v4l2_subdev *subdev, int enable)
pstride = format->plane_fmt[0].bytesperline
<< VI6_RPF_SRCM_PSTRIDE_Y_SHIFT;
- vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
- rpf->buf_addr[0] + rpf->offsets[0]);
-
if (format->num_planes > 1) {
rpf->offsets[1] = crop->top * format->plane_fmt[1].bytesperline
+ crop->left * fmtinfo->bpp[1] / 8;
pstride |= format->plane_fmt[1].bytesperline
<< VI6_RPF_SRCM_PSTRIDE_C_SHIFT;
-
- vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0,
- rpf->buf_addr[1] + rpf->offsets[1]);
-
- if (format->num_planes > 2)
- vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1,
- rpf->buf_addr[2] + rpf->offsets[1]);
+ } else {
+ rpf->offsets[1] = 0;
}
- vsp1_rpf_write(rpf, VI6_RPF_SRCM_PSTRIDE, pstride);
+ vsp1_rpf_write(rpf, dl, VI6_RPF_SRCM_PSTRIDE, pstride);
/* Format */
+ sink_format = vsp1_entity_get_pad_format(&rpf->entity,
+ rpf->entity.config,
+ RWPF_PAD_SINK);
+ source_format = vsp1_entity_get_pad_format(&rpf->entity,
+ rpf->entity.config,
+ RWPF_PAD_SOURCE);
+
infmt = VI6_RPF_INFMT_CIPM
| (fmtinfo->hwfmt << VI6_RPF_INFMT_RDFMT_SHIFT);
@@ -131,88 +120,98 @@ static int rpf_s_stream(struct v4l2_subdev *subdev, int enable)
if (fmtinfo->swap_uv)
infmt |= VI6_RPF_INFMT_SPUVS;
- if (rpf->entity.formats[RWPF_PAD_SINK].code !=
- rpf->entity.formats[RWPF_PAD_SOURCE].code)
+ if (sink_format->code != source_format->code)
infmt |= VI6_RPF_INFMT_CSC;
- vsp1_rpf_write(rpf, VI6_RPF_INFMT, infmt);
- vsp1_rpf_write(rpf, VI6_RPF_DSWAP, fmtinfo->swap);
+ vsp1_rpf_write(rpf, dl, VI6_RPF_INFMT, infmt);
+ vsp1_rpf_write(rpf, dl, VI6_RPF_DSWAP, fmtinfo->swap);
/* Output location */
- vsp1_rpf_write(rpf, VI6_RPF_LOC,
- (rpf->location.left << VI6_RPF_LOC_HCOORD_SHIFT) |
- (rpf->location.top << VI6_RPF_LOC_VCOORD_SHIFT));
+ if (pipe->bru) {
+ const struct v4l2_rect *compose;
+
+ compose = vsp1_entity_get_pad_compose(pipe->bru,
+ pipe->bru->config,
+ rpf->bru_input);
+ left = compose->left;
+ top = compose->top;
+ }
- /* Use the alpha channel (extended to 8 bits) when available or an
- * alpha value set through the V4L2_CID_ALPHA_COMPONENT control
- * otherwise. Disable color keying.
+ vsp1_rpf_write(rpf, dl, VI6_RPF_LOC,
+ (left << VI6_RPF_LOC_HCOORD_SHIFT) |
+ (top << VI6_RPF_LOC_VCOORD_SHIFT));
+
+ /* On Gen2 use the alpha channel (extended to 8 bits) when available or
+ * a fixed alpha value set through the V4L2_CID_ALPHA_COMPONENT control
+ * otherwise.
+ *
+ * The Gen3 RPF has extended alpha capability and can both multiply the
+ * alpha channel by a fixed global alpha value, and multiply the pixel
+ * components to convert the input to premultiplied alpha.
+ *
+ * As alpha premultiplication is available in the BRU for both Gen2 and
+ * Gen3 we handle it there and use the Gen3 alpha multiplier for global
+ * alpha multiplication only. This however prevents conversion to
+ * premultiplied alpha if no BRU is present in the pipeline. If that use
+ * case turns out to be useful we will revisit the implementation (for
+ * Gen3 only).
+ *
+ * We enable alpha multiplication on Gen3 using the fixed alpha value
+ * set through the V4L2_CID_ALPHA_COMPONENT control when the input
+ * contains an alpha channel. On Gen2 the global alpha is ignored in
+ * that case.
+ *
+ * In all cases, disable color keying.
*/
- vsp1_rpf_write(rpf, VI6_RPF_ALPH_SEL, VI6_RPF_ALPH_SEL_AEXT_EXT |
+ vsp1_rpf_write(rpf, dl, VI6_RPF_ALPH_SEL, VI6_RPF_ALPH_SEL_AEXT_EXT |
(fmtinfo->alpha ? VI6_RPF_ALPH_SEL_ASEL_PACKED
: VI6_RPF_ALPH_SEL_ASEL_FIXED));
- if (vsp1->info->uapi)
- mutex_lock(rpf->ctrls.lock);
- vsp1_rpf_write(rpf, VI6_RPF_VRTCOL_SET,
- rpf->alpha->cur.val << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
- vsp1_pipeline_propagate_alpha(pipe, &rpf->entity, rpf->alpha->cur.val);
- if (vsp1->info->uapi)
- mutex_unlock(rpf->ctrls.lock);
-
- vsp1_rpf_write(rpf, VI6_RPF_MSK_CTRL, 0);
- vsp1_rpf_write(rpf, VI6_RPF_CKEY_CTRL, 0);
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static struct v4l2_subdev_video_ops rpf_video_ops = {
- .s_stream = rpf_s_stream,
-};
-
-static struct v4l2_subdev_pad_ops rpf_pad_ops = {
- .enum_mbus_code = vsp1_rwpf_enum_mbus_code,
- .enum_frame_size = vsp1_rwpf_enum_frame_size,
- .get_fmt = vsp1_rwpf_get_format,
- .set_fmt = vsp1_rwpf_set_format,
- .get_selection = vsp1_rwpf_get_selection,
- .set_selection = vsp1_rwpf_set_selection,
-};
+ vsp1_rpf_write(rpf, dl, VI6_RPF_VRTCOL_SET,
+ rpf->alpha << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
+
+ if (entity->vsp1->info->gen == 3) {
+ u32 mult;
+
+ if (fmtinfo->alpha) {
+ /* When the input contains an alpha channel enable the
+ * alpha multiplier. If the input is premultiplied we
+ * need to multiply both the alpha channel and the pixel
+ * components by the global alpha value to keep them
+ * premultiplied. Otherwise multiply the alpha channel
+ * only.
+ */
+ bool premultiplied = format->flags
+ & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA;
+
+ mult = VI6_RPF_MULT_ALPHA_A_MMD_RATIO
+ | (premultiplied ?
+ VI6_RPF_MULT_ALPHA_P_MMD_RATIO :
+ VI6_RPF_MULT_ALPHA_P_MMD_NONE)
+ | (rpf->alpha << VI6_RPF_MULT_ALPHA_RATIO_SHIFT);
+ } else {
+ /* When the input doesn't contain an alpha channel the
+ * global alpha value is applied in the unpacking unit,
+ * the alpha multiplier isn't needed and must be
+ * disabled.
+ */
+ mult = VI6_RPF_MULT_ALPHA_A_MMD_NONE
+ | VI6_RPF_MULT_ALPHA_P_MMD_NONE;
+ }
+
+ vsp1_rpf_write(rpf, dl, VI6_RPF_MULT_ALPHA, mult);
+ }
-static struct v4l2_subdev_ops rpf_ops = {
- .video = &rpf_video_ops,
- .pad = &rpf_pad_ops,
-};
+ vsp1_pipeline_propagate_alpha(pipe, &rpf->entity, dl, rpf->alpha);
-/* -----------------------------------------------------------------------------
- * Video Device Operations
- */
+ vsp1_rpf_write(rpf, dl, VI6_RPF_MSK_CTRL, 0);
+ vsp1_rpf_write(rpf, dl, VI6_RPF_CKEY_CTRL, 0);
-static void rpf_set_memory(struct vsp1_rwpf *rpf, struct vsp1_rwpf_memory *mem)
-{
- unsigned int i;
-
- for (i = 0; i < 3; ++i)
- rpf->buf_addr[i] = mem->addr[i];
-
- if (!vsp1_entity_is_streaming(&rpf->entity))
- return;
-
- vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
- mem->addr[0] + rpf->offsets[0]);
- if (mem->num_planes > 1)
- vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0,
- mem->addr[1] + rpf->offsets[1]);
- if (mem->num_planes > 2)
- vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1,
- mem->addr[2] + rpf->offsets[1]);
}
-static const struct vsp1_rwpf_operations rpf_vdev_ops = {
+static const struct vsp1_entity_operations rpf_entity_ops = {
.set_memory = rpf_set_memory,
+ .configure = rpf_configure,
};
/* -----------------------------------------------------------------------------
@@ -221,51 +220,31 @@ static const struct vsp1_rwpf_operations rpf_vdev_ops = {
struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
{
- struct v4l2_subdev *subdev;
struct vsp1_rwpf *rpf;
+ char name[6];
int ret;
rpf = devm_kzalloc(vsp1->dev, sizeof(*rpf), GFP_KERNEL);
if (rpf == NULL)
return ERR_PTR(-ENOMEM);
- rpf->ops = &rpf_vdev_ops;
-
rpf->max_width = RPF_MAX_WIDTH;
rpf->max_height = RPF_MAX_HEIGHT;
+ rpf->entity.ops = &rpf_entity_ops;
rpf->entity.type = VSP1_ENTITY_RPF;
rpf->entity.index = index;
- ret = vsp1_entity_init(vsp1, &rpf->entity, 2);
+ sprintf(name, "rpf.%u", index);
+ ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &rpf_ops);
if (ret < 0)
return ERR_PTR(ret);
- /* Initialize the V4L2 subdev. */
- subdev = &rpf->entity.subdev;
- v4l2_subdev_init(subdev, &rpf_ops);
-
- subdev->entity.ops = &vsp1->media_ops;
- subdev->internal_ops = &vsp1_subdev_internal_ops;
- snprintf(subdev->name, sizeof(subdev->name), "%s rpf.%u",
- dev_name(vsp1->dev), index);
- v4l2_set_subdevdata(subdev, rpf);
- subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- vsp1_entity_init_formats(subdev, NULL);
-
/* Initialize the control handler. */
- v4l2_ctrl_handler_init(&rpf->ctrls, 1);
- rpf->alpha = v4l2_ctrl_new_std(&rpf->ctrls, &rpf_ctrl_ops,
- V4L2_CID_ALPHA_COMPONENT,
- 0, 255, 1, 255);
-
- rpf->entity.subdev.ctrl_handler = &rpf->ctrls;
-
- if (rpf->ctrls.error) {
+ ret = vsp1_rwpf_init_ctrls(rpf);
+ if (ret < 0) {
dev_err(vsp1->dev, "rpf%u: failed to initialize controls\n",
index);
- ret = rpf->ctrls.error;
goto error;
}
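
rpf_configure() above composes the Gen3 VI6_RPF_MULT_ALPHA value from a pair of mode fields plus the global alpha ratio. A worked example using the register bits added in vsp1_regs.h, for a premultiplied input with a global alpha of 128 (values illustrative):

u32 mult = VI6_RPF_MULT_ALPHA_A_MMD_RATIO	   /* 1 << 12 = 0x1000 */
	 | VI6_RPF_MULT_ALPHA_P_MMD_RATIO	   /* 1 << 8  = 0x0100 */
	 | (128 << VI6_RPF_MULT_ALPHA_RATIO_SHIFT); /* 128    = 0x0080 */
/* mult == 0x1180: both the alpha channel and the pixel components are
 * multiplied by the programmed ratio, keeping the input premultiplied.
 */
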
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
index 9688c219b..3b6e032e7 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -20,13 +20,20 @@
#define RWPF_MIN_WIDTH 1
#define RWPF_MIN_HEIGHT 1
+struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
+ struct v4l2_subdev_pad_config *config)
+{
+ return v4l2_subdev_get_try_crop(&rwpf->entity.subdev, config,
+ RWPF_PAD_SINK);
+}
+
/* -----------------------------------------------------------------------------
* V4L2 Subdevice Pad Operations
*/
-int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code)
+static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
{
static const unsigned int codes[] = {
MEDIA_BUS_FMT_ARGB8888_1X32,
@@ -41,75 +48,36 @@ int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
return 0;
}
-int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- struct vsp1_rwpf *rwpf = to_rwpf(subdev);
- struct v4l2_mbus_framefmt *format;
-
- format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, fse->pad,
- fse->which);
-
- if (fse->index || fse->code != format->code)
- return -EINVAL;
-
- if (fse->pad == RWPF_PAD_SINK) {
- fse->min_width = RWPF_MIN_WIDTH;
- fse->max_width = rwpf->max_width;
- fse->min_height = RWPF_MIN_HEIGHT;
- fse->max_height = rwpf->max_height;
- } else {
- /* The size on the source pad are fixed and always identical to
- * the size on the sink pad.
- */
- fse->min_width = format->width;
- fse->max_width = format->width;
- fse->min_height = format->height;
- fse->max_height = format->height;
- }
-
- return 0;
-}
-
-static struct v4l2_rect *
-vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf, struct v4l2_subdev_pad_config *cfg, u32 which)
-{
- switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&rwpf->entity.subdev, cfg, RWPF_PAD_SINK);
- case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &rwpf->crop;
- default:
- return NULL;
- }
-}
-
-int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+static int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
- fmt->format = *vsp1_entity_get_pad_format(&rwpf->entity, cfg, fmt->pad,
- fmt->which);
-
- return 0;
+ return vsp1_subdev_enum_frame_size(subdev, cfg, fse, RWPF_MIN_WIDTH,
+ RWPF_MIN_HEIGHT, rwpf->max_width,
+ rwpf->max_height);
}
-int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
+ config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, fmt->which);
+ if (!config)
+ return -EINVAL;
+
/* Default to YUV if the requested format is not supported. */
if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32)
fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32;
- format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, fmt->pad,
- fmt->which);
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config, fmt->pad);
if (fmt->pad == RWPF_PAD_SOURCE) {
/* The RWPF performs format conversion but can't scale, only the
@@ -131,39 +99,44 @@ int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_conf
fmt->format = *format;
/* Update the sink crop rectangle. */
- crop = vsp1_rwpf_get_crop(rwpf, cfg, fmt->which);
+ crop = vsp1_rwpf_get_crop(rwpf, config);
crop->left = 0;
crop->top = 0;
crop->width = fmt->format.width;
crop->height = fmt->format.height;
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, RWPF_PAD_SOURCE,
- fmt->which);
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ RWPF_PAD_SOURCE);
*format = fmt->format;
return 0;
}
-int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
+static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
/* Cropping is implemented on the sink pad. */
if (sel->pad != RWPF_PAD_SINK)
return -EINVAL;
+ config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
+ if (!config)
+ return -EINVAL;
+
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- sel->r = *vsp1_rwpf_get_crop(rwpf, cfg, sel->which);
+ sel->r = *vsp1_rwpf_get_crop(rwpf, config);
break;
case V4L2_SEL_TGT_CROP_BOUNDS:
- format = vsp1_entity_get_pad_format(&rwpf->entity, cfg,
- RWPF_PAD_SINK, sel->which);
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ RWPF_PAD_SINK);
sel->r.left = 0;
sel->r.top = 0;
sel->r.width = format->width;
@@ -177,11 +150,12 @@ int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
return 0;
}
-int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel)
+static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
{
struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
@@ -192,11 +166,15 @@ int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
+ config = vsp1_entity_get_pad_config(&rwpf->entity, cfg, sel->which);
+ if (!config)
+ return -EINVAL;
+
/* Make sure the crop rectangle is entirely contained in the image. The
* WPF top and left offsets are limited to 255.
*/
- format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, RWPF_PAD_SINK,
- sel->which);
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ RWPF_PAD_SINK);
/* Restrict the crop rectangle coordinates to multiples of 2 to avoid
* shifting the color plane.
@@ -219,14 +197,59 @@ int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
sel->r.height = min_t(unsigned int, sel->r.height,
format->height - sel->r.top);
- crop = vsp1_rwpf_get_crop(rwpf, cfg, sel->which);
+ crop = vsp1_rwpf_get_crop(rwpf, config);
*crop = sel->r;
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&rwpf->entity, cfg, RWPF_PAD_SOURCE,
- sel->which);
+ format = vsp1_entity_get_pad_format(&rwpf->entity, config,
+ RWPF_PAD_SOURCE);
format->width = crop->width;
format->height = crop->height;
return 0;
}
+
+const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
+ .enum_mbus_code = vsp1_rwpf_enum_mbus_code,
+ .enum_frame_size = vsp1_rwpf_enum_frame_size,
+ .get_fmt = vsp1_subdev_get_pad_format,
+ .set_fmt = vsp1_rwpf_set_format,
+ .get_selection = vsp1_rwpf_get_selection,
+ .set_selection = vsp1_rwpf_set_selection,
+};
+
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
+static int vsp1_rwpf_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct vsp1_rwpf *rwpf =
+ container_of(ctrl->handler, struct vsp1_rwpf, ctrls);
+
+ switch (ctrl->id) {
+ case V4L2_CID_ALPHA_COMPONENT:
+ rwpf->alpha = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops vsp1_rwpf_ctrl_ops = {
+ .s_ctrl = vsp1_rwpf_s_ctrl,
+};
+
+int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf)
+{
+ rwpf->alpha = 255;
+
+ v4l2_ctrl_handler_init(&rwpf->ctrls, 1);
+ v4l2_ctrl_new_std(&rwpf->ctrls, &vsp1_rwpf_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+ rwpf->entity.subdev.ctrl_handler = &rwpf->ctrls;
+
+ return rwpf->ctrls.error;
+}
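+
+With the handler reduced to caching rwpf->alpha, the V4L2_CID_ALPHA_COMPONENT control is still set through the subdev node as before; the cached value is applied at the next configure step. A hedged userspace sketch (the device path is hypothetical):
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <linux/videodev2.h>
+
+static int set_rwpf_alpha(const char *subdev, int alpha)
+{
+	struct v4l2_control ctrl = {
+		.id = V4L2_CID_ALPHA_COMPONENT,
+		.value = alpha,		/* 0..255, default 255 */
+	};
+	int fd = open(subdev, O_RDWR);
+	int ret;
+
+	if (fd < 0)
+		return -1;
+	/* Takes effect when the driver next runs its configure step. */
+	ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
+	close(fd);
+	return ret;
+}
+
+Usage would look like set_rwpf_alpha("/dev/v4l-subdev2", 128), with the node number depending on the media graph.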
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index 8e8235682..9ff7c78f2 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -24,42 +24,35 @@
#define RWPF_PAD_SOURCE 1
struct v4l2_ctrl;
+struct vsp1_dl_manager;
+struct vsp1_pipeline;
struct vsp1_rwpf;
struct vsp1_video;
struct vsp1_rwpf_memory {
- unsigned int num_planes;
dma_addr_t addr[3];
- unsigned int length[3];
-};
-
-struct vsp1_rwpf_operations {
- void (*set_memory)(struct vsp1_rwpf *rwpf,
- struct vsp1_rwpf_memory *mem);
};
struct vsp1_rwpf {
struct vsp1_entity entity;
struct v4l2_ctrl_handler ctrls;
- struct v4l2_ctrl *alpha;
+ struct vsp1_pipeline *pipe;
struct vsp1_video *video;
- const struct vsp1_rwpf_operations *ops;
-
unsigned int max_width;
unsigned int max_height;
struct v4l2_pix_format_mplane format;
const struct vsp1_format_info *fmtinfo;
- struct {
- unsigned int left;
- unsigned int top;
- } location;
- struct v4l2_rect crop;
+ unsigned int bru_input;
+
+ unsigned int alpha;
unsigned int offsets[2];
- dma_addr_t buf_addr[3];
+ struct vsp1_rwpf_memory mem;
+
+ struct vsp1_dl_manager *dlm;
};
static inline struct vsp1_rwpf *to_rwpf(struct v4l2_subdev *subdev)
@@ -67,24 +60,31 @@ static inline struct vsp1_rwpf *to_rwpf(struct v4l2_subdev *subdev)
return container_of(subdev, struct vsp1_rwpf, entity.subdev);
}
+static inline struct vsp1_rwpf *entity_to_rwpf(struct vsp1_entity *entity)
+{
+ return container_of(entity, struct vsp1_rwpf, entity);
+}
+
struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index);
struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index);
-int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_mbus_code_enum *code);
-int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse);
-int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt);
-int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt);
-int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel);
-int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_selection *sel);
+int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf);
+
+extern const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops;
+
+struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
+ struct v4l2_subdev_pad_config *config);
+/**
+ * vsp1_rwpf_set_memory - Configure DMA addresses for a [RW]PF
+ * @rwpf: the [RW]PF instance
+ * @dl: the display list
+ *
+ * This function applies the cached memory buffer address to the display list.
+ */
+static inline void vsp1_rwpf_set_memory(struct vsp1_rwpf *rwpf,
+ struct vsp1_dl_list *dl)
+{
+ rwpf->entity.ops->set_memory(&rwpf->entity, dl);
+}
#endif /* __VSP1_RWPF_H__ */
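
The vsp1_rwpf_set_memory() wrapper above assumes the caller has already cached the plane addresses in rwpf->mem. An illustrative call site (the real one is in vsp1_video.c, not part of this excerpt; the vb and num_planes variables are assumptions):

/* At buffer-queue time: cache the DMA address of each plane, then have
 * the entity write them into the frame's display list.
 */
rpf->mem.addr[0] = vb2_dma_contig_plane_dma_addr(vb, 0);
rpf->mem.addr[1] = num_planes > 1 ?
		   vb2_dma_contig_plane_dma_addr(vb, 1) : 0;
rpf->mem.addr[2] = num_planes > 2 ?
		   vb2_dma_contig_plane_dma_addr(vb, 2) : 0;
vsp1_rwpf_set_memory(rpf, dl);
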
diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
index cc09efbfb..97ef997ae 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.c
+++ b/drivers/media/platform/vsp1/vsp1_sru.c
@@ -17,6 +17,7 @@
#include <media/v4l2-subdev.h>
#include "vsp1.h"
+#include "vsp1_dl.h"
#include "vsp1_sru.h"
#define SRU_MIN_SIZE 4U
@@ -26,14 +27,10 @@
* Device Access
*/
-static inline u32 vsp1_sru_read(struct vsp1_sru *sru, u32 reg)
+static inline void vsp1_sru_write(struct vsp1_sru *sru, struct vsp1_dl_list *dl,
+ u32 reg, u32 data)
{
- return vsp1_read(sru->entity.vsp1, reg);
-}
-
-static inline void vsp1_sru_write(struct vsp1_sru *sru, u32 reg, u32 data)
-{
- vsp1_write(sru->entity.vsp1, reg, data);
+ vsp1_dl_list_write(dl, reg, data);
}
/* -----------------------------------------------------------------------------
@@ -82,20 +79,10 @@ static int sru_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct vsp1_sru *sru =
container_of(ctrl->handler, struct vsp1_sru, ctrls);
- const struct vsp1_sru_param *param;
- u32 value;
switch (ctrl->id) {
case V4L2_CID_VSP1_SRU_INTENSITY:
- param = &vsp1_sru_params[ctrl->val - 1];
-
- value = vsp1_sru_read(sru, VI6_SRU_CTRL0);
- value &= ~(VI6_SRU_CTRL0_PARAM0_MASK |
- VI6_SRU_CTRL0_PARAM1_MASK);
- value |= param->ctrl0;
- vsp1_sru_write(sru, VI6_SRU_CTRL0, value);
-
- vsp1_sru_write(sru, VI6_SRU_CTRL2, param->ctrl2);
+ sru->intensity = ctrl->val;
break;
}
@@ -118,54 +105,7 @@ static const struct v4l2_ctrl_config sru_intensity_control = {
};
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
- */
-
-static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
-{
- struct vsp1_sru *sru = to_sru(subdev);
- struct v4l2_mbus_framefmt *input;
- struct v4l2_mbus_framefmt *output;
- u32 ctrl0;
- int ret;
-
- ret = vsp1_entity_set_streaming(&sru->entity, enable);
- if (ret < 0)
- return ret;
-
- if (!enable)
- return 0;
-
- input = &sru->entity.formats[SRU_PAD_SINK];
- output = &sru->entity.formats[SRU_PAD_SOURCE];
-
- if (input->code == MEDIA_BUS_FMT_ARGB8888_1X32)
- ctrl0 = VI6_SRU_CTRL0_PARAM2 | VI6_SRU_CTRL0_PARAM3
- | VI6_SRU_CTRL0_PARAM4;
- else
- ctrl0 = VI6_SRU_CTRL0_PARAM3;
-
- if (input->width != output->width)
- ctrl0 |= VI6_SRU_CTRL0_MODE_UPSCALE;
-
- /* Take the control handler lock to ensure that the CTRL0 value won't be
- * changed behind our back by a set control operation.
- */
- if (sru->entity.vsp1->info->uapi)
- mutex_lock(sru->ctrls.lock);
- ctrl0 |= vsp1_sru_read(sru, VI6_SRU_CTRL0)
- & (VI6_SRU_CTRL0_PARAM0_MASK | VI6_SRU_CTRL0_PARAM1_MASK);
- vsp1_sru_write(sru, VI6_SRU_CTRL0, ctrl0);
- if (sru->entity.vsp1->info->uapi)
- mutex_unlock(sru->ctrls.lock);
-
- vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Pad Operations
+ * V4L2 Subdevice Operations
*/
static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
@@ -176,27 +116,9 @@ static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
MEDIA_BUS_FMT_ARGB8888_1X32,
MEDIA_BUS_FMT_AYUV8_1X32,
};
- struct vsp1_sru *sru = to_sru(subdev);
- struct v4l2_mbus_framefmt *format;
-
- if (code->pad == SRU_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(codes))
- return -EINVAL;
-
- code->code = codes[code->index];
- } else {
- /* The SRU can't perform format conversion, the sink format is
- * always identical to the source format.
- */
- if (code->index)
- return -EINVAL;
- format = vsp1_entity_get_pad_format(&sru->entity, cfg,
- SRU_PAD_SINK, code->which);
- code->code = format->code;
- }
-
- return 0;
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ ARRAY_SIZE(codes));
}
static int sru_enum_frame_size(struct v4l2_subdev *subdev,
@@ -204,10 +126,14 @@ static int sru_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vsp1_sru *sru = to_sru(subdev);
+ struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
- format = vsp1_entity_get_pad_format(&sru->entity, cfg,
- SRU_PAD_SINK, fse->which);
+ config = vsp1_entity_get_pad_config(&sru->entity, cfg, fse->which);
+ if (!config)
+ return -EINVAL;
+
+ format = vsp1_entity_get_pad_format(&sru->entity, config, SRU_PAD_SINK);
if (fse->index || fse->code != format->code)
return -EINVAL;
@@ -233,20 +159,9 @@ static int sru_enum_frame_size(struct v4l2_subdev *subdev,
return 0;
}
-static int sru_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vsp1_sru *sru = to_sru(subdev);
-
- fmt->format = *vsp1_entity_get_pad_format(&sru->entity, cfg, fmt->pad,
- fmt->which);
-
- return 0;
-}
-
-static void sru_try_format(struct vsp1_sru *sru, struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, struct v4l2_mbus_framefmt *fmt,
- enum v4l2_subdev_format_whence which)
+static void sru_try_format(struct vsp1_sru *sru,
+ struct v4l2_subdev_pad_config *config,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt)
{
struct v4l2_mbus_framefmt *format;
unsigned int input_area;
@@ -265,8 +180,8 @@ static void sru_try_format(struct vsp1_sru *sru, struct v4l2_subdev_pad_config *
case SRU_PAD_SOURCE:
/* The SRU can't perform format conversion. */
- format = vsp1_entity_get_pad_format(&sru->entity, cfg,
- SRU_PAD_SINK, which);
+ format = vsp1_entity_get_pad_format(&sru->entity, config,
+ SRU_PAD_SINK);
fmt->code = format->code;
/* We can upscale by 2 in both direction, but not independently.
@@ -295,57 +210,94 @@ static void sru_try_format(struct vsp1_sru *sru, struct v4l2_subdev_pad_config *
fmt->colorspace = V4L2_COLORSPACE_SRGB;
}
-static int sru_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
+static int sru_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_sru *sru = to_sru(subdev);
+ struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
- sru_try_format(sru, cfg, fmt->pad, &fmt->format, fmt->which);
+ config = vsp1_entity_get_pad_config(&sru->entity, cfg, fmt->which);
+ if (!config)
+ return -EINVAL;
+
+ sru_try_format(sru, config, fmt->pad, &fmt->format);
- format = vsp1_entity_get_pad_format(&sru->entity, cfg, fmt->pad,
- fmt->which);
+ format = vsp1_entity_get_pad_format(&sru->entity, config, fmt->pad);
*format = fmt->format;
if (fmt->pad == SRU_PAD_SINK) {
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&sru->entity, cfg,
- SRU_PAD_SOURCE, fmt->which);
+ format = vsp1_entity_get_pad_format(&sru->entity, config,
+ SRU_PAD_SOURCE);
*format = fmt->format;
- sru_try_format(sru, cfg, SRU_PAD_SOURCE, format, fmt->which);
+ sru_try_format(sru, config, SRU_PAD_SOURCE, format);
}
return 0;
}
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static struct v4l2_subdev_video_ops sru_video_ops = {
- .s_stream = sru_s_stream,
-};
-
static struct v4l2_subdev_pad_ops sru_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = sru_enum_mbus_code,
.enum_frame_size = sru_enum_frame_size,
- .get_fmt = sru_get_format,
+ .get_fmt = vsp1_subdev_get_pad_format,
.set_fmt = sru_set_format,
};
static struct v4l2_subdev_ops sru_ops = {
- .video = &sru_video_ops,
.pad = &sru_pad_ops,
};
/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void sru_configure(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl)
+{
+ const struct vsp1_sru_param *param;
+ struct vsp1_sru *sru = to_sru(&entity->subdev);
+ struct v4l2_mbus_framefmt *input;
+ struct v4l2_mbus_framefmt *output;
+ u32 ctrl0;
+
+ input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
+ SRU_PAD_SOURCE);
+
+ if (input->code == MEDIA_BUS_FMT_ARGB8888_1X32)
+ ctrl0 = VI6_SRU_CTRL0_PARAM2 | VI6_SRU_CTRL0_PARAM3
+ | VI6_SRU_CTRL0_PARAM4;
+ else
+ ctrl0 = VI6_SRU_CTRL0_PARAM3;
+
+ if (input->width != output->width)
+ ctrl0 |= VI6_SRU_CTRL0_MODE_UPSCALE;
+
+ param = &vsp1_sru_params[sru->intensity - 1];
+
+ ctrl0 |= param->ctrl0;
+
+ vsp1_sru_write(sru, dl, VI6_SRU_CTRL0, ctrl0);
+ vsp1_sru_write(sru, dl, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
+ vsp1_sru_write(sru, dl, VI6_SRU_CTRL2, param->ctrl2);
+}
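
The lookup above indexes vsp1_sru_params with intensity - 1: the V4L2 intensity control is 1-based while the table is 0-based, and the patch seeds sru->intensity to 1 so the index is valid before userspace touches the control. A minimal standalone sketch of that mapping, with placeholder values rather than the real VI6_SRU register settings:

struct sru_param {
	unsigned int ctrl0;
	unsigned int ctrl2;
};

/* Placeholder rows; the driver's table holds per-level VI6_SRU settings. */
static const struct sru_param sru_params[6] = {
	{ 0x01, 0x10 }, { 0x02, 0x20 }, { 0x03, 0x30 },
	{ 0x04, 0x40 }, { 0x05, 0x50 }, { 0x06, 0x60 },
};

/* assumes the intensity control's range matches the table (1..6 here) */
static const struct sru_param *sru_param_for(unsigned int intensity)
{
	return &sru_params[intensity - 1];
}
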
+
+static const struct vsp1_entity_operations sru_entity_ops = {
+ .configure = sru_configure,
+};
+
+/* -----------------------------------------------------------------------------
* Initialization and Cleanup
*/
struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1)
{
- struct v4l2_subdev *subdev;
struct vsp1_sru *sru;
int ret;
@@ -353,29 +305,19 @@ struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1)
if (sru == NULL)
return ERR_PTR(-ENOMEM);
+ sru->entity.ops = &sru_entity_ops;
sru->entity.type = VSP1_ENTITY_SRU;
- ret = vsp1_entity_init(vsp1, &sru->entity, 2);
+ ret = vsp1_entity_init(vsp1, &sru->entity, "sru", 2, &sru_ops);
if (ret < 0)
return ERR_PTR(ret);
- /* Initialize the V4L2 subdev. */
- subdev = &sru->entity.subdev;
- v4l2_subdev_init(subdev, &sru_ops);
-
- subdev->entity.ops = &vsp1->media_ops;
- subdev->internal_ops = &vsp1_subdev_internal_ops;
- snprintf(subdev->name, sizeof(subdev->name), "%s sru",
- dev_name(vsp1->dev));
- v4l2_set_subdevdata(subdev, sru);
- subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- vsp1_entity_init_formats(subdev, NULL);
-
/* Initialize the control handler. */
v4l2_ctrl_handler_init(&sru->ctrls, 1);
v4l2_ctrl_new_custom(&sru->ctrls, &sru_intensity_control, NULL);
+ sru->intensity = 1;
+
sru->entity.subdev.ctrl_handler = &sru->ctrls;
if (sru->ctrls.error) {
diff --git a/drivers/media/platform/vsp1/vsp1_sru.h b/drivers/media/platform/vsp1/vsp1_sru.h
index b6768bf3d..85e241457 100644
--- a/drivers/media/platform/vsp1/vsp1_sru.h
+++ b/drivers/media/platform/vsp1/vsp1_sru.h
@@ -28,6 +28,8 @@ struct vsp1_sru {
struct vsp1_entity entity;
struct v4l2_ctrl_handler ctrls;
+
+ unsigned int intensity;
};
static inline struct vsp1_sru *to_sru(struct v4l2_subdev *subdev)
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
index bba67770c..1875e29da 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.c
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -17,6 +17,7 @@
#include <media/v4l2-subdev.h>
#include "vsp1.h"
+#include "vsp1_dl.h"
#include "vsp1_uds.h"
#define UDS_MIN_SIZE 4U
@@ -29,19 +30,21 @@
* Device Access
*/
-static inline void vsp1_uds_write(struct vsp1_uds *uds, u32 reg, u32 data)
+static inline void vsp1_uds_write(struct vsp1_uds *uds, struct vsp1_dl_list *dl,
+ u32 reg, u32 data)
{
- vsp1_write(uds->entity.vsp1,
- reg + uds->entity.index * VI6_UDS_OFFSET, data);
+ vsp1_dl_list_write(dl, reg + uds->entity.index * VI6_UDS_OFFSET, data);
}
/* -----------------------------------------------------------------------------
* Scaling Computation
*/
-void vsp1_uds_set_alpha(struct vsp1_uds *uds, unsigned int alpha)
+void vsp1_uds_set_alpha(struct vsp1_uds *uds, struct vsp1_dl_list *dl,
+ unsigned int alpha)
{
- vsp1_uds_write(uds, VI6_UDS_ALPVAL, alpha << VI6_UDS_ALPVAL_VAL0_SHIFT);
+ vsp1_uds_write(uds, dl, VI6_UDS_ALPVAL,
+ alpha << VI6_UDS_ALPVAL_VAL0_SHIFT);
}
/*
@@ -105,60 +108,6 @@ static unsigned int uds_compute_ratio(unsigned int input, unsigned int output)
}
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
- */
-
-static int uds_s_stream(struct v4l2_subdev *subdev, int enable)
-{
- struct vsp1_uds *uds = to_uds(subdev);
- const struct v4l2_mbus_framefmt *output;
- const struct v4l2_mbus_framefmt *input;
- unsigned int hscale;
- unsigned int vscale;
- bool multitap;
-
- if (!enable)
- return 0;
-
- input = &uds->entity.formats[UDS_PAD_SINK];
- output = &uds->entity.formats[UDS_PAD_SOURCE];
-
- hscale = uds_compute_ratio(input->width, output->width);
- vscale = uds_compute_ratio(input->height, output->height);
-
- dev_dbg(uds->entity.vsp1->dev, "hscale %u vscale %u\n", hscale, vscale);
-
- /* Multi-tap scaling can't be enabled along with alpha scaling when
- * scaling down with a factor lower than or equal to 1/2 in either
- * direction.
- */
- if (uds->scale_alpha && (hscale >= 8192 || vscale >= 8192))
- multitap = false;
- else
- multitap = true;
-
- vsp1_uds_write(uds, VI6_UDS_CTRL,
- (uds->scale_alpha ? VI6_UDS_CTRL_AON : 0) |
- (multitap ? VI6_UDS_CTRL_BC : 0));
-
- vsp1_uds_write(uds, VI6_UDS_PASS_BWIDTH,
- (uds_passband_width(hscale)
- << VI6_UDS_PASS_BWIDTH_H_SHIFT) |
- (uds_passband_width(vscale)
- << VI6_UDS_PASS_BWIDTH_V_SHIFT));
-
- /* Set the scaling ratios and the output size. */
- vsp1_uds_write(uds, VI6_UDS_SCALE,
- (hscale << VI6_UDS_SCALE_HFRAC_SHIFT) |
- (vscale << VI6_UDS_SCALE_VFRAC_SHIFT));
- vsp1_uds_write(uds, VI6_UDS_CLIP_SIZE,
- (output->width << VI6_UDS_CLIP_SIZE_HSIZE_SHIFT) |
- (output->height << VI6_UDS_CLIP_SIZE_VSIZE_SHIFT));
-
- return 0;
-}
-
-/* -----------------------------------------------------------------------------
* V4L2 Subdevice Pad Operations
*/
@@ -170,28 +119,9 @@ static int uds_enum_mbus_code(struct v4l2_subdev *subdev,
MEDIA_BUS_FMT_ARGB8888_1X32,
MEDIA_BUS_FMT_AYUV8_1X32,
};
- struct vsp1_uds *uds = to_uds(subdev);
-
- if (code->pad == UDS_PAD_SINK) {
- if (code->index >= ARRAY_SIZE(codes))
- return -EINVAL;
-
- code->code = codes[code->index];
- } else {
- struct v4l2_mbus_framefmt *format;
-
- /* The UDS can't perform format conversion, the sink format is
- * always identical to the source format.
- */
- if (code->index)
- return -EINVAL;
- format = vsp1_entity_get_pad_format(&uds->entity, cfg,
- UDS_PAD_SINK, code->which);
- code->code = format->code;
- }
-
- return 0;
+ return vsp1_subdev_enum_mbus_code(subdev, cfg, code, codes,
+ ARRAY_SIZE(codes));
}
static int uds_enum_frame_size(struct v4l2_subdev *subdev,
@@ -199,10 +129,15 @@ static int uds_enum_frame_size(struct v4l2_subdev *subdev,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vsp1_uds *uds = to_uds(subdev);
+ struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
- format = vsp1_entity_get_pad_format(&uds->entity, cfg,
- UDS_PAD_SINK, fse->which);
+ config = vsp1_entity_get_pad_config(&uds->entity, cfg, fse->which);
+ if (!config)
+ return -EINVAL;
+
+ format = vsp1_entity_get_pad_format(&uds->entity, config,
+ UDS_PAD_SINK);
if (fse->index || fse->code != format->code)
return -EINVAL;
@@ -222,20 +157,9 @@ static int uds_enum_frame_size(struct v4l2_subdev *subdev,
return 0;
}
-static int uds_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
-{
- struct vsp1_uds *uds = to_uds(subdev);
-
- fmt->format = *vsp1_entity_get_pad_format(&uds->entity, cfg, fmt->pad,
- fmt->which);
-
- return 0;
-}
-
-static void uds_try_format(struct vsp1_uds *uds, struct v4l2_subdev_pad_config *cfg,
- unsigned int pad, struct v4l2_mbus_framefmt *fmt,
- enum v4l2_subdev_format_whence which)
+static void uds_try_format(struct vsp1_uds *uds,
+ struct v4l2_subdev_pad_config *config,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt)
{
struct v4l2_mbus_framefmt *format;
unsigned int minimum;
@@ -254,8 +178,8 @@ static void uds_try_format(struct vsp1_uds *uds, struct v4l2_subdev_pad_config *
case UDS_PAD_SOURCE:
/* The UDS scales but can't perform format conversion. */
- format = vsp1_entity_get_pad_format(&uds->entity, cfg,
- UDS_PAD_SINK, which);
+ format = vsp1_entity_get_pad_format(&uds->entity, config,
+ UDS_PAD_SINK);
fmt->code = format->code;
uds_output_limits(format->width, &minimum, &maximum);
@@ -269,25 +193,30 @@ static void uds_try_format(struct vsp1_uds *uds, struct v4l2_subdev_pad_config *
fmt->colorspace = V4L2_COLORSPACE_SRGB;
}
-static int uds_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_config *cfg,
+static int uds_set_format(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct vsp1_uds *uds = to_uds(subdev);
+ struct v4l2_subdev_pad_config *config;
struct v4l2_mbus_framefmt *format;
- uds_try_format(uds, cfg, fmt->pad, &fmt->format, fmt->which);
+ config = vsp1_entity_get_pad_config(&uds->entity, cfg, fmt->which);
+ if (!config)
+ return -EINVAL;
+
+ uds_try_format(uds, config, fmt->pad, &fmt->format);
- format = vsp1_entity_get_pad_format(&uds->entity, cfg, fmt->pad,
- fmt->which);
+ format = vsp1_entity_get_pad_format(&uds->entity, config, fmt->pad);
*format = fmt->format;
if (fmt->pad == UDS_PAD_SINK) {
/* Propagate the format to the source pad. */
- format = vsp1_entity_get_pad_format(&uds->entity, cfg,
- UDS_PAD_SOURCE, fmt->which);
+ format = vsp1_entity_get_pad_format(&uds->entity, config,
+ UDS_PAD_SOURCE);
*format = fmt->format;
- uds_try_format(uds, cfg, UDS_PAD_SOURCE, format, fmt->which);
+ uds_try_format(uds, config, UDS_PAD_SOURCE, format);
}
return 0;
@@ -297,55 +226,97 @@ static int uds_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_pad_con
* V4L2 Subdevice Operations
*/
-static struct v4l2_subdev_video_ops uds_video_ops = {
- .s_stream = uds_s_stream,
-};
-
static struct v4l2_subdev_pad_ops uds_pad_ops = {
+ .init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = uds_enum_mbus_code,
.enum_frame_size = uds_enum_frame_size,
- .get_fmt = uds_get_format,
+ .get_fmt = vsp1_subdev_get_pad_format,
.set_fmt = uds_set_format,
};
static struct v4l2_subdev_ops uds_ops = {
- .video = &uds_video_ops,
.pad = &uds_pad_ops,
};
/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
+
+static void uds_configure(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl)
+{
+ struct vsp1_uds *uds = to_uds(&entity->subdev);
+ const struct v4l2_mbus_framefmt *output;
+ const struct v4l2_mbus_framefmt *input;
+ unsigned int hscale;
+ unsigned int vscale;
+ bool multitap;
+
+ input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SINK);
+ output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config,
+ UDS_PAD_SOURCE);
+
+ hscale = uds_compute_ratio(input->width, output->width);
+ vscale = uds_compute_ratio(input->height, output->height);
+
+ dev_dbg(uds->entity.vsp1->dev, "hscale %u vscale %u\n", hscale, vscale);
+
+ /* Multi-tap scaling can't be enabled along with alpha scaling when
+ * scaling down with a factor lower than or equal to 1/2 in either
+ * direction.
+ */
+ if (uds->scale_alpha && (hscale >= 8192 || vscale >= 8192))
+ multitap = false;
+ else
+ multitap = true;
+
+ vsp1_uds_write(uds, dl, VI6_UDS_CTRL,
+ (uds->scale_alpha ? VI6_UDS_CTRL_AON : 0) |
+ (multitap ? VI6_UDS_CTRL_BC : 0));
+
+ vsp1_uds_write(uds, dl, VI6_UDS_PASS_BWIDTH,
+ (uds_passband_width(hscale)
+ << VI6_UDS_PASS_BWIDTH_H_SHIFT) |
+ (uds_passband_width(vscale)
+ << VI6_UDS_PASS_BWIDTH_V_SHIFT));
+
+ /* Set the scaling ratios and the output size. */
+ vsp1_uds_write(uds, dl, VI6_UDS_SCALE,
+ (hscale << VI6_UDS_SCALE_HFRAC_SHIFT) |
+ (vscale << VI6_UDS_SCALE_VFRAC_SHIFT));
+ vsp1_uds_write(uds, dl, VI6_UDS_CLIP_SIZE,
+ (output->width << VI6_UDS_CLIP_SIZE_HSIZE_SHIFT) |
+ (output->height << VI6_UDS_CLIP_SIZE_VSIZE_SHIFT));
+}
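
The hscale >= 8192 test above reads naturally once the fixed-point convention is spelled out: a ratio of 4096 (1 << 12) stands for 1:1, so 8192 means the output is at most half the input size. A rough standalone sketch, assuming uds_compute_ratio() follows this convention (its exact rounding is not shown in this hunk):

/* 4096 == 1:1; larger values mean downscaling, smaller mean upscaling. */
static unsigned int compute_ratio(unsigned int input, unsigned int output)
{
	return input * 4096 / output;
}

/*
 * compute_ratio(1024, 512) == 8192: a 1/2 downscale, the point at which
 * multi-tap filtering and alpha scaling can no longer be combined.
 */
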
+
+static const struct vsp1_entity_operations uds_entity_ops = {
+ .configure = uds_configure,
+};
+
+/* -----------------------------------------------------------------------------
* Initialization and Cleanup
*/
struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index)
{
- struct v4l2_subdev *subdev;
struct vsp1_uds *uds;
+ char name[6];
int ret;
uds = devm_kzalloc(vsp1->dev, sizeof(*uds), GFP_KERNEL);
if (uds == NULL)
return ERR_PTR(-ENOMEM);
+ uds->entity.ops = &uds_entity_ops;
uds->entity.type = VSP1_ENTITY_UDS;
uds->entity.index = index;
- ret = vsp1_entity_init(vsp1, &uds->entity, 2);
+ sprintf(name, "uds.%u", index);
+ ret = vsp1_entity_init(vsp1, &uds->entity, name, 2, &uds_ops);
if (ret < 0)
return ERR_PTR(ret);
- /* Initialize the V4L2 subdev. */
- subdev = &uds->entity.subdev;
- v4l2_subdev_init(subdev, &uds_ops);
-
- subdev->entity.ops = &vsp1->media_ops;
- subdev->internal_ops = &vsp1_subdev_internal_ops;
- snprintf(subdev->name, sizeof(subdev->name), "%s uds.%u",
- dev_name(vsp1->dev), index);
- v4l2_set_subdevdata(subdev, uds);
- subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- vsp1_entity_init_formats(subdev, NULL);
-
return uds;
}
diff --git a/drivers/media/platform/vsp1/vsp1_uds.h b/drivers/media/platform/vsp1/vsp1_uds.h
index 031ac0da1..5c8cbfcad 100644
--- a/drivers/media/platform/vsp1/vsp1_uds.h
+++ b/drivers/media/platform/vsp1/vsp1_uds.h
@@ -35,6 +35,7 @@ static inline struct vsp1_uds *to_uds(struct v4l2_subdev *subdev)
struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index);
-void vsp1_uds_set_alpha(struct vsp1_uds *uds, unsigned int alpha);
+void vsp1_uds_set_alpha(struct vsp1_uds *uds, struct vsp1_dl_list *dl,
+ unsigned int alpha);
#endif /* __VSP1_UDS_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 72cc7d372..a9aec5c0b 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -29,6 +29,7 @@
#include "vsp1.h"
#include "vsp1_bru.h"
+#include "vsp1_dl.h"
#include "vsp1_entity.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
@@ -171,53 +172,178 @@ static int __vsp1_video_try_format(struct vsp1_video *video,
* Pipeline Management
*/
-static int vsp1_video_pipeline_validate_branch(struct vsp1_pipeline *pipe,
- struct vsp1_rwpf *input,
- struct vsp1_rwpf *output)
+/*
+ * vsp1_video_complete_buffer - Complete the current buffer
+ * @video: the video node
+ *
+ * This function completes the current buffer by filling its sequence number,
+ * time stamp and payload size, and hands it back to the videobuf core.
+ *
+ * When operating in DU output mode (deep pipeline to the DU through the LIF),
+ * the VSP1 needs to constantly supply frames to the display. In that case, if
+ * no other buffer is queued, reuse the one that has just been processed instead
+ * of handing it back to the videobuf core.
+ *
+ * Return the next queued buffer or NULL if the queue is empty.
+ */
+static struct vsp1_vb2_buffer *
+vsp1_video_complete_buffer(struct vsp1_video *video)
+{
+ struct vsp1_pipeline *pipe = video->rwpf->pipe;
+ struct vsp1_vb2_buffer *next = NULL;
+ struct vsp1_vb2_buffer *done;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&video->irqlock, flags);
+
+ if (list_empty(&video->irqqueue)) {
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ return NULL;
+ }
+
+ done = list_first_entry(&video->irqqueue,
+ struct vsp1_vb2_buffer, queue);
+
+ /* In DU output mode reuse the buffer if the list is singular. */
+ if (pipe->lif && list_is_singular(&video->irqqueue)) {
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ return done;
+ }
+
+ list_del(&done->queue);
+
+ if (!list_empty(&video->irqqueue))
+ next = list_first_entry(&video->irqqueue,
+ struct vsp1_vb2_buffer, queue);
+
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ done->buf.sequence = video->sequence++;
+ done->buf.vb2_buf.timestamp = ktime_get_ns();
+ for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
+ vb2_set_plane_payload(&done->buf.vb2_buf, i,
+ vb2_plane_size(&done->buf.vb2_buf, i));
+ vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);
+
+ return next;
+}
+
+static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
+ struct vsp1_rwpf *rwpf)
+{
+ struct vsp1_video *video = rwpf->video;
+ struct vsp1_vb2_buffer *buf;
+ unsigned long flags;
+
+ buf = vsp1_video_complete_buffer(video);
+ if (buf == NULL)
+ return;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
+ video->rwpf->mem = buf->mem;
+ pipe->buffers_ready |= 1 << video->pipe_index;
+
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+}
+
+static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+ unsigned int i;
+
+ if (!pipe->dl)
+ pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
+
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *rwpf = pipe->inputs[i];
+
+ if (rwpf)
+ vsp1_rwpf_set_memory(rwpf, pipe->dl);
+ }
+
+ if (!pipe->lif)
+ vsp1_rwpf_set_memory(pipe->output, pipe->dl);
+
+ vsp1_dl_list_commit(pipe->dl);
+ pipe->dl = NULL;
+
+ vsp1_pipeline_run(pipe);
+}
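
vsp1_video_pipeline_run() shows the display-list lifecycle this series converts the driver to: obtain a list, record register writes into it, commit it, then drop the local pointer because the hardware now owns the list. A toy user-space model of that flow (the real vsp1_dl_* API and its signatures differ):

#include <stdio.h>
#include <stdlib.h>

struct dl_entry { unsigned int reg, val; };

struct dl_list {
	struct dl_entry entry[64];
	unsigned int count;
};

static struct dl_list *dl_get(void)
{
	return calloc(1, sizeof(struct dl_list));	/* vsp1_dl_list_get() */
}

static void dl_write(struct dl_list *dl, unsigned int reg, unsigned int val)
{
	if (dl->count < 64)
		dl->entry[dl->count++] = (struct dl_entry){ reg, val };
}

static void dl_commit(struct dl_list **dl)
{
	/* the hardware fetches the queued writes at the next frame start */
	printf("committing %u writes\n", (*dl)->count);
	free(*dl);
	*dl = NULL;	/* mirrors pipe->dl = NULL after the commit above */
}
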
+
+static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+ enum vsp1_pipeline_state state;
+ unsigned long flags;
+ unsigned int i;
+
+ /* Complete buffers on all video nodes. */
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ if (!pipe->inputs[i])
+ continue;
+
+ vsp1_video_frame_end(pipe, pipe->inputs[i]);
+ }
+
+ vsp1_video_frame_end(pipe, pipe->output);
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
+ state = pipe->state;
+ pipe->state = VSP1_PIPELINE_STOPPED;
+
+ /* If a stop has been requested, mark the pipeline as stopped and
+ * return. Otherwise restart the pipeline if ready.
+ */
+ if (state == VSP1_PIPELINE_STOPPING)
+ wake_up(&pipe->wq);
+ else if (vsp1_pipeline_ready(pipe))
+ vsp1_video_pipeline_run(pipe);
+
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+}
+
+static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
+ struct vsp1_rwpf *input,
+ struct vsp1_rwpf *output)
{
- struct vsp1_entity *entity;
struct media_entity_enum ent_enum;
+ struct vsp1_entity *entity;
struct media_pad *pad;
- int rval;
bool bru_found = false;
+ int ret;
- input->location.left = 0;
- input->location.top = 0;
-
- rval = media_entity_enum_init(
- &ent_enum, input->entity.pads[RWPF_PAD_SOURCE].graph_obj.mdev);
- if (rval)
- return rval;
+ ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
+ if (ret < 0)
+ return ret;
pad = media_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);
while (1) {
if (pad == NULL) {
- rval = -EPIPE;
+ ret = -EPIPE;
goto out;
}
		/* We've reached a video node; that shouldn't have happened. */
if (!is_media_entity_v4l2_subdev(pad->entity)) {
- rval = -EPIPE;
+ ret = -EPIPE;
goto out;
}
entity = to_vsp1_entity(
media_entity_to_v4l2_subdev(pad->entity));
- /* A BRU is present in the pipeline, store the compose rectangle
- * location in the input RPF for use when configuring the RPF.
+		/* A BRU is present in the pipeline; store the BRU input pad
+ * number in the input RPF for use when configuring the RPF.
*/
if (entity->type == VSP1_ENTITY_BRU) {
struct vsp1_bru *bru = to_bru(&entity->subdev);
- struct v4l2_rect *rect =
- &bru->inputs[pad->index].compose;
bru->inputs[pad->index].rpf = input;
-
- input->location.left = rect->left;
- input->location.top = rect->top;
+ input->bru_input = pad->index;
bru_found = true;
}
@@ -229,14 +355,14 @@ static int vsp1_video_pipeline_validate_branch(struct vsp1_pipeline *pipe,
/* Ensure the branch has no loop. */
if (media_entity_enum_test_and_set(&ent_enum,
&entity->subdev.entity)) {
- rval = -EPIPE;
+ ret = -EPIPE;
goto out;
}
/* UDS can't be chained. */
if (entity->type == VSP1_ENTITY_UDS) {
if (pipe->uds) {
- rval = -EPIPE;
+ ret = -EPIPE;
goto out;
}
@@ -256,16 +382,16 @@ static int vsp1_video_pipeline_validate_branch(struct vsp1_pipeline *pipe,
/* The last entity must be the output WPF. */
if (entity != &output->entity)
- rval = -EPIPE;
+ ret = -EPIPE;
out:
media_entity_enum_cleanup(&ent_enum);
- return rval;
+ return ret;
}
-static int vsp1_video_pipeline_validate(struct vsp1_pipeline *pipe,
- struct vsp1_video *video)
+static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
+ struct vsp1_video *video)
{
struct media_entity_graph graph;
struct media_entity *entity = &video->video.entity;
@@ -273,14 +399,10 @@ static int vsp1_video_pipeline_validate(struct vsp1_pipeline *pipe,
unsigned int i;
int ret;
- mutex_lock(&mdev->graph_mutex);
-
/* Walk the graph to locate the entities and video nodes. */
ret = media_entity_graph_walk_init(&graph, mdev);
- if (ret) {
- mutex_unlock(&mdev->graph_mutex);
+ if (ret)
return ret;
- }
media_entity_graph_walk_start(&graph, entity);
@@ -300,10 +422,12 @@ static int vsp1_video_pipeline_validate(struct vsp1_pipeline *pipe,
rwpf = to_rwpf(subdev);
pipe->inputs[rwpf->entity.index] = rwpf;
rwpf->video->pipe_index = ++pipe->num_inputs;
+ rwpf->pipe = pipe;
} else if (e->type == VSP1_ENTITY_WPF) {
rwpf = to_rwpf(subdev);
pipe->output = rwpf;
rwpf->video->pipe_index = 0;
+ rwpf->pipe = pipe;
} else if (e->type == VSP1_ENTITY_LIF) {
pipe->lif = e;
} else if (e->type == VSP1_ENTITY_BRU) {
@@ -311,15 +435,11 @@ static int vsp1_video_pipeline_validate(struct vsp1_pipeline *pipe,
}
}
- mutex_unlock(&mdev->graph_mutex);
-
media_entity_graph_walk_cleanup(&graph);
/* We need one output and at least one input. */
- if (pipe->num_inputs == 0 || !pipe->output) {
- ret = -EPIPE;
- goto error;
- }
+ if (pipe->num_inputs == 0 || !pipe->output)
+ return -EPIPE;
/* Follow links downstream for each input and make sure the graph
* contains no loop and that all branches end at the output WPF.
@@ -328,143 +448,69 @@ static int vsp1_video_pipeline_validate(struct vsp1_pipeline *pipe,
if (!pipe->inputs[i])
continue;
- ret = vsp1_video_pipeline_validate_branch(pipe, pipe->inputs[i],
- pipe->output);
+ ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i],
+ pipe->output);
if (ret < 0)
- goto error;
+ return ret;
}
return 0;
-
-error:
- vsp1_pipeline_reset(pipe);
- return ret;
}
static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
struct vsp1_video *video)
{
- int ret;
+ vsp1_pipeline_init(pipe);
- mutex_lock(&pipe->lock);
-
- /* If we're the first user validate and initialize the pipeline. */
- if (pipe->use_count == 0) {
- ret = vsp1_video_pipeline_validate(pipe, video);
- if (ret < 0)
- goto done;
- }
+ pipe->frame_end = vsp1_video_pipeline_frame_end;
- pipe->use_count++;
- ret = 0;
-
-done:
- mutex_unlock(&pipe->lock);
- return ret;
+ return vsp1_video_pipeline_build(pipe, video);
}
-static void vsp1_video_pipeline_cleanup(struct vsp1_pipeline *pipe)
-{
- mutex_lock(&pipe->lock);
-
- /* If we're the last user clean up the pipeline. */
- if (--pipe->use_count == 0)
- vsp1_pipeline_reset(pipe);
-
- mutex_unlock(&pipe->lock);
-}
-
-/*
- * vsp1_video_complete_buffer - Complete the current buffer
- * @video: the video node
- *
- * This function completes the current buffer by filling its sequence number,
- * time stamp and payload size, and hands it back to the videobuf core.
- *
- * When operating in DU output mode (deep pipeline to the DU through the LIF),
- * the VSP1 needs to constantly supply frames to the display. In that case, if
- * no other buffer is queued, reuse the one that has just been processed instead
- * of handing it back to the videobuf core.
- *
- * Return the next queued buffer or NULL if the queue is empty.
- */
-static struct vsp1_vb2_buffer *
-vsp1_video_complete_buffer(struct vsp1_video *video)
+static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video)
{
- struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
- struct vsp1_vb2_buffer *next = NULL;
- struct vsp1_vb2_buffer *done;
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&video->irqlock, flags);
-
- if (list_empty(&video->irqqueue)) {
- spin_unlock_irqrestore(&video->irqlock, flags);
- return NULL;
- }
-
- done = list_first_entry(&video->irqqueue,
- struct vsp1_vb2_buffer, queue);
+ struct vsp1_pipeline *pipe;
+ int ret;
- /* In DU output mode reuse the buffer if the list is singular. */
- if (pipe->lif && list_is_singular(&video->irqqueue)) {
- spin_unlock_irqrestore(&video->irqlock, flags);
- return done;
+ /* Get a pipeline object for the video node. If a pipeline has already
+	 * been allocated, just increment its reference count and return it.
+	 * Otherwise allocate a new pipeline and initialize it; it will be freed
+ * when the last reference is released.
+ */
+ if (!video->rwpf->pipe) {
+ pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
+ if (!pipe)
+ return ERR_PTR(-ENOMEM);
+
+ ret = vsp1_video_pipeline_init(pipe, video);
+ if (ret < 0) {
+ vsp1_pipeline_reset(pipe);
+ kfree(pipe);
+ return ERR_PTR(ret);
+ }
+ } else {
+ pipe = video->rwpf->pipe;
+ kref_get(&pipe->kref);
}
- list_del(&done->queue);
-
- if (!list_empty(&video->irqqueue))
- next = list_first_entry(&video->irqqueue,
- struct vsp1_vb2_buffer, queue);
-
- spin_unlock_irqrestore(&video->irqlock, flags);
-
- done->buf.sequence = video->sequence++;
- done->buf.vb2_buf.timestamp = ktime_get_ns();
- for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
- vb2_set_plane_payload(&done->buf.vb2_buf, i,
- done->mem.length[i]);
- vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);
-
- return next;
+ return pipe;
}
-static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
- struct vsp1_rwpf *rwpf)
+static void vsp1_video_pipeline_release(struct kref *kref)
{
- struct vsp1_video *video = rwpf->video;
- struct vsp1_vb2_buffer *buf;
- unsigned long flags;
+ struct vsp1_pipeline *pipe = container_of(kref, typeof(*pipe), kref);
- buf = vsp1_video_complete_buffer(video);
- if (buf == NULL)
- return;
-
- spin_lock_irqsave(&pipe->irqlock, flags);
-
- video->rwpf->ops->set_memory(video->rwpf, &buf->mem);
- pipe->buffers_ready |= 1 << video->pipe_index;
-
- spin_unlock_irqrestore(&pipe->irqlock, flags);
+ vsp1_pipeline_reset(pipe);
+ kfree(pipe);
}
-static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
+static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe)
{
- struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
- unsigned int i;
-
- /* Complete buffers on all video nodes. */
- for (i = 0; i < vsp1->info->rpf_count; ++i) {
- if (!pipe->inputs[i])
- continue;
+ struct media_device *mdev = &pipe->output->entity.vsp1->media_dev;
- vsp1_video_frame_end(pipe, pipe->inputs[i]);
- }
-
- if (!pipe->lif)
- vsp1_video_frame_end(pipe, pipe->output);
+ mutex_lock(&mdev->graph_mutex);
+ kref_put(&pipe->kref, vsp1_video_pipeline_release);
+ mutex_unlock(&mdev->graph_mutex);
}
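
The get/put pair above replaces the old use_count with a kref whose release callback resets and frees the pipeline, serialized against the media graph mutex. A toy single-threaded model of the lifetime rule (struct kref and the locking are elided):

#include <stdlib.h>

struct pipeline {
	unsigned int refcount;
};

static struct pipeline *pipeline_get(struct pipeline **slot)
{
	if (!*slot) {
		*slot = calloc(1, sizeof(**slot));
		if (!*slot)
			return NULL;
		(*slot)->refcount = 1;	/* kref_init() on first user */
	} else {
		(*slot)->refcount++;	/* kref_get() for later users */
	}
	return *slot;
}

static void pipeline_put(struct pipeline **slot)
{
	if (--(*slot)->refcount == 0) {	/* kref_put() release callback */
		free(*slot);
		*slot = NULL;
	}
}
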
/* -----------------------------------------------------------------------------
@@ -513,16 +559,16 @@ static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
if (vb->num_planes < format->num_planes)
return -EINVAL;
- buf->mem.num_planes = vb->num_planes;
-
for (i = 0; i < vb->num_planes; ++i) {
buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
- buf->mem.length[i] = vb2_plane_size(vb, i);
- if (buf->mem.length[i] < format->plane_fmt[i].sizeimage)
+ if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
return -EINVAL;
}
+ for ( ; i < 3; ++i)
+ buf->mem.addr[i] = 0;
+
return 0;
}
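
The new loop clears the addresses of planes the format doesn't use, so consumers such as the WPF in this patch can program all three address registers unconditionally. A standalone sketch of that padding, with MAX_PLANES standing in for the hardware's three-plane limit:

#define MAX_PLANES 3

static void fill_plane_addrs(unsigned long dst[MAX_PLANES],
			     const unsigned long *src, unsigned int num_planes)
{
	unsigned int i;

	for (i = 0; i < num_planes; i++)
		dst[i] = src[i];
	for (; i < MAX_PLANES; i++)
		dst[i] = 0;	/* unused planes must not point anywhere */
}
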
@@ -530,7 +576,7 @@ static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
- struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
+ struct vsp1_pipeline *pipe = video->rwpf->pipe;
struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
unsigned long flags;
bool empty;
@@ -545,54 +591,66 @@ static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
spin_lock_irqsave(&pipe->irqlock, flags);
- video->rwpf->ops->set_memory(video->rwpf, &buf->mem);
+ video->rwpf->mem = buf->mem;
pipe->buffers_ready |= 1 << video->pipe_index;
if (vb2_is_streaming(&video->queue) &&
vsp1_pipeline_ready(pipe))
- vsp1_pipeline_run(pipe);
+ vsp1_video_pipeline_run(pipe);
spin_unlock_irqrestore(&pipe->irqlock, flags);
}
+static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_entity *entity;
+
+ /* Prepare the display list. */
+ pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
+ if (!pipe->dl)
+ return -ENOMEM;
+
+ if (pipe->uds) {
+ struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);
+
+ /* If a BRU is present in the pipeline before the UDS, the alpha
+ * component doesn't need to be scaled as the BRU output alpha
+ * value is fixed to 255. Otherwise we need to scale the alpha
+ * component only when available at the input RPF.
+ */
+ if (pipe->uds_input->type == VSP1_ENTITY_BRU) {
+ uds->scale_alpha = false;
+ } else {
+ struct vsp1_rwpf *rpf =
+ to_rwpf(&pipe->uds_input->subdev);
+
+ uds->scale_alpha = rpf->fmtinfo->alpha;
+ }
+ }
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe) {
+ vsp1_entity_route_setup(entity, pipe->dl);
+
+ if (entity->ops->configure)
+ entity->ops->configure(entity, pipe, pipe->dl);
+ }
+
+ return 0;
+}
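
The loop at the end of vsp1_video_setup_pipeline() relies on .configure being optional: entities without per-frame parameters simply leave it NULL. A minimal sketch of that optional-callback dispatch (types are illustrative, not the driver's):

struct entity;

struct entity_operations {
	void (*configure)(struct entity *entity);
};

struct entity {
	const struct entity_operations *ops;
};

static void setup_entities(struct entity **entities, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		if (entities[i]->ops->configure)	/* callback is optional */
			entities[i]->ops->configure(entities[i]);
}
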
+
static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct vsp1_video *video = vb2_get_drv_priv(vq);
- struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
- struct vsp1_entity *entity;
+ struct vsp1_pipeline *pipe = video->rwpf->pipe;
unsigned long flags;
int ret;
mutex_lock(&pipe->lock);
if (pipe->stream_count == pipe->num_inputs) {
- if (pipe->uds) {
- struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);
-
- /* If a BRU is present in the pipeline before the UDS,
- * the alpha component doesn't need to be scaled as the
- * BRU output alpha value is fixed to 255. Otherwise we
- * need to scale the alpha component only when available
- * at the input RPF.
- */
- if (pipe->uds_input->type == VSP1_ENTITY_BRU) {
- uds->scale_alpha = false;
- } else {
- struct vsp1_rwpf *rpf =
- to_rwpf(&pipe->uds_input->subdev);
-
- uds->scale_alpha = rpf->fmtinfo->alpha;
- }
- }
-
- list_for_each_entry(entity, &pipe->entities, list_pipe) {
- vsp1_entity_route_setup(entity);
-
- ret = v4l2_subdev_call(&entity->subdev, video,
- s_stream, 1);
- if (ret < 0) {
- mutex_unlock(&pipe->lock);
- return ret;
- }
+ ret = vsp1_video_setup_pipeline(pipe);
+ if (ret < 0) {
+ mutex_unlock(&pipe->lock);
+ return ret;
}
}
@@ -601,7 +659,7 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
spin_lock_irqsave(&pipe->irqlock, flags);
if (vsp1_pipeline_ready(pipe))
- vsp1_pipeline_run(pipe);
+ vsp1_video_pipeline_run(pipe);
spin_unlock_irqrestore(&pipe->irqlock, flags);
return 0;
@@ -610,7 +668,7 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
static void vsp1_video_stop_streaming(struct vb2_queue *vq)
{
struct vsp1_video *video = vb2_get_drv_priv(vq);
- struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
+ struct vsp1_pipeline *pipe = video->rwpf->pipe;
struct vsp1_vb2_buffer *buffer;
unsigned long flags;
int ret;
@@ -621,11 +679,14 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
ret = vsp1_pipeline_stop(pipe);
if (ret == -ETIMEDOUT)
dev_err(video->vsp1->dev, "pipeline stop timeout\n");
+
+ vsp1_dl_list_put(pipe->dl);
+ pipe->dl = NULL;
}
mutex_unlock(&pipe->lock);
- vsp1_video_pipeline_cleanup(pipe);
media_entity_pipeline_stop(&video->video.entity);
+ vsp1_video_pipeline_put(pipe);
/* Remove all buffers from the IRQ queue. */
spin_lock_irqsave(&video->irqlock, flags);
@@ -737,6 +798,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
struct v4l2_fh *vfh = file->private_data;
struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+ struct media_device *mdev = &video->vsp1->media_dev;
struct vsp1_pipeline *pipe;
int ret;
@@ -745,18 +807,25 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
video->sequence = 0;
- /* Start streaming on the pipeline. No link touching an entity in the
- * pipeline can be activated or deactivated once streaming is started.
- *
- * Use the VSP1 pipeline object embedded in the first video object that
- * starts streaming.
+ /* Get a pipeline for the video node and start streaming on it. No link
+ * touching an entity in the pipeline can be activated or deactivated
+ * once streaming is started.
*/
- pipe = video->video.entity.pipe
- ? to_vsp1_pipeline(&video->video.entity) : &video->pipe;
+ mutex_lock(&mdev->graph_mutex);
- ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
- if (ret < 0)
- return ret;
+ pipe = vsp1_video_pipeline_get(video);
+ if (IS_ERR(pipe)) {
+ mutex_unlock(&mdev->graph_mutex);
+ return PTR_ERR(pipe);
+ }
+
+ ret = __media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
+ if (ret < 0) {
+ mutex_unlock(&mdev->graph_mutex);
+ goto err_pipe;
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
/* Verify that the configured format matches the output of the connected
* subdev.
@@ -765,21 +834,17 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
if (ret < 0)
goto err_stop;
- ret = vsp1_video_pipeline_init(pipe, video);
- if (ret < 0)
- goto err_stop;
-
/* Start the queue. */
ret = vb2_streamon(&video->queue, type);
if (ret < 0)
- goto err_cleanup;
+ goto err_stop;
return 0;
-err_cleanup:
- vsp1_video_pipeline_cleanup(pipe);
err_stop:
media_entity_pipeline_stop(&video->video.entity);
+err_pipe:
+ vsp1_video_pipeline_put(pipe);
return ret;
}
@@ -895,26 +960,16 @@ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
spin_lock_init(&video->irqlock);
INIT_LIST_HEAD(&video->irqqueue);
- vsp1_pipeline_init(&video->pipe);
- video->pipe.frame_end = vsp1_video_pipeline_frame_end;
-
/* Initialize the media entity... */
ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
if (ret < 0)
return ERR_PTR(ret);
/* ... and the format ... */
- rwpf->fmtinfo = vsp1_get_format_info(VSP1_VIDEO_DEF_FORMAT);
- rwpf->format.pixelformat = rwpf->fmtinfo->fourcc;
- rwpf->format.colorspace = V4L2_COLORSPACE_SRGB;
- rwpf->format.field = V4L2_FIELD_NONE;
+ rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT;
rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;
- rwpf->format.num_planes = 1;
- rwpf->format.plane_fmt[0].bytesperline =
- rwpf->format.width * rwpf->fmtinfo->bpp[0] / 8;
- rwpf->format.plane_fmt[0].sizeimage =
- rwpf->format.plane_fmt[0].bytesperline * rwpf->format.height;
+ __vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo);
/* ... and the video node... */
video->video.v4l2_dev = &video->vsp1->v4l2_dev;
diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
index 64abd39ee..867b00807 100644
--- a/drivers/media/platform/vsp1/vsp1_video.h
+++ b/drivers/media/platform/vsp1/vsp1_video.h
@@ -18,7 +18,6 @@
#include <media/videobuf2-v4l2.h>
-#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
struct vsp1_vb2_buffer {
@@ -44,7 +43,6 @@ struct vsp1_video {
struct mutex lock;
- struct vsp1_pipeline pipe;
unsigned int pipe_index;
struct vb2_queue queue;
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index c78d4af50..6c91eaa35 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -16,124 +16,114 @@
#include <media/v4l2-subdev.h>
#include "vsp1.h"
+#include "vsp1_dl.h"
+#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_video.h"
-#define WPF_MAX_WIDTH 2048
-#define WPF_MAX_HEIGHT 2048
+#define WPF_GEN2_MAX_WIDTH 2048U
+#define WPF_GEN2_MAX_HEIGHT 2048U
+#define WPF_GEN3_MAX_WIDTH 8190U
+#define WPF_GEN3_MAX_HEIGHT 8190U
/* -----------------------------------------------------------------------------
* Device Access
*/
-static inline u32 vsp1_wpf_read(struct vsp1_rwpf *wpf, u32 reg)
+static inline void vsp1_wpf_write(struct vsp1_rwpf *wpf,
+ struct vsp1_dl_list *dl, u32 reg, u32 data)
{
- return vsp1_read(wpf->entity.vsp1,
- reg + wpf->entity.index * VI6_WPF_OFFSET);
-}
-
-static inline void vsp1_wpf_write(struct vsp1_rwpf *wpf, u32 reg, u32 data)
-{
- vsp1_mod_write(&wpf->entity,
- reg + wpf->entity.index * VI6_WPF_OFFSET, data);
+ vsp1_dl_list_write(dl, reg + wpf->entity.index * VI6_WPF_OFFSET, data);
}
/* -----------------------------------------------------------------------------
- * Controls
+ * V4L2 Subdevice Core Operations
*/
-static int wpf_s_ctrl(struct v4l2_ctrl *ctrl)
+static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
{
- struct vsp1_rwpf *wpf =
- container_of(ctrl->handler, struct vsp1_rwpf, ctrls);
- u32 value;
+ struct vsp1_rwpf *wpf = to_rwpf(subdev);
+ struct vsp1_device *vsp1 = wpf->entity.vsp1;
- if (!vsp1_entity_is_streaming(&wpf->entity))
+ if (enable)
return 0;
- switch (ctrl->id) {
- case V4L2_CID_ALPHA_COMPONENT:
- value = vsp1_wpf_read(wpf, VI6_WPF_OUTFMT);
- value &= ~VI6_WPF_OUTFMT_PDV_MASK;
- value |= ctrl->val << VI6_WPF_OUTFMT_PDV_SHIFT;
- vsp1_wpf_write(wpf, VI6_WPF_OUTFMT, value);
- break;
- }
+ /* Write to registers directly when stopping the stream as there will be
+ * no pipeline run to apply the display list.
+ */
+ vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
+ vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET +
+ VI6_WPF_SRCRPF, 0);
return 0;
}
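
wpf_s_stream() keeps using vsp1_write() on the stop path because, as its comment notes, no further pipeline run will consume a display list; while streaming, the same registers are reached through vsp1_wpf_write() and a display list instead. A one-line sketch of the rule, with illustrative names:

enum write_path { WRITE_DIRECT_MMIO, WRITE_VIA_DISPLAY_LIST };

static enum write_path wpf_write_path(int stream_running)
{
	/* a display list only takes effect if a frame will still be run */
	return stream_running ? WRITE_VIA_DISPLAY_LIST : WRITE_DIRECT_MMIO;
}
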
-static const struct v4l2_ctrl_ops wpf_ctrl_ops = {
- .s_ctrl = wpf_s_ctrl,
-};
-
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
+ * V4L2 Subdevice Operations
*/
-static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
-{
- struct vsp1_pipeline *pipe = to_vsp1_pipeline(&subdev->entity);
- struct vsp1_rwpf *wpf = to_rwpf(subdev);
- struct vsp1_device *vsp1 = wpf->entity.vsp1;
- const struct v4l2_rect *crop = &wpf->crop;
- unsigned int i;
- u32 srcrpf = 0;
- u32 outfmt = 0;
- int ret;
-
- ret = vsp1_entity_set_streaming(&wpf->entity, enable);
- if (ret < 0)
- return ret;
+static struct v4l2_subdev_video_ops wpf_video_ops = {
+ .s_stream = wpf_s_stream,
+};
- if (!enable) {
- vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
- vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET +
- VI6_WPF_SRCRPF, 0);
- return 0;
- }
+static struct v4l2_subdev_ops wpf_ops = {
+ .video = &wpf_video_ops,
+ .pad = &vsp1_rwpf_pad_ops,
+};
- /* Sources. If the pipeline has a single input and BRU is not used,
- * configure it as the master layer. Otherwise configure all
- * inputs as sub-layers and select the virtual RPF as the master
- * layer.
- */
- for (i = 0; i < vsp1->info->rpf_count; ++i) {
- struct vsp1_rwpf *input = pipe->inputs[i];
+/* -----------------------------------------------------------------------------
+ * VSP1 Entity Operations
+ */
- if (!input)
- continue;
+static void vsp1_wpf_destroy(struct vsp1_entity *entity)
+{
+ struct vsp1_rwpf *wpf = entity_to_rwpf(entity);
- srcrpf |= (!pipe->bru && pipe->num_inputs == 1)
- ? VI6_WPF_SRCRPF_RPF_ACT_MST(input->entity.index)
- : VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index);
- }
+ vsp1_dlm_destroy(wpf->dlm);
+}
- if (pipe->bru || pipe->num_inputs > 1)
- srcrpf |= VI6_WPF_SRCRPF_VIRACT_MST;
+static void wpf_set_memory(struct vsp1_entity *entity, struct vsp1_dl_list *dl)
+{
+ struct vsp1_rwpf *wpf = entity_to_rwpf(entity);
- vsp1_wpf_write(wpf, VI6_WPF_SRCRPF, srcrpf);
+ vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_Y, wpf->mem.addr[0]);
+ vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C0, wpf->mem.addr[1]);
+ vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_ADDR_C1, wpf->mem.addr[2]);
+}
- /* Destination stride. */
- if (!pipe->lif) {
- struct v4l2_pix_format_mplane *format = &wpf->format;
+static void wpf_configure(struct vsp1_entity *entity,
+ struct vsp1_pipeline *pipe,
+ struct vsp1_dl_list *dl)
+{
+ struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
+ struct vsp1_device *vsp1 = wpf->entity.vsp1;
+ const struct v4l2_mbus_framefmt *source_format;
+ const struct v4l2_mbus_framefmt *sink_format;
+ const struct v4l2_rect *crop;
+ unsigned int i;
+ u32 outfmt = 0;
+ u32 srcrpf = 0;
- vsp1_wpf_write(wpf, VI6_WPF_DSTM_STRIDE_Y,
- format->plane_fmt[0].bytesperline);
- if (format->num_planes > 1)
- vsp1_wpf_write(wpf, VI6_WPF_DSTM_STRIDE_C,
- format->plane_fmt[1].bytesperline);
- }
+ /* Cropping */
+ crop = vsp1_rwpf_get_crop(wpf, wpf->entity.config);
- vsp1_wpf_write(wpf, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
+ vsp1_wpf_write(wpf, dl, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
(crop->left << VI6_WPF_SZCLIP_OFST_SHIFT) |
(crop->width << VI6_WPF_SZCLIP_SIZE_SHIFT));
- vsp1_wpf_write(wpf, VI6_WPF_VSZCLIP, VI6_WPF_SZCLIP_EN |
+ vsp1_wpf_write(wpf, dl, VI6_WPF_VSZCLIP, VI6_WPF_SZCLIP_EN |
(crop->top << VI6_WPF_SZCLIP_OFST_SHIFT) |
(crop->height << VI6_WPF_SZCLIP_SIZE_SHIFT));
/* Format */
+ sink_format = vsp1_entity_get_pad_format(&wpf->entity,
+ wpf->entity.config,
+ RWPF_PAD_SINK);
+ source_format = vsp1_entity_get_pad_format(&wpf->entity,
+ wpf->entity.config,
+ RWPF_PAD_SOURCE);
+
if (!pipe->lif) {
+ const struct v4l2_pix_format_mplane *format = &wpf->format;
const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
outfmt = fmtinfo->hwfmt << VI6_WPF_OUTFMT_WRFMT_SHIFT;
@@ -145,73 +135,58 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
if (fmtinfo->swap_uv)
outfmt |= VI6_WPF_OUTFMT_SPUVS;
- vsp1_wpf_write(wpf, VI6_WPF_DSWAP, fmtinfo->swap);
+ /* Destination stride and byte swapping. */
+ vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_STRIDE_Y,
+ format->plane_fmt[0].bytesperline);
+ if (format->num_planes > 1)
+ vsp1_wpf_write(wpf, dl, VI6_WPF_DSTM_STRIDE_C,
+ format->plane_fmt[1].bytesperline);
+
+ vsp1_wpf_write(wpf, dl, VI6_WPF_DSWAP, fmtinfo->swap);
}
- if (wpf->entity.formats[RWPF_PAD_SINK].code !=
- wpf->entity.formats[RWPF_PAD_SOURCE].code)
+ if (sink_format->code != source_format->code)
outfmt |= VI6_WPF_OUTFMT_CSC;
- /* Take the control handler lock to ensure that the PDV value won't be
- * changed behind our back by a set control operation.
- */
- if (vsp1->info->uapi)
- mutex_lock(wpf->ctrls.lock);
- outfmt |= wpf->alpha->cur.val << VI6_WPF_OUTFMT_PDV_SHIFT;
- vsp1_wpf_write(wpf, VI6_WPF_OUTFMT, outfmt);
- if (vsp1->info->uapi)
- mutex_unlock(wpf->ctrls.lock);
-
- vsp1_mod_write(&wpf->entity, VI6_DPR_WPF_FPORCH(wpf->entity.index),
- VI6_DPR_WPF_FPORCH_FP_WPFN);
+ outfmt |= wpf->alpha << VI6_WPF_OUTFMT_PDV_SHIFT;
+ vsp1_wpf_write(wpf, dl, VI6_WPF_OUTFMT, outfmt);
- vsp1_mod_write(&wpf->entity, VI6_WPF_WRBCK_CTRL, 0);
+ vsp1_dl_list_write(dl, VI6_DPR_WPF_FPORCH(wpf->entity.index),
+ VI6_DPR_WPF_FPORCH_FP_WPFN);
- /* Enable interrupts */
- vsp1_write(vsp1, VI6_WPF_IRQ_STA(wpf->entity.index), 0);
- vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index),
- VI6_WFP_IRQ_ENB_FREE);
-
- return 0;
-}
+ vsp1_dl_list_write(dl, VI6_WPF_WRBCK_CTRL, 0);
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
+ /* Sources. If the pipeline has a single input and BRU is not used,
+ * configure it as the master layer. Otherwise configure all
+ * inputs as sub-layers and select the virtual RPF as the master
+ * layer.
+ */
+ for (i = 0; i < vsp1->info->rpf_count; ++i) {
+ struct vsp1_rwpf *input = pipe->inputs[i];
-static struct v4l2_subdev_video_ops wpf_video_ops = {
- .s_stream = wpf_s_stream,
-};
+ if (!input)
+ continue;
-static struct v4l2_subdev_pad_ops wpf_pad_ops = {
- .enum_mbus_code = vsp1_rwpf_enum_mbus_code,
- .enum_frame_size = vsp1_rwpf_enum_frame_size,
- .get_fmt = vsp1_rwpf_get_format,
- .set_fmt = vsp1_rwpf_set_format,
- .get_selection = vsp1_rwpf_get_selection,
- .set_selection = vsp1_rwpf_set_selection,
-};
+ srcrpf |= (!pipe->bru && pipe->num_inputs == 1)
+ ? VI6_WPF_SRCRPF_RPF_ACT_MST(input->entity.index)
+ : VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index);
+ }
-static struct v4l2_subdev_ops wpf_ops = {
- .video = &wpf_video_ops,
- .pad = &wpf_pad_ops,
-};
+ if (pipe->bru || pipe->num_inputs > 1)
+ srcrpf |= VI6_WPF_SRCRPF_VIRACT_MST;
-/* -----------------------------------------------------------------------------
- * Video Device Operations
- */
+ vsp1_wpf_write(wpf, dl, VI6_WPF_SRCRPF, srcrpf);
-static void wpf_set_memory(struct vsp1_rwpf *wpf, struct vsp1_rwpf_memory *mem)
-{
- vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_Y, mem->addr[0]);
- if (mem->num_planes > 1)
- vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C0, mem->addr[1]);
- if (mem->num_planes > 2)
- vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C1, mem->addr[2]);
+ /* Enable interrupts */
+ vsp1_dl_list_write(dl, VI6_WPF_IRQ_STA(wpf->entity.index), 0);
+ vsp1_dl_list_write(dl, VI6_WPF_IRQ_ENB(wpf->entity.index),
+ VI6_WFP_IRQ_ENB_FREE);
}
-static const struct vsp1_rwpf_operations wpf_vdev_ops = {
+static const struct vsp1_entity_operations wpf_entity_ops = {
+ .destroy = vsp1_wpf_destroy,
.set_memory = wpf_set_memory,
+ .configure = wpf_configure,
};
/* -----------------------------------------------------------------------------
@@ -220,51 +195,43 @@ static const struct vsp1_rwpf_operations wpf_vdev_ops = {
struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
{
- struct v4l2_subdev *subdev;
struct vsp1_rwpf *wpf;
+ char name[6];
int ret;
wpf = devm_kzalloc(vsp1->dev, sizeof(*wpf), GFP_KERNEL);
if (wpf == NULL)
return ERR_PTR(-ENOMEM);
- wpf->ops = &wpf_vdev_ops;
-
- wpf->max_width = WPF_MAX_WIDTH;
- wpf->max_height = WPF_MAX_HEIGHT;
+ if (vsp1->info->gen == 2) {
+ wpf->max_width = WPF_GEN2_MAX_WIDTH;
+ wpf->max_height = WPF_GEN2_MAX_HEIGHT;
+ } else {
+ wpf->max_width = WPF_GEN3_MAX_WIDTH;
+ wpf->max_height = WPF_GEN3_MAX_HEIGHT;
+ }
+ wpf->entity.ops = &wpf_entity_ops;
wpf->entity.type = VSP1_ENTITY_WPF;
wpf->entity.index = index;
- ret = vsp1_entity_init(vsp1, &wpf->entity, 2);
+ sprintf(name, "wpf.%u", index);
+ ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &wpf_ops);
if (ret < 0)
return ERR_PTR(ret);
- /* Initialize the V4L2 subdev. */
- subdev = &wpf->entity.subdev;
- v4l2_subdev_init(subdev, &wpf_ops);
-
- subdev->entity.ops = &vsp1->media_ops;
- subdev->internal_ops = &vsp1_subdev_internal_ops;
- snprintf(subdev->name, sizeof(subdev->name), "%s wpf.%u",
- dev_name(vsp1->dev), index);
- v4l2_set_subdevdata(subdev, wpf);
- subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-
- vsp1_entity_init_formats(subdev, NULL);
+ /* Initialize the display list manager. */
+ wpf->dlm = vsp1_dlm_create(vsp1, index, 4);
+ if (!wpf->dlm) {
+ ret = -ENOMEM;
+ goto error;
+ }
/* Initialize the control handler. */
- v4l2_ctrl_handler_init(&wpf->ctrls, 1);
- wpf->alpha = v4l2_ctrl_new_std(&wpf->ctrls, &wpf_ctrl_ops,
- V4L2_CID_ALPHA_COMPONENT,
- 0, 255, 1, 255);
-
- wpf->entity.subdev.ctrl_handler = &wpf->ctrls;
-
- if (wpf->ctrls.error) {
+ ret = vsp1_rwpf_init_ctrls(wpf);
+ if (ret < 0) {
dev_err(vsp1->dev, "wpf%u: failed to initialize controls\n",
index);
- ret = wpf->ctrls.error;
goto error;
}
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index e795a4501..feb3b2f1d 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -351,19 +351,15 @@ static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
struct xvip_graph_entity *entity;
struct device_node *remote;
struct device_node *ep = NULL;
- struct device_node *next;
int ret = 0;
dev_dbg(xdev->dev, "parsing node %s\n", node->full_name);
while (1) {
- next = of_graph_get_next_endpoint(node, ep);
- if (next == NULL)
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (ep == NULL)
break;
- of_node_put(ep);
- ep = next;
-
dev_dbg(xdev->dev, "handling endpoint %s\n", ep->full_name);
remote = of_graph_get_remote_port_parent(ep);
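
Dropping the explicit of_node_put() is only safe because of_graph_get_next_endpoint() releases the reference on the endpoint it was handed, which is the OF-graph helper contract this cleanup assumes. A toy model of an iterator that consumes its previous argument's reference:

struct ref_node { int refs; struct ref_node *next; };

static struct ref_node *next_endpoint(struct ref_node *prev)
{
	struct ref_node *next = prev ? prev->next : 0;

	if (prev)
		prev->refs--;	/* of_node_put(prev) happens inside the helper */
	if (next)
		next->refs++;	/* the returned node comes with a reference */
	return next;
}
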
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 3f61d77d4..9f5b59706 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -873,13 +873,10 @@ static int ati_remote_probe(struct usb_interface *interface,
strlcat(ati_remote->rc_phys, "/input0", sizeof(ati_remote->rc_phys));
strlcat(ati_remote->mouse_phys, "/input1", sizeof(ati_remote->mouse_phys));
- if (udev->manufacturer)
- strlcpy(ati_remote->rc_name, udev->manufacturer,
- sizeof(ati_remote->rc_name));
-
- if (udev->product)
- snprintf(ati_remote->rc_name, sizeof(ati_remote->rc_name),
- "%s %s", ati_remote->rc_name, udev->product);
+ snprintf(ati_remote->rc_name, sizeof(ati_remote->rc_name), "%s%s%s",
+ udev->manufacturer ?: "",
+ udev->manufacturer && udev->product ? " " : "",
+ udev->product ?: "");
if (!strlen(ati_remote->rc_name))
snprintf(ati_remote->rc_name, sizeof(ati_remote->rc_name),
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 35155ae50..5cf2e749b 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -188,6 +188,7 @@
#define VENDOR_TWISTEDMELON 0x2596
#define VENDOR_HAUPPAUGE 0x2040
#define VENDOR_PCTV 0x2013
+#define VENDOR_ADAPTEC 0x03f3
enum mceusb_model_type {
MCE_GEN2 = 0, /* Most boards */
@@ -302,6 +303,9 @@ static struct usb_device_id mceusb_dev_table[] = {
/* SMK/I-O Data GV-MC7/RCKIT Receiver */
{ USB_DEVICE(VENDOR_SMK, 0x0353),
.driver_info = MCE_GEN2_NO_TX },
+ /* SMK RXX6000 Infrared Receiver */
+ { USB_DEVICE(VENDOR_SMK, 0x0357),
+ .driver_info = MCE_GEN2_NO_TX },
/* Tatung eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_TATUNG, 0x9150) },
/* Shuttle eHome Infrared Transceiver */
@@ -405,6 +409,8 @@ static struct usb_device_id mceusb_dev_table[] = {
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
{ USB_DEVICE(VENDOR_PCTV, 0x025e),
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
+ /* Adaptec / HP eHome Receiver */
+ { USB_DEVICE(VENDOR_ADAPTEC, 0x0094) },
/* Terminating entry */
{ }
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 4e9bbe735..7dfc7c218 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1263,6 +1263,9 @@ unlock:
static void rc_dev_release(struct device *device)
{
+ struct rc_dev *dev = to_rc_dev(device);
+
+ kfree(dev);
}
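
Moving kfree() from rc_free_device() into the struct device release callback ties the allocation's lifetime to the device reference count: the put_device() in rc_free_device() may not drop the last reference, and freeing eagerly would leave other holders with a dangling pointer. A toy model of the pattern:

#include <stdlib.h>

struct device_model {
	unsigned int refcount;
	void (*release)(struct device_model *dev);
};

static void put_device_model(struct device_model *dev)
{
	if (--dev->refcount == 0)
		dev->release(dev);	/* rc_dev_release() -> kfree(dev) */
}
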
#define ADD_HOTPLUG_VAR(fmt, val...) \
@@ -1384,7 +1387,9 @@ void rc_free_device(struct rc_dev *dev)
put_device(&dev->dev);
- kfree(dev);
+	/* kfree(dev) will be called by the callback function
+	 * rc_dev_release().
+	 */
+
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(rc_free_device);
@@ -1492,9 +1497,7 @@ int rc_register_device(struct rc_dev *dev)
}
/* Allow the RC sysfs nodes to be accessible */
- mutex_lock(&dev->lock);
atomic_set(&dev->initialized, 1);
- mutex_unlock(&dev->lock);
IR_dprintk(1, "Registered rc%u (driver: %s, remote: %s, mode %s)\n",
dev->minor,
diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
index 18bc745ed..9af2a155c 100644
--- a/drivers/media/tuners/qm1d1c0042.c
+++ b/drivers/media/tuners/qm1d1c0042.c
@@ -32,14 +32,24 @@
#include "qm1d1c0042.h"
#define QM1D1C0042_NUM_REGS 0x20
-
-static const u8 reg_initval[QM1D1C0042_NUM_REGS] = {
- 0x48, 0x1c, 0xa0, 0x10, 0xbc, 0xc5, 0x20, 0x33,
- 0x06, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
- 0x00, 0xff, 0xf3, 0x00, 0x2a, 0x64, 0xa6, 0x86,
- 0x8c, 0xcf, 0xb8, 0xf1, 0xa8, 0xf2, 0x89, 0x00
+#define QM1D1C0042_NUM_REG_ROWS 2
+
+static const u8
+reg_initval[QM1D1C0042_NUM_REG_ROWS][QM1D1C0042_NUM_REGS] = { {
+ 0x48, 0x1c, 0xa0, 0x10, 0xbc, 0xc5, 0x20, 0x33,
+ 0x06, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x00, 0xff, 0xf3, 0x00, 0x2a, 0x64, 0xa6, 0x86,
+ 0x8c, 0xcf, 0xb8, 0xf1, 0xa8, 0xf2, 0x89, 0x00
+ }, {
+ 0x68, 0x1c, 0xc0, 0x10, 0xbc, 0xc1, 0x11, 0x33,
+ 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
+ 0x00, 0xff, 0xf3, 0x00, 0x3f, 0x25, 0x5c, 0xd6,
+ 0x55, 0xcf, 0x95, 0xf6, 0x36, 0xf2, 0x09, 0x00
+ }
};
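
The second row covers a chip revision whose ID register (0x00) reads 0x68 instead of 0x48; qm1d1c0042_init() below matches the ID against the first byte of each row. One caveat worth noting: reg_index is a file-scope static, so it is shared between tuner instances. A standalone sketch of the row selection:

#define NUM_ROWS 2
#define NUM_REGS 0x20

static int pick_reg_row(const unsigned char table[NUM_ROWS][NUM_REGS],
			unsigned char chip_id)
{
	int i;

	for (i = 0; i < NUM_ROWS; i++)
		if (table[i][0x00] == chip_id)	/* row's first byte is the ID */
			return i;
	return -1;	/* unknown chip revision */
}
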
+static int reg_index;
+
static const struct qm1d1c0042_config default_cfg = {
.xtal_freq = 16000,
.lpf = 1,
@@ -320,7 +330,6 @@ static int qm1d1c0042_init(struct dvb_frontend *fe)
int i, ret;
state = fe->tuner_priv;
- memcpy(state->regs, reg_initval, sizeof(reg_initval));
reg_write(state, 0x01, 0x0c);
reg_write(state, 0x01, 0x0c);
@@ -330,15 +339,22 @@ static int qm1d1c0042_init(struct dvb_frontend *fe)
goto failed;
usleep_range(2000, 3000);
- val = state->regs[0x01] | 0x10;
- ret = reg_write(state, 0x01, val); /* soft reset off */
+ ret = reg_write(state, 0x01, 0x1c); /* soft reset off */
if (ret < 0)
goto failed;
- /* check ID */
+	/* check the ID and choose the initial register set corresponding to it */
ret = reg_read(state, 0x00, &val);
- if (ret < 0 || val != 0x48)
+ if (ret < 0)
+ goto failed;
+ for (reg_index = 0; reg_index < QM1D1C0042_NUM_REG_ROWS;
+ reg_index++) {
+ if (val == reg_initval[reg_index][0x00])
+ break;
+ }
+ if (reg_index >= QM1D1C0042_NUM_REG_ROWS)
goto failed;
+ memcpy(state->regs, reg_initval[reg_index], QM1D1C0042_NUM_REGS);
usleep_range(2000, 3000);
state->regs[0x0c] |= 0x40;
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 79ac27c30..abe362cd6 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -84,11 +84,22 @@ static int si2157_init(struct dvb_frontend *fe)
struct si2157_cmd cmd;
const struct firmware *fw;
const char *fw_name;
- unsigned int chip_id;
+ unsigned int uitmp, chip_id;
dev_dbg(&client->dev, "\n");
- if (dev->fw_loaded)
+ /* Returned IF frequency is garbage when firmware is not running */
+ memcpy(cmd.args, "\x15\x00\x06\x07", 4);
+ cmd.wlen = 4;
+ cmd.rlen = 4;
+ ret = si2157_cmd_execute(client, &cmd);
+ if (ret)
+ goto err;
+
+ uitmp = cmd.args[2] << 0 | cmd.args[3] << 8;
+ dev_dbg(&client->dev, "if_frequency kHz=%u\n", uitmp);
+
+ if (uitmp == dev->if_frequency / 1000)
goto warm;
/* power up */
@@ -203,9 +214,6 @@ skip_fw_download:
dev_info(&client->dev, "firmware version: %c.%c.%d\n",
cmd.args[6], cmd.args[7], cmd.args[8]);
-
- dev->fw_loaded = true;
-
warm:
	/* init statistics in order to signal the app which ones are supported */
c->strength.len = 1;
@@ -422,7 +430,6 @@ static int si2157_probe(struct i2c_client *client,
dev->fe = cfg->fe;
dev->inversion = cfg->inversion;
dev->if_port = cfg->if_port;
- dev->fw_loaded = false;
dev->chiptype = (u8)id->driver_data;
dev->if_frequency = 5000000; /* default value of property 0x0706 */
mutex_init(&dev->i2c_mutex);
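
The warm-start detection replaces the fw_loaded flag with a hardware query: command 0x15 appears to be a property read and 0x0706 the IF-frequency property (matching the "property 0x0706" comment on the default if_frequency), with the response carrying the frequency in kHz, little-endian, in bytes 2 and 3. A sketch of the comparison, under those assumptions:

static unsigned int parse_if_khz(const unsigned char *resp)
{
	/* resp[2] is the low byte, resp[3] the high byte of the IF in kHz */
	return (unsigned int)resp[2] | ((unsigned int)resp[3] << 8);
}

static int firmware_running(const unsigned char *resp,
			    unsigned int if_frequency_hz)
{
	/* garbage readback won't match the expected IF, forcing a reload */
	return parse_if_khz(resp) == if_frequency_hz / 1000;
}
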
diff --git a/drivers/media/tuners/si2157_priv.h b/drivers/media/tuners/si2157_priv.h
index 6fdc84fb3..fdc3f2f64 100644
--- a/drivers/media/tuners/si2157_priv.h
+++ b/drivers/media/tuners/si2157_priv.h
@@ -26,7 +26,6 @@ struct si2157_dev {
struct mutex i2c_mutex;
struct dvb_frontend *fe;
bool active;
- bool fw_loaded;
bool inversion;
u8 chiptype;
u8 if_port;
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 87c129304..92d9d4214 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -1072,7 +1072,7 @@ static int airspy_probe(struct usb_interface *intf,
if (ret) {
dev_err(s->dev, "Failed to register as video device (%d)\n",
ret);
- goto err_unregister_v4l2_dev;
+ goto err_free_controls;
}
dev_info(s->dev, "Registered as %s\n",
video_device_node_name(&s->vdev));
@@ -1081,7 +1081,6 @@ static int airspy_probe(struct usb_interface *intf,
err_free_controls:
v4l2_ctrl_handler_free(&s->hdl);
-err_unregister_v4l2_dev:
v4l2_device_unregister(&s->v4l2_dev);
err_free_mem:
kfree(s);
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index cc22b3277..321ea5cf1 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -131,22 +131,36 @@ static int recv_control_msg(struct au0828_dev *dev, u16 request, u32 value,
return status;
}
+#ifdef CONFIG_MEDIA_CONTROLLER
+static void au0828_media_graph_notify(struct media_entity *new,
+ void *notify_data);
+#endif
+
static void au0828_unregister_media_device(struct au0828_dev *dev)
{
-
#ifdef CONFIG_MEDIA_CONTROLLER
- if (dev->media_dev &&
- media_devnode_is_registered(&dev->media_dev->devnode)) {
- /* clear enable_source, disable_source */
- dev->media_dev->source_priv = NULL;
- dev->media_dev->enable_source = NULL;
- dev->media_dev->disable_source = NULL;
-
- media_device_unregister(dev->media_dev);
- media_device_cleanup(dev->media_dev);
- kfree(dev->media_dev);
- dev->media_dev = NULL;
+ struct media_device *mdev = dev->media_dev;
+ struct media_entity_notify *notify, *nextp;
+
+ if (!mdev || !media_devnode_is_registered(&mdev->devnode))
+ return;
+
+ /* Remove au0828 entity_notify callbacks */
+ list_for_each_entry_safe(notify, nextp, &mdev->entity_notify, list) {
+ if (notify->notify != au0828_media_graph_notify)
+ continue;
+ media_device_unregister_entity_notify(mdev, notify);
}
+
+ /* clear enable_source, disable_source */
+ dev->media_dev->source_priv = NULL;
+ dev->media_dev->enable_source = NULL;
+ dev->media_dev->disable_source = NULL;
+
+ media_device_unregister(dev->media_dev);
+ media_device_cleanup(dev->media_dev);
+ kfree(dev->media_dev);
+ dev->media_dev = NULL;
#endif
}
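
The unregister path deletes entries from mdev->entity_notify while walking it, hence list_for_each_entry_safe, which caches the next pointer before the current node is removed. A standalone model of why the plain iterator would break:

#include <stdlib.h>

struct node {
	struct node *next;
	int mine;
};

static void remove_mine(struct node **head)
{
	struct node **pp = head;

	while (*pp) {
		struct node *n = *pp;

		if (n->mine) {		/* notify->notify matches our callback */
			*pp = n->next;	/* unlink before freeing, like the
					 * _safe macro's cached next pointer */
			free(n);
		} else {
			pp = &n->next;
		}
	}
}
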
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index 32d7db964..7d0ec4cb2 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -679,8 +679,6 @@ int au0828_v4l2_device_register(struct usb_interface *interface,
if (retval) {
pr_err("%s() v4l2_device_register failed\n",
__func__);
- mutex_unlock(&dev->lock);
- kfree(dev);
return retval;
}
@@ -691,8 +689,6 @@ int au0828_v4l2_device_register(struct usb_interface *interface,
if (retval) {
pr_err("%s() v4l2_ctrl_handler_init failed\n",
__func__);
- mutex_unlock(&dev->lock);
- kfree(dev);
return retval;
}
dev->v4l2_dev.ctrl_handler = &dev->v4l2_ctrl_hdl;
diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
index 87f32846f..dd7b378fe 100644
--- a/drivers/media/usb/au0828/au0828.h
+++ b/drivers/media/usb/au0828/au0828.h
@@ -55,7 +55,6 @@
#define NTSC_STD_H 480
#define AU0828_INTERLACED_DEFAULT 1
-#define V4L2_CID_PRIVATE_SHARPNESS (V4L2_CID_PRIVATE_BASE + 0)
/* Definition for AU0828 USB transfer */
#define AU0828_MAX_ISO_BUFS 12 /* maybe resize this value in the future */
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 9a3dd944e..fd8b39534 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -360,7 +360,7 @@ static int wait_for_mci_complete(struct cx231xx *dev)
if (count++ > 100) {
dprintk(3, "ERROR: Timeout - gpio=%x\n", gpio);
- return -1;
+ return -EIO;
}
}
return 0;
@@ -856,7 +856,7 @@ static int cx231xx_find_mailbox(struct cx231xx *dev)
}
}
dprintk(3, "Mailbox signature values not found!\n");
- return -1;
+ return -EIO;
}
static void mci_write_memory_to_gpio(struct cx231xx *dev, u32 address, u32 value,
@@ -960,13 +960,14 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
p_fw = p_current_fw;
if (p_current_fw == NULL) {
dprintk(2, "FAIL!!!\n");
- return -1;
+ return -ENOMEM;
}
p_buffer = vmalloc(4096);
if (p_buffer == NULL) {
dprintk(2, "FAIL!!!\n");
- return -1;
+ vfree(p_current_fw);
+ return -ENOMEM;
}
dprintk(2, "%s()\n", __func__);
@@ -989,7 +990,9 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
if (retval != 0) {
dev_err(dev->dev,
"%s: Error with mc417_register_write\n", __func__);
- return -1;
+ vfree(p_current_fw);
+ vfree(p_buffer);
+ return retval;
}
retval = reject_firmware(&firmware, CX231xx_FIRM_IMAGE_NAME,
@@ -1001,7 +1004,9 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
CX231xx_FIRM_IMAGE_NAME);
dev_err(dev->dev,
"Please fix your hotplug setup, the board will not work without firmware loaded!\n");
- return -1;
+ vfree(p_current_fw);
+ vfree(p_buffer);
+ return retval;
}
if (firmware->size != CX231xx_FIRM_IMAGE_SIZE) {
@@ -1009,14 +1014,18 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
"ERROR: Firmware size mismatch (have %zd, expected %d)\n",
firmware->size, CX231xx_FIRM_IMAGE_SIZE);
release_firmware(firmware);
- return -1;
+ vfree(p_current_fw);
+ vfree(p_buffer);
+ return -EINVAL;
}
if (0 != memcmp(firmware->data, magic, 8)) {
dev_err(dev->dev,
"ERROR: Firmware magic mismatch, wrong file?\n");
release_firmware(firmware);
- return -1;
+ vfree(p_current_fw);
+ vfree(p_buffer);
+ return -EINVAL;
}
initGPIO(dev);
@@ -1131,21 +1140,21 @@ static int cx231xx_initialize_codec(struct cx231xx *dev)
if (retval < 0) {
dev_err(dev->dev, "%s: mailbox < 0, error\n",
__func__);
- return -1;
+ return retval;
}
dev->cx23417_mailbox = retval;
retval = cx231xx_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0);
if (retval < 0) {
dev_err(dev->dev,
"ERROR: cx23417 firmware ping failed!\n");
- return -1;
+ return retval;
}
retval = cx231xx_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1,
&version);
if (retval < 0) {
dev_err(dev->dev,
"ERROR: cx23417 firmware get encoder: version failed!\n");
- return -1;
+ return retval;
}
dprintk(1, "cx23417 firmware version is 0x%08x\n", version);
msleep(200);
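
Beyond swapping bare -1 returns for real errno values, the cx231xx_load_firmware() changes plug two vmalloc() leaks: every early exit now frees p_current_fw and p_buffer. When a function owns several buffers, funnelling failures through shared labels keeps each allocation paired with exactly one vfree(). A hedged sketch of that structure (load_blob and the transfer step are invented):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

static int load_blob(const u8 *image, size_t fw_len)
{
	u8 *fw_buf, *scratch;
	int ret = 0;

	fw_buf = vmalloc(fw_len);
	if (!fw_buf)
		return -ENOMEM;

	scratch = vmalloc(4096);
	if (!scratch) {
		ret = -ENOMEM;
		goto out_fw;		/* fw_buf already live: free it */
	}

	memcpy(fw_buf, image, fw_len);
	/* ... push fw_buf to the hardware via scratch ... */

	vfree(scratch);
out_fw:
	vfree(fw_buf);
	return ret;
}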
diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
index f497888d9..630f4fc51 100644
--- a/drivers/media/usb/cx231xx/cx231xx-core.c
+++ b/drivers/media/usb/cx231xx/cx231xx-core.c
@@ -752,7 +752,8 @@ EXPORT_SYMBOL_GPL(cx231xx_set_mode);
int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size)
{
int errCode = 0;
- int actlen, ret = -ENOMEM;
+ int actlen = -1;
+ int ret = -ENOMEM;
u32 *buffer;
buffer = kzalloc(4096, GFP_KERNEL);
@@ -1304,6 +1305,9 @@ int cx231xx_dev_init(struct cx231xx *dev)
cx231xx_i2c_register(&dev->i2c_bus[1]);
cx231xx_i2c_register(&dev->i2c_bus[2]);
+ errCode = cx231xx_i2c_mux_create(dev);
+ if (errCode < 0)
+ return errCode;
cx231xx_i2c_mux_register(dev, 0);
cx231xx_i2c_mux_register(dev, 1);
@@ -1426,8 +1430,7 @@ EXPORT_SYMBOL_GPL(cx231xx_dev_init);
void cx231xx_dev_uninit(struct cx231xx *dev)
{
/* Un Initialize I2C bus */
- cx231xx_i2c_mux_unregister(dev, 1);
- cx231xx_i2c_mux_unregister(dev, 0);
+ cx231xx_i2c_mux_unregister(dev);
cx231xx_i2c_unregister(&dev->i2c_bus[2]);
cx231xx_i2c_unregister(&dev->i2c_bus[1]);
cx231xx_i2c_unregister(&dev->i2c_bus[0]);
diff --git a/drivers/media/usb/cx231xx/cx231xx-i2c.c b/drivers/media/usb/cx231xx/cx231xx-i2c.c
index a29c345b0..473cd3433 100644
--- a/drivers/media/usb/cx231xx/cx231xx-i2c.c
+++ b/drivers/media/usb/cx231xx/cx231xx-i2c.c
@@ -557,40 +557,41 @@ int cx231xx_i2c_unregister(struct cx231xx_i2c *bus)
* cx231xx_i2c_mux_select()
* switch i2c master number 1 between port1 and port3
*/
-static int cx231xx_i2c_mux_select(struct i2c_adapter *adap,
- void *mux_priv, u32 chan_id)
+static int cx231xx_i2c_mux_select(struct i2c_mux_core *muxc, u32 chan_id)
{
- struct cx231xx *dev = mux_priv;
+ struct cx231xx *dev = i2c_mux_priv(muxc);
return cx231xx_enable_i2c_port_3(dev, chan_id);
}
+int cx231xx_i2c_mux_create(struct cx231xx *dev)
+{
+ dev->muxc = i2c_mux_alloc(&dev->i2c_bus[1].i2c_adap, dev->dev, 2, 0, 0,
+ cx231xx_i2c_mux_select, NULL);
+ if (!dev->muxc)
+ return -ENOMEM;
+ dev->muxc->priv = dev;
+ return 0;
+}
+
int cx231xx_i2c_mux_register(struct cx231xx *dev, int mux_no)
{
- struct i2c_adapter *i2c_parent = &dev->i2c_bus[1].i2c_adap;
- /* what is the correct mux_dev? */
- struct device *mux_dev = dev->dev;
-
- dev->i2c_mux_adap[mux_no] = i2c_add_mux_adapter(i2c_parent,
- mux_dev,
- dev /* mux_priv */,
- 0,
- mux_no /* chan_id */,
- 0 /* class */,
- &cx231xx_i2c_mux_select,
- NULL);
-
- if (!dev->i2c_mux_adap[mux_no])
+ int rc;
+
+ rc = i2c_mux_add_adapter(dev->muxc,
+ 0,
+ mux_no /* chan_id */,
+ 0 /* class */);
+ if (rc)
dev_warn(dev->dev,
"i2c mux %d register FAILED\n", mux_no);
- return 0;
+ return rc;
}
-void cx231xx_i2c_mux_unregister(struct cx231xx *dev, int mux_no)
+void cx231xx_i2c_mux_unregister(struct cx231xx *dev)
{
- i2c_del_mux_adapter(dev->i2c_mux_adap[mux_no]);
- dev->i2c_mux_adap[mux_no] = NULL;
+ i2c_mux_del_adapters(dev->muxc);
}
struct i2c_adapter *cx231xx_get_i2c_adap(struct cx231xx *dev, int i2c_port)
@@ -603,9 +604,9 @@ struct i2c_adapter *cx231xx_get_i2c_adap(struct cx231xx *dev, int i2c_port)
case I2C_2:
return &dev->i2c_bus[2].i2c_adap;
case I2C_1_MUX_1:
- return dev->i2c_mux_adap[0];
+ return dev->muxc->adapter[0];
case I2C_1_MUX_3:
- return dev->i2c_mux_adap[1];
+ return dev->muxc->adapter[1];
default:
return NULL;
}
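
This converts cx231xx from the old per-child i2c_add_mux_adapter() call to the i2c_mux_core API that 4.7 introduced: allocate one core with i2c_mux_alloc(), add a child adapter per channel, and drop them all with a single i2c_mux_del_adapters(). A condensed sketch of that life cycle, under an invented mydev type:

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-mux.h>

struct mydev {				/* invented device type */
	struct i2c_mux_core *muxc;
};

static int mydev_mux_select(struct i2c_mux_core *muxc, u32 chan)
{
	struct mydev *dev = i2c_mux_priv(muxc);

	/* ... point the hardware at @chan; dev carries the state ... */
	(void)dev;
	return 0;
}

static int mydev_mux_init(struct mydev *dev, struct i2c_adapter *parent,
			  struct device *physdev)
{
	int i, ret;

	/* two channels, no extra private storage, no flags */
	dev->muxc = i2c_mux_alloc(parent, physdev, 2, 0, 0,
				  mydev_mux_select, NULL);
	if (!dev->muxc)
		return -ENOMEM;
	dev->muxc->priv = dev;

	for (i = 0; i < 2; i++) {
		ret = i2c_mux_add_adapter(dev->muxc, 0, i, 0);
		if (ret) {
			i2c_mux_del_adapters(dev->muxc);
			return ret;
		}
	}
	return 0;
}

Passing 0 for sizeof_priv and assigning muxc->priv by hand mirrors what the driver does above; alternatively the core can carve out private storage itself.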
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index 69f6d2087..90c867683 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -624,6 +624,7 @@ struct cx231xx {
/* I2C adapters: Master 1 & 2 (External) & Master 3 (Internal only) */
struct cx231xx_i2c i2c_bus[3];
+ struct i2c_mux_core *muxc;
struct i2c_adapter *i2c_mux_adap[2];
unsigned int xc_fw_load_done:1;
@@ -760,8 +761,9 @@ int cx231xx_reset_analog_tuner(struct cx231xx *dev);
void cx231xx_do_i2c_scan(struct cx231xx *dev, int i2c_port);
int cx231xx_i2c_register(struct cx231xx_i2c *bus);
int cx231xx_i2c_unregister(struct cx231xx_i2c *bus);
+int cx231xx_i2c_mux_create(struct cx231xx *dev);
int cx231xx_i2c_mux_register(struct cx231xx *dev, int mux_no);
-void cx231xx_i2c_mux_unregister(struct cx231xx *dev, int mux_no);
+void cx231xx_i2c_mux_unregister(struct cx231xx *dev);
struct i2c_adapter *cx231xx_get_i2c_adap(struct cx231xx *dev, int i2c_port);
/* Internal block control functions */
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 56f43ce32..1bb2d64d5 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -398,6 +398,8 @@ error:
}
#define AF9015_EEPROM_SIZE 256
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
/* hash (and dump) eeprom */
static int af9015_eeprom_hash(struct dvb_usb_device *d)
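
GOLDEN_RATIO_PRIME_32 is the 32-bit golden-ratio hash constant that af9015 now carries locally, since the 4.7 hash_32() rework removed it from linux/hash.h. Multiplying a key by it and keeping the top bits spreads consecutive inputs across buckets. A runnable illustration:

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_PRIME_32 0x9e370001UL

/* hash_32()-style multiplicative hash: multiply, keep the top @bits */
static uint32_t hash32(uint32_t val, unsigned int bits)
{
	return (uint32_t)(val * GOLDEN_RATIO_PRIME_32) >> (32 - bits);
}

int main(void)
{
	uint32_t k;

	/* consecutive keys scatter across the 64 6-bit buckets */
	for (k = 0; k < 8; k++)
		printf("%u -> %u\n", k, hash32(k, 6));
	return 0;
}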
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.h b/drivers/media/usb/dvb-usb-v2/af9035.h
index c927c1c0f..dc5849c6b 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.h
+++ b/drivers/media/usb/dvb-usb-v2/af9035.h
@@ -118,20 +118,20 @@ static const u32 clock_lut_it9135[] = {
* Values 0, 3 and 5 are seen to this day. 0 for single TS and 3/5 for dual TS.
*/
-#define EEPROM_BASE_AF9035 0x42fd
-#define EEPROM_BASE_IT9135 0x499c
+#define EEPROM_BASE_AF9035 0x42f5
+#define EEPROM_BASE_IT9135 0x4994
#define EEPROM_SHIFT 0x10
-#define EEPROM_IR_MODE 0x10
-#define EEPROM_TS_MODE 0x29
-#define EEPROM_2ND_DEMOD_ADDR 0x2a
-#define EEPROM_IR_TYPE 0x2c
-#define EEPROM_1_IF_L 0x30
-#define EEPROM_1_IF_H 0x31
-#define EEPROM_1_TUNER_ID 0x34
-#define EEPROM_2_IF_L 0x40
-#define EEPROM_2_IF_H 0x41
-#define EEPROM_2_TUNER_ID 0x44
+#define EEPROM_IR_MODE 0x18
+#define EEPROM_TS_MODE 0x31
+#define EEPROM_2ND_DEMOD_ADDR 0x32
+#define EEPROM_IR_TYPE 0x34
+#define EEPROM_1_IF_L 0x38
+#define EEPROM_1_IF_H 0x39
+#define EEPROM_1_TUNER_ID 0x3c
+#define EEPROM_2_IF_L 0x48
+#define EEPROM_2_IF_H 0x49
+#define EEPROM_2_TUNER_ID 0x4c
/* USB commands */
#define CMD_MEM_RD 0x00
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index fa72642d4..eb7af8cb8 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -1333,10 +1333,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap)
case TUNER_RTL2832_R828D:
pdata.clk = dev->rtl2832_platform_data.clk;
pdata.tuner = dev->tuner;
- pdata.i2c_client = dev->i2c_client_demod;
- pdata.bulk_read = dev->rtl2832_platform_data.bulk_read;
- pdata.bulk_write = dev->rtl2832_platform_data.bulk_write;
- pdata.update_bits = dev->rtl2832_platform_data.update_bits;
+ pdata.regmap = dev->rtl2832_platform_data.regmap;
pdata.dvb_frontend = adap->fe[0];
pdata.dvb_usb_device = d;
pdata.v4l2_subdev = subdev;
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index 977c07a2f..b76437965 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -1090,6 +1090,7 @@ static struct usb_device_id az6027_usb_table[] = {
{ USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_HDCI_V2) },
{ USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_SAT) },
{ USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_SAT_V2) },
+ { USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_SAT_V3) },
{ },
};
@@ -1138,7 +1139,7 @@ static struct dvb_usb_device_properties az6027_properties = {
.i2c_algo = &az6027_i2c_algo,
- .num_device_descs = 7,
+ .num_device_descs = 8,
.devices = {
{
.name = "AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)",
@@ -1168,6 +1169,10 @@ static struct dvb_usb_device_properties az6027_properties = {
.name = "Elgato EyeTV Sat",
.cold_ids = { &az6027_usb_table[6], NULL },
.warm_ids = { NULL },
+ }, {
+ .name = "Elgato EyeTV Sat",
+ .cold_ids = { &az6027_usb_table[7], NULL },
+ .warm_ids = { NULL },
},
{ NULL },
}
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index bea4b5909..128e89613 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -517,7 +517,7 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
if (nb_packet_buffer_size < 1)
nb_packet_buffer_size = 1;
- /* get the fimware version */
+ /* get the firmware version */
usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
REQUEST_GET_VERSION,
USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 9b719ef6e..ad25ce34a 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -3814,6 +3814,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
{ USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_2002E) },
{ USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_2002E_SE) },
{ USB_DEVICE(USB_VID_PCTV, USB_PID_DIBCOM_STK8096PVR) },
+ { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK8096PVR) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -5017,7 +5018,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
.num_device_descs = 1,
.devices = {
{ "DiBcom STK8096-PVR reference design",
- { &dib0700_usb_id_table[83], NULL },
+ { &dib0700_usb_id_table[83],
+ &dib0700_usb_id_table[84], NULL},
{ NULL },
},
},
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 35de60959..6eea4e688 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -184,6 +184,8 @@ int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
}
EXPORT_SYMBOL(dibusb_read_eeprom_byte);
+#if IS_ENABLED(CONFIG_DVB_DIB3000MC)
+
/* 3000MC/P stuff */
// Config Adjacent channels Perf -cal22
static struct dibx000_agc_config dib3000p_mt2060_agc_config = {
@@ -242,8 +244,6 @@ static struct dibx000_agc_config dib3000p_panasonic_agc_config = {
.agc2_slope2 = 0x1e,
};
-#if IS_ENABLED(CONFIG_DVB_DIB3000MC)
-
static struct dib3000mc_config mod3000p_dib3000p_config = {
&dib3000p_panasonic_agc_config,
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index eaa1d906d..ec5d4caf3 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -13,6 +13,7 @@
*
* see Documentation/dvb/README.dvb-usb for more information
*/
+#include "dvb-usb-ids.h"
#include "dw2102.h"
#include "si21xx.h"
#include "stv0299.h"
@@ -38,61 +39,6 @@
/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE 64
-#ifndef USB_PID_DW2102
-#define USB_PID_DW2102 0x2102
-#endif
-
-#ifndef USB_PID_DW2104
-#define USB_PID_DW2104 0x2104
-#endif
-
-#ifndef USB_PID_DW3101
-#define USB_PID_DW3101 0x3101
-#endif
-
-#ifndef USB_PID_CINERGY_S
-#define USB_PID_CINERGY_S 0x0064
-#endif
-
-#ifndef USB_PID_TEVII_S630
-#define USB_PID_TEVII_S630 0xd630
-#endif
-
-#ifndef USB_PID_TEVII_S650
-#define USB_PID_TEVII_S650 0xd650
-#endif
-
-#ifndef USB_PID_TEVII_S660
-#define USB_PID_TEVII_S660 0xd660
-#endif
-
-#ifndef USB_PID_TEVII_S662
-#define USB_PID_TEVII_S662 0xd662
-#endif
-
-#ifndef USB_PID_TEVII_S480_1
-#define USB_PID_TEVII_S480_1 0xd481
-#endif
-
-#ifndef USB_PID_TEVII_S480_2
-#define USB_PID_TEVII_S480_2 0xd482
-#endif
-
-#ifndef USB_PID_PROF_1100
-#define USB_PID_PROF_1100 0xb012
-#endif
-
-#ifndef USB_PID_TEVII_S421
-#define USB_PID_TEVII_S421 0xd421
-#endif
-
-#ifndef USB_PID_TEVII_S632
-#define USB_PID_TEVII_S632 0xd632
-#endif
-
-#ifndef USB_PID_GOTVIEW_SAT_HD
-#define USB_PID_GOTVIEW_SAT_HD 0x5456
-#endif
#define DW210X_READ_MSG 0
#define DW210X_WRITE_MSG 1
@@ -1709,7 +1655,7 @@ static struct usb_device_id dw2102_table[] = {
[CYPRESS_DW2101] = {USB_DEVICE(USB_VID_CYPRESS, 0x2101)},
[CYPRESS_DW2104] = {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW2104)},
[TEVII_S650] = {USB_DEVICE(0x9022, USB_PID_TEVII_S650)},
- [TERRATEC_CINERGY_S] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_CINERGY_S)},
+ [TERRATEC_CINERGY_S] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S)},
[CYPRESS_DW3101] = {USB_DEVICE(USB_VID_CYPRESS, USB_PID_DW3101)},
[TEVII_S630] = {USB_DEVICE(0x9022, USB_PID_TEVII_S630)},
[PROF_1100] = {USB_DEVICE(0x3011, USB_PID_PROF_1100)},
@@ -1801,7 +1747,7 @@ static int dw2102_load_firmware(struct usb_device *dev,
dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
DW210X_WRITE_MSG);
break;
- case USB_PID_CINERGY_S:
+ case USB_PID_TERRATEC_CINERGY_S:
case USB_PID_DW2102:
dw210x_op_rw(dev, 0xbf, 0x0040, 0, &reset, 0,
DW210X_WRITE_MSG);
@@ -1843,6 +1789,9 @@ static int dw2102_load_firmware(struct usb_device *dev,
msleep(100);
kfree(p);
}
+
+ if (le16_to_cpu(dev->descriptor.idProduct) == 0x2101)
+ release_firmware(fw);
return ret;
}
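
The DW2101 branch of dw2102_load_firmware() requests its own firmware image, so the new release_firmware(fw) drops a reference that previously leaked on that path. Every successful request_firmware() — reject_firmware() in this tree's linux-libre spelling — must be paired with exactly one release_firmware(). A minimal sketch of the pairing (load_fw and the image name are invented):

#include <linux/device.h>
#include <linux/firmware.h>

static int load_fw(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "vendor/image.fw", dev);
	if (ret)
		return ret;		/* nothing to release on failure */

	/* ... push fw->data / fw->size to the hardware ... */

	release_firmware(fw);		/* every success path drops it */
	return 0;
}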
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index ec397c4b7..c05de1b08 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -995,11 +995,11 @@ static struct dvb_usb_device_properties tt_connect_s2_3600_properties = {
/* parameter for the MPEG2-data transfer */
.stream = {
.type = USB_ISOC,
- .count = 7,
+ .count = 4,
.endpoint = 0x02,
.u = {
.isoc = {
- .framesperurb = 4,
+ .framesperurb = 64,
.framesize = 940,
.interval = 1
}
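
The pctv452e stream change trades URB count for frames per URB: 7 URBs of 4 frames kept only 28 isochronous frames in flight, while 4 URBs of 64 frames keep 256 queued — roughly 235 KiB of buffering at 940 bytes per frame instead of about 26 KiB. Checking the budget:

#include <stdio.h>

int main(void)
{
	const int framesize = 940;	/* bytes per isoc frame */

	printf("old: %d bytes\n", 7 * 4 * framesize);	/* 26320 */
	printf("new: %d bytes\n", 4 * 64 * framesize);	/* 240640 */
	return 0;
}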
diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
index e382210c4..d917b0a2b 100644
--- a/drivers/media/usb/em28xx/Kconfig
+++ b/drivers/media/usb/em28xx/Kconfig
@@ -59,6 +59,8 @@ config VIDEO_EM28XX_DVB
select DVB_DRX39XYJ if MEDIA_SUBDRV_AUTOSELECT
select DVB_SI2168 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_SI2157 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_TC90522 if MEDIA_SUBDRV_AUTOSELECT
+ select MEDIA_TUNER_QM1D1C0042 if MEDIA_SUBDRV_AUTOSELECT
---help---
This adds support for DVB cards based on the
Empiatech em28xx chips.
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index d08453fcc..9227495e2 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -492,6 +492,44 @@ static struct em28xx_reg_seq terratec_t2_stick_hd[] = {
{-1, -1, -1, -1},
};
+static struct em28xx_reg_seq plex_px_bcud[] = {
+ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0},
+ {0x0d, 0xff, 0xff, 0},
+ {EM2874_R50_IR_CONFIG, 0x01, 0xff, 0},
+ {EM28XX_R06_I2C_CLK, 0x40, 0xff, 0},
+ {EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 100},
+ {EM28XX_R12_VINENABLE, 0x20, 0x20, 0},
+ {0x0d, 0x42, 0xff, 1000},
+ {EM2874_R80_GPIO_P0_CTRL, 0xfc, 0xff, 10},
+ {EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 10},
+ {0x73, 0xfd, 0xff, 100},
+ {-1, -1, -1, -1},
+};
+
+/*
+ * 2040:0265 Hauppauge WinTV-dualHD DVB
+ * reg 0x80/0x84:
+ * GPIO_0: Yellow LED tuner 1, 0=on, 1=off
+ * GPIO_1: Green LED tuner 1, 0=on, 1=off
+ * GPIO_2: Yellow LED tuner 2, 0=on, 1=off
+ * GPIO_3: Green LED tuner 2, 0=on, 1=off
+ * GPIO_5: Reset #2, 0=active
+ * GPIO_6: Reset #1, 0=active
+ */
+static struct em28xx_reg_seq hauppauge_dualhd_dvb[] = {
+ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 0},
+ {0x0d, 0xff, 0xff, 200},
+ {0x50, 0x04, 0xff, 300},
+ {EM2874_R80_GPIO_P0_CTRL, 0xbf, 0xff, 100}, /* demod 1 reset */
+ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100},
+ {EM2874_R80_GPIO_P0_CTRL, 0xdf, 0xff, 100}, /* demod 2 reset */
+ {EM2874_R80_GPIO_P0_CTRL, 0xff, 0xff, 100},
+ {EM2874_R5F_TS_ENABLE, 0x44, 0xff, 50},
+ {EM2874_R5D_TS1_PKT_SIZE, 0x05, 0xff, 50},
+ {EM2874_R5E_TS2_PKT_SIZE, 0x05, 0xff, 50},
+ {-1, -1, -1, -1},
+};
+
/*
* Button definitions
*/
@@ -571,6 +609,22 @@ static struct em28xx_led terratec_grabby_leds[] = {
{-1, 0, 0, 0},
};
+static struct em28xx_led hauppauge_dualhd_leds[] = {
+ {
+ .role = EM28XX_LED_DIGITAL_CAPTURING,
+ .gpio_reg = EM2874_R80_GPIO_P0_CTRL,
+ .gpio_mask = EM_GPIO_1,
+ .inverted = 1,
+ },
+ {
+ .role = EM28XX_LED_DIGITAL_CAPTURING_TS2,
+ .gpio_reg = EM2874_R80_GPIO_P0_CTRL,
+ .gpio_mask = EM_GPIO_3,
+ .inverted = 1,
+ },
+ {-1, 0, 0, 0},
+};
+
/*
* Board definitions
*/
@@ -2306,6 +2360,35 @@ struct em28xx_board em28xx_boards[] = {
.has_dvb = 1,
.ir_codes = RC_MAP_TERRATEC_SLIM_2,
},
+
+ /*
+ * 3275:0085 PLEX PX-BCUD.
+ * Empia EM28178, TOSHIBA TC90532XBG, Sharp QM1D1C0042
+ */
+ [EM28178_BOARD_PLEX_PX_BCUD] = {
+ .name = "PLEX PX-BCUD",
+ .xclk = EM28XX_XCLK_FREQUENCY_4_3MHZ,
+ .def_i2c_bus = 1,
+ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE,
+ .tuner_type = TUNER_ABSENT,
+ .tuner_gpio = plex_px_bcud,
+ .has_dvb = 1,
+ },
+ /*
+ * 2040:0265 Hauppauge WinTV-dualHD (DVB version).
+ * Empia EM28274, 2x Silicon Labs Si2168, 2x Silicon Labs Si2157
+ */
+ [EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB] = {
+ .name = "Hauppauge WinTV-dualHD DVB",
+ .def_i2c_bus = 1,
+ .i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
+ EM28XX_I2C_FREQ_400_KHZ,
+ .tuner_type = TUNER_ABSENT,
+ .tuner_gpio = hauppauge_dualhd_dvb,
+ .has_dvb = 1,
+ .ir_codes = RC_MAP_HAUPPAUGE,
+ .leds = hauppauge_dualhd_leds,
+ },
};
EXPORT_SYMBOL_GPL(em28xx_boards);
@@ -2429,6 +2512,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950 },
{ USB_DEVICE(0x2040, 0x651f),
.driver_info = EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850 },
+ { USB_DEVICE(0x2040, 0x0265),
+ .driver_info = EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB },
{ USB_DEVICE(0x0438, 0xb002),
.driver_info = EM2880_BOARD_AMD_ATI_TV_WONDER_HD_600 },
{ USB_DEVICE(0x2001, 0xf112),
@@ -2495,6 +2580,8 @@ struct usb_device_id em28xx_id_table[] = {
.driver_info = EM2861_BOARD_LEADTEK_VC100 },
{ USB_DEVICE(0xeb1a, 0x8179),
.driver_info = EM28178_BOARD_TERRATEC_T2_STICK_HD },
+ { USB_DEVICE(0x3275, 0x0085),
+ .driver_info = EM28178_BOARD_PLEX_PX_BCUD },
{ },
};
MODULE_DEVICE_TABLE(usb, em28xx_id_table);
@@ -2861,6 +2948,7 @@ static void em28xx_card_setup(struct em28xx *dev)
case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_850:
case EM2883_BOARD_HAUPPAUGE_WINTV_HVR_950:
case EM2884_BOARD_HAUPPAUGE_WINTV_HVR_930C:
+ case EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB:
{
struct tveeprom tv;
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 3ef56ae62..7ec9ce2cd 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -58,6 +58,8 @@
#include "ts2020.h"
#include "si2168.h"
#include "si2157.h"
+#include "tc90522.h"
+#include "qm1d1c0042.h"
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");
@@ -787,6 +789,68 @@ static int em28xx_mt352_terratec_xs_init(struct dvb_frontend *fe)
return 0;
}
+static void px_bcud_init(struct em28xx *dev)
+{
+ int i;
+ struct {
+ unsigned char r[4];
+ int len;
+ } regs1[] = {
+ {{ 0x0e, 0x77 }, 2},
+ {{ 0x0f, 0x77 }, 2},
+ {{ 0x03, 0x90 }, 2},
+ }, regs2[] = {
+ {{ 0x07, 0x01 }, 2},
+ {{ 0x08, 0x10 }, 2},
+ {{ 0x13, 0x00 }, 2},
+ {{ 0x17, 0x00 }, 2},
+ {{ 0x03, 0x01 }, 2},
+ {{ 0x10, 0xb1 }, 2},
+ {{ 0x11, 0x40 }, 2},
+ {{ 0x85, 0x7a }, 2},
+ {{ 0x87, 0x04 }, 2},
+ };
+ static struct em28xx_reg_seq gpio[] = {
+ {EM28XX_R06_I2C_CLK, 0x40, 0xff, 300},
+ {EM2874_R80_GPIO_P0_CTRL, 0xfd, 0xff, 60},
+ {EM28XX_R15_RGAIN, 0x20, 0xff, 0},
+ {EM28XX_R16_GGAIN, 0x20, 0xff, 0},
+ {EM28XX_R17_BGAIN, 0x20, 0xff, 0},
+ {EM28XX_R18_ROFFSET, 0x00, 0xff, 0},
+ {EM28XX_R19_GOFFSET, 0x00, 0xff, 0},
+ {EM28XX_R1A_BOFFSET, 0x00, 0xff, 0},
+ {EM28XX_R23_UOFFSET, 0x00, 0xff, 0},
+ {EM28XX_R24_VOFFSET, 0x00, 0xff, 0},
+ {EM28XX_R26_COMPR, 0x00, 0xff, 0},
+ {0x13, 0x08, 0xff, 0},
+ {EM28XX_R12_VINENABLE, 0x27, 0xff, 0},
+ {EM28XX_R0C_USBSUSP, 0x10, 0xff, 0},
+ {EM28XX_R27_OUTFMT, 0x00, 0xff, 0},
+ {EM28XX_R10_VINMODE, 0x00, 0xff, 0},
+ {EM28XX_R11_VINCTRL, 0x11, 0xff, 0},
+ {EM2874_R50_IR_CONFIG, 0x01, 0xff, 0},
+ {EM2874_R5F_TS_ENABLE, 0x80, 0xff, 0},
+ {EM28XX_R06_I2C_CLK, 0x46, 0xff, 0},
+ };
+ em28xx_write_reg(dev, EM28XX_R06_I2C_CLK, 0x46);
+ /* sleeping ISDB-T */
+ dev->dvb->i2c_client_demod->addr = 0x14;
+ for (i = 0; i < ARRAY_SIZE(regs1); i++)
+ i2c_master_send(dev->dvb->i2c_client_demod, regs1[i].r,
+ regs1[i].len);
+ /* sleeping ISDB-S */
+ dev->dvb->i2c_client_demod->addr = 0x15;
+ for (i = 0; i < ARRAY_SIZE(regs2); i++)
+ i2c_master_send(dev->dvb->i2c_client_demod, regs2[i].r,
+ regs2[i].len);
+ for (i = 0; i < ARRAY_SIZE(gpio); i++) {
+ em28xx_write_reg_bits(dev, gpio[i].reg, gpio[i].val,
+ gpio[i].mask);
+ if (gpio[i].sleep > 0)
+ msleep(gpio[i].sleep);
+ }
+};
+
static struct mt352_config terratec_xs_mt352_cfg = {
.demod_address = (0x1e >> 1),
.no_tuner = 1,
@@ -1762,6 +1826,127 @@ static int em28xx_dvb_init(struct em28xx *dev)
dvb->i2c_client_tuner = client;
}
break;
+
+ case EM28178_BOARD_PLEX_PX_BCUD:
+ {
+ struct i2c_client *client;
+ struct i2c_board_info info;
+ struct tc90522_config tc90522_config;
+ struct qm1d1c0042_config qm1d1c0042_config;
+
+ /* attach demod */
+ memset(&tc90522_config, 0, sizeof(tc90522_config));
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "tc90522sat", I2C_NAME_SIZE);
+ info.addr = 0x15;
+ info.platform_data = &tc90522_config;
+ request_module("tc90522");
+ client = i2c_new_device(&dev->i2c_adap[dev->def_i2c_bus], &info);
+ if (client == NULL || client->dev.driver == NULL) {
+ result = -ENODEV;
+ goto out_free;
+ }
+ dvb->i2c_client_demod = client;
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ /* attach tuner */
+ memset(&qm1d1c0042_config, 0,
+ sizeof(qm1d1c0042_config));
+ qm1d1c0042_config.fe = tc90522_config.fe;
+ qm1d1c0042_config.lpf = 1;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "qm1d1c0042", I2C_NAME_SIZE);
+ info.addr = 0x61;
+ info.platform_data = &qm1d1c0042_config;
+ request_module(info.type);
+ client = i2c_new_device(tc90522_config.tuner_i2c,
+ &info);
+ if (client == NULL || client->dev.driver == NULL) {
+ module_put(dvb->i2c_client_demod->dev.driver->owner);
+ i2c_unregister_device(dvb->i2c_client_demod);
+ result = -ENODEV;
+ goto out_free;
+ }
+ dvb->i2c_client_tuner = client;
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ module_put(dvb->i2c_client_demod->dev.driver->owner);
+ i2c_unregister_device(dvb->i2c_client_demod);
+ result = -ENODEV;
+ goto out_free;
+ }
+ dvb->fe[0] = tc90522_config.fe;
+ px_bcud_init(dev);
+ }
+ break;
+ case EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB:
+ {
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+ struct i2c_board_info info;
+ struct si2168_config si2168_config;
+ struct si2157_config si2157_config;
+
+ /* attach demod */
+ memset(&si2168_config, 0, sizeof(si2168_config));
+ si2168_config.i2c_adapter = &adapter;
+ si2168_config.fe = &dvb->fe[0];
+ si2168_config.ts_mode = SI2168_TS_SERIAL;
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2168", I2C_NAME_SIZE);
+ info.addr = 0x64;
+ info.platform_data = &si2168_config;
+ request_module(info.type);
+ client = i2c_new_device(&dev->i2c_adap[dev->def_i2c_bus], &info);
+ if (client == NULL || client->dev.driver == NULL) {
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ dvb->i2c_client_demod = client;
+
+ /* attach tuner */
+ memset(&si2157_config, 0, sizeof(si2157_config));
+ si2157_config.fe = dvb->fe[0];
+ si2157_config.if_port = 1;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ si2157_config.mdev = dev->media_dev;
+#endif
+ memset(&info, 0, sizeof(struct i2c_board_info));
+ strlcpy(info.type, "si2157", I2C_NAME_SIZE);
+ info.addr = 0x60;
+ info.platform_data = &si2157_config;
+ request_module(info.type);
+ client = i2c_new_device(adapter, &info);
+ if (client == NULL || client->dev.driver == NULL) {
+ module_put(dvb->i2c_client_demod->dev.driver->owner);
+ i2c_unregister_device(dvb->i2c_client_demod);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ if (!try_module_get(client->dev.driver->owner)) {
+ i2c_unregister_device(client);
+ module_put(dvb->i2c_client_demod->dev.driver->owner);
+ i2c_unregister_device(dvb->i2c_client_demod);
+ result = -ENODEV;
+ goto out_free;
+ }
+
+ dvb->i2c_client_tuner = client;
+
+ }
+ break;
default:
em28xx_errdev("/2: The frontend of your DVB/ATSC card"
" isn't supported yet\n");
diff --git a/drivers/media/usb/em28xx/em28xx-reg.h b/drivers/media/usb/em28xx/em28xx-reg.h
index 13cbb7f3e..afe7a66d7 100644
--- a/drivers/media/usb/em28xx/em28xx-reg.h
+++ b/drivers/media/usb/em28xx/em28xx-reg.h
@@ -193,6 +193,19 @@
/* em2874 registers */
#define EM2874_R50_IR_CONFIG 0x50
#define EM2874_R51_IR 0x51
+#define EM2874_R5D_TS1_PKT_SIZE 0x5d
+#define EM2874_R5E_TS2_PKT_SIZE 0x5e
+ /*
+ * For both TS1 and TS2, In isochronous mode:
+ * 0x01 188 bytes
+ * 0x02 376 bytes
+ * 0x03 564 bytes
+ * 0x04 752 bytes
+ * 0x05 940 bytes
+ * In bulk mode:
 * 0x01..0xff total packet count in 188-byte units
+ */
+
#define EM2874_R5F_TS_ENABLE 0x5f
/* em2874/174/84, em25xx, em276x/7x/8x GPIO registers */
diff --git a/drivers/media/usb/em28xx/em28xx.h b/drivers/media/usb/em28xx/em28xx.h
index 267444961..d148463b2 100644
--- a/drivers/media/usb/em28xx/em28xx.h
+++ b/drivers/media/usb/em28xx/em28xx.h
@@ -145,6 +145,8 @@
#define EM2861_BOARD_LEADTEK_VC100 95
#define EM28178_BOARD_TERRATEC_T2_STICK_HD 96
#define EM2884_BOARD_ELGATO_EYETV_HYBRID_2008 97
+#define EM28178_BOARD_PLEX_PX_BCUD 98
+#define EM28174_BOARD_HAUPPAUGE_WINTV_DUALHD_DVB 99
/* Limits minimum and default number of buffers */
#define EM28XX_MIN_BUF 4
@@ -406,6 +408,7 @@ enum em28xx_adecoder {
enum em28xx_led_role {
EM28XX_LED_ANALOG_CAPTURING = 0,
EM28XX_LED_DIGITAL_CAPTURING,
+ EM28XX_LED_DIGITAL_CAPTURING_TS2,
EM28XX_LED_ILLUMINATION,
EM28XX_NUM_LED_ROLES, /* must be the last */
};
diff --git a/drivers/media/usb/go7007/go7007-v4l2.c b/drivers/media/usb/go7007/go7007-v4l2.c
index 358c1c186..ea01ee5df 100644
--- a/drivers/media/usb/go7007/go7007-v4l2.c
+++ b/drivers/media/usb/go7007/go7007-v4l2.c
@@ -1125,7 +1125,7 @@ int go7007_v4l2_init(struct go7007 *go)
vdev->queue = &go->vidq;
video_set_drvdata(vdev, go);
vdev->v4l2_dev = &go->v4l2_dev;
- if (!v4l2_device_has_op(&go->v4l2_dev, video, querystd))
+ if (!v4l2_device_has_op(&go->v4l2_dev, 0, video, querystd))
v4l2_disable_ioctl(vdev, VIDIOC_QUERYSTD);
if (!(go->board_info->flags & GO7007_BOARD_HAS_TUNER)) {
v4l2_disable_ioctl(vdev, VIDIOC_S_FREQUENCY);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 967424f73..203418f87 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -3672,11 +3672,10 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
hdw->cmd_debug_state = 1;
- if (write_len) {
+ if (write_len && write_data)
hdw->cmd_debug_code = ((unsigned char *)write_data)[0];
- } else {
+ else
hdw->cmd_debug_code = 0;
- }
hdw->cmd_debug_write_len = write_len;
hdw->cmd_debug_read_len = read_len;
@@ -3688,7 +3687,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
setup_timer(&timer, pvr2_ctl_timeout, (unsigned long)hdw);
timer.expires = jiffies + timeout;
- if (write_len) {
+ if (write_len && write_data) {
hdw->cmd_debug_state = 2;
/* Transfer write data to internal buffer */
for (idx = 0; idx < write_len; idx++) {
@@ -3795,7 +3794,7 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw,
goto done;
}
}
- if (read_len) {
+ if (read_len && read_data) {
/* Validate results of read request */
if ((hdw->ctl_read_urb->status != 0) &&
(hdw->ctl_read_urb->status != -ENOENT) &&
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 12690c1ea..c04bc6afb 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -1274,8 +1274,6 @@ struct uvc_xu_control_mapping32 {
static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
const struct uvc_xu_control_mapping32 __user *up)
{
- struct uvc_menu_info __user *umenus;
- struct uvc_menu_info __user *kmenus;
compat_caddr_t p;
if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
@@ -1292,17 +1290,7 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
if (__get_user(p, &up->menu_info))
return -EFAULT;
- umenus = compat_ptr(p);
- if (!access_ok(VERIFY_READ, umenus, kp->menu_count * sizeof(*umenus)))
- return -EFAULT;
-
- kmenus = compat_alloc_user_space(kp->menu_count * sizeof(*kmenus));
- if (kmenus == NULL)
- return -EFAULT;
- kp->menu_info = kmenus;
-
- if (copy_in_user(kmenus, umenus, kp->menu_count * sizeof(*umenus)))
- return -EFAULT;
+ kp->menu_info = compat_ptr(p);
return 0;
}
@@ -1310,10 +1298,6 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp,
static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
struct uvc_xu_control_mapping32 __user *up)
{
- struct uvc_menu_info __user *umenus;
- struct uvc_menu_info __user *kmenus = kp->menu_info;
- compat_caddr_t p;
-
if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
__copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) ||
__put_user(kp->menu_count, &up->menu_count))
@@ -1322,16 +1306,6 @@ static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp,
if (__clear_user(up->reserved, sizeof(up->reserved)))
return -EFAULT;
- if (kp->menu_count == 0)
- return 0;
-
- if (get_user(p, &up->menu_info))
- return -EFAULT;
- umenus = compat_ptr(p);
-
- if (copy_in_user(umenus, kmenus, kp->menu_count * sizeof(*umenus)))
- return -EFAULT;
-
return 0;
}
@@ -1346,8 +1320,6 @@ struct uvc_xu_control_query32 {
static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
const struct uvc_xu_control_query32 __user *up)
{
- u8 __user *udata;
- u8 __user *kdata;
compat_caddr_t p;
if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
@@ -1361,17 +1333,7 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
if (__get_user(p, &up->data))
return -EFAULT;
- udata = compat_ptr(p);
- if (!access_ok(VERIFY_READ, udata, kp->size))
- return -EFAULT;
-
- kdata = compat_alloc_user_space(kp->size);
- if (kdata == NULL)
- return -EFAULT;
- kp->data = kdata;
-
- if (copy_in_user(kdata, udata, kp->size))
- return -EFAULT;
+ kp->data = compat_ptr(p);
return 0;
}
@@ -1379,26 +1341,10 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp,
static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
struct uvc_xu_control_query32 __user *up)
{
- u8 __user *udata;
- u8 __user *kdata = kp->data;
- compat_caddr_t p;
-
if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
__copy_to_user(up, kp, offsetof(typeof(*up), data)))
return -EFAULT;
- if (kp->size == 0)
- return 0;
-
- if (get_user(p, &up->data))
- return -EFAULT;
- udata = compat_ptr(p);
- if (!access_ok(VERIFY_READ, udata, kp->size))
- return -EFAULT;
-
- if (copy_in_user(udata, kdata, kp->size))
- return -EFAULT;
-
return 0;
}
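
The uvcvideo compat-ioctl rewrite deletes the compat_alloc_user_space() bounce buffers: the menu_info and data pointers are only dereferenced by code that already copes with user pointers, so storing compat_ptr(p) directly suffices. compat_ptr() widens a 32-bit user address into a native __user pointer. A sketch of the conversion step, with invented query_native/query_compat structures:

#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct query_native {			/* invented, mirrors the uvc pair */
	void __user *data;
	__u32 size;
};

struct query_compat {
	compat_caddr_t data;		/* 32-bit user pointer */
	__u32 size;
};

static int get_query(struct query_native *kp,
		     const struct query_compat __user *up)
{
	compat_caddr_t p;

	if (get_user(kp->size, &up->size) || get_user(p, &up->data))
		return -EFAULT;

	kp->data = compat_ptr(p);	/* widen in place, no bounce copy */
	return 0;
}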
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index d8e5994cc..70b559d7c 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -735,6 +735,7 @@ static int video_register_media_controller(struct video_device *vdev, int type)
if (!vdev->v4l2_dev->mdev)
return 0;
+ vdev->entity.obj_type = MEDIA_ENTITY_TYPE_VIDEO_DEVICE;
vdev->entity.function = MEDIA_ENT_F_UNKNOWN;
switch (type) {
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 170dd68d2..528390f33 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1020,9 +1020,12 @@ static int v4l_querycap(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_capability *cap = (struct v4l2_capability *)arg;
+ struct video_device *vfd = video_devdata(file);
int ret;
cap->version = LINUX_VERSION_CODE;
+ cap->device_caps = vfd->device_caps;
+ cap->capabilities = vfd->device_caps | V4L2_CAP_DEVICE_CAPS;
ret = ops->vidioc_querycap(file, fh, cap);
@@ -2157,40 +2160,56 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
struct v4l2_cropcap *p = arg;
+ struct v4l2_selection s = { .type = p->type };
+ int ret = 0;
- if (ops->vidioc_g_selection) {
- struct v4l2_selection s = { .type = p->type };
- int ret;
+ /* setting trivial pixelaspect */
+ p->pixelaspect.numerator = 1;
+ p->pixelaspect.denominator = 1;
- /* obtaining bounds */
- if (V4L2_TYPE_IS_OUTPUT(p->type))
- s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS;
- else
- s.target = V4L2_SEL_TGT_CROP_BOUNDS;
+ /*
+ * The determine_valid_ioctls() call already should ensure
+ * that this can never happen, but just in case...
+ */
+ if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_g_selection))
+ return -ENOTTY;
- ret = ops->vidioc_g_selection(file, fh, &s);
- if (ret)
- return ret;
- p->bounds = s.r;
+ if (ops->vidioc_cropcap)
+ ret = ops->vidioc_cropcap(file, fh, p);
- /* obtaining defrect */
- if (V4L2_TYPE_IS_OUTPUT(p->type))
- s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT;
- else
- s.target = V4L2_SEL_TGT_CROP_DEFAULT;
+ if (!ops->vidioc_g_selection)
+ return ret;
- ret = ops->vidioc_g_selection(file, fh, &s);
- if (ret)
- return ret;
- p->defrect = s.r;
- }
+ /*
+ * Ignore ENOTTY or ENOIOCTLCMD error returns, just use the
+ * square pixel aspect ratio in that case.
+ */
+ if (ret && ret != -ENOTTY && ret != -ENOIOCTLCMD)
+ return ret;
- /* setting trivial pixelaspect */
- p->pixelaspect.numerator = 1;
- p->pixelaspect.denominator = 1;
+ /* Use g_selection() to fill in the bounds and defrect rectangles */
- if (ops->vidioc_cropcap)
- return ops->vidioc_cropcap(file, fh, p);
+ /* obtaining bounds */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_BOUNDS;
+ else
+ s.target = V4L2_SEL_TGT_CROP_BOUNDS;
+
+ ret = ops->vidioc_g_selection(file, fh, &s);
+ if (ret)
+ return ret;
+ p->bounds = s.r;
+
+ /* obtaining defrect */
+ if (V4L2_TYPE_IS_OUTPUT(p->type))
+ s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT;
+ else
+ s.target = V4L2_SEL_TGT_CROP_DEFAULT;
+
+ ret = ops->vidioc_g_selection(file, fh, &s);
+ if (ret)
+ return ret;
+ p->defrect = s.r;
return 0;
}
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c
index 2228cd3a8..8bef4331b 100644
--- a/drivers/media/v4l2-core/v4l2-mc.c
+++ b/drivers/media/v4l2-core/v4l2-mc.c
@@ -1,7 +1,7 @@
/*
* Media Controller ancillary functions
*
- * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
* Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com>
* Copyright (C) 2006-2010 Nokia Corporation
* Copyright (c) 2016 Intel Corporation.
@@ -263,7 +263,7 @@ static int pipeline_pm_use_count(struct media_entity *entity,
media_entity_graph_walk_start(graph, entity);
while ((entity = media_entity_graph_walk_next(graph))) {
- if (is_media_entity_v4l2_io(entity))
+ if (is_media_entity_v4l2_video_device(entity))
use += entity->use_count;
}
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index d63083803..953eab08e 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -35,9 +35,11 @@
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- fh->pad = kzalloc(sizeof(*fh->pad) * sd->entity.num_pads, GFP_KERNEL);
- if (fh->pad == NULL)
- return -ENOMEM;
+ if (sd->entity.num_pads) {
+ fh->pad = v4l2_subdev_alloc_pad_config(sd);
+ if (fh->pad == NULL)
+ return -ENOMEM;
+ }
#endif
return 0;
}
@@ -45,7 +47,7 @@ static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
static void subdev_fh_free(struct v4l2_subdev_fh *fh)
{
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
- kfree(fh->pad);
+ v4l2_subdev_free_pad_config(fh->pad);
fh->pad = NULL;
#endif
}
@@ -508,7 +510,7 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
if (source_fmt->format.width != sink_fmt->format.width
|| source_fmt->format.height != sink_fmt->format.height
|| source_fmt->format.code != sink_fmt->format.code)
- return -EINVAL;
+ return -EPIPE;
/* The field order must match, or the sink field order must be NONE
* to support interlaced hardware connected to bridges that support
@@ -516,7 +518,7 @@ int v4l2_subdev_link_validate_default(struct v4l2_subdev *sd,
*/
if (source_fmt->format.field != sink_fmt->format.field &&
sink_fmt->format.field != V4L2_FIELD_NONE)
- return -EINVAL;
+ return -EPIPE;
return 0;
}
@@ -569,6 +571,35 @@ int v4l2_subdev_link_validate(struct media_link *link)
sink, link, &source_fmt, &sink_fmt);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_link_validate);
+
+struct v4l2_subdev_pad_config *
+v4l2_subdev_alloc_pad_config(struct v4l2_subdev *sd)
+{
+ struct v4l2_subdev_pad_config *cfg;
+ int ret;
+
+ if (!sd->entity.num_pads)
+ return NULL;
+
+ cfg = kcalloc(sd->entity.num_pads, sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return NULL;
+
+ ret = v4l2_subdev_call(sd, pad, init_cfg, cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ kfree(cfg);
+ return NULL;
+ }
+
+ return cfg;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_alloc_pad_config);
+
+void v4l2_subdev_free_pad_config(struct v4l2_subdev_pad_config *cfg)
+{
+ kfree(cfg);
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_free_pad_config);
#endif /* CONFIG_MEDIA_CONTROLLER */
void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
@@ -584,6 +615,7 @@ void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
sd->host_priv = NULL;
#if defined(CONFIG_MEDIA_CONTROLLER)
sd->entity.name = sd->name;
+ sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
sd->entity.function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
#endif
}
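
The new pad-config helpers (available under CONFIG_MEDIA_CONTROLLER) centralize what callers used to open-code with kzalloc(): allocation now also invokes the subdev's init_cfg op, so per-pad TRY state starts from driver defaults. A usage sketch, assuming a bound subdev (probe_try_fmt is invented):

#include <linux/errno.h>
#include <media/v4l2-subdev.h>

/* invented probe step: negotiate a TRY format, then discard the state */
static int probe_try_fmt(struct v4l2_subdev *sd)
{
	struct v4l2_subdev_pad_config *cfg;
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
		.pad = 0,
	};
	int ret;

	cfg = v4l2_subdev_alloc_pad_config(sd);	/* runs init_cfg too */
	if (!cfg)
		return -ENOMEM;

	ret = v4l2_subdev_call(sd, pad, get_fmt, cfg, &fmt);

	v4l2_subdev_free_pad_config(cfg);
	return ret;
}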
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 51d5cd20c..81ddb1757 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -51,6 +51,7 @@ config TI_EMIF
config OMAP_GPMC
bool
+ select GPIOLIB
help
This driver is for the General Purpose Memory Controller (GPMC)
present on Texas Instruments SoCs (e.g. OMAP2+). GPMC allows
@@ -122,6 +123,7 @@ config MTK_SMI
mainly help enable/disable iommu and control the power domain and
clocks for each local arbiter.
+source "drivers/memory/samsung/Kconfig"
source "drivers/memory/tegra/Kconfig"
endif
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 890bdf402..cb0b7a1df 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -17,4 +17,5 @@ obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o
obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o
obj-$(CONFIG_MTK_SMI) += mtk-smi.o
+obj-$(CONFIG_SAMSUNG_MC) += samsung/
obj-$(CONFIG_TEGRA_MC) += tegra/
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 2a691da8c..904b4af5f 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -59,11 +59,11 @@ int fsl_ifc_find(phys_addr_t addr_base)
{
int i = 0;
- if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->gregs)
return -ENODEV;
for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) {
- u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
+ u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr);
if (cspr & CSPR_V && (cspr & CSPR_BA) ==
convert_ifc_address(addr_base))
return i;
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(fsl_ifc_find);
static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl)
{
- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ struct fsl_ifc_global __iomem *ifc = ctrl->gregs;
/*
* Clear all the common status and event registers
@@ -104,7 +104,7 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev)
irq_dispose_mapping(ctrl->nand_irq);
irq_dispose_mapping(ctrl->irq);
- iounmap(ctrl->regs);
+ iounmap(ctrl->gregs);
dev_set_drvdata(&dev->dev, NULL);
kfree(ctrl);
@@ -122,7 +122,7 @@ static DEFINE_SPINLOCK(nand_irq_lock);
static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl)
{
- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
unsigned long flags;
u32 stat;
@@ -157,7 +157,7 @@ static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data)
static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data)
{
struct fsl_ifc_ctrl *ctrl = data;
- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ struct fsl_ifc_global __iomem *ifc = ctrl->gregs;
u32 err_axiid, err_srcid, status, cs_err, err_addr;
irqreturn_t ret = IRQ_NONE;
@@ -215,6 +215,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
{
int ret = 0;
int version, banks;
+ void __iomem *addr;
dev_info(&dev->dev, "Freescale Integrated Flash Controller\n");
@@ -225,22 +226,13 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev);
/* IOMAP the entire IFC region */
- fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0);
- if (!fsl_ifc_ctrl_dev->regs) {
+ fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0);
+ if (!fsl_ifc_ctrl_dev->gregs) {
dev_err(&dev->dev, "failed to get memory region\n");
ret = -ENODEV;
goto err;
}
- version = ifc_in32(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
- FSL_IFC_VERSION_MASK;
- banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
- dev_info(&dev->dev, "IFC version %d.%d, %d banks\n",
- version >> 24, (version >> 16) & 0xf, banks);
-
- fsl_ifc_ctrl_dev->version = version;
- fsl_ifc_ctrl_dev->banks = banks;
-
if (of_property_read_bool(dev->dev.of_node, "little-endian")) {
fsl_ifc_ctrl_dev->little_endian = true;
dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n");
@@ -249,8 +241,9 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n");
}
- version = ioread32be(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
+ version = ifc_in32(&fsl_ifc_ctrl_dev->gregs->ifc_rev) &
FSL_IFC_VERSION_MASK;
+
banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
dev_info(&dev->dev, "IFC version %d.%d, %d banks\n",
version >> 24, (version >> 16) & 0xf, banks);
@@ -258,6 +251,13 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
fsl_ifc_ctrl_dev->version = version;
fsl_ifc_ctrl_dev->banks = banks;
+ addr = fsl_ifc_ctrl_dev->gregs;
+ if (version >= FSL_IFC_VERSION_2_0_0)
+ addr += PGOFFSET_64K;
+ else
+ addr += PGOFFSET_4K;
+ fsl_ifc_ctrl_dev->rregs = addr;
+
/* get the Controller level irq */
fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
if (fsl_ifc_ctrl_dev->irq == 0) {
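
The fsl_ifc rework splits the old regs pointer into gregs for the global block and rregs for the runtime block, which lives at a version-dependent page offset inside the same mapping: 64 KiB pages from IFC 2.0.0 onward, 4 KiB before. Deriving a second register window from one of_iomap() is a common MMIO layout trick; a sketch, assuming FSL_IFC_VERSION_2_0_0 encodes major 2 in the top byte as the dev_info() above suggests:

#include <linux/io.h>
#include <linux/types.h>

#define PG_4K	0x1000		/* assumed PGOFFSET_4K */
#define PG_64K	0x10000		/* assumed PGOFFSET_64K */
#define VER_2_0	0x02000000	/* assumed FSL_IFC_VERSION_2_0_0 */

struct ifc_ctrl {
	void __iomem *gregs;	/* start of the single of_iomap() */
	void __iomem *rregs;	/* runtime block inside that mapping */
};

static void ifc_locate_runtime(struct ifc_ctrl *ctrl, u32 version)
{
	void __iomem *addr = ctrl->gregs;

	/* IFC >= 2.0.0 pushes the runtime registers one 64K page in */
	addr += (version >= VER_2_0) ? PG_64K : PG_4K;
	ctrl->rregs = addr;
}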
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 089091f5f..f6b575791 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -91,6 +91,7 @@ int mtk_smi_larb_get(struct device *larbdev)
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_smi_larb_get);
void mtk_smi_larb_put(struct device *larbdev)
{
@@ -106,6 +107,7 @@ void mtk_smi_larb_put(struct device *larbdev)
mtk_smi_disable(&larb->smi);
mtk_smi_disable(common);
}
+EXPORT_SYMBOL_GPL(mtk_smi_larb_put);
static int
mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index 60074351f..9daf94bb8 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -109,7 +109,7 @@ const struct lpddr2_timings *of_get_ddr_timings(struct device_node *np_ddr,
struct lpddr2_timings *timings = NULL;
u32 arr_sz = 0, i = 0;
struct device_node *np_tim;
- char *tim_compat;
+ char *tim_compat = NULL;
switch (device_type) {
case DDR_TYPE_LPDDR2_S2:
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 859b4a1d1..15508df24 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -21,15 +21,15 @@
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_mtd.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/omap-gpmc.h>
-#include <linux/mtd/nand.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/mtd-nand-omap2.h>
@@ -81,6 +81,8 @@
#define GPMC_CONFIG_LIMITEDADDRESS BIT(1)
+#define GPMC_STATUS_EMPTYWRITEBUFFERSTATUS BIT(0)
+
#define GPMC_CONFIG2_CSEXTRADELAY BIT(7)
#define GPMC_CONFIG3_ADVEXTRADELAY BIT(7)
#define GPMC_CONFIG4_OEEXTRADELAY BIT(7)
@@ -92,6 +94,14 @@
#define GPMC_CS_SIZE 0x30
#define GPMC_BCH_SIZE 0x10
+/*
+ * The first 1MB of GPMC address space is typically mapped to
+ * the internal ROM. Never allocate the first page, to
+ * facilitate bug detection; even if we didn't boot from ROM.
+ * As GPMC minimum partition size is 16MB we can only start from
+ * there.
+ */
+#define GPMC_MEM_START 0x1000000
#define GPMC_MEM_END 0x3FFFFFFF
#define GPMC_CHUNK_SHIFT 24 /* 16 MB */
@@ -125,7 +135,6 @@
#define GPMC_CONFIG_RDY_BSY 0x00000001
#define GPMC_CONFIG_DEV_SIZE 0x00000002
#define GPMC_CONFIG_DEV_TYPE 0x00000003
-#define GPMC_SET_IRQ_STATUS 0x00000004
#define GPMC_CONFIG1_WRAPBURST_SUPP (1 << 31)
#define GPMC_CONFIG1_READMULTIPLE_SUPP (1 << 30)
@@ -174,16 +183,12 @@
#define GPMC_CONFIG_WRITEPROTECT 0x00000010
#define WR_RD_PIN_MONITORING 0x00600000
-#define GPMC_ENABLE_IRQ 0x0000000d
-
/* ECC commands */
#define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */
#define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */
#define GPMC_ECC_READSYN 2 /* Reset before syndrom is read back */
-/* XXX: Only NAND irq has been considered,currently these are the only ones used
- */
-#define GPMC_NR_IRQ 2
+#define GPMC_NR_NAND_IRQS 2 /* number of NAND specific IRQs */
enum gpmc_clk_domain {
GPMC_CD_FCLK,
@@ -199,11 +204,6 @@ struct gpmc_cs_data {
struct resource mem;
};
-struct gpmc_client_irq {
- unsigned irq;
- u32 bitmask;
-};
-
/* Structure to save gpmc cs context */
struct gpmc_cs_config {
u32 config1;
@@ -231,9 +231,15 @@ struct omap3_gpmc_regs {
struct gpmc_cs_config cs_context[GPMC_CS_NUM];
};
-static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
-static struct irq_chip gpmc_irq_chip;
-static int gpmc_irq_start;
+struct gpmc_device {
+ struct device *dev;
+ int irq;
+ struct irq_chip irq_chip;
+ struct gpio_chip gpio_chip;
+ int nirqs;
+};
+
+static struct irq_domain *gpmc_irq_domain;
static struct resource gpmc_mem_root;
static struct gpmc_cs_data gpmc_cs[GPMC_CS_NUM];
@@ -241,8 +247,6 @@ static DEFINE_SPINLOCK(gpmc_mem_lock);
/* Define chip-selects as reserved by default until probe completes */
static unsigned int gpmc_cs_num = GPMC_CS_NUM;
static unsigned int gpmc_nr_waitpins;
-static struct device *gpmc_dev;
-static int gpmc_irq;
static resource_size_t phys_base, mem_size;
static unsigned gpmc_capability;
static void __iomem *gpmc_base;
@@ -1054,14 +1058,6 @@ int gpmc_configure(int cmd, int wval)
u32 regval;
switch (cmd) {
- case GPMC_ENABLE_IRQ:
- gpmc_write_reg(GPMC_IRQENABLE, wval);
- break;
-
- case GPMC_SET_IRQ_STATUS:
- gpmc_write_reg(GPMC_IRQSTATUS, wval);
- break;
-
case GPMC_CONFIG_WP:
regval = gpmc_read_reg(GPMC_CONFIG);
if (wval)
@@ -1084,7 +1080,7 @@ void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
{
int i;
- reg->gpmc_status = gpmc_base + GPMC_STATUS;
+ reg->gpmc_status = NULL; /* deprecated */
reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
@@ -1118,87 +1114,201 @@ void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
}
}
-int gpmc_get_client_irq(unsigned irq_config)
+static bool gpmc_nand_writebuffer_empty(void)
{
- int i;
+ if (gpmc_read_reg(GPMC_STATUS) & GPMC_STATUS_EMPTYWRITEBUFFERSTATUS)
+ return true;
- if (hweight32(irq_config) > 1)
+ return false;
+}
+
+static struct gpmc_nand_ops nand_ops = {
+ .nand_writebuffer_empty = gpmc_nand_writebuffer_empty,
+};
+
+/**
+ * gpmc_omap_get_nand_ops - Get the GPMC NAND interface
+ * @regs: the GPMC NAND register map exclusive for NAND use.
+ * @cs: GPMC chip select number on which the NAND sits. The
+ * register map returned will be specific to this chip select.
+ *
+ * Returns NULL on error e.g. invalid cs.
+ */
+struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *reg, int cs)
+{
+ if (cs >= gpmc_cs_num)
+ return NULL;
+
+ gpmc_update_nand_reg(reg, cs);
+
+ return &nand_ops;
+}
+EXPORT_SYMBOL_GPL(gpmc_omap_get_nand_ops);
+
+int gpmc_get_client_irq(unsigned irq_config)
+{
+ if (!gpmc_irq_domain) {
+ pr_warn("%s called before GPMC IRQ domain available\n",
+ __func__);
return 0;
+ }
- for (i = 0; i < GPMC_NR_IRQ; i++)
- if (gpmc_client_irq[i].bitmask & irq_config)
- return gpmc_client_irq[i].irq;
+ /* we restrict this to NAND IRQs only */
+ if (irq_config >= GPMC_NR_NAND_IRQS)
+ return 0;
- return 0;
+ return irq_create_mapping(gpmc_irq_domain, irq_config);
}
-static int gpmc_irq_endis(unsigned irq, bool endis)
+static int gpmc_irq_endis(unsigned long hwirq, bool endis)
{
- int i;
u32 regval;
- for (i = 0; i < GPMC_NR_IRQ; i++)
- if (irq == gpmc_client_irq[i].irq) {
- regval = gpmc_read_reg(GPMC_IRQENABLE);
- if (endis)
- regval |= gpmc_client_irq[i].bitmask;
- else
- regval &= ~gpmc_client_irq[i].bitmask;
- gpmc_write_reg(GPMC_IRQENABLE, regval);
- break;
- }
+ /* bits GPMC_NR_NAND_IRQS to 8 are reserved */
+ if (hwirq >= GPMC_NR_NAND_IRQS)
+ hwirq += 8 - GPMC_NR_NAND_IRQS;
+
+ regval = gpmc_read_reg(GPMC_IRQENABLE);
+ if (endis)
+ regval |= BIT(hwirq);
+ else
+ regval &= ~BIT(hwirq);
+ gpmc_write_reg(GPMC_IRQENABLE, regval);
return 0;
}
static void gpmc_irq_disable(struct irq_data *p)
{
- gpmc_irq_endis(p->irq, false);
+ gpmc_irq_endis(p->hwirq, false);
}
static void gpmc_irq_enable(struct irq_data *p)
{
- gpmc_irq_endis(p->irq, true);
+ gpmc_irq_endis(p->hwirq, true);
}
-static void gpmc_irq_noop(struct irq_data *data) { }
+static void gpmc_irq_mask(struct irq_data *d)
+{
+ gpmc_irq_endis(d->hwirq, false);
+}
-static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
+static void gpmc_irq_unmask(struct irq_data *d)
+{
+ gpmc_irq_endis(d->hwirq, true);
+}
-static int gpmc_setup_irq(void)
+static void gpmc_irq_edge_config(unsigned long hwirq, bool rising_edge)
{
- int i;
u32 regval;
- if (!gpmc_irq)
+ /* NAND IRQs polarity is not configurable */
+ if (hwirq < GPMC_NR_NAND_IRQS)
+ return;
+
+ /* WAITPIN starts at BIT 8 */
+ hwirq += 8 - GPMC_NR_NAND_IRQS;
+
+ regval = gpmc_read_reg(GPMC_CONFIG);
+ if (rising_edge)
+ regval &= ~BIT(hwirq);
+ else
+ regval |= BIT(hwirq);
+
+ gpmc_write_reg(GPMC_CONFIG, regval);
+}
+
+static void gpmc_irq_ack(struct irq_data *d)
+{
+ unsigned int hwirq = d->hwirq;
+
+ /* skip reserved bits */
+ if (hwirq >= GPMC_NR_NAND_IRQS)
+ hwirq += 8 - GPMC_NR_NAND_IRQS;
+
+ /* Setting bit to 1 clears (or Acks) the interrupt */
+ gpmc_write_reg(GPMC_IRQSTATUS, BIT(hwirq));
+}
+
+static int gpmc_irq_set_type(struct irq_data *d, unsigned int trigger)
+{
+ /* can't set type for NAND IRQs */
+ if (d->hwirq < GPMC_NR_NAND_IRQS)
return -EINVAL;
- gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0);
- if (gpmc_irq_start < 0) {
- pr_err("irq_alloc_descs failed\n");
- return gpmc_irq_start;
+ /* We can support either rising or falling edge at a time */
+ if (trigger == IRQ_TYPE_EDGE_FALLING)
+ gpmc_irq_edge_config(d->hwirq, false);
+ else if (trigger == IRQ_TYPE_EDGE_RISING)
+ gpmc_irq_edge_config(d->hwirq, true);
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int gpmc_irq_map(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct gpmc_device *gpmc = d->host_data;
+
+ irq_set_chip_data(virq, gpmc);
+ if (hw < GPMC_NR_NAND_IRQS) {
+ irq_modify_status(virq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
+ irq_set_chip_and_handler(virq, &gpmc->irq_chip,
+ handle_simple_irq);
+ } else {
+ irq_set_chip_and_handler(virq, &gpmc->irq_chip,
+ handle_edge_irq);
}
- gpmc_irq_chip.name = "gpmc";
- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
-
- gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
- gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
-
- for (i = 0; i < GPMC_NR_IRQ; i++) {
- gpmc_client_irq[i].irq = gpmc_irq_start + i;
- irq_set_chip_and_handler(gpmc_client_irq[i].irq,
- &gpmc_irq_chip, handle_simple_irq);
- irq_modify_status(gpmc_client_irq[i].irq, IRQ_NOREQUEST,
- IRQ_NOAUTOEN);
+ return 0;
+}
+
+static const struct irq_domain_ops gpmc_irq_domain_ops = {
+ .map = gpmc_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static irqreturn_t gpmc_handle_irq(int irq, void *data)
+{
+ int hwirq, virq;
+ u32 regval, regvalx;
+ struct gpmc_device *gpmc = data;
+
+ regval = gpmc_read_reg(GPMC_IRQSTATUS);
+ regvalx = regval;
+
+ if (!regval)
+ return IRQ_NONE;
+
+ for (hwirq = 0; hwirq < gpmc->nirqs; hwirq++) {
+ /* skip reserved status bits */
+ if (hwirq == GPMC_NR_NAND_IRQS)
+ regvalx >>= 8 - GPMC_NR_NAND_IRQS;
+
+ if (regvalx & BIT(hwirq)) {
+ virq = irq_find_mapping(gpmc_irq_domain, hwirq);
+ if (!virq) {
+ dev_warn(gpmc->dev,
+ "spurious irq detected hwirq %d, virq %d\n",
+ hwirq, virq);
+ }
+
+ generic_handle_irq(virq);
+ }
}
+ gpmc_write_reg(GPMC_IRQSTATUS, regval);
+
+ return IRQ_HANDLED;
+}
+
+static int gpmc_setup_irq(struct gpmc_device *gpmc)
+{
+ u32 regval;
+ int rc;
+
/* Disable interrupts */
gpmc_write_reg(GPMC_IRQENABLE, 0);
@@ -1206,22 +1316,45 @@ static int gpmc_setup_irq(void)
regval = gpmc_read_reg(GPMC_IRQSTATUS);
gpmc_write_reg(GPMC_IRQSTATUS, regval);
- return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL);
+ gpmc->irq_chip.name = "gpmc";
+ gpmc->irq_chip.irq_enable = gpmc_irq_enable;
+ gpmc->irq_chip.irq_disable = gpmc_irq_disable;
+ gpmc->irq_chip.irq_ack = gpmc_irq_ack;
+ gpmc->irq_chip.irq_mask = gpmc_irq_mask;
+ gpmc->irq_chip.irq_unmask = gpmc_irq_unmask;
+ gpmc->irq_chip.irq_set_type = gpmc_irq_set_type;
+
+ gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node,
+ gpmc->nirqs,
+ &gpmc_irq_domain_ops,
+ gpmc);
+ if (!gpmc_irq_domain) {
+ dev_err(gpmc->dev, "IRQ domain add failed\n");
+ return -ENODEV;
+ }
+
+ rc = request_irq(gpmc->irq, gpmc_handle_irq, 0, "gpmc", gpmc);
+ if (rc) {
+ dev_err(gpmc->dev, "failed to request irq %d: %d\n",
+ gpmc->irq, rc);
+ irq_domain_remove(gpmc_irq_domain);
+ gpmc_irq_domain = NULL;
+ }
+
+ return rc;
}
-static int gpmc_free_irq(void)
+static int gpmc_free_irq(struct gpmc_device *gpmc)
{
- int i;
+ int hwirq;
- if (gpmc_irq)
- free_irq(gpmc_irq, NULL);
+ free_irq(gpmc->irq, gpmc);
- for (i = 0; i < GPMC_NR_IRQ; i++) {
- irq_set_handler(gpmc_client_irq[i].irq, NULL);
- irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip);
- }
+ for (hwirq = 0; hwirq < gpmc->nirqs; hwirq++)
+ irq_dispose_mapping(irq_find_mapping(gpmc_irq_domain, hwirq));
- irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ);
+ irq_domain_remove(gpmc_irq_domain);
+ gpmc_irq_domain = NULL;
return 0;
}
@@ -1242,12 +1375,7 @@ static void gpmc_mem_init(void)
{
int cs;
- /*
- * The first 1MB of GPMC address space is typically mapped to
- * the internal ROM. Never allocate the first page, to
- * facilitate bug detection; even if we didn't boot from ROM.
- */
- gpmc_mem_root.start = SZ_1M;
+ gpmc_mem_root.start = GPMC_MEM_START;
gpmc_mem_root.end = GPMC_MEM_END;
/* Reserve all regions that has been set up by bootloader */
@@ -1796,105 +1924,6 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
of_property_read_bool(np, "gpmc,time-para-granularity");
}
-#if IS_ENABLED(CONFIG_MTD_NAND)
-
-static const char * const nand_xfer_types[] = {
- [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
- [NAND_OMAP_POLLED] = "polled",
- [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
- [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
-};
-
-static int gpmc_probe_nand_child(struct platform_device *pdev,
- struct device_node *child)
-{
- u32 val;
- const char *s;
- struct gpmc_timings gpmc_t;
- struct omap_nand_platform_data *gpmc_nand_data;
-
- if (of_property_read_u32(child, "reg", &val) < 0) {
- dev_err(&pdev->dev, "%s has no 'reg' property\n",
- child->full_name);
- return -ENODEV;
- }
-
- gpmc_nand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_nand_data),
- GFP_KERNEL);
- if (!gpmc_nand_data)
- return -ENOMEM;
-
- gpmc_nand_data->cs = val;
- gpmc_nand_data->of_node = child;
-
- /* Detect availability of ELM module */
- gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
- if (gpmc_nand_data->elm_of_node == NULL)
- gpmc_nand_data->elm_of_node =
- of_parse_phandle(child, "elm_id", 0);
-
- /* select ecc-scheme for NAND */
- if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
- pr_err("%s: ti,nand-ecc-opt not found\n", __func__);
- return -ENODEV;
- }
-
- if (!strcmp(s, "sw"))
- gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
- else if (!strcmp(s, "ham1") ||
- !strcmp(s, "hw") || !strcmp(s, "hw-romcode"))
- gpmc_nand_data->ecc_opt =
- OMAP_ECC_HAM1_CODE_HW;
- else if (!strcmp(s, "bch4"))
- if (gpmc_nand_data->elm_of_node)
- gpmc_nand_data->ecc_opt =
- OMAP_ECC_BCH4_CODE_HW;
- else
- gpmc_nand_data->ecc_opt =
- OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
- else if (!strcmp(s, "bch8"))
- if (gpmc_nand_data->elm_of_node)
- gpmc_nand_data->ecc_opt =
- OMAP_ECC_BCH8_CODE_HW;
- else
- gpmc_nand_data->ecc_opt =
- OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
- else if (!strcmp(s, "bch16"))
- if (gpmc_nand_data->elm_of_node)
- gpmc_nand_data->ecc_opt =
- OMAP_ECC_BCH16_CODE_HW;
- else
- pr_err("%s: BCH16 requires ELM support\n", __func__);
- else
- pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__);
-
- /* select data transfer mode for NAND controller */
- if (!of_property_read_string(child, "ti,nand-xfer-type", &s))
- for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++)
- if (!strcasecmp(s, nand_xfer_types[val])) {
- gpmc_nand_data->xfer_type = val;
- break;
- }
-
- gpmc_nand_data->flash_bbt = of_get_nand_on_flash_bbt(child);
-
- val = of_get_nand_bus_width(child);
- if (val == 16)
- gpmc_nand_data->devsize = NAND_BUSWIDTH_16;
-
- gpmc_read_timings_dt(child, &gpmc_t);
- gpmc_nand_init(gpmc_nand_data, &gpmc_t);
-
- return 0;
-}
-#else
-static int gpmc_probe_nand_child(struct platform_device *pdev,
- struct device_node *child)
-{
- return 0;
-}
-#endif
-
#if IS_ENABLED(CONFIG_MTD_ONENAND)
static int gpmc_probe_onenand_child(struct platform_device *pdev,
struct device_node *child)
@@ -1950,6 +1979,8 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
const char *name;
int ret, cs;
u32 val;
+ struct gpio_desc *waitpin_desc = NULL;
+ struct gpmc_device *gpmc = platform_get_drvdata(pdev);
if (of_property_read_u32(child, "reg", &cs) < 0) {
dev_err(&pdev->dev, "%s has no 'reg' property\n",
@@ -2010,23 +2041,80 @@ static int gpmc_probe_generic_child(struct platform_device *pdev,
if (ret < 0) {
dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n",
cs, &res.start);
+ if (res.start < GPMC_MEM_START) {
+ dev_info(&pdev->dev,
+ "GPMC CS %d start cannot be lesser than 0x%x\n",
+ cs, GPMC_MEM_START);
+ } else if (res.end > GPMC_MEM_END) {
+ dev_info(&pdev->dev,
+ "GPMC CS %d end cannot be greater than 0x%x\n",
+ cs, GPMC_MEM_END);
+ }
goto err;
}
- ret = of_property_read_u32(child, "bank-width", &gpmc_s.device_width);
- if (ret < 0)
- goto err;
+ if (of_node_cmp(child->name, "nand") == 0) {
+ /* Warn about older DT blobs with no compatible property */
+ if (!of_property_read_bool(child, "compatible")) {
+ dev_warn(&pdev->dev,
+ "Incompatible NAND node: missing compatible");
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ if (of_device_is_compatible(child, "ti,omap2-nand")) {
+ /* NAND specific setup */
+ val = 8;
+ of_property_read_u32(child, "nand-bus-width", &val);
+ switch (val) {
+ case 8:
+ gpmc_s.device_width = GPMC_DEVWIDTH_8BIT;
+ break;
+ case 16:
+ gpmc_s.device_width = GPMC_DEVWIDTH_16BIT;
+ break;
+ default:
+ dev_err(&pdev->dev, "%s: invalid 'nand-bus-width'\n",
+ child->name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* disable write protect */
+ gpmc_configure(GPMC_CONFIG_WP, 0);
+ gpmc_s.device_nand = true;
+ } else {
+ ret = of_property_read_u32(child, "bank-width",
+ &gpmc_s.device_width);
+ if (ret < 0)
+ goto err;
+ }
+
+ /* Reserve wait pin if it is required and valid */
+ if (gpmc_s.wait_on_read || gpmc_s.wait_on_write) {
+ unsigned int wait_pin = gpmc_s.wait_pin;
+
+ waitpin_desc = gpiochip_request_own_desc(&gpmc->gpio_chip,
+ wait_pin, "WAITPIN");
+ if (IS_ERR(waitpin_desc)) {
+ dev_err(&pdev->dev, "invalid wait-pin: %d\n", wait_pin);
+ ret = PTR_ERR(waitpin_desc);
+ goto err;
+ }
+ }
gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings");
+
ret = gpmc_cs_program_settings(cs, &gpmc_s);
if (ret < 0)
- goto err;
+ goto err_cs;
ret = gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
if (ret) {
dev_err(&pdev->dev, "failed to set gpmc timings for: %s\n",
child->name);
- goto err;
+ goto err_cs;
}
/* Clear limited address i.e. enable A26-A11 */
@@ -2057,16 +2145,81 @@ err_child_fail:
dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name);
ret = -ENODEV;
+err_cs:
+ if (waitpin_desc)
+ gpiochip_free_own_desc(waitpin_desc);
+
err:
gpmc_cs_free(cs);
return ret;
}
+static int gpmc_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ return 1; /* we're input only */
+}
+
+static int gpmc_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return 0; /* we're input only */
+}
+
+static int gpmc_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ return -EINVAL; /* we're input only */
+}
+
+static void gpmc_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+}
+
+static int gpmc_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ u32 reg;
+
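+ /* WAIT pin status bits start at bit 8 of GPMC_STATUS */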
+ offset += 8;
+
+ reg = gpmc_read_reg(GPMC_STATUS) & BIT(offset);
+
+ return !!reg;
+}
+
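+/*
+ * Expose the WAIT pins as an input-only gpio_chip so that callers (e.g.
+ * the wait-pin reservation in gpmc_probe_generic_child()) can claim them
+ * through gpiolib.
+ */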
+static int gpmc_gpio_init(struct gpmc_device *gpmc)
+{
+ int ret;
+
+ gpmc->gpio_chip.parent = gpmc->dev;
+ gpmc->gpio_chip.owner = THIS_MODULE;
+ gpmc->gpio_chip.label = DEVICE_NAME;
+ gpmc->gpio_chip.ngpio = gpmc_nr_waitpins;
+ gpmc->gpio_chip.get_direction = gpmc_gpio_get_direction;
+ gpmc->gpio_chip.direction_input = gpmc_gpio_direction_input;
+ gpmc->gpio_chip.direction_output = gpmc_gpio_direction_output;
+ gpmc->gpio_chip.set = gpmc_gpio_set;
+ gpmc->gpio_chip.get = gpmc_gpio_get;
+ gpmc->gpio_chip.base = -1;
+
+ ret = gpiochip_add(&gpmc->gpio_chip);
+ if (ret < 0) {
+ dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gpmc_gpio_exit(struct gpmc_device *gpmc)
+{
+ gpiochip_remove(&gpmc->gpio_chip);
+}
+
static int gpmc_probe_dt(struct platform_device *pdev)
{
int ret;
- struct device_node *child;
const struct of_device_id *of_id =
of_match_device(gpmc_dt_ids, &pdev->dev);
@@ -2094,17 +2247,26 @@ static int gpmc_probe_dt(struct platform_device *pdev)
return ret;
}
+ return 0;
+}
+
+static int gpmc_probe_dt_children(struct platform_device *pdev)
+{
+ int ret;
+ struct device_node *child;
+
for_each_available_child_of_node(pdev->dev.of_node, child) {
if (!child->name)
continue;
- if (of_node_cmp(child->name, "nand") == 0)
- ret = gpmc_probe_nand_child(pdev, child);
- else if (of_node_cmp(child->name, "onenand") == 0)
+ if (of_node_cmp(child->name, "onenand") == 0)
ret = gpmc_probe_onenand_child(pdev, child);
else
ret = gpmc_probe_generic_child(pdev, child);
+
+ if (ret)
+ return ret;
}
return 0;
@@ -2114,6 +2276,11 @@ static int gpmc_probe_dt(struct platform_device *pdev)
{
return 0;
}
+
+static int gpmc_probe_dt_children(struct platform_device *pdev)
+{
+ return 0;
+}
#endif
static int gpmc_probe(struct platform_device *pdev)
@@ -2121,6 +2288,14 @@ static int gpmc_probe(struct platform_device *pdev)
int rc;
u32 l;
struct resource *res;
+ struct gpmc_device *gpmc;
+
+ gpmc = devm_kzalloc(&pdev->dev, sizeof(*gpmc), GFP_KERNEL);
+ if (!gpmc)
+ return -ENOMEM;
+
+ gpmc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, gpmc);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL)
@@ -2134,15 +2309,16 @@ static int gpmc_probe(struct platform_device *pdev)
return PTR_ERR(gpmc_base);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL)
- dev_warn(&pdev->dev, "Failed to get resource: irq\n");
- else
- gpmc_irq = res->start;
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get resource: irq\n");
+ return -ENOENT;
+ }
+
+ gpmc->irq = res->start;
gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(gpmc_l3_clk)) {
dev_err(&pdev->dev, "Failed to get GPMC fck\n");
- gpmc_irq = 0;
return PTR_ERR(gpmc_l3_clk);
}
@@ -2151,11 +2327,18 @@ static int gpmc_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (pdev->dev.of_node) {
+ rc = gpmc_probe_dt(pdev);
+ if (rc)
+ return rc;
+ } else {
+ gpmc_cs_num = GPMC_CS_NUM;
+ gpmc_nr_waitpins = GPMC_NR_WAITPINS;
+ }
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- gpmc_dev = &pdev->dev;
-
l = gpmc_read_reg(GPMC_REVISION);
/*
@@ -2174,36 +2357,51 @@ static int gpmc_probe(struct platform_device *pdev)
gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS;
if (GPMC_REVISION_MAJOR(l) > 0x5)
gpmc_capability |= GPMC_HAS_MUX_AAD;
- dev_info(gpmc_dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
+ dev_info(gpmc->dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
GPMC_REVISION_MINOR(l));
gpmc_mem_init();
-
- if (gpmc_setup_irq() < 0)
- dev_warn(gpmc_dev, "gpmc_setup_irq failed\n");
-
- if (!pdev->dev.of_node) {
- gpmc_cs_num = GPMC_CS_NUM;
- gpmc_nr_waitpins = GPMC_NR_WAITPINS;
+ rc = gpmc_gpio_init(gpmc);
+ if (rc)
+ goto gpio_init_failed;
+
+ gpmc->nirqs = GPMC_NR_NAND_IRQS + gpmc_nr_waitpins;
+ rc = gpmc_setup_irq(gpmc);
+ if (rc) {
+ dev_err(gpmc->dev, "gpmc_setup_irq failed\n");
+ goto setup_irq_failed;
}
- rc = gpmc_probe_dt(pdev);
+ rc = gpmc_probe_dt_children(pdev);
if (rc < 0) {
- pm_runtime_put_sync(&pdev->dev);
- dev_err(gpmc_dev, "failed to probe DT parameters\n");
- return rc;
+ dev_err(gpmc->dev, "failed to probe DT children\n");
+ goto dt_children_failed;
}
return 0;
+
+dt_children_failed:
+ gpmc_free_irq(gpmc);
+setup_irq_failed:
+ gpmc_gpio_exit(gpmc);
+gpio_init_failed:
+ gpmc_mem_exit();
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return rc;
}
static int gpmc_remove(struct platform_device *pdev)
{
- gpmc_free_irq();
+ struct gpmc_device *gpmc = platform_get_drvdata(pdev);
+
+ gpmc_free_irq(gpmc);
+ gpmc_gpio_exit(gpmc);
gpmc_mem_exit();
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- gpmc_dev = NULL;
+
return 0;
}
@@ -2249,25 +2447,6 @@ static __exit void gpmc_exit(void)
postcore_initcall(gpmc_init);
module_exit(gpmc_exit);
-static irqreturn_t gpmc_handle_irq(int irq, void *dev)
-{
- int i;
- u32 regval;
-
- regval = gpmc_read_reg(GPMC_IRQSTATUS);
-
- if (!regval)
- return IRQ_NONE;
-
- for (i = 0; i < GPMC_NR_IRQ; i++)
- if (regval & gpmc_client_irq[i].bitmask)
- generic_handle_irq(gpmc_client_irq[i].irq);
-
- gpmc_write_reg(GPMC_IRQSTATUS, regval);
-
- return IRQ_HANDLED;
-}
-
static struct omap3_gpmc_regs gpmc_context;
void omap3_gpmc_save_context(void)
diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig
new file mode 100644
index 000000000..9de122220
--- /dev/null
+++ b/drivers/memory/samsung/Kconfig
@@ -0,0 +1,13 @@
+config SAMSUNG_MC
+ bool "Samsung Exynos Memory Controller support" if COMPILE_TEST
+ help
+ Support for the Memory Controller (MC) devices found on
+ Samsung Exynos SoCs.
+
+if SAMSUNG_MC
+
+config EXYNOS_SROM
+ bool "Exynos SROM controller driver" if COMPILE_TEST
+ depends on (ARM && ARCH_EXYNOS) || (COMPILE_TEST && HAS_IOMEM)
+
+endif
diff --git a/drivers/memory/samsung/Makefile b/drivers/memory/samsung/Makefile
new file mode 100644
index 000000000..9c554d552
--- /dev/null
+++ b/drivers/memory/samsung/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_EXYNOS_SROM) += exynos-srom.o
diff --git a/drivers/memory/samsung/exynos-srom.c b/drivers/memory/samsung/exynos-srom.c
new file mode 100644
index 000000000..96756fb4d
--- /dev/null
+++ b/drivers/memory/samsung/exynos-srom.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * EXYNOS - SROM Controller support
+ * Author: Pankaj Dubey <pankaj.dubey@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "exynos-srom.h"
+
+static const unsigned long exynos_srom_offsets[] = {
+ /* SROM side */
+ EXYNOS_SROM_BW,
+ EXYNOS_SROM_BC0,
+ EXYNOS_SROM_BC1,
+ EXYNOS_SROM_BC2,
+ EXYNOS_SROM_BC3,
+};
+
+/**
+ * struct exynos_srom_reg_dump - register dump of SROM controller registers.
+ * @offset: SROM register offset from the controller base address.
+ * @value: the value of the register at @offset.
+ */
+struct exynos_srom_reg_dump {
+ u32 offset;
+ u32 value;
+};
+
+/**
+ * struct exynos_srom - platform data for the exynos srom controller driver.
+ * @dev: platform device pointer
+ * @reg_base: srom base address
+ * @reg_offset: exynos_srom_reg_dump pointer to hold offset and its value.
+ */
+struct exynos_srom {
+ struct device *dev;
+ void __iomem *reg_base;
+ struct exynos_srom_reg_dump *reg_offset;
+};
+
+static struct exynos_srom_reg_dump *exynos_srom_alloc_reg_dump(
+ const unsigned long *rdump,
+ unsigned long nr_rdump)
+{
+ struct exynos_srom_reg_dump *rd;
+ unsigned int i;
+
+ rd = kcalloc(nr_rdump, sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return NULL;
+
+ for (i = 0; i < nr_rdump; ++i)
+ rd[i].offset = rdump[i];
+
+ return rd;
+}
+
+static int exynos_srom_configure_bank(struct exynos_srom *srom,
+ struct device_node *np)
+{
+ u32 bank, width, pmc = 0;
+ u32 timing[6];
+ u32 cs, bw;
+
+ if (of_property_read_u32(np, "reg", &bank))
+ return -EINVAL;
+ if (of_property_read_u32(np, "reg-io-width", &width))
+ width = 1;
+ if (of_property_read_bool(np, "samsung,srom-page-mode"))
+ pmc = 1 << EXYNOS_SROM_BCX__PMC__SHIFT;
+ if (of_property_read_u32_array(np, "samsung,srom-timing", timing,
+ ARRAY_SIZE(timing)))
+ return -EINVAL;
+
+ bank *= 4; /* Convert bank into shift/offset */
+
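+ /* reg-io-width is in bytes: 2 selects a 16-bit data width for the bank */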
+ cs = 1 << EXYNOS_SROM_BW__BYTEENABLE__SHIFT;
+ if (width == 2)
+ cs |= 1 << EXYNOS_SROM_BW__DATAWIDTH__SHIFT;
+
+ bw = __raw_readl(srom->reg_base + EXYNOS_SROM_BW);
+ bw = (bw & ~(EXYNOS_SROM_BW__CS_MASK << bank)) | (cs << bank);
+ __raw_writel(bw, srom->reg_base + EXYNOS_SROM_BW);
+
+ __raw_writel(pmc | (timing[0] << EXYNOS_SROM_BCX__TACP__SHIFT) |
+ (timing[1] << EXYNOS_SROM_BCX__TCAH__SHIFT) |
+ (timing[2] << EXYNOS_SROM_BCX__TCOH__SHIFT) |
+ (timing[3] << EXYNOS_SROM_BCX__TACC__SHIFT) |
+ (timing[4] << EXYNOS_SROM_BCX__TCOS__SHIFT) |
+ (timing[5] << EXYNOS_SROM_BCX__TACS__SHIFT),
+ srom->reg_base + EXYNOS_SROM_BC0 + bank);
+
+ return 0;
+}
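+
+/*
+ * A hypothetical bank node of the shape parsed above (names and values
+ * are illustrative only):
+ *
+ *   srom-bank@3 {
+ *       reg = <3>;
+ *       reg-io-width = <2>;
+ *       samsung,srom-timing = <1 9 12 1 9 1>;
+ *   };
+ */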
+
+static int exynos_srom_probe(struct platform_device *pdev)
+{
+ struct device_node *np, *child;
+ struct exynos_srom *srom;
+ struct device *dev = &pdev->dev;
+ bool bad_bank_config = false;
+
+ np = dev->of_node;
+ if (!np) {
+ dev_err(&pdev->dev, "could not find device info\n");
+ return -EINVAL;
+ }
+
+ srom = devm_kzalloc(&pdev->dev,
+ sizeof(struct exynos_srom), GFP_KERNEL);
+ if (!srom)
+ return -ENOMEM;
+
+ srom->dev = dev;
+ srom->reg_base = of_iomap(np, 0);
+ if (!srom->reg_base) {
+ dev_err(&pdev->dev, "iomap of exynos srom controller failed\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, srom);
+
+ srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets,
+ ARRAY_SIZE(exynos_srom_offsets));
+ if (!srom->reg_offset) {
+ iounmap(srom->reg_base);
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(np, child) {
+ if (exynos_srom_configure_bank(srom, child)) {
+ dev_err(dev,
+ "Could not decode bank configuration for %s\n",
+ child->name);
+ bad_bank_config = true;
+ }
+ }
+
+ /*
+ * If any bank failed to configure, we still provide suspend/resume,
+ * but do not probe child devices
+ */
+ if (bad_bank_config)
+ return 0;
+
+ return of_platform_populate(np, NULL, NULL, dev);
+}
+
+static int exynos_srom_remove(struct platform_device *pdev)
+{
+ struct exynos_srom *srom = platform_get_drvdata(pdev);
+
+ kfree(srom->reg_offset);
+ iounmap(srom->reg_base);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void exynos_srom_save(void __iomem *base,
+ struct exynos_srom_reg_dump *rd,
+ unsigned int num_regs)
+{
+ for (; num_regs > 0; --num_regs, ++rd)
+ rd->value = readl(base + rd->offset);
+}
+
+static void exynos_srom_restore(void __iomem *base,
+ const struct exynos_srom_reg_dump *rd,
+ unsigned int num_regs)
+{
+ for (; num_regs > 0; --num_regs, ++rd)
+ writel(rd->value, base + rd->offset);
+}
+
+static int exynos_srom_suspend(struct device *dev)
+{
+ struct exynos_srom *srom = dev_get_drvdata(dev);
+
+ exynos_srom_save(srom->reg_base, srom->reg_offset,
+ ARRAY_SIZE(exynos_srom_offsets));
+ return 0;
+}
+
+static int exynos_srom_resume(struct device *dev)
+{
+ struct exynos_srom *srom = dev_get_drvdata(dev);
+
+ exynos_srom_restore(srom->reg_base, srom->reg_offset,
+ ARRAY_SIZE(exynos_srom_offsets));
+ return 0;
+}
+#endif
+
+static const struct of_device_id of_exynos_srom_ids[] = {
+ {
+ .compatible = "samsung,exynos4210-srom",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_exynos_srom_ids);
+
+static SIMPLE_DEV_PM_OPS(exynos_srom_pm_ops, exynos_srom_suspend, exynos_srom_resume);
+
+static struct platform_driver exynos_srom_driver = {
+ .probe = exynos_srom_probe,
+ .remove = exynos_srom_remove,
+ .driver = {
+ .name = "exynos-srom",
+ .of_match_table = of_exynos_srom_ids,
+ .pm = &exynos_srom_pm_ops,
+ },
+};
+module_platform_driver(exynos_srom_driver);
+
+MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>");
+MODULE_DESCRIPTION("Exynos SROM Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/memory/samsung/exynos-srom.h b/drivers/memory/samsung/exynos-srom.h
new file mode 100644
index 000000000..34660c6a5
--- /dev/null
+++ b/drivers/memory/samsung/exynos-srom.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Exynos SROMC register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __EXYNOS_SROM_H
+#define __EXYNOS_SROM_H __FILE__
+
+#define EXYNOS_SROMREG(x) (x)
+
+#define EXYNOS_SROM_BW EXYNOS_SROMREG(0x0)
+#define EXYNOS_SROM_BC0 EXYNOS_SROMREG(0x4)
+#define EXYNOS_SROM_BC1 EXYNOS_SROMREG(0x8)
+#define EXYNOS_SROM_BC2 EXYNOS_SROMREG(0xc)
+#define EXYNOS_SROM_BC3 EXYNOS_SROMREG(0x10)
+#define EXYNOS_SROM_BC4 EXYNOS_SROMREG(0x14)
+#define EXYNOS_SROM_BC5 EXYNOS_SROMREG(0x18)
+
+/* the BW register packs one 4-bit settings nibble per chip select, NCS0 - NCS5 */
+
+#define EXYNOS_SROM_BW__DATAWIDTH__SHIFT 0
+#define EXYNOS_SROM_BW__ADDRMODE__SHIFT 1
+#define EXYNOS_SROM_BW__WAITENABLE__SHIFT 2
+#define EXYNOS_SROM_BW__BYTEENABLE__SHIFT 3
+
+#define EXYNOS_SROM_BW__CS_MASK 0xf
+
+#define EXYNOS_SROM_BW__NCS0__SHIFT 0
+#define EXYNOS_SROM_BW__NCS1__SHIFT 4
+#define EXYNOS_SROM_BW__NCS2__SHIFT 8
+#define EXYNOS_SROM_BW__NCS3__SHIFT 12
+#define EXYNOS_SROM_BW__NCS4__SHIFT 16
+#define EXYNOS_SROM_BW__NCS5__SHIFT 20
+
+/* the same field layout applies to each of BC0 - BC5 */
+
+#define EXYNOS_SROM_BCX__PMC__SHIFT 0
+#define EXYNOS_SROM_BCX__TACP__SHIFT 4
+#define EXYNOS_SROM_BCX__TCAH__SHIFT 8
+#define EXYNOS_SROM_BCX__TCOH__SHIFT 12
+#define EXYNOS_SROM_BCX__TACC__SHIFT 16
+#define EXYNOS_SROM_BCX__TCOS__SHIFT 24
+#define EXYNOS_SROM_BCX__TACS__SHIFT 28
+
+#endif /* __EXYNOS_SROM_H */
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 84abf9d3c..3cd68152d 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1220,7 +1220,7 @@ static int msb_read_boot_blocks(struct msb_data *msb)
}
if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
- dbg("managment flag doesn't indicate boot block %d",
+ dbg("management flag doesn't indicate boot block %d",
pba);
continue;
}
@@ -1367,7 +1367,7 @@ static int msb_ftl_initialize(struct msb_data *msb)
static int msb_ftl_scan(struct msb_data *msb)
{
u16 pba, lba, other_block;
- u8 overwrite_flag, managment_flag, other_overwrite_flag;
+ u8 overwrite_flag, management_flag, other_overwrite_flag;
int error;
struct ms_extra_data_register extra;
u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
@@ -1409,7 +1409,7 @@ static int msb_ftl_scan(struct msb_data *msb)
}
lba = be16_to_cpu(extra.logical_address);
- managment_flag = extra.management_flag;
+ management_flag = extra.management_flag;
overwrite_flag = extra.overwrite_flag;
overwrite_flags[pba] = overwrite_flag;
@@ -1421,16 +1421,16 @@ static int msb_ftl_scan(struct msb_data *msb)
}
/* Skip system/drm blocks */
- if ((managment_flag & MEMSTICK_MANAGMENT_FLAG_NORMAL) !=
- MEMSTICK_MANAGMENT_FLAG_NORMAL) {
- dbg("pba %05d -> [reserved managment flag %02x]",
- pba, managment_flag);
+ if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
+ MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
+ dbg("pba %05d -> [reserved management flag %02x]",
+ pba, management_flag);
msb_mark_block_used(msb, pba);
continue;
}
/* Erase temporary tables */
- if (!(managment_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
+ if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
dbg("pba %05d -> [temp table] - will erase", pba);
msb_mark_block_used(msb, pba);
diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h
index c75198dbf..53962c3b2 100644
--- a/drivers/memstick/core/ms_block.h
+++ b/drivers/memstick/core/ms_block.h
@@ -47,7 +47,7 @@
#define MEMSTICK_OV_PG_NORMAL \
(MEMSTICK_OVERWRITE_PGST1 | MEMSTICK_OVERWRITE_PGST0)
-#define MEMSTICK_MANAGMENT_FLAG_NORMAL \
+#define MEMSTICK_MANAGEMENT_FLAG_NORMAL \
(MEMSTICK_MANAGEMENT_SYSFLG | \
MEMSTICK_MANAGEMENT_SCMS1 | \
MEMSTICK_MANAGEMENT_SCMS0) \
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 922a75064..0fb27d338 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1033,12 +1033,11 @@ static int mspro_block_read_attributes(struct memstick_dev *card)
}
msb->attr_group.name = "media_attributes";
- buffer = kmalloc(attr_len, GFP_KERNEL);
+ buffer = kmemdup(attr, attr_len, GFP_KERNEL);
if (!buffer) {
rc = -ENOMEM;
goto out_free_attr;
}
- memcpy(buffer, (char *)attr, attr_len);
for (cnt = 0; cnt < attr_count; ++cnt) {
s_attr = kzalloc(sizeof(struct mspro_sys_attr), GFP_KERNEL);
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index 1105db235..d34bc3530 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -706,7 +706,7 @@ poll_again:
if (host->eject)
break;
- msleep(1000);
+ schedule_timeout_idle(HZ);
}
complete(&host->detect_ms_exit);
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index cbe96072a..6955c9e22 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -791,7 +791,7 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
pSimple->Address.High = 0;
mpt_put_msg_frame (LanCtx, mpt_dev, mf);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
IOC_AND_NETDEV_NAMES_s_s(dev),
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7ebccfa80..7ee1667ac 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2281,7 +2281,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
- if (!dma_addr_out)
+ if (pci_dma_mapping_error(ioc->pcidev, dma_addr_out))
goto put_mf;
ioc->add_sge(psge, flagsLength, dma_addr_out);
psge += ioc->SGE_size;
@@ -2296,7 +2296,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
flagsLength |= blk_rq_bytes(rsp) + 4;
dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
- if (!dma_addr_in)
+ if (pci_dma_mapping_error(ioc->pcidev, dma_addr_in))
goto unmap;
ioc->add_sge(psge, flagsLength, dma_addr_in);
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 613231c16..031e088ed 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -1150,7 +1150,7 @@ static void mpt_work_wrapper(struct work_struct *work)
}
shost_printk(KERN_INFO, shost, MYIOC_s_FMT
"Integrated RAID detects new device %d\n", ioc->name, disk);
- scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, 1);
+ scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, SCSI_SCAN_RESCAN);
}
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index eea61e349..1bcf601de 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -134,7 +134,7 @@ config MFD_CROS_EC
select MFD_CORE
select CHROME_PLATFORMS
select CROS_EC_PROTO
- depends on X86 || ARM || COMPILE_TEST
+ depends on X86 || ARM || ARM64 || COMPILE_TEST
help
If you say Y here you get support for the ChromeOS Embedded
Controller (EC) providing keyboard, battery and power services.
@@ -319,6 +319,16 @@ config MFD_HI6421_PMIC
menus in order to enable them.
We communicate with the Hi6421 via memory-mapped I/O.
+config MFD_HI655X_PMIC
+ tristate "HiSilicon Hi655X series PMU/Codec IC"
+ depends on ARCH_HISI || COMPILE_TEST
+ depends on OF
+ select MFD_CORE
+ select REGMAP_MMIO
+ select REGMAP_IRQ
+ help
+ Select this option to enable the HiSilicon Hi655x series PMIC driver.
+
config HTC_EGPIO
bool "HTC EGPIO support"
depends on GPIOLIB && ARM
@@ -527,6 +537,21 @@ config MFD_MAX14577
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_MAX77620
+ bool "Maxim Semiconductor MAX77620 and MAX20024 PMIC Support"
+ depends on I2C=y
+ depends on OF
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ select IRQ_DOMAIN
+ help
+ Say yes here to add support for Maxim Semiconductor MAX77620 and
+ MAX20024 which are Power Management IC with General purpose pins,
+ RTC, regulators, clock generator, watchdog etc. This driver
+ provides common support for accessing the device; additional drivers
+ must be enabled in order to use the functionality of the device.
+
config MFD_MAX77686
tristate "Maxim Semiconductor MAX77686/802 PMIC Support"
depends on I2C
@@ -543,8 +568,8 @@ config MFD_MAX77686
of the device.
config MFD_MAX77693
- bool "Maxim Semiconductor MAX77693 PMIC Support"
- depends on I2C=y
+ tristate "Maxim Semiconductor MAX77693 PMIC Support"
+ depends on I2C
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
@@ -1568,7 +1593,7 @@ endmenu
config MFD_VEXPRESS_SYSREG
bool "Versatile Express System Registers"
- depends on VEXPRESS_CONFIG && GPIOLIB
+ depends on VEXPRESS_CONFIG && GPIOLIB && !ARCH_USES_GETTIMEOFFSET
default y
select CLKSRC_MMIO
select GPIO_GENERIC_PLATFORM
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 5eaa6465d..42a66e19e 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -128,6 +128,7 @@ obj-$(CONFIG_MFD_DA9063) += da9063.o
obj-$(CONFIG_MFD_DA9150) += da9150-core.o
obj-$(CONFIG_MFD_MAX14577) += max14577.o
+obj-$(CONFIG_MFD_MAX77620) += max77620.o
obj-$(CONFIG_MFD_MAX77686) += max77686.o
obj-$(CONFIG_MFD_MAX77693) += max77693.o
obj-$(CONFIG_MFD_MAX77843) += max77843.o
@@ -195,6 +196,7 @@ obj-$(CONFIG_MFD_STW481X) += stw481x.o
obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o
obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o
obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o
+obj-$(CONFIG_MFD_HI655X_PMIC) += hi655x-pmic.o
obj-$(CONFIG_MFD_DLN2) += dln2.o
obj-$(CONFIG_MFD_RT5033) += rt5033.o
obj-$(CONFIG_MFD_SKY81452) += sky81452.o
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 69d9fffe5..0aecd7bd3 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -2563,7 +2563,7 @@ static ssize_t ab8500_gpadc_trig_timer_write(struct file *file,
if (user_trig_timer & ~0xFF) {
dev_err(dev,
- "debugfs error input: should be beetween 0 to 255\n");
+ "debugfs error input: should be between 0 to 255\n");
return -EINVAL;
}
diff --git a/drivers/mfd/act8945a.c b/drivers/mfd/act8945a.c
index 525b546ba..10c6d2da8 100644
--- a/drivers/mfd/act8945a.c
+++ b/drivers/mfd/act8945a.c
@@ -46,8 +46,9 @@ static int act8945a_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, regmap);
- ret = mfd_add_devices(&i2c->dev, PLATFORM_DEVID_NONE, act8945a_devs,
- ARRAY_SIZE(act8945a_devs), NULL, 0, NULL);
+ ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_NONE,
+ act8945a_devs, ARRAY_SIZE(act8945a_devs),
+ NULL, 0, NULL);
if (ret) {
dev_err(&i2c->dev, "Failed to add sub devices\n");
return ret;
@@ -56,13 +57,6 @@ static int act8945a_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int act8945a_i2c_remove(struct i2c_client *i2c)
-{
- mfd_remove_devices(&i2c->dev);
-
- return 0;
-}
-
static const struct i2c_device_id act8945a_i2c_id[] = {
{ "act8945a", 0 },
{}
@@ -81,7 +75,6 @@ static struct i2c_driver act8945a_i2c_driver = {
.of_match_table = of_match_ptr(act8945a_of_match),
},
.probe = act8945a_i2c_probe,
- .remove = act8945a_i2c_remove,
.id_table = act8945a_i2c_id,
};
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 5319f2527..bf2717967 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -908,12 +908,12 @@ static const char * const wm5102_supplies[] = {
static const struct mfd_cell wm5102_devs[] = {
{ .name = "arizona-micsupp" },
+ { .name = "arizona-gpio" },
{
.name = "arizona-extcon",
.parent_supplies = wm5102_supplies,
.num_parent_supplies = 1, /* We only need MICVDD */
},
- { .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
{ .name = "arizona-pwm" },
{
@@ -925,12 +925,12 @@ static const struct mfd_cell wm5102_devs[] = {
static const struct mfd_cell wm5110_devs[] = {
{ .name = "arizona-micsupp" },
+ { .name = "arizona-gpio" },
{
.name = "arizona-extcon",
.parent_supplies = wm5102_supplies,
.num_parent_supplies = 1, /* We only need MICVDD */
},
- { .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
{ .name = "arizona-pwm" },
{
@@ -966,12 +966,12 @@ static const char * const wm8997_supplies[] = {
static const struct mfd_cell wm8997_devs[] = {
{ .name = "arizona-micsupp" },
+ { .name = "arizona-gpio" },
{
.name = "arizona-extcon",
.parent_supplies = wm8997_supplies,
.num_parent_supplies = 1, /* We only need MICVDD */
},
- { .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
{ .name = "arizona-pwm" },
{
@@ -982,12 +982,13 @@ static const struct mfd_cell wm8997_devs[] = {
};
static const struct mfd_cell wm8998_devs[] = {
+ { .name = "arizona-micsupp" },
+ { .name = "arizona-gpio" },
{
.name = "arizona-extcon",
.parent_supplies = wm5102_supplies,
.num_parent_supplies = 1, /* We only need MICVDD */
},
- { .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
{ .name = "arizona-pwm" },
{
@@ -995,7 +996,6 @@ static const struct mfd_cell wm8998_devs[] = {
.parent_supplies = wm5102_supplies,
.num_parent_supplies = ARRAY_SIZE(wm5102_supplies),
},
- { .name = "arizona-micsupp" },
};
int arizona_dev_init(struct arizona *arizona)
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 5fef01492..edeb49513 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -168,12 +168,15 @@ static struct irq_chip arizona_irq_chip = {
.irq_set_wake = arizona_irq_set_wake,
};
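+/*
+ * Give these IRQs their own lockdep class; the chip is used from nested
+ * thread context, which can otherwise trigger false-positive lockdep
+ * recursion reports (assumed rationale for the dedicated key below).
+ */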
+static struct lock_class_key arizona_irq_lock_class;
+
static int arizona_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct arizona *data = h->host_data;
irq_set_chip_data(virq, data);
+ irq_set_lockdep_class(virq, &arizona_irq_lock_class);
irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq);
irq_set_nested_thread(virq, 1);
irq_set_noprobe(virq);
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index 09e1483b9..67b124175 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -189,22 +189,14 @@ static int as3711_i2c_probe(struct i2c_client *client,
as3711_subdevs[AS3711_BACKLIGHT].pdata_size = 0;
}
- ret = mfd_add_devices(as3711->dev, -1, as3711_subdevs,
- ARRAY_SIZE(as3711_subdevs), NULL, 0, NULL);
+ ret = devm_mfd_add_devices(as3711->dev, -1, as3711_subdevs,
+ ARRAY_SIZE(as3711_subdevs), NULL, 0, NULL);
if (ret < 0)
dev_err(&client->dev, "add mfd devices failed: %d\n", ret);
return ret;
}
-static int as3711_i2c_remove(struct i2c_client *client)
-{
- struct as3711 *as3711 = i2c_get_clientdata(client);
-
- mfd_remove_devices(as3711->dev);
- return 0;
-}
-
static const struct i2c_device_id as3711_i2c_id[] = {
{.name = "as3711", .driver_data = 0},
{}
@@ -218,7 +210,6 @@ static struct i2c_driver as3711_i2c_driver = {
.of_match_table = of_match_ptr(as3711_of_match),
},
.probe = as3711_i2c_probe,
- .remove = as3711_i2c_remove,
.id_table = as3711_i2c_id,
};
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index e1f597f97..f87342c21 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -385,9 +385,10 @@ static int as3722_i2c_probe(struct i2c_client *i2c,
return ret;
irq_flags = as3722->irq_flags | IRQF_ONESHOT;
- ret = regmap_add_irq_chip(as3722->regmap, as3722->chip_irq,
- irq_flags, -1, &as3722_irq_chip,
- &as3722->irq_data);
+ ret = devm_regmap_add_irq_chip(as3722->dev, as3722->regmap,
+ as3722->chip_irq,
+ irq_flags, -1, &as3722_irq_chip,
+ &as3722->irq_data);
if (ret < 0) {
dev_err(as3722->dev, "Failed to add regmap irq: %d\n", ret);
return ret;
@@ -395,33 +396,20 @@ static int as3722_i2c_probe(struct i2c_client *i2c,
ret = as3722_configure_pullups(as3722);
if (ret < 0)
- goto scrub;
+ return ret;
- ret = mfd_add_devices(&i2c->dev, -1, as3722_devs,
- ARRAY_SIZE(as3722_devs), NULL, 0,
- regmap_irq_get_domain(as3722->irq_data));
+ ret = devm_mfd_add_devices(&i2c->dev, -1, as3722_devs,
+ ARRAY_SIZE(as3722_devs), NULL, 0,
+ regmap_irq_get_domain(as3722->irq_data));
if (ret) {
dev_err(as3722->dev, "Failed to add MFD devices: %d\n", ret);
- goto scrub;
+ return ret;
}
device_init_wakeup(as3722->dev, true);
dev_dbg(as3722->dev, "AS3722 core driver initialized successfully\n");
return 0;
-
-scrub:
- regmap_del_irq_chip(as3722->chip_irq, as3722->irq_data);
- return ret;
-}
-
-static int as3722_i2c_remove(struct i2c_client *i2c)
-{
- struct as3722 *as3722 = i2c_get_clientdata(i2c);
-
- mfd_remove_devices(as3722->dev);
- regmap_del_irq_chip(as3722->chip_irq, as3722->irq_data);
- return 0;
}
static int __maybe_unused as3722_i2c_suspend(struct device *dev)
@@ -470,7 +458,6 @@ static struct i2c_driver as3722_i2c_driver = {
.pm = &as3722_pm_ops,
},
.probe = as3722_i2c_probe,
- .remove = as3722_i2c_remove,
.id_table = as3722_i2c_id,
};
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 4dca6bc61..0413c8159 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -446,7 +446,7 @@ static int asic3_gpio_direction(struct gpio_chip *chip,
unsigned long flags;
struct asic3 *asic;
- asic = container_of(chip, struct asic3, gpio);
+ asic = gpiochip_get_data(chip);
gpio_base = ASIC3_GPIO_TO_BASE(offset);
if (gpio_base > ASIC3_GPIO_D_BASE) {
@@ -492,7 +492,7 @@ static int asic3_gpio_get(struct gpio_chip *chip,
u32 mask = ASIC3_GPIO_TO_MASK(offset);
struct asic3 *asic;
- asic = container_of(chip, struct asic3, gpio);
+ asic = gpiochip_get_data(chip);
gpio_base = ASIC3_GPIO_TO_BASE(offset);
if (gpio_base > ASIC3_GPIO_D_BASE) {
@@ -513,7 +513,7 @@ static void asic3_gpio_set(struct gpio_chip *chip,
unsigned long flags;
struct asic3 *asic;
- asic = container_of(chip, struct asic3, gpio);
+ asic = gpiochip_get_data(chip);
gpio_base = ASIC3_GPIO_TO_BASE(offset);
if (gpio_base > ASIC3_GPIO_D_BASE) {
@@ -540,7 +540,7 @@ static void asic3_gpio_set(struct gpio_chip *chip,
static int asic3_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
- struct asic3 *asic = container_of(chip, struct asic3, gpio);
+ struct asic3 *asic = gpiochip_get_data(chip);
return asic->irq_base + offset;
}
@@ -595,7 +595,7 @@ static __init int asic3_gpio_probe(struct platform_device *pdev,
alt_reg[i]);
}
- return gpiochip_add(&asic->gpio);
+ return gpiochip_add_data(&asic->gpio, asic);
}
static int asic3_gpio_remove(struct platform_device *pdev)
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index 06c205868..eca7ea69b 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -128,16 +128,9 @@ static int atmel_hlcdc_probe(struct platform_device *pdev)
dev_set_drvdata(dev, hlcdc);
- return mfd_add_devices(dev, -1, atmel_hlcdc_cells,
- ARRAY_SIZE(atmel_hlcdc_cells),
- NULL, 0, NULL);
-}
-
-static int atmel_hlcdc_remove(struct platform_device *pdev)
-{
- mfd_remove_devices(&pdev->dev);
-
- return 0;
+ return devm_mfd_add_devices(dev, -1, atmel_hlcdc_cells,
+ ARRAY_SIZE(atmel_hlcdc_cells),
+ NULL, 0, NULL);
}
static const struct of_device_id atmel_hlcdc_match[] = {
@@ -152,7 +145,6 @@ MODULE_DEVICE_TABLE(of, atmel_hlcdc_match);
static struct platform_driver atmel_hlcdc_driver = {
.probe = atmel_hlcdc_probe,
- .remove = atmel_hlcdc_remove,
.driver = {
.name = "atmel-hlcdc",
.of_match_table = atmel_hlcdc_match,
diff --git a/drivers/mfd/axp20x-rsb.c b/drivers/mfd/axp20x-rsb.c
index 28c20247c..a407527bc 100644
--- a/drivers/mfd/axp20x-rsb.c
+++ b/drivers/mfd/axp20x-rsb.c
@@ -61,6 +61,7 @@ static int axp20x_rsb_remove(struct sunxi_rsb_device *rdev)
static const struct of_device_id axp20x_rsb_of_match[] = {
{ .compatible = "x-powers,axp223", .data = (void *)AXP223_ID },
+ { .compatible = "x-powers,axp809", .data = (void *)AXP809_ID },
{ },
};
MODULE_DEVICE_TABLE(of, axp20x_rsb_of_match);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index a57d6e940..e4e32978c 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -37,6 +37,7 @@ static const char * const axp20x_model_names[] = {
"AXP221",
"AXP223",
"AXP288",
+ "AXP809",
};
static const struct regmap_range axp152_writeable_ranges[] = {
@@ -85,6 +86,7 @@ static const struct regmap_access_table axp20x_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(axp20x_volatile_ranges),
};
+/* AXP22x ranges are shared with the AXP809, as they cover the same range */
static const struct regmap_range axp22x_writeable_ranges[] = {
regmap_reg_range(AXP20X_DATACACHE(0), AXP20X_IRQ5_STATE),
regmap_reg_range(AXP20X_DCDC_MODE, AXP22X_BATLOW_THRES1),
@@ -128,6 +130,12 @@ static struct resource axp152_pek_resources[] = {
DEFINE_RES_IRQ_NAMED(AXP152_IRQ_PEK_FAL_EDGE, "PEK_DBF"),
};
+static struct resource axp20x_ac_power_supply_resources[] = {
+ DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_ACIN_PLUGIN, "ACIN_PLUGIN"),
+ DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_ACIN_REMOVAL, "ACIN_REMOVAL"),
+ DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_ACIN_OVER_V, "ACIN_OVER_V"),
+};
+
static struct resource axp20x_pek_resources[] = {
{
.name = "PEK_DBR",
@@ -211,6 +219,20 @@ static struct resource axp288_fuel_gauge_resources[] = {
},
};
+static struct resource axp809_pek_resources[] = {
+ {
+ .name = "PEK_DBR",
+ .start = AXP809_IRQ_PEK_RIS_EDGE,
+ .end = AXP809_IRQ_PEK_RIS_EDGE,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .name = "PEK_DBF",
+ .start = AXP809_IRQ_PEK_FAL_EDGE,
+ .end = AXP809_IRQ_PEK_FAL_EDGE,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
static const struct regmap_config axp152_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -378,6 +400,41 @@ static const struct regmap_irq axp288_regmap_irqs[] = {
INIT_REGMAP_IRQ(AXP288, BC_USB_CHNG, 5, 1),
};
+static const struct regmap_irq axp809_regmap_irqs[] = {
+ INIT_REGMAP_IRQ(AXP809, ACIN_OVER_V, 0, 7),
+ INIT_REGMAP_IRQ(AXP809, ACIN_PLUGIN, 0, 6),
+ INIT_REGMAP_IRQ(AXP809, ACIN_REMOVAL, 0, 5),
+ INIT_REGMAP_IRQ(AXP809, VBUS_OVER_V, 0, 4),
+ INIT_REGMAP_IRQ(AXP809, VBUS_PLUGIN, 0, 3),
+ INIT_REGMAP_IRQ(AXP809, VBUS_REMOVAL, 0, 2),
+ INIT_REGMAP_IRQ(AXP809, VBUS_V_LOW, 0, 1),
+ INIT_REGMAP_IRQ(AXP809, BATT_PLUGIN, 1, 7),
+ INIT_REGMAP_IRQ(AXP809, BATT_REMOVAL, 1, 6),
+ INIT_REGMAP_IRQ(AXP809, BATT_ENT_ACT_MODE, 1, 5),
+ INIT_REGMAP_IRQ(AXP809, BATT_EXIT_ACT_MODE, 1, 4),
+ INIT_REGMAP_IRQ(AXP809, CHARG, 1, 3),
+ INIT_REGMAP_IRQ(AXP809, CHARG_DONE, 1, 2),
+ INIT_REGMAP_IRQ(AXP809, BATT_CHG_TEMP_HIGH, 2, 7),
+ INIT_REGMAP_IRQ(AXP809, BATT_CHG_TEMP_HIGH_END, 2, 6),
+ INIT_REGMAP_IRQ(AXP809, BATT_CHG_TEMP_LOW, 2, 5),
+ INIT_REGMAP_IRQ(AXP809, BATT_CHG_TEMP_LOW_END, 2, 4),
+ INIT_REGMAP_IRQ(AXP809, BATT_ACT_TEMP_HIGH, 2, 3),
+ INIT_REGMAP_IRQ(AXP809, BATT_ACT_TEMP_HIGH_END, 2, 2),
+ INIT_REGMAP_IRQ(AXP809, BATT_ACT_TEMP_LOW, 2, 1),
+ INIT_REGMAP_IRQ(AXP809, BATT_ACT_TEMP_LOW_END, 2, 0),
+ INIT_REGMAP_IRQ(AXP809, DIE_TEMP_HIGH, 3, 7),
+ INIT_REGMAP_IRQ(AXP809, LOW_PWR_LVL1, 3, 1),
+ INIT_REGMAP_IRQ(AXP809, LOW_PWR_LVL2, 3, 0),
+ INIT_REGMAP_IRQ(AXP809, TIMER, 4, 7),
+ INIT_REGMAP_IRQ(AXP809, PEK_RIS_EDGE, 4, 6),
+ INIT_REGMAP_IRQ(AXP809, PEK_FAL_EDGE, 4, 5),
+ INIT_REGMAP_IRQ(AXP809, PEK_SHORT, 4, 4),
+ INIT_REGMAP_IRQ(AXP809, PEK_LONG, 4, 3),
+ INIT_REGMAP_IRQ(AXP809, PEK_OVER_OFF, 4, 2),
+ INIT_REGMAP_IRQ(AXP809, GPIO1_INPUT, 4, 1),
+ INIT_REGMAP_IRQ(AXP809, GPIO0_INPUT, 4, 0),
+};
+
static const struct regmap_irq_chip axp152_regmap_irq_chip = {
.name = "axp152_irq_chip",
.status_base = AXP152_IRQ1_STATE,
@@ -428,6 +485,18 @@ static const struct regmap_irq_chip axp288_regmap_irq_chip = {
};
+static const struct regmap_irq_chip axp809_regmap_irq_chip = {
+ .name = "axp809",
+ .status_base = AXP20X_IRQ1_STATE,
+ .ack_base = AXP20X_IRQ1_STATE,
+ .mask_base = AXP20X_IRQ1_EN,
+ .mask_invert = true,
+ .init_ack_masked = true,
+ .irqs = axp809_regmap_irqs,
+ .num_irqs = ARRAY_SIZE(axp809_regmap_irqs),
+ .num_regs = 5,
+};
+
static struct mfd_cell axp20x_cells[] = {
{
.name = "axp20x-pek",
@@ -436,6 +505,11 @@ static struct mfd_cell axp20x_cells[] = {
}, {
.name = "axp20x-regulator",
}, {
+ .name = "axp20x-ac-power-supply",
+ .of_compatible = "x-powers,axp202-ac-power-supply",
+ .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
+ .resources = axp20x_ac_power_supply_resources,
+ }, {
.name = "axp20x-usb-power-supply",
.of_compatible = "x-powers,axp202-usb-power-supply",
.num_resources = ARRAY_SIZE(axp20x_usb_power_supply_resources),
@@ -572,6 +646,16 @@ static struct mfd_cell axp288_cells[] = {
},
};
+static struct mfd_cell axp809_cells[] = {
+ {
+ .name = "axp20x-pek",
+ .num_resources = ARRAY_SIZE(axp809_pek_resources),
+ .resources = axp809_pek_resources,
+ }, {
+ .name = "axp20x-regulator",
+ },
+};
+
static struct axp20x_dev *axp20x_pm_power_off;
static void axp20x_power_off(void)
{
@@ -631,6 +715,12 @@ int axp20x_match_device(struct axp20x_dev *axp20x)
axp20x->regmap_cfg = &axp288_regmap_config;
axp20x->regmap_irq_chip = &axp288_regmap_irq_chip;
break;
+ case AXP809_ID:
+ axp20x->nr_cells = ARRAY_SIZE(axp809_cells);
+ axp20x->cells = axp809_cells;
+ axp20x->regmap_cfg = &axp22x_regmap_config;
+ axp20x->regmap_irq_chip = &axp809_regmap_irq_chip;
+ break;
default:
dev_err(dev, "unsupported AXP20X ID %lu\n", axp20x->variant);
return -EINVAL;
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index 320aaefee..0d76d6901 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -82,8 +82,8 @@ static int bcm590xx_i2c_probe(struct i2c_client *i2c_pri,
goto err;
}
- ret = mfd_add_devices(&i2c_pri->dev, -1, bcm590xx_devs,
- ARRAY_SIZE(bcm590xx_devs), NULL, 0, NULL);
+ ret = devm_mfd_add_devices(&i2c_pri->dev, -1, bcm590xx_devs,
+ ARRAY_SIZE(bcm590xx_devs), NULL, 0, NULL);
if (ret < 0) {
dev_err(&i2c_pri->dev, "failed to add sub-devices: %d\n", ret);
goto err;
@@ -96,12 +96,6 @@ err:
return ret;
}
-static int bcm590xx_i2c_remove(struct i2c_client *i2c)
-{
- mfd_remove_devices(&i2c->dev);
- return 0;
-}
-
static const struct of_device_id bcm590xx_of_match[] = {
{ .compatible = "brcm,bcm59056" },
{ }
@@ -120,7 +114,6 @@ static struct i2c_driver bcm590xx_i2c_driver = {
.of_match_table = of_match_ptr(bcm590xx_of_match),
},
.probe = bcm590xx_i2c_probe,
- .remove = bcm590xx_i2c_remove,
.id_table = bcm590xx_i2c_id,
};
module_i2c_driver(bcm590xx_i2c_driver);
diff --git a/drivers/mfd/da9063-irq.c b/drivers/mfd/da9063-irq.c
index 263026346..7e903fcb8 100644
--- a/drivers/mfd/da9063-irq.c
+++ b/drivers/mfd/da9063-irq.c
@@ -25,14 +25,6 @@
#define DA9063_REG_EVENT_B_OFFSET 1
#define DA9063_REG_EVENT_C_OFFSET 2
#define DA9063_REG_EVENT_D_OFFSET 3
-#define EVENTS_BUF_LEN 4
-
-static const u8 mask_events_buf[] = { [0 ... (EVENTS_BUF_LEN - 1)] = ~0 };
-
-struct da9063_irq_data {
- u16 reg;
- u8 mask;
-};
static const struct regmap_irq da9063_irqs[] = {
/* DA9063 event A register */
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index ec4438ed2..14661ec5e 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -33,25 +33,25 @@
* This driver was tested with firmware revision A4.
*/
-#if defined(CONFIG_INPUT_DM355EVM) || defined(CONFIG_INPUT_DM355EVM_MODULE)
+#if IS_ENABLED(CONFIG_INPUT_DM355EVM)
#define msp_has_keyboard() true
#else
#define msp_has_keyboard() false
#endif
-#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_GPIO)
#define msp_has_leds() true
#else
#define msp_has_leds() false
#endif
-#if defined(CONFIG_RTC_DRV_DM355EVM) || defined(CONFIG_RTC_DRV_DM355EVM_MODULE)
+#if IS_ENABLED(CONFIG_RTC_DRV_DM355EVM)
#define msp_has_rtc() true
#else
#define msp_has_rtc() false
#endif
-#if defined(CONFIG_VIDEO_TVP514X) || defined(CONFIG_VIDEO_TVP514X_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_TVP514X)
#define msp_has_tvp() true
#else
#define msp_has_tvp() false
@@ -260,7 +260,7 @@ static int add_children(struct i2c_client *client)
/* GPIO-ish stuff */
dm355evm_msp_gpio.parent = &client->dev;
- status = gpiochip_add(&dm355evm_msp_gpio);
+ status = gpiochip_add_data(&dm355evm_msp_gpio, NULL);
if (status < 0)
return status;
diff --git a/drivers/mfd/hi6421-pmic-core.c b/drivers/mfd/hi6421-pmic-core.c
index f9ded45a9..3fd703fe3 100644
--- a/drivers/mfd/hi6421-pmic-core.c
+++ b/drivers/mfd/hi6421-pmic-core.c
@@ -76,8 +76,8 @@ static int hi6421_pmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmic);
- ret = mfd_add_devices(&pdev->dev, 0, hi6421_devs,
- ARRAY_SIZE(hi6421_devs), NULL, 0, NULL);
+ ret = devm_mfd_add_devices(&pdev->dev, 0, hi6421_devs,
+ ARRAY_SIZE(hi6421_devs), NULL, 0, NULL);
if (ret) {
dev_err(&pdev->dev, "add mfd devices failed: %d\n", ret);
return ret;
@@ -86,13 +86,6 @@ static int hi6421_pmic_probe(struct platform_device *pdev)
return 0;
}
-static int hi6421_pmic_remove(struct platform_device *pdev)
-{
- mfd_remove_devices(&pdev->dev);
-
- return 0;
-}
-
static const struct of_device_id of_hi6421_pmic_match_tbl[] = {
{ .compatible = "hisilicon,hi6421-pmic", },
{ },
@@ -105,7 +98,6 @@ static struct platform_driver hi6421_pmic_driver = {
.of_match_table = of_hi6421_pmic_match_tbl,
},
.probe = hi6421_pmic_probe,
- .remove = hi6421_pmic_remove,
};
module_platform_driver(hi6421_pmic_driver);
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
new file mode 100644
index 000000000..05ddc7882
--- /dev/null
+++ b/drivers/mfd/hi655x-pmic.c
@@ -0,0 +1,162 @@
+/*
+ * Device driver for MFD hi655x PMIC
+ *
+ * Copyright (c) 2016 Hisilicon.
+ *
+ * Authors:
+ * Chen Feng <puck.chen@hisilicon.com>
+ * Fei Wang <w.f@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/hi655x-pmic.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+static const struct mfd_cell hi655x_pmic_devs[] = {
+ { .name = "hi655x-regulator", },
+};
+
+static const struct regmap_irq hi655x_irqs[] = {
+ { .reg_offset = 0, .mask = OTMP_D1R_INT },
+ { .reg_offset = 0, .mask = VSYS_2P5_R_INT },
+ { .reg_offset = 0, .mask = VSYS_UV_D3R_INT },
+ { .reg_offset = 0, .mask = VSYS_6P0_D200UR_INT },
+ { .reg_offset = 0, .mask = PWRON_D4SR_INT },
+ { .reg_offset = 0, .mask = PWRON_D20F_INT },
+ { .reg_offset = 0, .mask = PWRON_D20R_INT },
+ { .reg_offset = 0, .mask = RESERVE_INT },
+};
+
+static const struct regmap_irq_chip hi655x_irq_chip = {
+ .name = "hi655x-pmic",
+ .irqs = hi655x_irqs,
+ .num_regs = 1,
+ .num_irqs = ARRAY_SIZE(hi655x_irqs),
+ .status_base = HI655X_IRQ_STAT_BASE,
+ .mask_base = HI655X_IRQ_MASK_BASE,
+};
+
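+/*
+ * 8-bit PMIC registers behind a 32-bit MMIO window; register offsets are
+ * spaced HI655X_STRIDE apart, and HI655X_BUS_ADDR() converts a register
+ * number to its bus offset.
+ */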
+static struct regmap_config hi655x_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = HI655X_STRIDE,
+ .val_bits = 8,
+ .max_register = HI655X_BUS_ADDR(0xFFF),
+};
+
+static void hi655x_local_irq_clear(struct regmap *map)
+{
+ int i;
+
+ regmap_write(map, HI655X_ANA_IRQM_BASE, HI655X_IRQ_CLR);
+ for (i = 0; i < HI655X_IRQ_ARRAY; i++) {
+ regmap_write(map, HI655X_IRQ_STAT_BASE + i * HI655X_STRIDE,
+ HI655X_IRQ_CLR);
+ }
+}
+
+static int hi655x_pmic_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct hi655x_pmic *pmic;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *base;
+
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
+ if (!pmic)
+ return -ENOMEM;
+ pmic->dev = dev;
+
+ pmic->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!pmic->res)
+ return -ENOENT;
+
+ base = devm_ioremap_resource(dev, pmic->res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ pmic->regmap = devm_regmap_init_mmio_clk(dev, NULL, base,
+ &hi655x_regmap_config);
+ if (IS_ERR(pmic->regmap))
+ return PTR_ERR(pmic->regmap);
+
+ regmap_read(pmic->regmap, HI655X_BUS_ADDR(HI655X_VER_REG), &pmic->ver);
+ if ((pmic->ver < PMU_VER_START) || (pmic->ver > PMU_VER_END)) {
+ dev_warn(dev, "PMU version %d unsupported\n", pmic->ver);
+ return -EINVAL;
+ }
+
+ hi655x_local_irq_clear(pmic->regmap);
+
+ pmic->gpio = of_get_named_gpio(np, "pmic-gpios", 0);
+ if (!gpio_is_valid(pmic->gpio)) {
+ dev_err(dev, "Failed to get the pmic-gpios\n");
+ return -ENODEV;
+ }
+
+ ret = devm_gpio_request_one(dev, pmic->gpio, GPIOF_IN,
+ "hi655x_pmic_irq");
+ if (ret < 0) {
+ dev_err(dev, "Failed to request gpio %d ret = %d\n",
+ pmic->gpio, ret);
+ return ret;
+ }
+
+ ret = regmap_add_irq_chip(pmic->regmap, gpio_to_irq(pmic->gpio),
+ IRQF_TRIGGER_LOW | IRQF_NO_SUSPEND, 0,
+ &hi655x_irq_chip, &pmic->irq_data);
+ if (ret) {
+ dev_err(dev, "Failed to obtain 'hi655x_pmic_irq' %d\n", ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pmic);
+
+ ret = mfd_add_devices(dev, PLATFORM_DEVID_AUTO, hi655x_pmic_devs,
+ ARRAY_SIZE(hi655x_pmic_devs), NULL, 0, NULL);
+ if (ret) {
+ dev_err(dev, "Failed to register device %d\n", ret);
+ regmap_del_irq_chip(gpio_to_irq(pmic->gpio), pmic->irq_data);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hi655x_pmic_remove(struct platform_device *pdev)
+{
+ struct hi655x_pmic *pmic = platform_get_drvdata(pdev);
+
+ regmap_del_irq_chip(gpio_to_irq(pmic->gpio), pmic->irq_data);
+ mfd_remove_devices(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id hi655x_pmic_match[] = {
+ { .compatible = "hisilicon,hi655x-pmic", },
+ {},
+};
+
+static struct platform_driver hi655x_pmic_driver = {
+ .driver = {
+ .name = "hi655x-pmic",
+ .of_match_table = of_match_ptr(hi655x_pmic_match),
+ },
+ .probe = hi655x_pmic_probe,
+ .remove = hi655x_pmic_remove,
+};
+module_platform_driver(hi655x_pmic_driver);
+
+MODULE_AUTHOR("Chen Feng <puck.chen@hisilicon.com>");
+MODULE_DESCRIPTION("Hisilicon hi655x PMIC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index c636b5f83..513cfc5c8 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -155,7 +155,7 @@ static int egpio_get(struct gpio_chip *chip, unsigned offset)
pr_debug("egpio_get_value(%d)\n", chip->base + offset);
- egpio = container_of(chip, struct egpio_chip, chip);
+ egpio = gpiochip_get_data(chip);
ei = dev_get_drvdata(egpio->dev);
bit = egpio_bit(ei, offset);
reg = egpio->reg_start + egpio_pos(ei, offset);
@@ -170,7 +170,7 @@ static int egpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct egpio_chip *egpio;
- egpio = container_of(chip, struct egpio_chip, chip);
+ egpio = gpiochip_get_data(chip);
return test_bit(offset, &egpio->is_out) ? -EINVAL : 0;
}
@@ -192,7 +192,7 @@ static void egpio_set(struct gpio_chip *chip, unsigned offset, int value)
pr_debug("egpio_set(%s, %d(%d), %d)\n",
chip->label, offset, offset+chip->base, value);
- egpio = container_of(chip, struct egpio_chip, chip);
+ egpio = gpiochip_get_data(chip);
ei = dev_get_drvdata(egpio->dev);
bit = egpio_bit(ei, offset);
pos = egpio_pos(ei, offset);
@@ -216,7 +216,7 @@ static int egpio_direction_output(struct gpio_chip *chip,
{
struct egpio_chip *egpio;
- egpio = container_of(chip, struct egpio_chip, chip);
+ egpio = gpiochip_get_data(chip);
if (test_bit(offset, &egpio->is_out)) {
egpio_set(chip, offset, value);
return 0;
@@ -330,7 +330,7 @@ static int __init egpio_probe(struct platform_device *pdev)
chip->base = pdata->chip[i].gpio_base;
chip->ngpio = pdata->chip[i].num_gpios;
- gpiochip_add(chip);
+ gpiochip_add_data(chip, &ei->chip[i]);
}
/* Set initial pin values */
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index bd6b96d07..3f9eee5f8 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -227,8 +227,7 @@ static irqreturn_t htcpld_handler(int irq, void *dev)
static void htcpld_chip_set(struct gpio_chip *chip, unsigned offset, int val)
{
struct i2c_client *client;
- struct htcpld_chip *chip_data =
- container_of(chip, struct htcpld_chip, chip_out);
+ struct htcpld_chip *chip_data = gpiochip_get_data(chip);
unsigned long flags;
client = chip_data->client;
@@ -257,14 +256,12 @@ static void htcpld_chip_set_ni(struct work_struct *work)
static int htcpld_chip_get(struct gpio_chip *chip, unsigned offset)
{
- struct htcpld_chip *chip_data;
+ struct htcpld_chip *chip_data = gpiochip_get_data(chip);
u8 cache;
if (!strncmp(chip->label, "htcpld-out", 10)) {
- chip_data = container_of(chip, struct htcpld_chip, chip_out);
cache = chip_data->cache_out;
} else if (!strncmp(chip->label, "htcpld-in", 9)) {
- chip_data = container_of(chip, struct htcpld_chip, chip_in);
cache = chip_data->cache_in;
} else
return -EINVAL;
@@ -291,9 +288,7 @@ static int htcpld_direction_input(struct gpio_chip *chip,
static int htcpld_chip_to_irq(struct gpio_chip *chip, unsigned offset)
{
- struct htcpld_chip *chip_data;
-
- chip_data = container_of(chip, struct htcpld_chip, chip_in);
+ struct htcpld_chip *chip_data = gpiochip_get_data(chip);
if (offset < chip_data->nirqs)
return chip_data->irq_start + offset;
@@ -451,14 +446,14 @@ static int htcpld_register_chip_gpio(
gpio_chip->ngpio = plat_chip_data->num_gpios;
/* Add the GPIO chips */
- ret = gpiochip_add(&(chip->chip_out));
+ ret = gpiochip_add_data(&(chip->chip_out), chip);
if (ret) {
dev_warn(dev, "Unable to register output GPIOs for 0x%x: %d\n",
plat_chip_data->addr, ret);
return ret;
}
- ret = gpiochip_add(&(chip->chip_in));
+ ret = gpiochip_add_data(&(chip->chip_in), chip);
if (ret) {
dev_warn(dev, "Unable to register input GPIOs for 0x%x: %d\n",
plat_chip_data->addr, ret);
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
index 5a8d9c766..7ddc4a956 100644
--- a/drivers/mfd/intel-lpss-acpi.c
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -31,13 +31,9 @@ static struct property_entry spt_i2c_properties[] = {
{ },
};
-static struct property_set spt_i2c_pset = {
- .properties = spt_i2c_properties,
-};
-
static const struct intel_lpss_platform_info spt_i2c_info = {
.clk_rate = 120000000,
- .pset = &spt_i2c_pset,
+ .properties = spt_i2c_properties,
};
static const struct intel_lpss_platform_info bxt_info = {
@@ -51,13 +47,9 @@ static struct property_entry bxt_i2c_properties[] = {
{ },
};
-static struct property_set bxt_i2c_pset = {
- .properties = bxt_i2c_properties,
-};
-
static const struct intel_lpss_platform_info bxt_i2c_info = {
.clk_rate = 133000000,
- .pset = &bxt_i2c_pset,
+ .properties = bxt_i2c_properties,
};
static const struct acpi_device_id intel_lpss_acpi_ids[] = {
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index a19e57118..1d79a3c93 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -71,13 +71,9 @@ static struct property_entry spt_i2c_properties[] = {
{ },
};
-static struct property_set spt_i2c_pset = {
- .properties = spt_i2c_properties,
-};
-
static const struct intel_lpss_platform_info spt_i2c_info = {
.clk_rate = 120000000,
- .pset = &spt_i2c_pset,
+ .properties = spt_i2c_properties,
};
static struct property_entry uart_properties[] = {
@@ -87,14 +83,10 @@ static struct property_entry uart_properties[] = {
{ },
};
-static struct property_set uart_pset = {
- .properties = uart_properties,
-};
-
static const struct intel_lpss_platform_info spt_uart_info = {
.clk_rate = 120000000,
.clk_con_id = "baudclk",
- .pset = &uart_pset,
+ .properties = uart_properties,
};
static const struct intel_lpss_platform_info bxt_info = {
@@ -104,7 +96,7 @@ static const struct intel_lpss_platform_info bxt_info = {
static const struct intel_lpss_platform_info bxt_uart_info = {
.clk_rate = 100000000,
.clk_con_id = "baudclk",
- .pset = &uart_pset,
+ .properties = uart_properties,
};
static struct property_entry bxt_i2c_properties[] = {
@@ -114,13 +106,9 @@ static struct property_entry bxt_i2c_properties[] = {
{ },
};
-static struct property_set bxt_i2c_pset = {
- .properties = bxt_i2c_properties,
-};
-
static const struct intel_lpss_platform_info bxt_i2c_info = {
.clk_rate = 133000000,
- .pset = &bxt_i2c_pset,
+ .properties = bxt_i2c_properties,
};
static const struct pci_device_id intel_lpss_pci_ids[] = {
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 807a3e3ec..41b113875 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -338,8 +338,8 @@ static int intel_lpss_register_clock(struct intel_lpss *lpss)
return 0;
/* Root clock */
- clk = clk_register_fixed_rate(NULL, dev_name(lpss->dev), NULL,
- CLK_IS_ROOT, lpss->info->clk_rate);
+ clk = clk_register_fixed_rate(NULL, dev_name(lpss->dev), NULL, 0,
+ lpss->info->clk_rate);
if (IS_ERR(clk))
return PTR_ERR(clk);
@@ -409,7 +409,7 @@ int intel_lpss_probe(struct device *dev,
if (ret)
return ret;
- lpss->cell->pset = info->pset;
+ lpss->cell->properties = info->properties;
intel_lpss_init_dev(lpss);
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
index 0dcea9eb2..694116630 100644
--- a/drivers/mfd/intel-lpss.h
+++ b/drivers/mfd/intel-lpss.h
@@ -16,14 +16,14 @@
struct device;
struct resource;
-struct property_set;
+struct property_entry;
struct intel_lpss_platform_info {
struct resource *mem;
int irq;
unsigned long clk_rate;
const char *clk_con_id;
- struct property_set *pset;
+ struct property_entry *properties;
};
int intel_lpss_probe(struct device *dev,
diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
index 7450f5d87..7946d6e38 100644
--- a/drivers/mfd/intel_quark_i2c_gpio.c
+++ b/drivers/mfd/intel_quark_i2c_gpio.c
@@ -53,7 +53,7 @@
#define INTEL_QUARK_I2C_CLK_HZ 33000000
struct intel_quark_mfd {
- struct pci_dev *pdev;
+ struct device *dev;
struct clk *i2c_clk;
struct clk_lookup *i2c_clk_lookup;
};
@@ -123,14 +123,14 @@ static const struct pci_device_id intel_quark_mfd_ids[] = {
};
MODULE_DEVICE_TABLE(pci, intel_quark_mfd_ids);
-static int intel_quark_register_i2c_clk(struct intel_quark_mfd *quark_mfd)
+static int intel_quark_register_i2c_clk(struct device *dev)
{
- struct pci_dev *pdev = quark_mfd->pdev;
+ struct intel_quark_mfd *quark_mfd = dev_get_drvdata(dev);
struct clk *i2c_clk;
- i2c_clk = clk_register_fixed_rate(&pdev->dev,
+ i2c_clk = clk_register_fixed_rate(dev,
INTEL_QUARK_I2C_CONTROLLER_CLK, NULL,
- CLK_IS_ROOT, INTEL_QUARK_I2C_CLK_HZ);
+ 0, INTEL_QUARK_I2C_CLK_HZ);
if (IS_ERR(i2c_clk))
return PTR_ERR(i2c_clk);
@@ -140,16 +140,16 @@ static int intel_quark_register_i2c_clk(struct intel_quark_mfd *quark_mfd)
if (!quark_mfd->i2c_clk_lookup) {
clk_unregister(quark_mfd->i2c_clk);
- dev_err(&pdev->dev, "Fixed clk register failed\n");
+ dev_err(dev, "Fixed clk register failed\n");
return -ENOMEM;
}
return 0;
}
-static void intel_quark_unregister_i2c_clk(struct pci_dev *pdev)
+static void intel_quark_unregister_i2c_clk(struct device *dev)
{
- struct intel_quark_mfd *quark_mfd = dev_get_drvdata(&pdev->dev);
+ struct intel_quark_mfd *quark_mfd = dev_get_drvdata(dev);
if (!quark_mfd->i2c_clk_lookup)
return;
@@ -220,8 +220,7 @@ static int intel_quark_gpio_setup(struct pci_dev *pdev, struct mfd_cell *cell)
return -ENOMEM;
/* Set the properties for portA */
- pdata->properties->node = NULL;
- pdata->properties->name = "intel-quark-x1000-gpio-portA";
+ pdata->properties->fwnode = NULL;
pdata->properties->idx = 0;
pdata->properties->ngpio = INTEL_QUARK_MFD_NGPIO;
pdata->properties->gpio_base = INTEL_QUARK_MFD_GPIO_BASE;
@@ -248,10 +247,10 @@ static int intel_quark_mfd_probe(struct pci_dev *pdev,
if (!quark_mfd)
return -ENOMEM;
- quark_mfd->pdev = pdev;
+ quark_mfd->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, quark_mfd);
- ret = intel_quark_register_i2c_clk(quark_mfd);
+ ret = intel_quark_register_i2c_clk(&pdev->dev);
if (ret)
return ret;
@@ -272,13 +271,13 @@ static int intel_quark_mfd_probe(struct pci_dev *pdev,
return 0;
err_unregister_i2c_clk:
- intel_quark_unregister_i2c_clk(pdev);
+ intel_quark_unregister_i2c_clk(&pdev->dev);
return ret;
}
static void intel_quark_mfd_remove(struct pci_dev *pdev)
{
- intel_quark_unregister_i2c_clk(pdev);
+ intel_quark_unregister_i2c_clk(&pdev->dev);
mfd_remove_devices(&pdev->dev);
}
diff --git a/drivers/mfd/lp3943.c b/drivers/mfd/lp3943.c
index eecbb13de..65a2a8f14 100644
--- a/drivers/mfd/lp3943.c
+++ b/drivers/mfd/lp3943.c
@@ -123,16 +123,9 @@ static int lp3943_probe(struct i2c_client *cl, const struct i2c_device_id *id)
lp3943->mux_cfg = lp3943_mux_cfg;
i2c_set_clientdata(cl, lp3943);
- return mfd_add_devices(dev, -1, lp3943_devs, ARRAY_SIZE(lp3943_devs),
- NULL, 0, NULL);
-}
-
-static int lp3943_remove(struct i2c_client *cl)
-{
- struct lp3943 *lp3943 = i2c_get_clientdata(cl);
-
- mfd_remove_devices(lp3943->dev);
- return 0;
+ return devm_mfd_add_devices(dev, -1, lp3943_devs,
+ ARRAY_SIZE(lp3943_devs),
+ NULL, 0, NULL);
}
static const struct i2c_device_id lp3943_ids[] = {
@@ -151,7 +144,6 @@ MODULE_DEVICE_TABLE(of, lp3943_of_match);
static struct i2c_driver lp3943_driver = {
.probe = lp3943_probe,
- .remove = lp3943_remove,
.driver = {
.name = "lp3943",
.of_match_table = of_match_ptr(lp3943_of_match),
diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c
index c7a9825aa..792d51bae 100644
--- a/drivers/mfd/lp8788-irq.c
+++ b/drivers/mfd/lp8788-irq.c
@@ -112,7 +112,7 @@ static irqreturn_t lp8788_irq_handler(int irq, void *ptr)
struct lp8788_irq_data *irqd = ptr;
struct lp8788 *lp = irqd->lp;
u8 status[NUM_REGS], addr, mask;
- bool handled;
+ bool handled = false;
int i;
if (lp8788_read_multi_bytes(lp, LP8788_INT_1, status, NUM_REGS))
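Initializing handled matters because the handler's return value is derived
from it; an uninitialized stack bool could spuriously report IRQ_HANDLED for
an interrupt the device never raised. The shape of the fix, sketched with
hypothetical my_source_pending()/my_handle_source() helpers:

#include <linux/interrupt.h>

#define NUM_SOURCES	8	/* placeholder source count */

static irqreturn_t my_irq_handler(int irq, void *ptr)
{
	bool handled = false;	/* must start false, not indeterminate */
	int i;

	for (i = 0; i < NUM_SOURCES; i++) {
		if (my_source_pending(ptr, i)) {
			my_handle_source(ptr, i);
			handled = true;
		}
	}

	return handled ? IRQ_HANDLED : IRQ_NONE;
}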
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
new file mode 100644
index 000000000..f32fbb8e8
--- /dev/null
+++ b/drivers/mfd/max77620.c
@@ -0,0 +1,592 @@
+/*
+ * Maxim MAX77620 MFD Driver
+ *
+ * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author:
+ * Laxman Dewangan <ldewangan@nvidia.com>
+ * Chaitanya Bandi <bandik@nvidia.com>
+ * Mallikarjun Kasoju <mkasoju@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/****************** Terminology used in driver ********************
+ * Here is some terminology from the datasheet, for quick reference:
+ * Flexible Power Sequence (FPS):
+ * The Flexible Power Sequencer (FPS) allows each regulator to power up under
+ * hardware or software control. Additionally, each regulator can power on
+ * independently or among a group of other regulators with adjustable
+ * power-up and power-down delays (sequencing). GPIO1, GPIO2, and GPIO3 can
+ * be programmed to be part of a sequence, allowing external regulators to be
+ * sequenced along with internal regulators. The 32KHz clock can be
+ * programmed to be part of a sequence.
+ * There are 3 FPS configuration registers, and each resource is configured
+ * to one of these FPS or to no FPS.
+ */
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max77620.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+static struct resource gpio_resources[] = {
+ DEFINE_RES_IRQ(MAX77620_IRQ_TOP_GPIO),
+};
+
+static struct resource power_resources[] = {
+ DEFINE_RES_IRQ(MAX77620_IRQ_LBT_MBATLOW),
+};
+
+static struct resource rtc_resources[] = {
+ DEFINE_RES_IRQ(MAX77620_IRQ_TOP_RTC),
+};
+
+static struct resource thermal_resources[] = {
+ DEFINE_RES_IRQ(MAX77620_IRQ_LBT_TJALRM1),
+ DEFINE_RES_IRQ(MAX77620_IRQ_LBT_TJALRM2),
+};
+
+static const struct regmap_irq max77620_top_irqs[] = {
+ REGMAP_IRQ_REG(MAX77620_IRQ_TOP_GLBL, 0, MAX77620_IRQ_TOP_GLBL_MASK),
+ REGMAP_IRQ_REG(MAX77620_IRQ_TOP_SD, 0, MAX77620_IRQ_TOP_SD_MASK),
+ REGMAP_IRQ_REG(MAX77620_IRQ_TOP_LDO, 0, MAX77620_IRQ_TOP_LDO_MASK),
+ REGMAP_IRQ_REG(MAX77620_IRQ_TOP_GPIO, 0, MAX77620_IRQ_TOP_GPIO_MASK),
+ REGMAP_IRQ_REG(MAX77620_IRQ_TOP_RTC, 0, MAX77620_IRQ_TOP_RTC_MASK),
+ REGMAP_IRQ_REG(MAX77620_IRQ_TOP_32K, 0, MAX77620_IRQ_TOP_32K_MASK),
+ REGMAP_IRQ_REG(MAX77620_IRQ_TOP_ONOFF, 0, MAX77620_IRQ_TOP_ONOFF_MASK),
+ REGMAP_IRQ_REG(MAX77620_IRQ_LBT_MBATLOW, 1, MAX77620_IRQ_LBM_MASK),
+ REGMAP_IRQ_REG(MAX77620_IRQ_LBT_TJALRM1, 1, MAX77620_IRQ_TJALRM1_MASK),
+ REGMAP_IRQ_REG(MAX77620_IRQ_LBT_TJALRM2, 1, MAX77620_IRQ_TJALRM2_MASK),
+};
+
+static const struct mfd_cell max77620_children[] = {
+ { .name = "max77620-pinctrl", },
+ { .name = "max77620-clock", },
+ { .name = "max77620-pmic", },
+ { .name = "max77620-watchdog", },
+ {
+ .name = "max77620-gpio",
+ .resources = gpio_resources,
+ .num_resources = ARRAY_SIZE(gpio_resources),
+ }, {
+ .name = "max77620-rtc",
+ .resources = rtc_resources,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ }, {
+ .name = "max77620-power",
+ .resources = power_resources,
+ .num_resources = ARRAY_SIZE(power_resources),
+ }, {
+ .name = "max77620-thermal",
+ .resources = thermal_resources,
+ .num_resources = ARRAY_SIZE(thermal_resources),
+ },
+};
+
+static const struct mfd_cell max20024_children[] = {
+ { .name = "max20024-pinctrl", },
+ { .name = "max77620-clock", },
+ { .name = "max20024-pmic", },
+ { .name = "max77620-watchdog", },
+ {
+ .name = "max77620-gpio",
+ .resources = gpio_resources,
+ .num_resources = ARRAY_SIZE(gpio_resources),
+ }, {
+ .name = "max77620-rtc",
+ .resources = rtc_resources,
+ .num_resources = ARRAY_SIZE(rtc_resources),
+ }, {
+ .name = "max20024-power",
+ .resources = power_resources,
+ .num_resources = ARRAY_SIZE(power_resources),
+ },
+};
+
+static struct regmap_irq_chip max77620_top_irq_chip = {
+ .name = "max77620-top",
+ .irqs = max77620_top_irqs,
+ .num_irqs = ARRAY_SIZE(max77620_top_irqs),
+ .num_regs = 2,
+ .status_base = MAX77620_REG_IRQTOP,
+ .mask_base = MAX77620_REG_IRQTOPM,
+};
+
+static const struct regmap_range max77620_readable_ranges[] = {
+ regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_DVSSD4),
+};
+
+static const struct regmap_access_table max77620_readable_table = {
+ .yes_ranges = max77620_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(max77620_readable_ranges),
+};
+
+static const struct regmap_range max20024_readable_ranges[] = {
+ regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_DVSSD4),
+ regmap_reg_range(MAX20024_REG_MAX_ADD, MAX20024_REG_MAX_ADD),
+};
+
+static const struct regmap_access_table max20024_readable_table = {
+ .yes_ranges = max20024_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(max20024_readable_ranges),
+};
+
+static const struct regmap_range max77620_writable_ranges[] = {
+ regmap_reg_range(MAX77620_REG_CNFGGLBL1, MAX77620_REG_DVSSD4),
+};
+
+static const struct regmap_access_table max77620_writable_table = {
+ .yes_ranges = max77620_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(max77620_writable_ranges),
+};
+
+static const struct regmap_range max77620_cacheable_ranges[] = {
+ regmap_reg_range(MAX77620_REG_SD0_CFG, MAX77620_REG_LDO_CFG3),
+ regmap_reg_range(MAX77620_REG_FPS_CFG0, MAX77620_REG_FPS_SD3),
+};
+
+static const struct regmap_access_table max77620_volatile_table = {
+ .no_ranges = max77620_cacheable_ranges,
+ .n_no_ranges = ARRAY_SIZE(max77620_cacheable_ranges),
+};
+
+static const struct regmap_config max77620_regmap_config = {
+ .name = "power-slave",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX77620_REG_DVSSD4 + 1,
+ .cache_type = REGCACHE_RBTREE,
+ .rd_table = &max77620_readable_table,
+ .wr_table = &max77620_writable_table,
+ .volatile_table = &max77620_volatile_table,
+};
+
+static const struct regmap_config max20024_regmap_config = {
+ .name = "power-slave",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX20024_REG_MAX_ADD + 1,
+ .cache_type = REGCACHE_RBTREE,
+ .rd_table = &max20024_readable_table,
+ .wr_table = &max77620_writable_table,
+ .volatile_table = &max77620_volatile_table,
+};
+
+/* max77620_get_fps_period_reg_value: Get the FPS bit field value for the
+ * requested period.
+ * MAX77620 supports FPS periods of 40, 80, 160, 320, 540, 1280, 2560
+ * and 5120 microseconds. MAX20024 supports FPS periods of 20, 40, 80,
+ * 160, 320, 540, 1280 and 2560 microseconds.
+ * The FPS register has a 3-bit field to set the FPS period as
+ * bits max77620 max20024
+ * 000 40 20
+ * 001 80 40
+ * :::
+ */
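+/*
+ * Worked example, following the table above: on MAX77620 the minimum
+ * period is 40 us, so for a requested period of 320 us the loop below
+ * doubles 40 -> 80 -> 160 -> 320 and returns i = 3, i.e. bit field 011.
+ */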
+static int max77620_get_fps_period_reg_value(struct max77620_chip *chip,
+ int tperiod)
+{
+ int fps_min_period;
+ int i;
+
+ switch (chip->chip_id) {
+ case MAX20024:
+ fps_min_period = MAX20024_FPS_PERIOD_MIN_US;
+ break;
+ case MAX77620:
+ fps_min_period = MAX77620_FPS_PERIOD_MIN_US;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 7; i++) {
+ if (fps_min_period >= tperiod)
+ return i;
+ fps_min_period *= 2;
+ }
+
+ return i;
+}
+
+/* max77620_config_fps: Configure FPS configuration registers
+ * based on platform specific information.
+ */
+static int max77620_config_fps(struct max77620_chip *chip,
+ struct device_node *fps_np)
+{
+ struct device *dev = chip->dev;
+ unsigned int mask = 0, config = 0;
+ u32 fps_max_period;
+ u32 param_val;
+ int tperiod, fps_id;
+ int ret;
+ char fps_name[10];
+
+ switch (chip->chip_id) {
+ case MAX20024:
+ fps_max_period = MAX20024_FPS_PERIOD_MAX_US;
+ break;
+ case MAX77620:
+ fps_max_period = MAX77620_FPS_PERIOD_MAX_US;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) {
+ sprintf(fps_name, "fps%d", fps_id);
+ if (!strcmp(fps_np->name, fps_name))
+ break;
+ }
+
+ if (fps_id == MAX77620_FPS_COUNT) {
+ dev_err(dev, "FPS node name %s is not valid\n", fps_np->name);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(fps_np, "maxim,shutdown-fps-time-period-us",
+ &param_val);
+ if (!ret) {
+ mask |= MAX77620_FPS_TIME_PERIOD_MASK;
+ chip->shutdown_fps_period[fps_id] = min(param_val,
+ fps_max_period);
+ tperiod = max77620_get_fps_period_reg_value(chip,
+ chip->shutdown_fps_period[fps_id]);
+ config |= tperiod << MAX77620_FPS_TIME_PERIOD_SHIFT;
+ }
+
+ ret = of_property_read_u32(fps_np, "maxim,suspend-fps-time-period-us",
+ &param_val);
+ if (!ret)
+ chip->suspend_fps_period[fps_id] = min(param_val,
+ fps_max_period);
+
+ ret = of_property_read_u32(fps_np, "maxim,fps-event-source",
+ &param_val);
+ if (!ret) {
+ if (param_val > 2) {
+ dev_err(dev, "FPS%d event-source invalid\n", fps_id);
+ return -EINVAL;
+ }
+ mask |= MAX77620_FPS_EN_SRC_MASK;
+ config |= param_val << MAX77620_FPS_EN_SRC_SHIFT;
+ if (param_val == 2) {
+ mask |= MAX77620_FPS_ENFPS_SW_MASK;
+ config |= MAX77620_FPS_ENFPS_SW;
+ }
+ }
+
+ if (!chip->sleep_enable && !chip->enable_global_lpm) {
+ ret = of_property_read_u32(fps_np,
+ "maxim,device-state-on-disabled-event",
+ &param_val);
+ if (!ret) {
+ if (param_val == 0)
+ chip->sleep_enable = true;
+ else if (param_val == 1)
+ chip->enable_global_lpm = true;
+ }
+ }
+
+ ret = regmap_update_bits(chip->rmap, MAX77620_REG_FPS_CFG0 + fps_id,
+ mask, config);
+ if (ret < 0) {
+ dev_err(dev, "Failed to update FPS CFG: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max77620_initialise_fps(struct max77620_chip *chip)
+{
+ struct device *dev = chip->dev;
+ struct device_node *fps_np, *fps_child;
+ u8 config;
+ int fps_id;
+ int ret;
+
+ for (fps_id = 0; fps_id < MAX77620_FPS_COUNT; fps_id++) {
+ chip->shutdown_fps_period[fps_id] = -1;
+ chip->suspend_fps_period[fps_id] = -1;
+ }
+
+ fps_np = of_get_child_by_name(dev->of_node, "fps");
+ if (!fps_np)
+ goto skip_fps;
+
+ for_each_child_of_node(fps_np, fps_child) {
+ ret = max77620_config_fps(chip, fps_child);
+ if (ret < 0)
+ return ret;
+ }
+
+ config = chip->enable_global_lpm ? MAX77620_ONOFFCNFG2_SLP_LPM_MSK : 0;
+ ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
+ MAX77620_ONOFFCNFG2_SLP_LPM_MSK, config);
+ if (ret < 0) {
+ dev_err(dev, "Failed to update SLP_LPM: %d\n", ret);
+ return ret;
+ }
+
+skip_fps:
+ /* Enable wake on EN0 pin */
+ ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
+ MAX77620_ONOFFCNFG2_WK_EN0,
+ MAX77620_ONOFFCNFG2_WK_EN0);
+ if (ret < 0) {
+ dev_err(dev, "Failed to update WK_EN0: %d\n", ret);
+ return ret;
+ }
+
+ /* For MAX20024, SLPEN will be POR reset if CLRSE is b11 */
+ if ((chip->chip_id == MAX20024) && chip->sleep_enable) {
+ config = MAX77620_ONOFFCNFG1_SLPEN | MAX20024_ONOFFCNFG1_CLRSE;
+ ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG1,
+ config, config);
+ if (ret < 0) {
+ dev_err(dev, "Failed to update SLPEN: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int max77620_read_es_version(struct max77620_chip *chip)
+{
+ unsigned int val;
+ u8 cid_val[6];
+ int i;
+ int ret;
+
+ for (i = MAX77620_REG_CID0; i <= MAX77620_REG_CID5; i++) {
+ ret = regmap_read(chip->rmap, i, &val);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read CID: %d\n", ret);
+ return ret;
+ }
+ dev_dbg(chip->dev, "CID%d: 0x%02x\n",
+ i - MAX77620_REG_CID0, val);
+ cid_val[i - MAX77620_REG_CID0] = val;
+ }
+
+ /* CID4 is OTP Version and CID5 is ES version */
+ dev_info(chip->dev, "PMIC Version OTP:0x%02X and ES:0x%X\n",
+ cid_val[4], MAX77620_CID5_DIDM(cid_val[5]));
+
+ return ret;
+}
+
+static int max77620_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct regmap_config *rmap_config;
+ struct max77620_chip *chip;
+ const struct mfd_cell *mfd_cells;
+ int n_mfd_cells;
+ int ret;
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, chip);
+ chip->dev = &client->dev;
+ chip->irq_base = -1;
+ chip->chip_irq = client->irq;
+ chip->chip_id = (enum max77620_chip_id)id->driver_data;
+
+ switch (chip->chip_id) {
+ case MAX77620:
+ mfd_cells = max77620_children;
+ n_mfd_cells = ARRAY_SIZE(max77620_children);
+ rmap_config = &max77620_regmap_config;
+ break;
+ case MAX20024:
+ mfd_cells = max20024_children;
+ n_mfd_cells = ARRAY_SIZE(max20024_children);
+ rmap_config = &max20024_regmap_config;
+ break;
+ default:
+ dev_err(chip->dev, "ChipID is invalid %d\n", chip->chip_id);
+ return -EINVAL;
+ }
+
+ chip->rmap = devm_regmap_init_i2c(client, rmap_config);
+ if (IS_ERR(chip->rmap)) {
+ ret = PTR_ERR(chip->rmap);
+ dev_err(chip->dev, "Failed to intialise regmap: %d\n", ret);
+ return ret;
+ }
+
+ ret = max77620_read_es_version(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_regmap_add_irq_chip(chip->dev, chip->rmap, client->irq,
+ IRQF_ONESHOT | IRQF_SHARED,
+ chip->irq_base, &max77620_top_irq_chip,
+ &chip->top_irq_data);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add regmap irq: %d\n", ret);
+ return ret;
+ }
+
+ ret = max77620_initialise_fps(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_mfd_add_devices(chip->dev, PLATFORM_DEVID_NONE,
+ mfd_cells, n_mfd_cells, NULL, 0,
+ regmap_irq_get_domain(chip->top_irq_data));
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add MFD children: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int max77620_set_fps_period(struct max77620_chip *chip,
+ int fps_id, int time_period)
+{
+ int period = max77620_get_fps_period_reg_value(chip, time_period);
+ int ret;
+
+ ret = regmap_update_bits(chip->rmap, MAX77620_REG_FPS_CFG0 + fps_id,
+ MAX77620_FPS_TIME_PERIOD_MASK,
+ period << MAX77620_FPS_TIME_PERIOD_SHIFT);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to update FPS period: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max77620_i2c_suspend(struct device *dev)
+{
+ struct max77620_chip *chip = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned int config;
+ int fps;
+ int ret;
+
+ for (fps = 0; fps < MAX77620_FPS_COUNT; fps++) {
+ if (chip->suspend_fps_period[fps] < 0)
+ continue;
+
+ ret = max77620_set_fps_period(chip, fps,
+ chip->suspend_fps_period[fps]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * For MAX20024: there is no need to configure SLPEN on suspend as
+ * it will be configured during init.
+ */
+ if (chip->chip_id == MAX20024)
+ goto out;
+
+ config = (chip->sleep_enable) ? MAX77620_ONOFFCNFG1_SLPEN : 0;
+ ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG1,
+ MAX77620_ONOFFCNFG1_SLPEN,
+ config);
+ if (ret < 0) {
+ dev_err(dev, "Failed to configure sleep in suspend: %d\n", ret);
+ return ret;
+ }
+
+ /* Disable WK_EN0 */
+ ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
+ MAX77620_ONOFFCNFG2_WK_EN0, 0);
+ if (ret < 0) {
+ dev_err(dev, "Failed to configure WK_EN in suspend: %d\n", ret);
+ return ret;
+ }
+
+out:
+ disable_irq(client->irq);
+
+ return 0;
+}
+
+static int max77620_i2c_resume(struct device *dev)
+{
+ struct max77620_chip *chip = dev_get_drvdata(dev);
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+ int fps;
+
+ for (fps = 0; fps < MAX77620_FPS_COUNT; fps++) {
+ if (chip->shutdown_fps_period[fps] < 0)
+ continue;
+
+ ret = max77620_set_fps_period(chip, fps,
+ chip->shutdown_fps_period[fps]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * For MAX20024: there is no need to configure WK_EN0 on resume as
+ * it is configured during init.
+ */
+ if (chip->chip_id == MAX20024)
+ goto out;
+
+ /* Enable WK_EN0 */
+ ret = regmap_update_bits(chip->rmap, MAX77620_REG_ONOFFCNFG2,
+ MAX77620_ONOFFCNFG2_WK_EN0,
+ MAX77620_ONOFFCNFG2_WK_EN0);
+ if (ret < 0) {
+ dev_err(dev, "Failed to configure WK_EN0 n resume: %d\n", ret);
+ return ret;
+ }
+
+out:
+ enable_irq(client->irq);
+
+ return 0;
+}
+#endif
+
+static const struct i2c_device_id max77620_id[] = {
+ {"max77620", MAX77620},
+ {"max20024", MAX20024},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, max77620_id);
+
+static const struct dev_pm_ops max77620_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(max77620_i2c_suspend, max77620_i2c_resume)
+};
+
+static struct i2c_driver max77620_driver = {
+ .driver = {
+ .name = "max77620",
+ .pm = &max77620_pm_ops,
+ },
+ .probe = max77620_probe,
+ .id_table = max77620_id,
+};
+
+module_i2c_driver(max77620_driver);
+
+MODULE_DESCRIPTION("MAX77620/MAX20024 Multi Function Device Core Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_AUTHOR("Chaitanya Bandi <bandik@nvidia.com>");
+MODULE_AUTHOR("Mallikarjun Kasoju <mkasoju@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index c1aff46e8..7b68ed72e 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -2,7 +2,7 @@
* max77686.c - mfd core driver for the Maxim 77686/802
*
* Copyright (C) 2012 Samsung Electronics
- * Chiwoong Byun <woong.byun@smasung.com>
+ * Chiwoong Byun <woong.byun@samsung.com>
* Jonghwa Lee <jonghwa3.lee@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -230,38 +230,24 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
return -ENODEV;
}
- ret = regmap_add_irq_chip(max77686->regmap, max77686->irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
- IRQF_SHARED, 0, irq_chip,
- &max77686->irq_data);
+ ret = devm_regmap_add_irq_chip(&i2c->dev, max77686->regmap,
+ max77686->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
+ IRQF_SHARED, 0, irq_chip,
+ &max77686->irq_data);
if (ret < 0) {
dev_err(&i2c->dev, "failed to add PMIC irq chip: %d\n", ret);
return ret;
}
- ret = mfd_add_devices(max77686->dev, -1, cells, n_devs, NULL, 0, NULL);
+ ret = devm_mfd_add_devices(max77686->dev, -1, cells, n_devs, NULL,
+ 0, NULL);
if (ret < 0) {
dev_err(&i2c->dev, "failed to add MFD devices: %d\n", ret);
- goto err_del_irqc;
+ return ret;
}
return 0;
-
-err_del_irqc:
- regmap_del_irq_chip(max77686->irq, max77686->irq_data);
-
- return ret;
-}
-
-static int max77686_i2c_remove(struct i2c_client *i2c)
-{
- struct max77686_dev *max77686 = i2c_get_clientdata(i2c);
-
- mfd_remove_devices(max77686->dev);
-
- regmap_del_irq_chip(max77686->irq, max77686->irq_data);
-
- return 0;
}
static const struct i2c_device_id max77686_i2c_id[] = {
@@ -317,22 +303,10 @@ static struct i2c_driver max77686_i2c_driver = {
.of_match_table = of_match_ptr(max77686_pmic_dt_match),
},
.probe = max77686_i2c_probe,
- .remove = max77686_i2c_remove,
.id_table = max77686_i2c_id,
};
-static int __init max77686_i2c_init(void)
-{
- return i2c_add_driver(&max77686_i2c_driver);
-}
-/* init early so consumer devices can complete system boot */
-subsys_initcall(max77686_i2c_init);
-
-static void __exit max77686_i2c_exit(void)
-{
- i2c_del_driver(&max77686_i2c_driver);
-}
-module_exit(max77686_i2c_exit);
+module_i2c_driver(max77686_i2c_driver);
MODULE_DESCRIPTION("MAXIM 77686/802 multi-function core driver");
MODULE_AUTHOR("Chiwoong Byun <woong.byun@samsung.com>");
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index b83b7a7da..662ae0d9e 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -2,7 +2,7 @@
* max77693.c - mfd core driver for the MAX 77693
*
* Copyright (C) 2012 Samsung Electronics
- * SangYoung Son <hello.son@smasung.com>
+ * SangYoung Son <hello.son@samsung.com>
*
* This program is not provided / owned by Maxim Integrated Products.
*
@@ -368,6 +368,7 @@ static const struct of_device_id max77693_dt_match[] = {
{ .compatible = "maxim,max77693" },
{},
};
+MODULE_DEVICE_TABLE(of, max77693_dt_match);
#endif
static struct i2c_driver max77693_i2c_driver = {
@@ -381,18 +382,7 @@ static struct i2c_driver max77693_i2c_driver = {
.id_table = max77693_i2c_id,
};
-static int __init max77693_i2c_init(void)
-{
- return i2c_add_driver(&max77693_i2c_driver);
-}
-/* init early so consumer devices can complete system boot */
-subsys_initcall(max77693_i2c_init);
-
-static void __exit max77693_i2c_exit(void)
-{
- i2c_del_driver(&max77693_i2c_driver);
-}
-module_exit(max77693_i2c_exit);
+module_i2c_driver(max77693_i2c_driver);
MODULE_DESCRIPTION("MAXIM 77693 multi-function core driver");
MODULE_AUTHOR("SangYoung, Son <hello.son@samsung.com>");
diff --git a/drivers/mfd/menf21bmc.c b/drivers/mfd/menf21bmc.c
index 1c2743458..3ad2def94 100644
--- a/drivers/mfd/menf21bmc.c
+++ b/drivers/mfd/menf21bmc.c
@@ -96,8 +96,8 @@ menf21bmc_probe(struct i2c_client *client, const struct i2c_device_id *ids)
return ret;
}
- ret = mfd_add_devices(&client->dev, 0, menf21bmc_cell,
- ARRAY_SIZE(menf21bmc_cell), NULL, 0, NULL);
+ ret = devm_mfd_add_devices(&client->dev, 0, menf21bmc_cell,
+ ARRAY_SIZE(menf21bmc_cell), NULL, 0, NULL);
if (ret < 0) {
dev_err(&client->dev, "failed to add BMC sub-devices\n");
return ret;
@@ -106,12 +106,6 @@ menf21bmc_probe(struct i2c_client *client, const struct i2c_device_id *ids)
return 0;
}
-static int menf21bmc_remove(struct i2c_client *client)
-{
- mfd_remove_devices(&client->dev);
- return 0;
-}
-
static const struct i2c_device_id menf21bmc_id_table[] = {
{ "menf21bmc" },
{ }
@@ -122,7 +116,6 @@ static struct i2c_driver menf21bmc_driver = {
.driver.name = "menf21bmc",
.id_table = menf21bmc_id_table,
.probe = menf21bmc_probe,
- .remove = menf21bmc_remove,
};
module_i2c_driver(menf21bmc_driver);
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 88bd1b1e4..3ac486a59 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -107,7 +107,7 @@ static void mfd_acpi_add_device(const struct mfd_cell *cell,
strlcpy(ids[0].id, match->pnpid, sizeof(ids[0].id));
list_for_each_entry(child, &parent->children, node) {
- if (acpi_match_device_ids(child, ids)) {
+ if (!acpi_match_device_ids(child, ids)) {
adev = child;
break;
}
@@ -193,8 +193,8 @@ static int mfd_add_device(struct device *parent, int id,
goto fail_alias;
}
- if (cell->pset) {
- ret = platform_device_add_properties(pdev, cell->pset);
+ if (cell->properties) {
+ ret = platform_device_add_properties(pdev, cell->properties);
if (ret)
goto fail_alias;
}
@@ -334,6 +334,44 @@ void mfd_remove_devices(struct device *parent)
}
EXPORT_SYMBOL(mfd_remove_devices);
+static void devm_mfd_dev_release(struct device *dev, void *res)
+{
+ mfd_remove_devices(dev);
+}
+
+/**
+ * devm_mfd_add_devices - Resource managed version of mfd_add_devices()
+ *
+ * Returns 0 on success or an appropriate negative error number on failure.
+ * All child-devices of the MFD will automatically be removed when it gets
+ * unbinded.
+ */
+int devm_mfd_add_devices(struct device *dev, int id,
+ const struct mfd_cell *cells, int n_devs,
+ struct resource *mem_base,
+ int irq_base, struct irq_domain *domain)
+{
+ struct device **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_mfd_dev_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = mfd_add_devices(dev, id, cells, n_devs, mem_base,
+ irq_base, domain);
+ if (ret < 0) {
+ devres_free(ptr);
+ return ret;
+ }
+
+ *ptr = dev;
+ devres_add(dev, ptr);
+
+ return ret;
+}
+EXPORT_SYMBOL(devm_mfd_add_devices);
+
int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones)
{
struct mfd_cell cell_entry;
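The devres hook above is what lets the drivers converted in this series drop
their .remove callbacks entirely: the children are torn down when the parent
device unbinds. A minimal sketch of a probe using the managed call, with
hypothetical cell names:

#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>

static const struct mfd_cell my_cells[] = {
	{ .name = "my-regulator" },
	{ .name = "my-rtc" },
};

static int my_probe(struct platform_device *pdev)
{
	/* Children are removed automatically on unbind; no .remove needed */
	return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_NONE,
				    my_cells, ARRAY_SIZE(my_cells),
				    NULL, 0, NULL);
}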
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 8e8d93249..e14d8b058 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -267,17 +267,26 @@ static int mt6397_probe(struct platform_device *pdev)
ret = regmap_read(pmic->regmap, MT6397_CID, &id);
if (ret) {
dev_err(pmic->dev, "Failed to read chip id: %d\n", ret);
- goto fail_irq;
+ return ret;
}
+ pmic->irq = platform_get_irq(pdev, 0);
+ if (pmic->irq <= 0)
+ return pmic->irq;
+
switch (id & 0xff) {
case MT6323_CID_CODE:
pmic->int_con[0] = MT6323_INT_CON0;
pmic->int_con[1] = MT6323_INT_CON1;
pmic->int_status[0] = MT6323_INT_STATUS0;
pmic->int_status[1] = MT6323_INT_STATUS1;
- ret = mfd_add_devices(&pdev->dev, -1, mt6323_devs,
- ARRAY_SIZE(mt6323_devs), NULL, 0, NULL);
+ ret = mt6397_irq_init(pmic);
+ if (ret)
+ return ret;
+
+ ret = devm_mfd_add_devices(&pdev->dev, -1, mt6323_devs,
+ ARRAY_SIZE(mt6323_devs), NULL,
+ 0, NULL);
break;
case MT6397_CID_CODE:
@@ -286,8 +295,13 @@ static int mt6397_probe(struct platform_device *pdev)
pmic->int_con[1] = MT6397_INT_CON1;
pmic->int_status[0] = MT6397_INT_STATUS0;
pmic->int_status[1] = MT6397_INT_STATUS1;
- ret = mfd_add_devices(&pdev->dev, -1, mt6397_devs,
- ARRAY_SIZE(mt6397_devs), NULL, 0, NULL);
+ ret = mt6397_irq_init(pmic);
+ if (ret)
+ return ret;
+
+ ret = devm_mfd_add_devices(&pdev->dev, -1, mt6397_devs,
+ ARRAY_SIZE(mt6397_devs), NULL,
+ 0, NULL);
break;
default:
@@ -296,14 +310,6 @@ static int mt6397_probe(struct platform_device *pdev)
break;
}
- pmic->irq = platform_get_irq(pdev, 0);
- if (pmic->irq > 0) {
- ret = mt6397_irq_init(pmic);
- if (ret)
- return ret;
- }
-
-fail_irq:
if (ret) {
irq_domain_remove(pmic->irq_domain);
dev_err(&pdev->dev, "failed to add child devices: %d\n", ret);
@@ -312,13 +318,6 @@ fail_irq:
return ret;
}
-static int mt6397_remove(struct platform_device *pdev)
-{
- mfd_remove_devices(&pdev->dev);
-
- return 0;
-}
-
static const struct of_device_id mt6397_of_match[] = {
{ .compatible = "mediatek,mt6397" },
{ .compatible = "mediatek,mt6323" },
@@ -334,7 +333,6 @@ MODULE_DEVICE_TABLE(platform, mt6397_id);
static struct platform_driver mt6397_driver = {
.probe = mt6397_probe,
- .remove = mt6397_remove,
.driver = {
.name = "mt6397",
.of_match_table = of_match_ptr(mt6397_of_match),
diff --git a/drivers/mfd/rc5t583-irq.c b/drivers/mfd/rc5t583-irq.c
index 3f8812daa..f8dde59ea 100644
--- a/drivers/mfd/rc5t583-irq.c
+++ b/drivers/mfd/rc5t583-irq.c
@@ -389,17 +389,10 @@ int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base)
irq_clear_status_flags(__irq, IRQ_NOREQUEST);
}
- ret = request_threaded_irq(irq, NULL, rc5t583_irq, IRQF_ONESHOT,
- "rc5t583", rc5t583);
+ ret = devm_request_threaded_irq(rc5t583->dev, irq, NULL, rc5t583_irq,
+ IRQF_ONESHOT, "rc5t583", rc5t583);
if (ret < 0)
dev_err(rc5t583->dev,
"Error in registering interrupt error: %d\n", ret);
return ret;
}
-
-int rc5t583_irq_exit(struct rc5t583 *rc5t583)
-{
- if (rc5t583->chip_irq)
- free_irq(rc5t583->chip_irq, rc5t583);
- return 0;
-}
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index fc2b2d93f..d12243d5e 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -252,7 +252,6 @@ static int rc5t583_i2c_probe(struct i2c_client *i2c,
struct rc5t583 *rc5t583;
struct rc5t583_platform_data *pdata = dev_get_platdata(&i2c->dev);
int ret;
- bool irq_init_success = false;
if (!pdata) {
dev_err(&i2c->dev, "Err: Platform data not found\n");
@@ -284,32 +283,16 @@ static int rc5t583_i2c_probe(struct i2c_client *i2c,
/* Still continue with warning, if irq init fails */
if (ret)
dev_warn(&i2c->dev, "IRQ init failed: %d\n", ret);
- else
- irq_init_success = true;
}
- ret = mfd_add_devices(rc5t583->dev, -1, rc5t583_subdevs,
- ARRAY_SIZE(rc5t583_subdevs), NULL, 0, NULL);
+ ret = devm_mfd_add_devices(rc5t583->dev, -1, rc5t583_subdevs,
+ ARRAY_SIZE(rc5t583_subdevs), NULL, 0, NULL);
if (ret) {
dev_err(&i2c->dev, "add mfd devices failed: %d\n", ret);
- goto err_add_devs;
+ return ret;
}
return 0;
-
-err_add_devs:
- if (irq_init_success)
- rc5t583_irq_exit(rc5t583);
- return ret;
-}
-
-static int rc5t583_i2c_remove(struct i2c_client *i2c)
-{
- struct rc5t583 *rc5t583 = i2c_get_clientdata(i2c);
-
- mfd_remove_devices(rc5t583->dev);
- rc5t583_irq_exit(rc5t583);
- return 0;
}
static const struct i2c_device_id rc5t583_i2c_id[] = {
@@ -324,7 +307,6 @@ static struct i2c_driver rc5t583_i2c_driver = {
.name = "rc5t583",
},
.probe = rc5t583_i2c_probe,
- .remove = rc5t583_i2c_remove,
.id_table = rc5t583_i2c_id,
};
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index 6575585f1..2bd8c5b6d 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -85,14 +85,10 @@ static int rdc321x_sb_probe(struct pci_dev *pdev,
rdc321x_gpio_pdata.sb_pdev = pdev;
rdc321x_wdt_pdata.sb_pdev = pdev;
- return mfd_add_devices(&pdev->dev, -1,
- rdc321x_sb_cells, ARRAY_SIZE(rdc321x_sb_cells),
- NULL, 0, NULL);
-}
-
-static void rdc321x_sb_remove(struct pci_dev *pdev)
-{
- mfd_remove_devices(&pdev->dev);
+ return devm_mfd_add_devices(&pdev->dev, -1,
+ rdc321x_sb_cells,
+ ARRAY_SIZE(rdc321x_sb_cells),
+ NULL, 0, NULL);
}
static const struct pci_device_id rdc321x_sb_table[] = {
@@ -105,7 +101,6 @@ static struct pci_driver rdc321x_sb_driver = {
.name = "RDC321x Southbridge",
.id_table = rdc321x_sb_table,
.probe = rdc321x_sb_probe,
- .remove = rdc321x_sb_remove,
};
module_pci_driver(rdc321x_sb_driver);
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index 4b1e43997..49d7f624f 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -213,9 +213,9 @@ static int rk808_probe(struct i2c_client *client,
rk808->i2c = client;
i2c_set_clientdata(client, rk808);
- ret = mfd_add_devices(&client->dev, -1,
- rk808s, ARRAY_SIZE(rk808s),
- NULL, 0, regmap_irq_get_domain(rk808->irq_data));
+ ret = devm_mfd_add_devices(&client->dev, -1,
+ rk808s, ARRAY_SIZE(rk808s), NULL, 0,
+ regmap_irq_get_domain(rk808->irq_data));
if (ret) {
dev_err(&client->dev, "failed to add MFD devices %d\n", ret);
goto err_irq;
@@ -240,7 +240,6 @@ static int rk808_remove(struct i2c_client *client)
struct rk808 *rk808 = i2c_get_clientdata(client);
regmap_del_irq_chip(client->irq, rk808->irq_data);
- mfd_remove_devices(&client->dev);
pm_power_off = NULL;
return 0;
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index 666857192..0ad51d792 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -78,8 +78,8 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c,
return ret;
}
- ret = mfd_add_devices(&i2c->dev, -1, rn5t618_cells,
- ARRAY_SIZE(rn5t618_cells), NULL, 0, NULL);
+ ret = devm_mfd_add_devices(&i2c->dev, -1, rn5t618_cells,
+ ARRAY_SIZE(rn5t618_cells), NULL, 0, NULL);
if (ret) {
dev_err(&i2c->dev, "failed to add sub-devices: %d\n", ret);
return ret;
@@ -102,7 +102,6 @@ static int rn5t618_i2c_remove(struct i2c_client *i2c)
pm_power_off = NULL;
}
- mfd_remove_devices(&i2c->dev);
return 0;
}
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
index 2b95485f0..9bd089c56 100644
--- a/drivers/mfd/rt5033.c
+++ b/drivers/mfd/rt5033.c
@@ -97,9 +97,9 @@ static int rt5033_i2c_probe(struct i2c_client *i2c,
return ret;
}
- ret = mfd_add_devices(rt5033->dev, -1, rt5033_devs,
- ARRAY_SIZE(rt5033_devs), NULL, 0,
- regmap_irq_get_domain(rt5033->irq_data));
+ ret = devm_mfd_add_devices(rt5033->dev, -1, rt5033_devs,
+ ARRAY_SIZE(rt5033_devs), NULL, 0,
+ regmap_irq_get_domain(rt5033->irq_data));
if (ret < 0) {
dev_err(&i2c->dev, "Failed to add RT5033 child devices.\n");
return ret;
@@ -110,13 +110,6 @@ static int rt5033_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int rt5033_i2c_remove(struct i2c_client *i2c)
-{
- mfd_remove_devices(&i2c->dev);
-
- return 0;
-}
-
static const struct i2c_device_id rt5033_i2c_id[] = {
{ "rt5033", },
{ }
@@ -135,7 +128,6 @@ static struct i2c_driver rt5033_driver = {
.of_match_table = of_match_ptr(rt5033_dt_match),
},
.probe = rt5033_i2c_probe,
- .remove = rt5033_i2c_remove,
.id_table = rt5033_i2c_id,
};
module_i2c_driver(rt5033_driver);
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 400e1d7d8..ca6b80d08 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -481,29 +481,16 @@ static int sec_pmic_probe(struct i2c_client *i2c,
/* If this happens the probe function is problem */
BUG();
}
- ret = mfd_add_devices(sec_pmic->dev, -1, sec_devs, num_sec_devs, NULL,
- 0, NULL);
+ ret = devm_mfd_add_devices(sec_pmic->dev, -1, sec_devs, num_sec_devs,
+ NULL, 0, NULL);
if (ret)
- goto err_mfd;
+ return ret;
device_init_wakeup(sec_pmic->dev, sec_pmic->wakeup);
sec_pmic_configure(sec_pmic);
sec_pmic_dump_rev(sec_pmic);
return ret;
-
-err_mfd:
- sec_irq_exit(sec_pmic);
- return ret;
-}
-
-static int sec_pmic_remove(struct i2c_client *i2c)
-{
- struct sec_pmic_dev *sec_pmic = i2c_get_clientdata(i2c);
-
- mfd_remove_devices(sec_pmic->dev);
- sec_irq_exit(sec_pmic);
- return 0;
}
static void sec_pmic_shutdown(struct i2c_client *i2c)
@@ -583,7 +570,6 @@ static struct i2c_driver sec_pmic_driver = {
.of_match_table = of_match_ptr(sec_dt_match),
},
.probe = sec_pmic_probe,
- .remove = sec_pmic_remove,
.shutdown = sec_pmic_shutdown,
.id_table = sec_pmic_id,
};
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index d77de431c..5eb59c233 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -483,10 +483,11 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
return -EINVAL;
}
- ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- sec_pmic->irq_base, sec_irq_chip,
- &sec_pmic->irq_data);
+ ret = devm_regmap_add_irq_chip(sec_pmic->dev, sec_pmic->regmap_pmic,
+ sec_pmic->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ sec_pmic->irq_base, sec_irq_chip,
+ &sec_pmic->irq_data);
if (ret != 0) {
dev_err(sec_pmic->dev, "Failed to register IRQ chip: %d\n", ret);
return ret;
@@ -500,8 +501,3 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
return 0;
}
-
-void sec_irq_exit(struct sec_pmic_dev *sec_pmic)
-{
- regmap_del_irq_chip(sec_pmic->irq, sec_pmic->irq_data);
-}
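As with devm_mfd_add_devices(), the managed devm_regmap_add_irq_chip() ties
the irq chip's teardown to the device, which is what makes the explicit
sec_irq_exit()/regmap_del_irq_chip() pairing unnecessary. A sketch of the
managed call shape, with placeholder chip and helper names:

#include <linux/interrupt.h>
#include <linux/regmap.h>

static const struct regmap_irq_chip my_irq_chip = {
	.name = "my-pmic",	/* placeholder; real chips fill in irqs/regs */
};

static int my_irq_init(struct device *dev, struct regmap *regmap, int irq,
		       struct regmap_irq_chip_data **data)
{
	/* Teardown is automatic on unbind; no regmap_del_irq_chip() needed */
	return devm_regmap_add_irq_chip(dev, regmap, irq,
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					0, &my_irq_chip, data);
}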
diff --git a/drivers/mfd/sky81452.c b/drivers/mfd/sky81452.c
index b0c9b0415..30a2a6771 100644
--- a/drivers/mfd/sky81452.c
+++ b/drivers/mfd/sky81452.c
@@ -64,19 +64,14 @@ static int sky81452_probe(struct i2c_client *client,
cells[1].platform_data = pdata->regulator_init_data;
cells[1].pdata_size = sizeof(*pdata->regulator_init_data);
- ret = mfd_add_devices(dev, -1, cells, ARRAY_SIZE(cells), NULL, 0, NULL);
+ ret = devm_mfd_add_devices(dev, -1, cells, ARRAY_SIZE(cells),
+ NULL, 0, NULL);
if (ret)
dev_err(dev, "failed to add child devices. err=%d\n", ret);
return ret;
}
-static int sky81452_remove(struct i2c_client *client)
-{
- mfd_remove_devices(&client->dev);
- return 0;
-}
-
static const struct i2c_device_id sky81452_ids[] = {
{ "sky81452" },
{ }
@@ -97,7 +92,6 @@ static struct i2c_driver sky81452_driver = {
.of_match_table = of_match_ptr(sky81452_of_match),
},
.probe = sky81452_probe,
- .remove = sky81452_remove,
.id_table = sky81452_ids,
};
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index c646784c5..65cd0d2a8 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -879,11 +879,6 @@ static int sm501_register_display(struct sm501_devdata *sm,
#ifdef CONFIG_MFD_SM501_GPIO
-static inline struct sm501_gpio_chip *to_sm501_gpio(struct gpio_chip *gc)
-{
- return container_of(gc, struct sm501_gpio_chip, gpio);
-}
-
static inline struct sm501_devdata *sm501_gpio_to_dev(struct sm501_gpio *gpio)
{
return container_of(gpio, struct sm501_devdata, gpio);
@@ -892,7 +887,7 @@ static inline struct sm501_devdata *sm501_gpio_to_dev(struct sm501_gpio *gpio)
static int sm501_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- struct sm501_gpio_chip *smgpio = to_sm501_gpio(chip);
+ struct sm501_gpio_chip *smgpio = gpiochip_get_data(chip);
unsigned long result;
result = smc501_readl(smgpio->regbase + SM501_GPIO_DATA_LOW);
@@ -923,7 +918,7 @@ static void sm501_gpio_ensure_gpio(struct sm501_gpio_chip *smchip,
static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- struct sm501_gpio_chip *smchip = to_sm501_gpio(chip);
+ struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
struct sm501_gpio *smgpio = smchip->ourgpio;
unsigned long bit = 1 << offset;
void __iomem *regs = smchip->regbase;
@@ -948,7 +943,7 @@ static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset)
{
- struct sm501_gpio_chip *smchip = to_sm501_gpio(chip);
+ struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
struct sm501_gpio *smgpio = smchip->ourgpio;
void __iomem *regs = smchip->regbase;
unsigned long bit = 1 << offset;
@@ -974,7 +969,7 @@ static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset)
static int sm501_gpio_output(struct gpio_chip *chip,
unsigned offset, int value)
{
- struct sm501_gpio_chip *smchip = to_sm501_gpio(chip);
+ struct sm501_gpio_chip *smchip = gpiochip_get_data(chip);
struct sm501_gpio *smgpio = smchip->ourgpio;
unsigned long bit = 1 << offset;
void __iomem *regs = smchip->regbase;
@@ -1039,7 +1034,7 @@ static int sm501_gpio_register_chip(struct sm501_devdata *sm,
gchip->base = base;
chip->ourgpio = gpio;
- return gpiochip_add(gchip);
+ return gpiochip_add_data(gchip, chip);
}
static int sm501_register_gpio(struct sm501_devdata *sm)
diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c
index a4c0df71c..7f89e89b8 100644
--- a/drivers/mfd/smsc-ece1099.c
+++ b/drivers/mfd/smsc-ece1099.c
@@ -80,15 +80,6 @@ err:
return ret;
}
-static int smsc_i2c_remove(struct i2c_client *i2c)
-{
- struct smsc *smsc = i2c_get_clientdata(i2c);
-
- mfd_remove_devices(smsc->dev);
-
- return 0;
-}
-
static const struct i2c_device_id smsc_i2c_id[] = {
{ "smscece1099", 0},
{},
@@ -100,7 +91,6 @@ static struct i2c_driver smsc_i2c_driver = {
.name = "smsc",
},
.probe = smsc_i2c_probe,
- .remove = smsc_i2c_remove,
.id_table = smsc_i2c_id,
};
diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c
index ca613df36..ab949eaca 100644
--- a/drivers/mfd/stw481x.c
+++ b/drivers/mfd/stw481x.c
@@ -206,8 +206,8 @@ static int stw481x_probe(struct i2c_client *client,
stw481x_cells[i].pdata_size = sizeof(*stw481x);
}
- ret = mfd_add_devices(&client->dev, 0, stw481x_cells,
- ARRAY_SIZE(stw481x_cells), NULL, 0, NULL);
+ ret = devm_mfd_add_devices(&client->dev, 0, stw481x_cells,
+ ARRAY_SIZE(stw481x_cells), NULL, 0, NULL);
if (ret)
return ret;
@@ -216,12 +216,6 @@ static int stw481x_probe(struct i2c_client *client,
return ret;
}
-static int stw481x_remove(struct i2c_client *client)
-{
- mfd_remove_devices(&client->dev);
- return 0;
-}
-
/*
* This ID table is completely unused, as this is a pure
* device-tree probed driver, but it has to be here due to
@@ -246,7 +240,6 @@ static struct i2c_driver stw481x_driver = {
.of_match_table = stw481x_match,
},
.probe = stw481x_probe,
- .remove = stw481x_remove,
.id_table = stw481x_id,
};
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 1ecbfa40d..d42d322ac 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -24,7 +24,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/mfd/tc6393xb.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#define SCR_REVID 0x08 /* b Revision ID */
@@ -434,7 +434,7 @@ static struct mfd_cell tc6393xb_cells[] = {
static int tc6393xb_gpio_get(struct gpio_chip *chip,
unsigned offset)
{
- struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
+ struct tc6393xb *tc6393xb = gpiochip_get_data(chip);
/* XXX: does dsr also represent inputs? */
return !!(tmio_ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8))
@@ -444,7 +444,7 @@ static int tc6393xb_gpio_get(struct gpio_chip *chip,
static void __tc6393xb_gpio_set(struct gpio_chip *chip,
unsigned offset, int value)
{
- struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
+ struct tc6393xb *tc6393xb = gpiochip_get_data(chip);
u8 dsr;
dsr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8));
@@ -459,7 +459,7 @@ static void __tc6393xb_gpio_set(struct gpio_chip *chip,
static void tc6393xb_gpio_set(struct gpio_chip *chip,
unsigned offset, int value)
{
- struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
+ struct tc6393xb *tc6393xb = gpiochip_get_data(chip);
unsigned long flags;
spin_lock_irqsave(&tc6393xb->lock, flags);
@@ -472,7 +472,7 @@ static void tc6393xb_gpio_set(struct gpio_chip *chip,
static int tc6393xb_gpio_direction_input(struct gpio_chip *chip,
unsigned offset)
{
- struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
+ struct tc6393xb *tc6393xb = gpiochip_get_data(chip);
unsigned long flags;
u8 doecr;
@@ -490,7 +490,7 @@ static int tc6393xb_gpio_direction_input(struct gpio_chip *chip,
static int tc6393xb_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
- struct tc6393xb *tc6393xb = container_of(chip, struct tc6393xb, gpio);
+ struct tc6393xb *tc6393xb = gpiochip_get_data(chip);
unsigned long flags;
u8 doecr;
@@ -517,7 +517,7 @@ static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
tc6393xb->gpio.direction_input = tc6393xb_gpio_direction_input;
tc6393xb->gpio.direction_output = tc6393xb_gpio_direction_output;
- return gpiochip_add(&tc6393xb->gpio);
+ return gpiochip_add_data(&tc6393xb->gpio, tc6393xb);
}
/*--------------------------------------------------------------------------*/
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index 51c54951c..baa12ea66 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -21,7 +21,6 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/regulator/driver.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6105x.h>
diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c
index 495e4518f..d829a6131 100644
--- a/drivers/mfd/tps65010.c
+++ b/drivers/mfd/tps65010.c
@@ -34,7 +34,7 @@
#include <linux/i2c/tps65010.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
/*-------------------------------------------------------------------------*/
@@ -477,7 +477,7 @@ tps65010_output(struct gpio_chip *chip, unsigned offset, int value)
if (offset < 4) {
struct tps65010 *tps;
- tps = container_of(chip, struct tps65010, chip);
+ tps = gpiochip_get_data(chip);
if (!(tps->outmask & (1 << offset)))
return -EINVAL;
tps65010_set_gpio_out_value(offset + 1, value);
@@ -494,7 +494,7 @@ static int tps65010_gpio_get(struct gpio_chip *chip, unsigned offset)
int value;
struct tps65010 *tps;
- tps = container_of(chip, struct tps65010, chip);
+ tps = gpiochip_get_data(chip);
if (offset < 4) {
value = i2c_smbus_read_byte_data(tps->client, TPS_DEFGPIO);
@@ -651,7 +651,7 @@ static int tps65010_probe(struct i2c_client *client,
tps->chip.ngpio = 7;
tps->chip.can_sleep = 1;
- status = gpiochip_add(&tps->chip);
+ status = gpiochip_add_data(&tps->chip, tps);
if (status < 0)
dev_err(&client->dev, "can't add gpiochip, err %d\n",
status);
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 1ab3dd6c8..40beb2f43 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -100,16 +100,8 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c,
tps6507x->read_dev = tps6507x_i2c_read_device;
tps6507x->write_dev = tps6507x_i2c_write_device;
- return mfd_add_devices(tps6507x->dev, -1, tps6507x_devs,
- ARRAY_SIZE(tps6507x_devs), NULL, 0, NULL);
-}
-
-static int tps6507x_i2c_remove(struct i2c_client *i2c)
-{
- struct tps6507x_dev *tps6507x = i2c_get_clientdata(i2c);
-
- mfd_remove_devices(tps6507x->dev);
- return 0;
+ return devm_mfd_add_devices(tps6507x->dev, -1, tps6507x_devs,
+ ARRAY_SIZE(tps6507x_devs), NULL, 0, NULL);
}
static const struct i2c_device_id tps6507x_i2c_id[] = {
@@ -132,7 +124,6 @@ static struct i2c_driver tps6507x_i2c_driver = {
.of_match_table = of_match_ptr(tps6507x_of_match),
},
.probe = tps6507x_i2c_probe,
- .remove = tps6507x_i2c_remove,
.id_table = tps6507x_i2c_id,
};
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index d32b54426..049a6fcac 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -205,8 +205,8 @@ static int tps65217_probe(struct i2c_client *client,
return ret;
}
- ret = mfd_add_devices(tps->dev, -1, tps65217s,
- ARRAY_SIZE(tps65217s), NULL, 0, NULL);
+ ret = devm_mfd_add_devices(tps->dev, -1, tps65217s,
+ ARRAY_SIZE(tps65217s), NULL, 0, NULL);
if (ret < 0) {
dev_err(tps->dev, "mfd_add_devices failed: %d\n", ret);
return ret;
@@ -235,15 +235,6 @@ static int tps65217_probe(struct i2c_client *client,
return 0;
}
-static int tps65217_remove(struct i2c_client *client)
-{
- struct tps65217 *tps = i2c_get_clientdata(client);
-
- mfd_remove_devices(tps->dev);
-
- return 0;
-}
-
static const struct i2c_device_id tps65217_id_table[] = {
{"tps65217", TPS65217},
{ /* sentinel */ }
@@ -257,7 +248,6 @@ static struct i2c_driver tps65217_driver = {
},
.id_table = tps65217_id_table,
.probe = tps65217_probe,
- .remove = tps65217_remove,
};
static int __init tps65217_init(void)
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index f7ab11548..11cab1582 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -252,9 +252,10 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
}
tps65910->chip_irq = irq;
- ret = regmap_add_irq_chip(tps65910->regmap, tps65910->chip_irq,
- IRQF_ONESHOT, pdata->irq_base,
- tps6591x_irqs_chip, &tps65910->irq_data);
+ ret = devm_regmap_add_irq_chip(tps65910->dev, tps65910->regmap,
+ tps65910->chip_irq,
+ IRQF_ONESHOT, pdata->irq_base,
+ tps6591x_irqs_chip, &tps65910->irq_data);
if (ret < 0) {
dev_warn(tps65910->dev, "Failed to add irq_chip %d\n", ret);
tps65910->chip_irq = 0;
@@ -262,13 +263,6 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
return ret;
}
-static int tps65910_irq_exit(struct tps65910 *tps65910)
-{
- if (tps65910->chip_irq > 0)
- regmap_del_irq_chip(tps65910->chip_irq, tps65910->irq_data);
- return 0;
-}
-
static bool is_volatile_reg(struct device *dev, unsigned int reg)
{
struct tps65910 *tps65910 = dev_get_drvdata(dev);
@@ -510,29 +504,18 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
pm_power_off = tps65910_power_off;
}
- ret = mfd_add_devices(tps65910->dev, -1,
- tps65910s, ARRAY_SIZE(tps65910s),
- NULL, 0,
- regmap_irq_get_domain(tps65910->irq_data));
+ ret = devm_mfd_add_devices(tps65910->dev, -1,
+ tps65910s, ARRAY_SIZE(tps65910s),
+ NULL, 0,
+ regmap_irq_get_domain(tps65910->irq_data));
if (ret < 0) {
dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
- tps65910_irq_exit(tps65910);
return ret;
}
return ret;
}
-static int tps65910_i2c_remove(struct i2c_client *i2c)
-{
- struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
-
- tps65910_irq_exit(tps65910);
- mfd_remove_devices(tps65910->dev);
-
- return 0;
-}
-
static const struct i2c_device_id tps65910_i2c_id[] = {
{ "tps65910", TPS65910 },
{ "tps65911", TPS65911 },
@@ -547,7 +530,6 @@ static struct i2c_driver tps65910_i2c_driver = {
.of_match_table = of_match_ptr(tps65910_of_match),
},
.probe = tps65910_i2c_probe,
- .remove = tps65910_i2c_remove,
.id_table = tps65910_i2c_id,
};
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 40e51b0ba..b46c0cfc2 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -696,7 +696,7 @@ int twl4030_init_irq(struct device *dev, int irq_num)
nr_irqs = TWL4030_PWR_NR_IRQS + TWL4030_CORE_NR_IRQS;
irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
- if (IS_ERR_VALUE(irq_base)) {
+ if (irq_base < 0) {
dev_err(dev, "Fail to allocate IRQ descs\n");
return irq_base;
}
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 04b539850..1beb722f6 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -1,5 +1,4 @@
/*
- * linux/drivers/i2c/chips/twl4030-power.c
*
* Handle TWL4030 Power initialization
*
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index 08a693cd3..852d5874a 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -291,7 +291,11 @@ int twl6040_power(struct twl6040 *twl6040, int on)
if (twl6040->power_count++)
goto out;
- clk_prepare_enable(twl6040->clk32k);
+ ret = clk_prepare_enable(twl6040->clk32k);
+ if (ret) {
+ twl6040->power_count = 0;
+ goto out;
+ }
/* Allow writes to the chip */
regcache_cache_only(twl6040->regmap, false);
@@ -300,6 +304,7 @@ int twl6040_power(struct twl6040 *twl6040, int on)
/* use automatic power-up sequence */
ret = twl6040_power_up_automatic(twl6040);
if (ret) {
+ clk_disable_unprepare(twl6040->clk32k);
twl6040->power_count = 0;
goto out;
}
@@ -307,6 +312,7 @@ int twl6040_power(struct twl6040 *twl6040, int on)
/* use manual power-up sequence */
ret = twl6040_power_up_manual(twl6040);
if (ret) {
+ clk_disable_unprepare(twl6040->clk32k);
twl6040->power_count = 0;
goto out;
}
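The twl6040 changes above follow the usual clk API discipline: a successful
clk_prepare_enable() must be undone with clk_disable_unprepare() on every
later failure path, or the enable count leaks. In sketch form, with a
hypothetical my_power_up() step:

#include <linux/clk.h>
#include <linux/device.h>

static int my_power_up(struct device *dev);	/* hypothetical chip step */

static int my_power_on(struct device *dev, struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	ret = my_power_up(dev);
	if (ret) {
		/* Balance the prepare+enable taken above before bailing */
		clk_disable_unprepare(clk);
		return ret;
	}

	return 0;
}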
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index bcafe1ecd..9ab9ec47e 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -28,7 +28,7 @@
#include <linux/mutex.h>
#include <linux/mfd/ucb1x00.h>
#include <linux/pm.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
static DEFINE_MUTEX(ucb1x00_mutex);
static LIST_HEAD(ucb1x00_drivers);
@@ -109,7 +109,7 @@ unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
+ struct ucb1x00 *ucb = gpiochip_get_data(chip);
unsigned long flags;
spin_lock_irqsave(&ucb->io_lock, flags);
@@ -126,7 +126,7 @@ static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
+ struct ucb1x00 *ucb = gpiochip_get_data(chip);
unsigned val;
ucb1x00_enable(ucb);
@@ -138,7 +138,7 @@ static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
- struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
+ struct ucb1x00 *ucb = gpiochip_get_data(chip);
unsigned long flags;
spin_lock_irqsave(&ucb->io_lock, flags);
@@ -154,7 +154,7 @@ static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
, int value)
{
- struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
+ struct ucb1x00 *ucb = gpiochip_get_data(chip);
unsigned long flags;
unsigned old, mask = 1 << offset;
@@ -181,7 +181,7 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
static int ucb1x00_to_irq(struct gpio_chip *chip, unsigned offset)
{
- struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
+ struct ucb1x00 *ucb = gpiochip_get_data(chip);
return ucb->irq_base > 0 ? ucb->irq_base + offset : -ENXIO;
}
@@ -579,7 +579,7 @@ static int ucb1x00_probe(struct mcp *mcp)
ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
ucb->gpio.to_irq = ucb1x00_to_irq;
- ret = gpiochip_add(&ucb->gpio);
+ ret = gpiochip_add_data(&ucb->gpio, ucb);
if (ret)
goto err_gpio_add;
} else
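
gpiochip_add_data() stores a driver-private pointer in the gpio_chip, and gpiochip_get_data() retrieves it inside the callbacks, replacing the old container_of() pattern (and still working when the gpio_chip is not embedded in the driver structure). Sketched with a hypothetical driver:

    #include <linux/gpio/driver.h>

    static int foo_gpio_get(struct gpio_chip *chip, unsigned offset)
    {
        struct foo *foo = gpiochip_get_data(chip);  /* was container_of() */

        return !!(foo_readl(foo, FOO_GPIO_IN) & BIT(offset)); /* hypothetical read */
    }

    /* at probe time: the second argument becomes the chip's data pointer */
    ret = gpiochip_add_data(&foo->gpio, foo);
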
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index 855c0204f..201a3ea2a 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -202,7 +202,7 @@ static int vexpress_sysreg_probe(struct platform_device *pdev)
bgpio_init(mmc_gpio_chip, &pdev->dev, 0x4, base + SYS_MCI,
NULL, NULL, NULL, NULL, 0);
mmc_gpio_chip->ngpio = 2;
- gpiochip_add(mmc_gpio_chip);
+ gpiochip_add_data(mmc_gpio_chip, NULL);
return mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
vexpress_sysreg_cells,
diff --git a/drivers/mfd/wl1273-core.c b/drivers/mfd/wl1273-core.c
index f7c52d901..708046592 100644
--- a/drivers/mfd/wl1273-core.c
+++ b/drivers/mfd/wl1273-core.c
@@ -170,15 +170,6 @@ static int wl1273_fm_set_volume(struct wl1273_core *core, unsigned int volume)
return 0;
}
-static int wl1273_core_remove(struct i2c_client *client)
-{
- dev_dbg(&client->dev, "%s\n", __func__);
-
- mfd_remove_devices(&client->dev);
-
- return 0;
-}
-
static int wl1273_core_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -237,8 +228,8 @@ static int wl1273_core_probe(struct i2c_client *client,
dev_dbg(&client->dev, "%s: number of children: %d.\n",
__func__, children);
- r = mfd_add_devices(&client->dev, -1, core->cells,
- children, NULL, 0, NULL);
+ r = devm_mfd_add_devices(&client->dev, -1, core->cells,
+ children, NULL, 0, NULL);
if (r)
goto err;
@@ -258,7 +249,6 @@ static struct i2c_driver wl1273_core_driver = {
},
.probe = wl1273_core_probe,
.id_table = wl1273_driver_id_table,
- .remove = wl1273_core_remove,
};
static int __init wl1273_core_init(void)
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 8e74e7150..1ee68bd44 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -3066,6 +3066,7 @@ static bool wm5110_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_AOD_IRQ_RAW_STATUS:
case ARIZONA_FX_CTRL2:
case ARIZONA_ASRC_STATUS:
+ case ARIZONA_CLOCK_CONTROL:
case ARIZONA_DSP_STATUS:
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 3bd44a45c..8a98a2fc7 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -35,27 +35,6 @@ static bool wm8400_volatile(struct device *dev, unsigned int reg)
}
}
-/**
- * wm8400_reg_read - Single register read
- *
- * @wm8400: Pointer to wm8400 control structure
- * @reg: Register to read
- *
- * @return Read value
- */
-u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg)
-{
- unsigned int val;
- int ret;
-
- ret = regmap_read(wm8400->regmap, reg, &val);
- if (ret < 0)
- return ret;
-
- return val;
-}
-EXPORT_SYMBOL_GPL(wm8400_reg_read);
-
int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data)
{
return regmap_bulk_read(wm8400->regmap, reg, data, count);
@@ -70,7 +49,7 @@ static int wm8400_register_codec(struct wm8400 *wm8400)
.pdata_size = sizeof(*wm8400),
};
- return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0, NULL);
+ return devm_mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0, NULL);
}
/*
@@ -111,7 +90,7 @@ static int wm8400_init(struct wm8400 *wm8400,
ret = wm8400_register_codec(wm8400);
if (ret != 0) {
dev_err(wm8400->dev, "Failed to register codec\n");
- goto err_children;
+ return ret;
}
if (pdata && pdata->platform_init) {
@@ -119,21 +98,12 @@ static int wm8400_init(struct wm8400 *wm8400,
if (ret != 0) {
dev_err(wm8400->dev, "Platform init failed: %d\n",
ret);
- goto err_children;
+ return ret;
}
} else
dev_warn(wm8400->dev, "No platform initialisation supplied\n");
return 0;
-
-err_children:
- mfd_remove_devices(wm8400->dev);
- return ret;
-}
-
-static void wm8400_release(struct wm8400 *wm8400)
-{
- mfd_remove_devices(wm8400->dev);
}
static const struct regmap_config wm8400_regmap_config = {
@@ -156,7 +126,7 @@ void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
}
EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache);
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
static int wm8400_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -176,15 +146,6 @@ static int wm8400_i2c_probe(struct i2c_client *i2c,
return wm8400_init(wm8400, dev_get_platdata(&i2c->dev));
}
-static int wm8400_i2c_remove(struct i2c_client *i2c)
-{
- struct wm8400 *wm8400 = i2c_get_clientdata(i2c);
-
- wm8400_release(wm8400);
-
- return 0;
-}
-
static const struct i2c_device_id wm8400_i2c_id[] = {
{ "wm8400", 0 },
{ }
@@ -196,7 +157,6 @@ static struct i2c_driver wm8400_i2c_driver = {
.name = "WM8400",
},
.probe = wm8400_i2c_probe,
- .remove = wm8400_i2c_remove,
.id_table = wm8400_i2c_id,
};
#endif
@@ -205,7 +165,7 @@ static int __init wm8400_module_init(void)
{
int ret = -ENODEV;
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
ret = i2c_add_driver(&wm8400_i2c_driver);
if (ret != 0)
pr_err("Failed to register I2C driver: %d\n", ret);
@@ -217,7 +177,7 @@ subsys_initcall(wm8400_module_init);
static void __exit wm8400_module_exit(void)
{
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+#if IS_ENABLED(CONFIG_I2C)
i2c_del_driver(&wm8400_i2c_driver);
#endif
}
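
IS_ENABLED(CONFIG_FOO) is true for both built-in (=y) and modular (=m) configurations, which is exactly what the hand-rolled "defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)" test spelled out. Unlike #ifdef it can also appear in ordinary C conditions, letting the compiler type-check and then discard disabled branches:

    #include <linux/kernel.h>

    #if IS_ENABLED(CONFIG_I2C)          /* true for =y and =m */
    /* I2C-only declarations */
    #endif

        if (IS_ENABLED(CONFIG_I2C))     /* also valid as an expression */
            foo_register_i2c();         /* hypothetical; dead-code-eliminated when off */
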
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 2107c9484..6d228ccd8 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -68,15 +68,6 @@ struct cxl_context *cxl_get_context(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(cxl_get_context);
-struct device *cxl_get_phys_dev(struct pci_dev *dev)
-{
- struct cxl_afu *afu;
-
- afu = cxl_pci_to_afu(dev);
-
- return afu->adapter->dev.parent;
-}
-
int cxl_release_context(struct cxl_context *ctx)
{
if (ctx->status >= STARTED)
@@ -192,6 +183,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
ctx->pid = get_task_pid(task, PIDTYPE_PID);
ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
kernel = false;
+ ctx->real_mode = false;
}
cxl_ctx_get();
@@ -228,6 +220,24 @@ void cxl_set_master(struct cxl_context *ctx)
}
EXPORT_SYMBOL_GPL(cxl_set_master);
+int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode)
+{
+ if (ctx->status == STARTED) {
+ /*
+ * We could potentially update the PE and issue an update LLCMD
+ * to support this, but it doesn't seem to have a good use case
+ * since it's trivial to just create a second kernel context
+ * with different translation modes, so until someone convinces
+ * me otherwise:
+ */
+ return -EBUSY;
+ }
+
+ ctx->real_mode = real_mode;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cxl_set_translation_mode);
+
/* wrappers around afu_* file ops which are EXPORTED */
int cxl_fd_open(struct inode *inode, struct file *file)
{
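
cxl_set_translation_mode() joins the in-kernel CXL API, so a kernel AFU user would call it between context initialization and start; once the context reaches STARTED the call returns -EBUSY. A hedged usage sketch (cxl_dev_context_init() is the existing exported API; pdev and the error handling are abbreviated placeholders):

    #include <misc/cxl.h>

    struct cxl_context *ctx = cxl_dev_context_init(pdev);
    if (IS_ERR(ctx))
        return PTR_ERR(ctx);

    /* must happen before cxl_start_context(); -EBUSY afterwards */
    if (cxl_set_translation_mode(ctx, true))
        goto err_release;   /* hypothetical cleanup label */
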
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 7edea9c19..26d206b1d 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -297,8 +297,7 @@ static void reclaim_ctx(struct rcu_head *rcu)
if (ctx->kernelapi)
kfree(ctx->mapping);
- if (ctx->irq_bitmap)
- kfree(ctx->irq_bitmap);
+ kfree(ctx->irq_bitmap);
/* Drop ref to the afu device taken during cxl_context_init */
cxl_afu_put(ctx->afu);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 73dc2a33d..4fe50788f 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -178,15 +178,6 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_PSL_SR_An_MP (1ull << (63-62)) /* Master Process */
#define CXL_PSL_SR_An_LE (1ull << (63-63)) /* Little Endian */
-/****** CXL_PSL_LLCMD_An ****************************************************/
-#define CXL_LLCMD_TERMINATE 0x0001000000000000ULL
-#define CXL_LLCMD_REMOVE 0x0002000000000000ULL
-#define CXL_LLCMD_SUSPEND 0x0003000000000000ULL
-#define CXL_LLCMD_RESUME 0x0004000000000000ULL
-#define CXL_LLCMD_ADD 0x0005000000000000ULL
-#define CXL_LLCMD_UPDATE 0x0006000000000000ULL
-#define CXL_LLCMD_HANDLE_MASK 0x000000000000ffffULL
-
/****** CXL_PSL_ID_An ****************************************************/
#define CXL_PSL_ID_An_F (1ull << (63-31))
#define CXL_PSL_ID_An_L (1ull << (63-30))
@@ -376,11 +367,13 @@ struct cxl_afu_native {
};
struct cxl_afu_guest {
+ struct cxl_afu *parent;
u64 handle;
phys_addr_t p2n_phys;
u64 p2n_size;
int max_ints;
- struct mutex recovery_lock;
+ bool handle_err;
+ struct delayed_work work_err;
int previous_state;
};
@@ -524,6 +517,7 @@ struct cxl_context {
bool pe_inserted;
bool master;
bool kernel;
+ bool real_mode;
bool pending_irq;
bool pending_fault;
bool pending_afu_err;
@@ -580,6 +574,7 @@ struct cxl {
bool perst_loads_image;
bool perst_select_user;
bool perst_same_image;
+ bool psl_timebase_synced;
};
int cxl_pci_alloc_one_irq(struct cxl *adapter);
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 9a8650bcb..377e650a2 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -149,11 +149,13 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
* update_mmu_cache() will not have loaded the hash since current->trap
* is not a 0x400 or 0x300, so just call hash_page_mm() here.
*/
- access = _PAGE_PRESENT;
+ access = _PAGE_PRESENT | _PAGE_READ;
if (dsisr & CXL_PSL_DSISR_An_S)
- access |= _PAGE_RW;
- if ((!ctx->kernel) || ~(dar & (1ULL << 63)))
- access |= _PAGE_USER;
+ access |= _PAGE_WRITE;
+
+ access |= _PAGE_PRIVILEGED;
+ if ((!ctx->kernel) || (REGION_ID(dar) == USER_REGION_ID))
+ access &= ~_PAGE_PRIVILEGED;
if (dsisr & DSISR_NOHPTE)
inv_flags |= HPTE_NOHPTE_UPDATE;
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 8213372de..bc8d0b987 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -178,6 +178,9 @@ static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
u64 state;
int rc = 0;
+ if (!afu)
+ return -EIO;
+
rc = cxl_h_read_error_state(afu->guest->handle, &state);
if (!rc) {
WARN_ON(state != H_STATE_NORMAL &&
@@ -552,6 +555,17 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
elem->common.sstp1 = cpu_to_be64(ctx->sstp1);
+
+ /*
+ * Ensure we have at least one interrupt allocated to take faults for
+ * kernel contexts that may not have allocated any AFU IRQs at all:
+ */
+ if (ctx->irqs.range[0] == 0) {
+ rc = afu_register_irqs(ctx, 0);
+ if (rc)
+ goto out_free;
+ }
+
for (r = 0; r < CXL_IRQ_RANGES; r++) {
for (i = 0; i < ctx->irqs.range[r]; i++) {
if (r == 0 && i == 0) {
@@ -597,6 +611,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
enable_afu_irqs(ctx);
}
+out_free:
free_page((u64)elem);
return rc;
}
@@ -605,6 +620,9 @@ static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u
{
pr_devel("in %s\n", __func__);
+ if (ctx->real_mode)
+ return -EPERM;
+
ctx->kernel = kernel;
if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
return attach_afu_directed(ctx, wed, amr);
@@ -818,7 +836,6 @@ static int afu_update_state(struct cxl_afu *afu)
switch (cur_state) {
case H_STATE_NORMAL:
afu->guest->previous_state = cur_state;
- rc = 1;
break;
case H_STATE_DISABLE:
@@ -834,7 +851,6 @@ static int afu_update_state(struct cxl_afu *afu)
pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
pci_channel_io_normal);
pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
- rc = 1;
}
afu->guest->previous_state = 0;
break;
@@ -859,39 +875,30 @@ static int afu_update_state(struct cxl_afu *afu)
return rc;
}
-static int afu_do_recovery(struct cxl_afu *afu)
+static void afu_handle_errstate(struct work_struct *work)
{
- int rc;
+ struct cxl_afu_guest *afu_guest =
+ container_of(to_delayed_work(work), struct cxl_afu_guest, work_err);
- /* many threads can arrive here, in case of detach_all for example.
- * Only one needs to drive the recovery
- */
- if (mutex_trylock(&afu->guest->recovery_lock)) {
- rc = afu_update_state(afu);
- mutex_unlock(&afu->guest->recovery_lock);
- return rc;
- }
- return 0;
+ if (!afu_update_state(afu_guest->parent) &&
+ afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
+ return;
+
+ if (afu_guest->handle_err == true)
+ schedule_delayed_work(&afu_guest->work_err,
+ msecs_to_jiffies(3000));
}
static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
{
int state;
- if (afu) {
- if (afu_read_error_state(afu, &state) ||
- state != H_STATE_NORMAL) {
- if (afu_do_recovery(afu) > 0) {
- /* check again in case we've just fixed it */
- if (!afu_read_error_state(afu, &state) &&
- state == H_STATE_NORMAL)
- return true;
- }
- return false;
- }
+ if (afu && (!afu_read_error_state(afu, &state))) {
+ if (state == H_STATE_NORMAL)
+ return true;
}
- return true;
+ return false;
}
static int afu_properties_look_ok(struct cxl_afu *afu)
@@ -929,8 +936,6 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
return -ENOMEM;
}
- mutex_init(&afu->guest->recovery_lock);
-
if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
adapter->adapter_num,
slice)))
@@ -986,6 +991,15 @@ int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_n
afu->enabled = true;
+ /*
+ * Wake up the CPU periodically to check the state of the AFU,
+ * using the "afu" pointer stored in the guest structure.
+ */
+ afu->guest->parent = afu;
+ afu->guest->handle_err = true;
+ INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate);
+ schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000));
+
if ((rc = cxl_pci_vphb_add(afu)))
dev_info(&afu->dev, "Can't register vPHB\n");
@@ -1014,6 +1028,10 @@ void cxl_guest_remove_afu(struct cxl_afu *afu)
if (!afu)
return;
+ /* stop the error polling work and flush any pending run */
+ afu->guest->handle_err = false;
+ flush_delayed_work(&afu->guest->work_err);
+
cxl_pci_vphb_remove(afu);
cxl_sysfs_afu_remove(afu);
@@ -1101,6 +1119,12 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic
adapter->dev.release = release_adapter;
dev_set_drvdata(&pdev->dev, adapter);
+ /*
+ * Hypervisor controls PSL timebase initialization (p1 register).
+ * On FW840, PSL is initialized.
+ */
+ adapter->psl_timebase_synced = true;
+
if ((rc = cxl_of_read_adapter_handle(adapter, np)))
goto err1;
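
The recovery rework replaces on-demand, mutex-guarded recovery with a single self-rearming delayed work item: the handler polls the AFU state and re-schedules itself while handle_err is set, and teardown clears the flag before flush_delayed_work() so no new run can be queued after the flush. The generic shape of that pattern, with hypothetical names:

    #include <linux/workqueue.h>

    static void foo_poll(struct work_struct *work)
    {
        struct foo *foo = container_of(to_delayed_work(work),
                                       struct foo, poll_work);

        foo_check_state(foo);                   /* hypothetical state check */
        if (foo->polling)                       /* cleared at teardown */
            schedule_delayed_work(&foo->poll_work,
                                  msecs_to_jiffies(3000));
    }

    /* setup */
    foo->polling = true;
    INIT_DELAYED_WORK(&foo->poll_work, foo_poll);
    schedule_delayed_work(&foo->poll_work, msecs_to_jiffies(1000));

    /* teardown: stop re-arming first, then wait out the last run */
    foo->polling = false;
    flush_delayed_work(&foo->poll_work);
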
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index ecf7557cd..55d8a1459 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -186,16 +186,25 @@ static int spa_max_procs(int spa_size)
int cxl_alloc_spa(struct cxl_afu *afu)
{
+ unsigned spa_size;
+
/* Work out how many pages to allocate */
afu->native->spa_order = 0;
do {
afu->native->spa_order++;
- afu->native->spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
+ spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
+
+ if (spa_size > 0x100000) {
+ dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
+ afu->native->spa_max_procs, afu->native->spa_size);
+ afu->num_procs = afu->native->spa_max_procs;
+ break;
+ }
+
+ afu->native->spa_size = spa_size;
afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
} while (afu->native->spa_max_procs < afu->num_procs);
- WARN_ON(afu->native->spa_size > 0x100000); /* Max size supported by the hardware */
-
if (!(afu->native->spa = (struct cxl_process_element *)
__get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
@@ -486,8 +495,9 @@ static u64 calculate_sr(struct cxl_context *ctx)
if (mfspr(SPRN_LPCR) & LPCR_TC)
sr |= CXL_PSL_SR_An_TC;
if (ctx->kernel) {
- sr |= CXL_PSL_SR_An_R | (mfmsr() & MSR_SF);
- sr |= CXL_PSL_SR_An_HV;
+ if (!ctx->real_mode)
+ sr |= CXL_PSL_SR_An_R;
+ sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
} else {
sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
sr &= ~(CXL_PSL_SR_An_HV);
@@ -526,6 +536,15 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0);
ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1);
+ /*
+ * Ensure we have the multiplexed PSL interrupt set up to take faults
+ * for kernel contexts that may not have allocated any AFU IRQs at all:
+ */
+ if (ctx->irqs.range[0] == 0) {
+ ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
+ ctx->irqs.range[0] = 1;
+ }
+
for (r = 0; r < CXL_IRQ_RANGES; r++) {
ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 2844e975b..a08fcc888 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -21,6 +21,7 @@
#include <asm/msi_bitmap.h>
#include <asm/pnv-pci.h>
#include <asm/io.h>
+#include <asm/reg.h>
#include "cxl.h"
#include <misc/cxl.h>
@@ -321,12 +322,43 @@ static void dump_afu_descriptor(struct cxl_afu *afu)
#undef show_reg
}
+#define CAPP_UNIT0_ID 0xBA
+#define CAPP_UNIT1_ID 0xBE
+
+static u64 get_capp_unit_id(struct device_node *np)
+{
+ u32 phb_index;
+
+ /*
+ * For chips other than POWER8NVL, we only have CAPP 0,
+ * irrespective of which PHB is used.
+ */
+ if (!pvr_version_is(PVR_POWER8NVL))
+ return CAPP_UNIT0_ID;
+
+ /*
+ * For POWER8NVL, assume CAPP 0 is attached to PHB0 and
+ * CAPP 1 is attached to PHB1.
+ */
+ if (of_property_read_u32(np, "ibm,phb-index", &phb_index))
+ return 0;
+
+ if (phb_index == 0)
+ return CAPP_UNIT0_ID;
+
+ if (phb_index == 1)
+ return CAPP_UNIT1_ID;
+
+ return 0;
+}
+
static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
struct device_node *np;
const __be32 *prop;
u64 psl_dsnctl;
u64 chipid;
+ u64 capp_unit_id;
if (!(np = pnv_pci_get_phb_node(dev)))
return -ENODEV;
@@ -336,10 +368,19 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
if (!np)
return -ENODEV;
chipid = be32_to_cpup(prop);
+ capp_unit_id = get_capp_unit_id(np);
of_node_put(np);
+ if (!capp_unit_id) {
+ pr_err("cxl: invalid capp unit id\n");
+ return -ENODEV;
+ }
+ psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */
+ psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */
/* Tell PSL where to route data to */
- psl_dsnctl = 0x02E8900002000000ULL | (chipid << (63-5));
+ psl_dsnctl |= (chipid << (63-5));
+ psl_dsnctl |= (capp_unit_id << (63-13));
+
cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
/* snoop write mask */
@@ -355,22 +396,24 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
#define _2048_250MHZ_CYCLES 1
-static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
+static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
{
u64 psl_tb;
int delta;
unsigned int retry = 0;
struct device_node *np;
+ adapter->psl_timebase_synced = false;
+
if (!(np = pnv_pci_get_phb_node(dev)))
- return -ENODEV;
+ return;
/* Do not fail when CAPP timebase sync is not supported by OPAL */
of_node_get(np);
if (! of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
of_node_put(np);
- pr_err("PSL: Timebase sync: OPAL support missing\n");
- return 0;
+ dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n");
+ return;
}
of_node_put(np);
@@ -389,8 +432,8 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
do {
msleep(1);
if (retry++ > 5) {
- pr_err("PSL: Timebase sync: giving up!\n");
- return -EIO;
+ dev_info(&dev->dev, "PSL timebase can't synchronize\n");
+ return;
}
psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase);
delta = mftb() - psl_tb;
@@ -398,7 +441,8 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
delta = -delta;
} while (tb_to_ns(delta) > 16000);
- return 0;
+ adapter->psl_timebase_synced = true;
+ return;
}
static int init_implementation_afu_regs(struct cxl_afu *afu)
@@ -1144,8 +1188,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
goto err;
- if ((rc = cxl_setup_psl_timebase(adapter, dev)))
- goto err;
+ /* Ignore error, adapter init is not dependent on timebase sync */
+ cxl_setup_psl_timebase(adapter, dev);
if ((rc = cxl_native_register_psl_err_irq(adapter)))
goto err;
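
The PSL_DSNCTL setup uses IBM big-endian bit numbering, where bit 0 is the most significant bit of the 64-bit register; a field whose low-order bit sits at IBM bit N is therefore shifted left by (63 - N). Restating the register assembly from the hunk above as a worked example:

    u64 v = 0x0000900000000000ULL;      /* pteupd ttype, scdone */
    v |= 0x2ULL << (63 - 38);           /* MMIO hang pulse: 256 us, low bit at IBM bit 38 */
    v |= chipid << (63 - 5);            /* chip to route data to, low bit at IBM bit 5 */
    v |= capp_unit_id << (63 - 13);     /* CAPP unit within the chip, at IBM bit 13 */
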
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 25913c087..b043c20f1 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -57,6 +57,15 @@ static ssize_t image_loaded_show(struct device *device,
return scnprintf(buf, PAGE_SIZE, "factory\n");
}
+static ssize_t psl_timebase_synced_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl *adapter = to_cxl_adapter(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->psl_timebase_synced);
+}
+
static ssize_t reset_adapter_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -142,6 +151,7 @@ static struct device_attribute adapter_attrs[] = {
__ATTR_RO(psl_revision),
__ATTR_RO(base_image),
__ATTR_RO(image_loaded),
+ __ATTR_RO(psl_timebase_synced),
__ATTR_RW(load_image_on_perst),
__ATTR_RW(perst_reloads_same_image),
__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index cfc493c2e..c4e41c266 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -3,7 +3,6 @@ menu "EEPROM support"
config EEPROM_AT24
tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
depends on I2C && SYSFS
- select REGMAP
select NVMEM
help
Enable this driver to get read/write support to most I2C EEPROMs
@@ -32,7 +31,6 @@ config EEPROM_AT24
config EEPROM_AT25
tristate "SPI EEPROMs from most vendors"
depends on SPI && SYSFS
- select REGMAP
select NVMEM
help
Enable this driver to get read/write support to most SPI EEPROMs,
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 089d6943f..9ceb63b62 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -23,7 +23,6 @@
#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/nvmem-provider.h>
-#include <linux/regmap.h>
#include <linux/platform_data/at24.h>
/*
@@ -69,7 +68,6 @@ struct at24_data {
unsigned write_max;
unsigned num_addresses;
- struct regmap_config regmap_config;
struct nvmem_config nvmem_config;
struct nvmem_device *nvmem;
@@ -245,17 +243,16 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
if (status == count)
return count;
- /* REVISIT: at HZ=100, this is sloooow */
- msleep(1);
+ usleep_range(1000, 1500);
} while (time_before(read_time, timeout));
return -ETIMEDOUT;
}
-static ssize_t at24_read(struct at24_data *at24,
- char *buf, loff_t off, size_t count)
+static int at24_read(void *priv, unsigned int off, void *val, size_t count)
{
- ssize_t retval = 0;
+ struct at24_data *at24 = priv;
+ char *buf = val;
if (unlikely(!count))
return count;
@@ -267,23 +264,21 @@ static ssize_t at24_read(struct at24_data *at24,
mutex_lock(&at24->lock);
while (count) {
- ssize_t status;
+ int status;
status = at24_eeprom_read(at24, buf, off, count);
- if (status <= 0) {
- if (retval == 0)
- retval = status;
- break;
+ if (status < 0) {
+ mutex_unlock(&at24->lock);
+ return status;
}
buf += status;
off += status;
count -= status;
- retval += status;
}
mutex_unlock(&at24->lock);
- return retval;
+ return 0;
}
/*
@@ -365,20 +360,19 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf,
if (status == count)
return count;
- /* REVISIT: at HZ=100, this is sloooow */
- msleep(1);
+ usleep_range(1000, 1500);
} while (time_before(write_time, timeout));
return -ETIMEDOUT;
}
-static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
- size_t count)
+static int at24_write(void *priv, unsigned int off, void *val, size_t count)
{
- ssize_t retval = 0;
+ struct at24_data *at24 = priv;
+ char *buf = val;
if (unlikely(!count))
- return count;
+ return -EINVAL;
/*
* Write data to chip, protecting against concurrent updates
@@ -387,70 +381,23 @@ static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
mutex_lock(&at24->lock);
while (count) {
- ssize_t status;
+ int status;
status = at24_eeprom_write(at24, buf, off, count);
- if (status <= 0) {
- if (retval == 0)
- retval = status;
- break;
+ if (status < 0) {
+ mutex_unlock(&at24->lock);
+ return status;
}
buf += status;
off += status;
count -= status;
- retval += status;
}
mutex_unlock(&at24->lock);
- return retval;
-}
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * Provide a regmap interface, which is registered with the NVMEM
- * framework
-*/
-static int at24_regmap_read(void *context, const void *reg, size_t reg_size,
- void *val, size_t val_size)
-{
- struct at24_data *at24 = context;
- off_t offset = *(u32 *)reg;
- int err;
-
- err = at24_read(at24, val, offset, val_size);
- if (err)
- return err;
return 0;
}
-static int at24_regmap_write(void *context, const void *data, size_t count)
-{
- struct at24_data *at24 = context;
- const char *buf;
- u32 offset;
- size_t len;
- int err;
-
- memcpy(&offset, data, sizeof(offset));
- buf = (const char *)data + sizeof(offset);
- len = count - sizeof(offset);
-
- err = at24_write(at24, buf, offset, len);
- if (err)
- return err;
- return 0;
-}
-
-static const struct regmap_bus at24_regmap_bus = {
- .read = at24_regmap_read,
- .write = at24_regmap_write,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
-/*-------------------------------------------------------------------------*/
-
#ifdef CONFIG_OF
static void at24_get_ofdata(struct i2c_client *client,
struct at24_platform_data *chip)
@@ -482,7 +429,6 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct at24_data *at24;
int err;
unsigned i, num_addresses;
- struct regmap *regmap;
if (client->dev.platform_data) {
chip = *(struct at24_platform_data *)client->dev.platform_data;
@@ -544,10 +490,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
} else {
return -EPFNOSUPPORT;
}
- }
- /* Use I2C operations unless we're stuck with SMBus extensions. */
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
if (i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
use_smbus_write = I2C_SMBUS_I2C_BLOCK_DATA;
@@ -612,19 +555,6 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
- at24->regmap_config.reg_bits = 32;
- at24->regmap_config.val_bits = 8;
- at24->regmap_config.reg_stride = 1;
- at24->regmap_config.max_register = chip.byte_len - 1;
-
- regmap = devm_regmap_init(&client->dev, &at24_regmap_bus, at24,
- &at24->regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(&client->dev, "regmap init failed\n");
- err = PTR_ERR(regmap);
- goto err_clients;
- }
-
at24->nvmem_config.name = dev_name(&client->dev);
at24->nvmem_config.dev = &client->dev;
at24->nvmem_config.read_only = !writable;
@@ -632,6 +562,12 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
at24->nvmem_config.owner = THIS_MODULE;
at24->nvmem_config.compat = true;
at24->nvmem_config.base_dev = &client->dev;
+ at24->nvmem_config.reg_read = at24_read;
+ at24->nvmem_config.reg_write = at24_write;
+ at24->nvmem_config.priv = at24;
+ at24->nvmem_config.stride = 4;
+ at24->nvmem_config.word_size = 1;
+ at24->nvmem_config.size = chip.byte_len;
at24->nvmem = nvmem_register(&at24->nvmem_config);
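
The at24/at25/eeprom_93xx46 conversions all follow the same scheme: instead of bridging through a fake regmap, the NVMEM core now calls driver-supplied reg_read()/reg_write() callbacks with the priv pointer from nvmem_config. A minimal provider sketch (foo_hw_read() is a hypothetical transfer helper returning 0 on success):

    #include <linux/nvmem-provider.h>

    static int foo_reg_read(void *priv, unsigned int off, void *val, size_t count)
    {
        struct foo *foo = priv;

        return foo_hw_read(foo, off, val, count);
    }

    /* at probe time */
    foo->nvmem_config.reg_read = foo_reg_read;
    foo->nvmem_config.reg_write = foo_reg_write;  /* same shape, hypothetical */
    foo->nvmem_config.priv = foo;
    foo->nvmem_config.stride = 4;       /* minimum access granularity */
    foo->nvmem_config.word_size = 1;    /* bytes per word */
    foo->nvmem_config.size = foo->size;
    foo->nvmem = nvmem_register(&foo->nvmem_config);
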
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index fa36a6e37..2c6c7c8e3 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -17,7 +17,6 @@
#include <linux/sched.h>
#include <linux/nvmem-provider.h>
-#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
#include <linux/property.h>
@@ -34,7 +33,6 @@ struct at25_data {
struct mutex lock;
struct spi_eeprom chip;
unsigned addrlen;
- struct regmap_config regmap_config;
struct nvmem_config nvmem_config;
struct nvmem_device *nvmem;
};
@@ -65,14 +63,11 @@ struct at25_data {
#define io_limit PAGE_SIZE /* bytes */
-static ssize_t
-at25_ee_read(
- struct at25_data *at25,
- char *buf,
- unsigned offset,
- size_t count
-)
+static int at25_ee_read(void *priv, unsigned int offset,
+ void *val, size_t count)
{
+ struct at25_data *at25 = priv;
+ char *buf = val;
u8 command[EE_MAXADDRLEN + 1];
u8 *cp;
ssize_t status;
@@ -81,11 +76,11 @@ at25_ee_read(
u8 instr;
if (unlikely(offset >= at25->chip.byte_len))
- return 0;
+ return -EINVAL;
if ((offset + count) > at25->chip.byte_len)
count = at25->chip.byte_len - offset;
if (unlikely(!count))
- return count;
+ return -EINVAL;
cp = command;
@@ -131,28 +126,14 @@ at25_ee_read(
count, offset, (int) status);
mutex_unlock(&at25->lock);
- return status ? status : count;
+ return status;
}
-static int at25_regmap_read(void *context, const void *reg, size_t reg_size,
- void *val, size_t val_size)
+static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
{
- struct at25_data *at25 = context;
- off_t offset = *(u32 *)reg;
- int err;
-
- err = at25_ee_read(at25, val, offset, val_size);
- if (err)
- return err;
- return 0;
-}
-
-static ssize_t
-at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
- size_t count)
-{
- ssize_t status = 0;
- unsigned written = 0;
+ struct at25_data *at25 = priv;
+ const char *buf = val;
+ int status = 0;
unsigned buf_size;
u8 *bounce;
@@ -161,7 +142,7 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
if ((off + count) > at25->chip.byte_len)
count = at25->chip.byte_len - off;
if (unlikely(!count))
- return count;
+ return -EINVAL;
/* Temp buffer starts with command and address */
buf_size = at25->chip.page_size;
@@ -256,40 +237,15 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
off += segment;
buf += segment;
count -= segment;
- written += segment;
} while (count > 0);
mutex_unlock(&at25->lock);
kfree(bounce);
- return written ? written : status;
+ return status;
}
-static int at25_regmap_write(void *context, const void *data, size_t count)
-{
- struct at25_data *at25 = context;
- const char *buf;
- u32 offset;
- size_t len;
- int err;
-
- memcpy(&offset, data, sizeof(offset));
- buf = (const char *)data + sizeof(offset);
- len = count - sizeof(offset);
-
- err = at25_ee_write(at25, buf, offset, len);
- if (err)
- return err;
- return 0;
-}
-
-static const struct regmap_bus at25_regmap_bus = {
- .read = at25_regmap_read,
- .write = at25_regmap_write,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
/*-------------------------------------------------------------------------*/
static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip)
@@ -349,7 +305,6 @@ static int at25_probe(struct spi_device *spi)
{
struct at25_data *at25 = NULL;
struct spi_eeprom chip;
- struct regmap *regmap;
int err;
int sr;
int addrlen;
@@ -390,22 +345,10 @@ static int at25_probe(struct spi_device *spi)
mutex_init(&at25->lock);
at25->chip = chip;
- at25->spi = spi_dev_get(spi);
+ at25->spi = spi;
spi_set_drvdata(spi, at25);
at25->addrlen = addrlen;
- at25->regmap_config.reg_bits = 32;
- at25->regmap_config.val_bits = 8;
- at25->regmap_config.reg_stride = 1;
- at25->regmap_config.max_register = chip.byte_len - 1;
-
- regmap = devm_regmap_init(&spi->dev, &at25_regmap_bus, at25,
- &at25->regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
-
at25->nvmem_config.name = dev_name(&spi->dev);
at25->nvmem_config.dev = &spi->dev;
at25->nvmem_config.read_only = chip.flags & EE_READONLY;
@@ -413,6 +356,12 @@ static int at25_probe(struct spi_device *spi)
at25->nvmem_config.owner = THIS_MODULE;
at25->nvmem_config.compat = true;
at25->nvmem_config.base_dev = &spi->dev;
+ at25->nvmem_config.reg_read = at25_ee_read;
+ at25->nvmem_config.reg_write = at25_ee_write;
+ at25->nvmem_config.priv = at25;
+ at25->nvmem_config.stride = 4;
+ at25->nvmem_config.word_size = 1;
+ at25->nvmem_config.size = chip.byte_len;
at25->nvmem = nvmem_register(&at25->nvmem_config);
if (IS_ERR(at25->nvmem))
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 426fe2fd5..94cc035aa 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/nvmem-provider.h>
-#include <linux/regmap.h>
#include <linux/eeprom_93xx46.h>
#define OP_START 0x4
@@ -43,7 +42,6 @@ struct eeprom_93xx46_dev {
struct spi_device *spi;
struct eeprom_93xx46_platform_data *pdata;
struct mutex lock;
- struct regmap_config regmap_config;
struct nvmem_config nvmem_config;
struct nvmem_device *nvmem;
int addrlen;
@@ -60,11 +58,12 @@ static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
}
-static ssize_t
-eeprom_93xx46_read(struct eeprom_93xx46_dev *edev, char *buf,
- unsigned off, size_t count)
+static int eeprom_93xx46_read(void *priv, unsigned int off,
+ void *val, size_t count)
{
- ssize_t ret = 0;
+ struct eeprom_93xx46_dev *edev = priv;
+ char *buf = val;
+ int err = 0;
if (unlikely(off >= edev->size))
return 0;
@@ -84,7 +83,6 @@ eeprom_93xx46_read(struct eeprom_93xx46_dev *edev, char *buf,
u16 cmd_addr = OP_READ << edev->addrlen;
size_t nbytes = count;
int bits;
- int err;
if (edev->addrlen == 7) {
cmd_addr |= off & 0x7f;
@@ -120,21 +118,20 @@ eeprom_93xx46_read(struct eeprom_93xx46_dev *edev, char *buf,
if (err) {
dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
nbytes, (int)off, err);
- ret = err;
break;
}
buf += nbytes;
off += nbytes;
count -= nbytes;
- ret += nbytes;
}
if (edev->pdata->finish)
edev->pdata->finish(edev);
mutex_unlock(&edev->lock);
- return ret;
+
+ return err;
}
static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
@@ -230,10 +227,11 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
return ret;
}
-static ssize_t
-eeprom_93xx46_write(struct eeprom_93xx46_dev *edev, const char *buf,
- loff_t off, size_t count)
+static int eeprom_93xx46_write(void *priv, unsigned int off,
+ void *val, size_t count)
{
+ struct eeprom_93xx46_dev *edev = priv;
+ char *buf = val;
int i, ret, step = 1;
if (unlikely(off >= edev->size))
@@ -275,52 +273,9 @@ eeprom_93xx46_write(struct eeprom_93xx46_dev *edev, const char *buf,
/* erase/write disable */
eeprom_93xx46_ew(edev, 0);
- return ret ? : count;
-}
-
-/*
- * Provide a regmap interface, which is registered with the NVMEM
- * framework
-*/
-static int eeprom_93xx46_regmap_read(void *context, const void *reg,
- size_t reg_size, void *val,
- size_t val_size)
-{
- struct eeprom_93xx46_dev *eeprom_93xx46 = context;
- off_t offset = *(u32 *)reg;
- int err;
-
- err = eeprom_93xx46_read(eeprom_93xx46, val, offset, val_size);
- if (err)
- return err;
- return 0;
-}
-
-static int eeprom_93xx46_regmap_write(void *context, const void *data,
- size_t count)
-{
- struct eeprom_93xx46_dev *eeprom_93xx46 = context;
- const char *buf;
- u32 offset;
- size_t len;
- int err;
-
- memcpy(&offset, data, sizeof(offset));
- buf = (const char *)data + sizeof(offset);
- len = count - sizeof(offset);
-
- err = eeprom_93xx46_write(eeprom_93xx46, buf, offset, len);
- if (err)
- return err;
- return 0;
+ return ret;
}
-static const struct regmap_bus eeprom_93xx46_regmap_bus = {
- .read = eeprom_93xx46_regmap_read,
- .write = eeprom_93xx46_regmap_write,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
{
struct eeprom_93xx46_platform_data *pd = edev->pdata;
@@ -480,7 +435,6 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
{
struct eeprom_93xx46_platform_data *pd;
struct eeprom_93xx46_dev *edev;
- struct regmap *regmap;
int err;
if (spi->dev.of_node) {
@@ -511,24 +465,10 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
mutex_init(&edev->lock);
- edev->spi = spi_dev_get(spi);
+ edev->spi = spi;
edev->pdata = pd;
edev->size = 128;
-
- edev->regmap_config.reg_bits = 32;
- edev->regmap_config.val_bits = 8;
- edev->regmap_config.reg_stride = 1;
- edev->regmap_config.max_register = edev->size - 1;
-
- regmap = devm_regmap_init(&spi->dev, &eeprom_93xx46_regmap_bus, edev,
- &edev->regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "regmap init failed\n");
- err = PTR_ERR(regmap);
- goto fail;
- }
-
edev->nvmem_config.name = dev_name(&spi->dev);
edev->nvmem_config.dev = &spi->dev;
edev->nvmem_config.read_only = pd->flags & EE_READONLY;
@@ -536,6 +476,12 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
edev->nvmem_config.owner = THIS_MODULE;
edev->nvmem_config.compat = true;
edev->nvmem_config.base_dev = &spi->dev;
+ edev->nvmem_config.reg_read = eeprom_93xx46_read;
+ edev->nvmem_config.reg_write = eeprom_93xx46_write;
+ edev->nvmem_config.priv = edev;
+ edev->nvmem_config.stride = 4;
+ edev->nvmem_config.word_size = 1;
+ edev->nvmem_config.size = edev->size;
edev->nvmem = nvmem_register(&edev->nvmem_config);
if (IS_ERR(edev->nvmem)) {
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 038b6dd24..1f33fea92 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -589,6 +589,7 @@ static int mei_cl_device_probe(struct device *dev)
struct mei_cl_device *cldev;
struct mei_cl_driver *cldrv;
const struct mei_cl_device_id *id;
+ int ret;
cldev = to_mei_cl_device(dev);
cldrv = to_mei_cl_driver(dev->driver);
@@ -603,9 +604,12 @@ static int mei_cl_device_probe(struct device *dev)
if (!id)
return -ENODEV;
- __module_get(THIS_MODULE);
+ ret = cldrv->probe(cldev, id);
+ if (ret)
+ return ret;
- return cldrv->probe(cldev, id);
+ __module_get(THIS_MODULE);
+ return 0;
}
/**
@@ -643,11 +647,8 @@ static ssize_t name_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
- size_t len;
- len = snprintf(buf, PAGE_SIZE, "%s", cldev->name);
-
- return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+ return scnprintf(buf, PAGE_SIZE, "%s", cldev->name);
}
static DEVICE_ATTR_RO(name);
@@ -656,11 +657,8 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
- size_t len;
-
- len = snprintf(buf, PAGE_SIZE, "%pUl", uuid);
- return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+ return scnprintf(buf, PAGE_SIZE, "%pUl", uuid);
}
static DEVICE_ATTR_RO(uuid);
@@ -669,11 +667,8 @@ static ssize_t version_show(struct device *dev, struct device_attribute *a,
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
u8 version = mei_me_cl_ver(cldev->me_cl);
- size_t len;
-
- len = snprintf(buf, PAGE_SIZE, "%02X", version);
- return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+ return scnprintf(buf, PAGE_SIZE, "%02X", version);
}
static DEVICE_ATTR_RO(version);
@@ -682,10 +677,8 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
{
struct mei_cl_device *cldev = to_mei_cl_device(dev);
const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
- size_t len;
- len = snprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid);
- return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+ return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid);
}
static DEVICE_ATTR_RO(modalias);
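
snprintf() returns the length the output would have had, which can exceed the buffer and therefore needed the manual PAGE_SIZE clamp; scnprintf() returns the number of bytes actually written (excluding the trailing NUL), which is exactly what a sysfs show() routine must report. The before/after in miniature:

    /* before: the result must be clamped by hand */
    len = snprintf(buf, PAGE_SIZE, "%s", name);
    return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;

    /* after: scnprintf() already returns the written length */
    return scnprintf(buf, PAGE_SIZE, "%s", name);
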
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 09f5280fa..641c1a566 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -727,6 +727,11 @@ static void mei_cl_wake_all(struct mei_cl *cl)
cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
wake_up_interruptible(&cl->ev_wait);
}
+ /* synchronized under device mutex */
+ if (waitqueue_active(&cl->wait)) {
+ cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
+ wake_up(&cl->wait);
+ }
}
/**
@@ -879,12 +884,15 @@ static int __mei_cl_disconnect(struct mei_cl *cl)
}
mutex_unlock(&dev->device_lock);
- wait_event_timeout(cl->wait, cl->state == MEI_FILE_DISCONNECT_REPLY,
+ wait_event_timeout(cl->wait,
+ cl->state == MEI_FILE_DISCONNECT_REPLY ||
+ cl->state == MEI_FILE_DISCONNECTED,
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
rets = cl->status;
- if (cl->state != MEI_FILE_DISCONNECT_REPLY) {
+ if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
+ cl->state != MEI_FILE_DISCONNECTED) {
cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
rets = -ETIME;
}
@@ -1085,6 +1093,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
(cl->state == MEI_FILE_CONNECTED ||
+ cl->state == MEI_FILE_DISCONNECTED ||
cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
cl->state == MEI_FILE_DISCONNECT_REPLY),
mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
@@ -1333,16 +1342,13 @@ int mei_cl_notify_request(struct mei_cl *cl,
}
mutex_unlock(&dev->device_lock);
- wait_event_timeout(cl->wait, cl->notify_en == request,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ wait_event_timeout(cl->wait,
+ cl->notify_en == request || !mei_cl_is_connected(cl),
+ mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
mutex_lock(&dev->device_lock);
- if (cl->notify_en != request) {
- mei_io_list_flush(&dev->ctrl_rd_list, cl);
- mei_io_list_flush(&dev->ctrl_wr_list, cl);
- if (!cl->status)
- cl->status = -EFAULT;
- }
+ if (cl->notify_en != request && !cl->status)
+ cl->status = -EFAULT;
rets = cl->status;
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 8fe1ef821..5aa606c8a 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -113,8 +113,6 @@ void mei_hbm_idle(struct mei_device *dev)
*/
void mei_hbm_reset(struct mei_device *dev)
{
- dev->me_client_index = 0;
-
mei_me_cl_rm_all(dev);
mei_hbm_idle(dev);
@@ -530,24 +528,22 @@ static void mei_hbm_cl_notify(struct mei_device *dev,
* mei_hbm_prop_req - request property for a single client
*
* @dev: the device structure
+ * @start_idx: client index to start search
*
* Return: 0 on success and < 0 on failure
*/
-
-static int mei_hbm_prop_req(struct mei_device *dev)
+static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
{
-
struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
struct hbm_props_request *prop_req;
const size_t len = sizeof(struct hbm_props_request);
- unsigned long next_client_index;
+ unsigned long addr;
int ret;
- next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
- dev->me_client_index);
+ addr = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX, start_idx);
/* We got all client properties */
- if (next_client_index == MEI_CLIENTS_MAX) {
+ if (addr == MEI_CLIENTS_MAX) {
dev->hbm_state = MEI_HBM_STARTED;
mei_host_client_init(dev);
@@ -560,7 +556,7 @@ static int mei_hbm_prop_req(struct mei_device *dev)
memset(prop_req, 0, sizeof(struct hbm_props_request));
prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
- prop_req->me_addr = next_client_index;
+ prop_req->me_addr = addr;
ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
if (ret) {
@@ -570,7 +566,6 @@ static int mei_hbm_prop_req(struct mei_device *dev)
}
dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
- dev->me_client_index = next_client_index;
return 0;
}
@@ -1151,10 +1146,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
mei_hbm_me_cl_add(dev, props_res);
- dev->me_client_index++;
-
/* request property for the next client */
- if (mei_hbm_prop_req(dev))
+ if (mei_hbm_prop_req(dev, props_res->me_addr + 1))
return -EIO;
break;
@@ -1180,7 +1173,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->hbm_state = MEI_HBM_CLIENT_PROPERTIES;
/* first property request */
- if (mei_hbm_prop_req(dev))
+ if (mei_hbm_prop_req(dev, 0))
return -EIO;
break;
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 859bdac22..c9e01021e 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -396,7 +396,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @me_clients : list of FW clients
* @me_clients_map : FW clients bit map
* @host_clients_map : host clients id pool
- * @me_client_index : last FW client index in enumeration
*
* @allow_fixed_address: allow user space to connect a fixed client
* @override_fixed_address: force allow fixed address behavior
@@ -486,7 +485,6 @@ struct mei_device {
struct list_head me_clients;
DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
DECLARE_BITMAP(host_clients_map, MEI_CLIENTS_MAX);
- unsigned long me_client_index;
bool allow_fixed_address;
bool override_fixed_address;
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index 2e4f3ba75..89e5917e1 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -132,6 +132,7 @@ config VOP
tristate "VOP Driver"
depends on 64BIT && PCI && X86 && VOP_BUS
select VHOST_RING
+ select VIRTIO
help
This enables VOP (Virtio over PCIe) Driver support for the Intel
Many Integrated Core (MIC) family of PCIe form factor coprocessor
diff --git a/drivers/misc/mic/host/mic_boot.c b/drivers/misc/mic/host/mic_boot.c
index 8c91c9950..e047efd83 100644
--- a/drivers/misc/mic/host/mic_boot.c
+++ b/drivers/misc/mic/host/mic_boot.c
@@ -76,7 +76,7 @@ static void __mic_free_irq(struct vop_device *vpdev,
{
struct mic_device *mdev = vpdev_to_mdev(&vpdev->dev);
- return mic_free_irq(mdev, cookie, data);
+ mic_free_irq(mdev, cookie, data);
}
static void __mic_ack_interrupt(struct vop_device *vpdev, int num)
@@ -272,7 +272,7 @@ ___mic_free_irq(struct scif_hw_dev *scdev,
{
struct mic_device *mdev = scdev_to_mdev(scdev);
- return mic_free_irq(mdev, cookie, data);
+ mic_free_irq(mdev, cookie, data);
}
static void ___mic_ack_interrupt(struct scif_hw_dev *scdev, int num)
@@ -362,7 +362,7 @@ _mic_request_threaded_irq(struct mbus_device *mbdev,
static void _mic_free_irq(struct mbus_device *mbdev,
struct mic_irq *cookie, void *data)
{
- return mic_free_irq(mbdev_to_mdev(mbdev), cookie, data);
+ mic_free_irq(mbdev_to_mdev(mbdev), cookie, data);
}
static void _mic_ack_interrupt(struct mbus_device *mbdev, int num)
diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
index 7f2c96f57..cac3bcc30 100644
--- a/drivers/misc/mic/scif/scif_fence.c
+++ b/drivers/misc/mic/scif/scif_fence.c
@@ -27,7 +27,8 @@
void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg)
{
struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
- int mark, err;
+ int mark = 0;
+ int err;
err = _scif_fence_mark(ep, &mark);
if (err)
diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c
index af57d2caa..88e45234d 100644
--- a/drivers/misc/mic/vop/vop_vringh.c
+++ b/drivers/misc/mic/vop/vop_vringh.c
@@ -950,11 +950,6 @@ static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
ret = -EINVAL;
goto free_ret;
}
- /* Ensure desc has not changed between the two reads */
- if (memcmp(&dd, dd_config, sizeof(dd))) {
- ret = -EINVAL;
- goto free_ret;
- }
mutex_lock(&vdev->vdev_mutex);
mutex_lock(&vi->vop_mutex);
ret = vop_virtio_add_device(vdev, dd_config);
diff --git a/drivers/misc/qcom-coincell.c b/drivers/misc/qcom-coincell.c
index 7b4a2da48..829a61dbd 100644
--- a/drivers/misc/qcom-coincell.c
+++ b/drivers/misc/qcom-coincell.c
@@ -94,7 +94,8 @@ static int qcom_coincell_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct qcom_coincell chgr;
- u32 rset, vset;
+ u32 rset = 0;
+ u32 vset = 0;
bool enable;
int rc;
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 967b9dd24..030769018 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -718,8 +718,8 @@ cberr:
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
void *mesg, int lines)
{
- unsigned long m, *val = mesg, gpa, save;
- int ret;
+ unsigned long m;
+ int ret, loops = 200; /* experimentally determined */
m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
if (lines == 2) {
@@ -735,22 +735,28 @@ static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
return MQE_OK;
/*
- * Send a cross-partition interrupt to the SSI that contains the target
- * message queue. Normally, the interrupt is automatically delivered by
- * hardware but some error conditions require explicit delivery.
- * Use the GRU to deliver the interrupt. Otherwise partition failures
+ * Send a noop message in order to deliver a cross-partition interrupt
+ * to the SSI that contains the target message queue. Normally, the
+ * interrupt is automatically delivered by hardware following mesq
+ * operations, but some error conditions require explicit delivery.
+ * The noop message will trigger delivery. Otherwise partition failures
* could cause unrecovered errors.
*/
- gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT);
- save = *val;
- *val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector,
- dest_Fixed);
- gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA);
- ret = gru_wait(cb);
- *val = save;
- if (ret != CBS_IDLE)
- return MQE_UNEXPECTED_CB_ERR;
- return MQE_OK;
+ do {
+ ret = send_noop_message(cb, mqd, mesg);
+ } while ((ret == MQIE_AGAIN || ret == MQE_CONGESTION) && (loops-- > 0));
+
+ if (ret == MQIE_AGAIN || ret == MQE_CONGESTION) {
+ /*
+ * Don't tell the app to resend the message; it has
+ * already been sent successfully. We simply return OK
+ * (rather than fail the send with MQE_UNEXPECTED_CB_ERR),
+ * assuming that the other side is receiving enough
+ * interrupts to get this message processed anyway.
+ */
+ ret = MQE_OK;
+ }
+ return ret;
}
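
The GRU fix bounds the interrupt-delivery retry with a loop counter instead of failing the whole send on a transient error, then deliberately reports success once the message itself is known to be delivered. A generic sketch of the bounded-retry part (the 200-iteration cap mirrors the experimentally chosen value above; foo_try_send() is hypothetical):

    int ret, loops = 200;

    do {
        ret = foo_try_send();           /* transient-failure operation */
    } while ((ret == -EAGAIN || ret == -EBUSY) && --loops > 0);

    if (ret == -EAGAIN || ret == -EBUSY)
        ret = 0;                        /* give up quietly, as above */
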
/*
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 69cdabea9..f84b53d6c 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -364,8 +364,8 @@ static int sram_probe(struct platform_device *pdev)
sram->virt_base = devm_ioremap(sram->dev, res->start, size);
else
sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size);
- if (IS_ERR(sram->virt_base))
- return PTR_ERR(sram->virt_base);
+ if (!sram->virt_base)
+ return -ENOMEM;
sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
NUMA_NO_NODE, NULL);
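
devm_ioremap() and devm_ioremap_wc() return NULL on failure rather than an ERR_PTR() value, so the old IS_ERR()/PTR_ERR() test could never trigger and a failed mapping would only surface later as a NULL dereference. The correct check:

    base = devm_ioremap(dev, res->start, size);
    if (!base)                          /* NULL, not ERR_PTR, on failure */
        return -ENOMEM;
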
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index c976d961b..3c4399251 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -78,7 +78,6 @@ static void validate_firmware_response(struct kim_data_s *kim_gdata)
memcpy(kim_gdata->resp_buffer,
kim_gdata->rx_skb->data,
kim_gdata->rx_skb->len);
- complete_all(&kim_gdata->kim_rcvd);
kim_gdata->rx_state = ST_W4_PACKET_TYPE;
kim_gdata->rx_skb = NULL;
kim_gdata->rx_count = 0;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index b0a27413c..c5472e3c9 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -35,6 +35,7 @@
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
+#include <linux/idr.h>
#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
@@ -78,14 +79,14 @@ static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
/*
* We've only got one major, so number of mmcblk devices is
* limited to (1 << 20) / number of minors per device. It is also
- * currently limited by the size of the static bitmaps below.
+ * limited by the MAX_DEVICES below.
*/
static int max_devices;
#define MAX_DEVICES 256
-/* TODO: Replace these with struct ida */
-static DECLARE_BITMAP(dev_use, MAX_DEVICES);
+static DEFINE_IDA(mmc_blk_ida);
+static DEFINE_SPINLOCK(mmc_blk_lock);
/*
* There is one mmc_blk_data per slot.
@@ -178,7 +179,9 @@ static void mmc_blk_put(struct mmc_blk_data *md)
int devidx = mmc_get_devidx(md->disk);
blk_cleanup_queue(md->queue.queue);
- __clear_bit(devidx, dev_use);
+ spin_lock(&mmc_blk_lock);
+ ida_remove(&mmc_blk_ida, devidx);
+ spin_unlock(&mmc_blk_lock);
put_disk(md->disk);
kfree(md);
@@ -352,8 +355,10 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
goto idata_err;
}
- if (!idata->buf_bytes)
+ if (!idata->buf_bytes) {
+ idata->buf = NULL;
return idata;
+ }
idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
if (!idata->buf) {
@@ -615,6 +620,10 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
+ /* Always switch back to main area after RPMB access */
+ if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+ mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+
mmc_put_card(card);
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
@@ -682,6 +691,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
for (i = 0; i < num_of_cmds && !ioc_err; i++)
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
+ /* Always switch back to main area after RPMB access */
+ if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+ mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+
mmc_put_card(card);
/* copy to user if data and response */
@@ -745,16 +758,25 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
if (mmc_card_mmc(card)) {
u8 part_config = card->ext_csd.part_config;
+ if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+ mmc_retune_pause(card->host);
+
part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
part_config |= md->part_type;
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_PART_CONFIG, part_config,
card->ext_csd.part_time);
- if (ret)
+ if (ret) {
+ if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+ mmc_retune_unpause(card->host);
return ret;
+ }
card->ext_csd.part_config = part_config;
+
+ if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
+ mmc_retune_unpause(card->host);
}
main_md->part_curr = md->part_type;
@@ -945,16 +967,22 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
req->rq_disk->disk_name, "timed out", name, status);
/* If the status cmd initially failed, retry the r/w cmd */
- if (!status_valid)
+ if (!status_valid) {
+ pr_err("%s: status not valid, retrying timeout\n",
+ req->rq_disk->disk_name);
return ERR_RETRY;
+ }
/*
* If it was a r/w cmd crc error, or illegal command
* (eg, issued in wrong state) then retry - we should
* have corrected the state problem above.
*/
- if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+ pr_err("%s: command error, retrying timeout\n",
+ req->rq_disk->disk_name);
return ERR_RETRY;
+ }
/* Otherwise abort the command */
return ERR_ABORT;
@@ -1760,8 +1788,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
packed_cmd_hdr = packed->cmd_hdr;
memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
- packed_cmd_hdr[0] = (packed->nr_entries << 16) |
- (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+ packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
+ (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
hdr_blocks = mmc_large_sector(card) ? 8 : 1;
/*
@@ -1775,14 +1803,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
((brq->data.blocks * brq->data.blksz) >=
card->ext_csd.data_tag_unit_size);
/* Argument of CMD23 */
- packed_cmd_hdr[(i * 2)] =
+ packed_cmd_hdr[(i * 2)] = cpu_to_le32(
(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
- blk_rq_sectors(prq);
+ blk_rq_sectors(prq));
/* Argument of CMD18 or CMD25 */
- packed_cmd_hdr[((i * 2)) + 1] =
+ packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
mmc_card_blockaddr(card) ?
- blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
packed->blocks += blk_rq_sectors(prq);
i++;
}
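The cpu_to_le32() conversions above fix the packed-header layout on big-endian hosts: the eMMC device parses the header words as little-endian, so they must be stored in a fixed byte order. A minimal sketch of the idiom, with illustrative names rather than the driver's real layout:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Store a header word that a little-endian device will parse; cpu_to_le32()
 * is a no-op on LE hosts and a byte swap on BE hosts, so the bytes the
 * device sees are identical either way. */
static inline void pack_hdr_word(__le32 *slot, u32 entries, u32 cmd, u32 ver)
{
	*slot = cpu_to_le32((entries << 16) | (cmd << 8) | ver);
}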
@@ -2189,10 +2217,23 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct mmc_blk_data *md;
int devidx, ret;
- devidx = find_first_zero_bit(dev_use, max_devices);
- if (devidx >= max_devices)
- return ERR_PTR(-ENOSPC);
- __set_bit(devidx, dev_use);
+again:
+ if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock(&mmc_blk_lock);
+ ret = ida_get_new(&mmc_blk_ida, &devidx);
+ spin_unlock(&mmc_blk_lock);
+
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ return ERR_PTR(ret);
+
+ if (devidx >= max_devices) {
+ ret = -ENOSPC;
+ goto out;
+ }
md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
if (!md) {
@@ -2271,7 +2312,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
card->ext_csd.rel_sectors)) {
md->flags |= MMC_BLK_REL_WR;
- blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+ blk_queue_write_cache(md->queue.queue, true, true);
}
if (mmc_card_mmc(card) &&
@@ -2289,6 +2330,9 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
err_kfree:
kfree(md);
out:
+ spin_lock(&mmc_blk_lock);
+ ida_remove(&mmc_blk_ida, devidx);
+ spin_unlock(&mmc_blk_lock);
return ERR_PTR(ret);
}
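The dev_use bitmap gives way to an IDA here; the pre-get/get-new pairing and the -EAGAIN retry follow the standard idiom of this API generation, sketched in isolation below (names are illustrative):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDA(example_ida);
static DEFINE_SPINLOCK(example_lock);

static int example_alloc_id(int *id)
{
	int err;
again:
	if (!ida_pre_get(&example_ida, GFP_KERNEL))
		return -ENOMEM;		/* preallocation failed */

	spin_lock(&example_lock);
	err = ida_get_new(&example_ida, id);
	spin_unlock(&example_lock);

	if (err == -EAGAIN)		/* another CPU consumed the cache */
		goto again;
	return err;
}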
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index 5415056f9..5af6fb9a9 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -895,7 +895,7 @@ static void sdio_uart_set_termios(struct tty_struct *tty,
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
unsigned int mask = TIOCM_DTR;
- if (!(cflag & CRTSCTS) || !test_bit(TTY_THROTTLED, &tty->flags))
+ if (!(cflag & CRTSCTS) || !tty_throttled(tty))
mask |= TIOCM_RTS;
sdio_uart_set_mctrl(port, mask);
}
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 4c33d7690..250f223aa 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -1,3 +1,24 @@
#
# MMC core configuration
#
+config PWRSEQ_EMMC
+ tristate "HW reset support for eMMC"
+ default y
+ depends on OF
+ help
+	  This selects hardware reset support (aka pwrseq-emmc) for eMMC
+	  devices. By default this option is set to y.
+
+ This driver can also be built as a module. If so, the module
+ will be called pwrseq_emmc.
+
+config PWRSEQ_SIMPLE
+ tristate "Simple HW reset support for MMC"
+ default y
+ depends on OF
+ help
+	  This selects simple hardware reset support (aka pwrseq-simple) for
+	  MMC devices. By default this option is set to y.
+
+ This driver can also be built as a module. If so, the module
+ will be called pwrseq_simple.
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 2c25138f2..f007151df 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -8,5 +8,7 @@ mmc_core-y := core.o bus.o host.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
quirks.o slot-gpio.o
-mmc_core-$(CONFIG_OF) += pwrseq.o pwrseq_simple.o pwrseq_emmc.o
+mmc_core-$(CONFIG_OF) += pwrseq.o
+obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
+obj-$(CONFIG_PWRSEQ_EMMC) += pwrseq_emmc.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 9eba56c68..8b4dfd454 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -36,6 +36,9 @@
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/mmc.h>
+
#include "core.h"
#include "bus.h"
#include "host.h"
@@ -140,6 +143,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
cmd->retries = 0;
}
+ trace_mmc_request_done(host, mrq);
+
if (err && cmd->retries && !mmc_card_removed(host->card)) {
/*
* Request starter must handle retries - see
@@ -215,6 +220,8 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
}
}
+ trace_mmc_request_start(host, mrq);
+
host->ops->request(host, mrq);
}
@@ -2449,8 +2456,9 @@ int mmc_hw_reset(struct mmc_host *host)
ret = host->bus_ops->reset(host);
mmc_bus_put(host);
- if (ret != -EOPNOTSUPP)
- pr_warn("%s: tried to reset card\n", mmc_hostname(host));
+ if (ret)
+ pr_warn("%s: tried to reset card, got error %d\n",
+ mmc_hostname(host), ret);
return ret;
}
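CREATE_TRACE_POINTS must be defined in exactly one translation unit before the trace header is included, which is why core.c gains it above. A hedged sketch of the pattern (the header's real event definitions are not reproduced here):

#include <linux/mmc/host.h>

#define CREATE_TRACE_POINTS		/* emit the event bodies once */
#include <trace/events/mmc.h>		/* declares trace_mmc_request_start/_done() */

static void example_issue(struct mmc_host *host, struct mmc_request *mrq)
{
	trace_mmc_request_start(host, mrq);	/* no-op unless the event is enabled */
	host->ops->request(host, mrq);
}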
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 6e4c55a4a..1be42fab1 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -33,14 +33,14 @@
#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
-static DEFINE_IDR(mmc_host_idr);
+static DEFINE_IDA(mmc_host_ida);
static DEFINE_SPINLOCK(mmc_host_lock);
static void mmc_host_classdev_release(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
spin_lock(&mmc_host_lock);
- idr_remove(&mmc_host_idr, host->index);
+ ida_remove(&mmc_host_ida, host->index);
spin_unlock(&mmc_host_lock);
kfree(host);
}
@@ -68,8 +68,32 @@ void mmc_retune_enable(struct mmc_host *host)
jiffies + host->retune_period * HZ);
}
+/*
+ * Pause re-tuning for a small set of operations. The pause takes effect
+ * after the next command, once re-tuning has first been performed.
+ */
+void mmc_retune_pause(struct mmc_host *host)
+{
+ if (!host->retune_paused) {
+ host->retune_paused = 1;
+ mmc_retune_needed(host);
+ mmc_retune_hold(host);
+ }
+}
+EXPORT_SYMBOL(mmc_retune_pause);
+
+void mmc_retune_unpause(struct mmc_host *host)
+{
+ if (host->retune_paused) {
+ host->retune_paused = 0;
+ mmc_retune_release(host);
+ }
+}
+EXPORT_SYMBOL(mmc_retune_unpause);
+
void mmc_retune_disable(struct mmc_host *host)
{
+ mmc_retune_unpause(host);
host->can_retune = 0;
del_timer_sync(&host->retune_timer);
host->retune_now = 0;
@@ -321,14 +345,20 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
/* scanning will be enabled when we're ready */
host->rescan_disable = 1;
- idr_preload(GFP_KERNEL);
+
+again:
+ if (!ida_pre_get(&mmc_host_ida, GFP_KERNEL)) {
+ kfree(host);
+ return NULL;
+ }
+
spin_lock(&mmc_host_lock);
- err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT);
- if (err >= 0)
- host->index = err;
+ err = ida_get_new(&mmc_host_ida, &host->index);
spin_unlock(&mmc_host_lock);
- idr_preload_end();
- if (err < 0) {
+
+ if (err == -EAGAIN) {
+ goto again;
+ } else if (err) {
kfree(host);
return NULL;
}
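mmc_retune_pause()/unpause() exist so that multi-command sequences such as RPMB access (see the block.c hunks above) cannot be split by a re-tune. A minimal sketch of the intended bracketing, assuming the core-internal declarations are in scope:

#include <linux/mmc/host.h>
#include "host.h"		/* core-internal home of the declarations */

static void example_atomic_sequence(struct mmc_host *host)
{
	mmc_retune_pause(host);		/* re-tune once now, then hold */
	/* ... commands that must not be interleaved with re-tuning ... */
	mmc_retune_unpause(host);	/* drop the hold again */
}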
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 80169643d..5d438ad3e 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1251,10 +1251,11 @@ static int mmc_select_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
bool send_status = true;
- unsigned int old_timing;
+ unsigned int old_timing, old_signal_voltage;
int err = -EINVAL;
u8 val;
+ old_signal_voltage = host->ios.signal_voltage;
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
@@ -1263,7 +1264,7 @@ static int mmc_select_hs200(struct mmc_card *card)
/* If fails try again during next card power cycle */
if (err)
- goto err;
+ return err;
mmc_select_driver_type(card);
@@ -1275,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card)
* switch to HS200 mode if bus width is set successfully.
*/
err = mmc_select_bus_width(card);
- if (!IS_ERR_VALUE(err)) {
+ if (err >= 0) {
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1297,9 +1298,14 @@ static int mmc_select_hs200(struct mmc_card *card)
}
}
err:
- if (err)
+ if (err) {
+		/* fall back to the old signal voltage; if that fails, report an error */
+ if (__mmc_set_signal_voltage(host, old_signal_voltage))
+ err = -EIO;
+
pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
__func__, err);
+ }
return err;
}
@@ -1321,21 +1327,13 @@ static int mmc_select_timing(struct mmc_card *card)
if (err && err != -EBADMSG)
return err;
- if (err) {
- pr_warn("%s: switch to %s failed\n",
- mmc_card_hs(card) ? "high-speed" :
- (mmc_card_hs200(card) ? "hs200" : ""),
- mmc_hostname(card->host));
- err = 0;
- }
-
bus_speed:
/*
* Set the bus speed to the selected bus timing.
* If timing is not selected, backward compatible is the default.
*/
mmc_set_bus_speed(card);
- return err;
+ return 0;
}
/*
@@ -1490,12 +1488,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto free_card;
- /* If doing byte addressing, check if required to do sector
+ /*
+ * If doing byte addressing, check if required to do sector
* addressing. Handle the case of <2GB cards needing sector
* addressing. See section 8.1 JEDEC Standard JED84-A441;
* ocr register has bit 30 set for sector addressing.
*/
- if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
+ if (rocr & BIT(30))
mmc_card_set_blockaddr(card);
/* Erase size depends on CSD and Extended CSD */
@@ -1584,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
} else if (mmc_card_hs(card)) {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
- if (!IS_ERR_VALUE(err)) {
+ if (err >= 0) {
err = mmc_select_hs_ddr(card);
if (err)
goto free_card;
@@ -1964,19 +1963,23 @@ static int mmc_reset(struct mmc_host *host)
{
struct mmc_card *card = host->card;
- if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
- return -EOPNOTSUPP;
-
- if (!mmc_can_reset(card))
- return -EOPNOTSUPP;
-
- mmc_set_clock(host, host->f_init);
-
- host->ops->hw_reset(host);
-
- /* Set initial state and call mmc_set_ios */
- mmc_set_initial_state(host);
-
+ /*
+	 * In the case of recovery, we can't expect the cache flush to always
+	 * work, but we try anyway and ignore any errors.
+ */
+ mmc_flush_cache(host->card);
+
+ if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
+ mmc_can_reset(card)) {
+		/* If the card accepts the RST_n signal, send it. */
+ mmc_set_clock(host, host->f_init);
+ host->ops->hw_reset(host);
+ /* Set initial state and call mmc_set_ios */
+ mmc_set_initial_state(host);
+ } else {
+ /* Do a brute force power cycle */
+ mmc_power_cycle(host, card->ocr);
+ }
return mmc_init_card(host, card->ocr, card);
}
diff --git a/drivers/mmc/core/pwrseq.c b/drivers/mmc/core/pwrseq.c
index 4c1d1757d..9386c4771 100644
--- a/drivers/mmc/core/pwrseq.c
+++ b/drivers/mmc/core/pwrseq.c
@@ -8,88 +8,55 @@
* MMC power sequence management
*/
#include <linux/kernel.h>
-#include <linux/platform_device.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/mmc/host.h>
#include "pwrseq.h"
-struct mmc_pwrseq_match {
- const char *compatible;
- struct mmc_pwrseq *(*alloc)(struct mmc_host *host, struct device *dev);
-};
-
-static struct mmc_pwrseq_match pwrseq_match[] = {
- {
- .compatible = "mmc-pwrseq-simple",
- .alloc = mmc_pwrseq_simple_alloc,
- }, {
- .compatible = "mmc-pwrseq-emmc",
- .alloc = mmc_pwrseq_emmc_alloc,
- },
-};
-
-static struct mmc_pwrseq_match *mmc_pwrseq_find(struct device_node *np)
-{
- struct mmc_pwrseq_match *match = ERR_PTR(-ENODEV);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pwrseq_match); i++) {
- if (of_device_is_compatible(np, pwrseq_match[i].compatible)) {
- match = &pwrseq_match[i];
- break;
- }
- }
-
- return match;
-}
+static DEFINE_MUTEX(pwrseq_list_mutex);
+static LIST_HEAD(pwrseq_list);
int mmc_pwrseq_alloc(struct mmc_host *host)
{
- struct platform_device *pdev;
struct device_node *np;
- struct mmc_pwrseq_match *match;
- struct mmc_pwrseq *pwrseq;
- int ret = 0;
+ struct mmc_pwrseq *p;
np = of_parse_phandle(host->parent->of_node, "mmc-pwrseq", 0);
if (!np)
return 0;
- pdev = of_find_device_by_node(np);
- if (!pdev) {
- ret = -ENODEV;
- goto err;
- }
+ mutex_lock(&pwrseq_list_mutex);
+ list_for_each_entry(p, &pwrseq_list, pwrseq_node) {
+ if (p->dev->of_node == np) {
+ if (!try_module_get(p->owner))
+ dev_err(host->parent,
+ "increasing module refcount failed\n");
+ else
+ host->pwrseq = p;
- match = mmc_pwrseq_find(np);
- if (IS_ERR(match)) {
- ret = PTR_ERR(match);
- goto err;
+ break;
+ }
}
- pwrseq = match->alloc(host, &pdev->dev);
- if (IS_ERR(pwrseq)) {
- ret = PTR_ERR(pwrseq);
- goto err;
- }
+ of_node_put(np);
+ mutex_unlock(&pwrseq_list_mutex);
+
+ if (!host->pwrseq)
+ return -EPROBE_DEFER;
- host->pwrseq = pwrseq;
dev_info(host->parent, "allocated mmc-pwrseq\n");
-err:
- of_node_put(np);
- return ret;
+ return 0;
}
void mmc_pwrseq_pre_power_on(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
- if (pwrseq && pwrseq->ops && pwrseq->ops->pre_power_on)
+ if (pwrseq && pwrseq->ops->pre_power_on)
pwrseq->ops->pre_power_on(host);
}
@@ -97,7 +64,7 @@ void mmc_pwrseq_post_power_on(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
- if (pwrseq && pwrseq->ops && pwrseq->ops->post_power_on)
+ if (pwrseq && pwrseq->ops->post_power_on)
pwrseq->ops->post_power_on(host);
}
@@ -105,7 +72,7 @@ void mmc_pwrseq_power_off(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
- if (pwrseq && pwrseq->ops && pwrseq->ops->power_off)
+ if (pwrseq && pwrseq->ops->power_off)
pwrseq->ops->power_off(host);
}
@@ -113,8 +80,31 @@ void mmc_pwrseq_free(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
- if (pwrseq && pwrseq->ops && pwrseq->ops->free)
- pwrseq->ops->free(host);
+ if (pwrseq) {
+ module_put(pwrseq->owner);
+ host->pwrseq = NULL;
+ }
+}
+
+int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
+{
+ if (!pwrseq || !pwrseq->ops || !pwrseq->dev)
+ return -EINVAL;
- host->pwrseq = NULL;
+ mutex_lock(&pwrseq_list_mutex);
+ list_add(&pwrseq->pwrseq_node, &pwrseq_list);
+ mutex_unlock(&pwrseq_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_pwrseq_register);
+
+void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq)
+{
+ if (pwrseq) {
+ mutex_lock(&pwrseq_list_mutex);
+ list_del(&pwrseq->pwrseq_node);
+ mutex_unlock(&pwrseq_list_mutex);
+ }
}
+EXPORT_SYMBOL_GPL(mmc_pwrseq_unregister);
diff --git a/drivers/mmc/core/pwrseq.h b/drivers/mmc/core/pwrseq.h
index 133de0426..d69e751f1 100644
--- a/drivers/mmc/core/pwrseq.h
+++ b/drivers/mmc/core/pwrseq.h
@@ -8,32 +8,39 @@
#ifndef _MMC_CORE_PWRSEQ_H
#define _MMC_CORE_PWRSEQ_H
+#include <linux/mmc/host.h>
+
struct mmc_pwrseq_ops {
void (*pre_power_on)(struct mmc_host *host);
void (*post_power_on)(struct mmc_host *host);
void (*power_off)(struct mmc_host *host);
- void (*free)(struct mmc_host *host);
};
struct mmc_pwrseq {
const struct mmc_pwrseq_ops *ops;
+ struct device *dev;
+ struct list_head pwrseq_node;
+ struct module *owner;
};
#ifdef CONFIG_OF
+int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq);
+void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq);
+
int mmc_pwrseq_alloc(struct mmc_host *host);
void mmc_pwrseq_pre_power_on(struct mmc_host *host);
void mmc_pwrseq_post_power_on(struct mmc_host *host);
void mmc_pwrseq_power_off(struct mmc_host *host);
void mmc_pwrseq_free(struct mmc_host *host);
-struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
- struct device *dev);
-struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
- struct device *dev);
-
#else
+static inline int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
+{
+ return -ENOSYS;
+}
+static inline void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq) {}
static inline int mmc_pwrseq_alloc(struct mmc_host *host) { return 0; }
static inline void mmc_pwrseq_pre_power_on(struct mmc_host *host) {}
static inline void mmc_pwrseq_post_power_on(struct mmc_host *host) {}
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index 4a82bc77f..adc9c0c61 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -9,6 +9,9 @@
*/
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -25,6 +28,8 @@ struct mmc_pwrseq_emmc {
struct gpio_desc *reset_gpio;
};
+#define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
+
static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
{
gpiod_set_value(pwrseq->reset_gpio, 1);
@@ -35,27 +40,11 @@ static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
{
- struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_emmc, pwrseq);
+ struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
__mmc_pwrseq_emmc_reset(pwrseq);
}
-static void mmc_pwrseq_emmc_free(struct mmc_host *host)
-{
- struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_emmc, pwrseq);
-
- unregister_restart_handler(&pwrseq->reset_nb);
- gpiod_put(pwrseq->reset_gpio);
- kfree(pwrseq);
-}
-
-static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
- .post_power_on = mmc_pwrseq_emmc_reset,
- .free = mmc_pwrseq_emmc_free,
-};
-
static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
unsigned long mode, void *cmd)
{
@@ -66,21 +55,22 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
return NOTIFY_DONE;
}
-struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
- struct device *dev)
+static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
+ .post_power_on = mmc_pwrseq_emmc_reset,
+};
+
+static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
{
struct mmc_pwrseq_emmc *pwrseq;
- int ret = 0;
+ struct device *dev = &pdev->dev;
- pwrseq = kzalloc(sizeof(struct mmc_pwrseq_emmc), GFP_KERNEL);
+ pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
if (!pwrseq)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- pwrseq->reset_gpio = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(pwrseq->reset_gpio)) {
- ret = PTR_ERR(pwrseq->reset_gpio);
- goto free;
- }
+ pwrseq->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pwrseq->reset_gpio))
+ return PTR_ERR(pwrseq->reset_gpio);
/*
* register reset handler to ensure emmc reset also from
@@ -92,9 +82,38 @@ struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
register_restart_handler(&pwrseq->reset_nb);
pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
+ pwrseq->pwrseq.dev = dev;
+ pwrseq->pwrseq.owner = THIS_MODULE;
+ platform_set_drvdata(pdev, pwrseq);
+
+ return mmc_pwrseq_register(&pwrseq->pwrseq);
+}
+
+static int mmc_pwrseq_emmc_remove(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_emmc *pwrseq = platform_get_drvdata(pdev);
+
+ unregister_restart_handler(&pwrseq->reset_nb);
+ mmc_pwrseq_unregister(&pwrseq->pwrseq);
- return &pwrseq->pwrseq;
-free:
- kfree(pwrseq);
- return ERR_PTR(ret);
+ return 0;
}
+
+static const struct of_device_id mmc_pwrseq_emmc_of_match[] = {
+ { .compatible = "mmc-pwrseq-emmc",},
+ {/* sentinel */},
+};
+
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_emmc_of_match);
+
+static struct platform_driver mmc_pwrseq_emmc_driver = {
+ .probe = mmc_pwrseq_emmc_probe,
+ .remove = mmc_pwrseq_emmc_remove,
+ .driver = {
+ .name = "pwrseq_emmc",
+ .of_match_table = mmc_pwrseq_emmc_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_emmc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index bc173e18b..450d907c6 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -8,7 +8,10 @@
* Simple MMC power sequence management
*/
#include <linux/clk.h>
+#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -25,6 +28,8 @@ struct mmc_pwrseq_simple {
struct gpio_descs *reset_gpios;
};
+#define to_pwrseq_simple(p) container_of(p, struct mmc_pwrseq_simple, pwrseq)
+
static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
int value)
{
@@ -44,8 +49,7 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
if (!IS_ERR(pwrseq->ext_clk) && !pwrseq->clk_enabled) {
clk_prepare_enable(pwrseq->ext_clk);
@@ -57,16 +61,14 @@ static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
static void mmc_pwrseq_simple_post_power_on(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
mmc_pwrseq_simple_set_gpios_value(pwrseq, 0);
}
static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
@@ -76,59 +78,64 @@ static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
}
}
-static void mmc_pwrseq_simple_free(struct mmc_host *host)
-{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
-
- if (!IS_ERR(pwrseq->reset_gpios))
- gpiod_put_array(pwrseq->reset_gpios);
-
- if (!IS_ERR(pwrseq->ext_clk))
- clk_put(pwrseq->ext_clk);
-
- kfree(pwrseq);
-}
-
static const struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = {
.pre_power_on = mmc_pwrseq_simple_pre_power_on,
.post_power_on = mmc_pwrseq_simple_post_power_on,
.power_off = mmc_pwrseq_simple_power_off,
- .free = mmc_pwrseq_simple_free,
};
-struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
- struct device *dev)
+static const struct of_device_id mmc_pwrseq_simple_of_match[] = {
+ { .compatible = "mmc-pwrseq-simple",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_simple_of_match);
+
+static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
{
struct mmc_pwrseq_simple *pwrseq;
- int ret = 0;
+ struct device *dev = &pdev->dev;
- pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL);
+ pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
if (!pwrseq)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- pwrseq->ext_clk = clk_get(dev, "ext_clock");
- if (IS_ERR(pwrseq->ext_clk) &&
- PTR_ERR(pwrseq->ext_clk) != -ENOENT) {
- ret = PTR_ERR(pwrseq->ext_clk);
- goto free;
- }
+ pwrseq->ext_clk = devm_clk_get(dev, "ext_clock");
+ if (IS_ERR(pwrseq->ext_clk) && PTR_ERR(pwrseq->ext_clk) != -ENOENT)
+ return PTR_ERR(pwrseq->ext_clk);
- pwrseq->reset_gpios = gpiod_get_array(dev, "reset", GPIOD_OUT_HIGH);
+ pwrseq->reset_gpios = devm_gpiod_get_array(dev, "reset",
+ GPIOD_OUT_HIGH);
if (IS_ERR(pwrseq->reset_gpios) &&
PTR_ERR(pwrseq->reset_gpios) != -ENOENT &&
PTR_ERR(pwrseq->reset_gpios) != -ENOSYS) {
- ret = PTR_ERR(pwrseq->reset_gpios);
- goto clk_put;
+ return PTR_ERR(pwrseq->reset_gpios);
}
+ pwrseq->pwrseq.dev = dev;
pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
+ pwrseq->pwrseq.owner = THIS_MODULE;
+ platform_set_drvdata(pdev, pwrseq);
- return &pwrseq->pwrseq;
-clk_put:
- if (!IS_ERR(pwrseq->ext_clk))
- clk_put(pwrseq->ext_clk);
-free:
- kfree(pwrseq);
- return ERR_PTR(ret);
+ return mmc_pwrseq_register(&pwrseq->pwrseq);
}
+
+static int mmc_pwrseq_simple_remove(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_simple *pwrseq = platform_get_drvdata(pdev);
+
+ mmc_pwrseq_unregister(&pwrseq->pwrseq);
+
+ return 0;
+}
+
+static struct platform_driver mmc_pwrseq_simple_driver = {
+ .probe = mmc_pwrseq_simple_probe,
+ .remove = mmc_pwrseq_simple_remove,
+ .driver = {
+ .name = "pwrseq_simple",
+ .of_match_table = mmc_pwrseq_simple_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_simple_driver);
+MODULE_LICENSE("GPL v2");
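On the consumer side, mmc_pwrseq_alloc() now just walks the registered providers; until one of the platform drivers above has bound, it returns -EPROBE_DEFER so the host's probe is retried. A hedged sketch with illustrative names:

#include <linux/mmc/host.h>
#include "pwrseq.h"		/* core-internal header, as in the files above */

static int example_host_probe(struct mmc_host *host)
{
	int ret = mmc_pwrseq_alloc(host); /* resolves the "mmc-pwrseq" phandle */

	if (ret)	/* typically -EPROBE_DEFER until the provider binds */
		return ret;

	mmc_pwrseq_pre_power_on(host);	/* safe no-op when no pwrseq is attached */
	return 0;
}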
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 6f6fc527a..dcb3dee59 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -177,8 +177,13 @@ static int cistpl_funce_func(struct mmc_card *card, struct sdio_func *func,
vsn = func->card->cccr.sdio_vsn;
min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
- if (size < min_size)
+ if (size == 28 && vsn == SDIO_SDIO_REV_1_10) {
+ pr_warn("%s: card has broken SDIO 1.1 CIS, forcing SDIO 1.0\n",
+ mmc_hostname(card->host));
+ vsn = SDIO_SDIO_REV_1_00;
+ } else if (size < min_size) {
return -EINVAL;
+ }
/* TPLFE_MAX_BLK_SIZE */
func->max_blksize = buf[12] | (buf[13] << 8);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index be3518f29..d5ae22fbb 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -677,9 +677,9 @@ config MMC_SH_MMCIF
depends on HAS_DMA
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
help
- This selects the MMC Host Interface controller (MMCIF).
+ This selects the MMC Host Interface controller (MMCIF) found in various
+ Renesas SoCs for SH and ARM architectures.
- This driver supports MMCIF in sh7724/sh7757/sh7372.
config MMC_JZ4740
tristate "JZ4740 SD/Multimedia Card Interface support"
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 9268c41a8..0ad8ef565 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1410,8 +1410,6 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
WARN_ON(slot->mrq);
dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
- pm_runtime_get_sync(&host->pdev->dev);
-
/*
* We may "know" the card is gone even though there's still an
* electrical connection. If so, we really need to communicate
@@ -1442,8 +1440,6 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct atmel_mci *host = slot->host;
unsigned int i;
- pm_runtime_get_sync(&host->pdev->dev);
-
slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
@@ -1576,8 +1572,6 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
break;
}
- pm_runtime_mark_last_busy(&host->pdev->dev);
- pm_runtime_put_autosuspend(&host->pdev->dev);
}
static int atmci_get_ro(struct mmc_host *mmc)
@@ -1669,9 +1663,6 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
spin_unlock(&host->lock);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
-
- pm_runtime_mark_last_busy(&host->pdev->dev);
- pm_runtime_put_autosuspend(&host->pdev->dev);
}
static void atmci_command_complete(struct atmel_mci *host,
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 693144e74..a56373c75 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -32,12 +32,10 @@
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
-#include <linux/edma.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/platform_data/edma.h>
#include <linux/platform_data/mmc-davinci.h>
/*
@@ -202,7 +200,6 @@ struct mmc_davinci_host {
u32 buffer_bytes_left;
u32 bytes_left;
- u32 rxdma, txdma;
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
bool use_dma;
@@ -513,35 +510,20 @@ davinci_release_dma_channels(struct mmc_davinci_host *host)
static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
- int r;
- dma_cap_mask_t mask;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- host->dma_tx =
- dma_request_slave_channel_compat(mask, edma_filter_fn,
- &host->txdma, mmc_dev(host->mmc), "tx");
- if (!host->dma_tx) {
+ host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
+ if (IS_ERR(host->dma_tx)) {
dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
- return -ENODEV;
+ return PTR_ERR(host->dma_tx);
}
- host->dma_rx =
- dma_request_slave_channel_compat(mask, edma_filter_fn,
- &host->rxdma, mmc_dev(host->mmc), "rx");
- if (!host->dma_rx) {
+ host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
+ if (IS_ERR(host->dma_rx)) {
dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
- r = -ENODEV;
- goto free_master_write;
+ dma_release_channel(host->dma_tx);
+ return PTR_ERR(host->dma_rx);
}
return 0;
-
-free_master_write:
- dma_release_channel(host->dma_tx);
-
- return r;
}
/*----------------------------------------------------------------------*/
@@ -1223,7 +1205,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
struct mmc_davinci_host *host = NULL;
struct mmc_host *mmc = NULL;
struct resource *r, *mem = NULL;
- int ret = 0, irq = 0;
+ int ret, irq;
size_t mem_size;
const struct platform_device_id *id_entry;
@@ -1233,50 +1215,40 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
return -ENOENT;
}
- ret = -ENODEV;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!r || irq == NO_IRQ)
- goto out;
+ return -ENODEV;
- ret = -EBUSY;
mem_size = resource_size(r);
- mem = request_mem_region(r->start, mem_size, pdev->name);
+ mem = devm_request_mem_region(&pdev->dev, r->start, mem_size,
+ pdev->name);
if (!mem)
- goto out;
+ return -EBUSY;
- ret = -ENOMEM;
mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
if (!mmc)
- goto out;
+ return -ENOMEM;
host = mmc_priv(mmc);
host->mmc = mmc; /* Important */
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (!r)
- dev_warn(&pdev->dev, "RX DMA resource not specified\n");
- else
- host->rxdma = r->start;
-
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (!r)
- dev_warn(&pdev->dev, "TX DMA resource not specified\n");
- else
- host->txdma = r->start;
-
host->mem_res = mem;
- host->base = ioremap(mem->start, mem_size);
- if (!host->base)
- goto out;
+ host->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
+ if (!host->base) {
+ ret = -ENOMEM;
+ goto ioremap_fail;
+ }
- ret = -ENXIO;
- host->clk = clk_get(&pdev->dev, "MMCSDCLK");
+ host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
- goto out;
+ goto clk_get_fail;
}
- clk_enable(host->clk);
+ ret = clk_prepare_enable(host->clk);
+ if (ret)
+ goto clk_prepare_enable_fail;
+
host->mmc_input_clk = clk_get_rate(host->clk);
init_mmcsd_host(host);
@@ -1291,8 +1263,13 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
host->mmc_irq = irq;
host->sdio_irq = platform_get_irq(pdev, 1);
- if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
- host->use_dma = 0;
+ if (host->use_dma) {
+ ret = davinci_acquire_dma_channels(host);
+ if (ret == -EPROBE_DEFER)
+ goto dma_probe_defer;
+ else if (ret)
+ host->use_dma = 0;
+ }
/* REVISIT: someday, support IRQ-driven card detection. */
mmc->caps |= MMC_CAP_NEEDS_POLL;
@@ -1346,15 +1323,17 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
ret = mmc_add_host(mmc);
if (ret < 0)
- goto out;
+ goto mmc_add_host_fail;
- ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host);
+ ret = devm_request_irq(&pdev->dev, irq, mmc_davinci_irq, 0,
+ mmc_hostname(mmc), host);
if (ret)
- goto out;
+ goto request_irq_fail;
if (host->sdio_irq >= 0) {
- ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
- mmc_hostname(mmc), host);
+ ret = devm_request_irq(&pdev->dev, host->sdio_irq,
+ mmc_davinci_sdio_irq, 0,
+ mmc_hostname(mmc), host);
if (!ret)
mmc->caps |= MMC_CAP_SDIO_IRQ;
}
@@ -1367,28 +1346,18 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
return 0;
-out:
+request_irq_fail:
+ mmc_remove_host(mmc);
+mmc_add_host_fail:
mmc_davinci_cpufreq_deregister(host);
cpu_freq_fail:
- if (host) {
- davinci_release_dma_channels(host);
-
- if (host->clk) {
- clk_disable(host->clk);
- clk_put(host->clk);
- }
-
- if (host->base)
- iounmap(host->base);
- }
-
- if (mmc)
- mmc_free_host(mmc);
-
- if (mem)
- release_resource(mem);
-
- dev_dbg(&pdev->dev, "probe err %d\n", ret);
+ davinci_release_dma_channels(host);
+dma_probe_defer:
+ clk_disable_unprepare(host->clk);
+clk_prepare_enable_fail:
+clk_get_fail:
+ioremap_fail:
+ mmc_free_host(mmc);
return ret;
}
@@ -1397,25 +1366,11 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
{
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
- if (host) {
- mmc_davinci_cpufreq_deregister(host);
-
- mmc_remove_host(host->mmc);
- free_irq(host->mmc_irq, host);
- if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
- free_irq(host->sdio_irq, host);
-
- davinci_release_dma_channels(host);
-
- clk_disable(host->clk);
- clk_put(host->clk);
-
- iounmap(host->base);
-
- release_resource(host->mem_res);
-
- mmc_free_host(host->mmc);
- }
+ mmc_remove_host(host->mmc);
+ mmc_davinci_cpufreq_deregister(host);
+ davinci_release_dma_channels(host);
+ clk_disable_unprepare(host->clk);
+ mmc_free_host(host->mmc);
return 0;
}
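The conversions in this file (and in omap.c and omap_hsmmc.c below) all follow the same dma_request_chan() shape: ERR_PTR-style returns instead of NULL checks, with -EPROBE_DEFER propagated so the probe is retried once the DMA controller is up. Sketched with illustrative names:

#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_acquire_dma(struct device *dev, struct dma_chan **tx)
{
	*tx = dma_request_chan(dev, "tx");
	if (IS_ERR(*tx)) {
		int ret = PTR_ERR(*tx);

		*tx = NULL;	/* callers may treat NULL as "PIO only" */
		return ret;	/* may be -EPROBE_DEFER; don't swallow it */
	}
	return 0;
}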
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 8790f2afc..7e3a3247b 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -91,10 +91,14 @@ static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host)
return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL)) + 1;
}
-static int dw_mci_exynos_priv_init(struct dw_mci *host)
+static void dw_mci_exynos_config_smu(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
+ /*
+	 * If the Exynos SoC provides the Security Management Unit (SMU),
+	 * configure it for non-encryption mode at this time.
+ */
if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU ||
priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
mci_writel(host, MPSBEGIN0, 0);
@@ -104,6 +108,13 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host)
SDMMC_MPSCTRL_VALID |
SDMMC_MPSCTRL_NON_SECURE_WRITE_BIT);
}
+}
+
+static int dw_mci_exynos_priv_init(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ dw_mci_exynos_config_smu(host);
if (priv->ctrl_type >= DW_MCI_TYPE_EXYNOS5420) {
priv->saved_strobe_ctrl = mci_readl(host, HS400_DLINE_CTRL);
@@ -115,13 +126,6 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host)
DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
}
- return 0;
-}
-
-static int dw_mci_exynos_setup_clock(struct dw_mci *host)
-{
- struct dw_mci_exynos_priv_data *priv = host->priv;
-
host->bus_hz /= (priv->ciu_div + 1);
return 0;
@@ -169,7 +173,7 @@ static int dw_mci_exynos_resume(struct device *dev)
{
struct dw_mci *host = dev_get_drvdata(dev);
- dw_mci_exynos_priv_init(host);
+ dw_mci_exynos_config_smu(host);
return dw_mci_resume(host);
}
@@ -489,7 +493,6 @@ static unsigned long exynos_dwmmc_caps[4] = {
static const struct dw_mci_drv_data exynos_drv_data = {
.caps = exynos_dwmmc_caps,
.init = dw_mci_exynos_priv_init,
- .setup_clock = dw_mci_exynos_setup_clock,
.set_ios = dw_mci_exynos_set_ios,
.parse_dt = dw_mci_exynos_parse_dt,
.execute_tuning = dw_mci_exynos_execute_tuning,
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 84e50f3a6..358b0dc85 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -26,13 +26,6 @@ struct dw_mci_rockchip_priv_data {
int default_sample_phase;
};
-static int dw_mci_rk3288_setup_clock(struct dw_mci *host)
-{
- host->bus_hz /= RK3288_CLKGEN_DIV;
-
- return 0;
-}
-
static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
struct dw_mci_rockchip_priv_data *priv = host->priv;
@@ -73,6 +66,70 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
/* Make sure we use phases which we can enumerate with */
if (!IS_ERR(priv->sample_clk))
clk_set_phase(priv->sample_clk, priv->default_sample_phase);
+
+ /*
+ * Set the drive phase offset based on speed mode to achieve hold times.
+ *
+ * NOTE: this is _not_ a value that is dynamically tuned and is also
+ * _not_ a value that will vary from board to board. It is a value
+ * that could vary between different SoC models if they had massively
+ * different output clock delays inside their dw_mmc IP block (delay_o),
+ * but since it's OK to overshoot a little we don't need to do complex
+ * calculations and can pick values that will just work for everyone.
+ *
+	 * When picking values we'll stick to 0/90/180/270 since
+ * those can be made very accurately on all known Rockchip SoCs.
+ *
+ * Note that these values match values from the DesignWare Databook
+ * tables for the most part except for SDR12 and "ID mode". For those
+ * two modes the databook calculations assume a clock in of 50MHz. As
+ * seen above, we always use a clock in rate that is exactly the
+ * card's input clock (times RK3288_CLKGEN_DIV, but that gets divided
+ * back out before the controller sees it).
+ *
+ * From measurement of a single device, it appears that delay_o is
+ * about .5 ns. Since we try to leave a bit of margin, it's expected
+ * that numbers here will be fine even with much larger delay_o
+ * (the 1.4 ns assumed by the DesignWare Databook would result in the
+ * same results, for instance).
+ */
+ if (!IS_ERR(priv->drv_clk)) {
+ int phase;
+
+ /*
+ * In almost all cases a 90 degree phase offset will provide
+ * sufficient hold times across all valid input clock rates
+ * assuming delay_o is not absurd for a given SoC. We'll use
+ * that as a default.
+ */
+ phase = 90;
+
+ switch (ios->timing) {
+ case MMC_TIMING_MMC_DDR52:
+ /*
+ * Since clock in rate with MMC_DDR52 is doubled when
+ * bus width is 8 we need to double the phase offset
+ * to get the same timings.
+ */
+ if (ios->bus_width == MMC_BUS_WIDTH_8)
+ phase = 180;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ /*
+ * In the case of 150 MHz clock (typical max for
+ * Rockchip SoCs), 90 degree offset will add a delay
+ * of 1.67 ns. That will meet min hold time of .8 ns
+ * as long as clock output delay is < .87 ns. On
+ * SoCs measured this seems to be OK, but it doesn't
+ * hurt to give margin here, so we use 180.
+ */
+ phase = 180;
+ break;
+ }
+
+ clk_set_phase(priv->drv_clk, phase);
+ }
}
#define NUM_PHASES 360
@@ -231,18 +288,30 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
/* It needs this quirk on all Rockchip SoCs */
host->pdata->quirks |= DW_MCI_QUIRK_BROKEN_DTO;
+ if (of_device_is_compatible(host->dev->of_node,
+ "rockchip,rk3288-dw-mshc"))
+ host->bus_hz /= RK3288_CLKGEN_DIV;
+
return 0;
}
+/* Common capabilities of RK3288 SoC */
+static unsigned long dw_mci_rk3288_dwmmc_caps[4] = {
+ MMC_CAP_ERASE | MMC_CAP_CMD23,
+ MMC_CAP_ERASE | MMC_CAP_CMD23,
+ MMC_CAP_ERASE | MMC_CAP_CMD23,
+ MMC_CAP_ERASE | MMC_CAP_CMD23,
+};
+
static const struct dw_mci_drv_data rk2928_drv_data = {
.init = dw_mci_rockchip_init,
};
static const struct dw_mci_drv_data rk3288_drv_data = {
+ .caps = dw_mci_rk3288_dwmmc_caps,
.set_ios = dw_mci_rk3288_set_ios,
.execute_tuning = dw_mci_rk3288_execute_tuning,
.parse_dt = dw_mci_rk3288_parse_dt,
- .setup_clock = dw_mci_rk3288_setup_clock,
.init = dw_mci_rockchip_init,
};
@@ -269,33 +338,13 @@ static int dw_mci_rockchip_probe(struct platform_device *pdev)
return dw_mci_pltfm_register(pdev, drv_data);
}
-#ifdef CONFIG_PM_SLEEP
-static int dw_mci_rockchip_suspend(struct device *dev)
-{
- struct dw_mci *host = dev_get_drvdata(dev);
-
- return dw_mci_suspend(host);
-}
-
-static int dw_mci_rockchip_resume(struct device *dev)
-{
- struct dw_mci *host = dev_get_drvdata(dev);
-
- return dw_mci_resume(host);
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(dw_mci_rockchip_pmops,
- dw_mci_rockchip_suspend,
- dw_mci_rockchip_resume);
-
static struct platform_driver dw_mci_rockchip_pltfm_driver = {
.probe = dw_mci_rockchip_probe,
.remove = dw_mci_pltfm_remove,
.driver = {
.name = "dwmmc_rockchip",
.of_match_table = dw_mci_rockchip_match,
- .pm = &dw_mci_rockchip_pmops,
+ .pm = &dw_mci_pltfm_pmops,
},
};
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 242f9a076..2cc6123b1 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -680,7 +680,7 @@ static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
static void dw_mci_edmac_stop_dma(struct dw_mci *host)
{
- dmaengine_terminate_all(host->dms->ch);
+ dmaengine_terminate_async(host->dms->ch);
}
static int dw_mci_edmac_start_dma(struct dw_mci *host,
@@ -1431,7 +1431,7 @@ static int dw_mci_get_ro(struct mmc_host *mmc)
int gpio_ro = mmc_gpio_get_ro(mmc);
/* Use platform get_ro function, else try on board write protect */
- if (!IS_ERR_VALUE(gpio_ro))
+ if (gpio_ro >= 0)
read_only = gpio_ro;
else
read_only =
@@ -1454,7 +1454,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
(mmc->caps & MMC_CAP_NONREMOVABLE))
present = 1;
- else if (!IS_ERR_VALUE(gpio_cd))
+ else if (gpio_cd >= 0)
present = gpio_cd;
else
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
@@ -2595,13 +2595,13 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
/* Useful defaults if platform data is unset. */
if (host->use_dma == TRANS_MODE_IDMAC) {
mmc->max_segs = host->ring_size;
- mmc->max_blk_size = 65536;
+ mmc->max_blk_size = 65535;
mmc->max_seg_size = 0x1000;
mmc->max_req_size = mmc->max_seg_size * host->ring_size;
mmc->max_blk_count = mmc->max_req_size / 512;
} else if (host->use_dma == TRANS_MODE_EDMAC) {
mmc->max_segs = 64;
- mmc->max_blk_size = 65536;
+ mmc->max_blk_size = 65535;
mmc->max_blk_count = 65535;
mmc->max_req_size =
mmc->max_blk_size * mmc->max_blk_count;
@@ -2609,7 +2609,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
} else {
/* TRANS_MODE_PIO */
mmc->max_segs = 64;
- mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
+ mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
mmc->max_blk_count = 512;
mmc->max_req_size = mmc->max_blk_size *
mmc->max_blk_count;
@@ -2927,7 +2927,7 @@ static void dw_mci_enable_cd(struct dw_mci *host)
if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
return;
- if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
+ if (mmc_gpio_get_cd(slot->mmc) < 0)
break;
}
if (i == host->num_slots)
@@ -3003,15 +3003,6 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- if (drv_data && drv_data->setup_clock) {
- ret = drv_data->setup_clock(host);
- if (ret) {
- dev_err(host->dev,
- "implementation specific clock setup failed\n");
- goto err_clk_ciu;
- }
- }
-
setup_timer(&host->cmd11_timer,
dw_mci_cmd11_timer, (unsigned long)host);
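The IS_ERR_VALUE() removals in this file (and in mmc.c and sdhci-esdhc-imx.c earlier) share one rationale: that macro is meant for unsigned long values in the top page of the address space, and applying it to a plain int return such as mmc_gpio_get_ro()'s misclassifies values after integer promotion. A plain sign test says what is meant:

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static int example_read_only(struct mmc_host *mmc, int fallback)
{
	int gpio_ro = mmc_gpio_get_ro(mmc);

	/* a negative errno simply means "no usable RO GPIO wired up" */
	return gpio_ro >= 0 ? gpio_ro : fallback;
}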
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 68d5da2df..1e8d8380f 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -277,7 +277,6 @@ struct dw_mci_slot {
* dw_mci driver data - dw-mshc implementation specific driver data.
* @caps: mmc subsystem specified capabilities of the controller(s).
* @init: early implementation specific initialization.
- * @setup_clock: implementation specific clock configuration.
* @set_ios: handle bus specific extensions.
* @parse_dt: parse implementation specific device tree properties.
* @execute_tuning: implementation specific tuning procedure.
@@ -289,7 +288,6 @@ struct dw_mci_slot {
struct dw_mci_drv_data {
unsigned long *caps;
int (*init)(struct dw_mci *host);
- int (*setup_clock)(struct dw_mci *host);
void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
int (*parse_dt)(struct dw_mci *host);
int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode);
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2e6c96845..df990bb8c 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -226,16 +226,11 @@ static int mmci_card_busy(struct mmc_host *mmc)
unsigned long flags;
int busy = 0;
- pm_runtime_get_sync(mmc_dev(mmc));
-
spin_lock_irqsave(&host->lock, flags);
if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
busy = 1;
spin_unlock_irqrestore(&host->lock, flags);
- pm_runtime_mark_last_busy(mmc_dev(mmc));
- pm_runtime_put_autosuspend(mmc_dev(mmc));
-
return busy;
}
@@ -381,9 +376,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
host->cmd = NULL;
mmc_request_done(host->mmc, mrq);
-
- pm_runtime_mark_last_busy(mmc_dev(host->mmc));
- pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
@@ -1290,8 +1282,6 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
return;
}
- pm_runtime_get_sync(mmc_dev(mmc));
-
spin_lock_irqsave(&host->lock, flags);
host->mrq = mrq;
@@ -1318,8 +1308,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
unsigned long flags;
int ret;
- pm_runtime_get_sync(mmc_dev(mmc));
-
if (host->plat->ios_handler &&
host->plat->ios_handler(mmc_dev(mmc), ios))
dev_err(mmc_dev(mmc), "platform ios_handler failed\n");
@@ -1414,9 +1402,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
mmci_reg_delay(host);
spin_unlock_irqrestore(&host->lock, flags);
-
- pm_runtime_mark_last_busy(mmc_dev(mmc));
- pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int mmci_get_cd(struct mmc_host *mmc)
@@ -1440,8 +1425,6 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
if (!IS_ERR(mmc->supply.vqmmc)) {
- pm_runtime_get_sync(mmc_dev(mmc));
-
switch (ios->signal_voltage) {
case MMC_SIGNAL_VOLTAGE_330:
ret = regulator_set_voltage(mmc->supply.vqmmc,
@@ -1459,9 +1442,6 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
if (ret)
dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
-
- pm_runtime_mark_last_busy(mmc_dev(mmc));
- pm_runtime_put_autosuspend(mmc_dev(mmc));
}
return ret;
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index b17f30da9..5642f71f8 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -736,9 +736,6 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
if (mrq->data)
msdc_unprepare_data(host, mrq);
mmc_request_done(host->mmc, mrq);
-
- pm_runtime_mark_last_busy(host->dev);
- pm_runtime_put_autosuspend(host->dev);
}
/* returns true if command is fully handled; returns false otherwise */
@@ -886,8 +883,6 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
WARN_ON(host->mrq);
host->mrq = mrq;
- pm_runtime_get_sync(host->dev);
-
if (mrq->data)
msdc_prepare_data(host, mrq);
@@ -1201,8 +1196,6 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct msdc_host *host = mmc_priv(mmc);
int ret;
- pm_runtime_get_sync(host->dev);
-
msdc_set_buswidth(host, ios->bus_width);
/* Suspend/Resume will do power off/on */
@@ -1214,7 +1207,7 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
ios->vdd);
if (ret) {
dev_err(host->dev, "Failed to set vmmc power!\n");
- goto end;
+ return;
}
}
break;
@@ -1242,10 +1235,6 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (host->mclk != ios->clock || host->timing != ios->timing)
msdc_set_mclk(host, ios->timing, ios->clock);
-
-end:
- pm_runtime_mark_last_busy(host->dev);
- pm_runtime_put_autosuspend(host->dev);
}
static u32 test_delay_bit(u32 delay, u32 bit)
@@ -1408,19 +1397,15 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
struct msdc_host *host = mmc_priv(mmc);
int ret;
- pm_runtime_get_sync(host->dev);
ret = msdc_tune_response(mmc, opcode);
if (ret == -EIO) {
dev_err(host->dev, "Tune response fail!\n");
- goto out;
+ return ret;
}
ret = msdc_tune_data(mmc, opcode);
if (ret == -EIO)
dev_err(host->dev, "Tune data fail!\n");
-out:
- pm_runtime_mark_last_busy(host->dev);
- pm_runtime_put_autosuspend(host->dev);
return ret;
}
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index b9958a123..f23d65eb0 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -23,7 +23,6 @@
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/of.h>
-#include <linux/omap-dma.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
@@ -1321,8 +1320,6 @@ static int mmc_omap_probe(struct platform_device *pdev)
struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
struct mmc_omap_host *host = NULL;
struct resource *res;
- dma_cap_mask_t mask;
- unsigned sig = 0;
int i, ret = 0;
int irq;
@@ -1382,29 +1379,34 @@ static int mmc_omap_probe(struct platform_device *pdev)
goto err_free_iclk;
}
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
host->dma_tx_burst = -1;
host->dma_rx_burst = -1;
- res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
- if (res)
- sig = res->start;
- host->dma_tx = dma_request_slave_channel_compat(mask,
- omap_dma_filter_fn, &sig, &pdev->dev, "tx");
- if (!host->dma_tx)
- dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
- sig);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
- if (res)
- sig = res->start;
- host->dma_rx = dma_request_slave_channel_compat(mask,
- omap_dma_filter_fn, &sig, &pdev->dev, "rx");
- if (!host->dma_rx)
- dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
- sig);
+ host->dma_tx = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(host->dma_tx)) {
+ ret = PTR_ERR(host->dma_tx);
+ if (ret == -EPROBE_DEFER) {
+ clk_put(host->fclk);
+ goto err_free_iclk;
+ }
+
+ host->dma_tx = NULL;
+ dev_warn(host->dev, "TX DMA channel request failed\n");
+ }
+
+ host->dma_rx = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(host->dma_rx)) {
+ ret = PTR_ERR(host->dma_rx);
+ if (ret == -EPROBE_DEFER) {
+ if (host->dma_tx)
+ dma_release_channel(host->dma_tx);
+ clk_put(host->fclk);
+ goto err_free_iclk;
+ }
+
+ host->dma_rx = NULL;
+ dev_warn(host->dev, "RX DMA channel request failed\n");
+ }
ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
if (ret)
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index f9ac3bb5d..24ebc9a8d 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -32,7 +32,6 @@
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
-#include <linux/omap-dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/mmc/core.h>
#include <linux/mmc/mmc.h>
@@ -351,15 +350,14 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on,
return 0;
}
-static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
+static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on,
+ int vdd)
{
- struct omap_hsmmc_host *host =
- platform_get_drvdata(to_platform_device(dev));
struct mmc_host *mmc = host->mmc;
int ret = 0;
if (mmc_pdata(host)->set_power)
- return mmc_pdata(host)->set_power(dev, power_on, vdd);
+ return mmc_pdata(host)->set_power(host->dev, power_on, vdd);
/*
* If we don't see a Vcc regulator, assume it's a fixed
@@ -369,7 +367,7 @@ static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
return 0;
if (mmc_pdata(host)->before_set_reg)
- mmc_pdata(host)->before_set_reg(dev, power_on, vdd);
+ mmc_pdata(host)->before_set_reg(host->dev, power_on, vdd);
ret = omap_hsmmc_set_pbias(host, false, 0);
if (ret)
@@ -403,7 +401,7 @@ static int omap_hsmmc_set_power(struct device *dev, int power_on, int vdd)
}
if (mmc_pdata(host)->after_set_reg)
- mmc_pdata(host)->after_set_reg(dev, power_on, vdd);
+ mmc_pdata(host)->after_set_reg(host->dev, power_on, vdd);
return 0;
@@ -968,8 +966,6 @@ static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_req
return;
host->mrq = NULL;
mmc_request_done(host->mmc, mrq);
- pm_runtime_mark_last_busy(host->dev);
- pm_runtime_put_autosuspend(host->dev);
}
/*
@@ -1250,17 +1246,15 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
int ret;
/* Disable the clocks */
- pm_runtime_put_sync(host->dev);
if (host->dbclk)
clk_disable_unprepare(host->dbclk);
/* Turn the power off */
- ret = omap_hsmmc_set_power(host->dev, 0, 0);
+ ret = omap_hsmmc_set_power(host, 0, 0);
/* Turn the power ON with given VDD 1.8 or 3.0v */
if (!ret)
- ret = omap_hsmmc_set_power(host->dev, 1, vdd);
- pm_runtime_get_sync(host->dev);
+ ret = omap_hsmmc_set_power(host, 1, vdd);
if (host->dbclk)
clk_prepare_enable(host->dbclk);
@@ -1368,8 +1362,6 @@ static void omap_hsmmc_dma_callback(void *param)
host->mrq = NULL;
mmc_request_done(host->mmc, mrq);
- pm_runtime_mark_last_busy(host->dev);
- pm_runtime_put_autosuspend(host->dev);
}
}
@@ -1602,7 +1594,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
BUG_ON(host->req_in_progress);
BUG_ON(host->dma_ch != -1);
- pm_runtime_get_sync(host->dev);
if (host->protect_card) {
if (host->reqs_blocked < 3) {
/*
@@ -1619,8 +1610,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
req->data->error = -EBADF;
req->cmd->retries = 0;
mmc_request_done(mmc, req);
- pm_runtime_mark_last_busy(host->dev);
- pm_runtime_put_autosuspend(host->dev);
return;
} else if (host->reqs_blocked)
host->reqs_blocked = 0;
@@ -1634,8 +1623,6 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
req->data->error = err;
host->mrq = NULL;
mmc_request_done(mmc, req);
- pm_runtime_mark_last_busy(host->dev);
- pm_runtime_put_autosuspend(host->dev);
return;
}
if (req->sbc && !(host->flags & AUTO_CMD23)) {
@@ -1653,15 +1640,13 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct omap_hsmmc_host *host = mmc_priv(mmc);
int do_send_init_stream = 0;
- pm_runtime_get_sync(host->dev);
-
if (ios->power_mode != host->power_mode) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
- omap_hsmmc_set_power(host->dev, 0, 0);
+ omap_hsmmc_set_power(host, 0, 0);
break;
case MMC_POWER_UP:
- omap_hsmmc_set_power(host->dev, 1, ios->vdd);
+ omap_hsmmc_set_power(host, 1, ios->vdd);
break;
case MMC_POWER_ON:
do_send_init_stream = 1;
@@ -1698,8 +1683,6 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
send_init_stream(host);
omap_hsmmc_set_bus_mode(host);
-
- pm_runtime_put_autosuspend(host->dev);
}
static int omap_hsmmc_get_cd(struct mmc_host *mmc)
@@ -1962,13 +1945,17 @@ MODULE_DEVICE_TABLE(of, omap_mmc_of_match);
static struct omap_hsmmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
{
- struct omap_hsmmc_platform_data *pdata;
+ struct omap_hsmmc_platform_data *pdata, *legacy;
struct device_node *np = dev->of_node;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return ERR_PTR(-ENOMEM); /* out of memory */
+ legacy = dev_get_platdata(dev);
+ if (legacy && legacy->name)
+ pdata->name = legacy->name;
+
if (of_find_property(np, "ti,dual-volt", NULL))
pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT;
@@ -2005,8 +1992,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq;
const struct of_device_id *match;
- dma_cap_mask_t mask;
- unsigned tx_req, rx_req;
const struct omap_mmc_of_data *data;
void __iomem *base;
@@ -2136,44 +2121,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_conf_bus_power(host);
- if (!pdev->dev.of_node) {
- res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
- if (!res) {
- dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
- ret = -ENXIO;
- goto err_irq;
- }
- tx_req = res->start;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
- if (!res) {
- dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
- ret = -ENXIO;
- goto err_irq;
- }
- rx_req = res->start;
- }
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- host->rx_chan =
- dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
- &rx_req, &pdev->dev, "rx");
-
- if (!host->rx_chan) {
- dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel\n");
- ret = -ENXIO;
+ host->rx_chan = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(host->rx_chan)) {
+ dev_err(mmc_dev(host->mmc), "RX DMA channel request failed\n");
+ ret = PTR_ERR(host->rx_chan);
goto err_irq;
}
- host->tx_chan =
- dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
- &tx_req, &pdev->dev, "tx");
-
- if (!host->tx_chan) {
- dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel\n");
- ret = -ENXIO;
+ host->tx_chan = dma_request_chan(&pdev->dev, "tx");
+ if (IS_ERR(host->tx_chan)) {
+ dev_err(mmc_dev(host->mmc), "TX DMA channel request failed\n");
+ ret = PTR_ERR(host->tx_chan);
goto err_irq;
}
@@ -2231,9 +2189,9 @@ err_slot_name:
mmc_remove_host(mmc);
err_irq:
device_init_wakeup(&pdev->dev, false);
- if (host->tx_chan)
+ if (!IS_ERR_OR_NULL(host->tx_chan))
dma_release_channel(host->tx_chan);
- if (host->rx_chan)
+ if (!IS_ERR_OR_NULL(host->rx_chan))
dma_release_channel(host->rx_chan);
pm_runtime_dont_use_autosuspend(host->dev);
pm_runtime_put_sync(host->dev);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 86fac3e86..c763b4045 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -789,14 +789,16 @@ static int pxamci_probe(struct platform_device *pdev)
gpio_direction_output(gpio_power,
host->pdata->gpio_power_invert);
}
- if (gpio_is_valid(gpio_ro))
+ if (gpio_is_valid(gpio_ro)) {
ret = mmc_gpio_request_ro(mmc, gpio_ro);
- if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
- goto out;
- } else {
- mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
- 0 : MMC_CAP2_RO_ACTIVE_HIGH;
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n",
+ gpio_ro);
+ goto out;
+ } else {
+ mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+ 0 : MMC_CAP2_RO_ACTIVE_HIGH;
+ }
}
if (gpio_is_valid(gpio_cd))
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index c0e206d72..458ffb763 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -200,8 +200,6 @@ static int bxt_get_cd(struct mmc_host *mmc)
if (!gpio_cd)
return 0;
- pm_runtime_get_sync(mmc->parent);
-
spin_lock_irqsave(&host->lock, flags);
if (host->flags & SDHCI_DEVICE_DEAD)
@@ -211,9 +209,6 @@ static int bxt_get_cd(struct mmc_host *mmc)
out:
spin_unlock_irqrestore(&host->lock, flags);
- pm_runtime_mark_last_busy(mmc->parent);
- pm_runtime_put_autosuspend(mmc->parent);
-
return ret;
}
@@ -267,8 +262,10 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
/* Platform specific code during sd probe slot goes here */
- if (hid && !strcmp(hid, "80865ACA"))
+ if (hid && !strcmp(hid, "80865ACA")) {
host->mmc_host_ops.get_cd = bxt_get_cd;
+ host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
+ }
return 0;
}
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 2d300d87c..9d3ae1f4b 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1011,7 +1011,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
if (ret)
return ret;
- if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+ if (mmc_gpio_get_cd(host->mmc) >= 0)
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
return 0;
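Aside on the IS_ERR_VALUE() removals here and below: mmc_gpio_get_cd() returns a plain int, a negative errno when no card-detect GPIO is configured and the GPIO state otherwise. IS_ERR_VALUE() is intended for unsigned long pointer-range error values, so applying it to an int return is fragile; the open-coded comparison states the intent directly. A minimal sketch, with a hypothetical helper name:

/* Sketch only: testing mmc_gpio_get_cd() the way the converted
 * call sites do; "my_cd_gpio_present" is a hypothetical name. */
static bool my_cd_gpio_present(struct mmc_host *mmc)
{
	int cd = mmc_gpio_get_cd(mmc);	/* negative errno if no CD GPIO */

	return cd >= 0;
}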
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 2e482b13d..b6f4c1d41 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -55,8 +55,32 @@ static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host)
return freq;
}
+static void sdhci_arasan_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host);
+ bool ctrl_phy = false;
+
+ if (clock > MMC_HIGH_52_MAX_DTR && (!IS_ERR(sdhci_arasan->phy)))
+ ctrl_phy = true;
+
+ if (ctrl_phy) {
+ spin_unlock_irq(&host->lock);
+ phy_power_off(sdhci_arasan->phy);
+ spin_lock_irq(&host->lock);
+ }
+
+ sdhci_set_clock(host, clock);
+
+ if (ctrl_phy) {
+ spin_unlock_irq(&host->lock);
+ phy_power_on(sdhci_arasan->phy);
+ spin_lock_irq(&host->lock);
+ }
+}
+
static struct sdhci_ops sdhci_arasan_ops = {
- .set_clock = sdhci_set_clock,
+ .set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
.get_timeout_clock = sdhci_arasan_get_timeout_clock,
.set_bus_width = sdhci_set_bus_width,
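Note on sdhci_arasan_set_clock(): sdhci ->set_clock callbacks run under host->lock, an IRQ-safe spinlock, while phy_power_off()/phy_power_on() may sleep, so the wrapper drops and retakes the lock around each PHY call. The PHY is only cycled above 52 MHz (MMC_HIGH_52_MAX_DTR) and only when a PHY is actually attached, so the common path stays lock-held. The general shape, sketched with a hypothetical sleeping call:

/* Sketch only: dropping an IRQ-safe spinlock around a sleeping call,
 * as sdhci_arasan_set_clock() does above; my_sleeping_call() is a
 * hypothetical placeholder for a PHY or clock operation. */
static void my_locked_path(struct sdhci_host *host)
{
	spin_unlock_irq(&host->lock);
	my_sleeping_call();
	spin_lock_irq(&host->lock);
}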
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 2703aa90d..d4cef713d 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -15,8 +15,10 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/kernel.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
@@ -31,14 +33,60 @@
#define SDMMC_CACR_CAPWREN BIT(0)
#define SDMMC_CACR_KEY (0x46 << 8)
+#define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */
+
struct sdhci_at91_priv {
struct clk *hclock;
struct clk *gck;
struct clk *mainck;
};
+static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+ unsigned long timeout;
+
+ host->mmc->actual_clock = 0;
+
+ /*
+ * There is no requirement to disable the internal clock before
+ * changing the SD clock configuration. Moreover, disabling the
+ * internal clock, changing the configuration and re-enabling the
+ * internal clock triggers some bugs: it can prevent the internal
+ * clock stable flag from being set and cause an unexpected switch
+ * to the base clock when using presets.
+ */
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ clk &= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+
+ clk |= SDHCI_CLOCK_INT_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+
+ /* Wait max 20 ms */
+ timeout = 20;
+ while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
+ & SDHCI_CLOCK_INT_STABLE)) {
+ if (timeout == 0) {
+ pr_err("%s: Internal clock never stabilised.\n",
+ mmc_hostname(host->mmc));
+ return;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ clk |= SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
+}
+
static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
- .set_clock = sdhci_set_clock,
+ .set_clock = sdhci_at91_set_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
@@ -46,7 +94,6 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
static const struct sdhci_pltfm_data soc_data_sama5d2 = {
.ops = &sdhci_at91_sama5d2_ops,
- .quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST,
};
static const struct of_device_id sdhci_at91_dt_match[] = {
@@ -119,6 +166,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
unsigned int clk_base, clk_mul;
unsigned int gck_rate, real_gck_rate;
int ret;
+ unsigned int preset_div;
match = of_match_device(sdhci_at91_dt_match, &pdev->dev);
if (!match)
@@ -186,6 +234,28 @@ static int sdhci_at91_probe(struct platform_device *pdev)
clk_mul, real_gck_rate);
}
+ /*
+ * We have to set preset values because they depend on the clk_mul
+ * value. Moreover, SDR104 is supported in a degraded mode since the
+ * maximum SD clock value is 120 MHz instead of 208 MHz. For that
+ * reason, we need to use presets to support SDR104.
+ */
+ preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1;
+ writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+ host->ioaddr + SDHCI_PRESET_FOR_SDR12);
+ preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+ writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+ host->ioaddr + SDHCI_PRESET_FOR_SDR25);
+ preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1;
+ writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+ host->ioaddr + SDHCI_PRESET_FOR_SDR50);
+ preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1;
+ writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+ host->ioaddr + SDHCI_PRESET_FOR_SDR104);
+ preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1;
+ writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div,
+ host->ioaddr + SDHCI_PRESET_FOR_DDR50);
+
clk_prepare_enable(priv->mainck);
clk_prepare_enable(priv->gck);
@@ -219,7 +289,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
* to enable polling via device tree with broken-cd property.
*/
if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) &&
- IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) {
+ mmc_gpio_get_cd(host->mmc) < 0) {
host->mmc->caps |= MMC_CAP_NEEDS_POLL;
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
}
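Every preset register written above is built the same way: SDHCI_AT91_PRESET_COMMON_CONF selects driver type B plus programmable-clock mode, and the low bits hold DIV_ROUND_UP(real_gck_rate, target) - 1, so that real_gck_rate / (divider + 1) never exceeds the target frequency of the timing mode. A worked example under an assumed generated-clock rate:

/* Sketch only: assuming real_gck_rate = 480 MHz (hypothetical), the
 * SDR104 preset divider is DIV_ROUND_UP(480000000, 120000000) - 1 = 3,
 * so the controller divides by (3 + 1) = 4 and the SD clock runs at
 * 120 MHz, the degraded SDR104 maximum described in the comment. */
static u16 at91_preset(unsigned long gck_rate, unsigned long target_hz)
{
	return SDHCI_AT91_PRESET_COMMON_CONF |
	       (DIV_ROUND_UP(gck_rate, target_hz) - 1);
}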
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index b42dd6a1b..a4dbf7421 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -340,8 +340,6 @@ static int bxt_get_cd(struct mmc_host *mmc)
if (!gpio_cd)
return 0;
- pm_runtime_get_sync(mmc->parent);
-
spin_lock_irqsave(&host->lock, flags);
if (host->flags & SDHCI_DEVICE_DEAD)
@@ -351,9 +349,6 @@ static int bxt_get_cd(struct mmc_host *mmc)
out:
spin_unlock_irqrestore(&host->lock, flags);
- pm_runtime_mark_last_busy(mmc->parent);
- pm_runtime_put_autosuspend(mmc->parent);
-
return ret;
}
@@ -388,8 +383,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
slot->cd_override_level = true;
if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD ||
- slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD) {
slot->host->mmc_host_ops.get_cd = bxt_get_cd;
+ slot->host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
+ }
return 0;
}
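Both the sdhci-pci hunk here and the matching sdhci-acpi change earlier make the same trade: bxt_get_cd() stops taking runtime-PM references around the presence check, and the Broxton/Apollo Lake SD slots instead advertise MMC_CAP_AGGRESSIVE_PM, letting the MMC core runtime-suspend the card when the host sits idle. A minimal sketch of the capability opt-in, with a hypothetical probe function:

/* Sketch only: opting a slot into aggressive runtime PM at probe
 * time; the function is hypothetical, the capability flag is real. */
static int my_probe_slot(struct mmc_host *mmc)
{
	/* let the MMC core power the card down between requests */
	mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
	return 0;
}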
diff --git a/drivers/mmc/host/sdhci-pic32.c b/drivers/mmc/host/sdhci-pic32.c
index 059df707a..72c13b6f0 100644
--- a/drivers/mmc/host/sdhci-pic32.c
+++ b/drivers/mmc/host/sdhci-pic32.c
@@ -243,7 +243,6 @@ MODULE_DEVICE_TABLE(of, pic32_sdhci_id_table);
static struct platform_driver pic32_sdhci_driver = {
.driver = {
.name = "pic32-sdhci",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(pic32_sdhci_id_table),
},
.probe = pic32_sdhci_probe,
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 072bb27a6..64f287a03 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -119,16 +119,22 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
{
struct sdhci_host *host;
struct resource *iomem;
- int ret;
+ void __iomem *ioaddr;
+ int irq, ret;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem) {
- ret = -ENOMEM;
+ ioaddr = devm_ioremap_resource(&pdev->dev, iomem);
+ if (IS_ERR(ioaddr)) {
+ ret = PTR_ERR(ioaddr);
goto err;
}
- if (resource_size(iomem) < 0x100)
- dev_err(&pdev->dev, "Invalid iomem size!\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ number\n");
+ ret = irq;
+ goto err;
+ }
host = sdhci_alloc_host(&pdev->dev,
sizeof(struct sdhci_pltfm_host) + priv_size);
@@ -138,6 +144,8 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
goto err;
}
+ host->ioaddr = ioaddr;
+ host->irq = irq;
host->hw_name = dev_name(&pdev->dev);
if (pdata && pdata->ops)
host->ops = pdata->ops;
@@ -148,22 +156,6 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
host->quirks2 = pdata->quirks2;
}
- host->irq = platform_get_irq(pdev, 0);
-
- if (!request_mem_region(iomem->start, resource_size(iomem),
- mmc_hostname(host->mmc))) {
- dev_err(&pdev->dev, "cannot request region\n");
- ret = -EBUSY;
- goto err_request;
- }
-
- host->ioaddr = ioremap(iomem->start, resource_size(iomem));
- if (!host->ioaddr) {
- dev_err(&pdev->dev, "failed to remap registers\n");
- ret = -ENOMEM;
- goto err_remap;
- }
-
/*
* Some platforms need to probe the controller to be able to
* determine which caps should be used.
@@ -174,11 +166,6 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
platform_set_drvdata(pdev, host);
return host;
-
-err_remap:
- release_mem_region(iomem->start, resource_size(iomem));
-err_request:
- sdhci_free_host(host);
err:
dev_err(&pdev->dev, "%s failed %d\n", __func__, ret);
return ERR_PTR(ret);
@@ -188,10 +175,7 @@ EXPORT_SYMBOL_GPL(sdhci_pltfm_init);
void sdhci_pltfm_free(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
- struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iounmap(host->ioaddr);
- release_mem_region(iomem->start, resource_size(iomem));
sdhci_free_host(host);
}
EXPORT_SYMBOL_GPL(sdhci_pltfm_free);
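sdhci_pltfm_init() now maps its registers through devm_ioremap_resource(), which requests the memory region, performs the ioremap, validates the resource pointer (a NULL resource yields ERR_PTR(-EINVAL)), and releases everything automatically on driver detach. That one call replaces the request_mem_region()/ioremap() pair and both error labels, and it is why sdhci_pltfm_free() shrinks to a bare sdhci_free_host(). A minimal sketch of the managed idiom:

/* Sketch only: the devm mapping idiom adopted above. */
static void __iomem *my_map_regs(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* handles res == NULL and busy regions, unmaps on detach */
	return devm_ioremap_resource(&pdev->dev, res);
}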
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6bd3d1794..0e3d7c056 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -38,11 +38,6 @@
#define DBG(f, x...) \
pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
-#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
- defined(CONFIG_MMC_SDHCI_MODULE))
-#define SDHCI_USE_LEDS_CLASS
-#endif
-
#define MAX_TUNING_LOOP 40
static unsigned int debug_quirks = 0;
@@ -53,29 +48,7 @@ static void sdhci_finish_data(struct sdhci_host *);
static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
-static int sdhci_do_get_cd(struct sdhci_host *host);
-
-#ifdef CONFIG_PM
-static int sdhci_runtime_pm_get(struct sdhci_host *host);
-static int sdhci_runtime_pm_put(struct sdhci_host *host);
-static void sdhci_runtime_pm_bus_on(struct sdhci_host *host);
-static void sdhci_runtime_pm_bus_off(struct sdhci_host *host);
-#else
-static inline int sdhci_runtime_pm_get(struct sdhci_host *host)
-{
- return 0;
-}
-static inline int sdhci_runtime_pm_put(struct sdhci_host *host)
-{
- return 0;
-}
-static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
-{
-}
-static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
-{
-}
-#endif
+static int sdhci_get_cd(struct mmc_host *mmc);
static void sdhci_dumpregs(struct sdhci_host *host)
{
@@ -171,6 +144,22 @@ static void sdhci_disable_card_detection(struct sdhci_host *host)
sdhci_set_card_detection(host, false);
}
+static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+{
+ if (host->bus_on)
+ return;
+ host->bus_on = true;
+ pm_runtime_get_noresume(host->mmc->parent);
+}
+
+static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
+{
+ if (!host->bus_on)
+ return;
+ host->bus_on = false;
+ pm_runtime_put_noidle(host->mmc->parent);
+}
+
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
unsigned long timeout;
@@ -204,7 +193,7 @@ EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
- if (!sdhci_do_get_cd(host))
+ if (!sdhci_get_cd(host->mmc))
return;
}
@@ -252,7 +241,7 @@ static void sdhci_reinit(struct sdhci_host *host)
sdhci_enable_card_detection(host);
}
-static void sdhci_activate_led(struct sdhci_host *host)
+static void __sdhci_led_activate(struct sdhci_host *host)
{
u8 ctrl;
@@ -261,7 +250,7 @@ static void sdhci_activate_led(struct sdhci_host *host)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
-static void sdhci_deactivate_led(struct sdhci_host *host)
+static void __sdhci_led_deactivate(struct sdhci_host *host)
{
u8 ctrl;
@@ -270,9 +259,9 @@ static void sdhci_deactivate_led(struct sdhci_host *host)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
-#ifdef SDHCI_USE_LEDS_CLASS
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
- enum led_brightness brightness)
+ enum led_brightness brightness)
{
struct sdhci_host *host = container_of(led, struct sdhci_host, led);
unsigned long flags;
@@ -283,12 +272,62 @@ static void sdhci_led_control(struct led_classdev *led,
goto out;
if (brightness == LED_OFF)
- sdhci_deactivate_led(host);
+ __sdhci_led_deactivate(host);
else
- sdhci_activate_led(host);
+ __sdhci_led_activate(host);
out:
spin_unlock_irqrestore(&host->lock, flags);
}
+
+static int sdhci_led_register(struct sdhci_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ snprintf(host->led_name, sizeof(host->led_name),
+ "%s::", mmc_hostname(mmc));
+
+ host->led.name = host->led_name;
+ host->led.brightness = LED_OFF;
+ host->led.default_trigger = mmc_hostname(mmc);
+ host->led.brightness_set = sdhci_led_control;
+
+ return led_classdev_register(mmc_dev(mmc), &host->led);
+}
+
+static void sdhci_led_unregister(struct sdhci_host *host)
+{
+ led_classdev_unregister(&host->led);
+}
+
+static inline void sdhci_led_activate(struct sdhci_host *host)
+{
+}
+
+static inline void sdhci_led_deactivate(struct sdhci_host *host)
+{
+}
+
+#else
+
+static inline int sdhci_led_register(struct sdhci_host *host)
+{
+ return 0;
+}
+
+static inline void sdhci_led_unregister(struct sdhci_host *host)
+{
+}
+
+static inline void sdhci_led_activate(struct sdhci_host *host)
+{
+ __sdhci_led_activate(host);
+}
+
+static inline void sdhci_led_deactivate(struct sdhci_host *host)
+{
+ __sdhci_led_deactivate(host);
+}
+
#endif
/*****************************************************************************\
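The LED rework above funnels every LED touch through four helpers. IS_REACHABLE(CONFIG_LEDS_CLASS) is true when the LED core is built in, or when it is a module and this code is also built as a module; in that configuration registration is real and the request-path activate/deactivate helpers become no-ops, because the LED class/trigger machinery drives the LED instead. In every other configuration the stubs invert: registration is a no-op and activate/deactivate poke SDHCI_HOST_CONTROL directly, matching the old SDHCI_USE_LEDS_CLASS behaviour. Roughly, by configuration:

/*
 * Sketch only: IS_REACHABLE(CONFIG_LEDS_CLASS) per configuration
 * (rows: LEDS_CLASS; columns: how the sdhci code itself is built).
 * The real macro lives in include/linux/kconfig.h.
 *
 *                 sdhci built-in    sdhci module
 *   LEDS y              1                1
 *   LEDS m              0                1
 *   LEDS n              0                0
 */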
@@ -1091,23 +1130,14 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
return preset;
}
-void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
+ unsigned int *actual_clock)
{
int div = 0; /* Initialized for compiler warning */
int real_div = div, clk_mul = 1;
u16 clk = 0;
- unsigned long timeout;
bool switch_base_clk = false;
- host->mmc->actual_clock = 0;
-
- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
- if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
- mdelay(1);
-
- if (clock == 0)
- return;
-
if (host->version >= SDHCI_SPEC_300) {
if (host->preset_enabled) {
u16 pre_val;
@@ -1184,10 +1214,29 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
clock_set:
if (real_div)
- host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
+ *actual_clock = (host->max_clk * clk_mul) / real_div;
clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
<< SDHCI_DIVIDER_HI_SHIFT;
+
+ return clk;
+}
+EXPORT_SYMBOL_GPL(sdhci_calc_clk);
+
+void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ u16 clk;
+ unsigned long timeout;
+
+ host->mmc->actual_clock = 0;
+
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+
+ if (clock == 0)
+ return;
+
+ clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
+
clk |= SDHCI_CLOCK_INT_EN;
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
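The split above separates policy from sequencing: sdhci_calc_clk() performs the divider search and returns the SDHCI_CLOCK_CONTROL bit pattern (filling in actual_clock), while sdhci_set_clock() keeps the generic gate/program/stabilise sequence. Drivers with non-standard sequencing, such as sdhci-of-at91 earlier in this diff, can now reuse the math alone. A minimal sketch of a driver-side caller, assuming only the two exported helpers:

/* Sketch only: a driver ->set_clock built on sdhci_calc_clk(); the
 * stabilisation polling is elided here (sdhci_at91_set_clock above
 * shows a complete version). */
static void my_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;
	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_writew(host, clk | SDHCI_CLOCK_INT_EN, SDHCI_CLOCK_CONTROL);
	/* ...poll SDHCI_CLOCK_INT_STABLE, then set SDHCI_CLOCK_CARD_EN */
}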
@@ -1319,8 +1368,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host = mmc_priv(mmc);
- sdhci_runtime_pm_get(host);
-
/* Firstly check card presence */
present = mmc->ops->get_cd(mmc);
@@ -1328,9 +1375,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
WARN_ON(host->mrq != NULL);
-#ifndef SDHCI_USE_LEDS_CLASS
- sdhci_activate_led(host);
-#endif
+ sdhci_led_activate(host);
/*
* Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
@@ -1405,11 +1450,11 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
-static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
+static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
+ struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
u8 ctrl;
- struct mmc_host *mmc = host->mmc;
spin_lock_irqsave(&host->lock, flags);
@@ -1563,18 +1608,10 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
spin_unlock_irqrestore(&host->lock, flags);
}
-static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+static int sdhci_get_cd(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
-
- sdhci_runtime_pm_get(host);
- sdhci_do_set_ios(host, ios);
- sdhci_runtime_pm_put(host);
-}
-
-static int sdhci_do_get_cd(struct sdhci_host *host)
-{
- int gpio_cd = mmc_gpio_get_cd(host->mmc);
+ int gpio_cd = mmc_gpio_get_cd(mmc);
if (host->flags & SDHCI_DEVICE_DEAD)
return 0;
@@ -1587,7 +1624,7 @@ static int sdhci_do_get_cd(struct sdhci_host *host)
* Try slot GPIO detect; if defined, it takes precedence
* over built-in controller functionality
*/
- if (!IS_ERR_VALUE(gpio_cd))
+ if (gpio_cd >= 0)
return !!gpio_cd;
/* If polling, assume that the card is always present. */
@@ -1598,17 +1635,6 @@ static int sdhci_do_get_cd(struct sdhci_host *host)
return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
-static int sdhci_get_cd(struct mmc_host *mmc)
-{
- struct sdhci_host *host = mmc_priv(mmc);
- int ret;
-
- sdhci_runtime_pm_get(host);
- ret = sdhci_do_get_cd(host);
- sdhci_runtime_pm_put(host);
- return ret;
-}
-
static int sdhci_check_ro(struct sdhci_host *host)
{
unsigned long flags;
@@ -1633,8 +1659,9 @@ static int sdhci_check_ro(struct sdhci_host *host)
#define SAMPLE_COUNT 5
-static int sdhci_do_get_ro(struct sdhci_host *host)
+static int sdhci_get_ro(struct mmc_host *mmc)
{
+ struct sdhci_host *host = mmc_priv(mmc);
int i, ro_count;
if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
@@ -1659,17 +1686,6 @@ static void sdhci_hw_reset(struct mmc_host *mmc)
host->ops->hw_reset(host);
}
-static int sdhci_get_ro(struct mmc_host *mmc)
-{
- struct sdhci_host *host = mmc_priv(mmc);
- int ret;
-
- sdhci_runtime_pm_get(host);
- ret = sdhci_do_get_ro(host);
- sdhci_runtime_pm_put(host);
- return ret;
-}
-
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
if (!(host->flags & SDHCI_DEVICE_DEAD)) {
@@ -1689,8 +1705,6 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
- sdhci_runtime_pm_get(host);
-
spin_lock_irqsave(&host->lock, flags);
if (enable)
host->flags |= SDHCI_SDIO_IRQ_ENABLED;
@@ -1699,14 +1713,12 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
sdhci_enable_sdio_irq_nolock(host, enable);
spin_unlock_irqrestore(&host->lock, flags);
-
- sdhci_runtime_pm_put(host);
}
-static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
- struct mmc_ios *ios)
+static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
{
- struct mmc_host *mmc = host->mmc;
+ struct sdhci_host *host = mmc_priv(mmc);
u16 ctrl;
int ret;
@@ -1794,29 +1806,13 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
}
}
-static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
- struct mmc_ios *ios)
-{
- struct sdhci_host *host = mmc_priv(mmc);
- int err;
-
- if (host->version < SDHCI_SPEC_300)
- return 0;
- sdhci_runtime_pm_get(host);
- err = sdhci_do_start_signal_voltage_switch(host, ios);
- sdhci_runtime_pm_put(host);
- return err;
-}
-
static int sdhci_card_busy(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
u32 present_state;
- sdhci_runtime_pm_get(host);
/* Check whether DAT[3:0] is 0000 */
present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
- sdhci_runtime_pm_put(host);
return !(present_state & SDHCI_DATA_LVL_MASK);
}
@@ -1843,7 +1839,6 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
unsigned int tuning_count = 0;
bool hs400_tuning;
- sdhci_runtime_pm_get(host);
spin_lock_irqsave(&host->lock, flags);
hs400_tuning = host->flags & SDHCI_HS400_TUNING;
@@ -1879,8 +1874,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
break;
case MMC_TIMING_UHS_SDR50:
- if (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
- host->flags & SDHCI_SDR104_NEEDS_TUNING)
+ if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
break;
/* FALLTHROUGH */
@@ -1891,7 +1885,6 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->ops->platform_execute_tuning) {
spin_unlock_irqrestore(&host->lock, flags);
err = host->ops->platform_execute_tuning(host, opcode);
- sdhci_runtime_pm_put(host);
return err;
}
@@ -2023,8 +2016,6 @@ out:
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
out_unlock:
spin_unlock_irqrestore(&host->lock, flags);
- sdhci_runtime_pm_put(host);
-
return err;
}
@@ -2105,7 +2096,7 @@ static void sdhci_card_event(struct mmc_host *mmc)
if (host->ops->card_event)
host->ops->card_event(host);
- present = sdhci_do_get_cd(host);
+ present = sdhci_get_cd(host->mmc);
spin_lock_irqsave(&host->lock, flags);
@@ -2214,15 +2205,12 @@ static void sdhci_tasklet_finish(unsigned long param)
host->cmd = NULL;
host->data = NULL;
-#ifndef SDHCI_USE_LEDS_CLASS
- sdhci_deactivate_led(host);
-#endif
+ sdhci_led_deactivate(host);
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
- sdhci_runtime_pm_put(host);
}
static void sdhci_timeout_timer(unsigned long data)
@@ -2679,7 +2667,7 @@ int sdhci_resume_host(struct sdhci_host *host)
sdhci_init(host, 0);
host->pwr = 0;
host->clock = 0;
- sdhci_do_set_ios(host, &host->mmc->ios);
+ sdhci_set_ios(host->mmc, &host->mmc->ios);
} else {
sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
mmiowb();
@@ -2703,33 +2691,6 @@ int sdhci_resume_host(struct sdhci_host *host)
EXPORT_SYMBOL_GPL(sdhci_resume_host);
-static int sdhci_runtime_pm_get(struct sdhci_host *host)
-{
- return pm_runtime_get_sync(host->mmc->parent);
-}
-
-static int sdhci_runtime_pm_put(struct sdhci_host *host)
-{
- pm_runtime_mark_last_busy(host->mmc->parent);
- return pm_runtime_put_autosuspend(host->mmc->parent);
-}
-
-static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
-{
- if (host->bus_on)
- return;
- host->bus_on = true;
- pm_runtime_get_noresume(host->mmc->parent);
-}
-
-static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
-{
- if (!host->bus_on)
- return;
- host->bus_on = false;
- pm_runtime_put_noidle(host->mmc->parent);
-}
-
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
unsigned long flags;
@@ -2768,8 +2729,8 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
/* Force clock and power re-program */
host->pwr = 0;
host->clock = 0;
- sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios);
- sdhci_do_set_ios(host, &host->mmc->ios);
+ sdhci_start_signal_voltage_switch(host->mmc, &host->mmc->ios);
+ sdhci_set_ios(host->mmc, &host->mmc->ios);
if ((host_flags & SDHCI_PV_ENABLED) &&
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
@@ -3014,7 +2975,8 @@ int sdhci_add_host(struct sdhci_host *host)
if (!host->ops->get_max_clock) {
pr_err("%s: Hardware doesn't specify base clock frequency.\n",
mmc_hostname(mmc));
- return -ENODEV;
+ ret = -ENODEV;
+ goto undma;
}
host->max_clk = host->ops->get_max_clock(host);
}
@@ -3051,7 +3013,7 @@ int sdhci_add_host(struct sdhci_host *host)
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
- if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk)))
+ if (!mmc->f_max || mmc->f_max > max_clk)
mmc->f_max = max_clk;
if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
@@ -3064,7 +3026,8 @@ int sdhci_add_host(struct sdhci_host *host)
} else {
pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
mmc_hostname(mmc));
- return -ENODEV;
+ ret = -ENODEV;
+ goto undma;
}
}
@@ -3114,12 +3077,13 @@ int sdhci_add_host(struct sdhci_host *host)
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
- IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+ mmc_gpio_get_cd(host->mmc) < 0)
mmc->caps |= MMC_CAP_NEEDS_POLL;
/* If there are external regulators, get them */
- if (mmc_regulator_get_supply(mmc) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER)
+ goto undma;
/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
if (!IS_ERR(mmc->supply.vqmmc)) {
@@ -3174,10 +3138,6 @@ int sdhci_add_host(struct sdhci_host *host)
if (caps[1] & SDHCI_USE_SDR50_TUNING)
host->flags |= SDHCI_SDR50_NEEDS_TUNING;
- /* Does the host need tuning for SDR104 / HS200? */
- if (mmc->caps2 & MMC_CAP2_HS200)
- host->flags |= SDHCI_SDR104_NEEDS_TUNING;
-
/* Driver Type(s) (A, C, D) supported by the host */
if (caps[1] & SDHCI_DRIVER_TYPE_A)
mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
@@ -3276,7 +3236,8 @@ int sdhci_add_host(struct sdhci_host *host)
if (mmc->ocr_avail == 0) {
pr_err("%s: Hardware doesn't report any support voltages.\n",
mmc_hostname(mmc));
- return -ENODEV;
+ ret = -ENODEV;
+ goto unreg;
}
spin_lock_init(&host->lock);
@@ -3360,25 +3321,18 @@ int sdhci_add_host(struct sdhci_host *host)
sdhci_dumpregs(host);
#endif
-#ifdef SDHCI_USE_LEDS_CLASS
- snprintf(host->led_name, sizeof(host->led_name),
- "%s::", mmc_hostname(mmc));
- host->led.name = host->led_name;
- host->led.brightness = LED_OFF;
- host->led.default_trigger = mmc_hostname(mmc);
- host->led.brightness_set = sdhci_led_control;
-
- ret = led_classdev_register(mmc_dev(mmc), &host->led);
+ ret = sdhci_led_register(host);
if (ret) {
pr_err("%s: Failed to register LED device: %d\n",
mmc_hostname(mmc), ret);
- goto reset;
+ goto unirq;
}
-#endif
mmiowb();
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto unled;
pr_info("%s: SDHCI controller on %s [%s] using %s\n",
mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
@@ -3390,15 +3344,25 @@ int sdhci_add_host(struct sdhci_host *host)
return 0;
-#ifdef SDHCI_USE_LEDS_CLASS
-reset:
+unled:
+ sdhci_led_unregister(host);
+unirq:
sdhci_do_reset(host, SDHCI_RESET_ALL);
sdhci_writel(host, 0, SDHCI_INT_ENABLE);
sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
free_irq(host->irq, host);
-#endif
untasklet:
tasklet_kill(&host->finish_tasklet);
+unreg:
+ if (!IS_ERR(mmc->supply.vqmmc))
+ regulator_disable(mmc->supply.vqmmc);
+undma:
+ if (host->align_buffer)
+ dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
+ host->adma_table_sz, host->align_buffer,
+ host->align_addr);
+ host->adma_table = NULL;
+ host->align_buffer = NULL;
return ret;
}
@@ -3430,9 +3394,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
mmc_remove_host(mmc);
-#ifdef SDHCI_USE_LEDS_CLASS
- led_classdev_unregister(&host->led);
-#endif
+ sdhci_led_unregister(host);
if (!dead)
sdhci_do_reset(host, SDHCI_RESET_ALL);
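Besides the LED consolidation, the sdhci.c hunks restructure sdhci_add_host()'s failure handling: earlier returns that leaked the ADMA bounce buffers or a held vqmmc regulator now funnel through the undma/unreg/unirq/unled labels, and mmc_add_host()'s return value is finally checked. The idiom, sketched with hypothetical step names:

/* Sketch only: the cascading-unwind shape sdhci_add_host() now
 * follows; step_*() and undo_*() are hypothetical placeholders. */
static int my_add_host(void)
{
	int ret;

	ret = step_dma();
	if (ret)
		return ret;

	ret = step_regulator();
	if (ret)
		goto err_dma;

	ret = step_register();
	if (ret)
		goto err_regulator;

	return 0;

err_regulator:
	undo_regulator();
err_dma:
	undo_dma();
	return ret;
}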
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0f39f4f84..609f87ca5 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -417,11 +417,6 @@ struct sdhci_host {
#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14)
/* Broken Clock divider zero in controller */
#define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15)
-/*
- * When internal clock is disabled, a delay is needed before modifying the
- * SD clock frequency or enabling back the internal clock.
- */
-#define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST (1<<16)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -433,7 +428,7 @@ struct sdhci_host {
struct mmc_host_ops mmc_host_ops; /* MMC host ops */
u64 dma_mask; /* custom DMA mask */
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
struct led_classdev led; /* LED control */
char led_name[32];
#endif
@@ -450,7 +445,6 @@ struct sdhci_host {
#define SDHCI_AUTO_CMD23 (1<<7) /* Auto CMD23 support */
#define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */
#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
-#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */
#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
@@ -661,6 +655,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host)
return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED);
}
+u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
+ unsigned int *actual_clock);
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index d9a655f47..dd64b8663 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -248,7 +248,6 @@ struct sh_mmcif_host {
int sg_idx;
int sg_blkidx;
bool power;
- bool card_present;
bool ccs_enable; /* Command Completion Signal support */
bool clk_ctrl2_enable;
struct mutex thread_lock;
@@ -1064,16 +1063,6 @@ static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
host->mmc->f_max, host->mmc->f_min);
}
-static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
-{
- struct mmc_host *mmc = host->mmc;
-
- if (!IS_ERR(mmc->supply.vmmc))
- /* Errors ignored... */
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
- ios->power_mode ? ios->vdd : 0);
-}
-
static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sh_mmcif_host *host = mmc_priv(mmc);
@@ -1091,42 +1080,32 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->state = STATE_IOS;
spin_unlock_irqrestore(&host->lock, flags);
- if (ios->power_mode == MMC_POWER_UP) {
- if (!host->card_present) {
- /* See if we also get DMA */
+ switch (ios->power_mode) {
+ case MMC_POWER_UP:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+ if (!host->power) {
+ clk_prepare_enable(host->clk);
+ pm_runtime_get_sync(dev);
+ sh_mmcif_sync_reset(host);
sh_mmcif_request_dma(host);
- host->card_present = true;
- }
- sh_mmcif_set_power(host, ios);
- } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
- /* clock stop */
- sh_mmcif_clock_control(host, 0);
- if (ios->power_mode == MMC_POWER_OFF) {
- if (host->card_present) {
- sh_mmcif_release_dma(host);
- host->card_present = false;
- }
+ host->power = true;
}
+ break;
+ case MMC_POWER_OFF:
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
if (host->power) {
- pm_runtime_put_sync(dev);
+ sh_mmcif_clock_control(host, 0);
+ sh_mmcif_release_dma(host);
+ pm_runtime_put(dev);
clk_disable_unprepare(host->clk);
host->power = false;
- if (ios->power_mode == MMC_POWER_OFF)
- sh_mmcif_set_power(host, ios);
- }
- host->state = STATE_IDLE;
- return;
- }
-
- if (ios->clock) {
- if (!host->power) {
- clk_prepare_enable(host->clk);
-
- pm_runtime_get_sync(dev);
- host->power = true;
- sh_mmcif_sync_reset(host);
}
+ break;
+ case MMC_POWER_ON:
sh_mmcif_clock_control(host, ios->clock);
+ break;
}
host->timing = ios->timing;
@@ -1519,23 +1498,23 @@ static int sh_mmcif_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- pm_runtime_enable(dev);
- host->power = false;
-
host->clk = devm_clk_get(dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
dev_err(dev, "cannot get clock: %d\n", ret);
- goto err_pm;
+ goto err_host;
}
ret = clk_prepare_enable(host->clk);
if (ret < 0)
- goto err_pm;
+ goto err_host;
sh_mmcif_clk_setup(host);
- ret = pm_runtime_resume(dev);
+ pm_runtime_enable(dev);
+ host->power = false;
+
+ ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_clk;
@@ -1579,12 +1558,13 @@ static int sh_mmcif_probe(struct platform_device *pdev)
sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
clk_get_rate(host->clk) / 1000000UL);
+ pm_runtime_put(dev);
clk_disable_unprepare(host->clk);
return ret;
err_clk:
clk_disable_unprepare(host->clk);
-err_pm:
+ pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
err_host:
mmc_free_host(mmc);
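The sh_mmcif rewrite collapses the old bookkeeping (the card_present flag plus ios->clock special cases) into the three MMC power states, and reorders probe so that pm_runtime_enable() follows clock setup, switching from pm_runtime_resume() to a pm_runtime_get_sync()/pm_runtime_put() bracket. The contract the new switch implements, in outline:

/* Sketch only: the power_mode contract sh_mmcif_set_ios() follows;
 * "my_host" is hypothetical. One-time bring-up happens on POWER_UP,
 * clocking on POWER_ON, and a mirror-image teardown on POWER_OFF. */
static void my_set_power(struct my_host *host, struct mmc_ios *ios)
{
	switch (ios->power_mode) {
	case MMC_POWER_UP:	/* regulator on; clk/PM/DMA brought up once */
		break;
	case MMC_POWER_ON:	/* bus clock follows ios->clock */
		break;
	case MMC_POWER_OFF:	/* regulator off; release DMA, clock, PM */
		break;
	}
}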
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 9aa147959..f750f9494 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -28,10 +28,12 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/sh_dma.h>
#include <linux/delay.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinctrl-state.h>
+#include <linux/regulator/consumer.h>
#include "tmio_mmc.h"
@@ -48,10 +50,8 @@ struct sh_mobile_sdhi_of_data {
unsigned bus_shift;
};
-static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = {
- {
- .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
- },
+static const struct sh_mobile_sdhi_of_data of_default_cfg = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT,
};
static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
@@ -62,7 +62,7 @@ static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
- TMIO_MMC_CLK_ACTUAL | TMIO_MMC_FAST_CLK_CHG,
+ TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
.capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
.dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
.dma_rx_offset = 0x2000,
@@ -70,17 +70,16 @@ static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
static const struct sh_mobile_sdhi_of_data of_rcar_gen3_compatible = {
.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
- TMIO_MMC_CLK_ACTUAL | TMIO_MMC_FAST_CLK_CHG,
- .capabilities = MMC_CAP_SD_HIGHSPEED,
+ TMIO_MMC_CLK_ACTUAL | TMIO_MMC_MIN_RCAR2,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
.bus_shift = 2,
};
static const struct of_device_id sh_mobile_sdhi_of_match[] = {
{ .compatible = "renesas,sdhi-shmobile" },
- { .compatible = "renesas,sdhi-sh7372" },
- { .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], },
- { .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], },
- { .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], },
+ { .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, },
+ { .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, },
+ { .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, },
{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, },
{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, },
{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, },
@@ -97,6 +96,8 @@ struct sh_mobile_sdhi {
struct clk *clk;
struct tmio_mmc_data mmc_data;
struct tmio_mmc_dma dma_priv;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default, *pins_uhs;
};
static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
@@ -131,16 +132,28 @@ static void sh_mobile_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width)
sd_ctrl_write16(host, EXT_ACC, val);
}
-static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int *f)
+static int sh_mobile_sdhi_clk_enable(struct tmio_mmc_host *host)
{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct mmc_host *mmc = host->mmc;
struct sh_mobile_sdhi *priv = host_to_priv(host);
int ret = clk_prepare_enable(priv->clk);
if (ret < 0)
return ret;
- *f = clk_get_rate(priv->clk);
+ /*
+ * The clock driver may not know what maximum frequency
+ * actually works, so it should be set with the max-frequency
+ * property, which will already have been read into f_max. If it
+ * was missing, assume the current frequency is the maximum.
+ */
+ if (!mmc->f_max)
+ mmc->f_max = clk_get_rate(priv->clk);
+
+ /*
+ * Minimum frequency is the minimum input clock frequency
+ * divided by our maximum divider.
+ */
+ mmc->f_min = max(clk_round_rate(priv->clk, 1) / 512, 1L);
/* enable 16bit data access on SDBUF as default */
sh_mobile_sdhi_sdbuf_width(host, 16);
@@ -148,19 +161,92 @@ static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int
return 0;
}
-static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev)
+static unsigned int sh_mobile_sdhi_clk_update(struct tmio_mmc_host *host,
+ unsigned int new_clock)
+{
+ struct sh_mobile_sdhi *priv = host_to_priv(host);
+ unsigned int freq, diff, best_freq = 0, diff_min = ~0;
+ int i, ret;
+
+ /* tested only on RCar Gen2+ currently; may work for others */
+ if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
+ return clk_get_rate(priv->clk);
+
+ /*
+ * We want the bus clock to be as close as possible to, but no
+ * greater than, new_clock. As we can divide by 1 << i for
+ * any i in [0, 9] we want the input clock to be as close as
+ * possible, but no greater than, new_clock << i.
+ */
+ for (i = min(9, ilog2(UINT_MAX / new_clock)); i >= 0; i--) {
+ freq = clk_round_rate(priv->clk, new_clock << i);
+ if (freq > (new_clock << i)) {
+ /* Too fast; look for a slightly slower option */
+ freq = clk_round_rate(priv->clk,
+ (new_clock << i) / 4 * 3);
+ if (freq > (new_clock << i))
+ continue;
+ }
+
+ diff = new_clock - (freq >> i);
+ if (diff <= diff_min) {
+ best_freq = freq;
+ diff_min = diff;
+ }
+ }
+
+ ret = clk_set_rate(priv->clk, best_freq);
+
+ return ret == 0 ? best_freq : clk_get_rate(priv->clk);
+}
+
+static void sh_mobile_sdhi_clk_disable(struct tmio_mmc_host *host)
{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
struct sh_mobile_sdhi *priv = host_to_priv(host);
+
clk_disable_unprepare(priv->clk);
}
+static int sh_mobile_sdhi_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct sh_mobile_sdhi *priv = host_to_priv(host);
+ struct pinctrl_state *pin_state;
+ int ret;
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ pin_state = priv->pins_default;
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ pin_state = priv->pins_uhs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * If anything is missing, assume signal voltage is fixed at
+ * 3.3V and succeed/fail accordingly.
+ */
+ if (IS_ERR(priv->pinctrl) || IS_ERR(pin_state))
+ return ios->signal_voltage ==
+ MMC_SIGNAL_VOLTAGE_330 ? 0 : -EINVAL;
+
+ ret = mmc_regulator_set_vqmmc(host->mmc, ios);
+ if (ret)
+ return ret;
+
+ return pinctrl_select_state(priv->pinctrl, pin_state);
+}
+
static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
{
int timeout = 1000;
- while (--timeout && !(sd_ctrl_read16(host, CTL_STATUS2) & (1 << 13)))
+ while (--timeout && !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS)
+ & TMIO_STAT_SCLKDIVEN))
udelay(1);
if (!timeout) {
@@ -226,7 +312,6 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
struct tmio_mmc_host *host;
struct resource *res;
int irq, ret, i = 0;
- bool multiplexed_isr = true;
struct tmio_mmc_dma *dma_priv;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -247,6 +332,14 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
goto eprobe;
}
+ priv->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (!IS_ERR(priv->pinctrl)) {
+ priv->pins_default = pinctrl_lookup_state(priv->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ priv->pins_uhs = pinctrl_lookup_state(priv->pinctrl,
+ "state_uhs");
+ }
+
host = tmio_mmc_host_alloc(pdev);
if (!host) {
ret = -ENOMEM;
@@ -267,8 +360,10 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
host->dma = dma_priv;
host->write16_hook = sh_mobile_sdhi_write16_hook;
host->clk_enable = sh_mobile_sdhi_clk_enable;
+ host->clk_update = sh_mobile_sdhi_clk_update;
host->clk_disable = sh_mobile_sdhi_clk_disable;
host->multi_io_quirk = sh_mobile_sdhi_multi_io_quirk;
+ host->start_signal_voltage_switch = sh_mobile_sdhi_start_signal_voltage_switch;
/* Originally registers were 16 bit apart; they can be 32 or 64 apart nowadays */
if (!host->bus_shift && resource_size(res) > 0x100) /* old way to determine the shift */
@@ -308,63 +403,24 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
if (ret < 0)
goto efree;
- /*
- * Allow one or more specific (named) ISRs or
- * one or more multiplexed (un-named) ISRs.
- */
-
- irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT);
- if (irq >= 0) {
- multiplexed_isr = false;
- ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_card_detect_irq, 0,
- dev_name(&pdev->dev), host);
- if (ret)
- goto eirq;
- }
-
- irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO);
- if (irq >= 0) {
- multiplexed_isr = false;
- ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdio_irq, 0,
+ while (1) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ break;
+ i++;
+ ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
dev_name(&pdev->dev), host);
if (ret)
goto eirq;
}
- irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD);
- if (irq >= 0) {
- multiplexed_isr = false;
- ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdcard_irq, 0,
- dev_name(&pdev->dev), host);
- if (ret)
- goto eirq;
- } else if (!multiplexed_isr) {
- dev_err(&pdev->dev,
- "Principal SD-card IRQ is missing among named interrupts\n");
+ /* There must be at least one IRQ source */
+ if (!i) {
ret = irq;
goto eirq;
}
- if (multiplexed_isr) {
- while (1) {
- irq = platform_get_irq(pdev, i);
- if (irq < 0)
- break;
- i++;
- ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
- dev_name(&pdev->dev), host);
- if (ret)
- goto eirq;
- }
-
- /* There must be at least one IRQ source */
- if (!i) {
- ret = irq;
- goto eirq;
- }
- }
-
- dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n",
+ dev_info(&pdev->dev, "%s base at 0x%08lx max clock rate %u MHz\n",
mmc_hostname(host->mmc), (unsigned long)
(platform_get_resource(pdev, IORESOURCE_MEM, 0)->start),
host->mmc->f_max / 1000000);
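sh_mobile_sdhi_clk_update() above is the interesting addition: instead of leaving the parent clock fixed, it searches for the parent rate that, divided by some power of two up to 512, lands closest to (without exceeding) the requested bus clock. A worked pass with assumed rates:

/*
 * Worked example (all rates hypothetical). Suppose clk_round_rate()
 * can return 195 MHz, 97.5 MHz or 48.75 MHz and new_clock is 50 MHz:
 *
 *   i = 2: asks for <= 200 MHz, gets 195 MHz   -> bus 195/4  = 48.75 MHz
 *   i = 1: asks for <= 100 MHz, gets 97.5 MHz  -> bus 97.5/2 = 48.75 MHz
 *   i = 0: asks for <=  50 MHz, gets 48.75 MHz -> bus        = 48.75 MHz
 *
 * All three candidates miss by the same 1.25 MHz; because the loop
 * runs i downwards and accepts ties (diff <= diff_min), the i = 0
 * setting wins and the parent runs as slow as possible. The "/ 4 * 3"
 * retry handles parents that can only round upwards past the ceiling.
 */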
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 7fc8b7aa8..2ee4c21ec 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -970,8 +970,8 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
[SDXC_CLK_400K] = { .output = 180, .sample = 180 },
[SDXC_CLK_25M] = { .output = 180, .sample = 75 },
[SDXC_CLK_50M] = { .output = 150, .sample = 120 },
- [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 },
- [SDXC_CLK_50M_DDR_8BIT] = { .output = 90, .sample = 120 },
+ [SDXC_CLK_50M_DDR] = { .output = 54, .sample = 36 },
+ [SDXC_CLK_50M_DDR_8BIT] = { .output = 72, .sample = 72 },
};
static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
@@ -1129,11 +1129,6 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
MMC_CAP_1_8V_DDR |
MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
- /* TODO MMC DDR is not working on A80 */
- if (of_device_is_compatible(pdev->dev.of_node,
- "allwinner,sun9i-a80-mmc"))
- mmc->caps &= ~MMC_CAP_1_8V_DDR;
-
ret = mmc_of_parse(mmc);
if (ret)
goto error_free_dma;
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 4a597f5a5..1aac2ad8e 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -1,6 +1,8 @@
/*
* linux/drivers/mmc/host/tmio_mmc.h
*
+ * Copyright (C) 2016 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2015-16 Renesas Electronics Corporation
* Copyright (C) 2007 Ian Molton
* Copyright (C) 2004 Ian Molton
*
@@ -18,12 +20,67 @@
#include <linux/dmaengine.h>
#include <linux/highmem.h>
-#include <linux/mmc/tmio.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
+#define CTL_SD_CMD 0x00
+#define CTL_ARG_REG 0x04
+#define CTL_STOP_INTERNAL_ACTION 0x08
+#define CTL_XFER_BLK_COUNT 0xa
+#define CTL_RESPONSE 0x0c
+/* driver merges STATUS and following STATUS2 */
+#define CTL_STATUS 0x1c
+/* driver merges IRQ_MASK and following IRQ_MASK2 */
+#define CTL_IRQ_MASK 0x20
+#define CTL_SD_CARD_CLK_CTL 0x24
+#define CTL_SD_XFER_LEN 0x26
+#define CTL_SD_MEM_CARD_OPT 0x28
+#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
+#define CTL_SD_DATA_PORT 0x30
+#define CTL_TRANSACTION_CTL 0x34
+#define CTL_SDIO_STATUS 0x36
+#define CTL_SDIO_IRQ_MASK 0x38
+#define CTL_DMA_ENABLE 0xd8
+#define CTL_RESET_SD 0xe0
+#define CTL_VERSION 0xe2
+#define CTL_SDIO_REGS 0x100
+#define CTL_CLK_AND_WAIT_CTL 0x138
+#define CTL_RESET_SDIO 0x1e0
+
+/* Definitions for values the CTL_STATUS register can take. */
+#define TMIO_STAT_CMDRESPEND BIT(0)
+#define TMIO_STAT_DATAEND BIT(2)
+#define TMIO_STAT_CARD_REMOVE BIT(3)
+#define TMIO_STAT_CARD_INSERT BIT(4)
+#define TMIO_STAT_SIGSTATE BIT(5)
+#define TMIO_STAT_WRPROTECT BIT(7)
+#define TMIO_STAT_CARD_REMOVE_A BIT(8)
+#define TMIO_STAT_CARD_INSERT_A BIT(9)
+#define TMIO_STAT_SIGSTATE_A BIT(10)
+
+/* These technically belong to CTL_STATUS2, but the driver merges them */
+#define TMIO_STAT_CMD_IDX_ERR BIT(16)
+#define TMIO_STAT_CRCFAIL BIT(17)
+#define TMIO_STAT_STOPBIT_ERR BIT(18)
+#define TMIO_STAT_DATATIMEOUT BIT(19)
+#define TMIO_STAT_RXOVERFLOW BIT(20)
+#define TMIO_STAT_TXUNDERRUN BIT(21)
+#define TMIO_STAT_CMDTIMEOUT BIT(22)
+#define TMIO_STAT_DAT0 BIT(23) /* only known on R-Car so far */
+#define TMIO_STAT_RXRDY BIT(24)
+#define TMIO_STAT_TXRQ BIT(25)
+#define TMIO_STAT_ILL_FUNC BIT(29) /* only when !TMIO_MMC_HAS_IDLE_WAIT */
+#define TMIO_STAT_SCLKDIVEN BIT(29) /* only when TMIO_MMC_HAS_IDLE_WAIT */
+#define TMIO_STAT_CMD_BUSY BIT(30)
+#define TMIO_STAT_ILL_ACCESS BIT(31)
+
+#define CLK_CTL_DIV_MASK 0xff
+#define CLK_CTL_SCLKEN BIT(8)
+
+#define TMIO_BBS 512 /* Boot block size */
+
/* Definitions for values the CTRL_SDIO_STATUS register can take. */
#define TMIO_SDIO_STAT_IOIRQ 0x0001
#define TMIO_SDIO_STAT_EXPUB52 0x4000
@@ -95,10 +152,14 @@ struct tmio_mmc_host {
bool sdio_irq_enabled;
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
- int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
- void (*clk_disable)(struct platform_device *pdev);
+ int (*clk_enable)(struct tmio_mmc_host *host);
+ unsigned int (*clk_update)(struct tmio_mmc_host *host,
+ unsigned int new_clock);
+ void (*clk_disable)(struct tmio_mmc_host *host);
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
+ int (*start_signal_voltage_switch)(struct mmc_host *mmc,
+ struct mmc_ios *ios);
};
struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
@@ -111,9 +172,6 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
irqreturn_t tmio_mmc_irq(int irq, void *devid);
-irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid);
-irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid);
-irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid);
static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
unsigned long *flags)
@@ -177,7 +235,7 @@ static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
readsw(host->ctl + (addr << host->bus_shift), buf, count);
}
-static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+static inline u32 sd_ctrl_read16_and_16_as_32(struct tmio_mmc_host *host, int addr)
{
return readw(host->ctl + (addr << host->bus_shift)) |
readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
@@ -199,11 +257,10 @@ static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
writesw(host->ctl + (addr << host->bus_shift), buf, count);
}
-static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host, int addr, u32 val)
{
writew(val, host->ctl + (addr << host->bus_shift));
writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
-
#endif
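Renaming sd_ctrl_read32()/sd_ctrl_write32() to the explicit ..._16_and_16_as_32()/..._32_as_16_and_16() forms documents that these controllers have no true 32-bit window: a logical 32-bit register such as CTL_STATUS is two 16-bit registers 2 bytes apart (scaled by bus_shift), the second supplying bits 31:16. A minimal usage sketch against the merged status word:

/* Sketch only: consuming the composed status; TMIO_STAT_SCLKDIVEN
 * (bit 29) physically lives in the second 16-bit register, STATUS2,
 * as bit 13. "my_clk_div_writable" is a hypothetical name. */
static bool my_clk_div_writable(struct tmio_mmc_host *host)
{
	u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);

	return status & TMIO_STAT_SCLKDIVEN;
}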
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 7fb0c034d..fa8a936a3 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -15,7 +15,6 @@
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 0521b4662..f44e2ab7a 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -39,7 +39,6 @@
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
-#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
@@ -56,18 +55,18 @@
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
- sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
+ sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
- sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
+ sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
- sd_ctrl_write32(host, CTL_STATUS, ~i);
+ sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
}
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
@@ -154,31 +153,16 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
}
}
-static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
- unsigned int new_clock)
+static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
- u32 clk = 0, clock;
-
- if (new_clock) {
- for (clock = host->mmc->f_min, clk = 0x80000080;
- new_clock >= (clock << 1);
- clk >>= 1)
- clock <<= 1;
-
- /* 1/1 clock is option */
- if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
- ((clk >> 22) & 0x1))
- clk |= 0xff;
- }
-
- if (host->set_clk_div)
- host->set_clk_div(host->pdev, (clk >> 22) & 1);
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+ msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 1 : 10);
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
- if (!(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG))
+ if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
+ sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
msleep(10);
+ }
}
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
@@ -190,19 +174,41 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 5 : 10);
+ msleep(host->pdata->flags & TMIO_MMC_MIN_RCAR2 ? 5 : 10);
}
-static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
+static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
+ unsigned int new_clock)
{
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 1 : 10);
+ u32 clk = 0, clock;
- if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
- sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
- msleep(10);
+ if (new_clock == 0) {
+ tmio_mmc_clk_stop(host);
+ return;
}
+
+ if (host->clk_update)
+ clock = host->clk_update(host, new_clock) / 512;
+ else
+ clock = host->mmc->f_min;
+
+ for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
+ clock <<= 1;
+
+ /* 1/1 clock is an option */
+ if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) && ((clk >> 22) & 0x1))
+ clk |= 0xff;
+
+ if (host->set_clk_div)
+ host->set_clk_div(host->pdev, (clk >> 22) & 1);
+
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+ sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+ sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
+ if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
+ msleep(10);
+
+ tmio_mmc_clk_start(host);
}
static void tmio_mmc_reset(struct tmio_mmc_host *host)
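The relocated tmio_mmc_set_clock() above is now the single entry point: new_clock == 0 stops the clock outright, otherwise ->clk_update (when provided, as by sh_mobile_sdhi earlier) re-rates the parent and the divider loop picks the register encoding. The loop starts from the parent divided by 512 and doubles the candidate while the target still allows it; a worked pass with assumed numbers:

/*
 * Worked example (hypothetical rates): parent P = 100 MHz, so the
 * search starts at clock = P/512 ~= 195.3 kHz with clk = 0x80000080.
 * For new_clock = 25 MHz the loop doubles clock seven times
 * (195.3 kHz -> 25 MHz), shifting clk right each time, and stops when
 * one more doubling (to 50 MHz) would exceed the target, leaving the
 * divider field encoding divide-by-4 of the parent. Bit 22 of the
 * shifted pattern flags the divide-by-1 case, which needs the 0xff
 * encoding when TMIO_MMC_CLK_ACTUAL is set.
 */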
@@ -264,9 +270,6 @@ static void tmio_mmc_reset_work(struct work_struct *work)
tmio_mmc_abort_dma(host);
mmc_request_done(host->mmc, mrq);
-
- pm_runtime_mark_last_busy(mmc_dev(host->mmc));
- pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
/* called with host->lock held, interrupts disabled */
@@ -296,9 +299,6 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
tmio_mmc_abort_dma(host);
mmc_request_done(host->mmc, mrq);
-
- pm_runtime_mark_last_busy(mmc_dev(host->mmc));
- pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
static void tmio_mmc_done_work(struct work_struct *work)
@@ -375,7 +375,7 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command
tmio_mmc_enable_mmc_irqs(host, irq_mask);
/* Fire off the command */
- sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
+ sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
sd_ctrl_write16(host, CTL_SD_CMD, c);
return 0;
@@ -530,7 +530,7 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
goto out;
if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
- u32 status = sd_ctrl_read32(host, CTL_STATUS);
+ u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
bool done = false;
/*
@@ -542,7 +542,7 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
* waiting for one more interrupt fixes the problem.
*/
if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
- if (status & TMIO_STAT_ILL_FUNC)
+ if (status & TMIO_STAT_SCLKDIVEN)
done = true;
} else {
if (!(status & TMIO_STAT_CMD_BUSY))
@@ -585,7 +585,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
*/
for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
- cmd->resp[i] = sd_ctrl_read32(host, addr);
+ cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);
if (cmd->flags & MMC_RSP_136) {
cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
@@ -625,19 +625,6 @@ out:
spin_unlock(&host->lock);
}
-static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host,
- int *ireg, int *status)
-{
- *status = sd_ctrl_read32(host, CTL_STATUS);
- *ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;
-
- pr_debug_status(*status);
- pr_debug_status(*ireg);
-
- /* Clear the status except the interrupt status */
- sd_ctrl_write32(host, CTL_STATUS, TMIO_MASK_IRQ);
-}
-
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
int ireg, int status)
{
@@ -657,18 +644,6 @@ static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
return false;
}
-irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid)
-{
- unsigned int ireg, status;
- struct tmio_mmc_host *host = devid;
-
- tmio_mmc_card_irq_status(host, &ireg, &status);
- __tmio_mmc_card_detect_irq(host, ireg, status);
-
- return IRQ_HANDLED;
-}
-EXPORT_SYMBOL(tmio_mmc_card_detect_irq);
-
static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
int ireg, int status)
{
@@ -698,19 +673,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host,
return false;
}
-irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid)
-{
- unsigned int ireg, status;
- struct tmio_mmc_host *host = devid;
-
- tmio_mmc_card_irq_status(host, &ireg, &status);
- __tmio_mmc_sdcard_irq(host, ireg, status);
-
- return IRQ_HANDLED;
-}
-EXPORT_SYMBOL(tmio_mmc_sdcard_irq);
-
-irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
+static void tmio_mmc_sdio_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
struct mmc_host *mmc = host->mmc;
@@ -719,7 +682,7 @@ irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
unsigned int sdio_status;
if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
- return IRQ_HANDLED;
+ return;
status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;
@@ -732,19 +695,22 @@ irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
mmc_signal_sdio_irq(mmc);
-
- return IRQ_HANDLED;
}
-EXPORT_SYMBOL(tmio_mmc_sdio_irq);
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
unsigned int ireg, status;
- pr_debug("MMC IRQ begin\n");
+ status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
+ ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;
+
+ pr_debug_status(status);
+ pr_debug_status(ireg);
+
+ /* Clear the status except the interrupt status */
+ sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);
- tmio_mmc_card_irq_status(host, &ireg, &status);
if (__tmio_mmc_card_detect_irq(host, ireg, status))
return IRQ_HANDLED;
if (__tmio_mmc_sdcard_irq(host, ireg, status))
@@ -812,8 +778,6 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
spin_unlock_irqrestore(&host->lock, flags);
- pm_runtime_get_sync(mmc_dev(mmc));
-
if (mrq->data) {
ret = tmio_mmc_start_data(host, mrq->data);
if (ret)
@@ -832,24 +796,14 @@ fail:
host->mrq = NULL;
mrq->cmd->error = ret;
mmc_request_done(mmc, mrq);
-
- pm_runtime_mark_last_busy(mmc_dev(mmc));
- pm_runtime_put_autosuspend(mmc_dev(mmc));
}
-static int tmio_mmc_clk_update(struct tmio_mmc_host *host)
+static int tmio_mmc_clk_enable(struct tmio_mmc_host *host)
{
- struct mmc_host *mmc = host->mmc;
- int ret;
-
if (!host->clk_enable)
return -ENOTSUPP;
- ret = host->clk_enable(host->pdev, &mmc->f_max);
- if (!ret)
- mmc->f_min = mmc->f_max / 512;
-
- return ret;
+ return host->clk_enable(host);
}
static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
@@ -925,8 +879,6 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct device *dev = &host->pdev->dev;
unsigned long flags;
- pm_runtime_get_sync(mmc_dev(mmc));
-
mutex_lock(&host->ios_lock);
spin_lock_irqsave(&host->lock, flags);
@@ -959,14 +911,12 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
tmio_mmc_clk_stop(host);
break;
case MMC_POWER_UP:
- tmio_mmc_set_clock(host, ios->clock);
tmio_mmc_power_on(host, ios->vdd);
- tmio_mmc_clk_start(host);
+ tmio_mmc_set_clock(host, ios->clock);
tmio_mmc_set_bus_width(host, ios->bus_width);
break;
case MMC_POWER_ON:
tmio_mmc_set_clock(host, ios->clock);
- tmio_mmc_clk_start(host);
tmio_mmc_set_bus_width(host, ios->bus_width);
break;
}
@@ -983,9 +933,6 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->clk_cache = ios->clock;
mutex_unlock(&host->ios_lock);
-
- pm_runtime_mark_last_busy(mmc_dev(mmc));
- pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
@@ -996,11 +943,8 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
if (ret >= 0)
return ret;
- pm_runtime_get_sync(mmc_dev(mmc));
ret = !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
- (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
- pm_runtime_mark_last_busy(mmc_dev(mmc));
- pm_runtime_put_autosuspend(mmc_dev(mmc));
+ (sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
return ret;
}
@@ -1016,12 +960,20 @@ static int tmio_multi_io_quirk(struct mmc_card *card,
return blk_size;
}
-static const struct mmc_host_ops tmio_mmc_ops = {
+static int tmio_mmc_card_busy(struct mmc_host *mmc)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+
+ return !(sd_ctrl_read16_and_16_as_32(host, CTL_STATUS) & TMIO_STAT_DAT0);
+}
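
The new .card_busy hook reports whether the card is still driving DAT0 low. For context, the core side polls such a hook in a bounded loop; a minimal sketch of that pattern, assuming a caller with access to the mmc_host (the helper name and timeout are illustrative, not part of this patch):

	/*
	 * Sketch: poll .card_busy until the card releases DAT0 or a
	 * timeout expires. Illustrative only.
	 */
	static int wait_card_not_busy(struct mmc_host *mmc, unsigned int ms)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(ms);

		while (mmc->ops->card_busy && mmc->ops->card_busy(mmc)) {
			if (time_after(jiffies, deadline))
				return -ETIMEDOUT;
			usleep_range(100, 200);
		}
		return 0;
	}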
+
+static struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
.get_cd = mmc_gpio_get_cd,
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
+ .card_busy = tmio_mmc_card_busy,
.multi_io_quirk = tmio_multi_io_quirk,
};
@@ -1120,7 +1072,9 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
goto host_free;
}
+ tmio_mmc_ops.start_signal_voltage_switch = _host->start_signal_voltage_switch;
mmc->ops = &tmio_mmc_ops;
+
mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = 32;
@@ -1135,7 +1089,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
mmc->caps & MMC_CAP_NONREMOVABLE ||
mmc->slot.cd_irq >= 0);
- if (tmio_mmc_clk_update(_host) < 0) {
+ if (tmio_mmc_clk_enable(_host) < 0) {
mmc->f_max = pdata->hclk;
mmc->f_min = mmc->f_max / 512;
}
@@ -1159,7 +1113,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
tmio_mmc_clk_stop(_host);
tmio_mmc_reset(_host);
- _host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
+ _host->sdcard_irq_mask = sd_ctrl_read16_and_16_as_32(_host, CTL_IRQ_MASK);
tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
/* Unmask the IRQs we want to know about */
@@ -1251,7 +1205,7 @@ int tmio_mmc_host_runtime_suspend(struct device *dev)
tmio_mmc_clk_stop(host);
if (host->clk_disable)
- host->clk_disable(host->pdev);
+ host->clk_disable(host);
return 0;
}
@@ -1263,12 +1217,10 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
struct tmio_mmc_host *host = mmc_priv(mmc);
tmio_mmc_reset(host);
- tmio_mmc_clk_update(host);
+ tmio_mmc_clk_enable(host);
- if (host->clk_cache) {
+ if (host->clk_cache)
tmio_mmc_set_clock(host, host->clk_cache);
- tmio_mmc_clk_start(host);
- }
tmio_mmc_enable_dma(host, true);
diff --git a/drivers/mmc/host/toshsd.c b/drivers/mmc/host/toshsd.c
index e2cdd5fb1..553ef41bb 100644
--- a/drivers/mmc/host/toshsd.c
+++ b/drivers/mmc/host/toshsd.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 807c06e20..1bd5f1a18 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -22,6 +22,7 @@
#include <linux/mmc/sdio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
@@ -198,6 +199,11 @@ struct usdhi6_host {
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
bool dma_active;
+
+ /* Pin control */
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_uhs;
};
/* I/O primitives */
@@ -1147,12 +1153,45 @@ static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable)
}
}
+static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
+{
+ if (IS_ERR(host->pins_uhs))
+ return 0;
+
+ switch (voltage) {
+ case MMC_SIGNAL_VOLTAGE_180:
+ case MMC_SIGNAL_VOLTAGE_120:
+ return pinctrl_select_state(host->pinctrl,
+ host->pins_uhs);
+
+ default:
+ return pinctrl_select_state(host->pinctrl,
+ host->pins_default);
+ }
+}
+
+static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ int ret;
+
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
+ if (ret < 0)
+ return ret;
+
+ ret = usdhi6_set_pinstates(mmc_priv(mmc), ios->signal_voltage);
+ if (ret)
+ dev_warn_once(mmc_dev(mmc),
+ "Failed to set pinstate err=%d\n", ret);
+ return ret;
+}
+
static struct mmc_host_ops usdhi6_ops = {
.request = usdhi6_request,
.set_ios = usdhi6_set_ios,
.get_cd = usdhi6_get_cd,
.get_ro = usdhi6_get_ro,
.enable_sdio_irq = usdhi6_enable_sdio_irq,
+ .start_signal_voltage_switch = usdhi6_sig_volt_switch,
};
/* State machine handlers */
@@ -1730,6 +1769,25 @@ static int usdhi6_probe(struct platform_device *pdev)
host->wait = USDHI6_WAIT_FOR_REQUEST;
host->timeout = msecs_to_jiffies(4000);
+ host->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(host->pinctrl)) {
+ ret = PTR_ERR(host->pinctrl);
+ goto e_free_mmc;
+ }
+
+ host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
+ if (!IS_ERR(host->pins_uhs)) {
+ host->pins_default = pinctrl_lookup_state(host->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+
+ if (IS_ERR(host->pins_default)) {
+ dev_err(dev,
+ "UHS pinctrl requires a default pin state.\n");
+ ret = PTR_ERR(host->pins_default);
+ goto e_free_mmc;
+ }
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->base = devm_ioremap_resource(dev, res);
if (IS_ERR(host->base)) {
@@ -1785,7 +1843,7 @@ static int usdhi6_probe(struct platform_device *pdev)
mmc->ops = &usdhi6_ops;
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
- MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_SDIO_IRQ;
+ MMC_CAP_SDIO_IRQ;
/* Set .max_segs to some random number. Feel free to adjust. */
mmc->max_segs = 32;
mmc->max_blk_size = 512;
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index 3b3dabce5..bbfa1f129 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -115,6 +115,7 @@ config MTD_MAP_BANK_WIDTH_16
config MTD_MAP_BANK_WIDTH_32
bool "Support 256-bit buswidth" if MTD_CFI_GEOMETRY
+ select MTD_COMPLEX_MAPPINGS if HAS_IOMEM
default n
help
If you wish to support CFI devices on a physical bus which is
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 347bb83db..1c65c15b3 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -2,6 +2,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
@@ -109,8 +110,7 @@ static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
if ((from + len) > mtd->size)
return -EINVAL;
- memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(b47s->window + from),
- len);
+ memcpy_fromio(buf, b47s->window + from, len);
*retlen = len;
return len;
@@ -275,15 +275,33 @@ static void bcm47xxsflash_bcma_cc_write(struct bcm47xxsflash *b47s, u16 offset,
static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
{
- struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct bcma_sflash *sflash = dev_get_platdata(dev);
struct bcm47xxsflash *b47s;
+ struct resource *res;
int err;
- b47s = devm_kzalloc(&pdev->dev, sizeof(*b47s), GFP_KERNEL);
+ b47s = devm_kzalloc(dev, sizeof(*b47s), GFP_KERNEL);
if (!b47s)
return -ENOMEM;
sflash->priv = b47s;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "invalid resource\n");
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ res->name)) {
+ dev_err(dev, "can't request region for resource %pR\n", res);
+ return -EBUSY;
+ }
+ b47s->window = ioremap_cache(res->start, resource_size(res));
+ if (!b47s->window) {
+ dev_err(dev, "ioremap failed for resource %pR\n", res);
+ return -ENOMEM;
+ }
+
b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash);
b47s->cc_read = bcm47xxsflash_bcma_cc_read;
b47s->cc_write = bcm47xxsflash_bcma_cc_write;
@@ -297,7 +315,6 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
break;
}
- b47s->window = sflash->window;
b47s->blocksize = sflash->blocksize;
b47s->numblocks = sflash->numblocks;
b47s->size = sflash->size;
@@ -306,6 +323,7 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
if (err) {
pr_err("Failed to register MTD device: %d\n", err);
+ iounmap(b47s->window);
return err;
}
@@ -321,6 +339,7 @@ static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
struct bcm47xxsflash *b47s = sflash->priv;
mtd_device_unregister(&b47s->mtd);
+ iounmap(b47s->window);
return 0;
}
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
index fe93daf4f..1564b62b4 100644
--- a/drivers/mtd/devices/bcm47xxsflash.h
+++ b/drivers/mtd/devices/bcm47xxsflash.h
@@ -65,7 +65,8 @@ struct bcm47xxsflash {
enum bcm47xxsflash_type type;
- u32 window;
+ void __iomem *window;
+
u32 blocksize;
u16 numblocks;
u32 size;
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index e7b2e4396..b833e6cc6 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -67,16 +67,40 @@ module_param(reliable_mode, uint, 0);
MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
"2=reliable) : MLC normal operations are in normal mode");
-/**
- * struct docg3_oobinfo - DiskOnChip G3 OOB layout
- * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC)
- * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC)
- * @oobfree: free pageinfo bytes (byte 0 until byte 6, byte 15
- */
-static struct nand_ecclayout docg3_oobinfo = {
- .eccbytes = 8,
- .eccpos = {7, 8, 9, 10, 11, 12, 13, 14},
- .oobfree = {{0, 7}, {15, 1} },
+static int docg3_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ /* byte 7 is Hamming ECC, byte 8-14 are BCH ECC */
+ oobregion->offset = 7;
+ oobregion->length = 8;
+
+ return 0;
+}
+
+static int docg3_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ /* free bytes: byte 0 until byte 6, byte 15 */
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 7;
+ } else {
+ oobregion->offset = 15;
+ oobregion->length = 1;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops nand_ooblayout_docg3_ops = {
+ .ecc = docg3_ooblayout_ecc,
+ .free = docg3_ooblayout_free,
};
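
Expressed as callbacks, the layout can now be walked section by section instead of being read out of a static table. A minimal sketch of enumerating this layout, assuming an initialized mtd for the docg3 device (illustrative only):

	struct mtd_oob_region r;
	int section;

	/* ECC: a single section covering bytes 7..14 */
	for (section = 0; !docg3_ooblayout_ecc(mtd, section, &r); section++)
		pr_info("ecc: offset %u, length %u\n", r.offset, r.length);

	/* free: bytes 0..6, then byte 15 */
	for (section = 0; !docg3_ooblayout_free(mtd, section, &r); section++)
		pr_info("free: offset %u, length %u\n", r.offset, r.length);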
static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
@@ -1857,7 +1881,7 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
mtd->_read_oob = doc_read_oob;
mtd->_write_oob = doc_write_oob;
mtd->_block_isbad = doc_block_isbad;
- mtd->ecclayout = &docg3_oobinfo;
+ mtd_set_ooblayout(mtd, &nand_ooblayout_docg3_ops);
mtd->oobavail = 8;
mtd->ecc_strength = DOC_ECC_BCH_T;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index c9c3b7fa3..9d6854467 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -131,6 +131,28 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len,
/* convert the dummy cycles to the number of bytes */
dummy /= 8;
+ if (spi_flash_read_supported(spi)) {
+ struct spi_flash_read_message msg;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.buf = buf;
+ msg.from = from;
+ msg.len = len;
+ msg.read_opcode = nor->read_opcode;
+ msg.addr_width = nor->addr_width;
+ msg.dummy_bytes = dummy;
+ /* TODO: Support other combinations */
+ msg.opcode_nbits = SPI_NBITS_SINGLE;
+ msg.addr_nbits = SPI_NBITS_SINGLE;
+ msg.data_nbits = m25p80_rx_nbits(nor);
+
+ ret = spi_flash_read(spi, &msg);
+ *retlen = msg.retlen;
+ return ret;
+ }
+
spi_message_init(&m);
memset(t, 0, (sizeof t));
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 708b7e8c8..220f9200f 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -353,7 +353,7 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len,
* mechanism
* returns the size of the memory region found.
*/
-static int fixup_pmc551(struct pci_dev *dev)
+static int __init fixup_pmc551(struct pci_dev *dev)
{
#ifdef CONFIG_MTD_PMC551_BUGFIX
u32 dram_data;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 7c95a656f..392f9eff5 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -74,6 +74,16 @@ config MTD_PHYSMAP_OF
physically into the CPU's memory. The mapping description here is
taken from OF device tree.
+config MTD_PHYSMAP_OF_VERSATILE
+ bool "Support ARM Versatile physmap OF"
+ depends on MTD_PHYSMAP_OF
+ depends on MFD_SYSCON
+ default y if (ARCH_INTEGRATOR || ARCH_VERSATILE || REALVIEW_DT)
+ help
+ This provides some extra DT physmap parsing for the ARM Versatile
+ platforms, basically to add a VPP (write protection) callback so
+ the flash can be taken out of write protection.
+
config MTD_PMC_MSP_EVM
tristate "CFI Flash device mapped on PMC-Sierra MSP"
depends on PMC_MSP && MTD_CFI
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 141c91a5b..644f7d36d 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -18,6 +18,9 @@ obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
+ifdef CONFIG_MTD_PHYSMAP_OF_VERSATILE
+obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of_versatile.o
+endif
obj-$(CONFIG_MTD_PISMO) += pismo.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
index 0455166f0..4f206a991 100644
--- a/drivers/mtd/maps/ck804xrom.c
+++ b/drivers/mtd/maps/ck804xrom.c
@@ -112,8 +112,8 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
}
-static int ck804xrom_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __init ck804xrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
u8 byte;
diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
index 76ed651b5..9646b0766 100644
--- a/drivers/mtd/maps/esb2rom.c
+++ b/drivers/mtd/maps/esb2rom.c
@@ -144,8 +144,8 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
pci_dev_put(window->pdev);
}
-static int esb2rom_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __init esb2rom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
struct esb2rom_window *window = &esb2rom_window;
diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
index 8636bba42..e17d02ae0 100644
--- a/drivers/mtd/maps/ichxrom.c
+++ b/drivers/mtd/maps/ichxrom.c
@@ -84,8 +84,8 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
}
-static int ichxrom_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __init ichxrom_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
struct ichxrom_window *window = &ichxrom_window;
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 70c453144..22f3858c0 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -24,6 +24,7 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include "physmap_of_versatile.h"
struct of_flash_list {
struct mtd_info *mtd;
@@ -240,6 +241,11 @@ static int of_flash_probe(struct platform_device *dev)
info->list[i].map.size = res_size;
info->list[i].map.bankwidth = be32_to_cpup(width);
info->list[i].map.device_node = dp;
+ err = of_flash_probe_versatile(dev, dp, &info->list[i].map);
+ if (err) {
+ dev_err(&dev->dev, "Can't probe Versatile VPP\n");
+ return err;
+ }
err = -ENOMEM;
info->list[i].map.virt = ioremap(info->list[i].map.phys,
diff --git a/drivers/mtd/maps/physmap_of_versatile.c b/drivers/mtd/maps/physmap_of_versatile.c
new file mode 100644
index 000000000..0f39b2a01
--- /dev/null
+++ b/drivers/mtd/maps/physmap_of_versatile.c
@@ -0,0 +1,255 @@
+/*
+ * Versatile OF physmap driver add-on
+ *
+ * Copyright (c) 2016, Linaro Limited
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/mtd/map.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include "physmap_of_versatile.h"
+
+static struct regmap *syscon_regmap;
+
+enum versatile_flashprot {
+ INTEGRATOR_AP_FLASHPROT,
+ INTEGRATOR_CP_FLASHPROT,
+ VERSATILE_FLASHPROT,
+ REALVIEW_FLASHPROT,
+};
+
+static const struct of_device_id syscon_match[] = {
+ {
+ .compatible = "arm,integrator-ap-syscon",
+ .data = (void *)INTEGRATOR_AP_FLASHPROT,
+ },
+ {
+ .compatible = "arm,integrator-cp-syscon",
+ .data = (void *)INTEGRATOR_CP_FLASHPROT,
+ },
+ {
+ .compatible = "arm,core-module-versatile",
+ .data = (void *)VERSATILE_FLASHPROT,
+ },
+ {
+ .compatible = "arm,realview-eb-syscon",
+ .data = (void *)REALVIEW_FLASHPROT,
+ },
+ {
+ .compatible = "arm,realview-pb1176-syscon",
+ .data = (void *)REALVIEW_FLASHPROT,
+ },
+ {
+ .compatible = "arm,realview-pb11mp-syscon",
+ .data = (void *)REALVIEW_FLASHPROT,
+ },
+ {
+ .compatible = "arm,realview-pba8-syscon",
+ .data = (void *)REALVIEW_FLASHPROT,
+ },
+ {
+ .compatible = "arm,realview-pbx-syscon",
+ .data = (void *)REALVIEW_FLASHPROT,
+ },
+ {},
+};
+
+/*
+ * Flash protection handling for the Integrator/AP
+ */
+#define INTEGRATOR_SC_CTRLS_OFFSET 0x08
+#define INTEGRATOR_SC_CTRLC_OFFSET 0x0C
+#define INTEGRATOR_SC_CTRL_FLVPPEN BIT(1)
+#define INTEGRATOR_SC_CTRL_FLWP BIT(2)
+
+#define INTEGRATOR_EBI_CSR1_OFFSET 0x04
+/* The manual says bit 2, the code says bit 3, trust the code */
+#define INTEGRATOR_EBI_WRITE_ENABLE BIT(3)
+#define INTEGRATOR_EBI_LOCK_OFFSET 0x20
+#define INTEGRATOR_EBI_LOCK_VAL 0xA05F
+
+static const struct of_device_id ebi_match[] = {
+ { .compatible = "arm,external-bus-interface"},
+ { },
+};
+
+static int ap_flash_init(struct platform_device *pdev)
+{
+ struct device_node *ebi;
+ static void __iomem *ebi_base;
+ u32 val;
+ int ret;
+
+ /* Look up the EBI */
+ ebi = of_find_matching_node(NULL, ebi_match);
+	if (!ebi)
+		return -ENODEV;
+ ebi_base = of_iomap(ebi, 0);
+ if (!ebi_base)
+ return -ENODEV;
+
+ /* Clear VPP and write protection bits */
+ ret = regmap_write(syscon_regmap,
+ INTEGRATOR_SC_CTRLC_OFFSET,
+ INTEGRATOR_SC_CTRL_FLVPPEN | INTEGRATOR_SC_CTRL_FLWP);
+ if (ret)
+ dev_err(&pdev->dev, "error clearing Integrator VPP/WP\n");
+
+ /* Unlock the EBI */
+ writel(INTEGRATOR_EBI_LOCK_VAL, ebi_base + INTEGRATOR_EBI_LOCK_OFFSET);
+
+ /* Enable write cycles on the EBI, CSR1 (flash) */
+ val = readl(ebi_base + INTEGRATOR_EBI_CSR1_OFFSET);
+ val |= INTEGRATOR_EBI_WRITE_ENABLE;
+ writel(val, ebi_base + INTEGRATOR_EBI_CSR1_OFFSET);
+
+ /* Lock the EBI again */
+ writel(0, ebi_base + INTEGRATOR_EBI_LOCK_OFFSET);
+ iounmap(ebi_base);
+
+ return 0;
+}
+
+static void ap_flash_set_vpp(struct map_info *map, int on)
+{
+ int ret;
+
+ if (on) {
+ ret = regmap_write(syscon_regmap,
+ INTEGRATOR_SC_CTRLS_OFFSET,
+ INTEGRATOR_SC_CTRL_FLVPPEN | INTEGRATOR_SC_CTRL_FLWP);
+ if (ret)
+ pr_err("error enabling AP VPP\n");
+ } else {
+ ret = regmap_write(syscon_regmap,
+ INTEGRATOR_SC_CTRLC_OFFSET,
+ INTEGRATOR_SC_CTRL_FLVPPEN | INTEGRATOR_SC_CTRL_FLWP);
+ if (ret)
+ pr_err("error disabling AP VPP\n");
+ }
+}
+
+/*
+ * Flash protection handling for the Integrator/CP
+ */
+
+#define INTCP_FLASHPROG_OFFSET 0x04
+#define CINTEGRATOR_FLVPPEN BIT(0)
+#define CINTEGRATOR_FLWREN BIT(1)
+#define CINTEGRATOR_FLMASK	(BIT(0)|BIT(1))
+
+static void cp_flash_set_vpp(struct map_info *map, int on)
+{
+ int ret;
+
+ if (on) {
+ ret = regmap_update_bits(syscon_regmap,
+ INTCP_FLASHPROG_OFFSET,
+ CINTEGRATOR_FLMASK,
+ CINTEGRATOR_FLVPPEN | CINTEGRATOR_FLWREN);
+ if (ret)
+ pr_err("error setting CP VPP\n");
+ } else {
+ ret = regmap_update_bits(syscon_regmap,
+ INTCP_FLASHPROG_OFFSET,
+ CINTEGRATOR_FLMASK,
+ 0);
+ if (ret)
+ pr_err("error setting CP VPP\n");
+ }
+}
+
+/*
+ * Flash protection handling for the Versatiles and RealViews
+ */
+
+#define VERSATILE_SYS_FLASH_OFFSET 0x4C
+
+static void versatile_flash_set_vpp(struct map_info *map, int on)
+{
+ int ret;
+
+ ret = regmap_update_bits(syscon_regmap, VERSATILE_SYS_FLASH_OFFSET,
+ 0x01, !!on);
+ if (ret)
+ pr_err("error setting Versatile VPP\n");
+}
+
+int of_flash_probe_versatile(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ struct device_node *sysnp;
+ const struct of_device_id *devid;
+ struct regmap *rmap;
+ static enum versatile_flashprot versatile_flashprot;
+ int ret;
+
+ /* Not all flash chips use this protection line */
+ if (!of_device_is_compatible(np, "arm,versatile-flash"))
+ return 0;
+
+ /* For first chip probed, look up the syscon regmap */
+ if (!syscon_regmap) {
+ sysnp = of_find_matching_node_and_match(NULL,
+ syscon_match,
+ &devid);
+ if (!sysnp)
+ return -ENODEV;
+
+ versatile_flashprot = (enum versatile_flashprot)devid->data;
+ rmap = syscon_node_to_regmap(sysnp);
+ if (IS_ERR(rmap))
+ return PTR_ERR(rmap);
+
+ syscon_regmap = rmap;
+ }
+
+ switch (versatile_flashprot) {
+ case INTEGRATOR_AP_FLASHPROT:
+ ret = ap_flash_init(pdev);
+ if (ret)
+ return ret;
+ map->set_vpp = ap_flash_set_vpp;
+ dev_info(&pdev->dev, "Integrator/AP flash protection\n");
+ break;
+ case INTEGRATOR_CP_FLASHPROT:
+ map->set_vpp = cp_flash_set_vpp;
+ dev_info(&pdev->dev, "Integrator/CP flash protection\n");
+ break;
+ case VERSATILE_FLASHPROT:
+ case REALVIEW_FLASHPROT:
+ map->set_vpp = versatile_flash_set_vpp;
+ dev_info(&pdev->dev, "versatile/realview flash protection\n");
+ break;
+ default:
+ dev_info(&pdev->dev, "device marked as Versatile flash "
+ "but no system controller was found\n");
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_flash_probe_versatile);
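
Once of_flash_probe_versatile() has installed a set_vpp callback, map users bracket program/erase cycles with it in the usual way; a hedged sketch of the calling convention (the callers themselves are not part of this patch):

	/* raise VPP before a program/erase cycle, drop it afterwards */
	if (map->set_vpp)
		map->set_vpp(map, 1);
	/* ... issue program or erase cycles through the map ... */
	if (map->set_vpp)
		map->set_vpp(map, 0);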
diff --git a/drivers/mtd/maps/physmap_of_versatile.h b/drivers/mtd/maps/physmap_of_versatile.h
new file mode 100644
index 000000000..5b86f6dc6
--- /dev/null
+++ b/drivers/mtd/maps/physmap_of_versatile.h
@@ -0,0 +1,16 @@
+#include <linux/of.h>
+#include <linux/mtd/map.h>
+
+#ifdef CONFIG_MTD_PHYSMAP_OF_VERSATILE
+int of_flash_probe_versatile(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map);
+#else
+static inline
+int of_flash_probe_versatile(struct platform_device *pdev,
+ struct device_node *np,
+ struct map_info *map)
+{
+ return 0;
+}
+#endif
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 7497090e9..2cde28ed9 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -71,8 +71,8 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
info->map.name);
return -ENOMEM;
}
- info->map.cached = memremap(info->map.phys, info->map.size,
- MEMREMAP_WB);
+ info->map.cached =
+ ioremap_cached(info->map.phys, info->map.size);
if (!info->map.cached)
printk(KERN_WARNING "Failed to ioremap cached %s\n",
info->map.name);
@@ -111,7 +111,7 @@ static int pxa2xx_flash_remove(struct platform_device *dev)
map_destroy(info->mtd);
iounmap(info->map.virt);
if (info->map.cached)
- memunmap(info->map.cached);
+ iounmap(info->map.cached);
kfree(info);
return 0;
}
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index c1af83db5..00a819079 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -4,11 +4,13 @@
* uclinux.c -- generic memory mapped MTD driver for uclinux
*
* (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
+ *
+ * License: GPL
*/
/****************************************************************************/
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -117,27 +119,6 @@ static int __init uclinux_mtd_init(void)
return(0);
}
-
-/****************************************************************************/
-
-static void __exit uclinux_mtd_cleanup(void)
-{
- if (uclinux_ram_mtdinfo) {
- mtd_device_unregister(uclinux_ram_mtdinfo);
- map_destroy(uclinux_ram_mtdinfo);
- uclinux_ram_mtdinfo = NULL;
- }
- if (uclinux_ram_map.virt)
- uclinux_ram_map.virt = 0;
-}
-
-/****************************************************************************/
-
-module_init(uclinux_mtd_init);
-module_exit(uclinux_mtd_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
-MODULE_DESCRIPTION("Generic MTD for uClinux");
+device_initcall(uclinux_mtd_init);
/****************************************************************************/
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index f4701182b..74ae24364 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -409,7 +409,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
goto error3;
if (tr->flush)
- blk_queue_flush(new->rq, REQ_FLUSH);
+ blk_queue_write_cache(new->rq, true, false);
new->rq->queuedata = new;
blk_queue_logical_block_size(new->rq, tr->blksize);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 6d19835b8..2a47a3f0e 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -465,35 +465,108 @@ static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
}
/*
- * Copies (and truncates, if necessary) data from the larger struct,
- * nand_ecclayout, to the smaller, deprecated layout struct,
- * nand_ecclayout_user. This is necessary only to support the deprecated
- * API ioctl ECCGETLAYOUT while allowing all new functionality to use
- * nand_ecclayout flexibly (i.e. the struct may change size in new
- * releases without requiring major rewrites).
+ * Copies (and truncates, if necessary) OOB layout information to the
+ * deprecated layout struct, nand_ecclayout_user. This is necessary only to
+ * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
+ * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
+ * can describe any kind of OOB layout with almost zero overhead from a
+ * memory usage point of view).
*/
-static int shrink_ecclayout(const struct nand_ecclayout *from,
- struct nand_ecclayout_user *to)
+static int shrink_ecclayout(struct mtd_info *mtd,
+ struct nand_ecclayout_user *to)
{
- int i;
+ struct mtd_oob_region oobregion;
+ int i, section = 0, ret;
- if (!from || !to)
+ if (!mtd || !to)
return -EINVAL;
memset(to, 0, sizeof(*to));
- to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
- for (i = 0; i < to->eccbytes; i++)
- to->eccpos[i] = from->eccpos[i];
+ to->eccbytes = 0;
+ for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
+ u32 eccpos;
+
+ ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
+ if (ret < 0) {
+ if (ret != -ERANGE)
+ return ret;
+
+ break;
+ }
+
+ eccpos = oobregion.offset;
+ for (; i < MTD_MAX_ECCPOS_ENTRIES &&
+ eccpos < oobregion.offset + oobregion.length; i++) {
+ to->eccpos[i] = eccpos++;
+ to->eccbytes++;
+ }
+ }
for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
- if (from->oobfree[i].length == 0 &&
- from->oobfree[i].offset == 0)
+ ret = mtd_ooblayout_free(mtd, i, &oobregion);
+ if (ret < 0) {
+ if (ret != -ERANGE)
+ return ret;
+
+ break;
+ }
+
+ to->oobfree[i].offset = oobregion.offset;
+ to->oobfree[i].length = oobregion.length;
+ to->oobavail += to->oobfree[i].length;
+ }
+
+ return 0;
+}
+
+static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
+{
+ struct mtd_oob_region oobregion;
+ int i, section = 0, ret;
+
+ if (!mtd || !to)
+ return -EINVAL;
+
+ memset(to, 0, sizeof(*to));
+
+ to->eccbytes = 0;
+ for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
+ u32 eccpos;
+
+ ret = mtd_ooblayout_ecc(mtd, section, &oobregion);
+ if (ret < 0) {
+ if (ret != -ERANGE)
+ return ret;
+
break;
- to->oobavail += from->oobfree[i].length;
- to->oobfree[i] = from->oobfree[i];
+ }
+
+ if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
+ return -EINVAL;
+
+ eccpos = oobregion.offset;
+ for (; eccpos < oobregion.offset + oobregion.length; i++) {
+ to->eccpos[i] = eccpos++;
+ to->eccbytes++;
+ }
}
+ for (i = 0; i < 8; i++) {
+ ret = mtd_ooblayout_free(mtd, i, &oobregion);
+ if (ret < 0) {
+ if (ret != -ERANGE)
+ return ret;
+
+ break;
+ }
+
+ to->oobfree[i][0] = oobregion.offset;
+ to->oobfree[i][1] = oobregion.length;
+ }
+
+ to->useecc = MTD_NANDECC_AUTOPLACE;
+
return 0;
}
@@ -815,16 +888,12 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct nand_oobinfo oi;
- if (!mtd->ecclayout)
+ if (!mtd->ooblayout)
return -EOPNOTSUPP;
- if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
- return -EINVAL;
- oi.useecc = MTD_NANDECC_AUTOPLACE;
- memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
- memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
- sizeof(oi.oobfree));
- oi.eccbytes = mtd->ecclayout->eccbytes;
+ ret = get_oobinfo(mtd, &oi);
+ if (ret)
+ return ret;
if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
return -EFAULT;
@@ -913,14 +982,14 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct nand_ecclayout_user *usrlay;
- if (!mtd->ecclayout)
+ if (!mtd->ooblayout)
return -EOPNOTSUPP;
usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
if (!usrlay)
return -ENOMEM;
- shrink_ecclayout(mtd->ecclayout, usrlay);
+ shrink_ecclayout(mtd, usrlay);
if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
ret = -EFAULT;
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 239a8c806..d573606b9 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -777,7 +777,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
}
- concat->mtd.ecclayout = subdev[0]->ecclayout;
+ mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout);
concat->num_subdev = num_devs;
concat->mtd.name = name;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 309625130..e3936b847 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -40,6 +40,7 @@
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/kconfig.h>
+#include <linux/leds.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
@@ -862,6 +863,7 @@ int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
mtd_erase_callback(instr);
return 0;
}
+ ledtrig_mtd_activity();
return mtd->_erase(mtd, instr);
}
EXPORT_SYMBOL_GPL(mtd_erase);
@@ -925,6 +927,7 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
if (!len)
return 0;
+ ledtrig_mtd_activity();
/*
* In the absence of an error, drivers return a non-negative integer
* representing the maximum number of bitflips that were corrected on
@@ -949,6 +952,7 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
return -EROFS;
if (!len)
return 0;
+ ledtrig_mtd_activity();
return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);
@@ -982,6 +986,8 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
ops->retlen = ops->oobretlen = 0;
if (!mtd->_read_oob)
return -EOPNOTSUPP;
+
+ ledtrig_mtd_activity();
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
* similar to mtd->_read(), returning a non-negative integer
@@ -997,6 +1003,379 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
+int mtd_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ ops->retlen = ops->oobretlen = 0;
+ if (!mtd->_write_oob)
+ return -EOPNOTSUPP;
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ ledtrig_mtd_activity();
+ return mtd->_write_oob(mtd, to, ops);
+}
+EXPORT_SYMBOL_GPL(mtd_write_oob);
+
+/**
+ * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
+ * @mtd: MTD device structure
+ * @section: ECC section. Depending on the layout you may have all the ECC
+ * bytes stored in a single contiguous section, or one section
+ * per ECC chunk (and sometimes several sections for a single
+ * ECC chunk)
+ * @oobecc: OOB region struct filled with the appropriate ECC position
+ * information
+ *
+ * This function returns ECC section information in the OOB area. If you want
+ * to get all the ECC byte positions, then you should call
+ * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobecc)
+{
+ memset(oobecc, 0, sizeof(*oobecc));
+
+ if (!mtd || section < 0)
+ return -EINVAL;
+
+ if (!mtd->ooblayout || !mtd->ooblayout->ecc)
+ return -ENOTSUPP;
+
+ return mtd->ooblayout->ecc(mtd, section, oobecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
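
As the kernel-doc above spells out, enumerating every ECC byte means calling the helper with increasing section numbers until -ERANGE comes back. A minimal sketch of that caller loop (illustrative, not from this patch):

	struct mtd_oob_region oobecc;
	int section = 0, ret;

	while (!(ret = mtd_ooblayout_ecc(mtd, section++, &oobecc)))
		pr_debug("ECC bytes at OOB offset %u, length %u\n",
			 oobecc.offset, oobecc.length);

	if (ret != -ERANGE)
		return ret;	/* a real error, not the end of the layout */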
+
+/**
+ * mtd_ooblayout_free - Get the OOB region definition of a specific free
+ * section
+ * @mtd: MTD device structure
+ * @section: Free section you are interested in. Depending on the layout
+ * you may have all the free bytes stored in a single contiguous
+ * section, or one section per ECC chunk plus an extra section
+ * for the remaining bytes (or other funky layout).
+ * @oobfree: OOB region struct filled with the appropriate free position
+ * information
+ *
+ * This function returns the free byte positions in the OOB area. If you want
+ * to get all the free byte positions, then you should call
+ * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobfree)
+{
+ memset(oobfree, 0, sizeof(*oobfree));
+
+ if (!mtd || section < 0)
+ return -EINVAL;
+
+ if (!mtd->ooblayout || !mtd->ooblayout->free)
+ return -ENOTSUPP;
+
+ return mtd->ooblayout->free(mtd, section, oobfree);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
+
+/**
+ * mtd_ooblayout_find_region - Find the region attached to a specific byte
+ * @mtd: mtd info structure
+ * @byte: the byte we are searching for
+ * @sectionp: pointer where the section id will be stored
+ * @oobregion: used to retrieve the ECC position
+ * @iter: iterator function. Should be either mtd_ooblayout_free or
+ * mtd_ooblayout_ecc depending on the region type you're searching for
+ *
+ * This function returns the section id and oobregion information of a
+ * specific byte. For example, say you want to know where the 4th ECC byte is
+ * stored, you'll use:
+ *
+ * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
+ int *sectionp, struct mtd_oob_region *oobregion,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ int pos = 0, ret, section = 0;
+
+ memset(oobregion, 0, sizeof(*oobregion));
+
+ while (1) {
+ ret = iter(mtd, section, oobregion);
+ if (ret)
+ return ret;
+
+ if (pos + oobregion->length > byte)
+ break;
+
+ pos += oobregion->length;
+ section++;
+ }
+
+ /*
+	 * Adjust region info to make it start at the beginning of the
+	 * requested 'start' byte.
+ */
+ oobregion->offset += byte - pos;
+ oobregion->length -= byte - pos;
+ *sectionp = section;
+
+ return 0;
+}
+
+/**
+ * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
+ * ECC byte
+ * @mtd: mtd info structure
+ * @eccbyte: the byte we are searching for
+ * @sectionp: pointer where the section id will be stored
+ * @oobregion: OOB region information
+ *
+ * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
+ * byte.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
+ int *section,
+ struct mtd_oob_region *oobregion)
+{
+ return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
+ mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
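
A short usage sketch matching the example in the kernel-doc above — locating the 4th ECC byte (illustrative caller):

	struct mtd_oob_region r;
	int section, ret;

	ret = mtd_ooblayout_find_eccregion(mtd, 3, &section, &r);
	if (!ret)
		pr_debug("ECC byte 3 sits at OOB offset %u (section %d)\n",
			 r.offset, section);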
+
+/**
+ * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @buf: destination buffer to store OOB bytes
+ * @oobbuf: OOB buffer
+ * @start: first byte to retrieve
+ * @nbytes: number of bytes to retrieve
+ * @iter: section iterator
+ *
+ * Extract bytes attached to a specific category (ECC or free)
+ * from the OOB buffer and copy them into buf.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
+ const u8 *oobbuf, int start, int nbytes,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ struct mtd_oob_region oobregion = { };
+ int section = 0, ret;
+
+ ret = mtd_ooblayout_find_region(mtd, start, &section,
+ &oobregion, iter);
+
+ while (!ret) {
+ int cnt;
+
+ cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
+ memcpy(buf, oobbuf + oobregion.offset, cnt);
+ buf += cnt;
+ nbytes -= cnt;
+
+ if (!nbytes)
+ break;
+
+ ret = iter(mtd, ++section, &oobregion);
+ }
+
+ return ret;
+}
+
+/**
+ * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @buf: source buffer to get OOB bytes from
+ * @oobbuf: OOB buffer
+ * @start: first OOB byte to set
+ * @nbytes: number of OOB bytes to set
+ * @iter: section iterator
+ *
+ * Fill the OOB buffer with data provided in buf. The category (ECC or free)
+ * is selected by passing the appropriate iterator.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
+ u8 *oobbuf, int start, int nbytes,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ struct mtd_oob_region oobregion = { };
+ int section = 0, ret;
+
+ ret = mtd_ooblayout_find_region(mtd, start, &section,
+ &oobregion, iter);
+
+ while (!ret) {
+ int cnt;
+
+ cnt = oobregion.length > nbytes ? nbytes : oobregion.length;
+ memcpy(oobbuf + oobregion.offset, buf, cnt);
+ buf += cnt;
+ nbytes -= cnt;
+
+ if (!nbytes)
+ break;
+
+ ret = iter(mtd, ++section, &oobregion);
+ }
+
+ return ret;
+}
+
+/**
+ * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
+ * @mtd: mtd info structure
+ * @iter: category iterator
+ *
+ * Count the number of bytes in a given category.
+ *
+ * Returns a positive value on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ struct mtd_oob_region oobregion = { };
+ int section = 0, ret, nbytes = 0;
+
+ while (1) {
+ ret = iter(mtd, section++, &oobregion);
+ if (ret) {
+ if (ret == -ERANGE)
+ ret = nbytes;
+ break;
+ }
+
+ nbytes += oobregion.length;
+ }
+
+ return ret;
+}
+
+/**
+ * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @eccbuf: destination buffer to store ECC bytes
+ * @oobbuf: OOB buffer
+ * @start: first ECC byte to retrieve
+ * @nbytes: number of ECC bytes to retrieve
+ *
+ * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
+ const u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
+ mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
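
A typical consumer reads the raw OOB area first and then uses this helper to gather the scattered ECC bytes into one buffer. A hedged sketch, assuming oobbuf already holds one page's OOB data and ecc[] is large enough:

	u8 ecc[64];	/* illustrative size; must be >= nbytes */
	int ret;

	/* collect the first 8 ECC bytes, wherever the layout puts them */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc, oobbuf, 0, 8);
	if (ret)
		return ret;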
+
+/**
+ * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @eccbuf: source buffer to get ECC bytes from
+ * @oobbuf: OOB buffer
+ * @start: first ECC byte to set
+ * @nbytes: number of ECC bytes to set
+ *
+ * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
+ u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
+ mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
+
+/**
+ * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @databuf: destination buffer to store data bytes
+ * @oobbuf: OOB buffer
+ * @start: first free byte to retrieve
+ * @nbytes: number of free bytes to retrieve
+ *
+ * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
+ const u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
+ mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
+
+/**
+ * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @databuf: source buffer to get data bytes from
+ * @oobbuf: OOB buffer
+ * @start: first free byte to set
+ * @nbytes: number of free bytes to set
+ *
+ * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
+ u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
+ mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
+
+/**
+ * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
+ * @mtd: mtd info structure
+ *
+ * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
+ *
+ * Returns the number of free bytes on success, a negative error code
+ * otherwise.
+ */
+int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
+{
+ return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
+
+/**
+ * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
+ * @mtd: mtd info structure
+ *
+ * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
+ *
+ * Returns the number of ECC bytes on success, a negative error code
+ * otherwise.
+ */
+int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
+{
+ return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
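
The two counters make it straightforward to derive aggregate layout facts; a one-line sketch of the obvious use (the assignment target is illustrative, and a negative errno return would need handling):

	/* derive the user-visible OOB space from the layout callbacks */
	mtd->oobavail = mtd_ooblayout_count_freebytes(mtd);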
+
/*
* Method to access the protection register area, present in some flash
* devices. The user data is one time programmable but the factory data is read
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 08de4b2cf..1f13e3255 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -317,6 +317,27 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
return res;
}
+static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct mtd_part *part = mtd_to_part(mtd);
+
+ return mtd_ooblayout_ecc(part->master, section, oobregion);
+}
+
+static int part_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct mtd_part *part = mtd_to_part(mtd);
+
+ return mtd_ooblayout_free(part->master, section, oobregion);
+}
+
+static const struct mtd_ooblayout_ops part_ooblayout_ops = {
+ .ecc = part_ooblayout_ecc,
+ .free = part_ooblayout_free,
+};
+
static inline void free_partition(struct mtd_part *p)
{
kfree(p->mtd.name);
@@ -533,7 +554,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
part->name);
}
- slave->mtd.ecclayout = master->ecclayout;
+ mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
slave->mtd.ecc_step_size = master->ecc_step_size;
slave->mtd.ecc_strength = master->ecc_strength;
slave->mtd.bitflip_threshold = master->bitflip_threshold;
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 68b58c857..78e12cc8b 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -224,6 +224,7 @@ static int ams_delta_init(struct platform_device *pdev)
/* 25 us command delay time */
this->chip_delay = 30;
this->ecc.mode = NAND_ECC_SOFT;
+ this->ecc.algo = NAND_ECC_HAMMING;
platform_set_drvdata(pdev, io_base);
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 20cbaabb2..68b916010 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -36,7 +36,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
-#include <linux/of_mtd.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
@@ -68,34 +67,44 @@ struct atmel_nand_caps {
uint8_t pmecc_max_correction;
};
-struct atmel_nand_nfc_caps {
- uint32_t rb_mask;
-};
-
-/* oob layout for large page size
+/*
+ * oob layout for large page size
* bad block info is on bytes 0 and 1
* the bytes have to be consecutives to avoid
* several NAND_CMD_RNDOUT during read
- */
-static struct nand_ecclayout atmel_oobinfo_large = {
- .eccbytes = 4,
- .eccpos = {60, 61, 62, 63},
- .oobfree = {
- {2, 58}
- },
-};
-
-/* oob layout for small page size
+ *
+ * oob layout for small page size
* bad block info is on bytes 4 and 5
* the bytes have to be consecutives to avoid
* several NAND_CMD_RNDOUT during read
*/
-static struct nand_ecclayout atmel_oobinfo_small = {
- .eccbytes = 4,
- .eccpos = {0, 1, 2, 3},
- .oobfree = {
- {6, 10}
- },
+static int atmel_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = 4;
+ oobregion->offset = 0;
+
+ return 0;
+}
+
+static int atmel_ooblayout_free_sp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 6;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops atmel_ooblayout_sp_ops = {
+ .ecc = atmel_ooblayout_ecc_sp,
+ .free = atmel_ooblayout_free_sp,
};
struct atmel_nfc {
@@ -116,7 +125,6 @@ struct atmel_nfc {
/* Point to the sram bank which include readed data via NFC */
void *data_in_sram;
bool will_write_sram;
- const struct atmel_nand_nfc_caps *caps;
};
static struct atmel_nfc nand_nfc;
@@ -163,8 +171,6 @@ struct atmel_nand_host {
int *pmecc_delta;
};
-static struct nand_ecclayout atmel_pmecc_oobinfo;
-
/*
* Enable NAND.
*/
@@ -434,14 +440,13 @@ err_buf:
static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
{
struct nand_chip *chip = mtd_to_nand(mtd);
- struct atmel_nand_host *host = nand_get_controller_data(chip);
if (use_dma && len > mtd->oobsize)
/* only use DMA for bigger than oob size: better performances */
if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
return;
- if (host->board.bus_width_16)
+ if (chip->options & NAND_BUSWIDTH_16)
atmel_read_buf16(mtd, buf, len);
else
atmel_read_buf8(mtd, buf, len);
@@ -450,14 +455,13 @@ static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
{
struct nand_chip *chip = mtd_to_nand(mtd);
- struct atmel_nand_host *host = nand_get_controller_data(chip);
if (use_dma && len > mtd->oobsize)
/* only use DMA for bigger than oob size: better performances */
if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
return;
- if (host->board.bus_width_16)
+ if (chip->options & NAND_BUSWIDTH_16)
atmel_write_buf16(mtd, buf, len);
else
atmel_write_buf8(mtd, buf, len);
@@ -483,22 +487,6 @@ static int pmecc_get_ecc_bytes(int cap, int sector_size)
return (m * cap + 7) / 8;
}
-static void pmecc_config_ecc_layout(struct nand_ecclayout *layout,
- int oobsize, int ecc_len)
-{
- int i;
-
- layout->eccbytes = ecc_len;
-
- /* ECC will occupy the last ecc_len bytes continuously */
- for (i = 0; i < ecc_len; i++)
- layout->eccpos[i] = oobsize - ecc_len + i;
-
- layout->oobfree[0].offset = PMECC_OOB_RESERVED_BYTES;
- layout->oobfree[0].length =
- oobsize - ecc_len - layout->oobfree[0].offset;
-}
-
static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
{
int table_size;
@@ -836,13 +824,16 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
pos, bit_pos, err_byte, *(buf + byte_pos));
} else {
+ struct mtd_oob_region oobregion;
+
/* Bit flip in OOB area */
tmp = sector_num * nand_chip->ecc.bytes
+ (byte_pos - sector_size);
err_byte = ecc[tmp];
ecc[tmp] ^= (1 << bit_pos);
- pos = tmp + nand_chip->ecc.layout->eccpos[0];
+ mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ pos = tmp + oobregion.offset;
dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
pos, bit_pos, err_byte, ecc[tmp]);
}
@@ -863,17 +854,6 @@ static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
uint8_t *buf_pos;
int max_bitflips = 0;
- /* If can correct bitfilps from erased page, do the normal check */
- if (host->caps->pmecc_correct_erase_page)
- goto normal_check;
-
- for (i = 0; i < nand_chip->ecc.total; i++)
- if (ecc[i] != 0xff)
- goto normal_check;
- /* Erased page, return OK */
- return 0;
-
-normal_check:
for (i = 0; i < nand_chip->ecc.steps; i++) {
err_nbr = 0;
if (pmecc_stat & 0x1) {
@@ -884,16 +864,30 @@ normal_check:
pmecc_get_sigma(mtd);
err_nbr = pmecc_err_location(mtd);
- if (err_nbr == -1) {
+ if (err_nbr >= 0) {
+ pmecc_correct_data(mtd, buf_pos, ecc, i,
+ nand_chip->ecc.bytes,
+ err_nbr);
+ } else if (!host->caps->pmecc_correct_erase_page) {
+ u8 *ecc_pos = ecc + (i * nand_chip->ecc.bytes);
+
+ /* Try to detect erased pages */
+ err_nbr = nand_check_erased_ecc_chunk(buf_pos,
+ host->pmecc_sector_size,
+ ecc_pos,
+ nand_chip->ecc.bytes,
+ NULL, 0,
+ nand_chip->ecc.strength);
+ }
+
+ if (err_nbr < 0) {
dev_err(host->dev, "PMECC: Too many errors\n");
mtd->ecc_stats.failed++;
return -EIO;
- } else {
- pmecc_correct_data(mtd, buf_pos, ecc, i,
- nand_chip->ecc.bytes, err_nbr);
- mtd->ecc_stats.corrected += err_nbr;
- max_bitflips = max_t(int, max_bitflips, err_nbr);
}
+
+ mtd->ecc_stats.corrected += err_nbr;
+ max_bitflips = max_t(int, max_bitflips, err_nbr);
}
pmecc_stat >>= 1;
}
@@ -931,7 +925,6 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
struct atmel_nand_host *host = nand_get_controller_data(chip);
int eccsize = chip->ecc.size * chip->ecc.steps;
uint8_t *oob = chip->oob_poi;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
uint32_t stat;
unsigned long end_time;
int bitflips = 0;
@@ -953,7 +946,11 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
stat = pmecc_readl_relaxed(host->ecc, ISR);
if (stat != 0) {
- bitflips = pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]);
+ struct mtd_oob_region oobregion;
+
+ mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ bitflips = pmecc_correction(mtd, stat, buf,
+ &oob[oobregion.offset]);
if (bitflips < 0)
/* uncorrectable errors */
return 0;
@@ -967,8 +964,8 @@ static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
int page)
{
struct atmel_nand_host *host = nand_get_controller_data(chip);
- uint32_t *eccpos = chip->ecc.layout->eccpos;
- int i, j;
+ struct mtd_oob_region oobregion = { };
+ int i, j, section = 0;
unsigned long end_time;
if (!host->nfc || !host->nfc->write_by_sram) {
@@ -987,11 +984,14 @@ static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
for (i = 0; i < chip->ecc.steps; i++) {
for (j = 0; j < chip->ecc.bytes; j++) {
- int pos;
+ if (!oobregion.length)
+ mtd_ooblayout_ecc(mtd, section, &oobregion);
- pos = i * chip->ecc.bytes + j;
- chip->oob_poi[eccpos[pos]] =
+ chip->oob_poi[oobregion.offset] =
pmecc_readb_ecc_relaxed(host->ecc, i, j);
+ oobregion.length--;
+ oobregion.offset++;
+ section++;
}
}
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -1003,8 +1003,9 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+ int eccbytes = mtd_ooblayout_count_eccbytes(mtd);
uint32_t val = 0;
- struct nand_ecclayout *ecc_layout;
+ struct mtd_oob_region oobregion;
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
@@ -1054,11 +1055,11 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd)
| PMECC_CFG_AUTO_DISABLE);
pmecc_writel(host->ecc, CFG, val);
- ecc_layout = nand_chip->ecc.layout;
pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1);
- pmecc_writel(host->ecc, SADDR, ecc_layout->eccpos[0]);
+ mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ pmecc_writel(host->ecc, SADDR, oobregion.offset);
pmecc_writel(host->ecc, EADDR,
- ecc_layout->eccpos[ecc_layout->eccbytes - 1]);
+ oobregion.offset + eccbytes - 1);
/* See datasheet about PMECC Clock Control Register */
pmecc_writel(host->ecc, CLK, 2);
pmecc_writel(host->ecc, IDR, 0xff);
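
The SADDR/EADDR programming above derives both addresses from the layout: the start is the first ECC region's offset, the end is start plus the total ECC byte count minus one. A standalone check with hypothetical large-page numbers (64-byte OOB, 16 PMECC bytes packed at the end, as nand_ooblayout_lp_ops lays them out):

	#include <stdio.h>

	int main(void)
	{
		int oobsize = 64, eccbytes = 16;	/* hypothetical geometry */
		int first_ecc = oobsize - eccbytes;	/* lp layout: ECC at the end */

		printf("SAREA=%d SADDR=%d EADDR=%d\n",
		       oobsize - 1, first_ecc, first_ecc + eccbytes - 1);
		/* prints: SAREA=63 SADDR=48 EADDR=63 */
		return 0;
	}
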
@@ -1206,6 +1207,7 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
dev_warn(host->dev,
"Can't get I/O resource regs for PMECC controller, rolling back on software ECC\n");
nand_chip->ecc.mode = NAND_ECC_SOFT;
+ nand_chip->ecc.algo = NAND_ECC_HAMMING;
return 0;
}
@@ -1280,11 +1282,8 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
err_no = -EINVAL;
goto err;
}
- pmecc_config_ecc_layout(&atmel_pmecc_oobinfo,
- mtd->oobsize,
- nand_chip->ecc.total);
- nand_chip->ecc.layout = &atmel_pmecc_oobinfo;
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
break;
default:
dev_warn(host->dev,
@@ -1292,6 +1291,7 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
/* page size not handled by HW ECC */
/* switching back to soft ECC */
nand_chip->ecc.mode = NAND_ECC_SOFT;
+ nand_chip->ecc.algo = NAND_ECC_HAMMING;
return 0;
}
@@ -1359,12 +1359,12 @@ static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
uint8_t *p = buf;
uint8_t *oob = chip->oob_poi;
uint8_t *ecc_pos;
int stat;
unsigned int max_bitflips = 0;
+ struct mtd_oob_region oobregion = {};
/*
* Errata: ALE is incorrectly wired up to the ECC controller
@@ -1382,19 +1382,20 @@ static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
chip->read_buf(mtd, p, eccsize);
/* move to ECC position if needed */
- if (eccpos[0] != 0) {
- /* This only works on large pages
- * because the ECC controller waits for
- * NAND_CMD_RNDOUTSTART after the
- * NAND_CMD_RNDOUT.
- * anyway, for small pages, the eccpos[0] == 0
+ mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ if (oobregion.offset != 0) {
+ /*
+ * This only works on large pages because the ECC controller
+ * waits for NAND_CMD_RNDOUTSTART after the NAND_CMD_RNDOUT.
+ * Anyway, for small pages, the first ECC byte is at offset
+ * 0 in the OOB area.
*/
chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + eccpos[0], -1);
+ mtd->writesize + oobregion.offset, -1);
}
/* the ECC controller needs to read the ECC just after the data */
- ecc_pos = oob + eccpos[0];
+ ecc_pos = oob + oobregion.offset;
chip->read_buf(mtd, ecc_pos, eccbytes);
/* check if there's an error */
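
The RNDOUT column used above is just the page size plus the first ECC region's offset; with a hypothetical 2KiB page and ECC starting at OOB byte 40, the jump lands at column 2048 + 40 = 2088:

	/* Hypothetical geometry: 2KiB page, ECC region at OOB offset 40. */
	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 2048 + 40 /* = 2088 */, -1);
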
@@ -1504,58 +1505,17 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
}
-static int atmel_of_init_port(struct atmel_nand_host *host,
- struct device_node *np)
+static int atmel_of_init_ecc(struct atmel_nand_host *host,
+ struct device_node *np)
{
- u32 val;
u32 offset[2];
- int ecc_mode;
- struct atmel_nand_data *board = &host->board;
- enum of_gpio_flags flags = 0;
-
- host->caps = (struct atmel_nand_caps *)
- of_device_get_match_data(host->dev);
-
- if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
- if (val >= 32) {
- dev_err(host->dev, "invalid addr-offset %u\n", val);
- return -EINVAL;
- }
- board->ale = val;
- }
-
- if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) {
- if (val >= 32) {
- dev_err(host->dev, "invalid cmd-offset %u\n", val);
- return -EINVAL;
- }
- board->cle = val;
- }
-
- ecc_mode = of_get_nand_ecc_mode(np);
-
- board->ecc_mode = ecc_mode < 0 ? NAND_ECC_SOFT : ecc_mode;
-
- board->on_flash_bbt = of_get_nand_on_flash_bbt(np);
-
- board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma");
-
- if (of_get_nand_bus_width(np) == 16)
- board->bus_width_16 = 1;
-
- board->rdy_pin = of_get_gpio_flags(np, 0, &flags);
- board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW);
-
- board->enable_pin = of_get_gpio(np, 1);
- board->det_pin = of_get_gpio(np, 2);
+ u32 val;
host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc");
- /* load the nfc driver if there is */
- of_platform_populate(np, NULL, NULL, host->dev);
-
- if (!(board->ecc_mode == NAND_ECC_HW) || !host->has_pmecc)
- return 0; /* Not using PMECC */
+ /* Not using PMECC */
+ if (!(host->nand_chip.ecc.mode == NAND_ECC_HW) || !host->has_pmecc)
+ return 0;
/* use PMECC, get correction capability, sector size and lookup
* table offset.
@@ -1596,16 +1556,65 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
/* Will build a lookup table and initialize the offset later */
return 0;
}
+
if (!offset[0] && !offset[1]) {
dev_err(host->dev, "Invalid PMECC lookup table offset\n");
return -EINVAL;
}
+
host->pmecc_lookup_table_offset_512 = offset[0];
host->pmecc_lookup_table_offset_1024 = offset[1];
return 0;
}
+static int atmel_of_init_port(struct atmel_nand_host *host,
+ struct device_node *np)
+{
+ u32 val;
+ struct atmel_nand_data *board = &host->board;
+ enum of_gpio_flags flags = 0;
+
+ host->caps = (struct atmel_nand_caps *)
+ of_device_get_match_data(host->dev);
+
+ if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
+ if (val >= 32) {
+ dev_err(host->dev, "invalid addr-offset %u\n", val);
+ return -EINVAL;
+ }
+ board->ale = val;
+ }
+
+ if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) {
+ if (val >= 32) {
+ dev_err(host->dev, "invalid cmd-offset %u\n", val);
+ return -EINVAL;
+ }
+ board->cle = val;
+ }
+
+ board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma");
+
+ board->rdy_pin = of_get_gpio_flags(np, 0, &flags);
+ board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW);
+
+ board->enable_pin = of_get_gpio(np, 1);
+ board->det_pin = of_get_gpio(np, 2);
+
+ /* load the NFC driver if there is one */
+ of_platform_populate(np, NULL, NULL, host->dev);
+
+ /*
+ * Initialize ECC mode to NAND_ECC_SOFT so that we have a correct value
+ * even if the nand-ecc-mode property is not defined.
+ */
+ host->nand_chip.ecc.mode = NAND_ECC_SOFT;
+ host->nand_chip.ecc.algo = NAND_ECC_HAMMING;
+
+ return 0;
+}
+
static int atmel_hw_nand_init_params(struct platform_device *pdev,
struct atmel_nand_host *host)
{
@@ -1618,6 +1627,7 @@ static int atmel_hw_nand_init_params(struct platform_device *pdev,
dev_err(host->dev,
"Can't get I/O resource regs, use software ECC\n");
nand_chip->ecc.mode = NAND_ECC_SOFT;
+ nand_chip->ecc.algo = NAND_ECC_HAMMING;
return 0;
}
@@ -1631,25 +1641,26 @@ static int atmel_hw_nand_init_params(struct platform_device *pdev,
/* set ECC page size and oob layout */
switch (mtd->writesize) {
case 512:
- nand_chip->ecc.layout = &atmel_oobinfo_small;
+ mtd_set_ooblayout(mtd, &atmel_ooblayout_sp_ops);
ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
break;
case 1024:
- nand_chip->ecc.layout = &atmel_oobinfo_large;
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
break;
case 2048:
- nand_chip->ecc.layout = &atmel_oobinfo_large;
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
break;
case 4096:
- nand_chip->ecc.layout = &atmel_oobinfo_large;
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
break;
default:
/* page size not handled by HW ECC */
/* switching back to soft ECC */
nand_chip->ecc.mode = NAND_ECC_SOFT;
+ nand_chip->ecc.algo = NAND_ECC_HAMMING;
return 0;
}
@@ -1699,9 +1710,9 @@ static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
ret = IRQ_HANDLED;
}
- if (pending & host->nfc->caps->rb_mask) {
+ if (pending & NFC_SR_RB_EDGE) {
complete(&host->nfc->comp_ready);
- nfc_writel(host->nfc->hsmc_regs, IDR, host->nfc->caps->rb_mask);
+ nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
ret = IRQ_HANDLED;
}
if (pending & NFC_SR_CMD_DONE) {
@@ -1719,7 +1730,7 @@ static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag)
if (flag & NFC_SR_XFR_DONE)
init_completion(&host->nfc->comp_xfer_done);
- if (flag & host->nfc->caps->rb_mask)
+ if (flag & NFC_SR_RB_EDGE)
init_completion(&host->nfc->comp_ready);
if (flag & NFC_SR_CMD_DONE)
@@ -1737,7 +1748,7 @@ static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
if (flag & NFC_SR_XFR_DONE)
comp[index++] = &host->nfc->comp_xfer_done;
- if (flag & host->nfc->caps->rb_mask)
+ if (flag & NFC_SR_RB_EDGE)
comp[index++] = &host->nfc->comp_ready;
if (flag & NFC_SR_CMD_DONE)
@@ -1805,7 +1816,7 @@ static int nfc_device_ready(struct mtd_info *mtd)
dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n",
mask & status);
- return status & host->nfc->caps->rb_mask;
+ return status & NFC_SR_RB_EDGE;
}
static void nfc_select_chip(struct mtd_info *mtd, int chip)
@@ -1978,8 +1989,8 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
}
/* fall through */
default:
- nfc_prepare_interrupt(host, host->nfc->caps->rb_mask);
- nfc_wait_interrupt(host, host->nfc->caps->rb_mask);
+ nfc_prepare_interrupt(host, NFC_SR_RB_EDGE);
+ nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
}
}
@@ -2147,6 +2158,19 @@ static int atmel_nand_probe(struct platform_device *pdev)
} else {
memcpy(&host->board, dev_get_platdata(&pdev->dev),
sizeof(struct atmel_nand_data));
+ nand_chip->ecc.mode = host->board.ecc_mode;
+
+ /*
+ * When using software ECC, every supported avr32 board
+ * uses the Hamming algorithm. If that ever changes we'll
+ * need to add an ecc_algo field to struct atmel_nand_data.
+ */
+ if (nand_chip->ecc.mode == NAND_ECC_SOFT)
+ nand_chip->ecc.algo = NAND_ECC_HAMMING;
+
+ /* 16-bit bus width */
+ if (host->board.bus_width_16)
+ nand_chip->options |= NAND_BUSWIDTH_16;
}
/* link the private data structures */
@@ -2188,11 +2212,8 @@ static int atmel_nand_probe(struct platform_device *pdev)
nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
}
- nand_chip->ecc.mode = host->board.ecc_mode;
nand_chip->chip_delay = 40; /* 40us command delay time */
- if (host->board.bus_width_16) /* 16-bit bus width */
- nand_chip->options |= NAND_BUSWIDTH_16;
nand_chip->read_buf = atmel_read_buf;
nand_chip->write_buf = atmel_write_buf;
@@ -2225,11 +2246,6 @@ static int atmel_nand_probe(struct platform_device *pdev)
}
}
- if (host->board.on_flash_bbt || on_flash_bbt) {
- dev_info(&pdev->dev, "Use On Flash BBT\n");
- nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
- }
-
if (!host->board.has_dma)
use_dma = 0;
@@ -2256,6 +2272,18 @@ static int atmel_nand_probe(struct platform_device *pdev)
goto err_scan_ident;
}
+ if (host->board.on_flash_bbt || on_flash_bbt)
+ nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ if (nand_chip->bbt_options & NAND_BBT_USE_FLASH)
+ dev_info(&pdev->dev, "Use On Flash BBT\n");
+
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+ res = atmel_of_init_ecc(host, pdev->dev.of_node);
+ if (res)
+ goto err_hw_ecc;
+ }
+
if (nand_chip->ecc.mode == NAND_ECC_HW) {
if (host->has_pmecc)
res = atmel_pmecc_nand_init_params(pdev, host);
@@ -2393,11 +2421,6 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)
}
}
- nfc->caps = (const struct atmel_nand_nfc_caps *)
- of_device_get_match_data(&pdev->dev);
- if (!nfc->caps)
- return -ENODEV;
-
nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff);
nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */
@@ -2426,17 +2449,8 @@ static int atmel_nand_nfc_remove(struct platform_device *pdev)
return 0;
}
-static const struct atmel_nand_nfc_caps sama5d3_nfc_caps = {
- .rb_mask = NFC_SR_RB_EDGE0,
-};
-
-static const struct atmel_nand_nfc_caps sama5d4_nfc_caps = {
- .rb_mask = NFC_SR_RB_EDGE3,
-};
-
static const struct of_device_id atmel_nand_nfc_match[] = {
- { .compatible = "atmel,sama5d3-nfc", .data = &sama5d3_nfc_caps },
- { .compatible = "atmel,sama5d4-nfc", .data = &sama5d4_nfc_caps },
+ { .compatible = "atmel,sama5d3-nfc" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
index 0bbc1fa97..4d5d26221 100644
--- a/drivers/mtd/nand/atmel_nand_nfc.h
+++ b/drivers/mtd/nand/atmel_nand_nfc.h
@@ -42,8 +42,7 @@
#define NFC_SR_UNDEF (1 << 21)
#define NFC_SR_AWB (1 << 22)
#define NFC_SR_ASE (1 << 23)
-#define NFC_SR_RB_EDGE0 (1 << 24)
-#define NFC_SR_RB_EDGE3 (1 << 27)
+#define NFC_SR_RB_EDGE (1 << 24)
#define ATMEL_HSMC_NFC_IER 0x0c
#define ATMEL_HSMC_NFC_IDR 0x10
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 341ea4904..9bf6d9915 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -459,6 +459,7 @@ static int au1550nd_probe(struct platform_device *pdev)
/* 30 us command delay time */
this->chip_delay = 30;
this->ecc.mode = NAND_ECC_SOFT;
+ this->ecc.algo = NAND_ECC_HAMMING;
if (pd->devwidth)
this->options |= NAND_BUSWIDTH_16;
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 7f6b30e61..37da4236a 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -109,28 +109,33 @@ static const unsigned short bfin_nfc_pin_req[] =
0};
#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
-static struct nand_ecclayout bootrom_ecclayout = {
- .eccbytes = 24,
- .eccpos = {
- 0x8 * 0, 0x8 * 0 + 1, 0x8 * 0 + 2,
- 0x8 * 1, 0x8 * 1 + 1, 0x8 * 1 + 2,
- 0x8 * 2, 0x8 * 2 + 1, 0x8 * 2 + 2,
- 0x8 * 3, 0x8 * 3 + 1, 0x8 * 3 + 2,
- 0x8 * 4, 0x8 * 4 + 1, 0x8 * 4 + 2,
- 0x8 * 5, 0x8 * 5 + 1, 0x8 * 5 + 2,
- 0x8 * 6, 0x8 * 6 + 1, 0x8 * 6 + 2,
- 0x8 * 7, 0x8 * 7 + 1, 0x8 * 7 + 2
- },
- .oobfree = {
- { 0x8 * 0 + 3, 5 },
- { 0x8 * 1 + 3, 5 },
- { 0x8 * 2 + 3, 5 },
- { 0x8 * 3 + 3, 5 },
- { 0x8 * 4 + 3, 5 },
- { 0x8 * 5 + 3, 5 },
- { 0x8 * 6 + 3, 5 },
- { 0x8 * 7 + 3, 5 },
- }
+static int bootrom_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ oobregion->offset = section * 8;
+ oobregion->length = 3;
+
+ return 0;
+}
+
+static int bootrom_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ oobregion->offset = (section * 8) + 3;
+ oobregion->length = 5;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops bootrom_ooblayout_ops = {
+ .ecc = bootrom_ooblayout_ecc,
+ .free = bootrom_ooblayout_free,
};
#endif
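
The two callbacks above encode what the deleted table spelled out byte by byte. A throwaway userspace expansion (hypothetical test program, not kernel code) reproduces the old eccpos/oobfree lists:

	#include <stdio.h>

	int main(void)
	{
		int section;

		/* one 8-byte OOB slice per section, as in the callbacks */
		for (section = 0; section < 8; section++)
			printf("section %d: ecc %d..%d, free %d..%d\n",
			       section, section * 8, section * 8 + 2,
			       section * 8 + 3, section * 8 + 7);
		return 0;
	}
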
@@ -800,7 +805,7 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
/* setup hardware ECC data struct */
if (hardware_ecc) {
#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
- chip->ecc.layout = &bootrom_ecclayout;
+ mtd_set_ooblayout(mtd, &bootrom_ooblayout_ops);
#endif
chip->read_buf = bf5xx_nand_dma_read_buf;
chip->write_buf = bf5xx_nand_dma_write_buf;
@@ -812,6 +817,7 @@ static int bf5xx_nand_probe(struct platform_device *pdev)
chip->ecc.write_page_raw = bf5xx_nand_write_page_raw;
} else {
chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
}
/* scan hardware nand chip and setup mtd info data struct */
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index e05283973..b76ad7c01 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -32,7 +32,6 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
-#include <linux/of_mtd.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -601,7 +600,7 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
{
- if (ctrl->nand_version < 0x0700)
+ if (ctrl->nand_version < 0x0602)
return 24;
return 0;
}
@@ -781,127 +780,183 @@ static inline bool is_hamming_ecc(struct brcmnand_cfg *cfg)
}
/*
- * Returns a nand_ecclayout strucutre for the given layout/configuration.
- * Returns NULL on failure.
+ * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given
+ * the layout/configuration.
+ * Returns -ERRCODE on failure.
*/
-static struct nand_ecclayout *brcmnand_create_layout(int ecc_level,
- struct brcmnand_host *host)
+static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
struct brcmnand_cfg *cfg = &host->hwcfg;
- int i, j;
- struct nand_ecclayout *layout;
- int req;
- int sectors;
- int sas;
- int idx1, idx2;
-
- layout = devm_kzalloc(&host->pdev->dev, sizeof(*layout), GFP_KERNEL);
- if (!layout)
- return NULL;
-
- sectors = cfg->page_size / (512 << cfg->sector_size_1k);
- sas = cfg->spare_area_size << cfg->sector_size_1k;
-
- /* Hamming */
- if (is_hamming_ecc(cfg)) {
- for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
- /* First sector of each page may have BBI */
- if (i == 0) {
- layout->oobfree[idx2].offset = i * sas + 1;
- /* Small-page NAND use byte 6 for BBI */
- if (cfg->page_size == 512)
- layout->oobfree[idx2].offset--;
- layout->oobfree[idx2].length = 5;
- } else {
- layout->oobfree[idx2].offset = i * sas;
- layout->oobfree[idx2].length = 6;
- }
- idx2++;
- layout->eccpos[idx1++] = i * sas + 6;
- layout->eccpos[idx1++] = i * sas + 7;
- layout->eccpos[idx1++] = i * sas + 8;
- layout->oobfree[idx2].offset = i * sas + 9;
- layout->oobfree[idx2].length = 7;
- idx2++;
- /* Leave zero-terminated entry for OOBFREE */
- if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE ||
- idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
- break;
- }
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+ int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
- return layout;
- }
+ if (section >= sectors)
+ return -ERANGE;
- /*
- * CONTROLLER_VERSION:
- * < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
- * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
- * But we will just be conservative.
- */
- req = DIV_ROUND_UP(ecc_level * 14, 8);
- if (req >= sas) {
- dev_err(&host->pdev->dev,
- "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
- req, sas);
- return NULL;
- }
+ oobregion->offset = (section * sas) + 6;
+ oobregion->length = 3;
+
+ return 0;
+}
+
+static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+ int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
- layout->eccbytes = req * sectors;
- for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
- for (j = sas - req; j < sas && idx1 <
- MTD_MAX_ECCPOS_ENTRIES_LARGE; j++, idx1++)
- layout->eccpos[idx1] = i * sas + j;
+ if (section >= sectors * 2)
+ return -ERANGE;
+
+ oobregion->offset = (section / 2) * sas;
+
+ if (section & 1) {
+ oobregion->offset += 9;
+ oobregion->length = 7;
+ } else {
+ oobregion->length = 6;
/* First sector of each page may have BBI */
- if (i == 0) {
- if (cfg->page_size == 512 && (sas - req >= 6)) {
- /* Small-page NAND use byte 6 for BBI */
- layout->oobfree[idx2].offset = 0;
- layout->oobfree[idx2].length = 5;
- idx2++;
- if (sas - req > 6) {
- layout->oobfree[idx2].offset = 6;
- layout->oobfree[idx2].length =
- sas - req - 6;
- idx2++;
- }
- } else if (sas > req + 1) {
- layout->oobfree[idx2].offset = i * sas + 1;
- layout->oobfree[idx2].length = sas - req - 1;
- idx2++;
- }
- } else if (sas > req) {
- layout->oobfree[idx2].offset = i * sas;
- layout->oobfree[idx2].length = sas - req;
- idx2++;
+ if (!section) {
+ /*
+ * Small-page NAND use byte 6 for BBI while large-page
+ * NAND use byte 0.
+ */
+ if (cfg->page_size > 512)
+ oobregion->offset++;
+ oobregion->length--;
}
- /* Leave zero-terminated entry for OOBFREE */
- if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE ||
- idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
- break;
}
- return layout;
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = {
+ .ecc = brcmnand_hamming_ooblayout_ecc,
+ .free = brcmnand_hamming_ooblayout_free,
+};
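
The free callback above interleaves two regions per 512-byte sector: the bytes before the 3-byte Hamming ECC at in-sector offset 6, and the bytes after it, with the first sector losing one byte to the bad-block indicator. Expanding it for a hypothetical large-page config (2KiB page, 16-byte spare per sector) in plain userspace C:

	#include <stdio.h>

	int main(void)
	{
		int sas = 16, sectors = 4, section;	/* hypothetical config */

		for (section = 0; section < sectors * 2; section++) {
			int offset = (section / 2) * sas, length;

			if (section & 1) {	/* bytes after the ECC */
				offset += 9;
				length = 7;
			} else {		/* bytes before the ECC */
				length = 6;
				if (!section) {	/* large page: byte 0 is BBI */
					offset++;
					length--;
				}
			}
			printf("free %d: %d..%d\n", section, offset,
			       offset + length - 1);
		}
		return 0;	/* the ECC itself sits at n*16+6 .. n*16+8 */
	}
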
+
+static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+ int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+ if (section >= sectors)
+ return -ERANGE;
+
+ oobregion->offset = (section * (sas + 1)) - chip->ecc.bytes;
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+ int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+ if (section >= sectors)
+ return -ERANGE;
+
+ if (sas <= chip->ecc.bytes)
+ return 0;
+
+ oobregion->offset = section * sas;
+ oobregion->length = sas - chip->ecc.bytes;
+
+ if (!section) {
+ oobregion->offset++;
+ oobregion->length--;
+ }
+
+ return 0;
}
-static struct nand_ecclayout *brcmstb_choose_ecc_layout(
- struct brcmnand_host *host)
+static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct brcmnand_host *host = nand_get_controller_data(chip);
+ struct brcmnand_cfg *cfg = &host->hwcfg;
+ int sas = cfg->spare_area_size << cfg->sector_size_1k;
+
+ if (section > 1 || sas - chip->ecc.bytes < 6 ||
+ (section && sas - chip->ecc.bytes == 6))
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 6;
+ oobregion->length = sas - chip->ecc.bytes - 6;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = {
+ .ecc = brcmnand_bch_ooblayout_ecc,
+ .free = brcmnand_bch_ooblayout_free_lp,
+};
+
+static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = {
+ .ecc = brcmnand_bch_ooblayout_ecc,
+ .free = brcmnand_bch_ooblayout_free_sp,
+};
+
+static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
{
- struct nand_ecclayout *layout;
struct brcmnand_cfg *p = &host->hwcfg;
+ struct mtd_info *mtd = nand_to_mtd(&host->chip);
+ struct nand_ecc_ctrl *ecc = &host->chip.ecc;
unsigned int ecc_level = p->ecc_level;
+ int sas = p->spare_area_size << p->sector_size_1k;
+ int sectors = p->page_size / (512 << p->sector_size_1k);
if (p->sector_size_1k)
ecc_level <<= 1;
- layout = brcmnand_create_layout(ecc_level, host);
- if (!layout) {
+ if (is_hamming_ecc(p)) {
+ ecc->bytes = 3 * sectors;
+ mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
+ return 0;
+ }
+
+ /*
+ * CONTROLLER_VERSION:
+ * < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
+ * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
+ * But we will just be conservative.
+ */
+ ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8);
+ if (p->page_size == 512)
+ mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops);
+ else
+ mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops);
+
+ if (ecc->bytes >= sas) {
dev_err(&host->pdev->dev,
- "no proper ecc_layout for this NAND cfg\n");
- return NULL;
+ "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
+ ecc->bytes, sas);
+ return -EINVAL;
}
- return layout;
+ return 0;
}
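
The conservative ECC_REQ formula above is easy to sanity-check for a few BCH strengths; DIV_ROUND_UP here is the usual kernel macro:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int t;

		for (t = 4; t <= 16; t += 4)
			printf("BCH_T=%2d -> %d ECC bytes per sector\n",
			       t, DIV_ROUND_UP(t * 14, 8));
		/* prints 7, 14, 21 and 28 respectively */
		return 0;
	}
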
static void brcmnand_wp(struct mtd_info *mtd, int wp)
@@ -1870,9 +1925,31 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
cfg->col_adr_bytes = 2;
cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
+ if (chip->ecc.mode != NAND_ECC_HW) {
+ dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
+ chip->ecc.mode);
+ return -EINVAL;
+ }
+
+ if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
+ if (chip->ecc.strength == 1 && chip->ecc.size == 512)
+ /* Default to Hamming for 1-bit ECC, if unspecified */
+ chip->ecc.algo = NAND_ECC_HAMMING;
+ else
+ /* Otherwise, BCH */
+ chip->ecc.algo = NAND_ECC_BCH;
+ }
+
+ if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 ||
+ chip->ecc.size != 512)) {
+ dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
+ chip->ecc.strength, chip->ecc.size);
+ return -EINVAL;
+ }
+
switch (chip->ecc.size) {
case 512:
- if (chip->ecc.strength == 1) /* Hamming */
+ if (chip->ecc.algo == NAND_ECC_HAMMING)
cfg->ecc_level = 15;
else
cfg->ecc_level = chip->ecc.strength;
@@ -2001,8 +2078,8 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
*/
chip->options |= NAND_USE_BOUNCE_BUFFER;
- if (of_get_nand_on_flash_bbt(dn))
- chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+ if (chip->bbt_options & NAND_BBT_USE_FLASH)
+ chip->bbt_options |= NAND_BBT_NO_OOB;
if (brcmnand_setup_dev(host))
return -ENXIO;
@@ -2011,9 +2088,9 @@ static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
/* only use our internal HW threshold */
mtd->bitflip_threshold = 1;
- chip->ecc.layout = brcmstb_choose_ecc_layout(host);
- if (!chip->ecc.layout)
- return -ENXIO;
+ ret = brcmstb_choose_ecc_layout(host);
+ if (ret)
+ return ret;
if (nand_scan_tail(mtd))
return -ENXIO;
@@ -2115,6 +2192,7 @@ static const struct of_device_id brcmnand_of_match[] = {
{ .compatible = "brcm,brcmnand-v5.0" },
{ .compatible = "brcm,brcmnand-v6.0" },
{ .compatible = "brcm,brcmnand-v6.1" },
+ { .compatible = "brcm,brcmnand-v6.2" },
{ .compatible = "brcm,brcmnand-v7.0" },
{ .compatible = "brcm,brcmnand-v7.1" },
{},
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index e553aff68..0b0c93702 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -459,10 +459,37 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
return max_bitflips;
}
-static struct nand_ecclayout cafe_oobinfo_2048 = {
- .eccbytes = 14,
- .eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13},
- .oobfree = {{14, 50}}
+static int cafe_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int cafe_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = chip->ecc.total;
+ oobregion->length = mtd->oobsize - chip->ecc.total;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops cafe_ooblayout_ops = {
+ .ecc = cafe_ooblayout_ecc,
+ .free = cafe_ooblayout_free,
};
/* Ick. The BBT code really ought to be able to work this bit out
@@ -494,12 +521,6 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_2048 = {
.pattern = cafe_mirror_pattern_2048
};
-static struct nand_ecclayout cafe_oobinfo_512 = {
- .eccbytes = 14,
- .eccpos = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13},
- .oobfree = {{14, 2}}
-};
-
static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
| NAND_BBT_2BIT | NAND_BBT_VERSION,
@@ -743,12 +764,11 @@ static int cafe_nand_probe(struct pci_dev *pdev,
cafe->ctl2 |= 1<<29; /* 2KiB page size */
/* Set up ECC according to the type of chip we found */
+ mtd_set_ooblayout(mtd, &cafe_ooblayout_ops);
if (mtd->writesize == 2048) {
- cafe->nand.ecc.layout = &cafe_oobinfo_2048;
cafe->nand.bbt_td = &cafe_bbt_main_descr_2048;
cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048;
} else if (mtd->writesize == 512) {
- cafe->nand.ecc.layout = &cafe_oobinfo_512;
cafe->nand.bbt_td = &cafe_bbt_main_descr_512;
cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512;
} else {
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index 6f97ebba5..49133783c 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -187,6 +187,7 @@ static int __init cmx270_init(void)
/* 15 us command delay time */
this->chip_delay = 20;
this->ecc.mode = NAND_ECC_SOFT;
+ this->ecc.algo = NAND_ECC_HAMMING;
/* read/write functions */
this->read_byte = cmx270_read_byte;
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 8cb821b66..cc07ba0f0 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -34,7 +34,6 @@
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of.h>
-#include <linux/of_mtd.h>
#include <linux/platform_data/mtd-davinci.h>
#include <linux/platform_data/mtd-davinci-aemif.h>
@@ -54,7 +53,6 @@
*/
struct davinci_nand_info {
struct nand_chip chip;
- struct nand_ecclayout ecclayout;
struct device *dev;
struct clk *clk;
@@ -480,63 +478,46 @@ static int nand_davinci_dev_ready(struct mtd_info *mtd)
 * ten ECC bytes plus the manufacturer's bad block marker byte,
 * and not overlapping the default BBT markers.
*/
-static struct nand_ecclayout hwecc4_small = {
- .eccbytes = 10,
- .eccpos = { 0, 1, 2, 3, 4,
- /* offset 5 holds the badblock marker */
- 6, 7,
- 13, 14, 15, },
- .oobfree = {
- {.offset = 8, .length = 5, },
- {.offset = 16, },
- },
-};
+static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 2)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else if (section == 1) {
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ } else {
+ oobregion->offset = 13;
+ oobregion->length = 3;
+ }
-/* An ECC layout for using 4-bit ECC with large-page (2048bytes) flash,
- * storing ten ECC bytes plus the manufacturer's bad block marker byte,
- * and not overlapping the default BBT markers.
- */
-static struct nand_ecclayout hwecc4_2048 = {
- .eccbytes = 40,
- .eccpos = {
- /* at the end of spare sector */
- 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
- 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
- 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
- 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
- },
- .oobfree = {
- /* 2 bytes at offset 0 hold manufacturer badblock markers */
- {.offset = 2, .length = 22, },
- /* 5 bytes at offset 8 hold BBT markers */
- /* 8 bytes at offset 16 hold JFFS2 clean markers */
- },
-};
+ return 0;
+}
-/*
- * An ECC layout for using 4-bit ECC with large-page (4096bytes) flash,
- * storing ten ECC bytes plus the manufacturer's bad block marker byte,
- * and not overlapping the default BBT markers.
- */
-static struct nand_ecclayout hwecc4_4096 = {
- .eccbytes = 80,
- .eccpos = {
- /* at the end of spare sector */
- 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
- 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
- 78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
- 88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
- 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
- 108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
- 118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
- },
- .oobfree = {
- /* 2 bytes at offset 0 hold manufacturer badblock markers */
- {.offset = 2, .length = 46, },
- /* 5 bytes at offset 8 hold BBT markers */
- /* 8 bytes at offset 16 hold JFFS2 clean markers */
- },
+static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 8;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 16;
+ oobregion->length = mtd->oobsize - 16;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
+ .ecc = hwecc4_ooblayout_small_ecc,
+ .free = hwecc4_ooblayout_small_free,
};
#if defined(CONFIG_OF)
@@ -577,8 +558,6 @@ static struct davinci_nand_pdata
"ti,davinci-mask-chipsel", &prop))
pdata->mask_chipsel = prop;
if (!of_property_read_string(pdev->dev.of_node,
- "nand-ecc-mode", &mode) ||
- !of_property_read_string(pdev->dev.of_node,
"ti,davinci-ecc-mode", &mode)) {
if (!strncmp("none", mode, 4))
pdata->ecc_mode = NAND_ECC_NONE;
@@ -591,14 +570,11 @@ static struct davinci_nand_pdata
"ti,davinci-ecc-bits", &prop))
pdata->ecc_bits = prop;
- prop = of_get_nand_bus_width(pdev->dev.of_node);
- if (0 < prop || !of_property_read_u32(pdev->dev.of_node,
- "ti,davinci-nand-buswidth", &prop))
- if (prop == 16)
- pdata->options |= NAND_BUSWIDTH_16;
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "ti,davinci-nand-buswidth", &prop) && prop == 16)
+ pdata->options |= NAND_BUSWIDTH_16;
+
if (of_property_read_bool(pdev->dev.of_node,
- "nand-on-flash-bbt") ||
- of_property_read_bool(pdev->dev.of_node,
"ti,davinci-nand-use-bbt"))
pdata->bbt_options = NAND_BBT_USE_FLASH;
@@ -628,7 +604,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
void __iomem *base;
int ret;
uint32_t val;
- nand_ecc_modes_t ecc_mode;
struct mtd_info *mtd;
pdata = nand_davinci_get_pdata(pdev);
@@ -712,13 +687,53 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->chip.write_buf = nand_davinci_write_buf;
/* Use board-specific ECC config */
- ecc_mode = pdata->ecc_mode;
+ info->chip.ecc.mode = pdata->ecc_mode;
ret = -EINVAL;
- switch (ecc_mode) {
+
+ info->clk = devm_clk_get(&pdev->dev, "aemif");
+ if (IS_ERR(info->clk)) {
+ ret = PTR_ERR(info->clk);
+ dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
+ ret);
+ goto err_clk_enable;
+ }
+
+ spin_lock_irq(&davinci_nand_lock);
+
+ /* put CSxNAND into NAND mode */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val |= BIT(info->core_chipsel);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ spin_unlock_irq(&davinci_nand_lock);
+
+ /* Scan to find existence of the device(s) */
+ ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
+ if (ret < 0) {
+ dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
+ goto err;
+ }
+
+ switch (info->chip.ecc.mode) {
case NAND_ECC_NONE:
+ pdata->ecc_bits = 0;
+ break;
case NAND_ECC_SOFT:
pdata->ecc_bits = 0;
+ /*
+ * This driver expects Hamming based ECC when ecc_mode is set
+ * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
+ * avoid adding an extra ->ecc_algo field to
+ * davinci_nand_pdata.
+ */
+ info->chip.ecc.algo = NAND_ECC_HAMMING;
break;
case NAND_ECC_HW:
if (pdata->ecc_bits == 4) {
@@ -754,37 +769,6 @@ static int nand_davinci_probe(struct platform_device *pdev)
default:
return -EINVAL;
}
- info->chip.ecc.mode = ecc_mode;
-
- info->clk = devm_clk_get(&pdev->dev, "aemif");
- if (IS_ERR(info->clk)) {
- ret = PTR_ERR(info->clk);
- dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(info->clk);
- if (ret < 0) {
- dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
- ret);
- goto err_clk_enable;
- }
-
- spin_lock_irq(&davinci_nand_lock);
-
- /* put CSxNAND into NAND mode */
- val = davinci_nand_readl(info, NANDFCR_OFFSET);
- val |= BIT(info->core_chipsel);
- davinci_nand_writel(info, NANDFCR_OFFSET, val);
-
- spin_unlock_irq(&davinci_nand_lock);
-
- /* Scan to find existence of the device(s) */
- ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
- if (ret < 0) {
- dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
- goto err;
- }
/* Update ECC layout if needed ... for 1-bit HW ECC, the default
* is OK, but it allocates 6 bytes when only 3 are needed (for
@@ -805,26 +789,14 @@ static int nand_davinci_probe(struct platform_device *pdev)
* table marker fits in the free bytes.
*/
if (chunks == 1) {
- info->ecclayout = hwecc4_small;
- info->ecclayout.oobfree[1].length = mtd->oobsize - 16;
- goto syndrome_done;
- }
- if (chunks == 4) {
- info->ecclayout = hwecc4_2048;
- info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
- goto syndrome_done;
- }
- if (chunks == 8) {
- info->ecclayout = hwecc4_4096;
+ mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
+ } else if (chunks == 4 || chunks == 8) {
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
- goto syndrome_done;
+ } else {
+ ret = -EIO;
+ goto err;
}
-
- ret = -EIO;
- goto err;
-
-syndrome_done:
- info->chip.ecc.layout = &info->ecclayout;
}
ret = nand_scan_tail(mtd);
@@ -850,7 +822,7 @@ err:
err_clk_enable:
spin_lock_irq(&davinci_nand_lock);
- if (ecc_mode == NAND_ECC_HW_SYNDROME)
+ if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
ecc4_busy = false;
spin_unlock_irq(&davinci_nand_lock);
return ret;
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 30bf5f690..0476ae877 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1374,13 +1374,41 @@ static void denali_hw_init(struct denali_nand_info *denali)
* correction
*/
#define ECC_8BITS 14
-static struct nand_ecclayout nand_8bit_oob = {
- .eccbytes = 14,
-};
-
#define ECC_15BITS 26
-static struct nand_ecclayout nand_15bit_oob = {
- .eccbytes = 26,
+
+static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = denali->bbtskipbytes;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int denali_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct denali_nand_info *denali = mtd_to_denali(mtd);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = chip->ecc.total + denali->bbtskipbytes;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
+ .ecc = denali_ooblayout_ecc,
+ .free = denali_ooblayout_free,
};
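
With the BBT-skip bytes reserved at the start of the OOB, the single ECC region follows them and the free region takes whatever is left. Hypothetical numbers for a 2KiB page (4 sectors of 14 ECC bytes, 64-byte OOB, 2 skip bytes):

	#include <stdio.h>

	int main(void)
	{
		int bbtskip = 2, steps = 4, eccbytes = 14, oobsize = 64;
		int total = steps * eccbytes;		/* chip->ecc.total: 56 */

		printf("ecc: %d..%d, free: %d..%d\n",
		       bbtskip, bbtskip + total - 1,	/* 2..57 */
		       bbtskip + total, oobsize - 1);	/* 58..63 */
		return 0;
	}
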
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
@@ -1561,7 +1589,6 @@ int denali_init(struct denali_nand_info *denali)
ECC_SECTOR_SIZE)))) {
/* if MLC OOB size is large enough, use 15bit ECC*/
denali->nand.ecc.strength = 15;
- denali->nand.ecc.layout = &nand_15bit_oob;
denali->nand.ecc.bytes = ECC_15BITS;
iowrite32(15, denali->flash_reg + ECC_CORRECTION);
} else if (mtd->oobsize < (denali->bbtskipbytes +
@@ -1571,20 +1598,13 @@ int denali_init(struct denali_nand_info *denali)
goto failed_req_irq;
} else {
denali->nand.ecc.strength = 8;
- denali->nand.ecc.layout = &nand_8bit_oob;
denali->nand.ecc.bytes = ECC_8BITS;
iowrite32(8, denali->flash_reg + ECC_CORRECTION);
}
+ mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
denali->nand.ecc.bytes *= denali->devnum;
denali->nand.ecc.strength *= denali->devnum;
- denali->nand.ecc.layout->eccbytes *=
- mtd->writesize / ECC_SECTOR_SIZE;
- denali->nand.ecc.layout->oobfree[0].offset =
- denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes;
- denali->nand.ecc.layout->oobfree[0].length =
- mtd->oobsize - denali->nand.ecc.layout->eccbytes -
- denali->bbtskipbytes;
/*
* Let driver know the total blocks number and how many blocks
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 547c10029..a023ab9e9 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -950,20 +950,50 @@ static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
//u_char mydatabuf[528];
-/* The strange out-of-order .oobfree list below is a (possibly unneeded)
- * attempt to retain compatibility. It used to read:
- * .oobfree = { {8, 8} }
- * Since that leaves two bytes unusable, it was changed. But the following
- * scheme might affect existing jffs2 installs by moving the cleanmarker:
- * .oobfree = { {6, 10} }
- * jffs2 seems to handle the above gracefully, but the current scheme seems
- * safer. The only problem with it is that any code that parses oobfree must
- * be able to handle out-of-order segments.
- */
-static struct nand_ecclayout doc200x_oobinfo = {
- .eccbytes = 6,
- .eccpos = {0, 1, 2, 3, 4, 5},
- .oobfree = {{8, 8}, {6, 2}}
+static int doc200x_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static int doc200x_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ /*
+ * The strange out-of-order free bytes definition is a (possibly
+ * unneeded) attempt to retain compatibility. It used to read:
+ * .oobfree = { {8, 8} }
+ * Since that leaves two bytes unusable, it was changed. But the
+ * following scheme might affect existing jffs2 installs by moving the
+ * cleanmarker:
+ * .oobfree = { {6, 10} }
+ * jffs2 seems to handle the above gracefully, but the current scheme
+ * seems safer. The only problem with it is that any code retrieving
+ * free bytes position must be able to handle out-of-order segments.
+ */
+ if (!section) {
+ oobregion->offset = 8;
+ oobregion->length = 8;
+ } else {
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops doc200x_ooblayout_ops = {
+ .ecc = doc200x_ooblayout_ecc,
+ .free = doc200x_ooblayout_free,
};
/* Find the (I)NFTL Media Header, and optionally also the mirror media header.
@@ -1537,6 +1567,7 @@ static int __init doc_probe(unsigned long physadr)
nand->bbt_md = nand->bbt_td + 1;
mtd->owner = THIS_MODULE;
+ mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops);
nand_set_controller_data(nand, doc);
nand->select_chip = doc200x_select_chip;
@@ -1548,7 +1579,6 @@ static int __init doc_probe(unsigned long physadr)
nand->ecc.calculate = doc200x_calculate_ecc;
nand->ecc.correct = doc200x_correct_data;
- nand->ecc.layout = &doc200x_oobinfo;
nand->ecc.mode = NAND_ECC_HW_SYNDROME;
nand->ecc.size = 512;
nand->ecc.bytes = 6;
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index d86a60e1b..473169980 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -222,10 +222,33 @@ struct docg4_priv {
* Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14.
* Byte 15 (the last) is used by the driver as a "page written" flag.
*/
-static struct nand_ecclayout docg4_oobinfo = {
- .eccbytes = 9,
- .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
- .oobfree = { {.offset = 2, .length = 5} }
+static int docg4_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 7;
+ oobregion->length = 9;
+
+ return 0;
+}
+
+static int docg4_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = 5;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops docg4_ooblayout_ops = {
+ .ecc = docg4_ooblayout_ecc,
+ .free = docg4_ooblayout_free,
};
/*
@@ -1209,6 +1232,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
mtd->writesize = DOCG4_PAGE_SIZE;
mtd->erasesize = DOCG4_BLOCK_SIZE;
mtd->oobsize = DOCG4_OOB_SIZE;
+ mtd_set_ooblayout(mtd, &docg4_ooblayout_ops);
nand->chipsize = DOCG4_CHIP_SIZE;
nand->chip_shift = DOCG4_CHIP_SHIFT;
nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT;
@@ -1217,7 +1241,6 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
nand->pagemask = 0x3ffff;
nand->badblockpos = NAND_LARGE_BADBLOCK_POS;
nand->badblockbits = 8;
- nand->ecc.layout = &docg4_oobinfo;
nand->ecc.mode = NAND_ECC_HW_SYNDROME;
nand->ecc.size = DOCG4_PAGE_SIZE;
nand->ecc.prepad = 8;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 059d5f7ec..60a88f24c 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -79,32 +79,53 @@ struct fsl_elbc_fcm_ctrl {
/* These map to the positions used by the FCM hardware ECC generator */
-/* Small Page FLASH with FMR[ECCM] = 0 */
-static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = {
- .eccbytes = 3,
- .eccpos = {6, 7, 8},
- .oobfree = { {0, 5}, {9, 7} },
-};
+static int fsl_elbc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
-/* Small Page FLASH with FMR[ECCM] = 1 */
-static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = {
- .eccbytes = 3,
- .eccpos = {8, 9, 10},
- .oobfree = { {0, 5}, {6, 2}, {11, 5} },
-};
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
-/* Large Page FLASH with FMR[ECCM] = 0 */
-static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = {
- .eccbytes = 12,
- .eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56},
- .oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} },
-};
+ oobregion->offset = (16 * section) + 6;
+ if (priv->fmr & FMR_ECCM)
+ oobregion->offset += 2;
-/* Large Page FLASH with FMR[ECCM] = 1 */
-static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
- .eccbytes = 12,
- .eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58},
- .oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} },
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int fsl_elbc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+
+ if (section > chip->ecc.steps)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ if (mtd->writesize > 512)
+ oobregion->offset++;
+ oobregion->length = (priv->fmr & FMR_ECCM) ? 7 : 5;
+ } else {
+ oobregion->offset = (16 * section) -
+ ((priv->fmr & FMR_ECCM) ? 5 : 7);
+ if (section < chip->ecc.steps)
+ oobregion->length = 13;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops fsl_elbc_ooblayout_ops = {
+ .ecc = fsl_elbc_ooblayout_ecc,
+ .free = fsl_elbc_ooblayout_free,
};
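
FMR[ECCM] shifts each 3-byte ECC slice two bytes to the right inside its 16-byte OOB chunk. Expanding the ECC callback for both settings (hypothetical large-page device with four steps) reproduces the eccpos values of the deleted static tables:

	#include <stdio.h>

	int main(void)
	{
		int eccm, section, steps = 4;	/* hypothetical 2KiB page */

		for (eccm = 0; eccm <= 1; eccm++)
			for (section = 0; section < steps; section++) {
				int off = 16 * section + 6 + (eccm ? 2 : 0);

				printf("ECCM=%d section %d: ecc %d..%d\n",
				       eccm, section, off, off + 2);
			}
		return 0;
	}
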
/*
@@ -657,8 +678,8 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
chip->ecc.bytes);
dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
chip->ecc.total);
- dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.layout = %p\n",
- chip->ecc.layout);
+ dev_dbg(priv->dev, "fsl_elbc_init: mtd->ooblayout = %p\n",
+ mtd->ooblayout);
dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
@@ -675,14 +696,6 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
} else if (mtd->writesize == 2048) {
priv->page_size = 1;
setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
- /* adjust ecc setup if needed */
- if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
- BR_DECC_CHK_GEN) {
- chip->ecc.size = 512;
- chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
- &fsl_elbc_oob_lp_eccm1 :
- &fsl_elbc_oob_lp_eccm0;
- }
} else {
dev_err(priv->dev,
"fsl_elbc_init: page size %d is not supported\n",
@@ -780,15 +793,14 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
BR_DECC_CHK_GEN) {
chip->ecc.mode = NAND_ECC_HW;
- /* put in small page settings and adjust later if needed */
- chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
- &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0;
+ mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
chip->ecc.size = 512;
chip->ecc.bytes = 3;
chip->ecc.strength = 1;
} else {
/* otherwise fall back to default software ECC */
chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
}
return 0;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 43f5a3a48..4e9e5fd8f 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -67,136 +67,6 @@ struct fsl_ifc_nand_ctrl {
static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
-/* 512-byte page with 4-bit ECC, 8-bit */
-static struct nand_ecclayout oob_512_8bit_ecc4 = {
- .eccbytes = 8,
- .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
- .oobfree = { {0, 5}, {6, 2} },
-};
-
-/* 512-byte page with 4-bit ECC, 16-bit */
-static struct nand_ecclayout oob_512_16bit_ecc4 = {
- .eccbytes = 8,
- .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
- .oobfree = { {2, 6}, },
-};
-
-/* 2048-byte page size with 4-bit ECC */
-static struct nand_ecclayout oob_2048_ecc4 = {
- .eccbytes = 32,
- .eccpos = {
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39,
- },
- .oobfree = { {2, 6}, {40, 24} },
-};
-
-/* 4096-byte page size with 4-bit ECC */
-static struct nand_ecclayout oob_4096_ecc4 = {
- .eccbytes = 64,
- .eccpos = {
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63,
- 64, 65, 66, 67, 68, 69, 70, 71,
- },
- .oobfree = { {2, 6}, {72, 56} },
-};
-
-/* 4096-byte page size with 8-bit ECC -- requires 218-byte OOB */
-static struct nand_ecclayout oob_4096_ecc8 = {
- .eccbytes = 128,
- .eccpos = {
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63,
- 64, 65, 66, 67, 68, 69, 70, 71,
- 72, 73, 74, 75, 76, 77, 78, 79,
- 80, 81, 82, 83, 84, 85, 86, 87,
- 88, 89, 90, 91, 92, 93, 94, 95,
- 96, 97, 98, 99, 100, 101, 102, 103,
- 104, 105, 106, 107, 108, 109, 110, 111,
- 112, 113, 114, 115, 116, 117, 118, 119,
- 120, 121, 122, 123, 124, 125, 126, 127,
- 128, 129, 130, 131, 132, 133, 134, 135,
- },
- .oobfree = { {2, 6}, {136, 82} },
-};
-
-/* 8192-byte page size with 4-bit ECC */
-static struct nand_ecclayout oob_8192_ecc4 = {
- .eccbytes = 128,
- .eccpos = {
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63,
- 64, 65, 66, 67, 68, 69, 70, 71,
- 72, 73, 74, 75, 76, 77, 78, 79,
- 80, 81, 82, 83, 84, 85, 86, 87,
- 88, 89, 90, 91, 92, 93, 94, 95,
- 96, 97, 98, 99, 100, 101, 102, 103,
- 104, 105, 106, 107, 108, 109, 110, 111,
- 112, 113, 114, 115, 116, 117, 118, 119,
- 120, 121, 122, 123, 124, 125, 126, 127,
- 128, 129, 130, 131, 132, 133, 134, 135,
- },
- .oobfree = { {2, 6}, {136, 208} },
-};
-
-/* 8192-byte page size with 8-bit ECC -- requires 218-byte OOB */
-static struct nand_ecclayout oob_8192_ecc8 = {
- .eccbytes = 256,
- .eccpos = {
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63,
- 64, 65, 66, 67, 68, 69, 70, 71,
- 72, 73, 74, 75, 76, 77, 78, 79,
- 80, 81, 82, 83, 84, 85, 86, 87,
- 88, 89, 90, 91, 92, 93, 94, 95,
- 96, 97, 98, 99, 100, 101, 102, 103,
- 104, 105, 106, 107, 108, 109, 110, 111,
- 112, 113, 114, 115, 116, 117, 118, 119,
- 120, 121, 122, 123, 124, 125, 126, 127,
- 128, 129, 130, 131, 132, 133, 134, 135,
- 136, 137, 138, 139, 140, 141, 142, 143,
- 144, 145, 146, 147, 148, 149, 150, 151,
- 152, 153, 154, 155, 156, 157, 158, 159,
- 160, 161, 162, 163, 164, 165, 166, 167,
- 168, 169, 170, 171, 172, 173, 174, 175,
- 176, 177, 178, 179, 180, 181, 182, 183,
- 184, 185, 186, 187, 188, 189, 190, 191,
- 192, 193, 194, 195, 196, 197, 198, 199,
- 200, 201, 202, 203, 204, 205, 206, 207,
- 208, 209, 210, 211, 212, 213, 214, 215,
- 216, 217, 218, 219, 220, 221, 222, 223,
- 224, 225, 226, 227, 228, 229, 230, 231,
- 232, 233, 234, 235, 236, 237, 238, 239,
- 240, 241, 242, 243, 244, 245, 246, 247,
- 248, 249, 250, 251, 252, 253, 254, 255,
- 256, 257, 258, 259, 260, 261, 262, 263,
- },
- .oobfree = { {2, 6}, {264, 80} },
-};
-
/*
* Generic flash bbt descriptors
*/
@@ -223,6 +93,57 @@ static struct nand_bbt_descr bbt_mirror_descr = {
.pattern = mirror_pattern,
};
+static int fsl_ifc_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 8;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int fsl_ifc_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section > 1)
+ return -ERANGE;
+
+ if (mtd->writesize == 512 &&
+ !(chip->options & NAND_BUSWIDTH_16)) {
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ }
+
+ return 0;
+ }
+
+ if (!section) {
+ oobregion->offset = 2;
+ oobregion->length = 6;
+ } else {
+ oobregion->offset = chip->ecc.total + 8;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops fsl_ifc_ooblayout_ops = {
+ .ecc = fsl_ifc_ooblayout_ecc,
+ .free = fsl_ifc_ooblayout_free,
+};
+
/*
* Set up the IFC hardware block and page address fields, and the ifc nand
* structure addr field to point to the correct IFC buffer in memory
@@ -232,7 +153,7 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
struct nand_chip *chip = mtd_to_nand(mtd);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
int buf_num;
ifc_nand_ctrl->page = page_addr;
@@ -257,18 +178,22 @@ static int is_blank(struct mtd_info *mtd, unsigned int bufnum)
u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2);
u32 __iomem *mainarea = (u32 __iomem *)addr;
u8 __iomem *oob = addr + mtd->writesize;
- int i;
+ struct mtd_oob_region oobregion = { };
+ int i, section = 0;
for (i = 0; i < mtd->writesize / 4; i++) {
if (__raw_readl(&mainarea[i]) != 0xffffffff)
return 0;
}
- for (i = 0; i < chip->ecc.layout->eccbytes; i++) {
- int pos = chip->ecc.layout->eccpos[i];
+ mtd_ooblayout_ecc(mtd, section++, &oobregion);
+ while (oobregion.length) {
+ for (i = 0; i < oobregion.length; i++) {
+ if (__raw_readb(&oob[oobregion.offset + i]) != 0xff)
+ return 0;
+ }
- if (__raw_readb(&oob[pos]) != 0xff)
- return 0;
+ mtd_ooblayout_ecc(mtd, section++, &oobregion);
}
return 1;
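
The while loop above terminates because the core accessor zeroes the region before dispatching to the layout, so a section past the end reports length 0 rather than stale data. A simplified sketch of the 4.7-era accessor (from memory, not verbatim; see drivers/mtd/mtdcore.c for the authoritative version):

	int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
			      struct mtd_oob_region *oobecc)
	{
		memset(oobecc, 0, sizeof(*oobecc));	/* length = 0 by default */

		if (!mtd || section < 0)
			return -EINVAL;

		if (!mtd->ooblayout || !mtd->ooblayout->ecc)
			return -ENOTSUPP;

		return mtd->ooblayout->ecc(mtd, section, oobecc);
	}
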
@@ -295,7 +220,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
u32 eccstat[4];
int i;
@@ -371,7 +296,7 @@ static void fsl_ifc_do_read(struct nand_chip *chip,
{
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
if (mtd->writesize > 512) {
@@ -411,7 +336,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
struct nand_chip *chip = mtd_to_nand(mtd);
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
/* clear the read buffer */
ifc_nand_ctrl->read_bytes = 0;
@@ -723,7 +648,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
u32 nand_fsr;
/* Use READ_STATUS command, but wait for the device to be ready */
@@ -808,8 +733,8 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
chip->ecc.bytes);
dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
chip->ecc.total);
- dev_dbg(priv->dev, "%s: nand->ecc.layout = %p\n", __func__,
- chip->ecc.layout);
+ dev_dbg(priv->dev, "%s: mtd->ooblayout = %p\n", __func__,
+ mtd->ooblayout);
dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
@@ -825,39 +750,42 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
{
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
+ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
uint32_t cs = priv->bank;
/* Save CSOR and CSOR_ext */
- csor = ifc_in32(&ifc->csor_cs[cs].csor);
- csor_ext = ifc_in32(&ifc->csor_cs[cs].csor_ext);
+ csor = ifc_in32(&ifc_global->csor_cs[cs].csor);
+ csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext);
/* change PageSize to 8K and SpareSize to 1K */
csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
- ifc_out32(csor_8k, &ifc->csor_cs[cs].csor);
- ifc_out32(0x0000400, &ifc->csor_cs[cs].csor_ext);
+ ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor);
+ ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext);
/* READID */
ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
- &ifc->ifc_nand.nand_fir0);
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc_runtime->ifc_nand.nand_fir0);
ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
- &ifc->ifc_nand.nand_fcr0);
- ifc_out32(0x0, &ifc->ifc_nand.row3);
+ &ifc_runtime->ifc_nand.nand_fcr0);
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.row3);
- ifc_out32(0x0, &ifc->ifc_nand.nand_fbcr);
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr);
/* Program ROW0/COL0 */
- ifc_out32(0x0, &ifc->ifc_nand.row0);
- ifc_out32(0x0, &ifc->ifc_nand.col0);
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.row0);
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.col0);
/* set the chip select for NAND Transaction */
- ifc_out32(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
+ ifc_out32(cs << IFC_NAND_CSEL_SHIFT,
+ &ifc_runtime->ifc_nand.nand_csel);
/* start read seq */
- ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT,
+ &ifc_runtime->ifc_nand.nandseq_strt);
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -867,17 +795,17 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
/* Restore CSOR and CSOR_ext */
- ifc_out32(csor, &ifc->csor_cs[cs].csor);
- ifc_out32(csor_ext, &ifc->csor_cs[cs].csor_ext);
+ ifc_out32(csor, &ifc_global->csor_cs[cs].csor);
+ ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext);
}
static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
{
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
- struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
+ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
+ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
struct nand_chip *chip = &priv->chip;
struct mtd_info *mtd = nand_to_mtd(&priv->chip);
- struct nand_ecclayout *layout;
u32 csor;
/* Fill in fsl_ifc_mtd structure */
@@ -886,7 +814,8 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
/* fill in nand_chip structure */
/* set up function call table */
- if ((ifc_in32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+ if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr))
+ & CSPR_PORT_SIZE_16)
chip->read_byte = fsl_ifc_read_byte16;
else
chip->read_byte = fsl_ifc_read_byte;
@@ -900,13 +829,14 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
- ifc_out32(0x0, &ifc->ifc_nand.ncfgr);
+ ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr);
/* set up nand options */
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->options = NAND_NO_SUBPAGE_WRITE;
- if (ifc_in32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+ if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)
+ & CSPR_PORT_SIZE_16) {
chip->read_byte = fsl_ifc_read_byte16;
chip->options |= NAND_BUSWIDTH_16;
} else {
@@ -919,20 +849,11 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.read_page = fsl_ifc_read_page;
chip->ecc.write_page = fsl_ifc_write_page;
- csor = ifc_in32(&ifc->csor_cs[priv->bank].csor);
-
- /* Hardware generates ECC per 512 Bytes */
- chip->ecc.size = 512;
- chip->ecc.bytes = 8;
- chip->ecc.strength = 4;
+ csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);
switch (csor & CSOR_NAND_PGS_MASK) {
case CSOR_NAND_PGS_512:
- if (chip->options & NAND_BUSWIDTH_16) {
- layout = &oob_512_16bit_ecc4;
- } else {
- layout = &oob_512_8bit_ecc4;
-
+ if (!(chip->options & NAND_BUSWIDTH_16)) {
/* Avoid conflict with bad block marker */
bbt_main_descr.offs = 0;
bbt_mirror_descr.offs = 0;
@@ -942,35 +863,16 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
break;
case CSOR_NAND_PGS_2K:
- layout = &oob_2048_ecc4;
priv->bufnum_mask = 3;
break;
case CSOR_NAND_PGS_4K:
- if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
- CSOR_NAND_ECC_MODE_4) {
- layout = &oob_4096_ecc4;
- } else {
- layout = &oob_4096_ecc8;
- chip->ecc.bytes = 16;
- chip->ecc.strength = 8;
- }
-
priv->bufnum_mask = 1;
break;
case CSOR_NAND_PGS_8K:
- if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
- CSOR_NAND_ECC_MODE_4) {
- layout = &oob_8192_ecc4;
- } else {
- layout = &oob_8192_ecc8;
- chip->ecc.bytes = 16;
- chip->ecc.strength = 8;
- }
-
priv->bufnum_mask = 0;
- break;
+ break;
default:
dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
@@ -980,9 +882,20 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
/* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
if (csor & CSOR_NAND_ECC_DEC_EN) {
chip->ecc.mode = NAND_ECC_HW;
- chip->ecc.layout = layout;
+ mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);
+
+ /* Hardware generates ECC per 512 Bytes */
+ chip->ecc.size = 512;
+ if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
+ chip->ecc.bytes = 8;
+ chip->ecc.strength = 4;
+ } else {
+ chip->ecc.bytes = 16;
+ chip->ecc.strength = 8;
+ }
} else {
chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
}
if (ctrl->version == FSL_IFC_VERSION_1_1_0)
@@ -1007,10 +920,10 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
return 0;
}
-static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
+static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank,
phys_addr_t addr)
{
- u32 cspr = ifc_in32(&ifc->cspr_cs[bank].cspr);
+ u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr);
if (!(cspr & CSPR_V))
return 0;
@@ -1024,7 +937,7 @@ static DEFINE_MUTEX(fsl_ifc_nand_mutex);
static int fsl_ifc_nand_probe(struct platform_device *dev)
{
- struct fsl_ifc_regs __iomem *ifc;
+ struct fsl_ifc_runtime __iomem *ifc;
struct fsl_ifc_mtd *priv;
struct resource res;
static const char *part_probe_types[]
@@ -1034,9 +947,9 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
struct device_node *node = dev->dev.of_node;
struct mtd_info *mtd;
- if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs)
+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs)
return -ENODEV;
- ifc = fsl_ifc_ctrl_dev->regs;
+ ifc = fsl_ifc_ctrl_dev->rregs;
/* get, allocate and map the memory resource */
ret = of_address_to_resource(node, 0, &res);
@@ -1047,7 +960,7 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
/* find which chip select it is connected to */
for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) {
- if (match_bank(ifc, bank, res.start))
+ if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start))
break;
}
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index cafd12de7..d85fa2555 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -170,6 +170,7 @@ static int fun_chip_init(struct fsl_upm_nand *fun,
fun->chip.read_buf = fun_read_buf;
fun->chip.write_buf = fun_write_buf;
fun->chip.ecc.mode = NAND_ECC_SOFT;
+ fun->chip.ecc.algo = NAND_ECC_HAMMING;
if (fun->mchip_count > 1)
fun->chip.select_chip = fun_select_chip;
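
This two-line pattern — NAND_ECC_SOFT plus an explicit ecc.algo — recurs in almost every driver touched here: soft ECC no longer implies Hamming, and the old NAND_ECC_SOFT_BCH mode is expressed as NAND_ECC_SOFT with NAND_ECC_BCH (the fsmc hunk further down makes that substitution explicit). A hedged sketch of the resulting selection logic, using local enums rather than the kernel's definitions:

#include <stdio.h>

/* Local stand-ins for the kernel enums; values are illustrative. */
enum ecc_mode { ECC_NONE, ECC_SOFT, ECC_HW };
enum ecc_algo { ALGO_UNKNOWN, ALGO_HAMMING, ALGO_BCH };

static const char *describe(enum ecc_mode mode, enum ecc_algo algo)
{
	if (mode != ECC_SOFT)
		return mode == ECC_HW ? "hardware ECC" : "no ECC";
	/* The mode alone is no longer enough: the algorithm field now
	 * distinguishes what NAND_ECC_SOFT vs NAND_ECC_SOFT_BCH used to. */
	switch (algo) {
	case ALGO_HAMMING: return "software Hamming";
	case ALGO_BCH:     return "software BCH";
	default:           return "software, algorithm unset (a bug)";
	}
}

int main(void)
{
	printf("%s\n", describe(ECC_SOFT, ALGO_HAMMING));
	printf("%s\n", describe(ECC_SOFT, ALGO_BCH));
	return 0;
}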
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 1bdcd4fa2..d4f454a4b 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -39,210 +39,41 @@
#include <linux/amba/bus.h>
#include <mtd/mtd-abi.h>
-static struct nand_ecclayout fsmc_ecc1_128_layout = {
- .eccbytes = 24,
- .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52,
- 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116},
- .oobfree = {
- {.offset = 8, .length = 8},
- {.offset = 24, .length = 8},
- {.offset = 40, .length = 8},
- {.offset = 56, .length = 8},
- {.offset = 72, .length = 8},
- {.offset = 88, .length = 8},
- {.offset = 104, .length = 8},
- {.offset = 120, .length = 8}
- }
-};
+static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
-static struct nand_ecclayout fsmc_ecc1_64_layout = {
- .eccbytes = 12,
- .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52},
- .oobfree = {
- {.offset = 8, .length = 8},
- {.offset = 24, .length = 8},
- {.offset = 40, .length = 8},
- {.offset = 56, .length = 8},
- }
-};
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
-static struct nand_ecclayout fsmc_ecc1_16_layout = {
- .eccbytes = 3,
- .eccpos = {2, 3, 4},
- .oobfree = {
- {.offset = 8, .length = 8},
- }
-};
+ oobregion->offset = (section * 16) + 2;
+ oobregion->length = 3;
-/*
- * ECC4 layout for NAND of pagesize 8192 bytes & OOBsize 256 bytes. 13*16 bytes
- * of OB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 46
- * bytes are free for use.
- */
-static struct nand_ecclayout fsmc_ecc4_256_layout = {
- .eccbytes = 208,
- .eccpos = { 2, 3, 4, 5, 6, 7, 8,
- 9, 10, 11, 12, 13, 14,
- 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30,
- 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46,
- 50, 51, 52, 53, 54, 55, 56,
- 57, 58, 59, 60, 61, 62,
- 66, 67, 68, 69, 70, 71, 72,
- 73, 74, 75, 76, 77, 78,
- 82, 83, 84, 85, 86, 87, 88,
- 89, 90, 91, 92, 93, 94,
- 98, 99, 100, 101, 102, 103, 104,
- 105, 106, 107, 108, 109, 110,
- 114, 115, 116, 117, 118, 119, 120,
- 121, 122, 123, 124, 125, 126,
- 130, 131, 132, 133, 134, 135, 136,
- 137, 138, 139, 140, 141, 142,
- 146, 147, 148, 149, 150, 151, 152,
- 153, 154, 155, 156, 157, 158,
- 162, 163, 164, 165, 166, 167, 168,
- 169, 170, 171, 172, 173, 174,
- 178, 179, 180, 181, 182, 183, 184,
- 185, 186, 187, 188, 189, 190,
- 194, 195, 196, 197, 198, 199, 200,
- 201, 202, 203, 204, 205, 206,
- 210, 211, 212, 213, 214, 215, 216,
- 217, 218, 219, 220, 221, 222,
- 226, 227, 228, 229, 230, 231, 232,
- 233, 234, 235, 236, 237, 238,
- 242, 243, 244, 245, 246, 247, 248,
- 249, 250, 251, 252, 253, 254
- },
- .oobfree = {
- {.offset = 15, .length = 3},
- {.offset = 31, .length = 3},
- {.offset = 47, .length = 3},
- {.offset = 63, .length = 3},
- {.offset = 79, .length = 3},
- {.offset = 95, .length = 3},
- {.offset = 111, .length = 3},
- {.offset = 127, .length = 3},
- {.offset = 143, .length = 3},
- {.offset = 159, .length = 3},
- {.offset = 175, .length = 3},
- {.offset = 191, .length = 3},
- {.offset = 207, .length = 3},
- {.offset = 223, .length = 3},
- {.offset = 239, .length = 3},
- {.offset = 255, .length = 1}
- }
-};
+ return 0;
+}
-/*
- * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes. 13*8 bytes
- * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 118
- * bytes are free for use.
- */
-static struct nand_ecclayout fsmc_ecc4_224_layout = {
- .eccbytes = 104,
- .eccpos = { 2, 3, 4, 5, 6, 7, 8,
- 9, 10, 11, 12, 13, 14,
- 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30,
- 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46,
- 50, 51, 52, 53, 54, 55, 56,
- 57, 58, 59, 60, 61, 62,
- 66, 67, 68, 69, 70, 71, 72,
- 73, 74, 75, 76, 77, 78,
- 82, 83, 84, 85, 86, 87, 88,
- 89, 90, 91, 92, 93, 94,
- 98, 99, 100, 101, 102, 103, 104,
- 105, 106, 107, 108, 109, 110,
- 114, 115, 116, 117, 118, 119, 120,
- 121, 122, 123, 124, 125, 126
- },
- .oobfree = {
- {.offset = 15, .length = 3},
- {.offset = 31, .length = 3},
- {.offset = 47, .length = 3},
- {.offset = 63, .length = 3},
- {.offset = 79, .length = 3},
- {.offset = 95, .length = 3},
- {.offset = 111, .length = 3},
- {.offset = 127, .length = 97}
- }
-};
+static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
-/*
- * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 128 bytes. 13*8 bytes
- * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 22
- * bytes are free for use.
- */
-static struct nand_ecclayout fsmc_ecc4_128_layout = {
- .eccbytes = 104,
- .eccpos = { 2, 3, 4, 5, 6, 7, 8,
- 9, 10, 11, 12, 13, 14,
- 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30,
- 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46,
- 50, 51, 52, 53, 54, 55, 56,
- 57, 58, 59, 60, 61, 62,
- 66, 67, 68, 69, 70, 71, 72,
- 73, 74, 75, 76, 77, 78,
- 82, 83, 84, 85, 86, 87, 88,
- 89, 90, 91, 92, 93, 94,
- 98, 99, 100, 101, 102, 103, 104,
- 105, 106, 107, 108, 109, 110,
- 114, 115, 116, 117, 118, 119, 120,
- 121, 122, 123, 124, 125, 126
- },
- .oobfree = {
- {.offset = 15, .length = 3},
- {.offset = 31, .length = 3},
- {.offset = 47, .length = 3},
- {.offset = 63, .length = 3},
- {.offset = 79, .length = 3},
- {.offset = 95, .length = 3},
- {.offset = 111, .length = 3},
- {.offset = 127, .length = 1}
- }
-};
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
-/*
- * ECC4 layout for NAND of pagesize 2048 bytes & OOBsize 64 bytes. 13*4 bytes of
- * OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 10
- * bytes are free for use.
- */
-static struct nand_ecclayout fsmc_ecc4_64_layout = {
- .eccbytes = 52,
- .eccpos = { 2, 3, 4, 5, 6, 7, 8,
- 9, 10, 11, 12, 13, 14,
- 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30,
- 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46,
- 50, 51, 52, 53, 54, 55, 56,
- 57, 58, 59, 60, 61, 62,
- },
- .oobfree = {
- {.offset = 15, .length = 3},
- {.offset = 31, .length = 3},
- {.offset = 47, .length = 3},
- {.offset = 63, .length = 1},
- }
-};
+ oobregion->offset = (section * 16) + 8;
-/*
- * ECC4 layout for NAND of pagesize 512 bytes & OOBsize 16 bytes. 13 bytes of
- * OOB size is reserved for ECC, Byte no. 4 & 5 reserved for bad block and One
- * byte is free for use.
- */
-static struct nand_ecclayout fsmc_ecc4_16_layout = {
- .eccbytes = 13,
- .eccpos = { 0, 1, 2, 3, 6, 7, 8,
- 9, 10, 11, 12, 13, 14
- },
- .oobfree = {
- {.offset = 15, .length = 1},
- }
+ if (section < chip->ecc.steps - 1)
+ oobregion->length = 8;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = {
+ .ecc = fsmc_ecc1_ooblayout_ecc,
+ .free = fsmc_ecc1_ooblayout_free,
};
/*
@@ -250,28 +81,46 @@ static struct nand_ecclayout fsmc_ecc4_16_layout = {
* There are 13 bytes of ecc for every 512 byte block and they have to be read
* consecutively and immediately after the 512 byte data block for hardware to
* generate the error bit offsets in 512 byte data.
- * Managing the ecc bytes in the following way makes it easier for software to
- * read ecc bytes consecutive to data bytes. This way is similar to
- * oobfree structure maintained already in generic nand driver
*/
-static struct fsmc_eccplace fsmc_ecc4_lp_place = {
- .eccplace = {
- {.offset = 2, .length = 13},
- {.offset = 18, .length = 13},
- {.offset = 34, .length = 13},
- {.offset = 50, .length = 13},
- {.offset = 66, .length = 13},
- {.offset = 82, .length = 13},
- {.offset = 98, .length = 13},
- {.offset = 114, .length = 13}
- }
-};
+static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
-static struct fsmc_eccplace fsmc_ecc4_sp_place = {
- .eccplace = {
- {.offset = 0, .length = 4},
- {.offset = 6, .length = 9}
- }
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->length = chip->ecc.bytes;
+
+ if (!section && mtd->writesize <= 512)
+ oobregion->offset = 0;
+ else
+ oobregion->offset = (section * 16) + 2;
+
+ return 0;
+}
+
+static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 15;
+
+ if (section < chip->ecc.steps - 1)
+ oobregion->length = 3;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
+ .ecc = fsmc_ecc4_ooblayout_ecc,
+ .free = fsmc_ecc4_ooblayout_free,
};
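
fsmc_ecc4_ooblayout_ecc() encodes the placement rule from the comment above: 13 ECC bytes per 512-byte step, 2 bytes into each 16-byte OOB chunk, except that the first group of a small-page (<= 512 byte) device starts at offset 0. A standalone sketch of that arithmetic, using an example 2048-byte page (values illustrative only):

#include <stdio.h>

/* Mirror of the ecc4 placement rule, outside the kernel. */
static int ecc4_offset(int section, int writesize)
{
	if (!section && writesize <= 512)
		return 0;          /* small page: first group starts at 0 */
	return section * 16 + 2;   /* otherwise 2 bytes in, every 16 bytes */
}

int main(void)
{
	int s;

	/* 2048-byte page, 64-byte OOB: 4 steps of 13 ECC bytes each. */
	for (s = 0; s < 4; s++)
		printf("section %d: ecc at %d..%d\n", s,
		       ecc4_offset(s, 2048), ecc4_offset(s, 2048) + 12);
	return 0;
}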
/**
@@ -283,7 +132,6 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = {
* @partitions: Partition info for a NAND Flash.
* @nr_partitions: Total number of partition of a NAND flash.
*
- * @ecc_place: ECC placing locations in oobfree type format.
* @bank: Bank number for probed device.
* @clk: Clock structure for FSMC.
*
@@ -303,7 +151,6 @@ struct fsmc_nand_data {
struct mtd_partition *partitions;
unsigned int nr_partitions;
- struct fsmc_eccplace *ecc_place;
unsigned int bank;
struct device *dev;
enum access_mode mode;
@@ -710,8 +557,6 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
- struct fsmc_eccplace *ecc_place = host->ecc_place;
int i, j, s, stat, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
@@ -734,9 +579,15 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
chip->read_buf(mtd, p, eccsize);
for (j = 0; j < eccbytes;) {
- off = ecc_place->eccplace[group].offset;
- len = ecc_place->eccplace[group].length;
- group++;
+ struct mtd_oob_region oobregion;
+ int ret;
+
+ ret = mtd_ooblayout_ecc(mtd, group++, &oobregion);
+ if (ret)
+ return ret;
+
+ off = oobregion.offset;
+ len = oobregion.length;
/*
* length is intentionally kept a higher multiple of 2
@@ -1084,24 +935,10 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
if (AMBA_REV_BITS(host->pid) >= 8) {
switch (mtd->oobsize) {
case 16:
- nand->ecc.layout = &fsmc_ecc4_16_layout;
- host->ecc_place = &fsmc_ecc4_sp_place;
- break;
case 64:
- nand->ecc.layout = &fsmc_ecc4_64_layout;
- host->ecc_place = &fsmc_ecc4_lp_place;
- break;
case 128:
- nand->ecc.layout = &fsmc_ecc4_128_layout;
- host->ecc_place = &fsmc_ecc4_lp_place;
- break;
case 224:
- nand->ecc.layout = &fsmc_ecc4_224_layout;
- host->ecc_place = &fsmc_ecc4_lp_place;
- break;
case 256:
- nand->ecc.layout = &fsmc_ecc4_256_layout;
- host->ecc_place = &fsmc_ecc4_lp_place;
break;
default:
dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
@@ -1109,6 +946,8 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
ret = -EINVAL;
goto err_probe;
}
+
+ mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
} else {
switch (nand->ecc.mode) {
case NAND_ECC_HW:
@@ -1119,9 +958,11 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
nand->ecc.strength = 1;
break;
- case NAND_ECC_SOFT_BCH:
- dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n");
- break;
+ case NAND_ECC_SOFT:
+ if (nand->ecc.algo == NAND_ECC_BCH) {
+ dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n");
+ break;
+ }
default:
dev_err(&pdev->dev, "Unsupported ECC mode!\n");
@@ -1132,16 +973,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
* Don't set layout for BCH4 SW ECC. This will be
* generated later in nand_bch_init().
*/
- if (nand->ecc.mode != NAND_ECC_SOFT_BCH) {
+ if (nand->ecc.mode == NAND_ECC_HW) {
switch (mtd->oobsize) {
case 16:
- nand->ecc.layout = &fsmc_ecc1_16_layout;
- break;
case 64:
- nand->ecc.layout = &fsmc_ecc1_64_layout;
- break;
case 128:
- nand->ecc.layout = &fsmc_ecc1_128_layout;
+ mtd_set_ooblayout(mtd,
+ &fsmc_ecc1_ooblayout_ops);
break;
default:
dev_warn(&pdev->dev,
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index ded658fc7..6317f6836 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -273,6 +273,7 @@ static int gpio_nand_probe(struct platform_device *pdev)
nand_set_flash_node(chip, pdev->dev.of_node);
chip->IO_ADDR_W = chip->IO_ADDR_R;
chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
chip->options = gpiomtd->plat.options;
chip->chip_delay = gpiomtd->plat.chip_delay;
chip->cmd_ctrl = gpio_nand_cmd_ctrl;
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index 8122c699c..6e461560c 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -25,7 +25,6 @@
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_mtd.h>
#include "gpmi-nand.h"
#include "bch-regs.h"
@@ -47,10 +46,44 @@ static struct nand_bbt_descr gpmi_bbt_descr = {
* We may change the layout if we can get the ECC info from the datasheet,
* else we will use all the (page + OOB).
*/
-static struct nand_ecclayout gpmi_hw_ecclayout = {
- .eccbytes = 0,
- .eccpos = { 0, },
- .oobfree = { {.offset = 0, .length = 0} }
+static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *geo = &this->bch_geometry;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = geo->page_size - mtd->writesize;
+
+ return 0;
+}
+
+static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct gpmi_nand_data *this = nand_get_controller_data(chip);
+ struct bch_geometry *geo = &this->bch_geometry;
+
+ if (section)
+ return -ERANGE;
+
+	/* Report whatever OOB space is still available. */
+ if (geo->page_size < mtd->writesize + mtd->oobsize) {
+ oobregion->offset = geo->page_size - mtd->writesize;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
+ .ecc = gpmi_ooblayout_ecc,
+ .free = gpmi_ooblayout_free,
};
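
gpmi_ooblayout_free() only reports OOB space when the BCH geometry leaves some: the controller claims geo->page_size bytes of every page for data, metadata and parity, and whatever is left of writesize + oobsize becomes the free region. A small sketch of that computation with made-up geometry numbers:

#include <stdio.h>

struct region { int offset; int length; };

/* free OOB = [page_size - writesize, oobsize) when page_size leaves room */
static void gpmi_free(int page_size, int writesize, int oobsize,
		      struct region *r)
{
	r->offset = r->length = 0;
	if (page_size < writesize + oobsize) {
		r->offset = page_size - writesize;
		r->length = oobsize - r->offset;
	}
}

int main(void)
{
	struct region r;

	/* Example geometry: 4096+224 page, BCH consumes 4262 bytes of it. */
	gpmi_free(4262, 4096, 224, &r);
	printf("free oob: offset %d, length %d\n", r.offset, r.length);
	return 0;
}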
static const struct gpmi_devdata gpmi_devdata_imx23 = {
@@ -141,7 +174,6 @@ static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
struct bch_geometry *geo = &this->bch_geometry;
struct nand_chip *chip = &this->nand;
struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_oobfree *of = gpmi_hw_ecclayout.oobfree;
unsigned int block_mark_bit_offset;
if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
@@ -229,12 +261,6 @@ static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
geo->page_size = mtd->writesize + geo->metadata_size +
(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
- /* The available oob size we have. */
- if (geo->page_size < mtd->writesize + mtd->oobsize) {
- of->offset = geo->page_size - mtd->writesize;
- of->length = mtd->oobsize - of->offset;
- }
-
geo->payload_size = mtd->writesize;
geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
@@ -797,6 +823,7 @@ static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
this->cmd_buffer = NULL;
this->data_buffer_dma = NULL;
+ this->raw_buffer = NULL;
this->page_buffer_virt = NULL;
this->page_buffer_size = 0;
}
@@ -1037,14 +1064,87 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
/* Loop over status bytes, accumulating ECC status. */
status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+ read_page_swap_end(this, buf, nfc_geo->payload_size,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+
for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
continue;
if (*status == STATUS_UNCORRECTABLE) {
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *eccbuf = this->raw_buffer;
+ int offset, bitoffset;
+ int eccbytes;
+ int flips;
+
+ /* Read ECC bytes into our internal raw_buffer */
+ offset = nfc_geo->metadata_size * 8;
+ offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
+ offset -= eccbits;
+ bitoffset = offset % 8;
+ eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
+ offset /= 8;
+ eccbytes -= offset;
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+ chip->read_buf(mtd, eccbuf, eccbytes);
+
+ /*
+ * ECC data are not byte aligned and we may have
+ * in-band data in the first and last byte of
+ * eccbuf. Set non-eccbits to one so that
+ * nand_check_erased_ecc_chunk() does not count them
+ * as bitflips.
+ */
+ if (bitoffset)
+ eccbuf[0] |= GENMASK(bitoffset - 1, 0);
+
+ bitoffset = (bitoffset + eccbits) % 8;
+ if (bitoffset)
+ eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
+
+ /*
+ * The ECC hardware has an uncorrectable ECC status
+ * code in case we have bitflips in an erased page. As
+ * nothing was written into this subpage the ECC is
+ * obviously wrong and we can not trust it. We assume
+ * at this point that we are reading an erased page and
+ * try to correct the bitflips in buffer up to
+ * ecc_strength bitflips. If this is a page with random
+ * data, we exceed this number of bitflips and have a
+ * ECC failure. Otherwise we use the corrected buffer.
+ */
+ if (i == 0) {
+ /* The first block includes metadata */
+ flips = nand_check_erased_ecc_chunk(
+ buf + i * nfc_geo->ecc_chunk_size,
+ nfc_geo->ecc_chunk_size,
+ eccbuf, eccbytes,
+ auxiliary_virt,
+ nfc_geo->metadata_size,
+ nfc_geo->ecc_strength);
+ } else {
+ flips = nand_check_erased_ecc_chunk(
+ buf + i * nfc_geo->ecc_chunk_size,
+ nfc_geo->ecc_chunk_size,
+ eccbuf, eccbytes,
+ NULL, 0,
+ nfc_geo->ecc_strength);
+ }
+
+ if (flips > 0) {
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ flips);
+ mtd->ecc_stats.corrected += flips;
+ continue;
+ }
+
mtd->ecc_stats.failed++;
continue;
}
+
mtd->ecc_stats.corrected += *status;
max_bitflips = max_t(unsigned int, max_bitflips, *status);
}
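
The GENMASK() fix-ups above are needed because the parity data is not byte aligned: the first and last bytes of eccbuf can carry in-band data bits, and those must be forced to one so they are not counted as bitflips when deciding whether an "uncorrectable" chunk is really an erased page. A user-space sketch of the masking and the zero-bit count — LOW_BITS() is a local stand-in for GENMASK(), and the popcount loop is a simplified model of nand_check_erased_ecc_chunk():

#include <stdio.h>
#include <stdint.h>

/* Mask with bits [0, n) set, like the kernel's GENMASK(n - 1, 0). */
#define LOW_BITS(n) ((uint8_t)((1u << (n)) - 1))

static int count_zero_bits(const uint8_t *buf, int len)
{
	int i, flips = 0;

	for (i = 0; i < len; i++)
		flips += __builtin_popcount((uint8_t)~buf[i]);
	return flips;
}

int main(void)
{
	/* 3 bytes holding 20 ECC bits starting at bit offset 3. */
	uint8_t eccbuf[3] = { 0xf8, 0xff, 0x7f };
	int bitoffset = 3, eccbits = 20;
	int tail = (bitoffset + eccbits) % 8;

	/* Force the non-ECC bits to 1 so they never count as flips. */
	if (bitoffset)
		eccbuf[0] |= LOW_BITS(bitoffset);
	if (tail)
		eccbuf[2] |= (uint8_t)~LOW_BITS(tail);

	/* An erased page reads all-0xff; any 0 bit left is a real flip. */
	printf("bitflips: %d\n", count_zero_bits(eccbuf, 3));
	return 0;
}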
@@ -1064,11 +1164,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
}
- read_page_swap_end(this, buf, nfc_geo->payload_size,
- this->payload_virt, this->payload_phys,
- nfc_geo->payload_size,
- payload_virt, payload_phys);
-
return max_bitflips;
}
@@ -1327,18 +1422,19 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
static int
gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
- struct nand_oobfree *of = mtd->ecclayout->oobfree;
+ struct mtd_oob_region of = { };
int status = 0;
/* Do we have available oob area? */
- if (!of->length)
+ mtd_ooblayout_free(mtd, 0, &of);
+ if (!of.length)
return -EPERM;
if (!nand_is_slc(chip))
return -EPERM;
- chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of->offset, page);
- chip->write_buf(mtd, chip->oob_poi + of->offset, of->length);
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of.offset, page);
+ chip->write_buf(mtd, chip->oob_poi + of.offset, of.length);
chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
status = chip->waitfunc(mtd, chip);
@@ -1840,6 +1936,7 @@ static void gpmi_nand_exit(struct gpmi_nand_data *this)
static int gpmi_init_last(struct gpmi_nand_data *this)
{
struct nand_chip *chip = &this->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
struct bch_geometry *bch_geo = &this->bch_geometry;
int ret;
@@ -1861,7 +1958,7 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
ecc->mode = NAND_ECC_HW;
ecc->size = bch_geo->ecc_chunk_size;
ecc->strength = bch_geo->ecc_strength;
- ecc->layout = &gpmi_hw_ecclayout;
+ mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
/*
* We only enable the subpage read when:
@@ -1914,16 +2011,6 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
/* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
this->swap_block_mark = !GPMI_IS_MX23(this);
- if (of_get_nand_on_flash_bbt(this->dev->of_node)) {
- chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
-
- if (of_property_read_bool(this->dev->of_node,
- "fsl,no-blockmark-swap"))
- this->swap_block_mark = false;
- }
- dev_dbg(this->dev, "Blockmark swapping %sabled\n",
- this->swap_block_mark ? "en" : "dis");
-
/*
* Allocate a temporary DMA buffer for reading ID in the
* nand_scan_ident().
@@ -1938,6 +2025,16 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
if (ret)
goto err_out;
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+ chip->bbt_options |= NAND_BBT_NO_OOB;
+
+ if (of_property_read_bool(this->dev->of_node,
+ "fsl,no-blockmark-swap"))
+ this->swap_block_mark = false;
+ }
+ dev_dbg(this->dev, "Blockmark swapping %sabled\n",
+ this->swap_block_mark ? "en" : "dis");
+
ret = gpmi_init_last(this);
if (ret)
goto err_out;
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
index 96502b624..9432546f4 100644
--- a/drivers/mtd/nand/hisi504_nand.c
+++ b/drivers/mtd/nand/hisi504_nand.c
@@ -19,7 +19,6 @@
* GNU General Public License for more details.
*/
#include <linux/of.h>
-#include <linux/of_mtd.h>
#include <linux/mtd/mtd.h>
#include <linux/sizes.h>
#include <linux/clk.h>
@@ -631,8 +630,28 @@ static void hisi_nfc_host_init(struct hinfc_host *host)
hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN);
}
-static struct nand_ecclayout nand_ecc_2K_16bits = {
- .oobfree = { {2, 6} },
+static int hisi_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ /* FIXME: add ECC bytes position */
+ return -ENOTSUPP;
+}
+
+static int hisi_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops hisi_ooblayout_ops = {
+ .ecc = hisi_ooblayout_ecc,
+ .free = hisi_ooblayout_free,
};
static int hisi_nfc_ecc_probe(struct hinfc_host *host)
@@ -642,10 +661,9 @@ static int hisi_nfc_ecc_probe(struct hinfc_host *host)
struct device *dev = host->dev;
struct nand_chip *chip = &host->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
- struct device_node *np = host->dev->of_node;
- size = of_get_nand_ecc_step_size(np);
- strength = of_get_nand_ecc_strength(np);
+ size = chip->ecc.size;
+ strength = chip->ecc.strength;
if (size != 1024) {
dev_err(dev, "error ecc size: %d\n", size);
return -EINVAL;
@@ -668,7 +686,7 @@ static int hisi_nfc_ecc_probe(struct hinfc_host *host)
case 16:
ecc_bits = 6;
if (mtd->writesize == 2048)
- chip->ecc.layout = &nand_ecc_2K_16bits;
+ mtd_set_ooblayout(mtd, &hisi_ooblayout_ops);
/* TODO: add more page size support */
break;
@@ -695,7 +713,7 @@ static int hisi_nfc_ecc_probe(struct hinfc_host *host)
static int hisi_nfc_probe(struct platform_device *pdev)
{
- int ret = 0, irq, buswidth, flag, max_chips = HINFC504_MAX_CHIP;
+ int ret = 0, irq, flag, max_chips = HINFC504_MAX_CHIP;
struct device *dev = &pdev->dev;
struct hinfc_host *host;
struct nand_chip *chip;
@@ -747,12 +765,6 @@ static int hisi_nfc_probe(struct platform_device *pdev)
chip->read_buf = hisi_nfc_read_buf;
chip->chip_delay = HINFC504_CHIP_DELAY;
- chip->ecc.mode = of_get_nand_ecc_mode(np);
-
- buswidth = of_get_nand_bus_width(np);
- if (buswidth == 16)
- chip->options |= NAND_BUSWIDTH_16;
-
hisi_nfc_host_init(host);
ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0x0, "nandc", host);
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 673ceb2a0..5551c36ad 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -221,7 +221,6 @@ static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
struct jz_nand *nand = mtd_to_jz_nand(mtd);
int i, error_count, index;
uint32_t reg, status, error;
- uint32_t t;
unsigned int timeout = 1000;
for (i = 0; i < 9; ++i)
@@ -476,7 +475,7 @@ static int jz_nand_probe(struct platform_device *pdev)
}
if (pdata && pdata->ident_callback) {
- pdata->ident_callback(pdev, chip, &pdata->partitions,
+ pdata->ident_callback(pdev, mtd, &pdata->partitions,
&pdata->num_partitions);
}
diff --git a/drivers/mtd/nand/jz4780_bch.c b/drivers/mtd/nand/jz4780_bch.c
index 755499c66..d74f4ba4a 100644
--- a/drivers/mtd/nand/jz4780_bch.c
+++ b/drivers/mtd/nand/jz4780_bch.c
@@ -287,7 +287,6 @@ static struct jz4780_bch *jz4780_bch_get(struct device_node *np)
bch = platform_get_drvdata(pdev);
clk_prepare_enable(bch->clk);
- bch->dev = &pdev->dev;
return bch;
}
diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c
index e1c016c9d..daf3c4217 100644
--- a/drivers/mtd/nand/jz4780_nand.c
+++ b/drivers/mtd/nand/jz4780_nand.c
@@ -17,7 +17,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_mtd.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
@@ -56,8 +55,6 @@ struct jz4780_nand_chip {
struct nand_chip chip;
struct list_head chip_list;
- struct nand_ecclayout ecclayout;
-
struct gpio_desc *busy_gpio;
struct gpio_desc *wp_gpio;
unsigned int reading: 1;
@@ -165,8 +162,7 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de
struct nand_chip *chip = &nand->chip;
struct mtd_info *mtd = nand_to_mtd(chip);
struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(chip->controller);
- struct nand_ecclayout *layout = &nand->ecclayout;
- u32 start, i;
+ int eccbytes;
chip->ecc.bytes = fls((1 + 8) * chip->ecc.size) *
(chip->ecc.strength / 8);
@@ -183,7 +179,6 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de
chip->ecc.correct = jz4780_nand_ecc_correct;
/* fall through */
case NAND_ECC_SOFT:
- case NAND_ECC_SOFT_BCH:
dev_info(dev, "using %s (strength %d, size %d, bytes %d)\n",
(nfc->bch) ? "hardware BCH" : "software ECC",
chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
@@ -201,23 +196,17 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de
return 0;
/* Generate ECC layout. ECC codes are right aligned in the OOB area. */
- layout->eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
+ eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
- if (layout->eccbytes > mtd->oobsize - 2) {
+ if (eccbytes > mtd->oobsize - 2) {
dev_err(dev,
"invalid ECC config: required %d ECC bytes, but only %d are available",
- layout->eccbytes, mtd->oobsize - 2);
+ eccbytes, mtd->oobsize - 2);
return -EINVAL;
}
- start = mtd->oobsize - layout->eccbytes;
- for (i = 0; i < layout->eccbytes; i++)
- layout->eccpos[i] = start + i;
-
- layout->oobfree[0].offset = 2;
- layout->oobfree[0].length = mtd->oobsize - layout->eccbytes - 2;
+ mtd->ooblayout = &nand_ooblayout_lp_ops;
- chip->ecc.layout = layout;
return 0;
}
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index d8c3e7afc..852388171 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -35,7 +35,6 @@
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/of.h>
-#include <linux/of_mtd.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_mlc.h>
#include <linux/io.h>
@@ -139,22 +138,37 @@ struct lpc32xx_nand_cfg_mlc {
unsigned num_parts;
};
-static struct nand_ecclayout lpc32xx_nand_oob = {
- .eccbytes = 40,
- .eccpos = { 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
- 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
- .oobfree = {
- { .offset = 0,
- .length = 6, },
- { .offset = 16,
- .length = 6, },
- { .offset = 32,
- .length = 6, },
- { .offset = 48,
- .length = 6, },
- },
+static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+ if (section >= nand_chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes;
+ oobregion->length = nand_chip->ecc.bytes;
+
+ return 0;
+}
+
+static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+ if (section >= nand_chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = 16 * section;
+ oobregion->length = 16 - nand_chip->ecc.bytes;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
+ .ecc = lpc32xx_ooblayout_ecc,
+ .free = lpc32xx_ooblayout_free,
};
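
The MLC controller's layout is the mirror image of the fsmc one: the 10 ECC bytes sit at the tail of every 16-byte OOB chunk and the first 6 bytes of each chunk stay free, exactly as the deleted lpc32xx_nand_oob table listed. A quick sketch printing both regions for an assumed 4-step page:

#include <stdio.h>

int main(void)
{
	int steps = 4, eccbytes = 10, s;

	for (s = 0; s < steps; s++) {
		int ecc_off  = (s + 1) * 16 - eccbytes; /* tail of the chunk */
		int free_off = s * 16;                  /* head of the chunk */

		printf("section %d: free %d..%d, ecc %d..%d\n", s,
		       free_off, free_off + (16 - eccbytes) - 1,
		       ecc_off, ecc_off + eccbytes - 1);
	}
	return 0;
}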
static struct nand_bbt_descr lpc32xx_nand_bbt = {
@@ -713,6 +727,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
nand_chip->ecc.write_oob = lpc32xx_write_oob;
nand_chip->ecc.read_oob = lpc32xx_read_oob;
nand_chip->ecc.strength = 4;
+ nand_chip->ecc.bytes = 10;
nand_chip->waitfunc = lpc32xx_waitfunc;
nand_chip->options = NAND_NO_SUBPAGE_WRITE;
@@ -751,7 +766,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
nand_chip->ecc.mode = NAND_ECC_HW;
nand_chip->ecc.size = 512;
- nand_chip->ecc.layout = &lpc32xx_nand_oob;
+ mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
host->mlcsubpages = mtd->writesize / 512;
/* initially clear interrupt status */
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 3b8f3735f..8d3edc349 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -35,7 +35,6 @@
#include <linux/mtd/nand_ecc.h>
#include <linux/gpio.h>
#include <linux/of.h>
-#include <linux/of_mtd.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_slc.h>
@@ -146,13 +145,38 @@
* NAND ECC Layout for small page NAND devices
* Note: For large and huge page devices, the default layouts are used
*/
-static struct nand_ecclayout lpc32xx_nand_oob_16 = {
- .eccbytes = 6,
- .eccpos = {10, 11, 12, 13, 14, 15},
- .oobfree = {
- { .offset = 0, .length = 4 },
- { .offset = 6, .length = 4 },
- },
+static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = 6;
+ oobregion->offset = 10;
+
+ return 0;
+}
+
+static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 4;
+ } else {
+ oobregion->offset = 6;
+ oobregion->length = 4;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
+ .ecc = lpc32xx_ooblayout_ecc,
+ .free = lpc32xx_ooblayout_free,
};
static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
@@ -194,7 +218,6 @@ struct lpc32xx_nand_cfg_slc {
uint32_t rwidth;
uint32_t rhold;
uint32_t rsetup;
- bool use_bbt;
int wp_gpio;
struct mtd_partition *parts;
unsigned num_parts;
@@ -604,7 +627,8 @@ static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
int oob_required, int page)
{
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
- int stat, i, status;
+ struct mtd_oob_region oobregion = { };
+ int stat, i, status, error;
uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
/* Issue read command */
@@ -620,7 +644,11 @@ static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);
/* Pointer to ECC data retrieved from NAND spare area */
- oobecc = chip->oob_poi + chip->ecc.layout->eccpos[0];
+ error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ if (error)
+ return error;
+
+ oobecc = chip->oob_poi + oobregion.offset;
for (i = 0; i < chip->ecc.steps; i++) {
stat = chip->ecc.correct(mtd, buf, oobecc,
@@ -666,7 +694,8 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
int oob_required, int page)
{
struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
- uint8_t *pb = chip->oob_poi + chip->ecc.layout->eccpos[0];
+ struct mtd_oob_region oobregion = { };
+ uint8_t *pb;
int error;
/* Write data, calculate ECC on outbound data */
@@ -678,6 +707,11 @@ static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
* The calculated ECC needs some manual work done to it before
* committing it to NAND. Process the calculated ECC and place
* the resultant values directly into the OOB buffer. */
+ error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
+ if (error)
+ return error;
+
+ pb = chip->oob_poi + oobregion.offset;
lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
/* Write ECC data to device */
@@ -747,7 +781,6 @@ static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
return NULL;
}
- ncfg->use_bbt = of_get_nand_on_flash_bbt(np);
ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
return ncfg;
@@ -875,26 +908,22 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
* custom BBT marker layout.
*/
if (mtd->writesize <= 512)
- chip->ecc.layout = &lpc32xx_nand_oob_16;
+ mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
/* These sizes remain the same regardless of page size */
chip->ecc.size = 256;
chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
chip->ecc.prepad = chip->ecc.postpad = 0;
- /* Avoid extra scan if using BBT, setup BBT support */
- if (host->ncfg->use_bbt) {
- chip->bbt_options |= NAND_BBT_USE_FLASH;
-
- /*
- * Use a custom BBT marker setup for small page FLASH that
- * won't interfere with the ECC layout. Large and huge page
- * FLASH use the standard layout.
- */
- if (mtd->writesize <= 512) {
- chip->bbt_td = &bbt_smallpage_main_descr;
- chip->bbt_md = &bbt_smallpage_mirror_descr;
- }
+ /*
+ * Use a custom BBT marker setup for small page FLASH that
+ * won't interfere with the ECC layout. Large and huge page
+ * FLASH use the standard layout.
+ */
+ if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
+ mtd->writesize <= 512) {
+ chip->bbt_td = &bbt_smallpage_main_descr;
+ chip->bbt_md = &bbt_smallpage_mirror_descr;
}
/*
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 5d7843fff..7eacb2f54 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -710,6 +710,7 @@ static int mpc5121_nfc_probe(struct platform_device *op)
chip->select_chip = mpc5121_nfc_select_chip;
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
/* Support external chip-select logic on ADS5121 board */
if (of_machine_is_compatible("fsl,mpc5121ads")) {
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 854c83259..5173fadc9 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -34,7 +34,6 @@
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_mtd.h>
#include <asm/mach/flash.h>
#include <linux/platform_data/mtd-mxc_nand.h>
@@ -149,7 +148,7 @@ struct mxc_nand_devtype_data {
int (*check_int)(struct mxc_nand_host *);
void (*irq_control)(struct mxc_nand_host *, int);
u32 (*get_ecc_status)(struct mxc_nand_host *);
- struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k;
+ const struct mtd_ooblayout_ops *ooblayout;
void (*select_chip)(struct mtd_info *mtd, int chip);
int (*correct_data)(struct mtd_info *mtd, u_char *dat,
u_char *read_ecc, u_char *calc_ecc);
@@ -200,73 +199,6 @@ struct mxc_nand_host {
struct mxc_nand_platform_data pdata;
};
-/* OOB placement block for use with hardware ecc generation */
-static struct nand_ecclayout nandv1_hw_eccoob_smallpage = {
- .eccbytes = 5,
- .eccpos = {6, 7, 8, 9, 10},
- .oobfree = {{0, 5}, {12, 4}, }
-};
-
-static struct nand_ecclayout nandv1_hw_eccoob_largepage = {
- .eccbytes = 20,
- .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26,
- 38, 39, 40, 41, 42, 54, 55, 56, 57, 58},
- .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, }
-};
-
-/* OOB description for 512 byte pages with 16 byte OOB */
-static struct nand_ecclayout nandv2_hw_eccoob_smallpage = {
- .eccbytes = 1 * 9,
- .eccpos = {
- 7, 8, 9, 10, 11, 12, 13, 14, 15
- },
- .oobfree = {
- {.offset = 0, .length = 5}
- }
-};
-
-/* OOB description for 2048 byte pages with 64 byte OOB */
-static struct nand_ecclayout nandv2_hw_eccoob_largepage = {
- .eccbytes = 4 * 9,
- .eccpos = {
- 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 39, 40, 41, 42, 43, 44, 45, 46, 47,
- 55, 56, 57, 58, 59, 60, 61, 62, 63
- },
- .oobfree = {
- {.offset = 2, .length = 4},
- {.offset = 16, .length = 7},
- {.offset = 32, .length = 7},
- {.offset = 48, .length = 7}
- }
-};
-
-/* OOB description for 4096 byte pages with 128 byte OOB */
-static struct nand_ecclayout nandv2_hw_eccoob_4k = {
- .eccbytes = 8 * 9,
- .eccpos = {
- 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 39, 40, 41, 42, 43, 44, 45, 46, 47,
- 55, 56, 57, 58, 59, 60, 61, 62, 63,
- 71, 72, 73, 74, 75, 76, 77, 78, 79,
- 87, 88, 89, 90, 91, 92, 93, 94, 95,
- 103, 104, 105, 106, 107, 108, 109, 110, 111,
- 119, 120, 121, 122, 123, 124, 125, 126, 127,
- },
- .oobfree = {
- {.offset = 2, .length = 4},
- {.offset = 16, .length = 7},
- {.offset = 32, .length = 7},
- {.offset = 48, .length = 7},
- {.offset = 64, .length = 7},
- {.offset = 80, .length = 7},
- {.offset = 96, .length = 7},
- {.offset = 112, .length = 7},
- }
-};
-
static const char * const part_probes[] = {
"cmdlinepart", "RedBoot", "ofpart", NULL };
@@ -942,6 +874,99 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
}
}
+static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+ if (section >= nand_chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 6;
+ oobregion->length = nand_chip->ecc.bytes;
+
+ return 0;
+}
+
+static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+ if (section > nand_chip->ecc.steps)
+ return -ERANGE;
+
+ if (!section) {
+ if (mtd->writesize <= 512) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 2;
+ oobregion->length = 4;
+ }
+ } else {
+ oobregion->offset = ((section - 1) * 16) +
+ nand_chip->ecc.bytes + 6;
+ if (section < nand_chip->ecc.steps)
+ oobregion->length = (section * 16) + 6 -
+ oobregion->offset;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mxc_v1_ooblayout_ops = {
+ .ecc = mxc_v1_ooblayout_ecc,
+ .free = mxc_v1_ooblayout_free,
+};
+
+static int mxc_v2_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
+
+ if (section >= nand_chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * stepsize) + 7;
+ oobregion->length = nand_chip->ecc.bytes;
+
+ return 0;
+}
+
+static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand_chip = mtd_to_nand(mtd);
+ int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
+
+ if (section > nand_chip->ecc.steps)
+ return -ERANGE;
+
+ if (!section) {
+ if (mtd->writesize <= 512) {
+ oobregion->offset = 0;
+ oobregion->length = 5;
+ } else {
+ oobregion->offset = 2;
+ oobregion->length = 4;
+ }
+ } else {
+ oobregion->offset = section * stepsize;
+ oobregion->length = 7;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mxc_v2_ooblayout_ops = {
+ .ecc = mxc_v2_ooblayout_ecc,
+ .free = mxc_v2_ooblayout_free,
+};
+
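
mxc_v2_ooblayout_ecc() replaces both the static nandv2 tables and the deleted ecc_8bit_layout_4k() helper with one rule: the spare area is carved into per-step chunks of 16 bytes (9 ECC bytes, 4-bit mode) or 26 bytes (18 ECC bytes, 8-bit mode), with the ECC always starting 7 bytes into the chunk. A sketch of the two geometries (step counts chosen for illustration):

#include <stdio.h>

static void dump_v2_layout(int steps, int eccbytes)
{
	/* 9 ECC bytes -> 16-byte chunks, 18 ECC bytes -> 26-byte chunks */
	int stepsize = (eccbytes == 9) ? 16 : 26;
	int s;

	for (s = 0; s < steps; s++)
		printf("  section %d: ecc at %d, len %d\n",
		       s, s * stepsize + 7, eccbytes);
}

int main(void)
{
	printf("4-bit ECC (2k page, 4 steps):\n");
	dump_v2_layout(4, 9);
	printf("8-bit ECC (4k page, 8 steps):\n");
	dump_v2_layout(8, 18);
	return 0;
}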
/*
* v2 and v3 type controllers can do 4bit or 8bit ecc depending
* on how much oob the nand chip has. For 8bit ecc we need at least
@@ -959,23 +984,6 @@ static int get_eccsize(struct mtd_info *mtd)
return 8;
}
-static void ecc_8bit_layout_4k(struct nand_ecclayout *layout)
-{
- int i, j;
-
- layout->eccbytes = 8*18;
- for (i = 0; i < 8; i++)
- for (j = 0; j < 18; j++)
- layout->eccpos[i*18 + j] = i*26 + j + 7;
-
- layout->oobfree[0].offset = 2;
- layout->oobfree[0].length = 4;
- for (i = 1; i < 8; i++) {
- layout->oobfree[i].offset = i*26;
- layout->oobfree[i].length = 7;
- }
-}
-
static void preset_v1(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
@@ -1269,9 +1277,7 @@ static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
.check_int = check_int_v1_v2,
.irq_control = irq_control_v1_v2,
.get_ecc_status = get_ecc_status_v1,
- .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
- .ecclayout_2k = &nandv1_hw_eccoob_largepage,
- .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+ .ooblayout = &mxc_v1_ooblayout_ops,
.select_chip = mxc_nand_select_chip_v1_v3,
.correct_data = mxc_nand_correct_data_v1,
.irqpending_quirk = 1,
@@ -1294,9 +1300,7 @@ static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
.check_int = check_int_v1_v2,
.irq_control = irq_control_v1_v2,
.get_ecc_status = get_ecc_status_v1,
- .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
- .ecclayout_2k = &nandv1_hw_eccoob_largepage,
- .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+ .ooblayout = &mxc_v1_ooblayout_ops,
.select_chip = mxc_nand_select_chip_v1_v3,
.correct_data = mxc_nand_correct_data_v1,
.irqpending_quirk = 0,
@@ -1320,9 +1324,7 @@ static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
.check_int = check_int_v1_v2,
.irq_control = irq_control_v1_v2,
.get_ecc_status = get_ecc_status_v2,
- .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
- .ecclayout_2k = &nandv2_hw_eccoob_largepage,
- .ecclayout_4k = &nandv2_hw_eccoob_4k,
+ .ooblayout = &mxc_v2_ooblayout_ops,
.select_chip = mxc_nand_select_chip_v2,
.correct_data = mxc_nand_correct_data_v2_v3,
.irqpending_quirk = 0,
@@ -1346,9 +1348,7 @@ static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
.check_int = check_int_v3,
.irq_control = irq_control_v3,
.get_ecc_status = get_ecc_status_v3,
- .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
- .ecclayout_2k = &nandv2_hw_eccoob_largepage,
- .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+ .ooblayout = &mxc_v2_ooblayout_ops,
.select_chip = mxc_nand_select_chip_v1_v3,
.correct_data = mxc_nand_correct_data_v2_v3,
.irqpending_quirk = 0,
@@ -1373,9 +1373,7 @@ static const struct mxc_nand_devtype_data imx53_nand_devtype_data = {
.check_int = check_int_v3,
.irq_control = irq_control_v3,
.get_ecc_status = get_ecc_status_v3,
- .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
- .ecclayout_2k = &nandv2_hw_eccoob_largepage,
- .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+ .ooblayout = &mxc_v2_ooblayout_ops,
.select_chip = mxc_nand_select_chip_v1_v3,
.correct_data = mxc_nand_correct_data_v2_v3,
.irqpending_quirk = 0,
@@ -1461,25 +1459,12 @@ MODULE_DEVICE_TABLE(of, mxcnd_dt_ids);
static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
{
struct device_node *np = host->dev->of_node;
- struct mxc_nand_platform_data *pdata = &host->pdata;
const struct of_device_id *of_id =
of_match_device(mxcnd_dt_ids, host->dev);
- int buswidth;
if (!np)
return 1;
- if (of_get_nand_ecc_mode(np) >= 0)
- pdata->hw_ecc = 1;
-
- pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
-
- buswidth = of_get_nand_bus_width(np);
- if (buswidth < 0)
- return buswidth;
-
- pdata->width = buswidth / 8;
-
host->devtype_data = of_id->data;
return 0;
@@ -1576,27 +1561,22 @@ static int mxcnd_probe(struct platform_device *pdev)
this->select_chip = host->devtype_data->select_chip;
this->ecc.size = 512;
- this->ecc.layout = host->devtype_data->ecclayout_512;
+ mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
if (host->pdata.hw_ecc) {
- this->ecc.calculate = mxc_nand_calculate_ecc;
- this->ecc.hwctl = mxc_nand_enable_hwecc;
- this->ecc.correct = host->devtype_data->correct_data;
this->ecc.mode = NAND_ECC_HW;
} else {
this->ecc.mode = NAND_ECC_SOFT;
+ this->ecc.algo = NAND_ECC_HAMMING;
}
/* NAND bus width determines access functions used by upper layer */
if (host->pdata.width == 2)
this->options |= NAND_BUSWIDTH_16;
- if (host->pdata.flash_bbt) {
- this->bbt_td = &bbt_main_descr;
- this->bbt_md = &bbt_mirror_descr;
- /* update flash based bbt */
+ /* update flash based bbt */
+ if (host->pdata.flash_bbt)
this->bbt_options |= NAND_BBT_USE_FLASH;
- }
init_completion(&host->op_completion);
@@ -1637,6 +1617,26 @@ static int mxcnd_probe(struct platform_device *pdev)
goto escan;
}
+ switch (this->ecc.mode) {
+ case NAND_ECC_HW:
+ this->ecc.calculate = mxc_nand_calculate_ecc;
+ this->ecc.hwctl = mxc_nand_enable_hwecc;
+ this->ecc.correct = host->devtype_data->correct_data;
+ break;
+
+ case NAND_ECC_SOFT:
+ break;
+
+ default:
+ err = -EINVAL;
+ goto escan;
+ }
+
+ if (this->bbt_options & NAND_BBT_USE_FLASH) {
+ this->bbt_td = &bbt_main_descr;
+ this->bbt_md = &bbt_mirror_descr;
+ }
+
/* allocate the right size buffer now */
devm_kfree(&pdev->dev, (void *)host->data_buf);
host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize,
@@ -1649,12 +1649,11 @@ static int mxcnd_probe(struct platform_device *pdev)
/* Call preset again, with correct writesize this time */
host->devtype_data->preset(mtd);
- if (mtd->writesize == 2048)
- this->ecc.layout = host->devtype_data->ecclayout_2k;
- else if (mtd->writesize == 4096) {
- this->ecc.layout = host->devtype_data->ecclayout_4k;
- if (get_eccsize(mtd) == 8)
- ecc_8bit_layout_4k(this->ecc.layout);
+ if (!this->ecc.bytes) {
+ if (host->eccsize == 8)
+ this->ecc.bytes = 18;
+ else if (host->eccsize == 4)
+ this->ecc.bytes = 9;
}
/*
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 557b8462f..0b0dc29d2 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -43,65 +43,100 @@
#include <linux/mtd/nand_bch.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
-#include <linux/leds.h>
#include <linux/io.h>
#include <linux/mtd/partitions.h>
-#include <linux/of_mtd.h>
+#include <linux/of.h>
+
+static int nand_get_device(struct mtd_info *mtd, int new_state);
+
+static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops);
/* Define default oob placement schemes for large and small page devices */
-static struct nand_ecclayout nand_oob_8 = {
- .eccbytes = 3,
- .eccpos = {0, 1, 2},
- .oobfree = {
- {.offset = 3,
- .length = 2},
- {.offset = 6,
- .length = 2} }
-};
+static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
-static struct nand_ecclayout nand_oob_16 = {
- .eccbytes = 6,
- .eccpos = {0, 1, 2, 3, 6, 7},
- .oobfree = {
- {.offset = 8,
- . length = 8} }
-};
+ if (section > 1)
+ return -ERANGE;
-static struct nand_ecclayout nand_oob_64 = {
- .eccbytes = 24,
- .eccpos = {
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63},
- .oobfree = {
- {.offset = 2,
- .length = 38} }
-};
+ if (!section) {
+ oobregion->offset = 0;
+ oobregion->length = 4;
+ } else {
+ oobregion->offset = 6;
+ oobregion->length = ecc->total - 4;
+ }
+
+ return 0;
+}
+
+static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
-static struct nand_ecclayout nand_oob_128 = {
- .eccbytes = 48,
- .eccpos = {
- 80, 81, 82, 83, 84, 85, 86, 87,
- 88, 89, 90, 91, 92, 93, 94, 95,
- 96, 97, 98, 99, 100, 101, 102, 103,
- 104, 105, 106, 107, 108, 109, 110, 111,
- 112, 113, 114, 115, 116, 117, 118, 119,
- 120, 121, 122, 123, 124, 125, 126, 127},
- .oobfree = {
- {.offset = 2,
- .length = 78} }
+ if (mtd->oobsize == 16) {
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = 8;
+ oobregion->offset = 8;
+ } else {
+ oobregion->length = 2;
+ if (!section)
+ oobregion->offset = 3;
+ else
+ oobregion->offset = 6;
+ }
+
+ return 0;
+}
+
+const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
+ .ecc = nand_ooblayout_ecc_sp,
+ .free = nand_ooblayout_free_sp,
};
+EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
-static int nand_get_device(struct mtd_info *mtd, int new_state);
+static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
-static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops);
+ if (section)
+ return -ERANGE;
-/*
- * For devices which display every fart in the system on a separate LED. Is
- * compiled away when LED support is disabled.
- */
-DEFINE_LED_TRIGGER(nand_led_trigger);
+ oobregion->length = ecc->total;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - 2;
+ oobregion->offset = 2;
+
+ return 0;
+}
+
+const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
+ .ecc = nand_ooblayout_ecc_lp,
+ .free = nand_ooblayout_free_lp,
+};
+EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
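
The new small-page and large-page defaults derive the old nand_oob_8/16/64/128 tables from two inputs, ecc->total and mtd->oobsize: on large pages the ECC block is right-aligned and the free area runs from byte 2 up to it. A sketch checking the rule against the deleted 64-byte table (24 ECC bytes at 40..63, free space at 2..39):

#include <stdio.h>

int main(void)
{
	int oobsize = 64, ecc_total = 24;

	/* large-page rule: ECC right-aligned, free area after the two
	 * bad-block marker bytes */
	int ecc_off  = oobsize - ecc_total;
	int free_off = 2;
	int free_len = oobsize - ecc_total - 2;

	printf("ecc:  %d..%d\n", ecc_off, oobsize - 1);              /* 40..63 */
	printf("free: %d..%d\n", free_off, free_off + free_len - 1); /* 2..39 */
	return 0;
}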
static int check_offs_len(struct mtd_info *mtd,
loff_t ofs, uint64_t len)
@@ -540,19 +575,16 @@ void nand_wait_ready(struct mtd_info *mtd)
if (in_interrupt() || oops_in_progress)
return panic_nand_wait_ready(mtd, timeo);
- led_trigger_event(nand_led_trigger, LED_FULL);
/* Wait until command is processed or timeout occurs */
timeo = jiffies + msecs_to_jiffies(timeo);
do {
if (chip->dev_ready(mtd))
- goto out;
+ return;
cond_resched();
} while (time_before(jiffies, timeo));
if (!chip->dev_ready(mtd))
pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
-out:
- led_trigger_event(nand_led_trigger, LED_OFF);
}
EXPORT_SYMBOL_GPL(nand_wait_ready);
@@ -885,8 +917,6 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
int status;
unsigned long timeo = 400;
- led_trigger_event(nand_led_trigger, LED_FULL);
-
/*
* Apply this short delay always to ensure that we do wait tWB in any
* case on any machine.
@@ -910,7 +940,6 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
cond_resched();
} while (time_before(jiffies, timeo));
}
- led_trigger_event(nand_led_trigger, LED_OFF);
status = (int)chip->read_byte(mtd);
/* This can happen if in case of timeout or buggy dev_ready */
@@ -1292,13 +1321,12 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- int i, eccsize = chip->ecc.size;
+ int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
uint8_t *ecc_calc = chip->buffers->ecccalc;
uint8_t *ecc_code = chip->buffers->ecccode;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
unsigned int max_bitflips = 0;
chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
@@ -1306,8 +1334,10 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
- for (i = 0; i < chip->ecc.total; i++)
- ecc_code[i] = chip->oob_poi[eccpos[i]];
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
eccsteps = chip->ecc.steps;
p = buf;
@@ -1339,14 +1369,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
int page)
{
- int start_step, end_step, num_steps;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
+ int start_step, end_step, num_steps, ret;
uint8_t *p;
int data_col_addr, i, gaps = 0;
int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
- int index;
+ int index, section = 0;
unsigned int max_bitflips = 0;
+ struct mtd_oob_region oobregion = { };
/* Column address within the page aligned to ECC size (256bytes) */
start_step = data_offs / chip->ecc.size;
@@ -1374,12 +1404,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
* The performance is faster if we position offsets according to
* ecc.pos. Let's make sure that there are no gaps in ECC positions.
*/
- for (i = 0; i < eccfrag_len - 1; i++) {
- if (eccpos[i + index] + 1 != eccpos[i + index + 1]) {
- gaps = 1;
- break;
- }
- }
+ ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
+ if (ret)
+ return ret;
+
+ if (oobregion.length < eccfrag_len)
+ gaps = 1;
+
if (gaps) {
chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -1388,20 +1419,23 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
* Send the command to read the particular ECC bytes take care
* about buswidth alignment in read_buf.
*/
- aligned_pos = eccpos[index] & ~(busw - 1);
+ aligned_pos = oobregion.offset & ~(busw - 1);
aligned_len = eccfrag_len;
- if (eccpos[index] & (busw - 1))
+ if (oobregion.offset & (busw - 1))
aligned_len++;
- if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
+ if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
+ (busw - 1))
aligned_len++;
chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
- mtd->writesize + aligned_pos, -1);
+ mtd->writesize + aligned_pos, -1);
chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
}
- for (i = 0; i < eccfrag_len; i++)
- chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
+ ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
+ chip->oob_poi, index, eccfrag_len);
+ if (ret)
+ return ret;
p = bufpoi + data_col_addr;
for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
@@ -1442,13 +1476,12 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page)
{
- int i, eccsize = chip->ecc.size;
+ int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
uint8_t *ecc_calc = chip->buffers->ecccalc;
uint8_t *ecc_code = chip->buffers->ecccode;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
unsigned int max_bitflips = 0;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
@@ -1458,8 +1491,10 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
}
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- for (i = 0; i < chip->ecc.total; i++)
- ecc_code[i] = chip->oob_poi[eccpos[i]];
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
eccsteps = chip->ecc.steps;
p = buf;
@@ -1504,12 +1539,11 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
{
- int i, eccsize = chip->ecc.size;
+ int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
uint8_t *ecc_code = chip->buffers->ecccode;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
uint8_t *ecc_calc = chip->buffers->ecccalc;
unsigned int max_bitflips = 0;
@@ -1518,8 +1552,10 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
- for (i = 0; i < chip->ecc.total; i++)
- ecc_code[i] = chip->oob_poi[eccpos[i]];
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
@@ -1620,14 +1656,17 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
/**
* nand_transfer_oob - [INTERN] Transfer oob to client buffer
- * @chip: nand chip structure
+ * @mtd: mtd info structure
* @oob: oob destination address
* @ops: oob ops structure
* @len: size of oob to transfer
*/
-static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
+static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
struct mtd_oob_ops *ops, size_t len)
{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret;
+
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
@@ -1635,31 +1674,12 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
memcpy(oob, chip->oob_poi + ops->ooboffs, len);
return oob + len;
- case MTD_OPS_AUTO_OOB: {
- struct nand_oobfree *free = chip->ecc.layout->oobfree;
- uint32_t boffs = 0, roffs = ops->ooboffs;
- size_t bytes = 0;
-
- for (; free->length && len; free++, len -= bytes) {
- /* Read request not from offset 0? */
- if (unlikely(roffs)) {
- if (roffs >= free->length) {
- roffs -= free->length;
- continue;
- }
- boffs = free->offset + roffs;
- bytes = min_t(size_t, len,
- (free->length - roffs));
- roffs = 0;
- } else {
- bytes = min_t(size_t, len, free->length);
- boffs = free->offset;
- }
- memcpy(oob, chip->oob_poi + boffs, bytes);
- oob += bytes;
- }
- return oob;
- }
+ case MTD_OPS_AUTO_OOB:
+ ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
+ ops->ooboffs, len);
+ BUG_ON(ret);
+ return oob + len;
+
default:
BUG();
}
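/*
 * Editor's sketch, not part of the patch: with MTD_OPS_AUTO_OOB the new
 * mtd_ooblayout_get_databytes() call packs only the free-region bytes, so
 * a caller reads at most mtd->oobavail bytes per page. The wrapper below
 * is hypothetical and just shows the calling convention.
 */
static int example_read_auto_oob(struct mtd_info *mtd, loff_t from,
				 u8 *buf, size_t len)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_AUTO_OOB,
		.ooblen = len,
		.oobbuf = buf,
	};

	return mtd_read_oob(mtd, from, &ops);
}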
@@ -1793,7 +1813,7 @@ read_retry:
int toread = min(oobreadlen, max_oobsize);
if (toread) {
- oob = nand_transfer_oob(chip,
+ oob = nand_transfer_oob(mtd,
oob, ops, toread);
oobreadlen -= toread;
}
@@ -1906,13 +1926,13 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
* @chip: nand chip info structure
* @page: page number to read
*/
-static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
}
+EXPORT_SYMBOL(nand_read_oob_std);
/**
* nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
@@ -1921,8 +1941,8 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
* @chip: nand chip info structure
* @page: page number to read
*/
-static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
{
int length = mtd->oobsize;
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
@@ -1950,6 +1970,7 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
return 0;
}
+EXPORT_SYMBOL(nand_read_oob_syndrome);
/**
* nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
@@ -1957,8 +1978,7 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
* @chip: nand chip info structure
* @page: page number to write
*/
-static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
- int page)
+int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
{
int status = 0;
const uint8_t *buf = chip->oob_poi;
@@ -1973,6 +1993,7 @@ static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
return status & NAND_STATUS_FAIL ? -EIO : 0;
}
+EXPORT_SYMBOL(nand_write_oob_std);
/**
* nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
@@ -1981,8 +2002,8 @@ static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
* @chip: nand chip info structure
* @page: page number to write
*/
-static int nand_write_oob_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip, int page)
+int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+ int page)
{
int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
int eccsize = chip->ecc.size, length = mtd->oobsize;
@@ -2032,6 +2053,7 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
return status & NAND_STATUS_FAIL ? -EIO : 0;
}
+EXPORT_SYMBOL(nand_write_oob_syndrome);
/**
* nand_do_read_oob - [INTERN] NAND read out-of-band
@@ -2091,7 +2113,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
break;
len = min(len, readlen);
- buf = nand_transfer_oob(chip, buf, ops, len);
+ buf = nand_transfer_oob(mtd, buf, ops, len);
if (chip->options & NAND_NEED_READRDY) {
/* Apply delay or wait for ready/busy pin */
@@ -2250,19 +2272,20 @@ static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
- int i, eccsize = chip->ecc.size;
+ int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *ecc_calc = chip->buffers->ecccalc;
const uint8_t *p = buf;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
/* Software ECC calculation */
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
- for (i = 0; i < chip->ecc.total; i++)
- chip->oob_poi[eccpos[i]] = ecc_calc[i];
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
}
@@ -2279,12 +2302,11 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required,
int page)
{
- int i, eccsize = chip->ecc.size;
+ int i, eccsize = chip->ecc.size, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *ecc_calc = chip->buffers->ecccalc;
const uint8_t *p = buf;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -2292,8 +2314,10 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
}
- for (i = 0; i < chip->ecc.total; i++)
- chip->oob_poi[eccpos[i]] = ecc_calc[i];
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -2321,11 +2345,10 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
uint32_t start_step = offset / ecc_size;
uint32_t end_step = (offset + data_len - 1) / ecc_size;
int oob_bytes = mtd->oobsize / ecc_steps;
- int step, i;
+ int step, ret;
for (step = 0; step < ecc_steps; step++) {
/* configure controller for WRITE access */
@@ -2353,8 +2376,10 @@ static int nand_write_subpage_hwecc(struct mtd_info *mtd,
/* copy calculated ECC for whole page to chip->buffer->oob */
/* this include masked-value(0xFF) for unwritten subpages */
ecc_calc = chip->buffers->ecccalc;
- for (i = 0; i < chip->ecc.total; i++)
- chip->oob_poi[eccpos[i]] = ecc_calc[i];
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
/* write OOB buffer to NAND device */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -2491,6 +2516,7 @@ static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
struct mtd_oob_ops *ops)
{
struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret;
/*
* Initialise to all 0xFF, to avoid the possibility of left over OOB
@@ -2505,31 +2531,12 @@ static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
memcpy(chip->oob_poi + ops->ooboffs, oob, len);
return oob + len;
- case MTD_OPS_AUTO_OOB: {
- struct nand_oobfree *free = chip->ecc.layout->oobfree;
- uint32_t boffs = 0, woffs = ops->ooboffs;
- size_t bytes = 0;
-
- for (; free->length && len; free++, len -= bytes) {
- /* Write request not from offset 0? */
- if (unlikely(woffs)) {
- if (woffs >= free->length) {
- woffs -= free->length;
- continue;
- }
- boffs = free->offset + woffs;
- bytes = min_t(size_t, len,
- (free->length - woffs));
- woffs = 0;
- } else {
- bytes = min_t(size_t, len, free->length);
- boffs = free->offset;
- }
- memcpy(chip->oob_poi + boffs, oob, bytes);
- oob += bytes;
- }
- return oob;
- }
+ case MTD_OPS_AUTO_OOB:
+ ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
+ ops->ooboffs, len);
+ BUG_ON(ret);
+ return oob + len;
+
default:
BUG();
}
@@ -3964,10 +3971,115 @@ ident_done:
return type;
}
+static const char * const nand_ecc_modes[] = {
+ [NAND_ECC_NONE] = "none",
+ [NAND_ECC_SOFT] = "soft",
+ [NAND_ECC_HW] = "hw",
+ [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
+ [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
+};
+
+static int of_get_nand_ecc_mode(struct device_node *np)
+{
+ const char *pm;
+ int err, i;
+
+ err = of_property_read_string(np, "nand-ecc-mode", &pm);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
+ if (!strcasecmp(pm, nand_ecc_modes[i]))
+ return i;
+
+ /*
+	 * For backward compatibility we support a few obsolete values that no
+	 * longer have mappings into nand_ecc_modes_t (they were merged with
+	 * other enums).
+ */
+ if (!strcasecmp(pm, "soft_bch"))
+ return NAND_ECC_SOFT;
+
+ return -ENODEV;
+}
+
+static const char * const nand_ecc_algos[] = {
+ [NAND_ECC_HAMMING] = "hamming",
+ [NAND_ECC_BCH] = "bch",
+};
+
+static int of_get_nand_ecc_algo(struct device_node *np)
+{
+ const char *pm;
+ int err, i;
+
+ err = of_property_read_string(np, "nand-ecc-algo", &pm);
+ if (!err) {
+ for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
+ if (!strcasecmp(pm, nand_ecc_algos[i]))
+ return i;
+ return -ENODEV;
+ }
+
+ /*
+	 * For backward compatibility we also read "nand-ecc-mode", checking
+	 * for some obsolete values that used to specify the ECC algorithm.
+ */
+ err = of_property_read_string(np, "nand-ecc-mode", &pm);
+ if (err < 0)
+ return err;
+
+ if (!strcasecmp(pm, "soft"))
+ return NAND_ECC_HAMMING;
+ else if (!strcasecmp(pm, "soft_bch"))
+ return NAND_ECC_BCH;
+
+ return -ENODEV;
+}
+
+static int of_get_nand_ecc_step_size(struct device_node *np)
+{
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
+ return ret ? ret : val;
+}
+
+static int of_get_nand_ecc_strength(struct device_node *np)
+{
+ int ret;
+ u32 val;
+
+ ret = of_property_read_u32(np, "nand-ecc-strength", &val);
+ return ret ? ret : val;
+}
+
+static int of_get_nand_bus_width(struct device_node *np)
+{
+ u32 val;
+
+ if (of_property_read_u32(np, "nand-bus-width", &val))
+ return 8;
+
+ switch (val) {
+ case 8:
+ case 16:
+ return val;
+ default:
+ return -EIO;
+ }
+}
+
+static bool of_get_nand_on_flash_bbt(struct device_node *np)
+{
+ return of_property_read_bool(np, "nand-on-flash-bbt");
+}
+
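/*
 * Editor's note, not part of the patch: an illustrative (hypothetical)
 * device-tree fragment consumed by the of_get_nand_* helpers above:
 *
 *	nand@0 {
 *		nand-ecc-mode = "soft";
 *		nand-ecc-algo = "bch";
 *		nand-ecc-step-size = <512>;
 *		nand-ecc-strength = <4>;
 *		nand-bus-width = <16>;
 *		nand-on-flash-bbt;
 *	};
 */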
static int nand_dt_init(struct nand_chip *chip)
{
struct device_node *dn = nand_get_flash_node(chip);
- int ecc_mode, ecc_strength, ecc_step;
+ int ecc_mode, ecc_algo, ecc_strength, ecc_step;
if (!dn)
return 0;
@@ -3979,6 +4091,7 @@ static int nand_dt_init(struct nand_chip *chip)
chip->bbt_options |= NAND_BBT_USE_FLASH;
ecc_mode = of_get_nand_ecc_mode(dn);
+ ecc_algo = of_get_nand_ecc_algo(dn);
ecc_strength = of_get_nand_ecc_strength(dn);
ecc_step = of_get_nand_ecc_step_size(dn);
@@ -3991,6 +4104,9 @@ static int nand_dt_init(struct nand_chip *chip)
if (ecc_mode >= 0)
chip->ecc.mode = ecc_mode;
+ if (ecc_algo >= 0)
+ chip->ecc.algo = ecc_algo;
+
if (ecc_strength >= 0)
chip->ecc.strength = ecc_strength;
@@ -4067,6 +4183,82 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips,
}
EXPORT_SYMBOL(nand_scan_ident);
+static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
+ return -EINVAL;
+
+ switch (ecc->algo) {
+ case NAND_ECC_HAMMING:
+ ecc->calculate = nand_calculate_ecc;
+ ecc->correct = nand_correct_data;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->size)
+ ecc->size = 256;
+ ecc->bytes = 3;
+ ecc->strength = 1;
+ return 0;
+ case NAND_ECC_BCH:
+ if (!mtd_nand_has_bch()) {
+ WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+ return -EINVAL;
+ }
+ ecc->calculate = nand_bch_calculate_ecc;
+ ecc->correct = nand_bch_correct_data;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
+ /*
+ * Board driver should supply ecc.size and ecc.strength
+ * values to select how many bits are correctable.
+ * Otherwise, default to 4 bits for large page devices.
+ */
+ if (!ecc->size && (mtd->oobsize >= 64)) {
+ ecc->size = 512;
+ ecc->strength = 4;
+ }
+
+ /*
+	 * If no ECC placement scheme was provided, pick up the default
+	 * large-page one.
+ */
+ if (!mtd->ooblayout) {
+ /* handle large page devices only */
+ if (mtd->oobsize < 64) {
+ WARN(1, "OOB layout is required when using software BCH on small pages\n");
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+ }
+
+ /* See nand_bch_init() for details. */
+ ecc->bytes = 0;
+ ecc->priv = nand_bch_init(mtd);
+ if (!ecc->priv) {
+ WARN(1, "BCH ECC initialization failed!\n");
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ WARN(1, "Unsupported ECC algorithm!\n");
+ return -EINVAL;
+ }
+}
+
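/*
 * Editor's sketch, not part of the patch: a board driver now selects
 * 4-bit/512-byte software BCH with the mode/algo pair instead of the
 * removed NAND_ECC_SOFT_BCH mode; nand_set_ecc_soft_ops() fills in the
 * remaining hooks. Field values here are illustrative.
 */
chip->ecc.mode = NAND_ECC_SOFT;
chip->ecc.algo = NAND_ECC_BCH;
chip->ecc.size = 512;
chip->ecc.strength = 4;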
/*
* Check if the chip configuration meet the datasheet requirements.
@@ -4111,14 +4303,15 @@ static bool nand_ecc_strength_good(struct mtd_info *mtd)
*/
int nand_scan_tail(struct mtd_info *mtd)
{
- int i;
struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc;
struct nand_buffers *nbuf;
+ int ret;
/* New bad blocks should be marked in OOB, flash-based BBT, or both */
- BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
- !(chip->bbt_options & NAND_BBT_USE_FLASH));
+ if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
+ !(chip->bbt_options & NAND_BBT_USE_FLASH)))
+ return -EINVAL;
if (!(chip->options & NAND_OWN_BUFFERS)) {
nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
@@ -4141,24 +4334,22 @@ int nand_scan_tail(struct mtd_info *mtd)
/*
* If no default placement scheme is given, select an appropriate one.
*/
- if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
+ if (!mtd->ooblayout &&
+ !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
switch (mtd->oobsize) {
case 8:
- ecc->layout = &nand_oob_8;
- break;
case 16:
- ecc->layout = &nand_oob_16;
+ mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
break;
case 64:
- ecc->layout = &nand_oob_64;
- break;
case 128:
- ecc->layout = &nand_oob_128;
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
break;
default:
- pr_warn("No oob scheme defined for oobsize %d\n",
- mtd->oobsize);
- BUG();
+ WARN(1, "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ ret = -EINVAL;
+ goto err_free;
}
}
@@ -4174,8 +4365,9 @@ int nand_scan_tail(struct mtd_info *mtd)
case NAND_ECC_HW_OOB_FIRST:
/* Similar to NAND_ECC_HW, but a separate read_page handle */
if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
- pr_warn("No ECC functions supplied; hardware ECC not possible\n");
- BUG();
+ WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
+ ret = -EINVAL;
+ goto err_free;
}
if (!ecc->read_page)
ecc->read_page = nand_read_page_hwecc_oob_first;
@@ -4205,8 +4397,9 @@ int nand_scan_tail(struct mtd_info *mtd)
ecc->read_page == nand_read_page_hwecc ||
!ecc->write_page ||
ecc->write_page == nand_write_page_hwecc)) {
- pr_warn("No ECC functions supplied; hardware ECC not possible\n");
- BUG();
+ WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
+ ret = -EINVAL;
+ goto err_free;
}
/* Use standard syndrome read/write page function? */
if (!ecc->read_page)
@@ -4224,61 +4417,22 @@ int nand_scan_tail(struct mtd_info *mtd)
if (mtd->writesize >= ecc->size) {
if (!ecc->strength) {
- pr_warn("Driver must set ecc.strength when using hardware ECC\n");
- BUG();
+ WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
+ ret = -EINVAL;
+ goto err_free;
}
break;
}
pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
ecc->size, mtd->writesize);
ecc->mode = NAND_ECC_SOFT;
+ ecc->algo = NAND_ECC_HAMMING;
case NAND_ECC_SOFT:
- ecc->calculate = nand_calculate_ecc;
- ecc->correct = nand_correct_data;
- ecc->read_page = nand_read_page_swecc;
- ecc->read_subpage = nand_read_subpage;
- ecc->write_page = nand_write_page_swecc;
- ecc->read_page_raw = nand_read_page_raw;
- ecc->write_page_raw = nand_write_page_raw;
- ecc->read_oob = nand_read_oob_std;
- ecc->write_oob = nand_write_oob_std;
- if (!ecc->size)
- ecc->size = 256;
- ecc->bytes = 3;
- ecc->strength = 1;
- break;
-
- case NAND_ECC_SOFT_BCH:
- if (!mtd_nand_has_bch()) {
- pr_warn("CONFIG_MTD_NAND_ECC_BCH not enabled\n");
- BUG();
- }
- ecc->calculate = nand_bch_calculate_ecc;
- ecc->correct = nand_bch_correct_data;
- ecc->read_page = nand_read_page_swecc;
- ecc->read_subpage = nand_read_subpage;
- ecc->write_page = nand_write_page_swecc;
- ecc->read_page_raw = nand_read_page_raw;
- ecc->write_page_raw = nand_write_page_raw;
- ecc->read_oob = nand_read_oob_std;
- ecc->write_oob = nand_write_oob_std;
- /*
- * Board driver should supply ecc.size and ecc.strength values
- * to select how many bits are correctable. Otherwise, default
- * to 4 bits for large page devices.
- */
- if (!ecc->size && (mtd->oobsize >= 64)) {
- ecc->size = 512;
- ecc->strength = 4;
- }
-
- /* See nand_bch_init() for details. */
- ecc->bytes = 0;
- ecc->priv = nand_bch_init(mtd);
- if (!ecc->priv) {
- pr_warn("BCH ECC initialization failed!\n");
- BUG();
+ ret = nand_set_ecc_soft_ops(mtd);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_free;
}
break;
@@ -4296,8 +4450,9 @@ int nand_scan_tail(struct mtd_info *mtd)
break;
default:
- pr_warn("Invalid NAND_ECC_MODE %d\n", ecc->mode);
- BUG();
+ WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
+ ret = -EINVAL;
+ goto err_free;
}
/* For many systems, the standard OOB write also works for raw */
@@ -4306,20 +4461,9 @@ int nand_scan_tail(struct mtd_info *mtd)
if (!ecc->write_oob_raw)
ecc->write_oob_raw = ecc->write_oob;
- /*
- * The number of bytes available for a client to place data into
- * the out of band area.
- */
- mtd->oobavail = 0;
- if (ecc->layout) {
- for (i = 0; ecc->layout->oobfree[i].length; i++)
- mtd->oobavail += ecc->layout->oobfree[i].length;
- }
-
- /* ECC sanity check: warn if it's too weak */
- if (!nand_ecc_strength_good(mtd))
- pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
- mtd->name);
+ /* propagate ecc info to mtd_info */
+ mtd->ecc_strength = ecc->strength;
+ mtd->ecc_step_size = ecc->size;
/*
* Set the number of read / write steps for one page depending on ECC
@@ -4327,11 +4471,27 @@ int nand_scan_tail(struct mtd_info *mtd)
*/
ecc->steps = mtd->writesize / ecc->size;
if (ecc->steps * ecc->size != mtd->writesize) {
- pr_warn("Invalid ECC parameters\n");
- BUG();
+ WARN(1, "Invalid ECC parameters\n");
+ ret = -EINVAL;
+ goto err_free;
}
ecc->total = ecc->steps * ecc->bytes;
+ /*
+ * The number of bytes available for a client to place data into
+ * the out of band area.
+ */
+ ret = mtd_ooblayout_count_freebytes(mtd);
+ if (ret < 0)
+ ret = 0;
+
+ mtd->oobavail = ret;
+
+ /* ECC sanity check: warn if it's too weak */
+ if (!nand_ecc_strength_good(mtd))
+ pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
+ mtd->name);
+
/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
switch (ecc->steps) {
@@ -4356,7 +4516,6 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Large page NAND with SOFT_ECC should support subpage reads */
switch (ecc->mode) {
case NAND_ECC_SOFT:
- case NAND_ECC_SOFT_BCH:
if (chip->page_shift > 9)
chip->options |= NAND_SUBPAGE_READ;
break;
@@ -4388,10 +4547,6 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->_block_markbad = nand_block_markbad;
mtd->writebufsize = mtd->writesize;
- /* propagate ecc info to mtd_info */
- mtd->ecclayout = ecc->layout;
- mtd->ecc_strength = ecc->strength;
- mtd->ecc_step_size = ecc->size;
/*
* Initialize bitflip_threshold to its default prior scan_bbt() call.
* scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
@@ -4406,6 +4561,10 @@ int nand_scan_tail(struct mtd_info *mtd)
/* Build bad block table */
return chip->scan_bbt(mtd);
+err_free:
+ if (!(chip->options & NAND_OWN_BUFFERS))
+ kfree(chip->buffers);
+ return ret;
}
EXPORT_SYMBOL(nand_scan_tail);
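/*
 * Editor's sketch, not part of the patch: since nand_scan_tail() now
 * returns -EINVAL instead of calling BUG(), a (hypothetical) driver probe
 * can propagate the error:
 */
err = nand_scan_ident(mtd, 1, NULL);
if (err)
	return err;
/* ... configure ecc.mode/ecc.algo, ooblayout, etc. here ... */
err = nand_scan_tail(mtd);
if (err)
	return err;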
@@ -4449,7 +4608,8 @@ void nand_release(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
- if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
+ if (chip->ecc.mode == NAND_ECC_SOFT &&
+ chip->ecc.algo == NAND_ECC_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
mtd_device_unregister(mtd);
@@ -4466,20 +4626,6 @@ void nand_release(struct mtd_info *mtd)
}
EXPORT_SYMBOL_GPL(nand_release);
-static int __init nand_base_init(void)
-{
- led_trigger_register_simple("nand-disk", &nand_led_trigger);
- return 0;
-}
-
-static void __exit nand_base_exit(void)
-{
- led_trigger_unregister_simple(nand_led_trigger);
-}
-
-module_init(nand_base_init);
-module_exit(nand_base_exit);
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
index b585bae37..44763f87e 100644
--- a/drivers/mtd/nand/nand_bch.c
+++ b/drivers/mtd/nand/nand_bch.c
@@ -32,13 +32,11 @@
/**
* struct nand_bch_control - private NAND BCH control structure
* @bch: BCH control structure
- * @ecclayout: private ecc layout for this BCH configuration
* @errloc: error location array
* @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
*/
struct nand_bch_control {
struct bch_control *bch;
- struct nand_ecclayout ecclayout;
unsigned int *errloc;
unsigned char *eccmask;
};
@@ -124,7 +122,6 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
{
struct nand_chip *nand = mtd_to_nand(mtd);
unsigned int m, t, eccsteps, i;
- struct nand_ecclayout *layout = nand->ecc.layout;
struct nand_bch_control *nbc = NULL;
unsigned char *erased_page;
unsigned int eccsize = nand->ecc.size;
@@ -161,34 +158,10 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
eccsteps = mtd->writesize/eccsize;
- /* if no ecc placement scheme was provided, build one */
- if (!layout) {
-
- /* handle large page devices only */
- if (mtd->oobsize < 64) {
- printk(KERN_WARNING "must provide an oob scheme for "
- "oobsize %d\n", mtd->oobsize);
- goto fail;
- }
-
- layout = &nbc->ecclayout;
- layout->eccbytes = eccsteps*eccbytes;
-
- /* reserve 2 bytes for bad block marker */
- if (layout->eccbytes+2 > mtd->oobsize) {
- printk(KERN_WARNING "no suitable oob scheme available "
- "for oobsize %d eccbytes %u\n", mtd->oobsize,
- eccbytes);
- goto fail;
- }
- /* put ecc bytes at oob tail */
- for (i = 0; i < layout->eccbytes; i++)
- layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
-
- layout->oobfree[0].offset = 2;
- layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
-
- nand->ecc.layout = layout;
+ /* Check that we have an oob layout description. */
+ if (!mtd->ooblayout) {
+		pr_warn("missing oob scheme\n");
+ goto fail;
}
/* sanity checks */
@@ -196,7 +169,18 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
goto fail;
}
- if (layout->eccbytes != (eccsteps*eccbytes)) {
+
+ /*
+ * ecc->steps and ecc->total might be used by mtd->ooblayout->ecc(),
+ * which is called by mtd_ooblayout_count_eccbytes().
+ * Make sure they are properly initialized before calling
+ * mtd_ooblayout_count_eccbytes().
+ * FIXME: we should probably rework the sequencing in nand_scan_tail()
+ * to avoid setting those fields twice.
+ */
+ nand->ecc.steps = eccsteps;
+ nand->ecc.total = eccsteps * eccbytes;
+ if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) {
printk(KERN_WARNING "invalid ecc layout\n");
goto fail;
}
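/*
 * Editor's note, not part of the patch: a worked example of the sanity
 * check above. For ecc.size = 512 and ecc.strength = 4, nand_bch_init()
 * uses m = fls(8 * 512) = 13 Galois-field bits and needs
 * DIV_ROUND_UP(4 * 13, 8) = 7 ECC bytes per step; a 2048-byte page then
 * gives eccsteps = 4 and ecc.total = 28, which
 * mtd_ooblayout_count_eccbytes(mtd) must match.
 */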
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index a58169a28..1eb934414 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -569,7 +569,7 @@ static void nandsim_debugfs_remove(struct nandsim *ns)
*
* RETURNS: 0 if success, -ENOMEM if memory alloc fails.
*/
-static int alloc_device(struct nandsim *ns)
+static int __init alloc_device(struct nandsim *ns)
{
struct file *cfile;
int i, err;
@@ -654,7 +654,7 @@ static void free_device(struct nandsim *ns)
}
}
-static char *get_partition_name(int i)
+static char __init *get_partition_name(int i)
{
return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
}
@@ -664,7 +664,7 @@ static char *get_partition_name(int i)
*
* RETURNS: 0 if success, -ERRNO if failure.
*/
-static int init_nandsim(struct mtd_info *mtd)
+static int __init init_nandsim(struct mtd_info *mtd)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct nandsim *ns = nand_get_controller_data(chip);
@@ -2261,6 +2261,7 @@ static int __init ns_init_module(void)
chip->read_buf = ns_nand_read_buf;
chip->read_word = ns_nand_read_word;
chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
/* and 'badblocks' parameters to work */
chip->options |= NAND_SKIP_BBTSCAN;
@@ -2338,7 +2339,8 @@ static int __init ns_init_module(void)
retval = -EINVAL;
goto error;
}
- chip->ecc.mode = NAND_ECC_SOFT_BCH;
+ chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_BCH;
chip->ecc.size = 512;
chip->ecc.strength = bch;
chip->ecc.bytes = eccbytes;
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index dbc5b571c..8f64011d3 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -261,6 +261,7 @@ static int nuc900_nand_probe(struct platform_device *pdev)
chip->chip_delay = 50;
chip->options = 0;
chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nuc900_nand->reg = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 0749ca1a1..a136da8df 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -12,6 +12,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
@@ -28,6 +29,7 @@
#include <linux/mtd/nand_bch.h>
#include <linux/platform_data/elm.h>
+#include <linux/omap-gpmc.h>
#include <linux/platform_data/mtd-nand-omap2.h>
#define DRIVER_NAME "omap2-nand"
@@ -151,13 +153,17 @@ static struct nand_hw_control omap_gpmc_controller = {
};
struct omap_nand_info {
- struct omap_nand_platform_data *pdata;
struct nand_chip nand;
struct platform_device *pdev;
int gpmc_cs;
- unsigned long phys_base;
+ bool dev_ready;
+ enum nand_io xfer_type;
+ int devsize;
enum omap_ecc ecc_opt;
+ struct device_node *elm_of_node;
+
+ unsigned long phys_base;
struct completion comp;
struct dma_chan *dma;
int gpmc_irq_fifo;
@@ -168,12 +174,14 @@ struct omap_nand_info {
} iomode;
u_char *buf;
int buf_len;
+ /* Interface to GPMC */
struct gpmc_nand_regs reg;
- /* generated at runtime depending on ECC algorithm and layout selected */
- struct nand_ecclayout oobinfo;
+ struct gpmc_nand_ops *ops;
+ bool flash_bbt;
/* fields specific for BCHx_HW ECC scheme */
struct device *elm_dev;
- struct device_node *of_node;
+ /* NAND ready gpio */
+ struct gpio_desc *ready_gpiod;
};
static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
@@ -208,7 +216,7 @@ static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
*/
val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
- (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
+ (dma_mode << DMA_MPU_MODE_SHIFT) | (is_write & 0x1));
writel(val, info->reg.gpmc_prefetch_config1);
/* Start the prefetch engine */
@@ -288,14 +296,13 @@ static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
u_char *p = (u_char *)buf;
- u32 status = 0;
+ bool status;
while (len--) {
iowrite8(*p++, info->nand.IO_ADDR_W);
/* wait until buffer is available for write */
do {
- status = readl(info->reg.gpmc_status) &
- STATUS_BUFF_EMPTY;
+ status = info->ops->nand_writebuffer_empty();
} while (!status);
}
}
@@ -323,7 +330,7 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
u16 *p = (u16 *) buf;
- u32 status = 0;
+ bool status;
/* FIXME try bursts of writesw() or DMA ... */
len >>= 1;
@@ -331,8 +338,7 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
iowrite16(*p++, info->nand.IO_ADDR_W);
/* wait until buffer is available for write */
do {
- status = readl(info->reg.gpmc_status) &
- STATUS_BUFF_EMPTY;
+ status = info->ops->nand_writebuffer_empty();
} while (!status);
}
}
@@ -467,17 +473,8 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
int ret;
u32 val;
- if (addr >= high_memory) {
- struct page *p1;
-
- if (((size_t)addr & PAGE_MASK) !=
- ((size_t)(addr + len - 1) & PAGE_MASK))
- goto out_copy;
- p1 = vmalloc_to_page(addr);
- if (!p1)
- goto out_copy;
- addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
- }
+ if (!virt_addr_valid(addr))
+ goto out_copy;
sg_init_one(&sg, addr, len);
n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
@@ -497,6 +494,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
tx->callback_param = &info->comp;
dmaengine_submit(tx);
+ init_completion(&info->comp);
+
+ /* setup and start DMA using dma_addr */
+ dma_async_issue_pending(info->dma);
+
/* configure and start prefetch transfer */
ret = omap_prefetch_enable(info->gpmc_cs,
PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
@@ -504,10 +506,6 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
/* PFPW engine is busy, use cpu copy method */
goto out_copy_unmap;
- init_completion(&info->comp);
- dma_async_issue_pending(info->dma);
-
- /* setup and start DMA using dma_addr */
wait_for_completion(&info->comp);
tim = 0;
limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
@@ -1017,21 +1015,16 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
}
/**
- * omap_dev_ready - calls the platform specific dev_ready function
+ * omap_dev_ready - checks the NAND Ready GPIO line
* @mtd: MTD device structure
+ *
+ * Returns true if ready and false if busy.
*/
static int omap_dev_ready(struct mtd_info *mtd)
{
- unsigned int val = 0;
struct omap_nand_info *info = mtd_to_omap(mtd);
- val = readl(info->reg.gpmc_status);
-
- if ((val & 0x100) == 0x100) {
- return 1;
- } else {
- return 0;
- }
+ return gpiod_get_value(info->ready_gpiod);
}
/**
@@ -1495,9 +1488,8 @@ static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page)
{
- int i;
+ int ret;
uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
/* Enable GPMC ecc engine */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
@@ -1508,8 +1500,10 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
/* Update ecc vector from GPMC result registers */
chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
- for (i = 0; i < chip->ecc.total; i++)
- chip->oob_poi[eccpos[i]] = ecc_calc[i];
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
/* Write ecc vector to OOB area */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -1536,10 +1530,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
{
uint8_t *ecc_calc = chip->buffers->ecccalc;
uint8_t *ecc_code = chip->buffers->ecccode;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
- uint8_t *oob = &chip->oob_poi[eccpos[0]];
- uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0];
- int stat;
+ int stat, ret;
unsigned int max_bitflips = 0;
/* Enable GPMC ecc engine */
@@ -1549,13 +1540,18 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->read_buf(mtd, buf, mtd->writesize);
/* Read oob bytes */
- chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1);
- chip->read_buf(mtd, oob, chip->ecc.total);
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+ mtd->writesize + BADBLOCK_MARKER_LENGTH, -1);
+ chip->read_buf(mtd, chip->oob_poi + BADBLOCK_MARKER_LENGTH,
+ chip->ecc.total);
/* Calculate ecc bytes */
chip->ecc.calculate(mtd, buf, ecc_calc);
- memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total);
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);
@@ -1630,7 +1626,7 @@ static bool omap2_nand_ecc_check(struct omap_nand_info *info,
"CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
return false;
}
- if (ecc_needs_elm && !is_elm_present(info, pdata->elm_of_node)) {
+ if (ecc_needs_elm && !is_elm_present(info, info->elm_of_node)) {
dev_err(&info->pdev->dev, "ELM not available\n");
return false;
}
@@ -1638,43 +1634,230 @@ static bool omap2_nand_ecc_check(struct omap_nand_info *info,
return true;
}
+static const char * const nand_xfer_types[] = {
+ [NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
+ [NAND_OMAP_POLLED] = "polled",
+ [NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
+ [NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
+};
+
+static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
+{
+ struct device_node *child = dev->of_node;
+ int i;
+ const char *s;
+ u32 cs;
+
+ if (of_property_read_u32(child, "reg", &cs) < 0) {
+ dev_err(dev, "reg not found in DT\n");
+ return -EINVAL;
+ }
+
+ info->gpmc_cs = cs;
+
+ /* detect availability of ELM module. Won't be present pre-OMAP4 */
+ info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
+ if (!info->elm_of_node) {
+ info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
+ if (!info->elm_of_node)
+ dev_dbg(dev, "ti,elm-id not in DT\n");
+ }
+
+ /* select ecc-scheme for NAND */
+ if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
+ dev_err(dev, "ti,nand-ecc-opt not found\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(s, "sw")) {
+ info->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
+ } else if (!strcmp(s, "ham1") ||
+ !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) {
+ info->ecc_opt = OMAP_ECC_HAM1_CODE_HW;
+ } else if (!strcmp(s, "bch4")) {
+ if (info->elm_of_node)
+ info->ecc_opt = OMAP_ECC_BCH4_CODE_HW;
+ else
+ info->ecc_opt = OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
+ } else if (!strcmp(s, "bch8")) {
+ if (info->elm_of_node)
+ info->ecc_opt = OMAP_ECC_BCH8_CODE_HW;
+ else
+ info->ecc_opt = OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
+ } else if (!strcmp(s, "bch16")) {
+ info->ecc_opt = OMAP_ECC_BCH16_CODE_HW;
+ } else {
+ dev_err(dev, "unrecognized value for ti,nand-ecc-opt\n");
+ return -EINVAL;
+ }
+
+ /* select data transfer mode */
+ if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) {
+ for (i = 0; i < ARRAY_SIZE(nand_xfer_types); i++) {
+ if (!strcasecmp(s, nand_xfer_types[i])) {
+ info->xfer_type = i;
+ return 0;
+ }
+ }
+
+ dev_err(dev, "unrecognized value for ti,nand-xfer-type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
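/*
 * Editor's note, not part of the patch: an illustrative (hypothetical)
 * device-tree fragment matching the parsing above; the rb-gpios line
 * corresponds to the optional "rb" GPIO requested later in probe:
 *
 *	nand@0,0 {
 *		compatible = "ti,omap2-nand";
 *		reg = <0 0 4>;			// GPMC chip-select 0
 *		ti,nand-ecc-opt = "bch8";
 *		ti,nand-xfer-type = "prefetch-dma";
 *		ti,elm-id = <&elm>;
 *		rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>;
 *	};
 */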
+static int omap_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct nand_chip *chip = &info->nand;
+ int off = BADBLOCK_MARKER_LENGTH;
+
+ if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ off = 1;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = off;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int omap_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct omap_nand_info *info = mtd_to_omap(mtd);
+ struct nand_chip *chip = &info->nand;
+ int off = BADBLOCK_MARKER_LENGTH;
+
+ if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ off = 1;
+
+ if (section)
+ return -ERANGE;
+
+ off += chip->ecc.total;
+ if (off >= mtd->oobsize)
+ return -ERANGE;
+
+ oobregion->offset = off;
+ oobregion->length = mtd->oobsize - off;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
+ .ecc = omap_ooblayout_ecc,
+ .free = omap_ooblayout_free,
+};
+
+static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int off = BADBLOCK_MARKER_LENGTH;
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ /*
+	 * When SW correction is employed, one OMAP-specific marker byte is
+ * reserved after each ECC step.
+ */
+ oobregion->offset = off + (section * (chip->ecc.bytes + 1));
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int off = BADBLOCK_MARKER_LENGTH;
+
+ if (section)
+ return -ERANGE;
+
+ /*
+	 * When SW correction is employed, one OMAP-specific marker byte is
+ * reserved after each ECC step.
+ */
+ off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
+ if (off >= mtd->oobsize)
+ return -ERANGE;
+
+ oobregion->offset = off;
+ oobregion->length = mtd->oobsize - off;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
+ .ecc = omap_sw_ooblayout_ecc,
+ .free = omap_sw_ooblayout_free,
+};
+
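/*
 * Editor's note, not part of the patch: a worked example of the sw layout
 * above, assuming BCH8 software detection on a 2048-byte page with a
 * 64-byte OOB (ecc.steps = 4, ecc.bytes = 13, BADBLOCK_MARKER_LENGTH = 2).
 * Each step reserves 13 + 1 bytes, so the ECC sections sit at offsets
 * 2, 16, 30 and 44 (length 13 each), and the free region starts at
 * 2 + 4 * 14 = 58 with length 64 - 58 = 6.
 */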
static int omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
- struct omap_nand_platform_data *pdata;
+ struct omap_nand_platform_data *pdata = NULL;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
- struct nand_ecclayout *ecclayout;
int err;
- int i;
dma_cap_mask_t mask;
unsigned sig;
- unsigned oob_index;
struct resource *res;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata == NULL) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -ENODEV;
- }
+ struct device *dev = &pdev->dev;
+ int min_oobbytes = BADBLOCK_MARKER_LENGTH;
+ int oobbytes_per_step;
info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
GFP_KERNEL);
if (!info)
return -ENOMEM;
+ info->pdev = pdev;
+
+ if (dev->of_node) {
+ if (omap_get_dt_info(dev, info))
+ return -EINVAL;
+ } else {
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform data missing\n");
+ return -EINVAL;
+ }
+
+ info->gpmc_cs = pdata->cs;
+ info->reg = pdata->reg;
+ info->ecc_opt = pdata->ecc_opt;
+ if (pdata->dev_ready)
+ dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n");
+
+ info->xfer_type = pdata->xfer_type;
+ info->devsize = pdata->devsize;
+ info->elm_of_node = pdata->elm_of_node;
+ info->flash_bbt = pdata->flash_bbt;
+ }
+
platform_set_drvdata(pdev, info);
+ info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
+ if (!info->ops) {
+ dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
+ return -ENODEV;
+ }
- info->pdev = pdev;
- info->gpmc_cs = pdata->cs;
- info->reg = pdata->reg;
- info->of_node = pdata->of_node;
- info->ecc_opt = pdata->ecc_opt;
nand_chip = &info->nand;
mtd = nand_to_mtd(nand_chip);
mtd->dev.parent = &pdev->dev;
nand_chip->ecc.priv = NULL;
- nand_set_flash_node(nand_chip, pdata->of_node);
+ nand_set_flash_node(nand_chip, dev->of_node);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
@@ -1688,6 +1871,13 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
nand_chip->cmd_ctrl = omap_hwcontrol;
+ info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
+ GPIOD_IN);
+ if (IS_ERR(info->ready_gpiod)) {
+ dev_err(dev, "failed to get ready gpio\n");
+ return PTR_ERR(info->ready_gpiod);
+ }
+
/*
* If RDY/BSY line is connected to OMAP then use the omap ready
* function and the generic nand_wait function which reads the status
@@ -1695,7 +1885,7 @@ static int omap_nand_probe(struct platform_device *pdev)
* chip delay which is slightly more than tR (AC Timing) of the NAND
* device and read status register until you get a failure or success
*/
- if (pdata->dev_ready) {
+ if (info->ready_gpiod) {
nand_chip->dev_ready = omap_dev_ready;
nand_chip->chip_delay = 0;
} else {
@@ -1703,21 +1893,25 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->chip_delay = 50;
}
- if (pdata->flash_bbt)
- nand_chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
- else
- nand_chip->options |= NAND_SKIP_BBTSCAN;
+ if (info->flash_bbt)
+ nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
/* scan NAND device connected to chip controller */
- nand_chip->options |= pdata->devsize & NAND_BUSWIDTH_16;
+ nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
if (nand_scan_ident(mtd, 1, NULL)) {
- dev_err(&info->pdev->dev, "scan failed, may be bus-width mismatch\n");
+ dev_err(&info->pdev->dev,
+ "scan failed, may be bus-width mismatch\n");
err = -ENXIO;
goto return_error;
}
+ if (nand_chip->bbt_options & NAND_BBT_USE_FLASH)
+ nand_chip->bbt_options |= NAND_BBT_NO_OOB;
+ else
+ nand_chip->options |= NAND_SKIP_BBTSCAN;
+
/* re-populate low-level callbacks based on xfer modes */
- switch (pdata->xfer_type) {
+ switch (info->xfer_type) {
case NAND_OMAP_PREFETCH_POLLED:
nand_chip->read_buf = omap_read_buf_pref;
nand_chip->write_buf = omap_write_buf_pref;
@@ -1797,7 +1991,7 @@ static int omap_nand_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev,
- "xfer_type(%d) not supported!\n", pdata->xfer_type);
+ "xfer_type(%d) not supported!\n", info->xfer_type);
err = -EINVAL;
goto return_error;
}
@@ -1809,16 +2003,15 @@ static int omap_nand_probe(struct platform_device *pdev)
/*
* Bail out earlier to let NAND_ECC_SOFT code create its own
- * ecclayout instead of using ours.
+ * ooblayout instead of using ours.
*/
if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
nand_chip->ecc.mode = NAND_ECC_SOFT;
+ nand_chip->ecc.algo = NAND_ECC_HAMMING;
goto scan_tail;
}
/* populate MTD interface based on ECC scheme */
- ecclayout = &info->oobinfo;
- nand_chip->ecc.layout = ecclayout;
switch (info->ecc_opt) {
case OMAP_ECC_HAM1_CODE_HW:
pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
@@ -1829,19 +2022,12 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.calculate = omap_calculate_ecc;
nand_chip->ecc.hwctl = omap_enable_hwecc;
nand_chip->ecc.correct = omap_correct_data;
- /* define ECC layout */
- ecclayout->eccbytes = nand_chip->ecc.bytes *
- (mtd->writesize /
- nand_chip->ecc.size);
- if (nand_chip->options & NAND_BUSWIDTH_16)
- oob_index = BADBLOCK_MARKER_LENGTH;
- else
- oob_index = 1;
- for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
- ecclayout->eccpos[i] = oob_index;
- /* no reserved-marker in ecclayout for this ecc-scheme */
- ecclayout->oobfree->offset =
- ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+ oobbytes_per_step = nand_chip->ecc.bytes;
+
+ if (!(nand_chip->options & NAND_BUSWIDTH_16))
+ min_oobbytes = 1;
+
break;
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
@@ -1853,19 +2039,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
nand_chip->ecc.calculate = omap_calculate_ecc_bch;
- /* define ECC layout */
- ecclayout->eccbytes = nand_chip->ecc.bytes *
- (mtd->writesize /
- nand_chip->ecc.size);
- oob_index = BADBLOCK_MARKER_LENGTH;
- for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
- ecclayout->eccpos[i] = oob_index;
- if (((i + 1) % nand_chip->ecc.bytes) == 0)
- oob_index++;
- }
- /* include reserved-marker in ecclayout->oobfree calculation */
- ecclayout->oobfree->offset = 1 +
- ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
+ /* Reserve one byte for the OMAP marker */
+ oobbytes_per_step = nand_chip->ecc.bytes + 1;
/* software bch library is used for locating errors */
nand_chip->ecc.priv = nand_bch_init(mtd);
if (!nand_chip->ecc.priv) {
@@ -1887,16 +2063,8 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
- /* define ECC layout */
- ecclayout->eccbytes = nand_chip->ecc.bytes *
- (mtd->writesize /
- nand_chip->ecc.size);
- oob_index = BADBLOCK_MARKER_LENGTH;
- for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
- ecclayout->eccpos[i] = oob_index;
- /* reserved marker already included in ecclayout->eccbytes */
- ecclayout->oobfree->offset =
- ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+ oobbytes_per_step = nand_chip->ecc.bytes;
err = elm_config(info->elm_dev, BCH4_ECC,
mtd->writesize / nand_chip->ecc.size,
@@ -1914,19 +2082,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data;
nand_chip->ecc.calculate = omap_calculate_ecc_bch;
- /* define ECC layout */
- ecclayout->eccbytes = nand_chip->ecc.bytes *
- (mtd->writesize /
- nand_chip->ecc.size);
- oob_index = BADBLOCK_MARKER_LENGTH;
- for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
- ecclayout->eccpos[i] = oob_index;
- if (((i + 1) % nand_chip->ecc.bytes) == 0)
- oob_index++;
- }
- /* include reserved-marker in ecclayout->oobfree calculation */
- ecclayout->oobfree->offset = 1 +
- ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
+ mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
+ /* Reserve one byte for the OMAP marker */
+ oobbytes_per_step = nand_chip->ecc.bytes + 1;
/* software bch library is used for locating errors */
nand_chip->ecc.priv = nand_bch_init(mtd);
if (!nand_chip->ecc.priv) {
@@ -1948,6 +2106,8 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+ oobbytes_per_step = nand_chip->ecc.bytes;
err = elm_config(info->elm_dev, BCH8_ECC,
mtd->writesize / nand_chip->ecc.size,
@@ -1955,16 +2115,6 @@ static int omap_nand_probe(struct platform_device *pdev)
if (err < 0)
goto return_error;
- /* define ECC layout */
- ecclayout->eccbytes = nand_chip->ecc.bytes *
- (mtd->writesize /
- nand_chip->ecc.size);
- oob_index = BADBLOCK_MARKER_LENGTH;
- for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
- ecclayout->eccpos[i] = oob_index;
- /* reserved marker already included in ecclayout->eccbytes */
- ecclayout->oobfree->offset =
- ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
break;
case OMAP_ECC_BCH16_CODE_HW:
@@ -1978,6 +2128,8 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch;
+ mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+ oobbytes_per_step = nand_chip->ecc.bytes;
err = elm_config(info->elm_dev, BCH16_ECC,
mtd->writesize / nand_chip->ecc.size,
@@ -1985,16 +2137,6 @@ static int omap_nand_probe(struct platform_device *pdev)
if (err < 0)
goto return_error;
- /* define ECC layout */
- ecclayout->eccbytes = nand_chip->ecc.bytes *
- (mtd->writesize /
- nand_chip->ecc.size);
- oob_index = BADBLOCK_MARKER_LENGTH;
- for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
- ecclayout->eccpos[i] = oob_index;
- /* reserved marker already included in ecclayout->eccbytes */
- ecclayout->oobfree->offset =
- ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
break;
default:
dev_err(&info->pdev->dev, "invalid or unsupported ECC scheme\n");
@@ -2002,13 +2144,13 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
- /* all OOB bytes from oobfree->offset till end off OOB are free */
- ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
/* check if NAND device's OOB is enough to store ECC signatures */
- if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
+ min_oobbytes += (oobbytes_per_step *
+ (mtd->writesize / nand_chip->ecc.size));
+ if (mtd->oobsize < min_oobbytes) {
dev_err(&info->pdev->dev,
"not enough OOB bytes required = %d, available=%d\n",
- ecclayout->eccbytes, mtd->oobsize);
+ min_oobbytes, mtd->oobsize);
err = -EINVAL;
goto return_error;
}
@@ -2020,7 +2162,10 @@ scan_tail:
goto return_error;
}
- mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+ if (dev->of_node)
+ mtd_device_register(mtd, NULL, 0);
+ else
+ mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
platform_set_drvdata(pdev, mtd);
@@ -2051,11 +2196,17 @@ static int omap_nand_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id omap_nand_ids[] = {
+ { .compatible = "ti,omap2-nand", },
+ {},
+};
+
static struct platform_driver omap_nand_driver = {
.probe = omap_nand_probe,
.remove = omap_nand_remove,
.driver = {
.name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(omap_nand_ids),
},
};
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index d4614bfbf..40a7c4a2c 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -130,6 +130,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
nc->cmd_ctrl = orion_nand_cmd_ctrl;
nc->read_buf = orion_nand_read_buf;
nc->ecc.mode = NAND_ECC_SOFT;
+ nc->ecc.algo = NAND_ECC_HAMMING;
if (board->chip_delay)
nc->chip_delay = board->chip_delay;
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 3ab53ca53..5de7591b0 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -92,8 +92,9 @@ int pasemi_device_ready(struct mtd_info *mtd)
static int pasemi_nand_probe(struct platform_device *ofdev)
{
+ struct device *dev = &ofdev->dev;
struct pci_dev *pdev;
- struct device_node *np = ofdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct resource res;
struct nand_chip *chip;
int err = 0;
@@ -107,13 +108,11 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
if (pasemi_nand_mtd)
return -ENODEV;
- pr_debug("pasemi_nand at %pR\n", &res);
+ dev_dbg(dev, "pasemi_nand at %pR\n", &res);
/* Allocate memory for MTD device structure and private data */
chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
if (!chip) {
- printk(KERN_WARNING
- "Unable to allocate PASEMI NAND MTD device structure\n");
err = -ENOMEM;
goto out;
}
@@ -121,7 +120,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
pasemi_nand_mtd = nand_to_mtd(chip);
/* Link the private data with the MTD structure */
- pasemi_nand_mtd->dev.parent = &ofdev->dev;
+ pasemi_nand_mtd->dev.parent = dev;
chip->IO_ADDR_R = of_iomap(np, 0);
chip->IO_ADDR_W = chip->IO_ADDR_R;
@@ -151,6 +150,7 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
chip->write_buf = pasemi_write_buf;
chip->chip_delay = 0;
chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
/* Enable the following for a flash based bad block table */
chip->bbt_options = NAND_BBT_USE_FLASH;
@@ -162,13 +162,13 @@ static int pasemi_nand_probe(struct platform_device *ofdev)
}
if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
- printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
+ dev_err(dev, "Unable to register MTD device\n");
err = -ENODEV;
goto out_lpc;
}
- printk(KERN_INFO "PA Semi NAND flash at %08llx, control at I/O %x\n",
- res.start, lpcctl);
+ dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res,
+ lpcctl);
return 0;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index e4e50da30..415a53a0d 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -74,6 +74,7 @@ static int plat_nand_probe(struct platform_device *pdev)
data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
data->chip.ecc.mode = NAND_ECC_SOFT;
+ data->chip.ecc.algo = NAND_ECC_HAMMING;
platform_set_drvdata(pdev, data);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index d6508856d..436dd6dc1 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_mtd.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>
#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
@@ -324,6 +323,62 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ 0xba20, 16, 16, &timing[3] },
};
+static int pxa3xx_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+ struct pxa3xx_nand_info *info = host->info_data;
+ int nchunks = mtd->writesize / info->chunk_size;
+
+ if (section >= nchunks)
+ return -ERANGE;
+
+ oobregion->offset = ((info->ecc_size + info->spare_size) * section) +
+ info->spare_size;
+ oobregion->length = info->ecc_size;
+
+ return 0;
+}
+
+static int pxa3xx_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+ struct pxa3xx_nand_info *info = host->info_data;
+ int nchunks = mtd->writesize / info->chunk_size;
+
+ if (section >= nchunks)
+ return -ERANGE;
+
+ if (!info->spare_size)
+ return 0;
+
+ oobregion->offset = section * (info->ecc_size + info->spare_size);
+ oobregion->length = info->spare_size;
+ if (!section) {
+ /*
+ * Bootrom looks in bytes 0 & 5 for bad blocks for the
+ * 4KB page / 4bit BCH combination.
+ */
+ if (mtd->writesize == 4096 && info->chunk_size == 2048) {
+ oobregion->offset += 6;
+ oobregion->length -= 6;
+ } else {
+ oobregion->offset += 2;
+ oobregion->length -= 2;
+ }
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops pxa3xx_ooblayout_ops = {
+ .ecc = pxa3xx_ooblayout_ecc,
+ .free = pxa3xx_ooblayout_free,
+};
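The two callbacks above replace a static nand_ecclayout table with on-demand arithmetic: every chunk contributes spare_size free bytes followed by ecc_size ECC bytes, and section 0 skips the bytes the bootrom reserves for bad-block marking. A standalone model of the same math (the geometry constants are illustrative, and errno's ERANGE stands in for the kernel's -ERANGE):

#include <stdio.h>
#include <errno.h>

struct region { int offset, length; };

/* illustrative geometry: 4KiB page split into two 2KiB chunks */
static const int writesize = 4096, chunk_size = 2048;
static const int ecc_size = 32, spare_size = 32;

static int ecc_region(int section, struct region *r)
{
        if (section >= writesize / chunk_size)
                return -ERANGE;
        r->offset = (ecc_size + spare_size) * section + spare_size;
        r->length = ecc_size;
        return 0;
}

static int free_region(int section, struct region *r)
{
        if (section >= writesize / chunk_size)
                return -ERANGE;
        r->offset = section * (ecc_size + spare_size);
        r->length = spare_size;
        if (!section) {         /* bootrom bad-block bytes 0 & 5 -> skip 6 */
                r->offset += 6;
                r->length -= 6;
        }
        return 0;
}

int main(void)
{
        struct region r;

        for (int s = 0; !ecc_region(s, &r); s++)
                printf("ecc[%d]  = {%d, %d}\n", s, r.offset, r.length);
        for (int s = 0; !free_region(s, &r); s++)
                printf("free[%d] = {%d, %d}\n", s, r.offset, r.length);
        return 0;
}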
+
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
@@ -347,41 +402,6 @@ static struct nand_bbt_descr bbt_mirror_descr = {
.pattern = bbt_mirror_pattern
};
-static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
- .eccbytes = 32,
- .eccpos = {
- 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63},
- .oobfree = { {2, 30} }
-};
-
-static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
- .eccbytes = 64,
- .eccpos = {
- 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63,
- 96, 97, 98, 99, 100, 101, 102, 103,
- 104, 105, 106, 107, 108, 109, 110, 111,
- 112, 113, 114, 115, 116, 117, 118, 119,
- 120, 121, 122, 123, 124, 125, 126, 127},
- /* Bootrom looks in bytes 0 & 5 for bad blocks */
- .oobfree = { {6, 26}, { 64, 32} }
-};
-
-static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
- .eccbytes = 128,
- .eccpos = {
- 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63},
- .oobfree = { }
-};
-
#define NDTR0_tCH(c) (min((c), 7) << 19)
#define NDTR0_tCS(c) (min((c), 7) << 16)
#define NDTR0_tWH(c) (min((c), 7) << 11)
@@ -1546,9 +1566,12 @@ static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
}
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
- struct nand_ecc_ctrl *ecc,
+ struct mtd_info *mtd,
int strength, int ecc_stepsize, int page_size)
{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
info->nfullchunks = 1;
info->ntotalchunks = 1;
@@ -1582,7 +1605,7 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
info->ecc_size = 32;
ecc->mode = NAND_ECC_HW;
ecc->size = info->chunk_size;
- ecc->layout = &ecc_layout_2KB_bch4bit;
+ mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
ecc->strength = 16;
} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
@@ -1594,7 +1617,7 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
info->ecc_size = 32;
ecc->mode = NAND_ECC_HW;
ecc->size = info->chunk_size;
- ecc->layout = &ecc_layout_4KB_bch4bit;
+ mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
ecc->strength = 16;
/*
@@ -1612,7 +1635,7 @@ static int pxa_ecc_init(struct pxa3xx_nand_info *info,
info->ecc_size = 32;
ecc->mode = NAND_ECC_HW;
ecc->size = info->chunk_size;
- ecc->layout = &ecc_layout_4KB_bch8bit;
+ mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
ecc->strength = 16;
} else {
dev_err(&info->pdev->dev,
@@ -1651,6 +1674,12 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
nand_writel(info, NDECCCTRL, 0x0);
+ if (pdata->flash_bbt)
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+ chip->ecc.strength = pdata->ecc_strength;
+ chip->ecc.size = pdata->ecc_step_size;
+
if (nand_scan_ident(mtd, 1, NULL))
return -ENODEV;
@@ -1663,13 +1692,12 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
}
}
- if (pdata->flash_bbt) {
+ if (chip->bbt_options & NAND_BBT_USE_FLASH) {
/*
* We'll use a bad block table stored in-flash and don't
* allow writing the bad block marker to the flash.
*/
- chip->bbt_options |= NAND_BBT_USE_FLASH |
- NAND_BBT_NO_OOB_BBM;
+ chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
}
@@ -1689,10 +1717,9 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
}
}
- if (pdata->ecc_strength && pdata->ecc_step_size) {
- ecc_strength = pdata->ecc_strength;
- ecc_step = pdata->ecc_step_size;
- } else {
+ ecc_strength = chip->ecc.strength;
+ ecc_step = chip->ecc.size;
+ if (!ecc_strength || !ecc_step) {
ecc_strength = chip->ecc_strength_ds;
ecc_step = chip->ecc_step_ds;
}
@@ -1703,7 +1730,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
ecc_step = 512;
}
- ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
+ ret = pxa_ecc_init(info, mtd, ecc_strength,
ecc_step, mtd->writesize);
if (ret)
return ret;
@@ -1903,15 +1930,6 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
if (of_get_property(np, "marvell,nand-keep-config", NULL))
pdata->keep_config = 1;
of_property_read_u32(np, "num-cs", &pdata->num_cs);
- pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
-
- pdata->ecc_strength = of_get_nand_ecc_strength(np);
- if (pdata->ecc_strength < 0)
- pdata->ecc_strength = 0;
-
- pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
- if (pdata->ecc_step_size < 0)
- pdata->ecc_step_size = 0;
pdev->dev.platform_data = pdata;
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
index f550a57e6..de7d28e62 100644
--- a/drivers/mtd/nand/qcom_nandc.c
+++ b/drivers/mtd/nand/qcom_nandc.c
@@ -21,7 +21,6 @@
#include <linux/mtd/partitions.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_mtd.h>
#include <linux/delay.h>
/* NANDc reg offsets */
@@ -1437,7 +1436,6 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *oob = chip->oob_poi;
- int free_boff;
int data_size, oob_size;
int ret, status = 0;
@@ -1451,12 +1449,11 @@ static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
/* calculate the data and oob size for the last codeword/step */
data_size = ecc->size - ((ecc->steps - 1) << 2);
- oob_size = ecc->steps << 2;
-
- free_boff = ecc->layout->oobfree[0].offset;
+ oob_size = mtd->oobavail;
/* override new oob content to last codeword */
- memcpy(nandc->data_buffer + data_size, oob + free_boff, oob_size);
+ mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
+ 0, mtd->oobavail);
set_address(host, host->cw_size * (ecc->steps - 1), page);
update_rw_regs(host, 1, false);
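mtd_ooblayout_get_databytes() replaces the open-coded oobfree lookup removed above: it copies the caller-visible OOB bytes out of a raw OOB buffer by walking the layout's free regions. A self-contained model of that walk over a hypothetical two-region free layout:

#include <stdio.h>
#include <string.h>

struct region { int offset, length; };

/* hypothetical free layout: bytes 2-4 and 11-12 of a 16-byte OOB */
static const struct region free_regions[] = { { 2, 3 }, { 11, 2 } };

/* gather 'nbytes' free-area bytes starting at logical offset 'start' */
static void get_databytes(char *dst, const char *oob, int start, int nbytes)
{
        for (size_t i = 0;
             i < sizeof(free_regions) / sizeof(*free_regions) && nbytes; i++) {
                const struct region *r = &free_regions[i];
                int skip = start < r->length ? start : r->length;
                int cnt = r->length - skip;

                if (cnt > nbytes)
                        cnt = nbytes;
                memcpy(dst, oob + r->offset + skip, cnt);
                dst += cnt;
                nbytes -= cnt;
                start -= skip;
        }
}

int main(void)
{
        const char oob[16] = "..ABC......DE...";
        char flat[6] = { 0 };

        get_databytes(flat, oob, 0, 5);
        printf("%s\n", flat);   /* prints "ABCDE" */
        return 0;
}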
@@ -1710,61 +1707,52 @@ static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
* This layout is read as is when ECC is disabled. When ECC is enabled, the
* inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
* and assumed to be 0xffs when we read a page/oob. The ECC, unused and
- * dummy/real bad block bytes are grouped as ecc bytes in nand_ecclayout (i.e,
- * ecc->bytes is the sum of the three).
+ * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is
+ * the sum of the three).
*/
-
-static struct nand_ecclayout *
-qcom_nand_create_layout(struct qcom_nand_host *host)
+static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
{
- struct nand_chip *chip = &host->chip;
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
- struct nand_ecclayout *layout;
- int i, j, steps, pos = 0, shift = 0;
- layout = devm_kzalloc(nandc->dev, sizeof(*layout), GFP_KERNEL);
- if (!layout)
- return NULL;
-
- steps = mtd->writesize / ecc->size;
- layout->eccbytes = steps * ecc->bytes;
+ if (section > 1)
+ return -ERANGE;
- layout->oobfree[0].offset = (steps - 1) * ecc->bytes + host->bbm_size;
- layout->oobfree[0].length = steps << 2;
-
- /*
- * the oob bytes in the first n - 1 codewords are all grouped together
- * in the format:
- * DUMMY_BBM + UNUSED + ECC
- */
- for (i = 0; i < steps - 1; i++) {
- for (j = 0; j < ecc->bytes; j++)
- layout->eccpos[pos++] = i * ecc->bytes + j;
+ if (!section) {
+ oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
+ host->bbm_size;
+ oobregion->offset = 0;
+ } else {
+ oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
+ oobregion->offset = mtd->oobsize - oobregion->length;
}
- /*
- * the oob bytes in the last codeword are grouped in the format:
- * BBM + FREE OOB + UNUSED + ECC
- */
+ return 0;
+}
- /* fill up the bbm positions */
- for (j = 0; j < host->bbm_size; j++)
- layout->eccpos[pos++] = i * ecc->bytes + j;
+static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
- /*
- * fill up the ecc and reserved positions, their indices are offseted
- * by the free oob region
- */
- shift = layout->oobfree[0].length + host->bbm_size;
+ if (section)
+ return -ERANGE;
- for (j = 0; j < (host->ecc_bytes_hw + host->spare_bytes); j++)
- layout->eccpos[pos++] = i * ecc->bytes + shift + j;
+ oobregion->length = ecc->steps * 4;
+ oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
- return layout;
+ return 0;
}
+static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
+ .ecc = qcom_nand_ooblayout_ecc,
+ .free = qcom_nand_ooblayout_free,
+};
+
static int qcom_nand_host_setup(struct qcom_nand_host *host)
{
struct nand_chip *chip = &host->chip;
@@ -1851,9 +1839,7 @@ static int qcom_nand_host_setup(struct qcom_nand_host *host)
ecc->mode = NAND_ECC_HW;
- ecc->layout = qcom_nand_create_layout(host);
- if (!ecc->layout)
- return -ENOMEM;
+ mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
cwperpage = mtd->writesize / ecc->size;
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 9c9397b54..d9309cf0c 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -84,11 +84,33 @@
/* new oob placement block for use with hardware ecc generation
*/
+static int s3c2410_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = 3;
+
+ return 0;
+}
+
+static int s3c2410_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 8;
+ oobregion->length = 8;
+
+ return 0;
+}
-static struct nand_ecclayout nand_hw_eccoob = {
- .eccbytes = 3,
- .eccpos = {0, 1, 2},
- .oobfree = {{8, 8}}
+static const struct mtd_ooblayout_ops s3c2410_ooblayout_ops = {
+ .ecc = s3c2410_ooblayout_ecc,
+ .free = s3c2410_ooblayout_free,
};
/* controller and mtd information */
@@ -542,7 +564,8 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
diff0 |= (diff1 << 8);
diff0 |= (diff2 << 16);
- if ((diff0 & ~(1<<fls(diff0))) == 0)
+ /* equal to "(diff0 & ~(1 << __ffs(diff0)))" */
+ if ((diff0 & (diff0 - 1)) == 0)
return 1;
return -1;
@@ -859,6 +882,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
}
#else
chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
#endif
if (set->disable_ecc)
@@ -919,7 +943,7 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
} else {
chip->ecc.size = 512;
chip->ecc.bytes = 3;
- chip->ecc.layout = &nand_hw_eccoob;
+ mtd_set_ooblayout(nand_to_mtd(chip), &s3c2410_ooblayout_ops);
}
}
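The rewritten test in s3c2410_nand_correct_data() above relies on a classic bit trick: x & (x - 1) clears the lowest set bit, so the expression is zero exactly when at most one bit is set — here, when the ECC syndrome difference flags a single, correctable bitflip. A tiny demonstration:

#include <stdio.h>
#include <stdint.h>

/* zero iff at most one bit is set: x & (x - 1) clears the lowest set bit */
static int at_most_one_bit(uint32_t x)
{
        return (x & (x - 1)) == 0;
}

int main(void)
{
        printf("%d %d %d\n",
               at_most_one_bit(0x000),  /* 1: no bit set */
               at_most_one_bit(0x400),  /* 1: exactly one bit set */
               at_most_one_bit(0x401)); /* 0: two bits -> uncorrectable */
        return 0;
}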
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 481440290..6fa3bcd59 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -31,7 +31,6 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_mtd.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
@@ -43,26 +42,73 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/sh_flctl.h>
-static struct nand_ecclayout flctl_4secc_oob_16 = {
- .eccbytes = 10,
- .eccpos = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
- .oobfree = {
- {.offset = 12,
- . length = 4} },
+static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 0;
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 12;
+ oobregion->length = 4;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
+ .ecc = flctl_4secc_ooblayout_sp_ecc,
+ .free = flctl_4secc_ooblayout_sp_free,
};
-static struct nand_ecclayout flctl_4secc_oob_64 = {
- .eccbytes = 4 * 10,
- .eccpos = {
- 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
- 54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
- .oobfree = {
- {.offset = 2, .length = 4},
- {.offset = 16, .length = 6},
- {.offset = 32, .length = 6},
- {.offset = 48, .length = 6} },
+static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 6;
+ oobregion->length = chip->ecc.bytes;
+
+ return 0;
+}
+
+static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section >= chip->ecc.steps)
+ return -ERANGE;
+
+ oobregion->offset = section * 16;
+ oobregion->length = 6;
+
+ if (!section) {
+ oobregion->offset += 2;
+ oobregion->length -= 2;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
+ .ecc = flctl_4secc_ooblayout_lp_ecc,
+ .free = flctl_4secc_ooblayout_lp_free,
};
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
@@ -987,10 +1033,10 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
if (flctl->hwecc) {
if (mtd->writesize == 512) {
- chip->ecc.layout = &flctl_4secc_oob_16;
+ mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
chip->badblock_pattern = &flctl_4secc_smallpage;
} else {
- chip->ecc.layout = &flctl_4secc_oob_64;
+ mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
chip->badblock_pattern = &flctl_4secc_largepage;
}
@@ -1005,6 +1051,7 @@ static int flctl_chip_init_tail(struct mtd_info *mtd)
flctl->flcmncr_base |= _4ECCEN;
} else {
chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
}
return 0;
@@ -1044,8 +1091,6 @@ static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
const struct of_device_id *match;
struct flctl_soc_config *config;
struct sh_flctl_platform_data *pdata;
- struct device_node *dn = dev->of_node;
- int ret;
match = of_match_device(of_flctl_match, dev);
if (match)
@@ -1065,15 +1110,6 @@ static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
pdata->has_hwecc = config->has_hwecc;
pdata->use_holden = config->use_holden;
- /* parse user defined options */
- ret = of_get_nand_bus_width(dn);
- if (ret == 16)
- pdata->flcmncr_val |= SEL_16BIT;
- else if (ret != 8) {
- dev_err(dev, "%s: invalid bus width\n", __func__);
- return NULL;
- }
-
return pdata;
}
@@ -1136,15 +1172,14 @@ static int flctl_probe(struct platform_device *pdev)
nand->chip_delay = 20;
nand->read_byte = flctl_read_byte;
+ nand->read_word = flctl_read_word;
nand->write_buf = flctl_write_buf;
nand->read_buf = flctl_read_buf;
nand->select_chip = flctl_select_chip;
nand->cmdfunc = flctl_cmdfunc;
- if (pdata->flcmncr_val & SEL_16BIT) {
+ if (pdata->flcmncr_val & SEL_16BIT)
nand->options |= NAND_BUSWIDTH_16;
- nand->read_word = flctl_read_word;
- }
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
@@ -1155,6 +1190,16 @@ static int flctl_probe(struct platform_device *pdev)
if (ret)
goto err_chip;
+ if (nand->options & NAND_BUSWIDTH_16) {
+ /*
+ * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
+ * Add the SEL_16BIT flag to pdata->flcmncr_val and re-assign
+ * flctl->flcmncr_base to pdata->flcmncr_val.
+ */
+ pdata->flcmncr_val |= SEL_16BIT;
+ flctl->flcmncr_base = pdata->flcmncr_val;
+ }
+
ret = flctl_chip_init_tail(flctl_mtd);
if (ret)
goto err_chip;
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index b7d1b55a1..064ca1757 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -148,6 +148,7 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
/* Link the private data with the MTD structure */
mtd = nand_to_mtd(this);
mtd->dev.parent = &pdev->dev;
+ mtd_set_ooblayout(mtd, data->ecc_layout);
platform_set_drvdata(pdev, sharpsl);
@@ -170,7 +171,6 @@ static int sharpsl_nand_probe(struct platform_device *pdev)
this->ecc.bytes = 3;
this->ecc.strength = 1;
this->badblock_pattern = data->badblock_pattern;
- this->ecc.layout = data->ecc_layout;
this->ecc.hwctl = sharpsl_nand_enable_hwecc;
this->ecc.calculate = sharpsl_nand_calculate_ecc;
this->ecc.correct = nand_correct_data;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index c514740f9..5939dff25 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -12,14 +12,47 @@
#include <linux/sizes.h>
#include "sm_common.h"
-static struct nand_ecclayout nand_oob_sm = {
- .eccbytes = 6,
- .eccpos = {8, 9, 10, 13, 14, 15},
- .oobfree = {
- {.offset = 0 , .length = 4}, /* reserved */
- {.offset = 6 , .length = 2}, /* LBA1 */
- {.offset = 11, .length = 2} /* LBA2 */
+static int oob_sm_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ oobregion->length = 3;
+ oobregion->offset = (section * 5) + 8; /* ECC at bytes 8-10 and 13-15 */
+
+ return 0;
+}
+
+static int oob_sm_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ switch (section) {
+ case 0:
+ /* reserved */
+ oobregion->offset = 0;
+ oobregion->length = 4;
+ break;
+ case 1:
+ /* LBA1 */
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ break;
+ case 2:
+ /* LBA2 */
+ oobregion->offset = 11;
+ oobregion->length = 2;
+ break;
+ default:
+ return -ERANGE;
}
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops oob_sm_ops = {
+ .ecc = oob_sm_ooblayout_ecc,
+ .free = oob_sm_ooblayout_free,
};
/* NOTE: This layout is not compatible with SmartMedia, */
@@ -28,15 +61,43 @@ static struct nand_ecclayout nand_oob_sm = {
/* If you use smftl, it will bypass this and work correctly */
/* If you do not, then you break SmartMedia compliance anyway */
-static struct nand_ecclayout nand_oob_sm_small = {
- .eccbytes = 3,
- .eccpos = {0, 1, 2},
- .oobfree = {
- {.offset = 3 , .length = 2}, /* reserved */
- {.offset = 6 , .length = 2}, /* LBA1 */
+static int oob_sm_small_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = 3;
+ oobregion->offset = 0;
+
+ return 0;
+}
+
+static int oob_sm_small_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ switch (section) {
+ case 0:
+ /* reserved */
+ oobregion->offset = 3;
+ oobregion->length = 2;
+ break;
+ case 1:
+ /* LBA1 */
+ oobregion->offset = 6;
+ oobregion->length = 2;
+ break;
+ default:
+ return -ERANGE;
}
-};
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops oob_sm_small_ops = {
+ .ecc = oob_sm_small_ooblayout_ecc,
+ .free = oob_sm_small_ooblayout_free,
+};
static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
@@ -121,9 +182,9 @@ int sm_register_device(struct mtd_info *mtd, int smartmedia)
/* ECC layout */
if (mtd->writesize == SM_SECTOR_SIZE)
- chip->ecc.layout = &nand_oob_sm;
+ mtd_set_ooblayout(mtd, &oob_sm_ops);
else if (mtd->writesize == SM_SMALL_PAGE)
- chip->ecc.layout = &nand_oob_sm_small;
+ mtd_set_ooblayout(mtd, &oob_sm_small_ops);
else
return -ENODEV;
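A converted layout can be checked against the table it replaces by iterating sections until -ERANGE. For the SmartMedia layout above (with the corrected offset formula) the ECC regions should come out as {8, 3} and {13, 3}, matching the removed eccpos list; a quick user-space check:

#include <stdio.h>
#include <errno.h>

struct region { int offset, length; };

static int sm_ecc(int section, struct region *r)
{
        if (section > 1)
                return -ERANGE;
        r->length = 3;
        r->offset = (section * 5) + 8;  /* ECC2 at 8-10, ECC1 at 13-15 */
        return 0;
}

int main(void)
{
        struct region r;

        for (int s = 0; !sm_ecc(s, &r); s++)
                printf("ecc[%d] = {%d, %d}\n", s, r.offset, r.length);
        return 0;
}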
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index e3305f9dd..888fd314c 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -180,6 +180,7 @@ static int socrates_nand_probe(struct platform_device *ofdev)
nand_chip->dev_ready = socrates_nand_device_ready;
nand_chip->ecc.mode = NAND_ECC_SOFT; /* enable ECC */
+ nand_chip->ecc.algo = NAND_ECC_HAMMING;
/* TODO: I have no idea what real delay is. */
nand_chip->chip_delay = 20; /* 20us command delay time */
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 1c03eee44..a83a69068 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -30,7 +30,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
-#include <linux/of_mtd.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
@@ -39,7 +38,7 @@
#include <linux/dmaengine.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
-#include <linux/io.h>
+#include <linux/iopoll.h>
#define NFC_REG_CTL 0x0000
#define NFC_REG_ST 0x0004
@@ -155,7 +154,7 @@
/* define bit use in NFC_ECC_ST */
#define NFC_ECC_ERR(x) BIT(x)
#define NFC_ECC_PAT_FOUND(x) BIT(x + 16)
-#define NFC_ECC_ERR_CNT(b, x) (((x) >> ((b) * 8)) & 0xff)
+#define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff)
#define NFC_DEFAULT_TIMEOUT_MS 1000
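The amended NFC_ECC_ERR_CNT() reads one 8-bit error counter per ECC step out of 32-bit registers that each pack four steps (an interpretation read off the macro itself): the caller picks the register via the step number, and (b) % 4 selects the byte lane within it. A standalone check with an invented register value:

#include <stdio.h>
#include <stdint.h>

#define NFC_ECC_ERR_CNT(b, x)   (((x) >> (((b) % 4) * 8)) & 0xff)

int main(void)
{
        /* invented value for the second error-count register (steps 4-7):
         * step 4 -> 1 bitflip, step 5 -> 2, step 6 -> 3, step 7 -> 4 */
        uint32_t reg = 0x04030201;
        int step;

        for (step = 4; step < 8; step++)
                printf("step %d: %u bitflips\n", step,
                       (unsigned)NFC_ECC_ERR_CNT(step, reg));
        return 0;
}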
@@ -212,12 +211,9 @@ struct sunxi_nand_chip_sel {
* sunxi HW ECC infos: stores information related to HW ECC support
*
* @mode: the sunxi ECC mode field deduced from ECC requirements
- * @layout: the OOB layout depending on the ECC requirements and the
- * selected ECC mode
*/
struct sunxi_nand_hw_ecc {
int mode;
- struct nand_ecclayout layout;
};
/*
@@ -239,6 +235,10 @@ struct sunxi_nand_chip {
u32 timing_cfg;
u32 timing_ctl;
int selected;
+ int addr_cycles;
+ u32 addr[2];
+ int cmd_cycles;
+ u8 cmd[2];
int nsels;
struct sunxi_nand_chip_sel sels[0];
};
@@ -298,54 +298,71 @@ static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int sunxi_nfc_wait_int(struct sunxi_nfc *nfc, u32 flags,
- unsigned int timeout_ms)
+static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
+ bool use_polling, unsigned int timeout_ms)
{
- init_completion(&nfc->complete);
+ int ret;
- writel(flags, nfc->regs + NFC_REG_INT);
+ if (events & ~NFC_INT_MASK)
+ return -EINVAL;
if (!timeout_ms)
timeout_ms = NFC_DEFAULT_TIMEOUT_MS;
- if (!wait_for_completion_timeout(&nfc->complete,
- msecs_to_jiffies(timeout_ms))) {
- dev_err(nfc->dev, "wait interrupt timedout\n");
- return -ETIMEDOUT;
+ if (!use_polling) {
+ init_completion(&nfc->complete);
+
+ writel(events, nfc->regs + NFC_REG_INT);
+
+ ret = wait_for_completion_timeout(&nfc->complete,
+ msecs_to_jiffies(timeout_ms));
+ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
+
+ writel(0, nfc->regs + NFC_REG_INT);
+ } else {
+ u32 status;
+
+ ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
+ (status & events) == events, 1,
+ timeout_ms * 1000);
}
- return 0;
+ writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
+
+ if (ret)
+ dev_err(nfc->dev, "wait interrupt timedout\n");
+
+ return ret;
}
static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
{
- unsigned long timeout = jiffies +
- msecs_to_jiffies(NFC_DEFAULT_TIMEOUT_MS);
+ u32 status;
+ int ret;
- do {
- if (!(readl(nfc->regs + NFC_REG_ST) & NFC_CMD_FIFO_STATUS))
- return 0;
- } while (time_before(jiffies, timeout));
+ ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
+ !(status & NFC_CMD_FIFO_STATUS), 1,
+ NFC_DEFAULT_TIMEOUT_MS * 1000);
+ if (ret)
+ dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");
- dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");
- return -ETIMEDOUT;
+ return ret;
}
static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
{
- unsigned long timeout = jiffies +
- msecs_to_jiffies(NFC_DEFAULT_TIMEOUT_MS);
+ u32 ctl;
+ int ret;
writel(0, nfc->regs + NFC_REG_ECC_CTL);
writel(NFC_RESET, nfc->regs + NFC_REG_CTL);
- do {
- if (!(readl(nfc->regs + NFC_REG_CTL) & NFC_RESET))
- return 0;
- } while (time_before(jiffies, timeout));
+ ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, ctl,
+ !(ctl & NFC_RESET), 1,
+ NFC_DEFAULT_TIMEOUT_MS * 1000);
+ if (ret)
+ dev_err(nfc->dev, "wait for NAND controller reset timedout\n");
- dev_err(nfc->dev, "wait for NAND controller reset timedout\n");
- return -ETIMEDOUT;
+ return ret;
}
static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
@@ -354,7 +371,6 @@ static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
struct sunxi_nand_rb *rb;
- unsigned long timeo = (sunxi_nand->nand.state == FL_ERASING ? 400 : 20);
int ret;
if (sunxi_nand->selected < 0)
@@ -366,12 +382,6 @@ static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
case RB_NATIVE:
ret = !!(readl(nfc->regs + NFC_REG_ST) &
NFC_RB_STATE(rb->info.nativeid));
- if (ret)
- break;
-
- sunxi_nfc_wait_int(nfc, NFC_RB_B2R, timeo);
- ret = !!(readl(nfc->regs + NFC_REG_ST) &
- NFC_RB_STATE(rb->info.nativeid));
break;
case RB_GPIO:
ret = gpio_get_value(rb->info.gpio);
@@ -407,7 +417,7 @@ static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
sel = &sunxi_nand->sels[chip];
ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
- NFC_PAGE_SHIFT(nand->page_shift - 10);
+ NFC_PAGE_SHIFT(nand->page_shift);
if (sel->rb.type == RB_NONE) {
nand->dev_ready = NULL;
} else {
@@ -452,7 +462,7 @@ static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
writel(tmp, nfc->regs + NFC_REG_CMD);
- ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
if (ret)
break;
@@ -487,7 +497,7 @@ static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
NFC_ACCESS_DIR;
writel(tmp, nfc->regs + NFC_REG_CMD);
- ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
if (ret)
break;
@@ -511,32 +521,54 @@ static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
int ret;
- u32 tmp;
ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
if (ret)
return;
- if (ctrl & NAND_CTRL_CHANGE) {
- tmp = readl(nfc->regs + NFC_REG_CTL);
- if (ctrl & NAND_NCE)
- tmp |= NFC_CE_CTL;
- else
- tmp &= ~NFC_CE_CTL;
- writel(tmp, nfc->regs + NFC_REG_CTL);
- }
+ if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) &&
+ !(ctrl & (NAND_CLE | NAND_ALE))) {
+ u32 cmd = 0;
- if (dat == NAND_CMD_NONE)
- return;
+ if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles)
+ return;
- if (ctrl & NAND_CLE) {
- writel(NFC_SEND_CMD1 | dat, nfc->regs + NFC_REG_CMD);
- } else {
- writel(dat, nfc->regs + NFC_REG_ADDR_LOW);
- writel(NFC_SEND_ADR, nfc->regs + NFC_REG_CMD);
+ if (sunxi_nand->cmd_cycles--)
+ cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0];
+
+ if (sunxi_nand->cmd_cycles--) {
+ cmd |= NFC_SEND_CMD2;
+ writel(sunxi_nand->cmd[1],
+ nfc->regs + NFC_REG_RCMD_SET);
+ }
+
+ sunxi_nand->cmd_cycles = 0;
+
+ if (sunxi_nand->addr_cycles) {
+ cmd |= NFC_SEND_ADR |
+ NFC_ADR_NUM(sunxi_nand->addr_cycles);
+ writel(sunxi_nand->addr[0],
+ nfc->regs + NFC_REG_ADDR_LOW);
+ }
+
+ if (sunxi_nand->addr_cycles > 4)
+ writel(sunxi_nand->addr[1],
+ nfc->regs + NFC_REG_ADDR_HIGH);
+
+ writel(cmd, nfc->regs + NFC_REG_CMD);
+ sunxi_nand->addr[0] = 0;
+ sunxi_nand->addr[1] = 0;
+ sunxi_nand->addr_cycles = 0;
+ sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
}
- sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ if (ctrl & NAND_CLE) {
+ sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat;
+ } else if (ctrl & NAND_ALE) {
+ sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |=
+ dat << ((sunxi_nand->addr_cycles % 4) * 8);
+ sunxi_nand->addr_cycles++;
+ }
}
/* These seed values have been extracted from Allwinner's BSP */
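The reworked cmd_ctrl() above no longer forwards each address cycle to the controller as it arrives; it accumulates up to eight cycles in two 32-bit words — byte N lands in addr[N / 4] at bit offset (N % 4) * 8 — and flushes them in a single controller command. The packing in isolation, with made-up cycle values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* five made-up address cycles, packed the way cmd_ctrl() does it */
        uint8_t cycles[] = { 0x00, 0x80, 0x02, 0x10, 0x01 };
        uint32_t addr[2] = { 0, 0 };
        int i;

        for (i = 0; i < 5; i++)
                addr[i / 4] |= (uint32_t)cycles[i] << ((i % 4) * 8);

        /* prints ADDR_LOW=0x10028000 ADDR_HIGH=0x00000001 */
        printf("ADDR_LOW=0x%08x ADDR_HIGH=0x%08x\n",
               (unsigned)addr[0], (unsigned)addr[1]);
        return 0;
}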
@@ -717,7 +749,8 @@ static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE |
NFC_ECC_BLOCK_SIZE_MSK);
- ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION;
+ ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION |
+ NFC_ECC_PIPELINE;
writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
}
@@ -739,18 +772,106 @@ static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf)
buf[3] = user_data >> 24;
}
+static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
+{
+ return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
+}
+
+static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob,
+ int step, bool bbm, int page)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+ sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
+ oob);
+
+ /* De-randomize the Bad Block Marker. */
+ if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
+ sunxi_nfc_randomize_bbm(mtd, page, oob);
+}
+
+static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd,
+ const u8 *oob, int step,
+ bool bbm, int page)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ u8 user_data[4];
+
+ /* Randomize the Bad Block Marker. */
+ if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
+ memcpy(user_data, oob, sizeof(user_data));
+ sunxi_nfc_randomize_bbm(mtd, page, user_data);
+ oob = user_data;
+ }
+
+ writel(sunxi_nfc_buf_to_user_data(oob),
+ nfc->regs + NFC_REG_USER_DATA(step));
+}
+
+static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
+ unsigned int *max_bitflips, int ret)
+{
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += ret;
+ *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ }
+}
+
+static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
+ int step, bool *erased)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+ u32 status, tmp;
+
+ *erased = false;
+
+ status = readl(nfc->regs + NFC_REG_ECC_ST);
+
+ if (status & NFC_ECC_ERR(step))
+ return -EBADMSG;
+
+ if (status & NFC_ECC_PAT_FOUND(step)) {
+ u8 pattern;
+
+ if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) {
+ pattern = 0x0;
+ } else {
+ pattern = 0xff;
+ *erased = true;
+ }
+
+ if (data)
+ memset(data, pattern, ecc->size);
+
+ if (oob)
+ memset(oob, pattern, ecc->bytes + 4);
+
+ return 0;
+ }
+
+ tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step));
+
+ return NFC_ECC_ERR_CNT(step, tmp);
+}
+
static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
u8 *data, int data_off,
u8 *oob, int oob_off,
int *cur_off,
unsigned int *max_bitflips,
- bool bbm, int page)
+ bool bbm, bool oob_required, int page)
{
struct nand_chip *nand = mtd_to_nand(mtd);
struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
struct nand_ecc_ctrl *ecc = &nand->ecc;
int raw_mode = 0;
- u32 status;
+ bool erased;
int ret;
if (*cur_off != data_off)
@@ -769,34 +890,19 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
- ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
sunxi_nfc_randomizer_disable(mtd);
if (ret)
return ret;
*cur_off = oob_off + ecc->bytes + 4;
- status = readl(nfc->regs + NFC_REG_ECC_ST);
- if (status & NFC_ECC_PAT_FOUND(0)) {
- u8 pattern = 0xff;
-
- if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1)))
- pattern = 0x0;
-
- memset(data, pattern, ecc->size);
- memset(oob, pattern, ecc->bytes + 4);
-
+ ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0,
+ &erased);
+ if (erased)
return 1;
- }
-
- ret = NFC_ECC_ERR_CNT(0, readl(nfc->regs + NFC_REG_ECC_ERR_CNT(0)));
-
- memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
-
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
- sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, true, page);
- if (status & NFC_ECC_ERR(0)) {
+ if (ret < 0) {
/*
* Re-read the data with the randomizer disabled to identify
* bitflips in erased pages.
@@ -804,35 +910,34 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
if (nand->options & NAND_NEED_SCRAMBLING) {
nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
nand->read_buf(mtd, data, ecc->size);
- nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
- nand->read_buf(mtd, oob, ecc->bytes + 4);
+ } else {
+ memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
+ ecc->size);
}
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ nand->read_buf(mtd, oob, ecc->bytes + 4);
+
ret = nand_check_erased_ecc_chunk(data, ecc->size,
oob, ecc->bytes + 4,
NULL, 0, ecc->strength);
if (ret >= 0)
raw_mode = 1;
} else {
- /*
- * The engine protects 4 bytes of OOB data per chunk.
- * Retrieve the corrected OOB bytes.
- */
- sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(0)),
- oob);
+ memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
- /* De-randomize the Bad Block Marker. */
- if (bbm && nand->options & NAND_NEED_SCRAMBLING)
- sunxi_nfc_randomize_bbm(mtd, page, oob);
- }
+ if (oob_required) {
+ nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+ sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
+ true, page);
- if (ret < 0) {
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += ret;
- *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+ sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, 0,
+ bbm, page);
+ }
}
+ sunxi_nfc_hw_ecc_update_stats(mtd, max_bitflips, ret);
+
return raw_mode;
}
@@ -848,7 +953,7 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
if (len <= 0)
return;
- if (*cur_off != offset)
+ if (!cur_off || *cur_off != offset)
nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
offset + mtd->writesize, -1);
@@ -858,12 +963,8 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
false, page);
- *cur_off = mtd->oobsize + mtd->writesize;
-}
-
-static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
-{
- return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
+ if (cur_off)
+ *cur_off = mtd->oobsize + mtd->writesize;
}
static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
@@ -882,19 +983,6 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
- /* Fill OOB data in */
- if ((nand->options & NAND_NEED_SCRAMBLING) && bbm) {
- u8 user_data[4];
-
- memcpy(user_data, oob, 4);
- sunxi_nfc_randomize_bbm(mtd, page, user_data);
- writel(sunxi_nfc_buf_to_user_data(user_data),
- nfc->regs + NFC_REG_USER_DATA(0));
- } else {
- writel(sunxi_nfc_buf_to_user_data(oob),
- nfc->regs + NFC_REG_USER_DATA(0));
- }
-
if (data_off + ecc->size != oob_off)
nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
@@ -903,11 +991,13 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
return ret;
sunxi_nfc_randomizer_enable(mtd);
+ sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, 0, bbm, page);
+
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
NFC_ACCESS_DIR | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
- ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+ ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
sunxi_nfc_randomizer_disable(mtd);
if (ret)
return ret;
@@ -929,13 +1019,14 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
if (len <= 0)
return;
- if (*cur_off != offset)
+ if (!cur_off || *cur_off != offset)
nand->cmdfunc(mtd, NAND_CMD_RNDIN,
offset + mtd->writesize, -1);
sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
- *cur_off = mtd->oobsize + mtd->writesize;
+ if (cur_off)
+ *cur_off = mtd->oobsize + mtd->writesize;
}
static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
@@ -958,7 +1049,7 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
oob_off + mtd->writesize,
&cur_off, &max_bitflips,
- !i, page);
+ !i, oob_required, page);
if (ret < 0)
return ret;
else if (ret)
@@ -974,6 +1065,39 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
return max_bitflips;
}
+static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ u32 data_offs, u32 readlen,
+ u8 *bufpoi, int page)
+{
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret, i, cur_off = 0;
+ unsigned int max_bitflips = 0;
+
+ sunxi_nfc_hw_ecc_enable(mtd);
+
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+ for (i = data_offs / ecc->size;
+ i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
+ int data_off = i * ecc->size;
+ int oob_off = i * (ecc->bytes + 4);
+ u8 *data = bufpoi + data_off;
+ u8 *oob = chip->oob_poi + oob_off;
+
+ ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off,
+ oob,
+ oob_off + mtd->writesize,
+ &cur_off, &max_bitflips, !i,
+ false, page);
+ if (ret < 0)
+ return ret;
+ }
+
+ sunxi_nfc_hw_ecc_disable(mtd);
+
+ return max_bitflips;
+}
+
static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
struct nand_chip *chip,
const uint8_t *buf, int oob_required,
@@ -1026,7 +1150,9 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
oob_off, &cur_off,
- &max_bitflips, !i, page);
+ &max_bitflips, !i,
+ oob_required,
+ page);
if (ret < 0)
return ret;
else if (ret)
@@ -1074,6 +1200,40 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
return 0;
}
+static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+{
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+ chip->pagebuf = -1;
+
+ return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page);
+}
+
+static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ int page)
+{
+ int ret, status;
+
+ chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+
+ chip->pagebuf = -1;
+
+ memset(chip->buffers->databuf, 0xff, mtd->writesize);
+ ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page);
+ if (ret)
+ return ret;
+
+ /* Send command to program the OOB data */
+ chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+ status = chip->waitfunc(mtd, chip);
+
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
static const s32 tWB_lut[] = {6, 12, 16, 20};
static const s32 tRHW_lut[] = {4, 8, 12, 20};
@@ -1101,6 +1261,7 @@ static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
u32 min_clk_period = 0;
s32 tWB, tADL, tWHR, tRHW, tCAD;
+ long real_clk_rate;
/* T1 <=> tCLS */
if (timings->tCLS_min > min_clk_period)
@@ -1163,6 +1324,18 @@ static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
/* T16 - T19 + tCAD */
+ if (timings->tWB_max > (min_clk_period * 20))
+ min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20);
+
+ if (timings->tADL_min > (min_clk_period * 32))
+ min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32);
+
+ if (timings->tWHR_min > (min_clk_period * 32))
+ min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32);
+
+ if (timings->tRHW_min > (min_clk_period * 20))
+ min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);
+
tWB = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
min_clk_period);
if (tWB < 0) {
@@ -1198,23 +1371,26 @@ static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
/* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
- /*
- * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
- * output cycle timings shall be used if the host drives tRC less than
- * 30 ns.
- */
- chip->timing_ctl = (timings->tRC_min < 30000) ? NFC_TIMING_CTL_EDO : 0;
-
/* Convert min_clk_period from picoseconds to nanoseconds */
min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
/*
- * Convert min_clk_period into a clk frequency, then get the
- * appropriate rate for the NAND controller IP given this formula
- * (specified in the datasheet):
- * nand clk_rate = 2 * min_clk_rate
+ * Unlike what is stated in the Allwinner datasheet, the clk_rate should
+ * be set to (1 / min_clk_period), and not (2 / min_clk_period).
+ * This new formula was verified with a scope and validated by
+ * Allwinner engineers.
*/
- chip->clk_rate = (2 * NSEC_PER_SEC) / min_clk_period;
+ chip->clk_rate = NSEC_PER_SEC / min_clk_period;
+ real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
+
+ /*
+ * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
+ * output cycle timings shall be used if the host drives tRC less than
+ * 30 ns.
+ */
+ min_clk_period = NSEC_PER_SEC / real_clk_rate;
+ chip->timing_ctl = ((min_clk_period * 2) < 30) ?
+ NFC_TIMING_CTL_EDO : 0;
return 0;
}
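The EDO decision above now happens after clock rounding: the requested rate is 1 / min_clk_period, the clock framework may deliver less, and per ONFI 3.1 paragraph 4.15.2 EDO sampling is needed once the resulting read cycle tRC (two clock periods here) drops below 30 ns. A user-space walk-through in which a fixed 38 MHz stands in for clk_round_rate() (that figure is invented):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
        long min_clk_period = 25;       /* ns, hypothetical worst-case timing */
        long clk_rate = NSEC_PER_SEC / min_clk_period;  /* 40 MHz requested */
        long real_clk_rate = 38000000;  /* invented clk_round_rate() result */
        long real_period = NSEC_PER_SEC / real_clk_rate;        /* 26 ns */
        long tRC = real_period * 2;     /* one read cycle = two clock periods */

        printf("requested %ld Hz, got %ld Hz, tRC = %ld ns, EDO: %s\n",
               clk_rate, real_clk_rate, tRC, tRC < 30 ? "yes" : "no");
        return 0;
}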
@@ -1257,6 +1433,57 @@ static int sunxi_nand_chip_init_timings(struct sunxi_nand_chip *chip,
return sunxi_nand_chip_set_timings(chip, timings);
}
+static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+
+ if (section >= ecc->steps)
+ return -ERANGE;
+
+ oobregion->offset = section * (ecc->bytes + 4) + 4;
+ oobregion->length = ecc->bytes;
+
+ return 0;
+}
+
+static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *nand = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &nand->ecc;
+
+ if (section > ecc->steps)
+ return -ERANGE;
+
+ /*
+ * The first 2 bytes are used for BB markers, hence we
+ * only have 2 bytes available in the first user data
+ * section.
+ */
+ if (!section && ecc->mode == NAND_ECC_HW) {
+ oobregion->offset = 2;
+ oobregion->length = 2;
+
+ return 0;
+ }
+
+ oobregion->offset = section * (ecc->bytes + 4);
+
+ if (section < ecc->steps)
+ oobregion->length = 4;
+ else
+ oobregion->length = mtd->oobsize - oobregion->offset;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = {
+ .ecc = sunxi_nand_ooblayout_ecc,
+ .free = sunxi_nand_ooblayout_free,
+};
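With the length fix above, sunxi_nand_ooblayout_free() yields one 2-byte slot after the bad-block marker, a 4-byte user-data slot per remaining chunk, and whatever tail the OOB has left past the last chunk. Modelled for an invented geometry (4 ECC steps, 16 ECC bytes per step, 128-byte OOB):

#include <stdio.h>
#include <errno.h>

struct region { int offset, length; };

/* invented geometry: 4 ECC steps, 16 ECC bytes/step, 128-byte OOB */
static const int steps = 4, ecc_bytes = 16, oobsize = 128;

static int free_region(int section, struct region *r)
{
        if (section > steps)
                return -ERANGE;

        if (!section) {         /* first 2 bytes hold the BB marker */
                r->offset = 2;
                r->length = 2;
                return 0;
        }

        r->offset = section * (ecc_bytes + 4);
        if (section < steps)
                r->length = 4;
        else                    /* leftover tail past the last chunk */
                r->length = oobsize - r->offset;
        return 0;
}

int main(void)
{
        struct region r;

        for (int s = 0; !free_region(s, &r); s++)
                printf("free[%d] = {%d, %d}\n", s, r.offset, r.length);
        return 0;
}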
+
static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc,
struct device_node *np)
@@ -1266,7 +1493,6 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
struct sunxi_nand_hw_ecc *data;
- struct nand_ecclayout *layout;
int nsectors;
int ret;
int i;
@@ -1295,7 +1521,6 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
/* HW ECC always work with even numbers of ECC bytes */
ecc->bytes = ALIGN(ecc->bytes, 2);
- layout = &data->layout;
nsectors = mtd->writesize / ecc->size;
if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
@@ -1303,9 +1528,9 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
goto err;
}
- layout->eccbytes = (ecc->bytes * nsectors);
-
- ecc->layout = layout;
+ ecc->read_oob = sunxi_nfc_hw_common_ecc_read_oob;
+ ecc->write_oob = sunxi_nfc_hw_common_ecc_write_oob;
+ mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops);
ecc->priv = data;
return 0;
@@ -1325,9 +1550,6 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc,
struct device_node *np)
{
- struct nand_ecclayout *layout;
- int nsectors;
- int i, j;
int ret;
ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
@@ -1336,40 +1558,9 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
ecc->read_page = sunxi_nfc_hw_ecc_read_page;
ecc->write_page = sunxi_nfc_hw_ecc_write_page;
- layout = ecc->layout;
- nsectors = mtd->writesize / ecc->size;
-
- for (i = 0; i < nsectors; i++) {
- if (i) {
- layout->oobfree[i].offset =
- layout->oobfree[i - 1].offset +
- layout->oobfree[i - 1].length +
- ecc->bytes;
- layout->oobfree[i].length = 4;
- } else {
- /*
- * The first 2 bytes are used for BB markers, hence we
- * only have 2 bytes available in the first user data
- * section.
- */
- layout->oobfree[i].length = 2;
- layout->oobfree[i].offset = 2;
- }
-
- for (j = 0; j < ecc->bytes; j++)
- layout->eccpos[(ecc->bytes * i) + j] =
- layout->oobfree[i].offset +
- layout->oobfree[i].length + j;
- }
-
- if (mtd->oobsize > (ecc->bytes + 4) * nsectors) {
- layout->oobfree[nsectors].offset =
- layout->oobfree[nsectors - 1].offset +
- layout->oobfree[nsectors - 1].length +
- ecc->bytes;
- layout->oobfree[nsectors].length = mtd->oobsize -
- ((ecc->bytes + 4) * nsectors);
- }
+ ecc->read_oob_raw = nand_read_oob_std;
+ ecc->write_oob_raw = nand_write_oob_std;
+ ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
return 0;
}
@@ -1378,9 +1569,6 @@ static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd,
struct nand_ecc_ctrl *ecc,
struct device_node *np)
{
- struct nand_ecclayout *layout;
- int nsectors;
- int i;
int ret;
ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
@@ -1390,15 +1578,8 @@ static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd,
ecc->prepad = 4;
ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page;
ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page;
-
- layout = ecc->layout;
- nsectors = mtd->writesize / ecc->size;
-
- for (i = 0; i < (ecc->bytes * nsectors); i++)
- layout->eccpos[i] = i;
-
- layout->oobfree[0].length = mtd->oobsize - i;
- layout->oobfree[0].offset = i;
+ ecc->read_oob_raw = nand_read_oob_syndrome;
+ ecc->write_oob_raw = nand_write_oob_syndrome;
return 0;
}
@@ -1411,7 +1592,6 @@ static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc);
break;
case NAND_ECC_NONE:
- kfree(ecc->layout);
default:
break;
}
@@ -1432,8 +1612,6 @@ static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
return -EINVAL;
switch (ecc->mode) {
- case NAND_ECC_SOFT_BCH:
- break;
case NAND_ECC_HW:
ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
if (ret)
@@ -1445,10 +1623,6 @@ static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
return ret;
break;
case NAND_ECC_NONE:
- ecc->layout = kzalloc(sizeof(*ecc->layout), GFP_KERNEL);
- if (!ecc->layout)
- return -ENOMEM;
- ecc->layout->oobfree[0].length = mtd->oobsize;
case NAND_ECC_SOFT:
break;
default:
@@ -1536,21 +1710,6 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
}
}
- timings = onfi_async_timing_mode_to_sdr_timings(0);
- if (IS_ERR(timings)) {
- ret = PTR_ERR(timings);
- dev_err(dev,
- "could not retrieve timings for ONFI mode 0: %d\n",
- ret);
- return ret;
- }
-
- ret = sunxi_nand_chip_set_timings(chip, timings);
- if (ret) {
- dev_err(dev, "could not configure chip timings: %d\n", ret);
- return ret;
- }
-
nand = &chip->nand;
/* Default tR value specified in the ONFI spec (chapter 4.15.1) */
nand->chip_delay = 200;
@@ -1570,6 +1729,21 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
mtd = nand_to_mtd(nand);
mtd->dev.parent = dev;
+ timings = onfi_async_timing_mode_to_sdr_timings(0);
+ if (IS_ERR(timings)) {
+ ret = PTR_ERR(timings);
+ dev_err(dev,
+ "could not retrieve timings for ONFI mode 0: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = sunxi_nand_chip_set_timings(chip, timings);
+ if (ret) {
+ dev_err(dev, "could not configure chip timings: %d\n", ret);
+ return ret;
+ }
+
ret = nand_scan_ident(mtd, nsels, NULL);
if (ret)
return ret;
@@ -1580,6 +1754,8 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
if (nand->options & NAND_NEED_SCRAMBLING)
nand->options |= NAND_NO_SUBPAGE_WRITE;
+ nand->options |= NAND_SUBPAGE_READ;
+
ret = sunxi_nand_chip_init_timings(chip, np);
if (ret) {
dev_err(dev, "could not configure chip timings: %d\n", ret);
@@ -1728,6 +1904,8 @@ static int sunxi_nfc_remove(struct platform_device *pdev)
struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
sunxi_nand_chips_cleanup(nfc);
+ clk_disable_unprepare(nfc->mod_clk);
+ clk_disable_unprepare(nfc->ahb_clk);
return 0;
}
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 293feb19b..3ad514c44 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -33,7 +33,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
-#include <linux/of_mtd.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -175,34 +174,6 @@ static inline struct vf610_nfc *mtd_to_nfc(struct mtd_info *mtd)
return container_of(mtd_to_nand(mtd), struct vf610_nfc, chip);
}
-static struct nand_ecclayout vf610_nfc_ecc45 = {
- .eccbytes = 45,
- .eccpos = {19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63},
- .oobfree = {
- {.offset = 2,
- .length = 17} }
-};
-
-static struct nand_ecclayout vf610_nfc_ecc60 = {
- .eccbytes = 60,
- .eccpos = { 4, 5, 6, 7, 8, 9, 10, 11,
- 12, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, 22, 23, 24, 25, 26, 27,
- 28, 29, 30, 31, 32, 33, 34, 35,
- 36, 37, 38, 39, 40, 41, 42, 43,
- 44, 45, 46, 47, 48, 49, 50, 51,
- 52, 53, 54, 55, 56, 57, 58, 59,
- 60, 61, 62, 63 },
- .oobfree = {
- {.offset = 2,
- .length = 2} }
-};
-
static inline u32 vf610_nfc_read(struct vf610_nfc *nfc, uint reg)
{
return readl(nfc->regs + reg);
@@ -781,14 +752,16 @@ static int vf610_nfc_probe(struct platform_device *pdev)
if (mtd->oobsize > 64)
mtd->oobsize = 64;
+ /*
+ * No explicit OOB layout is specified here because we're using the
+ * default large page ECC layout defined in NAND core.
+ */
if (chip->ecc.strength == 32) {
nfc->ecc_mode = ECC_60_BYTE;
chip->ecc.bytes = 60;
- chip->ecc.layout = &vf610_nfc_ecc60;
} else if (chip->ecc.strength == 24) {
nfc->ecc_mode = ECC_45_BYTE;
chip->ecc.bytes = 45;
- chip->ecc.layout = &vf610_nfc_ecc45;
} else {
dev_err(nfc->dev, "Unsupported ECC strength\n");
err = -ENXIO;
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index af28bb3ae..a4b029a41 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -68,21 +68,33 @@ MODULE_PARM_DESC(otp, "Corresponding behaviour of OneNAND in OTP"
* flexonenand_oob_128 - oob info for Flex-Onenand with 4KB page
* For now, we expose only 64 out of 80 ecc bytes
*/
-static struct nand_ecclayout flexonenand_oob_128 = {
- .eccbytes = 64,
- .eccpos = {
- 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
- 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
- 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
- 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
- 102, 103, 104, 105
- },
- .oobfree = {
- {2, 4}, {18, 4}, {34, 4}, {50, 4},
- {66, 4}, {82, 4}, {98, 4}, {114, 4}
- }
+static int flexonenand_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 6;
+ oobregion->length = 10;
+
+ return 0;
+}
+
+static int flexonenand_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 2;
+ oobregion->length = 4;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops flexonenand_ooblayout_ops = {
+ .ecc = flexonenand_ooblayout_ecc,
+ .free = flexonenand_ooblayout_free,
};
/*
@@ -91,56 +103,77 @@ static struct nand_ecclayout flexonenand_oob_128 = {
* Based on specification:
* 4Gb M-die OneNAND Flash (KFM4G16Q4M, KFN8G16Q4M). Rev. 1.3, Apr. 2010
*
- * For eccpos we expose only 64 bytes out of 72 (see struct nand_ecclayout)
- *
- * oobfree uses the spare area fields marked as
- * "Managed by internal ECC logic for Logical Sector Number area"
*/
-static struct nand_ecclayout onenand_oob_128 = {
- .eccbytes = 64,
- .eccpos = {
- 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 23, 24, 25, 26, 27, 28, 29, 30, 31,
- 39, 40, 41, 42, 43, 44, 45, 46, 47,
- 55, 56, 57, 58, 59, 60, 61, 62, 63,
- 71, 72, 73, 74, 75, 76, 77, 78, 79,
- 87, 88, 89, 90, 91, 92, 93, 94, 95,
- 103, 104, 105, 106, 107, 108, 109, 110, 111,
- 119
- },
- .oobfree = {
- {2, 3}, {18, 3}, {34, 3}, {50, 3},
- {66, 3}, {82, 3}, {98, 3}, {114, 3}
- }
+static int onenand_ooblayout_128_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 7;
+ oobregion->length = 9;
+
+ return 0;
+}
+
+static int onenand_ooblayout_128_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section >= 8)
+ return -ERANGE;
+
+ /*
+ * free bytes use the spare area fields marked as
+ * "Managed by internal ECC logic for Logical Sector Number area"
+ */
+ oobregion->offset = (section * 16) + 2;
+ oobregion->length = 3;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops onenand_oob_128_ooblayout_ops = {
+ .ecc = onenand_ooblayout_128_ecc,
+ .free = onenand_ooblayout_128_free,
};
/**
- * onenand_oob_64 - oob info for large (2KB) page
+ * onenand_oob_32_64 - oob info for middle (1KB) and large (2KB) pages
*/
-static struct nand_ecclayout onenand_oob_64 = {
- .eccbytes = 20,
- .eccpos = {
- 8, 9, 10, 11, 12,
- 24, 25, 26, 27, 28,
- 40, 41, 42, 43, 44,
- 56, 57, 58, 59, 60,
- },
- .oobfree = {
- {2, 3}, {14, 2}, {18, 3}, {30, 2},
- {34, 3}, {46, 2}, {50, 3}, {62, 2}
+static int onenand_ooblayout_32_64_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 8;
+ oobregion->length = 5;
+
+ return 0;
+}
+
+static int onenand_ooblayout_32_64_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ int sections = (mtd->oobsize / 32) * 2;
+
+ if (section >= sections)
+ return -ERANGE;
+
+ if (section & 1) {
+ oobregion->offset = ((section - 1) * 16) + 14;
+ oobregion->length = 2;
+ } else {
+ oobregion->offset = (section * 16) + 2;
+ oobregion->length = 3;
}
-};
-/**
- * onenand_oob_32 - oob info for middle (1KB) page
- */
-static struct nand_ecclayout onenand_oob_32 = {
- .eccbytes = 10,
- .eccpos = {
- 8, 9, 10, 11, 12,
- 24, 25, 26, 27, 28,
- },
- .oobfree = { {2, 3}, {14, 2}, {18, 3}, {30, 2} }
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops onenand_oob_32_64_ooblayout_ops = {
+ .ecc = onenand_ooblayout_32_64_ecc,
+ .free = onenand_ooblayout_32_64_free,
};
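
For the 32/64-byte layouts, the free callback interleaves two region shapes, which is why a single mtd_ooblayout_ops can now serve both page sizes (see the onenand_scan() switch further down). A quick enumeration of what it reports, under the same local-sketch assumptions as above:

#include <stdio.h>

/* same arithmetic as onenand_ooblayout_32_64_free() above */
static void dump_free(int oobsize)
{
	int sections = (oobsize / 32) * 2;
	int s, off, len;

	printf("oobsize %2d:", oobsize);
	for (s = 0; s < sections; s++) {
		if (s & 1) {
			off = ((s - 1) * 16) + 14;
			len = 2;
		} else {
			off = (s * 16) + 2;
			len = 3;
		}
		printf(" {%d,%d}", off, len);
	}
	printf("\n");
}

int main(void)
{
	dump_free(32);		/* {2,3} {14,2} */
	dump_free(64);		/* {2,3} {14,2} {34,3} {46,2} */
	return 0;
}

For oobsize 64 this reports four free regions with a 32-byte stride between pairs, where the old static table listed eight (one pair per 16 bytes).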
static const unsigned char ffchars[] = {
@@ -1024,34 +1057,15 @@ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int col
int thislen)
{
struct onenand_chip *this = mtd->priv;
- struct nand_oobfree *free;
- int readcol = column;
- int readend = column + thislen;
- int lastgap = 0;
- unsigned int i;
- uint8_t *oob_buf = this->oob_buf;
-
- free = this->ecclayout->oobfree;
- for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
- if (readcol >= lastgap)
- readcol += free->offset - lastgap;
- if (readend >= lastgap)
- readend += free->offset - lastgap;
- lastgap = free->offset + free->length;
- }
- this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
- free = this->ecclayout->oobfree;
- for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
- int free_end = free->offset + free->length;
- if (free->offset < readend && free_end > readcol) {
- int st = max_t(int,free->offset,readcol);
- int ed = min_t(int,free_end,readend);
- int n = ed - st;
- memcpy(buf, oob_buf + st, n);
- buf += n;
- } else if (column == 0)
- break;
- }
+ int ret;
+
+ this->read_bufferram(mtd, ONENAND_SPARERAM, this->oob_buf, 0,
+ mtd->oobsize);
+ ret = mtd_ooblayout_get_databytes(mtd, buf, this->oob_buf,
+ column, thislen);
+ if (ret)
+ return ret;
+
return 0;
}
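
onenand_transfer_auto_oob() now delegates the free-region gather to the generic helper. A simplified model of what mtd_ooblayout_get_databytes() computes, assuming a flat region array instead of the callback iteration the kernel actually performs:

#include <stdio.h>
#include <string.h>

struct region { int offset; int length; };

/*
 * Copy @nbytes of "free" OOB data, starting at logical free-byte
 * offset @start, out of the raw @oob buffer into @buf.
 */
static int get_databytes(unsigned char *buf, const unsigned char *oob,
			 int start, int nbytes,
			 const struct region *freemap, int nregions)
{
	int i;

	for (i = 0; i < nregions && nbytes; i++) {
		int skip = start < freemap[i].length ? start : freemap[i].length;
		int avail = freemap[i].length - skip;
		int n = avail < nbytes ? avail : nbytes;

		memcpy(buf, oob + freemap[i].offset + skip, n);
		buf += n;
		start -= skip;
		nbytes -= n;
	}
	return nbytes ? -1 : 0;		/* -1 stands in for -ERANGE */
}

int main(void)
{
	const struct region freemap[] = { {2, 3}, {14, 2} };
	unsigned char oob[32], buf[5];
	int i;

	for (i = 0; i < 32; i++)
		oob[i] = i;
	if (get_databytes(buf, oob, 0, 5, freemap, 2) == 0)
		for (i = 0; i < 5; i++)
			printf("%d ", buf[i]);	/* prints: 2 3 4 14 15 */
	printf("\n");
	return 0;
}

onenand_fill_auto_oob() below is the mirror image: mtd_ooblayout_set_databytes() scatters the caller's bytes back into the free regions of the raw OOB buffer.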
@@ -1808,34 +1822,7 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf,
const u_char *buf, int column, int thislen)
{
- struct onenand_chip *this = mtd->priv;
- struct nand_oobfree *free;
- int writecol = column;
- int writeend = column + thislen;
- int lastgap = 0;
- unsigned int i;
-
- free = this->ecclayout->oobfree;
- for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
- if (writecol >= lastgap)
- writecol += free->offset - lastgap;
- if (writeend >= lastgap)
- writeend += free->offset - lastgap;
- lastgap = free->offset + free->length;
- }
- free = this->ecclayout->oobfree;
- for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free->length; i++, free++) {
- int free_end = free->offset + free->length;
- if (free->offset < writeend && free_end > writecol) {
- int st = max_t(int,free->offset,writecol);
- int ed = min_t(int,free_end,writeend);
- int n = ed - st;
- memcpy(oob_buf + st, buf, n);
- buf += n;
- } else if (column == 0)
- break;
- }
- return 0;
+ return mtd_ooblayout_set_databytes(mtd, buf, oob_buf, column, thislen);
}
/**
@@ -4003,22 +3990,22 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
switch (mtd->oobsize) {
case 128:
if (FLEXONENAND(this)) {
- this->ecclayout = &flexonenand_oob_128;
+ mtd_set_ooblayout(mtd, &flexonenand_ooblayout_ops);
mtd->subpage_sft = 0;
} else {
- this->ecclayout = &onenand_oob_128;
+ mtd_set_ooblayout(mtd, &onenand_oob_128_ooblayout_ops);
mtd->subpage_sft = 2;
}
if (ONENAND_IS_NOP_1(this))
mtd->subpage_sft = 0;
break;
case 64:
- this->ecclayout = &onenand_oob_64;
+ mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops);
mtd->subpage_sft = 2;
break;
case 32:
- this->ecclayout = &onenand_oob_32;
+ mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops);
mtd->subpage_sft = 1;
break;
@@ -4027,7 +4014,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
__func__, mtd->oobsize);
mtd->subpage_sft = 0;
/* To prevent kernel oops */
- this->ecclayout = &onenand_oob_32;
+ mtd_set_ooblayout(mtd, &onenand_oob_32_64_ooblayout_ops);
break;
}
@@ -4037,12 +4024,12 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
* The number of bytes available for a client to place data into
* the out of band area
*/
- mtd->oobavail = 0;
- for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES &&
- this->ecclayout->oobfree[i].length; i++)
- mtd->oobavail += this->ecclayout->oobfree[i].length;
+ ret = mtd_ooblayout_count_freebytes(mtd);
+ if (ret < 0)
+ ret = 0;
+
+ mtd->oobavail = ret;
- mtd->ecclayout = this->ecclayout;
mtd->ecc_strength = 1;
/* Fill in remaining MTD driver data */
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index b096f8bb0..3692dd547 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -386,7 +386,7 @@ restart:
if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {
sm_printk("sector %d of block at LBA %d of zone %d"
- " coudn't be read, marking it as invalid",
+ " couldn't be read, marking it as invalid",
boffset / SM_SECTOR_SIZE, lba, zone);
oob.data_status = 0;
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 157841dc3..c52e45594 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -832,6 +832,7 @@ static const struct flash_info spi_nor_ids[] = {
/* GigaDevice */
{ "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
{ "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
+ { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) },
/* Intel/Numonyx -- xxxs33b */
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 22fd19c0c..ef3618299 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -149,6 +149,8 @@ static struct device_attribute dev_bgt_enabled =
__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_ro_mode =
+ __ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL);
/**
* ubi_volume_notify - send a volume change notification.
@@ -385,6 +387,8 @@ static ssize_t dev_attribute_show(struct device *dev,
ret = sprintf(buf, "%d\n", ubi->thread_enabled);
else if (attr == &dev_mtd_num)
ret = sprintf(buf, "%d\n", ubi->mtd->index);
+ else if (attr == &dev_ro_mode)
+ ret = sprintf(buf, "%d\n", ubi->ro_mode);
else
ret = -EINVAL;
@@ -404,6 +408,7 @@ static struct attribute *ubi_dev_attrs[] = {
&dev_min_io_size.attr,
&dev_bgt_enabled.attr,
&dev_mtd_num.attr,
+ &dev_ro_mode.attr,
NULL
};
ATTRIBUTE_GROUPS(ubi_dev);
@@ -1142,22 +1147,26 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
*/
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
- int err, major, minor, mode;
+ int err, minor;
struct path path;
+ struct kstat stat;
/* Probably this is an MTD character device node path */
err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
if (err)
return ERR_PTR(err);
- /* MTD device number is defined by the major / minor numbers */
- major = imajor(d_backing_inode(path.dentry));
- minor = iminor(d_backing_inode(path.dentry));
- mode = d_backing_inode(path.dentry)->i_mode;
+ err = vfs_getattr(&path, &stat);
path_put(&path);
- if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
+ if (err)
+ return ERR_PTR(err);
+
+ /* MTD device number is defined by the major / minor numbers */
+ if (MAJOR(stat.rdev) != MTD_CHAR_MAJOR || !S_ISCHR(stat.mode))
return ERR_PTR(-EINVAL);
+ minor = MINOR(stat.rdev);
+
if (minor & 1)
/*
* Just do not think the "/dev/mtdrX" devices support is need,
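
Both this hunk and the ubi_open_volume_path() change below switch from poking at the backing inode to vfs_getattr(), so the device identity comes from kstat.rdev and kstat.mode alone. The same resolution can be reproduced from userspace with stat(2); a small sketch (MTD_CHAR_MAJOR is 90 per include/uapi/linux/major.h, and the minor-to-index mapping assumes the usual mtdchar scheme where /dev/mtdX gets minor 2*X and /dev/mtdrX minor 2*X+1):

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

#define MTD_CHAR_MAJOR	90	/* include/uapi/linux/major.h */

int main(int argc, char **argv)
{
	struct stat st;

	if (argc != 2) {
		fprintf(stderr, "usage: %s /dev/mtdX\n", argv[0]);
		return 1;
	}
	if (stat(argv[1], &st) != 0) {
		perror("stat");
		return 1;
	}
	if (!S_ISCHR(st.st_mode) || major(st.st_rdev) != MTD_CHAR_MAJOR) {
		fprintf(stderr, "not an MTD character device\n");
		return 1;
	}
	printf("mtd%u%s\n", minor(st.st_rdev) / 2,
	       (minor(st.st_rdev) & 1) ? " (read-only /dev/mtdrX node)" : "");
	return 0;
}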
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index c4cb15a30..f101a4985 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -352,7 +352,8 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
} else if (dent == d->dfs_emulate_power_cut) {
if (kstrtoint(buf, 0, &val) != 0)
count = -EINVAL;
- d->emulate_power_cut = val;
+ else
+ d->emulate_power_cut = val;
goto out;
}
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 4dd0391d2..ebf517271 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1227,32 +1227,6 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
cond_resched();
-
- /*
- * We've written the data and are going to read it back to make
- * sure it was written correctly.
- */
- memset(ubi->peb_buf, 0xFF, aldata_size);
- err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
- if (err) {
- if (err != UBI_IO_BITFLIPS) {
- ubi_warn(ubi, "error %d while reading data back from PEB %d",
- err, to);
- if (is_error_sane(err))
- err = MOVE_TARGET_RD_ERR;
- } else
- err = MOVE_TARGET_BITFLIPS;
- goto out_unlock_buf;
- }
-
- cond_resched();
-
- if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
- ubi_warn(ubi, "read data back from PEB %d and it is different",
- to);
- err = -EINVAL;
- goto out_unlock_buf;
- }
}
ubi_assert(vol->eba_tbl[lnum] == from);
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index e84488773..a9e2cef7c 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -301,9 +301,9 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
*/
struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
{
- int error, ubi_num, vol_id, mod;
- struct inode *inode;
+ int error, ubi_num, vol_id;
struct path path;
+ struct kstat stat;
dbg_gen("open volume %s, mode %d", pathname, mode);
@@ -314,14 +314,17 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
if (error)
return ERR_PTR(error);
- inode = d_backing_inode(path.dentry);
- mod = inode->i_mode;
- ubi_num = ubi_major2num(imajor(inode));
- vol_id = iminor(inode) - 1;
+ error = vfs_getattr(&path, &stat);
path_put(&path);
+ if (error)
+ return ERR_PTR(error);
- if (!S_ISCHR(mod))
+ if (!S_ISCHR(stat.mode))
return ERR_PTR(-EINVAL);
+
+ ubi_num = ubi_major2num(MAJOR(stat.rdev));
+ vol_id = MINOR(stat.rdev) - 1;
+
if (vol_id >= 0 && ubi_num >= 0)
return ubi_open_volume(ubi_num, vol_id, mode);
return ERR_PTR(-ENODEV);
@@ -708,7 +711,7 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
- dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
+ dbg_gen("map LEB %d:%d", vol->vol_id, lnum);
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 1ae17bb9b..10059dfdc 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -405,7 +405,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
if (!no_vtbl)
self_check_volumes(ubi);
- return err;
+ return 0;
out_err:
ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 17ec948ac..959c7b12e 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1534,6 +1534,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
INIT_LIST_HEAD(&ubi->pq[i]);
ubi->pq_head = 0;
+ ubi->free_count = 0;
list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
@@ -1552,7 +1553,6 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
found_pebs++;
}
- ubi->free_count = 0;
list_for_each_entry(aeb, &ai->free, u.list) {
cond_resched();
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index befd67df0..0c5415b05 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -192,6 +192,23 @@ config GENEVE
To compile this driver as a module, choose M here: the module
will be called geneve.
+config GTP
+ tristate "GPRS Tunneling Protocol datapath (GTP-U)"
+ depends on INET && NET_UDP_TUNNEL
+ select NET_IP_TUNNEL
+ ---help---
+ This allows one to create gtp virtual interfaces that provide
+ the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
+ is used to prevent subscribers from accessing mobile carrier core
+	  network infrastructure. This driver requires userspace software that
+	  implements the signaling protocol (GTP-C) to update its PDP context
+	  base, such as OpenGGSN <http://git.osmocom.org/openggsn/>. This
+ tunneling protocol is implemented according to the GSM TS 09.60 and
+ 3GPP TS 29.060 standards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called gtp.
+
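
For context on what this datapath module encapsulates: GTP-U wraps each subscriber IP packet in a small header carried over UDP port 2152. A hedged illustration of the GTPv1-U header layout (field values per 3GPP TS 29.060; the struct packing and the TEID value are assumptions of this sketch, not taken from the driver):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Minimal GTPv1-U header; sequence number, N-PDU number and
 * extension headers omitted (their flag bits stay 0). */
struct gtp1_header {
	uint8_t  flags;		/* version 1, protocol type GTP -> 0x30 */
	uint8_t  type;		/* 0xff = G-PDU (encapsulated user data) */
	uint16_t length;	/* payload length, excluding this header */
	uint32_t tid;		/* tunnel endpoint identifier (TEID) */
};

int main(void)
{
	const char payload[] = "inner subscriber IP packet";
	unsigned char pkt[sizeof(struct gtp1_header) + sizeof(payload)];
	struct gtp1_header hdr = {
		.flags  = 0x30,
		.type   = 0xff,
		.length = htons(sizeof(payload)),
		.tid    = htonl(0x2a),	/* hypothetical TEID from a PDP context */
	};

	memcpy(pkt, &hdr, sizeof(hdr));
	memcpy(pkt + sizeof(hdr), payload, sizeof(payload));
	printf("%zu-byte G-PDU, to be sent over UDP port 2152\n", sizeof(pkt));
	return 0;
}

The kernel module implements only this user-plane wrapping and unwrapping; tunnel setup and teardown stay with the GTP-C daemon named in the help text.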
config MACSEC
tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
select CRYPTO
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1aa7cb845..7336cbd3e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_VXLAN) += vxlan.o
obj-$(CONFIG_GENEVE) += geneve.o
+obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 67977f15a..11fe71278 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -35,8 +35,8 @@
#include <net/Space.h>
/* A unified ethernet device probe. This is the easiest way to have every
- ethernet adaptor have the name "eth[0123...]".
- */
+ * ethernet adaptor have the name "eth[0123...]".
+ */
struct devprobe2 {
struct net_device *(*probe)(int unit);
@@ -46,6 +46,7 @@ struct devprobe2 {
static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe)
{
struct net_device *dev;
+
for (; p->probe; p++) {
if (autoprobe && p->status)
continue;
@@ -58,8 +59,7 @@ static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe)
return -ENODEV;
}
-/*
- * ISA probes that touch addresses < 0x400 (including those that also
+/* ISA probes that touch addresses < 0x400 (including those that also
* look for EISA/PCI cards in addition to ISA cards).
*/
static struct devprobe2 isa_probes[] __initdata = {
@@ -86,11 +86,11 @@ static struct devprobe2 isa_probes[] __initdata = {
#endif
#ifdef CONFIG_CS89x0
#ifndef CONFIG_CS89x0_PLATFORM
- {cs89x0_probe, 0},
+ {cs89x0_probe, 0},
#endif
#endif
-#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel I82596 */
- {i82596_probe, 0},
+#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel */
+ {i82596_probe, 0}, /* I82596 */
#endif
#ifdef CONFIG_NI65
{ni65_probe, 0},
@@ -118,13 +118,12 @@ static struct devprobe2 m68k_probes[] __initdata = {
{mac8390_probe, 0},
#endif
#ifdef CONFIG_MAC89x0
- {mac89x0_probe, 0},
+ {mac89x0_probe, 0},
#endif
{NULL, 0},
};
-/*
- * Unified ethernet device probe, segmented per architecture and
+/* Unified ethernet device probe, segmented per architecture and
* per bus interface. This drives the legacy devices only for now.
*/
@@ -135,7 +134,7 @@ static void __init ethif_probe2(int unit)
if (base_addr == 1)
return;
- (void)( probe_list2(unit, m68k_probes, base_addr == 0) &&
+ (void)(probe_list2(unit, m68k_probes, base_addr == 0) &&
probe_list2(unit, isa_probes, base_addr == 0));
}
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 1b650f5a5..cbc785a0e 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -860,7 +860,7 @@ static void cops_timeout(struct net_device *dev)
}
printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name);
cops_jumpstart(dev); /* Restart the card. */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index 0d9b45ff1..81f90c470 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -433,7 +433,7 @@ static void __init com90xx_probe(void)
kfree(iomem);
}
-static int check_mirror(unsigned long addr, size_t size)
+static int __init check_mirror(unsigned long addr, size_t size)
{
void __iomem *p;
int res = -1;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index b9304a295..edc70ffad 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -101,11 +101,14 @@ enum ad_link_speed_type {
#define MAC_ADDRESS_EQUAL(A, B) \
ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
-static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
+static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
+ 0, 0, 0, 0, 0, 0
+};
static u16 ad_ticks_per_sec;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
-static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
+ MULTICAST_LACPDU_ADDR;
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
@@ -657,6 +660,20 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
}
}
+static int __agg_active_ports(struct aggregator *agg)
+{
+ struct port *port;
+ int active = 0;
+
+ for (port = agg->lag_ports; port;
+ port = port->next_port_in_aggregator) {
+ if (port->is_enabled)
+ active++;
+ }
+
+ return active;
+}
+
/**
* __get_agg_bandwidth - get the total bandwidth of an aggregator
* @aggregator: the aggregator we're looking at
@@ -664,39 +681,40 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
*/
static u32 __get_agg_bandwidth(struct aggregator *aggregator)
{
+ int nports = __agg_active_ports(aggregator);
u32 bandwidth = 0;
- if (aggregator->num_of_ports) {
+ if (nports) {
switch (__get_link_speed(aggregator->lag_ports)) {
case AD_LINK_SPEED_1MBPS:
- bandwidth = aggregator->num_of_ports;
+ bandwidth = nports;
break;
case AD_LINK_SPEED_10MBPS:
- bandwidth = aggregator->num_of_ports * 10;
+ bandwidth = nports * 10;
break;
case AD_LINK_SPEED_100MBPS:
- bandwidth = aggregator->num_of_ports * 100;
+ bandwidth = nports * 100;
break;
case AD_LINK_SPEED_1000MBPS:
- bandwidth = aggregator->num_of_ports * 1000;
+ bandwidth = nports * 1000;
break;
case AD_LINK_SPEED_2500MBPS:
- bandwidth = aggregator->num_of_ports * 2500;
+ bandwidth = nports * 2500;
break;
case AD_LINK_SPEED_10000MBPS:
- bandwidth = aggregator->num_of_ports * 10000;
+ bandwidth = nports * 10000;
break;
case AD_LINK_SPEED_20000MBPS:
- bandwidth = aggregator->num_of_ports * 20000;
+ bandwidth = nports * 20000;
break;
case AD_LINK_SPEED_40000MBPS:
- bandwidth = aggregator->num_of_ports * 40000;
+ bandwidth = nports * 40000;
break;
case AD_LINK_SPEED_56000MBPS:
- bandwidth = aggregator->num_of_ports * 56000;
+ bandwidth = nports * 56000;
break;
case AD_LINK_SPEED_100000MBPS:
- bandwidth = aggregator->num_of_ports * 100000;
+ bandwidth = nports * 100000;
break;
default:
bandwidth = 0; /* to silence the compiler */
@@ -1530,10 +1548,10 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
switch (__get_agg_selection_mode(curr->lag_ports)) {
case BOND_AD_COUNT:
- if (curr->num_of_ports > best->num_of_ports)
+ if (__agg_active_ports(curr) > __agg_active_ports(best))
return curr;
- if (curr->num_of_ports < best->num_of_ports)
+ if (__agg_active_ports(curr) < __agg_active_ports(best))
return best;
/*FALLTHROUGH*/
@@ -1561,8 +1579,14 @@ static int agg_device_up(const struct aggregator *agg)
if (!port)
return 0;
- return netif_running(port->slave->dev) &&
- netif_carrier_ok(port->slave->dev);
+ for (port = agg->lag_ports; port;
+ port = port->next_port_in_aggregator) {
+ if (netif_running(port->slave->dev) &&
+ netif_carrier_ok(port->slave->dev))
+ return 1;
+ }
+
+ return 0;
}
/**
@@ -1610,7 +1634,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
agg->is_active = 0;
- if (agg->num_of_ports && agg_device_up(agg))
+ if (__agg_active_ports(agg) && agg_device_up(agg))
best = ad_agg_selection_test(best, agg);
}
@@ -1622,7 +1646,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
* answering partner.
*/
if (active && active->lag_ports &&
- active->lag_ports->is_enabled &&
+ __agg_active_ports(active) &&
(__agg_has_partner(active) ||
(!__agg_has_partner(active) &&
!__agg_has_partner(best)))) {
@@ -1718,7 +1742,7 @@ static void ad_clear_agg(struct aggregator *aggregator)
aggregator->is_individual = false;
aggregator->actor_admin_aggregator_key = 0;
aggregator->actor_oper_aggregator_key = 0;
- aggregator->partner_system = null_mac_addr;
+ eth_zero_addr(aggregator->partner_system.mac_addr_value);
aggregator->partner_system_priority = 0;
aggregator->partner_oper_aggregator_key = 0;
aggregator->receive_state = 0;
@@ -1740,7 +1764,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
if (aggregator) {
ad_clear_agg(aggregator);
- aggregator->aggregator_mac_address = null_mac_addr;
+ eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
aggregator->aggregator_identifier = 0;
aggregator->slave = NULL;
}
@@ -2133,7 +2157,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
else
temp_aggregator->lag_ports = temp_port->next_port_in_aggregator;
temp_aggregator->num_of_ports--;
- if (temp_aggregator->num_of_ports == 0) {
+ if (__agg_active_ports(temp_aggregator) == 0) {
select_new_active_agg = temp_aggregator->is_active;
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
@@ -2432,7 +2456,9 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave)
*/
void bond_3ad_handle_link_change(struct slave *slave, char link)
{
+ struct aggregator *agg;
struct port *port;
+ bool dummy;
port = &(SLAVE_AD_INFO(slave)->port);
@@ -2459,6 +2485,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port->is_enabled = false;
ad_update_actor_keys(port, true);
}
+ agg = __get_first_agg(port);
+ ad_agg_selection_logic(agg, &dummy);
+
netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
port->actor_port_number,
link == BOND_LINK_UP ? "UP" : "DOWN");
@@ -2499,7 +2528,7 @@ int bond_3ad_set_carrier(struct bonding *bond)
active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
if (active) {
/* are enough slaves available to consider link up? */
- if (active->num_of_ports < bond->params.min_links) {
+ if (__agg_active_ports(active) < bond->params.min_links) {
if (netif_carrier_ok(bond->dev)) {
netif_carrier_off(bond->dev);
goto out;
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c5ac160a8..551f0f8de 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -42,13 +42,10 @@
-#ifndef __long_aligned
-#define __long_aligned __attribute__((aligned((sizeof(long)))))
-#endif
-static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
-static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
0x33, 0x33, 0x00, 0x00, 0x00, 0x01
};
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 941ec99cd..4d7981946 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1422,7 +1422,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return -EINVAL;
}
- if (slave_ops->ndo_set_mac_address == NULL) {
+ if (slave_dev->type == ARPHRD_INFINIBAND &&
+ BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
+ netdev_warn(bond_dev, "Type (%d) supports only active-backup mode\n",
+ slave_dev->type);
+ res = -EOPNOTSUPP;
+ goto err_undo_flags;
+ }
+
+ if (!slave_ops->ndo_set_mac_address ||
+ slave_dev->type == ARPHRD_INFINIBAND) {
netdev_warn(bond_dev, "The slave device specified does not support setting the MAC address\n");
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
@@ -1584,6 +1593,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
/* check for initial state */
+ new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
if (bond->params.updelay) {
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index db760e841..b8df0f5e8 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -446,7 +446,11 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
if (err < 0)
return err;
- return register_netdevice(bond_dev);
+ err = register_netdevice(bond_dev);
+
+ netif_carrier_off(bond_dev);
+
+ return err;
}
static size_t bond_get_size(const struct net_device *bond_dev)
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 8b3275d77..8f5e93cb7 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -712,9 +712,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
/* upper group completed, look again in lower */
if (priv->rx_next > get_mb_rx_low_last(priv) &&
- quota > 0 && mb > get_mb_rx_last(priv)) {
+ mb > get_mb_rx_last(priv)) {
priv->rx_next = get_mb_rx_first(priv);
- goto again;
+ if (quota > 0)
+ goto again;
}
return received;
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index f91b09428..e3dccd320 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -332,9 +332,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
- for (i = 0; i < frame->can_dlc; i += 2) {
- priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
- frame->data[i] | (frame->data[i + 1] << 8));
+ if (priv->type == BOSCH_D_CAN) {
+ u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ data = (u32)frame->data[i];
+ data |= (u32)frame->data[i + 1] << 8;
+ data |= (u32)frame->data[i + 2] << 16;
+ data |= (u32)frame->data[i + 3] << 24;
+ priv->write_reg32(priv, dreg, data);
+ }
+ } else {
+ for (i = 0; i < frame->can_dlc; i += 2) {
+ priv->write_reg(priv,
+ C_CAN_IFACE(DATA1_REG, iface) + i / 2,
+ frame->data[i] |
+ (frame->data[i + 1] << 8));
+ }
}
}
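
The D_CAN branch above packs four payload bytes little-endian into one 32-bit register write where the C_CAN path issued two 16-bit writes; the byte lanes end up identical. A standalone check of the arithmetic (plain C, no kernel dependencies):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t data[4] = { 0x11, 0x22, 0x33, 0x44 };
	uint32_t word;
	uint16_t lo, hi;

	/* one 32-bit access, as in the BOSCH_D_CAN branch */
	word = (uint32_t)data[0] |
	       ((uint32_t)data[1] << 8) |
	       ((uint32_t)data[2] << 16) |
	       ((uint32_t)data[3] << 24);

	/* two 16-bit accesses, as in the legacy C_CAN branch */
	lo = data[0] | (data[1] << 8);
	hi = data[2] | (data[3] << 8);

	printf("0x%08x == 0x%04x%04x\n", word, hi, lo);	/* 0x44332211 */
	return 0;
}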
@@ -402,10 +416,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
} else {
int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
- for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
- data = priv->read_reg(priv, dreg);
- frame->data[i] = data;
- frame->data[i + 1] = data >> 8;
+ if (priv->type == BOSCH_D_CAN) {
+ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ data = priv->read_reg32(priv, dreg);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ frame->data[i + 2] = data >> 16;
+ frame->data[i + 3] = data >> 24;
+ }
+ } else {
+ for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
+ data = priv->read_reg(priv, dreg);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ }
}
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 910c12e26..ad535a854 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -798,6 +798,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
* - control mode with CAN_CTRLMODE_FD set
*/
+ if (!data)
+ return 0;
+
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
@@ -1008,6 +1011,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
return -EOPNOTSUPP;
}
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+ return;
+}
+
static struct rtnl_link_ops can_link_ops __read_mostly = {
.kind = "can",
.maxtype = IFLA_CAN_MAX,
@@ -1016,6 +1024,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
.validate = can_validate,
.newlink = can_newlink,
.changelink = can_changelink,
+ .dellink = can_dellink,
.get_size = can_get_size,
.fill_info = can_fill_info,
.get_xstats_size = can_get_xstats_size,
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index a1bd54ffd..2d1d22eec 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -34,6 +34,7 @@
#define IFI_CANFD_STCMD_LOOPBACK BIT(18)
#define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24)
#define IFI_CANFD_STCMD_ENABLE_ISO BIT(25)
+#define IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING BIT(26)
#define IFI_CANFD_STCMD_NORMAL_MODE ((u32)BIT(31))
#define IFI_CANFD_RXSTCMD 0x4
@@ -51,7 +52,8 @@
#define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13)
#define IFI_CANFD_INTERRUPT 0xc
-#define IFI_CANFD_INTERRUPT_ERROR_WARNING ((u32)BIT(1))
+#define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1)
+#define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10)
#define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16)
#define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22)
#define IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY BIT(24)
@@ -71,12 +73,12 @@
#define IFI_CANFD_TIME_TIMEB_OFF 0
#define IFI_CANFD_TIME_TIMEA_OFF 8
#define IFI_CANFD_TIME_PRESCALE_OFF 16
-#define IFI_CANFD_TIME_SJW_OFF_ISO 25
-#define IFI_CANFD_TIME_SJW_OFF_BOSCH 28
-#define IFI_CANFD_TIME_SET_SJW_BOSCH BIT(6)
-#define IFI_CANFD_TIME_SET_TIMEB_BOSCH BIT(7)
-#define IFI_CANFD_TIME_SET_PRESC_BOSCH BIT(14)
-#define IFI_CANFD_TIME_SET_TIMEA_BOSCH BIT(15)
+#define IFI_CANFD_TIME_SJW_OFF_7_9_8_8 25
+#define IFI_CANFD_TIME_SJW_OFF_4_12_6_6 28
+#define IFI_CANFD_TIME_SET_SJW_4_12_6_6 BIT(6)
+#define IFI_CANFD_TIME_SET_TIMEB_4_12_6_6 BIT(7)
+#define IFI_CANFD_TIME_SET_PRESC_4_12_6_6 BIT(14)
+#define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6 BIT(15)
#define IFI_CANFD_TDELAY 0x1c
@@ -102,7 +104,26 @@
#define IFI_CANFD_RES1 0x40
-#define IFI_CANFD_RES2 0x44
+#define IFI_CANFD_ERROR_CTR 0x44
+#define IFI_CANFD_ERROR_CTR_UNLOCK_MAGIC 0x21302899
+#define IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST BIT(0)
+#define IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST BIT(1)
+#define IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST BIT(2)
+#define IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST BIT(3)
+#define IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST BIT(4)
+#define IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST BIT(5)
+#define IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST BIT(6)
+#define IFI_CANFD_ERROR_CTR_OVERLOAD_ALL BIT(8)
+#define IFI_CANFD_ERROR_CTR_ACK_ERROR_ALL BIT(9)
+#define IFI_CANFD_ERROR_CTR_BIT0_ERROR_ALL BIT(10)
+#define IFI_CANFD_ERROR_CTR_BIT1_ERROR_ALL BIT(11)
+#define IFI_CANFD_ERROR_CTR_STUFF_ERROR_ALL BIT(12)
+#define IFI_CANFD_ERROR_CTR_CRC_ERROR_ALL BIT(13)
+#define IFI_CANFD_ERROR_CTR_FORM_ERROR_ALL BIT(14)
+#define IFI_CANFD_ERROR_CTR_BITPOSITION_OFFSET 16
+#define IFI_CANFD_ERROR_CTR_BITPOSITION_MASK 0xff
+#define IFI_CANFD_ERROR_CTR_ER_RESET BIT(30)
+#define IFI_CANFD_ERROR_CTR_ER_ENABLE ((u32)BIT(31))
#define IFI_CANFD_PAR 0x48
@@ -196,6 +217,8 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable)
if (enable) {
enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY |
IFI_CANFD_IRQMASK_RXFIFO_NEMPTY;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER;
}
writel(IFI_CANFD_IRQMASK_SET_ERR |
@@ -334,6 +357,68 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev)
return 1;
}
+static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr)
+{
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST |
+ IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST |
+ IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST |
+ IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST |
+ IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST |
+ IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST |
+ IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST;
+
+ if (!(errctr & errmask)) /* No error happened. */
+ return 0;
+
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+
+ /* Propagate the error condition to the CAN stack. */
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (unlikely(!skb))
+ return 0;
+
+ /* Read the error counter register and check for new errors. */
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST)
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+
+ if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+
+ if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+
+ if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+
+ if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+
+ if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST)
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+
+ if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+
+ /* Reset the error counter, ack the IRQ and re-enable the counter. */
+ writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
+ writel(IFI_CANFD_INTERRUPT_ERROR_COUNTER,
+ priv->base + IFI_CANFD_INTERRUPT);
+ writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
static int ifi_canfd_get_berr_counter(const struct net_device *ndev,
struct can_berr_counter *bec)
{
@@ -469,6 +554,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD);
+ u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
/* Handle bus state changes */
if ((stcmd & stcmd_state_mask) ||
@@ -479,6 +565,10 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW)
work_done += ifi_canfd_handle_lost_msg(ndev);
+ /* Handle lec errors on the bus */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ work_done += ifi_canfd_handle_lec_err(ndev, errctr);
+
/* Handle normal messages on RX */
if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY))
work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done);
@@ -497,11 +587,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
struct ifi_canfd_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
- IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER;
+ IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER |
+ IFI_CANFD_INTERRUPT_ERROR_WARNING |
+ IFI_CANFD_INTERRUPT_ERROR_COUNTER;
const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |
IFI_CANFD_INTERRUPT_TXFIFO_REMOVE;
- const u32 clr_irq_mask = ~(IFI_CANFD_INTERRUPT_SET_IRQ |
- IFI_CANFD_INTERRUPT_ERROR_WARNING);
+ const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ |
+ IFI_CANFD_INTERRUPT_ERROR_WARNING));
u32 isr;
isr = readl(priv->base + IFI_CANFD_INTERRUPT);
@@ -513,44 +605,34 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
/* Clear all pending interrupts but ErrWarn */
writel(clr_irq_mask, priv->base + IFI_CANFD_INTERRUPT);
- /* RX IRQ, start NAPI */
+ /* RX IRQ or bus warning, start NAPI */
if (isr & rx_irq_mask) {
ifi_canfd_irq_enable(ndev, 0);
napi_schedule(&priv->napi);
}
/* TX IRQ */
- if (isr & tx_irq_mask) {
+ if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) {
stats->tx_bytes += can_get_echo_skb(ndev, 0);
stats->tx_packets++;
can_led_event(ndev, CAN_LED_EVENT_TX);
- netif_wake_queue(ndev);
}
+ if (isr & tx_irq_mask)
+ netif_wake_queue(ndev);
+
return IRQ_HANDLED;
}
static const struct can_bittiming_const ifi_canfd_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */
- .tseg1_max = 64,
- .tseg2_min = 2, /* Time segment 2 = phase_seg2 */
- .tseg2_max = 64,
- .sjw_max = 16,
- .brp_min = 2,
- .brp_max = 256,
- .brp_inc = 1,
-};
-
-static const struct can_bittiming_const ifi_canfd_data_bittiming_const = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */
- .tseg1_max = 64,
+ .tseg1_max = 256,
.tseg2_min = 2, /* Time segment 2 = phase_seg2 */
- .tseg2_max = 64,
- .sjw_max = 16,
+ .tseg2_max = 256,
+ .sjw_max = 128,
.brp_min = 2,
- .brp_max = 256,
+ .brp_max = 512,
.brp_inc = 1,
};
@@ -560,19 +642,6 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
- u32 noniso_arg = 0;
- u32 time_off;
-
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
- !(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)) {
- time_off = IFI_CANFD_TIME_SJW_OFF_ISO;
- } else {
- noniso_arg = IFI_CANFD_TIME_SET_TIMEB_BOSCH |
- IFI_CANFD_TIME_SET_TIMEA_BOSCH |
- IFI_CANFD_TIME_SET_PRESC_BOSCH |
- IFI_CANFD_TIME_SET_SJW_BOSCH;
- time_off = IFI_CANFD_TIME_SJW_OFF_BOSCH;
- }
/* Configure bit timing */
brp = bt->brp - 2;
@@ -582,8 +651,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) |
(tseg1 << IFI_CANFD_TIME_TIMEA_OFF) |
(brp << IFI_CANFD_TIME_PRESCALE_OFF) |
- (sjw << time_off) |
- noniso_arg,
+ (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
priv->base + IFI_CANFD_TIME);
/* Configure data bit timing */
@@ -594,8 +662,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) |
(tseg1 << IFI_CANFD_TIME_TIMEA_OFF) |
(brp << IFI_CANFD_TIME_PRESCALE_OFF) |
- (sjw << time_off) |
- noniso_arg,
+ (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
priv->base + IFI_CANFD_FTIME);
}
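
With the single 7/9/8/8 register layout (the name apparently encodes the field widths, MSB down: 7-bit SJW at bit 25, 9-bit prescaler at 16, 8-bit time A at 8, 8-bit time B at 0), the TIME and FTIME words are just shifted fields. A sketch of the packing with hypothetical field values (the real ones come from priv->can.bittiming; only the brp - 2 adjustment is visible above):

#include <stdio.h>
#include <stdint.h>

#define IFI_CANFD_TIME_TIMEB_OFF	0
#define IFI_CANFD_TIME_TIMEA_OFF	8
#define IFI_CANFD_TIME_PRESCALE_OFF	16
#define IFI_CANFD_TIME_SJW_OFF_7_9_8_8	25

int main(void)
{
	/* hypothetical register-ready values, not from the driver */
	uint32_t brp = 0, sjw = 3, tseg1 = 12, tseg2 = 1;
	uint32_t reg;

	reg = (tseg2 << IFI_CANFD_TIME_TIMEB_OFF) |
	      (tseg1 << IFI_CANFD_TIME_TIMEA_OFF) |
	      (brp << IFI_CANFD_TIME_PRESCALE_OFF) |
	      (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8);
	printf("IFI_CANFD_TIME = 0x%08x\n", reg);	/* 0x06000c01 */
	return 0;
}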
@@ -640,7 +707,8 @@ static void ifi_canfd_start(struct net_device *ndev)
/* Reset the IP */
writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD);
- writel(0, priv->base + IFI_CANFD_STCMD);
+ writel(IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING,
+ priv->base + IFI_CANFD_STCMD);
ifi_canfd_set_bittiming(ndev);
ifi_canfd_set_filters(ndev);
@@ -659,7 +727,8 @@ static void ifi_canfd_start(struct net_device *ndev)
writel((u32)(~IFI_CANFD_INTERRUPT_SET_IRQ),
priv->base + IFI_CANFD_INTERRUPT);
- stcmd = IFI_CANFD_STCMD_ENABLE | IFI_CANFD_STCMD_NORMAL_MODE;
+ stcmd = IFI_CANFD_STCMD_ENABLE | IFI_CANFD_STCMD_NORMAL_MODE |
+ IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
stcmd |= IFI_CANFD_STCMD_BUSMONITOR;
@@ -667,16 +736,23 @@ static void ifi_canfd_start(struct net_device *ndev)
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
stcmd |= IFI_CANFD_STCMD_LOOPBACK;
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
stcmd |= IFI_CANFD_STCMD_ENABLE_ISO;
- if (!(priv->can.ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)))
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
stcmd |= IFI_CANFD_STCMD_DISABLE_CANFD;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
ifi_canfd_irq_enable(ndev, 1);
+ /* Unlock, reset and enable the error counter. */
+ writel(IFI_CANFD_ERROR_CTR_UNLOCK_MAGIC,
+ priv->base + IFI_CANFD_ERROR_CTR);
+ writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
+ writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
+
/* Enable controller */
writel(stcmd, priv->base + IFI_CANFD_STCMD);
}
@@ -685,6 +761,10 @@ static void ifi_canfd_stop(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ /* Reset and disable the error counter. */
+ writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
+ writel(0, priv->base + IFI_CANFD_ERROR_CTR);
+
/* Reset the IP */
writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD);
@@ -877,7 +957,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK);
priv->can.bittiming_const = &ifi_canfd_bittiming_const;
- priv->can.data_bittiming_const = &ifi_canfd_data_bittiming_const;
+ priv->can.data_bittiming_const = &ifi_canfd_bittiming_const;
priv->can.do_set_mode = ifi_canfd_set_mode;
priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter;
@@ -888,7 +968,8 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_FD |
- CAN_CTRLMODE_FD_NON_ISO;
+ CAN_CTRLMODE_FD_NON_ISO |
+ CAN_CTRLMODE_BERR_REPORTING;
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, dev);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 5d04f5464..f13bb8d9b 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -84,6 +84,7 @@
#define MSG_COFFREQ 0x42
#define MSG_CONREQ 0x43
#define MSG_CCONFREQ 0x47
+#define MSG_NMTS 0xb0
#define MSG_LMTS 0xb4
/*
@@ -130,6 +131,22 @@
#define ICAN3_CAN_DLC_MASK 0x0f
+/* Janz ICAN3 NMTS subtypes */
+#define NMTS_CREATE_NODE_REQ 0x0
+#define NMTS_SLAVE_STATE_IND 0x8
+#define NMTS_SLAVE_EVENT_IND 0x9
+
+/* Janz ICAN3 LMTS subtypes */
+#define LMTS_BUSON_REQ 0x0
+#define LMTS_BUSOFF_REQ 0x1
+#define LMTS_CAN_CONF_REQ 0x2
+
+/* Janz ICAN3 NMTS Event indications */
+#define NE_LOCAL_OCCURRED 0x3
+#define NE_LOCAL_RESOLVED 0x2
+#define NE_REMOTE_OCCURRED 0xc
+#define NE_REMOTE_RESOLVED 0x8
+
/*
* SJA1000 Status and Error Register Definitions
*
@@ -800,21 +817,41 @@ static int ican3_set_bus_state(struct ican3_dev *mod, bool on)
return ican3_send_msg(mod, &msg);
} else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) {
+ /* bittiming + can-on/off request */
memset(&msg, 0, sizeof(msg));
msg.spec = MSG_LMTS;
if (on) {
msg.len = cpu_to_le16(4);
- msg.data[0] = 0;
+ msg.data[0] = LMTS_BUSON_REQ;
msg.data[1] = 0;
msg.data[2] = btr0;
msg.data[3] = btr1;
} else {
msg.len = cpu_to_le16(2);
- msg.data[0] = 1;
+ msg.data[0] = LMTS_BUSOFF_REQ;
msg.data[1] = 0;
}
+ res = ican3_send_msg(mod, &msg);
+ if (res)
+ return res;
- return ican3_send_msg(mod, &msg);
+ if (on) {
+ /* create NMT Slave Node for error processing
+ * class 2 (with error capability, see CiA/DS203-1)
+ * id 1
+ * name locnod1 (must be exactly 7 bytes)
+ */
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_NMTS;
+ msg.len = cpu_to_le16(11);
+ msg.data[0] = NMTS_CREATE_NODE_REQ;
+ msg.data[1] = 0;
+ msg.data[2] = 2; /* node class */
+ msg.data[3] = 1; /* node id */
+ strcpy(msg.data + 4, "locnod1"); /* node name */
+ return ican3_send_msg(mod, &msg);
+ }
+ return 0;
}
return -ENOTSUPP;
}
@@ -849,12 +886,23 @@ static int ican3_set_buserror(struct ican3_dev *mod, u8 quota)
{
struct ican3_msg msg;
- memset(&msg, 0, sizeof(msg));
- msg.spec = MSG_CCONFREQ;
- msg.len = cpu_to_le16(2);
- msg.data[0] = 0x00;
- msg.data[1] = quota;
-
+ if (mod->fwtype == ICAN3_FWTYPE_ICANOS) {
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_CCONFREQ;
+ msg.len = cpu_to_le16(2);
+ msg.data[0] = 0x00;
+ msg.data[1] = quota;
+ } else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) {
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_LMTS;
+ msg.len = cpu_to_le16(4);
+ msg.data[0] = LMTS_CAN_CONF_REQ;
+ msg.data[1] = 0x00;
+ msg.data[2] = 0x00;
+ msg.data[3] = quota;
+ } else {
+ return -ENOTSUPP;
+ }
return ican3_send_msg(mod, &msg);
}
@@ -1150,6 +1198,41 @@ static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
}
}
+/* Handle NMTS Slave Event Indication Messages from the firmware */
+static void ican3_handle_nmtsind(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ u16 subspec;
+
+ subspec = msg->data[0] + msg->data[1] * 0x100;
+ if (subspec == NMTS_SLAVE_EVENT_IND) {
+ switch (msg->data[2]) {
+ case NE_LOCAL_OCCURRED:
+ case NE_LOCAL_RESOLVED:
+			/* what follows is the same payload as a raw ICANOS
+			 * CEVTIND message: shift the data into place and
+			 * reuse that handler
+			 */
+ le16_add_cpu(&msg->len, -3);
+ memmove(msg->data, msg->data + 3, le16_to_cpu(msg->len));
+ ican3_handle_cevtind(mod, msg);
+ break;
+ case NE_REMOTE_OCCURRED:
+ case NE_REMOTE_RESOLVED:
+			/* should not occur, ignore */
+ break;
+ default:
+ netdev_warn(mod->ndev, "unknown NMTS event indication %x\n",
+ msg->data[2]);
+ break;
+ }
+ } else if (subspec == NMTS_SLAVE_STATE_IND) {
+ /* ignore state indications */
+ } else {
+ netdev_warn(mod->ndev, "unhandled NMTS indication %x\n",
+ subspec);
+ return;
+ }
+}
+
static void ican3_handle_unknown_message(struct ican3_dev *mod,
struct ican3_msg *msg)
{
@@ -1179,6 +1262,9 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
case MSG_INQUIRY:
ican3_handle_inquiry(mod, msg);
break;
+ case MSG_NMTS:
+ ican3_handle_nmtsind(mod, msg);
+ break;
default:
ican3_handle_unknown_message(mod, msg);
break;
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index e36b7400d..acb708fc1 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -276,7 +276,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
out_8(&regs->cantflg, 1 << buf_id);
if (!test_bit(F_TX_PROGRESS, &priv->flags))
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);
@@ -469,7 +469,7 @@ static irqreturn_t mscan_isr(int irq, void *dev_id)
clear_bit(F_TX_PROGRESS, &priv->flags);
priv->cur_pri = 0;
} else {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 8836a7485..3eb7430df 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -39,6 +39,7 @@ MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
"Adlink PCI-7841/cPCI-7841 SE, "
"Marathon CAN-bus-PCI, "
+ "Marathon CAN-bus-PCIe, "
"TEWS TECHNOLOGIES TPMC810, "
"esd CAN-PCI/CPCI/PCI104/200, "
"esd CAN-PCI/PMC/266, "
@@ -133,6 +134,7 @@ struct plx_pci_card {
#define IXXAT_PCI_SUB_SYS_ID 0x2540
#define MARATHON_PCI_DEVICE_ID 0x2715
+#define MARATHON_PCIE_DEVICE_ID 0x3432
#define TEWS_PCI_VENDOR_ID 0x1498
#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
@@ -141,8 +143,9 @@ struct plx_pci_card {
#define CTI_PCI_DEVICE_ID_CRG001 0x0900
static void plx_pci_reset_common(struct pci_dev *pdev);
-static void plx_pci_reset_marathon(struct pci_dev *pdev);
static void plx9056_pci_reset_common(struct pci_dev *pdev);
+static void plx_pci_reset_marathon_pci(struct pci_dev *pdev);
+static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev);
struct plx_pci_channel_map {
u32 bar;
@@ -215,14 +218,22 @@ static struct plx_pci_card_info plx_pci_card_info_ixxat = {
/* based on PLX9050 */
};
-static struct plx_pci_card_info plx_pci_card_info_marathon = {
+static struct plx_pci_card_info plx_pci_card_info_marathon_pci = {
"Marathon CAN-bus-PCI", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
{0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
- &plx_pci_reset_marathon
+ &plx_pci_reset_marathon_pci
/* based on PLX9052 */
};
+static struct plx_pci_card_info plx_pci_card_info_marathon_pcie = {
+ "Marathon CAN-bus-PCIe", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {3, 0x80, 0x00} },
+ &plx_pci_reset_marathon_pcie
+ /* based on PEX8311 */
+};
+
static struct plx_pci_card_info plx_pci_card_info_tews = {
"TEWS TECHNOLOGIES TPMC810", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
@@ -316,7 +327,14 @@ static const struct pci_device_id plx_pci_tbl[] = {
PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
- (kernel_ulong_t)&plx_pci_card_info_marathon
+ (kernel_ulong_t)&plx_pci_card_info_marathon_pci
+ },
+ {
+ /* Marathon CAN-bus-PCIe card */
+ PCI_VENDOR_ID_PLX, MARATHON_PCIE_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_marathon_pcie
},
{
/* TEWS TECHNOLOGIES TPMC810 card */
@@ -437,8 +455,8 @@ static void plx9056_pci_reset_common(struct pci_dev *pdev)
iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
};
-/* Special reset function for Marathon card */
-static void plx_pci_reset_marathon(struct pci_dev *pdev)
+/* Special reset function for Marathon CAN-bus-PCI card */
+static void plx_pci_reset_marathon_pci(struct pci_dev *pdev)
{
void __iomem *reset_addr;
int i;
@@ -460,6 +478,34 @@ static void plx_pci_reset_marathon(struct pci_dev *pdev)
}
}
+/* Special reset function for Marathon CAN-bus-PCIe card */
+static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev)
+{
+ void __iomem *addr;
+ void __iomem *reset_addr;
+ int i;
+
+ plx9056_pci_reset_common(pdev);
+
+ for (i = 0; i < 2; i++) {
+ struct plx_pci_channel_map *chan_map =
+ &plx_pci_card_info_marathon_pcie.chan_map_tbl[i];
+ addr = pci_iomap(pdev, chan_map->bar, chan_map->size);
+ if (!addr) {
+			dev_err(&pdev->dev, "Failed to remap reset space %d (BAR%d)\n",
+				i, chan_map->bar);
+ } else {
+ /* reset the SJA1000 chip */
+ #define MARATHON_PCIE_RESET_OFFSET 32
+ reset_addr = addr + chan_map->offset +
+ MARATHON_PCIE_RESET_OFFSET;
+ iowrite8(0x1, reset_addr);
+ udelay(100);
+ pci_iounmap(pdev, addr);
+ }
+ }
+}
+
static void plx_pci_del_card(struct pci_dev *pdev)
{
struct plx_pci_card *card = pci_get_drvdata(pdev);
@@ -486,7 +532,8 @@ static void plx_pci_del_card(struct pci_dev *pdev)
* Disable interrupts from PCI-card and disable local
* interrupts
*/
- if (pdev->device != PCI_DEVICE_ID_PLX_9056)
+ if (pdev->device != PCI_DEVICE_ID_PLX_9056 &&
+ pdev->device != MARATHON_PCIE_DEVICE_ID)
iowrite32(0x0, card->conf_addr + PLX_INTCSR);
else
iowrite32(0x0, card->conf_addr + PLX9056_INTCSR);
@@ -619,7 +666,8 @@ static int plx_pci_add_card(struct pci_dev *pdev,
* Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
* Local_2 interrupts from the SJA1000 chips
*/
- if (pdev->device != PCI_DEVICE_ID_PLX_9056) {
+ if (pdev->device != PCI_DEVICE_ID_PLX_9056 &&
+ pdev->device != MARATHON_PCIE_DEVICE_ID) {
val = ioread32(card->conf_addr + PLX_INTCSR);
if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH)
val |= PLX_LINT1_EN | PLX_PCI_INT_EN;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 8dda3b703..9f107798f 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -438,6 +438,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ /* set error type */
switch (ecc & ECC_MASK) {
case ECC_BIT:
cf->data[2] |= CAN_ERR_PROT_BIT;
@@ -449,9 +450,12 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
default:
- cf->data[3] = ecc & ECC_SEG;
break;
}
+
+ /* set error location */
+ cf->data[3] = ecc & ECC_SEG;
+
/* Error occurred during transmission? */
if ((ecc & ECC_DIR) == 0)
cf->data[2] |= CAN_ERR_PROT_TX;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 74a7dfece..cf36d26ef 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -961,7 +961,8 @@ static int mcp251x_open(struct net_device *net)
goto open_unlock;
}
- priv->wq = create_freezable_workqueue("mcp251x_wq");
+ priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ 0);
INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bcb272f6c..8483a40e7 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -16,7 +16,8 @@ config CAN_ESD_USB2
config CAN_GS_USB
tristate "Geschwister Schneider UG interfaces"
---help---
- This driver supports the Geschwister Schneider USB/CAN devices.
+	  This driver supports the Geschwister Schneider USB/CAN devices
+	  and the bytewerk.org candleLight USB CAN interfaces.
If unsure choose N,
choose Y for built in support,
M to compile as module (module will be named: gs_usb).
@@ -46,6 +47,8 @@ config CAN_KVASER_USB
- Kvaser USBcan R
- Kvaser Leaf Light v2
- Kvaser Mini PCI Express HS
+ - Kvaser Mini PCI Express 2xHS
+ - Kvaser USBcan Light 2xHS
- Kvaser USBcan II HS/HS
- Kvaser USBcan II HS/LS
- Kvaser USBcan Rugged ("USBcan Rev B")
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 3400fd1ca..71f0e7913 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -521,7 +521,7 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
if (urb->status)
netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
/* transmission complete interrupt */
netdev->stats.tx_packets++;
@@ -835,7 +835,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
stats->tx_dropped++;
}
} else {
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
/* Slow down tx path */
if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 113e64fcd..784a9002f 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -480,7 +480,7 @@ static void esd_usb2_write_bulk_callback(struct urb *urb)
if (urb->status)
netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
}
static ssize_t show_firmware(struct device *d,
@@ -820,7 +820,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
goto releasebuf;
}
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
/*
* Release our reference to this URB, the USB core will eventually free
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index cbc99d564..acb0c8490 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -1,7 +1,9 @@
-/* CAN driver for Geschwister Schneider USB/CAN devices.
+/* CAN driver for Geschwister Schneider USB/CAN devices
+ * and bytewerk.org candleLight USB CAN interfaces.
*
- * Copyright (C) 2013 Geschwister Schneider Technologie-,
+ * Copyright (C) 2013-2016 Geschwister Schneider Technologie-,
* Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
+ * Copyright (C) 2016 Hubert Denkmair
*
* Many thanks to all socketcan devs!
*
@@ -29,6 +31,9 @@
#define USB_GSUSB_1_VENDOR_ID 0x1d50
#define USB_GSUSB_1_PRODUCT_ID 0x606f
+#define USB_CANDLELIGHT_VENDOR_ID 0x1209
+#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
+
#define GSUSB_ENDPOINT_IN 1
#define GSUSB_ENDPOINT_OUT 2
@@ -950,7 +955,10 @@ static void gs_usb_disconnect(struct usb_interface *intf)
}
static const struct usb_device_id gs_usb_table[] = {
- {USB_DEVICE(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID)},
+ { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
+ USB_GSUSB_1_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
+ USB_CANDLELIGHT_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
@@ -968,5 +976,6 @@ module_usb_driver(gs_usb_driver);
MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
MODULE_DESCRIPTION(
"Socket CAN device driver for Geschwister Schneider Technologie-, "
-"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
+"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces\n"
+"and bytewerk.org candleLight USB CAN interfaces.");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 022bfa13e..6f1f3b675 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -59,11 +59,14 @@
#define USB_CAN_R_PRODUCT_ID 39
#define USB_LEAF_LITE_V2_PRODUCT_ID 288
#define USB_MINI_PCIE_HS_PRODUCT_ID 289
+#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
+#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
+#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
static inline bool kvaser_is_leaf(const struct usb_device_id *id)
{
return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
- id->idProduct <= USB_MINI_PCIE_HS_PRODUCT_ID;
+ id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID;
}
/* Kvaser USBCan-II devices */
@@ -537,6 +540,9 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = KVASER_HAS_TXRX_ERRORS },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
/* USBCANII family IDs */
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 5a2e341a6..bfb91d8fa 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -274,7 +274,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
netdev->stats.tx_bytes += context->data_len;
/* prevent tx timeout */
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
break;
default:
@@ -373,7 +373,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
stats->tx_dropped++;
}
} else {
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
/* slow down tx path */
if (atomic_read(&dev->active_tx_urbs) >= PCAN_USB_MAX_TX_URBS)
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 64c016a99..221f5f011 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1106,7 +1106,7 @@ e100_send_packet(struct sk_buff *skb, struct net_device *dev)
myNextTxDesc->skb = skb;
- dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+ netif_trans_update(dev); /* NETIF_F_LLTX driver :( */
e100_hardware_send_packet(np, buf, skb->len);
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 90ba003d8..200663c43 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -1,10 +1,6 @@
menu "Distributed Switch Architecture drivers"
depends on HAVE_NET_DSA
-config NET_DSA_MV88E6XXX
- tristate
- default n
-
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
depends on NET_DSA
@@ -13,46 +9,13 @@ config NET_DSA_MV88E6060
This enables support for the Marvell 88E6060 ethernet switch
chip.
-config NET_DSA_MV88E6XXX_NEED_PPU
- bool
- default n
-
-config NET_DSA_MV88E6131
- tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
- depends on NET_DSA
- select NET_DSA_MV88E6XXX
- select NET_DSA_MV88E6XXX_NEED_PPU
- select NET_DSA_TAG_DSA
- ---help---
- This enables support for the Marvell 88E6085/6095/6095F/6131
- ethernet switch chips.
-
-config NET_DSA_MV88E6123
- tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
- depends on NET_DSA
- select NET_DSA_MV88E6XXX
- select NET_DSA_TAG_EDSA
- ---help---
- This enables support for the Marvell 88E6123/6161/6165
- ethernet switch chips.
-
-config NET_DSA_MV88E6171
- tristate "Marvell 88E6171/6175/6350/6351 ethernet switch chip support"
- depends on NET_DSA
- select NET_DSA_MV88E6XXX
- select NET_DSA_TAG_EDSA
- ---help---
- This enables support for the Marvell 88E6171/6175/6350/6351
- ethernet switch chips.
-
-config NET_DSA_MV88E6352
- tristate "Marvell 88E6172/6176/6320/6321/6352 ethernet switch chip support"
+config NET_DSA_MV88E6XXX
+ tristate "Marvell 88E6xxx Ethernet switch chip support"
depends on NET_DSA
- select NET_DSA_MV88E6XXX
select NET_DSA_TAG_EDSA
---help---
- This enables support for the Marvell 88E6172, 88E6176, 88E6320,
- 88E6321 and 88E6352 ethernet switch chips.
+ This enables support for most of the Marvell 88E6xxx models of
+ Ethernet switch chips, except 88E6060.
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index a6e09939b..76b751dd9 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,16 +1,3 @@
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o
-mv88e6xxx_drv-y += mv88e6xxx.o
-ifdef CONFIG_NET_DSA_MV88E6123
-mv88e6xxx_drv-y += mv88e6123.o
-endif
-ifdef CONFIG_NET_DSA_MV88E6131
-mv88e6xxx_drv-y += mv88e6131.o
-endif
-ifdef CONFIG_NET_DSA_MV88E6352
-mv88e6xxx_drv-y += mv88e6352.o
-endif
-ifdef CONFIG_NET_DSA_MV88E6171
-mv88e6xxx_drv-y += mv88e6171.o
-endif
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 95944d5e3..10ddd5a5d 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -135,8 +135,17 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
return BCM_SF2_STATS_SIZE;
}
-static char *bcm_sf2_sw_probe(struct device *host_dev, int sw_addr)
+static const char *bcm_sf2_sw_drv_probe(struct device *dsa_dev,
+ struct device *host_dev, int sw_addr,
+ void **_priv)
{
+ struct bcm_sf2_priv *priv;
+
+ priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+ *_priv = priv;
+
return "Broadcom Starfighter 2";
}
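
The new probe signature reflects the 4.7 DSA core rework: rather than having the core allocate priv_size bytes on the driver's behalf (priv_size is dropped from dsa_switch_driver further down), each driver now allocates its own private state against the DSA device and returns it through *_priv, with a NULL name meaning "no switch here". A minimal sketch of the contract, using hypothetical foo_* names:

/* Hypothetical driver following the same drv_probe contract. */
static const char *foo_sw_drv_probe(struct device *dsa_dev,
				    struct device *host_dev, int sw_addr,
				    void **_priv)
{
	struct foo_priv *priv;

	if (!foo_chip_present(host_dev, sw_addr))	/* hypothetical probe */
		return NULL;				/* not our chip */

	priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;
	*_priv = priv;					/* handed to DSA core */

	return "Foo Ethernet Switch";			/* reported name */
}

Tying the allocation to dsa_dev via devm_kzalloc also means the memory is released automatically when the DSA device goes away, so no explicit free path is needed.
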
@@ -151,7 +160,7 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
* the same VLAN.
*/
for (i = 0; i < priv->hw_params.num_ports; i++) {
- if (!((1 << i) & ds->phys_port_mask))
+ if (!((1 << i) & ds->enabled_port_mask))
continue;
reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
@@ -545,12 +554,11 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
priv->port_sts[port].bridge_dev = NULL;
}
-static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
- u8 state)
+static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
+ u8 state)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
u8 hw_state, cur_hw_state;
- int ret = 0;
u32 reg;
reg = core_readl(priv, CORE_G_PCTL_PORT(port));
@@ -574,7 +582,7 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
break;
default:
pr_err("%s: invalid STP state: %d\n", __func__, state);
- return -EINVAL;
+ return;
}
/* Fast-age ARL entries if we are moving a port from Learning or
@@ -584,10 +592,9 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
if (cur_hw_state != hw_state) {
if (cur_hw_state >= G_MISTP_LEARN_STATE &&
hw_state <= G_MISTP_LISTEN_STATE) {
- ret = bcm_sf2_sw_fast_age_port(ds, port);
- if (ret) {
+ if (bcm_sf2_sw_fast_age_port(ds, port)) {
pr_err("%s: fast-ageing failed\n", __func__);
- return ret;
+ return;
}
}
}
@@ -596,8 +603,6 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
reg |= hw_state;
core_writel(priv, reg, CORE_G_PCTL_PORT(port));
-
- return 0;
}
/* Address Resolution Logic routines */
@@ -728,13 +733,14 @@ static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
return 0;
}
-static int bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
+static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
- return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true);
+ if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
+ pr_err("%s: failed to add MAC address\n", __func__);
}
static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
@@ -943,8 +949,8 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
/* All the interesting properties are at the parent device_node
* level
*/
- dn = ds->pd->of_node->parent;
- bcm_sf2_identify_ports(priv, ds->pd->of_node);
+ dn = ds->cd->of_node->parent;
+ bcm_sf2_identify_ports(priv, ds->cd->of_node);
priv->irq0 = irq_of_parse_and_map(dn, 0);
priv->irq1 = irq_of_parse_and_map(dn, 1);
@@ -1003,7 +1009,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
/* Enable all valid ports and disable those unused */
for (port = 0; port < priv->hw_params.num_ports; port++) {
/* IMP port receives special treatment */
- if ((1 << port) & ds->phys_port_mask)
+ if ((1 << port) & ds->enabled_port_mask)
bcm_sf2_port_setup(ds, port, NULL);
else if (dsa_is_cpu_port(ds, port))
bcm_sf2_imp_setup(ds, port);
@@ -1016,11 +1022,12 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
* 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
* that we can use the regular SWITCH_MDIO master controller instead.
*
- * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
- * to have a 1:1 mapping between Port address and PHY address in order
- * to utilize the slave_mii_bus instance to read from Port PHYs. This is
- * not what we want here, so we initialize phys_mii_mask 0 to always
- * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
+ * By default, DSA initializes ds->phys_mii_mask to
+ * ds->enabled_port_mask to have a 1:1 mapping between Port address
+ * and PHY address in order to utilize the slave_mii_bus instance to
+ * read from Port PHYs. This is not what we want here, so we
+ * initialize phys_mii_mask to 0 to always utilize the "master" MDIO
+ * bus backed by the "mdio-unimac" driver.
*/
if (of_machine_is_compatible("brcm,bcm7445d0"))
ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
@@ -1278,7 +1285,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
* bcm_sf2_sw_setup
*/
for (port = 0; port < DSA_MAX_PORTS; port++) {
- if ((1 << port) & ds->phys_port_mask ||
+ if ((1 << port) & ds->enabled_port_mask ||
dsa_is_cpu_port(ds, port))
bcm_sf2_port_disable(ds, port, NULL);
}
@@ -1302,7 +1309,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
bcm_sf2_gphy_enable_set(ds, true);
for (port = 0; port < DSA_MAX_PORTS; port++) {
- if ((1 << port) & ds->phys_port_mask)
+ if ((1 << port) & ds->enabled_port_mask)
bcm_sf2_port_setup(ds, port, NULL);
else if (dsa_is_cpu_port(ds, port))
bcm_sf2_imp_setup(ds, port);
@@ -1365,8 +1372,7 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
static struct dsa_switch_driver bcm_sf2_switch_driver = {
.tag_protocol = DSA_TAG_PROTO_BRCM,
- .priv_size = sizeof(struct bcm_sf2_priv),
- .probe = bcm_sf2_sw_probe,
+ .probe = bcm_sf2_sw_drv_probe,
.setup = bcm_sf2_sw_setup,
.set_addr = bcm_sf2_sw_set_addr,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
@@ -1387,7 +1393,7 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = {
.set_eee = bcm_sf2_sw_set_eee,
.port_bridge_join = bcm_sf2_sw_br_join,
.port_bridge_leave = bcm_sf2_sw_br_leave,
- .port_stp_update = bcm_sf2_sw_br_set_stp_state,
+ .port_stp_state_set = bcm_sf2_sw_br_set_stp_state,
.port_fdb_prepare = bcm_sf2_sw_fdb_prepare,
.port_fdb_add = bcm_sf2_sw_fdb_add,
.port_fdb_del = bcm_sf2_sw_fdb_del,
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 0527f485c..e36b40886 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -19,12 +19,9 @@
static int reg_read(struct dsa_switch *ds, int addr, int reg)
{
- struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
+ struct mv88e6060_priv *priv = ds_to_priv(ds);
- if (bus == NULL)
- return -EINVAL;
-
- return mdiobus_read_nested(bus, ds->pd->sw_addr + addr, reg);
+ return mdiobus_read_nested(priv->bus, priv->sw_addr + addr, reg);
}
#define REG_READ(addr, reg) \
@@ -40,12 +37,9 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg)
static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
{
- struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
-
- if (bus == NULL)
- return -EINVAL;
+ struct mv88e6060_priv *priv = ds_to_priv(ds);
- return mdiobus_write_nested(bus, ds->pd->sw_addr + addr, reg, val);
+ return mdiobus_write_nested(priv->bus, priv->sw_addr + addr, reg, val);
}
#define REG_WRITE(addr, reg, val) \
@@ -57,14 +51,10 @@ static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
return __ret; \
})
-static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
+static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr)
{
- struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
int ret;
- if (bus == NULL)
- return NULL;
-
ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID);
if (ret >= 0) {
if (ret == PORT_SWITCH_ID_6060)
@@ -79,6 +69,27 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
return NULL;
}
+static const char *mv88e6060_drv_probe(struct device *dsa_dev,
+ struct device *host_dev, int sw_addr,
+ void **_priv)
+{
+ struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
+ struct mv88e6060_priv *priv;
+ const char *name;
+
+ name = mv88e6060_get_name(bus, sw_addr);
+ if (name) {
+ priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+ *_priv = priv;
+ priv->bus = bus;
+ priv->sw_addr = sw_addr;
+ }
+
+ return name;
+}
+
static int mv88e6060_switch_reset(struct dsa_switch *ds)
{
int i;
@@ -159,7 +170,7 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
REG_WRITE(addr, PORT_VLAN_MAP,
((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
(dsa_is_cpu_port(ds, p) ?
- ds->phys_port_mask :
+ ds->enabled_port_mask :
BIT(ds->dst->cpu_port)));
/* Port Association Vector: when learning source addresses
@@ -174,8 +185,8 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
static int mv88e6060_setup(struct dsa_switch *ds)
{
- int i;
int ret;
+ int i;
ret = mv88e6060_switch_reset(ds);
if (ret < 0)
@@ -238,7 +249,7 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
static struct dsa_switch_driver mv88e6060_switch_driver = {
.tag_protocol = DSA_TAG_PROTO_TRAILER,
- .probe = mv88e6060_probe,
+ .probe = mv88e6060_drv_probe,
.setup = mv88e6060_setup,
.set_addr = mv88e6060_set_addr,
.phy_read = mv88e6060_phy_read,
diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h
index cc9b2ed4a..10249bd16 100644
--- a/drivers/net/dsa/mv88e6060.h
+++ b/drivers/net/dsa/mv88e6060.h
@@ -108,4 +108,15 @@
#define GLOBAL_ATU_MAC_23 0x0e
#define GLOBAL_ATU_MAC_45 0x0f
+struct mv88e6060_priv {
+ /* MDIO bus and address on bus to use. When in single chip
+ * mode, address is 0, and the switch uses multiple addresses
+ * on the bus. When in multi-chip mode, the switch uses a
+ * single address which contains two registers used for
+ * indirect access to more registers.
+ */
+ struct mii_bus *bus;
+ int sw_addr;
+};
+
#endif
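
The comment in mv88e6060_priv describes the two SMI addressing modes. In multi-chip mode, everything funnels through a command/data register pair at the switch's single bus address; here is a sketch of the write side, with register and bit names following the mv88e6xxx.h conventions (the busy-bit polling a real implementation needs before and after is omitted):

/* Indirect (multi-chip) register write: the internal (addr, reg)
 * target is encoded into a command word after the data word has
 * been staged in SMI_DATA.
 */
static int smi_indirect_write(struct mii_bus *bus, int sw_addr,
			      int addr, int reg, u16 val)
{
	int err;

	err = mdiobus_write(bus, sw_addr, SMI_DATA, val);
	if (err)
		return err;

	return mdiobus_write(bus, sw_addr, SMI_CMD,
			     SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
}
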
diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c
deleted file mode 100644
index 69a6f79dc..000000000
--- a/drivers/net/dsa/mv88e6123.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * net/dsa/mv88e6123_61_65.c - Marvell 88e6123/6161/6165 switch chip support
- * Copyright (c) 2008-2009 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6123_table[] = {
- { PORT_SWITCH_ID_6123, "Marvell 88E6123" },
- { PORT_SWITCH_ID_6123_A1, "Marvell 88E6123 (A1)" },
- { PORT_SWITCH_ID_6123_A2, "Marvell 88E6123 (A2)" },
- { PORT_SWITCH_ID_6161, "Marvell 88E6161" },
- { PORT_SWITCH_ID_6161_A1, "Marvell 88E6161 (A1)" },
- { PORT_SWITCH_ID_6161_A2, "Marvell 88E6161 (A2)" },
- { PORT_SWITCH_ID_6165, "Marvell 88E6165" },
- { PORT_SWITCH_ID_6165_A1, "Marvell 88E6165 (A1)" },
- { PORT_SWITCH_ID_6165_A2, "Marvell 88e6165 (A2)" },
-};
-
-static char *mv88e6123_probe(struct device *host_dev, int sw_addr)
-{
- return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6123_table,
- ARRAY_SIZE(mv88e6123_table));
-}
-
-static int mv88e6123_setup_global(struct dsa_switch *ds)
-{
- u32 upstream_port = dsa_upstream_port(ds);
- int ret;
- u32 reg;
-
- ret = mv88e6xxx_setup_global(ds);
- if (ret)
- return ret;
-
- /* Disable the PHY polling unit (since there won't be any
- * external PHYs to poll), don't discard packets with
- * excessive collisions, and mask all interrupt sources.
- */
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, 0x0000);
-
- /* Configure the upstream port, and configure the upstream
- * port as the port to which ingress and egress monitor frames
- * are to be sent.
- */
- reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
- REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
- /* Disable remote management for now, and set the switch's
- * DSA device number.
- */
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
-
- return 0;
-}
-
-static int mv88e6123_setup(struct dsa_switch *ds)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
-
- ret = mv88e6xxx_setup_common(ds);
- if (ret < 0)
- return ret;
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6123:
- ps->num_ports = 3;
- break;
- case PORT_SWITCH_ID_6161:
- case PORT_SWITCH_ID_6165:
- ps->num_ports = 6;
- break;
- default:
- return -ENODEV;
- }
-
- ret = mv88e6xxx_switch_reset(ds, false);
- if (ret < 0)
- return ret;
-
- ret = mv88e6123_setup_global(ds);
- if (ret < 0)
- return ret;
-
- return mv88e6xxx_setup_ports(ds);
-}
-
-struct dsa_switch_driver mv88e6123_switch_driver = {
- .tag_protocol = DSA_TAG_PROTO_EDSA,
- .priv_size = sizeof(struct mv88e6xxx_priv_state),
- .probe = mv88e6123_probe,
- .setup = mv88e6123_setup,
- .set_addr = mv88e6xxx_set_addr_indirect,
- .phy_read = mv88e6xxx_phy_read,
- .phy_write = mv88e6xxx_phy_write,
- .get_strings = mv88e6xxx_get_strings,
- .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
- .get_sset_count = mv88e6xxx_get_sset_count,
- .adjust_link = mv88e6xxx_adjust_link,
-#ifdef CONFIG_NET_DSA_HWMON
- .get_temp = mv88e6xxx_get_temp,
-#endif
- .get_regs_len = mv88e6xxx_get_regs_len,
- .get_regs = mv88e6xxx_get_regs,
-};
-
-MODULE_ALIAS("platform:mv88e6123");
-MODULE_ALIAS("platform:mv88e6161");
-MODULE_ALIAS("platform:mv88e6165");
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
deleted file mode 100644
index a92ca651c..000000000
--- a/drivers/net/dsa/mv88e6131.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * net/dsa/mv88e6131.c - Marvell 88e6095/6095f/6131 switch chip support
- * Copyright (c) 2008-2009 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6131_table[] = {
- { PORT_SWITCH_ID_6085, "Marvell 88E6085" },
- { PORT_SWITCH_ID_6095, "Marvell 88E6095/88E6095F" },
- { PORT_SWITCH_ID_6131, "Marvell 88E6131" },
- { PORT_SWITCH_ID_6131_B2, "Marvell 88E6131 (B2)" },
- { PORT_SWITCH_ID_6185, "Marvell 88E6185" },
-};
-
-static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
-{
- return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6131_table,
- ARRAY_SIZE(mv88e6131_table));
-}
-
-static int mv88e6131_setup_global(struct dsa_switch *ds)
-{
- u32 upstream_port = dsa_upstream_port(ds);
- int ret;
- u32 reg;
-
- ret = mv88e6xxx_setup_global(ds);
- if (ret)
- return ret;
-
- /* Enable the PHY polling unit, don't discard packets with
- * excessive collisions, use a weighted fair queueing scheme
- * to arbitrate between packet queues, set the maximum frame
- * size to 1632, and mask all interrupt sources.
- */
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
- GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_MAX_FRAME_1632);
-
- /* Set the VLAN ethertype to 0x8100. */
- REG_WRITE(REG_GLOBAL, GLOBAL_CORE_TAG_TYPE, 0x8100);
-
- /* Disable ARP mirroring, and configure the upstream port as
- * the port to which ingress and egress monitor frames are to
- * be sent.
- */
- reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
- GLOBAL_MONITOR_CONTROL_ARP_DISABLED;
- REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
- /* Disable cascade port functionality unless this device
- * is used in a cascade configuration, and set the switch's
- * DSA device number.
- */
- if (ds->dst->pd->nr_chips > 1)
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
- GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
- (ds->index & 0x1f));
- else
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
- GLOBAL_CONTROL_2_NO_CASCADE |
- (ds->index & 0x1f));
-
- /* Force the priority of IGMP/MLD snoop frames and ARP frames
- * to the highest setting.
- */
- REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
- GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP |
- 7 << GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT |
- GLOBAL2_PRIO_OVERRIDE_FORCE_ARP |
- 7 << GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT);
-
- return 0;
-}
-
-static int mv88e6131_setup(struct dsa_switch *ds)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
-
- ret = mv88e6xxx_setup_common(ds);
- if (ret < 0)
- return ret;
-
- mv88e6xxx_ppu_state_init(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6085:
- case PORT_SWITCH_ID_6185:
- ps->num_ports = 10;
- break;
- case PORT_SWITCH_ID_6095:
- ps->num_ports = 11;
- break;
- case PORT_SWITCH_ID_6131:
- case PORT_SWITCH_ID_6131_B2:
- ps->num_ports = 8;
- break;
- default:
- return -ENODEV;
- }
-
- ret = mv88e6xxx_switch_reset(ds, false);
- if (ret < 0)
- return ret;
-
- ret = mv88e6131_setup_global(ds);
- if (ret < 0)
- return ret;
-
- return mv88e6xxx_setup_ports(ds);
-}
-
-static int mv88e6131_port_to_phy_addr(struct dsa_switch *ds, int port)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- if (port >= 0 && port < ps->num_ports)
- return port;
-
- return -EINVAL;
-}
-
-static int
-mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum)
-{
- int addr = mv88e6131_port_to_phy_addr(ds, port);
-
- if (addr < 0)
- return addr;
-
- return mv88e6xxx_phy_read_ppu(ds, addr, regnum);
-}
-
-static int
-mv88e6131_phy_write(struct dsa_switch *ds,
- int port, int regnum, u16 val)
-{
- int addr = mv88e6131_port_to_phy_addr(ds, port);
-
- if (addr < 0)
- return addr;
-
- return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
-}
-
-struct dsa_switch_driver mv88e6131_switch_driver = {
- .tag_protocol = DSA_TAG_PROTO_DSA,
- .priv_size = sizeof(struct mv88e6xxx_priv_state),
- .probe = mv88e6131_probe,
- .setup = mv88e6131_setup,
- .set_addr = mv88e6xxx_set_addr_direct,
- .phy_read = mv88e6131_phy_read,
- .phy_write = mv88e6131_phy_write,
- .get_strings = mv88e6xxx_get_strings,
- .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
- .get_sset_count = mv88e6xxx_get_sset_count,
- .adjust_link = mv88e6xxx_adjust_link,
-};
-
-MODULE_ALIAS("platform:mv88e6085");
-MODULE_ALIAS("platform:mv88e6095");
-MODULE_ALIAS("platform:mv88e6095f");
-MODULE_ALIAS("platform:mv88e6131");
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
deleted file mode 100644
index c0164b98f..000000000
--- a/drivers/net/dsa/mv88e6171.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support
- * Copyright (c) 2008-2009 Marvell Semiconductor
- * Copyright (c) 2014 Claudio Leite <leitec@staticky.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6171_table[] = {
- { PORT_SWITCH_ID_6171, "Marvell 88E6171" },
- { PORT_SWITCH_ID_6175, "Marvell 88E6175" },
- { PORT_SWITCH_ID_6350, "Marvell 88E6350" },
- { PORT_SWITCH_ID_6351, "Marvell 88E6351" },
-};
-
-static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
-{
- return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6171_table,
- ARRAY_SIZE(mv88e6171_table));
-}
-
-static int mv88e6171_setup_global(struct dsa_switch *ds)
-{
- u32 upstream_port = dsa_upstream_port(ds);
- int ret;
- u32 reg;
-
- ret = mv88e6xxx_setup_global(ds);
- if (ret)
- return ret;
-
- /* Discard packets with excessive collisions, mask all
- * interrupt sources, enable PPU.
- */
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
- GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
-
- /* Configure the upstream port, and configure the upstream
- * port as the port to which ingress and egress monitor frames
- * are to be sent.
- */
- reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT;
- REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
- /* Disable remote management for now, and set the switch's
- * DSA device number.
- */
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
-
- return 0;
-}
-
-static int mv88e6171_setup(struct dsa_switch *ds)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
-
- ret = mv88e6xxx_setup_common(ds);
- if (ret < 0)
- return ret;
-
- ps->num_ports = 7;
-
- ret = mv88e6xxx_switch_reset(ds, true);
- if (ret < 0)
- return ret;
-
- ret = mv88e6171_setup_global(ds);
- if (ret < 0)
- return ret;
-
- return mv88e6xxx_setup_ports(ds);
-}
-
-struct dsa_switch_driver mv88e6171_switch_driver = {
- .tag_protocol = DSA_TAG_PROTO_EDSA,
- .priv_size = sizeof(struct mv88e6xxx_priv_state),
- .probe = mv88e6171_probe,
- .setup = mv88e6171_setup,
- .set_addr = mv88e6xxx_set_addr_indirect,
- .phy_read = mv88e6xxx_phy_read_indirect,
- .phy_write = mv88e6xxx_phy_write_indirect,
- .get_strings = mv88e6xxx_get_strings,
- .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
- .get_sset_count = mv88e6xxx_get_sset_count,
- .adjust_link = mv88e6xxx_adjust_link,
-#ifdef CONFIG_NET_DSA_HWMON
- .get_temp = mv88e6xxx_get_temp,
-#endif
- .get_regs_len = mv88e6xxx_get_regs_len,
- .get_regs = mv88e6xxx_get_regs,
- .port_bridge_join = mv88e6xxx_port_bridge_join,
- .port_bridge_leave = mv88e6xxx_port_bridge_leave,
- .port_stp_update = mv88e6xxx_port_stp_update,
- .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
- .port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
- .port_vlan_add = mv88e6xxx_port_vlan_add,
- .port_vlan_del = mv88e6xxx_port_vlan_del,
- .port_vlan_dump = mv88e6xxx_port_vlan_dump,
- .port_fdb_prepare = mv88e6xxx_port_fdb_prepare,
- .port_fdb_add = mv88e6xxx_port_fdb_add,
- .port_fdb_del = mv88e6xxx_port_fdb_del,
- .port_fdb_dump = mv88e6xxx_port_fdb_dump,
-};
-
-MODULE_ALIAS("platform:mv88e6171");
-MODULE_ALIAS("platform:mv88e6175");
-MODULE_ALIAS("platform:mv88e6350");
-MODULE_ALIAS("platform:mv88e6351");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
deleted file mode 100644
index 5f528abc8..000000000
--- a/drivers/net/dsa/mv88e6352.c
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * net/dsa/mv88e6352.c - Marvell 88e6352 switch chip support
- *
- * Copyright (c) 2014 Guenter Roeck
- *
- * Derived from mv88e6123_61_65.c
- * Copyright (c) 2008-2009 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/platform_device.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6352_table[] = {
- { PORT_SWITCH_ID_6172, "Marvell 88E6172" },
- { PORT_SWITCH_ID_6176, "Marvell 88E6176" },
- { PORT_SWITCH_ID_6240, "Marvell 88E6240" },
- { PORT_SWITCH_ID_6320, "Marvell 88E6320" },
- { PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" },
- { PORT_SWITCH_ID_6320_A2, "Marvell 88e6320 (A2)" },
- { PORT_SWITCH_ID_6321, "Marvell 88E6321" },
- { PORT_SWITCH_ID_6321_A1, "Marvell 88E6321 (A1)" },
- { PORT_SWITCH_ID_6321_A2, "Marvell 88e6321 (A2)" },
- { PORT_SWITCH_ID_6352, "Marvell 88E6352" },
- { PORT_SWITCH_ID_6352_A0, "Marvell 88E6352 (A0)" },
- { PORT_SWITCH_ID_6352_A1, "Marvell 88E6352 (A1)" },
-};
-
-static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
-{
- return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6352_table,
- ARRAY_SIZE(mv88e6352_table));
-}
-
-static int mv88e6352_setup_global(struct dsa_switch *ds)
-{
- u32 upstream_port = dsa_upstream_port(ds);
- int ret;
- u32 reg;
-
- ret = mv88e6xxx_setup_global(ds);
- if (ret)
- return ret;
-
- /* Discard packets with excessive collisions,
- * mask all interrupt sources, enable PPU (bit 14, undocumented).
- */
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
- GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
-
- /* Configure the upstream port, and configure the upstream
- * port as the port to which ingress and egress monitor frames
- * are to be sent.
- */
- reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
- REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
- /* Disable remote management for now, and set the switch's
- * DSA device number.
- */
- REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
-
- return 0;
-}
-
-static int mv88e6352_setup(struct dsa_switch *ds)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
-
- ret = mv88e6xxx_setup_common(ds);
- if (ret < 0)
- return ret;
-
- ps->num_ports = 7;
-
- mutex_init(&ps->eeprom_mutex);
-
- ret = mv88e6xxx_switch_reset(ds, true);
- if (ret < 0)
- return ret;
-
- ret = mv88e6352_setup_global(ds);
- if (ret < 0)
- return ret;
-
- return mv88e6xxx_setup_ports(ds);
-}
-
-static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
-
- mutex_lock(&ps->eeprom_mutex);
-
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
- GLOBAL2_EEPROM_OP_READ |
- (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
- if (ret < 0)
- goto error;
-
- ret = mv88e6xxx_eeprom_busy_wait(ds);
- if (ret < 0)
- goto error;
-
- ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
-error:
- mutex_unlock(&ps->eeprom_mutex);
- return ret;
-}
-
-static int mv88e6352_get_eeprom(struct dsa_switch *ds,
- struct ethtool_eeprom *eeprom, u8 *data)
-{
- int offset;
- int len;
- int ret;
-
- offset = eeprom->offset;
- len = eeprom->len;
- eeprom->len = 0;
-
- eeprom->magic = 0xc3ec4951;
-
- ret = mv88e6xxx_eeprom_load_wait(ds);
- if (ret < 0)
- return ret;
-
- if (offset & 1) {
- int word;
-
- word = mv88e6352_read_eeprom_word(ds, offset >> 1);
- if (word < 0)
- return word;
-
- *data++ = (word >> 8) & 0xff;
-
- offset++;
- len--;
- eeprom->len++;
- }
-
- while (len >= 2) {
- int word;
-
- word = mv88e6352_read_eeprom_word(ds, offset >> 1);
- if (word < 0)
- return word;
-
- *data++ = word & 0xff;
- *data++ = (word >> 8) & 0xff;
-
- offset += 2;
- len -= 2;
- eeprom->len += 2;
- }
-
- if (len) {
- int word;
-
- word = mv88e6352_read_eeprom_word(ds, offset >> 1);
- if (word < 0)
- return word;
-
- *data++ = word & 0xff;
-
- offset++;
- len--;
- eeprom->len++;
- }
-
- return 0;
-}
-
-static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds)
-{
- int ret;
-
- ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
- if (ret < 0)
- return ret;
-
- if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
- return -EROFS;
-
- return 0;
-}
-
-static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr,
- u16 data)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
-
- mutex_lock(&ps->eeprom_mutex);
-
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
- if (ret < 0)
- goto error;
-
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
- GLOBAL2_EEPROM_OP_WRITE |
- (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
- if (ret < 0)
- goto error;
-
- ret = mv88e6xxx_eeprom_busy_wait(ds);
-error:
- mutex_unlock(&ps->eeprom_mutex);
- return ret;
-}
-
-static int mv88e6352_set_eeprom(struct dsa_switch *ds,
- struct ethtool_eeprom *eeprom, u8 *data)
-{
- int offset;
- int ret;
- int len;
-
- if (eeprom->magic != 0xc3ec4951)
- return -EINVAL;
-
- ret = mv88e6352_eeprom_is_readonly(ds);
- if (ret)
- return ret;
-
- offset = eeprom->offset;
- len = eeprom->len;
- eeprom->len = 0;
-
- ret = mv88e6xxx_eeprom_load_wait(ds);
- if (ret < 0)
- return ret;
-
- if (offset & 1) {
- int word;
-
- word = mv88e6352_read_eeprom_word(ds, offset >> 1);
- if (word < 0)
- return word;
-
- word = (*data++ << 8) | (word & 0xff);
-
- ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
- if (ret < 0)
- return ret;
-
- offset++;
- len--;
- eeprom->len++;
- }
-
- while (len >= 2) {
- int word;
-
- word = *data++;
- word |= *data++ << 8;
-
- ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
- if (ret < 0)
- return ret;
-
- offset += 2;
- len -= 2;
- eeprom->len += 2;
- }
-
- if (len) {
- int word;
-
- word = mv88e6352_read_eeprom_word(ds, offset >> 1);
- if (word < 0)
- return word;
-
- word = (word & 0xff00) | *data++;
-
- ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
- if (ret < 0)
- return ret;
-
- offset++;
- len--;
- eeprom->len++;
- }
-
- return 0;
-}
-
-struct dsa_switch_driver mv88e6352_switch_driver = {
- .tag_protocol = DSA_TAG_PROTO_EDSA,
- .priv_size = sizeof(struct mv88e6xxx_priv_state),
- .probe = mv88e6352_probe,
- .setup = mv88e6352_setup,
- .set_addr = mv88e6xxx_set_addr_indirect,
- .phy_read = mv88e6xxx_phy_read_indirect,
- .phy_write = mv88e6xxx_phy_write_indirect,
- .get_strings = mv88e6xxx_get_strings,
- .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
- .get_sset_count = mv88e6xxx_get_sset_count,
- .adjust_link = mv88e6xxx_adjust_link,
- .set_eee = mv88e6xxx_set_eee,
- .get_eee = mv88e6xxx_get_eee,
-#ifdef CONFIG_NET_DSA_HWMON
- .get_temp = mv88e6xxx_get_temp,
- .get_temp_limit = mv88e6xxx_get_temp_limit,
- .set_temp_limit = mv88e6xxx_set_temp_limit,
- .get_temp_alarm = mv88e6xxx_get_temp_alarm,
-#endif
- .get_eeprom = mv88e6352_get_eeprom,
- .set_eeprom = mv88e6352_set_eeprom,
- .get_regs_len = mv88e6xxx_get_regs_len,
- .get_regs = mv88e6xxx_get_regs,
- .port_bridge_join = mv88e6xxx_port_bridge_join,
- .port_bridge_leave = mv88e6xxx_port_bridge_leave,
- .port_stp_update = mv88e6xxx_port_stp_update,
- .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
- .port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
- .port_vlan_add = mv88e6xxx_port_vlan_add,
- .port_vlan_del = mv88e6xxx_port_vlan_del,
- .port_vlan_dump = mv88e6xxx_port_vlan_dump,
- .port_fdb_prepare = mv88e6xxx_port_fdb_prepare,
- .port_fdb_add = mv88e6xxx_port_fdb_add,
- .port_fdb_del = mv88e6xxx_port_fdb_del,
- .port_fdb_dump = mv88e6xxx_port_fdb_dump,
-};
-
-MODULE_ALIAS("platform:mv88e6172");
-MODULE_ALIAS("platform:mv88e6176");
-MODULE_ALIAS("platform:mv88e6320");
-MODULE_ALIAS("platform:mv88e6321");
-MODULE_ALIAS("platform:mv88e6352");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 5e572b351..ba9dfc942 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -5,6 +5,8 @@
* Copyright (c) 2015 CMC Electronics, Inc.
* Added support for VLAN Table Unit operations
*
+ * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -17,6 +19,7 @@
#include <linux/if_bridge.h>
#include <linux/jiffies.h>
#include <linux/list.h>
+#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/gpio/consumer.h>
@@ -25,12 +28,10 @@
#include <net/switchdev.h>
#include "mv88e6xxx.h"
-static void assert_smi_lock(struct dsa_switch *ds)
+static void assert_smi_lock(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
if (unlikely(!mutex_is_locked(&ps->smi_mutex))) {
- dev_err(ds->master_dev, "SMI lock not held!\n");
+ dev_err(ps->dev, "SMI lock not held!\n");
dump_stack();
}
}
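
assert_smi_lock() is a best-effort assertion: mutex_is_locked() only says *somebody* holds smi_mutex, not that the current task does, so it catches forgotten locking but not cross-task misuse, and dump_stack() pinpoints the offender without stopping the machine. With lockdep enabled, the stronger current-task check is a one-liner:

	/* Verifies the *current* task holds the mutex (lockdep builds). */
	lockdep_assert_held(&ps->smi_mutex);
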
@@ -92,33 +93,29 @@ static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr,
return ret & 0xffff;
}
-static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+static int _mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps,
+ int addr, int reg)
{
- struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
int ret;
- assert_smi_lock(ds);
-
- if (bus == NULL)
- return -EINVAL;
+ assert_smi_lock(ps);
- ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
+ ret = __mv88e6xxx_reg_read(ps->bus, ps->sw_addr, addr, reg);
if (ret < 0)
return ret;
- dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+ dev_dbg(ps->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
addr, reg, ret);
return ret;
}
-int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_reg_read(ds, addr, reg);
+ ret = _mv88e6xxx_reg_read(ps, addr, reg);
mutex_unlock(&ps->smi_mutex);
return ret;
@@ -156,58 +153,71 @@ static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
return 0;
}
-static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
- u16 val)
+static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
+ int reg, u16 val)
{
- struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
-
- assert_smi_lock(ds);
+ assert_smi_lock(ps);
- if (bus == NULL)
- return -EINVAL;
-
- dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+ dev_dbg(ps->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
addr, reg, val);
- return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
+ return __mv88e6xxx_reg_write(ps->bus, ps->sw_addr, addr, reg, val);
}
-int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
+ int reg, u16 val)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
+ ret = _mv88e6xxx_reg_write(ps, addr, reg, val);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
+static int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
{
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int err;
- return 0;
+ err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_01,
+ (addr[0] << 8) | addr[1]);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_23,
+ (addr[2] << 8) | addr[3]);
+ if (err)
+ return err;
+
+ return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_45,
+ (addr[4] << 8) | addr[5]);
}
-int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
+static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
{
- int i;
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
+ int i;
for (i = 0; i < 6; i++) {
int j;
/* Write the MAC address byte. */
- REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
- GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
+ ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
+ GLOBAL2_SWITCH_MAC_BUSY |
+ (i << 8) | addr[i]);
+ if (ret)
+ return ret;
/* Wait for the write to complete. */
for (j = 0; j < 16; j++) {
- ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
+ ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2,
+ GLOBAL2_SWITCH_MAC);
+ if (ret < 0)
+ return ret;
+
if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
break;
}
@@ -218,34 +228,52 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
return 0;
}
-static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
+int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SWITCH_MAC))
+ return mv88e6xxx_set_addr_indirect(ds, addr);
+ else
+ return mv88e6xxx_set_addr_direct(ds, addr);
+}
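
mv88e6xxx_set_addr() replaces per-chip set_addr ops with a single dispatcher keyed on a capability flag from the chip's info table. Assuming the helper this series adds to mv88e6xxx.h, the flag test is a plain mask comparison:

/* Sketch of the capability test used above (see mv88e6xxx.h). */
static bool mv88e6xxx_has(struct mv88e6xxx_priv_state *ps,
			  unsigned long flags)
{
	return (ps->info->flags & flags) == flags;
}

So chips whose info entry carries MV88E6XXX_FLAG_SWITCH_MAC take the indirect (GLOBAL2_SWITCH_MAC) path, and everything else falls back to the three direct GLOBAL_MAC_* writes.
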
+
+static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state *ps, int addr,
+ int regnum)
{
if (addr >= 0)
- return _mv88e6xxx_reg_read(ds, addr, regnum);
+ return _mv88e6xxx_reg_read(ps, addr, regnum);
return 0xffff;
}
-static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
- u16 val)
+static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state *ps, int addr,
+ int regnum, u16 val)
{
if (addr >= 0)
- return _mv88e6xxx_reg_write(ds, addr, regnum, val);
+ return _mv88e6xxx_reg_write(ps, addr, regnum, val);
return 0;
}
-#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
-static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
+static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps)
{
int ret;
unsigned long timeout;
- ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
- ret & ~GLOBAL_CONTROL_PPU_ENABLE);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
+ ret & ~GLOBAL_CONTROL_PPU_ENABLE);
+ if (ret)
+ return ret;
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
+ if (ret < 0)
+ return ret;
+
usleep_range(1000, 2000);
if ((ret & GLOBAL_STATUS_PPU_MASK) !=
GLOBAL_STATUS_PPU_POLLING)
@@ -255,17 +283,26 @@ static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
return -ETIMEDOUT;
}
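
Both PPU paths use the same bounded-poll idiom: compute a jiffies deadline, re-read the status register with usleep_range() pacing, and give up with -ETIMEDOUT. The general shape, reduced to an illustrative helper (the name and the one-second budget are just for the sketch):

/* Illustrative bounded poll of GLOBAL_STATUS against a mask. */
static int poll_global_status(struct mv88e6xxx_priv_state *ps,
			      u16 mask, u16 want)
{
	unsigned long timeout = jiffies + HZ;	/* ~1s budget */
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
		if (ret < 0)
			return ret;
		if ((ret & mask) == want)
			return 0;
		usleep_range(1000, 2000);	/* pace the SMI bus */
	}

	return -ETIMEDOUT;
}
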
-static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
+static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps)
{
- int ret;
+ int ret, err;
unsigned long timeout;
- ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
+ ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
+ ret | GLOBAL_CONTROL_PPU_ENABLE);
+ if (err)
+ return err;
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
+ ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
+ if (ret < 0)
+ return ret;
+
usleep_range(1000, 2000);
if ((ret & GLOBAL_STATUS_PPU_MASK) ==
GLOBAL_STATUS_PPU_POLLING)
@@ -281,9 +318,7 @@ static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
if (mutex_trylock(&ps->ppu_mutex)) {
- struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
-
- if (mv88e6xxx_ppu_enable(ds) == 0)
+ if (mv88e6xxx_ppu_enable(ps) == 0)
ps->ppu_disabled = 0;
mutex_unlock(&ps->ppu_mutex);
}
@@ -296,9 +331,8 @@ static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
schedule_work(&ps->ppu_work);
}
-static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
+static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->ppu_mutex);
@@ -309,7 +343,7 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
* it.
*/
if (!ps->ppu_disabled) {
- ret = mv88e6xxx_ppu_disable(ds);
+ ret = mv88e6xxx_ppu_disable(ps);
if (ret < 0) {
mutex_unlock(&ps->ppu_mutex);
return ret;
@@ -323,19 +357,15 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
return ret;
}
-static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
+static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
/* Schedule a timer to re-enable the PHY polling unit. */
mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
mutex_unlock(&ps->ppu_mutex);
}
-void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
+void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
mutex_init(&ps->ppu_mutex);
INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
init_timer(&ps->ppu_timer);
@@ -343,142 +373,86 @@ void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
}
-int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
+static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state *ps, int addr,
+ int regnum)
{
int ret;
- ret = mv88e6xxx_ppu_access_get(ds);
+ ret = mv88e6xxx_ppu_access_get(ps);
if (ret >= 0) {
- ret = mv88e6xxx_reg_read(ds, addr, regnum);
- mv88e6xxx_ppu_access_put(ds);
+ ret = _mv88e6xxx_reg_read(ps, addr, regnum);
+ mv88e6xxx_ppu_access_put(ps);
}
return ret;
}
-int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
- int regnum, u16 val)
+static int mv88e6xxx_phy_write_ppu(struct mv88e6xxx_priv_state *ps, int addr,
+ int regnum, u16 val)
{
int ret;
- ret = mv88e6xxx_ppu_access_get(ds);
+ ret = mv88e6xxx_ppu_access_get(ps);
if (ret >= 0) {
- ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
- mv88e6xxx_ppu_access_put(ds);
+ ret = _mv88e6xxx_reg_write(ps, addr, regnum, val);
+ mv88e6xxx_ppu_access_put(ps);
}
return ret;
}
-#endif
-static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6031:
- case PORT_SWITCH_ID_6061:
- case PORT_SWITCH_ID_6035:
- case PORT_SWITCH_ID_6065:
- return true;
- }
- return false;
+ return ps->info->family == MV88E6XXX_FAMILY_6065;
}
-static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6092:
- case PORT_SWITCH_ID_6095:
- return true;
- }
- return false;
+ return ps->info->family == MV88E6XXX_FAMILY_6095;
}
-static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6046:
- case PORT_SWITCH_ID_6085:
- case PORT_SWITCH_ID_6096:
- case PORT_SWITCH_ID_6097:
- return true;
- }
- return false;
+ return ps->info->family == MV88E6XXX_FAMILY_6097;
}
-static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6123:
- case PORT_SWITCH_ID_6161:
- case PORT_SWITCH_ID_6165:
- return true;
- }
- return false;
+ return ps->info->family == MV88E6XXX_FAMILY_6165;
}
-static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6121:
- case PORT_SWITCH_ID_6122:
- case PORT_SWITCH_ID_6152:
- case PORT_SWITCH_ID_6155:
- case PORT_SWITCH_ID_6182:
- case PORT_SWITCH_ID_6185:
- case PORT_SWITCH_ID_6108:
- case PORT_SWITCH_ID_6131:
- return true;
- }
- return false;
+ return ps->info->family == MV88E6XXX_FAMILY_6185;
}
-static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6320:
- case PORT_SWITCH_ID_6321:
- return true;
- }
- return false;
+ return ps->info->family == MV88E6XXX_FAMILY_6320;
}
-static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ return ps->info->family == MV88E6XXX_FAMILY_6351;
+}
- switch (ps->id) {
- case PORT_SWITCH_ID_6171:
- case PORT_SWITCH_ID_6175:
- case PORT_SWITCH_ID_6350:
- case PORT_SWITCH_ID_6351:
- return true;
- }
- return false;
+static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state *ps)
+{
+ return ps->info->family == MV88E6XXX_FAMILY_6352;
}
-static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
+static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ return ps->info->num_databases;
+}
- switch (ps->id) {
- case PORT_SWITCH_ID_6172:
- case PORT_SWITCH_ID_6176:
- case PORT_SWITCH_ID_6240:
- case PORT_SWITCH_ID_6352:
+static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps)
+{
+ /* Does the device have dedicated FID registers for ATU and VTU ops? */
+ if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
+ mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps))
return true;
- }
+
return false;
}
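
This run of hunks is the heart of the consolidation: every per-chip switch (ps->id) table collapses into a field comparison on ps->info, which points into a static table describing each supported chip. A hedged sketch of what one table entry looks like in this series (field names per mv88e6xxx.h, values illustrative):

/* Sketch of an mv88e6xxx info-table entry; the real table lives in
 * mv88e6xxx.c and is matched against the product number at probe.
 */
static const struct mv88e6xxx_info example_entry = {
	.prod_num	= PORT_SWITCH_ID_PROD_NUM_6352,
	.family		= MV88E6XXX_FAMILY_6352,
	.name		= "Marvell 88E6352",
	.num_databases	= 4096,
	.num_ports	= 7,
	.flags		= MV88E6XXX_FLAGS_FAMILY_6352,
};

Predicates like mv88e6xxx_6352_family() and capability checks like mv88e6xxx_has_fid_reg() then become cheap reads of this table instead of ever-growing ID switches.
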
@@ -486,8 +460,8 @@ static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
* phy. However, in the case of a fixed link phy, we force the port
* settings from the fixed link settings.
*/
-void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
u32 reg;
@@ -498,7 +472,7 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
if (ret < 0)
goto out;
@@ -512,7 +486,7 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
if (phydev->link)
reg |= PORT_PCS_CTRL_LINK_UP;
- if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
+ if (mv88e6xxx_6065_family(ps) && phydev->speed > SPEED_100)
goto out;
switch (phydev->speed) {
@@ -534,8 +508,8 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
if (phydev->duplex == DUPLEX_FULL)
reg |= PORT_PCS_CTRL_DUPLEX_FULL;
- if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
- (port >= ps->num_ports - 2)) {
+ if ((mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps)) &&
+ (port >= ps->info->num_ports - 2)) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
@@ -544,19 +518,19 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
}
- _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
+ _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg);
out:
mutex_unlock(&ps->smi_mutex);
}
-static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
+static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps)
{
int ret;
int i;
for (i = 0; i < 10; i++) {
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_OP);
if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
return 0;
}
@@ -564,52 +538,54 @@ static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
return -ETIMEDOUT;
}
-static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
+static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state *ps,
+ int port)
{
int ret;
- if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+ if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
port = (port + 1) << 5;
/* Snapshot the hardware statistics counters for this port. */
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
GLOBAL_STATS_OP_CAPTURE_PORT |
GLOBAL_STATS_OP_HIST_RX_TX | port);
if (ret < 0)
return ret;
/* Wait for the snapshotting to complete. */
- ret = _mv88e6xxx_stats_wait(ds);
+ ret = _mv88e6xxx_stats_wait(ps);
if (ret < 0)
return ret;
return 0;
}
-static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
+static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps,
+ int stat, u32 *val)
{
u32 _val;
int ret;
*val = 0;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
GLOBAL_STATS_OP_READ_CAPTURED |
GLOBAL_STATS_OP_HIST_RX_TX | stat);
if (ret < 0)
return;
- ret = _mv88e6xxx_stats_wait(ds);
+ ret = _mv88e6xxx_stats_wait(ps);
if (ret < 0)
return;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
if (ret < 0)
return;
_val = ret << 16;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
if (ret < 0)
return;
@@ -678,26 +654,26 @@ static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
{ "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
};
-static bool mv88e6xxx_has_stat(struct dsa_switch *ds,
+static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_hw_stat *stat)
{
switch (stat->type) {
case BANK0:
return true;
case BANK1:
- return mv88e6xxx_6320_family(ds);
+ return mv88e6xxx_6320_family(ps);
case PORT:
- return mv88e6xxx_6095_family(ds) ||
- mv88e6xxx_6185_family(ds) ||
- mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6165_family(ds) ||
- mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6352_family(ds);
+ return mv88e6xxx_6095_family(ps) ||
+ mv88e6xxx_6185_family(ps) ||
+ mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6165_family(ps) ||
+ mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6352_family(ps);
}
return false;
}
-static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
+static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_hw_stat *s,
int port)
{
@@ -708,13 +684,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
switch (s->type) {
case PORT:
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), s->reg);
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), s->reg);
if (ret < 0)
return UINT64_MAX;
low = ret;
if (s->sizeof_stat == 4) {
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port),
s->reg + 1);
if (ret < 0)
return UINT64_MAX;
@@ -723,22 +699,24 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
break;
case BANK0:
case BANK1:
- _mv88e6xxx_stats_read(ds, s->reg, &low);
+ _mv88e6xxx_stats_read(ps, s->reg, &low);
if (s->sizeof_stat == 8)
- _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
+ _mv88e6xxx_stats_read(ps, s->reg + 1, &high);
}
value = (((u64)high) << 16) | low;
return value;
}
-void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
+ uint8_t *data)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct mv88e6xxx_hw_stat *stat;
int i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
- if (mv88e6xxx_has_stat(ds, stat)) {
+ if (mv88e6xxx_has_stat(ps, stat)) {
memcpy(data + j * ETH_GSTRING_LEN, stat->string,
ETH_GSTRING_LEN);
j++;
@@ -746,22 +724,22 @@ void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
}
}
-int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
+static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct mv88e6xxx_hw_stat *stat;
int i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
- if (mv88e6xxx_has_stat(ds, stat))
+ if (mv88e6xxx_has_stat(ps, stat))
j++;
}
return j;
}
-void
-mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
- int port, uint64_t *data)
+static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct mv88e6xxx_hw_stat *stat;
@@ -770,15 +748,15 @@ mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_stats_snapshot(ds, port);
+ ret = _mv88e6xxx_stats_snapshot(ps, port);
if (ret < 0) {
mutex_unlock(&ps->smi_mutex);
return;
}
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
- if (mv88e6xxx_has_stat(ds, stat)) {
- data[j] = _mv88e6xxx_get_ethtool_stat(ds, stat, port);
+ if (mv88e6xxx_has_stat(ps, stat)) {
+ data[j] = _mv88e6xxx_get_ethtool_stat(ps, stat, port);
j++;
}
}
@@ -786,14 +764,15 @@ mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
mutex_unlock(&ps->smi_mutex);
}
-int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
+static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
return 32 * sizeof(u16);
}
-void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
- struct ethtool_regs *regs, void *_p)
+static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
+ struct ethtool_regs *regs, void *_p)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
u16 *p = _p;
int i;
@@ -801,16 +780,20 @@ void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
memset(p, 0xff, 32 * sizeof(u16));
+ mutex_lock(&ps->smi_mutex);
+
for (i = 0; i < 32; i++) {
int ret;
- ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), i);
if (ret >= 0)
p[i] = ret;
}
+
+ mutex_unlock(&ps->smi_mutex);
}
-static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
+static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset,
u16 mask)
{
unsigned long timeout = jiffies + HZ / 10;
@@ -818,7 +801,7 @@ static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
while (time_before(jiffies, timeout)) {
int ret;
- ret = _mv88e6xxx_reg_read(ds, reg, offset);
+ ret = _mv88e6xxx_reg_read(ps, reg, offset);
if (ret < 0)
return ret;
if (!(ret & mask))
@@ -829,91 +812,320 @@ static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
return -ETIMEDOUT;
}
-static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+static int mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg,
+ int offset, u16 mask)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_wait(ds, reg, offset, mask);
+ ret = _mv88e6xxx_wait(ps, reg, offset, mask);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
+static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state *ps)
{
- return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ return _mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
GLOBAL2_SMI_OP_BUSY);
}
-int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
+static int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
{
- return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
GLOBAL2_EEPROM_OP_LOAD);
}
-int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
+static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
{
- return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
GLOBAL2_EEPROM_OP_BUSY);
}
-static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
+static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->eeprom_mutex);
+
+ ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ GLOBAL2_EEPROM_OP_READ |
+ (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6xxx_eeprom_busy_wait(ds);
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
+error:
+ mutex_unlock(&ps->eeprom_mutex);
+ return ret;
+}
+
+static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+ return ps->eeprom_len;
+
+ return 0;
+}
+
+static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int offset;
+ int len;
+ int ret;
+
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+ return -EOPNOTSUPP;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+ eeprom->len = 0;
+
+ eeprom->magic = 0xc3ec4951;
+
+ ret = mv88e6xxx_eeprom_load_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ if (offset & 1) {
+ int word;
+
+ word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ *data++ = (word >> 8) & 0xff;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ while (len >= 2) {
+ int word;
+
+ word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ *data++ = word & 0xff;
+ *data++ = (word >> 8) & 0xff;
+
+ offset += 2;
+ len -= 2;
+ eeprom->len += 2;
+ }
+
+ if (len) {
+ int word;
+
+ word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ *data++ = word & 0xff;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ return 0;
+}
+
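
The head/body/tail split in mv88e6xxx_get_eeprom() above can be exercised without hardware; this sketch runs the same walk over a plain array standing in for the EEPROM words, assuming the driver's little-endian byte packing.

#include <stdint.h>
#include <stdio.h>

static void read_bytes(const uint16_t *words, int offset, int len, uint8_t *out)
{
	if (offset & 1) {		/* odd head: high byte of the word */
		*out++ = (words[offset >> 1] >> 8) & 0xff;
		offset++;
		len--;
	}
	while (len >= 2) {		/* aligned body: both bytes */
		uint16_t w = words[offset >> 1];
		*out++ = w & 0xff;
		*out++ = (w >> 8) & 0xff;
		offset += 2;
		len -= 2;
	}
	if (len)			/* odd tail: low byte only */
		*out = words[offset >> 1] & 0xff;
}

int main(void)
{
	const uint16_t words[] = { 0x2211, 0x4433, 0x6655 };
	uint8_t buf[3];

	read_bytes(words, 1, 3, buf);	/* bytes 1..3 -> 22 33 44 */
	printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);
	return 0;
}
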
+static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
+ return -EROFS;
+
+ return 0;
+}
+
+static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr,
+ u16 data)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->eeprom_mutex);
+
+ ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ GLOBAL2_EEPROM_OP_WRITE |
+ (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6xxx_eeprom_busy_wait(ds);
+error:
+ mutex_unlock(&ps->eeprom_mutex);
+ return ret;
+}
+
+static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int offset;
+ int ret;
+ int len;
+
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic != 0xc3ec4951)
+ return -EINVAL;
+
+ ret = mv88e6xxx_eeprom_is_readonly(ds);
+ if (ret)
+ return ret;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+ eeprom->len = 0;
+
+ ret = mv88e6xxx_eeprom_load_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ if (offset & 1) {
+ int word;
+
+ word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ word = (*data++ << 8) | (word & 0xff);
+
+ ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
+ if (ret < 0)
+ return ret;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ while (len >= 2) {
+ int word;
+
+ word = *data++;
+ word |= *data++ << 8;
+
+ ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
+ if (ret < 0)
+ return ret;
+
+ offset += 2;
+ len -= 2;
+ eeprom->len += 2;
+ }
+
+ if (len) {
+ int word;
+
+ word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ word = (word & 0xff00) | *data++;
+
+ ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
+ if (ret < 0)
+ return ret;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ return 0;
+}
+
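
The unaligned edges of mv88e6xxx_set_eeprom() do a read-modify-write so that only one byte of the 16-bit word changes; a sketch of the two patch patterns (illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

/* odd head: replace only the high byte of the existing word */
static uint16_t patch_high_byte(uint16_t word, uint8_t b)
{
	return (uint16_t)((b << 8) | (word & 0x00ff));
}

/* odd tail: replace only the low byte */
static uint16_t patch_low_byte(uint16_t word, uint8_t b)
{
	return (uint16_t)((word & 0xff00) | b);
}

int main(void)
{
	/* word 0x4433 patched with byte 0x99: head -> 0x9933, tail -> 0x4499 */
	printf("0x%04x 0x%04x\n", patch_high_byte(0x4433, 0x99),
	       patch_low_byte(0x4433, 0x99));
	return 0;
}
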
+static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps)
{
- return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
+ return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_ATU_OP,
GLOBAL_ATU_OP_BUSY);
}
-static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
- int regnum)
+static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state *ps,
+ int addr, int regnum)
{
int ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
GLOBAL2_SMI_OP_22_READ | (addr << 5) |
regnum);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_phy_wait(ds);
+ ret = _mv88e6xxx_phy_wait(ps);
if (ret < 0)
return ret;
- return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA);
+
+ return ret;
}
-static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
- int regnum, u16 val)
+static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state *ps,
+ int addr, int regnum, u16 val)
{
int ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
regnum);
- return _mv88e6xxx_phy_wait(ds);
+ return _mv88e6xxx_phy_wait(ps);
}
-int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int reg;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
- reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
+ reg = _mv88e6xxx_phy_read_indirect(ps, port, 16);
if (reg < 0)
goto out;
e->eee_enabled = !!(reg & 0x0200);
e->tx_lpi_enabled = !!(reg & 0x0100);
- reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+ reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
if (reg < 0)
goto out;
@@ -925,16 +1137,19 @@ out:
return reg;
}
-int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
- struct phy_device *phydev, struct ethtool_eee *e)
+static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
+ struct phy_device *phydev, struct ethtool_eee *e)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int reg;
int ret;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
+ ret = _mv88e6xxx_phy_read_indirect(ps, port, 16);
if (ret < 0)
goto out;
@@ -944,25 +1159,45 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
if (e->tx_lpi_enabled)
reg |= 0x0100;
- ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
+ ret = _mv88e6xxx_phy_write_indirect(ps, port, 16, reg);
out:
mutex_unlock(&ps->smi_mutex);
return ret;
}
-static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd)
+static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd)
{
int ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
+ if (mv88e6xxx_has_fid_reg(ps)) {
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_FID, fid);
+ if (ret < 0)
+ return ret;
+ } else if (mv88e6xxx_num_databases(ps) == 256) {
+ /* ATU DBNum[7:4] are located in ATU Control 15:12 */
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ (ret & 0xfff) |
+ ((fid << 8) & 0xf000));
+ if (ret < 0)
+ return ret;
+
+ /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
+ cmd |= fid & 0xf;
+ }
+
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
if (ret < 0)
return ret;
- return _mv88e6xxx_atu_wait(ds);
+ return _mv88e6xxx_atu_wait(ps);
}
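
A worked example of the 256-database FID split performed by _mv88e6xxx_atu_cmd(): DBNum[7:4] go to ATU Control bits 15:12 and DBNum[3:0] to the ATU Operation's low nibble. The prior register value and opcode bits here are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t fid = 0xab;
	uint16_t atu_ctrl = 0x0123;	/* assumed prior ATU Control value */
	uint16_t cmd = 0xb000;		/* assumed opcode bits */

	atu_ctrl = (atu_ctrl & 0xfff) | ((fid << 8) & 0xf000);
	cmd |= fid & 0xf;

	/* fid 0xab -> control 0xa123, op 0xb00b */
	printf("control=0x%04x op=0x%04x\n", atu_ctrl, cmd);
	return 0;
}
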
-static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_atu_entry *entry)
{
u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
@@ -982,30 +1217,25 @@ static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
data |= (entry->portv_trunkid << shift) & mask;
}
- return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, data);
+ return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_DATA, data);
}
-static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_atu_entry *entry,
bool static_too)
{
int op;
int err;
- err = _mv88e6xxx_atu_wait(ds);
+ err = _mv88e6xxx_atu_wait(ps);
if (err)
return err;
- err = _mv88e6xxx_atu_data_write(ds, entry);
+ err = _mv88e6xxx_atu_data_write(ps, entry);
if (err)
return err;
if (entry->fid) {
- err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
- entry->fid);
- if (err)
- return err;
-
op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
} else {
@@ -1013,21 +1243,22 @@ static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
}
- return _mv88e6xxx_atu_cmd(ds, op);
+ return _mv88e6xxx_atu_cmd(ps, entry->fid, op);
}
-static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
+static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps,
+ u16 fid, bool static_too)
{
struct mv88e6xxx_atu_entry entry = {
.fid = fid,
.state = 0, /* EntryState bits must be 0 */
};
- return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
+ return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
}
-static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
- int to_port, bool static_too)
+static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid,
+ int from_port, int to_port, bool static_too)
{
struct mv88e6xxx_atu_entry entry = {
.trunk = false,
@@ -1041,14 +1272,14 @@ static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
entry.portv_trunkid = (to_port & 0x0f) << 4;
entry.portv_trunkid |= from_port & 0x0f;
- return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
+ return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
}
-static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port,
- bool static_too)
+static int _mv88e6xxx_atu_remove(struct mv88e6xxx_priv_state *ps, u16 fid,
+ int port, bool static_too)
{
/* Destination port 0xF means remove the entries */
- return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too);
+ return _mv88e6xxx_atu_move(ps, fid, port, 0x0f, static_too);
}
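
A short sketch of the PortVec encoding used by the flush/move helpers above: the destination port sits in the high nibble, the source in the low one, and destination 0xf turns the move into a removal.

#include <stdint.h>
#include <stdio.h>

static uint8_t move_spec(int from_port, int to_port)
{
	return (uint8_t)(((to_port & 0x0f) << 4) | (from_port & 0x0f));
}

int main(void)
{
	printf("move 2->5: 0x%02x\n", move_spec(2, 5));		/* 0x52 */
	printf("remove 2:  0x%02x\n", move_spec(2, 0x0f));	/* 0xf2 */
	return 0;
}
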
static const char * const mv88e6xxx_port_state_names[] = {
@@ -1058,12 +1289,14 @@ static const char * const mv88e6xxx_port_state_names[] = {
[PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
};
-static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state)
+static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
+ u8 state)
{
+ struct dsa_switch *ds = ps->ds;
int reg, ret = 0;
u8 oldstate;
- reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
+ reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL);
if (reg < 0)
return reg;
@@ -1078,13 +1311,13 @@ static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state)
oldstate == PORT_CONTROL_STATE_FORWARDING)
&& (state == PORT_CONTROL_STATE_DISABLED ||
state == PORT_CONTROL_STATE_BLOCKING)) {
- ret = _mv88e6xxx_atu_remove(ds, 0, port, false);
+ ret = _mv88e6xxx_atu_remove(ps, 0, port, false);
if (ret)
return ret;
}
reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL,
reg);
if (ret)
return ret;
@@ -1097,11 +1330,12 @@ static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state)
return ret;
}
-static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
+static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps,
+ int port)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct net_device *bridge = ps->ports[port].bridge_dev;
- const u16 mask = (1 << ps->num_ports) - 1;
+ const u16 mask = (1 << ps->info->num_ports) - 1;
+ struct dsa_switch *ds = ps->ds;
u16 output_ports = 0;
int reg;
int i;
@@ -1110,7 +1344,7 @@ static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
output_ports = mask;
} else {
- for (i = 0; i < ps->num_ports; ++i) {
+ for (i = 0; i < ps->info->num_ports; ++i) {
/* allow sending frames to every group member */
if (bridge && ps->ports[i].bridge_dev == bridge)
output_ports |= BIT(i);
@@ -1124,20 +1358,25 @@ static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
/* prevent frames from going back out of the port they came in on */
output_ports &= ~BIT(port);
- reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
+ reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
if (reg < 0)
return reg;
reg &= ~mask;
reg |= output_ports & mask;
- return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
+ return _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, reg);
}
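
The VLANTable computation above reduces to a simple mask: every member of the port's bridge may receive frames, except the ingress port itself. A sketch with made-up bridge membership (bridge id 0 meaning "not bridged"):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int num_ports = 7, port = 2;
	int bridge_of[7] = { 1, 1, 1, 0, 2, 2, 0 };	/* assumed memberships */
	uint16_t mask = (1 << num_ports) - 1;
	uint16_t out = 0;
	int i;

	for (i = 0; i < num_ports; i++)
		if (bridge_of[i] && bridge_of[i] == bridge_of[port])
			out |= 1 << i;
	out &= ~(1 << port);		/* no frames back out the ingress port */

	printf("0x%02x\n", out & mask);	/* ports 0 and 1 -> 0x03 */
	return 0;
}
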
-int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
+static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int stp_state;
+ int err;
+
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_PORTSTATE))
+ return;
switch (state) {
case BR_STATE_DISABLED:
@@ -1156,23 +1395,23 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
break;
}
- /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
- * so we can not update the port state directly but need to schedule it.
- */
- ps->ports[port].state = stp_state;
- set_bit(port, ps->port_state_update_mask);
- schedule_work(&ps->bridge_work);
+ mutex_lock(&ps->smi_mutex);
+ err = _mv88e6xxx_port_state(ps, port, stp_state);
+ mutex_unlock(&ps->smi_mutex);
- return 0;
+ if (err)
+ netdev_err(ds->ports[port], "failed to update state to %s\n",
+ mv88e6xxx_port_state_names[stp_state]);
}
-static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
- u16 *old)
+static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
+ u16 *new, u16 *old)
{
+ struct dsa_switch *ds = ps->ds;
u16 pvid;
int ret;
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_DEFAULT_VLAN);
if (ret < 0)
return ret;
@@ -1182,7 +1421,7 @@ static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
ret &= ~PORT_DEFAULT_VLAN_MASK;
ret |= *new & PORT_DEFAULT_VLAN_MASK;
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_DEFAULT_VLAN, ret);
if (ret < 0)
return ret;
@@ -1197,55 +1436,56 @@ static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
return 0;
}
-static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
+static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state *ps,
+ int port, u16 *pvid)
{
- return _mv88e6xxx_port_pvid(ds, port, NULL, pvid);
+ return _mv88e6xxx_port_pvid(ps, port, NULL, pvid);
}
-static int _mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
+static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state *ps,
+ int port, u16 pvid)
{
- return _mv88e6xxx_port_pvid(ds, port, &pvid, NULL);
+ return _mv88e6xxx_port_pvid(ps, port, &pvid, NULL);
}
-static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
+static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state *ps)
{
- return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
+ return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_VTU_OP,
GLOBAL_VTU_OP_BUSY);
}
-static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
+static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state *ps, u16 op)
{
int ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_OP, op);
if (ret < 0)
return ret;
- return _mv88e6xxx_vtu_wait(ds);
+ return _mv88e6xxx_vtu_wait(ps);
}
-static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
+static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state *ps)
{
int ret;
- ret = _mv88e6xxx_vtu_wait(ds);
+ ret = _mv88e6xxx_vtu_wait(ps);
if (ret < 0)
return ret;
- return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
+ return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_FLUSH_ALL);
}
-static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
+static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_vtu_stu_entry *entry,
unsigned int nibble_offset)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
u16 regs[3];
int i;
int ret;
for (i = 0; i < 3; ++i) {
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
GLOBAL_VTU_DATA_0_3 + i);
if (ret < 0)
return ret;
@@ -1253,7 +1493,7 @@ static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
regs[i] = ret;
}
- for (i = 0; i < ps->num_ports; ++i) {
+ for (i = 0; i < ps->info->num_ports; ++i) {
unsigned int shift = (i % 4) * 4 + nibble_offset;
u16 reg = regs[i / 4];
@@ -1263,16 +1503,27 @@ static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
return 0;
}
-static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
+static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_priv_state *ps,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ return _mv88e6xxx_vtu_stu_data_read(ps, entry, 0);
+}
+
+static int mv88e6xxx_stu_data_read(struct mv88e6xxx_priv_state *ps,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ return _mv88e6xxx_vtu_stu_data_read(ps, entry, 2);
+}
+
+static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_vtu_stu_entry *entry,
unsigned int nibble_offset)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
u16 regs[3] = { 0 };
int i;
int ret;
- for (i = 0; i < ps->num_ports; ++i) {
+ for (i = 0; i < ps->info->num_ports; ++i) {
unsigned int shift = (i % 4) * 4 + nibble_offset;
u8 data = entry->data[i];
@@ -1280,7 +1531,7 @@ static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
}
for (i = 0; i < 3; ++i) {
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL,
GLOBAL_VTU_DATA_0_3 + i, regs[i]);
if (ret < 0)
return ret;
@@ -1289,27 +1540,39 @@ static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
return 0;
}
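
The shared VTU/STU data layout packs one 4-bit field per port into three 16-bit registers: port i lands at bit (i % 4) * 4 + nibble_offset of regs[i / 4], with VTU member tags at offset 0 and STU port states at offset 2. A standalone sketch of the packing:

#include <stdint.h>
#include <stdio.h>

static void pack(uint16_t regs[3], const uint8_t *data, int nports, int off)
{
	int i;

	for (i = 0; i < nports; i++)
		regs[i / 4] |= (uint16_t)data[i] << ((i % 4) * 4 + off);
}

int main(void)
{
	uint16_t regs[3] = { 0 };
	uint8_t tags[7] = { 0, 1, 2, 3, 0, 1, 2 };	/* assumed member tags */

	pack(regs, tags, 7, 0);		/* VTU uses offset 0, STU uses 2 */
	printf("%04x %04x\n", regs[0], regs[1]);	/* 3210 0210 */
	return 0;
}
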
-static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid)
+static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_priv_state *ps,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ return _mv88e6xxx_vtu_stu_data_write(ps, entry, 0);
+}
+
+static int mv88e6xxx_stu_data_write(struct mv88e6xxx_priv_state *ps,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ return _mv88e6xxx_vtu_stu_data_write(ps, entry, 2);
+}
+
+static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state *ps, u16 vid)
{
- return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
+ return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID,
vid & GLOBAL_VTU_VID_MASK);
}
-static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
+static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_vtu_stu_entry *entry)
{
struct mv88e6xxx_vtu_stu_entry next = { 0 };
int ret;
- ret = _mv88e6xxx_vtu_wait(ds);
+ ret = _mv88e6xxx_vtu_wait(ps);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
+ ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_VTU_GET_NEXT);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
if (ret < 0)
return ret;
@@ -1317,20 +1580,32 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
if (next.valid) {
- ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
+ ret = mv88e6xxx_vtu_data_read(ps, &next);
if (ret < 0)
return ret;
- if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
- mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+ if (mv88e6xxx_has_fid_reg(ps)) {
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
GLOBAL_VTU_FID);
if (ret < 0)
return ret;
next.fid = ret & GLOBAL_VTU_FID_MASK;
+ } else if (mv88e6xxx_num_databases(ps) == 256) {
+ /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
+ * VTU DBNum[3:0] are located in VTU Operation 3:0
+ */
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
+ GLOBAL_VTU_OP);
+ if (ret < 0)
+ return ret;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+ next.fid = (ret & 0xf00) >> 4;
+ next.fid |= ret & 0xf;
+ }
+
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) {
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
GLOBAL_VTU_SID);
if (ret < 0)
return ret;
@@ -1343,27 +1618,30 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
return 0;
}
-int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj))
+static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj))
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct mv88e6xxx_vtu_stu_entry next;
u16 pvid;
int err;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
- err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
+ err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
if (err)
goto unlock;
- err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
+ err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
if (err)
goto unlock;
do {
- err = _mv88e6xxx_vtu_getnext(ds, &next);
+ err = _mv88e6xxx_vtu_getnext(ps, &next);
if (err)
break;
@@ -1394,13 +1672,14 @@ unlock:
return err;
}
-static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
+static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_vtu_stu_entry *entry)
{
+ u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
u16 reg = 0;
int ret;
- ret = _mv88e6xxx_vtu_wait(ds);
+ ret = _mv88e6xxx_vtu_wait(ps);
if (ret < 0)
return ret;
@@ -1408,66 +1687,73 @@ static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
goto loadpurge;
/* Write port member tags */
- ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
+ ret = mv88e6xxx_vtu_data_write(ps, entry);
if (ret < 0)
return ret;
- if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
- mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) {
reg = entry->sid & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
if (ret < 0)
return ret;
+ }
+ if (mv88e6xxx_has_fid_reg(ps)) {
reg = entry->fid & GLOBAL_VTU_FID_MASK;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_FID, reg);
if (ret < 0)
return ret;
+ } else if (mv88e6xxx_num_databases(ps) == 256) {
+ /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
+ * VTU DBNum[3:0] are located in VTU Operation 3:0
+ */
+ op |= (entry->fid & 0xf0) << 4;
+ op |= entry->fid & 0xf;
}
reg = GLOBAL_VTU_VID_VALID;
loadpurge:
reg |= entry->vid & GLOBAL_VTU_VID_MASK;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
if (ret < 0)
return ret;
- return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
+ return _mv88e6xxx_vtu_cmd(ps, op);
}
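
A worked example of the VTU DBNum placement on 256-database chips, matching the comment above (FID bits 7:4 into op bits 11:8, bits 3:0 into 3:0). The opcode bits are an assumption for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t op = 0x3000;		/* assumed load/purge opcode bits */
	uint16_t fid = 0xab;

	op |= (fid & 0xf0) << 4;	/* DBNum[7:4] -> op[11:8] */
	op |= fid & 0x0f;		/* DBNum[3:0] -> op[3:0]  */

	printf("op=0x%04x\n", op);	/* 0x3a0b */
	return 0;
}
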
-static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
+static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid,
struct mv88e6xxx_vtu_stu_entry *entry)
{
struct mv88e6xxx_vtu_stu_entry next = { 0 };
int ret;
- ret = _mv88e6xxx_vtu_wait(ds);
+ ret = _mv88e6xxx_vtu_wait(ps);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID,
sid & GLOBAL_VTU_SID_MASK);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
+ ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_GET_NEXT);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID);
if (ret < 0)
return ret;
next.sid = ret & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
if (ret < 0)
return ret;
next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
if (next.valid) {
- ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
+ ret = mv88e6xxx_stu_data_read(ps, &next);
if (ret < 0)
return ret;
}
@@ -1476,13 +1762,13 @@ static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
return 0;
}
-static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
+static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_vtu_stu_entry *entry)
{
u16 reg = 0;
int ret;
- ret = _mv88e6xxx_vtu_wait(ds);
+ ret = _mv88e6xxx_vtu_wait(ps);
if (ret < 0)
return ret;
@@ -1490,32 +1776,41 @@ static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
goto loadpurge;
/* Write port states */
- ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
+ ret = mv88e6xxx_stu_data_write(ps, entry);
if (ret < 0)
return ret;
reg = GLOBAL_VTU_VID_VALID;
loadpurge:
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
if (ret < 0)
return ret;
reg = entry->sid & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
if (ret < 0)
return ret;
- return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
+ return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}
-static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
- u16 *old)
+static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
+ u16 *new, u16 *old)
{
+ struct dsa_switch *ds = ps->ds;
+ u16 upper_mask;
u16 fid;
int ret;
+ if (mv88e6xxx_num_databases(ps) == 4096)
+ upper_mask = 0xff;
+ else if (mv88e6xxx_num_databases(ps) == 256)
+ upper_mask = 0xf;
+ else
+ return -EOPNOTSUPP;
+
/* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
if (ret < 0)
return ret;
@@ -1525,24 +1820,24 @@ static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN,
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN,
ret);
if (ret < 0)
return ret;
}
/* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_1);
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_1);
if (ret < 0)
return ret;
- fid |= (ret & PORT_CONTROL_1_FID_11_4_MASK) << 4;
+ fid |= (ret & upper_mask) << 4;
if (new) {
- ret &= ~PORT_CONTROL_1_FID_11_4_MASK;
- ret |= (*new >> 4) & PORT_CONTROL_1_FID_11_4_MASK;
+ ret &= ~upper_mask;
+ ret |= (*new >> 4) & upper_mask;
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1,
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1,
ret);
if (ret < 0)
return ret;
@@ -1556,19 +1851,20 @@ static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
return 0;
}
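
The port's default FID is split across two registers, as the comments in _mv88e6xxx_port_fid() describe: bits 3:0 live in PORT_BASE_VLAN 15:12 and the upper bits in the low byte of PORT_CONTROL_1 (only the low nibble of which is valid on 256-database chips). A sketch of the split and recombination:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t fid = 0xabc;
	uint16_t base_vlan = (fid << 12) & 0xf000;	/* FID[3:0]  */
	uint16_t control1 = (fid >> 4) & 0xff;		/* FID[11:4] */

	printf("base_vlan=0x%04x control1=0x%04x\n", base_vlan, control1);

	fid = ((base_vlan & 0xf000) >> 12) | ((control1 & 0xff) << 4);
	printf("fid=0x%03x\n", fid);			/* 0xabc */
	return 0;
}
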
-static int _mv88e6xxx_port_fid_get(struct dsa_switch *ds, int port, u16 *fid)
+static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state *ps,
+ int port, u16 *fid)
{
- return _mv88e6xxx_port_fid(ds, port, NULL, fid);
+ return _mv88e6xxx_port_fid(ps, port, NULL, fid);
}
-static int _mv88e6xxx_port_fid_set(struct dsa_switch *ds, int port, u16 fid)
+static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state *ps,
+ int port, u16 fid)
{
- return _mv88e6xxx_port_fid(ds, port, &fid, NULL);
+ return _mv88e6xxx_port_fid(ps, port, &fid, NULL);
}
-static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
+static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
struct mv88e6xxx_vtu_stu_entry vlan;
int i, err;
@@ -1576,8 +1872,8 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
/* Set every FID bit used by the (un)bridged ports */
- for (i = 0; i < ps->num_ports; ++i) {
- err = _mv88e6xxx_port_fid_get(ds, i, fid);
+ for (i = 0; i < ps->info->num_ports; ++i) {
+ err = _mv88e6xxx_port_fid_get(ps, i, fid);
if (err)
return err;
@@ -1585,12 +1881,12 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
}
/* Set every FID bit used by the VLAN entries */
- err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
+ err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
if (err)
return err;
do {
- err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+ err = _mv88e6xxx_vtu_getnext(ps, &vlan);
if (err)
return err;
@@ -1604,35 +1900,35 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
* databases are not needed. Return the next positive available.
*/
*fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
- if (unlikely(*fid == MV88E6XXX_N_FID))
+ if (unlikely(*fid >= mv88e6xxx_num_databases(ps)))
return -ENOSPC;
/* Clear the database */
- return _mv88e6xxx_atu_flush(ds, *fid, true);
+ return _mv88e6xxx_atu_flush(ps, *fid, true);
}
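
The FID allocator above is a straightforward bitmap scan; a sketch with a 32-entry stand-in for fid_bitmap (FID 0 is reserved for the shared default database, so the scan starts at 1):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t used = 0;
	int fid;

	used |= 1u << 0;	/* default database */
	used |= 1u << 1;	/* assumed: a port's FID */
	used |= 1u << 2;	/* assumed: a VLAN's FID */

	for (fid = 1; fid < 32; fid++)
		if (!(used & (1u << fid)))
			break;

	printf("new fid = %d\n", fid);	/* 3 */
	return 0;
}
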
-static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
+static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
struct mv88e6xxx_vtu_stu_entry *entry)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct dsa_switch *ds = ps->ds;
struct mv88e6xxx_vtu_stu_entry vlan = {
.valid = true,
.vid = vid,
};
int i, err;
- err = _mv88e6xxx_fid_new(ds, &vlan.fid);
+ err = _mv88e6xxx_fid_new(ps, &vlan.fid);
if (err)
return err;
/* exclude all ports except the CPU and DSA ports */
- for (i = 0; i < ps->num_ports; ++i)
+ for (i = 0; i < ps->info->num_ports; ++i)
vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
: GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
- if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
- mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+ if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
+ mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) {
struct mv88e6xxx_vtu_stu_entry vstp;
/* Adding a VTU entry requires a valid STU entry. As VSTP is not
@@ -1640,7 +1936,7 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
* entries. Thus, validate the SID 0.
*/
vlan.sid = 0;
- err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
+ err = _mv88e6xxx_stu_getnext(ps, GLOBAL_VTU_SID_MASK, &vstp);
if (err)
return err;
@@ -1649,7 +1945,7 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
vstp.valid = true;
vstp.sid = vlan.sid;
- err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
+ err = _mv88e6xxx_stu_loadpurge(ps, &vstp);
if (err)
return err;
}
@@ -1659,7 +1955,7 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
return 0;
}
-static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
+static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
{
int err;
@@ -1667,11 +1963,11 @@ static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
if (!vid)
return -EINVAL;
- err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
+ err = _mv88e6xxx_vtu_vid_write(ps, vid - 1);
if (err)
return err;
- err = _mv88e6xxx_vtu_getnext(ds, entry);
+ err = _mv88e6xxx_vtu_getnext(ps, entry);
if (err)
return err;
@@ -1682,7 +1978,7 @@ static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
* -EOPNOTSUPP to inform bridge about an eventual software VLAN.
*/
- err = _mv88e6xxx_vtu_new(ds, vid, entry);
+ err = _mv88e6xxx_vtu_new(ps, vid, entry);
}
return err;
@@ -1700,12 +1996,12 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
mutex_lock(&ps->smi_mutex);
- err = _mv88e6xxx_vtu_vid_write(ds, vid_begin - 1);
+ err = _mv88e6xxx_vtu_vid_write(ps, vid_begin - 1);
if (err)
goto unlock;
do {
- err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+ err = _mv88e6xxx_vtu_getnext(ps, &vlan);
if (err)
goto unlock;
@@ -1715,7 +2011,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (vlan.vid > vid_end)
break;
- for (i = 0; i < ps->num_ports; ++i) {
+ for (i = 0; i < ps->info->num_ports; ++i) {
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
@@ -1749,17 +2045,20 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = {
[PORT_CONTROL_2_8021Q_SECURE] = "Secure",
};
-int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering)
+static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
PORT_CONTROL_2_8021Q_DISABLED;
int ret;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_2);
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_2);
if (ret < 0)
goto unlock;
@@ -1769,7 +2068,7 @@ int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
ret &= ~PORT_CONTROL_2_8021Q_MASK;
ret |= new & PORT_CONTROL_2_8021Q_MASK;
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2,
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2,
ret);
if (ret < 0)
goto unlock;
@@ -1786,12 +2085,16 @@ unlock:
return ret;
}
-int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+static int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int err;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ return -EOPNOTSUPP;
+
/* If the requested port doesn't belong to the same bridge as the VLAN
* members, do not support it (yet) and fallback to software VLAN.
*/
@@ -1806,13 +2109,13 @@ int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
return 0;
}
-static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
- bool untagged)
+static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port,
+ u16 vid, bool untagged)
{
struct mv88e6xxx_vtu_stu_entry vlan;
int err;
- err = _mv88e6xxx_vtu_get(ds, vid, &vlan, true);
+ err = _mv88e6xxx_vtu_get(ps, vid, &vlan, true);
if (err)
return err;
@@ -1820,43 +2123,43 @@ static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
- return _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+ return _mv88e6xxx_vtu_loadpurge(ps, &vlan);
}
-int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
u16 vid;
- int err = 0;
+
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ return;
mutex_lock(&ps->smi_mutex);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- err = _mv88e6xxx_port_vlan_add(ds, port, vid, untagged);
- if (err)
- goto unlock;
- }
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+ if (_mv88e6xxx_port_vlan_add(ps, port, vid, untagged))
+ netdev_err(ds->ports[port], "failed to add VLAN %d%c\n",
+ vid, untagged ? 'u' : 't');
- /* no PVID with ranges, otherwise it's a bug */
- if (pvid)
- err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
-unlock:
- mutex_unlock(&ps->smi_mutex);
+ if (pvid && _mv88e6xxx_port_pvid_set(ps, port, vlan->vid_end))
+ netdev_err(ds->ports[port], "failed to set PVID %d\n",
+ vlan->vid_end);
- return err;
+ mutex_unlock(&ps->smi_mutex);
}
-static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps,
+ int port, u16 vid)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct dsa_switch *ds = ps->ds;
struct mv88e6xxx_vtu_stu_entry vlan;
int i, err;
- err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
+ err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
if (err)
return err;
@@ -1868,7 +2171,7 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
/* keep the VLAN unless all ports are excluded */
vlan.valid = false;
- for (i = 0; i < ps->num_ports; ++i) {
+ for (i = 0; i < ps->info->num_ports; ++i) {
if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
continue;
@@ -1878,33 +2181,36 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
}
}
- err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+ err = _mv88e6xxx_vtu_loadpurge(ps, &vlan);
if (err)
return err;
- return _mv88e6xxx_atu_remove(ds, vlan.fid, port, false);
+ return _mv88e6xxx_atu_remove(ps, vlan.fid, port, false);
}
-int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
u16 pvid, vid;
int err = 0;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
- err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
+ err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
if (err)
goto unlock;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- err = _mv88e6xxx_port_vlan_del(ds, port, vid);
+ err = _mv88e6xxx_port_vlan_del(ps, port, vid);
if (err)
goto unlock;
if (vid == pvid) {
- err = _mv88e6xxx_port_pvid_set(ds, port, 0);
+ err = _mv88e6xxx_port_pvid_set(ps, port, 0);
if (err)
goto unlock;
}
@@ -1916,14 +2222,14 @@ unlock:
return err;
}
-static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps,
const unsigned char *addr)
{
int i, ret;
for (i = 0; i < 3; i++) {
ret = _mv88e6xxx_reg_write(
- ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
+ ps, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
(addr[i * 2] << 8) | addr[i * 2 + 1]);
if (ret < 0)
return ret;
@@ -1932,12 +2238,13 @@ static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
return 0;
}
-static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
+static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps,
+ unsigned char *addr)
{
int i, ret;
for (i = 0; i < 3; i++) {
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
GLOBAL_ATU_MAC_01 + i);
if (ret < 0)
return ret;
@@ -1948,31 +2255,27 @@ static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
return 0;
}
-static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_atu_entry *entry)
{
int ret;
- ret = _mv88e6xxx_atu_wait(ds);
+ ret = _mv88e6xxx_atu_wait(ps);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
+ ret = _mv88e6xxx_atu_mac_write(ps, entry->mac);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_data_write(ds, entry);
+ ret = _mv88e6xxx_atu_data_write(ps, entry);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid);
- if (ret < 0)
- return ret;
-
- return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
+ return _mv88e6xxx_atu_cmd(ps, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
}
-static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
+static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port,
const unsigned char *addr, u16 vid,
u8 state)
{
@@ -1982,9 +2285,9 @@ static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
/* Null VLAN ID corresponds to the port private database */
if (vid == 0)
- err = _mv88e6xxx_port_fid_get(ds, port, &vlan.fid);
+ err = _mv88e6xxx_port_fid_get(ps, port, &vlan.fid);
else
- err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
+ err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
if (err)
return err;
@@ -1996,51 +2299,60 @@ static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
entry.portv_trunkid = BIT(port);
}
- return _mv88e6xxx_atu_load(ds, &entry);
+ return _mv88e6xxx_atu_load(ps, &entry);
}
-int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
+static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+ return -EOPNOTSUPP;
+
/* We don't need any dynamic resource from the kernel (yet),
* so skip the prepare phase.
*/
return 0;
}
-int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
+static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
{
int state = is_multicast_ether_addr(fdb->addr) ?
GLOBAL_ATU_DATA_STATE_MC_STATIC :
GLOBAL_ATU_DATA_STATE_UC_STATIC;
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
+
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+ return;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state);
+ if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state))
+ netdev_err(ds->ports[port], "failed to load MAC address\n");
mutex_unlock(&ps->smi_mutex);
-
- return ret;
}
-int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb)
+static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid,
+ ret = _mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid,
GLOBAL_ATU_DATA_STATE_UNUSED);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
+static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid,
struct mv88e6xxx_atu_entry *entry)
{
struct mv88e6xxx_atu_entry next = { 0 };
@@ -2048,23 +2360,19 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
next.fid = fid;
- ret = _mv88e6xxx_atu_wait(ds);
+ ret = _mv88e6xxx_atu_wait(ps);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
+ ret = _mv88e6xxx_atu_cmd(ps, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
+ ret = _mv88e6xxx_atu_mac_read(ps, next.mac);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
- if (ret < 0)
- return ret;
-
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_DATA);
if (ret < 0)
return ret;
@@ -2089,8 +2397,8 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
return 0;
}
-static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
- int port,
+static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps,
+ u16 fid, u16 vid, int port,
struct switchdev_obj_port_fdb *fdb,
int (*cb)(struct switchdev_obj *obj))
{
@@ -2099,12 +2407,12 @@ static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
};
int err;
- err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
+ err = _mv88e6xxx_atu_mac_write(ps, addr.mac);
if (err)
return err;
do {
- err = _mv88e6xxx_atu_getnext(ds, fid, &addr);
+ err = _mv88e6xxx_atu_getnext(ps, fid, &addr);
if (err)
break;
@@ -2130,9 +2438,9 @@ static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
return err;
}
-int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
+static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct mv88e6xxx_vtu_stu_entry vlan = {
@@ -2141,31 +2449,34 @@ int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
u16 fid;
int err;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
/* Dump port's default Filtering Information Database (VLAN ID 0) */
- err = _mv88e6xxx_port_fid_get(ds, port, &fid);
+ err = _mv88e6xxx_port_fid_get(ps, port, &fid);
if (err)
goto unlock;
- err = _mv88e6xxx_port_fdb_dump_one(ds, fid, 0, port, fdb, cb);
+ err = _mv88e6xxx_port_fdb_dump_one(ps, fid, 0, port, fdb, cb);
if (err)
goto unlock;
/* Dump VLANs' Filtering Information Databases */
- err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
+ err = _mv88e6xxx_vtu_vid_write(ps, vlan.vid);
if (err)
goto unlock;
do {
- err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+ err = _mv88e6xxx_vtu_getnext(ps, &vlan);
if (err)
break;
if (!vlan.valid)
break;
- err = _mv88e6xxx_port_fdb_dump_one(ds, vlan.fid, vlan.vid, port,
+ err = _mv88e6xxx_port_fdb_dump_one(ps, vlan.fid, vlan.vid, port,
fdb, cb);
if (err)
break;
@@ -2177,20 +2488,23 @@ unlock:
return err;
}
-int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int i, err = 0;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
/* Assign the bridge and remap each port's VLANTable */
ps->ports[port].bridge_dev = bridge;
- for (i = 0; i < ps->num_ports; ++i) {
+ for (i = 0; i < ps->info->num_ports; ++i) {
if (ps->ports[i].bridge_dev == bridge) {
- err = _mv88e6xxx_port_based_vlan_map(ds, i);
+ err = _mv88e6xxx_port_based_vlan_map(ps, i);
if (err)
break;
}
@@ -2201,89 +2515,134 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
return err;
}
-void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
+static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct net_device *bridge = ps->ports[port].bridge_dev;
int i;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE))
+ return;
+
mutex_lock(&ps->smi_mutex);
/* Unassign the bridge and remap each port's VLANTable */
ps->ports[port].bridge_dev = NULL;
- for (i = 0; i < ps->num_ports; ++i)
+ for (i = 0; i < ps->info->num_ports; ++i)
if (i == port || ps->ports[i].bridge_dev == bridge)
- if (_mv88e6xxx_port_based_vlan_map(ds, i))
+ if (_mv88e6xxx_port_based_vlan_map(ps, i))
netdev_warn(ds->ports[i], "failed to remap\n");
mutex_unlock(&ps->smi_mutex);
}
-static void mv88e6xxx_bridge_work(struct work_struct *work)
+static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps,
+ int port, int page, int reg, int val)
{
- struct mv88e6xxx_priv_state *ps;
- struct dsa_switch *ds;
- int port;
-
- ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
- ds = ((struct dsa_switch *)ps) - 1;
+ int ret;
- mutex_lock(&ps->smi_mutex);
+ ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
+ if (ret < 0)
+ goto restore_page_0;
- for (port = 0; port < ps->num_ports; ++port)
- if (test_and_clear_bit(port, ps->port_state_update_mask) &&
- _mv88e6xxx_port_state(ds, port, ps->ports[port].state))
- netdev_warn(ds->ports[port], "failed to update state to %s\n",
- mv88e6xxx_port_state_names[ps->ports[port].state]);
+ ret = _mv88e6xxx_phy_write_indirect(ps, port, reg, val);
+restore_page_0:
+ _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
- mutex_unlock(&ps->smi_mutex);
+ return ret;
}
-static int _mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
- int reg, int val)
+static int _mv88e6xxx_phy_page_read(struct mv88e6xxx_priv_state *ps,
+ int port, int page, int reg)
{
int ret;
- ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+ ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
if (ret < 0)
goto restore_page_0;
- ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
+ ret = _mv88e6xxx_phy_read_indirect(ps, port, reg);
restore_page_0:
- _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+ _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
return ret;
}
-static int _mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page,
- int reg)
+static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps)
{
+ bool ppu_active = mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE);
+ u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
+ struct gpio_desc *gpiod = ps->reset;
+ unsigned long timeout;
int ret;
+ int i;
- ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
- if (ret < 0)
- goto restore_page_0;
+ /* Set all ports to the disabled state. */
+ for (i = 0; i < ps->info->num_ports; i++) {
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL);
+ if (ret < 0)
+ return ret;
- ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
-restore_page_0:
- _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL,
+ ret & 0xfffc);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait for transmit queues to drain. */
+ usleep_range(2000, 4000);
+
+ /* If there is a gpio connected to the reset pin, toggle it */
+ if (gpiod) {
+ gpiod_set_value_cansleep(gpiod, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value_cansleep(gpiod, 0);
+ usleep_range(10000, 20000);
+ }
+
+ /* Reset the switch. Keep the PPU active if requested. The PPU
+ * needs to be active to support indirect phy register access
+ * through global registers 0x18 and 0x19.
+ */
+ if (ppu_active)
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000);
+ else
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400);
+ if (ret)
+ return ret;
+
+ /* Wait up to one second for reset to complete. */
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & is_reset) == is_reset)
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (time_after(jiffies, timeout))
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
return ret;
}
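
The completion poll in mv88e6xxx_switch_reset() waits for both "init done" bits (0xc800 normally, 0x8800 with the PPU kept active) within a one-second budget. A hardware-free sketch of the same pattern, with a fake status register standing in for global register 0x00:

#include <stdint.h>
#include <stdio.h>

static uint16_t fake_status;	/* stands in for global register 0x00 */

static int wait_reset(uint16_t is_reset, int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++) {
		if ((fake_status & is_reset) == is_reset)
			return 0;
		fake_status |= 0x4000;	/* pretend the switch finishes */
	}
	return -1;			/* the driver returns -ETIMEDOUT */
}

int main(void)
{
	fake_status = 0x8800;
	printf("%d\n", wait_reset(0xc800, 1000));	/* 0: completed */
	return 0;
}
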
-static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds)
+static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps)
{
int ret;
- ret = _mv88e6xxx_phy_page_read(ds, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
+ ret = _mv88e6xxx_phy_page_read(ps, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
MII_BMCR);
if (ret < 0)
return ret;
if (ret & BMCR_PDOWN) {
ret &= ~BMCR_PDOWN;
- ret = _mv88e6xxx_phy_page_write(ds, REG_FIBER_SERDES,
+ ret = _mv88e6xxx_phy_page_write(ps, REG_FIBER_SERDES,
PAGE_FIBER_SERDES, MII_BMCR,
ret);
}
@@ -2291,32 +2650,30 @@ static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds)
return ret;
}
-static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
+static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct dsa_switch *ds = ps->ds;
int ret;
u16 reg;
- mutex_lock(&ps->smi_mutex);
-
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
- mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
+ mv88e6xxx_6065_family(ps) || mv88e6xxx_6320_family(ps)) {
/* MAC Forcing register: don't force link, speed,
* duplex or flow control state to any particular
* values on physical ports, but force the CPU port
* and all DSA ports to their maximum bandwidth and
* full duplex.
*/
- reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
+ reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
reg &= ~PORT_PCS_CTRL_UNFORCED;
reg |= PORT_PCS_CTRL_FORCE_LINK |
PORT_PCS_CTRL_LINK_UP |
PORT_PCS_CTRL_DUPLEX_FULL |
PORT_PCS_CTRL_FORCE_DUPLEX;
- if (mv88e6xxx_6065_family(ds))
+ if (mv88e6xxx_6065_family(ps))
reg |= PORT_PCS_CTRL_100;
else
reg |= PORT_PCS_CTRL_1000;
@@ -2324,10 +2681,10 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
reg |= PORT_PCS_CTRL_UNFORCED;
}
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_PCS_CTRL, reg);
if (ret)
- goto abort;
+ return ret;
}
/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
@@ -2345,19 +2702,19 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
* forwarding of unknown unicasts and multicasts.
*/
reg = 0;
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
- mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
+ mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps))
reg = PORT_CONTROL_IGMP_MLD_SNOOP |
PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
PORT_CONTROL_STATE_FORWARDING;
if (dsa_is_cpu_port(ds, port)) {
- if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+ if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
reg |= PORT_CONTROL_DSA_TAG;
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6320_family(ds)) {
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6320_family(ps)) {
if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
else
@@ -2366,20 +2723,20 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
PORT_CONTROL_FORWARD_UNKNOWN_MC;
}
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
- mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
+ mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) {
if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
reg |= PORT_CONTROL_EGRESS_ADD_TAG;
}
}
if (dsa_is_dsa_port(ds, port)) {
- if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+ if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
reg |= PORT_CONTROL_DSA_TAG;
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6320_family(ds)) {
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6320_family(ps)) {
reg |= PORT_CONTROL_FRAME_MODE_DSA;
}
@@ -2388,26 +2745,26 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
PORT_CONTROL_FORWARD_UNKNOWN_MC;
}
if (reg) {
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_CONTROL, reg);
if (ret)
- goto abort;
+ return ret;
}
/* If this port is connected to a SerDes, make sure the SerDes is not
* powered down.
*/
- if (mv88e6xxx_6352_family(ds)) {
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+ if (mv88e6xxx_6352_family(ps)) {
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
if (ret < 0)
- goto abort;
+ return ret;
ret &= PORT_STATUS_CMODE_MASK;
if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
(ret == PORT_STATUS_CMODE_1000BASE_X) ||
(ret == PORT_STATUS_CMODE_SGMII)) {
- ret = mv88e6xxx_power_on_serdes(ds);
+ ret = mv88e6xxx_power_on_serdes(ps);
if (ret < 0)
- goto abort;
+ return ret;
}
}
@@ -2418,16 +2775,17 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
* copy of all transmitted/received frames on this port to the CPU.
*/
reg = 0;
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6095_family(ps) || mv88e6xxx_6320_family(ps) ||
+ mv88e6xxx_6185_family(ps))
reg = PORT_CONTROL_2_MAP_DA;
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6320_family(ps))
reg |= PORT_CONTROL_2_JUMBO_10240;
- if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
+ if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) {
/* Set the upstream port this port should use */
reg |= dsa_upstream_port(ds);
/* enable forwarding of unknown multicast addresses to
@@ -2440,10 +2798,10 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
reg |= PORT_CONTROL_2_8021Q_DISABLED;
if (reg) {
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_CONTROL_2, reg);
if (ret)
- goto abort;
+ return ret;
}
/* Port Association Vector: when learning source addresses
@@ -2456,300 +2814,344 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (dsa_is_cpu_port(ds, port))
reg = 0;
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
if (ret)
- goto abort;
+ return ret;
/* Egress rate control 2: disable egress rate control. */
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL_2,
0x0000);
if (ret)
- goto abort;
+ return ret;
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6320_family(ds)) {
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6320_family(ps)) {
/* Do not limit the period of time that this port can
* be paused for by the remote end or the period of
* time that this port can pause the remote end.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_PAUSE_CTRL, 0x0000);
if (ret)
- goto abort;
+ return ret;
/* Port ATU control: disable limiting the number of
* address database entries that this port is allowed
* to use.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_ATU_CONTROL, 0x0000);
/* Priority Override: disable DA, SA and VTU priority
* override.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_PRI_OVERRIDE, 0x0000);
if (ret)
- goto abort;
+ return ret;
/* Port Ethertype: use the Ethertype DSA Ethertype
* value.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_ETH_TYPE, ETH_P_EDSA);
if (ret)
- goto abort;
+ return ret;
/* Tag Remap: use an identity 802.1p prio -> switch
* prio mapping.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_TAG_REGMAP_0123, 0x3210);
if (ret)
- goto abort;
+ return ret;
/* Tag Remap 2: use an identity 802.1p prio -> switch
* prio mapping.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_TAG_REGMAP_4567, 0x7654);
if (ret)
- goto abort;
+ return ret;
}
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
- mv88e6xxx_6320_family(ds)) {
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
+ mv88e6xxx_6320_family(ps)) {
/* Rate Control: disable ingress rate limiting. */
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_RATE_CONTROL, 0x0001);
if (ret)
- goto abort;
+ return ret;
}
/* Port Control 1: disable trunking, disable sending
* learning messages to this port.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 0x0000);
if (ret)
- goto abort;
+ return ret;
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
*/
- ret = _mv88e6xxx_port_fid_set(ds, port, 0);
+ ret = _mv88e6xxx_port_fid_set(ps, port, 0);
if (ret)
- goto abort;
+ return ret;
- ret = _mv88e6xxx_port_based_vlan_map(ds, port);
+ ret = _mv88e6xxx_port_based_vlan_map(ps, port);
if (ret)
- goto abort;
+ return ret;
/* Default VLAN ID and priority: don't set a default VLAN
* ID, and set the default packet priority to zero.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN,
0x0000);
-abort:
- mutex_unlock(&ps->smi_mutex);
- return ret;
-}
-
-int mv88e6xxx_setup_ports(struct dsa_switch *ds)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
- int i;
+ if (ret)
+ return ret;
- for (i = 0; i < ps->num_ports; i++) {
- ret = mv88e6xxx_setup_port(ds, i);
- if (ret < 0)
- return ret;
- }
return 0;
}
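/* The rework above moves smi_mutex acquisition out of the port setup
 * path and into the caller, so every error check can return directly
 * instead of jumping to an unlock label. A sketch of the resulting
 * shape, with do_setup() as a hypothetical body using early returns:
 */
static int do_setup(void)
{
	return 0;	/* register writes with plain early returns go here */
}

static int setup_locked(struct mutex *lock)
{
	int err;

	mutex_lock(lock);
	err = do_setup();
	mutex_unlock(lock);

	return err;
}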
-int mv88e6xxx_setup_common(struct dsa_switch *ds)
+static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- mutex_init(&ps->smi_mutex);
+ struct dsa_switch *ds = ps->ds;
+ u32 upstream_port = dsa_upstream_port(ds);
+ u16 reg;
+ int err;
+ int i;
- ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
+ /* Enable the PHY Polling Unit if present, don't discard any packets,
+ * and mask all interrupt sources.
+ */
+ reg = 0;
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU) ||
+ mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE))
+ reg |= GLOBAL_CONTROL_PPU_ENABLE;
- INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, reg);
+ if (err)
+ return err;
- return 0;
-}
+ /* Configure the upstream port, and use it as the port to which
+ * ingress, egress and ARP monitor frames are to be sent.
+ */
+ reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
+ if (err)
+ return err;
-int mv88e6xxx_setup_global(struct dsa_switch *ds)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
- int i;
+ /* Disable remote management, and set the switch's DSA device number. */
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2,
+ GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
+ (ds->index & 0x1f));
+ if (err)
+ return err;
/* Set the default address aging time to 5 minutes, and
* enable address learn messages to be sent to all message
* ports.
*/
- REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
- 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
+ if (err)
+ return err;
/* Configure the IP ToS mapping registers. */
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+ if (err)
+ return err;
/* Configure the IEEE 802.1p priority mapping register. */
- REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+ if (err)
+ return err;
/* Send all frames with destination addresses matching
* 01:80:c2:00:00:0x to the CPU port.
*/
- REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
+ if (err)
+ return err;
/* Ignore removed tag data on doubly tagged packets, disable
* flow control messages, force flow control priority to the
* highest, and send all special multicast frames to the CPU
* port at the highest priority.
*/
- REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
- 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
- GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
+ 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
+ GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
+ if (err)
+ return err;
/* Program the DSA routing table. */
for (i = 0; i < 32; i++) {
int nexthop = 0x1f;
- if (ds->pd->rtable &&
- i != ds->index && i < ds->dst->pd->nr_chips)
- nexthop = ds->pd->rtable[i] & 0x1f;
+ if (ps->ds->cd->rtable &&
+ i != ps->ds->index && i < ps->ds->dst->pd->nr_chips)
+ nexthop = ps->ds->cd->rtable[i] & 0x1f;
- REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
- GLOBAL2_DEVICE_MAPPING_UPDATE |
- (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
- nexthop);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+ GLOBAL2_DEVICE_MAPPING,
+ GLOBAL2_DEVICE_MAPPING_UPDATE |
+ (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
+ nexthop);
+ if (err)
+ return err;
}
/* Clear all trunk masks. */
- for (i = 0; i < 8; i++)
- REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
- 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
- ((1 << ps->num_ports) - 1));
+ for (i = 0; i < 8; i++) {
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
+ 0x8000 |
+ (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
+ ((1 << ps->info->num_ports) - 1));
+ if (err)
+ return err;
+ }
/* Clear all trunk mappings. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
- GLOBAL2_TRUNK_MAPPING_UPDATE |
- (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
-
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6320_family(ds)) {
+ for (i = 0; i < 16; i++) {
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+ GLOBAL2_TRUNK_MAPPING,
+ GLOBAL2_TRUNK_MAPPING_UPDATE |
+ (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
+ if (err)
+ return err;
+ }
+
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6320_family(ps)) {
/* Send all frames with destination addresses matching
* 01:80:c2:00:00:2x to the CPU port.
*/
- REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+ GLOBAL2_MGMT_EN_2X, 0xffff);
+ if (err)
+ return err;
/* Initialise cross-chip port VLAN table to reset
* defaults.
*/
- REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+ GLOBAL2_PVT_ADDR, 0x9000);
+ if (err)
+ return err;
/* Clear the priority override table. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
- 0x8000 | (i << 8));
+ for (i = 0; i < 16; i++) {
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+ GLOBAL2_PRIO_OVERRIDE,
+ 0x8000 | (i << 8));
+ if (err)
+ return err;
+ }
}
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
- mv88e6xxx_6320_family(ds)) {
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
+ mv88e6xxx_6320_family(ps)) {
/* Disable ingress rate limiting by resetting all
* ingress rate limit registers to their initial
* state.
*/
- for (i = 0; i < ps->num_ports; i++)
- REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
- 0x9000 | (i << 8));
+ for (i = 0; i < ps->info->num_ports; i++) {
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+ GLOBAL2_INGRESS_OP,
+ 0x9000 | (i << 8));
+ if (err)
+ return err;
+ }
}
/* Clear the statistics counters for all ports */
- REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_FLUSH_ALL);
+ if (err)
+ return err;
/* Wait for the flush to complete. */
- mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_stats_wait(ds);
- if (ret < 0)
- goto unlock;
+ err = _mv88e6xxx_stats_wait(ps);
+ if (err)
+ return err;
/* Clear all ATU entries */
- ret = _mv88e6xxx_atu_flush(ds, 0, true);
- if (ret < 0)
- goto unlock;
+ err = _mv88e6xxx_atu_flush(ps, 0, true);
+ if (err)
+ return err;
/* Clear all the VTU and STU entries */
- ret = _mv88e6xxx_vtu_stu_flush(ds);
-unlock:
- mutex_unlock(&ps->smi_mutex);
+ err = _mv88e6xxx_vtu_stu_flush(ps);
- return ret;
+ return err;
}
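/* A sketch (not driver API) of how the GLOBAL2 mapping words written in
 * the routing-table loop above are packed: the UPDATE bit commits the
 * entry, the target field selects the row, and the low five bits carry
 * the next-hop switch device number:
 */
static u16 build_device_mapping(int target, int nexthop)
{
	return GLOBAL2_DEVICE_MAPPING_UPDATE |
	       (target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
	       (nexthop & 0x1f);
}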
-int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
+static int mv88e6xxx_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
- struct gpio_desc *gpiod = ds->pd->reset;
- unsigned long timeout;
- int ret;
+ int err;
int i;
- /* Set all ports to the disabled state. */
- for (i = 0; i < ps->num_ports; i++) {
- ret = REG_READ(REG_PORT(i), PORT_CONTROL);
- REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
- }
+ ps->ds = ds;
- /* Wait for transmit queues to drain. */
- usleep_range(2000, 4000);
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+ mutex_init(&ps->eeprom_mutex);
- /* If there is a gpio connected to the reset pin, toggle it */
- if (gpiod) {
- gpiod_set_value_cansleep(gpiod, 1);
- usleep_range(10000, 20000);
- gpiod_set_value_cansleep(gpiod, 0);
- usleep_range(10000, 20000);
- }
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
+ mv88e6xxx_ppu_state_init(ps);
- /* Reset the switch. Keep the PPU active if requested. The PPU
- * needs to be active to support indirect phy register access
- * through global registers 0x18 and 0x19.
- */
- if (ppu_active)
- REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
- else
- REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
+ mutex_lock(&ps->smi_mutex);
- /* Wait up to one second for reset to complete. */
- timeout = jiffies + 1 * HZ;
- while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, 0x00);
- if ((ret & is_reset) == is_reset)
- break;
- usleep_range(1000, 2000);
+ err = mv88e6xxx_switch_reset(ps);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_setup_global(ps);
+ if (err)
+ goto unlock;
+
+ for (i = 0; i < ps->info->num_ports; i++) {
+ err = mv88e6xxx_setup_port(ps, i);
+ if (err)
+ goto unlock;
}
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
- return 0;
+unlock:
+ mutex_unlock(&ps->smi_mutex);
+
+ return err;
}
int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
@@ -2758,7 +3160,7 @@ int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_page_read(ds, port, page, reg);
+ ret = _mv88e6xxx_phy_page_read(ps, port, page, reg);
mutex_unlock(&ps->smi_mutex);
return ret;
@@ -2771,82 +3173,61 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_page_write(ds, port, page, reg, val);
+ ret = _mv88e6xxx_phy_page_write(ps, port, page, reg, val);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
+static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state *ps,
+ int port)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- if (port >= 0 && port < ps->num_ports)
+ if (port >= 0 && port < ps->info->num_ports)
return port;
return -EINVAL;
}
-int
-mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
+static int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+ int addr = mv88e6xxx_port_to_phy_addr(ps, port);
int ret;
if (addr < 0)
- return addr;
+ return 0xffff;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_read(ds, addr, regnum);
- mutex_unlock(&ps->smi_mutex);
- return ret;
-}
-
-int
-mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6xxx_port_to_phy_addr(ds, port);
- int ret;
- if (addr < 0)
- return addr;
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
+ ret = mv88e6xxx_phy_read_ppu(ps, addr, regnum);
+ else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
+ ret = _mv88e6xxx_phy_read_indirect(ps, addr, regnum);
+ else
+ ret = _mv88e6xxx_phy_read(ps, addr, regnum);
- mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-int
-mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
+static int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum,
+ u16 val)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+ int addr = mv88e6xxx_port_to_phy_addr(ps, port);
int ret;
if (addr < 0)
- return addr;
+ return 0xffff;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
- mutex_unlock(&ps->smi_mutex);
- return ret;
-}
-
-int
-mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
- u16 val)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6xxx_port_to_phy_addr(ds, port);
- int ret;
- if (addr < 0)
- return addr;
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
+ ret = mv88e6xxx_phy_write_ppu(ps, addr, regnum, val);
+ else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
+ ret = _mv88e6xxx_phy_write_indirect(ps, addr, regnum, val);
+ else
+ ret = _mv88e6xxx_phy_write(ps, addr, regnum, val);
- mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
mutex_unlock(&ps->smi_mutex);
return ret;
}
@@ -2863,44 +3244,45 @@ static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+ ret = _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x6);
if (ret < 0)
goto error;
/* Enable temperature sensor */
- ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+ ret = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
if (ret < 0)
goto error;
- ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+ ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret | (1 << 5));
if (ret < 0)
goto error;
/* Wait for temperature to stabilize */
usleep_range(10000, 12000);
- val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+ val = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
if (val < 0) {
ret = val;
goto error;
}
/* Disable temperature sensor */
- ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
+ ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret & ~(1 << 5));
if (ret < 0)
goto error;
*temp = ((val & 0x1f) - 5) * 5;
error:
- _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+ _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x0);
mutex_unlock(&ps->smi_mutex);
return ret;
}
static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
{
- int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
int ret;
*temp = 0;
@@ -2914,20 +3296,26 @@ static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
return 0;
}
-int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
+static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
{
- if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP))
+ return -EOPNOTSUPP;
+
+ if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
return mv88e63xx_get_temp(ds, temp);
return mv88e61xx_get_temp(ds, temp);
}
-int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
{
- int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
int ret;
- if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
return -EOPNOTSUPP;
*temp = 0;
@@ -2941,12 +3329,13 @@ int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
return 0;
}
-int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
+static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
{
- int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
int ret;
- if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
return -EOPNOTSUPP;
ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
@@ -2957,12 +3346,13 @@ int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
(ret & 0xe0ff) | (temp << 8));
}
-int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
{
- int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
int ret;
- if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
return -EOPNOTSUPP;
*alarm = false;
@@ -2977,70 +3367,354 @@ int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
}
#endif /* CONFIG_NET_DSA_HWMON */
-char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr,
- const struct mv88e6xxx_switch_id *table,
- unsigned int num)
+static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ [MV88E6085] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
+ .family = MV88E6XXX_FAMILY_6097,
+ .name = "Marvell 88E6085",
+ .num_databases = 4096,
+ .num_ports = 10,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6097,
+ },
+
+ [MV88E6095] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6095,
+ .family = MV88E6XXX_FAMILY_6095,
+ .name = "Marvell 88E6095/88E6095F",
+ .num_databases = 256,
+ .num_ports = 11,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6095,
+ },
+
+ [MV88E6123] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6123,
+ .family = MV88E6XXX_FAMILY_6165,
+ .name = "Marvell 88E6123",
+ .num_databases = 4096,
+ .num_ports = 3,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ },
+
+ [MV88E6131] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6131,
+ .family = MV88E6XXX_FAMILY_6185,
+ .name = "Marvell 88E6131",
+ .num_databases = 256,
+ .num_ports = 8,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6185,
+ },
+
+ [MV88E6161] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6161,
+ .family = MV88E6XXX_FAMILY_6165,
+ .name = "Marvell 88E6161",
+ .num_databases = 4096,
+ .num_ports = 6,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ },
+
+ [MV88E6165] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6165,
+ .family = MV88E6XXX_FAMILY_6165,
+ .name = "Marvell 88E6165",
+ .num_databases = 4096,
+ .num_ports = 6,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ },
+
+ [MV88E6171] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6171,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6171",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ },
+
+ [MV88E6172] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6172,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6172",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ },
+
+ [MV88E6175] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6175,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6175",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ },
+
+ [MV88E6176] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6176,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6176",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ },
+
+ [MV88E6185] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6185,
+ .family = MV88E6XXX_FAMILY_6185,
+ .name = "Marvell 88E6185",
+ .num_databases = 256,
+ .num_ports = 10,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6185,
+ },
+
+ [MV88E6240] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6240,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6240",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ },
+
+ [MV88E6320] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6320,
+ .family = MV88E6XXX_FAMILY_6320,
+ .name = "Marvell 88E6320",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6320,
+ },
+
+ [MV88E6321] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6321,
+ .family = MV88E6XXX_FAMILY_6320,
+ .name = "Marvell 88E6321",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6320,
+ },
+
+ [MV88E6350] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6350,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6350",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ },
+
+ [MV88E6351] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6351,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6351",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ },
+
+ [MV88E6352] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6352,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6352",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ },
+};
+
+static const struct mv88e6xxx_info *
+mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table,
+ unsigned int num)
{
- struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
- int i, ret;
+ int i;
+
+ for (i = 0; i < num; ++i)
+ if (table[i].prod_num == prod_num)
+ return &table[i];
+
+ return NULL;
+}
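/* Usage sketch: the probe paths below split the 16-bit PORT_SWITCH_ID
 * value into a 12-bit product number and a 4-bit revision before
 * consulting the table; identify() is illustrative only:
 */
static const struct mv88e6xxx_info *identify(int id)
{
	int prod_num = (id & 0xfff0) >> 4;

	return mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table,
				     ARRAY_SIZE(mv88e6xxx_table));
}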
+static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
+ struct device *host_dev, int sw_addr,
+ void **priv)
+{
+ const struct mv88e6xxx_info *info;
+ struct mv88e6xxx_priv_state *ps;
+ struct mii_bus *bus;
+ const char *name;
+ int id, prod_num, rev;
+
+ bus = dsa_host_dev_to_mii_bus(host_dev);
if (!bus)
return NULL;
- ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
- if (ret < 0)
+ id = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
+ if (id < 0)
return NULL;
- /* Look up the exact switch ID */
- for (i = 0; i < num; ++i)
- if (table[i].id == ret)
- return table[i].name;
-
- /* Look up only the product number */
- for (i = 0; i < num; ++i) {
- if (table[i].id == (ret & PORT_SWITCH_ID_PROD_NUM_MASK)) {
- dev_warn(host_dev, "unknown revision %d, using base switch 0x%x\n",
- ret & PORT_SWITCH_ID_REV_MASK,
- ret & PORT_SWITCH_ID_PROD_NUM_MASK);
- return table[i].name;
+ prod_num = (id & 0xfff0) >> 4;
+ rev = id & 0x000f;
+
+ info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table,
+ ARRAY_SIZE(mv88e6xxx_table));
+ if (!info)
+ return NULL;
+
+ name = info->name;
+
+ ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
+ if (!ps)
+ return NULL;
+
+ ps->bus = bus;
+ ps->sw_addr = sw_addr;
+ ps->info = info;
+ mutex_init(&ps->smi_mutex);
+
+ *priv = ps;
+
+ dev_info(&ps->bus->dev, "switch 0x%x probed: %s, revision %u\n",
+ prod_num, name, rev);
+
+ return name;
+}
+
+struct dsa_switch_driver mv88e6xxx_switch_driver = {
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .probe = mv88e6xxx_drv_probe,
+ .setup = mv88e6xxx_setup,
+ .set_addr = mv88e6xxx_set_addr,
+ .phy_read = mv88e6xxx_phy_read,
+ .phy_write = mv88e6xxx_phy_write,
+ .adjust_link = mv88e6xxx_adjust_link,
+ .get_strings = mv88e6xxx_get_strings,
+ .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
+ .get_sset_count = mv88e6xxx_get_sset_count,
+ .set_eee = mv88e6xxx_set_eee,
+ .get_eee = mv88e6xxx_get_eee,
+#ifdef CONFIG_NET_DSA_HWMON
+ .get_temp = mv88e6xxx_get_temp,
+ .get_temp_limit = mv88e6xxx_get_temp_limit,
+ .set_temp_limit = mv88e6xxx_set_temp_limit,
+ .get_temp_alarm = mv88e6xxx_get_temp_alarm,
+#endif
+ .get_eeprom_len = mv88e6xxx_get_eeprom_len,
+ .get_eeprom = mv88e6xxx_get_eeprom,
+ .set_eeprom = mv88e6xxx_set_eeprom,
+ .get_regs_len = mv88e6xxx_get_regs_len,
+ .get_regs = mv88e6xxx_get_regs,
+ .port_bridge_join = mv88e6xxx_port_bridge_join,
+ .port_bridge_leave = mv88e6xxx_port_bridge_leave,
+ .port_stp_state_set = mv88e6xxx_port_stp_state_set,
+ .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
+ .port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
+ .port_vlan_add = mv88e6xxx_port_vlan_add,
+ .port_vlan_del = mv88e6xxx_port_vlan_del,
+ .port_vlan_dump = mv88e6xxx_port_vlan_dump,
+ .port_fdb_prepare = mv88e6xxx_port_fdb_prepare,
+ .port_fdb_add = mv88e6xxx_port_fdb_add,
+ .port_fdb_del = mv88e6xxx_port_fdb_del,
+ .port_fdb_dump = mv88e6xxx_port_fdb_dump,
+};
+
+int mv88e6xxx_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct device_node *np = dev->of_node;
+ struct mv88e6xxx_priv_state *ps;
+ int id, prod_num, rev;
+ struct dsa_switch *ds;
+ u32 eeprom_len;
+ int err;
+
+ ds = devm_kzalloc(dev, sizeof(*ds) + sizeof(*ps), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
+ ps = (struct mv88e6xxx_priv_state *)(ds + 1);
+ ds->priv = ps;
+ ds->dev = dev;
+ ps->dev = dev;
+ ps->ds = ds;
+ ps->bus = mdiodev->bus;
+ ps->sw_addr = mdiodev->addr;
+ mutex_init(&ps->smi_mutex);
+
+ get_device(&ps->bus->dev);
+
+ ds->drv = &mv88e6xxx_switch_driver;
+
+ id = mv88e6xxx_reg_read(ps, REG_PORT(0), PORT_SWITCH_ID);
+ if (id < 0)
+ return id;
+
+ prod_num = (id & 0xfff0) >> 4;
+ rev = id & 0x000f;
+
+ ps->info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table,
+ ARRAY_SIZE(mv88e6xxx_table));
+ if (!ps->info)
+ return -ENODEV;
+
+ ps->reset = devm_gpiod_get(&mdiodev->dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(ps->reset)) {
+ err = PTR_ERR(ps->reset);
+ if (err == -ENOENT) {
+ /* Optional, so not an error */
+ ps->reset = NULL;
+ } else {
+ return err;
}
}
- return NULL;
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM) &&
+ !of_property_read_u32(np, "eeprom-length", &eeprom_len))
+ ps->eeprom_len = eeprom_len;
+
+ dev_set_drvdata(dev, ds);
+
+ dev_info(dev, "switch 0x%x probed: %s, revision %u\n",
+ prod_num, ps->info->name, rev);
+
+ return 0;
}
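/* A sketch of the optional-GPIO idiom in the probe above: -ENOENT from
 * devm_gpiod_get() just means the "reset" line is absent, which is not
 * an error for an optional pin (later kernels provide
 * devm_gpiod_get_optional() for this); get_optional_reset() is
 * illustrative only:
 */
static struct gpio_desc *get_optional_reset(struct device *dev)
{
	struct gpio_desc *gpiod = devm_gpiod_get(dev, "reset", GPIOD_ASIS);

	if (IS_ERR(gpiod) && PTR_ERR(gpiod) == -ENOENT)
		return NULL;	/* absent: treat as "no reset line" */

	return gpiod;		/* valid descriptor, or a real error */
}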
+static void mv88e6xxx_remove(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ put_device(&ps->bus->dev);
+}
+
+static const struct of_device_id mv88e6xxx_of_match[] = {
+ { .compatible = "marvell,mv88e6085" },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match);
+
+static struct mdio_driver mv88e6xxx_driver = {
+ .probe = mv88e6xxx_probe,
+ .remove = mv88e6xxx_remove,
+ .mdiodrv.driver = {
+ .name = "mv88e6085",
+ .of_match_table = mv88e6xxx_of_match,
+ },
+};
+
static int __init mv88e6xxx_init(void)
{
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
- register_switch_driver(&mv88e6131_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
- register_switch_driver(&mv88e6123_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
- register_switch_driver(&mv88e6352_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
- register_switch_driver(&mv88e6171_switch_driver);
-#endif
- return 0;
+ register_switch_driver(&mv88e6xxx_switch_driver);
+ return mdio_driver_register(&mv88e6xxx_driver);
}
module_init(mv88e6xxx_init);
static void __exit mv88e6xxx_cleanup(void)
{
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
- unregister_switch_driver(&mv88e6171_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
- unregister_switch_driver(&mv88e6352_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
- unregister_switch_driver(&mv88e6123_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
- unregister_switch_driver(&mv88e6131_switch_driver);
-#endif
+ mdio_driver_unregister(&mv88e6xxx_driver);
+ unregister_switch_driver(&mv88e6xxx_switch_driver);
}
module_exit(mv88e6xxx_cleanup);
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 26a424acd..36d0e1504 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -12,6 +12,7 @@
#define __MV88E6XXX_H
#include <linux/if_vlan.h>
+#include <linux/gpio/consumer.h>
#ifndef UINT64_MAX
#define UINT64_MAX (u64)(~((u64)0))
@@ -68,52 +69,23 @@
#define PORT_PCS_CTRL_UNFORCED 0x03
#define PORT_PAUSE_CTRL 0x02
#define PORT_SWITCH_ID 0x03
-#define PORT_SWITCH_ID_PROD_NUM_MASK 0xfff0
-#define PORT_SWITCH_ID_REV_MASK 0x000f
-#define PORT_SWITCH_ID_6031 0x0310
-#define PORT_SWITCH_ID_6035 0x0350
-#define PORT_SWITCH_ID_6046 0x0480
-#define PORT_SWITCH_ID_6061 0x0610
-#define PORT_SWITCH_ID_6065 0x0650
-#define PORT_SWITCH_ID_6085 0x04a0
-#define PORT_SWITCH_ID_6092 0x0970
-#define PORT_SWITCH_ID_6095 0x0950
-#define PORT_SWITCH_ID_6096 0x0980
-#define PORT_SWITCH_ID_6097 0x0990
-#define PORT_SWITCH_ID_6108 0x1070
-#define PORT_SWITCH_ID_6121 0x1040
-#define PORT_SWITCH_ID_6122 0x1050
-#define PORT_SWITCH_ID_6123 0x1210
-#define PORT_SWITCH_ID_6123_A1 0x1212
-#define PORT_SWITCH_ID_6123_A2 0x1213
-#define PORT_SWITCH_ID_6131 0x1060
-#define PORT_SWITCH_ID_6131_B2 0x1066
-#define PORT_SWITCH_ID_6152 0x1a40
-#define PORT_SWITCH_ID_6155 0x1a50
-#define PORT_SWITCH_ID_6161 0x1610
-#define PORT_SWITCH_ID_6161_A1 0x1612
-#define PORT_SWITCH_ID_6161_A2 0x1613
-#define PORT_SWITCH_ID_6165 0x1650
-#define PORT_SWITCH_ID_6165_A1 0x1652
-#define PORT_SWITCH_ID_6165_A2 0x1653
-#define PORT_SWITCH_ID_6171 0x1710
-#define PORT_SWITCH_ID_6172 0x1720
-#define PORT_SWITCH_ID_6175 0x1750
-#define PORT_SWITCH_ID_6176 0x1760
-#define PORT_SWITCH_ID_6182 0x1a60
-#define PORT_SWITCH_ID_6185 0x1a70
-#define PORT_SWITCH_ID_6240 0x2400
-#define PORT_SWITCH_ID_6320 0x1150
-#define PORT_SWITCH_ID_6320_A1 0x1151
-#define PORT_SWITCH_ID_6320_A2 0x1152
-#define PORT_SWITCH_ID_6321 0x3100
-#define PORT_SWITCH_ID_6321_A1 0x3101
-#define PORT_SWITCH_ID_6321_A2 0x3102
-#define PORT_SWITCH_ID_6350 0x3710
-#define PORT_SWITCH_ID_6351 0x3750
-#define PORT_SWITCH_ID_6352 0x3520
-#define PORT_SWITCH_ID_6352_A0 0x3521
-#define PORT_SWITCH_ID_6352_A1 0x3522
+#define PORT_SWITCH_ID_PROD_NUM_6085 0x04a
+#define PORT_SWITCH_ID_PROD_NUM_6095 0x095
+#define PORT_SWITCH_ID_PROD_NUM_6131 0x106
+#define PORT_SWITCH_ID_PROD_NUM_6320 0x115
+#define PORT_SWITCH_ID_PROD_NUM_6123 0x121
+#define PORT_SWITCH_ID_PROD_NUM_6161 0x161
+#define PORT_SWITCH_ID_PROD_NUM_6165 0x165
+#define PORT_SWITCH_ID_PROD_NUM_6171 0x171
+#define PORT_SWITCH_ID_PROD_NUM_6172 0x172
+#define PORT_SWITCH_ID_PROD_NUM_6175 0x175
+#define PORT_SWITCH_ID_PROD_NUM_6176 0x176
+#define PORT_SWITCH_ID_PROD_NUM_6185 0x1a7
+#define PORT_SWITCH_ID_PROD_NUM_6240 0x240
+#define PORT_SWITCH_ID_PROD_NUM_6321 0x310
+#define PORT_SWITCH_ID_PROD_NUM_6352 0x352
+#define PORT_SWITCH_ID_PROD_NUM_6350 0x371
+#define PORT_SWITCH_ID_PROD_NUM_6351 0x375
#define PORT_CONTROL 0x04
#define PORT_CONTROL_USE_CORE_TAG BIT(15)
#define PORT_CONTROL_DROP_ON_LOCK BIT(14)
@@ -367,9 +339,187 @@
#define MV88E6XXX_N_FID 4096
-struct mv88e6xxx_switch_id {
- u16 id;
- char *name;
+/* List of supported models */
+enum mv88e6xxx_model {
+ MV88E6085,
+ MV88E6095,
+ MV88E6123,
+ MV88E6131,
+ MV88E6161,
+ MV88E6165,
+ MV88E6171,
+ MV88E6172,
+ MV88E6175,
+ MV88E6176,
+ MV88E6185,
+ MV88E6240,
+ MV88E6320,
+ MV88E6321,
+ MV88E6350,
+ MV88E6351,
+ MV88E6352,
+};
+
+enum mv88e6xxx_family {
+ MV88E6XXX_FAMILY_NONE,
+ MV88E6XXX_FAMILY_6065, /* 6031 6035 6061 6065 */
+ MV88E6XXX_FAMILY_6095, /* 6092 6095 */
+ MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */
+ MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */
+ MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */
+ MV88E6XXX_FAMILY_6320, /* 6320 6321 */
+ MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
+ MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */
+};
+
+enum mv88e6xxx_cap {
+ /* Address Translation Unit.
+ * The ATU is used to look up and learn MAC addresses. See GLOBAL_ATU_OP.
+ */
+ MV88E6XXX_CAP_ATU,
+
+ /* Energy Efficient Ethernet.
+ */
+ MV88E6XXX_CAP_EEE,
+
+ /* EEPROM Command and Data registers.
+ * See GLOBAL2_EEPROM_OP and GLOBAL2_EEPROM_DATA.
+ */
+ MV88E6XXX_CAP_EEPROM,
+
+ /* Port State Filtering for 802.1D Spanning Tree.
+ * See PORT_CONTROL_STATE_* values in the PORT_CONTROL register.
+ */
+ MV88E6XXX_CAP_PORTSTATE,
+
+ /* PHY Polling Unit.
+ * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING.
+ */
+ MV88E6XXX_CAP_PPU,
+ MV88E6XXX_CAP_PPU_ACTIVE,
+
+ /* SMI PHY Command and Data registers.
+ * This requires an indirect access to PHY registers through
+ * GLOBAL2_SMI_OP; otherwise PHY registers are accessed directly.
+ */
+ MV88E6XXX_CAP_SMI_PHY,
+
+ /* Per VLAN Spanning Tree Unit (STU).
+ * The Port State database, if present, is accessed through VTU
+ * operations and dedicated SID registers. See GLOBAL_VTU_SID.
+ */
+ MV88E6XXX_CAP_STU,
+
+ /* Switch MAC/WoL/WoF register.
+ * This requires an indirect access to set the switch MAC address
+ * through GLOBAL2_SWITCH_MAC; otherwise GLOBAL_MAC_01, GLOBAL_MAC_23,
+ * and GLOBAL_MAC_45 are written directly.
+ */
+ MV88E6XXX_CAP_SWITCH_MAC_WOL_WOF,
+
+ /* Internal temperature sensor.
+ * Available from any enabled port's PHY register 26, page 6.
+ */
+ MV88E6XXX_CAP_TEMP,
+ MV88E6XXX_CAP_TEMP_LIMIT,
+
+ /* In-chip Port Based VLANs.
+ * Each port VLANTable register (see PORT_BASE_VLAN) is used to restrict
+ * the output (or egress) ports to which it is allowed to send frames.
+ */
+ MV88E6XXX_CAP_VLANTABLE,
+
+ /* VLAN Table Unit.
+ * The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP.
+ */
+ MV88E6XXX_CAP_VTU,
+};
+
+/* Bitmask of capabilities */
+#define MV88E6XXX_FLAG_ATU BIT(MV88E6XXX_CAP_ATU)
+#define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE)
+#define MV88E6XXX_FLAG_EEPROM BIT(MV88E6XXX_CAP_EEPROM)
+#define MV88E6XXX_FLAG_PORTSTATE BIT(MV88E6XXX_CAP_PORTSTATE)
+#define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU)
+#define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE)
+#define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY)
+#define MV88E6XXX_FLAG_STU BIT(MV88E6XXX_CAP_STU)
+#define MV88E6XXX_FLAG_SWITCH_MAC BIT(MV88E6XXX_CAP_SWITCH_MAC_WOL_WOF)
+#define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP)
+#define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT)
+#define MV88E6XXX_FLAG_VLANTABLE BIT(MV88E6XXX_CAP_VLANTABLE)
+#define MV88E6XXX_FLAG_VTU BIT(MV88E6XXX_CAP_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6095 \
+ (MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_PPU | \
+ MV88E6XXX_FLAG_VLANTABLE | \
+ MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6097 \
+ (MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_PPU | \
+ MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_VLANTABLE | \
+ MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6165 \
+ (MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_SWITCH_MAC | \
+ MV88E6XXX_FLAG_TEMP | \
+ MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6185 \
+ (MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_PPU | \
+ MV88E6XXX_FLAG_VLANTABLE | \
+ MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6320 \
+ (MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_EEPROM | \
+ MV88E6XXX_FLAG_PORTSTATE | \
+ MV88E6XXX_FLAG_PPU_ACTIVE | \
+ MV88E6XXX_FLAG_SMI_PHY | \
+ MV88E6XXX_FLAG_SWITCH_MAC | \
+ MV88E6XXX_FLAG_TEMP | \
+ MV88E6XXX_FLAG_TEMP_LIMIT | \
+ MV88E6XXX_FLAG_VLANTABLE | \
+ MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6351 \
+ (MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_PORTSTATE | \
+ MV88E6XXX_FLAG_PPU_ACTIVE | \
+ MV88E6XXX_FLAG_SMI_PHY | \
+ MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_SWITCH_MAC | \
+ MV88E6XXX_FLAG_TEMP | \
+ MV88E6XXX_FLAG_VLANTABLE | \
+ MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6352 \
+ (MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_EEPROM | \
+ MV88E6XXX_FLAG_PORTSTATE | \
+ MV88E6XXX_FLAG_PPU_ACTIVE | \
+ MV88E6XXX_FLAG_SMI_PHY | \
+ MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_SWITCH_MAC | \
+ MV88E6XXX_FLAG_TEMP | \
+ MV88E6XXX_FLAG_TEMP_LIMIT | \
+ MV88E6XXX_FLAG_VLANTABLE | \
+ MV88E6XXX_FLAG_VTU)
+
+struct mv88e6xxx_info {
+ enum mv88e6xxx_family family;
+ u16 prod_num;
+ const char *name;
+ unsigned int num_databases;
+ unsigned int num_ports;
+ unsigned long flags;
};
struct mv88e6xxx_atu_entry {
@@ -393,17 +543,29 @@ struct mv88e6xxx_vtu_stu_entry {
struct mv88e6xxx_priv_port {
struct net_device *bridge_dev;
- u8 state;
};
struct mv88e6xxx_priv_state {
+ const struct mv88e6xxx_info *info;
+
+ /* The dsa_switch this private structure is related to */
+ struct dsa_switch *ds;
+
+ /* The device this structure is associated to */
+ struct device *dev;
+
/* When using multi-chip addressing, this mutex protects
* access to the indirect access registers. (In single-chip
* mode, this mutex is effectively useless.)
*/
struct mutex smi_mutex;
-#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
+ /* The MII bus and the address on the bus that is used to
+ * communicate with the switch
+ */
+ struct mii_bus *bus;
+ int sw_addr;
+
/* Handles automatic disabling and re-enabling of the PHY
* polling unit.
*/
@@ -411,7 +573,6 @@ struct mv88e6xxx_priv_state {
int ppu_disabled;
struct work_struct ppu_work;
struct timer_list ppu_timer;
-#endif
/* This mutex serialises access to the statistics unit.
* Hold this mutex over snapshot + dump sequences.
@@ -429,14 +590,16 @@ struct mv88e6xxx_priv_state {
*/
struct mutex eeprom_mutex;
- int id; /* switch product id */
- int num_ports; /* number of switch ports */
-
struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS];
- DECLARE_BITMAP(port_state_update_mask, DSA_MAX_PORTS);
+ /* A switch may have a GPIO line tied to its reset pin. Parse
+ * this from the device tree, and use it before performing the
+ * switch soft reset.
+ */
+ struct gpio_desc *reset;
- struct work_struct bridge_work;
+ /* set to the size of the EEPROM if supported by the switch */
+ int eeprom_len;
};
enum stat_type {
@@ -452,104 +615,10 @@ struct mv88e6xxx_hw_stat {
enum stat_type type;
};
-int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active);
-char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr,
- const struct mv88e6xxx_switch_id *table,
- unsigned int num);
-int mv88e6xxx_setup_ports(struct dsa_switch *ds);
-int mv88e6xxx_setup_common(struct dsa_switch *ds);
-int mv88e6xxx_setup_global(struct dsa_switch *ds);
-int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
-int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
-int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
-int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
-int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum);
-int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val);
-int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum);
-int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
- u16 val);
-void mv88e6xxx_ppu_state_init(struct dsa_switch *ds);
-int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum);
-int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
- int regnum, u16 val);
-void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
-void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *data);
-int mv88e6xxx_get_sset_count(struct dsa_switch *ds);
-int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds);
-void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev);
-int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
-void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
- struct ethtool_regs *regs, void *_p);
-int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
-int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp);
-int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp);
-int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm);
-int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
-int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
-int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
-int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum,
- u16 val);
-int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
-int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
- struct phy_device *phydev, struct ethtool_eee *e);
-int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge);
-void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port);
-int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
-int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering);
-int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans);
-int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans);
-int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
-int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj));
-int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans);
-int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans);
-int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb);
-int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj));
-int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
-int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
- int reg, int val);
-
-extern struct dsa_switch_driver mv88e6131_switch_driver;
-extern struct dsa_switch_driver mv88e6123_switch_driver;
-extern struct dsa_switch_driver mv88e6352_switch_driver;
-extern struct dsa_switch_driver mv88e6171_switch_driver;
-
-#define REG_READ(addr, reg) \
- ({ \
- int __ret; \
- \
- __ret = mv88e6xxx_reg_read(ds, addr, reg); \
- if (__ret < 0) \
- return __ret; \
- __ret; \
- })
-
-#define REG_WRITE(addr, reg, val) \
- ({ \
- int __ret; \
- \
- __ret = mv88e6xxx_reg_write(ds, addr, reg, val); \
- if (__ret < 0) \
- return __ret; \
- })
-
-
+static inline bool mv88e6xxx_has(struct mv88e6xxx_priv_state *ps,
+ unsigned long flags)
+{
+ return (ps->info->flags & flags) == flags;
+}
#endif
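/* Usage sketch for mv88e6xxx_has() above: because the helper masks and
 * compares against the full argument, several capability flags can be
 * required in one call; supports_eeprom_and_temp() is illustrative only:
 */
static inline bool supports_eeprom_and_temp(struct mv88e6xxx_priv_state *ps)
{
	return mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM |
				 MV88E6XXX_FLAG_TEMP);
}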
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 7677c745f..91ada52f7 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -699,7 +699,7 @@ el3_tx_timeout (struct net_device *dev)
dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
inw(ioaddr + TX_FREE));
dev->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
outw(TxReset, ioaddr + EL3_CMD);
outw(TxEnable, ioaddr + EL3_CMD);
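/* This and the following hunks replace the open-coded
 * "dev->trans_start = jiffies" with netif_trans_update(). A sketch of
 * the helper's effect: it stamps the queue-0 watchdog timestamp, and
 * skips the store when the value is unchanged to avoid dirtying the
 * cacheline:
 */
static inline void trans_update_sketch(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}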
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 942fb0d5a..b26e038b4 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -992,7 +992,7 @@ static void corkscrew_timeout(struct net_device *dev)
if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
break;
outw(TxEnable, ioaddr + EL3_CMD);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index b9948f00c..b88afd759 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -700,7 +700,7 @@ static void el3_tx_timeout(struct net_device *dev)
netdev_notice(dev, "Transmit timed out!\n");
dump_status(dev);
dev->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
tc574_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index c5a320507..71396e4b8 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -534,7 +534,7 @@ static void el3_tx_timeout(struct net_device *dev)
netdev_warn(dev, "Transmit timed out!\n");
dump_status(dev);
dev->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
tc589_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index d81fceddb..25c55ab05 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1944,7 +1944,7 @@ static void vortex_tx_timeout(struct net_device *dev)
}
/* Issue Tx Enable */
iowrite16(TxEnable, ioaddr + EL3_CMD);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
/*
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index ec6eac1f8..4ea717d68 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -1041,7 +1041,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
{
ei_local->txing = 1;
NS8390_trigger_send(dev, send_length, output_page);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (output_page == ei_local->tx_start_page)
{
ei_local->tx1 = -1;
@@ -1270,7 +1270,7 @@ static void ei_tx_intr(struct net_device *dev)
{
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ei_local->tx2 = -1,
ei_local->lasttx = 2;
}
@@ -1287,7 +1287,7 @@ static void ei_tx_intr(struct net_device *dev)
{
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ei_local->tx1 = -1;
ei_local->lasttx = 1;
}
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index b96e8852b..60f8e2c8e 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -596,7 +596,7 @@ static void ei_tx_intr(struct net_device *dev)
if (ei_local->tx2 > 0) {
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ei_local->tx2 = -1,
ei_local->lasttx = 2;
} else
@@ -609,7 +609,7 @@ static void ei_tx_intr(struct net_device *dev)
if (ei_local->tx1 > 0) {
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ei_local->tx1 = -1;
ei_local->lasttx = 1;
} else
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index e498eb0b9..2f9258e3e 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1128,7 +1128,7 @@ static void tx_timeout(struct net_device *dev)
/* Trigger an immediate transmit demand. */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 74139cb7f..3d2245fdc 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1430,7 +1430,7 @@ static void bfin_mac_timeout(struct net_device *dev)
bfin_mac_enable(lp->phydev);
/* We can accept TX packets again */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
static void bfin_mac_multicast_hash(struct net_device *dev)
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index b873531c5..bca07c5c9 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1323,7 +1323,7 @@ static inline int phy_aneg_done(struct phy_device *phydev)
static int greth_mdio_init(struct greth_private *greth)
{
- int ret, phy;
+ int ret;
unsigned long timeout;
greth->mdio = mdiobus_alloc();
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 0907ab6ff..821d86c38 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3349,7 +3349,7 @@ static void et131x_down(struct net_device *netdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
/* Save the timestamp for the TX watchdog, prevent a timeout */
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
phy_stop(adapter->phydev);
et131x_disable_txrx(netdev);
@@ -3816,7 +3816,7 @@ static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
netif_stop_queue(netdev);
/* Save the timestamp for the TX timeout watchdog */
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
/* TCB is not available */
if (tx_ring->used >= NUM_TCB)
@@ -3851,7 +3851,7 @@ static void et131x_tx_timeout(struct net_device *netdev)
unsigned long flags;
/* If the device is closed, ignore the timeout */
- if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
+ if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
return;
/* Any nonrecoverable hardware error?
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 8d50314ac..de2c4bf5f 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -428,7 +428,7 @@ static void emac_timeout(struct net_device *dev)
emac_reset(db);
emac_init_device(dev);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_wake_queue(dev);
/* Restore previous register address */
@@ -468,7 +468,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
db->membase + EMAC_TX_CTL0_REG);
/* save the time stamp */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
} else if (channel == 1) {
/* set TX len */
writel(skb->len, db->membase + EMAC_TX_PL1_REG);
@@ -477,7 +477,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
db->membase + EMAC_TX_CTL1_REG);
/* save the time stamp */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
if ((db->tx_fifo_stat & 3) == 3) {
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 66d0b73c3..dcf2a1f36 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -260,7 +260,7 @@ static int lance_reset(struct net_device *dev)
load_csrs(lp);
lance_init_ring(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
status = init_restart_lance(lp);
#ifdef DEBUG_DRIVER
printk("Lance restart=%d\n", status);
@@ -530,7 +530,7 @@ void lance_tx_timeout(struct net_device *dev)
{
printk("lance_tx_timeout\n");
lance_reset(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
@@ -543,11 +543,13 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
static int outs;
unsigned long flags;
- if (!TX_BUFFS_AVAIL)
- return NETDEV_TX_LOCKED;
-
netif_stop_queue(dev);
+ if (!TX_BUFFS_AVAIL) {
+ dev_consume_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
skblen = skb->len;
#ifdef DEBUG_DRIVER
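The lance hunk above is part of the 4.7-era removal of NETDEV_TX_LOCKED: an ndo_start_xmit handler now either consumes the skb and returns NETDEV_TX_OK, or leaves it untouched and returns NETDEV_TX_BUSY. A minimal sketch of the two remaining legal outcomes; my_ring_full() and my_hw_queue() are hypothetical stand-ins:

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (my_ring_full(dev)) {                /* hypothetical */
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;          /* core requeues and retries skb */
        }
        my_hw_queue(dev, skb);                  /* ownership passes to the driver */
        return NETDEV_TX_OK;
}

lance_start_xmit takes a third route visible in the hunk: when no buffers are available even after stopping the queue, it drops the packet with dev_consume_skb_any() and still returns NETDEV_TX_OK, since returning TX_LOCKED is no longer an option.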
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 56139184b..a83cd1c4c 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -512,7 +512,7 @@ static inline int lance_reset(struct net_device *dev)
load_csrs(lp);
lance_init_ring(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_start_queue(dev);
status = init_restart_lance(lp);
@@ -547,10 +547,8 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
local_irq_save(flags);
- if (!lance_tx_buffs_avail(lp)) {
- local_irq_restore(flags);
- return NETDEV_TX_LOCKED;
- }
+ if (!lance_tx_buffs_avail(lp))
+ goto out_free;
#ifdef DEBUG
/* dump the packet */
@@ -573,6 +571,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
/* Kick the lance: transmit now */
ll->rdp = LE_C0_INEA | LE_C0_TDMD;
+ out_free:
dev_kfree_skb(skb);
local_irq_restore(flags);
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index b10964e8c..d2bc8e5dc 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -764,7 +764,7 @@ static void lance_tx_timeout (struct net_device *dev)
/* lance_restart, essentially */
lance_init_ring(dev);
REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index d3977d032..20760e102 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -509,8 +509,8 @@ static int au1000_mii_probe(struct net_device *dev)
* on the current MAC's MII bus
*/
for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
- if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) {
- phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
+ if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
+ phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
if (!aup->phy_search_highest_addr)
/* break out with first one found */
break;
@@ -1074,7 +1074,7 @@ static void au1000_tx_timeout(struct net_device *dev)
netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
au1000_reset_mac(dev);
au1000_init(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1269,7 +1269,7 @@ static int au1000_probe(struct platform_device *pdev)
aup->phy_irq = pd->phy_irq;
}
- if (aup->phy_busid && aup->phy_busid > 0) {
+ if (aup->phy_busid > 0) {
dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n");
err = -ENODEV;
goto err_mdiobus_alloc;
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index b584b7823..b799c7ac8 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -877,7 +877,7 @@ static inline int lance_reset(struct net_device *dev)
lance_init_ring(dev);
load_csrs(lp);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 3a7ebfdda..abb1ba228 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -943,7 +943,7 @@ static void lance_tx_timeout (struct net_device *dev)
#endif
lance_restart (dev, 0x0043, 1);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue (dev);
}
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 1cf33addd..cda53db75 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -782,7 +782,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
if(!p->lock)
if (p->tmdnum || !p->xmit_queued)
netif_wake_queue(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
else
writedatareg(CSR0_STRT | csr0);
@@ -1148,7 +1148,7 @@ static void ni65_timeout(struct net_device *dev)
printk("%02x ",p->tmdhead[i].u.s.status);
printk("\n");
ni65_lance_reinit(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 27245efe9..2807e1816 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -851,7 +851,7 @@ static void mace_tx_timeout(struct net_device *dev)
#else /* #if RESET_ON_TIMEOUT */
pr_cont("NOT resetting card\n");
#endif /* #if RESET_ON_TIMEOUT */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 7ccebae9c..c22bf52d3 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -448,7 +448,7 @@ static void pcnet32_netif_stop(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
napi_disable(&lp->napi);
netif_tx_disable(dev);
}
@@ -2426,7 +2426,7 @@ static void pcnet32_tx_timeout(struct net_device *dev)
}
pcnet32_restart(dev, CSR0_NORMAL);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
spin_unlock_irqrestore(&lp->lock, flags);
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 7847638bd..9b56b4025 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -997,7 +997,7 @@ static int lance_reset(struct net_device *dev)
}
lp->init_ring(dev);
load_csrs(lp);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
index 11be8044e..472c0fb3f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
@@ -730,6 +730,6 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
return xgene_cle_setup_ptree(pdata, enet_cle);
}
-struct xgene_cle_ops xgene_cle3in_ops = {
+const struct xgene_cle_ops xgene_cle3in_ops = {
.cle_init = xgene_enet_cle_init,
};
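Constifying the method table lets the linker place it in read-only memory; the definition and the extern declaration (next hunk) have to change in lock-step or the build breaks. The idiom, sketched with hypothetical names:

struct my_ops {
        int (*init)(void *ctx);
};

static int my_init(void *ctx)
{
        return 0;
}

/* lands in .rodata: a stray write now faults instead of silently
 * corrupting the function pointer
 */
static const struct my_ops my_default_ops = {
        .init = my_init,
};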
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
index 3bf906832..33c5f6b25 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
@@ -292,6 +292,6 @@ struct xgene_enet_cle {
u32 jump_bytes;
};
-extern struct xgene_cle_ops xgene_cle3in_ops;
+extern const struct xgene_cle_ops xgene_cle3in_ops;
#endif /* __XGENE_ENET_CLE_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 513d2a62e..2f5638f7f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -827,7 +827,7 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
return -EINVAL;
phy = get_phy_device(mdio, phy_id, false);
- if (!phy || IS_ERR(phy))
+ if (IS_ERR(phy))
return -EIO;
ret = phy_device_register(phy);
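The dropped NULL test reflects get_phy_device() switching to the ERR_PTR convention in this kernel: failures are encoded in the pointer itself, so a lone IS_ERR() covers them all and the `!phy ||` half of the old condition is dead code. The caller-side shape, as a hedged sketch:

struct phy_device *phy = get_phy_device(mdio, phy_id, false);

if (IS_ERR(phy))
        return PTR_ERR(phy);    /* or a fixed -EIO, as the hunk does */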
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index fd200883d..d208b172f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -973,6 +973,17 @@ static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
return owner;
}
+static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
+{
+ struct device *dev = &pdata->pdev->dev;
+ u32 cpu_bufnum;
+ int ret;
+
+ ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
+
+ return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
+}
+
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
@@ -981,13 +992,15 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
struct xgene_enet_desc_ring *buf_pool = NULL;
enum xgene_ring_owner owner;
dma_addr_t dma_exp_bufs;
- u8 cpu_bufnum = pdata->cpu_bufnum;
+ u8 cpu_bufnum;
u8 eth_bufnum = pdata->eth_bufnum;
u8 bp_bufnum = pdata->bp_bufnum;
u16 ring_num = pdata->ring_num;
u16 ring_id;
int i, ret, size;
+ cpu_bufnum = xgene_start_cpu_bufnum(pdata);
+
for (i = 0; i < pdata->rxq_cnt; i++) {
/* allocate rx descriptor ring */
owner = xgene_derive_ring_owner(pdata);
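The new xgene_start_cpu_bufnum() uses device_property_read_u32(), which reads DT and ACPI properties alike and returns 0 only on success, so the ternary falls back to the platform default when the optional "channel" property is absent. The pattern in isolation; every name except "channel" is a stand-in:

u32 val;

/* nonzero return means the property is missing or unreadable:
 * keep the platform default
 */
if (device_property_read_u32(dev, "channel", &val))
        val = pdata_default;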
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 9d9cf4451..092fbecca 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -201,7 +201,7 @@ struct xgene_enet_pdata {
const struct xgene_mac_ops *mac_ops;
const struct xgene_port_ops *port_ops;
struct xgene_ring_ops *ring_ops;
- struct xgene_cle_ops *cle_ops;
+ const struct xgene_cle_ops *cle_ops;
struct delayed_work link_work;
u32 port_id;
u8 cpu_bufnum;
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 16419f550..058460bdd 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
priv->bus = bus;
bus->priv = priv;
bus->parent = priv->dev;
- bus->name = "Synopsys MII Bus",
+ bus->name = "Synopsys MII Bus";
bus->read = &arc_mdio_read;
bus->write = &arc_mdio_write;
bus->reset = &arc_mdio_reset;
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index d02c4240b..8fc93c5f6 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -96,10 +96,6 @@ struct alx_priv {
unsigned int rx_ringsz;
unsigned int rxbuf_size;
- struct page *rx_page;
- unsigned int rx_page_offset;
- unsigned int rx_frag_size;
-
struct napi_struct napi;
struct alx_tx_queue txq;
struct alx_rx_queue rxq;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 8611811fb..e708e360a 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -70,35 +70,6 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
}
}
-static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
-{
- struct sk_buff *skb;
- struct page *page;
-
- if (alx->rx_frag_size > PAGE_SIZE)
- return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
-
- page = alx->rx_page;
- if (!page) {
- alx->rx_page = page = alloc_page(gfp);
- if (unlikely(!page))
- return NULL;
- alx->rx_page_offset = 0;
- }
-
- skb = build_skb(page_address(page) + alx->rx_page_offset,
- alx->rx_frag_size);
- if (likely(skb)) {
- alx->rx_page_offset += alx->rx_frag_size;
- if (alx->rx_page_offset >= PAGE_SIZE)
- alx->rx_page = NULL;
- else
- get_page(page);
- }
- return skb;
-}
-
-
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
struct alx_rx_queue *rxq = &alx->rxq;
@@ -115,9 +86,22 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
while (!cur_buf->skb && next != rxq->read_idx) {
struct alx_rfd *rfd = &rxq->rfd[cur];
- skb = alx_alloc_skb(alx, gfp);
+ /*
+ * When the DMA RX address ends in something like
+ * 0x....fc0, it is very likely to trigger a DMA
+ * RFD overflow.
+ *
+ * To work around this, allocate the rx skb with 64
+ * bytes of extra space and offset the address
+ * whenever 0x....fc0 is detected.
+ */
+ skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
if (!skb)
break;
+
+ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+ skb_reserve(skb, 64);
+
dma = dma_map_single(&alx->hw.pdev->dev,
skb->data, alx->rxbuf_size,
DMA_FROM_DEVICE);
@@ -153,7 +137,6 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
}
-
return count;
}
@@ -622,11 +605,6 @@ static void alx_free_rings(struct alx_priv *alx)
kfree(alx->txq.bufs);
kfree(alx->rxq.bufs);
- if (alx->rx_page) {
- put_page(alx->rx_page);
- alx->rx_page = NULL;
- }
-
dma_free_coherent(&alx->hw.pdev->dev,
alx->descmem.size,
alx->descmem.virt,
@@ -681,7 +659,6 @@ static int alx_request_irq(struct alx_priv *alx)
alx->dev->name, alx);
if (!err)
goto out;
-
/* fall back to legacy interrupt */
pci_disable_msi(alx->hw.pdev);
}
@@ -725,7 +702,6 @@ static int alx_init_sw(struct alx_priv *alx)
struct pci_dev *pdev = alx->hw.pdev;
struct alx_hw *hw = &alx->hw;
int err;
- unsigned int head_size;
err = alx_identify_hw(alx);
if (err) {
@@ -741,12 +717,7 @@ static int alx_init_sw(struct alx_priv *alx)
hw->smb_timer = 400;
hw->mtu = alx->dev->mtu;
-
alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
- head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- alx->rx_frag_size = roundup_pow_of_two(head_size);
-
alx->tx_ringsz = 256;
alx->rx_ringsz = 512;
hw->imt = 200;
@@ -787,7 +758,7 @@ static netdev_features_t alx_fix_features(struct net_device *netdev,
static void alx_netif_stop(struct alx_priv *alx)
{
- alx->dev->trans_start = jiffies;
+ netif_trans_update(alx->dev);
if (netif_carrier_ok(alx->dev)) {
netif_carrier_off(alx->dev);
netif_tx_disable(alx->dev);
@@ -848,7 +819,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
{
struct alx_priv *alx = netdev_priv(netdev);
int max_frame = ALX_MAX_FRAME_LEN(mtu);
- unsigned int head_size;
if ((max_frame < ALX_MIN_FRAME_SIZE) ||
(max_frame > ALX_MAX_FRAME_SIZE))
@@ -860,9 +830,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
netdev->mtu = mtu;
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
- head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- alx->rx_frag_size = roundup_pow_of_two(head_size);
netdev_update_features(netdev);
if (netif_running(netdev))
alx_reinit(alx);
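The alx changes above replace the page-fragment RX allocator with plain skb allocation plus a 64-byte pad, so buffers whose DMA address would land on the pattern the hardware mishandles can be shifted past it. The test, isolated from the hunk (the 0xfc0 value is taken verbatim from it):

/* low 12 bits of the buffer address equal 0xfc0: shift by 64 bytes */
static bool alx_addr_needs_offset(const void *data)
{
        return ((unsigned long)data & 0xfff) == 0xfc0;
}

Dropping rx_page/rx_frag_size also removes the head_size computation from alx_init_sw() and alx_change_mtu(), visible in the later hunks of this file.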
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index b9203d928..c46b489ce 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -488,7 +488,7 @@ struct atl1c_tpd_ring {
dma_addr_t dma; /* descriptor ring physical address */
u16 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
- u16 next_to_use; /* this is protectd by adapter->tx_lock */
+ u16 next_to_use;
atomic_t next_to_clean;
struct atl1c_buffer *buffer_info;
};
@@ -542,7 +542,6 @@ struct atl1c_adapter {
u16 link_duplex;
spinlock_t mdio_lock;
- spinlock_t tx_lock;
atomic_t irq_sem;
struct work_struct common_task;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index d0084d4d1..a3200ea6d 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -821,7 +821,6 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
atl1c_set_rxbufsize(adapter, adapter->netdev);
atomic_set(&adapter->irq_sem, 1);
spin_lock_init(&adapter->mdio_lock);
- spin_lock_init(&adapter->tx_lock);
set_bit(__AT_DOWN, &adapter->flags);
return 0;
@@ -2206,7 +2205,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
- unsigned long flags;
u16 tpd_req = 1;
struct atl1c_tpd_desc *tpd;
enum atl1c_trans_queue type = atl1c_trans_normal;
@@ -2217,16 +2215,10 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
}
tpd_req = atl1c_cal_tpd_req(skb);
- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
- if (netif_msg_pktdata(adapter))
- dev_info(&adapter->pdev->dev, "tx locked\n");
- return NETDEV_TX_LOCKED;
- }
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
/* not enough descriptors, just stop the queue */
netif_stop_queue(netdev);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -2234,7 +2226,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
/* do TSO and check sum */
if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2257,12 +2248,10 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
"tx-skb droppted due to dma error\n");
/* roll back tpd/buffer */
atl1c_tx_rollback(adapter, tpd, type);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
} else {
netdev_sent_queue(adapter->netdev, skb->len);
atl1c_tx_queue(adapter, skb, tpd, type);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
}
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index 0212dac7e..632bb843a 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -442,7 +442,6 @@ struct atl1e_adapter {
u16 link_duplex;
spinlock_t mdio_lock;
- spinlock_t tx_lock;
atomic_t irq_sem;
struct work_struct reset_task;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 59a03a193..974713b19 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -648,7 +648,6 @@ static int atl1e_sw_init(struct atl1e_adapter *adapter)
atomic_set(&adapter->irq_sem, 1);
spin_lock_init(&adapter->mdio_lock);
- spin_lock_init(&adapter->tx_lock);
set_bit(__AT_DOWN, &adapter->flags);
@@ -1866,7 +1865,6 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- unsigned long flags;
u16 tpd_req = 1;
struct atl1e_tpd_desc *tpd;
@@ -1880,13 +1878,10 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
tpd_req = atl1e_cal_tdp_req(skb);
- if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
- return NETDEV_TX_LOCKED;
if (atl1e_tpd_avail(adapter) < tpd_req) {
/* not enough descriptors, just stop the queue */
netif_stop_queue(netdev);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -1910,7 +1905,6 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
/* do TSO and check sum */
if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1921,10 +1915,7 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
}
atl1e_tx_queue(adapter, tpd_req, tpd);
-
- netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
out:
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK;
}
@@ -2285,8 +2276,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
NETIF_F_HW_VLAN_CTAG_RX;
- netdev->features = netdev->hw_features | NETIF_F_LLTX |
- NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX;
/* not enabled by default */
netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
return 0;
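With the private tx_lock gone, atl1e also stops advertising NETIF_F_LLTX: the core now takes the per-queue xmit lock around ndo_start_xmit and maintains the watchdog timestamp itself, which is why the manual trans_start update disappears from the xmit path above. Schematically (illustrative statements, not driver code):

/* before: tell the core the driver serializes transmit on its own */
netdev->features |= NETIF_F_LLTX;

/* after: rely on the core's HARD_TX_LOCK serialization (the default) */
netdev->features &= ~NETIF_F_LLTX;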
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 08a23e6b6..1a3555d03 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -259,6 +259,7 @@ static void nb8800_receive(struct net_device *dev, unsigned int i,
if (err) {
netdev_err(dev, "rx buffer allocation failed\n");
dev->stats.rx_dropped++;
+ dev_kfree_skb(skb);
return;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 993c780bd..bfa26a259 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -392,7 +392,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
else
p = (char *)priv;
p += s->stat_offset;
- data[i] = *(u32 *)p;
+ data[i] = *(unsigned long *)p;
}
}
@@ -831,7 +831,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* re-enable RX interrupts */
intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
}
@@ -873,7 +873,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
if (likely(napi_schedule_prep(&priv->napi))) {
/* disable RX interrupts */
intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
- __napi_schedule(&priv->napi);
+ __napi_schedule_irqoff(&priv->napi);
}
}
@@ -916,7 +916,7 @@ static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
if (likely(napi_schedule_prep(&txr->napi))) {
intrl2_1_mask_set(priv, BIT(ring));
- __napi_schedule(&txr->napi);
+ __napi_schedule_irqoff(&txr->napi);
}
}
@@ -1117,7 +1117,7 @@ static void bcm_sysport_tx_timeout(struct net_device *dev)
{
netdev_warn(dev, "transmit timeout!\n");
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev->stats.tx_errors++;
netif_tx_wake_all_queues(dev);
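Two independent micro-optimizations land in bcmsysport here: napi_complete_done() reports the amount of work to the core (feeding its busy-poll and gro_flush_timeout heuristics), and __napi_schedule_irqoff() skips the local_irq_save()/restore() pair that __napi_schedule() pays, which is legal because both call sites are hard-IRQ handlers. The canonical pairing, with hypothetical mask helpers:

struct my_priv {
        struct napi_struct napi;
};

static irqreturn_t my_isr(int irq, void *dev_id)
{
        struct my_priv *priv = dev_id;

        if (likely(napi_schedule_prep(&priv->napi))) {
                my_mask_irq(priv);                      /* hypothetical */
                __napi_schedule_irqoff(&priv->napi);    /* irqs already off */
        }
        return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_priv *priv = container_of(napi, struct my_priv, napi);
        int work = my_process_rx(priv, budget);         /* hypothetical */

        if (work < budget) {
                napi_complete_done(napi, work);         /* report work done */
                my_unmask_irq(priv);                    /* hypothetical */
        }
        return work;
}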
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 38db2e4d7..25bbae592 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -231,7 +231,7 @@ err_dma:
dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
DMA_TO_DEVICE);
- while (i > 0) {
+ while (i-- > 0) {
int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[index];
u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
@@ -267,15 +267,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
while (ring->start != ring->end) {
int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[slot_idx];
- u32 ctl1;
+ u32 ctl0, ctl1;
int len;
if (slot_idx == empty_slot)
break;
+ ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
len = ctl1 & BGMAC_DESC_CTL1_LEN;
- if (ctl1 & BGMAC_DESC_CTL0_SOF)
+ if (ctl0 & BGMAC_DESC_CTL0_SOF)
/* Unmap no longer used buffer */
dma_unmap_single(dma_dev, slot->dma_addr, len,
DMA_TO_DEVICE);
@@ -1312,7 +1313,8 @@ static int bgmac_open(struct net_device *net_dev)
phy_start(bgmac->phy_dev);
- netif_carrier_on(net_dev);
+ netif_start_queue(net_dev);
+
return 0;
}
@@ -1515,7 +1517,7 @@ static int bgmac_mii_register(struct bgmac *bgmac)
phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phy_dev)) {
- bgmac_err(bgmac, "PHY connecton failed\n");
+ bgmac_err(bgmac, "PHY connection failed\n");
err = PTR_ERR(phy_dev);
goto err_unregister_bus;
}
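Three bgmac fixes sit in this file. The err_dma unwind loop is the subtle one: the old `while (i > 0)` compared i but, as far as the visible body shows, never decremented it, so a mapping failure midway through a multi-fragment skb could spin forever on the same slot; the post-decrement form visits fragments i-1 down to 0 exactly once each. Sketched with illustrative stand-ins:

/* unwind of already-mapped fragments after a mapping failure */
int i = nr_mapped_frags;        /* assumed count mapped so far */

while (i-- > 0) {
        int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
        u32 len = frag_len(index);      /* frag_len() is hypothetical */

        dma_unmap_page(dma_dev, ring->slots[index].dma_addr, len,
                       DMA_TO_DEVICE);
}

The other two fixes are visible directly: the SOF test now reads the ctl0 word the BGMAC_DESC_CTL0_SOF flag actually lives in (the old code tested it against ctl1, so head fragments could be unmapped incorrectly depending on unrelated bits), and bgmac_open() now starts the TX queue instead of merely asserting the carrier.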
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5f2cd0eb9..9089404bb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12889,52 +12889,71 @@ static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
return rc;
}
-int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
{
struct bnx2x_vlan_entry *vlan;
int rc = 0;
- if (!bp->vlan_cnt) {
- DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
- return 0;
- }
-
+ /* Configure all non-configured entries */
list_for_each_entry(vlan, &bp->vlan_reg, link) {
- /* Prepare for cleanup in case of errors */
- if (rc) {
- vlan->hw = false;
- continue;
- }
-
- if (!vlan->hw)
+ if (vlan->hw)
continue;
- DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+ if (bp->vlan_cnt >= bp->vlan_credit)
+ return -ENOBUFS;
rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
if (rc) {
- BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
- vlan->hw = false;
- rc = -EINVAL;
- continue;
+ BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
+ return rc;
}
+
+ DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
+ vlan->hw = true;
+ bp->vlan_cnt++;
}
- return rc;
+ return 0;
+}
+
+static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
+{
+ bool need_accept_any_vlan;
+
+ need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
+
+ if (bp->accept_any_vlan != need_accept_any_vlan) {
+ bp->accept_any_vlan = need_accept_any_vlan;
+ DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
+ bp->accept_any_vlan ? "raised" : "cleared");
+ if (set_rx_mode) {
+ if (IS_PF(bp))
+ bnx2x_set_rx_mode_inner(bp);
+ else
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+ }
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+
+ /* The hw forgot all entries after reload */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+ bp->vlan_cnt = 0;
+
+ /* Don't set rx mode here. Our caller will do it. */
+ bnx2x_vlan_configure(bp, false);
+
+ return 0;
}
static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
- bool hw = false;
- int rc = 0;
-
- if (!netif_running(bp->dev)) {
- DP(NETIF_MSG_IFUP,
- "Ignoring VLAN configuration the interface is down\n");
- return -EFAULT;
- }
DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
@@ -12942,93 +12961,47 @@ static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
if (!vlan)
return -ENOMEM;
- bp->vlan_cnt++;
- if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
- DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
- bp->accept_any_vlan = true;
- if (IS_PF(bp))
- bnx2x_set_rx_mode_inner(bp);
- else
- bnx2x_vfpf_storm_rx_mode(bp);
- } else if (bp->vlan_cnt <= bp->vlan_credit) {
- rc = __bnx2x_vlan_configure_vid(bp, vid, true);
- hw = true;
- }
-
vlan->vid = vid;
- vlan->hw = hw;
+ vlan->hw = false;
+ list_add_tail(&vlan->link, &bp->vlan_reg);
- if (!rc) {
- list_add(&vlan->link, &bp->vlan_reg);
- } else {
- bp->vlan_cnt--;
- kfree(vlan);
- }
-
- DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
- return rc;
+ return 0;
}
static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
+ bool found = false;
int rc = 0;
- if (!netif_running(bp->dev)) {
- DP(NETIF_MSG_IFUP,
- "Ignoring VLAN configuration the interface is down\n");
- return -EFAULT;
- }
-
DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
- if (!bp->vlan_cnt) {
- BNX2X_ERR("Unable to kill VLAN %d\n", vid);
- return -EINVAL;
- }
-
list_for_each_entry(vlan, &bp->vlan_reg, link)
- if (vlan->vid == vid)
+ if (vlan->vid == vid) {
+ found = true;
break;
+ }
- if (vlan->vid != vid) {
+ if (!found) {
BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
return -EINVAL;
}
- if (vlan->hw)
+ if (netif_running(dev) && vlan->hw) {
rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+ DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
+ bp->vlan_cnt--;
+ }
list_del(&vlan->link);
kfree(vlan);
- bp->vlan_cnt--;
-
- if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
- /* Configure all non-configured entries */
- list_for_each_entry(vlan, &bp->vlan_reg, link) {
- if (vlan->hw)
- continue;
-
- rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
- if (rc) {
- BNX2X_ERR("Unable to config VLAN %d\n",
- vlan->vid);
- continue;
- }
- DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
- vlan->vid);
- vlan->hw = true;
- }
- DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
- bp->accept_any_vlan = false;
- if (IS_PF(bp))
- bnx2x_set_rx_mode_inner(bp);
- else
- bnx2x_vfpf_storm_rx_mode(bp);
- }
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
@@ -13253,12 +13226,11 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!chip_is_e1x) {
dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+ NETIF_F_GSO_IPXIP4;
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
- NETIF_F_GSO_IPIP |
- NETIF_F_GSO_SIT |
+ NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
}
@@ -13878,14 +13850,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
bp->doorbells = bnx2x_vf_doorbells(bp);
rc = bnx2x_vf_pci_alloc(bp);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
} else {
doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
if (doorbell_size > pci_resource_len(pdev, 2)) {
dev_err(&bp->pdev->dev,
"Cannot map doorbells, bar size too small, aborting\n");
rc = -ENOMEM;
- goto init_one_exit;
+ goto init_one_freemem;
}
bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
doorbell_size);
@@ -13894,19 +13866,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
rc = -ENOMEM;
- goto init_one_exit;
+ goto init_one_freemem;
}
if (IS_VF(bp)) {
rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
}
/* Enable SRIOV if capability found in configuration space */
rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
@@ -13925,7 +13897,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = bnx2x_set_int_mode(bp);
if (rc) {
dev_err(&pdev->dev, "Cannot set interrupts\n");
- goto init_one_exit;
+ goto init_one_freemem;
}
BNX2X_DEV_INFO("set interrupts successfully\n");
@@ -13933,7 +13905,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
- goto init_one_exit;
+ goto init_one_freemem;
}
BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
@@ -13966,6 +13938,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
+init_one_freemem:
+ bnx2x_free_mem_bp(bp);
+
init_one_exit:
bnx2x_disable_pcie_error_reporting(bp);
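The init_one_freemem relabeling plugs a probe-path leak: every failure after the bp structures were allocated previously jumped straight to init_one_exit and skipped bnx2x_free_mem_bp(). The staged-label idiom this follows, sketched with hypothetical stages:

static int my_probe(void)
{
        int rc;

        rc = my_alloc_mem();            /* hypothetical stage 1 */
        if (rc)
                return rc;

        rc = my_map_resources();        /* hypothetical stage 2 */
        if (rc)
                goto err_free_mem;

        return 0;

err_free_mem:
        my_free_mem();                  /* undoes stage 1 */
        return rc;
}

Each later stage jumps to a label that unwinds everything acquired so far, in reverse order of acquisition.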
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c39a7f5c6..c777cde85 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -78,6 +78,7 @@ enum board_idx {
BCM57402,
BCM57404,
BCM57406,
+ BCM57314,
BCM57304_VF,
BCM57404_VF,
};
@@ -92,6 +93,7 @@ static const struct {
{ "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
};
@@ -103,6 +105,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+ { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
@@ -118,6 +121,13 @@ static const u16 bnxt_vf_req_snif[] = {
HWRM_CFA_L2_FILTER_ALLOC,
};
+static const u16 bnxt_async_events_arr[] = {
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+};
+
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == BCM57304_VF || idx == BCM57404_VF);
@@ -276,7 +286,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
txr->tx_prod = prod;
+ tx_buf->is_push = 1;
netdev_tx_sent_queue(txq, skb->len);
+ wmb(); /* Sync is_push and byte queue before pushing data */
push_len = (length + sizeof(*tx_push) + 7) / 8;
if (push_len > 16) {
@@ -288,7 +300,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
push_len);
}
- tx_buf->is_push = 1;
goto tx_done;
}
@@ -1102,19 +1113,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
- if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
- netdev_features_t features = skb->dev->features;
+ if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vlan_proto = tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT;
+ u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
- if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- vlan_proto == ETH_P_8021Q) ||
- ((features & NETIF_F_HW_VLAN_STAG_RX) &&
- vlan_proto == ETH_P_8021AD)) {
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
- tpa_info->metadata &
- RX_CMP_FLAGS2_METADATA_VID_MASK);
- }
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
@@ -1267,19 +1272,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
skb->protocol = eth_type_trans(skb, dev);
- if (rxcmp1->rx_cmp_flags2 &
- cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
- netdev_features_t features = skb->dev->features;
+ if ((rxcmp1->rx_cmp_flags2 &
+ cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
- if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- vlan_proto == ETH_P_8021Q) ||
- ((features & NETIF_F_HW_VLAN_STAG_RX) &&
- vlan_proto == ETH_P_8021AD))
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
- meta_data &
- RX_CMP_FLAGS2_METADATA_VID_MASK);
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
@@ -1313,6 +1313,10 @@ next_rx_no_prod:
return rc;
}
+#define BNXT_GET_EVENT_PORT(data) \
+ ((data) & \
+ HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
+
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
{
@@ -1320,12 +1324,40 @@ static int bnxt_async_event_process(struct bnxt *bp,
/* TODO CHIMP_FW: Define event id's for link change, error etc */
switch (event_id) {
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (BNXT_VF(bp))
+ goto async_event_process_exit;
+ if (data1 & 0x20000) {
+ u16 fw_speed = link_info->force_link_speed;
+ u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
+
+ netdev_warn(bp->dev, "Link speed %d no longer supported\n",
+ speed);
+ }
+ /* fall thru */
+ }
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+ u16 port_id = BNXT_GET_EVENT_PORT(data1);
+
+ if (BNXT_VF(bp))
+ break;
+
+ if (bp->pf.port_id != port_id)
+ break;
+
+ set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
+ break;
+ }
default:
netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
event_id);
@@ -1452,7 +1484,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
/* The valid test of the entry must be done first before
* reading any further.
*/
- rmb();
+ dma_rmb();
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
@@ -2729,7 +2761,7 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
int timeout, bool silent)
{
- int i, intr_process, rc;
+ int i, intr_process, rc, tmo_count;
struct input *req = msg;
u32 *data = msg;
__le32 *resp_len, *valid;
@@ -2758,11 +2790,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
timeout = DFLT_HWRM_CMD_TIMEOUT;
i = 0;
+ tmo_count = timeout * 40;
if (intr_process) {
/* Wait until hwrm response cmpl interrupt is processed */
while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
- i++ < timeout) {
- usleep_range(600, 800);
+ i++ < tmo_count) {
+ usleep_range(25, 40);
}
if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
@@ -2773,30 +2806,30 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
} else {
/* Check if response len is updated */
resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
- for (i = 0; i < timeout; i++) {
+ for (i = 0; i < tmo_count; i++) {
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
if (len)
break;
- usleep_range(600, 800);
+ usleep_range(25, 40);
}
- if (i >= timeout) {
+ if (i >= tmo_count) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
timeout, le16_to_cpu(req->req_type),
- le16_to_cpu(req->seq_id), *resp_len);
+ le16_to_cpu(req->seq_id), len);
return -1;
}
/* Last word of resp contains valid bit */
valid = bp->hwrm_cmd_resp_addr + len - 4;
- for (i = 0; i < timeout; i++) {
+ for (i = 0; i < 5; i++) {
if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
break;
- usleep_range(600, 800);
+ udelay(1);
}
- if (i >= timeout) {
+ if (i >= 5) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
timeout, le16_to_cpu(req->req_type),
le16_to_cpu(req->seq_id), len, *valid);
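The HWRM polling conversion keeps the millisecond budget while tightening the quantum: the old loop slept 600-800us per iteration for `timeout` iterations, so it could give up after as little as 0.6x the requested time; the new loop makes tmo_count = timeout * 40 passes of 25-40us, and even the 25us floor yields the full timeout * 1000us of waiting. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned int timeout_ms = 500;              /* example HWRM timeout */
        unsigned int tmo_count = timeout_ms * 40;   /* as in the hunk */

        /* floor of the total wait vs. the requested budget, in us */
        printf("min wait %u us, requested %u us\n",
               tmo_count * 25, timeout_ms * 1000);
        return 0;
}

The trailing valid-bit wait shrinks to five 1us spins because the response length is already visible at that point, so the valid flag is at most a DMA flush away.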
@@ -2842,6 +2875,8 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
struct hwrm_func_drv_rgtr_input req = {0};
int i;
+ DECLARE_BITMAP(async_events_bmap, 256);
+ u32 *events = (u32 *)async_events_bmap;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
@@ -2850,11 +2885,14 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
FUNC_DRV_RGTR_REQ_ENABLES_VER |
FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
- /* TODO: current async event fwd bits are not defined and the firmware
- * only checks if it is non-zero to enable async event forwarding
- */
- req.async_event_fwd[0] |= cpu_to_le32(1);
- req.os_type = cpu_to_le16(1);
+ memset(async_events_bmap, 0, sizeof(async_events_bmap));
+ for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
+ __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+
+ for (i = 0; i < 8; i++)
+ req.async_event_fwd[i] |= cpu_to_le32(events[i]);
+
+ req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
req.ver_maj = DRV_VER_MAJ;
req.ver_min = DRV_VER_MIN;
req.ver_upd = DRV_VER_UPD;
@@ -3817,7 +3855,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->fw_fid = le16_to_cpu(resp->fid);
pf->port_id = le16_to_cpu(resp->port_id);
- memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -3842,7 +3880,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
vf->fw_fid = le16_to_cpu(resp->fid);
- memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
if (is_valid_ether_addr(vf->mac_addr))
/* overwrite netdev dev_adr with admin VF MAC */
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
@@ -3933,6 +3971,8 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+ bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
+ resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
if (resp->hwrm_intf_maj < 1) {
netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
resp->hwrm_intf_maj, resp->hwrm_intf_min,
@@ -4589,12 +4629,49 @@ static void bnxt_report_link(struct bnxt *bp)
speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
speed, duplex, flow_ctrl);
+ if (bp->flags & BNXT_FLAG_EEE_CAP)
+ netdev_info(bp->dev, "EEE is %s\n",
+ bp->eee.eee_active ? "active" :
+ "not active");
} else {
netif_carrier_off(bp->dev);
netdev_err(bp->dev, "NIC Link is Down\n");
}
}
+static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_port_phy_qcaps_input req = {0};
+ struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (bp->hwrm_spec_code < 0x10201)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto hwrm_phy_qcaps_exit;
+
+ if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
+ struct ethtool_eee *eee = &bp->eee;
+ u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
+
+ bp->flags |= BNXT_FLAG_EEE_CAP;
+ eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
+ PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
+ bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
+ PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
+ }
+
+hwrm_phy_qcaps_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
{
int rc = 0;
@@ -4626,7 +4703,6 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
else
link_info->link_speed = 0;
link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
- link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
link_info->support_speeds = le16_to_cpu(resp->support_speeds);
link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
link_info->lp_auto_link_speeds =
@@ -4636,9 +4712,47 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->phy_ver[1] = resp->phy_min;
link_info->phy_ver[2] = resp->phy_bld;
link_info->media_type = resp->media_type;
- link_info->transceiver = resp->transceiver_type;
- link_info->phy_addr = resp->phy_addr;
+ link_info->phy_type = resp->phy_type;
+ link_info->transceiver = resp->xcvr_pkg_type;
+ link_info->phy_addr = resp->eee_config_phy_addr &
+ PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
+ link_info->module_status = resp->module_status;
+
+ if (bp->flags & BNXT_FLAG_EEE_CAP) {
+ struct ethtool_eee *eee = &bp->eee;
+ u16 fw_speeds;
+
+ eee->eee_active = 0;
+ if (resp->eee_config_phy_addr &
+ PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
+ eee->eee_active = 1;
+ fw_speeds = le16_to_cpu(
+ resp->link_partner_adv_eee_link_speed_mask);
+ eee->lp_advertised =
+ _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ }
+
+ /* Pull initial EEE config */
+ if (!chng_link_state) {
+ if (resp->eee_config_phy_addr &
+ PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
+ eee->eee_enabled = 1;
+ fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
+ eee->advertised =
+ _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+
+ if (resp->eee_config_phy_addr &
+ PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
+ __le32 tmr;
+
+ eee->tx_lpi_enabled = 1;
+ tmr = resp->xcvr_identifier_type_tx_lpi_timer;
+ eee->tx_lpi_timer = le32_to_cpu(tmr) &
+ PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
+ }
+ }
+ }
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status == BNXT_LINK_LINK)
@@ -4655,10 +4769,40 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
return 0;
}
+static void bnxt_get_port_module_status(struct bnxt *bp)
+{
+ struct bnxt_link_info *link_info = &bp->link_info;
+ struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
+ u8 module_status;
+
+ if (bnxt_update_link(bp, true))
+ return;
+
+ module_status = link_info->module_status;
+ switch (module_status) {
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
+ netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
+ bp->pf.port_id);
+ if (bp->hwrm_spec_code >= 0x10201) {
+ netdev_warn(bp->dev, "Module part number %s\n",
+ resp->phy_vendor_partnumber);
+ }
+ if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
+ netdev_warn(bp->dev, "TX is disabled\n");
+ if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
+ netdev_warn(bp->dev, "SFP+ module is shutdown\n");
+ }
+}
+
static void
bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
{
if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
+ if (bp->hwrm_spec_code >= 0x10201)
+ req->auto_pause =
+ PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
@@ -4672,6 +4816,11 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
req->enables |=
cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
+ if (bp->hwrm_spec_code >= 0x10201) {
+ req->auto_pause = req->force_pause;
+ req->enables |= cpu_to_le32(
+ PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
+ }
}
}
@@ -4684,7 +4833,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp,
if (autoneg & BNXT_AUTONEG_SPEED) {
req->auto_mode |=
- PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
+ PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
req->enables |= cpu_to_le32(
PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
@@ -4698,9 +4847,6 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp,
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
}
- /* currently don't support half duplex */
- req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
- req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
/* tell chimp that the setting takes effect immediately */
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
}
@@ -4735,7 +4881,30 @@ int bnxt_hwrm_set_pause(struct bnxt *bp)
return rc;
}
-int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
+static void bnxt_hwrm_set_eee(struct bnxt *bp,
+ struct hwrm_port_phy_cfg_input *req)
+{
+ struct ethtool_eee *eee = &bp->eee;
+
+ if (eee->eee_enabled) {
+ u16 eee_speeds;
+ u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
+
+ if (eee->tx_lpi_enabled)
+ flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
+ else
+ flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
+
+ req->flags |= cpu_to_le32(flags);
+ eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
+ req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
+ req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
+ } else {
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
+ }
+}
+
+int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
{
struct hwrm_port_phy_cfg_input req = {0};
@@ -4744,14 +4913,57 @@ int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
bnxt_hwrm_set_pause_common(bp, &req);
bnxt_hwrm_set_link_common(bp, &req);
+
+ if (set_eee)
+ bnxt_hwrm_set_eee(bp, &req);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+
+ if (BNXT_VF(bp))
+ return 0;
+
+ if (pci_num_vf(bp->pdev))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+ req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static bool bnxt_eee_config_ok(struct bnxt *bp)
+{
+ struct ethtool_eee *eee = &bp->eee;
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ return true;
+
+ if (eee->eee_enabled) {
+ u32 advertising =
+ _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ eee->eee_enabled = 0;
+ return false;
+ }
+ if (eee->advertised & ~advertising) {
+ eee->advertised = advertising & eee->supported;
+ return false;
+ }
+ }
+ return true;
+}
+
static int bnxt_update_phy_setting(struct bnxt *bp)
{
int rc;
bool update_link = false;
bool update_pause = false;
+ bool update_eee = false;
struct bnxt_link_info *link_info = &bp->link_info;
rc = bnxt_update_link(bp, true);
@@ -4761,7 +4973,8 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
return rc;
}
if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
- link_info->auto_pause_setting != link_info->req_flow_ctrl)
+ (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
+ link_info->req_flow_ctrl)
update_pause = true;
if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
link_info->force_pause_setting != link_info->req_flow_ctrl)
@@ -4780,8 +4993,11 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
update_link = true;
}
+ if (!bnxt_eee_config_ok(bp))
+ update_eee = true;
+
if (update_link)
- rc = bnxt_hwrm_set_link_setting(bp, update_pause);
+ rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
else if (update_pause)
rc = bnxt_hwrm_set_pause(bp);
if (rc) {
@@ -4872,7 +5088,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* Enable TX queues */
bnxt_tx_enable(bp);
mod_timer(&bp->timer, jiffies + bp->current_interval);
- bnxt_update_link(bp, true);
+ /* Poll link status and check for SFP+ module status */
+ bnxt_get_port_module_status(bp);
return 0;
@@ -4972,6 +5189,7 @@ static int bnxt_close(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
bnxt_close_nic(bp, true, true);
+ bnxt_hwrm_shutdown_link(bp);
return 0;
}
@@ -5238,6 +5456,20 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if (!bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
+
+ /* Both CTAG and STAG VLAN acceleration on the RX side have to be
+ * turned on or off together.
+ */
+ if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ else
+ features |= NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX;
+ }
+
return features;
}
@@ -5453,6 +5685,9 @@ static void bnxt_sp_task(struct work_struct *work)
rtnl_unlock();
}
+ if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
+ bnxt_get_port_module_status(bp);
+
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
bnxt_hwrm_port_qstats(bp);
@@ -5583,10 +5818,9 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
-#ifdef CONFIG_BNXT_SRIOV
- if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
- return -EADDRNOTAVAIL;
-#endif
+ rc = bnxt_approve_mac(bp, addr->sa_data);
+ if (rc)
+ return rc;
if (ether_addr_equal(addr->sa_data, dev->dev_addr))
return 0;
@@ -5921,6 +6155,13 @@ static int bnxt_probe_phy(struct bnxt *bp)
int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
+ rc = bnxt_hwrm_phy_qcaps(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
+ rc);
+ return rc;
+ }
+
rc = bnxt_update_link(bp, false);
if (rc) {
netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
@@ -5930,15 +6171,24 @@ static int bnxt_probe_phy(struct bnxt *bp)
/* initialize the ethtool settings copy with NVM settings */
if (BNXT_AUTO_MODE(link_info->auto_mode)) {
- link_info->autoneg = BNXT_AUTONEG_SPEED |
- BNXT_AUTONEG_FLOW_CTRL;
+ link_info->autoneg = BNXT_AUTONEG_SPEED;
+ if (bp->hwrm_spec_code >= 0x10201) {
+ if (link_info->auto_pause_setting &
+ PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ } else {
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ }
link_info->advertising = link_info->auto_link_speeds;
- link_info->req_flow_ctrl = link_info->auto_pause_setting;
} else {
link_info->req_link_speed = link_info->force_link_speed;
link_info->req_duplex = link_info->duplex_setting;
- link_info->req_flow_ctrl = link_info->force_pause_setting;
}
+ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+ link_info->req_flow_ctrl =
+ link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
+ else
+ link_info->req_flow_ctrl = link_info->force_pause_setting;
return rc;
}
@@ -6013,6 +6263,22 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
return rc;
}
+static void bnxt_parse_log_pcie_link(struct bnxt *bp)
+{
+ enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+ enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
+
+ if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
+ speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
+ netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
+ else
+ netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
+ speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
+ "Unknown", width);
+}
+
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int version_printed;
@@ -6049,15 +6315,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
- NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
- NETIF_F_RXHASH |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
- NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
+ dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
@@ -6128,6 +6398,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
board_info[ent->driver_data].name,
(long)pci_resource_start(pdev, 0), dev->dev_addr);
+ bnxt_parse_log_pcie_link(bp);
+
return 0;
init_err:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index de9d53eee..2824d65b2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,7 +11,7 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.0.0"
+#define DRV_MODULE_VERSION "1.2.0"
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 0
@@ -425,10 +425,17 @@ struct rx_tpa_end_cmp_ext {
#define MAX_TPA 64
+#if (BNXT_PAGE_SHIFT == 16)
+#define MAX_RX_PAGES 1
+#define MAX_RX_AGG_PAGES 4
+#define MAX_TX_PAGES 1
+#define MAX_CP_PAGES 8
+#else
#define MAX_RX_PAGES 8
#define MAX_RX_AGG_PAGES 32
#define MAX_TX_PAGES 8
#define MAX_CP_PAGES 64
+#endif
#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
@@ -774,6 +781,7 @@ struct bnxt_ntuple_filter {
};
struct bnxt_link_info {
+ u8 phy_type;
u8 media_type;
u8 transceiver;
u8 phy_addr;
@@ -803,7 +811,7 @@ struct bnxt_link_info {
#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
#define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
-#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_MASK
+#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
#define PHY_VER_LEN 3
u8 phy_ver[PHY_VER_LEN];
u16 link_speed;
@@ -828,9 +836,9 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
u16 lp_auto_link_speeds;
- u16 auto_link_speed;
u16 force_link_speed;
u32 preemphasis;
+ u8 module_status;
/* copy of requested setting from ethtool cmd */
u8 autoneg;
@@ -841,6 +849,7 @@ struct bnxt_link_info {
u16 req_link_speed;
u32 advertising;
bool force_link_chng;
+
/* a copy of phy_qcfg output used to report link
* info to VF
*/
@@ -890,6 +899,7 @@ struct bnxt {
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
+ #define BNXT_FLAG_EEE_CAP 0x1000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -955,6 +965,7 @@ struct bnxt {
u32 msg_enable;
+ u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u32 hwrm_intr_seq_id;
void *hwrm_cmd_resp_addr;
@@ -1006,6 +1017,7 @@ struct bnxt {
#define BNXT_RST_RING_SP_EVENT 7
#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8
#define BNXT_PERIODIC_STATS_SP_EVENT 9
+#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
@@ -1026,6 +1038,9 @@ struct bnxt {
int ntp_fltr_count;
struct bnxt_link_info link_info;
+ struct ethtool_eee eee;
+ u32 lpi_tmr_lo;
+ u32 lpi_tmr_hi;
};
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -1115,6 +1130,16 @@ static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
#endif
+#define I2C_DEV_ADDR_A0 0xa0
+#define I2C_DEV_ADDR_A2 0xa2
+#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
+#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
+#define SFF_MODULE_ID_SFP 0x3
+#define SFF_MODULE_ID_QSFP 0xc
+#define SFF_MODULE_ID_QSFP_PLUS 0xd
+#define SFF_MODULE_ID_QSFP28 0x11
+#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
+
void bnxt_set_ring_params(struct bnxt *);
void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
@@ -1123,7 +1148,7 @@ int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_set_coal(struct bnxt *);
int bnxt_hwrm_func_qcaps(struct bnxt *);
int bnxt_hwrm_set_pause(struct bnxt *);
-int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
+int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 2e472f6db..1b0ae4a72 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -327,7 +327,11 @@ static void bnxt_get_channels(struct net_device *dev,
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
channel->max_combined = max_rx_rings;
- bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false);
+ if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
+ max_rx_rings = 0;
+ max_tx_rings = 0;
+ }
+
tcs = netdev_get_num_tc(dev);
if (tcs > 1)
max_tx_rings /= tcs;
@@ -597,7 +601,7 @@ static void bnxt_get_drvinfo(struct net_device *dev,
kfree(pkglog);
}
-static u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
+u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
u32 speed_mask = 0;
@@ -698,10 +702,23 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (link_info->phy_link_status == BNXT_LINK_LINK)
cmd->lp_advertising =
bnxt_fw_to_ethtool_lp_adv(link_info);
+ ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+ if (!netif_carrier_ok(dev))
+ cmd->duplex = DUPLEX_UNKNOWN;
+ else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ cmd->duplex = DUPLEX_FULL;
+ else
+ cmd->duplex = DUPLEX_HALF;
} else {
cmd->autoneg = AUTONEG_DISABLE;
cmd->advertising = 0;
+ ethtool_speed =
+ bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
+ cmd->duplex = DUPLEX_HALF;
+ if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
+ cmd->duplex = DUPLEX_FULL;
}
+ ethtool_cmd_speed_set(cmd, ethtool_speed);
cmd->port = PORT_NONE;
if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
@@ -719,16 +736,8 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->port = PORT_FIBRE;
}
- if (link_info->phy_link_status == BNXT_LINK_LINK) {
- if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
- cmd->duplex = DUPLEX_FULL;
- } else {
- cmd->duplex = DUPLEX_UNKNOWN;
- }
- ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
- ethtool_cmd_speed_set(cmd, ethtool_speed);
if (link_info->transceiver ==
- PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL)
+ PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL)
cmd->transceiver = XCVR_INTERNAL;
else
cmd->transceiver = XCVR_EXTERNAL;
@@ -739,31 +748,52 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u16 support_spds = link_info->support_speeds;
+ u32 fw_speed = 0;
+
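+	/* Return a firmware speed value only if the PHY reports the
+	 * requested speed as supported; 0 makes the caller reject it.
+	 */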
switch (ethtool_speed) {
case SPEED_100:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+ break;
case SPEED_1000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+ break;
case SPEED_2500:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+ break;
case SPEED_10000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+ break;
case SPEED_20000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+ break;
case SPEED_25000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+ break;
case SPEED_40000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+ break;
case SPEED_50000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+ break;
default:
netdev_err(dev, "unsupported speed!\n");
break;
}
- return 0;
+ return fw_speed;
}
-static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
{
u16 fw_speed_mask = 0;
@@ -823,6 +853,16 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
*/
set_pause = true;
} else {
+ u16 fw_speed;
+ u8 phy_type = link_info->phy_type;
+
+ if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
+ phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
+ link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ netdev_err(dev, "10GBase-T devices must autoneg\n");
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
/* TODO: currently don't support half duplex */
if (cmd->duplex == DUPLEX_HALF) {
netdev_err(dev, "HALF DUPLEX is not supported!\n");
@@ -833,14 +873,19 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->duplex == DUPLEX_UNKNOWN)
cmd->duplex = DUPLEX_FULL;
speed = ethtool_cmd_speed(cmd);
- link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
+ fw_speed = bnxt_get_fw_speed(dev, speed);
+ if (!fw_speed) {
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ link_info->req_link_speed = fw_speed;
link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
link_info->autoneg = 0;
link_info->advertising = 0;
}
if (netif_running(dev))
- rc = bnxt_hwrm_set_link_setting(bp, set_pause);
+ rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
set_setting_exit:
return rc;
@@ -874,7 +919,9 @@ static int bnxt_set_pauseparam(struct net_device *dev,
return -EINVAL;
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
- link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
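+		/* Spec 1.2.1+ firmware takes an explicit autoneg-pause
+		 * request instead of a forced rx/tx pause mask.
+		 */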
+ if (bp->hwrm_spec_code >= 0x10201)
+ link_info->req_flow_ctrl =
+ PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
} else {
		/* when transitioning from auto pause to forced pause,
		 * force a link change
@@ -882,17 +929,13 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
link_info->force_link_chng = true;
link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
- link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH;
+ link_info->req_flow_ctrl = 0;
}
if (epause->rx_pause)
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
- else
- link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX;
if (epause->tx_pause)
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
- else
- link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX;
if (netif_running(dev))
rc = bnxt_hwrm_set_pause(bp);
@@ -1381,6 +1424,199 @@ static int bnxt_set_eeprom(struct net_device *dev,
eeprom->len);
}
+static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct ethtool_eee *eee = &bp->eee;
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u32 advertising =
+ _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ int rc = 0;
+
+ if (BNXT_VF(bp))
+ return 0;
+
+ if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ return -EOPNOTSUPP;
+
+ if (!edata->eee_enabled)
+ goto eee_ok;
+
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ netdev_warn(dev, "EEE requires autoneg\n");
+ return -EINVAL;
+ }
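+	/* lpi_tmr_lo/hi come from HWRM_PORT_PHY_QCAPS; a zero upper
+	 * bound means firmware reported no valid range, so keep the
+	 * previously stored timer value.
+	 */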
+ if (edata->tx_lpi_enabled) {
+ if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
+ edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
+ netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
+ bp->lpi_tmr_lo, bp->lpi_tmr_hi);
+ return -EINVAL;
+ } else if (!bp->lpi_tmr_hi) {
+ edata->tx_lpi_timer = eee->tx_lpi_timer;
+ }
+ }
+ if (!edata->advertised) {
+ edata->advertised = advertising & eee->supported;
+ } else if (edata->advertised & ~advertising) {
+ netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
+ edata->advertised, advertising);
+ return -EINVAL;
+ }
+
+ eee->advertised = edata->advertised;
+ eee->tx_lpi_enabled = edata->tx_lpi_enabled;
+ eee->tx_lpi_timer = edata->tx_lpi_timer;
+eee_ok:
+ eee->eee_enabled = edata->eee_enabled;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_link_setting(bp, false, true);
+
+ return rc;
+}
+
+static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ return -EOPNOTSUPP;
+
+ *edata = bp->eee;
+ if (!bp->eee.eee_enabled) {
+ /* Preserve tx_lpi_timer so that the last value will be used
+ * by default when it is re-enabled.
+ */
+ edata->advertised = 0;
+ edata->tx_lpi_enabled = 0;
+ }
+
+ if (!bp->eee.eee_active)
+ edata->lp_advertised = 0;
+
+ return 0;
+}
+
+static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
+ u16 page_number, u16 start_addr,
+ u16 data_length, u8 *buf)
+{
+ struct hwrm_port_phy_i2c_read_input req = {0};
+ struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+ int rc, byte_offset = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
+ req.i2c_slave_addr = i2c_addr;
+ req.page_number = cpu_to_le16(page_number);
+ req.port_id = cpu_to_le16(bp->pf.port_id);
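+	/* The HWRM response buffer returns at most
+	 * BNXT_MAX_PHY_I2C_RESP_SIZE (64) bytes per request, so read
+	 * the module EEPROM in chunks.
+	 */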
+ do {
+ u16 xfer_size;
+
+ xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
+ data_length -= xfer_size;
+ req.page_offset = cpu_to_le16(start_addr + byte_offset);
+ req.data_length = xfer_size;
+ req.enables = cpu_to_le32(start_addr + byte_offset ?
+ PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (!rc)
+ memcpy(buf + byte_offset, output->data, xfer_size);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ byte_offset += xfer_size;
+ } while (!rc && data_length > 0);
+
+ return rc;
+}
+
+static int bnxt_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_port_phy_i2c_read_input req = {0};
+ struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+	/* No point in going further if phy status indicates the module
+	 * is not inserted, is powered down, or is of type 10GBase-T
+ */
+ if (bp->link_info.module_status >
+ PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
+ return -EOPNOTSUPP;
+
+ /* This feature is not supported in older firmware versions */
+ if (bp->hwrm_spec_code < 0x10202)
+ return -EOPNOTSUPP;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
+ req.i2c_slave_addr = I2C_DEV_ADDR_A0;
+ req.page_number = 0;
+ req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR);
+ req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE;
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ u32 module_id = le32_to_cpu(output->data[0]);
+
+ switch (module_id) {
+ case SFF_MODULE_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case SFF_MODULE_ID_QSFP:
+ case SFF_MODULE_ID_QSFP_PLUS:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case SFF_MODULE_ID_QSFP28:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u16 start = eeprom->offset, length = eeprom->len;
+ int rc = 0;
+
+ memset(data, 0, eeprom->len);
+
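+	/* SFF-8472 splits the map across two I2C devices: offsets below
+	 * ETH_MODULE_SFF_8436_LEN (256) live on A0, the rest on page 1
+	 * of the A2 device (diagnostics).
+	 */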
+ /* Read A0 portion of the EEPROM */
+ if (start < ETH_MODULE_SFF_8436_LEN) {
+ if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
+ length = ETH_MODULE_SFF_8436_LEN - start;
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
+ start, length, data);
+ if (rc)
+ return rc;
+ start += length;
+ data += length;
+ length = eeprom->len - length;
+ }
+
+ /* Read A2 portion of the EEPROM */
+ if (length) {
+ start -= ETH_MODULE_SFF_8436_LEN;
+ bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
+ length, data);
+ }
+ return rc;
+}
+
const struct ethtool_ops bnxt_ethtool_ops = {
.get_settings = bnxt_get_settings,
.set_settings = bnxt_set_settings,
@@ -1409,4 +1645,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_eeprom = bnxt_get_eeprom,
.set_eeprom = bnxt_set_eeprom,
.get_link = bnxt_get_link,
+ .get_eee = bnxt_get_eee,
+ .set_eee = bnxt_set_eee,
+ .get_module_info = bnxt_get_module_info,
+ .get_module_eeprom = bnxt_get_module_eeprom,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 98fa81e08..3abc03b60 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,6 +12,8 @@
extern const struct ethtool_ops bnxt_ethtool_ops;
+u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
u32 bnxt_fw_to_ethtool_speed(u16);
+u16 bnxt_get_fw_auto_link_speeds(u32);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
index e0aac65c6..461675caa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 4badbedcb..05e3c49a7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -104,6 +104,7 @@ struct hwrm_async_event_cmpl {
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
@@ -111,6 +112,7 @@ struct hwrm_async_event_cmpl {
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0)
__le32 event_data2;
u8 opaque_v;
@@ -141,6 +143,7 @@ struct hwrm_async_event_cmpl_link_status_change {
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
@@ -195,6 +198,9 @@ struct hwrm_async_event_cmpl_link_speed_change {
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB (0xffffUL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
};
@@ -237,6 +243,55 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed {
__le32 event_data1;
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_cfg_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
};
/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
@@ -363,6 +418,47 @@ struct hwrm_async_event_cmpl_vf_mac_addr_change {
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
};
+/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
+struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
+};
+
+/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+};
+
/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
struct hwrm_async_event_cmpl_hwrm_error {
__le16 type;
@@ -377,6 +473,7 @@ struct hwrm_async_event_cmpl_hwrm_error {
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
@@ -387,12 +484,12 @@ struct hwrm_async_event_cmpl_hwrm_error {
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
};
-/* HW Resource Manager Specification 1.0.0 */
+/* HW Resource Manager Specification 1.2.2 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 0
-#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_MINOR 2
+#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_STR "1.0.0"
+#define HWRM_VERSION_STR "1.2.2"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
@@ -444,7 +541,7 @@ struct cmd_nums {
#define HWRM_FUNC_BUF_RGTR (0x1fUL)
#define HWRM_PORT_PHY_CFG (0x20UL)
#define HWRM_PORT_MAC_CFG (0x21UL)
- #define RESERVED2 (0x22UL)
+ #define HWRM_PORT_TS_QUERY (0x22UL)
#define HWRM_PORT_QSTATS (0x23UL)
#define HWRM_PORT_LPBK_QSTATS (0x24UL)
#define HWRM_PORT_CLR_STATS (0x25UL)
@@ -452,6 +549,9 @@ struct cmd_nums {
#define HWRM_PORT_PHY_QCFG (0x27UL)
#define HWRM_PORT_MAC_QCFG (0x28UL)
#define HWRM_PORT_BLINK_LED (0x29UL)
+ #define HWRM_PORT_PHY_QCAPS (0x2aUL)
+ #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL)
+ #define HWRM_PORT_PHY_I2C_READ (0x2cUL)
#define HWRM_QUEUE_QPORTCFG (0x30UL)
#define HWRM_QUEUE_QCFG (0x31UL)
#define HWRM_QUEUE_CFG (0x32UL)
@@ -531,6 +631,7 @@ struct cmd_nums {
__le16 unused_0[3];
};
+/* Return Codes (8 bytes) */
struct ret_codes {
__le16 error_code;
#define HWRM_ERR_CODE_SUCCESS (0x0UL)
@@ -875,10 +976,11 @@ struct hwrm_func_vf_cfg_input {
#define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
#define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
__le16 mtu;
__le16 guest_vlan;
__le16 async_event_cr;
- __le16 unused_0[3];
+ u8 dflt_mac_addr[6];
};
/* Output (16 bytes) */
@@ -917,7 +1019,8 @@ struct hwrm_func_qcaps_output {
__le32 flags;
#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- u8 perm_mac_address[6];
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
__le16 max_tx_rings;
@@ -942,6 +1045,67 @@ struct hwrm_func_qcaps_output {
u8 valid;
};
+/* hwrm_func_qcfg */
+/* Input (24 bytes) */
+struct hwrm_func_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (72 bytes) */
+struct hwrm_func_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le16 vlan;
+ u8 unused_0;
+ u8 unused_1;
+ u8 mac_address[6];
+ __le16 pci_id;
+ __le16 alloc_rsscos_ctx;
+ __le16 alloc_cmpl_rings;
+ __le16 alloc_tx_rings;
+ __le16 alloc_rx_rings;
+ __le16 alloc_l2_ctx;
+ __le16 alloc_vnics;
+ __le16 mtu;
+ __le16 mru;
+ __le16 stat_ctx_id;
+ u8 port_partition_type;
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF (0x0UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS (0x1UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 (0x2UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0)
+ u8 unused_2;
+ __le16 dflt_vnic_id;
+ u8 unused_3;
+ u8 unused_4;
+ __le32 min_bw;
+ __le32 max_bw;
+ u8 evb_mode;
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0)
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0)
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0)
+ u8 unused_5;
+ __le16 unused_6;
+ __le32 alloc_mcast_filters;
+ __le32 alloc_hw_ring_grps;
+ u8 unused_7;
+ u8 unused_8;
+ u8 unused_9;
+ u8 valid;
+};
+
/* hwrm_func_cfg */
/* Input (88 bytes) */
struct hwrm_func_cfg_input {
@@ -1171,6 +1335,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN (0x0UL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER (0x1UL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS (0xeUL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS (0x12UL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS (0x1dUL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX (0x24UL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD (0x2aUL << 0)
@@ -1302,6 +1467,7 @@ struct hwrm_func_drv_qver_output {
#define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN (0x0UL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER (0x1UL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS (0xeUL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS (0x12UL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS (0x1dUL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX (0x24UL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD (0x2aUL << 0)
@@ -1317,7 +1483,7 @@ struct hwrm_func_drv_qver_output {
};
/* hwrm_port_phy_cfg */
-/* Input (48 bytes) */
+/* Input (56 bytes) */
struct hwrm_port_phy_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1329,6 +1495,10 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN 0x2UL
#define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
#define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
__le32 enables;
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
@@ -1339,6 +1509,8 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
#define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
__le16 port_id;
__le16 force_link_speed;
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0)
@@ -1350,12 +1522,14 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
u8 auto_mode;
#define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_MASK (0x4UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK (0x4UL << 0)
u8 auto_duplex;
#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0)
@@ -1363,6 +1537,7 @@ struct hwrm_port_phy_cfg_input {
u8 auto_pause;
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
u8 unused_0;
__le16 auto_link_speed;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0)
@@ -1374,6 +1549,8 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
__le16 auto_link_speed_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1386,6 +1563,9 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
u8 wirespeed;
#define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0)
#define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0)
@@ -1398,7 +1578,20 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
u8 unused_1;
__le32 preemphasis;
- __le32 unused_2;
+ __le16 eee_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ u8 unused_2;
+ u8 unused_3;
+ __le32 tx_lpi_timer;
+ __le32 unused_4;
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
};
/* Output (16 bytes) */
@@ -1426,7 +1619,7 @@ struct hwrm_port_phy_qcfg_input {
__le16 unused_0[3];
};
-/* Output (48 bytes) */
+/* Output (96 bytes) */
struct hwrm_port_phy_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1447,6 +1640,8 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB (0xffffUL << 0)
u8 duplex;
#define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0)
@@ -1465,6 +1660,9 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
__le16 force_link_speed;
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0)
@@ -1475,15 +1673,18 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
u8 auto_mode;
#define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK (0x4UL << 0)
u8 auto_pause;
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
__le16 auto_link_speed;
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0)
@@ -1494,6 +1695,8 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
__le16 auto_link_speed_mask;
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1506,6 +1709,9 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
u8 wirespeed;
#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0)
@@ -1516,31 +1722,49 @@ struct hwrm_port_phy_qcfg_output {
u8 force_pause;
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
- u8 reserved1;
+ u8 module_status;
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE (0xffUL << 0)
__le32 preemphasis;
u8 phy_maj;
u8 phy_min;
u8 phy_bld;
u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4 (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4 (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4 (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR (0x4UL << 0)
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4 (0x6UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX (0x6UL << 0)
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0)
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE (0x9UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY (0xaUL << 0)
u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0)
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0)
- u8 transceiver_type;
- #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0)
- u8 phy_addr;
+ u8 xcvr_pkg_type;
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL (0x2UL << 0)
+ u8 eee_config_phy_addr;
#define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
#define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
- u8 unused_2;
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
+ u8 parallel_detect;
+ #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_RESERVED_MASK 0xfeUL
+ #define PORT_PHY_QCFG_RESP_RESERVED_SFT 1
__le16 link_partner_adv_speeds;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
@@ -1553,15 +1777,48 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
u8 link_partner_adv_auto_mode;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK (0x4UL << 0)
u8 link_partner_adv_pause;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ __le16 adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 link_partner_adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
+ __le32 unused_1;
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le32 unused_2;
u8 unused_3;
u8 unused_4;
u8 unused_5;
@@ -1569,7 +1826,7 @@ struct hwrm_port_phy_qcfg_output {
};
/* hwrm_port_mac_cfg */
-/* Input (32 bytes) */
+/* Input (40 bytes) */
struct hwrm_port_mac_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1581,6 +1838,10 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL
#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
@@ -1588,6 +1849,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL
#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
__le16 port_id;
u8 ipg;
u8 lpbk;
@@ -1598,6 +1861,9 @@ struct hwrm_port_mac_cfg_input {
u8 lcos_map_pri;
u8 tunnel_pri2cos_map_pri;
u8 dscp2pri_map_pri;
+ __le16 rx_ts_capture_ptp_msg_type;
+ __le16 tx_ts_capture_ptp_msg_type;
+ __le32 unused_0;
};
/* Output (16 bytes) */
@@ -1754,7 +2020,113 @@ struct hwrm_port_blink_led_output {
u8 valid;
};
-/* hwrm_queue_qportcfg */
+/* hwrm_port_phy_qcaps */
+/* Input (24 bytes) */
+struct hwrm_port_phy_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (24 bytes) */
+struct hwrm_port_phy_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 eee_supported;
+ #define PORT_PHY_QCAPS_RESP_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_RSVD1_MASK 0xfeUL
+ #define PORT_PHY_QCAPS_RESP_RSVD1_SFT 1
+ u8 unused_0;
+ __le16 supported_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+ __le16 supported_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+ __le16 supported_speeds_eee_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+ __le32 tx_lpi_timer_low;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+ __le32 valid_tx_lpi_timer_high;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+ #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
+};
+
+/* hwrm_port_phy_i2c_read */
+/* Input (40 bytes) */
+struct hwrm_port_phy_i2c_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 unused_0;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+};
+
+/* Output (80 bytes) */
+struct hwrm_port_phy_i2c_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 data[16];
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
/* Input (24 bytes) */
struct hwrm_queue_qportcfg_input {
__le16 req_type;
@@ -1766,6 +2138,7 @@ struct hwrm_queue_qportcfg_input {
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
__le16 port_id;
__le16 unused_0;
};
@@ -1838,6 +2211,7 @@ struct hwrm_queue_cfg_input {
#define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL
#define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_RX
__le32 enables;
#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
@@ -1875,6 +2249,7 @@ struct hwrm_queue_buffers_cfg_input {
#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL
#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_LAST QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX
__le32 enables;
#define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL
#define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL
@@ -1952,6 +2327,7 @@ struct hwrm_queue_pri2cos_cfg_input {
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL
__le32 enables;
u8 port_id;
@@ -2158,6 +2534,8 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
#define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
#define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
__le32 enables;
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
#define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -2622,6 +3000,7 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
@@ -2747,6 +3126,7 @@ struct hwrm_cfa_l2_filter_cfg_input {
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
#define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
__le32 enables;
#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
@@ -3337,6 +3717,41 @@ struct hwrm_fw_reset_output {
u8 valid;
};
+/* hwrm_fw_qstatus */
+/* Input (24 bytes) */
+struct hwrm_fw_qstatus_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_qstatus_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
/* hwrm_exec_fwd_resp */
/* Input (128 bytes) */
struct hwrm_exec_fwd_resp_input {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 43ef392c8..40a7b0e09 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 0c5f51049..363884dd9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -771,12 +771,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
phy_qcfg_resp.link =
PORT_PHY_QCFG_RESP_LINK_LINK;
- if (phy_qcfg_resp.auto_link_speed)
- phy_qcfg_resp.link_speed =
- phy_qcfg_resp.auto_link_speed;
- else
- phy_qcfg_resp.link_speed =
- phy_qcfg_resp.force_link_speed;
+ phy_qcfg_resp.link_speed = cpu_to_le16(
+ PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
phy_qcfg_resp.duplex =
PORT_PHY_QCFG_RESP_DUPLEX_FULL;
phy_qcfg_resp.pause =
@@ -859,8 +855,8 @@ void bnxt_update_vf_mac(struct bnxt *bp)
* default but the stored zero MAC will allow the VF user to change
* the random MAC address using ndo_set_mac_address() if he wants.
*/
- if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
- memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+ if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
+ memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
/* overwrite netdev dev_addr with admin VF MAC */
if (is_valid_ether_addr(bp->vf.mac_addr))
@@ -869,6 +865,31 @@ update_vf_mac_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
}
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+{
+ struct hwrm_func_vf_cfg_input req = {0};
+ int rc = 0;
+
+ if (!BNXT_VF(bp))
+ return 0;
+
+ if (bp->hwrm_spec_code < 0x10202) {
+ if (is_valid_ether_addr(bp->vf.mac_addr))
+ rc = -EADDRNOTAVAIL;
+ goto mac_done;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+mac_done:
+ if (rc) {
+ rc = -EADDRNOTAVAIL;
+ netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
+ mac);
+ }
+ return rc;
+}
#else
void bnxt_sriov_disable(struct bnxt *bp)
@@ -883,4 +904,9 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
void bnxt_update_vf_mac(struct bnxt *bp)
{
}
+
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+{
+ return 0;
+}
#endif
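For context, the natural caller of the new bnxt_approve_mac() is the VF's ndo_set_mac_address() path: validate the address locally, let the PF veto it, and only then commit. An illustrative sketch (the function name is hypothetical, not from this patch):

	/* Sketch: a VF MAC change takes effect only after the PF
	 * approves it via bnxt_approve_mac().
	 */
	static int bnxt_change_mac_sketch(struct net_device *dev, void *p)
	{
		struct bnxt *bp = netdev_priv(dev);
		struct sockaddr *addr = p;
		int rc;

		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;

		rc = bnxt_approve_mac(bp, addr->sa_data);
		if (rc)
			return rc;	/* PF rejected the address */

		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return 0;
	}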
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index c151280e3..0392670ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,4 +20,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
+int bnxt_approve_mac(struct bnxt *, u8 *);
#endif
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index b69dc58fa..b1d2ac818 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5350,7 +5350,10 @@ static int cnic_start_hw(struct cnic_dev *dev)
return 0;
err1:
- cp->free_resc(dev);
+ if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ)
+ cp->stop_hw(dev);
+ else
+ cp->free_resc(dev);
pci_dev_put(dev->pcidev);
return err;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 44ad1490b..541456398 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -104,8 +104,8 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
static inline void dmadesc_set(struct bcmgenet_priv *priv,
void __iomem *d, dma_addr_t addr, u32 val)
{
- dmadesc_set_length_status(priv, d, val);
dmadesc_set_addr(priv, d, addr);
+ dmadesc_set_length_status(priv, d, val);
}
static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
@@ -1225,8 +1225,10 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
dev->stats.tx_packets += pkts_compl;
dev->stats.tx_bytes += bytes_compl;
+ txq = netdev_get_tx_queue(dev, ring->queue);
+ netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
- txq = netdev_get_tx_queue(dev, ring->queue);
if (netif_tx_queue_stopped(txq))
netif_tx_wake_queue(txq);
}
@@ -1335,6 +1337,7 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct enet_cb *tx_cb_ptr;
+ unsigned int frag_size;
dma_addr_t mapping;
int ret;
@@ -1342,10 +1345,12 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
if (unlikely(!tx_cb_ptr))
BUG();
+
tx_cb_ptr->skb = NULL;
- mapping = skb_frag_dma_map(kdev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
+ frag_size = skb_frag_size(frag);
+
+ mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
ret = dma_mapping_error(kdev, mapping);
if (ret) {
priv->mib.tx_dma_failed++;
@@ -1355,10 +1360,10 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
}
dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
- dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
- (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
return 0;
@@ -1451,15 +1456,19 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
else
index -= 1;
- nr_frags = skb_shinfo(skb)->nr_frags;
ring = &priv->tx_rings[index];
txq = netdev_get_tx_queue(dev, ring->queue);
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
spin_lock_irqsave(&ring->lock, flags);
- if (ring->free_bds <= nr_frags + 1) {
- netif_tx_stop_queue(txq);
- netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
- __func__, index, ring->queue);
+ if (ring->free_bds <= (nr_frags + 1)) {
+ if (!netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
+ netdev_err(dev,
+ "%s: tx ring %d full when queue %d awake\n",
+ __func__, index, ring->queue);
+ }
ret = NETDEV_TX_BUSY;
goto out;
}
@@ -1513,6 +1522,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
ring->prod_index += nr_frags + 1;
ring->prod_index &= DMA_P_INDEX_MASK;
+ netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
+
if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
netif_tx_stop_queue(txq);
@@ -1732,7 +1743,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
work_done = bcmgenet_desc_rx(ring, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
ring->int_enable(ring);
}
@@ -2361,6 +2372,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
int i;
+ struct netdev_queue *txq;
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
@@ -2375,6 +2387,14 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
}
}
+ for (i = 0; i < priv->hw_params->tx_queues; i++) {
+ txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
+ netdev_tx_reset_queue(txq);
+ }
+
+ txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
+ netdev_tx_reset_queue(txq);
+
bcmgenet_free_rx_buffers(priv);
kfree(priv->rx_cbs);
kfree(priv->tx_cbs);
@@ -2490,7 +2510,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
if (likely(napi_schedule_prep(&rx_ring->napi))) {
rx_ring->int_disable(rx_ring);
- __napi_schedule(&rx_ring->napi);
+ __napi_schedule_irqoff(&rx_ring->napi);
}
}
@@ -2503,7 +2523,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
if (likely(napi_schedule_prep(&tx_ring->napi))) {
tx_ring->int_disable(tx_ring);
- __napi_schedule(&tx_ring->napi);
+ __napi_schedule_irqoff(&tx_ring->napi);
}
}
@@ -2533,7 +2553,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
if (likely(napi_schedule_prep(&rx_ring->napi))) {
rx_ring->int_disable(rx_ring);
- __napi_schedule(&rx_ring->napi);
+ __napi_schedule_irqoff(&rx_ring->napi);
}
}
@@ -2542,7 +2562,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
if (likely(napi_schedule_prep(&tx_ring->napi))) {
tx_ring->int_disable(tx_ring);
- __napi_schedule(&tx_ring->napi);
+ __napi_schedule_irqoff(&tx_ring->napi);
}
}
@@ -3039,7 +3059,7 @@ static void bcmgenet_timeout(struct net_device *dev)
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev->stats.tx_errors++;
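The bcmgenet hunks above wire up Byte Queue Limits: bytes are reported with netdev_tx_sent_queue() when descriptors are posted, matched by netdev_tx_completed_queue() at reclaim time, and the accounting is zeroed with netdev_tx_reset_queue() when the rings are torn down. A driver-agnostic sketch of the contract (single queue assumed for brevity):

	#include <linux/netdevice.h>

	/* BQL sketch: sent and completed byte counts must balance,
	 * otherwise the dynamic queue limit stalls or over-admits.
	 */
	static netdev_tx_t bql_xmit_sketch(struct sk_buff *skb,
					   struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		/* ... post skb to the hardware ring here ... */
		netdev_tx_sent_queue(txq, skb->len);
		return NETDEV_TX_OK;
	}

	static void bql_reclaim_sketch(struct net_device *dev,
				       unsigned int pkts, unsigned int bytes)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		netdev_tx_completed_queue(txq, pkts, bytes);
	}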
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index eacc55967..f1b81187a 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2462,7 +2462,7 @@ static void sbmac_tx_timeout (struct net_device *dev)
spin_lock_irqsave(&sc->sbm_lock, flags);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
spin_unlock_irqrestore(&sc->sbm_lock, flags);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a91a31959..6ad2b4a51 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7374,7 +7374,7 @@ static void tg3_napi_fini(struct tg3 *tp)
static inline void tg3_netif_stop(struct tg3 *tp)
{
- tp->dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(tp->dev); /* prevent tx timeout */
tg3_napi_disable(tp);
netif_carrier_off(tp->dev);
netif_tx_disable(tp->dev);
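The dev->trans_start writes replaced in bcmgenet, sb1250-mac and tg3 above all become netif_trans_update(). In this kernel the watchdog timestamp lives in the per-queue struct netdev_queue, so the helper is roughly the following (reference sketch of the linux/netdevice.h definition, shown for orientation):

	static inline void netif_trans_update(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		if (txq->trans_start != jiffies)
			txq->trans_start = jiffies;
	}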
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index a63551d0a..cb07d95e3 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -61,8 +61,7 @@
#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
#define MACB_WOL_ENABLED (0x1 << 1)
-/*
- * Graceful stop timeouts in us. We should allow up to
+/* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
*/
#define MACB_HALT_TIMEOUT 1230
@@ -130,9 +129,8 @@ static void hw_writel(struct macb *bp, int offset, u32 value)
writel_relaxed(value, bp->regs + offset);
}
-/*
- * Find the CPU endianness by using the loopback bit of NCR register. When the
- * CPU is in big endian we need to program swaped mode for management
+/* Find the CPU endianness by using the loopback bit of NCR register. When the
+ * CPU is in big endian we need to program swapped mode for management
* descriptor access.
*/
static bool hw_is_native_io(void __iomem *addr)
@@ -189,7 +187,7 @@ static void macb_get_hwaddr(struct macb *bp)
pdata = dev_get_platdata(&bp->pdev->dev);
- /* Check all 4 address register for vaild address */
+ /* Check all 4 address register for valid address */
for (i = 0; i < 4; i++) {
bottom = macb_or_gem_readl(bp, SA1B + i * 8);
top = macb_or_gem_readl(bp, SA1T + i * 8);
@@ -297,7 +295,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
ferr = DIV_ROUND_UP(ferr, rate / 100000);
if (ferr > 5)
netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
- rate);
+ rate);
if (clk_set_rate(clk, rate_rounded))
netdev_err(dev, "adjusting tx_clk failed.\n");
@@ -386,7 +384,8 @@ static int macb_mii_probe(struct net_device *dev)
pdata = dev_get_platdata(&bp->pdev->dev);
if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
- ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
+ ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
+ "phy int");
if (!ret) {
phy_irq = gpio_to_irq(pdata->phy_irq_pin);
phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
@@ -430,7 +429,7 @@ static int macb_mii_init(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(MPE));
bp->mii_bus = mdiobus_alloc();
- if (bp->mii_bus == NULL) {
+ if (!bp->mii_bus) {
err = -ENOMEM;
goto err_out;
}
@@ -439,7 +438,7 @@ static int macb_mii_init(struct macb *bp)
bp->mii_bus->read = &macb_mdio_read;
bp->mii_bus->write = &macb_mdio_write;
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- bp->pdev->name, bp->pdev->id);
+ bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
bp->mii_bus->parent = &bp->pdev->dev;
pdata = dev_get_platdata(&bp->pdev->dev);
@@ -452,7 +451,8 @@ static int macb_mii_init(struct macb *bp)
err = of_mdiobus_register(bp->mii_bus, np);
/* fallback to standard phy registration if no phy were
- found during dt phy registration */
+ * found during dt phy registration
+ */
if (!err && !phy_find_first(bp->mii_bus)) {
for (i = 0; i < PHY_MAX_ADDR; i++) {
struct phy_device *phydev;
@@ -500,7 +500,7 @@ static void macb_update_stats(struct macb *bp)
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
- for(; p < end; p++, offset += 4)
+ for (; p < end; p++, offset += 4)
*p += bp->macb_reg_readl(bp, offset);
}
@@ -568,8 +568,7 @@ static void macb_tx_error_task(struct work_struct *work)
/* Make sure nobody is trying to queue up new packets */
netif_tx_stop_all_queues(bp->dev);
- /*
- * Stop transmission now
+ /* Stop transmission now
* (in case we have just queued new packets)
* macb/gem must be halted to write TBQP register
*/
@@ -577,8 +576,7 @@ static void macb_tx_error_task(struct work_struct *work)
/* Just complain for now, reinitializing TX path can be good */
netdev_err(bp->dev, "BUG: halt tx timed out\n");
- /*
- * Treat frames in TX queue including the ones that caused the error.
+ /* Treat frames in TX queue including the ones that caused the error.
* Free transmit buffers in upper layer.
*/
for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
@@ -608,10 +606,9 @@ static void macb_tx_error_task(struct work_struct *work)
bp->stats.tx_bytes += skb->len;
}
} else {
- /*
- * "Buffers exhausted mid-frame" errors may only happen
- * if the driver is buggy, so complain loudly about those.
- * Statistics are updated by hardware.
+ /* "Buffers exhausted mid-frame" errors may only happen
+ * if the driver is buggy, so complain loudly about
+ * those. Statistics are updated by hardware.
*/
if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
netdev_err(bp->dev,
@@ -663,7 +660,7 @@ static void macb_tx_interrupt(struct macb_queue *queue)
queue_writel(queue, ISR, MACB_BIT(TCOMP));
netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
- (unsigned long)status);
+ (unsigned long)status);
head = queue->tx_head;
for (tail = queue->tx_tail; tail != head; tail++) {
@@ -723,7 +720,8 @@ static void gem_rx_refill(struct macb *bp)
struct sk_buff *skb;
dma_addr_t paddr;
- while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
+ while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
+ RX_RING_SIZE) > 0) {
entry = macb_rx_ring_wrap(bp->rx_prepared_head);
/* Make hw descriptor updates visible to CPU */
@@ -731,10 +729,10 @@ static void gem_rx_refill(struct macb *bp)
bp->rx_prepared_head++;
- if (bp->rx_skbuff[entry] == NULL) {
+ if (!bp->rx_skbuff[entry]) {
/* allocate sk_buff for this free entry in ring */
skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
- if (unlikely(skb == NULL)) {
+ if (unlikely(!skb)) {
netdev_err(bp->dev,
"Unable to allocate sk_buff\n");
break;
@@ -742,7 +740,8 @@ static void gem_rx_refill(struct macb *bp)
/* now fill corresponding descriptor entry */
paddr = dma_map_single(&bp->pdev->dev, skb->data,
- bp->rx_buffer_size, DMA_FROM_DEVICE);
+ bp->rx_buffer_size,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(&bp->pdev->dev, paddr)) {
dev_kfree_skb(skb);
break;
@@ -767,7 +766,7 @@ static void gem_rx_refill(struct macb *bp)
wmb();
netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
- bp->rx_prepared_head, bp->rx_tail);
+ bp->rx_prepared_head, bp->rx_tail);
}
/* Mark DMA descriptors from begin up to and not including end as unused */
@@ -778,14 +777,14 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
for (frag = begin; frag != end; frag++) {
struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+
desc->addr &= ~MACB_BIT(RX_USED);
}
/* Make descriptor updates visible to hardware */
wmb();
- /*
- * When this happens, the hardware stats registers for
+ /* When this happens, the hardware stats registers for
* whatever caused this is updated, so we don't have to record
* anything.
*/
@@ -881,11 +880,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
len = desc->ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
- macb_rx_ring_wrap(first_frag),
- macb_rx_ring_wrap(last_frag), len);
+ macb_rx_ring_wrap(first_frag),
+ macb_rx_ring_wrap(last_frag), len);
- /*
- * The ethernet header starts NET_IP_ALIGN bytes into the
+ /* The ethernet header starts NET_IP_ALIGN bytes into the
* first buffer. Since the header is 14 bytes, this makes the
* payload word-aligned.
*
@@ -925,7 +923,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
frag_len = len - offset;
}
skb_copy_to_linear_data_offset(skb, offset,
- macb_rx_buffer(bp, frag), frag_len);
+ macb_rx_buffer(bp, frag),
+ frag_len);
offset += bp->rx_buffer_size;
desc = macb_rx_desc(bp, frag);
desc->addr &= ~MACB_BIT(RX_USED);
@@ -943,7 +942,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
bp->stats.rx_packets++;
bp->stats.rx_bytes += skb->len;
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
- skb->len, skb->csum);
+ skb->len, skb->csum);
netif_receive_skb(skb);
return 0;
@@ -1050,7 +1049,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
work_done = 0;
netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
- (unsigned long)status, budget);
+ (unsigned long)status, budget);
work_done = bp->macbgem_ops.mog_rx(bp, budget);
if (work_done < budget) {
@@ -1100,8 +1099,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
(unsigned long)status);
if (status & MACB_RX_INT_FLAGS) {
- /*
- * There's no point taking any more interrupts
+ /* There's no point taking any more interrupts
* until we have processed the buffers. The
* scheduling call may fail if the poll routine
* is already scheduled, so disable interrupts
@@ -1130,8 +1128,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(TCOMP))
macb_tx_interrupt(queue);
- /*
- * Link change detection isn't possible with RMII, so we'll
+ /* Link change detection isn't possible with RMII, so we'll
* add that if/when we get our hands on a full-blown MII PHY.
*/
@@ -1162,8 +1159,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_BIT(HRESP)) {
- /*
- * TODO: Reset the hardware, and maybe move the
+ /* TODO: Reset the hardware, and maybe move the
* netdev_err to a lower-priority context as well
* (work queue?)
*/
@@ -1182,8 +1178,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling receive - used by netconsole and other diagnostic tools
+/* Polling receive - used by netconsole and other diagnostic tools
* to allow network i/o with interrupts disabled.
*/
static void macb_poll_controller(struct net_device *dev)
@@ -1269,7 +1264,7 @@ static unsigned int macb_tx_map(struct macb *bp,
}
/* Should never happen */
- if (unlikely(tx_skb == NULL)) {
+ if (unlikely(!tx_skb)) {
netdev_err(bp->dev, "BUG! empty skb!\n");
return 0;
}
@@ -1339,16 +1334,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev,
- "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
- queue_index, skb->len, skb->head, skb->data,
- skb_tail_pointer(skb), skb_end_pointer(skb));
+ "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
+ queue_index, skb->len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb));
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, 16, true);
#endif
/* Count how many TX buffer descriptors are needed to send this
* socket buffer: skb fragments of jumbo frames may need to be
- * splitted into many buffer descriptors.
+ * split into many buffer descriptors.
*/
count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -1399,8 +1394,8 @@ static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
netdev_dbg(bp->dev,
- "RX buffer must be multiple of %d bytes, expanding\n",
- RX_BUFFER_MULTIPLE);
+ "RX buffer must be multiple of %d bytes, expanding\n",
+ RX_BUFFER_MULTIPLE);
bp->rx_buffer_size =
roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
}
@@ -1423,7 +1418,7 @@ static void gem_free_rx_buffers(struct macb *bp)
for (i = 0; i < RX_RING_SIZE; i++) {
skb = bp->rx_skbuff[i];
- if (skb == NULL)
+ if (!skb)
continue;
desc = &bp->rx_ring[i];
@@ -1479,10 +1474,10 @@ static int gem_alloc_rx_buffers(struct macb *bp)
bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
if (!bp->rx_skbuff)
return -ENOMEM;
- else
- netdev_dbg(bp->dev,
- "Allocated %d RX struct sk_buff entries at %p\n",
- RX_RING_SIZE, bp->rx_skbuff);
+
+ netdev_dbg(bp->dev,
+ "Allocated %d RX struct sk_buff entries at %p\n",
+ RX_RING_SIZE, bp->rx_skbuff);
return 0;
}
@@ -1495,10 +1490,10 @@ static int macb_alloc_rx_buffers(struct macb *bp)
&bp->rx_buffers_dma, GFP_KERNEL);
if (!bp->rx_buffers)
return -ENOMEM;
- else
- netdev_dbg(bp->dev,
- "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+
+ netdev_dbg(bp->dev,
+ "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
return 0;
}
@@ -1589,8 +1584,7 @@ static void macb_reset_hw(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
- /*
- * Disable RX and TX (XXX: Should we halt the transmission
+ /* Disable RX and TX (XXX: Should we halt the transmission
* more gracefully?)
*/
macb_writel(bp, NCR, 0);
@@ -1653,8 +1647,7 @@ static u32 macb_mdc_clk_div(struct macb *bp)
return config;
}
-/*
- * Get the DMA bus width field of the network configuration register that we
+/* Get the DMA bus width field of the network configuration register that we
* should program. We find the width from decoding the design configuration
* register to find the maximum supported data bus width.
*/
@@ -1674,8 +1667,7 @@ static u32 macb_dbw(struct macb *bp)
}
}
-/*
- * Configure the receive DMA engine
+/* Configure the receive DMA engine
* - use the correct receive buffer size
* - set best burst length for DMA operations
* (if not supported by FIFO, it will fallback to default)
@@ -1763,8 +1755,7 @@ static void macb_init_hw(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
}
-/*
- * The hash address register is 64 bits long and takes up two
+/* The hash address register is 64 bits long and takes up two
* locations in the memory map. The least significant bits are stored
* in EMAC_HSL and the most significant bits in EMAC_HSH.
*
@@ -1804,9 +1795,7 @@ static inline int hash_bit_value(int bitnr, __u8 *addr)
return 0;
}
-/*
- * Return the hash index value for the specified address.
- */
+/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
int i, j, bitval;
@@ -1822,9 +1811,7 @@ static int hash_get_index(__u8 *addr)
return hash_index;
}
-/*
- * Add multicast addresses to the internal multicast-hash table.
- */
+/* Add multicast addresses to the internal multicast-hash table. */
static void macb_sethashtable(struct net_device *dev)
{
struct netdev_hw_addr *ha;
@@ -1832,7 +1819,8 @@ static void macb_sethashtable(struct net_device *dev)
unsigned int bitnr;
struct macb *bp = netdev_priv(dev);
- mc_filter[0] = mc_filter[1] = 0;
+ mc_filter[0] = 0;
+ mc_filter[1] = 0;
netdev_for_each_mc_addr(ha, dev) {
bitnr = hash_get_index(ha->addr);
@@ -1843,9 +1831,7 @@ static void macb_sethashtable(struct net_device *dev)
macb_or_gem_writel(bp, HRT, mc_filter[1]);
}
-/*
- * Enable/Disable promiscuous and multicast modes.
- */
+/* Enable/Disable promiscuous and multicast modes. */
static void macb_set_rx_mode(struct net_device *dev)
{
unsigned long cfg;
@@ -2162,9 +2148,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
regs_buff[12] = macb_or_gem_readl(bp, USRIO);
- if (macb_is_gem(bp)) {
+ if (macb_is_gem(bp))
regs_buff[13] = gem_readl(bp, DMACFG);
- }
}
static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2287,11 +2272,11 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_set_features = macb_set_features,
};
-/*
- * Configure peripheral capabilities according to device tree
+/* Configure peripheral capabilities according to device tree
* and integration options used
*/
-static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf)
+static void macb_configure_caps(struct macb *bp,
+ const struct macb_config *dt_conf)
{
u32 dcfg;
@@ -2989,7 +2974,7 @@ static int macb_probe(struct platform_device *pdev)
mac = of_get_mac_address(np);
if (mac)
- memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+ ether_addr_copy(bp->dev->dev_addr, mac);
else
macb_get_hwaddr(bp);
@@ -2997,6 +2982,7 @@ static int macb_probe(struct platform_device *pdev)
phy_node = of_get_next_available_child(np, NULL);
if (phy_node) {
int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
+
if (gpio_is_valid(gpio)) {
bp->reset_gpio = gpio_to_desc(gpio);
gpiod_direction_output(bp->reset_gpio, 1);
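As the multicast-hash comments touched above describe, each address hashes to one of 64 filter bits split across the HRB/HRT registers; per the driver's hash_get_index(), bit j of the 6-bit index is the XOR of destination-address bits j, j+6, ..., j+42. Folding that index into the software filter image is then a one-liner (sketch; function name is illustrative):

	/* Sketch: set one bit of the 2x32-bit filter image that is
	 * later written to HRB (low word) and HRT (high word).
	 */
	static void macb_hash_set_sketch(u32 mc_filter[2], unsigned int bitnr)
	{
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);	/* bitnr < 64 */
	}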
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index e4153c9f4..7708d6b14 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2819,7 +2819,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (!g) {
netif_info(lio, tx_err, lio->netdev,
"Transmit scatter gather: glist null!\n");
- goto lio_xmit_failed;
+ goto lio_xmit_dma_failed;
}
cmdsetup.s.gather = 1;
@@ -2890,26 +2890,27 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
else
status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
if (status == IQ_SEND_FAILED)
- goto lio_xmit_failed;
+ goto lio_xmit_dma_failed;
netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
if (status == IQ_SEND_STOP)
stop_q(lio->netdev, q_idx);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
stats->tx_done++;
stats->tx_tot_bytes += skb->len;
return NETDEV_TX_OK;
+lio_xmit_dma_failed:
+ dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+ ndata.datasize, DMA_TO_DEVICE);
lio_xmit_failed:
stats->tx_dropped++;
netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
iq_no, stats->tx_dropped);
- dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
- ndata.datasize, DMA_TO_DEVICE);
recv_buffer_free(skb);
return NETDEV_TX_OK;
}
@@ -2926,7 +2927,7 @@ static void liquidio_tx_timeout(struct net_device *netdev)
netif_info(lio, tx_err, lio->netdev,
"Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
netdev->stats.tx_dropped);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
txqs_wake(netdev);
}
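The liquidio relabelling above restores the usual unwind-ladder rule: jump to the deepest label whose resources are actually held, so the DMA unmap only runs on paths that mapped something. The generic shape of the idiom (sketch; hw_post() is a hypothetical stand-in for the real send):

	#include <linux/dma-mapping.h>
	#include <linux/netdevice.h>

	/* Each label releases only what was acquired before the goto
	 * that reaches it. hw_post() is hypothetical.
	 */
	static netdev_tx_t xmit_unwind_sketch(struct device *dev,
					      struct sk_buff *skb)
	{
		dma_addr_t map;

		map = dma_map_single(dev, skb->data, skb_headlen(skb),
				     DMA_TO_DEVICE);
		if (dma_mapping_error(dev, map))
			goto drop;		/* nothing mapped yet */

		if (hw_post(dev, map, skb_headlen(skb)))
			goto unmap;		/* mapping must be undone */

		return NETDEV_TX_OK;

	unmap:
		dma_unmap_single(dev, map, skb_headlen(skb), DMA_TO_DEVICE);
	drop:
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}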
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index f67641a2f..8e23e3fad 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -602,12 +602,10 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
h->version);
- buffer = kmalloc(size, GFP_KERNEL);
+ buffer = kmemdup(data, size, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
- memcpy(buffer, data, size);
-
p = buffer + sizeof(struct octeon_firmware_file_header);
/* load all images */
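The octeon_device.c change is the standard kmalloc()+memcpy() to kmemdup() conversion; semantics are identical, including the NULL return on allocation failure. In isolation (sketch):

	#include <linux/slab.h>
	#include <linux/string.h>

	/* kmemdup(src, len, gfp) == kmalloc(len, gfp) + memcpy(),
	 * returning NULL if the allocation fails.
	 */
	static void *dup_fw_image_sketch(const void *data, size_t size)
	{
		return kmemdup(data, size, GFP_KERNEL);
	}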
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index c177c7cec..388cd799d 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1320,7 +1320,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Ring the bell. */
cvmx_write_csr(p->mix + MIX_ORING2, 1);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
rv = NETDEV_TX_OK;
out:
octeon_mgmt_update_tx_stats(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 95f17f8ca..16ed20357 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -499,6 +499,7 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
u32 rr_quantum;
u8 sq_idx = sq->sq_num;
u8 pqs_vnic;
+ int svf;
if (sq->sqs_mode)
pqs_vnic = nic->pqs_vf[vnic];
@@ -511,10 +512,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
/* 24 bytes for FCS, IPG and preamble */
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
- tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ if (!sq->sqs_mode) {
+ tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ } else {
+ for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+ if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+ break;
+ }
+ tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
+ tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
+ tl4 += (svf * NIC_TL4_PER_LMAC);
+ tl4 += (bgx * NIC_TL4_PER_BGX);
+ }
tl4 += sq_idx;
- if (sq->sqs_mode)
- tl4 += vnic * 8;
tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index bfee298fc..a19e73f11 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1442,7 +1442,7 @@ static void nicvf_reset_task(struct work_struct *work)
nicvf_stop(nic->netdev);
nicvf_open(nic->netdev);
- nic->netdev->trans_start = jiffies;
+ netif_trans_update(nic->netdev);
}
static int nicvf_config_loopback(struct nicvf *nic,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 06b819db5..0ff8e60de 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -23,7 +23,7 @@ static void nicvf_get_page(struct nicvf *nic)
if (!nic->rb_pageref || !nic->rb_page)
return;
- atomic_add(nic->rb_pageref, &nic->rb_page->_count);
+ page_ref_add(nic->rb_page, nic->rb_pageref);
nic->rb_pageref = 0;
}
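The nicvf_queues.c hunk swaps a direct atomic_add() on page->_count for the page_ref_add() accessor, which is required once the raw field is wrapped; the batched form takes several references in a single atomic operation. Sketch:

	#include <linux/mm.h>

	/* Take 'nr' page references at once instead of nr get_page()
	 * calls; each must eventually be dropped with put_page().
	 */
	static void hold_page_refs_sketch(struct page *page, int nr)
	{
		page_ref_add(page, nr);
	}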
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index d20539a6d..63a39ac97 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -274,12 +274,14 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
static void bgx_lmac_handler(struct net_device *netdev)
{
struct lmac *lmac = container_of(netdev, struct lmac, netdev);
- struct phy_device *phydev = lmac->phydev;
+ struct phy_device *phydev;
int link_changed = 0;
if (!lmac)
return;
+ phydev = lmac->phydev;
+
if (!phydev->link && lmac->last_link)
link_changed = -1;
@@ -549,7 +551,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
}
/* Clear rcvflt bit (latching high) and read it back */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
if (bgx->use_training) {
@@ -568,13 +572,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- /* Wait for MAC RX to be ready */
- if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
- SMU_RX_CTL_STATUS, true)) {
- dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
- return -1;
- }
-
/* Wait for BGX RX to be idle */
if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
@@ -587,29 +584,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
- dev_err(&bgx->pdev->dev, "Receive fault\n");
- return -1;
- }
-
- /* Receive link is latching low. Force it high and verify it */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
- if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
- SPU_STATUS1_RCV_LNK, false)) {
- dev_err(&bgx->pdev->dev, "SPU receive link down\n");
- return -1;
- }
-
+ /* Clear receive packet disable */
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
cfg &= ~SPU_MISC_CTL_RX_DIS;
bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
- return 0;
+
+ /* Check for MAC RX faults */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+ /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+ cfg &= SMU_RX_CTL_STATUS;
+ if (!cfg)
+ return 0;
+
+ /* Rx local/remote fault seen.
+ * Do lmac reinit to see if condition recovers
+ */
+ bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+
+ return -1;
}
static void bgx_poll_for_link(struct work_struct *work)
{
struct lmac *lmac;
- u64 link;
+ u64 spu_link, smu_link;
lmac = container_of(work, struct lmac, dwork.work);
@@ -619,8 +617,11 @@ static void bgx_poll_for_link(struct work_struct *work)
bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
SPU_STATUS1_RCV_LNK, false);
- link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
- if (link & SPU_STATUS1_RCV_LNK) {
+ spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+ smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+ if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+ !(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = 40000;
@@ -634,9 +635,15 @@ static void bgx_poll_for_link(struct work_struct *work)
}
if (lmac->last_link != lmac->link_up) {
+ if (lmac->link_up) {
+ if (bgx_xaui_check_link(lmac)) {
+ /* Errors, clear link_up state */
+ lmac->link_up = 0;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ }
+ }
lmac->last_link = lmac->link_up;
- if (lmac->link_up)
- bgx_xaui_check_link(lmac);
}
queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
@@ -708,7 +715,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
struct lmac *lmac;
- u64 cmrx_cfg;
+ u64 cfg;
lmac = &bgx->lmac[lmacid];
if (lmac->check_link) {
@@ -717,9 +724,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
destroy_workqueue(lmac->check_link);
}
- cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
- cmrx_cfg &= ~(1 << 15);
- bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+ /* Disable packet reception */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_RX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Give chance for Rx/Tx FIFO to get drained */
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+ /* Disable packet transmission */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_TX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Disable serdes lanes */
+ if (!lmac->is_sgmii)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ else
+ bgx_reg_modify(bgx, lmacid,
+ BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
bgx_flush_dmac_addrs(bgx, lmacid);
if ((bgx->lmac_type != BGX_MODE_XFI) &&
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 149e17936..42010d2e5 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -41,6 +41,7 @@
#define BGX_CMRX_RX_STAT10 0xC0
#define BGX_CMRX_RX_BP_DROP 0xC8
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMRX_RX_FIFO_LEN 0x108
#define BGX_CMR_RX_DMACX_CAM 0x200
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (x << 49)
@@ -50,6 +51,7 @@
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_FIFO_LEN 0x518
#define BGX_CMRX_TX_STAT0 0x600
#define BGX_CMRX_TX_STAT1 0x608
#define BGX_CMRX_TX_STAT2 0x610
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 526ea74e8..86f467a2c 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1664,8 +1664,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
struct cmdQ *q = &sge->cmdQ[qid];
unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
- if (!spin_trylock(&q->lock))
- return NETDEV_TX_LOCKED;
+ spin_lock(&q->lock);
reclaim_completed_tx(sge, q);
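The cxgb/sge.c change reflects the retirement of NETDEV_TX_LOCKED from the core: xmit handlers may no longer bail out on a failed trylock, so the command-queue lock is taken unconditionally. The new shape in isolation (sketch):

	#include <linux/netdevice.h>
	#include <linux/spinlock.h>

	/* With NETDEV_TX_LOCKED gone, ndo_start_xmit must block on
	 * its own queue lock rather than return early.
	 */
	static netdev_tx_t locked_xmit_sketch(struct sk_buff *skb,
					      spinlock_t *qlock)
	{
		spin_lock(qlock);
		/* ... reclaim completions, post skb ... */
		spin_unlock(qlock);
		return NETDEV_TX_OK;
	}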
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 326d40095..b4fceb924 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -324,7 +324,9 @@ struct adapter_params {
unsigned int sf_fw_start; /* start of FW image in flash */
unsigned int fw_vers;
+ unsigned int bs_vers; /* bootstrap version */
unsigned int tp_vers;
+ unsigned int er_vers; /* expansion ROM version */
u8 api_vers[7];
unsigned short mtus[NMTUS];
@@ -357,6 +359,34 @@ struct sge_idma_monitor_state {
unsigned int idma_warn[2]; /* time to warning in HZ */
};
+/* Firmware Mailbox Command/Reply log. All values are in Host-Endian format.
+ * The access and execute times are signed in order to accommodate negative
+ * error returns.
+ */
+struct mbox_cmd {
+ u64 cmd[MBOX_LEN / 8]; /* a Firmware Mailbox Command/Reply */
+ u64 timestamp; /* OS-dependent timestamp */
+ u32 seqno; /* sequence number */
+ s16 access; /* time (ms) to access mailbox */
+ s16 execute; /* time (ms) to execute */
+};
+
+struct mbox_cmd_log {
+ unsigned int size; /* number of entries in the log */
+ unsigned int cursor; /* next position in the log to write */
+ u32 seqno; /* next sequence number */
+ /* variable length mailbox command log starts here */
+};
+
+/* Given a pointer to a Firmware Mailbox Command Log and a log entry index,
+ * return a pointer to the specified entry.
+ */
+static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
+ unsigned int entry_idx)
+{
+ return &((struct mbox_cmd *)&(log)[1])[entry_idx];
+}
+
#include "t4fw_api.h"
#define FW_VERSION(chip) ( \
@@ -394,6 +424,7 @@ struct link_config {
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
unsigned char link_ok; /* link up? */
+ unsigned char link_down_rc; /* link down reason */
};
#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
@@ -731,6 +762,7 @@ struct adapter {
u32 t4_bar0;
struct pci_dev *pdev;
struct device *pdev_dev;
+ const char *name;
unsigned int mbox;
unsigned int pf;
unsigned int flags;
@@ -776,6 +808,10 @@ struct adapter {
struct work_struct db_drop_task;
bool tid_release_task_busy;
+ /* support for mailbox command/reply logging */
+#define T4_OS_LOG_MBOX_CMDS 256
+ struct mbox_cmd_log *mbox_log;
+
struct dentry *debugfs_root;
bool use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */
bool trace_rss; /* 1 implies that different RSS flit per filter is
@@ -1306,6 +1342,7 @@ int t4_fl_pkt_align(struct adapter *adap);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_check_fw_version(struct adapter *adap);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
+int t4_get_bs_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
@@ -1329,6 +1366,8 @@ int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_init_rss_mode(struct adapter *adap, int mbox);
+int t4_init_portinfo(struct port_info *pi, int mbox,
+ int port, int pf, int vf, u8 mac[]);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
@@ -1464,6 +1503,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
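The mailbox log added to cxgb4.h is a fixed-size ring allocated with a trailing entry array (as init_one() does later in this patch with kzalloc(sizeof(*log) + T4_OS_LOG_MBOX_CMDS * sizeof(struct mbox_cmd))). Mirroring the mboxlog_show() reader below, a consumer walks it oldest-to-newest starting at the cursor (sketch):

	/* 'cursor' names the next slot to overwrite, i.e. the oldest
	 * entry once the ring has wrapped; unused slots keep a zero
	 * timestamp.
	 */
	static void walk_mbox_log_sketch(struct mbox_cmd_log *log)
	{
		unsigned int k;

		for (k = 0; k < log->size; k++) {
			unsigned int idx = log->cursor + k;
			struct mbox_cmd *entry;

			if (idx >= log->size)
				idx -= log->size;
			entry = mbox_cmd_log_entry(log, idx);
			if (!entry->timestamp)
				continue;	/* never written */
			/* consume entry->seqno, entry->cmd[], ... */
		}
	}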
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 052c660ac..6ee2ed306 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -253,7 +253,7 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
{
const union fw_port_dcb *fwdcb = &pcmd->u.dcb;
int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid));
- struct net_device *dev = adap->port[port];
+ struct net_device *dev = adap->port[adap->chan_map[port]];
struct port_info *pi = netdev_priv(dev);
struct port_dcb_info *dcb = &pi->dcb;
int dcb_type = pcmd->u.dcb.pgid.type;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0bb41e9b9..91fb50850 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -1152,6 +1152,104 @@ static const struct file_operations devlog_fops = {
.release = seq_release_private
};
+/* Show Firmware Mailbox Command/Reply Log
+ *
+ * Note that we don't do any locking when dumping the Firmware Mailbox Log so
+ * it's possible that we can catch things during a log update and therefore
+ * see partially corrupted log entries. But it's probably Good Enough(tm).
+ * If we ever decide that we want to make sure that we're dumping a coherent
+ * log, we'd need to perform locking in the mailbox logging and in
+ * mboxlog_open() where we'd need to grab the entire mailbox log in one go
+ * like we do for the Firmware Device Log.
+ */
+static int mboxlog_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ struct mbox_cmd_log *log = adapter->mbox_log;
+ struct mbox_cmd *entry;
+ int entry_idx, i;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq,
+ "%10s %15s %5s %5s %s\n",
+ "Seq#", "Tstamp", "Atime", "Etime",
+ "Command/Reply");
+ return 0;
+ }
+
+ entry_idx = log->cursor + ((uintptr_t)v - 2);
+ if (entry_idx >= log->size)
+ entry_idx -= log->size;
+ entry = mbox_cmd_log_entry(log, entry_idx);
+
+ /* skip over unused entries */
+ if (entry->timestamp == 0)
+ return 0;
+
+ seq_printf(seq, "%10u %15llu %5d %5d",
+ entry->seqno, entry->timestamp,
+ entry->access, entry->execute);
+ for (i = 0; i < MBOX_LEN / 8; i++) {
+ u64 flit = entry->cmd[i];
+ u32 hi = (u32)(flit >> 32);
+ u32 lo = (u32)flit;
+
+ seq_printf(seq, " %08x %08x", hi, lo);
+ }
+ seq_puts(seq, "\n");
+ return 0;
+}
+
+static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct adapter *adapter = seq->private;
+ struct mbox_cmd_log *log = adapter->mbox_log;
+
+ return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return mboxlog_get_idx(seq, *pos);
+}
+
+static void mboxlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations mboxlog_seq_ops = {
+ .start = mboxlog_start,
+ .next = mboxlog_next,
+ .stop = mboxlog_stop,
+ .show = mboxlog_show
+};
+
+static int mboxlog_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &mboxlog_seq_ops);
+
+ if (!res) {
+ struct seq_file *seq = file->private_data;
+
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations mboxlog_fops = {
+ .owner = THIS_MODULE,
+ .open = mboxlog_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
static int mbox_show(struct seq_file *seq, void *v)
{
static const char * const owner[] = { "none", "FW", "driver",
@@ -1572,6 +1670,7 @@ static const struct file_operations flash_debugfs_fops = {
.owner = THIS_MODULE,
.open = mem_open,
.read = flash_read,
+ .llseek = default_llseek,
};
static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
@@ -3128,6 +3227,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
{ "clk", &clk_debugfs_fops, S_IRUSR, 0 },
{ "devlog", &devlog_fops, S_IRUSR, 0 },
+ { "mboxlog", &mboxlog_fops, S_IRUSR, 0 },
{ "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
{ "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
{ "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 408c546fe..6eea06a31 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -166,7 +166,8 @@ MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter,"
static int dflt_msg_enable = DFLT_MSG_ENABLE;
module_param(dflt_msg_enable, int, 0644);
-MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, "
+ "deprecated parameter");
/*
* The driver uses the best interrupt scheme available on a platform in the
@@ -301,6 +302,22 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
}
#endif /* CONFIG_CHELSIO_T4_DCB */
+int cxgb4_dcb_enabled(const struct net_device *dev)
+{
+#ifdef CONFIG_CHELSIO_T4_DCB
+ struct port_info *pi = netdev_priv(dev);
+
+ if (!pi->dcb.enabled)
+ return 0;
+
+ return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
+ (pi->dcb.state == CXGB4_DCB_STATE_HOST));
+#else
+ return 0;
+#endif
+}
+EXPORT_SYMBOL(cxgb4_dcb_enabled);
+
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
struct net_device *dev = adapter->port[port_id];
@@ -311,8 +328,10 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
netif_carrier_on(dev);
else {
#ifdef CONFIG_CHELSIO_T4_DCB
- cxgb4_dcb_state_init(dev);
- dcb_tx_queue_prio_enable(dev, false);
+ if (cxgb4_dcb_enabled(dev)) {
+ cxgb4_dcb_state_init(dev);
+ dcb_tx_queue_prio_enable(dev, false);
+ }
#endif /* CONFIG_CHELSIO_T4_DCB */
netif_carrier_off(dev);
}
@@ -334,6 +353,17 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
netdev_info(dev, "port module unplugged\n");
else if (pi->mod_type < ARRAY_SIZE(mod_str))
netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+ netdev_info(dev, "%s: unsupported port module inserted\n",
+ dev->name);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+ netdev_info(dev, "%s: unknown port module inserted\n",
+ dev->name);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+ netdev_info(dev, "%s: transceiver module error\n", dev->name);
+ else
+ netdev_info(dev, "%s: unknown module type %d inserted\n",
+ dev->name, pi->mod_type);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
@@ -480,28 +510,12 @@ static int link_start(struct net_device *dev)
return ret;
}
-int cxgb4_dcb_enabled(const struct net_device *dev)
-{
-#ifdef CONFIG_CHELSIO_T4_DCB
- struct port_info *pi = netdev_priv(dev);
-
- if (!pi->dcb.enabled)
- return 0;
-
- return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
- (pi->dcb.state == CXGB4_DCB_STATE_HOST));
-#else
- return 0;
-#endif
-}
-EXPORT_SYMBOL(cxgb4_dcb_enabled);
-
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
- struct net_device *dev = adap->port[port];
+ struct net_device *dev = adap->port[adap->chan_map[port]];
int old_dcb_enabled = cxgb4_dcb_enabled(dev);
int new_dcb_enabled;
@@ -631,7 +645,8 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
action == FW_PORT_ACTION_GET_PORT_INFO) {
int port = FW_PORT_CMD_PORTID_G(
be32_to_cpu(pcmd->op_to_portid));
- struct net_device *dev = q->adap->port[port];
+ struct net_device *dev =
+ q->adap->port[q->adap->chan_map[port]];
int state_input = ((pcmd->u.info.dcbxdis_pkd &
FW_PORT_CMD_DCBXDIS_F)
? CXGB4_DCB_INPUT_FW_DISABLED
@@ -3735,7 +3750,10 @@ static int adap_init0(struct adapter *adap)
* is excessively mismatched relative to the driver.)
*/
t4_get_fw_version(adap, &adap->params.fw_vers);
+ t4_get_bs_version(adap, &adap->params.bs_vers);
t4_get_tp_version(adap, &adap->params.tp_vers);
+ t4_get_exprom_version(adap, &adap->params.er_vers);
+
ret = t4_check_fw_version(adap);
/* If firmware is too old (not supported by driver) force an update. */
if (ret)
@@ -4649,6 +4667,68 @@ static void cxgb4_check_pcie_caps(struct adapter *adap)
"suggested for optimal performance.\n");
}
+/* Dump basic information about the adapter */
+static void print_adapter_info(struct adapter *adapter)
+{
+ /* Device information */
+ dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
+ adapter->params.vpd.id,
+ CHELSIO_CHIP_RELEASE(adapter->params.chip));
+ dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
+ adapter->params.vpd.sn, adapter->params.vpd.pn);
+
+ /* Firmware Version */
+ if (!adapter->params.fw_vers)
+ dev_warn(adapter->pdev_dev, "No firmware loaded\n");
+ else
+ dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
+
+ /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
+ * Firmware, so dev_info() is more appropriate here.)
+ */
+ if (!adapter->params.bs_vers)
+ dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
+ else
+ dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
+
+ /* TP Microcode Version */
+ if (!adapter->params.tp_vers)
+ dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
+ else
+ dev_info(adapter->pdev_dev,
+ "TP Microcode version: %u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
+
+ /* Expansion ROM version */
+ if (!adapter->params.er_vers)
+ dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
+ else
+ dev_info(adapter->pdev_dev,
+ "Expansion ROM version: %u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
+
+ /* Software/Hardware configuration */
+ dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
+ is_offload(adapter) ? "R" : "",
+ ((adapter->flags & USING_MSIX) ? "MSI-X" :
+ (adapter->flags & USING_MSI) ? "MSI" : ""),
+ is_offload(adapter) ? "Offload" : "non-Offload");
+}
+
static void print_port_info(const struct net_device *dev)
{
char buf[80];
@@ -4676,14 +4756,8 @@ static void print_port_info(const struct net_device *dev)
--bufp;
sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
- netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n",
- adap->params.vpd.id,
- CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
- is_offload(adap) ? "R" : "",
- (adap->flags & USING_MSIX) ? " MSI-X" :
- (adap->flags & USING_MSI) ? " MSI" : "");
- netdev_info(dev, "S/N: %s, P/N: %s\n",
- adap->params.vpd.sn, adap->params.vpd.pn);
+ netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
+ dev->name, adap->params.vpd.id, adap->name, buf);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
@@ -4835,12 +4909,23 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_adapter;
}
+ adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+ (sizeof(struct mbox_cmd) *
+ T4_OS_LOG_MBOX_CMDS),
+ GFP_KERNEL);
+ if (!adapter->mbox_log) {
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
+
/* PCI device has been enabled */
adapter->flags |= DEV_ENABLED;
adapter->regs = regs;
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
+ adapter->name = pci_name(pdev);
adapter->mbox = func;
adapter->pf = func;
adapter->msg_enable = dflt_msg_enable;
@@ -5071,6 +5156,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (is_offload(adapter))
attach_ulds(adapter);
+ print_adapter_info(adapter);
+
sriov:
#ifdef CONFIG_PCI_IOV
if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
@@ -5090,6 +5177,7 @@ sriov:
if (adapter->workq)
destroy_workqueue(adapter->workq);
+ kfree(adapter->mbox_log);
kfree(adapter);
out_unmap_bar0:
iounmap(regs);
@@ -5156,6 +5244,7 @@ static void remove_one(struct pci_dev *pdev)
adapter->flags &= ~DEV_ENABLED;
}
pci_release_regions(pdev);
+ kfree(adapter->mbox_log);
synchronize_rcu();
kfree(adapter);
} else
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 6278e5a74..bad253beb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -3006,7 +3006,9 @@ void t4_free_sge_resources(struct adapter *adap)
if (etq->q.desc) {
t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
etq->q.cntxt_id);
+ __netif_tx_lock_bh(etq->txq);
free_tx_desc(adap, &etq->q, etq->q.in_use, true);
+ __netif_tx_unlock_bh(etq->txq);
kfree(etq->q.sdesc);
free_txq(adap, &etq->q);
}
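The sge.c fix above serializes the final free_tx_desc() walk against a concurrent ndo_start_xmit() by holding the queue's xmit lock with bottom halves disabled. The pattern in isolation (sketch):

	#include <linux/netdevice.h>

	/* Freeing in-flight TX descriptors must exclude the xmit
	 * path, which runs in BH context under the same lock.
	 */
	static void drain_txq_sketch(struct netdev_queue *txq)
	{
		__netif_tx_lock_bh(txq);
		/* ... walk the ring, unmap and free pending skbs ... */
		__netif_tx_unlock_bh(txq);
	}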
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 71586a3e0..a63addb4e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -224,18 +224,34 @@ static void fw_asrt(struct adapter *adap, u32 mbox_addr)
be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}
-static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
+/**
+ * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
+ * @adapter: the adapter
+ * @cmd: the Firmware Mailbox Command or Reply
+ * @size: command length in bytes
+ * @access: the time (ms) needed to access the Firmware Mailbox
+ * @execute: the time (ms) the command spent being executed
+ */
+static void t4_record_mbox(struct adapter *adapter,
+ const __be64 *cmd, unsigned int size,
+ int access, int execute)
{
- dev_err(adap->pdev_dev,
- "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
- (unsigned long long)t4_read_reg64(adap, data_reg),
- (unsigned long long)t4_read_reg64(adap, data_reg + 8),
- (unsigned long long)t4_read_reg64(adap, data_reg + 16),
- (unsigned long long)t4_read_reg64(adap, data_reg + 24),
- (unsigned long long)t4_read_reg64(adap, data_reg + 32),
- (unsigned long long)t4_read_reg64(adap, data_reg + 40),
- (unsigned long long)t4_read_reg64(adap, data_reg + 48),
- (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+ struct mbox_cmd_log *log = adapter->mbox_log;
+ struct mbox_cmd *entry;
+ int i;
+
+ entry = mbox_cmd_log_entry(log, log->cursor++);
+ if (log->cursor == log->size)
+ log->cursor = 0;
+
+ for (i = 0; i < size / 8; i++)
+ entry->cmd[i] = be64_to_cpu(cmd[i]);
+ while (i < MBOX_LEN / 8)
+ entry->cmd[i++] = 0;
+ entry->timestamp = jiffies;
+ entry->seqno = log->seqno++;
+ entry->access = access;
+ entry->execute = execute;
}
/**
@@ -268,12 +284,16 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
1, 1, 3, 5, 10, 10, 20, 50, 100, 200
};
+ u16 access = 0;
+ u16 execute = 0;
u32 v;
u64 res;
- int i, ms, delay_idx;
+ int i, ms, delay_idx, ret;
const __be64 *p = cmd;
u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
+ __be64 cmd_rpl[MBOX_LEN / 8];
+ u32 pcie_fw;
if ((size & 15) || size > MBOX_LEN)
return -EINVAL;
@@ -285,13 +305,24 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
if (adap->pdev->error_state != pci_channel_io_normal)
return -EIO;
+ /* If we have a negative timeout, that implies that we can't sleep. */
+ if (timeout < 0) {
+ sleep_ok = false;
+ timeout = -timeout;
+ }
+
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
- if (v != MBOX_OWNER_DRV)
- return v ? -EBUSY : -ETIMEDOUT;
+ if (v != MBOX_OWNER_DRV) {
+ ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
+ t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+ return ret;
+ }
+ /* Copy in the new mailbox command and send it on its way ... */
+ t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
for (i = 0; i < size; i += 8)
t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
@@ -301,7 +332,10 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
delay_idx = 0;
ms = delay[0];
- for (i = 0; i < timeout; i += ms) {
+ for (i = 0;
+ !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
+ i < timeout;
+ i += ms) {
if (sleep_ok) {
ms = delay[delay_idx]; /* last element may repeat */
if (delay_idx < ARRAY_SIZE(delay) - 1)
@@ -317,26 +351,31 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
continue;
}
- res = t4_read_reg64(adap, data_reg);
+ get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
+ res = be64_to_cpu(cmd_rpl[0]);
+
if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
fw_asrt(adap, data_reg);
res = FW_CMD_RETVAL_V(EIO);
} else if (rpl) {
- get_mbox_rpl(adap, rpl, size / 8, data_reg);
+ memcpy(rpl, cmd_rpl, size);
}
- if (FW_CMD_RETVAL_G((int)res))
- dump_mbox(adap, mbox, data_reg);
t4_write_reg(adap, ctl_reg, 0);
+
+ execute = i + ms;
+ t4_record_mbox(adap, cmd_rpl,
+ MBOX_LEN, access, execute);
return -FW_CMD_RETVAL_G((int)res);
}
}
- dump_mbox(adap, mbox, data_reg);
+ ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
+ t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
- return -ETIMEDOUT;
+ return ret;
}
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
@@ -2937,6 +2976,20 @@ int t4_get_fw_version(struct adapter *adapter, u32 *vers)
}
/**
+ * t4_get_bs_version - read the firmware bootstrap version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW Bootstrap version from flash.
+ */
+int t4_get_bs_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
+ offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
+}
+
+/**
* t4_get_tp_version - read the TP microcode version
* @adapter: the adapter
* @vers: where to place the version
@@ -7089,52 +7142,122 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
}
/**
- * t4_handle_fw_rpl - process a FW reply message
+ * t4_link_down_rc_str - return a string for a Link Down Reason Code
+ * @link_down_rc: Link Down Reason Code
+ *
+ * Returns a string representation of the Link Down Reason Code.
+ */
+static const char *t4_link_down_rc_str(unsigned char link_down_rc)
+{
+ static const char * const reason[] = {
+ "Link Down",
+ "Remote Fault",
+ "Auto-negotiation Failure",
+ "Reserved",
+ "Insufficient Airflow",
+ "Unable To Determine Reason",
+ "No RX Signal Detected",
+ "Reserved",
+ };
+
+ if (link_down_rc >= ARRAY_SIZE(reason))
+ return "Bad Reason Code";
+
+ return reason[link_down_rc];
+}
+
+/**
+ * t4_handle_get_port_info - process a FW reply message
+ * @pi: the port info
* @rpl: start of the FW message
*
- * Processes a FW message, such as link state change messages.
+ * Processes a GET_PORT_INFO FW reply message.
+ */
+void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+{
+ const struct fw_port_cmd *p = (const void *)rpl;
+ struct adapter *adap = pi->adapter;
+
+ /* link/module state change message */
+ int speed = 0, fc = 0;
+ struct link_config *lc;
+ u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
+ int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
+ u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
+
+ if (stat & FW_PORT_CMD_RXPAUSE_F)
+ fc |= PAUSE_RX;
+ if (stat & FW_PORT_CMD_TXPAUSE_F)
+ fc |= PAUSE_TX;
+ if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
+ speed = 100;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
+ speed = 1000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
+ speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
+ speed = 40000;
+
+ lc = &pi->link_cfg;
+
+ if (mod != pi->mod_type) {
+ pi->mod_type = mod;
+ t4_os_portmod_changed(adap, pi->port_id);
+ }
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc) { /* something changed */
+ if (!link_ok && lc->link_ok) {
+ unsigned char rc = FW_PORT_CMD_LINKDNRC_G(stat);
+
+ lc->link_down_rc = rc;
+ dev_warn(adap->pdev_dev,
+ "Port %d link down, reason: %s\n",
+ pi->port_id, t4_link_down_rc_str(rc));
+ }
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ lc->supported = be16_to_cpu(p->u.info.pcap);
+ t4_os_link_changed(adap, pi->port_id, link_ok);
+ }
+}
+
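
The status word above is decoded with one-hot capability bits: each speed has its own bit, tested from lowest to highest. A sketch of the pattern with invented bit positions (the real FW_PORT_CAP_SPEED_* values live in t4fw_api.h):

    /* Bit positions here are invented for illustration only. */
    #define CAP_SPEED_100M	(1u << 0)
    #define CAP_SPEED_1G	(1u << 1)
    #define CAP_SPEED_10G	(1u << 2)
    #define CAP_SPEED_40G	(1u << 3)

    static unsigned int decode_speed_mbps(unsigned int stat)
    {
    	if (stat & CAP_SPEED_100M)
    		return 100;
    	if (stat & CAP_SPEED_1G)
    		return 1000;
    	if (stat & CAP_SPEED_10G)
    		return 10000;
    	if (stat & CAP_SPEED_40G)
    		return 40000;
    	return 0;	/* no speed bit set: speed unknown */
    }
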
+/**
+ * t4_handle_fw_rpl - process a FW reply message
+ * @adap: the adapter
+ * @rpl: start of the FW message
+ *
+ * Processes a FW message, such as link state change messages.
*/
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
u8 opcode = *(const u8 *)rpl;
- if (opcode == FW_PORT_CMD) { /* link/module state change message */
- int speed = 0, fc = 0;
- const struct fw_port_cmd *p = (void *)rpl;
+ /* This might be a port command ... this simplifies the following
+ * conditionals ... We can get away with pre-dereferencing
+ * action_to_len16 because it's in the first 16 bytes and all messages
+ * will be at least that long.
+ */
+ const struct fw_port_cmd *p = (const void *)rpl;
+ unsigned int action =
+ FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
+
+ if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
+ int i;
int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
- int port = adap->chan_map[chan];
- struct port_info *pi = adap2pinfo(adap, port);
- struct link_config *lc = &pi->link_cfg;
- u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
- int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
- u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
-
- if (stat & FW_PORT_CMD_RXPAUSE_F)
- fc |= PAUSE_RX;
- if (stat & FW_PORT_CMD_TXPAUSE_F)
- fc |= PAUSE_TX;
- if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
- speed = 100;
- else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
- speed = 1000;
- else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
- speed = 10000;
- else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
- speed = 40000;
-
- if (link_ok != lc->link_ok || speed != lc->speed ||
- fc != lc->fc) { /* something changed */
- lc->link_ok = link_ok;
- lc->speed = speed;
- lc->fc = fc;
- lc->supported = be16_to_cpu(p->u.info.pcap);
- t4_os_link_changed(adap, port, link_ok);
- }
- if (mod != pi->mod_type) {
- pi->mod_type = mod;
- t4_os_portmod_changed(adap, port);
+ struct port_info *pi = NULL;
+
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->tx_chan == chan)
+ break;
}
+
+ t4_handle_get_port_info(pi, rpl);
+ } else {
+ dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", opcode);
+ return -EINVAL;
}
return 0;
}
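
The rewritten dispatcher pre-dereferences action_to_len16 before it has checked the opcode; as the in-code comment says, that is safe only because every FW message covers at least the first 16 bytes. The general guard, sketched with illustrative names:

    /* Peeking a common header is valid only if len covers the header. */
    struct msg_hdr {
    	unsigned char opcode;
    	unsigned char rest[15];	/* every message is >= 16 bytes */
    };

    static int classify(const void *msg, unsigned int len)
    {
    	const struct msg_hdr *hdr = msg;

    	if (len < sizeof(*hdr))
    		return -1;	/* malformed: too short to peek */
    	return hdr->opcode;
    }
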
@@ -7654,61 +7777,74 @@ int t4_init_rss_mode(struct adapter *adap, int mbox)
return 0;
}
-int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+/**
+ * t4_init_portinfo - allocate a virtual interface and initialize port_info
+ * @pi: the port_info
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @mac: the MAC address of the VI
+ *
+ * Allocates a virtual interface for the given physical port. If @mac is
+ * not %NULL it contains the MAC address of the VI as assigned by FW.
+ * @mac should be large enough to hold an Ethernet address.
+ * Returns < 0 on error.
+ */
+int t4_init_portinfo(struct port_info *pi, int mbox,
+ int port, int pf, int vf, u8 mac[])
{
- u8 addr[6];
- int ret, i, j = 0;
+ int ret;
struct fw_port_cmd c;
- struct fw_rss_vi_config_cmd rvc;
+ unsigned int rss_size;
memset(&c, 0, sizeof(c));
- memset(&rvc, 0, sizeof(rvc));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(port));
+ c.action_to_len16 = cpu_to_be32(
+ FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(c));
+ ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
+ if (ret < 0)
+ return ret;
+
+ pi->viid = ret;
+ pi->tx_chan = port;
+ pi->lport = port;
+ pi->rss_size = rss_size;
+
+ ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
+ pi->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
+ FW_PORT_CMD_MDIOADDR_G(ret) : -1;
+ pi->port_type = FW_PORT_CMD_PTYPE_G(ret);
+ pi->mod_type = FW_PORT_MOD_TYPE_NA;
+
+ init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap));
+ return 0;
+}
+
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+{
+ u8 addr[6];
+ int ret, i, j = 0;
for_each_port(adap, i) {
- unsigned int rss_size;
- struct port_info *p = adap2pinfo(adap, i);
+ struct port_info *pi = adap2pinfo(adap, i);
while ((adap->params.portvec & (1 << j)) == 0)
j++;
- c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F |
- FW_PORT_CMD_PORTID_V(j));
- c.action_to_len16 = cpu_to_be32(
- FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
- FW_LEN16(c));
- ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
if (ret)
return ret;
- ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
- if (ret < 0)
- return ret;
-
- p->viid = ret;
- p->tx_chan = j;
- p->lport = j;
- p->rss_size = rss_size;
memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
adap->port[i]->dev_port = j;
-
- ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
- p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
- FW_PORT_CMD_MDIOADDR_G(ret) : -1;
- p->port_type = FW_PORT_CMD_PTYPE_G(ret);
- p->mod_type = FW_PORT_MOD_TYPE_NA;
-
- rvc.op_to_viid =
- cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F |
- FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
- rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
- ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
- if (ret)
- return ret;
- p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
-
- init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
j++;
}
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 2fc60e83a..7f59ca458 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -220,6 +220,13 @@ enum {
FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+ /* Location of bootstrap firmware image in FLASH.
+ */
+ FLASH_FWBOOTSTRAP_START_SEC = 27,
+ FLASH_FWBOOTSTRAP_NSECS = 1,
+ FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
+ FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),
+
/*
* iSCSI persistent/crash information.
*/
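
The new FLASH_FWBOOTSTRAP_* entries give the bootstrap image sector 27 of the serial flash, one sector long; FLASH_START() and FLASH_MAX_SIZE() are plain sector arithmetic. A sketch of that arithmetic, assuming the 64KB sector size used elsewhere in this driver (treat the constant as an assumption):

    #define SF_SEC_SIZE		(64 * 1024)	/* assumed sector size */
    #define FLASH_START(sec)	((sec) * SF_SEC_SIZE)
    #define FLASH_MAX_SIZE(n)	((n) * SF_SEC_SIZE)

    /* so FLASH_FWBOOTSTRAP_START = FLASH_START(27) = 0x1b0000 */

t4_get_bs_version() earlier in this patch reads the fw_ver field of the struct fw_hdr stored at that offset.
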
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 80417fc56..4705e2dea 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -1392,6 +1392,10 @@ struct ulp_mem_io {
#define T5_ULP_MEMIO_ORDER_V(x) ((x) << T5_ULP_MEMIO_ORDER_S)
#define T5_ULP_MEMIO_ORDER_F T5_ULP_MEMIO_ORDER_V(1U)
+#define T5_ULP_MEMIO_FID_S 4
+#define T5_ULP_MEMIO_FID_M 0x7ff
+#define T5_ULP_MEMIO_FID_V(x) ((x) << T5_ULP_MEMIO_FID_S)
+
/* ulp_mem_io.lock_addr fields */
#define ULP_MEMIO_ADDR_S 0
#define ULP_MEMIO_ADDR_V(x) ((x) << ULP_MEMIO_ADDR_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a2cdfc126..50812a1d6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -144,6 +144,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 7ad6d4e75..392d6644f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2510,6 +2510,11 @@ struct fw_port_cmd {
#define FW_PORT_CMD_PTYPE_G(x) \
(((x) >> FW_PORT_CMD_PTYPE_S) & FW_PORT_CMD_PTYPE_M)
+#define FW_PORT_CMD_LINKDNRC_S 5
+#define FW_PORT_CMD_LINKDNRC_M 0x7
+#define FW_PORT_CMD_LINKDNRC_G(x) \
+ (((x) >> FW_PORT_CMD_LINKDNRC_S) & FW_PORT_CMD_LINKDNRC_M)
+
#define FW_PORT_CMD_MODTYPE_S 0
#define FW_PORT_CMD_MODTYPE_M 0x1f
#define FW_PORT_CMD_MODTYPE_V(x) ((x) << FW_PORT_CMD_MODTYPE_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index c4b262ca7..2accab386 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0E
-#define T4FW_VERSION_MICRO 0x04
+#define T4FW_VERSION_MINOR 0x0F
+#define T4FW_VERSION_MICRO 0x25
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0E
-#define T5FW_VERSION_MICRO 0x04
+#define T5FW_VERSION_MINOR 0x0F
+#define T5FW_VERSION_MICRO 0x25
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0E
-#define T6FW_VERSION_MICRO 0x04
+#define T6FW_VERSION_MINOR 0x0F
+#define T6FW_VERSION_MICRO 0x25
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 4a707c32d..734dd776c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -387,6 +387,10 @@ struct adapter {
/* various locks */
spinlock_t stats_lock;
+ /* support for mailbox command/reply logging */
+#define T4VF_OS_LOG_MBOX_CMDS 256
+ struct mbox_cmd_log *mbox_log;
+
/* list of MAC addresses in MPS Hash */
struct list_head mac_hlist;
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 1cc8a7a69..04fc6f6d1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -74,7 +74,8 @@ static int dflt_msg_enable = DFLT_MSG_ENABLE;
module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable,
- "default adapter ethtool message level bitmap");
+ "default adapter ethtool message level bitmap, "
+ "deprecated parameter");
/*
* The driver uses the best interrupt scheme available on a platform in the
@@ -1703,6 +1704,105 @@ static const struct ethtool_ops cxgb4vf_ethtool_ops = {
*/
/*
+ * Show Firmware Mailbox Command/Reply Log
+ *
+ * Note that we don't do any locking when dumping the Firmware Mailbox Log so
+ * it's possible that we can catch things during a log update and therefore
+ * see partially corrupted log entries. But it's probably Good Enough(tm).
+ * If we ever decide that we want to make sure that we're dumping a coherent
+ * log, we'd need to perform locking in the mailbox logging and in
+ * mboxlog_open() where we'd need to grab the entire mailbox log in one go
+ * like we do for the Firmware Device Log. But as stated above, meh ...
+ */
+static int mboxlog_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ struct mbox_cmd_log *log = adapter->mbox_log;
+ struct mbox_cmd *entry;
+ int entry_idx, i;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq,
+ "%10s %15s %5s %5s %s\n",
+ "Seq#", "Tstamp", "Atime", "Etime",
+ "Command/Reply");
+ return 0;
+ }
+
+ entry_idx = log->cursor + ((uintptr_t)v - 2);
+ if (entry_idx >= log->size)
+ entry_idx -= log->size;
+ entry = mbox_cmd_log_entry(log, entry_idx);
+
+ /* skip over unused entries */
+ if (entry->timestamp == 0)
+ return 0;
+
+ seq_printf(seq, "%10u %15llu %5d %5d",
+ entry->seqno, entry->timestamp,
+ entry->access, entry->execute);
+ for (i = 0; i < MBOX_LEN / 8; i++) {
+ u64 flit = entry->cmd[i];
+ u32 hi = (u32)(flit >> 32);
+ u32 lo = (u32)flit;
+
+ seq_printf(seq, " %08x %08x", hi, lo);
+ }
+ seq_puts(seq, "\n");
+ return 0;
+}
+
+static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct adapter *adapter = seq->private;
+ struct mbox_cmd_log *log = adapter->mbox_log;
+
+ return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return mboxlog_get_idx(seq, *pos);
+}
+
+static void mboxlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations mboxlog_seq_ops = {
+ .start = mboxlog_start,
+ .next = mboxlog_next,
+ .stop = mboxlog_stop,
+ .show = mboxlog_show
+};
+
+static int mboxlog_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &mboxlog_seq_ops);
+
+ if (!res) {
+ struct seq_file *seq = file->private_data;
+
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations mboxlog_fops = {
+ .owner = THIS_MODULE,
+ .open = mboxlog_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
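
This file implements the standard seq_file iterator protocol: start() maps *pos to an opaque cursor (position 0 yields SEQ_START_TOKEN so show() can emit the header row), next() advances the position, and show() renders one record. A minimal self-contained sketch of the same protocol over a toy array (names are illustrative):

    #include <linux/kernel.h>
    #include <linux/seq_file.h>

    static const char *toy[] = { "one", "two", "three" };

    /* Cursor is index+1 so that a valid first element isn't NULL. */
    static void *toy_start(struct seq_file *s, loff_t *pos)
    {
    	if (*pos == 0)
    		return SEQ_START_TOKEN;
    	return *pos <= ARRAY_SIZE(toy) ? (void *)(uintptr_t)*pos : NULL;
    }

    static void *toy_next(struct seq_file *s, void *v, loff_t *pos)
    {
    	++*pos;
    	return *pos <= ARRAY_SIZE(toy) ? (void *)(uintptr_t)*pos : NULL;
    }

    static void toy_stop(struct seq_file *s, void *v)
    {
    }

    static int toy_show(struct seq_file *s, void *v)
    {
    	if (v == SEQ_START_TOKEN)
    		seq_puts(s, "Idx Value\n");
    	else
    		seq_printf(s, "%3lu %s\n",
    			   (unsigned long)(uintptr_t)v,
    			   toy[(uintptr_t)v - 1]);
    	return 0;
    }
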
+
+/*
 * Show SGE Queue Set information. We display QPL Queue Sets per line.
*/
#define QPL 4
@@ -2121,6 +2221,7 @@ struct cxgb4vf_debugfs_entry {
};
static struct cxgb4vf_debugfs_entry debugfs_files[] = {
+ { "mboxlog", S_IRUGO, &mboxlog_fops },
{ "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops },
{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
{ "resources", S_IRUGO, &resources_proc_fops },
@@ -2663,6 +2764,16 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
+ adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+ (sizeof(struct mbox_cmd) *
+ T4VF_OS_LOG_MBOX_CMDS),
+ GFP_KERNEL);
+ if (!adapter->mbox_log) {
+ err = -ENOMEM;
+ goto err_free_adapter;
+ }
+ adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
+
/*
* Initialize SMP data synchronization resources.
*/
@@ -2912,6 +3023,7 @@ err_unmap_bar0:
iounmap(adapter->regs);
err_free_adapter:
+ kfree(adapter->mbox_log);
kfree(adapter);
err_release_regions:
@@ -2981,6 +3093,7 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
iounmap(adapter->regs);
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
+ kfree(adapter->mbox_log);
kfree(adapter);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 1ccd28294..1bb57d3fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1448,7 +1448,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* the new TX descriptors and return success.
*/
txq_advance(&txq->q, ndesc);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ring_tx_db(adapter, &txq->q, ndesc);
return NETDEV_TX_OK;
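
The recurring dev->trans_start = jiffies writes in this patch are replaced by netif_trans_update(dev), which in this kernel generation records the timestamp on TX queue 0 instead of the net_device field that is on its way out. Roughly (a sketch of the helper's shape, not a verbatim copy):

    static inline void netif_trans_update(struct net_device *dev)
    {
    	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

    	if (txq->trans_start != jiffies)
    		txq->trans_start = jiffies;	/* avoid dirtying a hot line */
    }
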
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 9b40a85cc..438374a05 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -36,6 +36,7 @@
#ifndef __T4VF_COMMON_H__
#define __T4VF_COMMON_H__
+#include "../cxgb4/t4_hw.h"
#include "../cxgb4/t4fw_api.h"
#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
@@ -227,6 +228,34 @@ struct adapter_params {
u8 nports; /* # of Ethernet "ports" */
};
+/* Firmware Mailbox Command/Reply log. All values are in Host-Endian format.
+ * The access and execute times are signed in order to accommodate negative
+ * error returns.
+ */
+struct mbox_cmd {
+ u64 cmd[MBOX_LEN / 8]; /* a Firmware Mailbox Command/Reply */
+ u64 timestamp; /* OS-dependent timestamp */
+ u32 seqno; /* sequence number */
+ s16 access; /* time (ms) to access mailbox */
+ s16 execute; /* time (ms) to execute */
+};
+
+struct mbox_cmd_log {
+ unsigned int size; /* number of entries in the log */
+ unsigned int cursor; /* next position in the log to write */
+ u32 seqno; /* next sequence number */
+ /* variable length mailbox command log starts here */
+};
+
+/* Given a pointer to a Firmware Mailbox Command Log and a log entry index,
+ * return a pointer to the specified entry.
+ */
+static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
+ unsigned int entry_idx)
+{
+ return &((struct mbox_cmd *)&(log)[1])[entry_idx];
+}
+
#include "adapter.h"
#ifndef PCI_VENDOR_ID_CHELSIO
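
mbox_cmd_log_entry() works because the log is one contiguous allocation: the struct mbox_cmd_log header immediately followed by size entries, so &log[1] is the address of entry 0. The matching allocation, as the probe paths in this patch perform it:

    struct mbox_cmd_log *log;

    log = kzalloc(sizeof(*log) + size * sizeof(struct mbox_cmd), GFP_KERNEL);
    if (!log)
    	return -ENOMEM;
    log->size = size;
    /* entry i now lives at ((struct mbox_cmd *)&log[1])[i] */
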
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index fed83d88f..955ff7c61 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -76,21 +76,33 @@ static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
*rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
}
-/*
- * Dump contents of mailbox with a leading tag.
+/**
+ * t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log
+ * @adapter: the adapter
+ * @cmd: the Firmware Mailbox Command or Reply
+ * @size: command length in bytes
+ * @access: the time (ms) needed to access the Firmware Mailbox
+ * @execute: the time (ms) the command spent being executed
*/
-static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
+static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd,
+ int size, int access, int execute)
{
- dev_err(adapter->pdev_dev,
- "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 0),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 8),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
+ struct mbox_cmd_log *log = adapter->mbox_log;
+ struct mbox_cmd *entry;
+ int i;
+
+ entry = mbox_cmd_log_entry(log, log->cursor++);
+ if (log->cursor == log->size)
+ log->cursor = 0;
+
+ for (i = 0; i < size / 8; i++)
+ entry->cmd[i] = be64_to_cpu(cmd[i]);
+ while (i < MBOX_LEN / 8)
+ entry->cmd[i++] = 0;
+ entry->timestamp = jiffies;
+ entry->seqno = log->seqno++;
+ entry->access = access;
+ entry->execute = execute;
}
/**
@@ -120,10 +132,13 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
1, 1, 3, 5, 10, 10, 20, 50, 100
};
+ u16 access = 0, execute = 0;
u32 v, mbox_data;
- int i, ms, delay_idx;
+ int i, ms, delay_idx, ret;
const __be64 *p;
u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
+ u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
+ __be64 cmd_rpl[MBOX_LEN / 8];
/* In T6, mailbox size is changed to 128 bytes to avoid
* invalidating the entire prefetch buffer.
@@ -148,8 +163,11 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
- if (v != MBOX_OWNER_DRV)
- return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
+ if (v != MBOX_OWNER_DRV) {
+ ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
+ t4vf_record_mbox(adapter, cmd, size, access, ret);
+ return ret;
+ }
/*
* Write the command array into the Mailbox Data register array and
@@ -164,6 +182,8 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
* Data registers before doing the write to the VF Mailbox Control
* register.
*/
+ if (cmd_op != FW_VI_STATS_CMD)
+ t4vf_record_mbox(adapter, cmd, size, access, 0);
for (i = 0, p = cmd; i < size; i += 8)
t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
t4_read_reg(adapter, mbox_data); /* flush write */
@@ -209,31 +229,33 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
* We return the (negated) firmware command return
* code (this depends on FW_SUCCESS == 0).
*/
+ get_mbox_rpl(adapter, cmd_rpl, size, mbox_data);
/* return value in low-order little-endian word */
- v = t4_read_reg(adapter, mbox_data);
- if (FW_CMD_RETVAL_G(v))
- dump_mbox(adapter, "FW Error", mbox_data);
+ v = be64_to_cpu(cmd_rpl[0]);
if (rpl) {
/* request bit in high-order BE word */
WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
& FW_CMD_REQUEST_F) == 0);
- get_mbox_rpl(adapter, rpl, size, mbox_data);
+ memcpy(rpl, cmd_rpl, size);
WARN_ON((be32_to_cpu(*(__be32 *)rpl)
& FW_CMD_REQUEST_F) != 0);
}
t4_write_reg(adapter, mbox_ctl,
MBOWNER_V(MBOX_OWNER_NONE));
+ execute = i + ms;
+ if (cmd_op != FW_VI_STATS_CMD)
+ t4vf_record_mbox(adapter, cmd_rpl, size, access,
+ execute);
return -FW_CMD_RETVAL_G(v);
}
}
- /*
- * We timed out. Return the error ...
- */
- dump_mbox(adapter, "FW Timeout", mbox_data);
- return -ETIMEDOUT;
+ /* We timed out. Return the error ... */
+ ret = -ETIMEDOUT;
+ t4vf_record_mbox(adapter, cmd, size, access, ret);
+ return ret;
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b2182d3ba..f15560a06 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2740,6 +2740,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_RXCSUM;
netdev->features |= netdev->hw_features;
+ netdev->vlan_features |= netdev->features;
#ifdef CONFIG_RFS_ACCEL
netdev->hw_features |= NETIF_F_NTUPLE;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 48d919414..1471e16ba 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -966,7 +966,7 @@ dm9000_init_dm9000(struct net_device *dev)
/* Init Driver variable */
db->tx_pkt_cnt = 0;
db->queue_pkt_len = 0;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
/* Our watchdog timed out. Called by the networking layer */
@@ -985,7 +985,7 @@ static void dm9000_timeout(struct net_device *dev)
dm9000_init_dm9000(dev);
dm9000_unmask_interrupts(db);
/* We can accept TX packets again */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
/* Restore previous register address */
@@ -1432,6 +1432,7 @@ dm9000_probe(struct platform_device *pdev)
int reset_gpios;
enum of_gpio_flags flags;
struct regulator *power;
+ bool inv_mac_addr = false;
power = devm_regulator_get(dev, "vcc");
if (IS_ERR(power)) {
@@ -1686,9 +1687,7 @@ dm9000_probe(struct platform_device *pdev)
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
- dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", ndev->name);
-
+ inv_mac_addr = true;
eth_hw_addr_random(ndev);
mac_src = "random";
}
@@ -1697,11 +1696,15 @@ dm9000_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
ret = register_netdev(ndev);
- if (ret == 0)
+ if (ret == 0) {
+ if (inv_mac_addr)
+ dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
+ ndev->name);
printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
ndev->name, dm9000_type_to_char(db->type),
db->io_addr, db->io_data, ndev->irq,
ndev->dev_addr, mac_src);
+ }
return 0;
out:
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 3acde3b9b..cbe84972f 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1336,7 +1336,7 @@ de4x5_open(struct net_device *dev)
}
lp->interrupt = UNMASK_INTERRUPTS;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
START_DE4X5;
@@ -1465,7 +1465,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
if (!lp->tx_enable) /* Cannot send for now */
- return NETDEV_TX_LOCKED;
+ goto tx_err;
/*
** Clean out the TX ring asynchronously to interrupts - sometimes the
@@ -1478,7 +1478,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
 /* Test if cache is already locked - drop the skb if so */
if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
- return NETDEV_TX_LOCKED;
+ goto tx_err;
/* Transmit descriptor ring full or stale skb */
if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
@@ -1519,6 +1519,9 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
lp->cache.lock = 0;
return NETDEV_TX_OK;
+tx_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
/*
@@ -1932,7 +1935,7 @@ set_multicast_list(struct net_device *dev)
lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
}
}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index afd8e78e0..8ed0fd8b1 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -192,9 +192,6 @@
(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
(pci_dev)->revision))
-/* Sten Check */
-#define DEVICE net_device
-
/* Structure/enum declaration ------------------------------- */
struct tx_desc {
__le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
@@ -313,10 +310,10 @@ static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control
/* function declaration ------------------------------------- */
-static int dmfe_open(struct DEVICE *);
-static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
-static int dmfe_stop(struct DEVICE *);
-static void dmfe_set_filter_mode(struct DEVICE *);
+static int dmfe_open(struct net_device *);
+static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *);
+static int dmfe_stop(struct net_device *);
+static void dmfe_set_filter_mode(struct net_device *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(void __iomem *, int);
static irqreturn_t dmfe_interrupt(int , void *);
@@ -326,8 +323,8 @@ static void poll_dmfe (struct net_device *dev);
static void dmfe_descriptor_init(struct net_device *);
static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, void __iomem *);
-static void send_filter_frame(struct DEVICE *);
-static void dm9132_id_table(struct DEVICE *);
+static void send_filter_frame(struct net_device *);
+static void dm9132_id_table(struct net_device *);
static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
static void dmfe_phy_write_1bit(void __iomem *, u32);
@@ -336,12 +333,12 @@ static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(unsigned long);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
-static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
-static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
+static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *);
+static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
-static void dmfe_dynamic_reset(struct DEVICE *);
+static void dmfe_dynamic_reset(struct net_device *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
-static void dmfe_init_dm910x(struct DEVICE *);
+static void dmfe_init_dm910x(struct net_device *);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
@@ -558,7 +555,7 @@ static void dmfe_remove_one(struct pci_dev *pdev)
 * The interface is opened whenever "ifconfig" activates it.
*/
-static int dmfe_open(struct DEVICE *dev)
+static int dmfe_open(struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
const int irq = db->pdev->irq;
@@ -617,7 +614,7 @@ static int dmfe_open(struct DEVICE *dev)
* Enable Tx/Rx machine
*/
-static void dmfe_init_dm910x(struct DEVICE *dev)
+static void dmfe_init_dm910x(struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr;
@@ -684,7 +681,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
*/
static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
- struct DEVICE *dev)
+ struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr;
@@ -728,7 +725,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
db->tx_packet_cnt++; /* Ready to send */
dw32(DCR1, 0x1); /* Issue Tx polling */
- dev->trans_start = jiffies; /* saved time stamp */
+ netif_trans_update(dev); /* saved time stamp */
} else {
db->tx_queue_cnt++; /* queue TX packet */
dw32(DCR1, 0x1); /* Issue Tx polling */
@@ -754,7 +751,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
 * The interface is stopped when it is brought down.
*/
-static int dmfe_stop(struct DEVICE *dev)
+static int dmfe_stop(struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr;
@@ -798,7 +795,7 @@ static int dmfe_stop(struct DEVICE *dev)
static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
- struct DEVICE *dev = dev_id;
+ struct net_device *dev = dev_id;
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr;
unsigned long flags;
@@ -879,7 +876,7 @@ static void poll_dmfe (struct net_device *dev)
* Free TX resource after TX complete
*/
-static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
+static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db)
{
struct tx_desc *txptr;
void __iomem *ioaddr = db->ioaddr;
@@ -934,7 +931,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
db->tx_packet_cnt++; /* Ready to send */
db->tx_queue_cnt--;
dw32(DCR1, 0x1); /* Issue Tx polling */
- dev->trans_start = jiffies; /* saved time stamp */
+ netif_trans_update(dev); /* saved time stamp */
}
/* Resource available check */
@@ -961,7 +958,7 @@ static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
* Receive the come packet and pass to upper layer
*/
-static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
+static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
{
struct rx_desc *rxptr;
struct sk_buff *skb, *newskb;
@@ -1052,7 +1049,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
* Set DM910X multicast address
*/
-static void dmfe_set_filter_mode(struct DEVICE * dev)
+static void dmfe_set_filter_mode(struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
unsigned long flags;
@@ -1545,7 +1542,7 @@ static void send_filter_frame(struct net_device *dev)
update_cr6(db->cr6_data | 0x2000, ioaddr);
dw32(DCR1, 0x1); /* Issue Tx polling */
update_cr6(db->cr6_data, ioaddr);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
} else
db->tx_queue_cnt++; /* Put in TX queue */
}
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 5364563c4..7bcccf5ca 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -44,7 +44,7 @@ void pnic_do_nway(struct net_device *dev)
tp->csr6 = new_csr6;
/* Restart Tx */
tulip_restart_rxtx(tp);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
}
}
@@ -70,7 +70,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
iowrite32(tp->csr6, ioaddr + CSR6);
iowrite32(0x30, ioaddr + CSR12);
iowrite32(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
} else if (ioread32(ioaddr + CSR5) & TPLnkPass) {
if (tulip_media_cap[dev->if_port] & MediaIsMII) {
@@ -147,7 +147,7 @@ void pnic_timer(unsigned long data)
tp->csr6 = new_csr6;
/* Restart Tx */
tulip_restart_rxtx(tp);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (tulip_debug > 1)
dev_info(&dev->dev,
"Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n",
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 94d0eebef..bbde90bc7 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -605,7 +605,7 @@ static void tulip_tx_timeout(struct net_device *dev)
out_unlock:
spin_unlock_irqrestore (&tp->lock, flags);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue (dev);
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 447d09272..e750b5ddc 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -636,7 +636,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
db->tx_packet_cnt++; /* Ready to send */
uw32(DCR1, 0x1); /* Issue Tx polling */
- dev->trans_start = jiffies; /* saved time stamp */
+ netif_trans_update(dev); /* saved time stamp */
}
/* Tx resource check */
@@ -1431,7 +1431,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
update_cr6(db->cr6_data | 0x2000, ioaddr);
uw32(DCR1, 0x1); /* Issue Tx polling */
update_cr6(db->cr6_data, ioaddr);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
} else
netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
}
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 3c0e4d5c5..1f62b9423 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -966,7 +966,7 @@ static void tx_timeout(struct net_device *dev)
enable_irq(irq);
netif_wake_queue(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
np->stats.tx_errors++;
}
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index f92b6d948..78f144696 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -706,7 +706,7 @@ rio_tx_timeout (struct net_device *dev)
dev->name, dr32(TxStatus));
rio_free_tx(dev, 0);
dev->if_port = 0;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
static netdev_tx_t
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index a28a2e583..58c6338a8 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1011,7 +1011,7 @@ static void tx_timeout(struct net_device *dev)
dev->if_port = 0;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 536686476..ed98ef1ec 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4890,11 +4890,13 @@ static int be_resume(struct be_adapter *adapter)
if (status)
return status;
- if (netif_running(netdev)) {
+ rtnl_lock();
+ if (netif_running(netdev))
status = be_open(netdev);
- if (status)
- return status;
- }
+ rtnl_unlock();
+
+ if (status)
+ return status;
netif_device_attach(netdev);
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 41b010645..4466a1187 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -860,6 +860,11 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int entry;
void *dest;
+ if (skb_put_padto(skb, ETHOC_ZLEN)) {
+ dev->stats.tx_errors++;
+ goto out_no_free;
+ }
+
if (unlikely(skb->len > ETHOC_BUFSIZ)) {
dev->stats.tx_errors++;
goto out;
@@ -894,6 +899,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
out:
dev_kfree_skb(skb);
+out_no_free:
return NETDEV_TX_OK;
}
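
skb_put_padto() zero-pads the frame to the requested minimum but, unlike most helpers, frees the skb itself on failure; that is why the error path above jumps to out_no_free rather than the ordinary out label, which would free the skb a second time. The shape of the pattern:

    /* skb_put_padto() consumes the skb on error: don't free it again. */
    if (skb_put_padto(skb, ETHOC_ZLEN)) {
    	dev->stats.tx_errors++;
    	return NETDEV_TX_OK;	/* skb already freed by the helper */
    }
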
@@ -1086,7 +1092,7 @@ static int ethoc_probe(struct platform_device *pdev)
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
ret = -ENXIO;
- goto error;
+ goto free;
}
if (netdev->mem_end) {
@@ -1095,7 +1101,7 @@ static int ethoc_probe(struct platform_device *pdev)
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
ret = -ENXIO;
- goto error;
+ goto free;
}
} else {
/* Allocate buffer memory */
@@ -1106,7 +1112,7 @@ static int ethoc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
buffer_size);
ret = -ENOMEM;
- goto error;
+ goto free;
}
netdev->mem_end = netdev->mem_start + buffer_size;
priv->dma_alloc = buffer_size;
@@ -1120,7 +1126,7 @@ static int ethoc_probe(struct platform_device *pdev)
128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
if (num_bd < 4) {
ret = -ENODEV;
- goto error;
+ goto free;
}
priv->num_bd = num_bd;
/* num_tx must be a power of two */
@@ -1133,7 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
if (!priv->vma) {
ret = -ENOMEM;
- goto error;
+ goto free;
}
/* Allow the platform setup code to pass in a MAC address. */
@@ -1195,7 +1201,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
ret = -ENOMEM;
- goto free;
+ goto free2;
}
priv->mdio->name = "ethoc-mdio";
@@ -1208,7 +1214,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free;
+ goto free2;
}
ret = ethoc_mdio_probe(netdev);
@@ -1241,9 +1247,10 @@ error2:
error:
mdiobus_unregister(priv->mdio);
mdiobus_free(priv->mdio);
-free:
+free2:
if (priv->clk)
clk_disable_unprepare(priv->clk);
+free:
free_netdev(netdev);
out:
return ret;
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 06f031715..9b7a3f5a2 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -285,6 +285,7 @@ static void nps_enet_hw_reset(struct net_device *ndev)
ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
usleep_range(10, 20);
+ ge_rst_value = 0;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
/* Tx fifo reset sequence */
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 84384e158..e7cf313e3 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -71,7 +71,6 @@ struct ftgmac100 {
struct napi_struct napi;
struct mii_bus *mii_bus;
- struct phy_device *phydev;
int old_speed;
};
@@ -807,7 +806,7 @@ err:
static void ftgmac100_adjust_link(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = netdev->phydev;
int ier;
if (phydev->speed == priv->old_speed)
@@ -850,7 +849,6 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv)
return PTR_ERR(phydev);
}
- priv->phydev = phydev;
return 0;
}
@@ -939,27 +937,11 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev,
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
-static int ftgmac100_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int ftgmac100_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
static const struct ethtool_ops ftgmac100_ethtool_ops = {
- .set_settings = ftgmac100_set_settings,
- .get_settings = ftgmac100_get_settings,
.get_drvinfo = ftgmac100_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
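
Because phy_ethtool_get_link_ksettings() and phy_ethtool_set_link_ksettings() operate directly on netdev->phydev, the per-driver get_settings/set_settings wrappers (and the private phydev pointers they required) can be deleted outright; wiring up the generic helpers is the entire conversion:

    static const struct ethtool_ops example_ethtool_ops = {
    	.get_link		= ethtool_op_get_link,
    	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
    	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
    };
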
/******************************************************************************
@@ -1085,7 +1067,7 @@ static int ftgmac100_open(struct net_device *netdev)
ftgmac100_init_hw(priv);
ftgmac100_start_hw(priv, 10);
- phy_start(priv->phydev);
+ phy_start(netdev->phydev);
napi_enable(&priv->napi);
netif_start_queue(netdev);
@@ -1111,7 +1093,7 @@ static int ftgmac100_stop(struct net_device *netdev)
netif_stop_queue(netdev);
napi_disable(&priv->napi);
- phy_stop(priv->phydev);
+ phy_stop(netdev->phydev);
ftgmac100_stop_hw(priv);
free_irq(priv->irq, netdev);
@@ -1152,9 +1134,7 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
/* optional */
static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
- struct ftgmac100 *priv = netdev_priv(netdev);
-
- return phy_mii_ioctl(priv->phydev, ifr, cmd);
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}
static const struct net_device_ops ftgmac100_netdev_ops = {
@@ -1275,7 +1255,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
return 0;
err_register_netdev:
- phy_disconnect(priv->phydev);
+ phy_disconnect(netdev->phydev);
err_mii_probe:
mdiobus_unregister(priv->mii_bus);
err_register_mdiobus:
@@ -1301,7 +1281,7 @@ static int __exit ftgmac100_remove(struct platform_device *pdev)
unregister_netdev(netdev);
- phy_disconnect(priv->phydev);
+ phy_disconnect(netdev->phydev);
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index b1b9ebafb..c08bd7631 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1227,7 +1227,7 @@ static void fealnx_tx_timeout(struct net_device *dev)
spin_unlock_irqrestore(&np->lock, flags);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev); /* or .._start_.. ?? */
}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 195122e11..f58f9ea51 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -517,7 +517,6 @@ struct fec_enet_private {
/* Phylib and MDIO interface */
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
int mii_timeout;
uint phy_speed;
phy_interface_t phy_interface;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2a03857cc..fea0f330d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -967,10 +967,10 @@ fec_restart(struct net_device *ndev)
rcntl &= ~(1 << 8);
/* 1G, 100M or 10M */
- if (fep->phy_dev) {
- if (fep->phy_dev->speed == SPEED_1000)
+ if (ndev->phydev) {
+ if (ndev->phydev->speed == SPEED_1000)
ecntl |= (1 << 5);
- else if (fep->phy_dev->speed == SPEED_100)
+ else if (ndev->phydev->speed == SPEED_100)
rcntl &= ~(1 << 9);
else
rcntl |= (1 << 9);
@@ -991,7 +991,7 @@ fec_restart(struct net_device *ndev)
*/
cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
- if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
+ if (ndev->phydev && ndev->phydev->speed == SPEED_10)
cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
@@ -1005,7 +1005,7 @@ fec_restart(struct net_device *ndev)
/* enable pause frame*/
if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
- fep->phy_dev && fep->phy_dev->pause)) {
+ ndev->phydev && ndev->phydev->pause)) {
rcntl |= FEC_ENET_FCE;
/* set FIFO threshold parameter to reduce overrun */
@@ -1197,10 +1197,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
bdp->cbd_bufaddr = cpu_to_fec32(0);
- if (!skb) {
- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
- continue;
- }
+ if (!skb)
+ goto skb_done;
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1239,7 +1237,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
-
+skb_done:
/* Make sure the update to bdp and tx_skbuff are performed
* before dirty_tx
*/
@@ -1685,7 +1683,7 @@ static void fec_get_mac(struct net_device *ndev)
static void fec_enet_adjust_link(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phy_dev = fep->phy_dev;
+ struct phy_device *phy_dev = ndev->phydev;
int status_change = 0;
/* Prevent a state halted on mii error */
@@ -1885,8 +1883,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
int phy_id;
int dev_id = fep->dev_id;
- fep->phy_dev = NULL;
-
if (fep->phy_node) {
phy_dev = of_phy_connect(ndev, fep->phy_node,
&fec_enet_adjust_link, 0,
@@ -1934,7 +1930,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
phy_dev->advertising = phy_dev->supported;
- fep->phy_dev = phy_dev;
fep->link = 0;
fep->full_duplex = 0;
@@ -2064,30 +2059,6 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
}
}
-static int fec_enet_get_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phydev = fep->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int fec_enet_set_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phydev = fep->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static void fec_enet_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
@@ -2220,7 +2191,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
- if (!fep->phy_dev)
+ if (!ndev->phydev)
return -ENODEV;
if (pause->tx_pause != pause->rx_pause) {
@@ -2236,17 +2207,17 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
if (pause->rx_pause || pause->autoneg) {
- fep->phy_dev->supported |= ADVERTISED_Pause;
- fep->phy_dev->advertising |= ADVERTISED_Pause;
+ ndev->phydev->supported |= ADVERTISED_Pause;
+ ndev->phydev->advertising |= ADVERTISED_Pause;
} else {
- fep->phy_dev->supported &= ~ADVERTISED_Pause;
- fep->phy_dev->advertising &= ~ADVERTISED_Pause;
+ ndev->phydev->supported &= ~ADVERTISED_Pause;
+ ndev->phydev->advertising &= ~ADVERTISED_Pause;
}
if (pause->autoneg) {
if (netif_running(ndev))
fec_stop(ndev);
- phy_start_aneg(fep->phy_dev);
+ phy_start_aneg(ndev->phydev);
}
if (netif_running(ndev)) {
napi_disable(&fep->napi);
@@ -2362,8 +2333,7 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
static int fec_enet_nway_reset(struct net_device *dev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
- struct phy_device *phydev = fep->phy_dev;
+ struct phy_device *phydev = dev->phydev;
if (!phydev)
return -ENODEV;
@@ -2446,24 +2416,24 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) {
- pr_err("Rx coalesced frames exceed hardware limiation");
+ pr_err("Rx coalesced frames exceed hardware limitation\n");
return -EINVAL;
}
if (ec->tx_max_coalesced_frames > 255) {
- pr_err("Tx coalesced frame exceed hardware limiation");
+ pr_err("Tx coalesced frame exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesed usec exceeed hardware limiation");
+ pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesed usec exceeed hardware limiation");
+ pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
@@ -2568,8 +2538,6 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops fec_enet_ethtool_ops = {
- .get_settings = fec_enet_get_settings,
- .set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
.get_regs_len = fec_enet_get_regs_len,
.get_regs = fec_enet_get_regs,
@@ -2589,12 +2557,14 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.set_tunable = fec_enet_set_tunable,
.get_wol = fec_enet_get_wol,
.set_wol = fec_enet_set_wol,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phydev = fep->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (!netif_running(ndev))
return -EINVAL;
@@ -2849,7 +2819,7 @@ fec_enet_open(struct net_device *ndev)
goto err_enet_mii_probe;
napi_enable(&fep->napi);
- phy_start(fep->phy_dev);
+ phy_start(ndev->phydev);
netif_tx_start_all_queues(ndev);
device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
@@ -2873,7 +2843,7 @@ fec_enet_close(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- phy_stop(fep->phy_dev);
+ phy_stop(ndev->phydev);
if (netif_device_present(ndev)) {
napi_disable(&fep->napi);
@@ -2881,8 +2851,7 @@ fec_enet_close(struct net_device *ndev)
fec_stop(ndev);
}
- phy_disconnect(fep->phy_dev);
- fep->phy_dev = NULL;
+ phy_disconnect(ndev->phydev);
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
@@ -3510,7 +3479,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
if (netif_running(ndev)) {
if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
- phy_stop(fep->phy_dev);
+ phy_stop(ndev->phydev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
netif_device_detach(ndev);
@@ -3570,7 +3539,7 @@ static int __maybe_unused fec_resume(struct device *dev)
netif_device_attach(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
- phy_start(fep->phy_dev);
+ phy_start(ndev->phydev);
}
rtnl_unlock();
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 25553ee85..446ae9d60 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -66,7 +66,6 @@ struct mpc52xx_fec_priv {
/* MDIO link details */
unsigned int mdio_speed;
struct device_node *phy_node;
- struct phy_device *phydev;
enum phy_state link;
int seven_wire_mode;
};
@@ -165,7 +164,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task
static void mpc52xx_fec_adjust_link(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
int new_state = 0;
if (phydev->link != PHY_DOWN) {
@@ -215,16 +214,17 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev)
static int mpc52xx_fec_open(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
int err = -EBUSY;
if (priv->phy_node) {
- priv->phydev = of_phy_connect(priv->ndev, priv->phy_node,
- mpc52xx_fec_adjust_link, 0, 0);
- if (!priv->phydev) {
+ phydev = of_phy_connect(priv->ndev, priv->phy_node,
+ mpc52xx_fec_adjust_link, 0, 0);
+ if (!phydev) {
dev_err(&dev->dev, "of_phy_connect failed\n");
return -ENODEV;
}
- phy_start(priv->phydev);
+ phy_start(phydev);
}
if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
@@ -268,10 +268,9 @@ static int mpc52xx_fec_open(struct net_device *dev)
free_ctrl_irq:
free_irq(dev->irq, dev);
free_phy:
- if (priv->phydev) {
- phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ if (phydev) {
+ phy_stop(phydev);
+ phy_disconnect(phydev);
}
return err;
@@ -280,6 +279,7 @@ static int mpc52xx_fec_open(struct net_device *dev)
static int mpc52xx_fec_close(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
netif_stop_queue(dev);
@@ -291,11 +291,10 @@ static int mpc52xx_fec_close(struct net_device *dev)
free_irq(priv->r_irq, dev);
free_irq(priv->t_irq, dev);
- if (priv->phydev) {
+ if (phydev) {
/* power down phy */
- phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ phy_stop(phydev);
+ phy_disconnect(phydev);
}
return 0;
@@ -763,26 +762,6 @@ static void mpc52xx_fec_reset(struct net_device *dev)
/* ethtool interface */
-static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-
- if (!priv->phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-
- if (!priv->phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
@@ -796,23 +775,23 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
}
static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
- .get_settings = mpc52xx_fec_get_settings,
- .set_settings = mpc52xx_fec_set_settings,
.get_link = ethtool_op_get_link,
.get_msglevel = mpc52xx_fec_get_msglevel,
.set_msglevel = mpc52xx_fec_set_msglevel,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
- if (!priv->phydev)
+ if (!phydev)
return -ENOTSUPP;
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(phydev, rq, cmd);
}
static const struct net_device_ops mpc52xx_fec_netdev_ops = {
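
[Editor's note] Every driver touched in this series follows the same recipe: drop the cached struct phy_device pointer from the driver-private struct and read it from net_device instead, keeping a NULL check for the window before the PHY is attached. A sketch of the resulting ioctl path, mirroring the converted handlers above (the function name is illustrative, not from this diff):

static int example_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;	/* set by of_phy_connect() */

	if (!phydev)
		return -ENODEV;		/* PHY not attached yet */

	return phy_mii_ioctl(phydev, rq, cmd);
}
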
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index ea83712a6..1de2e1e51 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -615,7 +615,7 @@ struct fman {
struct fman_cfg *cfg;
struct muram_info *muram;
/* cam section in muram */
- int cam_offset;
+ unsigned long cam_offset;
size_t cam_size;
/* Fifo in MURAM */
int fifo_offset;
@@ -2772,7 +2772,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Get the FM address */
res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&of_dev->dev, "%s: Can't get FMan memory resouce\n",
+ dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
__func__);
goto fman_node_put;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 4eb0e9ac7..47394c45b 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -129,7 +129,7 @@ unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
*
* Return: address of the allocated memory; NULL otherwise.
*/
-int fman_muram_alloc(struct muram_info *muram, size_t size)
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
{
unsigned long vaddr;
@@ -150,7 +150,7 @@ int fman_muram_alloc(struct muram_info *muram, size_t size)
*
* Free an allocated memory from FM-MURAM partition.
*/
-void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size)
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size)
{
unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index dbf0af9e5..889649ad8 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -44,8 +44,8 @@ struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
unsigned long offset);
-int fman_muram_alloc(struct muram_info *muram, size_t size);
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
-void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size);
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size);
#endif /* __FM_MURAM_EXT */
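
[Editor's note] fman_muram_alloc() is widened from int to unsigned long because MURAM offsets are address-sized values: a signed int both truncates large offsets and overloads negative values as an error channel. A hedged caller sketch under the new convention; the field names follow the struct fman hunk above, but the IS_ERR_VALUE() failure check is an assumption about the error encoding, not taken verbatim from this diff:

	/* offset is unsigned long now; a plain "< 0" test no longer works */
	fman->cam_offset = fman_muram_alloc(fman->muram, fman->cam_size);
	if (IS_ERR_VALUE(fman->cam_offset)) {
		dev_err(fman->dev, "MURAM CAM allocation failed\n");
		return -ENOMEM;
	}
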
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 48a9c176e..61fd486c5 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -652,13 +652,13 @@ static void fs_timeout(struct net_device *dev)
spin_lock_irqsave(&fep->lock, flags);
if (dev->flags & IFF_UP) {
- phy_stop(fep->phydev);
+ phy_stop(dev->phydev);
(*fep->ops->stop)(dev);
(*fep->ops->restart)(dev);
- phy_start(fep->phydev);
+ phy_start(dev->phydev);
}
- phy_start(fep->phydev);
+ phy_start(dev->phydev);
wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
spin_unlock_irqrestore(&fep->lock, flags);
@@ -672,7 +672,7 @@ static void fs_timeout(struct net_device *dev)
static void generic_adjust_link(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- struct phy_device *phydev = fep->phydev;
+ struct phy_device *phydev = dev->phydev;
int new_state = 0;
if (phydev->link) {
@@ -741,8 +741,6 @@ static int fs_init_phy(struct net_device *dev)
return -ENODEV;
}
- fep->phydev = phydev;
-
return 0;
}
@@ -776,7 +774,7 @@ static int fs_enet_open(struct net_device *dev)
napi_disable(&fep->napi_tx);
return err;
}
- phy_start(fep->phydev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
@@ -792,7 +790,7 @@ static int fs_enet_close(struct net_device *dev)
netif_carrier_off(dev);
napi_disable(&fep->napi);
napi_disable(&fep->napi_tx);
- phy_stop(fep->phydev);
+ phy_stop(dev->phydev);
spin_lock_irqsave(&fep->lock, flags);
spin_lock(&fep->tx_lock);
@@ -801,8 +799,7 @@ static int fs_enet_close(struct net_device *dev)
spin_unlock_irqrestore(&fep->lock, flags);
/* release any irqs */
- phy_disconnect(fep->phydev);
- fep->phydev = NULL;
+ phy_disconnect(dev->phydev);
free_irq(fep->interrupt, dev);
return 0;
@@ -847,26 +844,6 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = 0;
}
-static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- if (!fep->phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(fep->phydev, cmd);
-}
-
-static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- if (!fep->phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(fep->phydev, cmd);
-}
-
static int fs_nway_reset(struct net_device *dev)
{
return 0;
@@ -887,24 +864,22 @@ static void fs_set_msglevel(struct net_device *dev, u32 value)
static const struct ethtool_ops fs_ethtool_ops = {
.get_drvinfo = fs_get_drvinfo,
.get_regs_len = fs_get_regs_len,
- .get_settings = fs_get_settings,
- .set_settings = fs_set_settings,
.nway_reset = fs_nway_reset,
.get_link = ethtool_op_get_link,
.get_msglevel = fs_get_msglevel,
.set_msglevel = fs_set_msglevel,
.get_regs = fs_get_regs,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct fs_enet_private *fep = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
- return phy_mii_ioctl(fep->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
extern int fs_mii_connect(struct net_device *dev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index f184d8f95..e29f54a35 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -149,7 +149,6 @@ struct fs_enet_private {
unsigned int last_mii_status;
int interrupt;
- struct phy_device *phydev;
int oldduplex, oldspeed, oldlink; /* current settings */
/* event masks */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 1ba359f17..d71761a34 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -370,7 +370,7 @@ static void restart(struct net_device *dev)
/* adjust to speed (for RMII mode) */
if (fpi->use_rmii) {
- if (fep->phydev->speed == 100)
+ if (dev->phydev->speed == 100)
C8(fcccp, fcc_gfemr, 0x20);
else
S8(fcccp, fcc_gfemr, 0x20);
@@ -396,7 +396,7 @@ static void restart(struct net_device *dev)
S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
/* adjust to duplex mode */
- if (fep->phydev->duplex)
+ if (dev->phydev->duplex)
S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
else
C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index bade2f8f9..35a318ed3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -254,7 +254,7 @@ static void restart(struct net_device *dev)
int r;
u32 addrhi, addrlo;
- struct mii_bus *mii = fep->phydev->mdio.bus;
+ struct mii_bus *mii = dev->phydev->mdio.bus;
struct fec_info* fec_inf = mii->priv;
r = whack_reset(fep->fec.fecp);
@@ -333,7 +333,7 @@ static void restart(struct net_device *dev)
/*
* adjust to duplex mode
*/
- if (fep->phydev->duplex) {
+ if (dev->phydev->duplex) {
FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
} else {
@@ -363,7 +363,7 @@ static void stop(struct net_device *dev)
const struct fs_platform_info *fpi = fep->fpi;
struct fec __iomem *fecp = fep->fec.fecp;
- struct fec_info *feci = fep->phydev->mdio.bus->priv;
+ struct fec_info *feci = dev->phydev->mdio.bus->priv;
int i;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 7a184e881..e8b9c33d3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -352,7 +352,7 @@ static void restart(struct net_device *dev)
W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
/* Set full duplex mode if needed */
- if (fep->phydev->duplex)
+ if (dev->phydev->duplex)
S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
/* Restore multicast and promiscuous settings */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d2f917af5..2e6785b6e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -999,7 +999,7 @@ static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -1009,10 +1009,10 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (cmd == SIOCGHWTSTAMP)
return gfar_hwtstamp_get(dev, rq);
- if (!priv->phydev)
+ if (!phydev)
return -ENODEV;
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(phydev, rq, cmd);
}
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
@@ -1635,7 +1635,7 @@ static int gfar_suspend(struct device *dev)
gfar_start_wol_filer(priv);
} else {
- phy_stop(priv->phydev);
+ phy_stop(ndev->phydev);
}
return 0;
@@ -1664,7 +1664,7 @@ static int gfar_resume(struct device *dev)
gfar_filer_restore_table(priv);
} else {
- phy_start(priv->phydev);
+ phy_start(ndev->phydev);
}
gfar_start(priv);
@@ -1698,8 +1698,8 @@ static int gfar_restore(struct device *dev)
priv->oldspeed = 0;
priv->oldduplex = -1;
- if (priv->phydev)
- phy_start(priv->phydev);
+ if (ndev->phydev)
+ phy_start(ndev->phydev);
netif_device_attach(ndev);
enable_napi(priv);
@@ -1778,6 +1778,7 @@ static int init_phy(struct net_device *dev)
priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
GFAR_SUPPORTED_GBIT : 0;
phy_interface_t interface;
+ struct phy_device *phydev;
priv->oldlink = 0;
priv->oldspeed = 0;
@@ -1785,9 +1786,9 @@ static int init_phy(struct net_device *dev)
interface = gfar_get_interface(dev);
- priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
- interface);
- if (!priv->phydev) {
+ phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
+ interface);
+ if (!phydev) {
dev_err(&dev->dev, "could not attach to PHY\n");
return -ENODEV;
}
@@ -1796,11 +1797,11 @@ static int init_phy(struct net_device *dev)
gfar_configure_serdes(dev);
/* Remove any features not supported by the controller */
- priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
- priv->phydev->advertising = priv->phydev->supported;
+ phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
+ phydev->advertising = phydev->supported;
/* Add support for flow control, but don't advertise it by default */
- priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
return 0;
}
@@ -1944,7 +1945,7 @@ void stop_gfar(struct net_device *dev)
/* disable ints and gracefully shut down Rx/Tx DMA */
gfar_halt(priv);
- phy_stop(priv->phydev);
+ phy_stop(dev->phydev);
free_skb_resources(priv);
}
@@ -2076,7 +2077,7 @@ void gfar_start(struct gfar_private *priv)
gfar_ints_enable(priv);
- priv->ndev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(priv->ndev); /* prevent tx timeout */
}
static void free_grp_irqs(struct gfar_priv_grp *grp)
@@ -2204,7 +2205,7 @@ int startup_gfar(struct net_device *ndev)
priv->oldspeed = 0;
priv->oldduplex = -1;
- phy_start(priv->phydev);
+ phy_start(ndev->phydev);
enable_napi(priv);
@@ -2439,7 +2440,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->tx_ring_size);
if (likely(!nr_frags)) {
- lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ if (likely(!do_tstamp))
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
u32 lstatus_start = lstatus;
@@ -2572,8 +2574,7 @@ static int gfar_close(struct net_device *dev)
stop_gfar(dev);
/* Disconnect from the PHY */
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ phy_disconnect(dev->phydev);
gfar_free_irq(priv);
@@ -3379,7 +3380,7 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
static void adjust_link(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
if (unlikely(phydev->link != priv->oldlink ||
(phydev->link && (phydev->duplex != priv->oldduplex ||
@@ -3620,7 +3621,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
- struct phy_device *phydev = priv->phydev;
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
u32 val = 0;
if (!phydev->duplex)
@@ -3660,7 +3662,8 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
static noinline void gfar_update_link_state(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- struct phy_device *phydev = priv->phydev;
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
struct gfar_priv_rx_q *rx_queue = NULL;
int i;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index cb7766797..373fd094f 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1153,7 +1153,6 @@ struct gfar_private {
phy_interface_t interface;
struct device_node *phy_node;
struct device_node *tbi_node;
- struct phy_device *phydev;
struct mii_bus *mii_bus;
int oldspeed;
int oldduplex;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 4b0ee855e..56588f2e1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -184,40 +184,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
}
-
-static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct gfar_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
-
- if (NULL == phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
-
-/* Return the current settings in the ethtool_cmd structure */
-static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct gfar_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
- struct gfar_priv_rx_q *rx_queue = NULL;
- struct gfar_priv_tx_q *tx_queue = NULL;
-
- if (NULL == phydev)
- return -ENODEV;
- tx_queue = priv->tx_queue[0];
- rx_queue = priv->rx_queue[0];
-
- /* etsec-1.7 and older versions have only one txic
- * and rxic regs although they support multiple queues */
- cmd->maxtxpkt = get_icft_value(tx_queue->txic);
- cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
@@ -242,10 +208,12 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
unsigned int usecs)
{
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
unsigned int count;
/* The timer is different, depending on the interface speed */
- switch (priv->phydev->speed) {
+ switch (phydev->speed) {
case SPEED_1000:
count = GFAR_GBIT_TIME;
break;
@@ -267,10 +235,12 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
unsigned int ticks)
{
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
unsigned int count;
/* The timer is different, depending on the interface speed */
- switch (priv->phydev->speed) {
+ switch (phydev->speed) {
case SPEED_1000:
count = GFAR_GBIT_TIME;
break;
@@ -304,7 +274,7 @@ static int gfar_gcoalesce(struct net_device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
return -EOPNOTSUPP;
- if (NULL == priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
rx_queue = priv->rx_queue[0];
@@ -365,7 +335,7 @@ static int gfar_scoalesce(struct net_device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
return -EOPNOTSUPP;
- if (NULL == priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
/* Check the bounds of the values */
@@ -529,7 +499,7 @@ static int gfar_spauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
struct gfar_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 oldadv, newadv;
@@ -1565,8 +1535,6 @@ static int gfar_get_ts_info(struct net_device *dev,
}
const struct ethtool_ops gfar_ethtool_ops = {
- .get_settings = gfar_gsettings,
- .set_settings = gfar_ssettings,
.get_drvinfo = gfar_gdrvinfo,
.get_regs_len = gfar_reglen,
.get_regs = gfar_get_regs,
@@ -1589,4 +1557,6 @@ const struct ethtool_ops gfar_ethtool_ops = {
.set_rxnfc = gfar_set_nfc,
.get_rxnfc = gfar_get_nfc,
.get_ts_info = gfar_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 89714f5e0..812a968a7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -105,23 +105,20 @@ static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
#define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
static int
-uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct phy_device *phydev = ugeth->phydev;
- struct ucc_geth_info *ug_info = ugeth->ug_info;
if (!phydev)
return -ENODEV;
- ecmd->maxtxpkt = 1;
- ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0];
-
- return phy_ethtool_gset(phydev, ecmd);
+ return phy_ethtool_ksettings_get(phydev, cmd);
}
static int
-uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+uec_set_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct phy_device *phydev = ugeth->phydev;
@@ -129,7 +126,7 @@ uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
if (!phydev)
return -ENODEV;
- return phy_ethtool_sset(phydev, ecmd);
+ return phy_ethtool_ksettings_set(phydev, cmd);
}
static void
@@ -392,8 +389,6 @@ static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
#endif /* CONFIG_PM */
static const struct ethtool_ops uec_ethtool_ops = {
- .get_settings = uec_get_settings,
- .set_settings = uec_set_settings,
.get_drvinfo = uec_get_drvinfo,
.get_regs_len = uec_get_regs_len,
.get_regs = uec_get_regs,
@@ -411,6 +406,8 @@ static const struct ethtool_ops uec_ethtool_ops = {
.get_wol = uec_get_wol,
.set_wol = uec_set_wol,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = uec_get_ksettings,
+ .set_link_ksettings = uec_set_ksettings,
};
void uec_set_ethtool_ops(struct net_device *netdev)
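
[Editor's note] ucc_geth cannot use the generic callbacks yet because it still caches the PHY in ugeth->phydev rather than netdev->phydev, so it keeps thin wrappers around the ksettings conversion helpers. For comparison, the generic helper it could eventually switch to boils down to roughly this (paraphrased, assuming ndev->phydev is populated):

int phy_ethtool_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_get(phydev, cmd);
}
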
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 678f5018d..399cfd217 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -746,7 +746,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
lp->sent = lp->tx_queue ;
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
} else {
lp->tx_started = 0;
}
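
[Editor's note] Here and in the hix5hd2 hunk below, open-coded dev->trans_start writes are replaced by netif_trans_update(). The helper, paraphrased from the netdevice.h of this kernel era rather than copied from this diff, updates the watchdog timestamp of TX queue 0, which is where multiqueue-aware code expects it:

static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* avoid dirtying the cacheline when the timestamp is current */
	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}
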
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index e51892d51..b9f2ea593 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -636,7 +636,7 @@ static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
pos = dma_ring_incr(pos, TX_DESC_NUM);
writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
netdev_sent_queue(dev, skb->len);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index a1cb461ac..7a757e88c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -29,25 +29,6 @@ static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
return vf_cb->mac_cb;
}
-/**
- * hns_ae_map_eport_to_dport - translate enet port id to dsaf port id
- * @port_id: enet port id
- *: debug port 0-1, service port 2 -7 (dsaf mode only 2)
- * return: dsaf port id
- *: service ports 0 - 5, debug port 6-7
- **/
-static int hns_ae_map_eport_to_dport(u32 port_id)
-{
- int port_index;
-
- if (port_id < DSAF_DEBUG_NW_NUM)
- port_index = port_id + DSAF_SERVICE_PORT_NUM_PER_DSAF;
- else
- port_index = port_id - DSAF_DEBUG_NW_NUM;
-
- return port_index;
-}
-
static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
{
return container_of(dev, struct dsaf_device, ae_dev);
@@ -56,50 +37,35 @@ static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
{
int ppe_index;
- int ppe_common_index;
struct ppe_common_cb *ppe_comm;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
- if (vf_cb->port_index < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
- ppe_index = vf_cb->port_index;
- ppe_common_index = 0;
- } else {
- ppe_index = 0;
- ppe_common_index =
- vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
- }
- ppe_comm = vf_cb->dsaf_dev->ppe_common[ppe_common_index];
+ ppe_comm = vf_cb->dsaf_dev->ppe_common[0];
+ ppe_index = vf_cb->port_index;
+
return &ppe_comm->ppe_cb[ppe_index];
}
static int hns_ae_get_q_num_per_vf(
struct dsaf_device *dsaf_dev, int port)
{
- int common_idx = hns_dsaf_get_comm_idx_by_port(port);
-
- return dsaf_dev->rcb_common[common_idx]->max_q_per_vf;
+ return dsaf_dev->rcb_common[0]->max_q_per_vf;
}
static int hns_ae_get_vf_num_per_port(
struct dsaf_device *dsaf_dev, int port)
{
- int common_idx = hns_dsaf_get_comm_idx_by_port(port);
-
- return dsaf_dev->rcb_common[common_idx]->max_vfn;
+ return dsaf_dev->rcb_common[0]->max_vfn;
}
static struct ring_pair_cb *hns_ae_get_base_ring_pair(
struct dsaf_device *dsaf_dev, int port)
{
- int common_idx = hns_dsaf_get_comm_idx_by_port(port);
- struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[common_idx];
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[0];
int q_num = rcb_comm->max_q_per_vf;
int vf_num = rcb_comm->max_vfn;
- if (common_idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
- return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
- else
- return &rcb_comm->ring_pair_cb[0];
+ return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
}
static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
@@ -110,7 +76,6 @@ static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
u32 port_id)
{
- int port_idx;
int vfnum_per_port;
int qnum_per_vf;
int i;
@@ -120,11 +85,10 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
struct hnae_vf_cb *vf_cb;
dsaf_dev = hns_ae_get_dsaf_dev(dev);
- port_idx = hns_ae_map_eport_to_dport(port_id);
- ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_idx);
- vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_idx);
- qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_idx);
+ ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_id);
+ vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id);
+ qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id);
vf_cb = kzalloc(sizeof(*vf_cb) +
qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
@@ -163,14 +127,14 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
}
vf_cb->dsaf_dev = dsaf_dev;
- vf_cb->port_index = port_idx;
- vf_cb->mac_cb = &dsaf_dev->mac_cb[port_idx];
+ vf_cb->port_index = port_id;
+ vf_cb->mac_cb = dsaf_dev->mac_cb[port_id];
ae_handle->phy_if = vf_cb->mac_cb->phy_if;
ae_handle->phy_node = vf_cb->mac_cb->phy_node;
ae_handle->if_support = vf_cb->mac_cb->if_support;
ae_handle->port_type = vf_cb->mac_cb->mac_type;
- ae_handle->dport_id = port_idx;
+ ae_handle->dport_id = port_id;
return ae_handle;
vf_id_err:
@@ -320,11 +284,8 @@ static void hns_ae_reset(struct hnae_handle *handle)
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
- u8 ppe_common_index =
- vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
-
hns_mac_reset(vf_cb->mac_cb);
- hns_ppe_reset_common(vf_cb->dsaf_dev, ppe_common_index);
+ hns_ppe_reset_common(vf_cb->dsaf_dev, 0);
}
}
@@ -399,11 +360,16 @@ static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
static void hns_ae_get_pauseparam(struct hnae_handle *handle,
u32 *auto_neg, u32 *rx_en, u32 *tx_en)
{
- assert(handle);
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+
+ hns_mac_get_autoneg(mac_cb, auto_neg);
- hns_mac_get_autoneg(hns_get_mac_cb(handle), auto_neg);
+ hns_mac_get_pauseparam(mac_cb, rx_en, tx_en);
- hns_mac_get_pauseparam(hns_get_mac_cb(handle), rx_en, tx_en);
+ /* Service port's pause feature is provided by DSAF, not mac */
+ if (handle->port_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en);
}
static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
@@ -436,12 +402,21 @@ static int hns_ae_set_pauseparam(struct hnae_handle *handle,
u32 autoneg, u32 rx_en, u32 tx_en)
{
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
int ret;
ret = hns_mac_set_autoneg(mac_cb, autoneg);
if (ret)
return ret;
+ /* Service port's pause feature is provided by DSAF, not mac */
+ if (handle->port_type == HNAE_PORT_SERVICE) {
+ ret = hns_dsaf_set_rx_mac_pause_en(dsaf_dev,
+ mac_cb->mac_id, rx_en);
+ if (ret)
+ return ret;
+ rx_en = 0;
+ }
return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
}
@@ -689,7 +664,7 @@ void hns_ae_update_led_status(struct hnae_handle *handle)
assert(handle);
mac_cb = hns_get_mac_cb(handle);
- if (!mac_cb->cpld_vaddr)
+ if (!mac_cb->cpld_ctrl)
return;
hns_set_led_opt(mac_cb);
}
@@ -709,7 +684,6 @@ int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
void hns_ae_get_regs(struct hnae_handle *handle, void *data)
{
u32 *p = data;
- u32 rcb_com_idx;
int i;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
@@ -717,8 +691,7 @@ void hns_ae_get_regs(struct hnae_handle *handle, void *data)
hns_ppe_get_regs(ppe_cb, p);
p += hns_ppe_get_regs_count();
- rcb_com_idx = hns_dsaf_get_comm_idx_by_port(vf_cb->port_index);
- hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[rcb_com_idx], p);
+ hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[0], p);
p += hns_rcb_get_common_regs_count();
for (i = 0; i < handle->q_num; i++) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a38084a22..611581fcc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -7,18 +7,19 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/phy_fixed.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/phy_fixed.h>
+#include <linux/platform_device.h>
-#include "hns_dsaf_misc.h"
#include "hns_dsaf_main.h"
+#include "hns_dsaf_misc.h"
#include "hns_dsaf_rcb.h"
#define MAC_EN_FLAG_V 0xada0328
@@ -81,17 +82,6 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
}
}
-int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
-{
- if (!mac_cb->cpld_vaddr)
- return -ENODEV;
-
- *sfp_prsnt = !dsaf_read_b((u8 *)mac_cb->cpld_vaddr
- + MAC_SFP_PORT_OFFSET);
-
- return 0;
-}
-
void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
{
struct mac_driver *mac_ctrl_drv;
@@ -168,10 +158,9 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
u8 vmid, u8 *port_num)
{
u8 tmp_port;
- u32 comm_idx;
if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
- if (mac_cb->mac_id != DSAF_MAX_PORT_NUM_PER_CHIP) {
+ if (mac_cb->mac_id != DSAF_MAX_PORT_NUM) {
dev_err(mac_cb->dev,
"input invalid,%s mac%d vmid%d !\n",
mac_cb->dsaf_dev->ae_dev.name,
@@ -179,7 +168,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
return -EINVAL;
}
} else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
- if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM_PER_CHIP) {
+ if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM) {
dev_err(mac_cb->dev,
"input invalid,%s mac%d vmid%d!\n",
mac_cb->dsaf_dev->ae_dev.name,
@@ -192,9 +181,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
return -EINVAL;
}
- comm_idx = hns_dsaf_get_comm_idx_by_port(mac_cb->mac_id);
-
- if (vmid >= mac_cb->dsaf_dev->rcb_common[comm_idx]->max_vfn) {
+ if (vmid >= mac_cb->dsaf_dev->rcb_common[0]->max_vfn) {
dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n",
mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid);
return -EINVAL;
@@ -234,7 +221,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
}
/**
- *hns_mac_get_inner_port_num - change vf mac address
+ *hns_mac_change_vf_addr - change vf mac address
*@mac_cb: mac device
*@vmid: vmid
*@addr:mac address
@@ -249,7 +236,7 @@ int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
struct mac_entry_idx *old_entry;
old_entry = &mac_cb->addr_entry_idx[vmid];
- if (dsaf_dev) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
mac_entry.in_vlan_id = old_entry->vlan_id;
mac_entry.in_port_num = mac_cb->mac_id;
@@ -289,7 +276,7 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
struct dsaf_drv_mac_single_dest_entry mac_entry;
- if (dsaf_dev && addr) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev) && addr) {
memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
mac_entry.in_vlan_id = 0;/*vlan_id;*/
mac_entry.in_port_num = mac_cb->mac_id;
@@ -380,7 +367,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
if (mac_cb->mac_type == HNAE_PORT_DEBUG)
return 0;
- if (dsaf_dev) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
mac_entry.in_vlan_id = vlan_id;
mac_entry.in_port_num = mac_cb->mac_id;
@@ -418,7 +405,7 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
uc_mac_entry = &mac_cb->addr_entry_idx[vmid];
- if (dsaf_dev) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
mac_entry.in_vlan_id = uc_mac_entry->vlan_id;
mac_entry.in_port_num = mac_cb->mac_id;
@@ -439,9 +426,8 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
void hns_mac_reset(struct hns_mac_cb *mac_cb)
{
- struct mac_driver *drv;
-
- drv = hns_mac_get_drv(mac_cb);
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+ bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
drv->mac_init(drv);
@@ -456,7 +442,7 @@ void hns_mac_reset(struct hns_mac_cb *mac_cb)
if (drv->mac_pausefrm_cfg) {
if (mac_cb->mac_type == HNAE_PORT_DEBUG)
- drv->mac_pausefrm_cfg(drv, 0, 0);
+ drv->mac_pausefrm_cfg(drv, !is_ver1, !is_ver1);
else /* mac rx must disable, dsaf pfc close instead of it*/
drv->mac_pausefrm_cfg(drv, 0, 1);
}
@@ -561,14 +547,6 @@ void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en)
*rx_en = 0;
*tx_en = 0;
}
-
- /* Due to the chip defect, the service mac's rx pause CAN'T be enabled.
- * We set the rx pause frm always be true (1), because DSAF deals with
- * the rx pause frm instead of service mac. After all, we still support
- * rx pause frm.
- */
- if (mac_cb->mac_type == HNAE_PORT_SERVICE)
- *rx_en = 1;
}
/**
@@ -602,20 +580,13 @@ int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
{
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+ bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
- if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
- if (!rx_en) {
- dev_err(mac_cb->dev, "disable rx_pause is not allowed!");
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
+ if (is_ver1 && (tx_en || rx_en)) {
+ dev_err(mac_cb->dev, "macv1 cann't enable tx/rx_pause!");
return -EINVAL;
}
- } else if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
- if (tx_en || rx_en) {
- dev_err(mac_cb->dev, "enable tx_pause or enable rx_pause are not allowed!");
- return -EINVAL;
- }
- } else {
- dev_err(mac_cb->dev, "Unsupport this operation!");
- return -EINVAL;
}
if (mac_ctrl_drv->mac_pausefrm_cfg)
@@ -667,14 +638,18 @@ free_mac_drv:
}
/**
- *mac_free_dev - get mac information from device node
+ *hns_mac_get_info - get mac information from device node
*@mac_cb: mac device
*@np:device node
- *@mac_mode_idx:mac mode index
+ * return: 0 - success, negative - fail
*/
-static void hns_mac_get_info(struct hns_mac_cb *mac_cb,
- struct device_node *np, u32 mac_mode_idx)
+static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
{
+ struct device_node *np = mac_cb->dev->of_node;
+ struct regmap *syscon;
+ struct of_phandle_args cpld_args;
+ u32 ret;
+
mac_cb->link = false;
mac_cb->half_duplex = false;
mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if];
@@ -690,12 +665,73 @@ static void hns_mac_get_info(struct hns_mac_cb *mac_cb,
mac_cb->max_frm = MAC_DEFAULT_MTU;
mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME;
+ mac_cb->port_rst_off = mac_cb->mac_id;
+ mac_cb->port_mode_off = 0;
- /* Get the rest of the PHY information */
- mac_cb->phy_node = of_parse_phandle(np, "phy-handle", mac_cb->mac_id);
+ /* if the dsaf node doesn't contain a port subnode, get phy-handle
+ * from dsaf node
+ */
+ if (!mac_cb->fw_port) {
+ mac_cb->phy_node = of_parse_phandle(np, "phy-handle",
+ mac_cb->mac_id);
+ if (mac_cb->phy_node)
+ dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
+ mac_cb->mac_id, mac_cb->phy_node->name);
+ return 0;
+ }
+ if (!is_of_node(mac_cb->fw_port))
+ return -EINVAL;
+ /* parse property from port subnode in dsaf */
+ mac_cb->phy_node = of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "phy-handle", 0);
if (mac_cb->phy_node)
dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
mac_cb->mac_id, mac_cb->phy_node->name);
+ syscon = syscon_node_to_regmap(
+ of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "serdes-syscon", 0));
+ if (IS_ERR_OR_NULL(syscon)) {
+ dev_err(mac_cb->dev, "serdes-syscon is needed!\n");
+ return -EINVAL;
+ }
+ mac_cb->serdes_ctrl = syscon;
+
+ ret = fwnode_property_read_u32(mac_cb->fw_port,
+ "port-rst-offset",
+ &mac_cb->port_rst_off);
+ if (ret) {
+ dev_dbg(mac_cb->dev,
+ "mac%d port-rst-offset not found, use default value.\n",
+ mac_cb->mac_id);
+ }
+
+ ret = fwnode_property_read_u32(mac_cb->fw_port,
+ "port-mode-offset",
+ &mac_cb->port_mode_off);
+ if (ret) {
+ dev_dbg(mac_cb->dev,
+ "mac%d port-mode-offset not found, use default value.\n",
+ mac_cb->mac_id);
+ }
+
+ ret = of_parse_phandle_with_fixed_args(to_of_node(mac_cb->fw_port),
+ "cpld-syscon", 1, 0, &cpld_args);
+ if (ret) {
+ dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n",
+ mac_cb->mac_id);
+ mac_cb->cpld_ctrl = NULL;
+ } else {
+ syscon = syscon_node_to_regmap(cpld_args.np);
+ if (IS_ERR_OR_NULL(syscon)) {
+ dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
+ mac_cb->cpld_ctrl = NULL;
+ } else {
+ mac_cb->cpld_ctrl = syscon;
+ mac_cb->cpld_ctrl_reg = cpld_args.args[0];
+ }
+ }
+
+ return 0;
}
/**
@@ -725,40 +761,31 @@ u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
return base + 0x40000 + mac_id * 0x4000 -
mac_mode_idx * 0x20000;
else
- return mac_cb->serdes_vaddr + 0x1000
- + (mac_id - DSAF_SERVICE_PORT_NUM_PER_DSAF) * 0x100000;
+ return dsaf_dev->ppe_base + 0x1000;
}
/**
* hns_mac_get_cfg - get mac cfg from dtb or acpi table
* @dsaf_dev: dsa fabric device struct pointer
- * @mac_idx: mac index
- * retuen 0 - success , negative --fail
+ * @mac_cb: mac control block
+ * return: 0 - success, negative - fail
*/
-int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx)
+int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
{
int ret;
u32 mac_mode_idx;
- struct hns_mac_cb *mac_cb = &dsaf_dev->mac_cb[mac_idx];
mac_cb->dsaf_dev = dsaf_dev;
mac_cb->dev = dsaf_dev->dev;
- mac_cb->mac_id = mac_idx;
mac_cb->sys_ctl_vaddr = dsaf_dev->sc_base;
mac_cb->serdes_vaddr = dsaf_dev->sds_base;
- if (dsaf_dev->cpld_base &&
- mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
- mac_cb->cpld_vaddr = dsaf_dev->cpld_base +
- mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET;
- cpld_led_reset(mac_cb);
- }
mac_cb->sfp_prsnt = 0;
mac_cb->txpkt_for_led = 0;
mac_cb->rxpkt_for_led = 0;
- if (mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
mac_cb->mac_type = HNAE_PORT_SERVICE;
else
mac_cb->mac_type = HNAE_PORT_DEBUG;
@@ -774,53 +801,100 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx)
}
mac_mode_idx = (u32)ret;
- hns_mac_get_info(mac_cb, mac_cb->dev->of_node, mac_mode_idx);
+ ret = hns_mac_get_info(mac_cb);
+ if (ret)
+ return ret;
+ cpld_led_reset(mac_cb);
mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
return 0;
}
+static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
+{
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return 1;
+ else
+ return DSAF_MAX_PORT_NUM;
+}
+
/**
* hns_mac_init - init mac
* @dsaf_dev: dsa fabric device struct pointer
- * retuen 0 - success , negative --fail
+ * return: 0 - success, negative - fail
*/
int hns_mac_init(struct dsaf_device *dsaf_dev)
{
- int i;
+ bool found = false;
int ret;
- size_t size;
+ u32 port_id;
+ int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
struct hns_mac_cb *mac_cb;
+ struct fwnode_handle *child;
- size = sizeof(struct hns_mac_cb) * DSAF_MAX_PORT_NUM_PER_CHIP;
- dsaf_dev->mac_cb = devm_kzalloc(dsaf_dev->dev, size, GFP_KERNEL);
- if (!dsaf_dev->mac_cb)
- return -ENOMEM;
+ device_for_each_child_node(dsaf_dev->dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &port_id);
+ if (ret) {
+ dev_err(dsaf_dev->dev,
+ "get reg fail, ret=%d!\n", ret);
+ return ret;
+ }
+ if (port_id >= max_port_num) {
+ dev_err(dsaf_dev->dev,
+ "reg(%u) out of range!\n", port_id);
+ return -EINVAL;
+ }
+ mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
+ GFP_KERNEL);
+ if (!mac_cb)
+ return -ENOMEM;
+ mac_cb->fw_port = child;
+ mac_cb->mac_id = (u8)port_id;
+ dsaf_dev->mac_cb[port_id] = mac_cb;
+ found = true;
+ }
- for (i = 0; i < DSAF_MAX_PORT_NUM_PER_CHIP; i++) {
- ret = hns_mac_get_cfg(dsaf_dev, i);
- if (ret)
- goto free_mac_cb;
+ /* if don't get any port subnode from dsaf node
+ * will init all port then, this is compatible with the old dts
+ */
+ if (!found) {
+ for (port_id = 0; port_id < max_port_num; port_id++) {
+ mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
+ GFP_KERNEL);
+ if (!mac_cb)
+ return -ENOMEM;
+
+ mac_cb->mac_id = port_id;
+ dsaf_dev->mac_cb[port_id] = mac_cb;
+ }
+ }
+ /* init mac_cb for all port */
+ for (port_id = 0; port_id < max_port_num; port_id++) {
+ mac_cb = dsaf_dev->mac_cb[port_id];
+ if (!mac_cb)
+ continue;
- mac_cb = &dsaf_dev->mac_cb[i];
+ ret = hns_mac_get_cfg(dsaf_dev, mac_cb);
+ if (ret)
+ return ret;
ret = hns_mac_init_ex(mac_cb);
if (ret)
- goto free_mac_cb;
+ return ret;
}
return 0;
-
-free_mac_cb:
- dsaf_dev->mac_cb = NULL;
-
- return ret;
}
void hns_mac_uninit(struct dsaf_device *dsaf_dev)
{
- cpld_led_reset(dsaf_dev->mac_cb);
- dsaf_dev->mac_cb = NULL;
+ int i;
+ int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
+
+ for (i = 0; i < max_port_num; i++) {
+ cpld_led_reset(dsaf_dev->mac_cb[i]);
+ dsaf_dev->mac_cb[i] = NULL;
+ }
}
int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
@@ -908,7 +982,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb)
int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status)
{
- if (!mac_cb || !mac_cb->cpld_vaddr)
+ if (!mac_cb || !mac_cb->cpld_ctrl)
return 0;
return cpld_set_led_id(mac_cb, status);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 823b6e78c..97ce9a750 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -10,9 +10,10 @@
#ifndef _HNS_DSAF_MAC_H
#define _HNS_DSAF_MAC_H
-#include <linux/phy.h>
-#include <linux/kernel.h>
#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
#include "hns_dsaf_main.h"
struct dsaf_device;
@@ -310,10 +311,15 @@ struct hns_mac_cb {
struct device *dev;
struct dsaf_device *dsaf_dev;
struct mac_priv priv;
+ struct fwnode_handle *fw_port;
u8 __iomem *vaddr;
- u8 __iomem *cpld_vaddr;
u8 __iomem *sys_ctl_vaddr;
u8 __iomem *serdes_vaddr;
+ struct regmap *serdes_ctrl;
+ struct regmap *cpld_ctrl;
+ u32 cpld_ctrl_reg;
+ u32 port_rst_off;
+ u32 port_mode_off;
struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM];
u8 sfp_prsnt;
u8 cpld_led_value;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 5978a5c8e..1c2ddb25e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -7,27 +7,29 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
#include <linux/vmalloc.h>
+#include "hns_dsaf_mac.h"
#include "hns_dsaf_main.h"
-#include "hns_dsaf_rcb.h"
#include "hns_dsaf_ppe.h"
-#include "hns_dsaf_mac.h"
+#include "hns_dsaf_rcb.h"
const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
[DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
[DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
[DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
+ [DSAF_MODE_DISABLE_SP] = "single-port",
};
int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
@@ -35,8 +37,13 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
int ret, i;
u32 desc_num;
u32 buf_size;
+ u32 reset_offset = 0;
+ u32 res_idx = 0;
const char *mode_str;
+ struct regmap *syscon;
+ struct resource *res;
struct device_node *np = dsaf_dev->dev->of_node;
+ struct platform_device *pdev = to_platform_device(dsaf_dev->dev);
if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
dsaf_dev->dsaf_ver = AE_VERSION_1;
@@ -73,42 +80,68 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
else
dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
- dsaf_dev->sc_base = of_iomap(np, 0);
- if (!dsaf_dev->sc_base) {
- dev_err(dsaf_dev->dev,
- "%s of_iomap 0 fail!\n", dsaf_dev->ae_dev.name);
- ret = -ENOMEM;
- goto unmap_base_addr;
- }
+ syscon = syscon_node_to_regmap(
+ of_parse_phandle(np, "subctrl-syscon", 0));
+ if (IS_ERR_OR_NULL(syscon)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "subctrl info is needed!\n");
+ return -ENOMEM;
+ }
+ dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (!dsaf_dev->sc_base) {
+ dev_err(dsaf_dev->dev, "subctrl can not map!\n");
+ return -ENOMEM;
+ }
- dsaf_dev->sds_base = of_iomap(np, 1);
- if (!dsaf_dev->sds_base) {
- dev_err(dsaf_dev->dev,
- "%s of_iomap 1 fail!\n", dsaf_dev->ae_dev.name);
- ret = -ENOMEM;
- goto unmap_base_addr;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n");
+ return -ENOMEM;
+ }
+ dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, res);
+ if (!dsaf_dev->sds_base) {
+ dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n");
+ return -ENOMEM;
+ }
+ } else {
+ dsaf_dev->sub_ctrl = syscon;
}
- dsaf_dev->ppe_base = of_iomap(np, 2);
- if (!dsaf_dev->ppe_base) {
- dev_err(dsaf_dev->dev,
- "%s of_iomap 2 fail!\n", dsaf_dev->ae_dev.name);
- ret = -ENOMEM;
- goto unmap_base_addr;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base");
+ if (!res) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "ppe-base info is needed!\n");
+ return -ENOMEM;
+ }
}
-
- dsaf_dev->io_base = of_iomap(np, 3);
- if (!dsaf_dev->io_base) {
- dev_err(dsaf_dev->dev,
- "%s of_iomap 3 fail!\n", dsaf_dev->ae_dev.name);
- ret = -ENOMEM;
- goto unmap_base_addr;
+ dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res);
+ if (!dsaf_dev->ppe_base) {
+ dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n");
+ return -ENOMEM;
+ }
+ dsaf_dev->ppe_paddr = res->start;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "dsaf-base");
+ if (!res) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx);
+ if (!res) {
+ dev_err(dsaf_dev->dev,
+ "dsaf-base info is needed!\n");
+ return -ENOMEM;
+ }
+ }
+ dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (!dsaf_dev->io_base) {
+ dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n");
+ return -ENOMEM;
+ }
}
- dsaf_dev->cpld_base = of_iomap(np, 4);
- if (!dsaf_dev->cpld_base)
- dev_dbg(dsaf_dev->dev, "NO CPLD ADDR");
-
ret = of_property_read_u32(np, "desc-num", &desc_num);
if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT ||
desc_num > HNS_DSAF_MAX_DESC_CNT) {
@@ -118,6 +151,13 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
}
dsaf_dev->desc_num = desc_num;
+ ret = of_property_read_u32(np, "reset-field-offset", &reset_offset);
+ if (ret < 0) {
+ dev_dbg(dsaf_dev->dev,
+ "get reset-field-offset fail, ret=%d!\r\n", ret);
+ }
+ dsaf_dev->reset_offset = reset_offset;
+
ret = of_property_read_u32(np, "buf-size", &buf_size);
if (ret < 0) {
dev_err(dsaf_dev->dev,
@@ -149,8 +189,6 @@ unmap_base_addr:
iounmap(dsaf_dev->sds_base);
if (dsaf_dev->sc_base)
iounmap(dsaf_dev->sc_base);
- if (dsaf_dev->cpld_base)
- iounmap(dsaf_dev->cpld_base);
return ret;
}
@@ -167,9 +205,6 @@ static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev)
if (dsaf_dev->sc_base)
iounmap(dsaf_dev->sc_base);
-
- if (dsaf_dev->cpld_base)
- iounmap(dsaf_dev->cpld_base);
}
/**
@@ -217,9 +252,7 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
u32 q_id, q_num_per_port;
u32 i;
- hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
- HNS_DSAF_COMM_SERVICE_NW_IDX,
- &max_vfn, &max_q_per_vf);
+ hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);
q_num_per_port = max_vfn * max_q_per_vf;
for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) {
@@ -239,9 +272,7 @@ static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev)
if (AE_IS_VER1(dsaf_dev->dsaf_ver))
return;
- hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
- HNS_DSAF_COMM_SERVICE_NW_IDX,
- &max_vfn, &max_q_per_vf);
+ hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);
q_num_per_port = max_vfn * max_q_per_vf;
for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) {
@@ -712,13 +743,15 @@ static void hns_dsaf_tbl_tcam_data_ucast_pul(
void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
{
- dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en);
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
+ dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG,
+ DSAF_CFG_MIX_MODE_S, !!en);
}
void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
{
if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
- dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG)
+ dsaf_dev->mac_cb[mac_id]->mac_type == HNAE_PORT_DEBUG)
return;
dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
@@ -1022,12 +1055,52 @@ static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
* @mac_cb: mac control block
*/
static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev,
- int mac_id, int en)
+ int mac_id, int tc_en)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, tc_en);
+}
+
+static void hns_dsaf_set_pfc_pause(struct dsaf_device *dsaf_dev,
+ int mac_id, int tx_en, int rx_en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ if (!tx_en || !rx_en)
+ dev_err(dsaf_dev->dev, "dsaf v1 can not close pfc!\n");
+
+ return;
+ }
+
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_PFC_PAUSE_RX_EN_B, !!rx_en);
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_PFC_PAUSE_TX_EN_B, !!tx_en);
+}
+
+int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 en)
{
- if (!en)
- dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0);
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ if (!en)
+ dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n");
+
+ return -EINVAL;
+ }
+
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_MAC_PAUSE_RX_EN_B, !!en);
+
+ return 0;
+}
+
+void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 *en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ *en = 1;
else
- dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0xff);
+ *en = dsaf_get_dev_bit(dsaf_dev,
+ DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_MAC_PAUSE_RX_EN_B);
}
/**
@@ -1039,6 +1112,7 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
{
u32 i;
u32 o_dsaf_cfg;
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG);
dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en);
@@ -1064,8 +1138,10 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
/*set dsaf pfc to 0 for parsing rx pause*/
- for (i = 0; i < DSAF_COMM_CHN; i++)
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0);
+ hns_dsaf_set_pfc_pause(dsaf_dev, i, is_ver1, is_ver1);
+ }
/*msk and clr exception irqs */
for (i = 0; i < DSAF_COMM_CHN; i++) {
@@ -1264,6 +1340,9 @@ static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
u32 i;
int ret;
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return 0;
+
ret = hns_dsaf_init_hw(dsaf_dev);
if (ret)
return ret;
@@ -2013,6 +2092,8 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
{
struct dsaf_hw_stats *hw_stats
= &dsaf_dev->hw_stats[node_num];
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+ u32 reg_tmp;
hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
@@ -2022,8 +2103,12 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num);
hw_stats->rx_pkt_id += dsaf_read_dev(dsaf_dev,
DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num);
- hw_stats->rx_pause_frame += dsaf_read_dev(dsaf_dev,
- DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + 0x80 * (u64)node_num);
+
+ reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG :
+ DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG;
+ hw_stats->rx_pause_frame +=
+ dsaf_read_dev(dsaf_dev, reg_tmp + 0x80 * (u64)node_num);
+
hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev,
DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num);
hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev,
@@ -2056,6 +2141,8 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
u32 i = 0;
u32 j;
u32 *p = data;
+ u32 reg_tmp;
+ bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver);
/* dsaf common registers */
p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG);
@@ -2120,8 +2207,9 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80);
p[190 + i] = dsaf_read_dev(ddev,
DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80);
- p[193 + i] = dsaf_read_dev(ddev,
- DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + j * 0x80);
+ reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG :
+ DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG;
+ p[193 + i] = dsaf_read_dev(ddev, reg_tmp + j * 0x80);
p[196 + i] = dsaf_read_dev(ddev,
DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80);
p[199 + i] = dsaf_read_dev(ddev,
@@ -2368,8 +2456,11 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+ if (!is_ver1)
+ p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
+
/* mark end of dsaf regs */
- for (i = 498; i < 504; i++)
+ for (i = 499; i < 504; i++)
p[i] = 0xdddddddd;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 5fea226ef..f0502ba0a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -41,6 +41,7 @@ struct hns_mac_cb;
#define DSAF_STATIC_NUM 28
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
enum hal_dsaf_mode {
HRD_DSAF_NO_DSAF_MODE = 0x0,
@@ -117,6 +118,7 @@ enum dsaf_mode {
DSAF_MODE_ENABLE_32VM, /**< en DSAF-mode, support 32 VM */
DSAF_MODE_ENABLE_128VM, /**< en DSAF-mode, support 128 VM */
DSAF_MODE_ENABLE, /**< before is enable DSAF mode*/
+ DSAF_MODE_DISABLE_SP, /**< non-dsaf, single-port mode */
DSAF_MODE_DISABLE_FIX, /**< non-dsaf, fixed to queue */
DSAF_MODE_DISABLE_2PORT_8VM, /**< non-dsaf, 2port 8VM */
DSAF_MODE_DISABLE_2PORT_16VM, /**< non-dsaf, 2port 16VM */
@@ -275,10 +277,12 @@ struct dsaf_device {
u8 __iomem *sds_base;
u8 __iomem *ppe_base;
u8 __iomem *io_base;
- u8 __iomem *cpld_base;
+ struct regmap *sub_ctrl;
+ phys_addr_t ppe_paddr;
u32 desc_num; /* desc num per queue*/
u32 buf_size; /* ring buffer size */
+ u32 reset_offset; /* reset field offset in sub sysctrl */
int buf_size_type; /* ring buffer size-type */
enum dsaf_mode dsaf_mode; /* dsaf mode */
enum hal_dsaf_mode dsaf_en;
@@ -287,7 +291,7 @@ struct dsaf_device {
struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
- struct hns_mac_cb *mac_cb;
+ struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM];
struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
struct dsaf_int_stat int_stat;
@@ -359,14 +363,6 @@ static inline void hns_dsaf_tbl_line_addr_cfg(struct dsaf_device *dsaf_dev,
tab_line_addr);
}
-static inline int hns_dsaf_get_comm_idx_by_port(int port)
-{
- if ((port < DSAF_COMM_CHN) || (port == DSAF_MAX_PORT_NUM_PER_CHIP))
- return 0;
- else
- return (port - DSAF_COMM_CHN + 1);
-}
-
static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
struct hnae_handle *handle)
{
@@ -417,6 +413,11 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port);
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
int hns_dsaf_get_regs_count(void);
void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
+
+void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 *en);
+int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 en);
void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
#endif /* __HNS_DSAF_MAIN_H__ */
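The rx-pause accessors declared just above are meant to be used as a get/set pair; a hedged usage fragment (dsaf_dev, mac_id and the surrounding error handling are assumed from the caller's context):

/* Hedged usage fragment for the new rx-pause accessors. */
u32 rx_pause_en;
int ret;

hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_id, &rx_pause_en);
if (!rx_pause_en)
	ret = hns_dsaf_set_rx_mac_pause_en(dsaf_dev, mac_id, 1);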
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index e69b02287..a837bb9e3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -7,10 +7,30 @@
* (at your option) any later version.
*/
-#include "hns_dsaf_misc.h"
#include "hns_dsaf_mac.h"
-#include "hns_dsaf_reg.h"
+#include "hns_dsaf_misc.h"
#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_reg.h"
+
+static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
+{
+ if (dsaf_dev->sub_ctrl)
+ dsaf_write_syscon(dsaf_dev->sub_ctrl, reg, val);
+ else
+ dsaf_write_reg(dsaf_dev->sc_base, reg, val);
+}
+
+static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
+{
+ u32 ret;
+
+ if (dsaf_dev->sub_ctrl)
+ ret = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg);
+ else
+ ret = dsaf_read_reg(dsaf_dev->sc_base, reg);
+
+ return ret;
+}
void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
u16 speed, int data)
@@ -22,8 +42,8 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
pr_err("sfp_led_opt mac_dev is null!\n");
return;
}
- if (!mac_cb->cpld_vaddr) {
- dev_err(mac_cb->dev, "mac_id=%d, cpld_vaddr is null !\n",
+ if (!mac_cb->cpld_ctrl) {
+ dev_err(mac_cb->dev, "mac_id=%d, cpld syscon is null!\n",
mac_cb->mac_id);
return;
}
@@ -40,21 +60,24 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
dsaf_set_bit(value, DSAF_LED_DATA_B, data);
if (value != mac_cb->cpld_led_value) {
- dsaf_write_b(mac_cb->cpld_vaddr, value);
+ dsaf_write_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg, value);
mac_cb->cpld_led_value = value;
}
} else {
- dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ CPLD_LED_DEFAULT_VALUE);
mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
}
}
void cpld_led_reset(struct hns_mac_cb *mac_cb)
{
- if (!mac_cb || !mac_cb->cpld_vaddr)
+ if (!mac_cb || !mac_cb->cpld_ctrl)
return;
- dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ CPLD_LED_DEFAULT_VALUE);
mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
}
@@ -63,15 +86,19 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb,
{
switch (status) {
case HNAE_LED_ACTIVE:
- mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr);
+ mac_cb->cpld_led_value =
+ dsaf_read_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg);
dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
CPLD_LED_ON_VALUE);
- dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ mac_cb->cpld_led_value);
return 2;
case HNAE_LED_INACTIVE:
dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
CPLD_LED_DEFAULT_VALUE);
- dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ mac_cb->cpld_led_value);
break;
default:
break;
@@ -95,10 +122,8 @@ void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val)
nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG;
}
- dsaf_write_reg(dsaf_dev->sc_base, xbar_reg_addr,
- RESET_REQ_OR_DREQ);
- dsaf_write_reg(dsaf_dev->sc_base, nt_reg_addr,
- RESET_REQ_OR_DREQ);
+ dsaf_write_sub(dsaf_dev, xbar_reg_addr, RESET_REQ_OR_DREQ);
+ dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ);
}
void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
@@ -110,14 +135,14 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
return;
reg_val |= RESET_REQ_OR_DREQ;
- reg_val |= 0x2082082 << port;
+ reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off;
if (val == 0)
reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
- dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
@@ -129,68 +154,63 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
if (port >= DSAF_XGE_NUM)
return;
- reg_val |= XGMAC_TRX_CORE_SRST_M << port;
+ reg_val |= XGMAC_TRX_CORE_SRST_M
+ << dsaf_dev->mac_cb[port]->port_rst_off;
if (val == 0)
reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
- dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
{
u32 reg_val_1;
u32 reg_val_2;
+ u32 port_rst_off;
if (port >= DSAF_GE_NUM)
return;
- if (port < DSAF_SERVICE_NW_NUM) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
reg_val_1 = 0x1 << port;
+ port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
/* there is a difference between V1 and V2 in this register */
if (AE_IS_VER1(dsaf_dev->dsaf_ver))
- reg_val_2 = 0x1041041 << port;
+ reg_val_2 = 0x1041041 << port_rst_off;
else
- reg_val_2 = 0x2082082 << port;
+ reg_val_2 = 0x2082082 << port_rst_off;
if (val == 0) {
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_REQ1_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
reg_val_1);
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_REQ0_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ0_REG,
reg_val_2);
} else {
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_DREQ0_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ0_REG,
reg_val_2);
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG,
reg_val_1);
}
} else {
- reg_val_1 = 0x15540 << (port - 6);
- reg_val_2 = 0x100 << (port - 6);
+ reg_val_1 = 0x15540 << dsaf_dev->reset_offset;
+ reg_val_2 = 0x100 << dsaf_dev->reset_offset;
if (val == 0) {
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_REQ1_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
reg_val_1);
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_PPE_RESET_REQ_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_REQ_REG,
reg_val_2);
} else {
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG,
reg_val_1);
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_PPE_RESET_DREQ_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_DREQ_REG,
reg_val_2);
}
}
@@ -201,24 +221,23 @@ void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
u32 reg_val = 0;
u32 reg_addr;
- reg_val |= RESET_REQ_OR_DREQ << port;
+ reg_val |= RESET_REQ_OR_DREQ << dsaf_dev->mac_cb[port]->port_rst_off;
if (val == 0)
reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
- dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
{
- int comm_index = ppe_common->comm_index;
struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
u32 reg_val;
u32 reg_addr;
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
reg_val = RESET_REQ_OR_DREQ;
if (val == 0)
reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG;
@@ -226,7 +245,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG;
} else {
- reg_val = 0x100 << (comm_index - 1);
+ reg_val = 0x100 << dsaf_dev->reset_offset;
if (val == 0)
reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
@@ -234,7 +253,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
}
- dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
/**
@@ -246,36 +265,45 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
{
u32 mode;
u32 reg;
- u32 shift;
bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
- void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
int mac_id = mac_cb->mac_id;
- phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
+ phy_interface_t phy_if;
- if (is_ver1 && (mac_id >= 6 && mac_id <= 7)) {
- phy_if = PHY_INTERFACE_MODE_SGMII;
- } else if (mac_id >= 0 && mac_id <= 3) {
- reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG;
- mode = dsaf_read_reg(sys_ctl_vaddr, reg);
- /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */
- shift = is_ver1 ? 0 : mac_id;
- if (dsaf_get_bit(mode, shift))
- phy_if = PHY_INTERFACE_MODE_XGMII;
+ if (is_ver1) {
+ if (HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev))
+ return PHY_INTERFACE_MODE_SGMII;
+
+ if (mac_id >= 0 && mac_id <= 3)
+ reg = HNS_MAC_HILINK4_REG;
else
- phy_if = PHY_INTERFACE_MODE_SGMII;
- } else if (mac_id >= 4 && mac_id <= 7) {
- reg = is_ver1 ? HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG;
- mode = dsaf_read_reg(sys_ctl_vaddr, reg);
- /* mac_id 4, 5, 6, 7 ---> hilink3 lane 2, 3, 0, 1 */
- shift = is_ver1 ? 0 : mac_id <= 5 ? mac_id - 2 : mac_id - 6;
- if (dsaf_get_bit(mode, shift))
- phy_if = PHY_INTERFACE_MODE_XGMII;
+ reg = HNS_MAC_HILINK3_REG;
+ } else {
+ if (!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev) && mac_id <= 3)
+ reg = HNS_MAC_HILINK4V2_REG;
else
- phy_if = PHY_INTERFACE_MODE_SGMII;
+ reg = HNS_MAC_HILINK3V2_REG;
}
+
+ mode = dsaf_read_sub(mac_cb->dsaf_dev, reg);
+ if (dsaf_get_bit(mode, mac_cb->port_mode_off))
+ phy_if = PHY_INTERFACE_MODE_XGMII;
+ else
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+
return phy_if;
}
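The lane-to-port mapping that the deleted code computed inline (hilink4 lanes 0-3 for MACs 0-3; hilink3 lanes 2, 3, 0, 1 for MACs 4-7) now arrives pre-computed in mac_cb->port_mode_off. A hypothetical sketch of how that value could be derived, recorded only to preserve the old mapping:

/* Hypothetical: derive port_mode_off from the mapping the old code used. */
static u8 hns_port_mode_off(bool is_ver1, int mac_id)
{
	if (is_ver1)
		return 0;				/* v1 always tests bit 0 */
	if (mac_id <= 3)
		return mac_id;				/* hilink4 lanes 0..3 */
	return mac_id <= 5 ? mac_id - 2 : mac_id - 6;	/* hilink3 lanes 2,3,0,1 */
}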
+int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+ if (!mac_cb->cpld_ctrl)
+ return -ENODEV;
+
+ *sfp_prsnt = !dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg
+ + MAC_SFP_PORT_OFFSET);
+
+ return 0;
+}
+
/**
* hns_mac_config_sds_loopback - set loop back for serdes
* @mac_cb: mac control block
@@ -312,7 +340,14 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
pr_info("no sfp in this eth\n");
}
- dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+ if (mac_cb->serdes_ctrl) {
+ u32 origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
+
+ dsaf_set_field(origin, 1ull << 10, 10, !!en);
+ dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
+ } else {
+ dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+ }
return 0;
}
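dsaf_write_sub()/dsaf_read_sub() at the top of this file keep every reset helper agnostic about whether the sub-control space is a syscon regmap or plain MMIO, and the loopback hunk above applies the same split to a read-modify-write. A minimal sketch of that pattern, assuming reg holds a sub-control offset:

/* Minimal sketch: regmap-or-MMIO read-modify-write via the new wrappers. */
u32 val = dsaf_read_sub(dsaf_dev, reg);

dsaf_set_field(val, 1ull << 10, 10, !!en);	/* serdes loopback bit */
dsaf_write_sub(dsaf_dev, reg, val);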
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 5b7ae5ff4..8cd151a52 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -61,22 +61,10 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
}
}
-static void __iomem *hns_ppe_common_get_ioaddr(
- struct ppe_common_cb *ppe_common)
+static void __iomem *
+hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
{
- void __iomem *base_addr;
-
- int idx = ppe_common->comm_index;
-
- if (idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
- base_addr = ppe_common->dsaf_dev->ppe_base
- + PPE_COMMON_REG_OFFSET;
- else
- base_addr = ppe_common->dsaf_dev->sds_base
- + (idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
- + PPE_COMMON_REG_OFFSET;
-
- return base_addr;
+ return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
}
/**
@@ -90,7 +78,7 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
struct ppe_common_cb *ppe_common;
int ppe_num;
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
ppe_num = HNS_PPE_SERVICE_NW_ENGINE_NUM;
else
ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM;
@@ -103,7 +91,7 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
ppe_common->ppe_num = ppe_num;
ppe_common->dsaf_dev = dsaf_dev;
ppe_common->comm_index = comm_index;
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
ppe_common->ppe_mode = PPE_COMMON_MODE_SERVICE;
else
ppe_common->ppe_mode = PPE_COMMON_MODE_DEBUG;
@@ -124,32 +112,8 @@ void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
int ppe_idx)
{
- void __iomem *base_addr;
- int common_idx = ppe_common->comm_index;
-
- if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
- base_addr = ppe_common->dsaf_dev->ppe_base +
- ppe_idx * PPE_REG_OFFSET;
-
- } else {
- base_addr = ppe_common->dsaf_dev->sds_base +
- (common_idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET;
- }
-
- return base_addr;
-}
-
-static int hns_ppe_get_port(struct ppe_common_cb *ppe_common, int idx)
-{
- int port;
-
- if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE)
- port = idx;
- else
- port = HNS_PPE_SERVICE_NW_ENGINE_NUM
- + ppe_common->comm_index - 1;
- return port;
+ return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
}
static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
@@ -164,7 +128,6 @@ static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
ppe_cb->next = NULL;
ppe_cb->ppe_common_cb = ppe_common;
ppe_cb->index = i;
- ppe_cb->port = hns_ppe_get_port(ppe_common, i);
ppe_cb->io_base = hns_ppe_get_iobase(ppe_common, i);
ppe_cb->virq = 0;
}
@@ -318,7 +281,7 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
{
struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb;
- u32 port = ppe_cb->port;
+ u32 port = ppe_cb->index;
struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev;
int i;
@@ -332,10 +295,12 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
/* clr and msk except irq*/
hns_ppe_exc_irq_en(ppe_cb, 0);
- if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG)
+ if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG) {
hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE);
- else
+ dsaf_write_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG, 0);
+ } else {
hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE);
+ }
hns_ppe_checksum_hw(ppe_cb, 0xffffffff);
hns_ppe_cnt_clr_ce(ppe_cb);
@@ -375,7 +340,8 @@ void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
u32 i;
for (i = 0; i < ppe_common->ppe_num; i++) {
- hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
+ if (ppe_common->dsaf_dev->mac_cb[i])
+ hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
memset(&ppe_common->ppe_cb[i], 0, sizeof(struct hns_ppe_cb));
}
}
@@ -408,8 +374,11 @@ void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index)
if (ret)
return;
- for (i = 0; i < ppe_common->ppe_num; i++)
- hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+ for (i = 0; i < ppe_common->ppe_num; i++) {
+ /* we only need to initialize the PPE when the port exists */
+ if (dsaf_dev->mac_cb[i])
+ hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+ }
ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index e9c0ec2fa..9d8e643e8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -80,7 +80,6 @@ struct hns_ppe_cb {
struct hns_ppe_hw_stats hw_stats;
u8 index; /* index in a ppe common device */
- u8 port; /* port id in dsaf */
void __iomem *io_base;
int virq;
u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 28ee26e5c..4ef6d23d9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -270,7 +270,7 @@ static void hns_rcb_set_port_timeout(
static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
- if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
return HNS_RCB_SERVICE_NW_ENGINE_NUM;
else
return HNS_RCB_DEBUG_NW_ENGINE_NUM;
@@ -430,36 +430,20 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
static int hns_rcb_get_port_in_comm(
struct rcb_common_cb *rcb_common, int ring_idx)
{
- int comm_index = rcb_common->comm_index;
- int port;
- int q_num;
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
- q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
- port = ring_idx / q_num;
- } else {
- port = 0; /* config debug-ports port_id_in_comm to 0*/
- }
-
- return port;
+ return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}
#define SERVICE_RING_IRQ_IDX(v1) \
((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
-#define DEBUG_RING_IRQ_IDX(v1) \
- ((v1) ? HNS_DEBUG_RING_IRQ_IDX : HNSV2_DEBUG_RING_IRQ_IDX)
-#define DEBUG_RING_IRQ_OFFSET(v1) \
- ((v1) ? HNS_DEBUG_RING_IRQ_OFFSET : HNSV2_DEBUG_RING_IRQ_OFFSET)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
- int comm_index = rcb_common->comm_index;
bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
return SERVICE_RING_IRQ_IDX(is_ver1);
else
- return DEBUG_RING_IRQ_IDX(is_ver1) +
- (comm_index - 1) * DEBUG_RING_IRQ_OFFSET(is_ver1);
+ return HNS_DEBUG_RING_IRQ_IDX;
}
#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
@@ -549,7 +533,7 @@ int hns_rcb_set_coalesce_usecs(
return 0;
if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
- if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
dev_err(rcb_common->dsaf_dev->dev,
"error: not support coalesce_usecs setting!\n");
return -EINVAL;
@@ -601,113 +585,82 @@ int hns_rcb_set_coalesced_frames(
*@max_vfn: max vfn number
*@max_q_per_vf: max ring number per vm
*/
-void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
- u16 *max_vfn, u16 *max_q_per_vf)
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
+ u16 *max_q_per_vf)
{
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
- switch (dsaf_mode) {
- case DSAF_MODE_DISABLE_6PORT_0VM:
- *max_vfn = 1;
- *max_q_per_vf = 16;
- break;
- case DSAF_MODE_DISABLE_FIX:
- *max_vfn = 1;
- *max_q_per_vf = 1;
- break;
- case DSAF_MODE_DISABLE_2PORT_64VM:
- *max_vfn = 64;
- *max_q_per_vf = 1;
- break;
- case DSAF_MODE_DISABLE_6PORT_16VM:
- *max_vfn = 16;
- *max_q_per_vf = 1;
- break;
- default:
- *max_vfn = 1;
- *max_q_per_vf = 16;
- break;
- }
- } else {
+ switch (dsaf_mode) {
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ *max_vfn = 1;
+ *max_q_per_vf = 16;
+ break;
+ case DSAF_MODE_DISABLE_FIX:
+ case DSAF_MODE_DISABLE_SP:
*max_vfn = 1;
*max_q_per_vf = 1;
+ break;
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ *max_vfn = 64;
+ *max_q_per_vf = 1;
+ break;
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ *max_vfn = 16;
+ *max_q_per_vf = 1;
+ break;
+ default:
+ *max_vfn = 1;
+ *max_q_per_vf = 16;
+ break;
}
}
-int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index)
+int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
- switch (dsaf_dev->dsaf_mode) {
- case DSAF_MODE_ENABLE_FIX:
- return 1;
-
- case DSAF_MODE_DISABLE_FIX:
- return 6;
-
- case DSAF_MODE_ENABLE_0VM:
- return 32;
-
- case DSAF_MODE_DISABLE_6PORT_0VM:
- case DSAF_MODE_ENABLE_16VM:
- case DSAF_MODE_DISABLE_6PORT_2VM:
- case DSAF_MODE_DISABLE_6PORT_16VM:
- case DSAF_MODE_DISABLE_6PORT_4VM:
- case DSAF_MODE_ENABLE_8VM:
- return 96;
-
- case DSAF_MODE_DISABLE_2PORT_16VM:
- case DSAF_MODE_DISABLE_2PORT_8VM:
- case DSAF_MODE_ENABLE_32VM:
- case DSAF_MODE_DISABLE_2PORT_64VM:
- case DSAF_MODE_ENABLE_128VM:
- return 128;
-
- default:
- dev_warn(dsaf_dev->dev,
- "get ring num fail,use default!dsaf_mode=%d\n",
- dsaf_dev->dsaf_mode);
- return 128;
- }
- } else {
+ switch (dsaf_dev->dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ case DSAF_MODE_DISABLE_SP:
return 1;
+
+ case DSAF_MODE_DISABLE_FIX:
+ return 6;
+
+ case DSAF_MODE_ENABLE_0VM:
+ return 32;
+
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ case DSAF_MODE_ENABLE_8VM:
+ return 96;
+
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ case DSAF_MODE_ENABLE_128VM:
+ return 128;
+
+ default:
+ dev_warn(dsaf_dev->dev,
+ "get ring num fail,use default!dsaf_mode=%d\n",
+ dsaf_dev->dsaf_mode);
+ return 128;
}
}
-void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev,
- int comm_index)
+void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
- void __iomem *base_addr;
-
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
- base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
- else
- base_addr = dsaf_dev->sds_base
- + (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
- + RCB_COMMON_REG_OFFSET;
+ struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
- return base_addr;
+ return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
}
-static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev,
- int comm_index)
+static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
{
- struct device_node *np = dsaf_dev->dev->of_node;
- phys_addr_t phy_addr;
- const __be32 *tmp_addr;
- u64 addr_offset = 0;
- u64 size = 0;
- int index = 0;
-
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
- index = 2;
- addr_offset = RCB_COMMON_REG_OFFSET;
- } else {
- index = 1;
- addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET +
- RCB_COMMON_REG_OFFSET;
- }
- tmp_addr = of_get_address(np, index, &size, NULL);
- phy_addr = of_translate_address(np, tmp_addr);
- return phy_addr + addr_offset;
+ struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
+
+ return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
}
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
@@ -717,7 +670,7 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
u16 max_vfn;
u16 max_q_per_vf;
- int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index);
+ int ring_num = hns_rcb_get_ring_num(dsaf_dev);
rcb_common =
devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
@@ -732,12 +685,12 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
rcb_common->desc_num = dsaf_dev->desc_num;
- hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
+ hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
rcb_common->max_vfn = max_vfn;
rcb_common->max_q_per_vf = max_q_per_vf;
- rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index);
- rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index);
+ rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
+ rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);
dsaf_dev->rcb_common[comm_index] = rcb_common;
return 0;
@@ -932,7 +885,7 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
u32 *regs = data;
bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
- bool is_dbg = (rcb_com->comm_index != HNS_DSAF_COMM_SERVICE_NW_IDX);
+ bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
u32 reg_tmp;
u32 reg_num_tmp;
u32 i = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index eb61014ad..bd54dac82 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -111,7 +111,7 @@ void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
void hns_rcb_start(struct hnae_queue *q, u32 val);
void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
-void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
u16 *max_vfn, u16 *max_q_per_vf);
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 7d7204f45..7c3b5103d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -10,25 +10,20 @@
#ifndef _DSAF_REG_H_
#define _DSAF_REG_H_
-#define HNS_DEBUG_RING_IRQ_IDX 55
-#define HNS_SERVICE_RING_IRQ_IDX 59
-#define HNS_DEBUG_RING_IRQ_OFFSET 2
-#define HNSV2_DEBUG_RING_IRQ_IDX 409
-#define HNSV2_SERVICE_RING_IRQ_IDX 25
-#define HNSV2_DEBUG_RING_IRQ_OFFSET 9
-
-#define DSAF_MAX_PORT_NUM_PER_CHIP 8
-#define DSAF_SERVICE_PORT_NUM_PER_DSAF 6
-#define DSAF_MAX_VM_NUM 128
-
-#define DSAF_COMM_DEV_NUM 3
-#define DSAF_PPE_INODE_BASE 6
-#define HNS_DSAF_COMM_SERVICE_NW_IDX 0
+#include <linux/regmap.h>
+#define HNS_DEBUG_RING_IRQ_IDX 0
+#define HNS_SERVICE_RING_IRQ_IDX 59
+#define HNSV2_SERVICE_RING_IRQ_IDX 25
+
+#define DSAF_MAX_PORT_NUM 6
+#define DSAF_MAX_VM_NUM 128
+
+#define DSAF_COMM_DEV_NUM 1
+#define DSAF_PPE_INODE_BASE 6
#define DSAF_DEBUG_NW_NUM 2
#define DSAF_SERVICE_NW_NUM 6
#define DSAF_COMM_CHN DSAF_SERVICE_NW_NUM
#define DSAF_GE_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
-#define DSAF_PORT_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
#define DSAF_XGE_NUM DSAF_SERVICE_NW_NUM
#define DSAF_PORT_TYPE_NUM 3
#define DSAF_NODE_NUM 18
@@ -137,6 +132,7 @@
#define DSAF_PPE_INT_STS_0_REG 0x1E0
#define DSAF_ROCEE_INT_STS_0_REG 0x200
#define DSAFV2_SERDES_LBK_0_REG 0x220
+#define DSAF_PAUSE_CFG_REG 0x240
#define DSAF_PPE_QID_CFG_0_REG 0x300
#define DSAF_SW_PORT_TYPE_0_REG 0x320
#define DSAF_STP_PORT_TYPE_0_REG 0x340
@@ -155,6 +151,7 @@
#define DSAF_INODE_FINAL_IN_PKT_NUM_0_REG 0x1030
#define DSAF_INODE_SBM_PID_NUM_0_REG 0x1038
#define DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x103C
+#define DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x1024
#define DSAF_INODE_SBM_RELS_NUM_0_REG 0x104C
#define DSAF_INODE_SBM_DROP_NUM_0_REG 0x1050
#define DSAF_INODE_CRC_FALSE_NUM_0_REG 0x1054
@@ -711,6 +708,10 @@
#define DSAF_PFC_UNINT_CNT_M ((1ULL << 9) - 1)
#define DSAF_PFC_UNINT_CNT_S 0
+#define DSAF_MAC_PAUSE_RX_EN_B 2
+#define DSAF_PFC_PAUSE_RX_EN_B 1
+#define DSAF_PFC_PAUSE_TX_EN_B 0
+
#define DSAF_PPE_QID_CFG_M 0xFF
#define DSAF_PPE_QID_CFG_S 0
@@ -988,6 +989,19 @@ static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
return readl(reg_addr + reg);
}
+static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
+{
+ regmap_write(base, reg, value);
+}
+
+static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg)
+{
+ unsigned int val;
+
+ regmap_read(base, reg, &val);
+ return val;
+}
+
#define dsaf_read_dev(a, reg) \
dsaf_read_reg((a)->io_base, (reg))
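Note that dsaf_read_syscon() above drops the int that regmap_read() returns, so a failed read silently yields whatever was left in val. A hedged alternative that surfaces the error, shown only for contrast with the patch's choice:

/* Hedged alternative: let callers see regmap_read() failures. */
static inline int dsaf_read_syscon_checked(struct regmap *base, u32 reg,
					   u32 *val)
{
	return regmap_read(base, reg, val);	/* 0 on success, -errno otherwise */
}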
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 687204b78..e621636e6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1275,7 +1275,7 @@ void hns_nic_net_reinit(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
- priv->netdev->trans_start = jiffies;
+ netif_trans_update(priv->netdev);
while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
usleep_range(1000, 2000);
@@ -1376,7 +1376,7 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
ret = hns_nic_net_xmit_hw(ndev, skb,
&tx_ring_data(priv, skb->queue_mapping));
if (ret == NETDEV_TX_OK) {
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
ndev->stats.tx_bytes += skb->len;
ndev->stats.tx_packets++;
}
@@ -1648,7 +1648,7 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
rtnl_lock();
/* put off any impending NetWatchDogTimeout */
- priv->netdev->trans_start = jiffies;
+ netif_trans_update(priv->netdev);
if (type == HNAE_PORT_DEBUG) {
hns_nic_net_reinit(priv->netdev);
@@ -1873,6 +1873,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
struct net_device *ndev;
struct hns_nic_priv *priv;
struct device_node *node = dev->of_node;
+ u32 port_id;
int ret;
ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
@@ -1896,10 +1897,18 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
dev_err(dev, "not find ae-handle\n");
goto out_read_prop_fail;
}
-
- ret = of_property_read_u32(node, "port-id", &priv->port_id);
- if (ret)
- goto out_read_prop_fail;
+ /* try to find port-idx-in-ae first */
+ ret = of_property_read_u32(node, "port-idx-in-ae", &port_id);
+ if (ret) {
+ /* fall back to "port-id" for compatibility with old devicetrees */
+ ret = of_property_read_u32(node, "port-id", &port_id);
+ if (ret)
+ goto out_read_prop_fail;
+ /* for old dts, we need to calculate the port offset */
+ port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
+ : port_id - HNS_SRV_OFFSET;
+ }
+ priv->port_id = port_id;
hns_init_mac_addr(ndev);
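With HNS_SRV_OFFSET = 2 and HNS_DEBUG_OFFSET = 6 (added to hns_enet.h below), the compatibility remap above works out as:

/*
 * Worked example of the legacy "port-id" remap:
 *   old port-id 0..1 (debug ports)   -> port_id 6..7 (+ HNS_DEBUG_OFFSET)
 *   old port-id 2..7 (service ports) -> port_id 0..5 (- HNS_SRV_OFFSET)
 */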
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index c68ab3d34..337efa582 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -18,6 +18,9 @@
#include "hnae.h"
+#define HNS_DEBUG_OFFSET 6
+#define HNS_SRV_OFFSET 2
+
enum hns_nic_state {
NIC_STATE_TESTING = 0,
NIC_STATE_RESETTING,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3d746c887..67a648c7d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -46,7 +46,6 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
u32 link_stat = priv->link;
struct hnae_handle *h;
- assert(priv && priv->ae_handle);
h = priv->ae_handle;
if (priv->phy) {
@@ -646,8 +645,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
- assert(priv);
-
strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
sizeof(drvinfo->version));
drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
@@ -720,8 +717,6 @@ static int hns_set_pauseparam(struct net_device *net_dev,
struct hnae_handle *h;
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
h = priv->ae_handle;
ops = h->dev->ops;
@@ -780,8 +775,6 @@ static int hns_set_coalesce(struct net_device *net_dev,
struct hnae_ae_ops *ops;
int ret;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
@@ -1111,8 +1104,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
cmd->version = HNS_CHIP_VERSION;
@@ -1135,8 +1126,6 @@ static int hns_get_regs_len(struct net_device *net_dev)
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
if (!ops->get_regs_len) {
netdev_err(net_dev, "ops->get_regs_len is null!\n");
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 3daf2d4a7..631dbc7b4 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1102,7 +1102,7 @@ static int hp100_open(struct net_device *dev)
return -EAGAIN;
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_start_queue(dev);
lp->lan_type = hp100_sense_lan(dev);
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 7ce6379fd..befb4ac3e 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1042,7 +1042,7 @@ static void i596_tx_timeout (struct net_device *dev)
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue (dev);
}
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index c984998b3..3dbc53c21 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -960,7 +960,7 @@ static void i596_tx_timeout (struct net_device *dev)
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue (dev);
}
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 353f57f67..21c84cc9c 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -983,7 +983,7 @@ static void sun3_82586_timeout(struct net_device *dev)
p->scb->cmd_cuc = CUC_START;
sun3_attn586();
WAIT_4_SCB_CMD();
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
return 0;
}
#endif
@@ -996,7 +996,7 @@ static void sun3_82586_timeout(struct net_device *dev)
sun3_82586_close(dev);
sun3_82586_open(dev);
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
/******************************************************
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 2a0dc127d..54efa9a51 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1169,16 +1169,15 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
port = ehea_get_port(adapter, portnum);
+ if (!port) {
+ netdev_err(NULL, "unknown portnum %x\n", portnum);
+ return;
+ }
dev = port->netdev;
switch (ec) {
case EHEA_EC_PORTSTATE_CHG: /* port state change */
- if (!port) {
- netdev_err(dev, "unknown portnum %x\n", portnum);
- break;
- }
-
if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
if (!netif_carrier_ok(dev)) {
ret = ehea_sense_port_attr(port);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 5d7db6c01..4c9771d57 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -301,7 +301,7 @@ static inline void emac_netif_stop(struct emac_instance *dev)
dev->no_mcast = 1;
netif_addr_unlock(dev->ndev);
netif_tx_unlock_bh(dev->ndev);
- dev->ndev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev->ndev); /* prevent tx timeout */
mal_poll_disable(dev->mal, &dev->commac);
netif_tx_disable(dev->ndev);
}
@@ -1377,7 +1377,7 @@ static inline int emac_xmit_finish(struct emac_instance *dev, int len)
DBG2(dev, "stopped TX queue" NL);
}
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
++dev->stats.tx_packets;
dev->stats.tx_bytes += len;
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index d3b9d1033..5b88cc690 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -470,12 +470,38 @@ static struct mii_phy_def m88e1112_phy_def = {
.ops = &m88e1112_phy_ops,
};
+static int ar8035_init(struct mii_phy *phy)
+{
+ phy_write(phy, 0x1d, 0x5); /* Address debug register 5 */
+ phy_write(phy, 0x1e, 0x2d47); /* Value copied from u-boot */
+ phy_write(phy, 0x1d, 0xb); /* Address hib ctrl */
+ phy_write(phy, 0x1e, 0xbc20); /* Value copied from u-boot */
+
+ return 0;
+}
+
+static struct mii_phy_ops ar8035_phy_ops = {
+ .init = ar8035_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link,
+};
+
+static struct mii_phy_def ar8035_phy_def = {
+ .phy_id = 0x004dd070,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Atheros 8035 Gigabit Ethernet",
+ .ops = &ar8035_phy_ops,
+};
+
static struct mii_phy_def *mii_phy_table[] = {
&et1011c_phy_def,
&cis8201_phy_def,
&bcm5248_phy_def,
&m88e1111_phy_def,
&m88e1112_phy_def,
+ &ar8035_phy_def,
&genmii_phy_def,
NULL
};
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6e9e16eee..88f3c85fb 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -61,6 +61,7 @@
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
@@ -74,6 +75,7 @@
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/seq_file.h>
+#include <linux/workqueue.h>
#include "ibmvnic.h"
@@ -88,12 +90,14 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
union sub_crq *sub_crq);
+static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
struct ibmvnic_sub_crq_queue *);
@@ -467,7 +471,8 @@ static int ibmvnic_open(struct net_device *netdev)
crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
ibmvnic_send_crq(adapter, &crq);
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
+
return 0;
bounce_map_failed:
@@ -517,7 +522,7 @@ static int ibmvnic_close(struct net_device *netdev)
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
if (adapter->bounce_buffer) {
if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
@@ -561,10 +566,141 @@ static int ibmvnic_close(struct net_device *netdev)
return 0;
}
+/**
+ * build_hdr_data - creates L2/L3/L4 header data buffer
+ * @hdr_field - bitfield determining needed headers
+ * @skb - socket buffer
+ * @hdr_len - array of header lengths
+ * @hdr_data - buffer to hold the built header data
+ *
+ * Reads hdr_field to determine which headers are needed by firmware.
+ * Builds a buffer containing these headers. Saves individual header
+ * lengths and total buffer length to be used to build descriptors.
+ */
+static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
+ int *hdr_len, u8 *hdr_data)
+{
+ int len = 0;
+ u8 *hdr;
+
+ hdr_len[0] = sizeof(struct ethhdr);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ hdr_len[1] = ip_hdr(skb)->ihl * 4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ hdr_len[2] = tcp_hdrlen(skb);
+ else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+ hdr_len[2] = sizeof(struct udphdr);
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ hdr_len[1] = sizeof(struct ipv6hdr);
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ hdr_len[2] = tcp_hdrlen(skb);
+ else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+ hdr_len[2] = sizeof(struct udphdr);
+ }
+
+ memset(hdr_data, 0, 120);
+ if ((hdr_field >> 6) & 1) {
+ hdr = skb_mac_header(skb);
+ memcpy(hdr_data, hdr, hdr_len[0]);
+ len += hdr_len[0];
+ }
+
+ if ((hdr_field >> 5) & 1) {
+ hdr = skb_network_header(skb);
+ memcpy(hdr_data + len, hdr, hdr_len[1]);
+ len += hdr_len[1];
+ }
+
+ if ((hdr_field >> 4) & 1) {
+ hdr = skb_transport_header(skb);
+ memcpy(hdr_data + len, hdr, hdr_len[2]);
+ len += hdr_len[2];
+ }
+ return len;
+}
+
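Bits 6, 5 and 4 of hdr_field gate which of the three computed headers actually land in the buffer; for instance:

/*
 * Worked example: hdr_field = 0x70 (bits 6..4 set) copies the L2, L3
 * and L4 headers back-to-back into hdr_data; hdr_field = 0x20 copies
 * only the network header. The return value is the sum of the copied
 * hdr_len[] entries.
 */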
+/**
+ * create_hdr_descs - create header and header extension descriptors
+ * @hdr_field - bitfield determining needed headers
+ * @hdr_data - buffer containing header data
+ * @len - length of data buffer
+ * @hdr_len - array of individual header lengths
+ * @scrq_arr - descriptor array
+ *
+ * Creates header and, if needed, header extension descriptors and
+ * places them in a descriptor array, scrq_arr
+ */
+
+static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+ union sub_crq *scrq_arr)
+{
+ union sub_crq hdr_desc;
+ int tmp_len = len;
+ u8 *data, *cur;
+ int tmp;
+
+ while (tmp_len > 0) {
+ cur = hdr_data + len - tmp_len;
+
+ memset(&hdr_desc, 0, sizeof(hdr_desc));
+ if (cur != hdr_data) {
+ data = hdr_desc.hdr_ext.data;
+ tmp = tmp_len > 29 ? 29 : tmp_len;
+ hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
+ hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+ hdr_desc.hdr_ext.len = tmp;
+ } else {
+ data = hdr_desc.hdr.data;
+ tmp = tmp_len > 24 ? 24 : tmp_len;
+ hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
+ hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
+ hdr_desc.hdr.len = tmp;
+ hdr_desc.hdr.l2_len = (u8)hdr_len[0];
+ hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
+ hdr_desc.hdr.l4_len = (u8)hdr_len[2];
+ hdr_desc.hdr.flag = hdr_field << 1;
+ }
+ memcpy(data, cur, tmp);
+ tmp_len -= tmp;
+ *scrq_arr = hdr_desc;
+ scrq_arr++;
+ }
+}
+
+/**
+ * build_hdr_descs_arr - build a header descriptor array
+ * @skb - socket buffer
+ * @num_entries - number of descriptors to be sent
+ * @subcrq - first TX descriptor
+ * @hdr_field - bit field determining which headers will be sent
+ *
+ * This function will build a TX descriptor array with applicable
+ * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
+ */
+
+static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
+ int *num_entries, u8 hdr_field)
+{
+ int hdr_len[3] = {0, 0, 0};
+ int tot_len, len;
+ u8 *hdr_data = txbuff->hdr_data;
+
+ tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
+ txbuff->hdr_data);
+ len = tot_len;
+ len -= 24;
+ if (len > 0)
+ *num_entries += len % 29 ? len / 29 + 1 : len / 29;
+ create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
+ txbuff->indir_arr + 1);
+}
+
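The first descriptor carries 24 bytes of header data and each extension carries up to 29 (the tmp = tmp_len > 24/29 splits in create_hdr_descs above), so the count arithmetic behaves as in this worked example:

/*
 * Worked example: tot_len = 66 header bytes.
 *   first descriptor:  24 bytes -> len = 66 - 24 = 42 remaining
 *   extensions:        42 / 29 = 1 remainder 13 -> 2 extension descriptors
 * so *num_entries grows by 2 on top of the initial TX descriptor.
 */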
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int queue_num = skb_get_queue_mapping(skb);
+ u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_tx_buff *tx_buff = NULL;
struct ibmvnic_tx_pool *tx_pool;
@@ -579,6 +715,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned long lpar_rc;
union sub_crq tx_crq;
unsigned int offset;
+ int num_entries = 1;
unsigned char *dst;
u64 *handle_array;
int index = 0;
@@ -644,11 +781,35 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
}
- if (skb->ip_summed == CHECKSUM_PARTIAL)
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
-
- lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
-
+ hdrs += 2;
+ }
+ /* determine if l2/3/4 headers are sent to firmware */
+ if ((*hdrs >> 7) & 1 &&
+ (skb->protocol == htons(ETH_P_IP) ||
+ skb->protocol == htons(ETH_P_IPV6))) {
+ build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
+ tx_crq.v1.n_crq_elem = num_entries;
+ tx_buff->indir_arr[0] = tx_crq;
+ tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
+ sizeof(tx_buff->indir_arr),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, tx_buff->indir_dma)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(dev, "tx: unable to map descriptor array\n");
+ tx_map_failed++;
+ tx_dropped++;
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+ lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
+ (u64)tx_buff->indir_dma,
+ (u64)num_entries);
+ } else {
+ lpar_rc = send_subcrq(adapter, handle_array[queue_num],
+ &tx_crq);
+ }
if (lpar_rc != H_SUCCESS) {
dev_err(dev, "tx failed with code %ld\n", lpar_rc);
@@ -832,7 +993,7 @@ restart_poll:
netdev->stats.rx_bytes += length;
frames_processed++;
}
- replenish_pools(adapter);
+ replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
if (frames_processed < budget) {
enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1054,12 +1215,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
goto reg_failed;
}
- scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
- if (scrq->irq == NO_IRQ) {
- dev_err(dev, "Error mapping irq\n");
- goto map_irq_failed;
- }
-
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
scrq->cur = 0;
@@ -1072,12 +1227,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
return scrq;
-map_irq_failed:
- do {
- rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
- adapter->vdev->unit_address,
- scrq->crq_num);
- } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
@@ -1098,6 +1247,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
if (adapter->tx_scrq[i]) {
free_irq(adapter->tx_scrq[i]->irq,
adapter->tx_scrq[i]);
+ irq_dispose_mapping(adapter->tx_scrq[i]->irq);
release_sub_crq_queue(adapter,
adapter->tx_scrq[i]);
}
@@ -1109,6 +1259,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
if (adapter->rx_scrq[i]) {
free_irq(adapter->rx_scrq[i]->irq,
adapter->rx_scrq[i]);
+ irq_dispose_mapping(adapter->rx_scrq[i]->irq);
release_sub_crq_queue(adapter,
adapter->rx_scrq[i]);
}
@@ -1118,6 +1269,29 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
adapter->requested_caps = 0;
}
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->tx_scrq) {
+ for (i = 0; i < adapter->req_tx_queues; i++)
+ if (adapter->tx_scrq[i])
+ release_sub_crq_queue(adapter,
+ adapter->tx_scrq[i]);
+ adapter->tx_scrq = NULL;
+ }
+
+ if (adapter->rx_scrq) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ if (adapter->rx_scrq[i])
+ release_sub_crq_queue(adapter,
+ adapter->rx_scrq[i]);
+ adapter->rx_scrq = NULL;
+ }
+
+ adapter->requested_caps = 0;
+}
+
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
@@ -1159,6 +1333,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
union sub_crq *next;
int index;
int i, j;
+ u8 first;
restart_loop:
while (pending_scrq(adapter, scrq)) {
@@ -1181,6 +1356,13 @@ restart_loop:
txbuff->data_dma[j] = 0;
txbuff->used_bounce = false;
}
+ /* if sub_crq was sent indirectly */
+ first = txbuff->indir_arr[0].generic.first;
+ if (first == IBMVNIC_CRQ_CMD) {
+ dma_unmap_single(dev, txbuff->indir_dma,
+ sizeof(txbuff->indir_arr),
+ DMA_TO_DEVICE);
+ }
if (txbuff->last_frag)
dev_kfree_skb_any(txbuff->skb);
@@ -1229,6 +1411,66 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
return IRQ_HANDLED;
}
+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_sub_crq_queue *scrq;
+ int i = 0, j = 0;
+ int rc = 0;
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ scrq = adapter->tx_scrq[i];
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+ if (scrq->irq == NO_IRQ) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping irq\n");
+ goto req_tx_irq_failed;
+ }
+
+ rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
+ 0, "ibmvnic_tx", scrq);
+
+ if (rc) {
+ dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
+ scrq->irq, rc);
+ irq_dispose_mapping(scrq->irq);
+ goto req_tx_irq_failed;
+ }
+ }
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ scrq = adapter->rx_scrq[i];
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+ if (scrq->irq == NO_IRQ) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping irq\n");
+ goto req_rx_irq_failed;
+ }
+ rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
+ 0, "ibmvnic_rx", scrq);
+ if (rc) {
+ dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
+ scrq->irq, rc);
+ irq_dispose_mapping(scrq->irq);
+ goto req_rx_irq_failed;
+ }
+ }
+ return rc;
+
+req_rx_irq_failed:
+ for (j = 0; j < i; j++) {
+ free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
+ irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ }
+ i = adapter->req_tx_queues;
+req_tx_irq_failed:
+ for (j = 0; j < i; j++) {
+ free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+ irq_dispose_mapping(adapter->tx_scrq[j]->irq);
+ }
+ release_sub_crqs_no_irqs(adapter);
+ return rc;
+}
+
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
struct device *dev = &adapter->vdev->dev;
@@ -1237,8 +1479,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
union ibmvnic_crq crq;
int total_queues;
int more = 0;
- int i, j;
- int rc;
+ int i;
if (!retry) {
/* Sub-CRQ entries are 32 byte long */
@@ -1261,9 +1502,9 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
entries_page : adapter->max_rx_add_entries_per_subcrq;
/* Choosing the maximum number of queues supported by firmware*/
- adapter->req_tx_queues = adapter->min_tx_queues;
- adapter->req_rx_queues = adapter->min_rx_queues;
- adapter->req_rx_add_queues = adapter->min_rx_add_queues;
+ adapter->req_tx_queues = adapter->max_tx_queues;
+ adapter->req_rx_queues = adapter->max_rx_queues;
+ adapter->req_rx_add_queues = adapter->max_rx_add_queues;
adapter->req_mtu = adapter->max_mtu;
}
@@ -1317,13 +1558,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
for (i = 0; i < adapter->req_tx_queues; i++) {
adapter->tx_scrq[i] = allqueues[i];
adapter->tx_scrq[i]->pool_index = i;
- rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
- 0, "ibmvnic_tx", adapter->tx_scrq[i]);
- if (rc) {
- dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
- adapter->tx_scrq[i]->irq, rc);
- goto req_tx_irq_failed;
- }
}
adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
@@ -1334,13 +1568,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
for (i = 0; i < adapter->req_rx_queues; i++) {
adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
adapter->rx_scrq[i]->scrq_num = i;
- rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
- 0, "ibmvnic_rx", adapter->rx_scrq[i]);
- if (rc) {
- dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
- adapter->rx_scrq[i]->irq, rc);
- goto req_rx_irq_failed;
- }
}
memset(&crq, 0, sizeof(crq));
@@ -1393,15 +1620,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
return;
-req_rx_irq_failed:
- for (j = 0; j < i; j++)
- free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
- i = adapter->req_tx_queues;
-req_tx_irq_failed:
- for (j = 0; j < i; j++)
- free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
- kfree(adapter->rx_scrq);
- adapter->rx_scrq = NULL;
rx_failed:
kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;
@@ -1494,6 +1712,28 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
return rc;
}
+static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
+ u64 remote_handle, u64 ioba, u64 num_entries)
+{
+ unsigned int ua = adapter->vdev->unit_address;
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
+
+ /* Make sure the hypervisor sees the complete request */
+ mb();
+ rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
+ cpu_to_be64(remote_handle),
+ ioba, num_entries);
+
+ if (rc) {
+ if (rc == H_CLOSED)
+ dev_warn(dev, "CRQ Queue closed\n");
+ dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
+ }
+
+ return rc;
+}
+
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
union ibmvnic_crq *crq)
{
@@ -1589,13 +1829,11 @@ static void send_login(struct ibmvnic_adapter *adapter)
goto buf_map_failed;
}
- rsp_buffer_size =
- sizeof(struct ibmvnic_login_rsp_buffer) +
- sizeof(u64) * (adapter->req_tx_queues +
- adapter->req_rx_queues *
- adapter->req_rx_add_queues + adapter->
- req_rx_add_queues) +
- sizeof(u8) * (IBMVNIC_TX_DESC_VERSIONS);
+ rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
+ sizeof(u64) * adapter->req_tx_queues +
+ sizeof(u64) * adapter->req_rx_queues +
+ sizeof(u64) * adapter->req_rx_queues +
+ sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
if (!login_rsp_buffer)
@@ -1918,6 +2156,10 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
adapter->netdev->features |= NETIF_F_IPV6_CSUM;
+ if ((adapter->netdev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
+ adapter->netdev->features |= NETIF_F_RXCSUM;
+
memset(&crq, 0, sizeof(crq));
crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
@@ -1931,7 +2173,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff;
+ struct ibmvnic_error_buff *error_buff, *tmp;
unsigned long flags;
bool found = false;
int i;
@@ -1943,7 +2185,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
}
spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_for_each_entry(error_buff, &adapter->errors, list)
+ list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
if (error_buff->error_id == crq->request_error_rsp.error_id) {
found = true;
list_del(&error_buff->list);
@@ -2158,9 +2400,9 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
*req_value,
(long int)be32_to_cpu(crq->request_capability_rsp.
number), name);
- release_sub_crqs(adapter);
+ release_sub_crqs_no_irqs(adapter);
*req_value = be32_to_cpu(crq->request_capability_rsp.number);
- complete(&adapter->init_done);
+ init_sub_crqs(adapter, 1);
return;
default:
dev_err(dev, "Error %d in request cap rsp\n",
@@ -2210,6 +2452,16 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
dma_unmap_single(dev, adapter->login_rsp_buf_token,
adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
+ /* If the number of queues requested can't be allocated by the
+ * server, the login response will return with code 1. We will need
+ * to resend the login buffer with fewer queues requested.
+ */
+ if (login_rsp_crq->generic.rc.code) {
+ adapter->renegotiate = true;
+ complete(&adapter->init_done);
+ return 0;
+ }
+
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
netdev_dbg(adapter->netdev, "%016lx\n",
@@ -2459,7 +2711,7 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
out:
if (atomic_read(&adapter->running_cap_queries) == 0)
- complete(&adapter->init_done);
+ init_sub_crqs(adapter, 0);
/* We're done querying the capabilities, initialize sub-crqs */
}
@@ -2941,14 +3193,14 @@ static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
- struct ibmvnic_inflight_cmd *inflight_cmd;
+ struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff;
+ struct ibmvnic_error_buff *error_buff, *tmp2;
unsigned long flags;
unsigned long flags2;
spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
+ list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
switch (inflight_cmd->crq.generic.cmd) {
case LOGIN:
dma_unmap_single(dev, adapter->login_buf_token,
@@ -2965,8 +3217,8 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
break;
case REQUEST_ERROR_INFO:
spin_lock_irqsave(&adapter->error_list_lock, flags2);
- list_for_each_entry(error_buff, &adapter->errors,
- list) {
+ list_for_each_entry_safe(error_buff, tmp2,
+ &adapter->errors, list) {
dma_unmap_single(dev, error_buff->dma,
error_buff->len,
DMA_FROM_DEVICE);
@@ -3002,8 +3254,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
dev_info(dev, "Partner initialized\n");
/* Send back a response */
rc = ibmvnic_send_crq_init_complete(adapter);
- if (rc == 0)
- send_version_xchg(adapter);
+ if (!rc)
+ schedule_work(&adapter->vnic_crq_init);
else
dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
break;
@@ -3355,8 +3607,63 @@ static const struct file_operations ibmvnic_dump_ops = {
.release = single_release,
};
+static void handle_crq_init_rsp(struct work_struct *work)
+{
+ struct ibmvnic_adapter *adapter = container_of(work,
+ struct ibmvnic_adapter,
+ vnic_crq_init);
+ struct device *dev = &adapter->vdev->dev;
+ struct net_device *netdev = adapter->netdev;
+ unsigned long timeout = msecs_to_jiffies(30000);
+ int rc;
+
+ send_version_xchg(adapter);
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+ dev_err(dev, "Passive init timeout\n");
+ goto task_failed;
+ }
+
+ do {
+ if (adapter->renegotiate) {
+ adapter->renegotiate = false;
+ release_sub_crqs_no_irqs(adapter);
+ send_cap_queries(adapter);
+
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+ dev_err(dev, "Passive init timeout\n");
+ goto task_failed;
+ }
+ }
+ } while (adapter->renegotiate);
+ rc = init_sub_crq_irqs(adapter);
+
+ if (rc)
+ goto task_failed;
+
+ netdev->real_num_tx_queues = adapter->req_tx_queues;
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(dev,
+ "failed to register netdev rc=%d\n", rc);
+ goto register_failed;
+ }
+ dev_info(dev, "ibmvnic registered\n");
+
+ return;
+
+register_failed:
+ release_sub_crqs(adapter);
+task_failed:
+ dev_err(dev, "Passive initialization was not successful\n");
+}
+
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
+ unsigned long timeout = msecs_to_jiffies(30000);
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
@@ -3393,6 +3700,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
+ INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+
spin_lock_init(&adapter->stats_lock);
rc = ibmvnic_init_crq_queue(adapter);
@@ -3435,23 +3744,26 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
ibmvnic_send_crq_init(adapter);
init_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
-
- /* needed to pull init_sub_crqs outside of an interrupt context
- * because it creates IRQ mappings for the subCRQ queues, causing
- * a kernel warning
- */
- init_sub_crqs(adapter, 0);
-
- reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout))
+ return 0;
- /* if init_sub_crqs is partially successful, retry */
- while (!adapter->tx_scrq || !adapter->rx_scrq) {
- init_sub_crqs(adapter, 1);
+ do {
+ if (adapter->renegotiate) {
+ adapter->renegotiate = false;
+ release_sub_crqs_no_irqs(adapter);
+ send_cap_queries(adapter);
+
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout))
+ return 0;
+ }
+ } while (adapter->renegotiate);
- reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
+ goto free_debugfs;
}
netdev->real_num_tx_queues = adapter->req_tx_queues;
@@ -3459,12 +3771,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
rc = register_netdev(netdev);
if (rc) {
dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
- goto free_debugfs;
+ goto free_sub_crqs;
}
dev_info(&dev->dev, "ibmvnic registered\n");
return 0;
+free_sub_crqs:
+ release_sub_crqs(adapter);
free_debugfs:
if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
debugfs_remove_recursive(adapter->debugfs_dir);
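Annotation: the ibmvnic changes above move the heavy part of CRQ initialization out of the interrupt path, matching the comment the patch deletes ("init_sub_crqs ... creates IRQ mappings ... causing a kernel warning"). The CRQ handler now only schedules adapter->vnic_crq_init, and handle_crq_init_rsp() later performs the version exchange, capability renegotiation, and IRQ setup in process context, where sleeping is allowed. A minimal sketch of that defer-to-workqueue pattern, with hypothetical names (my_adapter, my_init_worker) standing in for the driver's own:

	#include <linux/workqueue.h>

	struct my_adapter {
		struct work_struct init_work;
	};

	/* Runs in process context: safe to sleep, wait on completions,
	 * and create IRQ mappings here.
	 */
	static void my_init_worker(struct work_struct *work)
	{
		struct my_adapter *adap =
			container_of(work, struct my_adapter, init_work);

		(void)adap;	/* version exchange, capability queries, ... */
	}

	static void my_probe(struct my_adapter *adap)
	{
		INIT_WORK(&adap->init_work, my_init_worker);
	}

	static void my_crq_interrupt(struct my_adapter *adap)
	{
		schedule_work(&adap->init_work);	/* defer, don't block */
	}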
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1a9993cc7..e82898fd5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -879,6 +879,9 @@ struct ibmvnic_tx_buff {
int pool_index;
bool last_frag;
bool used_bounce;
+ union sub_crq indir_arr[6];
+ u8 hdr_data[140];
+ dma_addr_t indir_dma;
};
struct ibmvnic_tx_pool {
@@ -977,6 +980,7 @@ struct ibmvnic_adapter {
struct ibmvnic_sub_crq_queue **tx_scrq;
struct ibmvnic_sub_crq_queue **rx_scrq;
int requested_caps;
+ bool renegotiate;
/* rx structs */
struct napi_struct *napi;
@@ -1041,4 +1045,6 @@ struct ibmvnic_adapter {
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
u8 map_id;
+
+ struct work_struct vnic_crq_init;
};
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 3772f3ac9..714bd1014 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -25,16 +25,13 @@ config E100
on the adapter. Look for a label that has a barcode and a number
in the format 123456-001 (six digits hyphen three digits).
- Use the above information and the Adapter & Driver ID Guide at:
+ Use the above information and the Adapter & Driver ID Guide that
+ can be located at:
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+ <http://support.intel.com>
to identify the adapter.
- For the latest Intel PRO/100 network driver for Linux, see:
-
- <http://www.intel.com/p/en_US/support/highlights/network/pro100plus>
-
More specific information on configuring the driver is in
<file:Documentation/networking/e100.txt>.
@@ -47,12 +44,7 @@ config E1000
---help---
This driver supports Intel(R) PRO/1000 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -71,12 +63,8 @@ config E1000E
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
use the regular e1000 driver. For more information on how to
- identify your adapter, go to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ identify your adapter, go to the Adapter & Driver ID Guide that
+ can be located at:
<http://support.intel.com>
@@ -101,12 +89,7 @@ config IGB
---help---
This driver supports Intel(R) 82575/82576 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -142,12 +125,7 @@ config IGBVF
---help---
This driver supports Intel(R) 82576 virtual functions. For more
information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -164,12 +142,7 @@ config IXGB
This driver supports Intel(R) PRO/10GbE family of adapters for
PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver
instead. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -187,12 +160,7 @@ config IXGBE
---help---
This driver supports Intel(R) 10GbE PCI Express family of
adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -243,12 +211,7 @@ config IXGBEVF
---help---
This driver supports Intel(R) PCI Express virtual functions for the
Intel(R) ixgbe driver. For more information on how to identify your
- adapter, go to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/sb/CS-008441.htm>
-
- For general information and support, go to the Intel support
- website at:
+ adapter, go to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -266,12 +229,7 @@ config I40E
---help---
This driver supports Intel(R) Ethernet Controller XL710 Family of
devices. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -326,12 +284,7 @@ config I40EVF
---help---
This driver supports Intel(R) XL710 and X710 virtual functions.
For more information on how to identify your adapter, go to the
- Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/sb/CS-008441.htm>
-
- For general information and support, go to the Intel support
- website at:
+ Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -347,12 +300,7 @@ config FM10K
---help---
This driver supports Intel(R) FM10000 Ethernet Switch Host
Interface. For more information on how to identify your adapter,
- go to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/sb/CS-008441.htm>
-
- For general information and support, go to the Intel support
- website at:
+ go to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 98fe5a2cd..d7bdea79e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -358,6 +358,8 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
+int e1000_open(struct net_device *netdev);
+int e1000_close(struct net_device *netdev);
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 83e557c7f..975eeb885 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1553,7 +1553,7 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ e1000_close(netdev);
else
e1000_reset(adapter);
@@ -1582,7 +1582,7 @@ static void e1000_diag_test(struct net_device *netdev,
e1000_reset(adapter);
clear_bit(__E1000_TESTING, &adapter->flags);
if (if_running)
- dev_open(netdev);
+ e1000_open(netdev);
} else {
e_info(hw, "online testing starting\n");
/* Online tests */
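Annotation: the dev_close()/dev_open() replacement above calls the driver's own e1000_close()/e1000_open() (its ndo_stop/ndo_open handlers, now exported from e1000_main.c). That cycles only the hardware; dev_close() would additionally clear the interface's administrative IFF_UP state and let the stack tear down routes and addresses the user never asked to lose. A rough sketch of the resulting offline-test flow, assuming the exported prototypes from e1000.h:

	static void e1000_offline_test_cycle(struct net_device *netdev,
					     bool if_running)
	{
		if (if_running)
			e1000_close(netdev);	/* hardware down, IFF_UP untouched */

		/* ... offline register, EEPROM and loopback tests ... */

		if (if_running)
			e1000_open(netdev);	/* restore hardware state */
	}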
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index ae90d4f12..f42129d09 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -114,8 +114,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
-static int e1000_open(struct net_device *netdev);
-static int e1000_close(struct net_device *netdev);
+int e1000_open(struct net_device *netdev);
+int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
@@ -1360,7 +1360,7 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog task is started,
* and the stack is notified that the interface is ready.
**/
-static int e1000_open(struct net_device *netdev)
+int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -1437,7 +1437,7 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int e1000_close(struct net_device *netdev)
+int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 2af603f3e..cd3913760 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -121,7 +121,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
- nvm->word_size = 1 << size;
+ nvm->word_size = BIT(size);
return 0;
}
@@ -845,27 +845,27 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
reg = er32(TARC(0));
reg &= ~(0xF << 27); /* 30:27 */
if (hw->phy.media_type != e1000_media_type_copper)
- reg &= ~(1 << 20);
+ reg &= ~BIT(20);
ew32(TARC(0), reg);
/* Transmit Arbitration Control 1 */
reg = er32(TARC(1));
if (er32(TCTL) & E1000_TCTL_MULR)
- reg &= ~(1 << 28);
+ reg &= ~BIT(28);
else
- reg |= (1 << 28);
+ reg |= BIT(28);
ew32(TARC(1), reg);
/* Disable IPv6 extension header parsing because some malformed
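Annotation: the (1 << n) to BIT(n) conversions in this and the following e1000e files are mechanical but not purely cosmetic. BIT(), from <linux/bitops.h>, expands to (1UL << (nr)), so the mask is unsigned by construction and BIT(31) is well defined, whereas (1 << 31) shifts into the sign bit of a signed int, which is undefined behavior in C. A small self-contained illustration:

	#include <linux/bitops.h>	/* BIT(nr) is (1UL << (nr)) */
	#include <linux/types.h>

	static u32 tarc_set_speed_mode(u32 reg)
	{
		/* identical mask to (1 << 22), but unsigned by construction */
		return reg | BIT(22);
	}

	static u32 flag_msi_test_failed(void)
	{
		/* (1 << 31) would overflow a signed int; BIT(31) is fine */
		return BIT(31);
	}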
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 5f7016442..7fd4d5459 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -185,7 +185,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
- nvm->word_size = 1 << size;
+ nvm->word_size = BIT(size);
break;
}
@@ -1163,12 +1163,12 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
@@ -1177,11 +1177,11 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
- reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+ reg |= BIT(23) | BIT(24) | BIT(25) | BIT(26);
break;
case e1000_82574:
case e1000_82583:
- reg |= (1 << 26);
+ reg |= BIT(26);
break;
default:
break;
@@ -1193,12 +1193,12 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
- reg &= ~((1 << 29) | (1 << 30));
- reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+ reg &= ~(BIT(29) | BIT(30));
+ reg |= BIT(22) | BIT(24) | BIT(25) | BIT(26);
if (er32(TCTL) & E1000_TCTL_MULR)
- reg &= ~(1 << 28);
+ reg &= ~BIT(28);
else
- reg |= (1 << 28);
+ reg |= BIT(28);
ew32(TARC(1), reg);
break;
default:
@@ -1211,7 +1211,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
reg = er32(CTRL);
- reg &= ~(1 << 29);
+ reg &= ~BIT(29);
ew32(CTRL, reg);
break;
default:
@@ -1224,8 +1224,8 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
reg = er32(CTRL_EXT);
- reg &= ~(1 << 23);
- reg |= (1 << 22);
+ reg &= ~BIT(23);
+ reg |= BIT(22);
ew32(CTRL_EXT, reg);
break;
default:
@@ -1261,7 +1261,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
reg = er32(GCR);
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(GCR, reg);
/* Workaround for hardware errata.
@@ -1308,8 +1308,8 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
E1000_VFTA_ENTRY_SHIFT) &
E1000_VFTA_ENTRY_MASK;
vfta_bit_in_reg =
- 1 << (hw->mng_cookie.vlan_id &
- E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ BIT(hw->mng_cookie.vlan_id &
+ E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
}
break;
default:
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1dc293bad..ef96cd11d 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -109,18 +109,18 @@ struct e1000_info;
#define E1000_TXDCTL_DMA_BURST_ENABLE \
(E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
E1000_TXDCTL_COUNT_DESC | \
- (1 << 16) | /* wthresh must be +1 more than desired */\
- (1 << 8) | /* hthresh */ \
- 0x1f) /* pthresh */
+ (1u << 16) | /* wthresh must be +1 more than desired */\
+ (1u << 8) | /* hthresh */ \
+ 0x1f) /* pthresh */
#define E1000_RXDCTL_DMA_BURST_ENABLE \
(0x01000000 | /* set descriptor granularity */ \
- (4 << 16) | /* set writeback threshold */ \
- (4 << 8) | /* set prefetch threshold */ \
+ (4u << 16) | /* set writeback threshold */ \
+ (4u << 8) | /* set prefetch threshold */ \
0x20) /* set hthresh */
-#define E1000_TIDV_FPD (1 << 31)
-#define E1000_RDTR_FPD (1 << 31)
+#define E1000_TIDV_FPD BIT(31)
+#define E1000_RDTR_FPD BIT(31)
enum e1000_boards {
board_82571,
@@ -347,6 +347,7 @@ struct e1000_adapter {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct pm_qos_request pm_qos_req;
+ s32 ptp_delta;
u16 eee_advert;
};
@@ -404,53 +405,53 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL)
/* hardware capability, feature, and workaround flags */
-#define FLAG_HAS_AMT (1 << 0)
-#define FLAG_HAS_FLASH (1 << 1)
-#define FLAG_HAS_HW_VLAN_FILTER (1 << 2)
-#define FLAG_HAS_WOL (1 << 3)
-/* reserved bit4 */
-#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
-#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
-#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
-#define FLAG_READ_ONLY_NVM (1 << 8)
-#define FLAG_IS_ICH (1 << 9)
-#define FLAG_HAS_MSIX (1 << 10)
-#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
-#define FLAG_IS_QUAD_PORT_A (1 << 12)
-#define FLAG_IS_QUAD_PORT (1 << 13)
-#define FLAG_HAS_HW_TIMESTAMP (1 << 14)
-#define FLAG_APME_IN_WUC (1 << 15)
-#define FLAG_APME_IN_CTRL3 (1 << 16)
-#define FLAG_APME_CHECK_PORT_B (1 << 17)
-#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18)
-#define FLAG_NO_WAKE_UCAST (1 << 19)
-#define FLAG_MNG_PT_ENABLED (1 << 20)
-#define FLAG_RESET_OVERWRITES_LAA (1 << 21)
-#define FLAG_TARC_SPEED_MODE_BIT (1 << 22)
-#define FLAG_TARC_SET_BIT_ZERO (1 << 23)
-#define FLAG_RX_NEEDS_RESTART (1 << 24)
-#define FLAG_LSC_GIG_SPEED_DROP (1 << 25)
-#define FLAG_SMART_POWER_DOWN (1 << 26)
-#define FLAG_MSI_ENABLED (1 << 27)
-/* reserved (1 << 28) */
-#define FLAG_TSO_FORCE (1 << 29)
-#define FLAG_RESTART_NOW (1 << 30)
-#define FLAG_MSI_TEST_FAILED (1 << 31)
-
-#define FLAG2_CRC_STRIPPING (1 << 0)
-#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
-#define FLAG2_IS_DISCARDING (1 << 2)
-#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
-#define FLAG2_HAS_PHY_STATS (1 << 4)
-#define FLAG2_HAS_EEE (1 << 5)
-#define FLAG2_DMA_BURST (1 << 6)
-#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
-#define FLAG2_DISABLE_AIM (1 << 8)
-#define FLAG2_CHECK_PHY_HANG (1 << 9)
-#define FLAG2_NO_DISABLE_RX (1 << 10)
-#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
-#define FLAG2_DFLT_CRC_STRIPPING (1 << 12)
-#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13)
+#define FLAG_HAS_AMT BIT(0)
+#define FLAG_HAS_FLASH BIT(1)
+#define FLAG_HAS_HW_VLAN_FILTER BIT(2)
+#define FLAG_HAS_WOL BIT(3)
+/* reserved BIT(4) */
+#define FLAG_HAS_CTRLEXT_ON_LOAD BIT(5)
+#define FLAG_HAS_SWSM_ON_LOAD BIT(6)
+#define FLAG_HAS_JUMBO_FRAMES BIT(7)
+#define FLAG_READ_ONLY_NVM BIT(8)
+#define FLAG_IS_ICH BIT(9)
+#define FLAG_HAS_MSIX BIT(10)
+#define FLAG_HAS_SMART_POWER_DOWN BIT(11)
+#define FLAG_IS_QUAD_PORT_A BIT(12)
+#define FLAG_IS_QUAD_PORT BIT(13)
+#define FLAG_HAS_HW_TIMESTAMP BIT(14)
+#define FLAG_APME_IN_WUC BIT(15)
+#define FLAG_APME_IN_CTRL3 BIT(16)
+#define FLAG_APME_CHECK_PORT_B BIT(17)
+#define FLAG_DISABLE_FC_PAUSE_TIME BIT(18)
+#define FLAG_NO_WAKE_UCAST BIT(19)
+#define FLAG_MNG_PT_ENABLED BIT(20)
+#define FLAG_RESET_OVERWRITES_LAA BIT(21)
+#define FLAG_TARC_SPEED_MODE_BIT BIT(22)
+#define FLAG_TARC_SET_BIT_ZERO BIT(23)
+#define FLAG_RX_NEEDS_RESTART BIT(24)
+#define FLAG_LSC_GIG_SPEED_DROP BIT(25)
+#define FLAG_SMART_POWER_DOWN BIT(26)
+#define FLAG_MSI_ENABLED BIT(27)
+/* reserved BIT(28) */
+#define FLAG_TSO_FORCE BIT(29)
+#define FLAG_RESTART_NOW BIT(30)
+#define FLAG_MSI_TEST_FAILED BIT(31)
+
+#define FLAG2_CRC_STRIPPING BIT(0)
+#define FLAG2_HAS_PHY_WAKEUP BIT(1)
+#define FLAG2_IS_DISCARDING BIT(2)
+#define FLAG2_DISABLE_ASPM_L1 BIT(3)
+#define FLAG2_HAS_PHY_STATS BIT(4)
+#define FLAG2_HAS_EEE BIT(5)
+#define FLAG2_DMA_BURST BIT(6)
+#define FLAG2_DISABLE_ASPM_L0S BIT(7)
+#define FLAG2_DISABLE_AIM BIT(8)
+#define FLAG2_CHECK_PHY_HANG BIT(9)
+#define FLAG2_NO_DISABLE_RX BIT(10)
+#define FLAG2_PCIM2PCI_ARBITER_WA BIT(11)
+#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
+#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -480,6 +481,8 @@ extern const char e1000e_driver_version[];
void e1000e_check_options(struct e1000_adapter *adapter);
void e1000e_set_ethtool_ops(struct net_device *netdev);
+int e1000e_open(struct net_device *netdev);
+int e1000e_close(struct net_device *netdev);
void e1000e_up(struct e1000_adapter *adapter);
void e1000e_down(struct e1000_adapter *adapter, bool reset);
void e1000e_reinit_locked(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 6cab1f30d..7aff68a4a 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -201,6 +201,9 @@ static int e1000_get_settings(struct net_device *netdev,
else
ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+ if (hw->phy.media_type != e1000_media_type_copper)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+
return 0;
}
@@ -236,8 +239,13 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
mac->forced_speed_duplex = ADVERTISE_100_FULL;
break;
case SPEED_1000 + DUPLEX_FULL:
- mac->autoneg = 1;
- adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+ mac->autoneg = 1;
+ adapter->hw.phy.autoneg_advertised =
+ ADVERTISE_1000_FULL;
+ } else {
+ mac->forced_speed_duplex = ADVERTISE_1000_FULL;
+ }
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
@@ -439,8 +447,9 @@ static void e1000_get_regs(struct net_device *netdev,
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
- regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
- adapter->pdev->device;
+ regs->version = (1u << 24) |
+ (adapter->pdev->revision << 16) |
+ adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
@@ -895,7 +904,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
- mask |= (1 << 18);
+ mask |= BIT(18);
break;
default:
break;
@@ -914,9 +923,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
/* SHRAH[9] different than the others */
if (i == 10)
- mask |= (1 << 30);
+ mask |= BIT(30);
else
- mask &= ~(1 << 30);
+ mask &= ~BIT(30);
}
if (mac->type == e1000_pch2lan) {
/* SHRAH[0,1,2] different than previous */
@@ -924,7 +933,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
mask &= 0xFFF4FFFF;
/* SHRAH[3] different than SHRAH[0,1,2] */
if (i == 4)
- mask |= (1 << 30);
+ mask |= BIT(30);
/* RAR[1-6] owned by management engine - skipping */
if (i > 0)
i += 6;
@@ -1019,7 +1028,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Test each interrupt */
for (i = 0; i < 10; i++) {
/* Interrupt to test */
- mask = 1 << i;
+ mask = BIT(i);
if (adapter->flags & FLAG_IS_ICH) {
switch (mask) {
@@ -1387,7 +1396,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
case e1000_phy_82579:
/* Disable PHY energy detect power down */
e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
- e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
+ e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~BIT(3));
/* Disable full chip energy detect */
e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
@@ -1453,7 +1462,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
/* disable autoneg */
ctrl = er32(TXCW);
- ctrl &= ~(1 << 31);
+ ctrl &= ~BIT(31);
ew32(TXCW, ctrl);
link = (er32(STATUS) & E1000_STATUS_LU);
@@ -1816,7 +1825,7 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ e1000e_close(netdev);
if (e1000_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1849,7 +1858,7 @@ static void e1000_diag_test(struct net_device *netdev,
clear_bit(__E1000_TESTING, &adapter->state);
if (if_running)
- dev_open(netdev);
+ e1000e_open(netdev);
} else {
/* Online tests */
@@ -2283,19 +2292,19 @@ static int e1000e_get_ts_info(struct net_device *netdev,
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE);
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_ALL));
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = (BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_ALL));
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
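Annotation: the two ethtool changes above share one idea: both MDI-X reporting and gigabit speed forcing depend on the PHY's media type. MDI/MDI-X describes twisted-pair pin swapping, so it is only meaningful on copper (hence ETH_TP_MDI_INVALID otherwise), and 1000BASE-T copper links can only reach gigabit through autonegotiation (master/slave resolution requires it), while fiber/serdes links may simply be forced. A hypothetical helper capturing that rule (not part of the driver):

	/* true if a 1000/Full request must go through autonegotiation */
	static bool e1000_gig_needs_autoneg(struct e1000_hw *hw)
	{
		return hw->phy.media_type == e1000_media_type_copper;
	}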
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index c0f4887ea..3e11322d8 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1048,7 +1048,7 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
while (value > PCI_LTR_VALUE_MASK) {
scale++;
- value = DIV_ROUND_UP(value, (1 << 5));
+ value = DIV_ROUND_UP(value, BIT(5));
}
if (scale > E1000_LTRV_SCALE_MAX) {
e_dbg("Invalid LTR latency scale %d\n", scale);
@@ -1573,7 +1573,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
- phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
+ phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
break;
@@ -2044,9 +2044,9 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
/* Restore SMBus frequency */
if (freq--) {
phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
- phy_data |= (freq & (1 << 0)) <<
+ phy_data |= (freq & BIT(0)) <<
HV_SMB_ADDR_FREQ_LOW_SHIFT;
- phy_data |= (freq & (1 << 1)) <<
+ phy_data |= (freq & BIT(1)) <<
(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
} else {
e_dbg("Unsupported SMB frequency in PHY\n");
@@ -2530,7 +2530,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
/* disable Rx path while enabling/disabling workaround */
e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
- ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14));
if (ret_val)
return ret_val;
@@ -2561,7 +2561,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
/* Enable jumbo frame workaround in the MAC */
mac_reg = er32(FFLT_DBG);
- mac_reg &= ~(1 << 14);
+ mac_reg &= ~BIT(14);
mac_reg |= (7 << 15);
ew32(FFLT_DBG, mac_reg);
@@ -2576,7 +2576,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_CTRL_OFFSET,
- data | (1 << 0));
+ data | BIT(0));
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
@@ -2600,7 +2600,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, PHY_REG(769, 16), &data);
- data &= ~(1 << 13);
+ data &= ~BIT(13);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
return ret_val;
@@ -2614,7 +2614,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, HV_PM_CTRL, &data);
- ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10));
if (ret_val)
return ret_val;
} else {
@@ -2634,7 +2634,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_CTRL_OFFSET,
- data & ~(1 << 0));
+ data & ~BIT(0));
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
@@ -2657,7 +2657,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, PHY_REG(769, 16), &data);
- data |= (1 << 13);
+ data |= BIT(13);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
return ret_val;
@@ -2671,13 +2671,13 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, HV_PM_CTRL, &data);
- ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10));
if (ret_val)
return ret_val;
}
/* re-enable Rx path after enabling/disabling workaround */
- return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
+ return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14));
}
/**
@@ -4841,7 +4841,7 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
/* Extended Device Control */
reg = er32(CTRL_EXT);
- reg |= (1 << 22);
+ reg |= BIT(22);
/* Enable PHY low-power state when MAC is at D3 w/o WoL */
if (hw->mac.type >= e1000_pchlan)
reg |= E1000_CTRL_EXT_PHYPDEN;
@@ -4849,34 +4849,34 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
reg = er32(TARC(0));
if (hw->mac.type == e1000_ich8lan)
- reg |= (1 << 28) | (1 << 29);
- reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
+ reg |= BIT(28) | BIT(29);
+ reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27);
ew32(TARC(0), reg);
/* Transmit Arbitration Control 1 */
reg = er32(TARC(1));
if (er32(TCTL) & E1000_TCTL_MULR)
- reg &= ~(1 << 28);
+ reg &= ~BIT(28);
else
- reg |= (1 << 28);
- reg |= (1 << 24) | (1 << 26) | (1 << 30);
+ reg |= BIT(28);
+ reg |= BIT(24) | BIT(26) | BIT(30);
ew32(TARC(1), reg);
/* Device Status */
if (hw->mac.type == e1000_ich8lan) {
reg = er32(STATUS);
- reg &= ~(1 << 31);
+ reg &= ~BIT(31);
ew32(STATUS, reg);
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 2311f6003..67163ca89 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -73,10 +73,10 @@
(ID_LED_OFF1_ON2 << 4) | \
(ID_LED_DEF1_DEF2))
-#define E1000_ICH_NVM_SIG_WORD 0x13
-#define E1000_ICH_NVM_SIG_MASK 0xC000
-#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
-#define E1000_ICH_NVM_SIG_VALUE 0x80
+#define E1000_ICH_NVM_SIG_WORD 0x13u
+#define E1000_ICH_NVM_SIG_MASK 0xC000u
+#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0u
+#define E1000_ICH_NVM_SIG_VALUE 0x80u
#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index e59d7c283..b322011ec 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -346,7 +346,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
- hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
mc_addr_list += (ETH_ALEN);
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 9b4ec13d9..2b2e2f8c6 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -242,7 +242,7 @@ static void e1000e_dump(struct e1000_adapter *adapter)
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start last_rx\n");
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, netdev->trans_start, netdev->last_rx);
+ netdev->state, dev_trans_start(netdev), netdev->last_rx);
}
/* Print Registers */
@@ -317,8 +317,8 @@ static void e1000e_dump(struct e1000_adapter *adapter)
else
next_desc = "";
pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
- (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
- ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
+ (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')),
i,
(unsigned long long)le64_to_cpu(u0->a),
(unsigned long long)le64_to_cpu(u0->b),
@@ -2018,7 +2018,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
adapter->eiac_mask |= E1000_IMS_OTHER;
/* Cause Tx interrupts on every write back */
- ivar |= (1 << 31);
+ ivar |= BIT(31);
ew32(IVAR, ivar);
@@ -2709,7 +2709,7 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev,
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
- vfta |= (1 << (vid & 0x1F));
+ vfta |= BIT((vid & 0x1F));
hw->mac.ops.write_vfta(hw, index, vfta);
}
@@ -2737,7 +2737,7 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
- vfta &= ~(1 << (vid & 0x1F));
+ vfta &= ~BIT((vid & 0x1F));
hw->mac.ops.write_vfta(hw, index, vfta);
}
@@ -2789,7 +2789,7 @@ static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
}
/**
- * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping
+ * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
* @adapter: board private structure to initialize
**/
static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
@@ -2878,7 +2878,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
/* Enable this decision filter in MANC2H */
if (mdef)
- manc2h |= (1 << i);
+ manc2h |= BIT(i);
j |= mdef;
}
@@ -2891,7 +2891,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
if (er32(MDEF(i)) == 0) {
ew32(MDEF(i), (E1000_MDEF_PORT_623 |
E1000_MDEF_PORT_664));
- manc2h |= (1 << 1);
+ manc2h |= BIT(1);
j++;
break;
}
@@ -2971,7 +2971,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
/* set the speed mode bit, we'll clear it if we're not at
* gigabit link later
*/
-#define SPEED_MODE_BIT (1 << 21)
+#define SPEED_MODE_BIT BIT(21)
tarc |= SPEED_MODE_BIT;
ew32(TARC(0), tarc);
}
@@ -3071,12 +3071,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
phy_data &= 0xfff8;
- phy_data |= (1 << 2);
+ phy_data |= BIT(2);
e1e_wphy(hw, PHY_REG(770, 26), phy_data);
e1e_rphy(hw, 22, &phy_data);
phy_data &= 0x0fff;
- phy_data |= (1 << 14);
+ phy_data |= BIT(14);
e1e_wphy(hw, 0x10, 0x2823);
e1e_wphy(hw, 0x11, 0x0003);
e1e_wphy(hw, 22, phy_data);
@@ -3368,12 +3368,12 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
* combining
*/
netdev_for_each_uc_addr(ha, netdev) {
- int rval;
+ int ret_val;
if (!rar_entries)
break;
- rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
- if (rval < 0)
+ ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+ if (ret_val < 0)
return -ENOMEM;
count++;
}
@@ -3503,8 +3503,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
!(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
u32 fextnvm7 = er32(FEXTNVM7);
- if (!(fextnvm7 & (1 << 0))) {
- ew32(FEXTNVM7, fextnvm7 | (1 << 0));
+ if (!(fextnvm7 & BIT(0))) {
+ ew32(FEXTNVM7, fextnvm7 | BIT(0));
e1e_flush();
}
}
@@ -3580,7 +3580,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
bool is_l4 = false;
bool is_l2 = false;
u32 regval;
- s32 ret_val;
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return -EINVAL;
@@ -3719,16 +3718,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
er32(RXSTMPH);
er32(TXSTMPH);
- /* Get and set the System Time Register SYSTIM base frequency */
- ret_val = e1000e_get_base_timinca(adapter, &regval);
- if (ret_val)
- return ret_val;
- ew32(TIMINCA, regval);
-
- /* reset the ns time counter */
- timecounter_init(&adapter->tc, &adapter->cc,
- ktime_to_ns(ktime_get_real()));
-
return 0;
}
@@ -3839,7 +3828,7 @@ static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
/* update thresholds: prefetch threshold to 31, host threshold to 1
* and make sure the granularity is "descriptors" and not "cache lines"
*/
- rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
+ rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC);
ew32(RXDCTL(0), rxdctl);
/* momentarily enable the RX ring for the changes to take effect */
@@ -3885,6 +3874,53 @@ static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
}
/**
+ * e1000e_systim_reset - reset the timesync registers after a hardware reset
+ * @adapter: board private structure
+ *
+ * When the MAC is reset, all hardware bits for timesync will be reset to the
+ * default values. This function will restore the settings that were last in place.
+ * Since the clock SYSTIME registers are reset, we will simply restore the
+ * cyclecounter to the kernel real clock time.
+ **/
+static void e1000e_systim_reset(struct e1000_adapter *adapter)
+{
+ struct ptp_clock_info *info = &adapter->ptp_clock_info;
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
+ u32 timinca;
+ s32 ret_val;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return;
+
+ if (info->adjfreq) {
+ /* restore the previous ptp frequency delta */
+ ret_val = info->adjfreq(info, adapter->ptp_delta);
+ } else {
+ /* set the default base frequency if no adjustment possible */
+ ret_val = e1000e_get_base_timinca(adapter, &timinca);
+ if (!ret_val)
+ ew32(TIMINCA, timinca);
+ }
+
+ if (ret_val) {
+ dev_warn(&adapter->pdev->dev,
+ "Failed to restore TIMINCA clock rate delta: %d\n",
+ ret_val);
+ return;
+ }
+
+ /* reset the systim ns time counter */
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ /* restore the previous hwtstamp configuration settings */
+ e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+}
+
+/**
* e1000e_reset - bring the hardware into a known good state
*
* This function boots the hardware and enables some settings that
@@ -4063,8 +4099,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
e1000e_reset_adaptive(hw);
- /* initialize systim and reset the ns time counter */
- e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+ /* restore systim and hwtstamp settings */
+ e1000e_systim_reset(adapter);
/* Set EEE advertisement as appropriate */
if (adapter->flags2 & FLAG2_HAS_EEE) {
@@ -4275,7 +4311,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
struct e1000_hw *hw = &adapter->hw;
- u32 systimel_1, systimel_2, systimeh;
+ u32 systimel, systimeh;
cycle_t systim, systim_next;
/* SYSTIMH latching upon SYSTIML read does not work well.
* This means that if SYSTIML overflows after we read it but before
@@ -4283,24 +4319,25 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
* will experience a huge non-linear increment in the systime value.
* To fix that we test for overflow and, if true, re-read systime.
*/
- systimel_1 = er32(SYSTIML);
+ systimel = er32(SYSTIML);
systimeh = er32(SYSTIMH);
- systimel_2 = er32(SYSTIML);
- /* Check for overflow. If there was no overflow, use the values */
- if (systimel_1 < systimel_2) {
- systim = (cycle_t)systimel_1;
- systim |= (cycle_t)systimeh << 32;
- } else {
- /* There was an overflow, read again SYSTIMH, and use
- * systimel_2
- */
- systimeh = er32(SYSTIMH);
- systim = (cycle_t)systimel_2;
- systim |= (cycle_t)systimeh << 32;
+ /* Is systimel so large that an overflow is possible? */
+ if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
+ u32 systimel_2 = er32(SYSTIML);
+ if (systimel > systimel_2) {
+ /* There was an overflow, read again SYSTIMH, and use
+ * systimel_2
+ */
+ systimeh = er32(SYSTIMH);
+ systimel = systimel_2;
+ }
}
+ systim = (cycle_t)systimel;
+ systim |= (cycle_t)systimeh << 32;
if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
- u64 incvalue, time_delta, rem, temp;
+ u64 time_delta, rem, temp;
+ u32 incvalue;
int i;
/* errata for 82574/82583 possible bad bits read from SYSTIMH/L
@@ -4495,7 +4532,7 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
}
/**
- * e1000_open - Called when a network interface is made active
+ * e1000e_open - Called when a network interface is made active
* @netdev: network interface device structure
*
* Returns 0 on success, negative value on failure
@@ -4506,7 +4543,7 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
**/
-static int e1000_open(struct net_device *netdev)
+int e1000e_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4604,7 +4641,7 @@ err_setup_tx:
}
/**
- * e1000_close - Disables a network interface
+ * e1000e_close - Disables a network interface
* @netdev: network interface device structure
*
* Returns 0, this is not allowed to fail
@@ -4614,7 +4651,7 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int e1000_close(struct net_device *netdev)
+int e1000e_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
@@ -6861,7 +6898,7 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
le16_to_cpus(&buf);
- if (!ret_val && (!(buf & (1 << 0)))) {
+ if (!ret_val && (!(buf & BIT(0)))) {
/* Deep Smart Power Down (DSPD) */
dev_warn(&adapter->pdev->dev,
"Warning: detected DSPD enabled in EEPROM\n");
@@ -6878,6 +6915,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_RXFCS;
+ /* Since there is no support for separate Rx/Tx vlan accel
+ * enable/disable make sure Tx flag is always in same state as Rx.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
return features;
}
@@ -6920,8 +6965,8 @@ static int e1000_set_features(struct net_device *netdev,
}
static const struct net_device_ops e1000e_netdev_ops = {
- .ndo_open = e1000_open,
- .ndo_stop = e1000_close,
+ .ndo_open = e1000e_open,
+ .ndo_stop = e1000e_close,
.ndo_start_xmit = e1000_xmit_frame,
.ndo_get_stats64 = e1000e_get_stats64,
.ndo_set_rx_mode = e1000e_set_rx_mode,
@@ -6965,7 +7010,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int bars, i, err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
- s32 rval = 0;
+ s32 ret_val = 0;
if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
aspm_disable_flag = PCIE_LINK_STATE_L0S;
@@ -7200,18 +7245,18 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
(adapter->hw.bus.func == 1))
- rval = e1000_read_nvm(&adapter->hw,
+ ret_val = e1000_read_nvm(&adapter->hw,
NVM_INIT_CONTROL3_PORT_B,
1, &eeprom_data);
else
- rval = e1000_read_nvm(&adapter->hw,
+ ret_val = e1000_read_nvm(&adapter->hw,
NVM_INIT_CONTROL3_PORT_A,
1, &eeprom_data);
}
/* fetch WoL from EEPROM */
- if (rval)
- e_dbg("NVM read error getting WoL initial values: %d\n", rval);
+ if (ret_val)
+ e_dbg("NVM read error getting WoL initial values: %d\n", ret_val);
else if (eeprom_data & eeprom_apme_mask)
adapter->eeprom_wol |= E1000_WUFC_MAG;
@@ -7231,13 +7276,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
device_wakeup_enable(&pdev->dev);
/* save off EEPROM version number */
- rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+ ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
- if (rval) {
- e_dbg("NVM read error getting EEPROM version: %d\n", rval);
+ if (ret_val) {
+ e_dbg("NVM read error getting EEPROM version: %d\n", ret_val);
adapter->eeprom_vers = 0;
}
+ /* init PTP hardware clock */
+ e1000e_ptp_init(adapter);
+
/* reset the hardware with the new settings */
e1000e_reset(adapter);
@@ -7256,9 +7304,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- /* init PTP hardware clock */
- e1000e_ptp_init(adapter);
-
e1000_print_device_info(adapter);
if (pci_dev_run_wake(pdev))
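Annotation: two related timesync fixes live in the netdev.c hunks above. First, e1000e_cyclecounter_read() guards against a torn 64-bit read: SYSTIMH does not latch when SYSTIML is read, so the low word is re-read only when it is close enough to wrapping that an overflow between the two reads is possible. Second, e1000e_systim_reset() re-applies the saved PTP frequency delta and hwtstamp configuration after every MAC reset instead of unconditionally reprogramming the base TIMINCA. A condensed sketch of the torn-read guard, assuming the driver's er32() accessors and E1000_TIMINCA_INCVALUE_MASK bound:

	static u64 systim_read(struct e1000_hw *hw)
	{
		u32 lo = er32(SYSTIML);
		u32 hi = er32(SYSTIMH);

		/* only when lo could wrap before a second read */
		if (lo >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
			u32 lo2 = er32(SYSTIML);

			if (lo > lo2) {		/* wrapped between reads */
				hi = er32(SYSTIMH);
				lo = lo2;
			}
		}
		return ((u64)hi << 32) | lo;
	}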
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 49f205c02..2efd80dfd 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -67,7 +67,7 @@ static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
u32 eecd = er32(EECD);
u32 mask;
- mask = 0x01 << (count - 1);
+ mask = BIT(count - 1);
if (nvm->type == e1000_nvm_eeprom_spi)
eecd |= E1000_EECD_DO;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index de13aeaca..d78d47b41 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2894,11 +2894,11 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
if ((hw->phy.type == e1000_phy_82578) &&
(hw->phy.revision >= 1) &&
(hw->phy.addr == 2) &&
- !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
+ !(MAX_PHY_REG_ADDRESS & reg) && (data & BIT(11))) {
u16 data2 = 0x7EFF;
ret_val = e1000_access_phy_debug_regs_hv(hw,
- (1 << 6) | 0x3,
+ BIT(6) | 0x3,
&data2, false);
if (ret_val)
goto out;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 55bfe4735..3027f63ee 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -104,9 +104,9 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define BM_WUC_DATA_OPCODE 0x12
#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE
#define BM_WUC_ENABLE_REG 17
-#define BM_WUC_ENABLE_BIT (1 << 2)
-#define BM_WUC_HOST_WU_BIT (1 << 4)
-#define BM_WUC_ME_WU_BIT (1 << 5)
+#define BM_WUC_ENABLE_BIT BIT(2)
+#define BM_WUC_HOST_WU_BIT BIT(4)
+#define BM_WUC_ME_WU_BIT BIT(5)
#define PHY_UPPER_SHIFT 21
#define BM_PHY_REG(page, reg) \
@@ -124,8 +124,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define I82578_ADDR_REG 29
#define I82577_ADDR_REG 16
#define I82577_CFG_REG 22
-#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
+#define I82577_CFG_ASSERT_CRS_ON_TX BIT(15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift */
#define I82577_CTRL_REG 23
/* 82577 specific PHY registers */
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index e2ff3ef75..2e1b17ad5 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -79,6 +79,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
ew32(TIMINCA, timinca);
+ adapter->ptp_delta = delta;
+
spin_unlock_irqrestore(&adapter->systim_lock, flags);
return 0;
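Annotation: storing the delta here is what makes e1000e_systim_reset() in netdev.c work. adjfreq is the only place the PTP daemon's frequency correction is known, so the driver checkpoints it in adapter->ptp_delta and replays it through info->adjfreq() after a MAC reset. Sketch of the replay half, assuming the ptp_clock_info set up in this file:

	/* after a MAC reset: re-apply the last known frequency correction */
	static void restore_ptp_freq(struct e1000_adapter *adapter)
	{
		struct ptp_clock_info *info = &adapter->ptp_clock_info;

		if (info->adjfreq)
			info->adjfreq(info, adapter->ptp_delta);
	}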
diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile
index b006ff66d..cac645329 100644
--- a/drivers/net/ethernet/intel/fm10k/Makefile
+++ b/drivers/net/ethernet/intel/fm10k/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
-# Intel Ethernet Switch Host Interface Driver
-# Copyright(c) 2013 - 2015 Intel Corporation.
+# Intel(R) Ethernet Switch Host Interface Driver
+# Copyright(c) 2013 - 2016 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -22,7 +22,7 @@
################################################################################
#
-# Makefile for the Intel(R) FM10000 Ethernet Switch Host Interface driver
+# Makefile for the Intel(R) Ethernet Switch Host Interface Driver
#
obj-$(CONFIG_FM10K) += fm10k.o
@@ -30,7 +30,6 @@ obj-$(CONFIG_FM10K) += fm10k.o
fm10k-y := fm10k_main.o \
fm10k_common.o \
fm10k_pci.o \
- fm10k_ptp.o \
fm10k_netdev.o \
fm10k_ethtool.o \
fm10k_pf.o \
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index b34bb008b..fcf106e54 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,9 +27,6 @@
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
-#include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
-#include <linux/ptp_clock_kernel.h>
#include "fm10k_pf.h"
#include "fm10k_vf.h"
@@ -262,12 +259,12 @@ struct fm10k_intfc {
unsigned long state;
u32 flags;
-#define FM10K_FLAG_RESET_REQUESTED (u32)(1 << 0)
-#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(1 << 1)
-#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(1 << 2)
-#define FM10K_FLAG_RX_TS_ENABLED (u32)(1 << 3)
-#define FM10K_FLAG_SWPRI_CONFIG (u32)(1 << 4)
-#define FM10K_FLAG_DEBUG_STATS (u32)(1 << 5)
+#define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0))
+#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1))
+#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2))
+#define FM10K_FLAG_RX_TS_ENABLED (u32)(BIT(3))
+#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(4))
+#define FM10K_FLAG_DEBUG_STATS (u32)(BIT(5))
int xcast_mode;
/* Tx fast path data */
@@ -333,6 +330,7 @@ struct fm10k_intfc {
unsigned long last_reset;
unsigned long link_down_event;
bool host_ready;
+ bool lport_map_failed;
u32 reta[FM10K_RETA_SIZE];
u32 rssrk[FM10K_RSSRK_SIZE];
@@ -342,22 +340,8 @@ struct fm10k_intfc {
#ifdef CONFIG_DEBUG_FS
struct dentry *dbg_intfc;
-
#endif /* CONFIG_DEBUG_FS */
- struct ptp_clock_info ptp_caps;
- struct ptp_clock *ptp_clock;
-
- struct sk_buff_head ts_tx_skb_queue;
- u32 tx_hwtstamp_timeouts;
- struct hwtstamp_config ts_config;
- /* We are unable to actually adjust the clock beyond the frequency
- * value. Once the clock is started there is no resetting it. As
- * such we maintain a separate offset from the actual hardware clock
- * to allow for offset adjustment.
- */
- s64 ptp_adjust;
- rwlock_t systime_lock;
#ifdef CONFIG_DCB
u8 pfc_en;
#endif
@@ -510,6 +494,8 @@ int fm10k_close(struct net_device *netdev);
/* Ethtool */
void fm10k_set_ethtool_ops(struct net_device *dev);
+u32 fm10k_get_reta_size(struct net_device *netdev);
+void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir);
/* IOV */
s32 fm10k_iov_event(struct fm10k_intfc *interface);
@@ -544,21 +530,6 @@ static inline void fm10k_dbg_init(void) {}
static inline void fm10k_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
-/* Time Stamping */
-void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
- struct skb_shared_hwtstamps *hwtstamp,
- u64 systime);
-void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb);
-void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
- u64 systime);
-void fm10k_ts_reset(struct fm10k_intfc *interface);
-void fm10k_ts_init(struct fm10k_intfc *interface);
-void fm10k_ts_tx_subtask(struct fm10k_intfc *interface);
-void fm10k_ptp_register(struct fm10k_intfc *interface);
-void fm10k_ptp_unregister(struct fm10k_intfc *interface);
-int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
-int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
-
/* DCB */
#ifdef CONFIG_DCB
void fm10k_dcbnl_set_ops(struct net_device *dev);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index 6cfae6ac0..5bbf19cfe 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
index 45e4e5b1f..50f71e997 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index 2be436183..db4bd8bf9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index 5d6137faf..5116fd043 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 2f6a05b57..9c0d87503 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -77,19 +77,6 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
FM10K_STAT("tx_hang_count", tx_timeout_count),
-
- FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
-};
-
-static const struct fm10k_stats fm10k_gstrings_debug_stats[] = {
- FM10K_STAT("hw_sm_mbx_full", hw_sm_mbx_full),
- FM10K_STAT("hw_csum_tx_good", hw_csum_tx_good),
- FM10K_STAT("hw_csum_rx_good", hw_csum_rx_good),
- FM10K_STAT("rx_switch_errors", rx_switch_errors),
- FM10K_STAT("rx_drops", rx_drops),
- FM10K_STAT("rx_pp_errors", rx_pp_errors),
- FM10K_STAT("rx_link_errors", rx_link_errors),
- FM10K_STAT("rx_length_errors", rx_length_errors),
};
static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
@@ -121,13 +108,21 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed),
};
+#define FM10K_QUEUE_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(struct fm10k_ring, _stat), \
+ .stat_offset = offsetof(struct fm10k_ring, _stat) \
+}
+
+static const struct fm10k_stats fm10k_gstrings_queue_stats[] = {
+ FM10K_QUEUE_STAT("packets", stats.packets),
+ FM10K_QUEUE_STAT("bytes", stats.bytes),
+};
+
#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
-#define FM10K_DEBUG_STATS_LEN ARRAY_SIZE(fm10k_gstrings_debug_stats)
#define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
#define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
-
-#define FM10K_QUEUE_STATS_LEN(_n) \
- ((_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))
+#define FM10K_QUEUE_STATS_LEN ARRAY_SIZE(fm10k_gstrings_queue_stats)
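[Editor's note: the FM10K_QUEUE_STAT macro above records a display name plus the size and byte offset of a struct fm10k_ring member, so one generic walker can later copy any mix of member widths. A minimal user-space sketch of the same table-driven idea — the struct and names here are illustrative, not the driver's:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ring { uint64_t packets; uint64_t bytes; };

struct stat_def {
	const char *name;
	size_t size;		/* sizeof the member */
	size_t offset;		/* offsetof the member */
};

/* mirrors FIELD_SIZEOF(): sizeof a member without needing an instance */
#define QUEUE_STAT(n, m) \
	{ .name = n, .size = sizeof(((struct ring *)0)->m), \
	  .offset = offsetof(struct ring, m) }

static const struct stat_def queue_stats[] = {
	QUEUE_STAT("packets", packets),
	QUEUE_STAT("bytes", bytes),
};

int main(void)
{
	struct ring r = { .packets = 42, .bytes = 2048 };
	size_t i;

	for (i = 0; i < sizeof(queue_stats) / sizeof(queue_stats[0]); i++) {
		/* locate the member by offset, read it by recorded size */
		const char *p = (const char *)&r + queue_stats[i].offset;

		if (queue_stats[i].size == sizeof(uint64_t))
			printf("%s = %llu\n", queue_stats[i].name,
			       (unsigned long long)*(const uint64_t *)p);
	}
	return 0;
}
]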
#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
FM10K_NETDEV_STATS_LEN + \
@@ -145,77 +140,56 @@ enum fm10k_self_test_types {
};
enum {
- FM10K_PRV_FLAG_DEBUG_STATS,
FM10K_PRV_FLAG_LEN,
};
static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
- "debug-statistics",
};
+static void fm10k_add_stat_strings(char **p, const char *prefix,
+ const struct fm10k_stats stats[],
+ const unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ snprintf(*p, ETH_GSTRING_LEN, "%s%s",
+ prefix, stats[i].stat_string);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_iov_data *iov_data = interface->iov_data;
char *p = (char *)data;
unsigned int i;
- unsigned int j;
- for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_net_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_net_stats,
+ FM10K_NETDEV_STATS_LEN);
- for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_global_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats,
+ FM10K_GLOBAL_STATS_LEN);
- if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
- for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_debug_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- }
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats,
+ FM10K_MBX_STATS_LEN);
- for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_mbx_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ if (interface->hw.mac.type != fm10k_mac_vf)
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats,
+ FM10K_PF_STATS_LEN);
- if (interface->hw.mac.type != fm10k_mac_vf) {
- for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < interface->hw.mac.max_queues; i++) {
+ char prefix[ETH_GSTRING_LEN];
- if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
- for (i = 0; i < iov_data->num_vfs; i++) {
- for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
- snprintf(p,
- ETH_GSTRING_LEN,
- "vf_%u_%s", i,
- fm10k_gstrings_mbx_stats[j].stat_string);
- p += ETH_GSTRING_LEN;
- }
- }
- }
+ snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i);
+ fm10k_add_stat_strings(&p, prefix,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
- for (i = 0; i < interface->hw.mac.max_queues; i++) {
- snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
+ snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i);
+ fm10k_add_stat_strings(&p, prefix,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
}
}
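[Editor's note: fm10k_add_stat_strings writes one fixed-width ETH_GSTRING_LEN entry per statistic, which is what lets the per-queue loop reuse a single table under "tx_queue_%u_" / "rx_queue_%u_" prefixes. A stand-alone sketch of that layout; the 32-byte width is an assumption standing in for ETH_GSTRING_LEN:

#include <stdio.h>

#define GSTRING_LEN 32	/* stand-in for ETH_GSTRING_LEN */

static void add_strings(char **p, const char *prefix,
			const char *const names[], unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		snprintf(*p, GSTRING_LEN, "%s%s", prefix, names[i]);
		*p += GSTRING_LEN;	/* entries are fixed width */
	}
}

int main(void)
{
	static const char *const queue_stats[] = { "packets", "bytes" };
	char data[4 * GSTRING_LEN];
	char *p = data;
	char prefix[GSTRING_LEN];
	unsigned int q;

	for (q = 0; q < 1; q++) {	/* one queue keeps the demo short */
		snprintf(prefix, sizeof(prefix), "tx_queue_%u_", q);
		add_strings(&p, prefix, queue_stats, 2);
		snprintf(prefix, sizeof(prefix), "rx_queue_%u_", q);
		add_strings(&p, prefix, queue_stats, 2);
	}

	for (q = 0; q < 4; q++)
		puts(data + q * GSTRING_LEN);
	return 0;
}
]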
@@ -242,7 +216,6 @@ static void fm10k_get_strings(struct net_device *dev,
static int fm10k_get_sset_count(struct net_device *dev, int sset)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
int stats_len = FM10K_STATIC_STATS_LEN;
@@ -250,19 +223,11 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_TEST:
return FM10K_TEST_LEN;
case ETH_SS_STATS:
- stats_len += FM10K_QUEUE_STATS_LEN(hw->mac.max_queues);
+ stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN;
if (hw->mac.type != fm10k_mac_vf)
stats_len += FM10K_PF_STATS_LEN;
- if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
- stats_len += FM10K_DEBUG_STATS_LEN;
-
- if (iov_data)
- stats_len += FM10K_MBX_STATS_LEN *
- iov_data->num_vfs;
- }
-
return stats_len;
case ETH_SS_PRIV_FLAGS:
return FM10K_PRV_FLAG_LEN;
@@ -271,93 +236,80 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
}
}
-static void fm10k_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats __always_unused *stats,
- u64 *data)
+static void fm10k_add_ethtool_stats(u64 **data, void *pointer,
+ const struct fm10k_stats stats[],
+ const unsigned int size)
{
- const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64);
- struct fm10k_intfc *interface = netdev_priv(netdev);
- struct fm10k_iov_data *iov_data = interface->iov_data;
- struct net_device_stats *net_stats = &netdev->stats;
+ unsigned int i;
char *p;
- int i, j;
- fm10k_update_stats(interface);
-
- for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
- p = (char *)net_stats + fm10k_gstrings_net_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_net_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ if (!pointer) {
+ /* memory is not zero-allocated, so we must clear it ourselves */
+ for (i = 0; i < size; i++)
+ *((*data)++) = 0;
+ return;
}
- for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
- p = (char *)interface +
- fm10k_gstrings_global_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_global_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
+ for (i = 0; i < size; i++) {
+ p = (char *)pointer + stats[i].stat_offset;
- if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
- for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
- p = (char *)interface +
- fm10k_gstrings_debug_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ switch (stats[i].sizeof_stat) {
+ case sizeof(u64):
+ *((*data)++) = *(u64 *)p;
+ break;
+ case sizeof(u32):
+ *((*data)++) = *(u32 *)p;
+ break;
+ case sizeof(u16):
+ *((*data)++) = *(u16 *)p;
+ break;
+ case sizeof(u8):
+ *((*data)++) = *(u8 *)p;
+ break;
+ default:
+ *((*data)++) = 0;
}
}
+}
- for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
- p = (char *)&interface->hw.mbx +
- fm10k_gstrings_mbx_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
+static void fm10k_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct net_device_stats *net_stats = &netdev->stats;
+ int i;
- if (interface->hw.mac.type != fm10k_mac_vf) {
- for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
- p = (char *)interface +
- fm10k_gstrings_pf_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- }
+ fm10k_update_stats(interface);
- if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
- for (i = 0; i < iov_data->num_vfs; i++) {
- struct fm10k_vf_info *vf_info;
+ fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats,
+ FM10K_NETDEV_STATS_LEN);
- vf_info = &iov_data->vf_info[i];
+ fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats,
+ FM10K_GLOBAL_STATS_LEN);
- /* skip stats if we don't have a vf info */
- if (!vf_info) {
- data += FM10K_MBX_STATS_LEN;
- continue;
- }
+ fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
+ fm10k_gstrings_mbx_stats,
+ FM10K_MBX_STATS_LEN);
- for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
- p = (char *)&vf_info->mbx +
- fm10k_gstrings_mbx_stats[j].stat_offset;
- *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- }
+ if (interface->hw.mac.type != fm10k_mac_vf) {
+ fm10k_add_ethtool_stats(&data, interface,
+ fm10k_gstrings_pf_stats,
+ FM10K_PF_STATS_LEN);
}
for (i = 0; i < interface->hw.mac.max_queues; i++) {
struct fm10k_ring *ring;
- u64 *queue_stat;
ring = interface->tx_ring[i];
- if (ring)
- queue_stat = (u64 *)&ring->stats;
- for (j = 0; j < stat_count; j++)
- *(data++) = ring ? queue_stat[j] : 0;
+ fm10k_add_ethtool_stats(&data, ring,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
ring = interface->rx_ring[i];
- if (ring)
- queue_stat = (u64 *)&ring->stats;
- for (j = 0; j < stat_count; j++)
- *(data++) = ring ? queue_stat[j] : 0;
+ fm10k_add_ethtool_stats(&data, ring,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
}
}
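[Editor's note: fm10k_add_ethtool_stats dispatches on the recorded member size and emits zeros when the backing object (for instance a ring that was never allocated) is NULL, so the value array always lines up with the advertised strings. A hedged user-space sketch of that contract:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stat_def { size_t size; size_t offset; };

struct ring { uint32_t restarts; uint64_t bytes; };

static const struct stat_def ring_stats[] = {
	{ sizeof(uint32_t), offsetof(struct ring, restarts) },
	{ sizeof(uint64_t), offsetof(struct ring, bytes) },
};

/* copy each described member of *base into *data, or zeros if !base */
static void add_stats(uint64_t **data, const void *base,
		      const struct stat_def stats[], unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		uint64_t v = 0;

		if (base) {
			const char *p = (const char *)base + stats[i].offset;

			switch (stats[i].size) {
			case sizeof(uint64_t): v = *(const uint64_t *)p; break;
			case sizeof(uint32_t): v = *(const uint32_t *)p; break;
			case sizeof(uint16_t): v = *(const uint16_t *)p; break;
			case sizeof(uint8_t):  v = *(const uint8_t *)p;  break;
			}
		}
		*(*data)++ = v;	/* always one u64 slot per stat */
	}
}

int main(void)
{
	struct ring r = { 7, 1500 };
	uint64_t out[4], *d = out;

	add_stats(&d, &r, ring_stats, 2);	/* live ring */
	add_stats(&d, NULL, ring_stats, 2);	/* missing ring -> zeros */

	printf("%llu %llu %llu %llu\n",
	       (unsigned long long)out[0], (unsigned long long)out[1],
	       (unsigned long long)out[2], (unsigned long long)out[3]);
	return 0;
}
]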
@@ -425,7 +377,7 @@ static void fm10k_get_regs(struct net_device *netdev,
u32 *buff = p;
u16 i;
- regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+ regs->version = BIT(24) | (hw->revision_id << 16) | hw->device_id;
switch (hw->mac.type) {
case fm10k_mac_pf:
@@ -935,15 +887,15 @@ static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 attr_flag, test_msg[6];
unsigned long timeout;
- int err;
+ int err = -EINVAL;
/* For now this is a VF only feature */
if (hw->mac.type != fm10k_mac_vf)
return 0;
/* loop through both nested and unnested attribute types */
- for (attr_flag = (1 << FM10K_TEST_MSG_UNSET);
- attr_flag < (1 << (2 * FM10K_TEST_MSG_NESTED));
+ for (attr_flag = BIT(FM10K_TEST_MSG_UNSET);
+ attr_flag < BIT(2 * FM10K_TEST_MSG_NESTED);
attr_flag += attr_flag) {
/* generate message to be tested */
fm10k_tlv_msg_test_create(test_msg, attr_flag);
@@ -1001,35 +953,56 @@ static void fm10k_self_test(struct net_device *dev,
static u32 fm10k_get_priv_flags(struct net_device *netdev)
{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- u32 priv_flags = 0;
-
- if (interface->flags & FM10K_FLAG_DEBUG_STATS)
- priv_flags |= 1 << FM10K_PRV_FLAG_DEBUG_STATS;
-
- return priv_flags;
+ return 0;
}
static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
- struct fm10k_intfc *interface = netdev_priv(netdev);
-
- if (priv_flags >= (1 << FM10K_PRV_FLAG_LEN))
+ if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN))
return -EINVAL;
- if (priv_flags & (1 << FM10K_PRV_FLAG_DEBUG_STATS))
- interface->flags |= FM10K_FLAG_DEBUG_STATS;
- else
- interface->flags &= ~FM10K_FLAG_DEBUG_STATS;
-
return 0;
}
-static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
+u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
{
return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
}
+void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
+{
+ u16 rss_i = interface->ring_feature[RING_F_RSS].indices;
+ struct fm10k_hw *hw = &interface->hw;
+ u32 table[4];
+ int i, j;
+
+ /* record entries to reta table */
+ for (i = 0; i < FM10K_RETA_SIZE; i++) {
+ u32 reta, n;
+
+ /* generate a new table if we weren't given one */
+ for (j = 0; j < 4; j++) {
+ if (indir)
+ n = indir[i + j];
+ else
+ n = ethtool_rxfh_indir_default(i + j, rss_i);
+
+ table[j] = n;
+ }
+
+ reta = table[0] |
+ (table[1] << 8) |
+ (table[2] << 16) |
+ (table[3] << 24);
+
+ if (interface->reta[i] == reta)
+ continue;
+
+ interface->reta[i] = reta;
+ fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
+ }
+}
+
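[Editor's note: fm10k_write_reta packs four 8-bit indirection entries into each 32-bit RETA register, entry j in byte j. A quick pack/unpack round trip showing the layout, with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t entry[4] = { 3, 1, 2, 0 };	/* four queue indices */
	uint32_t reta;
	int j;

	/* pack: entry j lands in byte j, lowest entry in the low byte */
	reta = (uint32_t)entry[0] |
	       ((uint32_t)entry[1] << 8) |
	       ((uint32_t)entry[2] << 16) |
	       ((uint32_t)entry[3] << 24);

	/* unpack to verify the round trip */
	for (j = 0; j < 4; j++)
		printf("entry[%d] = %u\n", j, (reta >> (8 * j)) & 0xff);

	return 0;
}
]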
static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
@@ -1053,7 +1026,6 @@ static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
- struct fm10k_hw *hw = &interface->hw;
int i;
u16 rss_i;
@@ -1068,19 +1040,7 @@ static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
return -EINVAL;
}
- /* record entries to reta table */
- for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
- u32 reta = indir[0] |
- (indir[1] << 8) |
- (indir[2] << 16) |
- (indir[3] << 24);
-
- if (interface->reta[i] == reta)
- continue;
-
- interface->reta[i] = reta;
- fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
- }
+ fm10k_write_reta(interface, indir);
return 0;
}
@@ -1145,7 +1105,7 @@ static unsigned int fm10k_max_channels(struct net_device *dev)
/* For QoS report channels per traffic class */
if (tcs > 1)
- max_combined = 1 << (fls(max_combined / tcs) - 1);
+ max_combined = BIT((fls(max_combined / tcs) - 1));
return max_combined;
}
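[Editor's note: BIT(fls(x) - 1) rounds x down to the nearest power of two, since fls() returns the 1-based index of the highest set bit. A small demonstration with a portable stand-in for the kernel's fls():

#include <stdio.h>

/* portable stand-in for fls(): 1-based index of the highest set bit */
static int fls32(unsigned int x)
{
	int i = 0;

	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned int v;

	/* BIT(fls(x) - 1) == largest power of two <= x, for x >= 1 */
	for (v = 1; v <= 10; v++)
		printf("%u -> %u\n", v, 1u << (fls32(v) - 1));
	return 0;
}
]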
@@ -1192,33 +1152,6 @@ static int fm10k_set_channels(struct net_device *dev,
return fm10k_setup_tc(dev, netdev_get_num_tc(dev));
}
-static int fm10k_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
-{
- struct fm10k_intfc *interface = netdev_priv(dev);
-
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
-
- if (interface->ptp_clock)
- info->phc_index = ptp_clock_index(interface->ptp_clock);
- else
- info->phc_index = -1;
-
- info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
-
- return 0;
-}
-
static const struct ethtool_ops fm10k_ethtool_ops = {
.get_strings = fm10k_get_strings,
.get_sset_count = fm10k_get_sset_count,
@@ -1246,7 +1179,6 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.set_rxfh = fm10k_set_rssh,
.get_channels = fm10k_get_channels,
.set_channels = fm10k_set_channels,
- .get_ts_info = fm10k_get_ts_info,
};
void fm10k_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index acfb8b1f8..47f0743ec 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -50,7 +50,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface)
s64 vflre;
int i;
- /* if there is no iov_data then there is no mailboxes to process */
+ /* if there is no iov_data then there is no mailbox to process */
if (!ACCESS_ONCE(interface->iov_data))
return 0;
@@ -98,7 +98,7 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
struct fm10k_iov_data *iov_data;
int i;
- /* if there is no iov_data then there is no mailboxes to process */
+ /* if there is no iov_data then there is no mailbox to process */
if (!ACCESS_ONCE(interface->iov_data))
return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 4de17db38..0e166e9c9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -29,15 +29,15 @@
#include "fm10k.h"
#define DRV_VERSION "0.19.3-k"
+#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
-static const char fm10k_driver_string[] =
- "Intel(R) Ethernet Switch Host Interface Driver";
+static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
- "Copyright (c) 2013 Intel Corporation.";
+ "Copyright (c) 2013 - 2016 Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver");
+MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
@@ -401,10 +401,10 @@ static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
}
#define FM10K_RSS_L4_TYPES_MASK \
- ((1ul << FM10K_RSSTYPE_IPV4_TCP) | \
- (1ul << FM10K_RSSTYPE_IPV4_UDP) | \
- (1ul << FM10K_RSSTYPE_IPV6_TCP) | \
- (1ul << FM10K_RSSTYPE_IPV6_UDP))
+ (BIT(FM10K_RSSTYPE_IPV4_TCP) | \
+ BIT(FM10K_RSSTYPE_IPV4_UDP) | \
+ BIT(FM10K_RSSTYPE_IPV6_TCP) | \
+ BIT(FM10K_RSSTYPE_IPV6_UDP))
static inline void fm10k_rx_hash(struct fm10k_ring *ring,
union fm10k_rx_desc *rx_desc,
@@ -420,23 +420,10 @@ static inline void fm10k_rx_hash(struct fm10k_ring *ring,
return;
skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
- (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+ (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
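[Editor's note: collapsing the per-type decision into FM10K_RSS_L4_TYPES_MASK turns the L3-versus-L4 choice into a single AND, as used by skb_set_hash() above. Sketch with made-up type codes; the real FM10K_RSSTYPE_* values live in the hardware headers:

#include <stdio.h>

#define BIT(n) (1u << (n))

/* illustrative RSS type codes, not the hardware's real values */
enum { RSS_IPV4 = 0, RSS_IPV4_TCP = 1, RSS_IPV4_UDP = 2 };

#define L4_TYPES_MASK (BIT(RSS_IPV4_TCP) | BIT(RSS_IPV4_UDP))

int main(void)
{
	unsigned int rss_type;

	/* one AND decides whether the reported hash covers an L4 header */
	for (rss_type = 0; rss_type <= 2; rss_type++)
		printf("type %u -> %s hash\n", rss_type,
		       (BIT(rss_type) & L4_TYPES_MASK) ? "L4" : "L3");
	return 0;
}
]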
-static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring,
- union fm10k_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct fm10k_intfc *interface = rx_ring->q_vector->interface;
-
- FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;
-
- if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED))
- fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb),
- le64_to_cpu(rx_desc->q.timestamp));
-}
-
static void fm10k_type_trans(struct fm10k_ring *rx_ring,
union fm10k_rx_desc __maybe_unused *rx_desc,
struct sk_buff *skb)
@@ -486,8 +473,6 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
fm10k_rx_checksum(rx_ring, rx_desc, skb);
- fm10k_rx_hwtstamp(rx_ring, rx_desc, skb);
-
FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -835,6 +820,8 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
struct ipv6hdr *ipv6;
u8 *raw;
} network_hdr;
+ u8 *transport_hdr;
+ __be16 frag_off;
__be16 protocol;
u8 l4_hdr = 0;
@@ -852,9 +839,11 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
goto no_csum;
}
network_hdr.raw = skb_inner_network_header(skb);
+ transport_hdr = skb_inner_transport_header(skb);
} else {
protocol = vlan_get_protocol(skb);
network_hdr.raw = skb_network_header(skb);
+ transport_hdr = skb_transport_header(skb);
}
switch (protocol) {
@@ -863,15 +852,17 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
break;
case htons(ETH_P_IPV6):
l4_hdr = network_hdr.ipv6->nexthdr;
+ if (likely((transport_hdr - network_hdr.raw) ==
+ sizeof(struct ipv6hdr)))
+ break;
+ ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
+ sizeof(struct ipv6hdr),
+ &l4_hdr, &frag_off);
+ if (unlikely(frag_off))
+ l4_hdr = NEXTHDR_FRAGMENT;
break;
default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum but ip version=%x!\n",
- protocol);
- }
- tx_ring->tx_stats.csum_err++;
- goto no_csum;
+ break;
}
switch (l4_hdr) {
@@ -884,9 +875,10 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
- "partial checksum but l4 proto=%x!\n",
- l4_hdr);
+ "partial checksum, version=%d l4 proto=%x\n",
+ protocol, l4_hdr);
}
+ skb_checksum_help(skb);
tx_ring->tx_stats.csum_err++;
goto no_csum;
}
@@ -912,11 +904,6 @@ static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
/* set type for advanced descriptor with frame checksum insertion */
u32 desc_flags = 0;
- /* set timestamping bits */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
- desc_flags |= FM10K_TXD_FLAG_TIME;
-
/* set checksum offload bits */
desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
FM10K_TXD_FLAG_CSUM);
@@ -1198,9 +1185,10 @@ void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
* fm10k_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
**/
static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
- struct fm10k_ring *tx_ring)
+ struct fm10k_ring *tx_ring, int napi_budget)
{
struct fm10k_intfc *interface = q_vector->interface;
struct fm10k_tx_buffer *tx_buffer;
@@ -1238,7 +1226,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
/* free the skb */
- dev_consume_skb_any(tx_buffer->skb);
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -1409,7 +1397,7 @@ static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
* accounts for changes in the ITR due to PCIe link speed.
*/
itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8;
- avg_wire_size += (1 << itr_round) - 1;
+ avg_wire_size += BIT(itr_round) - 1;
avg_wire_size >>= itr_round;
/* write back value and retain adaptive flag */
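[Editor's note: adding BIT(itr_round) - 1 before the right shift makes the division round up instead of truncating: (x + 2^n - 1) >> n equals ceil(x / 2^n) for non-negative x. A quick check:

#include <stdio.h>

int main(void)
{
	unsigned int x, n = 3;	/* divide by 2^3 = 8, rounding up */

	for (x = 0; x <= 17; x += 7)
		printf("ceil(%u / 8) = %u\n", x, (x + (1u << n) - 1) >> n);
	return 0;
}
]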
@@ -1449,8 +1437,10 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
int per_ring_budget, work_done = 0;
bool clean_complete = true;
- fm10k_for_each_ring(ring, q_vector->tx)
- clean_complete &= fm10k_clean_tx_irq(q_vector, ring);
+ fm10k_for_each_ring(ring, q_vector->tx) {
+ if (!fm10k_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
/* Handle case where we are called by netpoll with a budget of 0 */
if (budget <= 0)
@@ -1468,7 +1458,8 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
work_done += work;
- clean_complete &= !!(work < per_ring_budget);
+ if (work >= per_ring_budget)
+ clean_complete = false;
}
/* If all work not completed, return budget and keep polling */
@@ -1511,17 +1502,17 @@ static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
/* set QoS mask and indices */
f = &interface->ring_feature[RING_F_QOS];
f->indices = pcs;
- f->mask = (1 << fls(pcs - 1)) - 1;
+ f->mask = BIT(fls(pcs - 1)) - 1;
/* determine the upper limit for our current DCB mode */
rss_i = interface->hw.mac.max_queues / pcs;
- rss_i = 1 << (fls(rss_i) - 1);
+ rss_i = BIT(fls(rss_i) - 1);
/* set RSS mask and indices */
f = &interface->ring_feature[RING_F_RSS];
rss_i = min_t(u16, rss_i, f->limit);
f->indices = rss_i;
- f->mask = (1 << fls(rss_i - 1)) - 1;
+ f->mask = BIT(fls(rss_i - 1)) - 1;
/* configure pause class to queue mapping */
for (i = 0; i < pcs; i++)
@@ -1551,7 +1542,7 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
/* record indices and power of 2 mask for RSS */
f->indices = rss_i;
- f->mask = (1 << fls(rss_i - 1)) - 1;
+ f->mask = BIT(fls(rss_i - 1)) - 1;
interface->num_rx_queues = rss_i;
interface->num_tx_queues = rss_i;
@@ -1572,17 +1563,29 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
**/
static void fm10k_set_num_queues(struct fm10k_intfc *interface)
{
- /* Start with base case */
- interface->num_rx_queues = 1;
- interface->num_tx_queues = 1;
-
+ /* Attempt to setup QoS and RSS first */
if (fm10k_set_qos_queues(interface))
return;
+ /* If we don't have QoS, just fallback to only RSS. */
fm10k_set_rss_queues(interface);
}
/**
+ * fm10k_reset_num_queues - Reset the number of queues to zero
+ * @interface: board private structure
+ *
+ * This function should be called whenever we need to reset the number of
+ * queues after an error condition.
+ */
+static void fm10k_reset_num_queues(struct fm10k_intfc *interface)
+{
+ interface->num_tx_queues = 0;
+ interface->num_rx_queues = 0;
+ interface->num_q_vectors = 0;
+}
+
+/**
* fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
* @interface: board private structure to initialize
* @v_count: q_vectors allocated on interface, used for ring interleaving
@@ -1765,9 +1768,7 @@ static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
return 0;
err_out:
- interface->num_tx_queues = 0;
- interface->num_rx_queues = 0;
- interface->num_q_vectors = 0;
+ fm10k_reset_num_queues(interface);
while (v_idx--)
fm10k_free_q_vector(interface, v_idx);
@@ -1787,9 +1788,7 @@ static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
{
int v_idx = interface->num_q_vectors;
- interface->num_tx_queues = 0;
- interface->num_rx_queues = 0;
- interface->num_q_vectors = 0;
+ fm10k_reset_num_queues(interface);
while (v_idx--)
fm10k_free_q_vector(interface, v_idx);
@@ -1935,7 +1934,7 @@ static void fm10k_assign_rings(struct fm10k_intfc *interface)
static void fm10k_init_reta(struct fm10k_intfc *interface)
{
u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
- u32 reta, base;
+ u32 reta;
/* If the Rx flow indirection table has been configured manually, we
* need to maintain it when possible.
@@ -1960,21 +1959,7 @@ static void fm10k_init_reta(struct fm10k_intfc *interface)
}
repopulate_reta:
- /* Populate the redirection table 4 entries at a time. To do this
- * we are generating the results for n and n+2 and then interleaving
- * those with the results with n+1 and n+3.
- */
- for (i = FM10K_RETA_SIZE; i--;) {
- /* first pass generates n and n+2 */
- base = ((i * 0x00040004) + 0x00020000) * rss_i;
- reta = (base & 0x3F803F80) >> 7;
-
- /* second pass generates n+1 and n+3 */
- base += 0x00010001 * rss_i;
- reta |= (base & 0x3F803F80) << 1;
-
- interface->reta[i] = reta;
- }
+ fm10k_write_reta(interface, NULL);
}
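[Editor's note: delegating to fm10k_write_reta(interface, NULL) replaces the removed fixed-point interleaving with ethtool_rxfh_indir_default(), whose in-kernel definition reduces to index % n_rx_rings — the same even spread, stated directly. Sketch of the resulting default table:

#include <stdio.h>

/* mirrors ethtool_rxfh_indir_default(): spread entries evenly */
static unsigned int indir_default(unsigned int index, unsigned int n_rings)
{
	return index % n_rings;
}

int main(void)
{
	unsigned int i, rss_i = 4;

	/* first 16 of the 128 table entries, 4 RSS queues */
	for (i = 0; i < 16; i++)
		printf("%u%c", indir_default(i, rss_i),
		       (i % 8 == 7) ? '\n' : ' ');
	return 0;
}
]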
/**
@@ -1997,14 +1982,15 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
if (err) {
dev_err(&interface->pdev->dev,
"Unable to initialize MSI-X capability\n");
- return err;
+ goto err_init_msix;
}
/* Allocate memory for queues */
err = fm10k_alloc_q_vectors(interface);
if (err) {
- fm10k_reset_msix_capability(interface);
- return err;
+ dev_err(&interface->pdev->dev,
+ "Unable to allocate queue vectors\n");
+ goto err_alloc_q_vectors;
}
/* Map rings to devices, and map devices to physical queues */
@@ -2014,6 +2000,12 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
fm10k_init_reta(interface);
return 0;
+
+err_alloc_q_vectors:
+ fm10k_reset_msix_capability(interface);
+err_init_msix:
+ fm10k_reset_num_queues(interface);
+ return err;
}
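[Editor's note: the reworked error path above uses the usual kernel goto ladder — each failure jumps past its own cleanup so only the steps that already succeeded get undone, in reverse order. The shape, reduced to a compilable sketch:

#include <stdio.h>

static int step_a(void) { return 0; }	/* 0 on success */
static int step_b(void) { return -1; }	/* force a failure */
static void undo_a(void) { puts("undo a"); }

static int init(void)
{
	int err;

	err = step_a();
	if (err)
		goto err_a;

	err = step_b();
	if (err)
		goto err_b;

	return 0;

err_b:
	undo_a();	/* unwind in reverse order of setup */
err_a:
	return err;
}

int main(void)
{
	printf("init() = %d\n", init());
	return 0;
}
]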
/**
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 98202c3d5..c9dfa6564 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
index 245a0a3dc..b7dbc8a84 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index d09a8dd71..2a08d3f5b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -243,9 +243,6 @@ void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface)
for (i = 0; i < interface->num_tx_queues; i++)
fm10k_clean_tx_ring(interface->tx_ring[i]);
-
- /* remove any stale timestamp buffers and free them */
- skb_queue_purge(&interface->ts_tx_skb_queue);
}
/**
@@ -440,7 +437,7 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
* @sa_family: Address family of new port
* @port: port number used for VXLAN
*
- * This funciton is called when a new VXLAN interface has added a new port
+ * This function is called when a new VXLAN interface has added a new port
* number to the range that is currently in use for VXLAN. The new port
* number is always added to the tail so that the port number list should
* match the order in which the ports were allocated. The head of the list
@@ -484,7 +481,7 @@ insert_tail:
* @sa_family: Address family of freed port
* @port: port number used for VXLAN
*
- * This funciton is called when a new VXLAN interface has freed a port
+ * This function is called when a new VXLAN interface has freed a port
* number from the range that is currently in use for VXLAN. The freed
* port is removed from the list and the new head is used to determine
* the port number for offloads.
@@ -660,10 +657,6 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
__skb_put(skb, pad_len);
}
- /* prepare packet for hardware time stamping */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- fm10k_ts_tx_enqueue(interface, skb);
-
if (r_idx >= interface->num_tx_queues)
r_idx %= interface->num_tx_queues;
@@ -884,7 +877,7 @@ static int __fm10k_uc_sync(struct net_device *dev,
return -EADDRNOTAVAIL;
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+ for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
err = hw->mac.ops.update_uc_addr(hw, glort, addr,
@@ -947,7 +940,7 @@ static int __fm10k_mc_sync(struct net_device *dev,
u16 vid, glort = interface->glort;
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+ for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
@@ -1002,11 +995,8 @@ static void fm10k_set_rx_mode(struct net_device *dev)
}
/* synchronize all of the addresses */
- if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
- __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
- if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
- __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
- }
+ __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
+ __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
fm10k_mbx_unlock(interface);
}
@@ -1044,7 +1034,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
hw->mac.ops.update_vlan(hw, 0, 0, true);
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+ for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
hw->mac.ops.update_vlan(hw, vid, 0, true);
@@ -1056,11 +1046,8 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
/* synchronize all of the addresses */
- if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
- __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
- if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
- __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
- }
+ __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
+ __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
fm10k_mbx_unlock(interface);
@@ -1213,18 +1200,6 @@ static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
return fm10k_setup_tc(dev, tc->tc);
}
-static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return fm10k_get_ts_config(netdev, ifr);
- case SIOCSHWTSTAMP:
- return fm10k_set_ts_config(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
struct fm10k_l2_accel *l2_accel)
{
@@ -1402,7 +1377,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
.ndo_add_vxlan_port = fm10k_add_vxlan_port,
.ndo_del_vxlan_port = fm10k_del_vxlan_port,
- .ndo_do_ioctl = fm10k_ioctl,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1429,7 +1403,7 @@ struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info)
/* configure default debug level */
interface = netdev_priv(dev);
- interface->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ interface->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
/* configure default features */
dev->features |= NETIF_F_IP_CSUM |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 4eb7a6fa6..e05aca9be 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -99,7 +99,7 @@ void fm10k_service_event_schedule(struct fm10k_intfc *interface)
static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
- BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));
+ WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));
/* flush memory to make sure state is correct before next watchdog */
smp_mb__before_atomic();
@@ -145,7 +145,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
WARN_ON(in_interrupt());
/* put off any impending NetWatchDogTimeout */
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
usleep_range(1000, 2000);
@@ -209,9 +209,6 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
- /* reset clock */
- fm10k_ts_reset(interface);
-
err = netif_running(netdev) ? fm10k_open(netdev) : 0;
if (err)
goto err_open;
@@ -559,7 +556,6 @@ static void fm10k_service_task(struct work_struct *work)
/* tasks only run when interface is up */
fm10k_watchdog_subtask(interface);
fm10k_check_hang_subtask(interface);
- fm10k_ts_tx_subtask(interface);
/* release lock on service events to allow scheduling next event */
fm10k_service_event_complete(interface);
@@ -579,7 +575,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
u64 tdba = ring->dma;
u32 size = ring->count * sizeof(struct fm10k_tx_desc);
u32 txint = FM10K_INT_MAP_DISABLE;
- u32 txdctl = FM10K_TXDCTL_ENABLE | (1 << FM10K_TXDCTL_MAX_TIME_SHIFT);
+ u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
@@ -730,7 +726,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
if (interface->pfc_en)
rx_pause = interface->pfc_en;
#endif
- if (!(rx_pause & (1 << ring->qos_pc)))
+ if (!(rx_pause & BIT(ring->qos_pc)))
rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
@@ -779,7 +775,7 @@ void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
u8 reg_idx = ring->reg_idx;
- if (!(rx_pause & (1 << ring->qos_pc)))
+ if (!(rx_pause & BIT(ring->qos_pc)))
rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
@@ -903,8 +899,8 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
/* re-enable mailbox interrupt and indicate 20us delay */
fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
- FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >>
- hw->mac.itr_scale));
+ (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
+ FM10K_ITR_ENABLE);
/* service upstream mailbox */
if (fm10k_mbx_trylock(interface)) {
@@ -1065,7 +1061,7 @@ static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
if (maxholdq)
fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
for (q = 255;;) {
- if (maxholdq & (1 << 31)) {
+ if (maxholdq & BIT(31)) {
if (q < FM10K_MAX_QUEUES_PF) {
interface->rx_overrun_pf++;
fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
@@ -1135,22 +1131,24 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
/* re-enable mailbox interrupt and indicate 20us delay */
fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
- FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >>
- hw->mac.itr_scale));
+ (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
+ FM10K_ITR_ENABLE);
return IRQ_HANDLED;
}
void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
{
- struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
struct fm10k_hw *hw = &interface->hw;
+ struct msix_entry *entry;
int itr_reg;
/* no mailbox IRQ to free if MSI-X is not enabled */
if (!interface->msix_entries)
return;
+ entry = &interface->msix_entries[FM10K_MBX_VECTOR];
+
/* disconnect the mailbox */
hw->mbx.ops.disconnect(hw, &hw->mbx);
@@ -1202,25 +1200,6 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
return 0;
}
-static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info __always_unused *mbx)
-{
- struct fm10k_intfc *interface;
- u64 timestamp;
- s32 err;
-
- err = fm10k_tlv_attr_get_u64(results[FM10K_1588_MSG_TIMESTAMP],
- &timestamp);
- if (err)
- return err;
-
- interface = container_of(hw, struct fm10k_intfc, hw);
-
- fm10k_ts_tx_hwtstamp(interface, 0, timestamp);
-
- return 0;
-}
-
/* generic error handler for mailbox issues */
static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info __always_unused *mbx)
@@ -1241,7 +1220,6 @@ static const struct fm10k_msg_data vf_mbx_data[] = {
FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr),
FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
- FM10K_VF_MSG_1588_HANDLER(fm10k_1588_msg_vf),
FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
@@ -1253,7 +1231,7 @@ static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface)
int err;
/* Use timer0 for interrupt moderation on the mailbox */
- u32 itr = FM10K_INT_MAP_TIMER0 | entry->entry;
+ u32 itr = entry->entry | FM10K_INT_MAP_TIMER0;
/* register mailbox handlers */
err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data);
@@ -1285,11 +1263,40 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
u32 dglort_map = hw->mac.dglort_map;
s32 err;
+ interface = container_of(hw, struct fm10k_intfc, hw);
+
+ err = fm10k_msg_err_pf(hw, results, mbx);
+ if (!err && hw->swapi.status) {
+ /* force link down for a reasonable delay */
+ interface->link_down_event = jiffies + (2 * HZ);
+ set_bit(__FM10K_LINK_DOWN, &interface->state);
+
+ /* reset dglort_map back to no config */
+ hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
+
+ fm10k_service_event_schedule(interface);
+
+ /* prevent overloading kernel message buffer */
+ if (interface->lport_map_failed)
+ return 0;
+
+ interface->lport_map_failed = true;
+
+ if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED)
+ dev_warn(&interface->pdev->dev,
+ "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
+ dev_warn(&interface->pdev->dev,
+ "request logical port map failed: %d\n",
+ hw->swapi.status);
+
+ return 0;
+ }
+
err = fm10k_msg_lport_map_pf(hw, results, mbx);
if (err)
return err;
- interface = container_of(hw, struct fm10k_intfc, hw);
+ interface->lport_map_failed = false;
/* we need to reset if port count was just updated */
if (dglort_map != hw->mac.dglort_map)
@@ -1339,68 +1346,6 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
return 0;
}
-static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info __always_unused *mbx)
-{
- struct fm10k_swapi_1588_timestamp timestamp;
- struct fm10k_iov_data *iov_data;
- struct fm10k_intfc *interface;
- u16 sglort, vf_idx;
- s32 err;
-
- err = fm10k_tlv_attr_get_le_struct(
- results[FM10K_PF_ATTR_ID_1588_TIMESTAMP],
- &timestamp, sizeof(timestamp));
- if (err)
- return err;
-
- interface = container_of(hw, struct fm10k_intfc, hw);
-
- if (timestamp.dglort) {
- fm10k_ts_tx_hwtstamp(interface, timestamp.dglort,
- le64_to_cpu(timestamp.egress));
- return 0;
- }
-
- /* either dglort or sglort must be set */
- if (!timestamp.sglort)
- return FM10K_ERR_PARAM;
-
- /* verify GLORT is at least one of the ones we own */
- sglort = le16_to_cpu(timestamp.sglort);
- if (!fm10k_glort_valid_pf(hw, sglort))
- return FM10K_ERR_PARAM;
-
- if (sglort == interface->glort) {
- fm10k_ts_tx_hwtstamp(interface, 0,
- le64_to_cpu(timestamp.ingress));
- return 0;
- }
-
- /* if there is no iov_data then there is no mailboxes to process */
- if (!ACCESS_ONCE(interface->iov_data))
- return FM10K_ERR_PARAM;
-
- rcu_read_lock();
-
- /* notify VF if this timestamp belongs to it */
- iov_data = interface->iov_data;
- vf_idx = (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE) - sglort;
-
- if (!iov_data || vf_idx >= iov_data->num_vfs) {
- err = FM10K_ERR_PARAM;
- goto err_unlock;
- }
-
- err = hw->iov.ops.report_timestamp(hw, &iov_data->vf_info[vf_idx],
- le64_to_cpu(timestamp.ingress));
-
-err_unlock:
- rcu_read_unlock();
-
- return err;
-}
-
static const struct fm10k_msg_data pf_mbx_data[] = {
FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
@@ -1408,7 +1353,6 @@ static const struct fm10k_msg_data pf_mbx_data[] = {
FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
- FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(fm10k_1588_msg_pf),
FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
@@ -1420,8 +1364,8 @@ static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
int err;
/* Use timer0 for interrupt moderation on the mailbox */
- u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry;
- u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry;
+ u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0;
+ u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE;
/* register mailbox handlers */
err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
@@ -1654,6 +1598,7 @@ void fm10k_down(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
+ int err;
/* signal that we are down to the interrupt handler and service task */
set_bit(__FM10K_DOWN, &interface->state);
@@ -1678,7 +1623,9 @@ void fm10k_down(struct fm10k_intfc *interface)
fm10k_update_stats(interface);
/* Disable DMA engine for Tx/Rx */
- hw->mac.ops.stop_hw(hw);
+ err = hw->mac.ops.stop_hw(hw);
+ if (err)
+ dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);
/* free any buffers still on the rings */
fm10k_clean_all_tx_rings(interface);
@@ -1776,35 +1723,17 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
netdev->addr_assign_type |= NET_ADDR_RANDOM;
}
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+ ether_addr_copy(netdev->perm_addr, hw->mac.addr);
if (!is_valid_ether_addr(netdev->perm_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
return -EIO;
}
- /* assign BAR 4 resources for use with PTP */
- if (fm10k_read_reg(hw, FM10K_CTRL) & FM10K_CTRL_BAR4_ALLOWED)
- interface->sw_addr = ioremap(pci_resource_start(pdev, 4),
- pci_resource_len(pdev, 4));
- hw->sw_addr = interface->sw_addr;
-
/* initialize DCBNL interface */
fm10k_dcbnl_set_ops(netdev);
- /* Initialize service timer and service task */
- set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
- setup_timer(&interface->service_timer, &fm10k_service_timer,
- (unsigned long)interface);
- INIT_WORK(&interface->service_task, fm10k_service_task);
-
- /* kick off service timer now, even when interface is down */
- mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
-
- /* Initialize timestamp data */
- fm10k_ts_init(interface);
-
/* set default ring sizes */
interface->tx_ring_count = FM10K_DEFAULT_TXD;
interface->rx_ring_count = FM10K_DEFAULT_RXD;
@@ -1987,6 +1916,12 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ /* the mbx interrupt might attempt to schedule the service task, so we
+ * must ensure it is disabled since we haven't yet requested the timer
+ * or work item.
+ */
+ set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+
err = fm10k_mbx_request_irq(interface);
if (err)
goto err_mbx_interrupt;
@@ -2006,8 +1941,15 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* stop all the transmit queues from transmitting until link is up */
netif_tx_stop_all_queues(netdev);
- /* Register PTP interface */
- fm10k_ptp_register(interface);
+ /* Initialize service timer and service task late in order to avoid
+ * cleanup issues.
+ */
+ setup_timer(&interface->service_timer, &fm10k_service_timer,
+ (unsigned long)interface);
+ INIT_WORK(&interface->service_task, fm10k_service_task);
+
+ /* kick off service timer now, even when interface is down */
+ mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
/* print warning for non-optimal configurations */
fm10k_slot_warn(interface);
@@ -2065,9 +2007,6 @@ static void fm10k_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- /* cleanup timestamp handling */
- fm10k_ptp_unregister(interface);
-
/* release VFs */
fm10k_iov_disable(pdev);
@@ -2140,9 +2079,6 @@ static int fm10k_resume(struct pci_dev *pdev)
/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
- /* reset clock */
- fm10k_ts_reset(interface);
-
rtnl_lock();
err = fm10k_init_queueing_scheme(interface);
@@ -2259,15 +2195,17 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
+ rtnl_lock();
+
if (netif_running(netdev))
fm10k_close(netdev);
+ fm10k_mbx_free_irq(interface);
+
/* free interrupts */
fm10k_clear_queueing_scheme(interface);
- fm10k_mbx_free_irq(interface);
-
- pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -2337,27 +2275,31 @@ static void fm10k_io_resume(struct pci_dev *pdev)
/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
+ rtnl_lock();
+
err = fm10k_init_queueing_scheme(interface);
if (err) {
dev_err(&interface->pdev->dev,
"init_queueing_scheme failed: %d\n", err);
- return;
+ goto unlock;
}
/* reassociate interrupts */
fm10k_mbx_request_irq(interface);
- /* reset clock */
- fm10k_ts_reset(interface);
-
if (netif_running(netdev))
err = fm10k_open(netdev);
/* final check of hardware state before registering the interface */
err = err ? : fm10k_hw_ready(interface);
if (!err)
netif_device_attach(netdev);
+
+unlock:
+ rtnl_unlock();
}
static const struct pci_error_handlers fm10k_err_handler = {
@@ -2382,7 +2324,7 @@ static struct pci_driver fm10k_driver = {
/**
* fm10k_register_pci_driver - register driver interface
*
- * This funciton is called on module load in order to register the driver.
+ * This function is called on module load in order to register the driver.
**/
int fm10k_register_pci_driver(void)
{
@@ -2392,7 +2334,7 @@ int fm10k_register_pci_driver(void)
/**
* fm10k_unregister_pci_driver - unregister driver interface
*
- * This funciton is called on module unload in order to remove the driver.
+ * This function is called on module unload in order to remove the driver.
**/
void fm10k_unregister_pci_driver(void)
{
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 8cf943db5..dc75507c9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -219,8 +219,8 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
/* VLAN multi-bit write:
* The multi-bit write has several parts to it.
- * 3 2 1 0
- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * 24 16 8 0
+ * 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | RSVD0 | Length |C|RSVD0| VLAN ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -488,6 +488,10 @@ static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
if (!fm10k_glort_valid_pf(hw, glort))
return FM10K_ERR_PARAM;
+ /* reset multicast mode if deleting lport */
+ if (!enable)
+ fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
+
/* construct the lport message from the 2 pieces of data we have */
lport_msg = ((u32)count << 16) | glort;
@@ -527,8 +531,8 @@ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
return FM10K_ERR_PARAM;
/* determine count of VSIs and queues */
- queue_count = 1 << (dglort->rss_l + dglort->pc_l);
- vsi_count = 1 << (dglort->vsi_l + dglort->queue_l);
+ queue_count = BIT(dglort->rss_l + dglort->pc_l);
+ vsi_count = BIT(dglort->vsi_l + dglort->queue_l);
glort = dglort->glort;
q_idx = dglort->queue_b;
@@ -544,8 +548,8 @@ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
}
/* determine count of PCs and queues */
- queue_count = 1 << (dglort->queue_l + dglort->rss_l + dglort->vsi_l);
- pc_count = 1 << dglort->pc_l;
+ queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);
+ pc_count = BIT(dglort->pc_l);
/* configure PC for Tx queues */
for (pc = 0; pc < pc_count; pc++) {
@@ -711,8 +715,8 @@ static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
FM10K_RXDCTL_DROP_ON_EMPTY);
fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx),
- FM10K_RXQCTL_VF |
- (i << FM10K_RXQCTL_VF_SHIFT));
+ (i << FM10K_RXQCTL_VF_SHIFT) |
+ FM10K_RXQCTL_VF);
/* map queue pair to VF */
fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
@@ -864,9 +868,13 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
- /* determine correct default VLAN ID */
+ /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
+ * used here to indicate to the VF that it will not have privilege to
+ * write VLAN_TABLE. All policy is enforced on the PF but this allows
+ * the VF to correctly report errors to userspace requests.
+ */
if (vf_info->pf_vid)
- vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR;
+ vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
else
vf_vid = vf_info->sw_vid;
@@ -952,7 +960,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
return FM10K_ERR_PARAM;
/* clear event notification of VF FLR */
- fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), 1 << (vf_idx % 32));
+ fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32));
/* force timeout and then disconnect the mailbox */
vf_info->mbx.timeout = 0;
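[Editor's note: FM10K_PFVFLREC(vf_idx / 32) paired with BIT(vf_idx % 32) is the standard idiom for addressing one flag in an array of 32-bit registers — the quotient picks the word, the remainder picks the bit. Sketch:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	uint32_t regs[2] = { 0, 0 };	/* covers 64 flags */
	unsigned int vf_idx = 37;

	/* word = index / 32, bit = index % 32 */
	regs[vf_idx / 32] |= BIT(vf_idx % 32);

	printf("reg %u = 0x%08x\n", vf_idx / 32, regs[vf_idx / 32]);
	return 0;
}
]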
@@ -987,7 +995,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
(vf_idx << FM10K_TXQCTL_TC_SHIFT) |
FM10K_TXQCTL_VF | vf_idx;
- rxqctl = FM10K_RXQCTL_VF | (vf_idx << FM10K_RXQCTL_VF_SHIFT);
+ rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF;
/* stop further DMA and reset queue ownership back to VF */
for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
@@ -1140,19 +1148,6 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
fm10k_update_hw_stats_q(hw, q, idx, qpp);
}
-static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw,
- struct fm10k_vf_info *vf_info,
- u64 timestamp)
-{
- u32 msg[4];
-
- /* generate port state response to notify VF it is not ready */
- fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588);
- fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_TIMESTAMP, timestamp);
-
- return vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
-}
-
/**
* fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
* @hw: Pointer to hardware structure
@@ -1384,7 +1379,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
/* if mode is not currently enabled, enable it */
- if (!(FM10K_VF_FLAG_ENABLED(vf_info) & (1 << mode)))
+ if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
/* swap mode back to a bit flag */
@@ -1618,7 +1613,7 @@ static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
* @hw: pointer to hardware structure
* @switch_ready: pointer to boolean value that will record switch state
*
- * This funciton will check the DMA_CTRL2 register and mailbox in order
+ * This function will check the DMA_CTRL2 register and mailbox in order
* to determine if the switch is ready for the PF to begin requesting
* addresses and mapping traffic to the local interface.
**/
@@ -1647,6 +1642,8 @@ out:
/* This structure defines the attributes to be parsed below */
const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
+ sizeof(struct fm10k_swapi_error)),
FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
FM10K_TLV_ATTR_LAST
};
@@ -1787,89 +1784,6 @@ s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
return 0;
}
-const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = {
- FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP,
- sizeof(struct fm10k_swapi_1588_timestamp)),
- FM10K_TLV_ATTR_LAST
-};
-
-/* currently there is no shared 1588 timestamp handler */
-
-/**
- * fm10k_adjust_systime_pf - Adjust systime frequency
- * @hw: pointer to hardware structure
- * @ppb: adjustment rate in parts per billion
- *
- * This function will adjust the SYSTIME_CFG register contained in BAR 4
- * if this function is supported for BAR 4 access. The adjustment amount
- * is based on the parts per billion value provided and adjusted to a
- * value based on parts per 2^48 clock cycles.
- *
- * If adjustment is not supported or the requested value is too large
- * we will return an error.
- **/
-static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
-{
- u64 systime_adjust;
-
- /* if sw_addr is not set we don't have switch register access */
- if (!hw->sw_addr)
- return ppb ? FM10K_ERR_PARAM : 0;
-
- /* we must convert the value from parts per billion to parts per
- * 2^48 cycles. In addition I have opted to only use the 30 most
- * significant bits of the adjustment value as the 8 least
- * significant bits are located in another register and represent
- * a value significantly less than a part per billion. The result
- * of dropping the 8 least significant bits is that the adjustment
- * value is effectively multiplied by 2^8 when we write it.
- *
- * As a result of all this the math for this breaks down as follows:
- * ppb / 10^9 == adjust * 2^8 / 2^48
- * If we solve this for adjust and simplify, it comes out as:
- * ppb * 2^31 / 5^9 == adjust
- */
- systime_adjust = (ppb < 0) ? -ppb : ppb;
- systime_adjust <<= 31;
- do_div(systime_adjust, 1953125);
-
- /* verify the requested adjustment value is in range */
- if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
- return FM10K_ERR_PARAM;
-
- if (ppb > 0)
- systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
-
- fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
-
- return 0;
-}
-
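The removed comment above reduces ppb / 10^9 == adjust * 2^8 / 2^48 to adjust == ppb * 2^31 / 5^9, which is exactly the shift-then-do_div pair in the deleted code (1953125 == 5^9). A standalone userspace check of that scaling, assuming the comment's algebra is what the hardware expects:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t ppb = 976562;			/* largest rate that still fits */
	uint64_t adjust = (uint64_t)ppb << 31;	/* ppb * 2^31 */

	adjust /= 1953125;			/* / 5^9 */

	/* prints 1073741274, just under the 0x3FFFFFFF register mask;
	 * ppb = 976563 would already overflow it
	 */
	printf("adjust = %" PRIu64 "\n", adjust);
	return 0;
}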
-/**
- * fm10k_read_systime_pf - Reads value of systime registers
- * @hw: pointer to the hardware structure
- *
- * Function reads the content of 2 registers, combined to represent a 64 bit
- * value measured in nanoseconds. In order to guarantee the value is accurate
- * we check the 32 most significant bits both before and after reading the
- * 32 least significant bits to verify they didn't change as we were reading
- * the registers.
- **/
-static u64 fm10k_read_systime_pf(struct fm10k_hw *hw)
-{
- u32 systime_l, systime_h, systime_tmp;
-
- systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
-
- do {
- systime_tmp = systime_h;
- systime_l = fm10k_read_reg(hw, FM10K_SYSTIME);
- systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
- } while (systime_tmp != systime_h);
-
- return ((u64)systime_h << 32) | systime_l;
-}
-
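This deleted PF helper (and its VF twin removed later in the patch) reads a 64-bit nanosecond counter exposed as two 32-bit registers, guarding against a carry between the two reads. A generic, self-contained sketch of the pattern, with invented names:

#include <stdint.h>

/* Sample the high word, then the low word, then the high word again;
 * retry until the two high samples agree, proving no rollover of the
 * low word crossed the reads. read32(1) returns the high word,
 * read32(0) the low word.
 */
static uint64_t read_split_counter64(uint32_t (*read32)(int which))
{
	uint32_t lo, hi, old_hi;

	hi = read32(1);
	do {
		old_hi = hi;
		lo = read32(0);
		hi = read32(1);
	} while (hi != old_hi);

	return ((uint64_t)hi << 32) | lo;
}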
static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
@@ -1899,8 +1813,6 @@ static const struct fm10k_mac_ops mac_ops_pf = {
.set_dma_mask = fm10k_set_dma_mask_pf,
.get_fault = fm10k_get_fault_pf,
.get_host_state = fm10k_get_host_state_pf,
- .adjust_systime = fm10k_adjust_systime_pf,
- .read_systime = fm10k_read_systime_pf,
};
static const struct fm10k_iov_ops iov_ops_pf = {
@@ -1912,7 +1824,6 @@ static const struct fm10k_iov_ops iov_ops_pf = {
.set_lport = fm10k_iov_set_lport_pf,
.reset_lport = fm10k_iov_reset_lport_pf,
.update_stats = fm10k_iov_update_stats_pf,
- .report_timestamp = fm10k_iov_report_timestamp_pf,
};
static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index b2d96b45c..3336d3c10 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -42,8 +42,6 @@ enum fm10k_pf_tlv_msg_id_v1 {
FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503,
FM10K_PF_MSG_ID_DELETE_FLOW = 0x504,
FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505,
- FM10K_PF_MSG_ID_GET_1588_INFO = 0x506,
- FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701,
};
enum fm10k_pf_tlv_attr_id_v1 {
@@ -61,7 +59,6 @@ enum fm10k_pf_tlv_attr_id_v1 {
FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B,
FM10K_PF_ATTR_ID_PORT = 0x0C,
FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D,
- FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10,
};
#define FM10K_MSG_LPORT_MAP_GLORT_SHIFT 0
@@ -74,6 +71,8 @@ enum fm10k_pf_tlv_attr_id_v1 {
#define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16
#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16
+#define FM10K_MSG_ERR_PEP_NOT_SCHEDULED 280
+
 /* The following data structures are overlaid directly onto TLV mailbox
* messages, and must not break 4 byte alignment. Ensure the structures line
* up correctly as per their TLV definition.
@@ -100,13 +99,6 @@ struct fm10k_swapi_error {
struct fm10k_global_table_data ffu;
} __aligned(4) __packed;
-struct fm10k_swapi_1588_timestamp {
- __le64 egress;
- __le64 ingress;
- __le16 dglort;
- __le16 sglort;
-} __aligned(4) __packed;
-
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
#define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \
@@ -122,11 +114,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
#define FM10K_PF_MSG_ERR_HANDLER(msg, func) \
FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func)
-extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[];
-#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \
- FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \
- fm10k_1588_timestamp_msg_attr, func)
-
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
deleted file mode 100644
index b4945e8ab..000000000
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
+++ /dev/null
@@ -1,462 +0,0 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
-
-#include <linux/ptp_classify.h>
-#include <linux/ptp_clock_kernel.h>
-
-#include "fm10k.h"
-
-#define FM10K_TS_TX_TIMEOUT (HZ * 15)
-
-void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
- struct skb_shared_hwtstamps *hwtstamp,
- u64 systime)
-{
- unsigned long flags;
-
- read_lock_irqsave(&interface->systime_lock, flags);
- systime += interface->ptp_adjust;
- read_unlock_irqrestore(&interface->systime_lock, flags);
-
- hwtstamp->hwtstamp = ns_to_ktime(systime);
-}
-
-static struct sk_buff *fm10k_ts_tx_skb(struct fm10k_intfc *interface,
- __le16 dglort)
-{
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *skb;
-
- skb_queue_walk(list, skb) {
- if (FM10K_CB(skb)->fi.w.dglort == dglort)
- return skb;
- }
-
- return NULL;
-}
-
-void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
-{
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *clone;
- unsigned long flags;
-
- /* create clone for us to return on the Tx path */
- clone = skb_clone_sk(skb);
- if (!clone)
- return;
-
- FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT;
- spin_lock_irqsave(&list->lock, flags);
-
- /* attempt to locate any buffers with the same dglort,
- * if none are present then insert skb in tail of list
- */
- skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort);
- if (!skb) {
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
- __skb_queue_tail(list, clone);
- }
-
- spin_unlock_irqrestore(&list->lock, flags);
-
- /* if the list already has one then we just free the clone */
- if (skb)
- dev_kfree_skb(clone);
-}
-
-void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
- u64 systime)
-{
- struct skb_shared_hwtstamps shhwtstamps;
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *skb;
- unsigned long flags;
-
- spin_lock_irqsave(&list->lock, flags);
-
- /* attempt to locate and pull the sk_buff out of the list */
- skb = fm10k_ts_tx_skb(interface, dglort);
- if (skb)
- __skb_unlink(skb, list);
-
- spin_unlock_irqrestore(&list->lock, flags);
-
- /* if not found do nothing */
- if (!skb)
- return;
-
- /* timestamp the sk_buff and free our copy */
- fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime);
- skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
-}
-
-void fm10k_ts_tx_subtask(struct fm10k_intfc *interface)
-{
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *skb, *tmp;
- unsigned long flags;
-
- /* If we're down or resetting, just bail */
- if (test_bit(__FM10K_DOWN, &interface->state) ||
- test_bit(__FM10K_RESETTING, &interface->state))
- return;
-
- spin_lock_irqsave(&list->lock, flags);
-
- /* walk through the list and flush any expired timestamp packets */
- skb_queue_walk_safe(list, skb, tmp) {
- if (!time_is_after_jiffies(FM10K_CB(skb)->ts_tx_timeout))
- continue;
- __skb_unlink(skb, list);
- kfree_skb(skb);
- interface->tx_hwtstamp_timeouts++;
- }
-
- spin_unlock_irqrestore(&list->lock, flags);
-}
-
-static u64 fm10k_systime_read(struct fm10k_intfc *interface)
-{
- struct fm10k_hw *hw = &interface->hw;
-
- return hw->mac.ops.read_systime(hw);
-}
-
-void fm10k_ts_reset(struct fm10k_intfc *interface)
-{
- s64 ns = ktime_to_ns(ktime_get_real());
- unsigned long flags;
-
- /* reinitialize the clock */
- write_lock_irqsave(&interface->systime_lock, flags);
- interface->ptp_adjust = fm10k_systime_read(interface) - ns;
- write_unlock_irqrestore(&interface->systime_lock, flags);
-}
-
-void fm10k_ts_init(struct fm10k_intfc *interface)
-{
- /* Initialize lock protecting systime access */
- rwlock_init(&interface->systime_lock);
-
- /* Initialize skb queue for pending timestamp requests */
- skb_queue_head_init(&interface->ts_tx_skb_queue);
-
- /* reset the clock to current kernel time */
- fm10k_ts_reset(interface);
-}
-
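Throughout the removed PTP code the hardware counter stays free-running; reset, settime and adjtime all fold into the software ptp_adjust offset that fm10k_systime_to_hwtstamp() and fm10k_ptp_gettime() add back in. A generic sketch of such an offset clock (illustrative only, locking elided, not a line-for-line transcription of the driver):

#include <stdint.h>

struct offset_clock {
	int64_t adjust;		/* wall-clock ns minus raw counter ns */
};

static uint64_t offset_clock_gettime(const struct offset_clock *c,
				     uint64_t raw_ns)
{
	return raw_ns + (uint64_t)c->adjust;	/* raw -> wall clock */
}

static void offset_clock_settime(struct offset_clock *c,
				 uint64_t raw_ns, uint64_t wall_ns)
{
	c->adjust = (int64_t)(wall_ns - raw_ns);	/* step the clock */
}

static void offset_clock_adjtime(struct offset_clock *c, int64_t delta_ns)
{
	c->adjust += delta_ns;		/* slew by a signed delta */
}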
-/**
- * fm10k_get_ts_config - get current hardware timestamping configuration
- * @netdev: network interface device structure
- * @ifreq: ioctl data
- *
- * This function returns the current timestamping settings. Rather than
- * attempt to deconstruct registers to fill in the values, simply keep a copy
- * of the old settings around, and return a copy when requested.
- */
-int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
-{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- struct hwtstamp_config *config = &interface->ts_config;
-
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
-}
-
-/**
- * fm10k_set_ts_config - control hardware time stamping
- * @netdev: network interface device structure
- * @ifreq: ioctl data
- *
- * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't cause any overhead
- * when no packet needs it. At most one packet in the queue may be
- * marked for time stamping, otherwise it would be impossible to tell
- * for sure to which packet the hardware time stamp belongs.
- *
- * Incoming time stamping has to be configured via the hardware
- * filters. Not all combinations are supported, in particular event
- * type has to be specified. Matching the kind of event packet is
- * not supported, with the exception of "all V2 events regardless of
- * level 2 or 4".
- *
- * Since hardware always timestamps Path delay packets when timestamping V2
- * packets, regardless of the type specified in the register, only use V2
- * Event mode. This more accurately tells the user what the hardware is going
- * to do anyway.
- */
-int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
-{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- struct hwtstamp_config ts_config;
-
- if (copy_from_user(&ts_config, ifr->ifr_data, sizeof(ts_config)))
- return -EFAULT;
-
- /* reserved for future extensions */
- if (ts_config.flags)
- return -EINVAL;
-
- switch (ts_config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
- case HWTSTAMP_TX_ON:
- /* we likely need some check here to see if this is supported */
- break;
- default:
- return -ERANGE;
- }
-
- switch (ts_config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- interface->flags &= ~FM10K_FLAG_RX_TS_ENABLED;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_ALL:
- interface->flags |= FM10K_FLAG_RX_TS_ENABLED;
- ts_config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- return -ERANGE;
- }
-
- /* save these settings for future reference */
- interface->ts_config = ts_config;
-
- return copy_to_user(ifr->ifr_data, &ts_config, sizeof(ts_config)) ?
- -EFAULT : 0;
-}
-
-static int fm10k_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
-{
- struct fm10k_intfc *interface;
- struct fm10k_hw *hw;
- int err;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
- hw = &interface->hw;
-
- err = hw->mac.ops.adjust_systime(hw, ppb);
-
- /* the only error we should see is if the value is out of range */
- return (err == FM10K_ERR_PARAM) ? -ERANGE : err;
-}
-
-static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
- struct fm10k_intfc *interface;
- unsigned long flags;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
- write_lock_irqsave(&interface->systime_lock, flags);
- interface->ptp_adjust += delta;
- write_unlock_irqrestore(&interface->systime_lock, flags);
-
- return 0;
-}
-
-static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
-{
- struct fm10k_intfc *interface;
- unsigned long flags;
- u64 now;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
- read_lock_irqsave(&interface->systime_lock, flags);
- now = fm10k_systime_read(interface) + interface->ptp_adjust;
- read_unlock_irqrestore(&interface->systime_lock, flags);
-
- *ts = ns_to_timespec64(now);
-
- return 0;
-}
-
-static int fm10k_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
-{
- struct fm10k_intfc *interface;
- unsigned long flags;
- u64 ns = timespec64_to_ns(ts);
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
- write_lock_irqsave(&interface->systime_lock, flags);
- interface->ptp_adjust = fm10k_systime_read(interface) - ns;
- write_unlock_irqrestore(&interface->systime_lock, flags);
-
- return 0;
-}
-
-static int fm10k_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int __always_unused on)
-{
- struct ptp_clock_time *t = &rq->perout.period;
- struct fm10k_intfc *interface;
- struct fm10k_hw *hw;
- u64 period;
- u32 step;
-
- /* we can only support periodic output */
- if (rq->type != PTP_CLK_REQ_PEROUT)
- return -EINVAL;
-
- /* verify the requested channel is there */
- if (rq->perout.index >= ptp->n_per_out)
- return -EINVAL;
-
- /* we cannot enforce a start time as there is no
- * mechanism for that in the hardware; we can only control
- * the period.
- */
-
- /* we cannot support periods greater than 4 seconds due to reg limit */
- if (t->sec > 4 || t->sec < 0)
- return -ERANGE;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
- hw = &interface->hw;
-
- /* we simply cannot support the operation if we don't have BAR4 */
- if (!hw->sw_addr)
- return -ENOTSUPP;
-
- /* convert to unsigned 64b ns, verify we can put it in a 32b register */
- period = t->sec * 1000000000LL + t->nsec;
-
- /* determine the minimum size for period */
- step = 2 * (fm10k_read_reg(hw, FM10K_SYSTIME_CFG) &
- FM10K_SYSTIME_CFG_STEP_MASK);
-
- /* verify the value is in range supported by hardware */
- if ((period && (period < step)) || (period > U32_MAX))
- return -ERANGE;
-
- /* notify hardware of the request to begin sending pulses */
- fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index),
- (u32)period);
-
- return 0;
-}
-
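To put illustrative numbers on the range check above (values invented, not from a datasheet): if SYSTIME_CFG reports a step of 10, the minimum period is 2 * 10 = 20 ns, so the code accepts period == 0 (output disabled), rejects 1-19 ns as below the minimum, accepts 20 ns through U32_MAX ns (about 4.29 s, which is why t->sec was capped at 4 earlier), and rejects anything longer because the pulse register is only 32 bits wide.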
-static struct ptp_pin_desc fm10k_ptp_pd[2] = {
- {
- .name = "IEEE1588_PULSE0",
- .index = 0,
- .func = PTP_PF_PEROUT,
- .chan = 0
- },
- {
- .name = "IEEE1588_PULSE1",
- .index = 1,
- .func = PTP_PF_PEROUT,
- .chan = 1
- }
-};
-
-static int fm10k_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
- enum ptp_pin_function func, unsigned int chan)
-{
- /* verify the requested pin is there */
- if (pin >= ptp->n_pins || !ptp->pin_config)
- return -EINVAL;
-
- /* enforce locked channels, no changing them */
- if (chan != ptp->pin_config[pin].chan)
- return -EINVAL;
-
- /* we want to keep the functions locked as well */
- if (func != ptp->pin_config[pin].func)
- return -EINVAL;
-
- return 0;
-}
-
-void fm10k_ptp_register(struct fm10k_intfc *interface)
-{
- struct ptp_clock_info *ptp_caps = &interface->ptp_caps;
- struct device *dev = &interface->pdev->dev;
- struct ptp_clock *ptp_clock;
-
- snprintf(ptp_caps->name, sizeof(ptp_caps->name),
- "%s", interface->netdev->name);
- ptp_caps->owner = THIS_MODULE;
- /* This math is simply the inverse of the math in
- * fm10k_adjust_systime_pf applied to an adjustment value
- * of 2^30 - 1 which is the maximum value of the register:
- * max_ppb == ((2^30 - 1) * 5^9) / 2^31
- */
- ptp_caps->max_adj = 976562;
- ptp_caps->adjfreq = fm10k_ptp_adjfreq;
- ptp_caps->adjtime = fm10k_ptp_adjtime;
- ptp_caps->gettime64 = fm10k_ptp_gettime;
- ptp_caps->settime64 = fm10k_ptp_settime;
-
- /* provide pins if BAR4 is accessible */
- if (interface->sw_addr) {
- /* enable periodic outputs */
- ptp_caps->n_per_out = 2;
- ptp_caps->enable = fm10k_ptp_enable;
-
- /* enable clock pins */
- ptp_caps->verify = fm10k_ptp_verify;
- ptp_caps->n_pins = 2;
- ptp_caps->pin_config = fm10k_ptp_pd;
- }
-
- ptp_clock = ptp_clock_register(ptp_caps, dev);
- if (IS_ERR(ptp_clock)) {
- ptp_clock = NULL;
- dev_err(dev, "ptp_clock_register failed\n");
- } else {
- dev_info(dev, "registered PHC device %s\n", ptp_caps->name);
- }
-
- interface->ptp_clock = ptp_clock;
-}
-
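As a quick arithmetic check of the constant above, inverting the deleted fm10k_adjust_systime_pf() scaling exactly as the comment describes:

	max_ppb = (2^30 - 1) * 5^9 / 2^31
	        = 1073741823 * 1953125 / 2147483648
	        = 976562.49...

which truncates to the 976562 stored in max_adj; any larger request would overflow the 30-bit adjustment field and come back as -ERANGE via fm10k_ptp_adjfreq().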
-void fm10k_ptp_unregister(struct fm10k_intfc *interface)
-{
- struct ptp_clock *ptp_clock = interface->ptp_clock;
- struct device *dev = &interface->pdev->dev;
-
- if (!ptp_clock)
- return;
-
- interface->ptp_clock = NULL;
-
- ptp_clock_unregister(ptp_clock);
- dev_info(dev, "removed PHC %s\n", interface->ptp_caps.name);
-}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index ab01bb307..f8e87bf08 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -222,7 +222,7 @@ s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len)
attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
if (len < 4) {
- attr[1] = (u32)value & ((0x1ul << (8 * len)) - 1);
+ attr[1] = (u32)value & (BIT(8 * len) - 1);
} else {
attr[1] = (u32)value;
if (len > 4)
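The BIT(8 * len) - 1 expression above builds a mask covering len bytes: len == 1 gives 0xFF, len == 2 gives 0xFFFF, len == 3 gives 0xFFFFFF, so only the attribute's declared width of value survives into attr[1]; values of 4 bytes or more take the else branch and are stored without masking.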
@@ -481,7 +481,8 @@ static s32 fm10k_tlv_attr_validate(u32 *attr,
* up into an array of pointers stored in results. The function will
* return FM10K_ERR_PARAM on any input or message error,
* FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array
- * and 0 on success.
+ * and 0 on success. Any attributes not found in tlv_attr will be silently
+ * ignored.
**/
static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
const struct fm10k_tlv_attr *tlv_attr)
@@ -518,14 +519,15 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
while (offset < len) {
attr_id = *attr & FM10K_TLV_ID_MASK;
- if (attr_id < FM10K_TLV_RESULTS_MAX)
- err = fm10k_tlv_attr_validate(attr, tlv_attr);
- else
- err = FM10K_NOT_IMPLEMENTED;
+ if (attr_id >= FM10K_TLV_RESULTS_MAX)
+ return FM10K_NOT_IMPLEMENTED;
- if (err < 0)
+ err = fm10k_tlv_attr_validate(attr, tlv_attr);
+ if (err == FM10K_NOT_IMPLEMENTED)
+ ; /* silently ignore non-implemented attributes */
+ else if (err)
return err;
- if (!err)
+ else
results[attr_id] = attr;
/* update offset */
@@ -652,29 +654,29 @@ const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = {
**/
static void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags)
{
- if (attr_flags & (1 << FM10K_TEST_MSG_STRING))
+ if (attr_flags & BIT(FM10K_TEST_MSG_STRING))
fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING,
test_str);
- if (attr_flags & (1 << FM10K_TEST_MSG_MAC_ADDR))
+ if (attr_flags & BIT(FM10K_TEST_MSG_MAC_ADDR))
fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR,
test_mac, test_vlan);
- if (attr_flags & (1 << FM10K_TEST_MSG_U8))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U8))
fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8);
- if (attr_flags & (1 << FM10K_TEST_MSG_U16))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U16))
fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16);
- if (attr_flags & (1 << FM10K_TEST_MSG_U32))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U32))
fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32);
- if (attr_flags & (1 << FM10K_TEST_MSG_U64))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U64))
fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64);
- if (attr_flags & (1 << FM10K_TEST_MSG_S8))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S8))
fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8);
- if (attr_flags & (1 << FM10K_TEST_MSG_S16))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S16))
fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16);
- if (attr_flags & (1 << FM10K_TEST_MSG_S32))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S32))
fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32);
- if (attr_flags & (1 << FM10K_TEST_MSG_S64))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S64))
fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64);
- if (attr_flags & (1 << FM10K_TEST_MSG_LE_STRUCT))
+ if (attr_flags & BIT(FM10K_TEST_MSG_LE_STRUCT))
fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT,
test_le, 8);
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
index e1845e0a1..a1f1027fe 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 854ebb190..b8bc06183 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -225,11 +225,6 @@ struct fm10k_hw;
#define FM10K_STATS_LOOPBACK_DROP 0x3806
#define FM10K_STATS_NODESC_DROP 0x3807
-/* Timesync registers */
-#define FM10K_SYSTIME 0x3814
-#define FM10K_SYSTIME_CFG 0x3818
-#define FM10K_SYSTIME_CFG_STEP_MASK 0x0000000F
-
/* PCIe state registers */
#define FM10K_PHYADDR 0x381C
@@ -355,6 +350,7 @@ struct fm10k_hw;
#define FM10K_VLAN_TABLE_VSI_MAX 64
#define FM10K_VLAN_LENGTH_SHIFT 16
#define FM10K_VLAN_CLEAR BIT(15)
+#define FM10K_VLAN_OVERRIDE FM10K_VLAN_CLEAR
#define FM10K_VLAN_ALL \
((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT)
@@ -381,12 +377,6 @@ struct fm10k_hw;
#define FM10K_VFSYSTIME 0x00040
#define FM10K_VFITR(_n) ((_n) + 0x00060)
-/* Registers contained in BAR 4 for Switch management */
-#define FM10K_SW_SYSTIME_ADJUST 0x0224D
-#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF
-#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000
-#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252)
-
enum fm10k_int_source {
fm10k_int_mailbox = 0,
fm10k_int_pcie_fault = 1,
@@ -550,8 +540,6 @@ struct fm10k_mac_ops {
struct fm10k_dglort_cfg *);
void (*set_dma_mask)(struct fm10k_hw *, u64);
s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *);
- s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb);
- u64 (*read_systime)(struct fm10k_hw *);
};
enum fm10k_mac_type {
@@ -617,10 +605,10 @@ struct fm10k_vf_info {
*/
};
-#define FM10K_VF_FLAG_ALLMULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_ALLMULTI)
-#define FM10K_VF_FLAG_MULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_MULTI)
-#define FM10K_VF_FLAG_PROMISC_CAPABLE ((u8)1 << FM10K_XCAST_MODE_PROMISC)
-#define FM10K_VF_FLAG_NONE_CAPABLE ((u8)1 << FM10K_XCAST_MODE_NONE)
+#define FM10K_VF_FLAG_ALLMULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_ALLMULTI))
+#define FM10K_VF_FLAG_MULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_MULTI))
+#define FM10K_VF_FLAG_PROMISC_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_PROMISC))
+#define FM10K_VF_FLAG_NONE_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_NONE))
#define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF)
#define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4)
#define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode))
@@ -643,7 +631,6 @@ struct fm10k_iov_ops {
s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8);
void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *);
void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16);
- s32 (*report_timestamp)(struct fm10k_hw *, struct fm10k_vf_info *, u64);
};
struct fm10k_iov_info {
@@ -667,7 +654,6 @@ struct fm10k_info {
struct fm10k_hw {
u32 __iomem *hw_addr;
- u32 __iomem *sw_addr;
void *back;
struct fm10k_mac_info mac;
struct fm10k_bus_info bus;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 91f8d7311..3b06685ea 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -188,7 +188,7 @@ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
if (vsi)
return FM10K_ERR_PARAM;
- /* verify upper 4 bits of vid and length are 0 */
+ /* clever trick to verify reserved bits in both vid and length */
if ((vid << 16 | vid) >> 28)
return FM10K_ERR_PARAM;
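The check above folds two 4-bit reserved fields into one test: the message word carries a 12-bit VLAN ID in bits 0-11 and a length in bits 16-27, so bits 12-15 and 28-31 must be zero. A worked trace with invented values:

	vid = 0x10000FFF          (VLAN ID 0xFFF legal, reserved bit 28 set)
	vid << 16         = 0x0FFF0000
	(vid << 16) | vid = 0x1FFF0FFF
	... >> 28         = 0x1   -> nonzero, returns FM10K_ERR_PARAM

	vid = 0x00010FFF          (length 1, VLAN ID 0xFFF, reserved bits clear)
	(vid << 16) | vid = 0x0FFF0FFF
	... >> 28         = 0x0   -> accepted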
@@ -228,7 +228,7 @@ s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
ether_addr_copy(hw->mac.perm_addr, perm_addr);
hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1);
- hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR);
+ hw->mac.vlan_override = !!(vid & FM10K_VLAN_OVERRIDE);
return 0;
}
@@ -451,13 +451,6 @@ static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
return mbx->ops.enqueue_tx(hw, mbx, msg);
}
-const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = {
- FM10K_TLV_ATTR_U64(FM10K_1588_MSG_TIMESTAMP),
- FM10K_TLV_ATTR_LAST
-};
-
-/* currently there is no shared 1588 timestamp handler */
-
/**
* fm10k_update_hw_stats_vf - Updates hardware related statistics of VF
* @hw: pointer to hardware structure
@@ -509,52 +502,6 @@ static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
return 0;
}
-/**
- * fm10k_adjust_systime_vf - Adjust systime frequency
- * @hw: pointer to hardware structure
- * @ppb: adjustment rate in parts per billion
- *
- * This function takes an adjustment rate in parts per billion and will
- * verify that this value is 0 as the VF cannot support adjusting the
- * systime clock.
- *
- * If the ppb value is non-zero, the return is FM10K_ERR_PARAM; else success
- **/
-static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
-{
- /* The VF cannot adjust the clock frequency; however, it should
- * already have a syntonic clock with whichever host interface is
- * running as the master for the host interface clock domain so
- * there should be no frequency adjustment necessary.
- */
- return ppb ? FM10K_ERR_PARAM : 0;
-}
-
-/**
- * fm10k_read_systime_vf - Reads value of systime registers
- * @hw: pointer to the hardware structure
- *
- * Function reads the content of 2 registers, combined to represent a 64 bit
- * value measured in nanoseconds. In order to guarantee the value is accurate
- * we check the 32 most significant bits both before and after reading the
- * 32 least significant bits to verify they didn't change as we were reading
- * the registers.
- **/
-static u64 fm10k_read_systime_vf(struct fm10k_hw *hw)
-{
- u32 systime_l, systime_h, systime_tmp;
-
- systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
-
- do {
- systime_tmp = systime_h;
- systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME);
- systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
- } while (systime_tmp != systime_h);
-
- return ((u64)systime_h << 32) | systime_l;
-}
-
static const struct fm10k_msg_data fm10k_msg_data_vf[] = {
FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
@@ -579,8 +526,6 @@ static const struct fm10k_mac_ops mac_ops_vf = {
.rebind_hw_stats = fm10k_rebind_hw_stats_vf,
.configure_dglort_map = fm10k_configure_dglort_map_vf,
.get_host_state = fm10k_get_host_state_generic,
- .adjust_systime = fm10k_adjust_systime_vf,
- .read_systime = fm10k_read_systime_vf,
};
static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
index c4439f131..2662f33c0 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -29,7 +29,6 @@ enum fm10k_vf_tlv_msg_id {
FM10K_VF_MSG_ID_MSIX,
FM10K_VF_MSG_ID_MAC_VLAN,
FM10K_VF_MSG_ID_LPORT_STATE,
- FM10K_VF_MSG_ID_1588,
FM10K_VF_MSG_ID_MAX,
};
@@ -49,11 +48,6 @@ enum fm10k_tlv_lport_state_attr_id {
FM10K_LPORT_STATE_MSG_MAX
};
-enum fm10k_tlv_1588_attr_id {
- FM10K_1588_MSG_TIMESTAMP,
- FM10K_1588_MSG_MAX
-};
-
#define FM10K_VF_MSG_MSIX_HANDLER(func) \
FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func)
@@ -70,9 +64,5 @@ extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[];
FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \
fm10k_lport_state_msg_attr, func)
-extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[];
-#define FM10K_VF_MSG_1588_HANDLER(func) \
- FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func)
-
extern const struct fm10k_info fm10k_vf_info;
#endif /* _FM10K_VF_H */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1ce6e9c04..9c44739da 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -97,12 +97,12 @@
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
/* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0)
-#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
-#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
-#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
-#define I40E_PRIV_FLAGS_PS BIT(4)
-#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(5)
+#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0)
+#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
+#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
+#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
+#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(4)
+#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT BIT(5)
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -112,7 +112,9 @@
#define I40E_OEM_VER_PATCH_MASK 0xff
#define I40E_OEM_VER_BUILD_SHIFT 8
#define I40E_OEM_VER_SHIFT 24
-#define I40E_PHY_DEBUG_PORT BIT(4)
+#define I40E_PHY_DEBUG_ALL \
+ (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
+ I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)
 /* The values in here are decimal coded as hex as is the case in the NVM map */
#define I40E_CURRENT_NVM_VERSION_HI 0x2
@@ -123,10 +125,7 @@
#define XSTRINGIFY(bar) STRINGIFY(bar)
#define I40E_RX_DESC(R, i) \
- ((ring_is_16byte_desc_enabled(R)) \
- ? (union i40e_32byte_rx_desc *) \
- (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
- : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
+ (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
#define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i]))
#define I40E_TX_CTXTDESC(R, i) \
@@ -202,6 +201,7 @@ struct i40e_lump_tracking {
#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)
enum i40e_fd_stat_idx {
I40E_FD_STAT_ATR,
@@ -244,7 +244,6 @@ struct i40e_fdir_filter {
#define I40E_DCB_PRIO_TYPE_STRICT 0
#define I40E_DCB_PRIO_TYPE_ETS 1
#define I40E_DCB_STRICT_PRIO_CREDITS 127
-#define I40E_MAX_USER_PRIORITY 8
/* DCB per TC information data structure */
struct i40e_tc_info {
u16 qoffset; /* Queue offset from base queue */
@@ -320,8 +319,6 @@ struct i40e_pf {
#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
#define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
-#define I40E_FLAG_RX_1BUF_ENABLED BIT_ULL(4)
-#define I40E_FLAG_RX_PS_ENABLED BIT_ULL(5)
#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8)
@@ -330,7 +327,6 @@ struct i40e_pf {
#ifdef I40E_FCOE
#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11)
#endif /* I40E_FCOE */
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13)
#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
@@ -363,6 +359,7 @@ struct i40e_pf {
#define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47)
#define I40E_FLAG_HAVE_10GBASET_PHY BIT_ULL(48)
#define I40E_FLAG_PF_MAC BIT_ULL(50)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
@@ -534,9 +531,7 @@ struct i40e_vsi {
u8 *rss_lut_user; /* User configured lookup table entries */
u16 max_frame;
- u16 rx_hdr_len;
u16 rx_buf_len;
- u8 dtype;
/* List of q_vectors allocated to this VSI */
struct i40e_q_vector **q_vectors;
@@ -554,7 +549,7 @@ struct i40e_vsi {
u16 num_queue_pairs; /* Used tx and rx pairs */
u16 num_desc;
enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
- u16 vf_id; /* Virtual function ID for SRIOV VSIs */
+ s16 vf_id; /* Virtual function ID for SRIOV VSIs */
struct i40e_tc_configuration tc_config;
struct i40e_aqc_vsi_properties_data info;
@@ -811,6 +806,7 @@ int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid);
#endif
int i40e_open(struct net_device *netdev);
+int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
@@ -823,7 +819,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
#ifdef I40E_FCOE
-int i40e_close(struct net_device *netdev);
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct tc_to_netdev *tc);
void i40e_netpoll(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index df8e2fd6a..738b42a44 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -33,16 +33,6 @@
static void i40e_resume_aq(struct i40e_hw *hw);
/**
- * i40e_is_nvm_update_op - return true if this is an NVM update operation
- * @desc: API request descriptor
- **/
-static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
-{
- return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) ||
- (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update));
-}
-
-/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -624,13 +614,9 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
- hw->aq.nvm_release_on_done = false;
+ hw->nvm_release_on_done = false;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- ret_code = i40e_aq_set_hmc_resource_profile(hw,
- I40E_HMC_PROFILE_DEFAULT,
- 0,
- NULL);
ret_code = 0;
/* success! */
@@ -1023,26 +1009,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
- if (i40e_is_nvm_update_op(&e->desc)) {
- if (hw->aq.nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->aq.nvm_release_on_done = false;
- }
-
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
-
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
-
- default:
- break;
- }
- }
-
+ i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode));
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 12fbbddea..d92aad38a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -97,7 +97,6 @@ struct i40e_adminq_info {
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_release_on_done;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 8d5c65ab6..11cf1a5eb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -78,17 +78,17 @@ struct i40e_aq_desc {
#define I40E_AQ_FLAG_EI_SHIFT 14
#define I40E_AQ_FLAG_FE_SHIFT 15
-#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
/* error codes */
enum i40e_admin_queue_err {
@@ -205,10 +205,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
- /* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
-
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
i40e_aqc_opc_set_phy_config = 0x0601,
@@ -429,6 +425,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
@@ -1585,27 +1582,6 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
- /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
-};
-
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
-
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
@@ -1652,11 +1628,11 @@ enum i40e_aq_phy_type {
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
- I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
- I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
- I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
- I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT)
};
struct i40e_aqc_module_desc {
@@ -1857,7 +1833,10 @@ struct i40e_aqc_set_phy_debug {
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+/* Disable link manageability on a single port */
#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+/* Disable link manageability on all ports */
+#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20
u8 reserved[15];
};
@@ -1927,9 +1906,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
/* Used for 0x0704 as well as for 0x0705 commands */
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
- (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+ BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
#define I40E_AQ_ANVM_FEATURE 0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
@@ -2226,13 +2205,11 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
*/
struct i40e_aqc_lldp_set_local_mib {
#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
- SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
-#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
- SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \
+ BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
u8 type;
u8 reserved0;
@@ -2250,7 +2227,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
struct i40e_aqc_lldp_stop_start_specific_agent {
#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
- (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+ BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
u8 command;
u8 reserved[15];
};
@@ -2303,7 +2280,7 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
@@ -2323,14 +2300,13 @@ struct i40e_aqc_get_set_rss_key_data {
I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
struct i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
__le16 vsi_id;
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index bf6b453d9..a4601d97f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -217,7 +217,7 @@ struct i40e_client {
#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
enum i40e_client_type type;
- struct i40e_client_ops *ops; /* client ops provided by the client */
+ const struct i40e_client_ops *ops; /* client ops provided by the client */
};
static inline bool i40e_client_is_registered(struct i40e_client *client)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 4596294c2..422b41d61 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -60,6 +60,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_SFP_X722:
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
+ case I40E_DEV_ID_QSFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
default:
@@ -694,7 +696,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
/* Non Tunneled IPv6 */
I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(91),
I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
@@ -1901,13 +1903,13 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
*
* Reset the external PHY.
**/
-enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
- struct i40e_asq_cmd_details *cmd_details)
+i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_phy_debug *cmd =
(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
- enum i40e_status_code status;
+ i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_debug);
@@ -1970,10 +1972,12 @@ aq_add_vsi_exit:
* @seid: vsi number
* @set: set unicast promiscuous enable/disable
* @cmd_details: pointer to command details structure or NULL
+ * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
**/
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
- struct i40e_asq_cmd_details *cmd_details)
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -1986,8 +1990,9 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1))
+ if (rx_only_promisc &&
+ (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1)))
flags |= I40E_AQC_SET_VSI_PROMISC_TX;
}
@@ -2037,6 +2042,76 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+ cmd->seid = cpu_to_le16(seid);
+ cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ cmd->seid = cpu_to_le16(seid);
+ cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
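A hypothetical caller for the new helper, purely to show the calling convention (the SEID and VLAN values are invented, and example_uc_promisc_vlan is not part of the patch):

static void example_uc_promisc_vlan(struct i40e_hw *hw)
{
	enum i40e_status_code status;

	/* receive all unicast frames tagged with VLAN 100 on VSI SEID 42 */
	status = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, 42, true, 100, NULL);
	if (status)
		pr_warn("uc promisc on VLAN 100 failed: %d\n", status);
}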
+/**
* i40e_aq_set_vsi_broadcast
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2157,6 +2232,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -2168,6 +2246,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cmd_details);
+ vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
return status;
}
@@ -2205,6 +2286,35 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_switch_config
+ * @hw: pointer to the hardware structure
+ * @flags: bit flag values to set
+ * @valid_flags: which bit flags to set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set switch configuration bits
+ **/
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_switch_config *scfg =
+ (struct i40e_aqc_set_switch_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_switch_config);
+ scfg->flags = cpu_to_le16(flags);
+ scfg->valid_flags = cpu_to_le16(valid_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_get_firmware_version
* @hw: pointer to the hw struct
* @fw_major_version: firmware major version
@@ -2660,10 +2770,7 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
u16 *rules_used, u16 *rules_free)
{
 /* Rule ID has to be valid except for rule_type INGRESS VLAN mirroring */
- if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
- if (!rule_id)
- return I40E_ERR_PARAM;
- } else {
+ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
/* count and mr_list shall be valid for rule_type INGRESS VLAN
* mirroring. For other rule_type, count and rule_type should
* not matter.
@@ -2780,36 +2887,6 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
}
/**
- * i40e_aq_set_hmc_resource_profile
- * @hw: pointer to the hw struct
- * @profile: type of profile the HMC is to be set as
- * @pe_vf_enabled_count: the number of PE enabled VFs the system has
- * @cmd_details: pointer to command details structure or NULL
- *
- * set the HMC profile of the device.
- **/
-i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile profile,
- u8 pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aq_get_set_hmc_resource_profile *cmd =
- (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_hmc_resource_profile);
-
- cmd->pm_profile = (u8)profile;
- cmd->pe_vf_enabled = pe_vf_enabled_count;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_request_resource
* @hw: pointer to the hw struct
* @resource: resource id
@@ -3073,6 +3150,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
break;
case I40E_AQ_CAP_ID_MSIX:
p->num_msix_vectors = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MSIX vector count = %d\n",
+ p->num_msix_vectors);
break;
case I40E_AQ_CAP_ID_VF_MSIX:
p->num_msix_vectors_vf = number;
@@ -3128,6 +3208,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->wr_csr_prot = (u64)number;
p->wr_csr_prot |= (u64)logical_id << 32;
break;
+ case I40E_AQ_CAP_ID_NVM_MGMT:
+ if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
+ p->sec_rev_disabled = true;
+ if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
+ p->update_disabled = true;
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 0c97733d2..e6af8c8d7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -147,9 +147,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
(unsigned long int)nd->vlan_features);
}
- if (vsi->active_vlans)
- dev_info(&pf->pdev->dev,
- " vlgrp: & = %p\n", vsi->active_vlans);
+ dev_info(&pf->pdev->dev,
+ " vlgrp: & = %p\n", vsi->active_vlans);
dev_info(&pf->pdev->dev,
" state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
vsi->state, vsi->flags,
@@ -269,13 +268,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->queue_index,
rx_ring->reg_idx);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
- i, rx_ring->rx_hdr_len,
- rx_ring->rx_buf_len,
- rx_ring->dtype);
+ " rx_rings[%i]: rx_buf_len = %d\n",
+ i, rx_ring->rx_buf_len);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, ring_is_ps_enabled(rx_ring),
+ " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i,
rx_ring->next_to_use,
rx_ring->next_to_clean,
rx_ring->ring_active);
@@ -327,9 +324,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
tx_ring->queue_index,
tx_ring->reg_idx);
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dtype = %d\n",
- i, tx_ring->dtype);
- dev_info(&pf->pdev->dev,
" tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
i,
tx_ring->next_to_use,
@@ -366,8 +360,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" work_limit = %d\n",
vsi->work_limit);
dev_info(&pf->pdev->dev,
- " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
- vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
+ " max_frame = %d, rx_buf_len = %d dtype = %d\n",
+ vsi->max_frame, vsi->rx_buf_len, 0);
dev_info(&pf->pdev->dev,
" num_q_vectors = %i, base_vector = %i\n",
vsi->num_q_vectors, vsi->base_vector);
@@ -592,13 +586,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
" d[%03x] = 0x%016llx 0x%016llx\n",
i, txd->buffer_addr,
txd->cmd_type_offset_bsz);
- } else if (sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) {
- rxd = I40E_RX_DESC(ring, i);
- dev_info(&pf->pdev->dev,
- " d[%03x] = 0x%016llx 0x%016llx\n",
- i, rxd->read.pkt_addr,
- rxd->read.hdr_addr);
} else {
rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
@@ -620,13 +607,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
"vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
txd->buffer_addr, txd->cmd_type_offset_bsz);
- } else if (sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) {
- rxd = I40E_RX_DESC(ring, desc_n);
- dev_info(&pf->pdev->dev,
- "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
- vsi_seid, ring_id, desc_n,
- rxd->read.pkt_addr, rxd->read.hdr_addr);
} else {
rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 99257fcd1..d701861c6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -44,6 +44,8 @@
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_QSFP_I_X722 0x37D4
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 784b16594..5e8d84ff7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -230,12 +230,22 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
+static const char i40e_priv_flags_strings_gl[][ETH_GSTRING_LEN] = {
+ "MFP",
+ "LinkPolling",
+ "flow-director-atr",
+ "veb-stats",
+ "hw-atr-eviction",
+ "vf-true-promisc-support",
+};
+
+#define I40E_PRIV_FLAGS_GL_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings_gl)
+
static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
"NPAR",
"LinkPolling",
"flow-director-atr",
"veb-stats",
- "packet-split",
"hw-atr-eviction",
};
@@ -252,6 +262,110 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
}
/**
+ * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
+ * @pf: pointer to the PF struct whose phy_types are converted
+ * @supported: pointer to the ethtool supported variable to fill in
+ * @advertising: pointer to the ethtool advertising variable to fill in
+ *
+ **/
+static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
+ u32 *advertising)
+{
+ enum i40e_aq_capabilities_phy_type phy_types = pf->hw.phy.phy_types;
+
+ *supported = 0x0;
+ *advertising = 0x0;
+
+ if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
+ if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
+ *supported |= SUPPORTED_100baseT_Full;
+ *advertising |= ADVERTISED_100baseT_Full;
+ }
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XFI ||
+ phy_types & I40E_CAP_PHY_TYPE_SFI ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
+ *supported |= SUPPORTED_10000baseT_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
+ *supported |= SUPPORTED_40000baseCR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_40000baseCR4_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_40000baseCR4_Full;
+ }
+ if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
+ !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_100baseT_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_100baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+ *supported |= SUPPORTED_40000baseSR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+ *supported |= SUPPORTED_40000baseLR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
+ *supported |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_Autoneg;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
+ *supported |= SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_20000baseKR2_Full |
+ ADVERTISED_Autoneg;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
+ *supported |= SUPPORTED_10000baseKR_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_10000baseKR_Full |
+ ADVERTISED_Autoneg;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+ *supported |= SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_Autoneg;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
+ *supported |= SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_1000baseKX_Full |
+ ADVERTISED_Autoneg;
+ }
+}
+
+/**
* i40e_get_settings_link_up - Get the Link settings for when link is up
* @hw: hw structure
* @ecmd: ethtool command to fill in
@@ -265,6 +379,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
{
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
u32 link_speed = hw_link_info->link_speed;
+ u32 e_advertising = 0x0;
+ u32 e_supported = 0x0;
/* Initialize supported and advertised settings based on phy settings */
switch (hw_link_info->phy_type) {
@@ -305,14 +421,18 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
break;
case I40E_PHY_TYPE_10GBASE_T:
case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_100BASE_TX:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full;
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ecmd->advertising |= ADVERTISED_10000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
break;
case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
ecmd->supported = SUPPORTED_Autoneg |
@@ -320,12 +440,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ecmd->advertising = ADVERTISED_Autoneg |
ADVERTISED_1000baseT_Full;
break;
- case I40E_PHY_TYPE_100BASE_TX:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_100baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- break;
case I40E_PHY_TYPE_10GBASE_CR1_CU:
case I40E_PHY_TYPE_10GBASE_CR1:
ecmd->supported = SUPPORTED_Autoneg |
@@ -352,14 +466,23 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ecmd->advertising |= ADVERTISED_100baseT_Full;
}
break;
- /* Backplane is set based on supported phy types in get_settings
- * so don't set anything here but don't warn either
- */
case I40E_PHY_TYPE_40GBASE_KR4:
case I40E_PHY_TYPE_20GBASE_KR2:
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_10GBASE_KX4:
case I40E_PHY_TYPE_1000BASE_KX:
+ ecmd->supported |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_20000baseKR2_Full |
+ ADVERTISED_10000baseKR_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_1000baseKX_Full |
+ ADVERTISED_Autoneg;
break;
default:
/* if we got here and link is up something bad is afoot */
@@ -367,6 +490,16 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
hw_link_info->phy_type);
}
+ /* Now that we've worked out everything that could be supported by the
+ * current PHY type, get what is supported by the NVM and AND
+ * the two masks together to get what is truly supported
+ */
+ i40e_phy_type_to_ethtool(pf, &e_supported,
+ &e_advertising);
+
+ ecmd->supported = ecmd->supported & e_supported;
+ ecmd->advertising = ecmd->advertising & e_advertising;
+
/* Set speed and duplex */
switch (link_speed) {
case I40E_LINK_SPEED_40GB:
@@ -401,74 +534,11 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
struct ethtool_cmd *ecmd,
struct i40e_pf *pf)
{
- enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
-
/* link is down and the driver needs to fall back on
* supported phy types to figure out what info to display
*/
- ecmd->supported = 0x0;
- ecmd->advertising = 0x0;
- if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
- if (pf->hw.mac.type == I40E_MAC_X722) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- }
- }
- }
- if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
- phy_types & I40E_CAP_PHY_TYPE_XFI ||
- phy_types & I40E_CAP_PHY_TYPE_SFI ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
- ecmd->supported |= SUPPORTED_10000baseT_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
- phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
- phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
- ecmd->supported |= SUPPORTED_40000baseCR4_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
- phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_40000baseCR4_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_40000baseCR4_Full;
- }
- if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
- !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_100baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
- ecmd->supported |= SUPPORTED_40000baseSR4_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
- ecmd->supported |= SUPPORTED_40000baseLR4_Full;
+ i40e_phy_type_to_ethtool(pf, &ecmd->supported,
+ &ecmd->advertising);
 /* With no link, speed and duplex are unknown */
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
@@ -497,38 +567,6 @@ static int i40e_get_settings(struct net_device *netdev,
i40e_get_settings_link_down(hw, ecmd, pf);
/* Now set the settings that don't rely on link being up/down */
-
- /* For backplane, supported and advertised are only reliant on the
- * phy types the NVM specifies are supported.
- */
- if (hw->device_id == I40E_DEV_ID_KX_B ||
- hw->device_id == I40E_DEV_ID_KX_C ||
- hw->device_id == I40E_DEV_ID_20G_KR2 ||
- hw->device_id == I40E_DEV_ID_20G_KR2_A) {
- ecmd->supported = SUPPORTED_Autoneg;
- ecmd->advertising = ADVERTISED_Autoneg;
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
- ecmd->supported |= SUPPORTED_40000baseKR4_Full;
- ecmd->advertising |= ADVERTISED_40000baseKR4_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
- ecmd->supported |= SUPPORTED_20000baseKR2_Full;
- ecmd->advertising |= ADVERTISED_20000baseKR2_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
- ecmd->supported |= SUPPORTED_10000baseKR_Full;
- ecmd->advertising |= ADVERTISED_10000baseKR_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
- ecmd->supported |= SUPPORTED_10000baseKX4_Full;
- ecmd->advertising |= ADVERTISED_10000baseKX4_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
- ecmd->supported |= SUPPORTED_1000baseKX_Full;
- ecmd->advertising |= ADVERTISED_1000baseKX_Full;
- }
- }
-
/* Set autoneg settings */
ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
@@ -1143,6 +1181,10 @@ static void i40e_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
+ if (pf->hw.pf_id == 0)
+ drvinfo->n_priv_flags = I40E_PRIV_FLAGS_GL_STR_LEN;
+ else
+ drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
}
static void i40e_get_ringparam(struct net_device *netdev,
@@ -1259,6 +1301,13 @@ static int i40e_set_ringparam(struct net_device *netdev,
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
+ /* this is to allow wr32 to have something to write to
+ * during early allocation of Rx buffers
+ */
+ u32 __iomem faketail = 0;
+ struct i40e_ring *ring;
+ u16 unused;
+
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count;
@@ -1267,12 +1316,22 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/
rx_rings[i].desc = NULL;
rx_rings[i].rx_bi = NULL;
+ rx_rings[i].tail = (u8 __iomem *)&faketail;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
+ if (err)
+ goto rx_unwind;
+
+ /* now allocate the Rx buffers to make sure the OS
+ * has enough memory; any failure here means abort
+ */
+ ring = &rx_rings[i];
+ unused = I40E_DESC_UNUSED(ring);
+ err = i40e_alloc_rx_buffers(ring, unused);
+rx_unwind:
if (err) {
- while (i) {
- i--;
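+ /* unwind: free ring i (possibly only partially set up)
+  * and every ring cloned before it
+  */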
+ do {
i40e_free_rx_resources(&rx_rings[i]);
- }
+ } while (i--);
kfree(rx_rings);
rx_rings = NULL;
@@ -1298,6 +1357,17 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (rx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_free_rx_resources(vsi->rx_rings[i]);
+ /* get the real tail offset */
+ rx_rings[i].tail = vsi->rx_rings[i]->tail;
+ /* this is to fake out the allocation routine
+ * into thinking it has to realloc everything
+ * but the recycling logic will let us re-use
+ * the buffers allocated above
+ */
+ rx_rings[i].next_to_use = 0;
+ rx_rings[i].next_to_clean = 0;
+ rx_rings[i].next_to_alloc = 0;
+ /* do a struct copy */
*vsi->rx_rings[i] = rx_rings[i];
}
kfree(rx_rings);
@@ -1342,7 +1412,10 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
return I40E_VSI_STATS_LEN(netdev);
}
case ETH_SS_PRIV_FLAGS:
- return I40E_PRIV_FLAGS_STR_LEN;
+ if (pf->hw.pf_id == 0)
+ return I40E_PRIV_FLAGS_GL_STR_LEN;
+ else
+ return I40E_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -1540,10 +1613,18 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break;
case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- memcpy(data, i40e_priv_flags_strings[i],
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
+ if (pf->hw.pf_id == 0) {
+ for (i = 0; i < I40E_PRIV_FLAGS_GL_STR_LEN; i++) {
+ memcpy(data, i40e_priv_flags_strings_gl[i],
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ } else {
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ memcpy(data, i40e_priv_flags_strings[i],
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
}
break;
default:
@@ -1714,7 +1795,7 @@ static void i40e_diag_test(struct net_device *netdev,
/* If the device is online then take it offline */
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ i40e_close(netdev);
else
/* This reset does not affect link - if it is
* changed to a type of reset that does affect
@@ -1743,7 +1824,7 @@ static void i40e_diag_test(struct net_device *netdev,
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
if (if_running)
- dev_open(netdev);
+ i40e_open(netdev);
} else {
/* Online tests */
netif_info(pf, drv, netdev, "online testing starting\n");
@@ -1837,7 +1918,7 @@ static int i40e_set_phys_id(struct net_device *netdev,
if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
pf->led_status = i40e_led_get(hw);
} else {
- i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_PORT, NULL);
+ i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
ret = i40e_led_get_phy(hw, &temp_status,
&pf->phy_led_val);
pf->led_status = temp_status;
@@ -2490,7 +2571,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
if (!vsi)
return -EINVAL;
-
pf = vsi->back;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
@@ -2548,15 +2628,18 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
if (ntohl(fsp->m_ext.data[1])) {
- if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) {
- netif_info(pf, drv, vsi->netdev, "Invalid VF id\n");
+ vf_id = ntohl(fsp->h_ext.data[1]);
+ if (vf_id >= pf->num_alloc_vfs) {
+ netif_info(pf, drv, vsi->netdev,
+ "Invalid VF id %d\n", vf_id);
goto free_input;
}
- vf_id = ntohl(fsp->h_ext.data[1]);
/* Find vsi id from vf id and override dest vsi */
input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
- netif_info(pf, drv, vsi->netdev, "Invalid queue id\n");
+ netif_info(pf, drv, vsi->netdev,
+ "Invalid queue id %d for VF %d\n",
+ input->q_index, vf_id);
goto free_input;
}
}
@@ -2803,18 +2886,18 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
struct i40e_pf *pf = vsi->back;
u32 ret_flags = 0;
- ret_flags |= pf->hw.func_caps.npar_enable ?
- I40E_PRIV_FLAGS_NPAR_FLAG : 0;
ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
I40E_PRIV_FLAGS_FD_ATR : 0;
ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
I40E_PRIV_FLAGS_VEB_STATS : 0;
- ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ?
- I40E_PRIV_FLAGS_PS : 0;
ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
+ if (pf->hw.pf_id == 0) {
+ ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ?
+ I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0;
+ }
return ret_flags;
}
@@ -2829,27 +2912,13 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ u16 sw_flags = 0, valid_flags = 0;
bool reset_required = false;
+ bool promisc_change = false;
+ int ret;
/* NOTE: MFP is not settable */
- /* allow the user to control the method of receive
- * buffer DMA, whether the packet is split at header
- * boundaries into two separate buffers. In some cases
- * one routine or the other will perform better.
- */
- if ((flags & I40E_PRIV_FLAGS_PS) &&
- !(pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
- pf->flags |= I40E_FLAG_RX_PS_ENABLED;
- pf->flags &= ~I40E_FLAG_RX_1BUF_ENABLED;
- reset_required = true;
- } else if (!(flags & I40E_PRIV_FLAGS_PS) &&
- (pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
- pf->flags &= ~I40E_FLAG_RX_PS_ENABLED;
- pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
- reset_required = true;
- }
-
if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
else
@@ -2876,6 +2945,33 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
reset_required = true;
}
+ if (pf->hw.pf_id == 0) {
+ if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
+ !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+ pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT;
+ promisc_change = true;
+ } else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
+ (pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+ pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT;
+ promisc_change = true;
+ }
+ }
+ if (promisc_change) {
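+ /* the PROMISC switch-config bit selects limited promisc;
+  * leave it clear to request true promiscuous support
+  */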
+ if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+ sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
+ NULL);
+ if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ dev_info(&pf->pdev->dev,
+ "couldn't set switch config bits, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ /* not a fatal problem, just keep going */
+ }
+ }
+
if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
(pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 8ad162c16..58e6c1570 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,16 +38,6 @@
#include "i40e_fcoe.h"
/**
- * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
- * @ptype: the packet type field from rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
- return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
- (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
-/**
* i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF
* @sof: the FCoE start of frame delimiter
**/
@@ -1371,7 +1361,7 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
- count = TXD_USE_COUNT(skb->len);
+ count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 5ebe12d56..a7c7b1d9b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -49,7 +49,7 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
struct i40e_dma_mem mem;
- i40e_status ret_code;
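+ /* start from success so early exit paths return a defined status */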
+ i40e_status ret_code = I40E_SUCCESS;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 344912957..501f15d9f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -45,8 +45,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 25
+#define DRV_VERSION_MINOR 5
+#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -90,6 +90,8 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
/* required last entry */
@@ -326,7 +328,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
unsigned long trans_start;
q = netdev_get_tx_queue(netdev, i);
- trans_start = q->trans_start ? : netdev->trans_start;
+ trans_start = q->trans_start;
if (netif_xmit_stopped(q) &&
time_after(jiffies,
(trans_start + netdev->watchdog_timeo))) {
@@ -396,24 +398,6 @@ static void i40e_tx_timeout(struct net_device *netdev)
}
/**
- * i40e_release_rx_desc - Store the new tail and head values
- * @rx_ring: ring to bump
- * @val: new head index
- **/
-static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
-{
- rx_ring->next_to_use = val;
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(val, rx_ring->tail);
-}
-
-/**
* i40e_get_vsi_stats_struct - Get System Network Statistics
* @vsi: the VSI we care about
*
@@ -1360,6 +1344,13 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
if (!vsi || !macaddr)
return NULL;
+ /* Do not allow a broadcast filter to be added here, since a
+  * broadcast filter is added as part of the add-VSI flow for any
+  * newly created VSI except the FDIR VSI
+ */
+ if (is_broadcast_ether_addr(macaddr))
+ return NULL;
+
f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
@@ -2097,6 +2088,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
}
+ /* if the VF is not trusted, do not allow promiscuous mode */
+ if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
+ clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ goto out;
+ }
+
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
bool cur_multipromisc;
@@ -2138,7 +2135,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
&vsi->back->hw,
vsi->seid,
- cur_promisc, NULL);
+ cur_promisc, NULL,
+ true);
if (aq_ret) {
retval =
i40e_aq_rc_to_posix(aq_ret,
@@ -2160,18 +2158,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
aq_ret, pf->hw.aq.asq_last_status);
}
}
- aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret) {
- retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
- }
}
out:
/* if something went wrong then set the changed flag so we try again */
@@ -2865,34 +2851,21 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
memset(&rx_ctx, 0, sizeof(rx_ctx));
ring->rx_buf_len = vsi->rx_buf_len;
- ring->rx_hdr_len = vsi->rx_hdr_len;
rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
- rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
rx_ctx.base = (ring->dma / 128);
rx_ctx.qlen = ring->count;
- if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
- set_ring_16byte_desc_enabled(ring);
- rx_ctx.dsize = 0;
- } else {
- rx_ctx.dsize = 1;
- }
+ /* use 32 byte descriptors */
+ rx_ctx.dsize = 1;
- rx_ctx.dtype = vsi->dtype;
- if (vsi->dtype) {
- set_ring_ps_enabled(ring);
- rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
- I40E_RX_SPLIT_IP |
- I40E_RX_SPLIT_TCP_UDP |
- I40E_RX_SPLIT_SCTP;
- } else {
- rx_ctx.hsplit_0 = 0;
- }
+ /* descriptor type is always zero
+ * rx_ctx.dtype = 0;
+ */
+ rx_ctx.hsplit_0 = 0;
- rx_ctx.rxmax = min_t(u16, vsi->max_frame,
- (chain_len * ring->rx_buf_len));
+ rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
if (hw->revision_id == 0)
rx_ctx.lrxqthresh = 0;
else
@@ -2929,12 +2902,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- if (ring_is_ps_enabled(ring)) {
- i40e_alloc_rx_headers(ring);
- i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
- } else {
- i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
- }
+ i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
return 0;
}
@@ -2973,40 +2941,18 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
else
vsi->max_frame = I40E_RXBUFFER_2048;
- /* figure out correct receive buffer length */
- switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
- I40E_FLAG_RX_PS_ENABLED)) {
- case I40E_FLAG_RX_1BUF_ENABLED:
- vsi->rx_hdr_len = 0;
- vsi->rx_buf_len = vsi->max_frame;
- vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
- break;
- case I40E_FLAG_RX_PS_ENABLED:
- vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
- vsi->rx_buf_len = I40E_RXBUFFER_2048;
- vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
- break;
- default:
- vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
- vsi->rx_buf_len = I40E_RXBUFFER_2048;
- vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
- break;
- }
+ vsi->rx_buf_len = I40E_RXBUFFER_2048;
#ifdef I40E_FCOE
/* setup rx buffer for FCoE */
if ((vsi->type == I40E_VSI_FCOE) &&
(vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
- vsi->rx_hdr_len = 0;
vsi->rx_buf_len = I40E_RXBUFFER_3072;
vsi->max_frame = I40E_RXBUFFER_3072;
- vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
}
#endif /* I40E_FCOE */
/* round up for the chip's needs */
- vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
- BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
@@ -4164,7 +4110,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
int i;
i40e_stop_misc_vector(pf);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
synchronize_irq(pf->msix_entries[0].vector);
free_irq(pf->msix_entries[0].vector, pf);
}
@@ -5509,11 +5455,7 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
*
* Returns 0, this is not allowed to fail
**/
-#ifdef I40E_FCOE
int i40e_close(struct net_device *netdev)
-#else
-static int i40e_close(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5538,8 +5480,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
WARN_ON(in_interrupt());
- if (i40e_check_asq_alive(&pf->hw))
- i40e_vc_notify_reset(pf);
/* do the biggest reset indicated */
if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -6377,7 +6317,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
break;
default:
dev_info(&pf->pdev->dev,
- "ARQ Error: Unknown event 0x%04x received\n",
+ "ARQ: Unknown event 0x%04x ignored\n",
opcode);
break;
}
@@ -6742,6 +6682,8 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
return;
+ if (i40e_check_asq_alive(&pf->hw))
+ i40e_vc_notify_reset(pf);
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
@@ -6862,6 +6804,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
*/
ret = i40e_aq_set_phy_int_mask(&pf->hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (ret)
dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
@@ -7525,10 +7468,6 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
rx_ring->count = vsi->num_desc;
rx_ring->size = 0;
rx_ring->dcb_tc = 0;
- if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
- set_ring_16byte_desc_enabled(rx_ring);
- else
- clear_ring_16byte_desc_enabled(rx_ring);
rx_ring->rx_itr_setting = pf->rx_itr_default;
vsi->rx_rings[i] = rx_ring;
}
@@ -7782,10 +7721,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
* i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
+ * @cpu: CPU to set in the vector's affinity mask
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
{
struct i40e_q_vector *q_vector;
@@ -7796,7 +7736,8 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
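+ /* record the caller-chosen CPU as this vector's affinity hint */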
+ cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+
if (vsi->netdev)
netif_napi_add(vsi->netdev, &q_vector->napi,
i40e_napi_poll, NAPI_POLL_WEIGHT);
@@ -7820,8 +7761,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int v_idx, num_q_vectors;
- int err;
+ int err, v_idx, num_q_vectors, current_cpu;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -7831,10 +7771,15 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
else
return -EINVAL;
+ current_cpu = cpumask_first(cpu_online_mask);
+
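+ /* spread q_vectors round-robin across the online CPUs,
+  * wrapping back to the first CPU after the last one
+  */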
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
if (err)
goto err_out;
+ current_cpu = cpumask_next(current_cpu, cpu_online_mask);
+ if (unlikely(current_cpu >= nr_cpu_ids))
+ current_cpu = cpumask_first(cpu_online_mask);
}
return 0;
@@ -8084,24 +8029,45 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
+ u16 vf_id = vsi->vf_id;
u8 i;
/* Fill out hash function seed */
if (seed) {
u32 *seed_dw = (u32 *)seed;
- for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
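+ /* the main VSI programs the PF-wide hash key registers;
+  * an SRIOV VSI programs its VF's dedicated copies
+  */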
+ if (vsi->type == I40E_VSI_MAIN) {
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
+ seed_dw[i]);
+ } else if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw,
+ I40E_VFQF_HKEY1(i, vf_id),
+ seed_dw[i]);
+ } else {
+ dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
+ }
}
if (lut) {
u32 *lut_dw = (u32 *)lut;
- if (lut_size != I40E_HLUT_ARRAY_SIZE)
- return -EINVAL;
-
- for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
- wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+ if (vsi->type == I40E_VSI_MAIN) {
+ if (lut_size != I40E_HLUT_ARRAY_SIZE)
+ return -EINVAL;
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+ } else if (vsi->type == I40E_VSI_SRIOV) {
+ if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
+ return -EINVAL;
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw,
+ I40E_VFQF_HLUT1(i, vf_id),
+ lut_dw[i]);
+ } else {
+ dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
+ }
}
i40e_flush(hw);
@@ -8440,7 +8406,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
- pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
if (I40E_DEBUG_USER & debug)
pf->hw.debug_mask = debug;
@@ -8451,14 +8416,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
- I40E_FLAG_LINK_POLLING_ENABLED |
I40E_FLAG_MSIX_ENABLED;
- if (iommu_present(&pci_bus_type))
- pf->flags |= I40E_FLAG_RX_PS_ENABLED;
- else
- pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
-
/* Set default ITR */
pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
@@ -9074,6 +9033,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
+ .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
#if IS_ENABLED(CONFIG_VXLAN)
.ndo_add_vxlan_port = i40e_add_vxlan_port,
.ndo_del_vxlan_port = i40e_del_vxlan_port,
@@ -9114,40 +9074,44 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np = netdev_priv(netdev);
np->vsi = vsi;
- netdev->hw_enc_features |= NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_TSO_ECN |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ netdev->hw_enc_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
0;
- netdev->features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_SCTP_CRC |
- NETIF_F_HIGHDMA |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_GRE |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_TSO6 |
- NETIF_F_RXCSUM |
- NETIF_F_RXHASH |
- 0;
+ if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+
+ /* record features VLANs can make use of */
+ netdev->vlan_features |= netdev->hw_enc_features |
+ NETIF_F_TSO_MANGLEID;
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
- netdev->features |= NETIF_F_NTUPLE;
- if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_features |= NETIF_F_NTUPLE;
+
+ netdev->hw_features |= netdev->hw_enc_features |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
- /* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
+ netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
@@ -9163,6 +9127,12 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
I40E_VLAN_ANY, false, true);
spin_unlock_bh(&vsi->mac_filter_list_lock);
}
+ } else if ((pf->hw.aq.api_maj_ver > 1) ||
+ ((pf->hw.aq.api_maj_ver == 1) &&
+ (pf->hw.aq.api_min_ver > 4))) {
+ /* Supported in FW API version higher than 1.4 */
+ pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
+ pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -9180,12 +9150,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
- /* vlan gets same features (except vlan offload)
- * after any tweaks for specific VSI types
- */
- netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER);
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
/* Setup netdev TC information */
@@ -9260,6 +9225,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
int ret = -ENODEV;
+ i40e_status aq_ret = 0;
u8 laa_macaddr[ETH_ALEN];
bool found_laa_mac_filter = false;
struct i40e_pf *pf = vsi->back;
@@ -9398,7 +9364,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |=
- I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
+ I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
}
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
@@ -9448,6 +9415,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->seid = ctxt.seid;
vsi->id = ctxt.vsi_number;
}
+ /* Except for the FDIR VSI, set the broadcast filter on all other VSIs */
+ if (vsi->type != I40E_VSI_FDIR) {
+ aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+ if (aq_ret) {
+ ret = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "set brdcast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
spin_lock_bh(&vsi->mac_filter_list_lock);
/* If macvlan filters already exist, force them to get loaded */
@@ -10444,6 +10423,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
**/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
+ u16 flags = 0;
int ret;
/* find out what's out there already */
@@ -10457,6 +10437,32 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
}
i40e_pf_reset_stats(pf);
+ /* set the switch config bit for the whole device to
+ * support limited promisc or true promisc
+ * when the user requests promisc. The default is limited
+ * promisc.
+ */
+
+ if ((pf->hw.pf_id == 0) &&
+ !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+ flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+
+ if (pf->hw.pf_id == 0) {
+ u16 valid_flags;
+
+ valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
+ NULL);
+ if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ dev_info(&pf->pdev->dev,
+ "couldn't set switch config bits, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ /* not a fatal problem, just keep going */
+ }
+ }
+
/* first time setup */
if (pf->lan_vsi == I40E_NO_VSI || reinit) {
struct i40e_vsi *vsi = NULL;
@@ -10684,11 +10690,9 @@ static void i40e_print_features(struct i40e_pf *pf)
#ifdef CONFIG_PCI_IOV
i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
- i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s",
+ i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
pf->hw.func_caps.num_vsis,
- pf->vsi[pf->lan_vsi]->num_queue_pairs,
- pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
-
+ pf->vsi[pf->lan_vsi]->num_queue_pairs);
if (pf->flags & I40E_FLAG_RSS_ENABLED)
i += snprintf(&buf[i], REMAIN(i), " RSS");
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
@@ -10827,6 +10831,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.func = PCI_FUNC(pdev->devfn);
pf->instance = pfs_found;
+ /* set up the locks for the AQ, do this only once in probe
+ * and destroy them only once in remove
+ */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
if (debug != -1) {
pf->msg_enable = pf->hw.debug_mask;
pf->msg_enable = debug;
@@ -10872,12 +10882,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set up a default setting for link flow control */
pf->hw.fc.requested_mode = I40E_FC_NONE;
- /* set up the locks for the AQ, do this only once in probe
- * and destroy them only once in remove
- */
- mutex_init(&hw->aq.asq_mutex);
- mutex_init(&hw->aq.arq_mutex);
-
err = i40e_init_adminq(hw);
if (err) {
if (err == I40E_ERR_FIRMWARE_API_VERSION)
@@ -11069,6 +11073,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
err = i40e_aq_set_phy_int_mask(&pf->hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (err)
dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
@@ -11270,7 +11275,6 @@ err_init_lan_hmc:
kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
- (void)i40e_shutdown_adminq(hw);
err_pf_reset:
iounmap(hw->hw_addr);
err_ioremap:
@@ -11312,8 +11316,10 @@ static void i40e_remove(struct pci_dev *pdev)
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state);
- del_timer_sync(&pf->service_timer);
- cancel_work_sync(&pf->service_task);
+ if (pf->service_timer.data)
+ del_timer_sync(&pf->service_timer);
+ if (pf->service_task.func)
+ cancel_work_sync(&pf->service_task);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 5730f8091..954efe311 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -693,10 +693,10 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
/* early check for status command and debug msgs */
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
- i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
i40e_nvm_update_state_str[upd_cmd],
hw->nvmupd_state,
- hw->aq.nvm_release_on_done,
+ hw->nvm_release_on_done, hw->nvm_wait_opcode,
cmd->command, cmd->config, cmd->offset, cmd->data_size);
if (upd_cmd == I40E_NVMUPD_INVALID) {
@@ -710,7 +710,18 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
* going into the state machine
*/
if (upd_cmd == I40E_NVMUPD_STATUS) {
+ if (!cmd->data_size) {
+ *perrno = -EFAULT;
+ return I40E_ERR_BUF_TOO_SHORT;
+ }
+
bytes[0] = hw->nvmupd_state;
+
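+ /* with room for four bytes, also report the opcode being waited on */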
+ if (cmd->data_size >= 4) {
+ bytes[1] = 0;
+ *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ }
+
return 0;
}
@@ -729,6 +740,14 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
case I40E_NVMUPD_STATE_INIT_WAIT:
case I40E_NVMUPD_STATE_WRITE_WAIT:
+ /* if we need to stop waiting for an event, clear
+ * the wait info and return before doing anything else
+ */
+ if (cmd->offset == 0xffff) {
+ i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+ return 0;
+ }
+
status = I40E_ERR_NOT_READY;
*perrno = -EBUSY;
break;
@@ -799,7 +818,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -815,7 +835,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -828,10 +849,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
- if (status)
+ if (status) {
i40e_release_nvm(hw);
- else
+ } else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
}
break;
@@ -849,7 +872,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
-EIO;
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -940,8 +964,10 @@ retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
- if (!status)
+ if (!status) {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
break;
case I40E_NVMUPD_WRITE_LCB:
@@ -953,7 +979,8 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@@ -967,6 +994,7 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
break;
@@ -980,7 +1008,8 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@@ -1030,6 +1059,37 @@ retry:
}
/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+{
+ if (opcode == hw->nvm_wait_opcode) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
+ }
+ hw->nvm_wait_opcode = 0;
+
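+ /* the awaited event arrived, so advance the update state machine */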
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+/**
* i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -1189,6 +1249,12 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
+ /* should we wait for a follow-up event?
+  * cmd->offset carries the AQ opcode to wait for
+  */
+ if (cmd->offset) {
+ hw->nvm_wait_opcode = cmd->offset;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+
return status;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index d51eee5bf..80403c6ee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -130,9 +130,18 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 vsi_id, bool set_filter,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc);
i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
u16 seid, bool enable,
struct i40e_asq_cmd_details *cmd_details);
@@ -174,6 +183,10 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
struct i40e_aqc_get_switch_config_resp *buf,
u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
enum i40e_aq_resources_ids resource,
enum i40e_aq_resource_access_type access,
@@ -228,10 +241,6 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile profile,
- u8 pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_bw,
struct i40e_asq_cmd_details *cmd_details);
@@ -308,6 +317,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 565ca7c83..ed39cbad2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -158,9 +158,10 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- struct timespec64 now, then = ns_to_timespec64(delta);
+ struct timespec64 now, then;
unsigned long flags;
+ then = ns_to_timespec64(delta);
spin_lock_irqsave(&pf->tmreg_lock, flags);
i40e_ptp_read(pf, &now);
@@ -288,9 +289,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
pf->last_rx_ptp_check = jiffies;
pf->rx_hwtstamp_cleared++;
- dev_warn(&vsi->back->pdev->dev,
- "%s: clearing Rx timestamp hang\n",
- __func__);
+ WARN_ONCE(1, "Detected Rx timestamp register hang\n");
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 6a49b7ae5..a8868e1bf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -636,19 +636,21 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
- * @tx_ring: tx ring to clean
- * @budget: how many cleans we're allowed
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
*
* Returns true if there's any budget left (e.g. the clean is finished)
**/
-static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ struct i40e_ring *tx_ring, int napi_budget)
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
- unsigned int total_packets = 0;
- unsigned int total_bytes = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = vsi->work_limit;
tx_buf = &tx_ring->tx_bi[i];
tx_desc = I40E_TX_DESC(tx_ring, i);
@@ -678,7 +680,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
total_packets += tx_buf->gso_segs;
/* free the skb */
- dev_consume_skb_any(tx_buf->skb);
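+ /* napi_consume_skb() enables bulk freeing of skbs in NAPI context */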
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -749,7 +751,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ !test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -767,7 +769,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ !test_bit(__I40E_DOWN, &vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -1022,7 +1024,6 @@ err:
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
unsigned long bi_size;
u16 i;
@@ -1030,48 +1031,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
- if (ring_is_ps_enabled(rx_ring)) {
- int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
-
- rx_bi = &rx_ring->rx_bi[0];
- if (rx_bi->hdr_buf) {
- dma_free_coherent(dev,
- bufsz,
- rx_bi->hdr_buf,
- rx_bi->dma);
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = 0;
- rx_bi->hdr_buf = NULL;
- }
- }
- }
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- if (rx_bi->dma) {
- dma_unmap_single(dev,
- rx_bi->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
- }
+ struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
if (rx_bi->skb) {
dev_kfree_skb(rx_bi->skb);
rx_bi->skb = NULL;
}
- if (rx_bi->page) {
- if (rx_bi->page_dma) {
- dma_unmap_page(dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page_dma = 0;
- }
- __free_page(rx_bi->page);
- rx_bi->page = NULL;
- rx_bi->page_offset = 0;
- }
+ if (!rx_bi->page)
+ continue;
+
+ dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_pages(rx_bi->page, 0);
+
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
}
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
@@ -1080,6 +1055,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -1104,37 +1080,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
- * i40e_alloc_rx_headers - allocate rx header buffers
- * @rx_ring: ring to alloc buffers
- *
- * Allocate rx header buffers for the entire ring. As these are static,
- * this is only called when setting up a new ring.
- **/
-void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
-{
- struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
- dma_addr_t dma;
- void *buffer;
- int buf_size;
- int i;
-
- if (rx_ring->rx_bi[0].hdr_buf)
- return;
- /* Make sure the buffers don't cross cache line boundaries. */
- buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
- buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
- &dma, GFP_KERNEL);
- if (!buffer)
- return;
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = dma + (i * buf_size);
- rx_bi->hdr_buf = buffer + (i * buf_size);
- }
-}
-
-/**
* i40e_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
@@ -1155,9 +1100,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
- ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
- : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -1168,6 +1111,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
goto err;
}
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -1186,6 +1130,10 @@ err:
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
+
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -1196,160 +1144,122 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
- * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
*
- * Returns true if any errors on allocation
+ * Returns true if the page was successfully allocated or
+ * reused.
**/
-bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *bi)
{
- u16 i = rx_ring->next_to_use;
- union i40e_rx_desc *rx_desc;
- struct i40e_rx_buffer *bi;
- const int current_node = numa_node_id();
+ struct page *page = bi->page;
+ dma_addr_t dma;
- /* do nothing if no valid netdev defined */
- if (!rx_ring->netdev || !cleaned_count)
- return false;
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page)) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
+ /* alloc new page for storage */
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
- if (bi->skb) /* desc is in use */
- goto no_buffers;
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- /* If we've been moved to a different NUMA node, release the
- * page so we can get a new one on the current node.
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
*/
- if (bi->page && page_to_nid(bi->page) != current_node) {
- dma_unmap_page(rx_ring->dev,
- bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else if (bi->page) {
- rx_ring->rx_stats.page_reuse_count++;
- }
-
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC);
- if (!bi->page) {
- rx_ring->rx_stats.alloc_page_failed++;
- goto no_buffers;
- }
- bi->page_dma = dma_map_page(rx_ring->dev,
- bi->page,
- 0,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
- rx_ring->rx_stats.alloc_page_failed++;
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- bi->page_offset = 0;
- goto no_buffers;
- }
- bi->page_offset = 0;
- }
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info.
- */
- rx_desc->read.pkt_addr =
- cpu_to_le64(bi->page_dma + bi->page_offset);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- i++;
- if (i == rx_ring->count)
- i = 0;
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_pages(page, 0);
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
}
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
- return false;
+ return true;
+}
-no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
- /* make sure to come back via polling to try again after
- * allocation failure
- */
- return true;
+ if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (vlan_tag & VLAN_VID_MASK))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
- * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
+ * i40e_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
*
- * Returns true if any errors on allocation
+ * Returns false if all allocations were successful, true if any fail
**/
-bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
- u16 i = rx_ring->next_to_use;
+ u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
- struct sk_buff *skb;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
- skb = bi->skb;
-
- if (!skb) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_buf_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- goto no_buffers;
- }
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- bi->skb = skb;
- }
+ rx_desc = I40E_RX_DESC(rx_ring, ntu);
+ bi = &rx_ring->rx_bi[ntu];
- if (!bi->dma) {
- bi->dma = dma_map_single(rx_ring->dev,
- skb->data,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- rx_ring->rx_stats.alloc_buff_failed++;
- bi->dma = 0;
- dev_kfree_skb(bi->skb);
- bi->skb = NULL;
- goto no_buffers;
- }
- }
+ do {
+ if (!i40e_alloc_mapped_page(rx_ring, bi))
+ goto no_buffers;
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc->read.hdr_addr = 0;
- i++;
- if (i == rx_ring->count)
- i = 0;
- }
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ rx_desc++;
+ bi++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_bi;
+ ntu = 0;
+ }
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
return false;
no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
/* make sure to come back via polling to try again after
* allocation failure
@@ -1358,41 +1268,35 @@ no_buffers:
}
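Note: zeroing status_error_len for the next_to_use descriptor is what lets the clean path detect ring end without extra state: in the write-back layout this word overlaps hdr_addr, which is always zero now that packet split is gone, so a non-zero value can only mean hardware completed a write-back. A condensed restatement of the paired guard in i40e_clean_rx_irq further below, not additional driver code:

        if (!rx_desc->wb.qword1.status_error_len)
                break;  /* descriptor has not been written back yet */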
/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-static void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag)
-{
- struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
- if (vlan_tag & VLAN_VID_MASK)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
- napi_gro_receive(&q_vector->napi, skb);
-}
-
-/**
* i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
- * @rx_status: status value of last descriptor in packet
- * @rx_error: error value of last descriptor in packet
- * @rx_ptype: ptype value of last descriptor in packet
+ * @rx_desc: the receive descriptor
+ *
+ * skb->protocol must be set before this function is called
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
- u32 rx_status,
- u32 rx_error,
- u16 rx_ptype)
+ union i40e_rx_desc *rx_desc)
{
- struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
- bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
+ struct i40e_rx_ptype_decoded decoded;
+ u32 rx_error, rx_status;
+ bool ipv4, ipv6;
+ u8 ptype;
+ u64 qword;
+
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+
/* Rx csum enabled and ip headers found? */
if (!(vsi->netdev->features & NETIF_F_RXCSUM))
return;
@@ -1432,20 +1336,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* The hardware supported by this driver does not validate outer
- * checksums for tunneled VXLAN or GENEVE frames. I don't agree
- * with it but the specification states that you "MAY validate", it
- * doesn't make it a hard requirement so if we have validated the
- * inner checksum report CHECKSUM_UNNECESSARY.
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
*/
-
- ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
- ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* fall through */
+ default:
+ break;
+ }
return;
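Note: with CHECKSUM_UNNECESSARY the stack treats skb->csum_level + 1 consecutive checksums as verified, so csum_level = 1 on a tunneled frame claims the inner transport checksum plus one outer level. A condensed restatement of the result of the logic above, illustrative only:

        /* tunneled TCP/UDP/SCTP frame that passed hardware validation */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->csum_level = 1;    /* two checksum levels considered verified */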
@@ -1459,7 +1366,7 @@ checksum_fail:
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
+static inline int i40e_ptype_to_htype(u8 ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
@@ -1487,11 +1394,11 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
u8 rx_ptype)
{
u32 hash;
- const __le64 rss_mask =
+ const __le64 rss_mask =
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (ring->netdev->features & NETIF_F_RXHASH)
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
@@ -1501,346 +1408,436 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
}
/**
- * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
*
- * Returns true if there's any budget left (e.g. the clean is finished)
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
**/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
+static inline
+void i40e_process_skb_fields(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+ u8 rx_ptype)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
- u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- u16 i = rx_ring->next_to_clean;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u32 copysize;
+ u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
- if (budget <= 0)
- return 0;
+ if (unlikely(rsyn)) {
+ i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
- do {
- struct i40e_rx_buffer *rx_bi;
- struct sk_buff *skb;
- u16 vlan_tag;
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- failure = failure ||
- i40e_alloc_rx_buffers_ps(rx_ring,
- cleaned_count);
- cleaned_count = 0;
- }
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
+ i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * DD bit is set.
- */
- dma_rmb();
- /* sync header buffer for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- if (i40e_rx_is_programming_status(qword)) {
- i40e_clean_programming_status(rx_ring, rx_desc);
- I40E_RX_INCREMENT(rx_ring, i);
- continue;
- }
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- if (likely(!skb)) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_hdr_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- failure = true;
- break;
- }
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+}
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- }
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
- rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
- I40E_RXD_QW1_LENGTH_SPH_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+/**
+ * i40e_pull_tail - i40e specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an i40e specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- /* sync half-page for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_bi->page_dma,
- rx_bi->page_offset,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
- rx_bi->skb = NULL;
- cleaned_count++;
- copysize = 0;
- if (rx_hbo || rx_sph) {
- int len;
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lowmem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
- if (rx_hbo)
- len = I40E_RX_HDR_SIZE;
- else
- len = rx_header_len;
- memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
- } else if (skb->len == 0) {
- int len;
- unsigned char *va = page_address(rx_bi->page) +
- rx_bi->page_offset;
-
- len = min(rx_packet_len, rx_ring->rx_hdr_len);
- memcpy(__skb_put(skb, len), va, len);
- copysize = len;
- rx_packet_len -= len;
- }
- /* Get the rest of the data if this was a header split */
- if (rx_packet_len) {
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rx_bi->page,
- rx_bi->page_offset + copysize,
- rx_packet_len, I40E_RXBUFFER_2048);
-
- /* If the page count is more than 2, then both halves
- * of the page are used and we need to free it. Do it
- * here instead of in the alloc code. Otherwise one
- * of the half-pages might be released between now and
- * then, and we wouldn't know which one to use.
- * Don't call get_page and free_page since those are
- * both expensive atomic operations that just change
- * the refcount in opposite directions. Just give the
- * page to the stack; he can have our refcount.
- */
- if (page_count(rx_bi->page) > 2) {
- dma_unmap_page(rx_ring->dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page = NULL;
- rx_bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else {
- get_page(rx_bi->page);
- /* switch to the other half-page here; the
- * allocation code programs the right addr
- * into HW. If we haven't used this half-page,
- * the address won't be changed, and HW can
- * just use it next time through.
- */
- rx_bi->page_offset ^= PAGE_SIZE / 2;
- }
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
- }
- I40E_RX_INCREMENT(rx_ring, i);
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- struct i40e_rx_buffer *next_buffer;
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
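Note: eth_get_headlen() parses only far enough to find the end of the protocol headers, so the copy above moves headers into the linear area while the payload stays in the page fragment and truesize stays accurate. Rough arithmetic for a plain TCPv4 frame, with illustrative header sizes:

        /* Ethernet (14) + IPv4 (20) + TCP (20) = 54 bytes pulled, well
         * under the I40E_RX_HDR_SIZE (256) cap; payload stays in the frag.
         */
        pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);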
- next_buffer = &rx_ring->rx_bi[i];
- next_buffer->skb = skb;
- rx_ring->rx_stats.non_eop_descs++;
- continue;
- }
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition, if the skb is not at least 60 bytes, we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ i40e_pull_tail(rx_ring, skb);
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
- dev_kfree_skb_any(skb);
- continue;
- }
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ return false;
+}
- if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
- i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
- I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
- I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
- rx_ring->last_rx_timestamp = jiffies;
- }
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *old_buff)
+{
+ struct i40e_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
- total_rx_packets++;
+ new_buff = &rx_ring->rx_bi[nta];
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ /* transfer page from old buffer to new buffer */
+ *new_buff = *old_buff;
+}
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
-#ifdef I40E_FCOE
- if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
- dev_kfree_skb_any(skb);
- continue;
- }
+/**
+ * i40e_page_is_reserved - check if reuse is possible
+ * @page: page struct to check
+ */
+static inline bool i40e_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif
- i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
+ /* will the data fit in the skb we allocated? if so, just
+ * copy it as it is pretty small anyway
+ */
+ if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
- } while (likely(total_rx_packets < budget));
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- rx_ring->q_vector->rx.total_packets += total_rx_packets;
- rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(!i40e_page_is_reserved(page)))
+ return true;
- return failure ? budget : total_rx_packets;
+ /* this page cannot be reused so discard it */
+ __free_pages(page, 0);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, truesize);
+
+ /* avoid re-using remote pages */
+ if (unlikely(i40e_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > last_offset)
+ return false;
+#endif
+
+ /* Even if we own the page, we are not allowed to use atomic_set()
+ * This would break get_page_unless_zero() users.
+ */
+ get_page(rx_buffer->page);
+
+ return true;
}
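Note: on 4K-page systems each page is treated as two 2048-byte halves, and the XOR with truesize simply walks page_offset between them; the half just attached to an skb rests while the other half is posted to hardware. A worked sketch of the offset sequence, illustrative only:

        unsigned int offset = 0;        /* PAGE_SIZE == 4096 */

        offset ^= I40E_RXBUFFER_2048;   /* -> 2048: upper half to hardware */
        offset ^= I40E_RXBUFFER_2048;   /* -> 0: lower half on the next pass */
        /* get_page() above keeps a reference for the half left on the ring */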
/**
- * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40e_fetch_rx_buffer - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_desc: descriptor containing info written by hardware
*
- * Returns number of packets cleaned
+ * This function allocates an skb on the fly, and populates it with the page
+ * data from the current receive descriptor, taking care to set up the skb
+ * correctly, as well as handling calling the page recycle function if
+ * necessary.
+ */
+static inline
+struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc)
+{
+ struct i40e_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ skb = rx_buffer->skb;
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) + rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate an skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ I40E_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ return NULL;
+ }
+
+ /* we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ } else {
+ rx_buffer->skb = NULL;
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
+ rx_ring->rx_stats.page_reuse_count++;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->page = NULL;
+
+ return skb;
+}
+
+/**
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+
+#define staterrlen rx_desc->wb.qword1.status_error_len
+ if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
+ i40e_clean_programming_status(rx_ring, rx_desc);
+ rx_ring->rx_bi[ntc].skb = skb;
+ return true;
+ }
+ /* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+ if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+ return false;
+
+ /* place skb in next buffer to be received */
+ rx_ring->rx_bi[ntc].skb = skb;
+ rx_ring->rx_stats.non_eop_descs++;
+
+ return true;
+}
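Note: a frame larger than one Rx buffer spans several descriptors and only the last carries EOF, so the in-progress skb is parked in rx_bi[ntc].skb until the EOP descriptor completes it. Illustrative arithmetic, assuming 2048-byte buffers:

        /* a 9000-byte jumbo frame needs 5 descriptors; i40e_is_non_eop()
         * returns true for the first four of them
         */
        unsigned int descs = DIV_ROUND_UP(9000, I40E_RXBUFFER_2048); /* = 5 */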
+
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage is that, on systems where IOMMU access
+ * carries expensive overhead, it avoids that cost by maintaining the
+ * DMA mapping of the page across uses.
+ *
+ * Returns amount of work completed
**/
-static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- u16 rx_packet_len;
bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u16 i;
- do {
- struct i40e_rx_buffer *rx_bi;
+ while (likely(total_rx_packets < budget)) {
+ union i40e_rx_desc *rx_desc;
struct sk_buff *skb;
+ u32 rx_status;
u16 vlan_tag;
+ u8 rx_ptype;
+ u64 qword;
+
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
failure = failure ||
- i40e_alloc_rx_buffers_1buf(rx_ring,
- cleaned_count);
+ i40e_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
+ rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ I40E_RXD_QW1_STATUS_SHIFT;
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
+ /* status_error_len will always be zero for unused descriptors
+ * because it's cleared in cleanup, and overlaps with hdr_addr
+ * which is always zero because packet split isn't used; if the
+ * hardware wrote DD, it will be non-zero.
+ */
+ if (!rx_desc->wb.qword1.status_error_len)
+ break;
+
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* DD bit is set.
*/
dma_rmb();
- if (i40e_rx_is_programming_status(qword)) {
- i40e_clean_programming_status(rx_ring, rx_desc);
- I40E_RX_INCREMENT(rx_ring, i);
- continue;
- }
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- prefetch(skb->data);
-
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
+ if (!skb)
+ break;
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- rx_bi->skb = NULL;
cleaned_count++;
- /* Get the header and possibly the whole packet
- * If this is an skb from previous receive dma will be 0
- */
- skb_put(skb, rx_packet_len);
- dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
-
- I40E_RX_INCREMENT(rx_ring, i);
-
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- rx_ring->rx_stats.non_eop_descs++;
+ if (i40e_is_non_eop(rx_ring, rx_desc, skb))
continue;
- }
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ /* ERR_MASK will only have valid bits if EOP set, and
+ * what we are doing here is actually checking
+ * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+ * the error field
+ */
+ if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
continue;
}
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
- i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
- I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
- I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
- rx_ring->last_rx_timestamp = jiffies;
- }
+ if (i40e_cleanup_headers(rx_ring, skb))
+ continue;
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- total_rx_packets++;
-
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ /* populate checksum, VLAN, and protocol */
+ i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
#ifdef I40E_FCOE
- if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
+ if (unlikely(
+ i40e_rx_is_fcoe(rx_ptype) &&
+ !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
dev_kfree_skb_any(skb);
continue;
}
#endif
+
+ vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+ le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
- } while (likely(total_rx_packets < budget));
+ /* update budget accounting */
+ total_rx_packets++;
+ }
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
@@ -1849,6 +1846,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* guarantee a trip back through this routine if there was a failure */
return failure ? budget : total_rx_packets;
}
@@ -1975,9 +1973,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- clean_complete = clean_complete &&
- i40e_clean_tx_irq(ring, vsi->work_limit);
- arm_wb = arm_wb || ring->arm_wb;
+ if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+ clean_complete = false;
+ continue;
+ }
+ arm_wb |= ring->arm_wb;
ring->arm_wb = false;
}
@@ -1991,16 +1991,12 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned;
-
- if (ring_is_ps_enabled(ring))
- cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
- else
- cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+ int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
- /* if we didn't clean as many as budgeted, we must be done */
- clean_complete = clean_complete && (budget_per_ring > cleaned);
+ /* if we clean as many as budgeted, we must not be done */
+ if (cleaned >= budget_per_ring)
+ clean_complete = false;
}
/* If work not completed, return budget and polling will return */
@@ -2247,15 +2243,13 @@ out:
/**
* i40e_tso - set up the tso context descriptor
- * @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
-static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
u64 cd_cmd, cd_tso_len, cd_mss;
union {
@@ -2292,16 +2286,22 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
ip.v6->payload_len = 0;
}
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM)) {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+ l4.udp->len = 0;
+
/* determine offset of outer transport header */
l4_offset = l4.hdr - skb->data;
/* remove payload length from outer checksum */
- paylen = (__force u16)l4.udp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.udp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.udp->check, htonl(paylen));
}
/* reset pointers to inner headers */
@@ -2321,9 +2321,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
l4_offset = l4.hdr - skb->data;
/* remove payload length from inner checksum */
- paylen = (__force u16)l4.tcp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
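Note: both checksum hunks above rely on the ones'-complement arithmetic of IP checksums, in which subtracting a value equals adding its bitwise complement; csum_replace_by_diff() uses this to fold the total payload length back out of the stack-seeded pseudo-header checksum so that hardware can insert per-segment lengths. A minimal standalone sketch of the identity, not kernel code:

        static unsigned short csum_fold32(unsigned int sum)
        {
                sum = (sum & 0xffff) + (sum >> 16);     /* fold carries twice */
                sum = (sum & 0xffff) + (sum >> 16);
                return (unsigned short)sum;
        }

        /* removing v from checksum c == adding ~v in ones'-complement */
        static unsigned short csum_sub16(unsigned short c, unsigned short v)
        {
                return csum_fold32((unsigned int)c + (unsigned short)~v);
        }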
@@ -2405,7 +2404,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
unsigned char *hdr;
} l4;
unsigned char *exthdr;
- u32 offset, cmd = 0, tunnel = 0;
+ u32 offset, cmd = 0;
__be16 frag_off;
u8 l4_proto = 0;
@@ -2419,6 +2418,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
if (skb->encapsulation) {
+ u32 tunnel = 0;
/* define outer network header type */
if (*tx_flags & I40E_TX_FLAGS_IPV4) {
tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
@@ -2436,13 +2436,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
&l4_proto, &frag_off);
}
- /* compute outer L3 header size */
- tunnel |= ((l4.hdr - ip.hdr) / 4) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
-
- /* switch IP header pointer from outer to inner header */
- ip.hdr = skb_inner_network_header(skb);
-
/* define outer transport */
switch (l4_proto) {
case IPPROTO_UDP:
@@ -2453,6 +2446,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
+ l4.hdr = skb_inner_network_header(skb);
+ break;
default:
if (*tx_flags & I40E_TX_FLAGS_TSO)
return -1;
@@ -2461,12 +2459,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
return 0;
}
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
/* compute tunnel header size */
tunnel |= ((ip.hdr - l4.hdr) / 2) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
/* indicate if we need to offload outer UDP header */
if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
@@ -2716,6 +2722,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = first;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
@@ -2723,12 +2731,14 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
dma_unmap_len_set(tx_bi, len, size);
dma_unmap_addr_set(tx_bi, dma, dma);
+ /* align size to end of page */
+ max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
tx_desc->buffer_addr = cpu_to_le64(dma);
while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset,
- I40E_MAX_DATA_PER_TXD, td_tag);
+ max_data, td_tag);
tx_desc++;
i++;
@@ -2739,9 +2749,10 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i = 0;
}
- dma += I40E_MAX_DATA_PER_TXD;
- size -= I40E_MAX_DATA_PER_TXD;
+ dma += max_data;
+ size -= max_data;
+ max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
tx_desc->buffer_addr = cpu_to_le64(dma);
}
@@ -2891,7 +2902,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
- count = TXD_USE_COUNT(skb->len);
+ count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
@@ -2922,7 +2933,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index a9bd70537..b78c810d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t {
(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
-/* Supported Rx Buffer Sizes */
-#define I40E_RXBUFFER_512 512 /* Used for packet split */
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096 4096
@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t {
* reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
* this adds up to 512 bytes of extra data meaning the smallest allocation
* we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
*/
-#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+ const u64 stat_err_bits)
+{
+ return !!(rx_desc->wb.qword1.status_error_len &
+ cpu_to_le64(stat_err_bits));
+}
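Note: because the status field begins at bit 0 of qword1, even the DD bit needs no shifting before the test; hypothetical usage mirroring the checks in i40e_clean_rx_irq:

        if (!i40e_test_staterr(rx_desc, BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                break;  /* descriptor not yet written back by hardware */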
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -142,14 +161,41 @@ enum i40e_dyn_idx_t {
prefetch((n)); \
} while (0)
-#define i40e_rx_desc i40e_32byte_rx_desc
-
#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 8192
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE 4096
+#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+ (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
+ * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed because 12K is
+ * not a power of 2 and division is expensive. It is used to
+ * approximate the number of descriptors used per linear buffer. Note
+ * that this will overestimate in some cases as it doesn't account for
+ * the fact that we will add up to 4K - 1 in aligning the 12K buffer;
+ * however, the error should not impact things much, as large buffers
+ * usually mean we will use fewer descriptors than there are frags in an skb.
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+ const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
+ const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
+ unsigned int adjust = ~(u32)0;
+
+ /* if we rounded up on the reciprocal pull down the adjustment */
+ if ((max * reciprocal) > adjust)
+ adjust = ~(u32)(reciprocal - 1);
+
+ return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+}
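Note: the reciprocal is a compile-time constant, so the division becomes a multiply and a shift. The result can be sanity-checked against DIV_ROUND_UP for every size a single skb buffer can reach; a sketch under that assumption, not driver code:

        unsigned int size;
        const unsigned int max = 12 * 1024;     /* I40E_MAX_DATA_PER_TXD_ALIGNED */

        for (size = 1; size <= 65536; size++)   /* skb buffers stay <= 64K */
                if (i40e_txd_use_count(size) != DIV_ROUND_UP(size, max))
                        pr_err("mismatch at %u\n", size);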
/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
@@ -184,10 +230,8 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
struct sk_buff *skb;
- void *hdr_buf;
dma_addr_t dma;
struct page *page;
- dma_addr_t page_dma;
unsigned int page_offset;
};
@@ -216,22 +260,18 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
- __I40E_RX_PS_ENABLED,
- __I40E_RX_16BYTE_DESC_ENABLED,
};
-#define ring_is_ps_enabled(ring) \
- test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
- set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
- clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define ring_is_16byte_desc_enabled(ring) \
- test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define set_ring_16byte_desc_enabled(ring) \
- set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define clear_ring_16byte_desc_enabled(ring) \
- clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+/* some useful defines for virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_HEADER_SPLIT 1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
@@ -258,16 +298,7 @@ struct i40e_ring {
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
- u16 rx_hdr_len;
u16 rx_buf_len;
- u8 dtype;
-#define I40E_RX_DTYPE_NO_SPLIT 0
-#define I40E_RX_DTYPE_HEADER_SPLIT 1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
-#define I40E_RX_SPLIT_L2 0x1
-#define I40E_RX_SPLIT_IP 0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP 0x8
/* used in interrupt processing */
u16 next_to_use;
@@ -301,6 +332,7 @@ struct i40e_ring {
struct i40e_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
+ u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
@@ -324,9 +356,7 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
-void i40e_alloc_rx_headers(struct i40e_ring *rxr);
+bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
@@ -377,7 +407,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
int count = 0, size = skb_headlen(skb);
for (;;) {
- count += TXD_USE_COUNT(size);
+ count += i40e_txd_use_count(size);
if (!nr_frags--)
break;
@@ -423,4 +453,14 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
/* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD;
}
+
+/**
+ * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
+ * @ptype: the packet type field from Rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fcoe(u16 ptype)
+{
+ return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
+ (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
+}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 3335f9d13..bd5f13bef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -36,7 +36,7 @@
#include "i40e_devids.h"
/* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) (mask << shift)
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
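Note: without the cast, a mask such as 0x3 shifted by 30 is computed in signed int, overflows into the sign bit, and sign-extends if the result is widened into a 64-bit register image; the (u32) cast keeps the whole expression unsigned. A standalone sketch of the difference, not driver code:

        #define OLD_MASK(mask, shift) (mask << shift)   /* the previous form */

        u64 bad  = OLD_MASK(0x3, 30);   /* signed overflow; may widen to
                                         * 0xffffffffc0000000 */
        u64 good = I40E_MASK(0x3, 30);  /* always 0x00000000c0000000 */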
@@ -275,6 +275,11 @@ struct i40e_hw_capabilities {
#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
#define I40E_FLEX10_STATUS_VC_MODE 0x2
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -549,6 +554,8 @@ struct i40e_hw {
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
struct i40e_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -1533,4 +1540,37 @@ struct i40e_lldp_variables {
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_L3_SRC_SHIFT 47
+#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
+#define I40E_L3_V6_SRC_SHIFT 43
+#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT)
+#define I40E_L3_DST_SHIFT 35
+#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT)
+#define I40E_L3_V6_DST_SHIFT 35
+#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT)
+#define I40E_L4_SRC_SHIFT 34
+#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT)
+#define I40E_L4_DST_SHIFT 33
+#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT)
+#define I40E_VERIFY_TAG_SHIFT 31
+#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT)
+
+#define I40E_FLEX_50_SHIFT 13
+#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT)
+#define I40E_FLEX_51_SHIFT 12
+#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT)
+#define I40E_FLEX_52_SHIFT 11
+#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT)
+#define I40E_FLEX_53_SHIFT 10
+#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT)
+#define I40E_FLEX_54_SHIFT 9
+#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT)
+#define I40E_FLEX_55_SHIFT 8
+#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT)
+#define I40E_FLEX_56_SHIFT 7
+#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
+#define I40E_FLEX_57_SHIFT 6
+#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index ab866cf3d..c92a3bdee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -80,10 +80,15 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
I40E_VIRTCHNL_OP_IWARP = 20,
I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
+
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -157,6 +162,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
@@ -165,8 +171,8 @@ struct i40e_virtchnl_vf_resource {
u16 max_mtu;
u32 vf_offload_flags;
- u32 max_fcoe_contexts;
- u32 max_fcoe_filters;
+ u32 rss_key_size;
+ u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
@@ -325,6 +331,39 @@ struct i40e_virtchnl_promisc_info {
* PF replies with struct i40e_eth_stats in an external buffer.
*/
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
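Note: key[1] and lut[1] are one-element placeholders for variable-length payloads, so the wire size of a message is the struct size plus the payload length minus the placeholder byte. A sizing sketch; the helper name is hypothetical:

        static inline int i40e_vc_rss_key_msg_len(u16 key_len)
        {
                /* the single placeholder byte is already inside sizeof() */
                return sizeof(struct i40e_virtchnl_rss_key) + key_len - 1;
        }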
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
+ */
+struct i40e_virtchnl_rss_hena {
+ u64 hena;
+};
+
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 816c6bbf7..1fcafcfa8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -48,7 +48,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
int i;
for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
/* Not all vfs are enabled so skip the ones that are not */
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
@@ -63,7 +63,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
}
/**
- * i40e_vc_notify_link_state
+ * i40e_vc_notify_vf_link_state
* @vf: pointer to the VF structure
*
* send a link status message to a single VF
@@ -74,7 +74,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *ls = &pf->hw.phy.link_info;
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
@@ -141,7 +141,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
return;
- abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+ abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
@@ -590,7 +590,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
}
rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
- /* set splitalways mode 10b */
+ /* set split mode 10b */
rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
}
@@ -665,8 +665,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
if (type == I40E_VSI_SRIOV) {
- u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_id = vsi->id;
/* If the port VLAN has been configured and then the
@@ -688,12 +686,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
"Could not add MAC filter %pM for VF %d\n",
vf->default_lan_addr.addr, vf->vf_id);
}
- f = i40e_add_filter(vsi, brdcast,
- vf->port_vlan_id ? vf->port_vlan_id : -1,
- true, false);
- if (!f)
- dev_info(&pf->pdev->dev,
- "Could not allocate VF broadcast filter\n");
spin_unlock_bh(&vsi->mac_filter_list_lock);
}
@@ -860,7 +852,11 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
if (ret)
goto error_alloc;
total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
- set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+ if (vf->trusted)
+ set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+ else
+ clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
/* store the total qps number for the runtime
* VF req validation
@@ -917,9 +913,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
+ u32 reg, reg_idx, bit_idx;
bool rsd = false;
int i;
- u32 reg;
if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
return;
@@ -937,6 +933,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
i40e_flush(hw);
}
+ /* clear the VFLR bit in GLGEN_VFLRSTAT */
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ i40e_flush(hw);
if (i40e_quiesce_vf_pci(vf))
dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
@@ -988,6 +989,7 @@ complete_reset:
}
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+
i40e_flush(hw);
clear_bit(__I40E_VF_DISABLE, &pf->state);
}
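/* Worked example of the GLGEN_VFLRSTAT indexing added above: each 32-bit
 * register covers 32 VFs, so with, say, vf_base_id = 64 and vf_id = 13
 * (values chosen purely for illustration):
 *
 *   abs VF id = 64 + 13 = 77
 *   reg_idx   = 77 / 32 = 2
 *   bit_idx   = 77 % 32 = 13
 *
 * i.e. the wr32() clears bit 13 of I40E_GLGEN_VFLRSTAT(2).
 */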
@@ -1227,8 +1229,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
/* single place to detect unsuccessful return values */
if (v_retval) {
vf->num_invalid_msgs++;
- dev_err(&pf->pdev->dev, "VF %d failed opcode %d, error: %d\n",
- vf->vf_id, v_opcode, v_retval);
+ dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
+ vf->vf_id, v_opcode, v_retval);
if (vf->num_invalid_msgs >
I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
dev_err(&pf->pdev->dev,
@@ -1246,9 +1248,9 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret) {
- dev_err(&pf->pdev->dev,
- "Unable to send the message to VF %d aq_err %d\n",
- vf->vf_id, pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "Unable to send the message to VF %d aq_err %d\n",
+ vf->vf_id, pf->hw.aq.asq_last_status);
return -EIO;
}
@@ -1306,8 +1308,8 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
- int i = 0, len = 0;
int num_vsis = 1;
+ int len = 0;
int ret;
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
@@ -1342,12 +1344,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
}
- if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
- vfres->vf_offload_flags |=
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF;
} else {
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+ (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ))
+ vfres->vf_offload_flags |=
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ else
+ vfres->vf_offload_flags |=
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
@@ -1356,8 +1362,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
}
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev,
+ "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
+ vf->vf_id);
+ ret = I40E_ERR_PARAM;
+ goto err;
+ }
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ }
if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
@@ -1368,16 +1382,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+ vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
+ vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+
if (vf->lan_vsi_idx) {
- vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
- vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
- vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
+ vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+ vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
/* VFs only use TC 0 */
- vfres->vsi_res[i].qset_handle
+ vfres->vsi_res[0].qset_handle
= le16_to_cpu(vsi->info.qs_handle[0]);
- ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
+ ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->default_lan_addr.addr);
- i++;
}
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
@@ -1407,6 +1423,25 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
}
/**
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * called to get the number of VLANs offloaded on this VF
+ **/
+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ int num_vlans = 0;
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
+ num_vlans++;
+ }
+
+ return num_vlans;
+}
+
+/**
* i40e_vc_config_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1422,22 +1457,128 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
(struct i40e_virtchnl_promisc_info *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- struct i40e_vsi *vsi;
+ struct i40e_mac_filter *f;
+ i40e_status aq_ret = 0;
bool allmulti = false;
- i40e_status aq_ret;
+ struct i40e_vsi *vsi;
+ bool alluni = false;
+ int aq_err = 0;
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
- !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
- (vsi->type != I40E_VSI_FCOE)) {
+ !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+ vf->vf_id);
+ /* Lie to the VF on purpose. */
+ aq_ret = 0;
+ goto error_param;
+ }
+ /* Multicast promiscuous handling */
if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
allmulti = true;
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
- allmulti, NULL);
+
+ if (vf->port_vlan_id) {
+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
+ allmulti,
+ vf->port_vlan_id,
+ NULL);
+ } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+ continue;
+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
+ vsi->seid,
+ allmulti,
+ f->vlan,
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
+ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
+ f->vlan,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ break;
+ }
+ }
+ } else {
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ allmulti, NULL);
+ aq_err = pf->hw.aq.asq_last_status;
+ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ goto error_param_int;
+ }
+ }
+
+ if (!aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully set multicast promiscuous mode\n",
+ vf->vf_id);
+ if (allmulti)
+ set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+ else
+ clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+ }
+
+ if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+ alluni = true;
+ if (vf->port_vlan_id) {
+ aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
+ alluni,
+ vf->port_vlan_id,
+ NULL);
+ } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ aq_ret = 0;
+ if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) {
+ aq_ret =
+ i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
+ vsi->seid,
+ alluni,
+ f->vlan,
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
+ }
+ if (aq_ret)
+ dev_err(&pf->pdev->dev,
+ "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
+ f->vlan,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+ } else {
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ alluni, NULL,
+ true);
+ aq_err = pf->hw.aq.asq_last_status;
+ if (aq_ret)
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
+ vf->vf_id, info->flags,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+
+error_param_int:
+ if (!aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully set unicast promiscuous mode\n",
+ vf->vf_id);
+ if (alluni)
+ set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+ else
+ clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+ }
error_param:
/* send the response to the VF */
@@ -1688,6 +1829,10 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
+/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can program */
+#define I40E_VC_MAX_MAC_ADDR_PER_VF 8
+#define I40E_VC_MAX_VLAN_PER_VF 8
+
/**
* i40e_check_vf_permission
* @vf: pointer to the VF info
@@ -1708,15 +1853,22 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
ret = I40E_ERR_INVALID_MAC_ADDR;
} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
!ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
/* If the host VMM administrator has set the VF MAC address
* administratively via the ndo_set_vf_mac command then deny
* permission to the VF to add or delete unicast MAC addresses.
+ * Unless the VF is privileged and then it can do whatever.
* The VF may request to set the MAC address filter already
* assigned to it so do not return an error in that case.
*/
dev_err(&pf->pdev->dev,
- "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
+ "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
+ ret = -EPERM;
+ } else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "VF is not trusted, switch the VF to trusted to add more functionality\n");
ret = -EPERM;
}
return ret;
@@ -1741,7 +1893,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -1780,6 +1931,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
ret = I40E_ERR_PARAM;
spin_unlock_bh(&vsi->mac_filter_list_lock);
goto error_param;
+ } else {
+ vf->num_mac++;
}
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
@@ -1815,7 +1968,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -1839,6 +1991,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
ret = I40E_ERR_INVALID_MAC_ADDR;
spin_unlock_bh(&vsi->mac_filter_list_lock);
goto error_param;
+ } else {
+ vf->num_mac--;
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
@@ -1873,8 +2027,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status aq_ret = 0;
int i;
+ if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
+ goto error_param;
+ }
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -1898,6 +2057,19 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < vfl->num_elements; i++) {
/* add new VLAN filter */
int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+ if (!ret)
+ vf->num_vlan++;
+
+ if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+ true,
+ vfl->vlan_id[i],
+ NULL);
+ if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+ true,
+ vfl->vlan_id[i],
+ NULL);
if (ret)
dev_err(&pf->pdev->dev,
@@ -1929,7 +2101,6 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -1950,6 +2121,19 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < vfl->num_elements; i++) {
int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+ if (!ret)
+ vf->num_vlan--;
+
+ if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+ false,
+ vfl->vlan_id[i],
+ NULL);
+ if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+ false,
+ vfl->vlan_id[i],
+ NULL);
if (ret)
dev_err(&pf->pdev->dev,
@@ -2029,6 +2213,135 @@ error_param:
}
/**
+ * i40e_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Configure the VF's RSS key
+ **/
+static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_key *vrk =
+ (struct i40e_virtchnl_rss_key *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = vrk->vsi_id;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
+err:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Configure the VF's RSS LUT
+ **/
+static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_lut *vrl =
+ (struct i40e_virtchnl_rss_lut *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = vrl->vsi_id;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
+err:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ aq_ret);
+}
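/* Reading of the two handlers above (not a documented contract):
 * i40e_config_rss() programs whichever of its key/LUT arguments is
 * non-NULL, so each virtchnl op maps onto a single call:
 *
 *   i40e_config_rss(vsi, vrk->key, NULL, 0);                       key only
 *   i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); LUT only
 */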
+
+/**
+ * i40e_vc_get_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Return the RSS HENA bits allowed by the hardware
+ **/
+static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_hena *vrh = NULL;
+ struct i40e_pf *pf = vf->pf;
+ i40e_status aq_ret = 0;
+ int len = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+ len = sizeof(struct i40e_virtchnl_rss_hena);
+
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh) {
+ aq_ret = I40E_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+ vrh->hena = i40e_pf_get_default_rss_hena(pf);
+err:
+ /* send the response back to the VF */
+ aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ aq_ret, (u8 *)vrh, len);
+ kfree(vrh);
+ return aq_ret;
+}
+
+/**
+ * i40e_vc_set_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Set the RSS HENA bits for the VF
+ **/
+static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_hena *vrh =
+ (struct i40e_virtchnl_rss_hena *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
+ (u32)(vrh->hena >> 32));
+
+ /* send the response to the VF */
+err:
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ aq_ret);
+}
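/* The two i40e_write_rx_ctl() calls above split the 64-bit hena mask
 * across a register pair. Worked example with an arbitrary value:
 *
 *   vrh->hena              = 0x0000000300001c00ULL
 *   (u32)vrh->hena         = 0x00001c00 -> I40E_VFQF_HENA1(0, vf_id)
 *   (u32)(vrh->hena >> 32) = 0x00000003 -> I40E_VFQF_HENA1(1, vf_id)
 */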
+
+/**
* i40e_vc_validate_vf_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2041,7 +2354,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
bool err_msg_format = false;
- int valid_len;
+ int valid_len = 0;
/* Check if VF is disabled. */
if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
@@ -2053,13 +2366,10 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
valid_len = sizeof(struct i40e_virtchnl_version_info);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
- valid_len = 0;
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
if (VF_IS_V11(vf))
valid_len = sizeof(u32);
- else
- valid_len = 0;
break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_txq_info);
@@ -2149,6 +2459,35 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
sizeof(struct i40e_virtchnl_iwarp_qv_info));
}
break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct i40e_virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_rss_key *vrk =
+ (struct i40e_virtchnl_rss_key *)msg;
+ if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct i40e_virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_rss_lut *vrl =
+ (struct i40e_virtchnl_rss_lut *)msg;
+ if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct i40e_virtchnl_rss_hena);
+ break;
/* These are always errors coming from the VF. */
case I40E_VIRTCHNL_OP_EVENT:
case I40E_VIRTCHNL_OP_UNKNOWN:
@@ -2175,11 +2514,11 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
* called from the common aeq/arq handler to
* process request from VF
**/
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
struct i40e_hw *hw = &pf->hw;
- unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+ int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
struct i40e_vf *vf;
int ret;
@@ -2247,6 +2586,19 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ ret = i40e_vc_config_rss_key(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ ret = i40e_vc_config_rss_lut(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ ret = i40e_vc_get_rss_hena(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ ret = i40e_vc_set_rss_hena(vf, msg, msglen);
+ break;
+
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
@@ -2268,9 +2620,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
**/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
- u32 reg, reg_idx, bit_idx, vf_id;
struct i40e_hw *hw = &pf->hw;
+ u32 reg, reg_idx, bit_idx;
struct i40e_vf *vf;
+ int vf_id;
if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
return 0;
@@ -2292,13 +2645,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* read GLGEN_VFLRSTAT register to find out the flr VFs */
vf = &pf->vf[vf_id];
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
- if (reg & BIT(bit_idx)) {
- /* clear the bit in GLGEN_VFLRSTAT */
- wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
-
- if (!test_bit(__I40E_DOWN, &pf->state))
- i40e_reset_vf(vf, true);
- }
+ if (reg & BIT(bit_idx))
+ /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
+ i40e_reset_vf(vf, true);
}
return 0;
@@ -2762,3 +3111,45 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
out:
return ret;
}
+
+/**
+ * i40e_ndo_set_vf_trust
+ * @netdev: network interface device structure of the pf
+ * @vf_id: VF identifier
+ * @setting: trust setting
+ *
+ * Enable or disable VF trust setting
+ **/
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+
+ if (!vf)
+ return -EINVAL;
+ if (setting == vf->trusted)
+ goto out;
+
+ vf->trusted = setting;
+ i40e_vc_notify_vf_reset(vf);
+ i40e_reset_vf(vf, false);
+ dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+ vf_id, setting ? "" : "un");
+out:
+ return ret;
+}
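/* Usage sketch (not part of this hunk): the new callback is reached through
 * the PF's net_device_ops table -- the wiring lives elsewhere in the driver
 * and is reproduced here only for orientation -- and is typically driven
 * from iproute2:
 *
 *   static const struct net_device_ops i40e_netdev_ops = {
 *           ...
 *           .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
 *   };
 *
 *   # ip link set dev <pf-netdev> vf 0 trust on
 */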
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index e7b2fba03..875174141 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -61,6 +61,8 @@ enum i40e_vf_states {
I40E_VF_STAT_IWARPENA,
I40E_VF_STAT_FCOEENA,
I40E_VF_STAT_DISABLED,
+ I40E_VF_STAT_MC_PROMISC,
+ I40E_VF_STAT_UC_PROMISC,
};
/* VF capabilities */
@@ -75,7 +77,7 @@ struct i40e_vf {
struct i40e_pf *pf;
/* VF id in the PF space */
- u16 vf_id;
+ s16 vf_id;
/* all VF vsis connect to the same parent */
enum i40e_switch_element_types parent_type;
struct i40e_virtchnl_version_info vf_ver;
@@ -88,6 +90,7 @@ struct i40e_vf {
struct i40e_virtchnl_ether_addr default_fcoe_addr;
u16 port_vlan_id;
bool pf_set_mac; /* The VMM admin set the VF MAC address */
+ bool trusted;
/* VSI indices - actual VSI pointers are maintained in the PF structure
* When assigned, these will be non-zero, because VSI 0 is always
@@ -108,6 +111,9 @@ struct i40e_vf {
bool link_forced;
bool link_up; /* only valid if VF link is forced */
bool spoofchk;
+ u16 num_mac;
+ u16 num_vlan;
+
/* RDMA Client */
struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info;
};
@@ -115,7 +121,7 @@ struct i40e_vf {
void i40e_free_vfs(struct i40e_pf *pf);
int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
void i40e_reset_vf(struct i40e_vf *vf, bool flr);
@@ -127,6 +133,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos);
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int max_tx_rate);
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi);
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index a3eae5d9a..1f9b3b5d9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -97,7 +97,6 @@ struct i40e_adminq_info {
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_release_on_done;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index aad8d6277..3114dcfa1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -78,17 +78,17 @@ struct i40e_aq_desc {
#define I40E_AQ_FLAG_EI_SHIFT 14
#define I40E_AQ_FLAG_FE_SHIFT 15
-#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
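/* BIT() comes from <linux/bitops.h> and expands to (1UL << (nr)), so the
 * converted flags keep their old values, e.g. I40E_AQ_FLAG_LB ==
 * BIT(I40E_AQ_FLAG_LB_SHIFT) == 1UL << 9 == 0x200, matching the comment
 * beside it.
 */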
/* error codes */
enum i40e_admin_queue_err {
@@ -205,10 +205,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
- /* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
-
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
i40e_aqc_opc_set_phy_config = 0x0601,
@@ -426,6 +422,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
@@ -1582,27 +1579,6 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
- /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
-};
-
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
-
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
@@ -1649,11 +1625,11 @@ enum i40e_aq_phy_type {
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
- I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
- I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
- I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
- I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT)
};
struct i40e_aqc_module_desc {
@@ -1924,9 +1900,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
/* Used for 0x0704 as well as for 0x0705 commands */
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
- (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+ BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
#define I40E_AQ_ANVM_FEATURE 0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
@@ -2195,7 +2171,7 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
@@ -2215,14 +2191,14 @@ struct i40e_aqc_get_set_rss_key_data {
I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
struct i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
__le16 vsi_id;
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
+ BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 771ac6ad8..8f6420400 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -58,6 +58,8 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_SFP_X722:
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
+ case I40E_DEV_ID_QSFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
case I40E_DEV_ID_X722_VF:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index ca8b58c3d..d34972bab 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -44,6 +44,8 @@
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_QSFP_I_X722 0x37D4
#define I40E_DEV_ID_X722_VF 0x37CD
#define I40E_DEV_ID_X722_VF_HV 0x37D9
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index cea97daa8..79d99cd91 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -155,19 +155,21 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
- * @tx_ring: tx ring to clean
- * @budget: how many cleans we're allowed
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
*
* Returns true if there's any budget left (i.e. the clean is finished)
**/
-static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ struct i40e_ring *tx_ring, int napi_budget)
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
- unsigned int total_packets = 0;
- unsigned int total_bytes = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = vsi->work_limit;
tx_buf = &tx_ring->tx_bi[i];
tx_desc = I40E_TX_DESC(tx_ring, i);
@@ -197,7 +199,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
total_packets += tx_buf->gso_segs;
/* free the skb */
- dev_kfree_skb_any(tx_buf->skb);
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -267,7 +269,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ !test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -285,7 +287,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ !test_bit(__I40E_DOWN, &vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -494,7 +496,6 @@ err:
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
unsigned long bi_size;
u16 i;
@@ -502,48 +503,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
- if (ring_is_ps_enabled(rx_ring)) {
- int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
-
- rx_bi = &rx_ring->rx_bi[0];
- if (rx_bi->hdr_buf) {
- dma_free_coherent(dev,
- bufsz,
- rx_bi->hdr_buf,
- rx_bi->dma);
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = 0;
- rx_bi->hdr_buf = NULL;
- }
- }
- }
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- if (rx_bi->dma) {
- dma_unmap_single(dev,
- rx_bi->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
- }
+ struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
if (rx_bi->skb) {
dev_kfree_skb(rx_bi->skb);
rx_bi->skb = NULL;
}
- if (rx_bi->page) {
- if (rx_bi->page_dma) {
- dma_unmap_page(dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page_dma = 0;
- }
- __free_page(rx_bi->page);
- rx_bi->page = NULL;
- rx_bi->page_offset = 0;
- }
+ if (!rx_bi->page)
+ continue;
+
+ dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_pages(rx_bi->page, 0);
+
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
}
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
@@ -552,6 +527,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -576,37 +552,6 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
- * i40evf_alloc_rx_headers - allocate rx header buffers
- * @rx_ring: ring to alloc buffers
- *
- * Allocate rx header buffers for the entire ring. As these are static,
- * this is only called when setting up a new ring.
- **/
-void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring)
-{
- struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
- dma_addr_t dma;
- void *buffer;
- int buf_size;
- int i;
-
- if (rx_ring->rx_bi[0].hdr_buf)
- return;
- /* Make sure the buffers don't cross cache line boundaries. */
- buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
- buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
- &dma, GFP_KERNEL);
- if (!buffer)
- return;
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = dma + (i * buf_size);
- rx_bi->hdr_buf = buffer + (i * buf_size);
- }
-}
-
-/**
* i40evf_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
@@ -627,9 +572,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
- ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
- : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -640,6 +583,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
goto err;
}
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -658,6 +602,10 @@ err:
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
+
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -668,160 +616,122 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
- * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
*
- * Returns true if any errors on allocation
+ * Returns true if the page was successfully allocated or
+ * reused.
**/
-bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *bi)
{
- u16 i = rx_ring->next_to_use;
- union i40e_rx_desc *rx_desc;
- struct i40e_rx_buffer *bi;
- const int current_node = numa_node_id();
+ struct page *page = bi->page;
+ dma_addr_t dma;
- /* do nothing if no valid netdev defined */
- if (!rx_ring->netdev || !cleaned_count)
- return false;
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page)) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
+ /* alloc new page for storage */
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
- if (bi->skb) /* desc is in use */
- goto no_buffers;
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- /* If we've been moved to a different NUMA node, release the
- * page so we can get a new one on the current node.
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
*/
- if (bi->page && page_to_nid(bi->page) != current_node) {
- dma_unmap_page(rx_ring->dev,
- bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else if (bi->page) {
- rx_ring->rx_stats.page_reuse_count++;
- }
-
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC);
- if (!bi->page) {
- rx_ring->rx_stats.alloc_page_failed++;
- goto no_buffers;
- }
- bi->page_dma = dma_map_page(rx_ring->dev,
- bi->page,
- 0,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
- rx_ring->rx_stats.alloc_page_failed++;
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- bi->page_offset = 0;
- goto no_buffers;
- }
- bi->page_offset = 0;
- }
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info.
- */
- rx_desc->read.pkt_addr =
- cpu_to_le64(bi->page_dma + bi->page_offset);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- i++;
- if (i == rx_ring->count)
- i = 0;
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_pages(page, 0);
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
}
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
- return false;
+ return true;
+}
-no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
- /* make sure to come back via polling to try again after
- * allocation failure
- */
- return true;
+ if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (vlan_tag & VLAN_VID_MASK))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
- * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
+ * i40evf_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
*
- * Returns true if any errors on allocation
+ * Returns false if all allocations were successful, true if any fail
**/
-bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
- u16 i = rx_ring->next_to_use;
+ u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
- struct sk_buff *skb;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
- skb = bi->skb;
-
- if (!skb) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_buf_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- goto no_buffers;
- }
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- bi->skb = skb;
- }
+ rx_desc = I40E_RX_DESC(rx_ring, ntu);
+ bi = &rx_ring->rx_bi[ntu];
- if (!bi->dma) {
- bi->dma = dma_map_single(rx_ring->dev,
- skb->data,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- rx_ring->rx_stats.alloc_buff_failed++;
- bi->dma = 0;
- dev_kfree_skb(bi->skb);
- bi->skb = NULL;
- goto no_buffers;
- }
- }
+ do {
+ if (!i40e_alloc_mapped_page(rx_ring, bi))
+ goto no_buffers;
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc->read.hdr_addr = 0;
- i++;
- if (i == rx_ring->count)
- i = 0;
- }
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ rx_desc++;
+ bi++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_bi;
+ ntu = 0;
+ }
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
return false;
no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
/* make sure to come back via polling to try again after
* allocation failure
@@ -830,41 +740,35 @@ no_buffers:
}
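/* Caller-side sketch, assuming the usual Rx poll-loop shape (the actual
 * caller sits outside this hunk): the boolean return feeds a "failure"
 * flag so the ring is retried on the next poll rather than spinning here:
 *
 *   if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
 *           failure |= i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
 *           cleaned_count = 0;
 *   }
 */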
/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-static void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag)
-{
- struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
- if (vlan_tag & VLAN_VID_MASK)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
- napi_gro_receive(&q_vector->napi, skb);
-}
-
-/**
* i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
- * @rx_status: status value of last descriptor in packet
- * @rx_error: error value of last descriptor in packet
- * @rx_ptype: ptype value of last descriptor in packet
+ * @rx_desc: the receive descriptor
+ *
+ * skb->protocol must be set before this function is called
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
- u32 rx_status,
- u32 rx_error,
- u16 rx_ptype)
+ union i40e_rx_desc *rx_desc)
{
- struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
- bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
+ struct i40e_rx_ptype_decoded decoded;
+ u32 rx_error, rx_status;
+ bool ipv4, ipv6;
+ u8 ptype;
+ u64 qword;
+
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+
/* Rx csum enabled and ip headers found? */
if (!(vsi->netdev->features & NETIF_F_RXCSUM))
return;
@@ -904,20 +808,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* The hardware supported by this driver does not validate outer
- * checksums for tunneled VXLAN or GENEVE frames. I don't agree
- * with it but the specification states that you "MAY validate", it
- * doesn't make it a hard requirement so if we have validated the
- * inner checksum report CHECKSUM_UNNECESSARY.
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
*/
-
- ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
- ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* fall through */
+ default:
+ break;
+ }
return;
@@ -931,7 +838,7 @@ checksum_fail:
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
+static inline int i40e_ptype_to_htype(u8 ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
@@ -959,7 +866,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
u8 rx_ptype)
{
u32 hash;
- const __le64 rss_mask =
+ const __le64 rss_mask =
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
@@ -973,313 +880,411 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
}
/**
- * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
*
- * Returns true if there's any budget left (e.g. the clean is finished)
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
**/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
+static inline
+void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+ u8 rx_ptype)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
- u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- u16 i = rx_ring->next_to_clean;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u32 copysize;
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- do {
- struct i40e_rx_buffer *rx_bi;
- struct sk_buff *skb;
- u16 vlan_tag;
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- failure = failure ||
- i40evf_alloc_rx_buffers_ps(rx_ring,
- cleaned_count);
- cleaned_count = 0;
- }
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
- if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+}
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * DD bit is set.
- */
- dma_rmb();
- /* sync header buffer for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- if (likely(!skb)) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_hdr_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- failure = true;
- break;
- }
+/**
+ * i40e_pull_tail - i40e specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an i40e specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- }
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
- rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
- I40E_RXD_QW1_LENGTH_SPH_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lomem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- /* sync half-page for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_bi->page_dma,
- rx_bi->page_offset,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
- rx_bi->skb = NULL;
- cleaned_count++;
- copysize = 0;
- if (rx_hbo || rx_sph) {
- int len;
-
- if (rx_hbo)
- len = I40E_RX_HDR_SIZE;
- else
- len = rx_header_len;
- memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
- } else if (skb->len == 0) {
- int len;
- unsigned char *va = page_address(rx_bi->page) +
- rx_bi->page_offset;
-
- len = min(rx_packet_len, rx_ring->rx_hdr_len);
- memcpy(__skb_put(skb, len), va, len);
- copysize = len;
- rx_packet_len -= len;
- }
- /* Get the rest of the data if this was a header split */
- if (rx_packet_len) {
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rx_bi->page,
- rx_bi->page_offset + copysize,
- rx_packet_len, I40E_RXBUFFER_2048);
-
- /* If the page count is more than 2, then both halves
- * of the page are used and we need to free it. Do it
- * here instead of in the alloc code. Otherwise one
- * of the half-pages might be released between now and
- * then, and we wouldn't know which one to use.
- * Don't call get_page and free_page since those are
- * both expensive atomic operations that just change
- * the refcount in opposite directions. Just give the
- * page to the stack; he can have our refcount.
- */
- if (page_count(rx_bi->page) > 2) {
- dma_unmap_page(rx_ring->dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page = NULL;
- rx_bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else {
- get_page(rx_bi->page);
- /* switch to the other half-page here; the
- * allocation code programs the right addr
- * into HW. If we haven't used this half-page,
- * the address won't be changed, and HW can
- * just use it next time through.
- */
- rx_bi->page_offset ^= PAGE_SIZE / 2;
- }
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
- }
- I40E_RX_INCREMENT(rx_ring, i);
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- struct i40e_rx_buffer *next_buffer;
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
- next_buffer = &rx_ring->rx_bi[i];
- next_buffer->skb = skb;
- rx_ring->rx_stats.non_eop_descs++;
- continue;
- }
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ i40e_pull_tail(rx_ring, skb);
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
- dev_kfree_skb_any(skb);
- continue;
- }
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ return false;
+}
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
- total_rx_packets++;
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *old_buff)
+{
+ struct i40e_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ new_buff = &rx_ring->rx_bi[nta];
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
-#ifdef I40E_FCOE
- if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
- dev_kfree_skb_any(skb);
- continue;
- }
+ /* transfer page from old buffer to new buffer */
+ *new_buff = *old_buff;
+}
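The next_to_alloc advance uses a compare-and-reset rather than a modulo, so the ring index wraps without paying for a division. A small stand-alone C sketch of the same wrap:

#include <stdio.h>

/* advance a ring index, wrapping at 'count' entries; the compare-and-
 * reset form avoids the division that a '% count' would cost
 */
static unsigned short advance(unsigned short idx, unsigned short count)
{
	idx++;
	return (idx < count) ? idx : 0;
}

int main(void)
{
	unsigned short idx = 0, count = 4;
	int i;

	for (i = 0; i < 8; i++) {
		printf("%u ", idx);
		idx = advance(idx, count);
	}
	printf("\n");	/* 0 1 2 3 0 1 2 3 */
	return 0;
}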
+
+/**
+ * i40e_page_is_reserved - check if reuse is possible
+ * @page: page struct to check
+ */
+static inline bool i40e_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif
- i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
+ /* will the data fit in the skb we allocated? if so, just
+ * copy it as it is pretty small anyway
+ */
+ if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
- } while (likely(total_rx_packets < budget));
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- rx_ring->q_vector->rx.total_packets += total_rx_packets;
- rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(!i40e_page_is_reserved(page)))
+ return true;
- return failure ? budget : total_rx_packets;
+ /* this page cannot be reused so discard it */
+ __free_pages(page, 0);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, truesize);
+
+ /* avoid re-using remote pages */
+ if (unlikely(i40e_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > last_offset)
+ return false;
+#endif
+
+ /* Even if we own the page, we are not allowed to use atomic_set()
+ * This would break get_page_unless_zero() users.
+ */
+ get_page(rx_buffer->page);
+
+ return true;
}
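On 4K-page systems each receive page is split into two 2K buffers, and the XOR on page_offset ping-pongs between the two halves. A tiny stand-alone demonstration of that flip (the constant mirrors I40E_RXBUFFER_2048 but is local to the sketch):

#include <stdio.h>

#define BUF_SIZE 2048		/* local stand-in for I40E_RXBUFFER_2048 */

int main(void)
{
	unsigned int offset = 0;
	int i;

	/* XOR-ing the offset with the buffer size ping-pongs between the
	 * two halves of a 4K page: 0 -> 2048 -> 0 -> ...
	 */
	for (i = 0; i < 6; i++) {
		printf("use half-page at offset %u\n", offset);
		offset ^= BUF_SIZE;
	}
	return 0;
}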
/**
- * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40evf_fetch_rx_buffer - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_desc: descriptor containing info written by hardware
*
- * Returns number of packets cleaned
+ * This function allocates an skb on the fly, and populates it with the page
+ * data from the current receive descriptor, taking care to set up the skb
+ * correctly, as well as handling calling the page recycle function if
+ * necessary.
+ **/
+static inline
+struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc)
+{
+ struct i40e_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ skb = rx_buffer->skb;
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) + rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+		/* allocate an skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ I40E_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ return NULL;
+ }
+
+ /* we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ } else {
+ rx_buffer->skb = NULL;
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
+ rx_ring->rx_stats.page_reuse_count++;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->page = NULL;
+
+ return skb;
+}
+
+/**
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+
+ /* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+ if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+ return false;
+
+ /* place skb in next buffer to be received */
+ rx_ring->rx_bi[ntc].skb = skb;
+ rx_ring->rx_stats.non_eop_descs++;
+
+ return true;
+}
+
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
**/
-static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- u16 rx_packet_len;
bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u16 i;
- do {
- struct i40e_rx_buffer *rx_bi;
+ while (likely(total_rx_packets < budget)) {
+ union i40e_rx_desc *rx_desc;
struct sk_buff *skb;
+ u32 rx_status;
u16 vlan_tag;
+ u8 rx_ptype;
+ u64 qword;
+
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
failure = failure ||
- i40evf_alloc_rx_buffers_1buf(rx_ring,
- cleaned_count);
+ i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
+ rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ I40E_RXD_QW1_STATUS_SHIFT;
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
+		/* status_error_len will always be zero for unused descriptors
+		 * because it's cleared in cleanup and overlaps with hdr_addr,
+		 * which is always zero because packet split isn't used. If the
+		 * hardware wrote DD then it will be non-zero.
+		 */
+ if (!rx_desc->wb.qword1.status_error_len)
+ break;
+
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* DD bit is set.
*/
dma_rmb();
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- prefetch(skb->data);
-
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc);
+ if (!skb)
+ break;
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- rx_bi->skb = NULL;
cleaned_count++;
- /* Get the header and possibly the whole packet
- * If this is an skb from previous receive dma will be 0
- */
- skb_put(skb, rx_packet_len);
- dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
-
- I40E_RX_INCREMENT(rx_ring, i);
-
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- rx_ring->rx_stats.non_eop_descs++;
+ if (i40e_is_non_eop(rx_ring, rx_desc, skb))
continue;
- }
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		/* ERR_MASK will only have valid bits if EOP set. What we are
+		 * doing here is actually checking I40E_RX_DESC_ERROR_RXE_SHIFT,
+		 * since it is the zeroth bit in the error field.
+		 */
+ if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
continue;
}
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ if (i40e_cleanup_headers(rx_ring, skb))
+ continue;
+
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- total_rx_packets++;
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ /* populate checksum, VLAN, and protocol */
+ i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+ le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
- } while (likely(total_rx_packets < budget));
+ /* update budget accounting */
+ total_rx_packets++;
+ }
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
@@ -1288,6 +1293,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* guarantee a trip back through this routine if there was a failure */
return failure ? budget : total_rx_packets;
}
@@ -1411,9 +1417,11 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- clean_complete = clean_complete &&
- i40e_clean_tx_irq(ring, vsi->work_limit);
- arm_wb = arm_wb || ring->arm_wb;
+ if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+ clean_complete = false;
+ continue;
+ }
+ arm_wb |= ring->arm_wb;
ring->arm_wb = false;
}
@@ -1427,16 +1435,12 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned;
-
- if (ring_is_ps_enabled(ring))
- cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
- else
- cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+ int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
- /* if we didn't clean as many as budgeted, we must be done */
- clean_complete = clean_complete && (budget_per_ring > cleaned);
+ /* if we clean as many as budgeted, we must not be done */
+ if (cleaned >= budget_per_ring)
+ clean_complete = false;
}
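The per-ring budget is an integer split of the NAPI budget with a floor of one, and a ring that consumes its whole share signals that polling must continue. A stand-alone sketch of that accounting (the numbers are made up):

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	int budget = 64, num_ringpairs = 3;
	/* each ring gets an integer share of the budget, never zero */
	int budget_per_ring = MAX(budget / num_ringpairs, 1);
	int cleaned = 21;	/* hypothetical work done on one ring */

	/* consuming the whole share means there may be more work left */
	printf("per-ring budget %d, clean_complete? %s\n", budget_per_ring,
	       cleaned >= budget_per_ring ? "no" : "yes");
	return 0;
}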
/* If work not completed, return budget and polling will return */
@@ -1514,15 +1518,13 @@ out:
/**
* i40e_tso - set up the tso context descriptor
- * @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
-static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
u64 cd_cmd, cd_tso_len, cd_mss;
union {
@@ -1559,16 +1561,22 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
ip.v6->payload_len = 0;
}
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM)) {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+ l4.udp->len = 0;
+
/* determine offset of outer transport header */
l4_offset = l4.hdr - skb->data;
/* remove payload length from outer checksum */
- paylen = (__force u16)l4.udp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.udp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.udp->check, htonl(paylen));
}
/* reset pointers to inner headers */
@@ -1588,9 +1596,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
l4_offset = l4.hdr - skb->data;
/* remove payload length from inner checksum */
- paylen = (__force u16)l4.tcp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
@@ -1630,7 +1637,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
unsigned char *hdr;
} l4;
unsigned char *exthdr;
- u32 offset, cmd = 0, tunnel = 0;
+ u32 offset, cmd = 0;
__be16 frag_off;
u8 l4_proto = 0;
@@ -1644,6 +1651,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
if (skb->encapsulation) {
+ u32 tunnel = 0;
/* define outer network header type */
if (*tx_flags & I40E_TX_FLAGS_IPV4) {
tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
@@ -1661,13 +1669,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
&l4_proto, &frag_off);
}
- /* compute outer L3 header size */
- tunnel |= ((l4.hdr - ip.hdr) / 4) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
-
- /* switch IP header pointer from outer to inner header */
- ip.hdr = skb_inner_network_header(skb);
-
/* define outer transport */
switch (l4_proto) {
case IPPROTO_UDP:
@@ -1678,6 +1679,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+ l4.hdr = skb_inner_network_header(skb);
+ break;
default:
if (*tx_flags & I40E_TX_FLAGS_TSO)
return -1;
@@ -1686,12 +1692,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
return 0;
}
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
/* compute tunnel header size */
tunnel |= ((ip.hdr - l4.hdr) / 2) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
/* indicate if we need to offload outer UDP header */
if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
@@ -1935,6 +1949,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = first;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
@@ -1942,12 +1958,14 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
dma_unmap_len_set(tx_bi, len, size);
dma_unmap_addr_set(tx_bi, dma, dma);
+ /* align size to end of page */
+ max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
tx_desc->buffer_addr = cpu_to_le64(dma);
while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset,
- I40E_MAX_DATA_PER_TXD, td_tag);
+ max_data, td_tag);
tx_desc++;
i++;
@@ -1958,9 +1976,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i = 0;
}
- dma += I40E_MAX_DATA_PER_TXD;
- size -= I40E_MAX_DATA_PER_TXD;
+ dma += max_data;
+ size -= max_data;
+ max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
tx_desc->buffer_addr = cpu_to_le64(dma);
}
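The max_data adjustment pads the first chunk of an oversized buffer so it ends exactly on a 4K read-request boundary: -dma & (4096 - 1) is the distance from dma to the next 4K line. A user-space check of that arithmetic (the address is arbitrary):

#include <stdio.h>
#include <stdint.h>

#define MAX_READ_REQ	4096u		/* I40E_MAX_READ_REQ_SIZE */
#define MAX_ALIGNED	(12u * 1024)	/* I40E_MAX_DATA_PER_TXD_ALIGNED */

int main(void)
{
	uint64_t dma = 0x1000064ULL;	/* arbitrary unaligned DMA address */
	/* -dma & (4K - 1) is the number of bytes up to the next 4K line,
	 * so the first chunk is stretched to end exactly on that boundary
	 */
	unsigned int max_data = MAX_ALIGNED +
				(unsigned int)(-dma & (MAX_READ_REQ - 1));

	printf("first chunk %u bytes, ends at %#llx\n", max_data,
	       (unsigned long long)(dma + max_data));
	/* 16284 bytes, ends at 0x1004000 (4K-aligned) */
	return 0;
}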
@@ -2109,7 +2128,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
- count = TXD_USE_COUNT(skb->len);
+ count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
@@ -2140,7 +2159,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 0429553fe..0112277e5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t {
(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
-/* Supported Rx Buffer Sizes */
-#define I40E_RXBUFFER_512 512 /* Used for packet split */
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096 4096
@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t {
* reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
* this adds up to 512 bytes of extra data meaning the smallest allocation
* we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
*/
-#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask, which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+ const u64 stat_err_bits)
+{
+ return !!(rx_desc->wb.qword1.status_error_len &
+ cpu_to_le64(stat_err_bits));
+}
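Because the mask is converted to little-endian once at the call site, the descriptor qword itself never needs to be byte-swapped. A stand-alone illustration, assuming a little-endian host so the conversion is the identity (the bit positions are illustrative, not the real descriptor layout):

#include <stdio.h>
#include <stdint.h>

#define DD_BIT	(1ULL << 0)	/* "descriptor done" for this sketch */
#define EOF_BIT	(1ULL << 1)	/* "end of frame" for this sketch */

/* the mask arrives pre-converted to little-endian, so the descriptor
 * word itself is compared raw; on a little-endian host the conversion
 * is the identity, which is all this sketch assumes
 */
static int test_staterr(uint64_t le_qword, uint64_t le_bits)
{
	return !!(le_qword & le_bits);
}

int main(void)
{
	uint64_t desc = DD_BIT;		/* hardware wrote DD but not EOF */

	printf("dd=%d eof=%d\n", test_staterr(desc, DD_BIT),
	       test_staterr(desc, EOF_BIT));	/* dd=1 eof=0 */
	return 0;
}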
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -142,14 +161,41 @@ enum i40e_dyn_idx_t {
prefetch((n)); \
} while (0)
-#define i40e_rx_desc i40e_32byte_rx_desc
-
#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 8192
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE 4096
+#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+ (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
+ * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
+ * that 12K is not a power of 2 and division is expensive. It is used to
+ * approximate the number of descriptors used per linear buffer. Note
+ * that this will overestimate in some cases as it doesn't account for the
+ * fact that we will add up to 4K - 1 in aligning the 12K buffer, however
+ * the error should not impact things much as large buffers usually mean
+ * we will use fewer descriptors than there are frags in an skb.
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+ const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
+ const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
+ unsigned int adjust = ~(u32)0;
+
+ /* if we rounded up on the reciprocal pull down the adjustment */
+ if ((max * reciprocal) > adjust)
+ adjust = ~(u32)(reciprocal - 1);
+
+ return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+}
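The reciprocal trick above replaces a division by 12288 with a 64-bit multiply and shift. The sketch below re-implements it in user space and checks it against exact DIV_ROUND_UP for every size up to 64 KiB, which covers any realistic buffer length:

#include <stdio.h>
#include <stdint.h>

#define MAX_ALIGNED 12288u	/* I40E_MAX_DATA_PER_TXD_ALIGNED */

static unsigned int txd_use_count(unsigned int size)
{
	const unsigned int max = MAX_ALIGNED;
	const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
	unsigned int adjust = ~(uint32_t)0;

	/* if we rounded up on the reciprocal pull down the adjustment */
	if ((max * reciprocal) > adjust)
		adjust = ~(uint32_t)(reciprocal - 1);

	return (uint32_t)((((uint64_t)size * reciprocal) + adjust) >> 32);
}

int main(void)
{
	unsigned int size, mismatches = 0;

	/* compare against the exact rounded-up division */
	for (size = 1; size <= 64u * 1024; size++)
		if (txd_use_count(size) !=
		    (size + MAX_ALIGNED - 1) / MAX_ALIGNED)
			mismatches++;
	printf("mismatches: %u\n", mismatches);	/* prints 0 */
	return 0;
}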
/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
@@ -183,10 +229,8 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
struct sk_buff *skb;
- void *hdr_buf;
dma_addr_t dma;
struct page *page;
- dma_addr_t page_dma;
unsigned int page_offset;
};
@@ -215,22 +259,18 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
- __I40E_RX_PS_ENABLED,
- __I40E_RX_16BYTE_DESC_ENABLED,
};
-#define ring_is_ps_enabled(ring) \
- test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
- set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
- clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define ring_is_16byte_desc_enabled(ring) \
- test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define set_ring_16byte_desc_enabled(ring) \
- set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define clear_ring_16byte_desc_enabled(ring) \
- clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+/* some useful defines for the virtchnl interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_HEADER_SPLIT 1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
@@ -249,16 +289,7 @@ struct i40e_ring {
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
- u16 rx_hdr_len;
u16 rx_buf_len;
- u8 dtype;
-#define I40E_RX_DTYPE_NO_SPLIT 0
-#define I40E_RX_DTYPE_HEADER_SPLIT 1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
-#define I40E_RX_SPLIT_L2 0x1
-#define I40E_RX_SPLIT_IP 0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP 0x8
/* used in interrupt processing */
u16 next_to_use;
@@ -290,6 +321,7 @@ struct i40e_ring {
struct i40e_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
+ u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
@@ -313,9 +345,7 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
-void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
@@ -359,7 +389,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
int count = 0, size = skb_headlen(skb);
for (;;) {
- count += TXD_USE_COUNT(size);
+ count += i40e_txd_use_count(size);
if (!nr_frags--)
break;
@@ -405,4 +435,14 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
/* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD;
}
+
+/**
+ * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
+ * @ptype: the packet type field from Rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fcoe(u16 ptype)
+{
+ return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
+ (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
+}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 301fe2b6d..97f96e0d9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -36,7 +36,7 @@
#include "i40e_devids.h"
/* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) (mask << shift)
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
@@ -258,6 +258,11 @@ struct i40e_hw_capabilities {
#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
#define I40E_FLEX10_STATUS_VC_MODE 0x2
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -522,6 +527,8 @@ struct i40e_hw {
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
struct i40e_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -1329,4 +1336,46 @@ enum i40e_reset_type {
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director and flexible payload */
+#define I40E_FD_INSET_L3_SRC_SHIFT 47
+#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \
+ I40E_FD_INSET_L3_SRC_SHIFT)
+#define I40E_FD_INSET_L3_DST_SHIFT 35
+#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \
+ I40E_FD_INSET_L3_DST_SHIFT)
+#define I40E_FD_INSET_L4_SRC_SHIFT 34
+#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \
+ I40E_FD_INSET_L4_SRC_SHIFT)
+#define I40E_FD_INSET_L4_DST_SHIFT 33
+#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \
+ I40E_FD_INSET_L4_DST_SHIFT)
+#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31
+#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \
+ I40E_FD_INSET_VERIFY_TAG_SHIFT)
+
+#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17
+#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD50_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16
+#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD51_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15
+#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD52_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14
+#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD53_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13
+#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD54_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12
+#define I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD55_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11
+#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD56_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10
+#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD57_SHIFT)
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index 3b9d20374..f04ce6cb7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -80,7 +80,12 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
+
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -154,6 +159,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF		0x00080000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
@@ -162,8 +168,8 @@ struct i40e_virtchnl_vf_resource {
u16 max_mtu;
u32 vf_offload_flags;
- u32 max_fcoe_contexts;
- u32 max_fcoe_filters;
+ u32 rss_key_size;
+ u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
@@ -322,6 +328,39 @@ struct i40e_virtchnl_promisc_info {
* PF replies with struct i40e_eth_stats in an external buffer.
*/
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+	u8 lut[1];		/* RSS lookup table */
+};
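These virtchnl structs use the classic one-element trailing array, so a message buffer is sized as sizeof(struct) plus (len - 1) extra bytes. A user-space sketch of building such a message (the struct and field names are local stand-ins, not the virtchnl ABI):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* local stand-in for the wire struct above, not the virtchnl ABI */
struct rss_key_msg {
	uint16_t vsi_id;
	uint16_t key_len;
	uint8_t  key[1];	/* packed key bytes follow */
};

int main(void)
{
	uint16_t key_len = 52;	/* a typical RSS hash key length */
	/* the [1] placeholder already accounts for one key byte */
	size_t len = sizeof(struct rss_key_msg) + key_len - 1;
	struct rss_key_msg *msg = calloc(1, len);

	if (!msg)
		return 1;
	msg->vsi_id = 3;
	msg->key_len = key_len;
	memset(msg->key, 0xaa, key_len);
	printf("message length: %zu bytes\n", len);
	free(msg);
	return 0;
}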
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
+ */
+struct i40e_virtchnl_rss_hena {
+ u64 hena;
+};
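The hena value is a plain 64-bit bitmap with one bit per hashable traffic type. A stand-alone sketch of setting and testing such bits (the pctype indices below are placeholders, not the real i40e_filter_pctype values):

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n) (1ULL << (n))

/* placeholder pctype indices, not the real i40e_filter_pctype values */
enum { PCTYPE_IPV4_TCP = 31, PCTYPE_IPV4_UDP = 33, PCTYPE_IPV4_SCTP = 34 };

int main(void)
{
	uint64_t hena = 0;

	/* enable hashing for IPv4 TCP and UDP traffic */
	hena |= BIT_ULL(PCTYPE_IPV4_TCP) | BIT_ULL(PCTYPE_IPV4_UDP);

	printf("tcp=%d udp=%d sctp=%d\n",
	       !!(hena & BIT_ULL(PCTYPE_IPV4_TCP)),
	       !!(hena & BIT_ULL(PCTYPE_IPV4_UDP)),
	       !!(hena & BIT_ULL(PCTYPE_IPV4_SCTP)));	/* 1 1 0 */
	return 0;
}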
+
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index e657eccd2..76ed97db2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -67,8 +67,6 @@ struct i40e_vsi {
u16 rx_itr_setting;
u16 tx_itr_setting;
u16 qs_handle;
- u8 *rss_hkey_user; /* User configured hash keys */
- u8 *rss_lut_user; /* User configured lookup table entries */
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -82,9 +80,6 @@ struct i40e_vsi {
#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
/* Supported Rx Buffer Sizes */
-#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
-#define I40EVF_RXBUFFER_128 128 /* Used for packet split */
-#define I40EVF_RXBUFFER_256 256 /* Used for packet split */
#define I40EVF_RXBUFFER_2048 2048
#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
#define I40EVF_MAX_AQ_BUF_SIZE 4096
@@ -210,9 +205,6 @@ struct i40evf_adapter {
u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1)
-#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2)
-#define I40EVF_FLAG_RX_PS_ENABLED BIT(3)
#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
@@ -222,6 +214,8 @@ struct i40evf_adapter {
#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11)
#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
+#define I40EVF_FLAG_PROMISC_ON BIT(15)
+#define I40EVF_FLAG_ALLMULTI_ON BIT(16)
/* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
@@ -239,8 +233,17 @@ struct i40evf_adapter {
#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7)
#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8)
-#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9)
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */
#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10)
+/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
+#define I40EVF_FLAG_AQ_GET_HENA BIT(11)
+#define I40EVF_FLAG_AQ_SET_HENA BIT(12)
+#define I40EVF_FLAG_AQ_SET_RSS_KEY BIT(13)
+#define I40EVF_FLAG_AQ_SET_RSS_LUT BIT(14)
+#define I40EVF_FLAG_AQ_REQUEST_PROMISC BIT(15)
+#define I40EVF_FLAG_AQ_RELEASE_PROMISC BIT(16)
+#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17)
+#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18)
/* OS defined structs */
struct net_device *netdev;
@@ -256,10 +259,18 @@ struct i40evf_adapter {
bool netdev_registered;
bool link_up;
enum i40e_virtchnl_ops current_op;
-#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
+#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \
+ (_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \
+ 0)
+/* RSS by the PF should be preferred over RSS via other methods. */
+#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \
+ (I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)))
#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
@@ -271,11 +282,16 @@ struct i40evf_adapter {
struct i40e_eth_stats current_stats;
struct i40e_vsi vsi;
u32 aq_wait_count;
+ /* RSS stuff */
+ u64 hena;
+ u16 rss_key_size;
+ u16 rss_lut_size;
+ u8 *rss_key;
+ u8 *rss_lut;
};
/* Ethtool Private Flags */
-#define I40EVF_PRIV_FLAGS_PS BIT(0)
/* needed by i40evf_ethtool.c */
extern char i40evf_driver_name[];
@@ -314,11 +330,12 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter);
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
void i40evf_request_stats(struct i40evf_adapter *adapter);
void i40evf_request_reset(struct i40evf_adapter *adapter);
+void i40evf_get_hena(struct i40evf_adapter *adapter);
+void i40evf_set_hena(struct i40evf_adapter *adapter);
+void i40evf_set_rss_key(struct i40evf_adapter *adapter);
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen);
-int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut,
- u16 lut_size);
-int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut,
- u16 lut_size);
+int i40evf_config_rss(struct i40evf_adapter *adapter);
#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index dd4430aae..c9c202f6c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -63,12 +63,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
#define I40EVF_STATS_LEN(_dev) \
(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
-static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = {
- "packet-split",
-};
-
-#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings)
-
/**
* i40evf_get_settings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
@@ -103,8 +97,6 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset)
{
if (sset == ETH_SS_STATS)
return I40EVF_STATS_LEN(netdev);
- else if (sset == ETH_SS_PRIV_FLAGS)
- return I40EVF_PRIV_FLAGS_STR_LEN;
else
return -EINVAL;
}
@@ -170,12 +162,6 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
p += ETH_GSTRING_LEN;
}
- } else if (sset == ETH_SS_PRIV_FLAGS) {
- for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
- memcpy(data, i40evf_priv_flags_strings[i],
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
}
}
@@ -225,7 +211,6 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, i40evf_driver_version, 32);
strlcpy(drvinfo->fw_version, "N/A", 4);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
}
/**
@@ -378,63 +363,6 @@ static int i40evf_set_coalesce(struct net_device *netdev,
}
/**
- * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
- * @adapter: board private structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow is supported, else Invalid Input.
- **/
-static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
- struct ethtool_rxnfc *cmd)
-{
- struct i40e_hw *hw = &adapter->hw;
- u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
- ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
-
- /* We always hash on IP src and dest addresses */
- cmd->data = RXH_IP_SRC | RXH_IP_DST;
-
- switch (cmd->flow_type) {
- case TCP_V4_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
- case UDP_V4_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
-
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case AH_V4_FLOW:
- case ESP_V4_FLOW:
- case IPV4_FLOW:
- break;
-
- case TCP_V6_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
- case UDP_V6_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
-
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case AH_V6_FLOW:
- case ESP_V6_FLOW:
- case IPV6_FLOW:
- break;
- default:
- cmd->data = 0;
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
* i40evf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -454,145 +382,8 @@ static int i40evf_get_rxnfc(struct net_device *netdev,
ret = 0;
break;
case ETHTOOL_GRXFH:
- ret = i40evf_get_rss_hash_opts(adapter, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-/**
- * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @adapter: board private structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- **/
-static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
- struct ethtool_rxnfc *nfc)
-{
- struct i40e_hw *hw = &adapter->hw;
- u32 flags = adapter->vf_res->vf_offload_flags;
-
- u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
- ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
-
- /* RSS does not support anything other than hashing
- * to queues on src and dst IPs and ports
- */
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- /* We need at least the IP SRC and DEST fields for hashing */
- if (!(nfc->data & RXH_IP_SRC) ||
- !(nfc->data & RXH_IP_DST))
- return -EINVAL;
-
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
-
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- } else {
- return -EINVAL;
- }
- break;
- case TCP_V6_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
-
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- } else {
- return -EINVAL;
- }
- break;
- case UDP_V4_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- } else {
- return -EINVAL;
- }
- break;
- case UDP_V6_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- } else {
- return -EINVAL;
- }
- break;
- case AH_ESP_V4_FLOW:
- case AH_V4_FLOW:
- case ESP_V4_FLOW:
- case SCTP_V4_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
- return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
- break;
- case AH_ESP_V6_FLOW:
- case AH_V6_FLOW:
- case ESP_V6_FLOW:
- case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
- return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
- break;
- case IPV4_FLOW:
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
- case IPV6_FLOW:
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
- default:
- return -EINVAL;
- }
-
- wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
- i40e_flush(hw);
-
- return 0;
-}
-
-/**
- * i40evf_set_rxnfc - command to set RX flow classification rules
- * @netdev: network interface device structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the command is supported.
- **/
-static int i40evf_set_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *cmd)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = i40evf_set_rss_hash_opt(adapter, cmd);
+ netdev_info(netdev,
+ "RSS hash info is not available to vf, use pf.\n");
break;
default:
break;
@@ -600,7 +391,6 @@ static int i40evf_set_rxnfc(struct net_device *netdev,
return ret;
}
-
/**
* i40evf_get_channels: get the number of channels supported by the device
* @netdev: network interface device structure
@@ -624,6 +414,19 @@ static void i40evf_get_channels(struct net_device *netdev,
}
/**
+ * i40evf_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40evf_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->rss_key_size;
+}
+
+/**
* i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
* @netdev: network interface device structure
*
@@ -631,7 +434,9 @@ static void i40evf_get_channels(struct net_device *netdev,
**/
static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
{
- return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->rss_lut_size;
}
/**
@@ -646,9 +451,6 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_vsi *vsi = &adapter->vsi;
- u8 *seed = NULL, *lut;
- int ret;
u16 i;
if (hfunc)
@@ -656,24 +458,13 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (!indir)
return 0;
- seed = key;
-
- lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
-
- ret = i40evf_get_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE);
- if (ret)
- goto out;
+ memcpy(key, adapter->rss_key, adapter->rss_key_size);
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++)
- indir[i] = (u32)lut[i];
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ indir[i] = (u32)adapter->rss_lut[i];
-out:
- kfree(lut);
-
- return ret;
+ return 0;
}
/**
@@ -689,8 +480,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_vsi *vsi = &adapter->vsi;
- u8 *seed = NULL;
u16 i;
/* We do not allow change in unsupported parameters */
@@ -701,76 +490,14 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
return 0;
if (key) {
- if (!vsi->rss_hkey_user) {
- vsi->rss_hkey_user = kzalloc(I40EVF_HKEY_ARRAY_SIZE,
- GFP_KERNEL);
- if (!vsi->rss_hkey_user)
- return -ENOMEM;
- }
- memcpy(vsi->rss_hkey_user, key, I40EVF_HKEY_ARRAY_SIZE);
- seed = vsi->rss_hkey_user;
- }
- if (!vsi->rss_lut_user) {
- vsi->rss_lut_user = kzalloc(I40EVF_HLUT_ARRAY_SIZE,
- GFP_KERNEL);
- if (!vsi->rss_lut_user)
- return -ENOMEM;
+ memcpy(adapter->rss_key, key, adapter->rss_key_size);
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
-
- return i40evf_config_rss(vsi, seed, vsi->rss_lut_user,
- I40EVF_HLUT_ARRAY_SIZE);
-}
-
-/**
- * i40evf_get_priv_flags - report device private flags
- * @dev: network interface device structure
- *
- * The get string set count and the string set should be matched for each
- * flag returned. Add new strings for each flag to the i40e_priv_flags_strings
- * array.
- *
- * Returns a u32 bitmap of flags.
- **/
-static u32 i40evf_get_priv_flags(struct net_device *dev)
-{
- struct i40evf_adapter *adapter = netdev_priv(dev);
- u32 ret_flags = 0;
-
- ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ?
- I40EVF_PRIV_FLAGS_PS : 0;
-
- return ret_flags;
-}
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ adapter->rss_lut[i] = (u8)(indir[i]);
-/**
- * i40evf_set_priv_flags - set private flags
- * @dev: network interface device structure
- * @flags: bit flags to be set
- **/
-static int i40evf_set_priv_flags(struct net_device *dev, u32 flags)
-{
- struct i40evf_adapter *adapter = netdev_priv(dev);
- bool reset_required = false;
-
- if ((flags & I40EVF_PRIV_FLAGS_PS) &&
- !(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
- adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
- reset_required = true;
- } else if (!(flags & I40EVF_PRIV_FLAGS_PS) &&
- (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
- adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
- reset_required = true;
- }
-
- /* if needed, issue reset to cause things to take effect */
- if (reset_required)
- i40evf_schedule_reset(adapter);
-
- return 0;
+ return i40evf_config_rss(adapter);
}
static const struct ethtool_ops i40evf_ethtool_ops = {
@@ -782,18 +509,16 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.get_strings = i40evf_get_strings,
.get_ethtool_stats = i40evf_get_ethtool_stats,
.get_sset_count = i40evf_get_sset_count,
- .get_priv_flags = i40evf_get_priv_flags,
- .set_priv_flags = i40evf_set_priv_flags,
.get_msglevel = i40evf_get_msglevel,
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
.set_coalesce = i40evf_set_coalesce,
.get_rxnfc = i40evf_get_rxnfc,
- .set_rxnfc = i40evf_set_rxnfc,
.get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
.get_rxfh = i40evf_get_rxfh,
.set_rxfh = i40evf_set_rxfh,
.get_channels = i40evf_get_channels,
+ .get_rxfh_key_size = i40evf_get_rxfh_key_size,
};
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 4b70aae2f..16c552952 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -37,8 +37,8 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 15
+#define DRV_VERSION_MINOR 5
+#define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -641,28 +641,11 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i;
- int rx_buf_len;
-
-
- /* Set the RX buffer length according to the mode */
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ||
- netdev->mtu <= ETH_DATA_LEN)
- rx_buf_len = I40EVF_RXBUFFER_2048;
- else
- rx_buf_len = ALIGN(max_frame, 1024);
for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
- adapter->rx_rings[i].rx_buf_len = rx_buf_len;
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
- set_ring_ps_enabled(&adapter->rx_rings[i]);
- adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE;
- } else {
- clear_ring_ps_enabled(&adapter->rx_rings[i]);
- }
+ adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048;
}
}
@@ -943,6 +926,21 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
bottom_of_search_loop:
continue;
}
+
+ if (netdev->flags & IFF_PROMISC &&
+ !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
+ adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
+ else if (!(netdev->flags & IFF_PROMISC) &&
+ adapter->flags & I40EVF_FLAG_PROMISC_ON)
+ adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;
+
+ if (netdev->flags & IFF_ALLMULTI &&
+ !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
+ adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+ else if (!(netdev->flags & IFF_ALLMULTI) &&
+ adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
+ adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
+
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
@@ -999,14 +997,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *ring = &adapter->rx_rings[i];
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
- i40evf_alloc_rx_headers(ring);
- i40evf_alloc_rx_buffers_ps(ring, ring->count);
- } else {
- i40evf_alloc_rx_buffers_1buf(ring, ring->count);
- }
- ring->next_to_use = ring->count - 1;
- writel(ring->next_to_use, ring->tail);
+ i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
}
}
@@ -1224,24 +1215,18 @@ out:
}
/**
- * i40e_config_rss_aq - Prepare for RSS using AQ commands
- * @vsi: vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * i40evf_config_rss_aq - Configure RSS key and lut by using AQ commands
+ * @adapter: board private structure
*
* Return 0 on success, negative on failure
**/
-static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
- u8 *lut, u16 lut_size)
+static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
{
- struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_aqc_get_set_rss_key_data *rss_key =
+ (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
struct i40e_hw *hw = &adapter->hw;
int ret = 0;
- if (!vsi->id)
- return -EINVAL;
-
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
@@ -1249,198 +1234,82 @@ static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
return -EBUSY;
}
- if (seed) {
- struct i40e_aqc_get_set_rss_key_data *rss_key =
- (struct i40e_aqc_get_set_rss_key_data *)seed;
- ret = i40evf_aq_set_rss_key(hw, vsi->id, rss_key);
- if (ret) {
- dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
+ ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
+ i40evf_stat_str(hw, ret),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
}
- if (lut) {
- ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, lut, lut_size);
- if (ret) {
- dev_err(&adapter->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
+ ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
+ adapter->rss_lut, adapter->rss_lut_size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
+ i40evf_stat_str(hw, ret),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
}
return ret;
}
/**
* i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * @adapter: board private structure
*
* Returns 0 on success, negative on failure
**/
-static int i40evf_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
- const u8 *lut, u16 lut_size)
+static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
{
- struct i40evf_adapter *adapter = vsi->back;
struct i40e_hw *hw = &adapter->hw;
+ u32 *dw;
u16 i;
- if (seed) {
- u32 *seed_dw = (u32 *)seed;
-
- for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
- }
-
- if (lut) {
- u32 *lut_dw = (u32 *)lut;
+ dw = (u32 *)adapter->rss_key;
+	for (i = 0; i < adapter->rss_key_size / 4; i++)
+ wr32(hw, I40E_VFQF_HKEY(i), dw[i]);
- if (lut_size != I40EVF_HLUT_ARRAY_SIZE)
- return -EINVAL;
+ dw = (u32 *)adapter->rss_lut;
+	for (i = 0; i < adapter->rss_lut_size / 4; i++)
+ wr32(hw, I40E_VFQF_HLUT(i), dw[i]);
- for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
- wr32(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
- }
i40e_flush(hw);
return 0;
}
/**
- * * i40evf_get_rss_aq - Get RSS keys and lut by using AQ commands
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Return 0 on success, negative on failure
- **/
-static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
- u8 *lut, u16 lut_size)
-{
- struct i40evf_adapter *adapter = vsi->back;
- struct i40e_hw *hw = &adapter->hw;
- int ret = 0;
-
- if (seed) {
- ret = i40evf_aq_get_rss_key(hw, vsi->id,
- (struct i40e_aqc_get_set_rss_key_data *)seed);
- if (ret) {
- dev_err(&adapter->pdev->dev,
- "Cannot get RSS key, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
- }
-
- if (lut) {
- ret = i40evf_aq_get_rss_lut(hw, vsi->id, seed, lut, lut_size);
- if (ret) {
- dev_err(&adapter->pdev->dev,
- "Cannot get RSS lut, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
- }
-
- return ret;
-}
-
-/**
- * * i40evf_get_rss_reg - Get RSS keys and lut by reading registers
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Returns 0 on success, negative on failure
- **/
-static int i40evf_get_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
- const u8 *lut, u16 lut_size)
-{
- struct i40evf_adapter *adapter = vsi->back;
- struct i40e_hw *hw = &adapter->hw;
- u16 i;
-
- if (seed) {
- u32 *seed_dw = (u32 *)seed;
-
- for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- seed_dw[i] = rd32(hw, I40E_VFQF_HKEY(i));
- }
-
- if (lut) {
- u32 *lut_dw = (u32 *)lut;
-
- if (lut_size != I40EVF_HLUT_ARRAY_SIZE)
- return -EINVAL;
-
- for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
- lut_dw[i] = rd32(hw, I40E_VFQF_HLUT(i));
- }
-
- return 0;
-}
-
-/**
* i40evf_config_rss - Configure RSS keys and lut
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Returns 0 on success, negative on failure
- **/
-int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed,
- u8 *lut, u16 lut_size)
-{
- struct i40evf_adapter *adapter = vsi->back;
-
- if (RSS_AQ(adapter))
- return i40evf_config_rss_aq(vsi, seed, lut, lut_size);
- else
- return i40evf_config_rss_reg(vsi, seed, lut, lut_size);
-}
-
-/**
- * i40evf_get_rss - Get RSS keys and lut
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * @adapter: board private structure
*
* Returns 0 on success, negative on failure
**/
-int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size)
+int i40evf_config_rss(struct i40evf_adapter *adapter)
{
- struct i40evf_adapter *adapter = vsi->back;
- if (RSS_AQ(adapter))
- return i40evf_get_rss_aq(vsi, seed, lut, lut_size);
- else
- return i40evf_get_rss_reg(vsi, seed, lut, lut_size);
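+ /* If the PF manages RSS, just flag the virtchnl requests here;
+ * the watchdog task sends them once the channel is free.
+ */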
+ if (RSS_PF(adapter)) {
+ adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
+ I40EVF_FLAG_AQ_SET_RSS_KEY;
+ return 0;
+ } else if (RSS_AQ(adapter)) {
+ return i40evf_config_rss_aq(adapter);
+ } else {
+ return i40evf_config_rss_reg(adapter);
+ }
}
/**
* i40evf_fill_rss_lut - Fill the lut with default values
- * @lut: Lookup table to be filled with
- * @rss_table_size: Lookup table size
- * @rss_size: Range of queue number for hashing
+ * @adapter: board private structure
**/
-static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
+static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
{
u16 i;
- for (i = 0; i < rss_table_size; i++)
- lut[i] = i % rss_size;
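+ /* default LUT: spread queue indices round-robin across all entries */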
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ adapter->rss_lut[i] = i % adapter->num_active_queues;
}
/**
@@ -1451,42 +1320,25 @@ static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
**/
static int i40evf_init_rss(struct i40evf_adapter *adapter)
{
- struct i40e_vsi *vsi = &adapter->vsi;
struct i40e_hw *hw = &adapter->hw;
- u8 seed[I40EVF_HKEY_ARRAY_SIZE];
- u64 hena;
- u8 *lut;
int ret;
- /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
- if (adapter->vf_res->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
- else
- hena = I40E_DEFAULT_RSS_HENA;
- wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
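+ /* the PF owns the hash-enable (hena) bits when it manages RSS */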
+ if (!RSS_PF(adapter)) {
+ /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+ if (adapter->vf_res->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
+ else
+ adapter->hena = I40E_DEFAULT_RSS_HENA;
- lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
+ wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
+ }
- /* Use user configured lut if there is one, otherwise use default */
- if (vsi->rss_lut_user)
- memcpy(lut, vsi->rss_lut_user, I40EVF_HLUT_ARRAY_SIZE);
- else
- i40evf_fill_rss_lut(lut, I40EVF_HLUT_ARRAY_SIZE,
- adapter->num_active_queues);
+ i40evf_fill_rss_lut(adapter);
- /* Use user configured hash key if there is one, otherwise
- * user default.
- */
- if (vsi->rss_hkey_user)
- memcpy(seed, vsi->rss_hkey_user, I40EVF_HKEY_ARRAY_SIZE);
- else
- netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
- ret = i40evf_config_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE);
- kfree(lut);
+ netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
+ ret = i40evf_config_rss(adapter);
return ret;
}
@@ -1507,7 +1359,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
GFP_KERNEL);
if (!adapter->q_vectors)
- goto err_out;
+ return -ENOMEM;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
q_vector = &adapter->q_vectors[q_idx];
@@ -1519,15 +1371,6 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
}
return 0;
-
-err_out:
- while (q_idx) {
- q_idx--;
- q_vector = &adapter->q_vectors[q_idx];
- netif_napi_del(&q_vector->napi);
- }
- kfree(adapter->q_vectors);
- return -ENOMEM;
}
/**
@@ -1610,19 +1453,16 @@ err_set_interrupt:
}
/**
- * i40evf_clear_rss_config_user - Clear user configurations of RSS
- * @vsi: Pointer to VSI structure
+ * i40evf_free_rss - Free memory used by RSS structs
+ * @adapter: board private structure
**/
-static void i40evf_clear_rss_config_user(struct i40e_vsi *vsi)
+static void i40evf_free_rss(struct i40evf_adapter *adapter)
{
- if (!vsi)
- return;
-
- kfree(vsi->rss_hkey_user);
- vsi->rss_hkey_user = NULL;
+ kfree(adapter->rss_key);
+ adapter->rss_key = NULL;
- kfree(vsi->rss_lut_user);
- vsi->rss_lut_user = NULL;
+ kfree(adapter->rss_lut);
+ adapter->rss_lut = NULL;
}
/**
@@ -1756,6 +1596,39 @@ static void i40evf_watchdog_task(struct work_struct *work)
adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
goto watchdog_done;
}
+ if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
+ i40evf_get_hena(adapter);
+ goto watchdog_done;
+ }
+ if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
+ i40evf_set_hena(adapter);
+ goto watchdog_done;
+ }
+ if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
+ i40evf_set_rss_key(adapter);
+ goto watchdog_done;
+ }
+ if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
+ i40evf_set_rss_lut(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
+ i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC |
+ I40E_FLAG_VF_MULTICAST_PROMISC);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
+ i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
+ goto watchdog_done;
+ }
+
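+ /* exit promiscuous mode only once both the unicast and
+ * multicast releases have been requested
+ */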
+ if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
+ (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
+ i40evf_set_promiscuous(adapter, 0);
+ goto watchdog_done;
+ }
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
@@ -2003,6 +1876,8 @@ static void i40evf_adminq_task(struct work_struct *work)
/* check for error indications */
val = rd32(hw, hw->aq.arq.len);
+ if (val == 0xdeadbeef) /* indicates device in reset */
+ goto freedom;
oldval = val;
if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
@@ -2259,6 +2134,28 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
+ NETIF_F_HW_VLAN_CTAG_RX |\
+ NETIF_F_HW_VLAN_CTAG_FILTER)
+
+/**
+ * i40evf_fix_features - fix up the netdev feature bits
+ * @netdev: our net device
+ * @features: desired feature bits
+ *
+ * Returns the fixed-up feature bits
+ **/
+static netdev_features_t i40evf_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
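+ /* strip the VLAN bits, then restore them only if the PF
+ * granted the VLAN offload capability
+ */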
+ features &= ~I40EVF_VLAN_FEATURES;
+ if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
+ features |= I40EVF_VLAN_FEATURES;
+ return features;
+}
+
static const struct net_device_ops i40evf_netdev_ops = {
.ndo_open = i40evf_open,
.ndo_stop = i40evf_close,
@@ -2271,6 +2168,7 @@ static const struct net_device_ops i40evf_netdev_ops = {
.ndo_tx_timeout = i40evf_tx_timeout,
.ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
+ .ndo_fix_features = i40evf_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = i40evf_netpoll,
#endif
@@ -2307,57 +2205,61 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
**/
int i40evf_process_config(struct i40evf_adapter *adapter)
{
+ struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
+ struct i40e_vsi *vsi = &adapter->vsi;
int i;
/* got VF config message back from PF, now we can parse it */
- for (i = 0; i < adapter->vf_res->num_vsis; i++) {
- if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
- adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ for (i = 0; i < vfres->num_vsis; i++) {
+ if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ adapter->vsi_res = &vfres->vsi_res[i];
}
if (!adapter->vsi_res) {
dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
return -ENODEV;
}
- if (adapter->vf_res->vf_offload_flags
- & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
- netdev->vlan_features = netdev->features &
- ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER);
- netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
- }
- netdev->features |= NETIF_F_HIGHDMA |
- NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_SCTP_CRC |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_TSO_ECN |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_RXCSUM |
- NETIF_F_GRO;
-
- netdev->hw_enc_features |= NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_TSO_ECN |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE)
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- /* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
- netdev->hw_features &= ~NETIF_F_RXCSUM;
+ netdev->hw_enc_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ 0;
+
+ if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE))
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+
+ /* record features VLANs can make use of */
+ netdev->vlan_features |= netdev->hw_enc_features |
+ NETIF_F_TSO_MANGLEID;
+
+ /* Write features and hw_features separately to avoid polluting
+ * them with, or dropping, features that were set when we registered.
+ */
+ netdev->hw_features |= netdev->hw_enc_features;
+
+ netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES;
+ netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+
+ /* disable VLAN features if not supported */
+ if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN))
+ netdev->features ^= I40EVF_VLAN_FEATURES;
adapter->vsi.id = adapter->vsi_res->vsi_id;
@@ -2368,8 +2270,16 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
- adapter->vsi.netdev = adapter->netdev;
- adapter->vsi.qs_handle = adapter->vsi_res->qset_handle;
+ vsi->netdev = adapter->netdev;
+ vsi->qs_handle = adapter->vsi_res->qset_handle;
+ if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ adapter->rss_key_size = vfres->rss_key_size;
+ adapter->rss_lut_size = vfres->rss_lut_size;
+ } else {
+ adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
+ adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
+ }
+
return 0;
}
@@ -2502,11 +2412,6 @@ static void i40evf_init_task(struct work_struct *work)
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
- adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
- adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE;
-
- /* Default to single buffer rx, can be changed through ethtool. */
- adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
netdev->netdev_ops = &i40evf_netdev_ops;
i40evf_set_ethtool_ops(netdev);
@@ -2565,6 +2470,11 @@ static void i40evf_init_task(struct work_struct *work)
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_misc_irq_enable(adapter);
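+ /* rss_key_size and rss_lut_size were chosen in i40evf_process_config */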
+ adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
+ adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
+ if (!adapter->rss_key || !adapter->rss_lut)
+ goto err_mem;
+
if (RSS_AQ(adapter)) {
adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
@@ -2575,7 +2485,8 @@ static void i40evf_init_task(struct work_struct *work)
restart:
schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
return;
-
+err_mem:
+ i40evf_free_rss(adapter);
err_register:
i40evf_free_misc_irq(adapter);
err_sw_init:
@@ -2838,11 +2749,11 @@ static void i40evf_remove(struct pci_dev *pdev)
adapter->state = __I40EVF_REMOVE;
adapter->aq_required = 0;
i40evf_request_reset(adapter);
- msleep(20);
+ msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
if (!i40evf_asq_done(hw)) {
i40evf_request_reset(adapter);
- msleep(20);
+ msleep(50);
}
if (adapter->msix_entries) {
@@ -2857,8 +2768,7 @@ static void i40evf_remove(struct pci_dev *pdev)
flush_scheduled_work();
- /* Clear user configurations for RSS */
- i40evf_clear_rss_config_user(&adapter->vsi);
+ i40evf_free_rss(adapter);
if (hw->aq.asq.count)
i40evf_shutdown_adminq(hw);
@@ -2869,7 +2779,6 @@ static void i40evf_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr);
pci_release_regions(pdev);
-
i40evf_free_all_tx_resources(adapter);
i40evf_free_all_rx_resources(adapter);
i40evf_free_queues(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 488e738f7..f13445691 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -270,10 +270,6 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi->rxq.max_pkt_size = adapter->netdev->mtu
+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
- vqpi->rxq.splithdr_enabled = true;
- vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE;
- }
vqpi++;
}
@@ -645,6 +641,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
{
struct i40e_virtchnl_promisc_info vpi;
+ int promisc_all;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -652,6 +649,27 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
adapter->current_op);
return;
}
+
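+ /* unicast plus multicast promiscuity amounts to full promiscuous mode */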
+ promisc_all = I40E_FLAG_VF_UNICAST_PROMISC |
+ I40E_FLAG_VF_MULTICAST_PROMISC;
+ if ((flags & promisc_all) == promisc_all) {
+ adapter->flags |= I40EVF_FLAG_PROMISC_ON;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
+ dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+ }
+
+ if (flags & I40E_FLAG_VF_MULTICAST_PROMISC) {
+ adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+ dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
+ }
+
+ if (!flags) {
+ adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
+ dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+ }
+
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
vpi.vsi_id = adapter->vsi_res->vsi_id;
vpi.flags = flags;
@@ -681,6 +699,115 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
/* if the request failed, don't lock out others */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
+
+/**
+ * i40evf_get_hena
+ * @adapter: adapter structure
+ *
+ * Request hash enable capabilities from PF
+ **/
+void i40evf_get_hena(struct i40evf_adapter *adapter)
+{
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ NULL, 0);
+}
+
+/**
+ * i40evf_set_hena
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS hash capabilities
+ **/
+void i40evf_set_hena(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_rss_hena vrh;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ vrh.hena = adapter->hena;
+ adapter->current_op = I40E_VIRTCHNL_OP_SET_RSS_HENA;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ (u8 *)&vrh, sizeof(vrh));
+}
+
+/**
+ * i40evf_set_rss_key
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS hash key
+ **/
+void i40evf_set_rss_key(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_rss_key *vrk;
+ int len;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
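+ /* struct i40e_virtchnl_rss_key ends in a one-byte key[1]
+ * placeholder, so subtract 1 to avoid counting it twice
+ */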
+ len = sizeof(struct i40e_virtchnl_rss_key) +
+ (adapter->rss_key_size * sizeof(u8)) - 1;
+ vrk = kzalloc(len, GFP_KERNEL);
+ if (!vrk)
+ return;
+ vrk->vsi_id = adapter->vsi.id;
+ vrk->key_len = adapter->rss_key_size;
+ memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
+
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_KEY;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ (u8 *)vrk, len);
+ kfree(vrk);
+}
+
+/**
+ * i40evf_set_rss_lut
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS lookup table
+ **/
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_rss_lut *vrl;
+ int len;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
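+ /* as with the key message, lut[1] is a one-byte placeholder
+ * already counted by sizeof(), hence the -1
+ */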
+ len = sizeof(struct i40e_virtchnl_rss_lut) +
+ (adapter->rss_lut_size * sizeof(u8)) - 1;
+ vrl = kzalloc(len, GFP_KERNEL);
+ if (!vrl)
+ return;
+ vrl->vsi_id = adapter->vsi.id;
+ vrl->lut_entries = adapter->rss_lut_size;
+ memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_LUT;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ (u8 *)vrl, len);
+ kfree(vrl);
+}
+
/**
* i40evf_request_reset
* @adapter: adapter structure
@@ -820,6 +947,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
+ struct i40e_virtchnl_rss_hena *vrh =
+ (struct i40e_virtchnl_rss_hena *)msg;
+ if (msglen == sizeof(*vrh))
+ adapter->hena = vrh->hena;
+ else
+ dev_warn(&adapter->pdev->dev,
+ "Invalid message %d from PF\n", v_opcode);
+ }
+ break;
default:
if (v_opcode != adapter->current_op)
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a23aa6704..a61447fd7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -361,7 +361,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
if (size > 15)
size = 15;
- nvm->word_size = 1 << size;
+ nvm->word_size = BIT(size);
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
@@ -380,7 +380,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
16 : 8;
break;
}
- if (nvm->word_size == (1 << 15))
+ if (nvm->word_size == BIT(15))
nvm->page_size = 128;
nvm->type = e1000_nvm_eeprom_spi;
@@ -391,7 +391,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
nvm->ops.write = igb_write_nvm_spi;
nvm->ops.validate = igb_validate_nvm_checksum;
nvm->ops.update = igb_update_nvm_checksum;
- if (nvm->word_size < (1 << 15))
+ if (nvm->word_size < BIT(15))
nvm->ops.read = igb_read_nvm_eerd;
else
nvm->ops.read = igb_read_nvm_spi;
@@ -2107,7 +2107,7 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
/* The PF can spoof - it has to in order to
* support emulation mode NICs
*/
- reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+ reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS));
} else {
reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
E1000_DTXSWC_VLAN_SPOOF_MASK);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index de8805a2a..199ff9820 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -168,16 +168,16 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
/* Additional DCA related definitions, note change in position of CPUID */
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
@@ -186,8 +186,8 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
/* ETQF register bit definitions */
-#define E1000_ETQF_FILTER_ENABLE (1 << 26)
-#define E1000_ETQF_1588 (1 << 30)
+#define E1000_ETQF_FILTER_ENABLE BIT(26)
+#define E1000_ETQF_1588 BIT(30)
/* FTQF register bit definitions */
#define E1000_FTQF_VF_BP 0x00008000
@@ -203,16 +203,16 @@ struct e1000_adv_tx_context_desc {
#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31) /* global VF LB enable */
/* Easy defines for setting default pool, would normally be left a zero */
#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
/* Other useful VMD_CTL register defines */
-#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
-#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
-#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
+#define E1000_VT_CTL_IGNORE_MAC BIT(28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL BIT(29)
+#define E1000_VT_CTL_VM_REPL_EN BIT(30)
/* Per VM Offload register setup */
#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
@@ -252,7 +252,7 @@ struct e1000_adv_tx_context_desc {
#define E1000_DTXCTL_MDP_EN 0x0020
#define E1000_DTXCTL_SPOOF_INT 0x0040
-#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
+#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT BIT(14)
#define ALL_QUEUES 0xFFFF
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index e9f23ee8f..2997c443c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -530,65 +530,65 @@
/* Time Sync Interrupt Cause/Mask Register Bits */
-#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */
-#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */
-#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */
-#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */
-#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */
-#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */
-#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */
-#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */
+#define TSINTR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS BIT(1) /* Transmit Timestamp. */
+#define TSINTR_RXTS BIT(2) /* Receive Timestamp. */
+#define TSINTR_TT0 BIT(3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1 BIT(4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */
+#define TSINTR_TADJ BIT(7) /* Time Adjust Done. */
#define TSYNC_INTERRUPTS TSINTR_TXTS
#define E1000_TSICR_TXTS TSINTR_TXTS
/* TSAUXC Configuration Bits */
-#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */
-#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */
-#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */
-#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */
-#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */
-#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */
-#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */
-#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */
-#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */
-#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */
-#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */
-#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */
-#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */
-#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */
+#define TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */
+#define TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */
+#define TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_SAMP_AUT0 BIT(3) /* Latch SYSTIML/H into AUXSTMPL/0. */
+#define TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_SAMP_AUT1 BIT(6) /* Latch SYSTIML/H into AUXSTMPL/1. */
+#define TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_PLSG BIT(17) /* Generate a pulse. */
+#define TSAUXC_DISABLE BIT(31) /* Disable SYSTIM Count Operation. */
/* SDP Configuration Bits */
-#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */
-#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */
-#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */
-#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */
-#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */
-#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */
-#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */
-#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */
-#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */
-#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */
-#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */
-#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */
-#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */
-#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */
-#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */
-#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */
-#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */
-#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */
-#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */
-#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */
-#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */
-#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */
-#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */
-#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */
+#define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */
+#define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */
+#define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */
+#define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */
+#define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */
+#define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */
+#define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. */
#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
@@ -997,8 +997,8 @@
#define E1000_M88E1543_FIBER_CTRL 0x0
#define E1000_EEE_ADV_DEV_I354 7
#define E1000_EEE_ADV_ADDR_I354 60
-#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
-#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
+#define E1000_EEE_ADV_100_SUPPORTED BIT(1) /* 100BaseTx EEE Supported */
+#define E1000_EEE_ADV_1000_SUPPORTED BIT(2) /* 1000BaseT EEE Supported */
#define E1000_PCS_STATUS_DEV_I354 3
#define E1000_PCS_STATUS_ADDR_I354 1
#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 07cf4fe58..5010e2232 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -212,7 +212,7 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
* bits[4-0]: which bit in the register
*/
regidx = vlan / 32;
- vfta_delta = 1 << (vlan % 32);
+ vfta_delta = BIT(vlan % 32);
vfta = adapter->shadow_vfta[regidx];
/* vfta_delta represents the difference between the current value
@@ -243,12 +243,12 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
bits = rd32(E1000_VLVF(vlvf_index));
/* set the pool bit */
- bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
+ bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
if (vlan_on)
goto vlvf_update;
/* clear the pool bit */
- bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
+ bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
/* Clear VFTA first, then disable VLVF. Otherwise
@@ -427,7 +427,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
mta = array_rd32(E1000_MTA, hash_reg);
- mta |= (1 << hash_bit);
+ mta |= BIT(hash_bit);
array_wr32(E1000_MTA, hash_reg, mta);
wrfl();
@@ -527,7 +527,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
- hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
mc_addr_list += (ETH_ALEN);
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 10f5c9e01..00e263f0c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -302,9 +302,9 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
u32 vflre = rd32(E1000_VFLRE);
s32 ret_val = -E1000_ERR_MBX;
- if (vflre & (1 << vf_number)) {
+ if (vflre & BIT(vf_number)) {
ret_val = 0;
- wr32(E1000_VFLRE, (1 << vf_number));
+ wr32(E1000_VFLRE, BIT(vf_number));
hw->mbx.stats.rsts++;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index e8280d0d7..3582c5cf8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -72,7 +72,7 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
u32 eecd = rd32(E1000_EECD);
u32 mask;
- mask = 0x01 << (count - 1);
+ mask = 1u << (count - 1);
if (nvm->type == e1000_nvm_eeprom_spi)
eecd |= E1000_EECD_DO;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 969a6ddaf..9b622b33b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -91,10 +91,10 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define I82580_ADDR_REG 16
#define I82580_CFG_REG 22
-#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
+#define I82580_CFG_ASSERT_CRS_ON_TX BIT(15)
+#define I82580_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift 100/10 */
#define I82580_CTRL_REG 23
-#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10)
+#define I82580_CTRL_DOWNSHIFT_MASK (7u << 10)
/* 82580 specific PHY registers */
#define I82580_PHY_CTRL_2 18
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9413fa613..b9609afa5 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -91,6 +91,14 @@ struct igb_adapter;
#define NVM_COMB_VER_OFF 0x0083
#define NVM_COMB_VER_PTR 0x003d
+/* Transmit and receive latency (for PTP timestamps) */
+#define IGB_I210_TX_LATENCY_10 9542
+#define IGB_I210_TX_LATENCY_100 1024
+#define IGB_I210_TX_LATENCY_1000 178
+#define IGB_I210_RX_LATENCY_10 20662
+#define IGB_I210_RX_LATENCY_100 2213
+#define IGB_I210_RX_LATENCY_1000 448
+
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -169,7 +177,7 @@ enum igb_tx_flags {
* maintain a power of two alignment we have to limit ourselves to 32K.
*/
#define IGB_MAX_TXD_PWR 15
-#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
+#define IGB_MAX_DATA_PER_TXD (1u << IGB_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
@@ -466,21 +474,21 @@ struct igb_adapter {
u16 eee_advert;
};
-#define IGB_FLAG_HAS_MSI (1 << 0)
-#define IGB_FLAG_DCA_ENABLED (1 << 1)
-#define IGB_FLAG_QUAD_PORT_A (1 << 2)
-#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
-#define IGB_FLAG_DMAC (1 << 4)
-#define IGB_FLAG_PTP (1 << 5)
-#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
-#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
-#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
-#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
-#define IGB_FLAG_MEDIA_RESET (1 << 10)
-#define IGB_FLAG_MAS_CAPABLE (1 << 11)
-#define IGB_FLAG_MAS_ENABLE (1 << 12)
-#define IGB_FLAG_HAS_MSIX (1 << 13)
-#define IGB_FLAG_EEE (1 << 14)
+#define IGB_FLAG_HAS_MSI BIT(0)
+#define IGB_FLAG_DCA_ENABLED BIT(1)
+#define IGB_FLAG_QUAD_PORT_A BIT(2)
+#define IGB_FLAG_QUEUE_PAIRS BIT(3)
+#define IGB_FLAG_DMAC BIT(4)
+#define IGB_FLAG_PTP BIT(5)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
+#define IGB_FLAG_WOL_SUPPORTED BIT(8)
+#define IGB_FLAG_NEED_LINK_UPDATE BIT(9)
+#define IGB_FLAG_MEDIA_RESET BIT(10)
+#define IGB_FLAG_MAS_CAPABLE BIT(11)
+#define IGB_FLAG_MAS_ENABLE BIT(12)
+#define IGB_FLAG_HAS_MSIX BIT(13)
+#define IGB_FLAG_EEE BIT(14)
#define IGB_FLAG_VLAN_PROMISC BIT(15)
/* Media Auto Sense */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7982243d1..64e91c575 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -466,7 +466,7 @@ static void igb_get_regs(struct net_device *netdev,
memset(p, 0, IGB_REGS_LEN * sizeof(u32));
- regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+ regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
/* General Registers */
regs_buff[0] = rd32(E1000_CTRL);
@@ -1448,7 +1448,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Test each interrupt */
for (; i < 31; i++) {
/* Interrupt to test */
- mask = 1 << i;
+ mask = BIT(i);
if (!(mask & ics_mask))
continue;
@@ -2411,19 +2411,19 @@ static int igb_get_ts_info(struct net_device *dev,
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
- info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
/* 82576 does not support timestamping all packets. */
if (adapter->hw.mac.type >= e1000_82580)
- info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
else
info->rx_filters |=
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
default:
@@ -2831,7 +2831,8 @@ static int igb_get_module_eeprom(struct net_device *netdev,
/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
for (i = 0; i < last_word - first_word + 1; i++) {
- status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
+ status = igb_read_phy_reg_i2c(hw, (first_word + i) * 2,
+ &dataword[i]);
if (status) {
/* Error occurred while reading module */
kfree(dataword);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 55a1405cb..ef3d642f5 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -50,6 +50,7 @@
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
+#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -150,7 +151,7 @@ static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_tx_irq(struct igb_q_vector *);
+static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
@@ -382,7 +383,7 @@ static void igb_dump(struct igb_adapter *adapter)
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start last_rx\n");
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, netdev->trans_start, netdev->last_rx);
+ netdev->state, dev_trans_start(netdev), netdev->last_rx);
}
/* Print Registers */
@@ -835,7 +836,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
igb_write_ivar(hw, msix_vector,
tx_queue & 0x7,
((tx_queue & 0x8) << 1) + 8);
- q_vector->eims_value = 1 << msix_vector;
+ q_vector->eims_value = BIT(msix_vector);
break;
case e1000_82580:
case e1000_i350:
@@ -856,7 +857,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
igb_write_ivar(hw, msix_vector,
tx_queue >> 1,
((tx_queue & 0x1) << 4) + 8);
- q_vector->eims_value = 1 << msix_vector;
+ q_vector->eims_value = BIT(msix_vector);
break;
default:
BUG();
@@ -918,7 +919,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
E1000_GPIE_NSICR);
/* enable msix_other interrupt */
- adapter->eims_other = 1 << vector;
+ adapter->eims_other = BIT(vector);
tmp = (vector++ | E1000_IVAR_VALID) << 8;
wr32(E1000_IVAR_MISC, tmp);
@@ -2086,6 +2087,40 @@ static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
+#define IGB_MAX_MAC_HDR_LEN 127
+#define IGB_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t
+igb_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
+
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
@@ -2110,7 +2145,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_fix_features = igb_fix_features,
.ndo_set_features = igb_set_features,
.ndo_fdb_add = igb_ndo_fdb_add,
- .ndo_features_check = passthru_features_check,
+ .ndo_features_check = igb_features_check,
};
/**
@@ -2376,38 +2411,43 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_TSO6 |
NETIF_F_RXHASH |
NETIF_F_RXCSUM |
- NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX;
+ NETIF_F_HW_CSUM;
if (hw->mac.type >= e1000_82576)
netdev->features |= NETIF_F_SCTP_CRC;
+#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
+
/* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
- netdev->hw_features |= NETIF_F_RXALL;
+ netdev->hw_features |= netdev->features |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_RXALL;
if (hw->mac.type >= e1000_i350)
netdev->hw_features |= NETIF_F_NTUPLE;
- /* set this bit last since it cannot be part of hw_features */
- netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
- netdev->vlan_features |= NETIF_F_SG |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_HW_CSUM |
- NETIF_F_SCTP_CRC;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
- netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
- netdev->priv_flags |= IFF_SUPP_NOFCS;
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -2442,9 +2482,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- /* copy the MAC address out of the NVM */
- if (hw->mac.ops.read_mac_addr(hw))
- dev_err(&pdev->dev, "NVM Read Error\n");
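+ /* try a platform/firmware-provided MAC first; fall back to the NVM */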
+ if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
+ /* copy the MAC address out of the NVM */
+ if (hw->mac.ops.read_mac_addr(hw))
+ dev_err(&pdev->dev, "NVM Read Error\n");
+ }
memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
@@ -4061,7 +4103,7 @@ static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
u32 vlvf = rd32(E1000_VLVF(i));
- vlvf |= 1 << pf_id;
+ vlvf |= BIT(pf_id);
wr32(E1000_VLVF(i), vlvf);
}
@@ -4088,7 +4130,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
/* guarantee that we don't scrub out management VLAN */
vid = adapter->mng_vlan_id;
if (vid >= vid_start && vid < vid_end)
- vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+ vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
if (!adapter->vfs_allocated_count)
goto set_vfta;
@@ -4107,7 +4149,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
if (vlvf & E1000_VLVF_VLANID_ENABLE) {
/* record VLAN ID in VFTA */
- vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+ vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
/* if PF is part of this then continue */
if (test_bit(vid, adapter->active_vlans))
@@ -4115,7 +4157,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
}
/* remove PF from the pool */
- bits = ~(1 << pf_id);
+ bits = ~BIT(pf_id);
bits &= rd32(E1000_VLVF(i));
wr32(E1000_VLVF(i), bits);
}
@@ -4273,13 +4315,13 @@ static void igb_spoof_check(struct igb_adapter *adapter)
return;
for (j = 0; j < adapter->vfs_allocated_count; j++) {
- if (adapter->wvbr & (1 << j) ||
- adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
+ if (adapter->wvbr & BIT(j) ||
+ adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
dev_warn(&adapter->pdev->dev,
"Spoof event(s) detected on VF %d\n", j);
adapter->wvbr &=
- ~((1 << j) |
- (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
+ ~(BIT(j) |
+ BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
}
}
}
@@ -4839,9 +4881,18 @@ static int igb_tso(struct igb_ring *tx_ring,
struct igb_tx_buffer *first,
u8 *hdr_len)
{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb;
- u32 vlan_macip_lens, type_tucmd;
- u32 mss_l4len_idx, l4len;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
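+ /* typed overlays of the header pointers avoid repeated casts */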
+ u32 paylen, l4_offset;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -4854,45 +4905,52 @@ static int igb_tso(struct igb_ring *tx_ring,
if (err < 0)
return err;
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
first->tx_flags |= IGB_TX_FLAGS_TSO |
IGB_TX_FLAGS_CSUM |
IGB_TX_FLAGS_IPV4;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ } else {
+ ip.v6->payload_len = 0;
first->tx_flags |= IGB_TX_FLAGS_TSO |
IGB_TX_FLAGS_CSUM;
}
- /* compute header lengths */
- l4len = tcp_hdrlen(skb);
- *hdr_len = skb_transport_offset(skb) + l4len;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* MSS L4LEN IDX */
- mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
/* VLAN MACLEN IPLEN */
- vlan_macip_lens = skb_network_header_len(skb);
- vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
@@ -5960,11 +6018,11 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
/* create mask for VF and other pools */
pool_mask = E1000_VLVF_POOLSEL_MASK;
- vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+ vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
/* drop PF from pool bits */
- pool_mask &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT +
- adapter->vfs_allocated_count));
+ pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
+ adapter->vfs_allocated_count);
/* Find the vlan filter for this id */
for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
@@ -5987,7 +6045,7 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
goto update_vlvf;
vid = vlvf & E1000_VLVF_VLANID_MASK;
- vfta_mask = 1 << (vid % 32);
+ vfta_mask = BIT(vid % 32);
/* clear bit from VFTA */
vfta = adapter->shadow_vfta[vid / 32];
@@ -6024,7 +6082,7 @@ static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
return idx;
}
-void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
+static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
{
struct e1000_hw *hw = &adapter->hw;
u32 bits, pf_id;
@@ -6038,13 +6096,13 @@ void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
* entry other than the PF.
*/
pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
- bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK;
+ bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
bits &= rd32(E1000_VLVF(idx));
/* Disable the filter so this falls into the default pool. */
if (!bits) {
if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
- wr32(E1000_VLVF(idx), 1 << pf_id);
+ wr32(E1000_VLVF(idx), BIT(pf_id));
else
wr32(E1000_VLVF(idx), 0);
}
@@ -6228,9 +6286,9 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
/* enable transmit and receive for vf */
reg = rd32(E1000_VFTE);
- wr32(E1000_VFTE, reg | (1 << vf));
+ wr32(E1000_VFTE, reg | BIT(vf));
reg = rd32(E1000_VFRE);
- wr32(E1000_VFRE, reg | (1 << vf));
+ wr32(E1000_VFRE, reg | BIT(vf));
adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
@@ -6522,13 +6580,14 @@ static int igb_poll(struct napi_struct *napi, int budget)
igb_update_dca(q_vector);
#endif
if (q_vector->tx.ring)
- clean_complete = igb_clean_tx_irq(q_vector);
+ clean_complete = igb_clean_tx_irq(q_vector, budget);
if (q_vector->rx.ring) {
int cleaned = igb_clean_rx_irq(q_vector, budget);
work_done += cleaned;
- clean_complete &= (cleaned < budget);
+ if (cleaned >= budget)
+ clean_complete = false;
}
/* If all work not completed, return budget and keep polling */
@@ -6545,10 +6604,11 @@ static int igb_poll(struct napi_struct *napi, int budget)
/**
* igb_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
+ * @napi_budget: Used to determine if we are in netpoll
*
* returns true if ring is completely cleaned
**/
-static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
+static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *tx_ring = q_vector->tx.ring;
@@ -6587,7 +6647,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
total_packets += tx_buffer->gso_segs;
/* free the skb */
- dev_consume_skb_any(tx_buffer->skb);
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -7574,7 +7634,6 @@ static int igb_resume(struct device *dev)
if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
- rtnl_unlock();
return -ENOMEM;
}
@@ -7845,11 +7904,13 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
struct e1000_hw *hw = &adapter->hw;
u32 rar_low, rar_high;
- /* HW expects these in little endian so we reverse the byte order
- * from network order (big endian) to CPU endian
+ /* HW expects these to be in network order when they are plugged
+ * into the registers, which are little endian. To guarantee that
+ * ordering we do an leXX_to_cpup here so the value is ready for
+ * the byte swap that writel performs
*/
- rar_low = le32_to_cpup((__be32 *)(addr));
- rar_high = le16_to_cpup((__be16 *)(addr + 4));
+ rar_low = le32_to_cpup((__le32 *)(addr));
+ rar_high = le16_to_cpup((__le16 *)(addr + 4));
/* Indicate to hardware the Address is Valid. */
rar_high |= E1000_RAH_AV;
@@ -7921,7 +7982,7 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
/* Calculate the rate factor values to set */
rf_int = link_speed / tx_rate;
rf_dec = (link_speed - (rf_int * tx_rate));
- rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
+ rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
tx_rate;
bcnrc_val = E1000_RTTBCNRC_RS_ENA;
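Aside: the rate-factor arithmetic in this hunk, restated as runnable C. The shift value 14 is assumed to match E1000_RTTBCNRC_RF_INT_SHIFT; the speeds are examples only.

#include <stdio.h>

/* Split a rate ratio into an integer part and a fractional part scaled
 * by 2^RF_INT_SHIFT, as the register layout requires.
 */
#define RF_INT_SHIFT 14 /* assumed value of E1000_RTTBCNRC_RF_INT_SHIFT */

int main(void)
{
	unsigned int link_speed = 1000; /* Mbps */
	unsigned int tx_rate = 300;     /* Mbps cap */
	unsigned int rf_int = link_speed / tx_rate;
	unsigned int rf_dec = link_speed - rf_int * tx_rate;

	rf_dec = (rf_dec * (1u << RF_INT_SHIFT)) / tx_rate;
	printf("rf_int=%u rf_dec=%u\n", rf_int, rf_dec); /* 3, 5461 */
	return 0;
}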
@@ -8011,11 +8072,11 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
reg_val = rd32(reg_offset);
if (setting)
- reg_val |= ((1 << vf) |
- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ reg_val |= (BIT(vf) |
+ BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
else
- reg_val &= ~((1 << vf) |
- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ reg_val &= ~(BIT(vf) |
+ BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
wr32(reg_offset, reg_val);
adapter->vf_data[vf].spoofchk_enabled = setting;
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 22a8a2989..f097c5a8a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -69,9 +69,9 @@
#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
#define IGB_PTP_TX_TIMEOUT (HZ * 15)
-#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
-#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
-#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
+#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT)
+#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
+#define INCVALUE_82576 (16u << IGB_82576_TSYNC_SHIFT)
#define IGB_NBITS_82580 40
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
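Aside: for 32-bit unsigned use, the BIT() and GENMASK() helpers this patch switches to reduce to the expressions below. This is an illustrative re-derivation, not the kernel headers; the shift of 24 is assumed to match E1000_TIMINCA_16NS_SHIFT.

#include <stdio.h>

/* Local equivalents of the kernel macros, restricted to 32-bit unsigned. */
#define MY_BIT(n)        (1u << (n))
#define MY_GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	/* BIT(24) == 1u << 24 */
	printf("0x%08x\n", MY_BIT(24));
	/* GENMASK(23, 0): bits 23..0 set -- the same mask as (1 << 24) - 1 */
	printf("0x%08x\n", MY_GENMASK(23, 0));
	return 0;
}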
@@ -722,11 +722,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval;
+ int adjust = 0;
regval = rd32(E1000_TXSTMPL);
regval |= (u64)rd32(E1000_TXSTMPH) << 32;
igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ /* adjust timestamp for the TX latency based on link speed */
+ if (adapter->hw.mac.type == e1000_i210) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGB_I210_TX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGB_I210_TX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGB_I210_TX_LATENCY_1000;
+ break;
+ }
+ }
+
+ shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust);
+
skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
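Aside: the per-speed correction just added, sketched as standalone C. The nanosecond constants are placeholders, not the IGB_I210_TX_LATENCY_* values.

#include <stdint.h>
#include <stdio.h>

/* Subtract a per-link-speed TX latency from a hardware timestamp,
 * mirroring the ktime_sub_ns() call in the hunk above.
 */
static int64_t adjust_tx_tstamp(int64_t hwtstamp_ns, int link_speed)
{
	int adjust = 0;

	switch (link_speed) {
	case 10:   adjust = 9542; break; /* placeholder ns */
	case 100:  adjust = 1024; break; /* placeholder ns */
	case 1000: adjust = 178;  break; /* placeholder ns */
	}
	return hwtstamp_ns - adjust;
}

int main(void)
{
	printf("%lld\n", (long long)adjust_tx_tstamp(1000000, 1000));
	return 0;
}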
@@ -771,6 +789,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
u64 regval;
+ int adjust = 0;
/* If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so
@@ -790,6 +809,23 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+ /* adjust timestamp for the RX latency based on link speed */
+ if (adapter->hw.mac.type == e1000_i210) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGB_I210_RX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGB_I210_RX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGB_I210_RX_LATENCY_1000;
+ break;
+ }
+ }
+ skb_hwtstamps(skb)->hwtstamp =
+ ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+
/* Update the last_rx_timestamp timer in order to enable watchdog check
* for error case of latched timestamp on a dropped packet.
*/
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index ae3f28332..ee1ef08d7 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -113,7 +113,7 @@
#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */
/* Direct Cache Access (DCA) definitions */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index b74ce53d7..8dea1b136 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -154,7 +154,8 @@ static void igbvf_get_regs(struct net_device *netdev,
memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
- regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+ regs->version = (1u << 24) |
+ (adapter->pdev->revision << 16) |
adapter->pdev->device;
regs_buff[0] = er32(CTRL);
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index f166baab8..6f4290d6d 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -287,8 +287,8 @@ struct igbvf_info {
};
/* hardware capability, feature, and workaround flags */
-#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0)
-#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1)
+#define IGBVF_FLAG_RX_CSUM_DISABLED BIT(0)
+#define IGBVF_FLAG_RX_LB_VLAN_BSWAP BIT(1)
#define IGBVF_RX_DESC_ADV(R, i) \
(&((((R).desc))[i].rx_desc))
#define IGBVF_TX_DESC_ADV(R, i) \
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index c12442252..b0778ba65 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -964,7 +964,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
ivar = ivar & 0xFFFFFF00;
ivar |= msix_vector | E1000_IVAR_VALID;
}
- adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
+ adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector);
array_ew32(IVAR0, index, ivar);
}
if (tx_queue > IGBVF_NO_QUEUE) {
@@ -979,7 +979,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
ivar = ivar & 0xFFFF00FF;
ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
}
- adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
+ adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector);
array_ew32(IVAR0, index, ivar);
}
}
@@ -1014,8 +1014,8 @@ static void igbvf_configure_msix(struct igbvf_adapter *adapter)
ew32(IVAR_MISC, tmp);
- adapter->eims_enable_mask = (1 << (vector)) - 1;
- adapter->eims_other = 1 << (vector - 1);
+ adapter->eims_enable_mask = GENMASK(vector - 1, 0);
+ adapter->eims_other = BIT(vector - 1);
e1e_flush();
}
@@ -1367,7 +1367,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct igbvf_ring *rx_ring = adapter->rx_ring;
u64 rdba;
- u32 rdlen, rxdctl;
+ u32 rxdctl;
/* disable receives */
rxdctl = er32(RXDCTL(0));
@@ -1375,8 +1375,6 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
e1e_flush();
msleep(10);
- rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
-
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
@@ -1933,83 +1931,74 @@ static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens,
buffer_info->dma = 0;
}
-static int igbvf_tso(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
- __be16 protocol)
-{
- struct e1000_adv_tx_context_desc *context_desc;
- struct igbvf_buffer *buffer_info;
- u32 info = 0, tu_cmd = 0;
- u32 mss_l4len_idx, l4len;
- unsigned int i;
+static int igbvf_tso(struct igbvf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
- *hdr_len = 0;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
err = skb_cow_head(skb, 0);
- if (err < 0) {
- dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
+ if (err < 0)
return err;
- }
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
-
- if (protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
-
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- }
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
- i = tx_ring->next_to_use;
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- buffer_info = &tx_ring->buffer_info[i];
- context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
- /* VLAN MACLEN IPLEN */
- if (tx_flags & IGBVF_TX_FLAGS_VLAN)
- info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
- info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
- *hdr_len += skb_network_offset(skb);
- info |= (skb_transport_header(skb) - skb_network_header(skb));
- *hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
- context_desc->vlan_macip_lens = cpu_to_le32(info);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
+ type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
- /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
+ ip.v4->tot_len = 0;
+ } else {
+ ip.v6->payload_len = 0;
+ }
- if (protocol == htons(ETH_P_IP))
- tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
- tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
- context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
- /* MSS L4LEN IDX */
- mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
- context_desc->seqnum_seed = 0;
+ /* MSS L4LEN IDX */
+ mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
- buffer_info->time_stamp = jiffies;
- buffer_info->dma = 0;
- i++;
- if (i == tx_ring->count)
- i = 0;
+ /* VLAN MACLEN IPLEN */
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;
- tx_ring->next_to_use = i;
+ igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
- return true;
+ return 1;
}
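Aside: the header-length arithmetic in the rewritten igbvf_tso(), restated as runnable C with example offsets. hdr_len is the L4 offset plus the TCP header length (doff counted in 32-bit words); paylen is the rest of the frame, which gets removed from the pseudo-header checksum. All values here are illustrative.

#include <stdio.h>

int main(void)
{
	unsigned int skb_len   = 1514; /* illustrative frame length */
	unsigned int l4_offset = 34;   /* 14 (eth) + 20 (ipv4), no VLAN */
	unsigned int tcp_doff  = 5;    /* TCP header length in dwords */

	unsigned int hdr_len = tcp_doff * 4 + l4_offset;
	unsigned int paylen  = skb_len - l4_offset;

	printf("hdr_len=%u paylen=%u\n", hdr_len, paylen); /* 54, 1480 */
	return 0;
}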
static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
@@ -2091,7 +2080,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
}
#define IGBVF_MAX_TXD_PWR 16
-#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
+#define IGBVF_MAX_DATA_PER_TXD (1u << IGBVF_MAX_TXD_PWR)
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
@@ -2271,8 +2260,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
first = tx_ring->next_to_use;
- tso = skb_is_gso(skb) ?
- igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
+ tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len);
if (unlikely(tso < 0)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2615,6 +2603,40 @@ static int igbvf_set_features(struct net_device *netdev,
return 0;
}
+#define IGBVF_MAX_MAC_HDR_LEN 127
+#define IGBVF_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t
+igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IGBVF_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
+
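Aside: the gating logic igbvf_features_check() adds, reduced to a standalone predicate. The limits mirror the #defines above; plain bools stand in for netdev_features_t.

#include <stdbool.h>
#include <stdio.h>

#define MAX_MAC_HDR_LEN     127
#define MAX_NETWORK_HDR_LEN 511

/* Offloads are only kept when both headers fit the context descriptor
 * fields; otherwise the driver strips them for this skb.
 */
static bool offloads_allowed(unsigned int mac_hdr_len,
			     unsigned int network_hdr_len)
{
	if (mac_hdr_len > MAX_MAC_HDR_LEN)
		return false;
	if (network_hdr_len > MAX_NETWORK_HDR_LEN)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", offloads_allowed(14, 40));  /* typical frame: kept */
	printf("%d\n", offloads_allowed(200, 40)); /* oversized MAC hdr: dropped */
	return 0;
}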
static const struct net_device_ops igbvf_netdev_ops = {
.ndo_open = igbvf_open,
.ndo_stop = igbvf_close,
@@ -2631,7 +2653,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
.ndo_poll_controller = igbvf_netpoll,
#endif
.ndo_set_features = igbvf_set_features,
- .ndo_features_check = passthru_features_check,
+ .ndo_features_check = igbvf_features_check,
};
/**
@@ -2739,22 +2761,30 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+#define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES;
+ netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+ IGBVF_GSO_PARTIAL_FEATURES;
+
+ netdev->features = netdev->hw_features;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_SG |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_HW_CSUM |
- NETIF_F_SCTP_CRC;
-
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
- netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
+
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
/*reset the controller to put the device in a known good state */
err = hw->mac.ops.reset_hw(hw);
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index a13baa90a..335ba6642 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -266,7 +266,7 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
msgbuf[1] = vid;
/* Setting the 8 bit field MSG INFO to true indicates "add" */
if (set)
- msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT;
+ msgbuf[0] |= BIT(E1000_VT_MSGINFO_SHIFT);
mbx->ops.write_posted(hw, msgbuf, 2);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index e4949af7d..9f2db1855 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -143,14 +143,11 @@ struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes;
- u16 default_vf_vlan_id;
- u16 vlans_enabled;
bool clear_to_send;
bool pf_set_mac;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
- u16 vlan_count;
u8 spoofchk_enabled;
bool rss_query_enabled;
u8 trusted;
@@ -173,7 +170,7 @@ struct vf_macvlans {
};
#define IXGBE_MAX_TXD_PWR 14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+#define IXGBE_MAX_DATA_PER_TXD (1u << IXGBE_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
@@ -456,7 +453,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
IXGBE_QV_STATE_POLL);
#ifdef BP_EXTENDED_STATS
if (rc != IXGBE_QV_STATE_IDLE)
- q_vector->tx.ring->stats.yields++;
+ q_vector->rx.ring->stats.yields++;
#endif
return rc == IXGBE_QV_STATE_IDLE;
}
@@ -623,44 +620,45 @@ struct ixgbe_adapter {
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
-#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4)
-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5)
-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6)
-#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
-#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9)
-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11)
-#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12)
-#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13)
-#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16)
-#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19)
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21)
-#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22)
-#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23)
+#define IXGBE_FLAG_MSI_ENABLED BIT(1)
+#define IXGBE_FLAG_MSIX_ENABLED BIT(3)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE BIT(4)
+#define IXGBE_FLAG_RX_PS_CAPABLE BIT(5)
+#define IXGBE_FLAG_RX_PS_ENABLED BIT(6)
+#define IXGBE_FLAG_DCA_ENABLED BIT(8)
+#define IXGBE_FLAG_DCA_CAPABLE BIT(9)
+#define IXGBE_FLAG_IMIR_ENABLED BIT(10)
+#define IXGBE_FLAG_MQ_CAPABLE BIT(11)
+#define IXGBE_FLAG_DCB_ENABLED BIT(12)
+#define IXGBE_FLAG_VMDQ_CAPABLE BIT(13)
+#define IXGBE_FLAG_VMDQ_ENABLED BIT(14)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE BIT(15)
+#define IXGBE_FLAG_NEED_LINK_UPDATE BIT(16)
+#define IXGBE_FLAG_NEED_LINK_CONFIG BIT(17)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE BIT(18)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(19)
+#define IXGBE_FLAG_FCOE_CAPABLE BIT(20)
+#define IXGBE_FLAG_FCOE_ENABLED BIT(21)
+#define IXGBE_FLAG_SRIOV_CAPABLE BIT(22)
+#define IXGBE_FLAG_SRIOV_ENABLED BIT(23)
#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26)
+#define IXGBE_FLAG_DCB_CAPABLE BIT(27)
u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0)
-#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
-#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
-#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
-#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4)
-#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
-#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
-#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
-#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
-#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
-#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
+#define IXGBE_FLAG2_RSC_CAPABLE BIT(0)
+#define IXGBE_FLAG2_RSC_ENABLED BIT(1)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(2)
+#define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3)
+#define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4)
+#define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5)
+#define IXGBE_FLAG2_RESET_REQUESTED BIT(6)
+#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7)
+#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8)
+#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
+#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
@@ -795,7 +793,7 @@ struct ixgbe_adapter {
unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
#define IXGBE_MAX_LINK_HANDLE 10
- struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE];
+ struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
unsigned long tables;
/* maximum number of RETA entries among all devices supported by ixgbe
@@ -806,6 +804,8 @@ struct ixgbe_adapter {
#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)];
+
+ bool need_crosstalk_fix;
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -817,6 +817,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
return IXGBE_MAX_RSS_INDICES;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
return IXGBE_MAX_RSS_INDICES_X550;
default:
return 0;
@@ -827,7 +828,7 @@ struct ixgbe_fdir_filter {
struct hlist_node fdir_node;
union ixgbe_atr_input filter;
u16 sw_idx;
- u16 action;
+ u64 action;
};
enum ixgbe_state_t {
@@ -860,13 +861,15 @@ enum ixgbe_boards {
board_X540,
board_X550,
board_X550EM_x,
+ board_x550em_a,
};
-extern struct ixgbe_info ixgbe_82598_info;
-extern struct ixgbe_info ixgbe_82599_info;
-extern struct ixgbe_info ixgbe_X540_info;
-extern struct ixgbe_info ixgbe_X550_info;
-extern struct ixgbe_info ixgbe_X550EM_x_info;
+extern const struct ixgbe_info ixgbe_82598_info;
+extern const struct ixgbe_info ixgbe_82599_info;
+extern const struct ixgbe_info ixgbe_X540_info;
+extern const struct ixgbe_info ixgbe_X550_info;
+extern const struct ixgbe_info ixgbe_X550EM_x_info;
+extern const struct ixgbe_info ixgbe_x550em_a_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
#endif
@@ -893,8 +896,8 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
- u16 subdevice_id);
+bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+ u16 subdevice_id);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index d8a9fb8a5..fb51be74d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -792,7 +792,7 @@ mac_reset_top:
}
gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
- gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+ gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6));
IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
/*
@@ -914,10 +914,10 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
if (vlan_on)
/* Turn on this VLAN id */
- bits |= (1 << bitindex);
+ bits |= BIT(bitindex);
else
/* Turn off this VLAN id */
- bits &= ~(1 << bitindex);
+ bits &= ~BIT(bitindex);
IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
return 0;
@@ -1160,7 +1160,7 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
}
-static struct ixgbe_mac_operations mac_ops_82598 = {
+static const struct ixgbe_mac_operations mac_ops_82598 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82598,
.start_hw = &ixgbe_start_hw_82598,
@@ -1192,9 +1192,11 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.clear_vfta = &ixgbe_clear_vfta_82598,
.set_vfta = &ixgbe_set_vfta_82598,
.fc_enable = &ixgbe_fc_enable_82598,
+ .setup_fc = ixgbe_setup_fc_generic,
.set_fw_drv_ver = NULL,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
.release_swfw_sync = &ixgbe_release_swfw_sync,
+ .init_swfw_sync = NULL,
.get_thermal_sensor_data = NULL,
.init_thermal_sensor_thresh = NULL,
.prot_autoc_read = &prot_autoc_read_generic,
@@ -1203,7 +1205,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.disable_rx = &ixgbe_disable_rx_generic,
};
-static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eerd_generic,
.write = &ixgbe_write_eeprom_generic,
@@ -1214,7 +1216,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
-static struct ixgbe_phy_operations phy_ops_82598 = {
+static const struct ixgbe_phy_operations phy_ops_82598 = {
.identify = &ixgbe_identify_phy_generic,
.identify_sfp = &ixgbe_identify_module_generic,
.init = &ixgbe_init_phy_ops_82598,
@@ -1230,7 +1232,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
.check_overtemp = &ixgbe_tn_check_overtemp,
};
-struct ixgbe_info ixgbe_82598_info = {
+const struct ixgbe_info ixgbe_82598_info = {
.mac = ixgbe_mac_82598EB,
.get_invariants = &ixgbe_get_invariants_82598,
.mac_ops = &mac_ops_82598,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index fa8d4f40a..47afed74a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -1296,17 +1296,17 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
u32 n = (_n); \
- if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \
common_hash ^= lo_hash_dword >> n; \
- else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
bucket_hash ^= lo_hash_dword >> n; \
- else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \
sig_hash ^= lo_hash_dword << (16 - n); \
- if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \
common_hash ^= hi_hash_dword >> n; \
- else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
bucket_hash ^= hi_hash_dword >> n; \
- else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \
sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
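Aside: a simplified, runnable restatement of the bucket-hash iteration above, covering only the low hash dword; the key value is a placeholder, not the driver's constant.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t key = 0x3DAD14E2; /* placeholder key, not the ixgbe define */
	uint32_t lo_hash_dword = 0xDEADBEEF;
	uint32_t bucket_hash = 0;

	/* For each set key bit n, fold in the hash dword shifted by n. */
	for (int n = 0; n < 16; n++)
		if (key & (1u << n))
			bucket_hash ^= lo_hash_dword >> n;

	printf("bucket_hash=0x%08x\n", bucket_hash);
	return 0;
}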
@@ -1440,9 +1440,9 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
u32 n = (_n); \
- if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
bucket_hash ^= lo_hash_dword >> n; \
- if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
bucket_hash ^= hi_hash_dword >> n; \
} while (0)
@@ -1633,6 +1633,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
switch (hw->mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
break;
default:
@@ -2181,7 +2182,7 @@ release_i2c_access:
return status;
}
-static struct ixgbe_mac_operations mac_ops_82599 = {
+static const struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
.start_hw = &ixgbe_start_hw_82599,
@@ -2220,6 +2221,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.clear_vfta = &ixgbe_clear_vfta_generic,
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
+ .setup_fc = ixgbe_setup_fc_generic,
.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = &ixgbe_setup_sfp_modules_82599,
@@ -2227,6 +2229,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
.release_swfw_sync = &ixgbe_release_swfw_sync,
+ .init_swfw_sync = NULL,
.get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
.init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
.prot_autoc_read = &prot_autoc_read_82599,
@@ -2235,7 +2238,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.disable_rx = &ixgbe_disable_rx_generic,
};
-static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eeprom_82599,
.read_buffer = &ixgbe_read_eeprom_buffer_82599,
@@ -2246,7 +2249,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
-static struct ixgbe_phy_operations phy_ops_82599 = {
+static const struct ixgbe_phy_operations phy_ops_82599 = {
.identify = &ixgbe_identify_phy_82599,
.identify_sfp = &ixgbe_identify_module_generic,
.init = &ixgbe_init_phy_ops_82599,
@@ -2263,7 +2266,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = {
.check_overtemp = &ixgbe_tn_check_overtemp,
};
-struct ixgbe_info ixgbe_82599_info = {
+const struct ixgbe_info ixgbe_82599_info = {
.mac = ixgbe_mac_82599EB,
.get_invariants = &ixgbe_get_invariants_82599,
.mac_ops = &mac_ops_82599,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 64045053e..902d2061c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -97,6 +97,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
supported = true;
break;
@@ -111,12 +112,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
}
/**
- * ixgbe_setup_fc - Set up flow control
+ * ixgbe_setup_fc_generic - Set up flow control
* @hw: pointer to hardware structure
*
* Called at init time to set up flow control.
**/
-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
@@ -296,7 +297,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
/* Setup flow control */
- ret_val = ixgbe_setup_fc(hw);
+ ret_val = hw->mac.ops.setup_fc(hw);
if (ret_val)
return ret_val;
@@ -681,6 +682,7 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
struct ixgbe_bus_info *bus = &hw->bus;
+ u16 ee_ctrl_4;
u32 reg;
reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
@@ -691,6 +693,13 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
if (reg & IXGBE_FACTPS_LFS)
bus->func ^= 0x1;
+
+ /* Get MAC instance from EEPROM for configuring CS4227 */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
+ bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
+ IXGBE_EE_CTRL_4_INST_ID_SHIFT;
+ }
}
/**
@@ -816,8 +825,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
*/
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ eeprom->word_size = BIT(eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
}
if (eec & IXGBE_EEC_ADDR_SIZE)
@@ -1493,7 +1502,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
* Mask is used to shift "count" bits of "data" out to the EEPROM
* one bit at a time. Determine the starting bit based on count
*/
- mask = 0x01 << (count - 1);
+ mask = BIT(count - 1);
for (i = 0; i < count; i++) {
/*
@@ -1982,7 +1991,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
*/
vector_reg = (vector >> 5) & 0x7F;
vector_bit = vector & 0x1F;
- hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+ hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
}
/**
@@ -2854,6 +2863,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
@@ -2911,10 +2921,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
mpsar_hi = 0;
}
} else if (vmdq < 32) {
- mpsar_lo &= ~(1 << vmdq);
+ mpsar_lo &= ~BIT(vmdq);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
} else {
- mpsar_hi &= ~(1 << (vmdq - 32));
+ mpsar_hi &= ~BIT(vmdq - 32);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
}
@@ -2943,11 +2953,11 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
if (vmdq < 32) {
mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar |= 1 << vmdq;
+ mpsar |= BIT(vmdq);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
} else {
mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- mpsar |= 1 << (vmdq - 32);
+ mpsar |= BIT(vmdq - 32);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
}
return 0;
@@ -2968,11 +2978,11 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
u32 rar = hw->mac.san_mac_rar_index;
if (vmdq < 32) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
} else {
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
}
return 0;
@@ -3072,7 +3082,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* bits[4-0]: which bit in the register
*/
regidx = vlan / 32;
- vfta_delta = 1 << (vlan % 32);
+ vfta_delta = BIT(vlan % 32);
vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
/* vfta_delta represents the difference between the current value
@@ -3103,12 +3113,12 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
/* set the pool bit */
- bits |= 1 << (vind % 32);
+ bits |= BIT(vind % 32);
if (vlan_on)
goto vlvf_update;
/* clear the pool bit */
- bits ^= 1 << (vind % 32);
+ bits ^= BIT(vind % 32);
if (!bits &&
!IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
@@ -3300,43 +3310,25 @@ wwn_prefix_err:
/**
* ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
* @hw: pointer to hardware structure
- * @enable: enable or disable switch for anti-spoofing
- * @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ * @enable: enable or disable switch for MAC anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
*
**/
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
- int j;
- int pf_target_reg = pf >> 3;
- int pf_target_shift = pf % 8;
- u32 pfvfspoof = 0;
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8;
+ u32 pfvfspoof;
if (hw->mac.type == ixgbe_mac_82598EB)
return;
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
if (enable)
- pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
-
- /*
- * PFVFSPOOF register array is size 8 with 8 bits assigned to
- * MAC anti-spoof enables in each register array element.
- */
- for (j = 0; j < pf_target_reg; j++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
- /*
- * The PF should be allowed to spoof so that it can support
- * emulation mode NICs. Do not set the bits assigned to the PF
- */
- pfvfspoof &= (1 << pf_target_shift) - 1;
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
- /*
- * Remaining pools belong to the PF so they do not need to have
- * anti-spoofing enabled.
- */
- for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
+ pfvfspoof |= BIT(vf_target_shift);
+ else
+ pfvfspoof &= ~BIT(vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
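Aside: the per-VF read-modify-write the rewritten helper performs, sketched with a simulated register array. Eight spoof-control bits live in each element, so VF n lands in element n >> 3 at bit n % 8.

#include <stdint.h>
#include <stdio.h>

static uint32_t pfvfspoof[8]; /* stand-in for the PFVFSPOOF register array */

static void set_mac_anti_spoofing(int enable, int vf)
{
	int reg = vf >> 3;
	int shift = vf % 8;

	if (enable)
		pfvfspoof[reg] |= 1u << shift;
	else
		pfvfspoof[reg] &= ~(1u << shift);
}

int main(void)
{
	set_mac_anti_spoofing(1, 11); /* VF 11 -> element 1, bit 3 */
	printf("reg1=0x%08x\n", pfvfspoof[1]); /* 0x00000008 */
	return 0;
}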
/**
@@ -3357,9 +3349,9 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
if (enable)
- pfvfspoof |= (1 << vf_target_shift);
+ pfvfspoof |= BIT(vf_target_shift);
else
- pfvfspoof &= ~(1 << vf_target_shift);
+ pfvfspoof &= ~BIT(vf_target_shift);
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
@@ -3483,18 +3475,27 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* Communicates with the manageability block. On success return 0
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
**/
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
u32 length, u32 timeout,
bool return_data)
{
- u32 hicr, i, bi, fwsts;
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+ u32 hicr, i, bi, fwsts;
u16 buf_len, dword_len;
+ union {
+ struct ixgbe_hic_hdr hdr;
+ u32 u32arr[1];
+ } *bp = buffer;
+ s32 status;
- if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ /* Take management host interface semaphore */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+ if (status)
+ return status;
/* Set bit 9 of FWSTS clearing FW reset indication */
fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
@@ -3502,26 +3503,27 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* Check that the host interface is enabled. */
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
- if ((hicr & IXGBE_HICR_EN) == 0) {
+ if (!(hicr & IXGBE_HICR_EN)) {
hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
/* Calculate length in DWORDs. We must be DWORD aligned */
- if ((length % (sizeof(u32))) != 0) {
+ if (length % sizeof(u32)) {
hw_dbg(hw, "Buffer length failure, not aligned to dword");
- return IXGBE_ERR_INVALID_ARGUMENT;
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto rel_out;
}
dword_len = length >> 2;
- /*
- * The device driver writes the relevant command block
+ /* The device driver writes the relevant command block
* into the ram area.
*/
for (i = 0; i < dword_len; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- i, cpu_to_le32(buffer[i]));
+ i, cpu_to_le32(bp->u32arr[i]));
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
@@ -3534,44 +3536,49 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
}
/* Check command successful completion. */
- if ((timeout != 0 && i == timeout) ||
- (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
+ if ((timeout && i == timeout) ||
+ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
hw_dbg(hw, "Command has failed with no status valid.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
if (!return_data)
- return 0;
+ goto rel_out;
/* Calculate length in DWORDs */
dword_len = hdr_size >> 2;
/* first pull in the header so we know the buffer length */
for (bi = 0; bi < dword_len; bi++) {
- buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- le32_to_cpus(&buffer[bi]);
+ bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ le32_to_cpus(&bp->u32arr[bi]);
}
/* If there is anything in the data position, pull it in */
- buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
- if (buf_len == 0)
- return 0;
+ buf_len = bp->hdr.buf_len;
+ if (!buf_len)
+ goto rel_out;
- if (length < (buf_len + hdr_size)) {
+ if (length < round_up(buf_len, 4) + hdr_size) {
hw_dbg(hw, "Buffer not large enough for reply message.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
/* Calculate length in DWORDs, add 3 for odd lengths */
dword_len = (buf_len + 3) >> 2;
- /* Pull in the rest of the buffer (bi is where we left off)*/
+ /* Pull in the rest of the buffer (bi is where we left off) */
for (; bi <= dword_len; bi++) {
- buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- le32_to_cpus(&buffer[bi]);
+ bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ le32_to_cpus(&bp->u32arr[bi]);
}
- return 0;
+rel_out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+
+ return status;
}
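Aside: the lock-and-unwind structure the command path now uses -- take the semaphore first, route every failure through one release point -- sketched with stubbed locking.

#include <stdio.h>

static int sem_held;

static int take_sem(void)  { sem_held = 1; return 0; }
static void drop_sem(void) { sem_held = 0; }

static int run_command(int enabled, int aligned)
{
	int status = take_sem();

	if (status)
		return status;

	if (!enabled) {  /* e.g. HICR enable bit clear */
		status = -1;
		goto rel_out;
	}
	if (!aligned) {  /* e.g. length not dword aligned */
		status = -2;
		goto rel_out;
	}
	/* ... issue command, read reply ... */
rel_out:
	drop_sem();
	return status;
}

int main(void)
{
	/* Every exit path releases the semaphore before returning. */
	printf("%d held=%d\n", run_command(1, 0), sem_held); /* -2 held=0 */
	return 0;
}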
/**
@@ -3594,13 +3601,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
int i;
s32 ret_val;
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM))
- return IXGBE_ERR_SWFW_SYNC;
-
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
- fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.port_num = hw->bus.func;
fw_cmd.ver_maj = maj;
fw_cmd.ver_min = min;
fw_cmd.ver_build = build;
@@ -3612,7 +3616,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
fw_cmd.pad2 = 0;
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
- ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
sizeof(fw_cmd),
IXGBE_HI_COMMAND_TIMEOUT,
true);
@@ -3628,7 +3632,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
break;
}
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2b9563137..6d4c260d0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -81,6 +81,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
@@ -105,13 +106,13 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
- u32 length, u32 timeout, bool return_data);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
+ u32 timeout, bool return_data);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_present(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 02c7333a9..072ef3b5f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -186,7 +186,7 @@ void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
if (tc_config[tc].dcb_pfc != pfc_disabled)
- *pfc_en |= 1 << tc;
+ *pfc_en |= BIT(tc);
}
}
@@ -232,7 +232,7 @@ void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
{
struct tc_configuration *tc_config = &cfg->tc_config[0];
- u8 prio_mask = 1 << up;
+ u8 prio_mask = BIT(up);
u8 tc = cfg->num_tcs.pg_tcs;
/* If tc is 0 then DCB is likely not enabled or supported */
@@ -293,6 +293,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
bwgid, ptype, prio_tc);
default:
@@ -311,6 +312,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
default:
break;
@@ -368,6 +370,7 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
bwg_id, prio_type, prio_tc);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
@@ -398,6 +401,7 @@ void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
ixgbe_dcb_read_rtrup2tc_82599(hw, map);
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index d3ba63f9a..b79e93a5b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -210,7 +210,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- if (!(pfc_en & (1 << i))) {
+ if (!(pfc_en & BIT(i))) {
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
continue;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index b5cc989a3..1011d6449 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -248,7 +248,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
int enabled = 0;
for (j = 0; j < MAX_USER_PRIORITY; j++) {
- if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
+ if ((prio_tc[j] == i) && (pfc_en & BIT(j))) {
enabled = 1;
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 2707bda37..b8fc3cfec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -62,7 +62,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
};
u8 up = dcb_getapp(adapter->netdev, &app);
- if (up && !(up & (1 << adapter->fcoe.up)))
+ if (up && !(up & BIT(adapter->fcoe.up)))
changes |= BIT_APP_UPCHG;
#endif
@@ -657,7 +657,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
app->protocol == ETH_P_FCOE) {
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
- if (app_mask & (1 << adapter->fcoe.up))
+ if (app_mask & BIT(adapter->fcoe.up))
return 0;
adapter->fcoe.up = app->priority;
@@ -700,7 +700,7 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
app->protocol == ETH_P_FCOE) {
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
- if (app_mask & (1 << adapter->fcoe.up))
+ if (app_mask & BIT(adapter->fcoe.up))
return 0;
adapter->fcoe.up = app_mask ?
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index b3530e1e3..59b771b9b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -533,10 +533,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
/* Flow Control */
regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
- regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
- regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
- regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
- regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
+ for (i = 0; i < 4; i++)
+ regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
for (i = 0; i < 8; i++) {
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -547,6 +545,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
break;
@@ -660,6 +659,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
for (i = 0; i < 8; i++)
@@ -718,8 +718,10 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
- regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
- regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
+ regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
+ regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
+ regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
+ regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
for (i = 0; i < 8; i++)
regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
@@ -729,7 +731,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
- regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
+ regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
+ regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
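Aside: the 64-bit statistics fix above in miniature -- a counter such as gorc now fills two consecutive u32 slots instead of silently truncating to its low half.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gorc = 0x123456789AULL; /* illustrative counter value */
	uint32_t regs_buff[2];

	regs_buff[0] = (uint32_t)gorc;         /* low 32 bits */
	regs_buff[1] = (uint32_t)(gorc >> 32); /* high 32 bits */
	printf("0x%08x 0x%08x\n", regs_buff[0], regs_buff[1]);
	return 0;
}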
@@ -801,15 +804,11 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
- regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
- regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
- regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
- regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
+ for (i = 0; i < 4; i++)
+ regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
- regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
- regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
- regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
- regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
+ for (i = 0; i < 4; i++)
+ regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
for (i = 0; i < 8; i++)
regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
@@ -1443,6 +1442,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
toggle = 0x7FFFF30F;
test = reg_test_82599;
break;
@@ -1583,7 +1583,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
/* Test each interrupt */
for (; i < 10; i++) {
/* Interrupt to test */
- mask = 1 << i;
+ mask = BIT(i);
if (!shared_int) {
/*
@@ -1681,6 +1681,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
reg_ctl &= ~IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
@@ -1720,6 +1721,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1780,6 +1782,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
reg_data |= IXGBE_MACC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
@@ -2991,6 +2994,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
info->so_timestamping =
@@ -3007,14 +3011,14 @@ static int ixgbe_get_ts_info(struct net_device *dev,
info->phc_index = -1;
info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
default:
return ethtool_op_get_ts_info(dev, info);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index e771e764d..bcdc88444 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -128,6 +128,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (num_tcs > 4) {
/*
* TCs : TC0/1 TC2/3 TC4-7
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7df3fe29b..8bebd862a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -53,15 +53,7 @@
#include <net/vxlan.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
-
-#ifdef CONFIG_OF
-#include <linux/of_net.h>
-#endif
-
-#ifdef CONFIG_SPARC
-#include <asm/idprom.h>
-#include <asm/prom.h>
-#endif
+#include <net/tc_act/tc_mirred.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
@@ -79,10 +71,10 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "4.2.1-k"
+#define DRV_VERSION "4.4.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2015 Intel Corporation.";
+ "Copyright (c) 1999-2016 Intel Corporation.";
static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";
@@ -92,6 +84,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_X540] = &ixgbe_X540_info,
[board_X550] = &ixgbe_X550_info,
[board_X550EM_x] = &ixgbe_X550EM_x_info,
+ [board_x550em_a] = &ixgbe_x550em_a_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -134,10 +127,17 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
/* required last entry */
{0, }
};
@@ -372,6 +372,27 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
if (ixgbe_removed(reg_addr))
return IXGBE_FAILED_READ_REG;
+ if (unlikely(hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) {
+ struct ixgbe_adapter *adapter;
+ int i;
+
+ for (i = 0; i < 200; ++i) {
+ value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
+ if (likely(!value))
+ goto writes_completed;
+ if (value == IXGBE_FAILED_READ_REG) {
+ ixgbe_remove_adapter(hw);
+ return IXGBE_FAILED_READ_REG;
+ }
+ udelay(5);
+ }
+
+ adapter = hw->back;
+ e_warn(hw, "register writes incomplete %08x\n", value);
+ }
+
+writes_completed:
value = readl(reg_addr + reg);
if (unlikely(value == IXGBE_FAILED_READ_REG))
ixgbe_check_remove(hw, reg);
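
The block added to ixgbe_read_reg() above bounds its wait on the SGMII busy flag: up to 200 polls, 5 microseconds apart, with an explicit bail-out when the read returns the all-ones pattern that signals a removed adapter. A stand-alone sketch of the same bounded-poll shape; read_busy_reg(), FAILED_READ and the simulated hardware are stand-ins, not driver APIs:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

#define FAILED_READ 0xffffffffU

static uint32_t fake_busy = 3;	/* pretend hardware: busy for 3 polls */

static uint32_t read_busy_reg(void)
{
	return fake_busy ? fake_busy-- : 0;
}

static int wait_for_writes(void)
{
	uint32_t value = 0;
	int i;

	for (i = 0; i < 200; i++) {
		value = read_busy_reg();
		if (!value)
			return 0;	/* writes completed */
		if (value == FAILED_READ)
			return -1;	/* adapter gone: give up */
		usleep(5);		/* udelay(5) in the driver */
	}
	fprintf(stderr, "register writes incomplete %08x\n", value);
	return 0;			/* proceed anyway, as the driver does */
}

int main(void)
{
	printf("wait_for_writes() = %d\n", wait_for_writes());
	return 0;
}
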
@@ -588,7 +609,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
pr_info("%-15s %016lX %016lX %016lX\n",
netdev->name,
netdev->state,
- netdev->trans_start,
+ dev_trans_start(netdev),
netdev->last_rx);
}
@@ -869,6 +890,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -907,6 +929,7 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
@@ -1087,9 +1110,40 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
+ * @netdev: network interface device structure
+ * @queue_index: Tx queue to set
+ * @maxrate: desired maximum transmit bitrate in Mb/s
+ **/
+static int ixgbe_tx_maxrate(struct net_device *netdev,
+ int queue_index, u32 maxrate)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 bcnrc_val = ixgbe_link_mbps(adapter);
+
+ if (!maxrate)
+ return 0;
+
+ /* Calculate the rate factor values to set */
+ bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
+ bcnrc_val /= maxrate;
+
+ /* clear everything but the rate factor */
+ bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
+ IXGBE_RTTBCNRC_RF_DEC_MASK;
+
+ /* enable the rate scheduler */
+ bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+
+ return 0;
+}
+
+/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
**/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *tx_ring, int napi_budget)
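
The new ndo_set_tx_maxrate callback programs the per-queue rate limiter by dividing link speed by the requested cap in fixed point. A worked sketch of that arithmetic, assuming the 14 fractional bits implied by IXGBE_RTTBCNRC_RF_INT_SHIFT in ixgbe_type.h; treat the mask values here as illustrative rather than authoritative:

#include <stdio.h>
#include <stdint.h>

#define RF_INT_SHIFT 14
#define RF_DEC_MASK  0x00003fffu
#define RF_INT_MASK  (RF_DEC_MASK << RF_INT_SHIFT)
#define RS_ENA       0x80000000u

static uint32_t rate_factor(uint32_t link_mbps, uint32_t maxrate_mbps)
{
	uint32_t bcnrc = link_mbps << RF_INT_SHIFT;	/* link speed, fixed point */

	bcnrc /= maxrate_mbps;			/* factor = link / maxrate */
	bcnrc &= RF_INT_MASK | RF_DEC_MASK;	/* keep only the rate factor */
	bcnrc |= RS_ENA;			/* enable the rate scheduler */
	return bcnrc;
}

int main(void)
{
	/* a 10 Gb/s link capped at 1 Gb/s: factor 10.0, i.e. 10 << 14 */
	uint32_t v = rate_factor(10000, 1000);

	printf("bcnrc = %#x, integer part = %u\n",
	       v, (v & RF_INT_MASK) >> RF_INT_SHIFT);
	return 0;
}
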
@@ -2192,7 +2246,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
/* Populate MSIX to EITR Select */
if (adapter->num_vfs > 32) {
- u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+ u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
}
@@ -2222,6 +2276,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
break;
default:
@@ -2333,6 +2388,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
@@ -2494,6 +2550,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
return false;
case ixgbe_mac_82599EB:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
switch (hw->mac.ops.get_media_type(hw)) {
case ixgbe_media_type_fiber:
case ixgbe_media_type_fiber_qsfp:
@@ -2568,6 +2625,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -2596,6 +2654,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
@@ -2631,6 +2690,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask |= IXGBE_EIMS_TS;
break;
default:
@@ -2646,7 +2706,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
- if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+ case ixgbe_mac_x550em_a:
+ if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
+ adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
+ adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
mask |= IXGBE_EICR_GPI_SDP0_X540;
@@ -2704,6 +2767,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
(eicr & IXGBE_EICR_GPI_SDP0_X540)) {
adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
@@ -2786,8 +2850,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
ixgbe_update_dca(q_vector);
#endif
- ixgbe_for_each_ring(ring, q_vector->tx)
- clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget);
+ ixgbe_for_each_ring(ring, q_vector->tx) {
+ if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
/* Exit if we are called by netpoll or busy polling is active */
if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
@@ -2805,7 +2871,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
per_ring_budget);
work_done += cleaned;
- clean_complete &= (cleaned < per_ring_budget);
+ if (cleaned >= per_ring_budget)
+ clean_complete = false;
}
ixgbe_qv_unlock_napi(q_vector);
@@ -2818,9 +2885,9 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
if (adapter->rx_itr_setting & 1)
ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
- return 0;
+ return min(work_done, budget - 1);
}
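
The ixgbe_poll() hunks above tighten the NAPI contract: a poll handler that has already completed NAPI and re-armed its interrupt must report strictly less than its budget, otherwise the core assumes work remains. That is why the function now returns min(work_done, budget - 1) instead of a bare 0, which under-reported the work actually done. A stand-alone sketch of the contract, not driver code:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

static int poll(int work_done, int budget, int *irq_rearmed)
{
	if (work_done < budget) {
		*irq_rearmed = 1;		   /* napi_complete() + irq enable */
		return min(work_done, budget - 1); /* never report a full budget */
	}
	*irq_rearmed = 0;			   /* stay in polling mode */
	return budget;
}

int main(void)
{
	int irq;

	printf("poll(3, 64)  -> %d\n", poll(3, 64, &irq));  /* 3: done, rearm */
	printf("poll(64, 64) -> %d\n", poll(64, 64, &irq)); /* 64: poll again */
	return 0;
}
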
/**
@@ -2937,6 +3004,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -3033,6 +3101,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -3109,15 +3178,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
* currently 40.
*/
if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
- txdctl |= (1 << 16); /* WTHRESH = 1 */
+ txdctl |= 1u << 16; /* WTHRESH = 1 */
else
- txdctl |= (8 << 16); /* WTHRESH = 8 */
+ txdctl |= 8u << 16; /* WTHRESH = 8 */
/*
* Setting PTHRESH to 32 both improves performance
* and avoids a TX hang with DFP enabled
*/
- txdctl |= (1 << 8) | /* HTHRESH = 1 */
+ txdctl |= (1u << 8) | /* HTHRESH = 1 */
32; /* PTHRESH = 32 */
/* reinitialize flowdirector state */
@@ -3669,9 +3738,9 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
return;
if (rss_i > 3)
- psrtype |= 2 << 29;
+ psrtype |= 2u << 29;
else if (rss_i > 1)
- psrtype |= 1 << 29;
+ psrtype |= 1u << 29;
for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
@@ -3698,9 +3767,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
/* Enable only the PF's pool for Tx/Rx */
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
if (adapter->bridge_mode == BRIDGE_MODE_VEB)
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
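
GENMASK(31, vf_shift) replaces (~0) << vf_shift in the VFRE/VFTE writes. Both build a mask of bits vf_shift through 31, but GENMASK says so explicitly and avoids left-shifting a signed ~0. A 32-bit user-space sketch mirroring the kernel macro from include/linux/bits.h (named GENMASK32 here to flag that it is a local stand-in):

#include <stdio.h>

#define GENMASK32(h, l) \
	((~0U - (1U << (l)) + 1) & (~0U >> (31 - (h))))

int main(void)
{
	unsigned int vf_shift = 8;	/* PF owns pools 8..31 in this example */

	printf("GENMASK32(31, %u) = %#x\n", vf_shift,
	       GENMASK32(31, vf_shift));	/* 0xffffff00 */
	return 0;
}
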
@@ -3729,34 +3798,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
-
- /* Enable MAC Anti-Spoofing */
- hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
- adapter->num_vfs);
-
- /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
- * calling set_ethertype_anti_spoofing for each VF in loop below
- */
- if (hw->mac.ops.set_ethertype_anti_spoofing) {
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
- (IXGBE_ETQF_FILTER_EN |
- IXGBE_ETQF_TX_ANTISPOOF |
- IXGBE_ETH_P_LLDP));
-
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
- (IXGBE_ETQF_FILTER_EN |
- IXGBE_ETQF_TX_ANTISPOOF |
- ETH_P_PAUSE));
- }
-
- /* For VFs that have spoof checking turned off */
for (i = 0; i < adapter->num_vfs; i++) {
- if (!adapter->vfinfo[i].spoofchk_enabled)
- ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
-
- /* enable ethertype anti spoofing if hw supports it */
- if (hw->mac.ops.set_ethertype_anti_spoofing)
- hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
+ /* configure spoof checking */
+ ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
+ adapter->vfinfo[i].spoofchk_enabled);
/* Enable/Disable RSS query feature */
ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
@@ -3832,6 +3877,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
/* fall through for older HW */
@@ -3908,7 +3954,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
/* add VID to filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true);
+ if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+ hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
+
set_bit(vid, adapter->active_vlans);
return 0;
@@ -3947,7 +3995,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
* entry other than the PF.
*/
word = idx * 2 + (VMDQ_P(0) / 32);
- bits = ~(1 << (VMDQ_P(0)) % 32);
+ bits = ~BIT(VMDQ_P(0) % 32);
bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
/* Disable the filter so this falls into the default pool. */
@@ -3965,9 +4013,7 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
/* remove VID from filter table */
- if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
- ixgbe_update_pf_promisc_vlvf(adapter, vid);
- else
+ if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
clear_bit(vid, adapter->active_vlans);
@@ -3995,6 +4041,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4031,6 +4078,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4057,6 +4105,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
default:
if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
break;
@@ -4081,7 +4130,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
- vlvfb |= 1 << (VMDQ_P(0) % 32);
+ vlvfb |= BIT(VMDQ_P(0) % 32);
IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
}
@@ -4111,7 +4160,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
if (vlvf) {
/* record VLAN ID in VFTA */
- vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+ vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
/* if PF is part of this then continue */
if (test_bit(vid, adapter->active_vlans))
@@ -4120,7 +4169,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
/* remove PF from the pool */
word = i * 2 + VMDQ_P(0) / 32;
- bits = ~(1 << (VMDQ_P(0) % 32));
+ bits = ~BIT(VMDQ_P(0) % 32);
bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
}
@@ -4147,6 +4196,7 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
default:
if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
break;
@@ -4172,11 +4222,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
- u16 vid;
+ u16 vid = 1;
ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
- for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
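
ixgbe_restore_vlan() now adds VID 0 unconditionally and starts the bitmap walk at VID 1 with for_each_set_bit_from(), so VID 0 is never programmed twice. A plain-C sketch of the iteration, with local set_bit()/test_bit() standing in for the kernel bitmap helpers:

#include <stdio.h>

#define VLAN_N_VID 4096
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long active[VLAN_N_VID / (8 * sizeof(unsigned long))];

static void set_bit(int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int test_bit(int nr, const unsigned long *map)
{
	return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

int main(void)
{
	int vid;

	set_bit(0, active);	/* VID 0 is always present */
	set_bit(100, active);
	set_bit(101, active);

	printf("add VID 0\n");	/* the unconditional add before the loop */
	for (vid = 1; vid < VLAN_N_VID; vid++)	/* for_each_set_bit_from(vid, ...) */
		if (test_bit(vid, active))
			printf("add VID %d\n", vid);
	return 0;
}
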
@@ -4426,6 +4476,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ netdev_features_t features = netdev->features;
int count;
/* Check for Promiscuous and All Multicast modes */
@@ -4443,14 +4494,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
vmolr |= IXGBE_VMOLR_MPE;
- ixgbe_vlan_promisc_enable(adapter);
+ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
}
hw->addr_ctrl.user_set_promisc = false;
- ixgbe_vlan_promisc_disable(adapter);
}
/*
@@ -4483,7 +4533,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
}
/* This is useful for sniffing bad packets. */
- if (adapter->netdev->features & NETIF_F_RXALL) {
+ if (features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic
* in ixgbe_set_rx_mode */
fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
@@ -4496,10 +4546,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
ixgbe_vlan_strip_enable(adapter);
else
ixgbe_vlan_strip_disable(adapter);
+
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ ixgbe_vlan_promisc_disable(adapter);
+ else
+ ixgbe_vlan_promisc_enable(adapter);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -4530,6 +4585,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
adapter->vxlan_port = 0;
break;
@@ -4630,6 +4686,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
dv_id = IXGBE_DV_X540(link, tc);
break;
default:
@@ -4690,6 +4747,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
dv_id = IXGBE_LOW_DV_X540(tc);
break;
default:
@@ -4805,9 +4863,9 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
return;
if (rss_i > 3)
- psrtype |= 2 << 29;
+ psrtype |= 2u << 29;
else if (rss_i > 1)
- psrtype |= 1 << 29;
+ psrtype |= 1u << 29;
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
@@ -4871,7 +4929,7 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
/* shutdown specific queue receive and wait for dma to settle */
ixgbe_disable_rx_queue(adapter, rx_ring);
usleep_range(10000, 20000);
- ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
+ ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
ixgbe_clean_rx_ring(rx_ring);
rx_ring->l2_accel_priv = NULL;
}
@@ -5106,6 +5164,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5156,6 +5215,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
break;
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
gpie |= IXGBE_SDP0_GPIEN_X540;
break;
default:
@@ -5228,7 +5288,7 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
WARN_ON(in_interrupt());
/* put off any impending NetWatchDogTimeout */
- adapter->netdev->trans_start = jiffies;
+ netif_trans_update(adapter->netdev);
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
@@ -5467,6 +5527,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
@@ -5498,6 +5559,58 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
ixgbe_tx_timeout_reset(adapter);
}
+#ifdef CONFIG_IXGBE_DCB
+static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct tc_configuration *tc;
+ int j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ case ixgbe_mac_82599EB:
+ adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
+ break;
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ default:
+ adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
+ break;
+ }
+
+ /* Configure DCB traffic classes */
+ for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+ tc = &adapter->dcb_cfg.tc_config[j];
+ tc->path[DCB_TX_CONFIG].bwg_id = 0;
+ tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
+ tc->path[DCB_RX_CONFIG].bwg_id = 0;
+ tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
+ tc->dcb_pfc = pfc_disabled;
+ }
+
+ /* Initialize default user to priority mapping, UPx->TC0 */
+ tc = &adapter->dcb_cfg.tc_config[0];
+ tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+ tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+
+ adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
+ adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
+ adapter->dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_set_bitmap = 0x00;
+ if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
+ adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
+ memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+ sizeof(adapter->temp_dcb_cfg));
+}
+#endif
+
/**
* ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
* @adapter: board private structure to initialize
@@ -5512,10 +5625,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
unsigned int rss, fdir;
u32 fwsm;
-#ifdef CONFIG_IXGBE_DCB
- int j;
- struct tc_configuration *tc;
-#endif
+ u16 device_caps;
+ int i;
/* PCI config space info */
@@ -5537,6 +5648,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCA
adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
#endif
+#ifdef CONFIG_IXGBE_DCB
+ adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+#endif
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
@@ -5547,7 +5662,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
/* initialize static ixgbe jump table entries */
- adapter->jump_tables[0] = ixgbe_ipv4_fields;
+ adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
+ GFP_KERNEL);
+ if (!adapter->jump_tables[0])
+ return -ENOMEM;
+ adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
+
+ for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
+ adapter->jump_tables[i] = NULL;
adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
hw->mac.num_rar_entries,
@@ -5585,6 +5707,17 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+#ifdef CONFIG_IXGBE_DCB
+ adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
+#endif
+#ifdef IXGBE_FCOE
+ adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+#ifdef CONFIG_IXGBE_DCB
+ adapter->fcoe.up = 0;
+#endif /* IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+ /* Fall Through */
case ixgbe_mac_X550:
#ifdef CONFIG_IXGBE_DCA
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
@@ -5606,42 +5739,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
spin_lock_init(&adapter->fdir_perfect_lock);
#ifdef CONFIG_IXGBE_DCB
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
- adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
- break;
- default:
- adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
- adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
- break;
- }
-
- /* Configure DCB traffic classes */
- for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
- tc = &adapter->dcb_cfg.tc_config[j];
- tc->path[DCB_TX_CONFIG].bwg_id = 0;
- tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
- tc->path[DCB_RX_CONFIG].bwg_id = 0;
- tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
- tc->dcb_pfc = pfc_disabled;
- }
-
- /* Initialize default user to priority mapping, UPx->TC0 */
- tc = &adapter->dcb_cfg.tc_config[0];
- tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
- tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
-
- adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
- adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
- adapter->dcb_cfg.pfc_mode_enable = false;
- adapter->dcb_set_bitmap = 0x00;
- adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
- memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
- sizeof(adapter->temp_dcb_cfg));
-
+ ixgbe_init_dcb(adapter);
#endif
/* default flow control settings */
@@ -5675,6 +5773,22 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
+ /* Cache bit indicating need for crosstalk fix */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ hw->mac.ops.get_device_caps(hw, &device_caps);
+ if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+ adapter->need_crosstalk_fix = false;
+ else
+ adapter->need_crosstalk_fix = true;
+ break;
+ default:
+ adapter->need_crosstalk_fix = false;
+ break;
+ }
+
/* set default work limits */
adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
@@ -6217,6 +6331,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
pci_wake_from_d3(pdev, !!wufc);
break;
default:
@@ -6352,6 +6467,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
break;
@@ -6367,7 +6483,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
if ((hw->mac.type == ixgbe_mac_82599EB) ||
(hw->mac.type == ixgbe_mac_X540) ||
(hw->mac.type == ixgbe_mac_X550) ||
- (hw->mac.type == ixgbe_mac_X550EM_x)) {
+ (hw->mac.type == ixgbe_mac_X550EM_x) ||
+ (hw->mac.type == ixgbe_mac_x550em_a)) {
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -6392,6 +6509,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/* OS2BMC stats are X540 and later */
hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -6562,7 +6680,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
struct ixgbe_q_vector *qv = adapter->q_vector[i];
if (qv->rx.ring || qv->tx.ring)
- eics |= ((u64)1 << i);
+ eics |= BIT_ULL(i);
}
}
@@ -6593,6 +6711,18 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
link_up = true;
}
+ /* If the crosstalk fix is enabled, sanity-check that the SFP+
+ * cage is full.
+ */
+ if (adapter->need_crosstalk_fix) {
+ u32 sfp_cage_full;
+
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP2;
+ if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full)
+ link_up = false;
+ }
+
if (adapter->ixgbe_ieee_pfc)
pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
@@ -6662,6 +6792,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
case ixgbe_mac_82599EB: {
u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -6938,6 +7069,16 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
s32 err;
+ /* If the crosstalk fix is enabled, verify the SFP+ cage is full */
+ if (adapter->need_crosstalk_fix) {
+ u32 sfp_cage_full;
+
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP2;
+ if (!sfp_cage_full)
+ return;
+ }
+
/* not searching for SFP so there is nothing to do here */
if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
@@ -7122,10 +7263,12 @@ static void ixgbe_service_task(struct work_struct *work)
return;
}
#ifdef CONFIG_IXGBE_VXLAN
+ rtnl_lock();
if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
vxlan_get_rx_port(adapter->netdev);
}
+ rtnl_unlock();
#endif /* CONFIG_IXGBE_VXLAN */
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
@@ -7148,9 +7291,18 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
u8 *hdr_len)
{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb;
- u32 vlan_macip_lens, type_tucmd;
- u32 mss_l4len_idx, l4len;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -7163,46 +7315,52 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
if (err < 0)
return err;
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM |
IXGBE_TX_FLAGS_IPV4;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ } else {
+ ip.v6->payload_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM;
}
- /* compute header lengths */
- l4len = tcp_hdrlen(skb);
- *hdr_len = skb_transport_offset(skb) + l4len;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* mss_l4len_id: use 0 as index for TSO */
- mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
- vlan_macip_lens = skb_network_header_len(skb);
- vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
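
The reworked ixgbe_tso() above no longer zeroes header fields and recomputes the TCP pseudo-header checksum; it cancels the payload length out of the checksum the stack already wrote, which is what csum_replace_by_diff() does in ones-complement arithmetic. A user-space sketch of that adjustment, with local helpers standing in for the kernel csum routines:

#include <stdio.h>
#include <stdint.h>

/* fold a 32-bit ones-complement accumulator down to 16 bits */
static uint16_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* the checksum field stores ~sum; remove `val` from the folded sum */
static uint16_t csum_sub(uint16_t check, uint32_t val)
{
	uint32_t sum = (uint16_t)~check;	/* back to the raw sum */

	sum += ~csum_fold32(val) & 0xffff;	/* ones-complement subtract */
	return (uint16_t)~csum_fold32(sum);
}

int main(void)
{
	/* pseudo-header sum over addresses/proto, plus a 1000-byte payload */
	uint32_t base = 0x1234;
	uint16_t check = (uint16_t)~csum_fold32(base + 1000);

	printf("adjusted: %#06x\n", csum_sub(check, 1000));
	printf("expected: %#06x\n", (uint16_t)~csum_fold32(base));
	return 0;
}

Both lines print the same value: subtracting the payload length from the folded sum recovers the checksum over the pseudo-header alone, which is what the hardware expects before it re-adds per-segment lengths.
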
@@ -7211,103 +7369,61 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
return 1;
}
+static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+ unsigned int offset = 0;
+
+ ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+ return offset == skb_checksum_start_offset(skb);
+}
+
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
- u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
- if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
- !(first->tx_flags & IXGBE_TX_FLAGS_CC))
+csum_failed:
+ if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
+ IXGBE_TX_FLAGS_CC)))
return;
- vlan_macip_lens = skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT;
- } else {
- u8 l4_hdr = 0;
- union {
- struct iphdr *ipv4;
- struct ipv6hdr *ipv6;
- u8 *raw;
- } network_hdr;
- union {
- struct tcphdr *tcphdr;
- u8 *raw;
- } transport_hdr;
- __be16 frag_off;
-
- if (skb->encapsulation) {
- network_hdr.raw = skb_inner_network_header(skb);
- transport_hdr.raw = skb_inner_transport_header(skb);
- vlan_macip_lens = skb_inner_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT;
- } else {
- network_hdr.raw = skb_network_header(skb);
- transport_hdr.raw = skb_transport_header(skb);
- vlan_macip_lens = skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT;
- }
-
- /* use first 4 bits to determine IP version */
- switch (network_hdr.ipv4->version) {
- case IPVERSION:
- vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
- type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
- l4_hdr = network_hdr.ipv4->protocol;
- break;
- case 6:
- vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
- l4_hdr = network_hdr.ipv6->nexthdr;
- if (likely((transport_hdr.raw - network_hdr.raw) ==
- sizeof(struct ipv6hdr)))
- break;
- ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
- sizeof(struct ipv6hdr),
- &l4_hdr, &frag_off);
- if (unlikely(frag_off))
- l4_hdr = NEXTHDR_FRAGMENT;
- break;
- default:
- break;
- }
+ goto no_csum;
+ }
- switch (l4_hdr) {
- case IPPROTO_TCP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_SCTP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- mss_l4len_idx = sizeof(struct sctphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_UDP:
- mss_l4len_idx = sizeof(struct udphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ /* fall through */
+ case offsetof(struct udphdr, check):
+ break;
+ case offsetof(struct sctphdr, checksum):
+ /* validate that this is actually an SCTP request */
+ if (((first->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+ ((first->protocol == htons(ETH_P_IPV6)) &&
+ ixgbe_ipv6_csum_is_sctp(skb))) {
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
- default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum, version=%d, l4 proto=%x\n",
- network_hdr.ipv4->version, l4_hdr);
- }
- skb_checksum_help(skb);
- goto no_csum;
}
-
- /* update TX checksum flag */
- first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ /* fall through */
+ default:
+ skb_checksum_help(skb);
+ goto csum_failed;
}
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ vlan_macip_lens = skb_checksum_start_offset(skb) -
+ skb_network_offset(skb);
no_csum:
/* vlan_macip_lens: MACLEN, VLAN tag */
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
- type_tucmd, mss_l4len_idx);
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
}
#define IXGBE_SET_FLAG(_input, _flag, _result) \
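
The rewritten ixgbe_tx_csum() above stops parsing IP and extension headers entirely: skb->csum_offset, the offset of the checksum field within the transport header, is distinct for TCP, UDP and SCTP, so an offsetof() switch identifies the protocol and anything else is punted to skb_checksum_help(). A sketch of that dispatch, using the field names glibc's netinet headers expose and a hand-rolled SCTP header as a stand-in for linux/sctp.h:

#include <stdio.h>
#include <stddef.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

struct sctphdr_sketch {		/* stand-in for the kernel's struct sctphdr */
	unsigned short source, dest;
	unsigned int vtag;
	unsigned int checksum;
};

static const char *l4_from_csum_offset(size_t csum_offset)
{
	switch (csum_offset) {
	case offsetof(struct tcphdr, check):		return "TCP";
	case offsetof(struct udphdr, check):		return "UDP";
	case offsetof(struct sctphdr_sketch, checksum):	return "SCTP";
	default:					return "software fallback";
	}
}

int main(void)
{
	printf("offset %zu -> %s\n", offsetof(struct tcphdr, check),
	       l4_from_csum_offset(offsetof(struct tcphdr, check)));
	printf("offset %zu -> %s\n", offsetof(struct udphdr, check),
	       l4_from_csum_offset(offsetof(struct udphdr, check)));
	return 0;
}
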
@@ -8238,6 +8354,134 @@ static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
return 0;
}
+#ifdef CONFIG_NET_CLS_ACT
+static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
+ u8 *queue, u64 *action)
+{
+ unsigned int num_vfs = adapter->num_vfs, vf;
+ struct net_device *upper;
+ struct list_head *iter;
+
+ /* redirect to a SRIOV VF */
+ for (vf = 0; vf < num_vfs; ++vf) {
+ upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
+ if (upper->ifindex == ifindex) {
+ if (adapter->num_rx_pools > 1)
+ *queue = vf * 2;
+ else
+ *queue = vf * adapter->num_rx_queues_per_pool;
+
+ *action = vf + 1;
+ *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ return 0;
+ }
+ }
+
+ /* redirect to an offloaded macvlan netdev */
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *dfwd = netdev_priv(upper);
+ struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+
+ if (vadapter && vadapter->netdev->ifindex == ifindex) {
+ *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
+ *action = *queue;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+ struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+ const struct tc_action *a;
+ int err;
+
+ if (tc_no_actions(exts))
+ return -EINVAL;
+
+ tc_for_each_action(a, exts) {
+
+ /* Drop action */
+ if (is_tcf_gact_shot(a)) {
+ *action = IXGBE_FDIR_DROP_QUEUE;
+ *queue = IXGBE_FDIR_DROP_QUEUE;
+ return 0;
+ }
+
+ /* Redirect to a VF or an offloaded macvlan */
+ if (is_tcf_mirred_redirect(a)) {
+ int ifindex = tcf_mirred_ifindex(a);
+
+ err = handle_redirect_action(adapter, ifindex, queue,
+ action);
+ if (err == 0)
+ return err;
+ }
+ }
+
+ return -EINVAL;
+}
+#else
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+ struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_NET_CLS_ACT */
+
+static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
+ union ixgbe_atr_input *mask,
+ struct tc_cls_u32_offload *cls,
+ struct ixgbe_mat_field *field_ptr,
+ struct ixgbe_nexthdr *nexthdr)
+{
+ int i, j, off;
+ __be32 val, m;
+ bool found_entry = false, found_jump_field = false;
+
+ for (i = 0; i < cls->knode.sel->nkeys; i++) {
+ off = cls->knode.sel->keys[i].off;
+ val = cls->knode.sel->keys[i].val;
+ m = cls->knode.sel->keys[i].mask;
+
+ for (j = 0; field_ptr[j].val; j++) {
+ if (field_ptr[j].off == off) {
+ field_ptr[j].val(input, mask, val, m);
+ input->filter.formatted.flow_type |=
+ field_ptr[j].type;
+ found_entry = true;
+ break;
+ }
+ }
+ if (nexthdr) {
+ if (nexthdr->off == cls->knode.sel->keys[i].off &&
+ nexthdr->val == cls->knode.sel->keys[i].val &&
+ nexthdr->mask == cls->knode.sel->keys[i].mask)
+ found_jump_field = true;
+ else
+ continue;
+ }
+ }
+
+ if (nexthdr && !found_jump_field)
+ return -EINVAL;
+
+ if (!found_entry)
+ return 0;
+
+ mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+
+ if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+ mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+ return 0;
+}
+
static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
__be16 protocol,
struct tc_cls_u32_offload *cls)
@@ -8245,16 +8489,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
u32 loc = cls->knode.handle & 0xfffff;
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_mat_field *field_ptr;
- struct ixgbe_fdir_filter *input;
- union ixgbe_atr_input mask;
-#ifdef CONFIG_NET_CLS_ACT
- const struct tc_action *a;
-#endif
- int i, err = 0;
+ struct ixgbe_fdir_filter *input = NULL;
+ union ixgbe_atr_input *mask = NULL;
+ struct ixgbe_jump_table *jump = NULL;
+ int i, err = -EINVAL;
u8 queue;
u32 uhtid, link_uhtid;
- memset(&mask, 0, sizeof(union ixgbe_atr_input));
uhtid = TC_U32_USERHTID(cls->knode.handle);
link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
@@ -8266,38 +8507,11 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
* headers when needed.
*/
if (protocol != htons(ETH_P_IP))
- return -EINVAL;
-
- if (link_uhtid) {
- struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
-
- if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
- return -EINVAL;
-
- if (!test_bit(link_uhtid - 1, &adapter->tables))
- return -EINVAL;
-
- for (i = 0; nexthdr[i].jump; i++) {
- if (nexthdr->o != cls->knode.sel->offoff ||
- nexthdr->s != cls->knode.sel->offshift ||
- nexthdr->m != cls->knode.sel->offmask ||
- /* do not support multiple key jumps its just mad */
- cls->knode.sel->nkeys > 1)
- return -EINVAL;
-
- if (nexthdr->off != cls->knode.sel->keys[0].off ||
- nexthdr->val != cls->knode.sel->keys[0].val ||
- nexthdr->mask != cls->knode.sel->keys[0].mask)
- return -EINVAL;
-
- adapter->jump_tables[link_uhtid] = nexthdr->jump;
- }
- return 0;
- }
+ return err;
if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
e_err(drv, "Location out of range\n");
- return -EINVAL;
+ return err;
}
/* cls u32 is a graph starting at root node 0x800. The driver tracks
@@ -8308,87 +8522,123 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
* this function _should_ be generic try not to hardcode values here.
*/
if (uhtid == 0x800) {
- field_ptr = adapter->jump_tables[0];
+ field_ptr = (adapter->jump_tables[0])->mat;
} else {
if (uhtid >= IXGBE_MAX_LINK_HANDLE)
- return -EINVAL;
-
- field_ptr = adapter->jump_tables[uhtid];
+ return err;
+ if (!adapter->jump_tables[uhtid])
+ return err;
+ field_ptr = (adapter->jump_tables[uhtid])->mat;
}
if (!field_ptr)
- return -EINVAL;
+ return err;
- input = kzalloc(sizeof(*input), GFP_KERNEL);
- if (!input)
- return -ENOMEM;
+ /* At this point we know the field_ptr is valid and need to either
+ * build a cls_u32 link or attach a filter. Adding a link to a
+ * handle that does not exist is invalid, and so is adding rules
+ * to handles that don't exist.
+ */
- for (i = 0; i < cls->knode.sel->nkeys; i++) {
- int off = cls->knode.sel->keys[i].off;
- __be32 val = cls->knode.sel->keys[i].val;
- __be32 m = cls->knode.sel->keys[i].mask;
- bool found_entry = false;
- int j;
+ if (link_uhtid) {
+ struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
- for (j = 0; field_ptr[j].val; j++) {
- if (field_ptr[j].off == off) {
- field_ptr[j].val(input, &mask, val, m);
- input->filter.formatted.flow_type |=
- field_ptr[j].type;
- found_entry = true;
+ if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
+ return err;
+
+ if (!test_bit(link_uhtid - 1, &adapter->tables))
+ return err;
+
+ for (i = 0; nexthdr[i].jump; i++) {
+ if (nexthdr[i].o != cls->knode.sel->offoff ||
+ nexthdr[i].s != cls->knode.sel->offshift ||
+ nexthdr[i].m != cls->knode.sel->offmask)
+ return err;
+
+ jump = kzalloc(sizeof(*jump), GFP_KERNEL);
+ if (!jump)
+ return -ENOMEM;
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input) {
+ err = -ENOMEM;
+ goto free_jump;
+ }
+ mask = kzalloc(sizeof(*mask), GFP_KERNEL);
+ if (!mask) {
+ err = -ENOMEM;
+ goto free_input;
+ }
+ jump->input = input;
+ jump->mask = mask;
+ err = ixgbe_clsu32_build_input(input, mask, cls,
+ field_ptr, &nexthdr[i]);
+ if (!err) {
+ jump->mat = nexthdr[i].jump;
+ adapter->jump_tables[link_uhtid] = jump;
break;
}
}
-
- if (!found_entry)
- goto err_out;
+ return 0;
}
- mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
- IXGBE_ATR_L4TYPE_MASK;
-
- if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
- mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+ mask = kzalloc(sizeof(*mask), GFP_KERNEL);
+ if (!mask) {
+ err = -ENOMEM;
+ goto free_input;
+ }
-#ifdef CONFIG_NET_CLS_ACT
- if (list_empty(&cls->knode.exts->actions))
+ if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
+ if ((adapter->jump_tables[uhtid])->input)
+ memcpy(input, (adapter->jump_tables[uhtid])->input,
+ sizeof(*input));
+ if ((adapter->jump_tables[uhtid])->mask)
+ memcpy(mask, (adapter->jump_tables[uhtid])->mask,
+ sizeof(*mask));
+ }
+ err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
+ if (err)
goto err_out;
- list_for_each_entry(a, &cls->knode.exts->actions, list) {
- if (!is_tcf_gact_shot(a))
- goto err_out;
- }
-#endif
+ err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
+ &queue);
+ if (err < 0)
+ goto err_out;
- input->action = IXGBE_FDIR_DROP_QUEUE;
- queue = IXGBE_FDIR_DROP_QUEUE;
input->sw_idx = loc;
spin_lock(&adapter->fdir_perfect_lock);
if (hlist_empty(&adapter->fdir_filter_list)) {
- memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
- err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+ memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
+ err = ixgbe_fdir_set_input_mask_82599(hw, mask);
if (err)
goto err_out_w_lock;
- } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+ } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
err = -EINVAL;
goto err_out_w_lock;
}
- ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+ ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
input->sw_idx, queue);
if (!err)
ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
spin_unlock(&adapter->fdir_perfect_lock);
+ kfree(mask);
return err;
err_out_w_lock:
spin_unlock(&adapter->fdir_perfect_lock);
err_out:
+ kfree(mask);
+free_input:
kfree(input);
- return -EINVAL;
+free_jump:
+ kfree(jump);
+ return err;
}
static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
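
The clsu32 rework above moves the filter input, mask and jump table onto the heap and unwinds failures through cascading labels, so each exit frees exactly what was allocated before the failure point. A stand-alone sketch of that error-path pattern with placeholder types; the matching teardown in main() mirrors the loop the patch adds to ixgbe_remove():

#include <stdlib.h>
#include <errno.h>

struct input { int sw_idx; };
struct mask  { int flow_type; };
struct jump  { struct input *input; struct mask *mask; };

static int build_filter(struct jump **out)
{
	struct jump *jump;
	struct input *input;
	struct mask *mask;
	int err;

	jump = calloc(1, sizeof(*jump));
	if (!jump)
		return -ENOMEM;
	input = calloc(1, sizeof(*input));
	if (!input) {
		err = -ENOMEM;
		goto free_jump;
	}
	mask = calloc(1, sizeof(*mask));
	if (!mask) {
		err = -ENOMEM;
		goto free_input;
	}

	jump->input = input;
	jump->mask = mask;
	*out = jump;
	return 0;

free_input:
	free(input);
free_jump:
	free(jump);
	return err;
}

int main(void)
{
	struct jump *j = NULL;

	if (build_filter(&j))
		return 1;
	free(j->input);		/* teardown order as in ixgbe_remove() */
	free(j->mask);
	free(j);
	return 0;
}
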
@@ -8515,11 +8765,6 @@ static int ixgbe_set_features(struct net_device *netdev,
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
}
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- ixgbe_vlan_strip_enable(adapter);
- else
- ixgbe_vlan_strip_disable(adapter);
-
if (changed & NETIF_F_RXALL)
need_reset = true;
@@ -8536,6 +8781,9 @@ static int ixgbe_set_features(struct net_device *netdev,
if (need_reset)
ixgbe_do_reset(netdev);
+ else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER))
+ ixgbe_set_rx_mode(netdev);
return 0;
}
@@ -8833,17 +9081,36 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
kfree(fwd_adapter);
}
-#define IXGBE_MAX_TUNNEL_HDR_LEN 80
+#define IXGBE_MAX_MAC_HDR_LEN 127
+#define IXGBE_MAX_NETWORK_HDR_LEN 511
+
static netdev_features_t
ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
{
- if (!skb->encapsulation)
- return features;
-
- if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
- IXGBE_MAX_TUNNEL_HDR_LEN))
- return features & ~NETIF_F_CSUM_MASK;
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
return features;
}
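
The new ixgbe_features_check() derives the MAC and network header lengths from skb offsets and strips any offload the hardware context descriptor cannot describe, per the 127- and 511-byte limits defined just above it. A sketch of the guard with simplified feature flags; the flag names here are illustrative, not the kernel's netdev feature bits:

#include <stdio.h>
#include <stdint.h>

#define MAX_MAC_HDR_LEN     127
#define MAX_NETWORK_HDR_LEN 511

#define F_HW_CSUM (1u << 0)
#define F_TSO     (1u << 1)
#define F_VLAN_TX (1u << 2)

static uint32_t features_check(unsigned int mac_hdr_len,
			       unsigned int network_hdr_len,
			       uint32_t features)
{
	if (mac_hdr_len > MAX_MAC_HDR_LEN)
		features &= ~(F_HW_CSUM | F_TSO | F_VLAN_TX);
	if (network_hdr_len > MAX_NETWORK_HDR_LEN)
		features &= ~(F_HW_CSUM | F_TSO);
	return features;
}

int main(void)
{
	uint32_t all = F_HW_CSUM | F_TSO | F_VLAN_TX;

	printf("normal frame: %#x\n", features_check(14, 20, all));  /* 0x7 */
	printf("oversized L2: %#x\n", features_check(200, 20, all)); /* 0x0 */
	printf("oversized L3: %#x\n", features_check(14, 600, all)); /* 0x4 */
	return 0;
}
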
@@ -8858,6 +9125,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_mac_address = ixgbe_set_mac,
.ndo_change_mtu = ixgbe_change_mtu,
.ndo_tx_timeout = ixgbe_tx_timeout,
+ .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
.ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
.ndo_do_ioctl = ixgbe_ioctl,
@@ -8943,7 +9211,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
/**
* ixgbe_wol_supported - Check whether device supports WoL
- * @hw: hw specific details
+ * @adapter: the adapter private structure
* @device_id: the device ID
* @subdev_id: the subsystem device ID
*
@@ -8951,19 +9219,33 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
* which devices have WoL support
*
**/
-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
- u16 subdevice_id)
+bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+ u16 subdevice_id)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
- int is_wol_supported = 0;
+ /* WOL not supported on 82598 */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return false;
+
+ /* check eeprom to see if WOL is enabled for X540 and newer */
+ if (hw->mac.type >= ixgbe_mac_X540) {
+ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+ ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+ (hw->bus.func == 0)))
+ return true;
+ }
+
+ /* WOL is determined based on device IDs for 82599 MACs */
switch (device_id) {
case IXGBE_DEV_ID_82599_SFP:
/* Only these subdevices could support WOL */
switch (subdevice_id) {
- case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
case IXGBE_SUBDEV_ID_82599_560FLR:
+ case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
+ case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
+ case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
/* only support first port */
if (hw->bus.func != 0)
break;
@@ -8971,66 +9253,31 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
case IXGBE_SUBDEV_ID_82599_SFP:
case IXGBE_SUBDEV_ID_82599_RNDC:
case IXGBE_SUBDEV_ID_82599_ECNA_DP:
- case IXGBE_SUBDEV_ID_82599_LOM_SFP:
- is_wol_supported = 1;
- break;
+ case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
+ case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
+ case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
+ return true;
}
break;
case IXGBE_DEV_ID_82599EN_SFP:
- /* Only this subdevice supports WOL */
+ /* Only these subdevices support WOL */
switch (subdevice_id) {
case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
- is_wol_supported = 1;
- break;
+ return true;
}
break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */
if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
- is_wol_supported = 1;
+ return true;
break;
case IXGBE_DEV_ID_82599_KX4:
- is_wol_supported = 1;
- break;
- case IXGBE_DEV_ID_X540T:
- case IXGBE_DEV_ID_X540T1:
- case IXGBE_DEV_ID_X550T:
- case IXGBE_DEV_ID_X550EM_X_KX4:
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_X_10G_T:
- /* check eeprom to see if enabled wol */
- if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
- ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
- (hw->bus.func == 0))) {
- is_wol_supported = 1;
- }
+ return true;
+ default:
break;
}
- return is_wol_supported;
-}
-
-/**
- * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
- * @adapter: Pointer to adapter struct
- */
-static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
-{
-#ifdef CONFIG_OF
- struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
- struct ixgbe_hw *hw = &adapter->hw;
- const unsigned char *addr;
-
- addr = of_get_mac_address(dp);
- if (addr) {
- ether_addr_copy(hw->mac.perm_addr, addr);
- return;
- }
-#endif /* CONFIG_OF */
-
-#ifdef CONFIG_SPARC
- ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
-#endif /* CONFIG_SPARC */
+ return false;
}
/**
@@ -9136,23 +9383,23 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
/* Setup hw api */
- memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+ hw->mac.ops = *ii->mac_ops;
hw->mac.type = ii->mac;
hw->mvals = ii->mvals;
/* EEPROM */
- memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
+ hw->eeprom.ops = *ii->eeprom_ops;
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (ixgbe_removed(hw->hw_addr)) {
err = -EIO;
goto err_ioremap;
}
/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
- if (!(eec & (1 << 8)))
+ if (!(eec & BIT(8)))
hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
/* PHY */
- memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
+ hw->phy.ops = *ii->phy_ops;
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
/* ixgbe_identify_phy_generic will set prtad and mmds properly */
hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
@@ -9169,12 +9416,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ /* Make sure the SWFW semaphore is in a valid state */
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+
/* Make it possible the adapter to be woken up via WOL */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
break;
default:
@@ -9215,54 +9467,62 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto skip_sriov;
/* Mailbox */
ixgbe_init_mbx_params_pf(hw);
- memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+ hw->mbx.ops = ii->mbx_ops;
pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
ixgbe_enable_sriov(adapter);
skip_sriov:
#endif
netdev->features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXHASH |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM;
- netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
+#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
+ netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL |
+ IXGBE_GSO_PARTIAL_FEATURES;
+
+ if (hw->mac.type >= ixgbe_mac_82599EB)
netdev->features |= NETIF_F_SCTP_CRC;
- netdev->hw_features |= NETIF_F_SCTP_CRC |
- NETIF_F_NTUPLE |
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_RXALL |
+ NETIF_F_HW_L2FW_DOFFLOAD;
+
+ if (hw->mac.type >= ixgbe_mac_82599EB)
+ netdev->hw_features |= NETIF_F_NTUPLE |
NETIF_F_HW_TC;
- break;
- default:
- break;
- }
- netdev->hw_features |= NETIF_F_RXALL;
- netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_TSO6;
- netdev->vlan_features |= NETIF_F_IP_CSUM;
- netdev->vlan_features |= NETIF_F_IPV6_CSUM;
- netdev->vlan_features |= NETIF_F_SG;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+ netdev->hw_enc_features |= netdev->vlan_features;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
- netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
#ifdef CONFIG_IXGBE_DCB
- netdev->dcbnl_ops = &dcbnl_ops;
+ if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
+ netdev->dcbnl_ops = &dcbnl_ops;
#endif
#ifdef IXGBE_FCOE
@@ -9287,10 +9547,6 @@ skip_sriov:
NETIF_F_FCOE_MTU;
}
#endif /* IXGBE_FCOE */
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
netdev->hw_features |= NETIF_F_LRO;
@@ -9304,7 +9560,8 @@ skip_sriov:
goto err_sw_init;
}
- ixgbe_get_platform_mac_addr(adapter);
+ eth_platform_get_mac_address(&adapter->pdev->dev,
+ adapter->hw.mac.perm_addr);
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
@@ -9455,6 +9712,7 @@ err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
+ kfree(adapter->jump_tables[0]);
kfree(adapter->mac_table);
err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
@@ -9483,6 +9741,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev;
bool disable_dev;
+ int i;
/* if !adapter then we already cleaned up in probe */
if (!adapter)
@@ -9532,6 +9791,14 @@ static void ixgbe_remove(struct pci_dev *pdev)
e_dev_info("complete\n");
+ for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
+ if (adapter->jump_tables[i]) {
+ kfree(adapter->jump_tables[i]->input);
+ kfree(adapter->jump_tables[i]->mask);
+ }
+ kfree(adapter->jump_tables[i]);
+ }
+
kfree(adapter->mac_table);
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
@@ -9612,6 +9879,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
case ixgbe_mac_X550EM_x:
device_id = IXGBE_DEV_ID_X550EM_X_VF;
break;
+ case ixgbe_mac_x550em_a:
+ device_id = IXGBE_DEV_ID_X550EM_A_VF;
+ break;
default:
device_id = 0;
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 9993a471d..a0cb84381 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -48,10 +48,10 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
if (size > mbx->size)
size = mbx->size;
- if (!mbx->ops.read)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.read(hw, msg, size, mbx_id);
+ return mbx->ops->read(hw, msg, size, mbx_id);
}
/**
@@ -70,10 +70,10 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
if (size > mbx->size)
return IXGBE_ERR_MBX;
- if (!mbx->ops.write)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.write(hw, msg, size, mbx_id);
+ return mbx->ops->write(hw, msg, size, mbx_id);
}
/**
@@ -87,10 +87,10 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (!mbx->ops.check_for_msg)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.check_for_msg(hw, mbx_id);
+ return mbx->ops->check_for_msg(hw, mbx_id);
}
/**
@@ -104,10 +104,10 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (!mbx->ops.check_for_ack)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.check_for_ack(hw, mbx_id);
+ return mbx->ops->check_for_ack(hw, mbx_id);
}
/**
@@ -121,10 +121,10 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (!mbx->ops.check_for_rst)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.check_for_rst(hw, mbx_id);
+ return mbx->ops->check_for_rst(hw, mbx_id);
}
/**
@@ -139,10 +139,10 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
- if (!countdown || !mbx->ops.check_for_msg)
+ if (!countdown || !mbx->ops)
return IXGBE_ERR_MBX;
- while (mbx->ops.check_for_msg(hw, mbx_id)) {
+ while (mbx->ops->check_for_msg(hw, mbx_id)) {
countdown--;
if (!countdown)
return IXGBE_ERR_MBX;
@@ -164,10 +164,10 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
- if (!countdown || !mbx->ops.check_for_ack)
+ if (!countdown || !mbx->ops)
return IXGBE_ERR_MBX;
- while (mbx->ops.check_for_ack(hw, mbx_id)) {
+ while (mbx->ops->check_for_ack(hw, mbx_id)) {
countdown--;
if (!countdown)
return IXGBE_ERR_MBX;
@@ -193,7 +193,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val;
- if (!mbx->ops.read)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
ret_val = ixgbe_poll_for_msg(hw, mbx_id);
@@ -201,7 +201,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
return ret_val;
/* if ack received read message */
- return mbx->ops.read(hw, msg, size, mbx_id);
+ return mbx->ops->read(hw, msg, size, mbx_id);
}
/**
@@ -221,11 +221,11 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
s32 ret_val;
/* exit if either we can't write or there isn't a defined timeout */
- if (!mbx->ops.write || !mbx->timeout)
+ if (!mbx->ops || !mbx->timeout)
return IXGBE_ERR_MBX;
/* send msg */
- ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+ ret_val = mbx->ops->write(hw, msg, size, mbx_id);
if (ret_val)
return ret_val;
@@ -307,14 +307,15 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
default:
break;
}
- if (vflre & (1 << vf_shift)) {
- IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ if (vflre & BIT(vf_shift)) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift));
hw->mbx.stats.rsts++;
return 0;
}
@@ -430,6 +431,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
if (hw->mac.type != ixgbe_mac_82599EB &&
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_x550em_a &&
hw->mac.type != ixgbe_mac_X540)
return;
@@ -446,7 +448,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
}
#endif /* CONFIG_PCI_IOV */
-struct ixgbe_mbx_operations mbx_ops_generic = {
+const struct ixgbe_mbx_operations mbx_ops_generic = {
.read = ixgbe_read_mbx_pf,
.write = ixgbe_write_mbx_pf,
.read_posted = ixgbe_read_posted_mbx,
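
Converting mbx.ops from an embedded struct to a const pointer (see the ixgbe_type.h hunk later in this patch) lets every wrapper above replace per-callback NULL checks with a single ops-pointer check, and lets the table live in read-only data. A minimal sketch of the resulting guard pattern, with types assumed from the surrounding diff:

	static s32 example_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
				    u16 mbx_id)
	{
		struct ixgbe_mbx_info *mbx = &hw->mbx;

		/* one check covers read/write/check_for_* alike */
		if (!mbx->ops)
			return IXGBE_ERR_MBX;
		return mbx->ops->read(hw, msg, size, mbx_id);
	}
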
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 8daa95f74..01c2667c0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -123,6 +123,6 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
#endif /* CONFIG_PCI_IOV */
-extern struct ixgbe_mbx_operations mbx_ops_generic;
+extern const struct ixgbe_mbx_operations mbx_ops_generic;
#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index 74c53ad9d..a8bed3d88 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -38,6 +38,12 @@ struct ixgbe_mat_field {
unsigned int type;
};
+struct ixgbe_jump_table {
+ struct ixgbe_mat_field *mat;
+ struct ixgbe_fdir_filter *input;
+ union ixgbe_atr_input *mask;
+};
+
static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m)
@@ -82,6 +88,12 @@ static struct ixgbe_mat_field ixgbe_tcp_fields[] = {
{ .val = NULL } /* terminal node */
};
+static struct ixgbe_mat_field ixgbe_udp_fields[] = {
+ {.off = 0, .val = ixgbe_mat_prgm_ports,
+ .type = IXGBE_ATR_FLOW_TYPE_UDPV4},
+ { .val = NULL } /* terminal node */
+};
+
struct ixgbe_nexthdr {
/* offset, shift, and mask of position to next header */
unsigned int o;
@@ -98,6 +110,8 @@ struct ixgbe_nexthdr {
static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = {
{ .o = 0, .s = 6, .m = 0xf,
.off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields},
+ { .o = 0, .s = 6, .m = 0xf,
+ .off = 8, .val = 0x1100, .mask = 0xff00, .jump = ixgbe_udp_fields},
{ .jump = NULL } /* terminal node */
};
#endif /* _IXGBE_MODEL_H_ */
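
The ixgbe_ipv4_jumps[] table drives the cls_u32 hardware offload: each entry reads the IPv4 header length, then matches the masked protocol value (0x06 for TCP, and with this patch 0x11 for UDP) to select the next field table. A purely hypothetical sketch of how a further protocol could be wired in, following the same shape; ixgbe_sctp_fields[] does not exist in this patch and hardware support is not implied:

	static struct ixgbe_mat_field ixgbe_sctp_fields[] = {
		{ .off = 0, .val = ixgbe_mat_prgm_ports,
		  .type = IXGBE_ATR_FLOW_TYPE_SCTPV4 },
		{ .val = NULL } /* terminal node */
	};

	/* corresponding ixgbe_ipv4_jumps[] entry; 0x84 is IPPROTO_SCTP
	 * { .o = 0, .s = 6, .m = 0xf,
	 *   .off = 8, .val = 0x8400, .mask = 0xff00,
	 *   .jump = ixgbe_sctp_fields },
	 */
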
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 5abd66c84..cc735ec3e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -81,7 +81,11 @@
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
#define IXGBE_CS4227 0xBE /* CS4227 address */
+#define IXGBE_CS4227_GLOBAL_ID_LSB 0
+#define IXGBE_CS4227_GLOBAL_ID_MSB 1
#define IXGBE_CS4227_SCRATCH 2
+#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */
+#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */
#define IXGBE_CS4227_RESET_PENDING 0x1357
#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
#define IXGBE_CS4227_RETRIES 15
@@ -103,7 +107,7 @@
#define IXGBE_PE 0xE0 /* Port expander addr */
#define IXGBE_PE_OUTPUT 1 /* Output reg offset */
#define IXGBE_PE_CONFIG 3 /* Config reg offset */
-#define IXGBE_PE_BIT1 (1 << 1)
+#define IXGBE_PE_BIT1 BIT(1)
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
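
The wholesale (1 << n) to BIT(n) conversion running through this patch is not cosmetic: BIT(n) expands to an unsigned shift (1UL << (n)), so setting bit 31 of a u32 register stays well-defined, whereas (1 << 31) shifts a signed int into its sign bit, which is undefined behavior. A minimal illustration:

	#include <linux/bitops.h>	/* BIT(nr) is an unsigned shift */

	static u32 example_set_msb(u32 reg)
	{
		reg |= BIT(31);		/* OK: unsigned arithmetic */
		/* reg |= 1 << 31;	   undefined: signed overflow */
		return reg;
	}
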
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ef1504d41..e5431bfe3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -333,6 +333,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
*/
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -395,7 +396,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
if (incval > 0x00FFFFFFULL)
e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
- (1 << IXGBE_INCPER_SHIFT_82599) |
+ BIT(IXGBE_INCPER_SHIFT_82599) |
((u32)incval & 0x00FFFFFFUL));
break;
default:
@@ -921,6 +922,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
switch (hw->mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/* enable timestamping all packets only if at least some
* packets were requested. Otherwise, play nice and disable
* timestamping
@@ -1083,6 +1085,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
cc.shift = 2;
}
/* fallthrough */
+ case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
cc.read = ixgbe_ptp_read_X550;
@@ -1111,7 +1114,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
incval >>= IXGBE_INCVAL_SHIFT_82599;
cc.shift -= IXGBE_INCVAL_SHIFT_82599;
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
- (1 << IXGBE_INCPER_SHIFT_82599) | incval);
+ BIT(IXGBE_INCPER_SHIFT_82599) | incval);
break;
default:
/* other devices aren't supported */
@@ -1223,6 +1226,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 30000000;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 8025a3f93..c5caacdd1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -406,7 +406,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
- mta_reg |= (1 << vector_bit);
+ mta_reg |= BIT(vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
vmolr |= IXGBE_VMOLR_ROMPE;
@@ -433,7 +433,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
- mta_reg |= (1 << vector_bit);
+ mta_reg |= BIT(vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
@@ -536,9 +536,9 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* enable or disable receive depending on error */
vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
if (err)
- vfre &= ~(1 << vf_shift);
+ vfre &= ~BIT(vf_shift);
else
- vfre |= 1 << vf_shift;
+ vfre |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
if (err) {
@@ -589,47 +589,47 @@ static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 i;
+ u32 vlvfb_mask, pool_mask, i;
+
+ /* create mask for VF and other pools */
+ pool_mask = ~BIT(VMDQ_P(0) % 32);
+ vlvfb_mask = BIT(vf % 32);
/* post increment loop, covers VLVF_ENTRIES - 1 to 0 */
for (i = IXGBE_VLVF_ENTRIES; i--;) {
u32 bits[2], vlvfb, vid, vfta, vlvf;
u32 word = i * 2 + vf / 32;
- u32 mask = 1 << (vf % 32);
+ u32 mask;
vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
/* if our bit isn't set we can skip it */
- if (!(vlvfb & mask))
+ if (!(vlvfb & vlvfb_mask))
continue;
/* clear our bit from vlvfb */
- vlvfb ^= mask;
+ vlvfb ^= vlvfb_mask;
/* create 64b mask to check whether we should clear VLVF */
bits[word % 2] = vlvfb;
bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));
- /* if promisc is enabled, PF will be present, leave VFTA */
- if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) {
- bits[VMDQ_P(0) / 32] &= ~(1 << (VMDQ_P(0) % 32));
-
- if (bits[0] || bits[1])
- goto update_vlvfb;
- goto update_vlvf;
- }
-
/* if other pools are present, just remove ourselves */
- if (bits[0] || bits[1])
+ if (bits[(VMDQ_P(0) / 32) ^ 1] ||
+ (bits[VMDQ_P(0) / 32] & pool_mask))
goto update_vlvfb;
+ /* if PF is present, leave VFTA */
+ if (bits[0] || bits[1])
+ goto update_vlvf;
+
/* if we cannot determine VLAN just remove ourselves */
vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
if (!vlvf)
goto update_vlvfb;
vid = vlvf & VLAN_VID_MASK;
- mask = 1 << (vid % 32);
+ mask = BIT(vid % 32);
/* clear bit from VFTA */
vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
@@ -638,6 +638,9 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
update_vlvf:
/* clear POOL selection enable */
IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+ vlvfb = 0;
update_vlvfb:
/* clear pool bits */
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
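
The reworked ixgbe_clear_vf_vlans() above leans on the VLVF/VLVFB layout: each of the IXGBE_VLVF_ENTRIES VLAN filter slots owns two consecutive 32-bit pool-enable words, so a pool's bit lives at word i * 2 + pool / 32, bit pool % 32. A small helper pair mirroring that math (hypothetical names, for illustration only):

	/* which VLVFB word holds this pool's enable bit for entry i */
	static u32 example_vlvfb_word(u32 entry, u32 pool)
	{
		return entry * 2 + pool / 32;
	}

	/* the pool's bit within that word */
	static u32 example_vlvfb_bit(u32 pool)
	{
		return BIT(pool % 32);
	}
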
@@ -810,7 +813,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* enable transmit for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
- reg |= 1 << vf_shift;
+ reg |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
/* force drop enable for all VF Rx queues */
@@ -818,7 +821,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
- reg |= 1 << vf_shift;
+ reg |= BIT(vf_shift);
/*
* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
* For more info take a look at ixgbe_set_vf_lpe
@@ -834,7 +837,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
#endif /* CONFIG_FCOE */
if (pf_max_frame > ETH_FRAME_LEN)
- reg &= ~(1 << vf_shift);
+ reg &= ~BIT(vf_shift);
}
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
@@ -843,7 +846,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* Enable counting of spoofed packets in the SSVPC register */
reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
- reg |= (1 << vf_shift);
+ reg |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
/*
@@ -887,7 +890,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
return -1;
}
- if (adapter->vfinfo[vf].pf_set_mac &&
+ if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
!ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
e_warn(drv,
"VF %d attempted to override administratively set MAC address\n"
@@ -905,8 +908,6 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
u8 tcs = netdev_get_num_tc(adapter->netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- int err;
if (adapter->vfinfo[vf].pf_vlan || tcs) {
e_warn(drv,
@@ -920,19 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
if (!vid && !add)
return 0;
- err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
- if (err)
- return err;
-
- if (adapter->vfinfo[vf].spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-
- if (add)
- adapter->vfinfo[vf].vlan_count++;
- else if (adapter->vfinfo[vf].vlan_count)
- adapter->vfinfo[vf].vlan_count--;
-
- return 0;
+ return ixgbe_set_vf_vlan(adapter, add, vid, vf);
}
static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
@@ -961,8 +950,11 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
* If the VF is allowed to set MAC filters then turn off
* anti-spoofing to avoid false positives.
*/
- if (adapter->vfinfo[vf].spoofchk_enabled)
- ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+ if (adapter->vfinfo[vf].spoofchk_enabled) {
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
+ }
}
err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
@@ -1318,9 +1310,6 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
ixgbe_set_vmvir(adapter, vlan, qos, vf);
ixgbe_set_vmolr(hw, vf, false);
- if (adapter->vfinfo[vf].spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
- adapter->vfinfo[vf].vlan_count++;
/* enable hide vlan on X550 */
if (hw->mac.type >= ixgbe_mac_X550)
@@ -1353,9 +1342,6 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
ixgbe_set_vf_vlan(adapter, true, 0, vf);
ixgbe_clear_vmvir(adapter, vf);
ixgbe_set_vmolr(hw, vf, true);
- hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
- if (adapter->vfinfo[vf].vlan_count)
- adapter->vfinfo[vf].vlan_count--;
/* disable hide VLAN on X550 */
if (hw->mac.type >= ixgbe_mac_X550)
@@ -1395,7 +1381,7 @@ out:
return err;
}
-static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
+int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
{
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_100_FULL:
@@ -1522,27 +1508,34 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int vf_target_reg = vf >> 3;
- int vf_target_shift = vf % 8;
struct ixgbe_hw *hw = &adapter->hw;
- u32 regval;
if (vf >= adapter->num_vfs)
return -EINVAL;
adapter->vfinfo[vf].spoofchk_enabled = setting;
- regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
- regval &= ~(1 << vf_target_shift);
- regval |= (setting << vf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
-
- if (adapter->vfinfo[vf].vlan_count) {
- vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
- regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
- regval &= ~(1 << vf_target_shift);
- regval |= (setting << vf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
+ /* configure MAC spoofing */
+ hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
+
+ /* configure VLAN spoofing */
+ hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
+
+ /* Ensure the LLDP and FC ethertype filters are set up for Ethertype
+ * Antispoofing before calling set_ethertype_anti_spoofing below
+ */
+ if (hw->mac.ops.set_ethertype_anti_spoofing) {
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
+ (IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ IXGBE_ETH_P_LLDP));
+
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
+ (IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ ETH_P_PAUSE));
+
+ hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
}
return 0;
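
The reworked spoof-check path above now also programs two ethertype filters so LLDP and pause frames from a VF can be policed when ethertype antispoofing is supported; an ETQF value is simply filter-enable | tx-antispoof | ethertype. A sketch of the composition for the LLDP slot, using the same constants as the hunk above:

	u32 etqf = IXGBE_ETQF_FILTER_EN |	/* filter slot active */
		   IXGBE_ETQF_TX_ANTISPOOF |	/* police VF transmit */
		   IXGBE_ETH_P_LLDP;		/* LLDP ethertype */

	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), etqf);
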
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index dad925706..47e65e2f8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -44,6 +44,7 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
+int ixgbe_link_mbps(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate);
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index bf7367a08..da3d8358f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -59,8 +59,12 @@
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
+#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159
+#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D
+#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE
#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
-#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
@@ -75,21 +79,25 @@
#define IXGBE_DEV_ID_X540T1 0x1560
#define IXGBE_DEV_ID_X550T 0x1563
+#define IXGBE_DEV_ID_X550T1 0x15D1
#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
-#define IXGBE_DEV_ID_X550_VF_HV 0x1564
-#define IXGBE_DEV_ID_X550_VF 0x1565
-#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
-#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2
+#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3
+#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
+#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6
+#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7
+#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
/* VF Device IDs */
-#define IXGBE_DEV_ID_82599_VF 0x10ED
-#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
#define IXGBE_CAT(r, m) IXGBE_##r##_##m
@@ -128,7 +136,7 @@
#define IXGBE_FLA_X540 IXGBE_FLA_8259X
#define IXGBE_FLA_X550 IXGBE_FLA_8259X
#define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X
-#define IXGBE_FLA_X550EM_a 0x15F6C
+#define IXGBE_FLA_X550EM_a 0x15F68
#define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA)
#define IXGBE_EEMNGCTL 0x10110
#define IXGBE_EEMNGDATA 0x10114
@@ -143,13 +151,6 @@
#define IXGBE_GRC_X550EM_a 0x15F64
#define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC)
-#define IXGBE_SRAMREL_8259X 0x10210
-#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550EM_a 0x15F6C
-#define IXGBE_SRAMREL(_hw) IXGBE_BY_MAC((_hw), SRAMREL)
-
/* General Receive Control */
#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
@@ -375,6 +376,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */
+#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */
#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
#define IXGBE_RXFECCERR0 0x051B8
#define IXGBE_LLITHRESH 0x0EC90
@@ -446,6 +449,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */
+#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */
#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
@@ -543,6 +548,7 @@ struct ixgbe_thermal_sensor_data {
/* DCB registers */
#define MAX_TRAFFIC_CLASS 8
#define X540_TRAFFIC_CLASS 4
+#define DEF_TRAFFIC_CLASS 1
#define IXGBE_RMCS 0x03D00
#define IXGBE_DPMCS 0x07F40
#define IXGBE_PDPMCS 0x0CD00
@@ -554,7 +560,6 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
-
/* Security Control Registers */
#define IXGBE_SECTXCTRL 0x08800
#define IXGBE_SECTXSTAT 0x08804
@@ -693,16 +698,16 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4))
-#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
-#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
-#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_VALID BIT(0) /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE (3u << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX BIT(7) /* 0: Initiator, 1: Target */
#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
#define IXGBE_FCBUFF_OFFSET_SHIFT 16
-#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
-#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
+#define IXGBE_FCDMARW_WE BIT(14) /* Write enable */
+#define IXGBE_FCDMARW_RE BIT(15) /* Read enable */
#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
@@ -719,23 +724,23 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
-#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
-#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
+#define IXGBE_FCFLT_VALID BIT(0) /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST BIT(1) /* Filter First */
#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
-#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
-#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
-#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
+#define IXGBE_FCFLTRW_RVALDT BIT(13) /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE BIT(14) /* Write Enable */
+#define IXGBE_FCFLTRW_RE BIT(15) /* Read Enable */
/* FCoE Receive Control */
#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
-#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
-#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
-#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
-#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
-#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
-#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
-#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
-#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOELLI BIT(0) /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD BIT(1) /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH BIT(2) /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH BIT(3) /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH BIT(4) /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH BIT(5) /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC BIT(6) /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO BIT(7) /* FC CRC Byte Ordering */
#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
/* FCoE Redirection */
@@ -1056,15 +1061,9 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
#define IXGBE_TDPROBE 0x07F20
#define IXGBE_TXBUFCTRL 0x0C600
-#define IXGBE_TXBUFDATA0 0x0C610
-#define IXGBE_TXBUFDATA1 0x0C614
-#define IXGBE_TXBUFDATA2 0x0C618
-#define IXGBE_TXBUFDATA3 0x0C61C
+#define IXGBE_TXBUFDATA(_i) (0x0C610 + ((_i) * 4)) /* 4 of these (0-3) */
#define IXGBE_RXBUFCTRL 0x03600
-#define IXGBE_RXBUFDATA0 0x03610
-#define IXGBE_RXBUFDATA1 0x03614
-#define IXGBE_RXBUFDATA2 0x03618
-#define IXGBE_RXBUFDATA3 0x0361C
+#define IXGBE_RXBUFDATA(_i) (0x03610 + ((_i) * 4)) /* 4 of these (0-3) */
#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
#define IXGBE_RFVAL 0x050A4
#define IXGBE_MDFTC1 0x042B8
@@ -1127,6 +1126,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_XPCSS 0x04290
#define IXGBE_MFLCN 0x04294
#define IXGBE_SERDESC 0x04298
+#define IXGBE_MAC_SGMII_BUSY 0x04298
#define IXGBE_MACS 0x0429C
#define IXGBE_AUTOC 0x042A0
#define IXGBE_LINKS 0x042A4
@@ -1203,6 +1203,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
+#define IXGBE_RDRXCTL_MBINTEN 0x10000000
+#define IXGBE_RDRXCTL_MDP_EN 0x20000000
/* RQTC Bit Masks and Shifts */
#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
@@ -1249,20 +1251,20 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */
#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
/* MSCA Bit Masks */
@@ -1309,6 +1311,7 @@ struct ixgbe_thermal_sensor_data {
/* MDIO definitions */
+#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0
#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
@@ -1740,7 +1743,7 @@ enum {
#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */
#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
-#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_ENABLE BIT(26) /* bit 26 */
#define IXGBE_ETQF_POOL_SHIFT 20
#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
@@ -1866,20 +1869,20 @@ enum {
#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
-#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_XAUI (0u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4 (1u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4 (2u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000
#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000
#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
-#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_KR (0u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI (1u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI (2u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000
#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
@@ -1957,7 +1960,9 @@ enum {
#define IXGBE_GSSR_PHY1_SM 0x0004
#define IXGBE_GSSR_MAC_CSR_SM 0x0008
#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200
#define IXGBE_GSSR_SW_MNG_SM 0x0400
+#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */
#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */
#define IXGBE_GSSR_I2C_MASK 0x1800
#define IXGBE_GSSR_NVM_PHY_MASK 0xF
@@ -1997,6 +2002,9 @@ enum {
#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
#define IXGBE_EEPROM_CHECKSUM 0x3F
#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_EEPROM_CTRL_4 0x45
+#define IXGBE_EE_CTRL_4_INST_ID 0x10
+#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4
#define IXGBE_PCIE_ANALOG_PTR 0x03
#define IXGBE_ATLAS0_CONFIG_PTR 0x04
#define IXGBE_PHY_PTR 0x04
@@ -2111,6 +2119,7 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR BIT(7)
#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
#define IXGBE_FW_LESM_STATE_1 0x1
#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
@@ -2530,6 +2539,10 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
+#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000
+#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21
+#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */
+#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */
#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
@@ -2620,6 +2633,20 @@ enum ixgbe_fdir_pballoc_type {
#define FW_MAX_READ_BUFFER_SIZE 1024
#define FW_DISABLE_RXEN_CMD 0xDE
#define FW_DISABLE_RXEN_LEN 0x1
+#define FW_PHY_MGMT_REQ_CMD 0x20
+#define FW_PHY_TOKEN_REQ_CMD 0x0A
+#define FW_PHY_TOKEN_REQ_LEN 2
+#define FW_PHY_TOKEN_REQ 0
+#define FW_PHY_TOKEN_REL 1
+#define FW_PHY_TOKEN_OK 1
+#define FW_PHY_TOKEN_RETRY 0x80
+#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */
+#define FW_PHY_TOKEN_WAIT 5 /* seconds */
+#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY)
+#define FW_INT_PHY_REQ_CMD 0xB
+#define FW_INT_PHY_REQ_LEN 10
+#define FW_INT_PHY_REQ_READ 0
+#define FW_INT_PHY_REQ_WRITE 1
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
@@ -2688,6 +2715,28 @@ struct ixgbe_hic_disable_rxen {
u16 pad3;
};
+struct ixgbe_hic_phy_token_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 command_type;
+ u16 pad;
+};
+
+struct ixgbe_hic_internal_phy_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 command_type;
+ __be16 address;
+ u16 rsv1;
+ __be32 write_data;
+ u16 pad;
+} __packed;
+
+struct ixgbe_hic_internal_phy_resp {
+ struct ixgbe_hic_hdr hdr;
+ __be32 read_data;
+};
+
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
@@ -2786,15 +2835,15 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
-#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */
-#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */
-#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */
-#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */
-#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */
-#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */
-#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */
-#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */
-#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_FCOEF_SOF (BIT(2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC (BIT(3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE (BIT(4) << 10) /* Orientation: End */
+#define IXGBE_ADVTXD_FCOEF_ORIS (BIT(5) << 10) /* Orientation: Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N (0u << 10) /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T (1u << 10) /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI (2u << 10) /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A (3u << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK (3u << 10) /* FC EOF index */
#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
@@ -2948,7 +2997,6 @@ union ixgbe_atr_hash_dword {
IXGBE_CAT(EEC, m), \
IXGBE_CAT(FLA, m), \
IXGBE_CAT(GRC, m), \
- IXGBE_CAT(SRAMREL, m), \
IXGBE_CAT(FACTPS, m), \
IXGBE_CAT(SWSM, m), \
IXGBE_CAT(SWFW_SYNC, m), \
@@ -2989,6 +3037,7 @@ enum ixgbe_mac_type {
ixgbe_mac_X540,
ixgbe_mac_X550,
ixgbe_mac_X550EM_x,
+ ixgbe_mac_x550em_a,
ixgbe_num_macs
};
@@ -3017,6 +3066,7 @@ enum ixgbe_phy_type {
ixgbe_phy_qsfp_intel,
ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported,
+ ixgbe_phy_sgmii,
ixgbe_phy_generic
};
@@ -3130,8 +3180,9 @@ struct ixgbe_bus_info {
enum ixgbe_bus_width width;
enum ixgbe_bus_type type;
- u16 func;
- u16 lan_id;
+ u8 func;
+ u8 lan_id;
+ u8 instance_id;
};
/* Flow control parameters */
@@ -3266,6 +3317,7 @@ struct ixgbe_mac_operations {
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
void (*release_swfw_sync)(struct ixgbe_hw *, u32);
+ void (*init_swfw_sync)(struct ixgbe_hw *);
s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
@@ -3308,6 +3360,7 @@ struct ixgbe_mac_operations {
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *);
+ s32 (*setup_fc)(struct ixgbe_hw *);
/* Manageability interface */
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
@@ -3323,6 +3376,8 @@ struct ixgbe_mac_operations {
s32 (*dmac_config)(struct ixgbe_hw *hw);
s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
+ s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
+ s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
};
struct ixgbe_phy_operations {
@@ -3442,7 +3497,7 @@ struct ixgbe_mbx_stats {
};
struct ixgbe_mbx_info {
- struct ixgbe_mbx_operations ops;
+ const struct ixgbe_mbx_operations *ops;
struct ixgbe_mbx_stats stats;
u32 timeout;
u32 usec_delay;
@@ -3475,10 +3530,10 @@ struct ixgbe_hw {
struct ixgbe_info {
enum ixgbe_mac_type mac;
s32 (*get_invariants)(struct ixgbe_hw *);
- struct ixgbe_mac_operations *mac_ops;
- struct ixgbe_eeprom_operations *eeprom_ops;
- struct ixgbe_phy_operations *phy_ops;
- struct ixgbe_mbx_operations *mbx_ops;
+ const struct ixgbe_mac_operations *mac_ops;
+ const struct ixgbe_eeprom_operations *eeprom_ops;
+ const struct ixgbe_phy_operations *phy_ops;
+ const struct ixgbe_mbx_operations *mbx_ops;
const u32 *mvals;
};
@@ -3517,14 +3572,19 @@ struct ixgbe_info {
#define IXGBE_ERR_INVALID_ARGUMENT -32
#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
+#define IXGBE_ERR_FW_RESP_INVALID -39
+#define IXGBE_ERR_TOKEN_RETRY -40
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
#define IXGBE_FUSES0_300MHZ BIT(5)
-#define IXGBE_FUSES0_REV_MASK (3 << 6)
+#define IXGBE_FUSES0_REV_MASK (3u << 6)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
@@ -3532,43 +3592,54 @@ struct ixgbe_info {
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11)
+
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (7u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN BIT(12)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN BIT(13)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ BIT(14)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC BIT(15)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX BIT(16)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31)
+
+#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE BIT(28)
+#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE BIT(29)
+
+#define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0)
+#define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12)
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19)
-#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
+#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN BIT(6)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN BIT(15)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN BIT(16)
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4)
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL BIT(4)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS BIT(2)
-#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16)
+#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (3u << 16)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN BIT(1)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN BIT(2)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31)
#define IXGBE_KX4_LINK_CNTL_1 0x4C
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31)
#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
@@ -3584,12 +3655,17 @@ struct ixgbe_info {
#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7
#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
-#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1
#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
#define IXGBE_NW_MNG_IF_SEL 0x00011178
+#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1)
+#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23)
#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24)
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \
+ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
#endif /* _IXGBE_TYPE_H_ */
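
Several of the register hunks above (FLA, GRC, dropping SRAMREL from IXGBE_MVALS_INIT) feed the per-MAC dispatch scheme: rather than switch statements, accessors like IXGBE_FLA(hw) index the hw->mvals table that each MAC fills in with IXGBE_MVALS_INIT. A sketch of the mechanism, with the macro body assumed from ixgbe_type.h rather than shown in this excerpt:

	/* assumed shape: IXGBE_BY_MAC(_hw, r) -> (_hw)->mvals[IXGBE_MVALS_##r]
	 * each MAC provides its table once, e.g. for X540:
	 */
	static const u32 example_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
		IXGBE_MVALS_INIT(X540)
	};

	static u32 example_read_fla(struct ixgbe_hw *hw)
	{
		/* resolves to the right offset for the probed MAC */
		return IXGBE_READ_REG(hw, IXGBE_FLA(hw));
	}
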
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 2358c1b7d..f2b1d48a1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -214,8 +214,8 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ eeprom->word_size = BIT(eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
@@ -747,6 +747,25 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_init_swfw_sync_X540 - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function resets the hardware semaphore bits for a semaphore that
+ * may have been left locked due to a catastrophic failure.
+ **/
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
+{
+ /* First try to grab the semaphore; there is no need to check whether
+ * we actually got the lock, because we do the same thing either way:
+ * We got the lock - we release it.
+ * We timed out trying to get the lock - we force its release.
+ */
+ ixgbe_get_swfw_sync_semaphore(hw);
+ ixgbe_release_swfw_sync_semaphore(hw);
+}
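
A caller is expected to run this once early, before the first acquire_swfw_sync, and the hook is optional on older MACs; the call site is outside this excerpt, so the shape below is assumed rather than quoted:

	/* assumed probe/reset-time call site */
	if (hw->mac.ops.init_swfw_sync)
		hw->mac.ops.init_swfw_sync(hw);
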
+
+/**
* ixgbe_blink_led_start_X540 - Blink LED based on index.
* @hw: pointer to hardware structure
* @index: led number to blink
@@ -810,7 +829,7 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
return 0;
}
-static struct ixgbe_mac_operations mac_ops_X540 = {
+static const struct ixgbe_mac_operations mac_ops_X540 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_X540,
.start_hw = &ixgbe_start_hw_X540,
@@ -846,6 +865,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.clear_vfta = &ixgbe_clear_vfta_generic,
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
+ .setup_fc = ixgbe_setup_fc_generic,
.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = NULL,
@@ -853,6 +873,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
.release_swfw_sync = &ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
.disable_rx_buff = &ixgbe_disable_rx_buff_generic,
.enable_rx_buff = &ixgbe_enable_rx_buff_generic,
.get_thermal_sensor_data = NULL,
@@ -863,7 +884,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.disable_rx = &ixgbe_disable_rx_generic,
};
-static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.init_params = &ixgbe_init_eeprom_params_X540,
.read = &ixgbe_read_eerd_X540,
.read_buffer = &ixgbe_read_eerd_buffer_X540,
@@ -874,7 +895,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.update_checksum = &ixgbe_update_eeprom_checksum_X540,
};
-static struct ixgbe_phy_operations phy_ops_X540 = {
+static const struct ixgbe_phy_operations phy_ops_X540 = {
.identify = &ixgbe_identify_phy_generic,
.identify_sfp = &ixgbe_identify_sfp_module_generic,
.init = NULL,
@@ -897,7 +918,7 @@ static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X540)
};
-struct ixgbe_info ixgbe_X540_info = {
+const struct ixgbe_info ixgbe_X540_info = {
.mac = ixgbe_mac_X540,
.get_invariants = &ixgbe_get_invariants_X540,
.mac_ops = &mac_ops_X540,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index a1468b1f4..e21cd4849 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -36,4 +36,5 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 68a9c6464..19b75cd98 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel 10 Gigabit PCI Express Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
+ * Copyright(c) 1999 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,6 +27,7 @@
#include "ixgbe_phy.h"
static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
+static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *);
static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
{
@@ -272,16 +273,26 @@ out:
static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+ return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_SFP:
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
ixgbe_check_cs4227(hw);
+ /* Fallthrough */
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
break;
case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
hw->phy.type = ixgbe_phy_x550em_kr;
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
@@ -324,8 +335,8 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ eeprom->word_size = BIT(eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
@@ -412,6 +423,121 @@ out:
return ret;
}
+/**
+ * ixgbe_get_phy_token - Get the token for shared PHY access
+ * @hw: Pointer to hardware structure
+ */
+static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
+{
+ struct ixgbe_hic_phy_token_req token_cmd;
+ s32 status;
+
+ token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+ token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+ token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ token_cmd.port_number = hw->bus.lan_id;
+ token_cmd.command_type = FW_PHY_TOKEN_REQ;
+ token_cmd.pad = 0;
+ status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (status)
+ return status;
+ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ return 0;
+ if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
+ return IXGBE_ERR_FW_RESP_INVALID;
+
+ return IXGBE_ERR_TOKEN_RETRY;
+}
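
FW_PHY_TOKEN_RETRY from the firmware means "try again"; with FW_PHY_TOKEN_DELAY at 5 ms and FW_PHY_TOKEN_WAIT at 5 s, FW_PHY_TOKEN_RETRIES works out to 1000 attempts. The retry wrapper itself is not part of this hunk, so the sketch below is illustrative only (it also assumes <linux/delay.h> for msleep()):

	static s32 example_acquire_phy_token(struct ixgbe_hw *hw)
	{
		u32 i;
		s32 status;

		for (i = 0; i < FW_PHY_TOKEN_RETRIES; i++) {
			status = ixgbe_get_phy_token(hw);
			if (status != IXGBE_ERR_TOKEN_RETRY)
				return status;	/* success or hard error */
			msleep(FW_PHY_TOKEN_DELAY);
		}
		return IXGBE_ERR_TOKEN_RETRY;
	}
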
+
+/**
+ * ixgbe_put_phy_token - Put the token for shared PHY access
+ * @hw: Pointer to hardware structure
+ */
+static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
+{
+ struct ixgbe_hic_phy_token_req token_cmd;
+ s32 status;
+
+ token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+ token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+ token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ token_cmd.port_number = hw->bus.lan_id;
+ token_cmd.command_type = FW_PHY_TOKEN_REL;
+ token_cmd.pad = 0;
+ status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (status)
+ return status;
+ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ return 0;
+ return IXGBE_ERR_FW_RESP_INVALID;
+}
+
+/**
+ * ixgbe_write_iosf_sb_reg_x550a - Write to IOSF PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Data to write to the register
+ **/
+static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ __always_unused u32 device_type,
+ u32 data)
+{
+ struct ixgbe_hic_internal_phy_req write_cmd;
+
+ memset(&write_cmd, 0, sizeof(write_cmd));
+ write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+ write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ write_cmd.port_number = hw->bus.lan_id;
+ write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
+ write_cmd.address = cpu_to_be16(reg_addr);
+ write_cmd.write_data = cpu_to_be32(data);
+
+ return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+}
+
+/**
+ * ixgbe_read_iosf_sb_reg_x550a - Read from IOSF PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to read
+ * @device_type: 3 bit device type
+ * @data: Pointer to the data read from the register
+ **/
+static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ __always_unused u32 device_type,
+ u32 *data)
+{
+ union {
+ struct ixgbe_hic_internal_phy_req cmd;
+ struct ixgbe_hic_internal_phy_resp rsp;
+ } hic;
+ s32 status;
+
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.command_type = FW_INT_PHY_REQ_READ;
+ hic.cmd.address = cpu_to_be16(reg_addr);
+
+ status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, true);
+
+ /* Extract the register value from the response. */
+ *data = be32_to_cpu(hic.rsp.read_data);
+
+ return status;
+}
+
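These two helpers become the x550em_a read_iosf_sb_reg/write_iosf_sb_reg mac_ops later in this patch, so PHY setup paths can do a read-modify-write without caring which transport backs the access. A sketch of the pattern (hypothetical helper; the register and target names are the ones the SGMII/KR code below actually uses):

static s32 example_krm_rmw(struct ixgbe_hw *hw, u32 set, u32 clear)
{
	u32 reg;
	s32 rc;

	rc = hw->mac.ops.read_iosf_sb_reg(hw,
				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
				IXGBE_SB_IOSF_TARGET_KR_PHY, &reg);
	if (rc)
		return rc;

	reg &= ~clear;
	reg |= set;

	return hw->mac.ops.write_iosf_sb_reg(hw,
				IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
				IXGBE_SB_IOSF_TARGET_KR_PHY, reg);
}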
/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
* command assuming that the semaphore is already obtained.
* @hw: pointer to hardware structure
@@ -436,8 +562,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
/* one word */
buffer.length = cpu_to_be16(sizeof(u16));
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer),
+ status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT, false);
if (status)
return status;
@@ -487,7 +612,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
buffer.address = cpu_to_be32((offset + current_word) * 2);
buffer.length = cpu_to_be16(words_to_read * 2);
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ status = ixgbe_host_interface_command(hw, &buffer,
sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT,
false);
@@ -770,8 +895,7 @@ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
buffer.data = data;
buffer.address = cpu_to_be32(offset * 2);
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer),
+ status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT, false);
return status;
}
@@ -813,8 +937,7 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
buffer.req.checksum = FW_DEFAULT_CHECKSUM;
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer),
+ status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT, false);
return status;
}
@@ -861,9 +984,9 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
- fw_cmd.port_number = (u8)hw->bus.lan_id;
+ fw_cmd.port_number = hw->bus.lan_id;
- status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ status = ixgbe_host_interface_command(hw, &fw_cmd,
sizeof(struct ixgbe_hic_disable_rxen),
IXGBE_HI_COMMAND_TIMEOUT, true);
@@ -1248,6 +1371,117 @@ i2c_err:
}
/**
+ * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set up
+ * @autoneg_wait_to_complete: unused in this path
+ *
+ * Configure the integrated PHY for native SFP support.
+ */
+static s32
+ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ __always_unused bool autoneg_wait_to_complete)
+{
+ bool setup_linear = false;
+ u32 reg_phy_int;
+ s32 rc;
+
+ /* Check if SFP module is supported and linear */
+ rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module is present, return success, since an
+ * SFP-not-present error is not accepted in the setup MAC link flow.
+ */
+ if (rc == IXGBE_ERR_SFP_NOT_PRESENT)
+ return 0;
+
+ if (rc)
+ return rc;
+
+ /* Configure internal PHY for native SFI */
+ rc = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ &reg_phy_int);
+ if (rc)
+ return rc;
+
+ if (setup_linear) {
+ reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING;
+ reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR;
+ } else {
+ reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING;
+ reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR;
+ }
+
+ rc = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ reg_phy_int);
+ if (rc)
+ return rc;
+
+ /* Setup XFI/SFI internal link */
+ return ixgbe_setup_ixfi_x550em(hw, &speed);
+}
+
+/**
+ * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set up
+ * @autoneg_wait_to_complete: unused in this path
+ *
+ * Configure the integrated PHY for SFP support.
+ */
+static s32
+ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ __always_unused bool autoneg_wait_to_complete)
+{
+ u32 reg_slice, slice_offset;
+ bool setup_linear = false;
+ u16 reg_phy_ext;
+ s32 rc;
+
+ /* Check if SFP module is supported and linear */
+ rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module is present, return success, since an
+ * SFP-not-present error is not accepted in the setup MAC link flow.
+ */
+ if (rc == IXGBE_ERR_SFP_NOT_PRESENT)
+ return 0;
+
+ if (rc)
+ return rc;
+
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
+
+ if (!hw->phy.mdio.prtad || hw->phy.mdio.prtad == 0xFFFF)
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+
+ /* Get external PHY device id */
+ rc = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+ if (rc)
+ return rc;
+
+ /* When configuring quad port CS4223, the MAC instance is part
+ * of the slice offset.
+ */
+ if (reg_phy_ext == IXGBE_CS4223_PHY_ID)
+ slice_offset = (hw->bus.lan_id +
+ (hw->bus.instance_id << 1)) << 12;
+ else
+ slice_offset = hw->bus.lan_id << 12;
+
+ /* Configure CS4227/CS4223 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
+ if (setup_linear)
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
+ else
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
+ return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE,
+ reg_phy_ext);
+}
+
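The slice arithmetic above packs the MAC instance and LAN ID into bits 13:12 of the register address, giving each of the four CS4223 ports its own 0x1000-wide register slice. A standalone check of the math (plain C, illustrative values only):

#include <stdio.h>

int main(void)
{
	unsigned int instance_id, lan_id;

	for (instance_id = 0; instance_id < 2; instance_id++)
		for (lan_id = 0; lan_id < 2; lan_id++)
			printf("instance %u, lan %u -> slice offset 0x%04x\n",
			       instance_id, lan_id,
			       (lan_id + (instance_id << 1)) << 12);
	return 0;	/* prints offsets 0x0000, 0x1000, 0x2000, 0x3000 */
}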
+/**
* ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -1326,6 +1560,57 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
return 0;
}
+/**
+ * ixgbe_setup_sgmii - Set up link for SGMII
+ * @hw: pointer to hardware structure
+ * @speed: unused in this path
+ * @autoneg_wait_to_complete: unused in this path
+ */
+static s32
+ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
+ __always_unused bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 lval, sval;
+ s32 rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+ if (rc)
+ return rc;
+
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+ if (rc)
+ return rc;
+
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+ if (rc)
+ return rc;
+
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+
+ return rc;
+}
+
/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
* @hw: pointer to hardware structure
**/
@@ -1342,15 +1627,35 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
- mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em;
+ mac->ops.setup_fc = ixgbe_setup_fc_x550em;
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550a;
+ break;
+ default:
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550em;
+ break;
+ }
mac->ops.set_rate_select_speed =
ixgbe_set_soft_rate_select_speed;
break;
case ixgbe_media_type_copper:
mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+ mac->ops.setup_fc = ixgbe_setup_fc_generic;
mac->ops.check_link = ixgbe_check_link_t_X550em;
+ return;
+ case ixgbe_media_type_backplane:
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
+ mac->ops.setup_link = ixgbe_setup_sgmii;
break;
default:
+ mac->ops.setup_fc = ixgbe_setup_fc_x550em;
break;
}
}
@@ -1614,7 +1919,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
s32 status;
u32 reg_val;
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status)
@@ -1636,7 +1941,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
/* Restart auto-negotiation. */
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
@@ -1653,9 +1958,9 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
s32 status;
u32 reg_val;
- status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
- hw->bus.lan_id, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
+ IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+ hw->bus.lan_id, &reg_val);
if (status)
return status;
@@ -1674,20 +1979,24 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
/* Restart auto-negotiation. */
reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
- hw->bus.lan_id, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
+ IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+ hw->bus.lan_id, reg_val);
return status;
}
-/** ixgbe_setup_kr_x550em - Configure the KR PHY.
- * @hw: pointer to hardware structure
+/**
+ * ixgbe_setup_kr_x550em - Configure the KR PHY
+ * @hw: pointer to hardware structure
*
- * Configures the integrated KR PHY.
+ * Configures the integrated KR PHY for X550EM_x.
**/
static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
+ if (hw->mac.type != ixgbe_mac_X550EM_x)
+ return 0;
+
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
}
@@ -1842,6 +2151,86 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
return status;
}
+/**
+ * ixgbe_setup_fc_x550em - Set up flow control
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+{
+ bool pause, asm_dir;
+ u32 reg_val;
+ s32 rc;
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ /* 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /* Determine PAUSE and ASM_DIR bits. */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ pause = false;
+ asm_dir = false;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause = false;
+ asm_dir = true;
+ break;
+ case ixgbe_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE; as such, we fall
+ * through to the fc_full case. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ /* Fallthrough */
+ case ixgbe_fc_full:
+ pause = true;
+ asm_dir = true;
+ break;
+ default:
+ hw_err(hw, "Flow control param set incorrectly\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR &&
+ hw->device_id != IXGBE_DEV_ID_X550EM_A_KR &&
+ hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L)
+ return 0;
+
+ rc = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ &reg_val);
+ if (rc)
+ return rc;
+
+ reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+ if (pause)
+ reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
+ if (asm_dir)
+ reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ rc = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ reg_val);
+
+ /* This device does not fully support AN. */
+ hw->fc.disable_fc_autoneg = true;
+
+ return rc;
+}
+
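The switch above reduces to a simple truth table for the advertised SYM_PAUSE/ASM_PAUSE bits. A standalone restatement (plain C; the enum mirrors ixgbe_fc_mode but is local to this sketch):

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_TX_PAUSE, FC_RX_PAUSE, FC_FULL };

static void fc_bits(enum fc_mode mode, bool *pause, bool *asm_dir)
{
	/* rx_pause advertises both bits, exactly like the fallthrough */
	*pause = (mode == FC_RX_PAUSE || mode == FC_FULL);
	*asm_dir = (mode != FC_NONE);
}

int main(void)
{
	static const char * const name[] = {
		"none", "tx_pause", "rx_pause", "full"
	};
	int mode;

	for (mode = FC_NONE; mode <= FC_FULL; mode++) {
		bool pause, asm_dir;

		fc_bits(mode, &pause, &asm_dir);
		printf("%-8s -> PAUSE=%d ASM_DIR=%d\n",
		       name[mode], pause, asm_dir);
	}
	return 0;
}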
/** ixgbe_enter_lplu_x550em - Transition to low power states
* @hw: pointer to hardware structure
*
@@ -1939,6 +2328,36 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
return status;
}
+/**
+ * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
+ * @hw: pointer to hardware structure
+ *
+ * Read NW_MNG_IF_SEL register and save field values.
+ */
+static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
+{
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode.
+ */
+ hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set
+ * PHY address. This register field was has only been used for X552.
+ */
+ if (!hw->phy.nw_mng_if_sel) {
+ if (hw->mac.type == ixgbe_mac_x550em_a) {
+ struct ixgbe_adapter *adapter = hw->back;
+
+ e_warn(drv, "nw_mng_if_sel not set\n");
+ }
+ return;
+ }
+
+ hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+}
+
/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
* @hw: pointer to hardware structure
*
@@ -1953,14 +2372,11 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
hw->mac.ops.set_lan_id(hw);
+ ixgbe_read_mng_if_sel_x550em(hw);
+
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
-
- /* Save NW management interface connected on board. This is used
- * to determine internal PHY mode.
- */
- phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
}
/* Identify the PHY or SFP module */
@@ -2023,16 +2439,24 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
/* Detect if there is a copper PHY attached. */
switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ hw->phy.type = ixgbe_phy_sgmii;
+ /* Fallthrough */
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
media_type = ixgbe_media_type_backplane;
break;
case IXGBE_DEV_ID_X550EM_X_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
- media_type = ixgbe_media_type_copper;
+ media_type = ixgbe_media_type_copper;
break;
default:
media_type = ixgbe_media_type_unknown;
@@ -2080,6 +2504,27 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
return status;
}
+/**
+ * ixgbe_set_mdio_speed - Set MDIO clock speed
+ * @hw: pointer to hardware structure
+ */
+static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
+{
+ u32 hlreg0;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ /* Config MDIO clock speed before the first MDIO PHY access */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ break;
+ default:
+ break;
+ }
+}
+
/** ixgbe_reset_hw_X550em - Perform hardware reset
** @hw: pointer to hardware structure
**
@@ -2093,7 +2538,6 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
s32 status;
u32 ctrl = 0;
u32 i;
- u32 hlreg0;
bool link_up = false;
/* Call adapter stop to disable Tx/Rx and clear interrupts */
@@ -2179,11 +2623,7 @@ mac_reset_top:
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
- hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
- IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- }
+ ixgbe_set_mdio_speed(hw);
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
ixgbe_setup_mux_ctl(hw);
@@ -2206,9 +2646,9 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
if (enable)
- pfvfspoof |= (1 << vf_target_shift);
+ pfvfspoof |= BIT(vf_target_shift);
else
- pfvfspoof &= ~(1 << vf_target_shift);
+ pfvfspoof &= ~BIT(vf_target_shift);
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
@@ -2296,6 +2736,110 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
ixgbe_release_swfw_sync_X540(hw, mask);
}
+/**
+ * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and gets the shared PHY token as needed
+ */
+static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+ int retries = FW_PHY_TOKEN_RETRIES;
+ s32 status;
+
+ while (--retries) {
+ status = 0;
+ if (hmask)
+ status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
+ if (status)
+ return status;
+ if (!(mask & IXGBE_GSSR_TOKEN_SM))
+ return 0;
+
+ status = ixgbe_get_phy_token(hw);
+ if (!status)
+ return 0;
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+ if (status != IXGBE_ERR_TOKEN_RETRY)
+ return status;
+ msleep(FW_PHY_TOKEN_DELAY);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and puts back the shared PHY token as needed
+ */
+static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+
+ if (mask & IXGBE_GSSR_TOKEN_SM)
+ ixgbe_put_phy_token(hw);
+
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+}
+
+/**
+ * ixgbe_read_phy_reg_x550a - Reads specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register using the SWFW lock and PHY
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
+ * instances.
+ */
+static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_phy_reg_x550a - Writes specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to the specified PHY register using the SWFW lock and PHY
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
+ * instances.
+ */
+static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ return status;
+}
+
#define X550_COMMON_MAC \
.init_hw = &ixgbe_init_hw_generic, \
.start_hw = &ixgbe_start_hw_X540, \
@@ -2337,12 +2881,10 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
.enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
.get_thermal_sensor_data = NULL, \
.init_thermal_sensor_thresh = NULL, \
- .prot_autoc_read = &prot_autoc_read_generic, \
- .prot_autoc_write = &prot_autoc_write_generic, \
.enable_rx = &ixgbe_enable_rx_generic, \
.disable_rx = &ixgbe_disable_rx_x550, \
-static struct ixgbe_mac_operations mac_ops_X550 = {
+static const struct ixgbe_mac_operations mac_ops_X550 = {
X550_COMMON_MAC
.reset_hw = &ixgbe_reset_hw_X540,
.get_media_type = &ixgbe_get_media_type_X540,
@@ -2354,20 +2896,45 @@ static struct ixgbe_mac_operations mac_ops_X550 = {
.setup_sfp = NULL,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
.release_swfw_sync = &ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
+ .prot_autoc_read = prot_autoc_read_generic,
+ .prot_autoc_write = prot_autoc_write_generic,
+ .setup_fc = ixgbe_setup_fc_generic,
};
-static struct ixgbe_mac_operations mac_ops_X550EM_x = {
+static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
X550_COMMON_MAC
.reset_hw = &ixgbe_reset_hw_X550em,
.get_media_type = &ixgbe_get_media_type_X550em,
.get_san_mac_addr = NULL,
.get_wwn_prefix = NULL,
- .setup_link = NULL, /* defined later */
+ .setup_link = &ixgbe_setup_mac_link_X540,
.get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
.get_bus_info = &ixgbe_get_bus_info_X550em,
.setup_sfp = ixgbe_setup_sfp_modules_X550em,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em,
.release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
+ .setup_fc = NULL, /* defined later */
+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550,
+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
+};
+
+static struct ixgbe_mac_operations mac_ops_x550em_a = {
+ X550_COMMON_MAC
+ .reset_hw = ixgbe_reset_hw_X550em,
+ .get_media_type = ixgbe_get_media_type_X550em,
+ .get_san_mac_addr = NULL,
+ .get_wwn_prefix = NULL,
+ .setup_link = NULL, /* defined later */
+ .get_link_capabilities = ixgbe_get_link_capabilities_X550em,
+ .get_bus_info = ixgbe_get_bus_info_X550em,
+ .setup_sfp = ixgbe_setup_sfp_modules_X550em,
+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
+ .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
+ .setup_fc = ixgbe_setup_fc_x550em,
+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
};
#define X550_COMMON_EEP \
@@ -2379,12 +2946,12 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = {
.update_checksum = &ixgbe_update_eeprom_checksum_X550, \
.calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
-static struct ixgbe_eeprom_operations eeprom_ops_X550 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X550 = {
X550_COMMON_EEP
.init_params = &ixgbe_init_eeprom_params_X550,
};
-static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
X550_COMMON_EEP
.init_params = &ixgbe_init_eeprom_params_X540,
};
@@ -2398,23 +2965,25 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
- .read_reg = &ixgbe_read_phy_reg_generic, \
- .write_reg = &ixgbe_write_phy_reg_generic, \
.setup_link = &ixgbe_setup_phy_link_generic, \
.set_phy_power = NULL, \
.check_overtemp = &ixgbe_tn_check_overtemp, \
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
-static struct ixgbe_phy_operations phy_ops_X550 = {
+static const struct ixgbe_phy_operations phy_ops_X550 = {
X550_COMMON_PHY
.init = NULL,
.identify = &ixgbe_identify_phy_generic,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
};
-static struct ixgbe_phy_operations phy_ops_X550EM_x = {
+static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
X550_COMMON_PHY
.init = &ixgbe_init_phy_ops_X550em,
.identify = &ixgbe_identify_phy_x550em,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
.read_i2c_combined = &ixgbe_read_i2c_combined_generic,
.write_i2c_combined = &ixgbe_write_i2c_combined_generic,
.read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
@@ -2422,6 +2991,14 @@ static struct ixgbe_phy_operations phy_ops_X550EM_x = {
&ixgbe_write_i2c_combined_generic_unlocked,
};
+static const struct ixgbe_phy_operations phy_ops_x550em_a = {
+ X550_COMMON_PHY
+ .init = &ixgbe_init_phy_ops_X550em,
+ .identify = &ixgbe_identify_phy_x550em,
+ .read_reg = &ixgbe_read_phy_reg_x550a,
+ .write_reg = &ixgbe_write_phy_reg_x550a,
+};
+
static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550)
};
@@ -2430,7 +3007,11 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_x)
};
-struct ixgbe_info ixgbe_X550_info = {
+static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(X550EM_a)
+};
+
+const struct ixgbe_info ixgbe_X550_info = {
.mac = ixgbe_mac_X550,
.get_invariants = &ixgbe_get_invariants_X540,
.mac_ops = &mac_ops_X550,
@@ -2440,7 +3021,7 @@ struct ixgbe_info ixgbe_X550_info = {
.mvals = ixgbe_mvals_X550,
};
-struct ixgbe_info ixgbe_X550EM_x_info = {
+const struct ixgbe_info ixgbe_X550EM_x_info = {
.mac = ixgbe_mac_X550EM_x,
.get_invariants = &ixgbe_get_invariants_X550_x,
.mac_ops = &mac_ops_X550EM_x,
@@ -2449,3 +3030,13 @@ struct ixgbe_info ixgbe_X550EM_x_info = {
.mbx_ops = &mbx_ops_generic,
.mvals = ixgbe_mvals_X550EM_x,
};
+
+const struct ixgbe_info ixgbe_x550em_a_info = {
+ .mac = ixgbe_mac_x550em_a,
+ .get_invariants = &ixgbe_get_invariants_X550_x,
+ .mac_ops = &mac_ops_x550em_a,
+ .eeprom_ops = &eeprom_ops_X550EM_x,
+ .phy_ops = &phy_ops_x550em_a,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_x550em_a,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 58434584b..ae09d60e7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -33,6 +33,11 @@
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_82599_VF_HV 0x152E
+#define IXGBE_DEV_ID_X540_VF_HV 0x1530
+#define IXGBE_DEV_ID_X550_VF_HV 0x1564
+#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
@@ -74,7 +79,7 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_RXDCTL_RLPML_EN 0x00008000
/* DCA Control */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
/* PSRTYPE bit definitions */
#define IXGBE_PSRTYPE_TCPHDR 0x00000010
@@ -296,16 +301,16 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
-
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
#endif /* _IXGBEVF_DEFINES_H_ */
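The motivation for the BIT() conversions throughout this patch: (1 << n) is a signed int, so bit 31 overflows and sign-extends when widened, while BIT() shifts an unsigned long. A standalone demonstration (plain C; BIT() copied from include/linux/bitops.h):

#include <stdio.h>

#define BIT(n)	(1UL << (n))

int main(void)
{
	/* 1 << 31 overflows signed int (formally undefined; in practice
	 * it goes negative and sign-extends on the assignment below).
	 */
	long long signed_mask = 1 << 31;
	unsigned long long bit_mask = BIT(31);

	printf("1 << 31 -> %lld\n", signed_mask);	/* typically -2147483648 */
	printf("BIT(31) -> %llu\n", bit_mask);		/* 2147483648 */
	return 0;
}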
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index d7aa4b203..508e72c5f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -42,65 +42,54 @@
#define IXGBE_ALL_RAR_ENTRIES 16
+enum {NETDEV_STATS, IXGBEVF_STATS};
+
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
- struct {
- int sizeof_stat;
- int stat_offset;
- int base_stat_offset;
- int saved_reset_offset;
- };
+ int type;
+ int sizeof_stat;
+ int stat_offset;
};
-#define IXGBEVF_STAT(m, b, r) { \
- .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
- .stat_offset = offsetof(struct ixgbevf_adapter, m), \
- .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
- .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+#define IXGBEVF_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .type = IXGBEVF_STATS, \
+ .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
+ .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}
-#define IXGBEVF_ZSTAT(m) { \
- .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
- .stat_offset = offsetof(struct ixgbevf_adapter, m), \
- .base_stat_offset = -1, \
- .saved_reset_offset = -1 \
+#define IXGBEVF_NETDEV_STAT(_net_stat) { \
+ .stat_string = #_net_stat, \
+ .type = NETDEV_STATS, \
+ .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+ .stat_offset = offsetof(struct net_device_stats, _net_stat) \
}
-static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
- {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
- stats.saved_reset_vfgprc)},
- {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
- stats.saved_reset_vfgptc)},
- {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
- stats.saved_reset_vfgorc)},
- {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
- stats.saved_reset_vfgotc)},
- {"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
- {"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
- {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
- {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
- stats.saved_reset_vfmprc)},
- {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
-#ifdef BP_EXTENDED_STATS
- {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
- {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
- {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
- {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
- {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
- {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
-#endif
+static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
+ IXGBEVF_NETDEV_STAT(rx_packets),
+ IXGBEVF_NETDEV_STAT(tx_packets),
+ IXGBEVF_NETDEV_STAT(rx_bytes),
+ IXGBEVF_NETDEV_STAT(tx_bytes),
+ IXGBEVF_STAT("tx_busy", tx_busy),
+ IXGBEVF_STAT("tx_restart_queue", restart_queue),
+ IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
+ IXGBEVF_NETDEV_STAT(multicast),
+ IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
};
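The reworked macros and table above reduce ethtool stats to a generic offset walk: each entry records where a counter lives and how wide it is, and the copy loop never names the field. A toy, compilable version of the same pattern (all names local to this sketch):

#include <stddef.h>
#include <stdio.h>

struct toy_adapter {
	unsigned long long tx_busy;
	unsigned int hw_csum_rx_error;
};

struct toy_stat {
	const char *name;
	size_t size;
	size_t offset;
};

#define TOY_STAT(m) { #m, sizeof(((struct toy_adapter *)0)->m), \
		      offsetof(struct toy_adapter, m) }

static const struct toy_stat toy_stats[] = {
	TOY_STAT(tx_busy),
	TOY_STAT(hw_csum_rx_error),
};

int main(void)
{
	struct toy_adapter a = { .tx_busy = 7, .hw_csum_rx_error = 3 };
	size_t i;

	for (i = 0; i < sizeof(toy_stats) / sizeof(toy_stats[0]); i++) {
		const char *p = (const char *)&a + toy_stats[i].offset;
		unsigned long long v;

		/* same u64-vs-u32 width check as the driver's copy loop */
		v = (toy_stats[i].size == sizeof(unsigned long long)) ?
			*(const unsigned long long *)p : *(const unsigned int *)p;
		printf("%s = %llu\n", toy_stats[i].name, v);
	}
	return 0;
}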
-#define IXGBE_QUEUE_STATS_LEN 0
-#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+#define IXGBEVF_QUEUE_STATS_LEN ( \
+ (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
+ ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+ (sizeof(struct ixgbe_stats) / sizeof(u64)))
+#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
-#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Link test (on/offline)"
};
-#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
static int ixgbevf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
@@ -177,7 +166,8 @@ static void ixgbevf_get_regs(struct net_device *netdev,
memset(p, 0, regs_len);
- regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+ /* generate a number suitable for ethtool's register version */
+ regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
/* General Registers */
regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
@@ -392,13 +382,13 @@ clear_reset:
return err;
}
-static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
{
switch (stringset) {
case ETH_SS_TEST:
- return IXGBE_TEST_LEN;
+ return IXGBEVF_TEST_LEN;
case ETH_SS_STATS:
- return IXGBE_GLOBAL_STATS_LEN;
+ return IXGBEVF_STATS_LEN;
default:
return -EINVAL;
}
@@ -408,70 +398,138 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- char *base = (char *)adapter;
- int i;
-#ifdef BP_EXTENDED_STATS
- u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
- tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+ struct rtnl_link_stats64 temp;
+ const struct rtnl_link_stats64 *net_stats;
+ unsigned int start;
+ struct ixgbevf_ring *ring;
+ int i, j;
+ char *p;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_yields += adapter->rx_ring[i]->stats.yields;
- rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
- rx_yields += adapter->rx_ring[i]->stats.yields;
- }
+ ixgbevf_update_stats(adapter);
+ net_stats = dev_get_stats(netdev, &temp);
+ for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+ switch (ixgbevf_gstrings_stats[i].type) {
+ case NETDEV_STATS:
+ p = (char *)net_stats +
+ ixgbevf_gstrings_stats[i].stat_offset;
+ break;
+ case IXGBEVF_STATS:
+ p = (char *)adapter +
+ ixgbevf_gstrings_stats[i].stat_offset;
+ break;
+ default:
+ data[i] = 0;
+ continue;
+ }
- for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_yields += adapter->tx_ring[i]->stats.yields;
- tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
- tx_yields += adapter->tx_ring[i]->stats.yields;
+ data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- adapter->bp_rx_yields = rx_yields;
- adapter->bp_rx_cleaned = rx_cleaned;
- adapter->bp_rx_missed = rx_missed;
+ /* populate Tx queue data */
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ ring = adapter->tx_ring[j];
+ if (!ring) {
+ data[i++] = 0;
+ data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+ data[i++] = 0;
+ data[i++] = 0;
+ data[i++] = 0;
+#endif
+ continue;
+ }
- adapter->bp_tx_yields = tx_yields;
- adapter->bp_tx_cleaned = tx_cleaned;
- adapter->bp_tx_missed = tx_missed;
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i + 1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ i += 2;
+#ifdef BP_EXTENDED_STATS
+ data[i] = ring->stats.yields;
+ data[i + 1] = ring->stats.misses;
+ data[i + 2] = ring->stats.cleaned;
+ i += 3;
#endif
+ }
- ixgbevf_update_stats(adapter);
- for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- char *p = base + ixgbe_gstrings_stats[i].stat_offset;
- char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
- char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
-
- if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
- if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
- data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
- else
- data[i] = *(u64 *)p;
- } else {
- if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
- data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
- else
- data[i] = *(u32 *)p;
+ /* populate Rx queue data */
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ ring = adapter->rx_ring[j];
+ if (!ring) {
+ data[i++] = 0;
+ data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+ data[i++] = 0;
+ data[i++] = 0;
+ data[i++] = 0;
+#endif
+ continue;
}
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i + 1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ i += 2;
+#ifdef BP_EXTENDED_STATS
+ data[i] = ring->stats.yields;
+ data[i + 1] = ring->stats.misses;
+ data[i + 2] = ring->stats.cleaned;
+ i += 3;
+#endif
}
}
static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
char *p = (char *)data;
int i;
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, *ixgbe_gstrings_test,
- IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+ for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "tx_queue_%u_bp_napi_yield", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bp_misses", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bp_cleaned", i);
+ p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "rx_queue_%u_bp_poll_yield", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bp_misses", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bp_cleaned", i);
+ p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+ }
break;
}
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 991eeae81..d5944c391 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -166,10 +166,10 @@ struct ixgbevf_ring {
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
-#define IXGBE_TX_FLAGS_CSUM (u32)(1)
-#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
-#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
-#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_CSUM BIT(0)
+#define IXGBE_TX_FLAGS_VLAN BIT(1)
+#define IXGBE_TX_FLAGS_TSO BIT(2)
+#define IXGBE_TX_FLAGS_IPV4 BIT(3)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -403,13 +403,6 @@ struct ixgbevf_adapter {
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
- /* Some features need tri-state capability,
- * thus the additional *_CAPABLE flags.
- */
- u32 flags;
-#define IXGBEVF_FLAG_RESET_REQUESTED (u32)(1)
-#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
-
struct msix_entry *msix_entries;
/* OS defined structs */
@@ -429,16 +422,6 @@ struct ixgbevf_adapter {
unsigned int tx_ring_count;
unsigned int rx_ring_count;
-#ifdef BP_EXTENDED_STATS
- u64 bp_rx_yields;
- u64 bp_rx_cleaned;
- u64 bp_rx_missed;
-
- u64 bp_tx_yields;
- u64 bp_tx_cleaned;
- u64 bp_tx_missed;
-#endif
-
u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 link_speed;
bool link_up;
@@ -461,13 +444,19 @@ enum ixbgevf_state_t {
__IXGBEVF_REMOVING,
__IXGBEVF_SERVICE_SCHED,
__IXGBEVF_SERVICE_INITED,
+ __IXGBEVF_RESET_REQUESTED,
+ __IXGBEVF_QUEUE_RESET_REQUESTED,
};
enum ixgbevf_boards {
board_82599_vf,
+ board_82599_vf_hv,
board_X540_vf,
+ board_X540_vf_hv,
board_X550_vf,
+ board_X550_vf_hv,
board_X550EM_x_vf,
+ board_X550EM_x_vf_hv,
};
enum ixgbevf_xcast_modes {
@@ -482,6 +471,12 @@ extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
+extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
+
/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index b0edae94d..acc24010c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -62,10 +62,14 @@ static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2015 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
- [board_82599_vf] = &ixgbevf_82599_vf_info,
- [board_X540_vf] = &ixgbevf_X540_vf_info,
- [board_X550_vf] = &ixgbevf_X550_vf_info,
- [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
+ [board_82599_vf] = &ixgbevf_82599_vf_info,
+ [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info,
+ [board_X540_vf] = &ixgbevf_X540_vf_info,
+ [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info,
+ [board_X550_vf] = &ixgbevf_X550_vf_info,
+ [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info,
+ [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
+ [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -78,9 +82,13 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
*/
static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
/* required last entry */
{0, }
};
@@ -268,7 +276,7 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
/* Do the reset outside of interrupt context */
if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
- adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+ set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
ixgbevf_service_event_schedule(adapter);
}
}
@@ -288,9 +296,10 @@ static void ixgbevf_tx_timeout(struct net_device *netdev)
* ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: board private structure
* @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
**/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
- struct ixgbevf_ring *tx_ring)
+ struct ixgbevf_ring *tx_ring, int napi_budget)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct ixgbevf_tx_buffer *tx_buffer;
@@ -328,7 +337,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
/* free the skb */
- dev_kfree_skb_any(tx_buffer->skb);
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -1013,8 +1022,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
int per_ring_budget, work_done = 0;
bool clean_complete = true;
- ixgbevf_for_each_ring(ring, q_vector->tx)
- clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+ ixgbevf_for_each_ring(ring, q_vector->tx) {
+ if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
if (budget <= 0)
return budget;
@@ -1035,7 +1046,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
per_ring_budget);
work_done += cleaned;
- clean_complete &= (cleaned < per_ring_budget);
+ if (cleaned >= per_ring_budget)
+ clean_complete = false;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -1052,7 +1064,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
!test_bit(__IXGBEVF_REMOVING, &adapter->state))
ixgbevf_irq_enable_queues(adapter,
- 1 << q_vector->v_idx);
+ BIT(q_vector->v_idx));
return 0;
}
@@ -1154,14 +1166,14 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
}
/* add q_vector eims value to global eims_enable_mask */
- adapter->eims_enable_mask |= 1 << v_idx;
+ adapter->eims_enable_mask |= BIT(v_idx);
ixgbevf_write_eitr(q_vector);
}
ixgbevf_set_ivar(adapter, -1, 1, v_idx);
/* setup eims_other and add value to global eims_enable_mask */
- adapter->eims_other = 1 << v_idx;
+ adapter->eims_other = BIT(v_idx);
adapter->eims_enable_mask |= adapter->eims_other;
}
@@ -1585,8 +1597,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
txdctl |= (8 << 16); /* WTHRESH = 8 */
/* Setting PTHRESH to 32 both improves performance */
- txdctl |= (1 << 8) | /* HTHRESH = 1 */
- 32; /* PTHRESH = 32 */
+ txdctl |= (1u << 8) | /* HTHRESH = 1 */
+ 32; /* PTHRESH = 32 */
clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
@@ -1642,7 +1654,7 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
IXGBE_PSRTYPE_L2HDR;
if (adapter->num_rx_queues > 1)
- psrtype |= 1 << 29;
+ psrtype |= BIT(29);
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}
@@ -1748,9 +1760,15 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
ring->count * sizeof(union ixgbe_adv_rx_desc));
+#ifndef CONFIG_SPARC
/* enable relaxed ordering */
IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+#else
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+ IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_RXCTRL_DATA_WRO_EN);
+#endif
/* reset head and tail pointers */
IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
@@ -1791,7 +1809,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
ixgbevf_setup_vfmrqc(adapter);
/* notify the PF of our intent to use this size of frame */
- ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+ hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
@@ -1904,7 +1922,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
spin_lock_bh(&adapter->mbx_lock);
- hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);
+ hw->mac.ops.update_xcast_mode(hw, xcast_mode);
/* reprogram multicast list */
hw->mac.ops.update_mc_addr_list(hw, netdev);
@@ -1984,7 +2002,7 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
hw->mbx.timeout = 0;
/* wait for watchdog to come around and bail us out */
- adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+ set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
}
return 0;
@@ -2052,7 +2070,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
spin_lock_bh(&adapter->mbx_lock);
while (api[idx] != ixgbe_mbox_api_unknown) {
- err = ixgbevf_negotiate_api_version(hw, api[idx]);
+ err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
if (!err)
break;
idx++;
@@ -2749,11 +2767,9 @@ static void ixgbevf_service_timer(unsigned long data)
static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
{
- if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
+ if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
return;
- adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
-
/* If we're already down or resetting, just bail */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
test_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -2795,7 +2811,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
struct ixgbevf_q_vector *qv = adapter->q_vector[i];
if (qv->rx.ring || qv->tx.ring)
- eics |= 1 << i;
+ eics |= BIT(i);
}
/* Cause software interrupt to ensure rings are cleaned */
@@ -2821,7 +2837,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
/* if check for link returns error we will need to reset */
if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
- adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+ set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
link_up = false;
}
@@ -3222,11 +3238,10 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
struct net_device *dev = adapter->netdev;
- if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
+ if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
+ &adapter->state))
return;
- adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
-
/* if interface is down do nothing */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
test_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -3271,9 +3286,18 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
u8 *hdr_len)
{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb;
- u32 vlan_macip_lens, type_tucmd;
- u32 mss_l4len_idx, l4len;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3286,49 +3310,53 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
if (err < 0)
return err;
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
-
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM |
IXGBE_TX_FLAGS_IPV4;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ } else {
+ ip.v6->payload_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM;
}
- /* compute header lengths */
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
- *hdr_len = skb_transport_offset(skb) + l4len;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
- /* update GSO size and bytecount with header size */
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+
+ /* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* mss_l4len_id: use 1 as index for TSO */
- mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
- mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+ mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
- vlan_macip_lens = skb_network_header_len(skb);
- vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
@@ -3337,76 +3365,55 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
return 1;
}
+static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+ unsigned int offset = 0;
+
+ ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+ return offset == skb_checksum_start_offset(skb);
+}
+
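The rewritten ixgbevf_tx_csum() below keys on skb->csum_offset instead of re-parsing headers: the checksum field sits at a different offset in the TCP, UDP, and SCTP headers, so the offset alone is almost enough to classify the packet (SCTP still needs the protocol check above). A standalone illustration (plain C with minimal stand-in header layouts; only the checksum field's position matters):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct tcp_like	 { uint16_t source, dest; uint32_t seq, ack_seq;
		   uint16_t flags, window, check, urg_ptr; };
struct udp_like	 { uint16_t source, dest, len, check; };
struct sctp_like { uint16_t source, dest; uint32_t vtag, checksum; };

static const char *classify(size_t csum_offset)
{
	switch (csum_offset) {
	case offsetof(struct tcp_like, check):		/* 16 */
		return "TCP: offload L4 checksum";
	case offsetof(struct udp_like, check):		/* 6 */
		return "UDP: offload L4 checksum";
	case offsetof(struct sctp_like, checksum):	/* 8 */
		return "SCTP candidate: verify protocol first";
	default:
		return "unknown: fall back to software checksum";
	}
}

int main(void)
{
	const size_t offsets[] = { 16, 6, 8, 2 };
	size_t i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
		printf("csum_offset %2zu -> %s\n",
		       offsets[i], classify(offsets[i]));
	return 0;
}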
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
- u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 l4_hdr = 0;
- __be16 frag_off;
-
- switch (first->protocol) {
- case htons(ETH_P_IP):
- vlan_macip_lens |= skb_network_header_len(skb);
- type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
- l4_hdr = ip_hdr(skb)->protocol;
- break;
- case htons(ETH_P_IPV6):
- vlan_macip_lens |= skb_network_header_len(skb);
- l4_hdr = ipv6_hdr(skb)->nexthdr;
- if (likely(skb_network_header_len(skb) ==
- sizeof(struct ipv6hdr)))
- break;
- ipv6_skip_exthdr(skb, skb_network_offset(skb) +
- sizeof(struct ipv6hdr),
- &l4_hdr, &frag_off);
- if (unlikely(frag_off))
- l4_hdr = NEXTHDR_FRAGMENT;
- break;
- default:
- break;
- }
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ goto no_csum;
- switch (l4_hdr) {
- case IPPROTO_TCP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- mss_l4len_idx = tcp_hdrlen(skb) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_SCTP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- mss_l4len_idx = sizeof(struct sctphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_UDP:
- mss_l4len_idx = sizeof(struct udphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ /* fall through */
+ case offsetof(struct udphdr, check):
+ break;
+ case offsetof(struct sctphdr, checksum):
+ /* validate that this is actually an SCTP request */
+ if (((first->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+ ((first->protocol == htons(ETH_P_IPV6)) &&
+ ixgbevf_ipv6_csum_is_sctp(skb))) {
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
- default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum, l3 proto=%x, l4 proto=%x\n",
- first->protocol, l4_hdr);
- }
- skb_checksum_help(skb);
- goto no_csum;
}
-
- /* update TX checksum flag */
- first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ /* fall through */
+ default:
+ skb_checksum_help(skb);
+ goto no_csum;
}
-
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ vlan_macip_lens = skb_checksum_start_offset(skb) -
+ skb_network_offset(skb);
no_csum:
/* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
- type_tucmd, mss_l4len_idx);
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
}
static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
@@ -3442,7 +3449,7 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
/* use index 1 context for TSO/FSO/FCOE */
if (tx_flags & IXGBE_TX_FLAGS_TSO)
- olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+ olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
/* Check Context must be set if Tx switch is enabled, which it
* always is for case where virtual functions are running
@@ -3747,7 +3754,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
/* notify the PF of our intent to use this size of frame */
- ixgbevf_rlpml_set_vf(hw, max_frame);
+ hw->mac.ops.set_rlpml(hw, max_frame);
return 0;
}
@@ -3890,6 +3897,40 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats;
}
+#define IXGBEVF_MAX_MAC_HDR_LEN 127
+#define IXGBEVF_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t
+ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
+
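The two length caps above reflect the width of the MACLEN and IPLEN fields in the Tx context descriptor; frames whose headers do not fit have their offloads stripped so the stack falls back to software. A minimal sketch of the bounds check with illustrative header sizes (not driver code):

#include <stdbool.h>
#include <stdio.h>

#define IXGBEVF_MAX_MAC_HDR_LEN		127
#define IXGBEVF_MAX_NETWORK_HDR_LEN	511

/* Returns true when both header lengths (in bytes) fit in the
 * context descriptor fields checked above. */
static bool hdrs_fit_context_desc(unsigned int mac_hdr_len,
				  unsigned int network_hdr_len)
{
	return mac_hdr_len <= IXGBEVF_MAX_MAC_HDR_LEN &&
	       network_hdr_len <= IXGBEVF_MAX_NETWORK_HDR_LEN;
}

int main(void)
{
	printf("%d\n", hdrs_fit_context_desc(18, 40));	/* 1: VLAN + IPv6 */
	printf("%d\n", hdrs_fit_context_desc(200, 40));	/* 0: MAC hdr too long */
	return 0;
}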
static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_open = ixgbevf_open,
.ndo_stop = ixgbevf_close,
@@ -3908,7 +3949,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbevf_netpoll,
#endif
- .ndo_features_check = passthru_features_check,
+ .ndo_features_check = ixgbevf_features_check,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -4013,26 +4054,37 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
netdev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
NETIF_F_TSO |
NETIF_F_TSO6 |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
- netdev->vlan_features |= NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_SG;
+ netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
+ netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+ IXGBEVF_GSO_PARTIAL_FEATURES;
+
+ netdev->features = netdev->hw_features;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
+
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
if (IXGBE_REMOVED(hw->hw_addr)) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index dc68fea48..2819abc45 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_MBX;
if (!mbx->ops.read)
goto out;
@@ -111,7 +111,7 @@ out:
static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_MBX;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops.write || !mbx->timeout)
@@ -346,3 +346,14 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
.check_for_rst = ixgbevf_check_for_rst_vf,
};
+/* Mailbox operations when running on Hyper-V.
+ * On Hyper-V, PF/VF communication is not through the
+ * hardware mailbox; it goes through a software-mediated
+ * path instead.
+ * Most mailbox operations are no-ops while running on
+ * Hyper-V.
+ */
+const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
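Since only init_params and check_for_rst are populated here, every mailbox consumer must tolerate NULL function pointers; the posted read/write paths above already do (note the `if (!mbx->ops.read)` guard). A stripped-down model of that guard pattern, with hypothetical names:

#include <stdio.h>

struct mbx_ops {
	int (*read)(void);	/* left NULL by ixgbevf_hv_mbx_ops */
};

/* Same guard ixgbevf_read_posted_mbx() uses before dereferencing
 * mbx->ops.read; -1 stands in for IXGBE_ERR_MBX here. */
static int mbx_read_posted(const struct mbx_ops *ops)
{
	if (!ops->read)
		return -1;
	return ops->read();
}

int main(void)
{
	struct mbx_ops hv_ops = { .read = NULL };

	printf("%d\n", mbx_read_posted(&hv_ops));	/* -1 */
	return 0;
}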
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 4d613a4f2..e670d3b19 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -27,6 +27,12 @@
#include "vf.h"
#include "ixgbevf.h"
+/* On Hyper-V, to reset, we need to read from this offset
+ * in the PCI config space. This is the mechanism used on
+ * Hyper-V to support PF/VF communication.
+ */
+#define IXGBE_HV_RESET_OFFSET 0x201
+
/**
* ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
@@ -126,6 +132,27 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
}
/**
+ * Hyper-V variant; the VF/PF communication is through the PCI
+ * config space.
+ */
+static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
+{
+#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
+ struct ixgbevf_adapter *adapter = hw->back;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ pci_read_config_byte(adapter->pdev,
+ (i + IXGBE_HV_RESET_OFFSET),
+ &hw->mac.perm_addr[i]);
+ return 0;
+#else
+ pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
+ return -EOPNOTSUPP;
+#endif
+}
+
+/**
* ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
* @hw: pointer to hardware structure
*
@@ -258,6 +285,11 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
return ret_val;
}
+static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+ return -EOPNOTSUPP;
+}
+
/**
* ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
* @adapter: pointer to the port handle
@@ -416,6 +448,26 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
return ret_val;
}
+/**
+ * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: Unused in this implementation
+ *
+ * We don't really allow setting the device MAC address. However,
+ * if the address being set is the permanent MAC address we will
+ * permit that.
+ **/
+static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+ u32 vmdq)
+{
+ if (ether_addr_equal(addr, hw->mac.perm_addr))
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
u32 *msg, u16 size)
{
@@ -473,15 +525,22 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
}
/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+ struct net_device *netdev)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_update_xcast_mode - Update Multicast mode
* @hw: pointer to the HW structure
- * @netdev: pointer to net device structure
* @xcast_mode: new multicast mode
*
* Updates the Multicast Mode of VF.
**/
-static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
- struct net_device *netdev, int xcast_mode)
+static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
@@ -513,6 +572,14 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
}
/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -551,6 +618,15 @@ mbx_err:
}
/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_setup_mac_link_vf - Setup MAC link settings
* @hw: pointer to hardware structure
* @speed: Unused in this implementation
@@ -656,11 +732,72 @@ out:
}
/**
- * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * Hyper-V variant; there is no mailbox communication.
+ */
+static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 links_reg;
+
+ /* If we were hit with a reset, drop the link */
+ if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down, there is no point in checking if the PF is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ udelay(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return 0;
+}
+
+/**
+ * ixgbevf_set_rlpml_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
u32 msgbuf[2];
@@ -670,11 +807,30 @@ void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
}
/**
- * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ * Hyper-V variant.
+ **/
+static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+ u32 reg;
+
+ /* If we are on Hyper-V, we implement this functionality
+ * differently.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
+ /* CRC == 4 */
+ reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
+}
+
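With no PF to relay the limit to, the Hyper-V variant below programs the receive descriptor control register directly, adding 4 bytes so the hardware cap covers the trailing FCS. A worked example of the resulting register value; the RLPML_EN constant is an assumption based on the ixgbe register layout:

#include <stdint.h>
#include <stdio.h>

#define IXGBE_RXDCTL_RLPML_EN	0x00008000	/* assumed per ixgbe headers */
#define ETH_FCS_LEN		4		/* the "CRC == 4" in the comment */

int main(void)
{
	uint32_t reg = 0;		/* VFRXDCTL(0) read back as 0 here */
	uint16_t max_size = 1518;	/* 1500-byte MTU frame on the wire */

	reg |= (uint32_t)(max_size + ETH_FCS_LEN) | IXGBE_RXDCTL_RLPML_EN;
	printf("0x%08x\n", reg);	/* 0x000085f2 */
	return 0;
}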
+/**
+ * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
* @hw: pointer to the HW structure
* @api: integer containing requested API version
**/
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
int err;
u32 msg[3];
@@ -703,6 +859,21 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
return err;
}
+/**
+ * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ * Hyper-V version - only ixgbe_mbox_api_10 supported.
+ **/
+static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
+{
+ /* Hyper-V only supports api version ixgbe_mbox_api_10 */
+ if (api != ixgbe_mbox_api_10)
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ return 0;
+}
+
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc)
{
@@ -769,11 +940,30 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.stop_adapter = ixgbevf_stop_hw_vf,
.setup_link = ixgbevf_setup_mac_link_vf,
.check_link = ixgbevf_check_mac_link_vf,
+ .negotiate_api_version = ixgbevf_negotiate_api_version_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_update_xcast_mode,
.set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
+ .set_rlpml = ixgbevf_set_rlpml_vf,
+};
+
+static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
+ .init_hw = ixgbevf_init_hw_vf,
+ .reset_hw = ixgbevf_hv_reset_hw_vf,
+ .start_hw = ixgbevf_start_hw_vf,
+ .get_mac_addr = ixgbevf_get_mac_addr_vf,
+ .stop_adapter = ixgbevf_stop_hw_vf,
+ .setup_link = ixgbevf_setup_mac_link_vf,
+ .check_link = ixgbevf_hv_check_mac_link_vf,
+ .negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf,
+ .set_rar = ixgbevf_hv_set_rar_vf,
+ .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
+ .update_xcast_mode = ixgbevf_hv_update_xcast_mode,
+ .set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
+ .set_vfta = ixgbevf_hv_set_vfta_vf,
+ .set_rlpml = ixgbevf_hv_set_rlpml_vf,
};
const struct ixgbevf_info ixgbevf_82599_vf_info = {
@@ -781,17 +971,37 @@ const struct ixgbevf_info ixgbevf_82599_vf_info = {
.mac_ops = &ixgbevf_mac_ops,
};
+const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
+ .mac = ixgbe_mac_82599_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
const struct ixgbevf_info ixgbevf_X540_vf_info = {
.mac = ixgbe_mac_X540_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
+ .mac = ixgbe_mac_X540_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
const struct ixgbevf_info ixgbevf_X550_vf_info = {
.mac = ixgbe_mac_X550_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
+ .mac = ixgbe_mac_X550_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
.mac = ixgbe_mac_X550EM_x_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+
+const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
+ .mac = ixgbe_mac_X550EM_x_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index ef9f7736b..2cac610f3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -51,6 +51,7 @@ struct ixgbe_mac_operations {
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
+ s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
/* Link */
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
@@ -63,11 +64,12 @@ struct ixgbe_mac_operations {
s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
- s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int);
+ s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+ void (*set_rlpml)(struct ixgbe_hw *, u16);
};
enum ixgbe_mac_type {
@@ -207,8 +209,6 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc);
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 3ddf657bc..836ebd8ee 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -222,7 +222,7 @@ jme_clear_ghc_reset(struct jme_adapter *jme)
jwrite32f(jme, JME_GHC, jme->reg_ghc);
}
-static inline void
+static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index d74f5f4e5..1799fe141 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -152,7 +152,7 @@ static inline void korina_abort_dma(struct net_device *dev,
writel(0x10, &ch->dmac);
while (!(readl(&ch->dmas) & DMA_STAT_HALT))
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
writel(0, &ch->dmas);
}
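This hunk and the matching ones below are part of a tree-wide 4.7 conversion: drivers stop writing dev->trans_start directly and call netif_trans_update(), which stamps the transmit time on queue 0 of the device. Roughly, the helper looks like this in this kernel series (paraphrased for context, not part of this patch):

static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;	/* skip redundant stores */
}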
@@ -283,7 +283,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
}
dma_cache_wback((u32) td, sizeof(*td));
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
@@ -622,7 +622,7 @@ korina_tx_dma_interrupt(int irq, void *dev_id)
&(lp->tx_dma_regs->dmandptr));
lp->tx_chain_status = desc_empty;
lp->tx_chain_head = lp->tx_chain_tail;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
if (dmas & DMA_STAT_ERR)
printk(KERN_ERR "%s: DMA error\n", dev->name);
@@ -811,7 +811,7 @@ static int korina_init(struct net_device *dev)
/* reset ethernet logic */
writel(0, &lp->eth_regs->ethintfc);
while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* Enable Ethernet Interface */
writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index b630ef1e9..dc82b1b19 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -519,7 +519,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
byte_offset = CPHYSADDR(skb->data) % 16;
ch->skb[ch->dma.desc] = skb;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_lock_irqsave(&priv->lock, flags);
desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
@@ -657,7 +657,7 @@ ltq_etop_tx_timeout(struct net_device *dev)
err = ltq_etop_hw_init(dev);
if (err)
goto err_hw;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_wake_queue(dev);
return;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a6d26d351..f92018b13 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -244,7 +244,7 @@
/* Various constants */
/* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS 1
+#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
@@ -3458,6 +3458,8 @@ static int mvneta_open(struct net_device *dev)
return 0;
err_free_irq:
+ unregister_cpu_notifier(&pp->cpu_notifier);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
mvneta_cleanup_txqs(pp);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index c442f6ad1..54d5154ac 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -286,12 +286,12 @@ static int pxa168_eth_stop(struct net_device *dev);
static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
- return readl(pep->base + offset);
+ return readl_relaxed(pep->base + offset);
}
static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
{
- writel(data, pep->base + offset);
+ writel_relaxed(data, pep->base + offset);
}
static void abort_dma(struct pxa168_eth_private *pep)
@@ -342,9 +342,9 @@ static void rxq_refill(struct net_device *dev)
pep->rx_skb[used_rx_desc] = skb;
/* Return the descriptor to DMA ownership */
- wmb();
+ dma_wmb();
p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
- wmb();
+ dma_wmb();
/* Move the used descriptor pointer to the next descriptor */
pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
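The wmb()-to-dma_wmb() conversions in this file relax full write barriers to DMA-ordering barriers: the descriptors live in coherent memory, so stores only need to be ordered against the device's view, which dma_wmb() guarantees more cheaply on most architectures. The publish pattern, sketched with hypothetical names (kernel-style, not standalone):

struct rx_desc {
	u32 buf_ptr;
	u32 cmd_sts;	/* OWN bit hands the descriptor to the NIC */
};

static void publish_rx_desc(struct rx_desc *d, u32 buf, u32 own)
{
	d->buf_ptr = buf;
	dma_wmb();	/* buffer fields visible to DMA before OWN flips */
	d->cmd_sts = own;
	dma_wmb();	/* OWN visible before the CPU touches the next slot */
}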
@@ -794,7 +794,7 @@ static int rxq_process(struct net_device *dev, int budget)
rx_used_desc = pep->rx_used_desc_q;
rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
cmd_sts = rx_desc->cmd_sts;
- rmb();
+ dma_rmb();
if (cmd_sts & (BUF_OWNED_BY_DMA))
break;
skb = pep->rx_skb[rx_curr_desc];
@@ -981,8 +981,6 @@ static int pxa168_init_phy(struct net_device *dev)
pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
if (IS_ERR(pep->phy))
return PTR_ERR(pep->phy);
- if (!pep->phy)
- return -ENODEV;
err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link,
pep->phy_intf);
@@ -1289,7 +1287,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
- wmb();
+ dma_wmb();
desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
wmb();
@@ -1297,7 +1295,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
stats->tx_bytes += length;
stats->tx_packets++;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
/* We handled the current skb, but now we are out of space.*/
netif_stop_queue(dev);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index ec0a22119..467138b42 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2418,7 +2418,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_write32(hw, B0_IMSK, 0);
sky2_read32(hw, B0_IMSK);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
napi_disable(&hw->napi);
netif_tx_disable(dev);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index e0b68afea..d1cdc2d76 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -133,6 +133,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
static void mtk_phy_link_adjust(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
+ u16 lcl_adv = 0, rmt_adv = 0;
+ u8 flowctrl;
u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
@@ -150,11 +152,30 @@ static void mtk_phy_link_adjust(struct net_device *dev)
if (mac->phy_dev->link)
mcr |= MAC_MCR_FORCE_LINK;
- if (mac->phy_dev->duplex)
+ if (mac->phy_dev->duplex) {
mcr |= MAC_MCR_FORCE_DPX;
- if (mac->phy_dev->pause)
- mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
+ if (mac->phy_dev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (mac->phy_dev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ if (mac->phy_dev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+ if (flowctrl & FLOW_CTRL_TX)
+ mcr |= MAC_MCR_FORCE_TX_FC;
+ if (flowctrl & FLOW_CTRL_RX)
+ mcr |= MAC_MCR_FORCE_RX_FC;
+
+ netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
+ flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
+ flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
+ }
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
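mii_resolve_flowctrl_fdx() turns the local and link-partner pause advertisements built above into the TX/RX flow-control decision, following the standard 802.3 pause resolution table. A self-contained reimplementation of that resolution, for illustration only:

#include <stdio.h>

#define ADVERTISE_PAUSE_CAP	0x0400
#define ADVERTISE_PAUSE_ASYM	0x0800
#define LPA_PAUSE_CAP		0x0400
#define FLOW_CTRL_TX		0x1
#define FLOW_CTRL_RX		0x2

static unsigned char resolve_fdx(unsigned int lcl_adv, unsigned int rmt_adv)
{
	unsigned char cap = 0;

	if (lcl_adv & rmt_adv & ADVERTISE_PAUSE_CAP) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;	/* symmetric pause */
	} else if (lcl_adv & rmt_adv & ADVERTISE_PAUSE_ASYM) {
		if (lcl_adv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_RX;
		else if (rmt_adv & LPA_PAUSE_CAP)
			cap = FLOW_CTRL_TX;
	}
	return cap;
}

int main(void)
{
	/* both ends advertise symmetric pause -> rx and tx enabled */
	printf("%d\n", resolve_fdx(ADVERTISE_PAUSE_CAP, LPA_PAUSE_CAP)); /* 3 */
	return 0;
}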
@@ -208,10 +229,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
u32 val, ge_mode;
np = of_parse_phandle(mac->of_node, "phy-handle", 0);
+ if (!np && of_phy_is_fixed_link(mac->of_node))
+ if (!of_phy_register_fixed_link(mac->of_node))
+ np = of_node_get(mac->of_node);
if (!np)
return -ENODEV;
switch (of_get_phy_mode(np)) {
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
ge_mode = 0;
break;
@@ -236,7 +263,8 @@ static int mtk_phy_connect(struct mtk_mac *mac)
mac->phy_dev->autoneg = AUTONEG_ENABLE;
mac->phy_dev->speed = 0;
mac->phy_dev->duplex = 0;
- mac->phy_dev->supported &= PHY_BASIC_FEATURES;
+ mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause;
mac->phy_dev->advertising = mac->phy_dev->supported |
ADVERTISED_Autoneg;
phy_start_aneg(mac->phy_dev);
@@ -280,7 +308,7 @@ static int mtk_mdio_init(struct mtk_eth *eth)
return 0;
err_free_bus:
- kfree(eth->mii_bus);
+ mdiobus_free(eth->mii_bus);
err_put_node:
of_node_put(mii_np);
@@ -295,7 +323,7 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
mdiobus_unregister(eth->mii_bus);
of_node_put(eth->mii_bus->dev.of_node);
- kfree(eth->mii_bus);
+ mdiobus_free(eth->mii_bus);
}
static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
@@ -453,20 +481,23 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
/* the qdma core needs scratch memory to be set up */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
- dma_addr_t phy_ring_head, phy_ring_tail;
+ dma_addr_t phy_ring_tail;
int cnt = MTK_DMA_SIZE;
dma_addr_t dma_addr;
int i;
eth->scratch_ring = dma_alloc_coherent(eth->dev,
cnt * sizeof(struct mtk_tx_dma),
- &phy_ring_head,
+ &eth->phy_scratch_ring,
GFP_ATOMIC | __GFP_ZERO);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
GFP_KERNEL);
+ if (unlikely(!eth->scratch_head))
+ return -ENOMEM;
+
dma_addr = dma_map_single(eth->dev,
eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
DMA_FROM_DEVICE);
@@ -474,19 +505,19 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
return -ENOMEM;
memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
- phy_ring_tail = phy_ring_head +
+ phy_ring_tail = eth->phy_scratch_ring +
(sizeof(struct mtk_tx_dma) * (cnt - 1));
for (i = 0; i < cnt; i++) {
eth->scratch_ring[i].txd1 =
(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
if (i < cnt - 1)
- eth->scratch_ring[i].txd2 = (phy_ring_head +
+ eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
((i + 1) * sizeof(struct mtk_tx_dma)));
eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
}
- mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
+ mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
@@ -536,7 +567,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
struct mtk_eth *eth = mac->hw;
struct mtk_tx_dma *itxd, *txd;
struct mtk_tx_buf *tx_buf;
- unsigned long flags;
dma_addr_t mapped_addr;
unsigned int nr_frags;
int i, n_desc = 1;
@@ -568,11 +598,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
return -ENOMEM;
- /* normally we can rely on the stack not calling this more than once,
- * however we have 2 queues running ont he same ring so we need to lock
- * the ring access
- */
- spin_lock_irqsave(&eth->page_lock, flags);
WRITE_ONCE(itxd->txd1, mapped_addr);
tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -609,8 +634,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(txd->txd1, mapped_addr);
WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
TX_DMA_PLEN0(frag_map_size) |
- last_frag * TX_DMA_LS0) |
- mac->id);
+ last_frag * TX_DMA_LS0));
WRITE_ONCE(txd->txd4, 0);
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
@@ -632,8 +656,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
(!nr_frags * TX_DMA_LS0)));
- spin_unlock_irqrestore(&eth->page_lock, flags);
-
netdev_sent_queue(dev, skb->len);
skb_tx_timestamp(skb);
@@ -652,7 +674,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd);
/* unmap dma */
mtk_tx_unmap(&dev->dev, tx_buf);
@@ -661,8 +683,6 @@ err_dma:
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
} while (itxd != txd);
- spin_unlock_irqrestore(&eth->page_lock, flags);
-
return -ENOMEM;
}
@@ -681,7 +701,43 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
nfrags += skb_shinfo(skb)->nr_frags;
}
- return DIV_ROUND_UP(nfrags, 2);
+ return nfrags;
+}
+
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ if (netif_queue_stopped(eth->netdev[i]))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void mtk_wake_queue(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ netif_wake_queue(eth->netdev[i]);
+ }
+}
+
+static void mtk_stop_queue(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ netif_stop_queue(eth->netdev[i]);
+ }
}
static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -690,14 +746,22 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct mtk_eth *eth = mac->hw;
struct mtk_tx_ring *ring = &eth->tx_ring;
struct net_device_stats *stats = &dev->stats;
+ unsigned long flags;
bool gso = false;
int tx_num;
+ /* normally we can rely on the stack not calling this more than once,
+ * however we have 2 queues running on the same ring so we need to lock
+ * the ring access
+ */
+ spin_lock_irqsave(&eth->page_lock, flags);
+
tx_num = mtk_cal_txd_req(skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
- netif_stop_queue(dev);
+ mtk_stop_queue(eth);
netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n");
+ spin_unlock_irqrestore(&eth->page_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -719,16 +783,15 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
goto drop;
- if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
- netif_stop_queue(dev);
- if (unlikely(atomic_read(&ring->free_count) >
- ring->thresh))
- netif_wake_queue(dev);
- }
+ if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
+ mtk_stop_queue(eth);
+
+ spin_unlock_irqrestore(&eth->page_lock, flags);
return NETDEV_TX_OK;
drop:
+ spin_unlock_irqrestore(&eth->page_lock, flags);
stats->tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -777,6 +840,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
goto release_desc;
}
@@ -784,6 +848,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
put_page(virt_to_head_page(new_data));
+ netdev->stats.rx_dropped++;
goto release_desc;
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
@@ -872,7 +937,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
}
mtk_tx_unmap(eth->dev, tx_buf);
- ring->last_free->txd2 = next_cpu;
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -897,13 +961,9 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
if (!total)
return 0;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->netdev[i] ||
- unlikely(!netif_queue_stopped(eth->netdev[i])))
- continue;
- if (atomic_read(&ring->free_count) > ring->thresh)
- netif_wake_queue(eth->netdev[i]);
- }
+ if (mtk_queue_stopped(eth) &&
+ (atomic_read(&ring->free_count) > ring->thresh))
+ mtk_wake_queue(eth);
return total;
}
@@ -983,9 +1043,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->next_free = &ring->dma[0];
- ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
- ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
- MAX_SKB_FRAGS);
+ ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+ ring->thresh = MAX_SKB_FRAGS;
/* make sure that all changes to the dma ring are flushed before we
* continue
@@ -1163,6 +1222,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
for (i = 0; i < MTK_MAC_COUNT; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
+ if (eth->scratch_ring) {
+ dma_free_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+ eth->scratch_ring,
+ eth->phy_scratch_ring);
+ eth->scratch_ring = NULL;
+ eth->phy_scratch_ring = 0;
+ }
mtk_tx_clean(eth);
mtk_rx_clean(eth);
kfree(eth->scratch_head);
@@ -1176,7 +1243,7 @@ static void mtk_tx_timeout(struct net_device *dev)
eth->netdev[mac->id]->stats.tx_errors++;
netif_err(eth, tx_err, dev,
"transmit timed out\n");
- schedule_work(&mac->pending_work);
+ schedule_work(&eth->pending_work);
}
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
@@ -1225,7 +1292,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
mtk_w32(eth,
MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
- MTK_RX_BT_32DWORDS,
+ MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
MTK_QDMA_GLO_CFG);
return 0;
@@ -1339,7 +1406,7 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
/* disable delay and normal interrupt */
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
- mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ mtk_irq_disable(eth, ~0);
mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
mtk_w32(eth, 0, MTK_RST_GL);
@@ -1413,19 +1480,30 @@ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void mtk_pending_work(struct work_struct *work)
{
- struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
- struct mtk_eth *eth = mac->hw;
- struct net_device *dev = eth->netdev[mac->id];
- int err;
+ struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
+ int err, i;
+ unsigned long restart = 0;
rtnl_lock();
- mtk_stop(dev);
- err = mtk_open(dev);
- if (err) {
- netif_alert(eth, ifup, dev,
- "Driver up/down cycle failed, closing device.\n");
- dev_close(dev);
+ /* stop all devices to make sure that dma is properly shut down */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ mtk_stop(eth->netdev[i]);
+ __set_bit(i, &restart);
+ }
+
+ /* restart DMA and enable IRQs */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!test_bit(i, &restart))
+ continue;
+ err = mtk_open(eth->netdev[i]);
+ if (err) {
+ netif_alert(eth, ifup, eth->netdev[i],
+ "Driver up/down cycle failed, closing device.\n");
+ dev_close(eth->netdev[i]);
+ }
}
rtnl_unlock();
}
@@ -1435,15 +1513,13 @@ static int mtk_cleanup(struct mtk_eth *eth)
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
- struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
-
if (!eth->netdev[i])
continue;
unregister_netdev(eth->netdev[i]);
free_netdev(eth->netdev[i]);
- cancel_work_sync(&mac->pending_work);
}
+ cancel_work_sync(&eth->pending_work);
return 0;
}
@@ -1631,7 +1707,6 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->id = id;
mac->hw = eth;
mac->of_node = np;
- INIT_WORK(&mac->pending_work, mtk_pending_work);
mac->hw_stats = devm_kzalloc(eth->dev,
sizeof(*mac->hw_stats),
@@ -1645,6 +1720,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
+ eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
@@ -1678,10 +1754,6 @@ static int mtk_probe(struct platform_device *pdev)
struct mtk_eth *eth;
int err;
- err = device_reset(&pdev->dev);
- if (err)
- return err;
-
match = of_match_device(of_mtk_match, &pdev->dev);
soc = (struct mtk_soc_data *)match->data;
@@ -1736,6 +1808,7 @@ static int mtk_probe(struct platform_device *pdev)
eth->dev = &pdev->dev;
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
+ INIT_WORK(&eth->pending_work, mtk_pending_work);
err = mtk_hw_init(eth);
if (err)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 48a5292c8..a5eb7c623 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -91,6 +91,7 @@
#define MTK_QDMA_GLO_CFG 0x1A04
#define MTK_RX_2B_OFFSET BIT(31)
#define MTK_RX_BT_32DWORDS (3 << 11)
+#define MTK_NDP_CO_PRO BIT(10)
#define MTK_TX_WB_DDONE BIT(6)
#define MTK_DMA_SIZE_16DWORDS (2 << 4)
#define MTK_RX_DMA_BUSY BIT(3)
@@ -357,12 +358,14 @@ struct mtk_rx_ring {
 * @rx_ring: Pointer to the memory holding info about the RX ring
* @rx_napi: The NAPI struct
* @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring: physical address of scratch_ring
* @scratch_head: The scratch memory that scratch_ring points to.
* @clk_ethif: The ethif clock
* @clk_esw: The switch clock
* @clk_gp1: The gmac1 clock
* @clk_gp2: The gmac2 clock
* @mii_bus: If there is a bus we need to create an instance for it
+ * @pending_work: The workqueue used to reset the dma ring
*/
struct mtk_eth {
@@ -383,12 +386,14 @@ struct mtk_eth {
struct mtk_rx_ring rx_ring;
struct napi_struct rx_napi;
struct mtk_tx_dma *scratch_ring;
+ dma_addr_t phy_scratch_ring;
void *scratch_head;
struct clk *clk_ethif;
struct clk *clk_esw;
struct clk *clk_gp1;
struct clk *clk_gp2;
struct mii_bus *mii_bus;
+ struct work_struct pending_work;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
@@ -398,7 +403,6 @@ struct mtk_eth {
 * @hw: Backpointer to our main data structure
* @hw_stats: Packet statistics counter
* @phy_dev: The attached PHY if available
- * @pending_work: The workqueue used to reset the dma ring
*/
struct mtk_mac {
int id;
@@ -406,7 +410,6 @@ struct mtk_mac {
struct mtk_eth *hw;
struct mtk_hw_stats *hw_stats;
struct phy_device *phy_dev;
- struct work_struct pending_work;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 0c51c69f8..249a45844 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -576,41 +576,48 @@ out:
return res;
}
-/*
- * Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0. If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
- */
-int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
- struct mlx4_buf *buf, gfp_t gfp)
+static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
+ struct mlx4_buf *buf, gfp_t gfp)
{
dma_addr_t t;
- if (size <= max_direct) {
- buf->nbufs = 1;
- buf->npages = 1;
- buf->page_shift = get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev,
- size, &t, gfp);
- if (!buf->direct.buf)
- return -ENOMEM;
+ buf->nbufs = 1;
+ buf->npages = 1;
+ buf->page_shift = get_order(size) + PAGE_SHIFT;
+ buf->direct.buf =
+ dma_zalloc_coherent(&dev->persist->pdev->dev,
+ size, &t, gfp);
+ if (!buf->direct.buf)
+ return -ENOMEM;
- buf->direct.map = t;
+ buf->direct.map = t;
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
+ while (t & ((1 << buf->page_shift) - 1)) {
+ --buf->page_shift;
+ buf->npages *= 2;
+ }
- memset(buf->direct.buf, 0, size);
+ return 0;
+}
+
+/* Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0. If the
+ * requested size is > max_direct, we split the allocation into
+ * multiple pages, so we don't require too much contiguous memory.
+ */
+int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+ struct mlx4_buf *buf, gfp_t gfp)
+{
+ if (size <= max_direct) {
+ return mlx4_buf_direct_alloc(dev, size, buf, gfp);
} else {
+ dma_addr_t t;
int i;
- buf->direct.buf = NULL;
- buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- buf->npages = buf->nbufs;
+ buf->direct.buf = NULL;
+ buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
gfp);
@@ -619,28 +626,12 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf =
- dma_alloc_coherent(&dev->persist->pdev->dev,
- PAGE_SIZE,
- &t, gfp);
+ dma_zalloc_coherent(&dev->persist->pdev->dev,
+ PAGE_SIZE, &t, gfp);
if (!buf->page_list[i].buf)
goto err_free;
buf->page_list[i].map = t;
-
- memset(buf->page_list[i].buf, 0, PAGE_SIZE);
- }
-
- if (BITS_PER_LONG == 64) {
- struct page **pages;
- pages = kmalloc(sizeof *pages * buf->nbufs, gfp);
- if (!pages)
- goto err_free;
- for (i = 0; i < buf->nbufs; ++i)
- pages[i] = virt_to_page(buf->page_list[i].buf);
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- goto err_free;
}
}
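With the vmap() fallback removed, an indirect buffer is only ever accessed page by page through page_list, and the split is a plain ceiling division over the page size. A worked example (the PAGE_SIZE value is illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096	/* illustrative; the real value is per-arch */

int main(void)
{
	int size = 10000;	/* a request larger than max_direct */
	int nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;

	printf("nbufs = %d\n", nbufs);	/* 3 single-page chunks */
	return 0;
}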
@@ -655,15 +646,11 @@ EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
- int i;
-
- if (buf->nbufs == 1)
+ if (buf->nbufs == 1) {
dma_free_coherent(&dev->persist->pdev->dev, size,
- buf->direct.buf,
- buf->direct.map);
- else {
- if (BITS_PER_LONG == 64)
- vunmap(buf->direct.buf);
+ buf->direct.buf, buf->direct.map);
+ } else {
+ int i;
for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf)
@@ -789,7 +776,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
EXPORT_SYMBOL_GPL(mlx4_db_free);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
- int size, int max_direct)
+ int size)
{
int err;
@@ -799,7 +786,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
*wqres->db.db = 0;
- err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL);
+ err = mlx4_buf_direct_alloc(dev, size, &wqres->buf, GFP_KERNEL);
if (err)
goto err_db;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e94ca1c3f..f04a423ff 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2597,7 +2597,6 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
priv->cmd.free_head = 0;
sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
- spin_lock_init(&priv->cmd.context_lock);
for (priv->cmd.token_mask = 1;
priv->cmd.token_mask < priv->cmd.max_cmds;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index af975a2b7..132cea655 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -73,22 +73,16 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
*/
set_dev_node(&mdev->dev->persist->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
- cq->buf_size, 2 * PAGE_SIZE);
+ cq->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err)
goto err_cq;
- err = mlx4_en_map_buffer(&cq->wqres.buf);
- if (err)
- goto err_res;
-
cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
*pcq = cq;
return 0;
-err_res:
- mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
err_cq:
kfree(cq);
*pcq = NULL;
@@ -177,7 +171,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cq *cq = *pcq;
- mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
cq->is_tx == RX)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c761194bb..44cf16d01 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- data[index++] = ((unsigned long *)&priv->stats)[i];
+ data[index++] = ((unsigned long *)&dev->stats)[i];
for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
@@ -1042,6 +1042,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
u32 rx_size, tx_size;
int port_up = 0;
int err = 0;
@@ -1061,22 +1063,25 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
tx_size == priv->tx_ring[0]->size)
return 0;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.tx_ring_size = tx_size;
+ new_prof.rx_ring_size = rx_size;
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
- priv->prof->tx_ring_size = tx_size;
- priv->prof->rx_ring_size = rx_size;
+ mlx4_en_safe_replace_resources(priv, tmp);
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
@@ -1084,8 +1089,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
}
err = mlx4_en_moderation_update(priv);
-
out:
+ kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}
@@ -1714,6 +1719,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;
@@ -1723,23 +1730,26 @@ static int mlx4_en_set_channels(struct net_device *dev,
!channel->tx_count || !channel->rx_count)
return -EINVAL;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.num_tx_rings_p_up = channel->tx_count;
+ new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+ new_prof.rx_ring_num = channel->rx_count;
+
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
- priv->num_tx_rings_p_up = channel->tx_count;
- priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
- priv->rx_ring_num = channel->rx_count;
-
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
+ mlx4_en_safe_replace_resources(priv, tmp);
netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@ -1757,8 +1767,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
}
err = mlx4_en_moderation_update(priv);
-
out:
+ kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index b4b258c8c..8359e9e51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -406,14 +406,18 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
mutex_lock(&mdev->state_lock);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
- if (err)
+ if (err) {
en_err(priv, "Failed configuring VLAN filter\n");
+ goto out;
+ }
}
- if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
- en_dbg(HW, priv, "failed adding vlan %d\n", vid);
- mutex_unlock(&mdev->state_lock);
+ err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
+ if (err)
+ en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
- return 0;
+out:
+ mutex_unlock(&mdev->state_lock);
+ return err;
}
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
@@ -421,7 +425,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- int err;
+ int err = 0;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
@@ -438,7 +442,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
}
mutex_unlock(&mdev->state_lock);
- return 0;
+ return err;
}
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
@@ -1296,15 +1300,16 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
}
-static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
spin_lock_bh(&priv->stats_lock);
- memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+ netdev_stats_to_stats64(stats, &dev->stats);
spin_unlock_bh(&priv->stats_lock);
- return &priv->ret_stats;
+ return stats;
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
@@ -1856,6 +1861,7 @@ static void mlx4_en_restart(struct work_struct *work)
en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
+ rtnl_lock();
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
mlx4_en_stop_port(dev, 1);
@@ -1863,6 +1869,7 @@ static void mlx4_en_restart(struct work_struct *work)
en_err(priv, "Failed restarting port %d\n", priv->port);
}
mutex_unlock(&mdev->state_lock);
+ rtnl_unlock();
}
static void mlx4_en_clear_stats(struct net_device *dev)
@@ -1874,7 +1881,6 @@ static void mlx4_en_clear_stats(struct net_device *dev)
if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
en_dbg(HW, priv, "Failed dumping statistics\n");
- memset(&priv->stats, 0, sizeof(priv->stats));
memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
@@ -1890,6 +1896,11 @@ static void mlx4_en_clear_stats(struct net_device *dev)
priv->tx_ring[i]->bytes = 0;
priv->tx_ring[i]->packets = 0;
priv->tx_ring[i]->tx_csum = 0;
+ priv->tx_ring[i]->tx_dropped = 0;
+ priv->tx_ring[i]->queue_stopped = 0;
+ priv->tx_ring[i]->wake_queue = 0;
+ priv->tx_ring[i]->tso_packets = 0;
+ priv->tx_ring[i]->xmit_more = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i]->bytes = 0;
@@ -1943,7 +1954,7 @@ static int mlx4_en_close(struct net_device *dev)
return 0;
}
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
int i;
@@ -1968,7 +1979,7 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
}
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
int i;
@@ -2025,11 +2036,91 @@ err:
return -ENOMEM;
}
+static void mlx4_en_shutdown(struct net_device *dev)
+{
+ rtnl_lock();
+ netif_device_detach(dev);
+ mlx4_en_close(dev);
+ rtnl_unlock();
+}
+
+static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
+ struct mlx4_en_priv *src,
+ struct mlx4_en_port_profile *prof)
+{
+ memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
+ sizeof(dst->hwtstamp_config));
+ dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
+ dst->tx_ring_num = prof->tx_ring_num;
+ dst->rx_ring_num = prof->rx_ring_num;
+ dst->flags = prof->flags;
+ dst->mdev = src->mdev;
+ dst->port = src->port;
+ dst->dev = src->dev;
+ dst->prof = prof;
+ dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+ DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+
+ dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
+ GFP_KERNEL);
+ if (!dst->tx_ring)
+ return -ENOMEM;
+
+ dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
+ GFP_KERNEL);
+ if (!dst->tx_cq) {
+ kfree(dst->tx_ring);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
+ struct mlx4_en_priv *src)
+{
+ memcpy(dst->rx_ring, src->rx_ring,
+ sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
+ memcpy(dst->rx_cq, src->rx_cq,
+ sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
+ memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
+ sizeof(dst->hwtstamp_config));
+ dst->tx_ring_num = src->tx_ring_num;
+ dst->rx_ring_num = src->rx_ring_num;
+ dst->tx_ring = src->tx_ring;
+ dst->tx_cq = src->tx_cq;
+ memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
+}
+
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
+ struct mlx4_en_port_profile *prof)
+{
+ mlx4_en_copy_priv(tmp, priv, prof);
+
+ if (mlx4_en_alloc_resources(tmp)) {
+ en_warn(priv,
+ "%s: Resource allocation failed, using previous configuration\n",
+ __func__);
+ kfree(tmp->tx_ring);
+ kfree(tmp->tx_cq);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp)
+{
+ mlx4_en_free_resources(priv);
+ mlx4_en_update_priv(priv, tmp);
+}
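Together these helpers make reconfiguration transactional: the new rings are allocated into a scratch priv first, and only once allocation succeeds are the old resources freed and the new ones adopted, so a failed resize leaves the running port untouched. Condensed from the mlx4_en_set_ringparam() hunk above (locking and error paths elided):

tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
new_prof.tx_ring_size = tx_size;

err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
if (!err) {					/* old config kept on failure */
	if (priv->port_up)
		mlx4_en_stop_port(dev, 1);
	mlx4_en_safe_replace_resources(priv, tmp);	/* free old, adopt new */
	/* ...restart port, update moderation... */
}
kfree(tmp);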
void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ bool shutdown = mdev->dev->persist->interface_state &
+ MLX4_INTERFACE_STATE_SHUTDOWN;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
@@ -2037,7 +2128,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
if (priv->registered) {
devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
priv->port));
- unregister_netdev(dev);
+ if (shutdown)
+ mlx4_en_shutdown(dev);
+ else
+ unregister_netdev(dev);
}
if (priv->allocated)
@@ -2057,12 +2151,17 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
mdev->upper[priv->port] = NULL;
mutex_unlock(&mdev->state_lock);
+#ifdef CONFIG_RFS_ACCEL
+ mlx4_en_cleanup_filters(priv);
+#endif
+
mlx4_en_free_resources(priv);
kfree(priv->tx_ring);
kfree(priv->tx_cq);
- free_netdev(dev);
+ if (!shutdown)
+ free_netdev(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
@@ -2355,8 +2454,12 @@ out:
}
/* set offloads */
- priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+ priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
}
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2365,8 +2468,12 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
vxlan_del_task);
/* unset offloads */
- priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
+ priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL);
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2425,7 +2532,23 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
netdev_features_t features)
{
features = vlan_features_check(skb, features);
- return vxlan_features_check(skb, features);
+ features = vxlan_features_check(skb, features);
+
+ /* The ConnectX-3 doesn't support outer IPv6 checksums but it does
+ * support inner IPv6 checksums and segmentation, so we need to
+ * strip that feature if this is an IPv6 encapsulated frame.
+ */
+ if (skb->encapsulation &&
+ (skb->ip_summed == CHECKSUM_PARTIAL)) {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (!priv->vxlan_port ||
+ (ip_hdr(skb)->version != 4) ||
+ (udp_hdr(skb)->dest != priv->vxlan_port))
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
+
+ return features;
}
#endif
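The added check strips checksum and GSO offloads from any encapsulated frame that is not an outer-IPv4 VXLAN flow to the configured port, since ConnectX-3 cannot generate outer IPv6 checksums. A condensed userspace model of the keep/strip predicate (4789 is just the IANA default VXLAN port):

#include <stdbool.h>
#include <stdio.h>

static bool keep_tunnel_offloads(bool have_vxlan_port, int outer_ip_version,
				 int udp_dest, int vxlan_port)
{
	return have_vxlan_port && outer_ip_version == 4 &&
	       udp_dest == vxlan_port;
}

int main(void)
{
	printf("%d\n", keep_tunnel_offloads(true, 4, 4789, 4789)); /* 1 */
	printf("%d\n", keep_tunnel_offloads(true, 6, 4789, 4789)); /* 0 */
	return 0;
}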
@@ -2461,7 +2584,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
- .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
@@ -2493,7 +2616,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
- .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
@@ -2907,7 +3030,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/* Allocate page for receive rings */
err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
- MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
+ MLX4_EN_PAGE_SIZE);
if (err) {
en_err(priv, "Failed to allocate page for rx qps\n");
goto out;
@@ -2990,8 +3113,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
- dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ dev->features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
mdev->pndev[port] = dev;
@@ -3071,6 +3199,8 @@ int mlx4_en_reset_config(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;
@@ -3087,19 +3217,29 @@ int mlx4_en_reset_config(struct net_device *dev,
return -EINVAL;
}
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
+
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
- ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+ ts_config.rx_filter,
+ !!(features & NETIF_F_HW_VLAN_CTAG_RX));
- priv->hwtstamp_config.tx_type = ts_config.tx_type;
- priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
+ mlx4_en_safe_replace_resources(priv, tmp);
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -3133,11 +3273,6 @@ int mlx4_en_reset_config(struct net_device *dev,
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
@@ -3146,6 +3281,8 @@ int mlx4_en_reset_config(struct net_device *dev,
out:
mutex_unlock(&mdev->state_lock);
- netdev_features_change(dev);
+ kfree(tmp);
+ if (!err)
+ netdev_features_change(dev);
return err;
}
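
mlx4_en_reset_config() now builds the new resources off to the side and swaps them in, instead of freeing first and reallocating, so an allocation failure leaves the running configuration intact. The same shape in miniature, with illustrative types and helpers:

	int example_reconfigure(struct example_dev *dev,
				const struct example_profile *new_prof)
	{
		struct example_state *tmp;
		int err;

		tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		err = example_alloc_resources(tmp, new_prof);	/* old state untouched */
		if (err)
			goto out;

		example_swap_and_free_old(dev, tmp);	/* the only commit point */
	out:
		kfree(tmp);
		return err;
	}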
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 20b6c2e67..5aa8b751f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct mlx4_counter tmp_counter_stats;
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
struct mlx4_en_stat_out_flow_control_mbox *flowstats;
- struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
- struct net_device_stats *stats = &priv->stats;
+ struct net_device *dev = mdev->pndev[port];
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
@@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
+ stats->tx_dropped = 0;
priv->port_stats.tx_chksum_offload = 0;
priv->port_stats.queue_stopped = 0;
priv->port_stats.wake_queue = 0;
@@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->tx_packets += ring->packets;
stats->tx_bytes += ring->bytes;
+ stats->tx_dropped += ring->tx_dropped;
priv->port_stats.tx_chksum_offload += ring->tx_csum;
priv->port_stats.queue_stopped += ring->queue_stopped;
priv->port_stats.wake_queue += ring->wake_queue;
@@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
&mlx4_en_stats->MCAST_prio_1,
NUM_PRIORITIES);
- stats->collisions = 0;
stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
sw_rx_dropped;
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
- stats->rx_over_errors = 0;
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
- stats->rx_frame_errors = 0;
stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
- stats->rx_missed_errors = 0;
- stats->tx_aborted_errors = 0;
- stats->tx_carrier_errors = 0;
- stats->tx_fifo_errors = 0;
- stats->tx_heartbeat_errors = 0;
- stats->tx_window_errors = 0;
- stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);
+ stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);
/* RX stats */
priv->pkstats.rx_multicast_packets = stats->multicast;
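
The en_port.c change folds software TX drops into per-ring counters that the stats pass sums, so the hot path never contends on a shared counter; the HW drop count (TDROP) is then added on top rather than overwriting the total. Reduced to its core (names illustrative):

	struct example_tx_ring {
		unsigned long tx_dropped;	/* written only by this ring's TX path */
	};

	static void example_fold_tx_dropped(struct net_device_stats *stats,
					    struct example_tx_ring **rings,
					    int num_rings, u32 hw_tdrop)
	{
		int i;

		stats->tx_dropped = 0;
		for (i = 0; i < num_rings; i++)
			stats->tx_dropped += rings[i]->tx_dropped;
		stats->tx_dropped += hw_tdrop;	/* note "+=", matching the hunk above */
	}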
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 02e925d6f..a6b0db0e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -107,37 +107,6 @@ int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
return ret;
}
-int mlx4_en_map_buffer(struct mlx4_buf *buf)
-{
- struct page **pages;
- int i;
-
- if (BITS_PER_LONG == 64 || buf->nbufs == 1)
- return 0;
-
- pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- for (i = 0; i < buf->nbufs; ++i)
- pages[i] = virt_to_page(buf->page_list[i].buf);
-
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- return -ENOMEM;
-
- return 0;
-}
-
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
-{
- if (BITS_PER_LONG == 64 || buf->nbufs == 1)
- return;
-
- vunmap(buf->direct.buf);
-}
-
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
{
return;
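
For reference, the deleted mlx4_en_map_buffer() used the generic vmap() idiom below to stitch a chunked allocation into one contiguous kernel virtual range on 32-bit builds. It can go away here presumably because mlx4_alloc_hwq_res() now always hands back a directly usable buffer (note the dropped max_direct argument at every call site later in this patch).

	#include <linux/vmalloc.h>

	/* Map an array of (possibly discontiguous) pages at one virtual address. */
	static void *example_map_pages(struct page **pages, int npages)
	{
		return vmap(pages, npages, VM_MAP, PAGE_KERNEL);
	}

	static void example_unmap_pages(void *addr)
	{
		vunmap(addr);
	}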
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index ca3a38421..99b5407f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -394,17 +394,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
/* Allocate HW buffers on provided NUMA node */
set_dev_node(&mdev->dev->persist->pdev->dev, node);
- err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
- ring->buf_size, 2 * PAGE_SIZE);
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err)
goto err_info;
- err = mlx4_en_map_buffer(&ring->wqres.buf);
- if (err) {
- en_err(priv, "Failed to map RX buffer\n");
- goto err_hwq;
- }
ring->buf = ring->wqres.buf.direct.buf;
ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
@@ -412,8 +406,6 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
*pring = ring;
return 0;
-err_hwq:
- mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_info:
vfree(ring->rx_info);
ring->rx_info = NULL;
@@ -517,15 +509,11 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rx_ring *ring = *pring;
- mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
-#ifdef CONFIG_RFS_ACCEL
- mlx4_en_cleanup_filters(priv);
-#endif
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index a386f047c..76aa4d271 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -41,6 +41,7 @@
#include <linux/vmalloc.h>
#include <linux/tcp.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/moduleparam.h>
#include "mlx4_en.h"
@@ -93,20 +94,13 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
/* Allocate HW buffers on provided NUMA node */
set_dev_node(&mdev->dev->persist->pdev->dev, node);
- err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
- 2 * PAGE_SIZE);
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err) {
en_err(priv, "Failed allocating hwq resources\n");
goto err_bounce;
}
- err = mlx4_en_map_buffer(&ring->wqres.buf);
- if (err) {
- en_err(priv, "Failed to map TX buffer\n");
- goto err_hwq_res;
- }
-
ring->buf = ring->wqres.buf.direct.buf;
en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
@@ -117,7 +111,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
MLX4_RESERVE_ETH_BF_QP);
if (err) {
en_err(priv, "failed reserving qp for TX ring\n");
- goto err_map;
+ goto err_hwq_res;
}
err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
@@ -154,8 +148,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
err_reserve:
mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
-err_map:
- mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
@@ -182,7 +174,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
mlx4_qp_remove(mdev->dev, &ring->qp);
mlx4_qp_free(mdev->dev, &ring->qp);
mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
- mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
@@ -735,12 +726,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool inline_ok;
u32 ring_cons;
- if (!priv->port_up)
- goto tx_drop;
-
tx_ind = skb_get_queue_mapping(skb);
ring = priv->tx_ring[tx_ind];
+ if (!priv->port_up)
+ goto tx_drop;
+
/* fetch ring->cons far ahead before needing it to avoid stall */
ring_cons = ACCESS_ONCE(ring->cons);
@@ -920,8 +911,18 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_ind, fragptr);
if (skb->encapsulation) {
- struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);
- if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP)
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ u8 proto;
+
+ ip.hdr = skb_inner_network_header(skb);
+ proto = (ip.v4->version == 4) ? ip.v4->protocol :
+ ip.v6->nexthdr;
+
+ if (proto == IPPROTO_TCP || proto == IPPROTO_UDP)
op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP);
else
op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
@@ -1029,7 +1030,7 @@ tx_drop_unmap:
tx_drop:
dev_kfree_skb_any(skb);
- priv->stats.tx_dropped++;
+ ring->tx_dropped++;
return NETDEV_TX_OK;
}
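
The encapsulation fix in mlx4_en_xmit() reads the inner header through a union so one pointer serves both IP versions; the 4-bit version field occupies the same offset in struct iphdr and struct ipv6hdr, so it can safely choose which protocol field to read. In isolation:

	static u8 example_inner_l4_proto(const struct sk_buff *skb)
	{
		union {
			struct iphdr *v4;
			struct ipv6hdr *v6;
			unsigned char *hdr;
		} ip;

		ip.hdr = skb_inner_network_header(skb);
		/* version nibble is shared; nexthdr is IPv6's protocol field */
		return (ip.v4->version == 4) ? ip.v4->protocol : ip.v6->nexthdr;
	}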
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 12c77a70a..546fab0ec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3222,6 +3222,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
INIT_LIST_HEAD(&priv->pgdir_list);
mutex_init(&priv->pgdir_mutex);
+ spin_lock_init(&priv->cmd.context_lock);
INIT_LIST_HEAD(&priv->bf_list);
mutex_init(&priv->bf_mutex);
@@ -4134,8 +4135,11 @@ static void mlx4_shutdown(struct pci_dev *pdev)
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
mutex_lock(&persist->interface_state_mutex);
- if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+ if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
+ /* Notify mlx4 clients that the kernel is being shut down */
+ persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
mlx4_unload_one(pdev);
+ }
mutex_unlock(&persist->interface_state_mutex);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 6aa73972d..f2d092001 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1102,7 +1102,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
u32 members_count;
- int index, prev;
+ int index = -1, prev;
int link = 0;
int i;
int err;
@@ -1181,7 +1181,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
goto out;
out:
- if (prot == MLX4_PROT_ETH) {
+ if (prot == MLX4_PROT_ETH && index != -1) {
/* manage the steering entry for promisc mode */
if (new_entry)
err = new_steering_entry(dev, port, steer,
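
The mcg.c fix is the classic sentinel-index pattern: start the index at -1 so the shared out: label can tell whether the lookup ever produced a valid entry before acting on it. A generic sketch; both helpers are hypothetical:

	int example_attach(void)
	{
		int index = -1;		/* sentinel: no entry claimed yet */
		int err;

		err = example_find_entry(&index);	/* hypothetical lookup */
		if (err)
			goto out;

		/* ... work with index ... */
	out:
		if (index != -1)			/* skip cleanup on early failure */
			example_put_entry(index);	/* hypothetical release */
		return err;
	}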
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 63b1aeae2..13d297ee3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -270,6 +270,7 @@ struct mlx4_en_tx_ring {
unsigned long tx_csum;
unsigned long tso_packets;
unsigned long xmit_more;
+ unsigned int tx_dropped;
struct mlx4_bf bf;
unsigned long queue_stopped;
@@ -352,12 +353,14 @@ struct mlx4_en_port_profile {
u32 rx_ring_num;
u32 tx_ring_size;
u32 rx_ring_size;
+ u8 num_tx_rings_p_up;
u8 rx_pause;
u8 rx_ppp;
u8 tx_pause;
u8 tx_ppp;
int rss_rings;
int inline_thold;
+ struct hwtstamp_config hwtstamp_config;
};
struct mlx4_en_profile {
@@ -482,8 +485,6 @@ struct mlx4_en_priv {
struct mlx4_en_port_profile *prof;
struct net_device *dev;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct net_device_stats stats;
- struct net_device_stats ret_stats;
struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
@@ -624,8 +625,11 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause);
-void mlx4_en_free_resources(struct mlx4_en_priv *priv);
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
+ struct mlx4_en_port_profile *prof);
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
int entries, int ring, enum cq_type mode, int node);
@@ -672,8 +676,6 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int user_prio,
struct mlx4_qp_context *context);
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
-int mlx4_en_map_buffer(struct mlx4_buf *buf);
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
int loopback);
void mlx4_en_calc_rx_buf(struct net_device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index f5c3b9465..1cf722eba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -31,10 +31,3 @@ config MLX5_CORE_EN_DCB
This flag depends on the kernel's DCB support.
If unsure, set to Y
-
-config MLX5_CORE_EN_VXLAN
- bool "VXLAN offloads Support"
- default y
- depends on MLX5_CORE_EN && VXLAN && !(MLX5_CORE=y && VXLAN=m)
- ---help---
- Say Y here if you want to use VXLAN offloads in the driver.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index bf65b71c7..9ea7b5830 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -2,11 +2,10 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
- mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o
+ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o fs_counters.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
- en_txrx.o en_clock.o en_tc.o
+ en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o
-mlx5_core-$(CONFIG_MLX5_CORE_EN_VXLAN) += vxlan.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index eb926e1ee..d6e2a1cae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -294,6 +294,13 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -320,8 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
case MLX5_CMD_OP_SQERR2RTS_QP:
- case MLX5_CMD_OP_2ERR_QP:
- case MLX5_CMD_OP_2RST_QP:
case MLX5_CMD_OP_QUERY_QP:
case MLX5_CMD_OP_SQD_RTS_QP:
case MLX5_CMD_OP_INIT2INIT_QP:
@@ -341,7 +346,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
- case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
case MLX5_CMD_OP_SET_ROCE_ADDRESS:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
@@ -389,12 +393,15 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_RQT:
case MLX5_CMD_OP_MODIFY_RQT:
case MLX5_CMD_OP_QUERY_RQT:
+
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
case MLX5_CMD_OP_QUERY_FLOW_TABLE:
case MLX5_CMD_OP_CREATE_FLOW_GROUP:
case MLX5_CMD_OP_QUERY_FLOW_GROUP:
- case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -406,178 +413,143 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
const char *mlx5_command_str(int command)
{
- switch (command) {
- case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
- return "QUERY_HCA_VPORT_CONTEXT";
-
- case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
- return "MODIFY_HCA_VPORT_CONTEXT";
-
- case MLX5_CMD_OP_QUERY_HCA_CAP:
- return "QUERY_HCA_CAP";
-
- case MLX5_CMD_OP_SET_HCA_CAP:
- return "SET_HCA_CAP";
-
- case MLX5_CMD_OP_QUERY_ADAPTER:
- return "QUERY_ADAPTER";
-
- case MLX5_CMD_OP_INIT_HCA:
- return "INIT_HCA";
-
- case MLX5_CMD_OP_TEARDOWN_HCA:
- return "TEARDOWN_HCA";
-
- case MLX5_CMD_OP_ENABLE_HCA:
- return "MLX5_CMD_OP_ENABLE_HCA";
-
- case MLX5_CMD_OP_DISABLE_HCA:
- return "MLX5_CMD_OP_DISABLE_HCA";
-
- case MLX5_CMD_OP_QUERY_PAGES:
- return "QUERY_PAGES";
-
- case MLX5_CMD_OP_MANAGE_PAGES:
- return "MANAGE_PAGES";
-
- case MLX5_CMD_OP_CREATE_MKEY:
- return "CREATE_MKEY";
-
- case MLX5_CMD_OP_QUERY_MKEY:
- return "QUERY_MKEY";
-
- case MLX5_CMD_OP_DESTROY_MKEY:
- return "DESTROY_MKEY";
-
- case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
- return "QUERY_SPECIAL_CONTEXTS";
-
- case MLX5_CMD_OP_CREATE_EQ:
- return "CREATE_EQ";
-
- case MLX5_CMD_OP_DESTROY_EQ:
- return "DESTROY_EQ";
-
- case MLX5_CMD_OP_QUERY_EQ:
- return "QUERY_EQ";
-
- case MLX5_CMD_OP_CREATE_CQ:
- return "CREATE_CQ";
-
- case MLX5_CMD_OP_DESTROY_CQ:
- return "DESTROY_CQ";
-
- case MLX5_CMD_OP_QUERY_CQ:
- return "QUERY_CQ";
-
- case MLX5_CMD_OP_MODIFY_CQ:
- return "MODIFY_CQ";
-
- case MLX5_CMD_OP_CREATE_QP:
- return "CREATE_QP";
-
- case MLX5_CMD_OP_DESTROY_QP:
- return "DESTROY_QP";
-
- case MLX5_CMD_OP_RST2INIT_QP:
- return "RST2INIT_QP";
-
- case MLX5_CMD_OP_INIT2RTR_QP:
- return "INIT2RTR_QP";
-
- case MLX5_CMD_OP_RTR2RTS_QP:
- return "RTR2RTS_QP";
-
- case MLX5_CMD_OP_RTS2RTS_QP:
- return "RTS2RTS_QP";
-
- case MLX5_CMD_OP_SQERR2RTS_QP:
- return "SQERR2RTS_QP";
-
- case MLX5_CMD_OP_2ERR_QP:
- return "2ERR_QP";
-
- case MLX5_CMD_OP_2RST_QP:
- return "2RST_QP";
-
- case MLX5_CMD_OP_QUERY_QP:
- return "QUERY_QP";
-
- case MLX5_CMD_OP_MAD_IFC:
- return "MAD_IFC";
-
- case MLX5_CMD_OP_INIT2INIT_QP:
- return "INIT2INIT_QP";
-
- case MLX5_CMD_OP_CREATE_PSV:
- return "CREATE_PSV";
-
- case MLX5_CMD_OP_DESTROY_PSV:
- return "DESTROY_PSV";
-
- case MLX5_CMD_OP_CREATE_SRQ:
- return "CREATE_SRQ";
-
- case MLX5_CMD_OP_DESTROY_SRQ:
- return "DESTROY_SRQ";
-
- case MLX5_CMD_OP_QUERY_SRQ:
- return "QUERY_SRQ";
-
- case MLX5_CMD_OP_ARM_RQ:
- return "ARM_RQ";
-
- case MLX5_CMD_OP_CREATE_XRC_SRQ:
- return "CREATE_XRC_SRQ";
-
- case MLX5_CMD_OP_DESTROY_XRC_SRQ:
- return "DESTROY_XRC_SRQ";
-
- case MLX5_CMD_OP_QUERY_XRC_SRQ:
- return "QUERY_XRC_SRQ";
-
- case MLX5_CMD_OP_ARM_XRC_SRQ:
- return "ARM_XRC_SRQ";
-
- case MLX5_CMD_OP_ALLOC_PD:
- return "ALLOC_PD";
-
- case MLX5_CMD_OP_DEALLOC_PD:
- return "DEALLOC_PD";
-
- case MLX5_CMD_OP_ALLOC_UAR:
- return "ALLOC_UAR";
-
- case MLX5_CMD_OP_DEALLOC_UAR:
- return "DEALLOC_UAR";
-
- case MLX5_CMD_OP_ATTACH_TO_MCG:
- return "ATTACH_TO_MCG";
-
- case MLX5_CMD_OP_DETTACH_FROM_MCG:
- return "DETTACH_FROM_MCG";
-
- case MLX5_CMD_OP_ALLOC_XRCD:
- return "ALLOC_XRCD";
-
- case MLX5_CMD_OP_DEALLOC_XRCD:
- return "DEALLOC_XRCD";
-
- case MLX5_CMD_OP_ACCESS_REG:
- return "MLX5_CMD_OP_ACCESS_REG";
-
- case MLX5_CMD_OP_SET_WOL_ROL:
- return "SET_WOL_ROL";
-
- case MLX5_CMD_OP_QUERY_WOL_ROL:
- return "QUERY_WOL_ROL";
-
- case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
- return "ADD_VXLAN_UDP_DPORT";
-
- case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
- return "DELETE_VXLAN_UDP_DPORT";
+#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd
+ switch (command) {
+ MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
+ MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
+ MLX5_COMMAND_STR_CASE(INIT_HCA);
+ MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
+ MLX5_COMMAND_STR_CASE(ENABLE_HCA);
+ MLX5_COMMAND_STR_CASE(DISABLE_HCA);
+ MLX5_COMMAND_STR_CASE(QUERY_PAGES);
+ MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
+ MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
+ MLX5_COMMAND_STR_CASE(QUERY_ISSI);
+ MLX5_COMMAND_STR_CASE(SET_ISSI);
+ MLX5_COMMAND_STR_CASE(CREATE_MKEY);
+ MLX5_COMMAND_STR_CASE(QUERY_MKEY);
+ MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
+ MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
+ MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
+ MLX5_COMMAND_STR_CASE(CREATE_EQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_EQ);
+ MLX5_COMMAND_STR_CASE(QUERY_EQ);
+ MLX5_COMMAND_STR_CASE(GEN_EQE);
+ MLX5_COMMAND_STR_CASE(CREATE_CQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_CQ);
+ MLX5_COMMAND_STR_CASE(QUERY_CQ);
+ MLX5_COMMAND_STR_CASE(MODIFY_CQ);
+ MLX5_COMMAND_STR_CASE(CREATE_QP);
+ MLX5_COMMAND_STR_CASE(DESTROY_QP);
+ MLX5_COMMAND_STR_CASE(RST2INIT_QP);
+ MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
+ MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
+ MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
+ MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
+ MLX5_COMMAND_STR_CASE(2ERR_QP);
+ MLX5_COMMAND_STR_CASE(2RST_QP);
+ MLX5_COMMAND_STR_CASE(QUERY_QP);
+ MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
+ MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
+ MLX5_COMMAND_STR_CASE(CREATE_PSV);
+ MLX5_COMMAND_STR_CASE(DESTROY_PSV);
+ MLX5_COMMAND_STR_CASE(CREATE_SRQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
+ MLX5_COMMAND_STR_CASE(QUERY_SRQ);
+ MLX5_COMMAND_STR_CASE(ARM_RQ);
+ MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
+ MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
+ MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
+ MLX5_COMMAND_STR_CASE(CREATE_DCT);
+ MLX5_COMMAND_STR_CASE(DESTROY_DCT);
+ MLX5_COMMAND_STR_CASE(DRAIN_DCT);
+ MLX5_COMMAND_STR_CASE(QUERY_DCT);
+ MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
+ MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
+ MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
+ MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
+ MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
+ MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
+ MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
+ MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
+ MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
+ MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
+ MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+ MLX5_COMMAND_STR_CASE(ALLOC_PD);
+ MLX5_COMMAND_STR_CASE(DEALLOC_PD);
+ MLX5_COMMAND_STR_CASE(ALLOC_UAR);
+ MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
+ MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
+ MLX5_COMMAND_STR_CASE(ACCESS_REG);
+ MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
+ MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG);
+ MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
+ MLX5_COMMAND_STR_CASE(MAD_IFC);
+ MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
+ MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
+ MLX5_COMMAND_STR_CASE(NOP);
+ MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
+ MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
+ MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
+ MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
+ MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
+ MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
+ MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
+ MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
+ MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
+ MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
+ MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
+ MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
+ MLX5_COMMAND_STR_CASE(CREATE_TIR);
+ MLX5_COMMAND_STR_CASE(MODIFY_TIR);
+ MLX5_COMMAND_STR_CASE(DESTROY_TIR);
+ MLX5_COMMAND_STR_CASE(QUERY_TIR);
+ MLX5_COMMAND_STR_CASE(CREATE_SQ);
+ MLX5_COMMAND_STR_CASE(MODIFY_SQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_SQ);
+ MLX5_COMMAND_STR_CASE(QUERY_SQ);
+ MLX5_COMMAND_STR_CASE(CREATE_RQ);
+ MLX5_COMMAND_STR_CASE(MODIFY_RQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_RQ);
+ MLX5_COMMAND_STR_CASE(QUERY_RQ);
+ MLX5_COMMAND_STR_CASE(CREATE_RMP);
+ MLX5_COMMAND_STR_CASE(MODIFY_RMP);
+ MLX5_COMMAND_STR_CASE(DESTROY_RMP);
+ MLX5_COMMAND_STR_CASE(QUERY_RMP);
+ MLX5_COMMAND_STR_CASE(CREATE_TIS);
+ MLX5_COMMAND_STR_CASE(MODIFY_TIS);
+ MLX5_COMMAND_STR_CASE(DESTROY_TIS);
+ MLX5_COMMAND_STR_CASE(QUERY_TIS);
+ MLX5_COMMAND_STR_CASE(CREATE_RQT);
+ MLX5_COMMAND_STR_CASE(MODIFY_RQT);
+ MLX5_COMMAND_STR_CASE(DESTROY_RQT);
+ MLX5_COMMAND_STR_CASE(QUERY_RQT);
+ MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
+ MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
+ MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
+ MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
+ MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
+ MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
+ MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
+ MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
+ MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
+ MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
+ MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
default: return "unknown command opcode";
}
}
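
The MLX5_COMMAND_STR_CASE macro that replaces the long switch needs nothing beyond token pasting (##) and stringification (#), so it stands alone. A compilable sketch with made-up opcodes:

	#include <stdio.h>

	enum { CMD_OP_QUERY = 1, CMD_OP_CREATE = 2 };

	#define CMD_STR_CASE(__cmd) case CMD_OP_ ## __cmd: return #__cmd

	static const char *cmd_str(int command)
	{
		switch (command) {
		CMD_STR_CASE(QUERY);	/* expands to: case CMD_OP_QUERY: return "QUERY"; */
		CMD_STR_CASE(CREATE);
		default: return "unknown";
		}
	}

	int main(void)
	{
		printf("%s\n", cmd_str(CMD_OP_CREATE));	/* prints CREATE */
		return 0;
	}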
@@ -634,11 +606,36 @@ static void dump_command(struct mlx5_core_dev *dev,
pr_debug("\n");
}
+static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+{
+ struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
+
+ return be16_to_cpu(hdr->opcode);
+}
+
+static void cb_timeout_handler(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct mlx5_cmd_work_ent *ent = container_of(dwork,
+ struct mlx5_cmd_work_ent,
+ cb_timeout_work);
+ struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
+ cmd);
+
+ ent->ret = -ETIMEDOUT;
+ mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+ mlx5_command_str(msg_to_opcode(ent->in)),
+ msg_to_opcode(ent->in));
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+}
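
cb_timeout_handler pairs with the cancel_delayed_work() call added to mlx5_cmd_comp_handler() further down: every callback-mode command arms a delayed work, and whichever of the timeout or the real completion fires first settles the command. A reduced sketch; the cmd_ent type and surrounding glue are assumptions:

	static void example_issue_async(struct cmd_ent *ent, unsigned long timeout)
	{
		INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
		schedule_delayed_work(&ent->cb_timeout_work, timeout);
		/* ... post the command and ring the doorbell ... */
	}

	static void example_on_completion(struct cmd_ent *ent)
	{
		if (ent->callback)			/* async commands only */
			cancel_delayed_work(&ent->cb_timeout_work);
		/* ... deliver status to the caller ... */
	}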
+
static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
struct mlx5_cmd *cmd = ent->cmd;
struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
struct mlx5_cmd_layout *lay;
struct semaphore *sem;
unsigned long flags;
@@ -679,6 +676,9 @@ static void cmd_work_handler(struct work_struct *work)
dump_command(dev, ent, 1);
ent->ts1 = ktime_get_ns();
+ if (ent->callback)
+ schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
@@ -723,13 +723,6 @@ static const char *deliv_status_to_str(u8 status)
}
}
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
- struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
- return be16_to_cpu(hdr->opcode);
-}
-
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
@@ -738,13 +731,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
if (cmd->mode == CMD_MODE_POLLING) {
wait_for_completion(&ent->done);
- err = ent->ret;
- } else {
- if (!wait_for_completion_timeout(&ent->done, timeout))
- err = -ETIMEDOUT;
- else
- err = 0;
+ } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
+ ent->ret = -ETIMEDOUT;
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}
+
+ err = ent->ret;
+
if (err == -ETIMEDOUT) {
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(msg_to_opcode(ent->in)),
@@ -793,6 +786,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (!callback)
init_completion(&ent->done);
+ INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
INIT_WORK(&ent->work, cmd_work_handler);
if (page_queue) {
cmd_work_handler(&ent->work);
@@ -802,28 +796,26 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
goto out_free;
}
- if (!callback) {
- err = wait_func(dev, ent);
- if (err == -ETIMEDOUT)
- goto out;
-
- ds = ent->ts2 - ent->ts1;
- op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
- if (op < ARRAY_SIZE(cmd->stats)) {
- stats = &cmd->stats[op];
- spin_lock_irq(&stats->lock);
- stats->sum += ds;
- ++stats->n;
- spin_unlock_irq(&stats->lock);
- }
- mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
- "fw exec time for %s is %lld nsec\n",
- mlx5_command_str(op), ds);
- *status = ent->status;
- free_cmd(ent);
- }
+ if (callback)
+ goto out;
- return err;
+ err = wait_func(dev, ent);
+ if (err == -ETIMEDOUT)
+ goto out_free;
+
+ ds = ent->ts2 - ent->ts1;
+ op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+ if (op < ARRAY_SIZE(cmd->stats)) {
+ stats = &cmd->stats[op];
+ spin_lock_irq(&stats->lock);
+ stats->sum += ds;
+ ++stats->n;
+ spin_unlock_irq(&stats->lock);
+ }
+ mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+ "fw exec time for %s is %lld nsec\n",
+ mlx5_command_str(op), ds);
+ *status = ent->status;
out_free:
free_cmd(ent);
@@ -1213,41 +1205,30 @@ err_dbg:
return err;
}
-void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
+static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
struct mlx5_cmd *cmd = &dev->cmd;
int i;
for (i = 0; i < cmd->max_reg_cmds; i++)
down(&cmd->sem);
-
down(&cmd->pages_sem);
- flush_workqueue(cmd->wq);
-
- cmd->mode = CMD_MODE_EVENTS;
+ cmd->mode = mode;
up(&cmd->pages_sem);
for (i = 0; i < cmd->max_reg_cmds; i++)
up(&cmd->sem);
}
-void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
- struct mlx5_cmd *cmd = &dev->cmd;
- int i;
-
- for (i = 0; i < cmd->max_reg_cmds; i++)
- down(&cmd->sem);
-
- down(&cmd->pages_sem);
-
- flush_workqueue(cmd->wq);
- cmd->mode = CMD_MODE_POLLING;
+ mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
+}
- up(&cmd->pages_sem);
- for (i = 0; i < cmd->max_reg_cmds; i++)
- up(&cmd->sem);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+{
+ mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
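
The factored-out mlx5_cmd_change_mod() quiesces the command interface by taking every slot semaphore before flipping the mode, so no command can be in flight across the transition; releasing them re-opens the gate. The bare pattern, with illustrative struct fields:

	static void example_change_mode(struct example_cmd *cmd, int mode)
	{
		int i;

		for (i = 0; i < cmd->max_slots; i++)
			down(&cmd->sem);	/* claim every regular command slot */
		down(&cmd->pages_sem);		/* and the page-request slot */

		cmd->mode = mode;		/* nothing can observe a torn switch */

		up(&cmd->pages_sem);
		for (i = 0; i < cmd->max_slots; i++)
			up(&cmd->sem);
	}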
@@ -1283,6 +1264,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
struct semaphore *sem;
ent = cmd->ent_arr[i];
+ if (ent->callback)
+ cancel_delayed_work(&ent->cb_timeout_work);
if (ent->page_queue)
sem = &cmd->pages_sem;
else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index b51e42d6f..873a631ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -39,6 +39,53 @@
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"
+#define TASKLET_MAX_TIME 2
+#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
+
+void mlx5_cq_tasklet_cb(unsigned long data)
+{
+ unsigned long flags;
+ unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
+ struct mlx5_eq_tasklet *ctx = (struct mlx5_eq_tasklet *)data;
+ struct mlx5_core_cq *mcq;
+ struct mlx5_core_cq *temp;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ list_splice_tail_init(&ctx->list, &ctx->process_list);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ list_for_each_entry_safe(mcq, temp, &ctx->process_list,
+ tasklet_ctx.list) {
+ list_del_init(&mcq->tasklet_ctx.list);
+ mcq->tasklet_ctx.comp(mcq);
+ if (atomic_dec_and_test(&mcq->refcount))
+ complete(&mcq->free);
+ if (time_after(jiffies, end))
+ break;
+ }
+
+ if (!list_empty(&ctx->process_list))
+ tasklet_schedule(&ctx->task);
+}
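
mlx5_cq_tasklet_cb() caps one run at TASKLET_MAX_TIME (2 ms) and reschedules itself if work remains, so a single busy EQ cannot monopolize softirq time. The budget idiom in isolation; the context type and worker are illustrative:

	static void example_tasklet_cb(unsigned long data)
	{
		struct example_ctx *ctx = (struct example_ctx *)data;
		unsigned long end = jiffies + msecs_to_jiffies(2);

		/* ... splice pending entries onto ctx->process_list under lock ... */

		while (!list_empty(&ctx->process_list)) {
			example_process_one(ctx);	/* hypothetical worker */
			if (time_after(jiffies, end))
				break;			/* budget spent */
		}

		if (!list_empty(&ctx->process_list))
			tasklet_schedule(&ctx->task);	/* finish in a later run */
	}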
+
+static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
+{
+ unsigned long flags;
+ struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
+
+ spin_lock_irqsave(&tasklet_ctx->lock, flags);
+ /* When migration of CQs between EQs is implemented, this point will
+ * need to be synchronized: while a CQ is being migrated, completions
+ * could still arrive on the old EQ.
+ */
+ if (list_empty_careful(&cq->tasklet_ctx.list)) {
+ atomic_inc(&cq->refcount);
+ list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
+ }
+ spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
+}
+
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
{
struct mlx5_core_cq *cq;
@@ -96,6 +143,13 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_create_cq_mbox_out out;
struct mlx5_destroy_cq_mbox_in din;
struct mlx5_destroy_cq_mbox_out dout;
+ int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
+ c_eqn);
+ struct mlx5_eq *eq;
+
+ eq = mlx5_eqn2eq(dev, eqn);
+ if (IS_ERR(eq))
+ return PTR_ERR(eq);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
memset(&out, 0, sizeof(out));
@@ -111,6 +165,11 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->arm_sn = 0;
atomic_set(&cq->refcount, 1);
init_completion(&cq->free);
+ if (!cq->comp)
+ cq->comp = mlx5_add_cq_to_tasklet;
+ /* assuming the CQ will be deleted before the EQ */
+ cq->tasklet_ctx.priv = &eq->tasklet_ctx;
+ INIT_LIST_HEAD(&cq->tasklet_ctx.list);
spin_lock_irq(&table->lock);
err = radix_tree_insert(&table->tree, cq->cqn, cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 24344aafb..943b1bd43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -46,6 +46,9 @@
#include <linux/rhashtable.h>
#include "wq.h"
#include "mlx5_core.h"
+#include "en_stats.h"
+
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
#define MLX5E_MAX_NUM_TC 8
@@ -57,12 +60,30 @@
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x4
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
+
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */
+#define MLX5_MPWRQ_LOG_WQE_SZ 17
+#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
+ MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
+#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \
+ MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
+ BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
+#define MLX5_UMR_ALIGN (2048)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
+
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
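
For a concrete reading of the MPWRQ constants above: MLX5_MPWRQ_LOG_WQE_SZ = 17 makes each multi-packet WQE span 2^17 bytes = 128 KiB, so on a 4 KiB-page build (PAGE_SHIFT = 12) MLX5_MPWRQ_WQE_PAGE_ORDER evaluates to 17 - 12 = 5 and MLX5_MPWRQ_PAGES_PER_WQE to 2^5 = 32 pages per WQE.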
@@ -73,233 +94,69 @@
#define MLX5E_NUM_MAIN_GROUPS 9
+static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
+{
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
+ wq_size / 2);
+ default:
+ return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
+ wq_size / 2);
+ }
+}
+
+static inline int mlx5_min_log_rq_size(int wq_type)
+{
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+ default:
+ return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+ }
+}
+
+static inline int mlx5_max_log_rq_size(int wq_type)
+{
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
+ default:
+ return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ }
+}
+
+struct mlx5e_tx_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_eth_seg eth;
+};
+
+struct mlx5e_rx_wqe {
+ struct mlx5_wqe_srq_next_seg next;
+ struct mlx5_wqe_data_seg data;
+};
+
+struct mlx5e_umr_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_umr_ctrl_seg uctrl;
+ struct mlx5_mkey_seg mkc;
+ struct mlx5_wqe_data_seg data;
+};
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
#endif
-static const char vport_strings[][ETH_GSTRING_LEN] = {
- /* vport statistics */
- "rx_packets",
- "rx_bytes",
- "tx_packets",
- "tx_bytes",
- "rx_error_packets",
- "rx_error_bytes",
- "tx_error_packets",
- "tx_error_bytes",
- "rx_unicast_packets",
- "rx_unicast_bytes",
- "tx_unicast_packets",
- "tx_unicast_bytes",
- "rx_multicast_packets",
- "rx_multicast_bytes",
- "tx_multicast_packets",
- "tx_multicast_bytes",
- "rx_broadcast_packets",
- "rx_broadcast_bytes",
- "tx_broadcast_packets",
- "tx_broadcast_bytes",
-
- /* SW counters */
- "tso_packets",
- "tso_bytes",
- "tso_inner_packets",
- "tso_inner_bytes",
- "lro_packets",
- "lro_bytes",
- "rx_csum_good",
- "rx_csum_none",
- "rx_csum_sw",
- "tx_csum_offload",
- "tx_csum_inner",
- "tx_queue_stopped",
- "tx_queue_wake",
- "tx_queue_dropped",
- "rx_wqe_err",
-};
-
-struct mlx5e_vport_stats {
- /* HW counters */
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- u64 rx_error_packets;
- u64 rx_error_bytes;
- u64 tx_error_packets;
- u64 tx_error_bytes;
- u64 rx_unicast_packets;
- u64 rx_unicast_bytes;
- u64 tx_unicast_packets;
- u64 tx_unicast_bytes;
- u64 rx_multicast_packets;
- u64 rx_multicast_bytes;
- u64 tx_multicast_packets;
- u64 tx_multicast_bytes;
- u64 rx_broadcast_packets;
- u64 rx_broadcast_bytes;
- u64 tx_broadcast_packets;
- u64 tx_broadcast_bytes;
-
- /* SW counters */
- u64 tso_packets;
- u64 tso_bytes;
- u64 tso_inner_packets;
- u64 tso_inner_bytes;
- u64 lro_packets;
- u64 lro_bytes;
- u64 rx_csum_good;
- u64 rx_csum_none;
- u64 rx_csum_sw;
- u64 tx_csum_offload;
- u64 tx_csum_inner;
- u64 tx_queue_stopped;
- u64 tx_queue_wake;
- u64 tx_queue_dropped;
- u64 rx_wqe_err;
-
-#define NUM_VPORT_COUNTERS 35
-};
-
-static const char pport_strings[][ETH_GSTRING_LEN] = {
- /* IEEE802.3 counters */
- "frames_tx",
- "frames_rx",
- "check_seq_err",
- "alignment_err",
- "octets_tx",
- "octets_received",
- "multicast_xmitted",
- "broadcast_xmitted",
- "multicast_rx",
- "broadcast_rx",
- "in_range_len_errors",
- "out_of_range_len",
- "too_long_errors",
- "symbol_err",
- "mac_control_tx",
- "mac_control_rx",
- "unsupported_op_rx",
- "pause_ctrl_rx",
- "pause_ctrl_tx",
-
- /* RFC2863 counters */
- "in_octets",
- "in_ucast_pkts",
- "in_discards",
- "in_errors",
- "in_unknown_protos",
- "out_octets",
- "out_ucast_pkts",
- "out_discards",
- "out_errors",
- "in_multicast_pkts",
- "in_broadcast_pkts",
- "out_multicast_pkts",
- "out_broadcast_pkts",
-
- /* RFC2819 counters */
- "drop_events",
- "octets",
- "pkts",
- "broadcast_pkts",
- "multicast_pkts",
- "crc_align_errors",
- "undersize_pkts",
- "oversize_pkts",
- "fragments",
- "jabbers",
- "collisions",
- "p64octets",
- "p65to127octets",
- "p128to255octets",
- "p256to511octets",
- "p512to1023octets",
- "p1024to1518octets",
- "p1519to2047octets",
- "p2048to4095octets",
- "p4096to8191octets",
- "p8192to10239octets",
-};
-
-#define NUM_IEEE_802_3_COUNTERS 19
-#define NUM_RFC_2863_COUNTERS 13
-#define NUM_RFC_2819_COUNTERS 21
-#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \
- NUM_RFC_2863_COUNTERS + \
- NUM_RFC_2819_COUNTERS)
-
-struct mlx5e_pport_stats {
- __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
- __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
- __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
-};
-
-static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
- "packets",
- "bytes",
- "csum_none",
- "csum_sw",
- "lro_packets",
- "lro_bytes",
- "wqe_err"
-};
-
-struct mlx5e_rq_stats {
- u64 packets;
- u64 bytes;
- u64 csum_none;
- u64 csum_sw;
- u64 lro_packets;
- u64 lro_bytes;
- u64 wqe_err;
-#define NUM_RQ_STATS 7
-};
-
-static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
- "packets",
- "bytes",
- "tso_packets",
- "tso_bytes",
- "tso_inner_packets",
- "tso_inner_bytes",
- "csum_offload_inner",
- "nop",
- "csum_offload_none",
- "stopped",
- "wake",
- "dropped",
-};
-
-struct mlx5e_sq_stats {
- /* commonly accessed in data path */
- u64 packets;
- u64 bytes;
- u64 tso_packets;
- u64 tso_bytes;
- u64 tso_inner_packets;
- u64 tso_inner_bytes;
- u64 csum_offload_inner;
- u64 nop;
- /* less likely accessed in data path */
- u64 csum_offload_none;
- u64 stopped;
- u64 wake;
- u64 dropped;
-#define NUM_SQ_STATS 12
-};
-
-struct mlx5e_stats {
- struct mlx5e_vport_stats vport;
- struct mlx5e_pport_stats pport;
-};
-
struct mlx5e_params {
u8 log_sq_size;
+ u8 rq_wq_type;
+ u8 mpwqe_log_stride_sz;
+ u8 mpwqe_log_num_strides;
u8 log_rq_size;
u16 num_channels;
u8 num_tc;
+ bool rx_cqe_compress_admin;
+ bool rx_cqe_compress;
u16 rx_cq_moderation_usec;
u16 rx_cq_moderation_pkts;
u16 tx_cq_moderation_usec;
@@ -311,6 +168,7 @@ struct mlx5e_params {
u8 rss_hfunc;
u8 toeplitz_hash_key[40];
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
+ bool vlan_strip_disable;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct ieee_ets ets;
#endif
@@ -331,6 +189,8 @@ struct mlx5e_tstamp {
enum {
MLX5E_RQ_STATE_POST_WQES_ENABLE,
+ MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
+ MLX5E_RQ_STATE_FLUSH_TIMEOUT,
};
struct mlx5e_cq {
@@ -343,32 +203,91 @@ struct mlx5e_cq {
struct mlx5e_channel *channel;
struct mlx5e_priv *priv;
+ /* cqe decompression */
+ struct mlx5_cqe64 title;
+ struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
+ u8 mini_arr_idx;
+ u16 decmprs_left;
+ u16 decmprs_wqe_counter;
+
/* control */
struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
+struct mlx5e_rq;
+typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe);
+typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
+ u16 ix);
+
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+
+struct mlx5e_dma_info {
+ struct page *page;
+ dma_addr_t addr;
+};
+
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
u32 wqe_sz;
struct sk_buff **skb;
+ struct mlx5e_mpw_info *wqe_info;
+ __be32 mkey_be;
+ __be32 umr_mkey_be;
struct device *pdev;
struct net_device *netdev;
struct mlx5e_tstamp *tstamp;
struct mlx5e_rq_stats stats;
struct mlx5e_cq cq;
+ mlx5e_fp_handle_rx_cqe handle_rx_cqe;
+ mlx5e_fp_alloc_wqe alloc_wqe;
+ mlx5e_fp_dealloc_wqe dealloc_wqe;
unsigned long state;
int ix;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
+ u8 wq_type;
+ u32 mpwqe_stride_sz;
+ u32 mpwqe_num_strides;
u32 rqn;
struct mlx5e_channel *channel;
struct mlx5e_priv *priv;
} ____cacheline_aligned_in_smp;
+struct mlx5e_umr_dma_info {
+ __be64 *mtt;
+ __be64 *mtt_no_align;
+ dma_addr_t mtt_addr;
+ struct mlx5e_dma_info *dma_info;
+};
+
+struct mlx5e_mpw_info {
+ union {
+ struct mlx5e_dma_info dma_info;
+ struct mlx5e_umr_dma_info umr;
+ };
+ u16 consumed_strides;
+ u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+
+ void (*dma_pre_sync)(struct device *pdev,
+ struct mlx5e_mpw_info *wi,
+ u32 wqe_offset, u32 len);
+ void (*add_skb_frag)(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 frag_offset, u32 len);
+ void (*copy_skb_header)(struct device *pdev,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 offset,
+ u32 headlen);
+ void (*free_wqe)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
+};
+
struct mlx5e_tx_wqe_info {
u32 num_bytes;
u8 num_wqebbs;
@@ -389,6 +308,12 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
MLX5E_SQ_STATE_BF_ENABLE,
+ MLX5E_SQ_STATE_TX_TIMEOUT,
+};
+
+struct mlx5e_ico_wqe_info {
+ u8 opcode;
+ u8 num_wqebbs;
};
struct mlx5e_sq {
@@ -432,6 +357,7 @@ struct mlx5e_sq {
struct mlx5_uar uar;
struct mlx5e_channel *channel;
int tc;
+ struct mlx5e_ico_wqe_info *ico_wqe_info;
} ____cacheline_aligned_in_smp;
static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
@@ -448,6 +374,7 @@ struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_sq icosq; /* internal control operations */
struct napi_struct napi;
struct device *pdev;
struct net_device *netdev;
@@ -474,42 +401,42 @@ enum mlx5e_traffic_types {
MLX5E_TT_IPV6,
MLX5E_TT_ANY,
MLX5E_NUM_TT,
+ MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
};
-#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY)
+enum {
+ MLX5E_STATE_ASYNC_EVENTS_ENABLED,
+ MLX5E_STATE_OPENED,
+ MLX5E_STATE_DESTROYING,
+};
-enum mlx5e_rqt_ix {
- MLX5E_INDIRECTION_RQT,
- MLX5E_SINGLE_RQ_RQT,
- MLX5E_NUM_RQT,
+struct mlx5e_vxlan_db {
+ spinlock_t lock; /* protect vxlan table */
+ struct radix_tree_root tree;
};
-struct mlx5e_eth_addr_info {
+struct mlx5e_l2_rule {
u8 addr[ETH_ALEN + 2];
- u32 tt_vec;
- struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT];
+ struct mlx5_flow_rule *rule;
};
-#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
-
-struct mlx5e_eth_addr_db {
- struct hlist_head netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
- struct hlist_head netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
- struct mlx5e_eth_addr_info broadcast;
- struct mlx5e_eth_addr_info allmulti;
- struct mlx5e_eth_addr_info promisc;
- bool broadcast_enabled;
- bool allmulti_enabled;
- bool promisc_enabled;
+struct mlx5e_flow_table {
+ int num_groups;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_group **g;
};
-enum {
- MLX5E_STATE_ASYNC_EVENTS_ENABLE,
- MLX5E_STATE_OPENED,
- MLX5E_STATE_DESTROYING,
+#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct mlx5e_tc_table {
+ struct mlx5_flow_table *t;
+
+ struct rhashtable_params ht_params;
+ struct rhashtable ht;
};
-struct mlx5e_vlan_db {
+struct mlx5e_vlan_table {
+ struct mlx5e_flow_table ft;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID];
struct mlx5_flow_rule *untagged_rule;
@@ -517,29 +444,74 @@ struct mlx5e_vlan_db {
bool filter_disabled;
};
-struct mlx5e_vxlan_db {
- spinlock_t lock; /* protect vxlan table */
- struct radix_tree_root tree;
+struct mlx5e_l2_table {
+ struct mlx5e_flow_table ft;
+ struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
+ struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
+ struct mlx5e_l2_rule broadcast;
+ struct mlx5e_l2_rule allmulti;
+ struct mlx5e_l2_rule promisc;
+ bool broadcast_enabled;
+ bool allmulti_enabled;
+ bool promisc_enabled;
};
-struct mlx5e_flow_table {
- int num_groups;
- struct mlx5_flow_table *t;
- struct mlx5_flow_group **g;
+/* L3/L4 traffic type classifier */
+struct mlx5e_ttc_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_rule *rules[MLX5E_NUM_TT];
};
-struct mlx5e_tc_flow_table {
- struct mlx5_flow_table *t;
+#define ARFS_HASH_SHIFT BITS_PER_BYTE
+#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
+struct arfs_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_rule *default_rule;
+ struct hlist_head rules_hash[ARFS_HASH_SIZE];
+};
- struct rhashtable_params ht_params;
- struct rhashtable ht;
+enum arfs_type {
+ ARFS_IPV4_TCP,
+ ARFS_IPV6_TCP,
+ ARFS_IPV4_UDP,
+ ARFS_IPV6_UDP,
+ ARFS_NUM_TYPES,
+};
+
+struct mlx5e_arfs_tables {
+ struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+ /* Protect aRFS rules list */
+ spinlock_t arfs_lock;
+ struct list_head rules;
+ int last_filter_id;
+ struct workqueue_struct *wq;
};
-struct mlx5e_flow_tables {
- struct mlx5_flow_namespace *ns;
- struct mlx5e_tc_flow_table tc;
- struct mlx5e_flow_table vlan;
- struct mlx5e_flow_table main;
+/* NIC prio FTS */
+enum {
+ MLX5E_VLAN_FT_LEVEL = 0,
+ MLX5E_L2_FT_LEVEL,
+ MLX5E_TTC_FT_LEVEL,
+ MLX5E_ARFS_FT_LEVEL
+};
+
+struct mlx5e_flow_steering {
+ struct mlx5_flow_namespace *ns;
+ struct mlx5e_tc_table tc;
+ struct mlx5e_vlan_table vlan;
+ struct mlx5e_l2_table l2;
+ struct mlx5e_ttc_table ttc;
+ struct mlx5e_arfs_tables arfs;
+};
+
+struct mlx5e_direct_tir {
+ u32 tirn;
+ u32 rqtn;
+};
+
+enum {
+ MLX5E_TC_PRIO = 0,
+ MLX5E_NIC_PRIO
};
struct mlx5e_priv {
@@ -554,42 +526,30 @@ struct mlx5e_priv {
u32 pdn;
u32 tdn;
struct mlx5_core_mkey mkey;
+ struct mlx5_core_mkey umr_mkey;
struct mlx5e_rq drop_rq;
struct mlx5e_channel **channel;
u32 tisn[MLX5E_MAX_NUM_TC];
- u32 rqtn[MLX5E_NUM_RQT];
- u32 tirn[MLX5E_NUM_TT];
+ u32 indir_rqtn;
+ u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
- struct mlx5e_flow_tables fts;
- struct mlx5e_eth_addr_db eth_addr;
- struct mlx5e_vlan_db vlan;
-#ifdef CONFIG_MLX5_CORE_EN_VXLAN
+ struct mlx5e_flow_steering fs;
struct mlx5e_vxlan_db vxlan;
-#endif
struct mlx5e_params params;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
+ struct work_struct tx_timeout_work;
struct delayed_work update_stats_work;
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_stats stats;
struct mlx5e_tstamp tstamp;
-};
-
-#define MLX5E_NET_IP_ALIGN 2
-
-struct mlx5e_tx_wqe {
- struct mlx5_wqe_ctrl_seg ctrl;
- struct mlx5_wqe_eth_seg eth;
-};
-
-struct mlx5e_rx_wqe {
- struct mlx5_wqe_srq_next_seg next;
- struct mlx5_wqe_data_seg data;
+ u16 q_counter;
};
enum mlx5e_link_mode {
@@ -634,14 +594,39 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
+
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
+void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u16 byte_cnt,
+ struct mlx5e_mpw_info *wi,
+ struct sk_buff *skb);
+void mlx5e_complete_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u16 byte_cnt,
+ struct mlx5e_mpw_info *wi,
+ struct sk_buff *skb);
+void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi);
+void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_update_stats(struct mlx5e_priv *priv);
-int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
-void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
void mlx5e_set_rx_mode_work(struct work_struct *work);
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
@@ -650,6 +635,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv);
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
+void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
@@ -658,16 +644,20 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+ u32 *indirection_rqt, int len,
int num_channels);
+int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
- struct mlx5e_tx_wqe *wqe, int bf_sz)
+ struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
{
u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
@@ -681,9 +671,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
*/
wmb();
if (bf_sz)
- __iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz);
+ __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
else
- mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+ mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
/* flush the write-combining mapped buffer */
wmb();
@@ -704,12 +694,43 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
MLX5E_MAX_NUM_CHANNELS);
}
+static inline int mlx5e_get_mtt_octw(int npages)
+{
+ return ALIGN(npages, 8) / 2;
+}
+
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
#endif
+#ifndef CONFIG_RFS_ACCEL
+static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
+
+static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+{
+ return -ENOTSUPP;
+}
+
+static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+{
+ return -ENOTSUPP;
+}
+#else
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
+int mlx5e_arfs_enable(struct mlx5e_priv *priv);
+int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+#endif
+
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
new file mode 100644
index 000000000..3515e78ba
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -0,0 +1,752 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef CONFIG_RFS_ACCEL
+
+#include <linux/hash.h>
+#include <linux/mlx5/fs.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "en.h"
+
+struct arfs_tuple {
+ __be16 etype;
+ u8 ip_proto;
+ union {
+ __be32 src_ipv4;
+ struct in6_addr src_ipv6;
+ };
+ union {
+ __be32 dst_ipv4;
+ struct in6_addr dst_ipv6;
+ };
+ __be16 src_port;
+ __be16 dst_port;
+};
+
+struct arfs_rule {
+ struct mlx5e_priv *priv;
+ struct work_struct arfs_work;
+ struct mlx5_flow_rule *rule;
+ struct hlist_node hlist;
+ int rxq;
+ /* Flow ID passed to ndo_rx_flow_steer */
+ int flow_id;
+ /* Filter ID returned by ndo_rx_flow_steer */
+ int filter_id;
+ struct arfs_tuple tuple;
+};
+
+#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
+ for (i = 0; i < ARFS_NUM_TYPES; i++) \
+ mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)
+
+#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
+ for (j = 0; j < ARFS_HASH_SIZE; j++) \
+ hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)
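
A hedged usage sketch of the two iteration macros above: they compose into a triple loop over every rule in every hash bucket of every aRFS table, and the _safe inner walk tolerates deletion of the current entry. The function itself is illustrative; the types are the ones defined in this file:

	static void example_flush_all_rules(struct mlx5e_arfs_tables *arfs)
	{
		struct arfs_rule *rule;
		struct hlist_node *tmp;
		int i, j;

		mlx5e_for_each_arfs_rule(rule, tmp, arfs->arfs_tables, i, j)
			hlist_del(&rule->hlist);	/* safe: tmp already holds the next node */
	}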
+
+static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
+{
+ switch (type) {
+ case ARFS_IPV4_TCP:
+ return MLX5E_TT_IPV4_TCP;
+ case ARFS_IPV4_UDP:
+ return MLX5E_TT_IPV4_UDP;
+ case ARFS_IPV6_TCP:
+ return MLX5E_TT_IPV6_TCP;
+ case ARFS_IPV6_UDP:
+ return MLX5E_TT_IPV6_UDP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int arfs_disable(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_destination dest;
+ u32 *tirn = priv->indir_tirn;
+ int err = 0;
+ int tt;
+ int i;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ for (i = 0; i < ARFS_NUM_TYPES; i++) {
+ dest.tir_num = tirn[i];
+ tt = arfs_get_tt(i);
+ /* Modify ttc rules destination to bypass the aRFS tables */
+ err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
+ &dest);
+ if (err) {
+ netdev_err(priv->netdev,
+ "%s: modify ttc destination failed\n",
+ __func__);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void arfs_del_rules(struct mlx5e_priv *priv);
+
+int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+{
+ arfs_del_rules(priv);
+
+ return arfs_disable(priv);
+}
+
+int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_destination dest;
+ int err = 0;
+ int tt;
+ int i;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ for (i = 0; i < ARFS_NUM_TYPES; i++) {
+ dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
+ tt = arfs_get_tt(i);
+ /* Modify ttc rules destination to point to the aRFS FTs */
+ err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
+ &dest);
+ if (err) {
+ netdev_err(priv->netdev,
+ "%s: modify ttc destination failed err=%d\n",
+ __func__, err);
+ arfs_disable(priv);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void arfs_destroy_table(struct arfs_table *arfs_t)
+{
+ mlx5_del_flow_rule(arfs_t->default_rule);
+ mlx5e_destroy_flow_table(&arfs_t->ft);
+}
+
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+{
+ int i;
+
+ if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+ return;
+
+ arfs_del_rules(priv);
+ destroy_workqueue(priv->fs.arfs.wq);
+ for (i = 0; i < ARFS_NUM_TYPES; i++) {
+ if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
+ arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
+ }
+}
+
+static int arfs_add_default_rule(struct mlx5e_priv *priv,
+ enum arfs_type type)
+{
+ struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
+ struct mlx5_flow_destination dest;
+ u8 match_criteria_enable = 0;
+ u32 *tirn = priv->indir_tirn;
+ u32 *match_criteria;
+ u32 *match_value;
+ int err = 0;
+
+ match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!match_value || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ switch (type) {
+ case ARFS_IPV4_TCP:
+ dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
+ break;
+ case ARFS_IPV4_UDP:
+ dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
+ break;
+ case ARFS_IPV6_TCP:
+ dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
+ break;
+ case ARFS_IPV6_UDP:
+ dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable,
+ match_criteria, match_value,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG,
+ &dest);
+ if (IS_ERR(arfs_t->default_rule)) {
+ err = PTR_ERR(arfs_t->default_rule);
+ arfs_t->default_rule = NULL;
+ netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
+ __func__, type);
+ }
+out:
+ kvfree(match_criteria);
+ kvfree(match_value);
+ return err;
+}
+
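+/* Group 1 holds the 5-tuple flow steering rules; group 2 holds the
+ * single catch-all default rule that forwards to the RSS TIR.
+ */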
+#define MLX5E_ARFS_NUM_GROUPS 2
+#define MLX5E_ARFS_GROUP1_SIZE BIT(12)
+#define MLX5E_ARFS_GROUP2_SIZE BIT(0)
+#define MLX5E_ARFS_TABLE_SIZE (MLX5E_ARFS_GROUP1_SIZE +\
+ MLX5E_ARFS_GROUP2_SIZE)
+static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ enum arfs_type type)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *outer_headers_c;
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
+ sizeof(*ft->g), GFP_KERNEL);
+ in = mlx5_vzalloc(inlen);
+ if (!in || !ft->g) {
+ kvfree(ft->g);
+ kvfree(in);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
+ outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
+ switch (type) {
+ case ARFS_IPV4_TCP:
+ case ARFS_IPV6_TCP:
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
+ break;
+ case ARFS_IPV4_UDP:
+ case ARFS_IPV6_UDP:
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ switch (type) {
+ case ARFS_IPV4_TCP:
+ case ARFS_IPV4_UDP:
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ break;
+ case ARFS_IPV6_TCP:
+ case ARFS_IPV6_UDP:
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ 0xff, 16);
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xff, 16);
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_ARFS_GROUP1_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_ARFS_GROUP2_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+out:
+ kvfree(in);
+
+ return err;
+}
+
+static int arfs_create_table(struct mlx5e_priv *priv,
+ enum arfs_type type)
+{
+ struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
+ int err;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+ MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL);
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ ft->t = NULL;
+ return err;
+ }
+
+ err = arfs_create_groups(ft, type);
+ if (err)
+ goto err;
+
+ err = arfs_add_default_rule(priv, type);
+ if (err)
+ goto err;
+
+ return 0;
+err:
+ mlx5e_destroy_flow_table(ft);
+ return err;
+}
+
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+ int err = 0;
+ int i;
+
+ if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+ return 0;
+
+ spin_lock_init(&priv->fs.arfs.arfs_lock);
+ INIT_LIST_HEAD(&priv->fs.arfs.rules);
+ priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
+ if (!priv->fs.arfs.wq)
+ return -ENOMEM;
+
+ for (i = 0; i < ARFS_NUM_TYPES; i++) {
+ err = arfs_create_table(priv, i);
+ if (err)
+ goto err;
+ }
+ return 0;
+err:
+ mlx5e_arfs_destroy_tables(priv);
+ return err;
+}
+
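+/* Bound the number of rules examined in one expiry pass */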
+#define MLX5E_ARFS_EXPIRY_QUOTA 60
+
+static void arfs_may_expire_flow(struct mlx5e_priv *priv)
+{
+ struct arfs_rule *arfs_rule;
+ struct hlist_node *htmp;
+ int quota = 0;
+ int i;
+ int j;
+
+ HLIST_HEAD(del_list);
+ spin_lock_bh(&priv->fs.arfs.arfs_lock);
+ mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+ if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+ break;
+ if (!work_pending(&arfs_rule->arfs_work) &&
+ rps_may_expire_flow(priv->netdev,
+ arfs_rule->rxq, arfs_rule->flow_id,
+ arfs_rule->filter_id)) {
+ hlist_del_init(&arfs_rule->hlist);
+ hlist_add_head(&arfs_rule->hlist, &del_list);
+ }
+ }
+ spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+ hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
+ if (arfs_rule->rule)
+ mlx5_del_flow_rule(arfs_rule->rule);
+ hlist_del(&arfs_rule->hlist);
+ kfree(arfs_rule);
+ }
+}
+
+static void arfs_del_rules(struct mlx5e_priv *priv)
+{
+ struct hlist_node *htmp;
+ struct arfs_rule *rule;
+ int i;
+ int j;
+
+ HLIST_HEAD(del_list);
+ spin_lock_bh(&priv->fs.arfs.arfs_lock);
+ mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+ hlist_del_init(&rule->hlist);
+ hlist_add_head(&rule->hlist, &del_list);
+ }
+ spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+
+ hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
+ cancel_work_sync(&rule->arfs_work);
+ if (rule->rule)
+ mlx5_del_flow_rule(rule->rule);
+ hlist_del(&rule->hlist);
+ kfree(rule);
+ }
+}
+
+static struct hlist_head *
+arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
+ __be16 dst_port)
+{
+ unsigned long l;
+ int bucket_idx;
+
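+ /* Fold both L4 ports into one key; hash_long() spreads it over the buckets */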
+ l = (__force unsigned long)src_port |
+ ((__force unsigned long)dst_port << 2);
+
+ bucket_idx = hash_long(l, ARFS_HASH_SHIFT);
+
+ return &arfs_t->rules_hash[bucket_idx];
+}
+
+static u8 arfs_get_ip_proto(const struct sk_buff *skb)
+{
+ return (skb->protocol == htons(ETH_P_IP)) ?
+ ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
+}
+
+static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
+ u8 ip_proto, __be16 etype)
+{
+ if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
+ return &arfs->arfs_tables[ARFS_IPV4_TCP];
+ if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
+ return &arfs->arfs_tables[ARFS_IPV4_UDP];
+ if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
+ return &arfs->arfs_tables[ARFS_IPV6_TCP];
+ if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
+ return &arfs->arfs_tables[ARFS_IPV6_UDP];
+
+ return NULL;
+}
+
+static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
+ struct arfs_rule *arfs_rule)
+{
+ struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct arfs_tuple *tuple = &arfs_rule->tuple;
+ struct mlx5_flow_rule *rule = NULL;
+ struct mlx5_flow_destination dest;
+ struct arfs_table *arfs_table;
+ u8 match_criteria_enable = 0;
+ struct mlx5_flow_table *ft;
+ u32 *match_criteria;
+ u32 *match_value;
+ int err = 0;
+
+ match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!match_value || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
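+ /* Build an exact match on ethertype, L4 ports and IP addresses */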
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ntohs(tuple->etype));
+ arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
+ if (!arfs_table) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ ft = arfs_table->ft.t;
+ if (tuple->ip_proto == IPPROTO_TCP) {
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.tcp_dport);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.tcp_sport);
+ MLX5_SET(fte_match_param, match_value, outer_headers.tcp_dport,
+ ntohs(tuple->dst_port));
+ MLX5_SET(fte_match_param, match_value, outer_headers.tcp_sport,
+ ntohs(tuple->src_port));
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.udp_dport);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.udp_sport);
+ MLX5_SET(fte_match_param, match_value, outer_headers.udp_dport,
+ ntohs(tuple->dst_port));
+ MLX5_SET(fte_match_param, match_value, outer_headers.udp_sport,
+ ntohs(tuple->src_port));
+ }
+ if (tuple->etype == htons(ETH_P_IP)) {
+ memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &tuple->src_ipv4,
+ 4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &tuple->dst_ipv4,
+ 4);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ } else {
+ memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &tuple->src_ipv6,
+ 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &tuple->dst_ipv6,
+ 16);
+ memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ 0xff,
+ 16);
+ memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xff,
+ 16);
+ }
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
+ rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
+ match_value, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG,
+ &dest);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
+ __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
+ }
+
+out:
+ kvfree(match_criteria);
+ kvfree(match_value);
+ return err ? ERR_PTR(err) : rule;
+}
+
+static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
+ struct mlx5_flow_rule *rule, u16 rxq)
+{
+ struct mlx5_flow_destination dst;
+ int err = 0;
+
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dst.tir_num = priv->direct_tir[rxq].tirn;
+ err = mlx5_modify_rule_destination(rule, &dst);
+ if (err)
+ netdev_warn(priv->netdev,
+ "Failed to modfiy aRFS rule destination to rq=%d\n", rxq);
+}
+
+static void arfs_handle_work(struct work_struct *work)
+{
+ struct arfs_rule *arfs_rule = container_of(work,
+ struct arfs_rule,
+ arfs_work);
+ struct mlx5e_priv *priv = arfs_rule->priv;
+ struct mlx5_flow_rule *rule;
+
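+ /* Serialize against open/close; a rule for a closed device is simply freed */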
+ mutex_lock(&priv->state_lock);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ spin_lock_bh(&priv->fs.arfs.arfs_lock);
+ hlist_del(&arfs_rule->hlist);
+ spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+
+ mutex_unlock(&priv->state_lock);
+ kfree(arfs_rule);
+ goto out;
+ }
+ mutex_unlock(&priv->state_lock);
+
+ if (!arfs_rule->rule) {
+ rule = arfs_add_rule(priv, arfs_rule);
+ if (IS_ERR(rule))
+ goto out;
+ arfs_rule->rule = rule;
+ } else {
+ arfs_modify_rule_rq(priv, arfs_rule->rule,
+ arfs_rule->rxq);
+ }
+out:
+ arfs_may_expire_flow(priv);
+}
+
+/* return L4 destination port from ip4/6 packets */
+static __be16 arfs_get_dst_port(const struct sk_buff *skb)
+{
+ char *transport_header;
+
+ transport_header = skb_transport_header(skb);
+ if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+ return ((struct tcphdr *)transport_header)->dest;
+ return ((struct udphdr *)transport_header)->dest;
+}
+
+/* return L4 source port from ip4/6 packets */
+static __be16 arfs_get_src_port(const struct sk_buff *skb)
+{
+ char *transport_header;
+
+ transport_header = skb_transport_header(skb);
+ if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+ return ((struct tcphdr *)transport_header)->source;
+ return ((struct udphdr *)transport_header)->source;
+}
+
+static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
+ struct arfs_table *arfs_t,
+ const struct sk_buff *skb,
+ u16 rxq, u32 flow_id)
+{
+ struct arfs_rule *rule;
+ struct arfs_tuple *tuple;
+
+ rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
+ if (!rule)
+ return NULL;
+
+ rule->priv = priv;
+ rule->rxq = rxq;
+ INIT_WORK(&rule->arfs_work, arfs_handle_work);
+
+ tuple = &rule->tuple;
+ tuple->etype = skb->protocol;
+ if (tuple->etype == htons(ETH_P_IP)) {
+ tuple->src_ipv4 = ip_hdr(skb)->saddr;
+ tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+ } else {
+ memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr));
+ memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+ sizeof(struct in6_addr));
+ }
+ tuple->ip_proto = arfs_get_ip_proto(skb);
+ tuple->src_port = arfs_get_src_port(skb);
+ tuple->dst_port = arfs_get_dst_port(skb);
+
+ rule->flow_id = flow_id;
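+ /* Keep filter ids below RPS_NO_FILTER, which is reserved */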
+ rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
+
+ hlist_add_head(&rule->hlist,
+ arfs_hash_bucket(arfs_t, tuple->src_port,
+ tuple->dst_port));
+ return rule;
+}
+
+static bool arfs_cmp_ips(struct arfs_tuple *tuple,
+ const struct sk_buff *skb)
+{
+ if (tuple->etype == htons(ETH_P_IP) &&
+ tuple->src_ipv4 == ip_hdr(skb)->saddr &&
+ tuple->dst_ipv4 == ip_hdr(skb)->daddr)
+ return true;
+ if (tuple->etype == htons(ETH_P_IPV6) &&
+ (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr))) &&
+ (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+ sizeof(struct in6_addr))))
+ return true;
+ return false;
+}
+
+static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
+ const struct sk_buff *skb)
+{
+ struct arfs_rule *arfs_rule;
+ struct hlist_head *head;
+ __be16 src_port = arfs_get_src_port(skb);
+ __be16 dst_port = arfs_get_dst_port(skb);
+
+ head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+ hlist_for_each_entry(arfs_rule, head, hlist) {
+ if (arfs_rule->tuple.src_port == src_port &&
+ arfs_rule->tuple.dst_port == dst_port &&
+ arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+ return arfs_rule;
+ }
+ }
+
+ return NULL;
+}
+
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct arfs_table *arfs_t;
+ struct arfs_rule *arfs_rule;
+
+ if (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+
+ arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+ if (!arfs_t)
+ return -EPROTONOSUPPORT;
+
+ spin_lock_bh(&arfs->arfs_lock);
+ arfs_rule = arfs_find_rule(arfs_t, skb);
+ if (arfs_rule) {
+ if (arfs_rule->rxq == rxq_index) {
+ spin_unlock_bh(&arfs->arfs_lock);
+ return arfs_rule->filter_id;
+ }
+ arfs_rule->rxq = rxq_index;
+ } else {
+ arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
+ rxq_index, flow_id);
+ if (!arfs_rule) {
+ spin_unlock_bh(&arfs->arfs_lock);
+ return -ENOMEM;
+ }
+ }
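+ /* Program the HW rule from the workqueue; the stack later checks
+ * the returned filter_id via rps_may_expire_flow().
+ */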
+ queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
+ spin_unlock_bh(&arfs->arfs_lock);
+ return arfs_rule->filter_id;
+}
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 2018eebe1..847a8f3ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -93,6 +93,8 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
+ /* Reset CQE compression to Admin default */
+ mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_admin);
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -108,6 +110,8 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* Disable CQE compression */
+ mlx5e_modify_rx_cqe_compression(priv, false);
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 3036f279a..c585349e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -96,7 +96,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_ETS:
- tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
+ tc_tx_bw[i] = ets->tc_tx_bw[i];
break;
}
}
@@ -140,8 +140,12 @@ static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+ if (!ets->tc_tx_bw[i])
+ return -EINVAL;
+
bw_sum += ets->tc_tx_bw[i];
+ }
}
if (bw_sum != 0 && bw_sum != 100)
@@ -174,8 +178,14 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ int i;
pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
+ pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
+ }
return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3476ab844..e667a870e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -165,26 +165,112 @@ static const struct {
},
};
+static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 pfc_en_tx;
+ u8 pfc_en_rx;
+ int err;
+
+ err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
+
+ return err ? 0 : pfc_en_tx | pfc_en_rx;
+}
+
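+/* ethtool -S layout: RQ/SQ stats exist only while the netdev is open;
+ * PFC counters exist only for priorities with PFC enabled.
+ */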
+#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
+#define MLX5E_NUM_RQ_STATS(priv) \
+ (NUM_RQ_STATS * priv->params.num_channels * \
+ test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_SQ_STATS(priv) \
+ (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
+ test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_PFC_COUNTERS(priv) \
+ (hweight8(mlx5e_query_pfc_combined(priv)) * \
+ NUM_PPORT_PER_PRIO_PFC_COUNTERS)
+
static int mlx5e_get_sset_count(struct net_device *dev, int sset)
{
struct mlx5e_priv *priv = netdev_priv(dev);
switch (sset) {
case ETH_SS_STATS:
- return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
- priv->params.num_channels * NUM_RQ_STATS +
- priv->params.num_channels * priv->params.num_tc *
- NUM_SQ_STATS;
+ return NUM_SW_COUNTERS +
+ MLX5E_NUM_Q_CNTRS(priv) +
+ NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
+ MLX5E_NUM_RQ_STATS(priv) +
+ MLX5E_NUM_SQ_STATS(priv) +
+ MLX5E_NUM_PFC_COUNTERS(priv);
/* fallthrough */
default:
return -EOPNOTSUPP;
}
}
+static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+ int i, j, tc, prio, idx = 0;
+ unsigned long pfc_combined;
+
+ /* SW counters */
+ for (i = 0; i < NUM_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
+
+ /* Q counters */
+ for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
+
+ /* VPORT counters */
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vport_stats_desc[i].format);
+
+ /* PPORT counters */
+ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_802_3_stats_desc[i].format);
+
+ for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_2863_stats_desc[i].format);
+
+ for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_2819_stats_desc[i].format);
+
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_traffic_stats_desc[i].format, prio);
+ }
+
+ pfc_combined = mlx5e_query_pfc_combined(priv);
+ for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, prio);
+ }
+ }
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return;
+
+ /* per channel counters */
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ rq_stats_desc[j].format, i);
+
+ for (tc = 0; tc < priv->params.num_tc; tc++)
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ sq_stats_desc[j].format,
+ priv->channeltc_to_txq_map[i][tc]);
+}
+
static void mlx5e_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
- int i, j, tc, idx = 0;
struct mlx5e_priv *priv = netdev_priv(dev);
switch (stringset) {
@@ -195,30 +281,7 @@ static void mlx5e_get_strings(struct net_device *dev,
break;
case ETH_SS_STATS:
- /* VPORT counters */
- for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_strings[i]);
-
- /* PPORT counters */
- for (i = 0; i < NUM_PPORT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_strings[i]);
-
- /* per channel counters */
- for (i = 0; i < priv->params.num_channels; i++)
- for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- "rx%d_%s", i, rq_stats_strings[j]);
-
- for (tc = 0; tc < priv->params.num_tc; tc++)
- for (i = 0; i < priv->params.num_channels; i++)
- for (j = 0; j < NUM_SQ_STATS; j++)
- sprintf(data +
- (idx++) * ETH_GSTRING_LEN,
- "tx%d_%s",
- priv->channeltc_to_txq_map[i][tc],
- sq_stats_strings[j]);
+ mlx5e_fill_stats_strings(priv, data);
break;
}
}
@@ -227,7 +290,8 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int i, j, tc, idx = 0;
+ int i, j, tc, prio, idx = 0;
+ unsigned long pfc_combined;
if (!data)
return;
@@ -237,33 +301,68 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
mlx5e_update_stats(priv);
mutex_unlock(&priv->state_lock);
+ for (i = 0; i < NUM_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_stats_desc, i);
+
+ for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
+ data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+ q_stats_desc, i);
+
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- data[idx++] = ((u64 *)&priv->stats.vport)[i];
+ data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+ vport_stats_desc, i);
+
+ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
+ pport_802_3_stats_desc, i);
- for (i = 0; i < NUM_PPORT_COUNTERS; i++)
- data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]);
+ for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
+ pport_2863_stats_desc, i);
+
+ for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
+ pport_2819_stats_desc, i);
+
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+ pport_per_prio_traffic_stats_desc, i);
+ }
+
+ pfc_combined = mlx5e_query_pfc_combined(priv);
+ for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+ pport_per_prio_pfc_stats_desc, i);
+ }
+ }
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return;
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
- data[idx++] = !test_bit(MLX5E_STATE_OPENED,
- &priv->state) ? 0 :
- ((u64 *)&priv->channel[i]->rq.stats)[j];
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->channel[i]->rq.stats,
+ rq_stats_desc, j);
for (tc = 0; tc < priv->params.num_tc; tc++)
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
- data[idx++] = !test_bit(MLX5E_STATE_OPENED,
- &priv->state) ? 0 :
- ((u64 *)&priv->channel[i]->sq[tc].stats)[j];
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats,
+ sq_stats_desc, j);
}
static void mlx5e_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ int rq_wq_type = priv->params.rq_wq_type;
- param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type);
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->params.log_rq_size;
param->tx_pending = 1 << priv->params.log_sq_size;
@@ -274,6 +373,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
bool was_opened;
+ int rq_wq_type = priv->params.rq_wq_type;
u16 min_rx_wqes;
u8 log_rq_size;
u8 log_sq_size;
@@ -289,16 +389,16 @@ static int mlx5e_set_ringparam(struct net_device *dev,
__func__);
return -EINVAL;
}
- if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+ if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) {
netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
- 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ 1 << mlx5_min_log_rq_size(rq_wq_type));
return -EINVAL;
}
- if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+ if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) {
netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
__func__, param->rx_pending,
- 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+ 1 << mlx5_max_log_rq_size(rq_wq_type));
return -EINVAL;
}
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
@@ -316,8 +416,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
log_rq_size = order_base_2(param->rx_pending);
log_sq_size = order_base_2(param->tx_pending);
- min_rx_wqes = min_t(u16, param->rx_pending - 1,
- MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+ min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending);
if (log_rq_size == priv->params.log_rq_size &&
log_sq_size == priv->params.log_sq_size &&
@@ -357,6 +456,7 @@ static int mlx5e_set_channels(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
int ncv = mlx5e_get_max_num_channels(priv->mdev);
unsigned int count = ch->combined_count;
+ bool arfs_enabled;
bool was_opened;
int err = 0;
@@ -385,13 +485,27 @@ static int mlx5e_set_channels(struct net_device *dev,
if (was_opened)
mlx5e_close_locked(dev);
+ arfs_enabled = dev->features & NETIF_F_NTUPLE;
+ if (arfs_enabled)
+ mlx5e_arfs_disable(priv);
+
priv->params.num_channels = count;
- mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+ mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, count);
if (was_opened)
err = mlx5e_open_locked(dev);
+ if (err)
+ goto out;
+
+ if (arfs_enabled) {
+ err = mlx5e_arfs_enable(priv);
+ if (err)
+ netdev_err(dev, "%s: mlx5e_arfs_enable failed: %d\n",
+ __func__, err);
+ }
+out:
mutex_unlock(&priv->state_lock);
return err;
@@ -499,6 +613,25 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
return 0;
}
+int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+{
+ u32 max_speed = 0;
+ u32 proto_cap;
+ int err;
+ int i;
+
+ err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i)
+ if (proto_cap & MLX5E_PROT_MASK(i))
+ max_speed = max(max_speed, ptys2ethtool_table[i].speed);
+
+ *speed = max_speed;
+ return 0;
+}
+
static void get_speed_duplex(struct net_device *netdev,
u32 eth_proto_oper,
struct ethtool_cmd *cmd)
@@ -727,9 +860,8 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
mlx5e_build_tir_ctx_hash(tirc, priv);
- for (i = 0; i < MLX5E_NUM_TT; i++)
- if (IS_HASHING_TT(i))
- mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen);
+ for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+ mlx5_core_modify_tir(mdev, priv->indir_tirn[i], in, inlen);
}
static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -751,9 +883,11 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
mutex_lock(&priv->state_lock);
if (indir) {
+ u32 rqtn = priv->indir_rqtn;
+
memcpy(priv->params.indirection_rqt, indir,
sizeof(priv->params.indirection_rqt));
- mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+ mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
}
if (key)
@@ -1036,6 +1170,108 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
+static int mlx5e_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 beacon_duration;
+
+ if (!MLX5_CAP_GEN(mdev, beacon_led))
+ return -EOPNOTSUPP;
+
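+ /* Beacon LED: infinite duration while active, off when inactive */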
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ beacon_duration = MLX5_BEACON_DURATION_INF;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ beacon_duration = MLX5_BEACON_DURATION_OFF;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return mlx5_set_port_beacon(mdev, beacon_duration);
+}
+
+static int mlx5e_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *dev = priv->mdev;
+ int size_read = 0;
+ u8 data[4];
+
+ size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
+ if (size_read < 2)
+ return -EIO;
+
+ /* data[0] = identifier byte */
+ switch (data[0]) {
+ case MLX5_MODULE_ID_QSFP:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case MLX5_MODULE_ID_QSFP_PLUS:
+ case MLX5_MODULE_ID_QSFP28:
+ /* data[1] = revision id */
+ if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ }
+ break;
+ case MLX5_MODULE_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ default:
+ netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
+ __func__, data[0]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlx5e_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int offset = ee->offset;
+ int size_read;
+ int i = 0;
+
+ if (!ee->len)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
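+ /* Read in chunks until the requested length is done or no data is left */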
+ while (i < ee->len) {
+ size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i,
+ data + i);
+
+ if (!size_read)
+ /* Done reading */
+ return 0;
+
+ if (size_read < 0) {
+ netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
+ __func__, size_read);
+ return 0;
+ }
+
+ i += size_read;
+ offset += size_read;
+ }
+
+ return 0;
+}
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1060,6 +1296,9 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_pauseparam = mlx5e_get_pauseparam,
.set_pauseparam = mlx5e_set_pauseparam,
.get_ts_info = mlx5e_get_ts_info,
+ .set_phys_id = mlx5e_set_phys_id,
.get_wol = mlx5e_get_wol,
.set_wol = mlx5e_set_wol,
+ .get_module_info = mlx5e_get_module_info,
+ .get_module_eeprom = mlx5e_get_module_eeprom,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index d00a24203..b32740092 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -37,7 +37,10 @@
#include <linux/mlx5/fs.h>
#include "en.h"
-#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5e_l2_rule *ai, int type);
+static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5e_l2_rule *ai);
enum {
MLX5E_FULLMATCH = 0,
@@ -58,21 +61,21 @@ enum {
MLX5E_ACTION_DEL = 2,
};
-struct mlx5e_eth_addr_hash_node {
+struct mlx5e_l2_hash_node {
struct hlist_node hlist;
u8 action;
- struct mlx5e_eth_addr_info ai;
+ struct mlx5e_l2_rule ai;
};
-static inline int mlx5e_hash_eth_addr(u8 *addr)
+static inline int mlx5e_hash_l2(u8 *addr)
{
return addr[5];
}
-static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
- struct mlx5e_eth_addr_hash_node *hn;
- int ix = mlx5e_hash_eth_addr(addr);
+ struct mlx5e_l2_hash_node *hn;
+ int ix = mlx5e_hash_l2(addr);
int found = 0;
hlist_for_each_entry(hn, &hash[ix], hlist)
@@ -96,371 +99,12 @@ static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
hlist_add_head(&hn->hlist, &hash[ix]);
}
-static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
hlist_del(&hn->hlist);
kfree(hn);
}
-static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
- struct mlx5e_eth_addr_info *ai)
-{
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_ANY))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
-}
-
-static int mlx5e_get_eth_addr_type(u8 *addr)
-{
- if (is_unicast_ether_addr(addr))
- return MLX5E_UC;
-
- if ((addr[0] == 0x01) &&
- (addr[1] == 0x00) &&
- (addr[2] == 0x5e) &&
- !(addr[3] & 0x80))
- return MLX5E_MC_IPV4;
-
- if ((addr[0] == 0x33) &&
- (addr[1] == 0x33))
- return MLX5E_MC_IPV6;
-
- return MLX5E_MC_OTHER;
-}
-
-static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
-{
- int eth_addr_type;
- u32 ret;
-
- switch (type) {
- case MLX5E_FULLMATCH:
- eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
- switch (eth_addr_type) {
- case MLX5E_UC:
- ret =
- BIT(MLX5E_TT_IPV4_TCP) |
- BIT(MLX5E_TT_IPV6_TCP) |
- BIT(MLX5E_TT_IPV4_UDP) |
- BIT(MLX5E_TT_IPV6_UDP) |
- BIT(MLX5E_TT_IPV4_IPSEC_AH) |
- BIT(MLX5E_TT_IPV6_IPSEC_AH) |
- BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
- BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
- BIT(MLX5E_TT_IPV4) |
- BIT(MLX5E_TT_IPV6) |
- BIT(MLX5E_TT_ANY) |
- 0;
- break;
-
- case MLX5E_MC_IPV4:
- ret =
- BIT(MLX5E_TT_IPV4_UDP) |
- BIT(MLX5E_TT_IPV4) |
- 0;
- break;
-
- case MLX5E_MC_IPV6:
- ret =
- BIT(MLX5E_TT_IPV6_UDP) |
- BIT(MLX5E_TT_IPV6) |
- 0;
- break;
-
- case MLX5E_MC_OTHER:
- ret =
- BIT(MLX5E_TT_ANY) |
- 0;
- break;
- }
-
- break;
-
- case MLX5E_ALLMULTI:
- ret =
- BIT(MLX5E_TT_IPV4_UDP) |
- BIT(MLX5E_TT_IPV6_UDP) |
- BIT(MLX5E_TT_IPV4) |
- BIT(MLX5E_TT_IPV6) |
- BIT(MLX5E_TT_ANY) |
- 0;
- break;
-
- default: /* MLX5E_PROMISC */
- ret =
- BIT(MLX5E_TT_IPV4_TCP) |
- BIT(MLX5E_TT_IPV6_TCP) |
- BIT(MLX5E_TT_IPV4_UDP) |
- BIT(MLX5E_TT_IPV6_UDP) |
- BIT(MLX5E_TT_IPV4_IPSEC_AH) |
- BIT(MLX5E_TT_IPV6_IPSEC_AH) |
- BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
- BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
- BIT(MLX5E_TT_IPV4) |
- BIT(MLX5E_TT_IPV6) |
- BIT(MLX5E_TT_ANY) |
- 0;
- break;
- }
-
- return ret;
-}
-
-static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
- struct mlx5e_eth_addr_info *ai,
- int type, u32 *mc, u32 *mv)
-{
- struct mlx5_flow_destination dest;
- u8 match_criteria_enable = 0;
- struct mlx5_flow_rule **rule_p;
- struct mlx5_flow_table *ft = priv->fts.main.t;
- u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
- outer_headers.dmac_47_16);
- u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
- outer_headers.dmac_47_16);
- u32 *tirn = priv->tirn;
- u32 tt_vec;
- int err = 0;
-
- dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-
- switch (type) {
- case MLX5E_FULLMATCH:
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- eth_broadcast_addr(mc_dmac);
- ether_addr_copy(mv_dmac, ai->addr);
- break;
-
- case MLX5E_ALLMULTI:
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- mc_dmac[0] = 0x01;
- mv_dmac[0] = 0x01;
- break;
-
- case MLX5E_PROMISC:
- break;
- }
-
- tt_vec = mlx5e_get_tt_vec(ai, type);
-
- if (tt_vec & BIT(MLX5E_TT_ANY)) {
- rule_p = &ai->ft_rule[MLX5E_TT_ANY];
- dest.tir_num = tirn[MLX5E_TT_ANY];
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_ANY);
- }
-
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
- dest.tir_num = tirn[MLX5E_TT_IPV4];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
- dest.tir_num = tirn[MLX5E_TT_IPV6];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV6);
- }
-
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
- MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
- dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
- dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
- }
-
- MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
- dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
- dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
-
- ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
- }
-
- MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
- dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
- dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
- }
-
- MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
- dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
- dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
- }
-
- return 0;
-
-err_del_ai:
- err = PTR_ERR(*rule_p);
- *rule_p = NULL;
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
-
- return err;
-}
-
-static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
- struct mlx5e_eth_addr_info *ai, int type)
-{
- u32 *match_criteria;
- u32 *match_value;
- int err = 0;
-
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
- err = -ENOMEM;
- goto add_eth_addr_rule_out;
- }
-
- err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria,
- match_value);
-
-add_eth_addr_rule_out:
- kvfree(match_criteria);
- kvfree(match_value);
-
- return err;
-}
-
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
struct net_device *ndev = priv->netdev;
@@ -472,7 +116,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
int i;
list_size = 0;
- for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
+ for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
list_size++;
max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
@@ -489,7 +133,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
return -ENOMEM;
i = 0;
- for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
+ for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
if (i >= list_size)
break;
vlans[i++] = vlan;
@@ -514,28 +158,28 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type,
u16 vid, u32 *mc, u32 *mv)
{
- struct mlx5_flow_table *ft = priv->fts.vlan.t;
+ struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_destination dest;
u8 match_criteria_enable = 0;
struct mlx5_flow_rule **rule_p;
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fts.main.t;
+ dest.ft = priv->fs.l2.ft.t;
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- rule_p = &priv->vlan.untagged_rule;
+ rule_p = &priv->fs.vlan.untagged_rule;
break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID:
- rule_p = &priv->vlan.any_vlan_rule;
+ rule_p = &priv->fs.vlan.any_vlan_rule;
MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
- rule_p = &priv->vlan.active_vlans_rule[vid];
+ rule_p = &priv->fs.vlan.active_vlans_rule[vid];
MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
@@ -589,22 +233,22 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
{
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- if (priv->vlan.untagged_rule) {
- mlx5_del_flow_rule(priv->vlan.untagged_rule);
- priv->vlan.untagged_rule = NULL;
+ if (priv->fs.vlan.untagged_rule) {
+ mlx5_del_flow_rule(priv->fs.vlan.untagged_rule);
+ priv->fs.vlan.untagged_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID:
- if (priv->vlan.any_vlan_rule) {
- mlx5_del_flow_rule(priv->vlan.any_vlan_rule);
- priv->vlan.any_vlan_rule = NULL;
+ if (priv->fs.vlan.any_vlan_rule) {
+ mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule);
+ priv->fs.vlan.any_vlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
mlx5e_vport_context_update_vlans(priv);
- if (priv->vlan.active_vlans_rule[vid]) {
- mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]);
- priv->vlan.active_vlans_rule[vid] = NULL;
+ if (priv->fs.vlan.active_vlans_rule[vid]) {
+ mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]);
+ priv->fs.vlan.active_vlans_rule[vid] = NULL;
}
mlx5e_vport_context_update_vlans(priv);
break;
@@ -613,10 +257,10 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
- if (!priv->vlan.filter_disabled)
+ if (!priv->fs.vlan.filter_disabled)
return;
- priv->vlan.filter_disabled = false;
+ priv->fs.vlan.filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
@@ -624,10 +268,10 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
- if (priv->vlan.filter_disabled)
+ if (priv->fs.vlan.filter_disabled)
return;
- priv->vlan.filter_disabled = true;
+ priv->fs.vlan.filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
@@ -638,7 +282,7 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- set_bit(vid, priv->vlan.active_vlans);
+ set_bit(vid, priv->fs.vlan.active_vlans);
return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}
@@ -648,7 +292,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- clear_bit(vid, priv->vlan.active_vlans);
+ clear_bit(vid, priv->fs.vlan.active_vlans);
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
@@ -656,21 +300,21 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
- for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+ for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
-static void mlx5e_execute_action(struct mlx5e_priv *priv,
- struct mlx5e_eth_addr_hash_node *hn)
+static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
+ struct mlx5e_l2_hash_node *hn)
{
switch (hn->action) {
case MLX5E_ACTION_ADD:
- mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
hn->action = MLX5E_ACTION_NONE;
break;
case MLX5E_ACTION_DEL:
- mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
- mlx5e_del_eth_addr_from_hash(hn);
+ mlx5e_del_l2_flow_rule(priv, &hn->ai);
+ mlx5e_del_l2_from_hash(hn);
break;
}
}
@@ -682,14 +326,14 @@ static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
netif_addr_lock_bh(netdev);
- mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
- priv->netdev->dev_addr);
+ mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
+ priv->netdev->dev_addr);
netdev_for_each_uc_addr(ha, netdev)
- mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
+ mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
netdev_for_each_mc_addr(ha, netdev)
- mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
+ mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
netif_addr_unlock_bh(netdev);
}
@@ -699,17 +343,17 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
struct net_device *ndev = priv->netdev;
- struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_l2_hash_node *hn;
struct hlist_head *addr_list;
struct hlist_node *tmp;
int i = 0;
int hi;
- addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
+ addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
if (is_uc) /* Make sure our own address is pushed first */
ether_addr_copy(addr_array[i++], ndev->dev_addr);
- else if (priv->eth_addr.broadcast_enabled)
+ else if (priv->fs.l2.broadcast_enabled)
ether_addr_copy(addr_array[i++], ndev->broadcast);
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
@@ -725,7 +369,7 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
int list_type)
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
- struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_l2_hash_node *hn;
u8 (*addr_array)[ETH_ALEN] = NULL;
struct hlist_head *addr_list;
struct hlist_node *tmp;
@@ -734,12 +378,12 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
int err;
int hi;
- size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
+ size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
max_size = is_uc ?
1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
- addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
+ addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
size++;
@@ -770,7 +414,7 @@ out:
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
- struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+ struct mlx5e_l2_table *ea = &priv->fs.l2;
mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
@@ -781,26 +425,26 @@ static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
- struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
- mlx5e_execute_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+ mlx5e_execute_l2_action(priv, hn);
- mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
- mlx5e_execute_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+ mlx5e_execute_l2_action(priv, hn);
}
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
- struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+ mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
hn->action = MLX5E_ACTION_DEL;
- mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+ mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
hn->action = MLX5E_ACTION_DEL;
if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
@@ -814,7 +458,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
set_rx_mode_work);
- struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+ struct mlx5e_l2_table *ea = &priv->fs.l2;
struct net_device *ndev = priv->netdev;
bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
@@ -830,27 +474,27 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
if (enable_promisc) {
- mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
- if (!priv->vlan.filter_disabled)
+ mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
+ if (!priv->fs.vlan.filter_disabled)
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
0);
}
if (enable_allmulti)
- mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+ mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
if (enable_broadcast)
- mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
mlx5e_handle_netdev_addr(priv);
if (disable_broadcast)
- mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+ mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
if (disable_allmulti)
- mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+ mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
if (disable_promisc) {
- if (!priv->vlan.filter_disabled)
+ if (!priv->fs.vlan.filter_disabled)
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
0);
- mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+ mlx5e_del_l2_flow_rule(priv, &ea->promisc);
}
ea->promisc_enabled = promisc_enabled;
@@ -872,224 +516,454 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
ft->num_groups = 0;
}
-void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
+void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
- ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
+ ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}
-#define MLX5E_MAIN_GROUP0_SIZE BIT(3)
-#define MLX5E_MAIN_GROUP1_SIZE BIT(1)
-#define MLX5E_MAIN_GROUP2_SIZE BIT(0)
-#define MLX5E_MAIN_GROUP3_SIZE BIT(14)
-#define MLX5E_MAIN_GROUP4_SIZE BIT(13)
-#define MLX5E_MAIN_GROUP5_SIZE BIT(11)
-#define MLX5E_MAIN_GROUP6_SIZE BIT(2)
-#define MLX5E_MAIN_GROUP7_SIZE BIT(1)
-#define MLX5E_MAIN_GROUP8_SIZE BIT(0)
-#define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\
- MLX5E_MAIN_GROUP1_SIZE +\
- MLX5E_MAIN_GROUP2_SIZE +\
- MLX5E_MAIN_GROUP3_SIZE +\
- MLX5E_MAIN_GROUP4_SIZE +\
- MLX5E_MAIN_GROUP5_SIZE +\
- MLX5E_MAIN_GROUP6_SIZE +\
- MLX5E_MAIN_GROUP7_SIZE +\
- MLX5E_MAIN_GROUP8_SIZE)
-
-static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in,
- int inlen)
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
- u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
- u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
- match_criteria.outer_headers.dmac_47_16);
+ mlx5e_destroy_groups(ft);
+ kfree(ft->g);
+ mlx5_destroy_flow_table(ft->t);
+ ft->t = NULL;
+}
+
+static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++) {
+ if (!IS_ERR_OR_NULL(ttc->rules[i])) {
+ mlx5_del_flow_rule(ttc->rules[i]);
+ ttc->rules[i] = NULL;
+ }
+ }
+}
+
+static struct {
+ u16 etype;
+ u8 proto;
+} ttc_rules[] = {
+ [MLX5E_TT_IPV4_TCP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_TCP,
+ },
+ [MLX5E_TT_IPV6_TCP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_TCP,
+ },
+ [MLX5E_TT_IPV4_UDP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_UDP,
+ },
+ [MLX5E_TT_IPV6_UDP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_UDP,
+ },
+ [MLX5E_TT_IPV4_IPSEC_AH] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_AH,
+ },
+ [MLX5E_TT_IPV6_IPSEC_AH] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_AH,
+ },
+ [MLX5E_TT_IPV4_IPSEC_ESP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_ESP,
+ },
+ [MLX5E_TT_IPV6_IPSEC_ESP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_ESP,
+ },
+ [MLX5E_TT_IPV4] = {
+ .etype = ETH_P_IP,
+ .proto = 0,
+ },
+ [MLX5E_TT_IPV6] = {
+ .etype = ETH_P_IPV6,
+ .proto = 0,
+ },
+ [MLX5E_TT_ANY] = {
+ .etype = 0,
+ .proto = 0,
+ },
+};
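
[Editor's note] The nine hand-built MAIN-table groups are replaced above by one static traffic-type table, where a zero etype or proto means that field is left out of the match. A minimal, compilable userspace sketch of the same designated-initializer pattern; the TT_* names and values below are illustrative stand-ins for the driver's MLX5E_TT_* types:

/* Illustrative userspace sketch, not driver code. */
#include <stdio.h>
#include <stdint.h>

enum tt { TT_IPV4_TCP, TT_IPV6_TCP, TT_IPV4, TT_ANY, TT_MAX };

static const struct {
	uint16_t etype;	/* 0: do not match EtherType */
	uint8_t proto;	/* 0: do not match IP protocol */
} rules[TT_MAX] = {
	[TT_IPV4_TCP] = { .etype = 0x0800, .proto = 6 },
	[TT_IPV6_TCP] = { .etype = 0x86DD, .proto = 6 },
	[TT_IPV4]     = { .etype = 0x0800 },
	/* [TT_ANY] stays all-zero: the wildcard entry */
};

int main(void)
{
	for (int tt = 0; tt < TT_MAX; tt++)
		printf("tt %d: etype 0x%04x proto %u\n",
		       tt, rules[tt].etype, rules[tt].proto);
	return 0;
}
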
+
+static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_destination *dest,
+ u16 etype,
+ u8 proto)
+{
+ struct mlx5_flow_rule *rule;
+ u8 match_criteria_enable = 0;
+ u32 *match_criteria;
+ u32 *match_value;
+ int err = 0;
+
+ match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!match_value || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (proto) {
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto);
+ }
+ if (etype) {
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype);
+ }
+
+ rule = mlx5_add_flow_rule(ft, match_criteria_enable,
+ match_criteria, match_value,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG,
+ dest);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+ }
+out:
+ kvfree(match_criteria);
+ kvfree(match_value);
+ return err ? ERR_PTR(err) : rule;
+}
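
[Editor's note] mlx5e_generate_ttc_rule() returns either a valid rule pointer or an errno folded into the pointer via ERR_PTR(). A self-contained userspace imitation of that convention; the helpers below mimic, and are not, the kernel's include/linux/err.h definitions:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* error pointers occupy the top MAX_ERRNO addresses */
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *make_rule(int fail)
{
	static int rule = 42;			/* stand-in for a flow rule */
	return fail ? ERR_PTR(-ENOMEM) : (void *)&rule;
}

int main(void)
{
	void *r = make_rule(1);

	if (IS_ERR(r))
		printf("failed: %ld\n", PTR_ERR(r));	/* prints -12 */
	return 0;
}
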
+
+static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_destination dest;
+ struct mlx5e_ttc_table *ttc;
+ struct mlx5_flow_rule **rules;
+ struct mlx5_flow_table *ft;
+ int tt;
int err;
+
+ ttc = &priv->fs.ttc;
+ ft = ttc->ft.t;
+ rules = ttc->rules;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
+ if (tt == MLX5E_TT_ANY)
+ dest.tir_num = priv->direct_tir[0].tirn;
+ else
+ dest.tir_num = priv->indir_tirn[tt];
+ rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
+ ttc_rules[tt].etype,
+ ttc_rules[tt].proto);
+ if (IS_ERR(rules[tt]))
+ goto del_rules;
+ }
+
+ return 0;
+
+del_rules:
+ err = PTR_ERR(rules[tt]);
+ rules[tt] = NULL;
+ mlx5e_cleanup_ttc_rules(ttc);
+ return err;
+}
+
+#define MLX5E_TTC_NUM_GROUPS 3
+#define MLX5E_TTC_GROUP1_SIZE BIT(3)
+#define MLX5E_TTC_GROUP2_SIZE BIT(1)
+#define MLX5E_TTC_GROUP3_SIZE BIT(0)
+#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
+ MLX5E_TTC_GROUP2_SIZE +\
+ MLX5E_TTC_GROUP3_SIZE)
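
[Editor's note] Each group in the macros above gets a power-of-two slice and the table is exactly the sum of the slices, so a running cursor can hand out start/end flow indices the way mlx5e_create_ttc_table_groups() does. A standalone check of the arithmetic:

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned long sizes[] = { BIT(3), BIT(1), BIT(0) };	/* L4, L3, any */
	unsigned long ix = 0;

	for (int g = 0; g < 3; g++) {
		printf("group %d: flow index %lu..%lu\n",
		       g, ix, ix + sizes[g] - 1);
		ix += sizes[g];
	}
	printf("table size: %lu\n", ix);	/* 11 == MLX5E_TTC_TABLE_SIZE */
	return 0;
}
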
+static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5e_flow_table *ft = &ttc->ft;
int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP0_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
- ft->num_groups++;
+ ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
+ sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ /* L4 Group */
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP1_SIZE;
+ ix += MLX5E_TTC_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
+ goto err;
ft->num_groups++;
- memset(in, 0, inlen);
+ /* L3 Group */
+ MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP2_SIZE;
+ ix += MLX5E_TTC_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
+ goto err;
ft->num_groups++;
+ /* Any Group */
memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
- eth_broadcast_addr(dmac);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP3_SIZE;
+ ix += MLX5E_TTC_GROUP3_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
+ goto err;
ft->num_groups++;
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- eth_broadcast_addr(dmac);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP4_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
- ft->num_groups++;
+ kvfree(in);
+ return 0;
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- eth_broadcast_addr(dmac);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP5_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
- ft->num_groups++;
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
- dmac[0] = 0x01;
+ return err;
+}
+
+static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
+
+ mlx5e_cleanup_ttc_rules(ttc);
+ mlx5e_destroy_flow_table(&ttc->ft);
+}
+
+static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
+ struct mlx5e_flow_table *ft = &ttc->ft;
+ int err;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+ MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL);
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ ft->t = NULL;
+ return err;
+ }
+
+ err = mlx5e_create_ttc_table_groups(ttc);
+ if (err)
+ goto err;
+
+ err = mlx5e_generate_ttc_table_rules(priv);
+ if (err)
+ goto err;
+
+ return 0;
+err:
+ mlx5e_destroy_flow_table(ft);
+ return err;
+}
+
+static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5e_l2_rule *ai)
+{
+ if (!IS_ERR_OR_NULL(ai->rule)) {
+ mlx5_del_flow_rule(ai->rule);
+ ai->rule = NULL;
+ }
+}
+
+static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5e_l2_rule *ai, int type)
+{
+ struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_destination dest;
+ u8 match_criteria_enable = 0;
+ u32 *match_criteria;
+ u32 *match_value;
+ int err = 0;
+ u8 *mc_dmac;
+ u8 *mv_dmac;
+
+ match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!match_value || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_l2_rule_out;
+ }
+
+ mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dmac_47_16);
+ mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.dmac_47_16);
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = priv->fs.ttc.ft.t;
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ eth_broadcast_addr(mc_dmac);
+ ether_addr_copy(mv_dmac, ai->addr);
+ break;
+
+ case MLX5E_ALLMULTI:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ mc_dmac[0] = 0x01;
+ mv_dmac[0] = 0x01;
+ break;
+
+ case MLX5E_PROMISC:
+ break;
+ }
+
+ ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
+ match_value,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ if (IS_ERR(ai->rule)) {
+ netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
+ __func__, mv_dmac);
+ err = PTR_ERR(ai->rule);
+ ai->rule = NULL;
+ }
+
+add_l2_rule_out:
+ kvfree(match_criteria);
+ kvfree(match_value);
+
+ return err;
+}
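
[Editor's note] The three rule types differ only in how much of the destination MAC participates in the match: promisc masks nothing, allmulti masks just the multicast bit (dmac[0] & 0x01), and fullmatch masks all six bytes. A runnable userspace sketch of masked MAC matching; the addresses are examples:

#include <stdio.h>
#include <stdint.h>

static int mac_matches(const uint8_t *mask, const uint8_t *val,
		       const uint8_t *pkt)
{
	for (int i = 0; i < 6; i++)
		if ((pkt[i] & mask[i]) != (val[i] & mask[i]))
			return 0;
	return 1;
}

int main(void)
{
	uint8_t mask[6] = { 0x01 }, val[6] = { 0x01 };	/* allmulti case */
	uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint8_t ucast[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("mcast %d ucast %d\n",
	       mac_matches(mask, val, mcast),	/* 1 */
	       mac_matches(mask, val, ucast));	/* 0 */
	return 0;
}
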
+
+#define MLX5E_NUM_L2_GROUPS 3
+#define MLX5E_L2_GROUP1_SIZE BIT(0)
+#define MLX5E_L2_GROUP2_SIZE BIT(15)
+#define MLX5E_L2_GROUP3_SIZE BIT(0)
+#define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\
+ MLX5E_L2_GROUP2_SIZE +\
+ MLX5E_L2_GROUP3_SIZE)
+static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5e_flow_table *ft = &l2_table->ft;
+ int ix = 0;
+ u8 *mc_dmac;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
+ outer_headers.dmac_47_16);
+ /* Flow Group for promiscuous */
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP6_SIZE;
+ ix += MLX5E_L2_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups;
ft->num_groups++;
- memset(in, 0, inlen);
+ /* Flow Group for full match */
+ eth_broadcast_addr(mc_dmac);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- dmac[0] = 0x01;
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP7_SIZE;
+ ix += MLX5E_L2_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups;
ft->num_groups++;
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- dmac[0] = 0x01;
+ /* Flow Group for allmulti */
+ eth_zero_addr(mc_dmac);
+ mc_dmac[0] = 0x01;
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP8_SIZE;
+ ix += MLX5E_L2_GROUP3_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups;
ft->num_groups++;
+ kvfree(in);
return 0;
err_destroy_groups:
err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL;
mlx5e_destroy_groups(ft);
+ kvfree(in);
return err;
}
-static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
+static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
- u32 *in;
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- int err;
-
- in = mlx5_vzalloc(inlen);
- if (!in)
- return -ENOMEM;
-
- err = __mlx5e_create_main_groups(ft, in, inlen);
-
- kvfree(in);
- return err;
+ mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}
-static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
- struct mlx5e_flow_table *ft = &priv->fts.main;
+ struct mlx5e_l2_table *l2_table = &priv->fs.l2;
+ struct mlx5e_flow_table *ft = &l2_table->ft;
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE);
+ ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+ MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
- ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
- if (!ft->g) {
- err = -ENOMEM;
- goto err_destroy_main_flow_table;
- }
- err = mlx5e_create_main_groups(ft);
+ err = mlx5e_create_l2_table_groups(l2_table);
if (err)
- goto err_free_g;
- return 0;
+ goto err_destroy_flow_table;
-err_free_g:
- kfree(ft->g);
+ return 0;
-err_destroy_main_flow_table:
+err_destroy_flow_table:
mlx5_destroy_flow_table(ft->t);
ft->t = NULL;
return err;
}
-static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
-{
- mlx5e_destroy_groups(ft);
- kfree(ft->g);
- mlx5_destroy_flow_table(ft->t);
- ft->t = NULL;
-}
-
-static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
-{
- mlx5e_destroy_flow_table(&priv->fts.main);
-}
-
#define MLX5E_NUM_VLAN_GROUPS 2
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(1)
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
MLX5E_VLAN_GROUP1_SIZE)
-static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in,
- int inlen)
+static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
+ int inlen)
{
int err;
int ix = 0;
@@ -1128,7 +1002,7 @@ err_destroy_groups:
return err;
}
-static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
+static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
u32 *in;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -1138,19 +1012,20 @@ static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
if (!in)
return -ENOMEM;
- err = __mlx5e_create_vlan_groups(ft, in, inlen);
+ err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
kvfree(in);
return err;
}
-static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
- struct mlx5e_flow_table *ft = &priv->fts.vlan;
+ struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE);
+ ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+ MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@@ -1160,65 +1035,90 @@ static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g) {
err = -ENOMEM;
- goto err_destroy_vlan_flow_table;
+ goto err_destroy_vlan_table;
}
- err = mlx5e_create_vlan_groups(ft);
+ err = mlx5e_create_vlan_table_groups(ft);
if (err)
goto err_free_g;
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ if (err)
+ goto err_destroy_vlan_flow_groups;
+
return 0;
+err_destroy_vlan_flow_groups:
+ mlx5e_destroy_groups(ft);
err_free_g:
kfree(ft->g);
-
-err_destroy_vlan_flow_table:
+err_destroy_vlan_table:
mlx5_destroy_flow_table(ft->t);
ft->t = NULL;
return err;
}
-static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
- mlx5e_destroy_flow_table(&priv->fts.vlan);
+ mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}
-int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
int err;
- priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
+ priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (!priv->fts.ns)
+ if (!priv->fs.ns)
return -EINVAL;
- err = mlx5e_create_vlan_flow_table(priv);
- if (err)
- return err;
+ err = mlx5e_arfs_create_tables(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
+ err);
+ priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
+ }
- err = mlx5e_create_main_flow_table(priv);
- if (err)
- goto err_destroy_vlan_flow_table;
+ err = mlx5e_create_ttc_table(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
+ err);
+ goto err_destroy_arfs_tables;
+ }
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- if (err)
- goto err_destroy_main_flow_table;
+ err = mlx5e_create_l2_table(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
+ err);
+ goto err_destroy_ttc_table;
+ }
+
+ err = mlx5e_create_vlan_table(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
+ err);
+ goto err_destroy_l2_table;
+ }
return 0;
-err_destroy_main_flow_table:
- mlx5e_destroy_main_flow_table(priv);
-err_destroy_vlan_flow_table:
- mlx5e_destroy_vlan_flow_table(priv);
+err_destroy_l2_table:
+ mlx5e_destroy_l2_table(priv);
+err_destroy_ttc_table:
+ mlx5e_destroy_ttc_table(priv);
+err_destroy_arfs_tables:
+ mlx5e_arfs_destroy_tables(priv);
return err;
}
-void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- mlx5e_destroy_main_flow_table(priv);
- mlx5e_destroy_vlan_flow_table(priv);
+ mlx5e_destroy_vlan_table(priv);
+ mlx5e_destroy_l2_table(priv);
+ mlx5e_destroy_ttc_table(priv);
+ mlx5e_arfs_destroy_tables(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 94fef7058..5a4d88c2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,13 @@
#include "eswitch.h"
#include "vxlan.h"
+enum {
+ MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
+ MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
+ MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
+ MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
+};
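
[Editor's note] The enum encodes a bounded flush wait: poll every 20 ms and give up after 5000 ms, i.e. at most 250 iterations. A standalone model of the loop shape used later by mlx5e_close_rq()/mlx5e_close_sq(); poll_done() is a stand-in for the driver's emptiness checks and pretends the hardware drains after a few polls:

#include <stdio.h>
#include <unistd.h>

#define FLUSH_TIMEOUT_MS   5000
#define FLUSH_MSLEEP_QUANT 20
#define FLUSH_MAX_ITER     (FLUSH_TIMEOUT_MS / FLUSH_MSLEEP_QUANT)	/* 250 */

static int poll_done(int iter) { return iter > 3; }

int main(void)
{
	int tout = 0;

	while (!poll_done(tout) && tout++ < FLUSH_MAX_ITER)
		usleep(FLUSH_MSLEEP_QUANT * 1000);	/* ~msleep(20) */

	if (tout > FLUSH_MAX_ITER)
		printf("timed out; flag the queue\n");
	else
		printf("drained after %d polls\n", tout);
	return 0;
}
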
+
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
@@ -48,6 +55,7 @@ struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
u16 max_inline;
+ bool icosq;
};
struct mlx5e_cq_param {
@@ -59,8 +67,10 @@ struct mlx5e_cq_param {
struct mlx5e_channel_param {
struct mlx5e_rq_param rq;
struct mlx5e_sq_param sq;
+ struct mlx5e_sq_param icosq;
struct mlx5e_cq_param rx_cq;
struct mlx5e_cq_param tx_cq;
+ struct mlx5e_cq_param icosq_cq;
};
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -71,10 +81,13 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
port_state = mlx5_query_vport_state(mdev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
- if (port_state == VPORT_STATE_UP)
+ if (port_state == VPORT_STATE_UP) {
+ netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
- else
+ } else {
+ netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
+ }
}
static void mlx5e_update_carrier_work(struct work_struct *work)
@@ -88,111 +101,85 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+static void mlx5e_tx_timeout_work(struct work_struct *work)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_pport_stats *s = &priv->stats.pport;
- u32 *in;
- u32 *out;
- int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
-
- in = mlx5_vzalloc(sz);
- out = mlx5_vzalloc(sz);
- if (!in || !out)
- goto free_out;
-
- MLX5_SET(ppcnt_reg, in, local_port, 1);
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out,
- sz, MLX5_REG_PPCNT, 0, 0);
- memcpy(s->IEEE_802_3_counters,
- MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
- sizeof(s->IEEE_802_3_counters));
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out,
- sz, MLX5_REG_PPCNT, 0, 0);
- memcpy(s->RFC_2863_counters,
- MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
- sizeof(s->RFC_2863_counters));
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out,
- sz, MLX5_REG_PPCNT, 0, 0);
- memcpy(s->RFC_2819_counters,
- MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
- sizeof(s->RFC_2819_counters));
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ tx_timeout_work);
+ int err;
-free_out:
- kvfree(in);
- kvfree(out);
+ rtnl_lock();
+ mutex_lock(&priv->state_lock);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+ mlx5e_close_locked(priv->netdev);
+ err = mlx5e_open_locked(priv->netdev);
+ if (err)
+ netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+ err);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
}
-void mlx5e_update_stats(struct mlx5e_priv *priv)
+static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_vport_stats *s = &priv->stats.vport;
+ struct mlx5e_sw_stats *s = &priv->stats.sw;
struct mlx5e_rq_stats *rq_stats;
struct mlx5e_sq_stats *sq_stats;
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
- u32 *out;
- int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- u64 tx_offload_none;
+ u64 tx_offload_none = 0;
int i, j;
- out = mlx5_vzalloc(outlen);
- if (!out)
- return;
-
- /* Collect first the SW counters and then HW for consistency */
- s->rx_packets = 0;
- s->rx_bytes = 0;
- s->tx_packets = 0;
- s->tx_bytes = 0;
- s->tso_packets = 0;
- s->tso_bytes = 0;
- s->tso_inner_packets = 0;
- s->tso_inner_bytes = 0;
- s->tx_queue_stopped = 0;
- s->tx_queue_wake = 0;
- s->tx_queue_dropped = 0;
- s->tx_csum_inner = 0;
- tx_offload_none = 0;
- s->lro_packets = 0;
- s->lro_bytes = 0;
- s->rx_csum_none = 0;
- s->rx_csum_sw = 0;
- s->rx_wqe_err = 0;
+ memset(s, 0, sizeof(*s));
for (i = 0; i < priv->params.num_channels; i++) {
rq_stats = &priv->channel[i]->rq.stats;
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
- s->lro_packets += rq_stats->lro_packets;
- s->lro_bytes += rq_stats->lro_bytes;
+ s->rx_lro_packets += rq_stats->lro_packets;
+ s->rx_lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
- s->rx_csum_sw += rq_stats->csum_sw;
+ s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
s->rx_wqe_err += rq_stats->wqe_err;
+ s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+ s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
+ s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
+ s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
+ s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
- s->tso_packets += sq_stats->tso_packets;
- s->tso_bytes += sq_stats->tso_bytes;
- s->tso_inner_packets += sq_stats->tso_inner_packets;
- s->tso_inner_bytes += sq_stats->tso_inner_bytes;
+ s->tx_tso_packets += sq_stats->tso_packets;
+ s->tx_tso_bytes += sq_stats->tso_bytes;
+ s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+ s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
- s->tx_csum_inner += sq_stats->csum_offload_inner;
- tx_offload_none += sq_stats->csum_offload_none;
+ s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+ tx_offload_none += sq_stats->csum_none;
}
}
- /* HW counters */
+ /* Update calculated offload counters */
+ s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
+ s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
+
+ s->link_down_events_phy = MLX5_GET(ppcnt_reg,
+ priv->stats.pport.phy_counters,
+ counter_set.phys_layer_cntrs.link_down_events);
+}
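
[Editor's note] The roll-up zeroes the aggregate once, sums the per-ring counters, then derives the offload counts by subtraction, as done above for tx_csum_partial and rx_csum_unnecessary. A compilable miniature of the same flow; the struct layout is illustrative, not the driver's:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct ring_stats { uint64_t packets, csum_none; };
struct sw_stats { uint64_t tx_packets, tx_csum_none, tx_csum_partial; };

int main(void)
{
	struct ring_stats rings[2] = { { 100, 5 }, { 200, 15 } };
	struct sw_stats s;

	memset(&s, 0, sizeof(s));	/* like memset(s, 0, sizeof(*s)) above */
	for (int i = 0; i < 2; i++) {
		s.tx_packets += rings[i].packets;
		s.tx_csum_none += rings[i].csum_none;
	}
	/* offloaded = total minus the ones the stack checksummed itself */
	s.tx_csum_partial = s.tx_packets - s.tx_csum_none;
	printf("packets %llu offloaded %llu\n",
	       (unsigned long long)s.tx_packets,
	       (unsigned long long)s.tx_csum_partial);	/* 300 280 */
	return 0;
}
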
+
+static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ u32 *out = (u32 *)priv->stats.vport.query_vport_out;
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+
memset(in, 0, sizeof(in));
MLX5_SET(query_vport_counter_in, in, opcode,
@@ -202,56 +189,69 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
memset(out, 0, outlen);
- if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+}
+
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ int prio;
+ void *out;
+ u32 *in;
+
+ in = mlx5_vzalloc(sz);
+ if (!in)
goto free_out;
-#define MLX5_GET_CTR(p, x) \
- MLX5_GET64(query_vport_counter_out, p, x)
-
- s->rx_error_packets =
- MLX5_GET_CTR(out, received_errors.packets);
- s->rx_error_bytes =
- MLX5_GET_CTR(out, received_errors.octets);
- s->tx_error_packets =
- MLX5_GET_CTR(out, transmit_errors.packets);
- s->tx_error_bytes =
- MLX5_GET_CTR(out, transmit_errors.octets);
-
- s->rx_unicast_packets =
- MLX5_GET_CTR(out, received_eth_unicast.packets);
- s->rx_unicast_bytes =
- MLX5_GET_CTR(out, received_eth_unicast.octets);
- s->tx_unicast_packets =
- MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
- s->tx_unicast_bytes =
- MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
-
- s->rx_multicast_packets =
- MLX5_GET_CTR(out, received_eth_multicast.packets);
- s->rx_multicast_bytes =
- MLX5_GET_CTR(out, received_eth_multicast.octets);
- s->tx_multicast_packets =
- MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
- s->tx_multicast_bytes =
- MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
-
- s->rx_broadcast_packets =
- MLX5_GET_CTR(out, received_eth_broadcast.packets);
- s->rx_broadcast_bytes =
- MLX5_GET_CTR(out, received_eth_broadcast.octets);
- s->tx_broadcast_packets =
- MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
- s->tx_broadcast_bytes =
- MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
- /* Update calculated offload counters */
- s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
- s->rx_csum_good = s->rx_packets - s->rx_csum_none -
- s->rx_csum_sw;
+ out = pstats->IEEE_802_3_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ out = pstats->RFC_2863_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ out = pstats->RFC_2819_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ out = pstats->phy_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ out = pstats->per_prio_counters[prio];
+ MLX5_SET(ppcnt_reg, in, prio_tc, prio);
+ mlx5_core_access_reg(mdev, in, sz, out, sz,
+ MLX5_REG_PPCNT, 0, 0);
+ }
- mlx5e_update_pport_counters(priv);
free_out:
- kvfree(out);
+ kvfree(in);
+}
+
+static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
+
+ if (!priv->q_counter)
+ return;
+
+ mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
+ &qcnt->rx_out_of_buffer);
+}
+
+void mlx5e_update_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_update_q_counter(priv);
+ mlx5e_update_vport_counters(priv);
+ mlx5e_update_pport_counters(priv);
+ mlx5e_update_sw_counters(priv);
}
static void mlx5e_update_stats_work(struct work_struct *work)
@@ -273,7 +273,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
{
struct mlx5e_priv *priv = vpriv;
- if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+ if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
return;
switch (event) {
@@ -289,12 +289,12 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
- set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
- clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
@@ -309,6 +309,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ u32 byte_count;
int wq_sz;
int err;
int i;
@@ -323,32 +324,58 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
wq_sz = mlx5_wq_ll_get_size(&rq->wq);
- rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
- cpu_to_node(c->cpu));
- if (!rq->skb) {
- err = -ENOMEM;
- goto err_rq_wq_destroy;
- }
- rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
- MLX5E_SW2HW_MTU(priv->netdev->mtu);
- rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->wqe_info) {
+ err = -ENOMEM;
+ goto err_rq_wq_destroy;
+ }
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
+ rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
+
+ rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
+ rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
+ rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
+ byte_count = rq->wqe_sz;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (!rq->skb) {
+ err = -ENOMEM;
+ goto err_rq_wq_destroy;
+ }
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+ rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
+
+ rq->wqe_sz = (priv->params.lro_en) ?
+ priv->params.lro_wqe_sz :
+ MLX5E_SW2HW_MTU(priv->netdev->mtu);
+ rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
+ byte_count = rq->wqe_sz;
+ byte_count |= MLX5_HW_START_PADDING;
+ }
for (i = 0; i < wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
- u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
- wqe->data.lkey = c->mkey_be;
- wqe->data.byte_count =
- cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
+ wqe->data.byte_count = cpu_to_be32(byte_count);
}
+ rq->wq_type = priv->params.rq_wq_type;
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->tstamp = &priv->tstamp;
rq->channel = c;
rq->ix = c->ix;
rq->priv = c->priv;
+ rq->mkey_be = c->mkey_be;
+ rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
return 0;
@@ -360,7 +387,14 @@ err_rq_wq_destroy:
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
- kfree(rq->skb);
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ kfree(rq->wqe_info);
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ kfree(rq->skb);
+ }
+
mlx5_wq_destroy(&rq->wq_ctrl);
}
@@ -389,6 +423,7 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
@@ -403,7 +438,8 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
return err;
}
-static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
+ int next_state)
{
struct mlx5e_channel *c = rq->channel;
struct mlx5e_priv *priv = c->priv;
@@ -431,6 +467,36 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
return err;
}
+static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
+ MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD);
+ MLX5_SET(rqc, rqc, vsd, vsd);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
+
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
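
[Editor's note] The modify command ships a full RQ context plus a bitmask naming the one field (VSD, VLAN-stripping disable) the firmware should actually apply. A toy model of modify-with-bitmask semantics; the names and bit positions below are illustrative, not the mlx5 command layout:

#include <stdio.h>
#include <stdint.h>

#define RQ_BITMASK_VSD (1ULL << 0)	/* illustrative bit position */

struct rq_ctx { int state, vsd; };

static void apply_modify(struct rq_ctx *hw, const struct rq_ctx *in,
			 uint64_t bitmask)
{
	if (bitmask & RQ_BITMASK_VSD)	/* only masked fields are applied */
		hw->vsd = in->vsd;
}

int main(void)
{
	struct rq_ctx hw = { .state = 1, .vsd = 0 };
	struct rq_ctx in = { .state = 1, .vsd = 1 };

	apply_modify(&hw, &in, RQ_BITMASK_VSD);
	printf("vsd now %d\n", hw.vsd);	/* 1 */
	return 0;
}
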
+
static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
@@ -457,6 +523,8 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
struct mlx5e_rq_param *param,
struct mlx5e_rq *rq)
{
+ struct mlx5e_sq *sq = &c->icosq;
+ u16 pi = sq->pc & sq->wq.sz_m1;
int err;
err = mlx5e_create_rq(c, param, rq);
@@ -467,12 +535,15 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (err)
goto err_destroy_rq;
- err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
if (err)
goto err_disable_rq;
set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
- mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */
+
+ sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
+ sq->ico_wqe_info[pi].num_wqebbs = 1;
+ mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
return 0;
@@ -486,17 +557,25 @@ err_destroy_rq:
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
+ int tout = 0;
+ int err;
+
clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
- mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
- while (!mlx5_wq_ll_is_empty(&rq->wq))
- msleep(20);
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+ while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
+ tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
+ msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+
+ if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
+ set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
napi_synchronize(&rq->channel->napi);
mlx5e_disable_rq(rq);
+ mlx5e_free_rx_descs(rq);
mlx5e_destroy_rq(rq);
}
@@ -538,10 +617,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
void *sqc = param->sqc;
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
- int txq_ix;
int err;
- err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
+ err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
if (err)
return err;
@@ -566,8 +644,24 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
- txq_ix = c->ix + tc * priv->params.num_channels;
- sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+ if (param->icosq) {
+ u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+
+ sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
+ wq_sz,
+ GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (!sq->ico_wqe_info) {
+ err = -ENOMEM;
+ goto err_free_sq_db;
+ }
+ } else {
+ int txq_ix;
+
+ txq_ix = c->ix + tc * priv->params.num_channels;
+ sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+ priv->txq_to_sq_map[txq_ix] = sq;
+ }
sq->pdev = c->pdev;
sq->tstamp = &priv->tstamp;
@@ -576,10 +670,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
sq->tc = tc;
sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
sq->bf_budget = MLX5E_SQ_BF_BUDGET;
- priv->txq_to_sq_map[txq_ix] = sq;
return 0;
+err_free_sq_db:
+ mlx5e_free_sq_db(sq);
+
err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
@@ -594,6 +690,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
struct mlx5e_channel *c = sq->channel;
struct mlx5e_priv *priv = c->priv;
+ kfree(sq->ico_wqe_info);
mlx5e_free_sq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
mlx5_unmap_free_uar(priv->mdev, &sq->uar);
@@ -622,10 +719,10 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
memcpy(sqc, param->sqc, sizeof(param->sqc));
- MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
- MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
+ MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
+ MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
- MLX5_SET(sqc, sqc, tis_lst_sz, 1);
+ MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
@@ -700,9 +797,11 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
if (err)
goto err_disable_sq;
- set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
- netdev_tx_reset_queue(sq->txq);
- netif_tx_start_queue(sq->txq);
+ if (sq->txq) {
+ set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+ netdev_tx_reset_queue(sq->txq);
+ netif_tx_start_queue(sq->txq);
+ }
return 0;
@@ -723,21 +822,37 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
- clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
- napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
- netif_tx_disable_queue(sq->txq);
+ int tout = 0;
+ int err;
- /* ensure hw is notified of all pending wqes */
- if (mlx5e_sq_has_room_for(sq, 1))
- mlx5e_send_nop(sq, true);
+ if (sq->txq) {
+ clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+ /* prevent netif_tx_wake_queue */
+ napi_synchronize(&sq->channel->napi);
+ netif_tx_disable_queue(sq->txq);
- mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
- while (sq->cc != sq->pc) /* wait till sq is empty */
- msleep(20);
+ /* ensure hw is notified of all pending wqes */
+ if (mlx5e_sq_has_room_for(sq, 1))
+ mlx5e_send_nop(sq, true);
+
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_ERR);
+ if (err)
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ }
+
+ /* wait till sq is empty, unless a TX timeout occurred on this SQ */
+ while (sq->cc != sq->pc &&
+ !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
+ msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+ if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ }
/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
napi_synchronize(&sq->channel->napi);
+ mlx5e_free_tx_descs(sq);
mlx5e_disable_sq(sq);
mlx5e_destroy_sq(sq);
}
@@ -985,10 +1100,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
- err = mlx5e_open_tx_cqs(c, cparam);
+ err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0);
if (err)
goto err_napi_del;
+ err = mlx5e_open_tx_cqs(c, cparam);
+ if (err)
+ goto err_close_icosq_cq;
+
err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
priv->params.rx_cq_moderation_usec,
priv->params.rx_cq_moderation_pkts);
@@ -997,10 +1116,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
napi_enable(&c->napi);
- err = mlx5e_open_sqs(c, cparam);
+ err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
if (err)
goto err_disable_napi;
+ err = mlx5e_open_sqs(c, cparam);
+ if (err)
+ goto err_close_icosq;
+
err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
if (err)
goto err_close_sqs;
@@ -1013,6 +1136,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
err_close_sqs:
mlx5e_close_sqs(c);
+err_close_icosq:
+ mlx5e_close_sq(&c->icosq);
+
err_disable_napi:
napi_disable(&c->napi);
mlx5e_close_cq(&c->rq.cq);
@@ -1020,6 +1146,9 @@ err_disable_napi:
err_close_tx_cqs:
mlx5e_close_tx_cqs(c);
+err_close_icosq_cq:
+ mlx5e_close_cq(&c->icosq.cq);
+
err_napi_del:
netif_napi_del(&c->napi);
napi_hash_del(&c->napi);
@@ -1032,9 +1161,11 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
{
mlx5e_close_rq(&c->rq);
mlx5e_close_sqs(c);
+ mlx5e_close_sq(&c->icosq);
napi_disable(&c->napi);
mlx5e_close_cq(&c->rq.cq);
mlx5e_close_tx_cqs(c);
+ mlx5e_close_cq(&c->icosq.cq);
netif_napi_del(&c->napi);
napi_hash_del(&c->napi);
@@ -1049,11 +1180,23 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
- MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ priv->params.mpwqe_log_num_strides - 9);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ priv->params.mpwqe_log_stride_sz - 6);
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ }
+
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
MLX5_SET(wq, wq, pd, priv->pdn);
+ MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->wq.linear = 1;
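
[Editor's note] In the striding-RQ case above, the hardware fields store log values biased by assumed minimums (9 for the stride count, 6 for the stride size, per this patch), and a WQE spans stride_size * num_strides bytes. A standalone check of the arithmetic; the parameter values are examples:

#include <stdio.h>

int main(void)
{
	unsigned int log_num_strides = 11;	/* example parameter */
	unsigned int log_stride_sz = 6;		/* example parameter */

	unsigned int hw_num_strides_field = log_num_strides - 9;
	unsigned int hw_stride_sz_field = log_stride_sz - 6;
	unsigned long wqe_sz = (1UL << log_num_strides) * (1UL << log_stride_sz);

	printf("fields %u/%u, wqe bytes %lu\n",
	       hw_num_strides_field, hw_stride_sz_field, wqe_sz);	/* 2/0, 131072 */
	return 0;
}
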
@@ -1068,17 +1211,27 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
}
-static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
- struct mlx5e_sq_param *param)
+static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
- MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->pdn);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+}
+
+static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(priv, param);
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
+
param->max_inline = priv->params.tx_max_inline;
}
@@ -1094,8 +1247,22 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
+ u8 log_cq_size;
- MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ log_cq_size = priv->params.log_rq_size +
+ priv->params.mpwqe_log_num_strides;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ log_cq_size = priv->params.log_rq_size;
+ }
+
+ MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
+ if (priv->params.rx_cqe_compress) {
+ MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
+ MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+ }
mlx5e_build_common_cq_param(priv, param);
}
@@ -1105,25 +1272,52 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
{
void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
mlx5e_build_common_cq_param(priv, param);
}
-static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
- struct mlx5e_channel_param *cparam)
+static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param,
+ u8 log_wq_size)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
+
+ mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param,
+ u8 log_wq_size)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(priv, param);
+
+ MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+ MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
+
+ param->icosq = true;
+}
+
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
{
- memset(cparam, 0, sizeof(*cparam));
+ u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
mlx5e_build_rq_param(priv, &cparam->rq);
mlx5e_build_sq_param(priv, &cparam->sq);
+ mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+ mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
}
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
- struct mlx5e_channel_param cparam;
+ struct mlx5e_channel_param *cparam;
int nch = priv->params.num_channels;
int err = -ENOMEM;
int i;
@@ -1135,12 +1329,15 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
sizeof(struct mlx5e_sq *), GFP_KERNEL);
- if (!priv->channel || !priv->txq_to_sq_map)
+ cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+
+ if (!priv->channel || !priv->txq_to_sq_map || !cparam)
goto err_free_txq_to_sq_map;
- mlx5e_build_channel_param(priv, &cparam);
+ mlx5e_build_channel_param(priv, cparam);
+
for (i = 0; i < nch; i++) {
- err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+ err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
if (err)
goto err_close_channels;
}
@@ -1151,6 +1348,12 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
goto err_close_channels;
}
+ /* FIXME: This is a W/A for tx timeout watch dog false alarm when
+ * polling for inactive tx queues.
+ */
+ netif_tx_start_all_queues(priv->netdev);
+
+ kfree(cparam);
return 0;
err_close_channels:
@@ -1160,6 +1363,7 @@ err_close_channels:
err_free_txq_to_sq_map:
kfree(priv->txq_to_sq_map);
kfree(priv->channel);
+ kfree(cparam);
return err;
}
@@ -1168,6 +1372,12 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
int i;
+ /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
+ * polling for inactive tx queues.
+ */
+ netif_tx_stop_all_queues(priv->netdev);
+ netif_tx_disable(priv->netdev);
+
for (i = 0; i < priv->params.num_channels; i++)
mlx5e_close_channel(priv->channel[i]);
@@ -1199,48 +1409,36 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
int ix = i;
+ u32 rqn;
if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
ix = priv->params.indirection_rqt[ix];
- MLX5_SET(rqtc, rqtc, rq_num[i],
- test_bit(MLX5E_STATE_OPENED, &priv->state) ?
- priv->channel[ix]->rq.rqn :
- priv->drop_rq.rqn);
+ rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[ix]->rq.rqn :
+ priv->drop_rq.rqn;
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
}
}
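
[Editor's note] With the XOR hash function the indirection index is bit-reversed before the table lookup. A userspace imitation of mlx5e_bits_invert() for a log2-sized table (MLX5E_LOG_INDIR_RQT_SIZE in the driver):

#include <stdio.h>

static unsigned int bits_invert(unsigned int v, int log_sz)
{
	unsigned int r = 0;

	for (int i = 0; i < log_sz; i++)
		r |= ((v >> i) & 1) << (log_sz - 1 - i);
	return r;
}

int main(void)
{
	for (unsigned int i = 0; i < 8; i++)	/* table of 2^3 entries */
		printf("%u -> %u\n", i, bits_invert(i, 3));
	return 0;	/* 0->0 1->4 2->2 3->6 4->1 5->5 6->3 7->7 */
}
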
-static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
- enum mlx5e_rqt_ix rqt_ix)
+static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
+ int ix)
{
+ u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[ix]->rq.rqn :
+ priv->drop_rq.rqn;
- switch (rqt_ix) {
- case MLX5E_INDIRECTION_RQT:
- mlx5e_fill_indir_rqt_rqns(priv, rqtc);
-
- break;
-
- default: /* MLX5E_SINGLE_RQ_RQT */
- MLX5_SET(rqtc, rqtc, rq_num[0],
- test_bit(MLX5E_STATE_OPENED, &priv->state) ?
- priv->channel[0]->rq.rqn :
- priv->drop_rq.rqn);
-
- break;
- }
+ MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
}
-static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 *in;
void *rqtc;
int inlen;
- int sz;
int err;
-
- sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
+ u32 *in;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
@@ -1252,26 +1450,73 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
- mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+ if (sz > 1) /* RSS */
+ mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+ else
+ mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
- err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
+ err = mlx5_core_create_rqt(mdev, in, inlen, rqtn);
kvfree(in);
+ return err;
+}
+
+static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn)
+{
+ mlx5_core_destroy_rqt(priv->mdev, rqtn);
+}
+
+static int mlx5e_create_rqts(struct mlx5e_priv *priv)
+{
+ int nch = mlx5e_get_max_num_channels(priv->mdev);
+ u32 *rqtn;
+ int err;
+ int ix;
+
+ /* Indirect RQT */
+ rqtn = &priv->indir_rqtn;
+ err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn);
+ if (err)
+ return err;
+
+ /* Direct RQTs */
+ for (ix = 0; ix < nch; ix++) {
+ rqtn = &priv->direct_tir[ix].rqtn;
+ err = mlx5e_create_rqt(priv, 1 /* size */, ix, rqtn);
+ if (err)
+ goto err_destroy_rqts;
+ }
+
+ return 0;
+
+err_destroy_rqts:
+ for (ix--; ix >= 0; ix--)
+ mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn);
+
+ mlx5e_destroy_rqt(priv, priv->indir_rqtn);
return err;
}
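
[Editor's note] On a mid-loop failure mlx5e_create_rqts() walks ix back down and destroys only the objects actually created, then releases the shared indirect RQT. The reverse-unwind idiom in isolation; create()/destroy() are stand-ins:

#include <stdio.h>

static int create(int ix) { return ix == 2 ? -1 : 0; }	/* third one fails */
static void destroy(int ix) { printf("destroy %d\n", ix); }

int main(void)
{
	int ix, nch = 4;

	for (ix = 0; ix < nch; ix++)
		if (create(ix))
			goto err_unwind;
	return 0;

err_unwind:
	for (ix--; ix >= 0; ix--)	/* tears down 1, then 0 */
		destroy(ix);
	return 1;
}
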
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+static void mlx5e_destroy_rqts(struct mlx5e_priv *priv)
+{
+ int nch = mlx5e_get_max_num_channels(priv->mdev);
+ int i;
+
+ for (i = 0; i < nch; i++)
+ mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn);
+
+ mlx5e_destroy_rqt(priv, priv->indir_rqtn);
+}
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 *in;
void *rqtc;
int inlen;
- int sz;
+ u32 *in;
int err;
- sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
-
inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
if (!in)
@@ -1280,27 +1525,31 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
-
- mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+ if (sz > 1) /* RSS */
+ mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+ else
+ mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
- err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
+ err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
kvfree(in);
return err;
}
-static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
-{
- mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
-}
-
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
- mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
- mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+ u32 rqtn;
+ int ix;
+
+ rqtn = priv->indir_rqtn;
+ mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+ for (ix = 0; ix < priv->params.num_channels; ix++) {
+ rqtn = priv->direct_tir[ix].rqtn;
+ mlx5e_redirect_rqt(priv, rqtn, 1, ix);
+ }
}
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
@@ -1345,6 +1594,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
int inlen;
int err;
int tt;
+ int ix;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
@@ -1356,23 +1606,32 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
mlx5e_build_tir_ctx_lro(tirc, priv);
- for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
- err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in,
+ inlen);
if (err)
- break;
+ goto free_in;
}
+ for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) {
+ err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
+ in, inlen);
+ if (err)
+ goto free_in;
+ }
+
+free_in:
kvfree(in);
return err;
}
-static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
- u32 tirn)
+static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
{
void *in;
int inlen;
int err;
+ int i;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
@@ -1381,25 +1640,23 @@ static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
- err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
-
- kvfree(in);
-
- return err;
-}
-
-static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
-{
- int err;
- int i;
+ for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
+ err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in,
+ inlen);
+ if (err) {
+ kvfree(in);
+ return err;
+ }
+ }
- for (i = 0; i < MLX5E_NUM_TT; i++) {
- err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
- priv->tirn[i]);
+ for (i = 0; i < priv->params.num_channels; i++) {
+ err = mlx5_core_modify_tir(priv->mdev,
+ priv->direct_tir[i].tirn, in,
+ inlen);
- if (err)
- return err;
+ if (err) {
+ kvfree(in);
+ return err;
+ }
}
+ kvfree(in);
+
return 0;
}
@@ -1464,8 +1721,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
netdev_set_num_tc(netdev, ntc);
+ /* Map netdev TCs to offset 0
+ * We have our own UP to TXQ mapping for QoS
+ */
for (tc = 0; tc < ntc; tc++)
- netdev_set_tc_queue(netdev, tc, nch, tc * nch);
+ netdev_set_tc_queue(netdev, tc, nch, 0);
}
int mlx5e_open_locked(struct net_device *netdev)
@@ -1503,6 +1763,9 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_redirect_rqts(priv);
mlx5e_update_carrier(priv);
mlx5e_timestamp_init(priv);
+#ifdef CONFIG_RFS_ACCEL
+ priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
+#endif
queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -1710,7 +1973,8 @@ static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
mlx5e_destroy_tis(priv, tc);
}
-static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+ enum mlx5e_traffic_types tt)
{
void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
@@ -1731,19 +1995,8 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
mlx5e_build_tir_ctx_lro(tirc, priv);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
-
- switch (tt) {
- case MLX5E_TT_ANY:
- MLX5_SET(tirc, tirc, indirect_table,
- priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
- MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
- break;
- default:
- MLX5_SET(tirc, tirc, indirect_table,
- priv->rqtn[MLX5E_INDIRECTION_RQT]);
- mlx5e_build_tir_ctx_hash(tirc, priv);
- break;
- }
+ MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn);
+ mlx5e_build_tir_ctx_hash(tirc, priv);
switch (tt) {
case MLX5E_TT_IPV4_TCP:
@@ -1823,64 +2076,107 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP);
break;
+ default:
+ WARN_ONCE(true,
+ "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
}
}
-static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
+static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+ u32 rqtn)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 *in;
+ MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+ mlx5e_build_tir_ctx_lro(tirc, priv);
+
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table, rqtn);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+}
+
+static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+{
+ int nch = mlx5e_get_max_num_channels(priv->mdev);
void *tirc;
int inlen;
+ u32 *tirn;
int err;
+ u32 *in;
+ int ix;
+ int tt;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ /* indirect tirs */
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(in, 0, inlen);
+ tirn = &priv->indir_tirn[tt];
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ mlx5e_build_indir_tir_ctx(priv, tirc, tt);
+ err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
+ if (err)
+ goto err_destroy_tirs;
+ }
- mlx5e_build_tir_ctx(priv, tirc, tt);
+ /* direct tirs */
+ for (ix = 0; ix < nch; ix++) {
+ memset(in, 0, inlen);
+ tirn = &priv->direct_tir[ix].tirn;
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ mlx5e_build_direct_tir_ctx(priv, tirc,
+ priv->direct_tir[ix].rqtn);
+ err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
+ if (err)
+ goto err_destroy_ch_tirs;
+ }
- err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+ kvfree(in);
+
+ return 0;
+
+err_destroy_ch_tirs:
+ for (ix--; ix >= 0; ix--)
+ mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn);
+
+err_destroy_tirs:
+ for (tt--; tt >= 0; tt--)
+ mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]);
kvfree(in);
return err;
}
-static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
+static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
{
- mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+ int nch = mlx5e_get_max_num_channels(priv->mdev);
+ int i;
+
+ for (i = 0; i < nch; i++)
+ mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn);
+
+ for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+ mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]);
}
-static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
{
- int err;
+ int err = 0;
int i;
- for (i = 0; i < MLX5E_NUM_TT; i++) {
- err = mlx5e_create_tir(priv, i);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ for (i = 0; i < priv->params.num_channels; i++) {
+ err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
if (err)
- goto err_destroy_tirs;
+ return err;
}
return 0;
-
-err_destroy_tirs:
- for (i--; i >= 0; i--)
- mlx5e_destroy_tir(priv, i);
-
- return err;
-}
-
-static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
-{
- int i;
-
- for (i = 0; i < MLX5E_NUM_TT; i++)
- mlx5e_destroy_tir(priv, i);
}
static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
@@ -1923,6 +2219,8 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
return mlx5e_configure_flower(priv, proto, tc->cls_flower);
case TC_CLSFLOWER_DESTROY:
return mlx5e_delete_flower(priv, tc->cls_flower);
+ case TC_CLSFLOWER_STATS:
+ return mlx5e_stats_flower(priv, tc->cls_flower);
}
default:
return -EOPNOTSUPP;
@@ -1939,19 +2237,37 @@ static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_sw_stats *sstats = &priv->stats.sw;
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
-
- stats->rx_packets = vstats->rx_packets;
- stats->rx_bytes = vstats->rx_bytes;
- stats->tx_packets = vstats->tx_packets;
- stats->tx_bytes = vstats->tx_bytes;
- stats->multicast = vstats->rx_multicast_packets +
- vstats->tx_multicast_packets;
- stats->tx_errors = vstats->tx_error_packets;
- stats->rx_errors = vstats->rx_error_packets;
- stats->tx_dropped = vstats->tx_queue_dropped;
- stats->rx_crc_errors = 0;
- stats->rx_length_errors = 0;
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+
+ stats->rx_packets = sstats->rx_packets;
+ stats->rx_bytes = sstats->rx_bytes;
+ stats->tx_packets = sstats->tx_packets;
+ stats->tx_bytes = sstats->tx_bytes;
+
+ stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+ stats->tx_dropped = sstats->tx_queue_dropped;
+
+ stats->rx_length_errors =
+ PPORT_802_3_GET(pstats, a_in_range_length_errors) +
+ PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
+ PPORT_802_3_GET(pstats, a_frame_too_long_errors);
+ stats->rx_crc_errors =
+ PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
+ stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
+ stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
+ stats->tx_carrier_errors =
+ PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors;
+ stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
+
+	/* The vport multicast counter also includes packets dropped by
+	 * steering or due to RX out-of-buffer conditions.
+	 */
+ stats->multicast =
+ VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
return stats;
}
@@ -1980,50 +2296,154 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
return 0;
}
-static int mlx5e_set_features(struct net_device *netdev,
- netdev_features_t features)
+#define MLX5E_SET_FEATURE(netdev, feature, enable) \
+ do { \
+ if (enable) \
+ netdev->features |= feature; \
+ else \
+ netdev->features &= ~feature; \
+ } while (0)
+
+typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
+
+static int set_feature_lro(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- int err = 0;
- netdev_features_t changes = features ^ netdev->features;
+ bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ int err;
mutex_lock(&priv->state_lock);
- if (changes & NETIF_F_LRO) {
- bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-
- if (was_opened)
- mlx5e_close_locked(priv->netdev);
-
- priv->params.lro_en = !!(features & NETIF_F_LRO);
- err = mlx5e_modify_tirs_lro(priv);
- if (err)
- mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
- err);
+ if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
+ mlx5e_close_locked(priv->netdev);
- if (was_opened)
- err = mlx5e_open_locked(priv->netdev);
+ priv->params.lro_en = enable;
+ err = mlx5e_modify_tirs_lro(priv);
+ if (err) {
+ netdev_err(netdev, "lro modify failed, %d\n", err);
+ priv->params.lro_en = !enable;
}
+ if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
+ mlx5e_open_locked(priv->netdev);
+
mutex_unlock(&priv->state_lock);
- if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
- if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
- mlx5e_enable_vlan_filter(priv);
- else
- mlx5e_disable_vlan_filter(priv);
- }
+ return err;
+}
- if ((changes & NETIF_F_HW_TC) && !(features & NETIF_F_HW_TC) &&
- mlx5e_tc_num_filters(priv)) {
+static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (enable)
+ mlx5e_enable_vlan_filter(priv);
+ else
+ mlx5e_disable_vlan_filter(priv);
+
+ return 0;
+}
+
+static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (!enable && mlx5e_tc_num_filters(priv)) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
}
+ return 0;
+}
+
+static int set_feature_rx_all(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ return mlx5_set_port_fcs(mdev, !enable);
+}
+
+static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ priv->params.vlan_strip_disable = !enable;
+ err = mlx5e_modify_rqs_vsd(priv, !enable);
+ if (err)
+ priv->params.vlan_strip_disable = enable;
+
+ mutex_unlock(&priv->state_lock);
+
return err;
}
+#ifdef CONFIG_RFS_ACCEL
+static int set_feature_arfs(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ if (enable)
+ err = mlx5e_arfs_enable(priv);
+ else
+ err = mlx5e_arfs_disable(priv);
+
+ return err;
+}
+#endif
+
+static int mlx5e_handle_feature(struct net_device *netdev,
+ netdev_features_t wanted_features,
+ netdev_features_t feature,
+ mlx5e_feature_handler feature_handler)
+{
+ netdev_features_t changes = wanted_features ^ netdev->features;
+ bool enable = !!(wanted_features & feature);
+ int err;
+
+ if (!(changes & feature))
+ return 0;
+
+ err = feature_handler(netdev, enable);
+ if (err) {
+ netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
+ enable ? "Enable" : "Disable", feature, err);
+ return err;
+ }
+
+ MLX5E_SET_FEATURE(netdev, feature, enable);
+ return 0;
+}
+
+static int mlx5e_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ int err;
+
+ err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
+ set_feature_lro);
+ err |= mlx5e_handle_feature(netdev, features,
+ NETIF_F_HW_VLAN_CTAG_FILTER,
+ set_feature_vlan_filter);
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
+ set_feature_tc_num_filters);
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
+ set_feature_rx_all);
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
+ set_feature_rx_vlan);
+#ifdef CONFIG_RFS_ACCEL
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
+ set_feature_arfs);
+#endif
+
+ return err ? -EINVAL : 0;
+}
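
The refactor above replaces one monolithic ndo_set_features callback with per-feature handlers dispatched through mlx5e_handle_feature(); XOR-ing wanted_features against the currently committed netdev->features selects only the bits that actually changed, and a failed handler leaves the committed bit untouched. A minimal user-space sketch of the same pattern (all names here are illustrative, not driver API):

	#include <stdio.h>
	#include <stdint.h>

	#define FEAT_LRO   (1u << 0)
	#define FEAT_RXALL (1u << 1)

	typedef int (*feature_handler)(int enable);

	static int set_lro(int enable)   { printf("lro -> %d\n", enable); return 0; }
	static int set_rxall(int enable) { printf("rxall -> %d\n", enable); return 0; }

	static uint32_t committed;	/* stands in for netdev->features */

	static int handle_feature(uint32_t wanted, uint32_t feature,
				  feature_handler handler)
	{
		uint32_t changed = wanted ^ committed;
		int enable = !!(wanted & feature);
		int err;

		if (!(changed & feature))
			return 0;	/* bit did not change; nothing to do */

		err = handler(enable);
		if (err)
			return err;	/* committed state stays consistent */

		if (enable)
			committed |= feature;
		else
			committed &= ~feature;
		return 0;
	}

	int main(void)
	{
		int err = handle_feature(FEAT_LRO, FEAT_LRO, set_lro);

		/* RXALL bit unchanged, so this dispatch is a no-op */
		err |= handle_feature(FEAT_LRO, FEAT_RXALL, set_rxall);
		return err ? 1 : 0;
	}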
+
#define MXL5_HW_MIN_MTU 64
#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
@@ -2093,6 +2513,21 @@ static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
vlan, qos);
}
+static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
+}
+
+static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
+}
static int mlx5_vport_link2ifla(u8 esw_link)
{
switch (esw_link) {
@@ -2149,7 +2584,6 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
vf_stats);
}
-#if IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN)
static void mlx5e_add_vxlan_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port)
{
@@ -2221,7 +2655,29 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
return features;
}
-#endif
+
+static void mlx5e_tx_timeout(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ bool sched_work = false;
+ int i;
+
+ netdev_err(dev, "TX timeout detected\n");
+
+ for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
+ struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
+
+ if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+ continue;
+ sched_work = true;
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
+ i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+ }
+
+ if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+ schedule_work(&priv->tx_timeout_work);
+}
static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
@@ -2237,6 +2693,10 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_set_features = mlx5e_set_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
+#endif
+ .ndo_tx_timeout = mlx5e_tx_timeout,
};
static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2253,16 +2713,20 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_set_features = mlx5e_set_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
-#ifdef CONFIG_MLX5_CORE_EN_VXLAN
.ndo_add_vxlan_port = mlx5e_add_vxlan_port,
.ndo_del_vxlan_port = mlx5e_del_vxlan_port,
.ndo_features_check = mlx5e_features_check,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_vlan = mlx5e_set_vf_vlan,
+ .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
+ .ndo_set_vf_trust = mlx5e_set_vf_trust,
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
+ .ndo_tx_timeout = mlx5e_tx_timeout,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2317,25 +2781,121 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
}
#endif
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+ u32 *indirection_rqt, int len,
int num_channels)
{
+ int node = mdev->priv.numa_node;
+ int node_num_of_cores;
int i;
+ if (node == -1)
+ node = first_online_node;
+
+ node_num_of_cores = cpumask_weight(cpumask_of_node(node));
+
+ if (node_num_of_cores)
+ num_channels = min_t(int, num_channels, node_num_of_cores);
+
for (i = 0; i < len; i++)
indirection_rqt[i] = i % num_channels;
}
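
mlx5e_build_default_indir_rqt() now spreads RSS traffic round-robin over the channels, capped at the number of cores on the device's NUMA node. A stand-alone sketch of the resulting table (the table size and node-core count below are assumptions for illustration):

	#include <stdio.h>

	#define INDIR_RQT_SIZE 8	/* real table is MLX5E_INDIR_RQT_SIZE */

	int main(void)
	{
		unsigned int indirection_rqt[INDIR_RQT_SIZE];
		int num_channels = 6;
		int node_cores = 4;	/* assumed cpumask_weight() of local node */
		int i;

		if (node_cores && node_cores < num_channels)
			num_channels = node_cores;	/* stay NUMA-local */

		for (i = 0; i < INDIR_RQT_SIZE; i++) {
			indirection_rqt[i] = i % num_channels;
			printf("entry %d -> channel %u\n", i, indirection_rqt[i]);
		}
		return 0;
	}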
+static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_GEN(mdev, striding_rq) &&
+ MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+ MLX5_CAP_ETH(mdev, reg_umr_sq);
+}
+
+static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
+{
+ enum pcie_link_width width;
+ enum pci_bus_speed speed;
+	int err;
+
+ err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
+ if (err)
+ return err;
+
+ if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
+ return -EINVAL;
+
+ switch (speed) {
+ case PCIE_SPEED_2_5GT:
+ *pci_bw = 2500 * width;
+ break;
+ case PCIE_SPEED_5_0GT:
+ *pci_bw = 5000 * width;
+ break;
+ case PCIE_SPEED_8_0GT:
+ *pci_bw = 8000 * width;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
+{
+ return (link_speed && pci_bw &&
+ (pci_bw < 40000) && (pci_bw < link_speed));
+}
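
In mlx5e_get_pci_bw() the PCIe width multiplies the per-lane rate in Mb/s, and the heuristic enables CQE compression only when that raw bus bandwidth is below both 40 Gb/s and the port speed, i.e. when the PCIe link is the likely bottleneck. A small check of that arithmetic (speeds in Mb/s to match the driver's units; encoding overhead is ignored, as in the code above):

	#include <stdio.h>

	static int cqe_compress_heuristic(unsigned int link_speed, unsigned int pci_bw)
	{
		return link_speed && pci_bw && pci_bw < 40000 && pci_bw < link_speed;
	}

	int main(void)
	{
		/* Gen3 lane = 8000 Mb/s raw; width scales it linearly */
		printf("100GbE, Gen3 x4:  %d\n", cqe_compress_heuristic(100000, 8000 * 4));
		printf("100GbE, Gen3 x16: %d\n", cqe_compress_heuristic(100000, 8000 * 16));
		printf("40GbE,  Gen3 x8:  %d\n", cqe_compress_heuristic(40000, 8000 * 8));
		return 0;
	}

Only the first case fires: 32000 Mb/s is under both the 40000 Mb/s cutoff and the 100000 Mb/s port, while x16 and the 40GbE case each fail one of the conditions.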
+
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
int num_channels)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ u32 link_speed = 0;
+ u32 pci_bw = 0;
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
- priv->params.log_rq_size =
- MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+ MLX5_WQ_TYPE_LINKED_LIST;
+
+ /* set CQE compression */
+ priv->params.rx_cqe_compress_admin = false;
+ if (MLX5_CAP_GEN(mdev, cqe_compression) &&
+ MLX5_CAP_GEN(mdev, vport_group_manager)) {
+ mlx5e_get_max_linkspeed(mdev, &link_speed);
+ mlx5e_get_pci_bw(mdev, &pci_bw);
+ mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
+ link_speed, pci_bw);
+ priv->params.rx_cqe_compress_admin =
+ cqe_compress_heuristic(link_speed, pci_bw);
+ }
+
+ priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;
+
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+ priv->params.mpwqe_log_stride_sz =
+ priv->params.rx_cqe_compress ?
+ MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
+ MLX5_MPWRQ_LOG_STRIDE_SIZE;
+ priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
+ priv->params.mpwqe_log_stride_sz;
+ priv->params.lro_en = true;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ }
+
+ mlx5_core_info(mdev,
+ "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+ priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+ BIT(priv->params.log_rq_size),
+ BIT(priv->params.mpwqe_log_stride_sz),
+ priv->params.rx_cqe_compress_admin);
+
+ priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+ BIT(priv->params.log_rq_size));
priv->params.rx_cq_moderation_usec =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
priv->params.rx_cq_moderation_pkts =
@@ -2345,15 +2905,13 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.tx_cq_moderation_pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
- priv->params.min_rx_wqes =
- MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
priv->params.num_tc = 1;
priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
netdev_rss_key_fill(priv->params.toeplitz_hash_key,
sizeof(priv->params.toeplitz_hash_key));
- mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+ mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, num_channels);
priv->params.lro_wqe_sz =
@@ -2371,6 +2929,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+ INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
@@ -2390,6 +2949,8 @@ static void mlx5e_build_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ bool fcs_supported;
+ bool fcs_enabled;
SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
@@ -2424,25 +2985,41 @@ static void mlx5e_build_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
if (mlx5e_vxlan_allowed(mdev)) {
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
netdev->hw_enc_features |= NETIF_F_IP_CSUM;
- netdev->hw_enc_features |= NETIF_F_RXCSUM;
+ netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
netdev->hw_enc_features |= NETIF_F_TSO6;
- netdev->hw_enc_features |= NETIF_F_RXHASH;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
+ mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
+
+ if (fcs_supported)
+ netdev->hw_features |= NETIF_F_RXALL;
+
netdev->features = netdev->hw_features;
if (!priv->params.lro_en)
netdev->features &= ~NETIF_F_LRO;
+ if (fcs_enabled)
+ netdev->features &= ~NETIF_F_RXALL;
+
#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
if (FT_CAP(flow_modify_en) &&
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
- FT_CAP(flow_table_modify))
- priv->netdev->hw_features |= NETIF_F_HW_TC;
+ FT_CAP(flow_table_modify)) {
+ netdev->hw_features |= NETIF_F_HW_TC;
+#ifdef CONFIG_RFS_ACCEL
+ netdev->hw_features |= NETIF_F_NTUPLE;
+#endif
+ }
netdev->features |= NETIF_F_HIGHDMA;
@@ -2476,6 +3053,61 @@ static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
return err;
}
+static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
+ if (err) {
+ mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
+ priv->q_counter = 0;
+ }
+}
+
+static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
+{
+ if (!priv->q_counter)
+ return;
+
+ mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
+}
+
+static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_create_mkey_mbox_in *in;
+ struct mlx5_mkey_seg *mkc;
+ int inlen = sizeof(*in);
+ u64 npages =
+ mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = &in->seg;
+ mkc->status = MLX5_MKEY_STATUS_FREE;
+ mkc->flags = MLX5_PERM_UMR_EN |
+ MLX5_PERM_LOCAL_READ |
+ MLX5_PERM_LOCAL_WRITE |
+ MLX5_ACCESS_MODE_MTT;
+
+ mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ mkc->flags_pd = cpu_to_be32(priv->pdn);
+ mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
+ mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
+ mkc->log2_page_size = PAGE_SHIFT;
+
+ err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
+ NULL, NULL);
+
+ kvfree(in);
+
+ return err;
+}
+
static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
struct net_device *netdev;
@@ -2529,10 +3161,16 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
goto err_dealloc_transport_domain;
}
+ err = mlx5e_create_umr_mkey(priv);
+ if (err) {
+ mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
+ goto err_destroy_mkey;
+ }
+
err = mlx5e_create_tises(priv);
if (err) {
mlx5_core_warn(mdev, "create tises failed, %d\n", err);
- goto err_destroy_mkey;
+ goto err_destroy_umr_mkey;
}
err = mlx5e_open_drop_rq(priv);
@@ -2541,37 +3179,33 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
goto err_destroy_tises;
}
- err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
+ err = mlx5e_create_rqts(priv);
if (err) {
- mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
+ mlx5_core_warn(mdev, "create rqts failed, %d\n", err);
goto err_close_drop_rq;
}
- err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
- if (err) {
- mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
- goto err_destroy_rqt_indir;
- }
-
err = mlx5e_create_tirs(priv);
if (err) {
mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
- goto err_destroy_rqt_single;
+ goto err_destroy_rqts;
}
- err = mlx5e_create_flow_tables(priv);
+ err = mlx5e_create_flow_steering(priv);
if (err) {
- mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
+ mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
goto err_destroy_tirs;
}
- mlx5e_init_eth_addr(priv);
+ mlx5e_create_q_counter(priv);
+
+ mlx5e_init_l2_addr(priv);
mlx5e_vxlan_init(priv);
err = mlx5e_tc_init(priv);
if (err)
- goto err_destroy_flow_tables;
+ goto err_dealloc_q_counters;
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
@@ -2583,8 +3217,11 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
goto err_tc_cleanup;
}
- if (mlx5e_vxlan_allowed(mdev))
+ if (mlx5e_vxlan_allowed(mdev)) {
+ rtnl_lock();
vxlan_get_rx_port(netdev);
+ rtnl_unlock();
+ }
mlx5e_enable_async_events(priv);
queue_work(priv->wq, &priv->set_rx_mode_work);
@@ -2594,17 +3231,15 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
err_tc_cleanup:
mlx5e_tc_cleanup(priv);
-err_destroy_flow_tables:
- mlx5e_destroy_flow_tables(priv);
+err_dealloc_q_counters:
+ mlx5e_destroy_q_counter(priv);
+ mlx5e_destroy_flow_steering(priv);
err_destroy_tirs:
mlx5e_destroy_tirs(priv);
-err_destroy_rqt_single:
- mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-
-err_destroy_rqt_indir:
- mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+err_destroy_rqts:
+ mlx5e_destroy_rqts(priv);
err_close_drop_rq:
mlx5e_close_drop_rq(priv);
@@ -2612,6 +3247,9 @@ err_close_drop_rq:
err_destroy_tises:
mlx5e_destroy_tises(priv);
+err_destroy_umr_mkey:
+ mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
+
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, &priv->mkey);
@@ -2645,22 +3283,20 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
flush_workqueue(priv->wq);
if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
netif_device_detach(netdev);
- mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_close_locked(netdev);
- mutex_unlock(&priv->state_lock);
+ mlx5e_close(netdev);
} else {
unregister_netdev(netdev);
}
mlx5e_tc_cleanup(priv);
mlx5e_vxlan_cleanup(priv);
- mlx5e_destroy_flow_tables(priv);
+ mlx5e_destroy_q_counter(priv);
+ mlx5e_destroy_flow_steering(priv);
mlx5e_destroy_tirs(priv);
- mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
- mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+ mlx5e_destroy_rqts(priv);
mlx5e_close_drop_rq(priv);
mlx5e_destroy_tises(priv);
+ mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
mlx5_core_destroy_mkey(priv->mdev, &priv->mkey);
mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 58d4e2f96..9f2a16a50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -42,13 +42,149 @@ static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
}
-static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
- struct mlx5e_rx_wqe *wqe, u16 ix)
+static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
+ void *data)
+{
+ u32 ci = cqcc & cq->wq.sz_m1;
+
+ memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
+}
+
+static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq, u32 cqcc)
+{
+ mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
+ cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt);
+ cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
+ rq->stats.cqe_compress_blks++;
+}
+
+static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
+{
+ mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);
+ cq->mini_arr_idx = 0;
+}
+
+static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
+{
+ u8 op_own = (cqcc >> cq->wq.log_sz) & 1;
+ u32 wq_sz = 1 << cq->wq.log_sz;
+ u32 ci = cqcc & cq->wq.sz_m1;
+ u32 ci_top = min_t(u32, wq_sz, ci + n);
+
+ for (; ci < ci_top; ci++, n--) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+
+ cqe->op_own = op_own;
+ }
+
+ if (unlikely(ci == wq_sz)) {
+ op_own = !op_own;
+ for (ci = 0; ci < n; ci++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+
+ cqe->op_own = op_own;
+ }
+ }
+}
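
mlx5e_cqes_update_owner() rewrites the ownership bit of the CQEs a compression session consumed so hardware and software stay in phase: as the expression `(cqcc >> cq->wq.log_sz) & 1` above shows, a CQE belongs to software when its op_own ownership bit equals the "lap" parity of the consumer counter. A stand-alone sketch of that validity test (field layout simplified, queue size assumed):

	#include <stdio.h>
	#include <stdint.h>

	#define LOG_CQ_SZ 3	/* 8-entry CQ for illustration */

	/* CQE is software-owned when its owner bit matches bit LOG_CQ_SZ
	 * of the consumer counter, which flips on every wrap of the ring.
	 */
	static int cqe_sw_owned(uint8_t op_own, uint32_t cc)
	{
		return (op_own & 1) == ((cc >> LOG_CQ_SZ) & 1);
	}

	int main(void)
	{
		uint32_t cc;

		for (cc = 6; cc < 10; cc++)
			printf("cc=%u: expected owner bit %u\n",
			       cc, (cc >> LOG_CQ_SZ) & 1);
		return cqe_sw_owned(1, 8) ? 0 : 1;	/* second lap, owner=1 */
	}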
+
+static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq, u32 cqcc)
+{
+ u16 wqe_cnt_step;
+
+ cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
+ cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum;
+ cq->title.op_own &= 0xf0;
+ cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz);
+ cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);
+
+ wqe_cnt_step =
+ rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+ mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
+ cq->decmprs_wqe_counter =
+ (cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
+}
+
+static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq, u32 cqcc)
+{
+ mlx5e_decompress_cqe(rq, cq, cqcc);
+ cq->title.rss_hash_type = 0;
+ cq->title.rss_hash_result = 0;
+}
+
+static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq,
+ int update_owner_only,
+ int budget_rem)
+{
+ u32 cqcc = cq->wq.cc + update_owner_only;
+ u32 cqe_count;
+ u32 i;
+
+ cqe_count = min_t(u32, cq->decmprs_left, budget_rem);
+
+ for (i = update_owner_only; i < cqe_count;
+ i++, cq->mini_arr_idx++, cqcc++) {
+ if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
+ mlx5e_read_mini_arr_slot(cq, cqcc);
+
+ mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
+ rq->handle_rx_cqe(rq, &cq->title);
+ }
+ mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
+ cq->wq.cc = cqcc;
+ cq->decmprs_left -= cqe_count;
+ rq->stats.cqe_compress_pkts += cqe_count;
+
+ return cqe_count;
+}
+
+static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq,
+ int budget_rem)
+{
+ mlx5e_read_title_slot(rq, cq, cq->wq.cc);
+ mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1);
+ mlx5e_decompress_cqe(rq, cq, cq->wq.cc);
+ rq->handle_rx_cqe(rq, &cq->title);
+ cq->mini_arr_idx++;
+
+ return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
+}
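
A compressed session is one "title" CQE carrying the shared fields plus slots of mini CQEs (MLX5_MINI_CQE_ARRAY_SIZE per slot, referenced above; 8 is an assumed value below). decmprs_left counts how many mini CQEs remain, and a fresh slot is read whenever mini_arr_idx wraps. A hedged sketch of just that bookkeeping:

	#include <stdio.h>

	#define MINI_ARR_SZ 8	/* assumed MLX5_MINI_CQE_ARRAY_SIZE */

	int main(void)
	{
		int session_len = 19;	/* mini CQEs announced by the title */
		int mini_idx = 0, refills = 0, i;

		for (i = 0; i < session_len; i++, mini_idx++) {
			if (mini_idx == MINI_ARR_SZ) {	/* slot exhausted */
				mini_idx = 0;
				refills++;	/* read the next slot of minis */
			}
			/* ...expand mini CQE into the title and handle it... */
		}
		printf("expanded %d CQEs, read %d extra array slots\n",
		       session_len, refills);
		return 0;
	}

The real start path above reads the first slot eagerly (cq->wq.cc + 1) before entering the continuation loop.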
+
+void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
+{
+ bool was_opened;
+
+ if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
+ return;
+
+ mutex_lock(&priv->state_lock);
+
+ if (priv->params.rx_cqe_compress == val)
+ goto unlock;
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(priv->netdev);
+
+ priv->params.rx_cqe_compress = val;
+
+ if (was_opened)
+ mlx5e_open_locked(priv->netdev);
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+}
+
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
struct sk_buff *skb;
dma_addr_t dma_addr;
- skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
+ skb = napi_alloc_skb(rq->cq.napi, rq->wqe_sz);
if (unlikely(!skb))
return -ENOMEM;
@@ -62,10 +198,9 @@ static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
goto err_free_skb;
- skb_reserve(skb, MLX5E_NET_IP_ALIGN);
-
*((dma_addr_t *)skb->cb) = dma_addr;
- wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
+ wqe->data.addr = cpu_to_be64(dma_addr);
+ wqe->data.lkey = rq->mkey_be;
rq->skb[ix] = skb;
@@ -77,18 +212,427 @@ err_free_skb:
return -ENOMEM;
}
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct sk_buff *skb = rq->skb[ix];
+
+ if (skb) {
+ rq->skb[ix] = NULL;
+ dma_unmap_single(rq->pdev,
+ *((dma_addr_t *)skb->cb),
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
+}
+
+static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
+{
+ return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
+}
+
+static inline void
+mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev,
+ struct mlx5e_mpw_info *wi,
+ u32 wqe_offset, u32 len)
+{
+ dma_sync_single_for_cpu(pdev, wi->dma_info.addr + wqe_offset,
+ len, DMA_FROM_DEVICE);
+}
+
+static inline void
+mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev,
+ struct mlx5e_mpw_info *wi,
+ u32 wqe_offset, u32 len)
+{
+ /* No dma pre sync for fragmented MPWQE */
+}
+
+static inline void
+mlx5e_add_skb_frag_linear_mpwqe(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 frag_offset,
+ u32 len)
+{
+ unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
+
+ wi->skbs_frags[page_idx]++;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ &wi->dma_info.page[page_idx], frag_offset,
+ len, truesize);
+}
+
+static inline void
+mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 frag_offset,
+ u32 len)
+{
+ unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
+
+ dma_sync_single_for_cpu(rq->pdev,
+ wi->umr.dma_info[page_idx].addr + frag_offset,
+ len, DMA_FROM_DEVICE);
+ wi->skbs_frags[page_idx]++;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ wi->umr.dma_info[page_idx].page, frag_offset,
+ len, truesize);
+}
+
+static inline void
+mlx5e_copy_skb_header_linear_mpwqe(struct device *pdev,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 offset,
+ u32 headlen)
+{
+ struct page *page = &wi->dma_info.page[page_idx];
+
+ skb_copy_to_linear_data(skb, page_address(page) + offset,
+ ALIGN(headlen, sizeof(long)));
+}
+
+static inline void
+mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 offset,
+ u32 headlen)
+{
+ u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
+ struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
+ unsigned int len;
+
+ /* Aligning len to sizeof(long) optimizes memcpy performance */
+ len = ALIGN(headlen_pg, sizeof(long));
+ dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len,
+ DMA_FROM_DEVICE);
+ skb_copy_to_linear_data_offset(skb, 0,
+ page_address(dma_info->page) + offset,
+ len);
+ if (unlikely(offset + headlen > PAGE_SIZE)) {
+ dma_info++;
+ headlen_pg = len;
+ len = ALIGN(headlen - headlen_pg, sizeof(long));
+ dma_sync_single_for_cpu(pdev, dma_info->addr, len,
+ DMA_FROM_DEVICE);
+ skb_copy_to_linear_data_offset(skb, headlen_pg,
+ page_address(dma_info->page),
+ len);
+ }
+}
+
+static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
+{
+ return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS +
+ wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
+}
+
+static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
+ struct mlx5e_sq *sq,
+ struct mlx5e_umr_wqe *wqe,
+ u16 ix)
+{
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+ struct mlx5_wqe_data_seg *dseg = &wqe->data;
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+ u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix);
+
+ memset(wqe, 0, sizeof(*wqe));
+ cseg->opmod_idx_opcode =
+ cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_UMR);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+ ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ cseg->imm = rq->umr_mkey_be;
+
+ ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
+ ucseg->klm_octowords =
+ cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE));
+ ucseg->bsf_octowords =
+ cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset));
+ ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+
+ dseg->lkey = sq->mkey_be;
+ dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
+}
+
+static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct mlx5e_sq *sq = &rq->channel->icosq;
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_umr_wqe *wqe;
+ u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
+ u16 pi;
+
+ /* fill sq edge with nops to avoid wqe wrap around */
+ while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+ sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
+ sq->ico_wqe_info[pi].num_wqebbs = 1;
+ mlx5e_send_nop(sq, true);
+ }
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ mlx5e_build_umr_wqe(rq, sq, wqe, ix);
+ sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_UMR;
+ sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs;
+ sq->pc += num_wqebbs;
+ mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+}
+
+static inline int mlx5e_get_wqe_mtt_sz(void)
+{
+ /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+ * To avoid copying garbage after the mtt array, we allocate
+ * a little more.
+ */
+ return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
+ MLX5_UMR_MTT_ALIGNMENT);
+}
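
Worked numbers for the comment above, with assumed values of 16 pages per WQE and a 0x40-byte MLX5_UMR_MTT_ALIGNMENT: 16 MTT entries of 8 bytes give 128 bytes, which is already a multiple of 64, so the ALIGN is a no-op here and only pads when the product is not alignment-sized.

	#include <stdio.h>

	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))	/* a: power of two */

	int main(void)
	{
		int pages_per_wqe = 16;	/* assumed MLX5_MPWRQ_PAGES_PER_WQE */
		int mtt_align = 0x40;	/* assumed MLX5_UMR_MTT_ALIGNMENT */

		printf("mtt bytes: %d -> aligned %d\n",
		       pages_per_wqe * 8, ALIGN(pages_per_wqe * 8, mtt_align));
		return 0;
	}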
+
+static int mlx5e_alloc_and_map_page(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi,
+ int i)
+{
+ struct page *page;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ wi->umr.dma_info[i].page = page;
+ wi->umr.dma_info[i].addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(rq->pdev, wi->umr.dma_info[i].addr))) {
+ put_page(page);
+ return -ENOMEM;
+ }
+ wi->umr.mtt[i] = cpu_to_be64(wi->umr.dma_info[i].addr | MLX5_EN_WR);
+
+ return 0;
+}
+
+static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe,
+ u16 ix)
+{
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT;
+ int i;
+
+ wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
+ MLX5_MPWRQ_PAGES_PER_WQE,
+ GFP_ATOMIC);
+ if (unlikely(!wi->umr.dma_info))
+ goto err_out;
+
+ /* We allocate more than mtt_sz as we will align the pointer */
+ wi->umr.mtt_no_align = kzalloc(mtt_sz + MLX5_UMR_ALIGN - 1,
+ GFP_ATOMIC);
+ if (unlikely(!wi->umr.mtt_no_align))
+ goto err_free_umr;
+
+ wi->umr.mtt = PTR_ALIGN(wi->umr.mtt_no_align, MLX5_UMR_ALIGN);
+ wi->umr.mtt_addr = dma_map_single(rq->pdev, wi->umr.mtt, mtt_sz,
+ PCI_DMA_TODEVICE);
+ if (unlikely(dma_mapping_error(rq->pdev, wi->umr.mtt_addr)))
+ goto err_free_mtt;
+
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i)))
+ goto err_unmap;
+ page_ref_add(wi->umr.dma_info[i].page,
+ mlx5e_mpwqe_strides_per_page(rq));
+ wi->skbs_frags[i] = 0;
+ }
+
+ wi->consumed_strides = 0;
+ wi->dma_pre_sync = mlx5e_dma_pre_sync_fragmented_mpwqe;
+ wi->add_skb_frag = mlx5e_add_skb_frag_fragmented_mpwqe;
+ wi->copy_skb_header = mlx5e_copy_skb_header_fragmented_mpwqe;
+ wi->free_wqe = mlx5e_free_rx_fragmented_mpwqe;
+ wqe->data.lkey = rq->umr_mkey_be;
+ wqe->data.addr = cpu_to_be64(dma_offset);
+
+ return 0;
+
+err_unmap:
+ while (--i >= 0) {
+ dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ page_ref_sub(wi->umr.dma_info[i].page,
+ mlx5e_mpwqe_strides_per_page(rq));
+ put_page(wi->umr.dma_info[i].page);
+ }
+ dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
+
+err_free_mtt:
+ kfree(wi->umr.mtt_no_align);
+
+err_free_umr:
+ kfree(wi->umr.dma_info);
+
+err_out:
+ return -ENOMEM;
+}
+
+void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi)
+{
+ int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ int i;
+
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ page_ref_sub(wi->umr.dma_info[i].page,
+ mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
+ put_page(wi->umr.dma_info[i].page);
+ }
+ dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
+ kfree(wi->umr.mtt_no_align);
+ kfree(wi->umr.dma_info);
+}
+
+void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+ clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+ mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+ rq->stats.mpwqe_frag++;
+
+ /* ensure wqes are visible to device before updating doorbell record */
+ dma_wmb();
+
+ mlx5_wq_ll_update_db_record(wq);
+}
+
+static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe,
+ u16 ix)
+{
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ gfp_t gfp_mask;
+ int i;
+
+ gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC;
+ wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
+ MLX5_MPWRQ_WQE_PAGE_ORDER);
+ if (unlikely(!wi->dma_info.page))
+ return -ENOMEM;
+
+ wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0,
+ rq->wqe_sz, PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) {
+ put_page(wi->dma_info.page);
+ return -ENOMEM;
+ }
+
+ /* We split the high-order page into order-0 ones and manage their
+ * reference counter to minimize the memory held by small skb fragments
+ */
+ split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ page_ref_add(&wi->dma_info.page[i],
+ mlx5e_mpwqe_strides_per_page(rq));
+ wi->skbs_frags[i] = 0;
+ }
+
+ wi->consumed_strides = 0;
+ wi->dma_pre_sync = mlx5e_dma_pre_sync_linear_mpwqe;
+ wi->add_skb_frag = mlx5e_add_skb_frag_linear_mpwqe;
+ wi->copy_skb_header = mlx5e_copy_skb_header_linear_mpwqe;
+ wi->free_wqe = mlx5e_free_rx_linear_mpwqe;
+ wqe->data.lkey = rq->mkey_be;
+ wqe->data.addr = cpu_to_be64(wi->dma_info.addr);
+
+ return 0;
+}
+
+void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi)
+{
+ int i;
+
+ dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
+ PCI_DMA_FROMDEVICE);
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ page_ref_sub(&wi->dma_info.page[i],
+ mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
+ put_page(&wi->dma_info.page[i]);
+ }
+}
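
The linear MPWQE allocator takes strides_per_page extra references on each order-0 page; every fragment handed to an skb accounts for one (skbs_frags[i]), and the free path drops the unconsumed remainder plus its own put_page(). A sketch of why the counts balance to zero once the stack frees the skbs (numbers illustrative):

	#include <stdio.h>

	int main(void)
	{
		int strides_per_page = 32;	/* illustrative */
		int skb_frags = 5;		/* strides given out as skb frags */
		int refs = 1 + strides_per_page;	/* alloc + page_ref_add() */

		refs -= strides_per_page - skb_frags;	/* page_ref_sub() in free_wqe */
		refs -= 1;				/* put_page() in free_wqe */
		printf("refs held by in-flight skbs: %d\n", refs);	/* == skb_frags */

		refs -= skb_frags;	/* stack drops one ref per frag on skb free */
		printf("final refcount delta: %d\n", refs);		/* == 0 */
		return 0;
	}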
+
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+ int err;
+
+ err = mlx5e_alloc_rx_linear_mpwqe(rq, wqe, ix);
+ if (unlikely(err)) {
+ err = mlx5e_alloc_rx_fragmented_mpwqe(rq, wqe, ix);
+ if (unlikely(err))
+ return err;
+ set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+ mlx5e_post_umr_wqe(rq, ix);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+
+ wi->free_wqe(rq, wi);
+}
+
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5e_rx_wqe *wqe;
+ __be16 wqe_ix_be;
+ u16 wqe_ix;
+
+ while (!mlx5_wq_ll_is_empty(wq)) {
+ wqe_ix_be = *wq->tail_next;
+ wqe_ix = be16_to_cpu(wqe_ix_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+ rq->dealloc_wqe(rq, wqe_ix);
+ mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+ &wqe->next.next_wqe_index);
+ }
+}
+
+#define RQ_CANNOT_POST(rq) \
+ (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
+ test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
- if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
+ if (unlikely(RQ_CANNOT_POST(rq)))
return false;
while (!mlx5_wq_ll_is_full(wq)) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+ int err;
- if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
+ err = rq->alloc_wqe(rq, wqe, wq->head);
+ if (unlikely(err)) {
+ if (err != -EBUSY)
+ rq->stats.buff_alloc_err++;
break;
+ }
mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
}
@@ -101,7 +645,8 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
return !mlx5_wq_ll_is_full(wq);
}
-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
@@ -112,7 +657,7 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
(CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
- u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+ u16 tot_len = cqe_bcnt - ETH_HLEN;
if (eth->h_proto == htons(ETH_P_IP)) {
tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
@@ -176,35 +721,43 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
if (lro) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if (likely(is_first_ethertype_ip(skb))) {
+ return;
+ }
+
+ if (is_first_ethertype_ip(skb)) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- rq->stats.csum_sw++;
- } else {
- goto csum_none;
+ rq->stats.csum_complete++;
+ return;
}
- return;
-
+ if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
+ (cqe->hds_ip_ext & CQE_L4_OK))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (cqe_is_tunneled(cqe)) {
+ skb->csum_level = 1;
+ skb->encapsulation = 1;
+ rq->stats.csum_unnecessary_inner++;
+ }
+ return;
+ }
csum_none:
skb->ip_summed = CHECKSUM_NONE;
rq->stats.csum_none++;
}
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt,
struct mlx5e_rq *rq,
struct sk_buff *skb)
{
struct net_device *netdev = rq->netdev;
- u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
struct mlx5e_tstamp *tstamp = rq->tstamp;
int lro_num_seg;
- skb_put(skb, cqe_bcnt);
-
lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
if (lro_num_seg > 1) {
- mlx5e_lro_update_hdr(skb, cqe);
+ mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
rq->stats.lro_packets++;
rq->stats.lro_bytes += cqe_bcnt;
@@ -213,10 +766,6 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
- mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
-
- skb->protocol = eth_type_trans(skb, netdev);
-
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
@@ -227,52 +776,168 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
be16_to_cpu(cqe->vlan_info));
skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
+
+ mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
+ skb->protocol = eth_type_trans(skb, netdev);
+}
+
+static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt,
+ struct sk_buff *skb)
+{
+ rq->stats.packets++;
+ rq->stats.bytes += cqe_bcnt;
+ mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ napi_gro_receive(rq->cq.napi, skb);
+}
+
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_rx_wqe *wqe;
+ struct sk_buff *skb;
+ __be16 wqe_counter_be;
+ u16 wqe_counter;
+ u32 cqe_bcnt;
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ skb = rq->skb[wqe_counter];
+ prefetch(skb->data);
+ rq->skb[wqe_counter] = NULL;
+
+ dma_unmap_single(rq->pdev,
+ *((dma_addr_t *)skb->cb),
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ dev_kfree_skb(skb);
+ goto wq_ll_pop;
+ }
+
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ skb_put(skb, cqe_bcnt);
+
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+}
+
+static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5e_mpw_info *wi,
+ u32 cqe_bcnt,
+ struct sk_buff *skb)
+{
+ u32 consumed_bytes = ALIGN(cqe_bcnt, rq->mpwqe_stride_sz);
+ u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
+ u32 wqe_offset = stride_ix * rq->mpwqe_stride_sz;
+ u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
+ u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ u32 head_page_idx = page_idx;
+ u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
+ u32 frag_offset = head_offset + headlen;
+ u16 byte_cnt = cqe_bcnt - headlen;
+
+ if (unlikely(frag_offset >= PAGE_SIZE)) {
+ page_idx++;
+ frag_offset -= PAGE_SIZE;
+ }
+ wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes);
+
+ while (byte_cnt) {
+ u32 pg_consumed_bytes =
+ min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+
+ wi->add_skb_frag(rq, skb, wi, page_idx, frag_offset,
+ pg_consumed_bytes);
+ byte_cnt -= pg_consumed_bytes;
+ frag_offset = 0;
+ page_idx++;
+ }
+ /* copy header */
+ wi->copy_skb_header(rq->pdev, skb, wi, head_page_idx, head_offset,
+ headlen);
+ /* skb linear part was allocated with headlen and aligned to long */
+ skb->tail += headlen;
+ skb->len += headlen;
+}
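
mlx5e_mpwqe_fill_rx_skb() turns the stride index from the CQE into a (page, offset) pair by plain arithmetic: wqe_offset = stride_ix * stride_sz, then shift and mask by the page size. A worked example with an assumed 2 KB stride and 4 KB pages:

	#include <stdio.h>

	#define PAGE_SZ   4096u
	#define STRIDE_SZ 2048u	/* assumed rq->mpwqe_stride_sz */

	int main(void)
	{
		unsigned int stride_ix = 5;
		unsigned int wqe_offset  = stride_ix * STRIDE_SZ;	/* 10240 */
		unsigned int page_idx    = wqe_offset / PAGE_SZ;	/* 2 */
		unsigned int head_offset = wqe_offset % PAGE_SZ;	/* 2048 */

		printf("stride %u -> page %u, offset %u\n",
		       stride_ix, page_idx, head_offset);
		return 0;
	}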
+
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
+ u16 wqe_id = be16_to_cpu(cqe->wqe_id);
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
+ struct sk_buff *skb;
+ u16 cqe_bcnt;
+
+ wi->consumed_strides += cstrides;
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ goto mpwrq_cqe_out;
+ }
+
+ if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+ rq->stats.mpwqe_filler++;
+ goto mpwrq_cqe_out;
+ }
+
+ skb = napi_alloc_skb(rq->cq.napi,
+ ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
+ sizeof(long)));
+ if (unlikely(!skb)) {
+ rq->stats.buff_alloc_err++;
+ goto mpwrq_cqe_out;
+ }
+
+ prefetch(skb->data);
+ cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+
+ mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+mpwrq_cqe_out:
+ if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
+ return;
+
+ wi->free_wqe(rq, wi);
+ mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
- int work_done;
+ int work_done = 0;
- for (work_done = 0; work_done < budget; work_done++) {
- struct mlx5e_rx_wqe *wqe;
- struct mlx5_cqe64 *cqe;
- struct sk_buff *skb;
- __be16 wqe_counter_be;
- u16 wqe_counter;
+ if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+ return 0;
- cqe = mlx5e_get_cqe(cq);
- if (!cqe)
- break;
+ if (cq->decmprs_left)
+ work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
- mlx5_cqwq_pop(&cq->wq);
+ for (; work_done < budget; work_done++) {
+ struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq);
- wqe_counter_be = cqe->wqe_counter;
- wqe_counter = be16_to_cpu(wqe_counter_be);
- wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
- skb = rq->skb[wqe_counter];
- prefetch(skb->data);
- rq->skb[wqe_counter] = NULL;
-
- dma_unmap_single(rq->pdev,
- *((dma_addr_t *)skb->cb),
- rq->wqe_sz,
- DMA_FROM_DEVICE);
+ if (!cqe)
+ break;
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
- rq->stats.wqe_err++;
- dev_kfree_skb(skb);
- goto wq_ll_pop;
+ if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
+ work_done +=
+ mlx5e_decompress_cqes_start(rq, cq,
+ budget - work_done);
+ continue;
}
- mlx5e_build_rx_skb(cqe, rq, skb);
- rq->stats.packets++;
- rq->stats.bytes += be32_to_cpu(cqe->byte_cnt);
- napi_gro_receive(cq->napi, skb);
+ mlx5_cqwq_pop(&cq->wq);
-wq_ll_pop:
- mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
- &wqe->next.next_wqe_index);
+ rq->handle_rx_cqe(rq, cqe);
}
mlx5_cqwq_update_db_record(&cq->wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
new file mode 100644
index 000000000..fcd490cc5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __MLX5_EN_STATS_H__
+#define __MLX5_EN_STATS_H__
+
+#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
+ (*(u64 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
+ be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
+ (*(u32 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
+	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+
+#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+
+struct counter_desc {
+ char format[ETH_GSTRING_LEN];
+ int offset; /* Byte offset */
+};
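
counter_desc pairs an ethtool string with a byte offset into the owning stats struct, so one generic loop can emit both names and values without per-counter code. A user-space sketch of how such a table is typically consumed (the format field is simplified to a pointer here; names are illustrative):

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	struct sw_stats {
		uint64_t rx_packets;
		uint64_t rx_bytes;
	};

	struct counter_desc {
		const char *format;
		int offset;	/* byte offset into struct sw_stats */
	};

	static const struct counter_desc sw_desc[] = {
		{ "rx_packets", offsetof(struct sw_stats, rx_packets) },
		{ "rx_bytes",   offsetof(struct sw_stats, rx_bytes) },
	};

	#define READ_CTR64_CPU(ptr, dsc, i) \
		(*(uint64_t *)((char *)(ptr) + (dsc)[i].offset))

	int main(void)
	{
		struct sw_stats s = { .rx_packets = 42, .rx_bytes = 64000 };
		size_t i;

		for (i = 0; i < sizeof(sw_desc) / sizeof(sw_desc[0]); i++)
			printf("%-12s %llu\n", sw_desc[i].format,
			       (unsigned long long)READ_CTR64_CPU(&s, sw_desc, i));
		return 0;
	}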
+
+struct mlx5e_sw_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_tso_packets;
+ u64 tx_tso_bytes;
+ u64 tx_tso_inner_packets;
+ u64 tx_tso_inner_bytes;
+ u64 rx_lro_packets;
+ u64 rx_lro_bytes;
+ u64 rx_csum_unnecessary;
+ u64 rx_csum_none;
+ u64 rx_csum_complete;
+ u64 rx_csum_unnecessary_inner;
+ u64 tx_csum_partial;
+ u64 tx_csum_partial_inner;
+ u64 tx_queue_stopped;
+ u64 tx_queue_wake;
+ u64 tx_queue_dropped;
+ u64 rx_wqe_err;
+ u64 rx_mpwqe_filler;
+ u64 rx_mpwqe_frag;
+ u64 rx_buff_alloc_err;
+ u64 rx_cqe_compress_blks;
+ u64 rx_cqe_compress_pkts;
+
+ /* Special handling counters */
+ u64 link_down_events_phy;
+};
+
+static const struct counter_desc sw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
+};
+
+struct mlx5e_qcounter_stats {
+ u32 rx_out_of_buffer;
+};
+
+static const struct counter_desc q_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
+};
+
+#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
+#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
+ vstats->query_vport_out, c)
+
+struct mlx5e_vport_stats {
+ __be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
+};
+
+static const struct counter_desc vport_stats_desc[] = {
+ { "rx_vport_unicast_packets",
+ VPORT_COUNTER_OFF(received_eth_unicast.packets) },
+ { "rx_vport_unicast_bytes",
+ VPORT_COUNTER_OFF(received_eth_unicast.octets) },
+ { "tx_vport_unicast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
+ { "tx_vport_unicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
+ { "rx_vport_multicast_packets",
+ VPORT_COUNTER_OFF(received_eth_multicast.packets) },
+ { "rx_vport_multicast_bytes",
+ VPORT_COUNTER_OFF(received_eth_multicast.octets) },
+ { "tx_vport_multicast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
+ { "tx_vport_multicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
+ { "rx_vport_broadcast_packets",
+ VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
+ { "rx_vport_broadcast_bytes",
+ VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
+ { "tx_vport_broadcast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
+ { "tx_vport_broadcast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
+};
+
+#define PPORT_802_3_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+#define PPORT_802_3_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
+ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+#define PPORT_2863_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+#define PPORT_2863_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
+ counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+#define PPORT_2819_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_2819_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
+ counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_PER_PRIO_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_per_prio_grp_data_layout.c##_high)
+#define PPORT_PER_PRIO_GET(pstats, prio, c) \
+ MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
+ counter_set.eth_per_prio_grp_data_layout.c##_high)
+#define NUM_PPORT_PRIO 8
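
The PPORT_*_GET macros rely on the register layout storing each 64-bit counter as a pair of big-endian 32-bit words named <counter>_high and <counter>_low; the c##_high token-pasting points MLX5_GET64 at the high word, from which the full 64 bits are read. A stripped-down illustration of the pasting trick (the layout below is a stand-in, not the real ppcnt_reg):

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	struct fake_layout {
		uint32_t a_frames_received_ok_high;
		uint32_t a_frames_received_ok_low;
	};

	/* Paste the field name, then combine the two big-endian words. */
	#define GET64(buf, c) \
		(((uint64_t)ntohl((buf)->c##_high) << 32) | ntohl((buf)->c##_low))

	int main(void)
	{
		struct fake_layout reg = {
			.a_frames_received_ok_high = htonl(0x1),
			.a_frames_received_ok_low  = htonl(0x2),
		};

		printf("counter = 0x%llx\n",	/* 0x100000002 */
		       (unsigned long long)GET64(&reg, a_frames_received_ok));
		return 0;
	}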
+
+struct mlx5e_pport_stats {
+ __be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+};
+
+static const struct counter_desc pport_802_3_stats_desc[] = {
+ { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+ { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
+ { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+ { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+ { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
+ { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+ { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+ { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+ { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+ { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
+ { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
+ { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
+ { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+ { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+ { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
+ { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+ { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+ { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+};
+
+static const struct counter_desc pport_2863_stats_desc[] = {
+ { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
+ { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
+ { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
+};
+
+static const struct counter_desc pport_2819_stats_desc[] = {
+ { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+ { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
+ { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
+ { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
+ { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+ { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+ { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+ { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+ { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+ { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+ { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+ { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+ { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+};
+
+static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
+ { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
+ { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+ { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
+ { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
+};
+
+static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
+ { "rx_prio%d_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+ { "rx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+ { "tx_prio%d_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+ { "tx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+ { "rx_prio%d_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+};
+
+struct mlx5e_rq_stats {
+ u64 packets;
+ u64 bytes;
+ u64 csum_complete;
+ u64 csum_unnecessary_inner;
+ u64 csum_none;
+ u64 lro_packets;
+ u64 lro_bytes;
+ u64 wqe_err;
+ u64 mpwqe_filler;
+ u64 mpwqe_frag;
+ u64 buff_alloc_err;
+ u64 cqe_compress_blks;
+ u64 cqe_compress_pkts;
+};
+
+static const struct counter_desc rq_stats_desc[] = {
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+};
+
+struct mlx5e_sq_stats {
+ /* commonly accessed in data path */
+ u64 packets;
+ u64 bytes;
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 tso_inner_packets;
+ u64 tso_inner_bytes;
+ u64 csum_partial_inner;
+ u64 nop;
+ /* less likely accessed in data path */
+ u64 csum_none;
+ u64 stopped;
+ u64 wake;
+ u64 dropped;
+};
+
+static const struct counter_desc sq_stats_desc[] = {
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+};
+
+#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
+#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
+#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
+#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
+#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
+#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
+#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
+ ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
+#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
+ ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
+#define NUM_PPORT_COUNTERS (NUM_PPORT_802_3_COUNTERS + \
+ NUM_PPORT_2863_COUNTERS + \
+ NUM_PPORT_2819_COUNTERS + \
+ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
+ NUM_PPORT_PRIO)
+#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
+#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
+
+struct mlx5e_stats {
+ struct mlx5e_sw_stats sw;
+ struct mlx5e_qcounter_stats qcnt;
+ struct mlx5e_vport_stats vport;
+ struct mlx5e_pport_stats pport;
+};
+
+#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b3de09f13..704c3d304 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -46,43 +46,65 @@ struct mlx5e_tc_flow {
struct mlx5_flow_rule *rule;
};
-#define MLX5E_TC_FLOW_TABLE_NUM_ENTRIES 1024
-#define MLX5E_TC_FLOW_TABLE_NUM_GROUPS 4
+#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
+#define MLX5E_TC_TABLE_NUM_GROUPS 4
static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
u32 *match_c, u32 *match_v,
u32 action, u32 flow_tag)
{
- struct mlx5_flow_destination dest = {
- .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
- {.ft = priv->fts.vlan.t},
- };
+ struct mlx5_core_dev *dev = priv->mdev;
+ struct mlx5_flow_destination dest = { 0 };
+ struct mlx5_fc *counter = NULL;
struct mlx5_flow_rule *rule;
bool table_created = false;
- if (IS_ERR_OR_NULL(priv->fts.tc.t)) {
- priv->fts.tc.t =
- mlx5_create_auto_grouped_flow_table(priv->fts.ns, 0,
- MLX5E_TC_FLOW_TABLE_NUM_ENTRIES,
- MLX5E_TC_FLOW_TABLE_NUM_GROUPS);
- if (IS_ERR(priv->fts.tc.t)) {
+ if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = priv->fs.vlan.ft.t;
+ } else {
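+ /* no forwarding destination: attach a flow counter instead so
+ * that rule statistics can still be reported; 'true' requests an
+ * aging counter whose values are cached for mlx5_fc_query_cached()
+ */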
+ counter = mlx5_fc_create(dev, true);
+ if (IS_ERR(counter))
+ return ERR_CAST(counter);
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter = counter;
+ }
+
+ if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
+ priv->fs.tc.t =
+ mlx5_create_auto_grouped_flow_table(priv->fs.ns,
+ MLX5E_TC_PRIO,
+ MLX5E_TC_TABLE_NUM_ENTRIES,
+ MLX5E_TC_TABLE_NUM_GROUPS,
+ 0);
+ if (IS_ERR(priv->fs.tc.t)) {
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- return ERR_CAST(priv->fts.tc.t);
+ rule = ERR_CAST(priv->fs.tc.t);
+ goto err_create_ft;
}
table_created = true;
}
- rule = mlx5_add_flow_rule(priv->fts.tc.t, MLX5_MATCH_OUTER_HEADERS,
+ rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
match_c, match_v,
action, flow_tag,
- action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL);
+ &dest);
+
+ if (IS_ERR(rule))
+ goto err_add_rule;
+
+ return rule;
- if (IS_ERR(rule) && table_created) {
- mlx5_destroy_flow_table(priv->fts.tc.t);
- priv->fts.tc.t = NULL;
+err_add_rule:
+ if (table_created) {
+ mlx5_destroy_flow_table(priv->fs.tc.t);
+ priv->fs.tc.t = NULL;
}
+err_create_ft:
+ mlx5_fc_destroy(dev, counter);
return rule;
}
@@ -90,11 +112,17 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5_flow_rule *rule)
{
+ struct mlx5_fc *counter = NULL;
+
+ counter = mlx5_flow_rule_counter(rule);
+
mlx5_del_flow_rule(rule);
+ mlx5_fc_destroy(priv->mdev, counter);
+
if (!mlx5e_tc_num_filters(priv)) {
- mlx5_destroy_flow_table(priv->fts.tc.t);
- priv->fts.tc.t = NULL;
+ mlx5_destroy_flow_table(priv->fs.tc.t);
+ priv->fs.tc.t = NULL;
}
}
@@ -284,6 +312,9 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_gact_shot(a)) {
*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ if (MLX5_CAP_FLOWTABLE(priv->mdev,
+ flow_table_properties_nic_receive.flow_counter))
+ *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
continue;
}
@@ -310,7 +341,7 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
struct tc_cls_flower_offload *f)
{
- struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
u32 *match_c;
u32 *match_v;
int err = 0;
@@ -376,7 +407,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f)
{
struct mlx5e_tc_flow *flow;
- struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
tc->ht_params);
@@ -392,6 +423,34 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
return 0;
}
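+/* Report the cached HW counter values (bytes, packets, last use) of an
+ * offloaded flower rule back to the tc actions attached to it.
+ */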
+int mlx5e_stats_flower(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_flow *flow;
+ struct tc_action *a;
+ struct mlx5_fc *counter;
+ u64 bytes;
+ u64 packets;
+ u64 lastuse;
+
+ flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
+ tc->ht_params);
+ if (!flow)
+ return -EINVAL;
+
+ counter = mlx5_flow_rule_counter(flow->rule);
+ if (!counter)
+ return 0;
+
+ mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+
+ tc_for_each_action(a, f->exts)
+ tcf_action_stats_update(a, bytes, packets, lastuse);
+
+ return 0;
+}
+
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
.head_offset = offsetof(struct mlx5e_tc_flow, node),
.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
@@ -401,7 +460,7 @@ static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
@@ -418,12 +477,12 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
- if (!IS_ERR_OR_NULL(priv->fts.tc.t)) {
- mlx5_destroy_flow_table(priv->fts.tc.t);
- priv->fts.tc.t = NULL;
+ if (!IS_ERR_OR_NULL(tc->t)) {
+ mlx5_destroy_flow_table(tc->t);
+ tc->t = NULL;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index d677428dc..34bf903fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -43,9 +43,12 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
int mlx5e_delete_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f);
+int mlx5e_stats_flower(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f);
+
static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
{
- return atomic_read(&priv->fts.tc.ht.nelems);
+ return atomic_read(&priv->fs.tc.ht.nelems);
}
#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 1ffc7cb6f..5740b465e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -54,10 +54,11 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
sq->skb[pi] = NULL;
sq->pc++;
+ sq->stats.nop++;
if (notify_hw) {
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, wqe, 0);
+ mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}
}
@@ -109,8 +110,20 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb);
- int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
- skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
+ int up = 0;
+
+ if (!netdev_get_num_tc(dev))
+ return channel_ix;
+
+ if (skb_vlan_tag_present(skb))
+ up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+
+ /* channel_ix can be larger than num_channels since
+ * dev->real_num_tx_queues = num_channels * num_tc
+ */
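+ /* reciprocal_scale() maps channel_ix uniformly back into
+ * [0, num_channels) without a division
+ */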
+ if (channel_ix >= priv->params.num_channels)
+ channel_ix = reciprocal_scale(channel_ix,
+ priv->params.num_channels);
return priv->channeltc_to_txq_map[channel_ix][up];
}
@@ -122,7 +135,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
* headers and occur before the data gather.
* Therefore these headers must be copied into the WQE
*/
-#define MLX5E_MIN_INLINE ETH_HLEN
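+/* inline at least the Ethernet header plus room for a possible VLAN tag */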
+#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
if (bf) {
u16 ihs = skb_headlen(skb);
@@ -134,7 +147,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
return skb_headlen(skb);
}
- return MLX5E_MIN_INLINE;
+ return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
@@ -191,12 +204,12 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
if (skb->encapsulation) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
MLX5_ETH_WQE_L4_INNER_CSUM;
- sq->stats.csum_offload_inner++;
+ sq->stats.csum_partial_inner++;
} else {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
}
} else
- sq->stats.csum_offload_none++;
+ sq->stats.csum_none++;
if (sq->cc != sq->prev_cc) {
sq->prev_cc = sq->cc;
@@ -309,14 +322,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
bf_sz = wi->num_wqebbs << 3;
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, wqe, bf_sz);
+ mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
}
/* fill sq edge with nops to avoid wqe wrap around */
while ((sq->pc & wq->sz_m1) > sq->edge)
mlx5e_send_nop(sq, false);
- sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+ if (bf)
+ sq->bf_budget--;
sq->stats.packets++;
sq->stats.bytes += num_bytes;
@@ -339,6 +353,35 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return mlx5e_sq_xmit(sq, skb);
}
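+/* Reclaim all WQEs still posted on the SQ: walk the ring from the
+ * consumer counter (cc) to the producer counter (pc), unmap each
+ * descriptor's DMA mappings and free its skb; nops carry no skb and
+ * are simply skipped.
+ */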
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+ struct mlx5e_tx_wqe_info *wi;
+ struct sk_buff *skb;
+ u16 ci;
+ int i;
+
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ skb = sq->skb[ci];
+ wi = &sq->wqe_info[ci];
+
+ if (!skb) { /* nop */
+ sq->cc++;
+ continue;
+ }
+
+ for (i = 0; i < wi->num_dma; i++) {
+ struct mlx5e_sq_dma *dma =
+ mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ }
+
+ dev_kfree_skb_any(skb);
+ sq->cc += wi->num_wqebbs;
+ }
+}
+
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq *sq;
@@ -350,6 +393,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_sq, cq);
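+ /* skip completion processing while the SQ is flagged for TX timeout
+ * recovery; its pending descriptors are reclaimed separately (see
+ * mlx5e_free_tx_descs())
+ */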
+ if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
+ return false;
+
npkts = 0;
nbytes = 0;
@@ -387,7 +433,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
wi = &sq->wqe_info[ci];
if (unlikely(!skb)) { /* nop */
- sq->stats.nop++;
sqcc++;
continue;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 9bb4395ac..c38781fa5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -49,6 +49,60 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
return cqe;
}
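+/* Poll the CQ of the ICOSQ (internal control operations send queue);
+ * a completed UMR WQE triggers re-posting of a fragmented MPWQE on the
+ * channel's RQ.
+ */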
+static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5_wq_cyc *wq;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_sq *sq;
+ u16 sqcc;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (likely(!cqe))
+ return;
+
+ sq = container_of(cq, struct mlx5e_sq, cq);
+ wq = &sq->wq;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ do {
+ u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
+ struct mlx5e_ico_wqe_info *icowi = &sq->ico_wqe_info[ci];
+
+ mlx5_cqwq_pop(&cq->wq);
+ sqcc += icowi->num_wqebbs;
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
+ WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
+ cqe->op_own);
+ break;
+ }
+
+ switch (icowi->opcode) {
+ case MLX5_OPCODE_NOP:
+ break;
+ case MLX5_OPCODE_UMR:
+ mlx5e_post_rx_fragmented_mpwqe(&sq->channel->rq);
+ break;
+ default:
+ WARN_ONCE(true,
+ "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
+ icowi->opcode);
+ }
+
+ } while ((cqe = mlx5e_get_cqe(cq)));
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc = sqcc;
+}
+
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -64,6 +118,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
busy |= work_done == budget;
+
+ mlx5e_poll_ico_cq(&c->icosq.cq);
+
busy |= mlx5e_post_rx_wqes(&c->rq);
if (busy)
@@ -80,6 +137,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
mlx5e_cq_arm(&c->sq[i].cq);
mlx5e_cq_arm(&c->rq.cq);
+ mlx5e_cq_arm(&c->icosq.cq);
return work_done;
}
@@ -89,7 +147,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
- barrier();
napi_schedule(cq->napi);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 18fccec72..0e30602ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -202,7 +202,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
struct mlx5_eqe *eqe;
int eqes_found = 0;
int set_ci = 0;
- u32 cqn;
+ u32 cqn = -1;
u32 rsn;
u8 port;
@@ -320,6 +320,9 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
eq_update_ci(eq, 1);
+ if (cqn != -1)
+ tasklet_schedule(&eq->tasklet_ctx.task);
+
return eqes_found;
}
@@ -403,6 +406,12 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
if (err)
goto err_irq;
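+ /* CQ completions reported by this EQ are collected on
+ * tasklet_ctx.list and handled outside hard-irq context by
+ * mlx5_cq_tasklet_cb()
+ */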
+ INIT_LIST_HEAD(&eq->tasklet_ctx.list);
+ INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
+ spin_lock_init(&eq->tasklet_ctx.lock);
+ tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
+ (unsigned long)&eq->tasklet_ctx);
+
/* EQs are created in ARMED state
*/
eq_update_ci(eq, 1);
@@ -436,6 +445,7 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
synchronize_irq(eq->irqn);
+ tasklet_disable(&eq->tasklet_ctx.task);
mlx5_buf_free(dev, &eq->buf);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index bc3d9f8a7..aebbd6ccb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -77,16 +77,20 @@ struct vport_addr {
u8 action;
u32 vport;
struct mlx5_flow_rule *flow_rule; /* SRIOV only */
+ /* A flag indicating that the MAC was added due to an mc promiscuous vport */
+ bool mc_promisc;
};
enum {
UC_ADDR_CHANGE = BIT(0),
MC_ADDR_CHANGE = BIT(1),
+ PROMISC_CHANGE = BIT(3),
};
/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
- MC_ADDR_CHANGE)
+ MC_ADDR_CHANGE | \
+ PROMISC_CHANGE)
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
@@ -116,6 +120,9 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
if (events_mask & MC_ADDR_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_mc_address_change, 1);
+ if (events_mask & PROMISC_CHANGE)
+ MLX5_SET(nic_vport_context, nic_vport_ctx,
+ event_on_promisc_change, 1);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
@@ -323,30 +330,45 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
/* E-Switch FDB */
static struct mlx5_flow_rule *
-esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
+ u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
- int match_header = MLX5_MATCH_OUTER_HEADERS;
- struct mlx5_flow_destination dest;
+ int match_header = (is_zero_ether_addr(mac_c) ? 0 :
+ MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_rule *flow_rule = NULL;
+ struct mlx5_flow_destination dest;
+ void *mv_misc = NULL;
+ void *mc_misc = NULL;
+ u8 *dmac_v = NULL;
+ u8 *dmac_c = NULL;
u32 *match_v;
u32 *match_c;
- u8 *dmac_v;
- u8 *dmac_c;
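+ /* an rx_rule matches on the misc source_port (uplink) rather than a
+ * destination MAC; it is used for the promiscuous catch-all rule
+ */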
+ if (rx_rule)
+ match_header |= MLX5_MATCH_MISC_PARAMETERS;
match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
if (!match_v || !match_c) {
pr_warn("FDB: Failed to alloc match parameters\n");
goto out;
}
+
dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers.dmac_47_16);
dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
outer_headers.dmac_47_16);
- ether_addr_copy(dmac_v, mac);
- /* Match criteria mask */
- memset(dmac_c, 0xff, 6);
+ if (match_header & MLX5_MATCH_OUTER_HEADERS) {
+ ether_addr_copy(dmac_v, mac_v);
+ ether_addr_copy(dmac_c, mac_c);
+ }
+
+ if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
+ mv_misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters);
+ mc_misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
+ MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
+ MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
+ }
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = vport;
@@ -361,7 +383,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
match_v,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
- if (IS_ERR_OR_NULL(flow_rule)) {
+ if (IS_ERR(flow_rule)) {
pr_warn(
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
@@ -373,6 +395,39 @@ out:
return flow_rule;
}
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+{
+ u8 mac_c[ETH_ALEN];
+
+ eth_broadcast_addr(mac_c);
+ return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
+}
+
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
+{
+ u8 mac_c[ETH_ALEN];
+ u8 mac_v[ETH_ALEN];
+
+ eth_zero_addr(mac_c);
+ eth_zero_addr(mac_v);
+ mac_c[0] = 0x01;
+ mac_v[0] = 0x01;
+ return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
+}
+
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
+{
+ u8 mac_c[ETH_ALEN];
+ u8 mac_v[ETH_ALEN];
+
+ eth_zero_addr(mac_c);
+ eth_zero_addr(mac_v);
+ return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
+}
+
static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -401,34 +456,80 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
memset(flow_group_in, 0, inlen);
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- fdb = mlx5_create_flow_table(root_ns, 0, table_size);
- if (IS_ERR_OR_NULL(fdb)) {
+ fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
+ if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
goto out;
}
+ esw->fdb_table.fdb = fdb;
+ /* Addresses group : Full match unicast/multicast addresses */
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_OUTER_HEADERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+ /* Preserve 2 entries for allmulti and promisc rules */
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
eth_broadcast_addr(dmac);
-
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create flow group err(%d)\n", err);
goto out;
}
-
esw->fdb_table.addr_grp = g;
- esw->fdb_table.fdb = fdb;
+
+ /* Allmulti group : One rule that forwards any mcast traffic */
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
+ eth_zero_addr(dmac);
+ dmac[0] = 0x01;
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.allmulti_grp = g;
+
+ /* Promiscuous group :
+ * One rule that forwards all unmatched traffic from previous groups
+ */
+ eth_zero_addr(dmac);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.promisc_grp = g;
+
out:
- kfree(flow_group_in);
- if (err && !IS_ERR_OR_NULL(fdb))
- mlx5_destroy_flow_table(fdb);
+ if (err) {
+ if (!IS_ERR_OR_NULL(esw->fdb_table.allmulti_grp)) {
+ mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
+ esw->fdb_table.allmulti_grp = NULL;
+ }
+ if (!IS_ERR_OR_NULL(esw->fdb_table.addr_grp)) {
+ mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
+ esw->fdb_table.addr_grp = NULL;
+ }
+ if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
+ mlx5_destroy_flow_table(esw->fdb_table.fdb);
+ esw->fdb_table.fdb = NULL;
+ }
+ }
+
+ kvfree(flow_group_in);
return err;
}
@@ -438,10 +539,14 @@ static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
return;
esw_debug(esw->dev, "Destroy FDB Table\n");
+ mlx5_destroy_flow_group(esw->fdb_table.promisc_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
mlx5_destroy_flow_table(esw->fdb_table.fdb);
esw->fdb_table.fdb = NULL;
esw->fdb_table.addr_grp = NULL;
+ esw->fdb_table.allmulti_grp = NULL;
+ esw->fdb_table.promisc_grp = NULL;
}
/* E-Switch vport UC/MC lists management */
@@ -511,6 +616,53 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
return 0;
}
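+/* Propagate a multicast MAC add/delete to every vport whose allmulti
+ * rule is active, keeping each vport's MC hash and FDB forwarding
+ * rules in sync.
+ */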
+static void update_allmulti_vports(struct mlx5_eswitch *esw,
+ struct vport_addr *vaddr,
+ struct esw_mc_addr *esw_mc)
+{
+ u8 *mac = vaddr->node.addr;
+ u32 vport_idx = 0;
+
+ for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
+ struct mlx5_vport *vport = &esw->vports[vport_idx];
+ struct hlist_head *vport_hash = vport->mc_list;
+ struct vport_addr *iter_vaddr =
+ l2addr_hash_find(vport_hash,
+ mac,
+ struct vport_addr);
+ if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
+ vaddr->vport == vport_idx)
+ continue;
+ switch (vaddr->action) {
+ case MLX5_ACTION_ADD:
+ if (iter_vaddr)
+ continue;
+ iter_vaddr = l2addr_hash_add(vport_hash, mac,
+ struct vport_addr,
+ GFP_KERNEL);
+ if (!iter_vaddr) {
+ esw_warn(esw->dev,
+ "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
+ mac, vport_idx);
+ continue;
+ }
+ iter_vaddr->vport = vport_idx;
+ iter_vaddr->flow_rule =
+ esw_fdb_set_vport_rule(esw,
+ mac,
+ vport_idx);
+ iter_vaddr->mc_promisc = true;
+ break;
+ case MLX5_ACTION_DEL:
+ if (!iter_vaddr)
+ continue;
+ mlx5_del_flow_rule(iter_vaddr->flow_rule);
+ l2addr_hash_del(iter_vaddr);
+ break;
+ }
+ }
+}
+
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
struct hlist_head *hash = esw->mc_table;
@@ -531,8 +683,17 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
+
+ /* Add this multicast mac to all the mc promiscuous vports */
+ update_allmulti_vports(esw, vaddr, esw_mc);
+
add:
- esw_mc->refcnt++;
+ /* If the multicast mac is added as a result of an mc promiscuous vport,
+ * don't increment the multicast ref count
+ */
+ if (!vaddr->mc_promisc)
+ esw_mc->refcnt++;
+
/* Forward MC MAC to vport */
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw_debug(esw->dev,
@@ -568,9 +729,15 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
mlx5_del_flow_rule(vaddr->flow_rule);
vaddr->flow_rule = NULL;
- if (--esw_mc->refcnt)
+ /* If the multicast mac is added as a result of an mc promiscuous vport,
+ * don't decrement the multicast ref count.
+ */
+ if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
return 0;
+ /* Remove this multicast mac from all the mc promiscuous vports */
+ update_allmulti_vports(esw, vaddr, esw_mc);
+
if (esw_mc->uplink_rule)
mlx5_del_flow_rule(esw_mc->uplink_rule);
@@ -643,10 +810,13 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
addr->action = MLX5_ACTION_DEL;
}
+ if (!vport->enabled)
+ goto out;
+
err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
mac_list, &size);
if (err)
- return;
+ goto out;
esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
vport_num, is_uc ? "UC" : "MC", size);
@@ -660,6 +830,24 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
if (addr) {
addr->action = MLX5_ACTION_NONE;
+ /* If this mac was previously added because of allmulti
+ * promiscuous rx mode, it's now converted to be the original
+ * vport mac.
+ */
+ if (addr->mc_promisc) {
+ struct esw_mc_addr *esw_mc =
+ l2addr_hash_find(esw->mc_table,
+ mac_list[i],
+ struct esw_mc_addr);
+ if (!esw_mc) {
+ esw_warn(esw->dev,
+ "Failed to MAC(%pM) in mcast DB\n",
+ mac_list[i]);
+ continue;
+ }
+ esw_mc->refcnt++;
+ addr->mc_promisc = false;
+ }
continue;
}
@@ -674,13 +862,121 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
addr->vport = vport_num;
addr->action = MLX5_ACTION_ADD;
}
+out:
kfree(mac_list);
}
-static void esw_vport_change_handler(struct work_struct *work)
+/* Sync the vport's MC list with the e-switch global multicast table
+ * (for allmulti mode). Must be called after esw_update_vport_addr_list.
+ */
+static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
+{
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+ struct l2addr_node *node;
+ struct vport_addr *addr;
+ struct hlist_head *hash;
+ struct hlist_node *tmp;
+ int hi;
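+/* each 64-bit PPCNT counter is laid out as a pair of big-endian dwords
+ * (c_high, c_low); MLX5_GET64 at the _high offset reads the full value
+ */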
+
+ hash = vport->mc_list;
+
+ for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
+ u8 *mac = node->addr;
+
+ addr = l2addr_hash_find(hash, mac, struct vport_addr);
+ if (addr) {
+ if (addr->action == MLX5_ACTION_DEL)
+ addr->action = MLX5_ACTION_NONE;
+ continue;
+ }
+ addr = l2addr_hash_add(hash, mac, struct vport_addr,
+ GFP_KERNEL);
+ if (!addr) {
+ esw_warn(esw->dev,
+ "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
+ mac, vport_num);
+ continue;
+ }
+ addr->vport = vport_num;
+ addr->action = MLX5_ACTION_ADD;
+ addr->mc_promisc = true;
+ }
+}
+
+/* Apply vport rx mode to HW FDB table */
+static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
+ bool promisc, bool mc_promisc)
+{
+ struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+
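+ /* nothing to do if the allmulti rule presence already matches the
+ * requested mc_promisc state; fall through to promisc handling
+ */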
+ if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
+ goto promisc;
+
+ if (mc_promisc) {
+ vport->allmulti_rule =
+ esw_fdb_set_vport_allmulti_rule(esw, vport_num);
+ if (!allmulti_addr->uplink_rule)
+ allmulti_addr->uplink_rule =
+ esw_fdb_set_vport_allmulti_rule(esw,
+ UPLINK_VPORT);
+ allmulti_addr->refcnt++;
+ } else if (vport->allmulti_rule) {
+ mlx5_del_flow_rule(vport->allmulti_rule);
+ vport->allmulti_rule = NULL;
+
+ if (--allmulti_addr->refcnt > 0)
+ goto promisc;
+
+ if (allmulti_addr->uplink_rule)
+ mlx5_del_flow_rule(allmulti_addr->uplink_rule);
+ allmulti_addr->uplink_rule = NULL;
+ }
+
+promisc:
+ if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
+ return;
+
+ if (promisc) {
+ vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
+ vport_num);
+ } else if (vport->promisc_rule) {
+ mlx5_del_flow_rule(vport->promisc_rule);
+ vport->promisc_rule = NULL;
+ }
+}
+
+/* Sync vport rx mode from vport context */
+static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
+{
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+ int promisc_all = 0;
+ int promisc_uc = 0;
+ int promisc_mc = 0;
+ int err;
+
+ err = mlx5_query_nic_vport_promisc(esw->dev,
+ vport_num,
+ &promisc_uc,
+ &promisc_mc,
+ &promisc_all);
+ if (err)
+ return;
+ esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
+ vport_num, promisc_all, promisc_mc);
+
+ if (!vport->trusted || !vport->enabled) {
+ promisc_uc = 0;
+ promisc_mc = 0;
+ promisc_all = 0;
+ }
+
+ esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
+ (promisc_all || promisc_mc));
+}
+
+static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
- struct mlx5_vport *vport =
- container_of(work, struct mlx5_vport, vport_change_handler);
struct mlx5_core_dev *dev = vport->dev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
u8 mac[ETH_ALEN];
@@ -699,6 +995,15 @@ static void esw_vport_change_handler(struct work_struct *work)
if (vport->enabled_events & MC_ADDR_CHANGE) {
esw_update_vport_addr_list(esw, vport->vport,
MLX5_NVPRT_LIST_TYPE_MC);
+ }
+
+ if (vport->enabled_events & PROMISC_CHANGE) {
+ esw_update_vport_rx_mode(esw, vport->vport);
+ if (!IS_ERR_OR_NULL(vport->allmulti_rule))
+ esw_update_vport_mc_promisc(esw, vport->vport);
+ }
+
+ if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
esw_apply_vport_addr_list(esw, vport->vport,
MLX5_NVPRT_LIST_TYPE_MC);
}
@@ -709,15 +1014,477 @@ static void esw_vport_change_handler(struct work_struct *work)
vport->enabled_events);
}
+static void esw_vport_change_handler(struct work_struct *work)
+{
+ struct mlx5_vport *vport =
+ container_of(work, struct mlx5_vport, vport_change_handler);
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ mutex_lock(&esw->state_lock);
+ esw_vport_change_handle_locked(vport);
+ mutex_unlock(&esw->state_lock);
+}
+
+static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *vlan_grp = NULL;
+ struct mlx5_flow_group *drop_grp = NULL;
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *acl;
+ void *match_criteria;
+ u32 *flow_group_in;
+ /* The egress acl table contains 2 rules:
+ * 1)Allow traffic with vlan_tag=vst_vlan_id
+ * 2)Drop all other traffic.
+ */
+ int table_size = 2;
+ int err = 0;
+
+ if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
+ !IS_ERR_OR_NULL(vport->egress.acl))
+ return;
+
+ esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
+ vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+ return;
+ }
+
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return;
+
+ acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+ if (IS_ERR(acl)) {
+ err = PTR_ERR(acl);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+ vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(vlan_grp)) {
+ err = PTR_ERR(vlan_grp);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+ drop_grp = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(drop_grp)) {
+ err = PTR_ERR(drop_grp);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ vport->egress.acl = acl;
+ vport->egress.drop_grp = drop_grp;
+ vport->egress.allowed_vlans_grp = vlan_grp;
+out:
+ kvfree(flow_group_in);
+ if (err && !IS_ERR_OR_NULL(vlan_grp))
+ mlx5_destroy_flow_group(vlan_grp);
+ if (err && !IS_ERR_OR_NULL(acl))
+ mlx5_destroy_flow_table(acl);
+}
+
+static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
+ mlx5_del_flow_rule(vport->egress.allowed_vlan);
+
+ if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
+ mlx5_del_flow_rule(vport->egress.drop_rule);
+
+ vport->egress.allowed_vlan = NULL;
+ vport->egress.drop_rule = NULL;
+}
+
+static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (IS_ERR_OR_NULL(vport->egress.acl))
+ return;
+
+ esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
+
+ esw_vport_cleanup_egress_rules(esw, vport);
+ mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
+ mlx5_destroy_flow_group(vport->egress.drop_grp);
+ mlx5_destroy_flow_table(vport->egress.acl);
+ vport->egress.allowed_vlans_grp = NULL;
+ vport->egress.drop_grp = NULL;
+ vport->egress.acl = NULL;
+}
+
+static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *acl;
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ u32 *flow_group_in;
+ /* The ingress acl table contains 4 groups
+ * (2 active rules at the same time -
+ * 1 allow rule from one of the first 3 groups.
+ * 1 drop rule from the last group):
+ * 1) Allow untagged traffic with smac=original mac.
+ * 2) Allow untagged traffic.
+ * 3) Allow traffic with smac=original mac.
+ * 4) Drop all other traffic.
+ */
+ int table_size = 4;
+ int err = 0;
+
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
+ !IS_ERR_OR_NULL(vport->ingress.acl))
+ return;
+
+ esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
+ vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+ return;
+ }
+
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return;
+
+ acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+ if (IS_ERR(acl)) {
+ err = PTR_ERR(acl);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.acl = acl;
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.allow_untagged_spoofchk_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.allow_untagged_only_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.allow_spoofchk_only_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.drop_grp = g;
+
+out:
+ if (err) {
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
+ mlx5_destroy_flow_group(
+ vport->ingress.allow_spoofchk_only_grp);
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
+ mlx5_destroy_flow_group(
+ vport->ingress.allow_untagged_only_grp);
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
+ mlx5_destroy_flow_group(
+ vport->ingress.allow_untagged_spoofchk_grp);
+ if (!IS_ERR_OR_NULL(vport->ingress.acl))
+ mlx5_destroy_flow_table(vport->ingress.acl);
+ }
+
+ kvfree(flow_group_in);
+}
+
+static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
+ mlx5_del_flow_rule(vport->ingress.drop_rule);
+
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
+ mlx5_del_flow_rule(vport->ingress.allow_rule);
+
+ vport->ingress.drop_rule = NULL;
+ vport->ingress.allow_rule = NULL;
+}
+
+static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (IS_ERR_OR_NULL(vport->ingress.acl))
+ return;
+
+ esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
+
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
+ mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
+ mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
+ mlx5_destroy_flow_group(vport->ingress.drop_grp);
+ mlx5_destroy_flow_table(vport->ingress.acl);
+ vport->ingress.acl = NULL;
+ vport->ingress.drop_grp = NULL;
+ vport->ingress.allow_spoofchk_only_grp = NULL;
+ vport->ingress.allow_untagged_only_grp = NULL;
+ vport->ingress.allow_untagged_spoofchk_grp = NULL;
+}
+
+static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ u8 smac[ETH_ALEN];
+ u32 *match_v;
+ u32 *match_c;
+ int err = 0;
+ u8 *smac_v;
+
+ if (vport->spoofchk) {
+ err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac);
+ if (err) {
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n",
+ vport->vport, err);
+ return err;
+ }
+
+ if (!is_valid_ether_addr(smac)) {
+ mlx5_core_warn(esw->dev,
+ "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
+ vport->vport);
+ return -EPERM;
+ }
+ }
+
+ esw_vport_cleanup_ingress_rules(esw, vport);
+
+ if (!vport->vlan && !vport->qos && !vport->spoofchk) {
+ esw_vport_disable_ingress_acl(esw, vport);
+ return 0;
+ }
+
+ esw_vport_enable_ingress_acl(esw, vport);
+
+ esw_debug(esw->dev,
+ "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
+ vport->vport, vport->vlan, vport->qos);
+
+ match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!match_v || !match_c) {
+ err = -ENOMEM;
+ esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ if (vport->vlan || vport->qos)
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
+
+ if (vport->spoofchk) {
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0);
+ smac_v = MLX5_ADDR_OF(fte_match_param,
+ match_v,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac_v, smac);
+ }
+
+ vport->ingress.allow_rule =
+ mlx5_add_flow_rule(vport->ingress.acl,
+ MLX5_MATCH_OUTER_HEADERS,
+ match_c,
+ match_v,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+ 0, NULL);
+ if (IS_ERR(vport->ingress.allow_rule)) {
+ err = PTR_ERR(vport->ingress.allow_rule);
+ pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.allow_rule = NULL;
+ goto out;
+ }
+
+ memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ vport->ingress.drop_rule =
+ mlx5_add_flow_rule(vport->ingress.acl,
+ 0,
+ match_c,
+ match_v,
+ MLX5_FLOW_CONTEXT_ACTION_DROP,
+ 0, NULL);
+ if (IS_ERR(vport->ingress.drop_rule)) {
+ err = PTR_ERR(vport->ingress.drop_rule);
+ pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.drop_rule = NULL;
+ goto out;
+ }
+
+out:
+ if (err)
+ esw_vport_cleanup_ingress_rules(esw, vport);
+
+ kfree(match_v);
+ kfree(match_c);
+ return err;
+}
+
+static int esw_vport_egress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ u32 *match_v;
+ u32 *match_c;
+ int err = 0;
+
+ esw_vport_cleanup_egress_rules(esw, vport);
+
+ if (!vport->vlan && !vport->qos) {
+ esw_vport_disable_egress_acl(esw, vport);
+ return 0;
+ }
+
+ esw_vport_enable_egress_acl(esw, vport);
+
+ esw_debug(esw->dev,
+ "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
+ vport->vport, vport->vlan, vport->qos);
+
+ match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!match_v || !match_c) {
+ err = -ENOMEM;
+ esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ /* Allowed vlan rule */
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);
+
+ vport->egress.allowed_vlan =
+ mlx5_add_flow_rule(vport->egress.acl,
+ MLX5_MATCH_OUTER_HEADERS,
+ match_c,
+ match_v,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+ 0, NULL);
+ if (IS_ERR(vport->egress.allowed_vlan)) {
+ err = PTR_ERR(vport->egress.allowed_vlan);
+ pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.allowed_vlan = NULL;
+ goto out;
+ }
+
+ /* Drop-all rule for any other traffic (star rule) */
+ memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ vport->egress.drop_rule =
+ mlx5_add_flow_rule(vport->egress.acl,
+ 0,
+ match_c,
+ match_v,
+ MLX5_FLOW_CONTEXT_ACTION_DROP,
+ 0, NULL);
+ if (IS_ERR(vport->egress.drop_rule)) {
+ err = PTR_ERR(vport->egress.drop_rule);
+ pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.drop_rule = NULL;
+ }
+out:
+ kfree(match_v);
+ kfree(match_c);
+ return err;
+}
+
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
- unsigned long flags;
+ mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
+
+ if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
+ esw_vport_ingress_config(esw, vport);
+ esw_vport_egress_config(esw, vport);
+ }
+
mlx5_modify_vport_admin_state(esw->dev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
vport_num,
@@ -725,53 +1492,29 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
/* Sync with current vport context */
vport->enabled_events = enable_events;
- esw_vport_change_handler(&vport->vport_change_handler);
-
- spin_lock_irqsave(&vport->lock, flags);
vport->enabled = true;
- spin_unlock_irqrestore(&vport->lock, flags);
- arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
+ /* only PF is trusted by default */
+ vport->trusted = (vport_num) ? false : true;
+ esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
-}
-
-static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num)
-{
- struct mlx5_vport *vport = &esw->vports[vport_num];
- struct l2addr_node *node;
- struct vport_addr *addr;
- struct hlist_node *tmp;
- int hi;
-
- for_each_l2hash_node(node, tmp, vport->uc_list, hi) {
- addr = container_of(node, struct vport_addr, node);
- addr->action = MLX5_ACTION_DEL;
- }
- esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_UC);
-
- for_each_l2hash_node(node, tmp, vport->mc_list, hi) {
- addr = container_of(node, struct vport_addr, node);
- addr->action = MLX5_ACTION_DEL;
- }
- esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_MC);
+ mutex_unlock(&esw->state_lock);
}
static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
- unsigned long flags;
if (!vport->enabled)
return;
esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
/* Mark this vport as disabled to discard new events */
- spin_lock_irqsave(&vport->lock, flags);
vport->enabled = false;
- vport->enabled_events = 0;
- spin_unlock_irqrestore(&vport->lock, flags);
+
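+ /* wait for any in-flight async EQ interrupt handler so that no new
+ * change events can be queued for this vport once it is disabled
+ */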
+ synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
mlx5_modify_vport_admin_state(esw->dev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
@@ -781,9 +1524,19 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
flush_workqueue(esw->work_queue);
/* Disable events from this vport */
arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
- /* We don't assume VFs will cleanup after themselves */
- esw_cleanup_vport(esw, vport_num);
+ mutex_lock(&esw->state_lock);
+ /* We don't assume VFs will cleanup after themselves.
+ * Calling vport change handler while vport is disabled will cleanup
+ * the vport resources.
+ */
+ esw_vport_change_handle_locked(vport);
+ vport->enabled_events = 0;
+ if (vport_num) {
+ esw_vport_disable_egress_acl(esw, vport);
+ esw_vport_disable_ingress_acl(esw, vport);
+ }
esw->enabled_vports--;
+ mutex_unlock(&esw->state_lock);
}
/* Public E-Switch API */
@@ -802,6 +1555,12 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
return -ENOTSUPP;
}
+ if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
+ esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
+
+ if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
+ esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
+
esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);
esw_disable_vport(esw, 0);
@@ -824,6 +1583,7 @@ abort:
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
+ struct esw_mc_addr *mc_promisc;
int i;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
@@ -833,9 +1593,14 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
esw->enabled_vports);
+ mc_promisc = esw->mc_promisc;
+
for (i = 0; i < esw->total_vports; i++)
esw_disable_vport(esw, i);
+ if (mc_promisc && mc_promisc->uplink_rule)
+ mlx5_del_flow_rule(mc_promisc->uplink_rule);
+
esw_destroy_fdb_table(esw);
/* VPORT 0 (PF) must be enabled back with non-sriov configuration */
@@ -845,7 +1610,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
- int total_vports = 1 + pci_sriov_get_totalvfs(dev->pdev);
+ int total_vports = MLX5_TOTAL_VPORTS(dev);
+ struct esw_mc_addr *mc_promisc;
struct mlx5_eswitch *esw;
int vport_num;
int err;
@@ -874,6 +1640,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
}
esw->l2_table.size = l2_table_size;
+ mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
+ if (!mc_promisc) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ esw->mc_promisc = mc_promisc;
+
esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
if (!esw->work_queue) {
err = -ENOMEM;
@@ -887,6 +1660,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
+ mutex_init(&esw->state_lock);
+
for (vport_num = 0; vport_num < total_vports; vport_num++) {
struct mlx5_vport *vport = &esw->vports[vport_num];
@@ -894,7 +1669,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
vport->dev = dev;
INIT_WORK(&vport->vport_change_handler,
esw_vport_change_handler);
- spin_lock_init(&vport->lock);
}
esw->total_vports = total_vports;
@@ -925,6 +1699,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
kfree(esw->l2_table.bitmap);
+ kfree(esw->mc_promisc);
kfree(esw->vports);
kfree(esw);
}
@@ -942,10 +1717,8 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
}
vport = &esw->vports[vport_num];
- spin_lock(&vport->lock);
if (vport->enabled)
queue_work(esw->work_queue, &vport->vport_change_handler);
- spin_unlock(&vport->lock);
}
/* Vport Administration */
@@ -953,9 +1726,23 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
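+/* Derive an IEEE EUI-64 node GUID from a MAC address by inserting the
+ * 0xff,0xfe marker between its two halves; e.g. MAC 00:11:22:33:44:55
+ * yields node GUID 00:11:22:ff:fe:33:44:55.
+ */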
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+ ((u8 *)node_guid)[7] = mac[0];
+ ((u8 *)node_guid)[6] = mac[1];
+ ((u8 *)node_guid)[5] = mac[2];
+ ((u8 *)node_guid)[4] = 0xff;
+ ((u8 *)node_guid)[3] = 0xfe;
+ ((u8 *)node_guid)[2] = mac[3];
+ ((u8 *)node_guid)[1] = mac[4];
+ ((u8 *)node_guid)[0] = mac[5];
+}
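node_guid_gen_from_mac() is the standard EUI-64 expansion: the 0xff,0xfe pair is inserted between the OUI (first three MAC bytes) and the device-specific half, and the u64 is filled so byte 7 holds the most significant octet on a little-endian host. A minimal standalone sketch of the mapping — the main() harness and uint*_t types are illustrative; only the helper body comes from the patch:

	#include <stdio.h>
	#include <stdint.h>

	#define ETH_ALEN 6

	static void node_guid_gen_from_mac(uint64_t *node_guid, const uint8_t mac[ETH_ALEN])
	{
		((uint8_t *)node_guid)[7] = mac[0];
		((uint8_t *)node_guid)[6] = mac[1];
		((uint8_t *)node_guid)[5] = mac[2];
		((uint8_t *)node_guid)[4] = 0xff;
		((uint8_t *)node_guid)[3] = 0xfe;
		((uint8_t *)node_guid)[2] = mac[3];
		((uint8_t *)node_guid)[1] = mac[4];
		((uint8_t *)node_guid)[0] = mac[5];
	}

	int main(void)
	{
		const uint8_t mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		uint64_t guid = 0;
		int i;

		node_guid_gen_from_mac(&guid, mac);
		/* prints 00:11:22:ff:fe:33:44:55 on a little-endian host */
		for (i = 7; i >= 0; i--)
			printf("%02x%c", ((uint8_t *)&guid)[i], i ? ':' : '\n');
		return 0;
	}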
+
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
+ struct mlx5_vport *evport;
+ u64 node_guid;
int err = 0;
if (!ESW_ALLOWED(esw))
@@ -963,6 +1750,15 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+ evport = &esw->vports[vport];
+
+ if (evport->spoofchk && !is_valid_ether_addr(mac)) {
+ mlx5_core_warn(esw->dev,
+ "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+ vport);
+ return -EPERM;
+ }
+
err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
if (err) {
mlx5_core_warn(esw->dev,
@@ -971,6 +1767,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
return err;
}
+ node_guid_gen_from_mac(&node_guid, mac);
+ err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+ if (err)
+ mlx5_core_warn(esw->dev,
+ "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
+ vport, err);
+
+ mutex_lock(&esw->state_lock);
+ if (evport->enabled)
+ err = esw_vport_ingress_config(esw, evport);
+ mutex_unlock(&esw->state_lock);
return err;
}
@@ -990,6 +1797,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi)
{
+ struct mlx5_vport *evport;
u16 vlan;
u8 qos;
@@ -998,6 +1806,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+ evport = &esw->vports[vport];
+
memset(ivi, 0, sizeof(*ivi));
ivi->vf = vport - 1;
@@ -1008,7 +1818,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
ivi->vlan = vlan;
ivi->qos = qos;
- ivi->spoofchk = 0;
+ ivi->spoofchk = evport->spoofchk;
return 0;
}
@@ -1016,6 +1826,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos)
{
+ struct mlx5_vport *evport;
+ int err = 0;
int set = 0;
if (!ESW_ALLOWED(esw))
@@ -1026,7 +1838,72 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
if (vlan || qos)
set = 1;
- return modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+ evport = &esw->vports[vport];
+
+ err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+ if (err)
+ return err;
+
+ mutex_lock(&esw->state_lock);
+ evport->vlan = vlan;
+ evport->qos = qos;
+ if (evport->enabled) {
+ err = esw_vport_ingress_config(esw, evport);
+ if (err)
+ goto out;
+ err = esw_vport_egress_config(esw, evport);
+ }
+
+out:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+ int vport, bool spoofchk)
+{
+ struct mlx5_vport *evport;
+ bool pschk;
+ int err = 0;
+
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+ if (!LEGAL_VPORT(esw, vport))
+ return -EINVAL;
+
+ evport = &esw->vports[vport];
+
+ mutex_lock(&esw->state_lock);
+ pschk = evport->spoofchk;
+ evport->spoofchk = spoofchk;
+ if (evport->enabled)
+ err = esw_vport_ingress_config(esw, evport);
+ if (err)
+ evport->spoofchk = pschk;
+ mutex_unlock(&esw->state_lock);
+
+ return err;
+}
+
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+ int vport, bool setting)
+{
+ struct mlx5_vport *evport;
+
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+ if (!LEGAL_VPORT(esw, vport))
+ return -EINVAL;
+
+ evport = &esw->vports[vport];
+
+ mutex_lock(&esw->state_lock);
+ evport->trusted = setting;
+ if (evport->enabled)
+ esw_vport_change_handle_locked(evport);
+ mutex_unlock(&esw->state_lock);
+
+ return 0;
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
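The two new setters complete the per-VF admin surface of the E-Switch API. A hedged sketch of how a netdev driver might wire them into the standard ndo VF callbacks — the mlx5e_priv access path is an assumption here; only the mlx5_eswitch_* entry points come from this patch:

	/* Hypothetical glue; assumes priv->mdev as in the mlx5 Ethernet driver. */
	static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
	{
		struct mlx5e_priv *priv = netdev_priv(dev);

		/* vport 0 is the PF, so VF n maps to vport n + 1 */
		return mlx5_eswitch_set_vport_spoofchk(priv->mdev->priv.eswitch,
						       vf + 1, setting);
	}

	static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
	{
		struct mlx5e_priv *priv = netdev_priv(dev);

		return mlx5_eswitch_set_vport_trust(priv->mdev->priv.eswitch,
						    vf + 1, setting);
	}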
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 3416a428f..fd6800256 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -88,18 +88,40 @@ struct l2addr_node {
kfree(ptr); \
})
+struct vport_ingress {
+ struct mlx5_flow_table *acl;
+ struct mlx5_flow_group *allow_untagged_spoofchk_grp;
+ struct mlx5_flow_group *allow_spoofchk_only_grp;
+ struct mlx5_flow_group *allow_untagged_only_grp;
+ struct mlx5_flow_group *drop_grp;
+ struct mlx5_flow_rule *allow_rule;
+ struct mlx5_flow_rule *drop_rule;
+};
+
+struct vport_egress {
+ struct mlx5_flow_table *acl;
+ struct mlx5_flow_group *allowed_vlans_grp;
+ struct mlx5_flow_group *drop_grp;
+ struct mlx5_flow_rule *allowed_vlan;
+ struct mlx5_flow_rule *drop_rule;
+};
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
int vport;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
+ struct mlx5_flow_rule *promisc_rule;
+ struct mlx5_flow_rule *allmulti_rule;
struct work_struct vport_change_handler;
- /* This spinlock protects access to vport data, between
- * "esw_vport_disable" and ongoing interrupt "mlx5_eswitch_vport_event"
- * once vport marked as disabled new interrupts are discarded.
- */
- spinlock_t lock; /* vport events sync */
+ struct vport_ingress ingress;
+ struct vport_egress egress;
+
+ u16 vlan;
+ u8 qos;
+ bool spoofchk;
+ bool trusted;
bool enabled;
u16 enabled_events;
};
@@ -113,6 +135,8 @@ struct mlx5_l2_table {
struct mlx5_eswitch_fdb {
void *fdb;
struct mlx5_flow_group *addr_grp;
+ struct mlx5_flow_group *allmulti_grp;
+ struct mlx5_flow_group *promisc_grp;
};
struct mlx5_eswitch {
@@ -124,6 +148,11 @@ struct mlx5_eswitch {
struct mlx5_vport *vports;
int total_vports;
int enabled_vports;
+ /* Synchronize between vport change events
+ * and async SRIOV admin state changes
+ */
+ struct mutex state_lock;
+ struct esw_mc_addr *mc_promisc;
};
/* E-Switch API */
@@ -138,6 +167,10 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos);
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+ int vport, bool spoofchk);
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+ int vport_num, bool setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index f46f1db0f..a5bb6b695 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -50,6 +50,10 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
+ MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
+ }
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
@@ -57,6 +61,7 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
}
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+ u16 vport,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id)
@@ -77,6 +82,10 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_table_in, in, table_type, type);
MLX5_SET(create_flow_table_in, in, level, level);
MLX5_SET(create_flow_table_in, in, log_size, log_size);
+ if (vport) {
+ MLX5_SET(create_flow_table_in, in, vport_number, vport);
+ MLX5_SET(create_flow_table_in, in, other_vport, 1);
+ }
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
@@ -101,6 +110,10 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
+ }
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
@@ -120,6 +133,10 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
MLX5_CMD_OP_MODIFY_FLOW_TABLE);
MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(modify_flow_table_in, in, other_vport, 1);
+ }
MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
@@ -148,6 +165,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, ft->type);
MLX5_SET(create_flow_group_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
+ MLX5_SET(create_flow_group_in, in, other_vport, 1);
+ }
err = mlx5_cmd_exec_check_status(dev, in,
inlen, out,
@@ -174,6 +195,10 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
+ if (ft->vport) {
+ MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
+ MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
+ }
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
@@ -207,22 +232,29 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
+ if (ft->vport) {
+ MLX5_SET(set_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(set_fte_in, in, other_vport, 1);
+ }
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action);
- MLX5_SET(flow_context, in_flow_context, destination_list_size,
- fte->dests_size);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
+ in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+ int list_size = 0;
+
list_for_each_entry(dst, &fte->node.children, node.list) {
unsigned int id;
+ if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
MLX5_SET(dest_format_struct, in_dests, destination_type,
dst->dest_attr.type);
if (dst->dest_attr.type ==
@@ -233,8 +265,31 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
}
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+ list_size++;
}
+
+ MLX5_SET(flow_context, in_flow_context, destination_list_size,
+ list_size);
}
+
+ if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ int list_size = 0;
+
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ if (dst->dest_attr.type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
+ dst->dest_attr.counter->id);
+ in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+ list_size++;
+ }
+
+ MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
+ list_size);
+ }
+
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
sizeof(out));
@@ -254,18 +309,16 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned group_id,
+ int modify_mask,
struct fs_fte *fte)
{
int opmod;
- int modify_mask;
int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.
flow_modify_en);
if (!atomic_mod_cap)
return -ENOTSUPP;
opmod = 1;
- modify_mask = 1 <<
- MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST;
return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
}
@@ -285,8 +338,78 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
MLX5_SET(delete_fte_in, in, table_type, ft->type);
MLX5_SET(delete_fte_in, in, table_id, ft->id);
MLX5_SET(delete_fte_in, in, flow_index, index);
+ if (ft->vport) {
+ MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(delete_fte_in, in, other_vport, 1);
+ }
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
return err;
}
+
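Every command wrapper above programs the same vport_number/other_vport pair when the table belongs to another vport. Since MLX5_SET() takes the struct layout as a type token, the repetition could be folded into one helper — a hypothetical refactoring sketch, not part of the patch:

	#define MLX5_SET_FT_VPORT(typ, p, ft)                                \
		do {                                                         \
			if ((ft)->vport) {                                   \
				MLX5_SET(typ, p, vport_number, (ft)->vport); \
				MLX5_SET(typ, p, other_vport, 1);            \
			}                                                    \
		} while (0)

	/* usage: MLX5_SET_FT_VPORT(create_flow_table_in, in, ft); */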
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(alloc_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+
+ return 0;
+}
+
+int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(dealloc_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
+ MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes)
+{
+ u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+ MLX5_ST_SZ_BYTES(traffic_counter)];
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
+ void *stats;
+ int err = 0;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(query_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_FLOW_COUNTER);
+ MLX5_SET(query_flow_counter_in, in, op_mod, 0);
+ MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
+ *packets = MLX5_GET64(traffic_counter, stats, packets);
+ *bytes = MLX5_GET64(traffic_counter, stats, octets);
+
+ return 0;
+}
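Taken together, the three commands give a complete flow-counter life cycle. A hedged usage sketch — the demo function name is hypothetical; the calls and their signatures are exactly as defined above:

	static int demo_flow_counter_roundtrip(struct mlx5_core_dev *dev)
	{
		u64 packets, bytes;
		u16 id;
		int err;

		err = mlx5_cmd_fc_alloc(dev, &id);
		if (err)
			return err;

		err = mlx5_cmd_fc_query(dev, id, &packets, &bytes);
		if (!err)
			mlx5_core_dbg(dev, "counter %d: %llu packets, %llu bytes\n",
				      id, (unsigned long long)packets,
				      (unsigned long long)bytes);

		mlx5_cmd_fc_free(dev, id);
		return err;
	}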
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 9814d4784..fc4f7b83f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -34,6 +34,7 @@
#define _MLX5_FS_CMD_
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+ u16 vport,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id);
@@ -61,6 +62,7 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned group_id,
+ int modify_mask,
struct fs_fte *fte);
int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
@@ -69,4 +71,9 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft);
+
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id);
+int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id);
+int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 89cce97d4..e912a3d25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -40,18 +40,18 @@
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
-#define ADD_PRIO(num_prios_val, min_level_val, max_ft_val, caps_val,\
+#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
...) {.type = FS_TYPE_PRIO,\
.min_ft_level = min_level_val,\
- .max_ft = max_ft_val,\
+ .num_levels = num_levels_val,\
.num_leaf_prios = num_prios_val,\
.caps = caps_val,\
.children = (struct init_tree_node[]) {__VA_ARGS__},\
.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}
-#define ADD_MULTIPLE_PRIO(num_prios_val, max_ft_val, ...)\
- ADD_PRIO(num_prios_val, 0, max_ft_val, {},\
+#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
+ ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
__VA_ARGS__)\
#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
@@ -67,17 +67,20 @@
#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
.caps = (long[]) {__VA_ARGS__} }
-#define LEFTOVERS_MAX_FT 1
+#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1
-#define BY_PASS_PRIO_MAX_FT 1
-#define BY_PASS_MIN_LEVEL (KENREL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
- LEFTOVERS_MAX_FT)
-#define KERNEL_MAX_FT 3
-#define KERNEL_NUM_PRIOS 2
-#define KENREL_MIN_LEVEL 2
+#define BY_PASS_PRIO_NUM_LEVELS 1
+#define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
+ LEFTOVERS_NUM_PRIOS)
-#define ANCHOR_MAX_FT 1
+/* VLAN, MAC, TTC, aRFS */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 4
+#define KERNEL_NIC_NUM_PRIOS 1
+/* One more level for tc */
+#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
+
+#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
struct node_caps {
@@ -92,7 +95,7 @@ static struct init_tree_node {
int min_ft_level;
int num_leaf_prios;
int prio;
- int max_ft;
+ int num_levels;
} root_fs = {
.type = FS_TYPE_NAMESPACE,
.ar_size = 4,
@@ -102,17 +105,20 @@ static struct init_tree_node {
FS_CAP(flow_table_properties_nic_receive.modify_root),
FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
- ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_MAX_FT))),
- ADD_PRIO(0, KENREL_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NUM_PRIOS, KERNEL_MAX_FT))),
+ ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
+ ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
+ KERNEL_NIC_PRIO_NUM_LEVELS))),
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
FS_CAP(flow_table_properties_nic_receive.modify_root),
FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
- ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_MAX_FT))),
+ ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_MAX_FT))),
+ ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
}
};
@@ -222,19 +228,6 @@ static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
return NULL;
}
-static unsigned int find_next_free_level(struct fs_prio *prio)
-{
- if (!list_empty(&prio->node.children)) {
- struct mlx5_flow_table *ft;
-
- ft = list_last_entry(&prio->node.children,
- struct mlx5_flow_table,
- node.list);
- return ft->level + 1;
- }
- return prio->start_level;
-}
-
static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
{
unsigned int i;
@@ -351,6 +344,7 @@ static void del_rule(struct fs_node *node)
struct mlx5_flow_group *fg;
struct fs_fte *fte;
u32 *match_value;
+ int modify_mask;
struct mlx5_core_dev *dev = get_dev(node);
int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
int err;
@@ -374,8 +368,11 @@ static void del_rule(struct fs_node *node)
}
if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
--fte->dests_size) {
+ modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
err = mlx5_cmd_update_fte(dev, ft,
- fg->id, fte);
+ fg->id,
+ modify_mask,
+ fte);
if (err)
pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
__func__, fg->id, fte->index);
@@ -464,7 +461,7 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
return fg;
}
-static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
+static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
enum fs_flow_table_type table_type)
{
struct mlx5_flow_table *ft;
@@ -476,6 +473,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
ft->level = level;
ft->node.type = FS_TYPE_FLOW_TABLE;
ft->type = table_type;
+ ft->vport = vport;
ft->max_fte = max_fte;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
@@ -615,12 +613,13 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
return err;
}
-static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
- struct mlx5_flow_destination *dest)
+int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+ struct mlx5_flow_destination *dest)
{
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct fs_fte *fte;
+ int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
int err = 0;
fs_get_obj(fte, rule->node.parent);
@@ -632,7 +631,9 @@ static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
memcpy(&rule->dest_attr, dest, sizeof(*dest));
err = mlx5_cmd_update_fte(get_dev(&ft->node),
- ft, fg->id, fte);
+ ft, fg->id,
+ modify_mask,
+ fte);
unlock_ref_node(&fte->node);
return err;
@@ -693,9 +694,23 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table
return err;
}
-struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int max_fte)
+static void list_add_flow_table(struct mlx5_flow_table *ft,
+ struct fs_prio *prio)
+{
+ struct list_head *prev = &prio->node.children;
+ struct mlx5_flow_table *iter;
+
+ fs_for_each_ft(iter, prio) {
+ if (iter->level > ft->level)
+ break;
+ prev = &iter->node.list;
+ }
+ list_add(&ft->node.list, prev);
+}
+
+static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ u16 vport, int prio,
+ int max_fte, u32 level)
{
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_table *ft;
@@ -716,12 +731,16 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
err = -EINVAL;
goto unlock_root;
}
- if (fs_prio->num_ft == fs_prio->max_ft) {
+ if (level >= fs_prio->num_levels) {
err = -ENOSPC;
goto unlock_root;
}
-
- ft = alloc_flow_table(find_next_free_level(fs_prio),
+ /* The requested level is an offset within
+ * the priority's level range.
+ */
+ level += fs_prio->start_level;
+ ft = alloc_flow_table(level,
+ vport,
roundup_pow_of_two(max_fte),
root->table_type);
if (!ft) {
@@ -732,7 +751,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
tree_init_node(&ft->node, 1, del_flow_table);
log_table_sz = ilog2(ft->max_fte);
next_ft = find_next_chained_ft(fs_prio);
- err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level,
+ err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level,
log_table_sz, next_ft, &ft->id);
if (err)
goto free_ft;
@@ -742,7 +761,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
goto destroy_ft;
lock_ref_node(&fs_prio->node);
tree_add_node(&ft->node, &fs_prio->node);
- list_add_tail(&ft->node.list, &fs_prio->node.children);
+ list_add_flow_table(ft, fs_prio);
fs_prio->num_ft++;
unlock_ref_node(&fs_prio->node);
mutex_unlock(&root->chain_lock);
@@ -756,17 +775,32 @@ unlock_root:
return ERR_PTR(err);
}
+struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ int prio, int max_fte,
+ u32 level)
+{
+ return __mlx5_create_flow_table(ns, 0, prio, max_fte, level);
+}
+
+struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+ int prio, int max_fte,
+ u32 level, u16 vport)
+{
+ return __mlx5_create_flow_table(ns, vport, prio, max_fte, level);
+}
+
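With explicit levels, callers place tables at fixed positions inside a priority instead of relying on the removed find_next_free_level() creation-order behavior; list_add_flow_table() keeps the prio's children sorted by level so chaining stays correct. A hedged sketch — the prio, sizes and two-table layout are illustrative:

	static int demo_two_level_tables(struct mlx5_flow_namespace *ns)
	{
		struct mlx5_flow_table *vlan_ft, *mac_ft;

		vlan_ft = mlx5_create_flow_table(ns, 0 /* prio */, 64 /* ftes */,
						 0 /* level */);
		if (IS_ERR(vlan_ft))
			return PTR_ERR(vlan_ft);

		/* level 1 sits behind level 0 regardless of creation order */
		mac_ft = mlx5_create_flow_table(ns, 0, 64, 1);
		if (IS_ERR(mac_ft)) {
			mlx5_destroy_flow_table(vlan_ft);
			return PTR_ERR(mac_ft);
		}

		return 0;
	}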
struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
- int max_num_groups)
+ int max_num_groups,
+ u32 level)
{
struct mlx5_flow_table *ft;
if (max_num_groups > num_flow_table_entries)
return ERR_PTR(-EINVAL);
- ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries);
+ ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level);
if (IS_ERR(ft))
return ft;
@@ -850,6 +884,7 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
{
struct mlx5_flow_table *ft;
struct mlx5_flow_rule *rule;
+ int modify_mask = 0;
int err;
rule = alloc_rule(dest);
@@ -865,14 +900,20 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
list_add(&rule->node.list, &fte->node.children);
else
list_add_tail(&rule->node.list, &fte->node.children);
- if (dest)
+ if (dest) {
fte->dests_size++;
+
+ modify_mask |= dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ?
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS) :
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+ }
+
if (fte->dests_size == 1 || !dest)
err = mlx5_cmd_create_fte(get_dev(&ft->node),
ft, fg->id, fte);
else
err = mlx5_cmd_update_fte(get_dev(&ft->node),
- ft, fg->id, fte);
+ ft, fg->id, modify_mask, fte);
if (err)
goto free_rule;
@@ -1065,6 +1106,50 @@ unlock_fg:
return rule;
}
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule)
+{
+ struct mlx5_flow_rule *dst;
+ struct fs_fte *fte;
+
+ fs_get_obj(fte, rule->node.parent);
+
+ fs_for_each_dst(dst, fte) {
+ if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ return dst->dest_attr.counter;
+ }
+
+ return NULL;
+}
+
+static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
+{
+ if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
+ return !counter;
+
+ if (!counter)
+ return false;
+
+ /* Hardware supports counters for the drop action only */
+ return action == (MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT);
+}
+
+static bool dest_is_valid(struct mlx5_flow_destination *dest,
+ u32 action,
+ struct mlx5_flow_table *ft)
+{
+ if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
+ return counter_is_valid(dest->counter, action);
+
+ if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ return true;
+
+ if (!dest || ((dest->type ==
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
+ (dest->ft->level <= ft->level)))
+ return false;
+ return true;
+}
+
static struct mlx5_flow_rule *
_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
u8 match_criteria_enable,
@@ -1077,7 +1162,7 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
struct mlx5_flow_group *g;
struct mlx5_flow_rule *rule;
- if ((action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && !dest)
+ if (!dest_is_valid(dest, action, ft))
return ERR_PTR(-EINVAL);
nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
@@ -1207,8 +1292,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
ft->id);
return err;
}
- root->root_ft = new_root_ft;
}
+ root->root_ft = new_root_ft;
return 0;
}
@@ -1294,6 +1379,16 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
return &dev->priv.fdb_root_ns->ns;
else
return NULL;
+ case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+ if (dev->priv.esw_egress_root_ns)
+ return &dev->priv.esw_egress_root_ns->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+ if (dev->priv.esw_ingress_root_ns)
+ return &dev->priv.esw_ingress_root_ns->ns;
+ else
+ return NULL;
default:
return NULL;
}
@@ -1311,7 +1406,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
EXPORT_SYMBOL(mlx5_get_flow_namespace);
static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
- unsigned prio, int max_ft)
+ unsigned int prio, int num_levels)
{
struct fs_prio *fs_prio;
@@ -1322,7 +1417,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
fs_prio->node.type = FS_TYPE_PRIO;
tree_init_node(&fs_prio->node, 1, NULL);
tree_add_node(&fs_prio->node, &ns->node);
- fs_prio->max_ft = max_ft;
+ fs_prio->num_levels = num_levels;
fs_prio->prio = prio;
list_add_tail(&fs_prio->node.list, &ns->node.children);
@@ -1353,14 +1448,14 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
return ns;
}
-static int create_leaf_prios(struct mlx5_flow_namespace *ns, struct init_tree_node
- *prio_metadata)
+static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
+ struct init_tree_node *prio_metadata)
{
struct fs_prio *fs_prio;
int i;
for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
- fs_prio = fs_create_prio(ns, i, prio_metadata->max_ft);
+ fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
if (IS_ERR(fs_prio))
return PTR_ERR(fs_prio);
}
@@ -1387,7 +1482,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
struct init_tree_node *init_node,
struct fs_node *fs_parent_node,
struct init_tree_node *init_parent_node,
- int index)
+ int prio)
{
int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.
@@ -1405,8 +1500,8 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
fs_get_obj(fs_ns, fs_parent_node);
if (init_node->num_leaf_prios)
- return create_leaf_prios(fs_ns, init_node);
- fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft);
+ return create_leaf_prios(fs_ns, prio, init_node);
+ fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
if (IS_ERR(fs_prio))
return PTR_ERR(fs_prio);
base = &fs_prio->node;
@@ -1419,11 +1514,16 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
} else {
return -EINVAL;
}
+ prio = 0;
for (i = 0; i < init_node->ar_size; i++) {
err = init_root_tree_recursive(dev, &init_node->children[i],
- base, init_node, i);
+ base, init_node, prio);
if (err)
return err;
+ if (init_node->children[i].type == FS_TYPE_PRIO &&
+ init_node->children[i].num_leaf_prios) {
+ prio += init_node->children[i].num_leaf_prios;
+ }
}
return 0;
@@ -1479,9 +1579,9 @@ static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
struct fs_prio *prio;
fs_for_each_prio(prio, ns) {
- /* This updates prio start_level and max_ft */
+ /* This updates prio start_level and num_levels */
set_prio_attrs_in_prio(prio, acc_level);
- acc_level += prio->max_ft;
+ acc_level += prio->num_levels;
}
return acc_level;
}
@@ -1493,11 +1593,11 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
prio->start_level = acc_level;
fs_for_each_ns(ns, prio)
- /* This updates start_level and max_ft of ns's priority descendants */
+ /* This updates start_level and num_levels of ns's priority descendants */
acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
- if (!prio->max_ft)
- prio->max_ft = acc_level_ns - prio->start_level;
- WARN_ON(prio->max_ft < acc_level_ns - prio->start_level);
+ if (!prio->num_levels)
+ prio->num_levels = acc_level_ns - prio->start_level;
+ WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}
static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
@@ -1508,12 +1608,13 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
fs_for_each_prio(prio, ns) {
set_prio_attrs_in_prio(prio, start_level);
- start_level += prio->max_ft;
+ start_level += prio->num_levels;
}
}
#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
+#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_core_dev
*dev)
{
@@ -1523,7 +1624,7 @@ static int create_anchor_flow_table(struct mlx5_core_dev
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR);
if (!ns)
return -EINVAL;
- ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE);
+ ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
if (IS_ERR(ft)) {
mlx5_core_err(dev, "Failed to create last anchor flow table");
return PTR_ERR(ft);
@@ -1666,8 +1767,14 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev)
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return;
+
cleanup_root_ns(dev);
cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
+ cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
+ cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns);
+ mlx5_cleanup_fc_stats(dev);
}
static int init_fdb_root_ns(struct mlx5_core_dev *dev)
@@ -1688,20 +1795,76 @@ static int init_fdb_root_ns(struct mlx5_core_dev *dev)
}
}
+static int init_egress_acl_root_ns(struct mlx5_core_dev *dev)
+{
+ struct fs_prio *prio;
+
+ dev->priv.esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL);
+ if (!dev->priv.esw_egress_root_ns)
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&dev->priv.esw_egress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev));
+ if (IS_ERR(prio))
+ return PTR_ERR(prio);
+ else
+ return 0;
+}
+
+static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
+{
+ struct fs_prio *prio;
+
+ dev->priv.esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL);
+ if (!dev->priv.esw_ingress_root_ns)
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&dev->priv.esw_ingress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev));
+ if (IS_ERR(prio))
+ return PTR_ERR(prio);
+ else
+ return 0;
+}
+
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
int err = 0;
- if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return 0;
+
+ err = mlx5_init_fc_stats(dev);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_GEN(dev, nic_flow_table) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
err = init_root_ns(dev);
if (err)
- return err;
+ goto err;
}
+
if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
- err = init_fdb_root_ns(dev);
- if (err)
- cleanup_root_ns(dev);
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
+ err = init_fdb_root_ns(dev);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+ err = init_egress_acl_root_ns(dev);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+ err = init_ingress_acl_root_ns(dev);
+ if (err)
+ goto err;
+ }
}
+ return 0;
+err:
+ mlx5_cleanup_fs(dev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index f37a6248a..aa41a7314 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -45,8 +45,10 @@ enum fs_node_type {
};
enum fs_flow_table_type {
- FS_FT_NIC_RX = 0x0,
- FS_FT_FDB = 0X4,
+ FS_FT_NIC_RX = 0x0,
+ FS_FT_ESW_EGRESS_ACL = 0x2,
+ FS_FT_ESW_INGRESS_ACL = 0x3,
+ FS_FT_FDB = 0X4,
};
enum fs_fte_status {
@@ -79,6 +81,7 @@ struct mlx5_flow_rule {
struct mlx5_flow_table {
struct fs_node node;
u32 id;
+ u16 vport;
unsigned int max_fte;
unsigned int level;
enum fs_flow_table_type type;
@@ -93,6 +96,28 @@ struct mlx5_flow_table {
struct list_head fwd_rules;
};
+struct mlx5_fc_cache {
+ u64 packets;
+ u64 bytes;
+ u64 lastuse;
+};
+
+struct mlx5_fc {
+ struct list_head list;
+
+ /* last{packets,bytes} members are used when calculating the delta since
+ * last reading
+ */
+ u64 lastpackets;
+ u64 lastbytes;
+
+ u16 id;
+ bool deleted;
+ bool aging;
+
+ struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
+};
+
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
@@ -102,12 +127,13 @@ struct fs_fte {
u32 index;
u32 action;
enum fs_fte_status status;
+ struct mlx5_fc *counter;
};
/* Type of children is mlx5_flow_table/namespace */
struct fs_prio {
struct fs_node node;
- unsigned int max_ft;
+ unsigned int num_levels;
unsigned int start_level;
unsigned int prio;
unsigned int num_ft;
@@ -143,6 +169,9 @@ struct mlx5_flow_root_namespace {
struct mutex chain_lock;
};
+int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
+void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
+
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
new file mode 100644
index 000000000..164dc37fd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "fs_cmd.h"
+
+#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
+
+/* locking scheme:
+ *
+ * It is the responsibility of the user to prevent concurrent calls or bad
+ * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
+ * to struct mlx5_fc.
+ * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never call a
+ * dump (access to struct mlx5_fc) after a counter is destroyed.
+ *
+ * access to counter list:
+ * - create (user context)
+ * - mlx5_fc_create() only adds to an addlist to be used by
+ * mlx5_fc_stats_work(). addlist is protected by a spinlock.
+ * - spawn the stats work to do the actual add
+ *
+ * - destroy (user context)
+ * - mark a counter as deleted
+ * - spawn thread to do the actual del
+ *
+ * - dump (user context)
+ * user should not call dump after destroy
+ *
+ * - query (single thread workqueue context)
+ * destroy/dump - no conflict (see destroy)
+ * query/dump - packets and bytes might be inconsistent (since update is not
+ * atomic)
+ * query/create - no conflict (see create)
+ * since every create/destroy spawns the work, the thread will only
+ * query the hardware after the necessary time has elapsed.
+ */
+
+static void mlx5_fc_stats_work(struct work_struct *work)
+{
+ struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
+ priv.fc_stats.work.work);
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ unsigned long now = jiffies;
+ struct mlx5_fc *counter;
+ struct mlx5_fc *tmp;
+ int err = 0;
+
+ spin_lock(&fc_stats->addlist_lock);
+
+ list_splice_tail_init(&fc_stats->addlist, &fc_stats->list);
+
+ if (!list_empty(&fc_stats->list))
+ queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
+
+ spin_unlock(&fc_stats->addlist_lock);
+
+ list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) {
+ struct mlx5_fc_cache *c = &counter->cache;
+ u64 packets;
+ u64 bytes;
+
+ if (counter->deleted) {
+ list_del(&counter->list);
+
+ mlx5_cmd_fc_free(dev, counter->id);
+
+ kfree(counter);
+ continue;
+ }
+
+ if (time_before(now, fc_stats->next_query))
+ continue;
+
+ err = mlx5_cmd_fc_query(dev, counter->id, &packets, &bytes);
+ if (err) {
+ pr_err("Error querying stats for counter id %d\n",
+ counter->id);
+ continue;
+ }
+
+ if (packets == c->packets)
+ continue;
+
+ c->lastuse = jiffies;
+ c->packets = packets;
+ c->bytes = bytes;
+ }
+
+ if (time_after_eq(now, fc_stats->next_query))
+ fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
+}
+
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc *counter;
+ int err;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_cmd_fc_alloc(dev, &counter->id);
+ if (err)
+ goto err_out;
+
+ if (aging) {
+ counter->aging = true;
+
+ spin_lock(&fc_stats->addlist_lock);
+ list_add(&counter->list, &fc_stats->addlist);
+ spin_unlock(&fc_stats->addlist_lock);
+
+ mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
+ }
+
+ return counter;
+
+err_out:
+ kfree(counter);
+
+ return ERR_PTR(err);
+}
+
+void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ if (!counter)
+ return;
+
+ if (counter->aging) {
+ counter->deleted = true;
+ mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
+ return;
+ }
+
+ mlx5_cmd_fc_free(dev, counter->id);
+ kfree(counter);
+}
+
+int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ INIT_LIST_HEAD(&fc_stats->list);
+ INIT_LIST_HEAD(&fc_stats->addlist);
+ spin_lock_init(&fc_stats->addlist_lock);
+
+ fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
+ if (!fc_stats->wq)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
+
+ return 0;
+}
+
+void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc *counter;
+ struct mlx5_fc *tmp;
+
+ cancel_delayed_work_sync(&dev->priv.fc_stats.work);
+ destroy_workqueue(dev->priv.fc_stats.wq);
+ dev->priv.fc_stats.wq = NULL;
+
+ list_splice_tail_init(&fc_stats->addlist, &fc_stats->list);
+
+ list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) {
+ list_del(&counter->list);
+
+ mlx5_cmd_fc_free(dev, counter->id);
+
+ kfree(counter);
+ }
+}
+
+void mlx5_fc_query_cached(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse)
+{
+ struct mlx5_fc_cache c;
+
+ c = counter->cache;
+
+ *bytes = c.bytes - counter->lastbytes;
+ *packets = c.packets - counter->lastpackets;
+ *lastuse = c.lastuse;
+
+ counter->lastbytes = c.bytes;
+ counter->lastpackets = c.packets;
+}
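mlx5_fc_query_cached() returns deltas against the previous call, so a consumer can poll it periodically without ever touching hardware. A hedged consumer sketch — the demo function is hypothetical; only the query call and struct mlx5_fc come from the patch:

	static void demo_dump_counter(struct mlx5_core_dev *dev,
				      struct mlx5_fc *counter)
	{
		u64 bytes, packets, lastuse;

		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

		/* lastuse is a jiffies stamp refreshed by the stats work
		 * whenever the packet count moved
		 */
		mlx5_core_dbg(dev, "delta: %llu bytes / %llu packets, idle %u ms\n",
			      (unsigned long long)bytes,
			      (unsigned long long)packets,
			      jiffies_to_msecs(jiffies - lastuse));
	}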
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index f5deb642d..96a59463a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev)
void mlx5_enter_error_state(struct mlx5_core_dev *dev)
{
+ mutex_lock(&dev->intf_state_mutex);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
- return;
+ goto unlock;
mlx5_core_err(dev, "start\n");
- if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+ if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ trigger_cmd_completions(dev);
+ }
mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
mlx5_core_err(dev, "end\n");
+
+unlock:
+ mutex_unlock(&dev->intf_state_mutex);
}
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
@@ -176,11 +182,11 @@ static const char *hsynd_str(u8 synd)
case MLX5_HEALTH_SYNDR_EQ_ERR:
return "EQ error";
case MLX5_HEALTH_SYNDR_EQ_INV:
- return "Invalid EQ refrenced";
+ return "Invalid EQ referenced";
case MLX5_HEALTH_SYNDR_FFSER_ERR:
return "FFSER error";
case MLX5_HEALTH_SYNDR_HIGH_TEMP:
- return "High temprature";
+ return "High temperature";
default:
return "unrecognized error";
}
@@ -245,7 +251,6 @@ static void poll_health(unsigned long data)
u32 count;
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- trigger_cmd_completions(dev);
mod_timer(&health->timer, get_next_poll_jiffies());
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 6892746fd..6695893dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -48,6 +48,9 @@
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/mlx5/mlx5_ifc.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
#include "mlx5_core.h"
#include "fs_core.h"
#ifdef CONFIG_MLX5_CORE_EN
@@ -660,11 +663,34 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
}
EXPORT_SYMBOL(mlx5_vector2eqn);
+struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn)
+{
+ struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_eq *eq;
+
+ spin_lock(&table->lock);
+ list_for_each_entry(eq, &table->comp_eqs_list, list)
+ if (eq->eqn == eqn) {
+ spin_unlock(&table->lock);
+ return eq;
+ }
+
+ spin_unlock(&table->lock);
+
+ return ERR_PTR(-ENOENT);
+}
+
static void free_comp_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
struct mlx5_eq *eq, *n;
+#ifdef CONFIG_RFS_ACCEL
+ if (dev->rmap) {
+ free_irq_cpu_rmap(dev->rmap);
+ dev->rmap = NULL;
+ }
+#endif
spin_lock(&table->lock);
list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
list_del(&eq->list);
@@ -691,6 +717,11 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
INIT_LIST_HEAD(&table->comp_eqs_list);
ncomp_vec = table->num_comp_vectors;
nent = MLX5_COMP_EQ_SIZE;
+#ifdef CONFIG_RFS_ACCEL
+ dev->rmap = alloc_irq_cpu_rmap(ncomp_vec);
+ if (!dev->rmap)
+ return -ENOMEM;
+#endif
for (i = 0; i < ncomp_vec; i++) {
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
if (!eq) {
@@ -698,6 +729,10 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
goto clean;
}
+#ifdef CONFIG_RFS_ACCEL
+ irq_cpu_rmap_add(dev->rmap,
+ dev->priv.msix_arr[i + MLX5_EQ_VEC_COMP_BASE].vector);
+#endif
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
err = mlx5_create_map_eq(dev, eq,
i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
@@ -1387,46 +1422,31 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
mlx5_pci_err_detected(dev->pdev, 0);
}
-/* wait for the device to show vital signs. For now we check
- * that we can read the device ID and that the health buffer
- * shows a non zero value which is different than 0xffffffff
+/* wait for the device to show vital signs by waiting
+ * for the health counter to start counting.
*/
-static void wait_vital(struct pci_dev *pdev)
+static int wait_vital(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_health *health = &dev->priv.health;
const int niter = 100;
+ u32 last_count = 0;
u32 count;
- u16 did;
int i;
- /* Wait for firmware to be ready after reset */
- msleep(1000);
- for (i = 0; i < niter; i++) {
- if (pci_read_config_word(pdev, 2, &did)) {
- dev_warn(&pdev->dev, "failed reading config word\n");
- break;
- }
- if (did == pdev->device) {
- dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
- break;
- }
- msleep(50);
- }
- if (i == niter)
- dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
-
for (i = 0; i < niter; i++) {
count = ioread32be(health->health_counter);
if (count && count != 0xffffffff) {
- dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
- break;
+ if (last_count && last_count != count) {
+ dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+ return 0;
+ }
+ last_count = count;
}
msleep(50);
}
- if (i == niter)
- dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+ return -ETIMEDOUT;
}
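The reworked wait_vital() no longer trusts a single valid read: the health counter must yield two distinct valid values, proving it is actually ticking. A standalone simulation of that decision logic — the read_counter() stand-in and the simulated device are hypothetical:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t fake_counter;

	static uint32_t read_counter(void) /* stand-in for ioread32be() */
	{
		return ++fake_counter;     /* simulated device: always ticking */
	}

	static int wait_counter_ticking(int niter)
	{
		uint32_t last = 0, cur;
		int i;

		for (i = 0; i < niter; i++) {
			cur = read_counter();
			if (cur && cur != 0xffffffff) { /* skip reset/PCI-error sentinels */
				if (last && last != cur)
					return 0;       /* two distinct valid reads */
				last = cur;
			}
			/* the driver sleeps 50 ms between reads; omitted here */
		}
		return -1; /* timed out, mirrors -ETIMEDOUT */
	}

	int main(void)
	{
		printf("device %s\n", wait_counter_ticking(100) ? "dead" : "alive");
		return 0;
	}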
static void mlx5_pci_resume(struct pci_dev *pdev)
@@ -1438,7 +1458,11 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "%s was called\n", __func__);
pci_save_state(pdev);
- wait_vital(pdev);
+ err = wait_vital(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+ return;
+ }
err = mlx5_load_one(dev, priv);
if (err)
@@ -1473,8 +1497,9 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
{ PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
- { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */
+ { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */
{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
+ { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */
{ 0, }
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 0b0b226c7..2f86ec6fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -42,6 +42,8 @@
#define DRIVER_VERSION "3.0-1"
#define DRIVER_RELDATE "January 2015"
+#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev))
+
extern int mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \
@@ -100,6 +102,8 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev);
u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
+struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
+void mlx5_cq_tasklet_cb(unsigned long data);
void mlx5e_init(void);
void mlx5e_cleanup(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 9eeee0545..32dea3524 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -345,7 +345,6 @@ retry:
func_id, npages, err);
goto out_4k;
}
- dev->priv.fw_pages += npages;
err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
@@ -373,6 +372,33 @@ out_free:
return err;
}
+static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_manage_pages_inbox *in, int in_size,
+ struct mlx5_manage_pages_outbox *out, int out_size)
+{
+ struct fw_page *fwp;
+ struct rb_node *p;
+ u32 npages;
+ u32 i = 0;
+
+ if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
+ (u32 *)out, out_size);
+
+ npages = be32_to_cpu(in->num_entries);
+
+ p = rb_first(&dev->priv.page_root);
+ while (p && i < npages) {
+ fwp = rb_entry(p, struct fw_page, rb_node);
+ out->pas[i] = cpu_to_be64(fwp->addr);
+ p = rb_next(p);
+ i++;
+ }
+
+ out->num_entries = cpu_to_be32(i);
+ return 0;
+}
+
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
@@ -398,15 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
in.func_id = cpu_to_be16(func_id);
in.num_entries = cpu_to_be32(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
+ err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
if (err) {
- mlx5_core_err(dev, "failed reclaiming pages\n");
- goto out_free;
- }
- dev->priv.fw_pages -= npages;
-
- if (out->hdr.status) {
- err = mlx5_cmd_status_to_err(&out->hdr);
+ mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
@@ -417,13 +437,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
err = -EINVAL;
goto out_free;
}
- if (nclaimed)
- *nclaimed = num_claimed;
for (i = 0; i < num_claimed; i++) {
addr = be64_to_cpu(out->pas[i]);
free_4k(dev, addr);
}
+
+ if (nclaimed)
+ *nclaimed = num_claimed;
+
dev->priv.fw_pages -= num_claimed;
if (func_id)
dev->priv.vfs_pages -= num_claimed;
@@ -514,14 +536,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- free_4k(dev, fwp->addr);
- nclaimed = 1;
- } else {
- err = reclaim_pages(dev, fwp->func_id,
- optimal_reclaimed_pages(),
- &nclaimed);
- }
+ err = reclaim_pages(dev, fwp->func_id,
+ optimal_reclaimed_pages(),
+ &nclaimed);
+
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
err);
@@ -536,6 +554,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
}
} while (p);
+ WARN(dev->priv.fw_pages,
+ "FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.fw_pages);
+ WARN(dev->priv.vfs_pages,
+ "VFs FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.vfs_pages);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 53cc1e2c6..3e35611b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -115,6 +115,19 @@ int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
+{
+ u32 out[MLX5_ST_SZ_DW(mlcr_reg)];
+ u32 in[MLX5_ST_SZ_DW(mlcr_reg)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(mlcr_reg, in, local_port, 1);
+ MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MLCR, 0, 1);
+}
+
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
u32 *proto_cap, int proto_mask)
{
@@ -297,6 +310,82 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
+static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
+{
+ u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
+ u32 in[MLX5_ST_SZ_DW(pmlp_reg)];
+ int module_mapping;
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmlp_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_PMLP, 0, 0);
+ if (err)
+ return err;
+
+ module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping);
+ *module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK;
+
+ return 0;
+}
+
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ u16 offset, u16 size, u8 *data)
+{
+ u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+ u32 in[MLX5_ST_SZ_DW(mcia_reg)];
+ int module_num;
+ u16 i2c_addr;
+ int status;
+ int err;
+ void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+
+ err = mlx5_query_module_num(dev, &module_num);
+ if (err)
+ return err;
+
+ memset(in, 0, sizeof(in));
+ size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
+
+ if (offset < MLX5_EEPROM_PAGE_LENGTH &&
+ offset + size > MLX5_EEPROM_PAGE_LENGTH)
+ /* Cross-page read: read only up to offset 256 in the low page */
+ size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+
+ i2c_addr = MLX5_I2C_ADDR_LOW;
+ if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
+ i2c_addr = MLX5_I2C_ADDR_HIGH;
+ offset -= MLX5_EEPROM_PAGE_LENGTH;
+ }
+
+ MLX5_SET(mcia_reg, in, l, 0);
+ MLX5_SET(mcia_reg, in, module, module_num);
+ MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
+ MLX5_SET(mcia_reg, in, page_number, 0);
+ MLX5_SET(mcia_reg, in, device_address, offset);
+ MLX5_SET(mcia_reg, in, size, size);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MCIA, 0, 0);
+ if (err)
+ return err;
+
+ status = MLX5_GET(mcia_reg, out, status);
+ if (status) {
+ mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+ status);
+ return -EIO;
+ }
+
+ memcpy(data, ptr, size);
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
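The clamping in mlx5_query_module_eeprom() guarantees a single access never straddles the 256-byte low page; offsets past it are rebased onto the high I2C address. A standalone sketch of that arithmetic with illustrative constants — the real limits come from the MLX5_EEPROM_* defines:

	#include <stdio.h>

	#define EEPROM_PAGE_LENGTH 256
	#define EEPROM_MAX_BYTES   32

	static void plan_read(unsigned int offset, unsigned int size)
	{
		int high_addr = 0;

		if (size > EEPROM_MAX_BYTES)
			size = EEPROM_MAX_BYTES;

		/* never let one transfer cross the low/high page boundary */
		if (offset < EEPROM_PAGE_LENGTH &&
		    offset + size > EEPROM_PAGE_LENGTH)
			size = EEPROM_PAGE_LENGTH - offset;

		if (offset >= EEPROM_PAGE_LENGTH) {
			high_addr = 1;
			offset -= EEPROM_PAGE_LENGTH;
		}

		printf("read %u bytes at offset %u via the %s i2c address\n",
		       size, offset, high_addr ? "high" : "low");
	}

	int main(void)
	{
		plan_read(250, 20); /* clamped to 6 bytes, low address */
		plan_read(300, 16); /* rebased to offset 44, high address */
		return 0;
	}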
+
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
{
@@ -607,3 +696,52 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
+
+static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
+ int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pcmr_reg, in, local_port, 1);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ outlen, MLX5_REG_PCMR, 0, 0);
+}
+
+static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+
+ return mlx5_core_access_reg(mdev, in, inlen, out,
+ sizeof(out), MLX5_REG_PCMR, 0, 1);
+}
+
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
+{
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pcmr_reg, in, local_port, 1);
+ MLX5_SET(pcmr_reg, in, fcs_chk, enable);
+
+ return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+ bool *enabled)
+{
+ u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+ /* Default values for firmware that does not support MLX5_REG_PCMR */
+ *supported = false;
+ *enabled = true;
+
+ if (!MLX5_CAP_GEN(mdev, ports_check))
+ return;
+
+ if (mlx5_query_ports_check(mdev, out, sizeof(out)))
+ return;
+
+ *supported = !!(MLX5_GET(pcmr_reg, out, fcs_cap));
+ *enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk));
+}
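mlx5_query_port_fcs() deliberately reports safe defaults (unsupported, check enabled) when the PCMR register is absent, so callers can gate mlx5_set_port_fcs() on the query. A hedged usage sketch (try_enable_fcs is a hypothetical caller):

/* Hypothetical caller: enable FCS checking only if PCMR reports support. */
static int try_enable_fcs(struct mlx5_core_dev *mdev)
{
        bool supported, enabled;

        mlx5_query_port_fcs(mdev, &supported, &enabled);
        if (!supported)
                return -EOPNOTSUPP;
        if (enabled)
                return 0;               /* already on */
        return mlx5_set_port_fcs(mdev, 1);
}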
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index def289375..b82d65802 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -418,7 +418,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
else
- *xrcdn = be32_to_cpu(out.xrcdn);
+ *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
return err;
}
@@ -538,3 +538,71 @@ void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
+
+int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *counter_id = MLX5_GET(alloc_q_counter_out, out,
+ counter_set_id);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
+
+int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
+
+int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
+ int reset, void *out, int out_size)
+{
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+ MLX5_SET(query_q_counter_in, in, clear, reset);
+ MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
+
+int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
+ u32 *out_of_buffer)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
+ void *out;
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
+ if (!err)
+ *out_of_buffer = MLX5_GET(query_q_counter_out, out,
+ out_of_buffer);
+
+ kvfree(out); /* allocated with mlx5_vzalloc(), which may fall back to vmalloc */
+ return err;
+}
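The three exported Q-counter calls above form an alloc/query/dealloc lifecycle, with mlx5_core_query_out_of_buffer() as a convenience wrapper over the query. A short lifecycle sketch for a one-shot sample (sample_out_of_buffer is a hypothetical helper):

/* Hypothetical lifecycle sketch for the Q counter API added above. */
static int sample_out_of_buffer(struct mlx5_core_dev *dev, u32 *oob)
{
        u16 counter_id;
        int err;

        err = mlx5_core_alloc_q_counter(dev, &counter_id);
        if (err)
                return err;

        err = mlx5_core_query_out_of_buffer(dev, counter_id, oob);

        mlx5_core_dealloc_q_counter(dev, counter_id);
        return err;
}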
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 7b2438679..d6a3f412b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -140,7 +140,7 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int err;
- mlx5_core_dbg(dev, "requsted num_vfs %d\n", num_vfs);
+ mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
if (!mlx5_core_is_pf(dev))
return -EPERM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index b69dadcfb..91846dfcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -508,6 +508,41 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 node_guid)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *nic_vport_context;
+ void *in;
+ int err;
+
+ if (!vport)
+ return -EINVAL;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EACCES;
+ if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+ return -ENOTSUPP;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.node_guid, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
+
+ nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
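Note that the helper refuses vport 0 (the PF) and requires the E-Switch nic_vport_node_guid_modify capability. Assuming the usual mlx5 convention that VF n sits on vport n + 1, a caller sketch (set_vf_node_guid is a hypothetical helper):

/* Hypothetical caller: program a VF's node GUID from its VF index,
 * assuming VF n maps to vport n + 1 (vport 0 is the PF and is
 * rejected by mlx5_modify_nic_vport_node_guid() above).
 */
static int set_vf_node_guid(struct mlx5_core_dev *mdev, int vf, u64 guid)
{
        return mlx5_modify_nic_vport_node_guid(mdev, vf + 1, guid);
}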
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index f2fd1ef16..e25a73ed2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -72,8 +72,8 @@ static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
@@ -105,6 +105,9 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
struct mlx5e_vxlan *vxlan;
int err;
+ if (mlx5e_vxlan_lookup_port(priv, port))
+ goto free_work;
+
if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
goto free_work;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index 217ac530a..5def12c04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -48,18 +48,12 @@ struct mlx5e_vxlan_work {
static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
{
- return IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN) &&
- (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
+ return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
mlx5_core_is_pf(mdev));
}
-#ifdef CONFIG_MLX5_CORE_EN_VXLAN
void mlx5e_vxlan_init(struct mlx5e_priv *priv);
void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
-#else
-static inline void mlx5e_vxlan_init(struct mlx5e_priv *priv) {}
-static inline void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) {}
-#endif
void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
u16 port, int add);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index ce21ee5b2..821a087c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -75,14 +75,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
@@ -111,14 +111,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
@@ -148,13 +148,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+ err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 2ad7f6785..5989f7cb5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -50,3 +50,11 @@ config MLXSW_SPECTRUM
To compile this driver as a module, choose M here: the
module will be called mlxsw_spectrum.
+
+config MLXSW_SPECTRUM_DCB
+ bool "Data Center Bridging (DCB) support"
+ depends on MLXSW_SPECTRUM && DCB
+ default y
+ ---help---
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 584cac444..9b5ebf84c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -8,3 +8,4 @@ mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o
+mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f69f62805..b0a0b01bb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -44,7 +44,7 @@
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
-#include <linux/wait.h>
+#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
@@ -55,6 +55,7 @@
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
@@ -73,6 +74,8 @@ static const char mlxsw_core_driver_name[] = "mlxsw_core";
static struct dentry *mlxsw_core_dbg_root;
+static struct workqueue_struct *mlxsw_wq;
+
struct mlxsw_core_pcpu_stats {
u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
@@ -93,11 +96,9 @@ struct mlxsw_core {
struct list_head rx_listener_list;
struct list_head event_listener_list;
struct {
- struct sk_buff *resp_skb;
- u64 tid;
- wait_queue_head_t wait;
- bool trans_active;
- struct mutex lock; /* One EMAD transaction at a time. */
+ atomic64_t tid;
+ struct list_head trans_list;
+ spinlock_t trans_list_lock; /* protects trans_list writes */
bool use_emad;
} emad;
struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
@@ -114,6 +115,12 @@ struct mlxsw_core {
/* driver_priv has to be always the last item */
};
+void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->driver_priv;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_priv);
+
struct mlxsw_rx_listener_item {
struct list_head list;
struct mlxsw_rx_listener rxl;
@@ -284,7 +291,7 @@ static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
static void mlxsw_emad_pack_op_tlv(char *op_tlv,
const struct mlxsw_reg_info *reg,
enum mlxsw_core_reg_access_type type,
- struct mlxsw_core *mlxsw_core)
+ u64 tid)
{
mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
@@ -300,7 +307,7 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv,
MLXSW_EMAD_OP_TLV_METHOD_WRITE);
mlxsw_emad_op_tlv_class_set(op_tlv,
MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
- mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+ mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
@@ -322,7 +329,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
const struct mlxsw_reg_info *reg,
char *payload,
enum mlxsw_core_reg_access_type type,
- struct mlxsw_core *mlxsw_core)
+ u64 tid)
{
char *buf;
@@ -333,7 +340,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
mlxsw_emad_pack_reg_tlv(buf, reg, payload);
buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
- mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+ mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
mlxsw_emad_construct_eth_hdr(skb);
}
@@ -370,58 +377,16 @@ static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}
-#define MLXSW_EMAD_TIMEOUT_MS 200
-
-static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- int err;
- int ret;
-
- mlxsw_core->emad.trans_active = true;
-
- err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
- if (err) {
- dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
- mlxsw_core->emad.tid);
- dev_kfree_skb(skb);
- goto trans_inactive_out;
- }
-
- ret = wait_event_timeout(mlxsw_core->emad.wait,
- !(mlxsw_core->emad.trans_active),
- msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
- if (!ret) {
- dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
- mlxsw_core->emad.tid);
- err = -EIO;
- goto trans_inactive_out;
- }
-
- return 0;
-
-trans_inactive_out:
- mlxsw_core->emad.trans_active = false;
- return err;
-}
-
-static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
- char *op_tlv)
+static int mlxsw_emad_process_status(char *op_tlv,
+ enum mlxsw_emad_op_tlv_status *p_status)
{
- enum mlxsw_emad_op_tlv_status status;
- u64 tid;
+ *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
- status = mlxsw_emad_op_tlv_status_get(op_tlv);
- tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
-
- switch (status) {
+ switch (*p_status) {
case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
return 0;
case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
- dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
- tid, status, mlxsw_emad_op_tlv_status_str(status));
return -EAGAIN;
case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
@@ -432,70 +397,150 @@ static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
default:
- dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
- tid, status, mlxsw_emad_op_tlv_status_str(status));
return -EIO;
}
}
-static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb)
+static int
+mlxsw_emad_process_status_skb(struct sk_buff *skb,
+ enum mlxsw_emad_op_tlv_status *p_status)
+{
+ return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
+}
+
+struct mlxsw_reg_trans {
+ struct list_head list;
+ struct list_head bulk_list;
+ struct mlxsw_core *core;
+ struct sk_buff *tx_skb;
+ struct mlxsw_tx_info tx_info;
+ struct delayed_work timeout_dw;
+ unsigned int retries;
+ u64 tid;
+ struct completion completion;
+ atomic_t active;
+ mlxsw_reg_trans_cb_t *cb;
+ unsigned long cb_priv;
+ const struct mlxsw_reg_info *reg;
+ enum mlxsw_core_reg_access_type type;
+ int err;
+ enum mlxsw_emad_op_tlv_status emad_status;
+ struct rcu_head rcu;
+};
+
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
- return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
+ unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
+
+ mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
}
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ struct mlxsw_reg_trans *trans)
{
- struct sk_buff *trans_skb;
- int n_retry;
+ struct sk_buff *skb;
int err;
- n_retry = 0;
-retry:
- /* We copy the EMAD to a new skb, since we might need
- * to retransmit it in case of failure.
- */
- trans_skb = skb_copy(skb, GFP_KERNEL);
- if (!trans_skb) {
- err = -ENOMEM;
- goto out;
+ skb = skb_copy(trans->tx_skb, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ atomic_set(&trans->active, 1);
+ err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
+ if (err) {
+ dev_kfree_skb(skb);
+ return err;
}
+ mlxsw_emad_trans_timeout_schedule(trans);
+ return 0;
+}
- err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
- if (!err) {
- struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
+static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
+{
+ struct mlxsw_core *mlxsw_core = trans->core;
+
+ dev_kfree_skb(trans->tx_skb);
+ spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_del_rcu(&trans->list);
+ spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+ trans->err = err;
+ complete(&trans->completion);
+}
- err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
- if (err)
- dev_kfree_skb(resp_skb);
- if (!err || err != -EAGAIN)
- goto out;
+static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_reg_trans *trans)
+{
+ int err;
+
+ if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
+ trans->retries++;
+ err = mlxsw_emad_transmit(trans->core, trans);
+ if (err == 0)
+ return;
+ } else {
+ err = -EIO;
}
- if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
- goto retry;
+ mlxsw_emad_trans_finish(trans, err);
+}
-out:
- dev_kfree_skb(skb);
- mlxsw_core->emad.tid++;
- return err;
+static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
+{
+ struct mlxsw_reg_trans *trans = container_of(work,
+ struct mlxsw_reg_trans,
+ timeout_dw.work);
+
+ if (!atomic_dec_and_test(&trans->active))
+ return;
+
+ mlxsw_emad_transmit_retry(trans->core, trans);
+}
+
+static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_reg_trans *trans,
+ struct sk_buff *skb)
+{
+ int err;
+
+ if (!atomic_dec_and_test(&trans->active))
+ return;
+
+ err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
+ if (err == -EAGAIN) {
+ mlxsw_emad_transmit_retry(mlxsw_core, trans);
+ } else {
+ if (err == 0) {
+ char *op_tlv = mlxsw_emad_op_tlv(skb);
+
+ if (trans->cb)
+ trans->cb(mlxsw_core,
+ mlxsw_emad_reg_payload(op_tlv),
+ trans->reg->len, trans->cb_priv);
+ }
+ mlxsw_emad_trans_finish(trans, err);
+ }
}
+/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
void *priv)
{
struct mlxsw_core *mlxsw_core = priv;
+ struct mlxsw_reg_trans *trans;
- if (mlxsw_emad_is_resp(skb) &&
- mlxsw_core->emad.trans_active &&
- mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
- mlxsw_core->emad.resp_skb = skb;
- mlxsw_core->emad.trans_active = false;
- wake_up(&mlxsw_core->emad.wait);
- } else {
- dev_kfree_skb(skb);
+ if (!mlxsw_emad_is_resp(skb))
+ goto free_skb;
+
+ list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
+ if (mlxsw_emad_get_tid(skb) == trans->tid) {
+ mlxsw_emad_process_response(mlxsw_core, trans, skb);
+ break;
+ }
}
+
+free_skb:
+ dev_kfree_skb(skb);
}
static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
@@ -522,18 +567,19 @@ static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
+ u64 tid;
int err;
/* Set the upper 32 bits of the transaction ID field to a random
* number. This allows us to discard EMADs addressed to other
* devices.
*/
- get_random_bytes(&mlxsw_core->emad.tid, 4);
- mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
+ get_random_bytes(&tid, 4);
+ tid <<= 32;
+ atomic64_set(&mlxsw_core->emad.tid, tid);
- init_waitqueue_head(&mlxsw_core->emad.wait);
- mlxsw_core->emad.trans_active = false;
- mutex_init(&mlxsw_core->emad.lock);
+ INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
+ spin_lock_init(&mlxsw_core->emad.trans_list_lock);
err = mlxsw_core_rx_listener_register(mlxsw_core,
&mlxsw_emad_rx_listener,
@@ -591,6 +637,59 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
return skb;
}
+static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type,
+ struct mlxsw_reg_trans *trans,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb,
+ unsigned long cb_priv, u64 tid)
+{
+ struct sk_buff *skb;
+ int err;
+
+ dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+ tid, reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
+ skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+ if (!skb)
+ return -ENOMEM;
+
+ list_add_tail(&trans->bulk_list, bulk_list);
+ trans->core = mlxsw_core;
+ trans->tx_skb = skb;
+ trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
+ trans->tx_info.is_emad = true;
+ INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
+ trans->tid = tid;
+ init_completion(&trans->completion);
+ trans->cb = cb;
+ trans->cb_priv = cb_priv;
+ trans->reg = reg;
+ trans->type = type;
+
+ mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
+ mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
+
+ spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
+ spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+ err = mlxsw_emad_transmit(mlxsw_core, trans);
+ if (err)
+ goto err_out;
+ return 0;
+
+err_out:
+ spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_del_rcu(&trans->list);
+ spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_del(&trans->bulk_list);
+ dev_kfree_skb(trans->tx_skb);
+ return err;
+}
+
/*****************
* Core functions
*****************/
@@ -680,24 +779,6 @@ static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
.llseek = seq_lseek
};
-static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
- const char *buf, size_t size)
-{
- __be32 *m = (__be32 *) buf;
- int i;
- int count = size / sizeof(__be32);
-
- for (i = count - 1; i >= 0; i--)
- if (m[i])
- break;
- i++;
- count = i ? i : 1;
- for (i = 0; i < count; i += 4)
- dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
- i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
- be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
-}
-
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
spin_lock(&mlxsw_core_driver_list_lock);
@@ -795,8 +876,7 @@ static int mlxsw_devlink_port_split(struct devlink *devlink,
return -EINVAL;
if (!mlxsw_core->driver->port_split)
return -EOPNOTSUPP;
- return mlxsw_core->driver->port_split(mlxsw_core->driver_priv,
- port_index, count);
+ return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
}
static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
@@ -808,13 +888,171 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
return -EINVAL;
if (!mlxsw_core->driver->port_unsplit)
return -EOPNOTSUPP;
- return mlxsw_core->driver->port_unsplit(mlxsw_core->driver_priv,
- port_index);
+ return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
+}
+
+static int
+mlxsw_devlink_sb_pool_get(struct devlink *devlink,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_pool_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
+ pool_index, pool_info);
+}
+
+static int
+mlxsw_devlink_sb_pool_set(struct devlink *devlink,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_pool_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
+ pool_index, size, threshold_type);
+}
+
+static void *__dl_port(struct devlink_port *devlink_port)
+{
+ return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
+}
+
+static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_port_pool_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
+ pool_index, p_threshold);
+}
+
+static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_port_pool_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
+ pool_index, threshold);
+}
+
+static int
+mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_tc_pool_bind_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
+ tc_index, pool_type,
+ p_pool_index, p_threshold);
+}
+
+static int
+mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_tc_pool_bind_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
+ tc_index, pool_type,
+ pool_index, threshold);
+}
+
+static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
+ unsigned int sb_index)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_occ_snapshot)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
+}
+
+static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
+ unsigned int sb_index)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_occ_max_clear)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
+}
+
+static int
+mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_occ_port_pool_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
+ pool_index, p_cur, p_max);
+}
+
+static int
+mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_occ_tc_port_bind_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
+ sb_index, tc_index,
+ pool_type, p_cur, p_max);
}
static const struct devlink_ops mlxsw_devlink_ops = {
- .port_split = mlxsw_devlink_port_split,
- .port_unsplit = mlxsw_devlink_port_unsplit,
+ .port_split = mlxsw_devlink_port_split,
+ .port_unsplit = mlxsw_devlink_port_unsplit,
+ .sb_pool_get = mlxsw_devlink_sb_pool_get,
+ .sb_pool_set = mlxsw_devlink_sb_pool_set,
+ .sb_port_pool_get = mlxsw_devlink_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_devlink_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
};
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
@@ -880,8 +1118,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_devlink_register;
- err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
- mlxsw_bus_info);
+ err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
if (err)
goto err_driver_init;
@@ -892,7 +1129,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
return 0;
err_debugfs_init:
- mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+ mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
devlink_unregister(devlink);
err_devlink_register:
@@ -918,7 +1155,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
struct devlink *devlink = priv_to_devlink(mlxsw_core);
mlxsw_core_debugfs_fini(mlxsw_core);
- mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+ mlxsw_core->driver->fini(mlxsw_core);
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
@@ -929,26 +1166,17 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
-static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
-{
- return container_of(driver_priv, struct mlxsw_core, driver_priv);
-}
-
-bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
const struct mlxsw_tx_info *tx_info)
{
- struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
-
return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
-int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info)
{
- struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
-
return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
tx_info);
}
@@ -1108,56 +1336,112 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
+static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
+{
+ return atomic64_inc_return(&mlxsw_core->emad.tid);
+}
+
static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg,
char *payload,
- enum mlxsw_core_reg_access_type type)
+ enum mlxsw_core_reg_access_type type,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb,
+ unsigned long cb_priv)
{
+ u64 tid = mlxsw_core_tid_get(mlxsw_core);
+ struct mlxsw_reg_trans *trans;
int err;
- char *op_tlv;
- struct sk_buff *skb;
- struct mlxsw_tx_info tx_info = {
- .local_port = MLXSW_PORT_CPU_PORT,
- .is_emad = true,
- };
- skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
- if (!skb)
+ trans = kzalloc(sizeof(*trans), GFP_KERNEL);
+ if (!trans)
return -ENOMEM;
- mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
- mlxsw_core->driver->txhdr_construct(skb, &tx_info);
+ err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
+ bulk_list, cb, cb_priv, tid);
+ if (err) {
+ kfree(trans);
+ return err;
+ }
+ return 0;
+}
- dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
- mlxsw_core->emad.tid);
- mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
+int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
+{
+ return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+ bulk_list, cb, cb_priv);
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_query);
- err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
- if (!err) {
- op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
- memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
- reg->len);
+int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
+{
+ return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+ bulk_list, cb, cb_priv);
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_write);
- dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
- mlxsw_core->emad.tid - 1);
- mlxsw_core_buf_dump_dbg(mlxsw_core,
- mlxsw_core->emad.resp_skb->data,
- mlxsw_core->emad.resp_skb->len);
+static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
+{
+ struct mlxsw_core *mlxsw_core = trans->core;
+ int err;
- dev_kfree_skb(mlxsw_core->emad.resp_skb);
- }
+ wait_for_completion(&trans->completion);
+ cancel_delayed_work_sync(&trans->timeout_dw);
+ err = trans->err;
+ if (trans->retries)
+ dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
+ trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
+ if (err)
+ dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
+ trans->tid, trans->reg->id,
+ mlxsw_reg_id_str(trans->reg->id),
+ mlxsw_core_reg_access_type_str(trans->type),
+ trans->emad_status,
+ mlxsw_emad_op_tlv_status_str(trans->emad_status));
+
+ list_del(&trans->bulk_list);
+ kfree_rcu(trans, rcu);
return err;
}
+int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
+{
+ struct mlxsw_reg_trans *trans;
+ struct mlxsw_reg_trans *tmp;
+ int sum_err = 0;
+ int err;
+
+ list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
+ err = mlxsw_reg_trans_wait(trans);
+ if (err && sum_err == 0)
+ sum_err = err; /* first error to be returned */
+ }
+ return sum_err;
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
+
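The new transaction API lets a driver issue many register accesses before sleeping once for all of them; the payload buffer is copied into the EMAD skb during the call, so it may be reused across iterations, and the callback runs on each response before the transaction is reclaimed. A hedged sketch that queries the PMLP width of several ports in one bulk (pmlp_cb and query_port_widths are hypothetical):

static void pmlp_cb(struct mlxsw_core *core, char *payload,
                    size_t payload_len, unsigned long cb_priv)
{
        u8 *width = (u8 *) cb_priv;

        *width = mlxsw_reg_pmlp_width_get(payload);
}

static int query_port_widths(struct mlxsw_core *core, u8 *widths, int count)
{
        char pmlp_pl[MLXSW_REG_PMLP_LEN];
        LIST_HEAD(bulk_list);
        int i, err = 0, werr;

        for (i = 0; i < count; i++) {
                mlxsw_reg_pmlp_pack(pmlp_pl, i + 1);
                err = mlxsw_reg_trans_query(core, MLXSW_REG(pmlp), pmlp_pl,
                                            &bulk_list, pmlp_cb,
                                            (unsigned long) &widths[i]);
                if (err)
                        break;
        }
        /* Wait for everything already issued, even after a queueing error. */
        werr = mlxsw_reg_trans_bulk_wait(&bulk_list);
        return err ?: werr;
}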
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg,
char *payload,
enum mlxsw_core_reg_access_type type)
{
+ enum mlxsw_emad_op_tlv_status status;
int err, n_retry;
char *in_mbox, *out_mbox, *tmp;
+ dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
+ reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
in_mbox = mlxsw_cmd_mbox_alloc();
if (!in_mbox)
return -ENOMEM;
@@ -1168,7 +1452,8 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
goto free_in_mbox;
}
- mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+ mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
+ mlxsw_core_tid_get(mlxsw_core));
tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
@@ -1176,60 +1461,61 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
retry:
err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
if (!err) {
- err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
- if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
- goto retry;
+ err = mlxsw_emad_process_status(out_mbox, &status);
+ if (err) {
+ if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+ goto retry;
+ dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
+ status, mlxsw_emad_op_tlv_status_str(status));
+ }
}
if (!err)
memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
reg->len);
- mlxsw_core->emad.tid++;
mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
mlxsw_cmd_mbox_free(in_mbox);
+ if (err)
+ dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
+ reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
return err;
}
+static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
+ char *payload, size_t payload_len,
+ unsigned long cb_priv)
+{
+ char *orig_payload = (char *) cb_priv;
+
+ memcpy(orig_payload, payload, payload_len);
+}
+
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg,
char *payload,
enum mlxsw_core_reg_access_type type)
{
- u64 cur_tid;
+ LIST_HEAD(bulk_list);
int err;
- if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
- dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
- reg->id, mlxsw_reg_id_str(reg->id),
- mlxsw_core_reg_access_type_str(type));
- return -EINTR;
- }
-
- cur_tid = mlxsw_core->emad.tid;
- dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
- cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
- mlxsw_core_reg_access_type_str(type));
-
/* During initialization EMAD interface is not available to us,
* so we default to command interface. We switch to EMAD interface
* after setting the appropriate traps.
*/
if (!mlxsw_core->emad.use_emad)
- err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
- payload, type);
- else
- err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+ return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
payload, type);
+ err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+ payload, type, &bulk_list,
+ mlxsw_core_reg_access_cb,
+ (unsigned long) payload);
if (err)
- dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
- cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
- mlxsw_core_reg_access_type_str(type));
-
- mutex_unlock(&mlxsw_core->emad.lock);
- return err;
+ return err;
+ return mlxsw_reg_trans_bulk_wait(&bulk_list);
}
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
@@ -1358,6 +1644,46 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
+ struct net_device *dev, bool split, u32 split_group)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ if (split)
+ devlink_port_split_set(devlink_port, split_group);
+ devlink_port_type_eth_set(devlink_port, dev);
+ return devlink_port_register(devlink, devlink_port, local_port);
+}
+EXPORT_SYMBOL(mlxsw_core_port_init);
+
+void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port)
+{
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ devlink_port_unregister(devlink_port);
+}
+EXPORT_SYMBOL(mlxsw_core_port_fini);
+
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+ const char *buf, size_t size)
+{
+ __be32 *m = (__be32 *) buf;
+ int i;
+ int count = size / sizeof(__be32);
+
+ for (i = count - 1; i >= 0; i--)
+ if (m[i])
+ break;
+ i++;
+ count = i ? i : 1;
+ for (i = 0; i < count; i += 4)
+ dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+ i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+ be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
+
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
u32 in_mod, bool out_mbox_direct,
char *in_mbox, size_t in_mbox_size,
@@ -1400,17 +1726,35 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
+int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
+{
+ return queue_delayed_work(mlxsw_wq, dwork, delay);
+}
+EXPORT_SYMBOL(mlxsw_core_schedule_dw);
+
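mlxsw_core_schedule_dw() exists so driver-side delayed work lands on the dedicated mlxsw workqueue rather than the system one. A self-rearming poll sketch (my_poll_work is a hypothetical work function):

static void my_poll_work(struct work_struct *work)
{
        struct delayed_work *dw = to_delayed_work(work);

        /* ... sample hardware state here ... */

        /* Re-arm on the shared mlxsw workqueue, one second out. */
        mlxsw_core_schedule_dw(dw, msecs_to_jiffies(1000));
}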
static int __init mlxsw_core_module_init(void)
{
- mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
- if (!mlxsw_core_dbg_root)
+ int err;
+
+ mlxsw_wq = create_workqueue(mlxsw_core_driver_name);
+ if (!mlxsw_wq)
return -ENOMEM;
+ mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+ if (!mlxsw_core_dbg_root) {
+ err = -ENOMEM;
+ goto err_debugfs_create_dir;
+ }
return 0;
+
+err_debugfs_create_dir:
+ destroy_workqueue(mlxsw_wq);
+ return err;
}
static void __exit mlxsw_core_module_exit(void)
{
debugfs_remove_recursive(mlxsw_core_dbg_root);
+ destroy_workqueue(mlxsw_wq);
}
module_init(mlxsw_core_module_init);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c73d1c079..436bc49df 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -43,6 +43,8 @@
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/devlink.h>
#include "trap.h"
#include "reg.h"
@@ -61,6 +63,8 @@ struct mlxsw_driver;
struct mlxsw_bus;
struct mlxsw_bus_info;
+void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
+
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
@@ -74,10 +78,9 @@ struct mlxsw_tx_info {
bool is_emad;
};
-bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
const struct mlxsw_tx_info *tx_info);
-
-int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
struct mlxsw_rx_listener {
@@ -106,6 +109,19 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
const struct mlxsw_event_listener *el,
void *priv);
+typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload,
+ size_t payload_len, unsigned long cb_priv);
+
+int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
+int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
+int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list);
+
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg, char *payload);
int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
@@ -131,6 +147,26 @@ u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 local_port);
+struct mlxsw_core_port {
+ struct devlink_port devlink_port;
+};
+
+static inline void *
+mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
+{
+ /* mlxsw_core_port is ensured to always be the first field in the
+ * driver's port structure.
+ */
+ return mlxsw_core_port;
+}
+
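The cast in mlxsw_core_port_driver_priv() is only sound because of the first-member contract spelled out in the comment above. A conforming driver-side structure would look like this (my_driver_port is hypothetical):

/* Hypothetical driver port structure honoring the contract above:
 * mlxsw_core_port must be the first member so the core can hand back
 * the embedding structure via mlxsw_core_port_driver_priv().
 */
struct my_driver_port {
        struct mlxsw_core_port core_port;       /* must remain first */
        u8 local_port;
        /* ... further driver-private port state ... */
};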
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
+ struct net_device *dev, bool split, u32 split_group);
+void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port);
+
+int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
+
#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
struct mlxsw_swid_config {
@@ -183,11 +219,43 @@ struct mlxsw_driver {
const char *kind;
struct module *owner;
size_t priv_size;
- int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
+ int (*init)(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info);
- void (*fini)(void *driver_priv);
- int (*port_split)(void *driver_priv, u8 local_port, unsigned int count);
- int (*port_unsplit)(void *driver_priv, u8 local_port);
+ void (*fini)(struct mlxsw_core *mlxsw_core);
+ int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port,
+ unsigned int count);
+ int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port);
+ int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+ int (*sb_pool_set)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type);
+ int (*sb_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+ int (*sb_port_pool_set)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold);
+ int (*sb_tc_pool_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+ int (*sb_tc_pool_bind_set)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold);
+ int (*sb_occ_snapshot)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+ int (*sb_occ_max_clear)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+ int (*sb_occ_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+ int (*sb_occ_tc_port_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
u8 txhdr_len;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index ffe4c0305..57d48da70 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1805,6 +1805,184 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
}
}
+/* QTCT - QoS Switch Traffic Class Table
+ * -------------------------------------
+ * Configures the mapping between the packet switch priority and the
+ * traffic class on the transmit port.
+ */
+#define MLXSW_REG_QTCT_ID 0x400A
+#define MLXSW_REG_QTCT_LEN 0x08
+
+static const struct mlxsw_reg_info mlxsw_reg_qtct = {
+ .id = MLXSW_REG_QTCT_ID,
+ .len = MLXSW_REG_QTCT_LEN,
+};
+
+/* reg_qtct_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is not supported.
+ */
+MLXSW_ITEM32(reg, qtct, local_port, 0x00, 16, 8);
+
+/* reg_qtct_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qtct, sub_port, 0x00, 8, 8);
+
+/* reg_qtct_switch_prio
+ * Switch priority.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4);
+
+/* reg_qtct_tclass
+ * Traffic class.
+ * Default values:
+ * switch_prio 0 : tclass 1
+ * switch_prio 1 : tclass 0
+ * switch_prio i : tclass i, for i > 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4);
+
+static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
+ u8 switch_prio, u8 tclass)
+{
+ MLXSW_REG_ZERO(qtct, payload);
+ mlxsw_reg_qtct_local_port_set(payload, local_port);
+ mlxsw_reg_qtct_switch_prio_set(payload, switch_prio);
+ mlxsw_reg_qtct_tclass_set(payload, tclass);
+}
+
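A hedged write sketch using the QTCT helpers above, mapping switch priority 3 to traffic class 3 on local port 1 (map_prio_to_tc is hypothetical; mlxsw_reg_write() and MLXSW_REG() come from core.h and reg.h):

/* Hypothetical write sketch for the QTCT payload helpers above. */
static int map_prio_to_tc(struct mlxsw_core *core)
{
        char qtct_pl[MLXSW_REG_QTCT_LEN];

        mlxsw_reg_qtct_pack(qtct_pl, 1, 3, 3);
        return mlxsw_reg_write(core, MLXSW_REG(qtct), qtct_pl);
}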
+/* QEEC - QoS ETS Element Configuration Register
+ * ---------------------------------------------
+ * Configures the ETS elements.
+ */
+#define MLXSW_REG_QEEC_ID 0x400D
+#define MLXSW_REG_QEEC_LEN 0x1C
+
+static const struct mlxsw_reg_info mlxsw_reg_qeec = {
+ .id = MLXSW_REG_QEEC_ID,
+ .len = MLXSW_REG_QEEC_LEN,
+};
+
+/* reg_qeec_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported.
+ */
+MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_qeec_hr {
+ MLXSW_REG_QEEC_HIERARCY_PORT,
+ MLXSW_REG_QEEC_HIERARCY_GROUP,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ MLXSW_REG_QEEC_HIERARCY_TC,
+};
+
+/* reg_qeec_element_hierarchy
+ * 0 - Port
+ * 1 - Group
+ * 2 - Subgroup
+ * 3 - Traffic Class
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qeec, element_hierarchy, 0x04, 16, 4);
+
+/* reg_qeec_element_index
+ * The index of the element in the hierarchy.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8);
+
+/* reg_qeec_next_element_index
+ * The index of the next (lower) element in the hierarchy.
+ * Access: RW
+ *
+ * Note: Reserved for element_hierarchy 0.
+ */
+MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8);
+
+enum {
+ MLXSW_REG_QEEC_BYTES_MODE,
+ MLXSW_REG_QEEC_PACKETS_MODE,
+};
+
+/* reg_qeec_pb
+ * Packets or bytes mode.
+ * 0 - Bytes mode
+ * 1 - Packets mode
+ * Access: RW
+ *
+ * Note: Used for max shaper configuration. For Spectrum, packets mode
+ * is supported only for traffic classes of CPU port.
+ */
+MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1);
+
+/* reg_qeec_mase
+ * Max shaper configuration enable. Enables configuration of the max
+ * shaper on this ETS element.
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1);
+
+/* A large max rate will disable the max shaper. */
+#define MLXSW_REG_QEEC_MAS_DIS 200000000 /* Kbps */
+
+/* reg_qeec_max_shaper_rate
+ * Max shaper information rate.
+ * For CPU port, can only be configured for port hierarchy.
+ * When in bytes mode, value is specified in units of 1000bps.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 28);
+
+/* reg_qeec_de
+ * DWRR configuration enable. Enables configuration of the dwrr and
+ * dwrr_weight.
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, de, 0x18, 31, 1);
+
+/* reg_qeec_dwrr
+ * Transmission selection algorithm to use on the link going down from
+ * the ETS element.
+ * 0 - Strict priority
+ * 1 - DWRR
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, dwrr, 0x18, 15, 1);
+
+/* reg_qeec_dwrr_weight
+ * DWRR weight on the link going down from the ETS element. The
+ * percentage of bandwidth guaranteed to an ETS element within
+ * its hierarchy. The sum of all weights across all ETS elements
+ * within one hierarchy should be equal to 100. Reserved when
+ * transmission selection algorithm is strict priority.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, dwrr_weight, 0x18, 0, 8);
+
+static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index)
+{
+ MLXSW_REG_ZERO(qeec, payload);
+ mlxsw_reg_qeec_local_port_set(payload, local_port);
+ mlxsw_reg_qeec_element_hierarchy_set(payload, hr);
+ mlxsw_reg_qeec_element_index_set(payload, index);
+ mlxsw_reg_qeec_next_element_index_set(payload, next_index);
+}
+
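A hedged sketch of a typical QEEC write: select the subgroup element feeding a traffic class, enable DWRR, and program its weight (set_tc_weight is a hypothetical helper):

/* Hypothetical sketch: give the subgroup feeding traffic class `tc`
 * a DWRR weight, using the QEEC helpers above.
 */
static int set_tc_weight(struct mlxsw_core *core, u8 local_port, u8 tc,
                         u8 weight)
{
        char qeec_pl[MLXSW_REG_QEEC_LEN];

        mlxsw_reg_qeec_pack(qeec_pl, local_port,
                            MLXSW_REG_QEEC_HIERARCY_SUBGROUP, tc, tc);
        mlxsw_reg_qeec_de_set(qeec_pl, true);
        mlxsw_reg_qeec_dwrr_set(qeec_pl, true);
        mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, weight);
        return mlxsw_reg_write(core, MLXSW_REG(qeec), qeec_pl);
}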
/* PMLP - Ports Module to Local Port Register
* ------------------------------------------
* Configures the assignment of modules to local ports.
@@ -2141,6 +2319,145 @@ static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
mlxsw_reg_paos_e_set(payload, 1);
}
+/* PFCC - Ports Flow Control Configuration Register
+ * ------------------------------------------------
+ * Configures and retrieves the per port flow control configuration.
+ */
+#define MLXSW_REG_PFCC_ID 0x5007
+#define MLXSW_REG_PFCC_LEN 0x20
+
+static const struct mlxsw_reg_info mlxsw_reg_pfcc = {
+ .id = MLXSW_REG_PFCC_ID,
+ .len = MLXSW_REG_PFCC_LEN,
+};
+
+/* reg_pfcc_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pfcc, local_port, 0x00, 16, 8);
+
+/* reg_pfcc_pnat
+ * Port number access type. Determines the way local_port is interpreted:
+ * 0 - Local port number.
+ * 1 - IB / label port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pfcc, pnat, 0x00, 14, 2);
+
+/* reg_pfcc_shl_cap
+ * Send to higher layers capabilities:
+ * 0 - No capability of sending Pause and PFC frames to higher layers.
+ * 1 - Device has capability of sending Pause and PFC frames to higher
+ * layers.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, shl_cap, 0x00, 1, 1);
+
+/* reg_pfcc_shl_opr
+ * Send to higher layers operation:
+ * 0 - Pause and PFC frames are handled by the port (default).
+ * 1 - Pause and PFC frames are handled by the port and also sent to
+ * higher layers. Only valid if shl_cap = 1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, shl_opr, 0x00, 0, 1);
+
+/* reg_pfcc_ppan
+ * Pause policy auto negotiation.
+ * 0 - Disabled. Generate / ignore Pause frames based on pptx / pprtx.
+ * 1 - Enabled. When auto-negotiation is performed, set the Pause policy
+ * based on the auto-negotiation resolution.
+ * Access: RW
+ *
+ * Note: The auto-negotiation advertisement is set according to pptx and
+ * pprtx. When PFC is set on Tx / Rx, ppan must be set to 0.
+ */
+MLXSW_ITEM32(reg, pfcc, ppan, 0x04, 28, 4);
+
+/* reg_pfcc_prio_mask_tx
+ * Bit per priority indicating if Tx flow control policy should be
+ * updated based on bit pfctx.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pfcc, prio_mask_tx, 0x04, 16, 8);
+
+/* reg_pfcc_prio_mask_rx
+ * Bit per priority indicating if Rx flow control policy should be
+ * updated based on bit pfcrx.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pfcc, prio_mask_rx, 0x04, 0, 8);
+
+/* reg_pfcc_pptx
+ * Admin Pause policy on Tx.
+ * 0 - Never generate Pause frames (default).
+ * 1 - Generate Pause frames according to Rx buffer threshold.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pptx, 0x08, 31, 1);
+
+/* reg_pfcc_aptx
+ * Active (operational) Pause policy on Tx.
+ * 0 - Never generate Pause frames.
+ * 1 - Generate Pause frames according to Rx buffer threshold.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, aptx, 0x08, 30, 1);
+
+/* reg_pfcc_pfctx
+ * Priority based flow control policy on Tx[7:0]. Per-priority bit mask:
+ * 0 - Never generate priority Pause frames on the specified priority
+ * (default).
+ * 1 - Generate priority Pause frames according to Rx buffer threshold on
+ * the specified priority.
+ * Access: RW
+ *
+ * Note: pfctx and pptx must be mutually exclusive.
+ */
+MLXSW_ITEM32(reg, pfcc, pfctx, 0x08, 16, 8);
+
+/* reg_pfcc_pprx
+ * Admin Pause policy on Rx.
+ * 0 - Ignore received Pause frames (default).
+ * 1 - Respect received Pause frames.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pprx, 0x0C, 31, 1);
+
+/* reg_pfcc_aprx
+ * Active (operational) Pause policy on Rx.
+ * 0 - Ignore received Pause frames.
+ * 1 - Respect received Pause frames.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, aprx, 0x0C, 30, 1);
+
+/* reg_pfcc_pfcrx
+ * Priority based flow control policy on Rx[7:0]. Per-priority bit mask:
+ * 0 - Ignore incoming priority Pause frames on the specified priority
+ * (default).
+ * 1 - Respect incoming priority Pause frames on the specified priority.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pfcrx, 0x0C, 16, 8);
+
+#define MLXSW_REG_PFCC_ALL_PRIO 0xFF
+
+static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en)
+{
+ mlxsw_reg_pfcc_prio_mask_tx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
+ mlxsw_reg_pfcc_prio_mask_rx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
+ mlxsw_reg_pfcc_pfctx_set(payload, pfc_en);
+ mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en);
+}
+
+static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(pfcc, payload);
+ mlxsw_reg_pfcc_local_port_set(payload, local_port);
+}
+
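mlxsw_reg_pfcc_prio_pack() updates all eight priorities at once through the write-only masks, so enabling PFC is a pack-then-write pair; per the field notes above, pptx/pprx and ppan must be off whenever any pfc_en bit is set. A hedged sketch (set_port_pfc is a hypothetical helper):

/* Hypothetical sketch: program the per-priority PFC enable bitmap. */
static int set_port_pfc(struct mlxsw_core *core, u8 local_port, u8 pfc_en)
{
        char pfcc_pl[MLXSW_REG_PFCC_LEN];

        mlxsw_reg_pfcc_pack(pfcc_pl, local_port);
        mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc_en);
        return mlxsw_reg_write(core, MLXSW_REG(pfcc), pfcc_pl);
}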
/* PPCNT - Ports Performance Counters Register
* -------------------------------------------
* The PPCNT register retrieves per port performance counters.
@@ -2180,6 +2497,11 @@ MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+enum mlxsw_reg_ppcnt_grp {
+ MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+ MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
+};
+
/* reg_ppcnt_grp
* Performance counter group.
* Group 63 indicates all groups. Only valid on Set() operation with
@@ -2215,6 +2537,8 @@ MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
*/
MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+/* Ethernet IEEE 802.3 Counter Group */
+
/* reg_ppcnt_a_frames_transmitted_ok
* Access: RO
*/
@@ -2329,15 +2653,160 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
0x08 + 0x90, 0, 64);
-static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
+/* Ethernet Per Priority Group Counters */
+
+/* reg_ppcnt_rx_octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_octets, 0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_rx_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_frames, 0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_tx_octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_octets, 0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_tx_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_frames, 0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_rx_pause
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_pause, 0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_rx_pause_duration
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_pause_duration, 0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_tx_pause
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_pause, 0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_tx_pause_duration
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64);
+
+/* reg_ppcnt_rx_pause_transition
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_pause_transition, 0x08 + 0x70, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_ppcnt_grp grp,
+ u8 prio_tc)
{
MLXSW_REG_ZERO(ppcnt, payload);
mlxsw_reg_ppcnt_swid_set(payload, 0);
mlxsw_reg_ppcnt_local_port_set(payload, local_port);
mlxsw_reg_ppcnt_pnat_set(payload, 0);
- mlxsw_reg_ppcnt_grp_set(payload, 0);
+ mlxsw_reg_ppcnt_grp_set(payload, grp);
mlxsw_reg_ppcnt_clr_set(payload, 0);
- mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
+ mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
+}
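+
+/* Usage sketch (illustrative): mlxsw_sp_port_get_stats() in spectrum.c
+ * queries the IEEE 802.3 group with prio_tc 0; reading the per-priority
+ * group for a given priority follows the same pattern:
+ *
+ *	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ *
+ *	mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port,
+ *			     MLXSW_REG_PPCNT_PRIO_CNT, prio);
+ *	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ *	if (!err)
+ *		rx_pause = mlxsw_reg_ppcnt_rx_pause_get(ppcnt_pl);
+ */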
+
+/* PPTB - Port Prio To Buffer Register
+ * -----------------------------------
+ * Configures the switch priority to buffer table.
+ */
+#define MLXSW_REG_PPTB_ID 0x500B
+#define MLXSW_REG_PPTB_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_pptb = {
+ .id = MLXSW_REG_PPTB_ID,
+ .len = MLXSW_REG_PPTB_LEN,
+};
+
+enum {
+ MLXSW_REG_PPTB_MM_UM,
+ MLXSW_REG_PPTB_MM_UNICAST,
+ MLXSW_REG_PPTB_MM_MULTICAST,
+};
+
+/* reg_pptb_mm
+ * Mapping mode.
+ * 0 - Map both unicast and multicast packets to the same buffer.
+ * 1 - Map only unicast packets.
+ * 2 - Map only multicast packets.
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports the first option.
+ */
+MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2);
+
+/* reg_pptb_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pptb, local_port, 0x00, 16, 8);
+
+/* reg_pptb_um
+ * Enables the update of the untagged_buf field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, um, 0x00, 8, 1);
+
+/* reg_pptb_pm
+ * Enables the update of the prio_to_buff field.
+ * Bit <i> is a flag for updating the mapping for switch priority <i>.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, pm, 0x00, 0, 8);
+
+/* reg_pptb_prio_to_buff
+ * Mapping of switch priority <i> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff, 0x04, 0x04, 4);
+
+/* reg_pptb_pm_msb
+ * Enables the update of the prio_to_buff field.
+ * Bit <i> is a flag for updating the mapping for switch priority <i+8>.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8);
+
+/* reg_pptb_untagged_buff
+ * Mapping of untagged frames to one of the allocated receive port buffers.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field must be mapped to buffer 8. Reserved for
+ * Spectrum, as it maps untagged packets based on the default switch priority.
+ */
+MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
+
+/* reg_pptb_prio_to_buff_msb
+ * Mapping of switch priority <i+8> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
+
+#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
+
+static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(pptb, payload);
+ mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
+ mlxsw_reg_pptb_local_port_set(payload, local_port);
+ mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+ mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+}
+
+static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
+ u8 buff)
+{
+ mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
+ mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
}
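+
+/* Usage sketch (illustrative): mapping every switch priority of a port to
+ * buffer 0, as the buffers code does for the default configuration:
+ *
+ *	char pptb_pl[MLXSW_REG_PPTB_LEN];
+ *	int i;
+ *
+ *	mlxsw_reg_pptb_pack(pptb_pl, local_port);
+ *	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ *		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
+ *	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pptb), pptb_pl);
+ */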
/* PBMC - Port Buffer Management Control Register
@@ -2346,7 +2815,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
* allocation for different Prios, and the Pause threshold management.
*/
#define MLXSW_REG_PBMC_ID 0x500C
-#define MLXSW_REG_PBMC_LEN 0x68
+#define MLXSW_REG_PBMC_LEN 0x6C
static const struct mlxsw_reg_info mlxsw_reg_pbmc = {
.id = MLXSW_REG_PBMC_ID,
@@ -2374,6 +2843,8 @@ MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16);
*/
MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16);
+#define MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX 11
+
/* reg_pbmc_buf_lossy
* The field indicates if the buffer is lossy.
* 0 - Lossless
@@ -2398,6 +2869,30 @@ MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false);
*/
MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false);
+/* reg_pbmc_buf_xoff_threshold
+ * Once the amount of data in the buffer goes above this value, the device
+ * starts sending PFC frames for all priorities associated with the
+ * buffer. Units are represented in cells. Reserved in case of a lossy
+ * buffer.
+ * Access: RW
+ *
+ * Note: In Spectrum, reserved for buffer[9].
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16,
+ 0x08, 0x04, false);
+
+/* reg_pbmc_buf_xon_threshold
+ * When the amount of data in the buffer goes below this value, the device
+ * stops sending PFC frames for the priorities associated with the
+ * buffer. Units are represented in cells. Reserved in case of a lossy
+ * buffer.
+ * Access: RW
+ *
+ * Note: In Spectrum, reserved for buffer[9].
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16,
+ 0x08, 0x04, false);
+
static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
u16 xoff_timer_value, u16 xoff_refresh)
{
@@ -2416,6 +2911,17 @@ static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload,
mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
}
+static inline void mlxsw_reg_pbmc_lossless_buffer_pack(char *payload,
+ int buf_index, u16 size,
+ u16 threshold)
+{
+ mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 0);
+ mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
+ mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
+ mlxsw_reg_pbmc_buf_xoff_threshold_set(payload, buf_index, threshold);
+ mlxsw_reg_pbmc_buf_xon_threshold_set(payload, buf_index, threshold);
+}
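+
+/* Usage sketch (illustrative): mlxsw_sp_pg_buf_pack() in spectrum.c sizes
+ * a lossless headroom buffer as 2 * MTU (in cells) plus the propagation
+ * delay, with the xoff/xon thresholds at the 2 * MTU mark:
+ *
+ *	u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
+ *
+ *	mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
+ *					    pg_size + delay, pg_size);
+ */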
+
/* PSPA - Port Switch Partition Allocation
* ---------------------------------------
* Controls the association of a port with a switch partition and enables
@@ -2985,9 +3491,10 @@ static const struct mlxsw_reg_info mlxsw_reg_sbpr = {
.len = MLXSW_REG_SBPR_LEN,
};
-enum mlxsw_reg_sbpr_dir {
- MLXSW_REG_SBPR_DIR_INGRESS,
- MLXSW_REG_SBPR_DIR_EGRESS,
+/* Shared direction enum for SBPR, SBCM, SBPM. */
+enum mlxsw_reg_sbxx_dir {
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ MLXSW_REG_SBXX_DIR_EGRESS,
};
/* reg_sbpr_dir
@@ -3020,7 +3527,7 @@ enum mlxsw_reg_sbpr_mode {
MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4);
static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
- enum mlxsw_reg_sbpr_dir dir,
+ enum mlxsw_reg_sbxx_dir dir,
enum mlxsw_reg_sbpr_mode mode, u32 size)
{
MLXSW_REG_ZERO(sbpr, payload);
@@ -3062,11 +3569,6 @@ MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6);
-enum mlxsw_reg_sbcm_dir {
- MLXSW_REG_SBCM_DIR_INGRESS,
- MLXSW_REG_SBCM_DIR_EGRESS,
-};
-
/* reg_sbcm_dir
* Direction.
* Access: Index
@@ -3079,6 +3581,10 @@ MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2);
*/
MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
+/* shared max_buff limits for dynamic threshold for SBCM, SBPM */
+#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1
+#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14
+
/* reg_sbcm_max_buff
* When the pool associated to the port-pg/tclass is configured to
* static, Maximum buffer size for the limiter configured in cells.
@@ -3099,7 +3605,7 @@ MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
- enum mlxsw_reg_sbcm_dir dir,
+ enum mlxsw_reg_sbxx_dir dir,
u32 min_buff, u32 max_buff, u8 pool)
{
MLXSW_REG_ZERO(sbcm, payload);
@@ -3111,8 +3617,8 @@ static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
mlxsw_reg_sbcm_pool_set(payload, pool);
}
-/* SBPM - Shared Buffer Class Management Register
- * ----------------------------------------------
+/* SBPM - Shared Buffer Port Management Register
+ * ---------------------------------------------
* The SBPM register configures and retrieves the shared buffer allocation
* and configuration according to Port-Pool, including the definition
* of the associated quota.
@@ -3139,17 +3645,33 @@ MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
-enum mlxsw_reg_sbpm_dir {
- MLXSW_REG_SBPM_DIR_INGRESS,
- MLXSW_REG_SBPM_DIR_EGRESS,
-};
-
/* reg_sbpm_dir
* Direction.
* Access: Index
*/
MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2);
+/* reg_sbpm_buff_occupancy
+ * Current buffer occupancy in cells.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, sbpm, buff_occupancy, 0x10, 0, 24);
+
+/* reg_sbpm_clr
+ * Clear Max Buffer Occupancy
+ * When this bit is set, max_buff_occupancy field is cleared (and a
+ * new max value is tracked from the time the clear was performed).
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sbpm, clr, 0x14, 31, 1);
+
+/* reg_sbpm_max_buff_occupancy
+ * Maximum value of buffer occupancy in cells monitored. Cleared by
+ * writing to the clr field.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, sbpm, max_buff_occupancy, 0x14, 0, 24);
+
/* reg_sbpm_min_buff
* Minimum buffer size for the limiter, in cells.
* Access: RW
@@ -3170,17 +3692,25 @@ MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
- enum mlxsw_reg_sbpm_dir dir,
+ enum mlxsw_reg_sbxx_dir dir, bool clr,
u32 min_buff, u32 max_buff)
{
MLXSW_REG_ZERO(sbpm, payload);
mlxsw_reg_sbpm_local_port_set(payload, local_port);
mlxsw_reg_sbpm_pool_set(payload, pool);
mlxsw_reg_sbpm_dir_set(payload, dir);
+ mlxsw_reg_sbpm_clr_set(payload, clr);
mlxsw_reg_sbpm_min_buff_set(payload, min_buff);
mlxsw_reg_sbpm_max_buff_set(payload, max_buff);
}
+static inline void mlxsw_reg_sbpm_unpack(char *payload, u32 *p_buff_occupancy,
+ u32 *p_max_buff_occupancy)
+{
+ *p_buff_occupancy = mlxsw_reg_sbpm_buff_occupancy_get(payload);
+ *p_max_buff_occupancy = mlxsw_reg_sbpm_max_buff_occupancy_get(payload);
+}
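+
+/* Usage sketch (illustrative): the occupancy fields are only meaningful in
+ * a query response, so a caller sampling a per-port-pool watermark would
+ * query and then unpack:
+ *
+ *	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
+ *	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
+ *	if (!err)
+ *		mlxsw_reg_sbpm_unpack(sbpm_pl, &cur, &max);
+ */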
+
/* SBMM - Shared Buffer Multicast Management Register
* --------------------------------------------------
* The SBMM register configures and retrieves the shared buffer allocation
@@ -3236,6 +3766,104 @@ static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff,
mlxsw_reg_sbmm_pool_set(payload, pool);
}
+/* SBSR - Shared Buffer Status Register
+ * ------------------------------------
+ * The SBSR register retrieves the shared buffer occupancy according to
+ * Port-Pool. Note that this register enables reading a large amount of data.
+ * It is the user's responsibility to limit the amount of data to ensure the
+ * response fits within the maximum transfer unit. In case the response
+ * exceeds the maximum transfer unit, it is truncated with no special notice.
+ */
+#define MLXSW_REG_SBSR_ID 0xB005
+#define MLXSW_REG_SBSR_BASE_LEN 0x5C /* base length, without records */
+#define MLXSW_REG_SBSR_REC_LEN 0x8 /* record length */
+#define MLXSW_REG_SBSR_REC_MAX_COUNT 120
+#define MLXSW_REG_SBSR_LEN (MLXSW_REG_SBSR_BASE_LEN + \
+ MLXSW_REG_SBSR_REC_LEN * \
+ MLXSW_REG_SBSR_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sbsr = {
+ .id = MLXSW_REG_SBSR_ID,
+ .len = MLXSW_REG_SBSR_LEN,
+};
+
+/* reg_sbsr_clr
+ * Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy
+ * field is cleared (and a new max value is tracked from the time the clear
+ * was performed).
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1);
+
+/* reg_sbsr_ingress_port_mask
+ * Bit vector for all ingress network ports.
+ * Indicates which of the ports (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other port
+ * does not change.
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, ingress_port_mask, 0x10, 0x20, 1);
+
+/* reg_sbsr_pg_buff_mask
+ * Bit vector for all switch priority groups.
+ * Indicates which of the priorities (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other priority
+ * does not change.
+ * Range is 0..cap_max_pg_buffers - 1
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, pg_buff_mask, 0x30, 0x4, 1);
+
+/* reg_sbsr_egress_port_mask
+ * Bit vector for all egress network ports.
+ * Indicates which of the ports (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other port
+ * does not change.
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, egress_port_mask, 0x34, 0x20, 1);
+
+/* reg_sbsr_tclass_mask
+ * Bit vector for all traffic classes.
+ * Indicates which of the traffic classes (for which the relevant bit is
+ * set) are affected by the set operation. Configuration of any other
+ * traffic class does not change.
+ * Range is 0..cap_max_tclass - 1
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, tclass_mask, 0x54, 0x8, 1);
+
+static inline void mlxsw_reg_sbsr_pack(char *payload, bool clr)
+{
+ MLXSW_REG_ZERO(sbsr, payload);
+ mlxsw_reg_sbsr_clr_set(payload, clr);
+}
+
+/* reg_sbsr_rec_buff_occupancy
+ * Current buffer occupancy in cells.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sbsr, rec_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
+ 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x00, false);
+
+/* reg_sbsr_rec_max_buff_occupancy
+ * Maximum value of buffer occupancy in cells monitored. Cleared by
+ * writing to the clr field.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sbsr, rec_max_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
+ 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x04, false);
+
+static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index,
+ u32 *p_buff_occupancy,
+ u32 *p_max_buff_occupancy)
+{
+ *p_buff_occupancy =
+ mlxsw_reg_sbsr_rec_buff_occupancy_get(payload, rec_index);
+ *p_max_buff_occupancy =
+ mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index);
+}
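+
+/* Usage sketch (illustrative): an occupancy snapshot selects the ports of
+ * interest via the masks, queries, and then walks the returned records:
+ *
+ *	mlxsw_reg_sbsr_pack(sbsr_pl, false);
+ *	mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
+ *	mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ *	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sbsr), sbsr_pl);
+ *	...
+ *	mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index, &cur, &max);
+ */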
+
static inline const char *mlxsw_reg_id_str(u16 reg_id)
{
switch (reg_id) {
@@ -3283,6 +3911,10 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "SFMR";
case MLXSW_REG_SPVMLR_ID:
return "SPVMLR";
+ case MLXSW_REG_QTCT_ID:
+ return "QTCT";
+ case MLXSW_REG_QEEC_ID:
+ return "QEEC";
case MLXSW_REG_PMLP_ID:
return "PMLP";
case MLXSW_REG_PMTU_ID:
@@ -3293,8 +3925,12 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "PPAD";
case MLXSW_REG_PAOS_ID:
return "PAOS";
+ case MLXSW_REG_PFCC_ID:
+ return "PFCC";
case MLXSW_REG_PPCNT_ID:
return "PPCNT";
+ case MLXSW_REG_PPTB_ID:
+ return "PPTB";
case MLXSW_REG_PBMC_ID:
return "PBMC";
case MLXSW_REG_PSPA_ID:
@@ -3323,6 +3959,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "SBPM";
case MLXSW_REG_SBMM_ID:
return "SBMM";
+ case MLXSW_REG_SBSR_ID:
+ return "SBSR";
default:
return "*UNKNOWN*";
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 668b2f465..374080027 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -49,7 +49,7 @@
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
-#include <net/devlink.h>
+#include <linux/dcbnl.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
@@ -171,23 +171,6 @@ static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
-static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
- bool *p_is_up)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char paos_pl[MLXSW_REG_PAOS_LEN];
- u8 oper_status;
- int err;
-
- mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
- if (err)
- return err;
- oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
- *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
- return 0;
-}
-
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned char *addr)
{
@@ -247,15 +230,23 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
-static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 swid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pspa_pl[MLXSW_REG_PSPA_LEN];
- mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
+ swid);
+}
+
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable)
{
@@ -307,7 +298,7 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
u8 local_port, u8 *p_module,
- u8 *p_width)
+ u8 *p_width, u8 *p_lane)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int err;
@@ -318,6 +309,7 @@ static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
return err;
*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
return 0;
}
@@ -379,7 +371,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
u64 len;
int err;
- if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
return NETDEV_TX_BUSY;
if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
@@ -399,11 +391,15 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
}
mlxsw_sp_txhdr_construct(skb, &tx_info);
- len = skb->len;
+	/* The TX header is consumed by the HW on the way to the wire, so
+	 * its bytes should not be counted as transmitted.
+	 */
+ len = skb->len - MLXSW_TXHDR_LEN;
+
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
- err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
if (!err) {
pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
@@ -438,16 +434,89 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
+static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
+ bool pause_en, bool pfc_en, u16 delay)
+{
+ u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
+
+ delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
+ MLXSW_SP_PAUSE_DELAY;
+
+ if (pause_en || pfc_en)
+ mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
+ pg_size + delay, pg_size);
+ else
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
+}
+
+int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
+ u8 *prio_tc, bool pause_en,
+ struct ieee_pfc *my_pfc)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u8 pfc_en = my_pfc ? my_pfc->pfc_en : 0;
+	u16 delay = my_pfc ? my_pfc->delay : 0;
+ char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ int i, j, err;
+
+ mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+ if (err)
+ return err;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ bool configure = false;
+ bool pfc = false;
+
+ for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
+ if (prio_tc[j] == i) {
+ pfc = pfc_en & BIT(j);
+ configure = true;
+ break;
+ }
+ }
+
+ if (!configure)
+ continue;
+ mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+}
+
+static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ int mtu, bool pause_en)
+{
+ u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
+ bool dcb_en = !!mlxsw_sp_port->dcb.ets;
+ struct ieee_pfc *my_pfc;
+ u8 *prio_tc;
+
+ prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
+ my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
+
+ return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
+ pause_en, my_pfc);
+}
+
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
int err;
- err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
+ err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
if (err)
return err;
+ err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
+ if (err)
+ goto err_port_mtu_set;
dev->mtu = mtu;
return 0;
+
+err_port_mtu_set:
+ mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ return err;
}
static struct rtnl_link_stats64 *
@@ -861,6 +930,27 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
}
+static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
+ size_t len)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ u8 module = mlxsw_sp_port->mapping.module;
+ u8 width = mlxsw_sp_port->mapping.width;
+ u8 lane = mlxsw_sp_port->mapping.lane;
+ int err;
+
+ if (!mlxsw_sp_port->split)
+ err = snprintf(name, len, "p%d", module + 1);
+ else
+ err = snprintf(name, len, "p%ds%d", module + 1,
+ lane / width);
+
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
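+
+/* Naming sketch (illustrative): a non-split port on module 2 is reported
+ * as "p3", while the second half of the same module split in two (width 2,
+ * lane 2) is reported as "p3s1".
+ */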
+
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_open = mlxsw_sp_port_open,
.ndo_stop = mlxsw_sp_port_stop,
@@ -877,6 +967,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
.ndo_bridge_dellink = switchdev_port_bridge_dellink,
+ .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -897,6 +988,68 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->bus_info));
}
+static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ pause->rx_pause = mlxsw_sp_port->link.rx_pause;
+ pause->tx_pause = mlxsw_sp_port->link.tx_pause;
+}
+
+static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ethtool_pauseparam *pause)
+{
+ char pfcc_pl[MLXSW_REG_PFCC_LEN];
+
+ mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
+ mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
+
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
+ pfcc_pl);
+}
+
+static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ bool pause_en = pause->tx_pause || pause->rx_pause;
+ int err;
+
+ if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
+ netdev_err(dev, "PFC already enabled on port\n");
+ return -EINVAL;
+ }
+
+ if (pause->autoneg) {
+ netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
+ if (err) {
+ netdev_err(dev, "Failed to set PAUSE parameters\n");
+ goto err_port_pause_configure;
+ }
+
+ mlxsw_sp_port->link.rx_pause = pause->rx_pause;
+ mlxsw_sp_port->link.tx_pause = pause->tx_pause;
+
+ return 0;
+
+err_port_pause_configure:
+ pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+ mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ return err;
+}
+
struct mlxsw_sp_port_hw_stats {
char str[ETH_GSTRING_LEN];
u64 (*getter)(char *payload);
@@ -1032,7 +1185,8 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
int i;
int err;
- mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
+ MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
@@ -1263,7 +1417,8 @@ static int mlxsw_sp_port_get_settings(struct net_device *dev,
cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause |
+ SUPPORTED_Autoneg;
cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
eth_proto_oper, cmd);
@@ -1322,7 +1477,6 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
u32 eth_proto_new;
u32 eth_proto_cap;
u32 eth_proto_admin;
- bool is_up;
int err;
speed = ethtool_cmd_speed(cmd);
@@ -1354,12 +1508,7 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
return err;
}
- err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
- if (err) {
- netdev_err(dev, "Failed to get oper status");
- return err;
- }
- if (!is_up)
+ if (!netif_running(dev))
return 0;
err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
@@ -1380,6 +1529,8 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_drvinfo = mlxsw_sp_port_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_pauseparam = mlxsw_sp_port_get_pauseparam,
+ .set_pauseparam = mlxsw_sp_port_set_pauseparam,
.get_strings = mlxsw_sp_port_get_strings,
.set_phys_id = mlxsw_sp_port_set_phys_id,
.get_ethtool_stats = mlxsw_sp_port_get_stats,
@@ -1402,12 +1553,112 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
-static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width)
+int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
+ bool dwrr, u8 dwrr_weight)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+ mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
+ next_index);
+ mlxsw_reg_qeec_de_set(qeec_pl, true);
+ mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
+ mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
+int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index, u32 maxrate)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+ mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
+ next_index);
+ mlxsw_reg_qeec_mase_set(qeec_pl, true);
+ mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
+int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 switch_prio, u8 tclass)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qtct_pl[MLXSW_REG_QTCT_LEN];
+
+ mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
+ tclass);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
+}
+
+static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err, i;
+
+	/* Set up the element hierarchy, so that each TC is linked to
+	 * one subgroup, and all subgroups are members of the same group.
+	 */
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
+ 0);
+ if (err)
+ return err;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ 0, false, 0);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_TC, i, i,
+ false, 0);
+ if (err)
+ return err;
+ }
+
+	/* Make sure the max shaper is disabled in all hierarchy levels
+	 * that support it.
+	 */
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
+ MLXSW_REG_QEEC_MAS_DIS);
+ if (err)
+ return err;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ i, 0,
+ MLXSW_REG_QEEC_MAS_DIS);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_TC,
+ i, i,
+ MLXSW_REG_QEEC_MAS_DIS);
+ if (err)
+ return err;
+ }
+
+ /* Map all priorities to traffic class 0. */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
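+
+/* The resulting default scheduling hierarchy (illustrative):
+ *
+ *	Port
+ *	  `- Group 0
+ *	       |- Subgroup 0 - TC 0  <- all switch priorities map here
+ *	       |- Subgroup 1 - TC 1
+ *	       ...
+ *	       `- Subgroup 7 - TC 7
+ *
+ * with DWRR disabled and the max shaper disabled at every level.
+ */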
+
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width, u8 lane)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_port *mlxsw_sp_port;
- struct devlink_port *devlink_port;
struct net_device *dev;
size_t bytes;
int err;
@@ -1420,6 +1671,9 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->split = split;
+ mlxsw_sp_port->mapping.module = module;
+ mlxsw_sp_port->mapping.width = width;
+ mlxsw_sp_port->mapping.lane = lane;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
@@ -1460,16 +1714,6 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
*/
dev->hard_header_len += MLXSW_TXHDR_LEN;
- devlink_port = &mlxsw_sp_port->devlink_port;
- if (mlxsw_sp_port->split)
- devlink_port_split_set(devlink_port, module);
- err = devlink_port_register(devlink, devlink_port, local_port);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
- mlxsw_sp_port->local_port);
- goto err_devlink_port_register;
- }
-
err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
@@ -1509,6 +1753,21 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_buffers_init;
}
+ err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_ets_init;
+ }
+
+ /* ETS and buffers must be initialized before DCB. */
+ err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_dcb_init;
+ }
+
mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
err = register_netdev(dev);
if (err) {
@@ -1517,7 +1776,14 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_register_netdev;
}
- devlink_port_type_eth_set(devlink_port, dev);
+ err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
+ mlxsw_sp_port->local_port, dev,
+ mlxsw_sp_port->split, module);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
+ mlxsw_sp_port->local_port);
+ goto err_core_port_init;
+ }
err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
if (err)
@@ -1527,16 +1793,18 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return 0;
err_port_vlan_init:
+ mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
+err_core_port_init:
unregister_netdev(dev);
err_register_netdev:
+err_port_dcb_init:
+err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
- devlink_port_unregister(&mlxsw_sp_port->devlink_port);
-err_devlink_port_register:
err_dev_addr_init:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
@@ -1548,28 +1816,6 @@ err_port_active_vlans_alloc:
return err;
}
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
-{
- int err;
-
- err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
- lane);
- if (err)
- return err;
-
- err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
- width);
- if (err)
- goto err_port_create;
-
- return 0;
-
-err_port_create:
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
- return err;
-}
-
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct net_device *dev = mlxsw_sp_port->dev;
@@ -1590,15 +1836,13 @@ static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
- struct devlink_port *devlink_port;
if (!mlxsw_sp_port)
return;
mlxsw_sp->ports[local_port] = NULL;
- devlink_port = &mlxsw_sp_port->devlink_port;
- devlink_port_type_clear(devlink_port);
+ mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
- devlink_port_unregister(devlink_port);
+ mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_vports_fini(mlxsw_sp_port);
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
@@ -1620,8 +1864,8 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
+ u8 module, width, lane;
size_t alloc_size;
- u8 module, width;
int i;
int err;
@@ -1632,13 +1876,14 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
- &width);
+ &width, &lane);
if (err)
goto err_port_module_info_get;
if (!width)
continue;
mlxsw_sp->port_to_module[i] = module;
- err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
+ err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
+ lane);
if (err)
goto err_port_create;
}
@@ -1659,11 +1904,85 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
return local_port - offset;
}
-static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
+static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+ u8 module, unsigned int count)
{
- struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_port *mlxsw_sp_port;
u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+ int err, i;
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
+ width, i * width);
+ if (err)
+ goto err_port_module_map;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
+ if (err)
+ goto err_port_swid_set;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
+ module, width, i * width);
+ if (err)
+ goto err_port_create;
+ }
+
+ return 0;
+
+err_port_create:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ i = count;
+err_port_swid_set:
+ for (i--; i >= 0; i--)
+ __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+ i = count;
+err_port_module_map:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
+ return err;
+}
+
+static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
+ u8 base_port, unsigned int count)
+{
+ u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+ int i;
+
+	/* A split by four means we need to re-create two unsplit ports;
+	 * a split by two means only one.
+	 */
+ count = count / 2;
+
+ for (i = 0; i < count; i++) {
+ local_port = base_port + i * 2;
+ module = mlxsw_sp->port_to_module[local_port];
+
+ mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
+ 0);
+ }
+
+ for (i = 0; i < count; i++)
+ __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
+
+ for (i = 0; i < count; i++) {
+ local_port = base_port + i * 2;
+ module = mlxsw_sp->port_to_module[local_port];
+
+ mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
+ width, 0);
+ }
+}
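+
+/* Worked example (illustrative): unsplitting a 4-way split at base_port 1
+ * (count = 4) re-creates two unsplit ports, at local ports 1 and 3, each
+ * with the full module width.
+ */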
+
+static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
+ unsigned int count)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_port *mlxsw_sp_port;
u8 module, cur_width, base_port;
int i;
int err;
@@ -1675,18 +1994,14 @@ static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
return -EINVAL;
}
+ module = mlxsw_sp_port->mapping.module;
+ cur_width = mlxsw_sp_port->mapping.width;
+
if (count != 2 && count != 4) {
netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
return -EINVAL;
}
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
- &cur_width);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
- return err;
- }
-
if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
return -EINVAL;
@@ -1711,36 +2026,26 @@ static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count; i++) {
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
- module, width, i * width);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
- goto err_port_create;
- }
+ err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
+ goto err_port_split_create;
}
return 0;
-err_port_create:
- for (i--; i >= 0; i--)
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count / 2; i++) {
- module = mlxsw_sp->port_to_module[base_port + i * 2];
- mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
- module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
- }
+err_port_split_create:
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return err;
}
-static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
+static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
- struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 module, cur_width, base_port;
+ u8 cur_width, base_port;
unsigned int count;
int i;
- int err;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
@@ -1754,12 +2059,7 @@ static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
return -EINVAL;
}
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
- &cur_width);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
- return err;
- }
+ cur_width = mlxsw_sp_port->mapping.width;
count = cur_width == 1 ? 4 : 2;
base_port = mlxsw_sp_cluster_base_port_get(local_port);
@@ -1771,14 +2071,7 @@ static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count / 2; i++) {
- module = mlxsw_sp->port_to_module[base_port + i * 2];
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
- module, MLXSW_PORT_MODULE_MAX_WIDTH,
- 0);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
- }
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return 0;
}
@@ -2080,10 +2373,10 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
-static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
+static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
- struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
int err;
mlxsw_sp->core = mlxsw_core;
@@ -2144,6 +2437,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
err_switchdev_init:
err_lag_init:
+ mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
mlxsw_sp_traps_fini(mlxsw_sp);
@@ -2154,11 +2448,12 @@ err_event_register:
return err;
}
-static void mlxsw_sp_fini(void *priv)
+static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
- struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
mlxsw_sp_ports_remove(mlxsw_sp);
@@ -2201,16 +2496,26 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
};
static struct mlxsw_driver mlxsw_sp_driver = {
- .kind = MLXSW_DEVICE_KIND_SPECTRUM,
- .owner = THIS_MODULE,
- .priv_size = sizeof(struct mlxsw_sp),
- .init = mlxsw_sp_init,
- .fini = mlxsw_sp_fini,
- .port_split = mlxsw_sp_port_split,
- .port_unsplit = mlxsw_sp_port_unsplit,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
- .txhdr_len = MLXSW_TXHDR_LEN,
- .profile = &mlxsw_sp_config_profile,
+ .kind = MLXSW_DEVICE_KIND_SPECTRUM,
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp_init,
+ .fini = mlxsw_sp_fini,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp_config_profile,
};
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4b8abaf06..13b30eaa1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -42,15 +42,15 @@
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
+#include <linux/dcbnl.h>
#include <net/switchdev.h>
-#include <net/devlink.h>
#include "port.h"
#include "core.h"
#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */
-#define MLXSW_SP_VFID_BR_MAX 8192 /* Bridged VLAN interfaces */
+#define MLXSW_SP_VFID_BR_MAX 6144 /* Bridged VLAN interfaces */
#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX)
#define MLXSW_SP_LAG_MAX 64
@@ -62,6 +62,24 @@
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
+#define MLXSW_SP_BYTES_PER_CELL 96
+
+#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
+#define MLXSW_SP_CELLS_TO_BYTES(c) ((c) * MLXSW_SP_BYTES_PER_CELL)
+
+/* Maximum delay buffer needed in case of PAUSE frames, in cells.
+ * Assumes 100m cable and maximum MTU.
+ */
+#define MLXSW_SP_PAUSE_DELAY 612
+
+#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
+
+static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
+{
+ delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
+ return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
+}
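+
+/* Worked example (illustrative): for mtu = 1500 and delay = 32768 bits,
+ * delay becomes MLXSW_SP_BYTES_TO_CELLS(4096) = 43 cells, so the returned
+ * headroom is 2 * 43 + MLXSW_SP_BYTES_TO_CELLS(1500) = 86 + 16 = 102 cells.
+ */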
+
struct mlxsw_sp_port;
struct mlxsw_sp_upper {
@@ -100,6 +118,40 @@ static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
return fid >= MLXSW_SP_VFID_BASE;
}
+struct mlxsw_sp_sb_pr {
+ enum mlxsw_reg_sbpr_mode mode;
+ u32 size;
+};
+
+struct mlxsw_sp_sb_occ {
+	u32 cur;
+	u32 max;
+};
+
+struct mlxsw_sp_sb_cm {
+	u32 min_buff;
+	u32 max_buff;
+	u8 pool;
+	struct mlxsw_sp_sb_occ occ;
+};
+
+struct mlxsw_sp_sb_pm {
+	u32 min_buff;
+	u32 max_buff;
+	struct mlxsw_sp_sb_occ occ;
+};
+
+#define MLXSW_SP_SB_POOL_COUNT 4
+#define MLXSW_SP_SB_TC_COUNT 8
+
+struct mlxsw_sp_sb {
+ struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
+ struct {
+ struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
+ struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
+ } ports[MLXSW_PORT_MAX_PORTS];
+};
+
struct mlxsw_sp {
struct {
struct list_head list;
@@ -130,6 +182,7 @@ struct mlxsw_sp {
struct mlxsw_sp_upper master_bridge;
struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
u8 port_to_module[MLXSW_PORT_MAX_PORTS];
+ struct mlxsw_sp_sb sb;
};
static inline struct mlxsw_sp_upper *
@@ -148,6 +201,7 @@ struct mlxsw_sp_port_pcpu_stats {
};
struct mlxsw_sp_port {
+ struct mlxsw_core_port core_port; /* must be first */
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sp *mlxsw_sp;
@@ -166,14 +220,33 @@ struct mlxsw_sp_port {
struct mlxsw_sp_vfid *vfid;
u16 vid;
} vport;
+ struct {
+ u8 tx_pause:1,
+ rx_pause:1;
+ } link;
+ struct {
+ struct ieee_ets *ets;
+ struct ieee_maxrate *maxrate;
+ struct ieee_pfc *pfc;
+ } dcb;
+ struct {
+ u8 module;
+ u8 width;
+ u8 lane;
+ } mapping;
/* 802.1Q bridge VLANs */
unsigned long *active_vlans;
unsigned long *untagged_vlans;
/* VLAN interfaces */
struct list_head vports_list;
- struct devlink_port devlink_port;
};
+static inline bool
+mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
+}
+
static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
@@ -245,7 +318,39 @@ enum mlxsw_sp_flood_table {
};
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type);
+int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold);
+int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold);
+int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
@@ -265,5 +370,33 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
bool set, bool only_uc);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
+ bool dwrr, u8 dwrr_weight);
+int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 switch_prio, u8 tclass);
+int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
+ u8 *prio_tc, bool pause_en,
+ struct ieee_pfc *my_pfc);
+int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index, u32 maxrate);
+
+#ifdef CONFIG_MLXSW_SPECTRUM_DCB
+
+int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+
+#else
+
+static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return 0;
+}
+
+static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{}
+
+#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index d59195e3f..074cdda7b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -34,36 +34,140 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/dcbnl.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"
-struct mlxsw_sp_pb {
- u8 index;
- u16 size;
-};
+static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
+ u8 pool,
+ enum mlxsw_reg_sbxx_dir dir)
+{
+ return &mlxsw_sp->sb.prs[dir][pool];
+}
-#define MLXSW_SP_PB(_index, _size) \
- { \
- .index = _index, \
- .size = _size, \
+static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 pg_buff,
+ enum mlxsw_reg_sbxx_dir dir)
+{
+ return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
+}
+
+static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 pool,
+ enum mlxsw_reg_sbxx_dir dir)
+{
+ return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
+}
+
+static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
+ enum mlxsw_reg_sbxx_dir dir,
+ enum mlxsw_reg_sbpr_mode mode, u32 size)
+{
+ char sbpr_pl[MLXSW_REG_SBPR_LEN];
+ struct mlxsw_sp_sb_pr *pr;
+ int err;
+
+ mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+ if (err)
+ return err;
+
+ pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+ pr->mode = mode;
+ pr->size = size;
+ return 0;
+}
+
+static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
+ u32 min_buff, u32 max_buff, u8 pool)
+{
+ char sbcm_pl[MLXSW_REG_SBCM_LEN];
+ int err;
+
+ mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
+ min_buff, max_buff, pool);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+ if (err)
+ return err;
+ if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
+ struct mlxsw_sp_sb_cm *cm;
+
+ cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
+ cm->min_buff = min_buff;
+ cm->max_buff = max_buff;
+ cm->pool = pool;
}
+ return 0;
+}
+
+static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 pool, enum mlxsw_reg_sbxx_dir dir,
+ u32 min_buff, u32 max_buff)
+{
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+ struct mlxsw_sp_sb_pm *pm;
+ int err;
+
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
+ min_buff, max_buff);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
+ if (err)
+ return err;
+
+ pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
+ pm->min_buff = min_buff;
+ pm->max_buff = max_buff;
+ return 0;
+}
+
+static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 pool, enum mlxsw_reg_sbxx_dir dir,
+ struct list_head *bulk_list)
+{
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
+ return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
+ bulk_list, NULL, 0);
+}
+
+static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
+ char *sbpm_pl, size_t sbpm_pl_len,
+ unsigned long cb_priv)
+{
+ struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
+
+ mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
+}
+
+static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 pool, enum mlxsw_reg_sbxx_dir dir,
+ struct list_head *bulk_list)
+{
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+ struct mlxsw_sp_sb_pm *pm;
+
+ pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
+ return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
+ bulk_list,
+ mlxsw_sp_sb_pm_occ_query_cb,
+ (unsigned long) pm);
+}
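+
+/* The occupancy queries above are batched: each mlxsw_reg_trans_query()
+ * appends a transaction to bulk_list, the caller is expected to wait for
+ * the whole list to complete, and the per-transaction callback (here
+ * mlxsw_sp_sb_pm_occ_query_cb()) records the results into the cached
+ * struct mlxsw_sp_sb_pm.
+ */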
-static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = {
- MLXSW_SP_PB(0, 208),
- MLXSW_SP_PB(1, 208),
- MLXSW_SP_PB(2, 208),
- MLXSW_SP_PB(3, 208),
- MLXSW_SP_PB(4, 208),
- MLXSW_SP_PB(5, 208),
- MLXSW_SP_PB(6, 208),
- MLXSW_SP_PB(7, 208),
- MLXSW_SP_PB(9, 208),
+static const u16 mlxsw_sp_pbs[] = {
+ [0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN),
+ [9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU),
};
#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
+#define MLXSW_SP_PB_UNUSED 8
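+
+/* Headroom layout sketch (illustrative): buffer 0 absorbs regular traffic
+ * (2 * ETH_FRAME_LEN worth of cells), buffer 9 absorbs control traffic
+ * sized for the maximum MTU, buffers 1-7 are packed with size 0 and
+ * buffer 8 (MLXSW_SP_PB_UNUSED) is skipped entirely.
+ */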
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -73,194 +177,206 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
0xffff, 0xffff / 2);
for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
- const struct mlxsw_sp_pb *pb;
-
- pb = &mlxsw_sp_pbs[i];
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size);
+ if (i == MLXSW_SP_PB_UNUSED)
+ continue;
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]);
}
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
+ MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
MLXSW_REG(pbmc), pbmc_pl);
}
-#define MLXSW_SP_SB_BYTES_PER_CELL 96
+static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ char pptb_pl[MLXSW_REG_PPTB_LEN];
+ int i;
-struct mlxsw_sp_sb_pool {
- u8 pool;
- enum mlxsw_reg_sbpr_dir dir;
- enum mlxsw_reg_sbpr_mode mode;
- u32 size;
-};
+ mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
+ pptb_pl);
+}
+
+static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+ if (err)
+ return err;
+ return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
+}
-#define MLXSW_SP_SB_POOL_INGRESS_SIZE \
- ((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) / \
- MLXSW_SP_SB_BYTES_PER_CELL)
-#define MLXSW_SP_SB_POOL_EGRESS_SIZE \
- ((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) / \
- MLXSW_SP_SB_BYTES_PER_CELL)
-
-#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size) \
- { \
- .pool = _pool, \
- .dir = _dir, \
- .mode = _mode, \
- .size = _size, \
+#define MLXSW_SP_SB_PR_INGRESS_SIZE \
+ (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS))
+#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
+#define MLXSW_SP_SB_PR_EGRESS_SIZE \
+ (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS))
+
+#define MLXSW_SP_SB_PR(_mode, _size) \
+ { \
+ .mode = _mode, \
+ .size = _size, \
}
-#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size) \
- MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS, \
- MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
-
-#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size) \
- MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS, \
- MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
-
-static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = {
- MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE),
- MLXSW_SP_SB_POOL_INGRESS(1, 0),
- MLXSW_SP_SB_POOL_INGRESS(2, 0),
- MLXSW_SP_SB_POOL_INGRESS(3, 0),
- MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE),
- MLXSW_SP_SB_POOL_EGRESS(1, 0),
- MLXSW_SP_SB_POOL_EGRESS(2, 0),
- MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_SB_POOL_EGRESS_SIZE),
+static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)),
};
-#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools)
+#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)
+
+static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+};
-static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp)
+#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)
+
+static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_reg_sbxx_dir dir,
+ const struct mlxsw_sp_sb_pr *prs,
+ size_t prs_len)
{
- char sbpr_pl[MLXSW_REG_SBPR_LEN];
int i;
int err;
- for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) {
- const struct mlxsw_sp_sb_pool *pool;
+ for (i = 0; i < prs_len; i++) {
+ const struct mlxsw_sp_sb_pr *pr;
- pool = &mlxsw_sp_sb_pools[i];
- mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir,
- pool->mode, pool->size);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+ pr = &prs[i];
+ err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir,
+ pr->mode, pr->size);
if (err)
return err;
}
return 0;
}
-struct mlxsw_sp_sb_cm {
- union {
- u8 pg;
- u8 tc;
- } u;
- enum mlxsw_reg_sbcm_dir dir;
- u32 min_buff;
- u32 max_buff;
- u8 pool;
-};
+static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
-#define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool) \
- { \
- .u.pg = _pg_tc, \
- .dir = _dir, \
- .min_buff = _min_buff, \
- .max_buff = _max_buff, \
- .pool = _pool, \
+ err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
+ mlxsw_sp_sb_prs_ingress,
+ MLXSW_SP_SB_PRS_INGRESS_LEN);
+ if (err)
+ return err;
+ return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
+ mlxsw_sp_sb_prs_egress,
+ MLXSW_SP_SB_PRS_EGRESS_LEN);
+}
+
+#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool = _pool, \
}
-#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff) \
- MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS, \
- _min_buff, _max_buff, 0)
-
-#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \
- MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, \
- _min_buff, _max_buff, 0)
-
-#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \
- MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3)
-
-static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = {
- MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8),
- MLXSW_SP_SB_CM_INGRESS(1, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(2, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(3, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(4, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(5, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(6, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(7, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff),
- MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(6, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(8, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(9, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(10, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(11, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(12, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(13, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(14, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(15, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff),
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3),
};
-#define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms)
+#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(1, 0xff, 0),
+};
+
+#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)
+
+#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31),
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
};
#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
-static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- const struct mlxsw_sp_sb_cm *cms,
- size_t cms_len)
+static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ enum mlxsw_reg_sbxx_dir dir,
+ const struct mlxsw_sp_sb_cm *cms,
+ size_t cms_len)
{
- char sbcm_pl[MLXSW_REG_SBCM_LEN];
int i;
int err;
for (i = 0; i < cms_len; i++) {
const struct mlxsw_sp_sb_cm *cm;
+ if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ continue; /* PG number 8 does not exist, skip it */
cm = &cms[i];
- mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir,
- cm->min_buff, cm->max_buff, cm->pool);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+ err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
+ cm->min_buff, cm->max_buff,
+ cm->pool);
if (err)
return err;
}
@@ -269,105 +385,120 @@ static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
- return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->local_port, mlxsw_sp_sb_cms,
- MLXSW_SP_SB_CMS_LEN);
+ int err;
+
+ err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port,
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ mlxsw_sp_sb_cms_ingress,
+ MLXSW_SP_SB_CMS_INGRESS_LEN);
+ if (err)
+ return err;
+ return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port,
+ MLXSW_REG_SBXX_DIR_EGRESS,
+ mlxsw_sp_sb_cms_egress,
+ MLXSW_SP_SB_CMS_EGRESS_LEN);
}
static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
- return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms,
- MLXSW_SP_CPU_PORT_SB_MCS_LEN);
+ return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
+ mlxsw_sp_cpu_port_sb_cms,
+ MLXSW_SP_CPU_PORT_SB_MCS_LEN);
}
-struct mlxsw_sp_sb_pm {
- u8 pool;
- enum mlxsw_reg_sbpm_dir dir;
- u32 min_buff;
- u32 max_buff;
+#define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ }
+
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
-#define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff) \
- { \
- .pool = _pool, \
- .dir = _dir, \
- .min_buff = _min_buff, \
- .max_buff = _max_buff, \
- }
+#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)
-#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \
- MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS, \
- _min_buff, _max_buff)
-
-#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \
- MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS, \
- _min_buff, _max_buff)
-
-static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
- MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff),
- MLXSW_SP_SB_PM_INGRESS(1, 0, 0),
- MLXSW_SP_SB_PM_INGRESS(2, 0, 0),
- MLXSW_SP_SB_PM_INGRESS(3, 0, 0),
- MLXSW_SP_SB_PM_EGRESS(0, 0, 7),
- MLXSW_SP_SB_PM_EGRESS(1, 0, 0),
- MLXSW_SP_SB_PM_EGRESS(2, 0, 0),
- MLXSW_SP_SB_PM_EGRESS(3, 0, 0),
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
+ MLXSW_SP_SB_PM(0, 7),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
-#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)
+#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)
-static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ enum mlxsw_reg_sbxx_dir dir,
+ const struct mlxsw_sp_sb_pm *pms,
+ size_t pms_len)
{
- char sbpm_pl[MLXSW_REG_SBPM_LEN];
int i;
int err;
- for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
+ for (i = 0; i < pms_len; i++) {
const struct mlxsw_sp_sb_pm *pm;
- pm = &mlxsw_sp_sb_pms[i];
- mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port,
- pm->pool, pm->dir,
- pm->min_buff, pm->max_buff);
- err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
- MLXSW_REG(sbpm), sbpm_pl);
+ pm = &pms[i];
+ err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
+ pm->min_buff, pm->max_buff);
if (err)
return err;
}
return 0;
}
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port,
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ mlxsw_sp_sb_pms_ingress,
+ MLXSW_SP_SB_PMS_INGRESS_LEN);
+ if (err)
+ return err;
+ return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port,
+ MLXSW_REG_SBXX_DIR_EGRESS,
+ mlxsw_sp_sb_pms_egress,
+ MLXSW_SP_SB_PMS_EGRESS_LEN);
+}
+
struct mlxsw_sp_sb_mm {
- u8 prio;
u32 min_buff;
u32 max_buff;
u8 pool;
};
-#define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool) \
- { \
- .prio = _prio, \
- .min_buff = _min_buff, \
- .max_buff = _max_buff, \
- .pool = _pool, \
+#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool = _pool, \
}
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
- MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(3, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
};
#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
@@ -382,7 +513,7 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_sb_mm *mc;
mc = &mlxsw_sp_sb_mms[i];
- mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff,
+ mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff,
mc->max_buff, mc->pool);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
if (err)
@@ -391,26 +522,39 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024)
+
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
- err = mlxsw_sp_sb_pools_init(mlxsw_sp);
+ err = mlxsw_sp_sb_prs_init(mlxsw_sp);
if (err)
return err;
err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
if (err)
return err;
err = mlxsw_sp_sb_mms_init(mlxsw_sp);
+ if (err)
+ return err;
+ return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
+ MLXSW_SP_SB_SIZE,
+ MLXSW_SP_SB_POOL_COUNT,
+ MLXSW_SP_SB_POOL_COUNT,
+ MLXSW_SP_SB_TC_COUNT,
+ MLXSW_SP_SB_TC_COUNT);
+}
- return err;
+void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
}
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
int err;
- err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+ err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
if (err)
return err;
err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
@@ -420,3 +564,394 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
return err;
}
+
+static u8 pool_get(u16 pool_index)
+{
+ return pool_index % MLXSW_SP_SB_POOL_COUNT;
+}
+
+static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
+{
+ u16 pool_index;
+
+ pool_index = pool;
+ if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
+ pool_index += MLXSW_SP_SB_POOL_COUNT;
+ return pool_index;
+}
+
+static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
+{
+ return pool_index < MLXSW_SP_SB_POOL_COUNT ?
+ MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
+}
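
These three helpers flatten a (pool, direction) pair into the single pool index that devlink exposes: ingress pools come first, egress pools follow. A worked round-trip, assuming MLXSW_SP_SB_POOL_COUNT is 4 to match the four-entry pool tables above:

	u8 pool = pool_get(5);                     /* 5 % 4 == 1 */
	enum mlxsw_reg_sbxx_dir dir = dir_get(5);  /* 5 >= 4 -> egress */

	WARN_ON(pool_index_get(pool, dir) != 5);   /* flattening round-trips */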
+
+int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+ pool_info->pool_type = dir;
+ pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
+ pool_info->threshold_type = pr->mode;
+ return 0;
+}
+
+int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ enum mlxsw_reg_sbpr_mode mode = threshold_type;
+ u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
+
+ return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
+}
+
+#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
+
+static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
+ enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
+{
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+ if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
+ return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
+ return MLXSW_SP_CELLS_TO_BYTES(max_buff);
+}
+
+static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
+ enum mlxsw_reg_sbxx_dir dir, u32 threshold,
+ u32 *p_max_buff)
+{
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+ if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
+ int val;
+
+ val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
+ if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
+ val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
+ return -EINVAL;
+ *p_max_buff = val;
+ } else {
+ *p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold);
+ }
+ return 0;
+}
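
For a dynamic pool the devlink threshold is an alpha index offset by two from the register encoding (hence the "3->1, 16->14" comment above); for a static pool it is a byte count converted to cells. A worked example under that reading, assuming a dynamic-mode pool:

	u32 max_buff;

	/* devlink threshold 3 encodes register alpha value 1 ... */
	mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, 3, &max_buff); /* max_buff == 1 */
	/* ... and register value 14 reads back as devlink threshold 16. */
	mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir, 14);          /* returns 16 */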
+
+int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
+ pool, dir);
+
+ *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
+ pm->max_buff);
+ return 0;
+}
+
+int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ u32 max_buff;
+ int err;
+
+ err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+ threshold, &max_buff);
+ if (err)
+ return err;
+
+ return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
+ 0, max_buff);
+}
+
+int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pg_buff = tc_index;
+ enum mlxsw_reg_sbxx_dir dir = pool_type;
+ struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
+ pg_buff, dir);
+
+ *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
+ cm->max_buff);
+ *p_pool_index = pool_index_get(cm->pool, pool_type);
+ return 0;
+}
+
+int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pg_buff = tc_index;
+ enum mlxsw_reg_sbxx_dir dir = pool_type;
+ u8 pool = pool_index;
+ u32 max_buff;
+ int err;
+
+ err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+ threshold, &max_buff);
+ if (err)
+ return err;
+
+ if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) {
+ if (pool < MLXSW_SP_SB_POOL_COUNT)
+ return -EINVAL;
+ pool -= MLXSW_SP_SB_POOL_COUNT;
+ } else if (pool >= MLXSW_SP_SB_POOL_COUNT) {
+ return -EINVAL;
+ }
+ return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
+ 0, max_buff, pool);
+}
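
Note that the pool_index argument arrives flattened, so an egress binding must name an egress slot. An illustrative call, again assuming MLXSW_SP_SB_POOL_COUNT is 4:

	/* Bind egress TC 0 to egress pool 0: flattened index is 4 + 0. */
	mlxsw_sp_sb_tc_pool_bind_set(mlxsw_core_port, 0, 0,
				     DEVLINK_SB_POOL_TYPE_EGRESS, 4, threshold);
	/* A bare 0 here would name an ingress pool and fail with -EINVAL. */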
+
+#define MASKED_COUNT_MAX \
+ (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))
+
+struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
+ u8 masked_count;
+ u8 local_port_1;
+};
+
+static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
+ char *sbsr_pl, size_t sbsr_pl_len,
+ unsigned long cb_priv)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+ u8 masked_count;
+ u8 local_port;
+ int rec_index = 0;
+ struct mlxsw_sp_sb_cm *cm;
+ int i;
+
+ memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));
+
+ masked_count = 0;
+ for (local_port = cb_ctx.local_port_1;
+ local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_INGRESS);
+ mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
+ &cm->occ.cur, &cm->occ.max);
+ }
+ if (++masked_count == cb_ctx.masked_count)
+ break;
+ }
+ masked_count = 0;
+ for (local_port = cb_ctx.local_port_1;
+ local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_EGRESS);
+ mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
+ &cm->occ.cur, &cm->occ.max);
+ }
+ if (++masked_count == cb_ctx.masked_count)
+ break;
+ }
+}
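
The callback depends on the ordering of SBSR records; judging from the two walks above, the layout is:

	/* Assumed record layout (derived from the walk, not stated here):
	 *   rec[0 .. n*TC - 1]       ingress, port-major, TC-minor
	 *   rec[n*TC .. 2*n*TC - 1]  egress, same ordering
	 * where n == cb_ctx.masked_count and TC == MLXSW_SP_SB_TC_COUNT,
	 * which is why the port walk restarts from local_port_1 for egress.
	 */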
+
+int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+ unsigned long cb_priv;
+ LIST_HEAD(bulk_list);
+ char *sbsr_pl;
+ u8 masked_count;
+ u8 local_port_1;
+ u8 local_port = 0;
+ int i;
+ int err;
+ int err2;
+
+ sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
+ if (!sbsr_pl)
+ return -ENOMEM;
+
+next_batch:
+ local_port++;
+ local_port_1 = local_port;
+ masked_count = 0;
+ mlxsw_reg_sbsr_pack(sbsr_pl, false);
+ for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
+ mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
+ }
+ for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
+ err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ &bulk_list);
+ if (err)
+ goto out;
+ err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_EGRESS,
+ &bulk_list);
+ if (err)
+ goto out;
+ }
+ if (++masked_count == MASKED_COUNT_MAX)
+ goto do_query;
+ }
+
+do_query:
+ cb_ctx.masked_count = masked_count;
+ cb_ctx.local_port_1 = local_port_1;
+ memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
+ err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
+ &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
+ cb_priv);
+ if (err)
+ goto out;
+ if (local_port < MLXSW_PORT_MAX_PORTS)
+ goto next_batch;
+
+out:
+ err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
+ if (!err)
+ err = err2;
+ kfree(sbsr_pl);
+ return err;
+}
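
Each masked port contributes 2 * MLXSW_SP_SB_TC_COUNT records to an SBSR response, so MASKED_COUNT_MAX bounds the ports per batch and the next_batch label resumes the walk. The invariant, as a hypothetical compile-time check:

	/* Not in the patch; the integer division in MASKED_COUNT_MAX
	 * guarantees a batch never overflows the record area.
	 */
	BUILD_BUG_ON(MASKED_COUNT_MAX * MLXSW_SP_SB_TC_COUNT * 2 >
		     MLXSW_REG_SBSR_REC_MAX_COUNT);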
+
+int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ LIST_HEAD(bulk_list);
+ char *sbsr_pl;
+ unsigned int masked_count;
+ u8 local_port = 0;
+ int i;
+ int err;
+ int err2;
+
+ sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
+ if (!sbsr_pl)
+ return -ENOMEM;
+
+next_batch:
+ local_port++;
+ masked_count = 0;
+ mlxsw_reg_sbsr_pack(sbsr_pl, true);
+ for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
+ mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
+ }
+ for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
+ err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ &bulk_list);
+ if (err)
+ goto out;
+ err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_EGRESS,
+ &bulk_list);
+ if (err)
+ goto out;
+ }
+ if (++masked_count == MASKED_COUNT_MAX)
+ goto do_query;
+ }
+
+do_query:
+ err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
+ &bulk_list, NULL, 0);
+ if (err)
+ goto out;
+ if (local_port < MLXSW_PORT_MAX_PORTS)
+ goto next_batch;
+
+out:
+ err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
+ if (!err)
+ err = err2;
+ kfree(sbsr_pl);
+ return err;
+}
+
+int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
+ pool, dir);
+
+ *p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur);
+ *p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max);
+ return 0;
+}
+
+int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pg_buff = tc_index;
+ enum mlxsw_reg_sbxx_dir dir = pool_type;
+ struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
+ pg_buff, dir);
+
+ *p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur);
+ *p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
new file mode 100644
index 000000000..01cfb7512
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -0,0 +1,484 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <net/dcbnl.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+static u8 mlxsw_sp_dcbnl_getdcbx(struct net_device __always_unused *dev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 mlxsw_sp_dcbnl_setdcbx(struct net_device __always_unused *dev,
+ u8 mode)
+{
+ return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(ets, mlxsw_sp_port->dcb.ets, sizeof(*ets));
+
+ return 0;
+}
+
+static int mlxsw_sp_port_ets_validate(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_ets *ets)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ bool has_ets_tc = false;
+ int i, tx_bw_sum = 0;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ has_ets_tc = true;
+ tx_bw_sum += ets->tc_tx_bw[i];
+ break;
+ default:
+ netdev_err(dev, "Only strict priority and ETS are supported\n");
+ return -EINVAL;
+ }
+
+ if (ets->prio_tc[i] >= IEEE_8021QAZ_MAX_TCS) {
+ netdev_err(dev, "Invalid TC\n");
+ return -EINVAL;
+ }
+ }
+
+ if (has_ets_tc && tx_bw_sum != 100) {
+ netdev_err(dev, "Total ETS bandwidth should equal 100\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
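
For reference, an illustrative ieee_ets that passes this validation (values hypothetical): six strict-priority TCs plus two ETS TCs splitting the full bandwidth:

	struct ieee_ets ets = {
		.tc_tsa   = { [0 ... 5] = IEEE_8021QAZ_TSA_STRICT,
			      [6] = IEEE_8021QAZ_TSA_ETS,
			      [7] = IEEE_8021QAZ_TSA_ETS },
		.tc_tx_bw = { [6] = 60, [7] = 40 },	/* sums to 100 */
		.prio_tc  = { 0, 0, 1, 1, 2, 2, 6, 7 },	/* all < 8 */
	};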
+
+static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 *prio_tc)
+{
+ char pptb_pl[MLXSW_REG_PPTB_LEN];
+ int i;
+
+ mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
+
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
+ pptb_pl);
+}
+
+static bool mlxsw_sp_ets_has_pg(u8 *prio_tc, u8 pg)
+{
+ int i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ if (prio_tc[i] == pg)
+ return true;
+ return false;
+}
+
+static int mlxsw_sp_port_pg_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 *old_prio_tc, u8 *new_prio_tc)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ int err, i;
+
+ mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+ if (err)
+ return err;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ u8 pg = old_prio_tc[i];
+
+ if (!mlxsw_sp_ets_has_pg(new_prio_tc, pg))
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg, 0);
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+}
+
+static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_ets *ets)
+{
+ bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+ struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+ /* Create the required PGs, but don't destroy existing ones, as
+ * traffic is still directed to them.
+ */
+ err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
+ ets->prio_tc, pause_en,
+ mlxsw_sp_port->dcb.pfc);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_pg_prio_map(mlxsw_sp_port, ets->prio_tc);
+ if (err) {
+ netdev_err(dev, "Failed to set PG-priority mapping\n");
+ goto err_port_prio_pg_map;
+ }
+
+ err = mlxsw_sp_port_pg_destroy(mlxsw_sp_port, my_ets->prio_tc,
+ ets->prio_tc);
+ if (err)
+ netdev_warn(dev, "Failed to remove ununsed PGs\n");
+
+ return 0;
+
+err_port_prio_pg_map:
+ mlxsw_sp_port_pg_destroy(mlxsw_sp_port, ets->prio_tc, my_ets->prio_tc);
+ return err;
+}
+
+static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_ets *ets)
+{
+ struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int i, err;
+
+ /* Egress configuration. */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ bool dwrr = ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS;
+ u8 weight = ets->tc_tx_bw[i];
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ 0, dwrr, weight);
+ if (err) {
+ netdev_err(dev, "Failed to link subgroup ETS element %d to group\n",
+ i);
+ goto err_port_ets_set;
+ }
+ }
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
+ ets->prio_tc[i]);
+ if (err) {
+ netdev_err(dev, "Failed to map prio %d to TC %d\n", i,
+ ets->prio_tc[i]);
+ goto err_port_prio_tc_set;
+ }
+ }
+
+ /* Ingress configuration. */
+ err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, ets);
+ if (err)
+ goto err_port_headroom_set;
+
+ return 0;
+
+err_port_headroom_set:
+ i = IEEE_8021QAZ_MAX_TCS;
+err_port_prio_tc_set:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, my_ets->prio_tc[i]);
+ i = IEEE_8021QAZ_MAX_TCS;
+err_port_ets_set:
+ for (i--; i >= 0; i--) {
+ bool dwrr = my_ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS;
+ u8 weight = my_ets->tc_tx_bw[i];
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ 0, dwrr, weight);
+ }
+ return err;
+}
+
+static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_port_ets_validate(mlxsw_sp_port, ets);
+ if (err)
+ return err;
+
+ err = __mlxsw_sp_dcbnl_ieee_setets(mlxsw_sp_port, ets);
+ if (err)
+ return err;
+
+ memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
+ mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev,
+ struct ieee_maxrate *maxrate)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(maxrate, mlxsw_sp_port->dcb.maxrate, sizeof(*maxrate));
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev,
+ struct ieee_maxrate *maxrate)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct ieee_maxrate *my_maxrate = mlxsw_sp_port->dcb.maxrate;
+ int err, i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ i, 0,
+ maxrate->tc_maxrate[i]);
+ if (err) {
+ netdev_err(dev, "Failed to set maxrate for TC %d\n", i);
+ goto err_port_ets_maxrate_set;
+ }
+ }
+
+ memcpy(mlxsw_sp_port->dcb.maxrate, maxrate, sizeof(*maxrate));
+
+ return 0;
+
+err_port_ets_maxrate_set:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ i, 0, my_maxrate->tc_maxrate[i]);
+ return err;
+}
+
+static int mlxsw_sp_port_pfc_cnt_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 prio)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct ieee_pfc *my_pfc = mlxsw_sp_port->dcb.pfc;
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int err;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
+ MLXSW_REG_PPCNT_PRIO_CNT, prio);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ if (err)
+ return err;
+
+ my_pfc->requests[prio] = mlxsw_reg_ppcnt_tx_pause_get(ppcnt_pl);
+ my_pfc->indications[prio] = mlxsw_reg_ppcnt_rx_pause_get(ppcnt_pl);
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err, i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_pfc_cnt_get(mlxsw_sp_port, i);
+ if (err) {
+ netdev_err(dev, "Failed to get PFC count for priority %d\n",
+ i);
+ return err;
+ }
+ }
+
+ memcpy(pfc, mlxsw_sp_port->dcb.pfc, sizeof(*pfc));
+
+ return 0;
+}
+
+static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_pfc *pfc)
+{
+ char pfcc_pl[MLXSW_REG_PFCC_LEN];
+
+ mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);
+
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
+ pfcc_pl);
+}
+
+static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
+ pfc->pfc_en) {
+ netdev_err(dev, "PAUSE frames already enabled on port\n");
+ return -EINVAL;
+ }
+
+ err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
+ mlxsw_sp_port->dcb.ets->prio_tc,
+ false, pfc);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom for PFC\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_pfc_set(mlxsw_sp_port, pfc);
+ if (err) {
+ netdev_err(dev, "Failed to configure PFC\n");
+ goto err_port_pfc_set;
+ }
+
+ memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
+ mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+
+err_port_pfc_set:
+ __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
+ mlxsw_sp_port->dcb.ets->prio_tc, false,
+ mlxsw_sp_port->dcb.pfc);
+ return err;
+}
+
+static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
+ .ieee_getets = mlxsw_sp_dcbnl_ieee_getets,
+ .ieee_setets = mlxsw_sp_dcbnl_ieee_setets,
+ .ieee_getmaxrate = mlxsw_sp_dcbnl_ieee_getmaxrate,
+ .ieee_setmaxrate = mlxsw_sp_dcbnl_ieee_setmaxrate,
+ .ieee_getpfc = mlxsw_sp_dcbnl_ieee_getpfc,
+ .ieee_setpfc = mlxsw_sp_dcbnl_ieee_setpfc,
+
+ .getdcbx = mlxsw_sp_dcbnl_getdcbx,
+ .setdcbx = mlxsw_sp_dcbnl_setdcbx,
+};
+
+static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->dcb.ets = kzalloc(sizeof(*mlxsw_sp_port->dcb.ets),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->dcb.ets)
+ return -ENOMEM;
+
+ mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+}
+
+static void mlxsw_sp_port_ets_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->dcb.ets);
+}
+
+static int mlxsw_sp_port_maxrate_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int i;
+
+ mlxsw_sp_port->dcb.maxrate = kmalloc(sizeof(*mlxsw_sp_port->dcb.maxrate),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->dcb.maxrate)
+ return -ENOMEM;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port->dcb.maxrate->tc_maxrate[i] = MLXSW_REG_QEEC_MAS_DIS;
+
+ return 0;
+}
+
+static void mlxsw_sp_port_maxrate_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->dcb.maxrate);
+}
+
+static int mlxsw_sp_port_pfc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->dcb.pfc = kzalloc(sizeof(*mlxsw_sp_port->dcb.pfc),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->dcb.pfc)
+ return -ENOMEM;
+
+ mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+}
+
+static void mlxsw_sp_port_pfc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->dcb.pfc);
+}
+
+int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_maxrate_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_maxrate_init;
+ err = mlxsw_sp_port_pfc_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_pfc_init;
+
+ mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops;
+
+ return 0;
+
+err_port_pfc_init:
+ mlxsw_sp_port_maxrate_fini(mlxsw_sp_port);
+err_port_maxrate_init:
+ mlxsw_sp_port_ets_fini(mlxsw_sp_port);
+ return err;
+}
+
+void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port_pfc_fini(mlxsw_sp_port);
+ mlxsw_sp_port_maxrate_fini(mlxsw_sp_port);
+ mlxsw_sp_port_ets_fini(mlxsw_sp_port);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 9cd6f4722..3710f19ed 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1438,8 +1438,8 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
- schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
- msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
+ mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
+ msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 7a60a2675..25f658b38 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -43,7 +43,6 @@
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
-#include <net/devlink.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
@@ -75,11 +74,11 @@ struct mlxsw_sx_port_pcpu_stats {
};
struct mlxsw_sx_port {
+ struct mlxsw_core_port core_port; /* must be first */
struct net_device *dev;
struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sx *mlxsw_sx;
u8 local_port;
- struct devlink_port devlink_port;
};
/* tx_hdr_version
@@ -303,7 +302,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
u64 len;
int err;
- if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
return NETDEV_TX_BUSY;
if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
@@ -317,11 +316,14 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
}
}
mlxsw_sx_txhdr_construct(skb, &tx_info);
- len = skb->len;
+ /* The TX header is consumed by the HW on the way out, so we
+ * shouldn't count its bytes as sent.
+ */
+ len = skb->len - MLXSW_TXHDR_LEN;
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
- err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);
if (!err) {
pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
@@ -518,7 +520,8 @@ static void mlxsw_sx_port_get_stats(struct net_device *dev,
int i;
int err;
- mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
+ MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
@@ -955,9 +958,7 @@ mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sx->core);
struct mlxsw_sx_port *mlxsw_sx_port;
- struct devlink_port *devlink_port;
struct net_device *dev;
bool usable;
int err;
@@ -1011,14 +1012,6 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
goto port_not_usable;
}
- devlink_port = &mlxsw_sx_port->devlink_port;
- err = devlink_port_register(devlink, devlink_port, local_port);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register devlink port\n",
- mlxsw_sx_port->local_port);
- goto err_devlink_port_register;
- }
-
err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
if (err) {
dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
@@ -1076,11 +1069,19 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
goto err_register_netdev;
}
- devlink_port_type_eth_set(devlink_port, dev);
+ err = mlxsw_core_port_init(mlxsw_sx->core, &mlxsw_sx_port->core_port,
+ mlxsw_sx_port->local_port, dev, false, 0);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
+ mlxsw_sx_port->local_port);
+ goto err_core_port_init;
+ }
mlxsw_sx->ports[local_port] = mlxsw_sx_port;
return 0;
+err_core_port_init:
+ unregister_netdev(dev);
err_register_netdev:
err_port_mac_learning_mode_set:
err_port_stp_state_set:
@@ -1089,8 +1090,6 @@ err_port_mtu_set:
err_port_speed_set:
err_port_swid_set:
err_port_system_port_mapping_set:
- devlink_port_unregister(&mlxsw_sx_port->devlink_port);
-err_devlink_port_register:
port_not_usable:
err_port_module_check:
err_dev_addr_get:
@@ -1103,15 +1102,12 @@ err_alloc_stats:
static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
- struct devlink_port *devlink_port;
if (!mlxsw_sx_port)
return;
- devlink_port = &mlxsw_sx_port->devlink_port;
- devlink_port_type_clear(devlink_port);
+ mlxsw_core_port_fini(&mlxsw_sx_port->core_port);
unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
- devlink_port_unregister(devlink_port);
free_percpu(mlxsw_sx_port->pcpu_stats);
free_netdev(mlxsw_sx_port->dev);
}
@@ -1454,10 +1450,10 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
}
-static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
+static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
- struct mlxsw_sx *mlxsw_sx = priv;
+ struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
int err;
mlxsw_sx->core = mlxsw_core;
@@ -1504,9 +1500,9 @@ err_event_register:
return err;
}
-static void mlxsw_sx_fini(void *priv)
+static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
{
- struct mlxsw_sx *mlxsw_sx = priv;
+ struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sx_traps_fini(mlxsw_sx);
mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index a8522d8af..20cb85bc0 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1354,6 +1354,7 @@ ks8695_probe(struct platform_device *pdev)
struct resource *rxirq_res, *txirq_res, *linkirq_res;
int ret = 0;
int buff_n;
+ bool inv_mac_addr = false;
u32 machigh, maclow;
/* Initialise a net_device */
@@ -1456,8 +1457,7 @@ ks8695_probe(struct platform_device *pdev)
ndev->dev_addr[5] = maclow & 0xFF;
if (!is_valid_ether_addr(ndev->dev_addr))
- dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", ndev->name);
+ inv_mac_addr = true;
/* In order to be efficient memory-wise, we allocate both
* rings in one go.
@@ -1520,6 +1520,9 @@ ks8695_probe(struct platform_device *pdev)
ret = register_netdev(ndev);
if (ret == 0) {
+ if (inv_mac_addr)
+ dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
+ ndev->name);
dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
ks8695_port_type(ksp), ndev->dev_addr);
} else {
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 75dc46c5f..280e761d3 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4790,7 +4790,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
/* Notify the network subsystem that the packet has been sent. */
if (dev)
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
/**
@@ -4965,7 +4965,7 @@ static void netdev_tx_timeout(struct net_device *dev)
hw_ena_intr(hw);
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 86ea17e7b..0a26b11ca 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -28,11 +28,12 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
+#include <linux/of_net.h>
#include "enc28j60_hw.h"
#define DRV_NAME "enc28j60"
-#define DRV_VERSION "1.01"
+#define DRV_VERSION "1.02"
#define SPI_OPLEN 1
@@ -89,22 +90,26 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
{
u8 *rx_buf = priv->spi_transfer_buf + 4;
u8 *tx_buf = priv->spi_transfer_buf;
- struct spi_transfer t = {
+ struct spi_transfer tx = {
.tx_buf = tx_buf,
+ .len = SPI_OPLEN,
+ };
+ struct spi_transfer rx = {
.rx_buf = rx_buf,
- .len = SPI_OPLEN + len,
+ .len = len,
};
struct spi_message msg;
int ret;
tx_buf[0] = ENC28J60_READ_BUF_MEM;
- tx_buf[1] = tx_buf[2] = tx_buf[3] = 0; /* don't care */
spi_message_init(&msg);
- spi_message_add_tail(&t, &msg);
+ spi_message_add_tail(&tx, &msg);
+ spi_message_add_tail(&rx, &msg);
+
ret = spi_sync(priv->spi, &msg);
if (ret == 0) {
- memcpy(data, &rx_buf[SPI_OPLEN], len);
+ memcpy(data, rx_buf, len);
ret = msg.status;
}
if (ret && netif_msg_drv(priv))
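
The read was reworked from one combined transfer into two chained transfers, so the payload lands directly in the caller's buffer instead of being copied past the opcode, and the dummy pad bytes go away. The general shape of the pattern (a sketch; spi, data, len and ret are assumed in scope):

	u8 opcode = ENC28J60_READ_BUF_MEM;
	struct spi_transfer xfers[2] = {
		{ .tx_buf = &opcode, .len = 1 },   /* command out */
		{ .rx_buf = data,    .len = len }, /* payload straight in */
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&xfers[0], &m);
	spi_message_add_tail(&xfers[1], &m);
	ret = spi_sync(spi, &m);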
@@ -1146,7 +1151,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
enc28j60_phy_read(priv, PHIR);
}
/* TX complete handler */
- if ((intflags & EIR_TXIF) != 0) {
+ if (((intflags & EIR_TXIF) != 0) &&
+ ((intflags & EIR_TXERIF) == 0)) {
bool err = false;
loop++;
if (netif_msg_intr(priv))
@@ -1198,7 +1204,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
enc28j60_tx_clear(ndev, true);
} else
enc28j60_tx_clear(ndev, true);
- locked_reg_bfclr(priv, EIR, EIR_TXERIF);
+ locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
}
/* RX Error handler */
if ((intflags & EIR_RXERIF) != 0) {
@@ -1233,6 +1239,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
*/
static void enc28j60_hw_tx(struct enc28j60_net *priv)
{
+ BUG_ON(!priv->tx_skb);
+
if (netif_msg_tx_queued(priv))
printk(KERN_DEBUG DRV_NAME
": Tx Packet Len:%d\n", priv->tx_skb->len);
@@ -1544,6 +1552,7 @@ static int enc28j60_probe(struct spi_device *spi)
{
struct net_device *dev;
struct enc28j60_net *priv;
+ const void *macaddr;
int ret = 0;
if (netif_msg_drv(&debug))
@@ -1575,7 +1584,12 @@ static int enc28j60_probe(struct spi_device *spi)
ret = -EIO;
goto error_irq;
}
- eth_hw_addr_random(dev);
+
+ macaddr = of_get_mac_address(spi->dev.of_node);
+ if (macaddr)
+ ether_addr_copy(dev->dev_addr, macaddr);
+ else
+ eth_hw_addr_random(dev);
enc28j60_set_hw_macaddr(dev);
/* Board setup must set the relevant edge trigger type;
@@ -1630,9 +1644,16 @@ static int enc28j60_remove(struct spi_device *spi)
return 0;
}
+static const struct of_device_id enc28j60_dt_ids[] = {
+ { .compatible = "microchip,enc28j60" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, enc28j60_dt_ids);
+
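With the OF match table registered, boards can now instantiate the chip from the device tree; a hypothetical node (property values illustrative only):

	/* Hypothetical DT node matched by the table above:
	 *
	 *	ethernet@0 {
	 *		compatible = "microchip,enc28j60";
	 *		reg = <0>;
	 *		interrupt-parent = <&gpio>;
	 *		interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
	 *		spi-max-frequency = <12000000>;
	 *	};
	 */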
static struct spi_driver enc28j60_driver = {
.driver = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
+ .of_match_table = enc28j60_dt_ids,
},
.probe = enc28j60_probe,
.remove = enc28j60_remove,
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 7df318346..42e34076d 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -874,7 +874,7 @@ static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
/* save the timestamp */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* Remember the skb for deferred processing */
priv->tx_skb = skb;
@@ -890,7 +890,7 @@ static void encx24j600_tx_timeout(struct net_device *dev)
struct encx24j600_priv *priv = netdev_priv(dev);
netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n",
- jiffies, jiffies - dev->trans_start);
+ jiffies, jiffies - dev_trans_start(dev));
dev->stats.tx_errors++;
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 3e67f451f..4367dd687 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -376,7 +376,7 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
priv->tx_head = TX_NEXT(tx_head);
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
ret = NETDEV_TX_OK;
out_unlock:
spin_unlock_irq(&priv->txlock);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 122c2ee3d..ed89029ff 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -1904,7 +1904,7 @@ static void ns_tx_timeout(struct net_device *dev)
spin_unlock_irq(&np->lock);
enable_irq(irq);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 1bd419dbd..612c7a44b 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -174,7 +174,7 @@ static void sonic_tx_timeout(struct net_device *dev)
/* Try to restart the adaptor. */
sonic_init(dev);
lp->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 9ba975853..2874dffe7 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4021,7 +4021,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags = 0;
u16 vlan_tag = 0;
struct fifo_info *fifo = NULL;
- int do_spin_lock = 1;
int offload_type;
int enable_per_list_interrupt = 0;
struct config_param *config = &sp->config;
@@ -4074,7 +4073,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
queue += sp->udp_fifo_idx;
if (skb->len > 1024)
enable_per_list_interrupt = 1;
- do_spin_lock = 0;
}
}
}
@@ -4084,12 +4082,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
[skb->priority & (MAX_TX_FIFOS - 1)];
fifo = &mac_control->fifos[queue];
- if (do_spin_lock)
- spin_lock_irqsave(&fifo->tx_lock, flags);
- else {
- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
- return NETDEV_TX_LOCKED;
- }
+ spin_lock_irqsave(&fifo->tx_lock, flags);
if (sp->config.multiq) {
if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 75683fb26..e744acc18 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -59,8 +59,8 @@
netdev_warn((nn)->netdev, fmt, ## args); \
} while (0)
-/* Max time to wait for NFP to respond on updates (in ms) */
-#define NFP_NET_POLL_TIMEOUT 5000
+/* Max time to wait for NFP to respond on updates (in seconds) */
+#define NFP_NET_POLL_TIMEOUT 5
/* Bar allocation */
#define NFP_NET_CRTL_BAR 0
@@ -298,6 +298,8 @@ struct nfp_net_rx_buf {
* @rxds: Virtual address of FL/RX ring in host memory
* @dma: DMA address of the FL/RX ring
* @size: Size, in bytes, of the FL/RX ring (needed to free)
+ * @bufsz: Buffer allocation size for convenience of management routines
+ * (NOTE: this is in the second cache line; do not use it on the fast path!)
*/
struct nfp_net_rx_ring {
struct nfp_net_r_vector *r_vec;
@@ -319,6 +321,7 @@ struct nfp_net_rx_ring {
dma_addr_t dma;
unsigned int size;
+ unsigned int bufsz;
} ____cacheline_aligned;
/**
@@ -444,6 +447,10 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
* @shared_name: Name for shared interrupt
* @me_freq_mhz: ME clock_freq (MHz)
* @reconfig_lock: Protects HW reconfiguration request regs/machinery
+ * @reconfig_posted: Pending reconfig bits coming from async sources
+ * @reconfig_timer_active: Timer for reading reconfiguration results is pending
+ * @reconfig_sync_present: Some thread is performing synchronous reconfig
+ * @reconfig_timer: Timer for async reading of reconfig results
* @link_up: Is the link up?
* @link_status_lock: Protects @link_up and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
@@ -472,6 +479,9 @@ struct nfp_net {
u32 rx_offset;
+ struct nfp_net_tx_ring *tx_rings;
+ struct nfp_net_rx_ring *rx_rings;
+
#ifdef CONFIG_PCI_IOV
unsigned int num_vfs;
struct vf_data_storage *vfinfo;
@@ -504,9 +514,6 @@ struct nfp_net {
int txd_cnt;
int rxd_cnt;
- struct nfp_net_tx_ring tx_rings[NFP_NET_MAX_TX_RINGS];
- struct nfp_net_rx_ring rx_rings[NFP_NET_MAX_RX_RINGS];
-
u8 num_irqs;
u8 num_r_vecs;
struct nfp_net_r_vector r_vecs[NFP_NET_MAX_TX_RINGS];
@@ -528,6 +535,10 @@ struct nfp_net {
spinlock_t link_status_lock;
spinlock_t reconfig_lock;
+ u32 reconfig_posted;
+ bool reconfig_timer_active;
+ bool reconfig_sync_present;
+ struct timer_list reconfig_timer;
u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames;
@@ -721,6 +732,7 @@ void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
int nfp_net_irqs_alloc(struct nfp_net *nn);
void nfp_net_irqs_disable(struct nfp_net *nn);
+int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt);
#ifdef CONFIG_NFP_NET_DEBUG
void nfp_net_debugfs_create(void);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 43c618baf..ba26bb356 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -80,6 +80,116 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
put_unaligned_le32(reg, fw_ver);
}
+/* Firmware reconfig
+ *
+ * Firmware reconfig may take a while so we have two versions of it -
+ * synchronous and asynchronous (posted). All synchronous callers are holding
+ * RTNL so we don't have to worry about serializing them.
+ */
+static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
+{
+ nn_writel(nn, NFP_NET_CFG_UPDATE, update);
+ /* ensure update is written before pinging HW */
+ nn_pci_flush(nn);
+ nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+}
+
+/* Pass 0 as update to run posted reconfigs. */
+static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
+{
+ update |= nn->reconfig_posted;
+ nn->reconfig_posted = 0;
+
+ nfp_net_reconfig_start(nn, update);
+
+ nn->reconfig_timer_active = true;
+ mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
+}
+
+static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
+{
+ u32 reg;
+
+ reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
+ if (reg == 0)
+ return true;
+ if (reg & NFP_NET_CFG_UPDATE_ERR) {
+ nn_err(nn, "Reconfig error: 0x%08x\n", reg);
+ return true;
+ } else if (last_check) {
+ nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
+ return true;
+ }
+
+ return false;
+}
+
+static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+{
+ bool timed_out = false;
+
+ /* Poll update field, waiting for NFP to ack the config */
+ while (!nfp_net_reconfig_check_done(nn, timed_out)) {
+ msleep(1);
+ timed_out = time_is_before_eq_jiffies(deadline);
+ }
+
+ if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
+ return -EIO;
+
+ return timed_out ? -EIO : 0;
+}
+
+static void nfp_net_reconfig_timer(unsigned long data)
+{
+ struct nfp_net *nn = (void *)data;
+
+ spin_lock_bh(&nn->reconfig_lock);
+
+ nn->reconfig_timer_active = false;
+
+ /* If sync caller is present it will take over from us */
+ if (nn->reconfig_sync_present)
+ goto done;
+
+ /* Read reconfig status and report errors */
+ nfp_net_reconfig_check_done(nn, true);
+
+ if (nn->reconfig_posted)
+ nfp_net_reconfig_start_async(nn, 0);
+done:
+ spin_unlock_bh(&nn->reconfig_lock);
+}
+
+/**
+ * nfp_net_reconfig_post() - Post async reconfig request
+ * @nn: NFP Net device to reconfigure
+ * @update: The value for the update field in the BAR config
+ *
+ * Record FW reconfiguration request. Reconfiguration will be kicked off
+ * whenever reconfiguration machinery is idle. Multiple requests can be
+ * merged together!
+ */
+static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
+{
+ spin_lock_bh(&nn->reconfig_lock);
+
+ /* Sync caller will kick off async reconf when it's done, just post */
+ if (nn->reconfig_sync_present) {
+ nn->reconfig_posted |= update;
+ goto done;
+ }
+
+ /* Opportunistically check if the previous command is done */
+ if (!nn->reconfig_timer_active ||
+ nfp_net_reconfig_check_done(nn, false))
+ nfp_net_reconfig_start_async(nn, update);
+ else
+ nn->reconfig_posted |= update;
+done:
+ spin_unlock_bh(&nn->reconfig_lock);
+}
+
/**
* nfp_net_reconfig() - Reconfigure the firmware
* @nn: NFP Net device to reconfigure
@@ -93,35 +203,45 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
*/
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
- int cnt, ret = 0;
- u32 new;
+ bool cancelled_timer = false;
+ u32 pre_posted_requests;
+ int ret;
spin_lock_bh(&nn->reconfig_lock);
- nn_writel(nn, NFP_NET_CFG_UPDATE, update);
- /* ensure update is written before pinging HW */
- nn_pci_flush(nn);
- nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+ nn->reconfig_sync_present = true;
- /* Poll update field, waiting for NFP to ack the config */
- for (cnt = 0; ; cnt++) {
- new = nn_readl(nn, NFP_NET_CFG_UPDATE);
- if (new == 0)
- break;
- if (new & NFP_NET_CFG_UPDATE_ERR) {
- nn_err(nn, "Reconfig error: 0x%08x\n", new);
- ret = -EIO;
- break;
- } else if (cnt >= NFP_NET_POLL_TIMEOUT) {
- nn_err(nn, "Reconfig timeout for 0x%08x after %dms\n",
- update, cnt);
- ret = -EIO;
- break;
- }
- mdelay(1);
+ if (nn->reconfig_timer_active) {
+ del_timer(&nn->reconfig_timer);
+ nn->reconfig_timer_active = false;
+ cancelled_timer = true;
+ }
+ pre_posted_requests = nn->reconfig_posted;
+ nn->reconfig_posted = 0;
+
+ spin_unlock_bh(&nn->reconfig_lock);
+
+ if (cancelled_timer)
+ nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
+
+ /* Run the posted reconfigs which were issued before we started */
+ if (pre_posted_requests) {
+ nfp_net_reconfig_start(nn, pre_posted_requests);
+ nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
}
+ nfp_net_reconfig_start(nn, update);
+ ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
+
+ spin_lock_bh(&nn->reconfig_lock);
+
+ if (nn->reconfig_posted)
+ nfp_net_reconfig_start_async(nn, 0);
+
+ nn->reconfig_sync_present = false;
+
spin_unlock_bh(&nn->reconfig_lock);
+
return ret;
}
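Taken together, the machinery above gives callers two entry points: nfp_net_reconfig() for sleepable, RTNL-holding paths that need the result, and nfp_net_reconfig_post() for atomic paths such as ndo_set_rx_mode. A hedged sketch of caller-side usage (register and flag names are from this driver; the wrapper functions are illustrative):

/* Synchronous: may sleep, holds RTNL, wants the error code. */
static int example_sync_update(struct nfp_net *nn, u32 mtu)
{
	nn_writel(nn, NFP_NET_CFG_MTU, mtu);
	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

/* Asynchronous: safe in atomic context; requests are merged and,
 * if the HW is busy, picked up later by reconfig_timer.
 */
static void example_async_update(struct nfp_net *nn, u32 new_ctrl)
{
	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
}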
@@ -347,12 +467,18 @@ static irqreturn_t nfp_net_irq_exn(int irq, void *data)
/**
* nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
* @tx_ring: TX ring structure
+ * @r_vec: IRQ vector servicing this ring
+ * @idx: Ring index
*/
-static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring)
+static void
+nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
+ struct nfp_net_r_vector *r_vec, unsigned int idx)
{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
+ tx_ring->idx = idx;
+ tx_ring->r_vec = r_vec;
+
tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}
@@ -360,12 +486,18 @@ static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring)
/**
* nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
* @rx_ring: RX ring structure
+ * @r_vec: IRQ vector servicing this ring
+ * @idx: Ring index
*/
-static void nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring)
+static void
+nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_r_vector *r_vec, unsigned int idx)
{
- struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
+ rx_ring->idx = idx;
+ rx_ring->r_vec = r_vec;
+
rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);
@@ -401,16 +533,6 @@ static void nfp_net_irqs_assign(struct net_device *netdev)
r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r;
cpumask_set_cpu(r, &r_vec->affinity_mask);
-
- r_vec->tx_ring = &nn->tx_rings[r];
- nn->tx_rings[r].idx = r;
- nn->tx_rings[r].r_vec = r_vec;
- nfp_net_tx_ring_init(r_vec->tx_ring);
-
- r_vec->rx_ring = &nn->rx_rings[r];
- nn->rx_rings[r].idx = r;
- nn->rx_rings[r].r_vec = r_vec;
- nfp_net_rx_ring_init(r_vec->rx_ring);
}
}
@@ -865,61 +987,59 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
}
/**
- * nfp_net_tx_flush() - Free any untransmitted buffers currently on the TX ring
- * @tx_ring: TX ring structure
+ * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
+ * @nn: NFP Net device
+ * @tx_ring: TX ring structure
*
* Assumes that the device is stopped
*/
-static void nfp_net_tx_flush(struct nfp_net_tx_ring *tx_ring)
+static void
+nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
const struct skb_frag_struct *frag;
struct netdev_queue *nd_q;
- struct sk_buff *skb;
- int nr_frags;
- int fidx;
- int idx;
+ struct pci_dev *pdev = nn->pdev;
while (tx_ring->rd_p != tx_ring->wr_p) {
- idx = tx_ring->rd_p % tx_ring->cnt;
+ int nr_frags, fidx, idx;
+ struct sk_buff *skb;
+ idx = tx_ring->rd_p % tx_ring->cnt;
skb = tx_ring->txbufs[idx].skb;
- if (skb) {
- nr_frags = skb_shinfo(skb)->nr_frags;
- fidx = tx_ring->txbufs[idx].fidx;
-
- if (fidx == -1) {
- /* unmap head */
- dma_unmap_single(&pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
- skb_headlen(skb),
- DMA_TO_DEVICE);
- } else {
- /* unmap fragment */
- frag = &skb_shinfo(skb)->frags[fidx];
- dma_unmap_page(&pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- }
-
- /* check for last gather fragment */
- if (fidx == nr_frags - 1)
- dev_kfree_skb_any(skb);
-
- tx_ring->txbufs[idx].dma_addr = 0;
- tx_ring->txbufs[idx].skb = NULL;
- tx_ring->txbufs[idx].fidx = -2;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ fidx = tx_ring->txbufs[idx].fidx;
+
+ if (fidx == -1) {
+ /* unmap head */
+ dma_unmap_single(&pdev->dev,
+ tx_ring->txbufs[idx].dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ } else {
+ /* unmap fragment */
+ frag = &skb_shinfo(skb)->frags[fidx];
+ dma_unmap_page(&pdev->dev,
+ tx_ring->txbufs[idx].dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
}
- memset(&tx_ring->txds[idx], 0, sizeof(tx_ring->txds[idx]));
+ /* check for last gather fragment */
+ if (fidx == nr_frags - 1)
+ dev_kfree_skb_any(skb);
+
+ tx_ring->txbufs[idx].dma_addr = 0;
+ tx_ring->txbufs[idx].skb = NULL;
+ tx_ring->txbufs[idx].fidx = -2;
tx_ring->qcp_rd_p++;
tx_ring->rd_p++;
}
+ memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
+ tx_ring->wr_p = 0;
+ tx_ring->rd_p = 0;
+ tx_ring->qcp_rd_p = 0;
+ tx_ring->wr_ptr_add = 0;
+
nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
netdev_tx_reset_queue(nd_q);
}
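The reset loop above relies on the driver's free-running ring counters: rd_p and wr_p only ever increment, and the array slot is recovered by reduction modulo the ring size. A small worked sketch of that convention (ring sizes are powers of two, enforced elsewhere via roundup_pow_of_two()):

/* With cnt = 8, wr_p = 10 and rd_p = 7 there are wr_p - rd_p = 3
 * buffers in flight, occupying slots 7, 0 and 1. Equality of the
 * raw counters -- not of the slots -- means "ring empty", which
 * stays correct across u32 wraparound.
 */
static unsigned int ring_slot(u32 ptr, u32 cnt)
{
	return ptr % cnt;	/* ptr & (cnt - 1) for power-of-two cnt */
}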
@@ -957,25 +1077,27 @@ static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
* nfp_net_rx_alloc_one() - Allocate and map skb for RX
* @rx_ring: RX ring structure of the skb
* @dma_addr: Pointer to storage for DMA address (output param)
+ * @fl_bufsz: Size of freelist buffers
*
* This function will allocate a new skb and map it for DMA.
*
* Return: allocated skb or NULL on failure.
*/
static struct sk_buff *
-nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr)
+nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
+ unsigned int fl_bufsz)
{
struct nfp_net *nn = rx_ring->r_vec->nfp_net;
struct sk_buff *skb;
- skb = netdev_alloc_skb(nn->netdev, nn->fl_bufsz);
+ skb = netdev_alloc_skb(nn->netdev, fl_bufsz);
if (!skb) {
nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n");
return NULL;
}
*dma_addr = dma_map_single(&nn->pdev->dev, skb->data,
- nn->fl_bufsz, DMA_FROM_DEVICE);
+ fl_bufsz, DMA_FROM_DEVICE);
if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
dev_kfree_skb_any(skb);
nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
@@ -1020,62 +1142,101 @@ static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
}
/**
- * nfp_net_rx_flush() - Free any buffers currently on the RX ring
- * @rx_ring: RX ring to remove buffers from
+ * nfp_net_rx_ring_reset() - Reflect freelist state in SW after device disable
+ * @rx_ring: RX ring structure
*
- * Assumes that the device is stopped
+ * Warning: Do *not* call if ring buffers were never put on the FW freelist
+ * (i.e. device was not enabled)!
*/
-static void nfp_net_rx_flush(struct nfp_net_rx_ring *rx_ring)
+static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
- struct nfp_net *nn = rx_ring->r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
- int idx;
+ unsigned int wr_idx, last_idx;
- while (rx_ring->rd_p != rx_ring->wr_p) {
- idx = rx_ring->rd_p % rx_ring->cnt;
+ /* Move the empty entry to the end of the list */
+ wr_idx = rx_ring->wr_p % rx_ring->cnt;
+ last_idx = rx_ring->cnt - 1;
+ rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
+ rx_ring->rxbufs[wr_idx].skb = rx_ring->rxbufs[last_idx].skb;
+ rx_ring->rxbufs[last_idx].dma_addr = 0;
+ rx_ring->rxbufs[last_idx].skb = NULL;
- if (rx_ring->rxbufs[idx].skb) {
- dma_unmap_single(&pdev->dev,
- rx_ring->rxbufs[idx].dma_addr,
- nn->fl_bufsz, DMA_FROM_DEVICE);
- dev_kfree_skb_any(rx_ring->rxbufs[idx].skb);
- rx_ring->rxbufs[idx].dma_addr = 0;
- rx_ring->rxbufs[idx].skb = NULL;
- }
+ memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
+ rx_ring->wr_p = 0;
+ rx_ring->rd_p = 0;
+ rx_ring->wr_ptr_add = 0;
+}
+
+/**
+ * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
+ * @nn: NFP Net device
+ * @rx_ring: RX ring to remove buffers from
+ *
+ * Assumes that the device is stopped and buffers occupy entries
+ * [0, ring->cnt - 1). After the device is disabled, nfp_net_rx_ring_reset()
+ * must be called to restore the required ring geometry.
+ */
+static void
+nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = nn->pdev;
+ unsigned int i;
- memset(&rx_ring->rxds[idx], 0, sizeof(rx_ring->rxds[idx]));
+ for (i = 0; i < rx_ring->cnt - 1; i++) {
+ /* NULL skb can only happen when initial filling of the ring
+ * fails to allocate enough buffers and calls here to free
+ * already allocated ones.
+ */
+ if (!rx_ring->rxbufs[i].skb)
+ continue;
- rx_ring->rd_p++;
+ dma_unmap_single(&pdev->dev, rx_ring->rxbufs[i].dma_addr,
+ rx_ring->bufsz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rx_ring->rxbufs[i].skb);
+ rx_ring->rxbufs[i].dma_addr = 0;
+ rx_ring->rxbufs[i].skb = NULL;
}
}
/**
- * nfp_net_rx_fill_freelist() - Attempt filling freelist with RX buffers
- * @rx_ring: RX ring to fill
- *
- * Try to fill as many buffers as possible into freelist. Return
- * number of buffers added.
- *
- * Return: Number of freelist buffers added.
+ * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
+ * @nn: NFP Net device
+ * @rx_ring: RX ring to allocate buffers for
*/
-static int nfp_net_rx_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+static int
+nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
{
- struct sk_buff *skb;
- dma_addr_t dma_addr;
+ struct nfp_net_rx_buf *rxbufs;
+ unsigned int i;
+
+ rxbufs = rx_ring->rxbufs;
- while (nfp_net_rx_space(rx_ring)) {
- skb = nfp_net_rx_alloc_one(rx_ring, &dma_addr);
- if (!skb) {
- nfp_net_rx_flush(rx_ring);
+ for (i = 0; i < rx_ring->cnt - 1; i++) {
+ rxbufs[i].skb =
+ nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
+ rx_ring->bufsz);
+ if (!rxbufs[i].skb) {
+ nfp_net_rx_ring_bufs_free(nn, rx_ring);
return -ENOMEM;
}
- nfp_net_rx_give_one(rx_ring, skb, dma_addr);
}
return 0;
}
/**
+ * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @rx_ring: RX ring to fill
+ */
+static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++)
+ nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].skb,
+ rx_ring->rxbufs[i].dma_addr);
+}
+
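The old fill_freelist allocated buffers and handed them to the firmware in one pass; the rework splits this into nfp_net_rx_ring_bufs_alloc(), which can fail and unwind on the host side, and nfp_net_rx_ring_fill_freelist(), which is void and merely publishes already-allocated buffers. A hedged sketch of the resulting bring-up order (the wrapper function is illustrative):

static int example_rx_bring_up(struct nfp_net *nn,
			       struct nfp_net_rx_ring *rx_ring)
{
	int err;

	err = nfp_net_rx_ring_alloc(rx_ring, nn->fl_bufsz, nn->rxd_cnt);
	if (err)
		return err;		/* descriptor memory */

	err = nfp_net_rx_ring_bufs_alloc(nn, rx_ring);
	if (err)
		goto err_free_ring;	/* skbs + DMA mappings */

	/* Only once the device is configured: give buffers to the FW.
	 * This step cannot fail, so no rollback is needed past it.
	 */
	nfp_net_rx_ring_fill_freelist(rx_ring);
	return 0;

err_free_ring:
	nfp_net_rx_ring_free(rx_ring);
	return err;
}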
+/**
* nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
* @flags: RX descriptor flags field in CPU byte order
*/
@@ -1240,7 +1401,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
skb = rx_ring->rxbufs[idx].skb;
- new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr);
+ new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
+ nn->fl_bufsz);
if (!new_skb) {
nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
rx_ring->rxbufs[idx].dma_addr);
@@ -1256,23 +1418,25 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);
+ /* < meta_len >
+ * <-- [rx_offset] -->
+ * ---------------------------------------------------------
+ * | [XX] | metadata | packet | XXXX |
+ * ---------------------------------------------------------
+ * <---------------- data_len --------------->
+ *
+ * The rx_offset is fixed for all packets, the meta_len can vary
+ * on a packet by packet basis. If rx_offset is set to zero
+ * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
+ * buffer and is immediately followed by the packet (no [XX]).
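+ *
+ * Worked example (illustrative numbers): with rx_offset = 64,
+ * meta_len = 8 and data_len = 1072, the fixed-offset case below
+ * reserves 64 bytes and skb_put()s 1064 bytes of packet; the
+ * dynamic case reserves meta_len = 8 instead.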
+ */
meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
data_len = le16_to_cpu(rxd->rxd.data_len);
- if (WARN_ON_ONCE(data_len > nn->fl_bufsz)) {
- dev_kfree_skb_any(skb);
- continue;
- }
-
- if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) {
- /* The packet data starts after the metadata */
+ if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
skb_reserve(skb, meta_len);
- } else {
- /* The packet data starts at a fixed offset */
+ else
skb_reserve(skb, nn->rx_offset);
- }
-
- /* Adjust the SKB for the dynamic meta data pre-pended */
skb_put(skb, data_len - meta_len);
nfp_net_set_hash(nn->netdev, skb, rxd);
@@ -1349,10 +1513,6 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
struct nfp_net *nn = r_vec->nfp_net;
struct pci_dev *pdev = nn->pdev;
- nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), 0);
- nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), 0);
- nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), 0);
-
kfree(tx_ring->txbufs);
if (tx_ring->txds)
@@ -1360,11 +1520,6 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
tx_ring->txds, tx_ring->dma);
tx_ring->cnt = 0;
- tx_ring->wr_p = 0;
- tx_ring->rd_p = 0;
- tx_ring->qcp_rd_p = 0;
- tx_ring->wr_ptr_add = 0;
-
tx_ring->txbufs = NULL;
tx_ring->txds = NULL;
tx_ring->dma = 0;
@@ -1374,17 +1529,18 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
/**
* nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
* @tx_ring: TX Ring structure to allocate
+ * @cnt: Ring buffer count
*
* Return: 0 on success, negative errno otherwise.
*/
-static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring)
+static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
struct pci_dev *pdev = nn->pdev;
int sz;
- tx_ring->cnt = nn->txd_cnt;
+ tx_ring->cnt = cnt;
tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
@@ -1397,11 +1553,6 @@ static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring)
if (!tx_ring->txbufs)
goto err_alloc;
- /* Write the DMA address, size and MSI-X info to the device */
- nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), tx_ring->dma);
- nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), ilog2(tx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), r_vec->irq_idx);
-
netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx);
nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n",
@@ -1415,6 +1566,59 @@ err_alloc:
return -ENOMEM;
}
+static struct nfp_net_tx_ring *
+nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt)
+{
+ struct nfp_net_tx_ring *rings;
+ unsigned int r;
+
+ rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL);
+ if (!rings)
+ return NULL;
+
+ for (r = 0; r < nn->num_tx_rings; r++) {
+ nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r);
+
+ if (nfp_net_tx_ring_alloc(&rings[r], buf_cnt))
+ goto err_free_prev;
+ }
+
+ return rings;
+
+err_free_prev:
+ while (r--)
+ nfp_net_tx_ring_free(&rings[r]);
+ kfree(rings);
+ return NULL;
+}
+
+static struct nfp_net_tx_ring *
+nfp_net_shadow_tx_rings_swap(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
+{
+ struct nfp_net_tx_ring *old = nn->tx_rings;
+ unsigned int r;
+
+ for (r = 0; r < nn->num_tx_rings; r++)
+ old[r].r_vec->tx_ring = &rings[r];
+
+ nn->tx_rings = rings;
+ return old;
+}
+
+static void
+nfp_net_shadow_tx_rings_free(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
+{
+ unsigned int r;
+
+ if (!rings)
+ return;
+
+ for (r = 0; r < nn->num_tx_rings; r++)
+ nfp_net_tx_ring_free(&rings[r]);
+
+ kfree(rings);
+}
+
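These helpers implement a prepare/swap/free protocol so rings can be resized without tearing the netdev down: build a complete shadow set first (the only step that can fail), quiesce the device, swap the pointers, and free whichever set lost. A hedged sketch of the protocol (simplified; the real callers below also roll the configuration back on FW failure):

static int example_resize_tx(struct nfp_net *nn, u32 new_cnt)
{
	struct nfp_net_tx_ring *shadow;

	shadow = nfp_net_shadow_tx_rings_prepare(nn, new_cnt);
	if (!shadow)
		return -ENOMEM;		/* old rings left untouched */

	/* ... device quiesced here ... */
	shadow = nfp_net_shadow_tx_rings_swap(nn, shadow);
	/* ... device re-enabled here; "shadow" is now the OLD set */

	nfp_net_shadow_tx_rings_free(nn, shadow);
	return 0;
}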
/**
* nfp_net_rx_ring_free() - Free resources allocated to a RX ring
* @rx_ring: RX ring to free
@@ -1425,10 +1629,6 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
struct nfp_net *nn = r_vec->nfp_net;
struct pci_dev *pdev = nn->pdev;
- nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), 0);
- nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), 0);
- nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), 0);
-
kfree(rx_ring->rxbufs);
if (rx_ring->rxds)
@@ -1436,10 +1636,6 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
rx_ring->rxds, rx_ring->dma);
rx_ring->cnt = 0;
- rx_ring->wr_p = 0;
- rx_ring->rd_p = 0;
- rx_ring->wr_ptr_add = 0;
-
rx_ring->rxbufs = NULL;
rx_ring->rxds = NULL;
rx_ring->dma = 0;
@@ -1449,17 +1645,22 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
/**
* nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
* @rx_ring: RX ring to allocate
+ * @fl_bufsz: Size of buffers to allocate
+ * @cnt: Ring buffer count
*
* Return: 0 on success, negative errno otherwise.
*/
-static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring)
+static int
+nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
+ u32 cnt)
{
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
struct pci_dev *pdev = nn->pdev;
int sz;
- rx_ring->cnt = nn->rxd_cnt;
+ rx_ring->cnt = cnt;
+ rx_ring->bufsz = fl_bufsz;
rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
@@ -1472,11 +1673,6 @@ static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring)
if (!rx_ring->rxbufs)
goto err_alloc;
- /* Write the DMA address, size and MSI-X info to the device */
- nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), rx_ring->dma);
- nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), ilog2(rx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), r_vec->irq_idx);
-
nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
@@ -1488,91 +1684,109 @@ err_alloc:
return -ENOMEM;
}
-static void __nfp_net_free_rings(struct nfp_net *nn, unsigned int n_free)
+static struct nfp_net_rx_ring *
+nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz,
+ u32 buf_cnt)
{
- struct nfp_net_r_vector *r_vec;
- struct msix_entry *entry;
+ struct nfp_net_rx_ring *rings;
+ unsigned int r;
- while (n_free--) {
- r_vec = &nn->r_vecs[n_free];
- entry = &nn->irq_entries[r_vec->irq_idx];
+ rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
+ if (!rings)
+ return NULL;
- nfp_net_rx_ring_free(r_vec->rx_ring);
- nfp_net_tx_ring_free(r_vec->tx_ring);
+ for (r = 0; r < nn->num_rx_rings; r++) {
+ nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
- irq_set_affinity_hint(entry->vector, NULL);
- free_irq(entry->vector, r_vec);
+ if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt))
+ goto err_free_prev;
- netif_napi_del(&r_vec->napi);
+ if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
+ goto err_free_ring;
}
+
+ return rings;
+
+err_free_prev:
+ while (r--) {
+ nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+err_free_ring:
+ nfp_net_rx_ring_free(&rings[r]);
+ }
+ kfree(rings);
+ return NULL;
}
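Note the unwind labels above: err_free_ring sits inside the while (r--) loop, so a bufs_alloc failure for ring r frees that ring's descriptor memory first and then falls into the same loop that fully unwinds rings 0..r-1. A minimal standalone sketch of the goto-into-loop idiom (helpers are illustrative):

extern int alloc_a(int r), alloc_b(int r);	/* illustrative */
extern void free_a(int r), free_b(int r);	/* illustrative */

static int example_alloc_all(int n)
{
	int r;

	for (r = 0; r < n; r++) {
		if (alloc_a(r))
			goto err_free_prev;
		if (alloc_b(r))
			goto err_free_a;
	}
	return 0;

err_free_prev:
	while (r--) {
		free_b(r);
err_free_a:	/* entered with the partially built ring's r: free
		 * its "a" part, then keep unwinding earlier rings. */
		free_a(r);
	}
	return -ENOMEM;
}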
-/**
- * nfp_net_free_rings() - Free all ring resources
- * @nn: NFP Net device to reconfigure
- */
-static void nfp_net_free_rings(struct nfp_net *nn)
+static struct nfp_net_rx_ring *
+nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
{
- __nfp_net_free_rings(nn, nn->num_r_vecs);
+ struct nfp_net_rx_ring *old = nn->rx_rings;
+ unsigned int r;
+
+ for (r = 0; r < nn->num_rx_rings; r++)
+ old[r].r_vec->rx_ring = &rings[r];
+
+ nn->rx_rings = rings;
+ return old;
}
-/**
- * nfp_net_alloc_rings() - Allocate resources for RX and TX rings
- * @nn: NFP Net device to reconfigure
- *
- * Return: 0 on success or negative errno on error.
- */
-static int nfp_net_alloc_rings(struct nfp_net *nn)
+static void
+nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
{
- struct nfp_net_r_vector *r_vec;
- struct msix_entry *entry;
- int err;
- int r;
+ unsigned int r;
+
+ if (!rings)
+ return;
for (r = 0; r < nn->num_r_vecs; r++) {
- r_vec = &nn->r_vecs[r];
- entry = &nn->irq_entries[r_vec->irq_idx];
-
- /* Setup NAPI */
- netif_napi_add(nn->netdev, &r_vec->napi,
- nfp_net_poll, NAPI_POLL_WEIGHT);
-
- snprintf(r_vec->name, sizeof(r_vec->name),
- "%s-rxtx-%d", nn->netdev->name, r);
- err = request_irq(entry->vector, r_vec->handler, 0,
- r_vec->name, r_vec);
- if (err) {
- nn_dbg(nn, "Error requesting IRQ %d\n", entry->vector);
- goto err_napi_del;
- }
+ nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+ nfp_net_rx_ring_free(&rings[r]);
+ }
- irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
+ kfree(rings);
+}
- nn_dbg(nn, "RV%02d: irq=%03d/%03d\n",
- r, entry->vector, entry->entry);
+static int
+nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ int idx)
+{
+ struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
+ int err;
- /* Allocate TX ring resources */
- err = nfp_net_tx_ring_alloc(r_vec->tx_ring);
- if (err)
- goto err_free_irq;
+ r_vec->tx_ring = &nn->tx_rings[idx];
+ nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
- /* Allocate RX ring resources */
- err = nfp_net_rx_ring_alloc(r_vec->rx_ring);
- if (err)
- goto err_free_tx;
+ r_vec->rx_ring = &nn->rx_rings[idx];
+ nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
+
+ snprintf(r_vec->name, sizeof(r_vec->name),
+ "%s-rxtx-%d", nn->netdev->name, idx);
+ err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
+ if (err) {
+ nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
+ return err;
}
+ disable_irq(entry->vector);
+
+ /* Setup NAPI */
+ netif_napi_add(nn->netdev, &r_vec->napi,
+ nfp_net_poll, NAPI_POLL_WEIGHT);
+
+ irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
+
+ nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
return 0;
+}
+
+static void
+nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
+{
+ struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
-err_free_tx:
- nfp_net_tx_ring_free(r_vec->tx_ring);
-err_free_irq:
irq_set_affinity_hint(entry->vector, NULL);
- free_irq(entry->vector, r_vec);
-err_napi_del:
netif_napi_del(&r_vec->napi);
- __nfp_net_free_rings(nn, r);
- return err;
+ free_irq(entry->vector, r_vec);
}
/**
@@ -1646,6 +1860,17 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac)
get_unaligned_be16(nn->netdev->dev_addr + 4) << 16);
}
+static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
+{
+ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
+
+ nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
+}
+
/**
* nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
* @nn: NFP Net device to reconfigure
@@ -1653,6 +1878,7 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac)
static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
u32 new_ctrl, update;
+ unsigned int r;
int err;
new_ctrl = nn->ctrl;
@@ -1669,79 +1895,40 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, update);
- if (err) {
+ if (err)
nn_err(nn, "Could not disable device: %d\n", err);
- return;
+
+ for (r = 0; r < nn->num_r_vecs; r++) {
+ nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
+ nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
+ nfp_net_vec_clear_ring_data(nn, r);
}
nn->ctrl = new_ctrl;
}
-/**
- * nfp_net_start_vec() - Start ring vector
- * @nn: NFP Net device structure
- * @r_vec: Ring vector to be started
- */
-static int nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
+static void
+nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ unsigned int idx)
{
- unsigned int irq_vec;
- int err = 0;
-
- irq_vec = nn->irq_entries[r_vec->irq_idx].vector;
-
- disable_irq(irq_vec);
-
- err = nfp_net_rx_fill_freelist(r_vec->rx_ring);
- if (err) {
- nn_err(nn, "RV%02d: couldn't allocate enough buffers\n",
- r_vec->irq_idx);
- goto out;
- }
-
- napi_enable(&r_vec->napi);
-out:
- enable_irq(irq_vec);
+ /* Write the DMA address, size and MSI-X info to the device */
+ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma);
+ nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt));
+ nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx);
- return err;
+ nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma);
+ nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt));
+ nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx);
}
-static int nfp_net_netdev_open(struct net_device *netdev)
+static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
{
- struct nfp_net *nn = netdev_priv(netdev);
- int err, r;
- u32 update = 0;
- u32 new_ctrl;
-
- if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
- nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
- return -EBUSY;
- }
+ u32 new_ctrl, update = 0;
+ unsigned int r;
+ int err;
new_ctrl = nn->ctrl;
- /* Step 1: Allocate resources for rings and the like
- * - Request interrupts
- * - Allocate RX and TX ring resources
- * - Setup initial RSS table
- */
- err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
- nn->exn_name, sizeof(nn->exn_name),
- NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
- if (err)
- return err;
-
- err = nfp_net_alloc_rings(nn);
- if (err)
- goto err_free_exn;
-
- err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
- if (err)
- goto err_free_rings;
-
- err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
- if (err)
- goto err_free_rings;
-
if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
nfp_net_rss_write_key(nn);
nfp_net_rss_write_itbl(nn);
@@ -1756,22 +1943,18 @@ static int nfp_net_netdev_open(struct net_device *netdev)
update |= NFP_NET_CFG_UPDATE_IRQMOD;
}
- /* Step 2: Configure the NFP
- * - Enable rings from 0 to tx_rings/rx_rings - 1.
- * - Write MAC address (in case it changed)
- * - Set the MTU
- * - Set the Freelist buffer size
- * - Enable the FW
- */
+ for (r = 0; r < nn->num_r_vecs; r++)
+ nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r);
+
nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
- nfp_net_write_mac_addr(nn, netdev->dev_addr);
+ nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);
- nn_writel(nn, NFP_NET_CFG_MTU, netdev->mtu);
+ nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);
/* Enable device */
@@ -1784,69 +1967,213 @@ static int nfp_net_netdev_open(struct net_device *netdev)
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, update);
- if (err)
- goto err_clear_config;
nn->ctrl = new_ctrl;
+ for (r = 0; r < nn->num_r_vecs; r++)
+ nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);
+
/* Since reconfiguration requests while NFP is down are ignored we
* have to wipe the entire VXLAN configuration and reinitialize it.
*/
if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
- vxlan_get_rx_port(netdev);
+ vxlan_get_rx_port(nn->netdev);
}
- /* Step 3: Enable for kernel
- * - put some freelist descriptors on each RX ring
- * - enable NAPI on each ring
- * - enable all TX queues
- * - set link state
- */
+ return err;
+}
+
+/**
+ * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
+ * @nn: NFP Net device to reconfigure
+ */
+static int nfp_net_set_config_and_enable(struct nfp_net *nn)
+{
+ int err;
+
+ err = __nfp_net_set_config_and_enable(nn);
+ if (err)
+ nfp_net_clear_config_and_disable(nn);
+
+ return err;
+}
+
+/**
+ * nfp_net_open_stack() - Start the device from stack's perspective
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_open_stack(struct nfp_net *nn)
+{
+ unsigned int r;
+
for (r = 0; r < nn->num_r_vecs; r++) {
- err = nfp_net_start_vec(nn, &nn->r_vecs[r]);
- if (err)
- goto err_disable_napi;
+ napi_enable(&nn->r_vecs[r].napi);
+ enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
}
- netif_tx_wake_all_queues(netdev);
+ netif_tx_wake_all_queues(nn->netdev);
+ enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
+ nfp_net_read_link_status(nn);
+}
+
+static int nfp_net_netdev_open(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int err, r;
+
+ if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
+ nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
+ return -EBUSY;
+ }
+
+ /* Step 1: Allocate resources for rings and the like
+ * - Request interrupts
+ * - Allocate RX and TX ring resources
+ * - Setup initial RSS table
+ */
+ err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
+ nn->exn_name, sizeof(nn->exn_name),
+ NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
+ if (err)
+ return err;
err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
nn->lsc_name, sizeof(nn->lsc_name),
NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
if (err)
- goto err_stop_tx;
- nfp_net_read_link_status(nn);
+ goto err_free_exn;
+ disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
- return 0;
+ nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
+ GFP_KERNEL);
+ if (!nn->rx_rings)
+ goto err_free_lsc;
+ nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
+ GFP_KERNEL);
+ if (!nn->tx_rings)
+ goto err_free_rx_rings;
-err_stop_tx:
- netif_tx_disable(netdev);
- for (r = 0; r < nn->num_r_vecs; r++)
- nfp_net_tx_flush(nn->r_vecs[r].tx_ring);
-err_disable_napi:
- while (r--) {
- napi_disable(&nn->r_vecs[r].napi);
- nfp_net_rx_flush(nn->r_vecs[r].rx_ring);
+ for (r = 0; r < nn->num_r_vecs; r++) {
+ err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
+ if (err)
+ goto err_free_prev_vecs;
+
+ err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
+ if (err)
+ goto err_cleanup_vec_p;
+
+ err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
+ nn->fl_bufsz, nn->rxd_cnt);
+ if (err)
+ goto err_free_tx_ring_p;
+
+ err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
+ if (err)
+ goto err_flush_rx_ring_p;
}
-err_clear_config:
- nfp_net_clear_config_and_disable(nn);
+
+ err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
+ if (err)
+ goto err_free_rings;
+
+ err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
+ if (err)
+ goto err_free_rings;
+
+ /* Step 2: Configure the NFP
+ * - Enable rings from 0 to tx_rings/rx_rings - 1.
+ * - Write MAC address (in case it changed)
+ * - Set the MTU
+ * - Set the Freelist buffer size
+ * - Enable the FW
+ */
+ err = nfp_net_set_config_and_enable(nn);
+ if (err)
+ goto err_free_rings;
+
+ /* Step 3: Enable for kernel
+ * - put some freelist descriptors on each RX ring
+ * - enable NAPI on each ring
+ * - enable all TX queues
+ * - set link state
+ */
+ nfp_net_open_stack(nn);
+
+ return 0;
+
err_free_rings:
- nfp_net_free_rings(nn);
+ r = nn->num_r_vecs;
+err_free_prev_vecs:
+ while (r--) {
+ nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
+err_flush_rx_ring_p:
+ nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+err_free_tx_ring_p:
+ nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+err_cleanup_vec_p:
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+ }
+ kfree(nn->tx_rings);
+err_free_rx_rings:
+ kfree(nn->rx_rings);
+err_free_lsc:
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
return err;
}
/**
+ * nfp_net_close_stack() - Quiesce the stack (part of close)
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_close_stack(struct nfp_net *nn)
+{
+ unsigned int r;
+
+ disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
+ netif_carrier_off(nn->netdev);
+ nn->link_up = false;
+
+ for (r = 0; r < nn->num_r_vecs; r++) {
+ disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
+ napi_disable(&nn->r_vecs[r].napi);
+ }
+
+ netif_tx_disable(nn->netdev);
+}
+
+/**
+ * nfp_net_close_free_all() - Free all runtime resources
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_close_free_all(struct nfp_net *nn)
+{
+ unsigned int r;
+
+ for (r = 0; r < nn->num_r_vecs; r++) {
+ nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
+ nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+ nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+ }
+
+ kfree(nn->rx_rings);
+ kfree(nn->tx_rings);
+
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+}
+
+/**
* nfp_net_netdev_close() - Called when the device is downed
* @netdev: netdev structure
*/
static int nfp_net_netdev_close(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- int r;
if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
@@ -1855,14 +2182,7 @@ static int nfp_net_netdev_close(struct net_device *netdev)
/* Step 1: Disable RX and TX rings from the Linux kernel perspective
*/
- nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
- netif_carrier_off(netdev);
- nn->link_up = false;
-
- for (r = 0; r < nn->num_r_vecs; r++)
- napi_disable(&nn->r_vecs[r].napi);
-
- netif_tx_disable(netdev);
+ nfp_net_close_stack(nn);
/* Step 2: Tell NFP
*/
@@ -1870,13 +2190,7 @@ static int nfp_net_netdev_close(struct net_device *netdev)
/* Step 3: Free resources
*/
- for (r = 0; r < nn->num_r_vecs; r++) {
- nfp_net_rx_flush(nn->r_vecs[r].rx_ring);
- nfp_net_tx_flush(nn->r_vecs[r].tx_ring);
- }
-
- nfp_net_free_rings(nn);
- nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+ nfp_net_close_free_all(nn);
nn_dbg(nn, "%s down", netdev->name);
return 0;
@@ -1902,37 +2216,139 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
return;
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
- if (nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN))
- return;
+ nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
nn->ctrl = new_ctrl;
}
static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
+ unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz;
struct nfp_net *nn = netdev_priv(netdev);
- u32 tmp;
-
- nn_dbg(nn, "New MTU = %d\n", new_mtu);
+ struct nfp_net_rx_ring *tmp_rings;
+ int err;
if (new_mtu < 68 || new_mtu > nn->max_mtu) {
nn_err(nn, "New MTU (%d) is not valid\n", new_mtu);
return -EINVAL;
}
+ old_mtu = netdev->mtu;
+ old_fl_bufsz = nn->fl_bufsz;
+ new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;
+
+ if (!netif_running(netdev)) {
+ netdev->mtu = new_mtu;
+ nn->fl_bufsz = new_fl_bufsz;
+ return 0;
+ }
+
+ /* Prepare new rings */
+ tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz,
+ nn->rxd_cnt);
+ if (!tmp_rings)
+ return -ENOMEM;
+
+ /* Stop device, swap in new rings, try to start the firmware */
+ nfp_net_close_stack(nn);
+ nfp_net_clear_config_and_disable(nn);
+
+ tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+
netdev->mtu = new_mtu;
+ nn->fl_bufsz = new_fl_bufsz;
- /* Freelist buffer size rounded up to the nearest 1K */
- tmp = new_mtu + ETH_HLEN + VLAN_HLEN + NFP_NET_MAX_PREPEND;
- nn->fl_bufsz = roundup(tmp, 1024);
+ err = nfp_net_set_config_and_enable(nn);
+ if (err) {
+ const int err_new = err;
- /* restart if running */
- if (netif_running(netdev)) {
- nfp_net_netdev_close(netdev);
- nfp_net_netdev_open(netdev);
+ /* Try with old configuration and old rings */
+ tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+
+ netdev->mtu = old_mtu;
+ nn->fl_bufsz = old_fl_bufsz;
+
+ err = __nfp_net_set_config_and_enable(nn);
+ if (err)
+ nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n",
+ err_new, err);
}
- return 0;
+ nfp_net_shadow_rx_rings_free(nn, tmp_rings);
+
+ nfp_net_open_stack(nn);
+
+ return err;
+}
+
+int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
+{
+ struct nfp_net_tx_ring *tx_rings = NULL;
+ struct nfp_net_rx_ring *rx_rings = NULL;
+ u32 old_rxd_cnt, old_txd_cnt;
+ int err;
+
+ if (!netif_running(nn->netdev)) {
+ nn->rxd_cnt = rxd_cnt;
+ nn->txd_cnt = txd_cnt;
+ return 0;
+ }
+
+ old_rxd_cnt = nn->rxd_cnt;
+ old_txd_cnt = nn->txd_cnt;
+
+ /* Prepare new rings */
+ if (nn->rxd_cnt != rxd_cnt) {
+ rx_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz,
+ rxd_cnt);
+ if (!rx_rings)
+ return -ENOMEM;
+ }
+ if (nn->txd_cnt != txd_cnt) {
+ tx_rings = nfp_net_shadow_tx_rings_prepare(nn, txd_cnt);
+ if (!tx_rings) {
+ nfp_net_shadow_rx_rings_free(nn, rx_rings);
+ return -ENOMEM;
+ }
+ }
+
+ /* Stop device, swap in new rings, try to start the firmware */
+ nfp_net_close_stack(nn);
+ nfp_net_clear_config_and_disable(nn);
+
+ if (rx_rings)
+ rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
+ if (tx_rings)
+ tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
+
+ nn->rxd_cnt = rxd_cnt;
+ nn->txd_cnt = txd_cnt;
+
+ err = nfp_net_set_config_and_enable(nn);
+ if (err) {
+ const int err_new = err;
+
+ /* Try with old configuration and old rings */
+ if (rx_rings)
+ rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
+ if (tx_rings)
+ tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
+
+ nn->rxd_cnt = old_rxd_cnt;
+ nn->txd_cnt = old_txd_cnt;
+
+ err = __nfp_net_set_config_and_enable(nn);
+ if (err)
+ nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
+ err_new, err);
+ }
+
+ nfp_net_shadow_rx_rings_free(nn, rx_rings);
+ nfp_net_shadow_tx_rings_free(nn, tx_rings);
+
+ nfp_net_open_stack(nn);
+
+ return err;
}
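Both nfp_net_change_mtu() and nfp_net_set_ring_size() above share one rollback skeleton: if the firmware rejects the new configuration, the old rings and parameters are swapped back and the device is re-enabled with its previous settings, so a failed resize leaves the interface usable. A hedged condensation of that control flow (the prepare/swap/free helpers stand in for the RX/TX variants):

extern void *example_prepare(struct nfp_net *nn);		/* illustrative */
extern void *example_swap(struct nfp_net *nn, void *rings);	/* illustrative */
extern void example_free(struct nfp_net *nn, void *rings);	/* illustrative */

static int example_reconfigure(struct nfp_net *nn)
{
	void *shadow;
	int err;

	shadow = example_prepare(nn);
	if (!shadow)
		return -ENOMEM;			/* old state untouched */

	nfp_net_close_stack(nn);
	nfp_net_clear_config_and_disable(nn);
	shadow = example_swap(nn, shadow);	/* shadow := old rings */

	err = nfp_net_set_config_and_enable(nn);
	if (err) {
		shadow = example_swap(nn, shadow);	/* roll back */
		if (__nfp_net_set_config_and_enable(nn))
			nn_err(nn, "rollback failed\n");
	}

	example_free(nn, shadow);		/* the losing set */
	nfp_net_open_stack(nn);
	return err;
}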
static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
@@ -2108,7 +2524,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
be16_to_cpu(nn->vxlan_ports[i]));
- nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
+ nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
}
/**
@@ -2254,6 +2670,9 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->link_status_lock);
+ setup_timer(&nn->reconfig_timer,
+ nfp_net_reconfig_timer, (unsigned long)nn);
+
return nn;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 8692003ae..ad6c4e31c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -81,14 +81,10 @@
/**
* @NFP_NET_TXR_MAX: Maximum number of TX rings
- * @NFP_NET_TXR_MASK: Mask for TX rings
* @NFP_NET_RXR_MAX: Maximum number of RX rings
- * @NFP_NET_RXR_MASK: Mask for RX rings
*/
#define NFP_NET_TXR_MAX 64
-#define NFP_NET_TXR_MASK (NFP_NET_TXR_MAX - 1)
#define NFP_NET_RXR_MAX 64
-#define NFP_NET_RXR_MASK (NFP_NET_RXR_MAX - 1)
/**
* Read/Write config words (0x0000 - 0x002c)
@@ -152,9 +148,9 @@
* @NFP_NET_CFG_VERSION: Firmware version number
* @NFP_NET_CFG_STS: Status
* @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL)
- * @NFP_NET_MAX_TXRINGS: Maximum number of TX rings
- * @NFP_NET_MAX_RXRINGS: Maximum number of RX rings
- * @NFP_NET_MAX_MTU: Maximum support MTU
+ * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
+ * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
+ * @NFP_NET_CFG_MAX_MTU: Maximum supported MTU
* @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
* @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
*
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 4c97c7131..f7c9a5bc4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -40,8 +40,9 @@ static struct dentry *nfp_dir;
static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
{
- struct nfp_net_rx_ring *rx_ring = file->private;
int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt;
+ struct nfp_net_r_vector *r_vec = file->private;
+ struct nfp_net_rx_ring *rx_ring;
struct nfp_net_rx_desc *rxd;
struct sk_buff *skb;
struct nfp_net *nn;
@@ -49,9 +50,10 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
rtnl_lock();
- if (!rx_ring->r_vec || !rx_ring->r_vec->nfp_net)
+ if (!r_vec->nfp_net || !r_vec->rx_ring)
goto out;
- nn = rx_ring->r_vec->nfp_net;
+ nn = r_vec->nfp_net;
+ rx_ring = r_vec->rx_ring;
if (!netif_running(nn->netdev))
goto out;
@@ -115,7 +117,8 @@ static const struct file_operations nfp_rx_q_fops = {
static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
{
- struct nfp_net_tx_ring *tx_ring = file->private;
+ struct nfp_net_r_vector *r_vec = file->private;
+ struct nfp_net_tx_ring *tx_ring;
struct nfp_net_tx_desc *txd;
int d_rd_p, d_wr_p, txd_cnt;
struct sk_buff *skb;
@@ -124,9 +127,10 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
rtnl_lock();
- if (!tx_ring->r_vec || !tx_ring->r_vec->nfp_net)
+ if (!r_vec->nfp_net || !r_vec->tx_ring)
goto out;
- nn = tx_ring->r_vec->nfp_net;
+ nn = r_vec->nfp_net;
+ tx_ring = r_vec->tx_ring;
if (!netif_running(nn->netdev))
goto out;
@@ -183,7 +187,7 @@ static const struct file_operations nfp_tx_q_fops = {
void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
{
- static struct dentry *queues, *tx, *rx;
+ struct dentry *queues, *tx, *rx;
char int_name[16];
int i;
@@ -196,7 +200,7 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
/* Create queue debugging sub-tree */
queues = debugfs_create_dir("queue", nn->debugfs_dir);
- if (IS_ERR_OR_NULL(nn->debugfs_dir))
+ if (IS_ERR_OR_NULL(queues))
return;
rx = debugfs_create_dir("rx", queues);
@@ -207,13 +211,13 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
for (i = 0; i < nn->num_rx_rings; i++) {
sprintf(int_name, "%d", i);
debugfs_create_file(int_name, S_IRUSR, rx,
- &nn->rx_rings[i], &nfp_rx_q_fops);
+ &nn->r_vecs[i], &nfp_rx_q_fops);
}
for (i = 0; i < nn->num_tx_rings; i++) {
sprintf(int_name, "%d", i);
debugfs_create_file(int_name, S_IRUSR, tx,
- &nn->tx_rings[i], &nfp_tx_q_fops);
+ &nn->r_vecs[i], &nfp_tx_q_fops);
}
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 9a4084a68..ccfef1f17 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -153,37 +153,25 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
u32 rxd_cnt, txd_cnt;
- if (netif_running(netdev)) {
- /* Some NIC drivers allow reconfiguration on the fly,
- * some down the interface, change and then up it
- * again. For now we don't allow changes when the
- * device is up.
- */
- nn_warn(nn, "Can't change rings while device is up\n");
- return -EBUSY;
- }
-
/* We don't have separate queues/rings for small/large frames. */
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
return -EINVAL;
/* Round up to supported values */
rxd_cnt = roundup_pow_of_two(ring->rx_pending);
- rxd_cnt = max_t(u32, rxd_cnt, NFP_NET_MIN_RX_DESCS);
- rxd_cnt = min_t(u32, rxd_cnt, NFP_NET_MAX_RX_DESCS);
-
txd_cnt = roundup_pow_of_two(ring->tx_pending);
- txd_cnt = max_t(u32, txd_cnt, NFP_NET_MIN_TX_DESCS);
- txd_cnt = min_t(u32, txd_cnt, NFP_NET_MAX_TX_DESCS);
- if (nn->rxd_cnt != rxd_cnt || nn->txd_cnt != txd_cnt)
- nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
- nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
+ if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
+ txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
+ return -EINVAL;
- nn->rxd_cnt = rxd_cnt;
- nn->txd_cnt = txd_cnt;
+ if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
+ return 0;
- return 0;
+ nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
+ nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
+
+ return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
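The rewritten set_ringparam rounds the request up to a power of two and rejects out-of-range results instead of silently clamping them. A small worked sketch, assuming the MIN/MAX descriptor limits defined in nfp_net.h:

#include <linux/log2.h>

/* "ethtool -G <if> rx 200" -> roundup_pow_of_two(200) == 256, which
 * is accepted if it lies within [min, max]; a request that rounds
 * past max now fails with -EINVAL rather than being clamped.
 */
static int example_validate_ring(u32 requested, u32 min, u32 max)
{
	u32 cnt = roundup_pow_of_two(requested);

	if (cnt < min || cnt > max)
		return -EINVAL;
	return cnt;
}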
static void nfp_net_get_strings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 7096b11d1..0c49be93f 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -313,7 +313,8 @@ static int netx_eth_enable(struct net_device *ndev)
{
struct netx_eth_priv *priv = netdev_priv(ndev);
unsigned int mac4321, mac65;
- int running, i;
+ int running, i, ret;
+ bool inv_mac_addr = false;
ndev->netdev_ops = &netx_eth_netdev_ops;
ndev->watchdog_timeo = msecs_to_jiffies(5000);
@@ -358,15 +359,18 @@ static int netx_eth_enable(struct net_device *ndev)
xc_start(priv->xc);
if (!is_valid_ether_addr(ndev->dev_addr))
- printk("%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", ndev->name);
+ inv_mac_addr = true;
for (i=2; i<=18; i++)
pfifo_push(EMPTY_PTR_FIFO(priv->id),
FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id));
- return register_netdev(ndev);
+ ret = register_netdev(ndev);
+ if (inv_mac_addr)
+ printk("%s: Invalid ethernet MAC address. Please set using ip\n",
+ ndev->name);
+ return ret;
}
static int netx_eth_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 52d9a94ae..87b7b8147 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -476,7 +476,7 @@ static void w90p910_reset_mac(struct net_device *dev)
w90p910_init_desc(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
ether->cur_tx = 0x0;
ether->finish_tx = 0x0;
ether->cur_rx = 0x0;
@@ -490,7 +490,7 @@ static void w90p910_reset_mac(struct net_device *dev)
w90p910_trigger_tx(dev);
w90p910_trigger_rx(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 2a55d6d53..8d710a3b4 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -481,7 +481,6 @@ struct pch_gbe_buffer {
/**
* struct pch_gbe_tx_ring - tx ring information
- * @tx_lock: spinlock structs
* @desc: pointer to the descriptor ring memory
* @dma: physical address of the descriptor ring
* @size: length of descriptor ring in bytes
@@ -491,7 +490,6 @@ struct pch_gbe_buffer {
* @buffer_info: array of buffer information structs
*/
struct pch_gbe_tx_ring {
- spinlock_t tx_lock;
struct pch_gbe_tx_desc *desc;
dma_addr_t dma;
unsigned int size;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3b98b263b..3cd87a41a 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1640,7 +1640,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
cleaned_count);
if (cleaned_count > 0) { /*skip this if nothing cleaned*/
/* Recover from running out of Tx resources in xmit_frame */
- spin_lock(&tx_ring->tx_lock);
+ netif_tx_lock(adapter->netdev);
if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
{
netif_wake_queue(adapter->netdev);
@@ -1652,7 +1652,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
tx_ring->next_to_clean);
- spin_unlock(&tx_ring->tx_lock);
+ netif_tx_unlock(adapter->netdev);
}
return cleaned;
}
@@ -1805,7 +1805,6 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- spin_lock_init(&tx_ring->tx_lock);
for (desNo = 0; desNo < tx_ring->count; desNo++) {
tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
@@ -2135,15 +2134,9 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
- unsigned long flags;
- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
- }
if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
netif_stop_queue(netdev);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
netdev_dbg(netdev,
"Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
tx_ring->next_to_use, tx_ring->next_to_clean);
@@ -2152,7 +2145,6 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* CRC,ITAG no support */
pch_gbe_tx_queue(adapter, tx_ring, skb);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 13d88a602..91be2f02e 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1144,7 +1144,7 @@ static void hamachi_tx_timeout(struct net_device *dev)
hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
/* Trigger an immediate transmit demand. */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
/* Restart the chip's Tx/Rx processes . */
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index fa2db41e0..fb1d1031b 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -714,7 +714,7 @@ static void yellowfin_tx_timeout(struct net_device *dev)
if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
netif_wake_queue (dev); /* Typical path */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
}
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index ddcfcab03..680d8c736 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -98,9 +98,40 @@ config QED
---help---
This enables the support for ...
+config QED_SRIOV
+ bool "QLogic QED 25/40/100Gb SR-IOV support"
+ depends on QED && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input Output
+ Virtualization support for QED devices.
+ This allows for virtual function acceleration in virtualized
+ environments.
+
config QEDE
tristate "QLogic QED 25/40/100Gb Ethernet NIC"
depends on QED
---help---
This enables the support for ...
+
+config QEDE_VXLAN
+ bool "Virtual eXtensible Local Area Network support"
+ default n
+ depends on QEDE && VXLAN && !(QEDE=y && VXLAN=m)
+ ---help---
+ This enables hardware offload support for the VXLAN protocol in
+ the qede module. Say Y here if you want to enable hardware
+ offload support for Virtual eXtensible Local Area Network (VXLAN)
+ in the driver.
+
+config QEDE_GENEVE
+ bool "Generic Network Virtualization Encapsulation (GENEVE) support"
+ depends on QEDE && GENEVE && !(QEDE=y && GENEVE=m)
+ ---help---
+ This allows one to create GENEVE virtual interfaces that provide
+ Layer 2 Networks over Layer 3 Networks. GENEVE is often used
+ to tunnel virtual network infrastructure in virtualized environments.
+ Say Y here if you want to enable hardware offload support for
+ Generic Network Virtualization Encapsulation (GENEVE) in the driver.
+
endif # NET_VENDOR_QLOGIC
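The "!(QEDE=y && VXLAN=m)" clause above keeps a built-in qede from calling into a modular vxlan.ko, which could not be resolved at link time. In the driver, such bool symbols typically gate the offload hooks at compile time; a hedged C sketch of the usual pattern (the function body is illustrative, not from qede):

#include <linux/kconfig.h>

static void example_add_vxlan_port(void)
{
	if (!IS_ENABLED(CONFIG_QEDE_VXLAN))
		return;		/* folded away when support is off */

	/* ... program the VXLAN UDP port into the NIC ... */
}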
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 84bc885d7..1b8136496 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2286,7 +2286,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
goto request_reset;
}
}
- adapter->netdev->trans_start = jiffies;
+ netif_trans_update(adapter->netdev);
rtnl_unlock();
return;
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 5c2fd5723..d1f157e43 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
- qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o
+ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
+ qed_selftest.o qed_dcbx.o
+qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index fcb8e9ba5..1042f2af8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -26,12 +26,14 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.7.0.0"
+#define DRV_MODULE_VERSION "8.7.1.20"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
#define VER_SIZE 16
+#define QED_WFQ_UNIT 100
+
/* cau states */
enum qed_coalescing_mode {
QED_COAL_MODE_DISABLE,
@@ -74,6 +76,51 @@ struct qed_rt_data {
bool *b_valid;
};
+enum qed_tunn_mode {
+ QED_MODE_L2GENEVE_TUNN,
+ QED_MODE_IPGENEVE_TUNN,
+ QED_MODE_L2GRE_TUNN,
+ QED_MODE_IPGRE_TUNN,
+ QED_MODE_VXLAN_TUNN,
+};
+
+enum qed_tunn_clss {
+ QED_TUNN_CLSS_MAC_VLAN,
+ QED_TUNN_CLSS_MAC_VNI,
+ QED_TUNN_CLSS_INNER_MAC_VLAN,
+ QED_TUNN_CLSS_INNER_MAC_VNI,
+ MAX_QED_TUNN_CLSS,
+};
+
+struct qed_tunn_start_params {
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
+struct qed_tunn_update_params {
+ unsigned long tunn_mode_update_mask;
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
/* The PCI personality is not quite synonymous to protocol ID:
* 1. All personalities need CORE connections
* 2. The Ethernet personality may support also the RoCE protocol
@@ -105,6 +152,7 @@ enum QED_RESOURCES {
enum QED_FEATURE {
QED_PF_L2_QUE,
+ QED_VF,
QED_MAX_FEATURES,
};
@@ -192,6 +240,12 @@ struct qed_dmae_info {
struct dmae_cmd *p_dmae_cmd;
};
+struct qed_wfq_data {
+ /* when feature is configured for at least 1 vport */
+ u32 min_speed;
+ bool configured;
+};
+
struct qed_qm_info {
struct init_qm_pq_params *qm_pq_params;
struct init_qm_vport_params *qm_vport_params;
@@ -212,6 +266,7 @@ struct qed_qm_info {
bool vport_wfq_en;
u8 pf_wfq;
u32 pf_rl;
+ struct qed_wfq_data *wfq_data;
};
struct storm_stats {
@@ -256,6 +311,8 @@ struct qed_hwfn {
bool first_on_engine;
bool hw_init_done;
+ u8 num_funcs_on_engine;
+
/* BAR access */
void __iomem *regview;
void __iomem *doorbells;
@@ -306,8 +363,12 @@ struct qed_hwfn {
/* True if the driver requests for the link */
bool b_drv_link_init;
+ struct qed_vf_iov *vf_iov_info;
+ struct qed_pf_iov *pf_iov_info;
struct qed_mcp_info *mcp_info;
+ struct qed_dcbx_info *p_dcbx_info;
+
struct qed_hw_cid_data *p_tx_cids;
struct qed_hw_cid_data *p_rx_cids;
@@ -322,6 +383,12 @@ struct qed_hwfn {
struct qed_simd_fp_handler simd_proto_handler[64];
+#ifdef CONFIG_QED_SRIOV
+ struct workqueue_struct *iov_wq;
+ struct delayed_work iov_task;
+ unsigned long iov_task_flags;
+#endif
+
struct z_stream_s *stream;
};
@@ -430,6 +497,13 @@ struct qed_dev {
u8 num_hwfns;
struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+ /* SRIOV */
+ struct qed_hw_sriov_info *p_iov_info;
+#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
+
+ unsigned long tunn_mode;
+
+ bool b_is_vf;
u32 drv_type;
struct qed_eth_stats *reset_stats;
@@ -459,6 +533,8 @@ struct qed_dev {
const struct firmware *firmware;
};
+#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
+#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB
#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
@@ -480,6 +556,10 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
#define PURE_LB_TC 8
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
+
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
/* Other Linux specific common definitions */
@@ -507,6 +587,4 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
-#define QED_ETH_INTERFACE_VERSION 300
-
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index fc767c07a..ac284c58d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -24,11 +24,13 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
#define NUM_TASK_TYPES 2
#define NUM_TASK_PF_SEGMENTS 4
+#define NUM_TASK_VF_SEGMENTS 1
/* QM constants */
#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
@@ -63,10 +65,12 @@ union conn_context {
struct qed_conn_type_cfg {
u32 cid_count;
u32 cid_start;
+ u32 cids_per_vf;
};
/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK (0)
enum ilt_clients {
@@ -97,6 +101,10 @@ struct qed_ilt_client_cfg {
/* ILT client blocks for PF */
struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
u32 pf_total_lines;
+
+ /* ILT client blocks for VFs */
+ struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+ u32 vf_total_lines;
};
/* Per Path -
@@ -123,6 +131,11 @@ struct qed_cxt_mngr {
/* computed ILT structure */
struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
+ /* total number of VFs for this hwfn -
+ * ALL VFs are symmetric in terms of HW resources
+ */
+ u32 vf_count;
+
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
@@ -131,37 +144,60 @@ struct qed_cxt_mngr {
u32 pf_start_line;
};
-static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
-{
- u32 type, pf_cids = 0;
+/* counts the iids for the CDU/CDUC ILT client configuration */
+struct qed_cdu_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
- for (type = 0; type < MAX_CONN_TYPES; type++)
- pf_cids += p_mngr->conn_cfg[type].cid_count;
+static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
+ struct qed_cdu_iids *iids)
+{
+ u32 type;
- return pf_cids;
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+ }
}
static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
struct qed_qm_iids *iids)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- int type;
+ u32 vf_cids = 0, type;
- for (type = 0; type < MAX_CONN_TYPES; type++)
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
iids->cids += p_mngr->conn_cfg[type].cid_count;
+ vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+ }
- DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
+ iids->vf_cids += vf_cids * p_mngr->vf_count;
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "iids: CIDS %08x vf_cids %08x\n",
+ iids->cids, iids->vf_cids);
}
/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type,
- u32 cid_count)
+ u32 cid_count, u32 vf_cid_cnt)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
+ p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
+}
+
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *vf_cid)
+{
+ if (vf_cid)
+ *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
+
+ return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
@@ -210,10 +246,12 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *p_cli;
struct qed_ilt_cli_blk *p_blk;
- u32 curr_line, total, pf_cids;
+ struct qed_cdu_iids cdu_iids;
struct qed_qm_iids qm_iids;
+ u32 curr_line, total, i;
memset(&qm_iids, 0, sizeof(qm_iids));
+ memset(&cdu_iids, 0, sizeof(cdu_iids));
p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
@@ -224,14 +262,16 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* CDUC */
p_cli = &p_mngr->clients[ILT_CLI_CDUC];
curr_line = p_mngr->pf_start_line;
+
+ /* CDUC PF */
p_cli->pf_total_lines = 0;
/* get the counters for the CDUC and QM clients */
- pf_cids = qed_cxt_cdu_iids(p_mngr);
+ qed_cxt_cdu_iids(p_mngr, &cdu_iids);
p_blk = &p_cli->pf_blks[CDUC_BLK];
- total = pf_cids * CONN_CXT_SIZE(p_hwfn);
+ total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total, CONN_CXT_SIZE(p_hwfn));
@@ -239,17 +279,36 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ /* CDUC VF */
+ p_blk = &p_cli->vf_blks[CDUC_BLK];
+ total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
+
+ for (i = 1; i < p_mngr->vf_count; i++)
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUC);
+
/* QM */
p_cli = &p_mngr->clients[ILT_CLI_QM];
p_blk = &p_cli->pf_blks[0];
qed_cxt_qm_iids(p_hwfn, &qm_iids);
- total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
- p_hwfn->qm_info.num_pqs, 0);
-
- DP_VERBOSE(p_hwfn, QED_MSG_ILT,
- "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
- qm_iids.cids, p_hwfn->qm_info.num_pqs, total);
+ total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+ qm_iids.vf_cids, 0,
+ p_hwfn->qm_info.num_pqs,
+ p_hwfn->qm_info.num_vf_pqs);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_ILT,
+ "QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
+ qm_iids.cids,
+ qm_iids.vf_cids,
+ p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
qed_ilt_cli_blk_fill(p_cli, p_blk,
curr_line, total * 0x1000,
@@ -358,7 +417,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *clients = p_mngr->clients;
struct qed_ilt_cli_blk *p_blk;
- u32 size, i, j;
+ u32 size, i, j, k;
int rc;
size = qed_cxt_ilt_shadow_size(clients);
@@ -383,6 +442,16 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
if (rc != 0)
goto ilt_shadow_fail;
}
+ for (k = 0; k < p_mngr->vf_count; k++) {
+ for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
+ u32 lines = clients[i].vf_total_lines * k;
+
+ p_blk = &clients[i].vf_blks[j];
+ rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
+ if (rc != 0)
+ goto ilt_shadow_fail;
+ }
+ }
}
return 0;
@@ -467,6 +536,9 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+ if (p_hwfn->cdev->p_iov_info)
+ p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+
 	/* Set the cxt mngr pointer prior to further allocations */
p_hwfn->p_cxt_mngr = p_mngr;
@@ -579,8 +651,10 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.is_first_pf = p_hwfn->first_on_engine;
params.num_pf_cids = iids.cids;
+ params.num_vf_cids = iids.vf_cids;
params.start_pq = qm_info->start_pq;
- params.num_pf_pqs = qm_info->num_pqs;
+ params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
+ params.num_vf_pqs = qm_info->num_vf_pqs;
params.start_vport = qm_info->start_vport;
params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
@@ -610,26 +684,55 @@ static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 dq_pf_max_cid = 0;
+ u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
- /* 5 - PF */
+ dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
+
+	/* Connection types 6 & 7 are not in use, yet they must be configured
+	 * as the highest possible connection. Not configuring them means the
+	 * defaults will be used, and with a large number of cids a bug may
+	 * occur if the defaults are smaller than dq_pf_max_cid /
+	 * dq_vf_max_cid.
+	 */
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
+
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
}
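
The per-type maximum ICIDs above are a running sum: each DORQ_REG_*_MAX_ICID_n register receives the cumulative cid counts of types 0..n shifted down by DQ_RANGE_SHIFT. A minimal standalone userspace sketch of that accumulation (illustrative only, not driver code; the cid counts are invented and DQ_RANGE_SHIFT is assumed to be 4 here):

#include <stdio.h>
#include <stdint.h>

#define DQ_RANGE_SHIFT 4 /* assumption for this demo */

int main(void)
{
	/* hypothetical per-connection-type PF cid counts (types 0-5) */
	uint32_t cid_count[6] = { 4096, 0, 0, 0, 0, 0 };
	uint32_t dq_pf_max_cid = 0;
	int type;

	for (type = 0; type < 6; type++) {
		dq_pf_max_cid += cid_count[type] >> DQ_RANGE_SHIFT;
		printf("DORQ_REG_PF_MAX_ICID_%d <- 0x%x\n",
		       type, (unsigned)dq_pf_max_cid);
	}
	/* types 6 and 7 reuse the final running total, as the comment explains */
	printf("DORQ_REG_PF_MAX_ICID_6/7 <- 0x%x\n", (unsigned)dq_pf_max_cid);
	return 0;
}
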
static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
@@ -653,6 +756,38 @@ static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
}
}
+static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ u32 blk_factor;
+
+	/* For simplicity we set the 'block' to be an ILT page */
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_BASE_RT_OFFSET,
+ p_iov->first_vf_in_pf);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
+ p_iov->first_vf_in_pf + p_iov->total_vfs);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+}
+
/* ILT (PSWRQ2) PF */
static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
@@ -662,6 +797,7 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
u32 line, rt_offst, i;
qed_ilt_bounds_init(p_hwfn);
+ qed_ilt_vf_bounds_init(p_hwfn);
p_mngr = p_hwfn->p_cxt_mngr;
p_shdw = p_mngr->ilt_shadow;
@@ -839,10 +975,10 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
- qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
- p_params->num_cons);
+ p_params->num_cons, 1);
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index c8e1f5e5c..234c0fa8d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -51,6 +51,9 @@ enum qed_cxt_elem_type {
QED_ELEM_TASK
};
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid);
+
/**
* @brief qed_cxt_set_pf_params - Set the PF params for cxt init
*
@@ -128,6 +131,16 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
/**
+ * @brief Reconfigures QM pf on the fly
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
* @brief qed_cxt_release - Release a cid
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
new file mode 100644
index 000000000..21ec1c2df
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -0,0 +1,563 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dcbx.h"
+#include "qed_hsi.h"
+#include "qed_sp.h"
+
+#define QED_DCBX_MAX_MIB_READ_TRY (100)
+#define QED_ETH_TYPE_DEFAULT (0)
+#define QED_ETH_TYPE_ROCE (0x8915)
+#define QED_UDP_PORT_TYPE_ROCE_V2 (0x12B7)
+#define QED_ETH_TYPE_FCOE (0x8906)
+#define QED_TCP_PORT_ISCSI (0xCBC)
+
+#define QED_DCBX_INVALID_PRIORITY 0xFF
+
+/* Get the Traffic Class from the priority-to-TC table; each priority is
+ * represented by a 4-bit field holding its traffic class.
+ */
+#define QED_DCBX_PRIO2TC(prio_tc_tbl, prio) \
+ ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
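A standalone sketch of how this macro indexes the table (illustrative only, not driver code): priority 0 occupies the most significant 4-bit field, and the 0x7 mask keeps the low three bits of the selected field. The table value below is invented for the demo.

#include <stdio.h>
#include <stdint.h>

#define QED_DCBX_PRIO2TC(prio_tc_tbl, prio) \
	((uint32_t)((prio_tc_tbl) >> ((7 - (prio)) * 4)) & 0x7)

int main(void)
{
	uint32_t tbl = 0x76543210; /* hypothetical: prio 0 -> TC 7, ..., prio 7 -> TC 0 */
	int prio;

	for (prio = 0; prio < 8; prio++)
		printf("prio %d -> tc %u\n",
		       prio, (unsigned)QED_DCBX_PRIO2TC(tbl, prio));
	return 0;
}
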
+
+static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = {
+ {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}
+};
+
+static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
+{
+ return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_ETHTYPE);
+}
+
+static bool qed_dcbx_app_port(u32 app_info_bitmap)
+{
+ return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_PORT);
+}
+
+static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
+ proto_id == QED_ETH_TYPE_DEFAULT);
+}
+
+static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_port(app_info_bitmap) &&
+ proto_id == QED_TCP_PORT_ISCSI);
+}
+
+static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
+ proto_id == QED_ETH_TYPE_FCOE);
+}
+
+static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
+ proto_id == QED_ETH_TYPE_ROCE);
+}
+
+static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_port(app_info_bitmap) &&
+ proto_id == QED_UDP_PORT_TYPE_ROCE_V2);
+}
+
+static void
+qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
+{
+ enum dcbx_protocol_type id;
+ int i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "DCBX negotiated: %d\n",
+ p_data->dcbx_enabled);
+
+ for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
+ id = qed_dcbx_app_update[i].id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
+ qed_dcbx_app_update[i].name, p_data->arr[id].update,
+ p_data->arr[id].enable, p_data->arr[id].priority,
+ p_data->arr[id].tc, p_hwfn->hw_info.num_tc);
+ }
+}
+
+static void
+qed_dcbx_set_params(struct qed_dcbx_results *p_data,
+ struct qed_hw_info *p_info,
+ bool enable,
+ bool update,
+ u8 prio,
+ u8 tc,
+ enum dcbx_protocol_type type,
+ enum qed_pci_personality personality)
+{
+ /* PF update ramrod data */
+ p_data->arr[type].update = update;
+ p_data->arr[type].enable = enable;
+ p_data->arr[type].priority = prio;
+ p_data->arr[type].tc = tc;
+
+ /* QM reconf data */
+ if (p_info->personality == personality) {
+ if (personality == QED_PCI_ETH)
+ p_info->non_offload_tc = tc;
+ else
+ p_info->offload_tc = tc;
+ }
+}
+
+/* Update app protocol data and hw_info fields with the TLV info */
+static void
+qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
+ struct qed_hwfn *p_hwfn,
+ bool enable,
+ bool update,
+ u8 prio, u8 tc, enum dcbx_protocol_type type)
+{
+ struct qed_hw_info *p_info = &p_hwfn->hw_info;
+ enum qed_pci_personality personality;
+ enum dcbx_protocol_type id;
+ char *name;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
+ id = qed_dcbx_app_update[i].id;
+
+ if (type != id)
+ continue;
+
+ personality = qed_dcbx_app_update[i].personality;
+ name = qed_dcbx_app_update[i].name;
+
+ qed_dcbx_set_params(p_data, p_info, enable, update,
+ prio, tc, type, personality);
+ }
+}
+
+static bool
+qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
+ u32 app_prio_bitmap,
+ u16 id, enum dcbx_protocol_type *type)
+{
+ if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_FCOE;
+ } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_ROCE;
+ } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_ISCSI;
+ } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_ETH;
+ } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_ROCE_V2;
+ } else {
+ *type = DCBX_MAX_PROTOCOL_TYPE;
+ DP_ERR(p_hwfn,
+ "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
+ id, app_prio_bitmap);
+ return false;
+ }
+
+ return true;
+}
+
+/* Parse app TLV's to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static int
+qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_results *p_data,
+ struct dcbx_app_priority_entry *p_tbl,
+ u32 pri_tc_tbl, int count, bool dcbx_enabled)
+{
+ u8 tc, priority_map;
+ enum dcbx_protocol_type type;
+ u16 protocol_id;
+ int priority;
+ bool enable;
+ int i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
+
+ /* Parse APP TLV */
+ for (i = 0; i < count; i++) {
+ protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ priority_map = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PRI_MAP);
+ priority = ffs(priority_map) - 1;
+ if (priority < 0) {
+ DP_ERR(p_hwfn, "Invalid priority\n");
+ return -EINVAL;
+ }
+
+ tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
+ if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ protocol_id, &type)) {
+			/* ETH always has the enable bit reset, as it gets
+			 * vlan information per packet. For other protocols,
+			 * it should be set according to the dcbx_enabled
+			 * indication, but we only got here if there was an
+			 * app tlv for the protocol, so dcbx must be enabled.
+			 */
+ enable = !(type == DCBX_PROTOCOL_ETH);
+
+ qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ priority, tc, type);
+ }
+ }
+
+	/* If the RoCE-V2 TLV is not detected, the driver needs to use the
+	 * RoCE app data for RoCE-v2, not the default app data.
+	 */
+ if (!p_data->arr[DCBX_PROTOCOL_ROCE_V2].update &&
+ p_data->arr[DCBX_PROTOCOL_ROCE].update) {
+ tc = p_data->arr[DCBX_PROTOCOL_ROCE].tc;
+ priority = p_data->arr[DCBX_PROTOCOL_ROCE].priority;
+ qed_dcbx_update_app_info(p_data, p_hwfn, true, true,
+ priority, tc, DCBX_PROTOCOL_ROCE_V2);
+ }
+
+	/* Update ramrod protocol data and hw_info fields
+	 * with default info when corresponding APP TLV's are not detected.
+	 * The enable field follows different logic for ethernet, as only for
+	 * ethernet should dcb be disabled by default; the information arrives
+	 * from the OS (unless an explicit app tlv was present).
+	 */
+ tc = p_data->arr[DCBX_PROTOCOL_ETH].tc;
+ priority = p_data->arr[DCBX_PROTOCOL_ETH].priority;
+ for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) {
+ if (p_data->arr[type].update)
+ continue;
+
+ enable = (type == DCBX_PROTOCOL_ETH) ? false : dcbx_enabled;
+ qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ priority, tc, type);
+ }
+
+ return 0;
+}
+
+/* Parse app TLV's to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
+{
+ struct dcbx_app_priority_feature *p_app;
+ struct dcbx_app_priority_entry *p_tbl;
+ struct qed_dcbx_results data = { 0 };
+ struct dcbx_ets_feature *p_ets;
+ struct qed_hw_info *p_info;
+ u32 pri_tc_tbl, flags;
+ bool dcbx_enabled;
+ int num_entries;
+ int rc = 0;
+
+	/* If the DCBx version is non-zero, then negotiation was
+	 * successfully performed
+	 */
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+ dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+
+ p_app = &p_hwfn->p_dcbx_info->operational.features.app;
+ p_tbl = p_app->app_pri_tbl;
+
+ p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
+ pri_tc_tbl = p_ets->pri_tc_tbl[0];
+
+ p_info = &p_hwfn->hw_info;
+ num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
+
+ rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+ num_entries, dcbx_enabled);
+ if (rc)
+ return rc;
+
+ p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+ data.pf_id = p_hwfn->rel_pf_id;
+ data.dcbx_enabled = dcbx_enabled;
+
+ qed_dcbx_dp_protocol(p_hwfn, &data);
+
+ memcpy(&p_hwfn->p_dcbx_info->results, &data,
+ sizeof(struct qed_dcbx_results));
+
+ return 0;
+}
+
+static int
+qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_mib_meta_data *p_data,
+ enum qed_mib_read_type type)
+{
+ u32 prefix_seq_num, suffix_seq_num;
+ int read_count = 0;
+ int rc = 0;
+
+ /* The data is considered to be valid only if both sequence numbers are
+ * the same.
+ */
+ do {
+ if (type == QED_DCBX_REMOTE_LLDP_MIB) {
+ qed_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
+ suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
+ } else {
+ qed_memcpy_from(p_hwfn, p_ptt, p_data->mib,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->mib->prefix_seq_num;
+ suffix_seq_num = p_data->mib->suffix_seq_num;
+ }
+ read_count++;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DCB,
+ "mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ } while ((prefix_seq_num != suffix_seq_num) &&
+ (read_count < QED_DCBX_MAX_MIB_READ_TRY));
+
+ if (read_count >= QED_DCBX_MAX_MIB_READ_TRY) {
+ DP_ERR(p_hwfn,
+ "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ rc = -EIO;
+ }
+
+ return rc;
+}
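
The prefix/suffix counters give a seqlock-style consistency check: the management firmware bumps both around each MIB update, so a torn read shows mismatched counters and is retried. A single-threaded standalone sketch of the reader side (illustrative only, not driver code; the struct is a simplified stand-in for the real MIB layout):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct demo_mib {
	uint32_t prefix_seq_num;
	uint32_t payload;	/* stands in for the real MIB contents */
	uint32_t suffix_seq_num;
};

/* retry the copy until both sequence numbers match or tries run out */
static int demo_copy_mib(const struct demo_mib *shared, struct demo_mib *out,
			 int max_try)
{
	int read_count = 0;

	do {
		memcpy(out, shared, sizeof(*out));
		read_count++;
	} while (out->prefix_seq_num != out->suffix_seq_num &&
		 read_count < max_try);

	return out->prefix_seq_num == out->suffix_seq_num ? 0 : -1;
}

int main(void)
{
	struct demo_mib shared = { 7, 42, 7 }, snap;
	int rc = demo_copy_mib(&shared, &snap, 100);

	printf("rc=%d payload=%u\n", rc, (unsigned)snap.payload);
	return 0;
}
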
+
+static int
+qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_config_params);
+ data.lldp_local = p_hwfn->p_dcbx_info->lldp_local;
+ data.size = sizeof(struct lldp_config_params_s);
+ qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_remote_lldp_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_status_params);
+ data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote;
+ data.size = sizeof(struct lldp_status_params_s);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_operational_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, operational_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->operational;
+ data.size = sizeof(struct dcbx_mib);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_remote_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, remote_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->remote;
+ data.size = sizeof(struct dcbx_mib);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size);
+
+ return rc;
+}
+
+static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ int rc = -EINVAL;
+
+ switch (type) {
+ case QED_DCBX_OPERATIONAL_MIB:
+ rc = qed_dcbx_read_operational_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_REMOTE_MIB:
+ rc = qed_dcbx_read_remote_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_LOCAL_MIB:
+ rc = qed_dcbx_read_local_mib(p_hwfn, p_ptt);
+ break;
+ case QED_DCBX_REMOTE_LLDP_MIB:
+ rc = qed_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_LOCAL_LLDP_MIB:
+ rc = qed_dcbx_read_local_lldp_mib(p_hwfn, p_ptt);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ }
+
+ return rc;
+}
+
+/* Read updated MIB.
+ * Reconfigure QM and invoke PF update ramrod command if operational MIB
+ * change is detected.
+ */
+int
+qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ int rc = 0;
+
+ rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc)
+ return rc;
+
+ if (type == QED_DCBX_OPERATIONAL_MIB) {
+ rc = qed_dcbx_process_mib_info(p_hwfn);
+ if (!rc) {
+ /* reconfigure tcs of QM queues according
+ * to negotiation results
+ */
+ qed_qm_reconf(p_hwfn, p_ptt);
+
+ /* update storm FW with negotiation results */
+ qed_sp_pf_update(p_hwfn);
+ }
+ }
+
+ return rc;
+}
+
+int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
+{
+ int rc = 0;
+
+ p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
+ if (!p_hwfn->p_dcbx_info) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate 'struct qed_dcbx_info'\n");
+ rc = -ENOMEM;
+ }
+
+ return rc;
+}
+
+void qed_dcbx_info_free(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_info *p_dcbx_info)
+{
+ kfree(p_hwfn->p_dcbx_info);
+}
+
+static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
+ struct qed_dcbx_results *p_src,
+ enum dcbx_protocol_type type)
+{
+ p_data->dcb_enable_flag = p_src->arr[type].enable;
+ p_data->dcb_priority = p_src->arr[type].priority;
+ p_data->dcb_tc = p_src->arr[type].tc;
+}
+
+/* Set pf update ramrod command params */
+void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest)
+{
+ struct protocol_dcb_data *p_dcb_data;
+ bool update_flag = false;
+
+ p_dest->pf_id = p_src->pf_id;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
+ p_dest->update_fcoe_dcb_data_flag = update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update;
+ p_dest->update_roce_dcb_data_flag = update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update;
+ p_dest->update_roce_dcb_data_flag = update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update;
+ p_dest->update_iscsi_dcb_data_flag = update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
+ p_dest->update_eth_dcb_data_flag = update_flag;
+
+ p_dcb_data = &p_dest->fcoe_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
+ p_dcb_data = &p_dest->roce_dcb_data;
+
+ if (p_src->arr[DCBX_PROTOCOL_ROCE].update)
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src,
+ DCBX_PROTOCOL_ROCE);
+ if (p_src->arr[DCBX_PROTOCOL_ROCE_V2].update)
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src,
+ DCBX_PROTOCOL_ROCE_V2);
+
+ p_dcb_data = &p_dest->iscsi_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI);
+ p_dcb_data = &p_dest->eth_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
new file mode 100644
index 000000000..e7f834dbd
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -0,0 +1,80 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DCBX_H
+#define _QED_DCBX_H
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
+
+enum qed_mib_read_type {
+ QED_DCBX_OPERATIONAL_MIB,
+ QED_DCBX_REMOTE_MIB,
+ QED_DCBX_LOCAL_MIB,
+ QED_DCBX_REMOTE_LLDP_MIB,
+ QED_DCBX_LOCAL_LLDP_MIB
+};
+
+struct qed_dcbx_app_data {
+ bool enable; /* DCB enabled */
+ bool update; /* Update indication */
+ u8 priority; /* Priority */
+ u8 tc; /* Traffic Class */
+};
+
+struct qed_dcbx_results {
+ bool dcbx_enabled;
+ u8 pf_id;
+ struct qed_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE];
+};
+
+struct qed_dcbx_app_metadata {
+ enum dcbx_protocol_type id;
+ char *name;
+ enum qed_pci_personality personality;
+};
+
+#define QED_MFW_GET_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _SHIFT))
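The macro assumes every field is described by a _MASK/_SHIFT constant pair (in the qed case these come from the HSI headers); token pasting turns the field name into those two constants. A standalone sketch with an invented field (illustrative only, not driver code):

#include <stdio.h>
#include <stdint.h>

#define DEMO_FIELD_MASK		0x00000f00 /* hypothetical 4-bit field at bits 8-11 */
#define DEMO_FIELD_SHIFT	8

#define QED_MFW_GET_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _SHIFT))

int main(void)
{
	uint32_t flags = 0x00000a42;

	/* 0x00000a42 & 0x00000f00 = 0xa00, shifted down -> 0xa */
	printf("field = 0x%x\n", (unsigned)QED_MFW_GET_FIELD(flags, DEMO_FIELD));
	return 0;
}
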
+
+struct qed_dcbx_info {
+ struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
+ struct dcbx_local_params local_admin;
+ struct qed_dcbx_results results;
+ struct dcbx_mib operational;
+ struct dcbx_mib remote;
+ u8 dcbx_cap;
+};
+
+struct qed_dcbx_mib_meta_data {
+ struct lldp_config_params_s *lldp_local;
+ struct lldp_status_params_s *lldp_remote;
+ struct dcbx_local_params *local_admin;
+ struct dcbx_mib *mib;
+ size_t size;
+ u32 addr;
+};
+
+/* QED local interface routines */
+int
+qed_dcbx_mib_update_event(struct qed_hwfn *,
+ struct qed_ptt *, enum qed_mib_read_type);
+
+int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn);
+void qed_dcbx_info_free(struct qed_hwfn *, struct qed_dcbx_info *);
+void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index b7d100f6b..2d89e8c16 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -22,6 +22,7 @@
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
+#include "qed_dcbx.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
@@ -30,6 +31,11 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+static spinlock_t qm_lock;
+static bool qm_lock_init = false;
/* API common to all protocols */
enum BAR_ID {
@@ -40,10 +46,14 @@ enum BAR_ID {
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
enum BAR_ID bar_id)
{
- u32 bar_reg = (bar_id == BAR_ID_0 ?
- PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
- u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ u32 bar_reg = (bar_id == BAR_ID_0 ?
+ PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+ u32 val;
+
+ if (IS_VF(p_hwfn->cdev))
+ return 1 << 17;
+ val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
if (val)
return 1 << (val + 15);
@@ -105,12 +115,17 @@ static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
qm_info->qm_vport_params = NULL;
kfree(qm_info->qm_port_params);
qm_info->qm_port_params = NULL;
+ kfree(qm_info->wfq_data);
+ qm_info->wfq_data = NULL;
}
void qed_resc_free(struct qed_dev *cdev)
{
int i;
+ if (IS_VF(cdev))
+ return;
+
kfree(cdev->fw_data);
cdev->fw_data = NULL;
@@ -134,20 +149,29 @@ void qed_resc_free(struct qed_dev *cdev)
qed_eq_free(p_hwfn, p_hwfn->p_eq);
qed_consq_free(p_hwfn, p_hwfn->p_consq);
qed_int_free(p_hwfn);
+ qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
+ qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
}
}
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
{
+ u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port;
- u8 num_vports, i, vport_id, num_ports;
u16 num_pqs, multi_cos_tcs = 1;
-
+ u8 pf_wfq = qm_info->pf_wfq;
+ u32 pf_rl = qm_info->pf_rl;
+ u16 num_vfs = 0;
+
+#ifdef CONFIG_QED_SRIOV
+ if (p_hwfn->cdev->p_iov_info)
+ num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+#endif
memset(qm_info, 0, sizeof(*qm_info));
- num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
+ num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
/* Sanity checking that setup requires legal number of resources */
@@ -160,40 +184,70 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
 	/* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
*/
- qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
- num_pqs, GFP_KERNEL);
+ qm_info->qm_pq_params = kcalloc(num_pqs,
+ sizeof(struct init_qm_pq_params),
+ b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
if (!qm_info->qm_pq_params)
goto alloc_err;
- qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
- num_vports, GFP_KERNEL);
+ qm_info->qm_vport_params = kcalloc(num_vports,
+ sizeof(struct init_qm_vport_params),
+ b_sleepable ? GFP_KERNEL
+ : GFP_ATOMIC);
if (!qm_info->qm_vport_params)
goto alloc_err;
- qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
- MAX_NUM_PORTS, GFP_KERNEL);
+ qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
+ sizeof(struct init_qm_port_params),
+ b_sleepable ? GFP_KERNEL
+ : GFP_ATOMIC);
if (!qm_info->qm_port_params)
goto alloc_err;
+ qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
+ b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
+ if (!qm_info->wfq_data)
+ goto alloc_err;
+
vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
/* First init per-TC PQs */
for (i = 0; i < multi_cos_tcs; i++) {
- struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
-
- params->vport_id = vport_id;
- params->tc_id = p_hwfn->hw_info.non_offload_tc;
- params->wrr_group = 1;
+ struct init_qm_pq_params *params =
+ &qm_info->qm_pq_params[curr_queue++];
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH) {
+ params->vport_id = vport_id;
+ params->tc_id = p_hwfn->hw_info.non_offload_tc;
+ params->wrr_group = 1;
+ } else {
+ params->vport_id = vport_id;
+ params->tc_id = p_hwfn->hw_info.offload_tc;
+ params->wrr_group = 1;
+ }
}
/* Then init pure-LB PQ */
- qm_info->pure_lb_pq = i;
- qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
- qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
- qm_info->qm_pq_params[i].wrr_group = 1;
- i++;
+ qm_info->pure_lb_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id =
+ (u8) RESC_START(p_hwfn, QED_VPORT);
+ qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
qm_info->offload_pq = 0;
+ /* Then init per-VF PQs */
+ vf_offset = curr_queue;
+ for (i = 0; i < num_vfs; i++) {
+ /* First vport is used by the PF */
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.non_offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
+
+ qm_info->vf_queues_offset = vf_offset;
qm_info->num_pqs = num_pqs;
qm_info->num_vports = num_vports;
@@ -211,29 +265,91 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
- qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
+ qm_info->num_vf_pqs = num_vfs;
+ qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+
+ for (i = 0; i < qm_info->num_vports; i++)
+ qm_info->qm_vport_params[i].vport_wfq = 1;
- qm_info->pf_wfq = 0;
- qm_info->pf_rl = 0;
qm_info->vport_rl_en = 1;
+ qm_info->vport_wfq_en = 1;
+ qm_info->pf_rl = pf_rl;
+ qm_info->pf_wfq = pf_wfq;
return 0;
alloc_err:
DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
- kfree(qm_info->qm_pq_params);
- kfree(qm_info->qm_vport_params);
- kfree(qm_info->qm_port_params);
-
+ qed_qm_info_free(p_hwfn);
return -ENOMEM;
}
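
With the SR-IOV additions, the PQ array built by qed_init_qm_info() has three regions: the per-TC PF queues first, then one pure-LB queue, then one queue per VF (each VF also taking the vport one past the PF's). A standalone sketch of the resulting index layout, with invented counts (illustrative only, not driver code):

#include <stdio.h>

int main(void)
{
	int multi_cos_tcs = 1, num_vfs = 4; /* hypothetical counts */
	int curr_queue = 0, vf_offset, i;

	for (i = 0; i < multi_cos_tcs; i++)
		printf("pq %d: PF per-TC queue\n", curr_queue++);

	printf("pq %d: pure-LB queue\n", curr_queue++);

	vf_offset = curr_queue; /* becomes qm_info->vf_queues_offset */
	for (i = 0; i < num_vfs; i++)
		printf("pq %d: VF %d (vport %d)\n", curr_queue++, i, i + 1);

	printf("vf_queues_offset = %d, num_pqs = %d\n", vf_offset, curr_queue);
	return 0;
}
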
+/* This function reconfigures the QM pf on the fly.
+ * For this purpose we:
+ * 1. reconfigure the QM database
+ * 2. set new values to the runtime array
+ * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
+ * 4. activate the init tool in the QM_PF stage
+ * 5. send an sdm_qm_cmd through the rbc interface to release the QM
+ */
+int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ bool b_rc;
+ int rc;
+
+	/* qm_info is allocated in qed_init_qm_info(), which was already called
+	 * from qed_resc_alloc() or a previous call of qed_qm_reconf().
+	 * The allocated size may change on each init, so we free it before the
+	 * next allocation.
+	 */
+ qed_qm_info_free(p_hwfn);
+
+ /* initialize qed's qm data structure */
+ rc = qed_init_qm_info(p_hwfn, false);
+ if (rc)
+ return rc;
+
+ /* stop PF's qm queues */
+ spin_lock_bh(&qm_lock);
+ b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ spin_unlock_bh(&qm_lock);
+ if (!b_rc)
+ return -EINVAL;
+
+ /* clear the QM_PF runtime phase leftovers from previous init */
+ qed_init_clear_rt_data(p_hwfn);
+
+ /* prepare QM portion of runtime array */
+ qed_qm_init_pf(p_hwfn);
+
+ /* activate init tool on runtime array */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ return rc;
+
+ /* start PF's qm queues */
+ spin_lock_bh(&qm_lock);
+ b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ spin_unlock_bh(&qm_lock);
+ if (!b_rc)
+ return -EINVAL;
+
+ return 0;
+}
+
int qed_resc_alloc(struct qed_dev *cdev)
{
struct qed_consq *p_consq;
struct qed_eq *p_eq;
int i, rc = 0;
+ if (IS_VF(cdev))
+ return rc;
+
cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
if (!cdev->fw_data)
return -ENOMEM;
@@ -279,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err;
/* Prepare and process QM requirements */
- rc = qed_init_qm_info(p_hwfn);
+ rc = qed_init_qm_info(p_hwfn, true);
if (rc)
goto alloc_err;
@@ -308,6 +424,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (rc)
goto alloc_err;
+ rc = qed_iov_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
/* EQ */
p_eq = qed_eq_alloc(p_hwfn, 256);
if (!p_eq) {
@@ -330,6 +450,14 @@ int qed_resc_alloc(struct qed_dev *cdev)
"Failed to allocate memory for dmae_info structure\n");
goto alloc_err;
}
+
+ /* DCBX initialization */
+ rc = qed_dcbx_info_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for dcbx structure\n");
+ goto alloc_err;
+ }
}
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
@@ -350,6 +478,9 @@ void qed_resc_setup(struct qed_dev *cdev)
{
int i;
+ if (IS_VF(cdev))
+ return;
+
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -365,14 +496,15 @@ void qed_resc_setup(struct qed_dev *cdev)
p_hwfn->mcp_info->mfw_mb_length);
qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+
+ qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
}
}
#define FINAL_CLEANUP_POLL_CNT (100)
#define FINAL_CLEANUP_POLL_TIME (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 id)
+ struct qed_ptt *p_ptt, u16 id, bool is_vf)
{
u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
int rc = -EBUSY;
@@ -380,6 +512,9 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
+ if (is_vf)
+ id += 0x10;
+
command |= X_FINAL_CLEANUP_AGG_INT <<
SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
@@ -453,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
hw_mode |= 1 << MODE_ASIC;
+ if (p_hwfn->cdev->num_hwfns > 1)
+ hw_mode |= 1 << MODE_100G;
+
p_hwfn->hw_info.hw_mode = hw_mode;
+
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
+ "Configuring function for hw_mode: 0x%08x\n",
+ p_hwfn->hw_info.hw_mode);
}
/* Init run time data for all PFs on an engine. */
@@ -492,7 +634,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
struct qed_dev *cdev = p_hwfn->cdev;
+ u32 concrete_fid;
int rc = 0;
+ u8 vf_id;
qed_init_cau_rt_data(cdev);
@@ -542,6 +686,14 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, 0x20b4,
qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
+ for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
+ concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
+ qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+ }
+ /* pretend to original PF */
+ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+
return rc;
}
@@ -558,6 +710,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
+ struct qed_tunn_start_params *p_tunn,
int hw_mode,
bool b_hw_start,
enum qed_int_mode int_mode,
@@ -574,7 +727,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
/* Update rate limit once we'll actually have a link */
- p_hwfn->qm_info.pf_rl = 100;
+ p_hwfn->qm_info.pf_rl = 100000;
}
qed_cxt_hw_init_pf(p_hwfn);
@@ -603,7 +756,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
/* Cleanup chip from previous driver if such remains exist */
- rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
+ rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
if (rc != 0)
return rc;
@@ -625,7 +778,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
/* send function start command */
- rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+ rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
+ allow_npar_tx_switch);
if (rc)
DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
}
@@ -672,6 +826,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
}
int qed_hw_init(struct qed_dev *cdev,
+ struct qed_tunn_start_params *p_tunn,
bool b_hw_start,
enum qed_int_mode int_mode,
bool allow_npar_tx_switch,
@@ -680,13 +835,25 @@ int qed_hw_init(struct qed_dev *cdev,
u32 load_code, param;
int rc, mfw_rc, i;
- rc = qed_init_fw_data(cdev, bin_fw_data);
- if (rc != 0)
- return rc;
+ if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+ return -EINVAL;
+ }
+
+ if (IS_PF(cdev)) {
+ rc = qed_init_fw_data(cdev, bin_fw_data);
+ if (rc != 0)
+ return rc;
+ }
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ if (IS_VF(cdev)) {
+ p_hwfn->b_int_enabled = 1;
+ continue;
+ }
+
/* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
@@ -708,6 +875,11 @@ int qed_hw_init(struct qed_dev *cdev,
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
+ if (!qm_lock_init) {
+ spin_lock_init(&qm_lock);
+ qm_lock_init = true;
+ }
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -724,7 +896,7 @@ int qed_hw_init(struct qed_dev *cdev,
 		/* Fall through */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
- p_hwfn->hw_info.hw_mode,
+ p_tunn, p_hwfn->hw_info.hw_mode,
b_hw_start, int_mode,
allow_npar_tx_switch);
break;
@@ -749,6 +921,20 @@ int qed_hw_init(struct qed_dev *cdev,
return mfw_rc;
}
+ /* send DCBX attention request command */
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DCB,
+ "sending phony dcbx set command to trigger DCBx attention handling\n");
+ mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
+ &load_code, &param);
+ if (mfw_rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send DCBX attention request\n");
+ return mfw_rc;
+ }
+
p_hwfn->hw_init_done = true;
}
@@ -811,6 +997,11 @@ int qed_hw_stop(struct qed_dev *cdev)
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
+ if (IS_VF(cdev)) {
+ qed_vf_pf_int_cleanup(p_hwfn);
+ continue;
+ }
+
/* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false;
@@ -842,15 +1033,16 @@ int qed_hw_stop(struct qed_dev *cdev)
usleep_range(1000, 2000);
}
- /* Disable DMAE in PXP - in CMT, this should only be done for
- * first hw-function, and only after all transactions have
- * stopped for all active hw-functions.
- */
- t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
- cdev->hwfns[0].p_main_ptt,
- false);
- if (t_rc != 0)
- rc = t_rc;
+ if (IS_PF(cdev)) {
+ /* Disable DMAE in PXP - in CMT, this should only be done for
+ * first hw-function, and only after all transactions have
+ * stopped for all active hw-functions.
+ */
+ t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
+ cdev->hwfns[0].p_main_ptt, false);
+ if (t_rc != 0)
+ rc = t_rc;
+ }
return rc;
}
@@ -861,7 +1053,12 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
- struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ if (IS_VF(cdev)) {
+ qed_vf_pf_int_cleanup(p_hwfn);
+ continue;
+ }
DP_VERBOSE(p_hwfn,
NETIF_MSG_IFDOWN,
@@ -885,6 +1082,9 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
/* Re-open incoming traffic */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
@@ -914,6 +1114,13 @@ int qed_hw_reset(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ if (IS_VF(cdev)) {
+ rc = qed_vf_pf_reset(p_hwfn);
+ if (rc)
+ return rc;
+ continue;
+ }
+
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
/* Check for incorrect states */
@@ -1009,13 +1216,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
u32 *resc_start = p_hwfn->hw_info.resc_start;
+ u8 num_funcs = p_hwfn->num_funcs_on_engine;
u32 *resc_num = p_hwfn->hw_info.resc_num;
struct qed_sb_cnt_info sb_cnt_info;
- int num_funcs, i;
-
- num_funcs = MAX_NUM_PFS_BB;
+ int i, max_vf_vlan_filters;
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+
+#ifdef CONFIG_QED_SRIOV
+ max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
+#else
+ max_vf_vlan_filters = 0;
+#endif
+
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
resc_num[QED_SB] = min_t(u32,
@@ -1220,6 +1433,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
+static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 reg_function_hide, tmp, eng_mask;
+ u8 num_funcs;
+
+ num_funcs = MAX_NUM_PFS_BB;
+
+ /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
+ * in the other bits are selected.
+ * Bits 1-15 are for functions 1-15, respectively, and their value is
+	 * '0' only for enabled functions (function 0 always exists and is
+	 * enabled).
+ * In case of CMT, only the "even" functions are enabled, and thus the
+ * number of functions for both hwfns is learnt from the same bits.
+ */
+ reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+
+ if (reg_function_hide & 0x1) {
+ if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
+ num_funcs = 0;
+ eng_mask = 0xaaaa;
+ } else {
+ num_funcs = 1;
+ eng_mask = 0x5554;
+ }
+
+ /* Get the number of the enabled functions on the engine */
+ tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ num_funcs++;
+ tmp >>= 0x1;
+ }
+ }
+
+ p_hwfn->num_funcs_on_engine = num_funcs;
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_PROBE,
+ "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
+ p_hwfn->rel_pf_id,
+ p_hwfn->abs_pf_id,
+ p_hwfn->num_funcs_on_engine);
+}
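
A standalone sketch of the counting loop above (illustrative only, not driver code): the register is inverted so enabled functions show up as '1' bits, masked down to the functions relevant to this engine, and counted on top of the always-present function 0. The register value is invented; with everything enabled, the 0x5554 mask contributes the 7 remaining even functions, for 8 in total.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg_function_hide = 0x00000001; /* hypothetical: bypass set, all enabled */
	uint32_t eng_mask = 0x5554;              /* even functions 2-14 */
	uint8_t num_funcs = 1;                   /* function 0 always exists */
	uint32_t tmp;

	/* a '0' bit means enabled, so invert before masking and counting */
	tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
	while (tmp) {
		if (tmp & 0x1)
			num_funcs++;
		tmp >>= 0x1;
	}
	printf("enabled functions on engine: %u\n", (unsigned)num_funcs);
	return 0;
}
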
+
static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -1228,6 +1486,13 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
u32 port_mode;
int rc;
+	/* Since all information is common, only the first hwfn should do this */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_iov_hw_info(p_hwfn);
+ if (rc)
+ return rc;
+ }
+
/* Read the port mode */
port_mode = qed_rd(p_hwfn, p_ptt,
CNIG_REG_NW_PORT_MODE_BB_B0);
@@ -1271,6 +1536,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.personality = protocol;
}
+ qed_get_num_funcs(p_hwfn, p_ptt);
+
qed_hw_get_resc(p_hwfn);
return rc;
@@ -1336,6 +1603,9 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
p_hwfn->regview = p_regview;
p_hwfn->doorbells = p_doorbells;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_hw_prepare(p_hwfn);
+
/* Validate that chip access is feasible */
if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
DP_ERR(p_hwfn,
@@ -1387,6 +1657,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
return rc;
err2:
+ if (IS_LEAD_HWFN(p_hwfn))
+ qed_iov_free_hw_info(p_hwfn->cdev);
qed_mcp_free(p_hwfn);
err1:
qed_hw_hwfn_free(p_hwfn);
@@ -1401,7 +1673,8 @@ int qed_hw_prepare(struct qed_dev *cdev,
int rc;
/* Store the precompiled init data ptrs */
- qed_init_iro_array(cdev);
+ if (IS_PF(cdev))
+ qed_init_iro_array(cdev);
/* Initialize the first hwfn - will learn number of hwfns */
rc = qed_hw_prepare_single(p_hwfn,
@@ -1433,9 +1706,11 @@ int qed_hw_prepare(struct qed_dev *cdev,
 	 * initialized hwfn 0.
*/
if (rc) {
- qed_init_free(p_hwfn);
- qed_mcp_free(p_hwfn);
- qed_hw_hwfn_free(p_hwfn);
+ if (IS_PF(cdev)) {
+ qed_init_free(p_hwfn);
+ qed_mcp_free(p_hwfn);
+ qed_hw_hwfn_free(p_hwfn);
+ }
}
}
@@ -1449,10 +1724,17 @@ void qed_hw_remove(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ if (IS_VF(cdev)) {
+ qed_vf_pf_release(p_hwfn);
+ continue;
+ }
+
qed_init_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn);
qed_mcp_free(p_hwfn);
}
+
+ qed_iov_free_hw_info(cdev);
}
int qed_chain_alloc(struct qed_dev *cdev,
@@ -1593,3 +1875,395 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
return 0;
}
+
+/* Calculate final WFQ values for all vports and configure them.
+ * After this configuration each vport will have
+ * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
+ */
+static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+ vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
+ min_pf_rate;
+ qed_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].vport_wfq);
+ }
+}
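
A standalone sketch of the weight arithmetic above, with QED_WFQ_UNIT = 100 and invented rates (illustrative only, not driver code): a vport asking for a quarter of the PF minimum gets vport_wfq = 25, and multiplying back recovers the approximate min rate from the comment.

#include <stdio.h>
#include <stdint.h>

#define QED_WFQ_UNIT 100

int main(void)
{
	uint32_t min_pf_rate = 10000; /* hypothetical PF minimum, Mbps */
	uint32_t wfq_speed = 2500;    /* hypothetical vport minimum, Mbps */
	uint32_t vport_wfq = (wfq_speed * QED_WFQ_UNIT) / min_pf_rate;

	printf("vport_wfq = %u -> approx min rate = %u Mbps\n",
	       (unsigned)vport_wfq,
	       (unsigned)(min_pf_rate * vport_wfq / QED_WFQ_UNIT));
	return 0;
}
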
+
+static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
+ u32 min_pf_rate)
+{
+ int i;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
+ p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+}
+
+static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ qed_init_wfq_default_param(p_hwfn, min_pf_rate);
+ qed_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].vport_wfq);
+ }
+}
+
+/* This function performs several validations for WFQ
+ * configuration and required min rate for a given vport:
+ * 1. req_rate must be greater than one percent of min_pf_rate.
+ * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
+ * rates to get less than one percent of min_pf_rate.
+ * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
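+ * Here "one percent" means min_pf_rate / QED_WFQ_UNIT, e.g. 100 Mbps
+ * for a 10000 Mbps PF min rate (assuming QED_WFQ_UNIT is 100).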
+ */
+static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
+ u16 vport_id, u32 req_rate,
+ u32 min_pf_rate)
+{
+ u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+ int non_requested_count = 0, req_count = 0, i, num_vports;
+
+ num_vports = p_hwfn->qm_info.num_vports;
+
+ /* Accounting for the vports which are configured for WFQ explicitly */
+ for (i = 0; i < num_vports; i++) {
+ u32 tmp_speed;
+
+ if ((i != vport_id) &&
+ p_hwfn->qm_info.wfq_data[i].configured) {
+ req_count++;
+ tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+ total_req_min_rate += tmp_speed;
+ }
+ }
+
+ /* Include current vport data as well */
+ req_count++;
+ total_req_min_rate += req_rate;
+ non_requested_count = num_vports - req_count;
+
+ if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ vport_id, req_rate, min_pf_rate);
+ return -EINVAL;
+ }
+
+ if (num_vports > QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Number of vports is greater than %d\n",
+ QED_WFQ_UNIT);
+ return -EINVAL;
+ }
+
+ if (total_req_min_rate > min_pf_rate) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
+ total_req_min_rate, min_pf_rate);
+ return -EINVAL;
+ }
+
+ total_left_rate = min_pf_rate - total_req_min_rate;
+
+ left_rate_per_vp = total_left_rate / non_requested_count;
+ if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ left_rate_per_vp, min_pf_rate);
+ return -EINVAL;
+ }
+
+ p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+ p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+ for (i = 0; i < num_vports; i++) {
+ if (p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+ }
+
+ return 0;
+}
+
+static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
+{
+ struct qed_mcp_link_state *p_link;
+ int rc = 0;
+
+ p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
+
+ if (!p_link->min_pf_rate) {
+ p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
+ p_hwfn->qm_info.wfq_data[vp_id].configured = true;
+ return rc;
+ }
+
+ rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
+
+ if (rc == 0)
+ qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+ else
+ DP_NOTICE(p_hwfn,
+ "Validation failed while configuring min rate\n");
+
+ return rc;
+}
+
+static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ bool use_wfq = false;
+ int rc = 0;
+ u16 i;
+
+ /* Validate all pre configured vports for wfq */
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 rate;
+
+ if (!p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+ use_wfq = true;
+
+ rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "WFQ validation failed while configuring min rate\n");
+ break;
+ }
+ }
+
+ if (!rc && use_wfq)
+ qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+ else
+ qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+
+ return rc;
+}
+
+/* Main API for qed clients to configure vport min rate.
+ * vp_id - vport id, in the PF's range [0 .. total_num_vports_per_pf - 1]
+ * rate - speed in Mbps to be assigned to the given vport.
+ */
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
+{
+ int i, rc = -EINVAL;
+
+ /* Currently not supported; might change in the future */
+ if (cdev->num_hwfns > 1) {
+ DP_NOTICE(cdev,
+ "WFQ configuration is not supported for this device\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
+
+ if (!rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
+/* API to configure WFQ from mcp link change */
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+{
+ int i;
+
+ if (cdev->num_hwfns > 1) {
+ DP_VERBOSE(cdev,
+ NETIF_MSG_LINK,
+ "WFQ configuration is not supported for this device\n");
+ return;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ __qed_configure_vp_wfq_on_link_change(p_hwfn,
+ p_hwfn->p_dpc_ptt,
+ min_pf_rate);
+ }
+}
+
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 max_bw)
+{
+ int rc = 0;
+
+ p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+ if (!p_link->line_speed && (max_bw != 100))
+ return rc;
+
+ p_link->speed = (p_link->line_speed * max_bw) / 100;
+ p_hwfn->qm_info.pf_rl = p_link->speed;
+
+ /* Since the limiter also affects Tx-switched traffic, we don't want it
+ * to limit such traffic in case there's no actual limit.
+ * In that case, set limit to imaginary high boundary.
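+ * (100,000 Mbps below, effectively unlimited).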
+ */
+ if (max_bw == 100)
+ p_hwfn->qm_info.pf_rl = 100000;
+
+ rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_hwfn->qm_info.pf_rl);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configured MAX bandwidth to be %08x Mb/sec\n",
+ p_link->speed);
+
+ return rc;
+}
+
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
+{
+ int i, rc = -EINVAL;
+
+ if (max_bw < 1 || max_bw > 100) {
+ DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+ struct qed_mcp_link_state *p_link;
+ struct qed_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+ p_link, max_bw);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 min_bw)
+{
+ int rc = 0;
+
+ p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+ p_hwfn->qm_info.pf_wfq = min_bw;
+
+ if (!p_link->line_speed)
+ return rc;
+
+ p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+ rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configured MIN bandwidth to be %d Mb/sec\n",
+ p_link->min_pf_rate);
+
+ return rc;
+}
+
+/* Main API to configure PF min bandwidth where bw range is [1-100] */
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
+{
+ int i, rc = -EINVAL;
+
+ if (min_bw < 1 || min_bw > 100) {
+ DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+ struct qed_mcp_link_state *p_link;
+ struct qed_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+ p_link, min_bw);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ if (p_link->min_pf_rate) {
+ u32 min_rate = p_link->min_pf_rate;
+
+ rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
+ p_ptt,
+ min_rate);
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_link_state *p_link;
+
+ p_link = &p_hwfn->mcp_info->link_output;
+
+ if (p_link->min_pf_rate)
+ qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+
+ memset(p_hwfn->qm_info.wfq_data, 0,
+ sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index d6c7ddf4f..dde364d6f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -62,6 +62,7 @@ void qed_resc_setup(struct qed_dev *cdev);
* @brief qed_hw_init -
*
* @param cdev
+ * @param p_tunn
* @param b_hw_start
* @param int_mode - interrupt mode [msix, inta, etc.] to use.
* @param allow_npar_tx_switch - npar tx switching to be used
@@ -72,6 +73,7 @@ void qed_resc_setup(struct qed_dev *cdev);
* @return int
*/
int qed_hw_init(struct qed_dev *cdev,
+ struct qed_tunn_start_params *p_tunn,
bool b_hw_start,
enum qed_int_mode int_mode,
bool allow_npar_tx_switch,
@@ -180,11 +182,15 @@ enum qed_dmae_address_type_t {
* used mostly to write a zeroed buffer to destination address
* using DMA
*/
-#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
-#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
+#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
+#define QED_DMAE_FLAG_VF_SRC 0x00000002
+#define QED_DMAE_FLAG_VF_DST 0x00000004
+#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
struct qed_dmae_params {
- u32 flags; /* consists of QED_DMAE_FLAG_* values */
+ u32 flags; /* consists of QED_DMAE_FLAG_* values */
+ u8 src_vfid;
+ u8 dst_vfid;
};
/**
@@ -207,6 +213,23 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
u32 flags);
/**
+ * @brief qed_dmae_host2host - copy data from a source address
+ * to a destination address (for SRIOV) using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param p_params
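+ *
+ * @return int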
+ */
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct qed_dmae_params *p_params);
+
+/**
* @brief qed_chain_alloc - Allocate and initialize a chain
*
* @param p_hwfn
@@ -280,11 +303,11 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param p_ptt
* @param id - For PF, engine-relative. For VF, PF-relative.
+ * @param is_vf - true iff cleanup is made for a VF.
*
* @return int
*/
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 id);
+ struct qed_ptt *p_ptt, u16 id, bool is_vf);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index a368f5e71..e29ed5a69 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -29,9 +29,9 @@ struct qed_ptt;
enum common_event_opcode {
COMMON_EVENT_PF_START,
COMMON_EVENT_PF_STOP,
- COMMON_EVENT_RESERVED,
- COMMON_EVENT_RESERVED2,
- COMMON_EVENT_RESERVED3,
+ COMMON_EVENT_VF_START,
+ COMMON_EVENT_VF_STOP,
+ COMMON_EVENT_VF_PF_CHANNEL,
COMMON_EVENT_RESERVED4,
COMMON_EVENT_RESERVED5,
COMMON_EVENT_RESERVED6,
@@ -44,9 +44,9 @@ enum common_ramrod_cmd_id {
COMMON_RAMROD_UNUSED,
COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
- COMMON_RAMROD_RESERVED,
- COMMON_RAMROD_RESERVED2,
- COMMON_RAMROD_RESERVED3,
+ COMMON_RAMROD_VF_START,
+ COMMON_RAMROD_VF_STOP,
+ COMMON_RAMROD_PF_UPDATE,
COMMON_RAMROD_EMPTY,
MAX_COMMON_RAMROD_CMD_ID
};
@@ -573,6 +573,14 @@ union event_ring_element {
struct event_ring_next_addr next_addr;
};
+struct mstorm_non_trigger_vf_zone {
+ struct eth_mstorm_per_queue_stat eth_queue_stat;
+};
+
+struct mstorm_vf_zone {
+ struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
enum personality_type {
BAD_PERSONALITY_TYP,
PERSONALITY_RESERVED,
@@ -626,6 +634,59 @@ struct pf_start_ramrod_data {
u8 reserved0[4];
};
+/* Data for port update ramrod */
+struct protocol_dcb_data {
+ u8 dcb_enable_flag;
+ u8 dcb_priority;
+ u8 dcb_tc;
+ u8 reserved;
+};
+
+/* tunnel configuration */
+struct pf_update_tunnel_config {
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan;
+ u8 tx_enable_l2geneve;
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre;
+ u8 tx_enable_ipgre;
+ u8 tunnel_clss_vxlan;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ __le16 vxlan_udp_port;
+ __le16 geneve_udp_port;
+ __le16 reserved[3];
+};
+
+struct pf_update_ramrod_data {
+ u8 pf_id;
+ u8 update_eth_dcb_data_flag;
+ u8 update_fcoe_dcb_data_flag;
+ u8 update_iscsi_dcb_data_flag;
+ u8 update_roce_dcb_data_flag;
+ u8 update_mf_vlan_flag;
+ __le16 mf_vlan;
+ struct protocol_dcb_data eth_dcb_data;
+ struct protocol_dcb_data fcoe_dcb_data;
+ struct protocol_dcb_data iscsi_dcb_data;
+ struct protocol_dcb_data roce_dcb_data;
+ struct pf_update_tunnel_config tunnel_config;
+};
+
+/* Tunnel classification scheme */
+enum tunnel_clss {
+ TUNNEL_CLSS_MAC_VLAN = 0,
+ TUNNEL_CLSS_MAC_VNI,
+ TUNNEL_CLSS_INNER_MAC_VLAN,
+ TUNNEL_CLSS_INNER_MAC_VNI,
+ MAX_TUNNEL_CLSS
+};
+
enum ports_mode {
ENGX2_PORTX1 /* 2 engines x 1 port */,
ENGX2_PORTX2 /* 2 engines x 2 ports */,
@@ -635,6 +696,16 @@ enum ports_mode {
MAX_PORTS_MODE
};
+struct pstorm_non_trigger_vf_zone {
+ struct eth_pstorm_per_queue_stat eth_queue_stat;
+ struct regpair reserved[2];
+};
+
+struct pstorm_vf_zone {
+ struct pstorm_non_trigger_vf_zone non_trigger;
+ struct regpair reserved[7];
+};
+
/* Ramrod Header of SPQE */
struct ramrod_header {
__le32 cid /* Slowpath Connection CID */;
@@ -664,6 +735,36 @@ struct tstorm_per_port_stat {
struct regpair preroce_irregular_pkt;
};
+struct ustorm_non_trigger_vf_zone {
+ struct eth_ustorm_per_queue_stat eth_queue_stat;
+ struct regpair vf_pf_msg_addr;
+};
+
+struct ustorm_trigger_vf_zone {
+ u8 vf_pf_msg_valid;
+ u8 reserved[7];
+};
+
+struct ustorm_vf_zone {
+ struct ustorm_non_trigger_vf_zone non_trigger;
+ struct ustorm_trigger_vf_zone trigger;
+};
+
+struct vf_start_ramrod_data {
+ u8 vf_id;
+ u8 enable_flr_ack;
+ __le16 opaque_fid;
+ u8 personality;
+ u8 reserved[3];
+};
+
+struct vf_stop_ramrod_data {
+ u8 vf_id;
+ u8 reserved0;
+ __le16 reserved1;
+ __le32 reserved2;
+};
+
struct atten_status_block {
__le32 atten_bits;
__le32 atten_ack;
@@ -990,7 +1091,7 @@ enum init_phases {
PHASE_ENGINE,
PHASE_PORT,
PHASE_PF,
- PHASE_RESERVED,
+ PHASE_VF,
PHASE_QM_PF,
MAX_INIT_PHASES
};
@@ -1603,6 +1704,19 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
u16 start_pq,
u16 num_pqs);
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port);
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool vxlan_enable);
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool eth_gre_enable,
+ bool ip_gre_enable);
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port);
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool eth_geneve_enable,
+ bool ip_geneve_enable);
+
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
@@ -3586,6 +3700,7 @@ struct public_port {
#define MEDIA_DA_TWINAX 0x3
#define MEDIA_BASE_T 0x4
#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_MODULE_FIBER 0x6
#define MEDIA_KR 0xf0
#define MEDIA_NOT_PRESENT 0xff
@@ -3788,7 +3903,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_SET_LLDP 0x24000000
#define DRV_MSG_CODE_SET_DCBX 0x25000000
-
+#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_CODE_INITIATE_FLR 0x02000000
@@ -3808,6 +3923,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000
#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -3865,6 +3981,18 @@ struct public_drv_mb {
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
+#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
+#define DRV_MB_PARAM_BIST_RC_PASSED 1
+#define DRV_MB_PARAM_BIST_RC_FAILED 2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
+
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
+
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
@@ -5067,4 +5195,8 @@ struct hw_set_image {
struct hw_set_info hw_sets[1];
};
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u8 pf_id, u16 pf_wfq);
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index a95a3e4b3..0ada7fdb9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -23,6 +23,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
#define QED_BAR_ACQUIRE_TIMEOUT 1000
@@ -236,8 +237,12 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
quota = min_t(size_t, n - done,
PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
- qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
- hw_offset = qed_ptt_get_bar_addr(p_ptt);
+ if (IS_PF(p_hwfn->cdev)) {
+ qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+ hw_offset = qed_ptt_get_bar_addr(p_ptt);
+ } else {
+ hw_offset = hw_addr + done;
+ }
dw_count = quota / 4;
host_addr = (u32 *)((u8 *)addr + done);
@@ -338,14 +343,25 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn,
*(u32 *)&p_ptt->pxp.pretend);
}
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+ u32 concrete_fid = 0;
+
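+ /* A concrete FID packs the parent PF id, the VF id and a vf-valid
+ * bit into the single identifier the HW expects.
+ */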
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
+
+ return concrete_fid;
+}
+
/* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_src_type_grc,
const u8 is_dst_type_grc,
struct qed_dmae_params *p_params)
{
+ u16 opcode_b = 0;
u32 opcode = 0;
- u16 opcodeB = 0;
/* Whether the source is the PCIe or the GRC.
* 0- The source is the PCIe
@@ -387,14 +403,24 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
DMAE_CMD_DST_ADDR_RESET_SHIFT);
- opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
- DMAE_CMD_SRC_VF_ID_SHIFT);
+ /* SRC/DST VFID: all 1's - pf, otherwise VF id */
+ if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
+ opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
+ opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
+ } else {
+ opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
+ DMAE_CMD_SRC_VF_ID_SHIFT;
+ }
- opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
- DMAE_CMD_DST_VF_ID_SHIFT);
+ if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
+ opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
+ opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+ } else {
+ opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+ }
p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
- p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
+ p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}
u32 qed_dmae_idx_to_go_cmd(u8 idx)
@@ -742,6 +768,28 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
return rc;
}
+int
+qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct qed_dmae_params *p_params)
+{
+ int rc;
+
+ mutex_lock(&(p_hwfn->dmae_info.mutex));
+
+ rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ dest_addr,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ size_in_dwords, p_params);
+
+ mutex_unlock(&(p_hwfn->dmae_info.mutex));
+
+ return rc;
+}
+
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
enum protocol_type proto,
union qed_qm_pq_params *p_params)
@@ -765,6 +813,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
break;
case PROTOCOLID_ETH:
pq_id = p_params->eth.tc;
+ if (p_params->eth.is_vf)
+ pq_id += p_hwfn->qm_info.vf_queues_offset +
+ p_params->eth.vf_id;
break;
default:
pq_id = 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index e56d43379..4367363ad 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -221,6 +221,16 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief qed_vfid_to_concrete - build a concrete FID for a
+ * given VF ID
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ */
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
+
+/**
* @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
* this is declared here since other files will require it.
* @param idx
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index f55ebdc3c..e8a3b9da5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -712,6 +712,21 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 pf_id, u16 pf_wfq)
+{
+ u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
+
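+ /* A zero or out-of-range increment value means the requested weight
+ * cannot be represented by the QM block.
+ */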
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+ return 0;
+}
+
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 pf_id,
@@ -732,6 +747,31 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS],
+ u16 vport_wfq)
+{
+ u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ u8 tc;
+
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration");
+ return -1;
+ }
+
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ u16 vport_pq_id = first_tx_pq_id[tc];
+
+ if (vport_pq_id != QM_INVALID_PQ_ID)
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
+ inc_val);
+ }
+
+ return 0;
+}
+
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 vport_id,
@@ -788,3 +828,130 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
return true;
}
+
+static void
+qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
+{
+ if (enable)
+ set_bit(bit, var);
+ else
+ clear_bit(bit, var);
+}
+
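+ /* Parser output format written whenever at least one tunnel type is
+ * enabled; see the qed_set_*_enable helpers below.
+ */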
+#define PRS_ETH_TUNN_FIC_FORMAT -188897008
+
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 dest_port)
+{
+ qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
+}
+
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool vxlan_enable)
+{
+ unsigned long reg_val = 0;
+ u8 shift;
+
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+
+ if (reg_val)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+
+ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
+ vxlan_enable ? 1 : 0);
+}
+
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable)
+{
+ unsigned long reg_val = 0;
+ u8 shift;
+
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+
+ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+ shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
+ eth_gre_enable ? 1 : 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
+ ip_gre_enable ? 1 : 0);
+}
+
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 dest_port)
+{
+ qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
+}
+
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool eth_geneve_enable,
+ bool ip_geneve_enable)
+{
+ unsigned long reg_val = 0;
+ u8 shift;
+
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
+
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
+ eth_geneve_enable ? 1 : 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
+
+ /* comp ver */
+ reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
+ qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
+
+ /* EDPM with geneve tunnel not supported in BB_B0 */
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ return;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+ eth_geneve_enable ? 1 : 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+ ip_geneve_enable ? 1 : 0);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 3269b3610..d358c3bb1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -18,6 +18,7 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
#define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500
@@ -128,6 +129,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_rt_data *rt_data = &p_hwfn->rt_data;
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
GFP_KERNEL);
if (!rt_data->b_valid)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 2017b0121..09a6ad3d2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -26,6 +26,8 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
struct qed_pi_info {
qed_int_comp_cb_t comp_cb;
@@ -2513,6 +2515,9 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
u32 sb_offset;
u32 pi_offset;
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
sb_offset = igu_sb_id * PIS_PER_SB;
memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
@@ -2542,8 +2547,9 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
- qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
- sb_info->igu_sb_id, 0, 0);
+ if (IS_PF(p_hwfn->cdev))
+ qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+ sb_info->igu_sb_id, 0, 0);
}
/**
@@ -2563,8 +2569,10 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
/* Assuming continuous set of IGU SBs dedicated for given PF */
if (sb_id == QED_SP_SB_ID)
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
- else
+ else if (IS_PF(p_hwfn->cdev))
igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ else
+ igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
(sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
@@ -2594,9 +2602,16 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
/* The igu address will hold the absolute address that needs to be
* written to for a specific status block
*/
- sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_IGU_CMD +
- (sb_info->igu_sb_id << 3);
+ if (IS_PF(p_hwfn->cdev)) {
+ sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ (sb_info->igu_sb_id << 3);
+ } else {
+ sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_IGU +
+ ((IGU_CMD_INT_ACK_BASE +
+ sb_info->igu_sb_id) << 3);
+ }
sb_info->flags |= QED_SB_INFO_INIT;
@@ -2783,24 +2798,20 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
{
p_hwfn->b_int_enabled = 0;
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
#define IGU_CLEANUP_SLEEP_LENGTH (1000)
-void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 sb_id,
- bool cleanup_set,
- u16 opaque_fid
- )
+static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id, bool cleanup_set, u16 opaque_fid)
{
+ u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
- u32 data = 0;
- u32 cmd_ctrl = 0;
- u32 val = 0;
- u32 sb_bit = 0;
- u32 sb_bit_addr = 0;
/* Set the data field */
SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
@@ -2845,11 +2856,9 @@ void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 sb_id,
- u16 opaque,
- bool b_set)
+ u32 sb_id, u16 opaque, bool b_set)
{
- int pi;
+ int pi, i;
/* Set */
if (b_set)
@@ -2858,6 +2867,22 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
/* Clear */
qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+ /* Wait for the IGU SB to cleanup */
+ for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
+ u32 val;
+
+ val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4));
+ if (val & (1 << (sb_id % 32)))
+ usleep_range(10, 20);
+ else
+ break;
+ }
+ if (i == IGU_CLEANUP_SLEEP_LENGTH)
+ DP_NOTICE(p_hwfn,
+ "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
+ sb_id);
+
/* Clear the CAU for the SB */
for (pi = 0; pi < 12; pi++)
qed_wr(p_hwfn, p_ptt,
@@ -2866,13 +2891,11 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- bool b_set,
- bool b_slowpath)
+ bool b_set, bool b_slowpath)
{
u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
- u32 sb_id = 0;
- u32 val = 0;
+ u32 sb_id = 0, val = 0;
val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
@@ -2888,14 +2911,14 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.opaque_fid,
b_set);
- if (b_slowpath) {
- sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU cleaning slowpath SB [%d]\n", sb_id);
- qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
- p_hwfn->hw_info.opaque_fid,
- b_set);
- }
+ if (!b_slowpath)
+ return;
+
+ sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU cleaning slowpath SB [%d]\n", sb_id);
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid, b_set);
}
static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
@@ -2935,9 +2958,9 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct qed_igu_info *p_igu_info;
+ u32 val, min_vf = 0, max_vf = 0;
+ u16 sb_id, last_iov_sb_id = 0;
struct qed_igu_block *blk;
- u32 val;
- u16 sb_id;
u16 prev_sb_id = 0xFF;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
@@ -2947,12 +2970,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
p_igu_info = p_hwfn->hw_info.p_igu_info;
- /* Initialize base sb / sb cnt for PFs */
+ /* Initialize base sb / sb cnt for PFs and VFs */
p_igu_info->igu_base_sb = 0xffff;
p_igu_info->igu_sb_cnt = 0;
p_igu_info->igu_dsb_id = 0xffff;
p_igu_info->igu_base_sb_iov = 0xffff;
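+ /* Learn which VF ids belong to this PF, so their IGU CAM entries can
+ * be recognized in the scan below.
+ */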
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ min_vf = p_iov->first_vf_in_pf;
+ max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
+ }
+
for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
sb_id++) {
blk = &p_igu_info->igu_map.igu_blocks[sb_id];
@@ -2986,14 +3016,43 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
(p_igu_info->igu_sb_cnt)++;
}
}
+ } else {
+ if ((blk->function_id >= min_vf) &&
+ (blk->function_id < max_vf)) {
+ /* Available for VFs of this PF */
+ if (p_igu_info->igu_base_sb_iov == 0xffff) {
+ p_igu_info->igu_base_sb_iov = sb_id;
+ } else if (last_iov_sb_id != sb_id - 1) {
+ if (!val) {
+ DP_VERBOSE(p_hwfn->cdev,
+ NETIF_MSG_INTR,
+ "First uninitialized IGU CAM entry at index 0x%04x\n",
+ sb_id);
+ } else {
+ DP_NOTICE(p_hwfn->cdev,
+ "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n",
+ p_hwfn->rel_pf_id,
+ last_iov_sb_id,
+ sb_id);
+ }
+ break;
+ }
+ blk->status |= QED_IGU_STATUS_FREE;
+ p_hwfn->hw_info.p_igu_info->free_blks++;
+ last_iov_sb_id = sb_id;
+ }
}
}
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
- p_igu_info->igu_base_sb,
- p_igu_info->igu_sb_cnt,
- p_igu_info->igu_dsb_id);
+ p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
+
+ DP_VERBOSE(
+ p_hwfn,
+ NETIF_MSG_INTR,
+ "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb,
+ p_igu_info->igu_base_sb_iov,
+ p_igu_info->igu_sb_cnt,
+ p_igu_info->igu_sb_cnt_iov,
+ p_igu_info->igu_dsb_id);
if (p_igu_info->igu_base_sb == 0xffff ||
p_igu_info->igu_dsb_id == 0xffff ||
@@ -3116,6 +3175,23 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
p_sb_cnt_info->sb_free_blk = info->free_blks;
}
+u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+
+ /* Determine origin of SB id */
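+ /* PF-range SBs map directly onto the PF queue range; VF-range SBs
+ * are numbered after all PF queues, keeping indices contiguous.
+ */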
+ if ((sb_id >= p_info->igu_base_sb) &&
+ (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
+ return sb_id - p_info->igu_base_sb;
+ } else if ((sb_id >= p_info->igu_base_sb_iov) &&
+ (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
+ return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
+ } else {
+ DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
+ return 0;
+ }
+}
+
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
int i;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index c57f2e680..20b468637 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -20,6 +20,12 @@
#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
+#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */
/* Igu control commands
*/
@@ -292,26 +298,8 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
* @param p_hwfn
* @param p_ptt
* @param sb_id - igu status block id
- * @param cleanup_set - set(1) / clear(0)
- * @param opaque_fid - the function for which to perform
- * cleanup, for example a PF on behalf of
- * its VFs.
- */
-void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 sb_id,
- bool cleanup_set,
- u16 opaque_fid);
-
-/**
- * @brief Status block cleanup. Should be called for each status
- * block that will be used -> both PF / VF
- *
- * @param p_hwfn
- * @param p_ptt
- * @param sb_id - igu status block id
* @param opaque - opaque fid of the sb owner.
- * @param cleanup_set - set(1) / clear(0)
+ * @param b_set - set(1) / clear(0)
*/
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -365,6 +353,16 @@ void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief - Returns an Rx queue index appropriate for use with a given SB.
+ *
+ * @param p_hwfn
+ * @param sb_id - absolute index of SB
+ *
+ * @return index of Rx queue
+ */
+u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
* @brief - Enable Interrupt & Attention for hw function
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 3f35c6ca9..aada4c7e0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -31,137 +31,25 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
-enum qed_rss_caps {
- QED_RSS_IPV4 = 0x1,
- QED_RSS_IPV6 = 0x2,
- QED_RSS_IPV4_TCP = 0x4,
- QED_RSS_IPV6_TCP = 0x8,
- QED_RSS_IPV4_UDP = 0x10,
- QED_RSS_IPV6_UDP = 0x20,
-};
-
-/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
-#define QED_RSS_IND_TABLE_SIZE 128
-#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
-
-struct qed_rss_params {
- u8 update_rss_config;
- u8 rss_enable;
- u8 rss_eng_id;
- u8 update_rss_capabilities;
- u8 update_rss_ind_table;
- u8 update_rss_key;
- u8 rss_caps;
- u8 rss_table_size_log;
- u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
- u32 rss_key[QED_RSS_KEY_SIZE];
-};
-
-enum qed_filter_opcode {
- QED_FILTER_ADD,
- QED_FILTER_REMOVE,
- QED_FILTER_MOVE,
- QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
- QED_FILTER_FLUSH, /* Removes all filters */
-};
-
-enum qed_filter_ucast_type {
- QED_FILTER_MAC,
- QED_FILTER_VLAN,
- QED_FILTER_MAC_VLAN,
- QED_FILTER_INNER_MAC,
- QED_FILTER_INNER_VLAN,
- QED_FILTER_INNER_PAIR,
- QED_FILTER_INNER_MAC_VNI_PAIR,
- QED_FILTER_MAC_VNI_PAIR,
- QED_FILTER_VNI,
-};
-
-struct qed_filter_ucast {
- enum qed_filter_opcode opcode;
- enum qed_filter_ucast_type type;
- u8 is_rx_filter;
- u8 is_tx_filter;
- u8 vport_to_add_to;
- u8 vport_to_remove_from;
- unsigned char mac[ETH_ALEN];
- u8 assert_on_error;
- u16 vlan;
- u32 vni;
-};
-
-struct qed_filter_mcast {
- /* MOVE is not supported for multicast */
- enum qed_filter_opcode opcode;
- u8 vport_to_add_to;
- u8 vport_to_remove_from;
- u8 num_mc_addrs;
-#define QED_MAX_MC_ADDRS 64
- unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
-};
-
-struct qed_filter_accept_flags {
- u8 update_rx_mode_config;
- u8 update_tx_mode_config;
- u8 rx_accept_filter;
- u8 tx_accept_filter;
-#define QED_ACCEPT_NONE 0x01
-#define QED_ACCEPT_UCAST_MATCHED 0x02
-#define QED_ACCEPT_UCAST_UNMATCHED 0x04
-#define QED_ACCEPT_MCAST_MATCHED 0x08
-#define QED_ACCEPT_MCAST_UNMATCHED 0x10
-#define QED_ACCEPT_BCAST 0x20
-};
-
-struct qed_sp_vport_update_params {
- u16 opaque_fid;
- u8 vport_id;
- u8 update_vport_active_rx_flg;
- u8 vport_active_rx_flg;
- u8 update_vport_active_tx_flg;
- u8 vport_active_tx_flg;
- u8 update_approx_mcast_flg;
- u8 update_accept_any_vlan_flg;
- u8 accept_any_vlan;
- unsigned long bins[8];
- struct qed_rss_params *rss_params;
- struct qed_filter_accept_flags accept_flags;
-};
-
-enum qed_tpa_mode {
- QED_TPA_MODE_NONE,
- QED_TPA_MODE_UNUSED,
- QED_TPA_MODE_GRO,
- QED_TPA_MODE_MAX
-};
-
-struct qed_sp_vport_start_params {
- enum qed_tpa_mode tpa_mode;
- bool remove_inner_vlan;
- bool drop_ttl0;
- u8 max_buffers_per_cqe;
- u32 concrete_fid;
- u16 opaque_fid;
- u8 vport_id;
- u16 mtu;
-};
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
-static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
- struct qed_sp_vport_start_params *p_params)
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params)
{
struct vport_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
+ u8 abs_vport_id = 0;
int rc = -EINVAL;
u16 rx_mode = 0;
- u8 abs_vport_id = 0;
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0)
@@ -184,6 +72,7 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod->mtu = cpu_to_le16(p_params->mtu);
p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
+ p_ramrod->untagged = p_params->only_untagged;
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
@@ -211,6 +100,8 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
break;
}
+ p_ramrod->tx_switching_en = p_params->tx_switching;
+
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
p_params->concrete_fid);
@@ -218,6 +109,21 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params)
+{
+ if (IS_VF(p_hwfn->cdev)) {
+ return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
+ p_params->mtu,
+ p_params->remove_inner_vlan,
+ p_params->tpa_mode,
+ p_params->max_buffers_per_cqe,
+ p_params->only_untagged);
+ }
+
+ return qed_sp_eth_vport_start(p_hwfn, p_params);
+}
+
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
@@ -342,10 +248,6 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
!!(accept_filter & QED_ACCEPT_NONE));
- SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
- (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
- !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
-
SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
!!(accept_filter & QED_ACCEPT_NONE));
@@ -363,6 +265,38 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
}
static void
+qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_sge_tpa_params *p_params)
+{
+ struct eth_vport_tpa_param *p_tpa;
+
+ if (!p_params) {
+ p_ramrod->common.update_tpa_param_flg = 0;
+ p_ramrod->common.update_tpa_en_flg = 0;
+ p_ramrod->common.update_tpa_param_flg = 0;
+ return;
+ }
+
+ p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
+ p_tpa = &p_ramrod->tpa_param;
+ p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
+ p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
+ p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
+ p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
+
+ p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
+ p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
+ p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
+ p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
+ p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
+ p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
+ p_tpa->tpa_max_size = p_params->tpa_max_size;
+ p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
+ p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+}
+
+static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
struct qed_sp_vport_update_params *p_params)
@@ -383,20 +317,24 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
}
}
-static int
-qed_sp_vport_update(struct qed_hwfn *p_hwfn,
- struct qed_sp_vport_update_params *p_params,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
struct qed_rss_params *p_rss_params = p_params->rss_params;
struct vport_update_ramrod_data_cmn *p_cmn;
struct qed_sp_init_data init_data;
struct vport_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
- u8 abs_vport_id = 0;
+ u8 abs_vport_id = 0, val;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev)) {
+ rc = qed_vf_pf_vport_update(p_hwfn, p_params);
+ return rc;
+ }
+
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0)
return rc;
@@ -425,6 +363,27 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
p_cmn->accept_any_vlan = p_params->accept_any_vlan;
p_cmn->update_accept_any_vlan_flg =
p_params->update_accept_any_vlan_flg;
+
+ p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
+ val = p_params->update_inner_vlan_removal_flg;
+ p_cmn->update_inner_vlan_removal_en_flg = val;
+
+ p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
+ val = p_params->update_default_vlan_enable_flg;
+ p_cmn->update_default_vlan_en_flg = val;
+
+ p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
+ p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
+
+ p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
+
+ p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
+ p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
+
+ p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
+ val = p_params->update_anti_spoofing_en_flg;
+ p_ramrod->common.update_anti_spoofing_en_flg = val;
+
rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc) {
/* Return spq entry which is taken in qed_sp_init_request()*/
@@ -436,12 +395,11 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+ qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u8 vport_id)
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
struct vport_stop_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
@@ -449,6 +407,9 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
u8 abs_vport_id = 0;
int rc;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_vport_stop(p_hwfn);
+
rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
if (rc != 0)
return rc;
@@ -470,13 +431,26 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+static int
+qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
+ struct qed_filter_accept_flags *p_accept_flags)
+{
+ struct qed_sp_vport_update_params s_params;
+
+ memset(&s_params, 0, sizeof(s_params));
+ memcpy(&s_params.accept_flags, p_accept_flags,
+ sizeof(struct qed_filter_accept_flags));
+
+ return qed_vf_pf_vport_update(p_hwfn, &s_params);
+}
+
static int qed_filter_accept_cmd(struct qed_dev *cdev,
u8 vport,
struct qed_filter_accept_flags accept_flags,
u8 update_accept_any_vlan,
u8 accept_any_vlan,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
struct qed_sp_vport_update_params vport_update_params;
int i, rc;
@@ -493,6 +467,13 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ if (IS_VF(cdev)) {
+ rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
+ if (rc)
+ return rc;
+ continue;
+ }
+
rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
comp_mode, p_comp_data);
if (rc != 0) {
@@ -527,16 +508,14 @@ static int qed_sp_release_queue_cid(
return 0;
}
-static int
-qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct qed_queue_start_common_params *params,
- u8 stats_id,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size)
+int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *params,
+ u8 stats_id,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
struct rx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -605,8 +584,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void __iomem **pp_prod)
+ u16 cqe_pbl_size, void __iomem **pp_prod)
{
struct qed_hw_cid_data *p_rx_cid;
u64 init_prod_val = 0;
@@ -614,6 +592,16 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u8 abs_stats_id = 0;
int rc;
+ if (IS_VF(p_hwfn->cdev)) {
+ return qed_vf_pf_rxq_start(p_hwfn,
+ params->queue_id,
+ params->sb,
+ params->sb_idx,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size, pp_prod);
+ }
+
rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
if (rc != 0)
return rc;
@@ -656,10 +644,59 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
- u16 rx_queue_id,
- bool eq_completion_only,
- bool cqe_completion)
+int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct rx_queue_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ struct qed_hw_cid_data *p_rx_cid;
+ u16 qid, abs_rx_q_id = 0;
+ int rc = -EINVAL;
+ u8 i;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ for (i = 0; i < num_rxqs; i++) {
+ qid = rx_queue_id + i;
+ p_rx_cid = &p_hwfn->p_rx_cids[qid];
+
+ /* Get SPQ entry */
+ init_data.cid = p_rx_cid->cid;
+ init_data.opaque_fid = p_rx_cid->opaque_fid;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_update;
+
+ qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+ qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->complete_cqe_flg = complete_cqe_flg;
+ p_ramrod->complete_event_flg = complete_event_flg;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ bool eq_completion_only, bool cqe_completion)
{
struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
@@ -668,6 +705,9 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
u16 abs_rx_q_id = 0;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_rx_cid->cid;
@@ -703,15 +743,14 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
}
-static int
-qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct qed_queue_start_common_params *p_params,
- u8 stats_id,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- union qed_qm_pq_params *p_pq_params)
+int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *p_params,
+ u8 stats_id,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ union qed_qm_pq_params *p_pq_params)
{
struct tx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -765,14 +804,21 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
- u16 pbl_size,
- void __iomem **pp_doorbell)
+ u16 pbl_size, void __iomem **pp_doorbell)
{
struct qed_hw_cid_data *p_tx_cid;
union qed_qm_pq_params pq_params;
u8 abs_stats_id = 0;
int rc;
+ if (IS_VF(p_hwfn->cdev)) {
+ return qed_vf_pf_txq_start(p_hwfn,
+ p_params->queue_id,
+ p_params->sb,
+ p_params->sb_idx,
+ pbl_addr, pbl_size, pp_doorbell);
+ }
+
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
if (rc)
return rc;
@@ -813,14 +859,16 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
- u16 tx_queue_id)
+int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
{
struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_tx_cid->cid;
@@ -1016,11 +1064,11 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
return 0;
}
-static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- struct qed_filter_ucast *p_filter_cmd,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
struct vport_filter_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -1118,7 +1166,7 @@ static inline u32 qed_crc32c_le(u32 seed,
return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}
-static u8 qed_mcast_bin_from_mac(u8 *mac)
+u8 qed_mcast_bin_from_mac(u8 *mac)
{
u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
mac, ETH_ALEN);
@@ -1201,11 +1249,10 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-static int
-qed_filter_mcast_cmd(struct qed_dev *cdev,
- struct qed_filter_mcast *p_filter_cmd,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+static int qed_filter_mcast_cmd(struct qed_dev *cdev,
+ struct qed_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
int rc = 0;
int i;
@@ -1221,8 +1268,10 @@ qed_filter_mcast_cmd(struct qed_dev *cdev,
u16 opaque_fid;
- if (rc != 0)
- break;
+ if (IS_VF(cdev)) {
+ qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
+ continue;
+ }
opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1247,8 +1296,10 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
u16 opaque_fid;
- if (rc != 0)
- break;
+ if (IS_VF(cdev)) {
+ rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
+ continue;
+ }
opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1257,6 +1308,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
p_filter_cmd,
comp_mode,
p_comp_data);
+ if (rc != 0)
+ break;
}
return rc;
@@ -1265,12 +1318,19 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
- u32 *p_len,
- u16 statistics_bin)
+ u32 *p_len, u16 statistics_bin)
{
- *p_addr = BAR0_MAP_REG_PSDM_RAM +
- PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
- *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.pstats.len;
+ }
}
static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
@@ -1285,32 +1345,15 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&pstats, 0, sizeof(pstats));
- qed_memcpy_from(p_hwfn, p_ptt, &pstats,
- pstats_addr, pstats_len);
-
- p_stats->tx_ucast_bytes +=
- HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- p_stats->tx_mcast_bytes +=
- HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- p_stats->tx_bcast_bytes +=
- HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- p_stats->tx_ucast_pkts +=
- HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- p_stats->tx_mcast_pkts +=
- HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- p_stats->tx_bcast_pkts +=
- HILO_64_REGPAIR(pstats.sent_bcast_pkts);
- p_stats->tx_err_drop_pkts +=
- HILO_64_REGPAIR(pstats.error_drop_pkts);
-}
-
-static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
- u32 *p_addr,
- u32 *p_len)
-{
- *p_addr = BAR0_MAP_REG_TSDM_RAM +
- TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
- *p_len = sizeof(struct tstorm_per_port_stat);
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
+
+ p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
@@ -1318,14 +1361,23 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
struct qed_eth_stats *p_stats,
u16 statistics_bin)
{
- u32 tstats_addr = 0, tstats_len = 0;
struct tstorm_per_port_stat tstats;
+ u32 tstats_addr, tstats_len;
+
+ if (IS_PF(p_hwfn->cdev)) {
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ tstats_len = sizeof(struct tstorm_per_port_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
- __qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);
+ tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
+ tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
+ }
memset(&tstats, 0, sizeof(tstats));
- qed_memcpy_from(p_hwfn, p_ptt, &tstats,
- tstats_addr, tstats_len);
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
p_stats->mftag_filter_discards +=
HILO_64_REGPAIR(tstats.mftag_filter_discard);
@@ -1335,12 +1387,19 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
- u32 *p_len,
- u16 statistics_bin)
+ u32 *p_len, u16 statistics_bin)
{
- *p_addr = BAR0_MAP_REG_USDM_RAM +
- USTORM_QUEUE_STAT_OFFSET(statistics_bin);
- *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
+ *p_len = p_resp->pfdev_info.stats_info.ustats.len;
+ }
}
static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
@@ -1355,31 +1414,31 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&ustats, 0, sizeof(ustats));
- qed_memcpy_from(p_hwfn, p_ptt, &ustats,
- ustats_addr, ustats_len);
-
- p_stats->rx_ucast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- p_stats->rx_mcast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- p_stats->rx_bcast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- p_stats->rx_ucast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- p_stats->rx_mcast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- p_stats->rx_bcast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
+
+ p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
- u32 *p_len,
- u16 statistics_bin)
+ u32 *p_len, u16 statistics_bin)
{
- *p_addr = BAR0_MAP_REG_MSDM_RAM +
- MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
- *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.mstats.len;
+ }
}
static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
@@ -1394,21 +1453,17 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&mstats, 0, sizeof(mstats));
- qed_memcpy_from(p_hwfn, p_ptt, &mstats,
- mstats_addr, mstats_len);
+ qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
- p_stats->no_buff_discards +=
- HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
p_stats->packet_too_big_discard +=
HILO_64_REGPAIR(mstats.packet_too_big_discard);
- p_stats->ttl0_discard +=
- HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
p_stats->tpa_coalesced_pkts +=
HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
p_stats->tpa_coalesced_events +=
HILO_64_REGPAIR(mstats.tpa_coalesced_events);
- p_stats->tpa_aborts_num +=
- HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
p_stats->tpa_coalesced_bytes +=
HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
@@ -1428,16 +1483,16 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
sizeof(port_stats));
p_stats->rx_64_byte_packets += port_stats.pmm.r64;
- p_stats->rx_127_byte_packets += port_stats.pmm.r127;
- p_stats->rx_255_byte_packets += port_stats.pmm.r255;
- p_stats->rx_511_byte_packets += port_stats.pmm.r511;
- p_stats->rx_1023_byte_packets += port_stats.pmm.r1023;
- p_stats->rx_1518_byte_packets += port_stats.pmm.r1518;
- p_stats->rx_1522_byte_packets += port_stats.pmm.r1522;
- p_stats->rx_2047_byte_packets += port_stats.pmm.r2047;
- p_stats->rx_4095_byte_packets += port_stats.pmm.r4095;
- p_stats->rx_9216_byte_packets += port_stats.pmm.r9216;
- p_stats->rx_16383_byte_packets += port_stats.pmm.r16383;
+ p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127;
+ p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255;
+ p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511;
+ p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023;
+ p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518;
+ p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522;
+ p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047;
+ p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095;
+ p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216;
+ p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383;
p_stats->rx_crc_errors += port_stats.pmm.rfcs;
p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
p_stats->rx_pause_frames += port_stats.pmm.rxpf;
@@ -1481,44 +1536,49 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *stats,
- u16 statistics_bin)
+ u16 statistics_bin, bool b_get_port_stats)
{
__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
- if (p_hwfn->mcp_info)
+ if (b_get_port_stats && p_hwfn->mcp_info)
__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}
static void _qed_get_vport_stats(struct qed_dev *cdev,
struct qed_eth_stats *stats)
{
- u8 fw_vport = 0;
- int i;
+ u8 fw_vport = 0;
+ int i;
memset(stats, 0, sizeof(*stats));
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- struct qed_ptt *p_ptt;
-
- /* The main vport index is relative first */
- if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
- DP_ERR(p_hwfn, "No vport available!\n");
- continue;
+ struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+ : NULL;
+
+ if (IS_PF(cdev)) {
+ /* The main vport uses the first relative index */
+ if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
+ DP_ERR(p_hwfn, "No vport available!\n");
+ goto out;
+ }
}
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt) {
+ if (IS_PF(cdev) && !p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
- __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);
+ __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
+ IS_PF(cdev) ? true : false);
- qed_ptt_release(p_hwfn, p_ptt);
+out:
+ if (IS_PF(cdev) && p_ptt)
+ qed_ptt_release(p_hwfn, p_ptt);
}
}
@@ -1552,10 +1612,11 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
struct eth_mstorm_per_queue_stat mstats;
struct eth_ustorm_per_queue_stat ustats;
struct eth_pstorm_per_queue_stat pstats;
- struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+ : NULL;
u32 addr = 0, len = 0;
- if (!p_ptt) {
+ if (IS_PF(cdev) && !p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
@@ -1572,7 +1633,8 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
- qed_ptt_release(p_hwfn, p_ptt);
+ if (IS_PF(cdev))
+ qed_ptt_release(p_hwfn, p_ptt);
}
/* PORT statistics are not necessarily reset, so we need to
@@ -1593,32 +1655,61 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
info->num_tc = 1;
- if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
- for_each_hwfn(cdev, i)
- info->num_queues += FEAT_NUM(&cdev->hwfns[i],
- QED_PF_L2_QUE);
- if (cdev->int_params.fp_msix_cnt)
- info->num_queues = min_t(u8, info->num_queues,
- cdev->int_params.fp_msix_cnt);
+ if (IS_PF(cdev)) {
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ for_each_hwfn(cdev, i)
+ info->num_queues +=
+ FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
+ if (cdev->int_params.fp_msix_cnt)
+ info->num_queues =
+ min_t(u8, info->num_queues,
+ cdev->int_params.fp_msix_cnt);
+ } else {
+ info->num_queues = cdev->num_hwfns;
+ }
+
+ info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
+ ether_addr_copy(info->port_mac,
+ cdev->hwfns[0].hw_info.hw_mac_addr);
} else {
- info->num_queues = cdev->num_hwfns;
- }
+ qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
+ if (cdev->num_hwfns > 1) {
+ u8 queues = 0;
- info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
- ether_addr_copy(info->port_mac,
- cdev->hwfns[0].hw_info.hw_mac_addr);
+ qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
+ info->num_queues += queues;
+ }
+
+ qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
+ &info->num_vlan_filters);
+ qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
+ }
qed_fill_dev_info(cdev, &info->common);
+ if (IS_VF(cdev))
+ memset(info->common.hw_mac, 0, ETH_ALEN);
+
return 0;
}
static void qed_register_eth_ops(struct qed_dev *cdev,
- struct qed_eth_cb_ops *ops,
- void *cookie)
+ struct qed_eth_cb_ops *ops, void *cookie)
+{
+ cdev->protocol_ops.eth = ops;
+ cdev->ops_cookie = cookie;
+
+ /* For a VF, start reading the PF's bulletin board */
+ if (IS_VF(cdev))
+ qed_vf_start_iov_wq(cdev);
+}
+
+static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
- cdev->protocol_ops.eth = ops;
- cdev->ops_cookie = cookie;
+ if (IS_PF(cdev))
+ return true;
+
+ return qed_vf_check_mac(&cdev->hwfns[0], mac);
}
static int qed_start_vport(struct qed_dev *cdev,
@@ -1633,6 +1724,7 @@ static int qed_start_vport(struct qed_dev *cdev,
start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
QED_TPA_MODE_NONE;
start.remove_inner_vlan = params->remove_inner_vlan;
+ start.only_untagged = true; /* accept only untagged traffic */
start.drop_ttl0 = params->drop_ttl0;
start.opaque_fid = p_hwfn->hw_info.opaque_fid;
start.concrete_fid = p_hwfn->hw_info.concrete_fid;
@@ -1653,7 +1745,8 @@ static int qed_start_vport(struct qed_dev *cdev,
start.vport_id, start.mtu);
}
- qed_reset_vport_stats(cdev);
+ if (params->clear_stats)
+ qed_reset_vport_stats(cdev);
return 0;
}
@@ -1699,6 +1792,8 @@ static int qed_update_vport(struct qed_dev *cdev,
params->update_vport_active_flg;
sp_params.vport_active_rx_flg = params->vport_active_flg;
sp_params.vport_active_tx_flg = params->vport_active_flg;
+ sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
+ sp_params.tx_switching_flg = params->tx_switching_flg;
sp_params.accept_any_vlan = params->accept_any_vlan;
sp_params.update_accept_any_vlan_flg =
params->update_accept_any_vlan_flg;
@@ -1744,9 +1839,7 @@ static int qed_update_vport(struct qed_dev *cdev,
sp_rss_params.update_rss_capabilities = 1;
sp_rss_params.update_rss_ind_table = 1;
sp_rss_params.update_rss_key = 1;
- sp_rss_params.rss_caps = QED_RSS_IPV4 |
- QED_RSS_IPV6 |
- QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+ sp_rss_params.rss_caps = params->rss_params.rss_caps;
sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
memcpy(sp_rss_params.rss_ind_table,
params->rss_params.rss_ind_table,
@@ -1899,6 +1992,39 @@ static int qed_stop_txq(struct qed_dev *cdev,
return 0;
}
+static int qed_tunn_configure(struct qed_dev *cdev,
+ struct qed_tunn_params *tunn_params)
+{
+ struct qed_tunn_update_params tunn_info;
+ int i, rc;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ if (tunn_params->update_vxlan_port == 1) {
+ tunn_info.update_vxlan_udp_port = 1;
+ tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
+ }
+
+ if (tunn_params->update_geneve_port == 1) {
+ tunn_info.update_geneve_udp_port = 1;
+ tunn_info.geneve_udp_port = tunn_params->geneve_port;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+
+ rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
+ QED_SPQ_MODE_EBLOCK, NULL);
+
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
enum qed_filter_rx_mode_type type)
{
@@ -2026,10 +2152,18 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
cqe);
}
+#ifdef CONFIG_QED_SRIOV
+extern const struct qed_iov_hv_ops qed_iov_ops_pass;
+#endif
+
static const struct qed_eth_ops qed_eth_ops_pass = {
.common = &qed_common_ops_pass,
+#ifdef CONFIG_QED_SRIOV
+ .iov = &qed_iov_ops_pass,
+#endif
.fill_dev_info = &qed_fill_eth_dev_info,
.register_ops = &qed_register_eth_ops,
+ .check_mac = &qed_check_mac,
.vport_start = &qed_start_vport,
.vport_stop = &qed_stop_vport,
.vport_update = &qed_update_vport,
@@ -2041,16 +2175,11 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.fastpath_stop = &qed_fastpath_stop,
.eth_cqe_completion = &qed_fp_cqe_completion,
.get_vport_stats = &qed_get_vport_stats,
+ .tunn_config = &qed_tunn_configure,
};
-const struct qed_eth_ops *qed_get_eth_ops(u32 version)
+const struct qed_eth_ops *qed_get_eth_ops(void)
{
- if (version != QED_ETH_INTERFACE_VERSION) {
- pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
- version, QED_ETH_INTERFACE_VERSION);
- return NULL;
- }
-
return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);
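With the interface-version handshake gone, qed_get_eth_ops() can no longer fail on a mismatch. A minimal consumer sketch, assuming the qed_dev_eth_info type behind fill_dev_info; example_query_dev is a hypothetical name, not part of the patch:

#include <linux/qed/qed_eth_if.h>

/* Illustrative only: grab the L2 ops table (no version argument any
 * more) and use it to query device information.
 */
static int example_query_dev(struct qed_dev *cdev,
			     struct qed_dev_eth_info *info)
{
	const struct qed_eth_ops *ops = qed_get_eth_ops();

	if (!ops)
		return -ENODEV;

	return ops->fill_dev_info(cdev, info);
}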
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
new file mode 100644
index 000000000..002114543
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -0,0 +1,239 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _QED_L2_H
+#define _QED_L2_H
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_eth_if.h>
+#include "qed.h"
+#include "qed_hw.h"
+#include "qed_sp.h"
+
+struct qed_sge_tpa_params {
+ u8 max_buffers_per_cqe;
+
+ u8 update_tpa_en_flg;
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+
+ u8 update_tpa_param_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+ u8 tpa_max_aggs_num;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+};
+
+enum qed_filter_opcode {
+ QED_FILTER_ADD,
+ QED_FILTER_REMOVE,
+ QED_FILTER_MOVE,
+ QED_FILTER_REPLACE, /* Delete all MACs and add a new one instead */
+ QED_FILTER_FLUSH, /* Removes all filters */
+};
+
+enum qed_filter_ucast_type {
+ QED_FILTER_MAC,
+ QED_FILTER_VLAN,
+ QED_FILTER_MAC_VLAN,
+ QED_FILTER_INNER_MAC,
+ QED_FILTER_INNER_VLAN,
+ QED_FILTER_INNER_PAIR,
+ QED_FILTER_INNER_MAC_VNI_PAIR,
+ QED_FILTER_MAC_VNI_PAIR,
+ QED_FILTER_VNI,
+};
+
+struct qed_filter_ucast {
+ enum qed_filter_opcode opcode;
+ enum qed_filter_ucast_type type;
+ u8 is_rx_filter;
+ u8 is_tx_filter;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ unsigned char mac[ETH_ALEN];
+ u8 assert_on_error;
+ u16 vlan;
+ u32 vni;
+};
+
+struct qed_filter_mcast {
+ /* MOVE is not supported for multicast */
+ enum qed_filter_opcode opcode;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ u8 num_mc_addrs;
+#define QED_MAX_MC_ADDRS 64
+ unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ bool eq_completion_only, bool cqe_completion);
+
+int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);
+
+enum qed_tpa_mode {
+ QED_TPA_MODE_NONE,
+ QED_TPA_MODE_UNUSED,
+ QED_TPA_MODE_GRO,
+ QED_TPA_MODE_MAX
+};
+
+struct qed_sp_vport_start_params {
+ enum qed_tpa_mode tpa_mode;
+ bool remove_inner_vlan;
+ bool tx_switching;
+ bool only_untagged;
+ bool drop_ttl0;
+ u8 max_buffers_per_cqe;
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u8 vport_id;
+ u16 mtu;
+};
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params);
+
+struct qed_rss_params {
+ u8 update_rss_config;
+ u8 rss_enable;
+ u8 rss_eng_id;
+ u8 update_rss_capabilities;
+ u8 update_rss_ind_table;
+ u8 update_rss_key;
+ u8 rss_caps;
+ u8 rss_table_size_log;
+ u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+ u32 rss_key[QED_RSS_KEY_SIZE];
+};
+
+struct qed_filter_accept_flags {
+ u8 update_rx_mode_config;
+ u8 update_tx_mode_config;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+#define QED_ACCEPT_NONE 0x01
+#define QED_ACCEPT_UCAST_MATCHED 0x02
+#define QED_ACCEPT_UCAST_UNMATCHED 0x04
+#define QED_ACCEPT_MCAST_MATCHED 0x08
+#define QED_ACCEPT_MCAST_UNMATCHED 0x10
+#define QED_ACCEPT_BCAST 0x20
+};
+
+struct qed_sp_vport_update_params {
+ u16 opaque_fid;
+ u8 vport_id;
+ u8 update_vport_active_rx_flg;
+ u8 vport_active_rx_flg;
+ u8 update_vport_active_tx_flg;
+ u8 vport_active_tx_flg;
+ u8 update_inner_vlan_removal_flg;
+ u8 inner_vlan_removal_flg;
+ u8 silent_vlan_removal_flg;
+ u8 update_default_vlan_enable_flg;
+ u8 default_vlan_enable_flg;
+ u8 update_default_vlan_flg;
+ u16 default_vlan;
+ u8 update_tx_switching_flg;
+ u8 tx_switching_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ unsigned long bins[8];
+ struct qed_rss_params *rss_params;
+ struct qed_filter_accept_flags accept_flags;
+ struct qed_sge_tpa_params *sge_tpa_params;
+};
+
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief qed_sp_vport_stop -
+ *
+ * This ramrod closes a VPort after all its RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param vport_id VPort ID
+ *
+ * @return int
+ */
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
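A hedged usage sketch for the stop ramrod documented above; vport 0 and the hwfn's own opaque FID are assumed placeholder values, and the helper name is hypothetical:

/* Illustrative only: close vport 0. All of its RX/TX queues must
 * already be stopped, per the kernel-doc above.
 */
static int example_stop_vport(struct qed_hwfn *p_hwfn)
{
	return qed_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
				 0 /* vport_id */);
}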
+
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief qed_sp_rx_eth_queues_update -
+ *
+ * This ramrod updates an RX queue. It is used for setting the active state
+ * of the queue and updating the TPA and SGE parameters.
+ *
+ * @note At the moment, only used by non-Linux VFs.
+ *
+ * @param p_hwfn
+ * @param rx_queue_id RX Queue ID
+ * @param num_rxqs Allows updating multiple RX queues,
+ * from rx_queue_id to (rx_queue_id + num_rxqs)
+ * @param complete_cqe_flg Post completion to the CQE Ring if set
+ * @param complete_event_flg Post completion to the Event Ring if set
+ *
+ * @return int
+ */
+
+int
+qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
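As the @note says, this update ramrod mainly serves non-Linux VFs; still, a sketch of a single-queue update under assumed values may clarify the parameter pairing (helper name hypothetical):

/* Illustrative only: update RX queue 0 and request a completion on
 * the event ring, posted in blocking SPQ mode.
 */
static int example_update_rxq(struct qed_hwfn *p_hwfn)
{
	return qed_sp_eth_rx_queues_update(p_hwfn,
					   0,	/* rx_queue_id */
					   1,	/* num_rxqs */
					   0,	/* complete_cqe_flg */
					   1,	/* complete_event_flg */
					   QED_SPQ_MODE_EBLOCK, NULL);
}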
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params);
+
+int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *params,
+ u8 stats_id,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+
+int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *p_params,
+ u8 stats_id,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ union qed_qm_pq_params *p_pq_params);
+
+u8 qed_mcast_bin_from_mac(u8 *mac);
+
+#endif /* _QED_L2_H */
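To show how the filter structures above fit together, a hedged sketch that adds one unicast MAC on vport 0; the vport value is a placeholder and ether_addr_copy() comes from <linux/etherdevice.h>:

/* Illustrative only: add a MAC filter on both the RX and TX side of
 * vport 0 and post it in blocking SPQ mode.
 */
static int example_add_mac(struct qed_hwfn *p_hwfn, const u8 *mac)
{
	struct qed_filter_ucast cmd = {
		.opcode = QED_FILTER_ADD,
		.type = QED_FILTER_MAC,
		.is_rx_filter = 1,
		.is_tx_filter = 1,
		.vport_to_add_to = 0,
	};

	ether_addr_copy(cmd.mac, mac);

	return qed_sp_eth_filter_ucast(p_hwfn,
				       p_hwfn->hw_info.opaque_fid,
				       &cmd, QED_SPQ_MODE_EBLOCK, NULL);
}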
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 53c9e9aa1..a41c6b559 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -24,10 +24,12 @@
#include <linux/qed/qed_if.h>
#include "qed.h"
+#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_mcp.h"
#include "qed_hw.h"
+#include "qed_selftest.h"
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -124,7 +126,7 @@ static int qed_init_pci(struct qed_dev *cdev,
goto err1;
}
- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
DP_NOTICE(cdev, "No memory region found in bar #2\n");
rc = -EIO;
goto err1;
@@ -156,7 +158,7 @@ static int qed_init_pci(struct qed_dev *cdev,
}
cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (cdev->pci_params.pm_cap == 0)
+ if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
DP_NOTICE(cdev, "Cannot find power management capability\n");
rc = qed_set_coherency_mask(cdev);
@@ -174,12 +176,14 @@ static int qed_init_pci(struct qed_dev *cdev,
goto err2;
}
- cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
- cdev->db_size = pci_resource_len(cdev->pdev, 2);
- cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
- if (!cdev->doorbells) {
- DP_NOTICE(cdev, "Cannot map doorbell space\n");
- return -ENOMEM;
+ if (IS_PF(cdev)) {
+ cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+ cdev->db_size = pci_resource_len(cdev->pdev, 2);
+ cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+ if (!cdev->doorbells) {
+ DP_NOTICE(cdev, "Cannot map doorbell space\n");
+ return -ENOMEM;
+ }
}
return 0;
@@ -206,20 +210,33 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
- dev_info->fw_major = FW_MAJOR_VERSION;
- dev_info->fw_minor = FW_MINOR_VERSION;
- dev_info->fw_rev = FW_REVISION_VERSION;
- dev_info->fw_eng = FW_ENGINEERING_VERSION;
- dev_info->mf_mode = cdev->mf_mode;
+ if (IS_PF(cdev)) {
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+ dev_info->mf_mode = cdev->mf_mode;
+ dev_info->tx_switching = true;
+ } else {
+ qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
+ &dev_info->fw_minor, &dev_info->fw_rev,
+ &dev_info->fw_eng);
+ }
- qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
+ if (IS_PF(cdev)) {
+ ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (ptt) {
+ qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->mfw_rev, NULL);
- ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
- if (ptt) {
- qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
- &dev_info->flash_size);
+ qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->flash_size);
- qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+ qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+ }
+ } else {
+ qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
+ &dev_info->mfw_rev, NULL);
}
return 0;
@@ -256,9 +273,7 @@ static int qed_set_power_state(struct qed_dev *cdev,
/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
- enum qed_protocol protocol,
- u32 dp_module,
- u8 dp_level)
+ struct qed_probe_params *params)
{
struct qed_dev *cdev;
int rc;
@@ -267,9 +282,12 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
if (!cdev)
goto err0;
- cdev->protocol = protocol;
+ cdev->protocol = params->protocol;
+
+ if (params->is_vf)
+ cdev->b_is_vf = true;
- qed_init_dp(cdev, dp_module, dp_level);
+ qed_init_dp(cdev, params->dp_module, params->dp_level);
rc = qed_init_pci(cdev, pdev);
if (rc) {
@@ -395,15 +413,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
/* Fallthrough */
case QED_INT_MODE_MSI:
- rc = pci_enable_msi(cdev->pdev);
- if (!rc) {
- int_params->out.int_mode = QED_INT_MODE_MSI;
- goto out;
- }
+ if (cdev->num_hwfns == 1) {
+ rc = pci_enable_msi(cdev->pdev);
+ if (!rc) {
+ int_params->out.int_mode = QED_INT_MODE_MSI;
+ goto out;
+ }
- DP_NOTICE(cdev, "Failed to enable MSI\n");
- if (force_mode)
- goto out;
+ DP_NOTICE(cdev, "Failed to enable MSI\n");
+ if (force_mode)
+ goto out;
+ }
/* Fallthrough */
case QED_INT_MODE_INTA:
@@ -663,6 +683,35 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
return 0;
}
+static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
+{
+ int rc;
+
+ memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+ cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
+
+ qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
+ &cdev->int_params.in.num_vectors);
+ if (cdev->num_hwfns > 1) {
+ u8 vectors = 0;
+
+ qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
+ cdev->int_params.in.num_vectors += vectors;
+ }
+
+ /* We want a minimum of one fastpath vector per VF hwfn */
+ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
+
+ rc = qed_set_int_mode(cdev, true);
+ if (rc)
+ return rc;
+
+ cdev->int_params.fp_msix_base = 0;
+ cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
+
+ return 0;
+}
+
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
@@ -744,39 +793,62 @@ static void qed_update_pf_params(struct qed_dev *cdev,
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
+ struct qed_tunn_start_params tunn_info;
struct qed_mcp_drv_version drv_version;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
- int rc;
+ int rc = -EINVAL;
- rc = reject_firmware(&cdev->firmware, QED_FW_FILE_NAME,
- &cdev->pdev->dev);
- if (rc) {
- DP_NOTICE(cdev,
- "Failed to find fw file - /lib/firmware/%s\n",
- QED_FW_FILE_NAME);
+ if (qed_iov_wq_start(cdev))
goto err;
+
+ if (IS_PF(cdev)) {
+ rc = reject_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+ &cdev->pdev->dev);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to find fw file - /lib/firmware/%s\n",
+ QED_FW_FILE_NAME);
+ goto err;
+ }
}
rc = qed_nic_setup(cdev);
if (rc)
goto err;
- rc = qed_slowpath_setup_int(cdev, params->int_mode);
+ if (IS_PF(cdev))
+ rc = qed_slowpath_setup_int(cdev, params->int_mode);
+ else
+ rc = qed_slowpath_vf_setup_int(cdev);
if (rc)
goto err1;
- /* Allocate stream for unzipping */
- rc = qed_alloc_stream_mem(cdev);
- if (rc) {
- DP_NOTICE(cdev, "Failed to allocate stream memory\n");
- goto err2;
+ if (IS_PF(cdev)) {
+ /* Allocate stream for unzipping */
+ rc = qed_alloc_stream_mem(cdev);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+ goto err2;
+ }
+
+ data = cdev->firmware->data;
}
- /* Start the slowpath */
- data = cdev->firmware->data;
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
+ 1 << QED_MODE_L2GRE_TUNN |
+ 1 << QED_MODE_IPGRE_TUNN |
+ 1 << QED_MODE_L2GENEVE_TUNN |
+ 1 << QED_MODE_IPGENEVE_TUNN;
- rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
+ tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
+
+ /* Start the slowpath */
+ rc = qed_hw_init(cdev, &tunn_info, true,
+ cdev->int_params.out.int_mode,
true, data);
if (rc)
goto err2;
@@ -784,18 +856,20 @@ static int qed_slowpath_start(struct qed_dev *cdev,
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
- hwfn = QED_LEADING_HWFN(cdev);
- drv_version.version = (params->drv_major << 24) |
- (params->drv_minor << 16) |
- (params->drv_rev << 8) |
- (params->drv_eng);
- strlcpy(drv_version.name, params->name,
- MCP_DRV_VER_STR_SIZE - 4);
- rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
- &drv_version);
- if (rc) {
- DP_NOTICE(cdev, "Failed sending drv version command\n");
- return rc;
+ if (IS_PF(cdev)) {
+ hwfn = QED_LEADING_HWFN(cdev);
+ drv_version.version = (params->drv_major << 24) |
+ (params->drv_minor << 16) |
+ (params->drv_rev << 8) |
+ (params->drv_eng);
+ strlcpy(drv_version.name, params->name,
+ MCP_DRV_VER_STR_SIZE - 4);
+ rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+ &drv_version);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed sending drv version command\n");
+ return rc;
+ }
}
qed_reset_vport_stats(cdev);
@@ -804,13 +878,17 @@ static int qed_slowpath_start(struct qed_dev *cdev,
err2:
qed_hw_timers_stop_all(cdev);
- qed_slowpath_irq_free(cdev);
+ if (IS_PF(cdev))
+ qed_slowpath_irq_free(cdev);
qed_free_stream_mem(cdev);
qed_disable_msix(cdev);
err1:
qed_resc_free(cdev);
err:
- release_firmware(cdev->firmware);
+ if (IS_PF(cdev))
+ release_firmware(cdev->firmware);
+
+ qed_iov_wq_stop(cdev, false);
return rc;
}
@@ -820,15 +898,21 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev)
return -ENODEV;
- qed_free_stream_mem(cdev);
+ if (IS_PF(cdev)) {
+ qed_free_stream_mem(cdev);
+ qed_sriov_disable(cdev, true);
- qed_nic_stop(cdev);
- qed_slowpath_irq_free(cdev);
+ qed_nic_stop(cdev);
+ qed_slowpath_irq_free(cdev);
+ }
qed_disable_msix(cdev);
qed_nic_reset(cdev);
- release_firmware(cdev->firmware);
+ qed_iov_wq_stop(cdev, true);
+
+ if (IS_PF(cdev))
+ release_firmware(cdev->firmware);
return 0;
}
@@ -902,6 +986,11 @@ static u32 qed_sb_release(struct qed_dev *cdev,
return rc;
}
+static bool qed_can_link_change(struct qed_dev *cdev)
+{
+ return true;
+}
+
static int qed_set_link(struct qed_dev *cdev,
struct qed_link_params *params)
{
@@ -913,6 +1002,9 @@ static int qed_set_link(struct qed_dev *cdev,
if (!cdev)
return -ENODEV;
+ if (IS_VF(cdev))
+ return 0;
+
/* The link should be set only once per PF */
hwfn = &cdev->hwfns[0];
@@ -944,6 +1036,39 @@ static int qed_set_link(struct qed_dev *cdev,
}
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
link_params->speed.forced_speed = params->forced_speed;
+ if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
+ if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+ link_params->pause.autoneg = true;
+ else
+ link_params->pause.autoneg = false;
+ if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
+ link_params->pause.forced_rx = true;
+ else
+ link_params->pause.forced_rx = false;
+ if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
+ link_params->pause.forced_tx = true;
+ else
+ link_params->pause.forced_tx = false;
+ }
+ if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
+ switch (params->loopback_mode) {
+ case QED_LINK_LOOPBACK_INT_PHY:
+ link_params->loopback_mode = PMM_LOOPBACK_INT_PHY;
+ break;
+ case QED_LINK_LOOPBACK_EXT_PHY:
+ link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY;
+ break;
+ case QED_LINK_LOOPBACK_EXT:
+ link_params->loopback_mode = PMM_LOOPBACK_EXT;
+ break;
+ case QED_LINK_LOOPBACK_MAC:
+ link_params->loopback_mode = PMM_LOOPBACK_MAC;
+ break;
+ default:
+ link_params->loopback_mode = PMM_LOOPBACK_NONE;
+ break;
+ }
+ }
rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
@@ -960,6 +1085,7 @@ static int qed_get_port_type(u32 media_type)
case MEDIA_SFPP_10G_FIBER:
case MEDIA_SFP_1G_FIBER:
case MEDIA_XFP_FIBER:
+ case MEDIA_MODULE_FIBER:
case MEDIA_KR:
port_type = PORT_FIBRE;
break;
@@ -980,6 +1106,39 @@ static int qed_get_port_type(u32 media_type)
return port_type;
}
+static int qed_get_link_data(struct qed_hwfn *hwfn,
+ struct qed_mcp_link_params *params,
+ struct qed_mcp_link_state *link,
+ struct qed_mcp_link_capabilities *link_caps)
+{
+ void *p;
+
+ if (!IS_PF(hwfn->cdev)) {
+ qed_vf_get_link_params(hwfn, params);
+ qed_vf_get_link_state(hwfn, link);
+ qed_vf_get_link_caps(hwfn, link_caps);
+
+ return 0;
+ }
+
+ p = qed_mcp_get_link_params(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(params, p, sizeof(*params));
+
+ p = qed_mcp_get_link_state(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link, p, sizeof(*link));
+
+ p = qed_mcp_get_link_capabilities(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link_caps, p, sizeof(*link_caps));
+
+ return 0;
+}
+
static void qed_fill_link(struct qed_hwfn *hwfn,
struct qed_link_output *if_link)
{
@@ -991,10 +1150,10 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
memset(if_link, 0, sizeof(*if_link));
/* Prepare source inputs */
- memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
- memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
- memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
- sizeof(link_caps));
+ if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
+ dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
+ return;
+ }
/* Set the link parameters to pass to protocol driver */
if (link.link_up)
@@ -1096,7 +1255,12 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
static void qed_get_current_link(struct qed_dev *cdev,
struct qed_link_output *if_link)
{
+ int i;
+
qed_fill_link(&cdev->hwfns[0], if_link);
+
+ for_each_hwfn(cdev, i)
+ qed_inform_vf_link_state(&cdev->hwfns[i]);
}
void qed_link_update(struct qed_hwfn *hwfn)
@@ -1106,6 +1270,7 @@ void qed_link_update(struct qed_hwfn *hwfn)
struct qed_link_output if_link;
qed_fill_link(hwfn, &if_link);
+ qed_inform_vf_link_state(hwfn);
if (IS_LEAD_HWFN(hwfn) && cookie)
op->link_update(cookie, &if_link);
@@ -1117,6 +1282,9 @@ static int qed_drain(struct qed_dev *cdev)
struct qed_ptt *ptt;
int i, rc;
+ if (IS_VF(cdev))
+ return 0;
+
for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i];
ptt = qed_ptt_acquire(hwfn);
@@ -1150,7 +1318,15 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
+struct qed_selftest_ops qed_selftest_ops_pass = {
+ .selftest_memory = &qed_selftest_memory,
+ .selftest_interrupt = &qed_selftest_interrupt,
+ .selftest_register = &qed_selftest_register,
+ .selftest_clock = &qed_selftest_clock,
+};
+
const struct qed_common_ops qed_common_ops_pass = {
+ .selftest = &qed_selftest_ops_pass,
.probe = &qed_probe,
.remove = &qed_remove,
.set_power_state = &qed_set_power_state,
@@ -1164,6 +1340,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.sb_release = &qed_sb_release,
.simd_handler_config = &qed_simd_handler_config,
.simd_handler_clean = &qed_simd_handler_clean,
+ .can_link_change = &qed_can_link_change,
.set_link = &qed_set_link,
.get_link = &qed_get_current_link,
.drain = &qed_drain,
@@ -1172,14 +1349,3 @@ const struct qed_common_ops qed_common_ops_pass = {
.chain_free = &qed_chain_free,
.set_led = &qed_set_led,
};
-
-u32 qed_get_protocol_version(enum qed_protocol protocol)
-{
- switch (protocol) {
- case QED_PROTOCOL_ETH:
- return QED_ETH_INTERFACE_VERSION;
- default:
- return 0;
- }
-}
-EXPORT_SYMBOL(qed_get_protocol_version);
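Probing is now driven by a parameter block rather than separate arguments. A sketch of how a protocol driver would fill it, assuming the common-ops .probe member follows the qed_probe() signature above (helper name hypothetical):

/* Illustrative only: probe a PF for the Ethernet protocol with all
 * debug output disabled.
 */
static struct qed_dev *example_probe(const struct qed_eth_ops *ops,
				     struct pci_dev *pdev)
{
	struct qed_probe_params params = {
		.protocol = QED_PROTOCOL_ETH,
		.dp_module = 0,
		.dp_level = 0,
		.is_vf = false,
	};

	return ops->common->probe(pdev, &params);
}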
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index b89c9a8e1..118236179 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -15,10 +15,13 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
+#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
+
#define CHIP_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
@@ -440,6 +443,75 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 path_addr = SECTION_ADDR(mfw_path_offsize,
+ QED_PATH_ID(p_hwfn));
+ u32 disabled_vfs[VF_MAX_STATIC / 32];
+ int i;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
+ mfw_path_offsize, path_addr);
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+ disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
+ path_addr +
+ offsetof(struct public_path,
+ mcp_vf_disabled) +
+ sizeof(u32) * i);
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+ "FLR-ed VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+ }
+
+ if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
+ qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
+}
+
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *vfs_to_ack)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_func_offsize,
+ MCP_PF_ID(p_hwfn));
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ int rc;
+ int i;
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+ "Acking VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
+ memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
+ mb_params.p_data_src = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
+ return -EBUSY;
+ }
+
+ /* Clear the ACK bits */
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ qed_wr(p_hwfn, p_ptt,
+ func_addr +
+ offsetof(struct public_func, drv_ack_vf_disabled) +
+ i * sizeof(u32), 0);
+
+ return rc;
+}
+
static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -472,6 +544,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
bool b_reset)
{
struct qed_mcp_link_state *p_link;
+ u8 max_bw, min_bw;
u32 status = 0;
p_link = &p_hwfn->mcp_info->link_output;
@@ -527,17 +600,20 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
p_link->speed = 0;
}
- /* Correct speed according to bandwidth allocation */
- if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
- p_link->speed = p_link->speed *
- p_hwfn->mcp_info->func_info.bandwidth_max /
- 100;
- qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
- p_link->speed);
- DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
- "Configured MAX bandwidth to be %08x Mb/sec\n",
- p_link->speed);
- }
+ if (p_link->link_up && p_link->speed)
+ p_link->line_speed = p_link->speed;
+ else
+ p_link->line_speed = 0;
+
+ max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+ min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+
+ /* Max bandwidth configuration */
+ __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
+
+ /* Min bandwidth configuration */
+ __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
+ qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
p_link->an_complete = !!(status &
@@ -648,6 +724,77 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
+{
+ struct qed_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ p_info->bandwidth_min = (p_shmem_info->config &
+ FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT;
+ if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ p_info->bandwidth_min);
+ p_info->bandwidth_min = 1;
+ }
+
+ p_info->bandwidth_max = (p_shmem_info->config &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ p_info->bandwidth_max);
+ p_info->bandwidth_max = 100;
+ }
+}
+
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct public_func *p_data,
+ int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ memset(p_data, 0, sizeof(*p_data));
+
+ size = min_t(u32, sizeof(*p_data),
+ QED_SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+ return size;
+}
+
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_function_info *p_info;
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
+ qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
+
+ /* Acknowledge the MFW */
+ qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+ &param);
+}
+
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -676,9 +823,27 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_LINK_CHANGE:
qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
break;
+ case MFW_DRV_MSG_VF_DISABLED:
+ qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_LLDP_DATA_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_REMOTE_LLDP_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_REMOTE_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_OPERATIONAL_MIB);
+ break;
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_BW_UPDATE:
+ qed_mcp_update_bw(p_hwfn, p_ptt);
+ break;
default:
DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
@@ -709,26 +874,42 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
- u32 *p_mfw_ver)
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
- struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
- struct qed_ptt *p_ptt;
u32 global_offsize;
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt)
- return -EBUSY;
+ if (IS_VF(p_hwfn->cdev)) {
+ if (p_hwfn->vf_iov_info) {
+ struct pfvf_acquire_resp_tlv *p_resp;
+
+ p_resp = &p_hwfn->vf_iov_info->acquire_resp;
+ *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
+ return 0;
+ } else {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF requested MFW version prior to ACQUIRE\n");
+ return -EINVAL;
+ }
+ }
global_offsize = qed_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
- public_base,
+ SECTION_OFFSIZE_ADDR(p_hwfn->
+ mcp_info->public_base,
PUBLIC_GLOBAL));
- *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
- SECTION_ADDR(global_offsize, 0) +
- offsetof(struct public_global, mfw_ver));
-
- qed_ptt_release(p_hwfn, p_ptt);
+ *p_mfw_ver =
+ qed_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize,
+ 0) + offsetof(struct public_global, mfw_ver));
+
+ if (p_running_bundle_id != NULL) {
+ *p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize, 0) +
+ offsetof(struct public_global,
+ running_bundle_id));
+ }
return 0;
}
@@ -739,6 +920,9 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt;
+ if (IS_VF(cdev))
+ return -EINVAL;
+
if (!qed_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
return -EBUSY;
@@ -758,28 +942,6 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
return 0;
}
-static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct public_func *p_data,
- int pfid)
-{
- u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_FUNC);
- u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
- u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
- u32 i, size;
-
- memset(p_data, 0, sizeof(*p_data));
-
- size = min_t(u32, sizeof(*p_data),
- QED_SECTION_SIZE(mfw_path_offsize));
- for (i = 0; i < size / sizeof(u32); i++)
- ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
- func_addr + (i << 2));
-
- return size;
-}
-
static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
struct public_func *p_info,
@@ -818,26 +980,7 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
-
- info->bandwidth_min = (shmem_info.config &
- FUNC_MF_CFG_MIN_BW_MASK) >>
- FUNC_MF_CFG_MIN_BW_SHIFT;
- if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
- DP_INFO(p_hwfn,
- "bandwidth minimum out of bounds [%02x]. Set to 1\n",
- info->bandwidth_min);
- info->bandwidth_min = 1;
- }
-
- info->bandwidth_max = (shmem_info.config &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
- if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
- DP_INFO(p_hwfn,
- "bandwidth maximum out of bounds [%02x]. Set to 100\n",
- info->bandwidth_max);
- info->bandwidth_max = 100;
- }
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
if (shmem_info.mac_upper || shmem_info.mac_lower) {
info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
@@ -914,6 +1057,9 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
{
u32 flash_size;
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
@@ -924,6 +1070,37 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num)
+{
+ u32 resp = 0, param = 0, rc_param = 0;
+ int rc;
+
+ /* Only the leader hwfn can configure MSI-X; CMT must be taken into account */
+ if (!IS_LEAD_HWFN(p_hwfn))
+ return 0;
+ num *= p_hwfn->cdev->num_hwfns;
+
+ param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
+ param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
+ &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
+ rc = -EINVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
+ num, vf_id);
+ }
+
+ return rc;
+}
+
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -938,9 +1115,10 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
p_drv_version = &union_data.drv_version;
p_drv_version->version = p_ver->version;
+
for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
val = cpu_to_be32(p_ver->name[i]);
- *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+ *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
}
memset(&mb_params, 0, sizeof(mb_params));
@@ -979,3 +1157,45 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
}
+
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 drv_mb_param = 0, rsp, param;
+ int rc = 0;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = -EAGAIN;
+
+ return rc;
+}
+
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 drv_mb_param, rsp, param;
+ int rc = 0;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = -EAGAIN;
+
+ return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 50917a213..6dd59eb7f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -40,7 +40,15 @@ struct qed_mcp_link_capabilities {
struct qed_mcp_link_state {
bool link_up;
- u32 speed; /* In Mb/s */
+ u32 min_pf_rate;
+
+ /* Actual link speed in Mb/s */
+ u32 line_speed;
+
+ /* PF max speed in Mb/s, deduced from line_speed
+ * according to PF max bandwidth configuration.
+ */
+ u32 speed;
bool full_duplex;
bool an;
@@ -141,13 +149,16 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
/**
* @brief Get the management firmware version value
*
- * @param cdev - qed dev pointer
- * @param mfw_ver - mfw version value
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mfw_ver - mfw version value
+ * @param p_running_bundle_id - image id in NVRAM; optional.
*
- * @return int - 0 - operation was successul.
+ * @return int - 0 - operation was successful.
*/
-int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
- u32 *mfw_ver);
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_mfw_ver, u32 *p_running_bundle_id);
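The reworked prototype is per-hwfn, and the PTT is only meaningful on a PF; a VF passes NULL and is answered from its ACQUIRE response. A sketch mirroring the qed_fill_dev_info() usage in qed_main.c (helper name hypothetical):

/* Illustrative only: read the MFW version on either a PF or a VF. */
static int example_mfw_ver(struct qed_hwfn *p_hwfn, u32 *p_ver)
{
	struct qed_ptt *p_ptt = NULL;
	int rc;

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;
	}

	rc = qed_mcp_get_mfw_ver(p_hwfn, p_ptt, p_ver, NULL);

	if (p_ptt)
		qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}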
/**
* @brief Get media type value of the port.
@@ -237,6 +248,28 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_led_mode mode);
+/**
+ * @brief Bist register test
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Bist clock test
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* the same pf_num may be used by two different hwfns
* TODO - this shouldn't really be in .h file, but until all fields
@@ -360,6 +393,18 @@ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief Ack to mfw that driver finished FLR process for VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ *
+ * @return int - 0 upon success.
+ */
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *vfs_to_ack);
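vfs_to_ack is a VF_MAX_STATIC-bit mask packed into u32 words, matching the mcp_vf_disabled layout read in qed_mcp_handle_vf_flr(). A sketch acking a single VF, assuming the caller already holds a PTT (helper name hypothetical):

/* Illustrative only: ack FLR completion for one absolute VF id. */
static int example_ack_one_vf(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u8 abs_vfid)
{
	u32 ack[VF_MAX_STATIC / 32] = { 0 };

	ack[abs_vfid / 32] |= 1U << (abs_vfid % 32);

	return qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack);
}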
+
+/**
* @brief - calls during init to read shmem of all function-related info.
*
* @param p_hwfn
@@ -389,4 +434,27 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
*/
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+/**
+ * @brief request MFW to configure MSI-X for a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf_id - absolute VF index inside the engine
+ * @param num - number of MSI-X entries to request
+ *
+ * @return int
+ */
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num);
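A sketch of the MSI-X request; as the qed_mcp.c implementation shows, only the leading hwfn forwards it to the MFW and the count is scaled by the number of hwfns in CMT, so callers pass the per-hwfn count (values below are placeholders):

/* Illustrative only: request 4 MSI-X vectors for absolute VF 3. */
static int example_cfg_vf_msix(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt)
{
	return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
				      3 /* vf_id */, 4 /* num */);
}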
+
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 max_bw);
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 min_bw);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index c15b1622e..3a6c506f0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -39,6 +39,10 @@
0x2aae04UL
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
0x2aa16cUL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \
+ 0x2aa118UL
+#define PSWHST_REG_ZONE_PERMISSION_TABLE \
+ 0x2a0800UL
#define BAR0_MAP_REG_MSDM_RAM \
0x1d00000UL
#define BAR0_MAP_REG_USDM_RAM \
@@ -77,6 +81,8 @@
0x2f2eb0UL
#define DORQ_REG_PF_DB_ENABLE \
0x100508UL
+#define DORQ_REG_VF_USAGE_CNT \
+ 0x1009c4UL
#define QM_REG_PF_EN \
0x2f2ea4UL
#define TCFC_REG_STRONG_ENABLE_PF \
@@ -111,6 +117,8 @@
0x009778UL
#define MISCS_REG_CHIP_METAL \
0x009774UL
+#define MISCS_REG_FUNCTION_HIDE \
+ 0x0096f0UL
#define BRB_REG_HEADER_SIZE \
0x340804UL
#define BTB_REG_HEADER_SIZE \
@@ -119,6 +127,8 @@
0x1c0708UL
#define CCFC_REG_ACTIVITY_COUNTER \
0x2e8800UL
+#define CCFC_REG_STRONG_ENABLE_VF \
+ 0x2e070cUL
#define CDU_REG_CID_ADDR_PARAMS \
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
@@ -161,6 +171,10 @@
0x040200UL
#define PBF_REG_INIT \
0xd80000UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 \
+ 0xd806c8UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 \
+ 0xd806ccUL
#define PTU_REG_ATC_INIT_ARRAY \
0x560000UL
#define PCM_REG_INIT \
@@ -385,6 +399,8 @@
0x1d0000UL
#define IGU_REG_PF_CONFIGURATION \
0x180800UL
+#define IGU_REG_VF_CONFIGURATION \
+ 0x180804UL
#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
0x00849cUL
#define MISC_REG_AEU_AFTER_INVERT_1_IGU \
@@ -411,6 +427,10 @@
0x1 << 0)
#define IGU_REG_MAPPING_MEMORY \
0x184000UL
+#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
+ 0x180408UL
+#define IGU_REG_WRITE_DONE_PENDING \
+ 0x180900UL
#define MISCS_REG_GENERIC_POR_0 \
0x0096d4UL
#define MCP_REG_NVM_CFG4 \
@@ -427,4 +447,37 @@
0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE \
0x2aae64UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
+#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
+#define PRS_REG_VXLAN_PORT 0x1f0738UL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
+
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0)
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE (0x1 << 1)
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2)
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
+
+#define NIG_REG_VXLAN_PORT 0x50105cUL
+#define PBF_REG_VXLAN_PORT 0xd80518UL
+#define PBF_REG_NGE_PORT 0xd8051cUL
+#define PRS_REG_NGE_PORT 0x1f086cUL
+#define NIG_REG_NGE_PORT 0x508b38UL
+
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
+
+#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
+#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
+#define NIG_REG_NGE_COMP_VER 0x508b30UL
+#define PBF_REG_NGE_COMP_VER 0xd80524UL
+#define PRS_REG_NGE_COMP_VER 0x1f0878UL
+
+#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
+#define QM_REG_WFQVPWEIGHT 0x2fa000UL
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
new file mode 100644
index 000000000..a342bfe42
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
@@ -0,0 +1,76 @@
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
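+/* Descriptive note: the memory and interrupt selftests below both ride on
+ * the same empty (heartbeat) ramrod, sent once per hw-function; a completed
+ * heartbeat is taken to mean the slowpath queue and its completion
+ * interrupt path are alive.
+ */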
+int qed_selftest_memory(struct qed_dev *cdev)
+{
+ int rc = 0, i;
+
+ for_each_hwfn(cdev, i) {
+ rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+int qed_selftest_interrupt(struct qed_dev *cdev)
+{
+ int rc = 0, i;
+
+ for_each_hwfn(cdev, i) {
+ rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+int qed_selftest_register(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc = 0, i;
+
+ /* although performed by MCP, this test is per engine */
+ for_each_hwfn(cdev, i) {
+ p_hwfn = &cdev->hwfns[i];
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_bist_register_test(p_hwfn, p_ptt);
+ qed_ptt_release(p_hwfn, p_ptt);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+int qed_selftest_clock(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc = 0, i;
+
+ /* although performed by MCP, this test is per engine */
+ for_each_hwfn(cdev, i) {
+ p_hwfn = &cdev->hwfns[i];
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_bist_clock_test(p_hwfn, p_ptt);
+ qed_ptt_release(p_hwfn, p_ptt);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
new file mode 100644
index 000000000..50eb0b499
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
@@ -0,0 +1,40 @@
+#ifndef _QED_SELFTEST_API_H
+#define _QED_SELFTEST_API_H
+#include <linux/types.h>
+
+/**
+ * @brief qed_selftest_memory - Perform memory test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_memory(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_interrupt - Perform interrupt test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_interrupt(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_register - Perform register test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_register(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_clock - Perform clock test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_clock(struct qed_dev *cdev);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index d39f914b6..ea4e9ce53 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -52,6 +52,7 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
union ramrod_data {
struct pf_start_ramrod_data pf_start;
+ struct pf_update_ramrod_data pf_update;
struct rx_queue_start_ramrod_data rx_queue_start;
struct rx_queue_update_ramrod_data rx_queue_update;
struct rx_queue_stop_ramrod_data rx_queue_stop;
@@ -61,6 +62,9 @@ union ramrod_data {
struct vport_stop_ramrod_data vport_stop;
struct vport_update_ramrod_data vport_update;
struct vport_filter_update_ramrod_data vport_filter_update;
+
+ struct vf_start_ramrod_data vf_start;
+ struct vf_stop_ramrod_data vf_stop;
};
#define EQ_MAX_CREDIT 0xffffffff
@@ -338,13 +342,29 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
+ * @param p_tunn
* @param mode
+ * @param allow_npar_tx_switch
*
* @return int
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- enum qed_mf_mode mode);
+ struct qed_tunn_start_params *p_tunn,
+ enum qed_mf_mode mode, bool allow_npar_tx_switch);
+
+/**
+ * @brief qed_sp_pf_update - PF Function Update Ramrod
+ *
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
+
/**
* @brief qed_sp_pf_stop - PF Function Stop Ramrod
@@ -362,4 +382,18 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_tunn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+/**
+ * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 1c06c37d4..67f6ce3c8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -15,11 +15,13 @@
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
+#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
@@ -87,8 +89,218 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
return 0;
}
+static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
+{
+ switch (type) {
+ case QED_TUNN_CLSS_MAC_VLAN:
+ return TUNNEL_CLSS_MAC_VLAN;
+ case QED_TUNN_CLSS_MAC_VNI:
+ return TUNNEL_CLSS_MAC_VNI;
+ case QED_TUNN_CLSS_INNER_MAC_VLAN:
+ return TUNNEL_CLSS_INNER_MAC_VLAN;
+ case QED_TUNN_CLSS_INNER_MAC_VNI:
+ return TUNNEL_CLSS_INNER_MAC_VNI;
+ default:
+ return TUNNEL_CLSS_MAC_VLAN;
+ }
+}
+
+static void
+qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
+ unsigned long update_mask = p_src->tunn_mode_update_mask;
+ unsigned long tunn_mode = p_src->tunn_mode;
+ unsigned long new_tunn_mode = 0;
+
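+ /* Descriptive note: for each tunnel type below, a set bit in the
+ * update mask means "take the new value from tunn_mode", while a
+ * clear bit means "keep the currently cached mode".
+ */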
+ if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+ }
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+ }
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+ }
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ cpu_to_le16(p_src->geneve_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+ }
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+ }
+
+ p_src->tunn_mode = new_tunn_mode;
+}
+
+static void
+qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ unsigned long tunn_mode = p_src->tunn_mode;
+ enum tunnel_clss type;
+
+ qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
+ p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
+ p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+ p_tunn_cfg->tunnel_clss_vxlan = type;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+ p_tunn_cfg->tunnel_clss_l2gre = type;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+ p_tunn_cfg->tunnel_clss_ipgre = type;
+
+ if (p_src->update_vxlan_udp_port) {
+ p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+ p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2gre = 1;
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgre = 1;
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_vxlan = 1;
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ cpu_to_le16(p_src->geneve_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2geneve = 1;
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+ p_tunn_cfg->tunnel_clss_l2geneve = type;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+ p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
+static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ unsigned long tunn_mode)
+{
+ u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
+ u8 l2geneve_enable = 0, ipgeneve_enable = 0;
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ l2gre_enable = 1;
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ ipgre_enable = 1;
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ vxlan_enable = 1;
+
+ qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
+ qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ l2geneve_enable = 1;
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ ipgeneve_enable = 1;
+
+ qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
+ ipgeneve_enable);
+}
+
+static void
+qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_start_params *p_src,
+ struct pf_start_tunnel_config *p_tunn_cfg)
+{
+ unsigned long tunn_mode;
+ enum tunnel_clss type;
+
+ if (!p_src)
+ return;
+
+ tunn_mode = p_src->tunn_mode;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+ p_tunn_cfg->tunnel_clss_vxlan = type;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+ p_tunn_cfg->tunnel_clss_l2gre = type;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+ p_tunn_cfg->tunnel_clss_ipgre = type;
+
+ if (p_src->update_vxlan_udp_port) {
+ p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+ p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2gre = 1;
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgre = 1;
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_vxlan = 1;
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ cpu_to_le16(p_src->geneve_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2geneve = 1;
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+ p_tunn_cfg->tunnel_clss_l2geneve = type;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+ p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- enum qed_mf_mode mode)
+ struct qed_tunn_start_params *p_tunn,
+ enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = NULL;
u16 sb = qed_int_get_sp_sb_id(p_hwfn);
@@ -143,16 +355,103 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl.p_phys_table);
+ qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
+ &p_ramrod->tunnel_config);
p_hwfn->hw_info.personality = PERSONALITY_ETH;
+ if (IS_MF_SI(p_hwfn))
+ p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
+
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
+ p_ramrod->num_vfs = (u8) p_iov->total_vfs;
+ }
+
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
sb, sb_index,
p_ramrod->outer_tag);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ if (p_tunn) {
+ qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->tunn_mode);
+ p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+ }
+
+ return rc;
+}
+
+int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
+ &p_ent->ramrod.pf_update);
+
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+/* Set pf update ramrod command params */
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_tunn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
+ &p_ent->ramrod.pf_update.tunnel_config);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ if (p_tunn->update_vxlan_udp_port)
+ qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->vxlan_udp_port);
+ if (p_tunn->update_geneve_udp_port)
+ qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->geneve_udp_port);
+
+ qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
+ p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+
+ return rc;
+}
+
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
@@ -173,3 +472,24 @@ int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 89469d5aa..03601dfc0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -27,6 +27,7 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
/***************************************************************************
* Structures & Definitions
@@ -212,19 +213,15 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
DQ_XCM_CORE_SPQ_PROD_CMD);
db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
- /* validate producer is up to-date */
- rmb();
-
db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
- /* do not reorder */
- barrier();
+ /* make sure the SPQE is updated before the doorbell */
+ wmb();
DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
/* make sure doorbell is rung */
- mmiowb();
+ wmb();
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
@@ -242,10 +239,17 @@ static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
- DP_NOTICE(p_hwfn,
- "Unknown Async completion for protocol: %d\n",
- p_eqe->protocol_id);
- return -EINVAL;
+ switch (p_eqe->protocol_id) {
+ case PROTOCOLID_COMMON:
+ return qed_sriov_eqe_event(p_hwfn,
+ p_eqe->opcode,
+ p_eqe->echo, &p_eqe->data);
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unknown Async completion for protocol: %d\n",
+ p_eqe->protocol_id);
+ return -EINVAL;
+ }
}
/***************************************************************************
@@ -379,6 +383,9 @@ static int qed_cqe_completion(
struct eth_slow_path_rx_cqe *cqe,
enum protocol_type protocol)
{
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
/* @@@tmp - it's possible we'll eventually want to handle some
* actual commands that can arrive here, but for now this is only
* used to complete the ramrod using the echo value on the cqe
@@ -603,7 +610,9 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
*p_en2 = *p_ent;
- kfree(p_ent);
+ /* EBLOCK is responsible for freeing the allocated p_ent */
+ if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+ kfree(p_ent);
p_ent = p_en2;
}
@@ -738,6 +747,15 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
* Thus, after gaining the answer perform the cleanup here.
*/
rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ /* This is an allocated p_ent which does not need to be
+ * returned to the pool.
+ */
+ kfree(p_ent);
+ return rc;
+ }
+
if (rc)
goto spq_post_fail2;
@@ -791,13 +809,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
* in a bitmap and increasing the chain consumer only
* for the first successive completed entries.
*/
- bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
+ __set_bit(pos, p_spq->p_comp_bitmap);
while (test_bit(p_spq->comp_bitmap_idx,
p_spq->p_comp_bitmap)) {
- bitmap_clear(p_spq->p_comp_bitmap,
- p_spq->comp_bitmap_idx,
- SPQ_RING_SIZE);
+ __clear_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap);
p_spq->comp_bitmap_idx++;
qed_chain_return_produced(&p_spq->chain);
}
@@ -833,8 +850,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
- if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
- /* EBLOCK is responsible for freeing its own entry */
+ if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
+ (found->queue == &p_spq->unlimited_pending))
+ /* EBLOCK is responsible for returning its own entry into the
+ * free list, unless it originally added the entry into the
+ * unlimited pending list.
+ */
qed_spq_return_entry(p_hwfn, found);
/* Attempt to post pending requests */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
new file mode 100644
index 000000000..c325ee857
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -0,0 +1,3613 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/qed/qed_iov_if.h>
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+/* IOV ramrods */
+static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
+ u32 concrete_vfid, u16 opaque_vfid)
+{
+ struct vf_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_vfid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_START,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_start;
+
+ p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+ p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);
+
+ p_ramrod->personality = PERSONALITY_ETH;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
+ u32 concrete_vfid, u16 opaque_vfid)
+{
+ struct vf_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_vfid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_STOP,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_stop;
+
+ p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id, bool b_enabled_only)
+{
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->cdev, "No iov info\n");
+ return false;
+ }
+
+ if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
+ (rel_vf_id < 0))
+ return false;
+
+ if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
+ b_enabled_only)
+ return false;
+
+ return true;
+}
+
+static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct qed_vf_info *vf = NULL;
+
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->cdev, "No iov info\n");
+ return NULL;
+ }
+
+ if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+ vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
+ else
+ DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
+ relative_vf_id);
+
+ return vf;
+}
+
+int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
+ int vfid, struct qed_ptt *p_ptt)
+{
+ struct qed_bulletin_content *p_bulletin;
+ int crc_size = sizeof(p_bulletin->crc);
+ struct qed_dmae_params params;
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf)
+ return -EINVAL;
+
+ if (!p_vf->vf_bulletin)
+ return -EINVAL;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ /* Increment bulletin board version and compute crc */
+ p_bulletin->version++;
+ p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
+ p_vf->bulletin.size - crc_size);
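+
+ /* Note: the CRC above deliberately skips its own storage; it covers
+ * the bulletin contents starting just past the crc field.
+ */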
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
+ p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
+
+ /* propagate bulletin board via dmae to vm memory */
+ memset(&params, 0, sizeof(params));
+ params.flags = QED_DMAE_FLAG_VF_DST;
+ params.dst_vfid = p_vf->abs_vf_id;
+ return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
+ p_vf->vf_bulletin, p_vf->bulletin.size / 4,
+ &params);
+}
+
+static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
+{
+ struct qed_hw_sriov_info *iov = cdev->p_iov_info;
+ int pos = iov->pos;
+
+ DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
+ pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
+
+ pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
+ if (iov->num_vfs) {
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV,
+ "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
+ iov->num_vfs = 0;
+ }
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
+
+ pci_read_config_dword(cdev->pdev,
+ pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+
+ pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
+
+ pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV,
+ "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
+ iov->nres,
+ iov->cap,
+ iov->ctrl,
+ iov->total_vfs,
+ iov->initial_vfs,
+ iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+
+ /* Some sanity checks */
+ if (iov->num_vfs > NUM_OF_VFS(cdev) ||
+ iov->total_vfs > NUM_OF_VFS(cdev)) {
+ /* This can happen only due to a bug. In this case we set
+ * num_vfs to zero to avoid memory corruption in the code that
+ * assumes max number of vfs
+ */
+ DP_NOTICE(cdev,
+ "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
+ iov->num_vfs);
+
+ iov->num_vfs = 0;
+ iov->total_vfs = 0;
+ }
+
+ return 0;
+}
+
+static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_igu_block *p_sb;
+ u16 sb_id;
+ u32 val;
+
+ if (!p_hwfn->hw_info.p_igu_info) {
+ DP_ERR(p_hwfn,
+ "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
+ return;
+ }
+
+ for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+ sb_id++) {
+ p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+ if ((p_sb->status & QED_IGU_STATUS_FREE) &&
+ !(p_sb->status & QED_IGU_STATUS_PF)) {
+ val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sb_id * 4);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
+ }
+ }
+}
+
+static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ struct qed_bulletin_content *p_bulletin_virt;
+ dma_addr_t req_p, rply_p, bulletin_p;
+ union pfvf_tlvs *p_reply_virt_addr;
+ union vfpf_tlvs *p_req_virt_addr;
+ u8 idx = 0;
+
+ memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
+
+ p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
+ req_p = p_iov_info->mbx_msg_phys_addr;
+ p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
+ rply_p = p_iov_info->mbx_reply_phys_addr;
+ p_bulletin_virt = p_iov_info->p_bulletins;
+ bulletin_p = p_iov_info->bulletins_phys;
+ if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
+ DP_ERR(p_hwfn,
+ "qed_iov_setup_vfdb called without allocating mem first\n");
+ return;
+ }
+
+ for (idx = 0; idx < p_iov->total_vfs; idx++) {
+ struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
+ u32 concrete;
+
+ vf->vf_mbx.req_virt = p_req_virt_addr + idx;
+ vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
+ vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
+ vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
+
+ vf->state = VF_STOPPED;
+ vf->b_init = false;
+
+ vf->bulletin.phys = idx *
+ sizeof(struct qed_bulletin_content) +
+ bulletin_p;
+ vf->bulletin.p_virt = p_bulletin_virt + idx;
+ vf->bulletin.size = sizeof(struct qed_bulletin_content);
+
+ vf->relative_vf_id = idx;
+ vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
+ concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
+ vf->concrete_fid = concrete;
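+ /* The opaque fid packs the VF id on top of its parent PF:
+ * e.g. (hypothetical IDs) PF opaque_fid 0x02 and abs_vf_id 3
+ * yield a VF opaque_fid of 0x0302.
+ */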
+ vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
+ (vf->abs_vf_id << 8);
+ vf->vport_id = idx + 1;
+ }
+}
+
+static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ void **p_v_addr;
+ u16 num_vfs = 0;
+
+ num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
+
+ /* Allocate PF Mailbox buffer (per-VF) */
+ p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_msg_virt_addr;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_msg_size,
+ &p_iov_info->mbx_msg_phys_addr,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ /* Allocate PF Mailbox Reply buffer (per-VF) */
+ p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_reply_virt_addr;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_reply_size,
+ &p_iov_info->mbx_reply_phys_addr,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
+ num_vfs;
+ p_v_addr = &p_iov_info->p_bulletins;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->bulletins_size,
+ &p_iov_info->bulletins_phys,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
+ p_iov_info->mbx_msg_virt_addr,
+ (u64) p_iov_info->mbx_msg_phys_addr,
+ p_iov_info->mbx_reply_virt_addr,
+ (u64) p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
+
+ return 0;
+}
+
+static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+
+ if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_msg_size,
+ p_iov_info->mbx_msg_virt_addr,
+ p_iov_info->mbx_msg_phys_addr);
+
+ if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_reply_size,
+ p_iov_info->mbx_reply_virt_addr,
+ p_iov_info->mbx_reply_phys_addr);
+
+ if (p_iov_info->p_bulletins)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->bulletins_size,
+ p_iov_info->p_bulletins,
+ p_iov_info->bulletins_phys);
+}
+
+int qed_iov_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_sriov;
+
+ if (!IS_PF_SRIOV(p_hwfn)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "No SR-IOV - no need for IOV db\n");
+ return 0;
+ }
+
+ p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
+ if (!p_sriov) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+ return -ENOMEM;
+ }
+
+ p_hwfn->pf_iov_info = p_sriov;
+
+ return qed_iov_allocate_vfdb(p_hwfn);
+}
+
+void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return;
+
+ qed_iov_setup_vfdb(p_hwfn);
+ qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
+}
+
+void qed_iov_free(struct qed_hwfn *p_hwfn)
+{
+ if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
+ qed_iov_free_vfdb(p_hwfn);
+ kfree(p_hwfn->pf_iov_info);
+ }
+}
+
+void qed_iov_free_hw_info(struct qed_dev *cdev)
+{
+ kfree(cdev->p_iov_info);
+ cdev->p_iov_info = NULL;
+}
+
+int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int pos;
+ int rc;
+
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
+ /* Learn the PCI configuration */
+ pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
+ PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
+ return 0;
+ }
+
+ /* Allocate a new struct for IOV information */
+ cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
+ if (!cdev->p_iov_info) {
+ DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
+ return -ENOMEM;
+ }
+ cdev->p_iov_info->pos = pos;
+
+ rc = qed_iov_pci_cfg_info(cdev);
+ if (rc)
+ return rc;
+
+ /* We want PF IOV to be synonymous with the existence of p_iov_info;
+ * In case the capability is published but there are no VFs, simply
+ * de-allocate the struct.
+ */
+ if (!cdev->p_iov_info->total_vfs) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "IOV capabilities, but no VFs are published\n");
+ kfree(cdev->p_iov_info);
+ cdev->p_iov_info = NULL;
+ return 0;
+ }
+
+ /* Calculate the first VF index - this is a bit tricky; basically,
+ * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
+ * after the first engine's VFs.
+ */
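+ /* Worked example (hypothetical IDs): with offset 16 and abs_pf_id 1
+ * on the first engine, first_vf_in_pf = 16 + 1 - 16 = 1; on the
+ * second engine MAX_NUM_VFS_BB is subtracted on top of that.
+ */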
+ cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
+ if (QED_PATH_ID(p_hwfn))
+ cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "First VF in hwfn 0x%08x\n",
+ cdev->p_iov_info->first_vf_in_pf);
+
+ return 0;
+}
+
+static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+{
+ /* Check PF supports sriov */
+ if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
+ !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return false;
+
+ /* Check VF validity */
+ if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
+ return false;
+
+ return true;
+}
+
+static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
+ u16 rel_vf_id, u8 to_disable)
+{
+ struct qed_vf_info *vf;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!vf)
+ continue;
+
+ vf->to_disable = to_disable;
+ }
+}
+
+void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
+{
+ u16 i;
+
+ if (!IS_QED_SRIOV(cdev))
+ return;
+
+ for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
+ qed_iov_set_vf_to_disable(cdev, i, to_disable);
+}
+
+static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 abs_vfid)
+{
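+ /* Each 32-bit WAS_ERROR register covers 32 VFs: abs_vfid >> 5 picks
+ * the register (4 bytes apart) and abs_vfid & 0x1f the bit within it;
+ * e.g. (hypothetical) VF 40 maps to register 1, bit 8.
+ */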
+ qed_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
+ 1 << (abs_vfid & 0x1f));
+}
+
+static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+ int i;
+
+ /* Set VF masks and configuration - pretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+ /* iterate over all queues, clear sb consumer */
+ for (i = 0; i < vf->num_sbs; i++)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, true);
+}
+
+static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, bool enable)
+{
+ u32 igu_vf_conf;
+
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+ igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
+
+ if (enable)
+ igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
+ else
+ igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+}
+
+static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
+ int rc;
+
+ if (vf->to_disable)
+ return 0;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Enable internal access for vf %x [abs %x]\n",
+ vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
+
+ qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
+
+ qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
+ if (rc)
+ return rc;
+
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+ SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
+ STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
+
+ qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
+ p_hwfn->hw_info.hw_mode);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+ if (vf->state != VF_STOPPED) {
+ DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
+ vf->abs_vf_id);
+ return -EINVAL;
+ }
+
+ /* Start VF */
+ rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
+
+ vf->state = VF_FREE;
+
+ return rc;
+}
+
+/**
+ * @brief qed_iov_config_perm_table - configure the permission
+ * zone table.
+ * In E4, queue zone permission table size is 320x9. There
+ * are 320 VF queues for single engine device (256 for dual
+ * engine device), and each entry has the following format:
+ * {Valid, VF[7:0]}
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf
+ * @param enable
+ */
+static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, u8 enable)
+{
+ u32 reg_addr, val;
+ u16 qzone_id = 0;
+ int qid;
+
+ for (qid = 0; qid < vf->num_rxqs; qid++) {
+ qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
+ &qzone_id);
+
+ reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
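+ /* Entry format {Valid, VF[7:0]}: e.g. (hypothetical) enabling
+ * abs_vf_id 5 writes 0x105 = (5 | (1 << 8)); disabling writes 0.
+ */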
+ val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+ qed_wr(p_hwfn, p_ptt, reg_addr, val);
+ }
+}
+
+static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ /* Reset vf in IGU - interrupts are still disabled */
+ qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
+
+ /* Permission Table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
+}
+
+static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, u16 num_rx_queues)
+{
+ struct qed_igu_block *igu_blocks;
+ int qid = 0, igu_id = 0;
+ u32 val = 0;
+
+ igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
+
+ if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
+ num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
+ p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
+
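+ /* Descriptive note: each free IGU block claimed below gets a mapping
+ * line carrying the VF's absolute function number, a vector number
+ * equal to the VF-relative queue index, valid set and pf_valid
+ * cleared.
+ */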
+ while ((qid < num_rx_queues) &&
+ (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
+ if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
+ struct cau_sb_entry sb_entry;
+
+ vf->igu_sbs[qid] = (u16)igu_id;
+ igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
+
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
+ val);
+
+ /* Configure igu sb in CAU which were marked valid */
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_hwfn->rel_pf_id,
+ vf->abs_vf_id, 1);
+ qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ igu_id * sizeof(u64), 2, 0);
+ qid++;
+ }
+ igu_id++;
+ }
+
+ vf->num_sbs = (u8) num_rx_queues;
+
+ return vf->num_sbs;
+}
+
+static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ int idx, igu_id;
+ u32 addr, val;
+
+ /* Invalidate igu CAM lines and mark them as free */
+ for (idx = 0; idx < vf->num_sbs; idx++) {
+ igu_id = vf->igu_sbs[idx];
+ addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
+
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+ qed_wr(p_hwfn, p_ptt, addr, val);
+
+ p_info->igu_map.igu_blocks[igu_id].status |=
+ QED_IGU_STATUS_FREE;
+
+ p_hwfn->hw_info.p_igu_info->free_blks++;
+ }
+
+ vf->num_sbs = 0;
+}
+
+static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 rel_vf_id, u16 num_rx_queues)
+{
+ u8 num_of_vf_avaiable_chains = 0;
+ struct qed_vf_info *vf = NULL;
+ int rc = 0;
+ u32 cids;
+ u8 i;
+
+ vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!vf) {
+ DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
+ return -EINVAL;
+ }
+
+ if (vf->b_init) {
+ DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
+ return -EINVAL;
+ }
+
+ /* Limit number of queues according to number of CIDs */
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
+ vf->relative_vf_id, num_rx_queues, (u16) cids);
+ num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
+
+ num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
+ p_ptt,
+ vf,
+ num_rx_queues);
+ if (!num_of_vf_avaiable_chains) {
+ DP_ERR(p_hwfn, "no available igu sbs\n");
+ return -ENOMEM;
+ }
+
+ /* Choose queue number and index ranges */
+ vf->num_rxqs = num_of_vf_avaiable_chains;
+ vf->num_txqs = num_of_vf_avaiable_chains;
+
+ for (i = 0; i < vf->num_rxqs; i++) {
+ u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
+ vf->igu_sbs[i]);
+
+ if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
+ DP_NOTICE(p_hwfn,
+ "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
+ vf->relative_vf_id, queue_id);
+ return -EINVAL;
+ }
+
+ /* CIDs are per-VF, so no problem having them 0-based. */
+ vf->vf_queues[i].fw_rx_qid = queue_id;
+ vf->vf_queues[i].fw_tx_qid = queue_id;
+ vf->vf_queues[i].fw_cid = i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+ }
+ rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
+ if (!rc) {
+ vf->b_init = true;
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->p_iov_info->num_vfs++;
+ }
+
+ return rc;
+}
+
+static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
+ u16 vfid,
+ struct qed_mcp_link_params *params,
+ struct qed_mcp_link_state *link,
+ struct qed_mcp_link_capabilities *p_caps)
+{
+ struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+ vfid,
+ false);
+ struct qed_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+ p_bulletin->req_autoneg = params->speed.autoneg;
+ p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+ p_bulletin->req_forced_speed = params->speed.forced_speed;
+ p_bulletin->req_autoneg_pause = params->pause.autoneg;
+ p_bulletin->req_forced_rx = params->pause.forced_rx;
+ p_bulletin->req_forced_tx = params->pause.forced_tx;
+ p_bulletin->req_loopback = params->loopback_mode;
+
+ p_bulletin->link_up = link->link_up;
+ p_bulletin->speed = link->speed;
+ p_bulletin->full_duplex = link->full_duplex;
+ p_bulletin->autoneg = link->an;
+ p_bulletin->autoneg_complete = link->an_complete;
+ p_bulletin->parallel_detection = link->parallel_detection;
+ p_bulletin->pfc_enabled = link->pfc_enabled;
+ p_bulletin->partner_adv_speed = link->partner_adv_speed;
+ p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+ p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+ p_bulletin->partner_adv_pause = link->partner_adv_pause;
+ p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+ p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 rel_vf_id)
+{
+ struct qed_mcp_link_capabilities caps;
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ struct qed_vf_info *vf = NULL;
+ int rc = 0;
+
+ vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!vf) {
+ DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
+ return -EINVAL;
+ }
+
+ if (vf->bulletin.p_virt)
+ memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
+
+ memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
+
+ /* Get the link configuration back in bulletin so
+ * that when VFs are re-enabled they get the actual
+ * link configuration.
+ */
+ memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
+ memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
+ qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
+
+ if (vf->state != VF_STOPPED) {
+ /* Stopping the VF */
+ rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
+
+ if (rc != 0) {
+ DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
+ rc);
+ return rc;
+ }
+
+ vf->state = VF_STOPPED;
+ }
+
+ /* Disabling interrupts and resetting the permission table were done
+ * during vf-close; however, we could get here without going through
+ * vf-close.
+ */
+ /* Disable Interrupts for VF */
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ vf->num_rxqs = 0;
+ vf->num_txqs = 0;
+ qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
+
+ if (vf->b_init) {
+ vf->b_init = false;
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->p_iov_info->num_vfs--;
+ }
+
+ return 0;
+}
+
+static bool qed_iov_tlv_supported(u16 tlvtype)
+{
+ return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
+}
+
+/* place a given tlv on the tlv buffer, continuing current tlv list */
+void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
+{
+ struct channel_tlv *tl = (struct channel_tlv *)*offset;
+
+ tl->type = type;
+ tl->length = length;
+
+ /* Offset should keep pointing to next TLV (the end of the last) */
+ *offset += length;
+
+ /* Return a pointer to the start of the added tlv */
+ return *offset - length;
+}
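+
+/* Usage sketch (mirrors qed_iov_prepare_resp() below; names taken from this
+ * file): TLVs are chained by calling qed_add_tlv() repeatedly with the same
+ * advancing offset, and the chain is closed with a CHANNEL_TLV_LIST_END
+ * entry:
+ *
+ * mbx->offset = (u8 *)mbx->reply_virt;
+ * qed_add_tlv(p_hwfn, &mbx->offset, type, length);
+ * qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ * sizeof(struct channel_list_end_tlv));
+ */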
+
+/* list the types and lengths of the tlvs on the buffer */
+void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
+{
+ u16 i = 1, total_length = 0;
+ struct channel_tlv *tlv;
+
+ do {
+ tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
+
+ /* output tlv */
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "TLV number %d: type %d, length %d\n",
+ i, tlv->type, tlv->length);
+
+ if (tlv->type == CHANNEL_TLV_LIST_END)
+ return;
+
+ /* Validate entry - protect against malicious VFs */
+ if (!tlv->length) {
+ DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
+ return;
+ }
+
+ total_length += tlv->length;
+
+ if (total_length >= sizeof(struct tlv_buffer_size)) {
+ DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
+ return;
+ }
+
+ i++;
+ } while (1);
+}
+
+static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf,
+ u16 length, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct qed_dmae_params params;
+ u8 eng_vf_id;
+
+ mbx->reply_virt->default_resp.hdr.status = status;
+
+ qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
+
+ eng_vf_id = p_vf->abs_vf_id;
+
+ memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = QED_DMAE_FLAG_VF_DST;
+ params.dst_vfid = eng_vf_id;
+
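+ /* The reply body is DMAed first and its leading u64 last, presumably
+ * so the VF, which inspects the start of the reply, cannot observe a
+ * header for a response whose tail has not landed yet.
+ */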
+ qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
+ mbx->req_virt->first_tlv.reply_address +
+ sizeof(u64),
+ (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
+ &params);
+
+ qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+ mbx->req_virt->first_tlv.reply_address,
+ sizeof(u64) / 4, &params);
+
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+}
+
+static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
+ enum qed_iov_vport_update_flag flag)
+{
+ switch (flag) {
+ case QED_IOV_VP_UPDATE_ACTIVATE:
+ return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ case QED_IOV_VP_UPDATE_VLAN_STRIP:
+ return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+ case QED_IOV_VP_UPDATE_TX_SWITCH:
+ return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ case QED_IOV_VP_UPDATE_MCAST:
+ return CHANNEL_TLV_VPORT_UPDATE_MCAST;
+ case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ case QED_IOV_VP_UPDATE_RSS:
+ return CHANNEL_TLV_VPORT_UPDATE_RSS;
+ case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ case QED_IOV_VP_UPDATE_SGE_TPA:
+ return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+ default:
+ return 0;
+ }
+}
+
+static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_iov_vf_mbx *p_mbx,
+ u8 status,
+ u16 tlvs_mask, u16 tlvs_accepted)
+{
+ struct pfvf_def_resp_tlv *resp;
+ u16 size, total_len, i;
+
+ memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
+ p_mbx->offset = (u8 *)p_mbx->reply_virt;
+ size = sizeof(struct pfvf_def_resp_tlv);
+ total_len = size;
+
+ qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+
+ /* Prepare response for all extended tlvs if they are found by PF */
+ for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
+ if (!(tlvs_mask & (1 << i)))
+ continue;
+
+ resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
+ qed_iov_vport_to_tlv(p_hwfn, i), size);
+
+ if (tlvs_accepted & (1 << i))
+ resp->hdr.status = status;
+ else
+ resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - vport_update response: TLV %d, status %02x\n",
+ p_vf->relative_vf_id,
+ qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+
+ total_len += size;
+ }
+
+ qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ return total_len;
+}
+
+static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf_info,
+ u16 type, u16 length, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ qed_add_tlv(p_hwfn, &mbx->offset, type, length);
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
+}
+
+struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct qed_vf_info *vf = NULL;
+
+ vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
+ if (!vf)
+ return NULL;
+
+ return &vf->p_vf_info;
+}
+
+void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
+
+ if (!vf_info)
+ return;
+
+ /* Clear the VF mac */
+ memset(vf_info->mac, 0, ETH_ALEN);
+}
+
+static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ u32 i;
+
+ p_vf->vf_bulletin = 0;
+ p_vf->vport_instance = 0;
+ p_vf->num_mac_filters = 0;
+ p_vf->num_vlan_filters = 0;
+ p_vf->configured_features = 0;
+
+ /* If VF previously requested less resources, go back to default */
+ p_vf->num_rxqs = p_vf->num_sbs;
+ p_vf->num_txqs = p_vf->num_sbs;
+
+ p_vf->num_active_rxqs = 0;
+
+ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
+ p_vf->vf_queues[i].rxq_active = 0;
+
+ memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+ qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
+}
+
+static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
+ u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
+ struct pf_vf_resc *resc = &resp->resc;
+
+ /* Validate FW compatibility */
+ if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
+ req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
+ req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
+ req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
+ DP_INFO(p_hwfn,
+ "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
+ vf->abs_vf_id,
+ req->vfdev_info.fw_major,
+ req->vfdev_info.fw_minor,
+ req->vfdev_info.fw_revision,
+ req->vfdev_info.fw_engineering,
+ FW_MAJOR_VERSION,
+ FW_MINOR_VERSION,
+ FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+ vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* On 100g PFs, prevent old VFs from loading */
+ if ((p_hwfn->cdev->num_hwfns > 1) &&
+ !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
+ DP_INFO(p_hwfn,
+ "VF[%d] is running an old driver that doesn't support 100g\n",
+ vf->abs_vf_id);
+ vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ memset(resp, 0, sizeof(*resp));
+
+ /* Fill in vf info stuff */
+ vf->opaque_fid = req->vfdev_info.opaque_fid;
+ vf->num_mac_filters = 1;
+ vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+
+ vf->vf_bulletin = req->bulletin_addr;
+ vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
+ vf->bulletin.size : req->bulletin_size;
+
+ /* fill in pfdev info */
+ pfdev_info->chip_num = p_hwfn->cdev->chip_num;
+ pfdev_info->db_size = 0;
+ pfdev_info->indices_per_sb = PIS_PER_SB;
+
+ pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
+ if (p_hwfn->cdev->num_hwfns > 1)
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
+
+ pfdev_info->stats_info.mstats.address =
+ PXP_VF_BAR0_START_MSDM_ZONE_B +
+ offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
+ pfdev_info->stats_info.mstats.len =
+ sizeof(struct eth_mstorm_per_queue_stat);
+
+ pfdev_info->stats_info.ustats.address =
+ PXP_VF_BAR0_START_USDM_ZONE_B +
+ offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
+ pfdev_info->stats_info.ustats.len =
+ sizeof(struct eth_ustorm_per_queue_stat);
+
+ pfdev_info->stats_info.pstats.address =
+ PXP_VF_BAR0_START_PSDM_ZONE_B +
+ offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
+ pfdev_info->stats_info.pstats.len =
+ sizeof(struct eth_pstorm_per_queue_stat);
+
+ pfdev_info->stats_info.tstats.address = 0;
+ pfdev_info->stats_info.tstats.len = 0;
+
+ memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+
+ pfdev_info->fw_major = FW_MAJOR_VERSION;
+ pfdev_info->fw_minor = FW_MINOR_VERSION;
+ pfdev_info->fw_rev = FW_REVISION_VERSION;
+ pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+ pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
+ qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
+
+ pfdev_info->dev_type = p_hwfn->cdev->type;
+ pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
+
+ resc->num_rxqs = vf->num_rxqs;
+ resc->num_txqs = vf->num_txqs;
+ resc->num_sbs = vf->num_sbs;
+ for (i = 0; i < resc->num_sbs; i++) {
+ resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
+ resc->hw_sbs[i].sb_qid = 0;
+ }
+
+ for (i = 0; i < resc->num_rxqs; i++) {
+ qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
+ (u16 *)&resc->hw_qid[i]);
+ resc->cid[i] = vf->vf_queues[i].fw_cid;
+ }
+
+ resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
+ req->resc_request.num_mac_filters);
+ resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
+ req->resc_request.num_vlan_filters);
+
+ /* This isn't really required as VF isn't limited, but some VFs might
+ * actually test this value, so we need to provide it.
+ */
+ resc->num_mc_filters = req->resc_request.num_mc_filters;
+
+ /* Fill agreed size of bulletin board in response */
+ resp->bulletin_size = vf->bulletin.size;
+ qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
+ "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
+ vf->abs_vf_id,
+ resp->pfdev_info.chip_num,
+ resp->pfdev_info.db_size,
+ resp->pfdev_info.indices_per_sb,
+ resp->pfdev_info.capabilities,
+ resc->num_rxqs,
+ resc->num_txqs,
+ resc->num_sbs,
+ resc->num_mac_filters,
+ resc->num_vlan_filters);
+ vf->state = VF_ACQUIRED;
+
+ /* Prepare Response */
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
+ sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
+}
+
+static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, bool val)
+{
+ struct qed_sp_vport_update_params params;
+ int rc;
+
+ if (val == p_vf->spoof_chk) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk value[%d] is already configured\n", val);
+ return 0;
+ }
+
+ memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
+ params.opaque_fid = p_vf->opaque_fid;
+ params.vport_id = p_vf->vport_id;
+ params.update_anti_spoofing_en_flg = 1;
+ params.anti_spoofing_en = val;
+
+ rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+ if (!rc) {
+ p_vf->spoof_chk = val;
+ p_vf->req_spoofchk_val = p_vf->spoof_chk;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk val[%d] configured\n", val);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk configuration[val:%d] failed for VF[%d]\n",
+ val, p_vf->relative_vf_id);
+ }
+
+ return rc;
+}
+
+static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ struct qed_filter_ucast filter;
+ int rc = 0;
+ int i;
+
+ memset(&filter, 0, sizeof(filter));
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.opcode = QED_FILTER_ADD;
+
+ /* Reconfigure vlans */
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (!p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ filter.type = QED_FILTER_VLAN;
+ filter.vlan = p_vf->shadow_config.vlans[i].vid;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ rc = qed_sp_eth_filter_ucast(p_hwfn,
+ p_vf->opaque_fid,
+ &filter,
+ QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to configure VLAN [%04x] to VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ break;
+ }
+ }
+
+ return rc;
+}
+
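+ /* Replay the VF's shadow unicast configuration after a forced feature
+ * has been removed; currently only shadow VLAN filters are restored.
+ */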
+static int
+qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u64 events)
+{
+ int rc = 0;
+
+ if ((events & (1 << VLAN_ADDR_FORCED)) &&
+ !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
+ rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
+
+ return rc;
+}
+
+static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u64 events)
+{
+ int rc = 0;
+ struct qed_filter_ucast filter;
+
+ if (!p_vf->vport_instance)
+ return -EINVAL;
+
+ if (events & (1 << MAC_ADDR_FORCED)) {
+ /* Since there's no way [currently] of removing the MAC,
+ * we can always assume this means we need to force it.
+ */
+ memset(&filter, 0, sizeof(filter));
+ filter.type = QED_FILTER_MAC;
+ filter.opcode = QED_FILTER_REPLACE;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure MAC for VF\n");
+ return rc;
+ }
+
+ p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+ }
+
+ if (events & (1 << VLAN_ADDR_FORCED)) {
+ struct qed_sp_vport_update_params vport_update;
+ u8 removal;
+ int i;
+
+ memset(&filter, 0, sizeof(filter));
+ filter.type = QED_FILTER_VLAN;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.vlan = p_vf->bulletin.p_virt->pvid;
+ filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
+ QED_FILTER_FLUSH;
+
+ /* Send the ramrod */
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure VLAN for VF\n");
+ return rc;
+ }
+
+ /* Update the default-vlan & silent vlan stripping */
+ memset(&vport_update, 0, sizeof(vport_update));
+ vport_update.opaque_fid = p_vf->opaque_fid;
+ vport_update.vport_id = p_vf->vport_id;
+ vport_update.update_default_vlan_enable_flg = 1;
+ vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
+ vport_update.update_default_vlan_flg = 1;
+ vport_update.default_vlan = filter.vlan;
+
+ vport_update.update_inner_vlan_removal_flg = 1;
+ removal = filter.vlan ? 1
+ : p_vf->shadow_config.inner_vlan_removal;
+ vport_update.inner_vlan_removal_flg = removal;
+ vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
+ rc = qed_sp_vport_update(p_hwfn,
+ &vport_update,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure VF vport for vlan\n");
+ return rc;
+ }
+
+ /* Update all the Rx queues */
+ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
+ u16 qid;
+
+ if (!p_vf->vf_queues[i].rxq_active)
+ continue;
+
+ qid = p_vf->vf_queues[i].fw_rx_qid;
+
+ rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
+ 1, 0, 1,
+ QED_SPQ_MODE_EBLOCK,
+ NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send Rx update fo queue[0x%04x]\n",
+ qid);
+ return rc;
+ }
+ }
+
+ if (filter.vlan)
+ p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
+ else
+ p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+ }
+
+ /* If forced features are terminated, we need to configure the shadow
+ * configuration back again.
+ */
+ if (events)
+ qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
+
+ return rc;
+}
+
+static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_sp_vport_start_params params = { 0 };
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_vport_start_tlv *start;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct qed_vf_info *vf_info;
+ u64 *p_bitmap;
+ int sb_id;
+ int rc;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to get VF info, invalid vfid [%d]\n",
+ vf->relative_vf_id);
+ return;
+ }
+
+ vf->state = VF_ENABLED;
+ start = &mbx->req_virt->start_vport;
+
+ /* Initialize Status block in CAU */
+ for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
+ if (!start->sb_addr[sb_id]) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] did not fill the address of SB %d\n",
+ vf->relative_vf_id, sb_id);
+ break;
+ }
+
+ qed_int_cau_conf_sb(p_hwfn, p_ptt,
+ start->sb_addr[sb_id],
+ vf->igu_sbs[sb_id],
+ vf->abs_vf_id, 1);
+ }
+ qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
+ vf->mtu = start->mtu;
+ vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
+
+ /* Take into consideration configuration forced by the hypervisor;
+ * if none is configured, use the values supplied by the VF [old
+ * VFs would still be fine, since they passed '0' as padding].
+ */
+ p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
+ if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+ u8 vf_req = start->only_untagged;
+
+ vf_info->bulletin.p_virt->default_only_untagged = vf_req;
+ *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
+ }
+
+ params.tpa_mode = start->tpa_mode;
+ params.remove_inner_vlan = start->inner_vlan_removal;
+ params.tx_switching = true;
+
+ params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
+ params.drop_ttl0 = false;
+ params.concrete_fid = vf->concrete_fid;
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.max_buffers_per_cqe = start->max_buffers_per_cqe;
+ params.mtu = vf->mtu;
+
+ rc = qed_sp_eth_vport_start(p_hwfn, &params);
+ if (rc != 0) {
+ DP_ERR(p_hwfn,
+ "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
+ status = PFVF_STATUS_FAILURE;
+ } else {
+ vf->vport_instance++;
+
+ /* Force configuration if needed on the newly opened vport */
+ qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
+
+ __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
+ }
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u8 status = PFVF_STATUS_SUCCESS;
+ int rc;
+
+ vf->vport_instance--;
+ vf->spoof_chk = false;
+
+ rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
+ if (rc != 0) {
+ DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
+ rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ /* Forget the configuration on the vport */
+ vf->configured_features = 0;
+ memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
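+ /* The Mstorm queue-zones follow the Tstorm zones in BAR0; each L2
+ * queue owns one zone, and the rx producers live at a fixed offset
+ * within it, which is the offset returned to the VF below.
+ */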
+#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
+ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+
+static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+ struct vfpf_start_rxq_tlv *req;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
+ sizeof(*p_tlv));
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if (status == PFVF_STATUS_SUCCESS) {
+ u16 hw_qid = 0;
+
+ req = &mbx->req_virt->start_rxq;
+ qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid,
+ &hw_qid);
+
+ p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) +
+ hw_qid * MSTORM_QZONE_SIZE +
+ offsetof(struct mstorm_eth_queue_zone,
+ rx_producers);
+ }
+
+ qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
+}
+
+static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_queue_start_common_params params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_start_rxq_tlv *req;
+ int rc;
+
+ memset(&params, 0, sizeof(params));
+ req = &mbx->req_virt->start_rxq;
+ params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
+ params.vport_id = vf->vport_id;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
+ vf->vf_queues[req->rx_qid].fw_cid,
+ &params,
+ vf->abs_vf_id + 0x10,
+ req->bd_max_bytes,
+ req->rxq_addr,
+ req->cqe_pbl_addr, req->cqe_pbl_size);
+
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ } else {
+ vf->vf_queues[req->rx_qid].rxq_active = true;
+ vf->num_active_rxqs++;
+ }
+
+ qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
+}
+
+static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_queue_start_common_params params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ union qed_qm_pq_params pq_params;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_start_txq_tlv *req;
+ int rc;
+
+ /* Prepare the parameters which would choose the right PQ */
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.eth.is_vf = 1;
+ pq_params.eth.vf_id = vf->relative_vf_id;
+
+ memset(&params, 0, sizeof(params));
+ req = &mbx->req_virt->start_txq;
+ params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
+ params.vport_id = vf->vport_id;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
+ vf->opaque_fid,
+ vf->vf_queues[req->tx_qid].fw_cid,
+ &params,
+ vf->abs_vf_id + 0x10,
+ req->pbl_addr,
+ req->pbl_size, &pq_params);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+ else
+ vf->vf_queues[req->tx_qid].txq_active = true;
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
+ length, status);
+}
+
+static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ u16 rxq_id, u8 num_rxqs, bool cqe_completion)
+{
+ int rc = 0;
+ int qid;
+
+ if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
+ return -EINVAL;
+
+ for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
+ if (vf->vf_queues[qid].rxq_active) {
+ rc = qed_sp_eth_rx_queue_stop(p_hwfn,
+ vf->vf_queues[qid].
+ fw_rx_qid, false,
+ cqe_completion);
+
+ if (rc)
+ return rc;
+ }
+ vf->vf_queues[qid].rxq_active = false;
+ vf->num_active_rxqs--;
+ }
+
+ return rc;
+}
+
+static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
+{
+ int rc = 0;
+ int qid;
+
+ if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
+ return -EINVAL;
+
+ for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
+ if (vf->vf_queues[qid].txq_active) {
+ rc = qed_sp_eth_tx_queue_stop(p_hwfn,
+ vf->vf_queues[qid].
+ fw_tx_qid);
+
+ if (rc)
+ return rc;
+ }
+ vf->vf_queues[qid].txq_active = false;
+ }
+ return rc;
+}
+
+static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_stop_rxqs_tlv *req;
+ int rc;
+
+ /* We allow stopping from qid != 0; in that case we need to make
+ * sure that qid + num_qs doesn't exceed the actual number of
+ * queues that exist.
+ */
+ req = &mbx->req_virt->stop_rxqs;
+ rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+ req->num_rxqs, req->cqe_completion);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_stop_txqs_tlv *req;
+ int rc;
+
+ /* We allow stopping from qid != 0; in that case we need to make
+ * sure that qid + num_qs doesn't exceed the actual number of
+ * queues that exist.
+ */
+ req = &mbx->req_virt->stop_txqs;
+ rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_update_rxq_tlv *req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ u8 complete_event_flg;
+ u8 complete_cqe_flg;
+ u16 qid;
+ int rc;
+ u8 i;
+
+ req = &mbx->req_virt->update_rxq;
+ complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
+ complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+
+ for (i = 0; i < req->num_rxqs; i++) {
+ qid = req->rx_qid + i;
+
+ if (!vf->vf_queues[qid].rxq_active) {
+ DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n",
+ qid);
+ status = PFVF_STATUS_FAILURE;
+ break;
+ }
+
+ rc = qed_sp_eth_rx_queues_update(p_hwfn,
+ vf->vf_queues[qid].fw_rx_qid,
+ 1,
+ complete_cqe_flg,
+ complete_event_flg,
+ QED_SPQ_MODE_EBLOCK, NULL);
+
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ break;
+ }
+ }
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
+ length, status);
+}
+
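+ /* Walk the TLV chain of a request buffer looking for an extended TLV
+ * of the given type; a zero-length TLV or an overrun of the buffer
+ * terminates the search.
+ */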
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type)
+{
+ struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
+ int len = 0;
+
+ do {
+ if (!p_tlv->length) {
+ DP_NOTICE(p_hwfn, "Zero length TLV found\n");
+ return NULL;
+ }
+
+ if (p_tlv->type == req_type) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Extended tlv type %d, length %d found\n",
+ p_tlv->type, p_tlv->length);
+ return p_tlv;
+ }
+
+ len += p_tlv->length;
+ p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
+
+ if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
+ DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
+ return NULL;
+ }
+ } while (p_tlv->type != CHANNEL_TLV_LIST_END);
+
+ return NULL;
+}
+
+static void
+qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+
+ p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_act_tlv)
+ return;
+
+ p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
+ p_data->vport_active_rx_flg = p_act_tlv->active_rx;
+ p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
+ p_data->vport_active_tx_flg = p_act_tlv->active_tx;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
+}
+
+static void
+qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_vf_info *p_vf,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+
+ p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_vlan_tlv)
+ return;
+
+ p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
+
+ /* Ignore the VF request if we're forcing a vlan */
+ if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+ p_data->update_inner_vlan_removal_flg = 1;
+ p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
+ }
+
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
+}
+
+static void
+qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+
+ p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ tlv);
+ if (!p_tx_switch_tlv)
+ return;
+
+ p_data->update_tx_switching_flg = 1;
+ p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
+}
+
+static void
+qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
+
+ p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_mcast_tlv)
+ return;
+
+ p_data->update_approx_mcast_flg = 1;
+ memcpy(p_data->bins, p_mcast_tlv->bins,
+ sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
+}
+
+static void
+qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+
+ p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_accept_tlv)
+ return;
+
+ p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
+ p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
+ p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
+ p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
+}
+
+static void
+qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+
+ p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ tlv);
+ if (!p_accept_any_vlan)
+ return;
+
+ p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
+ p_data->update_accept_any_vlan_flg =
+ p_accept_any_vlan->update_accept_any_vlan_flg;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
+}
+
+static void
+qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_rss_params *p_rss,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
+ u16 i, q_idx, max_q_idx;
+ u16 table_size;
+
+ p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_rss_tlv) {
+ p_data->rss_params = NULL;
+ return;
+ }
+
+ memset(p_rss, 0, sizeof(struct qed_rss_params));
+
+ p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CONFIG_FLAG);
+ p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CAPS_FLAG);
+ p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG);
+ p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_KEY_FLAG);
+
+ p_rss->rss_enable = p_rss_tlv->rss_enable;
+ p_rss->rss_eng_id = vf->relative_vf_id + 1;
+ p_rss->rss_caps = p_rss_tlv->rss_caps;
+ p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
+ memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
+ sizeof(p_rss->rss_ind_table));
+ memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
+
+ table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
+ (1 << p_rss_tlv->rss_table_size_log));
+
+ max_q_idx = ARRAY_SIZE(vf->vf_queues);
+
+ for (i = 0; i < table_size; i++) {
+ u16 index = vf->vf_queues[0].fw_rx_qid;
+
+ q_idx = p_rss->rss_ind_table[i];
+ if (q_idx >= max_q_idx)
+ DP_NOTICE(p_hwfn,
+ "rss_ind_table[%d] = %d, rxq is out of range\n",
+ i, q_idx);
+ else if (!vf->vf_queues[q_idx].rxq_active)
+ DP_NOTICE(p_hwfn,
+ "rss_ind_table[%d] = %d, rxq is not active\n",
+ i, q_idx);
+ else
+ index = vf->vf_queues[q_idx].fw_rx_qid;
+ p_rss->rss_ind_table[i] = index;
+ }
+
+ p_data->rss_params = p_rss;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
+}
+
+static void
+qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_sge_tpa_params *p_sge_tpa,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+
+ p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+ if (!p_sge_tpa_tlv) {
+ p_data->sge_tpa_params = NULL;
+ return;
+ }
+
+ memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
+
+ p_sge_tpa->update_tpa_en_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
+ p_sge_tpa->update_tpa_param_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags &
+ VFPF_UPDATE_TPA_PARAM_FLAG);
+
+ p_sge_tpa->tpa_ipv4_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
+ p_sge_tpa->tpa_ipv6_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
+ p_sge_tpa->tpa_pkt_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
+ p_sge_tpa->tpa_hdr_data_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
+ p_sge_tpa->tpa_gro_consistent_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
+
+ p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
+ p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
+ p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
+ p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
+ p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
+
+ p_data->sge_tpa_params = p_sge_tpa;
+
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
+}
+
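+ /* A vport-update request carries an optional extended TLV per feature;
+ * each qed_iov_vp_update_*() helper above translates one such TLV into
+ * ramrod parameters and marks its bit in tlvs_mask.
+ */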
+static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_sp_vport_update_params params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct qed_sge_tpa_params sge_tpa_params;
+ struct qed_rss_params rss_params;
+ u8 status = PFVF_STATUS_SUCCESS;
+ u16 tlvs_mask = 0;
+ u16 length;
+ int rc;
+
+ memset(&params, 0, sizeof(params));
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.rss_params = NULL;
+
+ /* Search for extended tlvs list and update values
+ * from VF in struct qed_sp_vport_update_params.
+ */
+ qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
+ qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
+ mbx, &tlvs_mask);
+ qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+ &sge_tpa_params, mbx, &tlvs_mask);
+
+ /* Just log a message if no extended TLV is present in the buffer.
+ * Once the VF requests every vport-update ramrod feature as an
+ * extended TLV, an error can be returned in the response when the
+ * buffer contains no extended TLV at all.
+ */
+ if (!tlvs_mask) {
+ DP_NOTICE(p_hwfn,
+ "No feature tlvs found for vport update\n");
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
+ tlvs_mask, tlvs_mask);
+ qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int i;
+
+ if (p_params->type == QED_FILTER_MAC)
+ return 0;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == QED_FILTER_REMOVE) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ if (p_vf->shadow_config.vlans[i].used &&
+ p_vf->shadow_config.vlans[i].vid ==
+ p_params->vlan) {
+ p_vf->shadow_config.vlans[i].used = false;
+ break;
+ }
+ if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Tries to remove a non-existing vlan\n",
+ p_vf->relative_vf_id);
+ return -EINVAL;
+ }
+ } else if (p_params->opcode == QED_FILTER_REPLACE ||
+ p_params->opcode == QED_FILTER_FLUSH) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ p_vf->shadow_config.vlans[i].used = false;
+ }
+
+ /* In forced mode, we're willing to remove entries - but we don't add
+ * new ones.
+ */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+ return 0;
+
+ if (p_params->opcode == QED_FILTER_ADD ||
+ p_params->opcode == QED_FILTER_REPLACE) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ p_vf->shadow_config.vlans[i].used = true;
+ p_vf->shadow_config.vlans[i].vid = p_params->vlan;
+ break;
+ }
+
+ if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Tries to configure more than %d vlan filters\n",
+ p_vf->relative_vf_id,
+ QED_ETH_VF_NUM_VLAN_FILTERS + 1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
+ int vfid, struct qed_filter_ucast *params)
+{
+ struct qed_public_vf_info *vf;
+
+ vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf)
+ return -EINVAL;
+
+ /* No real decision to make; store the configured MAC */
+ if (params->type == QED_FILTER_MAC ||
+ params->type == QED_FILTER_MAC_VLAN)
+ ether_addr_copy(vf->mac, params->mac);
+
+ return 0;
+}
+
+static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_ucast_filter_tlv *req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct qed_filter_ucast params;
+ int rc;
+
+ /* Prepare the unicast filter params */
+ memset(&params, 0, sizeof(struct qed_filter_ucast));
+ req = &mbx->req_virt->ucast_filter;
+ params.opcode = (enum qed_filter_opcode)req->opcode;
+ params.type = (enum qed_filter_ucast_type)req->type;
+
+ params.is_rx_filter = 1;
+ params.is_tx_filter = 1;
+ params.vport_to_remove_from = vf->vport_id;
+ params.vport_to_add_to = vf->vport_id;
+ memcpy(params.mac, req->mac, ETH_ALEN);
+ params.vlan = req->vlan;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+ vf->abs_vf_id, params.opcode, params.type,
+ params.is_rx_filter ? "RX" : "",
+ params.is_tx_filter ? "TX" : "",
+ params.vport_to_add_to,
+ params.mac[0], params.mac[1],
+ params.mac[2], params.mac[3],
+ params.mac[4], params.mac[5], params.vlan);
+
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Update shadow copy of the VF configuration */
+ if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Determine if the unicast filtering is acceptable to the PF */
+ if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+ (params.type == QED_FILTER_VLAN ||
+ params.type == QED_FILTER_MAC_VLAN)) {
+ /* Once VLAN is forced or PVID is set, do not allow
+ * to add/replace any further VLANs.
+ */
+ if (params.opcode == QED_FILTER_ADD ||
+ params.opcode == QED_FILTER_REPLACE)
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+ (params.type == QED_FILTER_MAC ||
+ params.type == QED_FILTER_MAC_VLAN)) {
+ if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
+ (params.opcode != QED_FILTER_ADD &&
+ params.opcode != QED_FILTER_REPLACE))
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
+ QED_SPQ_MODE_CB, NULL);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ int i;
+
+ /* Reset the SBs */
+ for (i = 0; i < vf->num_sbs; i++)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, false);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_SUCCESS);
+}
+
+static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+
+ /* Disable Interrupts for VF */
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+
+ qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
+ length, PFVF_STATUS_SUCCESS);
+}
+
+static int
+qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ int cnt;
+ u32 val;
+
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
+
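+ /* Poll the VF's DORQ usage counter for up to a second
+ * (50 iterations x 20 msec) while pretending to be the VF.
+ */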
+ for (cnt = 0; cnt < 50; cnt++) {
+ val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
+ if (!val)
+ break;
+ msleep(20);
+ }
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn,
+ "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
+ p_vf->abs_vf_id, val);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+ int i, cnt;
+
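+ /* Sample the producer/consumer gap of every VOQ, then wait for each
+ * consumer to advance past its sampled producer; that proves all of
+ * the VF's in-flight PBF blocks have drained.
+ */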
+ /* Read initial consumers & producers */
+ for (i = 0; i < MAX_NUM_VOQS; i++) {
+ u32 prod;
+
+ cons[i] = qed_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+ i * 0x40);
+ prod = qed_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
+ i * 0x40);
+ distance[i] = prod - cons[i];
+ }
+
+ /* Wait for consumers to pass the producers */
+ i = 0;
+ for (cnt = 0; cnt < 50; cnt++) {
+ for (; i < MAX_NUM_VOQS; i++) {
+ u32 tmp;
+
+ tmp = qed_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+ i * 0x40);
+ if (distance[i] > tmp - cons[i])
+ break;
+ }
+
+ if (i == MAX_NUM_VOQS)
+ break;
+
+ msleep(20);
+ }
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
+ p_vf->abs_vf_id, i);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ int rc;
+
+ rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int
+qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 rel_vf_id, u32 *ack_vfs)
+{
+ struct qed_vf_info *p_vf;
+ int rc = 0;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!p_vf)
+ return 0;
+
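+ /* pending_flr is a bitmap of relative VF ids packed into u64 words;
+ * act only if this VF's FLR indication is currently pending.
+ */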
+ if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+ (1ULL << (rel_vf_id % 64))) {
+ u16 vfid = p_vf->abs_vf_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - Handling FLR\n", vfid);
+
+ qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+ /* If VF isn't active, no need for anything but SW */
+ if (!p_vf->b_init)
+ goto cleanup;
+
+ rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ goto cleanup;
+
+ rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
+ if (rc) {
+ DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
+ return rc;
+ }
+
+ /* VF_STOPPED has to be set only after final cleanup
+ * but prior to re-enabling the VF.
+ */
+ p_vf->state = VF_STOPPED;
+
+ rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
+ if (rc) {
+ DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
+ vfid);
+ return rc;
+ }
+cleanup:
+ /* Mark VF for ack and clean pending state */
+ if (p_vf->state == VF_RESET)
+ p_vf->state = VF_STOPPED;
+ ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+ p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
+ ~(1ULL << (rel_vf_id % 64));
+ p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
+ ~(1ULL << (rel_vf_id % 64));
+ }
+
+ return rc;
+}
+
+int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 ack_vfs[VF_MAX_STATIC / 32];
+ int rc = 0;
+ u16 i;
+
+ memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+
+ /* Since BRB <-> PRS interface can't be tested as part of the flr
+ * polling due to HW limitations, simply sleep a bit. And since
+ * there's no need to wait per-vf, do it before looping.
+ */
+ msleep(100);
+
+ for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
+ qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
+
+ rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+ return rc;
+}
+
+int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
+{
+ u16 i, found = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "[%08x,...,%08x]: %08x\n",
+ i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
+
+ if (!p_hwfn->cdev->p_iov_info) {
+ DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
+ return 0;
+ }
+
+ /* Mark VFs */
+ for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
+ struct qed_vf_info *p_vf;
+ u8 vfid;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
+ if (!p_vf)
+ continue;
+
+ vfid = p_vf->abs_vf_id;
+ if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+ u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
+ u16 rel_vf_id = p_vf->relative_vf_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] [rel %d] got FLR-ed\n",
+ vfid, rel_vf_id);
+
+ p_vf->state = VF_RESET;
+
+ /* No need to lock here, since pending_flr should only change
+ * between this point and ACKing the MFW, and the MFW will not
+ * trigger an additional attention for VF FLR until we ACK.
+ */
+ p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
+ found = 1;
+ }
+ }
+
+ return found;
+}
+
+static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
+ u16 vfid,
+ struct qed_mcp_link_params *p_params,
+ struct qed_mcp_link_state *p_link,
+ struct qed_mcp_link_capabilities *p_caps)
+{
+ struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+ vfid,
+ false);
+ struct qed_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ if (p_params)
+ __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+ if (p_link)
+ __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+ if (p_caps)
+ __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+}
+
+static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int vfid)
+{
+ struct qed_iov_vf_mbx *mbx;
+ struct qed_vf_info *p_vf;
+ int i;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf)
+ return;
+
+ mbx = &p_vf->vf_mbx;
+
+ /* qed_iov_process_mbx_request */
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
+
+ mbx->first_tlv = mbx->req_virt->first_tlv;
+
+ /* check if tlv type is known */
+ if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ switch (mbx->first_tlv.tl.type) {
+ case CHANNEL_TLV_ACQUIRE:
+ qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_START:
+ qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_TEARDOWN:
+ qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_RXQ:
+ qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_TXQ:
+ qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_RXQS:
+ qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_TXQS:
+ qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UPDATE_RXQ:
+ qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_UPDATE:
+ qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UCAST_FILTER:
+ qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_CLOSE:
+ qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_INT_CLEANUP:
+ qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_RELEASE:
+ qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
+ break;
+ }
+ } else {
+ /* unknown TLV - this may belong to a VF driver from the future
+ * - a version written after this PF driver was written, which
+ * supports features unknown as of yet. Too bad since we don't
+ * support them. Or this may be because someone wrote a crappy
+ * VF driver and is sending garbage over the channel.
+ */
+ DP_ERR(p_hwfn,
+ "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
+ mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
+
+ for (i = 0; i < 20; i++) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "%x ",
+ mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
+ }
+ }
+}
+
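+ /* pending_events uses the same u64-word bitmap layout as pending_flr;
+ * bits are set from the EQ handler and drained by the IOV task.
+ */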
+void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+ u64 add_bit = 1ULL << (vfid % 64);
+
+ p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
+}
+
+static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
+ u64 *events)
+{
+ u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+
+ memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+ memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+}
+
+static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
+ u16 abs_vfid, struct regpair *vf_msg)
+{
+ u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+ struct qed_vf_info *p_vf;
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
+ abs_vfid);
+ return 0;
+ }
+ p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+
+ /* Record the physical address of the request so that the handler
+ * can later copy the message from it.
+ */
+ p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
+
+ /* Mark the event and schedule the workqueue */
+ qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
+ qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
+
+ return 0;
+}
+
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode, __le16 echo, union event_ring_data *data)
+{
+ switch (opcode) {
+ case COMMON_EVENT_VF_PF_CHANNEL:
+ return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
+ &data->vf_pf_channel.msg_addr);
+ default:
+ DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
+ opcode);
+ return -EINVAL;
+ }
+}
+
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+ u16 i;
+
+ if (!p_iov)
+ goto out;
+
+ for (i = rel_vf_id; i < p_iov->total_vfs; i++)
+ if (qed_iov_is_valid_vfid(p_hwfn, i, true))
+ return i;
+
+out:
+ return MAX_NUM_VFS;
+}
+
+static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
+ int vfid)
+{
+ struct qed_dmae_params params;
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info)
+ return -EINVAL;
+
+ memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
+ params.src_vfid = vf_info->abs_vf_id;
+
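+ /* DMAE transfer size is given in dwords, hence the division by 4 */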
+ if (qed_dmae_host2host(p_hwfn, ptt,
+ vf_info->vf_mbx.pending_req,
+ vf_info->vf_mbx.req_phys,
+ sizeof(union vfpf_tlvs) / 4, &params)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Failed to copy message from VF 0x%02x\n", vfid);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
+ u8 *mac, int vfid)
+{
+ struct qed_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ feature = 1 << MAC_ADDR_FORCED;
+ memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << VFPF_BULLETIN_MAC_ADDR);
+
+ qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
+ u16 pvid, int vfid)
+{
+ struct qed_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ feature = 1 << VLAN_ADDR_FORCED;
+ vf_info->bulletin.p_virt->pvid = pvid;
+ if (pvid)
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ else
+ vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
+
+ qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *p_vf_info;
+
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf_info)
+ return false;
+
+ return !!p_vf_info->vport_instance;
+}
+
+bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *p_vf_info;
+
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf_info)
+ return true;
+
+ return p_vf_info->state == VF_STOPPED;
+}
+
+static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info)
+ return false;
+
+ return vf_info->spoof_chk;
+}
+
+int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
+{
+ struct qed_vf_info *vf;
+ int rc = -EINVAL;
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set spoofchk\n");
+ goto out;
+ }
+
+ vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf)
+ goto out;
+
+ if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
+ /* After VF VPORT start PF will configure spoof check */
+ vf->req_spoofchk_val = val;
+ rc = 0;
+ goto out;
+ }
+
+ rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
+
+out:
+ return rc;
+}
+
+static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
+u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return 0;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+ return 0;
+
+ return p_vf->bulletin.p_virt->pvid;
+}
+
+static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int vfid, int val)
+{
+ struct qed_vf_info *vf;
+ u8 abs_vp_id = 0;
+ int rc;
+
+ vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf)
+ return -EINVAL;
+
+ rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
+ if (rc)
+ return rc;
+
+ return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+}
+
+int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+{
+ struct qed_vf_info *vf;
+ u8 vport_id;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set min rate\n");
+ return -EINVAL;
+ }
+ }
+
+ vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+ vport_id = vf->vport_id;
+
+ return qed_configure_vport_wfq(cdev, vport_id, rate);
+}
+
+static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_wfq_data *vf_vp_wfq;
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info)
+ return 0;
+
+ vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
+
+ if (vf_vp_wfq->configured)
+ return vf_vp_wfq->min_speed;
+ else
+ return 0;
+}
+
+/**
+ * qed_schedule_iov - schedules IOV task for VF and PF
+ * @hwfn: hardware function pointer
+ * @flag: IOV flag for VF/PF
+ */
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
+{
+ smp_mb__before_atomic();
+ set_bit(flag, &hwfn->iov_task_flags);
+ smp_mb__after_atomic();
+ DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+ queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
+}
+
+void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i)
+ queue_delayed_work(cdev->hwfns[i].iov_wq,
+ &cdev->hwfns[i].iov_task, 0);
+}
+
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+ int i, j;
+
+ for_each_hwfn(cdev, i)
+ if (cdev->hwfns[i].iov_wq)
+ flush_workqueue(cdev->hwfns[i].iov_wq);
+
+ /* Mark VFs for disablement */
+ qed_iov_set_vfs_to_disable(cdev, true);
+
+ if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
+ pci_disable_sriov(cdev->pdev);
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+ /* Failure to acquire the ptt in 100G mode creates an odd error
+ * where the first engine has already released IOV.
+ */
+ if (!ptt) {
+ DP_ERR(hwfn, "Failed to acquire ptt\n");
+ return -EBUSY;
+ }
+
+ /* Clean WFQ db and configure equal weight for all vports */
+ qed_clean_wfq_db(hwfn, ptt);
+
+ qed_for_each_vf(hwfn, j) {
+ int k;
+
+ if (!qed_iov_is_valid_vfid(hwfn, j, true))
+ continue;
+
+ /* Wait until VF is disabled before releasing */
+ for (k = 0; k < 100; k++) {
+ if (!qed_iov_is_vf_stopped(hwfn, j))
+ msleep(20);
+ else
+ break;
+ }
+
+ if (k < 100)
+ qed_iov_release_hw_for_vf(&cdev->hwfns[i],
+ ptt, j);
+ else
+ DP_ERR(hwfn,
+ "Timeout waiting for VF's FLR to end\n");
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ qed_iov_set_vfs_to_disable(cdev, false);
+
+ return 0;
+}
+
+static int qed_sriov_enable(struct qed_dev *cdev, int num)
+{
+ struct qed_sb_cnt_info sb_cnt_info;
+ int i, j, rc;
+
+ if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
+ DP_NOTICE(cdev, "Can start at most %d VFs\n",
+ RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
+ return -EINVAL;
+ }
+
+ /* Initialize HW for VF access */
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[j];
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+ int num_sbs = 0, limit = 16;
+
+ if (!ptt) {
+ DP_ERR(hwfn, "Failed to acquire ptt\n");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ if (IS_MF_DEFAULT(hwfn))
+ limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;
+
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(hwfn, &sb_cnt_info);
+ num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
+
+ for (i = 0; i < num; i++) {
+ if (!qed_iov_is_valid_vfid(hwfn, i, false))
+ continue;
+
+ rc = qed_iov_init_hw_for_vf(hwfn,
+ ptt, i, num_sbs / num);
+ if (rc) {
+ DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
+ qed_ptt_release(hwfn, ptt);
+ goto err;
+ }
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ /* Enable SRIOV PCIe functions */
+ rc = pci_enable_sriov(cdev->pdev, num);
+ if (rc) {
+ DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
+ goto err;
+ }
+
+ return num;
+
+err:
+ qed_sriov_disable(cdev, false);
+ return rc;
+}
+
+static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
+{
+ if (!IS_QED_SRIOV(cdev)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (num_vfs_param)
+ return qed_sriov_enable(cdev, num_vfs_param);
+ else
+ return qed_sriov_disable(cdev, true);
+}
+
+static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
+{
+ int i;
+
+ if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set a VF MAC; Sriov is not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
+ return -EINVAL;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf_info)
+ continue;
+
+ /* Set the forced MAC, and schedule the IOV task */
+ ether_addr_copy(vf_info->forced_mac, mac);
+ qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+ }
+
+ return 0;
+}
+
+static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
+{
+ int i;
+
+ if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set a VF MAC; Sriov is not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
+ return -EINVAL;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf_info)
+ continue;
+
+ /* Set the forced vlan, and schedule the IOV task */
+ vf_info->forced_vlan = vid;
+ qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+ }
+
+ return 0;
+}
+
+static int qed_get_vf_config(struct qed_dev *cdev,
+ int vf_id, struct ifla_vf_info *ivi)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_public_vf_info *vf_info;
+ struct qed_mcp_link_state link;
+ u32 tx_rate;
+
+ /* Sanitize request */
+ if (IS_VF(cdev))
+ return -EINVAL;
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "VF index [%d] isn't active\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+
+ qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+
+ /* Fill information about VF */
+ ivi->vf = vf_id;
+
+ if (is_valid_ether_addr(vf_info->forced_mac))
+ ether_addr_copy(ivi->mac, vf_info->forced_mac);
+ else
+ ether_addr_copy(ivi->mac, vf_info->mac);
+
+ ivi->vlan = vf_info->forced_vlan;
+ ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
+ ivi->linkstate = vf_info->link_state;
+ tx_rate = vf_info->tx_rate;
+ ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
+ ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
+
+ return 0;
+}
+
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+ struct qed_mcp_link_capabilities caps;
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ int i;
+
+ if (!hwfn->pf_iov_info)
+ return;
+
+ /* Update the bulletin of every possible future VF with the link configuration */
+ for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
+ if (!vf_info)
+ continue;
+
+ memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+ memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+ sizeof(caps));
+
+ /* Modify link according to the VF's configured link state */
+ switch (vf_info->link_state) {
+ case IFLA_VF_LINK_STATE_DISABLE:
+ link.link_up = false;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ link.link_up = true;
+ /* Set speed according to the maximum supported by HW:
+ * 40G for regular devices and 100G for CMT mode devices.
+ */
+ link.speed = (hwfn->cdev->num_hwfns > 1) ?
+ 100000 : 40000;
+ break;
+ default:
+ /* In auto mode pass PF link image to VF */
+ break;
+ }
+
+ if (link.link_up && vf_info->tx_rate) {
+ struct qed_ptt *ptt;
+ int rate;
+
+ rate = min_t(int, vf_info->tx_rate, link.speed);
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed to acquire PTT\n");
+ return;
+ }
+
+ if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
+ vf_info->tx_rate = rate;
+ link.speed = rate;
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ qed_iov_set_link(hwfn, i, &params, &link, &caps);
+ }
+
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+}
+
+static int qed_set_vf_link_state(struct qed_dev *cdev,
+ int vf_id, int link_state)
+{
+ int i;
+
+ /* Sanitize request */
+ if (IS_VF(cdev))
+ return -EINVAL;
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "VF index [%d] isn't active\n", vf_id);
+ return -EINVAL;
+ }
+
+ /* Handle configuration of link state */
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf;
+
+ vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+ if (!vf)
+ continue;
+
+ if (vf->link_state == link_state)
+ continue;
+
+ vf->link_state = link_state;
+ qed_inform_vf_link_state(&cdev->hwfns[i]);
+ }
+
+ return 0;
+}
+
+static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
+{
+ int i, rc = -EINVAL;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf;
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set tx rate\n");
+ return -EINVAL;
+ }
+
+ vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
+
+ vf->tx_rate = rate;
+
+ qed_inform_vf_link_state(p_hwfn);
+ }
+
+ return 0;
+}
+
+static int qed_set_vf_rate(struct qed_dev *cdev,
+ int vfid, u32 min_rate, u32 max_rate)
+{
+ int rc_min = 0, rc_max = 0;
+
+ if (max_rate)
+ rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
+
+ if (min_rate)
+ rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
+
+ if (rc_max | rc_min)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
+{
+ u64 events[QED_VF_ARRAY_LENGTH];
+ struct qed_ptt *ptt;
+ int i;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Can't acquire PTT; re-scheduling\n");
+ qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
+ return;
+ }
+
+ qed_iov_pf_get_and_clear_pending_events(hwfn, events);
+
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
+ events[0], events[1], events[2]);
+
+ qed_for_each_vf(hwfn, i) {
+ /* Skip VFs with no pending messages */
+ if (!(events[i / 64] & (1ULL << (i % 64))))
+ continue;
+
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
+ i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+ /* Copy VF's message to PF's request buffer for that VF */
+ if (qed_iov_copy_vf_msg(hwfn, ptt, i))
+ continue;
+
+ qed_iov_process_mbx_req(hwfn, ptt, i);
+ }
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
+{
+ int i;
+
+ qed_for_each_vf(hwfn, i) {
+ struct qed_public_vf_info *info;
+ bool update = false;
+ u8 *mac;
+
+ info = qed_iov_get_public_vf_info(hwfn, i, true);
+ if (!info)
+ continue;
+
+ /* Update data on bulletin board */
+ mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
+ if (is_valid_ether_addr(info->forced_mac) &&
+ (!mac || !ether_addr_equal(mac, info->forced_mac))) {
+ DP_VERBOSE(hwfn,
+ QED_MSG_IOV,
+ "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
+ i,
+ hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+ /* Update bulletin board with forced MAC */
+ qed_iov_bulletin_set_forced_mac(hwfn,
+ info->forced_mac, i);
+ update = true;
+ }
+
+ if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
+ info->forced_vlan) {
+ DP_VERBOSE(hwfn,
+ QED_MSG_IOV,
+ "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
+ info->forced_vlan,
+ i,
+ hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+ qed_iov_bulletin_set_forced_vlan(hwfn,
+ info->forced_vlan, i);
+ update = true;
+ }
+
+ if (update)
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+}
+
+static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
+{
+ struct qed_ptt *ptt;
+ int i;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ return;
+ }
+
+ qed_for_each_vf(hwfn, i)
+ qed_iov_post_vf_bulletin(hwfn, i, ptt);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+void qed_iov_pf_task(struct work_struct *work)
+{
+ struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+ iov_task.work);
+ int rc;
+
+ if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
+ return;
+
+ if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+ if (!ptt) {
+ qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
+ return;
+ }
+
+ rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
+ if (rc)
+ qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
+ qed_handle_vf_msg(hwfn);
+
+ if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+ &hwfn->iov_task_flags))
+ qed_handle_pf_set_vf_unicast(hwfn);
+
+ if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+ &hwfn->iov_task_flags))
+ qed_handle_bulletin_post(hwfn);
+}
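qed_iov_pf_task() is one worker multiplexed over several request flags: producers raise a bit via qed_schedule_iov() and the task drains whichever bits are set, re-raising a flag whenever a resource such as a PTT is unavailable. A stripped-down sketch of the flag protocol, with C11 atomics standing in for the kernel's test_and_clear_bit() (names invented):

#include <stdatomic.h>
#include <stdio.h>

enum { WQ_MSG, WQ_UNICAST, WQ_BULLETIN, WQ_STOP };

static atomic_uint task_flags;

/* Producer side: set the bit, then kick the worker
 * (queue_delayed_work() in the driver; omitted here).
 */
static void schedule_iov(unsigned int flag)
{
	atomic_fetch_or(&task_flags, 1u << flag);
}

/* Consumer side: test-and-clear, so a flag raised while its handler
 * runs is observed on the next pass instead of being lost.
 */
static int test_and_clear(unsigned int flag)
{
	return !!(atomic_fetch_and(&task_flags, ~(1u << flag)) & (1u << flag));
}

int main(void)
{
	schedule_iov(WQ_MSG);
	schedule_iov(WQ_BULLETIN);

	if (test_and_clear(WQ_STOP))
		return 0;
	if (test_and_clear(WQ_MSG))
		printf("handle VF messages\n");
	if (test_and_clear(WQ_UNICAST))
		printf("handle unicast filter changes\n");
	if (test_and_clear(WQ_BULLETIN))
		printf("post bulletin boards\n");
	return 0;
}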
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ if (!cdev->hwfns[i].iov_wq)
+ continue;
+
+ if (schedule_first) {
+ qed_schedule_iov(&cdev->hwfns[i],
+ QED_IOV_WQ_STOP_WQ_FLAG);
+ cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
+ }
+
+ flush_workqueue(cdev->hwfns[i].iov_wq);
+ destroy_workqueue(cdev->hwfns[i].iov_wq);
+ }
+}
+
+int qed_iov_wq_start(struct qed_dev *cdev)
+{
+ char name[NAME_SIZE];
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ /* PFs need a dedicated workqueue only if they support IOV.
+ * VFs always require one.
+ */
+ if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
+ continue;
+
+ snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
+ cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
+
+ p_hwfn->iov_wq = create_singlethread_workqueue(name);
+ if (!p_hwfn->iov_wq) {
+ DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
+ return -ENOMEM;
+ }
+
+ if (IS_PF(cdev))
+ INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
+ else
+ INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
+ }
+
+ return 0;
+}
+
+const struct qed_iov_hv_ops qed_iov_ops_pass = {
+ .configure = &qed_sriov_configure,
+ .set_mac = &qed_sriov_pf_set_mac,
+ .set_vlan = &qed_sriov_pf_set_vlan,
+ .get_config = &qed_get_vf_config,
+ .set_link_state = &qed_set_vf_link_state,
+ .set_spoof = &qed_spoof_configure,
+ .set_rate = &qed_set_vf_rate,
+};
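qed_iov_ops_pass packages the PF-side SR-IOV entry points as a const table of function pointers, so the consumer (the qede Ethernet driver) never calls the implementation directly. A toy sketch of the idiom; the struct and callbacks below are hypothetical, not qed's:

#include <stdio.h>

struct iov_hv_ops {
	int (*set_spoof)(void *dev, int vfid, int val);
	int (*set_rate)(void *dev, int vfid, int min_rate, int max_rate);
};

static int demo_set_spoof(void *dev, int vfid, int val)
{
	printf("spoofchk %s on VF %d\n", val ? "on" : "off", vfid);
	return 0;
}

static int demo_set_rate(void *dev, int vfid, int min_rate, int max_rate)
{
	printf("VF %d rate: min %d, max %d\n", vfid, min_rate, max_rate);
	return 0;
}

/* The caller binds to this table, not to the functions themselves. */
static const struct iov_hv_ops demo_ops = {
	.set_spoof = demo_set_spoof,
	.set_rate = demo_set_rate,
};

int main(void)
{
	demo_ops.set_spoof(NULL, 3, 1);
	return demo_ops.set_rate(NULL, 3, 0, 1000);
}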
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
new file mode 100644
index 000000000..c90b2b6ad
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -0,0 +1,388 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_SRIOV_H
+#define _QED_SRIOV_H
+#include <linux/types.h>
+#include "qed_vf.h"
+#define QED_VF_ARRAY_LENGTH (3)
+
+#ifdef CONFIG_QED_SRIOV
+#define IS_VF(cdev) ((cdev)->b_is_vf)
+#define IS_PF(cdev) (!((cdev)->b_is_vf))
+#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
+#else
+#define IS_VF(cdev) (0)
+#define IS_PF(cdev) (1)
+#define IS_PF_SRIOV(p_hwfn) (0)
+#endif
+#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
+
+#define QED_MAX_VF_CHAINS_PER_PF 16
+#define QED_ETH_VF_NUM_VLAN_FILTERS 2
+
+#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
+ (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
+
+enum qed_iov_vport_update_flag {
+ QED_IOV_VP_UPDATE_ACTIVATE,
+ QED_IOV_VP_UPDATE_VLAN_STRIP,
+ QED_IOV_VP_UPDATE_TX_SWITCH,
+ QED_IOV_VP_UPDATE_MCAST,
+ QED_IOV_VP_UPDATE_ACCEPT_PARAM,
+ QED_IOV_VP_UPDATE_RSS,
+ QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
+ QED_IOV_VP_UPDATE_SGE_TPA,
+ QED_IOV_VP_UPDATE_MAX,
+};
+
+struct qed_public_vf_info {
+ /* These copies will later be reflected in the bulletin board,
+ * but this copy should be newer.
+ */
+ u8 forced_mac[ETH_ALEN];
+ u16 forced_vlan;
+ u8 mac[ETH_ALEN];
+
+ /* IFLA_VF_LINK_STATE_<X> */
+ int link_state;
+
+ /* Currently configured Tx rate in MB/sec. 0 if unconfigured */
+ int tx_rate;
+};
+
+/* This struct is part of qed_dev and contains data relevant to all hwfns;
+ * Initialized only if the SR-IOV capability is exposed in PCIe config space.
+ */
+struct qed_hw_sriov_info {
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total_vfs; /* total VFs associated with the PF */
+ u16 num_vfs; /* number of vfs that have been started */
+ u16 initial_vfs; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u16 vf_device_id; /* VF device id */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+
+ u32 first_vf_in_pf;
+};
+
+/* This mailbox is maintained per VF in its PF and contains all information
+ * required for sending / receiving a message.
+ */
+struct qed_iov_vf_mbx {
+ union vfpf_tlvs *req_virt;
+ dma_addr_t req_phys;
+ union pfvf_tlvs *reply_virt;
+ dma_addr_t reply_phys;
+
+ /* Address in VF where a pending message is located */
+ dma_addr_t pending_req;
+
+ u8 *offset;
+
+ /* saved VF request header */
+ struct vfpf_first_tlv first_tlv;
+};
+
+struct qed_vf_q_info {
+ u16 fw_rx_qid;
+ u16 fw_tx_qid;
+ u8 fw_cid;
+ u8 rxq_active;
+ u8 txq_active;
+};
+
+enum vf_state {
+ VF_FREE = 0, /* VF ready to be acquired; holds no resources */
+ VF_ACQUIRED, /* VF, acquired, but not initialized */
+ VF_ENABLED, /* VF, Enabled */
+ VF_RESET, /* VF, FLR'd, pending cleanup */
+ VF_STOPPED /* VF, Stopped */
+};
+
+struct qed_vf_vlan_shadow {
+ bool used;
+ u16 vid;
+};
+
+struct qed_vf_shadow_config {
+ /* Shadow copy of all guest vlans */
+ struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
+
+ u8 inner_vlan_removal;
+};
+
+/* PFs maintain an array of this structure, per VF */
+struct qed_vf_info {
+ struct qed_iov_vf_mbx vf_mbx;
+ enum vf_state state;
+ bool b_init;
+ u8 to_disable;
+
+ struct qed_bulletin bulletin;
+ dma_addr_t vf_bulletin;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 relative_vf_id;
+ u8 abs_vf_id;
+#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \
+ (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
+ (p_vf)->abs_vf_id)
+
+ u8 vport_instance;
+ u8 num_rxqs;
+ u8 num_txqs;
+
+ u8 num_sbs;
+
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
+ u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
+ u8 num_active_rxqs;
+ struct qed_public_vf_info p_vf_info;
+ bool spoof_chk;
+ bool req_spoofchk_val;
+
+ /* Stores the configuration requested by VF */
+ struct qed_vf_shadow_config shadow_config;
+
+ /* A bitfield using bulletin's valid-map bits, used to indicate
+ * which of the bulletin board features have been configured.
+ */
+ u64 configured_features;
+#define QED_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
+ (1 << VLAN_ADDR_FORCED))
+};
+
+/* This structure is part of qed_hwfn and used only for PFs that have sriov
+ * capability enabled.
+ */
+struct qed_pf_iov {
+ struct qed_vf_info vfs_array[MAX_NUM_VFS];
+ u64 pending_events[QED_VF_ARRAY_LENGTH];
+ u64 pending_flr[QED_VF_ARRAY_LENGTH];
+
+ /* Message buffers are allocated contiguously and split among the VFs */
+ void *mbx_msg_virt_addr;
+ dma_addr_t mbx_msg_phys_addr;
+ u32 mbx_msg_size;
+ void *mbx_reply_virt_addr;
+ dma_addr_t mbx_reply_phys_addr;
+ u32 mbx_reply_size;
+ void *p_bulletins;
+ dma_addr_t bulletins_phys;
+ u32 bulletins_size;
+};
+
+enum qed_iov_wq_flag {
+ QED_IOV_WQ_MSG_FLAG,
+ QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+ QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+ QED_IOV_WQ_STOP_WQ_FLAG,
+ QED_IOV_WQ_FLR_FLAG,
+};
+
+#ifdef CONFIG_QED_SRIOV
+/**
+ * @brief - Given a VF index, return the index of the next active VF,
+ *          including that index itself.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return MAX_NUM_VFS if there are no further active VFs, otherwise the index.
+ */
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief Read sriov-related information and allocate resources;
+ *        reads from configuration space, shmem, etc.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
+ *
+ * @param p_hwfn
+ * @param p_iov
+ * @param type
+ * @param length
+ *
+ * @return pointer to the newly placed tlv
+ */
+void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
+
+/**
+ * @brief list the types and lengths of the tlvs on the buffer
+ *
+ * @param p_hwfn
+ * @param tlvs_list
+ */
+void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
+
+/**
+ * @brief qed_iov_alloc - allocate sriov related resources
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_iov_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_iov_setup - setup sriov related resources
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_iov_free - free sriov related resources
+ *
+ * @param p_hwfn
+ */
+void qed_iov_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief free sriov related memory that was allocated during hw_prepare
+ *
+ * @param cdev
+ */
+void qed_iov_free_hw_info(struct qed_dev *cdev);
+
+/**
+ * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
+ *
+ * @param p_hwfn
+ * @param opcode
+ * @param echo
+ * @param data
+ */
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode, __le16 echo, union event_ring_data *data);
+
+/**
+ * @brief Mark structs of vfs that have been FLR-ed.
+ *
+ * @param p_hwfn
+ * @param disabled_vfs - bitmask of all VFs on path that were FLRed
+ *
+ * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ */
+int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+
+/**
+ * @brief Search extended TLVs in request/reply buffer.
+ *
+ * @param p_hwfn
+ * @param p_tlvs_list - Pointer to tlvs list
+ * @param req_type - Type of TLV
+ *
+ * @return pointer to tlv type if found, otherwise returns NULL.
+ */
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type);
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
+int qed_iov_wq_start(struct qed_dev *cdev);
+
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
+void qed_vf_start_iov_wq(struct qed_dev *cdev);
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
+#else
+static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ return MAX_NUM_VFS;
+}
+
+static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+{
+ return 0;
+}
+
+static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
+{
+ return 0;
+}
+
+static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+}
+
+static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
+{
+}
+
+static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
+{
+}
+
+static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo, union event_ring_data *data)
+{
+ return -EINVAL;
+}
+
+static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
+ u32 *disabled_vfs)
+{
+ return 0;
+}
+
+static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+}
+
+static inline int qed_iov_wq_start(struct qed_dev *cdev)
+{
+ return 0;
+}
+
+static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
+ enum qed_iov_wq_flag flag)
+{
+}
+
+static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+}
+
+static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+ return 0;
+}
+
+static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+}
+#endif
+
+#define qed_for_each_vf(_p_hwfn, _i) \
+ for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
+ _i < MAX_NUM_VFS; \
+ _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
new file mode 100644
index 000000000..72e69c0ec
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -0,0 +1,1102 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/crc32.h>
+#include <linux/etherdevice.h>
+#include "qed.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ void *p_tlv;
+
+ /* This lock is released when we receive PF's response
+ * in qed_send_msg2pf().
+ * So, qed_vf_pf_prep() and qed_send_msg2pf()
+ * must come in sequence.
+ */
+ mutex_lock(&(p_iov->mutex));
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "preparing to send 0x%04x tlv over vf pf channel\n",
+ type);
+
+ /* Reset request offset */
+ p_iov->offset = (u8 *)p_iov->vf2pf_request;
+
+ /* Clear mailbox - both request and reply */
+ memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
+ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
+ /* Init type and length */
+ p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);
+
+ /* Init first tlv header */
+ ((struct vfpf_first_tlv *)p_tlv)->reply_address =
+ (u64)p_iov->pf2vf_reply_phys;
+
+ return p_tlv;
+}
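Every VF request is built the same way: qed_vf_pf_prep() rewinds the offset to the start of the shared request buffer, callers append TLVs through qed_add_tlv(), and a CHANNEL_TLV_LIST_END record terminates the list. A self-contained sketch of the offset-advancing append (the type values are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct channel_tlv {
	uint16_t type;
	uint16_t length;
};

/* Place a TLV header at *offset and advance past the whole TLV, as
 * qed_add_tlv() does on the shared request buffer.
 */
static void *add_tlv(uint8_t **offset, uint16_t type, uint16_t length)
{
	struct channel_tlv *tlv = (struct channel_tlv *)*offset;

	tlv->type = type;
	tlv->length = length;
	*offset += length;           /* next TLV starts right after this one */
	return tlv;
}

int main(void)
{
	uint8_t buf[64];
	uint8_t *offset = buf;

	memset(buf, 0, sizeof(buf));
	add_tlv(&offset, 1, 16);                         /* a request TLV */
	add_tlv(&offset, 2, sizeof(struct channel_tlv)); /* list terminator */
	printf("request is %td bytes\n", offset - buf);
	return 0;
}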
+
+static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
+{
+ union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
+ struct ustorm_trigger_vf_zone trigger;
+ struct ustorm_vf_zone *zone_data;
+ int rc = 0, time = 100;
+
+ zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
+
+ /* output tlvs list */
+ qed_dp_tlv_list(p_hwfn, p_req);
+
+ /* need to add the END TLV to the message size */
+ resp_size += sizeof(struct channel_list_end_tlv);
+
+ /* Send TLVs over HW channel */
+ memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
+ trigger.vf_pf_msg_valid = 1;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
+ GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID),
+ upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ &zone_data->non_trigger.vf_pf_msg_addr,
+ *((u32 *)&trigger), &zone_data->trigger);
+
+ REG_WR(p_hwfn,
+ (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
+ lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ REG_WR(p_hwfn,
+ (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
+ upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ /* The message data must be written first, to prevent trigger before
+ * data is written.
+ */
+ wmb();
+
+ REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
+
+ /* When the PF is done with the response, it writes back to the
+ * `done' address. Poll until then.
+ */
+ while ((!*done) && time) {
+ msleep(25);
+ time--;
+ }
+
+ if (!*done) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF <-- PF Timeout [Type %d]\n",
+ p_req->first_tlv.tl.type);
+ rc = -EBUSY;
+ goto exit;
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ }
+
+exit:
+ mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
+
+ return rc;
+}
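The tail of qed_send_msg2pf() is a bounded busy-wait: the PF signals completion by writing a nonzero status into the `done' byte, which the VF polls up to 100 times at 25ms intervals before giving up with -EBUSY. The same shape, reduced to a runnable sketch:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Poll the 'done' byte that the responder writes, giving up after
 * max_tries * delay_us microseconds (the loop above uses 100 x 25ms).
 */
static int poll_done(volatile uint8_t *done, int max_tries,
		     unsigned int delay_us)
{
	while (!*done && max_tries--)
		usleep(delay_us);

	return *done ? 0 : -1;       /* -1 stands in for -EBUSY */
}

int main(void)
{
	volatile uint8_t done = 1;   /* pretend the PF already answered */

	printf("rc = %d\n", poll_done(&done, 100, 25000));
	return 0;
}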
+
+#define VF_ACQUIRE_THRESH 3
+#define VF_ACQUIRE_MAC_FILTERS 1
+
+static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ u8 rx_count = 1, tx_count = 1, num_sbs = 1;
+ u8 num_mac = VF_ACQUIRE_MAC_FILTERS;
+ bool resources_acquired = false;
+ struct vfpf_acquire_tlv *req;
+ int rc = 0, attempts = 0;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+
+ /* start filling the request */
+ req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ req->resc_request.num_rxqs = rx_count;
+ req->resc_request.num_txqs = tx_count;
+ req->resc_request.num_sbs = num_sbs;
+ req->resc_request.num_mac_filters = num_mac;
+ req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+
+ req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
+ req->vfdev_info.fw_major = FW_MAJOR_VERSION;
+ req->vfdev_info.fw_minor = FW_MINOR_VERSION;
+ req->vfdev_info.fw_revision = FW_REVISION_VERSION;
+ req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+
+ /* Fill capability field with any non-deprecated config we support */
+ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
+
+ /* pf 2 vf bulletin board address */
+ req->bulletin_addr = p_iov->bulletin.phys;
+ req->bulletin_size = p_iov->bulletin.size;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ while (!resources_acquired) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV, "attempting to acquire resources\n");
+
+ /* send acquire request */
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ /* copy acquire response from buffer to p_hwfn */
+ memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));
+
+ attempts++;
+
+ if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ /* PF agrees to allocate our resources */
+ if (!(resp->pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
+ DP_INFO(p_hwfn,
+ "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
+ return -EINVAL;
+ }
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
+ resources_acquired = true;
+ } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
+ attempts < VF_ACQUIRE_THRESH) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "PF unwilling to fullfill resource request. Try PF recommended amount\n");
+
+ /* humble our request */
+ req->resc_request.num_txqs = resp->resc.num_txqs;
+ req->resc_request.num_rxqs = resp->resc.num_rxqs;
+ req->resc_request.num_sbs = resp->resc.num_sbs;
+ req->resc_request.num_mac_filters =
+ resp->resc.num_mac_filters;
+ req->resc_request.num_vlan_filters =
+ resp->resc.num_vlan_filters;
+
+ /* Clear response buffer */
+ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+ } else {
+ DP_ERR(p_hwfn,
+ "PF returned error %d to VF acquisition request\n",
+ resp->hdr.status);
+ return -EAGAIN;
+ }
+ }
+
+ /* Update bulletin board size with response from PF */
+ p_iov->bulletin.size = resp->bulletin_size;
+
+ /* get HW info */
+ p_hwfn->cdev->type = resp->pfdev_info.dev_type;
+ p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;
+
+ p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;
+
+ /* Learn of the possibility of CMT */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
+ DP_NOTICE(p_hwfn, "100g VF\n");
+ p_hwfn->cdev->num_hwfns = 2;
+ }
+ }
+
+ return 0;
+}
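The acquire loop above is a small negotiation: the VF asks for the resources it wants, and on PFVF_STATUS_NO_RESOURCE it adopts the PF's recommended amounts and retries, bounded by VF_ACQUIRE_THRESH. A toy model of that back-off; the provider's grant policy here is invented:

#include <stdio.h>

#define ACQUIRE_THRESH 3

struct resc { int rxqs, txqs; };

enum status { ST_SUCCESS, ST_NO_RESOURCE, ST_FAILURE };

/* Toy provider: grants at most 4 queues of each kind and suggests
 * that amount in response to an oversized request.
 */
static enum status provider_acquire(const struct resc *req,
				    struct resc *granted)
{
	if (req->rxqs > 4 || req->txqs > 4) {
		granted->rxqs = 4;
		granted->txqs = 4;
		return ST_NO_RESOURCE;
	}
	*granted = *req;
	return ST_SUCCESS;
}

int main(void)
{
	struct resc req = { 16, 16 }, resp;
	int attempts = 0;

	for (;;) {
		enum status st = provider_acquire(&req, &resp);

		if (st == ST_SUCCESS) {
			printf("acquired %d/%d queues\n", resp.rxqs, resp.txqs);
			return 0;
		}
		if (st == ST_NO_RESOURCE && ++attempts < ACQUIRE_THRESH) {
			req = resp;          /* humble the request and retry */
			continue;
		}
		fprintf(stderr, "acquisition failed\n");
		return 1;
	}
}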
+
+int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov;
+ u32 reg;
+
+ /* Set number of hwfns - might be overridden once the leading hwfn learns
+ * actual configuration from PF.
+ */
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->num_hwfns = 1;
+
+ /* Set the doorbell bar. Assumption: regview is set */
+ p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+
+ reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
+
+ reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);
+
+ /* Allocate vf sriov info */
+ p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate vf2pf msg */
+ p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ &p_iov->vf2pf_request_phys,
+ GFP_KERNEL);
+ if (!p_iov->vf2pf_request) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate `vf2pf_request' DMA memory\n");
+ goto free_p_iov;
+ }
+
+ p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ &p_iov->pf2vf_reply_phys,
+ GFP_KERNEL);
+ if (!p_iov->pf2vf_reply) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate `pf2vf_reply' DMA memory\n");
+ goto free_vf2pf_request;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
+ p_iov->vf2pf_request,
+ (u64) p_iov->vf2pf_request_phys,
+ p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);
+
+ /* Allocate Bulletin board */
+ p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
+ p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov->bulletin.size,
+ &p_iov->bulletin.phys,
+ GFP_KERNEL);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
+ p_iov->bulletin.p_virt,
+ (u64)p_iov->bulletin.phys, p_iov->bulletin.size);
+
+ mutex_init(&p_iov->mutex);
+
+ p_hwfn->vf_iov_info = p_iov;
+
+ p_hwfn->hw_info.personality = QED_PCI_ETH;
+
+ return qed_vf_pf_acquire(p_hwfn);
+
+free_vf2pf_request:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
+free_p_iov:
+ kfree(p_iov);
+
+ return -ENOMEM;
+}
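qed_vf_hw_prepare() follows the kernel's goto-unwind idiom: each successful allocation gains a cleanup label, and a later failure jumps to the label that releases everything acquired so far, in reverse order. The skeleton of the pattern:

#include <stdio.h>
#include <stdlib.h>

static int prepare(void)
{
	char *a, *b;

	a = malloc(64);
	if (!a)
		goto fail;

	b = malloc(64);
	if (!b)
		goto free_a;         /* undo only what succeeded */

	printf("both buffers allocated\n");
	free(b);
	free(a);
	return 0;

free_a:
	free(a);
fail:
	return -1;
}

int main(void)
{
	return prepare();
}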
+
+int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ u8 rx_qid,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
+ struct vfpf_start_rxq_tlv *req;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
+
+ req->rx_qid = rx_qid;
+ req->cqe_pbl_addr = cqe_pbl_addr;
+ req->cqe_pbl_size = cqe_pbl_size;
+ req->rxq_addr = bd_chain_phys_addr;
+ req->hw_sb = sb;
+ req->sb_index = sb_index;
+ req->bd_max_bytes = bd_max_bytes;
+ req->stat_id = -1;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->queue_start;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ /* Learn the address of the producer from the response */
+ if (pp_prod) {
+ u64 init_prod_val = 0;
+
+ *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
+ rx_qid, *pp_prod, resp->offset);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ (u32 *)&init_prod_val);
+ }
+
+ return rc;
+}
+
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_rxqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
+
+ req->rx_qid = rx_qid;
+ req->num_rxqs = 1;
+ req->cqe_completion = cqe_completion;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_start_txq_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
+
+ req->tx_qid = tx_queue_id;
+
+ /* Tx */
+ req->pbl_addr = pbl_addr;
+ req->pbl_size = pbl_size;
+ req->hw_sb = sb;
+ req->sb_index = sb_index;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ if (pp_doorbell) {
+ u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
+
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(cid, DQ_DEMS_LEGACY);
+ }
+
+ return rc;
+}
+
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_txqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
+
+ req->tx_qid = tx_qid;
+ req->num_txqs = 1;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe, u8 only_untagged)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_start_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc, i;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
+
+ req->mtu = mtu;
+ req->vport_id = vport_id;
+ req->inner_vlan_removal = inner_vlan_removal;
+ req->tpa_mode = tpa_mode;
+ req->max_buffers_per_cqe = max_buffers_per_cqe;
+ req->only_untagged = only_untagged;
+
+ /* status blocks */
+ for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
+ if (p_hwfn->sbs_info[i])
+ req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+static bool
+qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ u16 tlv)
+{
+ switch (tlv) {
+ case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
+ return !!(p_data->update_vport_active_rx_flg ||
+ p_data->update_vport_active_tx_flg);
+ case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
+ return !!p_data->update_tx_switching_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
+ return !!p_data->update_inner_vlan_removal_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
+ return !!p_data->update_accept_any_vlan_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_MCAST:
+ return !!p_data->update_approx_mcast_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
+ return !!(p_data->accept_flags.update_rx_mode_config ||
+ p_data->accept_flags.update_tx_mode_config);
+ case CHANNEL_TLV_VPORT_UPDATE_RSS:
+ return !!p_data->rss_params;
+ case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
+ return !!p_data->sge_tpa_params;
+ default:
+ DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
+ tlv);
+ return false;
+ }
+}
+
+static void
+qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *p_resp;
+ u16 tlv;
+
+ for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
+ if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
+ continue;
+
+ p_resp = (struct pfvf_def_resp_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
+ tlv);
+ if (p_resp && p_resp->hdr.status)
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "TLV[%d] Configuration %s\n",
+ tlv,
+ (p_resp && p_resp->hdr.status) ? "succeeded"
+ : "failed");
+ }
+}
+
+int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_update_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ u8 update_rx, update_tx;
+ u32 resp_size = 0;
+ u16 size, tlv;
+ int rc;
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ resp_size = sizeof(*resp);
+
+ update_rx = p_params->update_vport_active_rx_flg;
+ update_tx = p_params->update_vport_active_tx_flg;
+
+ /* clear mailbox and prep header tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
+
+ /* Prepare extended tlvs */
+ if (update_rx || update_tx) {
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+
+ size = sizeof(struct vfpf_vport_update_activate_tlv);
+ p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_act_tlv->update_rx = update_rx;
+ p_act_tlv->active_rx = p_params->vport_active_rx_flg;
+ }
+
+ if (update_tx) {
+ p_act_tlv->update_tx = update_tx;
+ p_act_tlv->active_tx = p_params->vport_active_tx_flg;
+ }
+ }
+
+ if (p_params->update_tx_switching_flg) {
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+
+ size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
+ }
+
+ if (p_params->update_approx_mcast_flg) {
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+
+ size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
+ p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ memcpy(p_mcast_tlv->bins, p_params->bins,
+ sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ }
+
+ update_rx = p_params->accept_flags.update_rx_mode_config;
+ update_tx = p_params->accept_flags.update_tx_mode_config;
+
+ if (update_rx || update_tx) {
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ size = sizeof(struct vfpf_vport_update_accept_param_tlv);
+ p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_accept_tlv->update_rx_mode = update_rx;
+ p_accept_tlv->rx_accept_filter =
+ p_params->accept_flags.rx_accept_filter;
+ }
+
+ if (update_tx) {
+ p_accept_tlv->update_tx_mode = update_tx;
+ p_accept_tlv->tx_accept_filter =
+ p_params->accept_flags.tx_accept_filter;
+ }
+ }
+
+ if (p_params->rss_params) {
+ struct qed_rss_params *rss_params = p_params->rss_params;
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+
+ size = sizeof(struct vfpf_vport_update_rss_tlv);
+ p_rss_tlv = qed_add_tlv(p_hwfn,
+ &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_RSS, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (rss_params->update_rss_config)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CONFIG_FLAG;
+ if (rss_params->update_rss_capabilities)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CAPS_FLAG;
+ if (rss_params->update_rss_ind_table)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG;
+ if (rss_params->update_rss_key)
+ p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;
+
+ p_rss_tlv->rss_enable = rss_params->rss_enable;
+ p_rss_tlv->rss_caps = rss_params->rss_caps;
+ p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
+ memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
+ sizeof(rss_params->rss_ind_table));
+ memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
+ sizeof(rss_params->rss_key));
+ }
+
+ if (p_params->update_accept_any_vlan_flg) {
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
+
+ size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+ p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
+ p_any_vlan_tlv->update_accept_any_vlan_flg =
+ p_params->update_accept_any_vlan_flg;
+ }
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+
+ return rc;
+}
+
+int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EAGAIN;
+
+ p_hwfn->b_int_enabled = 0;
+
+ return 0;
+}
+
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ u32 size;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
+ rc = -EAGAIN;
+
+ p_hwfn->b_int_enabled = 0;
+
+ if (p_iov->vf2pf_request)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys);
+ if (p_iov->pf2vf_reply)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
+
+ if (p_iov->bulletin.p_virt) {
+ size = sizeof(struct qed_bulletin_content);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ p_iov->bulletin.p_virt, p_iov->bulletin.phys);
+ }
+
+ kfree(p_hwfn->vf_iov_info);
+ p_hwfn->vf_iov_info = NULL;
+
+ return rc;
+}
+
+void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd)
+{
+ struct qed_sp_vport_update_params sp_params;
+ int i;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.update_approx_mcast_flg = 1;
+
+ if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ __set_bit(bit, sp_params.bins);
+ }
+ }
+
+ qed_vf_pf_vport_update(p_hwfn, &sp_params);
+}
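Multicast filtering over this channel is approximate: each address hashes to a bin, only the bin bits travel to the PF, and distinct MACs may collide into one bin, so exact matching still happens in software. A sketch of the idea; the real mapping is qed_mcast_bin_from_mac() in the L2 code, whose exact hash is not shown here:

#include <stdint.h>
#include <stdio.h>

#define MCAST_BINS 256

/* Any stable hash illustrates the point; the driver derives the bin
 * from a checksum of the MAC.
 */
static unsigned int bin_from_mac(const uint8_t mac[6])
{
	unsigned int h = 0, i;

	for (i = 0; i < 6; i++)
		h = h * 31 + mac[i];
	return h % MCAST_BINS;
}

int main(void)
{
	uint64_t bins[MCAST_BINS / 64] = { 0 };
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	unsigned int bit = bin_from_mac(mac);

	bins[bit / 64] |= 1ULL << (bit % 64);   /* program the filter */

	/* Receive path: a hit means "maybe ours" - false positives are
	 * possible, so the exact address is still checked afterwards.
	 */
	printf("bin %u set: %d\n", bit,
	       !!(bins[bit / 64] & (1ULL << (bit % 64))));
	return 0;
}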
+
+int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_ucast)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_ucast_filter_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
+ req->opcode = (u8) p_ucast->opcode;
+ req->type = (u8) p_ucast->type;
+ memcpy(req->mac, p_ucast->mac, ETH_ALEN);
+ req->vlan = p_ucast->vlan;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EAGAIN;
+
+ return 0;
+}
+
+int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
+ return 0;
+ }
+
+ return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
+}
+
+int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct qed_bulletin_content shadow;
+ u32 crc, crc_size;
+
+ crc_size = sizeof(p_iov->bulletin.p_virt->crc);
+ *p_change = 0;
+
+ /* Need to guarantee PF is not in the middle of writing it */
+ memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
+
+ /* If version did not update, no need to do anything */
+ if (shadow.version == p_iov->bulletin_shadow.version)
+ return 0;
+
+ /* Verify the bulletin we see is valid */
+ crc = crc32(0, (u8 *)&shadow + crc_size,
+ p_iov->bulletin.size - crc_size);
+ if (crc != shadow.crc)
+ return -EAGAIN;
+
+ /* Set the shadow bulletin and process it */
+ memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Read a bulletin update %08x\n", shadow.version);
+
+ *p_change = 1;
+
+ return 0;
+}
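qed_vf_read_bulletin() validates a lock-free snapshot: copy the shared page, then accept the copy only if the CRC computed over everything after the crc field matches, so a read torn by a concurrent PF update is simply retried on the next poll. A user-space sketch of both sides (uses zlib's crc32(); build with -lz):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <zlib.h>

struct bulletin {
	uint32_t crc;        /* covers everything after this field */
	uint32_t version;
	uint8_t payload[56];
};

/* Writer side: bump the version, fill the payload, then seal with a
 * CRC so a reader can detect a torn (mid-update) copy.
 */
static void bulletin_post(struct bulletin *b)
{
	b->version++;
	b->crc = crc32(0, (const uint8_t *)b + sizeof(b->crc),
		       sizeof(*b) - sizeof(b->crc));
}

/* Reader side: snapshot first, then accept only if the CRC matches. */
static int bulletin_read(const struct bulletin *shared, struct bulletin *out)
{
	struct bulletin shadow;

	memcpy(&shadow, shared, sizeof(shadow));
	if (crc32(0, (const uint8_t *)&shadow + sizeof(shadow.crc),
		  sizeof(shadow) - sizeof(shadow.crc)) != shadow.crc)
		return -1;           /* torn copy; caller retries later */
	*out = shadow;
	return 0;
}

int main(void)
{
	struct bulletin shared = { 0 }, snap;

	bulletin_post(&shared);
	printf("read rc=%d version=%u\n",
	       bulletin_read(&shared, &snap), snap.version);
	return 0;
}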
+
+void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *p_params,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_params, 0, sizeof(*p_params));
+
+ p_params->speed.autoneg = p_bulletin->req_autoneg;
+ p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
+ p_params->speed.forced_speed = p_bulletin->req_forced_speed;
+ p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
+ p_params->pause.forced_rx = p_bulletin->req_forced_rx;
+ p_params->pause.forced_tx = p_bulletin->req_forced_tx;
+ p_params->loopback_mode = p_bulletin->req_loopback;
+}
+
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params)
+{
+ __qed_vf_get_link_params(p_hwfn, params,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_link, 0, sizeof(*p_link));
+
+ p_link->link_up = p_bulletin->link_up;
+ p_link->speed = p_bulletin->speed;
+ p_link->full_duplex = p_bulletin->full_duplex;
+ p_link->an = p_bulletin->autoneg;
+ p_link->an_complete = p_bulletin->autoneg_complete;
+ p_link->parallel_detection = p_bulletin->parallel_detection;
+ p_link->pfc_enabled = p_bulletin->pfc_enabled;
+ p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
+ p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
+ p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
+ p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
+ p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
+}
+
+void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link)
+{
+ __qed_vf_get_link_state(p_hwfn, link,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_link_caps, 0, sizeof(*p_link_caps));
+ p_link_caps->speed_capabilities = p_bulletin->capability_speed;
+}
+
+void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps)
+{
+ __qed_vf_get_link_caps(p_hwfn, p_link_caps,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
+{
+ *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
+}
+
+void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
+{
+ memcpy(port_mac,
+ p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
+}
+
+void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
+{
+ struct qed_vf_iov *p_vf;
+
+ p_vf = p_hwfn->vf_iov_info;
+ *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
+}
+
+bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
+{
+ struct qed_bulletin_content *bulletin;
+
+ bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+ if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return true;
+
+ /* Forbid VF from changing a MAC enforced by PF */
+ if (ether_addr_equal(bulletin->mac, mac))
+ return false;
+
+ return false;
+}
+
+bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
+ u8 *dst_mac, u8 *p_is_forced)
+{
+ struct qed_bulletin_content *bulletin;
+
+ bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
+ if (p_is_forced)
+ *p_is_forced = 1;
+ } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
+ if (p_is_forced)
+ *p_is_forced = 0;
+ } else {
+ return false;
+ }
+
+ ether_addr_copy(dst_mac, bulletin->mac);
+
+ return true;
+}
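The bulletin publishes the MAC in two modes, distinguished by valid_bitmap: MAC_ADDR_FORCED means the VF must use it, while VFPF_BULLETIN_MAC_ADDR is only a suggestion. A compact sketch of that gating, mirroring qed_vf_bulletin_get_forced_mac() with invented helpers:

#include <stdint.h>
#include <stdio.h>

enum { MAC_ADDR_FORCED = 0, VLAN_ADDR_FORCED = 2, BULLETIN_MAC_ADDR = 5 };

/* Each optional bulletin field is meaningful only when its bit is set
 * in valid_bitmap; a reader checks the bit before consuming the field.
 */
static int get_mac(uint64_t valid_bitmap, const uint8_t *mac,
		   uint8_t *dst, int *forced)
{
	if (valid_bitmap & (1ULL << MAC_ADDR_FORCED))
		*forced = 1;
	else if (valid_bitmap & (1ULL << BULLETIN_MAC_ADDR))
		*forced = 0;
	else
		return 0;                    /* no MAC published */

	for (int i = 0; i < 6; i++)
		dst[i] = mac[i];
	return 1;
}

int main(void)
{
	uint8_t mac[6] = { 0x02, 0, 0, 0, 0, 1 }, dst[6];
	int forced;

	if (get_mac(1ULL << MAC_ADDR_FORCED, mac, dst, &forced))
		printf("got mac, forced=%d\n", forced);
	return 0;
}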
+
+void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng)
+{
+ struct pf_vf_pfdev_info *info;
+
+ info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
+
+ *fw_major = info->fw_major;
+ *fw_minor = info->fw_minor;
+ *fw_rev = info->fw_rev;
+ *fw_eng = info->fw_eng;
+}
+
+static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
+{
+ struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
+ u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
+ void *cookie = hwfn->cdev->ops_cookie;
+
+ is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
+ &is_mac_forced);
+ if (is_mac_exist && is_mac_forced && cookie)
+ ops->force_mac(cookie, mac);
+
+ /* Always update link configuration according to bulletin */
+ qed_link_update(hwfn);
+}
+
+void qed_iov_vf_task(struct work_struct *work)
+{
+ struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+ iov_task.work);
+ u8 change = 0;
+
+ if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
+ return;
+
+ /* Handle bulletin board changes */
+ qed_vf_read_bulletin(hwfn, &change);
+ if (change)
+ qed_handle_bulletin_change(hwfn);
+
+ /* As VF is polling bulletin board, need to constantly re-schedule */
+ queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
new file mode 100644
index 000000000..b82fda964
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -0,0 +1,990 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_VF_H
+#define _QED_VF_H
+
+#include "qed_l2.h"
+#include "qed_mcp.h"
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY_SIZE 10
+
+struct vf_pf_resc_request {
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u16 padding;
+};
+
+struct hw_sb_info {
+ u16 hw_sb_id;
+ u8 sb_qid;
+ u8 padding[5];
+};
+
+#define TLV_BUFFER_SIZE 1024
+
+enum {
+ PFVF_STATUS_WAITING,
+ PFVF_STATUS_SUCCESS,
+ PFVF_STATUS_FAILURE,
+ PFVF_STATUS_NOT_SUPPORTED,
+ PFVF_STATUS_NO_RESOURCE,
+ PFVF_STATUS_FORCED,
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ u16 type;
+ u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+ struct channel_tlv tl;
+ u32 padding;
+ u64 reply_address;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+ struct channel_tlv tl;
+ u8 status;
+ u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_def_resp_tlv {
+ struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+};
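These headers define the channel's entire wire grammar: a packed stream of self-describing records terminated by a list-end record, which both ends traverse by hopping from header to header (qed_dp_tlv_list() and qed_iov_search_list_tlvs() do exactly this). A minimal parser sketch with an invented terminator value:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct channel_tlv {
	uint16_t type;
	uint16_t length;
};

#define DEMO_TLV_LIST_END 99     /* stand-in terminator type for the sketch */

/* Walk a packed TLV list: each header carries the total length of its
 * record, so the next header sits 'length' bytes further on.
 */
static void dump_tlvs(const uint8_t *buf, size_t size)
{
	size_t off = 0;

	while (off + sizeof(struct channel_tlv) <= size) {
		struct channel_tlv tlv;

		memcpy(&tlv, buf + off, sizeof(tlv));    /* alignment-safe */
		printf("type %u, length %u\n", tlv.type, tlv.length);
		if (tlv.type == DEMO_TLV_LIST_END || tlv.length < sizeof(tlv))
			break;
		off += tlv.length;
	}
}

int main(void)
{
	uint8_t buf[16] = { 0 };
	struct channel_tlv t = { 1, 8 };

	memcpy(buf, &t, sizeof(t));              /* one 8-byte request TLV */
	t.type = DEMO_TLV_LIST_END;
	t.length = sizeof(t);
	memcpy(buf + 8, &t, sizeof(t));
	dump_tlvs(buf, sizeof(buf));
	return 0;
}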
+
+#define VFPF_ACQUIRE_OS_LINUX (0)
+#define VFPF_ACQUIRE_OS_WINDOWS (1)
+#define VFPF_ACQUIRE_OS_ESX (2)
+#define VFPF_ACQUIRE_OS_SOLARIS (3)
+#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
+
+struct vfpf_acquire_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_vfdev_info {
+#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0)
+#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+ u64 capabilities;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_revision;
+ u8 fw_engineering;
+ u32 driver_version;
+ u16 opaque_fid; /* ME register value */
+ u8 os_type; /* VFPF_ACQUIRE_OS_* value */
+ u8 padding[5];
+ } vfdev_info;
+
+ struct vf_pf_resc_request resc_request;
+
+ u64 bulletin_addr;
+ u32 bulletin_size;
+ u32 padding;
+};
+
+/* receive side scaling tlv */
+struct vfpf_vport_update_rss_tlv {
+ struct channel_tlv tl;
+
+ u8 update_rss_flags;
+#define VFPF_UPDATE_RSS_CONFIG_FLAG BIT(0)
+#define VFPF_UPDATE_RSS_CAPS_FLAG BIT(1)
+#define VFPF_UPDATE_RSS_IND_TABLE_FLAG BIT(2)
+#define VFPF_UPDATE_RSS_KEY_FLAG BIT(3)
+
+ u8 rss_enable;
+ u8 rss_caps;
+ u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+ u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ u32 rss_key[T_ETH_RSS_KEY_SIZE];
+};
+
+struct pfvf_storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct pfvf_stats_info {
+ struct pfvf_storm_stats mstats;
+ struct pfvf_storm_stats pstats;
+ struct pfvf_storm_stats tstats;
+ struct pfvf_storm_stats ustats;
+};
+
+struct pfvf_acquire_resp_tlv {
+ struct pfvf_tlv hdr;
+
+ struct pf_vf_pfdev_info {
+ u32 chip_num;
+ u32 mfw_ver;
+
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_rev;
+ u16 fw_eng;
+
+ u64 capabilities;
+#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED BIT(0)
+#define PFVF_ACQUIRE_CAP_100G BIT(1) /* If set, 100g PF */
+/* There are old PF versions where the PF might mistakenly override the sanity
+ * mechanism [version-based] and allow a VF that can't be supported to pass
+ * the acquisition phase.
+ * To overcome this, PFs now indicate that they're past that point, and new
+ * VFs fail probe on older PFs that don't.
+ */
+#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2)
+
+ u16 db_size;
+ u8 indices_per_sb;
+ u8 os_type;
+
+ /* These should match the PF's qed_dev values */
+ u16 chip_rev;
+ u8 dev_type;
+
+ u8 padding;
+
+ struct pfvf_stats_info stats_info;
+
+ u8 port_mac[ETH_ALEN];
+ u8 padding2[2];
+ } pfdev_info;
+
+ struct pf_vf_resc {
+#define PFVF_MAX_QUEUES_PER_VF 16
+#define PFVF_MAX_SBS_PER_VF 16
+ struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+ u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+ u8 cid[PFVF_MAX_QUEUES_PER_VF];
+
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 padding[2];
+ } resc;
+
+ u32 bulletin_size;
+ u32 padding;
+};
+
+struct pfvf_start_queue_resp_tlv {
+ struct pfvf_tlv hdr;
+ u32 offset; /* offset to consumer/producer of queue */
+ u8 padding[4];
+};
+
+/* Setup Queue */
+struct vfpf_start_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ u64 rxq_addr;
+ u64 deprecated_sge_addr;
+ u64 cqe_pbl_addr;
+
+ u16 cqe_pbl_size;
+ u16 hw_sb;
+ u16 rx_qid;
+ u16 hc_rate; /* desired interrupts per sec. */
+
+ u16 bd_max_bytes;
+ u16 stat_id;
+ u8 sb_index;
+ u8 padding[3];
+};
+
+struct vfpf_start_txq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ u64 pbl_addr;
+ u16 pbl_size;
+ u16 stat_id;
+ u16 tx_qid;
+ u16 hw_sb;
+
+ u32 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 hc_rate; /* desired interrupts per sec. */
+ u8 sb_index;
+ u8 padding[3];
+};
+
+/* Stop RX Queue */
+struct vfpf_stop_rxqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 cqe_completion;
+ u8 padding[4];
+};
+
+/* Stop TX Queues */
+struct vfpf_stop_txqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 tx_qid;
+ u8 num_txqs;
+ u8 padding[5];
+};
+
+struct vfpf_update_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
+
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 flags;
+#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG BIT(0)
+#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG BIT(1)
+#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG BIT(2)
+
+ u8 padding[4];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+ u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
+#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+
+ u8 mac[ETH_ALEN];
+ u16 vlan_tag;
+
+ u8 padding[4];
+};
+
+/* Start a vport */
+struct vfpf_vport_start_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u64 sb_addr[PFVF_MAX_SBS_PER_VF];
+
+ u32 tpa_mode;
+ u16 dep1;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 inner_vlan_removal;
+
+ u8 only_untagged;
+ u8 max_buffers_per_cqe;
+
+ u8 padding[4];
+};
+
+/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
+struct vfpf_vport_update_activate_tlv {
+ struct channel_tlv tl;
+ u8 update_rx;
+ u8 update_tx;
+ u8 active_rx;
+ u8 active_tx;
+};
+
+struct vfpf_vport_update_tx_switch_tlv {
+ struct channel_tlv tl;
+ u8 tx_switching;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_vlan_strip_tlv {
+ struct channel_tlv tl;
+ u8 remove_vlan;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_mcast_bin_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+
+ u64 bins[8];
+};
+
+struct vfpf_vport_update_accept_param_tlv {
+ struct channel_tlv tl;
+ u8 update_rx_mode;
+ u8 update_tx_mode;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+};
+
+struct vfpf_vport_update_accept_any_vlan_tlv {
+ struct channel_tlv tl;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+
+ u8 padding[2];
+};
+
+struct vfpf_vport_update_sge_tpa_tlv {
+ struct channel_tlv tl;
+
+ u16 sge_tpa_flags;
+#define VFPF_TPA_IPV4_EN_FLAG BIT(0)
+#define VFPF_TPA_IPV6_EN_FLAG BIT(1)
+#define VFPF_TPA_PKT_SPLIT_FLAG BIT(2)
+#define VFPF_TPA_HDR_DATA_SPLIT_FLAG BIT(3)
+#define VFPF_TPA_GRO_CONSIST_FLAG BIT(4)
+
+ u8 update_sge_tpa_flags;
+#define VFPF_UPDATE_SGE_DEPRECATED_FLAG BIT(0)
+#define VFPF_UPDATE_TPA_EN_FLAG BIT(1)
+#define VFPF_UPDATE_TPA_PARAM_FLAG BIT(2)
+
+ u8 max_buffers_per_cqe;
+
+ u16 deprecated_sge_buff_size;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+
+ u8 tpa_max_aggs_num;
+ u8 padding[7];
+};
+
+/* Primary tlv as a header for various extended tlvs for
+ * various functionalities in vport update ramrod.
+ */
+struct vfpf_vport_update_tlv {
+ struct vfpf_first_tlv first_tlv;
+};
+
+struct vfpf_ucast_filter_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 opcode;
+ u8 type;
+
+ u8 mac[ETH_ALEN];
+
+ u16 vlan;
+ u16 padding[3];
+};
+
+struct tlv_buffer_size {
+ u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+union vfpf_tlvs {
+ struct vfpf_first_tlv first_tlv;
+ struct vfpf_acquire_tlv acquire;
+ struct vfpf_start_rxq_tlv start_rxq;
+ struct vfpf_start_txq_tlv start_txq;
+ struct vfpf_stop_rxqs_tlv stop_rxqs;
+ struct vfpf_stop_txqs_tlv stop_txqs;
+ struct vfpf_update_rxq_tlv update_rxq;
+ struct vfpf_vport_start_tlv start_vport;
+ struct vfpf_vport_update_tlv vport_update;
+ struct vfpf_ucast_filter_tlv ucast_filter;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+ struct pfvf_def_resp_tlv default_resp;
+ struct pfvf_acquire_resp_tlv acquire_resp;
+ struct tlv_buffer_size tlv_buf_size;
+ struct pfvf_start_queue_resp_tlv queue_start;
+};
+
+enum qed_bulletin_bit {
+ /* Alert the VF that a forced MAC was set by the PF */
+ MAC_ADDR_FORCED = 0,
+ /* Alert the VF that a forced VLAN was set by the PF */
+ VLAN_ADDR_FORCED = 2,
+
+ /* Indicate that `default_only_untagged' contains actual data */
+ VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
+ VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
+
+ /* Alert the VF that a suggested MAC was sent by the PF.
+ * MAC_ADDR is disabled when MAC_ADDR_FORCED is set.
+ */
+ VFPF_BULLETIN_MAC_ADDR = 5
+};
+
+struct qed_bulletin_content {
+ /* crc of the structure, used to detect a torn (mid-update) read */
+ u32 crc;
+
+ u32 version;
+
+ /* bitmap indicating which fields hold valid values */
+ u64 valid_bitmap;
+
+ /* used for MAC_ADDR or MAC_ADDR_FORCED */
+ u8 mac[ETH_ALEN];
+
+ /* If valid, 1 => only untagged Rx if no vlan is configured */
+ u8 default_only_untagged;
+ u8 padding;
+
+ /* The following is a 'copy' of qed_mcp_link_state,
+ * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's
+ * possible the structs will increase further along the road we cannot
+ * have it here; Instead we need to have all of its fields.
+ */
+ u8 req_autoneg;
+ u8 req_autoneg_pause;
+ u8 req_forced_rx;
+ u8 req_forced_tx;
+ u8 padding2[4];
+
+ u32 req_adv_speed;
+ u32 req_forced_speed;
+ u32 req_loopback;
+ u32 padding3;
+
+ u8 link_up;
+ u8 full_duplex;
+ u8 autoneg;
+ u8 autoneg_complete;
+ u8 parallel_detection;
+ u8 pfc_enabled;
+ u8 partner_tx_flow_ctrl_en;
+ u8 partner_rx_flow_ctrl_en;
+ u8 partner_adv_pause;
+ u8 sfp_tx_fault;
+ u8 padding4[6];
+
+ u32 speed;
+ u32 partner_adv_speed;
+
+ u32 capability_speed;
+
+ /* Forced vlan */
+ u16 pvid;
+ u16 padding5;
+};
+
+struct qed_bulletin {
+ dma_addr_t phys;
+ struct qed_bulletin_content *p_virt;
+ u32 size;
+};
+
+enum {
+ CHANNEL_TLV_NONE, /* ends tlv sequence */
+ CHANNEL_TLV_ACQUIRE,
+ CHANNEL_TLV_VPORT_START,
+ CHANNEL_TLV_VPORT_UPDATE,
+ CHANNEL_TLV_VPORT_TEARDOWN,
+ CHANNEL_TLV_START_RXQ,
+ CHANNEL_TLV_START_TXQ,
+ CHANNEL_TLV_STOP_RXQS,
+ CHANNEL_TLV_STOP_TXQS,
+ CHANNEL_TLV_UPDATE_RXQ,
+ CHANNEL_TLV_INT_CLEANUP,
+ CHANNEL_TLV_CLOSE,
+ CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_LIST_END,
+ CHANNEL_TLV_UCAST_FILTER,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
+ CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
+ CHANNEL_TLV_VPORT_UPDATE_RSS,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
+ CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ CHANNEL_TLV_MAX,
+
+ /* Required for iterating over vport-update tlvs.
+ * Will break if the vport-update tlvs are not kept sequential.
+ */
+ CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
+};
+
+/* This data is held in the qed_hwfn structure for VFs only. */
+struct qed_vf_iov {
+ union vfpf_tlvs *vf2pf_request;
+ dma_addr_t vf2pf_request_phys;
+ union pfvf_tlvs *pf2vf_reply;
+ dma_addr_t pf2vf_reply_phys;
+
+ /* Should be taken whenever the mailbox buffers are accessed */
+ struct mutex mutex;
+ u8 *offset;
+
+ /* Bulletin Board */
+ struct qed_bulletin bulletin;
+ struct qed_bulletin_content bulletin_shadow;
+
+ /* we set aside a copy of the acquire response */
+ struct pfvf_acquire_resp_tlv acquire_resp;
+};
+
+#ifdef CONFIG_QED_SRIOV
+/**
+ * @brief Read the VF bulletin and act on it if needed
+ *
+ * @param p_hwfn
+ * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
+
+/**
+ * @brief Get link parameters for VF from qed
+ *
+ * @param p_hwfn
+ * @param params - the link params structure to be filled for the VF
+ */
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params);
+
+/**
+ * @brief Get link state for VF from qed
+ *
+ * @param p_hwfn
+ * @param link - the link state structure to be filled for the VF
+ */
+void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link);
+
+/**
+ * @brief Get link capabilities for VF from qed
+ *
+ * @param p_hwfn
+ * @param p_link_caps - the link capabilities structure to be filled for the VF
+ */
+void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps);
+
+/**
+ * @brief Get number of Rx queues allocated for VF by qed
+ *
+ * @param p_hwfn
+ * @param num_rxqs - allocated RX queues
+ */
+void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
+
+/**
+ * @brief Get port MAC address for VF
+ *
+ * @param p_hwfn
+ * @param port_mac - destination buffer for the port MAC
+ */
+void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
+
+/**
+ * @brief Get number of VLAN filters allocated for VF by qed
+ *
+ * @param p_hwfn
+ * @param num_vlan_filters - allocated VLAN filters
+ */
+void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+ u8 *num_vlan_filters);
+
+/**
+ * @brief Check if VF can set a MAC address
+ *
+ * @param p_hwfn
+ * @param mac
+ *
+ * @return bool
+ */
+bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
+
+/**
+ * @brief Set firmware version information in dev_info from the VF's acquire response TLV
+ *
+ * @param p_hwfn
+ * @param fw_major
+ * @param fw_minor
+ * @param fw_rev
+ * @param fw_eng
+ */
+void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng);
+
+/**
+ * @brief HW preparation for the VF;
+ * sends the ACQUIRE message
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief VF - start the RX queue by sending a message to the PF
+ * @param p_hwfn
+ * @param rx_queue_id - zero based within the VF
+ * @param sb - VF status block for this queue
+ * @param sb_index - Index within the status block
+ * @param bd_max_bytes - maximum number of bytes per bd
+ * @param bd_chain_phys_addr - physical address of bd chain
+ * @param cqe_pbl_addr - physical address of pbl
+ * @param cqe_pbl_size - pbl size
+ * @param pp_prod - pointer to the producer to be
+ * used in fastpath
+ *
+ * @return int
+ */
+int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ u8 rx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod);
+
+/**
+ * @brief VF - start the TX queue by sending a message to the
+ * PF.
+ *
+ * @param p_hwfn
+ * @param tx_queue_id - zero based within the VF
+ * @param sb - status block for this queue
+ * @param sb_index - index within the status block
+ * @param pbl_addr - physical address of the Tx PBL
+ * @param pbl_size - PBL size
+ * @param pp_doorbell - pointer to the address to which the
+ * doorbell should be written
+ *
+ * @return int
+ */
+int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell);
+
+/**
+ * @brief VF - stop the RX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param rx_qid
+ * @param cqe_completion
+ *
+ * @return int
+ */
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_qid, bool cqe_completion);
+
+/**
+ * @brief VF - stop the TX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param tx_qid
+ *
+ * @return int
+ */
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);
+
+/**
+ * @brief VF - send a vport update command
+ *
+ * @param p_hwfn
+ * @param params
+ *
+ * @return int
+ */
+int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params);
+
+/**
+ * @brief VF - send a close message to PF
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief VF - free the VF's memory
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
+ * sb_id. For VFs, IGU SBs don't have to be contiguous
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief qed_vf_pf_vport_start - perform vport start for VF.
+ *
+ * @param p_hwfn
+ * @param vport_id
+ * @param mtu
+ * @param inner_vlan_removal
+ * @param tpa_mode
+ * @param max_buffers_per_cqe
+ * @param only_untagged - default behavior regarding VLAN acceptance
+ *
+ * @return int
+ */
+int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe, u8 only_untagged);
+
+/**
+ * @brief qed_vf_pf_vport_stop - stop the VF's vport
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
+
+int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_param);
+
+void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd);
+
+/**
+ * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief - return the link params in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_params - pointer to a struct to fill with link params
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *p_params,
+ struct qed_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link state in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link state
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link capabilities in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link_caps - pointer to a struct to fill with link capabilities
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin);
+
+void qed_iov_vf_task(struct work_struct *work);
+#else
+static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params)
+{
+}
+
+static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link)
+{
+}
+
+static inline void
+qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps)
+{
+}
+
+static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
+{
+}
+
+static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
+{
+}
+
+static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+ u8 *num_vlan_filters)
+{
+}
+
+static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
+{
+ return false;
+}
+
+static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng)
+{
+}
+
+static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ u8 rx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_adr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_qid, bool cqe_completion)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+{
+ return -EINVAL;
+}
+
+static inline int
+qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ return 0;
+}
+
+static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe,
+ u8 only_untagged)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_param)
+{
+ return -EINVAL;
+}
+
+static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd)
+{
+}
+
+static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params
+ *p_params,
+ struct qed_bulletin_content
+ *p_bulletin)
+{
+}
+
+static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content
+ *p_bulletin)
+{
+}
+
+static inline void
+__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin)
+{
+}
+
+static inline void qed_iov_vf_task(struct work_struct *work)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index d02325154..47d6b2225 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -25,15 +25,13 @@
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 7
-#define QEDE_REVISION_VERSION 0
-#define QEDE_ENGINEERING_VERSION 0
+#define QEDE_REVISION_VERSION 1
+#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
__stringify(QEDE_REVISION_VERSION) "." \
__stringify(QEDE_ENGINEERING_VERSION)
-#define QEDE_ETH_INTERFACE_VERSION 300
-
#define DRV_MODULE_SYM qede
struct qede_stats {
@@ -61,16 +59,16 @@ struct qede_stats {
/* port */
u64 rx_64_byte_packets;
- u64 rx_127_byte_packets;
- u64 rx_255_byte_packets;
- u64 rx_511_byte_packets;
- u64 rx_1023_byte_packets;
- u64 rx_1518_byte_packets;
- u64 rx_1522_byte_packets;
- u64 rx_2047_byte_packets;
- u64 rx_4095_byte_packets;
- u64 rx_9216_byte_packets;
- u64 rx_16383_byte_packets;
+ u64 rx_65_to_127_byte_packets;
+ u64 rx_128_to_255_byte_packets;
+ u64 rx_256_to_511_byte_packets;
+ u64 rx_512_to_1023_byte_packets;
+ u64 rx_1024_to_1518_byte_packets;
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -114,6 +112,10 @@ struct qede_dev {
u32 dp_module;
u8 dp_level;
+ u32 flags;
+#define QEDE_FLAG_IS_VF BIT(0)
+#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
+
const struct qed_eth_ops *ops;
struct qed_dev_eth_info dev_info;
@@ -156,6 +158,10 @@ struct qede_dev {
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
struct qede_stats stats;
+#define QEDE_RSS_INDIR_INITED BIT(0)
+#define QEDE_RSS_KEY_INITED BIT(1)
+#define QEDE_RSS_CAPS_INITED BIT(2)
+ u32 rss_params_inited; /* bit-field to track initialized rss params */
struct qed_update_vport_rss_params rss_params;
u16 q_num_rx_buffers; /* Must be a power of two */
u16 q_num_tx_buffers; /* Must be a power of two */
@@ -167,6 +173,8 @@ struct qede_dev {
bool accept_any_vlan;
struct delayed_work sp_task;
unsigned long sp_flags;
+ u16 vxlan_dst_port;
+ u16 geneve_dst_port;
};
enum QEDE_STATE {
@@ -286,8 +294,11 @@ struct qede_fastpath {
#define QEDE_CSUM_ERROR BIT(0)
#define QEDE_CSUM_UNNECESSARY BIT(1)
+#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)
-#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_VXLAN_PORT_CONFIG 2
+#define QEDE_SP_GENEVE_PORT_CONFIG 3
union qede_reload_args {
u16 mtu;
@@ -301,6 +312,10 @@ void qede_reload(struct qede_dev *edev,
union qede_reload_args *args);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
+bool qede_has_rx_work(struct qede_rx_queue *rxq);
+int qede_txq_has_work(struct qede_tx_queue *txq);
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
+ u8 count);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index c49dc10ce..ad3cae3b7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -9,6 +9,7 @@
#include <linux/version.h>
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/pci.h>
@@ -27,6 +28,9 @@
#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
#define QEDE_RQSTAT(stat_name) \
{QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+
+#define QEDE_SELFTEST_POLL_COUNT 100
+
static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
@@ -59,16 +63,16 @@ static const struct {
QEDE_STAT(tx_bcast_pkts),
QEDE_PF_STAT(rx_64_byte_packets),
- QEDE_PF_STAT(rx_127_byte_packets),
- QEDE_PF_STAT(rx_255_byte_packets),
- QEDE_PF_STAT(rx_511_byte_packets),
- QEDE_PF_STAT(rx_1023_byte_packets),
- QEDE_PF_STAT(rx_1518_byte_packets),
- QEDE_PF_STAT(rx_1522_byte_packets),
- QEDE_PF_STAT(rx_2047_byte_packets),
- QEDE_PF_STAT(rx_4095_byte_packets),
- QEDE_PF_STAT(rx_9216_byte_packets),
- QEDE_PF_STAT(rx_16383_byte_packets),
+ QEDE_PF_STAT(rx_65_to_127_byte_packets),
+ QEDE_PF_STAT(rx_128_to_255_byte_packets),
+ QEDE_PF_STAT(rx_256_to_511_byte_packets),
+ QEDE_PF_STAT(rx_512_to_1023_byte_packets),
+ QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
+ QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
+ QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
+ QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
+ QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
+ QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
QEDE_PF_STAT(tx_64_byte_packets),
QEDE_PF_STAT(tx_65_to_127_byte_packets),
QEDE_PF_STAT(tx_128_to_255_byte_packets),
@@ -116,11 +120,39 @@ static const struct {
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+enum {
+ QEDE_PRI_FLAG_CMT,
+ QEDE_PRI_FLAG_LEN,
+};
+
+static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
+ "Coupled-Function",
+};
+
+enum qede_ethtool_tests {
+ QEDE_ETHTOOL_INT_LOOPBACK,
+ QEDE_ETHTOOL_INTERRUPT_TEST,
+ QEDE_ETHTOOL_MEMORY_TEST,
+ QEDE_ETHTOOL_REGISTER_TEST,
+ QEDE_ETHTOOL_CLOCK_TEST,
+ QEDE_ETHTOOL_TEST_MAX
+};
+
+static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
+ "Internal loopback (offline)",
+ "Interrupt (online)\t",
+ "Memory (online)\t\t",
+ "Register (online)\t",
+ "Clock (online)\t\t",
+};
+
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
int i, j, k;
for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+ if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+ continue;
strcpy(buf + j * ETH_GSTRING_LEN,
qede_stats_arr[i].string);
j++;
@@ -139,6 +171,14 @@ static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
case ETH_SS_STATS:
qede_get_strings_stats(edev, buf);
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(buf, qede_private_arr,
+ ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
+ break;
+ case ETH_SS_TEST:
+ memcpy(buf, qede_tests_str_arr,
+ ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
+ break;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Unsupported stringset 0x%08x\n", stringset);
@@ -156,8 +196,11 @@ static void qede_get_ethtool_stats(struct net_device *dev,
mutex_lock(&edev->qede_lock);
- for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++)
+ for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
+ if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
+ continue;
buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+ }
for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
buf[cnt] = 0;
@@ -176,8 +219,21 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
switch (stringset) {
case ETH_SS_STATS:
- return num_stats + QEDE_NUM_RQSTATS;
+ if (IS_VF(edev)) {
+ int i;
+ for (i = 0; i < QEDE_NUM_STATS; i++)
+ if (qede_stats_arr[i].pf_only)
+ num_stats--;
+ }
+ return num_stats + QEDE_NUM_RQSTATS;
+ case ETH_SS_PRIV_FLAGS:
+ return QEDE_PRI_FLAG_LEN;
+ case ETH_SS_TEST:
+ if (!IS_VF(edev))
+ return QEDE_ETHTOOL_TEST_MAX;
+ else
+ return 0;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Unsupported stringset 0x%08x\n", stringset);
@@ -185,6 +241,13 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
}
}
+static u32 qede_get_priv_flags(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
+}
+
static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -217,9 +280,9 @@ static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct qed_link_params params;
u32 speed;
- if (!edev->dev_info.common.is_mf_default) {
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev,
- "Link parameters can not be changed in non-default mode\n");
+ "Link settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
@@ -328,6 +391,12 @@ static int qede_nway_reset(struct net_device *dev)
struct qed_link_output current_link;
struct qed_link_params link_params;
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+ DP_INFO(edev,
+ "Link settings are not allowed to be changed\n");
+ return -EOPNOTSUPP;
+ }
+
if (!netif_running(dev))
return 0;
@@ -428,9 +497,9 @@ static int qede_set_pauseparam(struct net_device *dev,
struct qed_link_params params;
struct qed_link_output current_link;
- if (!edev->dev_info.common.is_mf_default) {
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev,
- "Pause parameters can not be updated in non-default mode\n");
+ "Pause settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
@@ -569,6 +638,497 @@ static int qede_set_phys_id(struct net_device *dev,
return 0;
}
+static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V6_FLOW:
+ if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules __always_unused)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = edev->num_rss;
+ return 0;
+ case ETHTOOL_GRXFH:
+ return qede_get_rss_flags(edev, info);
+ default:
+ DP_ERR(edev, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ struct qed_update_vport_params vport_update_params;
+ u8 set_caps = 0, clr_caps = 0;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ info->flow_type, info->data);
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ /* For TCP only 4-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ case UDP_V4_FLOW:
+ /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ set_caps = QED_RSS_IPV4_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple enabled\n");
+ } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+ clr_caps = QED_RSS_IPV4_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple disabled\n");
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ set_caps = QED_RSS_IPV6_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple enabled\n");
+ } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+ clr_caps = QED_RSS_IPV6_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple disabled\n");
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ /* For IP only 2-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IP_USER_FLOW:
+ case ETHER_FLOW:
+ /* RSS is not supported for these protocols */
+ if (info->data) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ /* No action is needed if there is no change in the rss capability */
+ if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
+ ~clr_caps) | set_caps))
+ return 0;
+
+ /* Update internal configuration */
+ edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
+ set_caps;
+ edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+
+ /* Re-configure if possible */
+ if (netif_running(edev->ndev)) {
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.update_rss_flg = 1;
+ vport_update_params.vport_id = 0;
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
+ return edev->ops->vport_update(edev->cdev,
+ &vport_update_params);
+ }
+
+ return 0;
+}
+
+static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return qede_set_rss_flags(edev, info);
+ default:
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 qede_get_rxfh_indir_size(struct net_device *dev)
+{
+ return QED_RSS_IND_TABLE_SIZE;
+}
+
+static u32 qede_get_rxfh_key_size(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return sizeof(edev->rss_params.rss_key);
+}
+
+static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+ indir[i] = edev->rss_params.rss_ind_table[i];
+
+ if (key)
+ memcpy(key, edev->rss_params.rss_key,
+ qede_get_rxfh_key_size(dev));
+
+ return 0;
+}
+
+static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct qede_dev *edev = netdev_priv(dev);
+ int i;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (!indir && !key)
+ return 0;
+
+ if (indir) {
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+ edev->rss_params.rss_ind_table[i] = indir[i];
+ edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+ }
+
+ if (key) {
+ memcpy(&edev->rss_params.rss_key, key,
+ qede_get_rxfh_key_size(dev));
+ edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+ }
+
+ if (netif_running(edev->ndev)) {
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.update_rss_flg = 1;
+ vport_update_params.vport_id = 0;
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
+ return edev->ops->vport_update(edev->cdev,
+ &vport_update_params);
+ }
+
+ return 0;
+}
+
+/* This function enables the interrupt generation and the NAPI on the device */
+static void qede_netif_start(struct qede_dev *edev)
+{
+ int i;
+
+ if (!netif_running(edev->ndev))
+ return;
+
+ for_each_rss(i) {
+ /* Update and reenable interrupts */
+ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
+ napi_enable(&edev->fp_array[i].napi);
+ }
+}
+
+/* This function disables the NAPI and the interrupt generation on the device */
+static void qede_netif_stop(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_rss(i) {
+ napi_disable(&edev->fp_array[i].napi);
+ /* Disable interrupts */
+ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
+ }
+}
+
+static int qede_selftest_transmit_traffic(struct qede_dev *edev,
+ struct sk_buff *skb)
+{
+ struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
+ struct eth_tx_1st_bd *first_bd;
+ dma_addr_t mapping;
+ int i, idx, val;
+
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ txq->sw_tx_ring[idx].skb = skb;
+ first_bd = qed_chain_produce(&txq->tx_pbl);
+ memset(first_bd, 0, sizeof(*first_bd));
+ val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+ first_bd->data.bd_flags.bitfields = val;
+
+ /* Map skb linear data for DMA and set in the first BD */
+ mapping = dma_map_single(&edev->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev, "SKB mapping failed\n");
+ return -ENOMEM;
+ }
+ BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+ /* update the first BD with the actual num BDs */
+ first_bd->data.nbds = 1;
+ txq->sw_tx_prod++;
+ /* 'next page' entries are counted in the producer value */
+ val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+ txq->tx_db.data.bd_prod = val;
+
+ /* wmb makes sure that the BD data is updated before updating the
+ * producer, otherwise FW may read old data from the BDs.
+ */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write arrives at the device before
+ * the queue lock is released and another start_xmit is called (possibly
+ * on another CPU). Without this barrier, the next doorbell can bypass
+ * this doorbell. This is applicable to IA64/Altix systems.
+ */
+ mmiowb();
+
+ for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+ if (qede_txq_has_work(txq))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (!qede_txq_has_work(txq)) {
+ DP_NOTICE(edev, "Tx completion didn't happen\n");
+ return -1;
+ }
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+ txq->sw_tx_cons++;
+ txq->sw_tx_ring[idx].skb = NULL;
+
+ return 0;
+}
+
+static int qede_selftest_receive_traffic(struct qede_dev *edev)
+{
+ struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
+ u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ struct sw_rx_data *sw_rx_data;
+ union eth_rx_cqe *cqe;
+ u8 *data_ptr;
+ int i;
+
+ /* The packet is expected to be received on rx-queue 0 even though RSS
+ * is enabled. This is because queue 0 is configured as the default
+ * queue and the loopback traffic is not IP.
+ */
+ for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+ if (qede_has_rx_work(rxq))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (!qede_has_rx_work(rxq)) {
+ DP_NOTICE(edev, "Failed to receive the traffic\n");
+ return -1;
+ }
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ /* Memory barrier to prevent the CPU from speculatively reading the
+ * CQE/BD before reading hw_comp_cons. If the CQE were read first, FW
+ * could then write the CQE and SB, the CPU would read the updated
+ * hw_comp_cons, and the stale CQE would be used.
+ */
+ rmb();
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+ fp_cqe->placement_offset + sw_rx_data->page_offset);
+ for (i = ETH_HLEN; i < len; i++)
+ if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+ DP_NOTICE(edev, "Loopback test failed\n");
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ return -1;
+ }
+
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+
+ return 0;
+}
+
+static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
+{
+ struct qed_link_params link_params;
+ struct sk_buff *skb = NULL;
+ int rc = 0, i;
+ u32 pkt_size;
+ u8 *packet;
+
+ if (!netif_running(edev->ndev)) {
+ DP_NOTICE(edev, "Interface is down\n");
+ return -EINVAL;
+ }
+
+ qede_netif_stop(edev);
+
+ /* Bring up the link in Loopback mode */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+ link_params.loopback_mode = loopback_mode;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Wait for loopback configuration to apply */
+ msleep_interruptible(500);
+
+ /* prepare the loopback packet */
+ pkt_size = edev->ndev->mtu + ETH_HLEN;
+
+ skb = netdev_alloc_skb(edev->ndev, pkt_size);
+ if (!skb) {
+ DP_INFO(edev, "Can't allocate skb\n");
+ rc = -ENOMEM;
+ goto test_loopback_exit;
+ }
+ packet = skb_put(skb, pkt_size);
+ ether_addr_copy(packet, edev->ndev->dev_addr);
+ ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
+ memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
+ for (i = ETH_HLEN; i < pkt_size; i++)
+ packet[i] = (unsigned char)(i & 0xff);
+
+ rc = qede_selftest_transmit_traffic(edev, skb);
+ if (rc)
+ goto test_loopback_exit;
+
+ rc = qede_selftest_receive_traffic(edev);
+ if (rc)
+ goto test_loopback_exit;
+
+ DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");
+
+test_loopback_exit:
+ dev_kfree_skb(skb);
+
+ /* Bring up the link in Normal mode */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+ link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Wait for loopback configuration to apply */
+ msleep_interruptible(500);
+
+ qede_netif_start(edev);
+
+ return rc;
+}
+
+static void qede_self_test(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Self-test command parameters: offline = %d, external_lb = %d\n",
+ (etest->flags & ETH_TEST_FL_OFFLINE),
+ (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);
+
+ memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);
+
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ if (qede_selftest_run_loopback(edev,
+ QED_LINK_LOOPBACK_INT_PHY)) {
+ buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+
+ if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
+ buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
+ buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
+ buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
+ buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+}
+
static const struct ethtool_ops qede_ethtool_ops = {
.get_settings = qede_get_settings,
.set_settings = qede_set_settings,
@@ -584,13 +1144,47 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_strings = qede_get_strings,
.set_phys_id = qede_set_phys_id,
.get_ethtool_stats = qede_get_ethtool_stats,
+ .get_priv_flags = qede_get_priv_flags,
.get_sset_count = qede_get_sset_count,
+ .get_rxnfc = qede_get_rxnfc,
+ .set_rxnfc = qede_set_rxnfc,
+ .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+ .get_rxfh_key_size = qede_get_rxfh_key_size,
+ .get_rxfh = qede_get_rxfh,
+ .set_rxfh = qede_set_rxfh,
+ .get_channels = qede_get_channels,
+ .set_channels = qede_set_channels,
+ .self_test = qede_self_test,
+};
+static const struct ethtool_ops qede_vf_ethtool_ops = {
+ .get_settings = qede_get_settings,
+ .get_drvinfo = qede_get_drvinfo,
+ .get_msglevel = qede_get_msglevel,
+ .set_msglevel = qede_set_msglevel,
+ .get_link = qede_get_link,
+ .get_ringparam = qede_get_ringparam,
+ .set_ringparam = qede_set_ringparam,
+ .get_strings = qede_get_strings,
+ .get_ethtool_stats = qede_get_ethtool_stats,
+ .get_priv_flags = qede_get_priv_flags,
+ .get_sset_count = qede_get_sset_count,
+ .get_rxnfc = qede_get_rxnfc,
+ .set_rxnfc = qede_set_rxnfc,
+ .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+ .get_rxfh_key_size = qede_get_rxfh_key_size,
+ .get_rxfh = qede_get_rxfh,
+ .set_rxfh = qede_set_rxfh,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
};
void qede_set_ethtool_ops(struct net_device *dev)
{
- dev->ethtool_ops = &qede_ethtool_ops;
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (IS_VF(edev))
+ dev->ethtool_ops = &qede_vf_ethtool_ops;
+ else
+ dev->ethtool_ops = &qede_ethtool_ops;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 12f661579..f8e11f953 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -24,7 +24,12 @@
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
+#ifdef CONFIG_QEDE_VXLAN
#include <net/vxlan.h>
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+#include <net/geneve.h>
+#endif
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
@@ -58,6 +63,7 @@ static const struct qed_eth_ops *qed_ops;
#define CHIP_NUM_57980S_100 0x1644
#define CHIP_NUM_57980S_50 0x1654
#define CHIP_NUM_57980S_25 0x1656
+#define CHIP_NUM_57980S_IOV 0x1664
#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
@@ -66,15 +72,24 @@ static const struct qed_eth_ops *qed_ops;
#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
#endif
+enum qede_pci_private {
+ QEDE_PRIVATE_PF,
+ QEDE_PRIVATE_VF
+};
+
static const struct pci_device_id qede_pci_tbl[] = {
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
{ 0 }
};
@@ -89,17 +104,87 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link);
+#ifdef CONFIG_QED_SRIOV
+static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ if (vlan > 4095) {
+ DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
+ vlan, vf);
+
+ return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
+}
+
+static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ DP_VERBOSE(edev, QED_MSG_IOV,
+ "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
+
+ if (!is_valid_ether_addr(mac)) {
+ DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
+ return -EINVAL;
+ }
+
+ return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
+}
+
+static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
+{
+ struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
+ struct qed_dev_info *qed_info = &edev->dev_info.common;
+ int rc;
+
+ DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
+
+ rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
+
+ /* Enable/Disable Tx switching for PF */
+ if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
+ qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
+ struct qed_update_vport_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.vport_id = 0;
+ params.update_tx_switching_flg = 1;
+ params.tx_switching_flg = num_vfs_param ? 1 : 0;
+ edev->ops->vport_update(edev->cdev, &params);
+ }
+
+ return rc;
+}
+#endif
+
static struct pci_driver qede_pci_driver = {
.name = "qede",
.id_table = qede_pci_tbl,
.probe = qede_probe,
.remove = qede_remove,
+#ifdef CONFIG_QED_SRIOV
+ .sriov_configure = qede_sriov_configure,
+#endif
};
+static void qede_force_mac(void *dev, u8 *mac)
+{
+ struct qede_dev *edev = dev;
+
+ ether_addr_copy(edev->ndev->dev_addr, mac);
+ ether_addr_copy(edev->primary_mac, mac);
+}
+
static struct qed_eth_cb_ops qede_ll_ops = {
{
.link_update = qede_link_update,
},
+ .force_mac = qede_force_mac,
};
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
@@ -141,19 +226,10 @@ static
int __init qede_init(void)
{
int ret;
- u32 qed_ver;
pr_notice("qede_init: %s\n", version);
- qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
- if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
- pr_notice("Version mismatch [%08x != %08x]\n",
- qed_ver,
- QEDE_ETH_INTERFACE_VERSION);
- return -EINVAL;
- }
-
- qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+ qed_ops = qed_get_eth_ops();
if (!qed_ops) {
pr_notice("Failed to get qed ethtool operations\n");
return -EINVAL;
@@ -319,6 +395,9 @@ static u32 qede_xmit_type(struct qede_dev *edev,
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
*ipv6_ext = 1;
+ if (skb->encapsulation)
+ rc |= XMIT_ENC;
+
if (skb_is_gso(skb))
rc |= XMIT_LSO;
@@ -380,6 +459,16 @@ static int map_frag_to_bd(struct qede_dev *edev,
return 0;
}
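+
+/* Total header length, in bytes, from the start of the linear data up to
+ * and including the (inner, for encapsulated packets) TCP header.
+ */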
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+ if (is_encap_pkt)
+ return (skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) - skb->data);
+ else
+ return (skb_transport_header(skb) +
+ tcp_hdrlen(skb) - skb->data);
+}
+
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
@@ -390,8 +479,7 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
if (xmit_type & XMIT_LSO) {
int hlen;
- hlen = skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data;
+ hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
/* linear payload would require its own BD */
if (skb_headlen(skb) > hlen)
@@ -499,7 +587,18 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
- first_bd->data.bitfields |= cpu_to_le16(temp);
+ if (xmit_type & XMIT_ENC) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ } else {
+ /* In cases where the OS doesn't indicate inner offloads
+ * for a tunnelled packet, we need to override the HW
+ * tunnel configuration so that the packet is treated as
+ * a regular non-tunnelled packet and no inner offloads
+ * are done by the hardware.
+ */
+ first_bd->data.bitfields |= cpu_to_le16(temp);
+ }
/* If the packet is IPv6 with extension header, indicate that
* to FW and pass few params, since the device cracker doesn't
@@ -515,10 +614,15 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
third_bd->data.lso_mss =
cpu_to_le16(skb_shinfo(skb)->gso_size);
- first_bd->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- hlen = skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data;
+ if (unlikely(xmit_type & XMIT_ENC)) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+ hlen = qede_get_skb_hlen(skb, true);
+ } else {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ hlen = qede_get_skb_hlen(skb, false);
+ }
/* @@@TBD - if will not be removed need to check */
third_bd->data.bitfields |=
@@ -644,7 +748,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static int qede_txq_has_work(struct qede_tx_queue *txq)
+int qede_txq_has_work(struct qede_tx_queue *txq)
{
u16 hw_bd_cons;
@@ -727,7 +831,7 @@ static int qede_tx_int(struct qede_dev *edev,
return 0;
}
-static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
u16 hw_comp_cons, sw_comp_cons;
@@ -782,8 +886,8 @@ static inline void qede_reuse_page(struct qede_dev *edev,
/* In case of allocation failures reuse buffers
* from consumer index to produce buffers for firmware
*/
-static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
- struct qede_dev *edev, u8 count)
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+ struct qede_dev *edev, u8 count)
{
struct sw_rx_data *curr_cons;
@@ -818,7 +922,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
* network stack to take the ownership of the page
* which can be recycled multiple times by the driver.
*/
- atomic_inc(&curr_cons->data->_count);
+ page_ref_inc(curr_cons->data);
qede_reuse_page(edev, rxq, curr_cons);
}
@@ -879,6 +983,9 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
if (csum_flag & QEDE_CSUM_UNNECESSARY)
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
+ skb->csum_level = 1;
}
static inline void qede_skb_receive(struct qede_dev *edev,
@@ -931,7 +1038,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
/* Incr page ref count to reuse on allocation failure
* so that it doesn't get freed while freeing SKB.
*/
- atomic_inc(&current_bd->data->_count);
+ page_ref_inc(current_bd->data);
goto out;
}
@@ -971,8 +1078,7 @@ static void qede_tpa_start(struct qede_dev *edev,
* start until its over and we don't want to risk allocation failing
* here, so re-allocate when aggregation will be over.
*/
- dma_unmap_addr_set(sw_rx_data_prod, mapping,
- dma_unmap_addr(replace_buf, mapping));
+ sw_rx_data_prod->mapping = replace_buf->mapping;
sw_rx_data_prod->data = replace_buf->data;
rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
@@ -1188,13 +1294,47 @@ err:
tpa_info->skb = NULL;
}
-static u8 qede_check_csum(u16 flag)
+static bool qede_tunn_exist(u16 flag)
+{
+ return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
+static u8 qede_check_tunn_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 tcsum = 0;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
{
u16 csum_flag = 0;
u8 csum = 0;
- if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
csum = QEDE_CSUM_UNNECESSARY;
@@ -1209,6 +1349,14 @@ static u8 qede_check_csum(u16 flag)
return csum;
}
+static u8 qede_check_csum(u16 flag)
+{
+ if (!qede_tunn_exist(flag))
+ return qede_check_notunn_csum(flag);
+ else
+ return qede_check_tunn_csum(flag);
+}
+
static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
struct qede_dev *edev = fp->edev;
@@ -1340,7 +1488,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
* freeing SKB.
*/
- atomic_inc(&sw_rx_data->data->_count);
+ page_ref_inc(sw_rx_data->data);
rxq->rx_alloc_errors++;
qede_recycle_rx_bd_ring(rxq, edev,
fp_cqe->bd_num);
@@ -1569,16 +1717,25 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
- edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
- edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
- edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
- edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
- edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
- edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
- edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
- edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
- edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
- edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+ edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
+ edev->stats.rx_128_to_255_byte_packets =
+ stats.rx_128_to_255_byte_packets;
+ edev->stats.rx_256_to_511_byte_packets =
+ stats.rx_256_to_511_byte_packets;
+ edev->stats.rx_512_to_1023_byte_packets =
+ stats.rx_512_to_1023_byte_packets;
+ edev->stats.rx_1024_to_1518_byte_packets =
+ stats.rx_1024_to_1518_byte_packets;
+ edev->stats.rx_1519_to_1522_byte_packets =
+ stats.rx_1519_to_1522_byte_packets;
+ edev->stats.rx_1519_to_2047_byte_packets =
+ stats.rx_1519_to_2047_byte_packets;
+ edev->stats.rx_2048_to_4095_byte_packets =
+ stats.rx_2048_to_4095_byte_packets;
+ edev->stats.rx_4096_to_9216_byte_packets =
+ stats.rx_4096_to_9216_byte_packets;
+ edev->stats.rx_9217_to_16383_byte_packets =
+ stats.rx_9217_to_16383_byte_packets;
edev->stats.rx_crc_errors = stats.rx_crc_errors;
edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
edev->stats.rx_pause_frames = stats.rx_pause_frames;
@@ -1652,6 +1809,49 @@ static struct rtnl_link_stats64 *qede_get_stats64(
return stats;
}
+#ifdef CONFIG_QED_SRIOV
+static int qede_get_vf_config(struct net_device *dev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
+}
+
+static int qede_set_vf_rate(struct net_device *dev, int vfidx,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
+ max_tx_rate);
+}
+
+static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
+}
+
+static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
+ int link_state)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
+}
+#endif
+
static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
struct qed_update_vport_params params;
@@ -1893,6 +2093,99 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
edev->accept_any_vlan = false;
}
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ netdev_features_t changes = features ^ dev->features;
+ bool need_reload = false;
+
+ /* No action needed if hardware GRO is disabled during driver load */
+ if (changes & NETIF_F_GRO) {
+ if (dev->features & NETIF_F_GRO)
+ need_reload = !edev->gro_disable;
+ else
+ need_reload = edev->gro_disable;
+ }
+
+ if (need_reload && netif_running(edev->ndev)) {
+ dev->features = features;
+ qede_reload(edev, NULL, NULL);
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_QEDE_VXLAN
+static void qede_add_vxlan_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(port);
+
+ if (edev->vxlan_dst_port)
+ return;
+
+ edev->vxlan_dst_port = t_port;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port);
+
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_vxlan_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(port);
+
+ if (t_port != edev->vxlan_dst_port)
+ return;
+
+ edev->vxlan_dst_port = 0;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port);
+
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
+#ifdef CONFIG_QEDE_GENEVE
+static void qede_add_geneve_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(port);
+
+ if (edev->geneve_dst_port)
+ return;
+
+ edev->geneve_dst_port = t_port;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_geneve_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(port);
+
+ if (t_port != edev->geneve_dst_port)
+ return;
+
+ edev->geneve_dst_port = 0;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -1901,9 +2194,28 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
+#ifdef CONFIG_QED_SRIOV
+ .ndo_set_vf_mac = qede_set_vf_mac,
+ .ndo_set_vf_vlan = qede_set_vf_vlan,
+#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
+#ifdef CONFIG_QED_SRIOV
+ .ndo_set_vf_link_state = qede_set_vf_link_state,
+ .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
+ .ndo_get_vf_config = qede_get_vf_config,
+ .ndo_set_vf_rate = qede_set_vf_rate,
+#endif
+#ifdef CONFIG_QEDE_VXLAN
+ .ndo_add_vxlan_port = qede_add_vxlan_port,
+ .ndo_del_vxlan_port = qede_del_vxlan_port,
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+ .ndo_add_geneve_port = qede_add_geneve_port,
+ .ndo_del_geneve_port = qede_del_geneve_port,
+#endif
};
/* -------------------------------------------------------------------------
@@ -1974,6 +2286,14 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
+ /* Encap features*/
+ hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO_ECN;
+ ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+
ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
NETIF_F_HIGHDMA;
ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
@@ -2074,6 +2394,8 @@ static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
+ struct qed_dev *cdev = edev->cdev;
+
mutex_lock(&edev->qede_lock);
if (edev->state == QEDE_STATE_OPEN) {
@@ -2081,6 +2403,24 @@ static void qede_sp_task(struct work_struct *work)
qede_config_rx_mode(edev->ndev);
}
+ if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
+ struct qed_tunn_params tunn_params;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
+ tunn_params.update_vxlan_port = 1;
+ tunn_params.vxlan_port = edev->vxlan_dst_port;
+ qed_ops->tunn_config(cdev, &tunn_params);
+ }
+
+ if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
+ struct qed_tunn_params tunn_params;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
+ tunn_params.update_geneve_port = 1;
+ tunn_params.geneve_port = edev->geneve_dst_port;
+ qed_ops->tunn_config(cdev, &tunn_params);
+ }
+
mutex_unlock(&edev->qede_lock);
}
@@ -2099,8 +2439,9 @@ enum qede_probe_mode {
};
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
- enum qede_probe_mode mode)
+ bool is_vf, enum qede_probe_mode mode)
{
+ struct qed_probe_params probe_params;
struct qed_slowpath_params params;
struct qed_dev_eth_info dev_info;
struct qede_dev *edev;
@@ -2110,8 +2451,12 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
if (unlikely(dp_level & QED_LEVEL_INFO))
pr_notice("Starting qede probe\n");
- cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
- dp_module, dp_level);
+ memset(&probe_params, 0, sizeof(probe_params));
+ probe_params.protocol = QED_PROTOCOL_ETH;
+ probe_params.dp_module = dp_module;
+ probe_params.dp_level = dp_level;
+ probe_params.is_vf = is_vf;
+ cdev = qed_ops->common->probe(pdev, &probe_params);
if (!cdev) {
rc = -ENODEV;
goto err0;
@@ -2145,6 +2490,9 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
goto err2;
}
+ if (is_vf)
+ edev->flags |= QEDE_FLAG_IS_VF;
+
qede_init_ndev(edev);
rc = register_netdev(edev->ndev);
@@ -2176,12 +2524,24 @@ err0:
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ bool is_vf = false;
u32 dp_module = 0;
u8 dp_level = 0;
+ switch ((enum qede_pci_private)id->driver_data) {
+ case QEDE_PRIVATE_VF:
+ if (debug & QED_LOG_VERBOSE_MASK)
+ dev_err(&pdev->dev, "Probing a VF\n");
+ is_vf = true;
+ break;
+ default:
+ if (debug & QED_LOG_VERBOSE_MASK)
+ dev_err(&pdev->dev, "Probing a PF\n");
+ }
+
qede_config_debug(debug, &dp_module, &dp_level);
- return __qede_probe(pdev, dp_module, dp_level,
+ return __qede_probe(pdev, dp_module, dp_level, is_vf,
QEDE_PROBE_NORMAL);
}
@@ -2320,7 +2680,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
if (replace_buf->data) {
dma_unmap_page(&edev->pdev->dev,
- dma_unmap_addr(replace_buf, mapping),
+ replace_buf->mapping,
PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(replace_buf->data);
}
@@ -2420,7 +2780,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev,
goto err;
}
- dma_unmap_addr_set(replace_buf, mapping, mapping);
+ replace_buf->mapping = mapping;
tpa_info->replace_buf.page_offset = 0;
tpa_info->replace_buf_mapping = mapping;
@@ -2871,15 +3231,16 @@ static int qede_stop_queues(struct qede_dev *edev)
return rc;
}
-static int qede_start_queues(struct qede_dev *edev)
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
int rc, tc, i;
int vlan_removal_en = 1;
struct qed_dev *cdev = edev->cdev;
- struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
struct qed_update_vport_params vport_update_params;
struct qed_queue_start_common_params q_params;
+ struct qed_dev_info *qed_info = &edev->dev_info.common;
struct qed_start_vport_params start = {0};
+ bool reset_rss_indir = false;
if (!edev->num_rss) {
DP_ERR(edev,
@@ -2971,19 +3332,59 @@ static int qede_start_queues(struct qede_dev *edev)
vport_update_params.update_vport_active_flg = 1;
vport_update_params.vport_active_flg = 1;
+ if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
+ qed_info->tx_switching) {
+ vport_update_params.update_tx_switching_flg = 1;
+ vport_update_params.tx_switching_flg = 1;
+ }
+
/* Fill struct with RSS params */
if (QEDE_RSS_CNT(edev) > 1) {
vport_update_params.update_rss_flg = 1;
- for (i = 0; i < 128; i++)
- rss_params->rss_ind_table[i] =
- ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
- netdev_rss_key_fill(rss_params->rss_key,
- sizeof(rss_params->rss_key));
+
+ /* Need to validate current RSS config uses valid entries */
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ if (edev->rss_params.rss_ind_table[i] >=
+ edev->num_rss) {
+ reset_rss_indir = true;
+ break;
+ }
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
+ reset_rss_indir) {
+ u16 val;
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ u16 indir_val;
+
+ val = QEDE_RSS_CNT(edev);
+ indir_val = ethtool_rxfh_indir_default(i, val);
+ edev->rss_params.rss_ind_table[i] = indir_val;
+ }
+ edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
+ netdev_rss_key_fill(edev->rss_params.rss_key,
+ sizeof(edev->rss_params.rss_key));
+ edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
+ edev->rss_params.rss_caps = QED_RSS_IPV4 |
+ QED_RSS_IPV6 |
+ QED_RSS_IPV4_TCP |
+ QED_RSS_IPV6_TCP;
+ edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+ }
+
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
} else {
- memset(rss_params, 0, sizeof(*rss_params));
+ memset(&vport_update_params.rss_params, 0,
+ sizeof(vport_update_params.rss_params));
}
- memcpy(&vport_update_params.rss_params, rss_params,
- sizeof(*rss_params));
rc = edev->ops->vport_update(cdev, &vport_update_params);
if (rc) {
@@ -3061,6 +3462,7 @@ out:
enum qede_load_mode {
QEDE_LOAD_NORMAL,
+ QEDE_LOAD_RELOAD,
};
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
@@ -3099,7 +3501,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
goto err3;
DP_INFO(edev, "Setup IRQs succeeded\n");
- rc = qede_start_queues(edev);
+ rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
if (rc)
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
@@ -3154,7 +3556,7 @@ void qede_reload(struct qede_dev *edev,
if (func)
func(edev, args);
- qede_load(edev, QEDE_LOAD_NORMAL);
+ qede_load(edev, QEDE_LOAD_RELOAD);
mutex_lock(&edev->qede_lock);
qede_config_rx_mode(edev->ndev);
@@ -3165,12 +3567,24 @@ void qede_reload(struct qede_dev *edev,
static int qede_open(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
+ int rc;
netif_carrier_off(ndev);
edev->ops->common->set_power_state(edev->cdev, PCI_D0);
- return qede_load(edev, QEDE_LOAD_NORMAL);
+ rc = qede_load(edev, QEDE_LOAD_NORMAL);
+
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_QEDE_VXLAN
+ vxlan_get_rx_port(ndev);
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+ geneve_get_rx_port(ndev);
+#endif
+ return 0;
}
static int qede_close(struct net_device *ndev)
@@ -3221,6 +3635,11 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p)
return -EFAULT;
}
+ if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
+ DP_NOTICE(edev, "qed prevents setting MAC\n");
+ return -EINVAL;
+ }
+
ether_addr_copy(ndev->dev_addr, addr->sa_data);
if (!netif_running(ndev)) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 7bd6f25b4..87c642d3b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -772,6 +772,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;
+ /* Ensure writes are complete before HW fetches Tx descriptors */
+ wmb();
qlcnic_update_cmd_producer(tx_ring);
return NETDEV_TX_OK;
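/* Hedged sketch of the pattern behind the wmb() added above: descriptor
 * stores must be globally visible before the producer/doorbell write that
 * lets the NIC fetch them. All names below are illustrative, not qlcnic's.
 */
static void publish_tx_desc_sketch(struct sketch_tx_ring *ring,
				   void __iomem *db)
{
	ring->desc[ring->producer].flags |= cpu_to_le64(SKETCH_OWN_HW);
	wmb();				/* descriptors before doorbell */
	writel(ring->producer, db);	/* NIC may fetch from here on */
}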
@@ -2220,7 +2222,7 @@ void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
if (!opcode)
return;
- ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
+ ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
desc = &sds_ring->desc_head[consumer];
desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 392f193cd..9fedc8f62 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -3952,8 +3952,14 @@ static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
{
- return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
- PCI_ERS_RESULT_RECOVERED;
+ pci_ers_result_t res;
+
+ rtnl_lock();
+ res = qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
+ PCI_ERS_RESULT_RECOVERED;
+ rtnl_unlock();
+
+ return res;
}
static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b28e73ea2..fd5d1c93b 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4687,7 +4687,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
/*
* Set up the operating parameters.
*/
- qdev->workqueue = create_singlethread_workqueue(ndev->name);
+ qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
@@ -4846,7 +4846,6 @@ static void ql_eeh_close(struct net_device *ndev)
}
/* Disabling the timer */
- del_timer_sync(&qdev->timer);
ql_cancel_all_work_sync(qdev);
for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4873,6 +4872,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
netif_device_detach(ndev);
+ del_timer_sync(&qdev->timer);
if (netif_running(ndev))
ql_eeh_close(ndev);
pci_disable_device(pdev);
@@ -4880,6 +4880,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
case pci_channel_io_perm_failure:
dev_err(&pdev->dev,
"%s: pci_channel_io_perm_failure.\n", __func__);
+ del_timer_sync(&qdev->timer);
ql_eeh_close(ndev);
set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
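/* Hedged note on the qlge workqueue change above: alloc_ordered_workqueue()
 * keeps the old singlethread semantics (one work item at a time, in order),
 * and WQ_MEM_RECLAIM adds a rescuer thread so the reset/MPI work can still
 * make progress under memory pressure. Minimal standalone shape:
 */
static int sketch_create_wq(struct ql_adapter *qdev, const char *name)
{
	qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
	return qdev->workqueue ? 0 : -ENOMEM;
}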
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 1ef03939d..6e2add979 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -719,7 +719,7 @@ qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
qca->stats.ring_full++;
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (qca->spi_thread &&
qca->spi_thread->state != TASK_RUNNING)
@@ -734,7 +734,7 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
struct qcaspi *qca = netdev_priv(dev);
netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
- jiffies, jiffies - dev->trans_start);
+ jiffies, jiffies - dev_trans_start(dev));
qca->net_dev->stats.tx_errors++;
/* Trigger tx queue flush and QCA7000 reset */
qca->sync = QCASPI_SYNC_UNKNOWN;
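/* Hedged sketch: netif_trans_update()/dev_trans_start() replace the direct
 * dev->trans_start accesses throughout this commit. In this kernel
 * generation the update helper is roughly equivalent to:
 */
static inline void netif_trans_update_sketch(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;	/* timestamp queue 0 only */
}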
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index d77d60ea8..5cb96785f 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -544,7 +544,7 @@ static void tx_timeout(struct net_device *dev)
dev->stats.tx_errors++;
/* Try to restart the adapter. */
hardware_init(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
dev->stats.tx_errors++;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e894dc7c9..e4030c3d7 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -345,7 +345,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
static int rx_buf_sz = 16383;
-static int use_dac;
+static int use_dac = -1;
static struct {
u32 msg_enable;
} debug = { -1 };
@@ -8206,20 +8206,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_mwi_2;
}
- tp->cp_cmd = 0;
-
- if ((sizeof(dma_addr_t) > 4) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
- tp->cp_cmd |= PCIDAC;
- dev->features |= NETIF_F_HIGHDMA;
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc < 0) {
- netif_err(tp, probe, dev, "DMA configuration failed\n");
- goto err_out_free_res_3;
- }
- }
-
/* ioremap MMIO region */
ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
if (!ioaddr) {
@@ -8235,6 +8221,25 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
rtl8169_get_mac_version(tp, dev, cfg->default_ver);
+ tp->cp_cmd = 0;
+
+ if ((sizeof(dma_addr_t) > 4) &&
+ (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
+ tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+
+ /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */
+ if (!pci_is_pcie(pdev))
+ tp->cp_cmd |= PCIDAC;
+ dev->features |= NETIF_F_HIGHDMA;
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "DMA configuration failed\n");
+ goto err_out_unmap_4;
+ }
+ }
+
rtl_init_rxcfg(tp);
rtl_irq_disable(tp);
@@ -8394,12 +8399,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
&tp->counters_phys_addr, GFP_KERNEL);
if (!tp->counters) {
rc = -ENOMEM;
- goto err_out_msi_4;
+ goto err_out_msi_5;
}
rc = register_netdev(dev);
if (rc < 0)
- goto err_out_cnt_5;
+ goto err_out_cnt_6;
pci_set_drvdata(pdev, dev);
@@ -8433,12 +8438,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
out:
return rc;
-err_out_cnt_5:
+err_out_cnt_6:
dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
tp->counters_phys_addr);
-err_out_msi_4:
+err_out_msi_5:
netif_napi_del(&tp->napi);
rtl_disable_msi(pdev, tp);
+err_out_unmap_4:
iounmap(ioaddr);
err_out_free_res_3:
pci_release_regions(pdev);
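/* Hedged note on the r8169 change above: use_dac now defaults to -1
 * ("auto"), which enables 64-bit DMA automatically on PCIe chips with
 * mac_version >= RTL_GIGA_MAC_VER_18, while 0/1 still force it off/on.
 * A matching module-parameter declaration would look like this; the
 * description wording is an assumption:
 */
module_param(use_dac, int, 0);
MODULE_PARM_DESC(use_dac,
		 "Enable PCI DAC. -1 auto-enables on PCIe, 0 disables, 1 forces on.");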
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b2160d1b9..4e5d5e953 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -157,6 +157,7 @@ enum ravb_reg {
TIC = 0x0378,
TIS = 0x037C,
ISS = 0x0380,
+ CIE = 0x0384, /* R-Car Gen3 only */
GCCR = 0x0390,
GMTT = 0x0394,
GPTC = 0x0398,
@@ -170,6 +171,15 @@ enum ravb_reg {
GCT0 = 0x03B8,
GCT1 = 0x03BC,
GCT2 = 0x03C0,
+ GIE = 0x03CC, /* R-Car Gen3 only */
+ GID = 0x03D0, /* R-Car Gen3 only */
+ DIL = 0x0440, /* R-Car Gen3 only */
+ RIE0 = 0x0460, /* R-Car Gen3 only */
+ RID0 = 0x0464, /* R-Car Gen3 only */
+ RIE2 = 0x0470, /* R-Car Gen3 only */
+ RID2 = 0x0474, /* R-Car Gen3 only */
+ TIE = 0x0478, /* R-Car Gen3 only */
+ TID = 0x047c, /* R-Car Gen3 only */
/* E-MAC registers */
ECMR = 0x0500,
@@ -556,6 +566,16 @@ enum ISS_BIT {
ISS_DPS15 = 0x80000000,
};
+/* CIE (R-Car Gen3 only) */
+enum CIE_BIT {
+ CIE_CRIE = 0x00000001,
+ CIE_CTIE = 0x00000100,
+ CIE_RQFM = 0x00010000,
+ CIE_CL0M = 0x00020000,
+ CIE_RFWL = 0x00040000,
+ CIE_RFFL = 0x00080000,
+};
+
/* GCCR */
enum GCCR_BIT {
GCCR_TCR = 0x00000003,
@@ -592,6 +612,188 @@ enum GIS_BIT {
GIS_PTMF = 0x00000004,
};
+/* GIE (R-Car Gen3 only) */
+enum GIE_BIT {
+ GIE_PTCS = 0x00000001,
+ GIE_PTOS = 0x00000002,
+ GIE_PTMS0 = 0x00000004,
+ GIE_PTMS1 = 0x00000008,
+ GIE_PTMS2 = 0x00000010,
+ GIE_PTMS3 = 0x00000020,
+ GIE_PTMS4 = 0x00000040,
+ GIE_PTMS5 = 0x00000080,
+ GIE_PTMS6 = 0x00000100,
+ GIE_PTMS7 = 0x00000200,
+ GIE_ATCS0 = 0x00010000,
+ GIE_ATCS1 = 0x00020000,
+ GIE_ATCS2 = 0x00040000,
+ GIE_ATCS3 = 0x00080000,
+ GIE_ATCS4 = 0x00100000,
+ GIE_ATCS5 = 0x00200000,
+ GIE_ATCS6 = 0x00400000,
+ GIE_ATCS7 = 0x00800000,
+ GIE_ATCS8 = 0x01000000,
+ GIE_ATCS9 = 0x02000000,
+ GIE_ATCS10 = 0x04000000,
+ GIE_ATCS11 = 0x08000000,
+ GIE_ATCS12 = 0x10000000,
+ GIE_ATCS13 = 0x20000000,
+ GIE_ATCS14 = 0x40000000,
+ GIE_ATCS15 = 0x80000000,
+};
+
+/* GID (R-Car Gen3 only) */
+enum GID_BIT {
+ GID_PTCD = 0x00000001,
+ GID_PTOD = 0x00000002,
+ GID_PTMD0 = 0x00000004,
+ GID_PTMD1 = 0x00000008,
+ GID_PTMD2 = 0x00000010,
+ GID_PTMD3 = 0x00000020,
+ GID_PTMD4 = 0x00000040,
+ GID_PTMD5 = 0x00000080,
+ GID_PTMD6 = 0x00000100,
+ GID_PTMD7 = 0x00000200,
+ GID_ATCD0 = 0x00010000,
+ GID_ATCD1 = 0x00020000,
+ GID_ATCD2 = 0x00040000,
+ GID_ATCD3 = 0x00080000,
+ GID_ATCD4 = 0x00100000,
+ GID_ATCD5 = 0x00200000,
+ GID_ATCD6 = 0x00400000,
+ GID_ATCD7 = 0x00800000,
+ GID_ATCD8 = 0x01000000,
+ GID_ATCD9 = 0x02000000,
+ GID_ATCD10 = 0x04000000,
+ GID_ATCD11 = 0x08000000,
+ GID_ATCD12 = 0x10000000,
+ GID_ATCD13 = 0x20000000,
+ GID_ATCD14 = 0x40000000,
+ GID_ATCD15 = 0x80000000,
+};
+
+/* RIE0 (R-Car Gen3 only) */
+enum RIE0_BIT {
+ RIE0_FRS0 = 0x00000001,
+ RIE0_FRS1 = 0x00000002,
+ RIE0_FRS2 = 0x00000004,
+ RIE0_FRS3 = 0x00000008,
+ RIE0_FRS4 = 0x00000010,
+ RIE0_FRS5 = 0x00000020,
+ RIE0_FRS6 = 0x00000040,
+ RIE0_FRS7 = 0x00000080,
+ RIE0_FRS8 = 0x00000100,
+ RIE0_FRS9 = 0x00000200,
+ RIE0_FRS10 = 0x00000400,
+ RIE0_FRS11 = 0x00000800,
+ RIE0_FRS12 = 0x00001000,
+ RIE0_FRS13 = 0x00002000,
+ RIE0_FRS14 = 0x00004000,
+ RIE0_FRS15 = 0x00008000,
+ RIE0_FRS16 = 0x00010000,
+ RIE0_FRS17 = 0x00020000,
+};
+
+/* RID0 (R-Car Gen3 only) */
+enum RID0_BIT {
+ RID0_FRD0 = 0x00000001,
+ RID0_FRD1 = 0x00000002,
+ RID0_FRD2 = 0x00000004,
+ RID0_FRD3 = 0x00000008,
+ RID0_FRD4 = 0x00000010,
+ RID0_FRD5 = 0x00000020,
+ RID0_FRD6 = 0x00000040,
+ RID0_FRD7 = 0x00000080,
+ RID0_FRD8 = 0x00000100,
+ RID0_FRD9 = 0x00000200,
+ RID0_FRD10 = 0x00000400,
+ RID0_FRD11 = 0x00000800,
+ RID0_FRD12 = 0x00001000,
+ RID0_FRD13 = 0x00002000,
+ RID0_FRD14 = 0x00004000,
+ RID0_FRD15 = 0x00008000,
+ RID0_FRD16 = 0x00010000,
+ RID0_FRD17 = 0x00020000,
+};
+
+/* RIE2 (R-Car Gen3 only) */
+enum RIE2_BIT {
+ RIE2_QFS0 = 0x00000001,
+ RIE2_QFS1 = 0x00000002,
+ RIE2_QFS2 = 0x00000004,
+ RIE2_QFS3 = 0x00000008,
+ RIE2_QFS4 = 0x00000010,
+ RIE2_QFS5 = 0x00000020,
+ RIE2_QFS6 = 0x00000040,
+ RIE2_QFS7 = 0x00000080,
+ RIE2_QFS8 = 0x00000100,
+ RIE2_QFS9 = 0x00000200,
+ RIE2_QFS10 = 0x00000400,
+ RIE2_QFS11 = 0x00000800,
+ RIE2_QFS12 = 0x00001000,
+ RIE2_QFS13 = 0x00002000,
+ RIE2_QFS14 = 0x00004000,
+ RIE2_QFS15 = 0x00008000,
+ RIE2_QFS16 = 0x00010000,
+ RIE2_QFS17 = 0x00020000,
+ RIE2_RFFS = 0x80000000,
+};
+
+/* RID2 (R-Car Gen3 only) */
+enum RID2_BIT {
+ RID2_QFD0 = 0x00000001,
+ RID2_QFD1 = 0x00000002,
+ RID2_QFD2 = 0x00000004,
+ RID2_QFD3 = 0x00000008,
+ RID2_QFD4 = 0x00000010,
+ RID2_QFD5 = 0x00000020,
+ RID2_QFD6 = 0x00000040,
+ RID2_QFD7 = 0x00000080,
+ RID2_QFD8 = 0x00000100,
+ RID2_QFD9 = 0x00000200,
+ RID2_QFD10 = 0x00000400,
+ RID2_QFD11 = 0x00000800,
+ RID2_QFD12 = 0x00001000,
+ RID2_QFD13 = 0x00002000,
+ RID2_QFD14 = 0x00004000,
+ RID2_QFD15 = 0x00008000,
+ RID2_QFD16 = 0x00010000,
+ RID2_QFD17 = 0x00020000,
+ RID2_RFFD = 0x80000000,
+};
+
+/* TIE (R-Car Gen3 only) */
+enum TIE_BIT {
+ TIE_FTS0 = 0x00000001,
+ TIE_FTS1 = 0x00000002,
+ TIE_FTS2 = 0x00000004,
+ TIE_FTS3 = 0x00000008,
+ TIE_TFUS = 0x00000100,
+ TIE_TFWS = 0x00000200,
+ TIE_MFUS = 0x00000400,
+ TIE_MFWS = 0x00000800,
+ TIE_TDPS0 = 0x00010000,
+ TIE_TDPS1 = 0x00020000,
+ TIE_TDPS2 = 0x00040000,
+ TIE_TDPS3 = 0x00080000,
+};
+
+/* TID (R-Car Gen3 only) */
+enum TID_BIT {
+ TID_FTD0 = 0x00000001,
+ TID_FTD1 = 0x00000002,
+ TID_FTD2 = 0x00000004,
+ TID_FTD3 = 0x00000008,
+ TID_TFUD = 0x00000100,
+ TID_TFWD = 0x00000200,
+ TID_MFUD = 0x00000400,
+ TID_MFWD = 0x00000800,
+ TID_TDPD0 = 0x00010000,
+ TID_TDPD1 = 0x00020000,
+ TID_TDPD2 = 0x00040000,
+ TID_TDPD3 = 0x00080000,
+};
+
/* ECMR */
enum ECMR_BIT {
ECMR_PRM = 0x00000001,
@@ -817,6 +1019,8 @@ struct ravb_private {
int duplex;
int emac_irq;
enum ravb_chip_id chip_id;
+ int rx_irqs[NUM_RX_QUEUE];
+ int tx_irqs[NUM_TX_QUEUE];
unsigned no_avb_link:1;
unsigned avb_link_active_low:1;
@@ -841,7 +1045,7 @@ void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
u32 set);
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value);
-irqreturn_t ravb_ptp_interrupt(struct net_device *ndev);
+void ravb_ptp_interrupt(struct net_device *ndev);
void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev);
void ravb_ptp_stop(struct net_device *ndev);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4277d0c12..867caf6e7 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -42,6 +42,16 @@
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
+static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
+ "ch0", /* RAVB_BE */
+ "ch1", /* RAVB_NC */
+};
+
+static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
+ "ch18", /* RAVB_BE */
+ "ch19", /* RAVB_NC */
+};
+
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
u32 set)
{
@@ -236,10 +246,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
rx_desc = &priv->rx_ring[q][i];
- /* The size of the buffer should be on 16-byte boundary. */
- rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+ rx_desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- ALIGN(PKT_BUF_SZ, 16),
+ PKT_BUF_SZ,
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
@@ -365,6 +374,7 @@ static void ravb_emac_init(struct net_device *ndev)
/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
+ struct ravb_private *priv = netdev_priv(ndev);
int error;
/* Set CONFIG mode */
@@ -401,6 +411,12 @@ static int ravb_dmac_init(struct net_device *ndev)
ravb_write(ndev, TCCR_TFEN, TCCR);
/* Interrupt init: */
+ if (priv->chip_id == RCAR_GEN3) {
+ /* Clear DIL.DPLx */
+ ravb_write(ndev, 0, DIL);
+ /* Set queue specific interrupt */
+ ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
+ }
/* Frame receive */
ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
/* Disable FIFO full warning */
@@ -541,7 +557,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- ALIGN(PKT_BUF_SZ, 16),
+ PKT_BUF_SZ,
DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -571,8 +587,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q][entry];
- /* The size of the buffer should be on 16-byte boundary. */
- desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+ desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
if (!priv->rx_skb[q][entry]) {
skb = netdev_alloc_skb(ndev,
@@ -643,7 +658,7 @@ static int ravb_stop_dma(struct net_device *ndev)
}
/* E-MAC interrupt handler */
-static void ravb_emac_interrupt(struct net_device *ndev)
+static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
u32 ecsr, psr;
@@ -669,6 +684,18 @@ static void ravb_emac_interrupt(struct net_device *ndev)
}
}
+static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ spin_lock(&priv->lock);
+ ravb_emac_interrupt_unlocked(ndev);
+ mmiowb();
+ spin_unlock(&priv->lock);
+ return IRQ_HANDLED;
+}
+
/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
@@ -695,6 +722,50 @@ static void ravb_error_interrupt(struct net_device *ndev)
}
}
+static bool ravb_queue_interrupt(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ris0 = ravb_read(ndev, RIS0);
+ u32 ric0 = ravb_read(ndev, RIC0);
+ u32 tis = ravb_read(ndev, TIS);
+ u32 tic = ravb_read(ndev, TIC);
+
+ if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
+ if (napi_schedule_prep(&priv->napi[q])) {
+ /* Mask RX and TX interrupts */
+ if (priv->chip_id == RCAR_GEN2) {
+ ravb_write(ndev, ric0 & ~BIT(q), RIC0);
+ ravb_write(ndev, tic & ~BIT(q), TIC);
+ } else {
+ ravb_write(ndev, BIT(q), RID0);
+ ravb_write(ndev, BIT(q), TID);
+ }
+ __napi_schedule(&priv->napi[q]);
+ } else {
+ netdev_warn(ndev,
+ "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
+ ris0, ric0);
+ netdev_warn(ndev,
+ " tx status 0x%08x, tx mask 0x%08x.\n",
+ tis, tic);
+ }
+ return true;
+ }
+ return false;
+}
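/* Hedged sketch: Gen3's set/clear register pairs let one queue's RX/TX
 * interrupts be masked with plain writes -- no read-modify-write of
 * RIC0/TIC under the lock, as the Gen2 branch above still needs.
 */
static void ravb_mask_queue_irqs_gen3_sketch(struct net_device *ndev, int q)
{
	ravb_write(ndev, BIT(q), RID0);	/* write-1-to-disable RX irq */
	ravb_write(ndev, BIT(q), TID);	/* write-1-to-disable TX irq */
}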
+
+static bool ravb_timestamp_interrupt(struct net_device *ndev)
+{
+ u32 tis = ravb_read(ndev, TIS);
+
+ if (tis & TIS_TFUF) {
+ ravb_write(ndev, ~TIS_TFUF, TIS);
+ ravb_get_tx_tstamp(ndev);
+ return true;
+ }
+ return false;
+}
+
static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
@@ -708,63 +779,102 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
/* Received and transmitted interrupts */
if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
- u32 ris0 = ravb_read(ndev, RIS0);
- u32 ric0 = ravb_read(ndev, RIC0);
- u32 tis = ravb_read(ndev, TIS);
- u32 tic = ravb_read(ndev, TIC);
int q;
/* Timestamp updated */
- if (tis & TIS_TFUF) {
- ravb_write(ndev, ~TIS_TFUF, TIS);
- ravb_get_tx_tstamp(ndev);
+ if (ravb_timestamp_interrupt(ndev))
result = IRQ_HANDLED;
- }
/* Network control and best effort queue RX/TX */
for (q = RAVB_NC; q >= RAVB_BE; q--) {
- if (((ris0 & ric0) & BIT(q)) ||
- ((tis & tic) & BIT(q))) {
- if (napi_schedule_prep(&priv->napi[q])) {
- /* Mask RX and TX interrupts */
- ric0 &= ~BIT(q);
- tic &= ~BIT(q);
- ravb_write(ndev, ric0, RIC0);
- ravb_write(ndev, tic, TIC);
- __napi_schedule(&priv->napi[q]);
- } else {
- netdev_warn(ndev,
- "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
- ris0, ric0);
- netdev_warn(ndev,
- " tx status 0x%08x, tx mask 0x%08x.\n",
- tis, tic);
- }
+ if (ravb_queue_interrupt(ndev, q))
result = IRQ_HANDLED;
- }
}
}
/* E-MAC status summary */
if (iss & ISS_MS) {
- ravb_emac_interrupt(ndev);
+ ravb_emac_interrupt_unlocked(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ /* Error status summary */
+ if (iss & ISS_ES) {
+ ravb_error_interrupt(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ /* gPTP interrupt status summary */
+ if (iss & ISS_CGIS) {
+ ravb_ptp_interrupt(ndev);
result = IRQ_HANDLED;
}
+ mmiowb();
+ spin_unlock(&priv->lock);
+ return result;
+}
+
+/* Timestamp/Error/gPTP interrupt handler */
+static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ravb_private *priv = netdev_priv(ndev);
+ irqreturn_t result = IRQ_NONE;
+ u32 iss;
+
+ spin_lock(&priv->lock);
+ /* Get interrupt status */
+ iss = ravb_read(ndev, ISS);
+
+ /* Timestamp updated */
+ if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
+ result = IRQ_HANDLED;
+
/* Error status summary */
if (iss & ISS_ES) {
ravb_error_interrupt(ndev);
result = IRQ_HANDLED;
}
- if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED)
+ /* gPTP interrupt status summary */
+ if (iss & ISS_CGIS) {
+ ravb_ptp_interrupt(ndev);
result = IRQ_HANDLED;
+ }
mmiowb();
spin_unlock(&priv->lock);
return result;
}
+static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
+{
+ struct net_device *ndev = dev_id;
+ struct ravb_private *priv = netdev_priv(ndev);
+ irqreturn_t result = IRQ_NONE;
+
+ spin_lock(&priv->lock);
+
+ /* Network control/Best effort queue RX/TX */
+ if (ravb_queue_interrupt(ndev, q))
+ result = IRQ_HANDLED;
+
+ mmiowb();
+ spin_unlock(&priv->lock);
+ return result;
+}
+
+static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
+{
+ return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
+}
+
+static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
+{
+ return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
+}
+
static int ravb_poll(struct napi_struct *napi, int budget)
{
struct net_device *ndev = napi->dev;
@@ -804,8 +914,13 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Re-enable RX/TX interrupts */
spin_lock_irqsave(&priv->lock, flags);
- ravb_modify(ndev, RIC0, mask, mask);
- ravb_modify(ndev, TIC, mask, mask);
+ if (priv->chip_id == RCAR_GEN2) {
+ ravb_modify(ndev, RIC0, mask, mask);
+ ravb_modify(ndev, TIC, mask, mask);
+ } else {
+ ravb_write(ndev, mask, RIE0);
+ ravb_write(ndev, mask, TIE);
+ }
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1208,35 +1323,72 @@ static const struct ethtool_ops ravb_ethtool_ops = {
.get_ts_info = ravb_get_ts_info,
};
+static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
+ struct net_device *ndev, struct device *dev,
+ const char *ch)
+{
+ char *name;
+ int error;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
+ if (!name)
+ return -ENOMEM;
+ error = request_irq(irq, handler, 0, name, ndev);
+ if (error)
+ netdev_err(ndev, "cannot request IRQ %s\n", name);
+
+ return error;
+}
+
/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
int error;
napi_enable(&priv->napi[RAVB_BE]);
napi_enable(&priv->napi[RAVB_NC]);
- error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
- ndev);
- if (error) {
- netdev_err(ndev, "cannot request IRQ\n");
- goto out_napi_off;
- }
-
- if (priv->chip_id == RCAR_GEN3) {
- error = request_irq(priv->emac_irq, ravb_interrupt,
- IRQF_SHARED, ndev->name, ndev);
+ if (priv->chip_id == RCAR_GEN2) {
+ error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
+ ndev->name, ndev);
if (error) {
netdev_err(ndev, "cannot request IRQ\n");
- goto out_free_irq;
+ goto out_napi_off;
}
+ } else {
+ error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
+ dev, "ch22:multi");
+ if (error)
+ goto out_napi_off;
+ error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
+ dev, "ch24:emac");
+ if (error)
+ goto out_free_irq;
+ error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
+ ndev, dev, "ch0:rx_be");
+ if (error)
+ goto out_free_irq_emac;
+ error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
+ ndev, dev, "ch18:tx_be");
+ if (error)
+ goto out_free_irq_be_rx;
+ error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
+ ndev, dev, "ch1:rx_nc");
+ if (error)
+ goto out_free_irq_be_tx;
+ error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
+ ndev, dev, "ch19:tx_nc");
+ if (error)
+ goto out_free_irq_nc_rx;
}
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
- goto out_free_irq2;
+ goto out_free_irq_nc_tx;
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
@@ -1256,9 +1408,18 @@ out_ptp_stop:
/* Stop PTP Clock driver */
if (priv->chip_id == RCAR_GEN2)
ravb_ptp_stop(ndev);
-out_free_irq2:
- if (priv->chip_id == RCAR_GEN3)
- free_irq(priv->emac_irq, ndev);
+out_free_irq_nc_tx:
+ if (priv->chip_id == RCAR_GEN2)
+ goto out_free_irq;
+ free_irq(priv->tx_irqs[RAVB_NC], ndev);
+out_free_irq_nc_rx:
+ free_irq(priv->rx_irqs[RAVB_NC], ndev);
+out_free_irq_be_tx:
+ free_irq(priv->tx_irqs[RAVB_BE], ndev);
+out_free_irq_be_rx:
+ free_irq(priv->rx_irqs[RAVB_BE], ndev);
+out_free_irq_emac:
+ free_irq(priv->emac_irq, ndev);
out_free_irq:
free_irq(ndev->irq, ndev);
out_napi_off:
@@ -1506,8 +1667,13 @@ static int ravb_close(struct net_device *ndev)
priv->phydev = NULL;
}
- if (priv->chip_id == RCAR_GEN3)
+ if (priv->chip_id != RCAR_GEN2) {
+ free_irq(priv->tx_irqs[RAVB_NC], ndev);
+ free_irq(priv->rx_irqs[RAVB_NC], ndev);
+ free_irq(priv->tx_irqs[RAVB_BE], ndev);
+ free_irq(priv->rx_irqs[RAVB_BE], ndev);
free_irq(priv->emac_irq, ndev);
+ }
free_irq(ndev->irq, ndev);
napi_disable(&priv->napi[RAVB_NC]);
@@ -1718,6 +1884,7 @@ static int ravb_probe(struct platform_device *pdev)
struct net_device *ndev;
int error, irq, q;
struct resource *res;
+ int i;
if (!np) {
dev_err(&pdev->dev,
@@ -1787,6 +1954,22 @@ static int ravb_probe(struct platform_device *pdev)
goto out_release;
}
priv->emac_irq = irq;
+ for (i = 0; i < NUM_RX_QUEUE; i++) {
+ irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->rx_irqs[i] = irq;
+ }
+ for (i = 0; i < NUM_TX_QUEUE; i++) {
+ irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->tx_irqs[i] = irq;
+ }
}
priv->chip_id = chip_id;
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 57992ccc4..eede70ec3 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -194,7 +194,12 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
priv->ptp.extts[req->index] = on;
spin_lock_irqsave(&priv->lock, flags);
- ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
+ else if (on)
+ ravb_write(ndev, GIE_PTCS, GIE);
+ else
+ ravb_write(ndev, GID_PTCD, GID);
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -241,7 +246,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
error = ravb_ptp_update_compare(priv, (u32)start_ns);
if (!error) {
/* Unmask interrupt */
- ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
+ else
+ ravb_write(ndev, GIE_PTMS0, GIE);
}
} else {
spin_lock_irqsave(&priv->lock, flags);
@@ -250,7 +258,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
perout->period = 0;
/* Mask interrupt */
- ravb_modify(ndev, GIC, GIC_PTME, 0);
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_modify(ndev, GIC, GIC_PTME, 0);
+ else
+ ravb_write(ndev, GID_PTMD0, GID);
}
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -285,7 +296,7 @@ static const struct ptp_clock_info ravb_ptp_info = {
};
/* Caller must hold the lock */
-irqreturn_t ravb_ptp_interrupt(struct net_device *ndev)
+void ravb_ptp_interrupt(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
u32 gis = ravb_read(ndev, GIS);
@@ -308,12 +319,7 @@ irqreturn_t ravb_ptp_interrupt(struct net_device *ndev)
}
}
- if (gis) {
- ravb_write(ndev, ~gis, GIS);
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
+ ravb_write(ndev, ~gis, GIS);
}
void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index ceea74cc2..04cd39f66 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -482,7 +482,7 @@ static void sh_eth_chip_reset(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
/* reset device */
- sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
+ sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
mdelay(1);
}
@@ -537,11 +537,7 @@ static struct sh_eth_cpu_data r7s72100_data = {
static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
- struct sh_eth_private *mdp = netdev_priv(ndev);
-
- /* reset device */
- sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
- mdelay(1);
+ sh_eth_chip_reset(ndev);
sh_eth_select_mii(ndev);
}
@@ -725,8 +721,8 @@ static struct sh_eth_cpu_data sh7757_data = {
#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
- int i;
u32 mahr[2], malr[2];
+ int i;
/* save MAHR and MALR */
for (i = 0; i < 2; i++) {
@@ -734,9 +730,7 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev)
mahr[i] = ioread32((void *)GIGA_MAHR(i));
}
- /* reset device */
- iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
- mdelay(1);
+ sh_eth_chip_reset(ndev);
/* restore MAHR and MALR */
for (i = 0; i < 2; i++) {
@@ -899,7 +893,7 @@ static int sh_eth_check_reset(struct net_device *ndev)
int cnt = 100;
while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
+ if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
break;
mdelay(1);
cnt--;
@@ -1229,7 +1223,7 @@ ring_free:
return -ENOMEM;
}
-static int sh_eth_dev_init(struct net_device *ndev, bool start)
+static int sh_eth_dev_init(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int ret;
@@ -1279,10 +1273,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
RFLR);
sh_eth_modify(ndev, EESR, 0, 0);
- if (start) {
- mdp->irq_enabled = true;
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
- }
+ mdp->irq_enabled = true;
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
/* PAUSE Prohibition */
sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
@@ -1295,8 +1287,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
/* E-MAC Interrupt Enable register */
- if (start)
- sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
+ sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
/* Set MAC address */
update_mac_address(ndev);
@@ -1309,10 +1300,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
if (mdp->cd->tpauser)
sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
- if (start) {
- /* Setting the Rx mode will start the Rx process. */
- sh_eth_write(ndev, EDRRR_R, EDRRR);
- }
+ /* Setting the Rx mode will start the Rx process. */
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
return ret;
}
@@ -2194,7 +2183,7 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
__func__);
return ret;
}
- ret = sh_eth_dev_init(ndev, true);
+ ret = sh_eth_dev_init(ndev);
if (ret < 0) {
netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
__func__);
@@ -2246,7 +2235,7 @@ static int sh_eth_open(struct net_device *ndev)
goto out_free_irq;
/* device init */
- ret = sh_eth_dev_init(ndev, true);
+ ret = sh_eth_dev_init(ndev);
if (ret)
goto out_free_irq;
@@ -2299,7 +2288,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
}
/* device init */
- sh_eth_dev_init(ndev, true);
+ sh_eth_dev_init(ndev);
netif_start_queue(ndev);
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 8fa4ef3a7..c62380e34 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -394,7 +394,7 @@ enum RPADIR_BIT {
#define DEFAULT_FDR_INIT 0x00000707
/* ARSTR */
-enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
+enum ARSTR_BIT { ARSTR_ARST = 0x00000001, };
/* TSU_FWEN0 */
enum TSU_FWEN0_BIT {
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index ca7336605..c2bd5378f 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -572,7 +572,7 @@ static inline int sgiseeq_reset(struct net_device *dev)
if (err)
return err;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
return 0;
@@ -648,7 +648,7 @@ static void timeout(struct net_device *dev)
printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
sgiseeq_reset(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0705ec869..097f363f1 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx)
#ifdef CONFIG_RFS_ACCEL
if (efx->type->offload_features & NETIF_F_NTUPLE) {
- efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
- sizeof(*efx->rps_flow_id),
- GFP_KERNEL);
- if (!efx->rps_flow_id) {
+ struct efx_channel *channel;
+ int i, success = 1;
+
+ efx_for_each_channel(channel, efx) {
+ channel->rps_flow_id =
+ kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*channel->rps_flow_id),
+ GFP_KERNEL);
+ if (!channel->rps_flow_id)
+ success = 0;
+ else
+ for (i = 0;
+ i < efx->type->max_rx_ip_filters;
+ ++i)
+ channel->rps_flow_id[i] =
+ RPS_FLOW_ID_INVALID;
+ }
+
+ if (!success) {
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
efx->type->filter_table_remove(efx);
rc = -ENOMEM;
goto out_unlock;
}
+
+ efx->rps_expire_index = efx->rps_expire_channel = 0;
}
#endif
out_unlock:
@@ -1744,7 +1763,10 @@ out_unlock:
static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
- kfree(efx->rps_flow_id);
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
#endif
down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
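/* Hedged note: moving rps_flow_id from struct efx_nic into each channel
 * (here and in net_driver.h below) keeps ARFS bookkeeping per RX queue.
 * Sketch of the sentinel-initialised allocation now done per channel:
 */
static u32 *alloc_flow_ids_sketch(unsigned int n)
{
	unsigned int i;
	u32 *ids = kcalloc(n, sizeof(*ids), GFP_KERNEL);

	if (!ids)
		return NULL;
	for (i = 0; i < n; i++)
		ids[i] = RPS_FLOW_ID_INVALID;	/* sentinel: slot unused */
	return ids;
}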
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 133e9e35b..4c83739d1 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -104,7 +104,8 @@ int efx_farch_test_registers(struct efx_nic *efx,
const struct efx_farch_register_test *regs,
size_t n_regs)
{
- unsigned address = 0, i, j;
+ unsigned address = 0;
+ int i, j;
efx_oword_t mask, imask, original, reg, buf;
for (i = 0; i < n_regs; ++i) {
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 7f295c4d7..2a9228a6e 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -189,11 +189,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
- result |= SUPPORTED_FIBRE;
- break;
-
case MC_CMD_MEDIA_QSFP_PLUS:
result |= SUPPORTED_FIBRE;
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ result |= SUPPORTED_1000baseT_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ result |= SUPPORTED_10000baseT_Full;
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
result |= SUPPORTED_40000baseCR4_Full;
break;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 38c422321..d13ddf970 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -403,6 +403,8 @@ enum efx_sync_events_state {
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ * indexed by filter ID
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
* @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -446,6 +448,8 @@ struct efx_channel {
unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
unsigned int rfs_filters_added;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+ u32 *rps_flow_id;
#endif
unsigned n_rx_tobe_disc;
@@ -889,9 +893,9 @@ struct vfdi_status;
* @filter_sem: Filter table rw_semaphore, for freeing the table
* @filter_lock: Filter table lock, for mere content changes
* @filter_state: Architecture-dependent filter table state
- * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
- * indexed by filter ID
- * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @rps_expire_channel: Next channel to check for expiry
+ * @rps_expire_index: Next index to check for expiry in
+ * @rps_expire_channel's @rps_flow_id
* @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called.
@@ -1035,7 +1039,7 @@ struct efx_nic {
spinlock_t filter_lock;
void *filter_state;
#ifdef CONFIG_RFS_ACCEL
- u32 *rps_flow_id;
+ unsigned int rps_expire_channel;
unsigned int rps_expire_index;
#endif
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8956995b2..02b0b5272 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_filter_spec spec;
- const __be16 *ports;
- __be16 ether_type;
- int nhoff;
+ struct flow_keys fk;
int rc;
- /* The core RPS/RFS code has already parsed and validated
- * VLAN, IP and transport headers. We assume they are in the
- * header area.
- */
-
- if (skb->protocol == htons(ETH_P_8021Q)) {
- const struct vlan_hdr *vh =
- (const struct vlan_hdr *)skb->data;
+ if (flow_id == RPS_FLOW_ID_INVALID)
+ return -EINVAL;
- /* We can't filter on the IP 5-tuple and the vlan
- * together, so just strip the vlan header and filter
- * on the IP part.
- */
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
- ether_type = vh->h_vlan_encapsulated_proto;
- nhoff = sizeof(struct vlan_hdr);
- } else {
- ether_type = skb->protocol;
- nhoff = 0;
- }
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return -EPROTONOSUPPORT;
- if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
+ if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+ if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
return -EPROTONOSUPPORT;
efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
@@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
- spec.ether_type = ether_type;
-
- if (ether_type == htons(ETH_P_IP)) {
- const struct iphdr *ip =
- (const struct iphdr *)(skb->data + nhoff);
-
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
- if (ip_is_fragment(ip))
- return -EPROTONOSUPPORT;
- spec.ip_proto = ip->protocol;
- spec.rem_host[0] = ip->saddr;
- spec.loc_host[0] = ip->daddr;
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
- ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+ spec.ether_type = fk.basic.n_proto;
+ spec.ip_proto = fk.basic.ip_proto;
+
+ if (fk.basic.n_proto == htons(ETH_P_IP)) {
+ spec.rem_host[0] = fk.addrs.v4addrs.src;
+ spec.loc_host[0] = fk.addrs.v4addrs.dst;
} else {
- const struct ipv6hdr *ip6 =
- (const struct ipv6hdr *)(skb->data + nhoff);
-
- EFX_BUG_ON_PARANOID(skb_headlen(skb) <
- nhoff + sizeof(*ip6) + 4);
- spec.ip_proto = ip6->nexthdr;
- memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
- memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
- ports = (const __be16 *)(ip6 + 1);
+ memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
+ memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
}
- spec.rem_port = ports[0];
- spec.loc_port = ports[1];
+ spec.rem_port = fk.ports.src;
+ spec.loc_port = fk.ports.dst;
rc = efx->type->filter_rfs_insert(efx, &spec);
if (rc < 0)
return rc;
/* Remember this so we can check whether to expire the filter later */
- efx->rps_flow_id[rc] = flow_id;
- channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+ channel = efx_get_channel(efx, rxq_index);
+ channel->rps_flow_id[rc] = flow_id;
++channel->rfs_filters_added;
- if (ether_type == htons(ETH_P_IP))
+ if (spec.ether_type == htons(ETH_P_IP))
netif_info(efx, rx_status, efx->net_dev,
"steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
- spec.rem_host, ntohs(ports[0]), spec.loc_host,
- ntohs(ports[1]), rxq_index, flow_id, rc);
+ spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+ ntohs(spec.loc_port), rxq_index, flow_id, rc);
else
netif_info(efx, rx_status, efx->net_dev,
"steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
- spec.rem_host, ntohs(ports[0]), spec.loc_host,
- ntohs(ports[1]), rxq_index, flow_id, rc);
+ spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+ ntohs(spec.loc_port), rxq_index, flow_id, rc);
return rc;
}
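/* Hedged sketch: the efx_filter_rfs() rewrite above leans on the core flow
 * dissector instead of hand-parsing VLAN/IP/transport headers. The minimal
 * shape of that pattern:
 */
static int classify_skb_sketch(const struct sk_buff *skb)
{
	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;	/* could not parse */
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
		return -EPROTONOSUPPORT;	/* no 5-tuple for fragments */

	/* fk.basic.n_proto, fk.basic.ip_proto, fk.addrs and fk.ports now
	 * hold the parsed 5-tuple, exactly as consumed above.
	 */
	return 0;
}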
@@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
- unsigned int index, size;
+ unsigned int channel_idx, index, size;
u32 flow_id;
if (!spin_trylock_bh(&efx->filter_lock))
return false;
expire_one = efx->type->filter_rfs_expire_one;
+ channel_idx = efx->rps_expire_channel;
index = efx->rps_expire_index;
size = efx->type->max_rx_ip_filters;
while (quota--) {
- flow_id = efx->rps_flow_id[index];
- if (expire_one(efx, flow_id, index))
+ struct efx_channel *channel = efx_get_channel(efx, channel_idx);
+ flow_id = channel->rps_flow_id[index];
+
+ if (flow_id != RPS_FLOW_ID_INVALID &&
+ expire_one(efx, flow_id, index)) {
netif_info(efx, rx_status, efx->net_dev,
- "expired filter %d [flow %u]\n",
- index, flow_id);
- if (++index == size)
+ "expired filter %d [queue %u flow %u]\n",
+ index, channel_idx, flow_id);
+ channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+ }
+ if (++index == size) {
+ if (++channel_idx == efx->n_channels)
+ channel_idx = 0;
index = 0;
+ }
}
+ efx->rps_expire_channel = channel_idx;
efx->rps_expire_index = index;
spin_unlock_bh(&efx->filter_lock);
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 5eac523b4..aaa80f138 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -708,7 +708,7 @@ static int meth_tx(struct sk_buff *skb, struct net_device *dev)
mace->eth.dma_ctrl = priv->dma_ctrl;
meth_add_to_tx_ring(priv, skb);
- dev->trans_start = jiffies; /* save the timestamp */
+ netif_trans_update(dev); /* save the timestamp */
/* If TX ring is full, tell the upper layer to stop sending packets */
if (meth_tx_full(dev)) {
@@ -756,7 +756,7 @@ static void meth_tx_timeout(struct net_device *dev)
/* Enable interrupt */
spin_unlock_irqrestore(&priv->meth_lock, flags);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index fd812d2e5..95001ee40 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1575,7 +1575,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
spin_unlock_irqrestore(&sis_priv->lock, flags);
- net_dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(net_dev); /* prevent tx timeout */
/* load Transmit Descriptor Register */
sw32(txdp, sis_priv->tx_ring_dma);
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 443f1da9f..7186b8926 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -889,7 +889,7 @@ static void epic_tx_timeout(struct net_device *dev)
ew32(COMMAND, TxQueued);
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
if (!ep->tx_full)
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index a733868a4..cb49c9654 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -499,7 +499,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
/* DMA complete IRQ will free buffer and set jiffies */
#else
SMC_PUSH_DATA(lp, buf, len);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev_kfree_skb_irq(skb);
#endif
if (!lp->tx_throttle) {
@@ -1189,7 +1189,7 @@ smc911x_tx_dma_irq(void *data)
DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
BUG_ON(skb == NULL);
dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev_kfree_skb_irq(skb);
lp->current_tx_skb = NULL;
if (lp->pending_tx_skb != NULL)
@@ -1283,7 +1283,7 @@ static void smc911x_timeout(struct net_device *dev)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 664f59697..d496888b8 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -663,7 +663,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
lp->saved_skb = NULL;
dev_kfree_skb_any (skb);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* we can send another packet */
netif_wake_queue(dev);
@@ -1104,7 +1104,7 @@ static void smc_timeout(struct net_device *dev)
/* "kick" the adaptor */
smc_reset( dev->base_addr );
smc_enable( dev->base_addr );
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* clear anything saved */
((struct smc_local *)netdev_priv(dev))->saved_skb = NULL;
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index e841e1eb8..e0e001566 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1172,7 +1172,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
smc->saved_skb = NULL;
dev_kfree_skb_irq(skb);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_start_queue(dev);
}
@@ -1187,7 +1187,7 @@ static void smc_tx_timeout(struct net_device *dev)
inw(ioaddr)&0xff, inw(ioaddr + 2));
dev->stats.tx_errors++;
smc_reset(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
smc->saved_skb = NULL;
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index c5ed27c54..18ac52ded 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -619,7 +619,7 @@ static void smc_hardware_send_pkt(unsigned long data)
SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
smc_special_unlock(&lp->lock, flags);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev->stats.tx_packets++;
dev->stats.tx_bytes += len;
@@ -1364,7 +1364,7 @@ static void smc_timeout(struct net_device *dev)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8af25563f..b5ab5e120 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -116,7 +116,6 @@ struct smsc911x_data {
struct phy_device *phy_dev;
struct mii_bus *mii_bus;
- int phy_irq[PHY_MAX_ADDR];
unsigned int using_extphy;
int last_duplex;
int last_carrier;
@@ -1073,7 +1072,6 @@ static int smsc911x_mii_init(struct platform_device *pdev,
pdata->mii_bus->priv = pdata;
pdata->mii_bus->read = smsc911x_mii_read;
pdata->mii_bus->write = smsc911x_mii_write;
- memcpy(pdata->mii_bus->irq, pdata->phy_irq, sizeof(pdata->mii_bus));
pdata->mii_bus->parent = &pdev->dev;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index b3901616f..0fb362d5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,7 +2,8 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
- mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
+ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
+ dwmac4_dma.o dwmac4_lib.o dwmac4_core.o $(stmmac-y)
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index f96d25730..fc60368df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -41,6 +41,8 @@
/* Synopsys Core versions */
#define DWMAC_CORE_3_40 0x34
#define DWMAC_CORE_3_50 0x35
+#define DWMAC_CORE_4_00 0x40
+#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
#define DMA_TX_SIZE 512
#define DMA_RX_SIZE 512
@@ -167,6 +169,9 @@ struct stmmac_extra_stats {
unsigned long mtl_rx_fifo_ctrl_active;
unsigned long mac_rx_frame_ctrl_fifo;
unsigned long mac_gmii_rx_proto_engine;
+ /* TSO */
+ unsigned long tx_tso_frames;
+ unsigned long tx_tso_nfrags;
};
/* CSR Frequency Access Defines*/
@@ -243,6 +248,7 @@ enum rx_frame_status {
csum_none = 0x2,
llc_snap = 0x4,
dma_own = 0x8,
+ rx_not_ls = 0x10,
};
/* Tx status */
@@ -269,6 +275,7 @@ enum dma_irq_status {
#define CORE_PCS_ANE_COMPLETE (1 << 5)
#define CORE_PCS_LINK_STATUS (1 << 6)
#define CORE_RGMII_IRQ (1 << 7)
+#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
/* Physical Coding Sublayer */
struct rgmii_adv {
@@ -300,8 +307,10 @@ struct dma_features {
/* 802.3az - Energy-Efficient Ethernet (EEE) */
unsigned int eee;
unsigned int av;
+ unsigned int tsoen;
/* TX and RX csum */
unsigned int tx_coe;
+ unsigned int rx_coe;
unsigned int rx_coe_type1;
unsigned int rx_coe_type2;
unsigned int rxfifo_over_2048;
@@ -348,6 +357,10 @@ struct stmmac_desc_ops {
void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
bool ls);
+ void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
+ int len2, bool tx_own, bool ls,
+ unsigned int tcphdrlen,
+ unsigned int tcppayloadlen);
/* Set/get the owner of the descriptor */
void (*set_tx_owner) (struct dma_desc *p);
int (*get_tx_owner) (struct dma_desc *p);
@@ -380,6 +393,10 @@ struct stmmac_desc_ops {
u64(*get_timestamp) (void *desc, u32 ats);
/* get rx timestamp status */
int (*get_rx_timestamp_status) (void *desc, u32 ats);
+ /* Display ring */
+ void (*display_ring)(void *head, unsigned int size, bool rx);
+ /* set MSS via context descriptor */
+ void (*set_mss)(struct dma_desc *p, unsigned int mss);
};
extern const struct stmmac_desc_ops enh_desc_ops;
@@ -412,9 +429,15 @@ struct stmmac_dma_ops {
int (*dma_interrupt) (void __iomem *ioaddr,
struct stmmac_extra_stats *x);
/* If supported then get the optional core features */
- unsigned int (*get_hw_feature) (void __iomem *ioaddr);
+ void (*get_hw_feature)(void __iomem *ioaddr,
+ struct dma_features *dma_cap);
/* Program the HW RX Watchdog */
void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
+ void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len);
+ void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len);
+ void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+ void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+ void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
};
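/* Hedged sketch: get_hw_feature() now fills a dma_features struct instead
 * of returning one packed register, since newer cores spread capabilities
 * over several registers. The offset and bit position below are
 * assumptions modelled on a dwmac4-style HW_FEATURE register, not quoted
 * from this driver.
 */
static void sketch_get_hw_feature(void __iomem *ioaddr,
				  struct dma_features *dma_cap)
{
	u32 hw_cap = readl(ioaddr + 0x120);	/* e.g. a HW_FEATURE1 reg */

	dma_cap->tsoen = (hw_cap >> 18) & 0x1;	/* TSO supported? */
}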
struct mac_device_info;
@@ -463,6 +486,7 @@ struct stmmac_hwtimestamp {
};
extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
struct mac_link {
int port;
@@ -495,7 +519,6 @@ struct mac_device_info {
const struct stmmac_hwtimestamp *ptp;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
- unsigned int synopsys_uid;
void __iomem *pcsr; /* vpointer to device CSRs */
int multicast_filter_bins;
int unicast_filter_entries;
@@ -504,18 +527,47 @@ struct mac_device_info {
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
- int perfect_uc_entries);
-struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
+ int perfect_uc_entries,
+ int *synopsys_id);
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id);
+struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
+ int perfect_uc_entries, int *synopsys_id);
void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
-
void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low);
+void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
+void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
+
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_mode_ops ring_mode_ops;
extern const struct stmmac_mode_ops chain_mode_ops;
-
+extern const struct stmmac_desc_ops dwmac4_desc_ops;
+
+/**
+ * stmmac_get_synopsys_id - return the Synopsys ID.
+ * @hwid: the HW core version register value (GMAC_VERSION).
+ * Description: decode and return the Synopsys ID found in the low
+ * byte of the HW core version register.
+ */
+static inline u32 stmmac_get_synopsys_id(u32 hwid)
+{
+ /* Check Synopsys Id (not available on old chips) */
+ if (likely(hwid)) {
+ u32 uid = ((hwid & 0x0000ff00) >> 8);
+ u32 synid = (hwid & 0x000000ff);
+
+ pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
+ uid, synid);
+
+ return synid;
+ }
+ return 0;
+}
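+
+/* For illustration: a (hypothetical) GMAC_VERSION value of 0x1042 would
+ * decode to user ID 0x10 (bits 15:8) and Synopsys ID 0x42 (bits 7:0).
+ */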
#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index afb90d129..f13499fa1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -49,6 +49,7 @@ struct socfpga_dwmac {
u32 reg_shift;
struct device *dev;
struct regmap *sys_mgr_base_addr;
+ struct reset_control *stmmac_rst;
void __iomem *splitter_base;
bool f2h_ptp_ref_clk;
};
@@ -135,7 +136,7 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
return 0;
}
-static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
+static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
{
struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
int phymode = dwmac->interface;
@@ -164,6 +165,10 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
if (dwmac->splitter_base)
val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+ /* Assert reset to the enet controller before changing the phy mode */
+ if (dwmac->stmmac_rst)
+ reset_control_assert(dwmac->stmmac_rst);
+
regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
ctrl |= val << reg_shift;
@@ -181,57 +186,13 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
- return 0;
-}
-
-static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
-{
- struct socfpga_dwmac *dwmac = priv;
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *stpriv = NULL;
- int ret = 0;
-
- if (!ndev)
- return -EINVAL;
-
- stpriv = netdev_priv(ndev);
- if (!stpriv)
- return -EINVAL;
-
- /* Assert reset to the enet controller before changing the phy mode */
- if (stpriv->stmmac_rst)
- reset_control_assert(stpriv->stmmac_rst);
-
- /* Setup the phy mode in the system manager registers according to
- * devicetree configuration
- */
- ret = socfpga_dwmac_setup(dwmac);
-
/* Deassert reset for the phy configuration to be sampled by
* the enet controller, and operation to start in requested mode
*/
- if (stpriv->stmmac_rst)
- reset_control_deassert(stpriv->stmmac_rst);
-
- /* Before the enet controller is suspended, the phy is suspended.
- * This causes the phy clock to be gated. The enet controller is
- * resumed before the phy, so the clock is still gated "off" when
- * the enet controller is resumed. This code makes sure the phy
- * is "resumed" before reinitializing the enet controller since
- * the enet controller depends on an active phy clock to complete
- * a DMA reset. A DMA reset will "time out" if executed
- * with no phy clock input on the Synopsys enet controller.
- * Verified through Synopsys Case #8000711656.
- *
- * Note that the phy clock is also gated when the phy is isolated.
- * Phy "suspend" and "isolate" controls are located in phy basic
- * control register 0, and can be modified by the phy driver
- * framework.
- */
- if (stpriv->phydev)
- phy_resume(stpriv->phydev);
+ if (dwmac->stmmac_rst)
+ reset_control_deassert(dwmac->stmmac_rst);
- return ret;
+ return 0;
}
static int socfpga_dwmac_probe(struct platform_device *pdev)
@@ -260,23 +221,59 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
return ret;
}
- ret = socfpga_dwmac_setup(dwmac);
- if (ret) {
- dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
- return ret;
- }
-
plat_dat->bsp_priv = dwmac;
- plat_dat->init = socfpga_dwmac_init;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (!ret)
- ret = socfpga_dwmac_init(pdev, dwmac);
+ if (!ret) {
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *stpriv = netdev_priv(ndev);
+
+ /* The socfpga driver needs to control the stmmac reset to
+ * set the phy mode. Create a copy of the core reset handle
+ * so it can be used by the driver later.
+ */
+ dwmac->stmmac_rst = stpriv->stmmac_rst;
+
+ ret = socfpga_dwmac_set_phy_mode(dwmac);
+ }
return ret;
}
+#ifdef CONFIG_PM_SLEEP
+static int socfpga_dwmac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ socfpga_dwmac_set_phy_mode(priv->plat->bsp_priv);
+
+ /* Before the enet controller is suspended, the phy is suspended.
+ * This causes the phy clock to be gated. The enet controller is
+ * resumed before the phy, so the clock is still gated "off" when
+ * the enet controller is resumed. This code makes sure the phy
+ * is "resumed" before reinitializing the enet controller since
+ * the enet controller depends on an active phy clock to complete
+ * a DMA reset. A DMA reset will "time out" if executed
+ * with no phy clock input on the Synopsys enet controller.
+ * Verified through Synopsys Case #8000711656.
+ *
+ * Note that the phy clock is also gated when the phy is isolated.
+ * Phy "suspend" and "isolate" controls are located in phy basic
+ * control register 0, and can be modified by the phy driver
+ * framework.
+ */
+ if (priv->phydev)
+ phy_resume(priv->phydev);
+
+ return stmmac_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend,
+ socfpga_dwmac_resume);
+
static const struct of_device_id socfpga_dwmac_match[] = {
{ .compatible = "altr,socfpga-stmmac" },
{ }
@@ -288,7 +285,7 @@ static struct platform_driver socfpga_dwmac_driver = {
.remove = stmmac_pltfr_remove,
.driver = {
.name = "socfpga-dwmac",
- .pm = &stmmac_pltfr_pm_ops,
+ .pm = &socfpga_dwmac_pm_ops,
.of_match_table = socfpga_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index c2941172f..fb1eb578e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -491,7 +491,8 @@ static const struct stmmac_ops dwmac1000_ops = {
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
- int perfect_uc_entries)
+ int perfect_uc_entries,
+ int *synopsys_id)
{
struct mac_device_info *mac;
u32 hwid = readl(ioaddr + GMAC_VERSION);
@@ -516,7 +517,9 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
mac->link.speed = GMAC_CONTROL_FES;
mac->mii.addr = GMAC_MII_ADDR;
mac->mii.data = GMAC_MII_DATA;
- mac->synopsys_uid = hwid;
+
+ /* Get and dump the chip ID */
+ *synopsys_id = stmmac_get_synopsys_id(hwid);
return mac;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index da32d6037..990746955 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -215,9 +215,40 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
}
}
-static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr)
+static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
{
- return readl(ioaddr + DMA_HW_FEATURE);
+ u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);
+
+ dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
+ dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
+ dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
+ dma_cap->hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
+ dma_cap->multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
+ dma_cap->pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
+ dma_cap->sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
+ dma_cap->pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
+ dma_cap->pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
+ /* MMC */
+ dma_cap->rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
+ /* IEEE 1588-2002 */
+ dma_cap->time_stamp =
+ (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
+ /* IEEE 1588-2008 */
+ dma_cap->atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ dma_cap->eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
+ dma_cap->av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
+ /* TX and RX csum */
+ dma_cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
+ dma_cap->rx_coe_type1 = (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
+ dma_cap->rx_coe_type2 = (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
+ dma_cap->rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
+ /* TX and RX number of channels */
+ dma_cap->number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
+ dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
+ /* Alternate (enhanced) DESC mode */
+ dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
}
static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index f8dd773f2..6418b2e07 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -173,7 +173,7 @@ static const struct stmmac_ops dwmac100_ops = {
.get_umac_addr = dwmac100_get_umac_addr,
};
-struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
{
struct mac_device_info *mac;
@@ -192,7 +192,8 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
mac->link.speed = 0;
mac->mii.addr = MAC_MII_ADDR;
mac->mii.data = MAC_MII_DATA;
- mac->synopsys_uid = 0;
+ /* Synopsys Id is not available on old chips */
+ *synopsys_id = 0;
return mac;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
new file mode 100644
index 000000000..bc50952a1
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -0,0 +1,255 @@
+/*
+ * DWMAC4 Header file.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_H__
+#define __DWMAC4_H__
+
+#include "common.h"
+
+/* MAC registers */
+#define GMAC_CONFIG 0x00000000
+#define GMAC_PACKET_FILTER 0x00000008
+#define GMAC_HASH_TAB_0_31 0x00000010
+#define GMAC_HASH_TAB_32_63 0x00000014
+#define GMAC_RX_FLOW_CTRL 0x00000090
+#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
+#define GMAC_INT_STATUS 0x000000b0
+#define GMAC_INT_EN 0x000000b4
+#define GMAC_AN_CTRL 0x000000e0
+#define GMAC_AN_STATUS 0x000000e4
+#define GMAC_AN_ADV 0x000000e8
+#define GMAC_AN_LPA 0x000000ec
+#define GMAC_PMT 0x000000c0
+#define GMAC_VERSION 0x00000110
+#define GMAC_DEBUG 0x00000114
+#define GMAC_HW_FEATURE0 0x0000011c
+#define GMAC_HW_FEATURE1 0x00000120
+#define GMAC_HW_FEATURE2 0x00000124
+#define GMAC_MDIO_ADDR 0x00000200
+#define GMAC_MDIO_DATA 0x00000204
+#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
+#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
+
+/* MAC Packet Filtering */
+#define GMAC_PACKET_FILTER_PR BIT(0)
+#define GMAC_PACKET_FILTER_HMC BIT(2)
+#define GMAC_PACKET_FILTER_PM BIT(4)
+
+#define GMAC_MAX_PERFECT_ADDRESSES 128
+
+/* MAC Flow Control RX */
+#define GMAC_RX_FLOW_CTRL_RFE BIT(0)
+
+/* MAC Flow Control TX */
+#define GMAC_TX_FLOW_CTRL_TFE BIT(1)
+#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
+
+/* MAC Interrupt bitmap */
+#define GMAC_INT_PMT_EN BIT(4)
+#define GMAC_INT_LPI_EN BIT(5)
+
+enum dwmac4_irq_status {
+ time_stamp_irq = 0x00001000,
+ mmc_rx_csum_offload_irq = 0x00000800,
+ mmc_tx_irq = 0x00000400,
+ mmc_rx_irq = 0x00000200,
+ mmc_irq = 0x00000100,
+ pmt_irq = 0x00000010,
+ pcs_ane_irq = 0x00000004,
+ pcs_link_irq = 0x00000002,
+};
+
+/* MAC Auto-Neg bitmap */
+#define GMAC_AN_CTRL_RAN BIT(9)
+#define GMAC_AN_CTRL_ANE BIT(12)
+#define GMAC_AN_CTRL_ELE BIT(14)
+#define GMAC_AN_FD BIT(5)
+#define GMAC_AN_HD BIT(6)
+#define GMAC_AN_PSE_MASK GENMASK(8, 7)
+#define GMAC_AN_PSE_SHIFT 7
+
+/* MAC PMT bitmap */
+enum power_event {
+ pointer_reset = 0x80000000,
+ global_unicast = 0x00000200,
+ wake_up_rx_frame = 0x00000040,
+ magic_frame = 0x00000020,
+ wake_up_frame_en = 0x00000004,
+ magic_pkt_en = 0x00000002,
+ power_down = 0x00000001,
+};
+
+/* MAC Debug bitmap */
+#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
+#define GMAC_DEBUG_TFCSTS_SHIFT 17
+#define GMAC_DEBUG_TFCSTS_IDLE 0
+#define GMAC_DEBUG_TFCSTS_WAIT 1
+#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2
+#define GMAC_DEBUG_TFCSTS_XFER 3
+#define GMAC_DEBUG_TPESTS BIT(16)
+#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1)
+#define GMAC_DEBUG_RFCFCSTS_SHIFT 1
+#define GMAC_DEBUG_RPESTS BIT(0)
+
+/* MAC config */
+#define GMAC_CONFIG_IPC BIT(27)
+#define GMAC_CONFIG_2K BIT(22)
+#define GMAC_CONFIG_ACS BIT(20)
+#define GMAC_CONFIG_BE BIT(18)
+#define GMAC_CONFIG_JD BIT(17)
+#define GMAC_CONFIG_JE BIT(16)
+#define GMAC_CONFIG_PS BIT(15)
+#define GMAC_CONFIG_FES BIT(14)
+#define GMAC_CONFIG_DM BIT(13)
+#define GMAC_CONFIG_DCRS BIT(9)
+#define GMAC_CONFIG_TE BIT(1)
+#define GMAC_CONFIG_RE BIT(0)
+
+/* MAC HW features0 bitmap */
+#define GMAC_HW_FEAT_ADDMAC BIT(18)
+#define GMAC_HW_FEAT_RXCOESEL BIT(16)
+#define GMAC_HW_FEAT_TXCOSEL BIT(14)
+#define GMAC_HW_FEAT_EEESEL BIT(13)
+#define GMAC_HW_FEAT_TSSEL BIT(12)
+#define GMAC_HW_FEAT_MMCSEL BIT(8)
+#define GMAC_HW_FEAT_MGKSEL BIT(7)
+#define GMAC_HW_FEAT_RWKSEL BIT(6)
+#define GMAC_HW_FEAT_SMASEL BIT(5)
+#define GMAC_HW_FEAT_VLHASH BIT(4)
+#define GMAC_HW_FEAT_PCSSEL BIT(3)
+#define GMAC_HW_FEAT_HDSEL BIT(2)
+#define GMAC_HW_FEAT_GMIISEL BIT(1)
+#define GMAC_HW_FEAT_MIISEL BIT(0)
+
+/* MAC HW features1 bitmap */
+#define GMAC_HW_FEAT_AVSEL BIT(20)
+#define GMAC_HW_TSOEN BIT(18)
+
+/* MAC HW features2 bitmap */
+#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
+#define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12)
+
+/* MAC HW ADDR regs */
+#define GMAC_HI_DCS GENMASK(18, 16)
+#define GMAC_HI_DCS_SHIFT 16
+#define GMAC_HI_REG_AE BIT(31)
+
+/* MTL registers */
+#define MTL_INT_STATUS 0x00000c20
+#define MTL_INT_Q0 BIT(0)
+
+#define MTL_CHAN_BASE_ADDR 0x00000d00
+#define MTL_CHAN_BASE_OFFSET 0x40
+#define MTL_CHANX_BASE_ADDR(x) (MTL_CHAN_BASE_ADDR + \
+ (x * MTL_CHAN_BASE_OFFSET))
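+/* e.g. MTL_CHANX_BASE_ADDR(0) = 0xd00 and MTL_CHANX_BASE_ADDR(1) = 0xd40,
+ * although only channel 0 (Q0) is used by this driver so far.
+ */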
+
+#define MTL_CHAN_TX_OP_MODE(x) MTL_CHANX_BASE_ADDR(x)
+#define MTL_CHAN_TX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x8)
+#define MTL_CHAN_INT_CTRL(x) (MTL_CHANX_BASE_ADDR(x) + 0x2c)
+#define MTL_CHAN_RX_OP_MODE(x) (MTL_CHANX_BASE_ADDR(x) + 0x30)
+#define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38)
+
+#define MTL_OP_MODE_RSF BIT(5)
+#define MTL_OP_MODE_TSF BIT(1)
+
+#define MTL_OP_MODE_TTC_MASK 0x70
+#define MTL_OP_MODE_TTC_SHIFT 4
+
+#define MTL_OP_MODE_TTC_32 0
+#define MTL_OP_MODE_TTC_64 (1 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_96 (2 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_128 (3 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_192 (4 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_256 (5 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
+
+#define MTL_OP_MODE_RTC_MASK 0x18
+#define MTL_OP_MODE_RTC_SHIFT 3
+
+#define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT)
+#define MTL_OP_MODE_RTC_64 0
+#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
+#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
+
+/* MTL debug */
+#define MTL_DEBUG_TXSTSFSTS BIT(5)
+#define MTL_DEBUG_TXFSTS BIT(4)
+#define MTL_DEBUG_TWCSTS BIT(3)
+
+/* MTL debug: Tx FIFO Read Controller Status */
+#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1)
+#define MTL_DEBUG_TRCSTS_SHIFT 1
+#define MTL_DEBUG_TRCSTS_IDLE 0
+#define MTL_DEBUG_TRCSTS_READ 1
+#define MTL_DEBUG_TRCSTS_TXW 2
+#define MTL_DEBUG_TRCSTS_WRITE 3
+#define MTL_DEBUG_TXPAUSED BIT(0)
+
+/* MTL debug: Rx FIFO fill level and Read Controller Status */
+#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4)
+#define MTL_DEBUG_RXFSTS_SHIFT 4
+#define MTL_DEBUG_RXFSTS_EMPTY 0
+#define MTL_DEBUG_RXFSTS_BT 1
+#define MTL_DEBUG_RXFSTS_AT 2
+#define MTL_DEBUG_RXFSTS_FULL 3
+#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1)
+#define MTL_DEBUG_RRCSTS_SHIFT 1
+#define MTL_DEBUG_RRCSTS_IDLE 0
+#define MTL_DEBUG_RRCSTS_RDATA 1
+#define MTL_DEBUG_RRCSTS_RSTAT 2
+#define MTL_DEBUG_RRCSTS_FLUSH 3
+#define MTL_DEBUG_RWCSTS BIT(0)
+
+/* MTL interrupt */
+#define MTL_RX_OVERFLOW_INT_EN BIT(24)
+#define MTL_RX_OVERFLOW_INT BIT(16)
+
+/* Default operating mode of the MAC */
+#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \
+ GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
+
+/* To dump the core regs excluding the Address Registers */
+#define GMAC_REG_NUM 132
+
+extern const struct stmmac_dma_ops dwmac4_dma_ops;
+extern const struct stmmac_dma_ops dwmac410_dma_ops;
+#endif /* __DWMAC4_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
new file mode 100644
index 000000000..44da877d2
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -0,0 +1,407 @@
+/*
+ * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ * DWC Ether MAC version 4.00 has been used for developing this code.
+ *
+ * This only implements the mac core functions for this chip.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include "dwmac4.h"
+
+static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+
+ value |= GMAC_CORE_INIT;
+
+ if (mtu > 1500)
+ value |= GMAC_CONFIG_2K;
+ if (mtu > 2000)
+ value |= GMAC_CONFIG_JE;
+
+ writel(value, ioaddr + GMAC_CONFIG);
+
+ /* Mask GMAC interrupts */
+ writel(GMAC_INT_PMT_EN, ioaddr + GMAC_INT_EN);
+}
+
+static void dwmac4_dump_regs(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int i;
+
+ pr_debug("\tDWMAC4 regs (base addr = 0x%p)\n", ioaddr);
+
+ for (i = 0; i < GMAC_REG_NUM; i++) {
+ int offset = i * 4;
+
+ pr_debug("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+ offset, readl(ioaddr + offset));
+ }
+}
+
+static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+
+ if (hw->rx_csum)
+ value |= GMAC_CONFIG_IPC;
+ else
+ value &= ~GMAC_CONFIG_IPC;
+
+ writel(value, ioaddr + GMAC_CONFIG);
+
+ value = readl(ioaddr + GMAC_CONFIG);
+
+ return !!(value & GMAC_CONFIG_IPC);
+}
+
+static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ unsigned int pmt = 0;
+
+ if (mode & WAKE_MAGIC) {
+ pr_debug("GMAC: WOL Magic frame\n");
+ pmt |= power_down | magic_pkt_en;
+ }
+ if (mode & WAKE_UCAST) {
+ pr_debug("GMAC: WOL on global unicast\n");
+ pmt |= global_unicast;
+ }
+
+ writel(pmt, ioaddr + GMAC_PMT);
+}
+
+static void dwmac4_set_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac4_get_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac4_set_filter(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ unsigned int value = 0;
+
+ if (dev->flags & IFF_PROMISC) {
+ value = GMAC_PACKET_FILTER_PR;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
+ /* Pass all multi */
+ value = GMAC_PACKET_FILTER_PM;
+ /* Set all 64 bits of the hash table. To be updated if a
+ * larger hash table is used
+ */
+ writel(0xffffffff, ioaddr + GMAC_HASH_TAB_0_31);
+ writel(0xffffffff, ioaddr + GMAC_HASH_TAB_32_63);
+ } else if (!netdev_mc_empty(dev)) {
+ u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
+
+ /* Hash filter for multicast */
+ value = GMAC_PACKET_FILTER_HMC;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the content of the Hash Table Reg 0 and 1.
+ */
+ int bit_nr =
+ (bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26);
+ /* The most significant bit determines the register
+ * to use while the other 5 bits determine the bit
+ * within the selected register
+ */
+ mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1F));
+ }
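+ /* For example, a computed bit_nr of 37 (0x25, an illustrative
+ * value) selects bit 5 of mc_filter[1].
+ */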
+ writel(mc_filter[0], ioaddr + GMAC_HASH_TAB_0_31);
+ writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63);
+ }
+
+ /* Handle multiple unicast addresses */
+ if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
+ /* Switch to promiscuous mode if more than 128 addrs
+ * are required
+ */
+ value |= GMAC_PACKET_FILTER_PR;
+ } else if (!netdev_uc_empty(dev)) {
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ dwmac4_set_umac_addr(hw, ha->addr, reg);
+ reg++;
+ }
+ }
+
+ writel(value, ioaddr + GMAC_PACKET_FILTER);
+}
+
+static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 channel = STMMAC_CHAN0; /* FIXME */
+ unsigned int flow = 0;
+
+ pr_debug("GMAC Flow-Control:\n");
+ if (fc & FLOW_RX) {
+ pr_debug("\tReceive Flow-Control ON\n");
+ flow |= GMAC_RX_FLOW_CTRL_RFE;
+ writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
+ }
+ if (fc & FLOW_TX) {
+ pr_debug("\tTransmit Flow-Control ON\n");
+ flow |= GMAC_TX_FLOW_CTRL_TFE;
+ writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+
+ if (duplex) {
+ pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
+ flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
+ writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+ }
+ }
+}
+
+static void dwmac4_ctrl_ane(struct mac_device_info *hw, bool restart)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ /* auto negotiation enable and External Loopback enable */
+ u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+
+ if (restart)
+ value |= GMAC_AN_CTRL_RAN;
+
+ writel(value, ioaddr + GMAC_AN_CTRL);
+}
+
+static void dwmac4_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_AN_ADV);
+
+ if (value & GMAC_AN_FD)
+ adv->duplex = DUPLEX_FULL;
+ if (value & GMAC_AN_HD)
+ adv->duplex |= DUPLEX_HALF;
+
+ adv->pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
+
+ value = readl(ioaddr + GMAC_AN_LPA);
+
+ if (value & GMAC_AN_FD)
+ adv->lp_duplex = DUPLEX_FULL;
+ if (value & GMAC_AN_HD)
+ adv->lp_duplex = DUPLEX_HALF;
+
+ adv->lp_pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
+}
+
+static int dwmac4_irq_status(struct mac_device_info *hw,
+ struct stmmac_extra_stats *x)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 mtl_int_qx_status;
+ u32 intr_status;
+ int ret = 0;
+
+ intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+ /* Unused events (e.g. MMC interrupts) are not handled. */
+ if ((intr_status & mmc_tx_irq))
+ x->mmc_tx_irq_n++;
+ if (unlikely(intr_status & mmc_rx_irq))
+ x->mmc_rx_irq_n++;
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ x->mmc_rx_csum_offload_irq_n++;
+ /* Clear the PMT bits 5 and 6 by reading the PMT status reg */
+ if (unlikely(intr_status & pmt_irq)) {
+ readl(ioaddr + GMAC_PMT);
+ x->irq_receive_pmt_irq_n++;
+ }
+
+ if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
+ readl(ioaddr + GMAC_AN_STATUS);
+ x->irq_pcs_ane_n++;
+ }
+
+ mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
+ /* Check MTL Interrupt: Currently only one queue is used: Q0. */
+ if (mtl_int_qx_status & MTL_INT_Q0) {
+ /* read Queue 0 Interrupt status */
+ u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
+
+ if (status & MTL_RX_OVERFLOW_INT) {
+ /* clear Interrupt */
+ writel(status | MTL_RX_OVERFLOW_INT,
+ ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
+ ret = CORE_IRQ_MTL_RX_OVERFLOW;
+ }
+ }
+
+ return ret;
+}
+
+static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+{
+ u32 value;
+
+ /* Currently only channel 0 is supported */
+ value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
+
+ if (value & MTL_DEBUG_TXSTSFSTS)
+ x->mtl_tx_status_fifo_full++;
+ if (value & MTL_DEBUG_TXFSTS)
+ x->mtl_tx_fifo_not_empty++;
+ if (value & MTL_DEBUG_TWCSTS)
+ x->mmtl_fifo_ctrl++;
+ if (value & MTL_DEBUG_TRCSTS_MASK) {
+ u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
+ >> MTL_DEBUG_TRCSTS_SHIFT;
+ if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
+ x->mtl_tx_fifo_read_ctrl_write++;
+ else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
+ x->mtl_tx_fifo_read_ctrl_wait++;
+ else if (trcsts == MTL_DEBUG_TRCSTS_READ)
+ x->mtl_tx_fifo_read_ctrl_read++;
+ else
+ x->mtl_tx_fifo_read_ctrl_idle++;
+ }
+ if (value & MTL_DEBUG_TXPAUSED)
+ x->mac_tx_in_pause++;
+
+ value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
+
+ if (value & MTL_DEBUG_RXFSTS_MASK) {
+ u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
+ >> MTL_DEBUG_RXFSTS_SHIFT;
+
+ if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
+ x->mtl_rx_fifo_fill_level_full++;
+ else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
+ x->mtl_rx_fifo_fill_above_thresh++;
+ else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
+ x->mtl_rx_fifo_fill_below_thresh++;
+ else
+ x->mtl_rx_fifo_fill_level_empty++;
+ }
+ if (value & MTL_DEBUG_RRCSTS_MASK) {
+ u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
+ MTL_DEBUG_RRCSTS_SHIFT;
+
+ if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
+ x->mtl_rx_fifo_read_ctrl_flush++;
+ else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
+ x->mtl_rx_fifo_read_ctrl_status++;
+ else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
+ x->mtl_rx_fifo_read_ctrl_read_data++;
+ else
+ x->mtl_rx_fifo_read_ctrl_idle++;
+ }
+ if (value & MTL_DEBUG_RWCSTS)
+ x->mtl_rx_fifo_ctrl_active++;
+
+ /* GMAC debug */
+ value = readl(ioaddr + GMAC_DEBUG);
+
+ if (value & GMAC_DEBUG_TFCSTS_MASK) {
+ u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
+ >> GMAC_DEBUG_TFCSTS_SHIFT;
+
+ if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
+ x->mac_tx_frame_ctrl_xfer++;
+ else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
+ x->mac_tx_frame_ctrl_pause++;
+ else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
+ x->mac_tx_frame_ctrl_wait++;
+ else
+ x->mac_tx_frame_ctrl_idle++;
+ }
+ if (value & GMAC_DEBUG_TPESTS)
+ x->mac_gmii_tx_proto_engine++;
+ if (value & GMAC_DEBUG_RFCFCSTS_MASK)
+ x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
+ >> GMAC_DEBUG_RFCFCSTS_SHIFT;
+ if (value & GMAC_DEBUG_RPESTS)
+ x->mac_gmii_rx_proto_engine++;
+}
+
+static const struct stmmac_ops dwmac4_ops = {
+ .core_init = dwmac4_core_init,
+ .rx_ipc = dwmac4_rx_ipc_enable,
+ .dump_regs = dwmac4_dump_regs,
+ .host_irq_status = dwmac4_irq_status,
+ .flow_ctrl = dwmac4_flow_ctrl,
+ .pmt = dwmac4_pmt,
+ .set_umac_addr = dwmac4_set_umac_addr,
+ .get_umac_addr = dwmac4_get_umac_addr,
+ .ctrl_ane = dwmac4_ctrl_ane,
+ .get_adv = dwmac4_get_adv,
+ .debug = dwmac4_debug,
+ .set_filter = dwmac4_set_filter,
+};
+
+struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
+ int perfect_uc_entries, int *synopsys_id)
+{
+ struct mac_device_info *mac;
+ u32 hwid = readl(ioaddr + GMAC_VERSION);
+
+ mac = kzalloc(sizeof(*mac), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ mac->pcsr = ioaddr;
+ mac->multicast_filter_bins = mcbins;
+ mac->unicast_filter_entries = perfect_uc_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+ mac->mac = &dwmac4_ops;
+
+ mac->link.port = GMAC_CONFIG_PS;
+ mac->link.duplex = GMAC_CONFIG_DM;
+ mac->link.speed = GMAC_CONFIG_FES;
+ mac->mii.addr = GMAC_MDIO_ADDR;
+ mac->mii.data = GMAC_MDIO_DATA;
+
+ /* Get and dump the chip ID */
+ *synopsys_id = stmmac_get_synopsys_id(hwid);
+
+ if (*synopsys_id > DWMAC_CORE_4_00)
+ mac->dma = &dwmac410_dma_ops;
+ else
+ mac->dma = &dwmac4_dma_ops;
+
+ return mac;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
new file mode 100644
index 000000000..4ec7397e7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -0,0 +1,389 @@
+/*
+ * This contains the functions to handle the descriptors for DesignWare databook
+ * 4.xx.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/stmmac.h>
+#include "common.h"
+#include "dwmac4_descs.h"
+
+static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p,
+ void __iomem *ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int tdes3;
+ int ret = tx_done;
+
+ tdes3 = p->des3;
+
+ /* Get tx owner first */
+ if (unlikely(tdes3 & TDES3_OWN))
+ return tx_dma_own;
+
+ /* Verify tx error by looking at the last segment. */
+ if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR)))
+ return tx_not_ls;
+
+ if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
+ if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
+ x->tx_jabber++;
+ if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
+ x->tx_frame_flushed++;
+ if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
+ (tdes3 & TDES3_EXCESSIVE_COLLISION)))
+ stats->collisions +=
+ (tdes3 & TDES3_COLLISION_COUNT_MASK)
+ >> TDES3_COLLISION_COUNT_SHIFT;
+
+ if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
+ x->tx_deferred++;
+
+ if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR))
+ x->tx_underflow++;
+
+ if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
+ x->tx_ip_header_error++;
+
+ if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
+ x->tx_payload_error++;
+
+ ret = tx_err;
+ }
+
+ if (unlikely(tdes3 & TDES3_DEFERRED))
+ x->tx_deferred++;
+
+ return ret;
+}
+
+static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int rdes1 = p->des1;
+ unsigned int rdes2 = p->des2;
+ unsigned int rdes3 = p->des3;
+ int message_type;
+ int ret = good_frame;
+
+ if (unlikely(rdes3 & RDES3_OWN))
+ return dma_own;
+
+ /* Verify rx error by looking at the last segment. */
+ if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
+ return discard_frame;
+
+ if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
+ if (unlikely(rdes3 & RDES3_GIANT_PACKET))
+ stats->rx_length_errors++;
+ if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
+ x->rx_gmac_overflow++;
+
+ if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG))
+ x->rx_watchdog++;
+
+ if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
+ x->rx_mii++;
+
+ if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+
+ if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
+ x->dribbling_bit++;
+
+ ret = discard_frame;
+ }
+
+ message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;
+
+ if (rdes1 & RDES1_IP_HDR_ERROR)
+ x->ip_hdr_err++;
+ if (rdes1 & RDES1_IP_CSUM_BYPASSED)
+ x->ip_csum_bypassed++;
+ if (rdes1 & RDES1_IPV4_HEADER)
+ x->ipv4_pkt_rcvd++;
+ if (rdes1 & RDES1_IPV6_HEADER)
+ x->ipv6_pkt_rcvd++;
+ if (message_type == RDES_EXT_SYNC)
+ x->rx_msg_type_sync++;
+ else if (message_type == RDES_EXT_FOLLOW_UP)
+ x->rx_msg_type_follow_up++;
+ else if (message_type == RDES_EXT_DELAY_REQ)
+ x->rx_msg_type_delay_req++;
+ else if (message_type == RDES_EXT_DELAY_RESP)
+ x->rx_msg_type_delay_resp++;
+ else if (message_type == RDES_EXT_PDELAY_REQ)
+ x->rx_msg_type_pdelay_req++;
+ else if (message_type == RDES_EXT_PDELAY_RESP)
+ x->rx_msg_type_pdelay_resp++;
+ else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
+ x->rx_msg_type_pdelay_follow_up++;
+ else
+ x->rx_msg_type_ext_no_ptp++;
+
+ if (rdes1 & RDES1_PTP_PACKET_TYPE)
+ x->ptp_frame_type++;
+ if (rdes1 & RDES1_PTP_VER)
+ x->ptp_ver++;
+ if (rdes1 & RDES1_TIMESTAMP_DROPPED)
+ x->timestamp_dropped++;
+
+ if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) {
+ x->sa_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) {
+ x->da_rx_filter_fail++;
+ ret = discard_frame;
+ }
+
+ if (rdes2 & RDES2_L3_FILTER_MATCH)
+ x->l3_filter_match++;
+ if (rdes2 & RDES2_L4_FILTER_MATCH)
+ x->l4_filter_match++;
+ if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
+ >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
+ x->l3_l4_filter_no_match++;
+
+ return ret;
+}
+
+static int dwmac4_rd_get_tx_len(struct dma_desc *p)
+{
+ return (p->des2 & TDES2_BUFFER1_SIZE_MASK);
+}
+
+static int dwmac4_get_tx_owner(struct dma_desc *p)
+{
+ return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT;
+}
+
+static void dwmac4_set_tx_owner(struct dma_desc *p)
+{
+ p->des3 |= TDES3_OWN;
+}
+
+static void dwmac4_set_rx_owner(struct dma_desc *p)
+{
+ p->des3 |= RDES3_OWN;
+}
+
+static int dwmac4_get_tx_ls(struct dma_desc *p)
+{
+ return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT;
+}
+
+static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
+{
+ return (p->des3 & RDES3_PACKET_SIZE_MASK);
+}
+
+static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
+{
+ p->des2 |= TDES2_TIMESTAMP_ENABLE;
+}
+
+static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
+{
+ return (p->des3 & TDES3_TIMESTAMP_STATUS)
+ >> TDES3_TIMESTAMP_STATUS_SHIFT;
+}
+
+/* NOTE: for RX, the CTX bit has to be checked before using this;
+ * ideally there should be a specific function for TX and another
+ * one for RX.
+ */
+static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ u64 ns;
+
+ ns = p->des0;
+ /* convert the high (seconds) part of the timestamp to nanoseconds */
+ ns += p->des1 * 1000000000ULL;
+
+ return ns;
+}
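+
+/* e.g. des1 = 2 and des0 = 500 (illustrative values) decode to a
+ * timestamp of 2000000500 ns.
+ */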
+
+static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+
+ return (p->des1 & RDES1_TIMESTAMP_AVAILABLE)
+ >> RDES1_TIMESTAMP_AVAILABLE_SHIFT;
+}
+
+static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ int mode, int end)
+{
+ p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR;
+
+ if (!disable_rx_ic)
+ p->des3 |= RDES3_INT_ON_COMPLETION_EN;
+}
+
+static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ bool csum_flag, int mode, bool tx_own,
+ bool ls)
+{
+ unsigned int tdes3 = p->des3;
+
+ p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
+
+ if (is_fs)
+ tdes3 |= TDES3_FIRST_DESCRIPTOR;
+ else
+ tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
+
+ if (likely(csum_flag))
+ tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
+ else
+ tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
+
+ if (ls)
+ tdes3 |= TDES3_LAST_DESCRIPTOR;
+ else
+ tdes3 &= ~TDES3_LAST_DESCRIPTOR;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= TDES3_OWN;
+
+ if (is_fs && tx_own)
+ /* When the own bit has to be set for the first descriptor of a
+ * frame, all the other descriptors of that frame must be set up
+ * first to avoid a race condition.
+ */
+ wmb();
+
+ p->des3 = tdes3;
+}
+
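+/* In the read format assumed by the TDES3 macros used below, bits 22:19
+ * carry the TCP header length for TSO (the code reuses
+ * TDES3_SLOT_NUMBER_MASK for that field) and bits 17:0 the TCP payload
+ * length.
+ */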
+static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
+ int len1, int len2, bool tx_own,
+ bool ls, unsigned int tcphdrlen,
+ unsigned int tcppayloadlen)
+{
+ unsigned int tdes3 = p->des3;
+
+ if (len1)
+ p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK);
+
+ if (len2)
+ p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
+ & TDES2_BUFFER2_SIZE_MASK;
+
+ if (is_fs) {
+ tdes3 |= TDES3_FIRST_DESCRIPTOR |
+ TDES3_TCP_SEGMENTATION_ENABLE |
+ ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
+ TDES3_SLOT_NUMBER_MASK) |
+ ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
+ } else {
+ tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
+ }
+
+ if (ls)
+ tdes3 |= TDES3_LAST_DESCRIPTOR;
+ else
+ tdes3 &= ~TDES3_LAST_DESCRIPTOR;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= TDES3_OWN;
+
+ if (is_fs && tx_own)
+ /* When the own bit has to be set for the first descriptor of a
+ * frame, all the other descriptors of that frame must be set up
+ * first to avoid a race condition.
+ */
+ wmb();
+
+ p->des3 = tdes3;
+}
+
+static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
+{
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
+{
+ p->des2 |= TDES2_INTERRUPT_ON_COMPLETION;
+}
+
+static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
+{
+ struct dma_desc *p = (struct dma_desc *)head;
+ int i;
+
+ pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
+
+ for (i = 0; i < size; i++) {
+ if (p->des0)
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(p),
+ p->des0, p->des1, p->des2, p->des3);
+ p++;
+ }
+}
+
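+/* A context descriptor carries the MSS for the TSO engine: des2 holds the
+ * MSS value and TDES3 marks the descriptor as a context one with a valid
+ * MSS (TCMSSV). It is presumably queued before the first TSO data
+ * descriptor whenever the MSS changes.
+ */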
+static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = mss;
+ p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV;
+}
+
+const struct stmmac_desc_ops dwmac4_desc_ops = {
+ .tx_status = dwmac4_wrback_get_tx_status,
+ .rx_status = dwmac4_wrback_get_rx_status,
+ .get_tx_len = dwmac4_rd_get_tx_len,
+ .get_tx_owner = dwmac4_get_tx_owner,
+ .set_tx_owner = dwmac4_set_tx_owner,
+ .set_rx_owner = dwmac4_set_rx_owner,
+ .get_tx_ls = dwmac4_get_tx_ls,
+ .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
+ .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
+ .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
+ .get_timestamp = dwmac4_wrback_get_timestamp,
+ .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status,
+ .set_tx_ic = dwmac4_rd_set_tx_ic,
+ .prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
+ .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
+ .release_tx_desc = dwmac4_release_tx_desc,
+ .init_rx_desc = dwmac4_rd_init_rx_desc,
+ .init_tx_desc = dwmac4_rd_init_tx_desc,
+ .display_ring = dwmac4_display_ring,
+ .set_mss = dwmac4_set_mss_ctxt,
+};
+
+const struct stmmac_mode_ops dwmac4_ring_mode_ops = { };
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
new file mode 100644
index 000000000..0902a2ede
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -0,0 +1,129 @@
+/*
+ * Header File to describe the DMA descriptors and related definitions specific
+ * for DesignWare databook 4.xx.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_DESCS_H__
+#define __DWMAC4_DESCS_H__
+
+#include <linux/bitops.h>
+
+/* Normal transmit descriptor defines (without split feature) */
+
+/* TDES2 (read format) */
+#define TDES2_BUFFER1_SIZE_MASK GENMASK(13, 0)
+#define TDES2_VLAN_TAG_MASK GENMASK(15, 14)
+#define TDES2_BUFFER2_SIZE_MASK GENMASK(29, 16)
+#define TDES2_BUFFER2_SIZE_MASK_SHIFT 16
+#define TDES2_TIMESTAMP_ENABLE BIT(30)
+#define TDES2_INTERRUPT_ON_COMPLETION BIT(31)
+
+/* TDES3 (read format) */
+#define TDES3_PACKET_SIZE_MASK GENMASK(14, 0)
+#define TDES3_CHECKSUM_INSERTION_MASK GENMASK(17, 16)
+#define TDES3_CHECKSUM_INSERTION_SHIFT 16
+#define TDES3_TCP_PKT_PAYLOAD_MASK GENMASK(17, 0)
+#define TDES3_TCP_SEGMENTATION_ENABLE BIT(18)
+#define TDES3_HDR_LEN_SHIFT 19
+#define TDES3_SLOT_NUMBER_MASK GENMASK(22, 19)
+#define TDES3_SA_INSERT_CTRL_MASK GENMASK(25, 23)
+#define TDES3_CRC_PAD_CTRL_MASK GENMASK(27, 26)
+
+/* TDES3 (write back format) */
+#define TDES3_IP_HDR_ERROR BIT(0)
+#define TDES3_DEFERRED BIT(1)
+#define TDES3_UNDERFLOW_ERROR BIT(2)
+#define TDES3_EXCESSIVE_DEFERRAL BIT(3)
+#define TDES3_COLLISION_COUNT_MASK GENMASK(7, 4)
+#define TDES3_COLLISION_COUNT_SHIFT 4
+#define TDES3_EXCESSIVE_COLLISION BIT(8)
+#define TDES3_LATE_COLLISION BIT(9)
+#define TDES3_NO_CARRIER BIT(10)
+#define TDES3_LOSS_CARRIER BIT(11)
+#define TDES3_PAYLOAD_ERROR BIT(12)
+#define TDES3_PACKET_FLUSHED BIT(13)
+#define TDES3_JABBER_TIMEOUT BIT(14)
+#define TDES3_ERROR_SUMMARY BIT(15)
+#define TDES3_TIMESTAMP_STATUS BIT(17)
+#define TDES3_TIMESTAMP_STATUS_SHIFT 17
+
+/* TDES3 context */
+#define TDES3_CTXT_TCMSSV BIT(26)
+
+/* TDES3 Common */
+#define TDES3_LAST_DESCRIPTOR BIT(28)
+#define TDES3_LAST_DESCRIPTOR_SHIFT 28
+#define TDES3_FIRST_DESCRIPTOR BIT(29)
+#define TDES3_CONTEXT_TYPE BIT(30)
+
+/* TDS3 use for both format (read and write back) */
+#define TDES3_OWN BIT(31)
+#define TDES3_OWN_SHIFT 31
+
+/* Normal receive descriptor defines (without split feature) */
+
+/* RDES0 (write back format) */
+#define RDES0_VLAN_TAG_MASK GENMASK(15, 0)
+
+/* RDES1 (write back format) */
+#define RDES1_IP_PAYLOAD_TYPE_MASK GENMASK(2, 0)
+#define RDES1_IP_HDR_ERROR BIT(3)
+#define RDES1_IPV4_HEADER BIT(4)
+#define RDES1_IPV6_HEADER BIT(5)
+#define RDES1_IP_CSUM_BYPASSED BIT(6)
+#define RDES1_IP_CSUM_ERROR BIT(7)
+#define RDES1_PTP_MSG_TYPE_MASK GENMASK(11, 8)
+#define RDES1_PTP_PACKET_TYPE BIT(12)
+#define RDES1_PTP_VER BIT(13)
+#define RDES1_TIMESTAMP_AVAILABLE BIT(14)
+#define RDES1_TIMESTAMP_AVAILABLE_SHIFT 14
+#define RDES1_TIMESTAMP_DROPPED BIT(15)
+#define RDES1_IP_TYPE1_CSUM_MASK GENMASK(31, 16)
+
+/* RDES2 (write back format) */
+#define RDES2_L3_L4_HEADER_SIZE_MASK GENMASK(9, 0)
+#define RDES2_VLAN_FILTER_STATUS BIT(15)
+#define RDES2_SA_FILTER_FAIL BIT(16)
+#define RDES2_DA_FILTER_FAIL BIT(17)
+#define RDES2_HASH_FILTER_STATUS BIT(18)
+#define RDES2_MAC_ADDR_MATCH_MASK GENMASK(26, 19)
+#define RDES2_HASH_VALUE_MATCH_MASK GENMASK(26, 19)
+#define RDES2_L3_FILTER_MATCH BIT(27)
+#define RDES2_L4_FILTER_MATCH BIT(28)
+#define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26)
+#define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26
+
+/* RDES3 (write back format) */
+#define RDES3_PACKET_SIZE_MASK GENMASK(14, 0)
+#define RDES3_ERROR_SUMMARY BIT(15)
+#define RDES3_PACKET_LEN_TYPE_MASK GENMASK(18, 16)
+#define RDES3_DRIBBLE_ERROR BIT(19)
+#define RDES3_RECEIVE_ERROR BIT(20)
+#define RDES3_OVERFLOW_ERROR BIT(21)
+#define RDES3_RECEIVE_WATCHDOG BIT(22)
+#define RDES3_GIANT_PACKET BIT(23)
+#define RDES3_CRC_ERROR BIT(24)
+#define RDES3_RDES0_VALID BIT(25)
+#define RDES3_RDES1_VALID BIT(26)
+#define RDES3_RDES2_VALID BIT(27)
+#define RDES3_LAST_DESCRIPTOR BIT(28)
+#define RDES3_FIRST_DESCRIPTOR BIT(29)
+#define RDES3_CONTEXT_DESCRIPTOR BIT(30)
+
+/* RDES3 (read format) */
+#define RDES3_BUFFER1_VALID_ADDR BIT(24)
+#define RDES3_BUFFER2_VALID_ADDR BIT(25)
+#define RDES3_INT_ON_COMPLETION_EN BIT(30)
+
+/* TDS3 use for both format (read and write back) */
+#define RDES3_OWN BIT(31)
+
+#endif /* __DWMAC4_DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
new file mode 100644
index 000000000..116151cd6
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -0,0 +1,354 @@
+/*
+ * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ * DWC Ether MAC version 4.xx has been used for developing this code.
+ *
+ * This contains the functions to handle the dma.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/io.h>
+#include "dwmac4.h"
+#include "dwmac4_dma.h"
+
+static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
+{
+ u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
+ int i;
+
+ pr_info("dwmac4: Master AXI performs %s burst length\n",
+ (value & DMA_SYS_BUS_FB) ? "fixed" : "any");
+
+ if (axi->axi_lpi_en)
+ value |= DMA_AXI_EN_LPI;
+ if (axi->axi_xit_frm)
+ value |= DMA_AXI_LPI_XIT_FRM;
+
+ value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
+ DMA_AXI_WR_OSR_LMT_SHIFT;
+
+ value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
+ DMA_AXI_RD_OSR_LMT_SHIFT;
+
+ /* Depending on the UNDEF bit, the AXI master performs any burst
+ * length allowed by the programmed BLEN bits (by default all BLEN
+ * are set).
+ */
+ for (i = 0; i < AXI_BLEN; i++) {
+ switch (axi->axi_blen[i]) {
+ case 256:
+ value |= DMA_AXI_BLEN256;
+ break;
+ case 128:
+ value |= DMA_AXI_BLEN128;
+ break;
+ case 64:
+ value |= DMA_AXI_BLEN64;
+ break;
+ case 32:
+ value |= DMA_AXI_BLEN32;
+ break;
+ case 16:
+ value |= DMA_AXI_BLEN16;
+ break;
+ case 8:
+ value |= DMA_AXI_BLEN8;
+ break;
+ case 4:
+ value |= DMA_AXI_BLEN4;
+ break;
+ }
+ }
+
+ writel(value, ioaddr + DMA_SYS_BUS_MODE);
+}
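+
+/* e.g. a platform providing axi_blen[] = { 4, 8, 16 } (illustrative
+ * values) would OR in DMA_AXI_BLEN4 | DMA_AXI_BLEN8 | DMA_AXI_BLEN16
+ * above.
+ */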
+
+static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl,
+ u32 dma_tx_phy, u32 dma_rx_phy,
+ u32 channel)
+{
+ u32 value;
+
+ /* set PBL for each channel. Currently the same configuration is
+ * applied to each channel
+ */
+ value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
+ value = value | DMA_BUS_MODE_PBL;
+ writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
+
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
+ value = value | (pbl << DMA_BUS_MODE_PBL_SHIFT);
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
+
+ value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
+ value = value | (pbl << DMA_BUS_MODE_RPBL_SHIFT);
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
+
+ writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
+ writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
+}
+
+static void dwmac4_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+ int aal, u32 dma_tx, u32 dma_rx, int atds)
+{
+ u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
+ int i;
+
+ /* Set the Fixed burst mode */
+ if (fb)
+ value |= DMA_SYS_BUS_FB;
+
+ /* Mixed Burst has no effect when fb is set */
+ if (mb)
+ value |= DMA_SYS_BUS_MB;
+
+ if (aal)
+ value |= DMA_SYS_BUS_AAL;
+
+ writel(value, ioaddr + DMA_SYS_BUS_MODE);
+
+ for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+ dwmac4_dma_init_channel(ioaddr, pbl, dma_tx, dma_rx, i);
+}
+
+static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel)
+{
+ pr_debug(" Channel %d\n", channel);
+ pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0,
+ readl(ioaddr + DMA_CHAN_CONTROL(channel)));
+ pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4,
+ readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)));
+ pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8,
+ readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)));
+ pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14,
+ readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c,
+ readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20,
+ readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28,
+ readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c,
+ readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)));
+ pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30,
+ readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)));
+ pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34,
+ readl(ioaddr + DMA_CHAN_INTR_ENA(channel)));
+ pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38,
+ readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)));
+ pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c,
+ readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)));
+ pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44,
+ readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)));
+ pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c,
+ readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)));
+ pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54,
+ readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c,
+ readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60,
+ readl(ioaddr + DMA_CHAN_STATUS(channel)));
+}
+
+static void dwmac4_dump_dma_regs(void __iomem *ioaddr)
+{
+ int i;
+
+ pr_debug(" GMAC4 DMA registers\n");
+
+ for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+ _dwmac4_dump_dma_regs(ioaddr, i);
+}
+
+static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+{
+ int i;
+
+ for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+ writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
+}
+
+static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
+ int rxmode, u32 channel)
+{
+ u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
+
+ /* The following code is only done for channel 0; other channels
+ * are not yet supported.
+ */
+ mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+ if (txmode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable TX store and forward mode\n");
+ /* Transmit COE type 2 cannot be done in cut-through mode. */
+ mtl_tx_op |= MTL_OP_MODE_TSF;
+ } else {
+ pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
+ mtl_tx_op &= ~MTL_OP_MODE_TSF;
+ mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
+ /* Set the transmit threshold */
+ if (txmode <= 32)
+ mtl_tx_op |= MTL_OP_MODE_TTC_32;
+ else if (txmode <= 64)
+ mtl_tx_op |= MTL_OP_MODE_TTC_64;
+ else if (txmode <= 96)
+ mtl_tx_op |= MTL_OP_MODE_TTC_96;
+ else if (txmode <= 128)
+ mtl_tx_op |= MTL_OP_MODE_TTC_128;
+ else if (txmode <= 192)
+ mtl_tx_op |= MTL_OP_MODE_TTC_192;
+ else if (txmode <= 256)
+ mtl_tx_op |= MTL_OP_MODE_TTC_256;
+ else if (txmode <= 384)
+ mtl_tx_op |= MTL_OP_MODE_TTC_384;
+ else
+ mtl_tx_op |= MTL_OP_MODE_TTC_512;
+ }
+
+ writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+ mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+ if (rxmode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable RX store and forward mode\n");
+ mtl_rx_op |= MTL_OP_MODE_RSF;
+ } else {
+ pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
+ mtl_rx_op &= ~MTL_OP_MODE_RSF;
+ mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
+ if (rxmode <= 32)
+ mtl_rx_op |= MTL_OP_MODE_RTC_32;
+ else if (rxmode <= 64)
+ mtl_rx_op |= MTL_OP_MODE_RTC_64;
+ else if (rxmode <= 96)
+ mtl_rx_op |= MTL_OP_MODE_RTC_96;
+ else
+ mtl_rx_op |= MTL_OP_MODE_RTC_128;
+ }
+
+ writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+ /* Enable MTL RX overflow */
+ mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
+ writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
+ ioaddr + MTL_CHAN_INT_CTRL(channel));
+}
+
+static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
+ int rxmode, int rxfifosz)
+{
+ /* Only Channel 0 is actually configured and used */
+ dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
+}
+
+static void dwmac4_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
+{
+ u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
+
+ /* MAC HW feature0 */
+ dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
+ dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
+ dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
+ dma_cap->hash_filter = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
+ dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
+ dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
+ dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
+ dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
+ dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
+ /* MMC */
+ dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
+ /* IEEE 1588-2008 */
+ dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
+ /* TX and RX csum */
+ dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
+ dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;
+
+ /* MAC HW feature1 */
+ hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
+ dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
+ dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+ /* MAC HW feature2 */
+ hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
+ /* TX and RX number of channels */
+ dma_cap->number_rx_channel =
+ ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
+ dma_cap->number_tx_channel =
+ ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
+
+ /* IEEE 1588-2002 */
+ dma_cap->time_stamp = 0;
+}
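+
+/* e.g. a (hypothetical) GMAC_HW_FEATURE2 value of 0x41000 has RXCHCNT
+ * (bits 15:12) = 1 and TXCHCNT (bits 21:18) = 1, i.e. two RX and two TX
+ * channels after the +1 adjustment above.
+ */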
+
+/* Enable/disable TSO feature and set MSS */
+static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value;
+
+ if (en) {
+ /* enable TSO */
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ writel(value | DMA_CONTROL_TSE,
+ ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ } else {
+ /* disable TSO */
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ writel(value & ~DMA_CONTROL_TSE,
+ ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ }
+}
+
+const struct stmmac_dma_ops dwmac4_dma_ops = {
+ .reset = dwmac4_dma_reset,
+ .init = dwmac4_dma_init,
+ .axi = dwmac4_dma_axi,
+ .dump_regs = dwmac4_dump_dma_regs,
+ .dma_mode = dwmac4_dma_operation_mode,
+ .enable_dma_irq = dwmac4_enable_dma_irq,
+ .disable_dma_irq = dwmac4_disable_dma_irq,
+ .start_tx = dwmac4_dma_start_tx,
+ .stop_tx = dwmac4_dma_stop_tx,
+ .start_rx = dwmac4_dma_start_rx,
+ .stop_rx = dwmac4_dma_stop_rx,
+ .dma_interrupt = dwmac4_dma_interrupt,
+ .get_hw_feature = dwmac4_get_hw_feature,
+ .rx_watchdog = dwmac4_rx_watchdog,
+ .set_rx_ring_len = dwmac4_set_rx_ring_len,
+ .set_tx_ring_len = dwmac4_set_tx_ring_len,
+ .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
+ .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
+ .enable_tso = dwmac4_enable_tso,
+};
+
+const struct stmmac_dma_ops dwmac410_dma_ops = {
+ .reset = dwmac4_dma_reset,
+ .init = dwmac4_dma_init,
+ .axi = dwmac4_dma_axi,
+ .dump_regs = dwmac4_dump_dma_regs,
+ .dma_mode = dwmac4_dma_operation_mode,
+ .enable_dma_irq = dwmac410_enable_dma_irq,
+ .disable_dma_irq = dwmac4_disable_dma_irq,
+ .start_tx = dwmac4_dma_start_tx,
+ .stop_tx = dwmac4_dma_stop_tx,
+ .start_rx = dwmac4_dma_start_rx,
+ .stop_rx = dwmac4_dma_stop_rx,
+ .dma_interrupt = dwmac4_dma_interrupt,
+ .get_hw_feature = dwmac4_get_hw_feature,
+ .rx_watchdog = dwmac4_rx_watchdog,
+ .set_rx_ring_len = dwmac4_set_rx_ring_len,
+ .set_tx_ring_len = dwmac4_set_tx_ring_len,
+ .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
+ .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
+ .enable_tso = dwmac4_enable_tso,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
new file mode 100644
index 000000000..1b06df749
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -0,0 +1,202 @@
+/*
+ * DWMAC4 DMA Header file.
+ *
+ * Copyright (C) 2007-2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_DMA_H__
+#define __DWMAC4_DMA_H__
+
+/* Define the max channel number used for tx (also rx).
+ * dwmac4 accepts up to 8 channels for TX (and also 8 channels for RX).
+ */
+#define DMA_CHANNEL_NB_MAX 1
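+/* Note: only channel 0 (STMMAC_CHAN0) is actually programmed by the
+ * helpers in this patch, which is presumably why the maximum is kept
+ * at 1 for now.
+ */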
+
+#define DMA_BUS_MODE 0x00001000
+#define DMA_SYS_BUS_MODE 0x00001004
+#define DMA_STATUS 0x00001008
+#define DMA_DEBUG_STATUS_0 0x0000100c
+#define DMA_DEBUG_STATUS_1 0x00001010
+#define DMA_DEBUG_STATUS_2 0x00001014
+#define DMA_AXI_BUS_MODE 0x00001028
+
+/* DMA Bus Mode bitmap */
+#define DMA_BUS_MODE_SFT_RESET BIT(0)
+
+/* DMA SYS Bus Mode bitmap */
+#define DMA_BUS_MODE_SPH BIT(24)
+#define DMA_BUS_MODE_PBL BIT(16)
+#define DMA_BUS_MODE_PBL_SHIFT 16
+#define DMA_BUS_MODE_RPBL_SHIFT 16
+#define DMA_BUS_MODE_MB BIT(14)
+#define DMA_BUS_MODE_FB BIT(0)
+
+/* DMA Interrupt top status */
+#define DMA_STATUS_MAC BIT(17)
+#define DMA_STATUS_MTL BIT(16)
+#define DMA_STATUS_CHAN7 BIT(7)
+#define DMA_STATUS_CHAN6 BIT(6)
+#define DMA_STATUS_CHAN5 BIT(5)
+#define DMA_STATUS_CHAN4 BIT(4)
+#define DMA_STATUS_CHAN3 BIT(3)
+#define DMA_STATUS_CHAN2 BIT(2)
+#define DMA_STATUS_CHAN1 BIT(1)
+#define DMA_STATUS_CHAN0 BIT(0)
+
+/* DMA debug status bitmap */
+#define DMA_DEBUG_STATUS_TS_MASK 0xf
+#define DMA_DEBUG_STATUS_RS_MASK 0xf
+
+/* DMA AXI bitmap */
+#define DMA_AXI_EN_LPI BIT(31)
+#define DMA_AXI_LPI_XIT_FRM BIT(30)
+#define DMA_AXI_WR_OSR_LMT GENMASK(27, 24)
+#define DMA_AXI_WR_OSR_LMT_SHIFT 24
+#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16)
+#define DMA_AXI_RD_OSR_LMT_SHIFT 16
+
+#define DMA_AXI_OSR_MAX 0xf
+#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
+ (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
+
+#define DMA_SYS_BUS_MB BIT(14)
+#define DMA_AXI_1KBBE BIT(13)
+#define DMA_SYS_BUS_AAL BIT(12)
+#define DMA_AXI_BLEN256 BIT(7)
+#define DMA_AXI_BLEN128 BIT(6)
+#define DMA_AXI_BLEN64 BIT(5)
+#define DMA_AXI_BLEN32 BIT(4)
+#define DMA_AXI_BLEN16 BIT(3)
+#define DMA_AXI_BLEN8 BIT(2)
+#define DMA_AXI_BLEN4 BIT(1)
+#define DMA_SYS_BUS_FB BIT(0)
+
+#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
+ DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
+ DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
+ DMA_AXI_BLEN4)
+
+#define DMA_AXI_BURST_LEN_MASK 0x000000FE
+
+/* The following DMA defines are channel oriented */
+#define DMA_CHAN_BASE_ADDR 0x00001100
+#define DMA_CHAN_BASE_OFFSET 0x80
+#define DMA_CHANX_BASE_ADDR(x) (DMA_CHAN_BASE_ADDR + \
+ (x * DMA_CHAN_BASE_OFFSET))
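+/* For example, channel 1's registers start at DMA_CHANX_BASE_ADDR(1) =
+ * 0x1100 + 1 * 0x80 = 0x1180, so DMA_CHAN_TX_CONTROL(1) is 0x1184.
+ */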
+#define DMA_CHAN_REG_NUMBER 17
+
+#define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x)
+#define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4)
+#define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8)
+#define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14)
+#define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c)
+#define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20)
+#define DMA_CHAN_RX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x28)
+#define DMA_CHAN_TX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x2c)
+#define DMA_CHAN_RX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x30)
+#define DMA_CHAN_INTR_ENA(x) (DMA_CHANX_BASE_ADDR(x) + 0x34)
+#define DMA_CHAN_RX_WATCHDOG(x) (DMA_CHANX_BASE_ADDR(x) + 0x38)
+#define DMA_CHAN_SLOT_CTRL_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x3c)
+#define DMA_CHAN_CUR_TX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x44)
+#define DMA_CHAN_CUR_RX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x4c)
+#define DMA_CHAN_CUR_TX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x54)
+#define DMA_CHAN_CUR_RX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x5c)
+#define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60)
+
+/* DMA Control X */
+#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
+
+/* DMA Tx Channel X Control register defines */
+#define DMA_CONTROL_TSE BIT(12)
+#define DMA_CONTROL_OSP BIT(4)
+#define DMA_CONTROL_ST BIT(0)
+
+/* DMA Rx Channel X Control register defines */
+#define DMA_CONTROL_SR BIT(0)
+
+/* Interrupt status per channel */
+#define DMA_CHAN_STATUS_REB GENMASK(21, 19)
+#define DMA_CHAN_STATUS_REB_SHIFT 19
+#define DMA_CHAN_STATUS_TEB GENMASK(18, 16)
+#define DMA_CHAN_STATUS_TEB_SHIFT 16
+#define DMA_CHAN_STATUS_NIS BIT(15)
+#define DMA_CHAN_STATUS_AIS BIT(14)
+#define DMA_CHAN_STATUS_CDE BIT(13)
+#define DMA_CHAN_STATUS_FBE BIT(12)
+#define DMA_CHAN_STATUS_ERI BIT(11)
+#define DMA_CHAN_STATUS_ETI BIT(10)
+#define DMA_CHAN_STATUS_RWT BIT(9)
+#define DMA_CHAN_STATUS_RPS BIT(8)
+#define DMA_CHAN_STATUS_RBU BIT(7)
+#define DMA_CHAN_STATUS_RI BIT(6)
+#define DMA_CHAN_STATUS_TBU BIT(2)
+#define DMA_CHAN_STATUS_TPS BIT(1)
+#define DMA_CHAN_STATUS_TI BIT(0)
+
+/* Interrupt enable bits per channel */
+#define DMA_CHAN_INTR_ENA_NIE BIT(16)
+#define DMA_CHAN_INTR_ENA_AIE BIT(15)
+#define DMA_CHAN_INTR_ENA_NIE_4_10 BIT(15)
+#define DMA_CHAN_INTR_ENA_AIE_4_10 BIT(14)
+#define DMA_CHAN_INTR_ENA_CDE BIT(13)
+#define DMA_CHAN_INTR_ENA_FBE BIT(12)
+#define DMA_CHAN_INTR_ENA_ERE BIT(11)
+#define DMA_CHAN_INTR_ENA_ETE BIT(10)
+#define DMA_CHAN_INTR_ENA_RWE BIT(9)
+#define DMA_CHAN_INTR_ENA_RSE BIT(8)
+#define DMA_CHAN_INTR_ENA_RBUE BIT(7)
+#define DMA_CHAN_INTR_ENA_RIE BIT(6)
+#define DMA_CHAN_INTR_ENA_TBUE BIT(2)
+#define DMA_CHAN_INTR_ENA_TSE BIT(1)
+#define DMA_CHAN_INTR_ENA_TIE BIT(0)
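+/* Note: core 4.10a moved the NIE and AIE bits down one position each,
+ * hence the _4_10 variants above and the separate default masks below
+ * (an inference from these definitions).
+ */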
+
+#define DMA_CHAN_INTR_NORMAL (DMA_CHAN_INTR_ENA_NIE | \
+ DMA_CHAN_INTR_ENA_RIE | \
+ DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_ABNORMAL (DMA_CHAN_INTR_ENA_AIE | \
+ DMA_CHAN_INTR_ENA_FBE)
+/* DMA default interrupt mask for 4.00 */
+#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \
+ DMA_CHAN_INTR_ABNORMAL)
+
+#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \
+ DMA_CHAN_INTR_ENA_RIE | \
+ DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_ABNORMAL_4_10 (DMA_CHAN_INTR_ENA_AIE_4_10 | \
+ DMA_CHAN_INTR_ENA_FBE)
+/* DMA default interrupt mask for 4.10a */
+#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \
+ DMA_CHAN_INTR_ABNORMAL_4_10)
+
+/* channel 0 specific fields */
+#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12)
+#define DMA_CHAN0_DBG_STAT_TPS_SHIFT 12
+#define DMA_CHAN0_DBG_STAT_RPS GENMASK(11, 8)
+#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
+
+int dwmac4_dma_reset(void __iomem *ioaddr);
+void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
+void dwmac4_enable_dma_irq(void __iomem *ioaddr);
+void dwmac410_enable_dma_irq(void __iomem *ioaddr);
+void dwmac4_disable_dma_irq(void __iomem *ioaddr);
+void dwmac4_dma_start_tx(void __iomem *ioaddr);
+void dwmac4_dma_stop_tx(void __iomem *ioaddr);
+void dwmac4_dma_start_rx(void __iomem *ioaddr);
+void dwmac4_dma_stop_rx(void __iomem *ioaddr);
+int dwmac4_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x);
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
+void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+
+#endif /* __DWMAC4_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
new file mode 100644
index 000000000..c7326d5b2
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2007-2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "common.h"
+#include "dwmac4_dma.h"
+#include "dwmac4.h"
+
+int dwmac4_dma_reset(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int limit;
+
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ limit = 10;
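+	/* Poll for completion; worst case this waits 10 x 10 ms = 100 ms
+	 * before giving up (a bound chosen here, not a documented one).
+	 */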
+ while (limit--) {
+ if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ break;
+ mdelay(10);
+ }
+
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+{
+ writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0));
+}
+
+void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+{
+ writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0));
+}
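+
+/* Note: both tail pointer helpers above ignore @chan and always program
+ * channel 0, matching the single-channel usage elsewhere in this patch.
+ */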
+
+void dwmac4_dma_start_tx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+ value |= DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ value |= GMAC_CONFIG_TE;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_dma_stop_tx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+ value &= ~DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ value &= ~GMAC_CONFIG_TE;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_dma_start_rx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+ value |= DMA_CONTROL_SR;
+
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ value |= GMAC_CONFIG_RE;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_dma_stop_rx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ value &= ~GMAC_CONFIG_RE;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len)
+{
+ writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0));
+}
+
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len)
+{
+ writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0));
+}
+
+void dwmac4_enable_dma_irq(void __iomem *ioaddr)
+{
+ writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
+ DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+}
+
+void dwmac410_enable_dma_irq(void __iomem *ioaddr)
+{
+ writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
+ ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+}
+
+void dwmac4_disable_dma_irq(void __iomem *ioaddr)
+{
+ writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+}
+
+int dwmac4_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x)
+{
+ int ret = 0;
+
+ u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
+
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RBU))
+ x->rx_buf_unav_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RPS))
+ x->rx_process_stopped_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RWT))
+ x->rx_watchdog_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_ETI))
+ x->tx_early_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_TPS)) {
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(intr_status & DMA_CHAN_STATUS_FBE)) {
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (likely(intr_status & DMA_CHAN_STATUS_NIS)) {
+ x->normal_irq_n++;
+ if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
+ u32 value;
+
+ value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+			/* schedule NAPI only on a real RIE event */
+ if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
+ x->rx_normal_irq_n++;
+ ret |= handle_rx;
+ }
+ }
+ if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
+ x->tx_normal_irq_n++;
+ ret |= handle_tx;
+ }
+ if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
+ x->rx_early_irq++;
+ }
+
+	/* Clear the interrupt by writing a logic 1 to the chanX interrupt
+	 * status [21-0] except reserved bits [5-3]: the 0x3fffc7 mask keeps
+	 * bits [21:6] and [2:0].
+	 */
+ writel((intr_status & 0x3fffc7),
+ ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
+
+ return ret;
+}
+
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+	/* For MAC Addr registers we have to set the Address Enable (AE)
+ * bit that has no effect on the High Reg 0 where the bit 31 (MO)
+ * is RO.
+ */
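+	/* Illustrative example: for 00:11:22:33:44:55 (and channel 0) the
+	 * high word becomes 0x5544 | AE and the low word 0x33221100.
+	 */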
+ data |= (STMMAC_CHAN0 << GMAC_HI_DCS_SHIFT);
+ writel(data | GMAC_HI_REG_AE, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+}
+
+/* Enable/disable MAC RX/TX */
+void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+
+ if (enable)
+ value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE;
+ else
+ value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE);
+
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low)
+{
+ unsigned int hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + high);
+ lo_addr = readl(ioaddr + low);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index cfb018c7c..38f19c99c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -411,6 +411,26 @@ static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
}
}
+static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
+{
+ struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+ int i;
+
+ pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");
+
+ for (i = 0; i < size; i++) {
+ u64 x;
+
+ x = *(u64 *)ep;
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(ep),
+ (unsigned int)x, (unsigned int)(x >> 32),
+ ep->basic.des2, ep->basic.des3);
+ ep++;
+ }
+ pr_info("\n");
+}
+
const struct stmmac_desc_ops enh_desc_ops = {
.tx_status = enh_desc_get_tx_status,
.rx_status = enh_desc_get_rx_status,
@@ -430,4 +450,5 @@ const struct stmmac_desc_ops enh_desc_ops = {
.get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
.get_timestamp = enh_desc_get_timestamp,
.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
+ .display_ring = enh_desc_display_ring,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 192c24913..38a1a5603 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -35,6 +35,10 @@
* current value.*/
#define MMC_CNTRL_PRESET 0x10
#define MMC_CNTRL_FULL_HALF_PRESET 0x20
+
+#define MMC_GMAC4_OFFSET 0x700
+#define MMC_GMAC3_X_OFFSET 0x100
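+/* The MMC register offsets in mmc_core.c are relative to one of these
+ * bases; stmmac_mmc_setup() selects the base and stores the resulting
+ * address in priv->mmcaddr.
+ */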
+
struct stmmac_counters {
unsigned int mmc_tx_octetcount_gb;
unsigned int mmc_tx_framecount_gb;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 3f20bb1fe..ce9aa7928 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -28,12 +28,12 @@
/* MAC Management Counters register offset */
-#define MMC_CNTRL 0x00000100 /* MMC Control */
-#define MMC_RX_INTR 0x00000104 /* MMC RX Interrupt */
-#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */
-#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */
-#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */
-#define MMC_DEFAULT_MASK 0xffffffff
+#define MMC_CNTRL 0x00 /* MMC Control */
+#define MMC_RX_INTR 0x04 /* MMC RX Interrupt */
+#define MMC_TX_INTR 0x08 /* MMC TX Interrupt */
+#define MMC_RX_INTR_MASK 0x0c /* MMC Interrupt Mask */
+#define MMC_TX_INTR_MASK 0x10 /* MMC Interrupt Mask */
+#define MMC_DEFAULT_MASK 0xffffffff
/* MMC TX counter registers */
@@ -41,115 +41,115 @@
* _GB register stands for good and bad frames
* _G is for good only.
*/
-#define MMC_TX_OCTETCOUNT_GB 0x00000114
-#define MMC_TX_FRAMECOUNT_GB 0x00000118
-#define MMC_TX_BROADCASTFRAME_G 0x0000011c
-#define MMC_TX_MULTICASTFRAME_G 0x00000120
-#define MMC_TX_64_OCTETS_GB 0x00000124
-#define MMC_TX_65_TO_127_OCTETS_GB 0x00000128
-#define MMC_TX_128_TO_255_OCTETS_GB 0x0000012c
-#define MMC_TX_256_TO_511_OCTETS_GB 0x00000130
-#define MMC_TX_512_TO_1023_OCTETS_GB 0x00000134
-#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x00000138
-#define MMC_TX_UNICAST_GB 0x0000013c
-#define MMC_TX_MULTICAST_GB 0x00000140
-#define MMC_TX_BROADCAST_GB 0x00000144
-#define MMC_TX_UNDERFLOW_ERROR 0x00000148
-#define MMC_TX_SINGLECOL_G 0x0000014c
-#define MMC_TX_MULTICOL_G 0x00000150
-#define MMC_TX_DEFERRED 0x00000154
-#define MMC_TX_LATECOL 0x00000158
-#define MMC_TX_EXESSCOL 0x0000015c
-#define MMC_TX_CARRIER_ERROR 0x00000160
-#define MMC_TX_OCTETCOUNT_G 0x00000164
-#define MMC_TX_FRAMECOUNT_G 0x00000168
-#define MMC_TX_EXCESSDEF 0x0000016c
-#define MMC_TX_PAUSE_FRAME 0x00000170
-#define MMC_TX_VLAN_FRAME_G 0x00000174
+#define MMC_TX_OCTETCOUNT_GB 0x14
+#define MMC_TX_FRAMECOUNT_GB 0x18
+#define MMC_TX_BROADCASTFRAME_G 0x1c
+#define MMC_TX_MULTICASTFRAME_G 0x20
+#define MMC_TX_64_OCTETS_GB 0x24
+#define MMC_TX_65_TO_127_OCTETS_GB 0x28
+#define MMC_TX_128_TO_255_OCTETS_GB 0x2c
+#define MMC_TX_256_TO_511_OCTETS_GB 0x30
+#define MMC_TX_512_TO_1023_OCTETS_GB 0x34
+#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x38
+#define MMC_TX_UNICAST_GB 0x3c
+#define MMC_TX_MULTICAST_GB 0x40
+#define MMC_TX_BROADCAST_GB 0x44
+#define MMC_TX_UNDERFLOW_ERROR 0x48
+#define MMC_TX_SINGLECOL_G 0x4c
+#define MMC_TX_MULTICOL_G 0x50
+#define MMC_TX_DEFERRED 0x54
+#define MMC_TX_LATECOL 0x58
+#define MMC_TX_EXESSCOL 0x5c
+#define MMC_TX_CARRIER_ERROR 0x60
+#define MMC_TX_OCTETCOUNT_G 0x64
+#define MMC_TX_FRAMECOUNT_G 0x68
+#define MMC_TX_EXCESSDEF 0x6c
+#define MMC_TX_PAUSE_FRAME 0x70
+#define MMC_TX_VLAN_FRAME_G 0x74
/* MMC RX counter registers */
-#define MMC_RX_FRAMECOUNT_GB 0x00000180
-#define MMC_RX_OCTETCOUNT_GB 0x00000184
-#define MMC_RX_OCTETCOUNT_G 0x00000188
-#define MMC_RX_BROADCASTFRAME_G 0x0000018c
-#define MMC_RX_MULTICASTFRAME_G 0x00000190
-#define MMC_RX_CRC_ERROR 0x00000194
-#define MMC_RX_ALIGN_ERROR 0x00000198
-#define MMC_RX_RUN_ERROR 0x0000019C
-#define MMC_RX_JABBER_ERROR 0x000001A0
-#define MMC_RX_UNDERSIZE_G 0x000001A4
-#define MMC_RX_OVERSIZE_G 0x000001A8
-#define MMC_RX_64_OCTETS_GB 0x000001AC
-#define MMC_RX_65_TO_127_OCTETS_GB 0x000001b0
-#define MMC_RX_128_TO_255_OCTETS_GB 0x000001b4
-#define MMC_RX_256_TO_511_OCTETS_GB 0x000001b8
-#define MMC_RX_512_TO_1023_OCTETS_GB 0x000001bc
-#define MMC_RX_1024_TO_MAX_OCTETS_GB 0x000001c0
-#define MMC_RX_UNICAST_G 0x000001c4
-#define MMC_RX_LENGTH_ERROR 0x000001c8
-#define MMC_RX_AUTOFRANGETYPE 0x000001cc
-#define MMC_RX_PAUSE_FRAMES 0x000001d0
-#define MMC_RX_FIFO_OVERFLOW 0x000001d4
-#define MMC_RX_VLAN_FRAMES_GB 0x000001d8
-#define MMC_RX_WATCHDOG_ERROR 0x000001dc
+#define MMC_RX_FRAMECOUNT_GB 0x80
+#define MMC_RX_OCTETCOUNT_GB 0x84
+#define MMC_RX_OCTETCOUNT_G 0x88
+#define MMC_RX_BROADCASTFRAME_G 0x8c
+#define MMC_RX_MULTICASTFRAME_G 0x90
+#define MMC_RX_CRC_ERROR 0x94
+#define MMC_RX_ALIGN_ERROR 0x98
+#define MMC_RX_RUN_ERROR 0x9C
+#define MMC_RX_JABBER_ERROR 0xA0
+#define MMC_RX_UNDERSIZE_G 0xA4
+#define MMC_RX_OVERSIZE_G 0xA8
+#define MMC_RX_64_OCTETS_GB 0xAC
+#define MMC_RX_65_TO_127_OCTETS_GB 0xb0
+#define MMC_RX_128_TO_255_OCTETS_GB 0xb4
+#define MMC_RX_256_TO_511_OCTETS_GB 0xb8
+#define MMC_RX_512_TO_1023_OCTETS_GB 0xbc
+#define MMC_RX_1024_TO_MAX_OCTETS_GB 0xc0
+#define MMC_RX_UNICAST_G 0xc4
+#define MMC_RX_LENGTH_ERROR 0xc8
+#define MMC_RX_AUTOFRANGETYPE 0xcc
+#define MMC_RX_PAUSE_FRAMES 0xd0
+#define MMC_RX_FIFO_OVERFLOW 0xd4
+#define MMC_RX_VLAN_FRAMES_GB 0xd8
+#define MMC_RX_WATCHDOG_ERROR 0xdc
/* IPC*/
-#define MMC_RX_IPC_INTR_MASK 0x00000200
-#define MMC_RX_IPC_INTR 0x00000208
+#define MMC_RX_IPC_INTR_MASK 0x100
+#define MMC_RX_IPC_INTR 0x108
/* IPv4*/
-#define MMC_RX_IPV4_GD 0x00000210
-#define MMC_RX_IPV4_HDERR 0x00000214
-#define MMC_RX_IPV4_NOPAY 0x00000218
-#define MMC_RX_IPV4_FRAG 0x0000021C
-#define MMC_RX_IPV4_UDSBL 0x00000220
+#define MMC_RX_IPV4_GD 0x110
+#define MMC_RX_IPV4_HDERR 0x114
+#define MMC_RX_IPV4_NOPAY 0x118
+#define MMC_RX_IPV4_FRAG 0x11C
+#define MMC_RX_IPV4_UDSBL 0x120
-#define MMC_RX_IPV4_GD_OCTETS 0x00000250
-#define MMC_RX_IPV4_HDERR_OCTETS 0x00000254
-#define MMC_RX_IPV4_NOPAY_OCTETS 0x00000258
-#define MMC_RX_IPV4_FRAG_OCTETS 0x0000025c
-#define MMC_RX_IPV4_UDSBL_OCTETS 0x00000260
+#define MMC_RX_IPV4_GD_OCTETS 0x150
+#define MMC_RX_IPV4_HDERR_OCTETS 0x154
+#define MMC_RX_IPV4_NOPAY_OCTETS 0x158
+#define MMC_RX_IPV4_FRAG_OCTETS 0x15c
+#define MMC_RX_IPV4_UDSBL_OCTETS 0x160
/* IPV6*/
-#define MMC_RX_IPV6_GD_OCTETS 0x00000264
-#define MMC_RX_IPV6_HDERR_OCTETS 0x00000268
-#define MMC_RX_IPV6_NOPAY_OCTETS 0x0000026c
+#define MMC_RX_IPV6_GD_OCTETS 0x164
+#define MMC_RX_IPV6_HDERR_OCTETS 0x168
+#define MMC_RX_IPV6_NOPAY_OCTETS 0x16c
-#define MMC_RX_IPV6_GD 0x00000224
-#define MMC_RX_IPV6_HDERR 0x00000228
-#define MMC_RX_IPV6_NOPAY 0x0000022c
+#define MMC_RX_IPV6_GD 0x124
+#define MMC_RX_IPV6_HDERR 0x128
+#define MMC_RX_IPV6_NOPAY 0x12c
/* Protocols*/
-#define MMC_RX_UDP_GD 0x00000230
-#define MMC_RX_UDP_ERR 0x00000234
-#define MMC_RX_TCP_GD 0x00000238
-#define MMC_RX_TCP_ERR 0x0000023c
-#define MMC_RX_ICMP_GD 0x00000240
-#define MMC_RX_ICMP_ERR 0x00000244
+#define MMC_RX_UDP_GD 0x130
+#define MMC_RX_UDP_ERR 0x134
+#define MMC_RX_TCP_GD 0x138
+#define MMC_RX_TCP_ERR 0x13c
+#define MMC_RX_ICMP_GD 0x140
+#define MMC_RX_ICMP_ERR 0x144
-#define MMC_RX_UDP_GD_OCTETS 0x00000270
-#define MMC_RX_UDP_ERR_OCTETS 0x00000274
-#define MMC_RX_TCP_GD_OCTETS 0x00000278
-#define MMC_RX_TCP_ERR_OCTETS 0x0000027c
-#define MMC_RX_ICMP_GD_OCTETS 0x00000280
-#define MMC_RX_ICMP_ERR_OCTETS 0x00000284
+#define MMC_RX_UDP_GD_OCTETS 0x170
+#define MMC_RX_UDP_ERR_OCTETS 0x174
+#define MMC_RX_TCP_GD_OCTETS 0x178
+#define MMC_RX_TCP_ERR_OCTETS 0x17c
+#define MMC_RX_ICMP_GD_OCTETS 0x180
+#define MMC_RX_ICMP_ERR_OCTETS 0x184
-void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
+void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
- u32 value = readl(ioaddr + MMC_CNTRL);
+ u32 value = readl(mmcaddr + MMC_CNTRL);
value |= (mode & 0x3F);
- writel(value, ioaddr + MMC_CNTRL);
+ writel(value, mmcaddr + MMC_CNTRL);
pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
MMC_CNTRL, value);
}
/* To mask all interrupts. */
-void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
+void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
- writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
- writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
- writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK);
}
/* This reads the MAC core counters (if actually supported).
@@ -157,111 +157,116 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
* counter after a read. So all the field of the mmc struct
* have to be incremented.
*/
-void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
+void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
{
- mmc->mmc_tx_octetcount_gb += readl(ioaddr + MMC_TX_OCTETCOUNT_GB);
- mmc->mmc_tx_framecount_gb += readl(ioaddr + MMC_TX_FRAMECOUNT_GB);
- mmc->mmc_tx_broadcastframe_g += readl(ioaddr + MMC_TX_BROADCASTFRAME_G);
- mmc->mmc_tx_multicastframe_g += readl(ioaddr + MMC_TX_MULTICASTFRAME_G);
- mmc->mmc_tx_64_octets_gb += readl(ioaddr + MMC_TX_64_OCTETS_GB);
+ mmc->mmc_tx_octetcount_gb += readl(mmcaddr + MMC_TX_OCTETCOUNT_GB);
+ mmc->mmc_tx_framecount_gb += readl(mmcaddr + MMC_TX_FRAMECOUNT_GB);
+ mmc->mmc_tx_broadcastframe_g += readl(mmcaddr +
+ MMC_TX_BROADCASTFRAME_G);
+ mmc->mmc_tx_multicastframe_g += readl(mmcaddr +
+ MMC_TX_MULTICASTFRAME_G);
+ mmc->mmc_tx_64_octets_gb += readl(mmcaddr + MMC_TX_64_OCTETS_GB);
mmc->mmc_tx_65_to_127_octets_gb +=
- readl(ioaddr + MMC_TX_65_TO_127_OCTETS_GB);
+ readl(mmcaddr + MMC_TX_65_TO_127_OCTETS_GB);
mmc->mmc_tx_128_to_255_octets_gb +=
- readl(ioaddr + MMC_TX_128_TO_255_OCTETS_GB);
+ readl(mmcaddr + MMC_TX_128_TO_255_OCTETS_GB);
mmc->mmc_tx_256_to_511_octets_gb +=
- readl(ioaddr + MMC_TX_256_TO_511_OCTETS_GB);
+ readl(mmcaddr + MMC_TX_256_TO_511_OCTETS_GB);
mmc->mmc_tx_512_to_1023_octets_gb +=
- readl(ioaddr + MMC_TX_512_TO_1023_OCTETS_GB);
+ readl(mmcaddr + MMC_TX_512_TO_1023_OCTETS_GB);
mmc->mmc_tx_1024_to_max_octets_gb +=
- readl(ioaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
- mmc->mmc_tx_unicast_gb += readl(ioaddr + MMC_TX_UNICAST_GB);
- mmc->mmc_tx_multicast_gb += readl(ioaddr + MMC_TX_MULTICAST_GB);
- mmc->mmc_tx_broadcast_gb += readl(ioaddr + MMC_TX_BROADCAST_GB);
- mmc->mmc_tx_underflow_error += readl(ioaddr + MMC_TX_UNDERFLOW_ERROR);
- mmc->mmc_tx_singlecol_g += readl(ioaddr + MMC_TX_SINGLECOL_G);
- mmc->mmc_tx_multicol_g += readl(ioaddr + MMC_TX_MULTICOL_G);
- mmc->mmc_tx_deferred += readl(ioaddr + MMC_TX_DEFERRED);
- mmc->mmc_tx_latecol += readl(ioaddr + MMC_TX_LATECOL);
- mmc->mmc_tx_exesscol += readl(ioaddr + MMC_TX_EXESSCOL);
- mmc->mmc_tx_carrier_error += readl(ioaddr + MMC_TX_CARRIER_ERROR);
- mmc->mmc_tx_octetcount_g += readl(ioaddr + MMC_TX_OCTETCOUNT_G);
- mmc->mmc_tx_framecount_g += readl(ioaddr + MMC_TX_FRAMECOUNT_G);
- mmc->mmc_tx_excessdef += readl(ioaddr + MMC_TX_EXCESSDEF);
- mmc->mmc_tx_pause_frame += readl(ioaddr + MMC_TX_PAUSE_FRAME);
- mmc->mmc_tx_vlan_frame_g += readl(ioaddr + MMC_TX_VLAN_FRAME_G);
+ readl(mmcaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
+ mmc->mmc_tx_unicast_gb += readl(mmcaddr + MMC_TX_UNICAST_GB);
+ mmc->mmc_tx_multicast_gb += readl(mmcaddr + MMC_TX_MULTICAST_GB);
+ mmc->mmc_tx_broadcast_gb += readl(mmcaddr + MMC_TX_BROADCAST_GB);
+ mmc->mmc_tx_underflow_error += readl(mmcaddr + MMC_TX_UNDERFLOW_ERROR);
+ mmc->mmc_tx_singlecol_g += readl(mmcaddr + MMC_TX_SINGLECOL_G);
+ mmc->mmc_tx_multicol_g += readl(mmcaddr + MMC_TX_MULTICOL_G);
+ mmc->mmc_tx_deferred += readl(mmcaddr + MMC_TX_DEFERRED);
+ mmc->mmc_tx_latecol += readl(mmcaddr + MMC_TX_LATECOL);
+ mmc->mmc_tx_exesscol += readl(mmcaddr + MMC_TX_EXESSCOL);
+ mmc->mmc_tx_carrier_error += readl(mmcaddr + MMC_TX_CARRIER_ERROR);
+ mmc->mmc_tx_octetcount_g += readl(mmcaddr + MMC_TX_OCTETCOUNT_G);
+ mmc->mmc_tx_framecount_g += readl(mmcaddr + MMC_TX_FRAMECOUNT_G);
+ mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF);
+ mmc->mmc_tx_pause_frame += readl(mmcaddr + MMC_TX_PAUSE_FRAME);
+ mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G);
/* MMC RX counter registers */
- mmc->mmc_rx_framecount_gb += readl(ioaddr + MMC_RX_FRAMECOUNT_GB);
- mmc->mmc_rx_octetcount_gb += readl(ioaddr + MMC_RX_OCTETCOUNT_GB);
- mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
- mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
- mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
- mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERROR);
- mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
- mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
- mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
- mmc->mmc_rx_undersize_g += readl(ioaddr + MMC_RX_UNDERSIZE_G);
- mmc->mmc_rx_oversize_g += readl(ioaddr + MMC_RX_OVERSIZE_G);
- mmc->mmc_rx_64_octets_gb += readl(ioaddr + MMC_RX_64_OCTETS_GB);
+ mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB);
+ mmc->mmc_rx_octetcount_gb += readl(mmcaddr + MMC_RX_OCTETCOUNT_GB);
+ mmc->mmc_rx_octetcount_g += readl(mmcaddr + MMC_RX_OCTETCOUNT_G);
+ mmc->mmc_rx_broadcastframe_g += readl(mmcaddr +
+ MMC_RX_BROADCASTFRAME_G);
+ mmc->mmc_rx_multicastframe_g += readl(mmcaddr +
+ MMC_RX_MULTICASTFRAME_G);
+ mmc->mmc_rx_crc_error += readl(mmcaddr + MMC_RX_CRC_ERROR);
+ mmc->mmc_rx_align_error += readl(mmcaddr + MMC_RX_ALIGN_ERROR);
+ mmc->mmc_rx_run_error += readl(mmcaddr + MMC_RX_RUN_ERROR);
+ mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_RX_JABBER_ERROR);
+ mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_RX_UNDERSIZE_G);
+ mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_RX_OVERSIZE_G);
+ mmc->mmc_rx_64_octets_gb += readl(mmcaddr + MMC_RX_64_OCTETS_GB);
mmc->mmc_rx_65_to_127_octets_gb +=
- readl(ioaddr + MMC_RX_65_TO_127_OCTETS_GB);
+ readl(mmcaddr + MMC_RX_65_TO_127_OCTETS_GB);
mmc->mmc_rx_128_to_255_octets_gb +=
- readl(ioaddr + MMC_RX_128_TO_255_OCTETS_GB);
+ readl(mmcaddr + MMC_RX_128_TO_255_OCTETS_GB);
mmc->mmc_rx_256_to_511_octets_gb +=
- readl(ioaddr + MMC_RX_256_TO_511_OCTETS_GB);
+ readl(mmcaddr + MMC_RX_256_TO_511_OCTETS_GB);
mmc->mmc_rx_512_to_1023_octets_gb +=
- readl(ioaddr + MMC_RX_512_TO_1023_OCTETS_GB);
+ readl(mmcaddr + MMC_RX_512_TO_1023_OCTETS_GB);
mmc->mmc_rx_1024_to_max_octets_gb +=
- readl(ioaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
- mmc->mmc_rx_unicast_g += readl(ioaddr + MMC_RX_UNICAST_G);
- mmc->mmc_rx_length_error += readl(ioaddr + MMC_RX_LENGTH_ERROR);
- mmc->mmc_rx_autofrangetype += readl(ioaddr + MMC_RX_AUTOFRANGETYPE);
- mmc->mmc_rx_pause_frames += readl(ioaddr + MMC_RX_PAUSE_FRAMES);
- mmc->mmc_rx_fifo_overflow += readl(ioaddr + MMC_RX_FIFO_OVERFLOW);
- mmc->mmc_rx_vlan_frames_gb += readl(ioaddr + MMC_RX_VLAN_FRAMES_GB);
- mmc->mmc_rx_watchdog_error += readl(ioaddr + MMC_RX_WATCHDOG_ERROR);
+ readl(mmcaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
+ mmc->mmc_rx_unicast_g += readl(mmcaddr + MMC_RX_UNICAST_G);
+ mmc->mmc_rx_length_error += readl(mmcaddr + MMC_RX_LENGTH_ERROR);
+ mmc->mmc_rx_autofrangetype += readl(mmcaddr + MMC_RX_AUTOFRANGETYPE);
+ mmc->mmc_rx_pause_frames += readl(mmcaddr + MMC_RX_PAUSE_FRAMES);
+ mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
+ mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
+ mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
/* IPC */
- mmc->mmc_rx_ipc_intr_mask += readl(ioaddr + MMC_RX_IPC_INTR_MASK);
- mmc->mmc_rx_ipc_intr += readl(ioaddr + MMC_RX_IPC_INTR);
+ mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + MMC_RX_IPC_INTR_MASK);
+ mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR);
/* IPv4 */
- mmc->mmc_rx_ipv4_gd += readl(ioaddr + MMC_RX_IPV4_GD);
- mmc->mmc_rx_ipv4_hderr += readl(ioaddr + MMC_RX_IPV4_HDERR);
- mmc->mmc_rx_ipv4_nopay += readl(ioaddr + MMC_RX_IPV4_NOPAY);
- mmc->mmc_rx_ipv4_frag += readl(ioaddr + MMC_RX_IPV4_FRAG);
- mmc->mmc_rx_ipv4_udsbl += readl(ioaddr + MMC_RX_IPV4_UDSBL);
+ mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
+ mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
+ mmc->mmc_rx_ipv4_nopay += readl(mmcaddr + MMC_RX_IPV4_NOPAY);
+ mmc->mmc_rx_ipv4_frag += readl(mmcaddr + MMC_RX_IPV4_FRAG);
+ mmc->mmc_rx_ipv4_udsbl += readl(mmcaddr + MMC_RX_IPV4_UDSBL);
- mmc->mmc_rx_ipv4_gd_octets += readl(ioaddr + MMC_RX_IPV4_GD_OCTETS);
+ mmc->mmc_rx_ipv4_gd_octets += readl(mmcaddr + MMC_RX_IPV4_GD_OCTETS);
mmc->mmc_rx_ipv4_hderr_octets +=
- readl(ioaddr + MMC_RX_IPV4_HDERR_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV4_HDERR_OCTETS);
mmc->mmc_rx_ipv4_nopay_octets +=
- readl(ioaddr + MMC_RX_IPV4_NOPAY_OCTETS);
- mmc->mmc_rx_ipv4_frag_octets += readl(ioaddr + MMC_RX_IPV4_FRAG_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV4_NOPAY_OCTETS);
+ mmc->mmc_rx_ipv4_frag_octets += readl(mmcaddr +
+ MMC_RX_IPV4_FRAG_OCTETS);
mmc->mmc_rx_ipv4_udsbl_octets +=
- readl(ioaddr + MMC_RX_IPV4_UDSBL_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV4_UDSBL_OCTETS);
/* IPV6 */
- mmc->mmc_rx_ipv6_gd_octets += readl(ioaddr + MMC_RX_IPV6_GD_OCTETS);
+ mmc->mmc_rx_ipv6_gd_octets += readl(mmcaddr + MMC_RX_IPV6_GD_OCTETS);
mmc->mmc_rx_ipv6_hderr_octets +=
- readl(ioaddr + MMC_RX_IPV6_HDERR_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV6_HDERR_OCTETS);
mmc->mmc_rx_ipv6_nopay_octets +=
- readl(ioaddr + MMC_RX_IPV6_NOPAY_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV6_NOPAY_OCTETS);
- mmc->mmc_rx_ipv6_gd += readl(ioaddr + MMC_RX_IPV6_GD);
- mmc->mmc_rx_ipv6_hderr += readl(ioaddr + MMC_RX_IPV6_HDERR);
- mmc->mmc_rx_ipv6_nopay += readl(ioaddr + MMC_RX_IPV6_NOPAY);
+ mmc->mmc_rx_ipv6_gd += readl(mmcaddr + MMC_RX_IPV6_GD);
+ mmc->mmc_rx_ipv6_hderr += readl(mmcaddr + MMC_RX_IPV6_HDERR);
+ mmc->mmc_rx_ipv6_nopay += readl(mmcaddr + MMC_RX_IPV6_NOPAY);
/* Protocols */
- mmc->mmc_rx_udp_gd += readl(ioaddr + MMC_RX_UDP_GD);
- mmc->mmc_rx_udp_err += readl(ioaddr + MMC_RX_UDP_ERR);
- mmc->mmc_rx_tcp_gd += readl(ioaddr + MMC_RX_TCP_GD);
- mmc->mmc_rx_tcp_err += readl(ioaddr + MMC_RX_TCP_ERR);
- mmc->mmc_rx_icmp_gd += readl(ioaddr + MMC_RX_ICMP_GD);
- mmc->mmc_rx_icmp_err += readl(ioaddr + MMC_RX_ICMP_ERR);
+ mmc->mmc_rx_udp_gd += readl(mmcaddr + MMC_RX_UDP_GD);
+ mmc->mmc_rx_udp_err += readl(mmcaddr + MMC_RX_UDP_ERR);
+ mmc->mmc_rx_tcp_gd += readl(mmcaddr + MMC_RX_TCP_GD);
+ mmc->mmc_rx_tcp_err += readl(mmcaddr + MMC_RX_TCP_ERR);
+ mmc->mmc_rx_icmp_gd += readl(mmcaddr + MMC_RX_ICMP_GD);
+ mmc->mmc_rx_icmp_err += readl(mmcaddr + MMC_RX_ICMP_ERR);
- mmc->mmc_rx_udp_gd_octets += readl(ioaddr + MMC_RX_UDP_GD_OCTETS);
- mmc->mmc_rx_udp_err_octets += readl(ioaddr + MMC_RX_UDP_ERR_OCTETS);
- mmc->mmc_rx_tcp_gd_octets += readl(ioaddr + MMC_RX_TCP_GD_OCTETS);
- mmc->mmc_rx_tcp_err_octets += readl(ioaddr + MMC_RX_TCP_ERR_OCTETS);
- mmc->mmc_rx_icmp_gd_octets += readl(ioaddr + MMC_RX_ICMP_GD_OCTETS);
- mmc->mmc_rx_icmp_err_octets += readl(ioaddr + MMC_RX_ICMP_ERR_OCTETS);
+ mmc->mmc_rx_udp_gd_octets += readl(mmcaddr + MMC_RX_UDP_GD_OCTETS);
+ mmc->mmc_rx_udp_err_octets += readl(mmcaddr + MMC_RX_UDP_ERR_OCTETS);
+ mmc->mmc_rx_tcp_gd_octets += readl(mmcaddr + MMC_RX_TCP_GD_OCTETS);
+ mmc->mmc_rx_tcp_err_octets += readl(mmcaddr + MMC_RX_TCP_ERR_OCTETS);
+ mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS);
+ mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 011386f6f..2beacd0d3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -279,6 +279,26 @@ static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
return 1;
}
+static void ndesc_display_ring(void *head, unsigned int size, bool rx)
+{
+ struct dma_desc *p = (struct dma_desc *)head;
+ int i;
+
+ pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
+
+ for (i = 0; i < size; i++) {
+ u64 x;
+
+ x = *(u64 *)p;
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
+ i, (unsigned int)virt_to_phys(p),
+ (unsigned int)x, (unsigned int)(x >> 32),
+ p->des2, p->des3);
+ p++;
+ }
+ pr_info("\n");
+}
+
const struct stmmac_desc_ops ndesc_ops = {
.tx_status = ndesc_get_tx_status,
.rx_status = ndesc_get_rx_status,
@@ -297,4 +317,5 @@ const struct stmmac_desc_ops ndesc_ops = {
.get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
.get_timestamp = ndesc_get_timestamp,
.get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
+ .display_ring = ndesc_display_ring,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 8bbab9789..59ae6088c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,7 +24,7 @@
#define __STMMAC_H__
#define STMMAC_RESOURCE_NAME "stmmaceth"
-#define DRV_MODULE_VERSION "Oct_2015"
+#define DRV_MODULE_VERSION "Jan_2016"
#include <linux/clk.h>
#include <linux/stmmac.h>
@@ -67,6 +67,7 @@ struct stmmac_priv {
spinlock_t tx_lock;
bool tx_path_in_lpi_mode;
struct timer_list txtimer;
+ bool tso;
struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
struct dma_extended_desc *dma_erx;
@@ -128,6 +129,10 @@ struct stmmac_priv {
int use_riwt;
int irq_wake;
spinlock_t ptp_lock;
+ void __iomem *mmcaddr;
+ u32 rx_tail_addr;
+ u32 tx_tail_addr;
+ u32 mss;
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_dir;
@@ -143,9 +148,9 @@ void stmmac_set_ethtool_ops(struct net_device *netdev);
int stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
-int stmmac_resume(struct net_device *ndev);
-int stmmac_suspend(struct net_device *ndev);
-int stmmac_dvr_remove(struct net_device *ndev);
+int stmmac_resume(struct device *dev);
+int stmmac_suspend(struct device *dev);
+int stmmac_dvr_remove(struct device *dev);
int stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 3c7928edf..e2b98b016 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -161,6 +161,9 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(mtl_rx_fifo_ctrl_active),
STMMAC_STAT(mac_rx_frame_ctrl_fifo),
STMMAC_STAT(mac_gmii_rx_proto_engine),
+ /* TSO */
+ STMMAC_STAT(tx_tso_frames),
+ STMMAC_STAT(tx_tso_nfrags),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
@@ -499,14 +502,14 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
int i, j = 0;
/* Update the DMA HW counters for dwmac10/100 */
- if (!priv->plat->has_gmac)
+ if (priv->hw->dma->dma_diagnostic_fr)
priv->hw->dma->dma_diagnostic_fr(&dev->stats,
(void *) &priv->xstats,
priv->ioaddr);
else {
/* If supported, for new GMAC chips expose the MMC counters */
if (priv->dma_cap.rmon) {
- dwmac_mmc_read(priv->ioaddr, &priv->mmc);
+ dwmac_mmc_read(priv->mmcaddr, &priv->mmc);
for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
char *p;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fcbd4be56..e4071265b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -56,6 +56,7 @@
#include "dwmac1000.h"
#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
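+/* Largest buffer size programmed per TSO descriptor; SZ_16K - 1 (16383)
+ * presumably matches the width of the buffer length fields in TDES2.
+ */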
+#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
/* Module parameters */
#define TX_TIMEO 5000
@@ -721,13 +722,15 @@ static void stmmac_adjust_link(struct net_device *dev)
new_state = 1;
switch (phydev->speed) {
case 1000:
- if (likely(priv->plat->has_gmac))
+ if (likely((priv->plat->has_gmac) ||
+ (priv->plat->has_gmac4)))
ctrl &= ~priv->hw->link.port;
stmmac_hw_fix_mac_speed(priv);
break;
case 100:
case 10:
- if (priv->plat->has_gmac) {
+ if (likely((priv->plat->has_gmac) ||
+ (priv->plat->has_gmac4))) {
ctrl |= priv->hw->link.port;
if (phydev->speed == SPEED_100) {
ctrl |= priv->hw->link.speed;
@@ -875,53 +878,22 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
-/**
- * stmmac_display_ring - display ring
- * @head: pointer to the head of the ring passed.
- * @size: size of the ring.
- * @extend_desc: to verify if extended descriptors are used.
- * Description: display the control/status and buffer descriptors.
- */
-static void stmmac_display_ring(void *head, int size, int extend_desc)
-{
- int i;
- struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
- struct dma_desc *p = (struct dma_desc *)head;
-
- for (i = 0; i < size; i++) {
- u64 x;
- if (extend_desc) {
- x = *(u64 *) ep;
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
- i, (unsigned int)virt_to_phys(ep),
- (unsigned int)x, (unsigned int)(x >> 32),
- ep->basic.des2, ep->basic.des3);
- ep++;
- } else {
- x = *(u64 *) p;
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
- i, (unsigned int)virt_to_phys(p),
- (unsigned int)x, (unsigned int)(x >> 32),
- p->des2, p->des3);
- p++;
- }
- pr_info("\n");
- }
-}
-
static void stmmac_display_rings(struct stmmac_priv *priv)
{
+ void *head_rx, *head_tx;
+
if (priv->extend_desc) {
- pr_info("Extended RX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1);
- pr_info("Extended TX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1);
+ head_rx = (void *)priv->dma_erx;
+ head_tx = (void *)priv->dma_etx;
} else {
- pr_info("RX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0);
- pr_info("TX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0);
+ head_rx = (void *)priv->dma_rx;
+ head_tx = (void *)priv->dma_tx;
}
+
+ /* Display Rx ring */
+ priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+ /* Display Tx ring */
+ priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}
static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -1000,7 +972,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
return -EINVAL;
}
- p->des2 = priv->rx_skbuff_dma[i];
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ p->des0 = priv->rx_skbuff_dma[i];
+ else
+ p->des2 = priv->rx_skbuff_dma[i];
if ((priv->hw->mode->init_desc3) &&
(priv->dma_buf_sz == BUF_SIZE_16KiB))
@@ -1091,7 +1066,16 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
p = &((priv->dma_etx + i)->basic);
else
p = priv->dma_tx + i;
- p->des2 = 0;
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+ } else {
+ p->des2 = 0;
+ }
+
priv->tx_skbuff_dma[i].buf = 0;
priv->tx_skbuff_dma[i].map_as_page = false;
priv->tx_skbuff_dma[i].len = 0;
@@ -1354,9 +1338,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
priv->tx_skbuff_dma[entry].len,
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry].buf = 0;
+ priv->tx_skbuff_dma[entry].len = 0;
priv->tx_skbuff_dma[entry].map_as_page = false;
}
- priv->hw->mode->clean_desc3(priv, p);
+
+ if (priv->hw->mode->clean_desc3)
+ priv->hw->mode->clean_desc3(priv, p);
+
priv->tx_skbuff_dma[entry].last_segment = false;
priv->tx_skbuff_dma[entry].is_jumbo = false;
@@ -1479,41 +1467,23 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
- MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+ MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
+ else
+ priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
- dwmac_mmc_intr_all_mask(priv->ioaddr);
+ dwmac_mmc_intr_all_mask(priv->mmcaddr);
if (priv->dma_cap.rmon) {
- dwmac_mmc_ctrl(priv->ioaddr, mode);
+ dwmac_mmc_ctrl(priv->mmcaddr, mode);
memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
} else
pr_info(" No MAC Management Counters available\n");
}
/**
- * stmmac_get_synopsys_id - return the SYINID.
- * @priv: driver private structure
- * Description: this simple function is to decode and return the SYINID
- * starting from the HW core register.
- */
-static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
-{
- u32 hwid = priv->hw->synopsys_uid;
-
- /* Check Synopsys Id (not available on old chips) */
- if (likely(hwid)) {
- u32 uid = ((hwid & 0x0000ff00) >> 8);
- u32 synid = (hwid & 0x000000ff);
-
- pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
- uid, synid);
-
- return synid;
- }
- return 0;
-}
-
-/**
* stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
* @priv: driver private structure
* Description: select the Enhanced/Alternate or Normal descriptors.
@@ -1550,51 +1520,15 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
*/
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
- u32 hw_cap = 0;
+ u32 ret = 0;
if (priv->hw->dma->get_hw_feature) {
- hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
-
- priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
- priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
- priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
- priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
- priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
- priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
- priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
- priv->dma_cap.pmt_remote_wake_up =
- (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
- priv->dma_cap.pmt_magic_frame =
- (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
- /* MMC */
- priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
- /* IEEE 1588-2002 */
- priv->dma_cap.time_stamp =
- (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
- /* IEEE 1588-2008 */
- priv->dma_cap.atime_stamp =
- (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
- /* 802.3az - Energy-Efficient Ethernet (EEE) */
- priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
- priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
- /* TX and RX csum */
- priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
- priv->dma_cap.rx_coe_type1 =
- (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
- priv->dma_cap.rx_coe_type2 =
- (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
- priv->dma_cap.rxfifo_over_2048 =
- (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
- /* TX and RX number of channels */
- priv->dma_cap.number_rx_channel =
- (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
- priv->dma_cap.number_tx_channel =
- (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
- /* Alternate (enhanced) DESC mode */
- priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
- }
-
- return hw_cap;
+ priv->hw->dma->get_hw_feature(priv->ioaddr,
+ &priv->dma_cap);
+ ret = 1;
+ }
+
+ return ret;
}
/**
@@ -1650,8 +1584,19 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
- if ((priv->synopsys_id >= DWMAC_CORE_3_50) &&
- (priv->plat->axi && priv->hw->dma->axi))
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->rx_tail_addr = priv->dma_rx_phy +
+ (DMA_RX_SIZE * sizeof(struct dma_desc));
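+		/* i.e. the address just past the last RX descriptor */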
+ priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
+ STMMAC_CHAN0);
+
+ priv->tx_tail_addr = priv->dma_tx_phy +
+ (DMA_TX_SIZE * sizeof(struct dma_desc));
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+ STMMAC_CHAN0);
+ }
+
+ if (priv->plat->axi && priv->hw->dma->axi)
priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
return ret;
@@ -1731,7 +1676,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}
/* Enable the MAC Rx/Tx */
- stmmac_set_mac(priv->ioaddr, true);
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ stmmac_dwmac4_set_mac(priv->ioaddr, true);
+ else
+ stmmac_set_mac(priv->ioaddr, true);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -1769,6 +1717,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (priv->pcs && priv->hw->mac->ctrl_ane)
priv->hw->mac->ctrl_ane(priv->hw, 0);
+ /* set TX ring length */
+ if (priv->hw->dma->set_tx_ring_len)
+ priv->hw->dma->set_tx_ring_len(priv->ioaddr,
+ (DMA_TX_SIZE - 1));
+ /* set RX ring length */
+ if (priv->hw->dma->set_rx_ring_len)
+ priv->hw->dma->set_rx_ring_len(priv->ioaddr,
+ (DMA_RX_SIZE - 1));
+ /* Enable TSO */
+ if (priv->tso)
+ priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
+
return 0;
}
@@ -1934,6 +1894,239 @@ static int stmmac_release(struct net_device *dev)
}
/**
+ * stmmac_tso_allocator - fill TSO descriptors from a mapped buffer
+ * @priv: driver private structure
+ * @des: buffer start address
+ * @total_len: total length to fill in descriptors
+ * @last_segment: condition for the last descriptor
+ * Description:
+ * This function fills each descriptor with a chunk of the buffer and
+ * takes new descriptors from the ring until total_len is consumed.
+ */
+static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
+ int total_len, bool last_segment)
+{
+ struct dma_desc *desc;
+ int tmp_len;
+ u32 buff_size;
+
+ tmp_len = total_len;
+
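+	/* Example split (assumed sizes): with TSO_MAX_BUFF_SIZE = 16383,
+	 * a 40000 byte payload is spread over descriptors carrying 16383,
+	 * 16383 and 7234 bytes.
+	 */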
+ while (tmp_len > 0) {
+ priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+ desc = priv->dma_tx + priv->cur_tx;
+
+ desc->des0 = des + (total_len - tmp_len);
+ buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
+ TSO_MAX_BUFF_SIZE : tmp_len;
+
+ priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
+ 0, 1,
+ (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
+ 0, 0);
+
+ tmp_len -= TSO_MAX_BUFF_SIZE;
+ }
+}
+
+/**
+ * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description: this is the transmit function that is called on TSO frames
+ * (support available on GMAC4 and newer chips).
+ * The diagram below shows the ring programming in case of TSO frames:
+ *
+ * First Descriptor
+ * --------
+ * | DES0 |---> buffer1 = L2/L3/L4 header
+ * | DES1 |---> TCP Payload (can continue on next descr...)
+ * | DES2 |---> buffer 1 and 2 len
+ * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
+ * --------
+ * |
+ * ...
+ * |
+ * --------
+ * | DES0 | --| Split TCP Payload on Buffers 1 and 2
+ * | DES1 | --|
+ * | DES2 | --> buffer 1 and 2 len
+ * | DES3 |
+ * --------
+ *
+ * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only
+ * needs to be programmed when the MSS value changes.
+ */
+static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ u32 pay_len, mss;
+ int tmp_pay_len = 0;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int first_entry, des;
+ struct dma_desc *desc, *first, *mss_desc = NULL;
+ u8 proto_hdr_len;
+ int i;
+
+ spin_lock(&priv->tx_lock);
+
+ /* Compute header lengths */
+ proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+	/* A threshold-based descriptor availability check should be safe
+	 * enough: one descriptor per TSO_MAX_BUFF_SIZE chunk of payload,
+	 * plus one.
+	 */
+ if (unlikely(stmmac_tx_avail(priv) <
+ (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ /* This is a hard error, log it. */
+ pr_err("%s: Tx Ring full when queue awake\n", __func__);
+ }
+ spin_unlock(&priv->tx_lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
+
+ mss = skb_shinfo(skb)->gso_size;
+
+ /* set new MSS value if needed */
+ if (mss != priv->mss) {
+ mss_desc = priv->dma_tx + priv->cur_tx;
+ priv->hw->desc->set_mss(mss_desc, mss);
+ priv->mss = mss;
+ priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+ }
+
+ if (netif_msg_tx_queued(priv)) {
+ pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
+ __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
+ pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
+ skb->data_len);
+ }
+
+ first_entry = priv->cur_tx;
+
+ desc = priv->dma_tx + first_entry;
+ first = desc;
+
+ /* first descriptor: fill Headers on Buf1 */
+ des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
+
+ priv->tx_skbuff_dma[first_entry].buf = des;
+ priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+ priv->tx_skbuff[first_entry] = skb;
+
+ first->des0 = des;
+
+ /* Fill start of payload in buff2 of first descriptor */
+ if (pay_len)
+ first->des1 = des + proto_hdr_len;
+
+ /* If needed take extra descriptors to fill the remaining payload */
+ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+
+ stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
+
+ /* Prepare fragments */
+ for (i = 0; i < nfrags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		des = skb_frag_dma_map(priv->device, frag, 0,
+				       skb_frag_size(frag),
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->device, des))
+			goto dma_map_err;
+
+ stmmac_tso_allocator(priv, des, skb_frag_size(frag),
+ (i == nfrags - 1));
+
+ priv->tx_skbuff_dma[priv->cur_tx].buf = des;
+ priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
+ priv->tx_skbuff[priv->cur_tx] = NULL;
+ priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
+ }
+
+ priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
+
+ priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+
+ if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+ if (netif_msg_hw(priv))
+ pr_debug("%s: stop transmitted packets\n", __func__);
+ netif_stop_queue(dev);
+ }
+
+ dev->stats.tx_bytes += skb->len;
+ priv->xstats.tx_tso_frames++;
+ priv->xstats.tx_tso_nfrags += nfrags;
+
+ /* Manage tx mitigation */
+ priv->tx_count_frames += nfrags + 1;
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ mod_timer(&priv->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer));
+ } else {
+ priv->tx_count_frames = 0;
+ priv->hw->desc->set_tx_ic(desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ if (!priv->hwts_tx_en)
+ skb_tx_timestamp(skb);
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->hw->desc->enable_tx_timestamp(first);
+ }
+
+ /* Complete the first descriptor before granting the DMA */
+ priv->hw->desc->prepare_tso_tx_desc(first, 1,
+ proto_hdr_len,
+ pay_len,
+ 1, priv->tx_skbuff_dma[first_entry].last_segment,
+ tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
+
+ /* If context desc is used to change MSS */
+ if (mss_desc)
+ priv->hw->desc->set_tx_owner(mss_desc);
+
+	/* The own bit must be the latest setting done when preparing the
+	 * descriptor, and then a barrier is needed to make sure that
+ * all is coherent before granting the DMA engine.
+ */
+ smp_wmb();
+
+ if (netif_msg_pktdata(priv)) {
+ pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
+ __func__, priv->cur_tx, priv->dirty_tx, first_entry,
+ priv->cur_tx, first, nfrags);
+
+ priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
+ 0);
+
+ pr_info(">>> frame to be transmitted: ");
+ print_pkt(skb->data, skb_headlen(skb));
+ }
+
+ netdev_sent_queue(dev, skb->len);
+
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+ STMMAC_CHAN0);
+
+ spin_unlock(&priv->tx_lock);
+ return NETDEV_TX_OK;
+
+dma_map_err:
+ spin_unlock(&priv->tx_lock);
+ dev_err(priv->device, "Tx dma map failed\n");
+ dev_kfree_skb(skb);
+ priv->dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+/**
* stmmac_xmit - Tx entry point of the driver
* @skb : the socket buffer
* @dev : device pointer
@@ -1950,6 +2143,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int entry, first_entry;
struct dma_desc *desc, *first;
unsigned int enh_desc;
+ unsigned int des;
+
+ /* Manage oversized TCP frames for GMAC4 device */
+ if (skb_is_gso(skb) && priv->tso) {
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ return stmmac_tso_xmit(skb, dev);
+ }
spin_lock(&priv->tx_lock);
@@ -1985,7 +2185,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (enh_desc)
is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
- if (unlikely(is_jumbo)) {
+ if (unlikely(is_jumbo) && likely(priv->synopsys_id <
+ DWMAC_CORE_4_00)) {
entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
if (unlikely(entry < 0))
goto dma_map_err;
@@ -2003,13 +2204,21 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
else
desc = priv->dma_tx + entry;
- desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, desc->des2))
+ des = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
goto dma_map_err; /* should reuse desc w/o issues */
priv->tx_skbuff[entry] = NULL;
- priv->tx_skbuff_dma[entry].buf = desc->des2;
+
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+ desc->des0 = des;
+ priv->tx_skbuff_dma[entry].buf = desc->des0;
+ } else {
+ desc->des2 = des;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
+ }
+
priv->tx_skbuff_dma[entry].map_as_page = true;
priv->tx_skbuff_dma[entry].len = len;
priv->tx_skbuff_dma[entry].last_segment = last_segment;
@@ -2024,16 +2233,18 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
+ void *tx_head;
+
pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
__func__, priv->cur_tx, priv->dirty_tx, first_entry,
entry, first, nfrags);
if (priv->extend_desc)
- stmmac_display_ring((void *)priv->dma_etx,
- DMA_TX_SIZE, 1);
+ tx_head = (void *)priv->dma_etx;
else
- stmmac_display_ring((void *)priv->dma_tx,
- DMA_TX_SIZE, 0);
+ tx_head = (void *)priv->dma_tx;
+
+ priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
pr_debug(">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
@@ -2072,12 +2283,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(!is_jumbo)) {
bool last_segment = (nfrags == 0);
- first->des2 = dma_map_single(priv->device, skb->data,
- nopaged_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, first->des2))
+ des = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- priv->tx_skbuff_dma[first_entry].buf = first->des2;
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+ first->des0 = des;
+ priv->tx_skbuff_dma[first_entry].buf = first->des0;
+ } else {
+ first->des2 = des;
+ priv->tx_skbuff_dma[first_entry].buf = first->des2;
+ }
+
priv->tx_skbuff_dma[first_entry].len = nopaged_len;
priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
@@ -2101,7 +2319,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
}
netdev_sent_queue(dev, skb->len);
- priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+
+ if (priv->synopsys_id < DWMAC_CORE_4_00)
+ priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+ else
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+ STMMAC_CHAN0);
spin_unlock(&priv->tx_lock);
return NETDEV_TX_OK;
@@ -2183,9 +2406,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
dev_kfree_skb(skb);
break;
}
- p->des2 = priv->rx_skbuff_dma[entry];
- priv->hw->mode->refill_desc3(priv, p);
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+ p->des0 = priv->rx_skbuff_dma[entry];
+ p->des1 = 0;
+ } else {
+ p->des2 = priv->rx_skbuff_dma[entry];
+ }
+ if (priv->hw->mode->refill_desc3)
+ priv->hw->mode->refill_desc3(priv, p);
if (priv->rx_zeroc_thresh > 0)
priv->rx_zeroc_thresh--;
@@ -2193,9 +2422,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
if (netif_msg_rx_status(priv))
pr_debug("\trefill entry #%d\n", entry);
}
-
wmb();
- priv->hw->desc->set_rx_owner(p);
+
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+ priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
+ else
+ priv->hw->desc->set_rx_owner(p);
+
wmb();
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
@@ -2218,13 +2451,15 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
int coe = priv->hw->rx_csum;
if (netif_msg_rx_status(priv)) {
+ void *rx_head;
+
pr_debug("%s: descriptor ring:\n", __func__);
if (priv->extend_desc)
- stmmac_display_ring((void *)priv->dma_erx,
- DMA_RX_SIZE, 1);
+ rx_head = (void *)priv->dma_erx;
else
- stmmac_display_ring((void *)priv->dma_rx,
- DMA_RX_SIZE, 0);
+ rx_head = (void *)priv->dma_rx;
+
+ priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
int status;
@@ -2274,11 +2509,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
} else {
struct sk_buff *skb;
int frame_len;
+ unsigned int des;
+
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+ des = p->des0;
+ else
+ des = p->des2;
frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
- /* check if frame_len fits the preallocated memory */
+ /* If the frame length is greater than the skb buffer size
+ * (preallocated during init), then the packet is
+ * ignored
+ */
if (frame_len > priv->dma_buf_sz) {
+ pr_err("%s: len %d larger than size (%d)\n",
+ priv->dev->name, frame_len,
+ priv->dma_buf_sz);
priv->dev->stats.rx_length_errors++;
break;
}
@@ -2291,14 +2538,19 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
if (netif_msg_rx_status(priv)) {
pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
- p, entry, p->des2);
+ p, entry, des);
if (frame_len > ETH_FRAME_LEN)
pr_debug("\tframe size %d, COE: %d\n",
frame_len, status);
}
- if (unlikely((frame_len < priv->rx_copybreak) ||
- stmmac_rx_threshold_count(priv))) {
+ /* Zero-copy is always used for all sizes in case of
+ * GMAC4 because it always needs to refill the used
+ * descriptors.
+ */
+ if (unlikely(!priv->plat->has_gmac4 &&
+ ((frame_len < priv->rx_copybreak) ||
+ stmmac_rx_threshold_count(priv)))) {
skb = netdev_alloc_skb_ip_align(priv->dev,
frame_len);
if (unlikely(!skb)) {
@@ -2450,7 +2702,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return -EBUSY;
}
- if (priv->plat->enh_desc)
+ if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
max_mtu = JUMBO_LEN;
else
max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
@@ -2464,6 +2716,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
}
dev->mtu = new_mtu;
+
netdev_update_features(dev);
return 0;
@@ -2488,6 +2741,14 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_CSUM_MASK;
+ /* Disable tso if asked by ethtool */
+ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ if (features & NETIF_F_TSO)
+ priv->tso = true;
+ else
+ priv->tso = false;
+ }
+
return features;
}
@@ -2534,7 +2795,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
}
/* To handle GMAC own interrupts */
- if (priv->plat->has_gmac) {
+ if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
int status = priv->hw->mac->host_irq_status(priv->hw,
&priv->xstats);
if (unlikely(status)) {
@@ -2543,6 +2804,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
priv->tx_path_in_lpi_mode = true;
if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
priv->tx_path_in_lpi_mode = false;
+ if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
+ priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
+ priv->rx_tail_addr,
+ STMMAC_CHAN0);
}
}
@@ -2615,15 +2880,14 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
x = *(u64 *) ep;
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
- (unsigned int)x, (unsigned int)(x >> 32),
+ ep->basic.des0, ep->basic.des1,
ep->basic.des2, ep->basic.des3);
ep++;
} else {
x = *(u64 *) p;
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
- (unsigned int)x, (unsigned int)(x >> 32),
- p->des2, p->des3);
+ p->des0, p->des1, p->des2, p->des3);
p++;
}
seq_printf(seq, "\n");
@@ -2706,10 +2970,15 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
(priv->dma_cap.tx_coe) ? "Y" : "N");
- seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
- (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
- seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
- (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
+ (priv->dma_cap.rx_coe) ? "Y" : "N");
+ } else {
+ seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
+ seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ }
seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
(priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
@@ -2818,27 +3087,35 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->dev->priv_flags |= IFF_UNICAST_FLT;
mac = dwmac1000_setup(priv->ioaddr,
priv->plat->multicast_filter_bins,
- priv->plat->unicast_filter_entries);
+ priv->plat->unicast_filter_entries,
+ &priv->synopsys_id);
+ } else if (priv->plat->has_gmac4) {
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac = dwmac4_setup(priv->ioaddr,
+ priv->plat->multicast_filter_bins,
+ priv->plat->unicast_filter_entries,
+ &priv->synopsys_id);
} else {
- mac = dwmac100_setup(priv->ioaddr);
+ mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
}
if (!mac)
return -ENOMEM;
priv->hw = mac;
- /* Get and dump the chip ID */
- priv->synopsys_id = stmmac_get_synopsys_id(priv);
-
/* To use the chained or ring mode */
- if (chain_mode) {
- priv->hw->mode = &chain_mode_ops;
- pr_info(" Chain mode enabled\n");
- priv->mode = STMMAC_CHAIN_MODE;
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->hw->mode = &dwmac4_ring_mode_ops;
} else {
- priv->hw->mode = &ring_mode_ops;
- pr_info(" Ring mode enabled\n");
- priv->mode = STMMAC_RING_MODE;
+ if (chain_mode) {
+ priv->hw->mode = &chain_mode_ops;
+ pr_info(" Chain mode enabled\n");
+ priv->mode = STMMAC_CHAIN_MODE;
+ } else {
+ priv->hw->mode = &ring_mode_ops;
+ pr_info(" Ring mode enabled\n");
+ priv->mode = STMMAC_RING_MODE;
+ }
}
/* Get the HW capability (new GMAC newer than 3.50a) */
@@ -2860,6 +3137,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
else
priv->plat->tx_coe = priv->dma_cap.tx_coe;
+ /* In case of GMAC4, rx_coe is from the HW cap register. */
+ priv->plat->rx_coe = priv->dma_cap.rx_coe;
+
if (priv->dma_cap.rx_coe_type2)
priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
else if (priv->dma_cap.rx_coe_type1)
@@ -2868,13 +3148,17 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
} else
pr_info(" No HW DMA feature register supported");
- /* To use alternate (extended) or normal descriptor structures */
- stmmac_selec_desc_mode(priv);
+ /* To use alternate (extended), normal or GMAC4 descriptor structures */
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ priv->hw->desc = &dwmac4_desc_ops;
+ else
+ stmmac_selec_desc_mode(priv);
if (priv->plat->rx_coe) {
priv->hw->rx_csum = priv->plat->rx_coe;
- pr_info(" RX Checksum Offload Engine supported (type %d)\n",
- priv->plat->rx_coe);
+ pr_info(" RX Checksum Offload Engine supported\n");
+ if (priv->synopsys_id < DWMAC_CORE_4_00)
+ pr_info("\tCOE Type %d\n", priv->hw->rx_csum);
}
if (priv->plat->tx_coe)
pr_info(" TX Checksum insertion supported\n");
@@ -2884,6 +3168,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
device_set_wakeup_capable(priv->device, 1);
}
+ if (priv->dma_cap.tsoen)
+ pr_info(" TSO supported\n");
+
return 0;
}
@@ -2987,6 +3274,12 @@ int stmmac_dvr_probe(struct device *device,
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
+
+ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ ndev->hw_features |= NETIF_F_TSO;
+ priv->tso = true;
+ pr_info(" TSO feature enabled\n");
+ }
ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
@@ -3062,12 +3355,13 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
/**
* stmmac_dvr_remove
- * @ndev: net device pointer
+ * @dev: device pointer
* Description: this function resets the TX/RX processes, disables the MAC RX/TX,
* changes the link status and releases the DMA descriptor rings.
*/
-int stmmac_dvr_remove(struct net_device *ndev)
+int stmmac_dvr_remove(struct device *dev)
{
+ struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
pr_info("%s:\n\tremoving driver", __func__);
@@ -3093,13 +3387,14 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
/**
* stmmac_suspend - suspend callback
- * @ndev: net device pointer
+ * @dev: device pointer
* Description: this is the function to suspend the device; it is called
* by the platform driver to stop the network queue, program the PMT
* register (for WoL) and clean up and release the driver resources.
*/
-int stmmac_suspend(struct net_device *ndev)
+int stmmac_suspend(struct device *dev)
{
+ struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long flags;
@@ -3142,20 +3437,19 @@ EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
* stmmac_resume - resume callback
- * @ndev: net device pointer
+ * @dev: device pointer
* Description: on resume, this function is invoked to set up the DMA and CORE
* in a usable state.
*/
-int stmmac_resume(struct net_device *ndev)
+int stmmac_resume(struct device *dev)
{
+ struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long flags;
if (!netif_running(ndev))
return 0;
- spin_lock_irqsave(&priv->lock, flags);
-
/* Power Down bit, into the PM register, is cleared
* automatically as soon as a magic packet or a Wake-up frame
* is received. Anyway, it's better to manually clear
@@ -3163,7 +3457,9 @@ int stmmac_resume(struct net_device *ndev)
* from another devices (e.g. serial console).
*/
if (device_may_wakeup(priv->device)) {
+ spin_lock_irqsave(&priv->lock, flags);
priv->hw->mac->pmt(priv->hw, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
@@ -3177,10 +3473,17 @@ int stmmac_resume(struct net_device *ndev)
netif_device_attach(ndev);
+ spin_lock_irqsave(&priv->lock, flags);
+
priv->cur_rx = 0;
priv->dirty_rx = 0;
priv->dirty_tx = 0;
priv->cur_tx = 0;
+ /* reset private mss value to force mss context settings at
+ * next tso xmit (only used for gmac4).
+ */
+ priv->mss = 0;
+
stmmac_clear_descriptors(priv);
stmmac_hw_setup(ndev, false);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 8683a2169..ec2958518 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -37,6 +37,18 @@
#define MII_BUSY 0x00000001
#define MII_WRITE 0x00000002
+/* GMAC4 defines */
+#define MII_GMAC4_GOC_SHIFT 2
+#define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT)
+#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT)
+
+#define MII_PHY_ADDR_GMAC4_SHIFT 21
+#define MII_PHY_ADDR_GMAC4_MASK GENMASK(25, 21)
+#define MII_PHY_REG_GMAC4_SHIFT 16
+#define MII_PHY_REG_GMAC4_MASK GENMASK(20, 16)
+#define MII_CSR_CLK_GMAC4_SHIFT 8
+#define MII_CSR_CLK_GMAC4_MASK GENMASK(11, 8)
+
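The new defines above pack the PHY address, register number, CSR clock range and the read/write command into the single GMAC4 MII address word. A minimal sketch of the composition (illustrative only, not part of the patch; it mirrors stmmac_mdio_read_gmac4() below and assumes a CSR clock field of 0):

	u32 value;	/* GMAC4 MII address word for phyaddr = 1, phyreg = 1 */
	value = ((1 << MII_PHY_ADDR_GMAC4_SHIFT) & MII_PHY_ADDR_GMAC4_MASK) |
		((1 << MII_PHY_REG_GMAC4_SHIFT) & MII_PHY_REG_GMAC4_MASK) |
		MII_GMAC4_READ | MII_BUSY;	/* = 0x0021000d */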
static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
{
unsigned long curr;
@@ -124,6 +136,80 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
}
/**
+ * stmmac_mdio_read_gmac4
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 25-21
+ * @phyreg: MII addr reg bits 20-16
+ * Description: it reads data from the GMAC4 MII register of the phy
+ * device.
+ */
+static int stmmac_mdio_read_gmac4(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ int data;
+ u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
+ (MII_PHY_ADDR_GMAC4_MASK)) |
+ ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
+ (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_READ;
+
+ value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
+ << MII_CSR_CLK_GMAC4_SHIFT);
+
+ if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ return -EBUSY;
+
+ writel(value, priv->ioaddr + mii_address);
+
+ if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ return -EBUSY;
+
+ /* Read the data from the MII data register */
+ data = (int)readl(priv->ioaddr + mii_data);
+
+ return data;
+}
+
+/**
+ * stmmac_mdio_write_gmac4
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 25-21
+ * @phyreg: MII addr reg bits 20-16
+ * @phydata: phy data
+ * Description: it writes the data into the GMAC4 MII register of the
+ * phy device.
+ */
+static int stmmac_mdio_write_gmac4(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+
+ u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
+ (MII_PHY_ADDR_GMAC4_MASK)) |
+ ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
+ (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_WRITE;
+
+ value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
+ << MII_CSR_CLK_GMAC4_SHIFT);
+
+ /* Wait until any existing MII operation is complete */
+ if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ return -EBUSY;
+
+ /* Set the MII address register to write */
+ writel(phydata, priv->ioaddr + mii_data);
+ writel(value, priv->ioaddr + mii_address);
+
+ /* Wait until any existing MII operation is complete */
+ return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
+}
+
+/**
* stmmac_mdio_reset
* @bus: points to the mii_bus structure
* Description: reset the MII bus
@@ -180,9 +266,11 @@ int stmmac_mdio_reset(struct mii_bus *bus)
/* This is a workaround for problems with the STE101P PHY.
* It doesn't complete its reset until at least one clock cycle
- * on MDC, so perform a dummy mdio read.
+ * on MDC, so perform a dummy mdio read. To be updated for GMAC4
+ * if needed.
*/
- writel(0, priv->ioaddr + mii_address);
+ if (!priv->plat->has_gmac4)
+ writel(0, priv->ioaddr + mii_address);
#endif
return 0;
}
@@ -217,8 +305,14 @@ int stmmac_mdio_register(struct net_device *ndev)
#endif
new_bus->name = "stmmac";
- new_bus->read = &stmmac_mdio_read;
- new_bus->write = &stmmac_mdio_write;
+ if (priv->plat->has_gmac4) {
+ new_bus->read = &stmmac_mdio_read_gmac4;
+ new_bus->write = &stmmac_mdio_write_gmac4;
+ } else {
+ new_bus->read = &stmmac_mdio_read;
+ new_bus->write = &stmmac_mdio_write;
+ }
+
new_bus->reset = &stmmac_mdio_reset;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
new_bus->name, priv->plat->bus_id);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index ae4388735..56c8a2342 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -231,30 +231,10 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
- struct net_device *ndev = pci_get_drvdata(pdev);
-
- stmmac_dvr_remove(ndev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int stmmac_pci_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *ndev = pci_get_drvdata(pdev);
-
- return stmmac_suspend(ndev);
-}
-
-static int stmmac_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *ndev = pci_get_drvdata(pdev);
-
- return stmmac_resume(ndev);
+ stmmac_dvr_remove(&pdev->dev);
}
-#endif
-static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
+static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
#define STMMAC_VENDOR_ID 0x700
#define STMMAC_QUARK_ID 0x0937
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index cf37ea558..409db913b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -284,6 +284,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->pmt = 1;
}
+ if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
+ of_device_is_compatible(np, "snps,dwmac-4.10a")) {
+ plat->has_gmac4 = 1;
+ plat->pmt = 1;
+ plat->tso_en = of_property_read_bool(np, "snps,tso");
+ }
+
if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
of_device_is_compatible(np, "snps,dwmac-3.710")) {
plat->enh_desc = 1;
@@ -379,7 +386,7 @@ int stmmac_pltfr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
- int ret = stmmac_dvr_remove(ndev);
+ int ret = stmmac_dvr_remove(&pdev->dev);
if (priv->plat->exit)
priv->plat->exit(pdev, priv->plat->bsp_priv);
@@ -403,7 +410,7 @@ static int stmmac_pltfr_suspend(struct device *dev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct platform_device *pdev = to_platform_device(dev);
- ret = stmmac_suspend(ndev);
+ ret = stmmac_suspend(dev);
if (priv->plat->exit)
priv->plat->exit(pdev, priv->plat->bsp_priv);
@@ -426,7 +433,7 @@ static int stmmac_pltfr_resume(struct device *dev)
if (priv->plat->init)
priv->plat->init(pdev, priv->plat->bsp_priv);
- return stmmac_resume(ndev);
+ return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 9cc45649f..a2371aa14 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6431,7 +6431,7 @@ static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void niu_netif_stop(struct niu *np)
{
- np->dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(np->dev); /* prevent tx timeout */
niu_disable_napi(np);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 243722771..d6ad0fbd0 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -226,7 +226,7 @@ static void gem_put_cell(struct gem *gp)
static inline void gem_netif_stop(struct gem *gp)
{
- gp->dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(gp->dev); /* prevent tx timeout */
napi_disable(&gp->napi);
netif_tx_disable(gp->dev);
}
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index af11ed1e0..158213cd6 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -949,7 +949,7 @@ static void dwceqos_adjust_link(struct net_device *ndev)
if (status_change) {
if (phydev->link) {
- lp->ndev->trans_start = jiffies;
+ netif_trans_update(lp->ndev);
dwceqos_link_up(lp);
} else {
dwceqos_link_down(lp);
@@ -2203,7 +2203,7 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
netdev_sent_queue(ndev, skb->len);
spin_unlock_bh(&lp->tx_lock);
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
return 0;
tx_error:
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index e34ca5fd3..f7540dd6b 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1610,7 +1610,6 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
* o NETDEV_TX_BUSY Cannot transmit packet, try later
* Usually a bug, means queue start/stop flow control is broken in
* the driver. Note: the driver must NOT put the skb in its DMA ring.
- * o NETDEV_TX_LOCKED Locking failed, please retry quickly.
*/
static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
struct net_device *ndev)
@@ -1630,12 +1629,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
ENTER;
local_irq_save(flags);
- if (!spin_trylock(&priv->tx_lock)) {
- local_irq_restore(flags);
- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
- BDX_DRV_NAME, ndev->name);
- return NETDEV_TX_LOCKED;
- }
+ spin_lock(&priv->tx_lock);
/* build tx descriptor */
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
@@ -1707,7 +1701,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
#endif
#ifdef BDX_LLTX
- ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+ netif_trans_update(ndev); /* NETIF_F_LLTX driver :( */
#endif
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index e2fcdf1ee..53190894f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -380,7 +380,6 @@ struct cpsw_priv {
u32 coal_intvl;
u32 bus_freq_mhz;
int rx_packet_max;
- int host_port;
struct clk *clk;
u8 mac_addr[ETH_ALEN];
struct cpsw_slave *slaves;
@@ -530,21 +529,18 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = {
int slave_port = cpsw_get_slave_port(priv, \
slave->slave_num); \
cpsw_ale_add_mcast(priv->ale, addr, \
- 1 << slave_port | 1 << priv->host_port, \
+ 1 << slave_port | ALE_PORT_HOST, \
ALE_VLAN, slave->port_vlan, 0); \
} else { \
cpsw_ale_add_mcast(priv->ale, addr, \
- ALE_ALL_PORTS << priv->host_port, \
+ ALE_ALL_PORTS, \
0, 0, 0); \
} \
} while (0)
static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
- if (priv->host_port == 0)
- return slave_num + 1;
- else
- return slave_num;
+ return slave_num + 1;
}
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
@@ -601,8 +597,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
/* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
- priv->host_port, -1);
+ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -647,8 +642,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
/* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
- vid);
+ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS, vid);
if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
@@ -1091,7 +1085,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
struct cpsw_priv *priv, struct cpsw_slave *slave,
u32 slave_port)
{
- u32 port_mask = 1 << slave_port | 1 << priv->host_port;
+ u32 port_mask = 1 << slave_port | ALE_PORT_HOST;
if (priv->version == CPSW_VERSION_1)
slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
@@ -1102,7 +1096,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
port_mask, ALE_VLAN, slave->port_vlan, 0);
cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
+ HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan);
}
static void soft_reset_slave(struct cpsw_slave *slave)
@@ -1180,7 +1174,6 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
const int vlan = priv->data.default_vlan;
- const int port = priv->host_port;
u32 reg;
int i;
int unreg_mcast_mask;
@@ -1198,9 +1191,9 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
else
unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
- cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
- ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
- unreg_mcast_mask << port);
+ cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS,
+ ALE_ALL_PORTS, ALE_ALL_PORTS,
+ unreg_mcast_mask);
}
static void cpsw_init_host_port(struct cpsw_priv *priv)
@@ -1213,7 +1206,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
cpsw_ale_start(priv->ale);
/* switch to vlan unaware mode */
- cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
+ cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&priv->regs->control);
control_reg |= CPSW_VLAN_AWARE;
@@ -1227,14 +1220,14 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
&priv->host_port_regs->cpdma_tx_pri_map);
__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
- cpsw_ale_control_set(priv->ale, priv->host_port,
+ cpsw_ale_control_set(priv->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
if (!priv->data.dual_emac) {
- cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
+ cpsw_ale_add_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM,
0, 0);
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
+ ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
}
}
@@ -1281,8 +1274,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_add_default_vlan(priv);
else
cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
- ALE_ALL_PORTS << priv->host_port,
- ALE_ALL_PORTS << priv->host_port, 0, 0);
+ ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
if (!cpsw_common_res_usage_state(priv)) {
struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
@@ -1347,7 +1339,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (priv->coal_intvl != 0) {
struct ethtool_coalesce coal;
- coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+ coal.rx_coalesce_usecs = priv->coal_intvl;
cpsw_set_coalesce(ndev, &coal);
}
@@ -1397,7 +1389,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
struct cpsw_priv *priv = netdev_priv(ndev);
int ret;
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
cpsw_err(priv, tx_err, "packet pad failed\n");
@@ -1628,9 +1620,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
flags = ALE_VLAN;
}
- cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port,
+ cpsw_ale_del_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM,
flags, vid);
- cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port,
+ cpsw_ale_add_ucast(priv->ale, addr->sa_data, HOST_PORT_NUM,
flags, vid);
memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
@@ -1674,12 +1666,12 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
}
ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask,
- unreg_mcast_mask << priv->host_port);
+ unreg_mcast_mask);
if (ret != 0)
return ret;
ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN, vid);
+ HOST_PORT_NUM, ALE_VLAN, vid);
if (ret != 0)
goto clean_vid;
@@ -1691,7 +1683,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
clean_vlan_ucast:
cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN, vid);
+ HOST_PORT_NUM, ALE_VLAN, vid);
clean_vid:
cpsw_ale_del_vlan(priv->ale, vid, 0);
return ret;
@@ -1746,7 +1738,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
return ret;
ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN, vid);
+ HOST_PORT_NUM, ALE_VLAN, vid);
if (ret != 0)
return ret;
@@ -2157,7 +2149,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
priv_sl2->regs = priv->regs;
- priv_sl2->host_port = priv->host_port;
priv_sl2->host_port_regs = priv->host_port_regs;
priv_sl2->wr_regs = priv->wr_regs;
priv_sl2->hw_stats = priv->hw_stats;
@@ -2326,7 +2317,6 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_runtime_disable_ret;
}
priv->regs = ss_regs;
- priv->host_port = HOST_PORT_NUM;
/* Need to enable clocks with runtime PM api to access module
* registers
@@ -2515,8 +2505,6 @@ static int cpsw_probe(struct platform_device *pdev)
clean_ale_ret:
cpsw_ale_destroy(priv->ale);
clean_dma_ret:
- cpdma_chan_destroy(priv->txch);
- cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
@@ -2544,8 +2532,6 @@ static int cpsw_remove(struct platform_device *pdev)
unregister_netdev(ndev);
cpsw_ale_destroy(priv->ale);
- cpdma_chan_destroy(priv->txch);
- cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
pm_runtime_disable(&pdev->dev);
device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1d0942c53..32516661f 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1272,7 +1272,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (ret)
goto drop;
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
/* Check Tx pool count & stop subqueue if needed */
desc_count = knav_pool_count(netcp->tx_pool);
@@ -1788,7 +1788,7 @@ static void netcp_ndo_tx_timeout(struct net_device *ndev)
dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
netif_tx_wake_all_queues(ndev);
}
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index a274cd49a..561703317 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -1007,7 +1007,7 @@ static void tlan_tx_timeout(struct net_device *dev)
tlan_reset_lists(dev);
tlan_read_and_clear_stats(dev, TLAN_IGNORE);
tlan_reset_adapter(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 0a15acc07..11213a38c 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -462,7 +462,7 @@ static void tile_tx_timestamp(struct sk_buff *skb, int instance)
if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
struct mpipe_data *md = &mpipe_data[instance];
struct skb_shared_hwtstamps shhwtstamps;
- struct timespec ts;
+ struct timespec64 ts;
shtx->tx_flags |= SKBTX_IN_PROGRESS;
gxio_mpipe_get_timestamp(&md->context, &ts);
@@ -886,9 +886,9 @@ static struct ptp_clock_info ptp_mpipe_caps = {
/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
{
- struct timespec ts;
+ struct timespec64 ts;
- getnstimeofday(&ts);
+ ktime_get_ts64(&ts);
gxio_mpipe_set_timestamp(&md->context, &ts);
mutex_init(&md->ptp_lock);
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 298e059d0..922a443e3 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -1883,7 +1883,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
/* Save the timestamp. */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
#ifdef TILE_NET_PARANOIA
@@ -2026,7 +2026,7 @@ static void tile_net_tx_timeout(struct net_device *dev)
{
PDEBUG("tile_net_tx_timeout()\n");
PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies,
- jiffies - dev->trans_start);
+ jiffies - dev_trans_start(dev));
/* XXX: ISSUE: This doesn't seem useful for us. */
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 743b18266..446ea580a 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1616,13 +1616,13 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
target->valid = 1;
target->eurus_index = i;
kfree(target->hwinfo);
- target->hwinfo = kzalloc(be16_to_cpu(scan_info->size),
+ target->hwinfo = kmemdup(scan_info,
+ be16_to_cpu(scan_info->size),
GFP_KERNEL);
if (!target->hwinfo)
continue;
/* copy hw scan info */
- memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
target->essid_len = strnlen(scan_info->essid,
sizeof(scan_info->essid));
target->rate_len = 0;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index feb20cfbd..497e725ee 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -705,7 +705,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
wmb();
descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
- card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
+ netif_trans_update(card->netdev); /* set netdev watchdog timer */
return 0;
}
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 520cf50a3..01a77145a 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1314,7 +1314,8 @@ static int tsi108_open(struct net_device *dev)
data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma,
GFP_KERNEL);
if (!data->txring) {
- pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
+ pci_free_consistent(NULL, rxring_size, data->rxring,
+ data->rxdma);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 2b7550c43..9d14731cd 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1758,7 +1758,7 @@ static void rhine_reset_task(struct work_struct *work)
spin_unlock_bh(&rp->lock);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig
index f98b91d21..1981e88c1 100644
--- a/drivers/net/ethernet/wiznet/Kconfig
+++ b/drivers/net/ethernet/wiznet/Kconfig
@@ -69,4 +69,18 @@ config WIZNET_BUS_ANY
Performance may decrease compared to explicitly selected bus mode.
endchoice
+config WIZNET_W5100_SPI
+ tristate "WIZnet W5100/W5200/W5500 Ethernet support for SPI mode"
+ depends on WIZNET_BUS_ANY && WIZNET_W5100
+ depends on SPI
+ ---help---
+ In SPI mode the host system accesses registers using the SPI
+ protocol (mode 0) on the SPI bus.
+
+ Performance decreases compared to the other bus interface modes.
+ In W5100 SPI mode, burst READ/WRITE processing is not provided.
+
+ To compile this driver as a module, choose M here: the module
+ will be called w5100-spi.
+
endif # NET_VENDOR_WIZNET
diff --git a/drivers/net/ethernet/wiznet/Makefile b/drivers/net/ethernet/wiznet/Makefile
index c61453522..1e05e1a84 100644
--- a/drivers/net/ethernet/wiznet/Makefile
+++ b/drivers/net/ethernet/wiznet/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_WIZNET_W5100) += w5100.o
+obj-$(CONFIG_WIZNET_W5100_SPI) += w5100-spi.o
obj-$(CONFIG_WIZNET_W5300) += w5300.o
diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c
new file mode 100644
index 000000000..93a2d3c07
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5100-spi.c
@@ -0,0 +1,466 @@
+/*
+ * Ethernet driver for the WIZnet W5100/W5200/W5500 chip.
+ *
+ * Copyright (C) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ * Datasheet:
+ * http://www.wiznet.co.kr/wp-content/uploads/wiznethome/Chip/W5100/Document/W5100_Datasheet_v1.2.6.pdf
+ * http://wiznethome.cafe24.com/wp-content/uploads/wiznethome/Chip/W5200/Documents/W5200_DS_V140E.pdf
+ * http://wizwiki.net/wiki/lib/exe/fetch.php?media=products:w5500:w5500_ds_v106e_141230.pdf
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/spi/spi.h>
+
+#include "w5100.h"
+
+#define W5100_SPI_WRITE_OPCODE 0xf0
+#define W5100_SPI_READ_OPCODE 0x0f
+
+static int w5100_spi_read(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[3] = { W5100_SPI_READ_OPCODE, addr >> 8, addr & 0xff };
+ u8 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+ return ret ? ret : data;
+}
+
+static int w5100_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[4] = { W5100_SPI_WRITE_OPCODE, addr >> 8, addr & 0xff, data};
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5100_spi_read16(struct net_device *ndev, u32 addr)
+{
+ u16 data;
+ int ret;
+
+ ret = w5100_spi_read(ndev, addr);
+ if (ret < 0)
+ return ret;
+ data = ret << 8;
+ ret = w5100_spi_read(ndev, addr + 1);
+
+ return ret < 0 ? ret : data | ret;
+}
+
+static int w5100_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+ int ret;
+
+ ret = w5100_spi_write(ndev, addr, data >> 8);
+ if (ret)
+ return ret;
+
+ return w5100_spi_write(ndev, addr + 1, data & 0xff);
+}
+
+static int w5100_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ int ret = w5100_spi_read(ndev, addr + i);
+
+ if (ret < 0)
+ return ret;
+ buf[i] = ret;
+ }
+
+ return 0;
+}
+
+static int w5100_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+ int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ int ret = w5100_spi_write(ndev, addr + i, buf[i]);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct w5100_ops w5100_spi_ops = {
+ .may_sleep = true,
+ .chip_id = W5100,
+ .read = w5100_spi_read,
+ .write = w5100_spi_write,
+ .read16 = w5100_spi_read16,
+ .write16 = w5100_spi_write16,
+ .readbulk = w5100_spi_readbulk,
+ .writebulk = w5100_spi_writebulk,
+};
+
+#define W5200_SPI_WRITE_OPCODE 0x80
+
+struct w5200_spi_priv {
+ /* Serialize access to cmd_buf */
+ struct mutex cmd_lock;
+
+ /* DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ u8 cmd_buf[4] ____cacheline_aligned;
+};
+
+static struct w5200_spi_priv *w5200_spi_priv(struct net_device *ndev)
+{
+ return w5100_ops_priv(ndev);
+}
+
+static int w5200_spi_init(struct net_device *ndev)
+{
+ struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+
+ mutex_init(&spi_priv->cmd_lock);
+
+ return 0;
+}
+
+static int w5200_spi_read(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 1 };
+ u8 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+ return ret ? ret : data;
+}
+
+static int w5200_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[5] = { addr >> 8, addr & 0xff, W5200_SPI_WRITE_OPCODE, 1, data };
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5200_spi_read16(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 2 };
+ __be16 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, sizeof(data));
+
+ return ret ? ret : be16_to_cpu(data);
+}
+
+static int w5200_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[6] = {
+ addr >> 8, addr & 0xff,
+ W5200_SPI_WRITE_OPCODE, 2,
+ data >> 8, data & 0xff
+ };
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5200_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = spi_priv->cmd_buf,
+ .len = sizeof(spi_priv->cmd_buf),
+ },
+ {
+ .rx_buf = buf,
+ .len = len,
+ },
+ };
+ int ret;
+
+ mutex_lock(&spi_priv->cmd_lock);
+
+ spi_priv->cmd_buf[0] = addr >> 8;
+ spi_priv->cmd_buf[1] = addr;
+ spi_priv->cmd_buf[2] = len >> 8;
+ spi_priv->cmd_buf[3] = len;
+ ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+ mutex_unlock(&spi_priv->cmd_lock);
+
+ return ret;
+}
+
+static int w5200_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+ int len)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = spi_priv->cmd_buf,
+ .len = sizeof(spi_priv->cmd_buf),
+ },
+ {
+ .tx_buf = buf,
+ .len = len,
+ },
+ };
+ int ret;
+
+ mutex_lock(&spi_priv->cmd_lock);
+
+ spi_priv->cmd_buf[0] = addr >> 8;
+ spi_priv->cmd_buf[1] = addr;
+ spi_priv->cmd_buf[2] = W5200_SPI_WRITE_OPCODE | (len >> 8);
+ spi_priv->cmd_buf[3] = len;
+ ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+ mutex_unlock(&spi_priv->cmd_lock);
+
+ return ret;
+}
+
+static const struct w5100_ops w5200_ops = {
+ .may_sleep = true,
+ .chip_id = W5200,
+ .read = w5200_spi_read,
+ .write = w5200_spi_write,
+ .read16 = w5200_spi_read16,
+ .write16 = w5200_spi_write16,
+ .readbulk = w5200_spi_readbulk,
+ .writebulk = w5200_spi_writebulk,
+ .init = w5200_spi_init,
+};
+
+#define W5500_SPI_BLOCK_SELECT(addr) (((addr) >> 16) & 0x1f)
+#define W5500_SPI_READ_CONTROL(addr) (W5500_SPI_BLOCK_SELECT(addr) << 3)
+#define W5500_SPI_WRITE_CONTROL(addr) \
+ ((W5500_SPI_BLOCK_SELECT(addr) << 3) | BIT(2))
+
+struct w5500_spi_priv {
+ /* Serialize access to cmd_buf */
+ struct mutex cmd_lock;
+
+ /* DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ u8 cmd_buf[3] ____cacheline_aligned;
+};
+
+static struct w5500_spi_priv *w5500_spi_priv(struct net_device *ndev)
+{
+ return w5100_ops_priv(ndev);
+}
+
+static int w5500_spi_init(struct net_device *ndev)
+{
+ struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+
+ mutex_init(&spi_priv->cmd_lock);
+
+ return 0;
+}
+
+static int w5500_spi_read(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[3] = {
+ addr >> 8,
+ addr,
+ W5500_SPI_READ_CONTROL(addr)
+ };
+ u8 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+ return ret ? ret : data;
+}
+
+static int w5500_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[4] = {
+ addr >> 8,
+ addr,
+ W5500_SPI_WRITE_CONTROL(addr),
+ data
+ };
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5500_spi_read16(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[3] = {
+ addr >> 8,
+ addr,
+ W5500_SPI_READ_CONTROL(addr)
+ };
+ __be16 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, sizeof(data));
+
+ return ret ? ret : be16_to_cpu(data);
+}
+
+static int w5500_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[5] = {
+ addr >> 8,
+ addr,
+ W5500_SPI_WRITE_CONTROL(addr),
+ data >> 8,
+ data
+ };
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5500_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = spi_priv->cmd_buf,
+ .len = sizeof(spi_priv->cmd_buf),
+ },
+ {
+ .rx_buf = buf,
+ .len = len,
+ },
+ };
+ int ret;
+
+ mutex_lock(&spi_priv->cmd_lock);
+
+ spi_priv->cmd_buf[0] = addr >> 8;
+ spi_priv->cmd_buf[1] = addr;
+ spi_priv->cmd_buf[2] = W5500_SPI_READ_CONTROL(addr);
+ ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+ mutex_unlock(&spi_priv->cmd_lock);
+
+ return ret;
+}
+
+static int w5500_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+ int len)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = spi_priv->cmd_buf,
+ .len = sizeof(spi_priv->cmd_buf),
+ },
+ {
+ .tx_buf = buf,
+ .len = len,
+ },
+ };
+ int ret;
+
+ mutex_lock(&spi_priv->cmd_lock);
+
+ spi_priv->cmd_buf[0] = addr >> 8;
+ spi_priv->cmd_buf[1] = addr;
+ spi_priv->cmd_buf[2] = W5500_SPI_WRITE_CONTROL(addr);
+ ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+ mutex_unlock(&spi_priv->cmd_lock);
+
+ return ret;
+}
+
+static const struct w5100_ops w5500_ops = {
+ .may_sleep = true,
+ .chip_id = W5500,
+ .read = w5500_spi_read,
+ .write = w5500_spi_write,
+ .read16 = w5500_spi_read16,
+ .write16 = w5500_spi_write16,
+ .readbulk = w5500_spi_readbulk,
+ .writebulk = w5500_spi_writebulk,
+ .init = w5500_spi_init,
+};
+
+static int w5100_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ const struct w5100_ops *ops;
+ int priv_size;
+ const void *mac = of_get_mac_address(spi->dev.of_node);
+
+ switch (id->driver_data) {
+ case W5100:
+ ops = &w5100_spi_ops;
+ priv_size = 0;
+ break;
+ case W5200:
+ ops = &w5200_ops;
+ priv_size = sizeof(struct w5200_spi_priv);
+ break;
+ case W5500:
+ ops = &w5500_ops;
+ priv_size = sizeof(struct w5500_spi_priv);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return w5100_probe(&spi->dev, ops, priv_size, mac, spi->irq, -EINVAL);
+}
+
+static int w5100_spi_remove(struct spi_device *spi)
+{
+ return w5100_remove(&spi->dev);
+}
+
+static const struct spi_device_id w5100_spi_ids[] = {
+ { "w5100", W5100 },
+ { "w5200", W5200 },
+ { "w5500", W5500 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, w5100_spi_ids);
+
+static struct spi_driver w5100_spi_driver = {
+ .driver = {
+ .name = "w5100",
+ .pm = &w5100_pm_ops,
+ },
+ .probe = w5100_spi_probe,
+ .remove = w5100_spi_remove,
+ .id_table = w5100_spi_ids,
+};
+module_spi_driver(w5100_spi_driver);
+
+MODULE_DESCRIPTION("WIZnet W5100/W5200/W5500 Ethernet driver for SPI mode");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 8b282d0b1..4f6255cf6 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -27,6 +27,8 @@
#include <linux/irq.h>
#include <linux/gpio.h>
+#include "w5100.h"
+
#define DRV_NAME "w5100"
#define DRV_VERSION "2012-04-04"
@@ -36,7 +38,7 @@ MODULE_ALIAS("platform:"DRV_NAME);
MODULE_LICENSE("GPL");
/*
- * Registers
+ * W5100/W5200/W5500 common registers
*/
#define W5100_COMMON_REGS 0x0000
#define W5100_MR 0x0000 /* Mode Register */
@@ -46,55 +48,115 @@ MODULE_LICENSE("GPL");
#define MR_IND 0x01 /* Indirect mode */
#define W5100_SHAR 0x0009 /* Source MAC address */
#define W5100_IR 0x0015 /* Interrupt Register */
-#define W5100_IMR 0x0016 /* Interrupt Mask Register */
-#define IR_S0 0x01 /* S0 interrupt */
-#define W5100_RTR 0x0017 /* Retry Time-value Register */
-#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */
-#define W5100_RMSR 0x001a /* Receive Memory Size */
-#define W5100_TMSR 0x001b /* Transmit Memory Size */
#define W5100_COMMON_REGS_LEN 0x0040
-#define W5100_S0_REGS 0x0400
-#define W5100_S0_MR 0x0400 /* S0 Mode Register */
-#define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscuous) */
-#define S0_MR_MACRAW_MF 0x44 /* MAC RAW mode (filtered) */
-#define W5100_S0_CR 0x0401 /* S0 Command Register */
+#define W5100_Sn_MR 0x0000 /* Sn Mode Register */
+#define W5100_Sn_CR 0x0001 /* Sn Command Register */
+#define W5100_Sn_IR 0x0002 /* Sn Interrupt Register */
+#define W5100_Sn_SR 0x0003 /* Sn Status Register */
+#define W5100_Sn_TX_FSR 0x0020 /* Sn Transmit free memory size */
+#define W5100_Sn_TX_RD 0x0022 /* Sn Transmit memory read pointer */
+#define W5100_Sn_TX_WR 0x0024 /* Sn Transmit memory write pointer */
+#define W5100_Sn_RX_RSR 0x0026 /* Sn Receive free memory size */
+#define W5100_Sn_RX_RD 0x0028 /* Sn Receive memory read pointer */
+
+#define S0_REGS(priv) ((priv)->s0_regs)
+
+#define W5100_S0_MR(priv) (S0_REGS(priv) + W5100_Sn_MR)
+#define S0_MR_MACRAW 0x04 /* MAC RAW mode */
+#define S0_MR_MF 0x40 /* MAC Filter for W5100 and W5200 */
+#define W5500_S0_MR_MF 0x80 /* MAC Filter for W5500 */
+#define W5100_S0_CR(priv) (S0_REGS(priv) + W5100_Sn_CR)
#define S0_CR_OPEN 0x01 /* OPEN command */
#define S0_CR_CLOSE 0x10 /* CLOSE command */
#define S0_CR_SEND 0x20 /* SEND command */
#define S0_CR_RECV 0x40 /* RECV command */
-#define W5100_S0_IR 0x0402 /* S0 Interrupt Register */
+#define W5100_S0_IR(priv) (S0_REGS(priv) + W5100_Sn_IR)
#define S0_IR_SENDOK 0x10 /* complete sending */
#define S0_IR_RECV 0x04 /* receiving data */
-#define W5100_S0_SR 0x0403 /* S0 Status Register */
+#define W5100_S0_SR(priv) (S0_REGS(priv) + W5100_Sn_SR)
#define S0_SR_MACRAW 0x42 /* mac raw mode */
-#define W5100_S0_TX_FSR 0x0420 /* S0 Transmit free memory size */
-#define W5100_S0_TX_RD 0x0422 /* S0 Transmit memory read pointer */
-#define W5100_S0_TX_WR 0x0424 /* S0 Transmit memory write pointer */
-#define W5100_S0_RX_RSR 0x0426 /* S0 Receive free memory size */
-#define W5100_S0_RX_RD 0x0428 /* S0 Receive memory read pointer */
+#define W5100_S0_TX_FSR(priv) (S0_REGS(priv) + W5100_Sn_TX_FSR)
+#define W5100_S0_TX_RD(priv) (S0_REGS(priv) + W5100_Sn_TX_RD)
+#define W5100_S0_TX_WR(priv) (S0_REGS(priv) + W5100_Sn_TX_WR)
+#define W5100_S0_RX_RSR(priv) (S0_REGS(priv) + W5100_Sn_RX_RSR)
+#define W5100_S0_RX_RD(priv) (S0_REGS(priv) + W5100_Sn_RX_RD)
+
#define W5100_S0_REGS_LEN 0x0040
+/*
+ * W5100 and W5200 common registers
+ */
+#define W5100_IMR 0x0016 /* Interrupt Mask Register */
+#define IR_S0 0x01 /* S0 interrupt */
+#define W5100_RTR 0x0017 /* Retry Time-value Register */
+#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */
+
+/*
+ * W5100 specific register and memory
+ */
+#define W5100_RMSR 0x001a /* Receive Memory Size */
+#define W5100_TMSR 0x001b /* Transmit Memory Size */
+
+#define W5100_S0_REGS 0x0400
+
#define W5100_TX_MEM_START 0x4000
-#define W5100_TX_MEM_END 0x5fff
-#define W5100_TX_MEM_MASK 0x1fff
+#define W5100_TX_MEM_SIZE 0x2000
#define W5100_RX_MEM_START 0x6000
-#define W5100_RX_MEM_END 0x7fff
-#define W5100_RX_MEM_MASK 0x1fff
+#define W5100_RX_MEM_SIZE 0x2000
+
+/*
+ * W5200 specific register and memory
+ */
+#define W5200_S0_REGS 0x4000
+
+#define W5200_Sn_RXMEM_SIZE(n) (0x401e + (n) * 0x0100) /* Sn RX Memory Size */
+#define W5200_Sn_TXMEM_SIZE(n) (0x401f + (n) * 0x0100) /* Sn TX Memory Size */
+
+#define W5200_TX_MEM_START 0x8000
+#define W5200_TX_MEM_SIZE 0x4000
+#define W5200_RX_MEM_START 0xc000
+#define W5200_RX_MEM_SIZE 0x4000
+
+/*
+ * W5500 specific register and memory
+ *
+ * W5500 registers and memory are organized into multiple blocks. Each one
+ * is selected by a 16-bit offset address and 5 block select bits, so we
+ * encode both into a 32-bit address (the lower 16 bits hold the offset
+ * address and the upper 16 bits hold the block select bits).
+ */
+#define W5500_SIMR 0x0018 /* Socket Interrupt Mask Register */
+#define W5500_RTR 0x0019 /* Retry Time-value Register */
+
+#define W5500_S0_REGS 0x10000
+
+#define W5500_Sn_RXMEM_SIZE(n) \
+ (0x1001e + (n) * 0x40000) /* Sn RX Memory Size */
+#define W5500_Sn_TXMEM_SIZE(n) \
+ (0x1001f + (n) * 0x40000) /* Sn TX Memory Size */
+
+#define W5500_TX_MEM_START 0x20000
+#define W5500_TX_MEM_SIZE 0x04000
+#define W5500_RX_MEM_START 0x30000
+#define W5500_RX_MEM_SIZE 0x04000
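As a worked example of this encoding (illustrative sketch only; it reuses the W5500_SPI_BLOCK_SELECT() and W5500_SPI_READ_CONTROL() helpers added in w5100-spi.c above), socket 1's RX memory size register encodes to a single 32-bit value that splits back into the on-wire fields:

	u32 addr   = W5500_Sn_RXMEM_SIZE(1);	/* 0x1001e + 0x40000 = 0x5001e */
	u16 offset = addr & 0xffff;		/* 0x001e, sent as two address bytes */
	u8  block  = (addr >> 16) & 0x1f;	/* 0x05, W5500_SPI_BLOCK_SELECT(addr) */
	u8  ctrl   = block << 3;		/* 0x28, W5500_SPI_READ_CONTROL(addr) */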
/*
* Device driver private data structure
*/
+
struct w5100_priv {
- void __iomem *base;
- spinlock_t reg_lock;
- bool indirect;
- u8 (*read)(struct w5100_priv *priv, u16 addr);
- void (*write)(struct w5100_priv *priv, u16 addr, u8 data);
- u16 (*read16)(struct w5100_priv *priv, u16 addr);
- void (*write16)(struct w5100_priv *priv, u16 addr, u16 data);
- void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
- void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
+ const struct w5100_ops *ops;
+
+ /* Socket 0 register offset address */
+ u32 s0_regs;
+ /* Socket 0 TX buffer offset address and size */
+ u32 s0_tx_buf;
+ u16 s0_tx_buf_size;
+ /* Socket 0 RX buffer offset address and size */
+ u32 s0_rx_buf;
+ u16 s0_rx_buf_size;
+
int irq;
int link_irq;
int link_gpio;
@@ -103,6 +165,13 @@ struct w5100_priv {
struct net_device *ndev;
bool promisc;
u32 msg_enable;
+
+ struct workqueue_struct *xfer_wq;
+ struct work_struct rx_work;
+ struct sk_buff *tx_skb;
+ struct work_struct tx_work;
+ struct work_struct setrx_work;
+ struct work_struct restart_work;
};
/************************************************************************
@@ -111,63 +180,122 @@ struct w5100_priv {
*
***********************************************************************/
+struct w5100_mmio_priv {
+ void __iomem *base;
+ /* Serialize access in indirect address mode */
+ spinlock_t reg_lock;
+};
+
+static inline struct w5100_mmio_priv *w5100_mmio_priv(struct net_device *dev)
+{
+ return w5100_ops_priv(dev);
+}
+
+static inline void __iomem *w5100_mmio(struct net_device *ndev)
+{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
+
+ return mmio_priv->base;
+}
+
/*
* In direct address mode the host system can directly access W5100 registers
* after mapping them into the Memory-Mapped I/O space.
*
* 0x8000 bytes are required for memory space.
*/
-static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr)
+static inline int w5100_read_direct(struct net_device *ndev, u32 addr)
{
- return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+ return ioread8(w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
}
-static inline void w5100_write_direct(struct w5100_priv *priv,
- u16 addr, u8 data)
+static inline int __w5100_write_direct(struct net_device *ndev, u32 addr,
+ u8 data)
{
- iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+ iowrite8(data, w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
+
+ return 0;
}
-static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr)
+static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data)
+{
+ __w5100_write_direct(ndev, addr, data);
+ mmiowb();
+
+ return 0;
+}
+
+static int w5100_read16_direct(struct net_device *ndev, u32 addr)
{
u16 data;
- data = w5100_read_direct(priv, addr) << 8;
- data |= w5100_read_direct(priv, addr + 1);
+ data = w5100_read_direct(ndev, addr) << 8;
+ data |= w5100_read_direct(ndev, addr + 1);
return data;
}
-static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data)
+static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data)
{
- w5100_write_direct(priv, addr, data >> 8);
- w5100_write_direct(priv, addr + 1, data);
+ __w5100_write_direct(ndev, addr, data >> 8);
+ __w5100_write_direct(ndev, addr + 1, data);
+ mmiowb();
+
+ return 0;
}
-static void w5100_readbuf_direct(struct w5100_priv *priv,
- u16 offset, u8 *buf, int len)
+static int w5100_readbulk_direct(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
{
- u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
int i;
- for (i = 0; i < len; i++, addr++) {
- if (unlikely(addr > W5100_RX_MEM_END))
- addr = W5100_RX_MEM_START;
- *buf++ = w5100_read_direct(priv, addr);
- }
+ for (i = 0; i < len; i++, addr++)
+ *buf++ = w5100_read_direct(ndev, addr);
+
+ return 0;
}
-static void w5100_writebuf_direct(struct w5100_priv *priv,
- u16 offset, u8 *buf, int len)
+static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
+ const u8 *buf, int len)
{
- u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
int i;
- for (i = 0; i < len; i++, addr++) {
- if (unlikely(addr > W5100_TX_MEM_END))
- addr = W5100_TX_MEM_START;
- w5100_write_direct(priv, addr, *buf++);
- }
+ for (i = 0; i < len; i++, addr++)
+ __w5100_write_direct(ndev, addr, *buf++);
+
+ mmiowb();
+
+ return 0;
}
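
Note the asymmetry above: the loop issues each byte through __w5100_write_direct(), which skips the barrier, and a single mmiowb() then orders the whole burst, instead of paying one barrier per byte as w5100_write_direct() would. The idiom in isolation, as a minimal sketch with invented names:

    /* Minimal sketch of the barrier-batching idiom (names invented). */
    static void burst_write(void __iomem *base, u32 addr, const u8 *buf, int len)
    {
            int i;

            for (i = 0; i < len; i++, addr++)
                    iowrite8(buf[i], base + addr);  /* posted write, no barrier */

            mmiowb();  /* order the whole burst once, not per byte */
    }
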
+static int w5100_mmio_init(struct net_device *ndev)
+{
+ struct platform_device *pdev = to_platform_device(ndev->dev.parent);
+ struct w5100_priv *priv = netdev_priv(ndev);
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
+ struct resource *mem;
+
+ spin_lock_init(&mmio_priv->reg_lock);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mmio_priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(mmio_priv->base))
+ return PTR_ERR(mmio_priv->base);
+
+ netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq);
+
+ return 0;
+}
+
+static const struct w5100_ops w5100_mmio_direct_ops = {
+ .chip_id = W5100,
+ .read = w5100_read_direct,
+ .write = w5100_write_direct,
+ .read16 = w5100_read16_direct,
+ .write16 = w5100_write16_direct,
+ .readbulk = w5100_readbulk_direct,
+ .writebulk = w5100_writebulk_direct,
+ .init = w5100_mmio_init,
+};
+
/*
* In indirect address mode, the host system accesses registers indirectly by
* using the Indirect Mode Address Register (IDM_AR) and Indirect Mode Data
@@ -179,139 +307,290 @@ static void w5100_writebuf_direct(struct w5100_priv *priv,
#define W5100_IDM_AR 0x01 /* Indirect Mode Address Register */
#define W5100_IDM_DR 0x03 /* Indirect Mode Data Register */
-static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr)
+static int w5100_read_indirect(struct net_device *ndev, u32 addr)
{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
u8 data;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- data = w5100_read_direct(priv, W5100_IDM_DR);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+ data = w5100_read_direct(ndev, W5100_IDM_DR);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
return data;
}
-static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data)
+static int w5100_write_indirect(struct net_device *ndev, u32 addr, u8 data)
{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- w5100_write_direct(priv, W5100_IDM_DR, data);
- mmiowb();
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+ w5100_write_direct(ndev, W5100_IDM_DR, data);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+ return 0;
}
-static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr)
+static int w5100_read16_indirect(struct net_device *ndev, u32 addr)
{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
u16 data;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- data = w5100_read_direct(priv, W5100_IDM_DR) << 8;
- data |= w5100_read_direct(priv, W5100_IDM_DR);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+ data = w5100_read_direct(ndev, W5100_IDM_DR) << 8;
+ data |= w5100_read_direct(ndev, W5100_IDM_DR);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
return data;
}
-static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data)
+static int w5100_write16_indirect(struct net_device *ndev, u32 addr, u16 data)
{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- w5100_write_direct(priv, W5100_IDM_DR, data >> 8);
- w5100_write_direct(priv, W5100_IDM_DR, data);
- mmiowb();
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+ __w5100_write_direct(ndev, W5100_IDM_DR, data >> 8);
+ w5100_write_direct(ndev, W5100_IDM_DR, data);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+ return 0;
}
-static void w5100_readbuf_indirect(struct w5100_priv *priv,
- u16 offset, u8 *buf, int len)
+static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
{
- u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
int i;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+
+ for (i = 0; i < len; i++)
+ *buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
- for (i = 0; i < len; i++, addr++) {
- if (unlikely(addr > W5100_RX_MEM_END)) {
- addr = W5100_RX_MEM_START;
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- }
- *buf++ = w5100_read_direct(priv, W5100_IDM_DR);
- }
mmiowb();
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+ return 0;
}
-static void w5100_writebuf_indirect(struct w5100_priv *priv,
- u16 offset, u8 *buf, int len)
+static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr,
+ const u8 *buf, int len)
{
- u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
int i;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+
+ for (i = 0; i < len; i++)
+ __w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
- for (i = 0; i < len; i++, addr++) {
- if (unlikely(addr > W5100_TX_MEM_END)) {
- addr = W5100_TX_MEM_START;
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- }
- w5100_write_direct(priv, W5100_IDM_DR, *buf++);
- }
mmiowb();
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+ return 0;
}
+static int w5100_reset_indirect(struct net_device *ndev)
+{
+ w5100_write_direct(ndev, W5100_MR, MR_RST);
+ mdelay(5);
+ w5100_write_direct(ndev, W5100_MR, MR_PB | MR_AI | MR_IND);
+
+ return 0;
+}
+
+static const struct w5100_ops w5100_mmio_indirect_ops = {
+ .chip_id = W5100,
+ .read = w5100_read_indirect,
+ .write = w5100_write_indirect,
+ .read16 = w5100_read16_indirect,
+ .write16 = w5100_write16_indirect,
+ .readbulk = w5100_readbulk_indirect,
+ .writebulk = w5100_writebulk_indirect,
+ .init = w5100_mmio_init,
+ .reset = w5100_reset_indirect,
+};
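
The indirect-mode bulk helpers above depend on the chip's address auto-increment: w5100_reset_indirect() sets MR_AI, so once IDM_AR is loaded, every access to IDM_DR advances the internal address. A 3-byte read at address A therefore generates the following register traffic (sketch):

    /*
     * IDM_AR <- A          one 16-bit address write
     * buf[0] <- IDM_DR     chip advances to A + 1
     * buf[1] <- IDM_DR     chip advances to A + 2
     * buf[2] <- IDM_DR
     *
     * Without MR_AI, IDM_AR would need rewriting before every IDM_DR
     * access, which is what the removed per-byte wrap handling did.
     */
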
+
#if defined(CONFIG_WIZNET_BUS_DIRECT)
-#define w5100_read w5100_read_direct
-#define w5100_write w5100_write_direct
-#define w5100_read16 w5100_read16_direct
-#define w5100_write16 w5100_write16_direct
-#define w5100_readbuf w5100_readbuf_direct
-#define w5100_writebuf w5100_writebuf_direct
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+ return w5100_read_direct(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+ return w5100_write_direct(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+ return w5100_read16_direct(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+ return w5100_write16_direct(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+ return w5100_readbulk_direct(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+ int len)
+{
+ return w5100_writebulk_direct(priv->ndev, addr, buf, len);
+}
#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
-#define w5100_read w5100_read_indirect
-#define w5100_write w5100_write_indirect
-#define w5100_read16 w5100_read16_indirect
-#define w5100_write16 w5100_write16_indirect
-#define w5100_readbuf w5100_readbuf_indirect
-#define w5100_writebuf w5100_writebuf_indirect
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+ return w5100_read_indirect(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+ return w5100_write_indirect(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+ return w5100_read16_indirect(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+ return w5100_write16_indirect(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+ return w5100_readbulk_indirect(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+ int len)
+{
+ return w5100_writebulk_indirect(priv->ndev, addr, buf, len);
+}
#else /* CONFIG_WIZNET_BUS_ANY */
-#define w5100_read priv->read
-#define w5100_write priv->write
-#define w5100_read16 priv->read16
-#define w5100_write16 priv->write16
-#define w5100_readbuf priv->readbuf
-#define w5100_writebuf priv->writebuf
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+ return priv->ops->read(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+ return priv->ops->write(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+ return priv->ops->read16(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+ return priv->ops->write16(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+ return priv->ops->readbulk(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+ int len)
+{
+ return priv->ops->writebulk(priv->ndev, addr, buf, len);
+}
+
#endif
+static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len)
+{
+ u32 addr;
+ int remain = 0;
+ int ret;
+ const u32 mem_start = priv->s0_rx_buf;
+ const u16 mem_size = priv->s0_rx_buf_size;
+
+ offset %= mem_size;
+ addr = mem_start + offset;
+
+ if (offset + len > mem_size) {
+ remain = (offset + len) % mem_size;
+ len = mem_size - offset;
+ }
+
+ ret = w5100_readbulk(priv, addr, buf, len);
+ if (ret || !remain)
+ return ret;
+
+ return w5100_readbulk(priv, mem_start, buf + len, remain);
+}
+
+static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf,
+ int len)
+{
+ u32 addr;
+ int ret;
+ int remain = 0;
+ const u32 mem_start = priv->s0_tx_buf;
+ const u16 mem_size = priv->s0_tx_buf_size;
+
+ offset %= mem_size;
+ addr = mem_start + offset;
+
+ if (offset + len > mem_size) {
+ remain = (offset + len) % mem_size;
+ len = mem_size - offset;
+ }
+
+ ret = w5100_writebulk(priv, addr, buf, len);
+ if (ret || !remain)
+ return ret;
+
+ return w5100_writebulk(priv, mem_start, buf + len, remain);
+}
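
Both helpers above reduce socket-buffer wraparound to at most two bulk transfers. A worked example, assuming the 8 KiB (0x2000) socket 0 buffer this driver configures on the W5100:

    /*
     * mem_size = 0x2000, offset = 0x1ff0, len = 0x20:
     *
     *   offset + len = 0x2010 > 0x2000, so
     *     remain = 0x2010 % 0x2000 = 0x10   bytes that wrap
     *     len    = 0x2000 - 0x1ff0 = 0x10   bytes up to the end
     *
     *   readbulk(mem_start + 0x1ff0, buf,        0x10);
     *   readbulk(mem_start,          buf + 0x10, 0x10);
     *
     * One modulo and at most two transfers replace the old per-byte
     * wrap check, which matters once a bulk transfer can be a single
     * bus transaction.
     */
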
+
+static int w5100_reset(struct w5100_priv *priv)
+{
+ if (priv->ops->reset)
+ return priv->ops->reset(priv->ndev);
+
+ w5100_write(priv, W5100_MR, MR_RST);
+ mdelay(5);
+ w5100_write(priv, W5100_MR, MR_PB);
+
+ return 0;
+}
+
static int w5100_command(struct w5100_priv *priv, u16 cmd)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ unsigned long timeout;
- w5100_write(priv, W5100_S0_CR, cmd);
- mmiowb();
+ w5100_write(priv, W5100_S0_CR(priv), cmd);
+
+ timeout = jiffies + msecs_to_jiffies(100);
- while (w5100_read(priv, W5100_S0_CR) != 0) {
+ while (w5100_read(priv, W5100_S0_CR(priv)) != 0) {
if (time_after(jiffies, timeout))
return -EIO;
cpu_relax();
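
A small but deliberate detail above: the 100 ms budget now starts after the command register write rather than before it, so on a bus whose transfers can sleep the write's own latency no longer eats into the completion timeout:

    /*
     * Before:  timeout = jiffies + ...;  write(S0_CR, cmd);  poll ...
     * After:   write(S0_CR, cmd);  timeout = jiffies + ...;  poll ...
     */
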
@@ -323,47 +602,124 @@ static int w5100_command(struct w5100_priv *priv, u16 cmd)
static void w5100_write_macaddr(struct w5100_priv *priv)
{
struct net_device *ndev = priv->ndev;
- int i;
- for (i = 0; i < ETH_ALEN; i++)
- w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]);
- mmiowb();
+ w5100_writebulk(priv, W5100_SHAR, ndev->dev_addr, ETH_ALEN);
}
-static void w5100_hw_reset(struct w5100_priv *priv)
+static void w5100_socket_intr_mask(struct w5100_priv *priv, u8 mask)
{
- w5100_write_direct(priv, W5100_MR, MR_RST);
- mmiowb();
- mdelay(5);
- w5100_write_direct(priv, W5100_MR, priv->indirect ?
- MR_PB | MR_AI | MR_IND :
- MR_PB);
- mmiowb();
- w5100_write(priv, W5100_IMR, 0);
- w5100_write_macaddr(priv);
+ u32 imr;
+
+ if (priv->ops->chip_id == W5500)
+ imr = W5500_SIMR;
+ else
+ imr = W5100_IMR;
+
+ w5100_write(priv, imr, mask);
+}
+static void w5100_enable_intr(struct w5100_priv *priv)
+{
+ w5100_socket_intr_mask(priv, IR_S0);
+}
+
+static void w5100_disable_intr(struct w5100_priv *priv)
+{
+ w5100_socket_intr_mask(priv, 0);
+}
+
+static void w5100_memory_configure(struct w5100_priv *priv)
+{
/* Configure 16K of internal memory
* as 8K RX buffer and 8K TX buffer
*/
w5100_write(priv, W5100_RMSR, 0x03);
w5100_write(priv, W5100_TMSR, 0x03);
- mmiowb();
+}
+
+static void w5200_memory_configure(struct w5100_priv *priv)
+{
+ int i;
+
+ /* Configure internal RX memory as 16K RX buffer and
+ * internal TX memory as 16K TX buffer
+ */
+ w5100_write(priv, W5200_Sn_RXMEM_SIZE(0), 0x10);
+ w5100_write(priv, W5200_Sn_TXMEM_SIZE(0), 0x10);
+
+ for (i = 1; i < 8; i++) {
+ w5100_write(priv, W5200_Sn_RXMEM_SIZE(i), 0);
+ w5100_write(priv, W5200_Sn_TXMEM_SIZE(i), 0);
+ }
+}
+
+static void w5500_memory_configure(struct w5100_priv *priv)
+{
+ int i;
+
+ /* Configure internal RX memory as 16K RX buffer and
+ * internal TX memory as 16K TX buffer
+ */
+ w5100_write(priv, W5500_Sn_RXMEM_SIZE(0), 0x10);
+ w5100_write(priv, W5500_Sn_TXMEM_SIZE(0), 0x10);
+
+ for (i = 1; i < 8; i++) {
+ w5100_write(priv, W5500_Sn_RXMEM_SIZE(i), 0);
+ w5100_write(priv, W5500_Sn_TXMEM_SIZE(i), 0);
+ }
+}
+
+static int w5100_hw_reset(struct w5100_priv *priv)
+{
+ u32 rtr;
+
+ w5100_reset(priv);
+
+ w5100_disable_intr(priv);
+ w5100_write_macaddr(priv);
+
+ switch (priv->ops->chip_id) {
+ case W5100:
+ w5100_memory_configure(priv);
+ rtr = W5100_RTR;
+ break;
+ case W5200:
+ w5200_memory_configure(priv);
+ rtr = W5100_RTR;
+ break;
+ case W5500:
+ w5500_memory_configure(priv);
+ rtr = W5500_RTR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (w5100_read16(priv, rtr) != RTR_DEFAULT)
+ return -ENODEV;
+
+ return 0;
}
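
chip_id is the single switch point for per-chip layout: it picks the memory-size registers here and the socket 0 register block and buffer geometry in w5100_probe() below. Supporting another variant is meant to cost one enum value plus two switch cases; a purely hypothetical sketch (names invented, not part of this patch):

    /*
     *   enum { W5100, W5200, W5500, W5XX0 };   // hypothetical W5XX0
     *
     *   case W5XX0:
     *           w5xx0_memory_configure(priv);  // hypothetical helper
     *           rtr = W5XX0_RTR;
     *           break;
     *
     * plus the matching priv->s0_regs / s0_*_buf case in w5100_probe().
     */
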
static void w5100_hw_start(struct w5100_priv *priv)
{
- w5100_write(priv, W5100_S0_MR, priv->promisc ?
- S0_MR_MACRAW : S0_MR_MACRAW_MF);
- mmiowb();
+ u8 mode = S0_MR_MACRAW;
+
+ if (!priv->promisc) {
+ if (priv->ops->chip_id == W5500)
+ mode |= W5500_S0_MR_MF;
+ else
+ mode |= S0_MR_MF;
+ }
+
+ w5100_write(priv, W5100_S0_MR(priv), mode);
w5100_command(priv, S0_CR_OPEN);
- w5100_write(priv, W5100_IMR, IR_S0);
- mmiowb();
+ w5100_enable_intr(priv);
}
static void w5100_hw_close(struct w5100_priv *priv)
{
- w5100_write(priv, W5100_IMR, 0);
- mmiowb();
+ w5100_disable_intr(priv);
w5100_command(priv, S0_CR_CLOSE);
}
@@ -412,20 +768,17 @@ static int w5100_get_regs_len(struct net_device *ndev)
}
static void w5100_get_regs(struct net_device *ndev,
- struct ethtool_regs *regs, void *_buf)
+ struct ethtool_regs *regs, void *buf)
{
struct w5100_priv *priv = netdev_priv(ndev);
- u8 *buf = _buf;
- u16 i;
regs->version = 1;
- for (i = 0; i < W5100_COMMON_REGS_LEN; i++)
- *buf++ = w5100_read(priv, W5100_COMMON_REGS + i);
- for (i = 0; i < W5100_S0_REGS_LEN; i++)
- *buf++ = w5100_read(priv, W5100_S0_REGS + i);
+ w5100_readbulk(priv, W5100_COMMON_REGS, buf, W5100_COMMON_REGS_LEN);
+ buf += W5100_COMMON_REGS_LEN;
+ w5100_readbulk(priv, S0_REGS(priv), buf, W5100_S0_REGS_LEN);
}
-static void w5100_tx_timeout(struct net_device *ndev)
+static void w5100_restart(struct net_device *ndev)
{
struct w5100_priv *priv = netdev_priv(ndev);
@@ -433,74 +786,138 @@ static void w5100_tx_timeout(struct net_device *ndev)
w5100_hw_reset(priv);
w5100_hw_start(priv);
ndev->stats.tx_errors++;
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
netif_wake_queue(ndev);
}
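
The open-coded ndev->trans_start = jiffies writes in this file (and in the drivers further down) all become netif_trans_update(). The watchdog timestamp moved into struct netdev_queue, so poking the old net_device field no longer holds off tx timeouts; the helper is roughly the following (a sketch of the v4.7-era include/linux/netdevice.h definition, see the header for the authoritative version):

    static inline void netif_trans_update(struct net_device *dev)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

            if (txq->trans_start != jiffies)
                    txq->trans_start = jiffies;
    }
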
-static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static void w5100_restart_work(struct work_struct *work)
+{
+ struct w5100_priv *priv = container_of(work, struct w5100_priv,
+ restart_work);
+
+ w5100_restart(priv->ndev);
+}
+
+static void w5100_tx_timeout(struct net_device *ndev)
{
struct w5100_priv *priv = netdev_priv(ndev);
- u16 offset;
- netif_stop_queue(ndev);
+ if (priv->ops->may_sleep)
+ schedule_work(&priv->restart_work);
+ else
+ w5100_restart(ndev);
+}
- offset = w5100_read16(priv, W5100_S0_TX_WR);
+static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb)
+{
+ struct w5100_priv *priv = netdev_priv(ndev);
+ u16 offset;
+
+ offset = w5100_read16(priv, W5100_S0_TX_WR(priv));
w5100_writebuf(priv, offset, skb->data, skb->len);
- w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len);
- mmiowb();
+ w5100_write16(priv, W5100_S0_TX_WR(priv), offset + skb->len);
ndev->stats.tx_bytes += skb->len;
ndev->stats.tx_packets++;
dev_kfree_skb(skb);
w5100_command(priv, S0_CR_SEND);
+}
+
+static void w5100_tx_work(struct work_struct *work)
+{
+ struct w5100_priv *priv = container_of(work, struct w5100_priv,
+ tx_work);
+ struct sk_buff *skb = priv->tx_skb;
+
+ priv->tx_skb = NULL;
+
+ if (WARN_ON(!skb))
+ return;
+ w5100_tx_skb(priv->ndev, skb);
+}
+
+static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct w5100_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+
+ if (priv->ops->may_sleep) {
+ WARN_ON(priv->tx_skb);
+ priv->tx_skb = skb;
+ queue_work(priv->xfer_wq, &priv->tx_work);
+ } else {
+ w5100_tx_skb(ndev, skb);
+ }
return NETDEV_TX_OK;
}
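
ndo_start_xmit runs in atomic context, so a bus whose transfers sleep (the SPI glue this series prepares for) cannot push the frame inline: the skb is parked in priv->tx_skb and handed to the dedicated workqueue. Because the queue is stopped before the work is queued, at most one skb is ever outstanding, which is what the WARN_ONs police. The pattern, stripped to its skeleton:

    /*
     * ndo_start_xmit (atomic):
     *         netif_stop_queue(ndev);          // one skb in flight, max
     *         priv->tx_skb = skb;
     *         queue_work(priv->xfer_wq, &priv->tx_work);
     *
     * tx_work (process context, may sleep on the bus):
     *         w5100_tx_skb(ndev, skb);
     *         // the S0_IR_SENDOK interrupt later wakes the queue
     */
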
-static int w5100_napi_poll(struct napi_struct *napi, int budget)
+static struct sk_buff *w5100_rx_skb(struct net_device *ndev)
{
- struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
- struct net_device *ndev = priv->ndev;
+ struct w5100_priv *priv = netdev_priv(ndev);
struct sk_buff *skb;
- int rx_count;
u16 rx_len;
u16 offset;
u8 header[2];
+ u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR(priv));
- for (rx_count = 0; rx_count < budget; rx_count++) {
- u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR);
- if (rx_buf_len == 0)
- break;
+ if (rx_buf_len == 0)
+ return NULL;
- offset = w5100_read16(priv, W5100_S0_RX_RD);
- w5100_readbuf(priv, offset, header, 2);
- rx_len = get_unaligned_be16(header) - 2;
-
- skb = netdev_alloc_skb_ip_align(ndev, rx_len);
- if (unlikely(!skb)) {
- w5100_write16(priv, W5100_S0_RX_RD,
- offset + rx_buf_len);
- w5100_command(priv, S0_CR_RECV);
- ndev->stats.rx_dropped++;
- return -ENOMEM;
- }
+ offset = w5100_read16(priv, W5100_S0_RX_RD(priv));
+ w5100_readbuf(priv, offset, header, 2);
+ rx_len = get_unaligned_be16(header) - 2;
- skb_put(skb, rx_len);
- w5100_readbuf(priv, offset + 2, skb->data, rx_len);
- w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len);
- mmiowb();
+ skb = netdev_alloc_skb_ip_align(ndev, rx_len);
+ if (unlikely(!skb)) {
+ w5100_write16(priv, W5100_S0_RX_RD(priv), offset + rx_buf_len);
w5100_command(priv, S0_CR_RECV);
- skb->protocol = eth_type_trans(skb, ndev);
+ ndev->stats.rx_dropped++;
+ return NULL;
+ }
+
+ skb_put(skb, rx_len);
+ w5100_readbuf(priv, offset + 2, skb->data, rx_len);
+ w5100_write16(priv, W5100_S0_RX_RD(priv), offset + 2 + rx_len);
+ w5100_command(priv, S0_CR_RECV);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += rx_len;
+
+ return skb;
+}
+
+static void w5100_rx_work(struct work_struct *work)
+{
+ struct w5100_priv *priv = container_of(work, struct w5100_priv,
+ rx_work);
+ struct sk_buff *skb;
+
+ while ((skb = w5100_rx_skb(priv->ndev)))
+ netif_rx_ni(skb);
+
+ w5100_enable_intr(priv);
+}
+
+static int w5100_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
+ int rx_count;
+
+ for (rx_count = 0; rx_count < budget; rx_count++) {
+ struct sk_buff *skb = w5100_rx_skb(priv->ndev);
- netif_receive_skb(skb);
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += rx_len;
+ if (skb)
+ netif_receive_skb(skb);
+ else
+ break;
}
if (rx_count < budget) {
napi_complete(napi);
- w5100_write(priv, W5100_IMR, IR_S0);
- mmiowb();
+ w5100_enable_intr(priv);
}
return rx_count;
@@ -511,11 +928,10 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
struct net_device *ndev = ndev_instance;
struct w5100_priv *priv = netdev_priv(ndev);
- int ir = w5100_read(priv, W5100_S0_IR);
+ int ir = w5100_read(priv, W5100_S0_IR(priv));
if (!ir)
return IRQ_NONE;
- w5100_write(priv, W5100_S0_IR, ir);
- mmiowb();
+ w5100_write(priv, W5100_S0_IR(priv), ir);
if (ir & S0_IR_SENDOK) {
netif_dbg(priv, tx_done, ndev, "tx done\n");
@@ -523,11 +939,12 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
}
if (ir & S0_IR_RECV) {
- if (napi_schedule_prep(&priv->napi)) {
- w5100_write(priv, W5100_IMR, 0);
- mmiowb();
+ w5100_disable_intr(priv);
+
+ if (priv->ops->may_sleep)
+ queue_work(priv->xfer_wq, &priv->rx_work);
+ else if (napi_schedule_prep(&priv->napi))
__napi_schedule(&priv->napi);
- }
}
return IRQ_HANDLED;
@@ -551,6 +968,14 @@ static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
return IRQ_HANDLED;
}
+static void w5100_setrx_work(struct work_struct *work)
+{
+ struct w5100_priv *priv = container_of(work, struct w5100_priv,
+ setrx_work);
+
+ w5100_hw_start(priv);
+}
+
static void w5100_set_rx_mode(struct net_device *ndev)
{
struct w5100_priv *priv = netdev_priv(ndev);
@@ -558,7 +983,11 @@ static void w5100_set_rx_mode(struct net_device *ndev)
if (priv->promisc != set_promisc) {
priv->promisc = set_promisc;
- w5100_hw_start(priv);
+
+ if (priv->ops->may_sleep)
+ schedule_work(&priv->setrx_work);
+ else
+ w5100_hw_start(priv);
}
}
@@ -620,95 +1049,100 @@ static const struct net_device_ops w5100_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
};
-static int w5100_hw_probe(struct platform_device *pdev)
+static int w5100_mmio_probe(struct platform_device *pdev)
{
struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct w5100_priv *priv = netdev_priv(ndev);
- const char *name = netdev_name(ndev);
+ const void *mac_addr = NULL;
struct resource *mem;
- int mem_size;
+ const struct w5100_ops *ops;
int irq;
- int ret;
- if (data && is_valid_ether_addr(data->mac_addr)) {
- memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
- } else {
- eth_hw_addr_random(ndev);
- }
+ if (data && is_valid_ether_addr(data->mac_addr))
+ mac_addr = data->mac_addr;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
-
- mem_size = resource_size(mem);
-
- spin_lock_init(&priv->reg_lock);
- priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
- if (priv->indirect) {
- priv->read = w5100_read_indirect;
- priv->write = w5100_write_indirect;
- priv->read16 = w5100_read16_indirect;
- priv->write16 = w5100_write16_indirect;
- priv->readbuf = w5100_readbuf_indirect;
- priv->writebuf = w5100_writebuf_indirect;
- } else {
- priv->read = w5100_read_direct;
- priv->write = w5100_write_direct;
- priv->read16 = w5100_read16_direct;
- priv->write16 = w5100_write16_direct;
- priv->readbuf = w5100_readbuf_direct;
- priv->writebuf = w5100_writebuf_direct;
- }
-
- w5100_hw_reset(priv);
- if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT)
- return -ENODEV;
+ if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
+ ops = &w5100_mmio_indirect_ops;
+ else
+ ops = &w5100_mmio_direct_ops;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- ret = request_irq(irq, w5100_interrupt,
- IRQ_TYPE_LEVEL_LOW, name, ndev);
- if (ret < 0)
- return ret;
- priv->irq = irq;
- priv->link_gpio = data ? data->link_gpio : -EINVAL;
- if (gpio_is_valid(priv->link_gpio)) {
- char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
- if (!link_name)
- return -ENOMEM;
- snprintf(link_name, 16, "%s-link", name);
- priv->link_irq = gpio_to_irq(priv->link_gpio);
- if (request_any_context_irq(priv->link_irq, w5100_detect_link,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- link_name, priv->ndev) < 0)
- priv->link_gpio = -EINVAL;
- }
+ return w5100_probe(&pdev->dev, ops, sizeof(struct w5100_mmio_priv),
+ mac_addr, irq, data ? data->link_gpio : -EINVAL);
+}
- netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
- return 0;
+static int w5100_mmio_remove(struct platform_device *pdev)
+{
+ return w5100_remove(&pdev->dev);
+}
+
+void *w5100_ops_priv(const struct net_device *ndev)
+{
+ return netdev_priv(ndev) +
+ ALIGN(sizeof(struct w5100_priv), NETDEV_ALIGN);
}
+EXPORT_SYMBOL_GPL(w5100_ops_priv);
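
Bus glue private data is carved out of the same allocation as struct w5100_priv: w5100_probe() below pads the alloc_etherdev() request so an aligned region follows the core priv, and w5100_ops_priv() recovers it. The layout, roughly:

    /*
     *   netdev_priv(ndev) -> struct w5100_priv
     *                        ... pad to NETDEV_ALIGN ...
     *   w5100_ops_priv()  -> bus glue private area, sizeof_ops_priv
     *                        bytes (e.g. struct w5100_mmio_priv)
     *
     * One allocation serves both layers; no extra kmalloc() or drvdata
     * pointer is needed.
     */
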
-static int w5100_probe(struct platform_device *pdev)
+int w5100_probe(struct device *dev, const struct w5100_ops *ops,
+ int sizeof_ops_priv, const void *mac_addr, int irq,
+ int link_gpio)
{
struct w5100_priv *priv;
struct net_device *ndev;
int err;
+ size_t alloc_size;
- ndev = alloc_etherdev(sizeof(*priv));
+ alloc_size = sizeof(*priv);
+ if (sizeof_ops_priv) {
+ alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
+ alloc_size += sizeof_ops_priv;
+ }
+ alloc_size += NETDEV_ALIGN - 1;
+
+ ndev = alloc_etherdev(alloc_size);
if (!ndev)
return -ENOMEM;
- SET_NETDEV_DEV(ndev, &pdev->dev);
- platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+ dev_set_drvdata(dev, ndev);
priv = netdev_priv(ndev);
+
+ switch (ops->chip_id) {
+ case W5100:
+ priv->s0_regs = W5100_S0_REGS;
+ priv->s0_tx_buf = W5100_TX_MEM_START;
+ priv->s0_tx_buf_size = W5100_TX_MEM_SIZE;
+ priv->s0_rx_buf = W5100_RX_MEM_START;
+ priv->s0_rx_buf_size = W5100_RX_MEM_SIZE;
+ break;
+ case W5200:
+ priv->s0_regs = W5200_S0_REGS;
+ priv->s0_tx_buf = W5200_TX_MEM_START;
+ priv->s0_tx_buf_size = W5200_TX_MEM_SIZE;
+ priv->s0_rx_buf = W5200_RX_MEM_START;
+ priv->s0_rx_buf_size = W5200_RX_MEM_SIZE;
+ break;
+ case W5500:
+ priv->s0_regs = W5500_S0_REGS;
+ priv->s0_tx_buf = W5500_TX_MEM_START;
+ priv->s0_tx_buf_size = W5500_TX_MEM_SIZE;
+ priv->s0_rx_buf = W5500_RX_MEM_START;
+ priv->s0_rx_buf_size = W5500_RX_MEM_SIZE;
+ break;
+ default:
+ err = -EINVAL;
+ goto err_register;
+ }
+
priv->ndev = ndev;
+ priv->ops = ops;
+ priv->irq = irq;
+ priv->link_gpio = link_gpio;
ndev->netdev_ops = &w5100_netdev_ops;
ndev->ethtool_ops = &w5100_ethtool_ops;
- ndev->watchdog_timeo = HZ;
netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);
/* This chip doesn't support VLAN packets with normal MTU,
@@ -720,22 +1154,76 @@ static int w5100_probe(struct platform_device *pdev)
if (err < 0)
goto err_register;
- err = w5100_hw_probe(pdev);
- if (err < 0)
- goto err_hw_probe;
+ priv->xfer_wq = create_workqueue(netdev_name(ndev));
+ if (!priv->xfer_wq) {
+ err = -ENOMEM;
+ goto err_wq;
+ }
+
+ INIT_WORK(&priv->rx_work, w5100_rx_work);
+ INIT_WORK(&priv->tx_work, w5100_tx_work);
+ INIT_WORK(&priv->setrx_work, w5100_setrx_work);
+ INIT_WORK(&priv->restart_work, w5100_restart_work);
+
+ if (mac_addr)
+ memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ else
+ eth_hw_addr_random(ndev);
+
+ if (priv->ops->init) {
+ err = priv->ops->init(priv->ndev);
+ if (err)
+ goto err_hw;
+ }
+
+ err = w5100_hw_reset(priv);
+ if (err)
+ goto err_hw;
+
+ if (ops->may_sleep) {
+ err = request_threaded_irq(priv->irq, NULL, w5100_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ netdev_name(ndev), ndev);
+ } else {
+ err = request_irq(priv->irq, w5100_interrupt,
+ IRQF_TRIGGER_LOW, netdev_name(ndev), ndev);
+ }
+ if (err)
+ goto err_hw;
+
+ if (gpio_is_valid(priv->link_gpio)) {
+ char *link_name = devm_kzalloc(dev, 16, GFP_KERNEL);
+
+ if (!link_name) {
+ err = -ENOMEM;
+ goto err_gpio;
+ }
+ snprintf(link_name, 16, "%s-link", netdev_name(ndev));
+ priv->link_irq = gpio_to_irq(priv->link_gpio);
+ if (request_any_context_irq(priv->link_irq, w5100_detect_link,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ link_name, priv->ndev) < 0)
+ priv->link_gpio = -EINVAL;
+ }
return 0;
-err_hw_probe:
+err_gpio:
+ free_irq(priv->irq, ndev);
+err_hw:
+ destroy_workqueue(priv->xfer_wq);
+err_wq:
unregister_netdev(ndev);
err_register:
free_netdev(ndev);
return err;
}
+EXPORT_SYMBOL_GPL(w5100_probe);
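
The error ladder at the end of w5100_probe() is the usual goto unwind, releasing in reverse order of acquisition, each label falling through to the next:

    /*
     *   err_gpio:     free_irq()           undoes request_irq()
     *   err_hw:       destroy_workqueue()  undoes create_workqueue()
     *   err_wq:       unregister_netdev()  undoes register_netdev()
     *   err_register: free_netdev()        undoes alloc_etherdev()
     */
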
-static int w5100_remove(struct platform_device *pdev)
+int w5100_remove(struct device *dev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct w5100_priv *priv = netdev_priv(ndev);
w5100_hw_reset(priv);
@@ -743,16 +1231,21 @@ static int w5100_remove(struct platform_device *pdev)
if (gpio_is_valid(priv->link_gpio))
free_irq(priv->link_irq, ndev);
+ flush_work(&priv->setrx_work);
+ flush_work(&priv->restart_work);
+ flush_workqueue(priv->xfer_wq);
+ destroy_workqueue(priv->xfer_wq);
+
unregister_netdev(ndev);
free_netdev(ndev);
return 0;
}
+EXPORT_SYMBOL_GPL(w5100_remove);
#ifdef CONFIG_PM_SLEEP
static int w5100_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct w5100_priv *priv = netdev_priv(ndev);
if (netif_running(ndev)) {
@@ -766,8 +1259,7 @@ static int w5100_suspend(struct device *dev)
static int w5100_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct w5100_priv *priv = netdev_priv(ndev);
if (netif_running(ndev)) {
@@ -783,15 +1275,15 @@ static int w5100_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
+SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
+EXPORT_SYMBOL_GPL(w5100_pm_ops);
-static struct platform_driver w5100_driver = {
+static struct platform_driver w5100_mmio_driver = {
.driver = {
.name = DRV_NAME,
.pm = &w5100_pm_ops,
},
- .probe = w5100_probe,
- .remove = w5100_remove,
+ .probe = w5100_mmio_probe,
+ .remove = w5100_mmio_remove,
};
-
-module_platform_driver(w5100_driver);
+module_platform_driver(w5100_mmio_driver);
diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h
new file mode 100644
index 000000000..17983a3b8
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5100.h
@@ -0,0 +1,37 @@
+/*
+ * Ethernet driver for the WIZnet W5100 chip.
+ *
+ * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
+ * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+enum {
+ W5100,
+ W5200,
+ W5500,
+};
+
+struct w5100_ops {
+ bool may_sleep;
+ int chip_id;
+ int (*read)(struct net_device *ndev, u32 addr);
+ int (*write)(struct net_device *ndev, u32 addr, u8 data);
+ int (*read16)(struct net_device *ndev, u32 addr);
+ int (*write16)(struct net_device *ndev, u32 addr, u16 data);
+ int (*readbulk)(struct net_device *ndev, u32 addr, u8 *buf, int len);
+ int (*writebulk)(struct net_device *ndev, u32 addr, const u8 *buf,
+ int len);
+ int (*reset)(struct net_device *ndev);
+ int (*init)(struct net_device *ndev);
+};
+
+void *w5100_ops_priv(const struct net_device *ndev);
+
+int w5100_probe(struct device *dev, const struct w5100_ops *ops,
+ int sizeof_ops_priv, const void *mac_addr, int irq,
+ int link_gpio);
+int w5100_remove(struct device *dev);
+
+extern const struct dev_pm_ops w5100_pm_ops;
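
struct w5100_ops is the whole bus contract: a glue driver supplies the accessors plus may_sleep and chip_id, then calls w5100_probe()/w5100_remove() from its own probe/remove. A compressed, purely hypothetical sketch of a sleeping-bus user (every my_* name below is invented for illustration; accessor bodies omitted):

    static int my_bus_read(struct net_device *ndev, u32 addr);
    static int my_bus_write(struct net_device *ndev, u32 addr, u8 data);
    static int my_bus_read16(struct net_device *ndev, u32 addr);
    static int my_bus_write16(struct net_device *ndev, u32 addr, u16 data);
    static int my_bus_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
                               int len);
    static int my_bus_writebulk(struct net_device *ndev, u32 addr,
                                const u8 *buf, int len);

    static const struct w5100_ops my_bus_ops = {
            .may_sleep = true,      /* transfers sleep: core will use a
                                     * threaded irq and its workqueue */
            .chip_id   = W5500,
            .read      = my_bus_read,
            .write     = my_bus_write,
            .read16    = my_bus_read16,
            .write16   = my_bus_write16,
            .readbulk  = my_bus_readbulk,
            .writebulk = my_bus_writebulk,
    };

    static int my_bus_probe(struct device *dev, int irq)
    {
            /* sizeof_ops_priv = 0: no glue priv; NULL mac: random
             * address; -EINVAL: no link gpio */
            return w5100_probe(dev, &my_bus_ops, 0, NULL, irq, -EINVAL);
    }
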
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 8da7b930f..0b37ce9f2 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -362,7 +362,7 @@ static void w5300_tx_timeout(struct net_device *ndev)
w5300_hw_reset(priv);
w5300_hw_start(priv);
ndev->stats.tx_errors++;
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
netif_wake_queue(ndev);
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 5a1068df7..739708712 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -584,7 +584,7 @@ static void temac_device_reset(struct net_device *ndev)
dev_err(&ndev->dev, "Error setting TEMAC options\n");
/* Init Driver variable */
- ndev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(ndev); /* prevent tx timeout */
}
static void temac_adjust_link(struct net_device *ndev)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 468464470..8c7f5be51 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -508,7 +508,7 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_set_multicast_list(ndev);
axienet_setoptions(ndev, lp->options);
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
}
/**
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index e324b3092..3cee84a24 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -531,7 +531,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
}
/* To exclude tx timeout */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* We're all ready to go. Start the queue */
netif_wake_queue(dev);
@@ -563,7 +563,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
dev->stats.tx_bytes += lp->deferred_skb->len;
dev_kfree_skb_irq(lp->deferred_skb);
lp->deferred_skb = NULL;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
}
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index d56f86932..7b44968e0 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1199,7 +1199,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work)
struct net_device *dev = local->dev;
/* reset the card */
do_reset(dev,1);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/fddi/skfp/Makefile b/drivers/net/fddi/skfp/Makefile
index b0be0234a..a957a1c7e 100644
--- a/drivers/net/fddi/skfp/Makefile
+++ b/drivers/net/fddi/skfp/Makefile
@@ -17,4 +17,4 @@ skfp-objs := skfddi.o hwmtm.o fplustm.o smt.o cfm.o \
# projects. To keep the source common for all those drivers (and
# thus simplify fixes to it), please do not clean it up!
-ccflags-y := -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
+ccflags-y := -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index b103adb8d..0dbafedc0 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -179,6 +179,8 @@ void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
info->v1i.vlan_id[i] = vlan_id[i];
+
+ info->v1i.rx_status |= FJES_RX_MTU_CHANGING_DONE;
}
void
@@ -214,6 +216,7 @@ static int fjes_hw_setup(struct fjes_hw *hw)
u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
struct fjes_device_command_param param;
struct ep_share_mem_info *buf_pair;
+ unsigned long flags;
size_t mem_size;
int result;
int epidx;
@@ -262,10 +265,12 @@ static int fjes_hw_setup(struct fjes_hw *hw)
if (result)
return result;
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx, mac,
fjes_support_mtu[0]);
fjes_hw_setup_epbuf(&buf_pair->rx, mac,
fjes_support_mtu[0]);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
}
}
@@ -327,6 +332,7 @@ int fjes_hw_init(struct fjes_hw *hw)
INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);
mutex_init(&hw->hw_info.lock);
+ spin_lock_init(&hw->rx_status_lock);
hw->max_epid = fjes_hw_get_max_epid(hw);
hw->my_epid = fjes_hw_get_my_epid(hw);
@@ -734,6 +740,7 @@ fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
void fjes_hw_raise_epstop(struct fjes_hw *hw)
{
enum ep_partner_status status;
+ unsigned long flags;
int epidx;
for (epidx = 0; epidx < hw->max_epid; epidx++) {
@@ -753,8 +760,10 @@ void fjes_hw_raise_epstop(struct fjes_hw *hw)
set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
set_bit(epidx, &hw->txrx_stop_req_bit);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_REQUEST;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
}
}
@@ -810,7 +819,8 @@ bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
{
union ep_buffer_info *info = epbh->info;
- return (info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu));
+ return ((info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu)) &&
+ info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE);
}
bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
@@ -863,6 +873,9 @@ bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
{
union ep_buffer_info *info = epbh->info;
+ if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
+ return true;
+
if (info->v1i.count_max == 0)
return true;
@@ -932,6 +945,7 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
struct fjes_adapter *adapter;
struct net_device *netdev;
+ unsigned long flags;
ulong unshare_bit = 0;
ulong share_bit = 0;
@@ -1024,8 +1038,10 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
continue;
if (test_bit(epidx, &share_bit)) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
mutex_lock(&hw->hw_info.lock);
@@ -1069,10 +1085,14 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
mutex_unlock(&hw->hw_info.lock);
- if (ret == 0)
+ if (ret == 0) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(
&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock,
+ flags);
+ }
}
if (test_bit(epidx, &irq_bit)) {
@@ -1080,9 +1100,11 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
REG_ICTL_MASK_TXRX_STOP_REQ);
set_bit(epidx, &hw->txrx_stop_req_bit);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epidx].tx.
info->v1i.rx_status |=
FJES_RX_STOP_REQ_REQUEST;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
}
}
@@ -1098,6 +1120,7 @@ static void fjes_hw_epstop_task(struct work_struct *work)
{
struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
+ unsigned long flags;
ulong remain_bit;
int epid_bit;
@@ -1105,9 +1128,12 @@ static void fjes_hw_epstop_task(struct work_struct *work)
while ((remain_bit = hw->epstop_req_bit)) {
for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
if (remain_bit & 1) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epid_bit].
tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock,
+ flags);
clear_bit(epid_bit, &hw->epstop_req_bit);
set_bit(epid_bit,
diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h
index 6d57b89a0..1445ac99d 100644
--- a/drivers/net/fjes/fjes_hw.h
+++ b/drivers/net/fjes/fjes_hw.h
@@ -33,9 +33,9 @@ struct fjes_hw;
#define EP_BUFFER_SUPPORT_VLAN_MAX 4
#define EP_BUFFER_INFO_SIZE 4096
-#define FJES_DEVICE_RESET_TIMEOUT ((17 + 1) * 3) /* sec */
-#define FJES_COMMAND_REQ_TIMEOUT (5 + 1) /* sec */
-#define FJES_COMMAND_REQ_BUFF_TIMEOUT (8 * 3) /* sec */
+#define FJES_DEVICE_RESET_TIMEOUT ((17 + 1) * 3 * 8) /* sec */
+#define FJES_COMMAND_REQ_TIMEOUT ((5 + 1) * 3 * 8) /* sec */
+#define FJES_COMMAND_REQ_BUFF_TIMEOUT (60 * 3) /* sec */
#define FJES_COMMAND_EPSTOP_WAIT_TIMEOUT (1) /* sec */
#define FJES_CMD_REQ_ERR_INFO_PARAM (0x0001)
@@ -57,6 +57,7 @@ struct fjes_hw;
#define FJES_RX_STOP_REQ_DONE (0x1)
#define FJES_RX_STOP_REQ_REQUEST (0x2)
#define FJES_RX_POLL_WORK (0x4)
+#define FJES_RX_MTU_CHANGING_DONE (0x8)
#define EP_BUFFER_SIZE \
(((sizeof(union ep_buffer_info) + (128 * (64 * 1024))) \
@@ -299,6 +300,8 @@ struct fjes_hw {
u8 *base;
struct fjes_hw_info hw_info;
+
+ spinlock_t rx_status_lock; /* spinlock for rx_status */
};
int fjes_hw_init(struct fjes_hw *);
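
rx_status lives in the shared endpoint buffer and is updated read-modify-write (|= and &=) from process context, workers, and the NAPI poll loop, so every update is now wrapped in rx_status_lock. The race the lock closes, sketched:

    /*
     * |= is a load/modify/store sequence, so two unlocked writers can
     * lose an update:
     *
     *   CPU0                             CPU1
     *   tmp = rx_status;
     *                                    tmp = rx_status;
     *   tmp |= FJES_RX_POLL_WORK;
     *   rx_status = tmp;
     *                                    tmp |= FJES_RX_STOP_REQ_DONE;
     *                                    rx_status = tmp;  // CPU0's bit lost
     */
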
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 0ddb54fe3..86c331bb5 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -29,7 +29,7 @@
#include "fjes.h"
#define MAJ 1
-#define MIN 0
+#define MIN 1
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
#define DRV_NAME "fjes"
char fjes_driver_name[] = DRV_NAME;
@@ -290,6 +290,7 @@ static int fjes_close(struct net_device *netdev)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
+ unsigned long flags;
int epidx;
netif_tx_stop_all_queues(netdev);
@@ -299,13 +300,18 @@ static int fjes_close(struct net_device *netdev)
napi_disable(&adapter->napi);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
- adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status &=
- ~FJES_RX_POLL_WORK;
+ if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+ EP_PARTNER_SHARED)
+ adapter->hw.ep_shm_info[epidx]
+ .tx.info->v1i.rx_status &=
+ ~FJES_RX_POLL_WORK;
}
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
fjes_free_irq(adapter);
@@ -330,6 +336,7 @@ static int fjes_setup_resources(struct fjes_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct ep_share_mem_info *buf_pair;
struct fjes_hw *hw = &adapter->hw;
+ unsigned long flags;
int result;
int epidx;
@@ -371,8 +378,10 @@ static int fjes_setup_resources(struct fjes_adapter *adapter)
buf_pair = &hw->ep_shm_info[epidx];
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
if (fjes_hw_epid_is_same_zone(hw, epidx)) {
mutex_lock(&hw->hw_info.lock);
@@ -402,6 +411,7 @@ static void fjes_free_resources(struct fjes_adapter *adapter)
struct ep_share_mem_info *buf_pair;
struct fjes_hw *hw = &adapter->hw;
bool reset_flag = false;
+ unsigned long flags;
int result;
int epidx;
@@ -418,8 +428,10 @@ static void fjes_free_resources(struct fjes_adapter *adapter)
buf_pair = &hw->ep_shm_info[epidx];
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(epidx, &hw->txrx_stop_req_bit);
}
@@ -459,7 +471,7 @@ static void fjes_tx_stall_task(struct work_struct *work)
int i;
if (((long)jiffies -
- (long)(netdev->trans_start)) > FJES_TX_TX_STALL_TIMEOUT) {
+ dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
netif_wake_queue(netdev);
return;
}
@@ -481,6 +493,9 @@ static void fjes_tx_stall_task(struct work_struct *work)
info = adapter->hw.ep_shm_info[epid].tx.info;
+ if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
+ return;
+
if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
info->v1i.count_max)) {
all_queue_available = 0;
@@ -549,7 +564,8 @@ static void fjes_raise_intr_rxdata_task(struct work_struct *work)
if ((hw->ep_shm_info[epid].tx_status_work ==
FJES_TX_DELAY_SEND_PENDING) &&
(pstatus == EP_PARTNER_SHARED) &&
- !(hw->ep_shm_info[epid].rx.info->v1i.rx_status)) {
+ !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
+ FJES_RX_POLL_WORK)) {
fjes_hw_raise_interrupt(hw, epid,
REG_ICTL_MASK_RX_DATA);
}
@@ -653,7 +669,7 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
/* version is NOT 0 */
adapter->stats64.tx_carrier_errors += 1;
- hw->ep_shm_info[my_epid].net_stats
+ hw->ep_shm_info[dest_epid].net_stats
.tx_carrier_errors += 1;
ret = NETDEV_TX_OK;
@@ -661,9 +677,9 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
&adapter->hw.ep_shm_info[dest_epid].rx,
netdev->mtu)) {
adapter->stats64.tx_dropped += 1;
- hw->ep_shm_info[my_epid].net_stats.tx_dropped += 1;
+ hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
adapter->stats64.tx_errors += 1;
- hw->ep_shm_info[my_epid].net_stats.tx_errors += 1;
+ hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
ret = NETDEV_TX_OK;
} else if (vlan &&
@@ -694,15 +710,15 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
(long)adapter->tx_start_jiffies) >=
FJES_TX_RETRY_TIMEOUT) {
adapter->stats64.tx_fifo_errors += 1;
- hw->ep_shm_info[my_epid].net_stats
+ hw->ep_shm_info[dest_epid].net_stats
.tx_fifo_errors += 1;
adapter->stats64.tx_errors += 1;
- hw->ep_shm_info[my_epid].net_stats
+ hw->ep_shm_info[dest_epid].net_stats
.tx_errors += 1;
ret = NETDEV_TX_OK;
} else {
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
netif_tx_stop_queue(cur_queue);
if (!work_pending(&adapter->tx_stall_task))
@@ -714,10 +730,10 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
} else {
if (!is_multi) {
adapter->stats64.tx_packets += 1;
- hw->ep_shm_info[my_epid].net_stats
+ hw->ep_shm_info[dest_epid].net_stats
.tx_packets += 1;
adapter->stats64.tx_bytes += len;
- hw->ep_shm_info[my_epid].net_stats
+ hw->ep_shm_info[dest_epid].net_stats
.tx_bytes += len;
}
@@ -759,9 +775,12 @@ fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
bool running = netif_running(netdev);
- int ret = 0;
- int idx;
+ struct fjes_hw *hw = &adapter->hw;
+ unsigned long flags;
+ int ret = -EINVAL;
+ int idx, epidx;
for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
if (new_mtu <= fjes_support_mtu[idx]) {
@@ -769,19 +788,58 @@ static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
if (new_mtu == netdev->mtu)
return 0;
- if (running)
- fjes_close(netdev);
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ if (running) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+ hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
+ ~FJES_RX_MTU_CHANGING_DONE;
+ }
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
+
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ cancel_work_sync(&adapter->tx_stall_task);
+ napi_disable(&adapter->napi);
+
+ msleep(1000);
+
+ netif_tx_stop_all_queues(netdev);
+ }
+
+ netdev->mtu = new_mtu;
- netdev->mtu = new_mtu;
+ if (running) {
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
- if (running)
- ret = fjes_open(netdev);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
+ netdev->dev_addr,
+ netdev->mtu);
- return ret;
+ hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
+ FJES_RX_MTU_CHANGING_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
}
+
+ netif_tx_wake_all_queues(netdev);
+ netif_carrier_on(netdev);
+ napi_enable(&adapter->napi);
+ napi_schedule(&adapter->napi);
}
- return -EINVAL;
+ return ret;
}
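
fjes_change_mtu() no longer bounces the device through fjes_close()/fjes_open(); it quiesces in place around the buffer reconfiguration. FJES_RX_MTU_CHANGING_DONE doubles as the fence: fjes_hw_epbuf_rx_is_empty() above reports a buffer without the bit as empty, so peers stop delivering while the geometry changes. The sequence, condensed:

    /*
     * 1. clear FJES_RX_MTU_CHANGING_DONE on every shared buffer
     *    (peers now see the RX side as empty and back off)
     * 2. stop tx queues, drop carrier, cancel tx_stall_task, disable
     *    NAPI; msleep(1000) lets in-flight frames drain
     * 3. netdev->mtu = new_mtu
     * 4. fjes_hw_setup_epbuf() rebuilds each buffer layout for the new
     *    frame size and sets FJES_RX_MTU_CHANGING_DONE again
     * 5. wake the queues, carrier on, re-enable and kick NAPI
     */
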
static int fjes_vlan_rx_add_vid(struct net_device *netdev,
@@ -825,6 +883,7 @@ static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
{
struct fjes_hw *hw = &adapter->hw;
enum ep_partner_status status;
+ unsigned long flags;
status = fjes_hw_get_partner_ep_status(hw, src_epid);
switch (status) {
@@ -834,8 +893,10 @@ static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
break;
case EP_PARTNER_WAITING:
if (src_epid < hw->my_epid) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(src_epid, &hw->txrx_stop_req_bit);
set_bit(src_epid, &adapter->unshare_watch_bitmask);
@@ -861,14 +922,17 @@ static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
{
struct fjes_hw *hw = &adapter->hw;
enum ep_partner_status status;
+ unsigned long flags;
set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
status = fjes_hw_get_partner_ep_status(hw, src_epid);
switch (status) {
case EP_PARTNER_WAITING:
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(src_epid, &hw->txrx_stop_req_bit);
/* fall through */
case EP_PARTNER_UNSHARE:
@@ -1001,13 +1065,17 @@ static int fjes_poll(struct napi_struct *napi, int budget)
size_t frame_len;
void *frame;
+ spin_lock(&hw->rx_status_lock);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
- adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status |=
- FJES_RX_POLL_WORK;
+ if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+ EP_PARTNER_SHARED)
+ adapter->hw.ep_shm_info[epidx]
+ .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
}
+ spin_unlock(&hw->rx_status_lock);
while (work_done < budget) {
prefetch(&adapter->hw);
@@ -1065,13 +1133,17 @@ static int fjes_poll(struct napi_struct *napi, int budget)
if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
napi_reschedule(napi);
} else {
+ spin_lock(&hw->rx_status_lock);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
- adapter->hw.ep_shm_info[epidx]
- .tx.info->v1i.rx_status &=
+ if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+ EP_PARTNER_SHARED)
+ adapter->hw.ep_shm_info[epidx].tx
+ .info->v1i.rx_status &=
~FJES_RX_POLL_WORK;
}
+ spin_unlock(&hw->rx_status_lock);
fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
}
@@ -1129,7 +1201,7 @@ static int fjes_probe(struct platform_device *plat_dev)
res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
hw->hw_res.start = res->start;
- hw->hw_res.size = res->end - res->start + 1;
+ hw->hw_res.size = resource_size(res);
hw->hw_res.irq = platform_get_irq(plat_dev, 0);
err = fjes_hw_init(&adapter->hw);
if (err)
@@ -1203,7 +1275,7 @@ static void fjes_netdev_setup(struct net_device *netdev)
netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
netdev->netdev_ops = &fjes_netdev_ops;
fjes_set_ethtool_ops(netdev);
- netdev->mtu = fjes_support_mtu[0];
+ netdev->mtu = fjes_support_mtu[3];
netdev->flags |= IFF_BROADCAST;
netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
}
@@ -1240,6 +1312,7 @@ static void fjes_watch_unshare_task(struct work_struct *work)
int max_epid, my_epid, epidx;
int stop_req, stop_req_done;
ulong unshare_watch_bitmask;
+ unsigned long flags;
int wait_time = 0;
int is_shared;
int ret;
@@ -1292,8 +1365,10 @@ static void fjes_watch_unshare_task(struct work_struct *work)
}
mutex_unlock(&hw->hw_info.lock);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(epidx, &hw->txrx_stop_req_bit);
clear_bit(epidx, &unshare_watch_bitmask);
@@ -1331,9 +1406,12 @@ static void fjes_watch_unshare_task(struct work_struct *work)
}
mutex_unlock(&hw->hw_info.lock);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(
&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock,
+ flags);
clear_bit(epidx, &hw->txrx_stop_req_bit);
clear_bit(epidx, &unshare_watch_bitmask);
@@ -1341,8 +1419,11 @@ static void fjes_watch_unshare_task(struct work_struct *work)
}
if (test_bit(epidx, &unshare_watch_bitmask)) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
~FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock,
+ flags);
}
}
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index c70e51567..9b3dc3c61 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -87,7 +87,6 @@ struct geneve_sock {
struct socket *sock;
struct rcu_head rcu;
int refcnt;
- struct udp_offload udp_offloads;
struct hlist_head vni_list[VNI_HASH_SIZE];
u32 flags;
};
@@ -405,14 +404,6 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs)
struct net *net = sock_net(sk);
sa_family_t sa_family = geneve_get_sk_family(gs);
__be16 port = inet_sk(sk)->inet_sport;
- int err;
-
- if (sa_family == AF_INET) {
- err = udp_add_offload(sock_net(sk), &gs->udp_offloads);
- if (err)
- pr_warn("geneve: udp_add_offload failed with status %d\n",
- err);
- }
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
@@ -428,9 +419,9 @@ static int geneve_hlen(struct genevehdr *gh)
return sizeof(*gh) + gh->opt_len * 4;
}
-static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
- struct sk_buff *skb,
- struct udp_offload *uoff)
+static struct sk_buff **geneve_gro_receive(struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb)
{
struct sk_buff *p, **pp = NULL;
struct genevehdr *gh, *gh2;
@@ -491,8 +482,8 @@ out:
return pp;
}
-static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
- struct udp_offload *uoff)
+static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
+ int nhoff)
{
struct genevehdr *gh;
struct packet_offload *ptype;
@@ -542,14 +533,14 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
INIT_HLIST_HEAD(&gs->vni_list[h]);
/* Initialize the geneve udp offloads structure */
- gs->udp_offloads.port = port;
- gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive;
- gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
geneve_notify_add_rx_port(gs);
/* Mark socket as an encapsulation socket */
+ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
tunnel_cfg.sk_user_data = gs;
tunnel_cfg.encap_type = 1;
+ tunnel_cfg.gro_receive = geneve_gro_receive;
+ tunnel_cfg.gro_complete = geneve_gro_complete;
tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
tunnel_cfg.encap_destroy = NULL;
setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
@@ -573,9 +564,6 @@ static void geneve_notify_del_rx_port(struct geneve_sock *gs)
}
rcu_read_unlock();
-
- if (sa_family == AF_INET)
- udp_del_offload(&gs->udp_offloads);
}
static void __geneve_sock_release(struct geneve_sock *gs)
@@ -705,16 +693,12 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err)) {
- kfree_skb(skb);
+ if (unlikely(err))
goto free_rt;
- }
- skb = udp_tunnel_handle_offloads(skb, udp_sum);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
+ err = udp_tunnel_handle_offloads(skb, udp_sum);
+ if (err)
goto free_rt;
- }
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
@@ -742,16 +726,12 @@ static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr);
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err)) {
- kfree_skb(skb);
+ if (unlikely(err))
goto free_dst;
- }
- skb = udp_tunnel_handle_offloads(skb, udp_sum);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
+ err = udp_tunnel_handle_offloads(skb, udp_sum);
+ if (err)
goto free_dst;
- }
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
@@ -946,7 +926,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
err = geneve_build_skb(rt, skb, key->tun_flags, vni,
info->options_len, opts, flags, xnet);
if (unlikely(err))
- goto err;
+ goto tx_error;
tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
ttl = key->ttl;
@@ -955,7 +935,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
err = geneve_build_skb(rt, skb, 0, geneve->vni,
0, NULL, flags, xnet);
if (unlikely(err))
- goto err;
+ goto tx_error;
tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
ttl = geneve->ttl;
@@ -973,13 +953,13 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
tx_error:
dev_kfree_skb(skb);
-err:
+
if (err == -ELOOP)
dev->stats.collisions++;
else if (err == -ENETUNREACH)
dev->stats.tx_carrier_errors++;
- else
- dev->stats.tx_errors++;
+
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
@@ -1035,7 +1015,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
info->options_len, opts,
flags, xnet);
if (unlikely(err))
- goto err;
+ goto tx_error;
prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
ttl = key->ttl;
@@ -1044,7 +1024,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
err = geneve6_build_skb(dst, skb, 0, geneve->vni,
0, NULL, flags, xnet);
if (unlikely(err))
- goto err;
+ goto tx_error;
prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
iip, skb);
@@ -1063,13 +1043,13 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
tx_error:
dev_kfree_skb(skb);
-err:
+
if (err == -ELOOP)
dev->stats.collisions++;
else if (err == -ENETUNREACH)
dev->stats.tx_carrier_errors++;
- else
- dev->stats.tx_errors++;
+
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
#endif
@@ -1194,7 +1174,7 @@ static struct device_type geneve_type = {
* supply the listening GENEVE udp ports. Callers are expected
* to implement the ndo_add_geneve_port.
*/
-void geneve_get_rx_port(struct net_device *dev)
+static void geneve_push_rx_ports(struct net_device *dev)
{
struct net *net = dev_net(dev);
struct geneve_net *gn = net_generic(net, geneve_net_id);
@@ -1203,6 +1183,9 @@ void geneve_get_rx_port(struct net_device *dev)
struct sock *sk;
__be16 port;
+ if (!dev->netdev_ops->ndo_add_geneve_port)
+ return;
+
rcu_read_lock();
list_for_each_entry_rcu(gs, &gn->sock_list, list) {
sk = gs->sock->sk;
@@ -1212,7 +1195,6 @@ void geneve_get_rx_port(struct net_device *dev)
}
rcu_read_unlock();
}
-EXPORT_SYMBOL_GPL(geneve_get_rx_port);
/* Initialize the device structure. */
static void geneve_setup(struct net_device *dev)
@@ -1531,6 +1513,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
+ LIST_HEAD(list_kill);
int err;
memset(tb, 0, sizeof(tb));
@@ -1542,8 +1525,10 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
err = geneve_configure(net, dev, &geneve_remote_unspec,
0, 0, 0, 0, htons(dst_port), true,
GENEVE_F_UDP_ZERO_CSUM6_RX);
- if (err)
- goto err;
+ if (err) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
/* openvswitch users expect packet sizes to be unrestricted,
* so set the largest MTU we can.
@@ -1552,14 +1537,34 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
if (err)
goto err;
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0)
+ goto err;
+
return dev;
err:
- free_netdev(dev);
+ geneve_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
+static int geneve_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (event == NETDEV_OFFLOAD_PUSH_GENEVE)
+ geneve_push_rx_ports(dev);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block geneve_notifier_block __read_mostly = {
+ .notifier_call = geneve_netdevice_event,
+};
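
With the export gone, NIC drivers that used to call geneve_get_rx_port() directly now reach geneve_push_rx_ports() through this netdevice notifier; the header-side shim is roughly the following (a sketch of the include/net/geneve.h wrapper, which may differ in detail):

    static inline void geneve_get_rx_port(struct net_device *netdev)
    {
            ASSERT_RTNL();
            call_netdevice_notifiers(NETDEV_OFFLOAD_PUSH_GENEVE, netdev);
    }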
+
static __net_init int geneve_init_net(struct net *net)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
@@ -1612,11 +1617,18 @@ static int __init geneve_init_module(void)
if (rc)
goto out1;
- rc = rtnl_link_register(&geneve_link_ops);
+ rc = register_netdevice_notifier(&geneve_notifier_block);
if (rc)
goto out2;
+ rc = rtnl_link_register(&geneve_link_ops);
+ if (rc)
+ goto out3;
+
return 0;
+
+out3:
+ unregister_netdevice_notifier(&geneve_notifier_block);
out2:
unregister_pernet_subsys(&geneve_net_ops);
out1:
@@ -1627,6 +1639,7 @@ late_initcall(geneve_init_module);
static void __exit geneve_cleanup_module(void)
{
rtnl_link_unregister(&geneve_link_ops);
+ unregister_netdevice_notifier(&geneve_notifier_block);
unregister_pernet_subsys(&geneve_net_ops);
}
module_exit(geneve_cleanup_module);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
new file mode 100644
index 000000000..4e976a0d5
--- /dev/null
+++ b/drivers/net/gtp.c
@@ -0,0 +1,1375 @@
+/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
+ *
+ * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
+ * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * Author: Harald Welte <hwelte@sysmocom.de>
+ * Pablo Neira Ayuso <pablo@netfilter.org>
+ * Andreas Schultz <aschultz@travelping.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <linux/rculist.h>
+#include <linux/jhash.h>
+#include <linux/if_tunnel.h>
+#include <linux/net.h>
+#include <linux/file.h>
+#include <linux/gtp.h>
+
+#include <net/net_namespace.h>
+#include <net/protocol.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/icmp.h>
+#include <net/xfrm.h>
+#include <net/genetlink.h>
+#include <net/netns/generic.h>
+#include <net/gtp.h>
+
+/* An active session for the subscriber. */
+struct pdp_ctx {
+ struct hlist_node hlist_tid;
+ struct hlist_node hlist_addr;
+
+ union {
+ u64 tid;
+ struct {
+ u64 tid;
+ u16 flow;
+ } v0;
+ struct {
+ u32 i_tei;
+ u32 o_tei;
+ } v1;
+ } u;
+ u8 gtp_version;
+ u16 af;
+
+ struct in_addr ms_addr_ip4;
+ struct in_addr sgsn_addr_ip4;
+
+ atomic_t tx_seq;
+ struct rcu_head rcu_head;
+};
+
+/* One instance of the GTP device. */
+struct gtp_dev {
+ struct list_head list;
+
+ struct socket *sock0;
+ struct socket *sock1u;
+
+ struct net *net;
+ struct net_device *dev;
+
+ unsigned int hash_size;
+ struct hlist_head *tid_hash;
+ struct hlist_head *addr_hash;
+};
+
+static int gtp_net_id __read_mostly;
+
+struct gtp_net {
+ struct list_head gtp_dev_list;
+};
+
+static u32 gtp_h_initval;
+
+static inline u32 gtp0_hashfn(u64 tid)
+{
+ u32 *tid32 = (u32 *) &tid;
+ return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
+}
+
+static inline u32 gtp1u_hashfn(u32 tid)
+{
+ return jhash_1word(tid, gtp_h_initval);
+}
+
+static inline u32 ipv4_hashfn(__be32 ip)
+{
+ return jhash_1word((__force u32)ip, gtp_h_initval);
+}
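
An illustrative helper (not part of the driver) showing how the hashes above select buckets; each lookup function below open-codes this, reducing the hash modulo the table size chosen at link creation time:

    static struct hlist_head *gtp_tid_bucket(struct gtp_dev *gtp, u64 tid)
    {
            /* illustrative only; gtp0_pdp_find() below does exactly this */
            return &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];
    }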
+
+/* Resolve a PDP context structure based on the 64bit TID. */
+static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
+{
+ struct hlist_head *head;
+ struct pdp_ctx *pdp;
+
+ head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];
+
+ hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
+ if (pdp->gtp_version == GTP_V0 &&
+ pdp->u.v0.tid == tid)
+ return pdp;
+ }
+ return NULL;
+}
+
+/* Resolve a PDP context structure based on the 32bit TEI. */
+static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
+{
+ struct hlist_head *head;
+ struct pdp_ctx *pdp;
+
+ head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];
+
+ hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
+ if (pdp->gtp_version == GTP_V1 &&
+ pdp->u.v1.i_tei == tid)
+ return pdp;
+ }
+ return NULL;
+}
+
+/* Resolve a PDP context based on IPv4 address of MS. */
+static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
+{
+ struct hlist_head *head;
+ struct pdp_ctx *pdp;
+
+ head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];
+
+ hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
+ if (pdp->af == AF_INET &&
+ pdp->ms_addr_ip4.s_addr == ms_addr)
+ return pdp;
+ }
+
+ return NULL;
+}
+
+static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
+ unsigned int hdrlen)
+{
+ struct iphdr *iph;
+
+ if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
+ return false;
+
+ iph = (struct iphdr *)(skb->data + hdrlen);
+
+ return iph->saddr == pctx->ms_addr_ip4.s_addr;
+}
+
+/* Check if the inner IP source address in this packet is assigned to any
+ * existing mobile subscriber.
+ */
+static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
+ unsigned int hdrlen)
+{
+ switch (ntohs(skb->protocol)) {
+ case ETH_P_IP:
+ return gtp_check_src_ms_ipv4(skb, pctx, hdrlen);
+ }
+ return false;
+}
+
+/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
+static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
+ bool xnet)
+{
+ unsigned int hdrlen = sizeof(struct udphdr) +
+ sizeof(struct gtp0_header);
+ struct gtp0_header *gtp0;
+ struct pdp_ctx *pctx;
+ int ret = 0;
+
+ if (!pskb_may_pull(skb, hdrlen))
+ return -1;
+
+ gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
+
+ if ((gtp0->flags >> 5) != GTP_V0)
+ return 1;
+
+ if (gtp0->type != GTP_TPDU)
+ return 1;
+
+ rcu_read_lock();
+ pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
+ if (!pctx) {
+ netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
+ ret = -1;
+ goto out_rcu;
+ }
+
+ if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
+ netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
+ ret = -1;
+ goto out_rcu;
+ }
+ rcu_read_unlock();
+
+ /* Get rid of the GTP + UDP headers. */
+ return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
+out_rcu:
+ rcu_read_unlock();
+ return ret;
+}
+
+static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
+ bool xnet)
+{
+ unsigned int hdrlen = sizeof(struct udphdr) +
+ sizeof(struct gtp1_header);
+ struct gtp1_header *gtp1;
+ struct pdp_ctx *pctx;
+ int ret = 0;
+
+ if (!pskb_may_pull(skb, hdrlen))
+ return -1;
+
+ gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
+
+ if ((gtp1->flags >> 5) != GTP_V1)
+ return 1;
+
+ if (gtp1->type != GTP_TPDU)
+ return 1;
+
+ /* From 29.060: "This field shall be present if and only if any one or
+ * more of the S, PN and E flags are set."
+ *
+ * If any of these bits is set, the whole optional 4-byte field is
+ * present on the wire, so account for all of it.
+ */
+ if (gtp1->flags & GTP1_F_MASK)
+ hdrlen += 4;
+
+ /* Make sure the header is large enough, including extensions. */
+ if (!pskb_may_pull(skb, hdrlen))
+ return -1;
+
+ gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
+
+ rcu_read_lock();
+ pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
+ if (!pctx) {
+ netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
+ ret = -1;
+ goto out_rcu;
+ }
+
+ if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
+ netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
+ ret = -1;
+ goto out_rcu;
+ }
+ rcu_read_unlock();
+
+ /* Get rid of the GTP + UDP headers. */
+ return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
+out_rcu:
+ rcu_read_unlock();
+ return ret;
+}
+
+static void gtp_encap_disable(struct gtp_dev *gtp)
+{
+ if (gtp->sock0 && gtp->sock0->sk) {
+ udp_sk(gtp->sock0->sk)->encap_type = 0;
+ rcu_assign_sk_user_data(gtp->sock0->sk, NULL);
+ }
+ if (gtp->sock1u && gtp->sock1u->sk) {
+ udp_sk(gtp->sock1u->sk)->encap_type = 0;
+ rcu_assign_sk_user_data(gtp->sock1u->sk, NULL);
+ }
+
+ gtp->sock0 = NULL;
+ gtp->sock1u = NULL;
+}
+
+static void gtp_encap_destroy(struct sock *sk)
+{
+ struct gtp_dev *gtp;
+
+ gtp = rcu_dereference_sk_user_data(sk);
+ if (gtp)
+ gtp_encap_disable(gtp);
+}
+
+/* UDP encapsulation receive handler. See net/ipv4/udp.c.
+ * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
+ */
+static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct pcpu_sw_netstats *stats;
+ struct gtp_dev *gtp;
+ bool xnet;
+ int ret;
+
+ gtp = rcu_dereference_sk_user_data(sk);
+ if (!gtp)
+ return 1;
+
+ netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
+
+ xnet = !net_eq(gtp->net, dev_net(gtp->dev));
+
+ switch (udp_sk(sk)->encap_type) {
+ case UDP_ENCAP_GTP0:
+ netdev_dbg(gtp->dev, "received GTP0 packet\n");
+ ret = gtp0_udp_encap_recv(gtp, skb, xnet);
+ break;
+ case UDP_ENCAP_GTP1U:
+ netdev_dbg(gtp->dev, "received GTP1U packet\n");
+ ret = gtp1u_udp_encap_recv(gtp, skb, xnet);
+ break;
+ default:
+ ret = -1; /* Shouldn't happen. */
+ }
+
+ switch (ret) {
+ case 1:
+ netdev_dbg(gtp->dev, "pass up to the process\n");
+ return 1;
+ case 0:
+ netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
+ break;
+ case -1:
+ netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ /* Now that the UDP and the GTP header have been removed, set up the
+ * new network header. This is required by the upper layer to
+ * calculate the transport header.
+ */
+ skb_reset_network_header(skb);
+
+ skb->dev = gtp->dev;
+
+ stats = this_cpu_ptr(gtp->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ netif_rx(skb);
+
+ return 0;
+}
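
A hedged sketch of the encap_rcv contract this handler implements, reduced to a hypothetical "foo" tunnel (all foo_ names are illustrative): return >0 to feed the packet back into the normal UDP path, free the skb and return 0 to drop, or consume it and return 0 on successful decapsulation:

    struct foo_dev { struct net_device *dev; };         /* illustrative */
    static bool foo_parse_header(struct sk_buff *skb);  /* illustrative */

    static int foo_encap_recv(struct sock *sk, struct sk_buff *skb)
    {
            struct foo_dev *fd = rcu_dereference_sk_user_data(sk);

            if (!fd)
                    return 1;               /* not ours: pass up to UDP */

            if (!foo_parse_header(skb)) {
                    kfree_skb(skb);         /* drop: consume skb, report 0 */
                    return 0;
            }

            skb->dev = fd->dev;
            netif_rx(skb);                  /* decapsulated: hand to the stack */
            return 0;
    }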
+
+static int gtp_dev_init(struct net_device *dev)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+
+ gtp->dev = dev;
+
+ dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void gtp_dev_uninit(struct net_device *dev)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+
+ gtp_encap_disable(gtp);
+ free_percpu(dev->tstats);
+}
+
+static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
+ const struct sock *sk, __be32 daddr)
+{
+ memset(fl4, 0, sizeof(*fl4));
+ fl4->flowi4_oif = sk->sk_bound_dev_if;
+ fl4->daddr = daddr;
+ fl4->saddr = inet_sk(sk)->inet_saddr;
+ fl4->flowi4_tos = RT_CONN_FLAGS(sk);
+ fl4->flowi4_proto = sk->sk_protocol;
+
+ return ip_route_output_key(net, fl4);
+}
+
+static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
+{
+ int payload_len = skb->len;
+ struct gtp0_header *gtp0;
+
+ gtp0 = (struct gtp0_header *) skb_push(skb, sizeof(*gtp0));
+
+ gtp0->flags = 0x1e; /* v0, GTP-non-prime. */
+ gtp0->type = GTP_TPDU;
+ gtp0->length = htons(payload_len);
+ gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
+ gtp0->flow = htons(pctx->u.v0.flow);
+ gtp0->number = 0xff;
+ gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff;
+ gtp0->tid = cpu_to_be64(pctx->u.v0.tid);
+}
+
+static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
+{
+ int payload_len = skb->len;
+ struct gtp1_header *gtp1;
+
+ gtp1 = (struct gtp1_header *) skb_push(skb, sizeof(*gtp1));
+
+ /* Bits 8 7 6 5 4 3 2 1
+ * +--+--+--+--+--+--+--+--+
+ * |version |PT| 1| E| S|PN|
+ * +--+--+--+--+--+--+--+--+
+ * 0 0 1 1 1 0 0 0
+ */
+ gtp1->flags = 0x38; /* v1, GTP-non-prime. */
+ gtp1->type = GTP_TPDU;
+ gtp1->length = htons(payload_len);
+ gtp1->tid = htonl(pctx->u.v1.o_tei);
+
+ /* TODO: Support for the extension header, sequence number and N-PDU.
+ *	 Update the length field if any of them is present.
+ */
+}
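
For reference, a sketch of the optional area the TODO refers to: per TS 29.060 the 4 bytes following the mandatory GTPv1 header carry the sequence number, the N-PDU number and the next extension header type, in that order (this struct is illustrative and not part of the kernel headers):

    struct gtp1_header_opt {        /* illustrative; not in <linux/gtp.h> */
            __be16  seq;            /* meaningful if the S flag is set */
            u8      npdu;           /* meaningful if the PN flag is set */
            u8      next_ext;       /* meaningful if the E flag is set */
    } __attribute__((packed));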
+
+struct gtp_pktinfo {
+ struct sock *sk;
+ struct iphdr *iph;
+ struct flowi4 fl4;
+ struct rtable *rt;
+ struct pdp_ctx *pctx;
+ struct net_device *dev;
+ __be16 gtph_port;
+};
+
+static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
+{
+ switch (pktinfo->pctx->gtp_version) {
+ case GTP_V0:
+ pktinfo->gtph_port = htons(GTP0_PORT);
+ gtp0_push_header(skb, pktinfo->pctx);
+ break;
+ case GTP_V1:
+ pktinfo->gtph_port = htons(GTP1U_PORT);
+ gtp1_push_header(skb, pktinfo->pctx);
+ break;
+ }
+}
+
+static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
+ struct sock *sk, struct iphdr *iph,
+ struct pdp_ctx *pctx, struct rtable *rt,
+ struct flowi4 *fl4,
+ struct net_device *dev)
+{
+ pktinfo->sk = sk;
+ pktinfo->iph = iph;
+ pktinfo->pctx = pctx;
+ pktinfo->rt = rt;
+ pktinfo->fl4 = *fl4;
+ pktinfo->dev = dev;
+}
+
+static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
+ struct gtp_pktinfo *pktinfo)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+ struct pdp_ctx *pctx;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct iphdr *iph;
+ struct sock *sk;
+ __be16 df;
+ int mtu;
+
+ /* Read the IP destination address and resolve the PDP context.
+ * Prepend PDP header with TEI/TID from PDP ctx.
+ */
+ iph = ip_hdr(skb);
+ pctx = ipv4_pdp_find(gtp, iph->daddr);
+ if (!pctx) {
+ netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
+ &iph->daddr);
+ return -ENOENT;
+ }
+ netdev_dbg(dev, "found PDP context %p\n", pctx);
+
+ switch (pctx->gtp_version) {
+ case GTP_V0:
+ if (gtp->sock0)
+ sk = gtp->sock0->sk;
+ else
+ sk = NULL;
+ break;
+ case GTP_V1:
+ if (gtp->sock1u)
+ sk = gtp->sock1u->sk;
+ else
+ sk = NULL;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (!sk) {
+ netdev_dbg(dev, "no userspace socket is available, skip\n");
+ return -ENOENT;
+ }
+
+ rt = ip4_route_output_gtp(sock_net(sk), &fl4, gtp->sock0->sk,
+ pctx->sgsn_addr_ip4.s_addr);
+ if (IS_ERR(rt)) {
+ netdev_dbg(dev, "no route to SGSN %pI4\n",
+ &pctx->sgsn_addr_ip4.s_addr);
+ dev->stats.tx_carrier_errors++;
+ goto err;
+ }
+
+ if (rt->dst.dev == dev) {
+ netdev_dbg(dev, "circular route to SGSN %pI4\n",
+ &pctx->sgsn_addr_ip4.s_addr);
+ dev->stats.collisions++;
+ goto err_rt;
+ }
+
+ skb_dst_drop(skb);
+
+ /* This is similar to tnl_update_pmtu(). */
+ df = iph->frag_off;
+ if (df) {
+ mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
+ sizeof(struct iphdr) - sizeof(struct udphdr);
+ switch (pctx->gtp_version) {
+ case GTP_V0:
+ mtu -= sizeof(struct gtp0_header);
+ break;
+ case GTP_V1:
+ mtu -= sizeof(struct gtp1_header);
+ break;
+ }
+ } else {
+ mtu = dst_mtu(&rt->dst);
+ }
+
+ rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
+
+ if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
+ mtu < ntohs(iph->tot_len)) {
+ netdev_dbg(dev, "packet too big, fragmentation needed\n");
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
+ goto err_rt;
+ }
+
+ gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev);
+ gtp_push_header(skb, pktinfo);
+
+ return 0;
+err_rt:
+ ip_rt_put(rt);
+err:
+ return -EBADMSG;
+}
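
A worked instance of the DF-branch arithmetic above, with illustrative numbers:

    /* Illustrative numbers: with a 1500-byte route MTU and the gtp
     * device's hard_header_len of 0 (set in gtp_link_setup() below):
     *
     *   GTPv1: 1500 - 0 - 20 (iphdr) - 8 (udphdr) -  8 (gtp1_header) = 1464
     *   GTPv0: 1500 - 0 - 20 (iphdr) - 8 (udphdr) - 20 (gtp0_header) = 1452
     *
     * A DF-marked inner packet longer than that is answered with the
     * ICMP_FRAG_NEEDED above instead of being tunnelled.
     */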
+
+static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned int proto = ntohs(skb->protocol);
+ struct gtp_pktinfo pktinfo;
+ int err;
+
+ /* Ensure there is sufficient headroom. */
+ if (skb_cow_head(skb, dev->needed_headroom))
+ goto tx_err;
+
+ skb_reset_inner_headers(skb);
+
+ /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
+ rcu_read_lock();
+ switch (proto) {
+ case ETH_P_IP:
+ err = gtp_build_skb_ip4(skb, dev, &pktinfo);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ rcu_read_unlock();
+
+ if (err < 0)
+ goto tx_err;
+
+ switch (proto) {
+ case ETH_P_IP:
+ netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
+ &pktinfo.iph->saddr, &pktinfo.iph->daddr);
+ udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
+ pktinfo.fl4.saddr, pktinfo.fl4.daddr,
+ pktinfo.iph->tos,
+ ip4_dst_hoplimit(&pktinfo.rt->dst),
+ htons(IP_DF),
+ pktinfo.gtph_port, pktinfo.gtph_port,
+ true, false);
+ break;
+ }
+
+ return NETDEV_TX_OK;
+tx_err:
+ dev->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops gtp_netdev_ops = {
+ .ndo_init = gtp_dev_init,
+ .ndo_uninit = gtp_dev_uninit,
+ .ndo_start_xmit = gtp_dev_xmit,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
+};
+
+static void gtp_link_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &gtp_netdev_ops;
+ dev->destructor = free_netdev;
+
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+
+ /* Zero header length. */
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+
+ dev->priv_flags |= IFF_NO_QUEUE;
+ dev->features |= NETIF_F_LLTX;
+ netif_keep_dst(dev);
+
+ /* Assume largest header, i.e. GTPv0. */
+ dev->needed_headroom = LL_MAX_HEADER +
+ sizeof(struct iphdr) +
+ sizeof(struct udphdr) +
+ sizeof(struct gtp0_header);
+}
+
+static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
+static void gtp_hashtable_free(struct gtp_dev *gtp);
+static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
+ int fd_gtp0, int fd_gtp1, struct net *src_net);
+
+static int gtp_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ int hashsize, err, fd0, fd1;
+ struct gtp_dev *gtp;
+ struct gtp_net *gn;
+
+ if (!data[IFLA_GTP_FD0] || !data[IFLA_GTP_FD1])
+ return -EINVAL;
+
+ gtp = netdev_priv(dev);
+
+ fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
+ fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
+
+ err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
+ if (err < 0)
+ goto out_err;
+
+ if (!data[IFLA_GTP_PDP_HASHSIZE])
+ hashsize = 1024;
+ else
+ hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
+
+ err = gtp_hashtable_new(gtp, hashsize);
+ if (err < 0)
+ goto out_encap;
+
+ err = register_netdevice(dev);
+ if (err < 0) {
+ netdev_dbg(dev, "failed to register new netdev %d\n", err);
+ goto out_hashtable;
+ }
+
+ gn = net_generic(dev_net(dev), gtp_net_id);
+ list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+
+ netdev_dbg(dev, "registered new GTP interface\n");
+
+ return 0;
+
+out_hashtable:
+ gtp_hashtable_free(gtp);
+out_encap:
+ gtp_encap_disable(gtp);
+out_err:
+ return err;
+}
+
+static void gtp_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+
+ gtp_encap_disable(gtp);
+ gtp_hashtable_free(gtp);
+ list_del_rcu(&gtp->list);
+ unregister_netdevice_queue(dev, head);
+}
+
+static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
+ [IFLA_GTP_FD0] = { .type = NLA_U32 },
+ [IFLA_GTP_FD1] = { .type = NLA_U32 },
+ [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
+};
+
+static int gtp_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (!data)
+ return -EINVAL;
+
+ return 0;
+}
+
+static size_t gtp_get_size(const struct net_device *dev)
+{
+ return nla_total_size(sizeof(__u32)); /* IFLA_GTP_PDP_HASHSIZE */
+}
+
+static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+
+ if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops gtp_link_ops __read_mostly = {
+ .kind = "gtp",
+ .maxtype = IFLA_GTP_MAX,
+ .policy = gtp_policy,
+ .priv_size = sizeof(struct gtp_dev),
+ .setup = gtp_link_setup,
+ .validate = gtp_validate,
+ .newlink = gtp_newlink,
+ .dellink = gtp_dellink,
+ .get_size = gtp_get_size,
+ .fill_info = gtp_fill_info,
+};
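
The two mandatory attributes, IFLA_GTP_FD0 and IFLA_GTP_FD1, are file descriptors of already bound UDP sockets; a minimal userspace sketch of preparing them, assuming the GTP well-known ports (3386 for GTPv0, 2152 for GTPv1-U):

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    static int gtp_bind_sock(uint16_t port)
    {
            struct sockaddr_in addr = {
                    .sin_family = AF_INET,
                    .sin_port   = htons(port),
                    .sin_addr   = { .s_addr = htonl(INADDR_ANY) },
            };
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return -1;
            if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;  /* pass as IFLA_GTP_FD0 (3386) or IFLA_GTP_FD1 (2152) */
    }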
+
+static struct net *gtp_genl_get_net(struct net *src_net, struct nlattr *tb[])
+{
+ struct net *net;
+
+ /* Examine the link attributes and figure out which network namespace
+ * we are talking about.
+ */
+ if (tb[GTPA_NET_NS_FD])
+ net = get_net_ns_by_fd(nla_get_u32(tb[GTPA_NET_NS_FD]));
+ else
+ net = get_net(src_net);
+
+ return net;
+}
+
+static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
+{
+ int i;
+
+ gtp->addr_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
+ if (gtp->addr_hash == NULL)
+ return -ENOMEM;
+
+ gtp->tid_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
+ if (gtp->tid_hash == NULL)
+ goto err1;
+
+ gtp->hash_size = hsize;
+
+ for (i = 0; i < hsize; i++) {
+ INIT_HLIST_HEAD(&gtp->addr_hash[i]);
+ INIT_HLIST_HEAD(&gtp->tid_hash[i]);
+ }
+ return 0;
+err1:
+ kfree(gtp->addr_hash);
+ return -ENOMEM;
+}
+
+static void gtp_hashtable_free(struct gtp_dev *gtp)
+{
+ struct pdp_ctx *pctx;
+ int i;
+
+ for (i = 0; i < gtp->hash_size; i++) {
+ hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
+ hlist_del_rcu(&pctx->hlist_tid);
+ hlist_del_rcu(&pctx->hlist_addr);
+ kfree_rcu(pctx, rcu_head);
+ }
+ }
+ synchronize_rcu();
+ kfree(gtp->addr_hash);
+ kfree(gtp->tid_hash);
+}
+
+static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
+ int fd_gtp0, int fd_gtp1, struct net *src_net)
+{
+ struct udp_tunnel_sock_cfg tuncfg = {NULL};
+ struct socket *sock0, *sock1u;
+ int err;
+
+ netdev_dbg(dev, "enable gtp on %d, %d\n", fd_gtp0, fd_gtp1);
+
+ sock0 = sockfd_lookup(fd_gtp0, &err);
+ if (sock0 == NULL) {
+ netdev_dbg(dev, "socket fd=%d not found (gtp0)\n", fd_gtp0);
+ return -ENOENT;
+ }
+
+ if (sock0->sk->sk_protocol != IPPROTO_UDP) {
+ netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0);
+ err = -EINVAL;
+ goto err1;
+ }
+
+ sock1u = sockfd_lookup(fd_gtp1, &err);
+ if (sock1u == NULL) {
+ netdev_dbg(dev, "socket fd=%d not found (gtp1u)\n", fd_gtp1);
+ err = -ENOENT;
+ goto err1;
+ }
+
+ if (sock1u->sk->sk_protocol != IPPROTO_UDP) {
+ netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp1);
+ err = -EINVAL;
+ goto err2;
+ }
+
+ netdev_dbg(dev, "enable gtp on %p, %p\n", sock0, sock1u);
+
+ gtp->sock0 = sock0;
+ gtp->sock1u = sock1u;
+ gtp->net = src_net;
+
+ tuncfg.sk_user_data = gtp;
+ tuncfg.encap_rcv = gtp_encap_recv;
+ tuncfg.encap_destroy = gtp_encap_destroy;
+
+ tuncfg.encap_type = UDP_ENCAP_GTP0;
+ setup_udp_tunnel_sock(sock_net(gtp->sock0->sk), gtp->sock0, &tuncfg);
+
+ tuncfg.encap_type = UDP_ENCAP_GTP1U;
+ setup_udp_tunnel_sock(sock_net(gtp->sock1u->sk), gtp->sock1u, &tuncfg);
+
+ err = 0;
+err2:
+ sockfd_put(sock1u);
+err1:
+ sockfd_put(sock0);
+ return err;
+}
+
+static struct net_device *gtp_find_dev(struct net *net, int ifindex)
+{
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ struct gtp_dev *gtp;
+
+ list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
+ if (ifindex == gtp->dev->ifindex)
+ return gtp->dev;
+ }
+ return NULL;
+}
+
+static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
+{
+ pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
+ pctx->af = AF_INET;
+ pctx->sgsn_addr_ip4.s_addr =
+ nla_get_be32(info->attrs[GTPA_SGSN_ADDRESS]);
+ pctx->ms_addr_ip4.s_addr =
+ nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+
+ switch (pctx->gtp_version) {
+ case GTP_V0:
+ /* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
+ * label needs to be the same for uplink and downlink packets,
+ * so let's annotate this.
+ */
+ pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
+ pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
+ break;
+ case GTP_V1:
+ pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
+ pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
+ break;
+ default:
+ break;
+ }
+}
+
+static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+ u32 hash_ms, hash_tid = 0;
+ struct pdp_ctx *pctx;
+ bool found = false;
+ __be32 ms_addr;
+
+ ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+ hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
+
+ hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
+ if (pctx->ms_addr_ip4.s_addr == ms_addr) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found) {
+ if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+ if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
+ return -EOPNOTSUPP;
+
+ ipv4_pdp_fill(pctx, info);
+
+ if (pctx->gtp_version == GTP_V0)
+ netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
+ pctx->u.v0.tid, pctx);
+ else if (pctx->gtp_version == GTP_V1)
+ netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
+ pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
+
+ return 0;
+
+ }
+
+ pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
+ if (pctx == NULL)
+ return -ENOMEM;
+
+ ipv4_pdp_fill(pctx, info);
+ atomic_set(&pctx->tx_seq, 0);
+
+ switch (pctx->gtp_version) {
+ case GTP_V0:
+ /* TS 09.60: "The flow label identifies unambiguously a GTP
+ * flow." We use the tid for this instead; I cannot find a
+ * situation in which this doesn't unambiguously identify the
+ * PDP context.
+ */
+ hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
+ break;
+ case GTP_V1:
+ hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
+ break;
+ }
+
+ hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
+ hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);
+
+ switch (pctx->gtp_version) {
+ case GTP_V0:
+ netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
+ pctx->u.v0.tid, &pctx->sgsn_addr_ip4,
+ &pctx->ms_addr_ip4, pctx);
+ break;
+ case GTP_V1:
+ netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x sgsn=%pI4 ms=%pI4 (pdp=%p)\n",
+ pctx->u.v1.i_tei, pctx->u.v1.o_tei,
+ &pctx->sgsn_addr_ip4, &pctx->ms_addr_ip4, pctx);
+ break;
+ }
+
+ return 0;
+}
+
+static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_device *dev;
+ struct net *net;
+
+ if (!info->attrs[GTPA_VERSION] ||
+ !info->attrs[GTPA_LINK] ||
+ !info->attrs[GTPA_SGSN_ADDRESS] ||
+ !info->attrs[GTPA_MS_ADDRESS])
+ return -EINVAL;
+
+ switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
+ case GTP_V0:
+ if (!info->attrs[GTPA_TID] ||
+ !info->attrs[GTPA_FLOW])
+ return -EINVAL;
+ break;
+ case GTP_V1:
+ if (!info->attrs[GTPA_I_TEI] ||
+ !info->attrs[GTPA_O_TEI])
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+
+ /* Check if there's an existing gtpX device to configure */
+ dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
+ if (dev == NULL) {
+ put_net(net);
+ return -ENODEV;
+ }
+ put_net(net);
+
+ return ipv4_pdp_add(dev, info);
+}
+
+static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_device *dev;
+ struct pdp_ctx *pctx;
+ struct gtp_dev *gtp;
+ struct net *net;
+
+ if (!info->attrs[GTPA_VERSION] ||
+ !info->attrs[GTPA_LINK])
+ return -EINVAL;
+
+ net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+
+ /* Check if there's an existing gtpX device to configure */
+ dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
+ if (dev == NULL) {
+ put_net(net);
+ return -ENODEV;
+ }
+ put_net(net);
+
+ gtp = netdev_priv(dev);
+
+ switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
+ case GTP_V0:
+ if (!info->attrs[GTPA_TID])
+ return -EINVAL;
+ pctx = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID]));
+ break;
+ case GTP_V1:
+ if (!info->attrs[GTPA_I_TEI])
+ return -EINVAL;
+ pctx = gtp1_pdp_find(gtp, nla_get_u32(info->attrs[GTPA_I_TEI]));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (pctx == NULL)
+ return -ENOENT;
+
+ if (pctx->gtp_version == GTP_V0)
+ netdev_dbg(dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
+ pctx->u.v0.tid, pctx);
+ else if (pctx->gtp_version == GTP_V1)
+ netdev_dbg(dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
+ pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
+
+ hlist_del_rcu(&pctx->hlist_tid);
+ hlist_del_rcu(&pctx->hlist_addr);
+ kfree_rcu(pctx, rcu_head);
+
+ return 0;
+}
+
+static struct genl_family gtp_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = "gtp",
+ .version = 0,
+ .hdrsize = 0,
+ .maxattr = GTPA_MAX,
+ .netnsok = true,
+};
+
+static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
+ u32 type, struct pdp_ctx *pctx)
+{
+ void *genlh;
+
+ genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
+ type);
+ if (genlh == NULL)
+ goto nlmsg_failure;
+
+ if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
+ nla_put_be32(skb, GTPA_SGSN_ADDRESS, pctx->sgsn_addr_ip4.s_addr) ||
+ nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
+ goto nla_put_failure;
+
+ switch (pctx->gtp_version) {
+ case GTP_V0:
+ if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
+ nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
+ goto nla_put_failure;
+ break;
+ case GTP_V1:
+ if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
+ nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
+ goto nla_put_failure;
+ break;
+ }
+ genlmsg_end(skb, genlh);
+ return 0;
+
+nlmsg_failure:
+nla_put_failure:
+ genlmsg_cancel(skb, genlh);
+ return -EMSGSIZE;
+}
+
+static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+ struct pdp_ctx *pctx = NULL;
+ struct net_device *dev;
+ struct sk_buff *skb2;
+ struct gtp_dev *gtp;
+ u32 gtp_version;
+ struct net *net;
+ int err;
+
+ if (!info->attrs[GTPA_VERSION] ||
+ !info->attrs[GTPA_LINK])
+ return -EINVAL;
+
+ gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
+ switch (gtp_version) {
+ case GTP_V0:
+ case GTP_V1:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+
+ /* Check if there's an existing gtpX device to configure */
+ dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
+ if (dev == NULL) {
+ put_net(net);
+ return -ENODEV;
+ }
+ put_net(net);
+
+ gtp = netdev_priv(dev);
+
+ rcu_read_lock();
+ if (gtp_version == GTP_V0 &&
+ info->attrs[GTPA_TID]) {
+ u64 tid = nla_get_u64(info->attrs[GTPA_TID]);
+
+ pctx = gtp0_pdp_find(gtp, tid);
+ } else if (gtp_version == GTP_V1 &&
+ info->attrs[GTPA_I_TEI]) {
+ u32 tid = nla_get_u32(info->attrs[GTPA_I_TEI]);
+
+ pctx = gtp1_pdp_find(gtp, tid);
+ } else if (info->attrs[GTPA_MS_ADDRESS]) {
+ __be32 ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+
+ pctx = ipv4_pdp_find(gtp, ip);
+ }
+
+ if (pctx == NULL) {
+ err = -ENOENT;
+ goto err_unlock;
+ }
+
+ skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (skb2 == NULL) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
+
+ err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
+ info->snd_seq, info->nlhdr->nlmsg_type, pctx);
+ if (err < 0)
+ goto err_unlock_free;
+
+ rcu_read_unlock();
+ return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);
+
+err_unlock_free:
+ kfree_skb(skb2);
+err_unlock:
+ rcu_read_unlock();
+ return err;
+}
+
+static int gtp_genl_dump_pdp(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
+ struct net *net = sock_net(skb->sk);
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ unsigned long tid = cb->args[1];
+ int i, k = cb->args[0], ret;
+ struct pdp_ctx *pctx;
+
+ if (cb->args[4])
+ return 0;
+
+ list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
+ if (last_gtp && last_gtp != gtp)
+ continue;
+ else
+ last_gtp = NULL;
+
+ for (i = k; i < gtp->hash_size; i++) {
+ hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
+ if (tid && tid != pctx->u.tid)
+ continue;
+ else
+ tid = 0;
+
+ ret = gtp_genl_fill_info(skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ cb->nlh->nlmsg_type, pctx);
+ if (ret < 0) {
+ cb->args[0] = i;
+ cb->args[1] = pctx->u.tid;
+ cb->args[2] = (unsigned long)gtp;
+ goto out;
+ }
+ }
+ }
+ }
+ cb->args[4] = 1;
+out:
+ return skb->len;
+}
+
+static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
+ [GTPA_LINK] = { .type = NLA_U32, },
+ [GTPA_VERSION] = { .type = NLA_U32, },
+ [GTPA_TID] = { .type = NLA_U64, },
+ [GTPA_SGSN_ADDRESS] = { .type = NLA_U32, },
+ [GTPA_MS_ADDRESS] = { .type = NLA_U32, },
+ [GTPA_FLOW] = { .type = NLA_U16, },
+ [GTPA_NET_NS_FD] = { .type = NLA_U32, },
+ [GTPA_I_TEI] = { .type = NLA_U32, },
+ [GTPA_O_TEI] = { .type = NLA_U32, },
+};
+
+static const struct genl_ops gtp_genl_ops[] = {
+ {
+ .cmd = GTP_CMD_NEWPDP,
+ .doit = gtp_genl_new_pdp,
+ .policy = gtp_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = GTP_CMD_DELPDP,
+ .doit = gtp_genl_del_pdp,
+ .policy = gtp_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = GTP_CMD_GETPDP,
+ .doit = gtp_genl_get_pdp,
+ .dumpit = gtp_genl_dump_pdp,
+ .policy = gtp_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
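
Grounded in the checks in gtp_genl_new_pdp() above, the attribute set a GTP_CMD_NEWPDP request carries for a GTPv1-U tunnel looks like the following (userspace normally drives this family through a helper library rather than open-coding netlink):

    /*   GTPA_VERSION      = GTP_V1
     *   GTPA_LINK         = ifindex of the gtp device
     *   GTPA_SGSN_ADDRESS = SGSN IPv4 address (be32)
     *   GTPA_MS_ADDRESS   = mobile subscriber IPv4 address (be32)
     *   GTPA_I_TEI        = incoming tunnel endpoint identifier
     *   GTPA_O_TEI        = outgoing tunnel endpoint identifier
     * (GTPv0 instead requires GTPA_TID and GTPA_FLOW.)
     */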
+
+static int __net_init gtp_net_init(struct net *net)
+{
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+
+ INIT_LIST_HEAD(&gn->gtp_dev_list);
+ return 0;
+}
+
+static void __net_exit gtp_net_exit(struct net *net)
+{
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ struct gtp_dev *gtp;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+ gtp_dellink(gtp->dev, &list);
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
+static struct pernet_operations gtp_net_ops = {
+ .init = gtp_net_init,
+ .exit = gtp_net_exit,
+ .id = &gtp_net_id,
+ .size = sizeof(struct gtp_net),
+};
+
+static int __init gtp_init(void)
+{
+ int err;
+
+ get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
+
+ err = rtnl_link_register(&gtp_link_ops);
+ if (err < 0)
+ goto error_out;
+
+ err = genl_register_family_with_ops(&gtp_genl_family, gtp_genl_ops);
+ if (err < 0)
+ goto unreg_rtnl_link;
+
+ err = register_pernet_subsys(&gtp_net_ops);
+ if (err < 0)
+ goto unreg_genl_family;
+
+ pr_info("GTP module loaded (pdp ctx size %Zd bytes)\n",
+ sizeof(struct pdp_ctx));
+ return 0;
+
+unreg_genl_family:
+ genl_unregister_family(&gtp_genl_family);
+unreg_rtnl_link:
+ rtnl_link_unregister(&gtp_link_ops);
+error_out:
+ pr_err("error loading GTP module\n");
+ return err;
+}
+late_initcall(gtp_init);
+
+static void __exit gtp_fini(void)
+{
+ unregister_pernet_subsys(&gtp_net_ops);
+ genl_unregister_family(&gtp_genl_family);
+ rtnl_link_unregister(&gtp_link_ops);
+
+ pr_info("GTP module unloaded\n");
+}
+module_exit(gtp_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
+MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
+MODULE_ALIAS_RTNL_LINK("gtp");
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 72c9f1f35..78dbc4454 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -635,10 +635,10 @@ static int receive(struct net_device *dev, int cnt)
#ifdef __i386__
#include <asm/msr.h>
-#define GETTICK(x) \
-({ \
- if (cpu_has_tsc) \
- x = (unsigned int)rdtsc(); \
+#define GETTICK(x) \
+({ \
+ if (boot_cpu_has(X86_FEATURE_TSC)) \
+ x = (unsigned int)rdtsc(); \
})
#else /* __i386__ */
#define GETTICK(x)
@@ -780,8 +780,10 @@ static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- if (bc->skb)
- return NETDEV_TX_LOCKED;
+ if (bc->skb) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
/* strip KISS byte */
if (skb->len >= HDLCDRV_MAXFLEN+1 || skb->len < 3) {
dev_kfree_skb(skb);
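
Both hamradio hunks in this area follow the same retirement of NETDEV_TX_LOCKED: an LLTX driver whose single transmit slot is busy must now consume the skb and report NETDEV_TX_OK itself. A hedged sketch with illustrative names:

    struct foo_state { struct sk_buff *skb; };  /* illustrative */

    static netdev_tx_t foo_send_packet(struct sk_buff *skb,
                                       struct net_device *dev)
    {
            struct foo_state *priv = netdev_priv(dev);

            if (priv->skb) {                /* transmitter still busy */
                    dev_kfree_skb(skb);     /* consume the skb ourselves */
                    return NETDEV_TX_OK;    /* never NETDEV_TX_LOCKED anymore */
            }
            priv->skb = skb;                /* queue for the interrupt path */
            return NETDEV_TX_OK;
    }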
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 49fe59b18..4bad0b894 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -412,8 +412,10 @@ static netdev_tx_t hdlcdrv_send_packet(struct sk_buff *skb,
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- if (sm->skb)
- return NETDEV_TX_LOCKED;
+ if (sm->skb) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
netif_stop_queue(dev);
sm->skb = skb;
return NETDEV_TX_OK;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 85828f153..1dfe2304d 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -519,7 +519,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
dev->stats.tx_packets++;
dev->stats.tx_bytes += actual;
- ax->dev->trans_start = jiffies;
+ netif_trans_update(ax->dev);
ax->xleft = count - actual;
ax->xhead = ax->xbuff + actual;
}
@@ -542,7 +542,7 @@ static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev)
* Maybe we must check the transmitter timeout here?
* 14 Oct 1994 Dmitry Gorodchanin.
*/
- if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
+ if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) {
/* 20 sec timeout not reached */
return NETDEV_TX_BUSY;
}
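
The mkiss, scc and yam hunks all switch to the same accessor pair: netif_trans_update() stamps the (now per-queue) transmit-start time and dev_trans_start() reads it back, replacing direct dev->trans_start accesses. A combined sketch, names illustrative:

    static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            /* read side: did the previous transmission start < 20 s ago? */
            if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ))
                    return NETDEV_TX_BUSY;

            /* ... hand the skb to the hardware here ... */

            netif_trans_update(dev);    /* write side: started transmitting now */
            return NETDEV_TX_OK;
    }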
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index ce88df33f..b8083161e 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1669,7 +1669,7 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb_del);
}
skb_queue_tail(&scc->tx_queue, skb);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/*
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 1c1d5e105..b1f395e2f 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -601,7 +601,7 @@ static netdev_tx_t yam_send_packet(struct sk_buff *skb,
return ax25_ip_xmit(skb);
skb_queue_tail(&yp->send_queue, skb);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 8b3bd8ecd..c270c5a54 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -158,7 +158,7 @@ enum rndis_device_state {
};
struct rndis_device {
- struct netvsc_device *net_dev;
+ struct net_device *ndev;
enum rndis_device_state state;
bool link_state;
@@ -202,6 +202,8 @@ int rndis_filter_receive(struct hv_device *dev,
int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
+void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
+
#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
#define NVSP_PROTOCOL_VERSION_1 2
@@ -641,10 +643,18 @@ struct netvsc_reconfig {
u32 event;
};
+struct garp_wrk {
+ struct work_struct dwrk;
+ struct net_device *netdev;
+ struct netvsc_device *netvsc_dev;
+};
+
/* The context of the netvsc device */
struct net_device_context {
/* point back to our device context */
struct hv_device *device_ctx;
+ /* netvsc_device */
+ struct netvsc_device *nvdev;
/* reconfigure work */
struct delayed_work dwork;
/* last reconfig time */
@@ -656,6 +666,7 @@ struct net_device_context {
struct work_struct work;
u32 msg_enable; /* debug level */
+ struct garp_wrk gwrk;
struct netvsc_stats __percpu *tx_stats;
struct netvsc_stats __percpu *rx_stats;
@@ -663,17 +674,17 @@ struct net_device_context {
/* Ethtool settings */
u8 duplex;
u32 speed;
+
+ /* the device is going away */
+ bool start_remove;
};
/* Per netvsc device */
struct netvsc_device {
- struct hv_device *dev;
-
u32 nvsp_version;
atomic_t num_outstanding_sends;
wait_queue_head_t wait_drain;
- bool start_remove;
bool destroy;
/* Receive buffer allocated by us but manages by NetVSP */
@@ -699,8 +710,6 @@ struct netvsc_device {
struct nvsp_message revoke_packet;
/* unsigned char HwMacAddr[HW_MACADDR_LEN]; */
- struct net_device *ndev;
-
struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
u32 send_table[VRSS_SEND_TAB_SIZE];
u32 max_chn;
@@ -723,13 +732,15 @@ struct netvsc_device {
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
- /* The net device context */
- struct net_device_context *nd_ctx;
-
/* 1: allocated, serial number is valid. 0: not allocated */
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
+ atomic_t open_cnt;
+ /* State to manage the associated VF interface. */
+ bool vf_inject;
+ struct net_device *vf_netdev;
+ atomic_t vf_use_cnt;
};
/* NdisInitialize message */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index ec313fc08..719cb3578 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -33,11 +33,36 @@
#include "hyperv_net.h"
+/*
+ * Switch the data path from the synthetic interface to the VF
+ * interface.
+ */
+void netvsc_switch_datapath(struct net_device *ndev, bool vf)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct hv_device *dev = net_device_ctx->device_ctx;
+ struct netvsc_device *nv_dev = net_device_ctx->nvdev;
+ struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
+
+ memset(init_pkt, 0, sizeof(struct nvsp_message));
+ init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
+ if (vf)
+ init_pkt->msg.v4_msg.active_dp.active_datapath =
+ NVSP_DATAPATH_VF;
+ else
+ init_pkt->msg.v4_msg.active_dp.active_datapath =
+ NVSP_DATAPATH_SYNTHETIC;
+
+ vmbus_sendpacket(dev->channel, init_pkt,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_pkt,
+ VM_PKT_DATA_INBAND, 0);
+}
+
-static struct netvsc_device *alloc_net_device(struct hv_device *device)
+static struct netvsc_device *alloc_net_device(void)
{
struct netvsc_device *net_device;
- struct net_device *ndev = hv_get_drvdata(device);
net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
if (!net_device)
@@ -50,14 +75,15 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
}
init_waitqueue_head(&net_device->wait_drain);
- net_device->start_remove = false;
net_device->destroy = false;
- net_device->dev = device;
- net_device->ndev = ndev;
+ atomic_set(&net_device->open_cnt, 0);
+ atomic_set(&net_device->vf_use_cnt, 0);
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
- hv_set_drvdata(device, net_device);
+ net_device->vf_netdev = NULL;
+ net_device->vf_inject = false;
+
return net_device;
}
@@ -69,9 +95,10 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
- struct netvsc_device *net_device;
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_device = net_device_ctx->nvdev;
- net_device = hv_get_drvdata(device);
if (net_device && net_device->destroy)
net_device = NULL;
@@ -80,9 +107,9 @@ static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
- struct netvsc_device *net_device;
-
- net_device = hv_get_drvdata(device);
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_device = net_device_ctx->nvdev;
if (!net_device)
goto get_in_err;
@@ -96,11 +123,13 @@ get_in_err:
}
-static int netvsc_destroy_buf(struct netvsc_device *net_device)
+static int netvsc_destroy_buf(struct hv_device *device)
{
struct nvsp_message *revoke_packet;
int ret = 0;
- struct net_device *ndev = net_device->ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_device = net_device_ctx->nvdev;
/*
* If we got a section count, it means we received a
@@ -118,7 +147,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
revoke_packet->msg.v1_msg.
revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
- ret = vmbus_sendpacket(net_device->dev->channel,
+ ret = vmbus_sendpacket(device->channel,
revoke_packet,
sizeof(struct nvsp_message),
(unsigned long)revoke_packet,
@@ -136,8 +165,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
/* Teardown the gpadl on the vsp end */
if (net_device->recv_buf_gpadl_handle) {
- ret = vmbus_teardown_gpadl(net_device->dev->channel,
- net_device->recv_buf_gpadl_handle);
+ ret = vmbus_teardown_gpadl(device->channel,
+ net_device->recv_buf_gpadl_handle);
/* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
@@ -178,7 +207,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
revoke_packet->msg.v1_msg.revoke_send_buf.id =
NETVSC_SEND_BUFFER_ID;
- ret = vmbus_sendpacket(net_device->dev->channel,
+ ret = vmbus_sendpacket(device->channel,
revoke_packet,
sizeof(struct nvsp_message),
(unsigned long)revoke_packet,
@@ -194,7 +223,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
}
/* Teardown the gpadl on the vsp end */
if (net_device->send_buf_gpadl_handle) {
- ret = vmbus_teardown_gpadl(net_device->dev->channel,
+ ret = vmbus_teardown_gpadl(device->channel,
net_device->send_buf_gpadl_handle);
/* If we failed here, we might as well return and have a leak
@@ -229,7 +258,7 @@ static int netvsc_init_buf(struct hv_device *device)
net_device = get_outbound_net_device(device);
if (!net_device)
return -ENODEV;
- ndev = net_device->ndev;
+ ndev = hv_get_drvdata(device);
node = cpu_to_node(device->channel->target_cpu);
net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
@@ -406,7 +435,7 @@ static int netvsc_init_buf(struct hv_device *device)
goto exit;
cleanup:
- netvsc_destroy_buf(net_device);
+ netvsc_destroy_buf(device);
exit:
return ret;
@@ -419,6 +448,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
struct nvsp_message *init_packet,
u32 nvsp_ver)
{
+ struct net_device *ndev = hv_get_drvdata(device);
int ret;
unsigned long t;
@@ -452,8 +482,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
/* NVSPv2 or later: Send NDIS config */
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
- init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
- ETH_HLEN;
+ init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
@@ -473,7 +502,6 @@ static int netvsc_connect_vsp(struct hv_device *device)
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
int ndis_version;
- struct net_device *ndev;
u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
int i, num_ver = 4; /* number of different NVSP versions */
@@ -481,7 +509,6 @@ static int netvsc_connect_vsp(struct hv_device *device)
net_device = get_outbound_net_device(device);
if (!net_device)
return -ENODEV;
- ndev = net_device->ndev;
init_packet = &net_device->channel_init_pkt;
@@ -537,9 +564,9 @@ cleanup:
return ret;
}
-static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
+static void netvsc_disconnect_vsp(struct hv_device *device)
{
- netvsc_destroy_buf(net_device);
+ netvsc_destroy_buf(device);
}
/*
@@ -547,24 +574,13 @@ static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
*/
int netvsc_device_remove(struct hv_device *device)
{
- struct netvsc_device *net_device;
- unsigned long flags;
-
- net_device = hv_get_drvdata(device);
-
- netvsc_disconnect_vsp(net_device);
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_device = net_device_ctx->nvdev;
- /*
- * Since we have already drained, we don't need to busy wait
- * as was done in final_release_stor_device()
- * Note that we cannot set the ext pointer to NULL until
- * we have drained - to drain the outgoing packets, we need to
- * allow incoming packets.
- */
+ netvsc_disconnect_vsp(device);
- spin_lock_irqsave(&device->channel->inbound_lock, flags);
- hv_set_drvdata(device, NULL);
- spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
+ net_device_ctx->nvdev = NULL;
/*
* At this point, no one should be accessing net_device
@@ -612,12 +628,11 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
{
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *nvsc_packet;
- struct net_device *ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
u32 send_index;
struct sk_buff *skb;
- ndev = net_device->ndev;
-
nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
(packet->offset8 << 3));
@@ -662,7 +677,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
wake_up(&net_device->wait_drain);
if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
- !net_device->start_remove &&
+ !net_device_ctx->start_remove &&
(hv_ringbuf_avail_percent(&channel->outbound) >
RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
netif_tx_wake_queue(netdev_get_tx_queue(
@@ -746,6 +761,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
}
static inline int netvsc_send_pkt(
+ struct hv_device *device,
struct hv_netvsc_packet *packet,
struct netvsc_device *net_device,
struct hv_page_buffer **pb,
@@ -754,7 +770,7 @@ static inline int netvsc_send_pkt(
struct nvsp_message nvmsg;
u16 q_idx = packet->q_idx;
struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
- struct net_device *ndev = net_device->ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
u64 req_id;
int ret;
struct hv_page_buffer *pgbuf;
@@ -949,7 +965,8 @@ int netvsc_send(struct hv_device *device,
}
if (msd_send) {
- m_ret = netvsc_send_pkt(msd_send, net_device, NULL, msd_skb);
+ m_ret = netvsc_send_pkt(device, msd_send, net_device,
+ NULL, msd_skb);
if (m_ret != 0) {
netvsc_free_send_slot(net_device,
@@ -960,7 +977,7 @@ int netvsc_send(struct hv_device *device,
send_now:
if (cur_send)
- ret = netvsc_send_pkt(cur_send, net_device, pb, skb);
+ ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, section_index);
@@ -976,9 +993,7 @@ static void netvsc_send_recv_completion(struct hv_device *device,
struct nvsp_message recvcompMessage;
int retries = 0;
int ret;
- struct net_device *ndev;
-
- ndev = net_device->ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
recvcompMessage.hdr.msg_type =
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
@@ -1025,11 +1040,9 @@ static void netvsc_receive(struct netvsc_device *net_device,
u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
- struct net_device *ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
void *data;
- ndev = net_device->ndev;
-
/*
* All inbound packets other than send completion should be xfer page
* packet
@@ -1085,14 +1098,13 @@ static void netvsc_send_table(struct hv_device *hdev,
struct nvsp_message *nvmsg)
{
struct netvsc_device *nvscdev;
- struct net_device *ndev;
+ struct net_device *ndev = hv_get_drvdata(hdev);
int i;
u32 count, *tab;
nvscdev = get_outbound_net_device(hdev);
if (!nvscdev)
return;
- ndev = nvscdev->ndev;
count = nvmsg->msg.v5_msg.send_table.count;
if (count != VRSS_SEND_TAB_SIZE) {
@@ -1151,7 +1163,7 @@ void netvsc_channel_cb(void *context)
net_device = get_inbound_net_device(device);
if (!net_device)
return;
- ndev = net_device->ndev;
+ ndev = hv_get_drvdata(device);
buffer = get_per_channel_state(channel);
do {
@@ -1224,30 +1236,19 @@ void netvsc_channel_cb(void *context)
*/
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
- int ret = 0;
+ int i, ret = 0;
int ring_size =
((struct netvsc_device_info *)additional_info)->ring_size;
struct netvsc_device *net_device;
- struct net_device *ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
- net_device = alloc_net_device(device);
+ net_device = alloc_net_device();
if (!net_device)
return -ENOMEM;
net_device->ring_size = ring_size;
- /*
- * Coming into this function, struct net_device * is
- * registered as the driver private data.
- * In alloc_net_device(), we register struct netvsc_device *
- * as the driver private data and stash away struct net_device *
- * in struct netvsc_device *.
- */
- ndev = net_device->ndev;
-
- /* Add netvsc_device context to netvsc_device */
- net_device->nd_ctx = netdev_priv(ndev);
-
/* Initialize the NetVSC channel extension */
init_completion(&net_device->channel_init_wait);
@@ -1266,7 +1267,19 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
/* Channel is opened */
pr_info("hv_netvsc channel opened successfully\n");
- net_device->chn_table[0] = device->channel;
+ /* If we're reopening the device we may have multiple queues, fill the
+ * chn_table with the default channel to use it before subchannels are
+ * opened.
+ */
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++)
+ net_device->chn_table[i] = device->channel;
+
+ /* Writing the nvdev pointer unlocks netvsc_send(); make sure
+ * chn_table is populated first.
+ */
+ wmb();
+
+ net_device_ctx->nvdev = net_device;
/* Connect with the NetVsp */
ret = netvsc_connect_vsp(device);
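
A sketch of the send-side read that the wmb() above pairs with; the real consumer is netvsc_send() reaching the channel table through the just-published nvdev pointer (this fragment is illustrative):

    static struct vmbus_channel *foo_pick_channel(struct net_device_context *ctx,
                                                  u16 q_idx)
    {
            struct netvsc_device *nvdev = ctx->nvdev;

            if (!nvdev)             /* add or remove still in flight */
                    return NULL;
            /* nvdev observed non-NULL: the chn_table[] fill above is visible */
            return nvdev->chn_table[q_idx];
    }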
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index b8121eba3..6a69b5cc9 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -67,18 +67,19 @@ static void do_set_multicast(struct work_struct *w)
{
struct net_device_context *ndevctx =
container_of(w, struct net_device_context, work);
- struct netvsc_device *nvdev;
+ struct hv_device *device_obj = ndevctx->device_ctx;
+ struct net_device *ndev = hv_get_drvdata(device_obj);
+ struct netvsc_device *nvdev = ndevctx->nvdev;
struct rndis_device *rdev;
- nvdev = hv_get_drvdata(ndevctx->device_ctx);
- if (nvdev == NULL || nvdev->ndev == NULL)
+ if (!nvdev)
return;
rdev = nvdev->extension;
if (rdev == NULL)
return;
- if (nvdev->ndev->flags & IFF_PROMISC)
+ if (ndev->flags & IFF_PROMISC)
rndis_filter_set_packet_filter(rdev,
NDIS_PACKET_TYPE_PROMISCUOUS);
else
@@ -99,7 +100,7 @@ static int netvsc_open(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *device_obj = net_device_ctx->device_ctx;
- struct netvsc_device *nvdev;
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
struct rndis_device *rdev;
int ret = 0;
@@ -114,7 +115,6 @@ static int netvsc_open(struct net_device *net)
netif_tx_wake_all_queues(net);
- nvdev = hv_get_drvdata(device_obj);
rdev = nvdev->extension;
if (!rdev->link_state)
netif_carrier_on(net);
@@ -126,7 +126,7 @@ static int netvsc_close(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *device_obj = net_device_ctx->device_ctx;
- struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
int ret;
u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
struct vmbus_channel *chn;
@@ -205,8 +205,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct hv_device *hdev = net_device_ctx->device_ctx;
- struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
+ struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
u32 hash;
u16 q_idx = 0;
@@ -580,7 +579,6 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
struct net_device *net;
struct net_device_context *ndev_ctx;
- struct netvsc_device *net_device;
struct netvsc_reconfig *event;
unsigned long flags;
@@ -590,8 +588,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
return;
- net_device = hv_get_drvdata(device_obj);
- net = net_device->ndev;
+ net = hv_get_drvdata(device_obj);
if (!net || net->reg_state != NETREG_REGISTERED)
return;
@@ -610,42 +607,24 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
-/*
- * netvsc_recv_callback - Callback when we receive a packet from the
- * "wire" on the specified device.
- */
-int netvsc_recv_callback(struct hv_device *device_obj,
+
+static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
struct hv_netvsc_packet *packet,
- void **data,
struct ndis_tcp_ip_checksum_info *csum_info,
- struct vmbus_channel *channel,
- u16 vlan_tci)
+ void *data, u16 vlan_tci)
{
- struct net_device *net;
- struct net_device_context *net_device_ctx;
struct sk_buff *skb;
- struct netvsc_stats *rx_stats;
- net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
- if (!net || net->reg_state != NETREG_REGISTERED) {
- return NVSP_STAT_FAIL;
- }
- net_device_ctx = netdev_priv(net);
- rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
-
- /* Allocate a skb - TODO direct I/O to pages? */
skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
- if (unlikely(!skb)) {
- ++net->stats.rx_dropped;
- return NVSP_STAT_FAIL;
- }
+ if (!skb)
+ return skb;
/*
* Copy to skb. This copy is needed here since the memory pointed by
* hv_netvsc_packet cannot be deallocated
*/
- memcpy(skb_put(skb, packet->total_data_buflen), *data,
- packet->total_data_buflen);
+ memcpy(skb_put(skb, packet->total_data_buflen), data,
+ packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
if (csum_info) {
@@ -663,6 +642,74 @@ int netvsc_recv_callback(struct hv_device *device_obj,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
vlan_tci);
+ return skb;
+}
+
+/*
+ * netvsc_recv_callback - Callback when we receive a packet from the
+ * "wire" on the specified device.
+ */
+int netvsc_recv_callback(struct hv_device *device_obj,
+ struct hv_netvsc_packet *packet,
+ void **data,
+ struct ndis_tcp_ip_checksum_info *csum_info,
+ struct vmbus_channel *channel,
+ u16 vlan_tci)
+{
+ struct net_device *net = hv_get_drvdata(device_obj);
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct sk_buff *skb;
+ struct sk_buff *vf_skb;
+ struct netvsc_stats *rx_stats;
+ struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
+ u32 bytes_recvd = packet->total_data_buflen;
+ int ret = 0;
+
+ if (!net || net->reg_state != NETREG_REGISTERED)
+ return NVSP_STAT_FAIL;
+
+ if (READ_ONCE(netvsc_dev->vf_inject)) {
+ atomic_inc(&netvsc_dev->vf_use_cnt);
+ if (!READ_ONCE(netvsc_dev->vf_inject)) {
+ /*
+ * We raced; just move on.
+ */
+ atomic_dec(&netvsc_dev->vf_use_cnt);
+ goto vf_injection_done;
+ }
+
+ /*
+ * Inject this packet into the VF interface.
+ * On Hyper-V, multicast and broadcast packets
+ * are only delivered on the synthetic interface
+ * (after subjecting these to policy filters on
+ * the host). Deliver these via the VF interface
+ * in the guest.
+ */
+ vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,
+ csum_info, *data, vlan_tci);
+ if (vf_skb != NULL) {
+ ++netvsc_dev->vf_netdev->stats.rx_packets;
+ netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
+ netif_receive_skb(vf_skb);
+ } else {
+ ++net->stats.rx_dropped;
+ ret = NVSP_STAT_FAIL;
+ }
+ atomic_dec(&netvsc_dev->vf_use_cnt);
+ return ret;
+ }
+
+vf_injection_done:
+ net_device_ctx = netdev_priv(net);
+ rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
+
+ /* Allocate a skb - TODO direct I/O to pages? */
+ skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
+ if (unlikely(!skb)) {
+ ++net->stats.rx_dropped;
+ return NVSP_STAT_FAIL;
+ }
skb_record_rx_queue(skb, channel->
offermsg.offer.sub_channel_index);
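
The check/increment/recheck above closes a race with netvsc_vf_down(), which clears vf_inject and then waits for vf_use_cnt to drain: a receiver either bumps the counter before the clear (and is waited for) or sees the clear on the recheck and backs off. The reader side as a hedged userspace sketch, C11 atomics standing in for the kernel's atomic_t (illustrative names):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool vf_inject;
static atomic_int  vf_use_cnt;

/* Returns true if the VF path may be used; pair with vf_put(). */
bool vf_get(void)
{
	if (!atomic_load(&vf_inject))
		return false;
	atomic_fetch_add(&vf_use_cnt, 1);
	if (!atomic_load(&vf_inject)) {
		/* raced with teardown; undo and fall back */
		atomic_fetch_sub(&vf_use_cnt, 1);
		return false;
	}
	return true;
}

void vf_put(void)
{
	atomic_fetch_sub(&vf_use_cnt, 1);
}
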
@@ -692,8 +739,7 @@ static void netvsc_get_channels(struct net_device *net,
struct ethtool_channels *channel)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
- struct hv_device *dev = net_device_ctx->device_ctx;
- struct netvsc_device *nvdev = hv_get_drvdata(dev);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
if (nvdev) {
channel->max_combined = nvdev->max_chn;
@@ -706,14 +752,14 @@ static int netvsc_set_channels(struct net_device *net,
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *dev = net_device_ctx->device_ctx;
- struct netvsc_device *nvdev = hv_get_drvdata(dev);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
struct netvsc_device_info device_info;
u32 num_chn;
u32 max_chn;
int ret = 0;
bool recovering = false;
- if (!nvdev || nvdev->destroy)
+ if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
return -ENODEV;
num_chn = nvdev->num_chn;
@@ -742,14 +788,11 @@ static int netvsc_set_channels(struct net_device *net,
goto out;
do_set:
- nvdev->start_remove = true;
+ net_device_ctx->start_remove = true;
rndis_filter_device_remove(dev);
nvdev->num_chn = channels->combined_count;
- net_device_ctx->device_ctx = dev;
- hv_set_drvdata(dev, net);
-
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
device_info.ring_size = ring_size;
@@ -764,7 +807,7 @@ static int netvsc_set_channels(struct net_device *net,
goto recover;
}
- nvdev = hv_get_drvdata(dev);
+ nvdev = net_device_ctx->nvdev;
ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
if (ret) {
@@ -786,6 +829,9 @@ static int netvsc_set_channels(struct net_device *net,
out:
netvsc_open(net);
+ net_device_ctx->start_remove = false;
+ /* We may have missed link change notifications */
+ schedule_delayed_work(&net_device_ctx->dwork, 0);
return ret;
@@ -854,14 +900,14 @@ static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
- struct hv_device *hdev = ndevctx->device_ctx;
- struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+ struct netvsc_device *nvdev = ndevctx->nvdev;
+ struct hv_device *hdev = ndevctx->device_ctx;
struct netvsc_device_info device_info;
int limit = ETH_DATA_LEN;
u32 num_chn;
int ret = 0;
- if (nvdev == NULL || nvdev->destroy)
+ if (ndevctx->start_remove || !nvdev || nvdev->destroy)
return -ENODEV;
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
@@ -876,14 +922,11 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
num_chn = nvdev->num_chn;
- nvdev->start_remove = true;
+ ndevctx->start_remove = true;
rndis_filter_device_remove(hdev);
ndev->mtu = mtu;
- ndevctx->device_ctx = hdev;
- hv_set_drvdata(hdev, ndev);
-
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
device_info.num_chn = num_chn;
@@ -892,6 +935,10 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
out:
netvsc_open(ndev);
+ ndevctx->start_remove = false;
+
+ /* We may have missed link change notifications */
+ schedule_delayed_work(&ndevctx->dwork, 0);
return ret;
}
@@ -1004,18 +1051,22 @@ static const struct net_device_ops device_ops = {
*/
static void netvsc_link_change(struct work_struct *w)
{
- struct net_device_context *ndev_ctx;
- struct net_device *net;
+ struct net_device_context *ndev_ctx =
+ container_of(w, struct net_device_context, dwork.work);
+ struct hv_device *device_obj = ndev_ctx->device_ctx;
+ struct net_device *net = hv_get_drvdata(device_obj);
struct netvsc_device *net_device;
struct rndis_device *rdev;
struct netvsc_reconfig *event = NULL;
bool notify = false, reschedule = false;
unsigned long flags, next_reconfig, delay;
- ndev_ctx = container_of(w, struct net_device_context, dwork.work);
- net_device = hv_get_drvdata(ndev_ctx->device_ctx);
+ rtnl_lock();
+ if (ndev_ctx->start_remove)
+ goto out_unlock;
+
+ net_device = ndev_ctx->nvdev;
rdev = net_device->extension;
- net = net_device->ndev;
next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
if (time_is_after_jiffies(next_reconfig)) {
@@ -1026,7 +1077,7 @@ static void netvsc_link_change(struct work_struct *w)
delay = next_reconfig - jiffies;
delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
schedule_delayed_work(&ndev_ctx->dwork, delay);
- return;
+ goto out_unlock;
}
ndev_ctx->last_reconfig = jiffies;
@@ -1040,9 +1091,7 @@ static void netvsc_link_change(struct work_struct *w)
spin_unlock_irqrestore(&ndev_ctx->lock, flags);
if (!event)
- return;
-
- rtnl_lock();
+ goto out_unlock;
switch (event->event) {
/* Only the following events are possible due to the check in
@@ -1074,7 +1123,7 @@ static void netvsc_link_change(struct work_struct *w)
netif_tx_stop_all_queues(net);
event->event = RNDIS_STATUS_MEDIA_CONNECT;
spin_lock_irqsave(&ndev_ctx->lock, flags);
- list_add_tail(&event->list, &ndev_ctx->reconfig_events);
+ list_add(&event->list, &ndev_ctx->reconfig_events);
spin_unlock_irqrestore(&ndev_ctx->lock, flags);
reschedule = true;
}
@@ -1091,6 +1140,11 @@ static void netvsc_link_change(struct work_struct *w)
*/
if (reschedule)
schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
+
+ return;
+
+out_unlock:
+ rtnl_unlock();
}
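
netvsc_link_change() now takes the RTNL up front and funnels every early exit through out_unlock, both to serialize with start_remove and because the event handling already needed the lock (the normal path drops it mid-body, in context not shown here). The shape, reduced to a sketch (should_bail() is a hypothetical placeholder):

static void worker_fn(struct work_struct *w)
{
	rtnl_lock();
	if (should_bail())		/* hypothetical predicate */
		goto out_unlock;

	/* ... body; every early exit jumps to out_unlock ... */

out_unlock:
	rtnl_unlock();
}
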
static void netvsc_free_netdev(struct net_device *netdev)
@@ -1102,6 +1156,192 @@ static void netvsc_free_netdev(struct net_device *netdev)
free_netdev(netdev);
}
+static void netvsc_notify_peers(struct work_struct *wrk)
+{
+ struct garp_wrk *gwrk;
+
+ gwrk = container_of(wrk, struct garp_wrk, dwrk);
+
+ netdev_notify_peers(gwrk->netdev);
+
+ atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
+}
+
+static struct net_device *get_netvsc_net_device(char *mac)
+{
+ struct net_device *dev, *found = NULL;
+ int rtnl_locked;
+
+ rtnl_locked = rtnl_trylock();
+
+ for_each_netdev(&init_net, dev) {
+ if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
+ if (dev->netdev_ops != &device_ops)
+ continue;
+ found = dev;
+ break;
+ }
+ }
+ if (rtnl_locked)
+ rtnl_unlock();
+
+ return found;
+}
+
+static int netvsc_register_vf(struct net_device *vf_netdev)
+{
+ struct net_device *ndev;
+ struct net_device_context *net_device_ctx;
+ struct netvsc_device *netvsc_dev;
+ const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+
+ if (eth_ops == NULL || eth_ops == &ethtool_ops)
+ return NOTIFY_DONE;
+
+ /*
+ * We will use the MAC address to locate the synthetic interface to
+ * associate with the VF interface. If we don't find a matching
+ * synthetic interface, move on.
+ */
+ ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ netvsc_dev = net_device_ctx->nvdev;
+ if (netvsc_dev == NULL)
+ return NOTIFY_DONE;
+
+ netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
+ /*
+ * Take a reference on the module.
+ */
+ try_module_get(THIS_MODULE);
+ netvsc_dev->vf_netdev = vf_netdev;
+ return NOTIFY_OK;
+}
+
+
+static int netvsc_vf_up(struct net_device *vf_netdev)
+{
+ struct net_device *ndev;
+ struct netvsc_device *netvsc_dev;
+ const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+ struct net_device_context *net_device_ctx;
+
+ if (eth_ops == &ethtool_ops)
+ return NOTIFY_DONE;
+
+ ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ netvsc_dev = net_device_ctx->nvdev;
+
+ if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+ return NOTIFY_DONE;
+
+ netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
+ netvsc_dev->vf_inject = true;
+
+ /*
+ * Open the device before switching data path.
+ */
+ rndis_filter_open(net_device_ctx->device_ctx);
+
+ /*
+ * Notify the host to switch the data path.
+ */
+ netvsc_switch_datapath(ndev, true);
+ netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);
+
+ netif_carrier_off(ndev);
+
+ /*
+ * Now notify peers; the notification runs from a
+ * work item, so take a reference to prevent the
+ * VF interface from vanishing meanwhile.
+ */
+ atomic_inc(&netvsc_dev->vf_use_cnt);
+ net_device_ctx->gwrk.netdev = vf_netdev;
+ net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
+ schedule_work(&net_device_ctx->gwrk.dwrk);
+
+ return NOTIFY_OK;
+}
+
+
+static int netvsc_vf_down(struct net_device *vf_netdev)
+{
+ struct net_device *ndev;
+ struct netvsc_device *netvsc_dev;
+ struct net_device_context *net_device_ctx;
+ const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+
+ if (eth_ops == &ethtool_ops)
+ return NOTIFY_DONE;
+
+ ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ netvsc_dev = net_device_ctx->nvdev;
+
+ if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+ return NOTIFY_DONE;
+
+ netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
+ netvsc_dev->vf_inject = false;
+ /*
+ * Wait for currently active users to
+ * drain out.
+ */
+
+ while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
+ udelay(50);
+ netvsc_switch_datapath(ndev, false);
+ netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
+ rndis_filter_close(net_device_ctx->device_ctx);
+ netif_carrier_on(ndev);
+ /*
+ * Notify peers.
+ */
+ atomic_inc(&netvsc_dev->vf_use_cnt);
+ net_device_ctx->gwrk.netdev = ndev;
+ net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
+ schedule_work(&net_device_ctx->gwrk.dwrk);
+
+ return NOTIFY_OK;
+}
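
netvsc_vf_down() is the teardown half of the protocol sketched after netvsc_recv_callback(): clear the flag first so no new receivers enter, then busy-wait until every in-flight user drops its reference. The same illustrative names as in that sketch:

#include <stdatomic.h>
#include <stdbool.h>

extern atomic_bool vf_inject;	/* shared with the vf_get() sketch */
extern atomic_int  vf_use_cnt;

void vf_teardown(void)
{
	atomic_store(&vf_inject, false);	/* stop new entrants */
	while (atomic_load(&vf_use_cnt) != 0)
		;				/* the driver spins with udelay(50) */
	/* no readers left: safe to switch the data path back */
}
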
+
+
+static int netvsc_unregister_vf(struct net_device *vf_netdev)
+{
+ struct net_device *ndev;
+ struct netvsc_device *netvsc_dev;
+ const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+ struct net_device_context *net_device_ctx;
+
+ if (eth_ops == &ethtool_ops)
+ return NOTIFY_DONE;
+
+ ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ netvsc_dev = net_device_ctx->nvdev;
+ if (netvsc_dev == NULL)
+ return NOTIFY_DONE;
+ netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
+
+ netvsc_dev->vf_netdev = NULL;
+ module_put(THIS_MODULE);
+ return NOTIFY_OK;
+}
+
static int netvsc_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
@@ -1138,8 +1378,12 @@ static int netvsc_probe(struct hv_device *dev,
}
hv_set_drvdata(dev, net);
+
+ net_device_ctx->start_remove = false;
+
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
+ INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
@@ -1168,7 +1412,7 @@ static int netvsc_probe(struct hv_device *dev,
}
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
- nvdev = hv_get_drvdata(dev);
+ nvdev = net_device_ctx->nvdev;
netif_set_real_num_tx_queues(net, nvdev->num_chn);
netif_set_real_num_rx_queues(net, nvdev->num_chn);
@@ -1190,17 +1434,24 @@ static int netvsc_remove(struct hv_device *dev)
struct net_device_context *ndev_ctx;
struct netvsc_device *net_device;
- net_device = hv_get_drvdata(dev);
- net = net_device->ndev;
+ net = hv_get_drvdata(dev);
if (net == NULL) {
dev_err(&dev->device, "No net device to remove\n");
return 0;
}
- net_device->start_remove = true;
ndev_ctx = netdev_priv(net);
+ net_device = ndev_ctx->nvdev;
+
+ /* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
+ * removing the device.
+ */
+ rtnl_lock();
+ ndev_ctx->start_remove = true;
+ rtnl_unlock();
+
cancel_delayed_work_sync(&ndev_ctx->dwork);
cancel_work_sync(&ndev_ctx->work);
@@ -1215,6 +1466,8 @@ static int netvsc_remove(struct hv_device *dev)
*/
rndis_filter_device_remove(dev);
+ hv_set_drvdata(dev, NULL);
+
netvsc_free_netdev(net);
return 0;
}
@@ -1235,19 +1488,58 @@ static struct hv_driver netvsc_drv = {
.remove = netvsc_remove,
};
+
+/*
+ * On Hyper-V, every VF interface is matched with a corresponding
+ * synthetic interface. The synthetic interface is presented first
+ * to the guest. When the corresponding VF instance is registered,
+ * we will take care of switching the data path.
+ */
+static int netvsc_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ return netvsc_register_vf(event_dev);
+ case NETDEV_UNREGISTER:
+ return netvsc_unregister_vf(event_dev);
+ case NETDEV_UP:
+ return netvsc_vf_up(event_dev);
+ case NETDEV_DOWN:
+ return netvsc_vf_down(event_dev);
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct notifier_block netvsc_netdev_notifier = {
+ .notifier_call = netvsc_netdev_event,
+};
+
static void __exit netvsc_drv_exit(void)
{
+ unregister_netdevice_notifier(&netvsc_netdev_notifier);
vmbus_driver_unregister(&netvsc_drv);
}
static int __init netvsc_drv_init(void)
{
+ int ret;
+
if (ring_size < RING_SIZE_MIN) {
ring_size = RING_SIZE_MIN;
pr_info("Increased ring_size to %d (min allowed)\n",
ring_size);
}
- return vmbus_driver_register(&netvsc_drv);
+ ret = vmbus_driver_register(&netvsc_drv);
+
+ if (ret)
+ return ret;
+
+ register_netdevice_notifier(&netvsc_netdev_notifier);
+ return 0;
}
MODULE_LICENSE("GPL");
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index c4e1e0408..97c292b7d 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -126,11 +126,7 @@ static void put_rndis_request(struct rndis_device *dev,
static void dump_rndis_message(struct hv_device *hv_dev,
struct rndis_message *rndis_msg)
{
- struct net_device *netdev;
- struct netvsc_device *net_device;
-
- net_device = hv_get_drvdata(hv_dev);
- netdev = net_device->ndev;
+ struct net_device *netdev = hv_get_drvdata(hv_dev);
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
@@ -211,6 +207,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
struct hv_netvsc_packet *packet;
struct hv_page_buffer page_buf[2];
struct hv_page_buffer *pb = page_buf;
+ struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
/* Setup the packet to send it */
packet = &req->pkt;
@@ -236,7 +233,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
pb[0].len;
}
- ret = netvsc_send(dev->net_dev->dev, packet, NULL, &pb, NULL);
+ ret = netvsc_send(net_device_ctx->device_ctx, packet, NULL, &pb, NULL);
return ret;
}
@@ -262,9 +259,7 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
struct rndis_request *request = NULL;
bool found = false;
unsigned long flags;
- struct net_device *ndev;
-
- ndev = dev->net_dev->ndev;
+ struct net_device *ndev = dev->ndev;
spin_lock_irqsave(&dev->request_lock, flags);
list_for_each_entry(request, &dev->req_list, list_ent) {
@@ -355,6 +350,7 @@ static int rndis_filter_receive_data(struct rndis_device *dev,
struct ndis_pkt_8021q_info *vlan;
struct ndis_tcp_ip_checksum_info *csum_info;
u16 vlan_tci = 0;
+ struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
rndis_pkt = &msg->msg.pkt;
@@ -368,7 +364,7 @@ static int rndis_filter_receive_data(struct rndis_device *dev,
* should be the data packet size plus the trailer padding size
*/
if (pkt->total_data_buflen < rndis_pkt->data_len) {
- netdev_err(dev->net_dev->ndev, "rndis message buffer "
+ netdev_err(dev->ndev, "rndis message buffer "
"overflow detected (got %u, min %u)"
"...dropping this message!\n",
pkt->total_data_buflen, rndis_pkt->data_len);
@@ -390,7 +386,7 @@ static int rndis_filter_receive_data(struct rndis_device *dev,
}
csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
- return netvsc_recv_callback(dev->net_dev->dev, pkt, data,
+ return netvsc_recv_callback(net_device_ctx->device_ctx, pkt, data,
csum_info, channel, vlan_tci);
}
@@ -399,10 +395,11 @@ int rndis_filter_receive(struct hv_device *dev,
void **data,
struct vmbus_channel *channel)
{
- struct netvsc_device *net_dev = hv_get_drvdata(dev);
+ struct net_device *ndev = hv_get_drvdata(dev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_dev = net_device_ctx->nvdev;
struct rndis_device *rndis_dev;
struct rndis_message *rndis_msg;
- struct net_device *ndev;
int ret = 0;
if (!net_dev) {
@@ -410,8 +407,6 @@ int rndis_filter_receive(struct hv_device *dev,
goto exit;
}
- ndev = net_dev->ndev;
-
/* Make sure the rndis device state is initialized */
if (!net_dev->extension) {
netdev_err(ndev, "got rndis message but no rndis device - "
@@ -430,7 +425,7 @@ int rndis_filter_receive(struct hv_device *dev,
rndis_msg = *data;
- if (netif_msg_rx_err(net_dev->nd_ctx))
+ if (netif_msg_rx_err(net_device_ctx))
dump_rndis_message(dev, rndis_msg);
switch (rndis_msg->ndis_msg_type) {
@@ -550,9 +545,10 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev)
int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
{
- struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+ struct net_device *ndev = hv_get_drvdata(hdev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
struct rndis_device *rdev = nvdev->extension;
- struct net_device *ndev = nvdev->ndev;
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_config_parameter_info *cpi;
@@ -629,9 +625,10 @@ static int
rndis_filter_set_offload_params(struct hv_device *hdev,
struct ndis_offload_params *req_offloads)
{
- struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+ struct net_device *ndev = hv_get_drvdata(hdev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
struct rndis_device *rdev = nvdev->extension;
- struct net_device *ndev = nvdev->ndev;
struct rndis_request *request;
struct rndis_set_request *set;
struct ndis_offload_params *offload_params;
@@ -703,7 +700,7 @@ u8 netvsc_hash_key[HASH_KEYLEN] = {
static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
{
- struct net_device *ndev = rdev->net_dev->ndev;
+ struct net_device *ndev = rdev->ndev;
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_set_complete *set_complete;
@@ -799,9 +796,7 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
u32 status;
int ret;
unsigned long t;
- struct net_device *ndev;
-
- ndev = dev->net_dev->ndev;
+ struct net_device *ndev = dev->ndev;
request = get_rndis_request(dev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
@@ -856,7 +851,8 @@ static int rndis_filter_init_device(struct rndis_device *dev)
u32 status;
int ret;
unsigned long t;
- struct netvsc_device *nvdev = dev->net_dev;
+ struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
request = get_rndis_request(dev, RNDIS_MSG_INIT,
RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
@@ -879,7 +875,6 @@ static int rndis_filter_init_device(struct rndis_device *dev)
goto cleanup;
}
-
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
@@ -910,8 +905,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
{
struct rndis_request *request;
struct rndis_halt_request *halt;
- struct netvsc_device *nvdev = dev->net_dev;
- struct hv_device *hdev = nvdev->dev;
+ struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
+ struct hv_device *hdev = net_device_ctx->device_ctx;
ulong flags;
/* Attempt to do a rndis device halt */
@@ -979,13 +975,14 @@ static int rndis_filter_close_device(struct rndis_device *dev)
static void netvsc_sc_open(struct vmbus_channel *new_sc)
{
- struct netvsc_device *nvscdev;
+ struct net_device *ndev =
+ hv_get_drvdata(new_sc->primary_channel->device_obj);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *nvscdev = net_device_ctx->nvdev;
u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
int ret;
unsigned long flags;
- nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
-
if (chn_index >= nvscdev->num_chn)
return;
@@ -1010,6 +1007,8 @@ int rndis_filter_device_add(struct hv_device *dev,
void *additional_info)
{
int ret;
+ struct net_device *net = hv_get_drvdata(dev);
+ struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *net_device;
struct rndis_device *rndis_device;
struct netvsc_device_info *device_info = additional_info;
@@ -1040,16 +1039,15 @@ int rndis_filter_device_add(struct hv_device *dev,
return ret;
}
-
/* Initialize the rndis device */
- net_device = hv_get_drvdata(dev);
+ net_device = net_device_ctx->nvdev;
net_device->max_chn = 1;
net_device->num_chn = 1;
spin_lock_init(&net_device->sc_lock);
net_device->extension = rndis_device;
- rndis_device->net_dev = net_device;
+ rndis_device->ndev = net;
/* Send the rndis initialization message */
ret = rndis_filter_init_device(rndis_device);
@@ -1063,8 +1061,8 @@ int rndis_filter_device_add(struct hv_device *dev,
ret = rndis_filter_query_device(rndis_device,
RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
&mtu, &size);
- if (ret == 0 && size == sizeof(u32) && mtu < net_device->ndev->mtu)
- net_device->ndev->mtu = mtu;
+ if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
+ net->mtu = mtu;
/* Get the mac address */
ret = rndis_filter_query_device_mac(rndis_device);
@@ -1198,7 +1196,9 @@ err_dev_remv:
void rndis_filter_device_remove(struct hv_device *dev)
{
- struct netvsc_device *net_dev = hv_get_drvdata(dev);
+ struct net_device *ndev = hv_get_drvdata(dev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_dev = net_device_ctx->nvdev;
struct rndis_device *rndis_dev = net_dev->extension;
unsigned long t;
@@ -1224,20 +1224,30 @@ void rndis_filter_device_remove(struct hv_device *dev)
int rndis_filter_open(struct hv_device *dev)
{
- struct netvsc_device *net_device = hv_get_drvdata(dev);
+ struct net_device *ndev = hv_get_drvdata(dev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_device = net_device_ctx->nvdev;
if (!net_device)
return -EINVAL;
+ if (atomic_inc_return(&net_device->open_cnt) != 1)
+ return 0;
+
return rndis_filter_open_device(net_device->extension);
}
int rndis_filter_close(struct hv_device *dev)
{
- struct netvsc_device *nvdev = hv_get_drvdata(dev);
+ struct net_device *ndev = hv_get_drvdata(dev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
if (!nvdev)
return -EINVAL;
+ if (atomic_dec_return(&nvdev->open_cnt) != 0)
+ return 0;
+
return rndis_filter_close_device(nvdev->extension);
}
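
The new open_cnt makes rndis_filter_open()/close() nest: only the 0->1 open and the 1->0 close touch the device, which matters now that netvsc_vf_up()/netvsc_vf_down() open and close it while the netdev may be open as well. The transition test, sketched with C11 atomics (do_open_device()/do_close_device() are assumed stand-ins for the rndis_filter_{open,close}_device() calls):

#include <stdatomic.h>

static atomic_int open_cnt;

int do_open_device(void);	/* assumed helper */
int do_close_device(void);	/* assumed helper */

int filter_open(void)
{
	/* only the 0 -> 1 transition really opens the device */
	if (atomic_fetch_add(&open_cnt, 1) != 0)
		return 0;
	return do_open_device();
}

int filter_close(void)
{
	/* only the 1 -> 0 transition really closes it */
	if (atomic_fetch_sub(&open_cnt, 1) != 1)
		return 0;
	return do_close_device();
}
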
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index d32920688..83c94fba2 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -915,7 +915,6 @@ static void adf7242_debug(u8 irq1)
(stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "",
(stat & 0xf) == RC_STATUS_RX ? "RC_STATUS_RX" : "",
(stat & 0xf) == RC_STATUS_TX ? "RC_STATUS_TX" : "");
- }
#endif
}
@@ -1026,6 +1025,7 @@ static int adf7242_hw_init(struct adf7242_local *lp)
if (ret) {
dev_err(&lp->spi->dev,
"upload firmware failed with %d\n", ret);
+ release_firmware(fw);
return ret;
}
@@ -1033,6 +1033,7 @@ static int adf7242_hw_init(struct adf7242_local *lp)
if (ret) {
dev_err(&lp->spi->dev,
"verify firmware failed with %d\n", ret);
+ release_firmware(fw);
return ret;
}
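
Both error legs in adf7242_hw_init() now release the firmware blob instead of leaking it on early return. The conventional kernel shape for this fix is a single exit label; a sketch under assumed names (fw_upload()/fw_verify() are placeholders for the driver's upload and verify steps):

static int hw_init_fw(struct adf7242_local *lp, const struct firmware *fw)
{
	int ret;

	ret = fw_upload(lp, fw);
	if (ret) {
		dev_err(&lp->spi->dev, "upload firmware failed with %d\n", ret);
		goto out;
	}

	ret = fw_verify(lp, fw);
	if (ret)
		dev_err(&lp->spi->dev, "verify firmware failed with %d\n", ret);

out:
	release_firmware(fw);
	return ret;
}
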
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index cb9e9fe6d..9f10da60e 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1340,7 +1340,7 @@ static struct at86rf2xx_chip_data at86rf233_data = {
.t_off_to_aack = 80,
.t_off_to_tx_on = 80,
.t_off_to_sleep = 35,
- .t_sleep_to_off = 210,
+ .t_sleep_to_off = 1000,
.t_frame = 4096,
.t_p_ack = 545,
.rssi_base_val = -91,
@@ -1355,7 +1355,7 @@ static struct at86rf2xx_chip_data at86rf231_data = {
.t_off_to_aack = 110,
.t_off_to_tx_on = 110,
.t_off_to_sleep = 35,
- .t_sleep_to_off = 380,
+ .t_sleep_to_off = 1000,
.t_frame = 4096,
.t_p_ack = 545,
.rssi_base_val = -91,
@@ -1370,7 +1370,7 @@ static struct at86rf2xx_chip_data at86rf212_data = {
.t_off_to_aack = 200,
.t_off_to_tx_on = 200,
.t_off_to_sleep = 35,
- .t_sleep_to_off = 380,
+ .t_sleep_to_off = 1000,
.t_frame = 4096,
.t_p_ack = 545,
.rssi_base_val = -100,
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index b1cd865ad..52c9051f3 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -3,6 +3,8 @@
*
* Written 2013 by Werner Almesberger <werner@almesberger.net>
*
+ * Copyright (c) 2015 - 2016 Stefan Schmidt <stefan@datenfreihafen.org>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2
@@ -472,6 +474,76 @@ atusb_set_txpower(struct ieee802154_hw *hw, s32 mbm)
return -EINVAL;
}
+#define ATUSB_MAX_ED_LEVELS 0xF
+static const s32 atusb_ed_levels[ATUSB_MAX_ED_LEVELS + 1] = {
+ -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300,
+ -7100, -6900, -6700, -6500, -6300, -6100,
+};
+
+static int
+atusb_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca)
+{
+ struct atusb *atusb = hw->priv;
+ u8 val;
+
+ /* mapping 802.15.4 to driver spec */
+ switch (cca->mode) {
+ case NL802154_CCA_ENERGY:
+ val = 1;
+ break;
+ case NL802154_CCA_CARRIER:
+ val = 2;
+ break;
+ case NL802154_CCA_ENERGY_CARRIER:
+ switch (cca->opt) {
+ case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
+ val = 3;
+ break;
+ case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
+ val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return atusb_write_subreg(atusb, SR_CCA_MODE, val);
+}
+
+static int
+atusb_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
+{
+ struct atusb *atusb = hw->priv;
+ u32 i;
+
+ for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
+ if (hw->phy->supported.cca_ed_levels[i] == mbm)
+ return atusb_write_subreg(atusb, SR_CCA_ED_THRES, i);
+ }
+
+ return -EINVAL;
+}
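
atusb_ed_levels is a uniform ladder: 16 steps of 200 mbm from -9100 to -6100, so the array index doubles as the SR_CCA_ED_THRES register value. The linear search is kept because the same array is advertised as the capability list below; for a uniform table it reduces to a closed form (sketch only, not in the driver):

/* Closed-form equivalent of the lookup over the uniform table above. */
static int ed_level_to_thres(s32 mbm)
{
	if (mbm < -9100 || mbm > -6100 || (mbm + 9100) % 200)
		return -EINVAL;
	return (mbm + 9100) / 200;	/* index 0..15 */
}
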
+
+static int
+atusb_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries)
+{
+ struct atusb *atusb = hw->priv;
+ int ret;
+
+ ret = atusb_write_subreg(atusb, SR_MIN_BE, min_be);
+ if (ret)
+ return ret;
+
+ ret = atusb_write_subreg(atusb, SR_MAX_BE, max_be);
+ if (ret)
+ return ret;
+
+ return atusb_write_subreg(atusb, SR_MAX_CSMA_RETRIES, retries);
+}
+
static int
atusb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
{
@@ -508,6 +580,9 @@ static struct ieee802154_ops atusb_ops = {
.stop = atusb_stop,
.set_hw_addr_filt = atusb_set_hw_addr_filt,
.set_txpower = atusb_set_txpower,
+ .set_cca_mode = atusb_set_cca_mode,
+ .set_cca_ed_level = atusb_set_cca_ed_level,
+ .set_csma_params = atusb_set_csma_params,
.set_promiscuous_mode = atusb_set_promiscuous_mode,
};
@@ -636,9 +711,20 @@ static int atusb_probe(struct usb_interface *interface,
hw->parent = &usb_dev->dev;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
- IEEE802154_HW_PROMISCUOUS;
+ IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
+
+ hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
+ WPAN_PHY_FLAG_CCA_MODE;
+
+ hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
+ BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
+ hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
+ BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
+
+ hw->phy->supported.cca_ed_levels = atusb_ed_levels;
+ hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels);
- hw->phy->flags = WPAN_PHY_FLAG_TXPOWER;
+ hw->phy->cca.mode = NL802154_CCA_ENERGY;
hw->phy->current_page = 0;
hw->phy->current_channel = 11; /* reset default */
@@ -647,6 +733,7 @@ static int atusb_probe(struct usb_interface *interface,
hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
hw->phy->transmit_power = hw->phy->supported.tx_powers[0];
ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+ hw->phy->cca_ed_level = hw->phy->supported.cca_ed_levels[7];
atusb_command(atusb, ATUSB_RF_RESET, 0);
atusb_get_and_show_chip(atusb);
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 764a2bddf..f446db828 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -61,6 +61,7 @@
#define REG_TXBCON0 0x1A
#define REG_TXNCON 0x1B /* Transmit Normal FIFO Control */
#define BIT_TXNTRIG BIT(0)
+#define BIT_TXNSECEN BIT(1)
#define BIT_TXNACKREQ BIT(2)
#define REG_TXG1CON 0x1C
@@ -85,10 +86,13 @@
#define REG_INTSTAT 0x31 /* Interrupt Status */
#define BIT_TXNIF BIT(0)
#define BIT_RXIF BIT(3)
+#define BIT_SECIF BIT(4)
+#define BIT_SECIGNORE BIT(7)
#define REG_INTCON 0x32 /* Interrupt Control */
#define BIT_TXNIE BIT(0)
#define BIT_RXIE BIT(3)
+#define BIT_SECIE BIT(4)
#define REG_GPIO 0x33 /* GPIO */
#define REG_TRISGPIO 0x34 /* GPIO direction */
@@ -548,6 +552,9 @@ static void write_tx_buf_complete(void *context)
u8 val = BIT_TXNTRIG;
int ret;
+ if (ieee802154_is_secen(fc))
+ val |= BIT_TXNSECEN;
+
if (ieee802154_is_ackreq(fc))
val |= BIT_TXNACKREQ;
@@ -616,7 +623,7 @@ static int mrf24j40_start(struct ieee802154_hw *hw)
/* Clear TXNIE and RXIE. Enable interrupts */
return regmap_update_bits(devrec->regmap_short, REG_INTCON,
- BIT_TXNIE | BIT_RXIE, 0);
+ BIT_TXNIE | BIT_RXIE | BIT_SECIE, 0);
}
static void mrf24j40_stop(struct ieee802154_hw *hw)
@@ -1025,6 +1032,11 @@ static void mrf24j40_intstat_complete(void *context)
enable_irq(devrec->spi->irq);
+ /* Ignore Rx security decryption */
+ if (intstat & BIT_SECIF)
+ regmap_write_async(devrec->regmap_short, REG_SECCON0,
+ BIT_SECIGNORE);
+
/* Check for TX complete */
if (intstat & BIT_TXNIF)
ieee802154_xmit_complete(devrec->hw, devrec->tx_skb, false);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index cc56fac3c..66c0eeafc 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -196,6 +196,7 @@ static const struct net_device_ops ifb_netdev_ops = {
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
+ NETIF_F_GSO_ENCAP_ALL | \
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_TX)
@@ -224,6 +225,8 @@ static void ifb_setup(struct net_device *dev)
dev->tx_queue_len = TX_Q_LIMIT;
dev->features |= IFB_FEATURES;
+ dev->hw_features |= dev->features;
+ dev->hw_enc_features |= dev->features;
dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 57941d3f4..1c4d395fb 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -113,6 +113,7 @@ static int ipvlan_init(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
const struct net_device *phy_dev = ipvlan->phy_dev;
+ struct ipvl_port *port = ipvlan->port;
dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
(phy_dev->state & IPVLAN_STATE_MASK);
@@ -128,6 +129,8 @@ static int ipvlan_init(struct net_device *dev)
if (!ipvlan->pcpu_stats)
return -ENOMEM;
+ port->count += 1;
+
return 0;
}
@@ -481,27 +484,21 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
dev->priv_flags |= IFF_IPVLAN_SLAVE;
- port->count += 1;
err = register_netdevice(dev);
if (err < 0)
- goto ipvlan_destroy_port;
+ return err;
err = netdev_upper_dev_link(phy_dev, dev);
- if (err)
- goto ipvlan_destroy_port;
+ if (err) {
+ unregister_netdevice(dev);
+ return err;
+ }
list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
ipvlan_set_port_mode(port, mode);
netif_stacked_transfer_operstate(phy_dev, dev);
return 0;
-
-ipvlan_destroy_port:
- port->count -= 1;
- if (!port->count)
- ipvlan_port_destroy(phy_dev);
-
- return err;
}
static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index a2c227bfb..e070e1222 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -394,12 +394,5 @@ config MCS_FIR
To compile it as a module, choose M here: the module will be called
mcs7780.
-config SH_IRDA
- tristate "SuperH IrDA driver"
- depends on IRDA
- depends on (ARCH_SHMOBILE || COMPILE_TEST) && HAS_IOMEM
- help
- Say Y here if your want to enable SuperH IrDA devices.
-
endmenu
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index be8ab5b9a..4c344433d 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_VIA_FIR) += via-ircc.o
obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
obj-$(CONFIG_MCS_FIR) += mcs7780.o
obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
-obj-$(CONFIG_SH_IRDA) += sh_irda.o
# SIR drivers
obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 64bb44d5d..c285eafd3 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1427,7 +1427,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
/* Check for empty frame */
if (!skb->len) {
ali_ircc_change_speed(self, speed);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
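
These hunks are a mechanical conversion: the open-coded dev->trans_start = jiffies becomes netif_trans_update(dev), the 4.7 helper that refreshes the timestamp the tx watchdog reads. Its definition is approximately the following (quoted from memory from include/linux/netdevice.h; treat as a sketch):

static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}

The same substitution repeats through the rest of the irda drivers below.
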
@@ -1533,7 +1533,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
/* Restore bank register */
switch_bank(iobase, BANK0);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
@@ -1946,7 +1946,7 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
/* Check for empty frame */
if (!skb->len) {
ali_ircc_change_speed(self, speed);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -1966,7 +1966,7 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
/* Turn on transmit finished interrupt. Will fire immediately! */
outb(UART_IER_THRI, iobase+UART_IER);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index 303c4bd26..be5bb0b7f 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -531,7 +531,7 @@ static void bfin_sir_send_work(struct work_struct *work)
bfin_sir_dma_tx_chars(dev);
#endif
bfin_sir_enable_tx(port);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index a57bd1102..e3f108c61 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -429,7 +429,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
* do an extra memcpy and increment packet counters...
* Jean II */
irda_usb_change_speed_xbofs(self);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
/* Will netif_wake_queue() in callback */
goto drop;
}
@@ -526,7 +526,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += skb->len;
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
}
spin_unlock_irqrestore(&self->lock, flags);
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index dc0dbd8dd..aaecc3baa 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1253,7 +1253,7 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
*/
static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
{
- struct net_device *dev = self->netdev;
+ struct net_device *dev;
__u8 mcr = MCR_SIR;
int iobase;
__u8 bank;
@@ -1263,6 +1263,7 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
IRDA_ASSERT(self != NULL, return 0;);
+ dev = self->netdev;
iobase = self->io.fir_base;
/* Update accounting for new speed */
@@ -1399,7 +1400,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb,
* to make sure packets gets through the
* proper xmit handler - Jean II */
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -1424,7 +1425,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb,
/* Restore bank register */
outb(bank, iobase+BSR);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
@@ -1470,7 +1471,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
* the speed change has been done.
* Jean II */
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -1553,7 +1554,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
/* Restore bank register */
outb(bank, iobase+BSR);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
deleted file mode 100644
index c96b46b2c..000000000
--- a/drivers/net/irda/sh_irda.c
+++ /dev/null
@@ -1,875 +0,0 @@
-/*
- * SuperH IrDA Driver
- *
- * Copyright (C) 2010 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * Based on sh_sir.c
- * Copyright (C) 2009 Renesas Solutions Corp.
- * Copyright 2006-2009 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * CAUTION
- *
- * This driver is very simple.
- * So, it doesn't have below support now
- * - MIR/FIR support
- * - DMA transfer support
- * - FIFO mode support
- */
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/clk.h>
-#include <net/irda/wrapper.h>
-#include <net/irda/irda_device.h>
-
-#define DRIVER_NAME "sh_irda"
-
-#define __IRDARAM_LEN 0x1039
-
-#define IRTMR 0x1F00 /* Transfer mode */
-#define IRCFR 0x1F02 /* Configuration */
-#define IRCTR 0x1F04 /* IR control */
-#define IRTFLR 0x1F20 /* Transmit frame length */
-#define IRTCTR 0x1F22 /* Transmit control */
-#define IRRFLR 0x1F40 /* Receive frame length */
-#define IRRCTR 0x1F42 /* Receive control */
-#define SIRISR 0x1F60 /* SIR-UART mode interrupt source */
-#define SIRIMR 0x1F62 /* SIR-UART mode interrupt mask */
-#define SIRICR 0x1F64 /* SIR-UART mode interrupt clear */
-#define SIRBCR 0x1F68 /* SIR-UART mode baud rate count */
-#define MFIRISR 0x1F70 /* MIR/FIR mode interrupt source */
-#define MFIRIMR 0x1F72 /* MIR/FIR mode interrupt mask */
-#define MFIRICR 0x1F74 /* MIR/FIR mode interrupt clear */
-#define CRCCTR 0x1F80 /* CRC engine control */
-#define CRCIR 0x1F86 /* CRC engine input data */
-#define CRCCR 0x1F8A /* CRC engine calculation */
-#define CRCOR 0x1F8E /* CRC engine output data */
-#define FIFOCP 0x1FC0 /* FIFO current pointer */
-#define FIFOFP 0x1FC2 /* FIFO follow pointer */
-#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */
-#define FIFORSOR 0x1FC6 /* FIFO receive status OR */
-#define FIFOSEL 0x1FC8 /* FIFO select */
-#define FIFORS 0x1FCA /* FIFO receive status */
-#define FIFORFL 0x1FCC /* FIFO receive frame length */
-#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */
-#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */
-#define BIFCTL 0x1FD2 /* BUS interface control */
-#define IRDARAM 0x0000 /* IrDA buffer RAM */
-#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
-
-/* IRTMR */
-#define TMD_MASK (0x3 << 14) /* Transfer Mode */
-#define TMD_SIR (0x0 << 14)
-#define TMD_MIR (0x3 << 14)
-#define TMD_FIR (0x2 << 14)
-
-#define FIFORIM (1 << 8) /* FIFO receive interrupt mask */
-#define MIM (1 << 4) /* MIR/FIR Interrupt Mask */
-#define SIM (1 << 0) /* SIR Interrupt Mask */
-#define xIM_MASK (FIFORIM | MIM | SIM)
-
-/* IRCFR */
-#define RTO_SHIFT 8 /* shift for Receive Timeout */
-#define RTO (0x3 << RTO_SHIFT)
-
-/* IRTCTR */
-#define ARMOD (1 << 15) /* Auto-Receive Mode */
-#define TE (1 << 0) /* Transmit Enable */
-
-/* IRRFLR */
-#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */
-
-/* IRRCTR */
-#define RE (1 << 0) /* Receive Enable */
-
-/*
- * SIRISR, SIRIMR, SIRICR,
- * MFIRISR, MFIRIMR, MFIRICR
- */
-#define FRE (1 << 15) /* Frame Receive End */
-#define TROV (1 << 11) /* Transfer Area Overflow */
-#define xIR_9 (1 << 9)
-#define TOT xIR_9 /* for SIR Timeout */
-#define ABTD xIR_9 /* for MIR/FIR Abort Detection */
-#define xIR_8 (1 << 8)
-#define FER xIR_8 /* for SIR Framing Error */
-#define CRCER xIR_8 /* for MIR/FIR CRC error */
-#define FTE (1 << 7) /* Frame Transmit End */
-#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE)
-
-/* SIRBCR */
-#define BRC_MASK (0x3F) /* mask for Baud Rate Count */
-
-/* CRCCTR */
-#define CRC_RST (1 << 15) /* CRC Engine Reset */
-#define CRC_CT_MASK 0x0FFF /* mask for CRC Engine Input Data Count */
-
-/* CRCIR */
-#define CRC_IN_MASK 0x0FFF /* mask for CRC Engine Input Data */
-
-/************************************************************************
-
-
- enum / structure
-
-
-************************************************************************/
-enum sh_irda_mode {
- SH_IRDA_NONE = 0,
- SH_IRDA_SIR,
- SH_IRDA_MIR,
- SH_IRDA_FIR,
-};
-
-struct sh_irda_self;
-struct sh_irda_xir_func {
- int (*xir_fre) (struct sh_irda_self *self);
- int (*xir_trov) (struct sh_irda_self *self);
- int (*xir_9) (struct sh_irda_self *self);
- int (*xir_8) (struct sh_irda_self *self);
- int (*xir_fte) (struct sh_irda_self *self);
-};
-
-struct sh_irda_self {
- void __iomem *membase;
- unsigned int irq;
- struct platform_device *pdev;
-
- struct net_device *ndev;
-
- struct irlap_cb *irlap;
- struct qos_info qos;
-
- iobuff_t tx_buff;
- iobuff_t rx_buff;
-
- enum sh_irda_mode mode;
- spinlock_t lock;
-
- struct sh_irda_xir_func *xir_func;
-};
-
-/************************************************************************
-
-
- common function
-
-
-************************************************************************/
-static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&self->lock, flags);
- iowrite16(data, self->membase + offset);
- spin_unlock_irqrestore(&self->lock, flags);
-}
-
-static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
-{
- unsigned long flags;
- u16 ret;
-
- spin_lock_irqsave(&self->lock, flags);
- ret = ioread16(self->membase + offset);
- spin_unlock_irqrestore(&self->lock, flags);
-
- return ret;
-}
-
-static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
- u16 mask, u16 data)
-{
- unsigned long flags;
- u16 old, new;
-
- spin_lock_irqsave(&self->lock, flags);
- old = ioread16(self->membase + offset);
- new = (old & ~mask) | data;
- if (old != new)
- iowrite16(data, self->membase + offset);
- spin_unlock_irqrestore(&self->lock, flags);
-}
-
-/************************************************************************
-
-
- mode function
-
-
-************************************************************************/
-/*=====================================
- *
- * common
- *
- *=====================================*/
-static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
-{
- struct device *dev = &self->ndev->dev;
-
- sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
- dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
-}
-
-static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
-{
- struct device *dev = &self->ndev->dev;
-
- if (SH_IRDA_SIR != self->mode)
- interval = 0;
-
- if (interval < 0 || interval > 2) {
- dev_err(dev, "unsupported timeout interval\n");
- return -EINVAL;
- }
-
- sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
- return 0;
-}
-
-static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
-{
- struct device *dev = &self->ndev->dev;
- u16 val;
-
- if (baudrate < 0)
- return 0;
-
- if (SH_IRDA_SIR != self->mode) {
- dev_err(dev, "it is not SIR mode\n");
- return -EINVAL;
- }
-
- /*
- * Baud rate (bits/s) =
- * (48 MHz / 26) / (baud rate counter value + 1) x 16
- */
- val = (48000000 / 26 / 16 / baudrate) - 1;
- dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);
-
- sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);
-
- return 0;
-}
-
-static int sh_irda_get_rcv_length(struct sh_irda_self *self)
-{
- return RFL_MASK & sh_irda_read(self, IRRFLR);
-}
-
-/*=====================================
- *
- * NONE MODE
- *
- *=====================================*/
-static int sh_irda_xir_fre(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
- dev_err(dev, "none mode: frame recv\n");
- return 0;
-}
-
-static int sh_irda_xir_trov(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
- dev_err(dev, "none mode: buffer ram over\n");
- return 0;
-}
-
-static int sh_irda_xir_9(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
- dev_err(dev, "none mode: time over\n");
- return 0;
-}
-
-static int sh_irda_xir_8(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
- dev_err(dev, "none mode: framing error\n");
- return 0;
-}
-
-static int sh_irda_xir_fte(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
- dev_err(dev, "none mode: frame transmit end\n");
- return 0;
-}
-
-static struct sh_irda_xir_func sh_irda_xir_func = {
- .xir_fre = sh_irda_xir_fre,
- .xir_trov = sh_irda_xir_trov,
- .xir_9 = sh_irda_xir_9,
- .xir_8 = sh_irda_xir_8,
- .xir_fte = sh_irda_xir_fte,
-};
-
-/*=====================================
- *
- * MIR/FIR MODE
- *
- * MIR/FIR are not supported now
- *=====================================*/
-static struct sh_irda_xir_func sh_irda_mfir_func = {
- .xir_fre = sh_irda_xir_fre,
- .xir_trov = sh_irda_xir_trov,
- .xir_9 = sh_irda_xir_9,
- .xir_8 = sh_irda_xir_8,
- .xir_fte = sh_irda_xir_fte,
-};
-
-/*=====================================
- *
- * SIR MODE
- *
- *=====================================*/
-static int sh_irda_sir_fre(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
- u16 data16;
- u8 *data = (u8 *)&data16;
- int len = sh_irda_get_rcv_length(self);
- int i, j;
-
- if (len > IRDARAM_LEN)
- len = IRDARAM_LEN;
-
- dev_dbg(dev, "frame recv length = %d\n", len);
-
- for (i = 0; i < len; i++) {
- j = i % 2;
- if (!j)
- data16 = sh_irda_read(self, IRDARAM + i);
-
- async_unwrap_char(self->ndev, &self->ndev->stats,
- &self->rx_buff, data[j]);
- }
- self->ndev->last_rx = jiffies;
-
- sh_irda_rcv_ctrl(self, 1);
-
- return 0;
-}
-
-static int sh_irda_sir_trov(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
-
- dev_err(dev, "buffer ram over\n");
- sh_irda_rcv_ctrl(self, 1);
- return 0;
-}
-
-static int sh_irda_sir_tot(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
-
- dev_err(dev, "time over\n");
- sh_irda_set_baudrate(self, 9600);
- sh_irda_rcv_ctrl(self, 1);
- return 0;
-}
-
-static int sh_irda_sir_fer(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
-
- dev_err(dev, "framing error\n");
- sh_irda_rcv_ctrl(self, 1);
- return 0;
-}
-
-static int sh_irda_sir_fte(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
-
- dev_dbg(dev, "frame transmit end\n");
- netif_wake_queue(self->ndev);
-
- return 0;
-}
-
-static struct sh_irda_xir_func sh_irda_sir_func = {
- .xir_fre = sh_irda_sir_fre,
- .xir_trov = sh_irda_sir_trov,
- .xir_9 = sh_irda_sir_tot,
- .xir_8 = sh_irda_sir_fer,
- .xir_fte = sh_irda_sir_fte,
-};
-
-static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
-{
- struct device *dev = &self->ndev->dev;
- struct sh_irda_xir_func *func;
- const char *name;
- u16 data;
-
- switch (mode) {
- case SH_IRDA_SIR:
- name = "SIR";
- data = TMD_SIR;
- func = &sh_irda_sir_func;
- break;
- case SH_IRDA_MIR:
- name = "MIR";
- data = TMD_MIR;
- func = &sh_irda_mfir_func;
- break;
- case SH_IRDA_FIR:
- name = "FIR";
- data = TMD_FIR;
- func = &sh_irda_mfir_func;
- break;
- default:
- name = "NONE";
- data = 0;
- func = &sh_irda_xir_func;
- break;
- }
-
- self->mode = mode;
- self->xir_func = func;
- sh_irda_update_bits(self, IRTMR, TMD_MASK, data);
-
- dev_dbg(dev, "switch to %s mode", name);
-}
-
-/************************************************************************
-
-
- irq function
-
-
-************************************************************************/
-static void sh_irda_set_irq_mask(struct sh_irda_self *self)
-{
- u16 tmr_hole;
- u16 xir_reg;
-
- /* set all mask */
- sh_irda_update_bits(self, IRTMR, xIM_MASK, xIM_MASK);
- sh_irda_update_bits(self, SIRIMR, xIR_MASK, xIR_MASK);
- sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);
-
- /* clear irq */
- sh_irda_update_bits(self, SIRICR, xIR_MASK, xIR_MASK);
- sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);
-
- switch (self->mode) {
- case SH_IRDA_SIR:
- tmr_hole = SIM;
- xir_reg = SIRIMR;
- break;
- case SH_IRDA_MIR:
- case SH_IRDA_FIR:
- tmr_hole = MIM;
- xir_reg = MFIRIMR;
- break;
- default:
- tmr_hole = 0;
- xir_reg = 0;
- break;
- }
-
- /* open mask */
- if (xir_reg) {
- sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
- sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
- }
-}
-
-static irqreturn_t sh_irda_irq(int irq, void *dev_id)
-{
- struct sh_irda_self *self = dev_id;
- struct sh_irda_xir_func *func = self->xir_func;
- u16 isr = sh_irda_read(self, SIRISR);
-
- /* clear irq */
- sh_irda_write(self, SIRICR, isr);
-
- if (isr & FRE)
- func->xir_fre(self);
- if (isr & TROV)
- func->xir_trov(self);
- if (isr & xIR_9)
- func->xir_9(self);
- if (isr & xIR_8)
- func->xir_8(self);
- if (isr & FTE)
- func->xir_fte(self);
-
- return IRQ_HANDLED;
-}
-
-/************************************************************************
-
-
- CRC function
-
-
-************************************************************************/
-static void sh_irda_crc_reset(struct sh_irda_self *self)
-{
- sh_irda_write(self, CRCCTR, CRC_RST);
-}
-
-static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
-{
- sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
-}
-
-static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
-{
- return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
-}
-
-static u16 sh_irda_crc_out(struct sh_irda_self *self)
-{
- return sh_irda_read(self, CRCOR);
-}
-
-static int sh_irda_crc_init(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
- int ret = -EIO;
- u16 val;
-
- sh_irda_crc_reset(self);
-
- sh_irda_crc_add(self, 0xCC);
- sh_irda_crc_add(self, 0xF5);
- sh_irda_crc_add(self, 0xF1);
- sh_irda_crc_add(self, 0xA7);
-
- val = sh_irda_crc_cnt(self);
- if (4 != val) {
- dev_err(dev, "CRC count error %x\n", val);
- goto crc_init_out;
- }
-
- val = sh_irda_crc_out(self);
- if (0x51DF != val) {
- dev_err(dev, "CRC result error%x\n", val);
- goto crc_init_out;
- }
-
- ret = 0;
-
-crc_init_out:
-
- sh_irda_crc_reset(self);
- return ret;
-}
-
-/************************************************************************
-
-
- iobuf function
-
-
-************************************************************************/
-static void sh_irda_remove_iobuf(struct sh_irda_self *self)
-{
- kfree(self->rx_buff.head);
-
- self->tx_buff.head = NULL;
- self->tx_buff.data = NULL;
- self->rx_buff.head = NULL;
- self->rx_buff.data = NULL;
-}
-
-static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
-{
- if (self->rx_buff.head ||
- self->tx_buff.head) {
- dev_err(&self->ndev->dev, "iobuff has already existed.");
- return -EINVAL;
- }
-
- /* rx_buff */
- self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
- if (!self->rx_buff.head)
- return -ENOMEM;
-
- self->rx_buff.truesize = rxsize;
- self->rx_buff.in_frame = FALSE;
- self->rx_buff.state = OUTSIDE_FRAME;
- self->rx_buff.data = self->rx_buff.head;
-
- /* tx_buff */
- self->tx_buff.head = self->membase + IRDARAM;
- self->tx_buff.truesize = IRDARAM_LEN;
-
- return 0;
-}
-
-/************************************************************************
-
-
- net_device_ops function
-
-
-************************************************************************/
-static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
- struct sh_irda_self *self = netdev_priv(ndev);
- struct device *dev = &self->ndev->dev;
- int speed = irda_get_next_speed(skb);
- int ret;
-
- dev_dbg(dev, "hard xmit\n");
-
- netif_stop_queue(ndev);
- sh_irda_rcv_ctrl(self, 0);
-
- ret = sh_irda_set_baudrate(self, speed);
- if (ret < 0)
- goto sh_irda_hard_xmit_end;
-
- self->tx_buff.len = 0;
- if (skb->len) {
- unsigned long flags;
-
- spin_lock_irqsave(&self->lock, flags);
- self->tx_buff.len = async_wrap_skb(skb,
- self->tx_buff.head,
- self->tx_buff.truesize);
- spin_unlock_irqrestore(&self->lock, flags);
-
- if (self->tx_buff.len > self->tx_buff.truesize)
- self->tx_buff.len = self->tx_buff.truesize;
-
- sh_irda_write(self, IRTFLR, self->tx_buff.len);
- sh_irda_write(self, IRTCTR, ARMOD | TE);
- } else
- goto sh_irda_hard_xmit_end;
-
- dev_kfree_skb(skb);
-
- return 0;
-
-sh_irda_hard_xmit_end:
- sh_irda_set_baudrate(self, 9600);
- netif_wake_queue(self->ndev);
- sh_irda_rcv_ctrl(self, 1);
- dev_kfree_skb(skb);
-
- return ret;
-
-}
-
-static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
-{
- /*
- * FIXME
- *
- * This function is required by the IrDA framework,
- * but there is nothing to do here yet.
- */
- return 0;
-}
-
-static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
-{
- struct sh_irda_self *self = netdev_priv(ndev);
-
- return &self->ndev->stats;
-}
-
-static int sh_irda_open(struct net_device *ndev)
-{
- struct sh_irda_self *self = netdev_priv(ndev);
- int err;
-
- pm_runtime_get_sync(&self->pdev->dev);
- err = sh_irda_crc_init(self);
- if (err)
- goto open_err;
-
- sh_irda_set_mode(self, SH_IRDA_SIR);
- sh_irda_set_timeout(self, 2);
- sh_irda_set_baudrate(self, 9600);
-
- self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
- if (!self->irlap) {
- err = -ENODEV;
- goto open_err;
- }
-
- netif_start_queue(ndev);
- sh_irda_rcv_ctrl(self, 1);
- sh_irda_set_irq_mask(self);
-
- dev_info(&ndev->dev, "opened\n");
-
- return 0;
-
-open_err:
- pm_runtime_put_sync(&self->pdev->dev);
-
- return err;
-}
-
-static int sh_irda_stop(struct net_device *ndev)
-{
- struct sh_irda_self *self = netdev_priv(ndev);
-
- /* Stop IrLAP */
- if (self->irlap) {
- irlap_close(self->irlap);
- self->irlap = NULL;
- }
-
- netif_stop_queue(ndev);
- pm_runtime_put_sync(&self->pdev->dev);
-
- dev_info(&ndev->dev, "stopped\n");
-
- return 0;
-}
-
-static const struct net_device_ops sh_irda_ndo = {
- .ndo_open = sh_irda_open,
- .ndo_stop = sh_irda_stop,
- .ndo_start_xmit = sh_irda_hard_xmit,
- .ndo_do_ioctl = sh_irda_ioctl,
- .ndo_get_stats = sh_irda_stats,
-};
-
-/************************************************************************
-
-
- platform_driver function
-
-
-************************************************************************/
-static int sh_irda_probe(struct platform_device *pdev)
-{
- struct net_device *ndev;
- struct sh_irda_self *self;
- struct resource *res;
- int irq;
- int err = -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
- if (!res || irq < 0) {
- dev_err(&pdev->dev, "Not enough platform resources.\n");
- goto exit;
- }
-
- ndev = alloc_irdadev(sizeof(*self));
- if (!ndev)
- goto exit;
-
- self = netdev_priv(ndev);
- self->membase = ioremap_nocache(res->start, resource_size(res));
- if (!self->membase) {
- err = -ENXIO;
- dev_err(&pdev->dev, "Unable to ioremap.\n");
- goto err_mem_1;
- }
-
- err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
- if (err)
- goto err_mem_2;
-
- self->pdev = pdev;
- pm_runtime_enable(&pdev->dev);
-
- irda_init_max_qos_capabilies(&self->qos);
-
- ndev->netdev_ops = &sh_irda_ndo;
- ndev->irq = irq;
-
- self->ndev = ndev;
- self->qos.baud_rate.bits &= IR_9600; /* FIXME */
- self->qos.min_turn_time.bits = 1; /* 10 ms or more */
- spin_lock_init(&self->lock);
-
- irda_qos_bits_to_value(&self->qos);
-
- err = register_netdev(ndev);
- if (err)
- goto err_mem_4;
-
- platform_set_drvdata(pdev, ndev);
- err = devm_request_irq(&pdev->dev, irq, sh_irda_irq, 0, "sh_irda", self);
- if (err) {
- dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
- goto err_mem_4;
- }
-
- dev_info(&pdev->dev, "SuperH IrDA probed\n");
-
- goto exit;
-
-err_mem_4:
- pm_runtime_disable(&pdev->dev);
- sh_irda_remove_iobuf(self);
-err_mem_2:
- iounmap(self->membase);
-err_mem_1:
- free_netdev(ndev);
-exit:
- return err;
-}
-
-static int sh_irda_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct sh_irda_self *self = netdev_priv(ndev);
-
- if (!self)
- return 0;
-
- unregister_netdev(ndev);
- pm_runtime_disable(&pdev->dev);
- sh_irda_remove_iobuf(self);
- iounmap(self->membase);
- free_netdev(ndev);
-
- return 0;
-}
-
-static int sh_irda_runtime_nop(struct device *dev)
-{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
- return 0;
-}
-
-static const struct dev_pm_ops sh_irda_pm_ops = {
- .runtime_suspend = sh_irda_runtime_nop,
- .runtime_resume = sh_irda_runtime_nop,
-};
-
-static struct platform_driver sh_irda_driver = {
- .probe = sh_irda_probe,
- .remove = sh_irda_remove,
- .driver = {
- .name = DRIVER_NAME,
- .pm = &sh_irda_pm_ops,
- },
-};
-
-module_platform_driver(sh_irda_driver);
-
-MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
-MODULE_DESCRIPTION("SuperH IrDA driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index b455ffe88..dcf92ba80 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -862,7 +862,7 @@ static void smsc_ircc_timeout(struct net_device *dev)
spin_lock_irqsave(&self->lock, flags);
smsc_ircc_sir_start(self);
smsc_ircc_change_speed(self, self->io.speed);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
spin_unlock_irqrestore(&self->lock, flags);
}
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 83cc48a01..42da094b6 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -718,7 +718,7 @@ static void stir_send(struct stir_cb *stir, struct sk_buff *skb)
stir->netdev->stats.tx_packets++;
stir->netdev->stats.tx_bytes += skb->len;
- stir->netdev->trans_start = jiffies;
+ netif_trans_update(stir->netdev);
pr_debug("send %d (%d)\n", skb->len, wraplen);
if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1),
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 6960d4cd3..ca4442a9d 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -774,7 +774,7 @@ static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
/* Check for empty frame */
if (!skb->len) {
via_ircc_change_speed(self, speed);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
} else
@@ -821,7 +821,7 @@ static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
RXStart(iobase, OFF);
TXStart(iobase, ON);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -849,7 +849,7 @@ static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
if ((speed != self->io.speed) && (speed != -1)) {
if (!skb->len) {
via_ircc_change_speed(self, speed);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
} else
@@ -869,7 +869,7 @@ static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
via_ircc_dma_xmit(self, iobase);
//F01 }
//F01 if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev_kfree_skb(skb);
spin_unlock_irqrestore(&self->lock, flags);
return NETDEV_TX_OK;
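
This and the two preceding hunks (smsc-ircc2, stir4200) belong to a tree-wide conversion away from writing dev->trans_start directly; the timestamp now lives in the per-queue structure. A sketch of the helper as it reads in this series (reproduced from memory, treat as an approximation):

	static inline void netif_trans_update(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		if (txq->trans_start != jiffies)
			txq->trans_start = jiffies; /* avoid dirtying a hot cacheline */
	}
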
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 8f3c55d03..a70b6c460 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -605,12 +605,41 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
dev_put(dev);
}
+static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
+ unsigned char **iv,
+ struct scatterlist **sg)
+{
+ size_t size, iv_offset, sg_offset;
+ struct aead_request *req;
+ void *tmp;
+
+ size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
+ iv_offset = size;
+ size += GCM_AES_IV_LEN;
+
+ size = ALIGN(size, __alignof__(struct scatterlist));
+ sg_offset = size;
+ size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
+
+ tmp = kmalloc(size, GFP_ATOMIC);
+ if (!tmp)
+ return NULL;
+
+ *iv = (unsigned char *)(tmp + iv_offset);
+ *sg = (struct scatterlist *)(tmp + sg_offset);
+ req = tmp;
+
+ aead_request_set_tfm(req, tfm);
+
+ return req;
+}
+
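
macsec_alloc_req() folds the request, the IV, and the scatterlist array into a single allocation, so the atomic-context hot path pays for one kmalloc()/kfree() pair instead of three. A standalone sketch of the same layout trick in plain C, with stand-in types and malloc() in place of kmalloc(GFP_ATOMIC):

	#include <stdlib.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))
	#define IV_LEN		12	/* GCM_AES_IV_LEN */
	#define NFRAGS		17	/* MAX_SKB_FRAGS + 1 on a 4K-page build */

	struct req { char opaque[64]; };		/* stand-in for aead_request */
	struct sg  { void *page; unsigned int len; };	/* stand-in for scatterlist */

	static struct req *alloc_req(unsigned char **iv, struct sg **sg)
	{
		size_t size = sizeof(struct req), iv_off, sg_off;
		void *tmp;

		iv_off = size;
		size += IV_LEN;				/* IV is byte-addressed */
		size = ALIGN(size, __alignof__(struct sg)); /* sg array needs alignment */
		sg_off = size;
		size += sizeof(struct sg) * NFRAGS;

		tmp = malloc(size);
		if (!tmp)
			return NULL;
		*iv = (unsigned char *)tmp + iv_off;
		*sg = (struct sg *)((char *)tmp + sg_off);
		return tmp;		/* one free() releases all three regions */
	}
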
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct net_device *dev)
{
int ret;
- struct scatterlist sg[MAX_SKB_FRAGS + 1];
- unsigned char iv[GCM_AES_IV_LEN];
+ struct scatterlist *sg;
+ unsigned char *iv;
struct ethhdr *eth;
struct macsec_eth_header *hh;
size_t unprotected_len;
@@ -668,8 +697,6 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
macsec_fill_sectag(hh, secy, pn);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
- macsec_fill_iv(iv, secy->sci, pn);
-
skb_put(skb, secy->icv_len);
if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
@@ -684,13 +711,15 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
return ERR_PTR(-EINVAL);
}
- req = aead_request_alloc(tx_sa->key.tfm, GFP_ATOMIC);
+ req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
if (!req) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
+ macsec_fill_iv(iv, secy->sci, pn);
+
sg_init_table(sg, MAX_SKB_FRAGS + 1);
skb_to_sgvec(skb, sg, 0, skb->len);
@@ -861,7 +890,6 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
out:
macsec_rxsa_put(rx_sa);
dev_put(dev);
- return;
}
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
@@ -871,8 +899,8 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
struct macsec_secy *secy)
{
int ret;
- struct scatterlist sg[MAX_SKB_FRAGS + 1];
- unsigned char iv[GCM_AES_IV_LEN];
+ struct scatterlist *sg;
+ unsigned char *iv;
struct aead_request *req;
struct macsec_eth_header *hdr;
u16 icv_len = secy->icv_len;
@@ -882,7 +910,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
if (!skb)
return ERR_PTR(-ENOMEM);
- req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
+ req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
if (!req) {
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
@@ -914,7 +942,6 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
}
macsec_skb_cb(skb)->req = req;
- macsec_skb_cb(skb)->rx_sa = rx_sa;
skb->dev = dev;
aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
@@ -1141,6 +1168,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
}
}
+ macsec_skb_cb(skb)->rx_sa = rx_sa;
+
/* Disabled && !changed text => skip validation */
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames != MACSEC_VALIDATE_DISABLED)
@@ -1234,7 +1263,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
struct crypto_aead *tfm;
int ret;
- tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
if (!tfm || IS_ERR(tfm))
return NULL;
@@ -1408,9 +1437,10 @@ static sci_t nla_get_sci(const struct nlattr *nla)
return (__force sci_t)nla_get_u64(nla);
}
-static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value)
+static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
+ int padattr)
{
- return nla_put_u64(skb, attrtype, (__force u64)value);
+ return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}
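
This is the first of many nla_put_u64() to nla_put_u64_64bit() conversions in the file; they follow the 4.6/4.7 netlink work that keeps 64-bit attribute payloads 8-byte aligned for architectures that cannot load a u64 from a 4-byte boundary. Sketch of the resulting on-wire layout, assuming the standard 4-byte nlattr header:

	/*
	 *   | nlattr hdr (4) | empty PAD attribute |  <- inserted by the helper
	 *   | nlattr hdr (4) | u64 payload (8)     |     only when the payload
	 *                                                would land misaligned
	 *
	 * The PAD attribute type (MACSEC_SECY_ATTR_PAD and friends below) is
	 * reserved in the uapi enum and ignored by parsers, so existing
	 * userspace keeps working.
	 */
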
static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
@@ -2143,16 +2173,36 @@ static int copy_rx_sc_stats(struct sk_buff *skb,
sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
}
- if (nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, sum.InOctetsValidated) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, sum.InOctetsDecrypted) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, sum.InPktsUnchecked) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, sum.InPktsDelayed) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, sum.InPktsLate) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
+ if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
+ sum.InOctetsValidated,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
+ sum.InOctetsDecrypted,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
+ sum.InPktsUnchecked,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
+ sum.InPktsDelayed,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
+ sum.InPktsOK,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
+ sum.InPktsInvalid,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
+ sum.InPktsLate,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
+ sum.InPktsNotValid,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
+ sum.InPktsNotUsingSA,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
+ sum.InPktsUnusedSA,
+ MACSEC_RXSC_STATS_ATTR_PAD))
return -EMSGSIZE;
return 0;
@@ -2181,10 +2231,18 @@ static int copy_tx_sc_stats(struct sk_buff *skb,
sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
}
- if (nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
- nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted) ||
- nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, sum.OutOctetsProtected) ||
- nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, sum.OutOctetsEncrypted))
+ if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
+ sum.OutPktsProtected,
+ MACSEC_TXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
+ sum.OutPktsEncrypted,
+ MACSEC_TXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
+ sum.OutOctetsProtected,
+ MACSEC_TXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
+ sum.OutOctetsEncrypted,
+ MACSEC_TXSC_STATS_ATTR_PAD))
return -EMSGSIZE;
return 0;
@@ -2217,14 +2275,30 @@ static int copy_secy_stats(struct sk_buff *skb,
sum.InPktsOverrun += tmp.InPktsOverrun;
}
- if (nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, sum.OutPktsUntagged) ||
- nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, sum.InPktsUntagged) ||
- nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, sum.OutPktsTooLong) ||
- nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, sum.InPktsNoTag) ||
- nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, sum.InPktsBadTag) ||
- nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, sum.InPktsUnknownSCI) ||
- nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, sum.InPktsNoSCI) ||
- nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, sum.InPktsOverrun))
+ if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
+ sum.OutPktsUntagged,
+ MACSEC_SECY_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
+ sum.InPktsUntagged,
+ MACSEC_SECY_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
+ sum.OutPktsTooLong,
+ MACSEC_SECY_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
+ sum.InPktsNoTag,
+ MACSEC_SECY_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
+ sum.InPktsBadTag,
+ MACSEC_SECY_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
+ sum.InPktsUnknownSCI,
+ MACSEC_SECY_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
+ sum.InPktsNoSCI,
+ MACSEC_SECY_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
+ sum.InPktsOverrun,
+ MACSEC_SECY_STATS_ATTR_PAD))
return -EMSGSIZE;
return 0;
@@ -2238,9 +2312,11 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
if (!secy_nest)
return 1;
- if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) ||
- nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
- MACSEC_DEFAULT_CIPHER_ID) ||
+ if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
+ MACSEC_SECY_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
+ MACSEC_DEFAULT_CIPHER_ID,
+ MACSEC_SECY_ATTR_PAD) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -2366,7 +2442,8 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
}
if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
- nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci)) {
+ nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
+ MACSEC_RXSC_ATTR_PAD)) {
nla_nest_cancel(skb, rxsc_nest);
nla_nest_cancel(skb, rxsc_list);
goto nla_put_failure;
@@ -2844,7 +2921,7 @@ static void macsec_free_netdev(struct net_device *dev)
static void macsec_setup(struct net_device *dev)
{
ether_setup(dev);
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->netdev_ops = &macsec_netdev_ops;
dev->destructor = macsec_free_netdev;
@@ -3171,9 +3248,9 @@ static struct net *macsec_get_link_net(const struct net_device *dev)
static size_t macsec_get_size(const struct net_device *dev)
{
return 0 +
- nla_total_size(8) + /* SCI */
+ nla_total_size_64bit(8) + /* SCI */
nla_total_size(1) + /* ICV_LEN */
- nla_total_size(8) + /* CIPHER_SUITE */
+ nla_total_size_64bit(8) + /* CIPHER_SUITE */
nla_total_size(4) + /* WINDOW */
nla_total_size(1) + /* ENCODING_SA */
nla_total_size(1) + /* ENCRYPT */
@@ -3192,10 +3269,11 @@ static int macsec_fill_info(struct sk_buff *skb,
struct macsec_secy *secy = &macsec_priv(dev)->secy;
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
- if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) ||
+ if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
+ IFLA_MACSEC_PAD) ||
nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
- nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE,
- MACSEC_DEFAULT_CIPHER_ID) ||
+ nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
+ MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
@@ -3313,6 +3391,7 @@ static void __exit macsec_exit(void)
genl_unregister_family(&macsec_fam);
rtnl_link_unregister(&macsec_link_ops);
unregister_netdevice_notifier(&macsec_notifier);
+ rcu_barrier();
}
module_init(macsec_init);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 2bcf1f321..cb01023ea 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -795,6 +795,7 @@ static int macvlan_init(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
const struct net_device *lowerdev = vlan->lowerdev;
+ struct macvlan_port *port = vlan->port;
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
(lowerdev->state & MACVLAN_STATE_MASK);
@@ -812,6 +813,8 @@ static int macvlan_init(struct net_device *dev)
if (!vlan->pcpu_stats)
return -ENOMEM;
+ port->count += 1;
+
return 0;
}
@@ -1312,10 +1315,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
return err;
}
- port->count += 1;
err = register_netdevice(dev);
if (err < 0)
- goto destroy_port;
+ return err;
dev->priv_flags |= IFF_MACVLAN;
err = netdev_upper_dev_link(lowerdev, dev);
@@ -1330,10 +1332,6 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
unregister_netdev:
unregister_netdevice(dev);
-destroy_port:
- port->count -= 1;
- if (!port->count)
- macvlan_port_destroy(lowerdev);
return err;
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 9a35aa462..bd6720962 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -129,7 +129,18 @@ static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);
#define GOODCOPY_LEN 128
-static struct class *macvtap_class;
+static const void *macvtap_net_namespace(struct device *d)
+{
+ struct net_device *dev = to_net_dev(d->parent);
+ return dev_net(dev);
+}
+
+static struct class macvtap_class = {
+ .name = "macvtap",
+ .owner = THIS_MODULE,
+ .ns_type = &net_ns_type_operations,
+ .namespace = macvtap_net_namespace,
+};
static struct cdev macvtap_cdev;
static const struct proto_ops macvtap_socket_ops;
@@ -1278,10 +1289,12 @@ static int macvtap_device_event(struct notifier_block *unused,
struct device *classdev;
dev_t devt;
int err;
+ char tap_name[IFNAMSIZ];
if (dev->rtnl_link_ops != &macvtap_link_ops)
return NOTIFY_DONE;
+ snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex);
vlan = netdev_priv(dev);
switch (event) {
@@ -1295,16 +1308,24 @@ static int macvtap_device_event(struct notifier_block *unused,
return notifier_from_errno(err);
devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
- classdev = device_create(macvtap_class, &dev->dev, devt,
- dev, "tap%d", dev->ifindex);
+ classdev = device_create(&macvtap_class, &dev->dev, devt,
+ dev, tap_name);
if (IS_ERR(classdev)) {
macvtap_free_minor(vlan);
return notifier_from_errno(PTR_ERR(classdev));
}
+ err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj,
+ tap_name);
+ if (err)
+ return notifier_from_errno(err);
break;
case NETDEV_UNREGISTER:
+ /* vlan->minor == 0 if NETDEV_REGISTER above failed */
+ if (vlan->minor == 0)
+ break;
+ sysfs_remove_link(&dev->dev.kobj, tap_name);
devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
- device_destroy(macvtap_class, devt);
+ device_destroy(&macvtap_class, devt);
macvtap_free_minor(vlan);
break;
}
@@ -1330,11 +1351,9 @@ static int macvtap_init(void)
if (err)
goto out2;
- macvtap_class = class_create(THIS_MODULE, "macvtap");
- if (IS_ERR(macvtap_class)) {
- err = PTR_ERR(macvtap_class);
+ err = class_register(&macvtap_class);
+ if (err)
goto out3;
- }
err = register_netdevice_notifier(&macvtap_notifier_block);
if (err)
@@ -1349,7 +1368,7 @@ static int macvtap_init(void)
out5:
unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
- class_unregister(macvtap_class);
+ class_unregister(&macvtap_class);
out3:
cdev_del(&macvtap_cdev);
out2:
@@ -1363,7 +1382,7 @@ static void macvtap_exit(void)
{
rtnl_link_unregister(&macvtap_link_ops);
unregister_netdevice_notifier(&macvtap_notifier_block);
- class_unregister(macvtap_class);
+ class_unregister(&macvtap_class);
cdev_del(&macvtap_cdev);
unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
idr_destroy(&minor_idr);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 2afa61b51..91177a4a3 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -57,6 +57,7 @@
/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
+#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
@@ -133,8 +134,8 @@ static int dp83867_of_init(struct phy_device *phydev)
static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867;
- int ret;
- u16 val, delay;
+ int ret, val;
+ u16 delay;
if (!phydev->priv) {
dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
@@ -151,8 +152,12 @@ static int dp83867_config_init(struct phy_device *phydev)
}
if (phy_interface_is_rgmii(phydev)) {
- ret = phy_write(phydev, MII_DP83867_PHYCTRL,
- (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+ val = phy_read(phydev, MII_DP83867_PHYCTRL);
+ if (val < 0)
+ return val;
+ val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
+ val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
if (ret)
return ret;
}
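
The fix turns a blind write into a read-modify-write so the FIFO-depth field can change without clobbering the other PHYCTRL bits. The three-line sequence recurs often enough to be worth a helper; a hedged sketch built only on phy_read()/phy_write(), name hypothetical:

	static int foo_phy_modify(struct phy_device *phydev, u32 regnum,
				  u16 mask, u16 set)
	{
		int val = phy_read(phydev, regnum);

		if (val < 0)
			return val;	/* propagate the MDIO read error */

		return phy_write(phydev, regnum, ((u16)val & ~mask) | set);
	}
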
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index fc07a8866..9ec7f7353 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/gpio.h>
+#include <linux/idr.h>
#define MII_REGS_NUM 29
@@ -255,7 +256,8 @@ int fixed_phy_add(unsigned int irq, int phy_addr,
memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM);
- fmb->mii_bus->irq[phy_addr] = irq;
+ if (irq != PHY_POLL)
+ fmb->mii_bus->irq[phy_addr] = irq;
fp->addr = phy_addr;
fp->status = *status;
@@ -285,6 +287,8 @@ err_regs:
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
+static DEFINE_IDA(phy_fixed_ida);
+
static void fixed_phy_del(int phy_addr)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
@@ -296,14 +300,12 @@ static void fixed_phy_del(int phy_addr)
if (gpio_is_valid(fp->link_gpio))
gpio_free(fp->link_gpio);
kfree(fp);
+ ida_simple_remove(&phy_fixed_ida, phy_addr);
return;
}
}
}
-static int phy_fixed_addr;
-static DEFINE_SPINLOCK(phy_fixed_addr_lock);
-
struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
int link_gpio,
@@ -314,21 +316,22 @@ struct phy_device *fixed_phy_register(unsigned int irq,
int phy_addr;
int ret;
+ if (!fmb->mii_bus || fmb->mii_bus->state != MDIOBUS_REGISTERED)
+ return ERR_PTR(-EPROBE_DEFER);
+
/* Get the next available PHY address, up to PHY_MAX_ADDR */
- spin_lock(&phy_fixed_addr_lock);
- if (phy_fixed_addr == PHY_MAX_ADDR) {
- spin_unlock(&phy_fixed_addr_lock);
- return ERR_PTR(-ENOSPC);
- }
- phy_addr = phy_fixed_addr++;
- spin_unlock(&phy_fixed_addr_lock);
+ phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
+ if (phy_addr < 0)
+ return ERR_PTR(phy_addr);
ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
- if (ret < 0)
+ if (ret < 0) {
+ ida_simple_remove(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
+ }
phy = get_phy_device(fmb->mii_bus, phy_addr, false);
- if (!phy || IS_ERR(phy)) {
+ if (IS_ERR(phy)) {
fixed_phy_del(phy_addr);
return ERR_PTR(-EINVAL);
}
@@ -430,6 +433,7 @@ static void __exit fixed_mdio_bus_exit(void)
list_del(&fp->node);
kfree(fp);
}
+ ida_destroy(&phy_fixed_ida);
}
module_exit(fixed_mdio_bus_exit);
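
The global address counter guarded by a spinlock becomes an IDA, which both hands out the lowest free address and lets fixed_phy_del() return addresses for reuse. Minimal sketch of the pattern in isolation, names hypothetical:

	#include <linux/idr.h>

	static DEFINE_IDA(foo_slot_ida);

	static int foo_get_slot(void)	/* lowest free id in [0, 32), or -errno */
	{
		return ida_simple_get(&foo_slot_ida, 0, 32, GFP_KERNEL);
	}

	static void foo_put_slot(int id) /* makes the id available again */
	{
		ida_simple_remove(&foo_slot_ida, id);
	}

No external lock is needed; the IDA serializes internally.
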
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index f6078376e..b9fde1bcf 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -80,23 +80,15 @@ static int lxt970_ack_interrupt(struct phy_device *phydev)
static int lxt970_config_intr(struct phy_device *phydev)
{
- int err;
-
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
+ return phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
else
- err = phy_write(phydev, MII_LXT970_IER, 0);
-
- return err;
+ return phy_write(phydev, MII_LXT970_IER, 0);
}
static int lxt970_config_init(struct phy_device *phydev)
{
- int err;
-
- err = phy_write(phydev, MII_LXT970_CONFIG, 0);
-
- return err;
+ return phy_write(phydev, MII_LXT970_CONFIG, 0);
}
@@ -112,14 +104,10 @@ static int lxt971_ack_interrupt(struct phy_device *phydev)
static int lxt971_config_intr(struct phy_device *phydev)
{
- int err;
-
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
+ return phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
else
- err = phy_write(phydev, MII_LXT971_IER, 0);
-
- return err;
+ return phy_write(phydev, MII_LXT971_IER, 0);
}
/*
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 280e8795b..ec2c1eee6 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -285,6 +285,48 @@ static int marvell_config_aneg(struct phy_device *phydev)
return 0;
}
+static int m88e1111_config_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ /* The Marvell PHY has an erratum which requires
+ * that certain registers be written in order
+ * to restart autonegotiation.
+ */
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+ err = marvell_set_polarity(phydev, phydev->mdix);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_M1111_PHY_LED_CONTROL,
+ MII_M1111_PHY_LED_DIRECT);
+ if (err < 0)
+ return err;
+
+ err = genphy_config_aneg(phydev);
+ if (err < 0)
+ return err;
+
+ if (phydev->autoneg != AUTONEG_ENABLE) {
+ int bmcr;
+
+ /* A write to speed/duplex bits (that is performed by
+ * genphy_config_aneg() call above) must be followed by
+ * a software reset. Otherwise, the write has no effect.
+ */
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_OF_MDIO
/*
* Set and/or override some configuration registers based on the
@@ -407,15 +449,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
-
- phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
- phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
- phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
-
- err = genphy_config_aneg(phydev);
-
- return err;
+ return genphy_config_aneg(phydev);
}
static int m88e1318_config_aneg(struct phy_device *phydev)
@@ -636,6 +670,28 @@ static int m88e1111_config_init(struct phy_device *phydev)
return phy_write(phydev, MII_BMCR, BMCR_RESET);
}
+static int m88e1121_config_init(struct phy_device *phydev)
+{
+ int err, oldpage;
+
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
+ if (err < 0)
+ return err;
+
+ /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
+ err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
+ MII_88E1121_PHY_LED_DEF);
+ if (err < 0)
+ return err;
+
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+
+ /* Set marvell,reg-init configuration from device tree */
+ return marvell_config_init(phydev);
+}
+
static int m88e1510_config_init(struct phy_device *phydev)
{
int err;
@@ -668,7 +724,7 @@ static int m88e1510_config_init(struct phy_device *phydev)
return err;
}
- return marvell_config_init(phydev);
+ return m88e1121_config_init(phydev);
}
static int m88e1118_config_aneg(struct phy_device *phydev)
@@ -1161,7 +1217,7 @@ static struct phy_driver marvell_drivers[] = {
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
- .config_aneg = &marvell_config_aneg,
+ .config_aneg = &m88e1111_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
@@ -1196,7 +1252,7 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_init = &marvell_config_init,
+ .config_init = &m88e1121_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
@@ -1215,7 +1271,7 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_init = &marvell_config_init,
+ .config_init = &m88e1121_config_init,
.config_aneg = &m88e1318_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 308ade0eb..5c81d6faf 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -45,13 +45,7 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
struct mdio_mux_parent_bus *pb = cb->parent;
int r;
- /* In theory multiple mdio_mux could be stacked, thus creating
- * more than a single level of nesting. But in practice,
- * SINGLE_DEPTH_NESTING will cover the vast majority of use
- * cases. We use it, instead of trying to handle the general
- * case.
- */
- mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
if (r)
goto out;
@@ -76,7 +70,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
int r;
- mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
if (r)
goto out;
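
Both call sites move from the generic SINGLE_DEPTH_NESTING to a dedicated lockdep subclass, so a mux stacked on a nested bus no longer produces false-positive deadlock reports. The subclasses are an enum in include/linux/mdio.h in this series, approximately (from memory; verify against the header):

	enum mdio_mutex_lock_class {
		MDIO_MUTEX_NORMAL,
		MDIO_MUTEX_MUX,
		MDIO_MUTEX_NESTED,
	};
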
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 0cba64f1e..09deef4be 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -333,7 +333,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
struct phy_device *phydev;
phydev = mdiobus_scan(bus, i);
- if (IS_ERR(phydev)) {
+ if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV)) {
err = PTR_ERR(phydev);
goto error;
}
@@ -419,7 +419,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
int err;
phydev = get_phy_device(bus, addr, false);
- if (IS_ERR(phydev) || phydev == NULL)
+ if (IS_ERR(phydev))
return phydev;
/*
@@ -431,7 +431,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
err = phy_device_register(phydev);
if (err) {
phy_device_free(phydev);
- return NULL;
+ return ERR_PTR(-ENODEV);
}
return phydev;
@@ -457,7 +457,7 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
BUG_ON(in_interrupt());
- mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
retval = bus->read(bus, addr, regnum);
mutex_unlock(&bus->mdio_lock);
@@ -509,7 +509,7 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
BUG_ON(in_interrupt());
- mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
err = bus->write(bus, addr, regnum, val);
mutex_unlock(&bus->mdio_lock);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 4516c8a4f..5a8fefc25 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -726,7 +726,7 @@ static int kszphy_probe(struct phy_device *phydev)
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KS8737",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -781,7 +781,7 @@ static struct phy_driver ksphy_driver[] = {
.resume = genphy_resume,
}, {
.phy_id = PHY_ID_KSZ8041,
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
@@ -800,7 +800,7 @@ static struct phy_driver ksphy_driver[] = {
.resume = genphy_resume,
}, {
.phy_id = PHY_ID_KSZ8041RNLI,
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041RNLI",
.features = PHY_BASIC_FEATURES |
SUPPORTED_Pause | SUPPORTED_Asym_Pause,
@@ -819,7 +819,7 @@ static struct phy_driver ksphy_driver[] = {
.resume = genphy_resume,
}, {
.phy_id = PHY_ID_KSZ8051,
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8051",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
@@ -857,7 +857,7 @@ static struct phy_driver ksphy_driver[] = {
}, {
.phy_id = PHY_ID_KSZ8081,
.name = "Micrel KSZ8081 or KSZ8091",
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8081_type,
@@ -875,7 +875,7 @@ static struct phy_driver ksphy_driver[] = {
}, {
.phy_id = PHY_ID_KSZ8061,
.name = "Micrel KSZ8061",
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
@@ -909,7 +909,7 @@ static struct phy_driver ksphy_driver[] = {
.write_mmd_indirect = ksz9021_wr_mmd_phyreg,
}, {
.phy_id = PHY_ID_KSZ9031,
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -926,7 +926,7 @@ static struct phy_driver ksphy_driver[] = {
.resume = genphy_resume,
}, {
.phy_id = PHY_ID_KSZ8873MLL,
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8873MLL Switch",
.features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG,
@@ -940,7 +940,7 @@ static struct phy_driver ksphy_driver[] = {
.resume = genphy_resume,
}, {
.phy_id = PHY_ID_KSZ886X,
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ886X Switch",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -962,17 +962,17 @@ MODULE_LICENSE("GPL");
static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
- { PHY_ID_KSZ9031, 0x00fffff0 },
+ { PHY_ID_KSZ9031, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8001, 0x00ffffff },
- { PHY_ID_KS8737, 0x00fffff0 },
+ { PHY_ID_KS8737, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8021, 0x00ffffff },
{ PHY_ID_KSZ8031, 0x00ffffff },
- { PHY_ID_KSZ8041, 0x00fffff0 },
- { PHY_ID_KSZ8051, 0x00fffff0 },
- { PHY_ID_KSZ8061, 0x00fffff0 },
- { PHY_ID_KSZ8081, 0x00fffff0 },
- { PHY_ID_KSZ8873MLL, 0x00fffff0 },
- { PHY_ID_KSZ886X, 0x00fffff0 },
+ { PHY_ID_KSZ8041, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ8051, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ8061, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 445fc5aef..c5dc2c363 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -362,6 +362,60 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
}
EXPORT_SYMBOL(phy_ethtool_sset);
+int phy_ethtool_ksettings_set(struct phy_device *phydev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ u8 autoneg = cmd->base.autoneg;
+ u8 duplex = cmd->base.duplex;
+ u32 speed = cmd->base.speed;
+ u32 advertising;
+
+ if (cmd->base.phy_address != phydev->mdio.addr)
+ return -EINVAL;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ /* We make sure that we don't pass unsupported values into the PHY */
+ advertising &= phydev->supported;
+
+ /* Verify the settings we care about. */
+ if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (autoneg == AUTONEG_ENABLE && advertising == 0)
+ return -EINVAL;
+
+ if (autoneg == AUTONEG_DISABLE &&
+ ((speed != SPEED_1000 &&
+ speed != SPEED_100 &&
+ speed != SPEED_10) ||
+ (duplex != DUPLEX_HALF &&
+ duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ phydev->autoneg = autoneg;
+
+ phydev->speed = speed;
+
+ phydev->advertising = advertising;
+
+ if (autoneg == AUTONEG_ENABLE)
+ phydev->advertising |= ADVERTISED_Autoneg;
+ else
+ phydev->advertising &= ~ADVERTISED_Autoneg;
+
+ phydev->duplex = duplex;
+
+ phydev->mdix = cmd->base.eth_tp_mdix_ctrl;
+
+ /* Restart the PHY */
+ phy_start_aneg(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_ksettings_set);
+
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
cmd->supported = phydev->supported;
@@ -385,6 +439,33 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
}
EXPORT_SYMBOL(phy_ethtool_gset);
+int phy_ethtool_ksettings_get(struct phy_device *phydev,
+ struct ethtool_link_ksettings *cmd)
+{
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ phydev->supported);
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ phydev->advertising);
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ phydev->lp_advertising);
+
+ cmd->base.speed = phydev->speed;
+ cmd->base.duplex = phydev->duplex;
+ if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
+ cmd->base.port = PORT_BNC;
+ else
+ cmd->base.port = PORT_MII;
+
+ cmd->base.phy_address = phydev->mdio.addr;
+ cmd->base.autoneg = phydev->autoneg;
+ cmd->base.eth_tp_mdix_ctrl = phydev->mdix;
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_ksettings_get);
+
/**
* phy_mii_ioctl - generic PHY MII ioctl interface
* @phydev: the phy_device struct
@@ -1268,3 +1349,27 @@ void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);
+
+int phy_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct phy_device *phydev = ndev->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_ksettings_get(phydev, cmd);
+}
+EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);
+
+int phy_ethtool_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct phy_device *phydev = ndev->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_ksettings_set(phydev, cmd);
+}
+EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);
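
The two exported wrappers exist so a MAC driver with a phylib-managed PHY can fill its ethtool_ops with one line each; a sketch of the intended consumer, driver names hypothetical:

	static const struct ethtool_ops foo_ethtool_ops = {
		.get_link_ksettings = phy_ethtool_get_link_ksettings,
		.set_link_ksettings = phy_ethtool_set_link_ksettings,
	};

	/* in foo_probe(): ndev->ethtool_ops = &foo_ethtool_ops;
	 * requires ndev->phydev to be set, e.g. by phy_connect(). */
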
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e551f3a89..e977ba931 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -529,7 +529,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
/* If the phy_id is mostly Fs, there is no device there */
if ((phy_id & 0x1fffffff) == 0x1fffffff)
- return NULL;
+ return ERR_PTR(-ENODEV);
return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
}
@@ -1123,8 +1123,9 @@ static int genphy_config_advert(struct phy_device *phydev)
*/
int genphy_setup_forced(struct phy_device *phydev)
{
- int ctl = 0;
+ int ctl = phy_read(phydev, MII_BMCR);
+ ctl &= BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 2e21e9366..b62c4aaee 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -75,22 +75,13 @@ static int smsc_phy_reset(struct phy_device *phydev)
* in all capable mode before using it.
*/
if ((rc & MII_LAN83C185_MODE_MASK) == MII_LAN83C185_MODE_POWERDOWN) {
- int timeout = 50000;
-
- /* set "all capable" mode and reset the phy */
+ /* set "all capable" mode */
rc |= MII_LAN83C185_MODE_ALL;
phy_write(phydev, MII_LAN83C185_SPECIAL_MODES, rc);
- phy_write(phydev, MII_BMCR, BMCR_RESET);
-
- /* wait end of reset (max 500 ms) */
- do {
- udelay(10);
- if (timeout-- == 0)
- return -1;
- rc = phy_read(phydev, MII_BMCR);
- } while (rc & BMCR_RESET);
}
- return 0;
+
+ /* reset the phy */
+ return genphy_soft_reset(phydev);
}
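
genphy_soft_reset() replaces the driver's hand-rolled udelay() poll with the generic reset-and-wait helper; in this series it is essentially the following (reproduced from memory, treat as a sketch):

	int genphy_soft_reset(struct phy_device *phydev)
	{
		int ret;

		ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
		if (ret < 0)
			return ret;

		return phy_poll_reset(phydev); /* waits for BMCR_RESET to self-clear */
	}

It also returns a proper -errno instead of the bare -1 the old loop produced.
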
static int lan911x_config_init(struct phy_device *phydev)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index f572b31a2..a30ee427e 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -46,6 +46,7 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/file.h>
#include <asm/unaligned.h>
#include <net/slhc_vj.h>
#include <linux/atomic.h>
@@ -183,6 +184,12 @@ struct channel {
#endif /* CONFIG_PPP_MULTILINK */
};
+struct ppp_config {
+ struct file *file;
+ s32 unit;
+ bool ifname_is_set;
+};
+
/*
* SMP locking issues:
* Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
@@ -269,8 +276,7 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit,
- struct file *file, int *retp);
+static int ppp_create_interface(struct net *net, struct file *file, int *unit);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
@@ -282,6 +288,7 @@ static int unit_get(struct idr *p, void *ptr);
static int unit_set(struct idr *p, void *ptr, int n);
static void unit_put(struct idr *p, int n);
static void *unit_find(struct idr *p, int n);
+static void ppp_setup(struct net_device *dev);
static const struct net_device_ops ppp_netdev_ops;
@@ -853,12 +860,12 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
/* Create a new ppp unit */
if (get_user(unit, p))
break;
- ppp = ppp_create_interface(net, unit, file, &err);
- if (!ppp)
+ err = ppp_create_interface(net, file, &unit);
+ if (err < 0)
break;
- file->private_data = &ppp->file;
+
err = -EFAULT;
- if (put_user(ppp->file.index, p))
+ if (put_user(unit, p))
break;
err = 0;
break;
@@ -960,6 +967,188 @@ static struct pernet_operations ppp_net_ops = {
.size = sizeof(struct ppp_net),
};
+static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
+{
+ struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+ int ret;
+
+ mutex_lock(&pn->all_ppp_mutex);
+
+ if (unit < 0) {
+ ret = unit_get(&pn->units_idr, ppp);
+ if (ret < 0)
+ goto err;
+ } else {
+ /* Caller asked for a specific unit number. Fail with -EEXIST
+ * if unavailable. For backward compatibility, return -EEXIST
+ * too if idr allocation fails; this makes pppd retry without
+ * requesting a specific unit number.
+ */
+ if (unit_find(&pn->units_idr, unit)) {
+ ret = -EEXIST;
+ goto err;
+ }
+ ret = unit_set(&pn->units_idr, ppp, unit);
+ if (ret < 0) {
+ /* Rewrite error for backward compatibility */
+ ret = -EEXIST;
+ goto err;
+ }
+ }
+ ppp->file.index = ret;
+
+ if (!ifname_is_set)
+ snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
+
+ ret = register_netdevice(ppp->dev);
+ if (ret < 0)
+ goto err_unit;
+
+ atomic_inc(&ppp_unit_count);
+
+ mutex_unlock(&pn->all_ppp_mutex);
+
+ return 0;
+
+err_unit:
+ unit_put(&pn->units_idr, ppp->file.index);
+err:
+ mutex_unlock(&pn->all_ppp_mutex);
+
+ return ret;
+}
+
+static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
+ const struct ppp_config *conf)
+{
+ struct ppp *ppp = netdev_priv(dev);
+ int indx;
+ int err;
+
+ ppp->dev = dev;
+ ppp->ppp_net = src_net;
+ ppp->mru = PPP_MRU;
+ ppp->owner = conf->file;
+
+ init_ppp_file(&ppp->file, INTERFACE);
+ ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
+
+ for (indx = 0; indx < NUM_NP; ++indx)
+ ppp->npmode[indx] = NPMODE_PASS;
+ INIT_LIST_HEAD(&ppp->channels);
+ spin_lock_init(&ppp->rlock);
+ spin_lock_init(&ppp->wlock);
+#ifdef CONFIG_PPP_MULTILINK
+ ppp->minseq = -1;
+ skb_queue_head_init(&ppp->mrq);
+#endif /* CONFIG_PPP_MULTILINK */
+#ifdef CONFIG_PPP_FILTER
+ ppp->pass_filter = NULL;
+ ppp->active_filter = NULL;
+#endif /* CONFIG_PPP_FILTER */
+
+ err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
+ if (err < 0)
+ return err;
+
+ conf->file->private_data = &ppp->file;
+
+ return 0;
+}
+
+static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
+ [IFLA_PPP_DEV_FD] = { .type = NLA_S32 },
+};
+
+static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (!data)
+ return -EINVAL;
+
+ if (!data[IFLA_PPP_DEV_FD])
+ return -EINVAL;
+ if (nla_get_s32(data[IFLA_PPP_DEV_FD]) < 0)
+ return -EBADF;
+
+ return 0;
+}
+
+static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct ppp_config conf = {
+ .unit = -1,
+ .ifname_is_set = true,
+ };
+ struct file *file;
+ int err;
+
+ file = fget(nla_get_s32(data[IFLA_PPP_DEV_FD]));
+ if (!file)
+ return -EBADF;
+
+ /* rtnl_lock is already held here, but ppp_create_interface() locks
+ * ppp_mutex before holding rtnl_lock. Using mutex_trylock() avoids
+ * possible deadlock due to lock order inversion, at the cost of
+ * pushing the problem back to userspace.
+ */
+ if (!mutex_trylock(&ppp_mutex)) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (file->f_op != &ppp_device_fops || file->private_data) {
+ err = -EBADF;
+ goto out_unlock;
+ }
+
+ conf.file = file;
+ err = ppp_dev_configure(src_net, dev, &conf);
+
+out_unlock:
+ mutex_unlock(&ppp_mutex);
+out:
+ fput(file);
+
+ return err;
+}
+
+static void ppp_nl_dellink(struct net_device *dev, struct list_head *head)
+{
+ unregister_netdevice_queue(dev, head);
+}
+
+static size_t ppp_nl_get_size(const struct net_device *dev)
+{
+ return 0;
+}
+
+static int ppp_nl_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ return 0;
+}
+
+static struct net *ppp_nl_get_link_net(const struct net_device *dev)
+{
+ struct ppp *ppp = netdev_priv(dev);
+
+ return ppp->ppp_net;
+}
+
+static struct rtnl_link_ops ppp_link_ops __read_mostly = {
+ .kind = "ppp",
+ .maxtype = IFLA_PPP_MAX,
+ .policy = ppp_nl_policy,
+ .priv_size = sizeof(struct ppp),
+ .setup = ppp_setup,
+ .validate = ppp_nl_validate,
+ .newlink = ppp_nl_newlink,
+ .dellink = ppp_nl_dellink,
+ .get_size = ppp_nl_get_size,
+ .fill_info = ppp_nl_fill_info,
+ .get_link_net = ppp_nl_get_link_net,
+};
+
#define PPP_MAJOR 108
/* Called at boot time if ppp is compiled into the kernel,
@@ -988,11 +1177,19 @@ static int __init ppp_init(void)
goto out_chrdev;
}
+ err = rtnl_link_register(&ppp_link_ops);
+ if (err) {
+ pr_err("failed to register rtnetlink PPP handler\n");
+ goto out_class;
+ }
+
/* not a big deal if we fail here :-) */
device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
return 0;
+out_class:
+ class_destroy(ppp_class);
out_chrdev:
unregister_chrdev(PPP_MAJOR, "ppp");
out_net:
@@ -2404,8 +2601,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
- put_net(pch->chan_net);
- pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
@@ -2732,102 +2927,42 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
* or if there is already a unit with the requested number.
* unit == -1 means allocate a new number.
*/
-static struct ppp *ppp_create_interface(struct net *net, int unit,
- struct file *file, int *retp)
+static int ppp_create_interface(struct net *net, struct file *file, int *unit)
{
+ struct ppp_config conf = {
+ .file = file,
+ .unit = *unit,
+ .ifname_is_set = false,
+ };
+ struct net_device *dev;
struct ppp *ppp;
- struct ppp_net *pn;
- struct net_device *dev = NULL;
- int ret = -ENOMEM;
- int i;
+ int err;
dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup);
- if (!dev)
- goto out1;
-
- pn = ppp_pernet(net);
-
- ppp = netdev_priv(dev);
- ppp->dev = dev;
- ppp->mru = PPP_MRU;
- init_ppp_file(&ppp->file, INTERFACE);
- ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
- ppp->owner = file;
- for (i = 0; i < NUM_NP; ++i)
- ppp->npmode[i] = NPMODE_PASS;
- INIT_LIST_HEAD(&ppp->channels);
- spin_lock_init(&ppp->rlock);
- spin_lock_init(&ppp->wlock);
-#ifdef CONFIG_PPP_MULTILINK
- ppp->minseq = -1;
- skb_queue_head_init(&ppp->mrq);
-#endif /* CONFIG_PPP_MULTILINK */
-#ifdef CONFIG_PPP_FILTER
- ppp->pass_filter = NULL;
- ppp->active_filter = NULL;
-#endif /* CONFIG_PPP_FILTER */
-
- /*
- * drum roll: don't forget to set
- * the net device is belong to
- */
+ if (!dev) {
+ err = -ENOMEM;
+ goto err;
+ }
dev_net_set(dev, net);
+ dev->rtnl_link_ops = &ppp_link_ops;
rtnl_lock();
- mutex_lock(&pn->all_ppp_mutex);
- if (unit < 0) {
- unit = unit_get(&pn->units_idr, ppp);
- if (unit < 0) {
- ret = unit;
- goto out2;
- }
- } else {
- ret = -EEXIST;
- if (unit_find(&pn->units_idr, unit))
- goto out2; /* unit already exists */
- /*
- * if caller need a specified unit number
- * lets try to satisfy him, otherwise --
- * he should better ask us for new unit number
- *
- * NOTE: yes I know that returning EEXIST it's not
- * fair but at least pppd will ask us to allocate
- * new unit in this case so user is happy :)
- */
- unit = unit_set(&pn->units_idr, ppp, unit);
- if (unit < 0)
- goto out2;
- }
-
- /* Initialize the new ppp unit */
- ppp->file.index = unit;
- sprintf(dev->name, "ppp%d", unit);
-
- ret = register_netdevice(dev);
- if (ret != 0) {
- unit_put(&pn->units_idr, unit);
- netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
- dev->name, ret);
- goto out2;
- }
-
- ppp->ppp_net = net;
+ err = ppp_dev_configure(net, dev, &conf);
+ if (err < 0)
+ goto err_dev;
+ ppp = netdev_priv(dev);
+ *unit = ppp->file.index;
- atomic_inc(&ppp_unit_count);
- mutex_unlock(&pn->all_ppp_mutex);
rtnl_unlock();
- *retp = 0;
- return ppp;
+ return 0;
-out2:
- mutex_unlock(&pn->all_ppp_mutex);
+err_dev:
rtnl_unlock();
free_netdev(dev);
-out1:
- *retp = ret;
- return NULL;
+err:
+ return err;
}
/*
@@ -2999,6 +3134,9 @@ ppp_disconnect_channel(struct channel *pch)
*/
static void ppp_destroy_channel(struct channel *pch)
{
+ put_net(pch->chan_net);
+ pch->chan_net = NULL;
+
atomic_dec(&channel_count);
if (!pch->file.dead) {
@@ -3016,6 +3154,7 @@ static void __exit ppp_cleanup(void)
/* should never happen */
if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
pr_err("PPP: removing module but units remain!\n");
+ rtnl_link_unregister(&ppp_link_ops);
unregister_chrdev(PPP_MAJOR, "ppp");
device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
class_destroy(ppp_class);
@@ -3074,4 +3213,5 @@ EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
+MODULE_ALIAS_RTNL_LINK("ppp");
MODULE_ALIAS("devname:ppp");
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 9cfe6aeac..a31f4610b 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -179,11 +179,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
unsigned long flags;
int add_num = 1;
- local_irq_save(flags);
- if (!spin_trylock(&rnet->tx_lock)) {
- local_irq_restore(flags);
- return NETDEV_TX_LOCKED;
- }
+ spin_lock_irqsave(&rnet->tx_lock, flags);
if (is_multicast_ether_addr(eth->h_dest))
add_num = nets[rnet->mport->id].nact;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index a17d86a57..9ed6d1c1e 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -407,7 +407,7 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
#ifdef SL_CHECK_TRANSMIT
- sl->dev->trans_start = jiffies;
+ netif_trans_update(sl->dev);
#endif
sl->xleft = count - actual;
sl->xhead = sl->xbuff + actual;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 2ace12653..fdee77207 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1203,8 +1203,10 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_dev_open;
}
+ netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
dev_mc_sync_multiple(port_dev, dev);
+ netif_addr_unlock_bh(dev);
err = vlan_vids_add_by_dev(port_dev, dev);
if (err) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index dda490542..e16487cc6 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -131,6 +131,17 @@ struct tap_filter {
#define TUN_FLOW_EXPIRE (3 * HZ)
+struct tun_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 rx_dropped;
+ u32 tx_dropped;
+ u32 rx_frame_errors;
+};
+
/* A tun_file connects an open character device to a tuntap netdevice. It
* also contains all socket related structures (except sock_fprog and tap_filter)
* to serve as one transmit queue for tuntap device. The sock_fprog and
@@ -205,6 +216,7 @@ struct tun_struct {
struct list_head disabled;
void *security;
u32 flow_count;
+ struct tun_pcpu_stats __percpu *pcpu_stats;
};
#ifdef CONFIG_TUN_VNET_CROSS_LE
@@ -624,8 +636,9 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
/* Re-attach the filter to persist device */
if (!skip_filter && (tun->filter_attached == true)) {
- err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
- lockdep_rtnl_is_held());
+ lock_sock(tfile->socket.sk);
+ err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ release_sock(tfile->socket.sk);
if (!err)
goto out;
}
@@ -823,7 +836,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
if (txq >= numqueues)
goto drop;
- if (numqueues == 1) {
+#ifdef CONFIG_RPS
+ if (numqueues == 1 && static_key_false(&rps_needed)) {
/* Select queue was not called for the skbuff, so we extract the
* RPS hash and save it into the flow_table here.
*/
@@ -838,6 +852,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
tun_flow_save_rps_rxhash(e, rxhash);
}
}
+#endif
tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
@@ -864,7 +879,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
if (skb->sk && sk_fullsock(skb->sk)) {
- sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
+ sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags,
+ &skb_shinfo(skb)->tx_flags);
sw_tx_timestamp(skb);
}
@@ -887,7 +903,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
drop:
- dev->stats.tx_dropped++;
+ this_cpu_inc(tun->pcpu_stats->tx_dropped);
skb_tx_error(skb);
kfree_skb(skb);
rcu_read_unlock();
@@ -950,6 +966,43 @@ static void tun_set_headroom(struct net_device *dev, int new_hr)
tun->align = new_hr;
}
+static struct rtnl_link_stats64 *
+tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
+ struct tun_struct *tun = netdev_priv(dev);
+ struct tun_pcpu_stats *p;
+ int i;
+
+ for_each_possible_cpu(i) {
+ u64 rxpackets, rxbytes, txpackets, txbytes;
+ unsigned int start;
+
+ p = per_cpu_ptr(tun->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ rxpackets = p->rx_packets;
+ rxbytes = p->rx_bytes;
+ txpackets = p->tx_packets;
+ txbytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+
+ stats->rx_packets += rxpackets;
+ stats->rx_bytes += rxbytes;
+ stats->tx_packets += txpackets;
+ stats->tx_bytes += txbytes;
+
+ /* u32 counters */
+ rx_dropped += p->rx_dropped;
+ rx_frame_errors += p->rx_frame_errors;
+ tx_dropped += p->tx_dropped;
+ }
+ stats->rx_dropped = rx_dropped;
+ stats->rx_frame_errors = rx_frame_errors;
+ stats->tx_dropped = tx_dropped;
+ return stats;
+}
+
static const struct net_device_ops tun_netdev_ops = {
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
@@ -962,6 +1015,7 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_poll_controller = tun_poll_controller,
#endif
.ndo_set_rx_headroom = tun_set_headroom,
+ .ndo_get_stats64 = tun_net_get_stats64,
};
static const struct net_device_ops tap_netdev_ops = {
@@ -980,6 +1034,7 @@ static const struct net_device_ops tap_netdev_ops = {
#endif
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = tun_set_headroom,
+ .ndo_get_stats64 = tun_net_get_stats64,
};
static void tun_flow_init(struct tun_struct *tun)
@@ -1104,6 +1159,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
size_t total_len = iov_iter_count(from);
size_t len = total_len, align = tun->align, linear;
struct virtio_net_hdr gso = { 0 };
+ struct tun_pcpu_stats *stats;
int good_linear;
int copylen;
bool zerocopy = false;
@@ -1178,7 +1234,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
- tun->dev->stats.rx_dropped++;
+ this_cpu_inc(tun->pcpu_stats->rx_dropped);
return PTR_ERR(skb);
}
@@ -1193,7 +1249,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
if (err) {
- tun->dev->stats.rx_dropped++;
+ this_cpu_inc(tun->pcpu_stats->rx_dropped);
kfree_skb(skb);
return -EFAULT;
}
@@ -1201,7 +1257,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
tun16_to_cpu(tun, gso.csum_offset))) {
- tun->dev->stats.rx_frame_errors++;
+ this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
kfree_skb(skb);
return -EINVAL;
}
@@ -1218,7 +1274,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
pi.proto = htons(ETH_P_IPV6);
break;
default:
- tun->dev->stats.rx_dropped++;
+ this_cpu_inc(tun->pcpu_stats->rx_dropped);
kfree_skb(skb);
return -EINVAL;
}
@@ -1246,7 +1302,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
break;
default:
- tun->dev->stats.rx_frame_errors++;
+ this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
kfree_skb(skb);
return -EINVAL;
}
@@ -1256,7 +1312,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
if (skb_shinfo(skb)->gso_size == 0) {
- tun->dev->stats.rx_frame_errors++;
+ this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
kfree_skb(skb);
return -EINVAL;
}
@@ -1279,8 +1335,12 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
rxhash = skb_get_hash(skb);
netif_rx_ni(skb);
- tun->dev->stats.rx_packets++;
- tun->dev->stats.rx_bytes += len;
+ stats = get_cpu_ptr(tun->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+ put_cpu_ptr(stats);
tun_flow_update(tun, rxhash, tfile);
return total_len;
@@ -1309,6 +1369,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
struct iov_iter *iter)
{
struct tun_pi pi = { 0, skb->protocol };
+ struct tun_pcpu_stats *stats;
ssize_t total;
int vlan_offset = 0;
int vlan_hlen = 0;
@@ -1409,8 +1470,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
done:
- tun->dev->stats.tx_packets++;
- tun->dev->stats.tx_bytes += skb->len + vlan_hlen;
+ /* caller is in process context */
+ stats = get_cpu_ptr(tun->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len + vlan_hlen;
+ u64_stats_update_end(&stats->syncp);
+ put_cpu_ptr(tun->pcpu_stats);
return total;
}
@@ -1465,6 +1531,7 @@ static void tun_free_netdev(struct net_device *dev)
struct tun_struct *tun = netdev_priv(dev);
BUG_ON(!(list_empty(&tun->disabled)));
+ free_percpu(tun->pcpu_stats);
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
free_netdev(dev);
@@ -1713,11 +1780,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->filter_attached = false;
tun->sndbuf = tfile->socket.sk->sk_sndbuf;
+ tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
+ if (!tun->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
+
spin_lock_init(&tun->lock);
err = security_tun_dev_alloc_security(&tun->security);
if (err < 0)
- goto err_free_dev;
+ goto err_free_stat;
tun_net_init(dev);
tun_flow_init(tun);
@@ -1725,7 +1798,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
- dev->features = dev->hw_features;
+ dev->features = dev->hw_features | NETIF_F_LLTX;
dev->vlan_features = dev->features &
~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
@@ -1761,6 +1834,8 @@ err_detach:
err_free_flow:
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
+err_free_stat:
+ free_percpu(tun->pcpu_stats);
err_free_dev:
free_netdev(dev);
return err;
@@ -1823,7 +1898,9 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- __sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
+ lock_sock(tfile->socket.sk);
+ sk_detach_filter(tfile->socket.sk);
+ release_sock(tfile->socket.sk);
}
tun->filter_attached = false;
@@ -1836,8 +1913,9 @@ static int tun_attach_filter(struct tun_struct *tun)
for (i = 0; i < tun->numqueues; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
- lockdep_rtnl_is_held());
+ lock_sock(tfile->socket.sk);
+ ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ release_sock(tfile->socket.sk);
if (ret) {
tun_detach_filter(tun, i);
return ret;
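
The tun.c conversion above is an instance of the standard per-CPU stats pattern: allocate with netdev_alloc_pcpu_stats() (which also initializes each CPU's syncp), bump 64-bit counters inside u64_stats_update_begin()/end() on the hot path, and fold all CPUs in .ndo_get_stats64 with the fetch_begin/retry loop. A condensed sketch, assuming a hypothetical struct demo_pcpu_stats with only two counters:

struct demo_pcpu_stats {			/* hypothetical */
	u64 rx_packets;
	u64 rx_bytes;
	struct u64_stats_sync syncp;
};

static struct demo_pcpu_stats __percpu *demo_stats;

/* probe: */
demo_stats = netdev_alloc_pcpu_stats(struct demo_pcpu_stats);

/* hot path, process context: */
{
	struct demo_pcpu_stats *p = get_cpu_ptr(demo_stats);

	u64_stats_update_begin(&p->syncp);
	p->rx_packets++;
	p->rx_bytes += len;
	u64_stats_update_end(&p->syncp);
	put_cpu_ptr(demo_stats);
}

/* .ndo_get_stats64: fold every CPU's snapshot */
{
	int i;

	for_each_possible_cpu(i) {
		const struct demo_pcpu_stats *p = per_cpu_ptr(demo_stats, i);
		unsigned int start;
		u64 pkts, bytes;

		do {
			start = u64_stats_fetch_begin(&p->syncp);
			pkts = p->rx_packets;
			bytes = p->rx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
	}
}

Plain u32 counters such as rx_dropped are incremented with this_cpu_inc() and summed without the retry loop, exactly as the hunks above do; an aligned 32-bit read cannot tear.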
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 4e2b26a88..d9ca05d3a 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -376,7 +376,7 @@ static int catc_tx_run(struct catc *catc)
catc->tx_idx = !catc->tx_idx;
catc->tx_ptr = 0;
- catc->netdev->trans_start = jiffies;
+ netif_trans_update(catc->netdev);
return status;
}
@@ -389,7 +389,7 @@ static void catc_tx_done(struct urb *urb)
if (status == -ECONNRESET) {
dev_dbg(&urb->dev->dev, "Tx Reset.\n");
urb->status = 0;
- catc->netdev->trans_start = jiffies;
+ netif_trans_update(catc->netdev);
catc->netdev->stats.tx_errors++;
clear_bit(TX_RUNNING, &catc->flags);
netif_wake_queue(catc->netdev);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d4425c565..877c9516e 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -740,12 +740,14 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
{
struct usbnet *dev = netdev_priv(net);
- struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- int maxmtu = ctx->max_datagram_size - cdc_ncm_eth_hlen(dev);
+ int maxmtu = cdc_ncm_max_dgram_size(dev) - cdc_ncm_eth_hlen(dev);
if (new_mtu <= 0 || new_mtu > maxmtu)
return -EINVAL;
+
net->mtu = new_mtu;
+ cdc_ncm_set_dgram_size(dev, new_mtu + cdc_ncm_eth_hlen(dev));
+
return 0;
}
EXPORT_SYMBOL_GPL(cdc_ncm_change_mtu);
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index 5e151e6a3..8a40202c0 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -155,12 +155,11 @@ static int control_write(struct usbnet *dev, unsigned char request,
index, size);
if (data) {
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
- memcpy(buf, data, size);
}
err = usb_control_msg(dev->udev,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 111d907e0..4b4458616 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2029,7 +2029,7 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
tty = tty_port_tty_get(&serial->port);
- if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (tty && tty_throttled(tty)) {
tty_kref_put(tty);
return -1;
}
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index b0f65bcfa..52c72ed22 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -935,7 +935,7 @@ static void kaweth_tx_timeout(struct net_device *net)
dev_warn(&net->dev, "%s: Tx timed out. Resetting.\n", net->name);
kaweth->stats.tx_errors++;
- net->trans_start = jiffies;
+ netif_trans_update(net);
usb_unlink_urb(kaweth->tx_urb);
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f64778ad9..6a9d474b0 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3045,7 +3045,7 @@ gso_skb:
ret = usb_submit_urb(urb, GFP_ATOMIC);
switch (ret) {
case 0:
- dev->net->trans_start = jiffies;
+ netif_trans_update(dev->net);
lan78xx_queue_skb(&dev->txq, skb, tx_start);
if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
netif_stop_queue(dev->net);
@@ -3729,7 +3729,7 @@ int lan78xx_resume(struct usb_interface *intf)
usb_free_urb(res);
usb_autopm_put_interface_async(dev->intf);
} else {
- dev->net->trans_start = jiffies;
+ netif_trans_update(dev->net);
lan78xx_queue_skb(&dev->txq, skb, tx_start);
}
}
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 82129eef7..9bbe0161a 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -473,7 +473,7 @@ static void read_bulk_callback(struct urb *urb)
goto goon;
}
- if (!count || count < 4)
+ if (count < 4)
goto goon;
rx_status = buf[count - 2];
@@ -615,7 +615,7 @@ static void write_bulk_callback(struct urb *urb)
break;
}
- net->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(net); /* prevent tx timeout */
netif_wake_queue(net);
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d1f78c2c9..e9654a685 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -26,12 +26,13 @@
#include <linux/mdio.h>
#include <linux/usb/cdc.h>
#include <linux/suspend.h>
+#include <linux/acpi.h>
/* Information for net-next */
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "3"
+#define NET_VERSION "5"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -116,6 +117,7 @@
#define USB_TX_DMA 0xd434
#define USB_TOLERANCE 0xd490
#define USB_LPM_CTRL 0xd41a
+#define USB_BMU_RESET 0xd4b0
#define USB_UPS_CTRL 0xd800
#define USB_MISC_0 0xd81a
#define USB_POWER_CUT 0xd80a
@@ -338,6 +340,10 @@
#define TEST_MODE_DISABLE 0x00000001
#define TX_SIZE_ADJUST1 0x00000100
+/* USB_BMU_RESET */
+#define BMU_RESET_EP_IN 0x01
+#define BMU_RESET_EP_OUT 0x02
+
/* USB_UPS_CTRL */
#define POWER_CUT 0x0100
@@ -455,6 +461,11 @@
/* SRAM_IMPEDANCE */
#define RX_DRIVING_MASK 0x6000
+/* MAC PASSTHRU */
+#define AD_MASK 0xfee0
+#define EFUSE 0xcfdb
+#define PASS_THRU_MASK 0x1
+
enum rtl_register_content {
_1000bps = 0x10,
_100bps = 0x08,
@@ -619,6 +630,7 @@ struct r8152 {
int (*eee_get)(struct r8152 *, struct ethtool_eee *);
int (*eee_set)(struct r8152 *, struct ethtool_eee *);
bool (*in_nway)(struct r8152 *);
+ void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops;
int intr_interval;
@@ -1030,6 +1042,65 @@ out1:
return ret;
}
+/* Devices containing RTL8153-AD can support a persistent,
+ * host-system-provided MAC address.
+ * Examples of this are the Dell TB15 and Dell WD15 docks.
+ */
+static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ int ret = -EINVAL;
+ u32 ocp_data;
+ unsigned char buf[6];
+
+ /* test for -AD variant of RTL8153 */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ if ((ocp_data & AD_MASK) != 0x1000)
+ return -ENODEV;
+
+ /* test for MAC address pass-through bit */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+ if ((ocp_data & PASS_THRU_MASK) != 1)
+ return -ENODEV;
+
+ /* returns _AUXMAC_#AABBCCDDEEFF# */
+ status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
+ obj = (union acpi_object *)buffer.pointer;
+ if (!ACPI_SUCCESS(status))
+ return -ENODEV;
+ if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid buffer when reading pass-thru MAC addr: "
+ "(%d, %d)\n",
+ obj->type, obj->string.length);
+ goto amacout;
+ }
+ if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
+ strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid header when reading pass-thru MAC addr\n");
+ goto amacout;
+ }
+ ret = hex2bin(buf, obj->string.pointer + 9, 6);
+ if (!(ret == 0 && is_valid_ether_addr(buf))) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid MAC when reading pass-thru MAC addr: "
+ "%d, %pM\n", ret, buf);
+ ret = -EINVAL;
+ goto amacout;
+ }
+ memcpy(sa->sa_data, buf, 6);
+ ether_addr_copy(tp->netdev->dev_addr, sa->sa_data);
+ netif_info(tp, probe, tp->netdev,
+ "Using pass-thru MAC addr %pM\n", sa->sa_data);
+
+amacout:
+ kfree(obj);
+ return ret;
+}
+
static int set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
@@ -1038,8 +1109,15 @@ static int set_ethernet_addr(struct r8152 *tp)
if (tp->version == RTL_VER_01)
ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
- else
- ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+ else {
+ /* if this is not an RTL8153-AD, the eFuse MAC pass-thru bit is
+ * not set, or the system doesn't provide a valid _SB.AMAC, this
+ * is expected to return non-zero
+ */
+ ret = vendor_mac_passthru_addr_read(tp, &sa);
+ if (ret < 0)
+ ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+ }
if (ret < 0) {
netif_err(tp, probe, dev, "Get ether addr fail\n");
@@ -2169,7 +2247,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
static void r8153_set_rx_early_size(struct r8152 *tp)
{
u32 mtu = tp->netdev->mtu;
- u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4;
+ u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
}
@@ -2290,10 +2368,6 @@ static u32 __rtl_get_wol(struct r8152 *tp)
u32 ocp_data;
u32 wolopts = 0;
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
- if (!(ocp_data & LAN_WAKE_EN))
- return 0;
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
if (ocp_data & LINK_ON_WAKE_EN)
wolopts |= WAKE_PHY;
@@ -2326,15 +2400,13 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
- ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
+ ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN);
if (wolopts & WAKE_UCAST)
ocp_data |= UWF_EN;
if (wolopts & WAKE_BCAST)
ocp_data |= BWF_EN;
if (wolopts & WAKE_MCAST)
ocp_data |= MWF_EN;
- if (wolopts & WAKE_ANY)
- ocp_data |= LAN_WAKE_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
@@ -2403,9 +2475,6 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
if (enable) {
u32 ocp_data;
- r8153_u1u2en(tp, false);
- r8153_u2p3en(tp, false);
-
__rtl_set_wol(tp, WAKE_ANY);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2416,7 +2485,28 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
} else {
+ u32 ocp_data;
+
__rtl_set_wol(tp, tp->saved_wolopts);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data &= ~LINK_OFF_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+ }
+}
+
+static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
+{
+ rtl_runtime_suspend_enable(tp, enable);
+
+ if (enable) {
+ r8153_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+ } else {
r8153_u2p3en(tp, true);
r8153_u1u2en(tp, true);
}
@@ -2456,6 +2546,17 @@ static void r8153_teredo_off(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
}
+static void rtl_reset_bmu(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET);
+ ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+ ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+}
+
static void r8152_aldps_en(struct r8152 *tp, bool enable)
{
if (enable) {
@@ -2681,6 +2782,7 @@ static void r8153_first_init(struct r8152 *tp)
r8153_hw_phy_cfg(tp);
rtl8152_nic_reset(tp);
+ rtl_reset_bmu(tp);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
@@ -2742,6 +2844,7 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
rtl_disable(tp);
+ rtl_reset_bmu(tp);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2803,6 +2906,7 @@ static void rtl8153_disable(struct r8152 *tp)
{
r8153_aldps_en(tp, false);
rtl_disable(tp);
+ rtl_reset_bmu(tp);
r8153_aldps_en(tp, true);
usb_enable_lpm(tp->udev);
}
@@ -3366,7 +3470,7 @@ static void r8153_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
ocp_data = FIFO_EMPTY_1FB | ROK_EXIT_LPM;
- if (tp->version == RTL_VER_04 && tp->udev->speed != USB_SPEED_SUPER)
+ if (tp->version == RTL_VER_04 && tp->udev->speed < USB_SPEED_SUPER)
ocp_data |= LPM_TIMER_500MS;
else
ocp_data |= LPM_TIMER_500US;
@@ -3382,15 +3486,11 @@ static void r8153_init(struct r8152 *tp)
r8153_power_cut_en(tp, false);
r8153_u1u2en(tp, true);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
- PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
- U1U2_SPDWN_EN | L1_SPDWN_EN);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
- PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
- TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN |
- EEE_SPDWN_EN);
+ /* MAC clock speed down */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
r8153_enable_eee(tp);
r8153_aldps_en(tp, true);
@@ -3497,7 +3597,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
napi_disable(&tp->napi);
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_stop_rx(tp);
- rtl_runtime_suspend_enable(tp, true);
+ tp->rtl_ops.autosuspend_en(tp, true);
} else {
cancel_delayed_work_sync(&tp->schedule);
tp->rtl_ops.down(tp);
@@ -3523,7 +3623,7 @@ static int rtl8152_resume(struct usb_interface *intf)
if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- rtl_runtime_suspend_enable(tp, false);
+ tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
@@ -3542,7 +3642,7 @@ static int rtl8152_resume(struct usb_interface *intf)
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
} else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
if (tp->netdev->flags & IFF_UP)
- rtl_runtime_suspend_enable(tp, false);
+ tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
}
@@ -4122,6 +4222,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8152_get_eee;
ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8152_in_nway;
+ ops->autosuspend_en = rtl_runtime_suspend_enable;
break;
case RTL_VER_03:
@@ -4137,6 +4238,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8153_get_eee;
ops->eee_set = r8153_set_eee;
ops->in_nway = rtl8153_in_nway;
+ ops->autosuspend_en = rtl8153_runtime_enable;
break;
default:
@@ -4211,6 +4313,7 @@ static int rtl8152_probe(struct usb_interface *intf,
switch (udev->speed) {
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
tp->coalesce = COALESCE_SUPER;
break;
case USB_SPEED_HIGH:
@@ -4322,3 +4425,4 @@ module_usb_driver(rtl8152_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
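
For reference, the ACPI object that \_SB.AMAC returns above is a 0x17-byte buffer holding the string _AUXMAC_#AABBCCDDEEFF#: a 9-byte prefix, twelve hex digits at offsets 9..20, and a closing '#' at offset 0x15. A hedged sketch of the layout check and conversion (demo_parse_auxmac is hypothetical; it mirrors, rather than reproduces, vendor_mac_passthru_addr_read()):

/* "_AUXMAC_#AABBCCDDEEFF#"
 *  ^0       ^9          ^0x15      (length check above: 0x17 bytes)
 */
static int demo_parse_auxmac(const char *s, u8 mac[6])
{
	if (strncmp(s, "_AUXMAC_#", 9) != 0 || s[0x15] != '#')
		return -EINVAL;
	if (hex2bin(mac, s + 9, 6))	/* 12 hex digits -> 6 bytes */
		return -EINVAL;
	return is_valid_ether_addr(mac) ? 0 : -EADDRNOTAVAIL;
}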
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index d37b7dce2..7c72bfac8 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -451,7 +451,7 @@ static void write_bulk_callback(struct urb *urb)
if (status)
dev_info(&urb->dev->dev, "%s: Tx status %d\n",
dev->netdev->name, status);
- dev->netdev->trans_start = jiffies;
+ netif_trans_update(dev->netdev);
netif_wake_queue(dev->netdev);
}
@@ -694,7 +694,7 @@ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,
} else {
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += skb->len;
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
}
return NETDEV_TX_OK;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index c369db99c..9af979993 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -99,9 +99,11 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (unlikely(ret < 0))
+ if (unlikely(ret < 0)) {
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
+ return ret;
+ }
le32_to_cpus(&buf);
*data = buf;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 2edc2bc6d..dc989a8b5 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -61,6 +61,8 @@
#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
+#define CARRIER_CHECK_DELAY (2 * HZ)
+
struct smsc95xx_priv {
u32 mac_cr;
u32 hash_hi;
@@ -69,6 +71,9 @@ struct smsc95xx_priv {
spinlock_t mac_cr_lock;
u8 features;
u8 suspend_flags;
+ bool link_ok;
+ struct delayed_work carrier_check;
+ struct usbnet *dev;
};
static bool turbo_mode = true;
@@ -92,9 +97,11 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (unlikely(ret < 0))
+ if (unlikely(ret < 0)) {
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
+ return ret;
+ }
le32_to_cpus(&buf);
*data = buf;
@@ -622,6 +629,44 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
intdata);
}
+static void set_carrier(struct usbnet *dev, bool link)
+{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+ if (pdata->link_ok == link)
+ return;
+
+ pdata->link_ok = link;
+
+ if (link)
+ usbnet_link_change(dev, 1, 0);
+ else
+ usbnet_link_change(dev, 0, 0);
+}
+
+static void check_carrier(struct work_struct *work)
+{
+ struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv,
+ carrier_check.work);
+ struct usbnet *dev = pdata->dev;
+ int ret;
+
+ if (pdata->suspend_flags != 0)
+ return;
+
+ ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read MII_BMSR\n");
+ return;
+ }
+ if (ret & BMSR_LSTATUS)
+ set_carrier(dev, 1);
+ else
+ set_carrier(dev, 0);
+
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+}
+
/* Enable or disable Tx & Rx checksum offload engines */
static int smsc95xx_set_features(struct net_device *netdev,
netdev_features_t features)
@@ -1163,13 +1208,20 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->flags |= IFF_MULTICAST;
dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ pdata->dev = dev;
+ INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier);
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+
return 0;
}
static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
if (pdata) {
+ cancel_delayed_work(&pdata->carrier_check);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
@@ -1693,6 +1745,7 @@ static int smsc95xx_resume(struct usb_interface *intf)
/* do this first to ensure it's cleared even in error case */
pdata->suspend_flags = 0;
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
if (suspend_flags & SUSPEND_ALLMODES) {
/* clear wake-up sources */
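
The smsc95xx change above is a self-rearming delayed work: bind arms carrier_check, the handler polls MII_BMSR every CARRIER_CHECK_DELAY and reschedules itself, unbind cancels it, and resume re-arms it after clearing suspend_flags. The generic lifecycle, with hypothetical names:

struct demo_priv {				/* hypothetical */
	struct delayed_work poll_work;
};

static void demo_poll(struct work_struct *work)
{
	struct demo_priv *p = container_of(work, struct demo_priv,
					   poll_work.work);

	/* ... read link state, report via usbnet_link_change() ... */

	schedule_delayed_work(&p->poll_work, 2 * HZ);	/* re-arm */
}

static void demo_start(struct demo_priv *p)
{
	INIT_DELAYED_WORK(&p->poll_work, demo_poll);
	schedule_delayed_work(&p->poll_work, 2 * HZ);
}

static void demo_stop(struct demo_priv *p)
{
	cancel_delayed_work(&p->poll_work);	/* handler stops re-arming */
}

Note that cancel_delayed_work(), unlike the _sync variant, does not wait for a handler that is already running; the _sync form is the safer choice when the private data is freed immediately afterwards, as it is in smsc95xx_unbind().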
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 10798128c..6086a0163 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -356,6 +356,7 @@ void usbnet_update_max_qlen(struct usbnet *dev)
dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
break;
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
/*
* Do not take the default 5ms qlen for super speed HC, to
* save memory; iperf tests show a 2.5ms qlen can
@@ -394,8 +395,11 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
dev->hard_mtu = net->mtu + net->hard_header_len;
if (dev->rx_urb_size == old_hard_mtu) {
dev->rx_urb_size = dev->hard_mtu;
- if (dev->rx_urb_size > old_rx_urb_size)
+ if (dev->rx_urb_size > old_rx_urb_size) {
+ usbnet_pause_rx(dev);
usbnet_unlink_rx_urbs(dev);
+ usbnet_resume_rx(dev);
+ }
}
/* max qlen depend on hard_mtu and rx_urb_size */
@@ -1415,7 +1419,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
"tx: submit urb err %d\n", retval);
break;
case 0:
- net->trans_start = jiffies;
+ netif_trans_update(net);
__usbnet_queue_skb(&dev->txq, skb, tx_start);
if (dev->txq.qlen >= TX_QLEN (dev))
netif_stop_queue (net);
@@ -1507,8 +1511,9 @@ static void usbnet_bh (unsigned long param)
} else if (netif_running (dev->net) &&
netif_device_present (dev->net) &&
netif_carrier_ok(dev->net) &&
- !timer_pending (&dev->delay) &&
- !test_bit (EVENT_RX_HALT, &dev->flags)) {
+ !timer_pending(&dev->delay) &&
+ !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags)) {
int temp = dev->rxq.qlen;
if (temp < RX_QLEN(dev)) {
@@ -1844,7 +1849,7 @@ int usbnet_resume (struct usb_interface *intf)
usb_free_urb(res);
usb_autopm_put_interface_async(dev->intf);
} else {
- dev->net->trans_start = jiffies;
+ netif_trans_update(dev->net);
__skb_queue_tail(&dev->txq, skb);
}
}
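
Two related usbnet hunks above: the MTU path now brackets usbnet_unlink_rx_urbs() with usbnet_pause_rx()/usbnet_resume_rx(), and usbnet_bh learns to skip the rx refill while EVENT_RX_PAUSED is set, so no URB is resubmitted with the old rx_urb_size mid-resize. The pattern, in isolation:

/* grow rx_urb_size safely (sketch of the hunk above) */
usbnet_pause_rx(dev);		/* park incoming skbs instead of rx_fixup */
usbnet_unlink_rx_urbs(dev);	/* kill URBs sized for the old MTU */
usbnet_resume_rx(dev);		/* replay parked skbs; bh refills at new size */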
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 4f30a6ae5..f37a6e61d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -312,10 +312,9 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_set_rx_headroom = veth_set_rx_headroom,
};
-#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
- NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
- NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | \
- NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | NETIF_F_UFO | \
+#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
+ NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 49d84e540..e0638e556 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1925,24 +1925,11 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
- /* Last of all, set up some receive buffers. */
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
-
- /* If we didn't even get one input buffer, we're useless. */
- if (vi->rq[i].vq->num_free ==
- virtqueue_get_vring_size(vi->rq[i].vq)) {
- free_unused_bufs(vi);
- err = -ENOMEM;
- goto free_recv_bufs;
- }
- }
-
vi->nb.notifier_call = &virtnet_cpu_callback;
err = register_hotcpu_notifier(&vi->nb);
if (err) {
pr_debug("virtio_net: registering cpu notifier failed\n");
- goto free_recv_bufs;
+ goto free_unregister_netdev;
}
/* Assume link up if device can't report link status,
@@ -1960,10 +1947,9 @@ static int virtnet_probe(struct virtio_device *vdev)
return 0;
-free_recv_bufs:
+free_unregister_netdev:
vi->vdev->config->reset(vdev);
- free_receive_bufs(vi);
unregister_netdev(dev);
free_vqs:
cancel_delayed_work_sync(&vi->refill);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index db8022ae4..08885bc8d 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1369,7 +1369,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
segCnt = rcdlro->segCnt;
- BUG_ON(segCnt <= 1);
+ WARN_ON_ONCE(segCnt == 0);
mss = rcdlro->mss;
if (unlikely(segCnt <= 1))
segCnt = 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index c4825392d..3d2b64e63 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040800
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
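
The two version macros are kept in lockstep: each byte of VMXNET3_DRIVER_VERSION_NUM carries one component of the dotted version, most significant byte first, so "1.4.8.0-k" packs to 0x01040800. Decoding it back, for illustration:

u32 v = 0x01040800;	/* VMXNET3_DRIVER_VERSION_NUM after this patch */

pr_info("vmxnet3 %u.%u.%u.%u\n",
	(v >> 24) & 0xff, (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff);
						/* -> "vmxnet3 1.4.8.0" */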
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 8a8f1e58b..8bd8c7e1e 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -42,12 +42,9 @@
#define DRV_NAME "vrf"
#define DRV_VERSION "1.0"
-#define vrf_master_get_rcu(dev) \
- ((struct net_device *)rcu_dereference(dev->rx_handler_data))
-
struct net_vrf {
- struct rtable *rth;
- struct rt6_info *rt6;
+ struct rtable __rcu *rth;
+ struct rt6_info __rcu *rt6;
u32 tb_id;
};
@@ -60,90 +57,12 @@ struct pcpu_dstats {
struct u64_stats_sync syncp;
};
-/* neighbor handling is done with actual device; do not want
- * to flip skb->dev for those ndisc packets. This really fails
- * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
- * a start.
- */
-#if IS_ENABLED(CONFIG_IPV6)
-static bool check_ipv6_frame(const struct sk_buff *skb)
-{
- const struct ipv6hdr *ipv6h;
- struct ipv6hdr _ipv6h;
- bool rc = true;
-
- ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
- if (!ipv6h)
- goto out;
-
- if (ipv6h->nexthdr == NEXTHDR_ICMP) {
- const struct icmp6hdr *icmph;
- struct icmp6hdr _icmph;
-
- icmph = skb_header_pointer(skb, sizeof(_ipv6h),
- sizeof(_icmph), &_icmph);
- if (!icmph)
- goto out;
-
- switch (icmph->icmp6_type) {
- case NDISC_ROUTER_SOLICITATION:
- case NDISC_ROUTER_ADVERTISEMENT:
- case NDISC_NEIGHBOUR_SOLICITATION:
- case NDISC_NEIGHBOUR_ADVERTISEMENT:
- case NDISC_REDIRECT:
- rc = false;
- break;
- }
- }
-
-out:
- return rc;
-}
-#else
-static bool check_ipv6_frame(const struct sk_buff *skb)
-{
- return false;
-}
-#endif
-
-static bool is_ip_rx_frame(struct sk_buff *skb)
-{
- switch (skb->protocol) {
- case htons(ETH_P_IP):
- return true;
- case htons(ETH_P_IPV6):
- return check_ipv6_frame(skb);
- }
- return false;
-}
-
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
vrf_dev->stats.tx_errors++;
kfree_skb(skb);
}
-/* note: already called with rcu_read_lock */
-static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
-{
- struct sk_buff *skb = *pskb;
-
- if (is_ip_rx_frame(skb)) {
- struct net_device *dev = vrf_master_get_rcu(skb->dev);
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
- dstats->rx_pkts++;
- dstats->rx_bytes += skb->len;
- u64_stats_update_end(&dstats->syncp);
-
- skb->dev = dev;
-
- return RX_HANDLER_ANOTHER;
- }
- return RX_HANDLER_PASS;
-}
-
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -354,28 +273,40 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
+/* holding rtnl */
static void vrf_rt6_release(struct net_vrf *vrf)
{
- dst_release(&vrf->rt6->dst);
- vrf->rt6 = NULL;
+ struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
+
+ rcu_assign_pointer(vrf->rt6, NULL);
+
+ if (rt6)
+ dst_release(&rt6->dst);
}
static int vrf_rt6_create(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
struct net *net = dev_net(dev);
+ struct fib6_table *rt6i_table;
struct rt6_info *rt6;
int rc = -ENOMEM;
+ rt6i_table = fib6_new_table(net, vrf->tb_id);
+ if (!rt6i_table)
+ goto out;
+
rt6 = ip6_dst_alloc(net, dev,
DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
if (!rt6)
goto out;
- rt6->dst.output = vrf_output6;
- rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
dst_hold(&rt6->dst);
- vrf->rt6 = rt6;
+
+ rt6->rt6i_table = rt6i_table;
+ rt6->dst.output = vrf_output6;
+ rcu_assign_pointer(vrf->rt6, rt6);
+
rc = 0;
out:
return rc;
@@ -449,26 +380,35 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
+/* holding rtnl */
static void vrf_rtable_release(struct net_vrf *vrf)
{
- struct dst_entry *dst = (struct dst_entry *)vrf->rth;
+ struct rtable *rth = rtnl_dereference(vrf->rth);
+
+ rcu_assign_pointer(vrf->rth, NULL);
- dst_release(dst);
- vrf->rth = NULL;
+ if (rth)
+ dst_release(&rth->dst);
}
-static struct rtable *vrf_rtable_create(struct net_device *dev)
+static int vrf_rtable_create(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
struct rtable *rth;
+ if (!fib_new_table(dev_net(dev), vrf->tb_id))
+ return -ENOMEM;
+
rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
- if (rth) {
- rth->dst.output = vrf_output;
- rth->rt_table_id = vrf->tb_id;
- }
+ if (!rth)
+ return -ENOMEM;
- return rth;
+ rth->dst.output = vrf_output;
+ rth->rt_table_id = vrf->tb_id;
+
+ rcu_assign_pointer(vrf->rth, rth);
+
+ return 0;
}
/**************************** device handling ********************/
@@ -497,28 +437,14 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
int ret;
- /* register the packet handler for slave ports */
- ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
- if (ret) {
- netdev_err(port_dev,
- "Device %s failed to register rx_handler\n",
- port_dev->name);
- goto out_fail;
- }
-
ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
if (ret < 0)
- goto out_unregister;
+ return ret;
port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
cycle_netdev(port_dev);
return 0;
-
-out_unregister:
- netdev_rx_handler_unregister(port_dev);
-out_fail:
- return ret;
}
static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
@@ -535,8 +461,6 @@ static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
netdev_upper_dev_unlink(port_dev, dev);
port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
- netdev_rx_handler_unregister(port_dev);
-
cycle_netdev(port_dev);
return 0;
@@ -572,8 +496,7 @@ static int vrf_dev_init(struct net_device *dev)
goto out_nomem;
/* create the default dst which points back to us */
- vrf->rth = vrf_rtable_create(dev);
- if (!vrf->rth)
+ if (vrf_rtable_create(dev) != 0)
goto out_stats;
if (vrf_rt6_create(dev) != 0)
@@ -616,8 +539,13 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
struct net_vrf *vrf = netdev_priv(dev);
- rth = vrf->rth;
- dst_hold(&rth->dst);
+ rcu_read_lock();
+
+ rth = rcu_dereference(vrf->rth);
+ if (likely(rth))
+ dst_hold(&rth->dst);
+
+ rcu_read_unlock();
}
return rth;
@@ -639,6 +567,8 @@ static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
fl4->flowi4_iif = LOOPBACK_IFINDEX;
+ /* make sure oif is set to VRF device for lookup */
+ fl4->flowi4_oif = dev->ifindex;
fl4->flowi4_tos = tos & IPTOS_RT_MASK;
fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
@@ -659,19 +589,116 @@ static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
}
#if IS_ENABLED(CONFIG_IPV6)
+/* neighbor handling is done with actual device; do not want
+ * to flip skb->dev for those ndisc packets. This really fails
+ * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
+ * a start.
+ */
+static bool ipv6_ndisc_frame(const struct sk_buff *skb)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ bool rc = false;
+
+ if (iph->nexthdr == NEXTHDR_ICMP) {
+ const struct icmp6hdr *icmph;
+ struct icmp6hdr _icmph;
+
+ icmph = skb_header_pointer(skb, sizeof(*iph),
+ sizeof(_icmph), &_icmph);
+ if (!icmph)
+ goto out;
+
+ switch (icmph->icmp6_type) {
+ case NDISC_ROUTER_SOLICITATION:
+ case NDISC_ROUTER_ADVERTISEMENT:
+ case NDISC_NEIGHBOUR_SOLICITATION:
+ case NDISC_NEIGHBOUR_ADVERTISEMENT:
+ case NDISC_REDIRECT:
+ rc = true;
+ break;
+ }
+ }
+
+out:
+ return rc;
+}
+
+static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+ struct sk_buff *skb)
+{
+ /* if packet is NDISC keep the ingress interface */
+ if (!ipv6_ndisc_frame(skb)) {
+ skb->dev = vrf_dev;
+ skb->skb_iif = vrf_dev->ifindex;
+
+ skb_push(skb, skb->mac_len);
+ dev_queue_xmit_nit(skb, vrf_dev);
+ skb_pull(skb, skb->mac_len);
+
+ IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
+ }
+
+ return skb;
+}
+
+#else
+static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+ struct sk_buff *skb)
+{
+ return skb;
+}
+#endif
+
+static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
+ struct sk_buff *skb)
+{
+ skb->dev = vrf_dev;
+ skb->skb_iif = vrf_dev->ifindex;
+
+ skb_push(skb, skb->mac_len);
+ dev_queue_xmit_nit(skb, vrf_dev);
+ skb_pull(skb, skb->mac_len);
+
+ return skb;
+}
+
+/* called with rcu lock held */
+static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
+ struct sk_buff *skb,
+ u16 proto)
+{
+ switch (proto) {
+ case AF_INET:
+ return vrf_ip_rcv(vrf_dev, skb);
+ case AF_INET6:
+ return vrf_ip6_rcv(vrf_dev, skb);
+ }
+
+ return skb;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
const struct flowi6 *fl6)
{
- struct rt6_info *rt = NULL;
+ struct dst_entry *dst = NULL;
if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
struct net_vrf *vrf = netdev_priv(dev);
+ struct rt6_info *rt;
+
+ rcu_read_lock();
+
+ rt = rcu_dereference(vrf->rt6);
+ if (likely(rt)) {
+ dst = &rt->dst;
+ dst_hold(dst);
+ }
- rt = vrf->rt6;
- dst_hold(&rt->dst);
+ rcu_read_unlock();
}
- return (struct dst_entry *)rt;
+ return dst;
}
#endif
@@ -679,6 +706,7 @@ static const struct l3mdev_ops vrf_l3mdev_ops = {
.l3mdev_fib_table = vrf_fib_table,
.l3mdev_get_rtable = vrf_get_rtable,
.l3mdev_get_saddr = vrf_get_saddr,
+ .l3mdev_l3_rcv = vrf_l3_rcv,
#if IS_ENABLED(CONFIG_IPV6)
.l3mdev_get_rt6_dst = vrf_get_rt6_dst,
#endif
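
The vrf conversion above turns the cached rth/rt6 dsts into __rcu pointers: writers run under RTNL and publish with rcu_assign_pointer(), teardown snapshots the pointer with rtnl_dereference() before NULL-ing and releasing it, and readers take rcu_read_lock(), rcu_dereference(), and a dst reference before leaving the critical section. The generic shape of that lifecycle (struct demo is hypothetical):

struct demo {				/* hypothetical */
	struct rtable __rcu *rth;
};

/* writer, holding RTNL */
static void demo_publish(struct demo *d, struct rtable *rth)
{
	rcu_assign_pointer(d->rth, rth);
}

static void demo_release(struct demo *d)
{
	struct rtable *rth = rtnl_dereference(d->rth);

	rcu_assign_pointer(d->rth, NULL);
	if (rth)
		dst_release(&rth->dst);
}

/* reader */
static struct rtable *demo_get(struct demo *d)
{
	struct rtable *rth;

	rcu_read_lock();
	rth = rcu_dereference(d->rth);
	if (rth)
		dst_hold(&rth->dst);	/* take a ref inside the RCU section */
	rcu_read_unlock();

	return rth;
}

Taking the reference inside the read-side section is what makes this safe: dst freeing is RCU-deferred (dst_destroy_rcu), so a dst observed under rcu_read_lock() cannot be freed before dst_hold() runs.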
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7e29b5501..b3b9db68f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -98,7 +98,6 @@ struct vxlan_fdb {
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
-static struct workqueue_struct *vxlan_wq;
static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
@@ -551,16 +550,15 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
return vh;
}
-static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
- struct sk_buff *skb,
- struct udp_offload *uoff)
+static struct sk_buff **vxlan_gro_receive(struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb)
{
struct sk_buff *p, **pp = NULL;
struct vxlanhdr *vh, *vh2;
unsigned int hlen, off_vx;
int flush = 1;
- struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
- udp_offloads);
+ struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
__be32 flags;
struct gro_remcsum grc;
@@ -613,8 +611,7 @@ out:
return pp;
}
-static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
- struct udp_offload *uoff)
+static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
/* Sets 'skb->inner_mac_header' since we are always called with
* 'skb->encapsulation' set.
@@ -630,13 +627,6 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
struct net *net = sock_net(sk);
sa_family_t sa_family = vxlan_get_sk_family(vs);
__be16 port = inet_sk(sk)->inet_sport;
- int err;
-
- if (sa_family == AF_INET) {
- err = udp_add_offload(net, &vs->udp_offloads);
- if (err)
- pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
- }
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
@@ -663,9 +653,6 @@ static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
port);
}
rcu_read_unlock();
-
- if (sa_family == AF_INET)
- udp_del_offload(&vs->udp_offloads);
}
/* Add new entry to forwarding table -- assumes lock held */
@@ -1051,14 +1038,14 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
return false;
}
-static void __vxlan_sock_release(struct vxlan_sock *vs)
+static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
struct vxlan_net *vn;
if (!vs)
- return;
+ return false;
if (!atomic_dec_and_test(&vs->refcnt))
- return;
+ return false;
vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
spin_lock(&vn->sock_lock);
@@ -1066,14 +1053,28 @@ static void __vxlan_sock_release(struct vxlan_sock *vs)
vxlan_notify_del_rx_port(vs);
spin_unlock(&vn->sock_lock);
- queue_work(vxlan_wq, &vs->del_work);
+ return true;
}
static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
- __vxlan_sock_release(vxlan->vn4_sock);
+ bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
- __vxlan_sock_release(vxlan->vn6_sock);
+ bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock);
+#endif
+
+ synchronize_net();
+
+ if (ipv4) {
+ udp_tunnel_sock_release(vxlan->vn4_sock->sock);
+ kfree(vxlan->vn4_sock);
+ }
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6) {
+ udp_tunnel_sock_release(vxlan->vn6_sock->sock);
+ kfree(vxlan->vn6_sock);
+ }
#endif
}
@@ -1193,6 +1194,45 @@ out:
unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
+static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
+ __be16 *protocol,
+ struct sk_buff *skb, u32 vxflags)
+{
+ struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;
+
+ /* Need to have Next Protocol set for interfaces in GPE mode. */
+ if (!gpe->np_applied)
+ return false;
+ /* "The initial version is 0. If a receiver does not support the
+ * version indicated it MUST drop the packet."
+ */
+ if (gpe->version != 0)
+ return false;
+ /* "When the O bit is set to 1, the packet is an OAM packet and OAM
+ * processing MUST occur." However, we don't implement OAM
+ * processing, so we drop the packet.
+ */
+ if (gpe->oam_flag)
+ return false;
+
+ switch (gpe->next_protocol) {
+ case VXLAN_GPE_NP_IPV4:
+ *protocol = htons(ETH_P_IP);
+ break;
+ case VXLAN_GPE_NP_IPV6:
+ *protocol = htons(ETH_P_IPV6);
+ break;
+ case VXLAN_GPE_NP_ETHERNET:
+ *protocol = htons(ETH_P_TEB);
+ break;
+ default:
+ return false;
+ }
+
+ unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
+ return true;
+}
+
static bool vxlan_set_mac(struct vxlan_dev *vxlan,
struct vxlan_sock *vs,
struct sk_buff *skb)
@@ -1258,9 +1298,11 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
struct vxlanhdr unparsed;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
+ __be16 protocol = htons(ETH_P_TEB);
+ bool raw_proto = false;
void *oiph;
- /* Need Vxlan and inner Ethernet header to be present */
+ /* Need UDP and VXLAN header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
goto drop;
@@ -1284,9 +1326,18 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (!vxlan)
goto drop;
- if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB),
- !net_eq(vxlan->net, dev_net(vxlan->dev))))
- goto drop;
+ /* For backwards compatibility, only allow reserved fields to be
+ * used by VXLAN extensions if explicitly requested.
+ */
+ if (vs->flags & VXLAN_F_GPE) {
+ if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
+ goto drop;
+ raw_proto = true;
+ }
+
+ if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
+ !net_eq(vxlan->net, dev_net(vxlan->dev))))
+ goto drop;
if (vxlan_collect_metadata(vs)) {
__be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
@@ -1305,14 +1356,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
memset(md, 0, sizeof(*md));
}
- /* For backwards compatibility, only allow reserved fields to be
- * used by VXLAN extensions if explicitly requested.
- */
if (vs->flags & VXLAN_F_REMCSUM_RX)
if (!vxlan_remcsum(&unparsed, skb, vs->flags))
goto drop;
if (vs->flags & VXLAN_F_GBP)
vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
+ /* Note that GBP and GPE can never be active together. This is
+ * ensured in vxlan_dev_configure.
+ */
if (unparsed.vx_flags || unparsed.vx_vni) {
/* If there are any unprocessed flags remaining treat
@@ -1326,8 +1377,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- if (!vxlan_set_mac(vxlan, vs, skb))
- goto drop;
+ if (!raw_proto) {
+ if (!vxlan_set_mac(vxlan, vs, skb))
+ goto drop;
+ } else {
+ skb_reset_mac_header(skb);
+ skb->dev = vxlan->dev;
+ skb->pkt_type = PACKET_HOST;
+ }
oiph = skb_network_header(skb);
skb_reset_network_header(skb);
@@ -1686,6 +1743,27 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}
+static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
+ __be16 protocol)
+{
+ struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;
+
+ gpe->np_applied = 1;
+
+ switch (protocol) {
+ case htons(ETH_P_IP):
+ gpe->next_protocol = VXLAN_GPE_NP_IPV4;
+ return 0;
+ case htons(ETH_P_IPV6):
+ gpe->next_protocol = VXLAN_GPE_NP_IPV6;
+ return 0;
+ case htons(ETH_P_TEB):
+ gpe->next_protocol = VXLAN_GPE_NP_ETHERNET;
+ return 0;
+ }
+ return -EPFNOSUPPORT;
+}
+
static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
int iphdr_len, __be32 vni,
struct vxlan_metadata *md, u32 vxflags,
@@ -1695,6 +1773,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
int min_headroom;
int err;
int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+ __be16 inner_protocol = htons(ETH_P_TEB);
if ((vxflags & VXLAN_F_REMCSUM_TX) &&
skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -1713,18 +1792,16 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err)) {
- kfree_skb(skb);
- return err;
- }
+ if (unlikely(err))
+ goto out_free;
skb = vlan_hwaccel_push_inside(skb);
if (WARN_ON(!skb))
return -ENOMEM;
- skb = iptunnel_handle_offloads(skb, type);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ err = iptunnel_handle_offloads(skb, type);
+ if (err)
+ goto out_free;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = VXLAN_HF_VNI;
@@ -1745,9 +1822,19 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
if (vxflags & VXLAN_F_GBP)
vxlan_build_gbp_hdr(vxh, vxflags, md);
+ if (vxflags & VXLAN_F_GPE) {
+ err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
+ if (err < 0)
+ goto out_free;
+ inner_protocol = skb->protocol;
+ }
- skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+ skb_set_inner_protocol(skb, inner_protocol);
return 0;
+
+out_free:
+ kfree_skb(skb);
+ return err;
}
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
@@ -2107,9 +2194,17 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
info = skb_tunnel_info(skb);
skb_reset_mac_header(skb);
- eth = eth_hdr(skb);
- if ((vxlan->flags & VXLAN_F_PROXY)) {
+ if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
+ if (info && info->mode & IP_TUNNEL_INFO_TX)
+ vxlan_xmit_one(skb, dev, NULL, false);
+ else
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (vxlan->flags & VXLAN_F_PROXY) {
+ eth = eth_hdr(skb);
if (ntohs(eth->h_proto) == ETH_P_ARP)
return arp_reduce(dev, skb);
#if IS_ENABLED(CONFIG_IPV6)
@@ -2124,18 +2219,10 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
return neigh_reduce(dev, skb);
}
- eth = eth_hdr(skb);
#endif
}
- if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
- if (info && info->mode & IP_TUNNEL_INFO_TX)
- vxlan_xmit_one(skb, dev, NULL, false);
- else
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
+ eth = eth_hdr(skb);
f = vxlan_find_mac(vxlan, eth->h_dest);
did_rsc = false;
@@ -2405,7 +2492,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
return 0;
}
-static const struct net_device_ops vxlan_netdev_ops = {
+static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_init = vxlan_init,
.ndo_uninit = vxlan_uninit,
.ndo_open = vxlan_open,
@@ -2422,6 +2509,17 @@ static const struct net_device_ops vxlan_netdev_ops = {
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
};
+static const struct net_device_ops vxlan_netdev_raw_ops = {
+ .ndo_init = vxlan_init,
+ .ndo_uninit = vxlan_uninit,
+ .ndo_open = vxlan_open,
+ .ndo_stop = vxlan_stop,
+ .ndo_start_xmit = vxlan_xmit,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_change_mtu = vxlan_change_mtu,
+ .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
+};
+
/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
.name = "vxlan",
@@ -2431,7 +2529,7 @@ static struct device_type vxlan_type = {
* supply the listening VXLAN udp ports. Callers are expected
* to implement the ndo_add_vxlan_port.
*/
-void vxlan_get_rx_port(struct net_device *dev)
+static void vxlan_push_rx_ports(struct net_device *dev)
{
struct vxlan_sock *vs;
struct net *net = dev_net(dev);
@@ -2440,6 +2538,9 @@ void vxlan_get_rx_port(struct net_device *dev)
__be16 port;
unsigned int i;
+ if (!dev->netdev_ops->ndo_add_vxlan_port)
+ return;
+
spin_lock(&vn->sock_lock);
for (i = 0; i < PORT_HASH_SIZE; ++i) {
hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
@@ -2451,7 +2552,6 @@ void vxlan_get_rx_port(struct net_device *dev)
}
spin_unlock(&vn->sock_lock);
}
-EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
@@ -2462,7 +2562,6 @@ static void vxlan_setup(struct net_device *dev)
eth_hw_addr_random(dev);
ether_setup(dev);
- dev->netdev_ops = &vxlan_netdev_ops;
dev->destructor = free_netdev;
SET_NETDEV_DEVTYPE(dev, &vxlan_type);
@@ -2477,8 +2576,7 @@ static void vxlan_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
netif_keep_dst(dev);
- dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_NO_QUEUE;
INIT_LIST_HEAD(&vxlan->next);
spin_lock_init(&vxlan->hash_lock);
@@ -2497,6 +2595,23 @@ static void vxlan_setup(struct net_device *dev)
INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
+static void vxlan_ether_setup(struct net_device *dev)
+{
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->netdev_ops = &vxlan_netdev_ether_ops;
+}
+
+static void vxlan_raw_setup(struct net_device *dev)
+{
+ dev->header_ops = NULL;
+ dev->type = ARPHRD_NONE;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->netdev_ops = &vxlan_netdev_raw_ops;
+}
+
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_ID] = { .type = NLA_U32 },
[IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
@@ -2523,6 +2638,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
[IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
[IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
+ [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
[IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
};
@@ -2575,13 +2691,6 @@ static const struct ethtool_ops vxlan_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-static void vxlan_del_work(struct work_struct *work)
-{
- struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
- udp_tunnel_sock_release(vs->sock);
- kfree_rcu(vs, rcu);
-}
-
static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
__be16 port, u32 flags)
{
@@ -2627,8 +2736,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
for (h = 0; h < VNI_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vs->vni_list[h]);
- INIT_WORK(&vs->del_work, vxlan_del_work);
-
sock = vxlan_create_sock(net, ipv6, port, flags);
if (IS_ERR(sock)) {
pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
@@ -2641,21 +2748,19 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
atomic_set(&vs->refcnt, 1);
vs->flags = (flags & VXLAN_F_RCV_FLAGS);
- /* Initialize the vxlan udp offloads structure */
- vs->udp_offloads.port = port;
- vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive;
- vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;
-
spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
vxlan_notify_add_rx_port(vs);
spin_unlock(&vn->sock_lock);
/* Mark socket as an encapsulation socket. */
+ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
tunnel_cfg.sk_user_data = vs;
tunnel_cfg.encap_type = 1;
tunnel_cfg.encap_rcv = vxlan_rcv;
tunnel_cfg.encap_destroy = NULL;
+ tunnel_cfg.gro_receive = vxlan_gro_receive;
+ tunnel_cfg.gro_complete = vxlan_gro_complete;
setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
@@ -2723,6 +2828,21 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
__be16 default_port = vxlan->cfg.dst_port;
struct net_device *lowerdev = NULL;
+ if (conf->flags & VXLAN_F_GPE) {
+ if (conf->flags & ~VXLAN_F_ALLOWED_GPE)
+ return -EINVAL;
+ /* For now, allow GPE only together with COLLECT_METADATA.
+ * This can be relaxed later; in that case, the other side
+ * of the PtP link will have to be provided.
+ */
+ if (!(conf->flags & VXLAN_F_COLLECT_METADATA))
+ return -EINVAL;
+
+ vxlan_raw_setup(dev);
+ } else {
+ vxlan_ether_setup(dev);
+ }
+
vxlan->net = src_net;
dst->remote_vni = conf->vni;
@@ -2784,8 +2904,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
dev->needed_headroom = needed_headroom;
memcpy(&vxlan->cfg, conf, sizeof(*conf));
- if (!vxlan->cfg.dst_port)
- vxlan->cfg.dst_port = default_port;
+ if (!vxlan->cfg.dst_port) {
+ if (conf->flags & VXLAN_F_GPE)
+ vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
+ else
+ vxlan->cfg.dst_port = default_port;
+ }
vxlan->flags |= conf->flags;
if (!vxlan->cfg.age_interval)
@@ -2828,30 +2952,6 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
return 0;
}
-struct net_device *vxlan_dev_create(struct net *net, const char *name,
- u8 name_assign_type, struct vxlan_config *conf)
-{
- struct nlattr *tb[IFLA_MAX+1];
- struct net_device *dev;
- int err;
-
- memset(&tb, 0, sizeof(tb));
-
- dev = rtnl_create_link(net, name, name_assign_type,
- &vxlan_link_ops, tb);
- if (IS_ERR(dev))
- return dev;
-
- err = vxlan_dev_configure(net, dev, conf);
- if (err < 0) {
- free_netdev(dev);
- return ERR_PTR(err);
- }
-
- return dev;
-}
-EXPORT_SYMBOL_GPL(vxlan_dev_create);
-
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
@@ -2956,6 +3056,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (data[IFLA_VXLAN_GBP])
conf.flags |= VXLAN_F_GBP;
+ if (data[IFLA_VXLAN_GPE])
+ conf.flags |= VXLAN_F_GPE;
+
if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
@@ -2975,6 +3078,10 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
case -EEXIST:
pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
break;
+
+ case -EINVAL:
+ pr_info("unsupported combination of extensions\n");
+ break;
}
return err;
@@ -3102,6 +3209,10 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_flag(skb, IFLA_VXLAN_GBP))
goto nla_put_failure;
+ if (vxlan->flags & VXLAN_F_GPE &&
+ nla_put_flag(skb, IFLA_VXLAN_GPE))
+ goto nla_put_failure;
+
if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
goto nla_put_failure;
@@ -3133,6 +3244,40 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
.get_link_net = vxlan_get_link_net,
};
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+ u8 name_assign_type,
+ struct vxlan_config *conf)
+{
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct net_device *dev;
+ int err;
+
+ memset(&tb, 0, sizeof(tb));
+
+ dev = rtnl_create_link(net, name, name_assign_type,
+ &vxlan_link_ops, tb);
+ if (IS_ERR(dev))
+ return dev;
+
+ err = vxlan_dev_configure(net, dev, conf);
+ if (err < 0) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0) {
+ LIST_HEAD(list_kill);
+
+ vxlan_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ return ERR_PTR(err);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(vxlan_dev_create);
+
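Compared with the deleted copy of vxlan_dev_create() earlier in the diff, the function has moved below vxlan_link_ops and gained an rtnl_configure_link() step, so in-kernel callers get a fully registered device; note the asymmetric unwind, free_netdev() before registration versus vxlan_dellink() plus unregister_netdevice_many() after it. A standalone model of that staged create/unwind pattern (toy names):

        #include <stdio.h>
        #include <stdlib.h>

        struct toy_link { int registered; };

        static struct toy_link *toy_alloc(void)
        {
                return calloc(1, sizeof(struct toy_link));
        }

        static int toy_configure(struct toy_link *l, int fail)
        {
                (void)l;
                return fail ? -1 : 0;
        }

        static int toy_register(struct toy_link *l, int fail)
        {
                if (fail)
                        return -1;
                l->registered = 1;
                return 0;
        }

        static void toy_unregister_and_free(struct toy_link *l)
        {
                l->registered = 0;
                free(l);
        }

        static struct toy_link *toy_create(int fail_cfg, int fail_reg)
        {
                struct toy_link *l = toy_alloc();

                if (!l)
                        return NULL;
                if (toy_configure(l, fail_cfg) < 0) {
                        free(l);                        /* not registered yet */
                        return NULL;
                }
                if (toy_register(l, fail_reg) < 0) {
                        toy_unregister_and_free(l);     /* full teardown path */
                        return NULL;
                }
                return l;
        }

        int main(void)
        {
                struct toy_link *l = toy_create(0, 0);

                printf("created: %s\n", l ? "yes" : "no");
                if (l)
                        toy_unregister_and_free(l);
                return 0;
        }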
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
struct net_device *dev)
{
@@ -3155,20 +3300,22 @@ static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
unregister_netdevice_many(&list_kill);
}
-static int vxlan_lowerdev_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int vxlan_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
if (event == NETDEV_UNREGISTER)
vxlan_handle_lowerdev_unregister(vn, dev);
+ else if (event == NETDEV_OFFLOAD_PUSH_VXLAN)
+ vxlan_push_rx_ports(dev);
return NOTIFY_DONE;
}
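The rename from vxlan_lowerdev_event() reflects that the notifier no longer reacts only to lower-device unregistration: on NETDEV_OFFLOAD_PUSH_VXLAN it re-announces the driver's UDP receive ports, presumably so a re-initialized NIC can re-enable receive offloads. The shape is a plain event demultiplexer:

        #include <stdio.h>

        enum toy_event { TOY_UNREGISTER, TOY_OFFLOAD_PUSH, TOY_OTHER };

        static void toy_handle_unregister(void)
        {
                puts("clean up devices on this lower netdev");
        }

        static void toy_handle_offload_push(void)
        {
                puts("re-advertise rx ports to the NIC");
        }

        /* One notifier callback, several events: dispatch the ones we
         * care about, report "done" for everything else. */
        static int toy_notifier(enum toy_event ev)
        {
                if (ev == TOY_UNREGISTER)
                        toy_handle_unregister();
                else if (ev == TOY_OFFLOAD_PUSH)
                        toy_handle_offload_push();
                return 0; /* NOTIFY_DONE-like */
        }

        int main(void)
        {
                toy_notifier(TOY_OFFLOAD_PUSH);
                toy_notifier(TOY_UNREGISTER);
                return 0;
        }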
static struct notifier_block vxlan_notifier_block __read_mostly = {
- .notifier_call = vxlan_lowerdev_event,
+ .notifier_call = vxlan_netdevice_event,
};
static __net_init int vxlan_init_net(struct net *net)
@@ -3222,10 +3369,6 @@ static int __init vxlan_init_module(void)
{
int rc;
- vxlan_wq = alloc_workqueue("vxlan", 0, 0);
- if (!vxlan_wq)
- return -ENOMEM;
-
get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
rc = register_pernet_subsys(&vxlan_net_ops);
@@ -3246,7 +3389,6 @@ out3:
out2:
unregister_pernet_subsys(&vxlan_net_ops);
out1:
- destroy_workqueue(vxlan_wq);
return rc;
}
late_initcall(vxlan_init_module);
@@ -3255,7 +3397,6 @@ static void __exit vxlan_cleanup_module(void)
{
rtnl_link_unregister(&vxlan_link_ops);
unregister_netdevice_notifier(&vxlan_notifier_block);
- destroy_workqueue(vxlan_wq);
unregister_pernet_subsys(&vxlan_net_ops);
/* rcu_barrier() is called by netns */
}
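With vxlan_del_work gone (see the hunk near the ethtool ops above), nothing in the driver queues deferred work anymore, so vxlan_wq disappears from both module init and exit; socket release appears to happen synchronously at the final reference drop, with kfree_rcu() still deferring the actual free past a grace period. A minimal userspace analogue of drop-last-reference-then-release, under the assumption that this mirrors the new call-site behavior:

        #include <stdio.h>
        #include <stdlib.h>
        #include <stdatomic.h>

        struct toy_sock {
                atomic_int refcnt;
                int fd;         /* stand-in for the underlying UDP socket */
        };

        static struct toy_sock *toy_sock_new(void)
        {
                struct toy_sock *s = malloc(sizeof(*s));

                if (!s)
                        return NULL;
                atomic_init(&s->refcnt, 1);
                s->fd = 42;
                return s;
        }

        /* The last reference drop releases the resource right here, with
         * no workqueue hop; a real kernel would still RCU-defer the free
         * (kfree_rcu()). */
        static void toy_sock_put(struct toy_sock *s)
        {
                if (atomic_fetch_sub(&s->refcnt, 1) == 1) {
                        printf("closing fd %d\n", s->fd);
                        free(s);
                }
        }

        int main(void)
        {
                struct toy_sock *s = toy_sock_new();

                if (!s)
                        return 1;
                toy_sock_put(s);
                return 0;
        }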
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 848ea6a39..b87fe0a01 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -739,7 +739,7 @@ static char *cosa_net_setup_rx(struct channel_data *chan, int size)
chan->netdev->stats.rx_dropped++;
return NULL;
}
- chan->netdev->trans_start = jiffies;
+ netif_trans_update(chan->netdev);
return skb_put(chan->rx_skb, size);
}
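This and the following WAN/WiMAX hunks are one mechanical conversion: open-coded dev->trans_start = jiffies writes become netif_trans_update(dev). In this kernel the transmit timestamp lives in the per-queue struct netdev_queue, and the helper writes queue 0's trans_start only when the value changed, sparing a hot cacheline. A rough model of write-if-changed timestamping (toy types):

        #include <stdio.h>

        struct toy_txq { unsigned long trans_start; };
        struct toy_dev { struct toy_txq txq[1]; };

        static unsigned long toy_jiffies = 1000;

        /* Update the per-queue tx timestamp, but only write when the
         * value actually changes, so unchanged timestamps stay
         * cache-clean. */
        static void toy_trans_update(struct toy_dev *d)
        {
                struct toy_txq *q = &d->txq[0];

                if (q->trans_start != toy_jiffies)
                        q->trans_start = toy_jiffies;
        }

        int main(void)
        {
                struct toy_dev d = { .txq = { { 0 } } };

                toy_trans_update(&d);
                printf("trans_start=%lu\n", d.txq[0].trans_start);
                return 0;
        }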
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 69b994f3b..3c9cbf908 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -831,7 +831,7 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
DMA_OWN | TX_STP | TX_ENP);
dev->stats.tx_packets++;
dev->stats.tx_bytes += len;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
/*
@@ -1389,7 +1389,7 @@ do_bottom_half_tx(struct fst_card_info *card)
DMA_OWN | TX_STP | TX_ENP);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
} else {
/* Or do it through dma */
memcpy(card->tx_dma_handle_host,
@@ -2258,7 +2258,7 @@ fst_tx_timeout(struct net_device *dev)
card->card_no, port->index);
fst_issue_cmd(port, ABORTTX);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_wake_queue(dev);
port->start = 0;
}
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index bb33b242a..299140c04 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -2105,7 +2105,7 @@ static void lmc_driver_timeout(struct net_device *dev)
sc->lmc_device->stats.tx_errors++;
sc->extra_stats.tx_ProcTimeout++; /* -baz */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
bug_out:
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 8fef8d834..d98c7e571 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -860,9 +860,9 @@ prepare_to_send( struct sk_buff *skb, struct net_device *dev )
outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 );
#ifdef CONFIG_SBNI_MULTILINE
- nl->master->trans_start = jiffies;
+ netif_trans_update(nl->master);
#else
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
#endif
}
@@ -889,10 +889,10 @@ drop_xmit_queue( struct net_device *dev )
nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
#ifdef CONFIG_SBNI_MULTILINE
netif_start_queue( nl->master );
- nl->master->trans_start = jiffies;
+ netif_trans_update(nl->master);
#else
netif_start_queue( dev );
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
#endif
}
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index a9970f1af..bb74f4b9a 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -334,7 +334,7 @@ int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev,
d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
i2400m, net_dev, skb);
/* FIXME: check eth hdr, only IPv4 is routed by the device as of now */
- net_dev->trans_start = jiffies;
+ netif_trans_update(net_dev);
i2400m_tx_prep_header(skb);
d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
skb, skb->len);
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 15f057ed4..70ecd82d6 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -440,7 +440,7 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
rx_status.rate_idx = rate;
rx_status.freq = adm8211_channels[priv->channel - 1].center_freq;
- rx_status.band = IEEE80211_BAND_2GHZ;
+ rx_status.band = NL80211_BAND_2GHZ;
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(dev, skb);
@@ -1894,7 +1894,7 @@ static int adm8211_probe(struct pci_dev *pdev,
priv->channel = 1;
- dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
err = ieee80211_register_hw(dev);
if (err) {
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index ccfa825a0..425fa1d32 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1471,12 +1471,12 @@ static int ar5523_init_modes(struct ar5523 *ar)
memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels));
memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates));
- ar->band.band = IEEE80211_BAND_2GHZ;
+ ar->band.band = NL80211_BAND_2GHZ;
ar->band.channels = ar->channels;
ar->band.n_channels = ARRAY_SIZE(ar5523_channels);
ar->band.bitrates = ar->rates;
ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates);
- ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar->band;
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = &ar->band;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 65ef483eb..da7a7c8da 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -185,7 +185,7 @@ struct ath_common {
bool bt_ant_diversity;
int last_rssi;
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
};
static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common)
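The band changes in this and the two preceding files are mechanical renames: mac80211's enum ieee80211_band was retired in favor of cfg80211's enum nl80211_band, and IEEE80211_NUM_BANDS became NUM_NL80211_BANDS, collapsing two enums that had to stay value-identical into one. A toy illustration of the drift hazard such a rename removes (illustrative values only):

        #include <stdio.h>

        /* Two enums that must mirror each other invite silent drift;
         * collapsing them into one definition removes the hazard. */
        enum band_old { OLD_2GHZ, OLD_5GHZ, OLD_NUM };
        enum band_new { NEW_2GHZ, NEW_5GHZ, NEW_60GHZ, NEW_NUM };

        int main(void)
        {
                /* With a single shared enum, this mismatch cannot happen. */
                printf("old NUM=%d new NUM=%d%s\n", OLD_NUM, NEW_NUM,
                       OLD_NUM != NEW_NUM ? " (drifted!)" : "");
                return 0;
        }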
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index edf362928..9fb8d7472 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -411,7 +411,8 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
lockdep_assert_held(&ar_pci->ce_lock);
- if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+ if ((pipe->id != 5) &&
+ CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
return -ENOSPC;
desc->addr = __cpu_to_le32(paddr);
@@ -425,6 +426,19 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
return 0;
}
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+}
+
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
struct ath10k *ar = pipe->ar;
@@ -444,14 +458,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
*/
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp)
+ unsigned int *nbytesp)
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
- struct ath10k *ar = ce_state->ar;
unsigned int sw_index = dest_ring->sw_index;
struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -476,21 +486,17 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
desc->nbytes = 0;
/* Return data from completed destination descriptor */
- *bufferp = __le32_to_cpu(sdesc.addr);
*nbytesp = nbytes;
- *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
-
- if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
- *flagsp = CE_RECV_FLAG_SWAPPED;
- else
- *flagsp = 0;
if (per_transfer_contextp)
*per_transfer_contextp =
dest_ring->per_transfer_context[sw_index];
- /* sanity */
- dest_ring->per_transfer_context[sw_index] = NULL;
+ /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+ * So update the transfer context for all CEs except CE5.
+ */
+ if (ce_state->id != 5)
+ dest_ring->per_transfer_context[sw_index] = NULL;
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
@@ -501,10 +507,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp)
+ unsigned int *nbytesp)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -513,8 +516,7 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
spin_lock_bh(&ar_pci->ce_lock);
ret = ath10k_ce_completed_recv_next_nolock(ce_state,
per_transfer_contextp,
- bufferp, nbytesp,
- transfer_idp, flagsp);
+ nbytesp);
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
@@ -1048,11 +1050,11 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
*
* For the lack of a better place do the check here.
*/
- BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(2*TARGET_TLV_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
ce_state->ar = ar;
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 47b734ce7..dfc098606 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -22,7 +22,7 @@
/* Maximum number of Copy Engine's supported */
#define CE_COUNT_MAX 12
-#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
@@ -166,6 +166,7 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
/* recv flags */
/* Data is byte-swapped */
@@ -177,10 +178,7 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
*/
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp);
+ unsigned int *nbytesp);
/*
* Supply data for the next completed unprocessed send descriptor.
* Pops 1 completed send buffer from Source ring.
@@ -212,10 +210,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp);
+ unsigned int *nbytesp);
/*
* Support clean shutdown by allowing the caller to cancel
@@ -413,9 +408,11 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
- (((int)(toidx)-(int)(fromidx)) & (nentries_mask))
+ (((int)(toidx) - (int)(fromidx)) & (nentries_mask))
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+#define CE_RING_IDX_ADD(nentries_mask, idx, num) \
+ (((idx) + (num)) & (nentries_mask))
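CE_RING_IDX_ADD generalizes CE_RING_IDX_INCR so the HTT rx path (ath10k_ce_rx_update_write_idx() above) can retire a batch of buffers and publish the hardware write index once instead of per entry; the mask trick is valid only because nentries is a power of two. A standalone demo of the arithmetic:

        #include <stdio.h>

        #define RING_DELTA(mask, from, to)  (((int)(to) - (int)(from)) & (mask))
        #define RING_IDX_ADD(mask, idx, n)  (((idx) + (n)) & (mask))

        int main(void)
        {
                unsigned int nentries = 8;      /* must be a power of two */
                unsigned int mask = nentries - 1;
                unsigned int wi = 6;

                /* Advance by 3 with wraparound: (6 + 3) & 7 == 1. */
                wi = RING_IDX_ADD(mask, wi, 3);
                printf("write index -> %u\n", wi);

                /* Distance from index 1 to index 2: (2 - 1) & 7 == 1. */
                printf("delta -> %d\n", RING_DELTA(mask, 1, 2));
                return 0;
        }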
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
ar->regs->ce_wrap_intr_sum_host_msi_lsb
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 3ede921da..697f8d054 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -60,10 +60,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+ .cal_data_len = 2116,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
- .fw = QCA988X_HW_2_0_FW_FILE,
- .otp = QCA988X_HW_2_0_OTP_FILE,
.board = QCA988X_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
@@ -78,10 +77,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
- .fw = QCA6174_HW_2_1_FW_FILE,
- .otp = QCA6174_HW_2_1_OTP_FILE,
.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -97,10 +95,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
- .fw = QCA6174_HW_2_1_FW_FILE,
- .otp = QCA6174_HW_2_1_OTP_FILE,
.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -116,10 +113,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_3_0_FW_DIR,
- .fw = QCA6174_HW_3_0_FW_FILE,
- .otp = QCA6174_HW_3_0_OTP_FILE,
.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -135,11 +131,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+ .cal_data_len = 8124,
.fw = {
/* uses same binaries as hw3.0 */
.dir = QCA6174_HW_3_0_FW_DIR,
- .fw = QCA6174_HW_3_0_FW_FILE,
- .otp = QCA6174_HW_3_0_OTP_FILE,
.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -156,15 +151,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
- .num_msdu_desc = 1424,
- .qcache_active_peers = 50,
.tx_chain_mask = 0xf,
.rx_chain_mask = 0xf,
.max_spatial_stream = 4,
+ .cal_data_len = 12064,
.fw = {
.dir = QCA99X0_HW_2_0_FW_DIR,
- .fw = QCA99X0_HW_2_0_FW_FILE,
- .otp = QCA99X0_HW_2_0_OTP_FILE,
.board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
@@ -179,10 +171,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA9377_HW_1_0_FW_DIR,
- .fw = QCA9377_HW_1_0_FW_FILE,
- .otp = QCA9377_HW_1_0_OTP_FILE,
.board = QCA9377_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
@@ -197,10 +188,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA9377_HW_1_0_FW_DIR,
- .fw = QCA9377_HW_1_0_FW_FILE,
- .otp = QCA9377_HW_1_0_OTP_FILE,
.board = QCA9377_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
@@ -212,20 +202,18 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca4019 hw1.0",
.patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .has_shifted_cc_wraparound = true,
.otp_exe_param = 0x0010000,
.continuous_frag_desc = true,
.channel_counters_freq_hz = 125000,
.max_probe_resp_desc_thres = 24,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
- .num_msdu_desc = 2500,
- .qcache_active_peers = 35,
.tx_chain_mask = 0x3,
.rx_chain_mask = 0x3,
.max_spatial_stream = 2,
+ .cal_data_len = 12064,
.fw = {
.dir = QCA4019_HW_1_0_FW_DIR,
- .fw = QCA4019_HW_1_0_FW_FILE,
- .otp = QCA4019_HW_1_0_OTP_FILE,
.board = QCA4019_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA4019_BOARD_DATA_SZ,
.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
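Across these hw_params hunks, every chip gains a cal_data_len (2116 bytes for QCA988X, 8124 for QCA6174/QCA9377, 12064 for QCA99X0/QCA4019) while the fixed fw/otp file names and the num_msdu_desc/qcache_active_peers fields go away; the per-chip length replaces the old QCA988X_CAL_DATA_LEN constant in the DT validation below. A sketch of length-checking calibration blobs against such a table (values taken from the hunks):

        #include <stdio.h>

        struct toy_hw_params {
                const char *name;
                unsigned int cal_data_len;
        };

        static const struct toy_hw_params toy_hw[] = {
                { "qca988x hw2.0", 2116 },
                { "qca6174 hw3.0", 8124 },
                { "qca99x0 hw2.0", 12064 },
        };

        /* Reject calibration data whose size does not match the chip's
         * expectation (the kernel path returns -EMSGSIZE here). */
        static int toy_check_cal(const struct toy_hw_params *hw,
                                 unsigned int len)
        {
                if (len != hw->cal_data_len) {
                        fprintf(stderr,
                                "%s: invalid cal data length %u (want %u)\n",
                                hw->name, len, hw->cal_data_len);
                        return -1;
                }
                return 0;
        }

        int main(void)
        {
                toy_check_cal(&toy_hw[0], 2116);        /* ok */
                toy_check_cal(&toy_hw[1], 2116);        /* rejected */
                return 0;
        }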
@@ -274,7 +262,7 @@ void ath10k_core_get_fw_features_str(struct ath10k *ar,
int i;
for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
- if (test_bit(i, ar->fw_features)) {
+ if (test_bit(i, ar->normal_mode_fw.fw_file.fw_features)) {
if (len > 0)
len += scnprintf(buf + len, buf_len - len, ",");
@@ -466,18 +454,18 @@ exit:
return ret;
}
-static int ath10k_download_cal_file(struct ath10k *ar)
+static int ath10k_download_cal_file(struct ath10k *ar,
+ const struct firmware *file)
{
int ret;
- if (!ar->cal_file)
+ if (!file)
return -ENOENT;
- if (IS_ERR(ar->cal_file))
- return PTR_ERR(ar->cal_file);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
- ret = ath10k_download_board_data(ar, ar->cal_file->data,
- ar->cal_file->size);
+ ret = ath10k_download_board_data(ar, file->data, file->size);
if (ret) {
ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
return ret;
@@ -488,7 +476,7 @@ static int ath10k_download_cal_file(struct ath10k *ar)
return 0;
}
-static int ath10k_download_cal_dt(struct ath10k *ar)
+static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name)
{
struct device_node *node;
int data_len;
@@ -502,13 +490,12 @@ static int ath10k_download_cal_dt(struct ath10k *ar)
*/
return -ENOENT;
- if (!of_get_property(node, "qcom,ath10k-calibration-data",
- &data_len)) {
+ if (!of_get_property(node, dt_name, &data_len)) {
/* The calibration data node is optional */
return -ENOENT;
}
- if (data_len != QCA988X_CAL_DATA_LEN) {
+ if (data_len != ar->hw_params.cal_data_len) {
ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
data_len);
ret = -EMSGSIZE;
@@ -521,8 +508,7 @@ static int ath10k_download_cal_dt(struct ath10k *ar)
goto out;
}
- ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data",
- data, data_len);
+ ret = of_property_read_u8_array(node, dt_name, data, data_len);
if (ret) {
ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
ret);
@@ -553,7 +539,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
address = ar->hw_params.patch_load_addr;
- if (!ar->otp_data || !ar->otp_len) {
+ if (!ar->normal_mode_fw.fw_file.otp_data ||
+ !ar->normal_mode_fw.fw_file.otp_len) {
ath10k_warn(ar,
"failed to retrieve board id because of invalid otp\n");
return -ENODATA;
@@ -561,9 +548,11 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot upload otp to 0x%x len %zd for board id\n",
- address, ar->otp_len);
+ address, ar->normal_mode_fw.fw_file.otp_len);
- ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+ ret = ath10k_bmi_fast_download(ar, address,
+ ar->normal_mode_fw.fw_file.otp_data,
+ ar->normal_mode_fw.fw_file.otp_len);
if (ret) {
ath10k_err(ar, "could not write otp for board id check: %d\n",
ret);
@@ -601,7 +590,9 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
int ret;
- ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
+ ret = ath10k_download_board_data(ar,
+ ar->running_fw->board_data,
+ ar->running_fw->board_len);
if (ret) {
ath10k_err(ar, "failed to download board data: %d\n", ret);
return ret;
@@ -609,16 +600,20 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
/* OTP is optional */
- if (!ar->otp_data || !ar->otp_len) {
+ if (!ar->running_fw->fw_file.otp_data ||
+ !ar->running_fw->fw_file.otp_len) {
ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
- ar->otp_data, ar->otp_len);
+ ar->running_fw->fw_file.otp_data,
+ ar->running_fw->fw_file.otp_len);
return 0;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
- address, ar->otp_len);
+ address, ar->running_fw->fw_file.otp_len);
- ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+ ret = ath10k_bmi_fast_download(ar, address,
+ ar->running_fw->fw_file.otp_data,
+ ar->running_fw->fw_file.otp_len);
if (ret) {
ath10k_err(ar, "could not write otp (%d)\n", ret);
return ret;
@@ -633,7 +628,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
- ar->fw_features)) &&
+ ar->running_fw->fw_file.fw_features)) &&
result != 0) {
ath10k_err(ar, "otp calibration failed: %d", result);
return -EINVAL;
@@ -642,46 +637,32 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
return 0;
}
-static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
+static int ath10k_download_fw(struct ath10k *ar)
{
u32 address, data_len;
- const char *mode_name;
const void *data;
int ret;
address = ar->hw_params.patch_load_addr;
- switch (mode) {
- case ATH10K_FIRMWARE_MODE_NORMAL:
- data = ar->firmware_data;
- data_len = ar->firmware_len;
- mode_name = "normal";
- ret = ath10k_swap_code_seg_configure(ar,
- ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
- if (ret) {
- ath10k_err(ar, "failed to configure fw code swap: %d\n",
- ret);
- return ret;
- }
- break;
- case ATH10K_FIRMWARE_MODE_UTF:
- data = ar->testmode.utf_firmware_data;
- data_len = ar->testmode.utf_firmware_len;
- mode_name = "utf";
- break;
- default:
- ath10k_err(ar, "unknown firmware mode: %d\n", mode);
- return -EINVAL;
+ data = ar->running_fw->fw_file.firmware_data;
+ data_len = ar->running_fw->fw_file.firmware_len;
+
+ ret = ath10k_swap_code_seg_configure(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to configure fw code swap: %d\n",
+ ret);
+ return ret;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot uploading firmware image %p len %d mode %s\n",
- data, data_len, mode_name);
+ "boot uploading firmware image %p len %d\n",
+ data, data_len);
ret = ath10k_bmi_fast_download(ar, address, data, data_len);
if (ret) {
- ath10k_err(ar, "failed to download %s firmware: %d\n",
- mode_name, ret);
+ ath10k_err(ar, "failed to download firmware: %d\n",
+ ret);
return ret;
}
@@ -690,42 +671,50 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
static void ath10k_core_free_board_files(struct ath10k *ar)
{
- if (!IS_ERR(ar->board))
- release_firmware(ar->board);
+ if (!IS_ERR(ar->normal_mode_fw.board))
+ release_firmware(ar->normal_mode_fw.board);
- ar->board = NULL;
- ar->board_data = NULL;
- ar->board_len = 0;
+ ar->normal_mode_fw.board = NULL;
+ ar->normal_mode_fw.board_data = NULL;
+ ar->normal_mode_fw.board_len = 0;
}
static void ath10k_core_free_firmware_files(struct ath10k *ar)
{
- if (!IS_ERR(ar->otp))
- release_firmware(ar->otp);
-
- if (!IS_ERR(ar->firmware))
- release_firmware(ar->firmware);
+ if (!IS_ERR(ar->normal_mode_fw.fw_file.firmware))
+ release_firmware(ar->normal_mode_fw.fw_file.firmware);
if (!IS_ERR(ar->cal_file))
release_firmware(ar->cal_file);
+ if (!IS_ERR(ar->pre_cal_file))
+ release_firmware(ar->pre_cal_file);
+
ath10k_swap_code_seg_release(ar);
- ar->otp = NULL;
- ar->otp_data = NULL;
- ar->otp_len = 0;
+ ar->normal_mode_fw.fw_file.otp_data = NULL;
+ ar->normal_mode_fw.fw_file.otp_len = 0;
- ar->firmware = NULL;
- ar->firmware_data = NULL;
- ar->firmware_len = 0;
+ ar->normal_mode_fw.fw_file.firmware = NULL;
+ ar->normal_mode_fw.fw_file.firmware_data = NULL;
+ ar->normal_mode_fw.fw_file.firmware_len = 0;
ar->cal_file = NULL;
+ ar->pre_cal_file = NULL;
}
static int ath10k_fetch_cal_file(struct ath10k *ar)
{
char filename[100];
+ /* pre-cal-<bus>-<id>.bin */
+ scnprintf(filename, sizeof(filename), "/*(DEBLOBBED)*/",
+ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+ ar->pre_cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
+ if (!IS_ERR(ar->pre_cal_file))
+ goto success;
+
/*(DEBLOBBED)*/
scnprintf(filename, sizeof(filename), "/*(DEBLOBBED)*/",
ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
@@ -734,7 +723,7 @@ static int ath10k_fetch_cal_file(struct ath10k *ar)
if (IS_ERR(ar->cal_file))
/* calibration file is optional, don't print any warnings */
return PTR_ERR(ar->cal_file);
-
+success:
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
ATH10K_FW_DIR, filename);
@@ -748,14 +737,14 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar)
return -EINVAL;
}
- ar->board = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
- if (IS_ERR(ar->board))
- return PTR_ERR(ar->board);
+ ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->normal_mode_fw.board))
+ return PTR_ERR(ar->normal_mode_fw.board);
- ar->board_data = ar->board->data;
- ar->board_len = ar->board->size;
+ ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data;
+ ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size;
return 0;
}
@@ -815,8 +804,8 @@ static int ath10k_core_parse_bd_ie_board(struct ath10k *ar,
"boot found board data for '%s'",
boardname);
- ar->board_data = board_ie_data;
- ar->board_len = board_ie_len;
+ ar->normal_mode_fw.board_data = board_ie_data;
+ ar->normal_mode_fw.board_len = board_ie_len;
ret = 0;
goto out;
@@ -849,12 +838,14 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
const u8 *data;
int ret, ie_id;
- ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
- if (IS_ERR(ar->board))
- return PTR_ERR(ar->board);
+ ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ filename);
+ if (IS_ERR(ar->normal_mode_fw.board))
+ return PTR_ERR(ar->normal_mode_fw.board);
- data = ar->board->data;
- len = ar->board->size;
+ data = ar->normal_mode_fw.board->data;
+ len = ar->normal_mode_fw.board->size;
/* magic has extra null byte padded */
magic_len = strlen(ATH10K_BOARD_MAGIC) + 1;
@@ -921,7 +912,7 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
}
out:
- if (!ar->board_data || !ar->board_len) {
+ if (!ar->normal_mode_fw.board_data || !ar->normal_mode_fw.board_len) {
ath10k_err(ar,
"failed to fetch board data for %s from %s/%s\n",
boardname, ar->hw_params.fw.dir, filename);
@@ -989,51 +980,8 @@ success:
return 0;
}
-static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
-{
- int ret = 0;
-
- if (ar->hw_params.fw.fw == NULL) {
- ath10k_err(ar, "firmware file not defined\n");
- return -EINVAL;
- }
-
- ar->firmware = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.fw);
- if (IS_ERR(ar->firmware)) {
- ret = PTR_ERR(ar->firmware);
- ath10k_err(ar, "could not fetch firmware (%d)\n", ret);
- goto err;
- }
-
- ar->firmware_data = ar->firmware->data;
- ar->firmware_len = ar->firmware->size;
-
- /* OTP may be undefined. If so, don't fetch it at all */
- if (ar->hw_params.fw.otp == NULL)
- return 0;
-
- ar->otp = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.otp);
- if (IS_ERR(ar->otp)) {
- ret = PTR_ERR(ar->otp);
- ath10k_err(ar, "could not fetch otp (%d)\n", ret);
- goto err;
- }
-
- ar->otp_data = ar->otp->data;
- ar->otp_len = ar->otp->size;
-
- return 0;
-
-err:
- ath10k_core_free_firmware_files(ar);
- return ret;
-}
-
-static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+ struct ath10k_fw_file *fw_file)
{
size_t magic_len, len, ie_len;
int ie_id, i, index, bit, ret;
@@ -1042,15 +990,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
__le32 *timestamp, *version;
/* first fetch the firmware file (firmware-*.bin) */
- ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
- if (IS_ERR(ar->firmware)) {
+ fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+ name);
+ if (IS_ERR(fw_file->firmware)) {
ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
- ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
- return PTR_ERR(ar->firmware);
+ ar->hw_params.fw.dir, name,
+ PTR_ERR(fw_file->firmware));
+ return PTR_ERR(fw_file->firmware);
}
- data = ar->firmware->data;
- len = ar->firmware->size;
+ data = fw_file->firmware->data;
+ len = fw_file->firmware->size;
/* magic also includes the null byte, check that as well */
magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
@@ -1093,15 +1043,15 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
switch (ie_id) {
case ATH10K_FW_IE_FW_VERSION:
- if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
+ if (ie_len > sizeof(fw_file->fw_version) - 1)
break;
- memcpy(ar->hw->wiphy->fw_version, data, ie_len);
- ar->hw->wiphy->fw_version[ie_len] = '\0';
+ memcpy(fw_file->fw_version, data, ie_len);
+ fw_file->fw_version[ie_len] = '\0';
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found fw version %s\n",
- ar->hw->wiphy->fw_version);
+ fw_file->fw_version);
break;
case ATH10K_FW_IE_TIMESTAMP:
if (ie_len != sizeof(u32))
@@ -1128,21 +1078,21 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"Enabling feature bit: %i\n",
i);
- __set_bit(i, ar->fw_features);
+ __set_bit(i, fw_file->fw_features);
}
}
ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
- ar->fw_features,
- sizeof(ar->fw_features));
+ fw_file->fw_features,
+ sizeof(fw_file->fw_features));
break;
case ATH10K_FW_IE_FW_IMAGE:
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found fw image ie (%zd B)\n",
ie_len);
- ar->firmware_data = data;
- ar->firmware_len = ie_len;
+ fw_file->firmware_data = data;
+ fw_file->firmware_len = ie_len;
break;
case ATH10K_FW_IE_OTP_IMAGE:
@@ -1150,8 +1100,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
"found otp image ie (%zd B)\n",
ie_len);
- ar->otp_data = data;
- ar->otp_len = ie_len;
+ fw_file->otp_data = data;
+ fw_file->otp_len = ie_len;
break;
case ATH10K_FW_IE_WMI_OP_VERSION:
@@ -1160,10 +1110,10 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
version = (__le32 *)data;
- ar->wmi.op_version = le32_to_cpup(version);
+ fw_file->wmi_op_version = le32_to_cpup(version);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
- ar->wmi.op_version);
+ fw_file->wmi_op_version);
break;
case ATH10K_FW_IE_HTT_OP_VERSION:
if (ie_len != sizeof(u32))
@@ -1171,17 +1121,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
version = (__le32 *)data;
- ar->htt.op_version = le32_to_cpup(version);
+ fw_file->htt_op_version = le32_to_cpup(version);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
- ar->htt.op_version);
+ fw_file->htt_op_version);
break;
case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found fw code swap image ie (%zd B)\n",
ie_len);
- ar->swap.firmware_codeswap_data = data;
- ar->swap.firmware_codeswap_len = ie_len;
+ fw_file->codeswap_data = data;
+ fw_file->codeswap_len = ie_len;
break;
default:
ath10k_warn(ar, "Unknown FW IE: %u\n",
@@ -1196,7 +1146,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
data += ie_len;
}
- if (!ar->firmware_data || !ar->firmware_len) {
+ if (!fw_file->firmware_data ||
+ !fw_file->firmware_len) {
ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
ar->hw_params.fw.dir, name);
ret = -ENOMEDIUM;
@@ -1220,40 +1171,95 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
ar->fw_api = 5;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE,
+ &ar->normal_mode_fw.fw_file);
if (ret == 0)
goto success;
ar->fw_api = 4;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE);
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE,
+ &ar->normal_mode_fw.fw_file);
if (ret == 0)
goto success;
ar->fw_api = 3;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE);
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE,
+ &ar->normal_mode_fw.fw_file);
if (ret == 0)
goto success;
ar->fw_api = 2;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
- if (ret == 0)
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE,
+ &ar->normal_mode_fw.fw_file);
+ if (ret)
+ return ret;
+
+success:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+ return 0;
+}
+
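Firmware fetching now walks a strict ladder, API 5, then 4, 3 and 2, and the API-1 fallback (separate firmware and otp files) is deleted outright, which is also why hw_params lost its fw and otp names. The repeated blocks are equivalent to a first-hit-wins loop (hypothetical file names):

        #include <stdio.h>
        #include <string.h>

        /* Pretend fetcher: only the API-3 container exists in this demo. */
        static int toy_fetch(const char *name)
        {
                return strcmp(name, "firmware-3.bin") == 0 ? 0 : -2;
        }

        int main(void)
        {
                static const char *candidates[] = {
                        "firmware-5.bin", "firmware-4.bin",
                        "firmware-3.bin", "firmware-2.bin",
                };
                int api = 0, ret = -2;

                for (unsigned int i = 0; i < 4; i++) {
                        ret = toy_fetch(candidates[i]);
                        if (ret == 0) {
                                api = 5 - (int)i;       /* 5, 4, 3, 2 */
                                break;
                        }
                }
                if (ret)
                        puts("no usable firmware found");
                else
                        printf("using fw api %d\n", api);
                return 0;
        }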
+static int ath10k_core_pre_cal_download(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_download_cal_file(ar, ar->pre_cal_file);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE;
goto success;
+ }
- ar->fw_api = 1;
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find a pre calibration file, try DT next: %d\n",
+ ret);
- ret = ath10k_core_fetch_firmware_api_1(ar);
- if (ret)
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "unable to load pre cal data from DT: %d\n", ret);
return ret;
+ }
+ ar->cal_mode = ATH10K_PRE_CAL_MODE_DT;
success:
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
+ ath10k_cal_mode_str(ar->cal_mode));
+
+ return 0;
+}
+
+static int ath10k_core_pre_cal_config(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_core_pre_cal_download(ar);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "failed to load pre cal data: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_core_get_board_id_from_otp(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to get board id: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_download_and_run_otp(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to run otp: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "pre cal configuration done successfully\n");
return 0;
}
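ath10k_core_pre_cal_config() encodes an ordering constraint: on chips that ship pre-calibration data, that data (from a file or the new qcom,ath10k-pre-calibration-data DT property) must be downloaded before the board ID can be read from OTP and before OTP itself runs; probe tolerates failure later because not every chip has pre-cal data. The control flow is a simple early-exit pipeline:

        #include <stdio.h>

        static int stage(const char *what, int fail)
        {
                if (fail) {
                        fprintf(stderr, "%s failed\n", what);
                        return -1;
                }
                printf("%s ok\n", what);
                return 0;
        }

        /* Each stage depends on the previous one; bail out on error. */
        static int toy_pre_cal_config(void)
        {
                if (stage("download pre-cal data", 0))
                        return -1;
                if (stage("read board id from otp", 0))
                        return -1;
                if (stage("download and run otp", 0))
                        return -1;
                return 0;
        }

        int main(void)
        {
                return toy_pre_cal_config() ? 1 : 0;
        }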
@@ -1262,7 +1268,15 @@ static int ath10k_download_cal_data(struct ath10k *ar)
{
int ret;
- ret = ath10k_download_cal_file(ar);
+ ret = ath10k_core_pre_cal_config(ar);
+ if (ret == 0)
+ return 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "pre cal download procedure failed, try cal file: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_file(ar, ar->cal_file);
if (ret == 0) {
ar->cal_mode = ATH10K_CAL_MODE_FILE;
goto done;
@@ -1272,7 +1286,7 @@ static int ath10k_download_cal_data(struct ath10k *ar)
"boot did not find a calibration file, try DT next: %d\n",
ret);
- ret = ath10k_download_cal_dt(ar);
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
if (ret == 0) {
ar->cal_mode = ATH10K_CAL_MODE_DT;
goto done;
@@ -1383,6 +1397,7 @@ static void ath10k_core_restart(struct work_struct *work)
complete_all(&ar->install_key_done);
complete_all(&ar->vdev_setup_done);
complete_all(&ar->thermal.wmi_sync);
+ complete_all(&ar->bss_survey_done);
wake_up(&ar->htt.empty_tx_wq);
wake_up(&ar->wmi.tx_credits_wq);
wake_up(&ar->peer_mapping_wq);
@@ -1420,15 +1435,17 @@ static void ath10k_core_restart(struct work_struct *work)
static int ath10k_core_init_firmware_features(struct ath10k *ar)
{
- if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
- !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) &&
+ !test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
return -EINVAL;
}
- if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
+ if (fw_file->wmi_op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n",
- ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version);
+ ATH10K_FW_WMI_OP_VERSION_MAX, fw_file->wmi_op_version);
return -EINVAL;
}
@@ -1440,7 +1457,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
break;
case ATH10K_CRYPT_MODE_SW:
if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
- ar->fw_features)) {
+ fw_file->fw_features)) {
ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
return -EINVAL;
}
@@ -1459,7 +1476,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
if (rawmode) {
if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
- ar->fw_features)) {
+ fw_file->fw_features)) {
ath10k_err(ar, "rawmode = 1 requires support from firmware");
return -EINVAL;
}
@@ -1484,19 +1501,19 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
/* Backwards compatibility for firmwares without
* ATH10K_FW_IE_WMI_OP_VERSION.
*/
- if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ if (fw_file->wmi_op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
if (test_bit(ATH10K_FW_FEATURE_WMI_10_2,
- ar->fw_features))
- ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
+ fw_file->fw_features))
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
else
- ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
} else {
- ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
}
}
- switch (ar->wmi.op_version) {
+ switch (fw_file->wmi_op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
ar->max_num_peers = TARGET_NUM_PEERS;
ar->max_num_stations = TARGET_NUM_STATIONS;
@@ -1509,7 +1526,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
- if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+ if (ath10k_peer_stats_enabled(ar)) {
ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS;
} else {
@@ -1538,9 +1555,15 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
- ar->htt.max_num_pending_tx = ar->hw_params.num_msdu_desc;
- ar->fw_stats_req_mask = WMI_STAT_PEER;
+ ar->fw_stats_req_mask = WMI_10_4_STAT_PEER |
+ WMI_10_4_STAT_PEER_EXTD;
ar->max_spatial_stream = ar->hw_params.max_spatial_stream;
+
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ fw_file->fw_features))
+ ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC;
+ else
+ ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -1551,18 +1574,18 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
/* Backwards compatibility for firmwares without
* ATH10K_FW_IE_HTT_OP_VERSION.
*/
- if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
- switch (ar->wmi.op_version) {
+ if (fw_file->htt_op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
+ switch (fw_file->wmi_op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
- ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
- ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
- ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
break;
case ATH10K_FW_WMI_OP_VERSION_10_4:
case ATH10K_FW_WMI_OP_VERSION_UNSET:
@@ -1575,14 +1598,18 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
return 0;
}
-int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+ const struct ath10k_fw_components *fw)
{
int status;
+ u32 val;
lockdep_assert_held(&ar->conf_mutex);
clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+ ar->running_fw = fw;
+
ath10k_bmi_start(ar);
if (ath10k_init_configure_target(ar)) {
@@ -1601,7 +1628,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
* to set the clock source once the target is initialized.
*/
if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
- ar->fw_features)) {
+ ar->running_fw->fw_file.fw_features)) {
status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
if (status) {
ath10k_err(ar, "could not write to skip_clock_init: %d\n",
@@ -1610,7 +1637,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
}
}
- status = ath10k_download_fw(ar, mode);
+ status = ath10k_download_fw(ar);
if (status)
goto err;
@@ -1698,6 +1725,23 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
ar->hw->wiphy->fw_version);
+ if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) {
+ val = 0;
+ if (ath10k_peer_stats_enabled(ar))
+ val = WMI_10_4_PEER_STATS;
+
+ if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
+ val |= WMI_10_4_BSS_CHANNEL_INFO_64;
+
+ status = ath10k_mac_ext_resource_config(ar, val);
+ if (status) {
+ ath10k_err(ar,
+ "failed to send ext resource cfg command : %d\n",
+ status);
+ goto err_hif_stop;
+ }
+ }
+
status = ath10k_wmi_cmd_init(ar);
if (status) {
ath10k_err(ar, "could not send WMI init command (%d)\n",
@@ -1832,13 +1876,27 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_power_down;
}
+ BUILD_BUG_ON(sizeof(ar->hw->wiphy->fw_version) !=
+ sizeof(ar->normal_mode_fw.fw_file.fw_version));
+ memcpy(ar->hw->wiphy->fw_version, ar->normal_mode_fw.fw_file.fw_version,
+ sizeof(ar->hw->wiphy->fw_version));
+
ath10k_debug_print_hwfw_info(ar);
+ ret = ath10k_core_pre_cal_download(ar);
+ if (ret) {
+ /* Pre-calibration data download is not necessary for
+ * all chipsets; ignore failures and continue.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "could not load pre cal data: %d\n", ret);
+ }
+
ret = ath10k_core_get_board_id_from_otp(ar);
if (ret && ret != -EOPNOTSUPP) {
ath10k_err(ar, "failed to get board id from otp: %d\n",
ret);
- return ret;
+ goto err_free_firmware_files;
}
ret = ath10k_core_fetch_board_file(ar);
@@ -1865,7 +1923,8 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
mutex_lock(&ar->conf_mutex);
- ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+ &ar->normal_mode_fw);
if (ret) {
ath10k_err(ar, "could not init core (%d)\n", ret);
goto err_unlock;
@@ -2035,6 +2094,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done);
init_completion(&ar->thermal.wmi_sync);
+ init_completion(&ar->bss_survey_done);
INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
@@ -2048,7 +2108,9 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
mutex_init(&ar->conf_mutex);
spin_lock_init(&ar->data_lock);
+ spin_lock_init(&ar->txqs_lock);
+ INIT_LIST_HEAD(&ar->txqs);
INIT_LIST_HEAD(&ar->peers);
init_waitqueue_head(&ar->peer_mapping_wq);
init_waitqueue_head(&ar->htt.empty_tx_wq);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index a62b62a62..1852e0ee3 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -44,8 +44,8 @@
#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
-#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
-#define ATH10K_CONNECTION_LOSS_HZ (3*HZ)
+#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_CONNECTION_LOSS_HZ (3 * HZ)
#define ATH10K_NUM_CHANS 39
/* Antenna noise floor */
@@ -98,6 +98,7 @@ struct ath10k_skb_cb {
u8 eid;
u16 msdu_id;
struct ieee80211_vif *vif;
+ struct ieee80211_txq *txq;
} __packed;
struct ath10k_skb_rxcb {
@@ -138,7 +139,6 @@ struct ath10k_mem_chunk {
};
struct ath10k_wmi {
- enum ath10k_fw_wmi_op_version op_version;
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
@@ -297,6 +297,9 @@ struct ath10k_dfs_stats {
struct ath10k_peer {
struct list_head list;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+
int vdev_id;
u8 addr[ETH_ALEN];
DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
@@ -305,6 +308,12 @@ struct ath10k_peer {
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
};
+struct ath10k_txq {
+ struct list_head list;
+ unsigned long num_fw_queued;
+ unsigned long num_push_allowed;
+};
+
struct ath10k_sta {
struct ath10k_vif *arvif;
@@ -313,6 +322,7 @@ struct ath10k_sta {
u32 bw;
u32 nss;
u32 smps;
+ u16 peer_id;
struct work_struct update_wk;
@@ -323,7 +333,7 @@ struct ath10k_sta {
#endif
};
-#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
+#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
enum ath10k_beacon_state {
ATH10K_BEACON_SCHEDULED = 0,
@@ -335,6 +345,7 @@ struct ath10k_vif {
struct list_head list;
u32 vdev_id;
+ u16 peer_id;
enum wmi_vdev_type vdev_type;
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
@@ -549,12 +560,17 @@ enum ath10k_dev_flags {
/* Bluetooth coexistance enabled */
ATH10K_FLAG_BTCOEX,
+
+ /* Per Station statistics service */
+ ATH10K_FLAG_PEER_STATS,
};
enum ath10k_cal_mode {
ATH10K_CAL_MODE_FILE,
ATH10K_CAL_MODE_OTP,
ATH10K_CAL_MODE_DT,
+ ATH10K_PRE_CAL_MODE_FILE,
+ ATH10K_PRE_CAL_MODE_DT,
};
enum ath10k_crypt_mode {
@@ -573,6 +589,10 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
return "otp";
case ATH10K_CAL_MODE_DT:
return "dt";
+ case ATH10K_PRE_CAL_MODE_FILE:
+ return "pre-cal-file";
+ case ATH10K_PRE_CAL_MODE_DT:
+ return "pre-cal-dt";
}
return "unknown";
@@ -606,6 +626,34 @@ enum ath10k_tx_pause_reason {
ATH10K_TX_PAUSE_MAX,
};
+struct ath10k_fw_file {
+ const struct firmware *firmware;
+
+ char fw_version[ETHTOOL_FWVERS_LEN];
+
+ DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
+ enum ath10k_fw_wmi_op_version wmi_op_version;
+ enum ath10k_fw_htt_op_version htt_op_version;
+
+ const void *firmware_data;
+ size_t firmware_len;
+
+ const void *otp_data;
+ size_t otp_len;
+
+ const void *codeswap_data;
+ size_t codeswap_len;
+};
+
+struct ath10k_fw_components {
+ const struct firmware *board;
+ const void *board_data;
+ size_t board_len;
+
+ struct ath10k_fw_file fw_file;
+};
+
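struct ath10k_fw_file and ath10k_fw_components gather what used to be loose ar->firmware*/otp*/board* fields into per-mode bundles: normal mode and UTF testmode each own one, and ar->running_fw is a read-only pointer to whichever bundle actually booted. A compact illustration of the pattern (toy fields):

        #include <stdio.h>

        struct toy_fw_file {
                const char *version;
                const void *image;
        };

        struct toy_fw_components {
                struct toy_fw_file fw_file;
                const void *board;
        };

        int main(void)
        {
                struct toy_fw_components normal = { { "10.2.4", "fw-blob" },
                                                    "board" };
                struct toy_fw_components utf = { { "utf-1", "utf-blob" },
                                                 "board" };

                /* The running set is a read-only view; callers must not
                 * modify or free it through this pointer. */
                const struct toy_fw_components *running = &normal;

                printf("running fw %s\n", running->fw_file.version);
                running = &utf; /* testmode swaps the whole bundle */
                printf("running fw %s\n", running->fw_file.version);
                return 0;
        }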
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@@ -631,8 +679,6 @@ struct ath10k {
/* protected by conf_mutex */
bool ani_enabled;
- DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
-
bool p2p;
struct {
@@ -680,39 +726,31 @@ struct ath10k {
/* The padding bytes's location is different on various chips */
enum ath10k_hw_4addr_pad hw_4addr_pad;
- u32 num_msdu_desc;
- u32 qcache_active_peers;
u32 tx_chain_mask;
u32 rx_chain_mask;
u32 max_spatial_stream;
+ u32 cal_data_len;
struct ath10k_hw_params_fw {
const char *dir;
- const char *fw;
- const char *otp;
const char *board;
size_t board_size;
size_t board_ext_size;
} fw;
} hw_params;
- const struct firmware *board;
- const void *board_data;
- size_t board_len;
-
- const struct firmware *otp;
- const void *otp_data;
- size_t otp_len;
+ /* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */
+ struct ath10k_fw_components normal_mode_fw;
- const struct firmware *firmware;
- const void *firmware_data;
- size_t firmware_len;
+ /* READ-ONLY images of the running firmware, which can be either
+ * normal or UTF. Do not modify or release them!
+ */
+ const struct ath10k_fw_components *running_fw;
+ const struct firmware *pre_cal_file;
const struct firmware *cal_file;
struct {
- const void *firmware_codeswap_data;
- size_t firmware_codeswap_len;
struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
} swap;
@@ -744,7 +782,7 @@ struct ath10k {
} scan;
struct {
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
} mac;
/* should never be NULL; needed for regular htt rx */
@@ -756,6 +794,9 @@ struct ath10k {
/* current operating channel definition */
struct cfg80211_chan_def chandef;
+ /* currently configured operating channel in firmware */
+ struct ieee80211_channel *tgt_oper_chan;
+
unsigned long long free_vdev_map;
struct ath10k_vif *monitor_arvif;
bool monitor;
@@ -786,9 +827,13 @@ struct ath10k {
/* protects shared structure data */
spinlock_t data_lock;
+ /* protects: ar->txqs, artxq->list */
+ spinlock_t txqs_lock;
+ struct list_head txqs;
struct list_head arvifs;
struct list_head peers;
+ struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS];
wait_queue_head_t peer_mapping_wq;
/* protected by conf_mutex */
@@ -831,6 +876,7 @@ struct ath10k {
* avoid reporting garbage data.
*/
bool ch_info_can_report_survey;
+ struct completion bss_survey_done;
struct dfs_pattern_detector *dfs_detector;
@@ -838,8 +884,6 @@ struct ath10k {
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
-#endif
-
struct {
/* relay(fs) channel for spectral scan */
struct rchan *rfs_chan_spec_scan;
@@ -848,16 +892,12 @@ struct ath10k {
enum ath10k_spectral_mode mode;
struct ath10k_spec_scan config;
} spectral;
+#endif
struct {
/* protected by conf_mutex */
- const struct firmware *utf;
- char utf_version[32];
- const void *utf_firmware_data;
- size_t utf_firmware_len;
- DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
- enum ath10k_fw_wmi_op_version orig_wmi_op_version;
- enum ath10k_fw_wmi_op_version op_version;
+ struct ath10k_fw_components utf_mode_fw;
+
/* protected by data_lock */
bool utf_monitor;
} testmode;
@@ -876,6 +916,15 @@ struct ath10k {
u8 drv_priv[0] __aligned(sizeof(void *));
};
+static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
+{
+ if (test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) &&
+ test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ return true;
+
+ return false;
+}
+
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_bus bus,
enum ath10k_hw_rev hw_rev,
@@ -884,8 +933,11 @@ void ath10k_core_destroy(struct ath10k *ar);
void ath10k_core_get_fw_features_str(struct ath10k *ar,
char *buf,
size_t max_len);
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+ struct ath10k_fw_file *fw_file);
-int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+ const struct ath10k_fw_components *fw_components);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar, u32 chip_id);
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 0f834646e..e2511550f 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -126,7 +126,9 @@ EXPORT_SYMBOL(ath10k_info);
void ath10k_debug_print_hwfw_info(struct ath10k *ar)
{
+ const struct firmware *firmware;
char fw_features[128] = {};
+ u32 crc = 0;
ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
@@ -143,11 +145,15 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar)
config_enabled(CONFIG_ATH10K_DFS_CERTIFIED),
config_enabled(CONFIG_NL80211_TESTMODE));
+ firmware = ar->normal_mode_fw.fw_file.firmware;
+ if (firmware)
+ crc = crc32_le(0, firmware->data, firmware->size);
+
ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n",
ar->hw->wiphy->fw_version,
ar->fw_api,
fw_features,
- crc32_le(0, ar->firmware->data, ar->firmware->size));
+ crc);
}
void ath10k_debug_print_board_info(struct ath10k *ar)
@@ -163,7 +169,8 @@ void ath10k_debug_print_board_info(struct ath10k *ar)
ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
ar->bd_api,
boardinfo,
- crc32_le(0, ar->board->data, ar->board->size));
+ crc32_le(0, ar->normal_mode_fw.board->data,
+ ar->normal_mode_fw.board->size));
}
void ath10k_debug_print_boot_info(struct ath10k *ar)
@@ -171,8 +178,8 @@ void ath10k_debug_print_boot_info(struct ath10k *ar)
ath10k_info(ar, "htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d\n",
ar->htt.target_version_major,
ar->htt.target_version_minor,
- ar->wmi.op_version,
- ar->htt.op_version,
+ ar->normal_mode_fw.fw_file.wmi_op_version,
+ ar->normal_mode_fw.fw_file.htt_op_version,
ath10k_cal_mode_str(ar->cal_mode),
ar->max_num_stations,
test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
@@ -319,7 +326,7 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_fw_stats stats = {};
- bool is_start, is_started, is_end, peer_stats_svc;
+ bool is_start, is_started, is_end;
size_t num_peers;
size_t num_vdevs;
int ret;
@@ -346,13 +353,11 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
* b) consume stat update events until another one with pdev stats is
* delivered which is treated as end-of-data and is itself discarded
*/
-
- peer_stats_svc = test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map);
- if (peer_stats_svc)
+ if (ath10k_peer_stats_enabled(ar))
ath10k_sta_update_rx_duration(ar, &stats.peers);
if (ar->debug.fw_stats_done) {
- if (!peer_stats_svc)
+ if (!ath10k_peer_stats_enabled(ar))
ath10k_warn(ar, "received unsolicited stats update event\n");
goto free;
@@ -1447,7 +1452,7 @@ static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
goto err;
}
- buf = vmalloc(QCA988X_CAL_DATA_LEN);
+ buf = vmalloc(ar->hw_params.cal_data_len);
if (!buf) {
ret = -ENOMEM;
goto err;
@@ -1462,7 +1467,7 @@ static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
}
ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
- QCA988X_CAL_DATA_LEN);
+ ar->hw_params.cal_data_len);
if (ret) {
ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
goto err_vfree;
@@ -1487,10 +1492,11 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
+ struct ath10k *ar = file->private_data;
void *buf = file->private_data;
return simple_read_from_buffer(user_buf, count, ppos,
- buf, QCA988X_CAL_DATA_LEN);
+ buf, ar->hw_params.cal_data_len);
}
static int ath10k_debug_cal_data_release(struct inode *inode,
@@ -2119,7 +2125,7 @@ static ssize_t ath10k_write_btcoex(struct file *file,
struct ath10k *ar = file->private_data;
char buf[32];
size_t buf_size;
- int ret = 0;
+ int ret;
bool val;
buf_size = min(count, (sizeof(buf) - 1));
@@ -2139,8 +2145,10 @@ static ssize_t ath10k_write_btcoex(struct file *file,
goto exit;
}
- if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val))
+ if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) {
+ ret = count;
goto exit;
+ }
if (val)
set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
@@ -2179,6 +2187,75 @@ static const struct file_operations fops_btcoex = {
.open = simple_open
};
+static ssize_t ath10k_write_peer_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ size_t buf_size;
+ int ret;
+ bool val;
+
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, ubuf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+
+ if (strtobool(buf, &val) != 0)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_RESTARTED) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) {
+ ret = count;
+ goto exit;
+ }
+
+ if (val)
+ set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+ else
+ clear_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
+ ath10k_info(ar, "restarting firmware due to Peer stats change");
+
+ queue_work(ar->workqueue, &ar->restart_work);
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath10k_read_peer_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags));
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_stats = {
+ .read = ath10k_read_peer_stats,
+ .write = ath10k_write_peer_stats,
+ .open = simple_open
+};
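fops_peer_stats follows the same debugfs boolean-knob pattern as the btcoex file above. A self-contained sketch of the write side (all names hypothetical): bound the copy to sizeof(buf) - 1, NUL-terminate, parse with strtobool(), and return count so userspace sees the whole write consumed:

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t example_write_bool(struct file *file, const char __user *ubuf,
				  size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size = min(count, sizeof(buf) - 1);
	bool val;

	if (copy_from_user(buf, ubuf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &val))
		return -EINVAL;

	/* apply val under whatever lock guards the driver state */
	return count;
}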
+
static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -2196,23 +2273,28 @@ static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
len += scnprintf(buf + len, buf_len - len,
"firmware-N.bin\t\t%08x\n",
- crc32_le(0, ar->firmware->data, ar->firmware->size));
+ crc32_le(0, ar->normal_mode_fw.fw_file.firmware->data,
+ ar->normal_mode_fw.fw_file.firmware->size));
len += scnprintf(buf + len, buf_len - len,
"athwlan\t\t\t%08x\n",
- crc32_le(0, ar->firmware_data, ar->firmware_len));
+ crc32_le(0, ar->normal_mode_fw.fw_file.firmware_data,
+ ar->normal_mode_fw.fw_file.firmware_len));
len += scnprintf(buf + len, buf_len - len,
"otp\t\t\t%08x\n",
- crc32_le(0, ar->otp_data, ar->otp_len));
+ crc32_le(0, ar->normal_mode_fw.fw_file.otp_data,
+ ar->normal_mode_fw.fw_file.otp_len));
len += scnprintf(buf + len, buf_len - len,
"codeswap\t\t%08x\n",
- crc32_le(0, ar->swap.firmware_codeswap_data,
- ar->swap.firmware_codeswap_len));
+ crc32_le(0, ar->normal_mode_fw.fw_file.codeswap_data,
+ ar->normal_mode_fw.fw_file.codeswap_len));
len += scnprintf(buf + len, buf_len - len,
"board-N.bin\t\t%08x\n",
- crc32_le(0, ar->board->data, ar->board->size));
+ crc32_le(0, ar->normal_mode_fw.board->data,
+ ar->normal_mode_fw.board->size));
len += scnprintf(buf + len, buf_len - len,
"board\t\t\t%08x\n",
- crc32_le(0, ar->board_data, ar->board_len));
+ crc32_le(0, ar->normal_mode_fw.board_data,
+ ar->normal_mode_fw.board_len));
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
@@ -2342,6 +2424,11 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("btcoex", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_btcoex);
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ debugfs_create_file("peer_stats", S_IRUGO | S_IWUSR,
+ ar->debug.debugfs_phy, ar,
+ &fops_peer_stats);
+
debugfs_create_file("fw_checksums", S_IRUSR,
ar->debug.debugfs_phy, ar, &fops_fw_checksums);
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 6206edd7c..75c89e362 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -57,7 +57,7 @@ enum ath10k_dbg_aggr_mode {
};
/* FIXME: How to calculate the buffer size sanely? */
-#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
+#define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024)
extern unsigned int ath10k_debug_mask;
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index e70aa38e6..cc827185d 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -297,10 +297,10 @@ struct ath10k_htc_svc_conn_resp {
#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
#define ATH10K_HTC_MAX_LEN 4096
#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
-#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
sizeof(struct ath10k_htc_hdr))
-#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
struct ath10k_htc_ep {
struct ath10k_htc *htc;
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 7561f22f1..130cd9502 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -149,7 +149,7 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
- conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
+ conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
@@ -183,7 +183,7 @@ int ath10k_htt_init(struct ath10k *ar)
8 + /* llc snap */
2; /* ip4 dscp or ip6 priority */
- switch (ar->htt.op_version) {
+ switch (ar->running_fw->fw_file.htt_op_version) {
case ATH10K_FW_HTT_OP_VERSION_10_4:
ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
@@ -208,7 +208,7 @@ int ath10k_htt_init(struct ath10k *ar)
return 0;
}
-#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 13391ea44..911c535d0 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/hashtable.h>
+#include <linux/kfifo.h>
#include <net/mac80211.h>
#include "htc.h"
@@ -1461,15 +1462,23 @@ struct htt_tx_mode_switch_ind {
struct htt_tx_mode_switch_record records[0];
} __packed;
+struct htt_channel_change {
+ u8 pad[3];
+ __le32 freq;
+ __le32 center_freq1;
+ __le32 center_freq2;
+ __le32 phymode;
+} __packed;
+
union htt_rx_pn_t {
/* WEP: 24-bit PN */
u32 pn24;
/* TKIP or CCMP: 48-bit PN */
- u_int64_t pn48;
+ u64 pn48;
/* WAPI: 128-bit PN */
- u_int64_t pn128[2];
+ u64 pn128[2];
};
struct htt_cmd {
@@ -1511,16 +1520,22 @@ struct htt_resp {
struct htt_tx_fetch_ind tx_fetch_ind;
struct htt_tx_fetch_confirm tx_fetch_confirm;
struct htt_tx_mode_switch_ind tx_mode_switch_ind;
+ struct htt_channel_change chan_change;
};
} __packed;
/*** host side structures follow ***/
struct htt_tx_done {
- u32 msdu_id;
- bool discard;
- bool no_ack;
- bool success;
+ u16 msdu_id;
+ u16 status;
+};
+
+enum htt_tx_compl_state {
+ HTT_TX_COMPL_STATE_NONE,
+ HTT_TX_COMPL_STATE_ACK,
+ HTT_TX_COMPL_STATE_NOACK,
+ HTT_TX_COMPL_STATE_DISCARD,
};
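The completion record shrinks to two u16 fields so it can be copied by value through the new txdone_fifo. A compile-time guard for that size assumption could look like this (hypothetical helper, not in the patch):

#include <linux/bug.h>

static inline void example_assert_tx_done_is_small(void)
{
	/* two u16s pack to 4 bytes; keeps kfifo element copies cheap */
	BUILD_BUG_ON(sizeof(struct htt_tx_done) != 4);
}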
struct htt_peer_map_event {
@@ -1547,7 +1562,6 @@ struct ath10k_htt {
u8 target_version_major;
u8 target_version_minor;
struct completion target_version_received;
- enum ath10k_fw_htt_op_version op_version;
u8 max_num_amsdu;
u8 max_num_ampdu;
@@ -1641,17 +1655,20 @@ struct ath10k_htt {
struct idr pending_tx;
wait_queue_head_t empty_tx_wq;
+ /* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
+ DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);
+
/* set if host-fw communication goes haywire
* used to avoid further failures */
bool rx_confused;
- struct tasklet_struct rx_replenish_task;
+ atomic_t num_mpdus_ready;
/* This is used to group tx/rx completions separately and process them
* in batches to reduce cache stalls */
struct tasklet_struct txrx_compl_task;
- struct sk_buff_head tx_compl_q;
struct sk_buff_head rx_compl_q;
struct sk_buff_head rx_in_ord_compl_q;
+ struct sk_buff_head tx_fetch_ind_q;
/* rx_status template */
struct ieee80211_rx_status rx_status;
@@ -1667,10 +1684,13 @@ struct ath10k_htt {
} txbuf;
struct {
+ bool enabled;
struct htt_q_state *vaddr;
dma_addr_t paddr;
+ u16 num_push_allowed;
u16 num_peers;
u16 num_tids;
+ enum htt_tx_mode_switch_mode mode;
enum htt_q_depth_type type;
} tx_q_state;
};
@@ -1715,7 +1735,7 @@ struct htt_rx_desc {
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
* aggregated traffic more nicely. */
-#define ATH10K_HTT_MAX_NUM_REFILL 16
+#define ATH10K_HTT_MAX_NUM_REFILL 100
/*
* DMA_MAP expects the buffer to be an integral number of cache lines.
@@ -1743,7 +1763,8 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar);
void ath10k_htt_rx_free(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
-void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
@@ -1752,8 +1773,23 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu);
void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+ __le32 token,
+ __le16 fetch_seq_num,
+ struct htt_tx_fetch_record *records,
+ size_t num_records);
+
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_sync(struct ath10k *ar);
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+ bool is_presp);
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc);
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index ae9b686a4..813cdd262 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -31,6 +31,8 @@
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
+#define HTT_RX_RING_REFILL_RESCHED_MS 5
+
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);
@@ -192,7 +194,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
} else if (num_deficit > 0) {
- tasklet_schedule(&htt->rx_replenish_task);
+ mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+ msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
}
spin_unlock_bh(&htt->rx_ring.lock);
}
@@ -223,12 +226,11 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
del_timer_sync(&htt->rx_ring.refill_retry_timer);
- tasklet_kill(&htt->rx_replenish_task);
tasklet_kill(&htt->txrx_compl_task);
- skb_queue_purge(&htt->tx_compl_q);
skb_queue_purge(&htt->rx_compl_q);
skb_queue_purge(&htt->rx_in_ord_compl_q);
+ skb_queue_purge(&htt->tx_fetch_ind_q);
ath10k_htt_rx_ring_free(htt);
@@ -281,7 +283,6 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
- u8 **fw_desc, int *fw_desc_len,
struct sk_buff_head *amsdu)
{
struct ath10k *ar = htt->ar;
@@ -323,48 +324,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
return -EIO;
}
- /*
- * Copy the FW rx descriptor for this MSDU from the rx
- * indication message into the MSDU's netbuf. HL uses the
- * same rx indication message definition as LL, and simply
- * appends new info (fields from the HW rx desc, and the
- * MSDU payload itself). So, the offset into the rx
- * indication message only has to account for the standard
- * offset of the per-MSDU FW rx desc info within the
- * message, and how many bytes of the per-MSDU FW rx desc
- * info have already been consumed. (And the endianness of
- * the host, since for a big-endian host, the rx ind
- * message contents, including the per-MSDU rx desc bytes,
- * were byteswapped during upload.)
- */
- if (*fw_desc_len > 0) {
- rx_desc->fw_desc.info0 = **fw_desc;
- /*
- * The target is expected to only provide the basic
- * per-MSDU rx descriptors. Just to be sure, verify
- * that the target has not attached extension data
- * (e.g. LRO flow ID).
- */
-
- /* or more, if there's extension data */
- (*fw_desc)++;
- (*fw_desc_len)--;
- } else {
- /*
- * When an oversized AMSDU happened, FW will lost
- * some of MSDU status - in this case, the FW
- * descriptors provided will be less than the
- * actual MSDUs inside this MPDU. Mark the FW
- * descriptors so that it will still deliver to
- * upper stack, if no CRC error for this MPDU.
- *
- * FIX THIS - the FW descriptors are actually for
- * MSDUs in the end of this A-MSDU instead of the
- * beginning.
- */
- rx_desc->fw_desc.info0 = 0;
- }
-
msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
@@ -423,13 +382,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
return msdu_chaining;
}
-static void ath10k_htt_rx_replenish_task(unsigned long ptr)
-{
- struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
-
- ath10k_htt_rx_msdu_buff_replenish(htt);
-}
-
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
u32 paddr)
{
@@ -563,12 +515,10 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
htt->rx_ring.sw_rd_idx.msdu_payld = 0;
hash_init(htt->rx_ring.skb_table);
- tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
- (unsigned long)htt);
-
- skb_queue_head_init(&htt->tx_compl_q);
skb_queue_head_init(&htt->rx_compl_q);
skb_queue_head_init(&htt->rx_in_ord_compl_q);
+ skb_queue_head_init(&htt->tx_fetch_ind_q);
+ atomic_set(&htt->num_mpdus_ready, 0);
tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
(unsigned long)htt);
@@ -860,6 +810,8 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
if (!ch)
ch = ath10k_htt_rx_h_any_channel(ar);
+ if (!ch)
+ ch = ar->tgt_oper_chan;
spin_unlock_bh(&ar->data_lock);
if (!ch)
@@ -979,7 +931,7 @@ static void ath10k_process_rx(struct ath10k *ar,
*status = *rx_status;
ath10k_dbg(ar, ATH10K_DBG_DATA,
- "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
@@ -1014,7 +966,7 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
int len = ieee80211_hdrlen(hdr->frame_control);
if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
- ar->fw_features))
+ ar->running_fw->fw_file.fw_features))
len = round_up(len, 4);
return len;
@@ -1076,20 +1028,25 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
hdr = (void *)msdu->data;
/* Tail */
- skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ if (status->flag & RX_FLAG_IV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_tail_len(ar, enctype));
/* MMIC */
- if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+ !ieee80211_has_morefrags(hdr->frame_control) &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
skb_trim(msdu, msdu->len - 8);
/* Head */
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
- memmove((void *)msdu->data + crypto_len,
- (void *)msdu->data, hdr_len);
- skb_pull(msdu, crypto_len);
+ memmove((void *)msdu->data + crypto_len,
+ (void *)msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
}
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
@@ -1343,6 +1300,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
bool has_tkip_err;
bool has_peer_idx_invalid;
bool is_decrypted;
+ bool is_mgmt;
u32 attention;
if (skb_queue_empty(amsdu))
@@ -1351,6 +1309,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
first = skb_peek(amsdu);
rxd = (void *)first->data - sizeof(*rxd);
+ is_mgmt = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
+
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
@@ -1392,6 +1353,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
RX_FLAG_MMIC_ERROR |
RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
+ RX_FLAG_ONLY_MONITOR |
RX_FLAG_MMIC_STRIPPED);
if (has_fcs_err)
@@ -1400,10 +1362,21 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
if (has_tkip_err)
status->flag |= RX_FLAG_MMIC_ERROR;
- if (is_decrypted)
- status->flag |= RX_FLAG_DECRYPTED |
- RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED;
+ /* Firmware reports all necessary management frames via WMI already.
+ * They are not reported to monitor interfaces at all so pass the ones
+ * coming via HTT to monitor interfaces instead. This simplifies
+ * matters a lot.
+ */
+ if (is_mgmt)
+ status->flag |= RX_FLAG_ONLY_MONITOR;
+
+ if (is_decrypted) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (likely(!is_mgmt))
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ }
skb_queue_walk(amsdu, msdu) {
ath10k_htt_rx_h_csum_offload(msdu);
@@ -1416,6 +1389,8 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
*/
if (!is_decrypted)
continue;
+ if (is_mgmt)
+ continue;
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
@@ -1516,14 +1491,6 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *rx_status)
{
- struct sk_buff *msdu;
- struct htt_rx_desc *rxd;
- bool is_mgmt;
- bool has_fcs_err;
-
- msdu = skb_peek(amsdu);
- rxd = (void *)msdu->data - sizeof(*rxd);
-
/* FIXME: It might be a good idea to do some fuzzy-testing to drop
* invalid/dangerous frames.
*/
@@ -1533,23 +1500,6 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
return false;
}
- is_mgmt = !!(rxd->attention.flags &
- __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
- has_fcs_err = !!(rxd->attention.flags &
- __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
-
- /* Management frames are handled via WMI events. The pros of such
- * approach is that channel is explicitly provided in WMI events
- * whereas HTT doesn't provide channel information for Rxed frames.
- *
- * However some firmware revisions don't report corrupted frames via
- * WMI so don't drop them.
- */
- if (is_mgmt && !has_fcs_err) {
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
- return false;
- }
-
if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
return false;
@@ -1571,25 +1521,49 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
__skb_queue_purge(amsdu);
}
-static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
- struct htt_rx_indication *rx)
+static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
- struct ieee80211_rx_status *rx_status = &htt->rx_status;
- struct htt_rx_indication_mpdu_range *mpdu_ranges;
+ static struct ieee80211_rx_status rx_status;
struct sk_buff_head amsdu;
- int num_mpdu_ranges;
- int fw_desc_len;
- u8 *fw_desc;
- int i, ret, mpdu_count = 0;
+ int ret;
- lockdep_assert_held(&htt->rx_ring.lock);
+ __skb_queue_head_init(&amsdu);
- if (htt->rx_confused)
- return;
+ spin_lock_bh(&htt->rx_ring.lock);
+ if (htt->rx_confused) {
+ spin_unlock_bh(&htt->rx_ring.lock);
+ return -EIO;
+ }
+ ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
+ spin_unlock_bh(&htt->rx_ring.lock);
- fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
- fw_desc = (u8 *)&rx->fw_desc;
+ if (ret < 0) {
+ ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
+ __skb_queue_purge(&amsdu);
+ /* FIXME: It's probably a good idea to reboot the
+ * device instead of leaving it inoperable.
+ */
+ htt->rx_confused = true;
+ return ret;
+ }
+
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
+ ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
+ ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
+ ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);
+
+ return 0;
+}
+
+static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
+ struct htt_rx_indication *rx)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_rx_indication_mpdu_range *mpdu_ranges;
+ int num_mpdu_ranges;
+ int i, mpdu_count = 0;
num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
@@ -1603,80 +1577,19 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
for (i = 0; i < num_mpdu_ranges; i++)
mpdu_count += mpdu_ranges[i].mpdu_count;
- while (mpdu_count--) {
- __skb_queue_head_init(&amsdu);
- ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
- &fw_desc_len, &amsdu);
- if (ret < 0) {
- ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
- __skb_queue_purge(&amsdu);
- /* FIXME: It's probably a good idea to reboot the
- * device instead of leaving it inoperable.
- */
- htt->rx_confused = true;
- break;
- }
+ atomic_add(mpdu_count, &htt->num_mpdus_ready);
- ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
- ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
- ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
- }
-
- tasklet_schedule(&htt->rx_replenish_task);
+ tasklet_schedule(&htt->txrx_compl_task);
}
-static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
- struct htt_rx_fragment_indication *frag)
+static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
{
- struct ath10k *ar = htt->ar;
- struct ieee80211_rx_status *rx_status = &htt->rx_status;
- struct sk_buff_head amsdu;
- int ret;
- u8 *fw_desc;
- int fw_desc_len;
-
- fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
- fw_desc = (u8 *)frag->fw_msdu_rx_desc;
-
- __skb_queue_head_init(&amsdu);
+ atomic_inc(&htt->num_mpdus_ready);
- spin_lock_bh(&htt->rx_ring.lock);
- ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
- &amsdu);
- spin_unlock_bh(&htt->rx_ring.lock);
-
- tasklet_schedule(&htt->rx_replenish_task);
-
- ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
-
- if (ret) {
- ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
- ret);
- __skb_queue_purge(&amsdu);
- return;
- }
-
- if (skb_queue_len(&amsdu) != 1) {
- ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
- __skb_queue_purge(&amsdu);
- return;
- }
-
- ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
- ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
-
- if (fw_desc_len > 0) {
- ath10k_dbg(ar, ATH10K_DBG_HTT,
- "expecting more fragmented rx in one indication %d\n",
- fw_desc_len);
- }
+ tasklet_schedule(&htt->txrx_compl_task);
}
-static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
+static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
@@ -1688,19 +1601,19 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
switch (status) {
case HTT_DATA_TX_STATUS_NO_ACK:
- tx_done.no_ack = true;
+ tx_done.status = HTT_TX_COMPL_STATE_NOACK;
break;
case HTT_DATA_TX_STATUS_OK:
- tx_done.success = true;
+ tx_done.status = HTT_TX_COMPL_STATE_ACK;
break;
case HTT_DATA_TX_STATUS_DISCARD:
case HTT_DATA_TX_STATUS_POSTPONE:
case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
- tx_done.discard = true;
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
break;
default:
ath10k_warn(ar, "unhandled tx completion status %d\n", status);
- tx_done.discard = true;
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
break;
}
@@ -1710,7 +1623,20 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
msdu_id = resp->data_tx_completion.msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
- ath10k_txrx_tx_unref(htt, &tx_done);
+
+ /* kfifo_put: In practice firmware shouldn't fire off per-CE
+ * interrupt and main interrupt (MSI/-X range case) for the same
+ * HTC service so it should be safe to use kfifo_put w/o lock.
+ *
+ * From kfifo_put() documentation:
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these macros.
+ */
+ if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
+ ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
+ tx_done.msdu_id, tx_done.status);
+ ath10k_txrx_tx_unref(htt, &tx_done);
+ }
}
}
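The kfifo_put() call above relies on the single-producer/single-consumer guarantee spelled out in the comment. A self-contained sketch of that pattern, assuming one interrupt-side writer and one tasklet-side reader (all names here are illustrative):

#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/slab.h>

struct demo_done {
	u16 id;
	u16 status;
};

struct demo {
	DECLARE_KFIFO_PTR(done_fifo, struct demo_done);
};

static int demo_init(struct demo *d, size_t entries)
{
	/* kfifo_alloc() rounds the size up to a power of two */
	return kfifo_alloc(&d->done_fifo, entries, GFP_KERNEL);
}

/* producer: safe without a lock as long as it is the only writer */
static void demo_producer(struct demo *d, u16 id, u16 status)
{
	struct demo_done done = { .id = id, .status = status };

	if (!kfifo_put(&d->done_fifo, done))
		pr_warn("done fifo overrun, id %u\n", id);
}

/* consumer: the only reader, e.g. a tasklet */
static void demo_consumer(struct demo *d)
{
	struct demo_done done;

	while (kfifo_get(&d->done_fifo, &done))
		pr_info("completed id %u status %u\n", done.id, done.status);
}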
@@ -1978,11 +1904,323 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
return;
}
}
+}
+
+static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
+ const __le32 *resp_ids,
+ int num_resp_ids)
+{
+ int i;
+ u32 resp_id;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
+ num_resp_ids);
+
+ for (i = 0; i < num_resp_ids; i++) {
+ resp_id = le32_to_cpu(resp_ids[i]);
- tasklet_schedule(&htt->rx_replenish_task);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
+ resp_id);
+
+ /* TODO: free resp_id */
+ }
}
-void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_txq *txq;
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ struct htt_tx_fetch_record *record;
+ size_t len;
+ size_t max_num_bytes;
+ size_t max_num_msdus;
+ size_t num_bytes;
+ size_t num_msdus;
+ const __le32 *resp_ids;
+ u16 num_records;
+ u16 num_resp_ids;
+ u16 peer_id;
+ u8 tid;
+ int ret;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
+ return;
+ }
+
+ num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
+ num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
+
+ len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
+ len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
+ num_records, num_resp_ids,
+ le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
+
+ if (!ar->htt.tx_q_state.enabled) {
+ ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
+ return;
+ }
+
+ if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
+ ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
+ return;
+ }
+
+ rcu_read_lock();
+
+ for (i = 0; i < num_records; i++) {
+ record = &resp->tx_fetch_ind.records[i];
+ peer_id = MS(le16_to_cpu(record->info),
+ HTT_TX_FETCH_RECORD_INFO_PEER_ID);
+ tid = MS(le16_to_cpu(record->info),
+ HTT_TX_FETCH_RECORD_INFO_TID);
+ max_num_msdus = le16_to_cpu(record->num_msdus);
+ max_num_bytes = le32_to_cpu(record->num_bytes);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
+ i, peer_id, tid, max_num_msdus, max_num_bytes);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* It is okay to release the lock and use txq because RCU read
+ * lock is held.
+ */
+
+ if (unlikely(!txq)) {
+ ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ num_msdus = 0;
+ num_bytes = 0;
+
+ while (num_msdus < max_num_msdus &&
+ num_bytes < max_num_bytes) {
+ ret = ath10k_mac_tx_push_txq(hw, txq);
+ if (ret < 0)
+ break;
+
+ num_msdus++;
+ num_bytes += ret;
+ }
+
+ record->num_msdus = cpu_to_le16(num_msdus);
+ record->num_bytes = cpu_to_le32(num_bytes);
+
+ ath10k_htt_tx_txq_recalc(hw, txq);
+ }
+
+ rcu_read_unlock();
+
+ resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
+ ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
+
+ ret = ath10k_htt_tx_fetch_resp(ar,
+ resp->tx_fetch_ind.token,
+ resp->tx_fetch_ind.fetch_seq_num,
+ resp->tx_fetch_ind.records,
+ num_records);
+ if (unlikely(ret)) {
+ ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
+ le32_to_cpu(resp->tx_fetch_ind.token), ret);
+ /* FIXME: request fw restart */
+ }
+
+ ath10k_htt_tx_txq_sync(ar);
+}
+
+static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct htt_resp *resp = (void *)skb->data;
+ size_t len;
+ int num_resp_ids;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
+ return;
+ }
+
+ num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
+ len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
+ return;
+ }
+
+ ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
+ resp->tx_fetch_confirm.resp_ids,
+ num_resp_ids);
+}
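Both fetch handlers above use the same two-step bounds check: validate the fixed header, read the device-supplied counts, then re-validate before touching the variable-length tail. A generic sketch of the pattern (types and names hypothetical):

#include <linux/skbuff.h>

struct demo_hdr {
	__le16 num_entries;
	__le16 entries[0];
} __packed;

/* Hedged sketch: never trust a device-supplied count until the buffer
 * is known to cover both the header and the array it describes.
 */
static int demo_parse(const struct sk_buff *skb)
{
	const struct demo_hdr *hdr;
	size_t len = sizeof(*hdr);
	u16 n;

	if (skb->len < len)
		return -EPROTO;	/* truncated header */

	hdr = (const void *)skb->data;
	n = le16_to_cpu(hdr->num_entries);

	len += n * sizeof(hdr->entries[0]);
	if (skb->len < len)
		return -EPROTO;	/* truncated entry array */

	/* safe to walk hdr->entries[0..n-1] from here on */
	return 0;
}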
+
+static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct htt_resp *resp = (void *)skb->data;
+ const struct htt_tx_mode_switch_record *record;
+ struct ieee80211_txq *txq;
+ struct ath10k_txq *artxq;
+ size_t len;
+ size_t num_records;
+ enum htt_tx_mode_switch_mode mode;
+ bool enable;
+ u16 info0;
+ u16 info1;
+ u16 threshold;
+ u16 peer_id;
+ u8 tid;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
+ return;
+ }
+
+ info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
+ info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
+
+ enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
+ num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
+ mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
+ threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
+ info0, info1, enable, num_records, mode, threshold);
+
+ len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
+ return;
+ }
+
+ switch (mode) {
+ case HTT_TX_MODE_SWITCH_PUSH:
+ case HTT_TX_MODE_SWITCH_PUSH_PULL:
+ break;
+ default:
+ ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
+ mode);
+ return;
+ }
+
+ if (!enable)
+ return;
+
+ ar->htt.tx_q_state.enabled = enable;
+ ar->htt.tx_q_state.mode = mode;
+ ar->htt.tx_q_state.num_push_allowed = threshold;
+
+ rcu_read_lock();
+
+ for (i = 0; i < num_records; i++) {
+ record = &resp->tx_mode_switch_ind.records[i];
+ info0 = le16_to_cpu(record->info0);
+ peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
+ tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* It is okay to release the lock and use txq because RCU read
+ * lock is held.
+ */
+
+ if (unlikely(!txq)) {
+ ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ artxq = (void *)txq->drv_priv;
+ artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+
+ rcu_read_unlock();
+
+ ath10k_mac_tx_push_pending(ar);
+}
+
+static inline enum nl80211_band phy_mode_to_band(u32 phy_mode)
+{
+ enum nl80211_band band;
+
+ switch (phy_mode) {
+ case MODE_11A:
+ case MODE_11NA_HT20:
+ case MODE_11NA_HT40:
+ case MODE_11AC_VHT20:
+ case MODE_11AC_VHT40:
+ case MODE_11AC_VHT80:
+ band = NL80211_BAND_5GHZ;
+ break;
+ case MODE_11G:
+ case MODE_11B:
+ case MODE_11GONLY:
+ case MODE_11NG_HT20:
+ case MODE_11NG_HT40:
+ case MODE_11AC_VHT20_2G:
+ case MODE_11AC_VHT40_2G:
+ case MODE_11AC_VHT80_2G:
+ default:
+ band = NL80211_BAND_2GHZ;
+ }
+
+ return band;
+}
+
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+ bool release;
+
+ release = ath10k_htt_t2h_msg_handler(ar, skb);
+
+ /* Free the indication buffer */
+ if (release)
+ dev_kfree_skb_any(skb);
+}
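The split lets the HTC path keep its free-after-handling behaviour while other callers can hold on to the skb. A minimal sketch of this return-true-when-the-caller-should-free convention (helpers hypothetical):

#include <linux/skbuff.h>

static bool demo_needs_deferral(const struct sk_buff *skb)
{
	return skb->len > 128;	/* placeholder policy */
}

static bool demo_handle(struct sk_buff *skb, struct sk_buff_head *deferred_q)
{
	if (demo_needs_deferral(skb)) {
		skb_queue_tail(deferred_q, skb);
		return false;	/* queued: the skb now belongs to the queue */
	}

	/* ... process skb->data inline ... */
	return true;		/* done: the caller may free it */
}

static void demo_entry(struct sk_buff *skb, struct sk_buff_head *deferred_q)
{
	if (demo_handle(skb, deferred_q))
		dev_kfree_skb_any(skb);
}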
+
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (struct htt_resp *)skb->data;
@@ -1998,8 +2236,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
- dev_kfree_skb_any(skb);
- return;
+ return true;
}
type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
@@ -2011,9 +2248,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IND:
- skb_queue_tail(&htt->rx_compl_q, skb);
- tasklet_schedule(&htt->txrx_compl_task);
- return;
+ ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
+ break;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = {
.vdev_id = resp->peer_map.vdev_id,
@@ -2034,28 +2270,33 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
struct htt_tx_done tx_done = {};
int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
- tx_done.msdu_id =
- __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
+ tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
switch (status) {
case HTT_MGMT_TX_STATUS_OK:
- tx_done.success = true;
+ tx_done.status = HTT_TX_COMPL_STATE_ACK;
break;
case HTT_MGMT_TX_STATUS_RETRY:
- tx_done.no_ack = true;
+ tx_done.status = HTT_TX_COMPL_STATE_NOACK;
break;
case HTT_MGMT_TX_STATUS_DROP:
- tx_done.discard = true;
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
break;
}
- ath10k_txrx_tx_unref(htt, &tx_done);
+ status = ath10k_txrx_tx_unref(htt, &tx_done);
+ if (!status) {
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_mgmt_dec_pending(htt);
+ spin_unlock_bh(&htt->tx_lock);
+ }
+ ath10k_mac_tx_push_pending(ar);
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
- skb_queue_tail(&htt->tx_compl_q, skb);
+ ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
tasklet_schedule(&htt->txrx_compl_task);
- return;
+ break;
case HTT_T2H_MSG_TYPE_SEC_IND: {
struct ath10k *ar = htt->ar;
struct htt_security_indication *ev = &resp->security_indication;
@@ -2071,7 +2312,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
- ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
+ ath10k_htt_rx_frag_handler(htt);
break;
}
case HTT_T2H_MSG_TYPE_TEST:
@@ -2111,18 +2352,39 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
tasklet_schedule(&htt->txrx_compl_task);
- return;
+ return false;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
break;
- case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
+ case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
+ u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
+ u32 freq = __le32_to_cpu(resp->chan_change.freq);
+
+ ar->tgt_oper_chan =
+ __ieee80211_get_channel(ar->hw->wiphy, freq);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt chan change freq %u phymode %s\n",
+ freq, ath10k_wmi_phymode_str(phymode));
break;
+ }
case HTT_T2H_MSG_TYPE_AGGR_CONF:
break;
- case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
+ case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
+ struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
+
+ if (!tx_fetch_ind) {
+ ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
+ break;
+ }
+ skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
+ tasklet_schedule(&htt->txrx_compl_task);
+ break;
+ }
case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
+ ath10k_htt_rx_tx_fetch_confirm(ar, skb);
+ break;
case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
- /* TODO: Implement pull-push logic */
+ ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
break;
case HTT_T2H_MSG_TYPE_EN_STATS:
default:
@@ -2132,9 +2394,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
skb->data, skb->len);
break;
};
-
- /* Free the indication buffer */
- dev_kfree_skb_any(skb);
+ return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
@@ -2150,40 +2410,47 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
struct ath10k *ar = htt->ar;
- struct sk_buff_head tx_q;
- struct sk_buff_head rx_q;
+ struct htt_tx_done tx_done = {};
struct sk_buff_head rx_ind_q;
- struct htt_resp *resp;
+ struct sk_buff_head tx_ind_q;
struct sk_buff *skb;
unsigned long flags;
+ int num_mpdus;
- __skb_queue_head_init(&tx_q);
- __skb_queue_head_init(&rx_q);
__skb_queue_head_init(&rx_ind_q);
-
- spin_lock_irqsave(&htt->tx_compl_q.lock, flags);
- skb_queue_splice_init(&htt->tx_compl_q, &tx_q);
- spin_unlock_irqrestore(&htt->tx_compl_q.lock, flags);
-
- spin_lock_irqsave(&htt->rx_compl_q.lock, flags);
- skb_queue_splice_init(&htt->rx_compl_q, &rx_q);
- spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags);
+ __skb_queue_head_init(&tx_ind_q);
spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
- while ((skb = __skb_dequeue(&tx_q))) {
- ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
+ spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
+ skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
+ spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
+
+ /* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
+ * From kfifo_get() documentation:
+ * Note that with only one concurrent reader and one concurrent writer,
+ * you don't need extra locking to use these macros.
+ */
+ while (kfifo_get(&htt->txdone_fifo, &tx_done))
+ ath10k_txrx_tx_unref(htt, &tx_done);
+
+ while ((skb = __skb_dequeue(&tx_ind_q))) {
+ ath10k_htt_rx_tx_fetch_ind(ar, skb);
dev_kfree_skb_any(skb);
}
- while ((skb = __skb_dequeue(&rx_q))) {
- resp = (struct htt_resp *)skb->data;
- spin_lock_bh(&htt->rx_ring.lock);
- ath10k_htt_rx_handler(htt, &resp->rx_ind);
- spin_unlock_bh(&htt->rx_ring.lock);
- dev_kfree_skb_any(skb);
+ ath10k_mac_tx_push_pending(ar);
+
+ num_mpdus = atomic_read(&htt->num_mpdus_ready);
+
+ while (num_mpdus) {
+ if (ath10k_htt_rx_handle_amsdu(htt))
+ break;
+
+ num_mpdus--;
+ atomic_dec(&htt->num_mpdus_ready);
}
while ((skb = __skb_dequeue(&rx_ind_q))) {
@@ -2192,4 +2459,6 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
spin_unlock_bh(&htt->rx_ring.lock);
dev_kfree_skb_any(skb);
}
+
+ ath10k_htt_rx_msdu_buff_replenish(htt);
}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 95acb727c..6269c610b 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -22,53 +22,183 @@
#include "txrx.h"
#include "debug.h"
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
+static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
- if (limit_mgmt_desc)
- htt->num_pending_mgmt_tx--;
+ int exp;
+ int factor;
+
+ exp = 0;
+ factor = count >> 7;
+
+ while (factor >= 64 && exp < 4) {
+ factor >>= 3;
+ exp++;
+ }
+
+ if (exp == 4)
+ return 0xff;
+
+ if (count > 0)
+ factor = max(1, factor);
+
+ return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
+ SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
+}
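The byte encodes a floor approximation of the queue depth: count >> 7 seeds the factor, and each loop iteration trades 3 bits of factor for one step of exp. Worked example: 70000 bytes gives factor 546, then 68 (exp 1), then 8 (exp 2), i.e. roughly 8 << (7 + 3 * 2) = 65536 bytes. A sketch of the inverse (hypothetical helper, plain arithmetic without the driver's SM() masks):

static u32 example_decode_txq_size(u8 exp, u8 factor)
{
	/* inverse of the calc above: factor << (7 + 3 * exp) */
	return (u32)factor << (7 + 3 * exp);
}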
+
+static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_sta *arsta = (void *)txq->sta->drv_priv;
+ struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
+ unsigned long frame_cnt;
+ unsigned long byte_cnt;
+ int idx;
+ u32 bit;
+ u16 peer_id;
+ u8 tid;
+ u8 count;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ if (!ar->htt.tx_q_state.enabled)
+ return;
+
+ if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+ return;
+
+ if (txq->sta)
+ peer_id = arsta->peer_id;
+ else
+ peer_id = arvif->peer_id;
+
+ tid = txq->tid;
+ bit = BIT(peer_id % 32);
+ idx = peer_id / 32;
+
+ ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
+ count = ath10k_htt_tx_txq_calc_size(byte_cnt);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
+ peer_id, tid);
+ return;
+ }
+
+ ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
+ ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
+ ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
+ peer_id, tid, count);
+}
+
+static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+ u32 seq;
+ size_t size;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ if (!ar->htt.tx_q_state.enabled)
+ return;
+
+ if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+ return;
+
+ seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
+ seq++;
+ ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
+ seq);
+
+ size = sizeof(*ar->htt.tx_q_state.vaddr);
+ dma_sync_single_for_device(ar->dev,
+ ar->htt.tx_q_state.paddr,
+ size,
+ DMA_TO_DEVICE);
+}
+
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_recalc(hw, txq);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_sync(ar);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_recalc(hw, txq);
+ __ath10k_htt_tx_txq_sync(ar);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+ lockdep_assert_held(&htt->tx_lock);
htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}
-static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
- bool limit_mgmt_desc)
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
- spin_lock_bh(&htt->tx_lock);
- __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
- spin_unlock_bh(&htt->tx_lock);
+ lockdep_assert_held(&htt->tx_lock);
+
+ if (htt->num_pending_tx >= htt->max_num_pending_tx)
+ return -EBUSY;
+
+ htt->num_pending_tx++;
+ if (htt->num_pending_tx == htt->max_num_pending_tx)
+ ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+
+ return 0;
}
-static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
- bool limit_mgmt_desc, bool is_probe_resp)
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+ bool is_presp)
{
struct ath10k *ar = htt->ar;
- int ret = 0;
- spin_lock_bh(&htt->tx_lock);
+ lockdep_assert_held(&htt->tx_lock);
- if (htt->num_pending_tx >= htt->max_num_pending_tx) {
- ret = -EBUSY;
- goto exit;
- }
+ if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
+ return 0;
- if (limit_mgmt_desc) {
- if (is_probe_resp && (htt->num_pending_mgmt_tx >
- ar->hw_params.max_probe_resp_desc_thres)) {
- ret = -EBUSY;
- goto exit;
- }
- htt->num_pending_mgmt_tx++;
- }
+ if (is_presp &&
+ ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
+ return -EBUSY;
- htt->num_pending_tx++;
- if (htt->num_pending_tx == htt->max_num_pending_tx)
- ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+ htt->num_pending_mgmt_tx++;
-exit:
- spin_unlock_bh(&htt->tx_lock);
- return ret;
+ return 0;
+}
+
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
+{
+ lockdep_assert_held(&htt->tx_lock);
+
+ if (!htt->ar->hw_params.max_probe_resp_desc_thres)
+ return;
+
+ htt->num_pending_mgmt_tx--;
}
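With the spinlock moved out of the helpers, callers are expected to take tx_lock once around both counters. A hedged sketch of such a call site (the real ones live in mac.c, outside this hunk, so the ordering and rollback shown here are illustrative):

static int example_tx_inc(struct ath10k_htt *htt, bool is_mgmt, bool is_presp)
{
	int ret;

	spin_lock_bh(&htt->tx_lock);

	ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
	if (ret == 0) {
		ret = ath10k_htt_tx_inc_pending(htt);
		if (ret)
			ath10k_htt_tx_mgmt_dec_pending(htt);
	}

	spin_unlock_bh(&htt->tx_lock);

	return ret;
}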
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
@@ -137,7 +267,8 @@ static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
struct ath10k *ar = htt->ar;
size_t size;
- if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
return;
size = sizeof(*htt->tx_q_state.vaddr);
@@ -152,7 +283,8 @@ static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
size_t size;
int ret;
- if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
return 0;
htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
@@ -209,8 +341,18 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
goto free_frag_desc;
}
+ size = roundup_pow_of_two(htt->max_num_pending_tx);
+ ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
+ goto free_txq;
+ }
+
return 0;
+free_txq:
+ ath10k_htt_tx_free_txq(htt);
+
free_frag_desc:
ath10k_htt_tx_free_cont_frag_desc(htt);
@@ -234,8 +376,8 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
- tx_done.discard = 1;
tx_done.msdu_id = msdu_id;
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
ath10k_txrx_tx_unref(htt, &tx_done);
@@ -258,6 +400,8 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
ath10k_htt_tx_free_txq(htt);
ath10k_htt_tx_free_cont_frag_desc(htt);
+ WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
+ kfifo_free(&htt->txdone_fifo);
}
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -371,7 +515,8 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
info |= SM(htt->tx_q_state.type,
HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
- if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
cfg = &cmd->frag_desc_bank_cfg;
@@ -535,6 +680,55 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
return 0;
}
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+ __le32 token,
+ __le16 fetch_seq_num,
+ struct htt_tx_fetch_record *records,
+ size_t num_records)
+{
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ const u16 resp_id = 0;
+ int len = 0;
+ int ret;
+
+ /* Response IDs are echoed back only for host driver convenience
+ * purposes. They aren't used for anything in the driver yet so use 0.
+ */
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->tx_fetch_resp);
+ len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
+ cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
+ cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
+ cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
+ cmd->tx_fetch_resp.token = token;
+
+ memcpy(cmd->tx_fetch_resp.records, records,
+ sizeof(records[0]) * num_records);
+
+ ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
+ goto err_free_skb;
+ }
+
+ return 0;
+
+err_free_skb:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -576,20 +770,6 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
int msdu_id = -1;
int res;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
- bool limit_mgmt_desc = false;
- bool is_probe_resp = false;
-
- if (ar->hw_params.max_probe_resp_desc_thres) {
- limit_mgmt_desc = true;
-
- if (ieee80211_is_probe_resp(hdr->frame_control))
- is_probe_resp = true;
- }
-
- res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
-
- if (res)
- goto err;
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
@@ -598,7 +778,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
spin_unlock_bh(&htt->tx_lock);
if (res < 0)
- goto err_tx_dec;
+ goto err;
msdu_id = res;
@@ -649,8 +829,6 @@ err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
-err_tx_dec:
- ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
return res;
}
@@ -677,26 +855,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
u32 frags_paddr = 0;
u32 txbuf_paddr;
struct htt_msdu_ext_desc *ext_desc = NULL;
- bool limit_mgmt_desc = false;
- bool is_probe_resp = false;
-
- if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
- ar->hw_params.max_probe_resp_desc_thres) {
- limit_mgmt_desc = true;
-
- if (ieee80211_is_probe_resp(hdr->frame_control))
- is_probe_resp = true;
- }
-
- res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
- if (res)
- goto err;
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
spin_unlock_bh(&htt->tx_lock);
if (res < 0)
- goto err_tx_dec;
+ goto err;
msdu_id = res;
@@ -862,11 +1026,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
- spin_lock_bh(&htt->tx_lock);
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
- spin_unlock_bh(&htt->tx_lock);
-err_tx_dec:
- ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
return res;
}
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 3e7d4806a..1f9774f7a 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -35,8 +35,6 @@
#define QCA988X_HW_2_0_VERSION 0x4100016c
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0"
-#define QCA988X_HW_2_0_FW_FILE "/*(DEBLOBBED)*/"
-#define QCA988X_HW_2_0_OTP_FILE "/*(DEBLOBBED)*/"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
@@ -76,14 +74,10 @@ enum qca9377_chip_id_rev {
};
#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1"
-#define QCA6174_HW_2_1_FW_FILE "/*(DEBLOBBED)*/"
-#define QCA6174_HW_2_1_OTP_FILE "/*(DEBLOBBED)*/"
#define QCA6174_HW_2_1_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
#define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0"
-#define QCA6174_HW_3_0_FW_FILE "/*(DEBLOBBED)*/"
-#define QCA6174_HW_3_0_OTP_FILE "/*(DEBLOBBED)*/"
#define QCA6174_HW_3_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
@@ -94,23 +88,17 @@ enum qca9377_chip_id_rev {
#define QCA99X0_HW_2_0_DEV_VERSION 0x01000000
#define QCA99X0_HW_2_0_CHIP_ID_REV 0x1
#define QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0"
-#define QCA99X0_HW_2_0_FW_FILE "/*(DEBLOBBED)*/"
-#define QCA99X0_HW_2_0_OTP_FILE "/*(DEBLOBBED)*/"
#define QCA99X0_HW_2_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
/* QCA9377 1.0 definitions */
#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
-#define QCA9377_HW_1_0_FW_FILE "/*(DEBLOBBED)*/"
-#define QCA9377_HW_1_0_OTP_FILE "/*(DEBLOBBED)*/"
#define QCA9377_HW_1_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
#define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* QCA4019 1.0 definitions */
#define QCA4019_HW_1_0_DEV_VERSION 0x01000000
#define QCA4019_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA4019/hw1.0"
-#define QCA4019_HW_1_0_FW_FILE "/*(DEBLOBBED)*/"
-#define QCA4019_HW_1_0_OTP_FILE "/*(DEBLOBBED)*/"
#define QCA4019_HW_1_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234
@@ -134,8 +122,6 @@ enum qca9377_chip_id_rev {
#define REG_DUMP_COUNT_QCA988X 60
-#define QCA988X_CAL_DATA_LEN 2116
-
struct ath10k_fw_ie {
__le32 id;
__le32 len;
@@ -431,10 +417,14 @@ enum ath10k_hw_4addr_pad {
#define TARGET_10_4_ACTIVE_PEERS 0
#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC 35
#define TARGET_10_4_NUM_OFFLOAD_PEERS 0
#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0
#define TARGET_10_4_NUM_PEER_KEYS 2
#define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_10_4_NUM_MSDU_DESC_PFC 2500
#define TARGET_10_4_AST_SKID_LIMIT 32
/* 100 ms for video, best-effort, and background */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e11160ab4..4040f9413 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -157,6 +157,26 @@ ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
return 1;
}
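+/* Pick the WMI host platform type from the firmware service map and pass
+ * it, together with the caller-supplied value, to the ext resource config
+ * command; -EOPNOTSUPP from firmware lacking the command is ignored.
+ */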
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
+{
+ enum wmi_host_platform_type platform_type;
+ int ret;
+
+ if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
+ platform_type = WMI_HOST_PLATFORM_LOW_PERF;
+ else
+ platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
+
+ ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
+
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/**********/
/* Crypto */
/**********/
@@ -449,10 +469,10 @@ static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(peer, &ar->peers, list) {
- if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN))
+ if (ether_addr_equal(peer->addr, arvif->vif->addr))
continue;
- if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN))
+ if (ether_addr_equal(peer->addr, arvif->bssid))
continue;
if (peer->keys[key->keyidx] == key)
@@ -482,7 +502,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
enum wmi_phy_mode phymode = MODE_UNKNOWN;
switch (chandef->chan->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
@@ -505,7 +525,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
break;
}
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
phymode = MODE_11A;
@@ -618,10 +638,15 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
*def = &conf->def;
}
-static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
+static int ath10k_peer_create(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 vdev_id,
+ const u8 *addr,
enum wmi_peer_type peer_type)
{
struct ath10k_vif *arvif;
+ struct ath10k_peer *peer;
int num_peers = 0;
int ret;
@@ -650,6 +675,22 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
return ret;
}
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, vdev_id, addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->data_lock);
+ ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
+ addr, vdev_id);
+ ath10k_wmi_peer_delete(ar, vdev_id, addr);
+ return -ENOENT;
+ }
+
+ peer->vif = vif;
+ peer->sta = sta;
+
+ spin_unlock_bh(&ar->data_lock);
+
ar->num_peers++;
return 0;
@@ -731,6 +772,7 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
{
struct ath10k_peer *peer, *tmp;
+ int peer_id;
lockdep_assert_held(&ar->conf_mutex);
@@ -742,6 +784,11 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
+ for_each_set_bit(peer_id, peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS) {
+ ar->peer_map[peer_id] = NULL;
+ }
+
list_del(&peer->list);
kfree(peer);
ar->num_peers--;
@@ -1725,7 +1772,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
!test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
- ar->fw_features)) {
+ ar->running_fw->fw_file.fw_features)) {
ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
arvif->vdev_id);
enable_ps = false;
@@ -2013,7 +2060,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
}
if (sta->mfp &&
- test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, ar->fw_features)) {
+ test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
+ ar->running_fw->fw_file.fw_features)) {
arg->peer_flags |= ar->wmi.peer_flags->pmf;
}
}
@@ -2028,7 +2076,7 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
- enum ieee80211_band band;
+ enum nl80211_band band;
u32 ratemask;
u8 rate;
int i;
@@ -2088,7 +2136,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
int i, n;
@@ -2312,7 +2360,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u16 *vht_mcs_mask;
u8 ampdu_factor;
@@ -2330,7 +2378,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
arg->peer_flags |= ar->wmi.peer_flags->vht;
- if (def.chan->band == IEEE80211_BAND_2GHZ)
+ if (def.chan->band == NL80211_BAND_2GHZ)
arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
arg->peer_vht_caps = vht_cap->cap;
@@ -2399,7 +2447,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
- return sta->supp_rates[IEEE80211_BAND_2GHZ] >>
+ return sta->supp_rates[NL80211_BAND_2GHZ] >>
ATH10K_MAC_FIRST_OFDM_RATE_IDX;
}
@@ -2410,7 +2458,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
enum wmi_phy_mode phymode = MODE_UNKNOWN;
@@ -2423,7 +2471,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
if (sta->vht_cap.vht_supported &&
!ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
@@ -2443,7 +2491,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
}
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
/*
* Check VHT first.
*/
@@ -2821,7 +2869,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
{
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_supported_band **bands;
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_channel *channel;
struct wmi_scan_chan_list_arg arg = {0};
struct wmi_channel_arg *ch;
@@ -2833,7 +2881,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
bands = hw->wiphy->bands;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!bands[band])
continue;
@@ -2852,7 +2900,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
return -ENOMEM;
ch = arg.channels;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!bands[band])
continue;
@@ -2890,7 +2938,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
/* FIXME: why use only legacy modes, why not any
* HT/VHT modes? Would that even make any
* difference? */
- if (channel->band == IEEE80211_BAND_2GHZ)
+ if (channel->band == NL80211_BAND_2GHZ)
ch->mode = MODE_11G;
else
ch->mode = MODE_11A;
@@ -2994,6 +3042,13 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
/* TX handlers */
/***************/
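+/* Resolved once per frame: data frames always go over HTT, while management
+ * frames go over WMI, plain HTT, or the HTT management endpoint depending
+ * on firmware features and the HTT target version.
+ */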
+enum ath10k_mac_tx_path {
+ ATH10K_MAC_TX_HTT,
+ ATH10K_MAC_TX_HTT_MGMT,
+ ATH10K_MAC_TX_WMI_MGMT,
+ ATH10K_MAC_TX_UNKNOWN,
+};
+
void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
{
lockdep_assert_held(&ar->htt.tx_lock);
@@ -3153,7 +3208,8 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
*/
if (ar->htt.target_version_major < 3 &&
(ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
- !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features))
+ !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->running_fw->fw_file.fw_features))
return ATH10K_HW_TXRX_MGMT;
/* Workaround:
@@ -3271,6 +3327,28 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
}
}
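+/* Populate the driver's per-skb control block: hw-crypto and frame-type
+ * flags plus the owning vif and txq, consumed later by the tx path.
+ */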
+static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_txq *txq,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+
+ cb->flags = 0;
+ if (!ath10k_tx_h_use_hwcrypto(vif, skb))
+ cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ cb->flags |= ATH10K_SKB_F_MGMT;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ cb->flags |= ATH10K_SKB_F_QOS;
+
+ cb->vif = vif;
+ cb->txq = txq;
+}
+
bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
{
/* FIXME: Not really sure since when the behaviour changed. At some
@@ -3281,7 +3359,7 @@ bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
*/
return (ar->htt.target_version_major >= 3 &&
ar->htt.target_version_minor >= 4 &&
- ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
+ ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
}
static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
@@ -3306,26 +3384,50 @@ unlock:
return ret;
}
-static void ath10k_mac_tx(struct ath10k *ar, enum ath10k_hw_txrx_mode txmode,
- struct sk_buff *skb)
+static enum ath10k_mac_tx_path
+ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
+ struct sk_buff *skb,
+ enum ath10k_hw_txrx_mode txmode)
{
- struct ath10k_htt *htt = &ar->htt;
- int ret = 0;
-
switch (txmode) {
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
case ATH10K_HW_TXRX_ETHERNET:
- ret = ath10k_htt_tx(htt, txmode, skb);
- break;
+ return ATH10K_MAC_TX_HTT;
case ATH10K_HW_TXRX_MGMT:
if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
- ar->fw_features))
- ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+ ar->running_fw->fw_file.fw_features))
+ return ATH10K_MAC_TX_WMI_MGMT;
else if (ar->htt.target_version_major >= 3)
- ret = ath10k_htt_tx(htt, txmode, skb);
+ return ATH10K_MAC_TX_HTT;
else
- ret = ath10k_htt_mgmt_tx(htt, skb);
+ return ATH10K_MAC_TX_HTT_MGMT;
+ }
+
+ return ATH10K_MAC_TX_UNKNOWN;
+}
+
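+/* Hand the frame to the transport chosen by ath10k_mac_tx_h_get_txpath();
+ * on failure the skb is freed here, so callers must not reuse it.
+ */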
+static int ath10k_mac_tx_submit(struct ath10k *ar,
+ enum ath10k_hw_txrx_mode txmode,
+ enum ath10k_mac_tx_path txpath,
+ struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ int ret = -EINVAL;
+
+ switch (txpath) {
+ case ATH10K_MAC_TX_HTT:
+ ret = ath10k_htt_tx(htt, txmode, skb);
+ break;
+ case ATH10K_MAC_TX_HTT_MGMT:
+ ret = ath10k_htt_mgmt_tx(htt, skb);
+ break;
+ case ATH10K_MAC_TX_WMI_MGMT:
+ ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+ break;
+ case ATH10K_MAC_TX_UNKNOWN:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
break;
}
@@ -3334,6 +3436,64 @@ static void ath10k_mac_tx(struct ath10k *ar, enum ath10k_hw_txrx_mode txmode,
ret);
ieee80211_free_txskb(ar->hw, skb);
}
+
+ return ret;
+}
+
+/* This function consumes the sk_buff regardless of return value as far as
+ * caller is concerned so no freeing is necessary afterwards.
+ */
+static int ath10k_mac_tx(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ath10k_hw_txrx_mode txmode,
+ enum ath10k_mac_tx_path txpath,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int ret;
+
+ /* We should disable CCK RATE due to P2P */
+ if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+
+ switch (txmode) {
+ case ATH10K_HW_TXRX_MGMT:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ ath10k_tx_h_nwifi(hw, skb);
+ ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
+ ath10k_tx_h_seq_no(vif, skb);
+ break;
+ case ATH10K_HW_TXRX_ETHERNET:
+ ath10k_tx_h_8023(skb);
+ break;
+ case ATH10K_HW_TXRX_RAW:
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ WARN_ON_ONCE(1);
+ ieee80211_free_txskb(hw, skb);
+ return -ENOTSUPP;
+ }
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ if (!ath10k_mac_tx_frm_has_freq(ar)) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
+ skb);
+
+ skb_queue_tail(&ar->offchan_tx_queue, skb);
+ ieee80211_queue_work(hw, &ar->offchan_tx_work);
+ return 0;
+ }
+ }
+
+ ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit frame: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
}
void ath10k_offchan_tx_purge(struct ath10k *ar)
@@ -3354,12 +3514,13 @@ void ath10k_offchan_tx_work(struct work_struct *work)
struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
struct ath10k_peer *peer;
struct ath10k_vif *arvif;
+ enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
struct ieee80211_hdr *hdr;
struct ieee80211_vif *vif;
struct ieee80211_sta *sta;
struct sk_buff *skb;
const u8 *peer_addr;
- enum ath10k_hw_txrx_mode txmode;
int vdev_id;
int ret;
unsigned long time_left;
@@ -3396,7 +3557,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
peer_addr, vdev_id);
if (!peer) {
- ret = ath10k_peer_create(ar, vdev_id, peer_addr,
+ ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
+ peer_addr,
WMI_PEER_TYPE_DEFAULT);
if (ret)
ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
@@ -3423,8 +3585,14 @@ void ath10k_offchan_tx_work(struct work_struct *work)
}
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
- ath10k_mac_tx(ar, txmode, skb);
+ ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
+ ret);
+ /* not serious */
+ }
time_left =
wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
@@ -3476,6 +3644,175 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
}
+static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
+{
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+ if (!txq)
+ return;
+
+ INIT_LIST_HEAD(&artxq->list);
+}
+
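+/* Remove the txq from the scheduling list and clear dangling references
+ * from frames still pending in the HTT layer.
+ */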
+static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
+{
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+ struct ath10k_skb_cb *cb;
+ struct sk_buff *msdu;
+ int msdu_id;
+
+ if (!txq)
+ return;
+
+ spin_lock_bh(&ar->txqs_lock);
+ if (!list_empty(&artxq->list))
+ list_del_init(&artxq->list);
+ spin_unlock_bh(&ar->txqs_lock);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
+ cb = ATH10K_SKB_CB(msdu);
+ if (cb->txq == txq)
+ cb->txq = NULL;
+ }
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
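+/* Map a firmware peer id and tid back to the owning mac80211 txq: the
+ * per-sta tid queue when a station is attached, else the vif queue.
+ */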
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+ u16 peer_id,
+ u8 tid)
+{
+ struct ath10k_peer *peer;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ peer = ar->peer_map[peer_id];
+ if (!peer)
+ return NULL;
+
+ if (peer->sta)
+ return peer->sta->txq[tid];
+ else if (peer->vif)
+ return peer->vif->txq;
+ else
+ return NULL;
+}
+
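+/* A queue may push frames when the firmware is in push mode, when the
+ * global pending-tx budget still has headroom, or when this queue's own
+ * in-flight count is below its per-queue allowance.
+ */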
+static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+ /* No need to get locks */
+
+ if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
+ return true;
+
+ if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
+ return true;
+
+ if (artxq->num_fw_queued < artxq->num_push_allowed)
+ return true;
+
+ return false;
+}
+
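+/* Dequeue one frame from a mac80211 txq and transmit it; returns the frame
+ * length on success, -ENOENT when the queue is empty, or a negative errno
+ * on failure. The pending-tx counter is balanced on every error path.
+ */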
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_htt *htt = &ar->htt;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+ struct ieee80211_vif *vif = txq->vif;
+ struct ieee80211_sta *sta = txq->sta;
+ enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
+ struct sk_buff *skb;
+ size_t skb_len;
+ int ret;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ret = ath10k_htt_tx_inc_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ if (ret)
+ return ret;
+
+ skb = ieee80211_tx_dequeue(hw, txq);
+ if (!skb) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return -ENOENT;
+ }
+
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
+
+ skb_len = skb->len;
+ txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+
+ ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+ if (unlikely(ret)) {
+ ath10k_warn(ar, "failed to push frame: %d\n", ret);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return ret;
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ artxq->num_fw_queued++;
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return skb_len;
+}
+
+void ath10k_mac_tx_push_pending(struct ath10k *ar)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_txq *txq;
+ struct ath10k_txq *artxq;
+ struct ath10k_txq *last;
+ int ret;
+ int max;
+
+ spin_lock_bh(&ar->txqs_lock);
+ rcu_read_lock();
+
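+ /* Round-robin across all scheduled txqs: push at most 16 frames per
+ * queue, rotate non-empty queues to the tail, and stop after servicing
+ * the queue that was last when the walk started, or on a hard error.
+ */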
+ last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
+ while (!list_empty(&ar->txqs)) {
+ artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
+ txq = container_of((void *)artxq, struct ieee80211_txq,
+ drv_priv);
+
+ /* Prevent aggressive sta/tid taking over tx queue */
+ max = 16;
+ ret = 0;
+ while (ath10k_mac_tx_can_push(hw, txq) && max--) {
+ ret = ath10k_mac_tx_push_txq(hw, txq);
+ if (ret < 0)
+ break;
+ }
+
+ list_del_init(&artxq->list);
+ if (ret != -ENOENT)
+ list_add_tail(&artxq->list, &ar->txqs);
+
+ ath10k_htt_tx_txq_update(hw, txq);
+
+ if (artxq == last || (ret < 0 && ret != -ENOENT))
+ break;
+ }
+
+ rcu_read_unlock();
+ spin_unlock_bh(&ar->txqs_lock);
+}
+
/************/
/* Scanning */
/************/
@@ -3531,7 +3868,7 @@ static int ath10k_scan_stop(struct ath10k *ar)
goto out;
}
- ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
if (ret == 0) {
ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
ret = -ETIMEDOUT;
@@ -3611,7 +3948,7 @@ static int ath10k_start_scan(struct ath10k *ar,
if (ret)
return ret;
- ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
+ ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
if (ret == 0) {
ret = ath10k_scan_stop(ar);
if (ret)
@@ -3638,66 +3975,86 @@ static int ath10k_start_scan(struct ath10k *ar,
/* mac80211 callbacks */
/**********************/
-static void ath10k_tx(struct ieee80211_hw *hw,
- struct ieee80211_tx_control *control,
- struct sk_buff *skb)
+static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct ath10k *ar = hw->priv;
- struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ struct ath10k_htt *htt = &ar->htt;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct ieee80211_sta *sta = control->sta;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_txq *txq = NULL;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
+ bool is_htt;
+ bool is_mgmt;
+ bool is_presp;
+ int ret;
- /* We should disable CCK RATE due to P2P */
- if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
- ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+ is_htt = (txpath == ATH10K_MAC_TX_HTT ||
+ txpath == ATH10K_MAC_TX_HTT_MGMT);
+ is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
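+ /* HTT frames consume tx descriptors up front; both pending counters
+ * are rolled back below if ath10k_mac_tx() fails.
+ */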
- skb_cb->flags = 0;
- if (!ath10k_tx_h_use_hwcrypto(vif, skb))
- skb_cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
-
- if (ieee80211_is_mgmt(hdr->frame_control))
- skb_cb->flags |= ATH10K_SKB_F_MGMT;
+ if (is_htt) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
- if (ieee80211_is_data_qos(hdr->frame_control))
- skb_cb->flags |= ATH10K_SKB_F_QOS;
-
- skb_cb->vif = vif;
+ ret = ath10k_htt_tx_inc_pending(htt);
+ if (ret) {
+ ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
+ ret);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ ieee80211_free_txskb(ar->hw, skb);
+ return;
+ }
- switch (txmode) {
- case ATH10K_HW_TXRX_MGMT:
- case ATH10K_HW_TXRX_NATIVE_WIFI:
- ath10k_tx_h_nwifi(hw, skb);
- ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
- ath10k_tx_h_seq_no(vif, skb);
- break;
- case ATH10K_HW_TXRX_ETHERNET:
- ath10k_tx_h_8023(skb);
- break;
- case ATH10K_HW_TXRX_RAW:
- if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
- WARN_ON_ONCE(1);
- ieee80211_free_txskb(hw, skb);
+ ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
+ ret);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ ieee80211_free_txskb(ar->hw, skb);
return;
}
+ spin_unlock_bh(&ar->htt.tx_lock);
}
- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
- if (!ath10k_mac_tx_frm_has_freq(ar)) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
- skb);
-
- skb_queue_tail(&ar->offchan_tx_queue, skb);
- ieee80211_queue_work(hw, &ar->offchan_tx_work);
- return;
+ ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
+ if (is_htt) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ if (is_mgmt)
+ ath10k_htt_tx_mgmt_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
}
+ return;
}
+}
- ath10k_mac_tx(ar, txmode, skb);
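+/* mac80211 wake_tx_queue hook: add the txq to the scheduling list and
+ * kick the htt txrx completion tasklet if a push is allowed right away.
+ */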
+static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+ spin_lock_bh(&ar->txqs_lock);
+ if (list_empty(&artxq->list))
+ list_add_tail(&artxq->list, &ar->txqs);
+ spin_unlock_bh(&ar->txqs_lock);
+
+ if (ath10k_mac_tx_can_push(hw, txq))
+ tasklet_schedule(&ar->htt.txrx_compl_task);
+
+ ath10k_htt_tx_txq_update(hw, txq);
}
/* Must not be called with conf_mutex held as workers can use that also. */
@@ -3919,14 +4276,11 @@ static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
vht_cap = ath10k_create_vht_cap(ar);
if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
- band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
band->ht_cap = ht_cap;
-
- /* Enable the VHT support at 2.4 GHz */
- band->vht_cap = vht_cap;
}
if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
- band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
band->ht_cap = ht_cap;
band->vht_cap = vht_cap;
}
@@ -3989,7 +4343,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
/*
* This makes sense only when restarting hw. It is harmless to call
- * uncoditionally. This is necessary to make sure no HTT/WMI tx
+ * unconditionally. This is necessary to make sure no HTT/WMI tx
* commands will be submitted while restarting.
*/
ath10k_drain_tx(ar);
@@ -4021,7 +4375,8 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_off;
}
- ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+ &ar->normal_mode_fw);
if (ret) {
ath10k_err(ar, "Could not init core: %d\n", ret);
goto err_power_down;
@@ -4079,7 +4434,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
}
if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
- ar->fw_features)) {
+ ar->running_fw->fw_file.fw_features)) {
ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
WMI_CCA_DETECT_LEVEL_AUTO,
WMI_CCA_DETECT_MARGIN_AUTO);
@@ -4100,7 +4455,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
ar->ani_enabled = true;
- if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+ if (ath10k_peer_stats_enabled(ar)) {
param = ar->wmi.pdev_param->peer_stats_update_period;
ret = ath10k_wmi_pdev_set_param(ar, param,
PEER_DEFAULT_STATS_UPDATE_PERIOD);
@@ -4313,6 +4668,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_peer *peer;
enum wmi_sta_powersave_param param;
int ret = 0;
u32 value;
@@ -4325,6 +4681,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
memset(arvif, 0, sizeof(*arvif));
+ ath10k_mac_txq_init(vif->txq);
arvif->ar = ar;
arvif->vif = vif;
@@ -4508,13 +4865,31 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
- ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr,
- WMI_PEER_TYPE_DEFAULT);
+ ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
+ vif->addr, WMI_PEER_TYPE_DEFAULT);
if (ret) {
ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
+ if (!peer) {
+ ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+ vif->addr, arvif->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ ret = -ENOENT;
+ goto err_peer_delete;
+ }
+
+ arvif->peer_id = find_first_bit(peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS);
+
+ spin_unlock_bh(&ar->data_lock);
+ } else {
+ arvif->peer_id = HTT_INVALID_PEERID;
}
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
@@ -4625,7 +5000,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_peer *peer;
int ret;
+ int i;
cancel_work_sync(&arvif->ap_csa_work);
cancel_delayed_work_sync(&arvif->connection_loss_work);
@@ -4679,7 +5056,22 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
}
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ peer = ar->peer_map[i];
+ if (!peer)
+ continue;
+
+ if (peer->vif == vif) {
+ ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ peer->vif = NULL;
+ }
+ }
+ spin_unlock_bh(&ar->data_lock);
+
ath10k_peer_cleanup(ar, arvif->vdev_id);
+ ath10k_mac_txq_unref(ar, vif->txq);
if (vif->type == NL80211_IFTYPE_MONITOR) {
ar->monitor_arvif = NULL;
@@ -4692,6 +5084,8 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_mac_vif_tx_unlock_all(arvif);
spin_unlock_bh(&ar->htt.tx_lock);
+ ath10k_mac_txq_unref(ar, vif->txq);
+
mutex_unlock(&ar->conf_mutex);
}
@@ -5221,7 +5615,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
struct ath10k_sta *arsta;
struct ieee80211_sta *sta;
struct cfg80211_chan_def def;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
u32 changed, bw, nss, smps;
@@ -5396,13 +5790,18 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k_peer *peer;
int ret = 0;
+ int i;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ ath10k_mac_txq_init(sta->txq[i]);
}
/* cancel must be done outside the mutex to avoid deadlock */
@@ -5437,8 +5836,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
if (sta->tdls)
peer_type = WMI_PEER_TYPE_TDLS;
- ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr,
- peer_type);
+ ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
+ sta->addr, peer_type);
if (ret) {
ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
sta->addr, arvif->vdev_id, ret);
@@ -5446,6 +5845,24 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
goto exit;
}
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
+ if (!peer) {
+ ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+ vif->addr, arvif->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ arsta->peer_id = find_first_bit(peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS);
+
+ spin_unlock_bh(&ar->data_lock);
+
if (!sta->tdls)
goto exit;
@@ -5508,6 +5925,23 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_mac_dec_num_stations(arvif, sta);
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ peer = ar->peer_map[i];
+ if (!peer)
+ continue;
+
+ if (peer->sta == sta) {
+ ath10k_warn(ar, "found sta peer %pM entry on vdev %i after it was supposedly removed\n",
+ sta->addr, arvif->vdev_id);
+ peer->sta = NULL;
+ }
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ ath10k_mac_txq_unref(ar, sta->txq[i]);
+
if (!sta->tdls)
goto exit;
@@ -5754,7 +6188,7 @@ exit:
return ret;
}
-#define ATH10K_ROC_TIMEOUT_HZ (2*HZ)
+#define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -5818,7 +6252,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
goto exit;
}
- ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
if (ret == 0) {
ath10k_warn(ar, "failed to switch to channel for roc scan\n");
@@ -5970,6 +6404,39 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
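+/* Request a read-and-clear BSS channel info snapshot from firmware before
+ * filling in a survey entry; only done when the firmware supports
+ * WMI_SERVICE_BSS_CHANNEL_INFO_64, the device is tuned to the requested
+ * channel, and no scan is running.
+ */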
+static void
+ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
+ struct ieee80211_channel *channel)
+{
+ int ret;
+ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
+ (ar->rx_channel != channel))
+ return;
+
+ if (ar->scan.state != ATH10K_SCAN_IDLE) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n");
+ return;
+ }
+
+ reinit_completion(&ar->bss_survey_done);
+
+ ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
+ if (ret) {
+ ath10k_warn(ar, "failed to send pdev bss chan info request\n");
+ return;
+ }
+
+ ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
+ if (!ret) {
+ ath10k_warn(ar, "bss channel survey timed out\n");
+ return;
+ }
+}
+
static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
@@ -5980,20 +6447,22 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
mutex_lock(&ar->conf_mutex);
- sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
sband = NULL;
}
if (!sband)
- sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
if (!sband || idx >= sband->n_channels) {
ret = -ENOENT;
goto exit;
}
+ ath10k_mac_update_bss_chan_survey(ar, survey->channel);
+
spin_lock_bh(&ar->data_lock);
memcpy(survey, ar_survey, sizeof(*survey));
spin_unlock_bh(&ar->data_lock);
@@ -6010,7 +6479,7 @@ exit:
static bool
ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
- enum ieee80211_band band,
+ enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int num_rates = 0;
@@ -6029,7 +6498,7 @@ ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
static bool
ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
- enum ieee80211_band band,
+ enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask,
int *nss)
{
@@ -6078,7 +6547,7 @@ ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
static int
ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
- enum ieee80211_band band,
+ enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask,
u8 *rate, u8 *nss)
{
@@ -6179,7 +6648,7 @@ static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
static bool
ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
- enum ieee80211_band band,
+ enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int i;
@@ -6231,7 +6700,7 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
struct ath10k *ar = arvif->ar;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
u8 rate;
@@ -6382,6 +6851,32 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return 0;
}
+static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 tsf)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf;
+ int ret;
+
+ /* Workaround:
+ *
+ * The given tsf argument is the entire TSF value, but the firmware
+ * accepts only a TSF offset relative to the current TSF.
+ *
+ * get_tsf is used to obtain that offset; however, since
+ * ath10k_get_tsf is not implemented properly, it always returns 0.
+ * Luckily, all current callers of set_tsf also rely on get_tsf to
+ * build the entire TSF value (such as get_tsf() + tsf_delta), so the
+ * final TSF offset sent to the firmware is arithmetically correct.
+ */
+ tsf_offset = tsf - ath10k_get_tsf(hw, vif);
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, tsf_offset);
+ if (ret && ret != -EOPNOTSUPP)
+ ath10k_warn(ar, "failed to set tsf offset: %d\n", ret);
+}
+
static int ath10k_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
@@ -6816,7 +7311,8 @@ ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops ath10k_ops = {
- .tx = ath10k_tx,
+ .tx = ath10k_mac_op_tx,
+ .wake_tx_queue = ath10k_mac_op_wake_tx_queue,
.start = ath10k_start,
.stop = ath10k_stop,
.config = ath10k_config,
@@ -6843,6 +7339,7 @@ static const struct ieee80211_ops ath10k_ops = {
.set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
+ .set_tsf = ath10k_set_tsf,
.ampdu_action = ath10k_ampdu_action,
.get_et_sset_count = ath10k_debug_get_et_sset_count,
.get_et_stats = ath10k_debug_get_et_stats,
@@ -6866,7 +7363,7 @@ static const struct ieee80211_ops ath10k_ops = {
};
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
@@ -6875,7 +7372,7 @@ static const struct ieee80211_ops ath10k_ops = {
}
#define CHAN5G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
@@ -7195,13 +7692,13 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
- band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
band->channels = channels;
band->n_bitrates = ath10k_g_rates_size;
band->bitrates = ath10k_g_rates;
- ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
}
if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
@@ -7213,12 +7710,12 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
- band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
band->channels = channels;
band->n_bitrates = ath10k_a_rates_size;
band->bitrates = ath10k_a_rates;
- ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
}
ath10k_mac_setup_ht_vht_cap(ar);
@@ -7231,7 +7728,7 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
- if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
+ if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
ar->hw->wiphy->interface_modes |=
BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -7271,6 +7768,7 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
ar->hw->sta_data_size = sizeof(struct ath10k_sta);
+ ar->hw->txq_data_size = sizeof(struct ath10k_txq);
ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
@@ -7295,7 +7793,8 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
- ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_AP_SCAN;
ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
@@ -7319,7 +7818,7 @@ int ath10k_mac_register(struct ath10k *ar)
*/
ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
- switch (ar->wmi.op_version) {
+ switch (ar->running_fw->fw_file.wmi_op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
ar->hw->wiphy->iface_combinations = ath10k_if_comb;
ar->hw->wiphy->n_iface_combinations =
@@ -7404,8 +7903,8 @@ err_dfs_detector_exit:
ar->dfs_detector->exit(ar->dfs_detector);
err_free:
- kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
- kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
SET_IEEE80211_DEV(ar->hw, NULL);
return ret;
@@ -7418,8 +7917,8 @@ void ath10k_mac_unregister(struct ath10k *ar)
if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
ar->dfs_detector->exit(ar->dfs_detector);
- kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
- kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
SET_IEEE80211_DEV(ar->hw, NULL);
}
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 530915880..1bd29ecfc 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -75,6 +75,13 @@ void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar);
+void ath10k_mac_tx_push_pending(struct ath10k *ar);
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+ u16 peer_id,
+ u8 tid);
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index f21f09cce..d2674ade4 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -33,12 +33,6 @@
#include "ce.h"
#include "pci.h"
-enum ath10k_pci_irq_mode {
- ATH10K_PCI_IRQ_AUTO = 0,
- ATH10K_PCI_IRQ_LEGACY = 1,
- ATH10K_PCI_IRQ_MSI = 2,
-};
-
enum ath10k_pci_reset_mode {
ATH10K_PCI_RESET_AUTO = 0,
ATH10K_PCI_RESET_WARM_ONLY = 1,
@@ -745,10 +739,7 @@ static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- if (ar_pci->num_msi_intrs > 1)
- return "msi-x";
-
- if (ar_pci->num_msi_intrs == 1)
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
return "msi";
return "legacy";
@@ -809,7 +800,8 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
spin_lock_bh(&ar_pci->ce_lock);
num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
spin_unlock_bh(&ar_pci->ce_lock);
- while (num--) {
+
+ while (num >= 0) {
ret = __ath10k_pci_rx_post_buf(pipe);
if (ret) {
if (ret == -ENOSPC)
@@ -819,6 +811,7 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
ATH10K_PCI_RX_POST_RETRY_MS);
break;
}
+ num--;
}
}
@@ -870,10 +863,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
- u32 buf;
+ u32 *buf;
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
- unsigned int id;
- unsigned int flags;
struct ath10k_ce_pipe *ce_diag;
/* Host buffer address in CE space */
u32 ce_data;
@@ -909,7 +900,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
nbytes = min_t(unsigned int, remaining_bytes,
DIAG_TRANSFER_LIMIT);
- ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
+ ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
if (ret != 0)
goto done;
@@ -940,9 +931,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
}
i = 0;
- while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id, &flags) != 0) {
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+ (void **)&buf,
+ &completed_nbytes)
+ != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -956,7 +948,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
goto done;
}
- if (buf != ce_data) {
+ if (*buf != ce_data) {
ret = -EIO;
goto done;
}
@@ -1026,10 +1018,8 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
- u32 buf;
+ u32 *buf;
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
- unsigned int id;
- unsigned int flags;
struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
u32 ce_data; /* Host buffer address in CE space */
@@ -1078,7 +1068,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
/* Set up to receive directly into Target(!) address */
- ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
+ ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
if (ret != 0)
goto done;
@@ -1103,9 +1093,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
}
i = 0;
- while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id, &flags) != 0) {
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+ (void **)&buf,
+ &completed_nbytes)
+ != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -1119,7 +1110,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
goto done;
}
- if (buf != address) {
+ if (*buf != address) {
ret = -EIO;
goto done;
}
@@ -1181,15 +1172,11 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
struct sk_buff *skb;
struct sk_buff_head list;
void *transfer_context;
- u32 ce_data;
unsigned int nbytes, max_nbytes;
- unsigned int transfer_id;
- unsigned int flags;
__skb_queue_head_init(&list);
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
- &ce_data, &nbytes, &transfer_id,
- &flags) == 0) {
+ &nbytes) == 0) {
skb = transfer_context;
max_nbytes = skb->len + skb_tailroom(skb);
dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
@@ -1218,6 +1205,63 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
ath10k_pci_rx_post_pipe(pipe_info);
}
+static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
+ void (*callback)(struct ath10k *ar,
+ struct sk_buff *skb))
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ void *transfer_context;
+ unsigned int nbytes, max_nbytes, nentries;
+ int orig_len;
+
+ /* No need to acquire ce_lock for CE5, since this is the only place CE5
+ * is processed other than init and deinit. Before releasing CE5
+ * buffers, interrupts are disabled. Thus CE5 access is serialized.
+ */
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
+ &nbytes) == 0) {
+ skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ continue;
+ }
+
+ dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
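+ /* Deliver each frame, then reset the skb so the same DMA buffer can
+ * be handed straight back to the copy engine.
+ */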
+ nentries = skb_queue_len(&list);
+ while ((skb = __skb_dequeue(&list))) {
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+ ce_state->id, skb->len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+ skb->data, skb->len);
+
+ orig_len = skb->len;
+ callback(ar, skb);
+ skb_push(skb, orig_len - skb->len);
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ /* let device gain the buffer again */
+ dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ }
+ ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
+}
+
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
@@ -1274,7 +1318,7 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
*/
ath10k_ce_per_engine_service(ce_state->ar, 4);
- ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
+ ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}
int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -1449,13 +1493,8 @@ void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
tasklet_kill(&ar_pci->intr_tq);
- tasklet_kill(&ar_pci->msi_fw_err);
-
- for (i = 0; i < CE_COUNT; i++)
- tasklet_kill(&ar_pci->pipe_info[i].intr);
del_timer_sync(&ar_pci->rx_post_retry);
}
@@ -1571,10 +1610,8 @@ static void ath10k_pci_irq_disable(struct ath10k *ar)
static void ath10k_pci_irq_sync(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
- for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
- synchronize_irq(ar_pci->pdev->irq + i);
+ synchronize_irq(ar_pci->pdev->irq);
}
static void ath10k_pci_irq_enable(struct ath10k *ar)
@@ -1835,13 +1872,10 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct bmi_xfer *xfer;
- u32 ce_data;
unsigned int nbytes;
- unsigned int transfer_id;
- unsigned int flags;
- if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
- &nbytes, &transfer_id, &flags))
+ if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
+ &nbytes))
return;
if (WARN_ON_ONCE(!xfer))
@@ -2546,65 +2580,6 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
#endif
};
-static void ath10k_pci_ce_tasklet(unsigned long ptr)
-{
- struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
- struct ath10k_pci *ar_pci = pipe->ar_pci;
-
- ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
-}
-
-static void ath10k_msi_err_tasklet(unsigned long data)
-{
- struct ath10k *ar = (struct ath10k *)data;
-
- if (!ath10k_pci_has_fw_crashed(ar)) {
- ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
- return;
- }
-
- ath10k_pci_irq_disable(ar);
- ath10k_pci_fw_crashed_clear(ar);
- ath10k_pci_fw_crashed_dump(ar);
-}
-
-/*
- * Handler for a per-engine interrupt on a PARTICULAR CE.
- * This is used in cases where each CE has a private MSI interrupt.
- */
-static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
-{
- struct ath10k *ar = arg;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
-
- if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
- ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
- ce_id);
- return IRQ_HANDLED;
- }
-
- /*
- * NOTE: We are able to derive ce_id from irq because we
- * use a one-to-one mapping for CE's 0..5.
- * CE's 6 & 7 do not use interrupts at all.
- *
- * This mapping must be kept in sync with the mapping
- * used by firmware.
- */
- tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
-{
- struct ath10k *ar = arg;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- tasklet_schedule(&ar_pci->msi_fw_err);
- return IRQ_HANDLED;
-}
-
/*
* Top-level interrupt handler for all PCI interrupts from a Target.
* When a block of MSI interrupts is allocated, this top-level handler
@@ -2622,7 +2597,7 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
return IRQ_NONE;
}
- if (ar_pci->num_msi_intrs == 0) {
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) {
if (!ath10k_pci_irq_pending(ar))
return IRQ_NONE;
@@ -2649,43 +2624,10 @@ static void ath10k_pci_tasklet(unsigned long data)
ath10k_ce_per_engine_service_any(ar);
/* Re-enable legacy irq that was disabled in the irq handler */
- if (ar_pci->num_msi_intrs == 0)
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
ath10k_pci_enable_legacy_irq(ar);
}
-static int ath10k_pci_request_irq_msix(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ret, i;
-
- ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
- ath10k_pci_msi_fw_handler,
- IRQF_SHARED, "ath10k_pci", ar);
- if (ret) {
- ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
- ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
- return ret;
- }
-
- for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
- ret = request_irq(ar_pci->pdev->irq + i,
- ath10k_pci_per_engine_handler,
- IRQF_SHARED, "ath10k_pci", ar);
- if (ret) {
- ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
- ar_pci->pdev->irq + i, ret);
-
- for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
- free_irq(ar_pci->pdev->irq + i, ar);
-
- free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
- return ret;
- }
- }
-
- return 0;
-}
-
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -2724,41 +2666,28 @@ static int ath10k_pci_request_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- switch (ar_pci->num_msi_intrs) {
- case 0:
+ switch (ar_pci->oper_irq_mode) {
+ case ATH10K_PCI_IRQ_LEGACY:
return ath10k_pci_request_irq_legacy(ar);
- case 1:
+ case ATH10K_PCI_IRQ_MSI:
return ath10k_pci_request_irq_msi(ar);
default:
- return ath10k_pci_request_irq_msix(ar);
+ return -EINVAL;
}
}
static void ath10k_pci_free_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
- /* There's at least one interrupt irregardless whether its legacy INTR
- * or MSI or MSI-X */
- for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
- free_irq(ar_pci->pdev->irq + i, ar);
+ free_irq(ar_pci->pdev->irq, ar);
}
void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
- tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
- (unsigned long)ar);
-
- for (i = 0; i < CE_COUNT; i++) {
- ar_pci->pipe_info[i].ar_pci = ar_pci;
- tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
- (unsigned long)&ar_pci->pipe_info[i]);
- }
}
static int ath10k_pci_init_irq(struct ath10k *ar)
@@ -2772,20 +2701,9 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
ath10k_info(ar, "limiting irq mode to: %d\n",
ath10k_pci_irq_mode);
- /* Try MSI-X */
- if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
- ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1;
- ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
- ar_pci->num_msi_intrs);
- if (ret > 0)
- return 0;
-
- /* fall-through */
- }
-
/* Try MSI */
if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
- ar_pci->num_msi_intrs = 1;
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
ret = pci_enable_msi(ar_pci->pdev);
if (ret == 0)
return 0;
@@ -2801,7 +2719,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
* This write might get lost if target has NOT written BAR.
* For now, fix the race by repeating the write in below
* synchronization checking. */
- ar_pci->num_msi_intrs = 0;
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
@@ -2819,8 +2737,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- switch (ar_pci->num_msi_intrs) {
- case 0:
+ switch (ar_pci->oper_irq_mode) {
+ case ATH10K_PCI_IRQ_LEGACY:
ath10k_pci_deinit_irq_legacy(ar);
break;
default:
@@ -2858,7 +2776,7 @@ int ath10k_pci_wait_for_target_init(struct ath10k *ar)
if (val & FW_IND_INITIALIZED)
break;
- if (ar_pci->num_msi_intrs == 0)
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
/* Fix potential race by repeating CORE_BASE writes */
ath10k_pci_enable_legacy_irq(ar);
@@ -3136,8 +3054,8 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_sleep;
}
- ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
- ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
+ ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
+ ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
ath10k_pci_irq_mode, ath10k_pci_reset_mode);
ret = ath10k_pci_request_irq(ar);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 249c73a69..959dc321b 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -148,9 +148,6 @@ struct ath10k_pci_pipe {
/* protects compl_free and num_send_allowed */
spinlock_t pipe_lock;
-
- struct ath10k_pci *ar_pci;
- struct tasklet_struct intr;
};
struct ath10k_pci_supp_chip {
@@ -164,6 +161,12 @@ struct ath10k_bus_ops {
int (*get_num_banks)(struct ath10k *ar);
};
+enum ath10k_pci_irq_mode {
+ ATH10K_PCI_IRQ_AUTO = 0,
+ ATH10K_PCI_IRQ_LEGACY = 1,
+ ATH10K_PCI_IRQ_MSI = 2,
+};
+
struct ath10k_pci {
struct pci_dev *pdev;
struct device *dev;
@@ -171,14 +174,10 @@ struct ath10k_pci {
void __iomem *mem;
size_t mem_len;
- /*
- * Number of MSI interrupts granted, 0 --> using legacy PCI line
- * interrupts.
- */
- int num_msi_intrs;
+ /* Operating interrupt mode */
+ enum ath10k_pci_irq_mode oper_irq_mode;
struct tasklet_struct intr_tq;
- struct tasklet_struct msi_fw_err;
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
index 3ca3fae40..0c5f5863d 100644
--- a/drivers/net/wireless/ath/ath10k/swap.c
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -134,27 +134,17 @@ ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
return seg_info;
}
-int ath10k_swap_code_seg_configure(struct ath10k *ar,
- enum ath10k_swap_code_seg_bin_type type)
+int ath10k_swap_code_seg_configure(struct ath10k *ar)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info = NULL;
- switch (type) {
- case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW:
- if (!ar->swap.firmware_swap_code_seg_info)
- return 0;
-
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
- seg_info = ar->swap.firmware_swap_code_seg_info;
- break;
- default:
- case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP:
- case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF:
- ath10k_warn(ar, "ignoring unknown code swap binary type %d\n",
- type);
+ if (!ar->swap.firmware_swap_code_seg_info)
return 0;
- }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+
+ seg_info = ar->swap.firmware_swap_code_seg_info;
ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
&seg_info->seg_hw_info,
@@ -171,8 +161,13 @@ int ath10k_swap_code_seg_configure(struct ath10k *ar,
void ath10k_swap_code_seg_release(struct ath10k *ar)
{
ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
- ar->swap.firmware_codeswap_data = NULL;
- ar->swap.firmware_codeswap_len = 0;
+
+ /* FIXME: these two assignments look to be in the wrong place! Shouldn't
+ * they be in ath10k_core_free_firmware_files() like the rest?
+ */
+ ar->normal_mode_fw.fw_file.codeswap_data = NULL;
+ ar->normal_mode_fw.fw_file.codeswap_len = 0;
+
ar->swap.firmware_swap_code_seg_info = NULL;
}
@@ -180,20 +175,23 @@ int ath10k_swap_code_seg_init(struct ath10k *ar)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info;
+ const void *codeswap_data;
+ size_t codeswap_len;
+
+ codeswap_data = ar->normal_mode_fw.fw_file.codeswap_data;
+ codeswap_len = ar->normal_mode_fw.fw_file.codeswap_len;
- if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data)
+ if (!codeswap_len || !codeswap_data)
return 0;
- seg_info = ath10k_swap_code_seg_alloc(ar,
- ar->swap.firmware_codeswap_len);
+ seg_info = ath10k_swap_code_seg_alloc(ar, codeswap_len);
if (!seg_info) {
ath10k_err(ar, "failed to allocate fw code swap segment\n");
return -ENOMEM;
}
ret = ath10k_swap_code_seg_fill(ar, seg_info,
- ar->swap.firmware_codeswap_data,
- ar->swap.firmware_codeswap_len);
+ codeswap_data, codeswap_len);
if (ret) {
ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
index 5c89952dd..36991c7b0 100644
--- a/drivers/net/wireless/ath/ath10k/swap.h
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -39,12 +39,6 @@ union ath10k_swap_code_seg_item {
struct ath10k_swap_code_seg_tail tail;
} __packed;
-enum ath10k_swap_code_seg_bin_type {
- ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP,
- ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW,
- ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF,
-};
-
struct ath10k_swap_code_seg_hw_info {
/* Swap binary image size */
__le32 swap_size;
@@ -64,8 +58,7 @@ struct ath10k_swap_code_seg_info {
dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
};
-int ath10k_swap_code_seg_configure(struct ath10k *ar,
- enum ath10k_swap_code_seg_bin_type type);
+int ath10k_swap_code_seg_configure(struct ath10k *ar);
void ath10k_swap_code_seg_release(struct ath10k *ar);
int ath10k_swap_code_seg_init(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index 361f143b0..8e24099fa 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -438,7 +438,7 @@ Fw Mode/SubMode Mask
((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
#define HI_DEV_LPL_TYPE_GET(_devix) \
(HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
- (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2)))
+ (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix) * 2)))
#define HOST_INTEREST_SMPS_IS_ALLOWED() \
((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 028e70bba..332393bf0 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -139,127 +139,8 @@ static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
return cfg80211_testmode_reply(skb);
}
-static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar)
-{
- size_t len, magic_len, ie_len;
- struct ath10k_fw_ie *hdr;
- char filename[100];
- __le32 *version;
- const u8 *data;
- int ie_id, ret;
-
- snprintf(filename, sizeof(filename), "%s/%s",
- ar->hw_params.fw.dir, ATH10K_FW_UTF_API2_FILE);
-
- /* load utf firmware image */
- ret = reject_firmware(&ar->testmode.utf, filename, ar->dev);
- if (ret) {
- ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
- filename, ret);
- return ret;
- }
-
- data = ar->testmode.utf->data;
- len = ar->testmode.utf->size;
-
- /* FIXME: call release_firmware() in error cases */
-
- /* magic also includes the null byte, check that as well */
- magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
-
- if (len < magic_len) {
- ath10k_err(ar, "utf firmware file is too small to contain magic\n");
- ret = -EINVAL;
- goto err;
- }
-
- if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
- ath10k_err(ar, "invalid firmware magic\n");
- ret = -EINVAL;
- goto err;
- }
-
- /* jump over the padding */
- magic_len = ALIGN(magic_len, 4);
-
- len -= magic_len;
- data += magic_len;
-
- /* loop elements */
- while (len > sizeof(struct ath10k_fw_ie)) {
- hdr = (struct ath10k_fw_ie *)data;
-
- ie_id = le32_to_cpu(hdr->id);
- ie_len = le32_to_cpu(hdr->len);
-
- len -= sizeof(*hdr);
- data += sizeof(*hdr);
-
- if (len < ie_len) {
- ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
- ie_id, len, ie_len);
- ret = -EINVAL;
- goto err;
- }
-
- switch (ie_id) {
- case ATH10K_FW_IE_FW_VERSION:
- if (ie_len > sizeof(ar->testmode.utf_version) - 1)
- break;
-
- memcpy(ar->testmode.utf_version, data, ie_len);
- ar->testmode.utf_version[ie_len] = '\0';
-
- ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode found fw utf version %s\n",
- ar->testmode.utf_version);
- break;
- case ATH10K_FW_IE_TIMESTAMP:
- /* ignore timestamp, but don't warn about it either */
- break;
- case ATH10K_FW_IE_FW_IMAGE:
- ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode found fw image ie (%zd B)\n",
- ie_len);
-
- ar->testmode.utf_firmware_data = data;
- ar->testmode.utf_firmware_len = ie_len;
- break;
- case ATH10K_FW_IE_WMI_OP_VERSION:
- if (ie_len != sizeof(u32))
- break;
- version = (__le32 *)data;
- ar->testmode.op_version = le32_to_cpup(version);
- ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode found fw ie wmi op version %d\n",
- ar->testmode.op_version);
- break;
- default:
- ath10k_warn(ar, "Unknown testmode FW IE: %u\n",
- le32_to_cpu(hdr->id));
- break;
- }
- /* jump over the padding */
- ie_len = ALIGN(ie_len, 4);
-
- len -= ie_len;
- data += ie_len;
- }
-
- if (!ar->testmode.utf_firmware_data || !ar->testmode.utf_firmware_len) {
- ath10k_err(ar, "No ATH10K_FW_IE_FW_IMAGE found\n");
- ret = -EINVAL;
- goto err;
- }
-
- return 0;
-
-err:
- release_firmware(ar->testmode.utf);
-
- return ret;
-}
-
-static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
+static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file)
{
char filename[100];
int ret;
@@ -268,7 +149,7 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
/* load utf firmware image */
- ret = reject_firmware(&ar->testmode.utf, filename, ar->dev);
+ ret = reject_firmware(&fw_file->firmware, filename, ar->dev);
if (ret) {
ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
filename, ret);
@@ -281,24 +162,27 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
* correct WMI interface.
*/
- ar->testmode.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
- ar->testmode.utf_firmware_data = ar->testmode.utf->data;
- ar->testmode.utf_firmware_len = ar->testmode.utf->size;
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+ fw_file->firmware_data = fw_file->firmware->data;
+ fw_file->firmware_len = fw_file->firmware->size;
return 0;
}
static int ath10k_tm_fetch_firmware(struct ath10k *ar)
{
+ struct ath10k_fw_components *utf_mode_fw;
int ret;
- ret = ath10k_tm_fetch_utf_firmware_api_2(ar);
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_UTF_API2_FILE,
+ &ar->testmode.utf_mode_fw.fw_file);
if (ret == 0) {
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2");
- return 0;
+ goto out;
}
- ret = ath10k_tm_fetch_utf_firmware_api_1(ar);
+ ret = ath10k_tm_fetch_utf_firmware_api_1(ar, &ar->testmode.utf_mode_fw.fw_file);
if (ret) {
ath10k_err(ar, "failed to fetch utf firmware binary: %d", ret);
return ret;
@@ -306,6 +190,21 @@ static int ath10k_tm_fetch_firmware(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using utf api 1");
+out:
+ utf_mode_fw = &ar->testmode.utf_mode_fw;
+
+ /* Use the same board data file as the normal firmware uses (it's
+ * still "owned" by normal_mode_fw, so we shouldn't free it).
+ */
+ utf_mode_fw->board_data = ar->normal_mode_fw.board_data;
+ utf_mode_fw->board_len = ar->normal_mode_fw.board_len;
+
+ if (!utf_mode_fw->fw_file.otp_data) {
+ ath10k_info(ar, "/*(DEBLOBBED)*/ didn't contain otp binary, taking it from the normal mode firmware");
+ utf_mode_fw->fw_file.otp_data = ar->normal_mode_fw.fw_file.otp_data;
+ utf_mode_fw->fw_file.otp_len = ar->normal_mode_fw.fw_file.otp_len;
+ }
+
return 0;
}
@@ -329,7 +228,7 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
goto err;
}
- if (WARN_ON(ar->testmode.utf != NULL)) {
+ if (WARN_ON(ar->testmode.utf_mode_fw.fw_file.firmware != NULL)) {
/* utf image is already downloaded, it shouldn't be */
ret = -EEXIST;
goto err;
@@ -344,27 +243,19 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
spin_lock_bh(&ar->data_lock);
ar->testmode.utf_monitor = true;
spin_unlock_bh(&ar->data_lock);
- BUILD_BUG_ON(sizeof(ar->fw_features) !=
- sizeof(ar->testmode.orig_fw_features));
-
- memcpy(ar->testmode.orig_fw_features, ar->fw_features,
- sizeof(ar->fw_features));
- ar->testmode.orig_wmi_op_version = ar->wmi.op_version;
- memset(ar->fw_features, 0, sizeof(ar->fw_features));
-
- ar->wmi.op_version = ar->testmode.op_version;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n",
- ar->wmi.op_version);
+ ar->testmode.utf_mode_fw.fw_file.wmi_op_version);
ret = ath10k_hif_power_up(ar);
if (ret) {
ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
ar->state = ATH10K_STATE_OFF;
- goto err_fw_features;
+ goto err_release_utf_mode_fw;
}
- ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF);
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF,
+ &ar->testmode.utf_mode_fw);
if (ret) {
ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
ar->state = ATH10K_STATE_OFF;
@@ -373,8 +264,8 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
ar->state = ATH10K_STATE_UTF;
- if (strlen(ar->testmode.utf_version) > 0)
- ver = ar->testmode.utf_version;
+ if (strlen(ar->testmode.utf_mode_fw.fw_file.fw_version) > 0)
+ ver = ar->testmode.utf_mode_fw.fw_file.fw_version;
else
ver = "API 1";
@@ -387,14 +278,9 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
err_power_down:
ath10k_hif_power_down(ar);
-err_fw_features:
- /* return the original firmware features */
- memcpy(ar->fw_features, ar->testmode.orig_fw_features,
- sizeof(ar->fw_features));
- ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
-
- release_firmware(ar->testmode.utf);
- ar->testmode.utf = NULL;
+err_release_utf_mode_fw:
+ release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+ ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
err:
mutex_unlock(&ar->conf_mutex);
@@ -415,13 +301,8 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
spin_unlock_bh(&ar->data_lock);
- /* return the original firmware features */
- memcpy(ar->fw_features, ar->testmode.orig_fw_features,
- sizeof(ar->fw_features));
- ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
-
- release_firmware(ar->testmode.utf);
- ar->testmode.utf = NULL;
+ release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+ ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
ar->state = ATH10K_STATE_OFF;
}
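
The testmode rework above routes the UTF image through the shared ath10k_core_fetch_firmware_api_n() helper while keeping the existing ordering: try the API 2 container first and fall back to the bare API 1 image only if that fails. A simplified standalone sketch of the fallback flow, with illustrative stub functions rather than driver code:

#include <stdio.h>

static int fetch_api2(void) { return -2; /* pretend the file is missing */ }
static int fetch_api1(void) { return 0;  /* pretend this one exists */ }

static int fetch_utf_firmware(void)
{
	if (fetch_api2() == 0) {
		printf("using utf api 2\n");
		return 0;
	}

	if (fetch_api1() == 0) {
		printf("using utf api 1\n");
		return 0;
	}

	return -1; /* neither image available */
}

int main(void)
{
	return fetch_utf_firmware();
}
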
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
index c9223e9e9..3abb97f63 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.h
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -20,7 +20,7 @@
#define ATH10K_QUIET_PERIOD_MIN 25
#define ATH10K_QUIET_START_OFFSET 10
#define ATH10K_HWMON_NAME_LEN 15
-#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
+#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
#define ATH10K_THERMAL_THROTTLE_MAX 100
struct ath10k_thermal {
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index fbfb608e4..576e7c42e 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -49,25 +49,25 @@ out:
spin_unlock_bh(&ar->data_lock);
}
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
- const struct htt_tx_done *tx_done)
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct ieee80211_tx_info *info;
+ struct ieee80211_txq *txq;
struct ath10k_skb_cb *skb_cb;
+ struct ath10k_txq *artxq;
struct sk_buff *msdu;
- bool limit_mgmt_desc = false;
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
- tx_done->msdu_id, !!tx_done->discard,
- !!tx_done->no_ack, !!tx_done->success);
+ "htt tx completion msdu_id %u status %d\n",
+ tx_done->msdu_id, tx_done->status);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
tx_done->msdu_id);
- return;
+ return -EINVAL;
}
spin_lock_bh(&htt->tx_lock);
@@ -76,17 +76,18 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
tx_done->msdu_id);
spin_unlock_bh(&htt->tx_lock);
- return;
+ return -ENOENT;
}
skb_cb = ATH10K_SKB_CB(msdu);
+ txq = skb_cb->txq;
- if (unlikely(skb_cb->flags & ATH10K_SKB_F_MGMT) &&
- ar->hw_params.max_probe_resp_desc_thres)
- limit_mgmt_desc = true;
+ if (txq) {
+ artxq = (void *)txq->drv_priv;
+ artxq->num_fw_queued--;
+ }
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
- __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
+ ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
@@ -99,22 +100,24 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
memset(&info->status, 0, sizeof(info->status));
trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
- if (tx_done->discard) {
+ if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
ieee80211_free_txskb(htt->ar->hw, msdu);
- return;
+ return 0;
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
- if (tx_done->no_ack)
+ if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
info->flags &= ~IEEE80211_TX_STAT_ACK;
- if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
+ return 0;
}
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
@@ -127,7 +130,7 @@ struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
list_for_each_entry(peer, &ar->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
- if (memcmp(peer->addr, addr, ETH_ALEN))
+ if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
@@ -163,7 +166,7 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
(mapped == expect_mapped ||
test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
- }), 3*HZ);
+ }), 3 * HZ);
if (time_left == 0)
return -ETIMEDOUT;
@@ -187,6 +190,13 @@ void ath10k_peer_map_event(struct ath10k_htt *htt,
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
+ if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+ ath10k_warn(ar,
+ "received htt peer map event with idx out of bounds: %hu\n",
+ ev->peer_id);
+ return;
+ }
+
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
if (!peer) {
@@ -203,6 +213,7 @@ void ath10k_peer_map_event(struct ath10k_htt *htt,
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
ev->vdev_id, ev->addr, ev->peer_id);
+ ar->peer_map[ev->peer_id] = peer;
set_bit(ev->peer_id, peer->peer_ids);
exit:
spin_unlock_bh(&ar->data_lock);
@@ -214,6 +225,13 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
+ if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+ ath10k_warn(ar,
+ "received htt peer unmap event with idx out of bounds: %hu\n",
+ ev->peer_id);
+ return;
+ }
+
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, ev->peer_id);
if (!peer) {
@@ -225,6 +243,7 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
peer->vdev_id, peer->addr, ev->peer_id);
+ ar->peer_map[ev->peer_id] = NULL;
clear_bit(ev->peer_id, peer->peer_ids);
if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
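
The txrx.c hunks above introduce ar->peer_map[] bookkeeping, so both the map and unmap handlers now validate the firmware-supplied peer_id before touching the array. A tiny standalone sketch of that validate-before-index pattern; the array size and names are illustrative only:

#include <stdio.h>

#define MAX_PEERS 8

static const char *peer_map[MAX_PEERS];

static const char *lookup(unsigned int peer_id)
{
	/* Reject out-of-bounds ids coming from the device, as the new
	 * checks in ath10k_peer_map_event()/ath10k_peer_unmap_event() do.
	 */
	if (peer_id >= MAX_PEERS)
		return NULL;
	return peer_map[peer_id];
}

int main(void)
{
	peer_map[3] = "sta-3";
	printf("id 3  -> %s\n", lookup(3) ? lookup(3) : "(rejected)");
	printf("id 99 -> %s\n", lookup(99) ? lookup(99) : "(rejected)");
	return 0;
}
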
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index a90e09f5c..e7ea1ae1c 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -19,8 +19,8 @@
#include "htt.h"
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
- const struct htt_tx_done *tx_done);
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 32ab34edc..64ebd304f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -186,8 +186,14 @@ struct wmi_ops {
u8 enable,
u32 detect_level,
u32 detect_margin);
+ struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap);
int (*get_vdev_subtype)(struct ath10k *ar,
enum wmi_vdev_subtype subtype);
+ struct sk_buff *(*gen_pdev_bss_chan_info_req)
+ (struct ath10k *ar,
+ enum wmi_bss_survey_req_type type);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -1330,6 +1336,26 @@ ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
}
static inline int
+ath10k_wmi_ext_resource_config(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->ext_resource_config)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->ext_resource_config(ar, type,
+ fw_feature_bitmap);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->ext_resource_cfg_cmdid);
+}
+
+static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
if (!ar->wmi.ops->get_vdev_subtype)
@@ -1338,4 +1364,22 @@ ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}
+static inline int
+ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
+ enum wmi_bss_survey_req_type type)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct sk_buff *skb;
+
+ if (!wmi->ops->gen_pdev_bss_chan_info_req)
+ return -EOPNOTSUPP;
+
+ skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ wmi->cmd->pdev_bss_chan_info_request_cmdid);
+}
+
#endif
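
Both new inline wrappers follow the wmi-ops convention visible above: probe the per-version op table, return -EOPNOTSUPP when the hook is absent, build the command buffer, then pass it to the send path. A standalone sketch of that dispatch shape, with simplified stand-in types rather than the kernel ones:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct buf { uint32_t type; };

struct ops {
	struct buf *(*gen_req)(uint32_t type); /* NULL if unsupported */
};

static struct buf scratch;

static struct buf *gen_req_v10(uint32_t type)
{
	scratch.type = type;
	return &scratch;
}

static int send_req(const struct ops *ops, uint32_t type)
{
	struct buf *b;

	if (!ops->gen_req)
		return -EOPNOTSUPP; /* this firmware branch lacks the command */

	b = ops->gen_req(type);
	if (!b)
		return -ENOMEM;

	printf("sending cmd type %u\n", b->type);
	return 0;
}

int main(void)
{
	struct ops v10 = { .gen_req = gen_req_v10 };
	struct ops legacy = { 0 };

	printf("v10:    %d\n", send_req(&v10, 1));
	printf("legacy: %d\n", send_req(&legacy, 1));
	return 0;
}
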
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 108593202..e09337ee7 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -3409,6 +3409,7 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
static const struct wmi_ops wmi_tlv_ops = {
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index dd6785905..b8aa60005 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -968,8 +968,8 @@ enum wmi_tlv_service {
#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
((svc_id) < (len) && \
- __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
- BIT((svc_id)%(sizeof(u32))))
+ __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
+ BIT((svc_id) % (sizeof(u32))))
#define SVCMAP(x, y, len) \
do { \
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 70261387d..2c300329e 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -521,7 +521,8 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
- .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid =
+ WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
};
/* 10.4 WMI cmd track */
@@ -705,6 +706,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
.pdev_bss_chan_info_request_cmdid =
WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ .ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
};
/* MAIN WMI VDEV param map */
@@ -780,6 +782,7 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
/* 10.X WMI VDEV param map */
@@ -855,6 +858,7 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -929,6 +933,7 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_10X_VDEV_PARAM_TSF_INCREMENT,
};
static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
@@ -1004,6 +1009,7 @@ static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+ .set_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
};
static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -1628,6 +1634,7 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
ch->max_power = arg->max_power;
ch->reg_power = arg->max_reg_power;
ch->antenna_max = arg->max_antenna_gain;
+ ch->max_tx_power = arg->max_power;
/* mode & flags share storage */
ch->mode = arg->mode;
@@ -1803,7 +1810,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
ret = -ESHUTDOWN;
(ret != -EAGAIN);
- }), 3*HZ);
+ }), 3 * HZ);
if (ret)
dev_kfree_skb_any(skb);
@@ -2099,34 +2106,6 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
-static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
-{
- enum ieee80211_band band;
-
- switch (phy_mode) {
- case MODE_11A:
- case MODE_11NA_HT20:
- case MODE_11NA_HT40:
- case MODE_11AC_VHT20:
- case MODE_11AC_VHT40:
- case MODE_11AC_VHT80:
- band = IEEE80211_BAND_5GHZ;
- break;
- case MODE_11G:
- case MODE_11B:
- case MODE_11GONLY:
- case MODE_11NG_HT20:
- case MODE_11NG_HT40:
- case MODE_11AC_VHT20_2G:
- case MODE_11AC_VHT40_2G:
- case MODE_11AC_VHT80_2G:
- default:
- band = IEEE80211_BAND_2GHZ;
- }
-
- return band;
-}
-
/* If keys are configured, HW decrypts all frames
* with protected bit set. Mark such frames as decrypted.
*/
@@ -2167,10 +2146,13 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_event_v1 *ev_v1;
struct wmi_mgmt_rx_event_v2 *ev_v2;
struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
+ struct wmi_mgmt_rx_ext_info *ext_info;
size_t pull_len;
u32 msdu_len;
+ u32 len;
- if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+ if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
+ ar->running_fw->fw_file.fw_features)) {
ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
ev_hdr = &ev_v2->hdr.v1;
pull_len = sizeof(*ev_v2);
@@ -2195,6 +2177,12 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
if (skb->len < msdu_len)
return -EPROTO;
+ if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+ len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+ ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+ memcpy(&arg->ext_info, ext_info,
+ sizeof(struct wmi_mgmt_rx_ext_info));
+ }
/* the WMI buffer might've ended up being padded to 4 bytes due to HTC
* trailer with credit update. Trim the excess garbage.
*/
@@ -2211,6 +2199,8 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
size_t pull_len;
u32 msdu_len;
+ struct wmi_mgmt_rx_ext_info *ext_info;
+ u32 len;
ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
ev_hdr = &ev->hdr;
@@ -2231,6 +2221,13 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
if (skb->len < msdu_len)
return -EPROTO;
+ if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+ len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+ ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+ memcpy(&arg->ext_info, ext_info,
+ sizeof(struct wmi_mgmt_rx_ext_info));
+ }
+
/* Make sure bytes added for padding are removed. */
skb_trim(skb, msdu_len);
@@ -2281,14 +2278,19 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
if (rx_status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
+ if (rx_status & WMI_RX_STATUS_EXT_INFO) {
+ status->mactime =
+ __le64_to_cpu(arg.ext_info.rx_mac_timestamp);
+ status->flag |= RX_FLAG_MACTIME_END;
+ }
/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
* MODE_11B. This means phy_mode is not a reliable source for the band
* of mgmt rx.
*/
if (channel >= 1 && channel <= 14) {
- status->band = IEEE80211_BAND_2GHZ;
+ status->band = NL80211_BAND_2GHZ;
} else if (channel >= 36 && channel <= 165) {
- status->band = IEEE80211_BAND_5GHZ;
+ status->band = NL80211_BAND_5GHZ;
} else {
/* Shouldn't happen unless list of advertised channels to
* mac80211 has been changed.
@@ -2298,7 +2300,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
- if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
+ if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
sband = &ar->mac.sbands[status->band];
@@ -2310,6 +2312,12 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
+ /* Firmware is guaranteed to report all essential management frames via
+ * WMI while it can deliver some extra via HTT. Since there can be
+ * duplicates, split the reporting wrt monitor/sniffing.
+ */
+ status->flag |= RX_FLAG_SKIP_MONITOR;
+
ath10k_wmi_handle_wep_reauth(ar, skb, status);
/* FW delivers WEP Shared Auth frame with Protected Bit set and
@@ -2351,7 +2359,7 @@ static int freq_to_idx(struct ath10k *ar, int freq)
struct ieee80211_supported_band *sband;
int band, ch, idx = 0;
- for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
sband = ar->hw->wiphy->bands[band];
if (!sband)
continue;
@@ -2612,6 +2620,16 @@ void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
}
+static void
+ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst)
+{
+ ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+ dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+ dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+}
+
static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
struct sk_buff *skb,
struct ath10k_fw_stats *stats)
@@ -2865,11 +2883,8 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
const struct wmi_10_2_4_ext_peer_stats *src;
struct ath10k_fw_stats_peer *dst;
int stats_len;
- bool ext_peer_stats_support;
- ext_peer_stats_support = test_bit(WMI_SERVICE_PEER_STATS,
- ar->wmi.svc_map);
- if (ext_peer_stats_support)
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
else
stats_len = sizeof(struct wmi_10_2_4_peer_stats);
@@ -2886,7 +2901,7 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
- if (ext_peer_stats_support)
+ if (ath10k_peer_stats_enabled(ar))
dst->rx_duration = __le32_to_cpu(src->rx_duration);
/* FIXME: expose 10.2 specific values */
@@ -2905,6 +2920,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
u32 num_pdev_ext_stats;
u32 num_vdev_stats;
u32 num_peer_stats;
+ u32 stats_id;
int i;
if (!skb_pull(skb, sizeof(*ev)))
@@ -2914,6 +2930,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+ stats_id = __le32_to_cpu(ev->stats_id);
for (i = 0; i < num_pdev_stats; i++) {
const struct wmi_10_4_pdev_stats *src;
@@ -2953,22 +2970,28 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
/* fw doesn't implement vdev stats */
for (i = 0; i < num_peer_stats; i++) {
- const struct wmi_10_4_peer_stats *src;
+ const struct wmi_10_4_peer_extd_stats *src;
struct ath10k_fw_stats_peer *dst;
+ int stats_len;
+ bool extd_peer_stats = !!(stats_id & WMI_10_4_STAT_PEER_EXTD);
+
+ if (extd_peer_stats)
+ stats_len = sizeof(struct wmi_10_4_peer_extd_stats);
+ else
+ stats_len = sizeof(struct wmi_10_4_peer_stats);
src = (void *)skb->data;
- if (!skb_pull(skb, sizeof(*src)))
+ if (!skb_pull(skb, stats_len))
return -EPROTO;
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
if (!dst)
continue;
- ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
- dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
- dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
- dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+ ath10k_wmi_10_4_pull_peer_stats(&src->common, dst);
/* FIXME: expose 10.4 specific values */
+ if (extd_peer_stats)
+ dst->rx_duration = __le32_to_cpu(src->rx_duration);
list_add_tail(&dst->list, &stats->peers);
}
@@ -4584,10 +4607,6 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
arg.service_map, arg.service_map_len);
- /* only manually set fw features when not using FW IE format */
- if (ar->fw_api == 1 && ar->fw_version_build > 636)
- set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
-
if (ar->num_rf_chains > ar->max_spatial_stream) {
ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
ar->num_rf_chains, ar->max_spatial_stream);
@@ -4617,10 +4636,16 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
}
if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
+ ar->max_num_vdevs;
+ else
+ ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+ ar->max_num_vdevs;
+
ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
ar->max_num_vdevs;
- ar->num_active_peers = ar->hw_params.qcache_active_peers +
- ar->max_num_vdevs;
ar->num_tids = ar->num_active_peers * 2;
ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
}
@@ -4769,6 +4794,58 @@ static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
+static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct wmi_pdev_bss_chan_info_event *ev;
+ struct survey_info *survey;
+ u64 busy, total, tx, rx, rx_bss;
+ u32 freq, noise_floor;
+ u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz;
+ int idx;
+
+ ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
+ if (WARN_ON(skb->len < sizeof(*ev)))
+ return -EPROTO;
+
+ freq = __le32_to_cpu(ev->freq);
+ noise_floor = __le32_to_cpu(ev->noise_floor);
+ busy = __le64_to_cpu(ev->cycle_busy);
+ total = __le64_to_cpu(ev->cycle_total);
+ tx = __le64_to_cpu(ev->cycle_tx);
+ rx = __le64_to_cpu(ev->cycle_rx);
+ rx_bss = __le64_to_cpu(ev->cycle_rx_bss);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
+ freq, noise_floor, busy, total, tx, rx, rx_bss);
+
+ spin_lock_bh(&ar->data_lock);
+ idx = freq_to_idx(ar, freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
+ freq, idx);
+ goto exit;
+ }
+
+ survey = &ar->survey[idx];
+
+ survey->noise = noise_floor;
+ survey->time = div_u64(total, cc_freq_hz);
+ survey->time_busy = div_u64(busy, cc_freq_hz);
+ survey->time_rx = div_u64(rx_bss, cc_freq_hz);
+ survey->time_tx = div_u64(tx, cc_freq_hz);
+ survey->filled |= (SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->bss_survey_done);
+ return 0;
+}
+
static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -5112,6 +5189,9 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
ath10k_wmi_event_temperature(ar, skb);
break;
+ case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
+ break;
case WMI_10_2_RTT_KEEPALIVE_EVENTID:
case WMI_10_2_GPIO_INPUT_EVENTID:
case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
@@ -5189,6 +5269,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_vdev_stopped(ar, skb);
break;
case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
+ case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
ath10k_dbg(ar, ATH10K_DBG_WMI,
"received event id %d not implemented\n", id);
break;
@@ -5198,6 +5279,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_4_PDEV_TEMPERATURE_EVENTID:
ath10k_wmi_event_temperature(ar, skb);
break;
+ case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@@ -5517,7 +5601,8 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
- if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+
+ if (ath10k_peer_stats_enabled(ar)) {
config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
} else {
@@ -5579,9 +5664,12 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
features |= WMI_10_2_COEX_GPIO;
- if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ if (ath10k_peer_stats_enabled(ar))
features |= WMI_10_2_PEER_STATS;
+ if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
+ features |= WMI_10_2_BSS_CHAN_INFO;
+
cmd->resource_config.feature_mask = __cpu_to_le32(features);
memcpy(&cmd->resource_config.common, &config, sizeof(config));
@@ -5800,9 +5888,8 @@ ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
for (i = 0; i < arg->n_bssids; i++)
- memcpy(&bssids->bssid_list[i],
- arg->bssids[i].bssid,
- ETH_ALEN);
+ ether_addr_copy(bssids->bssid_list[i].addr,
+ arg->bssids[i].bssid);
ptr += sizeof(*bssids);
ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
@@ -6613,6 +6700,26 @@ ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
return skb;
}
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar,
+ enum wmi_bss_survey_req_type type)
+{
+ struct wmi_pdev_chan_info_req_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
+ cmd->type = __cpu_to_le32(type);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev bss info request type %d\n", type);
+
+ return skb;
+}
+
/* This function assumes the beacon is already DMA mapped */
static struct sk_buff *
ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
@@ -7484,6 +7591,28 @@ static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
return -ENOTSUPP;
}
+static struct sk_buff *
+ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap)
+{
+ struct wmi_ext_resource_config_10_4_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
+ cmd->host_platform_config = __cpu_to_le32(type);
+ cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi ext resource config host type %d firmware feature bitmap %08x\n",
+ type, fw_feature_bitmap);
+ return skb;
+}
+
static const struct wmi_ops wmi_ops = {
.rx = ath10k_wmi_op_rx,
.map_svc = wmi_main_svc_map,
@@ -7690,6 +7819,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.gen_init = ath10k_wmi_10_2_op_gen_init,
.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
+ .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
/* shared with 10.1 */
.map_svc = wmi_10x_svc_map,
@@ -7810,16 +7940,18 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
+ .ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
/* shared with 10.2 */
.gen_request_stats = ath10k_wmi_op_gen_request_stats,
.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
+ .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
};
int ath10k_wmi_attach(struct ath10k *ar)
{
- switch (ar->wmi.op_version) {
+ switch (ar->running_fw->fw_file.wmi_op_version) {
case ATH10K_FW_WMI_OP_VERSION_10_4:
ar->wmi.ops = &wmi_10_4_ops;
ar->wmi.cmd = &wmi_10_4_cmd_map;
@@ -7861,7 +7993,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
ath10k_err(ar, "unsupported WMI op version: %d\n",
- ar->wmi.op_version);
+ ar->running_fw->fw_file.wmi_op_version);
return -EINVAL;
}
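
ath10k_wmi_event_pdev_bss_chan_info() above turns free-running cycle counters into survey times by dividing each counter by channel_counters_freq_hz. A standalone sketch of that arithmetic; the counter values and rate below are made up for illustration, and div_u64() is reimplemented as plain division:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t div_u64(uint64_t n, uint32_t d) /* stand-in for the kernel helper */
{
	return n / d;
}

int main(void)
{
	uint32_t cc_freq_hz = 88000;    /* assumed counter rate */
	uint64_t cycle_total = 8800000; /* hypothetical firmware counters */
	uint64_t cycle_busy = 2200000;

	/* Mirrors survey->time and survey->time_busy in the handler. */
	printf("time      = %" PRIu64 "\n", div_u64(cycle_total, cc_freq_hz));
	printf("time_busy = %" PRIu64 "\n", div_u64(cycle_busy, cc_freq_hz));
	return 0;
}
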
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 4d3cbc44f..9fdf47ea2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -180,6 +180,9 @@ enum wmi_service {
WMI_SERVICE_MESH_NON_11S,
WMI_SERVICE_PEER_STATS,
WMI_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_SERVICE_TX_MODE_DYNAMIC,
/* keep last */
WMI_SERVICE_MAX,
@@ -302,6 +305,9 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
WMI_10_4_SERVICE_PEER_STATS,
WMI_10_4_SERVICE_MESH_11S,
+ WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
};
static inline char *wmi_service_name(int service_id)
@@ -396,6 +402,9 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_MESH_NON_11S);
SVCSTR(WMI_SERVICE_PEER_STATS);
SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
+ SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
+ SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
+ SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
default:
return NULL;
}
@@ -405,8 +414,8 @@ static inline char *wmi_service_name(int service_id)
#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
((svc_id) < (len) && \
- __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
- BIT((svc_id)%(sizeof(u32))))
+ __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
+ BIT((svc_id) % (sizeof(u32))))
#define SVCMAP(x, y, len) \
do { \
@@ -643,6 +652,12 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_PEER_STATS, len);
SVCMAP(WMI_10_4_SERVICE_MESH_11S,
WMI_SERVICE_MESH_11S, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_SERVICE_TX_MODE_PUSH_PULL, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
+ WMI_SERVICE_TX_MODE_DYNAMIC, len);
}
#undef SVCMAP
@@ -816,6 +831,7 @@ struct wmi_cmd_map {
u32 set_cca_params_cmdid;
u32 pdev_bss_chan_info_request_cmdid;
u32 pdev_enable_adaptive_cca_cmdid;
+ u32 ext_resource_cfg_cmdid;
};
/*
@@ -1308,7 +1324,7 @@ enum wmi_10x_event_id {
WMI_10X_PDEV_TPC_CONFIG_EVENTID,
WMI_10X_GPIO_INPUT_EVENTID,
- WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
+ WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID - 1,
};
enum wmi_10_2_cmd_id {
@@ -1428,6 +1444,7 @@ enum wmi_10_2_cmd_id {
WMI_10_2_MU_CAL_START_CMDID,
WMI_10_2_SET_LTEU_CONFIG_CMDID,
WMI_10_2_SET_CCA_PARAMS,
+ WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
};
@@ -1471,6 +1488,8 @@ enum wmi_10_2_event_id {
WMI_10_2_WDS_PEER_EVENTID,
WMI_10_2_PEER_STA_PS_STATECHG_EVENTID,
WMI_10_2_PDEV_TEMPERATURE_EVENTID,
+ WMI_10_2_MU_REPORT_EVENTID,
+ WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID,
WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
};
@@ -1779,6 +1798,7 @@ struct wmi_channel {
__le32 reginfo1;
struct {
u8 antenna_max;
+ u8 max_tx_power;
} __packed;
} __packed;
} __packed;
@@ -2041,8 +2061,8 @@ struct wmi_10x_service_ready_event {
struct wlan_host_mem_req mem_reqs[0];
} __packed;
-#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
-#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_UNIFIED_READY_TIMEOUT_HZ (5 * HZ)
struct wmi_ready_event {
__le32 sw_version;
@@ -2434,6 +2454,7 @@ enum wmi_10_2_feature_mask {
WMI_10_2_RX_BATCH_MODE = BIT(0),
WMI_10_2_ATF_CONFIG = BIT(1),
WMI_10_2_COEX_GPIO = BIT(3),
+ WMI_10_2_BSS_CHAN_INFO = BIT(6),
WMI_10_2_PEER_STATS = BIT(7),
};
@@ -2660,13 +2681,43 @@ struct wmi_resource_config_10_4 {
*/
__le32 iphdr_pad_config;
- /* qwrap configuration
+ /* qwrap configuration (bits 15-0)
* 1 - This is qwrap configuration
* 0 - This is not qwrap
+ *
+ * Bits 31-16 are alloc_frag_desc_for_data_pkt (1 enables, 0 disables)
+ * In order to get ack-RSSI reporting and to specify the tx-rate for
+ * individual frames, this option must be enabled. This uses an extra
+ * 4 bytes per tx-msdu descriptor, so don't enable it unless you need it.
*/
__le32 qwrap_config;
} __packed;
+/**
+ * enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags
+ * @WMI_10_4_LTEU_SUPPORT: LTEU config
+ * @WMI_10_4_COEX_GPIO_SUPPORT: COEX GPIO config
+ * @WMI_10_4_AUX_RADIO_SPECTRAL_INTF: AUX Radio Enhancement for spectral scan
+ * @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan
+ * @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats
+ * @WMI_10_4_PEER_STATS: Per station stats
+ */
+enum wmi_10_4_feature_mask {
+ WMI_10_4_LTEU_SUPPORT = BIT(0),
+ WMI_10_4_COEX_GPIO_SUPPORT = BIT(1),
+ WMI_10_4_AUX_RADIO_SPECTRAL_INTF = BIT(2),
+ WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF = BIT(3),
+ WMI_10_4_BSS_CHANNEL_INFO_64 = BIT(4),
+ WMI_10_4_PEER_STATS = BIT(5),
+};
+
+struct wmi_ext_resource_config_10_4_cmd {
+ /* contains enum wmi_host_platform_type */
+ __le32 host_platform_config;
+ /* see enum wmi_10_4_feature_mask */
+ __le32 fw_feature_bitmap;
+};
+
/* structure describing host memory chunk. */
struct host_memory_chunk {
/* id of the request that is passed up in service ready */
@@ -3037,11 +3088,17 @@ struct wmi_10_4_mgmt_rx_event {
u8 buf[0];
} __packed;
+struct wmi_mgmt_rx_ext_info {
+ __le64 rx_mac_timestamp;
+} __packed __aligned(4);
+
#define WMI_RX_STATUS_OK 0x00
#define WMI_RX_STATUS_ERR_CRC 0x01
#define WMI_RX_STATUS_ERR_DECRYPT 0x08
#define WMI_RX_STATUS_ERR_MIC 0x10
#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+/* Extension data at the end of mgmt frame */
+#define WMI_RX_STATUS_EXT_INFO 0x40
#define PHY_ERROR_GEN_SPECTRAL_SCAN 0x26
#define PHY_ERROR_GEN_FALSE_RADAR_EXT 0x24
@@ -4072,6 +4129,13 @@ enum wmi_stats_id {
WMI_STAT_VDEV_RATE = BIT(5),
};
+enum wmi_10_4_stats_id {
+ WMI_10_4_STAT_PEER = BIT(0),
+ WMI_10_4_STAT_AP = BIT(1),
+ WMI_10_4_STAT_INST = BIT(2),
+ WMI_10_4_STAT_PEER_EXTD = BIT(3),
+};
+
struct wlan_inst_rssi_args {
__le16 cfg_retry_count;
__le16 retry_count;
@@ -4271,6 +4335,15 @@ struct wmi_10_4_peer_stats {
__le32 peer_rssi_changed;
} __packed;
+struct wmi_10_4_peer_extd_stats {
+ struct wmi_10_4_peer_stats common;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 inactive_time;
+ __le32 peer_chain_rssi;
+ __le32 rx_duration;
+ __le32 reserved[10];
+} __packed;
+
struct wmi_10_2_pdev_ext_stats {
__le32 rx_rssi_comb;
__le32 rx_rssi[4];
@@ -4336,14 +4409,14 @@ enum wmi_vdev_subtype_10_4 {
/*
* Indicates that AP VDEV uses hidden ssid. Only valid for
* AP/GO */
-#define WMI_VDEV_START_HIDDEN_SSID (1<<0)
+#define WMI_VDEV_START_HIDDEN_SSID (1 << 0)
/*
* Indicates if robust management frame/management frame
* protection is enabled. For GO/AP vdevs, it indicates that
* it may support station/client associations with RMF enabled.
* For STA/client vdevs, it indicates that sta will
* associate with AP with RMF enabled. */
-#define WMI_VDEV_START_PMF_ENABLED (1<<1)
+#define WMI_VDEV_START_PMF_ENABLED (1 << 1)
struct wmi_p2p_noa_descriptor {
__le32 type_count; /* 255: continuous schedule, 0: reserved */
@@ -4582,6 +4655,7 @@ struct wmi_vdev_param_map {
u32 meru_vc;
u32 rx_decap_type;
u32 bw_nss_ratemask;
+ u32 set_tsf;
};
#define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -4838,6 +4912,7 @@ enum wmi_10x_vdev_param {
WMI_10X_VDEV_PARAM_RTS_FIXED_RATE,
WMI_10X_VDEV_PARAM_VHT_SGIMASK,
WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_10X_VDEV_PARAM_TSF_INCREMENT,
};
enum wmi_10_4_vdev_param {
@@ -4907,6 +4982,12 @@ enum wmi_10_4_vdev_param {
WMI_10_4_VDEV_PARAM_MERU_VC,
WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+ WMI_10_4_VDEV_PARAM_SENSOR_AP,
+ WMI_10_4_VDEV_PARAM_BEACON_RATE,
+ WMI_10_4_VDEV_PARAM_DTIM_ENABLE_CTS,
+ WMI_10_4_VDEV_PARAM_STA_KICKOUT,
+ WMI_10_4_VDEV_PARAM_CAPABILITIES,
+ WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
};
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
@@ -5281,7 +5362,7 @@ enum wmi_sta_ps_param_pspoll_count {
#define WMI_UAPSD_AC_TYPE_TRIG 1
#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
- ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1)))
+ ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1 << (ac << 1)) : (1 << ((ac << 1) + 1)))
enum wmi_sta_ps_param_uapsd {
WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
@@ -5696,7 +5777,7 @@ struct wmi_rate_set {
* the rates are filled from least significant byte to most
* significant byte.
*/
- __le32 rates[(MAX_SUPPORTED_RATES/4)+1];
+ __le32 rates[(MAX_SUPPORTED_RATES / 4) + 1];
} __packed;
struct wmi_rate_set_arg {
@@ -6116,6 +6197,7 @@ struct wmi_mgmt_rx_ev_arg {
__le32 phy_mode;
__le32 buf_len;
__le32 status; /* %WMI_RX_STATUS_ */
+ struct wmi_mgmt_rx_ext_info ext_info;
};
struct wmi_ch_info_ev_arg {
@@ -6203,6 +6285,17 @@ struct wmi_pdev_temperature_event {
__le32 temperature;
} __packed;
+struct wmi_pdev_bss_chan_info_event {
+ __le32 freq;
+ __le32 noise_floor;
+ __le64 cycle_busy;
+ __le64 cycle_total;
+ __le64 cycle_tx;
+ __le64 cycle_rx;
+ __le64 cycle_rx_bss;
+ __le32 reserved;
+} __packed;
+
/* WOW structures */
enum wmi_wow_wakeup_event {
WOW_BMISS_EVENT = 0,
@@ -6401,6 +6494,21 @@ struct wmi_pdev_set_adaptive_cca_params {
__le32 cca_detect_margin;
} __packed;
+enum wmi_host_platform_type {
+ WMI_HOST_PLATFORM_HIGH_PERF,
+ WMI_HOST_PLATFORM_LOW_PERF,
+};
+
+enum wmi_bss_survey_req_type {
+ WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
+ WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
+};
+
+struct wmi_pdev_chan_info_req_cmd {
+ __le32 type;
+ __le32 reserved;
+} __packed;
+
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
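
The new WMI_RX_STATUS_EXT_INFO flag advertises that a wmi_mgmt_rx_ext_info block (the rx MAC timestamp) trails the management frame, starting at the frame length rounded up to a 4-byte boundary, matching the ALIGN(le32_to_cpu(arg->buf_len), 4) computation in the pull handlers earlier in this diff. A standalone sketch of that offset calculation with a hypothetical frame length:

#include <stdint.h>
#include <stdio.h>

#define ALIGN4(x) (((x) + 3u) & ~3u) /* stand-in for the kernel's ALIGN(x, 4) */

int main(void)
{
	uint32_t buf_len = 42; /* hypothetical management frame length */

	printf("frame body ends at %u, ext info begins at offset %u\n",
	       buf_len, ALIGN4(buf_len));
	return 0;
}
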
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 8e02b3819..77100d42f 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -233,7 +233,7 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
- ar->fw_features))) {
+ ar->running_fw->fw_file.fw_features))) {
ret = 1;
goto exit;
}
@@ -285,7 +285,7 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw)
mutex_lock(&ar->conf_mutex);
if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
- ar->fw_features))) {
+ ar->running_fw->fw_file.fw_features))) {
ret = 1;
goto exit;
}
@@ -325,7 +325,8 @@ exit:
int ath10k_wow_init(struct ath10k *ar)
{
- if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
+ if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->running_fw->fw_file.fw_features))
return 0;
if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 38be2702c..0624333f5 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -279,7 +279,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
return;
- } else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
+ } else if (ah->ah_current_channel->band == NL80211_BAND_2GHZ) {
/* beacon RSSI is low. In B/G mode turn off OFDM weak signal
* detect and zero firstep level to maximize CCK sensitivity */
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ba12f7f40..67fedb61f 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1265,10 +1265,10 @@ struct ath5k_hw {
void __iomem *iobase; /* address of the device */
struct mutex lock; /* dev-level lock */
struct ieee80211_hw *hw; /* IEEE 802.11 common */
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
struct ieee80211_channel channels[ATH_CHAN_MAX];
- struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
- s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
+ struct ieee80211_rate rates[NUM_NL80211_BANDS][AR5K_MAX_RATES];
+ s8 rate_idx[NUM_NL80211_BANDS][AR5K_MAX_RATES];
enum nl80211_iftype opmode;
#ifdef CONFIG_ATH5K_DEBUG
@@ -1532,7 +1532,7 @@ int ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
/* Protocol Control Unit Functions */
/* Helpers */
-int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
+int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band,
int len, struct ieee80211_rate *rate, bool shortpre);
unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah);
unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah);
@@ -1611,7 +1611,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
/* PHY functions */
/* Misc PHY functions */
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band);
+u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band);
int ath5k_hw_phy_disable(struct ath5k_hw *ah);
/* Gain_F optimization */
enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 66b636615..233054bd6 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -152,7 +152,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
0xffffffff;
ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_5GHZ);
/* Try to identify radio chip based on its srev */
switch (ah->ah_radio_5ghz_revision & 0xf0) {
@@ -160,14 +160,14 @@ int ath5k_hw_init(struct ath5k_hw *ah)
ah->ah_radio = AR5K_RF5111;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
break;
case AR5K_SREV_RAD_5112:
case AR5K_SREV_RAD_2112:
ah->ah_radio = AR5K_RF5112;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
break;
case AR5K_SREV_RAD_2413:
ah->ah_radio = AR5K_RF2413;
@@ -204,7 +204,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
ah->ah_radio = AR5K_RF5111;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
} else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) ||
ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) ||
ah->ah_phy_revision == AR5K_SREV_PHY_2425) {
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 3d946d8b2..d98fd421c 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -268,15 +268,15 @@ static void ath5k_reg_notifier(struct wiphy *wiphy,
* Returns true for the channel numbers used.
*/
#ifdef CONFIG_ATH5K_TEST_CHANNELS
-static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
+static bool ath5k_is_standard_channel(short chan, enum nl80211_band band)
{
return true;
}
#else
-static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
+static bool ath5k_is_standard_channel(short chan, enum nl80211_band band)
{
- if (band == IEEE80211_BAND_2GHZ && chan <= 14)
+ if (band == NL80211_BAND_2GHZ && chan <= 14)
return true;
return /* UNII 1,2 */
@@ -297,18 +297,18 @@ ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
unsigned int mode, unsigned int max)
{
unsigned int count, size, freq, ch;
- enum ieee80211_band band;
+ enum nl80211_band band;
switch (mode) {
case AR5K_MODE_11A:
/* 1..220, but 2GHz frequencies are filtered by check_channel */
size = 220;
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
break;
case AR5K_MODE_11B:
case AR5K_MODE_11G:
size = 26;
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
break;
default:
ATH5K_WARN(ah, "bad mode, not copying channels\n");
@@ -363,13 +363,13 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
int max_c, count_c = 0;
int i;
- BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
+ BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < NUM_NL80211_BANDS);
max_c = ARRAY_SIZE(ah->channels);
/* 2GHz band */
- sband = &ah->sbands[IEEE80211_BAND_2GHZ];
- sband->band = IEEE80211_BAND_2GHZ;
- sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];
+ sband = &ah->sbands[NL80211_BAND_2GHZ];
+ sband->band = NL80211_BAND_2GHZ;
+ sband->bitrates = &ah->rates[NL80211_BAND_2GHZ][0];
if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
/* G mode */
@@ -381,7 +381,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11G, max_c);
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
count_c = sband->n_channels;
max_c -= count_c;
} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
@@ -407,7 +407,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11B, max_c);
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
count_c = sband->n_channels;
max_c -= count_c;
}
@@ -415,9 +415,9 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
/* 5GHz band, A mode */
if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
- sband = &ah->sbands[IEEE80211_BAND_5GHZ];
- sband->band = IEEE80211_BAND_5GHZ;
- sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];
+ sband = &ah->sbands[NL80211_BAND_5GHZ];
+ sband->band = NL80211_BAND_5GHZ;
+ sband->bitrates = &ah->rates[NL80211_BAND_5GHZ][0];
memcpy(sband->bitrates, &ath5k_rates[4],
sizeof(struct ieee80211_rate) * 8);
@@ -427,7 +427,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11A, max_c);
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
}
ath5k_setup_rate_idx(ah, sband);
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 654a1e33f..929d7ccc0 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -1043,14 +1043,14 @@ ath5k_debug_dump_bands(struct ath5k_hw *ah)
BUG_ON(!ah->sbands);
- for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
+ for (b = 0; b < NUM_NL80211_BANDS; b++) {
struct ieee80211_supported_band *band = &ah->sbands[b];
char bname[6];
switch (band->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
strcpy(bname, "2 GHz");
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
strcpy(bname, "5 GHz");
break;
default:
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index bf29da5e9..fc47b7098 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -110,7 +110,7 @@ static const unsigned int ack_rates_high[] =
* bwmodes.
*/
int
-ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
+ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band,
int len, struct ieee80211_rate *rate, bool shortpre)
{
int sifs, preamble, plcp_bits, sym_time;
@@ -221,7 +221,7 @@ ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
case AR5K_BWMODE_DEFAULT:
sifs = AR5K_INIT_SIFS_DEFAULT_BG;
default:
- if (channel->band == IEEE80211_BAND_5GHZ)
+ if (channel->band == NL80211_BAND_5GHZ)
sifs = AR5K_INIT_SIFS_DEFAULT_A;
break;
}
@@ -279,7 +279,7 @@ ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
struct ieee80211_rate *rate;
unsigned int i;
/* 802.11g covers both OFDM and CCK */
- u8 band = IEEE80211_BAND_2GHZ;
+ u8 band = NL80211_BAND_2GHZ;
/* Write rate duration table */
for (i = 0; i < ah->sbands[band].n_bitrates; i++) {
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 0fce1c766..641b13a27 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -75,13 +75,13 @@
/**
* ath5k_hw_radio_revision() - Get the PHY Chip revision
* @ah: The &struct ath5k_hw
- * @band: One of enum ieee80211_band
+ * @band: One of enum nl80211_band
*
* Returns the revision number of a 2GHz, 5GHz or single chip
* radio.
*/
u16
-ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
+ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band)
{
unsigned int i;
u32 srev;
@@ -91,10 +91,10 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
* Set the radio chip access register
*/
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0));
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
break;
default:
@@ -138,11 +138,11 @@ ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
u16 freq = channel->center_freq;
/* Check if the channel is in our supported range */
- if (channel->band == IEEE80211_BAND_2GHZ) {
+ if (channel->band == NL80211_BAND_2GHZ) {
if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) &&
(freq <= ah->ah_capabilities.cap_range.range_2ghz_max))
return true;
- } else if (channel->band == IEEE80211_BAND_5GHZ)
+ } else if (channel->band == NL80211_BAND_5GHZ)
if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) &&
(freq <= ah->ah_capabilities.cap_range.range_5ghz_max))
return true;
@@ -743,7 +743,7 @@ done:
/**
* ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
* @ah: The &struct ath5k_hw
- * @band: One of enum ieee80211_band
+ * @band: One of enum nl80211_band
*
* Write initial RF gain table to set the RF sensitivity.
*
@@ -751,7 +751,7 @@ done:
* with Gain_F calibration
*/
static int
-ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
+ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum nl80211_band band)
{
const struct ath5k_ini_rfgain *ath5k_rfg;
unsigned int i, size, index;
@@ -786,7 +786,7 @@ ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
return -EINVAL;
}
- index = (band == IEEE80211_BAND_2GHZ) ? 1 : 0;
+ index = (band == NL80211_BAND_2GHZ) ? 1 : 0;
for (i = 0; i < size; i++) {
AR5K_REG_WAIT(i);
@@ -917,7 +917,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah,
}
/* Set Output and Driver bias current (OB/DB) */
- if (channel->band == IEEE80211_BAND_2GHZ) {
+ if (channel->band == NL80211_BAND_2GHZ) {
if (channel->hw_value == AR5K_MODE_11B)
ee_mode = AR5K_EEPROM_MODE_11B;
@@ -944,7 +944,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah,
AR5K_RF_DB_2GHZ, true);
/* RF5111 always needs OB/DB for 5GHz, even if we use 2GHz */
- } else if ((channel->band == IEEE80211_BAND_5GHZ) ||
+ } else if ((channel->band == NL80211_BAND_5GHZ) ||
(ah->ah_radio == AR5K_RF5111)) {
/* For 11a, Turbo and XR we need to choose
@@ -1145,7 +1145,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah,
}
if (ah->ah_radio == AR5K_RF5413 &&
- channel->band == IEEE80211_BAND_2GHZ) {
+ channel->band == NL80211_BAND_2GHZ) {
ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_DERBY_CHAN_SEL_MODE,
true);
@@ -1270,7 +1270,7 @@ ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
*/
data0 = data1 = 0;
- if (channel->band == IEEE80211_BAND_2GHZ) {
+ if (channel->band == NL80211_BAND_2GHZ) {
/* Map 2GHz channel to 5GHz Atheros channel ID */
ret = ath5k_hw_rf5111_chan2athchan(
ieee80211_frequency_to_channel(channel->center_freq),
@@ -1446,7 +1446,7 @@ ath5k_hw_channel(struct ath5k_hw *ah,
"channel frequency (%u MHz) out of supported "
"band range\n",
channel->center_freq);
- return -EINVAL;
+ return -EINVAL;
}
/*
@@ -1919,7 +1919,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
/* Convert current frequency to fbin value (the same way channels
* are stored on EEPROM, check out ath5k_eeprom_bin2freq) and scale
* up by 2 so we can compare it later */
- if (channel->band == IEEE80211_BAND_2GHZ) {
+ if (channel->band == NL80211_BAND_2GHZ) {
chan_fbin = (channel->center_freq - 2300) * 10;
freq_band = AR5K_EEPROM_BAND_2GHZ;
} else {
@@ -1983,7 +1983,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
break;
default:
- if (channel->band == IEEE80211_BAND_5GHZ) {
+ if (channel->band == NL80211_BAND_5GHZ) {
/* Both sample_freq and chip_freq are 40MHz */
spur_delta_phase = (spur_offset << 17) / 25;
spur_freq_sigma_delta =
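
The comment in the spur-mitigation hunk above describes mapping the current frequency to the fbin representation used by the EEPROM. A hedged sketch of the 2 GHz branch that is visible in the hunk; the 5 GHz branch is elided in the diff and uses a different EEPROM base, so it is deliberately not reproduced here. The helper name is hypothetical:

/* 2 GHz only, per the hunk above: EEPROM 2 GHz channels are stored
 * relative to 2300 MHz, scaled so spur offsets compare directly. */
static u16 ath5k_chan_to_fbin_2ghz(u16 center_freq_mhz)
{
	return (center_freq_mhz - 2300) * 10;
}
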
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index ddaad712c..beda11ce3 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -559,7 +559,7 @@ ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_rate *rate;
u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
@@ -596,10 +596,10 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
*
* Also we have different lowest rate for 802.11a
*/
- if (channel->band == IEEE80211_BAND_5GHZ)
- band = IEEE80211_BAND_5GHZ;
+ if (channel->band == NL80211_BAND_5GHZ)
+ band = NL80211_BAND_5GHZ;
else
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
switch (ah->ah_bwmode) {
case AR5K_BWMODE_5MHZ:
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 99e62f99a..56d7925a0 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -634,7 +634,7 @@ ath5k_hw_on_hold(struct ath5k_hw *ah)
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
- usleep_range(2000, 2500);
+ usleep_range(2000, 2500);
} else {
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_BASEBAND | bus_flags);
@@ -699,7 +699,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
- usleep_range(2000, 2500);
+ usleep_range(2000, 2500);
} else {
if (ath5k_get_bus_type(ah) == ATH_AHB)
ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
@@ -752,7 +752,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
clock = AR5K_PHY_PLL_RF5111; /*Zero*/
}
- if (channel->band == IEEE80211_BAND_2GHZ) {
+ if (channel->band == NL80211_BAND_2GHZ) {
mode |= AR5K_PHY_MODE_FREQ_2GHZ;
clock |= AR5K_PHY_PLL_44MHZ;
@@ -771,7 +771,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
else
mode |= AR5K_PHY_MODE_MOD_DYN;
}
- } else if (channel->band == IEEE80211_BAND_5GHZ) {
+ } else if (channel->band == NL80211_BAND_5GHZ) {
mode |= (AR5K_PHY_MODE_FREQ_5GHZ |
AR5K_PHY_MODE_MOD_OFDM);
@@ -906,7 +906,7 @@ ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
u32 data;
ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
AR5K_PHY_CCKTXCTL);
- if (channel->band == IEEE80211_BAND_5GHZ)
+ if (channel->band == NL80211_BAND_5GHZ)
data = 0xffb81020;
else
data = 0xffb80d20;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 7f3f94fbf..4e11ba06f 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -34,7 +34,7 @@
}
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
@@ -43,7 +43,7 @@
}
#define CHAN5G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.hw_value = (_channel), \
.center_freq = 5000 + (5 * (_channel)), \
.flags = (_flags), \
@@ -2583,7 +2583,7 @@ void ath6kl_check_wow_status(struct ath6kl *ar)
}
#endif
-static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
+static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum nl80211_band band,
bool ht_enable)
{
struct ath6kl_htcap *htcap = &vif->htcap[band];
@@ -2594,7 +2594,7 @@ static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
if (ht_enable) {
/* Set default ht capabilities */
htcap->ht_enable = true;
- htcap->cap_info = (band == IEEE80211_BAND_2GHZ) ?
+ htcap->cap_info = (band == NL80211_BAND_2GHZ) ?
ath6kl_g_htcap : ath6kl_a_htcap;
htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K;
} else /* Disable ht */
@@ -2609,7 +2609,7 @@ static int ath6kl_restore_htcap(struct ath6kl_vif *vif)
struct wiphy *wiphy = vif->ar->wiphy;
int band, ret = 0;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!wiphy->bands[band])
continue;
@@ -3530,7 +3530,7 @@ static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ath6kl *ar = wiphy_priv(wiphy);
- u32 rates[IEEE80211_NUM_BANDS];
+ u32 rates[NUM_NL80211_BANDS];
int ret, i;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
@@ -3555,7 +3555,7 @@ static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
* changed.
*/
- for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+ for (i = 0; i < NUM_NL80211_BANDS; i++)
if (wiphy->bands[i])
rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
@@ -3791,8 +3791,8 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
vif->bg_scan_period = 0;
- vif->htcap[IEEE80211_BAND_2GHZ].ht_enable = true;
- vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true;
+ vif->htcap[NL80211_BAND_2GHZ].ht_enable = true;
+ vif->htcap[NL80211_BAND_5GHZ].ht_enable = true;
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
if (fw_vif_idx != 0) {
@@ -3943,9 +3943,9 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
wiphy->available_antennas_rx = ar->hw.rx_ant;
if (band_2gig)
- wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+ wiphy->bands[NL80211_BAND_2GHZ] = &ath6kl_band_2ghz;
if (band_5gig)
- wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+ wiphy->bands[NL80211_BAND_5GHZ] = &ath6kl_band_5ghz;
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 4ec02cea0..ebb9f1637 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -31,6 +31,7 @@ unsigned int debug_mask;
static unsigned int suspend_mode;
static unsigned int wow_mode;
static unsigned int uart_debug;
+static unsigned int uart_rate = 115200;
static unsigned int ath6kl_p2p;
static unsigned int testmode;
static unsigned int recovery_enable;
@@ -40,6 +41,7 @@ module_param(debug_mask, uint, 0644);
module_param(suspend_mode, uint, 0644);
module_param(wow_mode, uint, 0644);
module_param(uart_debug, uint, 0644);
+module_param(uart_rate, uint, 0644);
module_param(ath6kl_p2p, uint, 0644);
module_param(testmode, uint, 0644);
module_param(recovery_enable, uint, 0644);
@@ -180,6 +182,7 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
if (uart_debug)
ar->conf_flags |= ATH6KL_CONF_UART_DEBUG;
+ ar->hw.uarttx_rate = uart_rate;
set_bit(FIRST_BOOT, &ar->flag);
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 6e770b78f..7067b87d0 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -623,7 +623,7 @@ struct ath6kl_vif {
struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
struct aggr_info *aggr_cntxt;
- struct ath6kl_htcap htcap[IEEE80211_NUM_BANDS];
+ struct ath6kl_htcap htcap[NUM_NL80211_BANDS];
struct timer_list disconnect_timer;
struct timer_list sched_scan_timer;
@@ -781,6 +781,7 @@ struct ath6kl {
u32 board_addr;
u32 refclk_hz;
u32 uarttx_pin;
+ u32 uarttx_rate;
u32 testscript_addr;
u8 tx_ant;
u8 rx_ant;
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index e508217a6..0d140a774 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -173,6 +173,7 @@ static const struct ath6kl_hw hw_list[] = {
.reserved_ram_size = 7168,
.board_addr = 0x436400,
.testscript_addr = 0,
+ .uarttx_pin = 11,
.flags = 0,
.fw = {
@@ -650,6 +651,14 @@ int ath6kl_configure_target(struct ath6kl *ar)
if (status)
return status;
+ /* Only set the baud rate if we're actually doing debug */
+ if (ar->conf_flags & ATH6KL_CONF_UART_DEBUG) {
+ status = ath6kl_bmi_write_hi32(ar, hi_desired_baud_rate,
+ ar->hw.uarttx_rate);
+ if (status)
+ return status;
+ }
+
/* Configure target refclk_hz */
if (ar->hw.refclk_hz != 0) {
status = ath6kl_bmi_write_hi32(ar, hi_refclk_hz,
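
The three ath6kl hunks above plumb a new uart_rate module parameter from core.c into ar->hw.uarttx_rate and finally into the firmware over BMI. A condensed sketch of that flow, assuming the names used in the patch (hi_desired_baud_rate is the firmware host-interest field it writes); example_configure_uart() itself is hypothetical:

static unsigned int uart_rate = 115200;	/* default baud rate */
module_param(uart_rate, uint, 0644);

static int example_configure_uart(struct ath6kl *ar)
{
	ar->hw.uarttx_rate = uart_rate;

	/* Only push the baud rate when UART debug is actually enabled */
	if (!(ar->conf_flags & ATH6KL_CONF_UART_DEBUG))
		return 0;

	return ath6kl_bmi_write_hi32(ar, hi_desired_baud_rate,
				     ar->hw.uarttx_rate);
}
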
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index a5e1de75a..631c3a0c5 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1584,6 +1584,11 @@ static int ath6kl_wmi_txe_notify_event_rx(struct wmi *wmi, u8 *datap, int len,
if (len < sizeof(*ev))
return -EINVAL;
+ if (vif->nw_type != INFRA_NETWORK ||
+ !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY,
+ vif->ar->fw_capabilities))
+ return -EOPNOTSUPP;
+
if (vif->sme_state != SME_CONNECTED)
return -ENOTCONN;
@@ -2043,7 +2048,7 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
sc->no_cck = cpu_to_le32(no_cck);
sc->num_ch = num_chan;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
sband = ar->wiphy->bands[band];
if (!sband)
@@ -2765,10 +2770,10 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
memset(&ratemask, 0, sizeof(ratemask));
/* only check 2.4 and 5 GHz bands, skip the rest */
- for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
+ for (band = 0; band <= NL80211_BAND_5GHZ; band++) {
/* copy legacy rate mask */
ratemask[band] = mask->control[band].legacy;
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
ratemask[band] =
mask->control[band].legacy << 4;
@@ -2794,9 +2799,9 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
if (mode == WMI_RATES_MODE_11A ||
mode == WMI_RATES_MODE_11A_HT20 ||
mode == WMI_RATES_MODE_11A_HT40)
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
else
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
cmd->ratemask[mode] = cpu_to_le64(ratemask[band]);
}
@@ -2817,10 +2822,10 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
memset(&ratemask, 0, sizeof(ratemask));
/* only check 2.4 and 5 GHz bands, skip the rest */
- for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
+ for (band = 0; band <= NL80211_BAND_5GHZ; band++) {
/* copy legacy rate mask */
ratemask[band] = mask->control[band].legacy;
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
ratemask[band] =
mask->control[band].legacy << 4;
@@ -2844,9 +2849,9 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
if (mode == WMI_RATES_MODE_11A ||
mode == WMI_RATES_MODE_11A_HT20 ||
mode == WMI_RATES_MODE_11A_HT40)
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
else
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
cmd->ratemask[mode] = cpu_to_le32(ratemask[band]);
}
@@ -3169,7 +3174,7 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
}
int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct ath6kl_htcap *htcap)
{
struct sk_buff *skb;
@@ -3182,7 +3187,7 @@ int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
cmd = (struct wmi_set_htcap_cmd *) skb->data;
/*
- * NOTE: Band in firmware matches enum ieee80211_band, it is unlikely
+	 * NOTE: Band in firmware matches enum nl80211_band; it is unlikely
* this will be changed in firmware. If at all there is any change in
* band value, the host needs to be fixed.
*/
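
The ath6kl_set_bitrate_mask{32,64}() hunks above keep their loop bound at NL80211_BAND_5GHZ on purpose: only the 2.4 and 5 GHz slots are consulted, and the 5 GHz legacy mask is shifted by 4 because the four CCK rates that open the 2.4 GHz rate table do not exist there. A standalone sketch of that copy; copy_legacy_masks() is a hypothetical name:

#include <net/cfg80211.h>

static void copy_legacy_masks(const struct cfg80211_bitrate_mask *mask,
			      u64 ratemask[NUM_NL80211_BANDS])
{
	enum nl80211_band band;

	for (band = 0; band <= NL80211_BAND_5GHZ; band++) {
		/* copy legacy rate mask */
		ratemask[band] = mask->control[band].legacy;
		if (band == NL80211_BAND_5GHZ)
			ratemask[band] = mask->control[band].legacy << 4;
	}
}
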
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 05d25a94c..3af464a73 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -2628,7 +2628,7 @@ int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
u8 keep_alive_intvl);
int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct ath6kl_htcap *htcap);
int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
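
With the prototype change above, callers pass enum nl80211_band directly; per the NOTE in wmi.c, the firmware's band encoding matches that enum. A hedged usage sketch (enable_ht_2ghz() is hypothetical; the vif fields follow the ath6kl structures touched elsewhere in this patch):

static int enable_ht_2ghz(struct ath6kl_vif *vif)
{
	struct ath6kl_htcap *htcap = &vif->htcap[NL80211_BAND_2GHZ];

	htcap->ht_enable = true;
	return ath6kl_wmi_set_htcap_cmd(vif->ar->wmi, vif->fw_vif_idx,
					NL80211_BAND_2GHZ, htcap);
}
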
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 40fa915d6..f68cb0045 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -75,6 +75,26 @@ config ATH9K_STATION_STATISTICS
---help---
This option enables detailed statistics for association stations.
+config ATH9K_TX99
+ bool "Atheros ath9k TX99 testing support"
+ depends on ATH9K_DEBUGFS && CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ Say N. This should only be enabled on systems undergoing
+ certification testing and evaluation in a controlled environment.
+	  Enabling this will only enable TX99 support; all other modes of
+	  operation will be disabled.
+
+	  TX99 support enables Specific Absorption Rate (SAR) testing.
+	  SAR is the unit of measurement for the amount of radio
+	  frequency (RF) energy absorbed by the body when using a
+	  wireless device. The RF exposure limits used are expressed in
+	  terms of SAR, which is a measure of the electric and magnetic
+	  field strength and power density for transmitters operating at
+	  frequencies from 300 kHz to 100 GHz. Regulatory bodies around
+	  the world require that wireless devices be evaluated to meet
+	  the RF exposure limits set forth in governmental SAR regulations.
+
config ATH9K_DFS_CERTIFIED
bool "Atheros DFS support for certified platforms"
depends on ATH9K && CFG80211_CERTIFICATION_ONUS
@@ -103,26 +123,6 @@ config ATH9K_DYNACK
based on ACK frame RX timestamp, TX frame timestamp and frame
duration
-config ATH9K_TX99
- bool "Atheros ath9k TX99 testing support"
- depends on ATH9K_DEBUGFS && CFG80211_CERTIFICATION_ONUS
- default n
- ---help---
- Say N. This should only be enabled on systems undergoing
- certification testing and evaluation in a controlled environment.
- Enabling this will only enable TX99 support, all other modes of
- operation will be disabled.
-
- TX99 support enables Specific Absorption Rate (SAR) testing.
- SAR is the unit of measurement for the amount of radio frequency(RF)
- absorbed by the body when using a wireless device. The RF exposure
- limits used are expressed in the terms of SAR, which is a measure
- of the electric and magnetic field strength and power density for
- transmitters operating at frequencies from 300 kHz to 100 GHz.
- Regulatory bodies around the world require that wireless device
- be evaluated to meet the RF exposure limits set forth in the
- governmental SAR regulations.
-
config ATH9K_WOW
bool "Wake on Wireless LAN support (EXPERIMENTAL)"
depends on ATH9K && PM
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index c38399bc9..c07866a2f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -331,7 +331,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -351,7 +351,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 0c391997a..518e649ec 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1203,12 +1203,12 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
{
int offset[8] = {0}, total = 0, test;
- int agc_out, i, peak_detect_threshold;
+ int agc_out, i, peak_detect_threshold = 0;
if (AR_SREV_9550(ah) || AR_SREV_9531(ah))
peak_detect_threshold = 8;
- else
- peak_detect_threshold = 0;
+ else if (AR_SREV_9561(ah))
+ peak_detect_threshold = 11;
/*
* Turn off LNA/SW.
@@ -1249,17 +1249,14 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
- if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
- AR_SREV_9561(ah)) {
- if (is_2g)
- REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
- AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
- peak_detect_threshold);
- else
- REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
- AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
- peak_detect_threshold);
- }
+ if (is_2g)
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
+ peak_detect_threshold);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
+ peak_detect_threshold);
for (i = 6; i > 0; i--) {
offset[i] = BIT(i - 1);
@@ -1311,9 +1308,6 @@ static void ar9003_hw_do_pcoem_manual_peak_cal(struct ath_hw *ah,
struct ath9k_hw_cal_data *caldata = ah->caldata;
int i;
- if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
- return;
-
if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
return;
@@ -1641,14 +1635,12 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
skip_tx_iqcal:
if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
- if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah) ||
- AR_SREV_9561(ah)) {
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (!(ah->rxchainmask & (1 << i)))
- continue;
- ar9003_hw_manual_peak_cal(ah, i,
- IS_CHAN_2GHZ(chan));
- }
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->rxchainmask & (1 << i)))
+ continue;
+
+ ar9003_hw_manual_peak_cal(ah, i,
+ IS_CHAN_2GHZ(chan));
}
/*
@@ -1709,7 +1701,7 @@ void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
struct ath_hw_ops *ops = ath9k_hw_ops(ah);
- if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ if (AR_SREV_9003_PCOEM(ah))
priv_ops->init_cal = ar9003_hw_init_cal_pcoem;
else
priv_ops->init_cal = ar9003_hw_init_cal_soc;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 54ed2f72d..dec1a317a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3590,8 +3590,8 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
else
gpio = AR9300_EXT_LNA_CTL_GPIO_AR9485;
- ath9k_hw_cfg_output(ah, gpio,
- AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
+ ath9k_hw_gpio_request_out(ah, gpio, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
}
value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
@@ -4097,16 +4097,16 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
- therm_on = (thermometer < 0) ? 0 : (thermometer == 0);
+ therm_on = thermometer == 0;
REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
if (pCap->chip_chainmask & BIT(1)) {
- therm_on = (thermometer < 0) ? 0 : (thermometer == 1);
+ therm_on = thermometer == 1;
REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
}
if (pCap->chip_chainmask & BIT(2)) {
- therm_on = (thermometer < 0) ? 0 : (thermometer == 2);
+ therm_on = thermometer == 2;
REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
}
@@ -4402,7 +4402,7 @@ static void ar9003_hw_selfgen_tpc_txpower(struct ath_hw *ah,
}
/* Set tx power registers to array of values passed in */
-static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
+int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
{
#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
/* make sure forced gain is not set */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 694ca2e68..107bcfbbe 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -355,5 +355,6 @@ unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
struct ath9k_channel *chan);
void ar9003_hw_internal_regulator_apply(struct ath_hw *ah);
+int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray);
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index af5ee416a..0fe9c8378 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -427,21 +427,34 @@ static void ar9003_mci_observation_set_up(struct ath_hw *ah)
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
- ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
- ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
- ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
- ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+ ath9k_hw_gpio_request_out(ah, 3, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
+ ath9k_hw_gpio_request_out(ah, 2, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
+ ath9k_hw_gpio_request_out(ah, 1, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+ ath9k_hw_gpio_request_out(ah, 0, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
} else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
- ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
- ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
- ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
- ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
- ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_gpio_request_out(ah, 3, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
+ ath9k_hw_gpio_request_out(ah, 2, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
+ ath9k_hw_gpio_request_out(ah, 1, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+ ath9k_hw_gpio_request_out(ah, 0, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+ ath9k_hw_gpio_request_out(ah, 5, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
} else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
- ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
- ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
- ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
- ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+ ath9k_hw_gpio_request_out(ah, 3, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+ ath9k_hw_gpio_request_out(ah, 2, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+ ath9k_hw_gpio_request_out(ah, 1, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+ ath9k_hw_gpio_request_out(ah, 0, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
} else
return;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 06c1ca6e8..ae3043559 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -17,6 +17,7 @@
#include <linux/export.h>
#include "hw.h"
#include "ar9003_phy.h"
+#include "ar9003_eeprom.h"
#define AR9300_OFDM_RATES 8
#define AR9300_HT_SS_RATES 8
@@ -1009,7 +1010,7 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
- if (rfMode & (AR_PHY_MODE_QUARTER | AR_PHY_MODE_HALF))
+ if (IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))
REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL,
AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW, 3);
@@ -1337,11 +1338,11 @@ skip_ws_det:
chan->channel,
aniState->mrcCCK ? "on" : "off",
is_on ? "on" : "off");
- if (is_on)
- ah->stats.ast_ani_ccklow++;
- else
- ah->stats.ast_ani_cckhigh++;
- aniState->mrcCCK = is_on;
+ if (is_on)
+ ah->stats.ast_ani_ccklow++;
+ else
+ ah->stats.ast_ani_cckhigh++;
+ aniState->mrcCCK = is_on;
}
break;
}
@@ -1840,73 +1841,14 @@ static void ar9003_hw_tx99_stop(struct ath_hw *ah)
static void ar9003_hw_tx99_set_txpower(struct ath_hw *ah, u8 txpower)
{
- static s16 p_pwr_array[ar9300RateSize] = { 0 };
+ static u8 p_pwr_array[ar9300RateSize] = { 0 };
unsigned int i;
- if (txpower <= MAX_RATE_POWER) {
- for (i = 0; i < ar9300RateSize; i++)
- p_pwr_array[i] = txpower;
- } else {
- for (i = 0; i < ar9300RateSize; i++)
- p_pwr_array[i] = MAX_RATE_POWER;
- }
+ txpower = txpower <= MAX_RATE_POWER ? txpower : MAX_RATE_POWER;
+ for (i = 0; i < ar9300RateSize; i++)
+ p_pwr_array[i] = txpower;
- REG_WRITE(ah, 0xa458, 0);
-
- REG_WRITE(ah, 0xa3c0,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
- REG_WRITE(ah, 0xa3c4,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_54], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_48], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_36], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
- REG_WRITE(ah, 0xa3c8,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
- REG_WRITE(ah, 0xa3cc,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11S], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11L], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_5S], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
- REG_WRITE(ah, 0xa3d0,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_5], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_4], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_1_3_9_11_17_19], 8)|
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_0_8_16], 0));
- REG_WRITE(ah, 0xa3d4,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_13], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_12], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_7], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_6], 0));
- REG_WRITE(ah, 0xa3e4,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_21], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_20], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_15], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_14], 0));
- REG_WRITE(ah, 0xa3e8,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_23], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_22], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_23], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_22], 0));
- REG_WRITE(ah, 0xa3d8,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_5], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_4], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_0_8_16], 0));
- REG_WRITE(ah, 0xa3dc,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_13], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_12], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_7], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_6], 0));
- REG_WRITE(ah, 0xa3ec,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_21], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_20], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_15], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_14], 0));
+ ar9003_hw_tx_power_regwrite(ah, p_pwr_array);
}
static void ar9003_hw_init_txpower_cck(struct ath_hw *ah, u8 *rate_array)
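
The hunk above replaces the long block of per-register REG_WRITE() calls with the now-exported ar9003_hw_tx_power_regwrite(), and collapses the two clamping loops into one. A minimal sketch of the resulting logic, assuming the patch's names (ar9300RateSize, MAX_RATE_POWER); tx99_set_txpower_sketch() is hypothetical:

static void tx99_set_txpower_sketch(struct ath_hw *ah, u8 txpower)
{
	static u8 p_pwr_array[ar9300RateSize];
	unsigned int i;

	/* Clamp once, then apply the same level to every rate */
	txpower = min_t(u8, txpower, MAX_RATE_POWER);
	for (i = 0; i < ar9300RateSize; i++)
		p_pwr_array[i] = txpower;

	/* One helper now writes the whole per-rate register set */
	ar9003_hw_tx_power_regwrite(ah, p_pwr_array);
}
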
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index 2c42ff05e..29479afbc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -40,7 +40,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
- {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -59,7 +59,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 2154efcd3..c4a6ffa55 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -345,7 +345,7 @@ static const u32 ar9331_1p2_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
- {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -364,7 +364,7 @@ static const u32 ar9331_1p2_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index b995ffe88..2eb163fc1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -245,7 +245,7 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -265,7 +265,7 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110},
{0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00041983, 0x00041983, 0x00041982, 0x00041982},
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 1b6b4d0cf..b00dd6494 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -59,7 +59,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c780, 0xcfd5c280},
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -79,7 +79,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
index dc3adda46..0f8745ec7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
@@ -239,7 +239,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c780, 0xcfd5c280},
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -259,7 +259,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index ce83ce47a..bdf6f107f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -1026,7 +1026,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
{0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
{0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
@@ -1044,7 +1044,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
index c0b90daa3..924ae6bde 100644
--- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -988,7 +988,7 @@ static const u32 qca953x_2p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946220, 0xcf946220},
{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1008,7 +1008,7 @@ static const u32 qca953x_2p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
{0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
{0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
index 148562add..67edf344b 100644
--- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -83,7 +83,7 @@ static const u32 ar955x_1p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
{0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index 10d4a6cb1..35c1bbb2f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -347,7 +347,7 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
diff --git a/drivers/net/wireless/ath/ath9k/ar956x_initvals.h b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
index c3a47eaaf..db051071c 100644
--- a/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
@@ -220,7 +220,7 @@ static const u32 qca956x_1p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003a6, 0x000003a6},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946220, 0xcf946220},
{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index 5d4629f96..f4c9befb3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -1290,7 +1290,7 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1310,7 +1310,7 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 5294595da..93b3793cc 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -813,7 +813,6 @@ static inline int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
#ifdef CONFIG_MAC80211_LEDS
void ath_init_leds(struct ath_softc *sc);
void ath_deinit_leds(struct ath_softc *sc);
-void ath_fill_led_pin(struct ath_softc *sc);
#else
static inline void ath_init_leds(struct ath_softc *sc)
{
@@ -822,9 +821,6 @@ static inline void ath_init_leds(struct ath_softc *sc)
static inline void ath_deinit_leds(struct ath_softc *sc)
{
}
-static inline void ath_fill_led_pin(struct ath_softc *sc)
-{
-}
#endif
/************************/
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 5a084d94e..618c9df35 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -15,6 +15,8 @@
*/
#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/ath9k_platform.h>
#include "hw.h"
enum ath_bt_mode {
@@ -34,6 +36,8 @@ struct ath_btcoex_config {
u8 bt_priority_time;
u8 bt_first_slot_time;
bool bt_hold_rx_clear;
+ u8 wl_active_time;
+ u8 wl_qc_time;
};
static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
@@ -65,31 +69,71 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
.bt_priority_time = 2,
.bt_first_slot_time = 5,
.bt_hold_rx_clear = true,
+ .wl_active_time = 0x20,
+ .wl_qc_time = 0x20,
};
bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
+ u8 time_extend = ath_bt_config.bt_time_extend;
+ u8 first_slot_time = ath_bt_config.bt_first_slot_time;
if (AR_SREV_9300_20_OR_LATER(ah))
rxclear_polarity = !ath_bt_config.bt_rxclear_polarity;
+ if (AR_SREV_SOC(ah)) {
+ first_slot_time = 0x1d;
+ time_extend = 0xa;
+
+ btcoex_hw->bt_coex_mode3 =
+ SM(ath_bt_config.wl_active_time, AR_BT_WL_ACTIVE_TIME) |
+ SM(ath_bt_config.wl_qc_time, AR_BT_WL_QC_TIME);
+
+ btcoex_hw->bt_coex_mode2 =
+ AR_BT_PROTECT_BT_AFTER_WAKEUP |
+ AR_BT_PHY_ERR_BT_COLL_ENABLE;
+ }
+
btcoex_hw->bt_coex_mode =
(btcoex_hw->bt_coex_mode & AR_BT_QCU_THRESH) |
- SM(ath_bt_config.bt_time_extend, AR_BT_TIME_EXTEND) |
+ SM(time_extend, AR_BT_TIME_EXTEND) |
SM(ath_bt_config.bt_txstate_extend, AR_BT_TXSTATE_EXTEND) |
SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) |
SM(ath_bt_config.bt_mode, AR_BT_MODE) |
SM(ath_bt_config.bt_quiet_collision, AR_BT_QUIET) |
SM(rxclear_polarity, AR_BT_RX_CLEAR_POLARITY) |
SM(ath_bt_config.bt_priority_time, AR_BT_PRIORITY_TIME) |
- SM(ath_bt_config.bt_first_slot_time, AR_BT_FIRST_SLOT_TIME) |
+ SM(first_slot_time, AR_BT_FIRST_SLOT_TIME) |
SM(qnum, AR_BT_QCU_THRESH);
- btcoex_hw->bt_coex_mode2 =
+ btcoex_hw->bt_coex_mode2 |=
SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) |
SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
AR_BT_DISABLE_BT_ANT;
}
EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
+static void ath9k_hw_btcoex_pin_init(struct ath_hw *ah, u8 wlanactive_gpio,
+ u8 btactive_gpio, u8 btpriority_gpio)
+{
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ struct ath9k_platform_data *pdata = ah->dev->platform_data;
+
+ if (btcoex_hw->scheme != ATH_BTCOEX_CFG_2WIRE &&
+ btcoex_hw->scheme != ATH_BTCOEX_CFG_3WIRE)
+ return;
+
+ /* bt priority GPIO will be ignored by 2 wire scheme */
+ if (pdata && (pdata->bt_active_pin || pdata->bt_priority_pin ||
+ pdata->wlan_active_pin)) {
+ btcoex_hw->btactive_gpio = pdata->bt_active_pin;
+ btcoex_hw->wlanactive_gpio = pdata->wlan_active_pin;
+ btcoex_hw->btpriority_gpio = pdata->bt_priority_pin;
+ } else {
+ btcoex_hw->btactive_gpio = btactive_gpio;
+ btcoex_hw->wlanactive_gpio = wlanactive_gpio;
+ btcoex_hw->btpriority_gpio = btpriority_gpio;
+ }
+}
+
void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -107,19 +151,19 @@ void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
} else if (AR_SREV_9300_20_OR_LATER(ah)) {
btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
- btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
- btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
- btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9300;
- } else if (AR_SREV_9280_20_OR_LATER(ah)) {
- btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9280;
- btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9280;
- if (AR_SREV_9285(ah)) {
+ ath9k_hw_btcoex_pin_init(ah, ATH_WLANACTIVE_GPIO_9300,
+ ATH_BTACTIVE_GPIO_9300,
+ ATH_BTPRIORITY_GPIO_9300);
+ } else if (AR_SREV_9280_20_OR_LATER(ah)) {
+ if (AR_SREV_9285(ah))
btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
- btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9285;
- } else {
+ else
btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE;
- }
+
+ ath9k_hw_btcoex_pin_init(ah, ATH_WLANACTIVE_GPIO_9280,
+ ATH_BTACTIVE_GPIO_9280,
+ ATH_BTPRIORITY_GPIO_9285);
}
}
EXPORT_SYMBOL(ath9k_hw_btcoex_init_scheme);
@@ -137,12 +181,14 @@ void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
/* Set input mux for bt_active to gpio pin */
- REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
- AR_GPIO_INPUT_MUX1_BT_ACTIVE,
- btcoex_hw->btactive_gpio);
+ if (!AR_SREV_SOC(ah))
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ AR_GPIO_INPUT_MUX1_BT_ACTIVE,
+ btcoex_hw->btactive_gpio);
/* Configure the desired gpio port for input */
- ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
+ ath9k_hw_gpio_request_in(ah, btcoex_hw->btactive_gpio,
+ "ath9k-btactive");
}
EXPORT_SYMBOL(ath9k_hw_btcoex_init_2wire);
@@ -157,21 +203,33 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
/* Set input mux for bt_priority_async and
* bt_active_async to GPIO pins */
- REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
- AR_GPIO_INPUT_MUX1_BT_ACTIVE,
- btcoex_hw->btactive_gpio);
-
- REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
- AR_GPIO_INPUT_MUX1_BT_PRIORITY,
- btcoex_hw->btpriority_gpio);
+ if (!AR_SREV_SOC(ah)) {
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ AR_GPIO_INPUT_MUX1_BT_ACTIVE,
+ btcoex_hw->btactive_gpio);
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ AR_GPIO_INPUT_MUX1_BT_PRIORITY,
+ btcoex_hw->btpriority_gpio);
+ }
/* Configure the desired GPIO ports for input */
-
- ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
- ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btpriority_gpio);
+ ath9k_hw_gpio_request_in(ah, btcoex_hw->btactive_gpio,
+ "ath9k-btactive");
+ ath9k_hw_gpio_request_in(ah, btcoex_hw->btpriority_gpio,
+ "ath9k-btpriority");
}
EXPORT_SYMBOL(ath9k_hw_btcoex_init_3wire);
+void ath9k_hw_btcoex_deinit(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+
+ ath9k_hw_gpio_free(ah, btcoex_hw->btactive_gpio);
+ ath9k_hw_gpio_free(ah, btcoex_hw->btpriority_gpio);
+ ath9k_hw_gpio_free(ah, btcoex_hw->wlanactive_gpio);
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_deinit);
+
void ath9k_hw_btcoex_init_mci(struct ath_hw *ah)
{
ah->btcoex_hw.mci.ready = false;
@@ -201,8 +259,9 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
/* Configure the desired GPIO port for TX_FRAME output */
- ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
- AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
+ ath9k_hw_gpio_request_out(ah, btcoex_hw->wlanactive_gpio,
+ "ath9k-wlanactive",
+ AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
}
/*
@@ -247,13 +306,13 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
txprio_shift[i-1]);
}
}
+
/* Last WLAN weight has to be adjusted wrt tx priority */
if (concur_tx) {
btcoex_hw->wlan_weight[i-1] &= ~(0xff << txprio_shift[i-1]);
btcoex_hw->wlan_weight[i-1] |= (btcoex_hw->tx_prio[stomp_type]
<< txprio_shift[i-1]);
}
-
}
EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
@@ -268,9 +327,14 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
* Program coex mode and weight registers to
* enable coex 3-wire
*/
+ if (AR_SREV_SOC(ah))
+ REG_CLR_BIT(ah, AR_BT_COEX_MODE2, AR_BT_PHY_ERR_BT_COLL_ENABLE);
+
REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode);
REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
+ if (AR_SREV_SOC(ah))
+ REG_WRITE(ah, AR_BT_COEX_MODE3, btcoex->bt_coex_mode3);
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]);
@@ -281,8 +345,6 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
} else
REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights);
-
-
if (AR_SREV_9271(ah)) {
val = REG_READ(ah, 0x50040);
val &= 0xFFFFFEFF;
@@ -292,8 +354,9 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
- ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio,
- AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
+ ath9k_hw_gpio_request_out(ah, btcoex->wlanactive_gpio,
+ "ath9k-wlanactive",
+ AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
}
static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
@@ -339,7 +402,8 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
break;
}
- if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI) {
+ if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI &&
+ !AR_SREV_SOC(ah)) {
REG_RMW(ah, AR_GPIO_PDPU,
(0x2 << (btcoex_hw->btactive_gpio * 2)),
(0x3 << (btcoex_hw->btactive_gpio * 2)));
@@ -364,8 +428,8 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
if (!AR_SREV_9300_20_OR_LATER(ah))
ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
- ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_gpio_request_out(ah, btcoex_hw->wlanactive_gpio,
+ NULL, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
if (btcoex_hw->scheme == ATH_BTCOEX_CFG_3WIRE) {
REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE);
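
The btcoex changes above migrate from ath9k_hw_cfg_output()/ath9k_hw_cfg_gpio_input() to the labelled ath9k_hw_gpio_request_{in,out}() helpers, paired with ath9k_hw_gpio_free() in the new ath9k_hw_btcoex_deinit(). A sketch of the request/free pairing, using only helpers named in this patch (btcoex_pins_example() is hypothetical):

static void btcoex_pins_example(struct ath_hw *ah)
{
	struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;

	/* Inputs now carry a label for the GPIO framework */
	ath9k_hw_gpio_request_in(ah, btcoex->btactive_gpio,
				 "ath9k-btactive");

	/* Outputs also select the hardware output mux */
	ath9k_hw_gpio_request_out(ah, btcoex->wlanactive_gpio,
				  "ath9k-wlanactive",
				  AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);

	/* Every requested pin must be released on teardown */
	ath9k_hw_gpio_free(ah, btcoex->btactive_gpio);
	ath9k_hw_gpio_free(ah, btcoex->wlanactive_gpio);
}
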
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index cd2f0a237..1bdfa8465 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -115,6 +115,7 @@ struct ath_btcoex_hw {
u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */
u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */
u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
+ u32 bt_coex_mode3; /* Register setting for AR_BT_COEX_MODE3 */
u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
u8 tx_prio[ATH_BTCOEX_STOMP_MAX];
@@ -123,6 +124,7 @@ struct ath_btcoex_hw {
void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah);
void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah);
+void ath9k_hw_btcoex_deinit(struct ath_hw *ah);
void ath9k_hw_btcoex_init_mci(struct ath_hw *ah);
void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 37f6d66d1..0f71146b7 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -145,14 +145,14 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
}
static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
- enum ieee80211_band band,
+ enum nl80211_band band,
int16_t *nft)
{
switch (band) {
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
*nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_5);
break;
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
*nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_2);
break;
default:
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 319cb5f25..e56bafcf5 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -107,9 +107,9 @@ void ath_chanctx_init(struct ath_softc *sc)
struct ieee80211_channel *chan;
int i, j;
- sband = &common->sbands[IEEE80211_BAND_2GHZ];
+ sband = &common->sbands[NL80211_BAND_2GHZ];
if (!sband->n_channels)
- sband = &common->sbands[IEEE80211_BAND_5GHZ];
+ sband = &common->sbands[NL80211_BAND_5GHZ];
chan = &sband->channels[0];
for (i = 0; i < ATH9K_NUM_CHANCTX; i++) {
@@ -1333,9 +1333,9 @@ void ath9k_offchannel_init(struct ath_softc *sc)
struct ieee80211_channel *chan;
int i;
- sband = &common->sbands[IEEE80211_BAND_2GHZ];
+ sband = &common->sbands[NL80211_BAND_2GHZ];
if (!sband->n_channels)
- sband = &common->sbands[IEEE80211_BAND_5GHZ];
+ sband = &common->sbands[NL80211_BAND_5GHZ];
chan = &sband->channels[0];
diff --git a/drivers/net/wireless/ath/ath9k/common-init.c b/drivers/net/wireless/ath/ath9k/common-init.c
index a006c1499..8b4f7fdab 100644
--- a/drivers/net/wireless/ath/ath9k/common-init.c
+++ b/drivers/net/wireless/ath/ath9k/common-init.c
@@ -19,14 +19,14 @@
#include "common.h"
#define CHAN2G(_freq, _idx) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_idx), \
.max_power = 20, \
}
#define CHAN5G(_freq, _idx) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = (_freq), \
.hw_value = (_idx), \
.max_power = 20, \
@@ -139,12 +139,12 @@ int ath9k_cmn_init_channels_rates(struct ath_common *common)
memcpy(channels, ath9k_2ghz_chantable,
sizeof(ath9k_2ghz_chantable));
- common->sbands[IEEE80211_BAND_2GHZ].channels = channels;
- common->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
- common->sbands[IEEE80211_BAND_2GHZ].n_channels =
+ common->sbands[NL80211_BAND_2GHZ].channels = channels;
+ common->sbands[NL80211_BAND_2GHZ].band = NL80211_BAND_2GHZ;
+ common->sbands[NL80211_BAND_2GHZ].n_channels =
ARRAY_SIZE(ath9k_2ghz_chantable);
- common->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
- common->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+ common->sbands[NL80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+ common->sbands[NL80211_BAND_2GHZ].n_bitrates =
ARRAY_SIZE(ath9k_legacy_rates);
}
@@ -156,13 +156,13 @@ int ath9k_cmn_init_channels_rates(struct ath_common *common)
memcpy(channels, ath9k_5ghz_chantable,
sizeof(ath9k_5ghz_chantable));
- common->sbands[IEEE80211_BAND_5GHZ].channels = channels;
- common->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
- common->sbands[IEEE80211_BAND_5GHZ].n_channels =
+ common->sbands[NL80211_BAND_5GHZ].channels = channels;
+ common->sbands[NL80211_BAND_5GHZ].band = NL80211_BAND_5GHZ;
+ common->sbands[NL80211_BAND_5GHZ].n_channels =
ARRAY_SIZE(ath9k_5ghz_chantable);
- common->sbands[IEEE80211_BAND_5GHZ].bitrates =
+ common->sbands[NL80211_BAND_5GHZ].bitrates =
ath9k_legacy_rates + 4;
- common->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+ common->sbands[NL80211_BAND_5GHZ].n_bitrates =
ARRAY_SIZE(ath9k_legacy_rates) - 4;
}
return 0;
@@ -236,9 +236,9 @@ void ath9k_cmn_reload_chainmask(struct ath_hw *ah)
if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
ath9k_cmn_setup_ht_cap(ah,
- &common->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ &common->sbands[NL80211_BAND_2GHZ].ht_cap);
if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
ath9k_cmn_setup_ht_cap(ah,
- &common->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+ &common->sbands[NL80211_BAND_5GHZ].ht_cap);
}
EXPORT_SYMBOL(ath9k_cmn_reload_chainmask);
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index e8c699446..b80e08b13 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -173,7 +173,7 @@ int ath9k_cmn_process_rate(struct ath_common *common,
struct ieee80211_rx_status *rxs)
{
struct ieee80211_supported_band *sband;
- enum ieee80211_band band;
+ enum nl80211_band band;
unsigned int i = 0;
struct ath_hw *ah = common->ah;
@@ -305,7 +305,7 @@ static void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
ichan->channel = chan->center_freq;
ichan->chan = chan;
- if (chan->band == IEEE80211_BAND_5GHZ)
+ if (chan->band == NL80211_BAND_5GHZ)
flags |= CHANNEL_5GHZ;
switch (chandef->width) {
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 6de64cfac..c56e40ff3 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -916,10 +916,21 @@ static int open_file_regdump(struct inode *inode, struct file *file)
struct ath_softc *sc = inode->i_private;
unsigned int len = 0;
u8 *buf;
- int i;
+ int i, j = 0;
unsigned long num_regs, regdump_len, max_reg_offset;
+ const struct reg_hole {
+ u32 start;
+ u32 end;
+ } reg_hole_list[] = {
+ {0x0200, 0x07fc},
+ {0x0c00, 0x0ffc},
+ {0x2000, 0x3ffc},
+ {0x4100, 0x6ffc},
+ {0x705c, 0x7ffc},
+ {0x0000, 0x0000}
+ };
- max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x16bd4 : 0xb500;
+ max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x8800 : 0xb500;
num_regs = max_reg_offset / 4 + 1;
regdump_len = num_regs * REGDUMP_LINE_SIZE + 1;
buf = vmalloc(regdump_len);
@@ -927,9 +938,16 @@ static int open_file_regdump(struct inode *inode, struct file *file)
return -ENOMEM;
ath9k_ps_wakeup(sc);
- for (i = 0; i < num_regs; i++)
+ for (i = 0; i < num_regs; i++) {
+ if (reg_hole_list[j].start == i << 2) {
+ i = reg_hole_list[j].end >> 2;
+ j++;
+ continue;
+ }
+
len += scnprintf(buf + len, regdump_len - len,
"0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2));
+ }
ath9k_ps_restore(sc);
file->private_data = buf;
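The rewritten dump loop skips documented holes in the register space: on hitting a hole's start offset it jumps i to the hole's last 32-bit word and lets the for-increment step past it. A self-contained illustration of the same walk (the hole bounds and register count here are made up, not the driver's):

	#include <stdio.h>

	struct reg_hole { unsigned start, end; };

	int main(void)
	{
		const struct reg_hole holes[] = {
			{0x0200, 0x07fc}, {0x0c00, 0x0ffc}, {0x0000, 0x0000}
		};
		unsigned num_regs = 0x1000 / 4 + 1, i, j = 0;

		for (i = 0; i < num_regs; i++) {
			if (holes[j].start == i << 2) {
				/* land on the hole's last word; the loop's
				 * i++ then steps just past the hole */
				i = holes[j].end >> 2;
				j++;
				continue;
			}
			printf("0x%06x\n", i << 2);	/* dumped offset */
		}
		return 0;
	}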
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index c2ca57a2e..b66cfa913 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -139,7 +139,7 @@ void ath_debug_rate_stats(struct ath_softc *sc,
}
if (IS_OFDM_RATE(rs->rs_rate)) {
- if (ah->curchan->chan->band == IEEE80211_BAND_2GHZ)
+ if (ah->curchan->chan->band == NL80211_BAND_2GHZ)
rstats->ofdm_stats[rxs->rate_idx - 4].ofdm_cnt++;
else
rstats->ofdm_stats[rxs->rate_idx].ofdm_cnt++;
@@ -173,7 +173,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf,
struct ath_hw *ah = sc->sc_ah;
struct ath_rx_rate_stats *rstats;
struct ieee80211_sta *sta = an->sta;
- enum ieee80211_band band;
+ enum nl80211_band band;
u32 len = 0, size = 4096;
char *buf;
size_t retval;
@@ -206,7 +206,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf,
len += scnprintf(buf + len, size - len, "\n");
legacy:
- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
PRINT_CCK_RATE("CCK-1M/LP", 0, false);
PRINT_CCK_RATE("CCK-2M/LP", 1, false);
PRINT_CCK_RATE("CCK-5.5M/LP", 2, false);
diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c
index 22b3cc4c2..d2ff0fc04 100644
--- a/drivers/net/wireless/ath/ath9k/dynack.c
+++ b/drivers/net/wireless/ath/ath9k/dynack.c
@@ -212,7 +212,7 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
struct ieee80211_tx_rate *rates = info->status.rates;
rate = &common->sbands[info->band].bitrates[rates[ridx].idx];
- if (info->band == IEEE80211_BAND_2GHZ &&
+ if (info->band == NL80211_BAND_2GHZ &&
!(rate->flags & IEEE80211_RATE_ERP_G))
phy = WLAN_RC_PHY_CCK;
else
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 284706798..490f74d9d 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -21,6 +21,33 @@
/********************************/
#ifdef CONFIG_MAC80211_LEDS
+
+void ath_fill_led_pin(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ /* Set default led pin if invalid */
+ if (ah->led_pin < 0) {
+ if (AR_SREV_9287(ah))
+ ah->led_pin = ATH_LED_PIN_9287;
+ else if (AR_SREV_9485(ah))
+ ah->led_pin = ATH_LED_PIN_9485;
+ else if (AR_SREV_9300(ah))
+ ah->led_pin = ATH_LED_PIN_9300;
+ else if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ ah->led_pin = ATH_LED_PIN_9462;
+ else
+ ah->led_pin = ATH_LED_PIN_DEF;
+ }
+
+ /* Configure gpio for output */
+ ath9k_hw_gpio_request_out(ah, ah->led_pin, "ath9k-led",
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+ /* LED off, active low */
+ ath9k_hw_set_gpio(ah, ah->led_pin, ah->config.led_active_high ? 0 : 1);
+}
+
static void ath_led_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
@@ -40,6 +67,8 @@ void ath_deinit_leds(struct ath_softc *sc)
ath_led_brightness(&sc->led_cdev, LED_OFF);
led_classdev_unregister(&sc->led_cdev);
+
+ ath9k_hw_gpio_free(sc->sc_ah, sc->sc_ah->led_pin);
}
void ath_init_leds(struct ath_softc *sc)
@@ -49,6 +78,8 @@ void ath_init_leds(struct ath_softc *sc)
if (AR_SREV_9100(sc->sc_ah))
return;
+ ath_fill_led_pin(sc);
+
if (!ath9k_led_blink)
sc->led_cdev.default_trigger =
ieee80211_get_radio_led_name(sc->hw);
@@ -64,37 +95,6 @@ void ath_init_leds(struct ath_softc *sc)
sc->led_registered = true;
}
-
-void ath_fill_led_pin(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- if (AR_SREV_9100(ah))
- return;
-
- if (ah->led_pin >= 0) {
- if (!((1 << ah->led_pin) & AR_GPIO_OE_OUT_MASK))
- ath9k_hw_request_gpio(ah, ah->led_pin, "ath9k-led");
- return;
- }
-
- if (AR_SREV_9287(ah))
- ah->led_pin = ATH_LED_PIN_9287;
- else if (AR_SREV_9485(sc->sc_ah))
- ah->led_pin = ATH_LED_PIN_9485;
- else if (AR_SREV_9300(sc->sc_ah))
- ah->led_pin = ATH_LED_PIN_9300;
- else if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
- ah->led_pin = ATH_LED_PIN_9462;
- else
- ah->led_pin = ATH_LED_PIN_DEF;
-
- /* Configure gpio 1 for output */
- ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-
- /* LED off, active low */
- ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 0 : 1);
-}
#endif
/*******************/
@@ -402,6 +402,13 @@ void ath9k_deinit_btcoex(struct ath_softc *sc)
if (ath9k_hw_mci_is_enabled(ah))
ath_mci_cleanup(sc);
+ else {
+ enum ath_btcoex_scheme scheme = ath9k_hw_get_btcoex_scheme(ah);
+
+ if (scheme == ATH_BTCOEX_CFG_2WIRE ||
+ scheme == ATH_BTCOEX_CFG_3WIRE)
+ ath9k_hw_btcoex_deinit(sc->sc_ah);
+ }
}
int ath9k_init_btcoex(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 8cbf4904d..e1c338cb9 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -527,7 +527,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
struct sk_buff *skb)
{
struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
- int index = 0, i = 0, len = skb->len;
+ int index = 0, i, len = skb->len;
int rx_remain_len, rx_pkt_len;
u16 pool_index = 0;
u8 *ptr;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 2aabcbdab..ecb848b60 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -253,17 +253,19 @@ void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
ath9k_led_brightness(&priv->led_cdev, LED_OFF);
led_classdev_unregister(&priv->led_cdev);
cancel_work_sync(&priv->led_work);
+
+ ath9k_hw_gpio_free(priv->ah, priv->ah->led_pin);
}
void ath9k_configure_leds(struct ath9k_htc_priv *priv)
{
/* Configure gpio 1 for output */
- ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_gpio_request_out(priv->ah, priv->ah->led_pin,
+ "ath9k-led",
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
/* LED off, active low */
ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
-
}
void ath9k_init_leds(struct ath9k_htc_priv *priv)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 8647ab77c..c148c6c50 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -262,11 +262,11 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
__be32 tmpval[8];
int i, ret;
- for (i = 0; i < count; i++) {
- tmpaddr[i] = cpu_to_be32(addr[i]);
- }
+ for (i = 0; i < count; i++) {
+ tmpaddr[i] = cpu_to_be32(addr[i]);
+ }
- ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+ ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
(u8 *)tmpaddr , sizeof(u32) * count,
(u8 *)tmpval, sizeof(u32) * count,
100);
@@ -275,9 +275,9 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
"Multiple REGISTER READ FAILED (count: %d)\n", count);
}
- for (i = 0; i < count; i++) {
- val[i] = be32_to_cpu(tmpval[i]);
- }
+ for (i = 0; i < count; i++) {
+ val[i] = be32_to_cpu(tmpval[i]);
+ }
}
static void ath9k_regwrite_multi(struct ath_common *common)
@@ -765,11 +765,11 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
sizeof(struct htc_frame_hdr) + 4;
if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &common->sbands[IEEE80211_BAND_2GHZ];
+ hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &common->sbands[NL80211_BAND_2GHZ];
if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &common->sbands[IEEE80211_BAND_5GHZ];
+ hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &common->sbands[NL80211_BAND_5GHZ];
ath9k_cmn_reload_chainmask(ah);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 639294a9e..a553c91d4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -246,7 +246,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
struct ieee80211_conf *conf = &common->hw->conf;
bool fastcc;
struct ieee80211_channel *channel = hw->conf.chandef.chan;
- struct ath9k_hw_cal_data *caldata = NULL;
+ struct ath9k_hw_cal_data *caldata;
enum htc_phymode mode;
__be16 htc_mode;
u8 cmd_rsp;
@@ -274,10 +274,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
priv->ah->curchan->channel,
channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
fastcc);
-
- if (!fastcc)
- caldata = &priv->caldata;
-
+ caldata = fastcc ? NULL : &priv->caldata;
ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
if (ret) {
ath_err(common,
@@ -1770,8 +1767,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
memset(&tmask, 0, sizeof(struct ath9k_htc_target_rate_mask));
tmask.vif_index = avp->index;
- tmask.band = IEEE80211_BAND_2GHZ;
- tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_2GHZ].legacy);
+ tmask.band = NL80211_BAND_2GHZ;
+ tmask.mask = cpu_to_be32(mask->control[NL80211_BAND_2GHZ].legacy);
WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask);
if (ret) {
@@ -1781,8 +1778,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
goto out;
}
- tmask.band = IEEE80211_BAND_5GHZ;
- tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_5GHZ].legacy);
+ tmask.band = NL80211_BAND_5GHZ;
+ tmask.mask = cpu_to_be32(mask->control[NL80211_BAND_5GHZ].legacy);
WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask);
if (ret) {
@@ -1793,8 +1790,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
}
ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n",
- mask->control[IEEE80211_BAND_2GHZ].legacy,
- mask->control[IEEE80211_BAND_5GHZ].legacy);
+ mask->control[NL80211_BAND_2GHZ].legacy,
+ mask->control[NL80211_BAND_5GHZ].legacy);
out:
return ret;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index cc9648f84..f333ef1e3 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -494,7 +494,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
if (txs->ts_flags & ATH9K_HTC_TXSTAT_SGI)
rate->flags |= IEEE80211_TX_RC_SHORT_GI;
} else {
- if (cur_conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+ if (cur_conf->chandef.chan->band == NL80211_BAND_5GHZ)
rate->idx += 4; /* No CCK rates */
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index e7a31016f..8b2895f9a 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1582,8 +1582,10 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
if (!(gpio_mask & 1))
continue;
- ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_gpio_request_out(ah, i, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
+ ath9k_hw_gpio_free(ah, i);
}
}
@@ -1958,7 +1960,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_qos(ah);
if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
+ ath9k_hw_gpio_request_in(ah, ah->rfkill_gpio, "ath9k-rfkill");
ath9k_hw_init_global_settings(ah);
@@ -2385,6 +2387,61 @@ static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
}
}
+static void ath9k_gpio_cap_init(struct ath_hw *ah)
+{
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+
+ if (AR_SREV_9271(ah)) {
+ pCap->num_gpio_pins = AR9271_NUM_GPIO;
+ pCap->gpio_mask = AR9271_GPIO_MASK;
+ } else if (AR_DEVID_7010(ah)) {
+ pCap->num_gpio_pins = AR7010_NUM_GPIO;
+ pCap->gpio_mask = AR7010_GPIO_MASK;
+ } else if (AR_SREV_9287(ah)) {
+ pCap->num_gpio_pins = AR9287_NUM_GPIO;
+ pCap->gpio_mask = AR9287_GPIO_MASK;
+ } else if (AR_SREV_9285(ah)) {
+ pCap->num_gpio_pins = AR9285_NUM_GPIO;
+ pCap->gpio_mask = AR9285_GPIO_MASK;
+ } else if (AR_SREV_9280(ah)) {
+ pCap->num_gpio_pins = AR9280_NUM_GPIO;
+ pCap->gpio_mask = AR9280_GPIO_MASK;
+ } else if (AR_SREV_9300(ah)) {
+ pCap->num_gpio_pins = AR9300_NUM_GPIO;
+ pCap->gpio_mask = AR9300_GPIO_MASK;
+ } else if (AR_SREV_9330(ah)) {
+ pCap->num_gpio_pins = AR9330_NUM_GPIO;
+ pCap->gpio_mask = AR9330_GPIO_MASK;
+ } else if (AR_SREV_9340(ah)) {
+ pCap->num_gpio_pins = AR9340_NUM_GPIO;
+ pCap->gpio_mask = AR9340_GPIO_MASK;
+ } else if (AR_SREV_9462(ah)) {
+ pCap->num_gpio_pins = AR9462_NUM_GPIO;
+ pCap->gpio_mask = AR9462_GPIO_MASK;
+ } else if (AR_SREV_9485(ah)) {
+ pCap->num_gpio_pins = AR9485_NUM_GPIO;
+ pCap->gpio_mask = AR9485_GPIO_MASK;
+ } else if (AR_SREV_9531(ah)) {
+ pCap->num_gpio_pins = AR9531_NUM_GPIO;
+ pCap->gpio_mask = AR9531_GPIO_MASK;
+ } else if (AR_SREV_9550(ah)) {
+ pCap->num_gpio_pins = AR9550_NUM_GPIO;
+ pCap->gpio_mask = AR9550_GPIO_MASK;
+ } else if (AR_SREV_9561(ah)) {
+ pCap->num_gpio_pins = AR9561_NUM_GPIO;
+ pCap->gpio_mask = AR9561_GPIO_MASK;
+ } else if (AR_SREV_9565(ah)) {
+ pCap->num_gpio_pins = AR9565_NUM_GPIO;
+ pCap->gpio_mask = AR9565_GPIO_MASK;
+ } else if (AR_SREV_9580(ah)) {
+ pCap->num_gpio_pins = AR9580_NUM_GPIO;
+ pCap->gpio_mask = AR9580_GPIO_MASK;
+ } else {
+ pCap->num_gpio_pins = AR_NUM_GPIO;
+ pCap->gpio_mask = AR_GPIO_MASK;
+ }
+}
+
int ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -2478,20 +2535,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
- if (AR_SREV_9271(ah))
- pCap->num_gpio_pins = AR9271_NUM_GPIO;
- else if (AR_DEVID_7010(ah))
- pCap->num_gpio_pins = AR7010_NUM_GPIO;
- else if (AR_SREV_9300_20_OR_LATER(ah))
- pCap->num_gpio_pins = AR9300_NUM_GPIO;
- else if (AR_SREV_9287_11_OR_LATER(ah))
- pCap->num_gpio_pins = AR9287_NUM_GPIO;
- else if (AR_SREV_9285_12_OR_LATER(ah))
- pCap->num_gpio_pins = AR9285_NUM_GPIO;
- else if (AR_SREV_9280_20_OR_LATER(ah))
- pCap->num_gpio_pins = AR928X_NUM_GPIO;
- else
- pCap->num_gpio_pins = AR_NUM_GPIO;
+ ath9k_gpio_cap_init(ah);
if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
@@ -2612,8 +2656,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
/* GPIO / RFKILL / Antennae */
/****************************/
-static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
- u32 gpio, u32 type)
+static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type)
{
int addr;
u32 gpio_shift, tmp;
@@ -2627,8 +2670,8 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
gpio_shift = (gpio % 6) * 5;
- if (AR_SREV_9280_20_OR_LATER(ah)
- || (addr != AR_GPIO_OUTPUT_MUX1)) {
+ if (AR_SREV_9280_20_OR_LATER(ah) ||
+ (addr != AR_GPIO_OUTPUT_MUX1)) {
REG_RMW(ah, addr, (type << gpio_shift),
(0x1f << gpio_shift));
} else {
@@ -2640,106 +2683,144 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
}
}
-void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
+/* The BSP should set the corresponding MUX register correctly. */
+static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out,
+ const char *label)
{
- u32 gpio_shift;
+ if (ah->caps.gpio_requested & BIT(gpio))
+ return;
- BUG_ON(gpio >= ah->caps.num_gpio_pins);
+ /* the pin may already be claimed by the BSP; free it before requesting */
+ gpio_free(gpio);
- if (AR_DEVID_7010(ah)) {
- gpio_shift = gpio;
- REG_RMW(ah, AR7010_GPIO_OE,
- (AR7010_GPIO_OE_AS_INPUT << gpio_shift),
- (AR7010_GPIO_OE_MASK << gpio_shift));
+ if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label))
return;
- }
- gpio_shift = gpio << 1;
- REG_RMW(ah,
- AR_GPIO_OE_OUT,
- (AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
- (AR_GPIO_OE_OUT_DRV << gpio_shift));
+ ah->caps.gpio_requested |= BIT(gpio);
}
-EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input);
-u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
+static void ath9k_hw_gpio_cfg_wmac(struct ath_hw *ah, u32 gpio, bool out,
+ u32 ah_signal_type)
{
-#define MS_REG_READ(x, y) \
- (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y)))
-
- if (gpio >= ah->caps.num_gpio_pins)
- return 0xffffffff;
+ u32 gpio_set, gpio_shift = gpio;
if (AR_DEVID_7010(ah)) {
- u32 val;
- val = REG_READ(ah, AR7010_GPIO_IN);
- return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
- } else if (AR_SREV_9300_20_OR_LATER(ah))
- return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
- AR_GPIO_BIT(gpio)) != 0;
- else if (AR_SREV_9271(ah))
- return MS_REG_READ(AR9271, gpio) != 0;
- else if (AR_SREV_9287_11_OR_LATER(ah))
- return MS_REG_READ(AR9287, gpio) != 0;
- else if (AR_SREV_9285_12_OR_LATER(ah))
- return MS_REG_READ(AR9285, gpio) != 0;
- else if (AR_SREV_9280_20_OR_LATER(ah))
- return MS_REG_READ(AR928X, gpio) != 0;
- else
- return MS_REG_READ(AR, gpio) != 0;
+ gpio_set = out ?
+ AR7010_GPIO_OE_AS_OUTPUT : AR7010_GPIO_OE_AS_INPUT;
+ REG_RMW(ah, AR7010_GPIO_OE, gpio_set << gpio_shift,
+ AR7010_GPIO_OE_MASK << gpio_shift);
+ } else if (AR_SREV_SOC(ah)) {
+ gpio_set = out ? 1 : 0;
+ REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift,
+ gpio_set << gpio_shift);
+ } else {
+ gpio_shift = gpio << 1;
+ gpio_set = out ?
+ AR_GPIO_OE_OUT_DRV_ALL : AR_GPIO_OE_OUT_DRV_NO;
+ REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift,
+ AR_GPIO_OE_OUT_DRV << gpio_shift);
+
+ if (out)
+ ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
+ }
}
-EXPORT_SYMBOL(ath9k_hw_gpio_get);
-void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
- u32 ah_signal_type)
+static void ath9k_hw_gpio_request(struct ath_hw *ah, u32 gpio, bool out,
+ const char *label, u32 ah_signal_type)
{
- u32 gpio_shift;
+ WARN_ON(gpio >= ah->caps.num_gpio_pins);
- if (AR_DEVID_7010(ah)) {
- gpio_shift = gpio;
- REG_RMW(ah, AR7010_GPIO_OE,
- (AR7010_GPIO_OE_AS_OUTPUT << gpio_shift),
- (AR7010_GPIO_OE_MASK << gpio_shift));
- return;
- }
+ if (BIT(gpio) & ah->caps.gpio_mask)
+ ath9k_hw_gpio_cfg_wmac(ah, gpio, out, ah_signal_type);
+ else if (AR_SREV_SOC(ah))
+ ath9k_hw_gpio_cfg_soc(ah, gpio, out, label);
+ else
+ WARN_ON(1);
+}
- ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
- gpio_shift = 2 * gpio;
- REG_RMW(ah,
- AR_GPIO_OE_OUT,
- (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
- (AR_GPIO_OE_OUT_DRV << gpio_shift));
+void ath9k_hw_gpio_request_in(struct ath_hw *ah, u32 gpio, const char *label)
+{
+ ath9k_hw_gpio_request(ah, gpio, false, label, 0);
}
-EXPORT_SYMBOL(ath9k_hw_cfg_output);
+EXPORT_SYMBOL(ath9k_hw_gpio_request_in);
-void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
+void ath9k_hw_gpio_request_out(struct ath_hw *ah, u32 gpio, const char *label,
+ u32 ah_signal_type)
{
- if (AR_DEVID_7010(ah)) {
- val = val ? 0 : 1;
- REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio),
- AR_GPIO_BIT(gpio));
+ ath9k_hw_gpio_request(ah, gpio, true, label, ah_signal_type);
+}
+EXPORT_SYMBOL(ath9k_hw_gpio_request_out);
+
+void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio)
+{
+ if (!AR_SREV_SOC(ah))
return;
+
+ WARN_ON(gpio >= ah->caps.num_gpio_pins);
+
+ if (ah->caps.gpio_requested & BIT(gpio)) {
+ gpio_free(gpio);
+ ah->caps.gpio_requested &= ~BIT(gpio);
}
+}
+EXPORT_SYMBOL(ath9k_hw_gpio_free);
- if (AR_SREV_9271(ah))
- val = ~val;
+u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
+{
+ u32 val = 0xffffffff;
- if ((1 << gpio) & AR_GPIO_OE_OUT_MASK)
- REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
- AR_GPIO_BIT(gpio));
- else
- gpio_set_value(gpio, val & 1);
+#define MS_REG_READ(x, y) \
+ (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & BIT(y))
+
+ WARN_ON(gpio >= ah->caps.num_gpio_pins);
+
+ if (BIT(gpio) & ah->caps.gpio_mask) {
+ if (AR_SREV_9271(ah))
+ val = MS_REG_READ(AR9271, gpio);
+ else if (AR_SREV_9287(ah))
+ val = MS_REG_READ(AR9287, gpio);
+ else if (AR_SREV_9285(ah))
+ val = MS_REG_READ(AR9285, gpio);
+ else if (AR_SREV_9280(ah))
+ val = MS_REG_READ(AR928X, gpio);
+ else if (AR_DEVID_7010(ah))
+ val = REG_READ(ah, AR7010_GPIO_IN) & BIT(gpio);
+ else if (AR_SREV_9300_20_OR_LATER(ah))
+ val = REG_READ(ah, AR_GPIO_IN) & BIT(gpio);
+ else
+ val = MS_REG_READ(AR, gpio);
+ } else if (BIT(gpio) & ah->caps.gpio_requested) {
+ val = !!gpio_get_value(gpio); /* gpiolib returns 0/non-zero, not a mask */
+ } else {
+ WARN_ON(1);
+ }
+
+ return val;
}
-EXPORT_SYMBOL(ath9k_hw_set_gpio);
+EXPORT_SYMBOL(ath9k_hw_gpio_get);
-void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label)
+void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
{
- if (gpio >= ah->caps.num_gpio_pins)
- return;
+ WARN_ON(gpio >= ah->caps.num_gpio_pins);
+
+ if (AR_DEVID_7010(ah) || AR_SREV_9271(ah))
+ val = !val;
+ else
+ val = !!val;
+
+ if (BIT(gpio) & ah->caps.gpio_mask) {
+ u32 out_addr = AR_DEVID_7010(ah) ?
+ AR7010_GPIO_OUT : AR_GPIO_IN_OUT;
- gpio_request_one(gpio, GPIOF_DIR_OUT | GPIOF_INIT_LOW, label);
+ REG_RMW(ah, out_addr, val << gpio, BIT(gpio));
+ } else if (BIT(gpio) & ah->caps.gpio_requested) {
+ gpio_set_value(gpio, val);
+ } else {
+ WARN_ON(1);
+ }
}
-EXPORT_SYMBOL(ath9k_hw_request_gpio);
+EXPORT_SYMBOL(ath9k_hw_set_gpio);
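Taken together, the hunk above replaces the old cfg_input/cfg_output calls with a request/free discipline. A minimal consumer-side sketch using the API exactly as declared in hw.h (the pin number and label are arbitrary):

	static void example_led_blink(struct ath_hw *ah, u32 pin)
	{
		/* claim the pin and mux it as a plain output */
		ath9k_hw_gpio_request_out(ah, pin, "example-led",
					  AR_GPIO_OUTPUT_MUX_AS_OUTPUT);

		ath9k_hw_set_gpio(ah, pin, 1);	/* drive high */
		ath9k_hw_set_gpio(ah, pin, 0);	/* drive low */

		/* release; only does real work for gpiolib-backed
		 * (SoC) pins, a no-op otherwise */
		ath9k_hw_gpio_free(ah, pin);
	}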
void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
{
@@ -2833,8 +2914,7 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
{
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
struct ieee80211_channel *channel;
- int chan_pwr, new_pwr, max_gain;
- int ant_gain, ant_reduction = 0;
+ int chan_pwr, new_pwr;
if (!chan)
return;
@@ -2842,15 +2922,10 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
channel = chan->chan;
chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
new_pwr = min_t(int, chan_pwr, reg->power_limit);
- max_gain = chan_pwr - new_pwr + channel->max_antenna_gain * 2;
-
- ant_gain = get_antenna_gain(ah, chan);
- if (ant_gain > max_gain)
- ant_reduction = ant_gain - max_gain;
ah->eep_ops->set_txpower(ah, chan,
ath9k_regd_get_ctl(reg, chan),
- ant_reduction, new_pwr, test);
+ get_antenna_gain(ah, chan), new_pwr, test);
}
void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 831a54415..9cbca1229 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -160,7 +160,6 @@
#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA 0x1e
#define AR_GPIOD_MASK 0x00001FFF
-#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
#define BASE_ACTIVATE_DELAY 100
#define RTC_PLL_SETTLE_DELAY (AR_SREV_9340(ah) ? 1000 : 100)
@@ -301,6 +300,8 @@ struct ath9k_hw_capabilities {
u8 max_txchains;
u8 max_rxchains;
u8 num_gpio_pins;
+ u32 gpio_mask;
+ u32 gpio_requested;
u8 rx_hp_qdepth;
u8 rx_lp_qdepth;
u8 rx_status_len;
@@ -1019,12 +1020,12 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah);
u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
/* GPIO / RFKILL / Antennae */
-void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio);
+void ath9k_hw_gpio_request_in(struct ath_hw *ah, u32 gpio, const char *label);
+void ath9k_hw_gpio_request_out(struct ath_hw *ah, u32 gpio, const char *label,
+ u32 ah_signal_type);
+void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio);
u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio);
-void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
- u32 ah_signal_type);
void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
-void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label);
void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
/* General Operation */
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index deded2a9a..2ee862475 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -481,7 +481,7 @@ static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
static int ath9k_eeprom_request(struct ath_softc *sc, const char *name)
{
struct ath9k_eeprom_ctx ec;
- struct ath_hw *ah = ah = sc->sc_ah;
+ struct ath_hw *ah = sc->sc_ah;
int err;
/* try to load the EEPROM content asynchronously */
@@ -667,7 +667,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ath9k_cmn_init_crypto(sc->sc_ah);
ath9k_init_misc(sc);
- ath_fill_led_pin(sc);
ath_chanctx_init(sc);
ath9k_offchannel_init(sc);
@@ -713,9 +712,9 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
struct ath9k_channel *curchan = ah->curchan;
if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
- ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
+ ath9k_init_band_txpower(sc, NL80211_BAND_2GHZ);
if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
- ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
+ ath9k_init_band_txpower(sc, NL80211_BAND_5GHZ);
ah->curchan = curchan;
}
@@ -887,11 +886,11 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
sc->ant_tx = hw->wiphy->available_antennas_tx;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &common->sbands[IEEE80211_BAND_2GHZ];
+ hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &common->sbands[NL80211_BAND_2GHZ];
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &common->sbands[IEEE80211_BAND_5GHZ];
+ hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &common->sbands[NL80211_BAND_5GHZ];
#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
ath9k_set_mcc_capab(sc, hw);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 3aed43a63..8b6398850 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -718,12 +718,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (!ath_complete_reset(sc, false))
ah->reset_power_on = false;
- if (ah->led_pin >= 0) {
- ath9k_hw_cfg_output(ah, ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ if (ah->led_pin >= 0)
ath9k_hw_set_gpio(ah, ah->led_pin,
(ah->config.led_active_high) ? 1 : 0);
- }
/*
* Reset key cache to sane defaults (all entries cleared) instead of
@@ -867,11 +864,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
spin_lock_bh(&sc->sc_pcu_lock);
- if (ah->led_pin >= 0) {
+ if (ah->led_pin >= 0)
ath9k_hw_set_gpio(ah, ah->led_pin,
(ah->config.led_active_high) ? 0 : 1);
- ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
- }
ath_prepare_reset(sc);
@@ -1938,14 +1933,14 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
if (idx == 0)
ath_update_survey_stats(sc);
- sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
sband = NULL;
}
if (!sband)
- sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
if (!sband || idx >= sband->n_channels) {
spin_unlock_bh(&common->cc_lock);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index c8d35feba..80ff69f99 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -985,6 +985,10 @@
#define AR_SREV_9561(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561))
+#define AR_SREV_SOC(_ah) \
+ (AR_SREV_9340(_ah) || AR_SREV_9531(_ah) || AR_SREV_9550(_ah) || \
+ AR_SREV_9561(_ah))
+
/* NOTE: When adding chips newer than Peacock, add chip check here */
#define AR_SREV_9580_10_OR_LATER(_ah) \
(AR_SREV_9580(_ah))
@@ -1104,14 +1108,46 @@ enum {
#define AR_PCIE_PHY_REG3 0x18c08
+/* Define the correct GPIO numbers and MASK bits to identify the WMAC
+ * GPIO resource.
+ * The SoC chips (AR9340, AR9531, AR9550, AR9561), which rely on the
+ * gpiolib framework, may access all GPIOs; the SoC AR9330 is restricted
+ * to the WMAC GPIOs only, since it shares the same GPIO design as the
+ * older chips.
+ */
#define AR_NUM_GPIO 14
-#define AR928X_NUM_GPIO 10
+#define AR9280_NUM_GPIO 10
#define AR9285_NUM_GPIO 12
-#define AR9287_NUM_GPIO 11
+#define AR9287_NUM_GPIO 10
#define AR9271_NUM_GPIO 16
-#define AR9300_NUM_GPIO 17
+#define AR9300_NUM_GPIO 16
+#define AR9330_NUM_GPIO 16
+#define AR9340_NUM_GPIO 23
+#define AR9462_NUM_GPIO 14
+#define AR9485_NUM_GPIO 12
+#define AR9531_NUM_GPIO 18
+#define AR9550_NUM_GPIO 24
+#define AR9561_NUM_GPIO 23
+#define AR9565_NUM_GPIO 14
+#define AR9580_NUM_GPIO 16
#define AR7010_NUM_GPIO 16
+#define AR_GPIO_MASK 0x00003FFF
+#define AR9271_GPIO_MASK 0x0000FFFF
+#define AR9280_GPIO_MASK 0x000003FF
+#define AR9285_GPIO_MASK 0x00000FFF
+#define AR9287_GPIO_MASK 0x000003FF
+#define AR9300_GPIO_MASK 0x0000F4FF
+#define AR9330_GPIO_MASK 0x0000F4FF
+#define AR9340_GPIO_MASK 0x0000000F
+#define AR9462_GPIO_MASK 0x00003FFF
+#define AR9485_GPIO_MASK 0x00000FFF
+#define AR9531_GPIO_MASK 0x0000000F
+#define AR9550_GPIO_MASK 0x0000000F
+#define AR9561_GPIO_MASK 0x0000000F
+#define AR9565_GPIO_MASK 0x00003FFF
+#define AR9580_GPIO_MASK 0x0000F4FF
+#define AR7010_GPIO_MASK 0x0000FFFF
+
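Reading the two new capability fields together: a pin is valid if it is below num_gpio_pins, and its bit in gpio_mask says whether it is wired to the WMAC registers; on SoC chips everything else falls through to gpiolib. For AR9340, for example, num_gpio_pins is 23 and the mask is 0x0000000F, so pins 0-3 are WMAC pins and pins 4-22 go through gpiolib. A sketch of that test (the helper name is illustrative):

	static bool example_is_wmac_pin(struct ath_hw *ah, u32 gpio)
	{
		if (gpio >= ah->caps.num_gpio_pins)
			return false;	/* not a valid pin at all */

		/* set bit -> WMAC register pin; clear -> gpiolib on SoC */
		return !!(BIT(gpio) & ah->caps.gpio_mask);
	}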
#define AR_GPIO_IN_OUT (AR_SREV_9340(ah) ? 0x4028 : 0x4048)
#define AR_GPIO_IN_VAL 0x0FFFC000
#define AR_GPIO_IN_VAL_S 14
@@ -1132,8 +1168,6 @@ enum {
#define AR_GPIO_OE_OUT (AR_SREV_9340(ah) ? 0x4030 : \
(AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c))
-#define AR_GPIO_OE_OUT_MASK (AR_SREV_9550_OR_LATER(ah) ? \
- 0x0000000F : 0xFFFFFFFF)
#define AR_GPIO_OE_OUT_DRV 0x3
#define AR_GPIO_OE_OUT_DRV_NO 0x0
#define AR_GPIO_OE_OUT_DRV_LOW 0x1
@@ -1858,15 +1892,33 @@ enum {
#define AR9300_BT_WGHT 0xcccc4444
-#define AR_BT_COEX_MODE2 0x817c
-#define AR_BT_BCN_MISS_THRESH 0x000000ff
-#define AR_BT_BCN_MISS_THRESH_S 0
-#define AR_BT_BCN_MISS_CNT 0x0000ff00
-#define AR_BT_BCN_MISS_CNT_S 8
-#define AR_BT_HOLD_RX_CLEAR 0x00010000
-#define AR_BT_HOLD_RX_CLEAR_S 16
-#define AR_BT_DISABLE_BT_ANT 0x00100000
-#define AR_BT_DISABLE_BT_ANT_S 20
+#define AR_BT_COEX_MODE2 0x817c
+#define AR_BT_BCN_MISS_THRESH 0x000000ff
+#define AR_BT_BCN_MISS_THRESH_S 0
+#define AR_BT_BCN_MISS_CNT 0x0000ff00
+#define AR_BT_BCN_MISS_CNT_S 8
+#define AR_BT_HOLD_RX_CLEAR 0x00010000
+#define AR_BT_HOLD_RX_CLEAR_S 16
+#define AR_BT_PROTECT_BT_AFTER_WAKEUP 0x00080000
+#define AR_BT_PROTECT_BT_AFTER_WAKEUP_S 19
+#define AR_BT_DISABLE_BT_ANT 0x00100000
+#define AR_BT_DISABLE_BT_ANT_S 20
+#define AR_BT_QUIET_2_WIRE 0x00200000
+#define AR_BT_QUIET_2_WIRE_S 21
+#define AR_BT_WL_ACTIVE_MODE 0x00c00000
+#define AR_BT_WL_ACTIVE_MODE_S 22
+#define AR_BT_WL_TXRX_SEPARATE 0x01000000
+#define AR_BT_WL_TXRX_SEPARATE_S 24
+#define AR_BT_RS_DISCARD_EXTEND 0x02000000
+#define AR_BT_RS_DISCARD_EXTEND_S 25
+#define AR_BT_TSF_BT_ACTIVE_CTRL 0x0c000000
+#define AR_BT_TSF_BT_ACTIVE_CTRL_S 26
+#define AR_BT_TSF_BT_PRIORITY_CTRL 0x30000000
+#define AR_BT_TSF_BT_PRIORITY_CTRL_S 28
+#define AR_BT_INTERRUPT_ENABLE 0x40000000
+#define AR_BT_INTERRUPT_ENABLE_S 30
+#define AR_BT_PHY_ERR_BT_COLL_ENABLE 0x80000000
+#define AR_BT_PHY_ERR_BT_COLL_ENABLE_S 31
#define AR_TXSIFS 0x81d0
#define AR_TXSIFS_TIME 0x000000FF
@@ -1875,6 +1927,16 @@ enum {
#define AR_TXSIFS_ACK_SHIFT 0x00007000
#define AR_TXSIFS_ACK_SHIFT_S 12
+#define AR_BT_COEX_MODE3 0x81d4
+#define AR_BT_WL_ACTIVE_TIME 0x000000ff
+#define AR_BT_WL_ACTIVE_TIME_S 0
+#define AR_BT_WL_QC_TIME 0x0000ff00
+#define AR_BT_WL_QC_TIME_S 8
+#define AR_BT_ALLOW_CONCURRENT_ACCESS 0x000f0000
+#define AR_BT_ALLOW_CONCURRENT_ACCESS_S 16
+#define AR_BT_AGC_SATURATION_CNT_ENABLE 0x00100000
+#define AR_BT_AGC_SATURATION_CNT_ENABLE_S 20
+
#define AR_TXOP_X 0x81ec
#define AR_TXOP_X_VAL 0x000000FF
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index c9cb2aad7..d38e50f96 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -55,11 +55,26 @@ static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size)
return j << 2;
}
+static u32 ath9k_rng_delay_get(u32 fail_stats)
+{
+ u32 delay;
+
+ if (fail_stats < 100)
+ delay = 10;
+ else if (fail_stats < 105)
+ delay = 1000;
+ else
+ delay = 10000;
+
+ return delay;
+}
+
static int ath9k_rng_kthread(void *data)
{
int bytes_read;
struct ath_softc *sc = data;
u32 *rng_buf;
+ u32 delay, fail_stats = 0;
rng_buf = kmalloc_array(ATH9K_RNG_BUF_SIZE, sizeof(u32), GFP_KERNEL);
if (!rng_buf)
@@ -69,10 +84,13 @@ static int ath9k_rng_kthread(void *data)
bytes_read = ath9k_rng_data_read(sc, rng_buf,
ATH9K_RNG_BUF_SIZE);
if (unlikely(!bytes_read)) {
- msleep_interruptible(10);
+ delay = ath9k_rng_delay_get(++fail_stats);
+ msleep_interruptible(delay);
continue;
}
+ fail_stats = 0;
+
/* sleep until entropy bits under write_wakeup_threshold */
add_hwgenerator_randomness((void *)rng_buf, bytes_read,
ATH9K_RNG_ENTROPY(bytes_read));
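The backoff is piecewise: roughly 10 ms per retry while fewer than 100 consecutive reads have failed, 1 s up to the 105th, then 10 s, with any successful read resetting the counter. A stand-alone illustration of the schedule (userspace, values printed for a few counts):

	#include <stdio.h>

	static unsigned rng_delay_get(unsigned fail_stats)
	{
		if (fail_stats < 100)
			return 10;	/* ms */
		else if (fail_stats < 105)
			return 1000;
		return 10000;
	}

	int main(void)
	{
		const unsigned fails[] = { 1, 99, 100, 104, 105, 1000 };
		for (unsigned i = 0; i < sizeof(fails) / sizeof(fails[0]); i++)
			printf("fail #%u -> sleep %u ms\n", fails[i],
			       rng_delay_get(fails[i]));
		return 0;
	}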
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index fe795fc52..8ddd604bd 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1112,7 +1112,7 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
bool is_2ghz;
struct modal_eep_header *pmodal;
- is_2ghz = info->band == IEEE80211_BAND_2GHZ;
+ is_2ghz = info->band == NL80211_BAND_2GHZ;
pmodal = &eep->modalHeader[is_2ghz];
power_ht40delta = pmodal->ht40PowerIncForPdadc;
} else {
@@ -1236,7 +1236,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
/* legacy rates */
rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
- if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
+ if ((tx_info->band == NL80211_BAND_2GHZ) &&
!(rate->flags & IEEE80211_RATE_ERP_G))
phy = WLAN_RC_PHY_CCK;
else
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index a2f005703..7d4a72dc9 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -48,7 +48,7 @@ int carl9170_set_dyn_sifs_ack(struct ar9170 *ar)
if (conf_is_ht40(&ar->hw->conf))
val = 0x010a;
else {
- if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+ if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
val = 0x105;
else
val = 0x104;
@@ -66,7 +66,7 @@ int carl9170_set_rts_cts_rate(struct ar9170 *ar)
rts_rate = 0x1da;
cts_rate = 0x10a;
} else {
- if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
/* 11 mbit CCK */
rts_rate = 033;
cts_rate = 003;
@@ -93,7 +93,7 @@ int carl9170_set_slot_time(struct ar9170 *ar)
return 0;
}
- if ((ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) ||
+ if ((ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) ||
vif->bss_conf.use_short_slot)
slottime = 9;
@@ -120,7 +120,7 @@ int carl9170_set_mac_rates(struct ar9170 *ar)
basic |= (vif->bss_conf.basic_rates & 0xff0) << 4;
rcu_read_unlock();
- if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
+ if (ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
mandatory = 0xff00; /* OFDM 6/9/12/18/24/36/48/54 */
else
mandatory = 0xff0f; /* OFDM (6/9../54) + CCK (1/2/5.5/11) */
@@ -512,10 +512,10 @@ int carl9170_set_mac_tpc(struct ar9170 *ar, struct ieee80211_channel *channel)
chains = AR9170_TX_PHY_TXCHAIN_1;
switch (channel->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
power = ar->power_2G_ofdm[0] & 0x3f;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
power = ar->power_5G_leg[0] & 0x3f;
break;
default:
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 4d1527a2e..ffb22a04b 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1666,7 +1666,7 @@ static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
return err;
}
- for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
+ for (b = 0; b < NUM_NL80211_BANDS; b++) {
band = ar->hw->wiphy->bands[b];
if (!band)
@@ -1941,13 +1941,13 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
}
if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
- ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] =
&carl9170_band_2GHz;
chans += carl9170_band_2GHz.n_channels;
bands++;
}
if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
- ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ] =
&carl9170_band_5GHz;
chans += carl9170_band_5GHz.n_channels;
bands++;
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
index dca6df13f..34d9fd770 100644
--- a/drivers/net/wireless/ath/carl9170/phy.c
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -540,11 +540,11 @@ static int carl9170_init_phy_from_eeprom(struct ar9170 *ar,
return carl9170_regwrite_result();
}
-static int carl9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
+static int carl9170_init_phy(struct ar9170 *ar, enum nl80211_band band)
{
int i, err;
u32 val;
- bool is_2ghz = band == IEEE80211_BAND_2GHZ;
+ bool is_2ghz = band == NL80211_BAND_2GHZ;
bool is_40mhz = conf_is_ht40(&ar->hw->conf);
carl9170_regwrite_begin(ar);
@@ -1125,13 +1125,13 @@ static int carl9170_set_freq_cal_data(struct ar9170 *ar,
u8 f, tmp;
switch (channel->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
f = channel->center_freq - 2300;
cal_freq_pier = ar->eeprom.cal_freq_pier_2G;
i = AR5416_NUM_2G_CAL_PIERS - 1;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
f = (channel->center_freq - 4800) / 5;
cal_freq_pier = ar->eeprom.cal_freq_pier_5G;
i = AR5416_NUM_5G_CAL_PIERS - 1;
@@ -1158,12 +1158,12 @@ static int carl9170_set_freq_cal_data(struct ar9170 *ar,
int j;
switch (channel->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
cal_pier_data = &ar->eeprom.
cal_pier_data_2G[chain][idx];
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
cal_pier_data = &ar->eeprom.
cal_pier_data_5G[chain][idx];
break;
@@ -1340,7 +1340,7 @@ static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw)
/* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */
return;
- if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
modes = mode_list_2ghz;
nr_modes = ARRAY_SIZE(mode_list_2ghz);
} else {
@@ -1607,7 +1607,7 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
return err;
err = carl9170_init_rf_banks_0_7(ar,
- channel->band == IEEE80211_BAND_5GHZ);
+ channel->band == NL80211_BAND_5GHZ);
if (err)
return err;
@@ -1621,7 +1621,7 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
return err;
err = carl9170_init_rf_bank4_pwr(ar,
- channel->band == IEEE80211_BAND_5GHZ,
+ channel->band == NL80211_BAND_5GHZ,
channel->center_freq, bw);
if (err)
return err;
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index d66533cbc..0c34c8729 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -417,7 +417,7 @@ static int carl9170_rx_mac_status(struct ar9170 *ar,
return -EINVAL;
}
- if (status->band == IEEE80211_BAND_2GHZ)
+ if (status->band == NL80211_BAND_2GHZ)
status->rate_idx += 4;
break;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index ae86a600d..2bf04c9ed 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -720,12 +720,12 @@ static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
/* +1 dBm for HT40 */
*tpc += 2;
- if (info->band == IEEE80211_BAND_2GHZ)
+ if (info->band == NL80211_BAND_2GHZ)
txpower = ar->power_2G_ht40;
else
txpower = ar->power_5G_ht40;
} else {
- if (info->band == IEEE80211_BAND_2GHZ)
+ if (info->band == NL80211_BAND_2GHZ)
txpower = ar->power_2G_ht20;
else
txpower = ar->power_5G_ht20;
@@ -734,7 +734,7 @@ static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
*phyrate = txrate->idx;
*tpc += txpower[idx & 7];
} else {
- if (info->band == IEEE80211_BAND_2GHZ) {
+ if (info->band == NL80211_BAND_2GHZ) {
if (idx < 4)
txpower = ar->power_2G_cck;
else
@@ -797,7 +797,7 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar,
* tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
*/
} else {
- if (info->band == IEEE80211_BAND_2GHZ) {
+ if (info->band == NL80211_BAND_2GHZ) {
if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M)
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK);
else
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 06ea6cc9e..7e15ed9ed 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -336,12 +336,12 @@ ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
struct ath_regulatory *reg,
enum nl80211_reg_initiator initiator)
{
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
unsigned int i;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!wiphy->bands[band])
continue;
sband = wiphy->bands[band];
@@ -374,7 +374,7 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy,
{
struct ieee80211_supported_band *sband;
- sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = wiphy->bands[NL80211_BAND_2GHZ];
if (!sband)
return;
@@ -402,10 +402,10 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
struct ieee80211_channel *ch;
unsigned int i;
- if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+ if (!wiphy->bands[NL80211_BAND_5GHZ])
return;
- sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
@@ -772,7 +772,7 @@ ath_regd_init(struct ath_regulatory *reg,
EXPORT_SYMBOL(ath_regd_init);
u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
if (!reg->regpair ||
(reg->country_code == CTRY_DEFAULT &&
@@ -794,9 +794,9 @@ u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
}
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
return reg->regpair->reg_2ghz_ctl;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
return reg->regpair->reg_5ghz_ctl;
default:
return NO_CTL;
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 37f53bd8f..565d3075f 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -255,7 +255,7 @@ int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request));
u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
- enum ieee80211_band band);
+ enum nl80211_band band);
void ath_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct ath_regulatory *reg);
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
index ef44a2da6..2a6bb62e7 100644
--- a/drivers/net/wireless/ath/wcn36xx/debug.c
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -33,9 +33,7 @@ static ssize_t read_file_bool_bmps(struct file *file, char __user *user_buf,
char buf[3];
list_for_each_entry(vif_priv, &wcn->vif_list, list) {
- vif = container_of((void *)vif_priv,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(vif_priv);
if (NL80211_IFTYPE_STATION == vif->type) {
if (vif_priv->pw_state == WCN36XX_BMPS)
buf[0] = '1';
@@ -70,9 +68,7 @@ static ssize_t write_file_bool_bmps(struct file *file,
case 'Y':
case '1':
list_for_each_entry(vif_priv, &wcn->vif_list, list) {
- vif = container_of((void *)vif_priv,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(vif_priv);
if (NL80211_IFTYPE_STATION == vif->type) {
wcn36xx_enable_keep_alive_null_packet(wcn, vif);
wcn36xx_pmc_enter_bmps_state(wcn, vif);
@@ -83,9 +79,7 @@ static ssize_t write_file_bool_bmps(struct file *file,
case 'N':
case '0':
list_for_each_entry(vif_priv, &wcn->vif_list, list) {
- vif = container_of((void *)vif_priv,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(vif_priv);
if (NL80211_IFTYPE_STATION == vif->type)
wcn36xx_pmc_exit_bmps_state(wcn, vif);
}
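The repeated container_of() casts are folded into a helper; its definition is not part of this hunk, but from the code it replaces it presumably reads roughly like this (field names as used above):

	static inline struct ieee80211_vif *
	wcn36xx_priv_to_vif(struct wcn36xx_vif *vif_priv)
	{
		/* drv_priv is the per-driver tail of ieee80211_vif */
		return container_of((void *)vif_priv,
				    struct ieee80211_vif, drv_priv);
	}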
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index b947de0fb..658bfb8ba 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -48,12 +48,15 @@
#define WCN36XX_HAL_IPV4_ADDR_LEN 4
-#define WALN_HAL_STA_INVALID_IDX 0xFF
+#define WCN36XX_HAL_STA_INVALID_IDX 0xFF
#define WCN36XX_HAL_BSS_INVALID_IDX 0xFF
/* Default Beacon template size */
#define BEACON_TEMPLATE_SIZE 0x180
+/* Minimum PVM size that the FW expects. See comment in smd.c for details. */
+#define TIM_MIN_PVM_SIZE 6
+
/* Param Change Bitmap sent to HAL */
#define PARAM_BCN_INTERVAL_CHANGED (1 << 0)
#define PARAM_SHORT_PREAMBLE_CHANGED (1 << 1)
@@ -2884,11 +2887,14 @@ struct update_beacon_rsp_msg {
struct wcn36xx_hal_send_beacon_req_msg {
struct wcn36xx_hal_msg_header header;
+ /* length of the template + 6. Only qcom knows why */
+ u32 beacon_length6;
+
/* length of the template. */
u32 beacon_length;
/* Beacon data. */
- u8 beacon[BEACON_TEMPLATE_SIZE];
+ u8 beacon[BEACON_TEMPLATE_SIZE - sizeof(u32)];
u8 bssid[ETH_ALEN];
@@ -4261,9 +4267,9 @@ struct wcn36xx_hal_rcv_flt_mc_addr_list_type {
u8 data_offset;
u32 mc_addr_count;
- u8 mc_addr[ETH_ALEN][WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS];
+ u8 mc_addr[WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS][ETH_ALEN];
u8 bss_index;
-};
+} __packed;
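Two fixes land in this struct: indexing the array as [entry][octet] makes each MAC address occupy six consecutive bytes, and __packed keeps the host layout byte-identical to the firmware message. A compile-time sketch of the intent, assuming the usual ETH_ALEN of 6 and a hypothetical cap of 8 entries:

	struct example_mc_list {
		u8 mc_addr[8][6];	/* [entries][ETH_ALEN] */
	} __packed;

	static inline void example_layout_checks(void)
	{
		/* entry 1 must start right after entry 0's six octets;
		 * the old [6][8] shape had the same total size but put
		 * entry 1's first octet at offset 8 */
		BUILD_BUG_ON(offsetof(struct example_mc_list,
				      mc_addr[1][0]) != 6);
		BUILD_BUG_ON(sizeof(struct example_mc_list) != 8 * 6);
	}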
struct wcn36xx_hal_set_pkt_filter_rsp_msg {
struct wcn36xx_hal_msg_header header;
@@ -4317,7 +4323,7 @@ struct wcn36xx_hal_rcv_flt_pkt_clear_rsp_msg {
struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg {
struct wcn36xx_hal_msg_header header;
struct wcn36xx_hal_rcv_flt_mc_addr_list_type mc_addr_list;
-};
+} __packed;
struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_rsp_msg {
struct wcn36xx_hal_msg_header header;
@@ -4383,6 +4389,45 @@ enum place_holder_in_cap_bitmap {
RTT = 20,
RATECTRL = 21,
WOW = 22,
+ WLAN_ROAM_SCAN_OFFLOAD = 23,
+ SPECULATIVE_PS_POLL = 24,
+ SCAN_SCH = 25,
+ IBSS_HEARTBEAT_OFFLOAD = 26,
+ WLAN_SCAN_OFFLOAD = 27,
+ WLAN_PERIODIC_TX_PTRN = 28,
+ ADVANCE_TDLS = 29,
+ BATCH_SCAN = 30,
+ FW_IN_TX_PATH = 31,
+ EXTENDED_NSOFFLOAD_SLOT = 32,
+ CH_SWITCH_V1 = 33,
+ HT40_OBSS_SCAN = 34,
+ UPDATE_CHANNEL_LIST = 35,
+ WLAN_MCADDR_FLT = 36,
+ WLAN_CH144 = 37,
+ NAN = 38,
+ TDLS_SCAN_COEXISTENCE = 39,
+ LINK_LAYER_STATS_MEAS = 40,
+ MU_MIMO = 41,
+ EXTENDED_SCAN = 42,
+ DYNAMIC_WMM_PS = 43,
+ MAC_SPOOFED_SCAN = 44,
+ BMU_ERROR_GENERIC_RECOVERY = 45,
+ DISA = 46,
+ FW_STATS = 47,
+ WPS_PRBRSP_TMPL = 48,
+ BCN_IE_FLT_DELTA = 49,
+ TDLS_OFF_CHANNEL = 51,
+ RTT3 = 52,
+ MGMT_FRAME_LOGGING = 53,
+ ENHANCED_TXBD_COMPLETION = 54,
+ LOGGING_ENHANCEMENT = 55,
+ EXT_SCAN_ENHANCED = 56,
+ MEMORY_DUMP_SUPPORTED = 57,
+ PER_PKT_STATS_SUPPORTED = 58,
+ EXT_LL_STAT = 60,
+ WIFI_CONFIG = 61,
+ ANTENNA_DIVERSITY_SELECTION = 62,
+
MAX_FEATURE_SUPPORTED = 128,
};
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 27f9852e7..8e2c5ff7a 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -26,14 +26,14 @@ module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
#define CHAN2G(_freq, _idx) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_idx), \
.max_power = 25, \
}
#define CHAN5G(_freq, _idx) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = (_freq), \
.hw_value = (_idx), \
.max_power = 25, \
@@ -201,7 +201,45 @@ static const char * const wcn36xx_caps_names[] = {
"BCN_FILTER", /* 19 */
"RTT", /* 20 */
"RATECTRL", /* 21 */
- "WOW" /* 22 */
+ "WOW", /* 22 */
+ "WLAN_ROAM_SCAN_OFFLOAD", /* 23 */
+ "SPECULATIVE_PS_POLL", /* 24 */
+ "SCAN_SCH", /* 25 */
+ "IBSS_HEARTBEAT_OFFLOAD", /* 26 */
+ "WLAN_SCAN_OFFLOAD", /* 27 */
+ "WLAN_PERIODIC_TX_PTRN", /* 28 */
+ "ADVANCE_TDLS", /* 29 */
+ "BATCH_SCAN", /* 30 */
+ "FW_IN_TX_PATH", /* 31 */
+ "EXTENDED_NSOFFLOAD_SLOT", /* 32 */
+ "CH_SWITCH_V1", /* 33 */
+ "HT40_OBSS_SCAN", /* 34 */
+ "UPDATE_CHANNEL_LIST", /* 35 */
+ "WLAN_MCADDR_FLT", /* 36 */
+ "WLAN_CH144", /* 37 */
+ "NAN", /* 38 */
+ "TDLS_SCAN_COEXISTENCE", /* 39 */
+ "LINK_LAYER_STATS_MEAS", /* 40 */
+ "MU_MIMO", /* 41 */
+ "EXTENDED_SCAN", /* 42 */
+ "DYNAMIC_WMM_PS", /* 43 */
+ "MAC_SPOOFED_SCAN", /* 44 */
+ "BMU_ERROR_GENERIC_RECOVERY", /* 45 */
+ "DISA", /* 46 */
+ "FW_STATS", /* 47 */
+ "WPS_PRBRSP_TMPL", /* 48 */
+ "BCN_IE_FLT_DELTA", /* 49 */
+ "TDLS_OFF_CHANNEL", /* 51 */
+ "RTT3", /* 52 */
+ "MGMT_FRAME_LOGGING", /* 53 */
+ "ENHANCED_TXBD_COMPLETION", /* 54 */
+ "LOGGING_ENHANCEMENT", /* 55 */
+ "EXT_SCAN_ENHANCED", /* 56 */
+ "MEMORY_DUMP_SUPPORTED", /* 57 */
+ "PER_PKT_STATS_SUPPORTED", /* 58 */
+ "EXT_LL_STAT", /* 60 */
+ "WIFI_CONFIG", /* 61 */
+ "ANTENNA_DIVERSITY_SELECTION", /* 62 */
};
static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
@@ -287,6 +325,7 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
}
wcn36xx_detect_chip_version(wcn);
+ wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST, 1);
/* DMA channel initialization */
ret = wcn36xx_dxe_init(wcn);
@@ -346,9 +385,7 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
ch);
list_for_each_entry(tmp, &wcn->vif_list, list) {
- vif = container_of((void *)tmp,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(tmp);
wcn36xx_smd_switch_channel(wcn, vif, ch);
}
}
@@ -356,15 +393,57 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
return 0;
}
-#define WCN36XX_SUPPORTED_FILTERS (0)
-
static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
unsigned int changed,
unsigned int *total, u64 multicast)
{
+ struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp;
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *tmp;
+ struct ieee80211_vif *vif = NULL;
+
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
- *total &= WCN36XX_SUPPORTED_FILTERS;
+ *total &= FIF_ALLMULTI;
+
+ fp = (void *)(unsigned long)multicast;
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ vif = wcn36xx_priv_to_vif(tmp);
+
+ /* FW handles MC filtering only when connected as STA */
+ if (*total & FIF_ALLMULTI)
+ wcn36xx_smd_set_mc_list(wcn, vif, NULL);
+ else if (NL80211_IFTYPE_STATION == vif->type && tmp->sta_assoc)
+ wcn36xx_smd_set_mc_list(wcn, vif, fp);
+ }
+ kfree(fp);
+}
+
+static u64 wcn36xx_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp;
+ struct netdev_hw_addr *ha;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac prepare multicast list\n");
+ fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
+ if (!fp) {
+ wcn36xx_err("Out of memory setting filters.\n");
+ return 0;
+ }
+
+ fp->mc_addr_count = 0;
+ /* update multicast filtering parameters */
+ if (netdev_hw_addr_list_count(mc_list) <=
+ WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS) {
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ memcpy(fp->mc_addr[fp->mc_addr_count],
+ ha->addr, ETH_ALEN);
+ fp->mc_addr_count++;
+ }
+ }
+
+ return (u64)(unsigned long)fp;
}
static void wcn36xx_tx(struct ieee80211_hw *hw,
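
The two callbacks above rely on a mac80211 contract: the u64 that prepare_multicast() returns is handed back verbatim as the multicast argument of configure_filter(), so a driver may smuggle a pointer through it, allocating in atomic context and freeing once the filter has been programmed. A minimal sketch of the round trip (names and payload size hypothetical):

static u64 example_prepare_multicast(struct netdev_hw_addr_list *mc_list)
{
        /* prepare_multicast() may run in atomic context */
        void *cookie = kzalloc(64, GFP_ATOMIC);  /* placeholder payload */

        return (u64)(unsigned long)cookie;       /* NULL becomes 0 */
}

static void example_configure_filter(u64 multicast)
{
        void *cookie = (void *)(unsigned long)multicast;

        /* ... program the hardware filter from the cookie ... */
        kfree(cookie);                           /* kfree(NULL) is a no-op */
}
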
@@ -375,7 +454,7 @@ static void wcn36xx_tx(struct ieee80211_hw *hw,
struct wcn36xx_sta *sta_priv = NULL;
if (control->sta)
- sta_priv = (struct wcn36xx_sta *)control->sta->drv_priv;
+ sta_priv = wcn36xx_sta_to_priv(control->sta);
if (wcn36xx_start_tx(wcn, sta_priv, skb))
ieee80211_free_txskb(wcn->hw, skb);
@@ -387,8 +466,8 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key_conf)
{
struct wcn36xx *wcn = hw->priv;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
- struct wcn36xx_sta *sta_priv = vif_priv->sta;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
int ret = 0;
u8 key[WLAN_MAX_KEY_LEN];
@@ -473,6 +552,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
case DISABLE_KEY:
if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE;
wcn36xx_smd_remove_bsskey(wcn,
vif_priv->encrypt_type,
key_conf->keyidx);
@@ -516,11 +596,11 @@ static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw,
}
static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
int i, size;
u16 *rates_table;
- struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
u32 rates = sta->supp_rates[band];
memset(&sta_priv->supported_rates, 0,
@@ -529,7 +609,7 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates);
rates_table = sta_priv->supported_rates.dsss_rates;
- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
for (i = 0; i < size; i++) {
if (rates & 0x01) {
rates_table[i] = wcn_2ghz_rates[i].hw_value;
@@ -590,7 +670,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
struct sk_buff *skb = NULL;
u16 tim_off, tim_len;
enum wcn36xx_hal_link_state link_state;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
vif, changed);
@@ -620,7 +700,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
if (!is_zero_ether_addr(bss_conf->bssid)) {
vif_priv->is_joining = true;
- vif_priv->bss_index = 0xff;
+ vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
wcn36xx_smd_join(wcn, bss_conf->bssid,
vif->addr, WCN36XX_HW_CHANNEL(wcn));
wcn36xx_smd_config_bss(wcn, vif, NULL,
@@ -628,6 +708,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
} else {
vif_priv->is_joining = false;
wcn36xx_smd_delete_bss(wcn, vif);
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE;
}
}
@@ -655,6 +736,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
vif->addr,
bss_conf->aid);
+ vif_priv->sta_assoc = true;
rcu_read_lock();
sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (!sta) {
@@ -663,7 +745,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
rcu_read_unlock();
goto out;
}
- sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ sta_priv = wcn36xx_sta_to_priv(sta);
wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
@@ -686,6 +768,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid,
vif->addr,
bss_conf->aid);
+ vif_priv->sta_assoc = false;
wcn36xx_smd_set_link_st(wcn,
bss_conf->bssid,
vif->addr,
@@ -713,7 +796,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
if (bss_conf->enable_beacon) {
vif_priv->dtim_period = bss_conf->dtim_period;
- vif_priv->bss_index = 0xff;
+ vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
wcn36xx_smd_config_bss(wcn, vif, NULL,
vif->addr, false);
skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
@@ -734,9 +817,9 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
link_state);
} else {
+ wcn36xx_smd_delete_bss(wcn, vif);
wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
WCN36XX_HAL_LINK_IDLE_STATE);
- wcn36xx_smd_delete_bss(wcn, vif);
}
}
out:
@@ -757,7 +840,7 @@ static void wcn36xx_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wcn36xx *wcn = hw->priv;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
list_del(&vif_priv->list);
@@ -768,7 +851,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wcn36xx *wcn = hw->priv;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac add interface vif %p type %d\n",
vif, vif->type);
@@ -792,13 +875,12 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct wcn36xx *wcn = hw->priv;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
- struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
vif, sta->addr);
spin_lock_init(&sta_priv->ampdu_lock);
- vif_priv->sta = sta_priv;
sta_priv->vif = vif_priv;
/*
* For STA mode HW will be configured on BSS_CHANGED_ASSOC because
@@ -817,14 +899,12 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct wcn36xx *wcn = hw->priv;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
- struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
vif, sta->addr, sta_priv->sta_index);
wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
- vif_priv->sta = NULL;
sta_priv->vif = NULL;
return 0;
}
@@ -860,7 +940,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_ampdu_params *params)
{
struct wcn36xx *wcn = hw->priv;
- struct wcn36xx_sta *sta_priv = NULL;
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(params->sta);
struct ieee80211_sta *sta = params->sta;
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
@@ -869,8 +949,6 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
action, tid);
- sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
-
switch (action) {
case IEEE80211_AMPDU_RX_START:
sta_priv->tid = tid;
@@ -923,6 +1001,7 @@ static const struct ieee80211_ops wcn36xx_ops = {
.resume = wcn36xx_resume,
#endif
.config = wcn36xx_config,
+ .prepare_multicast = wcn36xx_prepare_multicast,
.configure_filter = wcn36xx_configure_filter,
.tx = wcn36xx_tx,
.set_key = wcn36xx_set_key,
@@ -958,8 +1037,8 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
- wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz;
- wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz;
+ wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz;
+ wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz;
wcn->hw->wiphy->cipher_suites = cipher_suites;
wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
index 28b515c81..589fe5f70 100644
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -22,7 +22,7 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
struct ieee80211_vif *vif)
{
int ret = 0;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
/* TODO: Make sure the TX chain clean */
ret = wcn36xx_smd_enter_bmps(wcn, vif);
if (!ret) {
@@ -42,7 +42,7 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
struct ieee80211_vif *vif)
{
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
if (WCN36XX_BMPS != vif_priv->pw_state) {
wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 936938c74..06c5ddcd3 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -104,11 +104,11 @@ static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_bss_params *bss_params)
{
- if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn))
+ if (NL80211_BAND_5GHZ == WCN36XX_BAND(wcn))
bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
else if (sta && sta->ht_cap.ht_supported)
bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
- else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f))
+ else if (sta && (sta->supp_rates[NL80211_BAND_2GHZ] & 0x7f))
bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
else
bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
@@ -191,16 +191,16 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params *sta_params)
{
- struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
- struct wcn36xx_sta *priv_sta = NULL;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_sta *sta_priv = NULL;
if (vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT) {
sta_params->type = 1;
- sta_params->sta_index = 0xFF;
+ sta_params->sta_index = WCN36XX_HAL_STA_INVALID_IDX;
} else {
sta_params->type = 0;
- sta_params->sta_index = 1;
+ sta_params->sta_index = vif_priv->self_sta_index;
}
sta_params->listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
@@ -215,7 +215,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
else
memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
- sta_params->encrypt_type = priv_vif->encrypt_type;
+ sta_params->encrypt_type = vif_priv->encrypt_type;
sta_params->short_preamble_supported = true;
sta_params->rifs_mode = 0;
@@ -224,21 +224,21 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
sta_params->uapsd = 0;
sta_params->mimo_ps = WCN36XX_HAL_HT_MIMO_PS_STATIC;
sta_params->max_ampdu_duration = 0;
- sta_params->bssid_index = priv_vif->bss_index;
+ sta_params->bssid_index = vif_priv->bss_index;
sta_params->p2p = 0;
if (sta) {
- priv_sta = (struct wcn36xx_sta *)sta->drv_priv;
+ sta_priv = wcn36xx_sta_to_priv(sta);
if (NL80211_IFTYPE_STATION == vif->type)
memcpy(&sta_params->bssid, sta->addr, ETH_ALEN);
else
memcpy(&sta_params->mac, sta->addr, ETH_ALEN);
sta_params->wmm_enabled = sta->wme;
sta_params->max_sp_len = sta->max_sp;
- sta_params->aid = priv_sta->aid;
+ sta_params->aid = sta_priv->aid;
wcn36xx_smd_set_sta_ht_params(sta, sta_params);
- memcpy(&sta_params->supported_rates, &priv_sta->supported_rates,
- sizeof(priv_sta->supported_rates));
+ memcpy(&sta_params->supported_rates, &sta_priv->supported_rates,
+ sizeof(sta_priv->supported_rates));
} else {
wcn36xx_set_default_rates(&sta_params->supported_rates);
wcn36xx_smd_set_sta_default_ht_params(sta_params);
@@ -271,6 +271,16 @@ out:
return ret;
}
+static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr,
+ enum wcn36xx_hal_host_msg_type msg_type,
+ size_t msg_size)
+{
+ memset(hdr, 0, msg_size + sizeof(*hdr));
+ hdr->msg_type = msg_type;
+ hdr->msg_version = WCN36XX_HAL_MSG_VERSION0;
+ hdr->len = msg_size + sizeof(*hdr);
+}
+
#define INIT_HAL_MSG(msg_body, type) \
do { \
memset(&msg_body, 0, sizeof(msg_body)); \
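
The new init_hal_msg() mirrors INIT_HAL_MSG but works on messages built in place (for example directly in wcn->hal_buf) rather than on fixed-size stack structs. Note that it takes the payload size excluding the header and stores payload plus header in hdr->len. A minimal usage sketch with a hypothetical message layout and placeholder message type:

struct example_req {
        struct wcn36xx_hal_msg_header header;
        u8 payload[16];
} __packed;

static void example_build(void *hal_buf)
{
        struct example_req *req = hal_buf;

        init_hal_msg(&req->header, /* msg_type: */ 0, sizeof(req->payload));
        /* now req->header.len == sizeof(req->payload) + sizeof(req->header) */
}
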
@@ -302,22 +312,6 @@ static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
return 0;
}
-static int wcn36xx_smd_rsp_status_check_v2(struct wcn36xx *wcn, void *buf,
- size_t len)
-{
- struct wcn36xx_fw_msg_status_rsp_v2 *rsp;
-
- if (len < sizeof(struct wcn36xx_hal_msg_header) + sizeof(*rsp))
- return wcn36xx_smd_rsp_status_check(buf, len);
-
- rsp = buf + sizeof(struct wcn36xx_hal_msg_header);
-
- if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
- return rsp->status;
-
- return 0;
-}
-
int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
{
struct nv_data *nv_d;
@@ -726,7 +720,7 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
size_t len)
{
struct wcn36xx_hal_add_sta_self_rsp_msg *rsp;
- struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
if (len < sizeof(*rsp))
return -EINVAL;
@@ -743,8 +737,8 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
"hal add sta self status %d self_sta_index %d dpu_index %d\n",
rsp->status, rsp->self_sta_index, rsp->dpu_index);
- priv_vif->self_sta_index = rsp->self_sta_index;
- priv_vif->self_dpu_desc_index = rsp->dpu_index;
+ vif_priv->self_sta_index = rsp->self_sta_index;
+ vif_priv->self_dpu_desc_index = rsp->dpu_index;
return 0;
}
@@ -949,17 +943,32 @@ static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn,
memcpy(&v1->mac, orig->mac, ETH_ALEN);
v1->aid = orig->aid;
v1->type = orig->type;
+ v1->short_preamble_supported = orig->short_preamble_supported;
v1->listen_interval = orig->listen_interval;
+ v1->wmm_enabled = orig->wmm_enabled;
v1->ht_capable = orig->ht_capable;
-
+ v1->tx_channel_width_set = orig->tx_channel_width_set;
+ v1->rifs_mode = orig->rifs_mode;
+ v1->lsig_txop_protection = orig->lsig_txop_protection;
v1->max_ampdu_size = orig->max_ampdu_size;
v1->max_ampdu_density = orig->max_ampdu_density;
v1->sgi_40mhz = orig->sgi_40mhz;
v1->sgi_20Mhz = orig->sgi_20Mhz;
-
+ v1->rmf = orig->rmf;
+ v1->encrypt_type = orig->encrypt_type;
+ v1->action = orig->action;
+ v1->uapsd = orig->uapsd;
+ v1->max_sp_len = orig->max_sp_len;
+ v1->green_field_capable = orig->green_field_capable;
+ v1->mimo_ps = orig->mimo_ps;
+ v1->delayed_ba_support = orig->delayed_ba_support;
+ v1->max_ampdu_duration = orig->max_ampdu_duration;
+ v1->dsss_cck_mode_40mhz = orig->dsss_cck_mode_40mhz;
memcpy(&v1->supported_rates, &orig->supported_rates,
sizeof(orig->supported_rates));
v1->sta_index = orig->sta_index;
+ v1->bssid_index = orig->bssid_index;
+ v1->p2p = orig->p2p;
}
static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
@@ -969,7 +978,7 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
{
struct wcn36xx_hal_config_sta_rsp_msg *rsp;
struct config_sta_rsp_params *params;
- struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
if (len < sizeof(*rsp))
return -EINVAL;
@@ -1170,12 +1179,13 @@ static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
void *buf,
size_t len)
{
struct wcn36xx_hal_config_bss_rsp_msg *rsp;
struct wcn36xx_hal_config_bss_rsp_params *params;
- struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
if (len < sizeof(*rsp))
return -EINVAL;
@@ -1198,14 +1208,15 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
params->bss_bcast_sta_idx, params->mac,
params->tx_mgmt_power, params->ucast_dpu_signature);
- priv_vif->bss_index = params->bss_index;
+ vif_priv->bss_index = params->bss_index;
- if (priv_vif->sta) {
- priv_vif->sta->bss_sta_index = params->bss_sta_index;
- priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
+ if (sta) {
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
+ sta_priv->bss_sta_index = params->bss_sta_index;
+ sta_priv->bss_dpu_desc_index = params->dpu_desc_index;
}
- priv_vif->self_ucast_dpu_sign = params->ucast_dpu_signature;
+ vif_priv->self_ucast_dpu_sign = params->ucast_dpu_signature;
return 0;
}
@@ -1217,7 +1228,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct wcn36xx_hal_config_bss_req_msg msg;
struct wcn36xx_hal_config_bss_params *bss;
struct wcn36xx_hal_config_sta_params *sta_params;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
mutex_lock(&wcn->hal_mutex);
@@ -1329,6 +1340,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
}
ret = wcn36xx_smd_config_bss_rsp(wcn,
vif,
+ sta,
wcn->hal_buf,
wcn->hal_rsp_len);
if (ret) {
@@ -1343,13 +1355,13 @@ out:
int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_delete_bss_req_msg msg_body;
- struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
- msg_body.bss_index = priv_vif->bss_index;
+ msg_body.bss_index = vif_priv->bss_index;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -1375,26 +1387,47 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
u16 p2p_off)
{
struct wcn36xx_hal_send_beacon_req_msg msg_body;
- int ret = 0;
+ int ret = 0, pad, pvm_len;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
- /* TODO need to find out why this is needed? */
- msg_body.beacon_length = skb_beacon->len + 6;
+ pvm_len = skb_beacon->data[tim_off + 1] - 3;
+ pad = TIM_MIN_PVM_SIZE - pvm_len;
- if (BEACON_TEMPLATE_SIZE > msg_body.beacon_length) {
- memcpy(&msg_body.beacon, &skb_beacon->len, sizeof(u32));
- memcpy(&(msg_body.beacon[4]), skb_beacon->data,
- skb_beacon->len);
- } else {
+ /* Padding is irrelevant to mesh mode since tim_off is always 0. */
+ if (vif->type == NL80211_IFTYPE_MESH_POINT)
+ pad = 0;
+
+ msg_body.beacon_length = skb_beacon->len + pad;
+ /* TODO need to find out why + 6 is needed */
+ msg_body.beacon_length6 = msg_body.beacon_length + 6;
+
+ if (msg_body.beacon_length > BEACON_TEMPLATE_SIZE) {
wcn36xx_err("Beacon is to big: beacon size=%d\n",
msg_body.beacon_length);
ret = -ENOMEM;
goto out;
}
+ memcpy(msg_body.beacon, skb_beacon->data, skb_beacon->len);
memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
+ if (pad > 0) {
+ /*
+ * The wcn36xx FW has a fixed size for the PVM in the TIM. If
+ * given the beacon template from mac80211 with a PVM shorter
+ * than the FW expects, it will overwrite the data after the
+ * TIM.
+ */
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "Pad TIM PVM. %d bytes at %d\n",
+ pad, pvm_len);
+ memmove(&msg_body.beacon[tim_off + 5 + pvm_len + pad],
+ &msg_body.beacon[tim_off + 5 + pvm_len],
+ skb_beacon->len - (tim_off + 5 + pvm_len));
+ memset(&msg_body.beacon[tim_off + 5 + pvm_len], 0, pad);
+ msg_body.beacon[tim_off + 1] += pad;
+ }
+
/* TODO need to find out why this is needed? */
if (vif->type == NL80211_IFTYPE_MESH_POINT)
/* mesh beacons don't need this, so push it further down */
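
The padding block above is a plain buffer gap insertion: memmove() shifts everything after the partial virtual bitmap (PVM) up by pad bytes, memset() zero-fills the gap, and the TIM element length byte is bumped to match. Reduced to a self-contained sketch (buffer capacity assumed sufficient):

static size_t example_insert_pad(u8 *buf, size_t len, size_t at, size_t pad)
{
        /* open a gap of "pad" bytes at offset "at" */
        memmove(buf + at + pad, buf + at, len - at);
        /* zero-fill it: a longer all-zero partial virtual bitmap */
        memset(buf + at, 0, pad);
        return len + pad;
}

In the driver, "at" corresponds to tim_off + 5 + pvm_len, the first byte past the existing bitmap.
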
@@ -1598,8 +1631,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
wcn36xx_err("Sending hal_remove_bsskey failed\n");
goto out;
}
- ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf,
- wcn->hal_rsp_len);
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
goto out;
@@ -1612,7 +1644,7 @@ out:
int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_enter_bmps_req_msg msg_body;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
mutex_lock(&wcn->hal_mutex);
@@ -1641,8 +1673,8 @@ out:
int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
- struct wcn36xx_hal_enter_bmps_req_msg msg_body;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_hal_exit_bmps_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
mutex_lock(&wcn->hal_mutex);
@@ -1703,7 +1735,7 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
int packet_type)
{
struct wcn36xx_hal_keep_alive_req_msg msg_body;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
mutex_lock(&wcn->hal_mutex);
@@ -1944,6 +1976,17 @@ out:
return ret;
}
+static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len)
+{
+ struct wcn36xx_hal_trigger_ba_rsp_msg *rsp;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_trigger_ba_rsp_msg *) buf;
+ return rsp->status;
+}
+
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
{
struct wcn36xx_hal_trigger_ba_req_msg msg_body;
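
wcn36xx_smd_trigger_ba_rsp() above replaces the deleted generic v2 status check with a response-specific parser; the pattern is to validate the buffer length before casting and only then trust the fields. A sketch with a hypothetical response layout:

struct example_rsp_msg {
        struct wcn36xx_hal_msg_header header;
        u32 status;
} __packed;

static int example_rsp_check(void *buf, size_t len)
{
        struct example_rsp_msg *rsp = buf;

        if (len < sizeof(*rsp))        /* reject truncated responses */
                return -EINVAL;

        return rsp->status;            /* 0 means success for the firmware */
}
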
@@ -1968,8 +2011,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
wcn36xx_err("Sending hal_trigger_ba failed\n");
goto out;
}
- ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf,
- wcn->hal_rsp_len);
+ ret = wcn36xx_smd_trigger_ba_rsp(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
goto out;
@@ -2006,9 +2048,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
list_for_each_entry(tmp, &wcn->vif_list, list) {
wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
tmp->bss_index);
- vif = container_of((void *)tmp,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(tmp);
ieee80211_connection_loss(vif);
}
return 0;
@@ -2023,9 +2063,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
if (tmp->bss_index == rsp->bss_index) {
wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
rsp->bss_index);
- vif = container_of((void *)tmp,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(tmp);
ieee80211_connection_loss(vif);
return 0;
}
@@ -2041,25 +2079,24 @@ static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
{
struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
struct wcn36xx_vif *tmp;
- struct ieee80211_sta *sta = NULL;
+ struct ieee80211_sta *sta;
if (len != sizeof(*rsp)) {
wcn36xx_warn("Corrupted delete sta indication\n");
return -EIO;
}
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "delete station indication %pM index %d\n",
+ rsp->addr2, rsp->sta_id);
+
list_for_each_entry(tmp, &wcn->vif_list, list) {
- if (sta && (tmp->sta->sta_index == rsp->sta_id)) {
- sta = container_of((void *)tmp->sta,
- struct ieee80211_sta,
- drv_priv);
- wcn36xx_dbg(WCN36XX_DBG_HAL,
- "delete station indication %pM index %d\n",
- rsp->addr2,
- rsp->sta_id);
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wcn36xx_priv_to_vif(tmp), rsp->addr2);
+ if (sta)
ieee80211_report_low_ack(sta, 0);
+ rcu_read_unlock();
+ if (sta)
return 0;
- }
}
wcn36xx_warn("STA with addr %pM and index %d not found\n",
@@ -2100,6 +2137,46 @@ out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
+
+int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp)
+{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg_body = NULL;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ msg_body = (struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *)
+ wcn->hal_buf;
+ init_hal_msg(&msg_body->header, WCN36XX_HAL_8023_MULTICAST_LIST_REQ,
+ sizeof(msg_body->mc_addr_list));
+
+ /* An empty list means all mc traffic will be received */
+ if (fp)
+ memcpy(&msg_body->mc_addr_list, fp,
+ sizeof(msg_body->mc_addr_list));
+ else
+ msg_body->mc_addr_list.mc_addr_count = 0;
+
+ msg_body->mc_addr_list.bss_index = vif_priv->bss_index;
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
+ if (ret) {
+ wcn36xx_err("Sending HAL_8023_MULTICAST_LIST failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("HAL_8023_MULTICAST_LIST rsp failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
{
struct wcn36xx_hal_msg_header *msg_header = buf;
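
wcn36xx_smd_set_mc_list() above follows the discipline every SMD command in this file uses: wcn->hal_buf is shared, so building the request, sending it, and checking the response status all happen under hal_mutex. A condensed sketch (the command constant is a placeholder, not a real message type):

static int example_smd_command(struct wcn36xx *wcn)
{
        struct wcn36xx_hal_msg_header *hdr = (void *)wcn->hal_buf;
        int ret;

        mutex_lock(&wcn->hal_mutex);
        init_hal_msg(hdr, /* msg_type: */ 0, /* payload size: */ 0);

        ret = wcn36xx_smd_send_and_wait(wcn, hdr->len);
        if (ret)
                goto out;

        ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
out:
        mutex_unlock(&wcn->hal_mutex);
        return ret;
}
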
@@ -2141,6 +2218,7 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP:
case WCN36XX_HAL_CH_SWITCH_RSP:
case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
+ case WCN36XX_HAL_8023_MULTICAST_LIST_RSP:
memcpy(wcn->hal_buf, buf, len);
wcn->hal_rsp_len = len;
complete(&wcn->hal_rsp_compl);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 8361f9e39..d93e3fd73 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -24,7 +24,7 @@
#define WCN36XX_HAL_BUF_SIZE 4096
-#define HAL_MSG_TIMEOUT 500
+#define HAL_MSG_TIMEOUT 10000
#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
/* The PNO version info be contained in the rsp msg */
@@ -44,15 +44,6 @@ struct wcn36xx_fw_msg_status_rsp {
u32 status;
} __packed;
-/* wcn3620 returns this for tigger_ba */
-
-struct wcn36xx_fw_msg_status_rsp_v2 {
- u8 bss_id[6];
- u32 status __packed;
- u16 count_following_candidates __packed;
- /* candidate list follows */
-};
-
struct wcn36xx_hal_ind_msg {
struct list_head list;
u8 *msg;
@@ -136,4 +127,7 @@ int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
+int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp);
#endif /* _SMD_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 9bec82372..1f34c2e91 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -57,7 +57,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
RX_FLAG_MMIC_STRIPPED |
RX_FLAG_DECRYPTED;
- wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
+ wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%llx\n", status.flag);
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
@@ -102,9 +102,7 @@ static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
struct wcn36xx_vif *vif_priv = NULL;
struct ieee80211_vif *vif = NULL;
list_for_each_entry(vif_priv, &wcn->vif_list, list) {
- vif = container_of((void *)vif_priv,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(vif_priv);
if (memcmp(vif->addr, addr, ETH_ALEN) == 0)
return vif_priv;
}
@@ -167,9 +165,7 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
*/
if (sta_priv) {
__vif_priv = sta_priv->vif;
- vif = container_of((void *)__vif_priv,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(__vif_priv);
bd->dpu_sign = sta_priv->ucast_dpu_sign;
if (vif->type == NL80211_IFTYPE_STATION) {
@@ -225,7 +221,7 @@ static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
/* default rate for unicast */
if (ieee80211_is_mgmt(hdr->frame_control))
- bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ?
+ bd->bd_rate = (WCN36XX_BAND(wcn) == NL80211_BAND_5GHZ) ?
WCN36XX_BD_RATE_CTRL :
WCN36XX_BD_RATE_MGMT;
else if (ieee80211_is_ctl(hdr->frame_control))
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 0555c9502..051fb3338 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -125,10 +125,10 @@ struct wcn36xx_platform_ctrl_ops {
*/
struct wcn36xx_vif {
struct list_head list;
- struct wcn36xx_sta *sta;
u8 dtim_period;
enum ani_ed_type encrypt_type;
bool is_joining;
+ bool sta_assoc;
struct wcn36xx_hal_mac_ssid ssid;
/* Power management */
@@ -263,4 +263,22 @@ struct ieee80211_sta *wcn36xx_priv_to_sta(struct wcn36xx_sta *sta_priv)
return container_of((void *)sta_priv, struct ieee80211_sta, drv_priv);
}
+static inline
+struct wcn36xx_vif *wcn36xx_vif_to_priv(struct ieee80211_vif *vif)
+{
+ return (struct wcn36xx_vif *) vif->drv_priv;
+}
+
+static inline
+struct ieee80211_vif *wcn36xx_priv_to_vif(struct wcn36xx_vif *vif_priv)
+{
+ return container_of((void *) vif_priv, struct ieee80211_vif, drv_priv);
+}
+
+static inline
+struct wcn36xx_sta *wcn36xx_sta_to_priv(struct ieee80211_sta *sta)
+{
+ return (struct wcn36xx_sta *)sta->drv_priv;
+}
+
#endif /* _WCN36XX_H_ */
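
The new helpers formalize a round trip that the open-coded container_of() calls removed throughout this patch performed by hand: drv_priv is storage embedded in the mac80211 object, so a cast yields the driver-private view and container_of() recovers the owner. A sketch:

static void example_roundtrip(struct ieee80211_vif *vif)
{
        struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
        struct ieee80211_vif *back = wcn36xx_priv_to_vif(vif_priv);

        WARN_ON(back != vif);        /* equal by construction */
}
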
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index fdf63d5fe..11b544b26 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -18,6 +18,7 @@ wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
wil6210-y += wil_platform.o
wil6210-y += ethtool.o
wil6210-y += wil_crash_dump.o
+wil6210-y += p2p.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 11f1bb8df..576981129 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -18,8 +18,10 @@
#include "wil6210.h"
#include "wmi.h"
+#define WIL_MAX_ROC_DURATION_MS 5000
+
#define CHAN60G(_channel, _flags) { \
- .band = IEEE80211_BAND_60GHZ, \
+ .band = NL80211_BAND_60GHZ, \
.center_freq = 56160 + (2160 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -76,12 +78,24 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
+ [NL80211_IFTYPE_P2P_DEVICE] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
};
static const u32 wil_cipher_suites[] = {
WLAN_CIPHER_SUITE_GCMP,
};
+static const char * const key_usage_str[] = {
+ [WMI_KEY_USE_PAIRWISE] = "PTK",
+ [WMI_KEY_USE_RX_GROUP] = "RX_GTK",
+ [WMI_KEY_USE_TX_GROUP] = "TX_GTK",
+};
+
int wil_iftype_nl2wmi(enum nl80211_iftype type)
{
static const struct {
@@ -113,7 +127,7 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
.interval_usec = 0,
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_notify_req_done_event evt;
} __packed reply;
struct wil_net_stats *stats = &wil->sta[cid].stats;
@@ -226,13 +240,82 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
return rc;
}
+static struct wireless_dev *
+wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
+ unsigned char name_assign_type,
+ enum nl80211_iftype type,
+ u32 *flags, struct vif_params *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *p2p_wdev;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ if (type != NL80211_IFTYPE_P2P_DEVICE) {
+ wil_err(wil, "%s: unsupported iftype %d\n", __func__, type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (wil->p2p_wdev) {
+ wil_err(wil, "%s: P2P_DEVICE interface already created\n",
+ __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ p2p_wdev = kzalloc(sizeof(*p2p_wdev), GFP_KERNEL);
+ if (!p2p_wdev)
+ return ERR_PTR(-ENOMEM);
+
+ p2p_wdev->iftype = type;
+ p2p_wdev->wiphy = wiphy;
+ /* use our primary ethernet address */
+ ether_addr_copy(p2p_wdev->address, ndev->perm_addr);
+
+ wil->p2p_wdev = p2p_wdev;
+
+ return p2p_wdev;
+}
+
+static int wil_cfg80211_del_iface(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ if (wdev != wil->p2p_wdev) {
+ wil_err(wil, "%s: delete of incorrect interface 0x%p\n",
+ __func__, wdev);
+ return -EINVAL;
+ }
+
+ wil_p2p_wdev_free(wil);
+
+ return 0;
+}
+
static int wil_cfg80211_change_iface(struct wiphy *wiphy,
struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- struct wireless_dev *wdev = wil->wdev;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+ int rc;
+
+ wil_dbg_misc(wil, "%s() type=%d\n", __func__, type);
+
+ if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) {
+ wil_dbg_misc(wil, "interface is up. resetting...\n");
+ mutex_lock(&wil->mutex);
+ __wil_down(wil);
+ rc = __wil_up(wil);
+ mutex_unlock(&wil->mutex);
+
+ if (rc)
+ return rc;
+ }
switch (type) {
case NL80211_IFTYPE_STATION:
@@ -260,7 +343,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- struct wireless_dev *wdev = wil->wdev;
+ struct wireless_dev *wdev = request->wdev;
struct {
struct wmi_start_scan_cmd cmd;
u16 chnl[4];
@@ -268,6 +351,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
uint i, n;
int rc;
+ wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
+ __func__, wdev, wdev->iftype);
+
if (wil->scan_request) {
wil_err(wil, "Already scanning\n");
return -EAGAIN;
@@ -277,6 +363,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_DEVICE:
break;
default:
return -EOPNOTSUPP;
@@ -288,6 +375,21 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
return -EBUSY;
}
+ /* social scan on P2P_DEVICE is handled as p2p search */
+ if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE &&
+ wil_p2p_is_social_scan(request)) {
+ wil->scan_request = request;
+ wil->radio_wdev = wdev;
+ rc = wil_p2p_search(wil, request);
+ if (rc) {
+ wil->radio_wdev = wil_to_wdev(wil);
+ wil->scan_request = NULL;
+ }
+ return rc;
+ }
+
+ (void)wil_p2p_stop_discovery(wil);
+
wil_dbg_misc(wil, "Start scan_request 0x%p\n", request);
wil_dbg_misc(wil, "SSID count: %d", request->n_ssids);
@@ -313,6 +415,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO);
memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd.scan_type = WMI_ACTIVE_SCAN;
cmd.cmd.num_channels = 0;
n = min(request->n_channels, 4U);
for (i = 0; i < n; i++) {
@@ -340,12 +443,19 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
if (rc)
goto out;
+ if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) {
+ cmd.cmd.discovery_mode = 1;
+ wil_dbg_misc(wil, "active scan with discovery_mode=1\n");
+ }
+
+ wil->radio_wdev = wdev;
rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
out:
if (rc) {
del_timer_sync(&wil->scan_timer);
+ wil->radio_wdev = wil_to_wdev(wil);
wil->scan_request = NULL;
}
@@ -390,6 +500,7 @@ static void wil_print_connect_params(struct wil6210_priv *wil,
print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET,
16, 1, sme->ssid, sme->ssid_len, true);
wil_info(wil, " Privacy: %s\n", sme->privacy ? "secure" : "open");
+ wil_info(wil, " PBSS: %d\n", sme->pbss);
wil_print_crypto(wil, &sme->crypto);
}
@@ -404,7 +515,9 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
const u8 *rsn_eid;
int ch;
int rc = 0;
+ enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS;
+ wil_dbg_misc(wil, "%s()\n", __func__);
wil_print_connect_params(wil, sme);
if (test_bit(wil_status_fwconnecting, wil->status) ||
@@ -422,14 +535,12 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
if (sme->privacy && !rsn_eid)
wil_info(wil, "WSC connection\n");
- if (sme->pbss) {
- wil_err(wil, "connect - PBSS not yet supported\n");
- return -EOPNOTSUPP;
- }
+ if (sme->pbss)
+ bss_type = IEEE80211_BSS_TYPE_PBSS;
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len,
- IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
+ bss_type, IEEE80211_PRIVACY_ANY);
if (!bss) {
wil_err(wil, "Unable to find BSS\n");
return -ENOENT;
@@ -568,10 +679,20 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_mgmt *mgmt_frame = (void *)buf;
struct wmi_sw_tx_req_cmd *cmd;
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_sw_tx_complete_event evt;
} __packed evt;
+ /* Note: we do not currently support the "wait" parameter; user-space
+ * must call remain_on_channel before mgmt_tx, or listen on a channel
+ * some other way (AP/PCP or connected station).
+ * In addition, we need to check whether the specified "chan" argument
+ * differs from the currently "listened" channel and fail if it does.
+ */
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+ print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len);
+
cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
@@ -598,7 +719,7 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- struct wireless_dev *wdev = wil->wdev;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
wdev->preset_chandef = *chandef;
@@ -608,22 +729,19 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
bool pairwise)
{
- struct wireless_dev *wdev = wil->wdev;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
enum wmi_key_usage rc;
- static const char * const key_usage_str[] = {
- [WMI_KEY_USE_PAIRWISE] = "WMI_KEY_USE_PAIRWISE",
- [WMI_KEY_USE_RX_GROUP] = "WMI_KEY_USE_RX_GROUP",
- [WMI_KEY_USE_TX_GROUP] = "WMI_KEY_USE_TX_GROUP",
- };
if (pairwise) {
rc = WMI_KEY_USE_PAIRWISE;
} else {
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
rc = WMI_KEY_USE_RX_GROUP;
break;
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
rc = WMI_KEY_USE_TX_GROUP;
break;
default:
@@ -638,20 +756,86 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
return rc;
}
+static struct wil_tid_crypto_rx_single *
+wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
+ enum wmi_key_usage key_usage, const u8 *mac_addr)
+{
+ int cid = -EINVAL;
+ int tid = 0;
+ struct wil_sta_info *s;
+ struct wil_tid_crypto_rx *c;
+
+ if (key_usage == WMI_KEY_USE_TX_GROUP)
+ return NULL; /* not needed */
+
+ /* supplicant provides Rx group key in STA mode with NULL MAC address */
+ if (mac_addr)
+ cid = wil_find_cid(wil, mac_addr);
+ else if (key_usage == WMI_KEY_USE_RX_GROUP)
+ cid = wil_find_cid_by_idx(wil, 0);
+ if (cid < 0) {
+ wil_err(wil, "No CID for %pM %s[%d]\n", mac_addr,
+ key_usage_str[key_usage], key_index);
+ return ERR_PTR(cid);
+ }
+
+ s = &wil->sta[cid];
+ if (key_usage == WMI_KEY_USE_PAIRWISE)
+ c = &s->tid_crypto_rx[tid];
+ else
+ c = &s->group_crypto_rx;
+
+ return &c->key_id[key_index];
+}
+
static int wil_cfg80211_add_key(struct wiphy *wiphy,
struct net_device *ndev,
u8 key_index, bool pairwise,
const u8 *mac_addr,
struct key_params *params)
{
+ int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
+ struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
+ key_index,
+ key_usage,
+ mac_addr);
+
+ wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
+ mac_addr, key_usage_str[key_usage], key_index,
+ params->seq_len, params->seq);
+
+ if (IS_ERR(cc)) {
+ wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
+ __func__, mac_addr, key_usage_str[key_usage], key_index,
+ params->seq_len, params->seq);
+ return -EINVAL;
+ }
- wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
- pairwise ? "PTK" : "GTK");
+ if (cc)
+ cc->key_set = false;
+
+ if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
+ wil_err(wil,
+ "Wrong PN len %d, %s(%pM %s[%d] PN %*phN)\n",
+ params->seq_len, __func__, mac_addr,
+ key_usage_str[key_usage], key_index,
+ params->seq_len, params->seq);
+ return -EINVAL;
+ }
+
+ rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
+ params->key, key_usage);
+ if ((rc == 0) && cc) {
+ if (params->seq)
+ memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
+ else
+ memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
+ cc->key_set = true;
+ }
- return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
- params->key, key_usage);
+ return rc;
}
static int wil_cfg80211_del_key(struct wiphy *wiphy,
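
wil_find_crypto_ctx() is deliberately tri-state: NULL means no per-peer context is needed (TX group keys), an ERR_PTR() encodes a failed lookup, and anything else is a usable context. Callers therefore distinguish IS_ERR() (hard failure) from IS_ERR_OR_NULL() (cannot dereference), as add_key and del_key do above. A sketch of the calling convention:

static int example_use_ctx(struct wil_tid_crypto_rx_single *cc)
{
        if (IS_ERR(cc))
                return PTR_ERR(cc);        /* e.g. not connected */

        if (cc)        /* NULL is legitimate: no context needed */
                cc->key_set = false;

        return 0;
}
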
@@ -661,9 +845,20 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
+ struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
+ key_index,
+ key_usage,
+ mac_addr);
+
+ wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
+ key_usage_str[key_usage], key_index);
+
+ if (IS_ERR(cc))
+ wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
+ mac_addr, key_usage_str[key_usage], key_index);
- wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
- pairwise ? "PTK" : "GTK");
+ if (!IS_ERR_OR_NULL(cc))
+ cc->key_set = false;
return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
}
@@ -674,6 +869,9 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
u8 key_index, bool unicast,
bool multicast)
{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "%s: entered\n", __func__);
return 0;
}
@@ -686,16 +884,19 @@ static int wil_remain_on_channel(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
- /* TODO: handle duration */
- wil_info(wil, "%s(%d, %d ms)\n", __func__, chan->center_freq, duration);
+ wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n",
+ __func__, chan->center_freq, duration, wdev->iftype);
- rc = wmi_set_channel(wil, chan->hw_value);
+ rc = wil_p2p_listen(wil, duration, chan, cookie);
if (rc)
return rc;
- rc = wmi_rxon(wil, true);
+ wil->radio_wdev = wdev;
- return rc;
+ cfg80211_ready_on_channel(wdev, *cookie, chan, duration,
+ GFP_KERNEL);
+
+ return 0;
}
static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
@@ -703,13 +904,10 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
u64 cookie)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- int rc;
-
- wil_info(wil, "%s()\n", __func__);
- rc = wmi_rxon(wil, false);
+ wil_dbg_misc(wil, "%s()\n", __func__);
- return rc;
+ return wil_p2p_cancel_listen(wil, cookie);
}
/**
@@ -852,12 +1050,22 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
const u8 *ssid, size_t ssid_len, u32 privacy,
int bi, u8 chan,
struct cfg80211_beacon_data *bcon,
- u8 hidden_ssid)
+ u8 hidden_ssid, u32 pbss)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+ u8 is_go = (wdev->iftype == NL80211_IFTYPE_P2P_GO);
+
+ if (pbss)
+ wmi_nettype = WMI_NETTYPE_P2P;
+
+ wil_dbg_misc(wil, "%s: is_go=%d\n", __func__, is_go);
+ if (is_go && !pbss) {
+ wil_err(wil, "%s: P2P GO must be in PBSS\n", __func__);
+ return -ENOTSUPP;
+ }
wil_set_recovery_state(wil, fw_recovery_idle);
@@ -879,10 +1087,11 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
wil->privacy = privacy;
wil->channel = chan;
wil->hidden_ssid = hidden_ssid;
+ wil->pbss = pbss;
netif_carrier_on(ndev);
- rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid);
+ rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid, is_go);
if (rc)
goto err_pcp_start;
@@ -928,7 +1137,8 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
wdev->ssid_len, privacy,
wdev->beacon_interval,
wil->channel, bcon,
- wil->hidden_ssid);
+ wil->hidden_ssid,
+ wil->pbss);
} else {
rc = _wil_cfg80211_set_ies(wiphy, bcon);
}
@@ -954,11 +1164,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
return -EINVAL;
}
- if (info->pbss) {
- wil_err(wil, "AP: PBSS not yet supported\n");
- return -EOPNOTSUPP;
- }
-
switch (info->hidden_ssid) {
case NL80211_HIDDEN_SSID_NOT_IN_USE:
hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
@@ -984,6 +1189,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
info->hidden_ssid);
wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
info->dtim_period);
+ wil_dbg_misc(wil, "PBSS %d\n", info->pbss);
print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
info->ssid, info->ssid_len);
wil_print_bcon_data(bcon);
@@ -992,7 +1198,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
rc = _wil_cfg80211_start_ap(wiphy, ndev,
info->ssid, info->ssid_len, info->privacy,
info->beacon_interval, channel->hw_value,
- bcon, hidden_ssid);
+ bcon, hidden_ssid, info->pbss);
return rc;
}
@@ -1139,7 +1345,26 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy,
return 0;
}
+static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "%s: entered\n", __func__);
+ return 0;
+}
+
+static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "%s: entered\n", __func__);
+}
+
static struct cfg80211_ops wil_cfg80211_ops = {
+ .add_virtual_intf = wil_cfg80211_add_iface,
+ .del_virtual_intf = wil_cfg80211_del_iface,
.scan = wil_cfg80211_scan,
.connect = wil_cfg80211_connect,
.disconnect = wil_cfg80211_disconnect,
@@ -1160,20 +1385,25 @@ static struct cfg80211_ops wil_cfg80211_ops = {
.del_station = wil_cfg80211_del_station,
.probe_client = wil_cfg80211_probe_client,
.change_bss = wil_cfg80211_change_bss,
+ /* P2P device */
+ .start_p2p_device = wil_cfg80211_start_p2p_device,
+ .stop_p2p_device = wil_cfg80211_stop_p2p_device,
};
static void wil_wiphy_init(struct wiphy *wiphy)
{
wiphy->max_scan_ssids = 1;
wiphy->max_scan_ie_len = WMI_MAX_IE_LEN;
+ wiphy->max_remain_on_channel_duration = WIL_MAX_ROC_DURATION_MS;
wiphy->max_num_pmkids = 0 /* TODO: */;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_MONITOR);
- /* TODO: enable P2P when integrated with supplicant:
- * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO)
- */
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
__func__, wiphy->flags);
@@ -1182,7 +1412,7 @@ static void wil_wiphy_init(struct wiphy *wiphy)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
- wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
+ wiphy->bands[NL80211_BAND_60GHZ] = &wil_band_60ghz;
/* TODO: figure this out */
wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
@@ -1241,3 +1471,18 @@ void wil_wdev_free(struct wil6210_priv *wil)
wiphy_free(wdev->wiphy);
kfree(wdev);
}
+
+void wil_p2p_wdev_free(struct wil6210_priv *wil)
+{
+ struct wireless_dev *p2p_wdev;
+
+ mutex_lock(&wil->p2p_wdev_mutex);
+ p2p_wdev = wil->p2p_wdev;
+ if (p2p_wdev) {
+ wil->p2p_wdev = NULL;
+ wil->radio_wdev = wil_to_wdev(wil);
+ cfg80211_unregister_wdev(p2p_wdev);
+ kfree(p2p_wdev);
+ }
+ mutex_unlock(&wil->p2p_wdev_mutex);
+}
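
wil_p2p_wdev_free() above uses a common teardown shape: snapshot and clear the shared pointer while holding the mutex, so concurrent paths taking the same lock observe NULL, and perform the unregister/free inside the same critical section. Reduced to its skeleton:

static void example_free_shared(struct mutex *lock, void **shared)
{
        void *victim;

        mutex_lock(lock);
        victim = *shared;
        *shared = NULL;        /* paths taking the lock now see NULL */
        kfree(victim);         /* real code also unregisters; kfree(NULL) is fine */
        mutex_unlock(lock);
}
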
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
index 3249562d9..c312a667c 100644
--- a/drivers/net/wireless/ath/wil6210/debug.c
+++ b/drivers/net/wireless/ath/wil6210/debug.c
@@ -17,7 +17,7 @@
#include "wil6210.h"
#include "trace.h"
-void wil_err(struct wil6210_priv *wil, const char *fmt, ...)
+void __wil_err(struct wil6210_priv *wil, const char *fmt, ...)
{
struct net_device *ndev = wil_to_ndev(wil);
struct va_format vaf = {
@@ -32,7 +32,7 @@ void wil_err(struct wil6210_priv *wil, const char *fmt, ...)
va_end(args);
}
-void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
+void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
{
if (net_ratelimit()) {
struct net_device *ndev = wil_to_ndev(wil);
@@ -49,7 +49,23 @@ void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
}
}
-void wil_info(struct wil6210_priv *wil, const char *fmt, ...)
+void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (!net_ratelimit())
+ return;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ netdev_dbg(wil_to_ndev(wil), "%pV", &vaf);
+ trace_wil6210_log_dbg(&vaf);
+ va_end(args);
+}
+
+void __wil_info(struct wil6210_priv *wil, const char *fmt, ...)
{
struct net_device *ndev = wil_to_ndev(wil);
struct va_format vaf = {
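
The ratelimited helper added above uses the kernel's %pV extension: struct va_format bundles a format string with its va_list so the arguments can be forwarded to another printk-family call without an intermediate buffer. The idiom in isolation:

static void example_log(const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        pr_debug("%pV", &vaf);        /* the va_list is consumed exactly once */
        va_end(args);
}
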
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 3bbe73b6d..a8098b406 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -37,6 +37,7 @@ enum dbg_off_type {
doff_x32 = 1,
doff_ulong = 2,
doff_io32 = 3,
+ doff_u8 = 4
};
/* offset to "wil" */
@@ -170,6 +171,8 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
int rsize;
uint i;
+ wil_halp_vote(wil);
+
wil_memcpy_fromio_32(&r, off, sizeof(r));
wil_mbox_ring_le2cpus(&r);
/*
@@ -235,6 +238,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
}
out:
seq_puts(s, "}\n");
+ wil_halp_unvote(wil);
}
static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
@@ -346,6 +350,10 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
tbl[i].mode, dbg,
base + tbl[i].off);
break;
+ case doff_u8:
+ f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
+ base + tbl[i].off);
+ break;
default:
f = ERR_PTR(-EINVAL);
}
@@ -495,9 +503,9 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
enum { max_count = 4096 };
- struct debugfs_blob_wrapper *blob = file->private_data;
+ struct wil_blob_wrapper *wil_blob = file->private_data;
loff_t pos = *ppos;
- size_t available = blob->size;
+ size_t available = wil_blob->blob.size;
void *buf;
size_t ret;
@@ -516,8 +524,9 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- wil_memcpy_fromio_32(buf, (const volatile void __iomem *)blob->data +
- pos, count);
+ wil_memcpy_fromio_halp_vote(wil_blob->wil, buf,
+ (const volatile void __iomem *)
+ wil_blob->blob.data + pos, count);
ret = copy_to_user(user_buf, buf, count);
kfree(buf);
@@ -540,9 +549,9 @@ static
struct dentry *wil_debugfs_create_ioblob(const char *name,
umode_t mode,
struct dentry *parent,
- struct debugfs_blob_wrapper *blob)
+ struct wil_blob_wrapper *wil_blob)
{
- return debugfs_create_file(name, mode, parent, blob, &fops_ioblob);
+ return debugfs_create_file(name, mode, parent, wil_blob, &fops_ioblob);
}
/*---reset---*/
@@ -821,13 +830,13 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
- struct wil6210_mbox_hdr_wmi *wmi;
+ struct wmi_cmd_hdr *wmi;
void *cmd;
- int cmdlen = len - sizeof(struct wil6210_mbox_hdr_wmi);
+ int cmdlen = len - sizeof(struct wmi_cmd_hdr);
u16 cmdid;
int rc, rc1;
- if (cmdlen <= 0)
+ if (cmdlen < 0)
return -EINVAL;
wmi = kmalloc(len, GFP_KERNEL);
@@ -840,8 +849,8 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
return rc;
}
- cmd = &wmi[1];
- cmdid = le16_to_cpu(wmi->id);
+ cmd = (cmdlen > 0) ? &wmi[1] : NULL;
+ cmdid = le16_to_cpu(wmi->command_id);
rc1 = wmi_send(wil, cmdid, cmd, cmdlen);
kfree(wmi);
@@ -985,7 +994,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data)
.interval_usec = 0,
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_notify_req_done_event evt;
} __packed reply;
@@ -1333,6 +1342,34 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
r->ssn_last_drop);
}
+static void wil_print_rxtid_crypto(struct seq_file *s, int tid,
+ struct wil_tid_crypto_rx *c)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ struct wil_tid_crypto_rx_single *cc = &c->key_id[i];
+
+ if (cc->key_set)
+ goto has_keys;
+ }
+ return;
+
+has_keys:
+ if (tid < WIL_STA_TID_NUM)
+ seq_printf(s, " [%2d] PN", tid);
+ else
+ seq_puts(s, " [GR] PN");
+
+ for (i = 0; i < 4; i++) {
+ struct wil_tid_crypto_rx_single *cc = &c->key_id[i];
+
+ seq_printf(s, " [%i%s]%6phN", i, cc->key_set ? "+" : "-",
+ cc->pn);
+ }
+ seq_puts(s, "\n");
+}
+
static int wil_sta_debugfs_show(struct seq_file *s, void *data)
__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
{
@@ -1360,18 +1397,25 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
spin_lock_bh(&p->tid_rx_lock);
for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
+ struct wil_tid_crypto_rx *c =
+ &p->tid_crypto_rx[tid];
if (r) {
- seq_printf(s, "[%2d] ", tid);
+ seq_printf(s, " [%2d] ", tid);
wil_print_rxtid(s, r);
}
+
+ wil_print_rxtid_crypto(s, tid, c);
}
+ wil_print_rxtid_crypto(s, WIL_STA_TID_NUM,
+ &p->group_crypto_rx);
spin_unlock_bh(&p->tid_rx_lock);
seq_printf(s,
- "Rx invalid frame: non-data %lu, short %lu, large %lu\n",
+ "Rx invalid frame: non-data %lu, short %lu, large %lu, replay %lu\n",
p->stats.rx_non_data_frame,
p->stats.rx_short_frame,
- p->stats.rx_large_frame);
+ p->stats.rx_large_frame,
+ p->stats.rx_replay);
seq_puts(s, "Rx/MCS:");
for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
@@ -1397,6 +1441,118 @@ static const struct file_operations fops_sta = {
.llseek = seq_lseek,
};
+static ssize_t wil_read_file_led_cfg(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[80];
+ int n;
+
+ n = snprintf(buf, sizeof(buf),
+ "led_id is set to %d, echo 1 to enable, 0 to disable\n",
+ led_id);
+
+ n = min_t(int, n, sizeof(buf));
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ buf, n);
+}
+
+static ssize_t wil_write_file_led_cfg(struct file *file,
+ const char __user *buf_,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ int val;
+ int rc;
+
+ rc = kstrtoint_from_user(buf_, count, 0, &val);
+ if (rc) {
+ wil_err(wil, "Invalid argument\n");
+ return rc;
+ }
+
+ wil_info(wil, "%s led %d\n", val ? "Enabling" : "Disabling", led_id);
+ rc = wmi_led_cfg(wil, val);
+ if (rc) {
+ wil_info(wil, "%s led %d failed\n",
+ val ? "Enabling" : "Disabling", led_id);
+ return rc;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_led_cfg = {
+ .read = wil_read_file_led_cfg,
+ .write = wil_write_file_led_cfg,
+ .open = simple_open,
+};
+
+/* led_blink_time, write:
+ * "<blink_on_slow> <blink_off_slow> <blink_on_med> <blink_off_med> <blink_on_fast> <blink_off_fast>"
+ */
+ */
+static ssize_t wil_write_led_blink_time(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ int rc;
+ char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+ if (rc != len) {
+ kfree(kbuf);
+ return rc >= 0 ? -EIO : rc;
+ }
+
+ kbuf[len] = '\0';
+ rc = sscanf(kbuf, "%d %d %d %d %d %d",
+ &led_blink_time[WIL_LED_TIME_SLOW].on_ms,
+ &led_blink_time[WIL_LED_TIME_SLOW].off_ms,
+ &led_blink_time[WIL_LED_TIME_MED].on_ms,
+ &led_blink_time[WIL_LED_TIME_MED].off_ms,
+ &led_blink_time[WIL_LED_TIME_FAST].on_ms,
+ &led_blink_time[WIL_LED_TIME_FAST].off_ms);
+ kfree(kbuf);
+
+ if (rc < 0)
+ return rc;
+ if (rc < 6)
+ return -EINVAL;
+
+ return len;
+}
+
+static ssize_t wil_read_led_blink_time(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ static char text[400];
+
+ snprintf(text, sizeof(text),
+ "To set led blink on/off time variables write:\n"
+ "<blink_on_slow> <blink_off_slow> <blink_on_med> "
+ "<blink_off_med> <blink_on_fast> <blink_off_fast>\n"
+ "The current values are:\n"
+ "%d %d %d %d %d %d\n",
+ led_blink_time[WIL_LED_TIME_SLOW].on_ms,
+ led_blink_time[WIL_LED_TIME_SLOW].off_ms,
+ led_blink_time[WIL_LED_TIME_MED].on_ms,
+ led_blink_time[WIL_LED_TIME_MED].off_ms,
+ led_blink_time[WIL_LED_TIME_FAST].on_ms,
+ led_blink_time[WIL_LED_TIME_FAST].off_ms);
+
+ return simple_read_from_buffer(user_buf, count, ppos, text,
+ sizeof(text));
+}
+
+static const struct file_operations fops_led_blink_time = {
+ .read = wil_read_led_blink_time,
+ .write = wil_write_led_blink_time,
+ .open = simple_open,
+};
+
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@@ -1405,16 +1561,18 @@ static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
char name[32];
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
- struct debugfs_blob_wrapper *blob = &wil->blobs[i];
+ struct wil_blob_wrapper *wil_blob = &wil->blobs[i];
+ struct debugfs_blob_wrapper *blob = &wil_blob->blob;
const struct fw_map *map = &fw_mapping[i];
if (!map->name)
continue;
+ wil_blob->wil = wil;
blob->data = (void * __force)wil->csr + HOSTADDR(map->host);
blob->size = map->to - map->from;
snprintf(name, sizeof(name), "blob_%s", map->name);
- wil_debugfs_create_ioblob(name, S_IRUGO, dbg, blob);
+ wil_debugfs_create_ioblob(name, S_IRUGO, dbg, wil_blob);
}
}
@@ -1443,6 +1601,8 @@ static const struct {
{"link", S_IRUGO, &fops_link},
{"info", S_IRUGO, &fops_info},
{"recovery", S_IRUGO | S_IWUSR, &fops_recovery},
+ {"led_cfg", S_IRUGO | S_IWUSR, &fops_led_cfg},
+ {"led_blink_time", S_IRUGO | S_IWUSR, &fops_led_blink_time},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1487,6 +1647,7 @@ static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(hw_version, S_IRUGO, doff_x32),
WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),
+ WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR, doff_u8),
{},
};
@@ -1504,6 +1665,7 @@ static const struct dbg_off dbg_statics[] = {
{"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32},
{"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh,
doff_u32},
+ {"led_polarity", S_IRUGO | S_IWUSR, (ulong)&led_polarity, doff_u8},
{},
};
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 4f2ffa5c6..011e7412d 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -35,15 +35,19 @@
*
*/
-#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
+#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
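+/* mask everything except the HALP bit (bit 27) */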
+#define WIL6210_IRQ_DISABLE_NO_HALP (0xF7FFFFFFUL)
#define WIL6210_IMC_RX (BIT_DMA_EP_RX_ICR_RX_DONE | \
BIT_DMA_EP_RX_ICR_RX_HTRSH)
+#define WIL6210_IMC_RX_NO_RX_HTRSH (WIL6210_IMC_RX & \
+ (~(BIT_DMA_EP_RX_ICR_RX_HTRSH)))
#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
-#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | \
- ISR_MISC_MBOX_EVT | \
- ISR_MISC_FW_ERROR)
-
+#define WIL6210_IMC_MISC_NO_HALP (ISR_MISC_FW_READY | \
+ ISR_MISC_MBOX_EVT | \
+ ISR_MISC_FW_ERROR)
+#define WIL6210_IMC_MISC (WIL6210_IMC_MISC_NO_HALP | \
+ BIT_DMA_EP_MISC_ICR_HALP)
#define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
BIT_DMA_PSEUDO_CAUSE_TX | \
BIT_DMA_PSEUDO_CAUSE_MISC))
@@ -51,6 +55,7 @@
#if defined(CONFIG_WIL6210_ISR_COR)
/* configure to Clear-On-Read mode */
#define WIL_ICR_ICC_VALUE (0xFFFFFFFFUL)
+#define WIL_ICR_ICC_MISC_VALUE (0xF7FFFFFFUL)
static inline void wil_icr_clear(u32 x, void __iomem *addr)
{
@@ -58,6 +63,7 @@ static inline void wil_icr_clear(u32 x, void __iomem *addr)
#else /* defined(CONFIG_WIL6210_ISR_COR) */
/* configure to Write-1-to-Clear mode */
#define WIL_ICR_ICC_VALUE (0UL)
+#define WIL_ICR_ICC_MISC_VALUE (0UL)
static inline void wil_icr_clear(u32 x, void __iomem *addr)
{
@@ -86,10 +92,21 @@ static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
WIL6210_IRQ_DISABLE);
}
-static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
+static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
{
+ wil_dbg_irq(wil, "%s: mask_halp(%s)\n", __func__,
+ mask_halp ? "true" : "false");
+
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
- WIL6210_IRQ_DISABLE);
+ mask_halp ? WIL6210_IRQ_DISABLE : WIL6210_IRQ_DISABLE_NO_HALP);
+}
+
+static void wil6210_mask_halp(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
+ BIT_DMA_EP_MISC_ICR_HALP);
}
static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
@@ -109,14 +126,27 @@ void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
{
+ bool unmask_rx_htrsh = test_bit(wil_status_fwconnected, wil->status);
+
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC),
- WIL6210_IMC_RX);
+ unmask_rx_htrsh ? WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH);
}
-static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
+static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
{
+ wil_dbg_irq(wil, "%s: unmask_halp(%s)\n", __func__,
+ unmask_halp ? "true" : "false");
+
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
- WIL6210_IMC_MISC);
+ unmask_halp ? WIL6210_IMC_MISC : WIL6210_IMC_MISC_NO_HALP);
+}
+
+static void wil6210_unmask_halp(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
+ BIT_DMA_EP_MISC_ICR_HALP);
}
static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
@@ -134,7 +164,7 @@ void wil_mask_irq(struct wil6210_priv *wil)
wil6210_mask_irq_tx(wil);
wil6210_mask_irq_rx(wil);
- wil6210_mask_irq_misc(wil);
+ wil6210_mask_irq_misc(wil, true);
wil6210_mask_irq_pseudo(wil);
}
@@ -147,12 +177,12 @@ void wil_unmask_irq(struct wil6210_priv *wil)
wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
- WIL_ICR_ICC_VALUE);
+ WIL_ICR_ICC_MISC_VALUE);
wil6210_unmask_irq_pseudo(wil);
wil6210_unmask_irq_tx(wil);
wil6210_unmask_irq_rx(wil);
- wil6210_unmask_irq_misc(wil);
+ wil6210_unmask_irq_misc(wil, true);
}
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
@@ -228,11 +258,8 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
*/
if (likely(isr & (BIT_DMA_EP_RX_ICR_RX_DONE |
BIT_DMA_EP_RX_ICR_RX_HTRSH))) {
- wil_dbg_irq(wil, "RX done\n");
-
- if (unlikely(isr & BIT_DMA_EP_RX_ICR_RX_HTRSH))
- wil_err_ratelimited(wil,
- "Received \"Rx buffer is in risk of overflow\" interrupt\n");
+ wil_dbg_irq(wil, "RX done / RX_HTRSH received, ISR (0x%x)\n",
+ isr);
isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
BIT_DMA_EP_RX_ICR_RX_HTRSH);
@@ -344,7 +371,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
return IRQ_NONE;
}
- wil6210_mask_irq_misc(wil);
+ wil6210_mask_irq_misc(wil, false);
if (isr & ISR_MISC_FW_ERROR) {
u32 fw_assert_code = wil_r(wil, RGF_FW_ASSERT_CODE);
@@ -372,12 +399,19 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
isr &= ~ISR_MISC_FW_READY;
}
+ if (isr & BIT_DMA_EP_MISC_ICR_HALP) {
+ wil_dbg_irq(wil, "%s: HALP IRQ invoked\n", __func__);
+ wil6210_mask_halp(wil);
+ isr &= ~BIT_DMA_EP_MISC_ICR_HALP;
+ complete(&wil->halp.comp);
+ }
+
wil->isr_misc = isr;
if (isr) {
return IRQ_WAKE_THREAD;
} else {
- wil6210_unmask_irq_misc(wil);
+ wil6210_unmask_irq_misc(wil, false);
return IRQ_HANDLED;
}
}
@@ -391,12 +425,14 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
if (isr & ISR_MISC_FW_ERROR) {
+ wil->recovery_state = fw_recovery_pending;
wil_fw_core_dump(wil);
wil_notify_fw_error(wil);
isr &= ~ISR_MISC_FW_ERROR;
- if (wil->platform_ops.notify_crash) {
+ if (wil->platform_ops.notify) {
wil_err(wil, "notify platform driver about FW crash");
- wil->platform_ops.notify_crash(wil->platform_handle);
+ wil->platform_ops.notify(wil->platform_handle,
+ WIL_PLATFORM_EVT_FW_CRASH);
} else {
wil_fw_error_recovery(wil);
}
@@ -412,7 +448,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
wil->isr_misc = 0;
- wil6210_unmask_irq_misc(wil);
+ wil6210_unmask_irq_misc(wil, false);
return IRQ_HANDLED;
}
@@ -554,6 +590,23 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
wmb(); /* make sure write completed */
}
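+/* request the HALP interrupt by setting its ICS bit; the misc ISR
+ * completes wil->halp.comp when it fires
+ */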
+void wil6210_set_halp(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
+ BIT_DMA_EP_MISC_ICR_HALP);
+}
+
+void wil6210_clear_halp(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
+ BIT_DMA_EP_MISC_ICR_HALP);
+ wil6210_unmask_halp(wil);
+}
+
int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
{
int rc;
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
index f7f948621..630380078 100644
--- a/drivers/net/wireless/ath/wil6210/ioctl.c
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -161,13 +161,20 @@ out_free:
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd)
{
+ int ret;
+
switch (cmd) {
case WIL_IOCTL_MEMIO:
- return wil_ioc_memio_dword(wil, data);
+ ret = wil_ioc_memio_dword(wil, data);
+ break;
case WIL_IOCTL_MEMIO_BLOCK:
- return wil_ioc_memio_block(wil, data);
+ ret = wil_ioc_memio_block(wil, data);
+ break;
default:
wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd);
return -ENOIOCTLCMD;
}
+
+ wil_dbg_ioctl(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
+ return ret;
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 78ba6e04c..8e31d755b 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -23,10 +23,17 @@
#include "wmi.h"
#include "boot_loader.h"
+#define WAIT_FOR_HALP_VOTE_MS 100
+
bool debug_fw; /* = false; */
module_param(debug_fw, bool, S_IRUGO);
MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
+static bool oob_mode;
+module_param(oob_mode, bool, S_IRUGO);
+MODULE_PARM_DESC(oob_mode,
+ " enable out of the box (OOB) mode in FW, for diagnostics and certification");
+
bool no_fw_recovery;
module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
@@ -127,6 +134,14 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
*d++ = __raw_readl(s++);
}
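+/* like wil_memcpy_fromio_32(), but holds a HALP vote around the copy */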
+void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
+ const volatile void __iomem *src, size_t count)
+{
+ wil_halp_vote(wil);
+ wil_memcpy_fromio_32(dst, src, count);
+ wil_halp_unvote(wil);
+}
+
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count)
{
@@ -137,6 +152,15 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
__raw_writel(*s++, d++);
}
+void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
+ volatile void __iomem *dst,
+ const void *src, size_t count)
+{
+ wil_halp_vote(wil);
+ wil_memcpy_toio_32(dst, src, count);
+ wil_halp_unvote(wil);
+}
+
static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
u16 reason_code, bool from_event)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
@@ -149,7 +173,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
might_sleep();
wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
sta->status);
-
+ /* inform upper/lower layers */
if (sta->status != wil_sta_unused) {
if (!from_event)
wmi_disconnect_sta(wil, sta->addr, reason_code, true);
@@ -165,7 +189,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
}
sta->status = wil_sta_unused;
}
-
+ /* reorder buffers */
for (i = 0; i < WIL_STA_TID_NUM; i++) {
struct wil_tid_ampdu_rx *r;
@@ -177,13 +201,30 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
spin_unlock_bh(&sta->tid_rx_lock);
}
+ /* crypto context */
+ memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
+ memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
+ /* release vrings */
for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
if (wil->vring2cid_tid[i][0] == cid)
wil_vring_fini_tx(wil, i);
}
+ /* statistics */
memset(&sta->stats, 0, sizeof(sta->stats));
}
+static bool wil_ap_is_connected(struct wil6210_priv *wil)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ if (wil->sta[i].status == wil_sta_connected)
+ return true;
+ }
+
+ return false;
+}
+
static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event)
{
@@ -237,6 +278,11 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
}
clear_bit(wil_status_fwconnecting, wil->status);
break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ if (!wil_ap_is_connected(wil))
+ clear_bit(wil_status_fwconnected, wil->status);
+ break;
default:
break;
}
@@ -300,6 +346,11 @@ void wil_set_recovery_state(struct wil6210_priv *wil, int state)
wake_up_interruptible(&wil->wq);
}
+bool wil_is_recovery_blocked(struct wil6210_priv *wil)
+{
+ return no_fw_recovery && (wil->recovery_state == fw_recovery_pending);
+}
+
static void wil_fw_error_worker(struct work_struct *work)
{
struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
@@ -440,27 +491,26 @@ int wil_priv_init(struct wil6210_priv *wil)
mutex_init(&wil->mutex);
mutex_init(&wil->wmi_mutex);
- mutex_init(&wil->back_rx_mutex);
- mutex_init(&wil->back_tx_mutex);
mutex_init(&wil->probe_client_mutex);
+ mutex_init(&wil->p2p_wdev_mutex);
+ mutex_init(&wil->halp.lock);
init_completion(&wil->wmi_ready);
init_completion(&wil->wmi_call);
+ init_completion(&wil->halp.comp);
wil->bcast_vring = -1;
setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
+ setup_timer(&wil->p2p.discovery_timer, wil_p2p_discovery_timer_fn,
+ (ulong)wil);
INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
- INIT_WORK(&wil->back_rx_worker, wil_back_rx_worker);
- INIT_WORK(&wil->back_tx_worker, wil_back_tx_worker);
INIT_WORK(&wil->probe_client_worker, wil_probe_client_worker);
INIT_LIST_HEAD(&wil->pending_wmi_ev);
- INIT_LIST_HEAD(&wil->back_rx_pending);
- INIT_LIST_HEAD(&wil->back_tx_pending);
INIT_LIST_HEAD(&wil->probe_client_pending);
spin_lock_init(&wil->wmi_ev_lock);
init_waitqueue_head(&wil->wq);
@@ -514,16 +564,14 @@ void wil_priv_deinit(struct wil6210_priv *wil)
wil_set_recovery_state(wil, fw_recovery_idle);
del_timer_sync(&wil->scan_timer);
+ del_timer_sync(&wil->p2p.discovery_timer);
cancel_work_sync(&wil->disconnect_worker);
cancel_work_sync(&wil->fw_error_worker);
+ cancel_work_sync(&wil->p2p.discovery_expired_work);
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
mutex_unlock(&wil->mutex);
wmi_event_flush(wil);
- wil_back_rx_flush(wil);
- cancel_work_sync(&wil->back_rx_worker);
- wil_back_tx_flush(wil);
- cancel_work_sync(&wil->back_tx_worker);
wil_probe_client_flush(wil);
cancel_work_sync(&wil->probe_client_worker);
destroy_workqueue(wil->wq_service);
@@ -542,6 +590,15 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
wil_w(wil, RGF_USER_USER_CPU_0, 1);
}
+static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable)
+{
+ wil_info(wil, "%s: enable=%d\n", __func__, enable);
+ if (enable)
+ wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
+ else
+ wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
+}
+
static int wil_target_reset(struct wil6210_priv *wil)
{
int delay = 0;
@@ -637,6 +694,7 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
static int wil_get_bl_info(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
+ struct wiphy *wiphy = wil_to_wiphy(wil);
union {
struct bl_dedicated_registers_v0 bl0;
struct bl_dedicated_registers_v1 bl1;
@@ -681,6 +739,7 @@ static int wil_get_bl_info(struct wil6210_priv *wil)
}
ether_addr_copy(ndev->perm_addr, mac);
+ ether_addr_copy(wiphy->perm_addr, mac);
if (!is_valid_ether_addr(ndev->dev_addr))
ether_addr_copy(ndev->dev_addr, mac);
@@ -767,12 +826,24 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (wil->hw_version == HW_VER_UNKNOWN)
return -ENODEV;
+ if (wil->platform_ops.notify) {
+ rc = wil->platform_ops.notify(wil->platform_handle,
+ WIL_PLATFORM_EVT_PRE_RESET);
+ if (rc)
+ wil_err(wil,
+ "%s: PRE_RESET platform notify failed, rc %d\n",
+ __func__, rc);
+ }
+
set_bit(wil_status_resetting, wil->status);
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
wil_bcast_fini(wil);
+ /* Disable device led before reset */
+ wmi_led_cfg(wil, false);
+
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
bitmap_zero(wil->status, wil_status_last);
@@ -807,6 +878,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (rc)
return rc;
+ wil_set_oob_mode(wil, oob_mode);
if (load_fw) {
wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME,
WIL_FW2_NAME);
@@ -839,6 +911,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil->ap_isolate = 0;
reinit_completion(&wil->wmi_ready);
reinit_completion(&wil->wmi_call);
+ reinit_completion(&wil->halp.comp);
if (load_fw) {
wil_configure_interrupt_moderation(wil);
@@ -846,8 +919,27 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* we just started MAC, wait for FW ready */
rc = wil_wait_for_fw_ready(wil);
- if (rc == 0) /* check FW is responsive */
- rc = wmi_echo(wil);
+ if (rc)
+ return rc;
+
+ /* check FW is responsive */
+ rc = wmi_echo(wil);
+ if (rc) {
+ wil_err(wil, "%s: wmi_echo failed, rc %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ if (wil->platform_ops.notify) {
+ rc = wil->platform_ops.notify(wil->platform_handle,
+ WIL_PLATFORM_EVT_FW_RDY);
+ if (rc) {
+ wil_err(wil,
+ "%s: FW_RDY notify failed, rc %d\n",
+ __func__, rc);
+ rc = 0;
+ }
+ }
}
return rc;
@@ -954,6 +1046,8 @@ int __wil_down(struct wil6210_priv *wil)
}
wil_enable_irq(wil);
+ (void)wil_p2p_stop_discovery(wil);
+
if (wil->scan_request) {
wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
wil->scan_request);
@@ -1008,3 +1102,51 @@ int wil_find_cid(struct wil6210_priv *wil, const u8 *mac)
return rc;
}
+
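+/* HALP (High Access Latency Policy) voting: the first vote asserts the
+ * HALP interrupt and blocks, up to WAIT_FOR_HALP_VOTE_MS, until the misc
+ * ISR acknowledges it; the last unvote releases it. halp.lock protects
+ * ref_cnt.
+ */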
+void wil_halp_vote(struct wil6210_priv *wil)
+{
+ unsigned long rc;
+ unsigned long to_jiffies = msecs_to_jiffies(WAIT_FOR_HALP_VOTE_MS);
+
+ mutex_lock(&wil->halp.lock);
+
+ wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
+
+ if (++wil->halp.ref_cnt == 1) {
+ wil6210_set_halp(wil);
+ rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
+ if (!rc)
+ wil_err(wil, "%s: HALP vote timed out\n", __func__);
+ else
+ wil_dbg_misc(wil,
+ "%s: HALP vote completed after %d ms\n",
+ __func__,
+ jiffies_to_msecs(to_jiffies - rc));
+ }
+
+ wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
+
+ mutex_unlock(&wil->halp.lock);
+}
+
+void wil_halp_unvote(struct wil6210_priv *wil)
+{
+ WARN_ON(wil->halp.ref_cnt == 0);
+
+ mutex_lock(&wil->halp.lock);
+
+ wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
+
+ if (--wil->halp.ref_cnt == 0) {
+ wil6210_clear_halp(wil);
+ wil_dbg_misc(wil, "%s: HALP unvote\n", __func__);
+ }
+
+ wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
+
+ mutex_unlock(&wil->halp.lock);
+}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index ecc3c1bda..098409753 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -60,11 +60,7 @@ static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- int ret = wil_ioctl(wil, ifr->ifr_data, cmd);
-
- wil_dbg_misc(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
-
- return ret;
+ return wil_ioctl(wil, ifr->ifr_data, cmd);
}
static const struct net_device_ops wil_netdev_ops = {
@@ -149,6 +145,7 @@ void *wil_if_alloc(struct device *dev)
wil = wdev_to_wil(wdev);
wil->wdev = wdev;
+ wil->radio_wdev = wdev;
wil_dbg_misc(wil, "%s()\n", __func__);
@@ -160,7 +157,7 @@ void *wil_if_alloc(struct device *dev)
wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
/* default monitor channel */
- ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
+ ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels;
cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
new file mode 100644
index 000000000..1c9153894
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+#include "wmi.h"
+
+#define P2P_WILDCARD_SSID "DIRECT-"
+#define P2P_DMG_SOCIAL_CHANNEL 2
+#define P2P_SEARCH_DURATION_MS 500
+#define P2P_DEFAULT_BI 100
+
+bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request)
+{
+ return (request->n_channels == 1) &&
+ (request->channels[0]->hw_value == P2P_DMG_SOCIAL_CHANNEL);
+}
+
+void wil_p2p_discovery_timer_fn(ulong x)
+{
+ struct wil6210_priv *wil = (void *)x;
+
+ wil_dbg_misc(wil, "%s\n", __func__);
+
+ schedule_work(&wil->p2p.discovery_expired_work);
+}
+
+int wil_p2p_search(struct wil6210_priv *wil,
+ struct cfg80211_scan_request *request)
+{
+ int rc;
+ struct wil_p2p_info *p2p = &wil->p2p;
+
+ wil_dbg_misc(wil, "%s: channel %d\n",
+ __func__, P2P_DMG_SOCIAL_CHANNEL);
+
+ mutex_lock(&wil->mutex);
+
+ if (p2p->discovery_started) {
+ wil_err(wil, "%s: search failed. discovery already ongoing\n",
+ __func__);
+ rc = -EBUSY;
+ goto out;
+ }
+
+ rc = wmi_p2p_cfg(wil, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI);
+ if (rc) {
+ wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
+ goto out;
+ }
+
+ rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
+ if (rc) {
+ wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
+ goto out_stop;
+ }
+
+ /* Set application IE to probe request and probe response */
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ,
+ request->ie_len, request->ie);
+ if (rc) {
+ wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n",
+ __func__);
+ goto out_stop;
+ }
+
+ /* supplicant doesn't provide Probe Response IEs; as a workaround,
+ * reuse the Probe Request IEs
+ */
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
+ request->ie_len, request->ie);
+ if (rc) {
+ wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n",
+ __func__);
+ goto out_stop;
+ }
+
+ rc = wmi_start_search(wil);
+ if (rc) {
+ wil_err(wil, "%s: wmi_start_search failed\n", __func__);
+ goto out_stop;
+ }
+
+ p2p->discovery_started = 1;
+ INIT_WORK(&p2p->discovery_expired_work, wil_p2p_search_expired);
+ mod_timer(&p2p->discovery_timer,
+ jiffies + msecs_to_jiffies(P2P_SEARCH_DURATION_MS));
+
+out_stop:
+ if (rc)
+ wmi_stop_discovery(wil);
+
+out:
+ mutex_unlock(&wil->mutex);
+ return rc;
+}
+
+int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
+ struct ieee80211_channel *chan, u64 *cookie)
+{
+ struct wil_p2p_info *p2p = &wil->p2p;
+ u8 channel = P2P_DMG_SOCIAL_CHANNEL;
+ int rc;
+
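+ /* NULL chan means the social channel; note that the listen_chan
+ * copy below still assumes a non-NULL chan
+ */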
+ if (chan)
+ channel = chan->hw_value;
+
+ wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration);
+
+ mutex_lock(&wil->mutex);
+
+ if (p2p->discovery_started) {
+ wil_err(wil, "%s: discovery already ongoing\n", __func__);
+ rc = -EBUSY;
+ goto out;
+ }
+
+ rc = wmi_p2p_cfg(wil, channel, P2P_DEFAULT_BI);
+ if (rc) {
+ wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
+ goto out;
+ }
+
+ rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
+ if (rc) {
+ wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
+ goto out_stop;
+ }
+
+ rc = wmi_start_listen(wil);
+ if (rc) {
+ wil_err(wil, "%s: wmi_start_listen failed\n", __func__);
+ goto out_stop;
+ }
+
+ memcpy(&p2p->listen_chan, chan, sizeof(*chan));
+ *cookie = ++p2p->cookie;
+
+ p2p->discovery_started = 1;
+ INIT_WORK(&p2p->discovery_expired_work, wil_p2p_listen_expired);
+ mod_timer(&p2p->discovery_timer,
+ jiffies + msecs_to_jiffies(duration));
+
+out_stop:
+ if (rc)
+ wmi_stop_discovery(wil);
+
+out:
+ mutex_unlock(&wil->mutex);
+ return rc;
+}
+
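+/* stop an ongoing listen/search; returns non-zero if one was running */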
+u8 wil_p2p_stop_discovery(struct wil6210_priv *wil)
+{
+ struct wil_p2p_info *p2p = &wil->p2p;
+ u8 started = p2p->discovery_started;
+
+ if (p2p->discovery_started) {
+ del_timer_sync(&p2p->discovery_timer);
+ p2p->discovery_started = 0;
+ wmi_stop_discovery(wil);
+ }
+
+ return started;
+}
+
+int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie)
+{
+ struct wil_p2p_info *p2p = &wil->p2p;
+ u8 started;
+
+ mutex_lock(&wil->mutex);
+
+ if (cookie != p2p->cookie) {
+ wil_info(wil, "%s: Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
+ __func__, p2p->cookie, cookie);
+ mutex_unlock(&wil->mutex);
+ return -ENOENT;
+ }
+
+ started = wil_p2p_stop_discovery(wil);
+
+ mutex_unlock(&wil->mutex);
+
+ if (!started) {
+ wil_err(wil, "%s: listen not started\n", __func__);
+ return -ENOENT;
+ }
+
+ mutex_lock(&wil->p2p_wdev_mutex);
+ cfg80211_remain_on_channel_expired(wil->radio_wdev,
+ p2p->cookie,
+ &p2p->listen_chan,
+ GFP_KERNEL);
+ wil->radio_wdev = wil->wdev;
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ return 0;
+}
+
+void wil_p2p_listen_expired(struct work_struct *work)
+{
+ struct wil_p2p_info *p2p = container_of(work,
+ struct wil_p2p_info, discovery_expired_work);
+ struct wil6210_priv *wil = container_of(p2p,
+ struct wil6210_priv, p2p);
+ u8 started;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_lock(&wil->mutex);
+ started = wil_p2p_stop_discovery(wil);
+ mutex_unlock(&wil->mutex);
+
+ if (started) {
+ mutex_lock(&wil->p2p_wdev_mutex);
+ cfg80211_remain_on_channel_expired(wil->radio_wdev,
+ p2p->cookie,
+ &p2p->listen_chan,
+ GFP_KERNEL);
+ wil->radio_wdev = wil->wdev;
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ }
+}
+
+void wil_p2p_search_expired(struct work_struct *work)
+{
+ struct wil_p2p_info *p2p = container_of(work,
+ struct wil_p2p_info, discovery_expired_work);
+ struct wil6210_priv *wil = container_of(p2p,
+ struct wil6210_priv, p2p);
+ u8 started;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_lock(&wil->mutex);
+ started = wil_p2p_stop_discovery(wil);
+ mutex_unlock(&wil->mutex);
+
+ if (started) {
+ mutex_lock(&wil->p2p_wdev_mutex);
+ cfg80211_scan_done(wil->scan_request, 0);
+ wil->scan_request = NULL;
+ wil->radio_wdev = wil->wdev;
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ }
+}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index e36f2a0c8..aeb72c438 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -275,6 +275,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
if (wil->platform_ops.uninit)
wil->platform_ops.uninit(wil->platform_handle);
+ wil_p2p_wdev_free(wil);
wil_if_free(wil);
}
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 32031e7a1..19ed127d4 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -291,35 +291,15 @@ static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
return min(max_agg_size, req_agg_wsize);
}
-/* Block Ack - Rx side (recipient */
+/* Block Ack - Rx side (recipient) */
int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl)
-{
- struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL);
-
- if (!req)
- return -ENOMEM;
-
- req->cidxtid = cidxtid;
- req->dialog_token = dialog_token;
- req->ba_param_set = le16_to_cpu(ba_param_set);
- req->ba_timeout = le16_to_cpu(ba_timeout);
- req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl);
-
- mutex_lock(&wil->back_rx_mutex);
- list_add_tail(&req->list, &wil->back_rx_pending);
- mutex_unlock(&wil->back_rx_mutex);
-
- queue_work(wil->wq_service, &wil->back_rx_worker);
-
- return 0;
-}
-
-static void wil_back_rx_handle(struct wil6210_priv *wil,
- struct wil_back_rx *req)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
+ u16 param_set = le16_to_cpu(ba_param_set);
+ u16 agg_timeout = le16_to_cpu(ba_timeout);
+ u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl);
struct wil_sta_info *sta;
u8 cid, tid;
u16 agg_wsize = 0;
@@ -328,34 +308,35 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
* bits 2..5: TID
* bits 6..15: buffer size
*/
- u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15);
- bool agg_amsdu = !!(req->ba_param_set & BIT(0));
- int ba_policy = req->ba_param_set & BIT(1);
- u16 agg_timeout = req->ba_timeout;
+ u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
+ bool agg_amsdu = !!(param_set & BIT(0));
+ int ba_policy = param_set & BIT(1);
u16 status = WLAN_STATUS_SUCCESS;
- u16 ssn = req->ba_seq_ctrl >> 4;
+ u16 ssn = seq_ctrl >> 4;
struct wil_tid_ampdu_rx *r;
- int rc;
+ int rc = 0;
might_sleep();
- parse_cidxtid(req->cidxtid, &cid, &tid);
+ parse_cidxtid(cidxtid, &cid, &tid);
/* sanity checks */
if (cid >= WIL6210_MAX_CID) {
wil_err(wil, "BACK: invalid CID %d\n", cid);
- return;
+ rc = -EINVAL;
+ goto out;
}
sta = &wil->sta[cid];
if (sta->status != wil_sta_connected) {
wil_err(wil, "BACK: CID %d not connected\n", cid);
- return;
+ rc = -EINVAL;
+ goto out;
}
wil_dbg_wmi(wil,
"ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
- cid, sta->addr, tid, req_agg_wsize, req->ba_timeout,
- agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn);
+ cid, sta->addr, tid, req_agg_wsize, agg_timeout,
+ agg_amsdu ? "+" : "-", !!ba_policy, dialog_token, ssn);
/* apply policies */
if (ba_policy) {
@@ -365,10 +346,13 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
if (status == WLAN_STATUS_SUCCESS)
agg_wsize = wil_agg_size(wil, req_agg_wsize);
- rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status,
+ rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status,
agg_amsdu, agg_wsize, agg_timeout);
- if (rc || (status != WLAN_STATUS_SUCCESS))
- return;
+ if (rc || (status != WLAN_STATUS_SUCCESS)) {
+ wil_err(wil, "%s: do not apply ba, rc(%d), status(%d)\n",
+ __func__, rc, status);
+ goto out;
+ }
/* apply */
r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
@@ -376,143 +360,37 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
sta->tid_rx[tid] = r;
spin_unlock_bh(&sta->tid_rx_lock);
-}
-
-void wil_back_rx_flush(struct wil6210_priv *wil)
-{
- struct wil_back_rx *evt, *t;
- wil_dbg_misc(wil, "%s()\n", __func__);
-
- mutex_lock(&wil->back_rx_mutex);
-
- list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) {
- list_del(&evt->list);
- kfree(evt);
- }
-
- mutex_unlock(&wil->back_rx_mutex);
-}
-
-/* Retrieve next ADDBA request from the pending list */
-static struct list_head *next_back_rx(struct wil6210_priv *wil)
-{
- struct list_head *ret = NULL;
-
- mutex_lock(&wil->back_rx_mutex);
-
- if (!list_empty(&wil->back_rx_pending)) {
- ret = wil->back_rx_pending.next;
- list_del(ret);
- }
-
- mutex_unlock(&wil->back_rx_mutex);
-
- return ret;
-}
-
-void wil_back_rx_worker(struct work_struct *work)
-{
- struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
- back_rx_worker);
- struct wil_back_rx *evt;
- struct list_head *lh;
-
- while ((lh = next_back_rx(wil)) != NULL) {
- evt = list_entry(lh, struct wil_back_rx, list);
-
- wil_back_rx_handle(wil, evt);
- kfree(evt);
- }
+out:
+ return rc;
}
-/* BACK - Tx (originator) side */
-static void wil_back_tx_handle(struct wil6210_priv *wil,
- struct wil_back_tx *req)
+/* BACK - Tx side (originator) */
+int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
{
- struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid];
- int rc;
+ u8 agg_wsize = wil_agg_size(wil, wsize);
+ u16 agg_timeout = 0;
+ struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
+ int rc = 0;
if (txdata->addba_in_progress) {
wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
- req->ringid);
- return;
+ ringid);
+ goto out;
}
if (txdata->agg_wsize) {
wil_dbg_misc(wil,
- "ADDBA for vring[%d] already established wsize %d\n",
- req->ringid, txdata->agg_wsize);
- return;
+ "ADDBA for vring[%d] already done for wsize %d\n",
+ ringid, txdata->agg_wsize);
+ goto out;
}
txdata->addba_in_progress = true;
- rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout);
- if (rc)
+ rc = wmi_addba(wil, ringid, agg_wsize, agg_timeout);
+ if (rc) {
+ wil_err(wil, "%s: wmi_addba failed, rc (%d)", __func__, rc);
txdata->addba_in_progress = false;
-}
-
-static struct list_head *next_back_tx(struct wil6210_priv *wil)
-{
- struct list_head *ret = NULL;
-
- mutex_lock(&wil->back_tx_mutex);
-
- if (!list_empty(&wil->back_tx_pending)) {
- ret = wil->back_tx_pending.next;
- list_del(ret);
- }
-
- mutex_unlock(&wil->back_tx_mutex);
-
- return ret;
-}
-
-void wil_back_tx_worker(struct work_struct *work)
-{
- struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
- back_tx_worker);
- struct wil_back_tx *evt;
- struct list_head *lh;
-
- while ((lh = next_back_tx(wil)) != NULL) {
- evt = list_entry(lh, struct wil_back_tx, list);
-
- wil_back_tx_handle(wil, evt);
- kfree(evt);
}
-}
-
-void wil_back_tx_flush(struct wil6210_priv *wil)
-{
- struct wil_back_tx *evt, *t;
-
- wil_dbg_misc(wil, "%s()\n", __func__);
-
- mutex_lock(&wil->back_tx_mutex);
-
- list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) {
- list_del(&evt->list);
- kfree(evt);
- }
-
- mutex_unlock(&wil->back_tx_mutex);
-}
-
-int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
-{
- struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL);
-
- if (!req)
- return -ENOMEM;
- req->ringid = ringid;
- req->agg_wsize = wil_agg_size(wil, wsize);
- req->agg_timeout = 0;
-
- mutex_lock(&wil->back_tx_mutex);
- list_add_tail(&req->list, &wil->back_tx_pending);
- mutex_unlock(&wil->back_tx_mutex);
-
- queue_work(wil->wq_service, &wil->back_tx_worker);
-
- return 0;
+out:
+ return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index e59239d22..c4db2a9d9 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -37,39 +37,40 @@ static inline void trace_ ## name(proto) {}
#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
DECLARE_EVENT_CLASS(wil6210_wmi,
- TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+ TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
TP_ARGS(wmi, buf, buf_len),
TP_STRUCT__entry(
__field(u8, mid)
- __field(u16, id)
- __field(u32, timestamp)
+ __field(u16, command_id)
+ __field(u32, fw_timestamp)
__field(u16, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__entry->mid = wmi->mid;
- __entry->id = le16_to_cpu(wmi->id);
- __entry->timestamp = le32_to_cpu(wmi->timestamp);
+ __entry->command_id = le16_to_cpu(wmi->command_id);
+ __entry->fw_timestamp = le32_to_cpu(wmi->fw_timestamp);
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"MID %d id 0x%04x len %d timestamp %d",
- __entry->mid, __entry->id, __entry->buf_len, __entry->timestamp
+ __entry->mid, __entry->command_id, __entry->buf_len,
+ __entry->fw_timestamp
)
);
DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
- TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+ TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
TP_ARGS(wmi, buf, buf_len)
);
DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
- TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+ TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
TP_ARGS(wmi, buf, buf_len)
);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 6af20903c..a4e43796a 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -549,6 +549,60 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
return rc;
}
+/**
+ * reverse_memcmp - Compare two areas of memory, in reverse order
+ * @cs: One area of memory
+ * @ct: Another area of memory
+ * @count: The size of the area.
+ *
+ * Cut'n'paste from original memcmp (see lib/string.c)
+ * with minimal modifications
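+ *
+ * Return: 0 if the areas match, otherwise the difference between the
+ * highest-addressed pair of bytes that differ.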
+ */
+static int reverse_memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res = 0;
+
+ for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
+ --su1, --su2, count--) {
+ res = *su1 - *su2;
+ if (res)
+ break;
+ }
+ return res;
+}
+
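+/* check the GCMP PN of a received frame: the key must be installed and
+ * the PN must be strictly larger than the last one seen for this
+ * CID/TID/key_id (group-addressed frames are tracked separately), else
+ * the frame is treated as a replay
+ */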
+static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+ int cid = wil_rxdesc_cid(d);
+ int tid = wil_rxdesc_tid(d);
+ int key_id = wil_rxdesc_key_id(d);
+ int mc = wil_rxdesc_mcast(d);
+ struct wil_sta_info *s = &wil->sta[cid];
+ struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
+ &s->tid_crypto_rx[tid];
+ struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
+ const u8 *pn = (u8 *)&d->mac.pn_15_0;
+
+ if (!cc->key_set) {
+ wil_err_ratelimited(wil,
+ "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
+ cid, tid, mc, key_id);
+ return -EINVAL;
+ }
+
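+ /* PN bytes are stored little-endian; compare from the most
+ * significant (last) byte down
+ */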
+ if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
+ wil_err_ratelimited(wil,
+ "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
+ cid, tid, mc, key_id, pn, cc->pn);
+ return -EINVAL;
+ }
+ memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
+
+ return 0;
+}
+
/*
* Pass Rx packet to the netif. Update statistics.
* Called in softirq context (NAPI poll).
@@ -561,6 +615,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
unsigned int len = skb->len;
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
+ int security = wil_rxdesc_security(d);
struct ethhdr *eth = (void *)skb->data;
/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
* is not suitable, need to look at data
@@ -586,6 +641,13 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
skb_orphan(skb);
+ if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
+ rc = GRO_DROP;
+ dev_kfree_skb(skb);
+ stats->rx_replay++;
+ goto stats;
+ }
+
if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
if (mcast) {
/* send multicast frames both to higher layers in
@@ -627,6 +689,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
len, gro_res_str[rc]);
}
+stats:
/* statistics. rc set to GRO_NORMAL for AP bridging */
if (unlikely(rc == GRO_DROP)) {
ndev->stats.rx_dropped++;
@@ -757,7 +820,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
},
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_vring_cfg_done_event cmd;
} __packed reply;
struct vring *vring = &wil->vring_tx[id];
@@ -834,7 +897,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
},
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_vring_cfg_done_event cmd;
} __packed reply;
struct vring *vring = &wil->vring_tx[id];
@@ -1696,7 +1759,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
goto drop;
}
if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
- wil_err_ratelimited(wil, "FW not connected\n");
+ wil_dbg_ratelimited(wil, "FW not connected, packet dropped\n");
goto drop;
}
if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index ee7c7b4b9..fcdffaa82 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -480,6 +480,16 @@ static inline int wil_rxdesc_ext_subtype(struct vring_rx_desc *d)
return WIL_GET_BITS(d->mac.d0, 28, 31);
}
+static inline int wil_rxdesc_key_id(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 4, 5);
+}
+
+static inline int wil_rxdesc_security(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 7, 7);
+}
+
static inline int wil_rxdesc_ds_bits(struct vring_rx_desc *d)
{
return WIL_GET_BITS(d->mac.d1, 8, 9);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 5eb41e1f6..1abdd45c3 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -22,6 +22,7 @@
#include <net/cfg80211.h>
#include <linux/timex.h>
#include <linux/types.h>
+#include "wmi.h"
#include "wil_platform.h"
extern bool no_fw_recovery;
@@ -131,6 +132,7 @@ struct RGF_ICR {
/* registers - FW addresses */
#define RGF_USER_USAGE_1 (0x880004)
#define RGF_USER_USAGE_6 (0x880018)
+ #define BIT_USER_OOB_MODE BIT(31)
#define RGF_USER_HW_MACHINE_STATE (0x8801dc)
#define HW_MACHINE_BOOT_DONE (0x3fffffd)
#define RGF_USER_USER_CPU_0 (0x8801e0)
@@ -166,6 +168,7 @@ struct RGF_ICR {
#define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */
#define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0)
#define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1)
+ #define BIT_DMA_EP_MISC_ICR_HALP BIT(27)
#define BIT_DMA_EP_MISC_ICR_FW_INT(n) BIT(28+n) /* n = [0..3] */
/* Legacy interrupt moderation control (before Sparrow v2)*/
@@ -334,29 +337,11 @@ struct wil6210_mbox_hdr {
/* max. value for wil6210_mbox_hdr.len */
#define MAX_MBOXITEM_SIZE (240)
-/**
- * struct wil6210_mbox_hdr_wmi - WMI header
- *
- * @mid: MAC ID
- * 00 - default, created by FW
- * 01..0f - WiFi ports, driver to create
- * 10..fe - debug
- * ff - broadcast
- * @id: command/event ID
- * @timestamp: FW fills for events, free-running msec timer
- */
-struct wil6210_mbox_hdr_wmi {
- u8 mid;
- u8 reserved;
- __le16 id;
- __le32 timestamp;
-} __packed;
-
struct pending_wmi_event {
struct list_head list;
struct {
struct wil6210_mbox_hdr hdr;
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
u8 data[0];
} __packed event;
};
@@ -455,6 +440,29 @@ struct wil_tid_ampdu_rx {
bool first_time; /* is it 1-st time this buffer used? */
};
+/**
+ * struct wil_tid_crypto_rx_single - TID crypto information (Rx).
+ *
+ * @pn: GCMP PN for the session
+ * @key_set: valid key present
+ */
+struct wil_tid_crypto_rx_single {
+ u8 pn[IEEE80211_GCMP_PN_LEN];
+ bool key_set;
+};
+
+struct wil_tid_crypto_rx {
+ struct wil_tid_crypto_rx_single key_id[4];
+};
+
+struct wil_p2p_info {
+ struct ieee80211_channel listen_chan;
+ u8 discovery_started;
+ u64 cookie;
+ struct timer_list discovery_timer; /* listen/search duration */
+ struct work_struct discovery_expired_work; /* listen/search expire */
+};
+
enum wil_sta_status {
wil_sta_unused = 0,
wil_sta_conn_pending = 1,
@@ -474,6 +482,7 @@ struct wil_net_stats {
unsigned long rx_non_data_frame;
unsigned long rx_short_frame;
unsigned long rx_large_frame;
+ unsigned long rx_replay;
u16 last_mcs_rx;
u64 rx_per_mcs[WIL_MCS_MAX + 1];
};
@@ -495,6 +504,8 @@ struct wil_sta_info {
spinlock_t tid_rx_lock; /* guarding tid_rx array */
unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)];
unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
+ struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM];
+ struct wil_tid_crypto_rx group_crypto_rx;
};
enum {
@@ -507,24 +518,6 @@ enum {
hw_capability_last
};
-struct wil_back_rx {
- struct list_head list;
- /* request params, converted to CPU byte order - what we asked for */
- u8 cidxtid;
- u8 dialog_token;
- u16 ba_param_set;
- u16 ba_timeout;
- u16 ba_seq_ctrl;
-};
-
-struct wil_back_tx {
- struct list_head list;
- /* request params, converted to CPU byte order - what we asked for */
- u8 ringid;
- u8 agg_wsize;
- u16 agg_timeout;
-};
-
struct wil_probe_client_req {
struct list_head list;
u64 cookie;
@@ -542,6 +535,41 @@ struct pmc_ctx {
int descriptor_size;
};
+struct wil_halp {
+ struct mutex lock; /* protect halp ref_cnt */
+ unsigned int ref_cnt;
+ struct completion comp;
+};
+
+struct wil_blob_wrapper {
+ struct wil6210_priv *wil;
+ struct debugfs_blob_wrapper blob;
+};
+
+#define WIL_LED_MAX_ID (2)
+#define WIL_LED_INVALID_ID (0xF)
+#define WIL_LED_BLINK_ON_SLOW_MS (300)
+#define WIL_LED_BLINK_OFF_SLOW_MS (300)
+#define WIL_LED_BLINK_ON_MED_MS (200)
+#define WIL_LED_BLINK_OFF_MED_MS (200)
+#define WIL_LED_BLINK_ON_FAST_MS (100)
+#define WIL_LED_BLINK_OFF_FAST_MS (100)
+enum {
+ WIL_LED_TIME_SLOW = 0,
+ WIL_LED_TIME_MED,
+ WIL_LED_TIME_FAST,
+ WIL_LED_TIME_LAST,
+};
+
+struct blink_on_off_time {
+ u32 on_ms;
+ u32 off_ms;
+};
+
+extern struct blink_on_off_time led_blink_time[WIL_LED_TIME_LAST];
+extern u8 led_id;
+extern u8 led_polarity;
+
struct wil6210_priv {
struct pci_dev *pdev;
struct wireless_dev *wdev;
@@ -595,13 +623,6 @@ struct wil6210_priv {
spinlock_t wmi_ev_lock;
struct napi_struct napi_rx;
struct napi_struct napi_tx;
- /* BACK */
- struct list_head back_rx_pending;
- struct mutex back_rx_mutex; /* protect @back_rx_pending */
- struct work_struct back_rx_worker;
- struct list_head back_tx_pending;
- struct mutex back_tx_mutex; /* protect @back_tx_pending */
- struct work_struct back_tx_worker;
/* keep alive */
struct list_head probe_client_pending;
struct mutex probe_client_mutex; /* protect @probe_client_pending */
@@ -621,12 +642,26 @@ struct wil6210_priv {
atomic_t isr_count_rx, isr_count_tx;
/* debugfs */
struct dentry *debug;
- struct debugfs_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
+ struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
+ u8 discovery_mode;
void *platform_handle;
struct wil_platform_ops platform_ops;
struct pmc_ctx pmc;
+
+ bool pbss;
+
+ struct wil_p2p_info p2p;
+
+ /* P2P_DEVICE vif */
+ struct wireless_dev *p2p_wdev;
+ struct mutex p2p_wdev_mutex; /* protect @p2p_wdev */
+ struct wireless_dev *radio_wdev;
+
+ /* High Access Latency Policy voting */
+ struct wil_halp halp;
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -640,11 +675,13 @@ struct wil6210_priv {
__printf(2, 3)
void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...);
__printf(2, 3)
-void wil_err(struct wil6210_priv *wil, const char *fmt, ...);
+void __wil_err(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
+void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...);
__printf(2, 3)
-void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...);
+void __wil_info(struct wil6210_priv *wil, const char *fmt, ...);
__printf(2, 3)
-void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
+void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...);
#define wil_dbg(wil, fmt, arg...) do { \
netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \
wil_dbg_trace(wil, fmt, ##arg); \
@@ -655,6 +692,10 @@ void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
#define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
#define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
#define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg)
+#define wil_err(wil, fmt, arg...) __wil_err(wil, "%s: " fmt, __func__, ##arg)
+#define wil_info(wil, fmt, arg...) __wil_info(wil, "%s: " fmt, __func__, ##arg)
+#define wil_err_ratelimited(wil, fmt, arg...) \
+ __wil_err_ratelimited(wil, "%s: " fmt, __func__, ##arg)
/* target operations */
/* register read */
@@ -712,6 +753,12 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
size_t count);
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count);
+void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
+ const volatile void __iomem *src,
+ size_t count);
+void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
+ volatile void __iomem *dst,
+ const void *src, size_t count);
void *wil_if_alloc(struct device *dev);
void wil_if_free(struct wil6210_priv *wil);
@@ -722,6 +769,7 @@ void wil_priv_deinit(struct wil6210_priv *wil);
int wil_reset(struct wil6210_priv *wil, bool no_fw);
void wil_fw_error_recovery(struct wil6210_priv *wil);
void wil_set_recovery_state(struct wil6210_priv *wil, int state);
+bool wil_is_recovery_blocked(struct wil6210_priv *wil);
int wil_up(struct wil6210_priv *wil);
int __wil_up(struct wil6210_priv *wil);
int wil_down(struct wil6210_priv *wil);
@@ -752,7 +800,6 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
-int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
@@ -765,11 +812,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl);
-void wil_back_rx_worker(struct work_struct *work);
-void wil_back_rx_flush(struct wil6210_priv *wil);
int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize);
-void wil_back_tx_worker(struct work_struct *work);
-void wil_back_tx_flush(struct wil6210_priv *wil);
void wil6210_clear_irq(struct wil6210_priv *wil);
int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
@@ -779,6 +822,25 @@ void wil_unmask_irq(struct wil6210_priv *wil);
void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
void wil_disable_irq(struct wil6210_priv *wil);
void wil_enable_irq(struct wil6210_priv *wil);
+
+/* P2P */
+bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request);
+void wil_p2p_discovery_timer_fn(ulong x);
+int wil_p2p_search(struct wil6210_priv *wil,
+ struct cfg80211_scan_request *request);
+int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
+ struct ieee80211_channel *chan, u64 *cookie);
+u8 wil_p2p_stop_discovery(struct wil6210_priv *wil);
+int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie);
+void wil_p2p_listen_expired(struct work_struct *work);
+void wil_p2p_search_expired(struct work_struct *work);
+
+/* WMI for P2P */
+int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi);
+int wmi_start_listen(struct wil6210_priv *wil);
+int wmi_start_search(struct wil6210_priv *wil);
+int wmi_stop_discovery(struct wil6210_priv *wil);
+
int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params,
u64 *cookie);
@@ -790,11 +852,13 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
struct wireless_dev *wil_cfg80211_init(struct device *dev);
void wil_wdev_free(struct wil6210_priv *wil);
+void wil_p2p_wdev_free(struct wil6210_priv *wil);
int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
- u8 chan, u8 hidden_ssid);
+ u8 chan, u8 hidden_ssid, u8 is_go);
int wmi_pcp_stop(struct wil6210_priv *wil);
+int wmi_led_cfg(struct wil6210_priv *wil, bool enable);
void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event);
void wil_probe_client_flush(struct wil6210_priv *wil);
@@ -832,4 +896,9 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime);
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
void wil_fw_core_dump(struct wil6210_priv *wil);
+void wil_halp_vote(struct wil6210_priv *wil);
+void wil_halp_unvote(struct wil6210_priv *wil);
+void wil6210_set_halp(struct wil6210_priv *wil);
+void wil6210_clear_halp(struct wil6210_priv *wil);
+
#endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 9a949d910..33d4a34b3 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -19,6 +19,12 @@
struct device;
+enum wil_platform_event {
+ WIL_PLATFORM_EVT_FW_CRASH = 0,
+ WIL_PLATFORM_EVT_PRE_RESET = 1,
+ WIL_PLATFORM_EVT_FW_RDY = 2,
+};
+
/**
* struct wil_platform_ops - wil platform module calls from this
* driver to platform driver
@@ -28,7 +34,7 @@ struct wil_platform_ops {
int (*suspend)(void *handle);
int (*resume)(void *handle);
void (*uninit)(void *handle);
- int (*notify_crash)(void *handle);
+ int (*notify)(void *handle, enum wil_platform_event evt);
};
/**
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 493e721c4..b80c5d850 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -32,6 +32,11 @@ module_param(agg_wsize, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;"
" 0 - use default; < 0 - don't auto-establish");
+u8 led_id = WIL_LED_INVALID_ID;
+module_param(led_id, byte, S_IRUGO);
+MODULE_PARM_DESC(led_id,
+ " 60G device led enablement. Set the led ID (0-2) to enable");
+
/**
* WMI event receiving - theory of operations
*
@@ -94,6 +99,14 @@ const struct fw_map fw_mapping[] = {
*/
};
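+/* indexed by WIL_LED_TIME_{SLOW,MED,FAST} */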
+struct blink_on_off_time led_blink_time[] = {
+ {WIL_LED_BLINK_ON_SLOW_MS, WIL_LED_BLINK_OFF_SLOW_MS},
+ {WIL_LED_BLINK_ON_MED_MS, WIL_LED_BLINK_OFF_MED_MS},
+ {WIL_LED_BLINK_ON_FAST_MS, WIL_LED_BLINK_OFF_FAST_MS},
+};
+
+u8 led_polarity = LED_POLARITY_LOW_ACTIVE;
+
/**
* return AHB address for given firmware/ucode internal (linker) address
* @x - internal address
@@ -176,7 +189,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
{
struct {
struct wil6210_mbox_hdr hdr;
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
} __packed cmd = {
.hdr = {
.type = WIL_MBOX_HDR_TYPE_WMI,
@@ -185,7 +198,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
},
.wmi = {
.mid = 0,
- .id = cpu_to_le16(cmdid),
+ .command_id = cpu_to_le16(cmdid),
},
};
struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
@@ -194,6 +207,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
void __iomem *dst;
void __iomem *head = wmi_addr(wil, r->head);
uint retry;
+ int rc = 0;
if (sizeof(cmd) + len > r->entry_size) {
wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
@@ -212,6 +226,9 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
return -EINVAL;
}
+
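+ /* hold a HALP vote while accessing the mailbox ring */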
+ wil_halp_vote(wil);
+
/* read Tx head till it is not busy */
for (retry = 5; retry > 0; retry--) {
wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
@@ -221,7 +238,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
}
if (d_head.sync != 0) {
wil_err(wil, "WMI head busy\n");
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
/* next head */
next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
@@ -230,7 +248,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
for (retry = 5; retry > 0; retry--) {
if (!test_bit(wil_status_fwready, wil->status)) {
wil_err(wil, "WMI: cannot send command while FW not ready\n");
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto out;
}
r->tail = wil_r(wil, RGF_MBOX +
offsetof(struct wil6210_mbox_ctl, tx.tail));
@@ -240,13 +259,15 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
}
if (next_head == r->tail) {
wil_err(wil, "WMI ring full\n");
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
dst = wmi_buffer(wil, d_head.addr);
if (!dst) {
wil_err(wil, "invalid WMI buffer: 0x%08x\n",
le32_to_cpu(d_head.addr));
- return -EINVAL;
+ rc = -EAGAIN;
+ goto out;
}
cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
/* set command */
@@ -269,7 +290,9 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS),
SW_INT_MBOX);
- return 0;
+out:
+ wil_halp_unvote(wil);
+ return rc;
}
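With the HALP vote taken up front, every early return in __wmi_send is converted to rc = ...; goto out, so the vote is always released on the single exit path. A compilable sketch of that acquire/goto-out/release discipline, with wil_halp_vote/unvote modeled as plain counters:

#include <errno.h>
#include <stdio.h>

static int halp_votes;

static void halp_vote(void)   { halp_votes++; }
static void halp_unvote(void) { halp_votes--; }

/* every failure path after the vote must jump to out, never return */
static int send_cmd(int head_busy, int ring_full)
{
	int rc = 0;

	halp_vote();

	if (head_busy) {
		rc = -EBUSY;
		goto out;
	}
	if (ring_full) {
		rc = -EAGAIN;
		goto out;
	}
	/* ... write descriptor, trigger SW interrupt ... */
out:
	halp_unvote();
	return rc;
}

int main(void)
{
	send_cmd(1, 0);
	printf("votes balanced: %d\n", halp_votes); /* prints 0 */
	return 0;
}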
int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
@@ -333,7 +356,7 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
}
ch_no = data->info.channel + 1;
- freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ);
+ freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
channel = ieee80211_get_channel(wiphy, freq);
signal = data->info.sqi;
d_status = le16_to_cpu(data->info.status);
@@ -368,6 +391,8 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
wil_hex_dump_wmi("IE ", DUMP_PREFIX_OFFSET, 16, 1, ie_buf,
ie_len, true);
+ wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
+
bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
d_len, signal, GFP_KERNEL);
if (bss) {
@@ -378,8 +403,10 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
}
} else {
- cfg80211_rx_mgmt(wil->wdev, freq, signal,
+ mutex_lock(&wil->p2p_wdev_mutex);
+ cfg80211_rx_mgmt(wil->radio_wdev, freq, signal,
(void *)rx_mgmt_frame, d_len, 0);
+ mutex_unlock(&wil->p2p_wdev_mutex);
}
}
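Beyond the rename from IEEE80211_BAND_60GHZ to NL80211_BAND_60GHZ, the mapping itself is unchanged: DMG channels sit on a 2160 MHz raster, with channel 1 centered at 58320 MHz. A sketch of the conversion ieee80211_channel_to_frequency performs for this band (the range guard here assumes the four DMG channels):

#include <assert.h>

/* 60 GHz (DMG): channel n is centered at 56160 + n * 2160 MHz */
static int chan60g_to_freq(int chan)
{
	if (chan < 1 || chan > 4)
		return 0; /* out of range for this sketch */
	return 56160 + chan * 2160;
}

int main(void)
{
	assert(chan60g_to_freq(1) == 58320);
	assert(chan60g_to_freq(2) == 60480);
	assert(chan60g_to_freq(3) == 62640);
	return 0;
}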
@@ -406,7 +433,10 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
wil->scan_request, aborted);
del_timer_sync(&wil->scan_timer);
+ mutex_lock(&wil->p2p_wdev_mutex);
cfg80211_scan_done(wil->scan_request, aborted);
+ wil->radio_wdev = wil->wdev;
+ mutex_unlock(&wil->p2p_wdev_mutex);
wil->scan_request = NULL;
} else {
wil_err(wil, "SCAN_COMPLETE while not scanning\n");
@@ -487,6 +517,14 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
return;
}
del_timer_sync(&wil->connect_timer);
+ } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+ if (wil->sta[evt->cid].status != wil_sta_unused) {
+ wil_err(wil, "%s: AP: Invalid status %d for CID %d\n",
+ __func__, wil->sta[evt->cid].status, evt->cid);
+ mutex_unlock(&wil->mutex);
+ return;
+ }
}
/* FIXME FW can transmit only ucast frames to peer */
@@ -648,7 +686,7 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len)
static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
int len)
{
- struct wmi_vring_ba_status_event *evt = d;
+ struct wmi_ba_status_event *evt = d;
struct vring_tx_data *txdata;
wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
@@ -834,10 +872,10 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
offsetof(struct wil6210_mbox_ring_desc, sync), 0);
/* indicate */
if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
- (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
- struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
- u16 id = le16_to_cpu(wmi->id);
- u32 tstamp = le32_to_cpu(wmi->timestamp);
+ (len >= sizeof(struct wmi_cmd_hdr))) {
+ struct wmi_cmd_hdr *wmi = &evt->event.wmi;
+ u16 id = le16_to_cpu(wmi->command_id);
+ u32 tstamp = le32_to_cpu(wmi->fw_timestamp);
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
if (wil->reply_id && wil->reply_id == id) {
if (wil->reply_buf) {
@@ -946,8 +984,62 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
}
+int wmi_led_cfg(struct wil6210_priv *wil, bool enable)
+{
+ int rc = 0;
+ struct wmi_led_cfg_cmd cmd = {
+ .led_mode = enable,
+ .id = led_id,
+ .slow_blink_cfg.blink_on =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_SLOW].on_ms),
+ .slow_blink_cfg.blink_off =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_SLOW].off_ms),
+ .medium_blink_cfg.blink_on =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_MED].on_ms),
+ .medium_blink_cfg.blink_off =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_MED].off_ms),
+ .fast_blink_cfg.blink_on =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_FAST].on_ms),
+ .fast_blink_cfg.blink_off =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_FAST].off_ms),
+ .led_polarity = led_polarity,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_led_cfg_done_event evt;
+ } __packed reply;
+
+ if (led_id == WIL_LED_INVALID_ID)
+ goto out;
+
+ if (led_id > WIL_LED_MAX_ID) {
+ wil_err(wil, "Invalid led id %d\n", led_id);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ wil_dbg_wmi(wil,
+ "%s led %d\n",
+ enable ? "enabling" : "disabling", led_id);
+
+ rc = wmi_call(wil, WMI_LED_CFG_CMDID, &cmd, sizeof(cmd),
+ WMI_LED_CFG_DONE_EVENTID, &reply, sizeof(reply),
+ 100);
+ if (rc)
+ goto out;
+
+ if (reply.evt.status) {
+ wil_err(wil, "led %d cfg failed with status %d\n",
+ led_id, le32_to_cpu(reply.evt.status));
+ rc = -EINVAL;
+ }
+
+out:
+ return rc;
+}
+
int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
- u8 chan, u8 hidden_ssid)
+ u8 chan, u8 hidden_ssid, u8 is_go)
{
int rc;
@@ -958,9 +1050,10 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
.channel = chan - 1,
.pcp_max_assoc_sta = max_assoc_sta,
.hidden_ssid = hidden_ssid,
+ .is_go = is_go,
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_pcp_started_event evt;
} __packed reply;
@@ -987,11 +1080,21 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
if (reply.evt.status != WMI_FW_STATUS_SUCCESS)
rc = -EINVAL;
+ if (wmi_nettype != WMI_NETTYPE_P2P)
+ /* Don't fail due to error in the led configuration */
+ wmi_led_cfg(wil, true);
+
return rc;
}
int wmi_pcp_stop(struct wil6210_priv *wil)
{
+ int rc;
+
+ rc = wmi_led_cfg(wil, false);
+ if (rc)
+ return rc;
+
return wmi_call(wil, WMI_PCP_STOP_CMDID, NULL, 0,
WMI_PCP_STOPPED_EVENTID, NULL, 0, 20);
}
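wmi_led_cfg is a no-op while the led_id module parameter stays at WIL_LED_INVALID_ID, rejects ids above WIL_LED_MAX_ID, and is then called best-effort from wmi_pcp_start and as a hard requirement from wmi_pcp_stop. A small userspace sketch of the guard and blink-table lookup (the constant values here are stand-ins, not the wil6210.h definitions):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define WIL_LED_INVALID_ID 0xF  /* stand-in value */
#define WIL_LED_MAX_ID     2

enum { WIL_LED_TIME_SLOW, WIL_LED_TIME_MED, WIL_LED_TIME_FAST };

struct blink_on_off_time { uint32_t on_ms, off_ms; };

/* stand-in blink table; real values come from WIL_LED_BLINK_*_MS */
static const struct blink_on_off_time led_blink_time[] = {
	[WIL_LED_TIME_SLOW] = { 1000, 1000 },
	[WIL_LED_TIME_MED]  = {  500,  500 },
	[WIL_LED_TIME_FAST] = {  100,  100 },
};

static int led_cfg(uint8_t led_id, int enable)
{
	if (led_id == WIL_LED_INVALID_ID)
		return 0;                /* LED support not requested */
	if (led_id > WIL_LED_MAX_ID)
		return -EINVAL;          /* only ids 0-2 exist */

	printf("%s led %u, slow blink %u/%u ms\n",
	       enable ? "enabling" : "disabling", led_id,
	       led_blink_time[WIL_LED_TIME_SLOW].on_ms,
	       led_blink_time[WIL_LED_TIME_SLOW].off_ms);
	return 0;
}

int main(void) { return led_cfg(0, 1); }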
@@ -1014,7 +1117,7 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid)
{
int rc;
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_set_ssid_cmd cmd;
} __packed reply;
int len; /* reply.cmd.ssid_len in CPU order */
@@ -1047,7 +1150,7 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel)
{
int rc;
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_set_pcp_channel_cmd cmd;
} __packed reply;
@@ -1064,14 +1167,86 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel)
return 0;
}
-int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
+int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi)
{
+ int rc;
struct wmi_p2p_cfg_cmd cmd = {
- .discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD,
+ .discovery_mode = WMI_DISCOVERY_MODE_PEER2PEER,
+ .bcon_interval = cpu_to_le16(bi),
.channel = channel - 1,
};
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_p2p_cfg_done_event evt;
+ } __packed reply;
+
+ wil_dbg_wmi(wil, "sending WMI_P2P_CFG_CMDID\n");
+
+ rc = wmi_call(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd),
+ WMI_P2P_CFG_DONE_EVENTID, &reply, sizeof(reply), 300);
+ if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "P2P_CFG failed. status %d\n", reply.evt.status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_start_listen(struct wil6210_priv *wil)
+{
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_listen_started_event evt;
+ } __packed reply;
+
+ wil_dbg_wmi(wil, "sending WMI_START_LISTEN_CMDID\n");
+
+ rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
+ WMI_LISTEN_STARTED_EVENTID, &reply, sizeof(reply), 300);
+ if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "device failed to start listen. status %d\n",
+ reply.evt.status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_start_search(struct wil6210_priv *wil)
+{
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_search_started_event evt;
+ } __packed reply;
+
+ wil_dbg_wmi(wil, "sending WMI_START_SEARCH_CMDID\n");
+
+ rc = wmi_call(wil, WMI_START_SEARCH_CMDID, NULL, 0,
+ WMI_SEARCH_STARTED_EVENTID, &reply, sizeof(reply), 300);
+ if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "device failed to start search. status %d\n",
+ reply.evt.status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_stop_discovery(struct wil6210_priv *wil)
+{
+ int rc;
+
+ wil_dbg_wmi(wil, "sending WMI_DISCOVERY_STOP_CMDID\n");
+
+ rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0,
+ WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 100);
- return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd));
+ if (rc)
+ wil_err(wil, "Failed to stop discovery\n");
+
+ return rc;
}
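wmi_p2p_cfg, wmi_start_listen and wmi_start_search all share one shape: issue a command via wmi_call with a typed header-plus-event reply buffer, then translate a non-success firmware status into -EINVAL. A userspace model of that call-with-typed-reply pattern (wmi_call is stubbed; the struct framing mirrors the driver):

#include <errno.h>
#include <stdint.h>
#include <string.h>

#define WMI_FW_STATUS_SUCCESS 0

struct wmi_cmd_hdr { uint8_t mid, reserved; uint16_t command_id; uint32_t fw_timestamp; };
struct wmi_listen_started_event { uint8_t status, reserved[3]; };

/* stub: pretend firmware answered with success */
static int wmi_call_stub(uint16_t cmdid, void *reply, size_t reply_len)
{
	(void)cmdid;
	memset(reply, 0, reply_len);
	return 0;
}

static int start_listen(void)
{
	int rc;
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_listen_started_event evt;
	} __attribute__((packed)) reply;

	rc = wmi_call_stub(0x914 /* WMI_START_LISTEN_CMDID */, &reply, sizeof(reply));
	if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS)
		rc = -EINVAL;     /* command delivered but firmware refused */
	return rc;
}

int main(void) { return start_listen(); }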
int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
@@ -1155,7 +1330,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
{
int rc;
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_listen_started_event evt;
} __packed reply;
@@ -1192,7 +1367,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
.host_thrsh = cpu_to_le16(rx_ring_overflow_thrsh),
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_cfg_rx_chain_done_event evt;
} __packed evt;
int rc;
@@ -1246,7 +1421,7 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
.measure_mode = cpu_to_le32(TEMPERATURE_MEASURE_NOW),
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_temp_sense_done_event evt;
} __packed reply;
@@ -1272,7 +1447,7 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
.disconnect_reason = cpu_to_le16(reason),
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_disconnect_event evt;
} __packed reply;
@@ -1364,7 +1539,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
.ba_timeout = cpu_to_le16(timeout),
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_rcp_addba_resp_sent_event evt;
} __packed reply;
@@ -1420,10 +1595,10 @@ static void wmi_event_handle(struct wil6210_priv *wil,
u16 len = le16_to_cpu(hdr->len);
if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
- (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
- struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]);
+ (len >= sizeof(struct wmi_cmd_hdr))) {
+ struct wmi_cmd_hdr *wmi = (void *)(&hdr[1]);
void *evt_data = (void *)(&wmi[1]);
- u16 id = le16_to_cpu(wmi->id);
+ u16 id = le16_to_cpu(wmi->command_id);
wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n",
id, wil->reply_id);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 6e90e78f1..685fe0dde 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
- * Copyright (c) 2006-2012 Wilocity .
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2006-2012 Wilocity
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -17,187 +17,198 @@
/*
* This file contains the definitions of the WMI protocol specified in the
- * Wireless Module Interface (WMI) for the Wilocity
- * MARLON 60 Gigabit wireless solution.
+ * Wireless Module Interface (WMI) for the Qualcomm
+ * 60 GHz wireless solution.
* It includes definitions of all the commands and events.
* Commands are messages from the host to the WM.
* Events are messages from the WM to the host.
+ *
+ * This is an automatically generated file.
*/
#ifndef __WILOCITY_WMI_H__
#define __WILOCITY_WMI_H__
/* General */
-#define WILOCITY_MAX_ASSOC_STA (8)
-#define WILOCITY_DEFAULT_ASSOC_STA (1)
-#define WMI_MAC_LEN (6)
-#define WMI_PROX_RANGE_NUM (3)
-#define WMI_MAX_LOSS_DMG_BEACONS (32)
+#define WMI_MAX_ASSOC_STA (8)
+#define WMI_DEFAULT_ASSOC_STA (1)
+#define WMI_MAC_LEN (6)
+#define WMI_PROX_RANGE_NUM (3)
+#define WMI_MAX_LOSS_DMG_BEACONS (20)
+
+/* Mailbox interface
+ * used for commands and events
+ */
+enum wmi_mid {
+ MID_DEFAULT = 0x00,
+ FIRST_DBG_MID_ID = 0x10,
+ LAST_DBG_MID_ID = 0xFE,
+ MID_BROADCAST = 0xFF,
+};
+
+/* WMI_CMD_HDR */
+struct wmi_cmd_hdr {
+ u8 mid;
+ u8 reserved;
+ __le16 command_id;
+ __le32 fw_timestamp;
+} __packed;
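struct wmi_cmd_hdr replaces wil6210_mbox_hdr_wmi, renaming id to command_id and timestamp to fw_timestamp; it is an 8-byte little-endian header prefixed to every command and event payload. A sketch that serializes one explicitly, with stdint types standing in for the kernel's u8/__le16/__le32:

#include <stdint.h>
#include <stdio.h>

struct wmi_cmd_hdr {
	uint8_t  mid;
	uint8_t  reserved;
	uint16_t command_id;    /* little-endian on the wire */
	uint32_t fw_timestamp;  /* little-endian on the wire */
} __attribute__((packed));

/* serialize explicitly so the result is LE on any host */
static void put_hdr(uint8_t buf[8], uint8_t mid, uint16_t id, uint32_t ts)
{
	buf[0] = mid;
	buf[1] = 0;
	buf[2] = id & 0xFF;
	buf[3] = id >> 8;
	buf[4] = ts & 0xFF;
	buf[5] = (ts >> 8) & 0xFF;
	buf[6] = (ts >> 16) & 0xFF;
	buf[7] = ts >> 24;
}

int main(void)
{
	uint8_t buf[8];

	put_hdr(buf, 0 /* MID_DEFAULT */, 0x858 /* WMI_LED_CFG_CMDID */, 0);
	printf("command_id bytes: %02x %02x\n", buf[2], buf[3]); /* 58 08 */
	return (int)(sizeof(struct wmi_cmd_hdr) != 8);
}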
/* List of Commands */
enum wmi_command_id {
- WMI_CONNECT_CMDID = 0x0001,
- WMI_DISCONNECT_CMDID = 0x0003,
- WMI_DISCONNECT_STA_CMDID = 0x0004,
- WMI_START_SCAN_CMDID = 0x0007,
- WMI_SET_BSS_FILTER_CMDID = 0x0009,
- WMI_SET_PROBED_SSID_CMDID = 0x000a,
- WMI_SET_LISTEN_INT_CMDID = 0x000b,
- WMI_BCON_CTRL_CMDID = 0x000f,
- WMI_ADD_CIPHER_KEY_CMDID = 0x0016,
- WMI_DELETE_CIPHER_KEY_CMDID = 0x0017,
- WMI_SET_APPIE_CMDID = 0x003f,
- WMI_SET_WSC_STATUS_CMDID = 0x0041,
- WMI_PXMT_RANGE_CFG_CMDID = 0x0042,
- WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043,
-/* WMI_FAST_MEM_ACC_MODE_CMDID = 0x0300, */
- WMI_MEM_READ_CMDID = 0x0800,
- WMI_MEM_WR_CMDID = 0x0801,
- WMI_ECHO_CMDID = 0x0803,
- WMI_DEEP_ECHO_CMDID = 0x0804,
- WMI_CONFIG_MAC_CMDID = 0x0805,
- WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806,
- WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808,
- WMI_PHY_GET_STATISTICS_CMDID = 0x0809,
- WMI_FS_TUNE_CMDID = 0x080a,
- WMI_CORR_MEASURE_CMDID = 0x080b,
- WMI_READ_RSSI_CMDID = 0x080c,
- WMI_TEMP_SENSE_CMDID = 0x080e,
- WMI_DC_CALIB_CMDID = 0x080f,
- WMI_SEND_TONE_CMDID = 0x0810,
- WMI_IQ_TX_CALIB_CMDID = 0x0811,
- WMI_IQ_RX_CALIB_CMDID = 0x0812,
- WMI_SET_UCODE_IDLE_CMDID = 0x0813,
- WMI_SET_WORK_MODE_CMDID = 0x0815,
- WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816,
- WMI_MARLON_R_READ_CMDID = 0x0818,
- WMI_MARLON_R_WRITE_CMDID = 0x0819,
- WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a,
- MAC_IO_STATIC_PARAMS_CMDID = 0x081b,
- MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c,
- WMI_SILENT_RSSI_CALIB_CMDID = 0x081d,
- WMI_RF_RX_TEST_CMDID = 0x081e,
- WMI_CFG_RX_CHAIN_CMDID = 0x0820,
- WMI_VRING_CFG_CMDID = 0x0821,
- WMI_BCAST_VRING_CFG_CMDID = 0x0822,
- WMI_VRING_BA_EN_CMDID = 0x0823,
- WMI_VRING_BA_DIS_CMDID = 0x0824,
- WMI_RCP_ADDBA_RESP_CMDID = 0x0825,
- WMI_RCP_DELBA_CMDID = 0x0826,
- WMI_SET_SSID_CMDID = 0x0827,
- WMI_GET_SSID_CMDID = 0x0828,
- WMI_SET_PCP_CHANNEL_CMDID = 0x0829,
- WMI_GET_PCP_CHANNEL_CMDID = 0x082a,
- WMI_SW_TX_REQ_CMDID = 0x082b,
- WMI_READ_MAC_RXQ_CMDID = 0x0830,
- WMI_READ_MAC_TXQ_CMDID = 0x0831,
- WMI_WRITE_MAC_RXQ_CMDID = 0x0832,
- WMI_WRITE_MAC_TXQ_CMDID = 0x0833,
- WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x0834,
- WMI_MLME_PUSH_CMDID = 0x0835,
- WMI_BEAMFORMING_MGMT_CMDID = 0x0836,
- WMI_BF_TXSS_MGMT_CMDID = 0x0837,
- WMI_BF_SM_MGMT_CMDID = 0x0838,
- WMI_BF_RXSS_MGMT_CMDID = 0x0839,
- WMI_BF_TRIG_CMDID = 0x083A,
- WMI_SET_SECTORS_CMDID = 0x0849,
- WMI_MAINTAIN_PAUSE_CMDID = 0x0850,
- WMI_MAINTAIN_RESUME_CMDID = 0x0851,
- WMI_RS_MGMT_CMDID = 0x0852,
- WMI_RF_MGMT_CMDID = 0x0853,
- WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x0854,
- WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x0855,
+ WMI_CONNECT_CMDID = 0x01,
+ WMI_DISCONNECT_CMDID = 0x03,
+ WMI_DISCONNECT_STA_CMDID = 0x04,
+ WMI_START_SCAN_CMDID = 0x07,
+ WMI_SET_BSS_FILTER_CMDID = 0x09,
+ WMI_SET_PROBED_SSID_CMDID = 0x0A,
+ WMI_SET_LISTEN_INT_CMDID = 0x0B,
+ WMI_BCON_CTRL_CMDID = 0x0F,
+ WMI_ADD_CIPHER_KEY_CMDID = 0x16,
+ WMI_DELETE_CIPHER_KEY_CMDID = 0x17,
+ WMI_PCP_CONF_CMDID = 0x18,
+ WMI_SET_APPIE_CMDID = 0x3F,
+ WMI_SET_WSC_STATUS_CMDID = 0x41,
+ WMI_PXMT_RANGE_CFG_CMDID = 0x42,
+ WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x43,
+ WMI_MEM_READ_CMDID = 0x800,
+ WMI_MEM_WR_CMDID = 0x801,
+ WMI_ECHO_CMDID = 0x803,
+ WMI_DEEP_ECHO_CMDID = 0x804,
+ WMI_CONFIG_MAC_CMDID = 0x805,
+ WMI_CONFIG_PHY_DEBUG_CMDID = 0x806,
+ WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x808,
+ WMI_PHY_GET_STATISTICS_CMDID = 0x809,
+ WMI_FS_TUNE_CMDID = 0x80A,
+ WMI_CORR_MEASURE_CMDID = 0x80B,
+ WMI_READ_RSSI_CMDID = 0x80C,
+ WMI_TEMP_SENSE_CMDID = 0x80E,
+ WMI_DC_CALIB_CMDID = 0x80F,
+ WMI_SEND_TONE_CMDID = 0x810,
+ WMI_IQ_TX_CALIB_CMDID = 0x811,
+ WMI_IQ_RX_CALIB_CMDID = 0x812,
+ WMI_SET_UCODE_IDLE_CMDID = 0x813,
+ WMI_SET_WORK_MODE_CMDID = 0x815,
+ WMI_LO_LEAKAGE_CALIB_CMDID = 0x816,
+ WMI_MARLON_R_READ_CMDID = 0x818,
+ WMI_MARLON_R_WRITE_CMDID = 0x819,
+ WMI_MARLON_R_TXRX_SEL_CMDID = 0x81A,
+ MAC_IO_STATIC_PARAMS_CMDID = 0x81B,
+ MAC_IO_DYNAMIC_PARAMS_CMDID = 0x81C,
+ WMI_SILENT_RSSI_CALIB_CMDID = 0x81D,
+ WMI_RF_RX_TEST_CMDID = 0x81E,
+ WMI_CFG_RX_CHAIN_CMDID = 0x820,
+ WMI_VRING_CFG_CMDID = 0x821,
+ WMI_BCAST_VRING_CFG_CMDID = 0x822,
+ WMI_VRING_BA_EN_CMDID = 0x823,
+ WMI_VRING_BA_DIS_CMDID = 0x824,
+ WMI_RCP_ADDBA_RESP_CMDID = 0x825,
+ WMI_RCP_DELBA_CMDID = 0x826,
+ WMI_SET_SSID_CMDID = 0x827,
+ WMI_GET_SSID_CMDID = 0x828,
+ WMI_SET_PCP_CHANNEL_CMDID = 0x829,
+ WMI_GET_PCP_CHANNEL_CMDID = 0x82A,
+ WMI_SW_TX_REQ_CMDID = 0x82B,
+ WMI_READ_MAC_RXQ_CMDID = 0x830,
+ WMI_READ_MAC_TXQ_CMDID = 0x831,
+ WMI_WRITE_MAC_RXQ_CMDID = 0x832,
+ WMI_WRITE_MAC_TXQ_CMDID = 0x833,
+ WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x834,
+ WMI_MLME_PUSH_CMDID = 0x835,
+ WMI_BEAMFORMING_MGMT_CMDID = 0x836,
+ WMI_BF_TXSS_MGMT_CMDID = 0x837,
+ WMI_BF_SM_MGMT_CMDID = 0x838,
+ WMI_BF_RXSS_MGMT_CMDID = 0x839,
+ WMI_BF_TRIG_CMDID = 0x83A,
+ WMI_SET_SECTORS_CMDID = 0x849,
+ WMI_MAINTAIN_PAUSE_CMDID = 0x850,
+ WMI_MAINTAIN_RESUME_CMDID = 0x851,
+ WMI_RS_MGMT_CMDID = 0x852,
+ WMI_RF_MGMT_CMDID = 0x853,
+ WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x854,
+ WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x855,
+ WMI_OTP_READ_CMDID = 0x856,
+ WMI_OTP_WRITE_CMDID = 0x857,
+ WMI_LED_CFG_CMDID = 0x858,
/* Performance monitoring commands */
- WMI_BF_CTRL_CMDID = 0x0862,
- WMI_NOTIFY_REQ_CMDID = 0x0863,
- WMI_GET_STATUS_CMDID = 0x0864,
- WMI_UNIT_TEST_CMDID = 0x0900,
- WMI_HICCUP_CMDID = 0x0901,
- WMI_FLASH_READ_CMDID = 0x0902,
- WMI_FLASH_WRITE_CMDID = 0x0903,
- WMI_SECURITY_UNIT_TEST_CMDID = 0x0904,
- /*P2P*/
- WMI_P2P_CFG_CMDID = 0x0910,
- WMI_PORT_ALLOCATE_CMDID = 0x0911,
- WMI_PORT_DELETE_CMDID = 0x0912,
- WMI_POWER_MGMT_CFG_CMDID = 0x0913,
- WMI_START_LISTEN_CMDID = 0x0914,
- WMI_START_SEARCH_CMDID = 0x0915,
- WMI_DISCOVERY_START_CMDID = 0x0916,
- WMI_DISCOVERY_STOP_CMDID = 0x0917,
- WMI_PCP_START_CMDID = 0x0918,
- WMI_PCP_STOP_CMDID = 0x0919,
- WMI_GET_PCP_FACTOR_CMDID = 0x091b,
-
- WMI_SET_MAC_ADDRESS_CMDID = 0xf003,
- WMI_ABORT_SCAN_CMDID = 0xf007,
- WMI_SET_PMK_CMDID = 0xf028,
-
- WMI_SET_PROMISCUOUS_MODE_CMDID = 0xf041,
- WMI_GET_PMK_CMDID = 0xf048,
- WMI_SET_PASSPHRASE_CMDID = 0xf049,
- WMI_SEND_ASSOC_RES_CMDID = 0xf04a,
- WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xf04b,
- WMI_EAPOL_TX_CMDID = 0xf04c,
- WMI_MAC_ADDR_REQ_CMDID = 0xf04d,
- WMI_FW_VER_CMDID = 0xf04e,
- WMI_PMC_CMDID = 0xf04f,
+ WMI_BF_CTRL_CMDID = 0x862,
+ WMI_NOTIFY_REQ_CMDID = 0x863,
+ WMI_GET_STATUS_CMDID = 0x864,
+ WMI_UNIT_TEST_CMDID = 0x900,
+ WMI_HICCUP_CMDID = 0x901,
+ WMI_FLASH_READ_CMDID = 0x902,
+ WMI_FLASH_WRITE_CMDID = 0x903,
+ /* P2P */
+ WMI_P2P_CFG_CMDID = 0x910,
+ WMI_PORT_ALLOCATE_CMDID = 0x911,
+ WMI_PORT_DELETE_CMDID = 0x912,
+ WMI_POWER_MGMT_CFG_CMDID = 0x913,
+ WMI_START_LISTEN_CMDID = 0x914,
+ WMI_START_SEARCH_CMDID = 0x915,
+ WMI_DISCOVERY_START_CMDID = 0x916,
+ WMI_DISCOVERY_STOP_CMDID = 0x917,
+ WMI_PCP_START_CMDID = 0x918,
+ WMI_PCP_STOP_CMDID = 0x919,
+ WMI_GET_PCP_FACTOR_CMDID = 0x91B,
+ WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
+ WMI_ABORT_SCAN_CMDID = 0xF007,
+ WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
+ WMI_GET_PMK_CMDID = 0xF048,
+ WMI_SET_PASSPHRASE_CMDID = 0xF049,
+ WMI_SEND_ASSOC_RES_CMDID = 0xF04A,
+ WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xF04B,
+ WMI_MAC_ADDR_REQ_CMDID = 0xF04D,
+ WMI_FW_VER_CMDID = 0xF04E,
+ WMI_PMC_CMDID = 0xF04F,
};
-/*
- * Commands data structures
- */
-
-/*
- * WMI_CONNECT_CMDID
- */
+/* WMI_CONNECT_CMDID */
enum wmi_network_type {
WMI_NETTYPE_INFRA = 0x01,
WMI_NETTYPE_ADHOC = 0x02,
WMI_NETTYPE_ADHOC_CREATOR = 0x04,
WMI_NETTYPE_AP = 0x10,
WMI_NETTYPE_P2P = 0x20,
- WMI_NETTYPE_WBE = 0x40, /* PCIE over 60g */
+ /* PCIE over 60g */
+ WMI_NETTYPE_WBE = 0x40,
};
enum wmi_dot11_auth_mode {
- WMI_AUTH11_OPEN = 0x01,
- WMI_AUTH11_SHARED = 0x02,
- WMI_AUTH11_LEAP = 0x04,
- WMI_AUTH11_WSC = 0x08,
+ WMI_AUTH11_OPEN = 0x01,
+ WMI_AUTH11_SHARED = 0x02,
+ WMI_AUTH11_LEAP = 0x04,
+ WMI_AUTH11_WSC = 0x08,
};
enum wmi_auth_mode {
- WMI_AUTH_NONE = 0x01,
- WMI_AUTH_WPA = 0x02,
- WMI_AUTH_WPA2 = 0x04,
- WMI_AUTH_WPA_PSK = 0x08,
- WMI_AUTH_WPA2_PSK = 0x10,
- WMI_AUTH_WPA_CCKM = 0x20,
- WMI_AUTH_WPA2_CCKM = 0x40,
+ WMI_AUTH_NONE = 0x01,
+ WMI_AUTH_WPA = 0x02,
+ WMI_AUTH_WPA2 = 0x04,
+ WMI_AUTH_WPA_PSK = 0x08,
+ WMI_AUTH_WPA2_PSK = 0x10,
+ WMI_AUTH_WPA_CCKM = 0x20,
+ WMI_AUTH_WPA2_CCKM = 0x40,
};
enum wmi_crypto_type {
- WMI_CRYPT_NONE = 0x01,
- WMI_CRYPT_WEP = 0x02,
- WMI_CRYPT_TKIP = 0x04,
- WMI_CRYPT_AES = 0x08,
- WMI_CRYPT_AES_GCMP = 0x20,
+ WMI_CRYPT_NONE = 0x01,
+ WMI_CRYPT_AES_GCMP = 0x20,
};
enum wmi_connect_ctrl_flag_bits {
- WMI_CONNECT_ASSOC_POLICY_USER = 0x0001,
- WMI_CONNECT_SEND_REASSOC = 0x0002,
- WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x0004,
- WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008,
- WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010,
- WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020,
- WMI_CONNECT_DO_WPA_OFFLOAD = 0x0040,
- WMI_CONNECT_DO_NOT_DEAUTH = 0x0080,
+ WMI_CONNECT_ASSOC_POLICY_USER = 0x01,
+ WMI_CONNECT_SEND_REASSOC = 0x02,
+ WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x04,
+ WMI_CONNECT_PROFILE_MATCH_DONE = 0x08,
+ WMI_CONNECT_IGNORE_AAC_BEACON = 0x10,
+ WMI_CONNECT_CSA_FOLLOW_BSS = 0x20,
+ WMI_CONNECT_DO_WPA_OFFLOAD = 0x40,
+ WMI_CONNECT_DO_NOT_DEAUTH = 0x80,
};
-#define WMI_MAX_SSID_LEN (32)
+#define WMI_MAX_SSID_LEN (32)
+/* WMI_CONNECT_CMDID */
struct wmi_connect_cmd {
u8 network_type;
u8 dot11_auth_mode;
@@ -216,31 +227,17 @@ struct wmi_connect_cmd {
u8 reserved1[2];
} __packed;
-/*
- * WMI_DISCONNECT_STA_CMDID
- */
+/* WMI_DISCONNECT_STA_CMDID */
struct wmi_disconnect_sta_cmd {
u8 dst_mac[WMI_MAC_LEN];
__le16 disconnect_reason;
} __packed;
-/*
- * WMI_SET_PMK_CMDID
- */
-
-#define WMI_MIN_KEY_INDEX (0)
#define WMI_MAX_KEY_INDEX (3)
#define WMI_MAX_KEY_LEN (32)
#define WMI_PASSPHRASE_LEN (64)
-#define WMI_PMK_LEN (32)
-struct wmi_set_pmk_cmd {
- u8 pmk[WMI_PMK_LEN];
-} __packed;
-
-/*
- * WMI_SET_PASSPHRASE_CMDID
- */
+/* WMI_SET_PASSPHRASE_CMDID */
struct wmi_set_passphrase_cmd {
u8 ssid[WMI_MAX_SSID_LEN];
u8 passphrase[WMI_PASSPHRASE_LEN];
@@ -248,36 +245,34 @@ struct wmi_set_passphrase_cmd {
u8 passphrase_len;
} __packed;
-/*
- * WMI_ADD_CIPHER_KEY_CMDID
- */
+/* WMI_ADD_CIPHER_KEY_CMDID */
enum wmi_key_usage {
- WMI_KEY_USE_PAIRWISE = 0,
- WMI_KEY_USE_RX_GROUP = 1,
- WMI_KEY_USE_TX_GROUP = 2,
+ WMI_KEY_USE_PAIRWISE = 0x00,
+ WMI_KEY_USE_RX_GROUP = 0x01,
+ WMI_KEY_USE_TX_GROUP = 0x02,
};
struct wmi_add_cipher_key_cmd {
u8 key_index;
u8 key_type;
- u8 key_usage; /* enum wmi_key_usage */
+ /* enum wmi_key_usage */
+ u8 key_usage;
u8 key_len;
- u8 key_rsc[8]; /* key replay sequence counter */
+ /* key replay sequence counter */
+ u8 key_rsc[8];
u8 key[WMI_MAX_KEY_LEN];
- u8 key_op_ctrl; /* Additional Key Control information */
+ /* Additional Key Control information */
+ u8 key_op_ctrl;
u8 mac[WMI_MAC_LEN];
} __packed;
-/*
- * WMI_DELETE_CIPHER_KEY_CMDID
- */
+/* WMI_DELETE_CIPHER_KEY_CMDID */
struct wmi_delete_cipher_key_cmd {
u8 key_index;
u8 mac[WMI_MAC_LEN];
} __packed;
-/*
- * WMI_START_SCAN_CMDID
+/* WMI_START_SCAN_CMDID
*
* Start L1 scan operation
*
@@ -286,146 +281,142 @@ struct wmi_delete_cipher_key_cmd {
* - WMI_SCAN_COMPLETE_EVENTID
*/
enum wmi_scan_type {
- WMI_LONG_SCAN = 0,
- WMI_SHORT_SCAN = 1,
- WMI_PBC_SCAN = 2,
- WMI_DIRECT_SCAN = 3,
- WMI_ACTIVE_SCAN = 4,
+ WMI_ACTIVE_SCAN = 0x00,
+ WMI_SHORT_SCAN = 0x01,
+ WMI_PASSIVE_SCAN = 0x02,
+ WMI_DIRECT_SCAN = 0x03,
+ WMI_LONG_SCAN = 0x04,
};
+/* WMI_START_SCAN_CMDID */
struct wmi_start_scan_cmd {
- u8 direct_scan_mac_addr[6];
- u8 reserved[2];
- __le32 home_dwell_time; /* Max duration in the home channel(ms) */
- __le32 force_scan_interval; /* Time interval between scans (ms)*/
- u8 scan_type; /* wmi_scan_type */
- u8 num_channels; /* how many channels follow */
+ u8 direct_scan_mac_addr[WMI_MAC_LEN];
+ /* DMG Beacon frame is transmitted during active scanning */
+ u8 discovery_mode;
+ /* reserved */
+ u8 reserved;
+ /* Max duration in the home channel(ms) */
+ __le32 dwell_time;
+ /* Time interval between scans (ms) */
+ __le32 force_scan_interval;
+ /* enum wmi_scan_type */
+ u8 scan_type;
+ /* how many channels follow */
+ u8 num_channels;
+ /* channel IDs:
+ * 0 - 58320 MHz
+ * 1 - 60480 MHz
+ * 2 - 62640 MHz
+ */
struct {
u8 channel;
u8 reserved;
- } channel_list[0]; /* channels ID's */
- /* 0 - 58320 MHz */
- /* 1 - 60480 MHz */
- /* 2 - 62640 MHz */
+ } channel_list[0];
} __packed;
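wmi_start_scan_cmd now ends in a flexible channel_list array, so the size sent over the mailbox is the fixed part plus num_channels two-byte entries. A sketch of that allocation and sizing (stdint stand-ins for the kernel types):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct scan_channel { uint8_t channel, reserved; };

struct start_scan_cmd {
	uint8_t  direct_scan_mac_addr[6];
	uint8_t  discovery_mode;
	uint8_t  reserved;
	uint32_t dwell_time;
	uint32_t force_scan_interval;
	uint8_t  scan_type;
	uint8_t  num_channels;
	struct scan_channel channel_list[]; /* C99 flexible array member */
} __attribute__((packed));

int main(void)
{
	uint8_t n = 3; /* scan channel ids 0..2 (58320/60480/62640 MHz) */
	size_t len = sizeof(struct start_scan_cmd) + n * sizeof(struct scan_channel);
	struct start_scan_cmd *cmd = calloc(1, len);

	if (!cmd)
		return 1;
	cmd->num_channels = n;
	for (uint8_t i = 0; i < n; i++)
		cmd->channel_list[i].channel = i; /* 0-based channel ids */
	printf("command length: %zu bytes\n", len);
	free(cmd);
	return 0;
}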
-/*
- * WMI_SET_PROBED_SSID_CMDID
- */
+/* WMI_SET_PROBED_SSID_CMDID */
#define MAX_PROBED_SSID_INDEX (3)
enum wmi_ssid_flag {
- WMI_SSID_FLAG_DISABLE = 0, /* disables entry */
- WMI_SSID_FLAG_SPECIFIC = 1, /* probes specified ssid */
- WMI_SSID_FLAG_ANY = 2, /* probes for any ssid */
+ /* disables entry */
+ WMI_SSID_FLAG_DISABLE = 0x00,
+ /* probes specified ssid */
+ WMI_SSID_FLAG_SPECIFIC = 0x01,
+ /* probes for any ssid */
+ WMI_SSID_FLAG_ANY = 0x02,
};
struct wmi_probed_ssid_cmd {
- u8 entry_index; /* 0 to MAX_PROBED_SSID_INDEX */
- u8 flag; /* enum wmi_ssid_flag */
+ /* 0 to MAX_PROBED_SSID_INDEX */
+ u8 entry_index;
+ /* enum wmi_ssid_flag */
+ u8 flag;
u8 ssid_len;
u8 ssid[WMI_MAX_SSID_LEN];
} __packed;
-/*
- * WMI_SET_APPIE_CMDID
+/* WMI_SET_APPIE_CMDID
* Add Application specified IE to a management frame
*/
-#define WMI_MAX_IE_LEN (1024)
+#define WMI_MAX_IE_LEN (1024)
-/*
- * Frame Types
- */
+/* Frame Types */
enum wmi_mgmt_frame_type {
- WMI_FRAME_BEACON = 0,
- WMI_FRAME_PROBE_REQ = 1,
- WMI_FRAME_PROBE_RESP = 2,
- WMI_FRAME_ASSOC_REQ = 3,
- WMI_FRAME_ASSOC_RESP = 4,
- WMI_NUM_MGMT_FRAME,
+ WMI_FRAME_BEACON = 0x00,
+ WMI_FRAME_PROBE_REQ = 0x01,
+ WMI_FRAME_PROBE_RESP = 0x02,
+ WMI_FRAME_ASSOC_REQ = 0x03,
+ WMI_FRAME_ASSOC_RESP = 0x04,
+ WMI_NUM_MGMT_FRAME = 0x05,
};
struct wmi_set_appie_cmd {
- u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
+ /* enum wmi_mgmt_frame_type */
+ u8 mgmt_frm_type;
u8 reserved;
- __le16 ie_len; /* Length of the IE to be added to MGMT frame */
+ /* Length of the IE to be added to MGMT frame */
+ __le16 ie_len;
u8 ie_info[0];
} __packed;
-/*
- * WMI_PXMT_RANGE_CFG_CMDID
- */
+/* WMI_PXMT_RANGE_CFG_CMDID */
struct wmi_pxmt_range_cfg_cmd {
u8 dst_mac[WMI_MAC_LEN];
__le16 range;
} __packed;
-/*
- * WMI_PXMT_SNR2_RANGE_CFG_CMDID
- */
+/* WMI_PXMT_SNR2_RANGE_CFG_CMDID */
struct wmi_pxmt_snr2_range_cfg_cmd {
- s8 snr2range_arr[WMI_PROX_RANGE_NUM-1];
+ s8 snr2range_arr[2];
} __packed;
-/*
- * WMI_RF_MGMT_CMDID
- */
+/* WMI_RF_MGMT_CMDID */
enum wmi_rf_mgmt_type {
- WMI_RF_MGMT_W_DISABLE = 0,
- WMI_RF_MGMT_W_ENABLE = 1,
- WMI_RF_MGMT_GET_STATUS = 2,
+ WMI_RF_MGMT_W_DISABLE = 0x00,
+ WMI_RF_MGMT_W_ENABLE = 0x01,
+ WMI_RF_MGMT_GET_STATUS = 0x02,
};
+/* WMI_RF_MGMT_CMDID */
struct wmi_rf_mgmt_cmd {
__le32 rf_mgmt_type;
} __packed;
-/*
- * WMI_THERMAL_THROTTLING_CTRL_CMDID
- */
+/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
#define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH (0xFFFFFFFF)
+/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
struct wmi_thermal_throttling_ctrl_cmd {
__le32 time_on_usec;
__le32 time_off_usec;
__le32 max_txop_length_usec;
} __packed;
-/*
- * WMI_RF_RX_TEST_CMDID
- */
+/* WMI_RF_RX_TEST_CMDID */
struct wmi_rf_rx_test_cmd {
__le32 sector;
} __packed;
-/*
- * WMI_CORR_MEASURE_CMDID
- */
+/* WMI_CORR_MEASURE_CMDID */
struct wmi_corr_measure_cmd {
- s32 freq_mhz;
+ __le32 freq_mhz;
__le32 length_samples;
__le32 iterations;
} __packed;
-/*
- * WMI_SET_SSID_CMDID
- */
+/* WMI_SET_SSID_CMDID */
struct wmi_set_ssid_cmd {
__le32 ssid_len;
u8 ssid[WMI_MAX_SSID_LEN];
} __packed;
-/*
- * WMI_SET_PCP_CHANNEL_CMDID
- */
+/* WMI_SET_PCP_CHANNEL_CMDID */
struct wmi_set_pcp_channel_cmd {
u8 channel;
u8 reserved[3];
} __packed;
-/*
- * WMI_BCON_CTRL_CMDID
- */
+/* WMI_BCON_CTRL_CMDID */
struct wmi_bcon_ctrl_cmd {
__le16 bcon_interval;
__le16 frag_num;
@@ -434,214 +425,192 @@ struct wmi_bcon_ctrl_cmd {
u8 pcp_max_assoc_sta;
u8 disable_sec_offload;
u8 disable_sec;
+ u8 hidden_ssid;
+ u8 is_go;
+ u8 reserved[2];
} __packed;
-/******* P2P ***********/
-
-/*
- * WMI_PORT_ALLOCATE_CMDID
- */
+/* WMI_PORT_ALLOCATE_CMDID */
enum wmi_port_role {
- WMI_PORT_STA = 0,
- WMI_PORT_PCP = 1,
- WMI_PORT_AP = 2,
- WMI_PORT_P2P_DEV = 3,
- WMI_PORT_P2P_CLIENT = 4,
- WMI_PORT_P2P_GO = 5,
+ WMI_PORT_STA = 0x00,
+ WMI_PORT_PCP = 0x01,
+ WMI_PORT_AP = 0x02,
+ WMI_PORT_P2P_DEV = 0x03,
+ WMI_PORT_P2P_CLIENT = 0x04,
+ WMI_PORT_P2P_GO = 0x05,
};
+/* WMI_PORT_ALLOCATE_CMDID */
struct wmi_port_allocate_cmd {
u8 mac[WMI_MAC_LEN];
u8 port_role;
u8 mid;
} __packed;
-/*
- * WMI_PORT_DELETE_CMDID
- */
-struct wmi_delete_port_cmd {
+/* WMI_PORT_DELETE_CMDID */
+struct wmi_port_delete_cmd {
u8 mid;
u8 reserved[3];
} __packed;
-/*
- * WMI_P2P_CFG_CMDID
- */
+/* WMI_P2P_CFG_CMDID */
enum wmi_discovery_mode {
- WMI_DISCOVERY_MODE_NON_OFFLOAD = 0,
- WMI_DISCOVERY_MODE_OFFLOAD = 1,
- WMI_DISCOVERY_MODE_PEER2PEER = 2,
+ WMI_DISCOVERY_MODE_NON_OFFLOAD = 0x00,
+ WMI_DISCOVERY_MODE_OFFLOAD = 0x01,
+ WMI_DISCOVERY_MODE_PEER2PEER = 0x02,
};
struct wmi_p2p_cfg_cmd {
- u8 discovery_mode; /* wmi_discovery_mode */
+ /* enum wmi_discovery_mode */
+ u8 discovery_mode;
u8 channel;
- __le16 bcon_interval; /* base to listen/search duration calculation */
+ /* base to listen/search duration calculation */
+ __le16 bcon_interval;
} __packed;
-/*
- * WMI_POWER_MGMT_CFG_CMDID
- */
+/* WMI_POWER_MGMT_CFG_CMDID */
enum wmi_power_source_type {
- WMI_POWER_SOURCE_BATTERY = 0,
- WMI_POWER_SOURCE_OTHER = 1,
+ WMI_POWER_SOURCE_BATTERY = 0x00,
+ WMI_POWER_SOURCE_OTHER = 0x01,
};
struct wmi_power_mgmt_cfg_cmd {
- u8 power_source; /* wmi_power_source_type */
+ /* enum wmi_power_source_type */
+ u8 power_source;
u8 reserved[3];
} __packed;
-/*
- * WMI_PCP_START_CMDID
- */
-
-enum wmi_hidden_ssid {
- WMI_HIDDEN_SSID_DISABLED = 0,
- WMI_HIDDEN_SSID_SEND_EMPTY = 1,
- WMI_HIDDEN_SSID_CLEAR = 2,
-};
-
+/* WMI_PCP_START_CMDID */
struct wmi_pcp_start_cmd {
__le16 bcon_interval;
u8 pcp_max_assoc_sta;
u8 hidden_ssid;
- u8 reserved0[8];
+ u8 is_go;
+ u8 reserved0[7];
u8 network_type;
u8 channel;
u8 disable_sec_offload;
u8 disable_sec;
} __packed;
-/*
- * WMI_SW_TX_REQ_CMDID
- */
+/* WMI_SW_TX_REQ_CMDID */
struct wmi_sw_tx_req_cmd {
u8 dst_mac[WMI_MAC_LEN];
__le16 len;
u8 payload[0];
} __packed;
-/*
- * WMI_VRING_CFG_CMDID
- */
-
struct wmi_sw_ring_cfg {
__le64 ring_mem_base;
__le16 ring_size;
__le16 max_mpdu_size;
} __packed;
+/* wmi_vring_cfg_schd */
struct wmi_vring_cfg_schd {
__le16 priority;
__le16 timeslot_us;
} __packed;
enum wmi_vring_cfg_encap_trans_type {
- WMI_VRING_ENC_TYPE_802_3 = 0,
- WMI_VRING_ENC_TYPE_NATIVE_WIFI = 1,
+ WMI_VRING_ENC_TYPE_802_3 = 0x00,
+ WMI_VRING_ENC_TYPE_NATIVE_WIFI = 0x01,
};
enum wmi_vring_cfg_ds_cfg {
- WMI_VRING_DS_PBSS = 0,
- WMI_VRING_DS_STATION = 1,
- WMI_VRING_DS_AP = 2,
- WMI_VRING_DS_ADDR4 = 3,
+ WMI_VRING_DS_PBSS = 0x00,
+ WMI_VRING_DS_STATION = 0x01,
+ WMI_VRING_DS_AP = 0x02,
+ WMI_VRING_DS_ADDR4 = 0x03,
};
enum wmi_vring_cfg_nwifi_ds_trans_type {
- WMI_NWIFI_TX_TRANS_MODE_NO = 0,
- WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 1,
- WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 2,
+ WMI_NWIFI_TX_TRANS_MODE_NO = 0x00,
+ WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 0x01,
+ WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 0x02,
};
enum wmi_vring_cfg_schd_params_priority {
- WMI_SCH_PRIO_REGULAR = 0,
- WMI_SCH_PRIO_HIGH = 1,
+ WMI_SCH_PRIO_REGULAR = 0x00,
+ WMI_SCH_PRIO_HIGH = 0x01,
};
-#define CIDXTID_CID_POS (0)
-#define CIDXTID_CID_LEN (4)
-#define CIDXTID_CID_MSK (0xF)
-#define CIDXTID_TID_POS (4)
-#define CIDXTID_TID_LEN (4)
-#define CIDXTID_TID_MSK (0xF0)
+#define CIDXTID_CID_POS (0)
+#define CIDXTID_CID_LEN (4)
+#define CIDXTID_CID_MSK (0xF)
+#define CIDXTID_TID_POS (4)
+#define CIDXTID_TID_LEN (4)
+#define CIDXTID_TID_MSK (0xF0)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
+#define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
+#define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
+#define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
struct wmi_vring_cfg {
struct wmi_sw_ring_cfg tx_sw_ring;
- u8 ringid; /* 0-23 vrings */
-
+ /* 0-23 vrings */
+ u8 ringid;
u8 cidxtid;
-
u8 encap_trans_type;
- u8 ds_cfg; /* 802.3 DS cfg */
+ /* 802.3 DS cfg */
+ u8 ds_cfg;
u8 nwifi_ds_trans_type;
-
- #define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
- #define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
- #define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
- #define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
- #define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
- #define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
u8 mac_ctrl;
-
- #define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
- #define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
- #define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
u8 to_resolution;
u8 agg_max_wsize;
struct wmi_vring_cfg_schd schd_params;
} __packed;
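The CIDXTID_* and VRING_CFG_* field macros move from inside the struct bodies to file scope; each subfield of a byte such as cidxtid is described by a POS/LEN/MSK triple. A sketch of generic pack/unpack helpers built on those triples (the helper names are mine, not the driver's):

#include <assert.h>
#include <stdint.h>

#define CIDXTID_CID_POS (0)
#define CIDXTID_CID_MSK (0xF)
#define CIDXTID_TID_POS (4)
#define CIDXTID_TID_MSK (0xF0)

/* hypothetical helpers: pack/unpack a field given its POS/MSK pair */
static uint8_t field_set(uint8_t reg, uint8_t val, int pos, uint8_t msk)
{
	return (uint8_t)((reg & ~msk) | ((val << pos) & msk));
}

static uint8_t field_get(uint8_t reg, int pos, uint8_t msk)
{
	return (uint8_t)((reg & msk) >> pos);
}

int main(void)
{
	uint8_t cidxtid = 0;

	cidxtid = field_set(cidxtid, 5, CIDXTID_CID_POS, CIDXTID_CID_MSK);
	cidxtid = field_set(cidxtid, 2, CIDXTID_TID_POS, CIDXTID_TID_MSK);
	assert(field_get(cidxtid, CIDXTID_CID_POS, CIDXTID_CID_MSK) == 5);
	assert(field_get(cidxtid, CIDXTID_TID_POS, CIDXTID_TID_MSK) == 2);
	return 0;
}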
enum wmi_vring_cfg_cmd_action {
- WMI_VRING_CMD_ADD = 0,
- WMI_VRING_CMD_MODIFY = 1,
- WMI_VRING_CMD_DELETE = 2,
+ WMI_VRING_CMD_ADD = 0x00,
+ WMI_VRING_CMD_MODIFY = 0x01,
+ WMI_VRING_CMD_DELETE = 0x02,
};
+/* WMI_VRING_CFG_CMDID */
struct wmi_vring_cfg_cmd {
__le32 action;
struct wmi_vring_cfg vring_cfg;
} __packed;
-/*
- * WMI_BCAST_VRING_CFG_CMDID
- */
struct wmi_bcast_vring_cfg {
struct wmi_sw_ring_cfg tx_sw_ring;
- u8 ringid; /* 0-23 vrings */
+ /* 0-23 vrings */
+ u8 ringid;
u8 encap_trans_type;
- u8 ds_cfg; /* 802.3 DS cfg */
+ /* 802.3 DS cfg */
+ u8 ds_cfg;
u8 nwifi_ds_trans_type;
} __packed;
+/* WMI_BCAST_VRING_CFG_CMDID */
struct wmi_bcast_vring_cfg_cmd {
__le32 action;
struct wmi_bcast_vring_cfg vring_cfg;
} __packed;
-/*
- * WMI_VRING_BA_EN_CMDID
- */
+/* WMI_VRING_BA_EN_CMDID */
struct wmi_vring_ba_en_cmd {
u8 ringid;
u8 agg_max_wsize;
__le16 ba_timeout;
u8 amsdu;
+ u8 reserved[3];
} __packed;
-/*
- * WMI_VRING_BA_DIS_CMDID
- */
+/* WMI_VRING_BA_DIS_CMDID */
struct wmi_vring_ba_dis_cmd {
u8 ringid;
u8 reserved;
__le16 reason;
} __packed;
-/*
- * WMI_NOTIFY_REQ_CMDID
- */
+/* WMI_NOTIFY_REQ_CMDID */
struct wmi_notify_req_cmd {
u8 cid;
u8 year;
@@ -654,102 +623,100 @@ struct wmi_notify_req_cmd {
u8 miliseconds;
} __packed;
-/*
- * WMI_CFG_RX_CHAIN_CMDID
- */
+/* WMI_CFG_RX_CHAIN_CMDID */
enum wmi_sniffer_cfg_mode {
- WMI_SNIFFER_OFF = 0,
- WMI_SNIFFER_ON = 1,
+ WMI_SNIFFER_OFF = 0x00,
+ WMI_SNIFFER_ON = 0x01,
};
enum wmi_sniffer_cfg_phy_info_mode {
- WMI_SNIFFER_PHY_INFO_DISABLED = 0,
- WMI_SNIFFER_PHY_INFO_ENABLED = 1,
+ WMI_SNIFFER_PHY_INFO_DISABLED = 0x00,
+ WMI_SNIFFER_PHY_INFO_ENABLED = 0x01,
};
enum wmi_sniffer_cfg_phy_support {
- WMI_SNIFFER_CP = 0,
- WMI_SNIFFER_DP = 1,
- WMI_SNIFFER_BOTH_PHYS = 2,
+ WMI_SNIFFER_CP = 0x00,
+ WMI_SNIFFER_DP = 0x01,
+ WMI_SNIFFER_BOTH_PHYS = 0x02,
};
+/* wmi_sniffer_cfg */
struct wmi_sniffer_cfg {
- __le32 mode; /* enum wmi_sniffer_cfg_mode */
- __le32 phy_info_mode; /* enum wmi_sniffer_cfg_phy_info_mode */
- __le32 phy_support; /* enum wmi_sniffer_cfg_phy_support */
+ /* enum wmi_sniffer_cfg_mode */
+ __le32 mode;
+ /* enum wmi_sniffer_cfg_phy_info_mode */
+ __le32 phy_info_mode;
+ /* enum wmi_sniffer_cfg_phy_support */
+ __le32 phy_support;
u8 channel;
u8 reserved[3];
} __packed;
enum wmi_cfg_rx_chain_cmd_action {
- WMI_RX_CHAIN_ADD = 0,
- WMI_RX_CHAIN_DEL = 1,
+ WMI_RX_CHAIN_ADD = 0x00,
+ WMI_RX_CHAIN_DEL = 0x01,
};
enum wmi_cfg_rx_chain_cmd_decap_trans_type {
- WMI_DECAP_TYPE_802_3 = 0,
- WMI_DECAP_TYPE_NATIVE_WIFI = 1,
- WMI_DECAP_TYPE_NONE = 2,
+ WMI_DECAP_TYPE_802_3 = 0x00,
+ WMI_DECAP_TYPE_NATIVE_WIFI = 0x01,
+ WMI_DECAP_TYPE_NONE = 0x02,
};
enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
- WMI_NWIFI_RX_TRANS_MODE_NO = 0,
- WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 1,
- WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2,
+ WMI_NWIFI_RX_TRANS_MODE_NO = 0x00,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 0x01,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 0x02,
};
enum wmi_cfg_rx_chain_cmd_reorder_type {
- WMI_RX_HW_REORDER = 0,
- WMI_RX_SW_REORDER = 1,
+ WMI_RX_HW_REORDER = 0x00,
+ WMI_RX_SW_REORDER = 0x01,
};
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
+
+/* WMI_CFG_RX_CHAIN_CMDID */
struct wmi_cfg_rx_chain_cmd {
__le32 action;
struct wmi_sw_ring_cfg rx_sw_ring;
u8 mid;
u8 decap_trans_type;
-
- #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
- #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
- #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
- #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
- #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
- #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
u8 l2_802_3_offload_ctrl;
-
- #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
- #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
- #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
- #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
- #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
- #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
u8 l2_nwifi_offload_ctrl;
-
u8 vlan_id;
u8 nwifi_ds_trans_type;
-
- #define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
- #define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
- #define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
- #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
- #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
- #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
u8 l3_l4_ctrl;
-
- #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
- #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
- #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
- #define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
- #define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
- #define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
- #define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
- #define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
- #define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
- #define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
- #define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
- #define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
u8 ring_ctrl;
-
__le16 prefetch_thrsh;
__le16 wb_thrsh;
__le32 itr_value;
@@ -757,31 +724,27 @@ struct wmi_cfg_rx_chain_cmd {
u8 reorder_type;
u8 reserved;
struct wmi_sniffer_cfg sniffer_cfg;
+ __le16 max_rx_pl_per_desc;
} __packed;
-/*
- * WMI_RCP_ADDBA_RESP_CMDID
- */
+/* WMI_RCP_ADDBA_RESP_CMDID */
struct wmi_rcp_addba_resp_cmd {
u8 cidxtid;
u8 dialog_token;
__le16 status_code;
- __le16 ba_param_set; /* ieee80211_ba_parameterset field to send */
+ /* ieee80211_ba_parameterset field to send */
+ __le16 ba_param_set;
__le16 ba_timeout;
} __packed;
-/*
- * WMI_RCP_DELBA_CMDID
- */
+/* WMI_RCP_DELBA_CMDID */
struct wmi_rcp_delba_cmd {
u8 cidxtid;
u8 reserved;
__le16 reason;
} __packed;
-/*
- * WMI_RCP_ADDBA_REQ_CMDID
- */
+/* WMI_RCP_ADDBA_REQ_CMDID */
struct wmi_rcp_addba_req_cmd {
u8 cidxtid;
u8 dialog_token;
@@ -792,32 +755,16 @@ struct wmi_rcp_addba_req_cmd {
__le16 ba_seq_ctrl;
} __packed;
-/*
- * WMI_SET_MAC_ADDRESS_CMDID
- */
+/* WMI_SET_MAC_ADDRESS_CMDID */
struct wmi_set_mac_address_cmd {
u8 mac[WMI_MAC_LEN];
u8 reserved[2];
} __packed;
-/*
-* WMI_EAPOL_TX_CMDID
-*/
-struct wmi_eapol_tx_cmd {
- u8 dst_mac[WMI_MAC_LEN];
- __le16 eapol_len;
- u8 eapol[0];
-} __packed;
-
-/*
- * WMI_ECHO_CMDID
- *
+/* WMI_ECHO_CMDID
* Check FW is alive
- *
* WMI_DEEP_ECHO_CMDID
- *
* Check FW and ucode are alive
- *
* Returned event: WMI_ECHO_RSP_EVENTID
* same event for both commands
*/
@@ -825,70 +772,79 @@ struct wmi_echo_cmd {
__le32 value;
} __packed;
-/*
- * WMI_TEMP_SENSE_CMDID
+/* WMI_OTP_READ_CMDID */
+struct wmi_otp_read_cmd {
+ __le32 addr;
+ __le32 size;
+ __le32 values;
+} __packed;
+
+/* WMI_OTP_WRITE_CMDID */
+struct wmi_otp_write_cmd {
+ __le32 addr;
+ __le32 size;
+ __le32 values;
+} __packed;
+
+/* WMI_TEMP_SENSE_CMDID
*
* Measure MAC and radio temperatures
+ *
+ * Possible modes for temperature measurement
*/
-
-/* Possible modes for temperature measurement */
enum wmi_temperature_measure_mode {
- TEMPERATURE_USE_OLD_VALUE = 0x1,
- TEMPERATURE_MEASURE_NOW = 0x2,
+ TEMPERATURE_USE_OLD_VALUE = 0x01,
+ TEMPERATURE_MEASURE_NOW = 0x02,
};
+/* WMI_TEMP_SENSE_CMDID */
struct wmi_temp_sense_cmd {
__le32 measure_baseband_en;
__le32 measure_rf_en;
__le32 measure_mode;
} __packed;
-/*
- * WMI_PMC_CMDID
- */
-enum wmi_pmc_op_e {
- WMI_PMC_ALLOCATE = 0,
- WMI_PMC_RELEASE = 1,
+enum wmi_pmc_op {
+ WMI_PMC_ALLOCATE = 0x00,
+ WMI_PMC_RELEASE = 0x01,
};
+/* WMI_PMC_CMDID */
struct wmi_pmc_cmd {
- u8 op; /* enum wmi_pmc_cmd_op_type */
+ /* enum wmi_pmc_cmd_op_type */
+ u8 op;
u8 reserved;
__le16 ring_size;
__le64 mem_base;
} __packed;
-/*
- * WMI Events
- */
-
-/*
+/* WMI Events
* List of Events (target to host)
*/
enum wmi_event_id {
WMI_READY_EVENTID = 0x1001,
WMI_CONNECT_EVENTID = 0x1002,
WMI_DISCONNECT_EVENTID = 0x1003,
- WMI_SCAN_COMPLETE_EVENTID = 0x100a,
- WMI_REPORT_STATISTICS_EVENTID = 0x100b,
+ WMI_SCAN_COMPLETE_EVENTID = 0x100A,
+ WMI_REPORT_STATISTICS_EVENTID = 0x100B,
WMI_RD_MEM_RSP_EVENTID = 0x1800,
WMI_FW_READY_EVENTID = 0x1801,
- WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200,
+ WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
WMI_ECHO_RSP_EVENTID = 0x1803,
- WMI_FS_TUNE_DONE_EVENTID = 0x180a,
- WMI_CORR_MEASURE_EVENTID = 0x180b,
- WMI_READ_RSSI_EVENTID = 0x180c,
- WMI_TEMP_SENSE_DONE_EVENTID = 0x180e,
- WMI_DC_CALIB_DONE_EVENTID = 0x180f,
+ WMI_FS_TUNE_DONE_EVENTID = 0x180A,
+ WMI_CORR_MEASURE_EVENTID = 0x180B,
+ WMI_READ_RSSI_EVENTID = 0x180C,
+ WMI_TEMP_SENSE_DONE_EVENTID = 0x180E,
+ WMI_DC_CALIB_DONE_EVENTID = 0x180F,
WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
- WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a,
- WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d,
- WMI_RF_RX_TEST_DONE_EVENTID = 0x181e,
+ WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181A,
+ WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D,
+ WMI_RF_RX_TEST_DONE_EVENTID = 0x181E,
WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
WMI_VRING_CFG_DONE_EVENTID = 0x1821,
WMI_BA_STATUS_EVENTID = 0x1823,
@@ -896,15 +852,13 @@ enum wmi_event_id {
WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
WMI_DELBA_EVENTID = 0x1826,
WMI_GET_SSID_EVENTID = 0x1828,
- WMI_GET_PCP_CHANNEL_EVENTID = 0x182a,
- WMI_SW_TX_COMPLETE_EVENTID = 0x182b,
-
+ WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
+ WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
WMI_READ_MAC_RXQ_EVENTID = 0x1830,
WMI_READ_MAC_TXQ_EVENTID = 0x1831,
WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
-
WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
@@ -914,20 +868,19 @@ enum wmi_event_id {
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
-
+ WMI_OTP_READ_RESULT_EVENTID = 0x1856,
+ WMI_LED_CFG_DONE_EVENTID = 0x1858,
/* Performance monitoring events */
WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
-
WMI_BF_CTRL_DONE_EVENTID = 0x1862,
WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
WMI_GET_STATUS_DONE_EVENTID = 0x1864,
WMI_VRING_EN_EVENTID = 0x1865,
-
WMI_UNIT_TEST_EVENTID = 0x1900,
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
- /*P2P*/
+ /* P2P */
WMI_P2P_CFG_DONE_EVENTID = 0x1910,
WMI_PORT_ALLOCATED_EVENTID = 0x1911,
WMI_PORT_DELETED_EVENTID = 0x1912,
@@ -937,49 +890,42 @@ enum wmi_event_id {
WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
WMI_PCP_STARTED_EVENTID = 0x1918,
WMI_PCP_STOPPED_EVENTID = 0x1919,
- WMI_PCP_FACTOR_EVENTID = 0x191a,
+ WMI_PCP_FACTOR_EVENTID = 0x191A,
WMI_SET_CHANNEL_EVENTID = 0x9000,
WMI_ASSOC_REQ_EVENTID = 0x9001,
WMI_EAPOL_RX_EVENTID = 0x9002,
WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
WMI_FW_VER_EVENTID = 0x9004,
+ WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005,
};
-/*
- * Events data structures
- */
-
+/* Events data structures */
enum wmi_fw_status {
- WMI_FW_STATUS_SUCCESS,
- WMI_FW_STATUS_FAILURE,
+ WMI_FW_STATUS_SUCCESS = 0x00,
+ WMI_FW_STATUS_FAILURE = 0x01,
};
-/*
- * WMI_RF_MGMT_STATUS_EVENTID
- */
+/* WMI_RF_MGMT_STATUS_EVENTID */
enum wmi_rf_status {
- WMI_RF_ENABLED = 0,
- WMI_RF_DISABLED_HW = 1,
- WMI_RF_DISABLED_SW = 2,
- WMI_RF_DISABLED_HW_SW = 3,
+ WMI_RF_ENABLED = 0x00,
+ WMI_RF_DISABLED_HW = 0x01,
+ WMI_RF_DISABLED_SW = 0x02,
+ WMI_RF_DISABLED_HW_SW = 0x03,
};
+/* WMI_RF_MGMT_STATUS_EVENTID */
struct wmi_rf_mgmt_status_event {
__le32 rf_status;
} __packed;
-/*
- * WMI_THERMAL_THROTTLING_STATUS_EVENTID
- */
+/* WMI_THERMAL_THROTTLING_STATUS_EVENTID */
struct wmi_thermal_throttling_status_event {
__le32 time_on_usec;
__le32 time_off_usec;
__le32 max_txop_length_usec;
} __packed;
-/*
- * WMI_GET_STATUS_DONE_EVENTID
- */
+/* WMI_GET_STATUS_DONE_EVENTID */
struct wmi_get_status_done_event {
__le32 is_associated;
u8 cid;
@@ -995,9 +941,7 @@ struct wmi_get_status_done_event {
__le32 is_secured;
} __packed;
-/*
- * WMI_FW_VER_EVENTID
- */
+/* WMI_FW_VER_EVENTID */
struct wmi_fw_ver_event {
u8 major;
u8 minor;
@@ -1005,9 +949,7 @@ struct wmi_fw_ver_event {
__le16 build;
} __packed;
-/*
-* WMI_MAC_ADDR_RESP_EVENTID
-*/
+/* WMI_MAC_ADDR_RESP_EVENTID */
struct wmi_mac_addr_resp_event {
u8 mac[WMI_MAC_LEN];
u8 auth_mode;
@@ -1015,42 +957,38 @@ struct wmi_mac_addr_resp_event {
__le32 offload_mode;
} __packed;
-/*
-* WMI_EAPOL_RX_EVENTID
-*/
+/* WMI_EAPOL_RX_EVENTID */
struct wmi_eapol_rx_event {
u8 src_mac[WMI_MAC_LEN];
__le16 eapol_len;
u8 eapol[0];
} __packed;
-/*
-* WMI_READY_EVENTID
-*/
+/* WMI_READY_EVENTID */
enum wmi_phy_capability {
- WMI_11A_CAPABILITY = 1,
- WMI_11G_CAPABILITY = 2,
- WMI_11AG_CAPABILITY = 3,
- WMI_11NA_CAPABILITY = 4,
- WMI_11NG_CAPABILITY = 5,
- WMI_11NAG_CAPABILITY = 6,
- WMI_11AD_CAPABILITY = 7,
- WMI_11N_CAPABILITY_OFFSET = WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY,
+ WMI_11A_CAPABILITY = 0x01,
+ WMI_11G_CAPABILITY = 0x02,
+ WMI_11AG_CAPABILITY = 0x03,
+ WMI_11NA_CAPABILITY = 0x04,
+ WMI_11NG_CAPABILITY = 0x05,
+ WMI_11NAG_CAPABILITY = 0x06,
+ WMI_11AD_CAPABILITY = 0x07,
+ WMI_11N_CAPABILITY_OFFSET = 0x03,
};
struct wmi_ready_event {
__le32 sw_version;
__le32 abi_version;
u8 mac[WMI_MAC_LEN];
- u8 phy_capability; /* enum wmi_phy_capability */
+ /* enum wmi_phy_capability */
+ u8 phy_capability;
u8 numof_additional_mids;
} __packed;
-/*
- * WMI_NOTIFY_REQ_DONE_EVENTID
- */
+/* WMI_NOTIFY_REQ_DONE_EVENTID */
struct wmi_notify_req_done_event {
- __le32 status; /* beamforming status, 0: fail; 1: OK; 2: retrying */
+ /* beamforming status, 0: fail; 1: OK; 2: retrying */
+ __le32 status;
__le64 tsf;
__le32 snr_val;
__le32 tx_tpt;
@@ -1066,9 +1004,7 @@ struct wmi_notify_req_done_event {
u8 reserved[3];
} __packed;
-/*
- * WMI_CONNECT_EVENTID
- */
+/* WMI_CONNECT_EVENTID */
struct wmi_connect_event {
u8 channel;
u8 reserved0;
@@ -1082,68 +1018,103 @@ struct wmi_connect_event {
u8 assoc_resp_len;
u8 cid;
u8 reserved2[3];
+ /* not in use */
u8 assoc_info[0];
} __packed;
-/*
- * WMI_DISCONNECT_EVENTID
- */
+/* WMI_DISCONNECT_EVENTID */
enum wmi_disconnect_reason {
- WMI_DIS_REASON_NO_NETWORK_AVAIL = 1,
- WMI_DIS_REASON_LOST_LINK = 2, /* bmiss */
- WMI_DIS_REASON_DISCONNECT_CMD = 3,
- WMI_DIS_REASON_BSS_DISCONNECTED = 4,
- WMI_DIS_REASON_AUTH_FAILED = 5,
- WMI_DIS_REASON_ASSOC_FAILED = 6,
- WMI_DIS_REASON_NO_RESOURCES_AVAIL = 7,
- WMI_DIS_REASON_CSERV_DISCONNECT = 8,
- WMI_DIS_REASON_INVALID_PROFILE = 10,
- WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 11,
- WMI_DIS_REASON_PROFILE_MISMATCH = 12,
- WMI_DIS_REASON_CONNECTION_EVICTED = 13,
- WMI_DIS_REASON_IBSS_MERGE = 14,
+ WMI_DIS_REASON_NO_NETWORK_AVAIL = 0x01,
+ /* bmiss */
+ WMI_DIS_REASON_LOST_LINK = 0x02,
+ WMI_DIS_REASON_DISCONNECT_CMD = 0x03,
+ WMI_DIS_REASON_BSS_DISCONNECTED = 0x04,
+ WMI_DIS_REASON_AUTH_FAILED = 0x05,
+ WMI_DIS_REASON_ASSOC_FAILED = 0x06,
+ WMI_DIS_REASON_NO_RESOURCES_AVAIL = 0x07,
+ WMI_DIS_REASON_CSERV_DISCONNECT = 0x08,
+ WMI_DIS_REASON_INVALID_PROFILE = 0x0A,
+ WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 0x0B,
+ WMI_DIS_REASON_PROFILE_MISMATCH = 0x0C,
+ WMI_DIS_REASON_CONNECTION_EVICTED = 0x0D,
+ WMI_DIS_REASON_IBSS_MERGE = 0x0E,
};
struct wmi_disconnect_event {
- __le16 protocol_reason_status; /* reason code, see 802.11 spec. */
- u8 bssid[WMI_MAC_LEN]; /* set if known */
- u8 disconnect_reason; /* see wmi_disconnect_reason */
- u8 assoc_resp_len; /* not used */
- u8 assoc_info[0]; /* not used */
+ /* reason code, see 802.11 spec. */
+ __le16 protocol_reason_status;
+ /* set if known */
+ u8 bssid[WMI_MAC_LEN];
+ /* see enum wmi_disconnect_reason */
+ u8 disconnect_reason;
+ /* last assoc req may be passed to host - not in use */
+ u8 assoc_resp_len;
+ /* last assoc req may be passed to host - not in use */
+ u8 assoc_info[0];
} __packed;
-/*
- * WMI_SCAN_COMPLETE_EVENTID
- */
+/* WMI_SCAN_COMPLETE_EVENTID */
enum scan_status {
- WMI_SCAN_SUCCESS = 0,
- WMI_SCAN_FAILED = 1,
- WMI_SCAN_ABORTED = 2,
- WMI_SCAN_REJECTED = 3,
+ WMI_SCAN_SUCCESS = 0x00,
+ WMI_SCAN_FAILED = 0x01,
+ WMI_SCAN_ABORTED = 0x02,
+ WMI_SCAN_REJECTED = 0x03,
+ WMI_SCAN_ABORT_REJECTED = 0x04,
};
struct wmi_scan_complete_event {
- __le32 status; /* scan_status */
+ /* enum scan_status */
+ __le32 status;
} __packed;
-/*
- * WMI_BA_STATUS_EVENTID
- */
+/* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */
+enum wmi_acs_info_bitmask {
+ WMI_ACS_INFO_BITMASK_BEACON_FOUND = 0x01,
+ WMI_ACS_INFO_BITMASK_BUSY_TIME = 0x02,
+ WMI_ACS_INFO_BITMASK_TX_TIME = 0x04,
+ WMI_ACS_INFO_BITMASK_RX_TIME = 0x08,
+ WMI_ACS_INFO_BITMASK_NOISE = 0x10,
+};
+
+struct scan_acs_info {
+ u8 channel;
+ u8 beacon_found;
+ /* msec */
+ __le16 busy_time;
+ __le16 tx_time;
+ __le16 rx_time;
+ u8 noise;
+ u8 reserved[3];
+} __packed;
+
+struct wmi_acs_passive_scan_complete_event {
+ __le32 dwell_time;
+ /* valid fields within channel info according to
+ * their appearance in struct order
+ */
+ __le16 filled;
+ u8 num_scanned_channels;
+ u8 reserved;
+ struct scan_acs_info scan_info_list[0];
+} __packed;
+
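The new ACS passive-scan event carries one scan_acs_info record per scanned channel, and the filled bitmask declares which fields the firmware actually populated. A sketch that walks the list honoring that bitmask (stdint stand-ins; a little-endian host is assumed for brevity):

#include <stdint.h>
#include <stdio.h>

#define ACS_BEACON_FOUND 0x01
#define ACS_BUSY_TIME    0x02

struct scan_acs_info {
	uint8_t  channel;
	uint8_t  beacon_found;
	uint16_t busy_time;  /* msec, valid only if ACS_BUSY_TIME is set */
	uint16_t tx_time;
	uint16_t rx_time;
	uint8_t  noise;
	uint8_t  reserved[3];
} __attribute__((packed));

static void dump_acs(uint16_t filled, const struct scan_acs_info *info, int n)
{
	for (int i = 0; i < n; i++) {
		printf("channel %u:", info[i].channel);
		if (filled & ACS_BEACON_FOUND)
			printf(" beacon=%u", info[i].beacon_found);
		if (filled & ACS_BUSY_TIME)
			printf(" busy=%ums", info[i].busy_time);
		printf("\n");
	}
}

int main(void)
{
	struct scan_acs_info info[1] = { { .channel = 1, .beacon_found = 1,
					   .busy_time = 40 } };
	dump_acs(ACS_BEACON_FOUND | ACS_BUSY_TIME, info, 1);
	return 0;
}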
+/* WMI_BA_STATUS_EVENTID */
enum wmi_vring_ba_status {
- WMI_BA_AGREED = 0,
- WMI_BA_NON_AGREED = 1,
+ WMI_BA_AGREED = 0x00,
+ WMI_BA_NON_AGREED = 0x01,
/* BA_EN in middle of teardown flow */
- WMI_BA_TD_WIP = 2,
+ WMI_BA_TD_WIP = 0x02,
/* BA_DIS or BA_EN in middle of BA SETUP flow */
- WMI_BA_SETUP_WIP = 3,
+ WMI_BA_SETUP_WIP = 0x03,
/* BA_EN when the BA session is already active */
- WMI_BA_SESSION_ACTIVE = 4,
+ WMI_BA_SESSION_ACTIVE = 0x04,
/* BA_DIS when the BA session is not active */
- WMI_BA_SESSION_NOT_ACTIVE = 5,
+ WMI_BA_SESSION_NOT_ACTIVE = 0x05,
};
-struct wmi_vring_ba_status_event {
- __le16 status; /* enum wmi_vring_ba_status */
+struct wmi_ba_status_event {
+ /* enum wmi_vring_ba_status */
+ __le16 status;
u8 reserved[2];
u8 ringid;
u8 agg_wsize;
@@ -1151,18 +1122,14 @@ struct wmi_vring_ba_status_event {
u8 amsdu;
} __packed;
-/*
- * WMI_DELBA_EVENTID
- */
+/* WMI_DELBA_EVENTID */
struct wmi_delba_event {
u8 cidxtid;
u8 from_initiator;
__le16 reason;
} __packed;
-/*
- * WMI_VRING_CFG_DONE_EVENTID
- */
+/* WMI_VRING_CFG_DONE_EVENTID */
struct wmi_vring_cfg_done_event {
u8 ringid;
u8 status;
@@ -1170,174 +1137,151 @@ struct wmi_vring_cfg_done_event {
__le32 tx_vring_tail_ptr;
} __packed;
-/*
- * WMI_RCP_ADDBA_RESP_SENT_EVENTID
- */
+/* WMI_RCP_ADDBA_RESP_SENT_EVENTID */
struct wmi_rcp_addba_resp_sent_event {
u8 cidxtid;
u8 reserved;
__le16 status;
} __packed;
-/*
- * WMI_RCP_ADDBA_REQ_EVENTID
- */
+/* WMI_RCP_ADDBA_REQ_EVENTID */
struct wmi_rcp_addba_req_event {
u8 cidxtid;
u8 dialog_token;
- __le16 ba_param_set; /* ieee80211_ba_parameterset as it received */
+ /* ieee80211_ba_parameterset as it received */
+ __le16 ba_param_set;
__le16 ba_timeout;
- __le16 ba_seq_ctrl; /* ieee80211_ba_seqstrl field as it received */
+ /* ieee80211_ba_seqstrl field as it received */
+ __le16 ba_seq_ctrl;
} __packed;
-/*
- * WMI_CFG_RX_CHAIN_DONE_EVENTID
- */
+/* WMI_CFG_RX_CHAIN_DONE_EVENTID */
enum wmi_cfg_rx_chain_done_event_status {
- WMI_CFG_RX_CHAIN_SUCCESS = 1,
+ WMI_CFG_RX_CHAIN_SUCCESS = 0x01,
};
struct wmi_cfg_rx_chain_done_event {
- __le32 rx_ring_tail_ptr; /* Rx V-Ring Tail pointer */
+ /* V-Ring Tail pointer */
+ __le32 rx_ring_tail_ptr;
__le32 status;
} __packed;
-/*
- * WMI_WBE_LINK_DOWN_EVENTID
- */
+/* WMI_WBE_LINK_DOWN_EVENTID */
enum wmi_wbe_link_down_event_reason {
- WMI_WBE_REASON_USER_REQUEST = 0,
- WMI_WBE_REASON_RX_DISASSOC = 1,
- WMI_WBE_REASON_BAD_PHY_LINK = 2,
+ WMI_WBE_REASON_USER_REQUEST = 0x00,
+ WMI_WBE_REASON_RX_DISASSOC = 0x01,
+ WMI_WBE_REASON_BAD_PHY_LINK = 0x02,
};
+/* WMI_WBE_LINK_DOWN_EVENTID */
struct wmi_wbe_link_down_event {
u8 cid;
u8 reserved[3];
__le32 reason;
} __packed;
-/*
- * WMI_DATA_PORT_OPEN_EVENTID
- */
+/* WMI_DATA_PORT_OPEN_EVENTID */
struct wmi_data_port_open_event {
u8 cid;
u8 reserved[3];
} __packed;
-/*
- * WMI_VRING_EN_EVENTID
- */
+/* WMI_VRING_EN_EVENTID */
struct wmi_vring_en_event {
u8 vring_index;
u8 reserved[3];
} __packed;
-/*
- * WMI_GET_PCP_CHANNEL_EVENTID
- */
+/* WMI_GET_PCP_CHANNEL_EVENTID */
struct wmi_get_pcp_channel_event {
u8 channel;
u8 reserved[3];
} __packed;
-/*
- * WMI_P2P_CFG_DONE_EVENTID
- */
+/* WMI_P2P_CFG_DONE_EVENTID */
struct wmi_p2p_cfg_done_event {
- u8 status; /* wmi_fw_status */
+ /* wmi_fw_status */
+ u8 status;
u8 reserved[3];
} __packed;
-/*
-* WMI_PORT_ALLOCATED_EVENTID
-*/
+/* WMI_PORT_ALLOCATED_EVENTID */
struct wmi_port_allocated_event {
- u8 status; /* wmi_fw_status */
+ /* wmi_fw_status */
+ u8 status;
u8 reserved[3];
} __packed;
-/*
-* WMI_PORT_DELETED_EVENTID
-*/
+/* WMI_PORT_DELETED_EVENTID */
struct wmi_port_deleted_event {
- u8 status; /* wmi_fw_status */
+ /* wmi_fw_status */
+ u8 status;
u8 reserved[3];
} __packed;
-/*
- * WMI_LISTEN_STARTED_EVENTID
- */
+/* WMI_LISTEN_STARTED_EVENTID */
struct wmi_listen_started_event {
- u8 status; /* wmi_fw_status */
+ /* wmi_fw_status */
+ u8 status;
u8 reserved[3];
} __packed;
-/*
- * WMI_SEARCH_STARTED_EVENTID
- */
+/* WMI_SEARCH_STARTED_EVENTID */
struct wmi_search_started_event {
- u8 status; /* wmi_fw_status */
+ /* wmi_fw_status */
+ u8 status;
u8 reserved[3];
} __packed;
-/*
- * WMI_PCP_STARTED_EVENTID
- */
+/* WMI_PCP_STARTED_EVENTID */
struct wmi_pcp_started_event {
- u8 status; /* wmi_fw_status */
+ /* wmi_fw_status */
+ u8 status;
u8 reserved[3];
} __packed;
-/*
- * WMI_PCP_FACTOR_EVENTID
- */
+/* WMI_PCP_FACTOR_EVENTID */
struct wmi_pcp_factor_event {
__le32 pcp_factor;
} __packed;
-/*
- * WMI_SW_TX_COMPLETE_EVENTID
- */
enum wmi_sw_tx_status {
- WMI_TX_SW_STATUS_SUCCESS = 0,
- WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 1,
- WMI_TX_SW_STATUS_FAILED_TX = 2,
+ WMI_TX_SW_STATUS_SUCCESS = 0x00,
+ WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 0x01,
+ WMI_TX_SW_STATUS_FAILED_TX = 0x02,
};
+/* WMI_SW_TX_COMPLETE_EVENTID */
struct wmi_sw_tx_complete_event {
- u8 status; /* enum wmi_sw_tx_status */
+ /* enum wmi_sw_tx_status */
+ u8 status;
u8 reserved[3];
} __packed;
-/*
- * WMI_CORR_MEASURE_EVENTID
- */
+/* WMI_CORR_MEASURE_EVENTID */
struct wmi_corr_measure_event {
- s32 i;
- s32 q;
- s32 image_i;
- s32 image_q;
+ /* signed */
+ __le32 i;
+ /* signed */
+ __le32 q;
+ /* signed */
+ __le32 image_i;
+ /* signed */
+ __le32 image_q;
} __packed;
-/*
- * WMI_READ_RSSI_EVENTID
- */
+/* WMI_READ_RSSI_EVENTID */
struct wmi_read_rssi_event {
__le32 ina_rssi_adc_dbm;
} __packed;
-/*
- * WMI_GET_SSID_EVENTID
- */
+/* WMI_GET_SSID_EVENTID */
struct wmi_get_ssid_event {
__le32 ssid_len;
u8 ssid[WMI_MAX_SSID_LEN];
} __packed;
-/*
- * WMI_RX_MGMT_PACKET_EVENTID
- */
+/* wmi_rx_mgmt_info */
struct wmi_rx_mgmt_info {
u8 mcs;
s8 snr;
@@ -1346,39 +1290,124 @@ struct wmi_rx_mgmt_info {
__le16 stype;
__le16 status;
__le32 len;
+ /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
u8 qid;
+ /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
u8 mid;
u8 cid;
- u8 channel; /* From Radio MNGR */
+ /* From Radio MNGR */
+ u8 channel;
} __packed;
-/*
- * WMI_TX_MGMT_PACKET_EVENTID
- */
+/* wmi_otp_read_write_cmd */
+struct wmi_otp_read_write_cmd {
+ __le32 addr;
+ __le32 size;
+ u8 values[0];
+} __packed;
+
+/* WMI_OTP_READ_RESULT_EVENTID */
+struct wmi_otp_read_result_event {
+ u8 payload[0];
+} __packed;
+
+/* WMI_TX_MGMT_PACKET_EVENTID */
struct wmi_tx_mgmt_packet_event {
u8 payload[0];
} __packed;
+/* WMI_RX_MGMT_PACKET_EVENTID */
struct wmi_rx_mgmt_packet_event {
struct wmi_rx_mgmt_info info;
u8 payload[0];
} __packed;
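
Both packet events end in the payload[0] flexible-array idiom: a fixed header
(wmi_rx_mgmt_info for RX) followed by a variable-length frame body whose
length is advertised in info.len. A bounds-checking sketch of how a consumer
might locate the body; the condensed header layout and memcpy-based access
are assumptions made for a portable example, not the driver's code:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Simplified stand-in for the fixed header; only the trailing length
 * field matters for the bounds check. */
struct rx_mgmt_hdr {
	uint8_t fixed[12];	/* mcs, snr, range, ... condensed */
	uint32_t len;		/* length of the trailing frame body */
};

/* Return the frame body, or NULL if the event buffer is too short
 * for the advertised payload. */
static const uint8_t *rx_mgmt_payload(const uint8_t *buf, size_t buf_len,
				      uint32_t *out_len)
{
	struct rx_mgmt_hdr hdr;

	if (buf_len < sizeof(hdr))
		return NULL;
	memcpy(&hdr, buf, sizeof(hdr));	/* avoid unaligned access */
	if (buf_len - sizeof(hdr) < hdr.len)
		return NULL;		/* truncated event */
	*out_len = hdr.len;
	return buf + sizeof(hdr);
}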
-/*
- * WMI_ECHO_RSP_EVENTID
- */
-struct wmi_echo_event {
+/* WMI_ECHO_RSP_EVENTID */
+struct wmi_echo_rsp_event {
__le32 echoed_value;
} __packed;
-/*
- * WMI_TEMP_SENSE_DONE_EVENTID
+/* WMI_TEMP_SENSE_DONE_EVENTID
*
* Measure MAC and radio temperatures
*/
struct wmi_temp_sense_done_event {
+ /* Temperature times 1000 (divide the value by 1000 to
+ * obtain the actual temperature)
+ */
__le32 baseband_t1000;
+ /* Temperature times 1000 (divide the value by 1000 to
+ * obtain the actual temperature)
+ */
__le32 rf_t1000;
} __packed;
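
Both fields report the temperature multiplied by 1000, so the consumer
divides by 1000 to recover degrees. A tiny worked example; the helper name
and printf formatting are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Split a *_t1000 reading into whole degrees and millidegrees. */
static void t1000_split(uint32_t t1000, uint32_t *deg, uint32_t *mdeg)
{
	*deg = t1000 / 1000;
	*mdeg = t1000 % 1000;
}

int main(void)
{
	uint32_t deg, mdeg;

	t1000_split(42500, &deg, &mdeg);	/* 42500 -> 42.500 degrees */
	printf("baseband: %u.%03u C\n", deg, mdeg);
	return 0;
}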
+#define WMI_SCAN_DWELL_TIME_MS (100)
+#define WMI_SURVEY_TIMEOUT_MS (10000)
+
+enum wmi_hidden_ssid {
+ WMI_HIDDEN_SSID_DISABLED = 0x00,
+ WMI_HIDDEN_SSID_SEND_EMPTY = 0x10,
+ WMI_HIDDEN_SSID_CLEAR = 0xFE,
+};
+
+/* WMI_LED_CFG_CMDID
+ *
+ * Configure LED On/Off/Blinking operation
+ *
+ * Returned events:
+ * - WMI_LED_CFG_DONE_EVENTID
+ */
+enum led_mode {
+ LED_DISABLE = 0x00,
+ LED_ENABLE = 0x01,
+};
+
+/* The names of the LEDs as
+ * described in the HW schematics.
+ */
+enum wmi_led_id {
+ WMI_LED_WLAN = 0x00,
+ WMI_LED_WPAN = 0x01,
+ WMI_LED_WWAN = 0x02,
+};
+
+/* LED polarity mode. */
+enum wmi_led_polarity {
+ LED_POLARITY_HIGH_ACTIVE = 0x00,
+ LED_POLARITY_LOW_ACTIVE = 0x01,
+};
+
+/* The on and off durations together
+ * define the blink period
+ */
+struct wmi_led_blink_mode {
+ __le32 blink_on;
+ __le32 blink_off;
+} __packed;
+
+/* WMI_LED_CFG_CMDID */
+struct wmi_led_cfg_cmd {
+ /* enum led_mode */
+ u8 led_mode;
+ /* enum wmi_led_id */
+ u8 id;
+ /* slow speed blinking combination */
+ struct wmi_led_blink_mode slow_blink_cfg;
+ /* medium speed blinking combination */
+ struct wmi_led_blink_mode medium_blink_cfg;
+ /* high speed blinking combination */
+ struct wmi_led_blink_mode fast_blink_cfg;
+ /* polarity of the led */
+ u8 led_polarity;
+ /* reserved */
+ u8 reserved;
+} __packed;
+
+/* WMI_LED_CFG_DONE_EVENTID */
+struct wmi_led_cfg_done_event {
+ /* LED config status */
+ __le32 status;
+} __packed;
+
#endif /* __WILOCITY_WMI_H__ */
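
Tying the new LED definitions together: a sketch of how a caller could
populate wmi_led_cfg_cmd before queuing it to the firmware, after which
WMI_LED_CFG_DONE_EVENTID reports the status. The struct mirror below drops
__packed and uses host types for portability; the 500 ms blink timings and
the fill_led_cfg() helper are invented for illustration, not driver code:

#include <stdint.h>
#include <string.h>

#define cpu_to_le32(x) (x)	/* little-endian host assumed, sketch only */

struct led_blink_mode {
	uint32_t blink_on;
	uint32_t blink_off;	/* on + off = one blink period */
};

struct led_cfg_cmd {
	uint8_t led_mode;	/* enum led_mode */
	uint8_t id;		/* enum wmi_led_id */
	struct led_blink_mode slow_blink_cfg;
	struct led_blink_mode medium_blink_cfg;
	struct led_blink_mode fast_blink_cfg;
	uint8_t led_polarity;	/* enum wmi_led_polarity */
	uint8_t reserved;
};

/* Enable the WLAN LED, active high, with a made-up 500 ms on /
 * 500 ms off slow-blink pattern. */
static void fill_led_cfg(struct led_cfg_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->led_mode = 0x01;		/* LED_ENABLE */
	cmd->id = 0x00;			/* WMI_LED_WLAN */
	cmd->led_polarity = 0x00;	/* LED_POLARITY_HIGH_ACTIVE */
	cmd->slow_blink_cfg.blink_on = cpu_to_le32(500);
	cmd->slow_blink_cfg.blink_off = cpu_to_le32(500);
}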
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index bb1ed0ebd..488c08c30 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -1540,7 +1540,7 @@ static inline int at76_guess_freq(struct at76_priv *priv)
channel = el[2];
exit:
- return ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+ return ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
}
static void at76_rx_tasklet(unsigned long param)
@@ -1583,7 +1583,7 @@ static void at76_rx_tasklet(unsigned long param)
rx_status.signal = buf->rssi;
rx_status.flag |= RX_FLAG_DECRYPTED;
rx_status.flag |= RX_FLAG_IV_STRIPPED;
- rx_status.band = IEEE80211_BAND_2GHZ;
+ rx_status.band = NL80211_BAND_2GHZ;
rx_status.freq = at76_guess_freq(priv);
at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d",
@@ -2352,7 +2352,7 @@ static int at76_init_new_device(struct at76_priv *priv,
priv->hw->wiphy->max_scan_ssids = 1;
priv->hw->wiphy->max_scan_ie_len = 0;
priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &at76_supported_band;
+ priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = &at76_supported_band;
ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
ieee80211_hw_set(priv->hw, SIGNAL_UNSPEC);
priv->hw->max_signal = 100;
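
The at76 hunks are part of the tree-wide rename of enum ieee80211_band to
enum nl80211_band; the 2 GHz and 5 GHz enumerators keep their values, so the
substitution is mechanical. For reference, the 2.4 GHz mapping that
ieee80211_channel_to_frequency() applies for NL80211_BAND_2GHZ, redone here
as a standalone sketch (not the mac80211 implementation itself):

#include <stdio.h>

/* 2.4 GHz channel -> center frequency in MHz; channel 14 is the
 * Japanese special case. */
static int chan2g_to_freq(int chan)
{
	if (chan == 14)
		return 2484;
	if (chan >= 1 && chan <= 13)
		return 2407 + chan * 5;
	return 0;	/* invalid channel */
}

int main(void)
{
	printf("ch1=%d MHz ch6=%d MHz ch14=%d MHz\n",
	       chan2g_to_freq(1), chan2g_to_freq(6), chan2g_to_freq(14));
	return 0;	/* prints 2412, 2437, 2484 */
}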
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index ae6dc6fac..b6107aea6 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -2260,7 +2260,7 @@ static int atmel_set_freq(struct net_device *dev,
fwrq->m = ieee80211_frequency_to_channel(f);
}
/* Setting by channel number */
- if ((fwrq->m > 1000) || (fwrq->e > 0))
+ if (fwrq->m < 0 || fwrq->m > 1000 || fwrq->e > 0)
rc = -EOPNOTSUPP;
else {
int channel = fwrq->m;
@@ -2419,7 +2419,7 @@ static int atmel_get_range(struct net_device *dev,
/* Values in MHz -> * 10^5 * 10 */
range->freq[k].m = 100000 *
- ieee80211_channel_to_frequency(i, IEEE80211_BAND_2GHZ);
+ ieee80211_channel_to_frequency(i, NL80211_BAND_2GHZ);
range->freq[k++].e = 1;
}
range->num_frequency = k;
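
The hunk above encodes frequencies in the Wireless Extensions fixed-point
form, where a value is a mantissa m times 10^e Hz: storing 100000 × MHz with
e = 1 yields MHz × 10^6 Hz. A small round-trip sketch under that convention;
the struct and helper names are illustrative, not the iwlib API:

#include <stdio.h>

/* Wireless Extensions encode a frequency as m * 10^e (in Hz). */
struct iw_freq_sketch {
	long m;		/* mantissa */
	short e;	/* exponent */
};

static void encode_mhz(struct iw_freq_sketch *f, int mhz)
{
	f->m = 100000 * mhz;	/* MHz -> * 10^5 * 10, as in the hunk */
	f->e = 1;
}

static double decode_hz(const struct iw_freq_sketch *f)
{
	double v = f->m;
	int i;

	for (i = 0; i < f->e; i++)
		v *= 10.0;
	return v;
}

int main(void)
{
	struct iw_freq_sketch f;

	encode_mhz(&f, 2412);			/* channel 1 */
	printf("%.0f Hz\n", decode_hz(&f));	/* 2412000000 */
	return 0;
}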
diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
index 036552439..d7d42f0b8 100644
--- a/drivers/net/wireless/broadcom/b43/b43.h
+++ b/drivers/net/wireless/broadcom/b43/b43.h
@@ -992,9 +992,9 @@ static inline int b43_is_mode(struct b43_wl *wl, int type)
/**
* b43_current_band - Returns the currently used band.
- * Returns one of IEEE80211_BAND_2GHZ and IEEE80211_BAND_5GHZ.
+ * Returns one of NL80211_BAND_2GHZ and NL80211_BAND_5GHZ.
*/
-static inline enum ieee80211_band b43_current_band(struct b43_wl *wl)
+static inline enum nl80211_band b43_current_band(struct b43_wl *wl)
{
return wl->hw->conf.chandef.chan->band;
}
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 97774b338..f8c7aa9db 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -181,7 +181,7 @@ static struct ieee80211_rate __b43_ratetable[] = {
#define b43_g_ratetable_size 12
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -210,7 +210,7 @@ static struct ieee80211_channel b43_2ghz_chantable[] = {
#undef CHAN2G
#define CHAN4G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = 4000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -218,7 +218,7 @@ static struct ieee80211_channel b43_2ghz_chantable[] = {
.max_power = 30, \
}
#define CHAN5G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -317,7 +317,7 @@ static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
#undef CHAN5G
static struct ieee80211_supported_band b43_band_5GHz_nphy = {
- .band = IEEE80211_BAND_5GHZ,
+ .band = NL80211_BAND_5GHZ,
.channels = b43_5ghz_nphy_chantable,
.n_channels = ARRAY_SIZE(b43_5ghz_nphy_chantable),
.bitrates = b43_a_ratetable,
@@ -325,7 +325,7 @@ static struct ieee80211_supported_band b43_band_5GHz_nphy = {
};
static struct ieee80211_supported_band b43_band_5GHz_nphy_limited = {
- .band = IEEE80211_BAND_5GHZ,
+ .band = NL80211_BAND_5GHZ,
.channels = b43_5ghz_nphy_chantable_limited,
.n_channels = ARRAY_SIZE(b43_5ghz_nphy_chantable_limited),
.bitrates = b43_a_ratetable,
@@ -333,7 +333,7 @@ static struct ieee80211_supported_band b43_band_5GHz_nphy_limited = {
};
static struct ieee80211_supported_band b43_band_5GHz_aphy = {
- .band = IEEE80211_BAND_5GHZ,
+ .band = NL80211_BAND_5GHZ,
.channels = b43_5ghz_aphy_chantable,
.n_channels = ARRAY_SIZE(b43_5ghz_aphy_chantable),
.bitrates = b43_a_ratetable,
@@ -341,7 +341,7 @@ static struct ieee80211_supported_band b43_band_5GHz_aphy = {
};
static struct ieee80211_supported_band b43_band_2GHz = {
- .band = IEEE80211_BAND_2GHZ,
+ .band = NL80211_BAND_2GHZ,
.channels = b43_2ghz_chantable,
.n_channels = ARRAY_SIZE(b43_2ghz_chantable),
.bitrates = b43_g_ratetable,
@@ -349,7 +349,7 @@ static struct ieee80211_supported_band b43_band_2GHz = {
};
static struct ieee80211_supported_band b43_band_2ghz_limited = {
- .band = IEEE80211_BAND_2GHZ,
+ .band = NL80211_BAND_2GHZ,
.channels = b43_2ghz_chantable,
.n_channels = b43_2ghz_chantable_limited_size,
.bitrates = b43_g_ratetable,
@@ -711,7 +711,7 @@ static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
{
/* slot_time is in usec. */
/* This test used to exit for all but a G PHY. */
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
return;
b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
/* Shared memory location 0x0010 is the slot time and should be
@@ -3876,12 +3876,12 @@ static void b43_op_set_tsf(struct ieee80211_hw *hw,
mutex_unlock(&wl->mutex);
}
-static const char *band_to_string(enum ieee80211_band band)
+static const char *band_to_string(enum nl80211_band band)
{
switch (band) {
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
return "5";
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
return "2.4";
default:
break;
@@ -3899,10 +3899,10 @@ static int b43_switch_band(struct b43_wldev *dev,
u32 tmp;
switch (chan->band) {
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
gmode = false;
break;
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
gmode = true;
break;
default:
@@ -5290,16 +5290,16 @@ static int b43_setup_bands(struct b43_wldev *dev,
phy->radio_rev == 9;
if (have_2ghz_phy)
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = limited_2g ?
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = limited_2g ?
&b43_band_2ghz_limited : &b43_band_2GHz;
if (dev->phy.type == B43_PHYTYPE_N) {
if (have_5ghz_phy)
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = limited_5g ?
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = limited_5g ?
&b43_band_5GHz_nphy_limited :
&b43_band_5GHz_nphy;
} else {
if (have_5ghz_phy)
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_aphy;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = &b43_band_5GHz_aphy;
}
dev->phy.supports_2ghz = have_2ghz_phy;
diff --git a/drivers/net/wireless/broadcom/b43/phy_ac.c b/drivers/net/wireless/broadcom/b43/phy_ac.c
index e75633d67..52f8abad8 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ac.c
+++ b/drivers/net/wireless/broadcom/b43/phy_ac.c
@@ -61,7 +61,7 @@ static void b43_phy_ac_op_radio_write(struct b43_wldev *dev, u16 reg,
static unsigned int b43_phy_ac_op_get_default_chan(struct b43_wldev *dev)
{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
return 11;
return 36;
}
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
index ec2b9c577..85f2ca989 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.c
+++ b/drivers/net/wireless/broadcom/b43/phy_common.c
@@ -436,7 +436,7 @@ int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
* firmware from sending ghost packets.
*/
channelcookie = new_channel;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
channelcookie |= B43_SHM_SH_CHAN_5GHZ;
/* FIXME: set 40Mhz flag if required */
if (0)
diff --git a/drivers/net/wireless/broadcom/b43/phy_ht.c b/drivers/net/wireless/broadcom/b43/phy_ht.c
index bd6894596..718c90e81 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ht.c
+++ b/drivers/net/wireless/broadcom/b43/phy_ht.c
@@ -568,7 +568,7 @@ static void b43_phy_ht_tx_power_ctl(struct b43_wldev *dev, bool enable)
} else {
b43_phy_set(dev, B43_PHY_HT_TXPCTL_CMD_C1, en_bits);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
for (i = 0; i < 3; i++)
b43_phy_write(dev, cmd_regs[i], 0x32);
}
@@ -643,7 +643,7 @@ static void b43_phy_ht_tx_power_ctl_setup(struct b43_wldev *dev)
u16 freq = dev->phy.chandef->chan->center_freq;
int i, c;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
for (c = 0; c < 3; c++) {
target[c] = sprom->core_pwr_info[c].maxpwr_2g;
a1[c] = sprom->core_pwr_info[c].pa_2g[0];
@@ -777,7 +777,7 @@ static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
const struct b43_phy_ht_channeltab_e_phy *e,
struct ieee80211_channel *new_channel)
{
- if (new_channel->band == IEEE80211_BAND_5GHZ) {
+ if (new_channel->band == NL80211_BAND_5GHZ) {
/* Switch to 2 GHz for a moment to access B-PHY regs */
b43_phy_mask(dev, B43_PHY_HT_BANDCTL, ~B43_PHY_HT_BANDCTL_5GHZ);
@@ -805,7 +805,7 @@ static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
} else {
b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_OFDM_EN,
B43_PHY_HT_CLASS_CTL_OFDM_EN);
- if (new_channel->band == IEEE80211_BAND_2GHZ)
+ if (new_channel->band == NL80211_BAND_2GHZ)
b43_phy_mask(dev, B43_PHY_HT_TEST, ~0x840);
}
@@ -916,7 +916,7 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
if (0) /* TODO: condition */
; /* TODO: PHY op on reg 0x217 */
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, 0);
else
b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN,
@@ -1005,7 +1005,7 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
b43_phy_ht_classifier(dev, 0, 0);
b43_phy_ht_read_clip_detection(dev, clip_state);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
b43_phy_ht_bphy_init(dev);
b43_httab_write_bulk(dev, B43_HTTAB32(0x1a, 0xc0),
@@ -1077,7 +1077,7 @@ static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev,
enum nl80211_channel_type channel_type =
cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if ((new_channel < 1) || (new_channel > 14))
return -EINVAL;
} else {
@@ -1089,7 +1089,7 @@ static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev,
static unsigned int b43_phy_ht_op_get_default_chan(struct b43_wldev *dev)
{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
return 11;
return 36;
}
diff --git a/drivers/net/wireless/broadcom/b43/phy_lcn.c b/drivers/net/wireless/broadcom/b43/phy_lcn.c
index 97461ccf3..63bd29f07 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/b43/phy_lcn.c
@@ -108,7 +108,7 @@ static void b43_radio_2064_channel_setup(struct b43_wldev *dev)
/* wlc_radio_2064_init */
static void b43_radio_2064_init(struct b43_wldev *dev)
{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_radio_write(dev, 0x09c, 0x0020);
b43_radio_write(dev, 0x105, 0x0008);
} else {
@@ -535,7 +535,7 @@ static void b43_phy_lcn_tx_pwr_ctl_init(struct b43_wldev *dev)
b43_mac_suspend(dev);
if (!dev->phy.lcn->hw_pwr_ctl_capable) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
tx_gains.gm_gain = 4;
tx_gains.pga_gain = 12;
tx_gains.pad_gain = 12;
@@ -720,7 +720,7 @@ static int b43_phy_lcn_op_init(struct b43_wldev *dev)
else
B43_WARN_ON(1);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
b43_phy_lcn_tx_pwr_ctl_init(dev);
b43_switch_channel(dev, dev->phy.channel);
@@ -779,7 +779,7 @@ static int b43_phy_lcn_op_switch_channel(struct b43_wldev *dev,
enum nl80211_channel_type channel_type =
cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if ((new_channel < 1) || (new_channel > 14))
return -EINVAL;
} else {
@@ -791,7 +791,7 @@ static int b43_phy_lcn_op_switch_channel(struct b43_wldev *dev,
static unsigned int b43_phy_lcn_op_get_default_chan(struct b43_wldev *dev)
{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
return 1;
return 36;
}
diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c
index 058a9f232..6922cbb99 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lp.c
+++ b/drivers/net/wireless/broadcom/b43/phy_lp.c
@@ -46,7 +46,7 @@ static inline u16 channel2freq_lp(u8 channel)
static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev)
{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
return 1;
return 36;
}
@@ -91,7 +91,7 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
u32 ofdmpo;
int i;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
lpphy->tx_isolation_med_band = sprom->tri2g;
lpphy->bx_arch = sprom->bxa2g;
lpphy->rx_pwr_offset = sprom->rxpo2g;
@@ -174,7 +174,7 @@ static void lpphy_adjust_gain_table(struct b43_wldev *dev, u32 freq)
B43_WARN_ON(dev->phy.rev >= 2);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
isolation = lpphy->tx_isolation_med_band;
else if (freq <= 5320)
isolation = lpphy->tx_isolation_low_band;
@@ -238,7 +238,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB,
0xFF00, lpphy->rx_pwr_offset);
if ((sprom->boardflags_lo & B43_BFL_FEM) &&
- ((b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+ ((b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ||
(sprom->boardflags_hi & B43_BFH_PAREF))) {
ssb_pmu_set_ldo_voltage(&bus->chipco, LDO_PAREF, 0x28);
ssb_pmu_set_ldo_paref(&bus->chipco, true);
@@ -280,7 +280,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_7, 0xC0FF, 0x0900);
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xFFC0, 0x000A);
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xC0FF, 0x0B00);
- } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ ||
+ } else if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ ||
(dev->dev->board_type == SSB_BOARD_BU4312) ||
(dev->phy.rev == 0 && (sprom->boardflags_lo & B43_BFL_FEM))) {
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0001);
@@ -326,7 +326,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
//FIXME the Broadcom driver caches & delays this HF write!
b43_hf_write(dev, b43_hf_read(dev) | B43_HF_PR45960W);
}
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_phy_set(dev, B43_LPPHY_LP_PHY_CTL, 0x8000);
b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0040);
b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0x00FF, 0xA400);
@@ -466,7 +466,7 @@ static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev)
b43_lptab_write(dev, B43_LPTAB16(0x08, 0x12), 0x40);
}
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x40);
b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xF0FF, 0xB00);
b43_phy_maskset(dev, B43_LPPHY_SYNCPEAKCNT, 0xFFF8, 0x6);
@@ -547,7 +547,7 @@ static void lpphy_2062_init(struct b43_wldev *dev)
b43_radio_write(dev, B2062_S_BG_CTL1,
(b43_radio_read(dev, B2062_N_COMM2) >> 1) | 0x80);
}
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
b43_radio_set(dev, B2062_N_TSSI_CTL0, 0x1);
else
b43_radio_mask(dev, B2062_N_TSSI_CTL0, ~0x1);
@@ -746,7 +746,7 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
lpphy->crs_sys_disable = false;
if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL,
0xFF1F, 0x60);
else
@@ -807,7 +807,7 @@ static void lpphy_disable_rx_gain_override(struct b43_wldev *dev)
b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF);
if (dev->phy.rev >= 2) {
b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF);
b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7);
}
@@ -823,7 +823,7 @@ static void lpphy_enable_rx_gain_override(struct b43_wldev *dev)
b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40);
if (dev->phy.rev >= 2) {
b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400);
b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8);
}
@@ -951,7 +951,7 @@ static void lpphy_rev2plus_set_rx_gain(struct b43_wldev *dev, u32 gain)
0xFBFF, ext_lna << 10);
b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain);
b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFF0, high_gain);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
tmp = (gain >> 2) & 0x3;
b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL,
0xE7FF, tmp<<11);
@@ -1344,7 +1344,7 @@ static void lpphy_calibrate_rc(struct b43_wldev *dev)
if (dev->phy.rev >= 2) {
lpphy_rev2plus_rc_calib(dev);
} else if (!lpphy->rc_cap) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
lpphy_rev0_1_rc_calib(dev);
} else {
lpphy_set_rc_cap(dev);
@@ -1548,7 +1548,7 @@ static void lpphy_tx_pctl_init_sw(struct b43_wldev *dev)
{
struct lpphy_tx_gains gains;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
gains.gm = 4;
gains.pad = 12;
gains.pga = 12;
@@ -1902,7 +1902,7 @@ static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx,
lpphy_set_trsw_over(dev, tx, rx);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8);
b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0,
0xFFF7, pa << 3);
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index 9f0bcf3b8..a5557d706 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -105,9 +105,9 @@ enum n_rail_type {
static inline bool b43_nphy_ipa(struct b43_wldev *dev)
{
- enum ieee80211_band band = b43_current_band(dev->wl);
- return ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
- (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ));
+ enum nl80211_band band = b43_current_band(dev->wl);
+ return ((dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) ||
+ (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ));
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */
@@ -357,7 +357,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
break;
case N_INTC_OVERRIDE_PA:
tmp = 0x0030;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
val = value << 5;
else
val = value << 4;
@@ -365,7 +365,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
b43_phy_set(dev, reg, 0x1000);
break;
case N_INTC_OVERRIDE_EXT_LNA_PU:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
tmp = 0x0001;
tmp2 = 0x0004;
val = value;
@@ -378,7 +378,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
b43_phy_mask(dev, reg, ~tmp2);
break;
case N_INTC_OVERRIDE_EXT_LNA_GAIN:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
tmp = 0x0002;
tmp2 = 0x0008;
val = value << 1;
@@ -465,7 +465,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
}
break;
case N_INTC_OVERRIDE_PA:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
tmp = 0x0020;
val = value << 5;
} else {
@@ -475,7 +475,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
b43_phy_maskset(dev, reg, ~tmp, val);
break;
case N_INTC_OVERRIDE_EXT_LNA_PU:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
tmp = 0x0001;
val = value;
} else {
@@ -485,7 +485,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
b43_phy_maskset(dev, reg, ~tmp, val);
break;
case N_INTC_OVERRIDE_EXT_LNA_GAIN:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
tmp = 0x0002;
val = value << 1;
} else {
@@ -600,7 +600,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
b43_nphy_stay_in_carrier_search(dev, 1);
if (nphy->gain_boost) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
gain[0] = 6;
gain[1] = 6;
} else {
@@ -736,7 +736,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev,
switch (phy->radio_rev) {
case 0 ... 4:
case 6:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_R1, 0x3f);
b43_radio_write(dev, R2057_CP_KPD_IDAC, 0x3f);
b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C1, 0x8);
@@ -751,7 +751,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev,
case 9: /* e.g. PHY rev 16 */
b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x20);
b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x18);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x38);
b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x0f);
@@ -775,7 +775,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev,
break;
}
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
u16 txmix2g_tune_boost_pu = 0;
u16 pad2g_tune_pus = 0;
@@ -1135,7 +1135,7 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
{
struct b43_phy *phy = &dev->phy;
struct ssb_sprom *sprom = dev->dev->bus_sprom;
- enum ieee80211_band band = b43_current_band(dev->wl);
+ enum nl80211_band band = b43_current_band(dev->wl);
u16 offset;
u8 i;
u16 bias, cbias;
@@ -1152,10 +1152,10 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC);
b43_chantab_radio_2056_upload(dev, e);
- b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
+ b2056_upload_syn_pll_cp2(dev, band == NL80211_BAND_5GHZ);
if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
@@ -1168,21 +1168,21 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
}
}
if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 &&
- b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b);
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20);
}
if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05);
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C);
}
- if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) {
+ if (dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) {
for (i = 0; i < 2; i++) {
offset = i ? B2056_TX1 : B2056_TX0;
if (dev->phy.rev >= 5) {
@@ -1244,7 +1244,7 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
}
b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
}
- } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
+ } else if (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ) {
u16 freq = phy->chandef->chan->center_freq;
if (freq < 5100) {
paa_boost = 0xA;
@@ -1501,7 +1501,7 @@ static void b43_radio_init2055(struct b43_wldev *dev)
/* Follow wl, not specs. Do not force uploading all regs */
b2055_upload_inittab(dev, 0, 0);
} else {
- bool ghz5 = b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ;
+ bool ghz5 = b43_current_band(dev->wl) == NL80211_BAND_5GHZ;
b2055_upload_inittab(dev, ghz5, 0);
}
b43_radio_init2055_post(dev);
@@ -1785,7 +1785,7 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code,
b43_phy_maskset(dev, reg, 0xFFC3, 0);
if (rssi_type == N_RSSI_W1)
- val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8;
+ val = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ? 4 : 8;
else if (rssi_type == N_RSSI_W2)
val = 16;
else
@@ -1813,12 +1813,12 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code,
if (rssi_type != N_RSSI_IQ &&
rssi_type != N_RSSI_TBD) {
- enum ieee80211_band band =
+ enum nl80211_band band =
b43_current_band(dev->wl);
if (dev->phy.rev < 7) {
if (b43_nphy_ipa(dev))
- val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
+ val = (band == NL80211_BAND_5GHZ) ? 0xC : 0xE;
else
val = 0x11;
reg = (i == 0) ? B2056_TX0 : B2056_TX1;
@@ -2120,7 +2120,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1, 0, false);
b43_nphy_rf_ctl_override_rev7(dev, 0x80, 1, 0, false, 0);
b43_nphy_rf_ctl_override_rev7(dev, 0x40, 1, 0, false, 0);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
b43_nphy_rf_ctl_override_rev7(dev, 0x20, 0, 0, false,
0);
b43_nphy_rf_ctl_override_rev7(dev, 0x10, 1, 0, false,
@@ -2136,7 +2136,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
} else {
@@ -2257,7 +2257,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]);
/* Store for future configuration */
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
} else {
@@ -2289,7 +2289,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
rssical_phy_regs[11] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y);
/* Remember for which channel we store configuration */
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
nphy->rssical_chanspec_2G.center_freq = phy->chandef->chan->center_freq;
else
nphy->rssical_chanspec_5G.center_freq = phy->chandef->chan->center_freq;
@@ -2336,7 +2336,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, enum n_rssi_type type)
b43_nphy_read_clip_detection(dev, clip_state);
b43_nphy_write_clip_detection(dev, clip_off);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
override = 0x140;
else
override = 0x110;
@@ -2629,7 +2629,7 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
if (nphy->gain_boost) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ &&
b43_is_40mhz(dev))
code = 4;
else
@@ -2688,7 +2688,7 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4);
}
@@ -2803,7 +2803,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
scap_val = b43_radio_read(dev, R2057_RCCAL_SCAP_VAL);
if (b43_nphy_ipa(dev)) {
- bool ghz2 = b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ;
+ bool ghz2 = b43_current_band(dev->wl) == NL80211_BAND_2GHZ;
switch (phy->radio_rev) {
case 5:
@@ -2831,7 +2831,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
bcap_val_11b[core] = bcap_val;
lpf_ofdm_20mhz[core] = 4;
lpf_11b[core] = 1;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
scap_val_11n_20[core] = 0xc;
bcap_val_11n_20[core] = 0xc;
scap_val_11n_40[core] = 0xa;
@@ -2982,7 +2982,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
conv = 0x7f;
filt = 0xee;
}
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
for (core = 0; core < 2; core++) {
if (core == 0) {
b43_radio_write(dev, 0x5F, bias);
@@ -2998,7 +2998,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
}
if (b43_nphy_ipa(dev)) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if (phy->radio_rev == 3 || phy->radio_rev == 4 ||
phy->radio_rev == 6) {
for (core = 0; core < 2; core++) {
@@ -3221,7 +3221,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
ARRAY_SIZE(rx2tx_events));
}
- tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
+ tmp16 = (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) ?
0x2 : 0x9C40;
b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
@@ -3240,7 +3240,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
pdet_range = sprom->fem.ghz2.pdet_range;
else
pdet_range = sprom->fem.ghz5.pdet_range;
@@ -3249,7 +3249,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
switch (pdet_range) {
case 3:
if (!(dev->phy.rev >= 4 &&
- b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+ b43_current_band(dev->wl) == NL80211_BAND_2GHZ))
break;
/* FALL THROUGH */
case 0:
@@ -3261,7 +3261,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
break;
case 2:
if (dev->phy.rev >= 6) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
vmid[3] = 0x94;
else
vmid[3] = 0x8e;
@@ -3277,7 +3277,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
break;
case 4:
case 5:
- if (b43_current_band(dev->wl) != IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) != NL80211_BAND_2GHZ) {
if (pdet_range == 4) {
vmid[3] = 0x8e;
tmp16 = 0x96;
@@ -3322,9 +3322,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
/* N PHY WAR TX Chain Update with hw_phytxchain as argument */
if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+ b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ||
(sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+ b43_current_band(dev->wl) == NL80211_BAND_2GHZ))
tmp32 = 0x00088888;
else
tmp32 = 0x88888888;
@@ -3333,7 +3333,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
if (dev->phy.rev == 4 &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
0x70);
b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
@@ -3376,7 +3376,7 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
delays1[5] = 0x14;
}
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ &&
nphy->band5g_pwrgain) {
b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
@@ -3451,7 +3451,7 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = phy->n;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
b43_nphy_classifier(dev, 1, 0);
else
b43_nphy_classifier(dev, 1, 1);
@@ -3586,7 +3586,7 @@ static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
gain = (target.pad[core]) | (target.pga[core] << 4) |
(target.txgm[core] << 8);
- indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
+ indx = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ?
1 : 0;
for (i = 0; i < 9; i++)
if (tbl_iqcal_gainparams[indx][i][0] == gain)
@@ -3614,7 +3614,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
struct b43_phy_n *nphy = dev->phy.n;
u8 i;
u16 bmask, val, tmp;
- enum ieee80211_band band = b43_current_band(dev->wl);
+ enum nl80211_band band = b43_current_band(dev->wl);
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
@@ -3679,7 +3679,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
}
b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
- if (band == IEEE80211_BAND_5GHZ) {
+ if (band == NL80211_BAND_5GHZ) {
if (phy->rev >= 19) {
/* TODO */
} else if (phy->rev >= 7) {
@@ -3770,7 +3770,7 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
txpi[0] = 72;
txpi[1] = 72;
} else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
txpi[0] = sprom->txpid2g[0];
txpi[1] = sprom->txpid2g[1];
} else if (freq >= 4900 && freq < 5100) {
@@ -3868,7 +3868,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
} else if (phy->rev >= 7) {
for (core = 0; core < 2; core++) {
r = core ? 0x190 : 0x170;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_radio_write(dev, r + 0x5, 0x5);
b43_radio_write(dev, r + 0x9, 0xE);
if (phy->rev != 5)
@@ -3892,7 +3892,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
b43_radio_write(dev, r + 0xC, 0);
}
} else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128);
else
b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80);
@@ -3909,7 +3909,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8);
b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0);
b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
0x5);
if (phy->rev != 5)
@@ -4098,7 +4098,7 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
b0[0] = b0[1] = 5612;
b1[0] = b1[1] = -1393;
} else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
for (c = 0; c < 2; c++) {
idle[c] = nphy->pwr_ctl_info[c].idle_tssi_2g;
target[c] = sprom->core_pwr_info[c].maxpwr_2g;
@@ -4153,11 +4153,11 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
for (c = 0; c < 2; c++) {
r = c ? 0x190 : 0x170;
if (b43_nphy_ipa(dev))
- b43_radio_write(dev, r + 0x9, (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? 0xE : 0xC);
+ b43_radio_write(dev, r + 0x9, (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) ? 0xE : 0xC);
}
} else {
if (b43_nphy_ipa(dev)) {
- tmp = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
+ tmp = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ? 0xC : 0xE;
b43_radio_write(dev,
B2056_TX0 | B2056_TX_TX_SSI_MUX, tmp);
b43_radio_write(dev,
@@ -4267,13 +4267,13 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
} else if (phy->rev >= 7) {
pga_gain = (table[i] >> 24) & 0xf;
pad_gain = (table[i] >> 19) & 0x1f;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
rfpwr_offset = rf_pwr_offset_table[pad_gain];
else
rfpwr_offset = rf_pwr_offset_table[pga_gain];
} else {
pga_gain = (table[i] >> 24) & 0xF;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
else
rfpwr_offset = 0; /* FIXME */
@@ -4288,7 +4288,7 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
{
struct b43_phy_n *nphy = dev->phy.n;
- enum ieee80211_band band;
+ enum nl80211_band band;
u16 tmp;
if (!enable) {
@@ -4300,12 +4300,12 @@ static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
if (dev->phy.rev >= 7) {
tmp = 0x1480;
} else if (dev->phy.rev >= 3) {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
tmp = 0x600;
else
tmp = 0x480;
} else {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
tmp = 0x180;
else
tmp = 0x120;
@@ -4734,7 +4734,7 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
u16 *rssical_radio_regs = NULL;
u16 *rssical_phy_regs = NULL;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if (!nphy->rssical_chanspec_2G.center_freq)
return;
rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
@@ -4804,7 +4804,7 @@ static void b43_nphy_tx_cal_radio_setup_rev7(struct b43_wldev *dev)
save[off + 7] = b43_radio_read(dev, r + R2057_TX0_TSSIG);
save[off + 8] = b43_radio_read(dev, r + R2057_TX0_TSSI_MISC1);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
b43_radio_write(dev, r + R2057_TX0_TX_SSI_MASTER, 0xA);
b43_radio_write(dev, r + R2057_TX0_IQCAL_VCM_HG, 0x43);
b43_radio_write(dev, r + R2057_TX0_IQCAL_IDAC, 0x55);
@@ -4864,7 +4864,7 @@ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
save[offset + 9] = b43_radio_read(dev, B2055_XOMISC);
save[offset + 10] = b43_radio_read(dev, B2055_PLL_LFC1);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
b43_radio_write(dev, tmp | B2055_CAL_RVARCTL, 0x0A);
b43_radio_write(dev, tmp | B2055_CAL_LPOCTL, 0x40);
b43_radio_write(dev, tmp | B2055_CAL_TS, 0x55);
@@ -5005,7 +5005,7 @@ static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
b43_nphy_pa_set_tx_dig_filter(dev, 0x186,
tbl_tx_filter_coef_rev4[3]);
} else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
b43_nphy_pa_set_tx_dig_filter(dev, 0x186,
tbl_tx_filter_coef_rev4[5]);
if (dev->phy.channel == 14)
@@ -5185,7 +5185,7 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
false, 0);
} else if (phy->rev == 7) {
b43_radio_maskset(dev, R2057_OVR_REG0, 1 << 4, 1 << 4);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE0, ~1, 0);
b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE1, ~1, 0);
} else {
@@ -5210,7 +5210,7 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
b43_ntab_write(dev, B43_NTAB16(8, 18), tmp);
regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
tmp = 0x0180;
else
tmp = 0x0120;
@@ -5233,7 +5233,7 @@ static void b43_nphy_save_cal(struct b43_wldev *dev)
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
iqcal_chanspec = &nphy->iqcal_chanspec_2G;
@@ -5304,7 +5304,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
u16 *txcal_radio_regs = NULL;
struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if (!nphy->iqcal_chanspec_2G.center_freq)
return;
table = nphy->cal_cache.txcal_coeffs_2G;
@@ -5332,7 +5332,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
if (dev->phy.rev < 2)
b43_nphy_tx_iq_workaround(dev);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
} else {
@@ -5422,7 +5422,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
phy6or5x = dev->phy.rev >= 6 ||
(dev->phy.rev == 5 && nphy->ipa2g_on &&
- b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ);
+ b43_current_band(dev->wl) == NL80211_BAND_2GHZ);
if (phy6or5x) {
if (b43_is_40mhz(dev)) {
b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
@@ -5657,7 +5657,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
u16 tmp[6];
u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna;
u32 real, imag;
- enum ieee80211_band band;
+ enum nl80211_band band;
u8 use;
u16 cur_hpf;
@@ -5712,18 +5712,18 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
band = b43_current_band(dev->wl);
if (nphy->rxcalparams & 0xFF000000) {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
b43_phy_write(dev, rfctl[0], 0x140);
else
b43_phy_write(dev, rfctl[0], 0x110);
} else {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
b43_phy_write(dev, rfctl[0], 0x180);
else
b43_phy_write(dev, rfctl[0], 0x120);
}
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
b43_phy_write(dev, rfctl[1], 0x148);
else
b43_phy_write(dev, rfctl[1], 0x114);
@@ -5919,7 +5919,7 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
#if 0
/* Some extra gains */
hw_gain = 6; /* N-PHY specific */
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
hw_gain += sprom->antenna_gain.a0;
else
hw_gain += sprom->antenna_gain.a1;
@@ -6043,7 +6043,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
u8 tx_pwr_state;
struct nphy_txgains target;
u16 tmp;
- enum ieee80211_band tmp2;
+ enum nl80211_band tmp2;
bool do_rssi_cal;
u16 clip[2];
@@ -6051,7 +6051,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
if ((dev->phy.rev >= 3) &&
(sprom->boardflags_lo & B43_BFL_EXTLNA) &&
- (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
+ (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)) {
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
@@ -6170,7 +6170,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
b43_nphy_classifier(dev, 0, 0);
b43_nphy_read_clip_detection(dev, clip);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
b43_nphy_bphy_init(dev);
tx_pwr_state = nphy->txpwrctrl;
@@ -6187,7 +6187,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
do_rssi_cal = false;
if (phy->rev >= 3) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
do_rssi_cal = !nphy->rssical_chanspec_2G.center_freq;
else
do_rssi_cal = !nphy->rssical_chanspec_5G.center_freq;
@@ -6201,7 +6201,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
}
if (!((nphy->measure_hold & 0x6) != 0)) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
do_cal = !nphy->iqcal_chanspec_2G.center_freq;
else
do_cal = !nphy->iqcal_chanspec_5G.center_freq;
@@ -6291,7 +6291,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
int ch = new_channel->hw_value;
u16 tmp16;
- if (new_channel->band == IEEE80211_BAND_5GHZ) {
+ if (new_channel->band == NL80211_BAND_5GHZ) {
/* Switch to 2 GHz for a moment to access B43_PHY_B_BBCFG */
b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
@@ -6302,7 +6302,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX);
b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
- } else if (new_channel->band == IEEE80211_BAND_2GHZ) {
+ } else if (new_channel->band == NL80211_BAND_2GHZ) {
b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
@@ -6319,7 +6319,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
b43_phy_set(dev, B43_PHY_B_TEST, 0x0800);
} else {
b43_nphy_classifier(dev, 2, 2);
- if (new_channel->band == IEEE80211_BAND_2GHZ)
+ if (new_channel->band == NL80211_BAND_2GHZ)
b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840);
}
@@ -6449,7 +6449,7 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
&(tabent_r7->phy_regs) : &(tabent_r7_2g->phy_regs);
if (phy->radio_rev <= 4 || phy->radio_rev == 6) {
- tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 2 : 0;
+ tmp = (channel->band == NL80211_BAND_5GHZ) ? 2 : 0;
b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE0, ~2, tmp);
b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE1, ~2, tmp);
}
@@ -6457,12 +6457,12 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
b43_radio_2057_setup(dev, tabent_r7, tabent_r7_2g);
b43_nphy_channel_setup(dev, phy_regs, channel);
} else if (phy->rev >= 3) {
- tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 4 : 0;
+ tmp = (channel->band == NL80211_BAND_5GHZ) ? 4 : 0;
b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
b43_radio_2056_setup(dev, tabent_r3);
b43_nphy_channel_setup(dev, &(tabent_r3->phy_regs), channel);
} else {
- tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 0x0020 : 0x0050;
+ tmp = (channel->band == NL80211_BAND_5GHZ) ? 0x0020 : 0x0050;
b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp);
b43_radio_2055_setup(dev, tabent_r2);
b43_nphy_channel_setup(dev, &(tabent_r2->phy_regs), channel);
@@ -6692,7 +6692,7 @@ static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
enum nl80211_channel_type channel_type =
cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if ((new_channel < 1) || (new_channel > 14))
return -EINVAL;
} else {
@@ -6705,7 +6705,7 @@ static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev)
{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
return 1;
return 36;
}
diff --git a/drivers/net/wireless/broadcom/b43/tables_lpphy.c b/drivers/net/wireless/broadcom/b43/tables_lpphy.c
index cff187c56..ce01e1645 100644
--- a/drivers/net/wireless/broadcom/b43/tables_lpphy.c
+++ b/drivers/net/wireless/broadcom/b43/tables_lpphy.c
@@ -560,7 +560,7 @@ void b2062_upload_init_table(struct b43_wldev *dev)
for (i = 0; i < ARRAY_SIZE(b2062_init_tab); i++) {
e = &b2062_init_tab[i];
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if (!(e->flags & B206X_FLAG_G))
continue;
b43_radio_write(dev, e->offset, e->value_g);
@@ -579,7 +579,7 @@ void b2063_upload_init_table(struct b43_wldev *dev)
for (i = 0; i < ARRAY_SIZE(b2063_init_tab); i++) {
e = &b2063_init_tab[i];
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if (!(e->flags & B206X_FLAG_G))
continue;
b43_radio_write(dev, e->offset, e->value_g);
@@ -2379,12 +2379,12 @@ static void lpphy_rev2plus_write_gain_table(struct b43_wldev *dev, int offset,
tmp |= data.pga << 8;
tmp |= data.gm;
if (dev->phy.rev >= 3) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
tmp |= 0x10 << 24;
else
tmp |= 0x70 << 24;
} else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
tmp |= 0x14 << 24;
else
tmp |= 0x7F << 24;
@@ -2423,7 +2423,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev)
(sprom->boardflags_lo & B43_BFL_HGPA))
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev0_nopa_tx_gain_table);
- else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev0_2ghz_tx_gain_table);
else
@@ -2435,7 +2435,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev)
(sprom->boardflags_lo & B43_BFL_HGPA))
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev1_nopa_tx_gain_table);
- else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev1_2ghz_tx_gain_table);
else
@@ -2446,7 +2446,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev)
if (sprom->boardflags_hi & B43_BFH_NOPA)
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev2_nopa_tx_gain_table);
- else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev2_2ghz_tx_gain_table);
else
diff --git a/drivers/net/wireless/broadcom/b43/tables_nphy.c b/drivers/net/wireless/broadcom/b43/tables_nphy.c
index b2f0d245b..44e0957a7 100644
--- a/drivers/net/wireless/broadcom/b43/tables_nphy.c
+++ b/drivers/net/wireless/broadcom/b43/tables_nphy.c
@@ -3502,7 +3502,7 @@ static void b43_nphy_tables_init_rev7_volatile(struct b43_wldev *dev)
{ 0x2, 0x18, 0x2 }, /* Core 1 */
};
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
antswlut = sprom->fem.ghz5.antswlut;
else
antswlut = sprom->fem.ghz2.antswlut;
@@ -3566,7 +3566,7 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
struct ssb_sprom *sprom = dev->dev->bus_sprom;
u8 antswlut;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
antswlut = sprom->fem.ghz5.antswlut;
else
antswlut = sprom->fem.ghz2.antswlut;
@@ -3651,7 +3651,7 @@ static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
switch (phy->rev) {
case 17:
if (phy->radio_rev == 14)
@@ -3698,17 +3698,17 @@ static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
- enum ieee80211_band band = b43_current_band(dev->wl);
+ enum nl80211_band band = b43_current_band(dev->wl);
struct ssb_sprom *sprom = dev->dev->bus_sprom;
if (dev->phy.rev < 3)
return b43_ntab_tx_gain_rev0_1_2;
/* rev 3+ */
- if ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
- (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)) {
+ if ((dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) ||
+ (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ)) {
return b43_nphy_get_ipa_gain_table(dev);
- } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ } else if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
switch (phy->rev) {
case 6:
case 5:
@@ -3746,7 +3746,7 @@ const s16 *b43_ntab_get_rf_pwr_offset_table(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
switch (phy->rev) {
case 17:
if (phy->radio_rev == 14)
diff --git a/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c
index e347b8d80..704ef1bcb 100644
--- a/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c
+++ b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c
@@ -701,7 +701,7 @@ void b43_phy_lcn_tables_init(struct b43_wldev *dev)
b43_phy_lcn_upload_static_tables(dev);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if (sprom->boardflags_lo & B43_BFL_FEM)
b43_phy_lcn_load_tx_gain_tab(dev,
b43_lcntab_tx_gain_tbl_2ghz_ext_pa_rev0);
diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c
index 426dc13c4..f6201264d 100644
--- a/drivers/net/wireless/broadcom/b43/xmit.c
+++ b/drivers/net/wireless/broadcom/b43/xmit.c
@@ -803,7 +803,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
switch (chanstat & B43_RX_CHAN_PHYTYPE) {
case B43_PHYTYPE_A:
- status.band = IEEE80211_BAND_5GHZ;
+ status.band = NL80211_BAND_5GHZ;
B43_WARN_ON(1);
/* FIXME: We don't really know which value the "chanid" contains.
* So the following assignment might be wrong. */
@@ -811,7 +811,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
ieee80211_channel_to_frequency(chanid, status.band);
break;
case B43_PHYTYPE_G:
- status.band = IEEE80211_BAND_2GHZ;
+ status.band = NL80211_BAND_2GHZ;
/* Somewhere between 478.104 and 508.1084 firmware for G-PHY
* has been modified to be compatible with N-PHY and others.
*/
@@ -826,9 +826,9 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
/* chanid is the SHM channel cookie. Which is the plain
* channel number in b43. */
if (chanstat & B43_RX_CHAN_5GHZ)
- status.band = IEEE80211_BAND_5GHZ;
+ status.band = NL80211_BAND_5GHZ;
else
- status.band = IEEE80211_BAND_2GHZ;
+ status.band = NL80211_BAND_2GHZ;
status.freq =
ieee80211_channel_to_frequency(chanid, status.band);
break;
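
The b43 RX path above picks the band from the PHY type (or the B43_RX_CHAN_5GHZ flag) and then maps the SHM channel cookie to a center frequency. A minimal user-space sketch of the arithmetic behind ieee80211_channel_to_frequency() for the two bands used here (the real helper also covers 60 GHz and validates channel ranges):

#include <stdio.h>

/* Simplified form of the cfg80211 conversion used in the hunks above. */
static int chan_to_freq(int chan, int is_5ghz)
{
	if (is_5ghz)
		return 5000 + 5 * chan;	/* e.g. channel 36 -> 5180 MHz */
	if (chan == 14)
		return 2484;		/* Japan-only channel 14 */
	return 2407 + 5 * chan;		/* channels 1..13 */
}

int main(void)
{
	printf("ch 6 (2.4G) -> %d MHz\n", chan_to_freq(6, 0));
	printf("ch 36 (5G)  -> %d MHz\n", chan_to_freq(36, 1));
	return 0;
}
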
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 8491b6f4d..17aa4fd21 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -1055,7 +1055,7 @@ static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev,
b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value);
dur = ieee80211_generic_frame_duration(dev->wl->hw,
dev->wl->vif,
- IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ,
size,
rate);
/* Write PLCP in two parts and timing for packet transfer */
@@ -1121,7 +1121,7 @@ static const u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev,
IEEE80211_STYPE_PROBE_RESP);
dur = ieee80211_generic_frame_duration(dev->wl->hw,
dev->wl->vif,
- IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ,
*dest_size,
rate);
hdr->duration_id = dur;
@@ -2716,7 +2716,7 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
/* Switch the PHY mode (if necessary). */
switch (conf->chandef.chan->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
if (phy->type == B43legacy_PHYTYPE_B)
new_phymode = B43legacy_PHYMODE_B;
else
@@ -2789,7 +2789,7 @@ out_unlock_mutex:
static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u32 brates)
{
struct ieee80211_supported_band *sband =
- dev->wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ dev->wl->hw->wiphy->bands[NL80211_BAND_2GHZ];
struct ieee80211_rate *rate;
int i;
u16 basic, direct, offset, basic_offset, rateptr;
@@ -3627,13 +3627,13 @@ static int b43legacy_setup_modes(struct b43legacy_wldev *dev,
phy->possible_phymodes = 0;
if (have_bphy) {
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ hw->wiphy->bands[NL80211_BAND_2GHZ] =
&b43legacy_band_2GHz_BPHY;
phy->possible_phymodes |= B43legacy_PHYMODE_B;
}
if (have_gphy) {
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ hw->wiphy->bands[NL80211_BAND_2GHZ] =
&b43legacy_band_2GHz_GPHY;
phy->possible_phymodes |= B43legacy_PHYMODE_G;
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c
index 34bf3f0b7..35ccf400b 100644
--- a/drivers/net/wireless/broadcom/b43legacy/xmit.c
+++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c
@@ -565,7 +565,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
switch (chanstat & B43legacy_RX_CHAN_PHYTYPE) {
case B43legacy_PHYTYPE_B:
case B43legacy_PHYTYPE_G:
- status.band = IEEE80211_BAND_2GHZ;
+ status.band = NL80211_BAND_2GHZ;
status.freq = chanid + 2400;
break;
default:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 6af658e44..d1bc51f92 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -321,7 +321,8 @@ brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws,
if (pktbuf->len == 0)
return -ENODATA;
- *ifp = tmp_if;
+ if (ifp != NULL)
+ *ifp = tmp_if;
return 0;
}
@@ -351,6 +352,12 @@ brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx,
{
}
+static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
+ struct sk_buff *skb)
+{
+ brcmf_fws_rxreorder(ifp, skb);
+}
+
int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
{
struct brcmf_bcdc *bcdc;
@@ -372,6 +379,7 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
drvr->proto->configure_addr_mode = brcmf_proto_bcdc_configure_addr_mode;
drvr->proto->delete_peer = brcmf_proto_bcdc_delete_peer;
drvr->proto->add_tdls_peer = brcmf_proto_bcdc_add_tdls_peer;
+ drvr->proto->rxreorder = brcmf_proto_bcdc_rxreorder;
drvr->proto->pd = bcdc;
drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
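
The bcdc hunk above registers a protocol-level rxreorder callback so the shared RX code can dispatch reordering without knowing whether bcdc or msgbuf is active. A stripped-down, compilable sketch of that ops-table pattern (names are illustrative; the real callback is brcmf_fws_rxreorder()):

#include <stdio.h>

struct sk_buff;	/* opaque here */

struct proto_ops {
	void (*rxreorder)(struct sk_buff *skb);
};

static void fws_rxreorder(struct sk_buff *skb)
{
	printf("reorder %p via fwsignal\n", (void *)skb);
}

static void msgbuf_rxreorder(struct sk_buff *skb)
{
	/* msgbuf has no firmware-signalled reordering: empty stub,
	 * mirroring brcmf_msgbuf_rxreorder() added later in this patch */
	(void)skb;
}

int main(void)
{
	struct proto_ops bcdc = { .rxreorder = fws_rxreorder };
	struct proto_ops msgbuf = { .rxreorder = msgbuf_rxreorder };

	bcdc.rxreorder(NULL);	/* common code calls through the table */
	msgbuf.rxreorder(NULL);
	return 0;
}
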
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index da0cdd313..c7550dab6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -250,7 +250,7 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
u32 addr, u8 regsz, void *data, bool write)
{
struct sdio_func *func;
- int ret;
+ int ret = -EINVAL;
brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
write, fn, addr, regsz);
@@ -1098,6 +1098,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 8e02a478e..2b2465456 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -216,7 +216,9 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
int prec);
/* Receive frame for delivery to OS. Callee disposes of rxp. */
-void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event);
+/* Receive async event packet from firmware. Callee disposes of rxp. */
+void brcmf_rx_event(struct device *dev, struct sk_buff *rxp);
/* Indication from bus module regarding presence/insertion of dongle. */
int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index d5c2a2757..62f475e31 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -144,7 +144,7 @@ static struct ieee80211_rate __wl_rates[] = {
#define wl_a_rates_size (wl_g_rates_size - 4)
#define CHAN2G(_channel, _freq) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = IEEE80211_CHAN_DISABLED, \
@@ -153,7 +153,7 @@ static struct ieee80211_rate __wl_rates[] = {
}
#define CHAN5G(_channel) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = IEEE80211_CHAN_DISABLED, \
@@ -181,13 +181,13 @@ static struct ieee80211_channel __wl_5ghz_channels[] = {
* above is added to the band during setup.
*/
static const struct ieee80211_supported_band __wl_band_2ghz = {
- .band = IEEE80211_BAND_2GHZ,
+ .band = NL80211_BAND_2GHZ,
.bitrates = wl_g_rates,
.n_bitrates = wl_g_rates_size,
};
static const struct ieee80211_supported_band __wl_band_5ghz = {
- .band = IEEE80211_BAND_5GHZ,
+ .band = NL80211_BAND_5GHZ,
.bitrates = wl_a_rates,
.n_bitrates = wl_a_rates_size,
};
@@ -250,6 +250,20 @@ struct parsed_vndr_ies {
struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
};
+static u8 nl80211_band_to_fwil(enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return WLC_BAND_2G;
+ case NL80211_BAND_5GHZ:
+ return WLC_BAND_5G;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ return 0;
+}
+
static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
struct cfg80211_chan_def *ch)
{
@@ -292,13 +306,13 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
WARN_ON_ONCE(1);
}
switch (ch->chan->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
ch_inf.band = BRCMU_CHAN_BAND_2G;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
ch_inf.band = BRCMU_CHAN_BAND_5G;
break;
- case IEEE80211_BAND_60GHZ:
+ case NL80211_BAND_60GHZ:
default:
WARN_ON_ONCE(1);
}
@@ -1796,6 +1810,50 @@ enum nl80211_auth_type brcmf_war_auth_type(struct brcmf_if *ifp,
return type;
}
+static void brcmf_set_join_pref(struct brcmf_if *ifp,
+ struct cfg80211_bss_selection *bss_select)
+{
+ struct brcmf_join_pref_params join_pref_params[2];
+ enum nl80211_band band;
+ int err, i = 0;
+
+ join_pref_params[i].len = 2;
+ join_pref_params[i].rssi_gain = 0;
+
+ if (bss_select->behaviour != NL80211_BSS_SELECT_ATTR_BAND_PREF)
+ brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_ASSOC_PREFER, WLC_BAND_AUTO);
+
+ switch (bss_select->behaviour) {
+ case __NL80211_BSS_SELECT_ATTR_INVALID:
+ brcmf_c_set_joinpref_default(ifp);
+ return;
+ case NL80211_BSS_SELECT_ATTR_BAND_PREF:
+ join_pref_params[i].type = BRCMF_JOIN_PREF_BAND;
+ band = bss_select->param.band_pref;
+ join_pref_params[i].band = nl80211_band_to_fwil(band);
+ i++;
+ break;
+ case NL80211_BSS_SELECT_ATTR_RSSI_ADJUST:
+ join_pref_params[i].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+ band = bss_select->param.adjust.band;
+ join_pref_params[i].band = nl80211_band_to_fwil(band);
+ join_pref_params[i].rssi_gain = bss_select->param.adjust.delta;
+ i++;
+ break;
+ case NL80211_BSS_SELECT_ATTR_RSSI:
+ default:
+ break;
+ }
+ join_pref_params[i].type = BRCMF_JOIN_PREF_RSSI;
+ join_pref_params[i].len = 2;
+ join_pref_params[i].rssi_gain = 0;
+ join_pref_params[i].band = 0;
+ err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+ sizeof(join_pref_params));
+ if (err)
+ brcmf_err("Set join_pref error (%d)\n", err);
+}
+
static s32
brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_connect_params *sme)
@@ -1952,6 +2010,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
ext_join_params->scan_le.nprobes = cpu_to_le32(-1);
}
+ brcmf_set_join_pref(ifp, &sme->bss_select);
+
err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
join_params_size);
kfree(ext_join_params);
@@ -2480,12 +2540,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
const u8 *mac, struct station_info *sinfo)
{
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_scb_val_le scb_val;
s32 err = 0;
struct brcmf_sta_info_le sta_info_le;
u32 sta_flags;
u32 is_tdls_peer;
s32 total_rssi;
s32 count_rssi;
+ int rssi;
u32 i;
brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
@@ -2569,6 +2631,20 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
total_rssi /= count_rssi;
sinfo->signal = total_rssi;
+ } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state)) {
+ memset(&scb_val, 0, sizeof(scb_val));
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
+ &scb_val, sizeof(scb_val));
+ if (err) {
+ brcmf_err("Could not get rssi (%d)\n", err);
+ goto done;
+ } else {
+ rssi = le32_to_cpu(scb_val.val);
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->signal = rssi;
+ brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
+ }
}
}
done:
@@ -2679,9 +2755,9 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
channel = bi->ctl_ch;
if (channel <= CH_MAX_2G_CHANNEL)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ band = wiphy->bands[NL80211_BAND_2GHZ];
else
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ band = wiphy->bands[NL80211_BAND_5GHZ];
freq = ieee80211_channel_to_frequency(channel, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -2788,9 +2864,9 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
cfg->d11inf.decchspec(&ch);
if (ch.band == BRCMU_CHAN_BAND_2G)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ band = wiphy->bands[NL80211_BAND_2GHZ];
else
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ band = wiphy->bands[NL80211_BAND_5GHZ];
freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
cfg->channel = freq;
@@ -3608,7 +3684,8 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
if (!test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))
wowl_config |= BRCMF_WOWL_UNASSOC;
- brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", "clear", strlen("clear"));
+ brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", "clear",
+ sizeof(struct brcmf_wowl_wakeind_le));
brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config);
brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1);
brcmf_bus_wowl_config(cfg->pub->bus_if, true);
@@ -5215,9 +5292,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
cfg->d11inf.decchspec(&ch);
if (ch.band == BRCMU_CHAN_BAND_2G)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ band = wiphy->bands[NL80211_BAND_2GHZ];
else
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ band = wiphy->bands[NL80211_BAND_5GHZ];
freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -5707,11 +5784,11 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
}
wiphy = cfg_to_wiphy(cfg);
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ band = wiphy->bands[NL80211_BAND_2GHZ];
if (band)
for (i = 0; i < band->n_channels; i++)
band->channels[i].flags = IEEE80211_CHAN_DISABLED;
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ band = wiphy->bands[NL80211_BAND_5GHZ];
if (band)
for (i = 0; i < band->n_channels; i++)
band->channels[i].flags = IEEE80211_CHAN_DISABLED;
@@ -5722,9 +5799,9 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
cfg->d11inf.decchspec(&ch);
if (ch.band == BRCMU_CHAN_BAND_2G) {
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ band = wiphy->bands[NL80211_BAND_2GHZ];
} else if (ch.band == BRCMU_CHAN_BAND_5G) {
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ band = wiphy->bands[NL80211_BAND_5GHZ];
} else {
brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
continue;
@@ -5839,7 +5916,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
return err;
}
- band = cfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ];
+ band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ];
list = (struct brcmf_chanspec_list *)pbuf;
num_chan = le32_to_cpu(list->count);
for (i = 0; i < num_chan; i++) {
@@ -5871,11 +5948,11 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
band = WLC_BAND_2G;
err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
if (!err) {
- bw_cap[IEEE80211_BAND_2GHZ] = band;
+ bw_cap[NL80211_BAND_2GHZ] = band;
band = WLC_BAND_5G;
err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
if (!err) {
- bw_cap[IEEE80211_BAND_5GHZ] = band;
+ bw_cap[NL80211_BAND_5GHZ] = band;
return;
}
WARN_ON(1);
@@ -5890,14 +5967,14 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
switch (mimo_bwcap) {
case WLC_N_BW_40ALL:
- bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
+ bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
/* fall-thru */
case WLC_N_BW_20IN2G_40IN5G:
- bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
+ bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
/* fall-thru */
case WLC_N_BW_20ALL:
- bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
- bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
+ bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
+ bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
break;
default:
brcmf_err("invalid mimo_bw_cap value\n");
@@ -5938,7 +6015,7 @@ static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
__le16 mcs_map;
/* not allowed in 2.4G band */
- if (band->band == IEEE80211_BAND_2GHZ)
+ if (band->band == NL80211_BAND_2GHZ)
return;
band->vht_cap.vht_supported = true;
@@ -5997,8 +6074,8 @@ static int brcmf_setup_wiphybands(struct wiphy *wiphy)
brcmf_get_bwcap(ifp, bw_cap);
}
brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n",
- nmode, vhtmode, bw_cap[IEEE80211_BAND_2GHZ],
- bw_cap[IEEE80211_BAND_5GHZ]);
+ nmode, vhtmode, bw_cap[NL80211_BAND_2GHZ],
+ bw_cap[NL80211_BAND_5GHZ]);
err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
if (err) {
@@ -6279,6 +6356,10 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy->n_cipher_suites = ARRAY_SIZE(brcmf_cipher_suites);
if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP))
wiphy->n_cipher_suites--;
+ wiphy->bss_select_support = BIT(NL80211_BSS_SELECT_ATTR_RSSI) |
+ BIT(NL80211_BSS_SELECT_ATTR_BAND_PREF) |
+ BIT(NL80211_BSS_SELECT_ATTR_RSSI_ADJUST);
+
wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
WIPHY_FLAG_OFFCHAN_TX |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -6321,7 +6402,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
}
band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
- wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+ wiphy->bands[NL80211_BAND_2GHZ] = band;
}
if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) {
band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz),
@@ -6338,7 +6419,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
}
band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
- wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+ wiphy->bands[NL80211_BAND_5GHZ] = band;
}
}
err = brcmf_setup_wiphybands(wiphy);
@@ -6604,13 +6685,13 @@ static void brcmf_free_wiphy(struct wiphy *wiphy)
kfree(wiphy->iface_combinations[i].limits);
}
kfree(wiphy->iface_combinations);
- if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
- kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
- kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
+ if (wiphy->bands[NL80211_BAND_2GHZ]) {
+ kfree(wiphy->bands[NL80211_BAND_2GHZ]->channels);
+ kfree(wiphy->bands[NL80211_BAND_2GHZ]);
}
- if (wiphy->bands[IEEE80211_BAND_5GHZ]) {
- kfree(wiphy->bands[IEEE80211_BAND_5GHZ]->channels);
- kfree(wiphy->bands[IEEE80211_BAND_5GHZ]);
+ if (wiphy->bands[NL80211_BAND_5GHZ]) {
+ kfree(wiphy->bands[NL80211_BAND_5GHZ]->channels);
+ kfree(wiphy->bands[NL80211_BAND_5GHZ]);
}
wiphy_free(wiphy);
}
@@ -6698,8 +6779,8 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
* cfg80211 here that we do and have it decide we can enable
* it. But first check if device does support 2G operation.
*/
- if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
- cap = &wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap;
+ if (wiphy->bands[NL80211_BAND_2GHZ]) {
+ cap = &wiphy->bands[NL80211_BAND_2GHZ]->ht_cap.cap;
*cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
err = wiphy_register(wiphy);
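
brcmf_set_join_pref() above turns the cfg80211 BSS-selection request into the firmware's "join_pref" iovar: at most one band-preference or RSSI-adjust entry, always terminated by a plain RSSI entry as tie-breaker. A compact sketch of the entry layout and band translation; the type values and WLC_BAND_* constants below are placeholders for the real BRCMF_JOIN_PREF_* and brcmu_wifi.h definitions:

#include <stdint.h>
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

#define WLC_BAND_5G	1	/* illustrative values only */
#define WLC_BAND_2G	2

struct join_pref {	/* mirrors brcmf_join_pref_params */
	uint8_t type;
	uint8_t len;	/* payload length, 2 for every entry here */
	uint8_t rssi_gain;
	uint8_t band;
};

static uint8_t band_to_fwil(enum band b)
{
	return b == BAND_5GHZ ? WLC_BAND_5G : WLC_BAND_2G;
}

int main(void)
{
	/* band-preference entry, then RSSI as the final tie-breaker */
	struct join_pref pref[2] = {
		{ .type = 1 /* BAND */, .len = 2,
		  .band = band_to_fwil(BAND_5GHZ) },
		{ .type = 0 /* RSSI */, .len = 2 },
	};

	printf("entry0: type=%d band=%d\n", pref[0].type, pref[0].band);
	printf("entry1: type=%d\n", pref[1].type);
	return 0;
}
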
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 0e8f2a079..d3fd6b1db 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -1333,6 +1333,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
switch (pub->chip) {
case BRCM_CC_4354_CHIP_ID:
+ case BRCM_CC_4356_CHIP_ID:
/* explicitly check SR engine enable bit */
pmu_cc3_mask = BIT(2);
/* fall-through */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 9e909e3c2..3e15d64c6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -38,7 +38,7 @@ const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40
#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
-/* boost value for RSSI_DELTA in preferred join selection */
+/* default boost value for RSSI_DELTA in preferred join selection */
#define BRCMF_JOIN_PREF_RSSI_BOOST 8
#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
@@ -83,11 +83,31 @@ MODULE_PARM_DESC(ignore_probe_fail, "always succeed probe for debugging");
static struct brcmfmac_platform_data *brcmfmac_pdata;
struct brcmf_mp_global_t brcmf_mp_global;
+void brcmf_c_set_joinpref_default(struct brcmf_if *ifp)
+{
+ struct brcmf_join_pref_params join_pref_params[2];
+ int err;
+
+ /* Setup join_pref to select target by RSSI (boost on 5GHz) */
+ join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+ join_pref_params[0].len = 2;
+ join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
+ join_pref_params[0].band = WLC_BAND_5G;
+
+ join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
+ join_pref_params[1].len = 2;
+ join_pref_params[1].rssi_gain = 0;
+ join_pref_params[1].band = 0;
+ err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+ sizeof(join_pref_params));
+ if (err)
+ brcmf_err("Set join_pref error (%d)\n", err);
+}
+
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
s8 eventmask[BRCMF_EVENTING_MASK_LEN];
u8 buf[BRCMF_DCMD_SMLEN];
- struct brcmf_join_pref_params join_pref_params[2];
struct brcmf_rev_info_le revinfo;
struct brcmf_rev_info *ri;
char *ptr;
@@ -154,19 +174,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
goto done;
}
- /* Setup join_pref to select target by RSSI(with boost on 5GHz) */
- join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
- join_pref_params[0].len = 2;
- join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
- join_pref_params[0].band = WLC_BAND_5G;
- join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
- join_pref_params[1].len = 2;
- join_pref_params[1].rssi_gain = 0;
- join_pref_params[1].band = 0;
- err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
- sizeof(join_pref_params));
- if (err)
- brcmf_err("Set join_pref error (%d)\n", err);
+ brcmf_c_set_joinpref_default(ifp);
/* Setup event_msgs, enable E_IF */
err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
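
The factored-out default in brcmf_c_set_joinpref_default() prefers the strongest candidate after boosting 5 GHz results by BRCMF_JOIN_PREF_RSSI_BOOST (8 dB), so a dual-band AP is normally joined on 5 GHz. The comparison itself happens inside the firmware; the sketch below only illustrates the selection rule those two entries encode:

#include <stdio.h>

#define JOIN_PREF_RSSI_BOOST 8	/* dB, as in the patch */

/* Effective score for target selection: raw RSSI, plus the boost
 * when the candidate is on 5 GHz. */
static int join_score(int rssi_dbm, int is_5ghz)
{
	return rssi_dbm + (is_5ghz ? JOIN_PREF_RSSI_BOOST : 0);
}

int main(void)
{
	/* -70 dBm on 5 GHz beats -65 dBm on 2.4 GHz (-62 vs -65) */
	printf("5G score: %d, 2.4G score: %d\n",
	       join_score(-70, 1), join_score(-65, 0));
	return 0;
}
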
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index ff825cd77..b590499f6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -40,19 +40,6 @@
#define MAX_WAIT_FOR_8021X_TX msecs_to_jiffies(950)
-/* AMPDU rx reordering definitions */
-#define BRCMF_RXREORDER_FLOWID_OFFSET 0
-#define BRCMF_RXREORDER_MAXIDX_OFFSET 2
-#define BRCMF_RXREORDER_FLAGS_OFFSET 4
-#define BRCMF_RXREORDER_CURIDX_OFFSET 6
-#define BRCMF_RXREORDER_EXPIDX_OFFSET 8
-
-#define BRCMF_RXREORDER_DEL_FLOW 0x01
-#define BRCMF_RXREORDER_FLUSH_ALL 0x02
-#define BRCMF_RXREORDER_CURIDX_VALID 0x04
-#define BRCMF_RXREORDER_EXPIDX_VALID 0x08
-#define BRCMF_RXREORDER_NEW_HOLE 0x10
-
#define BRCMF_BSSIDX_INVALID -1
char *brcmf_ifname(struct brcmf_if *ifp)
@@ -313,15 +300,9 @@ void brcmf_txflowblock(struct device *dev, bool state)
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
- skb->dev = ifp->ndev;
- skb->protocol = eth_type_trans(skb, skb->dev);
-
if (skb->pkt_type == PACKET_MULTICAST)
ifp->stats.multicast++;
- /* Process special event packets */
- brcmf_fweh_process_skb(ifp->drvr, skb);
-
if (!(ifp->ndev->flags & IFF_UP)) {
brcmu_pkt_buf_free_skb(skb);
return;
@@ -341,226 +322,60 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
netif_rx_ni(skb);
}
-static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
- u8 start, u8 end,
- struct sk_buff_head *skb_list)
+static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
+ struct brcmf_if **ifp)
{
- /* initialize return list */
- __skb_queue_head_init(skb_list);
+ int ret;
- if (rfi->pend_pkts == 0) {
- brcmf_dbg(INFO, "no packets in reorder queue\n");
- return;
+ /* process and remove protocol-specific header */
+ ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
+
+ if (ret || !(*ifp) || !(*ifp)->ndev) {
+ if (ret != -ENODATA && *ifp)
+ (*ifp)->stats.rx_errors++;
+ brcmu_pkt_buf_free_skb(skb);
+ return -ENODATA;
}
- do {
- if (rfi->pktslots[start]) {
- __skb_queue_tail(skb_list, rfi->pktslots[start]);
- rfi->pktslots[start] = NULL;
- }
- start++;
- if (start > rfi->max_idx)
- start = 0;
- } while (start != end);
- rfi->pend_pkts -= skb_queue_len(skb_list);
+ skb->protocol = eth_type_trans(skb, (*ifp)->ndev);
+ return 0;
}
-static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
- struct sk_buff *pkt)
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
{
- u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
- struct brcmf_ampdu_rx_reorder *rfi;
- struct sk_buff_head reorder_list;
- struct sk_buff *pnext;
- u8 flags;
- u32 buf_size;
-
- flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
- flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
-
- /* validate flags and flow id */
- if (flags == 0xFF) {
- brcmf_err("invalid flags...so ignore this packet\n");
- brcmf_netif_rx(ifp, pkt);
- return;
- }
-
- rfi = ifp->drvr->reorder_flows[flow_id];
- if (flags & BRCMF_RXREORDER_DEL_FLOW) {
- brcmf_dbg(INFO, "flow-%d: delete\n",
- flow_id);
+ struct brcmf_if *ifp;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
- if (rfi == NULL) {
- brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
- flow_id);
- brcmf_netif_rx(ifp, pkt);
- return;
- }
+ brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
- brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
- &reorder_list);
- /* add the last packet */
- __skb_queue_tail(&reorder_list, pkt);
- kfree(rfi);
- ifp->drvr->reorder_flows[flow_id] = NULL;
- goto netif_rx;
- }
- /* from here on we need a flow reorder instance */
- if (rfi == NULL) {
- buf_size = sizeof(*rfi);
- max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
-
- buf_size += (max_idx + 1) * sizeof(pkt);
-
- /* allocate space for flow reorder info */
- brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
- flow_id, max_idx);
- rfi = kzalloc(buf_size, GFP_ATOMIC);
- if (rfi == NULL) {
- brcmf_err("failed to alloc buffer\n");
- brcmf_netif_rx(ifp, pkt);
- return;
- }
+ if (brcmf_rx_hdrpull(drvr, skb, &ifp))
+ return;
- ifp->drvr->reorder_flows[flow_id] = rfi;
- rfi->pktslots = (struct sk_buff **)(rfi+1);
- rfi->max_idx = max_idx;
- }
- if (flags & BRCMF_RXREORDER_NEW_HOLE) {
- if (rfi->pend_pkts) {
- brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
- rfi->exp_idx,
- &reorder_list);
- WARN_ON(rfi->pend_pkts);
- } else {
- __skb_queue_head_init(&reorder_list);
- }
- rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
- rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
- rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
- rfi->pktslots[rfi->cur_idx] = pkt;
- rfi->pend_pkts++;
- brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
- flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
- } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
- cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
- exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
-
- if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
- /* still in the current hole */
- /* enqueue the current on the buffer chain */
- if (rfi->pktslots[cur_idx] != NULL) {
- brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
- brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
- rfi->pktslots[cur_idx] = NULL;
- }
- rfi->pktslots[cur_idx] = pkt;
- rfi->pend_pkts++;
- rfi->cur_idx = cur_idx;
- brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
- flow_id, cur_idx, exp_idx, rfi->pend_pkts);
-
- /* can return now as there is no reorder
- * list to process.
- */
- return;
- }
- if (rfi->exp_idx == cur_idx) {
- if (rfi->pktslots[cur_idx] != NULL) {
- brcmf_dbg(INFO, "error buffer pending..free it\n");
- brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
- rfi->pktslots[cur_idx] = NULL;
- }
- rfi->pktslots[cur_idx] = pkt;
- rfi->pend_pkts++;
-
- /* got the expected one. flush from current to expected
- * and update expected
- */
- brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
- flow_id, cur_idx, exp_idx, rfi->pend_pkts);
-
- rfi->cur_idx = cur_idx;
- rfi->exp_idx = exp_idx;
-
- brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
- &reorder_list);
- brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
- flow_id, skb_queue_len(&reorder_list),
- rfi->pend_pkts);
- } else {
- u8 end_idx;
-
- brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
- flow_id, flags, rfi->cur_idx, rfi->exp_idx,
- cur_idx, exp_idx);
- if (flags & BRCMF_RXREORDER_FLUSH_ALL)
- end_idx = rfi->exp_idx;
- else
- end_idx = exp_idx;
-
- /* flush pkts first */
- brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
- &reorder_list);
-
- if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
- __skb_queue_tail(&reorder_list, pkt);
- } else {
- rfi->pktslots[cur_idx] = pkt;
- rfi->pend_pkts++;
- }
- rfi->exp_idx = exp_idx;
- rfi->cur_idx = cur_idx;
- }
+ if (brcmf_proto_is_reorder_skb(skb)) {
+ brcmf_proto_rxreorder(ifp, skb);
} else {
- /* explicity window move updating the expected index */
- exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
-
- brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
- flow_id, flags, rfi->exp_idx, exp_idx);
- if (flags & BRCMF_RXREORDER_FLUSH_ALL)
- end_idx = rfi->exp_idx;
- else
- end_idx = exp_idx;
+ /* Process special event packets */
+ if (handle_event)
+ brcmf_fweh_process_skb(ifp->drvr, skb);
- brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
- &reorder_list);
- __skb_queue_tail(&reorder_list, pkt);
- /* set the new expected idx */
- rfi->exp_idx = exp_idx;
- }
-netif_rx:
- skb_queue_walk_safe(&reorder_list, pkt, pnext) {
- __skb_unlink(pkt, &reorder_list);
- brcmf_netif_rx(ifp, pkt);
+ brcmf_netif_rx(ifp, skb);
}
}
-void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
+void brcmf_rx_event(struct device *dev, struct sk_buff *skb)
{
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
- struct brcmf_skb_reorder_data *rd;
- int ret;
- brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
-
- /* process and remove protocol-specific header */
- ret = brcmf_proto_hdrpull(drvr, true, skb, &ifp);
+ brcmf_dbg(EVENT, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
- if (ret || !ifp || !ifp->ndev) {
- if (ret != -ENODATA && ifp)
- ifp->stats.rx_errors++;
- brcmu_pkt_buf_free_skb(skb);
+ if (brcmf_rx_hdrpull(drvr, skb, &ifp))
return;
- }
- rd = (struct brcmf_skb_reorder_data *)skb->cb;
- if (rd->reorder)
- brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
- else
- brcmf_netif_rx(ifp, skb);
+ brcmf_fweh_process_skb(ifp->drvr, skb);
+ brcmu_pkt_buf_free_skb(skb);
}
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
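
After this refactor core.c has one shared header pull and two entry points: brcmf_rx_frame() for data frames, which scans for embedded firmware events only when the bus asks for it, and brcmf_rx_event() for frames the bus already identified as events, which are consumed rather than handed to the network stack. A self-contained sketch of that dispatch with stubbed helpers:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the driver helpers used above. */
static void process_event(const char *pkt) { printf("event: %s\n", pkt); }
static void netif_rx(const char *pkt)      { printf("data:  %s\n", pkt); }
static void free_pkt(const char *pkt)      { printf("free:  %s\n", pkt); }
static bool is_reorder(const char *pkt)    { (void)pkt; return false; }

static void rx_frame(const char *pkt, bool handle_event)
{
	if (is_reorder(pkt))
		return;		/* proto->rxreorder(...) in the driver */
	if (handle_event)
		process_event(pkt);	/* peek for firmware events */
	netif_rx(pkt);			/* always delivered to the stack */
}

static void rx_event(const char *pkt)
{
	process_event(pkt);
	free_pkt(pkt);	/* event-only packets never reach netif_rx */
}

int main(void)
{
	rx_frame("data-pkt", true);	/* USB: events mixed with data */
	rx_event("event-pkt");		/* SDIO: dedicated event channel */
	return 0;
}
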
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 7bdb6fef9..647d3cc2a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -208,10 +208,6 @@ struct brcmf_if {
u8 ipv6addr_idx;
};
-struct brcmf_skb_reorder_data {
- u8 *reorder;
-};
-
int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
/* Return pointer to interface name */
@@ -227,6 +223,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
+void brcmf_c_set_joinpref_default(struct brcmf_if *ifp);
int __init brcmf_core_init(void);
void __exit brcmf_core_exit(void);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index d278c1117..875bf1356 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -29,6 +29,7 @@
#define BRCMF_FW_MAX_NVRAM_SIZE 64000
#define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */
#define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */
+#define BRCMF_FW_DEFAULT_BOARDREV "boardrev=0xff"
enum nvram_parser_state {
IDLE,
@@ -51,6 +52,7 @@ enum nvram_parser_state {
* @entry: start position of key,value entry.
* @multi_dev_v1: detect pcie multi device v1 (compressed).
* @multi_dev_v2: detect pcie multi device v2.
+ * @boardrev_found: nvram contains boardrev information.
*/
struct nvram_parser {
enum nvram_parser_state state;
@@ -63,6 +65,7 @@ struct nvram_parser {
u32 entry;
bool multi_dev_v1;
bool multi_dev_v2;
+ bool boardrev_found;
};
/**
@@ -125,6 +128,8 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
nvp->multi_dev_v1 = true;
if (strncmp(&nvp->data[nvp->entry], "pcie/", 5) == 0)
nvp->multi_dev_v2 = true;
+ if (strncmp(&nvp->data[nvp->entry], "boardrev", 8) == 0)
+ nvp->boardrev_found = true;
} else if (!is_nvram_char(c) || c == ' ') {
brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
nvp->line, nvp->column);
@@ -284,6 +289,8 @@ static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr,
while (i < nvp->nvram_len) {
if ((nvp->nvram[i] - '0' == id) && (nvp->nvram[i + 1] == ':')) {
i += 2;
+ if (strncmp(&nvp->nvram[i], "boardrev", 8) == 0)
+ nvp->boardrev_found = true;
while (nvp->nvram[i] != 0) {
nvram[j] = nvp->nvram[i];
i++;
@@ -335,6 +342,8 @@ static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
while (i < nvp->nvram_len - len) {
if (strncmp(&nvp->nvram[i], prefix, len) == 0) {
i += len;
+ if (strncmp(&nvp->nvram[i], "boardrev", 8) == 0)
+ nvp->boardrev_found = true;
while (nvp->nvram[i] != 0) {
nvram[j] = nvp->nvram[i];
i++;
@@ -356,6 +365,18 @@ fail:
nvp->nvram_len = 0;
}
+static void brcmf_fw_add_defaults(struct nvram_parser *nvp)
+{
+ if (nvp->boardrev_found)
+ return;
+
+ memcpy(&nvp->nvram[nvp->nvram_len], &BRCMF_FW_DEFAULT_BOARDREV,
+ strlen(BRCMF_FW_DEFAULT_BOARDREV));
+ nvp->nvram_len += strlen(BRCMF_FW_DEFAULT_BOARDREV);
+ nvp->nvram[nvp->nvram_len] = '\0';
+ nvp->nvram_len++;
+}
+
/* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a fil
* and ending in a NUL. Removes carriage returns, empty lines, comment lines,
* and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
@@ -377,16 +398,21 @@ static void *brcmf_fw_nvram_strip(const u8 *data, size_t data_len,
if (nvp.state == END)
break;
}
- if (nvp.multi_dev_v1)
+ if (nvp.multi_dev_v1) {
+ nvp.boardrev_found = false;
brcmf_fw_strip_multi_v1(&nvp, domain_nr, bus_nr);
- else if (nvp.multi_dev_v2)
+ } else if (nvp.multi_dev_v2) {
+ nvp.boardrev_found = false;
brcmf_fw_strip_multi_v2(&nvp, domain_nr, bus_nr);
+ }
if (nvp.nvram_len == 0) {
kfree(nvp.nvram);
return NULL;
}
+ brcmf_fw_add_defaults(&nvp);
+
pad = nvp.nvram_len;
*new_length = roundup(nvp.nvram_len + 1, 4);
while (pad != *new_length) {
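
brcmf_fw_add_defaults() appends "boardrev=0xff" plus a NUL terminator when the stripped NVRAM carries no boardrev entry, and the existing loop then pads the image with NULs to a 4-byte multiple. A user-space sketch of that append-and-pad step over the same flattened key=value\0 layout (the sample keys are made up):

#include <stdio.h>
#include <string.h>

#define DEFAULT_BOARDREV "boardrev=0xff"

int main(void)
{
	char nvram[64] = "cckbw202gpo=0\0rxchain=3\0";
	size_t len = 24;	/* two key=value entries incl. NULs */

	/* append the default entry, NUL-terminated */
	memcpy(nvram + len, DEFAULT_BOARDREV, strlen(DEFAULT_BOARDREV));
	len += strlen(DEFAULT_BOARDREV);
	nvram[len++] = '\0';

	/* pad with NULs to a 4-byte boundary, as the strip code does */
	while (len % 4)
		nvram[len++] = '\0';

	printf("nvram image is %zu bytes\n", len);
	return 0;
}
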
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index d414fbbcc..b39056125 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -371,6 +371,7 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp)
int i, err;
s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+ memset(eventmask, 0, sizeof(eventmask));
for (i = 0; i < BRCMF_E_LAST; i++) {
if (ifp->drvr->fweh.evt_handler[i]) {
brcmf_dbg(EVENT, "enable event %s\n",
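
The one-line fweh.c fix matters because eventmask is a stack buffer that the loop only ever sets bits in, so events without a handler previously kept whatever the stack contained. A minimal illustration of the init-then-set-bit pattern (setbit() here stands in for the helper the driver uses):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EVENTING_MASK_LEN 16	/* illustrative size */

static void setbit(uint8_t *mask, int i)
{
	mask[i / 8] |= 1u << (i % 8);
}

int main(void)
{
	uint8_t eventmask[EVENTING_MASK_LEN];

	/* without this, the unset bits are stack garbage */
	memset(eventmask, 0, sizeof(eventmask));
	setbit(eventmask, 5);	/* enable event 5 only */
	printf("byte0 = 0x%02x\n", eventmask[0]);
	return 0;
}
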
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index 6b72df177..3a9a76dd9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -78,6 +78,7 @@
#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201
+#define BRCMF_C_SET_ASSOC_PREFER 205
#define BRCMF_C_GET_VALID_CHANNELS 217
#define BRCMF_C_GET_KEY_PRIMARY 235
#define BRCMF_C_SET_KEY_PRIMARY 236
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index f82c9ab54..5b30922b6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -92,6 +92,19 @@ enum brcmf_fws_tlv_len {
};
#undef BRCMF_FWS_TLV_DEF
+/* AMPDU rx reordering definitions */
+#define BRCMF_RXREORDER_FLOWID_OFFSET 0
+#define BRCMF_RXREORDER_MAXIDX_OFFSET 2
+#define BRCMF_RXREORDER_FLAGS_OFFSET 4
+#define BRCMF_RXREORDER_CURIDX_OFFSET 6
+#define BRCMF_RXREORDER_EXPIDX_OFFSET 8
+
+#define BRCMF_RXREORDER_DEL_FLOW 0x01
+#define BRCMF_RXREORDER_FLUSH_ALL 0x02
+#define BRCMF_RXREORDER_CURIDX_VALID 0x04
+#define BRCMF_RXREORDER_EXPIDX_VALID 0x08
+#define BRCMF_RXREORDER_NEW_HOLE 0x10
+
#ifdef DEBUG
/*
* brcmf_fws_tlv_names - array of tlv names.
@@ -1614,6 +1627,202 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
return 0;
}
+static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
+ u8 start, u8 end,
+ struct sk_buff_head *skb_list)
+{
+ /* initialize return list */
+ __skb_queue_head_init(skb_list);
+
+ if (rfi->pend_pkts == 0) {
+ brcmf_dbg(INFO, "no packets in reorder queue\n");
+ return;
+ }
+
+ do {
+ if (rfi->pktslots[start]) {
+ __skb_queue_tail(skb_list, rfi->pktslots[start]);
+ rfi->pktslots[start] = NULL;
+ }
+ start++;
+ if (start > rfi->max_idx)
+ start = 0;
+ } while (start != end);
+ rfi->pend_pkts -= skb_queue_len(skb_list);
+}
+
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
+{
+ u8 *reorder_data;
+ u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
+ struct brcmf_ampdu_rx_reorder *rfi;
+ struct sk_buff_head reorder_list;
+ struct sk_buff *pnext;
+ u8 flags;
+ u32 buf_size;
+
+ reorder_data = ((struct brcmf_skb_reorder_data *)pkt->cb)->reorder;
+ flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
+ flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
+
+ /* validate flags and flow id */
+ if (flags == 0xFF) {
+ brcmf_err("invalid flags...so ignore this packet\n");
+ brcmf_netif_rx(ifp, pkt);
+ return;
+ }
+
+ rfi = ifp->drvr->reorder_flows[flow_id];
+ if (flags & BRCMF_RXREORDER_DEL_FLOW) {
+ brcmf_dbg(INFO, "flow-%d: delete\n",
+ flow_id);
+
+ if (rfi == NULL) {
+ brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
+ flow_id);
+ brcmf_netif_rx(ifp, pkt);
+ return;
+ }
+
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
+ &reorder_list);
+ /* add the last packet */
+ __skb_queue_tail(&reorder_list, pkt);
+ kfree(rfi);
+ ifp->drvr->reorder_flows[flow_id] = NULL;
+ goto netif_rx;
+ }
+ /* from here on we need a flow reorder instance */
+ if (rfi == NULL) {
+ buf_size = sizeof(*rfi);
+ max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+
+ buf_size += (max_idx + 1) * sizeof(pkt);
+
+ /* allocate space for flow reorder info */
+ brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
+ flow_id, max_idx);
+ rfi = kzalloc(buf_size, GFP_ATOMIC);
+ if (rfi == NULL) {
+ brcmf_err("failed to alloc buffer\n");
+ brcmf_netif_rx(ifp, pkt);
+ return;
+ }
+
+ ifp->drvr->reorder_flows[flow_id] = rfi;
+ rfi->pktslots = (struct sk_buff **)(rfi + 1);
+ rfi->max_idx = max_idx;
+ }
+ if (flags & BRCMF_RXREORDER_NEW_HOLE) {
+ if (rfi->pend_pkts) {
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
+ rfi->exp_idx,
+ &reorder_list);
+ WARN_ON(rfi->pend_pkts);
+ } else {
+ __skb_queue_head_init(&reorder_list);
+ }
+ rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+ rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+ rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+ rfi->pktslots[rfi->cur_idx] = pkt;
+ rfi->pend_pkts++;
+ brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
+ flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
+ } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
+ cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+ exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+ if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
+ /* still in the current hole */
+ /* enqueue the current on the buffer chain */
+ if (rfi->pktslots[cur_idx] != NULL) {
+ brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
+ brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+ rfi->pktslots[cur_idx] = NULL;
+ }
+ rfi->pktslots[cur_idx] = pkt;
+ rfi->pend_pkts++;
+ rfi->cur_idx = cur_idx;
+ brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
+ flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+ /* can return now as there is no reorder
+ * list to process.
+ */
+ return;
+ }
+ if (rfi->exp_idx == cur_idx) {
+ if (rfi->pktslots[cur_idx] != NULL) {
+ brcmf_dbg(INFO, "error buffer pending..free it\n");
+ brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+ rfi->pktslots[cur_idx] = NULL;
+ }
+ rfi->pktslots[cur_idx] = pkt;
+ rfi->pend_pkts++;
+
+ /* got the expected one. flush from current to expected
+ * and update expected
+ */
+ brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
+ flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+ rfi->cur_idx = cur_idx;
+ rfi->exp_idx = exp_idx;
+
+ brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
+ &reorder_list);
+ brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
+ flow_id, skb_queue_len(&reorder_list),
+ rfi->pend_pkts);
+ } else {
+ u8 end_idx;
+
+ brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
+ flow_id, flags, rfi->cur_idx, rfi->exp_idx,
+ cur_idx, exp_idx);
+ if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+ end_idx = rfi->exp_idx;
+ else
+ end_idx = exp_idx;
+
+ /* flush pkts first */
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+ &reorder_list);
+
+ if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
+ __skb_queue_tail(&reorder_list, pkt);
+ } else {
+ rfi->pktslots[cur_idx] = pkt;
+ rfi->pend_pkts++;
+ }
+ rfi->exp_idx = exp_idx;
+ rfi->cur_idx = cur_idx;
+ }
+ } else {
+		/* explicit window move: update the expected index */
+ exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+ brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
+ flow_id, flags, rfi->exp_idx, exp_idx);
+ if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+ end_idx = rfi->exp_idx;
+ else
+ end_idx = exp_idx;
+
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+ &reorder_list);
+ __skb_queue_tail(&reorder_list, pkt);
+ /* set the new expected idx */
+ rfi->exp_idx = exp_idx;
+ }
+netif_rx:
+ skb_queue_walk_safe(&reorder_list, pkt, pnext) {
+ __skb_unlink(pkt, &reorder_list);
+ brcmf_netif_rx(ifp, pkt);
+ }
+}
+
void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
{
struct brcmf_skb_reorder_data *rd;
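
The reorder code moved into fwsignal.c keeps out-of-order packets in a slot array indexed modulo (max_idx + 1); brcmf_rxreorder_get_skb_list() walks from start toward end with wrap-around and unlinks whatever slots are filled. A self-contained sketch of that circular flush over a plain pointer array:

#include <stdio.h>

#define MAX_IDX 7	/* slots 0..7, i.e. max_idx = 7 */

/* Flush filled slots from start up to (not including) end, with
 * wrap-around, like brcmf_rxreorder_get_skb_list(); returns the
 * number of packets delivered. */
static int flush_slots(const char *slots[], int start, int end)
{
	int n = 0;

	do {
		if (slots[start]) {
			printf("deliver %s\n", slots[start]);
			slots[start] = NULL;
			n++;
		}
		if (++start > MAX_IDX)
			start = 0;
	} while (start != end);	/* start == end flushes the whole ring */
	return n;
}

int main(void)
{
	const char *slots[MAX_IDX + 1] = { 0 };

	slots[6] = "pkt6";
	slots[7] = "pkt7";
	slots[1] = "pkt1";
	/* start at 6, stop before 2: visits 6, 7, 0, 1 */
	printf("flushed %d packets\n", flush_slots(slots, 6, 2));
	return 0;
}
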
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index a36bac17e..ef0ad8597 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -29,5 +29,6 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp);
void brcmf_fws_del_interface(struct brcmf_if *ifp);
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
#endif /* FWSIGNAL_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 922966734..2b9a2bc42 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
@@ -526,6 +527,9 @@ static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
return -ENODEV;
}
+static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+}
static void
brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
@@ -1075,28 +1079,13 @@ static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
}
-static void
-brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
- u8 ifidx)
-{
- struct brcmf_if *ifp;
-
- ifp = brcmf_get_ifp(msgbuf->drvr, ifidx);
- if (!ifp || !ifp->ndev) {
- brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
- brcmu_pkt_buf_free_skb(skb);
- return;
- }
- brcmf_netif_rx(ifp, skb);
-}
-
-
static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
{
struct msgbuf_rx_event *event;
u32 idx;
u16 buflen;
struct sk_buff *skb;
+ struct brcmf_if *ifp;
event = (struct msgbuf_rx_event *)buf;
idx = le32_to_cpu(event->msg.request_id);
@@ -1116,7 +1105,19 @@ static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
skb_trim(skb, buflen);
- brcmf_msgbuf_rx_skb(msgbuf, skb, event->msg.ifidx);
+ ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx);
+ if (!ifp || !ifp->ndev) {
+ brcmf_err("Received pkt for invalid ifidx %d\n",
+ event->msg.ifidx);
+ goto exit;
+ }
+
+ skb->protocol = eth_type_trans(skb, ifp->ndev);
+
+ brcmf_fweh_process_skb(ifp->drvr, skb);
+
+exit:
+ brcmu_pkt_buf_free_skb(skb);
}
@@ -1128,6 +1129,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
u16 data_offset;
u16 buflen;
u32 idx;
+ struct brcmf_if *ifp;
brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);
@@ -1148,7 +1150,16 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
skb_trim(skb, buflen);
- brcmf_msgbuf_rx_skb(msgbuf, skb, rx_complete->msg.ifidx);
+ ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
+ if (!ifp || !ifp->ndev) {
+ brcmf_err("Received pkt for invalid ifidx %d\n",
+ rx_complete->msg.ifidx);
+ brcmu_pkt_buf_free_skb(skb);
+ return;
+ }
+
+ skb->protocol = eth_type_trans(skb, ifp->ndev);
+ brcmf_netif_rx(ifp, skb);
}
@@ -1460,6 +1471,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
+ drvr->proto->rxreorder = brcmf_msgbuf_rxreorder;
drvr->proto->pd = msgbuf;
init_waitqueue_head(&msgbuf->ioctl_resp_wait);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index b5a49e564..a70cda6c0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1266,7 +1266,7 @@ static void
brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
{
struct brcmf_p2p_info *p2p = &cfg->p2p;
- struct brcmf_if *ifp = cfg->escan_info.ifp;
+ struct brcmf_if *ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) &&
(test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) ||
@@ -1430,8 +1430,8 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
freq = ieee80211_channel_to_frequency(ch.chnum,
ch.band == BRCMU_CHAN_BAND_2G ?
- IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_2GHZ :
+ NL80211_BAND_5GHZ);
wdev = &ifp->vif->wdev;
cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0);
@@ -1900,8 +1900,8 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
mgmt_frame_len = e->datalen - sizeof(*rxframe);
freq = ieee80211_channel_to_frequency(ch.chnum,
ch.band == BRCMU_CHAN_BAND_2G ?
- IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_2GHZ :
+ NL80211_BAND_5GHZ);
cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index d55119d36..57531f421 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -22,6 +22,9 @@ enum proto_addr_mode {
ADDR_DIRECT
};
+struct brcmf_skb_reorder_data {
+ u8 *reorder;
+};
struct brcmf_proto {
int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws,
@@ -38,6 +41,7 @@ struct brcmf_proto {
u8 peer[ETH_ALEN]);
void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
u8 peer[ETH_ALEN]);
+ void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
void *pd;
};
@@ -91,6 +95,18 @@ brcmf_proto_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
drvr->proto->add_tdls_peer(drvr, ifidx, peer);
}
+static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb)
+{
+ struct brcmf_skb_reorder_data *rd;
+
+ rd = (struct brcmf_skb_reorder_data *)skb->cb;
+ return !!rd->reorder;
+}
+static inline void
+brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+ ifp->drvr->proto->rxreorder(ifp, skb);
+}
#endif /* BRCMFMAC_PROTO_H */
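
Moving brcmf_skb_reorder_data next to the ops table makes the contract explicit: the protocol layer stashes a pointer to the firmware's reorder TLV in the skb's 48-byte cb[] scratch area, and brcmf_proto_is_reorder_skb() merely tests it. A sketch of that pattern with a reduced mock sk_buff:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct sk_buff {		/* reduced mock: only the cb scratch area */
	unsigned char cb[48];
};

struct reorder_data {		/* mirrors brcmf_skb_reorder_data */
	unsigned char *reorder;
};

static bool is_reorder_skb(struct sk_buff *skb)
{
	struct reorder_data *rd = (struct reorder_data *)skb->cb;

	return rd->reorder != NULL;
}

int main(void)
{
	struct sk_buff skb;
	unsigned char tlv[10] = { 0 };
	struct reorder_data *rd = (struct reorder_data *)skb.cb;

	memset(skb.cb, 0, sizeof(skb.cb));
	printf("before: %d\n", is_reorder_skb(&skb));
	rd->reorder = tlv;	/* hdrpull found a reorder TLV */
	printf("after:  %d\n", is_reorder_skb(&skb));
	return 0;
}
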
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index c5be36079..af4652047 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -535,9 +535,6 @@ static int qcount[NUMPRIO];
#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
-/* Retry count for register access failures */
-static const uint retry_limit = 2;
-
/* Limit on rounding up frames */
static const uint max_roundup = 512;
@@ -612,6 +609,7 @@ BRCMF_FW_NVRAM_DEF(4339, "/*(DEBLOBBED)*/", "brcmfmac4339-sdio.txt");
BRCMF_FW_NVRAM_DEF(43430, "/*(DEBLOBBED)*/", "brcmfmac43430-sdio.txt");
BRCMF_FW_NVRAM_DEF(43455, "/*(DEBLOBBED)*/", "brcmfmac43455-sdio.txt");
BRCMF_FW_NVRAM_DEF(4354, "/*(DEBLOBBED)*/", "brcmfmac4354-sdio.txt");
+BRCMF_FW_NVRAM_DEF(4356, "/*(DEBLOBBED)*/", "brcmfmac4356-sdio.txt");
static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143),
@@ -627,7 +625,8 @@ static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, 43430),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455),
- BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354)
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356)
};
static void pkt_align(struct sk_buff *p, int len, int align)
@@ -1297,6 +1296,17 @@ static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
}
+static inline bool brcmf_sdio_fromevntchan(u8 *swheader)
+{
+ u32 hdrvalue;
+ u8 ret;
+
+ hdrvalue = *(u32 *)swheader;
+ ret = (u8)((hdrvalue & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT);
+
+ return (ret == SDPCM_EVENT_CHANNEL);
+}
+
static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
struct brcmf_sdio_hdrinfo *rd,
enum brcmf_sdio_frmtype type)
@@ -1644,7 +1654,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
pfirst->len, pfirst->next,
pfirst->prev);
skb_unlink(pfirst, &bus->glom);
- brcmf_rx_frame(bus->sdiodev->dev, pfirst);
+ if (brcmf_sdio_fromevntchan(pfirst->data))
+ brcmf_rx_event(bus->sdiodev->dev, pfirst);
+ else
+ brcmf_rx_frame(bus->sdiodev->dev, pfirst,
+ false);
bus->sdcnt.rxglompkts++;
}
@@ -1970,18 +1984,19 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
__skb_trim(pkt, rd->len);
skb_pull(pkt, rd->dat_offset);
+ if (pkt->len == 0)
+ brcmu_pkt_buf_free_skb(pkt);
+ else if (rd->channel == SDPCM_EVENT_CHANNEL)
+ brcmf_rx_event(bus->sdiodev->dev, pkt);
+ else
+ brcmf_rx_frame(bus->sdiodev->dev, pkt,
+ false);
+
/* prepare the descriptor for the next read */
rd->len = rd->len_nxtfrm << 4;
rd->len_nxtfrm = 0;
/* treat all packet as event if we don't know */
rd->channel = SDPCM_EVENT_CHANNEL;
-
- if (pkt->len == 0) {
- brcmu_pkt_buf_free_skb(pkt);
- continue;
- }
-
- brcmf_rx_frame(bus->sdiodev->dev, pkt);
}
rxcount = maxframes - rxleft;
@@ -3261,7 +3276,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
const struct firmware *fw,
void *nvram, u32 nvlen)
{
- int bcmerror = -EFAULT;
+ int bcmerror;
u32 rstvec;
sdio_claim_host(bus->sdiodev->func[1]);
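
brcmf_sdio_fromevntchan() peeks at the SDPCM software header so glom and normal reads can route event-channel frames to brcmf_rx_event() before any header stripping. A sketch of the mask-and-shift extraction; the mask, shift and channel values below are placeholders, not the real SDPCM_* definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Placeholder field layout for illustration only; the real values
 * live in the SDPCM definitions used by sdio.c. */
#define CHANNEL_MASK  0x00000f00u
#define CHANNEL_SHIFT 8
#define EVENT_CHANNEL 1

static int from_event_channel(const uint8_t *swheader)
{
	uint32_t hdr;

	memcpy(&hdr, swheader, sizeof(hdr));	/* avoid unaligned access */
	return ((hdr & CHANNEL_MASK) >> CHANNEL_SHIFT) == EVENT_CHANNEL;
}

int main(void)
{
	uint8_t hdr[4] = { 0x00, 0x01, 0x00, 0x00 };	/* channel 1, LE host */

	printf("event frame: %d\n", from_event_channel(hdr));
	return 0;
}
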
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 7fb546547..16c9c8f81 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -514,7 +514,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
skb_put(skb, urb->actual_length);
- brcmf_rx_frame(devinfo->dev, skb);
+ brcmf_rx_frame(devinfo->dev, skb, true);
brcmf_usb_rx_refill(devinfo, req);
} else {
brcmu_pkt_buf_free_skb(skb);
@@ -1368,7 +1368,9 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
devinfo->ifnum = desc->bInterfaceNumber;
- if (usb->speed == USB_SPEED_SUPER)
+ if (usb->speed == USB_SPEED_SUPER_PLUS)
+ brcmf_dbg(USB, "Broadcom super speed plus USB WLAN interface detected\n");
+ else if (usb->speed == USB_SPEED_SUPER)
brcmf_dbg(USB, "Broadcom super speed USB WLAN interface detected\n");
else if (usb->speed == USB_SPEED_HIGH)
brcmf_dbg(USB, "Broadcom high speed USB WLAN interface detected\n");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
index 38bd5890b..3a03287fa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
@@ -636,7 +636,7 @@ static void brcms_reg_apply_radar_flags(struct wiphy *wiphy)
struct ieee80211_channel *ch;
int i;
- sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
if (!sband)
return;
@@ -666,7 +666,7 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
const struct ieee80211_reg_rule *rule;
int band, i;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
sband = wiphy->bands[band];
if (!sband)
continue;
@@ -710,7 +710,7 @@ static void brcms_reg_notifier(struct wiphy *wiphy,
brcms_reg_apply_beaconing_flags(wiphy, request->initiator);
/* Disable radio if all channels disallowed by regulatory */
- for (band = 0; !ch_found && band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; !ch_found && band < NUM_NL80211_BANDS; band++) {
sband = wiphy->bands[band];
if (!sband)
continue;
@@ -755,9 +755,9 @@ void brcms_c_regd_init(struct brcms_c_info *wlc)
&sup_chan);
if (band_idx == BAND_2G_INDEX)
- sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = wiphy->bands[NL80211_BAND_2GHZ];
else
- sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 4ab06e1e6..d5fc5d3a1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -49,7 +49,7 @@
FIF_PSPOLL)
#define CHAN2GHZ(channel, freqency, chflags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (freqency), \
.hw_value = (channel), \
.flags = chflags, \
@@ -58,7 +58,7 @@
}
#define CHAN5GHZ(channel, chflags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = 5000 + 5*(channel), \
.hw_value = (channel), \
.flags = chflags, \
@@ -216,7 +216,7 @@ static struct ieee80211_rate legacy_ratetable[] = {
};
static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = {
- .band = IEEE80211_BAND_2GHZ,
+ .band = NL80211_BAND_2GHZ,
.channels = brcms_2ghz_chantable,
.n_channels = ARRAY_SIZE(brcms_2ghz_chantable),
.bitrates = legacy_ratetable,
@@ -237,7 +237,7 @@ static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = {
};
static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = {
- .band = IEEE80211_BAND_5GHZ,
+ .band = NL80211_BAND_5GHZ,
.channels = brcms_5ghz_nphy_chantable,
.n_channels = ARRAY_SIZE(brcms_5ghz_nphy_chantable),
.bitrates = legacy_ratetable + BRCMS_LEGACY_5G_RATE_OFFSET,
@@ -1025,8 +1025,8 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
int has_5g = 0;
u16 phy_type;
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
phy_type = brcms_c_get_phy_type(wl->wlc, 0);
if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) {
@@ -1037,7 +1037,7 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
band->ht_cap.mcs.rx_mask[1] = 0;
band->ht_cap.mcs.rx_highest = cpu_to_le16(72);
}
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
} else {
return -EPERM;
}
@@ -1048,7 +1048,7 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) {
band = &wlc->bandstate[BAND_5G_INDEX]->band;
*band = brcms_band_5GHz_nphy_template;
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
} else {
return -EPERM;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 218cbc8bf..e16ee6063 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -7076,7 +7076,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
rx_status->band =
- channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+ channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
rx_status->freq =
ieee80211_channel_to_frequency(channel, rx_status->band);
@@ -7143,7 +7143,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
* a subset of the 2.4G rates. See bitrates field
* of brcms_band_5GHz_nphy (in mac80211_if.c).
*/
- if (rx_status->band == IEEE80211_BAND_5GHZ)
+ if (rx_status->band == NL80211_BAND_5GHZ)
rx_status->rate_idx -= BRCMS_LEGACY_5G_RATE_OFFSET;
/* Determine short preamble and rate_idx */
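/*
 * Everything above, and much of what follows, is one mechanical rename:
 * mac80211's enum ieee80211_band was removed in favour of cfg80211's
 * enum nl80211_band, so IEEE80211_BAND_2GHZ/_5GHZ and IEEE80211_NUM_BANDS
 * become NL80211_BAND_2GHZ/_5GHZ and NUM_NL80211_BANDS with unchanged
 * numeric values. A minimal sketch of the resulting band walk
 * (illustrative only, not part of this patch):
 */
static void example_walk_bands(struct wiphy *wiphy)
{
	struct ieee80211_supported_band *sband;
	enum nl80211_band band;		/* was: enum ieee80211_band */

	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		sband = wiphy->bands[band];	/* NULL if band unsupported */
		if (!sband)
			continue;
		/* operate on sband->channels / sband->bitrates here */
	}
}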
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index d2353f6e5..ca3cd2102 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -2026,7 +2026,7 @@ static int mpi_send_packet (struct net_device *dev)
} else {
*payloadLen = cpu_to_le16(len - sizeof(etherHead));
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* copy data into airo dma buffer */
memcpy(sendbuf, buffer, len);
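/*
 * dev->trans_start is no longer written directly; netif_trans_update()
 * stamps jiffies into the queue-0 trans_start that the TX watchdog
 * checks. Roughly equivalent open-coded form (a sketch based on the
 * 4.7 netdevice.h helper):
 */
static inline void example_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;	/* feeds dev_watchdog() */
}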
@@ -2107,7 +2107,7 @@ static void airo_end_xmit(struct net_device *dev) {
i = 0;
if ( status == SUCCESS ) {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++);
} else {
priv->fids[fid] &= 0xffff;
@@ -2174,7 +2174,7 @@ static void airo_end_xmit11(struct net_device *dev) {
i = MAX_FIDS / 2;
if ( status == SUCCESS ) {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++);
} else {
priv->fids[fid] &= 0xffff;
@@ -5794,7 +5794,7 @@ static int airo_set_freq(struct net_device *dev,
fwrq->m = ieee80211_frequency_to_channel(f);
}
/* Setting by channel number */
- if((fwrq->m > 1000) || (fwrq->e > 0))
+ if (fwrq->m < 0 || fwrq->m > 1000 || fwrq->e > 0)
rc = -EOPNOTSUPP;
else {
int channel = fwrq->m;
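/*
 * The reworked test also rejects negative values: iw_freq.m is a signed
 * 32-bit field, so the old "(fwrq->m > 1000)" check let a negative
 * channel number through to the code below. The same predicate as a
 * hypothetical helper:
 */
static bool example_channel_arg_ok(__s32 m, __s16 e)
{
	return m >= 0 && m <= 1000 && e <= 0;	/* plain channel number */
}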
@@ -5836,7 +5836,7 @@ static int airo_get_freq(struct net_device *dev,
ch = le16_to_cpu(status_rid.channel);
if((ch > 0) && (ch < 15)) {
fwrq->m = 100000 *
- ieee80211_channel_to_frequency(ch, IEEE80211_BAND_2GHZ);
+ ieee80211_channel_to_frequency(ch, NL80211_BAND_2GHZ);
fwrq->e = 1;
} else {
fwrq->m = ch;
@@ -6894,7 +6894,7 @@ static int airo_get_range(struct net_device *dev,
for(i = 0; i < 14; i++) {
range->freq[k].i = i + 1; /* List index */
range->freq[k].m = 100000 *
- ieee80211_channel_to_frequency(i + 1, IEEE80211_BAND_2GHZ);
+ ieee80211_channel_to_frequency(i + 1, NL80211_BAND_2GHZ);
range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */
}
range->num_frequency = k;
@@ -7302,7 +7302,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
iwe.cmd = SIOCGIWFREQ;
iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
iwe.u.freq.m = 100000 *
- ieee80211_channel_to_frequency(iwe.u.freq.m, IEEE80211_BAND_2GHZ);
+ ieee80211_channel_to_frequency(iwe.u.freq.m, NL80211_BAND_2GHZ);
iwe.u.freq.e = 1;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
&iwe, IW_EV_FREQ_LEN);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 8b5fec510..02a299a89 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -1913,7 +1913,7 @@ static int ipw2100_wdev_init(struct net_device *dev)
if (geo->bg_channels) {
struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
- bg_band->band = IEEE80211_BAND_2GHZ;
+ bg_band->band = NL80211_BAND_2GHZ;
bg_band->n_channels = geo->bg_channels;
bg_band->channels = kcalloc(geo->bg_channels,
sizeof(struct ieee80211_channel),
@@ -1924,7 +1924,7 @@ static int ipw2100_wdev_init(struct net_device *dev)
}
/* translate geo->bg to bg_band.channels */
for (i = 0; i < geo->bg_channels; i++) {
- bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
+ bg_band->channels[i].band = NL80211_BAND_2GHZ;
bg_band->channels[i].center_freq = geo->bg[i].freq;
bg_band->channels[i].hw_value = geo->bg[i].channel;
bg_band->channels[i].max_power = geo->bg[i].max_power;
@@ -1945,7 +1945,7 @@ static int ipw2100_wdev_init(struct net_device *dev)
bg_band->bitrates = ipw2100_bg_rates;
bg_band->n_bitrates = RATE_COUNT;
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
+ wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
}
wdev->wiphy->cipher_suites = ipw_cipher_suites;
@@ -2954,7 +2954,7 @@ static int __ipw2100_tx_process(struct ipw2100_priv *priv)
/* A packet was processed by the hardware, so update the
* watchdog */
- priv->net_dev->trans_start = jiffies;
+ netif_trans_update(priv->net_dev);
break;
@@ -3521,7 +3521,7 @@ static void ipw2100_msg_free(struct ipw2100_priv *priv)
static ssize_t show_pci(struct device *d, struct device_attribute *attr,
char *buf)
{
- struct pci_dev *pci_dev = container_of(d, struct pci_dev, dev);
+ struct pci_dev *pci_dev = to_pci_dev(d);
char *out = buf;
int i, j;
u32 val;
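/*
 * to_pci_dev() is the stock wrapper from <linux/pci.h>:
 *
 *	#define to_pci_dev(n) container_of(n, struct pci_dev, dev)
 *
 * Functionally identical to the open-coded container_of(), just the
 * idiomatic spelling for recovering a pci_dev from its embedded
 * struct device.
 */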
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 888eae128..9f02e7e78 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -7707,7 +7707,7 @@ static void ipw_handle_data_packet(struct ipw_priv *priv,
struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
/* We received data from the HW, so stop the watchdog */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* We only process data packets if the
* interface is open */
@@ -7770,7 +7770,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
unsigned short len = le16_to_cpu(pkt->u.frame.length);
/* We received data from the HW, so stop the watchdog */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* We only process data packets if the
* interface is open */
@@ -7952,7 +7952,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
return;
/* We received data from the HW, so stop the watchdog */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
dev->stats.rx_errors++;
@@ -11359,7 +11359,7 @@ static int ipw_wdev_init(struct net_device *dev)
if (geo->bg_channels) {
struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
- bg_band->band = IEEE80211_BAND_2GHZ;
+ bg_band->band = NL80211_BAND_2GHZ;
bg_band->n_channels = geo->bg_channels;
bg_band->channels = kcalloc(geo->bg_channels,
sizeof(struct ieee80211_channel),
@@ -11370,7 +11370,7 @@ static int ipw_wdev_init(struct net_device *dev)
}
/* translate geo->bg to bg_band.channels */
for (i = 0; i < geo->bg_channels; i++) {
- bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
+ bg_band->channels[i].band = NL80211_BAND_2GHZ;
bg_band->channels[i].center_freq = geo->bg[i].freq;
bg_band->channels[i].hw_value = geo->bg[i].channel;
bg_band->channels[i].max_power = geo->bg[i].max_power;
@@ -11391,14 +11391,14 @@ static int ipw_wdev_init(struct net_device *dev)
bg_band->bitrates = ipw2200_bg_rates;
bg_band->n_bitrates = ipw2200_num_bg_rates;
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
+ wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
}
/* fill-out priv->ieee->a_band */
if (geo->a_channels) {
struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
- a_band->band = IEEE80211_BAND_5GHZ;
+ a_band->band = NL80211_BAND_5GHZ;
a_band->n_channels = geo->a_channels;
a_band->channels = kcalloc(geo->a_channels,
sizeof(struct ieee80211_channel),
@@ -11409,7 +11409,7 @@ static int ipw_wdev_init(struct net_device *dev)
}
/* translate geo->a to a_band.channels */
for (i = 0; i < geo->a_channels; i++) {
- a_band->channels[i].band = IEEE80211_BAND_5GHZ;
+ a_band->channels[i].band = NL80211_BAND_5GHZ;
a_band->channels[i].center_freq = geo->a[i].freq;
a_band->channels[i].hw_value = geo->a[i].channel;
a_band->channels[i].max_power = geo->a[i].max_power;
@@ -11430,7 +11430,7 @@ static int ipw_wdev_init(struct net_device *dev)
a_band->bitrates = ipw2200_a_rates;
a_band->n_bitrates = ipw2200_num_a_rates;
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
+ wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band;
}
wdev->wiphy->cipher_suites = ipw_cipher_suites;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index e8b5545ca..260d6a3a4 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -1547,7 +1547,7 @@ il3945_irq_tasklet(struct il_priv *il)
}
static int
-il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
+il3945_get_channels_for_scan(struct il_priv *il, enum nl80211_band band,
u8 is_active, u8 n_probes,
struct il3945_scan_channel *scan_ch,
struct ieee80211_vif *vif)
@@ -1618,7 +1618,7 @@ il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
/* scan_pwr_info->tpc.dsp_atten; */
/*scan_pwr_info->tpc.tx_gain; */
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
else {
scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
@@ -2534,7 +2534,7 @@ il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
};
struct il3945_scan_cmd *scan;
u8 n_probes = 0;
- enum ieee80211_band band;
+ enum nl80211_band band;
bool is_active = false;
int ret;
u16 len;
@@ -2615,14 +2615,14 @@ il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
/* flags + rate selection */
switch (il->scan_band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
scan->tx_cmd.rate = RATE_1M_PLCP;
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
scan->tx_cmd.rate = RATE_6M_PLCP;
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
break;
default:
IL_WARN("Invalid scan band\n");
@@ -3507,7 +3507,7 @@ il3945_init_drv(struct il_priv *il)
il->ieee_channels = NULL;
il->ieee_rates = NULL;
- il->band = IEEE80211_BAND_2GHZ;
+ il->band = NL80211_BAND_2GHZ;
il->iw_mode = NL80211_IFTYPE_STATION;
il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
@@ -3582,13 +3582,13 @@ il3945_setup_mac(struct il_priv *il)
/* Default value; 4 EDCA QOS priorities */
hw->queues = 4;
- if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
- il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &il->bands[IEEE80211_BAND_2GHZ];
+ if (il->bands[NL80211_BAND_2GHZ].n_channels)
+ il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &il->bands[NL80211_BAND_2GHZ];
- if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
- il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &il->bands[IEEE80211_BAND_5GHZ];
+ if (il->bands[NL80211_BAND_5GHZ].n_channels)
+ il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &il->bands[NL80211_BAND_5GHZ];
il_leds_init(il);
@@ -3761,7 +3761,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_release_irq;
}
- il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5]);
+ il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]);
il3945_setup_deferred_work(il);
il3945_setup_handlers(il);
il_power_initialize(il);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index 76b0729ad..03ad9b8b5 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -97,7 +97,7 @@ static struct il3945_tpt_entry il3945_tpt_table_g[] = {
#define RATE_RETRY_TH 15
static u8
-il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
+il3945_get_rate_idx_by_rssi(s32 rssi, enum nl80211_band band)
{
u32 idx = 0;
u32 table_size = 0;
@@ -107,11 +107,11 @@ il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
rssi = IL_MIN_RSSI_VAL;
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
tpt_table = il3945_tpt_table_g;
table_size = ARRAY_SIZE(il3945_tpt_table_g);
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
tpt_table = il3945_tpt_table_a;
table_size = ARRAY_SIZE(il3945_tpt_table_a);
break;
@@ -380,7 +380,7 @@ il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
/* For 5 GHz band it start at IL_FIRST_OFDM_RATE */
- if (sband->band == IEEE80211_BAND_5GHZ) {
+ if (sband->band == NL80211_BAND_5GHZ) {
rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE;
}
@@ -541,7 +541,7 @@ il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
static u16
il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
u8 high = RATE_INVALID;
u8 low = RATE_INVALID;
@@ -549,7 +549,7 @@ il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
/* 802.11A walks to the next literal adjacent rate in
* the rate table */
- if (unlikely(band == IEEE80211_BAND_5GHZ)) {
+ if (unlikely(band == NL80211_BAND_5GHZ)) {
int i;
u32 mask;
@@ -657,14 +657,14 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
/* get user max rate if set */
max_rate_idx = txrc->max_rate_idx;
- if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
+ if (sband->band == NL80211_BAND_5GHZ && max_rate_idx != -1)
max_rate_idx += IL_FIRST_OFDM_RATE;
if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
max_rate_idx = -1;
idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
- if (sband->band == IEEE80211_BAND_5GHZ)
+ if (sband->band == NL80211_BAND_5GHZ)
rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
spin_lock_irqsave(&rs_sta->lock, flags);
@@ -806,7 +806,7 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
out:
- if (sband->band == IEEE80211_BAND_5GHZ) {
+ if (sband->band == NL80211_BAND_5GHZ) {
if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
idx = IL_FIRST_OFDM_RATE;
rs_sta->last_txrate_idx = idx;
@@ -935,7 +935,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
rs_sta->tgg = 0;
switch (il->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
/* TODO: this always does G, not a regression */
if (il->active.flags & RXON_FLG_TGG_PROTECT_MSK) {
rs_sta->tgg = 1;
@@ -943,7 +943,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
} else
rs_sta->expected_tpt = il3945_expected_tpt_g;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
rs_sta->expected_tpt = il3945_expected_tpt_a;
break;
default:
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 93bdf684b..7bcedbb53 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -255,13 +255,13 @@ il3945_rs_next_rate(struct il_priv *il, int rate)
int next_rate = il3945_get_prev_ieee_rate(rate);
switch (il->band) {
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
if (rate == RATE_12M_IDX)
next_rate = RATE_9M_IDX;
else if (rate == RATE_6M_IDX)
next_rate = RATE_6M_IDX;
break;
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
il_is_associated(il)) {
if (rate == RATE_11M_IDX)
@@ -349,7 +349,7 @@ il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
/* Fill the MRR chain with some info about on-chip retransmissions */
rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
rate_idx -= IL_FIRST_OFDM_RATE;
fail = tx_resp->failure_frame;
@@ -554,14 +554,14 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
rx_status.mactime = le64_to_cpu(rx_end->timestamp);
rx_status.band =
(rx_hdr->
- phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ;
+ phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
+ NL80211_BAND_5GHZ;
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
rx_status.band);
rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
- if (rx_status.band == IEEE80211_BAND_5GHZ)
+ if (rx_status.band == NL80211_BAND_5GHZ)
rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
rx_status.antenna =
@@ -1409,7 +1409,7 @@ il3945_send_tx_power(struct il_priv *il)
chan = le16_to_cpu(il->active.channel);
- txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
+ txpower.band = (il->band == NL80211_BAND_5GHZ) ? 0 : 1;
ch_info = il_get_channel_info(il, il->band, chan);
if (!ch_info) {
IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
@@ -2310,7 +2310,7 @@ il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
(il->band ==
- IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP :
+ NL80211_BAND_5GHZ) ? RATE_6M_PLCP :
RATE_1M_PLCP);
il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
@@ -2343,7 +2343,7 @@ il3945_init_hw_rate_table(struct il_priv *il)
}
switch (il->band) {
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
D_RATE("Select A mode rate scale\n");
/* If one of the following CCK rates is used,
* have it fall back to the 6M OFDM rate */
@@ -2359,7 +2359,7 @@ il3945_init_hw_rate_table(struct il_priv *il)
il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
break;
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
D_RATE("Select B/G mode rate scale\n");
/* If an OFDM rate is used, have it fall back to the
* 1M CCK rates */
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index baee8fe03..a27559854 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -457,7 +457,7 @@ il4965_rxq_stop(struct il_priv *il)
}
int
-il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
{
int idx = 0;
int band_offset = 0;
@@ -468,7 +468,7 @@ il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
return idx;
/* Legacy rate format, search for match in table */
} else {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
band_offset = IL_FIRST_OFDM_RATE;
for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
@@ -688,8 +688,8 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
rx_status.mactime = le64_to_cpu(phy_res->timestamp);
rx_status.band =
(phy_res->
- phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ;
+ phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
+ NL80211_BAND_5GHZ;
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
rx_status.band);
@@ -766,7 +766,7 @@ il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
static int
il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
- enum ieee80211_band band, u8 is_active,
+ enum nl80211_band band, u8 is_active,
u8 n_probes, struct il_scan_channel *scan_ch)
{
struct ieee80211_channel *chan;
@@ -822,7 +822,7 @@ il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
* power level:
* scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
*/
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
else
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -870,7 +870,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
u32 rate_flags = 0;
u16 cmd_len;
u16 rx_chain = 0;
- enum ieee80211_band band;
+ enum nl80211_band band;
u8 n_probes = 0;
u8 rx_ant = il->hw_params.valid_rx_ant;
u8 rate;
@@ -944,7 +944,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
switch (il->scan_band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
chan_mod =
le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
@@ -956,7 +956,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
rate_flags = RATE_MCS_CCK_MSK;
}
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
rate = RATE_6M_PLCP;
break;
default:
@@ -1590,7 +1590,7 @@ il4965_tx_cmd_build_rate(struct il_priv *il,
|| rate_idx > RATE_COUNT_LEGACY)
rate_idx = rate_lowest_index(&il->bands[info->band], sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
rate_idx += IL_FIRST_OFDM_RATE;
/* Get PLCP rate for tx_cmd->rate_n_flags */
rate_plcp = il_rates[rate_idx].plcp;
@@ -3051,7 +3051,7 @@ il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
}
/* Set up the rate scaling to start at selected rate, fall back
* all the way down to 1M in IEEE order, and then spin on 1M */
- if (il->band == IEEE80211_BAND_5GHZ)
+ if (il->band == NL80211_BAND_5GHZ)
r = RATE_6M_IDX;
else
r = RATE_1M_IDX;
@@ -5553,6 +5553,7 @@ __il4965_up(struct il_priv *il)
il4965_prepare_card_hw(il);
if (!il->hw_ready) {
+ il_dealloc_bcast_stations(il);
IL_ERR("HW not ready\n");
return -EIO;
}
@@ -5564,6 +5565,7 @@ __il4965_up(struct il_priv *il)
set_bit(S_RFKILL, &il->status);
wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
+ il_dealloc_bcast_stations(il);
il_enable_rfkill_int(il);
IL_WARN("Radio disabled by HW RF Kill switch\n");
return 0;
@@ -5577,6 +5579,7 @@ __il4965_up(struct il_priv *il)
ret = il4965_hw_nic_init(il);
if (ret) {
IL_ERR("Unable to init nic\n");
+ il_dealloc_bcast_stations(il);
return ret;
}
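/*
 * The il_dealloc_bcast_stations() calls added above cover every
 * early-exit path of __il4965_up() (HW not ready, RF-kill, NIC init
 * failure); the broadcast station allocated earlier in bring-up would
 * otherwise be leaked. A sketch of the pattern:
 */
static int example_up(struct il_priv *il)
{
	/* ... broadcast station already allocated at this point ... */
	if (!il->hw_ready) {
		il_dealloc_bcast_stations(il);	/* undo before bailing out */
		return -EIO;
	}
	return 0;
}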
@@ -5787,12 +5790,12 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
- if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
- il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &il->bands[IEEE80211_BAND_2GHZ];
- if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
- il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &il->bands[IEEE80211_BAND_5GHZ];
+ if (il->bands[NL80211_BAND_2GHZ].n_channels)
+ il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &il->bands[NL80211_BAND_2GHZ];
+ if (il->bands[NL80211_BAND_5GHZ].n_channels)
+ il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &il->bands[NL80211_BAND_5GHZ];
il_leds_init(il);
@@ -6365,7 +6368,7 @@ il4965_init_drv(struct il_priv *il)
il->ieee_channels = NULL;
il->ieee_rates = NULL;
- il->band = IEEE80211_BAND_2GHZ;
+ il->band = NL80211_BAND_2GHZ;
il->iw_mode = NL80211_IFTYPE_STATION;
il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
@@ -6477,7 +6480,7 @@ il4965_set_hw_params(struct il_priv *il)
il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
- il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
+ il->hw_params.ht40_channel = BIT(NL80211_BAND_5GHZ);
il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index bac60b2bc..a867ae7f4 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -549,7 +549,7 @@ il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
*/
static int
il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct il_scale_tbl_info *tbl, int *rate_idx)
{
u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
@@ -574,7 +574,7 @@ il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
/* legacy rate format */
if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
if (il4965_num_of_ant == 1) {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
tbl->lq_type = LQ_A;
else
tbl->lq_type = LQ_G;
@@ -743,7 +743,7 @@ il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
switch_to_legacy = 1;
scale_idx = rs_ht_to_legacy[scale_idx];
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
tbl->lq_type = LQ_A;
else
tbl->lq_type = LQ_G;
@@ -762,7 +762,7 @@ il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
/* Mask with station rate restriction */
if (is_legacy(tbl->lq_type)) {
/* supp_rates has no CCK bits in A mode */
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
rate_mask =
(u16) (rate_mask &
(lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
@@ -851,7 +851,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
table = &lq_sta->lq;
tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
- if (il->band == IEEE80211_BAND_5GHZ)
+ if (il->band == NL80211_BAND_5GHZ)
rs_idx -= IL_FIRST_OFDM_RATE;
mac_flags = info->status.rates[0].flags;
mac_idx = info->status.rates[0].idx;
@@ -864,7 +864,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
* mac80211 HT idx is always zero-idxed; we need to move
* HT OFDM rates after CCK rates in 2.4 GHz band
*/
- if (il->band == IEEE80211_BAND_2GHZ)
+ if (il->band == NL80211_BAND_2GHZ)
mac_idx += IL_FIRST_OFDM_RATE;
}
/* Here we actually compare this rate to the latest LQ command */
@@ -1816,7 +1816,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
/* mask with station rate restriction */
if (is_legacy(tbl->lq_type)) {
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
/* supp_rates has no CCK bits in A mode */
rate_scale_idx_msk =
(u16) (rate_mask &
@@ -2212,7 +2212,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
/* Get max rate if user set max rate */
if (lq_sta) {
lq_sta->max_rate_idx = txrc->max_rate_idx;
- if (sband->band == IEEE80211_BAND_5GHZ &&
+ if (sband->band == NL80211_BAND_5GHZ &&
lq_sta->max_rate_idx != -1)
lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
if (lq_sta->max_rate_idx < 0 ||
@@ -2258,11 +2258,11 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
} else {
/* Check for invalid rates */
if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
- (sband->band == IEEE80211_BAND_5GHZ &&
+ (sband->band == NL80211_BAND_5GHZ &&
rate_idx < IL_FIRST_OFDM_RATE))
rate_idx = rate_lowest_index(sband, sta);
/* On valid 5 GHz rate, adjust idx */
- else if (sband->band == IEEE80211_BAND_5GHZ)
+ else if (sband->band == NL80211_BAND_5GHZ)
rate_idx -= IL_FIRST_OFDM_RATE;
info->control.rates[0].flags = 0;
}
@@ -2362,7 +2362,7 @@ il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
/* Set last_txrate_idx to lowest rate */
lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
- if (sband->band == IEEE80211_BAND_5GHZ)
+ if (sband->band == NL80211_BAND_5GHZ)
lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
lq_sta->is_agg = 0;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index dd2c478bd..b353010d6 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -1267,7 +1267,7 @@ il4965_send_tx_power(struct il_priv *il)
"TX Power requested while scanning!\n"))
return -EAGAIN;
- band = il->band == IEEE80211_BAND_2GHZ;
+ band = il->band == NL80211_BAND_2GHZ;
is_ht40 = iw4965_is_ht40_channel(il->active.flags);
@@ -1480,7 +1480,7 @@ il4965_hw_channel_switch(struct il_priv *il,
u8 switch_count;
u16 beacon_interval = le16_to_cpu(il->timing.beacon_interval);
struct ieee80211_vif *vif = il->vif;
- band = (il->band == IEEE80211_BAND_2GHZ);
+ band = (il->band == NL80211_BAND_2GHZ);
if (WARN_ON_ONCE(vif == NULL))
return -EIO;
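/*
 * Note the firmware encoding here: the "band" byte in these tx-power and
 * channel-switch commands is 1 for 2.4 GHz and 0 for 5 GHz, which is why
 * the code compares against a band enum to produce a bool
 * (il->band == NL80211_BAND_2GHZ) rather than storing the enum value.
 */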
@@ -1918,7 +1918,7 @@ struct il_cfg il4965_cfg = {
* Force use of chains B and C for scan RX on 5 GHz band
* because the device has off-channel reception on chain A.
*/
- .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
+ .scan_rx_antennas[NL80211_BAND_5GHZ] = ANT_BC,
.eeprom_size = IL4965_EEPROM_IMG_SIZE,
.num_of_queues = IL49_NUM_QUEUES,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.h b/drivers/net/wireless/intel/iwlegacy/4965.h
index e432715e0..527e8b531 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.h
+++ b/drivers/net/wireless/intel/iwlegacy/4965.h
@@ -68,7 +68,7 @@ void il4965_rx_replenish(struct il_priv *il);
void il4965_rx_replenish_now(struct il_priv *il);
void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
int il4965_rxq_stop(struct il_priv *il);
-int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band);
void il4965_rx_handle(struct il_priv *il);
/* tx */
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index eb5cb603b..eb24b9241 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -723,10 +723,9 @@ il_eeprom_init(struct il_priv *il)
sz = il->cfg->eeprom_size;
D_EEPROM("NVM size = %d\n", sz);
il->eeprom = kzalloc(sz, GFP_KERNEL);
- if (!il->eeprom) {
- ret = -ENOMEM;
- goto alloc_err;
- }
+ if (!il->eeprom)
+ return -ENOMEM;
+
e = (__le16 *) il->eeprom;
il->ops->apm_init(il);
@@ -778,7 +777,6 @@ err:
il_eeprom_free(il);
/* Reset chip to save power until we load uCode during "up". */
il_apm_stop(il);
-alloc_err:
return ret;
}
EXPORT_SYMBOL(il_eeprom_init);
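/*
 * With nothing allocated before the kzalloc(), the -ENOMEM case can
 * return directly and the dead alloc_err label goes away; goto-style
 * unwinding is only kept for failures that must free the EEPROM buffer.
 * The general shape (a sketch):
 */
static int example_init(struct il_priv *il, size_t sz)
{
	il->eeprom = kzalloc(sz, GFP_KERNEL);
	if (!il->eeprom)
		return -ENOMEM;	/* first allocation: nothing to undo */

	/* later failures: goto err, which frees il->eeprom */
	return 0;
}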
@@ -862,7 +860,7 @@ il_init_band_reference(const struct il_priv *il, int eep_band,
* Does not set up a command, or touch hardware.
*/
static int
-il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
+il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
const struct il_eeprom_channel *eeprom_ch,
u8 clear_ht40_extension_channel)
{
@@ -947,7 +945,7 @@ il_init_channel_map(struct il_priv *il)
ch_info->channel = eeprom_ch_idx[ch];
ch_info->band =
(band ==
- 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ 1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
/* permanently store EEPROM's channel regulatory flags
* and max power in channel info database. */
@@ -1005,14 +1003,14 @@ il_init_channel_map(struct il_priv *il)
/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
for (band = 6; band <= 7; band++) {
- enum ieee80211_band ieeeband;
+ enum nl80211_band ieeeband;
il_init_band_reference(il, band, &eeprom_ch_count,
&eeprom_ch_info, &eeprom_ch_idx);
/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
ieeeband =
- (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
/* Loop through each band adding each of the channels */
for (ch = 0; ch < eeprom_ch_count; ch++) {
@@ -1050,19 +1048,19 @@ EXPORT_SYMBOL(il_free_channel_map);
* Based on band and channel number.
*/
const struct il_channel_info *
-il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
+il_get_channel_info(const struct il_priv *il, enum nl80211_band band,
u16 channel)
{
int i;
switch (band) {
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
for (i = 14; i < il->channel_count; i++) {
if (il->channel_info[i].channel == channel)
return &il->channel_info[i];
}
break;
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
if (channel >= 1 && channel <= 14)
return &il->channel_info[channel - 1];
break;
@@ -1459,7 +1457,7 @@ il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
clear_bit(S_SCAN_HW, &il->status);
D_SCAN("Scan on %sGHz took %dms\n",
- (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+ (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
jiffies_to_msecs(jiffies - il->scan_start));
queue_work(il->workqueue, &il->scan_completed);
@@ -1477,10 +1475,10 @@ il_setup_rx_scan_handlers(struct il_priv *il)
EXPORT_SYMBOL(il_setup_rx_scan_handlers);
u16
-il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
u8 n_probes)
{
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
return IL_ACTIVE_DWELL_TIME_52 +
IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
else
@@ -1490,14 +1488,14 @@ il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
EXPORT_SYMBOL(il_get_active_dwell_time);
u16
-il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
struct ieee80211_vif *vif)
{
u16 value;
u16 passive =
(band ==
- IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
+ NL80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
IL_PASSIVE_DWELL_TIME_52;
@@ -1522,10 +1520,10 @@ void
il_init_scan_params(struct il_priv *il)
{
u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
- if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
- il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
- if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
- il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+ if (!il->scan_tx_ant[NL80211_BAND_5GHZ])
+ il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
+ if (!il->scan_tx_ant[NL80211_BAND_2GHZ])
+ il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
}
EXPORT_SYMBOL(il_init_scan_params);
@@ -2005,7 +2003,7 @@ il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
il_set_ht_add_station(il, sta_id, sta);
/* 3945 only */
- rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
+ rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
/* Turn on both antennas for the station... */
station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
@@ -2794,8 +2792,10 @@ il_tx_queue_free(struct il_priv *il, int txq_id)
il_tx_queue_unmap(il, txq_id);
/* De-alloc array of command/tx buffers */
- for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
- kfree(txq->cmd[i]);
+ if (txq->cmd) {
+ for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+ }
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd)
@@ -2873,8 +2873,10 @@ il_cmd_queue_free(struct il_priv *il)
il_cmd_queue_unmap(il);
/* De-alloc array of command/tx buffers */
- for (i = 0; i <= TFD_CMD_SLOTS; i++)
- kfree(txq->cmd[i]);
+ if (txq->cmd) {
+ for (i = 0; i <= TFD_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+ }
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd)
@@ -3080,7 +3082,9 @@ err:
kfree(txq->cmd[i]);
out_free_arrays:
kfree(txq->meta);
+ txq->meta = NULL;
kfree(txq->cmd);
+ txq->cmd = NULL;
return -ENOMEM;
}
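/*
 * This error path now NULLs txq->meta and txq->cmd after freeing them,
 * and the free routines above gained "if (txq->cmd)" guards around the
 * per-slot kfree() loops. Together they make teardown idempotent: a
 * queue whose init failed half-way can reach the free path again without
 * a NULL dereference or a double free. Minimal sketch of the idiom:
 */
static void example_free_cmds(struct il_tx_queue *txq, int slots)
{
	int i;

	if (txq->cmd) {			/* NULL after a failed init */
		for (i = 0; i < slots; i++)
			kfree(txq->cmd[i]);
	}
	kfree(txq->cmd);
	txq->cmd = NULL;		/* repeated free is now a no-op */
}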
@@ -3378,7 +3382,7 @@ EXPORT_SYMBOL(il_bcast_addr);
static void
il_init_ht_hw_capab(const struct il_priv *il,
struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
u16 max_bit_rate = 0;
u8 rx_chains_num = il->hw_params.rx_chains_num;
@@ -3439,8 +3443,8 @@ il_init_geos(struct il_priv *il)
int i = 0;
s8 max_tx_power = 0;
- if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
- il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+ if (il->bands[NL80211_BAND_2GHZ].n_bitrates ||
+ il->bands[NL80211_BAND_5GHZ].n_bitrates) {
D_INFO("Geography modes already initialized.\n");
set_bit(S_GEO_CONFIGURED, &il->status);
return 0;
@@ -3461,23 +3465,23 @@ il_init_geos(struct il_priv *il)
}
/* 5.2GHz channels start after the 2.4GHz channels */
- sband = &il->bands[IEEE80211_BAND_5GHZ];
+ sband = &il->bands[NL80211_BAND_5GHZ];
sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
/* just OFDM */
sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
if (il->cfg->sku & IL_SKU_N)
- il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+ il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ);
- sband = &il->bands[IEEE80211_BAND_2GHZ];
+ sband = &il->bands[NL80211_BAND_2GHZ];
sband->channels = channels;
/* OFDM & CCK */
sband->bitrates = rates;
sband->n_bitrates = RATE_COUNT_LEGACY;
if (il->cfg->sku & IL_SKU_N)
- il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+ il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ);
il->ieee_channels = channels;
il->ieee_rates = rates;
@@ -3528,7 +3532,7 @@ il_init_geos(struct il_priv *il)
il->tx_power_user_lmt = max_tx_power;
il->tx_power_next = max_tx_power;
- if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
+ if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 &&
(il->cfg->sku & IL_SKU_A)) {
IL_INFO("Incorrectly detected BG card as ABG. "
"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
@@ -3537,8 +3541,8 @@ il_init_geos(struct il_priv *il)
}
IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
- il->bands[IEEE80211_BAND_2GHZ].n_channels,
- il->bands[IEEE80211_BAND_5GHZ].n_channels);
+ il->bands[NL80211_BAND_2GHZ].n_channels,
+ il->bands[NL80211_BAND_5GHZ].n_channels);
set_bit(S_GEO_CONFIGURED, &il->status);
@@ -3559,7 +3563,7 @@ il_free_geos(struct il_priv *il)
EXPORT_SYMBOL(il_free_geos);
static bool
-il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
+il_is_channel_extension(struct il_priv *il, enum nl80211_band band,
u16 channel, u8 extension_chan_offset)
{
const struct il_channel_info *ch_info;
@@ -3922,14 +3926,14 @@ EXPORT_SYMBOL(il_set_rxon_ht);
/* Return valid, unused, channel for a passive scan to reset the RF */
u8
-il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
+il_get_single_channel_number(struct il_priv *il, enum nl80211_band band)
{
const struct il_channel_info *ch_info;
int i;
u8 channel = 0;
u8 min, max;
- if (band == IEEE80211_BAND_5GHZ) {
+ if (band == NL80211_BAND_5GHZ) {
min = 14;
max = il->channel_count;
} else {
@@ -3961,14 +3965,14 @@ EXPORT_SYMBOL(il_get_single_channel_number);
int
il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
{
- enum ieee80211_band band = ch->band;
+ enum nl80211_band band = ch->band;
u16 channel = ch->hw_value;
if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
return 0;
il->staging.channel = cpu_to_le16(channel);
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
else
il->staging.flags |= RXON_FLG_BAND_24G_MSK;
@@ -3982,10 +3986,10 @@ il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
EXPORT_SYMBOL(il_set_rxon_channel);
void
-il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
+il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
struct ieee80211_vif *vif)
{
- if (band == IEEE80211_BAND_5GHZ) {
+ if (band == NL80211_BAND_5GHZ) {
il->staging.flags &=
~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
RXON_FLG_CCK_MSK);
@@ -5411,7 +5415,7 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changes & BSS_CHANGED_ERP_CTS_PROT) {
D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
- if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
+ if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ)
il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
else
il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index ce52cf114..726ede391 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -432,7 +432,7 @@ u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
int il_init_channel_map(struct il_priv *il);
void il_free_channel_map(struct il_priv *il);
const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
- enum ieee80211_band band,
+ enum nl80211_band band,
u16 channel);
#define IL_NUM_SCAN_RATES (2)
@@ -497,7 +497,7 @@ struct il_channel_info {
u8 group_idx; /* 0-4, maps channel to group1/2/3/4/5 */
u8 band_idx; /* 0-4, maps channel to band1/2/3/4/5 */
- enum ieee80211_band band;
+ enum nl80211_band band;
/* HT40 channel info */
s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
@@ -811,7 +811,7 @@ struct il_sensitivity_ranges {
* @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
* @max_stations:
* @ht40_channel: is 40MHz width possible in band 2.4
- * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
+ * BIT(NL80211_BAND_5GHZ) BIT(NL80211_BAND_5GHZ)
* @sw_crypto: 0 for hw, 1 for sw
* @max_xxx_size: for ucode uses
* @ct_kill_threshold: temperature threshold
@@ -1141,13 +1141,13 @@ struct il_priv {
struct list_head free_frames;
int frames_count;
- enum ieee80211_band band;
+ enum nl80211_band band;
int alloc_rxb_page;
void (*handlers[IL_CN_MAX]) (struct il_priv *il,
struct il_rx_buf *rxb);
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
/* spectrum measurement report caching */
struct il_spectrum_notification measure_report;
@@ -1176,10 +1176,10 @@ struct il_priv {
unsigned long scan_start;
unsigned long scan_start_tsf;
void *scan_cmd;
- enum ieee80211_band scan_band;
+ enum nl80211_band scan_band;
struct cfg80211_scan_request *scan_request;
struct ieee80211_vif *scan_vif;
- u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+ u8 scan_tx_ant[NUM_NL80211_BANDS];
u8 mgmt_tx_ant;
/* spinlock */
@@ -1479,7 +1479,7 @@ il_is_channel_radar(const struct il_channel_info *ch_info)
static inline u8
il_is_channel_a_band(const struct il_channel_info *ch_info)
{
- return ch_info->band == IEEE80211_BAND_5GHZ;
+ return ch_info->band == NL80211_BAND_5GHZ;
}
static inline int
@@ -1673,7 +1673,7 @@ struct il_cfg {
/* params not likely to change within a device family */
struct il_base_params *base_params;
/* params likely to change within a device family */
- u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+ u8 scan_rx_antennas[NUM_NL80211_BANDS];
enum il_led_mode led_mode;
int eeprom_size;
@@ -1707,9 +1707,9 @@ void il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt);
int il_check_rxon_cmd(struct il_priv *il);
int il_full_rxon_required(struct il_priv *il);
int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch);
-void il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
+void il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
struct ieee80211_vif *vif);
-u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
+u8 il_get_single_channel_number(struct il_priv *il, enum nl80211_band band);
void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
bool il_is_ht40_tx_allowed(struct il_priv *il,
struct ieee80211_sta_ht_cap *ht_cap);
@@ -1793,9 +1793,9 @@ int il_force_reset(struct il_priv *il, bool external);
u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
const u8 *ta, const u8 *ie, int ie_len, int left);
void il_setup_rx_scan_handlers(struct il_priv *il);
-u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+u16 il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
u8 n_probes);
-u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+u16 il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
struct ieee80211_vif *vif);
void il_setup_scan_deferred_work(struct il_priv *il);
void il_cancel_scan_deferred_work(struct il_priv *il);
@@ -1955,7 +1955,7 @@ il_commit_rxon(struct il_priv *il)
}
static inline const struct ieee80211_supported_band *
-il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
+il_get_hw_mode(struct il_priv *il, enum nl80211_band band)
{
return il->hw->wiphy->bands[band];
}
@@ -2813,7 +2813,7 @@ struct il_lq_sta {
u8 action_counter; /* # mode-switch actions tried */
u8 is_green;
u8 is_dup;
- enum ieee80211_band band;
+ enum nl80211_band band;
/* The following are bitmaps of rates; RATE_6M_MASK, etc. */
u32 supp_rates;
diff --git a/drivers/net/wireless/intel/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c
index 908b9f4fe..6fc6b7ff9 100644
--- a/drivers/net/wireless/intel/iwlegacy/debug.c
+++ b/drivers/net/wireless/intel/iwlegacy/debug.c
@@ -544,7 +544,7 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
return -ENOMEM;
}
- supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
+ supp_band = il_get_hw_mode(il, NL80211_BAND_2GHZ);
if (supp_band) {
channels = supp_band->channels;
@@ -571,7 +571,7 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
flags & IEEE80211_CHAN_NO_IR ?
"passive only" : "active/passive");
}
- supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
+ supp_band = il_get_hw_mode(il, NL80211_BAND_5GHZ);
if (supp_band) {
channels = supp_band->channels;
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 16c4f3834..b64db47b3 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -88,16 +88,6 @@ config IWLWIFI_BCAST_FILTERING
If unsure, don't enable this option, as some programs might
expect incoming broadcasts for their normal operations.
-config IWLWIFI_UAPSD
- bool "enable U-APSD by default"
- depends on IWLMVM
- help
- Say Y here to enable U-APSD by default. This may cause
- interoperability problems with some APs, manifesting in lower than
- expected throughput due to those APs not enabling aggregation
-
- If unsure, say N.
-
config IWLWIFI_PCIE_RTPM
bool "Enable runtime power management mode for PCIe devices"
depends on IWLMVM && PM
@@ -144,12 +134,6 @@ config IWLWIFI_DEBUGFS
is a low-impact option that allows getting insight into the
driver's state at runtime.
-config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
- bool "Experimental uCode support"
- depends on IWLWIFI_DEBUG
- ---help---
- Enable use of experimental ucode for testing and debugging.
-
config IWLWIFI_DEVICE_TRACING
bool "iwlwifi device access tracing"
depends on EVENT_TRACING
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index 9de277c6c..b79e38734 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -158,7 +158,7 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
struct iwl_rxon_context *ctx);
void iwl_set_flags_for_band(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct ieee80211_vif *vif);
/* uCode */
@@ -186,7 +186,7 @@ int iwl_send_statistics_request(struct iwl_priv *priv,
u8 flags, bool clear);
static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
- struct iwl_priv *priv, enum ieee80211_band band)
+ struct iwl_priv *priv, enum nl80211_band band)
{
return priv->hw->wiphy->bands[band];
}
@@ -198,7 +198,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan);
#endif
/* rx */
-int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band);
void iwl_setup_rx_handlers(struct iwl_priv *priv);
void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
@@ -258,7 +258,7 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
int __must_check iwl_scan_initiate(struct iwl_priv *priv,
struct ieee80211_vif *vif,
enum iwl_scan_type scan_type,
- enum ieee80211_band band);
+ enum nl80211_band band);
/* For faster active scanning, scan will move to the next channel if fewer than
* PLCP_QUIET_THRESH packets are heard on this channel within
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
index 74c516152..f6591c83d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
@@ -335,7 +335,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
+ supp_band = iwl_get_hw_mode(priv, NL80211_BAND_2GHZ);
if (supp_band) {
channels = supp_band->channels;
@@ -358,7 +358,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
IEEE80211_CHAN_NO_IR ?
"passive only" : "active/passive");
}
- supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
+ supp_band = iwl_get_hw_mode(priv, NL80211_BAND_5GHZ);
if (supp_band) {
channels = supp_band->channels;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index 1a7ead753..8148df61a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -677,7 +677,7 @@ struct iwl_priv {
struct iwl_hw_params hw_params;
- enum ieee80211_band band;
+ enum nl80211_band band;
u8 valid_contexts;
void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
@@ -722,11 +722,11 @@ struct iwl_priv {
unsigned long scan_start;
unsigned long scan_start_tsf;
void *scan_cmd;
- enum ieee80211_band scan_band;
+ enum nl80211_band scan_band;
struct cfg80211_scan_request *scan_request;
struct ieee80211_vif *scan_vif;
enum iwl_scan_type scan_type;
- u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+ u8 scan_tx_ant[NUM_NL80211_BANDS];
u8 mgmt_tx_ant;
/* max number of station keys */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index cc13c0406..f21732ec3 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -420,7 +420,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
.data = { &cmd, },
};
- cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+ cmd.band = priv->band == NL80211_BAND_2GHZ;
ch = ch_switch->chandef.chan->hw_value;
IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
ctx->active.channel, ch);
@@ -588,7 +588,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
hcmd.data[0] = cmd;
- cmd->band = priv->band == IEEE80211_BAND_2GHZ;
+ cmd->band = priv->band == NL80211_BAND_2GHZ;
ch = ch_switch->chandef.chan->hw_value;
IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
ctx->active.channel, ch);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 179946926..8dda52ae3 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -94,7 +94,7 @@ void iwlagn_temperature(struct iwl_priv *priv)
iwl_tt_handler(priv);
}
-int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
{
int idx = 0;
int band_offset = 0;
@@ -105,7 +105,7 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
return idx;
/* Legacy rate format, search for match in table */
} else {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
band_offset = IWL_FIRST_OFDM_RATE;
for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
@@ -878,7 +878,7 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
int i;
u8 ind = ant;
- if (priv->band == IEEE80211_BAND_2GHZ &&
+ if (priv->band == NL80211_BAND_2GHZ &&
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index c63ea7957..8c0719468 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -202,12 +202,12 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
- if (priv->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->nvm_data->bands[IEEE80211_BAND_2GHZ];
- if (priv->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->nvm_data->bands[IEEE80211_BAND_5GHZ];
+ if (priv->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
+ priv->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &priv->nvm_data->bands[NL80211_BAND_2GHZ];
+ if (priv->nvm_data->bands[NL80211_BAND_5GHZ].n_channels)
+ priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &priv->nvm_data->bands[NL80211_BAND_5GHZ];
hw->wiphy->hw_version = priv->trans->hw_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 856281279..37b32a6f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -262,7 +262,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
/* In mac80211, rates for 5 GHz start at 0 */
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
rate += IWL_FIRST_OFDM_RATE;
else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE)
rate_flags |= RATE_MCS_CCK_MSK;
@@ -1071,7 +1071,7 @@ static void iwl_bg_restart(struct work_struct *data)
static void iwl_setup_deferred_work(struct iwl_priv *priv)
{
- priv->workqueue = create_singlethread_workqueue(DRV_NAME);
+ priv->workqueue = alloc_ordered_workqueue(DRV_NAME, 0);
INIT_WORK(&priv->restart, iwl_bg_restart);
INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
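/*
 * create_singlethread_workqueue() is a legacy wrapper; per the 4.7
 * workqueue.h, alloc_ordered_workqueue(DRV_NAME, 0) expands to
 *
 *	alloc_workqueue(DRV_NAME, WQ_UNBOUND | __WQ_ORDERED, 1);
 *
 * i.e. an unbound queue limited to one in-flight work item, so the
 * strict ordering the driver relied on is preserved under the current
 * API.
 */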
@@ -1117,7 +1117,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
INIT_LIST_HEAD(&priv->calib_results);
- priv->band = IEEE80211_BAND_2GHZ;
+ priv->band = NL80211_BAND_2GHZ;
priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index ee7505537..b95c2d76d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -599,7 +599,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_priv *priv,
* fill "search" or "active" tx mode table.
*/
static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct iwl_scale_tbl_info *tbl,
int *rate_idx)
{
@@ -624,7 +624,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
/* legacy rate format */
if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
if (num_of_ant == 1) {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
tbl->lq_type = LQ_A;
else
tbl->lq_type = LQ_G;
@@ -802,7 +802,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
switch_to_legacy = 1;
scale_index = rs_ht_to_legacy[scale_index];
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
tbl->lq_type = LQ_A;
else
tbl->lq_type = LQ_G;
@@ -821,7 +821,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
/* Mask with station rate restriction */
if (is_legacy(tbl->lq_type)) {
/* supp_rates has no CCK bits in A mode */
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
rate_mask = (u16)(rate_mask &
(lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
else
@@ -939,7 +939,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
table = &lq_sta->lq;
tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
- if (priv->band == IEEE80211_BAND_5GHZ)
+ if (priv->band == NL80211_BAND_5GHZ)
rs_index -= IWL_FIRST_OFDM_RATE;
mac_flags = info->status.rates[0].flags;
mac_index = info->status.rates[0].idx;
@@ -952,7 +952,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
* mac80211 HT index is always zero-indexed; we need to move
* HT OFDM rates after CCK rates in 2.4 GHz band
*/
- if (priv->band == IEEE80211_BAND_2GHZ)
+ if (priv->band == NL80211_BAND_2GHZ)
mac_index += IWL_FIRST_OFDM_RATE;
}
/* Here we actually compare this rate to the latest LQ command */
@@ -2284,7 +2284,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
/* mask with station rate restriction */
if (is_legacy(tbl->lq_type)) {
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
/* supp_rates has no CCK bits in A mode */
rate_scale_index_msk = (u16) (rate_mask &
(lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
@@ -2721,7 +2721,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
/* Get max rate if user set max rate */
if (lq_sta) {
lq_sta->max_rate_idx = txrc->max_rate_idx;
- if ((sband->band == IEEE80211_BAND_5GHZ) &&
+ if ((sband->band == NL80211_BAND_5GHZ) &&
(lq_sta->max_rate_idx != -1))
lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
if ((lq_sta->max_rate_idx < 0) ||
@@ -2763,11 +2763,11 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
} else {
/* Check for invalid rates */
if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
- ((sband->band == IEEE80211_BAND_5GHZ) &&
+ ((sband->band == NL80211_BAND_5GHZ) &&
(rate_idx < IWL_FIRST_OFDM_RATE)))
rate_idx = rate_lowest_index(sband, sta);
/* On valid 5 GHz rate, adjust index */
- else if (sband->band == IEEE80211_BAND_5GHZ)
+ else if (sband->band == NL80211_BAND_5GHZ)
rate_idx -= IWL_FIRST_OFDM_RATE;
info->control.rates[0].flags = 0;
}
@@ -2880,7 +2880,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
/* Set last_txrate_idx to lowest rate */
lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
- if (sband->band == IEEE80211_BAND_5GHZ)
+ if (sband->band == NL80211_BAND_5GHZ)
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
lq_sta->is_agg = 0;
#ifdef CONFIG_MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
index c5fe44584..50c1e951d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
@@ -355,7 +355,7 @@ struct iwl_lq_sta {
u8 action_counter; /* # mode-switch actions tried */
u8 is_green;
u8 is_dup;
- enum ieee80211_band band;
+ enum nl80211_band band;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
u32 supp_rates;
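Most of this patch is the mechanical rename of enum ieee80211_band to enum nl80211_band: mac80211's private band enum was removed in this kernel in favor of the cfg80211/nl80211 one, and since the 2.4 GHz and 5 GHz values are numerically identical, band-indexed arrays keep working unchanged. A sketch of the indexing pattern after the rename (assumes <net/cfg80211.h> and kernel types from <linux/types.h>):

#include <net/cfg80211.h>

/* NUM_NL80211_BANDS replaces IEEE80211_NUM_BANDS as the array size;
 * NL80211_BAND_2GHZ == 0 and NL80211_BAND_5GHZ == 1, as before. */
static u8 scan_tx_ant[NUM_NL80211_BANDS];

static u8 get_scan_ant(enum nl80211_band band)
{
	return scan_tx_ant[band];
}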
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index 52ab1e012..dfa2041cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -686,7 +686,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx_napi(priv->hw, skb, priv->napi);
+ ieee80211_rx_napi(priv->hw, NULL, skb, priv->napi);
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
@@ -834,7 +834,7 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
/* rx_status carries information about the packet to mac80211 */
rx_status.mactime = le64_to_cpu(phy_res->timestamp);
rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
rx_status.band);
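The extra NULL in the ieee80211_rx_napi() call reflects a mac80211 signature change in this release: the function gained a struct ieee80211_sta * parameter so drivers that have already resolved the transmitting station can pass it in. dvm has not, so it passes NULL and mac80211 performs the lookup itself. Sketched against the new signature:

#include <net/mac80211.h>

static void pass_to_mac80211(struct ieee80211_hw *hw,
			     struct sk_buff *skb,
			     struct napi_struct *napi)
{
	/* sta may be NULL; mac80211 then resolves the station
	 * from the frame's addresses on its own */
	ieee80211_rx_napi(hw, NULL, skb, napi);
}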
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index 2d47cb24c..b22855218 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -719,7 +719,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
struct iwl_rxon_context *ctx)
{
- enum ieee80211_band band = ch->band;
+ enum nl80211_band band = ch->band;
u16 channel = ch->hw_value;
if ((le16_to_cpu(ctx->staging.channel) == channel) &&
@@ -727,7 +727,7 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
return;
ctx->staging.channel = cpu_to_le16(channel);
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
else
ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
@@ -740,10 +740,10 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
void iwl_set_flags_for_band(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct ieee80211_vif *vif)
{
- if (band == IEEE80211_BAND_5GHZ) {
+ if (band == NL80211_BAND_5GHZ) {
ctx->staging.flags &=
~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
| RXON_FLG_CCK_MSK);
@@ -1476,7 +1476,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
iwlagn_set_rxon_chain(priv, ctx);
- if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
+ if (bss_conf->use_cts_prot && (priv->band != NL80211_BAND_5GHZ))
ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
else
ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
index 81a2ddbe9..d01766f16 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -312,7 +312,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
scan_notif->tsf_high, scan_notif->status);
IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
- (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+ (priv->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
jiffies_to_msecs(jiffies - priv->scan_start));
/*
@@ -362,9 +362,9 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
}
static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band, u8 n_probes)
+ enum nl80211_band band, u8 n_probes)
{
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
return IWL_ACTIVE_DWELL_TIME_52 +
IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
else
@@ -431,9 +431,9 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
}
static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
- u16 passive = (band == IEEE80211_BAND_2GHZ) ?
+ u16 passive = (band == NL80211_BAND_2GHZ) ?
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
@@ -442,7 +442,7 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
/* Return valid, unused, channel for a passive scan to reset the RF */
static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band];
struct iwl_rxon_context *ctx;
@@ -470,7 +470,7 @@ static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
struct ieee80211_vif *vif,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct iwl_scan_channel *scan_ch)
{
const struct ieee80211_supported_band *sband;
@@ -492,7 +492,7 @@ static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
/* Set txpower levels to defaults */
scan_ch->dsp_atten = 110;
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
else
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -505,7 +505,7 @@ static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
static int iwl_get_channels_for_scan(struct iwl_priv *priv,
struct ieee80211_vif *vif,
- enum ieee80211_band band,
+ enum nl80211_band band,
u8 is_active, u8 n_probes,
struct iwl_scan_channel *scan_ch)
{
@@ -553,7 +553,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
* power level:
* scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
*/
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
else
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -636,7 +636,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u32 rate_flags = 0;
u16 cmd_len = 0;
u16 rx_chain = 0;
- enum ieee80211_band band;
+ enum nl80211_band band;
u8 n_probes = 0;
u8 rx_ant = priv->nvm_data->valid_rx_ant;
u8 rate;
@@ -750,7 +750,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
switch (priv->scan_band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
chan_mod = le32_to_cpu(
priv->contexts[IWL_RXON_CTX_BSS].active.flags &
@@ -771,7 +771,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
priv->lib->bt_params->advanced_bt_coexist)
scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
rate = IWL_RATE_6M_PLCP;
break;
default:
@@ -809,7 +809,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
band = priv->scan_band;
- if (band == IEEE80211_BAND_2GHZ &&
+ if (band == NL80211_BAND_2GHZ &&
priv->lib->bt_params &&
priv->lib->bt_params->advanced_bt_coexist) {
/* transmit 2.4 GHz probes only on first antenna */
@@ -925,16 +925,16 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
void iwl_init_scan_params(struct iwl_priv *priv)
{
u8 ant_idx = fls(priv->nvm_data->valid_tx_ant) - 1;
- if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
- priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
- if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
- priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+ if (!priv->scan_tx_ant[NL80211_BAND_5GHZ])
+ priv->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
+ if (!priv->scan_tx_ant[NL80211_BAND_2GHZ])
+ priv->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
}
int __must_check iwl_scan_initiate(struct iwl_priv *priv,
struct ieee80211_vif *vif,
enum iwl_scan_type scan_type,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
int ret;
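The dwell-time helpers touched above encode a simple policy: passive dwell is a base value plus a per-band constant, while active dwell on 5 GHz scales with the number of probe requests sent. A worked sketch of the 5 GHz formula from the hunk, with placeholder constants (the real IWL_* values live in the driver headers; kernel types from <linux/types.h> assumed):

#define ACTIVE_DWELL_TIME_52       10	/* placeholder, not the driver's value */
#define ACTIVE_DWELL_FACTOR_52GHZ   3	/* placeholder */

static u16 active_dwell_5ghz(u8 n_probes)
{
	/* one slot beyond the probe count, per the driver's formula */
	return ACTIVE_DWELL_TIME_52 +
	       ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
}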
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index 8e9768a55..de6ec9b7a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -579,7 +579,7 @@ static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
/* Set up the rate scaling to start at selected rate, fall back
* all the way down to 1M in IEEE order, and then spin on 1M */
- if (priv->band == IEEE80211_BAND_5GHZ)
+ if (priv->band == NL80211_BAND_5GHZ)
r = IWL_RATE_6M_INDEX;
else if (ctx && ctx->vif && ctx->vif->p2p)
r = IWL_RATE_6M_INDEX;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index 59e2001c3..4b97371c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -81,7 +81,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
tx_flags |= TX_CMD_FLG_TSF_MSK;
else if (ieee80211_is_back_req(fc))
tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
- else if (info->band == IEEE80211_BAND_2GHZ &&
+ else if (info->band == NL80211_BAND_2GHZ &&
priv->lib->bt_params &&
priv->lib->bt_params->advanced_bt_coexist &&
(ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
@@ -177,7 +177,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
rate_idx = rate_lowest_index(
&priv->nvm_data->bands[info->band], sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
rate_idx += IWL_FIRST_OFDM_RATE;
/* Get PLCP rate for tx_cmd->rate_n_flags */
rate_plcp = iwl_rates[rate_idx].plcp;
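The recurring rate_idx += IWL_FIRST_OFDM_RATE adjustment in tx.c and rs.c exists because mac80211's 5 GHz band contains no CCK entries: its rate index 0 is the first OFDM rate, while the driver's rate table lists the four CCK rates (1, 2, 5.5, 11 Mbps) first. A sketch of the remap (the value 4 illustrates that layout and is not a quoted driver constant):

#include <net/cfg80211.h>

enum { FIRST_OFDM_RATE = 4 };	/* CCK entries 1/2/5.5/11 Mbps precede it */

static int driver_rate_idx(int mac_idx, enum nl80211_band band)
{
	/* 5 GHz sband indices start at OFDM, so shift them up;
	 * 2.4 GHz indices already line up with the driver table */
	return band == NL80211_BAND_5GHZ ? mac_idx + FIRST_OFDM_RATE
					 : mac_idx;
}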
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
index 2cfac764a..9bdac0306 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
@@ -34,10 +34,6 @@
#define IWL1000_UCODE_API_MAX 5
#define IWL100_UCODE_API_MAX 5
-/* Oldest version we won't warn about */
-#define IWL1000_UCODE_API_OK 5
-#define IWL100_UCODE_API_OK 5
-
/* Lowest firmware API version supported */
#define IWL1000_UCODE_API_MIN 1
#define IWL100_UCODE_API_MIN 5
@@ -56,7 +52,7 @@
static const struct iwl_base_params iwl1000_base_params = {
.num_of_queues = IWLAGN_NUM_QUEUES,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
- .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+ .pll_cfg = true,
.max_ll_items = OTP_MAX_LL_ITEMS_1000,
.shadow_ram_support = false,
.led_compensation = 51,
@@ -68,7 +64,7 @@ static const struct iwl_base_params iwl1000_base_params = {
static const struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
};
static const struct iwl_eeprom_params iwl1000_eeprom_params = {
@@ -86,7 +82,6 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
#define IWL_DEVICE_1000 \
.fw_name_pre = IWL1000_FW_PRE, \
.ucode_api_max = IWL1000_UCODE_API_MAX, \
- .ucode_api_ok = IWL1000_UCODE_API_OK, \
.ucode_api_min = IWL1000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_1000, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
@@ -112,7 +107,6 @@ const struct iwl_cfg iwl1000_bg_cfg = {
#define IWL_DEVICE_100 \
.fw_name_pre = IWL100_FW_PRE, \
.ucode_api_max = IWL100_UCODE_API_MAX, \
- .ucode_api_ok = IWL100_UCODE_API_OK, \
.ucode_api_min = IWL100_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_100, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
index f128e40cb..761cdccaa 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
@@ -36,12 +36,6 @@
#define IWL105_UCODE_API_MAX 6
#define IWL135_UCODE_API_MAX 6
-/* Oldest version we won't warn about */
-#define IWL2030_UCODE_API_OK 6
-#define IWL2000_UCODE_API_OK 6
-#define IWL105_UCODE_API_OK 6
-#define IWL135_UCODE_API_OK 6
-
/* Lowest firmware API version supported */
#define IWL2030_UCODE_API_MIN 5
#define IWL2000_UCODE_API_MIN 5
@@ -68,7 +62,6 @@
static const struct iwl_base_params iwl2000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -82,7 +75,6 @@ static const struct iwl_base_params iwl2000_base_params = {
static const struct iwl_base_params iwl2030_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 57,
@@ -95,7 +87,7 @@ static const struct iwl_base_params iwl2030_base_params = {
static const struct iwl_ht_params iwl2000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
};
static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
@@ -114,7 +106,6 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
#define IWL_DEVICE_2000 \
.fw_name_pre = IWL2000_FW_PRE, \
.ucode_api_max = IWL2000_UCODE_API_MAX, \
- .ucode_api_ok = IWL2000_UCODE_API_OK, \
.ucode_api_min = IWL2000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_2000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -142,7 +133,6 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
#define IWL_DEVICE_2030 \
.fw_name_pre = IWL2030_FW_PRE, \
.ucode_api_max = IWL2030_UCODE_API_MAX, \
- .ucode_api_ok = IWL2030_UCODE_API_OK, \
.ucode_api_min = IWL2030_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_2030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -163,7 +153,6 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
#define IWL_DEVICE_105 \
.fw_name_pre = IWL105_FW_PRE, \
.ucode_api_max = IWL105_UCODE_API_MAX, \
- .ucode_api_ok = IWL105_UCODE_API_OK, \
.ucode_api_min = IWL105_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_105, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -191,7 +180,6 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
#define IWL_DEVICE_135 \
.fw_name_pre = IWL135_FW_PRE, \
.ucode_api_max = IWL135_UCODE_API_MAX, \
- .ucode_api_ok = IWL135_UCODE_API_OK, \
.ucode_api_min = IWL135_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_135, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
index cae82e1a1..230f324ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
@@ -34,10 +34,6 @@
#define IWL5000_UCODE_API_MAX 5
#define IWL5150_UCODE_API_MAX 2
-/* Oldest version we won't warn about */
-#define IWL5000_UCODE_API_OK 5
-#define IWL5150_UCODE_API_OK 2
-
/* Lowest firmware API version supported */
#define IWL5000_UCODE_API_MIN 1
#define IWL5150_UCODE_API_MIN 1
@@ -57,7 +53,7 @@
static const struct iwl_base_params iwl5000_base_params = {
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+ .pll_cfg = true,
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 512,
@@ -66,7 +62,7 @@ static const struct iwl_base_params iwl5000_base_params = {
static const struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
static const struct iwl_eeprom_params iwl5000_eeprom_params = {
@@ -84,7 +80,6 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
#define IWL_DEVICE_5000 \
.fw_name_pre = IWL5000_FW_PRE, \
.ucode_api_max = IWL5000_UCODE_API_MAX, \
- .ucode_api_ok = IWL5000_UCODE_API_OK, \
.ucode_api_min = IWL5000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_5000, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
@@ -132,7 +127,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
- .ucode_api_ok = IWL5000_UCODE_API_OK,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.device_family = IWL_DEVICE_FAMILY_5000,
.max_inst_size = IWLAGN_RTC_INST_SIZE,
@@ -149,7 +143,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
#define IWL_DEVICE_5150 \
.fw_name_pre = IWL5150_FW_PRE, \
.ucode_api_max = IWL5150_UCODE_API_MAX, \
- .ucode_api_ok = IWL5150_UCODE_API_OK, \
.ucode_api_min = IWL5150_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_5150, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
index b8b2a0e08..fa86e5e7d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
@@ -36,13 +36,6 @@
#define IWL6000G2_UCODE_API_MAX 6
#define IWL6035_UCODE_API_MAX 6
-/* Oldest version we won't warn about */
-#define IWL6000_UCODE_API_OK 4
-#define IWL6000G2_UCODE_API_OK 5
-#define IWL6050_UCODE_API_OK 5
-#define IWL6000G2B_UCODE_API_OK 6
-#define IWL6035_UCODE_API_OK 6
-
/* Lowest firmware API version supported */
#define IWL6000_UCODE_API_MIN 4
#define IWL6050_UCODE_API_MIN 4
@@ -78,7 +71,6 @@
static const struct iwl_base_params iwl6000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -91,7 +83,6 @@ static const struct iwl_base_params iwl6000_base_params = {
static const struct iwl_base_params iwl6050_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -104,7 +95,6 @@ static const struct iwl_base_params iwl6050_base_params = {
static const struct iwl_base_params iwl6000_g2_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 57,
@@ -117,7 +107,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
static const struct iwl_ht_params iwl6000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
static const struct iwl_eeprom_params iwl6000_eeprom_params = {
@@ -136,7 +126,6 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
#define IWL_DEVICE_6005 \
.fw_name_pre = IWL6005_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
- .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_6005, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -191,7 +180,6 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
#define IWL_DEVICE_6030 \
.fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
- .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_6030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -228,7 +216,6 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
#define IWL_DEVICE_6035 \
.fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6035_UCODE_API_MAX, \
- .ucode_api_ok = IWL6035_UCODE_API_OK, \
.ucode_api_min = IWL6035_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_6030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -282,7 +269,6 @@ const struct iwl_cfg iwl130_bg_cfg = {
#define IWL_DEVICE_6000i \
.fw_name_pre = IWL6000_FW_PRE, \
.ucode_api_max = IWL6000_UCODE_API_MAX, \
- .ucode_api_ok = IWL6000_UCODE_API_OK, \
.ucode_api_min = IWL6000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_6000i, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -370,7 +356,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
- .ucode_api_ok = IWL6000_UCODE_API_OK,
.ucode_api_min = IWL6000_UCODE_API_MIN,
.device_family = IWL_DEVICE_FAMILY_6000,
.max_inst_size = IWL60_RTC_INST_SIZE,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index 786a919fa..8d164d896 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -76,16 +76,10 @@
#define IWL7265D_UCODE_API_MAX 21
#define IWL3168_UCODE_API_MAX 21
-/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK 13
-#define IWL7265_UCODE_API_OK 13
-#define IWL7265D_UCODE_API_OK 13
-#define IWL3168_UCODE_API_OK 20
-
/* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN 13
-#define IWL7265_UCODE_API_MIN 13
-#define IWL7265D_UCODE_API_MIN 13
+#define IWL7260_UCODE_API_MIN 16
+#define IWL7265_UCODE_API_MIN 16
+#define IWL7265D_UCODE_API_MIN 16
#define IWL3168_UCODE_API_MIN 20
/* NVM versions */
@@ -128,7 +122,6 @@
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
.num_of_queues = 31,
- .pll_cfg_val = 0,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -162,7 +155,7 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
static const struct iwl_ht_params iwl7000_ht_params = {
.stbc = true,
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
#define IWL_DEVICE_7000_COMMON \
@@ -179,25 +172,21 @@ static const struct iwl_ht_params iwl7000_ht_params = {
#define IWL_DEVICE_7000 \
IWL_DEVICE_7000_COMMON, \
.ucode_api_max = IWL7260_UCODE_API_MAX, \
- .ucode_api_ok = IWL7260_UCODE_API_OK, \
.ucode_api_min = IWL7260_UCODE_API_MIN
#define IWL_DEVICE_7005 \
IWL_DEVICE_7000_COMMON, \
.ucode_api_max = IWL7265_UCODE_API_MAX, \
- .ucode_api_ok = IWL7265_UCODE_API_OK, \
.ucode_api_min = IWL7265_UCODE_API_MIN
#define IWL_DEVICE_3008 \
IWL_DEVICE_7000_COMMON, \
.ucode_api_max = IWL3168_UCODE_API_MAX, \
- .ucode_api_ok = IWL3168_UCODE_API_OK, \
.ucode_api_min = IWL3168_UCODE_API_MIN
#define IWL_DEVICE_7005D \
IWL_DEVICE_7000_COMMON, \
.ucode_api_max = IWL7265D_UCODE_API_MAX, \
- .ucode_api_ok = IWL7265D_UCODE_API_OK, \
.ucode_api_min = IWL7265D_UCODE_API_MIN
const struct iwl_cfg iwl7260_2ac_cfg = {
@@ -297,7 +286,7 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
static const struct iwl_ht_params iwl7265_ht_params = {
.stbc = true,
.ldpc = true,
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
const struct iwl_cfg iwl3165_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index 16846cae3..0b4684234 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -73,12 +73,8 @@
#define IWL8000_UCODE_API_MAX 21
#define IWL8265_UCODE_API_MAX 21
-/* Oldest version we won't warn about */
-#define IWL8000_UCODE_API_OK 13
-#define IWL8265_UCODE_API_OK 20
-
/* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN 13
+#define IWL8000_UCODE_API_MIN 16
#define IWL8265_UCODE_API_MIN 20
/* NVM versions */
@@ -116,7 +112,6 @@
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = 31,
- .pll_cfg_val = 0,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -128,7 +123,7 @@ static const struct iwl_base_params iwl8000_base_params = {
static const struct iwl_ht_params iwl8000_ht_params = {
.stbc = true,
.ldpc = true,
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
static const struct iwl_tt_params iwl8000_tt_params = {
@@ -175,19 +170,16 @@ static const struct iwl_tt_params iwl8000_tt_params = {
#define IWL_DEVICE_8000 \
IWL_DEVICE_8000_COMMON, \
.ucode_api_max = IWL8000_UCODE_API_MAX, \
- .ucode_api_ok = IWL8000_UCODE_API_OK, \
.ucode_api_min = IWL8000_UCODE_API_MIN \
#define IWL_DEVICE_8260 \
IWL_DEVICE_8000_COMMON, \
.ucode_api_max = IWL8000_UCODE_API_MAX, \
- .ucode_api_ok = IWL8000_UCODE_API_OK, \
.ucode_api_min = IWL8000_UCODE_API_MIN \
#define IWL_DEVICE_8265 \
IWL_DEVICE_8000_COMMON, \
.ucode_api_max = IWL8265_UCODE_API_MAX, \
- .ucode_api_ok = IWL8265_UCODE_API_OK, \
.ucode_api_min = IWL8265_UCODE_API_MIN \
const struct iwl_cfg iwl8260_2n_cfg = {
@@ -244,6 +236,20 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
};
+const struct iwl_cfg iwl8265_2ac_sdio_cfg = {
+ .name = "Intel(R) Dual Band Wireless-AC 8265",
+ .fw_name_pre = IWL8265_FW_PRE,
+ IWL_DEVICE_8265,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+ .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+ .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
+ .disable_dummy_notification = true,
+ .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
+ .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
+};
+
const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
.name = "Intel(R) Dual Band Wireless-AC 4165",
.fw_name_pre = IWL8000_FW_PRE,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index 7b34387d7..19569fff2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -57,11 +57,8 @@
/* Highest firmware API version supported */
#define IWL9000_UCODE_API_MAX 21
-/* Oldest version we won't warn about */
-#define IWL9000_UCODE_API_OK 13
-
/* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN 13
+#define IWL9000_UCODE_API_MIN 16
/* NVM versions */
#define IWL9000_NVM_VERSION 0x0a1d
@@ -76,15 +73,20 @@
#define IWL9000_SMEM_LEN 0x68000
#define IWL9000_FW_PRE "/*(DEBLOBBED)*/"
+#define IWL9260_FW_PRE "/*(DEBLOBBED)*/"
+#define IWL9260LC_FW_PRE "/*(DEBLOBBED)*/"
#define IWL9000_MODULE_FIRMWARE(api) \
IWL9000_FW_PRE /*(DEBLOBBED)*/
+#define IWL9260_MODULE_FIRMWARE(api) \
+ IWL9260_FW_PRE /*(DEBLOBBED)*/
+#define IWL9260LC_MODULE_FIRMWARE(api) \
+ IWL9260LC_FW_PRE /*(DEBLOBBED)*/
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
static const struct iwl_base_params iwl9000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000,
.num_of_queues = 31,
- .pll_cfg_val = 0,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -96,7 +98,7 @@ static const struct iwl_base_params iwl9000_base_params = {
static const struct iwl_ht_params iwl9000_ht_params = {
.stbc = true,
.ldpc = true,
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
static const struct iwl_tt_params iwl9000_tt_params = {
@@ -122,7 +124,6 @@ static const struct iwl_tt_params iwl9000_tt_params = {
#define IWL_DEVICE_9000 \
.ucode_api_max = IWL9000_UCODE_API_MAX, \
- .ucode_api_ok = IWL9000_UCODE_API_OK, \
.ucode_api_min = IWL9000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_8000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -137,15 +138,31 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.dccm2_len = IWL9000_DCCM2_LEN, \
.smem_offset = IWL9000_SMEM_OFFSET, \
.smem_len = IWL9000_SMEM_LEN, \
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
.thermal_params = &iwl9000_tt_params, \
.apmg_not_supported = true, \
.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = true
+ .mac_addr_from_csr = true, \
+ .rf_id = true
const struct iwl_cfg iwl9260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260",
- .fw_name_pre = IWL9000_FW_PRE,
+ .fw_name_pre = IWL9260_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+/*
+ * TODO: the struct below is for internal testing only; it should be
+ * removed by the end of 2016
+ */
+const struct iwl_cfg iwl9260lc_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 9260",
+ .fw_name_pre = IWL9260LC_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 3e4d346be..4a0af7de8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -131,6 +133,8 @@ enum iwl_led_mode {
#define IWL_MAX_WD_TIMEOUT 120000
#define IWL_DEFAULT_MAX_TX_POWER 22
+#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
+ NETIF_F_TSO | NETIF_F_TSO6)
/* Antenna presence definitions */
#define ANT_NONE 0x0
@@ -163,34 +167,36 @@ static inline u8 num_of_ant(u8 mask)
* @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
*/
struct iwl_base_params {
- int eeprom_size;
- int num_of_queues; /* def: HW dependent */
- /* for iwl_pcie_apm_init() */
- u32 pll_cfg_val;
-
- const u16 max_ll_items;
- const bool shadow_ram_support;
- u16 led_compensation;
unsigned int wd_timeout;
- u32 max_event_log_size;
- const bool shadow_reg_enable;
- const bool pcie_l1_allowed;
- const bool apmg_wake_up_wa;
- const bool scd_chain_ext_wa;
+
+ u16 eeprom_size;
+ u16 max_event_log_size;
+
+ u8 pll_cfg:1, /* for iwl_pcie_apm_init() */
+ shadow_ram_support:1,
+ shadow_reg_enable:1,
+ pcie_l1_allowed:1,
+ apmg_wake_up_wa:1,
+ scd_chain_ext_wa:1;
+
+ u8 num_of_queues; /* def: HW dependent */
+
+ u8 max_ll_items;
+ u8 led_compensation;
};
/*
* @stbc: support Tx STBC and 1*SS Rx STBC
* @ldpc: support Tx/Rx with LDPC
* @use_rts_for_aggregation: use rts/cts protection for HT traffic
- * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
+ * @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40
*/
struct iwl_ht_params {
enum ieee80211_smps_mode smps_mode;
- const bool ht_greenfield_support; /* if used set to true */
- const bool stbc;
- const bool ldpc;
- bool use_rts_for_aggregation;
+ u8 ht_greenfield_support:1,
+ stbc:1,
+ ldpc:1,
+ use_rts_for_aggregation:1;
u8 ht40_bands;
};
@@ -231,10 +237,10 @@ struct iwl_tt_params {
u32 tx_protection_entry;
u32 tx_protection_exit;
struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
- bool support_ct_kill;
- bool support_dynamic_smps;
- bool support_tx_protection;
- bool support_tx_backoff;
+ u8 support_ct_kill:1,
+ support_dynamic_smps:1,
+ support_tx_protection:1,
+ support_tx_backoff:1;
};
/*
@@ -277,8 +283,6 @@ struct iwl_pwr_tx_backoff {
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
* @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_ok: oldest version of the uCode API that is OK to load
- * without a warning, for use in transitions
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @max_inst_size: The maximal length of the fw inst section
* @max_data_size: The maximal length of the fw data section
@@ -314,6 +318,7 @@ struct iwl_pwr_tx_backoff {
* @smem_len: the length of SMEM
* @mq_rx_supported: multi-queue rx support
* @vht_mu_mimo_supported: VHT MU-MIMO support
+ * @rf_id: need to read rf_id to determine the firmware image
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -323,51 +328,51 @@ struct iwl_cfg {
/* params specific to an individual device within a device family */
const char *name;
const char *fw_name_pre;
- const unsigned int ucode_api_max;
- const unsigned int ucode_api_ok;
- const unsigned int ucode_api_min;
- const enum iwl_device_family device_family;
- const u32 max_data_size;
- const u32 max_inst_size;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- u8 non_shared_ant;
- bool bt_shared_single_ant;
- u16 nvm_ver;
- u16 nvm_calib_ver;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
/* params likely to change within a device family */
const struct iwl_ht_params *ht_params;
const struct iwl_eeprom_params *eeprom_params;
- enum iwl_led_mode led_mode;
- const bool rx_with_siso_diversity;
- const bool internal_wimax_coex;
- const bool host_interrupt_operation_mode;
- bool high_temp;
- u8 nvm_hw_section_num;
- bool mac_addr_from_csr;
- bool lp_xtal_workaround;
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
- bool no_power_up_nic_in_init;
const char *default_nvm_file_B_step;
const char *default_nvm_file_C_step;
- netdev_features_t features;
- unsigned int max_rx_agg_size;
- bool disable_dummy_notification;
- unsigned int max_tx_agg_size;
- unsigned int max_ht_ampdu_exponent;
- unsigned int max_vht_ampdu_exponent;
- const u32 dccm_offset;
- const u32 dccm_len;
- const u32 dccm2_offset;
- const u32 dccm2_len;
- const u32 smem_offset;
- const u32 smem_len;
const struct iwl_tt_params *thermal_params;
- bool apmg_not_supported;
- bool mq_rx_supported;
- bool vht_mu_mimo_supported;
+ enum iwl_device_family device_family;
+ enum iwl_led_mode led_mode;
+ u32 max_data_size;
+ u32 max_inst_size;
+ netdev_features_t features;
+ u32 dccm_offset;
+ u32 dccm_len;
+ u32 dccm2_offset;
+ u32 dccm2_len;
+ u32 smem_offset;
+ u32 smem_len;
+ u16 nvm_ver;
+ u16 nvm_calib_ver;
+ u16 rx_with_siso_diversity:1,
+ bt_shared_single_ant:1,
+ internal_wimax_coex:1,
+ host_interrupt_operation_mode:1,
+ high_temp:1,
+ mac_addr_from_csr:1,
+ lp_xtal_workaround:1,
+ no_power_up_nic_in_init:1,
+ disable_dummy_notification:1,
+ apmg_not_supported:1,
+ mq_rx_supported:1,
+ vht_mu_mimo_supported:1,
+ rf_id:1;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ u8 non_shared_ant;
+ u8 nvm_hw_section_num;
+ u8 max_rx_agg_size;
+ u8 max_tx_agg_size;
+ u8 max_ht_ampdu_exponent;
+ u8 max_vht_ampdu_exponent;
+ u8 ucode_api_max;
+ u8 ucode_api_min;
};
/*
@@ -438,8 +443,10 @@ extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8265_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
+extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
+extern const struct iwl_cfg iwl9260lc_2ac_cfg;
extern const struct iwl_cfg iwl5165_2ac_cfg;
#endif /* CONFIG_IWLMVM */
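The iwl-config.h rewrite above is a size optimization rather than a functional change: scattered const bool members become 1-bit bitfields, over-wide integers are narrowed (num_of_queues and max_ll_items to u8, eeprom_size to u16), and members are reordered roughly largest-first so the compiler adds no padding. The same technique in miniature, with hypothetical fields (kernel types from <linux/types.h> assumed):

struct before {			/* each bool takes a byte, plus tail padding */
	u32 a;
	bool f1, f2, f3, f4;
	u16 b;
};

struct after {			/* four flags share one byte; largest first */
	u32 a;
	u16 b;
	u8 f1:1, f2:1, f3:1, f4:1;
};
/* On a typical ABI, sizeof(struct before) == 12 and sizeof(struct after) == 8. */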
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index b978f6cae..b52913448 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -108,6 +108,17 @@
#define CSR_HW_REV (CSR_BASE+0x028)
/*
+ * RF ID revision info
+ * Bit fields:
+ * 31:24: Reserved (set to 0x0)
+ * 23:12: Type
+ * 11:8: Step (A - 0x0, B - 0x1, etc)
+ * 7:4: Dash
+ * 3:0: Flavor
+ */
+#define CSR_HW_RF_ID (CSR_BASE+0x09c)
+
+/*
* EEPROM and OTP (one-time-programmable) memory reads
*
* NOTE: Device must be awake, initialized via apm_ops.init(),
@@ -333,6 +344,10 @@ enum {
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
+/* RF_ID value */
+#define CSR_HW_RF_ID_TYPE_JF (0x00105000)
+#define CSR_HW_RF_ID_TYPE_LC (0x00101000)
+
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
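Given the CSR_HW_RF_ID layout documented in the hunk above (type in bits 23:12, step in 11:8, dash in 7:4, flavor in 3:0), decoding the register is plain mask-and-shift. The helper macros below are illustrative, not the driver's:

#define RF_ID_TYPE(id)		(((id) >> 12) & 0xfff)	/* bits 23:12 */
#define RF_ID_STEP(id)		(((id) >> 8) & 0xf)	/* bits 11:8  */
#define RF_ID_DASH(id)		(((id) >> 4) & 0xf)	/* bits 7:4   */
#define RF_ID_FLAVOR(id)	((id) & 0xf)		/* bits 3:0   */

/* e.g. RF_ID_TYPE(CSR_HW_RF_ID_TYPE_JF) == 0x105; step, dash, flavor are 0 */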
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 7ce381ba3..c695125d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -117,7 +117,7 @@ struct iwl_drv {
const struct iwl_cfg *cfg;
int fw_index; /* firmware we're trying to load */
- char firmware_name[32]; /* name of firmware file to load */
+ char firmware_name[64]; /* name of firmware file to load */
struct completion request_firmware_complete;
@@ -179,6 +179,8 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.dbg_conf_tlv[i]);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
kfree(drv->fw.dbg_trigger_tlv[i]);
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++)
+ kfree(drv->fw.dbg_mem_tlv[i]);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
@@ -209,20 +211,12 @@ static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
static void iwl_req_fw_callback(const struct firmware *ucode_raw,
void *context);
-#define UCODE_EXPERIMENTAL_INDEX 100
-#define UCODE_EXPERIMENTAL_TAG "exp"
-
static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
const char *name_pre = drv->cfg->fw_name_pre;
char tag[8];
if (first) {
-#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
- drv->fw_index = UCODE_EXPERIMENTAL_INDEX;
- strcpy(tag, UCODE_EXPERIMENTAL_TAG);
- } else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
-#endif
drv->fw_index = drv->cfg->ucode_api_max;
sprintf(tag, "%d", drv->fw_index);
} else {
@@ -238,9 +232,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "/*(DEBLOBBED)*/",
name_pre, tag);
- IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
- (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
- ? "EXPERIMENTAL " : "",
+ IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
return reject_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
@@ -284,6 +276,7 @@ struct iwl_firmware_pieces {
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
+ struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
};
/*
@@ -538,9 +531,7 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
}
if (build)
- sprintf(buildstr, " build %u%s", build,
- (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
- ? " (EXP)" : "");
+ sprintf(buildstr, " build %u", build);
else
buildstr[0] = '\0';
@@ -624,9 +615,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
build = le32_to_cpu(ucode->build);
if (build)
- sprintf(buildstr, " build %u%s", build,
- (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
- ? " (EXP)" : "");
+ sprintf(buildstr, " build %u", build);
else
buildstr[0] = '\0';
@@ -1028,6 +1017,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
gscan_capa = true;
break;
+ case IWL_UCODE_TLV_FW_MEM_SEG: {
+ struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
+ (void *)tlv_data;
+ u32 type;
+
+ if (tlv_len != (sizeof(*dbg_mem)))
+ goto invalid_tlv_len;
+
+ type = le32_to_cpu(dbg_mem->data_type);
+ drv->fw.dbg_dynamic_mem = true;
+
+ if (type >= ARRAY_SIZE(drv->fw.dbg_mem_tlv)) {
+ IWL_ERR(drv,
+ "Skip unknown dbg mem segment: %u\n",
+ dbg_mem->data_type);
+ break;
+ }
+
+ if (pieces->dbg_mem_tlv[type]) {
+ IWL_ERR(drv,
+ "Ignore duplicate mem segment: %u\n",
+ dbg_mem->data_type);
+ break;
+ }
+
+ IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
+ dbg_mem->data_type);
+
+ pieces->dbg_mem_tlv[type] = dbg_mem;
+ break;
+ }
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
@@ -1193,7 +1213,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
int err;
struct iwl_firmware_pieces *pieces;
const unsigned int api_max = drv->cfg->ucode_api_max;
- unsigned int api_ok = drv->cfg->ucode_api_ok;
const unsigned int api_min = drv->cfg->ucode_api_min;
size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
u32 api_ver;
@@ -1206,20 +1225,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
- if (!api_ok)
- api_ok = api_max;
-
pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
if (!pieces)
return;
- if (!ucode_raw) {
- if (drv->fw_index <= api_ok)
- IWL_ERR(drv,
- "request for firmware file '%s' failed.\n",
- drv->firmware_name);
+ if (!ucode_raw)
goto try_again;
- }
IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
drv->firmware_name, ucode_raw->size);
@@ -1252,28 +1263,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* firmware filename ... but we don't check for that and only rely
* on the API version read from firmware header from here on forward
*/
- /* no api version check required for experimental uCode */
- if (drv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
- if (api_ver < api_min || api_ver > api_max) {
- IWL_ERR(drv,
- "Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
- goto try_again;
- }
-
- if (api_ver < api_ok) {
- if (api_ok != api_max)
- IWL_ERR(drv, "Firmware has old API version, "
- "expected v%u through v%u, got v%u.\n",
- api_ok, api_max, api_ver);
- else
- IWL_ERR(drv, "Firmware has old API version, "
- "expected v%u, got v%u.\n",
- api_max, api_ver);
- IWL_ERR(drv, "New firmware can be obtained from "
- "http://www.intellinuxwireless.org/.\n");
- }
+ if (api_ver < api_min || api_ver > api_max) {
+ IWL_ERR(drv,
+ "Driver unable to support your firmware API. "
+ "Driver supports v%u, firmware is v%u.\n",
+ api_max, api_ver);
+ goto try_again;
}
/*
@@ -1362,6 +1357,17 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
}
}
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) {
+ if (pieces->dbg_mem_tlv[i]) {
+ drv->fw.dbg_mem_tlv[i] =
+ kmemdup(pieces->dbg_mem_tlv[i],
+ sizeof(*drv->fw.dbg_mem_tlv[i]),
+ GFP_KERNEL);
+ if (!drv->fw.dbg_mem_tlv[i])
+ goto out_free_fw;
+ }
+ }
+
/* Now that we can no longer fail, copy information */
/*
@@ -1554,9 +1560,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
.power_level = IWL_POWER_INDEX_1,
.d0i3_disable = true,
.d0i3_entry_delay = 1000,
-#ifndef CONFIG_IWLWIFI_UAPSD
- .uapsd_disable = true,
-#endif /* CONFIG_IWLWIFI_UAPSD */
+ .uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
/* the rest are 0 by default */
};
IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1675,12 +1679,9 @@ module_param_named(lar_disable, iwlwifi_mod_params.lar_disable,
MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
- bool, S_IRUGO | S_IWUSR);
-#ifdef CONFIG_IWLWIFI_UAPSD
-MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
-#else
-MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: Y)");
-#endif
+ uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(uapsd_disable,
+ "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
/*
* set bt_coex_active to true, uCode will do kill/defer
@@ -1726,4 +1727,4 @@ MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)");
module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool,
S_IRUGO);
-MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities");
+MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)");
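With the experimental-firmware index and the API_OK warning tier removed, firmware probing reduces to one descending walk: request the file suffixed with ucode_api_max, and on failure retry with the next lower number until ucode_api_min. A hypothetical synchronous sketch of that walk (the driver itself uses reject_firmware_nowait() and a completion callback):

extern int try_load(const char *name);	/* hypothetical loader */

static int load_best_firmware(int api_max, int api_min,
			      const char *prefix, char *name, size_t len)
{
	int idx;

	for (idx = api_max; idx >= api_min; idx--) {
		snprintf(name, len, "%s-%d.ucode", prefix, idx);
		if (try_load(name) == 0)
			return idx;	/* highest API version that loaded */
	}
	return -ENOENT;	/* nothing between api_min and api_max found */
}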
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index c15f5be85..bf1b69aec 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -390,10 +390,10 @@ iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data,
int n_channels, s8 max_txpower_avg)
{
int ch_idx;
- enum ieee80211_band band;
+ enum nl80211_band band;
band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
- IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+ NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
struct ieee80211_channel *chan = &data->channels[ch_idx];
@@ -526,7 +526,7 @@ static void iwl_init_band_reference(const struct iwl_cfg *cfg,
static void iwl_mod_ht40_chan_info(struct device *dev,
struct iwl_nvm_data *data, int n_channels,
- enum ieee80211_band band, u16 channel,
+ enum nl80211_band band, u16 channel,
const struct iwl_eeprom_channel *eeprom_ch,
u8 clear_ht40_extension_channel)
{
@@ -548,7 +548,7 @@ static void iwl_mod_ht40_chan_info(struct device *dev,
IWL_DEBUG_EEPROM(dev,
"HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
channel,
- band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
+ band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
CHECK_AND_PRINT(IBSS),
CHECK_AND_PRINT(ACTIVE),
CHECK_AND_PRINT(RADAR),
@@ -606,8 +606,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
n_channels++;
channel->hw_value = eeprom_ch_array[ch_idx];
- channel->band = (band == 1) ? IEEE80211_BAND_2GHZ
- : IEEE80211_BAND_5GHZ;
+ channel->band = (band == 1) ? NL80211_BAND_2GHZ
+ : NL80211_BAND_5GHZ;
channel->center_freq =
ieee80211_channel_to_frequency(
channel->hw_value, channel->band);
@@ -677,15 +677,15 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
for (band = 6; band <= 7; band++) {
- enum ieee80211_band ieeeband;
+ enum nl80211_band ieeeband;
iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
&eeprom_ch_count, &eeprom_ch_info,
&eeprom_ch_array);
/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
- ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ
- : IEEE80211_BAND_5GHZ;
+ ieeeband = (band == 6) ? NL80211_BAND_2GHZ
+ : NL80211_BAND_5GHZ;
/* Loop through each band adding each of the channels */
for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
@@ -708,7 +708,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
- int n_channels, enum ieee80211_band band)
+ int n_channels, enum nl80211_band band)
{
struct ieee80211_channel *chan = &data->channels[0];
int n = 0, idx = 0;
@@ -734,7 +734,7 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band,
+ enum nl80211_band band,
u8 tx_chains, u8 rx_chains)
{
int max_bit_rate = 0;
@@ -813,22 +813,22 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
int n_used = 0;
struct ieee80211_supported_band *sband;
- sband = &data->bands[IEEE80211_BAND_2GHZ];
- sband->band = IEEE80211_BAND_2GHZ;
+ sband = &data->bands[NL80211_BAND_2GHZ];
+ sband->band = NL80211_BAND_2GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
sband->n_bitrates = N_RATES_24;
n_used += iwl_init_sband_channels(data, sband, n_channels,
- IEEE80211_BAND_2GHZ);
- iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
data->valid_tx_ant, data->valid_rx_ant);
- sband = &data->bands[IEEE80211_BAND_5GHZ];
- sband->band = IEEE80211_BAND_5GHZ;
+ sband = &data->bands[NL80211_BAND_5GHZ];
+ sband->band = NL80211_BAND_5GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
sband->n_bitrates = N_RATES_52;
n_used += iwl_init_sband_channels(data, sband, n_channels,
- IEEE80211_BAND_5GHZ);
- iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
+ NL80211_BAND_5GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
data->valid_tx_ant, data->valid_rx_ant);
if (n_channels != n_used)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
index ad2b83466..1f4e50289 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
@@ -98,7 +98,8 @@ struct iwl_nvm_data {
s8 max_tx_pwr_half_dbm;
bool lar_enabled;
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+ bool vht160_supported;
+ struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
struct ieee80211_channel channels[];
};
@@ -133,12 +134,12 @@ int iwl_nvm_check_version(struct iwl_nvm_data *data,
int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
- int n_channels, enum ieee80211_band band);
+ int n_channels, enum nl80211_band band);
void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band,
+ enum nl80211_band band,
u8 tx_chains, u8 rx_chains);
#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 582008a66..270f39ecd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -321,6 +321,9 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
/* Write index table */
#define RFH_Q0_FRBDCB_WIDX 0xA08080
#define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4)
+/* Write index table - shadow registers */
+#define RFH_Q0_FRBDCB_WIDX_TRG 0x1C80
+#define RFH_Q_FRBDCB_WIDX_TRG(q) (RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4)
/* Read index table */
#define RFH_Q0_FRBDCB_RIDX 0xA080C0
#define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
index 8425e1a58..09b7ea28f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
@@ -105,6 +105,7 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_RB = 11,
IWL_FW_ERROR_DUMP_PAGING = 12,
IWL_FW_ERROR_DUMP_RADIO_REG = 13,
+ IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14,
IWL_FW_ERROR_DUMP_MAX,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index 15ec4e290..37dc09e8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -142,6 +142,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_DBG_CONF = 39,
IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
+ IWL_UCODE_TLV_FW_MEM_SEG = 51,
};
struct iwl_ucode_tlv {
@@ -245,7 +246,6 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
/**
* enum iwl_ucode_tlv_api - ucode api
- * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan.
* @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
@@ -260,12 +260,11 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
enum iwl_ucode_tlv_api {
- IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3,
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14,
IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
- IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
+ IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24,
IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27,
@@ -324,6 +323,9 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
* @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
* regular image.
+ * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared
+ * memory addresses from the firmware.
+ * @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -361,6 +363,8 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75,
IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76,
IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
+ IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__
@@ -491,6 +495,37 @@ enum iwl_fw_dbg_monitor_mode {
};
/**
+ * enum iwl_fw_dbg_mem_seg_type - data types for dumping on error
+ *
+ * @FW_DBG_MEM_DCCM_LMAC: the data type is DCCM_LMAC
+ * @FW_DBG_MEM_DCCM_UMAC: the data type is DCCM_UMAC
+ * @FW_DBG_MEM_SMEM: the data type is SMEM
+ */
+enum iwl_fw_dbg_mem_seg_type {
+ FW_DBG_MEM_DCCM_LMAC = 0,
+ FW_DBG_MEM_DCCM_UMAC,
+ FW_DBG_MEM_SMEM,
+
+ /* Must be last */
+ FW_DBG_MEM_MAX,
+};
+
+/**
+ * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments
+ *
+ * @data_type: &enum iwl_fw_dbg_mem_seg_type
+ * @ofs: the memory segment offset
+ * @len: the memory segment length, in bytes
+ *
+ * This parses IWL_UCODE_TLV_FW_MEM_SEG
+ */
+struct iwl_fw_dbg_mem_seg_tlv {
+ __le32 data_type;
+ __le32 ofs;
+ __le32 len;
+} __packed;
+
+/**
* struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
*
* @version: version of the TLV - currently 0
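struct iwl_fw_dbg_mem_seg_tlv fixes the wire format of the new TLV (three little-endian 32-bit words), and the parser added to iwl-drv.c above applies the usual TLV discipline: exact length check, endian conversion, bounds check against the enum, and duplicate rejection. A condensed sketch of that discipline, mirroring the hunk:

static int parse_mem_seg(void *tlv_data, u32 tlv_len,
			 struct iwl_fw_dbg_mem_seg_tlv *out[FW_DBG_MEM_MAX])
{
	struct iwl_fw_dbg_mem_seg_tlv *seg = tlv_data;
	u32 type;

	if (tlv_len != sizeof(*seg))	/* exact-size TLV only */
		return -EINVAL;

	type = le32_to_cpu(seg->data_type);
	if (type >= FW_DBG_MEM_MAX)	/* unknown segment type */
		return -EINVAL;
	if (out[type])			/* reject duplicates */
		return -EEXIST;

	out[type] = seg;
	return 0;
}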
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
index 2942571c6..e461d6318 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
@@ -286,6 +286,8 @@ struct iwl_fw {
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+ struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
+ bool dbg_dynamic_mem;
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
u8 dbg_dest_reg_num;
struct iwl_gscan_capabilities gscan_capa;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index d1a5dd160..6c5c2f9f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -92,6 +92,11 @@ enum iwl_amsdu_size {
IWL_AMSDU_12K = 2,
};
+enum iwl_uapsd_disable {
+ IWL_DISABLE_UAPSD_BSS = BIT(0),
+ IWL_DISABLE_UAPSD_P2P_CLIENT = BIT(1),
+};
+
/**
* struct iwl_mod_params
*
@@ -109,7 +114,8 @@ enum iwl_amsdu_size {
* @debug_level: levels are IWL_DL_*
* @ant_coupling: antenna coupling in dB, default = 0
* @nvm_file: specifies an external NVM file
- * @uapsd_disable: disable U-APSD, default = 1
+ * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
+ * IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
* @d0i3_disable: disable d0i3, default = 1,
* @d0i3_entry_delay: time to wait after no refs are taken before
* entering D0i3 (in msecs)
@@ -131,7 +137,7 @@ struct iwl_mod_params {
#endif
int ant_coupling;
char *nvm_file;
- bool uapsd_disable;
+ u32 uapsd_disable;
bool d0i3_disable;
unsigned int d0i3_entry_delay;
bool lar_disable;
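Since uapsd_disable is now a bitmask rather than a bool, call sites test the
bit that matches the interface role. A minimal sketch assuming the two bits
defined above (the helper itself is not part of the patch):

static bool iwl_uapsd_disabled_for(enum nl80211_iftype type)
{
	u32 mask = iwlwifi_mod_params.uapsd_disable;

	switch (type) {
	case NL80211_IFTYPE_STATION:
		return mask & IWL_DISABLE_UAPSD_BSS;
	case NL80211_IFTYPE_P2P_CLIENT:
		return mask & IWL_DISABLE_UAPSD_P2P_CLIENT;
	default:
		return true;	/* U-APSD is only meaningful for client roles */
	}
}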
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 93a689583..21653fee8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -288,6 +288,9 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
!data->sku_cap_band_52GHz_enable)
continue;
+ if (ch_flags & NVM_CHANNEL_160MHZ)
+ data->vht160_supported = true;
+
if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
/*
* Channels might become valid later if lar is
@@ -308,7 +311,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->hw_value = nvm_chan[ch_idx];
channel->band = (ch_idx < num_2ghz_channels) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
channel->center_freq =
ieee80211_channel_to_frequency(
channel->hw_value, channel->band);
@@ -320,7 +323,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
* is not used in mvm, and is used for backwards compatibility
*/
channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
- is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
+ is_5ghz = channel->band == NL80211_BAND_5GHZ;
/* don't put limitations in case we're using LAR */
if (!lar_supported)
@@ -331,17 +334,20 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->flags = 0;
IWL_DEBUG_EEPROM(dev,
- "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
+ "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n",
channel->hw_value,
is_5ghz ? "5.2" : "2.4",
+ ch_flags,
CHECK_AND_PRINT_I(VALID),
CHECK_AND_PRINT_I(IBSS),
CHECK_AND_PRINT_I(ACTIVE),
CHECK_AND_PRINT_I(RADAR),
- CHECK_AND_PRINT_I(WIDE),
CHECK_AND_PRINT_I(INDOOR_ONLY),
CHECK_AND_PRINT_I(GO_CONCURRENT),
- ch_flags,
+ CHECK_AND_PRINT_I(WIDE),
+ CHECK_AND_PRINT_I(40MHZ),
+ CHECK_AND_PRINT_I(80MHZ),
+ CHECK_AND_PRINT_I(160MHZ),
channel->max_power,
((ch_flags & NVM_CHANNEL_IBSS) &&
!(ch_flags & NVM_CHANNEL_RADAR))
@@ -370,6 +376,10 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
max_ampdu_exponent <<
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+ if (data->vht160_supported)
+ vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ IEEE80211_VHT_CAP_SHORT_GI_160;
+
if (cfg->vht_mu_mimo_supported)
vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
@@ -439,22 +449,22 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
&ch_section[NVM_CHANNELS_FAMILY_8000],
lar_supported);
- sband = &data->bands[IEEE80211_BAND_2GHZ];
- sband->band = IEEE80211_BAND_2GHZ;
+ sband = &data->bands[NL80211_BAND_2GHZ];
+ sband->band = NL80211_BAND_2GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
sband->n_bitrates = N_RATES_24;
n_used += iwl_init_sband_channels(data, sband, n_channels,
- IEEE80211_BAND_2GHZ);
- iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
tx_chains, rx_chains);
- sband = &data->bands[IEEE80211_BAND_5GHZ];
- sband->band = IEEE80211_BAND_5GHZ;
+ sband = &data->bands[NL80211_BAND_5GHZ];
+ sband->band = NL80211_BAND_5GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
sband->n_bitrates = N_RATES_52;
n_used += iwl_init_sband_channels(data, sband, n_channels,
- IEEE80211_BAND_5GHZ);
- iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
+ NL80211_BAND_5GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
tx_chains, rx_chains);
if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
@@ -781,7 +791,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *regd;
int size_of_regd;
struct ieee80211_reg_rule *rule;
- enum ieee80211_band band;
+ enum nl80211_band band;
int center_freq, prev_center_freq = 0;
int valid_rules = 0;
bool new_rule;
@@ -809,7 +819,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
band = (ch_idx < NUM_2GHZ_CHANNELS) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
band);
new_rule = false;
@@ -857,7 +867,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
IWL_DEBUG_DEV(dev, IWL_DL_LAR,
"Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
center_freq,
- band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
+ band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
CHECK_AND_PRINT_I(VALID),
CHECK_AND_PRINT_I(ACTIVE),
CHECK_AND_PRINT_I(RADAR),
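The arguments 40MHZ/80MHZ/160MHZ passed to CHECK_AND_PRINT_I are legal C even
though they start with digits: the macro (defined earlier in iwl-nvm-parse.c
and reproduced here from memory, so treat the exact definition as an
assumption) token-pastes its argument onto the NVM_CHANNEL_ prefix before it
would ever need to stand alone as an identifier.

#define CHECK_AND_PRINT_I(x)	\
	((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")

/*
 * CHECK_AND_PRINT_I(160MHZ) therefore expands to
 *	((ch_flags & NVM_CHANNEL_160MHZ) ? "160MHZ " : "")
 * which is why the updated format string carries one %s per flag.
 */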
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
index 4a4dea087..7beba9ae5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -72,8 +73,6 @@
#include "iwl-trans.h"
#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
-#define IWL_NUM_PAPD_CH_GROUPS 9
-#define IWL_NUM_TXP_CH_GROUPS 9
struct iwl_phy_db_entry {
u16 size;
@@ -86,14 +85,18 @@ struct iwl_phy_db_entry {
* @cfg: phy configuration.
* @calib_nch: non channel specific calibration data.
* @calib_ch: channel specific calibration data.
+ * @n_group_papd: number of entries in papd channel group.
* @calib_ch_group_papd: calibration data related to papd channel group.
+ * @n_group_txp: number of entries in tx power channel group.
* @calib_ch_group_txp: calibration data related to tx power channel group.
*/
struct iwl_phy_db {
struct iwl_phy_db_entry cfg;
struct iwl_phy_db_entry calib_nch;
- struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
- struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
+ int n_group_papd;
+ struct iwl_phy_db_entry *calib_ch_group_papd;
+ int n_group_txp;
+ struct iwl_phy_db_entry *calib_ch_group_txp;
struct iwl_trans *trans;
};
@@ -143,6 +146,9 @@ struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
phy_db->trans = trans;
+ phy_db->n_group_txp = -1;
+ phy_db->n_group_papd = -1;
+
/* TODO: add default values of the phy db. */
return phy_db;
}
@@ -166,11 +172,11 @@ iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
case IWL_PHY_DB_CALIB_NCH:
return &phy_db->calib_nch;
case IWL_PHY_DB_CALIB_CHG_PAPD:
- if (chg_id >= IWL_NUM_PAPD_CH_GROUPS)
+ if (chg_id >= phy_db->n_group_papd)
return NULL;
return &phy_db->calib_ch_group_papd[chg_id];
case IWL_PHY_DB_CALIB_CHG_TXP:
- if (chg_id >= IWL_NUM_TXP_CH_GROUPS)
+ if (chg_id >= phy_db->n_group_txp)
return NULL;
return &phy_db->calib_ch_group_txp[chg_id];
default:
@@ -202,17 +208,21 @@ void iwl_phy_db_free(struct iwl_phy_db *phy_db)
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
- for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
+
+ for (i = 0; i < phy_db->n_group_papd; i++)
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
- for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
+ kfree(phy_db->calib_ch_group_papd);
+
+ for (i = 0; i < phy_db->n_group_txp; i++)
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
+ kfree(phy_db->calib_ch_group_txp);
kfree(phy_db);
}
IWL_EXPORT_SYMBOL(iwl_phy_db_free);
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
- gfp_t alloc_ctx)
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
+ struct iwl_rx_packet *pkt)
{
struct iwl_calib_res_notif_phy_db *phy_db_notif =
(struct iwl_calib_res_notif_phy_db *)pkt->data;
@@ -224,16 +234,42 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
if (!phy_db)
return -EINVAL;
- if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
- type == IWL_PHY_DB_CALIB_CHG_TXP)
+ if (type == IWL_PHY_DB_CALIB_CHG_PAPD) {
chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);
+ if (phy_db && !phy_db->calib_ch_group_papd) {
+ /*
+ * Firmware sends the largest index first, so we can use
+ * it to know how much we should allocate.
+ */
+ phy_db->calib_ch_group_papd = kcalloc(chg_id + 1,
+ sizeof(struct iwl_phy_db_entry),
+ GFP_ATOMIC);
+ if (!phy_db->calib_ch_group_papd)
+ return -ENOMEM;
+ phy_db->n_group_papd = chg_id + 1;
+ }
+ } else if (type == IWL_PHY_DB_CALIB_CHG_TXP) {
+ chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);
+ if (phy_db && !phy_db->calib_ch_group_txp) {
+ /*
+ * Firmware sends the largest index first, so we can use
+ * it to know how much we should allocate.
+ */
+ phy_db->calib_ch_group_txp = kcalloc(chg_id + 1,
+ sizeof(struct iwl_phy_db_entry),
+ GFP_ATOMIC);
+ if (!phy_db->calib_ch_group_txp)
+ return -ENOMEM;
+ phy_db->n_group_txp = chg_id + 1;
+ }
+ }
entry = iwl_phy_db_get_section(phy_db, type, chg_id);
if (!entry)
return -EINVAL;
kfree(entry->data);
- entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx);
+ entry->data = kmemdup(phy_db_notif->data, size, GFP_ATOMIC);
if (!entry->data) {
entry->size = 0;
return -ENOMEM;
@@ -296,7 +332,7 @@ static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
if (ch_index == 0xff)
return 0xff;
- for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
+ for (i = 0; i < phy_db->n_group_txp; i++) {
txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
if (!txp_chg)
return 0xff;
@@ -447,7 +483,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
/* Send all the PAPD channel specific data */
err = iwl_phy_db_send_all_channel_groups(phy_db,
IWL_PHY_DB_CALIB_CHG_PAPD,
- IWL_NUM_PAPD_CH_GROUPS);
+ phy_db->n_group_papd);
if (err) {
IWL_ERR(phy_db->trans,
"Cannot send channel specific PAPD groups\n");
@@ -457,7 +493,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
/* Send all the TXP channel specific data */
err = iwl_phy_db_send_all_channel_groups(phy_db,
IWL_PHY_DB_CALIB_CHG_TXP,
- IWL_NUM_TXP_CH_GROUPS);
+ phy_db->n_group_txp);
if (err) {
IWL_ERR(phy_db->trans,
"Cannot send channel specific TX power groups\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h
index 24103877e..d34de3f71 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h
@@ -73,8 +73,8 @@ struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans);
void iwl_phy_db_free(struct iwl_phy_db *phy_db);
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
- gfp_t alloc_ctx);
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
+ struct iwl_rx_packet *pkt);
int iwl_send_phy_db_data(struct iwl_phy_db *phy_db);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index c46e596e1..6c1d20ded 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -345,6 +347,16 @@ enum secure_load_status_reg {
#define TXF_READ_MODIFY_DATA (0xa00448)
#define TXF_READ_MODIFY_ADDR (0xa0044c)
+/* UMAC Internal Tx Fifo */
+#define TXF_CPU2_FIFO_ITEM_CNT (0xA00538)
+#define TXF_CPU2_WR_PTR (0xA00514)
+#define TXF_CPU2_RD_PTR (0xA00510)
+#define TXF_CPU2_FENCE_PTR (0xA00518)
+#define TXF_CPU2_LOCK_FENCE (0xA00524)
+#define TXF_CPU2_NUM (0xA0053C)
+#define TXF_CPU2_READ_MODIFY_DATA (0xA00548)
+#define TXF_CPU2_READ_MODIFY_ADDR (0xA0054C)
+
/* Radio registers access */
#define RSP_RADIO_CMD (0xa02804)
#define RSP_RADIO_RDDAT (0xa02814)
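The new TXF_CPU2_* registers mirror the existing LMAC TXF set, so a debug
helper can poll them through the usual periphery accessor. A sketch only: the
function is hypothetical, while iwl_read_prph is the existing accessor from
iwl-io.h.

static void iwl_dump_cpu2_txf_ptrs(struct iwl_trans *trans)
{
	u32 wr = iwl_read_prph(trans, TXF_CPU2_WR_PTR);
	u32 rd = iwl_read_prph(trans, TXF_CPU2_RD_PTR);
	u32 cnt = iwl_read_prph(trans, TXF_CPU2_FIFO_ITEM_CNT);

	/* the item count should track the wr/rd pointer distance */
	IWL_DEBUG_INFO(trans, "UMAC TXF: wr=0x%x rd=0x%x items=%u\n",
		       wr, rd, cnt);
}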
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 91d74b3f6..8193d36ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -519,7 +521,7 @@ struct iwl_trans;
struct iwl_trans_txq_scd_cfg {
u8 fifo;
- s8 sta_id;
+ u8 sta_id;
u8 tid;
bool aggregate;
int frame_limit;
@@ -751,6 +753,7 @@ enum iwl_plat_pm_mode {
* @dev - pointer to struct device * that represents the device
* @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
* 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
+ * @hw_rf_id: a u32 with the device RF ID
* @hw_id: a u32 with the ID of the device / sub-device.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
@@ -795,6 +798,7 @@ struct iwl_trans {
struct device *dev;
u32 max_skb_frags;
u32 hw_rev;
+ u32 hw_rf_id;
u32 hw_id;
char hw_id_str[52];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 23e7e2937..2e06dfc1c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o coex.o coex_legacy.o
+iwlmvm-y += power.o coex.o
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 2e098f8e0..a63f5bbb1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -378,7 +378,7 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
chanctx_conf = rcu_dereference(vif->chanctx_conf);
if (!chanctx_conf ||
- chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+ chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) {
rcu_read_unlock();
return BT_COEX_INVALID_LUT;
}
@@ -411,9 +411,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
struct iwl_bt_coex_cmd bt_cmd = {};
u32 mode;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_send_bt_init_conf_old(mvm);
-
lockdep_assert_held(&mvm->mutex);
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
@@ -540,7 +537,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
/* If channel context is invalid or not on 2.4GHz .. */
if ((!chanctx_conf ||
- chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
+ chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) {
if (vif->type == NL80211_IFTYPE_STATION) {
/* ... relax constraints and disable rssi events */
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
@@ -728,12 +725,6 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
- iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
- return;
- }
-
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
@@ -755,12 +746,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
- iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
- return;
- }
-
lockdep_assert_held(&mvm->mutex);
/* Ignore updates if we are in force mode */
@@ -807,9 +792,6 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
enum iwl_bt_coex_lut_type lut_type;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
-
if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
return LINK_QUAL_AGG_TIME_LIMIT_DEF;
@@ -834,9 +816,6 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
enum iwl_bt_coex_lut_type lut_type;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
-
if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
return true;
@@ -864,9 +843,6 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
if (ant & mvm->cfg->non_shared_ant)
return true;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
-
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
BT_HIGH_TRAFFIC;
}
@@ -877,21 +853,15 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
if (mvm->cfg->bt_shared_single_ant)
return true;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
-
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
}
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
-
- if (band != IEEE80211_BAND_2GHZ)
+ if (band != NL80211_BAND_2GHZ)
return false;
return bt_activity >= BT_LOW_TRAFFIC;
@@ -903,7 +873,7 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
__le16 fc = hdr->frame_control;
bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm);
- if (info->band != IEEE80211_BAND_2GHZ)
+ if (info->band != NL80211_BAND_2GHZ)
return 0;
if (unlikely(mvm->bt_tx_prio))
@@ -937,12 +907,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
- iwl_mvm_bt_coex_vif_change_old(mvm);
- return;
- }
-
iwl_mvm_bt_coex_notif_handle(mvm);
}
@@ -955,12 +919,6 @@ void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
u8 __maybe_unused lower_bound, upper_bound;
u8 lut;
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
- iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
- return;
- }
-
if (!iwl_mvm_bt_is_plcr_supported(mvm))
return;
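Every branch deleted in this file had the same shape: an fw_has_api() check
gating a fallback into coex_legacy.c. For reference, the capability test
itself (from iwl-fw.h, quoted here from memory, so treat as a sketch) is just
a bitmap lookup:

static inline bool fw_has_api(const struct iwl_ucode_capabilities *capa,
			      iwl_ucode_tlv_api_t api)
{
	return test_bit((__force long)api, capa->_api);
}

With IWL_UCODE_TLV_API_BT_COEX_SPLIT dropped from the API enum at the top of
this patch, these call sites would no longer compile, which is why the legacy
file below is removed wholesale.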
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c
deleted file mode 100644
index 015045733..000000000
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c
+++ /dev/null
@@ -1,1315 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/ieee80211.h>
-#include <linux/etherdevice.h>
-#include <net/mac80211.h>
-
-#include "fw-api-coex.h"
-#include "iwl-modparams.h"
-#include "mvm.h"
-#include "iwl-debug.h"
-
-#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant) \
- [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) | \
- ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))
-
-static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
- BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
- BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
- BT_COEX_PRIO_TBL_PRIO_LOW, 0),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
- BT_COEX_PRIO_TBL_PRIO_LOW, 1),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
- BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
- BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
- BT_COEX_PRIO_TBL_DISABLED, 0),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
- BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
- BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
- BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
- 0, 0, 0, 0, 0, 0,
-};
-
-#undef EVENT_PRIO_ANT
-
-static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
-{
- if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
- return 0;
-
- return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
- sizeof(struct iwl_bt_coex_prio_tbl_cmd),
- &iwl_bt_prio_tbl);
-}
-
-static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
- cpu_to_le32(0xf0f0f0f0), /* 50% */
- cpu_to_le32(0xc0c0c0c0), /* 25% */
- cpu_to_le32(0xfcfcfcfc), /* 75% */
- cpu_to_le32(0xfefefefe), /* 87.5% */
-};
-
-static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
- {
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- },
- {
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- },
- {
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- },
-};
-
-static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
- {
- /* Tight */
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaeaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0x00004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
- },
- {
- /* Loose */
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
- },
- {
- /* Tx Tx disabled */
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xeeaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
- },
-};
-
-/* 20MHz / 40MHz below / 40Mhz above*/
-static const __le64 iwl_ci_mask[][3] = {
- /* dummy entry for channel 0 */
- {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
- {
- cpu_to_le64(0x0000001FFFULL),
- cpu_to_le64(0x0ULL),
- cpu_to_le64(0x00007FFFFFULL),
- },
- {
- cpu_to_le64(0x000000FFFFULL),
- cpu_to_le64(0x0ULL),
- cpu_to_le64(0x0003FFFFFFULL),
- },
- {
- cpu_to_le64(0x000003FFFCULL),
- cpu_to_le64(0x0ULL),
- cpu_to_le64(0x000FFFFFFCULL),
- },
- {
- cpu_to_le64(0x00001FFFE0ULL),
- cpu_to_le64(0x0ULL),
- cpu_to_le64(0x007FFFFFE0ULL),
- },
- {
- cpu_to_le64(0x00007FFF80ULL),
- cpu_to_le64(0x00007FFFFFULL),
- cpu_to_le64(0x01FFFFFF80ULL),
- },
- {
- cpu_to_le64(0x0003FFFC00ULL),
- cpu_to_le64(0x0003FFFFFFULL),
- cpu_to_le64(0x0FFFFFFC00ULL),
- },
- {
- cpu_to_le64(0x000FFFF000ULL),
- cpu_to_le64(0x000FFFFFFCULL),
- cpu_to_le64(0x3FFFFFF000ULL),
- },
- {
- cpu_to_le64(0x007FFF8000ULL),
- cpu_to_le64(0x007FFFFFE0ULL),
- cpu_to_le64(0xFFFFFF8000ULL),
- },
- {
- cpu_to_le64(0x01FFFE0000ULL),
- cpu_to_le64(0x01FFFFFF80ULL),
- cpu_to_le64(0xFFFFFE0000ULL),
- },
- {
- cpu_to_le64(0x0FFFF00000ULL),
- cpu_to_le64(0x0FFFFFFC00ULL),
- cpu_to_le64(0x0ULL),
- },
- {
- cpu_to_le64(0x3FFFC00000ULL),
- cpu_to_le64(0x3FFFFFF000ULL),
- cpu_to_le64(0x0)
- },
- {
- cpu_to_le64(0xFFFE000000ULL),
- cpu_to_le64(0xFFFFFF8000ULL),
- cpu_to_le64(0x0)
- },
- {
- cpu_to_le64(0xFFF8000000ULL),
- cpu_to_le64(0xFFFFFE0000ULL),
- cpu_to_le64(0x0)
- },
- {
- cpu_to_le64(0xFFC0000000ULL),
- cpu_to_le64(0x0ULL),
- cpu_to_le64(0x0ULL)
- },
-};
-
-enum iwl_bt_kill_msk {
- BT_KILL_MSK_DEFAULT,
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_MAX,
-};
-
-static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
- [BT_KILL_MSK_DEFAULT] = 0xfffffc00,
- [BT_KILL_MSK_NEVER] = 0xffffffff,
- [BT_KILL_MSK_ALWAYS] = 0,
-};
-
-static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
- {
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- },
- {
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_NEVER,
- },
- {
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_NEVER,
- },
- {
- BT_KILL_MSK_DEFAULT,
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_DEFAULT,
- },
-};
-
-static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
- {
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- },
- {
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- },
- {
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- },
- {
- BT_KILL_MSK_DEFAULT,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_DEFAULT,
- },
-};
-
-struct corunning_block_luts {
- u8 range;
- __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
-};
-
-/*
- * Ranges for the antenna coupling calibration / co-running block LUT:
- * LUT0: [ 0, 12[
- * LUT1: [12, 20[
- * LUT2: [20, 21[
- * LUT3: [21, 23[
- * LUT4: [23, 27[
- * LUT5: [27, 30[
- * LUT6: [30, 32[
- * LUT7: [32, 33[
- * LUT8: [33, - [
- */
-static const struct corunning_block_luts antenna_coupling_ranges[] = {
- {
- .range = 0,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 12,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 20,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 21,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 23,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 27,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 30,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 32,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 33,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
-};
-
-static enum iwl_bt_coex_lut_type
-iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
-{
- struct ieee80211_chanctx_conf *chanctx_conf;
- enum iwl_bt_coex_lut_type ret;
- u16 phy_ctx_id;
-
- /*
- * Checking that we hold mvm->mutex is a good idea, but the rate
- * control can't acquire the mutex since it runs in Tx path.
- * So this is racy in that case, but in the worst case, the AMPDU
- * size limit will be wrong for a short time which is not a big
- * issue.
- */
-
- rcu_read_lock();
-
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
-
- if (!chanctx_conf ||
- chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
- rcu_read_unlock();
- return BT_COEX_INVALID_LUT;
- }
-
- ret = BT_COEX_TX_DIS_LUT;
-
- if (mvm->cfg->bt_shared_single_ant) {
- rcu_read_unlock();
- return ret;
- }
-
- phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
-
- if (mvm->last_bt_ci_cmd_old.primary_ch_phy_id == phy_ctx_id)
- ret = le32_to_cpu(mvm->last_bt_notif_old.primary_ch_lut);
- else if (mvm->last_bt_ci_cmd_old.secondary_ch_phy_id == phy_ctx_id)
- ret = le32_to_cpu(mvm->last_bt_notif_old.secondary_ch_lut);
- /* else - default = TX TX disallowed */
-
- rcu_read_unlock();
-
- return ret;
-}
-
-int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
-{
- struct iwl_bt_coex_cmd_old *bt_cmd;
- struct iwl_host_cmd cmd = {
- .id = BT_CONFIG,
- .len = { sizeof(*bt_cmd), },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
- int ret;
- u32 flags;
-
- ret = iwl_send_bt_prio_tbl(mvm);
- if (ret)
- return ret;
-
- bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
- if (!bt_cmd)
- return -ENOMEM;
- cmd.data[0] = bt_cmd;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
- switch (mvm->bt_force_ant_mode) {
- case BT_FORCE_ANT_AUTO:
- flags = BT_COEX_AUTO_OLD;
- break;
- case BT_FORCE_ANT_BT:
- flags = BT_COEX_BT_OLD;
- break;
- case BT_FORCE_ANT_WIFI:
- flags = BT_COEX_WIFI_OLD;
- break;
- default:
- WARN_ON(1);
- flags = 0;
- }
-
- bt_cmd->flags = cpu_to_le32(flags);
- bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE);
- goto send_cmd;
- }
-
- bt_cmd->max_kill = 5;
- bt_cmd->bt4_antenna_isolation_thr =
- IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS;
- bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
- bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
- bt_cmd->bt4_tx_rx_max_freq0 = 15;
- bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
- bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
-
- flags = iwlwifi_mod_params.bt_coex_active ?
- BT_COEX_NW_OLD : BT_COEX_DISABLE_OLD;
- bt_cmd->flags = cpu_to_le32(flags);
-
- bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
- BT_VALID_BT_PRIO_BOOST |
- BT_VALID_MAX_KILL |
- BT_VALID_3W_TMRS |
- BT_VALID_KILL_ACK |
- BT_VALID_KILL_CTS |
- BT_VALID_REDUCED_TX_POWER |
- BT_VALID_LUT |
- BT_VALID_WIFI_RX_SW_PRIO_BOOST |
- BT_VALID_WIFI_TX_SW_PRIO_BOOST |
- BT_VALID_ANT_ISOLATION |
- BT_VALID_ANT_ISOLATION_THRS |
- BT_VALID_TXTX_DELTA_FREQ_THRS |
- BT_VALID_TXRX_MAX_FREQ_0 |
- BT_VALID_SYNC_TO_SCO |
- BT_VALID_TTC |
- BT_VALID_RRC);
-
- if (IWL_MVM_BT_COEX_SYNC2SCO)
- bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
-
- if (iwl_mvm_bt_is_plcr_supported(mvm)) {
- bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
- BT_VALID_CORUN_LUT_40);
- bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
- }
-
- if (IWL_MVM_BT_COEX_MPLUT) {
- bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
- bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
- }
-
- if (IWL_MVM_BT_COEX_TTC)
- bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
-
- if (iwl_mvm_bt_is_rrc_supported(mvm))
- bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
-
- if (mvm->cfg->bt_shared_single_ant)
- memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
- sizeof(iwl_single_shared_ant));
- else
- memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
- sizeof(iwl_combined_lookup));
-
- /* Take first Co-running block LUT to get started */
- memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
- sizeof(bt_cmd->bt4_corun_lut20));
- memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
- sizeof(bt_cmd->bt4_corun_lut40));
-
- memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
- sizeof(iwl_bt_prio_boost));
- bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
- bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
-
-send_cmd:
- memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
- memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
-
- kfree(bt_cmd);
- return ret;
-}
-
-static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm)
-{
- struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old;
- u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
- u32 ag = le32_to_cpu(notif->bt_activity_grading);
- struct iwl_bt_coex_cmd_old *bt_cmd;
- u8 ack_kill_msk, cts_kill_msk;
- struct iwl_host_cmd cmd = {
- .id = BT_CONFIG,
- .data[0] = &bt_cmd,
- .len = { sizeof(*bt_cmd), },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
- int ret = 0;
-
- lockdep_assert_held(&mvm->mutex);
-
- ack_kill_msk = iwl_bt_ack_kill_msk[ag][primary_lut];
- cts_kill_msk = iwl_bt_cts_kill_msk[ag][primary_lut];
-
- if (mvm->bt_ack_kill_msk[0] == ack_kill_msk &&
- mvm->bt_cts_kill_msk[0] == cts_kill_msk)
- return 0;
-
- mvm->bt_ack_kill_msk[0] = ack_kill_msk;
- mvm->bt_cts_kill_msk[0] = cts_kill_msk;
-
- bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
- if (!bt_cmd)
- return -ENOMEM;
- cmd.data[0] = bt_cmd;
- bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
-
- bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk]);
- bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk]);
- bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
- BT_VALID_KILL_ACK |
- BT_VALID_KILL_CTS);
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
-
- kfree(bt_cmd);
- return ret;
-}
-
-static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
- bool enable)
-{
- struct iwl_bt_coex_cmd_old *bt_cmd;
- /* Send ASYNC since this can be sent from an atomic context */
- struct iwl_host_cmd cmd = {
- .id = BT_CONFIG,
- .len = { sizeof(*bt_cmd), },
- .dataflags = { IWL_HCMD_DFL_DUP, },
- .flags = CMD_ASYNC,
- };
- struct iwl_mvm_sta *mvmsta;
- int ret;
-
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
- if (!mvmsta)
- return 0;
-
- /* nothing to do */
- if (mvmsta->bt_reduced_txpower == enable)
- return 0;
-
- bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
- if (!bt_cmd)
- return -ENOMEM;
- cmd.data[0] = bt_cmd;
- bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
-
- bt_cmd->valid_bit_msk =
- cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
- bt_cmd->bt_reduced_tx_power = sta_id;
-
- if (enable)
- bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
-
- IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
- enable ? "en" : "dis", sta_id);
-
- mvmsta->bt_reduced_txpower = enable;
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
-
- kfree(bt_cmd);
- return ret;
-}
-
-struct iwl_bt_iterator_data {
- struct iwl_bt_coex_profile_notif_old *notif;
- struct iwl_mvm *mvm;
- struct ieee80211_chanctx_conf *primary;
- struct ieee80211_chanctx_conf *secondary;
- bool primary_ll;
-};
-
-static inline
-void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool enable, int rssi)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- mvmvif->bf_data.last_bt_coex_event = rssi;
- mvmvif->bf_data.bt_coex_max_thold =
- enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
- mvmvif->bf_data.bt_coex_min_thold =
- enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
-}
-
-/* must be called under rcu_read_lock */
-static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_bt_iterator_data *data = _data;
- struct iwl_mvm *mvm = data->mvm;
- struct ieee80211_chanctx_conf *chanctx_conf;
- enum ieee80211_smps_mode smps_mode;
- u32 bt_activity_grading;
- int ave_rssi;
-
- lockdep_assert_held(&mvm->mutex);
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- /* default smps_mode for BSS / P2P client is AUTOMATIC */
- smps_mode = IEEE80211_SMPS_AUTOMATIC;
- break;
- case NL80211_IFTYPE_AP:
- if (!mvmvif->ap_ibss_active)
- return;
- break;
- default:
- return;
- }
-
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
-
- /* If channel context is invalid or not on 2.4GHz .. */
- if ((!chanctx_conf ||
- chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
- if (vif->type == NL80211_IFTYPE_STATION) {
- /* ... relax constraints and disable rssi events */
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
- smps_mode);
- iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
- false);
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
- }
- return;
- }
-
- bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
- if (bt_activity_grading >= BT_HIGH_TRAFFIC)
- smps_mode = IEEE80211_SMPS_STATIC;
- else if (bt_activity_grading >= BT_LOW_TRAFFIC)
- smps_mode = vif->type == NL80211_IFTYPE_AP ?
- IEEE80211_SMPS_OFF :
- IEEE80211_SMPS_DYNAMIC;
-
- /* relax SMPS contraints for next association */
- if (!vif->bss_conf.assoc)
- smps_mode = IEEE80211_SMPS_AUTOMATIC;
-
- if (mvmvif->phy_ctxt &&
- data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
- smps_mode = IEEE80211_SMPS_AUTOMATIC;
-
- IWL_DEBUG_COEX(data->mvm,
- "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
- mvmvif->id, data->notif->bt_status, bt_activity_grading,
- smps_mode);
-
- if (vif->type == NL80211_IFTYPE_STATION)
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
- smps_mode);
-
- /* low latency is always primary */
- if (iwl_mvm_vif_low_latency(mvmvif)) {
- data->primary_ll = true;
-
- data->secondary = data->primary;
- data->primary = chanctx_conf;
- }
-
- if (vif->type == NL80211_IFTYPE_AP) {
- if (!mvmvif->ap_ibss_active)
- return;
-
- if (chanctx_conf == data->primary)
- return;
-
- if (!data->primary_ll) {
- /*
- * downgrade the current primary no matter what its
- * type is.
- */
- data->secondary = data->primary;
- data->primary = chanctx_conf;
- } else {
- /* there is low latency vif - we will be secondary */
- data->secondary = chanctx_conf;
- }
- return;
- }
-
- /*
- * STA / P2P Client, try to be primary if first vif. If we are in low
- * latency mode, we are already in primary and just don't do much
- */
- if (!data->primary || data->primary == chanctx_conf)
- data->primary = chanctx_conf;
- else if (!data->secondary)
- /* if secondary is not NULL, it might be a GO */
- data->secondary = chanctx_conf;
-
- /*
- * don't reduce the Tx power if one of these is true:
- * we are in LOOSE
- * single share antenna product
- * BT is active
- * we are associated
- */
- if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
- mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
- !data->notif->bt_status) {
- iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
- return;
- }
-
- /* try to get the avg rssi from fw */
- ave_rssi = mvmvif->bf_data.ave_beacon_signal;
-
- /* if the RSSI isn't valid, fake it is very low */
- if (!ave_rssi)
- ave_rssi = -100;
- if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
- if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
- IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
- } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
- if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
- IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
- }
-
- /* Begin to monitor the RSSI: it may influence the reduced Tx power */
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
-}
-
-static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
-{
- struct iwl_bt_iterator_data data = {
- .mvm = mvm,
- .notif = &mvm->last_bt_notif_old,
- };
- struct iwl_bt_coex_ci_cmd_old cmd = {};
- u8 ci_bw_idx;
-
- /* Ignore updates if we are in force mode */
- if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
- return;
-
- rcu_read_lock();
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bt_notif_iterator, &data);
-
- if (data.primary) {
- struct ieee80211_chanctx_conf *chan = data.primary;
-
- if (WARN_ON(!chan->def.chan)) {
- rcu_read_unlock();
- return;
- }
-
- if (chan->def.width < NL80211_CHAN_WIDTH_40) {
- ci_bw_idx = 0;
- cmd.co_run_bw_primary = 0;
- } else {
- cmd.co_run_bw_primary = 1;
- if (chan->def.center_freq1 >
- chan->def.chan->center_freq)
- ci_bw_idx = 2;
- else
- ci_bw_idx = 1;
- }
-
- cmd.bt_primary_ci =
- iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
- cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
- }
-
- if (data.secondary) {
- struct ieee80211_chanctx_conf *chan = data.secondary;
-
- if (WARN_ON(!data.secondary->def.chan)) {
- rcu_read_unlock();
- return;
- }
-
- if (chan->def.width < NL80211_CHAN_WIDTH_40) {
- ci_bw_idx = 0;
- cmd.co_run_bw_secondary = 0;
- } else {
- cmd.co_run_bw_secondary = 1;
- if (chan->def.center_freq1 >
- chan->def.chan->center_freq)
- ci_bw_idx = 2;
- else
- ci_bw_idx = 1;
- }
-
- cmd.bt_secondary_ci =
- iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
- cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
- }
-
- rcu_read_unlock();
-
- /* Don't spam the fw with the same command over and over */
- if (memcmp(&cmd, &mvm->last_bt_ci_cmd_old, sizeof(cmd))) {
- if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
- sizeof(cmd), &cmd))
- IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
- memcpy(&mvm->last_bt_ci_cmd_old, &cmd, sizeof(cmd));
- }
-
- if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
- IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
-}
-
-void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
-
- IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
- IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
- notif->bt_status ? "ON" : "OFF");
- IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
- IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
- IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
- le32_to_cpu(notif->primary_ch_lut));
- IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
- le32_to_cpu(notif->secondary_ch_lut));
- IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
- le32_to_cpu(notif->bt_activity_grading));
- IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
- notif->bt_agg_traffic_load);
-
- /* remember this notification for future use: rssi fluctuations */
- memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
-
- iwl_mvm_bt_coex_notif_handle(mvm);
-}
-
-static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_bt_iterator_data *data = _data;
- struct iwl_mvm *mvm = data->mvm;
-
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
-
- struct ieee80211_chanctx_conf *chanctx_conf;
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- /* If channel context is invalid or not on 2.4GHz - don't count it */
- if (!chanctx_conf ||
- chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
- rcu_read_unlock();
- return;
- }
- rcu_read_unlock();
-
- if (vif->type != NL80211_IFTYPE_STATION ||
- mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
- return;
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
- lockdep_is_held(&mvm->mutex));
-
- /* This can happen if the station has been removed right now */
- if (IS_ERR_OR_NULL(sta))
- return;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-}
-
-void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum ieee80211_rssi_event_data rssi_event)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_bt_iterator_data data = {
- .mvm = mvm,
- };
- int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
- /* Ignore updates if we are in force mode */
- if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
- return;
-
- /*
- * Rssi update while not associated - can happen since the statistics
- * are handled asynchronously
- */
- if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
- return;
-
- /* No BT - reports should be disabled */
- if (!mvm->last_bt_notif_old.bt_status)
- return;
-
- IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
- rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
-
- /*
- * Check if rssi is good enough for reduced Tx power, but not in loose
- * scheme.
- */
- if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
- iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
- ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
- false);
- else
- ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
-
- if (ret)
- IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
-
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bt_rssi_iterator, &data);
-
- if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
- IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
-}
-
-#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
-#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
-
-u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta)
-{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- enum iwl_bt_coex_lut_type lut_type;
-
- if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
- BT_HIGH_TRAFFIC)
- return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
- if (mvm->last_bt_notif_old.ttc_enabled)
- return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
- lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
-
- if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
- return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
- /* tight coex, high bt traffic, reduce AGG time limit */
- return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
-}
-
-bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta)
-{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- enum iwl_bt_coex_lut_type lut_type;
-
- if (mvm->last_bt_notif_old.ttc_enabled)
- return true;
-
- if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
- BT_HIGH_TRAFFIC)
- return true;
-
- /*
- * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
- * since BT is already killed.
- * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
- * we Tx.
- * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
- */
- lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
- return lut_type != BT_COEX_LOOSE_LUT;
-}
-
-bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
-{
- u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
- return ag < BT_HIGH_TRAFFIC;
-}
-
-bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
- enum ieee80211_band band)
-{
- u32 bt_activity =
- le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
-
- if (band != IEEE80211_BAND_2GHZ)
- return false;
-
- return bt_activity >= BT_LOW_TRAFFIC;
-}
-
-void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm)
-{
- iwl_mvm_bt_coex_notif_handle(mvm);
-}
-
-void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 ant_isolation = le32_to_cpup((void *)pkt->data);
- u8 __maybe_unused lower_bound, upper_bound;
- u8 lut;
-
- struct iwl_bt_coex_cmd_old *bt_cmd;
- struct iwl_host_cmd cmd = {
- .id = BT_CONFIG,
- .len = { sizeof(*bt_cmd), },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
-
- if (!iwl_mvm_bt_is_plcr_supported(mvm))
- return;
-
- lockdep_assert_held(&mvm->mutex);
-
- /* Ignore updates if we are in force mode */
- if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
- return;
-
- if (ant_isolation == mvm->last_ant_isol)
- return;
-
- for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
- if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
- break;
-
- lower_bound = antenna_coupling_ranges[lut].range;
-
- if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
- upper_bound = antenna_coupling_ranges[lut + 1].range;
- else
- upper_bound = antenna_coupling_ranges[lut].range;
-
- IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
- ant_isolation, lower_bound, upper_bound, lut);
-
- mvm->last_ant_isol = ant_isolation;
-
- if (mvm->last_corun_lut == lut)
- return;
-
- mvm->last_corun_lut = lut;
-
- bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
- if (!bt_cmd)
- return;
- cmd.data[0] = bt_cmd;
-
- bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
- bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
- BT_VALID_CORUN_LUT_20 |
- BT_VALID_CORUN_LUT_40);
-
- /* For the moment, use the same LUT for 20GHz and 40GHz */
- memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
- sizeof(bt_cmd->bt4_corun_lut20));
-
- memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
- sizeof(bt_cmd->bt4_corun_lut40));
-
- if (iwl_mvm_send_cmd(mvm, &cmd))
- IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
-
- kfree(bt_cmd);
-}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 4b560e441..4eeb6b78d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -75,7 +75,6 @@
#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */
#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */
-#define IWL_MVM_P2P_UAPSD_STANDALONE 0
#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0
#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
@@ -110,6 +109,7 @@
#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
#define IWL_MVM_TOF_IS_RESPONDER 0
#define IWL_MVM_SW_TX_CSUM_OFFLOAD 0
+#define IWL_MVM_HW_CSUM_DISABLE 0
#define IWL_MVM_COLLECT_FW_ERR_DUMP 1
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index c1a313149..4fdc3dad3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -723,7 +723,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EIO;
}
- ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
+ ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
if (ret)
return ret;
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
@@ -1804,7 +1804,6 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct iwl_wowlan_status *fw_status;
int i;
bool keep;
- struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta;
fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
@@ -1823,13 +1822,10 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
status.wake_packet = fw_status->wake_packet;
/* still at hard-coded place 0 for D3 image */
- ap_sta = rcu_dereference_protected(
- mvm->fw_id_to_mac_id[0],
- lockdep_is_held(&mvm->mutex));
- if (IS_ERR_OR_NULL(ap_sta))
+ mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
+ if (!mvm_ap_sta)
goto out_free;
- mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u16 seq = status.qos_seq_ctr[i];
/* firmware stores last-used value, we store next value */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 14004456b..b23271755 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -281,13 +281,10 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
if (vif->type == NL80211_IFTYPE_STATION &&
ap_sta_id != IWL_MVM_STATION_COUNT) {
- struct ieee80211_sta *sta;
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
- lockdep_is_held(&mvm->mutex));
- if (!IS_ERR_OR_NULL(sta)) {
- struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_sta *mvm_sta;
+ mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
+ if (mvm_sta) {
pos += scnprintf(buf+pos, bufsz-pos,
"ap_sta_id %d - reduced Tx power %d\n",
ap_sta_id,
@@ -724,9 +721,9 @@ static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
ret = kstrtou32(data, 10, &value);
if (ret == 0 && value) {
- enum ieee80211_band band = (cmd->channel_num <= 14) ?
- IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ;
+ enum nl80211_band band = (cmd->channel_num <= 14) ?
+ NL80211_BAND_2GHZ :
+ NL80211_BAND_5GHZ;
struct ieee80211_channel chn = {
.band = band,
.center_freq = ieee80211_channel_to_frequency(
@@ -1425,6 +1422,89 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
+static const char * const chanwidths[] = {
+ [NL80211_CHAN_WIDTH_20_NOHT] = "noht",
+ [NL80211_CHAN_WIDTH_20] = "ht20",
+ [NL80211_CHAN_WIDTH_40] = "ht40",
+ [NL80211_CHAN_WIDTH_80] = "vht80",
+ [NL80211_CHAN_WIDTH_80P80] = "vht80p80",
+ [NL80211_CHAN_WIDTH_160] = "vht160",
+};
+
+static bool iwl_mvm_lqm_notif_wait(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct ieee80211_vif *vif = data;
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_link_qual_msrmnt_notif *report = (void *)pkt->data;
+ u32 num_of_stations = le32_to_cpu(report->number_of_stations);
+ int i;
+
+ IWL_INFO(mvm, "LQM report:\n");
+ IWL_INFO(mvm, "\tstatus: %d\n", report->status);
+ IWL_INFO(mvm, "\tmacID: %d\n", le32_to_cpu(report->mac_id));
+ IWL_INFO(mvm, "\ttx_frame_dropped: %d\n",
+ le32_to_cpu(report->tx_frame_dropped));
+ IWL_INFO(mvm, "\ttime_in_measurement_window: %d us\n",
+ le32_to_cpu(report->time_in_measurement_window));
+ IWL_INFO(mvm, "\ttotal_air_time_other_stations: %d\n",
+ le32_to_cpu(report->total_air_time_other_stations));
+ IWL_INFO(mvm, "\tchannel_freq: %d\n",
+ vif->bss_conf.chandef.center_freq1);
+ IWL_INFO(mvm, "\tchannel_width: %s\n",
+ chanwidths[vif->bss_conf.chandef.width]);
+ IWL_INFO(mvm, "\tnumber_of_stations: %d\n", num_of_stations);
+ for (i = 0; i < num_of_stations; i++)
+ IWL_INFO(mvm, "\t\tsta[%d]: %d\n", i,
+ report->frequent_stations_air_time[i]);
+
+ return true;
+}
+
+static ssize_t iwl_dbgfs_lqm_send_cmd_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct iwl_notification_wait wait_lqm_notif;
+ static u16 lqm_notif[] = {
+ WIDE_ID(MAC_CONF_GROUP,
+ LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF)
+ };
+ int err;
+ u32 duration;
+ u32 timeout;
+
+ if (sscanf(buf, "%d,%d", &duration, &timeout) != 2)
+ return -EINVAL;
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_lqm_notif,
+ lqm_notif, ARRAY_SIZE(lqm_notif),
+ iwl_mvm_lqm_notif_wait, vif);
+ mutex_lock(&mvm->mutex);
+ err = iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_START_MEASUREMENT,
+ duration, timeout);
+ mutex_unlock(&mvm->mutex);
+
+ if (err) {
+ IWL_ERR(mvm, "Failed to send lqm cmdf(err=%d)\n", err);
+ iwl_remove_notification(&mvm->notif_wait, &wait_lqm_notif);
+ return err;
+ }
+
+ /* wait for 2 * timeout (safety guard) and convert to jiffies */
+ timeout = msecs_to_jiffies((timeout * 2) / 1000);
+
+ err = iwl_wait_notification(&mvm->notif_wait, &wait_lqm_notif,
+ timeout);
+ if (err)
+ IWL_ERR(mvm, "Getting lqm notif timed out\n");
+
+ return count;
+}
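/*
 * Illustrative usage sketch (assumptions, not part of this patch): the
 * write handler above parses "<duration>,<timeout>" in usec, so a
 * measurement could be started from userspace with something like
 *
 *   echo "100000,200000" > .../netdev:wlan0/iwlmvm/lqm_send_cmd
 *
 * (the exact debugfs path depends on the mac80211 vif directory). The
 * resulting LQM report is logged by iwl_mvm_lqm_notif_wait() above.
 */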
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1449,6 +1529,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
+MVM_DEBUGFS_WRITE_FILE_OPS(lqm_send_cmd, 64);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -1488,6 +1569,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(lqm_send_cmd, mvmvif->dbgfs_dir, S_IWUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index a43b3921c..406cf1cb9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -65,6 +65,7 @@
*****************************************************************************/
#include <linux/vmalloc.h>
#include <linux/ieee80211.h>
+#include <linux/netdevice.h>
#include "mvm.h"
#include "fw-dbg.h"
@@ -463,69 +464,11 @@ int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
return pos;
}
-static
-int iwl_mvm_coex_dump_mbox_old(struct iwl_bt_coex_profile_notif_old *notif,
- char *buf, int pos, int bufsz)
-{
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
-
- BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
- BT_MBOX_PRINT(0, LE_PROF1, false);
- BT_MBOX_PRINT(0, LE_PROF2, false);
- BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
- BT_MBOX_PRINT(0, CHL_SEQ_N, false);
- BT_MBOX_PRINT(0, INBAND_S, false);
- BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
- BT_MBOX_PRINT(0, LE_SCAN, false);
- BT_MBOX_PRINT(0, LE_ADV, false);
- BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
- BT_MBOX_PRINT(0, OPEN_CON_1, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
-
- BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
- BT_MBOX_PRINT(1, IP_SR, false);
- BT_MBOX_PRINT(1, LE_MSTR, false);
- BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
- BT_MBOX_PRINT(1, MSG_TYPE, false);
- BT_MBOX_PRINT(1, SSN, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
-
- BT_MBOX_PRINT(2, SNIFF_ACT, false);
- BT_MBOX_PRINT(2, PAG, false);
- BT_MBOX_PRINT(2, INQUIRY, false);
- BT_MBOX_PRINT(2, CONN, false);
- BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
- BT_MBOX_PRINT(2, DISC, false);
- BT_MBOX_PRINT(2, SCO_TX_ACT, false);
- BT_MBOX_PRINT(2, SCO_RX_ACT, false);
- BT_MBOX_PRINT(2, ESCO_RE_TX, false);
- BT_MBOX_PRINT(2, SCO_DURATION, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
-
- BT_MBOX_PRINT(3, SCO_STATE, false);
- BT_MBOX_PRINT(3, SNIFF_STATE, false);
- BT_MBOX_PRINT(3, A2DP_STATE, false);
- BT_MBOX_PRINT(3, ACL_STATE, false);
- BT_MBOX_PRINT(3, MSTR_STATE, false);
- BT_MBOX_PRINT(3, OBX_STATE, false);
- BT_MBOX_PRINT(3, OPEN_CON_2, false);
- BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
- BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
- BT_MBOX_PRINT(3, INBAND_P, false);
- BT_MBOX_PRINT(3, MSG_TYPE_2, false);
- BT_MBOX_PRINT(3, SSN_2, false);
- BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
-
- return pos;
-}
-
static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
char *buf;
int ret, pos = 0, bufsz = sizeof(char) * 1024;
@@ -535,52 +478,24 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
- struct iwl_bt_coex_profile_notif_old *notif =
- &mvm->last_bt_notif_old;
-
- pos += iwl_mvm_coex_dump_mbox_old(notif, buf, pos, bufsz);
-
- pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
- notif->bt_ci_compliance);
- pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
- le32_to_cpu(notif->primary_ch_lut));
- pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
- le32_to_cpu(notif->secondary_ch_lut));
- pos += scnprintf(buf+pos,
- bufsz-pos, "bt_activity_grading = %d\n",
- le32_to_cpu(notif->bt_activity_grading));
- pos += scnprintf(buf+pos, bufsz-pos,
- "antenna isolation = %d CORUN LUT index = %d\n",
- mvm->last_ant_isol, mvm->last_corun_lut);
- pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
- notif->rrc_enabled);
- pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
- notif->ttc_enabled);
- } else {
- struct iwl_bt_coex_profile_notif *notif =
- &mvm->last_bt_notif;
-
- pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
-
- pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
- notif->bt_ci_compliance);
- pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
- le32_to_cpu(notif->primary_ch_lut));
- pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
- le32_to_cpu(notif->secondary_ch_lut));
- pos += scnprintf(buf+pos,
- bufsz-pos, "bt_activity_grading = %d\n",
- le32_to_cpu(notif->bt_activity_grading));
- pos += scnprintf(buf+pos, bufsz-pos,
- "antenna isolation = %d CORUN LUT index = %d\n",
- mvm->last_ant_isol, mvm->last_corun_lut);
- pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
- (notif->ttc_rrc_status >> 4) & 0xF);
- pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
- notif->ttc_rrc_status & 0xF);
- }
+ pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
+ notif->bt_ci_compliance);
+ pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
+ le32_to_cpu(notif->primary_ch_lut));
+ pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
+ le32_to_cpu(notif->secondary_ch_lut));
+ pos += scnprintf(buf + pos,
+ bufsz - pos, "bt_activity_grading = %d\n",
+ le32_to_cpu(notif->bt_activity_grading));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "antenna isolation = %d CORUN LUT index = %d\n",
+ mvm->last_ant_isol, mvm->last_corun_lut);
+ pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
+ (notif->ttc_rrc_status >> 4) & 0xF);
+ pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
+ notif->ttc_rrc_status & 0xF);
pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n",
IWL_MVM_BT_COEX_SYNC2SCO);
@@ -602,44 +517,20 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
char buf[256];
int bufsz = sizeof(buf);
int pos = 0;
mutex_lock(&mvm->mutex);
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
- struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
-
- pos += scnprintf(buf+pos, bufsz-pos,
- "Channel inhibition CMD\n");
- pos += scnprintf(buf+pos, bufsz-pos,
- "\tPrimary Channel Bitmap 0x%016llx\n",
- le64_to_cpu(cmd->bt_primary_ci));
- pos += scnprintf(buf+pos, bufsz-pos,
- "\tSecondary Channel Bitmap 0x%016llx\n",
- le64_to_cpu(cmd->bt_secondary_ci));
-
- pos += scnprintf(buf+pos, bufsz-pos,
- "BT Configuration CMD - 0=default, 1=never, 2=always\n");
- pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n",
- mvm->bt_ack_kill_msk[0]);
- pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n",
- mvm->bt_cts_kill_msk[0]);
-
- } else {
- struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
-
- pos += scnprintf(buf+pos, bufsz-pos,
- "Channel inhibition CMD\n");
- pos += scnprintf(buf+pos, bufsz-pos,
- "\tPrimary Channel Bitmap 0x%016llx\n",
- le64_to_cpu(cmd->bt_primary_ci));
- pos += scnprintf(buf+pos, bufsz-pos,
- "\tSecondary Channel Bitmap 0x%016llx\n",
- le64_to_cpu(cmd->bt_secondary_ci));
- }
+ pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tPrimary Channel Bitmap 0x%016llx\n",
+ le64_to_cpu(cmd->bt_primary_ci));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tSecondary Channel Bitmap 0x%016llx\n",
+ le64_to_cpu(cmd->bt_secondary_ci));
mutex_unlock(&mvm->mutex);
@@ -990,8 +881,10 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
struct iwl_rss_config_cmd cmd = {
.flags = cpu_to_le32(IWL_RSS_ENABLE),
.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+ IWL_RSS_HASH_TYPE_IPV4_UDP |
IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
IWL_RSS_HASH_TYPE_IPV6_TCP |
+ IWL_RSS_HASH_TYPE_IPV6_UDP |
IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
};
int ret, i, num_repeats, nbytes = count / 2;
@@ -1015,7 +908,7 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
ARRAY_SIZE(cmd.indirection_table) % nbytes);
- memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
+ netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
@@ -1416,6 +1309,8 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT);
PRINT_MVM_REF(IWL_MVM_REF_INIT_UCODE);
+ PRINT_MVM_REF(IWL_MVM_REF_SENDING_CMD);
+ PRINT_MVM_REF(IWL_MVM_REF_RX);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
index eec52c57f..5f22cc7ac 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
@@ -368,7 +368,7 @@ struct iwl_wowlan_gtk_status {
u8 decrypt_key[16];
u8 tkip_mic_key[8];
struct iwl_wowlan_rsc_tsc_params_cmd rsc;
-} __packed;
+} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
struct iwl_wowlan_status {
struct iwl_wowlan_gtk_status gtk;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
index 7a16e55df..1ca8e4988 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
@@ -268,12 +268,25 @@ enum iwl_rx_mpdu_amsdu_info {
IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80,
};
+enum iwl_rx_l3_proto_values {
+ IWL_RX_L3_TYPE_NONE,
+ IWL_RX_L3_TYPE_IPV4,
+ IWL_RX_L3_TYPE_IPV4_FRAG,
+ IWL_RX_L3_TYPE_IPV6_FRAG,
+ IWL_RX_L3_TYPE_IPV6,
+ IWL_RX_L3_TYPE_IPV6_IN_IPV4,
+ IWL_RX_L3_TYPE_ARP,
+ IWL_RX_L3_TYPE_EAPOL,
+};
+
+#define IWL_RX_L3_PROTO_POS 4
+
enum iwl_rx_l3l4_flags {
IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0),
IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1),
IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2),
IWL_RX_L3L4_TCP_ACK = BIT(3),
- IWL_RX_L3L4_L3_PROTO_MASK = 0xf << 4,
+ IWL_RX_L3L4_L3_PROTO_MASK = 0xf << IWL_RX_L3_PROTO_POS,
IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8,
IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12,
};
@@ -424,21 +437,28 @@ struct iwl_rxq_sync_notification {
/**
* Internal message identifier
*
+* @IWL_MVM_RXQ_EMPTY: empty sync notification
* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
*/
enum iwl_mvm_rxq_notif_type {
+ IWL_MVM_RXQ_EMPTY,
IWL_MVM_RXQ_NOTIF_DEL_BA,
};
/**
* struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
* in &iwl_rxq_sync_cmd. Should be DWORD aligned.
+* FW is agnostic to the payload, so there are no endianness requirements.
*
* @type: value from &iwl_mvm_rxq_notif_type
+* @sync: ctrl path is waiting for all notifications to be received
+* @cookie: internal cookie to identify old notifications
* @data: payload
*/
struct iwl_mvm_internal_rxq_notif {
- u32 type;
+ u16 type;
+ u16 sync;
+ u32 cookie;
u8 data[];
} __packed;
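/*
 * Illustrative only (not part of this patch): composing the reworked
 * notification. Per the comment above, the FW treats the payload as
 * opaque, so the fields stay in host endianness; the cookie source shown
 * here is an assumed per-driver sequence counter.
 */
struct iwl_mvm_internal_rxq_notif notif = {
	.type = IWL_MVM_RXQ_EMPTY,
	.sync = 1,			/* ctrl path waits for all queues */
	.cookie = next_sync_cookie++,	/* hypothetical counter */
};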
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
index 90d911394..38b1d045b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
@@ -173,7 +173,7 @@ enum iwl_sta_key_flag {
/**
* enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
- * @STA_MODIFY_KEY: this command modifies %key
+ * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
* @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
* @STA_MODIFY_TX_RATE: unused
* @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
@@ -183,7 +183,7 @@ enum iwl_sta_key_flag {
* @STA_MODIFY_QUEUES: modify the queues used by this station
*/
enum iwl_sta_modify_flag {
- STA_MODIFY_KEY = BIT(0),
+ STA_MODIFY_QUEUE_REMOVAL = BIT(0),
STA_MODIFY_TID_DISABLE_TX = BIT(1),
STA_MODIFY_TX_RATE = BIT(2),
STA_MODIFY_ADD_BA_TID = BIT(3),
@@ -255,8 +255,10 @@ struct iwl_mvm_keyinfo {
__le64 hw_tkip_mic_tx_key;
} __packed;
-#define IWL_ADD_STA_STATUS_MASK 0xFF
-#define IWL_ADD_STA_BAID_MASK 0xFF00
+#define IWL_ADD_STA_STATUS_MASK 0xFF
+#define IWL_ADD_STA_BAID_VALID_MASK 0x8000
+#define IWL_ADD_STA_BAID_MASK 0x7F00
+#define IWL_ADD_STA_BAID_SHIFT 8
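/*
 * Illustrative sketch (helper name and exact flow are assumptions, not
 * part of this patch): decoding a BAID from the ADD_STA status word with
 * the new valid bit and narrowed mask.
 */
static inline int iwl_mvm_add_sta_baid(u32 status)
{
	if (!(status & IWL_ADD_STA_BAID_VALID_MASK))
		return -EINVAL;

	return (status & IWL_ADD_STA_BAID_MASK) >> IWL_ADD_STA_BAID_SHIFT;
}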
/**
* struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index ba3f0bbdd..dadcccd88 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -193,11 +194,41 @@ enum iwl_tx_pm_timeouts {
#define IWL_BAR_DFAULT_RETRY_LIMIT 60
#define IWL_LOW_RETRY_LIMIT 7
+/**
+ * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values
+ * @TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words)
+ * from mac header end. In the normal case it is 4 words for SNAP.
+ * note: tx_cmd, mac header and pad are not counted in the offset.
+ * This is used to help the offload in case there is tunneling such as
+ * IPv6 in IPv4; in that case the IP header offset should point to the
+ * inner IP header, and the IPv4 checksum of the outer header should be
+ * calculated by the driver.
+ * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum
+ * @TX_CMD_OFFLD_L3_EN: enable IP header checksum
+ * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV
+ * field. Doesn't include the pad.
+ * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for
+ * alignment
+ * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU
+ */
+enum iwl_tx_offload_assist_flags_pos {
+ TX_CMD_OFFLD_IP_HDR = 0,
+ TX_CMD_OFFLD_L4_EN = 6,
+ TX_CMD_OFFLD_L3_EN = 7,
+ TX_CMD_OFFLD_MH_SIZE = 8,
+ TX_CMD_OFFLD_PAD = 13,
+ TX_CMD_OFFLD_AMSDU = 14,
+};
+
+#define IWL_TX_CMD_OFFLD_MH_MASK 0x1f
+#define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f
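/*
 * Illustrative sketch (simplified, not the driver's actual TX path):
 * composing offload_assist for a frame with L3+L4 checksum offload.
 * ip_hdr_words and mh_words are placeholder inputs; real code derives
 * them from the skb and mac header layout.
 */
static u16 iwl_tx_offload_assist_sketch(u16 ip_hdr_words, u16 mh_words,
					bool pad)
{
	u16 oa = (ip_hdr_words & IWL_TX_CMD_OFFLD_IP_HDR_MASK) <<
		 TX_CMD_OFFLD_IP_HDR;

	oa |= BIT(TX_CMD_OFFLD_L3_EN) | BIT(TX_CMD_OFFLD_L4_EN);
	oa |= (mh_words & IWL_TX_CMD_OFFLD_MH_MASK) << TX_CMD_OFFLD_MH_SIZE;
	if (pad)
		oa |= BIT(TX_CMD_OFFLD_PAD);

	return oa;
}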
+
/* TODO: complete documentation for try_cnt and btkill_cnt */
/**
* struct iwl_tx_cmd - TX command struct to FW
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
* @tx_flags: combination of TX_CMD_FLG_*
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_*
@@ -231,7 +262,7 @@ enum iwl_tx_pm_timeouts {
*/
struct iwl_tx_cmd {
__le16 len;
- __le16 next_frame_len;
+ __le16 offload_assist;
__le32 tx_flags;
struct {
u8 try_cnt;
@@ -255,7 +286,7 @@ struct iwl_tx_cmd {
__le16 reserved4;
u8 payload[0];
struct ieee80211_hdr hdr[0];
-} __packed; /* TX_CMD_API_S_VER_3 */
+} __packed; /* TX_CMD_API_S_VER_6 */
/*
* TX response related data
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index 4a0fc47c8..41b80ae2d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -80,12 +80,44 @@
#include "fw-api-stats.h"
#include "fw-api-tof.h"
-/* Tx queue numbers */
+/* Tx queue numbers for non-DQA mode */
enum {
IWL_MVM_OFFCHANNEL_QUEUE = 8,
IWL_MVM_CMD_QUEUE = 9,
};
+/*
+ * DQA queue numbers
+ *
+ * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
+ * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
+ * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
+ * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
+ * that we are never left without the possibility to connect to an AP.
+ * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames.
+ * Each MGMT queue is mapped to a single STA
+ * MGMT frames are frames that return true on ieee80211_is_mgmt()
+ * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
+ * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe
+ * responses
+ * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
+ * DATA frames are intended for !ieee80211_is_mgmt() frames, but if
+ * the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
+ * as well
+ * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
+ */
+enum iwl_mvm_dqa_txq {
+ IWL_MVM_DQA_CMD_QUEUE = 0,
+ IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
+ IWL_MVM_DQA_GCAST_QUEUE = 3,
+ IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
+ IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
+ IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
+ IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
+ IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
+ IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
+};
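/*
 * Illustrative helpers (hypothetical names, not part of this patch): the
 * MGMT and DATA pools above are contiguous, so a queue index can be
 * classified with simple bounds checks.
 */
static inline bool iwl_mvm_queue_in_dqa_mgmt_pool(u8 queue)
{
	return queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
	       queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE;
}

static inline bool iwl_mvm_queue_in_dqa_data_pool(u8 queue)
{
	return queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
	       queue <= IWL_MVM_DQA_MAX_DATA_QUEUE;
}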
+
enum iwl_mvm_tx_fifo {
IWL_MVM_TX_FIFO_BK = 0,
IWL_MVM_TX_FIFO_BE,
@@ -279,6 +311,11 @@ enum {
/* Please keep this enum *SORTED* by hex value.
* Needed for binary search, otherwise a warning will be triggered.
*/
+enum iwl_mac_conf_subcmd_ids {
+ LINK_QUALITY_MEASUREMENT_CMD = 0x1,
+ LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE,
+};
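/*
 * Note: commands in this new group are addressed through the wide ID
 * helper, e.g. WIDE_ID(MAC_CONF_GROUP, LINK_QUALITY_MEASUREMENT_CMD),
 * as the debugfs LQM path does for the completion notification.
 */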
+
enum iwl_phy_ops_subcmd_ids {
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
CTDP_CONFIG_CMD = 0x03,
@@ -287,6 +324,10 @@ enum iwl_phy_ops_subcmd_ids {
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
};
+enum iwl_system_subcmd_ids {
+ SHARED_MEM_CFG_CMD = 0x0,
+};
+
enum iwl_data_path_subcmd_ids {
UPDATE_MU_GROUPS_CMD = 0x1,
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
@@ -302,6 +343,8 @@ enum iwl_prot_offload_subcmd_ids {
enum {
LEGACY_GROUP = 0x0,
LONG_GROUP = 0x1,
+ SYSTEM_GROUP = 0x2,
+ MAC_CONF_GROUP = 0x3,
PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5,
PROT_OFFLOAD_GROUP = 0xb,
@@ -1923,6 +1966,7 @@ struct iwl_tdls_config_res {
#define TX_FIFO_MAX_NUM 8
#define RX_FIFO_MAX_NUM 2
+#define TX_FIFO_INTERNAL_MAX_NUM 6
/**
* Shared memory configuration information from the FW
@@ -1940,6 +1984,12 @@ struct iwl_tdls_config_res {
* @page_buff_addr: used by UMAC and performance debug (page miss analysis),
* when paging is not supported this should be 0
* @page_buff_size: size of %page_buff_addr
+ * @rxfifo_addr: Start address of rxFifo
+ * @internal_txfifo_addr: start address of internalFifo
+ * @internal_txfifo_size: internal fifos' size
+ *
+ * NOTE: on firmware that doesn't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
+ * set, the last 3 members don't exist.
*/
struct iwl_shared_mem_cfg {
__le32 shared_mem_addr;
@@ -1951,7 +2001,10 @@ struct iwl_shared_mem_cfg {
__le32 rxfifo_size[RX_FIFO_MAX_NUM];
__le32 page_buff_addr;
__le32 page_buff_size;
-} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
+ __le32 rxfifo_addr;
+ __le32 internal_txfifo_addr;
+ __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
/**
* VHT MU-MIMO group configuration
@@ -2002,4 +2055,60 @@ struct iwl_stored_beacon_notif {
u8 data[MAX_STORED_BEACON_SIZE];
} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */
+#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16
+
+enum iwl_lqm_cmd_operatrions {
+ LQM_CMD_OPERATION_START_MEASUREMENT = 0x01,
+ LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02,
+};
+
+enum iwl_lqm_status {
+ LQM_STATUS_SUCCESS = 0,
+ LQM_STATUS_TIMEOUT = 1,
+ LQM_STATUS_ABORT = 2,
+};
+
+/**
+ * Link Quality Measurement command
+ * @cmd_operation: command operation to be performed (start or stop)
+ * as defined above.
+ * @mac_id: MAC ID the measurement applies to.
+ * @measurement_time: time of the total measurement to be performed, in uSec.
+ * @timeout: maximum time allowed until a response is sent, in uSec.
+ */
+struct iwl_link_qual_msrmnt_cmd {
+ __le32 cmd_operation;
+ __le32 mac_id;
+ __le32 measurement_time;
+ __le32 timeout;
+} __packed /* LQM_CMD_API_S_VER_1 */;
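/*
 * Illustrative only (placeholder variables, not part of this patch):
 * starting a measurement with the command defined above. All fields are
 * little-endian on the wire; times are in usec per the kernel-doc.
 */
struct iwl_link_qual_msrmnt_cmd cmd = {
	.cmd_operation = cpu_to_le32(LQM_CMD_OPERATION_START_MEASUREMENT),
	.mac_id = cpu_to_le32(mac_id),
	.measurement_time = cpu_to_le32(duration_usec),
	.timeout = cpu_to_le32(timeout_usec),
};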
+
+/**
+ * Link Quality Measurement notification
+ *
+ * @frequent_stations_air_time: an array containing the total air time
+ * (in uSec) used by the most frequently transmitting stations.
+ * @number_of_stations: the number of unique stations included in the array
+ * (a number between 0 and 16)
+ * @total_air_time_other_stations: the total air time (uSec) used by all the
+ * stations which are not included in the above report.
+ * @time_in_measurement_window: the total time in uSec in which a measurement
+ * took place.
+ * @tx_frame_dropped: the number of TX frames dropped due to retry limit during
+ * measurement
+ * @mac_id: MAC ID the measurement applies to.
+ * @status: return status. May be one of the LQM_STATUS_* values defined above.
+ * @reserved: reserved.
+ */
+struct iwl_link_qual_msrmnt_notif {
+ __le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT];
+ __le32 number_of_stations;
+ __le32 total_air_time_other_stations;
+ __le32 time_in_measurement_window;
+ __le32 tx_frame_dropped;
+ __le32 mac_id;
+ __le32 status;
+ __le32 reserved[3];
+} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */
+
#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 6938cd37b..e1b6b2c66 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -71,7 +71,7 @@
#include "iwl-csr.h"
static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
- const void *data, size_t datalen)
+ void *data, size_t datalen)
{
const struct iwl_mvm_dump_ptrs *dump_ptrs = data;
ssize_t bytes_read;
@@ -104,7 +104,7 @@ static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
return bytes_read + bytes_read_trans;
}
-static void iwl_mvm_free_coredump(const void *data)
+static void iwl_mvm_free_coredump(void *data)
{
const struct iwl_mvm_dump_ptrs *fw_error_dump = data;
@@ -265,6 +265,66 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
*dump_data = iwl_fw_error_next_data(*dump_data);
}
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+ /* Pull UMAC internal TXF data from all TXFs */
+ for (i = 0;
+ i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+ i++) {
+ fifo_hdr = (void *)(*dump_data)->data;
+ fifo_data = (void *)fifo_hdr->data;
+ fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
+
+ /* No need to try to read the data if the length is 0 */
+ if (fifo_len == 0)
+ continue;
+
+ /* Add a TLV for the internal FIFOs */
+ (*dump_data)->type =
+ cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
+ (*dump_data)->len =
+ cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+ fifo_hdr->fifo_num = cpu_to_le32(i);
+
+ /* Mark the number of TXF we're pulling now */
+ iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i);
+
+ fifo_hdr->available_bytes =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_CPU2_FIFO_ITEM_CNT));
+ fifo_hdr->wr_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_CPU2_WR_PTR));
+ fifo_hdr->rd_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_CPU2_RD_PTR));
+ fifo_hdr->fence_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_CPU2_FENCE_PTR));
+ fifo_hdr->fence_mode =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_CPU2_LOCK_FENCE));
+
+ /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
+ iwl_trans_write_prph(mvm->trans,
+ TXF_CPU2_READ_MODIFY_ADDR,
+ TXF_CPU2_WR_PTR);
+
+ /* Dummy-read to advance the read pointer to head */
+ iwl_trans_read_prph(mvm->trans,
+ TXF_CPU2_READ_MODIFY_DATA);
+
+ /* Read FIFO */
+ fifo_len /= sizeof(u32); /* Size in DWORDS */
+ for (j = 0; j < fifo_len; j++)
+ fifo_data[j] =
+ iwl_trans_read_prph(mvm->trans,
+ TXF_CPU2_READ_MODIFY_DATA);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+ }
+ }
+
iwl_trans_release_nic_access(mvm->trans, &flags);
}
@@ -280,9 +340,11 @@ void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
-static const struct {
+struct iwl_prph_range {
u32 start, end;
-} iwl_prph_dump_addr[] = {
+};
+
+static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
{ .start = 0x00a00000, .end = 0x00a00000 },
{ .start = 0x00a0000c, .end = 0x00a00024 },
{ .start = 0x00a0002c, .end = 0x00a0003c },
@@ -380,8 +442,18 @@ static const struct {
{ .start = 0x00a44000, .end = 0x00a7bf80 },
};
+static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
+ { .start = 0x00a05c00, .end = 0x00a05c18 },
+ { .start = 0x00a05400, .end = 0x00a056e8 },
+ { .start = 0x00a08000, .end = 0x00a098bc },
+ { .start = 0x00adfc00, .end = 0x00adfd1c },
+ { .start = 0x00a02400, .end = 0x00a02758 },
+};
+
static u32 iwl_dump_prph(struct iwl_trans *trans,
- struct iwl_fw_error_dump_data **data)
+ struct iwl_fw_error_dump_data **data,
+ const struct iwl_prph_range *iwl_prph_dump_addr,
+ u32 range_len)
{
struct iwl_fw_error_dump_prph *prph;
unsigned long flags;
@@ -390,7 +462,7 @@ static u32 iwl_dump_prph(struct iwl_trans *trans,
if (!iwl_trans_grab_nic_access(trans, &flags))
return 0;
- for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+ for (i = 0; i < range_len; i++) {
/* The range includes both boundaries */
int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
iwl_prph_dump_addr[i].start + 4;
@@ -429,9 +501,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
struct iwl_fw_error_dump_trigger_desc *dump_trig;
struct iwl_mvm_dump_ptrs *fw_error_dump;
u32 sram_len, sram_ofs;
+ struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem =
+ mvm->fw->dbg_mem_tlv;
u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
- u32 smem_len = mvm->cfg->smem_len;
- u32 sram2_len = mvm->cfg->dccm2_len;
+ u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len;
+ u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len;
bool monitor_dump_only = false;
int i;
@@ -494,24 +568,54 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
sizeof(struct iwl_fw_error_dump_fifo);
}
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+ for (i = 0;
+ i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
+ i++) {
+ if (!mem_cfg->internal_txfifo_size[i])
+ continue;
+
+ /* Add header info */
+ fifo_data_len +=
+ mem_cfg->internal_txfifo_size[i] +
+ sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ }
+ }
+
/* Make room for PRPH registers */
- for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+ for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); i++) {
/* The range includes both boundaries */
- int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
- iwl_prph_dump_addr[i].start + 4;
+ int num_bytes_in_chunk =
+ iwl_prph_dump_addr_comm[i].end -
+ iwl_prph_dump_addr_comm[i].start + 4;
prph_len += sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_prph) +
num_bytes_in_chunk;
}
+ if (mvm->cfg->mq_rx_supported) {
+ for (i = 0; i <
+ ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
+ /* The range includes both boundaries */
+ int num_bytes_in_chunk =
+ iwl_prph_dump_addr_9000[i].end -
+ iwl_prph_dump_addr_9000[i].start + 4;
+
+ prph_len += sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_prph) +
+ num_bytes_in_chunk;
+ }
+ }
+
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
}
file_len = sizeof(*dump_file) +
sizeof(*dump_data) * 2 +
- sram_len + sizeof(*dump_mem) +
fifo_data_len +
prph_len +
radio_len +
@@ -525,6 +629,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
if (sram2_len)
file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
+ /* Make room for MEM segments */
+ for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
+ if (fw_dbg_mem[i])
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ le32_to_cpu(fw_dbg_mem[i]->len);
+ }
+
/* Make room for fw's virtual image pages, if it exists */
if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
mvm->fw_paging_db[0].fw_paging_block)
@@ -551,6 +662,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
mvm->fw_dump_desc->len;
+ if (!mvm->fw->dbg_dynamic_mem)
+ file_len += sram_len + sizeof(*dump_mem);
+
dump_file = vzalloc(file_len);
if (!dump_file) {
kfree(fw_error_dump);
@@ -600,16 +714,36 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
if (monitor_dump_only)
goto dump_trans_data;
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
- dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
- dump_mem = (void *)dump_data->data;
- dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
- dump_mem->offset = cpu_to_le32(sram_ofs);
- iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
- sram_len);
+ if (!mvm->fw->dbg_dynamic_mem) {
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+ dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
+ dump_mem = (void *)dump_data->data;
+ dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
+ dump_mem->offset = cpu_to_le32(sram_ofs);
+ iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
+ sram_len);
+ dump_data = iwl_fw_error_next_data(dump_data);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
+ if (fw_dbg_mem[i]) {
+ u32 len = le32_to_cpu(fw_dbg_mem[i]->len);
+ u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs);
+
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+ dump_data->len = cpu_to_le32(len +
+ sizeof(*dump_mem));
+ dump_mem = (void *)dump_data->data;
+ dump_mem->type = fw_dbg_mem[i]->data_type;
+ dump_mem->offset = cpu_to_le32(ofs);
+ iwl_trans_read_mem_bytes(mvm->trans, ofs,
+ dump_mem->data,
+ len);
+ dump_data = iwl_fw_error_next_data(dump_data);
+ }
+ }
if (smem_len) {
- dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -617,10 +751,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
dump_mem->data, smem_len);
+ dump_data = iwl_fw_error_next_data(dump_data);
}
if (sram2_len) {
- dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -628,11 +762,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
dump_mem->data, sram2_len);
+ dump_data = iwl_fw_error_next_data(dump_data);
}
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
- dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
sizeof(*dump_mem));
@@ -641,6 +775,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
dump_mem->data, IWL8260_ICCM_LEN);
+ dump_data = iwl_fw_error_next_data(dump_data);
}
/* Dump fw's virtual image */
@@ -651,7 +786,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
struct page *pages =
mvm->fw_paging_db[i].fw_paging_block;
- dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
dump_data->len = cpu_to_le32(sizeof(*paging) +
PAGING_BLOCK_SIZE);
@@ -659,12 +793,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
paging->index = cpu_to_le32(i);
memcpy(paging->data, page_address(pages),
PAGING_BLOCK_SIZE);
+ dump_data = iwl_fw_error_next_data(dump_data);
}
}
- dump_data = iwl_fw_error_next_data(dump_data);
- if (prph_len)
- iwl_dump_prph(mvm->trans, &dump_data);
+ if (prph_len) {
+ iwl_dump_prph(mvm->trans, &dump_data,
+ iwl_prph_dump_addr_comm,
+ ARRAY_SIZE(iwl_prph_dump_addr_comm));
+
+ if (mvm->cfg->mq_rx_supported)
+ iwl_dump_prph(mvm->trans, &dump_data,
+ iwl_prph_dump_addr_9000,
+ ARRAY_SIZE(iwl_prph_dump_addr_9000));
+ }
dump_trans_data:
fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 09d895faf..7057f35cb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -64,6 +64,7 @@
*
*****************************************************************************/
#include <net/mac80211.h>
+#include <linux/netdevice.h>
#include "iwl-trans.h"
#include "iwl-op-mode.h"
@@ -114,14 +115,18 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
struct iwl_rss_config_cmd cmd = {
.flags = cpu_to_le32(IWL_RSS_ENABLE),
.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+ IWL_RSS_HASH_TYPE_IPV4_UDP |
IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
IWL_RSS_HASH_TYPE_IPV6_TCP |
+ IWL_RSS_HASH_TYPE_IPV6_UDP |
IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
};
+ /* Do not direct RSS traffic to queue 0, which is our fallback queue */
for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
- cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
- memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
+ cmd.indirection_table[i] =
+ 1 + (i % (mvm->trans->num_rx_queues - 1));
+ netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
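/*
 * Worked example: with num_rx_queues == 4 the formula above becomes
 * 1 + (i % 3), so the indirection table cycles 1, 2, 3, 1, 2, 3, ...
 * and queue 0 never receives RSS traffic, keeping it free as the
 * fallback queue.
 */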
@@ -176,8 +181,12 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
}
}
- if (sec_idx >= IWL_UCODE_SECTION_MAX) {
- IWL_ERR(mvm, "driver didn't find paging image\n");
+ /*
+ * If paging is enabled there should be at least 2 more sections left
+ * (one for CSS and one for Paging data)
+ */
+ if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
+ IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
iwl_free_fw_paging(mvm);
return -EINVAL;
}
@@ -412,7 +421,9 @@ static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
goto exit;
}
- mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
+ /* Add an extra page for headers */
+ mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
+ FW_PAGING_SIZE,
GFP_KERNEL);
if (!mvm->trans->paging_download_buf) {
ret = -ENOMEM;
@@ -524,7 +535,7 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
return true;
}
- WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));
+ WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
return false;
}
@@ -643,7 +654,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
- mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
+ if (iwl_mvm_is_dqa_supported(mvm))
+ mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
+ else
+ mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
@@ -790,17 +804,22 @@ out:
static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
struct iwl_host_cmd cmd = {
- .id = SHARED_MEM_CFG,
.flags = CMD_WANT_SKB,
.data = { NULL, },
.len = { 0, },
};
- struct iwl_rx_packet *pkt;
struct iwl_shared_mem_cfg *mem_cfg;
+ struct iwl_rx_packet *pkt;
u32 i;
lockdep_assert_held(&mvm->mutex);
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
+ cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+ else
+ cmd.id = SHARED_MEM_CFG;
+
if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
return;
@@ -826,6 +845,25 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
le32_to_cpu(mem_cfg->page_buff_addr);
mvm->shared_mem_cfg.page_buff_size =
le32_to_cpu(mem_cfg->page_buff_size);
+
+ /* new API has more data */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+ mvm->shared_mem_cfg.rxfifo_addr =
+ le32_to_cpu(mem_cfg->rxfifo_addr);
+ mvm->shared_mem_cfg.internal_txfifo_addr =
+ le32_to_cpu(mem_cfg->internal_txfifo_addr);
+
+ BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
+ sizeof(mem_cfg->internal_txfifo_size));
+
+ for (i = 0;
+ i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+ i++)
+ mvm->shared_mem_cfg.internal_txfifo_size[i] =
+ le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+ }
+
IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
iwl_free_resp(&cmd);
@@ -944,7 +982,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
/* Add all the PHY contexts */
- chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
+ chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
for (i = 0; i < NUM_PHY_CTX; i++) {
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index e885db346..7aae068c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -252,10 +252,14 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
.exclude_vif = exclude_vif,
.used_hw_queues =
BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
- BIT(mvm->aux_queue) |
- BIT(IWL_MVM_CMD_QUEUE),
+ BIT(mvm->aux_queue),
};
+ if (iwl_mvm_is_dqa_supported(mvm))
+ data.used_hw_queues |= BIT(IWL_MVM_DQA_CMD_QUEUE);
+ else
+ data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE);
+
lockdep_assert_held(&mvm->mutex);
/* mark all VIF used hw queues */
@@ -425,12 +429,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
return 0;
}
- /* Find available queues, and allocate them to the ACs */
+ /*
+ * Find available queues, and allocate them to the ACs. When in
+ * DQA-mode they aren't really used, and this is done only so the
+ * mac80211 ieee80211_check_queues() function won't fail
+ */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
u8 queue = find_first_zero_bit(&used_hw_queues,
mvm->first_agg_queue);
- if (queue >= mvm->first_agg_queue) {
+ if (!iwl_mvm_is_dqa_supported(mvm) &&
+ queue >= mvm->first_agg_queue) {
IWL_ERR(mvm, "Failed to allocate queue\n");
ret = -EIO;
goto exit_fail;
@@ -442,13 +451,19 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
/* Allocate the CAB queue for softAP and GO interfaces */
if (vif->type == NL80211_IFTYPE_AP) {
- u8 queue = find_first_zero_bit(&used_hw_queues,
- mvm->first_agg_queue);
+ u8 queue;
- if (queue >= mvm->first_agg_queue) {
- IWL_ERR(mvm, "Failed to allocate cab queue\n");
- ret = -EIO;
- goto exit_fail;
+ if (!iwl_mvm_is_dqa_supported(mvm)) {
+ queue = find_first_zero_bit(&used_hw_queues,
+ mvm->first_agg_queue);
+
+ if (queue >= mvm->first_agg_queue) {
+ IWL_ERR(mvm, "Failed to allocate cab queue\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+ } else {
+ queue = IWL_MVM_DQA_GCAST_QUEUE;
}
vif->cab_queue = queue;
@@ -486,15 +501,21 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
- iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
- IWL_MVM_OFFCHANNEL_QUEUE,
- IWL_MVM_TX_FIFO_VO, 0, wdg_timeout);
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MVM_TX_FIFO_VO, 0,
+ wdg_timeout);
break;
case NL80211_IFTYPE_AP:
iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
/* fall through */
default:
+ /* If DQA is supported - queues will be enabled when needed */
+ if (iwl_mvm_is_dqa_supported(mvm))
+ break;
+
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
vif->hw_queue[ac],
@@ -514,15 +535,31 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
- iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
- IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT,
- 0);
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MAX_TID_COUNT, 0);
+
break;
case NL80211_IFTYPE_AP:
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
+
+ if (iwl_mvm_is_dqa_supported(mvm))
+ iwl_mvm_disable_txq(mvm,
+ IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
+ vif->hw_queue[0], IWL_MAX_TID_COUNT,
+ 0);
/* fall through */
default:
+ /*
+ * If DQA is supported - queues were already disabled, since in
+ * DQA-mode the queues are a property of the STA and not of the
+ * vif, and at this point the STA was already deleted
+ */
+ if (iwl_mvm_is_dqa_supported(mvm))
+ break;
+
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
vif->hw_queue[ac],
@@ -532,7 +569,7 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- enum ieee80211_band band,
+ enum nl80211_band band,
u8 *cck_rates, u8 *ofdm_rates)
{
struct ieee80211_supported_band *sband;
@@ -703,7 +740,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
rcu_read_lock();
chanctx = rcu_dereference(vif->chanctx_conf);
iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
- : IEEE80211_BAND_2GHZ,
+ : NL80211_BAND_2GHZ,
&cck_ack_rates, &ofdm_ack_rates);
rcu_read_unlock();
@@ -1038,7 +1075,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
RATE_MCS_ANT_POS);
- if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) {
+ if (info->band == NL80211_BAND_5GHZ || vif->p2p) {
rate = IWL_FIRST_OFDM_RATE;
} else {
rate = IWL_FIRST_CCK_RATE;
@@ -1489,7 +1526,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
rx_status.device_timestamp = le32_to_cpu(sb->system_time);
rx_status.band =
(sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
rx_status.band);
@@ -1499,5 +1536,5 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
/* pass it as regular rx to mac80211 */
- ieee80211_rx_napi(mvm->hw, skb, NULL);
+ ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index a50f4df7e..18a8474b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -229,7 +229,11 @@ void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
spin_lock_bh(&mvm->refs_lock);
- WARN_ON(!mvm->refs[ref_type]--);
+ if (WARN_ON(!mvm->refs[ref_type])) {
+ spin_unlock_bh(&mvm->refs_lock);
+ return;
+ }
+ mvm->refs[ref_type]--;
spin_unlock_bh(&mvm->refs_lock);
iwl_trans_unref(mvm->trans);
}
@@ -439,11 +443,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+ if (iwl_mvm_has_new_rx_api(mvm))
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+ if (mvm->trans->num_rx_queues > 1)
+ ieee80211_hw_set(hw, USES_RSS);
if (mvm->trans->max_skb_frags)
hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
- hw->queues = mvm->first_agg_queue;
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ hw->queues = mvm->first_agg_queue;
+ else
+ hw->queues = IEEE80211_MAX_QUEUES;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
IEEE80211_RADIOTAP_MCS_HAVE_STBC;
@@ -550,18 +562,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
- if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
- if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+ if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
+ hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+ if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
+ hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_LQ_SS_PARAMS))
- hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
+ hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
}
@@ -665,12 +677,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
hw->netdev_features |= mvm->cfg->features;
- if (!iwl_mvm_is_csum_supported(mvm))
- hw->netdev_features &= ~NETIF_F_RXCSUM;
-
- if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
- hw->netdev_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6;
+ if (!iwl_mvm_is_csum_supported(mvm)) {
+ hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
+ NETIF_F_RXCSUM);
+ /* We may support SW TX CSUM */
+ if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
+ hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
+ }
ret = ieee80211_register_hw(mvm->hw);
if (ret)
@@ -847,6 +860,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
u16 *ssn = &params->ssn;
u8 buf_size = params->buf_size;
bool amsdu = params->amsdu;
+ u16 timeout = params->timeout;
IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
sta->addr, tid, action);
@@ -887,10 +901,12 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
ret = -EINVAL;
break;
}
- ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size);
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
+ timeout);
break;
case IEEE80211_AMPDU_RX_STOP:
- ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size);
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
+ timeout);
break;
case IEEE80211_AMPDU_TX_START:
if (!iwl_enable_tx_ampdu(mvm->cfg)) {
@@ -992,6 +1008,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
+ memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
@@ -1180,6 +1197,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
flush_work(&mvm->d0i3_exit_work);
flush_work(&mvm->async_handlers_wk);
+ flush_work(&mvm->add_stream_wk);
cancel_delayed_work_sync(&mvm->fw_dump_wk);
iwl_mvm_free_fw_dump_desc(mvm);
@@ -1823,6 +1841,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+ if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
+ mvmvif->lqm_active)
+ iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
+ 0, 0);
+
/*
* If we're not associated yet, take the (new) BSSID before associating
* so the firmware knows. If we're already associated, then use the old
@@ -2342,7 +2365,8 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return;
}
- if (iwlwifi_mod_params.uapsd_disable) {
+ if (!vif->p2p &&
+ (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
return;
}
@@ -2378,6 +2402,22 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
peer_addr, action);
}
+static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta)
+{
+ struct iwl_mvm_tid_data *tid_data;
+ struct sk_buff *skb;
+ int i;
+
+ spin_lock_bh(&mvm_sta->lock);
+ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+ tid_data = &mvm_sta->tid_data[i];
+ while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
+ ieee80211_free_txskb(mvm->hw, skb);
+ }
+ spin_unlock_bh(&mvm_sta->lock);
+}
+
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -2398,6 +2438,33 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
/* if a STA is being removed, reuse its ID */
flush_work(&mvm->sta_drained_wk);
+ /*
+ * If we are in a STA removal flow and in DQA mode:
+ *
+ * This is after the sync_rcu part, so the queues have already been
+ * flushed. No more TXs on their way in mac80211's path, and no more in
+ * the queues.
+ * Also, we won't be getting any new TX frames for this station.
+ * What we might have are deferred TX frames that need to be taken care
+ * of.
+ *
+	 * Drop any still-queued deferred frames before removing the STA, and
+ * make sure the worker is no longer handling frames for this STA.
+ */
+ if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST &&
+ iwl_mvm_is_dqa_supported(mvm)) {
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
+ flush_work(&mvm->add_stream_wk);
+
+ /*
+ * No need to make sure deferred TX indication is off since the
+ * worker will already remove it if it was on
+ */
+ }
+
mutex_lock(&mvm->mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
@@ -2861,7 +2928,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
/* Set the channel info data */
- .channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ?
+ .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
PHY_BAND_24 : PHY_BAND_5,
.channel_info.channel = channel->hw_value,
.channel_info.width = PHY_VHT_CHANNEL_MODE20,
@@ -3630,6 +3697,11 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_STATION:
+ if (mvmvif->lqm_active)
+ iwl_mvm_send_lqm_cmd(vif,
+ LQM_CMD_OPERATION_STOP_MEASUREMENT,
+ 0, 0);
+
/* Schedule the time event to a bit before beacon 1,
* to make sure we're in the new channel when the
* GO/AP arrives.
@@ -3729,6 +3801,10 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
if (!vif || vif->type != NL80211_IFTYPE_STATION)
return;
+ /* Make sure we're done with the deferred traffic before flushing */
+ if (iwl_mvm_is_dqa_supported(mvm))
+ flush_work(&mvm->add_stream_wk);
+
mutex_lock(&mvm->mutex);
mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -3775,8 +3851,8 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
if (idx != 0)
return -ENOENT;
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return -ENOENT;
mutex_lock(&mvm->mutex);
@@ -3822,8 +3898,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return;
/* if beacon filtering isn't on mac80211 does it anyway */
@@ -3976,6 +4052,55 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
}
}
+void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ struct iwl_mvm_internal_rxq_notif *notif,
+ u32 size)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
+ u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return;
+
+ notif->cookie = mvm->queue_sync_cookie;
+
+ if (notif->sync)
+ atomic_set(&mvm->queue_sync_counter,
+ mvm->trans->num_rx_queues);
+
+ ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
+ goto out;
+ }
+
+ if (notif->sync)
+ ret = wait_event_timeout(notif_waitq,
+ atomic_read(&mvm->queue_sync_counter) == 0,
+ HZ);
+ WARN_ON_ONCE(!ret);
+
+out:
+ atomic_set(&mvm->queue_sync_counter, 0);
+ mvm->queue_sync_cookie++;
+}
+
+static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_internal_rxq_notif data = {
+ .type = IWL_MVM_RXQ_EMPTY,
+ .sync = 1,
+ };
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
+ mutex_unlock(&mvm->mutex);
+}
+
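A single-threaded model of the cookie/counter handshake implemented above: the
sender stamps the notification with the current cookie and preloads an atomic
counter with the number of RX queues; each queue handler decrements the counter
only if the cookie still matches, and the sender waits for zero. All names
below are stand-ins, not driver symbols, and the per-queue work runs inline
rather than in parallel:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int sync_counter;
    static unsigned int sync_cookie;

    static void notify_queues(unsigned int nqueues)
    {
            /* preload before sending so no decrement can be missed */
            atomic_store(&sync_counter, (int)nqueues);
    }

    static void queue_handler(unsigned int cookie)
    {
            if (cookie != sync_cookie) {
                    fprintf(stderr, "expired sync message\n");
                    return;
            }
            atomic_fetch_sub(&sync_counter, 1);
    }

    int main(void)
    {
            unsigned int q, nqueues = 4;

            sync_cookie++;
            notify_queues(nqueues);
            for (q = 0; q < nqueues; q++)
                    queue_handler(sync_cookie);
            printf("synced: %s\n",
                   atomic_load(&sync_counter) == 0 ? "yes" : "no");
            return 0;
    }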
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action,
@@ -4032,6 +4157,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.event_callback = iwl_mvm_mac_event_callback,
+ .sync_rx_queues = iwl_mvm_sync_rx_queues,
+
CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 9abbc93e3..ffbd41dcc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -208,7 +208,7 @@ enum iwl_power_scheme {
};
#define IWL_CONN_MAX_LISTEN_INTERVAL 10
-#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2
+#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
#ifdef CONFIG_IWLWIFI_DEBUGFS
enum iwl_dbgfs_pm_mask {
@@ -301,6 +301,8 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_PROTECT_CSA,
IWL_MVM_REF_FW_DBG_COLLECT,
IWL_MVM_REF_INIT_UCODE,
+ IWL_MVM_REF_SENDING_CMD,
+ IWL_MVM_REF_RX,
/* update debugfs.c when changing this */
@@ -453,6 +455,12 @@ struct iwl_mvm_vif {
/* TCP Checksum Offload */
netdev_features_t features;
+
+ /*
+ * link quality measurement - used to check whether this interface
+ * is in the middle of a link quality measurement
+ */
+ bool lqm_active;
};
static inline struct iwl_mvm_vif *
@@ -602,6 +610,87 @@ struct iwl_mvm_shared_mem_cfg {
u32 rxfifo_size[RX_FIFO_MAX_NUM];
u32 page_buff_addr;
u32 page_buff_size;
+ u32 rxfifo_addr;
+ u32 internal_txfifo_addr;
+ u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+};
+
+/**
+ * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
+ * @head_sn: reorder window head sn
+ * @num_stored: number of mpdus stored in the buffer
+ * @buf_size: the reorder buffer size as set by the last addba request
+ * @sta_id: sta id of this reorder buffer
+ * @queue: queue of this reorder buffer
+ * @last_amsdu: track last A-MSDU SN for duplication detection
+ * @last_sub_index: track A-MSDU sub-frame index for duplication detection
+ * @entries: list of skbs stored
+ * @reorder_time: time the packet was stored in the reorder buffer
+ * @reorder_timer: timer for frames in the reorder buffer. For an A-MSDU
+ *	it is the time of the last received sub-frame
+ * @removed: prevent timer re-arming
+ * @lock: protect reorder buffer internal state
+ * @mvm: mvm pointer, needed for frame timer context
+ */
+struct iwl_mvm_reorder_buffer {
+ u16 head_sn;
+ u16 num_stored;
+ u8 buf_size;
+ u8 sta_id;
+ int queue;
+ u16 last_amsdu;
+ u8 last_sub_index;
+ struct sk_buff_head entries[IEEE80211_MAX_AMPDU_BUF];
+ unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF];
+ struct timer_list reorder_timer;
+ bool removed;
+ spinlock_t lock;
+ struct iwl_mvm *mvm;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mvm_baid_data - BA session data
+ * @sta_id: station id
+ * @tid: tid of the session
+ * @baid baid of the session
+ * @timeout: the timeout set in the addba request
+ * @last_rx: last rx jiffies, updated only if timeout passed from last update
+ * @session_timer: timer to check if BA session expired, runs at 2 * timeout
+ * @mvm: mvm pointer, needed for timer context
+ * @reorder_buf: reorder buffer, allocated per queue
+ */
+struct iwl_mvm_baid_data {
+ struct rcu_head rcu_head;
+ u8 sta_id;
+ u8 tid;
+ u8 baid;
+ u16 timeout;
+ unsigned long last_rx;
+ struct timer_list session_timer;
+ struct iwl_mvm *mvm;
+ struct iwl_mvm_reorder_buffer reorder_buf[];
+};
+
+/*
+ * enum iwl_mvm_queue_status - queue status
+ * @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved
+ * Basically, this means that this queue can be used for any purpose
+ * @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use
+ *	This is the state of a queue that has been dedicated to some RA/TID
+ * (agg'd or not), but that hasn't yet gone through the actual enablement
+ * of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet.
+ * Note that in this state there is no requirement to already know what TID
+ * should be used with this queue, it is just marked as a queue that will
+ * be used, and shouldn't be allocated to anyone else.
+ * @IWL_MVM_QUEUE_READY: queue is ready to be used
+ * This is the state of a queue that has been fully configured (including
+ * SCD pointers, etc), has a specific RA/TID assigned to it, and can be
+ * used to send traffic.
+ */
+enum iwl_mvm_queue_status {
+ IWL_MVM_QUEUE_FREE,
+ IWL_MVM_QUEUE_RESERVED,
+ IWL_MVM_QUEUE_READY,
};
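A toy state machine for the lifecycle this enum documents; a sketch only, and
simplified in that the driver may also move a grabbed DATA queue straight from
FREE to READY without a prior reservation:

    #include <assert.h>
    #include <stdio.h>

    enum queue_status { QUEUE_FREE, QUEUE_RESERVED, QUEUE_READY };

    static enum queue_status q = QUEUE_FREE;

    static void reserve_queue(void)
    {
            assert(q == QUEUE_FREE);        /* nobody else may hold it */
            q = QUEUE_RESERVED;             /* dedicated, no traffic yet */
    }

    static void enable_queue(void)
    {
            assert(q == QUEUE_RESERVED || q == QUEUE_FREE);
            q = QUEUE_READY;                /* SCD configured, RA/TID set */
    }

    static void disable_queue(void)
    {
            assert(q == QUEUE_READY);
            q = QUEUE_FREE;
    }

    int main(void)
    {
            reserve_queue();
            enable_queue();
            disable_queue();
            printf("queue back to FREE\n");
            return 0;
    }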
struct iwl_mvm {
@@ -624,6 +713,8 @@ struct iwl_mvm {
unsigned long status;
+ u32 queue_sync_cookie;
+ atomic_t queue_sync_counter;
/*
* for beacon filtering -
* currently only one interface can be supported
@@ -656,10 +747,12 @@ struct iwl_mvm {
/* Map to HW queue */
u32 hw_queue_to_mac80211;
u8 hw_queue_refcount;
- bool setup_reserved;
+ u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+ enum iwl_mvm_queue_status status;
} queue_info[IWL_MAX_HW_QUEUES];
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
+ struct work_struct add_stream_wk; /* To add streams to queues */
atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
const char *nvm_file_name;
@@ -679,11 +772,11 @@ struct iwl_mvm {
struct iwl_rx_phy_info last_phy_info;
struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
struct work_struct sta_drained_wk;
+ unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
atomic_t pending_frames[IWL_MVM_STATION_COUNT];
u32 tfd_drained[IWL_MVM_STATION_COUNT];
u8 rx_ba_sessions;
- u32 secret_key[IWL_RSS_HASH_KEY_CNT];
/* configured by mac80211 */
u32 rts_threshold;
@@ -694,6 +787,7 @@ struct iwl_mvm {
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
enum iwl_mvm_scan_type scan_type;
enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
+ struct timer_list scan_timer;
/* max number of simultaneous scans the FW supports */
unsigned int max_scans;
@@ -903,6 +997,10 @@ struct iwl_mvm {
u32 ciphers[6];
struct iwl_mvm_tof_data tof_data;
+ struct ieee80211_vif *nan_vif;
+#define IWL_MAX_BAID 32
+ struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
+
/*
* Drop beacons from other APs in AP mode when there are no connected
* clients.
@@ -1048,7 +1146,8 @@ static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
{
return fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
+ IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) &&
+ !IWL_MVM_HW_CSUM_DISABLE;
}
static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm)
@@ -1063,7 +1162,8 @@ bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm)
{
return fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) &&
- IWL_MVM_P2P_UAPSD_STANDALONE;
+ !(iwlwifi_mod_params.uapsd_disable &
+ IWL_DISABLE_UAPSD_P2P_CLIENT);
}
static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
@@ -1115,9 +1215,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
/* Utils */
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
- enum ieee80211_band band);
+ enum nl80211_band band);
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
@@ -1224,7 +1324,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
-void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
+void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
const u8 *data, u32 count);
@@ -1297,6 +1397,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
+void iwl_mvm_scan_timeout(unsigned long data);
/* Scheduled scan */
void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
@@ -1449,26 +1550,10 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
- enum ieee80211_band band);
+ enum nl80211_band band);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
-bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
-void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
-int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
-void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum ieee80211_rssi_event_data);
-u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta);
-bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta);
-bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
- enum ieee80211_band band);
-void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb);
-
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
void
@@ -1563,6 +1648,10 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+/* Re-configure the SCD for a queue that has already been configured */
+int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
+ int tid, int frame_limit, u16 ssn);
+
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
@@ -1625,6 +1714,10 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
+void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ struct iwl_mvm_internal_rxq_notif *notif,
+ u32 size);
+void iwl_mvm_reorder_timer_expired(unsigned long data);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
@@ -1634,4 +1727,10 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
const char *errmsg);
+/* Link Quality Measurement */
+int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
+ enum iwl_lqm_cmd_operatrions operation,
+ u32 duration, u32 timeout);
+bool iwl_mvm_lqm_active(struct iwl_mvm *mvm);
+
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index d27839909..a68054f12 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -292,7 +292,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
- iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED),
+ iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
@@ -421,6 +421,21 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
+static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
+ HCMD_NAME(SHARED_MEM_CFG_CMD),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
+ HCMD_NAME(LINK_QUALITY_MEASUREMENT_CMD),
+ HCMD_NAME(LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
HCMD_NAME(CTDP_CONFIG_CMD),
@@ -449,6 +464,8 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
+ [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
+ [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
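The "*SORTED* by hex value" rule exists because these name tables are searched
with a binary search keyed on the command id. A self-contained model using
libc's bsearch(); the struct layout and the two example ids are illustrative,
not the firmware's authoritative values:

    #include <stdio.h>
    #include <stdlib.h>

    struct hcmd_name {
            unsigned char cmd_id;
            const char *name;
    };

    /* must stay sorted by cmd_id for bsearch() to work */
    static const struct hcmd_name names[] = {
            { 0x01, "LINK_QUALITY_MEASUREMENT_CMD" },
            { 0xfe, "LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF" },
    };

    static int cmp(const void *key, const void *elem)
    {
            unsigned char id = *(const unsigned char *)key;
            const struct hcmd_name *e = elem;

            return (int)id - (int)e->cmd_id;
    }

    int main(void)
    {
            unsigned char id = 0xfe;
            const struct hcmd_name *found;

            found = bsearch(&id, names, sizeof(names) / sizeof(names[0]),
                            sizeof(names[0]), cmp);
            printf("%s\n", found ? found->name : "UNKNOWN");
            return 0;
    }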
@@ -537,8 +554,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
mvm->aux_queue = 15;
- mvm->first_agg_queue = 16;
- mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
+ if (!iwl_mvm_is_dqa_supported(mvm)) {
+ mvm->first_agg_queue = 16;
+ mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
+ } else {
+ mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
+ mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
+ }
if (mvm->cfg->base_params->num_of_queues == 16) {
mvm->aux_queue = 11;
mvm->first_agg_queue = 12;
@@ -562,12 +584,15 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
+ INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
spin_lock_init(&mvm->d0i3_tx_lock);
spin_lock_init(&mvm->refs_lock);
skb_queue_head_init(&mvm->d0i3_tx);
init_waitqueue_head(&mvm->d0i3_exit_waitq);
+ atomic_set(&mvm->queue_sync_counter, 0);
+
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
/*
@@ -601,7 +626,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
- trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
+ if (iwl_mvm_is_dqa_supported(mvm))
+ trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
+ else
+ trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
trans_cfg.scd_set_active = true;
@@ -707,8 +735,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_tof_init(mvm);
- /* init RSS hash key */
- get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key));
+ setup_timer(&mvm->scan_timer, iwl_mvm_scan_timeout,
+ (unsigned long)mvm);
return op_mode;
@@ -763,6 +791,11 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
iwl_mvm_tof_clean(mvm);
+ del_timer_sync(&mvm->scan_timer);
+
+ mutex_destroy(&mvm->mutex);
+ mutex_destroy(&mvm->d0i3_suspend_mutex);
+
ieee80211_free_hw(mvm->hw);
}
@@ -904,7 +937,7 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
else if (pkt->hdr.cmd == FRAME_RELEASE)
- iwl_mvm_rx_frame_release(mvm, rxb, 0);
+ iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
else
@@ -1182,7 +1215,6 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
struct iwl_d0i3_iter_data *iter_data)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvmsta;
u32 available_tids = 0;
u8 tid;
@@ -1191,11 +1223,10 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
return false;
- ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
- if (IS_ERR_OR_NULL(ap_sta))
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
+ if (!mvmsta)
return false;
- mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
spin_lock_bh(&mvmsta->lock);
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
@@ -1606,7 +1637,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
- iwl_mvm_rx_frame_release(mvm, rxb, queue);
+ iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
pkt->hdr.group_id == DATA_PATH_GROUP))
iwl_mvm_rx_queue_notif(mvm, rxb, queue);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 6e6a56f21..95138830b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -147,7 +147,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
u8 active_cnt, idle_cnt;
/* Set the channel info data */
- cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
+ cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
PHY_BAND_24 : PHY_BAND_5);
cmd->ci.channel = chandef->chan->hw_value;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index f313910cd..7b1f6ad60 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -227,7 +227,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
}
- cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
+ cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 61d0a8cd1..81dd2f6a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -829,7 +829,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
/* Convert a ucode rate into an rs_rate object */
static int rs_rate_from_ucode_rate(const u32 ucode_rate,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct rs_rate *rate)
{
u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
@@ -848,7 +848,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
if (!(ucode_rate & RATE_MCS_HT_MSK) &&
!(ucode_rate & RATE_MCS_VHT_MSK)) {
if (num_of_ant == 1) {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
rate->type = LQ_LEGACY_A;
else
rate->type = LQ_LEGACY_G;
@@ -1043,7 +1043,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
return;
} else if (is_siso(rate)) {
/* Downgrade to Legacy if we were in SISO */
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
rate->type = LQ_LEGACY_A;
else
rate->type = LQ_LEGACY_G;
@@ -1850,7 +1850,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
rate->ant = column->ant;
if (column->mode == RS_LEGACY) {
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
rate->type = LQ_LEGACY_A;
else
rate->type = LQ_LEGACY_G;
@@ -2020,7 +2020,7 @@ static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
}
static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct rs_rate *rate, enum ieee80211_band band)
+ struct rs_rate *rate, enum nl80211_band band)
{
int index = rate->index;
bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
@@ -2126,7 +2126,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_vif *vif = mvm_sta->vif;
struct ieee80211_chanctx_conf *chanctx_conf;
- enum ieee80211_band band;
+ enum nl80211_band band;
struct iwl_rate_scale_data *window;
struct rs_rate *rate = &tbl->rate;
enum tpc_action action;
@@ -2148,7 +2148,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
rcu_read_lock();
chanctx_conf = rcu_dereference(vif->chanctx_conf);
if (WARN_ON(!chanctx_conf))
- band = IEEE80211_NUM_BANDS;
+ band = NUM_NL80211_BANDS;
else
band = chanctx_conf->def.chan->band;
rcu_read_unlock();
@@ -2606,7 +2606,7 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm,
rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
- else if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ else if (lq_sta->band == NL80211_BAND_5GHZ)
rate->type = LQ_LEGACY_A;
else
rate->type = LQ_LEGACY_G;
@@ -2623,7 +2623,7 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm,
} else {
lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
- if (lq_sta->band == IEEE80211_BAND_5GHZ) {
+ if (lq_sta->band == NL80211_BAND_5GHZ) {
lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
lq_sta->optimal_nentries =
ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
@@ -2679,7 +2679,7 @@ static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
static void rs_get_initial_rate(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct rs_rate *rate)
{
int i, nentries;
@@ -2714,7 +2714,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
rate->index = find_first_bit(&lq_sta->active_legacy_rate,
BITS_PER_LONG);
- if (band == IEEE80211_BAND_5GHZ) {
+ if (band == NL80211_BAND_5GHZ) {
rate->type = LQ_LEGACY_A;
initial_rates = rs_optimal_rates_5ghz_legacy;
nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
@@ -2814,7 +2814,7 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
static void rs_initialize_lq(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
- enum ieee80211_band band,
+ enum nl80211_band band,
bool init)
{
struct iwl_scale_tbl_info *tbl;
@@ -3097,7 +3097,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
* Called after adding a new station to initialize rate scaling
*/
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum ieee80211_band band, bool init)
+ enum nl80211_band band, bool init)
{
int i, j;
struct ieee80211_hw *hw = mvm->hw;
@@ -3203,7 +3203,7 @@ static void rs_rate_update(void *mvm_r,
#ifdef CONFIG_MAC80211_DEBUGFS
static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
struct iwl_lq_cmd *lq_cmd,
- enum ieee80211_band band,
+ enum nl80211_band band,
u32 ucode_rate)
{
struct rs_rate rate;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index bdb6f2d8d..90d046fb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -305,7 +305,7 @@ struct iwl_lq_sta {
bool stbc_capable; /* Tx STBC is supported by chip and Rx by STA */
bool bfer_capable; /* Remote supports beamformee and we BFer */
- enum ieee80211_band band;
+ enum nl80211_band band;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
unsigned long active_legacy_rate;
@@ -358,7 +358,7 @@ struct iwl_lq_sta {
/* Initialize station's rate scaling information after adding station */
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum ieee80211_band band, bool init);
+ enum nl80211_band band, bool init);
/* Notify RS about Tx status */
void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 485cfc1a4..ab7f7eda9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -97,6 +97,7 @@ void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* Adds the rxb to a new skb and give it to mac80211
*/
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct napi_struct *napi,
struct sk_buff *skb,
struct ieee80211_hdr *hdr, u16 len,
@@ -131,7 +132,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
fraglen, rxb->truesize);
}
- ieee80211_rx_napi(mvm->hw, skb, napi);
+ ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
/*
@@ -271,6 +272,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
u32 rate_n_flags;
u32 rx_pkt_status;
u8 crypt_len = 0;
+ bool take_ref;
phy_info = &mvm->last_phy_info;
rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
@@ -319,7 +321,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
rx_status->band =
(phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
rx_status->freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
rx_status->band);
@@ -453,8 +455,26 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED))
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
- crypt_len, rxb);
+ if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)))
+ rx_status->boottime_ns = ktime_get_boot_ns();
+
+ /* Take a reference briefly to kick off a d0i3 entry delay so
+ * we can handle bursts of RX packets without toggling the
+ * state too often. But don't do this for beacons if we are
+ * going to idle because the beacon filtering changes we make
+ * cause the firmware to send us collateral beacons. */
+ take_ref = !(test_bit(STATUS_TRANS_GOING_IDLE, &mvm->trans->status) &&
+ ieee80211_is_beacon(hdr->frame_control));
+
+ if (take_ref)
+ iwl_mvm_ref(mvm, IWL_MVM_REF_RX);
+
+ iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len,
+ ampdu_status, crypt_len, rxb);
+
+ if (take_ref)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_RX);
}
static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 9a54f2d2a..2c61516d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -210,7 +210,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
if (iwl_mvm_check_pn(mvm, skb, queue, sta))
kfree_skb(skb);
else
- ieee80211_rx_napi(mvm->hw, skb, napi);
+ ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
@@ -294,10 +294,15 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+ u16 flags = le16_to_cpu(desc->l3l4_flags);
+ u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
+ IWL_RX_L3_PROTO_POS);
if (mvmvif->features & NETIF_F_RXCSUM &&
- desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_IP_HDR_CSUM_OK) &&
- desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_TCP_UDP_CSUM_OK))
+ flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
+ (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
+ l3_prot == IWL_RX_L3_TYPE_IPV6 ||
+ l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -390,6 +395,150 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
return ret;
}
+/*
+ * Returns true if sn2 - buffer_size < sn1 < sn2.
+ * To be used only to compare the reorder buffer head with the NSSN.
+ * We fully trust NSSN unless it is behind us due to reorder timeout.
+ * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
+ */
+static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
+{
+ return ieee80211_sn_less(sn1, sn2) &&
+ !ieee80211_sn_less(sn1, sn2 - buffer_size);
+}
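All of the reorder logic rides on 12-bit 802.11 sequence numbers, so the
comparison must wrap at 4096. A stand-alone sketch of the arithmetic: sn_less()
mirrors mac80211's ieee80211_sn_less(), and SN_MASK plus both function names
are local stand-ins:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SN_MASK 0xfff   /* 802.11 sequence numbers are 12 bits */

    static bool sn_less(uint16_t sn1, uint16_t sn2)
    {
            /* true if sn1 precedes sn2 on the 4096-entry SN circle */
            return ((sn1 - sn2) & SN_MASK) > (SN_MASK >> 1);
    }

    static bool is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buf_size)
    {
            /* the same window test as iwl_mvm_is_sn_less() above */
            return sn_less(sn1, sn2) && !sn_less(sn1, sn2 - buf_size);
    }

    int main(void)
    {
            /* head_sn just below the wrap point, NSSN just past it */
            printf("%d\n", is_sn_less(4090, 5, 64));    /* 1: in window */
            /* NSSN behind head_sn by less than buf_size: ignored */
            printf("%d\n", is_sn_less(10, 5, 64));      /* 0 */
            return 0;
    }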
+
+#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
+
+static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct napi_struct *napi,
+ struct iwl_mvm_reorder_buffer *reorder_buf,
+ u16 nssn)
+{
+ u16 ssn = reorder_buf->head_sn;
+
+ lockdep_assert_held(&reorder_buf->lock);
+
+ /* ignore nssn smaller than head sn - this can happen due to timeout */
+ if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
+ return;
+
+ while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
+ int index = ssn % reorder_buf->buf_size;
+ struct sk_buff_head *skb_list = &reorder_buf->entries[index];
+ struct sk_buff *skb;
+
+ ssn = ieee80211_sn_inc(ssn);
+
+ /* holes are valid since nssn indicates frames were received. */
+ if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list))
+ continue;
+ /* Empty the list. Will have more than one frame for A-MSDU */
+ while ((skb = __skb_dequeue(skb_list))) {
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
+ reorder_buf->queue,
+ sta);
+ reorder_buf->num_stored--;
+ }
+ }
+ reorder_buf->head_sn = nssn;
+
+ if (reorder_buf->num_stored && !reorder_buf->removed) {
+ u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
+
+ while (!skb_peek_tail(&reorder_buf->entries[index]))
+ index = (index + 1) % reorder_buf->buf_size;
+ /* modify timer to match next frame's expiration time */
+ mod_timer(&reorder_buf->reorder_timer,
+ reorder_buf->reorder_time[index] + 1 +
+ RX_REORDER_BUF_TIMEOUT_MQ);
+ } else {
+ del_timer(&reorder_buf->reorder_timer);
+ }
+}
+
+void iwl_mvm_reorder_timer_expired(unsigned long data)
+{
+ struct iwl_mvm_reorder_buffer *buf = (void *)data;
+ int i;
+ u16 sn = 0, index = 0;
+ bool expired = false;
+
+ spin_lock_bh(&buf->lock);
+
+ if (!buf->num_stored || buf->removed) {
+ spin_unlock_bh(&buf->lock);
+ return;
+ }
+
+ for (i = 0; i < buf->buf_size ; i++) {
+ index = (buf->head_sn + i) % buf->buf_size;
+
+ if (!skb_peek_tail(&buf->entries[index]))
+ continue;
+ if (!time_after(jiffies, buf->reorder_time[index] +
+ RX_REORDER_BUF_TIMEOUT_MQ))
+ break;
+ expired = true;
+ sn = ieee80211_sn_add(buf->head_sn, i + 1);
+ }
+
+ if (expired) {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]);
+ /* SN is set to the last expired frame + 1 */
+ iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
+ rcu_read_unlock();
+ } else if (buf->num_stored) {
+ /*
+ * If no frame expired and there are stored frames, index is now
+		 * pointing to the first unexpired frame - re-arm the timer to
+		 * match that frame's expiration time.
+ */
+ mod_timer(&buf->reorder_timer,
+ buf->reorder_time[index] +
+ 1 + RX_REORDER_BUF_TIMEOUT_MQ);
+ }
+ spin_unlock_bh(&buf->lock);
+}
+
+static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
+ struct iwl_mvm_delba_data *data)
+{
+ struct iwl_mvm_baid_data *ba_data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_reorder_buffer *reorder_buf;
+ u8 baid = data->baid;
+
+ if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN_ON_ONCE(!ba_data))
+ goto out;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ goto out;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ /* release all frames that are in the reorder buffer to the stack */
+ spin_lock_bh(&reorder_buf->lock);
+ iwl_mvm_release_frames(mvm, sta, NULL, reorder_buf,
+ ieee80211_sn_add(reorder_buf->head_sn,
+ reorder_buf->buf_size));
+ spin_unlock_bh(&reorder_buf->lock);
+ del_timer_sync(&reorder_buf->reorder_timer);
+
+out:
+ rcu_read_unlock();
+}
+
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
int queue)
{
@@ -400,15 +549,184 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
notif = (void *)pkt->data;
internal_notif = (void *)notif->payload;
+ if (internal_notif->sync) {
+ if (mvm->queue_sync_cookie != internal_notif->cookie) {
+ WARN_ONCE(1,
+ "Received expired RX queue sync message\n");
+ return;
+ }
+ atomic_dec(&mvm->queue_sync_counter);
+ }
+
switch (internal_notif->type) {
+ case IWL_MVM_RXQ_EMPTY:
+ break;
case IWL_MVM_RXQ_NOTIF_DEL_BA:
- /* TODO */
+ iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
break;
default:
WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
}
}
+/*
+ * Returns true if the MPDU was buffered/dropped, false if it should be passed
+ * to the upper layer.
+ */
+static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
+ struct napi_struct *napi,
+ int queue,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct iwl_rx_mpdu_desc *desc)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_mvm_sta *mvm_sta;
+ struct iwl_mvm_baid_data *baid_data;
+ struct iwl_mvm_reorder_buffer *buffer;
+ struct sk_buff *tail;
+ u32 reorder = le32_to_cpu(desc->reorder_data);
+ bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
+ u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ u8 sub_frame_idx = desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+ int index;
+ u16 nssn, sn;
+ u8 baid;
+
+ baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
+ IWL_RX_MPDU_REORDER_BAID_SHIFT;
+
+ if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
+ return false;
+
+ /* no sta yet */
+ if (WARN_ON(IS_ERR_OR_NULL(sta)))
+ return false;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ /* not a data packet */
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return false;
+
+ if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
+ return false;
+
+ baid_data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN(!baid_data,
+ "Received baid %d, but no data exists for this BAID\n", baid))
+ return false;
+ if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
+ "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
+ baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
+ tid))
+ return false;
+
+ nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
+ sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
+ IWL_RX_MPDU_REORDER_SN_SHIFT;
+
+ buffer = &baid_data->reorder_buf[queue];
+
+ spin_lock_bh(&buffer->lock);
+
+ /*
+ * If there was a significant jump in the nssn - adjust.
+ * If the SN is smaller than the NSSN it might need to first go into
+ * the reorder buffer, in which case we just release up to it and the
+	 * rest of the function will take care of storing it and releasing up
+	 * to the nssn.
+ */
+ if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
+ buffer->buf_size)) {
+ u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
+
+ iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
+ }
+
+	/* drop any outdated packets */
+ if (ieee80211_sn_less(sn, buffer->head_sn))
+ goto drop;
+
+ /* release immediately if allowed by nssn and no stored frames */
+ if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
+ if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
+ buffer->buf_size))
+ buffer->head_sn = nssn;
+ /* No need to update AMSDU last SN - we are moving the head */
+ spin_unlock_bh(&buffer->lock);
+ return false;
+ }
+
+ index = sn % buffer->buf_size;
+
+ /*
+	 * Check if we already stored this frame.
+	 * An A-MSDU is either received in full or not at all, so the logic is
+	 * simple: if there are frames at this position in the buffer and the
+	 * last stored A-MSDU frame had a different SN, this is a
+	 * retransmission. If the SN is the same, an incrementing sub-frame
+	 * index means it is the same A-MSDU - otherwise it is a retransmission.
+ */
+ tail = skb_peek_tail(&buffer->entries[index]);
+ if (tail && !amsdu)
+ goto drop;
+ else if (tail && (sn != buffer->last_amsdu ||
+ buffer->last_sub_index >= sub_frame_idx))
+ goto drop;
+
+ /* put in reorder buffer */
+ __skb_queue_tail(&buffer->entries[index], skb);
+ buffer->num_stored++;
+ buffer->reorder_time[index] = jiffies;
+
+ if (amsdu) {
+ buffer->last_amsdu = sn;
+ buffer->last_sub_index = sub_frame_idx;
+ }
+
+ iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
+ spin_unlock_bh(&buffer->lock);
+ return true;
+
+drop:
+ kfree_skb(skb);
+ spin_unlock_bh(&buffer->lock);
+ return true;
+}
+
+static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
+{
+ unsigned long now = jiffies;
+ unsigned long timeout;
+ struct iwl_mvm_baid_data *data;
+
+ rcu_read_lock();
+
+ data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN_ON(!data))
+ goto out;
+
+ if (!data->timeout)
+ goto out;
+
+ timeout = data->timeout;
+ /*
+	 * Do not update last_rx on every frame, to avoid cache bouncing
+	 * between the rx queues.
+	 * Update it at most once per timeout period. Worst case the session
+	 * will expire after ~ 2 * timeout, which doesn't matter that much.
+ */
+ if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
+ /* Update is atomic */
+ data->last_rx = now;
+
+out:
+ rcu_read_unlock();
+}
+
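A userspace model of that rate-limited update: last_rx is rewritten at most
once per timeout period, so several RX queues receiving for the same BA
session don't bounce its cache line. jiffies is modeled by a plain counter and
every name is illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long jiffies;   /* stand-in for the kernel tick */
    static unsigned long last_rx;
    static const unsigned long timeout = 100;

    static bool time_before(unsigned long a, unsigned long b)
    {
            return (long)(a - b) < 0;
    }

    static void agg_rx_received(void)
    {
            if (time_before(last_rx + timeout, jiffies))
                    last_rx = jiffies;      /* one store per period */
    }

    int main(void)
    {
            int stores = 0;

            for (jiffies = 1; jiffies <= 1000; jiffies++) {
                    unsigned long prev = last_rx;

                    agg_rx_received();
                    if (last_rx != prev)
                            stores++;
            }
            printf("%d stores for 1000 frames\n", stores);
            return 0;
    }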
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@@ -451,8 +769,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
- rx_status->band = desc->channel > 14 ? IEEE80211_BAND_5GHZ :
- IEEE80211_BAND_2GHZ;
+ rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
rx_status->band);
iwl_mvm_get_signal_strength(mvm, desc, rx_status);
@@ -479,6 +797,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
+ IWL_RX_MPDU_REORDER_BAID_MASK) >>
+ IWL_RX_MPDU_REORDER_BAID_SHIFT);
/*
* We have tx blocked stations (with CS bit). If we heard
@@ -531,6 +852,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
+ if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
+ iwl_mvm_agg_rx_received(mvm, baid);
}
/*
@@ -588,12 +911,42 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
/* TODO: PHY info - gscan */
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+ if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
rcu_read_unlock();
}
-void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
+void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
- /* TODO */
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_frame_release *release = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_reorder_buffer *reorder_buf;
+ struct iwl_mvm_baid_data *ba_data;
+
+ int baid = release->baid;
+
+ if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN_ON_ONCE(!ba_data))
+ goto out;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ goto out;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ spin_lock_bh(&reorder_buf->lock);
+ iwl_mvm_release_frames(mvm, sta, napi, reorder_buf,
+ le16_to_cpu(release->nssn));
+ spin_unlock_bh(&reorder_buf->lock);
+
+out:
+ rcu_read_unlock();
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 09eb72c4a..e78fc567f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -70,6 +70,7 @@
#include "mvm.h"
#include "fw-api-scan.h"
+#include "iwl-io.h"
#define IWL_DENSE_EBS_SCAN_RATIO 5
#define IWL_SPARSE_EBS_SCAN_RATIO 1
@@ -162,16 +163,16 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
return cpu_to_le16(rx_chain);
}
-static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band)
+static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band)
{
- if (band == IEEE80211_BAND_2GHZ)
+ if (band == NL80211_BAND_2GHZ)
return cpu_to_le32(PHY_BAND_24);
else
return cpu_to_le32(PHY_BAND_5);
}
static inline __le32
-iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
+iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
bool no_cck)
{
u32 tx_ant;
@@ -181,7 +182,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
mvm->scan_last_antenna_idx);
tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
- if (band == IEEE80211_BAND_2GHZ && !no_cck)
+ if (band == NL80211_BAND_2GHZ && !no_cck)
return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
tx_ant);
else
@@ -398,6 +399,10 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
ieee80211_scan_completed(mvm->hw,
scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ del_timer(&mvm->scan_timer);
+ } else {
+ IWL_ERR(mvm,
+ "got scan complete notification but no scan is running\n");
}
mvm->last_ebs_successful =
@@ -586,14 +591,14 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
TX_CMD_FLG_BT_DIS);
tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
- IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ,
no_cck);
tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
TX_CMD_FLG_BT_DIS);
tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
- IEEE80211_BAND_5GHZ,
+ NL80211_BAND_5GHZ,
no_cck);
tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
}
@@ -690,19 +695,19 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* Insert ds parameter set element on 2.4 GHz band */
newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
- ies->ies[IEEE80211_BAND_2GHZ],
- ies->len[IEEE80211_BAND_2GHZ],
+ ies->ies[NL80211_BAND_2GHZ],
+ ies->len[NL80211_BAND_2GHZ],
pos);
params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
pos = newpos;
- memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
- ies->len[IEEE80211_BAND_5GHZ]);
+ memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
+ ies->len[NL80211_BAND_5GHZ]);
params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
params->preq.band_data[1].len =
- cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
- pos += ies->len[IEEE80211_BAND_5GHZ];
+ cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
+ pos += ies->len[NL80211_BAND_5GHZ];
memcpy(pos, ies->common_ies, ies->common_ie_len);
params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
@@ -916,10 +921,10 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
unsigned int rates = 0;
int i;
- band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
+ band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
for (i = 0; i < band->n_bitrates; i++)
rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
- band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+ band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
for (i = 0; i < band->n_bitrates; i++)
rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
@@ -934,8 +939,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
struct iwl_scan_config *scan_config;
struct ieee80211_supported_band *band;
int num_channels =
- mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
- mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+ mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
+ mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
int ret, i, j = 0, cmd_size;
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
@@ -961,6 +966,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
SCAN_CONFIG_FLAG_SET_TX_CHAINS |
SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+ SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
SCAN_CONFIG_FLAG_SET_ALL_TIMES |
SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
SCAN_CONFIG_FLAG_SET_MAC_ADDR |
@@ -988,10 +994,10 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
IWL_CHANNEL_FLAG_EBS_ADD |
IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
- band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
+ band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
for (i = 0; i < band->n_channels; i++, j++)
scan_config->channel_array[j] = band->channels[i].hw_value;
- band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+ band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
for (i = 0; i < band->n_channels; i++, j++)
scan_config->channel_array[j] = band->channels[i].hw_value;
@@ -1216,6 +1222,18 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
return -EIO;
}
+#define SCAN_TIMEOUT (20 * HZ)
+
+void iwl_mvm_scan_timeout(unsigned long data)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)data;
+
+ IWL_ERR(mvm, "regular scan timed out\n");
+
+ del_timer(&mvm->scan_timer);
+ iwl_force_nmi(mvm->trans);
+}
+
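This timer is a plain software watchdog: armed when a regular scan starts (see
the mod_timer() call in iwl_mvm_reg_scan_start further down), deleted on every
completion path, and treated as a stuck-firmware signal if it ever fires. A
minimal userspace analogue built on alarm(); the NMI is only modeled:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    #define SCAN_TIMEOUT_SECS 20

    static void scan_timed_out(int sig)
    {
            static const char msg[] = "regular scan timed out\n";

            (void)sig;
            write(STDERR_FILENO, msg, sizeof(msg) - 1); /* signal-safe */
            _exit(1);       /* stands in for iwl_force_nmi() */
    }

    static void scan_start(void)
    {
            signal(SIGALRM, scan_timed_out);
            alarm(SCAN_TIMEOUT_SECS);       /* arm the watchdog */
    }

    static void scan_complete(void)
    {
            alarm(0);       /* disarm on any completion path */
            printf("scan completed in time\n");
    }

    int main(void)
    {
            scan_start();
            sleep(1);       /* pretend the firmware is scanning */
            scan_complete();
            return 0;
    }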
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@@ -1295,6 +1313,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
+ mod_timer(&mvm->scan_timer, jiffies + SCAN_TIMEOUT);
+
return 0;
}
@@ -1412,6 +1432,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
ieee80211_scan_completed(mvm->hw, aborted);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ del_timer(&mvm->scan_timer);
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
@@ -1607,6 +1628,7 @@ out:
* to release the scan reference here.
*/
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ del_timer(&mvm->scan_timer);
if (notify)
ieee80211_scan_completed(mvm->hw, true);
} else if (notify) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index c2def1232..443a42855 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -193,7 +193,7 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
}
}
- if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) {
+ if (sta) {
BUILD_BUG_ON(sizeof(sf_full_timeout) !=
sizeof(__le32) * SF_NUM_SCENARIO *
SF_NUM_TIMEOUT_TYPES);
@@ -220,9 +220,6 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
struct ieee80211_sta *sta;
int ret = 0;
- if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13)
- sf_cmd.state = cpu_to_le32(new_state);
-
if (mvm->cfg->disable_dummy_notification)
sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
@@ -235,8 +232,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
switch (new_state) {
case SF_UNINIT:
- if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13)
- iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
+ iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
break;
case SF_FULL_ON:
if (sta_id == IWL_MVM_STATION_COUNT) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index ef99942d7..b23ab4a45 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -111,7 +111,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- bool update)
+ bool update, unsigned int flags)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd add_sta_cmd = {
@@ -126,9 +126,12 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
- if (!update) {
+ if (!update || (flags & STA_MODIFY_QUEUES)) {
add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+
+ if (flags & STA_MODIFY_QUEUES)
+ add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
}
switch (sta->bandwidth) {
@@ -220,6 +223,39 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
+static void iwl_mvm_rx_agg_session_expired(unsigned long data)
+{
+ struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
+ struct iwl_mvm_baid_data *ba_data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvm_sta;
+ unsigned long timeout;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(*rcu_ptr);
+
+ if (WARN_ON(!ba_data))
+ goto unlock;
+
+ if (!ba_data->timeout)
+ goto unlock;
+
+ timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
+ if (time_is_after_jiffies(timeout)) {
+ mod_timer(&ba_data->session_timer, timeout);
+ goto unlock;
+ }
+
+ /* Timer expired */
+ sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
+ sta->addr, ba_data->tid);
+unlock:
+ rcu_read_unlock();
+}
+
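A sketch of that lazy-expiry idiom: the timer fires roughly 2 * timeout after
the last activity; if frames arrived in the meantime it is re-armed for the
new deadline instead of tearing the BA session down. The jiffies stand-in and
all names are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long jiffies;
    static unsigned long last_rx;
    static const unsigned long timeout = 50;    /* session timeout */

    static bool session_timer_fired(void)
    {
            unsigned long expiry = last_rx + 2 * timeout;

            if ((long)(jiffies - expiry) < 0) {
                    /* traffic seen since arming: re-arm, keep session */
                    printf("re-arm for %lu\n", expiry);
                    return false;
            }
            printf("session expired at %lu\n", jiffies);
            return true;
    }

    int main(void)
    {
            last_rx = 10;
            jiffies = 60;
            session_timer_fired();  /* re-arms: expiry 110 > 60 */
            jiffies = 120;
            session_timer_fired();  /* now truly expired */
            return 0;
    }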
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
@@ -274,6 +310,229 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}
+static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, u8 ac, int tid,
+ struct ieee80211_hdr *hdr)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = iwl_mvm_ac_to_tx_fifo[ac],
+ .sta_id = mvmsta->sta_id,
+ .tid = tid,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+ u8 mac_queue = mvmsta->vif->hw_queue[ac];
+ int queue = -1;
+ int ssn;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ /*
+ * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
+ * exists
+ */
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+ queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
+ IWL_MVM_DQA_MAX_MGMT_QUEUE);
+ if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
+ IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
+ queue);
+
+ /* If no such queue is found, we'll use a DATA queue instead */
+ }
+
+ if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+ queue = mvmsta->reserved_queue;
+ IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
+ }
+
+ if (queue < 0)
+ queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
+
+ /*
+ * Mark TXQ as ready, even though it hasn't been fully configured yet,
+ * to make sure no one else takes it.
+ * This lets us avoid re-acquiring the lock at the end of the
+ * configuration. On error we'll mark it back as free.
+ */
+ if (queue >= 0)
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* TODO: support shared queues for same RA */
+ if (queue < 0)
+ return -ENOSPC;
+
+ /*
+ * Actual en/disablement of aggregations is through the ADD_STA HCMD,
+ * but for configuring the SCD to send A-MPDUs we need to mark the queue
+ * as aggregatable.
+ * Mark all DATA queues as eligible for aggregation at some point.
+ */
+ cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
+ queue, mvmsta->sta_id, tid);
+
+ ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
+ wdg_timeout);
+
+ spin_lock_bh(&mvmsta->lock);
+ mvmsta->tid_data[tid].txq_id = queue;
+ mvmsta->tfd_queue_msk |= BIT(queue);
+
+ if (mvmsta->reserved_queue == queue)
+ mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
+ spin_unlock_bh(&mvmsta->lock);
+
+ ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
+ if (ret)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
+
+ return ret;
+}
+
+static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
+{
+ if (tid == IWL_MAX_TID_COUNT)
+ return IEEE80211_AC_VO; /* MGMT */
+
+ return tid_to_mac80211_ac[tid];
+}
+
+static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, int tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff_head deferred_tx;
+ u8 mac_queue;
+ bool no_queue = false; /* Marks if there is a problem with the queue */
+ u8 ac;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ skb = skb_peek(&tid_data->deferred_tx_frames);
+ if (!skb)
+ return;
+ hdr = (void *)skb->data;
+
+ ac = iwl_mvm_tid_to_ac_queue(tid);
+ mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
+
+ if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
+ iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
+ IWL_ERR(mvm,
+ "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
+ mvmsta->sta_id, tid);
+
+ /*
+ * Mark the queue as problematic so the deferred traffic is
+ * freed later, as we can do nothing with it
+ */
+ no_queue = true;
+ }
+
+ __skb_queue_head_init(&deferred_tx);
+
+ /* Disable bottom-halves when entering TX path */
+ local_bh_disable();
+ spin_lock(&mvmsta->lock);
+ skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
+ spin_unlock(&mvmsta->lock);
+
+ while ((skb = __skb_dequeue(&deferred_tx)))
+ if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
+ ieee80211_free_txskb(mvm->hw, skb);
+ local_bh_enable();
+
+ /* Wake queue */
+ iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
+}
+
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
+ add_stream_wk);
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ unsigned long deferred_tid_traffic;
+ int sta_id, tid;
+
+ mutex_lock(&mvm->mutex);
+
+ /* Go over all stations with deferred traffic */
+ for_each_set_bit(sta_id, mvm->sta_deferred_frames,
+ IWL_MVM_STATION_COUNT) {
+ clear_bit(sta_id, mvm->sta_deferred_frames);
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
+
+ for_each_set_bit(tid, &deferred_tid_traffic,
+ IWL_MAX_TID_COUNT + 1)
+ iwl_mvm_tx_deferred_stream(mvm, sta, tid);
+ }
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ enum nl80211_iftype vif_type)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ int queue;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ /* Make sure we have free resources for this STA */
+ if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
+ !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
+ (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
+ IWL_MVM_QUEUE_FREE))
+ queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
+ else
+ queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
+ if (queue < 0) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_ERR(mvm, "No available queues for new station\n");
+ return -ENOSPC;
+ }
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ mvmsta->reserved_queue = queue;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
+ queue, mvmsta->sta_id);
+
+ return 0;
+}
+
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -314,18 +573,29 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_tdls_sta_init(mvm, sta);
if (ret)
return ret;
- } else {
+ } else if (!iwl_mvm_is_dqa_supported(mvm)) {
for (i = 0; i < IEEE80211_NUM_ACS; i++)
if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
}
/* for HW restart - reset everything but the sequence number */
- for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
u16 seq = mvm_sta->tid_data[i].seq_number;
memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
mvm_sta->tid_data[i].seq_number = seq;
+
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ continue;
+
+ /*
+ * Mark all queues for this STA as unallocated and defer TX
+ * frames until the queue is allocated
+ */
+ mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+ skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
}
+ mvm_sta->deferred_traffic_tid_map = 0;
mvm_sta->agg_tids = 0;
if (iwl_mvm_has_new_rx_api(mvm) &&
@@ -338,7 +608,14 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvm_sta->dup_data = dup_data;
}
- ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ ret = iwl_mvm_reserve_sta_stream(mvm, sta,
+ ieee80211_vif_type_p2p(vif));
+ if (ret)
+ goto err;
+ }
+
+ ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
if (ret)
goto err;
@@ -364,7 +641,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- return iwl_mvm_sta_send_to_fw(mvm, sta, true);
+ return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
}
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
@@ -509,6 +786,26 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
mutex_unlock(&mvm->mutex);
}
+static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_sta *mvm_sta)
+{
+ int ac;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+ if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
+ continue;
+
+ ac = iwl_mvm_tid_to_ac_queue(i);
+ iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
+ vif->hw_queue[ac], i, 0);
+ mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+ }
+}
+
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -537,6 +834,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
return ret;
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
+ /* If DQA is supported - the queues can be disabled now */
+ if (iwl_mvm_is_dqa_supported(mvm))
+ iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+
/* if we are associated - we can't remove the AP STA now */
if (vif->bss_conf.assoc)
return ret;
@@ -750,6 +1051,33 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = IWL_MVM_TX_FIFO_VO,
+ .sta_id = mvmvif->bcast_sta.sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ int queue;
+
+ if ((vif->type == NL80211_IFTYPE_AP) &&
+ (mvmvif->bcast_sta.tfd_queue_msk &
+ BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
+ queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
+ (mvmvif->bcast_sta.tfd_queue_msk &
+ BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
+ queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
+ return -EINVAL;
+
+ iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
+ wdg_timeout);
+ }
+
if (vif->type == NL80211_IFTYPE_ADHOC)
baddr = vif->bss_conf.bssid;
@@ -778,20 +1106,28 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u32 qmask;
+ u32 qmask = 0;
lockdep_assert_held(&mvm->mutex);
- qmask = iwl_mvm_mac_get_queues_mask(vif);
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ qmask = iwl_mvm_mac_get_queues_mask(vif);
- /*
- * The firmware defines the TFD queue mask to only be relevant
- * for *unicast* queues, so the multicast (CAB) queue shouldn't
- * be included.
- */
- if (vif->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * The firmware defines the TFD queue mask to only be relevant
+ * for *unicast* queues, so the multicast (CAB) queue shouldn't
+ * be included.
+ */
qmask &= ~BIT(vif->cab_queue);
+ if (iwl_mvm_is_dqa_supported(mvm))
+ qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
+ } else if (iwl_mvm_is_dqa_supported(mvm) &&
+ vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
+ }
+
return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
ieee80211_vif_type_p2p(vif));
}
@@ -849,11 +1185,92 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
#define IWL_MAX_RX_BA_SESSIONS 16
+static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
+{
+ struct iwl_mvm_delba_notif notif = {
+ .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
+ .metadata.sync = 1,
+ .delba.baid = baid,
+ };
+ iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+};
+
+static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
+ struct iwl_mvm_baid_data *data)
+{
+ int i;
+
+ iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
+
+ for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+ int j;
+ struct iwl_mvm_reorder_buffer *reorder_buf =
+ &data->reorder_buf[i];
+
+ spin_lock_bh(&reorder_buf->lock);
+ if (likely(!reorder_buf->num_stored)) {
+ spin_unlock_bh(&reorder_buf->lock);
+ continue;
+ }
+
+ /*
+ * This shouldn't happen in regular DELBA since the internal
+ * delBA notification should trigger a release of all frames in
+ * the reorder buffer.
+ */
+ WARN_ON(1);
+
+ for (j = 0; j < reorder_buf->buf_size; j++)
+ __skb_queue_purge(&reorder_buf->entries[j]);
+ /*
+ * Prevent timer re-arm. This prevents a very far-fetched case
+ * where we timed out on the notification: RX frames that were
+ * already pending in the RX queue before the notification might
+ * get processed between now and the actual deletion, and would
+ * re-arm the timer even though we are deleting the reorder
+ * buffer.
+ */
+ reorder_buf->removed = true;
+ spin_unlock_bh(&reorder_buf->lock);
+ del_timer_sync(&reorder_buf->reorder_timer);
+ }
+}
+
+static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
+ u32 sta_id,
+ struct iwl_mvm_baid_data *data,
+ u16 ssn, u8 buf_size)
+{
+ int i;
+
+ for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+ struct iwl_mvm_reorder_buffer *reorder_buf =
+ &data->reorder_buf[i];
+ int j;
+
+ reorder_buf->num_stored = 0;
+ reorder_buf->head_sn = ssn;
+ reorder_buf->buf_size = buf_size;
+ /* rx reorder timer */
+ reorder_buf->reorder_timer.function =
+ iwl_mvm_reorder_timer_expired;
+ reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
+ init_timer(&reorder_buf->reorder_timer);
+ spin_lock_init(&reorder_buf->lock);
+ reorder_buf->mvm = mvm;
+ reorder_buf->queue = i;
+ reorder_buf->sta_id = sta_id;
+ for (j = 0; j < reorder_buf->buf_size; j++)
+ __skb_queue_head_init(&reorder_buf->entries[j]);
+ }
+}
+
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u16 ssn, bool start, u8 buf_size)
+ int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_baid_data *baid_data = NULL;
int ret;
u32 status;
@@ -864,6 +1281,19 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return -ENOSPC;
}
+ if (iwl_mvm_has_new_rx_api(mvm) && start) {
+ /*
+ * Allocate here so that if allocation fails we can bail out early
+ * before starting the BA session in the firmware
+ */
+ baid_data = kzalloc(sizeof(*baid_data) +
+ mvm->trans->num_rx_queues *
+ sizeof(baid_data->reorder_buf[0]),
+ GFP_KERNEL);
+ if (!baid_data)
+ return -ENOMEM;
+ }
+
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
cmd.sta_id = mvm_sta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
@@ -882,7 +1312,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
iwl_mvm_add_sta_cmd_size(mvm),
&cmd, &status);
if (ret)
- return ret;
+ goto out_free;
switch (status & IWL_ADD_STA_STATUS_MASK) {
case ADD_STA_SUCCESS:
@@ -900,14 +1330,75 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
break;
}
- if (!ret) {
- if (start)
- mvm->rx_ba_sessions++;
- else if (mvm->rx_ba_sessions > 0)
- /* check that restart flow didn't zero the counter */
- mvm->rx_ba_sessions--;
+ if (ret)
+ goto out_free;
+
+ if (start) {
+ u8 baid;
+
+ mvm->rx_ba_sessions++;
+
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return 0;
+
+ if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
+ IWL_ADD_STA_BAID_SHIFT);
+ baid_data->baid = baid;
+ baid_data->timeout = timeout;
+ baid_data->last_rx = jiffies;
+ init_timer(&baid_data->session_timer);
+ baid_data->session_timer.function =
+ iwl_mvm_rx_agg_session_expired;
+ baid_data->session_timer.data =
+ (unsigned long)&mvm->baid_map[baid];
+ baid_data->mvm = mvm;
+ baid_data->tid = tid;
+ baid_data->sta_id = mvm_sta->sta_id;
+
+ mvm_sta->tid_to_baid[tid] = baid;
+ if (timeout)
+ mod_timer(&baid_data->session_timer,
+ TU_TO_EXP_TIME(timeout * 2));
+
+ iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
+ baid_data, ssn, buf_size);
+ /*
+ * protect the BA data with RCU to cover the case where our
+ * internal RX sync mechanism times out (not that it's
+ * supposed to happen) and we free the session data while
+ * RX is still being processed in parallel
+ */
+ WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
+ rcu_assign_pointer(mvm->baid_map[baid], baid_data);
+ } else if (mvm->rx_ba_sessions > 0) {
+ u8 baid = mvm_sta->tid_to_baid[tid];
+
+ /* check that restart flow didn't zero the counter */
+ mvm->rx_ba_sessions--;
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return 0;
+
+ if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
+ return -EINVAL;
+
+ baid_data = rcu_access_pointer(mvm->baid_map[baid]);
+ if (WARN_ON(!baid_data))
+ return -EINVAL;
+
+ /* synchronize all rx queues so we can safely delete */
+ iwl_mvm_free_reorder(mvm, baid_data);
+ del_timer_sync(&baid_data->session_timer);
+ RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
+ kfree_rcu(baid_data, rcu_head);
}
+ return 0;
+out_free:
+ kfree(baid_data);
return ret;
}
@@ -925,7 +1416,9 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mvm_sta->tfd_queue_msk |= BIT(queue);
mvm_sta->tid_disable_agg &= ~BIT(tid);
} else {
- mvm_sta->tfd_queue_msk &= ~BIT(queue);
+ /* In DQA-mode the queue isn't removed on agg termination */
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ mvm_sta->tfd_queue_msk &= ~BIT(queue);
mvm_sta->tid_disable_agg |= BIT(tid);
}
@@ -1008,17 +1501,35 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
spin_lock_bh(&mvm->queue_info_lock);
- txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
- mvm->last_agg_queue);
- if (txq_id < 0) {
- ret = txq_id;
- spin_unlock_bh(&mvm->queue_info_lock);
- IWL_ERR(mvm, "Failed to allocate agg queue\n");
- goto release_locks;
+ /*
+ * Note the possible cases:
+ * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
+ * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
+ * one and mark it as reserved
+ * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
+ * non-DQA mode, since the TXQ hasn't yet been allocated
+ */
+ txq_id = mvmsta->tid_data[tid].txq_id;
+ if (!iwl_mvm_is_dqa_supported(mvm) ||
+ mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
+ txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
+ mvm->last_agg_queue);
+ if (txq_id < 0) {
+ ret = txq_id;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_ERR(mvm, "Failed to allocate agg queue\n");
+ goto release_locks;
+ }
+
+ /* TXQ hasn't yet been enabled, so mark it only as reserved */
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
}
- mvm->queue_info[txq_id].setup_reserved = true;
spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "AGG for tid %d will be on queue #%d\n",
+ tid, txq_id);
+
tid_data = &mvmsta->tid_data[tid];
tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
tid_data->txq_id = txq_id;
@@ -1053,6 +1564,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
int queue, ret;
+ bool alloc_queue = true;
u16 ssn;
struct iwl_trans_txq_scd_cfg cfg = {
@@ -1078,8 +1590,46 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
- iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[tid_to_mac80211_ac[tid]],
- ssn, &cfg, wdg_timeout);
+ /* In DQA mode, the existing queue might need to be reconfigured */
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ spin_lock_bh(&mvm->queue_info_lock);
+ /* Maybe there is no need to even alloc a queue... */
+ if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
+ alloc_queue = false;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /*
+ * Only reconfigure the SCD for the queue if the window size has
+ * become smaller than the current one
+ */
+ if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
+ /*
+ * If reconfiguring an existing queue, it first must be
+ * drained
+ */
+ ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
+ BIT(queue));
+ if (ret) {
+ IWL_ERR(mvm,
+ "Error draining queue before reconfig\n");
+ return ret;
+ }
+
+ ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
+ mvmsta->sta_id, tid,
+ buf_size, ssn);
+ if (ret) {
+ IWL_ERR(mvm,
+ "Error reconfiguring TXQ #%d\n", queue);
+ return ret;
+ }
+ }
+ }
+
+ if (alloc_queue)
+ iwl_mvm_enable_txq(mvm, queue,
+ vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
+ &cfg, wdg_timeout);
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
@@ -1087,7 +1637,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* No need to mark as reserved */
spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[queue].setup_reserved = false;
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
/*
@@ -1134,9 +1684,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmsta->agg_tids &= ~BIT(tid);
- /* No need to mark as reserved anymore */
spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[txq_id].setup_reserved = false;
+ /*
+ * The TXQ is marked as reserved only if no traffic came through yet.
+ * This means no traffic has been sent on this TID (agg'd or not), so
+ * we no longer have use for the queue. Since it hasn't even been
+ * allocated through iwl_mvm_enable_txq, we can just mark it back as
+ * free.
+ */
+ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
spin_unlock_bh(&mvm->queue_info_lock);
switch (tid_data->state) {
@@ -1162,9 +1719,11 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
- iwl_mvm_disable_txq(mvm, txq_id,
- vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
- 0);
+ if (!iwl_mvm_is_dqa_supported(mvm)) {
+ int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
+
+ iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
+ }
return 0;
case IWL_AGG_STARTING:
case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1215,9 +1774,16 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmsta->agg_tids &= ~BIT(tid);
spin_unlock_bh(&mvmsta->lock);
- /* No need to mark as reserved */
spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[txq_id].setup_reserved = false;
+ /*
+ * The TXQ is marked as reserved only if no traffic came through yet.
+ * This means no traffic has been sent on this TID (agg'd or not), so
+ * we no longer have use for the queue. Since it hasn't even been
+ * allocated through iwl_mvm_enable_txq, we can just mark it back as
+ * free.
+ */
+ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
spin_unlock_bh(&mvm->queue_info_lock);
if (old_state >= IWL_AGG_ON) {
@@ -1230,9 +1796,12 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
- iwl_mvm_disable_txq(mvm, tid_data->txq_id,
- vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
- 0);
+ if (!iwl_mvm_is_dqa_supported(mvm)) {
+ int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
+
+ iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
+ tid, 0);
+ }
}
return 0;
@@ -1285,6 +1854,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
+
/*
* It is possible that the 'sta' parameter is NULL,
* for example when a GTK is removed - the sta_id will then
@@ -1391,6 +1961,14 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
struct ieee80211_key_seq seq;
const u8 *pn;
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
+ break;
+ default:
+ return -EINVAL;
+ }
+
memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
pn = seq.aes_cmac.pn;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 1a8f69a41..d2c58f134 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -80,6 +80,60 @@ struct iwl_mvm;
struct iwl_mvm_vif;
/**
+ * DOC: DQA - Dynamic Queue Allocation - introduction
+ *
+ * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in the iwlwifi
+ * driver to allow dynamic allocation of queues on demand, rather than
+ * allocating them statically ahead of time. Ideally, we would like to allocate
+ * one queue per RA/TID, thus allowing an AP - for example - to send BE traffic
+ * to STA2 even if it also needs to send traffic to a sleeping STA1, without
+ * being blocked by the sleeping station.
+ *
+ * Although the queues in DQA mode are dynamically allocated, there are still
+ * some queues that are statically allocated:
+ * TXQ #0 - command queue
+ * TXQ #1 - aux frames
+ * TXQ #2 - P2P device frames
+ * TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames
+ * TXQ #4 - BSS DATA frames queue
+ * TXQ #5-8 - Non-QoS and MGMT frames queue pool
+ * TXQ #9 - P2P GO/SoftAP probe responses
+ * TXQ #10-31 - DATA frames queue pool
+ * The queues are dynamically taken from either the MGMT frames queue pool or
+ * the DATA frames one. See %iwl_mvm_dqa_txq for more information on each
+ * queue.
+ *
+ * When a frame for a previously unseen RA/TID comes in, it needs to be deferred
+ * until a queue is allocated for it, and only then can it be TXed. Therefore,
+ * it is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called
+ * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames.
+ *
+ * For convenience, MGMT is considered as if it has TID=8, and goes to the MGMT
+ * queues in the pool. If no free MGMT queue is left to allocate, a
+ * queue will be allocated from the DATA pool instead. Since QoS NDPs can create
+ * a problem for aggregations, they too will use a MGMT queue.
+ *
+ * When adding a STA, a DATA queue is reserved for it so that it can TX from
+ * it. If no free queue can be reserved, the STA addition will fail.
+ *
+ * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a
+ * new RA/TID comes in for an existing STA, one of the STA's queues will become
+ * shared and will serve more than the single TID (but always for the same RA!).
+ *
+ * When a RA/TID needs to become aggregated, no new queue needs to be
+ * allocated; the existing queue is simply marked as aggregated via the ADD_STA
+ * command. Note, however, that a shared queue cannot be aggregated; only after
+ * the other TIDs become inactive and are removed can the queue be reconfigured
+ * and become aggregated.
+ *
+ * When removing a station, its queues are returned to the pool for reuse. Here
+ * we also need to stay synced with the worker thread that TXes the deferred
+ * frames, so the worker neither puts deferred frames onto queues that were
+ * just released nor tries to allocate new queues for a STA we no longer need.
+ */
+
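As a side note to the DOC block above: the pool scan it describes is easy to model. The snippet below is an illustrative, self-contained user-space sketch, not driver code - the queue numbers mirror the table above, the status enum stands in for the driver's queue-status tracking, and find_free_queue() mimics the first-fit scan that iwl_mvm_find_free_queue() performs under mvm->queue_info_lock.

#include <stdio.h>

enum q_status { Q_FREE, Q_RESERVED, Q_READY };

#define DQA_MIN_MGMT_QUEUE  5
#define DQA_MAX_MGMT_QUEUE  8
#define DQA_MIN_DATA_QUEUE 10
#define DQA_MAX_DATA_QUEUE 31

static enum q_status queue_status[32];

/* First-fit scan over a [minq, maxq] pool; -1 stands in for -ENOSPC */
static int find_free_queue(int minq, int maxq)
{
	int i;

	for (i = minq; i <= maxq; i++)
		if (queue_status[i] == Q_FREE)
			return i;
	return -1;
}

int main(void)
{
	/* MGMT and non-QoS frames prefer the MGMT pool, then fall back */
	int q = find_free_queue(DQA_MIN_MGMT_QUEUE, DQA_MAX_MGMT_QUEUE);

	if (q < 0)
		q = find_free_queue(DQA_MIN_DATA_QUEUE, DQA_MAX_DATA_QUEUE);
	printf("allocated TXQ #%d\n", q);
	return 0;
}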
+/**
* DOC: station table - introduction
*
* The station table is a list of data structures that represent the stations.
@@ -253,6 +307,7 @@ enum iwl_mvm_agg_state {
/**
* struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @deferred_tx_frames: deferred TX frames for this RA/TID
* @seq_number: the next WiFi sequence number to use
* @next_reclaimed: the WiFi sequence number of the next packet to be acked.
* This is basically (last acked packet++).
@@ -260,7 +315,7 @@ enum iwl_mvm_agg_state {
* Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
* @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
* @state: state of the BA agreement establishment / tear down.
- * @txq_id: Tx queue used by the BA session
+ * @txq_id: Tx queue used by the BA session / DQA
* @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
* the first packet to be sent in legacy HW queue in Tx AGG stop flow.
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that
@@ -268,6 +323,7 @@ enum iwl_mvm_agg_state {
* @tx_time: medium time consumed by this A-MPDU
*/
struct iwl_mvm_tid_data {
+ struct sk_buff_head deferred_tx_frames;
u16 seq_number;
u16 next_reclaimed;
/* The rest is Tx AGG related */
@@ -292,6 +348,15 @@ struct iwl_mvm_key_pn {
} ____cacheline_aligned_in_smp q[];
};
+struct iwl_mvm_delba_data {
+ u32 baid;
+} __packed;
+
+struct iwl_mvm_delba_notif {
+ struct iwl_mvm_internal_rxq_notif metadata;
+ struct iwl_mvm_delba_data delba;
+} __packed;
+
/**
* struct iwl_mvm_rxq_dup_data - per station per rx queue data
* @last_seq: last sequence per tid for duplicate packet detection
@@ -316,7 +381,11 @@ struct iwl_mvm_rxq_dup_data {
* we need to signal the EOSP
* @lock: lock to protect the whole struct. Since %tid_data is access from Tx
* and from Tx response flow, it needs a spinlock.
- * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
+ * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
+ * @tid_to_baid: a simple map of TID to baid
+ * @reserved_queue: the queue reserved for this STA for DQA purposes
+ * Every STA is given one reserved queue to allow it to operate. If no
+ * such queue can be guaranteed, the STA addition will fail.
* @tx_protection: reference counter for controlling the Tx protection.
* @tt_tx_protection: is thermal throttling enabling Tx protection?
* @disable_tx: is tx to this STA disabled?
@@ -329,6 +398,7 @@ struct iwl_mvm_rxq_dup_data {
* the BA window. To be used for UAPSD only.
* @ptk_pn: per-queue PTK PN data structures
* @dup_data: per queue duplicate packet detection data
+ * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
@@ -345,12 +415,17 @@ struct iwl_mvm_sta {
bool bt_reduced_txpower;
bool next_status_eosp;
spinlock_t lock;
- struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
+ struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
+ u8 tid_to_baid[IWL_MAX_TID_COUNT];
struct iwl_lq_sta lq_sta;
struct ieee80211_vif *vif;
struct iwl_mvm_key_pn __rcu *ptk_pn[4];
struct iwl_mvm_rxq_dup_data *dup_data;
+ u16 deferred_traffic_tid_map;
+
+ u8 reserved_queue;
+
/* Temporary, until the new TLC will control the Tx protection */
s8 tx_protection;
bool tt_tx_protection;
@@ -378,8 +453,18 @@ struct iwl_mvm_int_sta {
u32 tfd_queue_msk;
};
+/**
+ * Send the STA info to the FW.
+ *
+ * @mvm: the iwl_mvm* to use
+ * @sta: the STA
+ * @update: this is true if the FW is being updated about a STA it already knows
+ * about. Otherwise (if this is a new STA), this should be false.
+ * @flags: if update==true, this marks what is being changed via ORs of values
+ * from enum iwl_sta_modify_flag. Otherwise, this is ignored.
+ */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- bool update);
+ bool update, unsigned int flags);
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -413,7 +498,7 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u16 ssn, bool start, u8 buf_size);
+ int tid, u16 ssn, bool start, u8 buf_size, u16 timeout);
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -459,5 +544,6 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
bool disable);
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 18711c5de..9f160fc58 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -444,7 +444,7 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
}
if (chandef) {
- cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
+ cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
PHY_BAND_24 : PHY_BAND_5);
cmd.ci.channel = chandef->chan->hw_value;
cmd.ci.width = iwl_mvm_get_channel_width(chandef);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index f1f28255a..58fc7b3c7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -204,20 +204,11 @@ void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS))
return;
- /*
- * We are now handling a temperature notification from the firmware
- * in ASYNC and hold the mutex. thermal_notify_framework will call
- * us back through get_temp() which ought to send a SYNC command to
- * the firmware and hence to take the mutex.
- * Avoid the deadlock by unlocking the mutex here.
- */
if (mvm->tz_device.tzone) {
struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device;
- mutex_unlock(&mvm->mutex);
thermal_notify_framework(tz_dev->tzone,
tz_dev->fw_trips_index[ths_crossed]);
- mutex_lock(&mvm->mutex);
}
#endif /* CONFIG_THERMAL */
}
@@ -368,16 +359,14 @@ static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac,
static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
{
- struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
int i, err;
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
- lockdep_is_held(&mvm->mutex));
- if (IS_ERR_OR_NULL(sta))
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
+ if (!mvmsta)
continue;
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
if (enable == mvmsta->tt_tx_protection)
continue;
err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
@@ -796,9 +785,6 @@ static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev,
{
struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
- if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
- return -EBUSY;
-
*state = mvm->cooling_dev.cur_state;
return 0;
@@ -813,9 +799,6 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
return -EIO;
- if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
- return -EBUSY;
-
mutex_lock(&mvm->mutex);
if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 34731e29c..779bafcbc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -67,6 +67,7 @@
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>
+#include <net/ipv6.h>
#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
@@ -98,6 +99,111 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
addr, tid, ssn);
}
+#define OPT_HDR(type, skb, off) \
+ (type *)(skb_network_header(skb) + (off))
+
+static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd)
+{
+#if IS_ENABLED(CONFIG_INET)
+ u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
+ u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist);
+ u8 protocol = 0;
+
+ /*
+ * Do not compute checksum if already computed or if transport will
+ * compute it
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
+ return;
+
+ /* We do not expect to be requested to csum stuff we do not support */
+ if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
+ (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6)),
+ "No support for requested checksum\n")) {
+ skb_checksum_help(skb);
+ return;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ protocol = ip_hdr(skb)->protocol;
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ipv6h =
+ (struct ipv6hdr *)skb_network_header(skb);
+ unsigned int off = sizeof(*ipv6h);
+
+ protocol = ipv6h->nexthdr;
+ while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+ /* only supported extension headers */
+ if (protocol != NEXTHDR_ROUTING &&
+ protocol != NEXTHDR_HOP &&
+ protocol != NEXTHDR_DEST &&
+ protocol != NEXTHDR_FRAGMENT) {
+ skb_checksum_help(skb);
+ return;
+ }
+
+ if (protocol == NEXTHDR_FRAGMENT) {
+ struct frag_hdr *hp =
+ OPT_HDR(struct frag_hdr, skb, off);
+
+ protocol = hp->nexthdr;
+ off += sizeof(struct frag_hdr);
+ } else {
+ struct ipv6_opt_hdr *hp =
+ OPT_HDR(struct ipv6_opt_hdr, skb, off);
+
+ protocol = hp->nexthdr;
+ off += ipv6_optlen(hp);
+ }
+ }
+ /* if we get here, protocol should now be TCP or UDP */
+#endif
+ }
+
+ if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
+ WARN_ON_ONCE(1);
+ skb_checksum_help(skb);
+ return;
+ }
+
+ /* enable L4 csum */
+ offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
+
+ /*
+ * Set offset to IP header (snap).
+ * We don't support tunneling so no need to take care of inner header.
+ * Size is in words.
+ */
+ offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
+
+ /* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
+ if (skb->protocol == htons(ETH_P_IP) &&
+ (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
+ ip_hdr(skb)->check = 0;
+ offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
+ }
+
+ /* reset UDP/TCP header csum */
+ if (protocol == IPPROTO_TCP)
+ tcp_hdr(skb)->check = 0;
+ else
+ udp_hdr(skb)->check = 0;
+
+ /* mac header len should include IV, size is in words */
+ if (info->control.hw_key)
+ mh_len += info->control.hw_key->iv_len;
+ mh_len /= 2;
+ offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
+
+ tx_cmd->offload_assist = cpu_to_le16(offload_assist);
+#endif
+}
+
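For reference, the extension-header walk above can be reproduced stand-alone. The sketch below is an illustrative model, not driver code: struct opt_hdr is a simplified stand-in for struct ipv6_opt_hdr from <net/ipv6.h>, the offset arithmetic matches the kernel's ipv6_optlen() ((hdrlen + 1) << 3 octets), and the fragment-header special case is omitted for brevity.

#include <stdint.h>
#include <stdio.h>

#define NEXTHDR_HOP      0
#define NEXTHDR_TCP      6
#define NEXTHDR_ROUTING 43
#define NEXTHDR_DEST    60

/* Simplified stand-in for struct ipv6_opt_hdr */
struct opt_hdr {
	uint8_t nexthdr;
	uint8_t hdrlen;	/* in 8-octet units, not counting the first 8 */
};

static int is_ext_hdr(uint8_t proto)
{
	return proto == NEXTHDR_HOP || proto == NEXTHDR_ROUTING ||
	       proto == NEXTHDR_DEST;
}

/* Walk the chain until a transport protocol is found */
static uint8_t skip_ext_hdrs(const uint8_t *net_hdr, uint8_t first,
			     unsigned int *off)
{
	uint8_t proto = first;

	while (is_ext_hdr(proto)) {
		const struct opt_hdr *hp =
			(const struct opt_hdr *)(net_hdr + *off);

		proto = hp->nexthdr;
		*off += (hp->hdrlen + 1) * 8;
	}
	return proto;
}

int main(void)
{
	/* a hop-by-hop header (8 bytes) right after the fixed IPv6 header */
	uint8_t pkt[48] = { [40] = NEXTHDR_TCP, [41] = 0 };
	unsigned int off = 40;	/* sizeof(struct ipv6hdr) */

	printf("transport proto %u at offset %u\n",
	       skip_ext_hdrs(pkt, NEXTHDR_HOP, &off), off);
	return 0;
}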
/*
* Sets most of the Tx cmd's fields
*/
@@ -127,6 +233,9 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 *qc = ieee80211_get_qos_ctl(hdr);
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
+ tx_cmd->offload_assist |=
+ cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU));
} else if (ieee80211_is_back_req(fc)) {
struct ieee80211_bar *bar = (void *)skb->data;
u16 control = le16_to_cpu(bar->control);
@@ -187,9 +296,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
/* Total # bytes to be transmitted */
tx_cmd->len = cpu_to_le16((u16)skb->len +
(uintptr_t)skb_info->driver_data[0]);
- tx_cmd->next_frame_len = 0;
tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
tx_cmd->sta_id = sta_id;
+
+ /* padding is inserted later in transport */
+ if (ieee80211_hdrlen(fc) % 4 &&
+ !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
+ tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));
+
+ iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd);
}
/*
@@ -245,7 +360,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
&mvm->nvm_data->bands[info->band], sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
rate_idx += IWL_FIRST_OFDM_RATE;
/* For 2.4 GHZ band, check that there is no need to remap */
@@ -258,7 +373,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
mvm->mgmt_last_antenna_idx);
- if (info->band == IEEE80211_BAND_2GHZ &&
+ if (info->band == NL80211_BAND_2GHZ &&
!iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
else
@@ -360,6 +475,21 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
return dev_cmd;
}
+static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info, __le16 fc)
+{
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ if (info->control.vif->type == NL80211_IFTYPE_AP &&
+ ieee80211_is_probe_resp(fc))
+ return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ else if (ieee80211_is_mgmt(fc) &&
+ info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ }
+
+ return info->hw_queue;
+}
+
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -369,6 +499,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
struct iwl_tx_cmd *tx_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ int queue;
memcpy(&info, skb->cb, sizeof(info));
@@ -393,6 +524,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
info.control.vif->type == NL80211_IFTYPE_STATION)
IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
+ queue = info.hw_queue;
+
/*
* If the interface on which the frame is sent is the P2P_DEVICE
* or an AP/GO interface use the broadcast station associated
@@ -408,10 +541,12 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
iwl_mvm_vif_from_mac80211(info.control.vif);
if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
- info.control.vif->type == NL80211_IFTYPE_AP)
+ info.control.vif->type == NL80211_IFTYPE_AP) {
sta_id = mvmvif->bcast_sta.sta_id;
- else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
- is_multicast_ether_addr(hdr->addr1)) {
+ queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
+ hdr->frame_control);
+ } else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
+ is_multicast_ether_addr(hdr->addr1)) {
u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
if (ap_sta_id != IWL_MVM_STATION_COUNT)
@@ -419,7 +554,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
}
}
- IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info.hw_queue);
+ IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
if (!dev_cmd)
@@ -430,7 +565,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
- if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info.hw_queue)) {
+ if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
return -1;
}
@@ -463,6 +598,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
u16 amsdu_add, snap_ip_tcp, pad, i = 0;
unsigned int dbg_max_amsdu_len;
+ netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
u8 *qc, tid, txf;
snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
@@ -473,15 +609,30 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
+ dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
+
if (!sta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
- !mvmsta->tlc_amsdu) {
+ (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) {
num_subframes = 1;
pad = 0;
goto segment;
}
/*
+ * Do not build AMSDU for IPv6 with extension headers.
+ * Ask the stack to segment and checksum the generated MPDUs for us.
+ */
+ if (skb->protocol == htons(ETH_P_IPV6) &&
+ ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
+ IPPROTO_TCP) {
+ num_subframes = 1;
+ pad = 0;
+ netdev_features &= ~NETIF_F_CSUM_MASK;
+ goto segment;
+ }
+
+ /*
* No need to lock amsdu_in_ampdu_allowed since it can't be modified
+ * during a BA session.
*/
@@ -493,7 +644,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
}
max_amsdu_len = sta->max_amsdu_len;
- dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
/* the Tx FIFO to which this A-MSDU will be routed */
txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
@@ -507,7 +657,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
mvm->shared_mem_cfg.txfifo_size[txf] - 256);
- if (dbg_max_amsdu_len)
+ if (unlikely(dbg_max_amsdu_len))
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
dbg_max_amsdu_len);
@@ -575,7 +725,7 @@ segment:
skb_shinfo(skb)->gso_size = num_subframes * mss;
memcpy(cb, skb->cb, sizeof(cb));
- next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG);
+ next = skb_gso_segment(skb, netdev_features);
skb_shinfo(skb)->gso_size = mss;
if (WARN_ON_ONCE(IS_ERR(next)))
return -EINVAL;
@@ -641,6 +791,35 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
}
#endif
+static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta, u8 tid,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ u8 mac_queue = info->hw_queue;
+ struct sk_buff_head *deferred_tx_frames;
+
+ lockdep_assert_held(&mvm_sta->lock);
+
+ mvm_sta->deferred_traffic_tid_map |= BIT(tid);
+ set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
+
+ deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
+
+ skb_queue_tail(deferred_tx_frames, skb);
+
+ /*
+ * The first deferred frame should've stopped the MAC queues, so we
+ * should never get a second deferred frame for the RA/TID.
+ */
+ if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
+ "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
+ skb_queue_len(deferred_tx_frames))) {
+ iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
+ schedule_work(&mvm->add_stream_wk);
+ }
+}
+
/*
* Sets the fields in the Tx cmd that are crypto related
*/
@@ -656,7 +835,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
u8 txq_id = info->hw_queue;
- bool is_data_qos = false, is_ampdu = false;
+ bool is_ampdu = false;
int hdrlen;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -697,8 +876,15 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
seq_number &= IEEE80211_SCTL_SEQ;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
- is_data_qos = true;
is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+ } else if (iwl_mvm_is_dqa_supported(mvm) &&
+ (ieee80211_is_qos_nullfunc(fc) ||
+ ieee80211_is_nullfunc(fc))) {
+ /*
+ * nullfunc frames should go to the MGMT queue regardless of QoS
+ */
+ tid = IWL_MAX_TID_COUNT;
+ txq_id = mvmsta->tid_data[tid].txq_id;
}
/* Copy MAC header from skb into command buffer */
@@ -719,18 +905,36 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id = mvmsta->tid_data[tid].txq_id;
}
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ if (unlikely(mvmsta->tid_data[tid].txq_id ==
+ IEEE80211_INVAL_HW_QUEUE)) {
+ iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
+
+ /*
+ * The frame is now deferred; the scheduled worker will
+ * re-allocate it, so we can free it for now.
+ */
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ spin_unlock(&mvmsta->lock);
+ return 0;
+ }
+
+ txq_id = mvmsta->tid_data[tid].txq_id;
+ }
+
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
goto drop_unlock_sta;
- if (is_data_qos && !ieee80211_has_morefrags(fc))
+ if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
spin_unlock(&mvmsta->lock);
- if (txq_id < mvm->first_agg_queue)
+ /* Increase pending frames count if this isn't AMPDU */
+ if (!is_ampdu)
atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
return 0;
@@ -883,7 +1087,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status)
#endif /* CONFIG_IWLWIFI_DEBUG */
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct ieee80211_tx_rate *r)
{
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
@@ -978,6 +1182,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
u8 skb_freed = 0;
u16 next_reclaimed, seq_ctl;
bool is_ndp = false;
+ bool txq_agg = false; /* Is this TXQ aggregated */
__skb_queue_head_init(&skbs);
@@ -1108,6 +1313,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
bool send_eosp_ndp = false;
spin_lock_bh(&mvmsta->lock);
+ txq_agg = (mvmsta->tid_data[tid].state == IWL_AGG_ON);
+
if (!is_ndp) {
tid_data->next_reclaimed = next_reclaimed;
IWL_DEBUG_TX_REPLY(mvm,
@@ -1163,11 +1370,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
*/
- if (txq_id >= mvm->first_agg_queue)
+ if (txq_agg)
goto out;
/* We can't free more than one frame at once on a shared queue */
- WARN_ON(skb_freed > 1);
+ WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1));
/* If we still have frames for this STA there is nothing to do here */
if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
@@ -1261,9 +1468,12 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ int queue = SEQ_TO_QUEUE(sequence);
- if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
+ if (WARN_ON_ONCE(queue < mvm->first_agg_queue &&
+ (!iwl_mvm_is_dqa_supported(mvm) ||
+ (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))))
return;
if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
@@ -1273,10 +1483,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
rcu_read_lock();
- sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
- if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ if (!WARN_ON_ONCE(!mvmsta)) {
mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate);
mvmsta->tid_data[tid].tx_time =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 53cdc5760..161b99efd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -90,11 +90,17 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
* the mutex, this ensures we don't try to send two
* (or more) synchronous commands at a time.
*/
- if (!(cmd->flags & CMD_ASYNC))
+ if (!(cmd->flags & CMD_ASYNC)) {
lockdep_assert_held(&mvm->mutex);
+ if (!(cmd->flags & CMD_SEND_IN_IDLE))
+ iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD);
+ }
ret = iwl_trans_send_cmd(mvm->trans, cmd);
+ if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE)))
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD);
+
/*
* If the caller wants the SKB, then don't hide any problems, the
* caller might access the response buffer which will be NULL if
@@ -217,14 +223,14 @@ static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
};
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
int idx;
int band_offset = 0;
/* Legacy rate format, search for match in table */
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
band_offset = IWL_FIRST_OFDM_RATE;
for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
if (fw_rate_idx_to_plcp[idx] == rate)
@@ -491,98 +497,12 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}
-static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
-{
- struct iwl_trans *trans = mvm->trans;
- struct iwl_error_event_table_v1 table;
- u32 base;
-
- base = mvm->error_event_table;
- if (mvm->cur_ucode == IWL_UCODE_INIT) {
- if (!base)
- base = mvm->fw->init_errlog_ptr;
- } else {
- if (!base)
- base = mvm->fw->inst_errlog_ptr;
- }
-
- if (base < 0x800000) {
- IWL_ERR(mvm,
- "Not valid error log pointer 0x%08X for %s uCode\n",
- base,
- (mvm->cur_ucode == IWL_UCODE_INIT)
- ? "Init" : "RT");
- return;
- }
-
- iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
-
- if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
- IWL_ERR(trans, "Start IWL Error Log Dump:\n");
- IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
- mvm->status, table.valid);
- }
-
- /* Do not change this output - scripts rely on it */
-
- IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
-
- trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
- table.data1, table.data2, table.data3,
- table.blink2, table.ilink1, table.ilink2,
- table.bcon_time, table.gp1, table.gp2,
- table.gp3, table.ucode_ver, 0,
- table.hw_ver, table.brd_ver);
- IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
- desc_lookup(table.error_id));
- IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
- IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
- IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
- IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
- IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
- IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
- IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
- IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
- IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
- IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
- IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
- IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
- IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
- IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
- IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver);
- IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
- IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
- IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
- IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
- IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
- IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
- IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
- IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
- IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
- IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
- IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
- IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
- IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
- IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
- IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
- IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
- IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
-
- if (mvm->support_umac_log)
- iwl_mvm_dump_umac_error_log(mvm);
-}
-
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_error_event_table table;
u32 base;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
- iwl_mvm_dump_nic_error_log_old(mvm);
- return;
- }
-
base = mvm->error_event_table;
if (mvm->cur_ucode == IWL_UCODE_INIT) {
if (!base)
@@ -667,12 +587,45 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
for (i = minq; i <= maxq; i++)
if (mvm->queue_info[i].hw_queue_refcount == 0 &&
- !mvm->queue_info[i].setup_reserved)
+ mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
return i;
return -ENOSPC;
}
+int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
+ int tid, int frame_limit, u16 ssn)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .enable = 1,
+ .window = frame_limit,
+ .sta_id = sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = fifo,
+ .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
+ .tid = tid,
+ };
+ int ret;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
+ "Trying to reconfig unallocated queue %d\n", queue)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return -ENXIO;
+ }
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
+ queue, fifo, ret);
+
+ return ret;
+}
+
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
@@ -694,6 +647,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
mvm->queue_info[queue].hw_queue_refcount++;
if (mvm->queue_info[queue].hw_queue_refcount > 1)
enable_queue = false;
+ else
+ mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
IWL_DEBUG_TX_QUEUES(mvm,
@@ -766,6 +721,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
mvm->queue_info[queue].hw_queue_refcount--;
cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
+ if (!cmd.enable)
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
IWL_DEBUG_TX_QUEUES(mvm,
"Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
@@ -779,6 +736,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
return;
}
+ cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+
/* Make sure queue info is correct even though we overwrite it */
WARN(mvm->queue_info[queue].hw_queue_refcount ||
mvm->queue_info[queue].tid_bitmap ||
@@ -1079,3 +1038,74 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
out:
ieee80211_connection_loss(vif);
}
+
+int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
+ enum iwl_lqm_cmd_operatrions operation,
+ u32 duration, u32 timeout)
+{
+ struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_link_qual_msrmnt_cmd cmd = {
+ .cmd_operation = cpu_to_le32(operation),
+ .mac_id = cpu_to_le32(mvm_vif->id),
+ .measurement_time = cpu_to_le32(duration),
+ .timeout = cpu_to_le32(timeout),
+ };
+ u32 cmdid =
+ iwl_cmd_id(LINK_QUALITY_MEASUREMENT_CMD, MAC_CONF_GROUP, 0);
+ int ret;
+
+ if (!fw_has_capa(&mvm_vif->mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LQM_SUPPORT))
+ return -EOPNOTSUPP;
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return -EINVAL;
+
+ switch (operation) {
+ case LQM_CMD_OPERATION_START_MEASUREMENT:
+ if (iwl_mvm_lqm_active(mvm_vif->mvm))
+ return -EBUSY;
+ if (!vif->bss_conf.assoc)
+ return -EINVAL;
+ mvm_vif->lqm_active = true;
+ break;
+ case LQM_CMD_OPERATION_STOP_MEASUREMENT:
+ if (!iwl_mvm_lqm_active(mvm_vif->mvm))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm_vif->mvm, cmdid, 0, sizeof(cmd),
+ &cmd);
+
+ /* command failed - roll back lqm_active state */
+ if (ret) {
+ mvm_vif->lqm_active =
+ operation == LQM_CMD_OPERATION_STOP_MEASUREMENT;
+ }
+
+ return ret;
+}
+
+static void iwl_mvm_lqm_active_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+ bool *lqm_active = _data;
+
+ *lqm_active = *lqm_active || mvm_vif->lqm_active;
+}
+
+bool iwl_mvm_lqm_active(struct iwl_mvm *mvm)
+{
+ bool ret = false;
+
+ lockdep_assert_held(&mvm->mutex);
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_lqm_active_iterator, &ret);
+
+ return ret;
+}
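(For reference, a start/stop round trip through the new LQM helper might look like the sketch below; the duration and timeout values are illustrative assumptions, with units defined by the firmware API:)

	ret = iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_START_MEASUREMENT,
				   100, 1000);
	if (!ret) {
		/* ... wait for the measurement result from the firmware ... */
		ret = iwl_mvm_send_lqm_cmd(vif,
					   LQM_CMD_OPERATION_STOP_MEASUREMENT,
					   0, 0);
	}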
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 79d7cd7d4..a588b05e3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -493,17 +493,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -593,6 +596,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
+ const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
struct iwl_trans_pcie *trans_pcie;
int ret;
@@ -620,6 +624,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cfg = cfg_7265d;
iwl_trans->cfg = cfg_7265d;
}
+
+ if (iwl_trans->cfg->rf_id) {
+ if (cfg == &iwl9260_2ac_cfg)
+ cfg_9260lc = &iwl9260lc_2ac_cfg;
+ if (cfg_9260lc && iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
+ cfg = cfg_9260lc;
+ iwl_trans->cfg = cfg_9260lc;
+ }
+ }
#endif
pci_set_drvdata(pdev, iwl_trans);
@@ -661,10 +674,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* The PCI device starts with a reference taken and we are
* supposed to release it here. But to simplify the
* interaction with the opmode, we don't do it now, but let
- * the opmode release it when it's ready. To account for this
- * reference, we start with ref_count set to 1.
+ * the opmode release it when it's ready.
*/
- trans_pcie->ref_count = 1;
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index dadafbdef..de6974f9c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -348,7 +348,7 @@ struct iwl_tso_hdr_page {
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
- struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE];
+ struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
struct iwl_rb_allocator rba;
struct iwl_trans *trans;
struct iwl_drv *drv;
@@ -403,10 +403,6 @@ struct iwl_trans_pcie {
bool cmd_hold_nic_awake;
bool ref_cmd_in_flight;
- /* protect ref counter */
- spinlock_t ref_lock;
- u32 ref_count;
-
dma_addr_t fw_mon_phys;
struct page *fw_mon_page;
u32 fw_mon_size;
@@ -485,9 +481,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-void iwl_trans_pcie_ref(struct iwl_trans *trans);
-void iwl_trans_pcie_unref(struct iwl_trans *trans);
-
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 4be3c35af..0a4a3c502 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -161,10 +161,11 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
return cpu_to_le32((u32)(dma_addr >> 8));
}
-static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val)
+static void iwl_pcie_write_prph_64_no_grab(struct iwl_trans *trans, u64 ofs,
+ u64 val)
{
- iwl_write_prph(trans, ofs, val & 0xffffffff);
- iwl_write_prph(trans, ofs + 4, val >> 32);
+ iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
+ iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
}
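(The _no_grab accessors assume the caller already holds NIC access; the pattern used throughout the rx-init hunks below is grab once, batch the register writes, release once:)

	unsigned long flags;

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* ... further prph/CSR writes under the same access window ... */
	iwl_trans_release_nic_access(trans, &flags);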
/*
@@ -208,10 +209,14 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
rxq->write_actual = round_down(rxq->write, 8);
if (trans->cfg->mq_rx_supported)
- iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
- rxq->write_actual);
- else
- iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+ iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
+ rxq->write_actual);
+ /*
+	 * write to FH_RSCSR_CHNL0_WPTR register even in MQ as a W/A for the
+	 * hardware shadow registers bug - writing to RFH_Q_FRBDCB_WIDX will
+ * not wake the NIC.
+ */
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
@@ -694,6 +699,7 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size;
+ unsigned long flags;
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
switch (trans_pcie->rx_buf_size) {
@@ -711,23 +717,26 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
}
+ if (!iwl_trans_grab_nic_access(trans, &flags))
+ return;
+
/* Stop Rx DMA */
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
/* reset and flush pointers */
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+ iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
/* Reset driver's Rx queue write index */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+ iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
/* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->bd_dma >> 8));
+ iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->bd_dma >> 8));
/* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
+ iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
/* Enable Rx DMA
* FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
@@ -737,13 +746,15 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
* RB timeout 0x10
* 256 RBDs
*/
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- rb_size|
- (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ rb_size |
+ (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ iwl_trans_release_nic_access(trans, &flags);
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
@@ -757,6 +768,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size, enabled = 0;
+ unsigned long flags;
int i;
switch (trans_pcie->rx_buf_size) {
@@ -774,25 +786,31 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
rb_size = RFH_RXF_DMA_RB_SIZE_4K;
}
+ if (!iwl_trans_grab_nic_access(trans, &flags))
+ return;
+
/* Stop Rx DMA */
- iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
+ iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
- iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0);
+ iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
for (i = 0; i < trans->num_rx_queues; i++) {
/* Tell device where to find RBD free table in DRAM */
- iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i),
- (u64)(trans_pcie->rxq[i].bd_dma));
+ iwl_pcie_write_prph_64_no_grab(trans,
+ RFH_Q_FRBDCB_BA_LSB(i),
+ trans_pcie->rxq[i].bd_dma);
/* Tell device where to find RBD used table in DRAM */
- iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i),
- (u64)(trans_pcie->rxq[i].used_bd_dma));
+ iwl_pcie_write_prph_64_no_grab(trans,
+ RFH_Q_URBDCB_BA_LSB(i),
+ trans_pcie->rxq[i].used_bd_dma);
/* Tell device where in DRAM to update its Rx status */
- iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
- trans_pcie->rxq[i].rb_stts_dma);
+ iwl_pcie_write_prph_64_no_grab(trans,
+ RFH_Q_URBD_STTS_WPTR_LSB(i),
+ trans_pcie->rxq[i].rb_stts_dma);
/* Reset device indice tables */
- iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0);
- iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0);
- iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0);
+ iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
+ iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
+ iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
enabled |= BIT(i) | BIT(i + 16);
}
@@ -808,23 +826,26 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
* Drop frames that exceed RB size
* 512 RBDs
*/
- iwl_write_prph(trans, RFH_RXF_DMA_CFG,
- RFH_DMA_EN_ENABLE_VAL |
- rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
- RFH_RXF_DMA_MIN_RB_4_8 |
- RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
- RFH_RXF_DMA_RBDCB_SIZE_512);
+ iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
+ RFH_DMA_EN_ENABLE_VAL |
+ rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
+ RFH_RXF_DMA_MIN_RB_4_8 |
+ RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
+ RFH_RXF_DMA_RBDCB_SIZE_512);
/*
* Activate DMA snooping.
* Set RX DMA chunk size to 64B
* Default queue is 0
*/
- iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
- (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
- RFH_GEN_CFG_SERVICE_DMA_SNOOP);
+ iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
+ (DEFAULT_RXQ_NUM <<
+ RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
+ RFH_GEN_CFG_SERVICE_DMA_SNOOP);
/* Enable the relevant rx queues */
- iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);
+ iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
+
+ iwl_trans_release_nic_access(trans, &flags);
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
@@ -908,6 +929,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
allocator_pool_size = trans->num_rx_queues *
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
num_alloc = queue_size + allocator_pool_size;
+ BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
+ ARRAY_SIZE(trans_pcie->rx_pool));
for (i = 0; i < num_alloc; i++) {
struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
@@ -1292,7 +1315,7 @@ static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
	 * write 1 clear (W1C) register, meaning that it is cleared
* by writing 1 to the bit.
*/
- iwl_write_direct32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+ iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}
/*
@@ -1805,19 +1828,19 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
struct msix_entry *entry = dev_id;
struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
struct iwl_trans *trans = trans_pcie->trans;
- struct isr_statistics *isr_stats = isr_stats = &trans_pcie->isr_stats;
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta_fh, inta_hw;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
spin_lock(&trans_pcie->irq_lock);
- inta_fh = iwl_read_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
- inta_hw = iwl_read_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
+ inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
+ inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
/*
	 * Clear the causes registers to avoid handling the same cause again.
*/
- iwl_write_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
- iwl_write_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
+ iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
+ iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
spin_unlock(&trans_pcie->irq_lock);
if (unlikely(!(inta_fh | inta_hw))) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index b2b79354d..f603d7830 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -269,9 +269,8 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
/* Configure analog phase-lock-loop before activating to D0A */
- if (trans->cfg->base_params->pll_cfg_val)
- iwl_set_bit(trans, CSR_ANA_PLL_CFG,
- trans->cfg->base_params->pll_cfg_val);
+ if (trans->cfg->base_params->pll_cfg)
+ iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
/*
* Set "initialization complete" bit to move adapter from
@@ -361,8 +360,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
- udelay(10);
+ usleep_range(1000, 2000);
/*
* Set "initialization complete" bit to move adapter from
@@ -408,8 +406,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
* SHRD_HW_RST). Turn MAC off before proceeding.
*/
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
- udelay(10);
+ usleep_range(1000, 2000);
/* Enable LP XTAL by indirect access through CSR */
apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
@@ -506,8 +503,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
/* Reset the entire device */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
- udelay(10);
+ usleep_range(1000, 2000);
/*
* Clear "initialization complete" bit to move adapter from
@@ -586,7 +582,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
- msleep(1);
+ usleep_range(1000, 2000);
for (iter = 0; iter < 10; iter++) {
/* If HW is not ready, prepare the conditions to check again */
@@ -1074,7 +1070,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
/* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
- udelay(20);
+ usleep_range(1000, 2000);
/*
* Upon stop, the APM issues an interrupt if HW RF kill is set.
@@ -1321,6 +1317,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
* after this call.
*/
iwl_pcie_reset_ict(trans);
+ iwl_enable_interrupts(trans);
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
@@ -1434,7 +1431,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
int ret, i;
if (trans->cfg->mq_rx_supported) {
- max_vector = min_t(u32, (num_possible_cpus() + 1),
+ max_vector = min_t(u32, (num_possible_cpus() + 2),
IWL_MAX_RX_HW_QUEUES);
for (i = 0; i < max_vector; i++)
trans_pcie->msix_entries[i].entry = i;
@@ -1465,7 +1462,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
ret = pci_enable_msi(pdev);
if (ret) {
- dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
+ dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
/* enable rfkill interrupt: hw bug w/a */
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -1499,8 +1496,8 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
IWL_ERR(trans_pcie->trans,
"Error allocating IRQ %d\n", i);
for (j = 0; j < i; j++)
- free_irq(trans_pcie->msix_entries[i].vector,
- &trans_pcie->msix_entries[i]);
+ free_irq(trans_pcie->msix_entries[j].vector,
+ &trans_pcie->msix_entries[j]);
pci_disable_msix(pdev);
return ret;
}
@@ -1525,8 +1522,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
/* Reset the entire device */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
- usleep_range(10, 15);
+ usleep_range(1000, 2000);
iwl_pcie_apm_init(trans);
@@ -1694,6 +1690,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
}
free_percpu(trans_pcie->tso_hdr_page);
+ mutex_destroy(&trans_pcie->mutex);
iwl_trans_free(trans);
}
@@ -1948,7 +1945,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
"WR pointer moved while flushing %d -> %d\n",
wr_ptr, write_ptr))
return -ETIMEDOUT;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (q->read_ptr != q->write_ptr) {
@@ -2011,41 +2008,35 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
-void iwl_trans_pcie_ref(struct iwl_trans *trans)
+static void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
if (iwlwifi_mod_params.d0i3_disable)
return;
- spin_lock_irqsave(&trans_pcie->ref_lock, flags);
- IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
- trans_pcie->ref_count++;
pm_runtime_get(&trans_pcie->pci_dev->dev);
- spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+
+#ifdef CONFIG_PM
+ IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
+ atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
+#endif /* CONFIG_PM */
}
-void iwl_trans_pcie_unref(struct iwl_trans *trans)
+static void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
if (iwlwifi_mod_params.d0i3_disable)
return;
- spin_lock_irqsave(&trans_pcie->ref_lock, flags);
- IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
- if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
- spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
- return;
- }
- trans_pcie->ref_count--;
-
pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);
- spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+#ifdef CONFIG_PM
+ IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
+ atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
+#endif /* CONFIG_PM */
}
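(With the driver-private counter removed, reference accounting is delegated to the runtime-PM core; the pairing idiom below is a restatement of the two functions above, not new driver code:)

	pm_runtime_get(&pdev->dev);		/* ref: keep the device in D0 */
	/* ... work that needs the NIC awake ... */
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);	/* unref: may autosuspend */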
static const char *get_csr_string(int cmd)
@@ -2793,7 +2784,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->trans = trans;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
- spin_lock_init(&trans_pcie->ref_lock);
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
@@ -2912,6 +2902,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
}
+ trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
+
iwl_pcie_set_interrupt_capa(pdev, trans);
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 16ad820ca..d6beac9af 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -32,6 +32,7 @@
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/pm_runtime.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>
@@ -596,6 +597,28 @@ static void iwl_pcie_free_tso_page(struct sk_buff *skb)
}
}
+static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->reg_lock);
+
+ if (trans_pcie->ref_cmd_in_flight) {
+ trans_pcie->ref_cmd_in_flight = false;
+ IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
+ iwl_trans_unref(trans);
+ }
+
+ if (!trans->cfg->base_params->apmg_wake_up_wa)
+ return;
+ if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
+ return;
+
+ trans_pcie->cmd_hold_nic_awake = false;
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
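(iwl_pcie_clear_cmd_in_flight() must be called with reg_lock held, which it asserts, so callers wrap it the same way the unmap path below does:)

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	iwl_pcie_clear_cmd_in_flight(trans);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);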
/*
* iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
*/
@@ -620,6 +643,20 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
}
iwl_pcie_txq_free_tfd(trans, txq);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
+
+ if (q->read_ptr == q->write_ptr) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ if (txq_id != trans_pcie->cmd_queue) {
+ IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
+ q->id);
+ iwl_trans_unref(trans);
+ } else {
+ iwl_pcie_clear_cmd_in_flight(trans);
+ }
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ }
}
txq->active = false;
@@ -1098,7 +1135,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (q->read_ptr == q->write_ptr) {
IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
- iwl_trans_pcie_unref(trans);
+ iwl_trans_unref(trans);
}
out:
@@ -1117,7 +1154,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
!trans_pcie->ref_cmd_in_flight) {
trans_pcie->ref_cmd_in_flight = true;
IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
- iwl_trans_pcie_ref(trans);
+ iwl_trans_ref(trans);
}
/*
@@ -1148,29 +1185,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
return 0;
}
-static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- lockdep_assert_held(&trans_pcie->reg_lock);
-
- if (trans_pcie->ref_cmd_in_flight) {
- trans_pcie->ref_cmd_in_flight = false;
- IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
- iwl_trans_pcie_unref(trans);
- }
-
- if (trans->cfg->base_params->apmg_wake_up_wa) {
- if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
- return 0;
-
- trans_pcie->cmd_hold_nic_awake = false;
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- }
- return 0;
-}
-
/*
* iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
*
@@ -1786,6 +1800,16 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
iwl_get_cmd_string(trans, cmd->id));
+ if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
+ ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+ pm_runtime_active(&trans_pcie->pci_dev->dev),
+ msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+ if (!ret) {
+ IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
+ return -ETIMEDOUT;
+ }
+ }
+
cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
if (cmd_idx < 0) {
ret = cmd_idx;
@@ -2197,6 +2221,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
__le16 fc;
u8 hdr_len;
u16 wifi_seq;
+ bool amsdu;
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
@@ -2288,11 +2313,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
*/
len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
- tb1_len = ALIGN(len, 4);
-
- /* Tell NIC about any 2-byte padding after MAC header */
- if (tb1_len != len)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+ amsdu = ieee80211_is_data_qos(fc) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+ if (trans_pcie->sw_csum_tx || !amsdu) {
+ tb1_len = ALIGN(len, 4);
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (tb1_len != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+ } else {
+ tb1_len = len;
+ }
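(In other words: a plain MPDU pads tb1 up to a dword boundary and flags the padding to the firmware, while an A-MSDU keeps the exact length because the subframe header re-aligns the payload. A worked example with an assumed len of 46 bytes: ALIGN(46, 4) == 48, so two pad bytes follow the MAC header and TX_CMD_FLG_MH_PAD_MSK must be set.)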
/* The first TB points to the scratchbuf data - min_copy bytes */
memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
@@ -2310,8 +2342,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
goto out_err;
iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
- if (ieee80211_is_data_qos(fc) &&
- (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT)) {
+ if (amsdu) {
if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
out_meta, dev_cmd,
tb1_len)))
@@ -2342,7 +2373,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq->frozen_expiry_remainder = txq->wd_timeout;
}
IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
- iwl_trans_pcie_ref(trans);
+ iwl_trans_ref(trans);
}
/* Tell device the write index *just past* this latest filled TFD */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 515aa3f99..a8a9bd8e1 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -1794,7 +1794,7 @@ static int prism2_transmit(struct net_device *dev, int idx)
netif_wake_queue(dev);
return -1;
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* Since we did not wait for command completion, the card continues
* to process on the background and we will finish handling when
diff --git a/drivers/net/wireless/intersil/orinoco/cfg.c b/drivers/net/wireless/intersil/orinoco/cfg.c
index 0f6ea316e..7aa47069a 100644
--- a/drivers/net/wireless/intersil/orinoco/cfg.c
+++ b/drivers/net/wireless/intersil/orinoco/cfg.c
@@ -60,14 +60,14 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
if (priv->channel_mask & (1 << i)) {
priv->channels[i].center_freq =
ieee80211_channel_to_frequency(i + 1,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
channels++;
}
}
priv->band.channels = priv->channels;
priv->band.n_channels = channels;
- wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
i = 0;
@@ -175,7 +175,7 @@ static int orinoco_set_monitor_channel(struct wiphy *wiphy,
if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT)
return -EINVAL;
- if (chandef->chan->band != IEEE80211_BAND_2GHZ)
+ if (chandef->chan->band != NL80211_BAND_2GHZ)
return -EINVAL;
channel = ieee80211_frequency_to_channel(chandef->chan->center_freq);
diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c
index e27e32851..61af5a28f 100644
--- a/drivers/net/wireless/intersil/orinoco/hw.c
+++ b/drivers/net/wireless/intersil/orinoco/hw.c
@@ -1193,7 +1193,7 @@ int orinoco_hw_get_freq(struct orinoco_private *priv)
goto out;
}
- freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+ freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
out:
orinoco_unlock(priv, &flags);
diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c
index 7b5c55432..7afe2004e 100644
--- a/drivers/net/wireless/intersil/orinoco/main.c
+++ b/drivers/net/wireless/intersil/orinoco/main.c
@@ -1794,7 +1794,7 @@ void orinoco_reset(struct work_struct *work)
printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n",
dev->name, err);
} else
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
orinoco_unlock_irq(priv);
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 977298ad9..892174318 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -1275,7 +1275,7 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
goto busy;
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
stats->tx_bytes += skb->len;
goto ok;
diff --git a/drivers/net/wireless/intersil/orinoco/scan.c b/drivers/net/wireless/intersil/orinoco/scan.c
index 2c66166ad..d0ceb06c7 100644
--- a/drivers/net/wireless/intersil/orinoco/scan.c
+++ b/drivers/net/wireless/intersil/orinoco/scan.c
@@ -111,7 +111,7 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
}
freq = ieee80211_channel_to_frequency(
- le16_to_cpu(bss->a.channel), IEEE80211_BAND_2GHZ);
+ le16_to_cpu(bss->a.channel), NL80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, freq);
if (!channel) {
printk(KERN_DEBUG "Invalid channel designation %04X(%04X)",
@@ -148,7 +148,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
ie_len = len - sizeof(*bss);
ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
chan = ie ? ie[2] : 0;
- freq = ieee80211_channel_to_frequency(chan, IEEE80211_BAND_2GHZ);
+ freq = ieee80211_channel_to_frequency(chan, NL80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, freq);
timestamp = le64_to_cpu(bss->timestamp);
diff --git a/drivers/net/wireless/intersil/p54/eeprom.c b/drivers/net/wireless/intersil/p54/eeprom.c
index 2fe713eda..d4c73d393 100644
--- a/drivers/net/wireless/intersil/p54/eeprom.c
+++ b/drivers/net/wireless/intersil/p54/eeprom.c
@@ -76,14 +76,14 @@ struct p54_channel_entry {
u16 data;
int index;
int max_power;
- enum ieee80211_band band;
+ enum nl80211_band band;
};
struct p54_channel_list {
struct p54_channel_entry *channels;
size_t entries;
size_t max_entries;
- size_t band_channel_num[IEEE80211_NUM_BANDS];
+ size_t band_channel_num[NUM_NL80211_BANDS];
};
static int p54_get_band_from_freq(u16 freq)
@@ -91,10 +91,10 @@ static int p54_get_band_from_freq(u16 freq)
/* FIXME: sync these values with the 802.11 spec */
if ((freq >= 2412) && (freq <= 2484))
- return IEEE80211_BAND_2GHZ;
+ return NL80211_BAND_2GHZ;
if ((freq >= 4920) && (freq <= 5825))
- return IEEE80211_BAND_5GHZ;
+ return NL80211_BAND_5GHZ;
return -1;
}
@@ -124,16 +124,16 @@ static int p54_compare_rssichan(const void *_a,
static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
struct ieee80211_supported_band *band_entry,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
/* TODO: generate rate array dynamically */
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
band_entry->bitrates = p54_bgrates;
band_entry->n_bitrates = ARRAY_SIZE(p54_bgrates);
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
band_entry->bitrates = p54_arates;
band_entry->n_bitrates = ARRAY_SIZE(p54_arates);
break;
@@ -147,7 +147,7 @@ static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
static int p54_generate_band(struct ieee80211_hw *dev,
struct p54_channel_list *list,
unsigned int *chan_num,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
struct p54_common *priv = dev->priv;
struct ieee80211_supported_band *tmp, *old;
@@ -206,7 +206,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
if (j == 0) {
wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n",
- (band == IEEE80211_BAND_2GHZ) ? 2 : 5);
+ (band == NL80211_BAND_2GHZ) ? 2 : 5);
ret = -ENODATA;
goto err_out;
@@ -396,7 +396,7 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
p54_compare_channels, NULL);
k = 0;
- for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) {
+ for (i = 0, j = 0; i < NUM_NL80211_BANDS; i++) {
if (p54_generate_band(dev, list, &k, i) == 0)
j++;
}
@@ -573,10 +573,10 @@ static int p54_parse_rssical(struct ieee80211_hw *dev,
for (i = 0; i < entries; i++) {
u16 freq = 0;
switch (i) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
freq = 2437;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
freq = 5240;
break;
}
@@ -902,11 +902,11 @@ good_eeprom:
if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW)
p54_init_xbow_synth(priv);
if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
- dev->wiphy->bands[IEEE80211_BAND_2GHZ] =
- priv->band_table[IEEE80211_BAND_2GHZ];
+ dev->wiphy->bands[NL80211_BAND_2GHZ] =
+ priv->band_table[NL80211_BAND_2GHZ];
if (!(synth & PDR_SYNTH_5_GHZ_DISABLED))
- dev->wiphy->bands[IEEE80211_BAND_5GHZ] =
- priv->band_table[IEEE80211_BAND_5GHZ];
+ dev->wiphy->bands[NL80211_BAND_5GHZ] =
+ priv->band_table[NL80211_BAND_5GHZ];
if ((synth & PDR_SYNTH_RX_DIV_MASK) == PDR_SYNTH_RX_DIV_SUPPORTED)
priv->rx_diversity_mask = 3;
if ((synth & PDR_SYNTH_TX_DIV_MASK) == PDR_SYNTH_TX_DIV_SUPPORTED)
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index 7805864e7..d5a3bf91a 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -477,7 +477,7 @@ static void p54_bss_info_changed(struct ieee80211_hw *dev,
p54_set_edcf(priv);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
- if (dev->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
+ if (dev->conf.chandef.chan->band == NL80211_BAND_5GHZ)
priv->basic_rate_mask = (info->basic_rates << 4);
else
priv->basic_rate_mask = info->basic_rates;
@@ -829,7 +829,7 @@ void p54_free_common(struct ieee80211_hw *dev)
struct p54_common *priv = dev->priv;
unsigned int i;
- for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+ for (i = 0; i < NUM_NL80211_BANDS; i++)
kfree(priv->band_table[i]);
kfree(priv->iq_autocal);
diff --git a/drivers/net/wireless/intersil/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h
index 40b401ed6..529939e61 100644
--- a/drivers/net/wireless/intersil/p54/p54.h
+++ b/drivers/net/wireless/intersil/p54/p54.h
@@ -223,7 +223,7 @@ struct p54_common {
struct p54_cal_database *curve_data;
struct p54_cal_database *output_limit;
struct p54_cal_database *rssi_db;
- struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band *band_table[NUM_NL80211_BANDS];
/* BBP/MAC state */
u8 mac_addr[ETH_ALEN];
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index 24e5ff9a9..1af7da0b3 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -353,7 +353,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
if (hdr->rate & 0x10)
rx_status->flag |= RX_FLAG_SHORTPRE;
- if (priv->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
+ if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
rx_status->rate_idx = (rate < 4) ? 0 : rate - 4;
else
rx_status->rate_idx = rate;
@@ -867,7 +867,7 @@ void p54_tx_80211(struct ieee80211_hw *dev,
for (i = 0; i < nrates && ridx < 8; i++) {
/* we register the rates in perfect order */
rate = info->control.rates[i].idx;
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
rate += 4;
/* store the count we actually calculated for TX status */
diff --git a/drivers/net/wireless/intersil/prism54/isl_38xx.c b/drivers/net/wireless/intersil/prism54/isl_38xx.c
index 333c1a2f8..6700387ef 100644
--- a/drivers/net/wireless/intersil/prism54/isl_38xx.c
+++ b/drivers/net/wireless/intersil/prism54/isl_38xx.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
+#include <linux/ktime.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -113,7 +114,7 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base)
#if VERBOSE > SHOW_ERROR_MESSAGES
u32 counter = 0;
- struct timeval current_time;
+ struct timespec64 current_ts64;
DEBUG(SHOW_FUNCTION_CALLS, "isl38xx trigger device\n");
#endif
@@ -121,22 +122,22 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base)
if (asleep) {
/* device is in powersave, trigger the device for wakeup */
#if VERBOSE > SHOW_ERROR_MESSAGES
- do_gettimeofday(&current_time);
- DEBUG(SHOW_TRACING, "%08li.%08li Device wakeup triggered\n",
- current_time.tv_sec, (long)current_time.tv_usec);
+ ktime_get_real_ts64(&current_ts64);
+ DEBUG(SHOW_TRACING, "%lld.%09ld Device wakeup triggered\n",
+ (s64)current_ts64.tv_sec, current_ts64.tv_nsec);
- DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n",
- current_time.tv_sec, (long)current_time.tv_usec,
+ DEBUG(SHOW_TRACING, "%lld.%09ld Device register read %08x\n",
+ (s64)current_ts64.tv_sec, current_ts64.tv_nsec,
readl(device_base + ISL38XX_CTRL_STAT_REG));
#endif
reg = readl(device_base + ISL38XX_INT_IDENT_REG);
if (reg == 0xabadface) {
#if VERBOSE > SHOW_ERROR_MESSAGES
- do_gettimeofday(&current_time);
+ ktime_get_real_ts64(&current_ts64);
DEBUG(SHOW_TRACING,
- "%08li.%08li Device register abadface\n",
- current_time.tv_sec, (long)current_time.tv_usec);
+ "%lld.%09ld Device register abadface\n",
+ (s64)current_ts64.tv_sec, current_ts64.tv_nsec);
#endif
/* read the Device Status Register until Sleepmode bit is set */
while (reg = readl(device_base + ISL38XX_CTRL_STAT_REG),
@@ -149,13 +150,13 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base)
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_TRACING,
- "%08li.%08li Device register read %08x\n",
- current_time.tv_sec, (long)current_time.tv_usec,
+ "%lld.%09ld Device register read %08x\n",
+ (s64)current_ts64.tv_sec, current_ts64.tv_nsec,
readl(device_base + ISL38XX_CTRL_STAT_REG));
- do_gettimeofday(&current_time);
+ ktime_get_real_ts64(&current_ts64);
DEBUG(SHOW_TRACING,
- "%08li.%08li Device asleep counter %i\n",
- current_time.tv_sec, (long)current_time.tv_usec,
+ "%lld.%09ld Device asleep counter %i\n",
+ (s64)current_ts64.tv_sec, current_ts64.tv_nsec,
counter);
#endif
}
@@ -168,9 +169,9 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base)
/* perform another read on the Device Status Register */
reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
- do_gettimeofday(&current_time);
- DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n",
- current_time.tv_sec, (long)current_time.tv_usec, reg);
+ ktime_get_real_ts64(&current_ts64);
+ DEBUG(SHOW_TRACING, "%lld.%00ld Device register read %08x\n",
+ (s64)current_ts64.tv_sec, current_ts64.tv_nsec, reg);
#endif
} else {
/* device is (still) awake */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 06664baa4..4dd5adcdd 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -255,14 +255,14 @@ static struct class *hwsim_class;
static struct net_device *hwsim_mon; /* global monitor netdev */
#define CHAN2G(_freq) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_freq), \
.max_power = 20, \
}
#define CHAN5G(_freq) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = (_freq), \
.hw_value = (_freq), \
.max_power = 20, \
@@ -479,7 +479,7 @@ struct mac80211_hwsim_data {
struct list_head list;
struct ieee80211_hw *hw;
struct device *dev;
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
@@ -1030,7 +1030,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
data->pending_cookie++;
cookie = data->pending_cookie;
info->rate_driver_data[0] = (void *)cookie;
- if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, cookie))
+ if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD))
goto nla_put_failure;
genlmsg_end(skb, msg_head);
@@ -1909,6 +1909,7 @@ static void hw_scan_work(struct work_struct *work)
/* send probes */
for (i = 0; i < req->n_ssids; i++) {
struct sk_buff *probe;
+ struct ieee80211_mgmt *mgmt;
probe = ieee80211_probereq_get(hwsim->hw,
hwsim->scan_addr,
@@ -1918,6 +1919,10 @@ static void hw_scan_work(struct work_struct *work)
if (!probe)
continue;
+ mgmt = (struct ieee80211_mgmt *) probe->data;
+ memcpy(mgmt->da, req->bssid, ETH_ALEN);
+ memcpy(mgmt->bssid, req->bssid, ETH_ALEN);
+
if (req->ie_len)
memcpy(skb_put(probe, req->ie_len), req->ie,
req->ie_len);
@@ -2342,7 +2347,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
u8 addr[ETH_ALEN];
struct mac80211_hwsim_data *data;
struct ieee80211_hw *hw;
- enum ieee80211_band band;
+ enum nl80211_band band;
const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
int idx;
@@ -2471,16 +2476,16 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sizeof(hwsim_channels_5ghz));
memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
- for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband = &data->bands[band];
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
sband->channels = data->channels_2ghz;
sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz);
sband->bitrates = data->rates;
sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
sband->channels = data->channels_5ghz;
sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz);
sband->bitrates = data->rates + 4;
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 66e1c73bd..39f22467c 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -148,6 +148,7 @@ enum {
HWSIM_ATTR_RADIO_NAME,
HWSIM_ATTR_NO_VIF,
HWSIM_ATTR_FREQ,
+ HWSIM_ATTR_PAD,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
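(HWSIM_ATTR_PAD exists only so nla_put_u64_64bit() can insert a zero-length pad attribute and keep the u64 payload aligned to 64 bits on all architectures; the call shape, as used in the cookie hunk above, is:)

	if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD))
		goto nla_put_failure;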
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 2eea76a34..776b44bfd 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -23,7 +23,7 @@
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -639,7 +639,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
if (chan_no != -1) {
struct wiphy *wiphy = priv->wdev->wiphy;
int freq = ieee80211_channel_to_frequency(chan_no,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
struct ieee80211_channel *channel =
ieee80211_get_channel(wiphy, freq);
@@ -1266,7 +1266,7 @@ _new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme)
{
struct cfg80211_scan_request *creq = NULL;
int i, n_channels = ieee80211_get_num_supported_channels(wiphy);
- enum ieee80211_band band;
+ enum nl80211_band band;
creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
n_channels * sizeof(void *),
@@ -1281,7 +1281,7 @@ _new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme)
/* Scan all available channels */
i = 0;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
int j;
if (!wiphy->bands[band])
@@ -2200,7 +2200,7 @@ int lbs_cfg_register(struct lbs_private *priv)
if (lbs_mesh_activated(priv))
wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MESH_POINT);
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz;
+ wdev->wiphy->bands[NL80211_BAND_2GHZ] = &lbs_band_2ghz;
/*
* We could check priv->fwcapinfo && FW_CAPINFO_WPA, but I have
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 4ddd0e5a6..301170ccc 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -743,7 +743,7 @@ int lbs_set_11d_domain_info(struct lbs_private *priv)
struct cmd_ds_802_11d_domain_info cmd;
struct mrvl_ie_domain_param_set *domain = &cmd.domain;
struct ieee80211_country_ie_triplet *t;
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_channel *ch;
u8 num_triplet = 0;
u8 num_parsed_chan = 0;
@@ -777,7 +777,7 @@ int lbs_set_11d_domain_info(struct lbs_private *priv)
* etc.
*/
for (band = 0;
- (band < IEEE80211_NUM_BANDS) && (num_triplet < MAX_11D_TRIPLETS);
+ (band < NUM_NL80211_BANDS) && (num_triplet < MAX_11D_TRIPLETS);
band++) {
if (!bands[band])
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index a47f0acc0..0bf8916a0 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -570,7 +570,7 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
stats.flag |= RX_FLAG_FAILED_FCS_CRC;
stats.freq = priv->cur_freq;
- stats.band = IEEE80211_BAND_2GHZ;
+ stats.band = NL80211_BAND_2GHZ;
stats.signal = prxpd->snr;
priv->noise = prxpd->nf;
/* Marvell rate index has a hole at value 4 */
@@ -642,7 +642,7 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
priv->band.bitrates = priv->rates;
priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
priv->band.channels = priv->channels;
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 09578c6cd..a74cc43b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -59,7 +59,10 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
skb->len);
}
- ret = mwifiex_recv_packet(priv, rx_skb);
+ if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+ ret = mwifiex_uap_recv_packet(priv, rx_skb);
+ else
+ ret = mwifiex_recv_packet(priv, rx_skb);
if (ret == -1)
mwifiex_dbg(priv->adapter, ERROR,
"Rx of A-MSDU failed");
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index bb7235e1b..ff948a922 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -474,7 +474,7 @@ int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
u8 no_of_parsed_chan = 0;
u8 first_chan = 0, next_chan = 0, max_pwr = 0;
u8 i, flag = 0;
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
@@ -1410,7 +1410,7 @@ mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats;
- enum ieee80211_band band;
+ enum nl80211_band band;
mwifiex_dbg(priv->adapter, DUMP, "dump_survey idx=%d\n", idx);
@@ -1586,7 +1586,7 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
- enum ieee80211_band band;
+ enum nl80211_band band;
struct mwifiex_adapter *adapter = priv->adapter;
if (!priv->media_connected) {
@@ -1600,11 +1600,11 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
memset(bitmap_rates, 0, sizeof(bitmap_rates));
/* Fill HR/DSSS rates. */
- if (band == IEEE80211_BAND_2GHZ)
+ if (band == NL80211_BAND_2GHZ)
bitmap_rates[0] = mask->control[band].legacy & 0x000f;
/* Fill OFDM rates */
- if (band == IEEE80211_BAND_2GHZ)
+ if (band == NL80211_BAND_2GHZ)
bitmap_rates[1] = (mask->control[band].legacy & 0x0ff0) >> 4;
else
bitmap_rates[1] = mask->control[band].legacy;
@@ -1771,7 +1771,7 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
} else {
struct ieee80211_sta_ht_cap *ht_info;
int rx_mcs_supp;
- enum ieee80211_band band;
+ enum nl80211_band band;
if ((tx_ant == 0x1 && rx_ant == 0x1)) {
adapter->user_dev_mcs_support = HT_STREAM_1X1;
@@ -1785,7 +1785,7 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
MWIFIEX_11AC_MCS_MAP_2X2;
}
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!adapter->wiphy->bands[band])
continue;
@@ -1997,7 +1997,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
struct cfg80211_bss *bss;
int ie_len;
u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)];
- enum ieee80211_band band;
+ enum nl80211_band band;
if (mwifiex_get_bss_info(priv, &bss_info))
return -1;
@@ -2271,7 +2271,7 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
int index = 0, i;
u8 config_bands = 0;
- if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ if (params->chandef.chan->band == NL80211_BAND_2GHZ) {
if (!params->basic_rates) {
config_bands = BAND_B | BAND_G;
} else {
@@ -2859,18 +2859,18 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
mwifiex_init_priv_params(priv, dev);
priv->netdev = dev;
- mwifiex_setup_ht_caps(&wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap, priv);
+ mwifiex_setup_ht_caps(&wiphy->bands[NL80211_BAND_2GHZ]->ht_cap, priv);
if (adapter->is_hw_11ac_capable)
mwifiex_setup_vht_caps(
- &wiphy->bands[IEEE80211_BAND_2GHZ]->vht_cap, priv);
+ &wiphy->bands[NL80211_BAND_2GHZ]->vht_cap, priv);
if (adapter->config_bands & BAND_A)
mwifiex_setup_ht_caps(
- &wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap, priv);
+ &wiphy->bands[NL80211_BAND_5GHZ]->ht_cap, priv);
if ((adapter->config_bands & BAND_A) && adapter->is_hw_11ac_capable)
mwifiex_setup_vht_caps(
- &wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap, priv);
+ &wiphy->bands[NL80211_BAND_5GHZ]->vht_cap, priv);
dev_net_set(dev, wiphy_net(wiphy));
dev->ieee80211_ptr = &priv->wdev;
@@ -3272,8 +3272,11 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv && priv->netdev)
+ if (priv && priv->netdev) {
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+ if (netif_carrier_ok(priv->netdev))
+ netif_carrier_off(priv->netdev);
+ }
}
for (i = 0; i < retry_num; i++) {
@@ -3341,13 +3344,20 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
struct mwifiex_ds_wakeup_reason wakeup_reason;
struct cfg80211_wowlan_wakeup wakeup_report;
int i;
+ bool report_wakeup_reason = true;
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv && priv->netdev)
+ if (priv && priv->netdev) {
+ if (!netif_carrier_ok(priv->netdev))
+ netif_carrier_on(priv->netdev);
mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
+ }
}
+ if (!wiphy->wowlan_config)
+ goto done;
+
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
mwifiex_get_wakeup_reason(priv, HostCmd_ACT_GEN_GET, MWIFIEX_SYNC_CMD,
&wakeup_reason);
@@ -3380,19 +3390,20 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
if (wiphy->wowlan_config->n_patterns)
wakeup_report.pattern_idx = 1;
break;
- case CONTROL_FRAME_MATCHED:
- break;
- case MANAGEMENT_FRAME_MATCHED:
+ case GTK_REKEY_FAILURE:
+ if (wiphy->wowlan_config->gtk_rekey_failure)
+ wakeup_report.gtk_rekey_failure = true;
break;
default:
+ report_wakeup_reason = false;
break;
}
- if ((wakeup_reason.hs_wakeup_reason > 0) &&
- (wakeup_reason.hs_wakeup_reason <= 7))
+ if (report_wakeup_reason)
cfg80211_report_wowlan_wakeup(&priv->wdev, &wakeup_report,
GFP_KERNEL);
+done:
if (adapter->nd_info) {
for (i = 0 ; i < adapter->nd_info->n_matches ; i++)
kfree(adapter->nd_info->matches[i]);
@@ -3410,6 +3421,16 @@ static void mwifiex_cfg80211_set_wakeup(struct wiphy *wiphy,
device_set_wakeup_enable(adapter->dev, enabled);
}
+
+static int mwifiex_set_rekey_data(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ return mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG,
+ HostCmd_ACT_GEN_SET, 0, data, true);
+}
+
#endif
static int mwifiex_get_coalesce_pkt_type(u8 *byte_seq)
@@ -3801,7 +3822,7 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy,
struct ieee80211_channel *chan;
u8 second_chan_offset;
enum nl80211_channel_type chan_type;
- enum ieee80211_band band;
+ enum nl80211_band band;
int freq;
int ret = -ENODATA;
@@ -3932,6 +3953,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.suspend = mwifiex_cfg80211_suspend,
.resume = mwifiex_cfg80211_resume,
.set_wakeup = mwifiex_cfg80211_set_wakeup,
+ .set_rekey_data = mwifiex_set_rekey_data,
#endif
.set_coalesce = mwifiex_cfg80211_set_coalesce,
.tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
@@ -3948,7 +3970,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
- WIPHY_WOWLAN_NET_DETECT,
+ WIPHY_WOWLAN_NET_DETECT | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE,
.n_patterns = MWIFIEX_MEF_MAX_FILTERS,
.pattern_min_len = 1,
.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
@@ -4031,11 +4054,11 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_AP);
- wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
+ wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
if (adapter->config_bands & BAND_A)
- wiphy->bands[IEEE80211_BAND_5GHZ] = &mwifiex_band_5ghz;
+ wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
else
- wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+ wiphy->bands[NL80211_BAND_5GHZ] = NULL;
if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
@@ -4086,6 +4109,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->features |= NL80211_FEATURE_HT_IBSS |
NL80211_FEATURE_INACTIVITY_TIMER |
+ NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_NEED_OBSS_SCAN;
if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index 09fae2714..1ff22055e 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -322,9 +322,9 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
return cfp;
if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG)
- sband = priv->wdev.wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = priv->wdev.wiphy->bands[NL80211_BAND_2GHZ];
else
- sband = priv->wdev.wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = priv->wdev.wiphy->bands[NL80211_BAND_5GHZ];
if (!sband) {
mwifiex_dbg(priv->adapter, ERROR,
@@ -399,15 +399,15 @@ u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv,
int i;
if (radio_type) {
- sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
if (WARN_ON_ONCE(!sband))
return 0;
- rate_mask = request->rates[IEEE80211_BAND_5GHZ];
+ rate_mask = request->rates[NL80211_BAND_5GHZ];
} else {
- sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = wiphy->bands[NL80211_BAND_2GHZ];
if (WARN_ON_ONCE(!sband))
return 0;
- rate_mask = request->rates[IEEE80211_BAND_2GHZ];
+ rate_mask = request->rates[NL80211_BAND_2GHZ];
}
num_rates = 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index a12adee77..6bc2011d8 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -105,6 +105,47 @@ mwifiex_clean_cmd_node(struct mwifiex_adapter *adapter,
}
/*
+ * This function returns a command to the command free queue.
+ *
+ * The function also calls the completion callback if required, before
+ * cleaning the command node and re-inserting it into the free queue.
+ */
+static void
+mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
+ struct cmd_ctrl_node *cmd_node)
+{
+ unsigned long flags;
+
+ if (!cmd_node)
+ return;
+
+ if (cmd_node->wait_q_enabled)
+ mwifiex_complete_cmd(adapter, cmd_node);
+ /* Clean the node */
+ mwifiex_clean_cmd_node(adapter, cmd_node);
+
+ /* Insert node into cmd_free_q */
+ spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
+ list_add_tail(&cmd_node->list, &adapter->cmd_free_q);
+ spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
+}
+
+/* This function reuses a command node. */
+void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
+ struct cmd_ctrl_node *cmd_node)
+{
+ struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data;
+
+ mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+
+ atomic_dec(&adapter->cmd_pending);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
+ le16_to_cpu(host_cmd->command),
+ atomic_read(&adapter->cmd_pending));
+}
+
+/*
* This function sends a host command to the firmware.
*
* The function copies the host command into the driver command
@@ -614,47 +655,6 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
}
/*
- * This function returns a command to the command free queue.
- *
- * The function also calls the completion callback if required, before
- * cleaning the command node and re-inserting it into the free queue.
- */
-void
-mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
- struct cmd_ctrl_node *cmd_node)
-{
- unsigned long flags;
-
- if (!cmd_node)
- return;
-
- if (cmd_node->wait_q_enabled)
- mwifiex_complete_cmd(adapter, cmd_node);
- /* Clean the node */
- mwifiex_clean_cmd_node(adapter, cmd_node);
-
- /* Insert node into cmd_free_q */
- spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
- list_add_tail(&cmd_node->list, &adapter->cmd_free_q);
- spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
-}
-
-/* This function reuses a command node. */
-void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
- struct cmd_ctrl_node *cmd_node)
-{
- struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data;
-
- mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-
- atomic_dec(&adapter->cmd_pending);
- mwifiex_dbg(adapter, CMD,
- "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
- le16_to_cpu(host_cmd->command),
- atomic_read(&adapter->cmd_pending));
-}
-
-/*
* This function queues a command to the command pending queue.
*
* This in effect adds the command to the command list to be executed.
@@ -991,6 +991,23 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
adapter->if_ops.card_reset(adapter);
}
+void
+mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter)
+{
+ struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
+ unsigned long flags;
+
+ /* Cancel all pending scan command */
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ list_for_each_entry_safe(cmd_node, tmp_node,
+ &adapter->scan_pending_q, list) {
+ list_del(&cmd_node->list);
+ cmd_node->wait_q_enabled = false;
+ mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+ }
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+}
+
/*
* This function cancels all the pending commands.
*
@@ -1009,9 +1026,9 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
/* Cancel current cmd */
if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) {
- adapter->curr_cmd->wait_q_enabled = false;
adapter->cmd_wait_q.status = -1;
mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+ adapter->curr_cmd->wait_q_enabled = false;
/* no recycle probably wait for response */
}
/* Cancel all pending command */
@@ -1029,16 +1046,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
- /* Cancel all pending scan command */
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- list_for_each_entry_safe(cmd_node, tmp_node,
- &adapter->scan_pending_q, list) {
- list_del(&cmd_node->list);
-
- cmd_node->wait_q_enabled = false;
- mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
- }
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ mwifiex_cancel_pending_scan_cmd(adapter);
if (adapter->scan_processing) {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
@@ -1070,9 +1078,8 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
void
mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
{
- struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
+ struct cmd_ctrl_node *cmd_node = NULL;
unsigned long cmd_flags;
- unsigned long scan_pending_q_flags;
struct mwifiex_private *priv;
int i;
@@ -1094,17 +1101,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
mwifiex_recycle_cmd_node(adapter, cmd_node);
}
- /* Cancel all pending scan command */
- spin_lock_irqsave(&adapter->scan_pending_q_lock,
- scan_pending_q_flags);
- list_for_each_entry_safe(cmd_node, tmp_node,
- &adapter->scan_pending_q, list) {
- list_del(&cmd_node->list);
- cmd_node->wait_q_enabled = false;
- mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
- }
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- scan_pending_q_flags);
+ mwifiex_cancel_pending_scan_cmd(adapter);
if (adapter->scan_processing) {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
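The mwifiex_cancel_pending_scan_cmd() helper added above centralizes a loop that was open-coded in four places (twice in this file, once each in scan.c and sta_cmdresp.c, all removed later in this patch). The sta_cmdresp.c copy even released and re-acquired scan_pending_q_lock inside list_for_each_entry_safe(), which is fragile; a sketch of the hazard, with free_node() standing in for the real free-queue call:

	list_for_each_entry_safe(cmd_node, tmp_node, &q, list) {
		list_del(&cmd_node->list);
		spin_unlock_irqrestore(&lock, flags);
		free_node(cmd_node);	/* the list can mutate here... */
		spin_lock_irqsave(&lock, flags);
		/* ...so tmp_node may point at an already-removed entry */
	}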
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index c134cf865..8e4145abd 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -372,6 +372,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_COALESCE_CFG 0x010a
#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
+#define HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG 0x010f
#define HostCmd_CMD_11AC_CFG 0x0112
#define HostCmd_CMD_HS_WAKEUP_REASON 0x0116
#define HostCmd_CMD_TDLS_CONFIG 0x0100
@@ -619,6 +620,7 @@ enum HS_WAKEUP_REASON {
MAGIC_PATTERN_MATCHED,
CONTROL_FRAME_MATCHED,
MANAGEMENT_FRAME_MATCHED,
+ GTK_REKEY_FAILURE,
RESERVED
};
@@ -2183,6 +2185,14 @@ struct host_cmd_ds_wakeup_reason {
u16 wakeup_reason;
} __packed;
+struct host_cmd_ds_gtk_rekey_params {
+ __le16 action;
+ u8 kck[NL80211_KCK_LEN];
+ u8 kek[NL80211_KEK_LEN];
+ __le32 replay_ctr_low;
+ __le32 replay_ctr_high;
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -2256,6 +2266,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_multi_chan_policy mc_policy;
struct host_cmd_ds_robust_coex coex;
struct host_cmd_ds_wakeup_reason hs_wakeup_reason;
+ struct host_cmd_ds_gtk_rekey_params rekey;
} params;
} __packed;
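The rekey parameter block added above travels to firmware verbatim, which is why it is __packed. A compile-time size check one might add next to it, assuming the nl80211 key lengths NL80211_KCK_LEN == 16 and NL80211_KEK_LEN == 16:

	/* 2 (action) + 16 (kck) + 16 (kek) + 4 + 4 (replay counter) */
	BUILD_BUG_ON(sizeof(struct host_cmd_ds_gtk_rekey_params) != 42);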
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 517653b3a..78c532f0d 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -317,7 +317,7 @@ void mwifiex_set_trans_start(struct net_device *dev)
for (i = 0; i < dev->num_tx_queues; i++)
netdev_get_tx_queue(dev, i)->trans_start = jiffies;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
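netif_trans_update() is the 4.7 helper that replaces open-coded writes of jiffies to trans_start; its definition is approximately:

	static inline void netif_trans_update(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		if (txq->trans_start != jiffies)
			txq->trans_start = jiffies;
	}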
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 2d40cb76c..4dde1ae35 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -702,6 +702,13 @@ mwifiex_close(struct net_device *dev)
priv->scan_aborting = true;
}
+ if (priv->sched_scanning) {
+ mwifiex_dbg(priv->adapter, INFO,
+ "aborting bgscan on ndo_stop\n");
+ mwifiex_stop_bg_scan(priv);
+ cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+ }
+
return 0;
}
@@ -753,13 +760,6 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
mwifiex_queue_main_work(priv->adapter);
- if (priv->sched_scanning) {
- mwifiex_dbg(priv->adapter, INFO,
- "aborting bgscan on ndo_stop\n");
- mwifiex_stop_bg_scan(priv);
- cfg80211_sched_scan_stopped(priv->wdev.wiphy);
- }
-
return 0;
}
@@ -1074,12 +1074,14 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
priv->netdev->name, priv->num_tx_timeout);
}
- if (adapter->iface_type == MWIFIEX_SDIO) {
- p += sprintf(p, "\n=== SDIO register dump===\n");
+ if (adapter->iface_type == MWIFIEX_SDIO ||
+ adapter->iface_type == MWIFIEX_PCIE) {
+ p += sprintf(p, "\n=== %s register dump===\n",
+ adapter->iface_type == MWIFIEX_SDIO ?
+ "SDIO" : "PCIE");
if (adapter->if_ops.reg_dump)
p += adapter->if_ops.reg_dump(adapter, p);
}
-
p += sprintf(p, "\n=== more debug information\n");
debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
if (debug_info) {
@@ -1432,7 +1434,7 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
struct mwifiex_private *priv = NULL;
int i;
- if (down_interruptible(sem))
+ if (down_trylock(sem))
goto exit_sem_err;
if (!adapter)
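The down_interruptible() to down_trylock() switch above changes the blocking behaviour of the remove path: instead of sleeping until the semaphore is free (and failing only on a signal), it now fails immediately when the semaphore is contended. A sketch of the two conventions, assuming a struct semaphore sem:

	if (down_interruptible(&sem))	/* sleeps; nonzero only on signal */
		return -EINTR;

	if (down_trylock(&sem))		/* never sleeps; nonzero if held */
		return -EBUSY;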
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index aafc4ab4e..0207af00b 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -37,6 +37,17 @@
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/devcoredump.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
#include "decl.h"
#include "ioctl.h"
@@ -100,8 +111,8 @@ enum {
#define SCAN_BEACON_ENTRY_PAD 6
#define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 110
-#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 30
-#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 30
+#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 40
+#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 40
#define MWIFIEX_DEF_SCAN_CHAN_GAP_TIME 50
#define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI)))
@@ -1019,6 +1030,8 @@ int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter);
int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb);
+int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
+ struct sk_buff *skb);
int mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
struct sk_buff *skb);
@@ -1040,9 +1053,8 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter);
int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter);
void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter);
void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter);
+void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter);
-void mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
- struct cmd_ctrl_node *cmd_node);
void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 106717dd8..0c7937eb6 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -190,7 +190,6 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
if (ent->driver_data) {
struct mwifiex_pcie_device *data = (void *)ent->driver_data;
- card->pcie.firmware = data->firmware;
card->pcie.reg = data->reg;
card->pcie.blksz_fw_dl = data->blksz_fw_dl;
card->pcie.tx_buf_size = data->tx_buf_size;
@@ -269,6 +268,11 @@ static const struct pci_device_id mwifiex_ids[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
.driver_data = (unsigned long)&mwifiex_pcie8997,
},
+ {
+ PCIE_VENDOR_ID_V2_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ .driver_data = (unsigned long)&mwifiex_pcie8997,
+ },
{},
};
@@ -2351,6 +2355,47 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
return 0;
}
+/* Function to dump PCIE scratch registers in case of FW crash
+ */
+static int
+mwifiex_pcie_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
+{
+ char *p = drv_buf;
+ char buf[256], *ptr;
+ int i;
+ u32 value;
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ int pcie_scratch_reg[] = {PCIE_SCRATCH_12_REG,
+ PCIE_SCRATCH_13_REG,
+ PCIE_SCRATCH_14_REG};
+
+ if (!p)
+ return 0;
+
+ mwifiex_dbg(adapter, MSG, "PCIE register dump start\n");
+
+ if (mwifiex_read_reg(adapter, reg->fw_status, &value)) {
+ mwifiex_dbg(adapter, ERROR, "failed to read firmware status");
+ return 0;
+ }
+
+ ptr = buf;
+ mwifiex_dbg(adapter, MSG, "pcie scratch register:");
+ for (i = 0; i < ARRAY_SIZE(pcie_scratch_reg); i++) {
+ mwifiex_read_reg(adapter, pcie_scratch_reg[i], &value);
+ ptr += sprintf(ptr, "reg:0x%x, value=0x%x\n",
+ pcie_scratch_reg[i], value);
+ }
+
+ mwifiex_dbg(adapter, MSG, "%s\n", buf);
+ p += sprintf(p, "%s\n", buf);
+
+ mwifiex_dbg(adapter, MSG, "PCIE register dump end\n");
+
+ return p - drv_buf;
+}
+
/* This function read/write firmware */
static enum rdwr_status
mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
@@ -2759,6 +2804,68 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
}
/*
+ * This function gets the firmware name for downloading based on revision id.
+ *
+ * Read the revision id register to determine the revision id.
+ */
+static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
+{
+ int revision_id = 0;
+ int version;
+ struct pcie_service_card *card = adapter->card;
+
+ switch (card->dev->device) {
+ case PCIE_DEVICE_ID_MARVELL_88W8766P:
+ strcpy(adapter->fw_name, PCIE8766_DEFAULT_FW_NAME);
+ break;
+ case PCIE_DEVICE_ID_MARVELL_88W8897:
+ mwifiex_write_reg(adapter, 0x0c58, 0x80c00000);
+ mwifiex_read_reg(adapter, 0x0c58, &revision_id);
+ revision_id &= 0xff00;
+ switch (revision_id) {
+ case PCIE8897_A0:
+ strcpy(adapter->fw_name, PCIE8897_A0_FW_NAME);
+ break;
+ case PCIE8897_B0:
+ strcpy(adapter->fw_name, PCIE8897_B0_FW_NAME);
+ break;
+ default:
+ strcpy(adapter->fw_name, PCIE8897_DEFAULT_FW_NAME);
+
+ break;
+ }
+ break;
+ case PCIE_DEVICE_ID_MARVELL_88W8997:
+ mwifiex_read_reg(adapter, 0x0c48, &revision_id);
+ mwifiex_read_reg(adapter, 0x0cd0, &version);
+ version &= 0x7;
+ switch (revision_id) {
+ case PCIE8997_V2:
+ if (version == CHIP_VER_PCIEUSB)
+ strcpy(adapter->fw_name,
+ PCIEUSB8997_FW_NAME_V2);
+ else
+ strcpy(adapter->fw_name,
+ PCIEUART8997_FW_NAME_V2);
+ break;
+ case PCIE8997_Z:
+ if (version == CHIP_VER_PCIEUSB)
+ strcpy(adapter->fw_name,
+ PCIEUSB8997_FW_NAME_Z);
+ else
+ strcpy(adapter->fw_name,
+ PCIEUART8997_FW_NAME_Z);
+ break;
+ default:
+ strcpy(adapter->fw_name, PCIE8997_DEFAULT_FW_NAME);
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/*
* This function registers the PCIE device.
*
* PCIE IRQ is claimed, block size is set and driver data is initialized.
@@ -2778,8 +2885,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
adapter->tx_buf_size = card->pcie.tx_buf_size;
adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl;
adapter->num_mem_types = card->pcie.num_mem_types;
- strcpy(adapter->fw_name, card->pcie.firmware);
adapter->ext_scan = card->pcie.can_ext_scan;
+ mwifiex_pcie_get_fw_name(adapter);
return 0;
}
@@ -2850,6 +2957,7 @@ static struct mwifiex_if_ops pcie_ops = {
.cleanup_mpa_buf = NULL,
.init_fw_port = mwifiex_pcie_init_fw_port,
.clean_pcie_ring = mwifiex_clean_pcie_ring_buf,
+ .reg_dump = mwifiex_pcie_reg_dump,
.device_dump = mwifiex_pcie_device_dump,
};
@@ -2907,4 +3015,3 @@ MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION);
MODULE_VERSION(PCIE_VERSION);
MODULE_LICENSE("GPL v2");
-/*(DEBLOBBED)*/
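mwifiex_pcie_reg_dump() above follows the driver's reg_dump contract: append printable text at drv_buf and return the number of bytes written. A minimal sketch of that cursor idiom, with dump_words() as a hypothetical helper:

	static int dump_words(char *buf, const u32 *vals, int n)
	{
		char *p = buf;
		int i;

		for (i = 0; i < n; i++)
			p += sprintf(p, "reg[%d]=0x%08x\n", i, vals[i]);

		return p - buf;	/* bytes consumed, like p - drv_buf above */
	}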
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index e55002806..db18ac6f5 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -23,7 +23,6 @@
#define _MWIFIEX_PCIE_H
#include <linux/pci.h>
-#include <linux/pcieport_if.h>
#include <linux/interrupt.h>
#include "decl.h"
@@ -31,13 +30,26 @@
#define PCIE8766_DEFAULT_FW_NAME "/*(DEBLOBBED)*/"
#define PCIE8897_DEFAULT_FW_NAME "/*(DEBLOBBED)*/"
+#define PCIE8897_A0_FW_NAME "/*(DEBLOBBED)*/"
+#define PCIE8897_B0_FW_NAME "/*(DEBLOBBED)*/"
#define PCIE8997_DEFAULT_FW_NAME "/*(DEBLOBBED)*/"
+#define PCIEUART8997_FW_NAME_Z "/*(DEBLOBBED)*/"
+#define PCIEUART8997_FW_NAME_V2 "/*(DEBLOBBED)*/"
+#define PCIEUSB8997_FW_NAME_Z "/*(DEBLOBBED)*/"
+#define PCIEUSB8997_FW_NAME_V2 "/*(DEBLOBBED)*/"
#define PCIE_VENDOR_ID_MARVELL (0x11ab)
+#define PCIE_VENDOR_ID_V2_MARVELL (0x1b4b)
#define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30)
#define PCIE_DEVICE_ID_MARVELL_88W8897 (0x2b38)
#define PCIE_DEVICE_ID_MARVELL_88W8997 (0x2b42)
+#define PCIE8897_A0 0x1100
+#define PCIE8897_B0 0x1200
+#define PCIE8997_Z 0x0
+#define PCIE8997_V2 0x471
+#define CHIP_VER_PCIEUSB 0x2
+
/* Constants for Buffer Descriptor (BD) rings */
#define MWIFIEX_MAX_TXRX_BD 0x20
#define MWIFIEX_TXBD_MASK 0x3F
@@ -65,6 +77,8 @@
#define PCIE_SCRATCH_10_REG 0xCE8
#define PCIE_SCRATCH_11_REG 0xCEC
#define PCIE_SCRATCH_12_REG 0xCF0
+#define PCIE_SCRATCH_13_REG 0xCF8
+#define PCIE_SCRATCH_14_REG 0xCFC
#define PCIE_RD_DATA_PTR_Q0_Q1 0xC08C
#define PCIE_WR_DATA_PTR_Q0_Q1 0xC05C
@@ -102,7 +116,7 @@
/* FW awake cookie after FW ready */
#define FW_AWAKE_COOKIE (0xAA55AA55)
#define MWIFIEX_DEF_SLEEP_COOKIE 0xBEEFBEEF
-#define MWIFIEX_MAX_DELAY_COUNT 5
+#define MWIFIEX_MAX_DELAY_COUNT 100
struct mwifiex_pcie_card_reg {
u16 cmd_addr_lo;
@@ -263,7 +277,6 @@ static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = {
};
struct mwifiex_pcie_device {
- const char *firmware;
const struct mwifiex_pcie_card_reg *reg;
u16 blksz_fw_dl;
u16 tx_buf_size;
@@ -274,7 +287,6 @@ struct mwifiex_pcie_device {
};
static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
- .firmware = PCIE8766_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_8766,
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
@@ -283,7 +295,6 @@ static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
};
static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
- .firmware = PCIE8897_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_8897,
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
@@ -294,7 +305,6 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
};
static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
- .firmware = PCIE8997_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_8997,
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 489f7a911..bc5e52ceb 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -76,6 +76,39 @@ static u8 mwifiex_rsn_oui[CIPHER_SUITE_MAX][4] = {
{ 0x00, 0x0f, 0xac, 0x04 }, /* AES */
};
+static void
+_dbg_security_flags(int log_level, const char *func, const char *desc,
+ struct mwifiex_private *priv,
+ struct mwifiex_bssdescriptor *bss_desc)
+{
+ _mwifiex_dbg(priv->adapter, log_level,
+ "info: %s: %s:\twpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\tEncMode=%#x privacy=%#x\n",
+ func, desc,
+ bss_desc->bcn_wpa_ie ?
+ bss_desc->bcn_wpa_ie->vend_hdr.element_id : 0,
+ bss_desc->bcn_rsn_ie ?
+ bss_desc->bcn_rsn_ie->ieee_hdr.element_id : 0,
+ priv->sec_info.wep_enabled ? "e" : "d",
+ priv->sec_info.wpa_enabled ? "e" : "d",
+ priv->sec_info.wpa2_enabled ? "e" : "d",
+ priv->sec_info.encryption_mode,
+ bss_desc->privacy);
+}
+#define dbg_security_flags(mask, desc, priv, bss_desc) \
+ _dbg_security_flags(MWIFIEX_DBG_##mask, __func__, desc, priv, bss_desc)
+
+static bool
+has_ieee_hdr(struct ieee_types_generic *ie, u8 key)
+{
+ return (ie && ie->ieee_hdr.element_id == key);
+}
+
+static bool
+has_vendor_hdr(struct ieee_types_vendor_specific *ie, u8 key)
+{
+ return (ie && ie->vend_hdr.element_id == key);
+}
+
/*
* This function parses a given IE for a given OUI.
*
@@ -121,8 +154,7 @@ mwifiex_is_rsn_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
struct ie_body *iebody;
u8 ret = MWIFIEX_OUI_NOT_PRESENT;
- if (((bss_desc->bcn_rsn_ie) && ((*(bss_desc->bcn_rsn_ie)).
- ieee_hdr.element_id == WLAN_EID_RSN))) {
+ if (has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN)) {
iebody = (struct ie_body *)
(((u8 *) bss_desc->bcn_rsn_ie->data) +
RSN_GTK_OUI_OFFSET);
@@ -148,9 +180,7 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
struct ie_body *iebody;
u8 ret = MWIFIEX_OUI_NOT_PRESENT;
- if (((bss_desc->bcn_wpa_ie) &&
- ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id ==
- WLAN_EID_VENDOR_SPECIFIC))) {
+ if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
oui = &mwifiex_wpa_oui[cipher][0];
ret = mwifiex_search_oui_in_ie(iebody, oui);
@@ -180,11 +210,8 @@ mwifiex_is_bss_wapi(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
if (priv->sec_info.wapi_enabled &&
- (bss_desc->bcn_wapi_ie &&
- ((*(bss_desc->bcn_wapi_ie)).ieee_hdr.element_id ==
- WLAN_EID_BSS_AC_ACCESS_DELAY))) {
+ has_ieee_hdr(bss_desc->bcn_wapi_ie, WLAN_EID_BSS_AC_ACCESS_DELAY))
return true;
- }
return false;
}
@@ -197,12 +224,9 @@ mwifiex_is_bss_no_sec(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
- !priv->sec_info.wpa2_enabled && ((!bss_desc->bcn_wpa_ie) ||
- ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id !=
- WLAN_EID_VENDOR_SPECIFIC)) &&
- ((!bss_desc->bcn_rsn_ie) ||
- ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id !=
- WLAN_EID_RSN)) &&
+ !priv->sec_info.wpa2_enabled &&
+ !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) &&
+ !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) &&
!priv->sec_info.encryption_mode && !bss_desc->privacy) {
return true;
}
@@ -233,29 +257,14 @@ mwifiex_is_bss_wpa(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
if (!priv->sec_info.wep_enabled && priv->sec_info.wpa_enabled &&
- !priv->sec_info.wpa2_enabled && ((bss_desc->bcn_wpa_ie) &&
- ((*(bss_desc->bcn_wpa_ie)).
- vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC))
+ !priv->sec_info.wpa2_enabled &&
+ has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)
/*
* Privacy bit may NOT be set in some APs like
* LinkSys WRT54G && bss_desc->privacy
*/
) {
- mwifiex_dbg(priv->adapter, INFO,
- "info: %s: WPA:\t"
- "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
- "EncMode=%#x privacy=%#x\n", __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*bss_desc->bcn_wpa_ie).
- vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*bss_desc->bcn_rsn_ie).
- ieee_hdr.element_id : 0,
- (priv->sec_info.wep_enabled) ? "e" : "d",
- (priv->sec_info.wpa_enabled) ? "e" : "d",
- (priv->sec_info.wpa2_enabled) ? "e" : "d",
- priv->sec_info.encryption_mode,
- bss_desc->privacy);
+ dbg_security_flags(INFO, "WPA", priv, bss_desc);
return true;
}
return false;
@@ -269,30 +278,14 @@ static bool
mwifiex_is_bss_wpa2(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
- if (!priv->sec_info.wep_enabled &&
- !priv->sec_info.wpa_enabled &&
+ if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
priv->sec_info.wpa2_enabled &&
- ((bss_desc->bcn_rsn_ie) &&
- ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id == WLAN_EID_RSN))) {
+ has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN)) {
/*
* Privacy bit may NOT be set in some APs like
* LinkSys WRT54G && bss_desc->privacy
*/
- mwifiex_dbg(priv->adapter, INFO,
- "info: %s: WPA2:\t"
- "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
- "EncMode=%#x privacy=%#x\n", __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*bss_desc->bcn_wpa_ie).
- vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*bss_desc->bcn_rsn_ie).
- ieee_hdr.element_id : 0,
- (priv->sec_info.wep_enabled) ? "e" : "d",
- (priv->sec_info.wpa_enabled) ? "e" : "d",
- (priv->sec_info.wpa2_enabled) ? "e" : "d",
- priv->sec_info.encryption_mode,
- bss_desc->privacy);
+ dbg_security_flags(INFO, "WAP2", priv, bss_desc);
return true;
}
return false;
@@ -308,11 +301,8 @@ mwifiex_is_bss_adhoc_aes(struct mwifiex_private *priv,
{
if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
!priv->sec_info.wpa2_enabled &&
- ((!bss_desc->bcn_wpa_ie) ||
- ((*(bss_desc->bcn_wpa_ie)).
- vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
- ((!bss_desc->bcn_rsn_ie) ||
- ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
+ !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) &&
+ !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) &&
!priv->sec_info.encryption_mode && bss_desc->privacy) {
return true;
}
@@ -329,25 +319,10 @@ mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv,
{
if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
!priv->sec_info.wpa2_enabled &&
- ((!bss_desc->bcn_wpa_ie) ||
- ((*(bss_desc->bcn_wpa_ie)).
- vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
- ((!bss_desc->bcn_rsn_ie) ||
- ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
+ !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) &&
+ !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) &&
priv->sec_info.encryption_mode && bss_desc->privacy) {
- mwifiex_dbg(priv->adapter, INFO,
- "info: %s: dynamic\t"
- "WEP: wpa_ie=%#x wpa2_ie=%#x\t"
- "EncMode=%#x privacy=%#x\n",
- __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*bss_desc->bcn_wpa_ie).
- vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*bss_desc->bcn_rsn_ie).
- ieee_hdr.element_id : 0,
- priv->sec_info.encryption_mode,
- bss_desc->privacy);
+ dbg_security_flags(INFO, "dynamic", priv, bss_desc);
return true;
}
return false;
@@ -460,18 +435,7 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
}
/* Security doesn't match */
- mwifiex_dbg(adapter, ERROR,
- "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s\t"
- "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n",
- __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*bss_desc->bcn_wpa_ie).vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*bss_desc->bcn_rsn_ie).ieee_hdr.element_id : 0,
- (priv->sec_info.wep_enabled) ? "e" : "d",
- (priv->sec_info.wpa_enabled) ? "e" : "d",
- (priv->sec_info.wpa2_enabled) ? "e" : "d",
- priv->sec_info.encryption_mode, bss_desc->privacy);
+ dbg_security_flags(ERROR, "failed", priv, bss_desc);
return -1;
}
@@ -494,13 +458,13 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
*scan_chan_list,
u8 filtered_scan)
{
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
struct mwifiex_adapter *adapter = priv->adapter;
int chan_idx = 0, i;
- for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) {
+ for (band = 0; (band < NUM_NL80211_BANDS) ; band++) {
if (!priv->wdev.wiphy->bands[band])
continue;
@@ -534,11 +498,13 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
&= ~MWIFIEX_PASSIVE_SCAN;
scan_chan_list[chan_idx].chan_number =
(u32) ch->hw_value;
+
+ scan_chan_list[chan_idx].chan_scan_mode_bitmap
+ |= MWIFIEX_DISABLE_CHAN_FILT;
+
if (filtered_scan) {
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->specific_scan_time);
- scan_chan_list[chan_idx].chan_scan_mode_bitmap
- |= MWIFIEX_DISABLE_CHAN_FILT;
}
chan_idx++;
}
@@ -557,13 +523,13 @@ mwifiex_bgscan_create_channel_list(struct mwifiex_private *priv,
struct mwifiex_chan_scan_param_set
*scan_chan_list)
{
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
struct mwifiex_adapter *adapter = priv->adapter;
int chan_idx = 0, i;
- for (band = 0; (band < IEEE80211_NUM_BANDS); band++) {
+ for (band = 0; (band < NUM_NL80211_BANDS); band++) {
if (!priv->wdev.wiphy->bands[band])
continue;
@@ -655,8 +621,6 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
int ret = 0;
struct mwifiex_chan_scan_param_set *tmp_chan_list;
struct mwifiex_chan_scan_param_set *start_chan;
- struct cmd_ctrl_node *cmd_node, *tmp_node;
- unsigned long flags;
u32 tlv_idx, rates_size, cmd_no;
u32 total_scan_time;
u32 done_early;
@@ -813,16 +777,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
sizeof(struct mwifiex_ie_types_header) + rates_size;
if (ret) {
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- list_for_each_entry_safe(cmd_node, tmp_node,
- &adapter->scan_pending_q,
- list) {
- list_del(&cmd_node->list);
- cmd_node->wait_q_enabled = false;
- mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
- }
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
+ mwifiex_cancel_pending_scan_cmd(adapter);
break;
}
}
@@ -912,14 +867,11 @@ mwifiex_config_scan(struct mwifiex_private *priv,
/* Set the BSS type scan filter, use Adapter setting if
unset */
scan_cfg_out->bss_mode =
- (user_scan_in->bss_mode ? (u8) user_scan_in->
- bss_mode : (u8) adapter->scan_mode);
+ (u8)(user_scan_in->bss_mode ?: adapter->scan_mode);
/* Set the number of probes to send, use Adapter setting
if unset */
- num_probes =
- (user_scan_in->num_probes ? user_scan_in->
- num_probes : adapter->scan_probes);
+ num_probes = user_scan_in->num_probes ?: adapter->scan_probes;
/*
* Set the BSSID filter to the incoming configuration,
@@ -1094,28 +1046,24 @@ mwifiex_config_scan(struct mwifiex_private *priv,
chan_idx++) {
channel = user_scan_in->chan_list[chan_idx].chan_number;
- (scan_chan_list + chan_idx)->chan_number = channel;
+ scan_chan_list[chan_idx].chan_number = channel;
radio_type =
user_scan_in->chan_list[chan_idx].radio_type;
- (scan_chan_list + chan_idx)->radio_type = radio_type;
+ scan_chan_list[chan_idx].radio_type = radio_type;
scan_type = user_scan_in->chan_list[chan_idx].scan_type;
if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
- (scan_chan_list +
- chan_idx)->chan_scan_mode_bitmap
+ scan_chan_list[chan_idx].chan_scan_mode_bitmap
|= (MWIFIEX_PASSIVE_SCAN |
MWIFIEX_HIDDEN_SSID_REPORT);
else
- (scan_chan_list +
- chan_idx)->chan_scan_mode_bitmap
+ scan_chan_list[chan_idx].chan_scan_mode_bitmap
&= ~MWIFIEX_PASSIVE_SCAN;
- if (*filtered_scan)
- (scan_chan_list +
- chan_idx)->chan_scan_mode_bitmap
- |= MWIFIEX_DISABLE_CHAN_FILT;
+ scan_chan_list[chan_idx].chan_scan_mode_bitmap
+ |= MWIFIEX_DISABLE_CHAN_FILT;
if (user_scan_in->chan_list[chan_idx].scan_time) {
scan_dur = (u16) user_scan_in->
@@ -1129,9 +1077,9 @@ mwifiex_config_scan(struct mwifiex_private *priv,
scan_dur = adapter->active_scan_time;
}
- (scan_chan_list + chan_idx)->min_scan_time =
+ scan_chan_list[chan_idx].min_scan_time =
cpu_to_le16(scan_dur);
- (scan_chan_list + chan_idx)->max_scan_time =
+ scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(scan_dur);
}
@@ -1991,12 +1939,13 @@ mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
- struct cmd_ctrl_node *cmd_node, *tmp_node;
+ struct cmd_ctrl_node *cmd_node;
unsigned long flags;
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
if (list_empty(&adapter->scan_pending_q)) {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
@@ -2018,13 +1967,10 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
}
} else if ((priv->scan_aborting && !priv->scan_request) ||
priv->scan_block) {
- list_for_each_entry_safe(cmd_node, tmp_node,
- &adapter->scan_pending_q, list) {
- list_del(&cmd_node->list);
- mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
- }
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ mwifiex_cancel_pending_scan_cmd(adapter);
+
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
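The "?:" introduced in mwifiex_config_scan() above is the GNU C conditional with omitted middle operand: x ?: y yields x when x is nonzero and y otherwise, evaluating x only once. The rewritten assignments are therefore equivalent to the old ternaries, just shorter:

	/* before */
	num_probes = user_scan_in->num_probes ?
		     user_scan_in->num_probes : adapter->scan_probes;
	/* after */
	num_probes = user_scan_in->num_probes ?: adapter->scan_probes;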
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 6b281ea03..e757dfc51 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -73,6 +73,67 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
{"EXTLAST", NULL, 0, 0xFE},
};
+static const struct of_device_id mwifiex_sdio_of_match_table[] = {
+ { .compatible = "marvell,sd8897" },
+ { .compatible = "marvell,sd8997" },
+ { }
+};
+
+static irqreturn_t mwifiex_wake_irq_wifi(int irq, void *priv)
+{
+ struct mwifiex_plt_wake_cfg *cfg = priv;
+
+ if (cfg->irq_wifi > 0) {
+ pr_info("%s: wake by wifi\n", __func__);
+ cfg->wake_by_wifi = true;
+ disable_irq_nosync(irq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* This function parses the device tree node using the mmc subnode
+ * devicetree API. The device node is saved in card->plt_of_node.
+ * If the device tree node exists and includes an interrupts attribute,
+ * this function will also request the platform specific wakeup interrupt.
+ */
+static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
+{
+ struct mwifiex_plt_wake_cfg *cfg;
+ int ret;
+
+ if (!dev->of_node ||
+ !of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) {
+ dev_err(dev, "sdio platform data not available\n");
+ return -1;
+ }
+
+ card->plt_of_node = dev->of_node;
+ card->plt_wake_cfg = devm_kzalloc(dev, sizeof(*card->plt_wake_cfg),
+ GFP_KERNEL);
+ cfg = card->plt_wake_cfg;
+ if (cfg && card->plt_of_node) {
+ cfg->irq_wifi = irq_of_parse_and_map(card->plt_of_node, 0);
+ if (!cfg->irq_wifi) {
+ dev_err(dev,
+ "fail to parse irq_wifi from device tree\n");
+ } else {
+ ret = devm_request_irq(dev, cfg->irq_wifi,
+ mwifiex_wake_irq_wifi,
+ IRQF_TRIGGER_LOW,
+ "wifi_wake", cfg);
+ if (ret) {
+ dev_err(dev,
+ "Failed to request irq_wifi %d (%d)\n",
+ cfg->irq_wifi, ret);
+ }
+ disable_irq(cfg->irq_wifi);
+ }
+ }
+
+ return 0;
+}
+
/*
* SDIO probe.
*
@@ -127,6 +188,9 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
return -EIO;
}
+ /* device tree node parsing and platform specific configuration*/
+ mwifiex_sdio_probe_of(&func->dev, card);
+
if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops,
MWIFIEX_SDIO)) {
pr_err("%s: add card failed\n", __func__);
@@ -183,6 +247,13 @@ static int mwifiex_sdio_resume(struct device *dev)
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_SYNC_CMD);
+ /* Disable platform specific wakeup interrupt */
+ if (card->plt_wake_cfg && card->plt_wake_cfg->irq_wifi > 0) {
+ disable_irq_wake(card->plt_wake_cfg->irq_wifi);
+ if (!card->plt_wake_cfg->wake_by_wifi)
+ disable_irq(card->plt_wake_cfg->irq_wifi);
+ }
+
return 0;
}
@@ -262,6 +333,13 @@ static int mwifiex_sdio_suspend(struct device *dev)
adapter = card->adapter;
+ /* Enable platform specific wakeup interrupt */
+ if (card->plt_wake_cfg && card->plt_wake_cfg->irq_wifi > 0) {
+ card->plt_wake_cfg->wake_by_wifi = false;
+ enable_irq(card->plt_wake_cfg->irq_wifi);
+ enable_irq_wake(card->plt_wake_cfg->irq_wifi);
+ }
+
/* Enable the Host Sleep */
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
@@ -1026,13 +1104,12 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
offset += txlen;
} while (true);
- sdio_release_host(card->func);
-
mwifiex_dbg(adapter, MSG,
"info: FW download over, size %d bytes\n", offset);
ret = 0;
done:
+ sdio_release_host(card->func);
kfree(fwbuf);
return ret;
}
@@ -1123,8 +1200,8 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
__func__, pkt_len, blk_size);
break;
}
- skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len,
- GFP_KERNEL | GFP_DMA);
+
+ skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len, GFP_KERNEL);
if (!skb_deaggr)
break;
skb_put(skb_deaggr, pkt_len);
@@ -1373,8 +1450,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
/* copy pkt to deaggr buf */
skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind],
- GFP_KERNEL |
- GFP_DMA);
+ GFP_KERNEL);
if (!skb_deaggr) {
mwifiex_dbg(adapter, ERROR, "skb allocation failure\t"
"drop pkt len=%d type=%d\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index da70fde39..df6c5e57d 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -154,6 +154,11 @@
a->mpa_rx.start_port = 0; \
} while (0)
+struct mwifiex_plt_wake_cfg {
+ int irq_wifi;
+ bool wake_by_wifi;
+};
+
/* data structure for SDIO MPA TX */
struct mwifiex_sdio_mpa_tx {
/* multiport tx aggregation buffer pointer */
@@ -237,6 +242,8 @@ struct mwifiex_sdio_card_reg {
struct sdio_mmc_card {
struct sdio_func *func;
struct mwifiex_adapter *adapter;
+ struct device_node *plt_of_node;
+ struct mwifiex_plt_wake_cfg *plt_wake_cfg;
const char *firmware;
const struct mwifiex_sdio_card_reg *reg;
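The probe-of parsing in sdio.c, the plt_wake_cfg bookkeeping added to this header, and the "marvell,wakeup-pin" lookup later in sta_cmd.c all consume the same devicetree node. A hypothetical fragment that would match, sketched here in a C comment (the compatible string and property names come from this patch; the parent node, GPIO controller and numbers are invented):

	/*
	 * &mmc0 {
	 *	wifi@1 {
	 *		compatible = "marvell,sd8997";
	 *		interrupt-parent = <&gpio1>;
	 *		interrupts = <14 8>;	// IRQ_TYPE_LEVEL_LOW
	 *		marvell,wakeup-pin = <3>;
	 *	};
	 * };
	 */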
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 30f152601..e436574b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1558,6 +1558,30 @@ static int mwifiex_cmd_robust_coex(struct mwifiex_private *priv,
return 0;
}
+static int mwifiex_cmd_gtk_rekey_offload(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct host_cmd_ds_gtk_rekey_params *rekey = &cmd->params.rekey;
+ u64 rekey_ctr;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG);
+ cmd->size = cpu_to_le16(sizeof(*rekey) + S_DS_GEN);
+
+ rekey->action = cpu_to_le16(cmd_action);
+ if (cmd_action == HostCmd_ACT_GEN_SET) {
+ memcpy(rekey->kek, data->kek, NL80211_KEK_LEN);
+ memcpy(rekey->kck, data->kck, NL80211_KCK_LEN);
+ rekey_ctr = be64_to_cpup((__be64 *)data->replay_ctr);
+ rekey->replay_ctr_low = cpu_to_le32((u32)rekey_ctr);
+ rekey->replay_ctr_high = cpu_to_le32((u32)(rekey_ctr >> 32));
+ }
+
+ return 0;
+}
+
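The replay-counter handling in mwifiex_cmd_gtk_rekey_offload() above converts the 8-byte big-endian counter that cfg80211 supplies into the two little-endian 32-bit halves the firmware wants. That conversion, extracted into a self-contained sketch:

	static void split_replay_ctr(const u8 *replay_ctr,
				     __le32 *lo, __le32 *hi)
	{
		u64 ctr = be64_to_cpup((const __be64 *)replay_ctr);

		*lo = cpu_to_le32((u32)ctr);		/* low 32 bits */
		*hi = cpu_to_le32((u32)(ctr >> 32));	/* high 32 bits */
	}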
static int
mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
@@ -2094,6 +2118,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_robust_coex(priv, cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG:
+ ret = mwifiex_cmd_gtk_rekey_offload(priv, cmd_ptr, cmd_action,
+ data_buf);
+ break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2134,6 +2162,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
enum state_11d_t state_11d;
struct mwifiex_ds_11n_tx_cfg tx_cfg;
u8 sdio_sp_rx_aggr_enable;
+ int data;
if (first_sta) {
if (priv->adapter->iface_type == MWIFIEX_PCIE) {
@@ -2154,9 +2183,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
* The cal-data can be read from device tree and/or
* a configuration file and downloaded to firmware.
*/
- adapter->dt_node =
- of_find_node_by_name(NULL, "marvell_cfgdata");
- if (adapter->dt_node) {
+ if (priv->adapter->iface_type == MWIFIEX_SDIO &&
+ adapter->dev->of_node) {
+ adapter->dt_node = adapter->dev->of_node;
+ if (of_property_read_u32(adapter->dt_node,
+ "marvell,wakeup-pin",
+ &data) == 0) {
+ pr_debug("Wakeup pin = 0x%x\n", data);
+ adapter->hs_cfg.gpio = data;
+ }
+
ret = mwifiex_dnld_dt_cfgdata(priv, adapter->dt_node,
"marvell,caldata");
if (ret)
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index d96523e10..d18c7979d 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -44,7 +44,6 @@ static void
mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
- struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_ps_mode_enh *pm;
unsigned long flags;
@@ -71,17 +70,7 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
break;
case HostCmd_CMD_802_11_SCAN:
case HostCmd_CMD_802_11_SCAN_EXT:
- /* Cancel all pending scan command */
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- list_for_each_entry_safe(cmd_node, tmp_node,
- &adapter->scan_pending_q, list) {
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
- mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- }
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ mwifiex_cancel_pending_scan_cmd(adapter);
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = false;
@@ -1244,6 +1233,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_ROBUST_COEX:
ret = mwifiex_ret_robust_coex(priv, resp, data_buf);
break;
+ case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG:
+ break;
default:
mwifiex_dbg(adapter, ERROR,
"CMD_RESP: unknown cmd response %#x\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 070bce401..0104108b4 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -147,6 +147,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
+
+ mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG,
+ HostCmd_ACT_GEN_REMOVE, 0, NULL, false);
}
static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index d5c56eb9e..8e0862657 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -146,6 +146,7 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
size_t beacon_ie_len;
struct mwifiex_bss_priv *bss_priv = (void *)bss->priv;
const struct cfg80211_bss_ies *ies;
+ int ret;
rcu_read_lock();
ies = rcu_dereference(bss->ies);
@@ -189,7 +190,48 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_SPECTRUM_MGMT)
bss_desc->sensed_11h = true;
- return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
+ ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
+ if (ret)
+ return ret;
+
+ /* Update HT40 capability based on current channel information */
+ if (bss_desc->bcn_ht_oper && bss_desc->bcn_ht_cap) {
+ u8 ht_param = bss_desc->bcn_ht_oper->ht_param;
+ u8 radio = mwifiex_band_to_radio_type(bss_desc->bss_band);
+ struct ieee80211_supported_band *sband =
+ priv->wdev.wiphy->bands[radio];
+ int freq = ieee80211_channel_to_frequency(bss_desc->channel,
+ radio);
+ struct ieee80211_channel *chan =
+ ieee80211_get_channel(priv->adapter->wiphy, freq);
+
+ switch (ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ if (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) {
+ sband->ht_cap.cap &=
+ ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
+ } else {
+ sband->ht_cap.cap |=
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SGI_40;
+ }
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ if (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) {
+ sband->ht_cap.cap &=
+ ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
+ } else {
+ sband->ht_cap.cap |=
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SGI_40;
+ }
+ break;
+ }
+ }
+
+ return 0;
}
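The HT40 update added above keys off the secondary-channel offset advertised in the HT operation element and the channel's no-HT40 flags. The decision it encodes, extracted into a hypothetical predicate:

	static bool ht40_allowed(const struct ieee80211_channel *chan,
				 u8 ht_param)
	{
		switch (ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
		case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
			return !(chan->flags & IEEE80211_CHAN_NO_HT40PLUS);
		case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
			return !(chan->flags & IEEE80211_CHAN_NO_HT40MINUS);
		default:
			return false;	/* no secondary channel: 20 MHz only */
		}
	}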
void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
@@ -509,7 +551,8 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
if (priv && priv->sched_scanning) {
#ifdef CONFIG_PM
- if (!priv->wdev.wiphy->wowlan_config->nd_config) {
+ if (priv->wdev.wiphy->wowlan_config &&
+ !priv->wdev.wiphy->wowlan_config->nd_config) {
#endif
mwifiex_dbg(adapter, CMD, "aborting bgscan!\n");
mwifiex_stop_bg_scan(priv);
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 150649602..df9704de0 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -285,7 +285,7 @@ static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
else
usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
- /* find the minmum bandwith between AP/TDLS peers */
+ /* find the minimum bandwidth between AP/TDLS peers */
vht_cap = &sta_ptr->tdls_cap.vhtcap;
supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info);
peer_supp_chwd_set =
diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c
index bf6182b64..abdd0cf71 100644
--- a/drivers/net/wireless/marvell/mwifiex/txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/txrx.c
@@ -297,6 +297,13 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
goto done;
mwifiex_set_trans_start(priv->netdev);
+
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
+ atomic_dec_return(&adapter->pending_bridged_pkts);
+
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
+ goto done;
+
if (!status) {
priv->stats.tx_packets++;
priv->stats.tx_bytes += tx_info->pkt_len;
@@ -306,12 +313,6 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
priv->stats.tx_errors++;
}
- if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
- atomic_dec_return(&adapter->pending_bridged_pkts);
-
- if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
- goto done;
-
if (aggr)
/* For skb_aggr, do not wake up tx queue */
goto done;
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 16d95b22f..f79d00d1e 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -694,7 +694,7 @@ static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
struct mwifiex_ie_list *ap_ie = cmd_buf;
struct mwifiex_ie_types_header *tlv_ie = (void *)tlv;
- if (!ap_ie || !ap_ie->len || !ap_ie->ie_list)
+ if (!ap_ie || !ap_ie->len)
return -1;
*ie_size += le16_to_cpu(ap_ie->len) +
@@ -816,7 +816,7 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
chandef.chan->center_freq);
/* Set appropriate bands */
- if (chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ if (chandef.chan->band == NL80211_BAND_2GHZ) {
bss_cfg->band_cfg = BAND_CONFIG_BG;
config_bands = BAND_B | BAND_G;
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 52f7981a8..666e91af5 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -102,6 +102,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
int hdr_chop;
struct ethhdr *p_ethhdr;
struct mwifiex_sta_node *src_node;
+ int index;
uap_rx_pd = (struct uap_rxpd *)(skb->data);
rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
@@ -208,10 +209,15 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
}
__net_timestamp(skb);
+
+ index = mwifiex_1d_to_wmm_queue[skb->priority];
+ atomic_inc(&priv->wmm_tx_pending[index]);
mwifiex_wmm_add_buf_txqueue(priv, skb);
atomic_inc(&adapter->tx_pending);
atomic_inc(&adapter->pending_bridged_pkts);
+ mwifiex_queue_main_work(priv->adapter);
+
return;
}
@@ -263,6 +269,96 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
return mwifiex_process_rx_packet(priv, skb);
}
+int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_sta_node *src_node;
+ struct ethhdr *p_ethhdr;
+ struct sk_buff *skb_uap;
+ struct mwifiex_txinfo *tx_info;
+
+ if (!skb)
+ return -1;
+
+ p_ethhdr = (void *)skb->data;
+ src_node = mwifiex_get_sta_entry(priv, p_ethhdr->h_source);
+ if (src_node) {
+ src_node->stats.last_rx = jiffies;
+ src_node->stats.rx_bytes += skb->len;
+ src_node->stats.rx_packets++;
+ }
+
+ skb->dev = priv->netdev;
+ skb->protocol = eth_type_trans(skb, priv->netdev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* This is required only for 11n and USB/PCIE, as we allocate
+ * a 4K buffer only for 11N (to be able to receive 4K AMSDU
+ * packets). For SDIO we allocate buffers based on the size of
+ * the packet, so this is not needed.
+ *
+ * We modify the truesize here because our allocation for each
+ * skb is 4K while we only receive 2K packets. This causes the
+ * kernel to start dropping packets when an application has
+ * sized its receive buffer assuming 2K skbs: e.g. if a 64K
+ * packet arrives in IP fragments and the application allocates
+ * 64K to receive it, the accounted size nearly doubles because
+ * we place each 1.5K fragment in a 4K buffer before passing it
+ * up. Once the 64K limit is hit, the kernel drops the remaining
+ * fragments. Currently we fail the Filesndl-ht.scr script for
+ * UDP, hence this fix.
+ */
+ if ((adapter->iface_type == MWIFIEX_USB ||
+ adapter->iface_type == MWIFIEX_PCIE) &&
+ (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
+ skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
+
+ if (is_multicast_ether_addr(p_ethhdr->h_dest) ||
+ mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) {
+ if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)
+ skb_uap =
+ skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+ else
+ skb_uap = skb_copy(skb, GFP_ATOMIC);
+
+ if (likely(skb_uap)) {
+ tx_info = MWIFIEX_SKB_TXCB(skb_uap);
+ memset(tx_info, 0, sizeof(*tx_info));
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+ tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
+ __net_timestamp(skb_uap);
+ mwifiex_wmm_add_buf_txqueue(priv, skb_uap);
+ atomic_inc(&adapter->tx_pending);
+ atomic_inc(&adapter->pending_bridged_pkts);
+ if ((atomic_read(&adapter->pending_bridged_pkts) >=
+ MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
+ mwifiex_dbg(adapter, ERROR,
+ "Tx: Bridge packet limit reached. Drop packet!\n");
+ mwifiex_uap_cleanup_tx_queues(priv);
+ }
+
+ } else {
+ mwifiex_dbg(adapter, ERROR, "failed to allocate skb_uap");
+ }
+
+ mwifiex_queue_main_work(adapter);
+ /* Don't forward Intra-BSS unicast packet to upper layer*/
+ if (mwifiex_get_sta_entry(priv, p_ethhdr->h_dest))
+ return 0;
+ }
+
+ /* Forward multicast/broadcast packet to upper layer*/
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
+
+ return 0;
+}
+
/*
* This function processes the packet received on AP interface.
*
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 3ae9be7e4..4c2328f75 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -995,7 +995,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
{
int ret = 0;
u8 *firmware = fw->fw_buf, *recv_buff;
- u32 retries = USB8XXX_FW_MAX_RETRY, dlen;
+ u32 retries = USB8XXX_FW_MAX_RETRY + 1;
+ u32 dlen;
u32 fw_seqnum = 0, tlen = 0, dnld_cmd = 0;
struct fw_data *fwdata;
struct fw_sync_header sync_fw;
@@ -1017,8 +1018,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
/* Allocate memory for receive */
recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL);
- if (!recv_buff)
+ if (!recv_buff) {
+ ret = -ENOMEM;
goto cleanup;
+ }
do {
/* Send pseudo data to check winner status first */
@@ -1041,7 +1044,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
}
/* If the send/receive fails or CRC occurs then retry */
- while (retries--) {
+ while (--retries) {
u8 *buf = (u8 *)fwdata;
u32 len = FW_DATA_XMIT_SIZE;
@@ -1101,7 +1104,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
continue;
}
- retries = USB8XXX_FW_MAX_RETRY;
+ retries = USB8XXX_FW_MAX_RETRY + 1;
break;
}
fw_seqnum++;
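The retry changes above swap the post-decrement loop (retries = USB8XXX_FW_MAX_RETRY; while (retries--)) for a pre-decrement one seeded with one extra count. Both run the body at most USB8XXX_FW_MAX_RETRY times, but the new form leaves retries == 0 on exhaustion instead of wrapping the unsigned counter to 0xffffffff, so the result is testable afterwards. A sketch, with try_once() as a stand-in for one send/receive attempt:

	u32 retries = USB8XXX_FW_MAX_RETRY + 1;

	while (--retries) {
		if (!try_once())	/* 0 means success */
			break;
	}
	if (!retries)
		pr_err("fw download failed after %d tries\n",
		       USB8XXX_FW_MAX_RETRY);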
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 427b961b7..4133136b8 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -346,20 +346,20 @@ struct mwl8k_sta {
#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
static const struct ieee80211_channel mwl8k_channels_24[] = {
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },
};
static const struct ieee80211_rate mwl8k_rates_24[] = {
@@ -379,10 +379,10 @@ static const struct ieee80211_rate mwl8k_rates_24[] = {
};
static const struct ieee80211_channel mwl8k_channels_50[] = {
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, },
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
};
static const struct ieee80211_rate mwl8k_rates_50[] = {
@@ -1010,11 +1010,11 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status,
}
if (rxd->channel > 14) {
- status->band = IEEE80211_BAND_5GHZ;
+ status->band = NL80211_BAND_5GHZ;
if (!(status->flag & RX_FLAG_HT))
status->rate_idx -= 5;
} else {
- status->band = IEEE80211_BAND_2GHZ;
+ status->band = NL80211_BAND_2GHZ;
}
status->freq = ieee80211_channel_to_frequency(rxd->channel,
status->band);
@@ -1118,11 +1118,11 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
status->flag |= RX_FLAG_HT;
if (rxd->channel > 14) {
- status->band = IEEE80211_BAND_5GHZ;
+ status->band = NL80211_BAND_5GHZ;
if (!(status->flag & RX_FLAG_HT))
status->rate_idx -= 5;
} else {
- status->band = IEEE80211_BAND_2GHZ;
+ status->band = NL80211_BAND_2GHZ;
}
status->freq = ieee80211_channel_to_frequency(rxd->channel,
status->band);
@@ -2300,13 +2300,13 @@ static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw)
BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24));
memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24));
- priv->band_24.band = IEEE80211_BAND_2GHZ;
+ priv->band_24.band = NL80211_BAND_2GHZ;
priv->band_24.channels = priv->channels_24;
priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24);
priv->band_24.bitrates = priv->rates_24;
priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24);
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band_24;
}
static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
@@ -2319,13 +2319,13 @@ static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50));
memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50));
- priv->band_50.band = IEEE80211_BAND_5GHZ;
+ priv->band_50.band = NL80211_BAND_5GHZ;
priv->band_50.channels = priv->channels_50;
priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50);
priv->band_50.bitrates = priv->rates_50;
priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50);
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = &priv->band_50;
}
/*
@@ -2876,9 +2876,9 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_CMD_SET_LIST);
- if (channel->band == IEEE80211_BAND_2GHZ)
+ if (channel->band == NL80211_BAND_2GHZ)
cmd->band = cpu_to_le16(0x1);
- else if (channel->band == IEEE80211_BAND_5GHZ)
+ else if (channel->band == NL80211_BAND_5GHZ)
cmd->band = cpu_to_le16(0x4);
cmd->channel = cpu_to_le16(channel->hw_value);
@@ -3067,7 +3067,7 @@ static int freq_to_idx(struct mwl8k_priv *priv, int freq)
struct ieee80211_supported_band *sband;
int band, ch, idx = 0;
- for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
sband = priv->hw->wiphy->bands[band];
if (!sband)
continue;
@@ -3149,9 +3149,9 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
cmd->action = cpu_to_le16(MWL8K_CMD_SET);
cmd->current_channel = channel->hw_value;
- if (channel->band == IEEE80211_BAND_2GHZ)
+ if (channel->band == NL80211_BAND_2GHZ)
cmd->channel_flags |= cpu_to_le32(0x00000001);
- else if (channel->band == IEEE80211_BAND_5GHZ)
+ else if (channel->band == NL80211_BAND_5GHZ)
cmd->channel_flags |= cpu_to_le32(0x00000004);
if (!priv->sw_scan_start) {
@@ -4094,10 +4094,10 @@ static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
cmd->stn_id = cpu_to_le16(sta->aid);
cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
- if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
- rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+ if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
+ rates = sta->supp_rates[NL80211_BAND_2GHZ];
else
- rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
cmd->legacy_rates = cpu_to_le32(rates);
if (sta->ht_cap.ht_supported) {
cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
@@ -4529,10 +4529,10 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
p->ht_caps = cpu_to_le16(sta->ht_cap.cap);
p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
((sta->ht_cap.ampdu_density & 7) << 2);
- if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
- rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+ if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
+ rates = sta->supp_rates[NL80211_BAND_2GHZ];
else
- rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
legacy_rate_mask_to_array(p->legacy_rates, rates);
memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
p->interop = 1;
@@ -5010,11 +5010,11 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
- if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
- ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
+ if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
+ ap_legacy_rates = ap->supp_rates[NL80211_BAND_2GHZ];
} else {
ap_legacy_rates =
- ap->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ ap->supp_rates[NL80211_BAND_5GHZ] << 5;
}
memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
@@ -5042,7 +5042,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
idx--;
if (hw->conf.chandef.chan->band ==
- IEEE80211_BAND_2GHZ)
+ NL80211_BAND_2GHZ)
rate = mwl8k_rates_24[idx].hw_value;
else
rate = mwl8k_rates_50[idx].hw_value;
@@ -5116,7 +5116,7 @@ mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (idx)
idx--;
- if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+ if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
rate = mwl8k_rates_24[idx].hw_value;
else
rate = mwl8k_rates_50[idx].hw_value;
@@ -5388,7 +5388,7 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
struct ieee80211_supported_band *sband;
if (priv->ap_fw) {
- sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
@@ -5396,7 +5396,7 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
}
if (!sband)
- sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
if (!sband || idx >= sband->n_channels)
return -ENOENT;
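The mwl8k hunks above are representative of the whole rename: enum ieee80211_band became the identically-valued enum nl80211_band, so every change is mechanical. As an editorial sketch (not part of the diff), the resulting band-iteration idiom looks like this:

static struct ieee80211_supported_band *
first_supported_band(struct wiphy *wiphy)
{
	enum nl80211_band band;

	/* NL80211_BAND_2GHZ is 0; NUM_NL80211_BANDS bounds the array */
	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++)
		if (wiphy->bands[band])
			return wiphy->bands[band];
	return NULL;
}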
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index 26190fd33..8fa78d715 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -469,7 +469,7 @@ struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
}
#define CHAN2G(_idx, _freq) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_idx), \
.max_power = 30, \
@@ -563,7 +563,7 @@ mt76_init_sband_2g(struct mt7601u_dev *dev)
{
dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g),
GFP_KERNEL);
- dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = dev->sband_2g;
+ dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = dev->sband_2g;
WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
ARRAY_SIZE(mt76_channels_2ghz));
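The CHAN2G() macro above keeps the mt7601u channel table compact; a hypothetical table built with it would look like this (sketch; demo_channels_2ghz is an assumed name):

static const struct ieee80211_channel demo_channels_2ghz[] = {
	CHAN2G(1, 2412),	/* hw_value 1, 2412 MHz */
	CHAN2G(6, 2437),
	CHAN2G(11, 2462),
};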
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 7fa0128de..bf3f0a399 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -777,7 +777,7 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
u8 offset1;
u8 offset2;
- if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0);
offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1);
@@ -1174,7 +1174,7 @@ static void rt2800_brightness_set(struct led_classdev *led_cdev,
container_of(led_cdev, struct rt2x00_led, led_dev);
unsigned int enabled = brightness != LED_OFF;
unsigned int bg_mode =
- (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+ (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
unsigned int polarity =
rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
EEPROM_FREQ_LED_POLARITY);
@@ -1741,7 +1741,7 @@ static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev)
u8 led_ctrl, led_g_mode, led_r_mode;
rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
rt2x00_set_field32(&reg, GPIO_SWITCH_0, 1);
rt2x00_set_field32(&reg, GPIO_SWITCH_1, 1);
} else {
@@ -1844,7 +1844,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2x00_has_cap_bt_coexist(rt2x00dev)) {
rt2x00_set_field8(&r3, BBP3_RX_ADC, 1);
rt2x00_set_field8(&r3, BBP3_RX_ANTENNA,
- rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+ rt2x00dev->curr_band == NL80211_BAND_5GHZ);
rt2800_set_ant_diversity(rt2x00dev, ANTENNA_B);
} else {
rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
@@ -3451,7 +3451,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
* Matching Delta value -4 -3 -2 -1 0 +1 +2 +3 +4
* Example TSSI bounds 0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00
*/
- if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
tssi_bounds[0] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG1_MINUS4);
@@ -3546,7 +3546,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
}
static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
u16 eeprom;
u8 comp_en;
@@ -3562,7 +3562,7 @@ static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
return 0;
- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
comp_en = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_DELTA_ENABLE_2G);
if (comp_en) {
@@ -3611,7 +3611,7 @@ static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
}
static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
- enum ieee80211_band band, int power_level,
+ enum nl80211_band band, int power_level,
u8 txpower, int delta)
{
u16 eeprom;
@@ -3639,7 +3639,7 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
&eeprom);
- if (band == IEEE80211_BAND_2GHZ)
+ if (band == NL80211_BAND_2GHZ)
eirp_txpower_criterion = rt2x00_get_field16(eeprom,
EEPROM_EIRP_MAX_TX_POWER_2GHZ);
else
@@ -3686,7 +3686,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
u16 eeprom;
u32 regs[TX_PWR_CFG_IDX_COUNT];
unsigned int offset;
- enum ieee80211_band band = chan->band;
+ enum nl80211_band band = chan->band;
int delta;
int i;
@@ -3697,7 +3697,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
/* calculate temperature compensation delta */
delta = rt2800_get_gain_calibration_delta(rt2x00dev);
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
offset = 16;
else
offset = 0;
@@ -4055,7 +4055,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
for (i = 0; i < TX_PWR_CFG_IDX_COUNT; i++)
rt2x00_dbg(rt2x00dev,
"band:%cGHz, BW:%c0MHz, TX_PWR_CFG_%d%s = %08lx\n",
- (band == IEEE80211_BAND_5GHZ) ? '5' : '2',
+ (band == NL80211_BAND_5GHZ) ? '5' : '2',
(test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) ?
'4' : '2',
(i > TX_PWR_CFG_9_IDX) ?
@@ -4081,7 +4081,7 @@ static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev,
u16 eeprom;
u32 reg, offset;
int i, is_rate_b, delta, power_ctrl;
- enum ieee80211_band band = chan->band;
+ enum nl80211_band band = chan->band;
/*
* Calculate HT40 compensation. For 40MHz we need to add or subtract
@@ -4436,7 +4436,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
{
u8 vgc;
- if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
@@ -4511,7 +4511,7 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
case RT3572:
case RT3593:
if (qual->rssi > -65) {
- if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)
+ if (rt2x00dev->curr_band == NL80211_BAND_2GHZ)
vgc += 0x20;
else
vgc += 0x10;
@@ -7492,6 +7492,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
if (!rt2x00_is_usb(rt2x00dev))
ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
+ /* Set MFP if HW crypto is disabled. */
+ if (rt2800_hwcrypt_disabled(rt2x00dev))
+ ieee80211_hw_set(rt2x00dev->hw, MFP_CAPABLE);
+
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
rt2800_eeprom_addr(rt2x00dev,
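The new MFP hunk keys off rt2800_hwcrypt_disabled(). The parameter plumbing behind it is not shown in this diff; roughly, it is a module option along these lines (editorial sketch):

static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

static inline bool rt2800_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
{
	/* with SW crypto, mac80211 can implement 802.11w, hence MFP_CAPABLE */
	return modparam_nohwcrypt;
}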
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 6418620f9..f68d49212 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -38,6 +38,7 @@
#include <linux/kfifo.h>
#include <linux/hrtimer.h>
#include <linux/average.h>
+#include <linux/usb.h>
#include <net/mac80211.h>
@@ -752,8 +753,8 @@ struct rt2x00_dev {
* IEEE80211 control structure.
*/
struct ieee80211_hw *hw;
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
- enum ieee80211_band curr_band;
+ struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
+ enum nl80211_band curr_band;
int curr_freq;
/*
@@ -1002,6 +1003,8 @@ struct rt2x00_dev {
/* Extra TX headroom required for alignment purposes. */
unsigned int extra_tx_headroom;
+
+ struct usb_anchor *anchor;
};
struct rt2x00_bar_list_entry {
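The new anchor field backs the URB-tracking pattern applied in the rt2x00usb.c hunks below. Its shape, as an editorial sketch:

static int demo_submit_tracked(struct urb *urb, struct usb_anchor *anchor)
{
	int ret;

	usb_anchor_urb(urb, anchor);		/* track before submission */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret)
		usb_unanchor_urb(urb);		/* a failed URB must not stay anchored */
	return ret;
}

Teardown can then cancel every URB still in flight with a single usb_kill_anchored_urbs(anchor) call.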
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 5639ed816..4e0c56530 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -911,7 +911,7 @@ static void rt2x00lib_channel(struct ieee80211_channel *entry,
const int value)
{
/* XXX: this assumption about the band is wrong for 802.11j */
- entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ entry->band = channel <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
entry->center_freq = ieee80211_channel_to_frequency(channel,
entry->band);
entry->hw_value = value;
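For reference (editorial note), ieee80211_channel_to_frequency() applies the standard mapping, which is why the band guess above matters:

/* 2.4 GHz: freq = 2407 + 5 * chan for channels 1-13; channel 14 -> 2484 MHz.
 * 5 GHz:   freq = 5000 + 5 * chan (e.g. channel 36 -> 5180 MHz).
 * 802.11j adds 4.9 GHz channels whose numbers overlap the 2.4 GHz range,
 * hence the XXX above. */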
@@ -975,13 +975,13 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
* Channels: 2.4 GHz
*/
if (spec->supported_bands & SUPPORT_BAND_2GHZ) {
- rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_channels = 14;
- rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_bitrates = num_rates;
- rt2x00dev->bands[IEEE80211_BAND_2GHZ].channels = channels;
- rt2x00dev->bands[IEEE80211_BAND_2GHZ].bitrates = rates;
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &rt2x00dev->bands[IEEE80211_BAND_2GHZ];
- memcpy(&rt2x00dev->bands[IEEE80211_BAND_2GHZ].ht_cap,
+ rt2x00dev->bands[NL80211_BAND_2GHZ].n_channels = 14;
+ rt2x00dev->bands[NL80211_BAND_2GHZ].n_bitrates = num_rates;
+ rt2x00dev->bands[NL80211_BAND_2GHZ].channels = channels;
+ rt2x00dev->bands[NL80211_BAND_2GHZ].bitrates = rates;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &rt2x00dev->bands[NL80211_BAND_2GHZ];
+ memcpy(&rt2x00dev->bands[NL80211_BAND_2GHZ].ht_cap,
&spec->ht, sizeof(spec->ht));
}
@@ -991,15 +991,15 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
* Channels: OFDM, UNII, HiperLAN2.
*/
if (spec->supported_bands & SUPPORT_BAND_5GHZ) {
- rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_channels =
+ rt2x00dev->bands[NL80211_BAND_5GHZ].n_channels =
spec->num_channels - 14;
- rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_bitrates =
+ rt2x00dev->bands[NL80211_BAND_5GHZ].n_bitrates =
num_rates - 4;
- rt2x00dev->bands[IEEE80211_BAND_5GHZ].channels = &channels[14];
- rt2x00dev->bands[IEEE80211_BAND_5GHZ].bitrates = &rates[4];
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &rt2x00dev->bands[IEEE80211_BAND_5GHZ];
- memcpy(&rt2x00dev->bands[IEEE80211_BAND_5GHZ].ht_cap,
+ rt2x00dev->bands[NL80211_BAND_5GHZ].channels = &channels[14];
+ rt2x00dev->bands[NL80211_BAND_5GHZ].bitrates = &rates[4];
+ hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &rt2x00dev->bands[NL80211_BAND_5GHZ];
+ memcpy(&rt2x00dev->bands[NL80211_BAND_5GHZ].ht_cap,
&spec->ht, sizeof(spec->ht));
}
@@ -1016,11 +1016,11 @@ static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags))
ieee80211_unregister_hw(rt2x00dev->hw);
- if (likely(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ])) {
- kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
- kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->bitrates);
- rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
- rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+ if (likely(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ])) {
+ kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels);
+ kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates);
+ rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+ rt2x00dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
}
kfree(rt2x00dev->spec.channels_info);
@@ -1422,11 +1422,14 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
cancel_work_sync(&rt2x00dev->intf_work);
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
cancel_work_sync(&rt2x00dev->sleep_work);
+#ifdef CONFIG_RT2X00_LIB_USB
if (rt2x00_is_usb(rt2x00dev)) {
+ usb_kill_anchored_urbs(rt2x00dev->anchor);
hrtimer_cancel(&rt2x00dev->txstatus_timer);
cancel_work_sync(&rt2x00dev->rxdone_work);
cancel_work_sync(&rt2x00dev->txdone_work);
}
+#endif
if (rt2x00dev->workqueue)
destroy_workqueue(rt2x00dev->workqueue);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 7627af609..7cf26c612 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -171,8 +171,11 @@ static void rt2x00usb_register_read_async_cb(struct urb *urb)
{
struct rt2x00_async_read_data *rd = urb->context;
if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) {
- if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
+ usb_anchor_urb(urb, rd->rt2x00dev->anchor);
+ if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
+ usb_unanchor_urb(urb);
kfree(rd);
+ }
} else
kfree(rd);
}
@@ -206,8 +209,11 @@ void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
(unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
rt2x00usb_register_read_async_cb, rd);
- if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
+ usb_anchor_urb(urb, rt2x00dev->anchor);
+ if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
+ usb_unanchor_urb(urb);
kfree(rd);
+ }
usb_free_urb(urb);
}
EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async);
@@ -313,8 +319,10 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
entry->skb->data, length,
rt2x00usb_interrupt_txdone, entry);
+ usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
+ usb_unanchor_urb(entry_priv->urb);
if (status == -ENODEV)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -402,8 +410,10 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
entry->skb->data, entry->skb->len,
rt2x00usb_interrupt_rxdone, entry);
+ usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
+ usb_unanchor_urb(entry_priv->urb);
if (status == -ENODEV)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -818,6 +828,13 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
if (retval)
goto exit_free_reg;
+ rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
+ sizeof(struct usb_anchor),
+ GFP_KERNEL);
+ if (!rt2x00dev->anchor) {
+ retval = -ENOMEM;
+ goto exit_free_reg;
+ }
+
+ init_usb_anchor(rt2x00dev->anchor);
return 0;
exit_free_reg:
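A note on the allocation above (editorial): devm_kmalloc() ties the buffer's lifetime to the device, so neither the error path nor disconnect frees the anchor explicitly. The same pattern in isolation (sketch; demo_probe is an assumed name):

static int demo_probe(struct usb_interface *intf)
{
	struct usb_anchor *anchor;

	anchor = devm_kmalloc(&intf->dev, sizeof(*anchor), GFP_KERNEL);
	if (!anchor)
		return -ENOMEM;
	init_usb_anchor(anchor);	/* freed automatically when the device unbinds */
	return 0;
}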
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 1d07ff74d..1c4226701 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -252,9 +252,9 @@ static void rt61pci_brightness_set(struct led_classdev *led_cdev,
container_of(led_cdev, struct rt2x00_led, led_dev);
unsigned int enabled = brightness != LED_OFF;
unsigned int a_mode =
- (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+ (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ);
unsigned int bg_mode =
- (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+ (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
if (led->type == LED_TYPE_RADIO) {
rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
@@ -643,12 +643,12 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
case ANTENNA_HW_DIVERSITY:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
- (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ));
+ (rt2x00dev->curr_band != NL80211_BAND_5GHZ));
break;
case ANTENNA_A:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
else
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
@@ -657,7 +657,7 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
default:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
else
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
@@ -808,7 +808,7 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
ant->tx == ANTENNA_SW_DIVERSITY);
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
sel = antenna_sel_a;
lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
} else {
@@ -822,9 +822,9 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
rt2x00mmio_register_read(rt2x00dev, PHY_CSR0, &reg);
rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
- rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+ rt2x00dev->curr_band == NL80211_BAND_2GHZ);
rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
- rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+ rt2x00dev->curr_band == NL80211_BAND_5GHZ);
rt2x00mmio_register_write(rt2x00dev, PHY_CSR0, reg);
@@ -846,7 +846,7 @@ static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
u16 eeprom;
short lna_gain = 0;
- if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) {
if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
lna_gain += 14;
@@ -1048,7 +1048,7 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev,
/*
* Determine r17 bounds.
*/
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
low_bound = 0x28;
up_bound = 0x48;
if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
@@ -2077,7 +2077,7 @@ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
return 0;
}
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
if (lna == 3 || lna == 2)
offset += 10;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index 61637defa..903cc6f67 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -197,9 +197,9 @@ static void rt73usb_brightness_set(struct led_classdev *led_cdev,
container_of(led_cdev, struct rt2x00_led, led_dev);
unsigned int enabled = brightness != LED_OFF;
unsigned int a_mode =
- (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+ (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ);
unsigned int bg_mode =
- (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+ (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
if (led->type == LED_TYPE_RADIO) {
rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
@@ -593,13 +593,13 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
case ANTENNA_HW_DIVERSITY:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
temp = !rt2x00_has_cap_frame_type(rt2x00dev) &&
- (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
+ (rt2x00dev->curr_band != NL80211_BAND_5GHZ);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
break;
case ANTENNA_A:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
else
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
@@ -608,7 +608,7 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
default:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
else
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
@@ -704,7 +704,7 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
ant->tx == ANTENNA_SW_DIVERSITY);
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
sel = antenna_sel_a;
lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
} else {
@@ -718,9 +718,9 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
rt2x00usb_register_read(rt2x00dev, PHY_CSR0, &reg);
rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
- (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ));
+ (rt2x00dev->curr_band == NL80211_BAND_2GHZ));
rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
- (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ));
+ (rt2x00dev->curr_band == NL80211_BAND_5GHZ));
rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
@@ -736,7 +736,7 @@ static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
u16 eeprom;
short lna_gain = 0;
- if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) {
if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
lna_gain += 14;
@@ -923,7 +923,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
/*
* Determine r17 bounds.
*/
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
low_bound = 0x28;
up_bound = 0x48;
@@ -1657,7 +1657,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
return 0;
}
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
if (lna == 3 || lna == 2)
offset += 10;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index a43a16fde..e895a8448 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -526,7 +526,7 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
* ieee80211_generic_frame_duration
*/
duration = ieee80211_generic_frame_duration(dev, priv->vif,
- IEEE80211_BAND_2GHZ, skb->len,
+ NL80211_BAND_2GHZ, skb->len,
ieee80211_get_tx_rate(dev, info));
frame_duration = priv->ack_time + le16_to_cpu(duration);
@@ -1018,6 +1018,8 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
dma_addr_t *mapping;
entry = priv->rx_ring + priv->rx_ring_sz*i;
if (!skb) {
+ pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
+ priv->rx_ring, priv->rx_ring_dma);
wiphy_err(dev->wiphy, "Cannot allocate RX skb\n");
return -ENOMEM;
}
@@ -1028,6 +1030,8 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
if (pci_dma_mapping_error(priv->pdev, *mapping)) {
kfree_skb(skb);
+ pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
+ priv->rx_ring, priv->rx_ring_dma);
wiphy_err(dev->wiphy, "Cannot map DMA for RX skb\n");
return -ENOMEM;
}
@@ -1529,7 +1533,7 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
priv->ack_time =
le16_to_cpu(ieee80211_generic_frame_duration(dev,
priv->vif,
- IEEE80211_BAND_2GHZ, 10,
+ NL80211_BAND_2GHZ, 10,
&priv->rates[0])) - 10;
rtl8180_conf_erp(dev, info);
@@ -1736,7 +1740,7 @@ static int rtl8180_probe(struct pci_dev *pdev,
if (err) {
printk(KERN_ERR "%s (rtl8180): Cannot obtain PCI resources\n",
pci_name(pdev));
- return err;
+ goto err_disable_dev;
}
io_addr = pci_resource_start(pdev, 0);
@@ -1795,12 +1799,12 @@ static int rtl8180_probe(struct pci_dev *pdev,
memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels));
memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));
- priv->band.band = IEEE80211_BAND_2GHZ;
+ priv->band.band = NL80211_BAND_2GHZ;
priv->band.channels = priv->channels;
priv->band.n_channels = ARRAY_SIZE(rtl818x_channels);
priv->band.bitrates = priv->rates;
priv->band.n_bitrates = 4;
- dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
ieee80211_hw_set(dev, HOST_BROADCAST_PS_BUFFERING);
ieee80211_hw_set(dev, RX_INCLUDES_FCS);
@@ -1938,6 +1942,8 @@ static int rtl8180_probe(struct pci_dev *pdev,
err_free_reg:
pci_release_regions(pdev);
+
+ err_disable_dev:
pci_disable_device(pdev);
return err;
}
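The rtl8180_probe() changes above complete the usual unwind ladder: a failure undoes, in reverse order, everything that already succeeded, so pci_enable_device() is now balanced on every exit. The skeleton (editorial sketch with generic names):

static int demo_probe(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "demo");
	if (err)
		goto err_disable_dev;

	return 0;

err_disable_dev:
	pci_disable_device(pdev);
	return err;
}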
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index b7f72f9c7..231f84db9 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1470,12 +1470,12 @@ static int rtl8187_probe(struct usb_interface *intf,
memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));
priv->map = (struct rtl818x_csr *)0xFF00;
- priv->band.band = IEEE80211_BAND_2GHZ;
+ priv->band.band = NL80211_BAND_2GHZ;
priv->band.channels = priv->channels;
priv->band.n_channels = ARRAY_SIZE(rtl818x_channels);
priv->band.bitrates = priv->rates;
priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates);
- dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
ieee80211_hw_set(dev, RX_INCLUDES_FCS);
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h
index a6ad79f61..324451df9 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h
@@ -160,104 +160,40 @@ struct rtl8187_priv {
void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
-static inline u8 rtl818x_ioread8_idx(struct rtl8187_priv *priv,
- u8 *addr, u8 idx)
-{
- u8 val;
-
- mutex_lock(&priv->io_mutex);
- usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
- RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
- (unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits8, sizeof(val), HZ / 2);
-
- val = priv->io_dmabuf->bits8;
- mutex_unlock(&priv->io_mutex);
-
- return val;
-}
+u8 rtl818x_ioread8_idx(struct rtl8187_priv *priv,
+ u8 *addr, u8 idx);
static inline u8 rtl818x_ioread8(struct rtl8187_priv *priv, u8 *addr)
{
return rtl818x_ioread8_idx(priv, addr, 0);
}
-static inline u16 rtl818x_ioread16_idx(struct rtl8187_priv *priv,
- __le16 *addr, u8 idx)
-{
- __le16 val;
-
- mutex_lock(&priv->io_mutex);
- usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
- RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
- (unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits16, sizeof(val), HZ / 2);
-
- val = priv->io_dmabuf->bits16;
- mutex_unlock(&priv->io_mutex);
-
- return le16_to_cpu(val);
-}
+u16 rtl818x_ioread16_idx(struct rtl8187_priv *priv,
+ __le16 *addr, u8 idx);
static inline u16 rtl818x_ioread16(struct rtl8187_priv *priv, __le16 *addr)
{
return rtl818x_ioread16_idx(priv, addr, 0);
}
-static inline u32 rtl818x_ioread32_idx(struct rtl8187_priv *priv,
- __le32 *addr, u8 idx)
-{
- __le32 val;
-
- mutex_lock(&priv->io_mutex);
- usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
- RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
- (unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits32, sizeof(val), HZ / 2);
-
- val = priv->io_dmabuf->bits32;
- mutex_unlock(&priv->io_mutex);
-
- return le32_to_cpu(val);
-}
+u32 rtl818x_ioread32_idx(struct rtl8187_priv *priv,
+ __le32 *addr, u8 idx);
static inline u32 rtl818x_ioread32(struct rtl8187_priv *priv, __le32 *addr)
{
return rtl818x_ioread32_idx(priv, addr, 0);
}
-static inline void rtl818x_iowrite8_idx(struct rtl8187_priv *priv,
- u8 *addr, u8 val, u8 idx)
-{
- mutex_lock(&priv->io_mutex);
-
- priv->io_dmabuf->bits8 = val;
- usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
- RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
- (unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits8, sizeof(val), HZ / 2);
-
- mutex_unlock(&priv->io_mutex);
-}
+void rtl818x_iowrite8_idx(struct rtl8187_priv *priv,
+ u8 *addr, u8 val, u8 idx);
static inline void rtl818x_iowrite8(struct rtl8187_priv *priv, u8 *addr, u8 val)
{
rtl818x_iowrite8_idx(priv, addr, val, 0);
}
-static inline void rtl818x_iowrite16_idx(struct rtl8187_priv *priv,
- __le16 *addr, u16 val, u8 idx)
-{
- mutex_lock(&priv->io_mutex);
-
- priv->io_dmabuf->bits16 = cpu_to_le16(val);
- usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
- RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
- (unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits16, sizeof(val), HZ / 2);
-
- mutex_unlock(&priv->io_mutex);
-}
+void rtl818x_iowrite16_idx(struct rtl8187_priv *priv,
+ __le16 *addr, u16 val, u8 idx);
static inline void rtl818x_iowrite16(struct rtl8187_priv *priv, __le16 *addr,
u16 val)
@@ -265,19 +201,8 @@ static inline void rtl818x_iowrite16(struct rtl8187_priv *priv, __le16 *addr,
rtl818x_iowrite16_idx(priv, addr, val, 0);
}
-static inline void rtl818x_iowrite32_idx(struct rtl8187_priv *priv,
- __le32 *addr, u32 val, u8 idx)
-{
- mutex_lock(&priv->io_mutex);
-
- priv->io_dmabuf->bits32 = cpu_to_le32(val);
- usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
- RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
- (unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits32, sizeof(val), HZ / 2);
-
- mutex_unlock(&priv->io_mutex);
-}
+void rtl818x_iowrite32_idx(struct rtl8187_priv *priv,
+ __le32 *addr, u32 val, u8 idx);
static inline void rtl818x_iowrite32(struct rtl8187_priv *priv, __le32 *addr,
u32 val)
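These header hunks de-inline the USB register accessors: each helper takes io_mutex and issues a synchronous USB control transfer, so inlining only bloated every call site. The call sites themselves are unchanged, e.g. (sketch; the register name is assumed):

	u8 cmd = rtl818x_ioread8(priv, &priv->map->CMD);	/* compiles exactly as before */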
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
index 5ecf18ed6..e6668ffb7 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
@@ -22,6 +22,99 @@
#include "rtl8187.h"
#include "rtl8225.h"
+u8 rtl818x_ioread8_idx(struct rtl8187_priv *priv,
+ u8 *addr, u8 idx)
+{
+ u8 val;
+
+ mutex_lock(&priv->io_mutex);
+ usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
+ RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
+ (unsigned long)addr, idx & 0x03,
+ &priv->io_dmabuf->bits8, sizeof(val), HZ / 2);
+
+ val = priv->io_dmabuf->bits8;
+ mutex_unlock(&priv->io_mutex);
+
+ return val;
+}
+
+u16 rtl818x_ioread16_idx(struct rtl8187_priv *priv,
+ __le16 *addr, u8 idx)
+{
+ __le16 val;
+
+ mutex_lock(&priv->io_mutex);
+ usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
+ RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
+ (unsigned long)addr, idx & 0x03,
+ &priv->io_dmabuf->bits16, sizeof(val), HZ / 2);
+
+ val = priv->io_dmabuf->bits16;
+ mutex_unlock(&priv->io_mutex);
+
+ return le16_to_cpu(val);
+}
+
+u32 rtl818x_ioread32_idx(struct rtl8187_priv *priv,
+ __le32 *addr, u8 idx)
+{
+ __le32 val;
+
+ mutex_lock(&priv->io_mutex);
+ usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
+ RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
+ (unsigned long)addr, idx & 0x03,
+ &priv->io_dmabuf->bits32, sizeof(val), HZ / 2);
+
+ val = priv->io_dmabuf->bits32;
+ mutex_unlock(&priv->io_mutex);
+
+ return le32_to_cpu(val);
+}
+
+void rtl818x_iowrite8_idx(struct rtl8187_priv *priv,
+ u8 *addr, u8 val, u8 idx)
+{
+ mutex_lock(&priv->io_mutex);
+
+ priv->io_dmabuf->bits8 = val;
+ usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
+ RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
+ (unsigned long)addr, idx & 0x03,
+ &priv->io_dmabuf->bits8, sizeof(val), HZ / 2);
+
+ mutex_unlock(&priv->io_mutex);
+}
+
+void rtl818x_iowrite16_idx(struct rtl8187_priv *priv,
+ __le16 *addr, u16 val, u8 idx)
+{
+ mutex_lock(&priv->io_mutex);
+
+ priv->io_dmabuf->bits16 = cpu_to_le16(val);
+ usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
+ RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
+ (unsigned long)addr, idx & 0x03,
+ &priv->io_dmabuf->bits16, sizeof(val), HZ / 2);
+
+ mutex_unlock(&priv->io_mutex);
+}
+
+void rtl818x_iowrite32_idx(struct rtl8187_priv *priv,
+ __le32 *addr, u32 val, u8 idx)
+{
+ mutex_lock(&priv->io_mutex);
+
+ priv->io_dmabuf->bits32 = cpu_to_le32(val);
+ usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
+ RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
+ (unsigned long)addr, idx & 0x03,
+ &priv->io_dmabuf->bits32, sizeof(val), HZ / 2);
+
+ mutex_unlock(&priv->io_mutex);
+}
+
static void rtl8225_write_bitbang(struct ieee80211_hw *dev, u8 addr, u16 data)
{
struct rtl8187_priv *priv = dev->priv;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Makefile b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
index 5dea3bb93..1cf951eb0 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/Makefile
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
@@ -1 +1,4 @@
obj-$(CONFIG_RTL8XXXU) += rtl8xxxu.o
+
+rtl8xxxu-y := rtl8xxxu_core.o rtl8xxxu_8192e.o rtl8xxxu_8723b.o \
+ rtl8xxxu_8723a.o rtl8xxxu_8192c.o
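This is standard kbuild composition: a composite object lists its parts, and the pieces are linked into a single module. Generic sketch:

obj-$(CONFIG_DEMO) += demo.o
demo-y := demo_core.o demo_hw.o		# linked together into demo.ko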
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 7b73654e1..870c9cd5c 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -42,12 +42,18 @@
#define REALTEK_USB_CMD_IDX 0x00
#define TX_TOTAL_PAGE_NUM 0xf8
+#define TX_TOTAL_PAGE_NUM_8192E 0xf3
/* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */
#define TX_PAGE_NUM_PUBQ 0xe7
#define TX_PAGE_NUM_HI_PQ 0x0c
#define TX_PAGE_NUM_LO_PQ 0x02
#define TX_PAGE_NUM_NORM_PQ 0x02
+#define TX_PAGE_NUM_PUBQ_8192E 0xe7
+#define TX_PAGE_NUM_HI_PQ_8192E 0x08
+#define TX_PAGE_NUM_LO_PQ_8192E 0x0c
+#define TX_PAGE_NUM_NORM_PQ_8192E 0x00
+
#define RTL_FW_PAGE_SIZE 4096
#define RTL8XXXU_FIRMWARE_POLL_MAX 1000
@@ -65,13 +71,37 @@
#define EFUSE_BT_MAP_LEN_8723A 1024
#define EFUSE_MAX_WORD_UNIT 4
+enum rtl8xxxu_rtl_chip {
+ RTL8192S = 0x81920,
+ RTL8191S = 0x81910,
+ RTL8192C = 0x8192c,
+ RTL8191C = 0x8191c,
+ RTL8188C = 0x8188c,
+ RTL8188R = 0x81889,
+ RTL8192D = 0x8192d,
+ RTL8723A = 0x8723a,
+ RTL8188E = 0x8188e,
+ RTL8812 = 0x88120,
+ RTL8821 = 0x88210,
+ RTL8192E = 0x8192e,
+ RTL8191E = 0x8191e,
+ RTL8723B = 0x8723b,
+ RTL8814A = 0x8814a,
+ RTL8881A = 0x8881a,
+ RTL8821B = 0x8821b,
+ RTL8822B = 0x8822b,
+ RTL8703B = 0x8703b,
+ RTL8195A = 0x8195a,
+ RTL8188F = 0x8188f
+};
+
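An editorial aside on the values above: the enumerators encode the chip name in hex nibbles (e.g. RTL8723B == 0x8723b; non-hex letters get a digit, as in RTL8188R == 0x81889), so a raw value printed in a log reads naturally:

/* dev_info(dev, "RTL%05x\n", priv->rtl_chip);  prints "RTL8723b" for an 8723bu */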
enum rtl8xxxu_rx_type {
RX_TYPE_DATA_PKT = 0,
RX_TYPE_C2H = 1,
RX_TYPE_ERROR = -1
};
-struct rtl8xxxu_rx_desc {
+struct rtl8xxxu_rxdesc16 {
#ifdef __LITTLE_ENDIAN
u32 pktlen:14;
u32 crc32:1;
@@ -207,7 +237,7 @@ struct rtl8xxxu_rx_desc {
#endif
};
-struct rtl8723bu_rx_desc {
+struct rtl8xxxu_rxdesc24 {
#ifdef __LITTLE_ENDIAN
u32 pktlen:14;
u32 crc32:1;
@@ -332,7 +362,7 @@ struct rtl8723bu_rx_desc {
__le32 tsfl;
};
-struct rtl8723au_tx_desc {
+struct rtl8xxxu_txdesc32 {
__le16 pkt_size;
u8 pkt_offset;
u8 txdw0;
@@ -346,7 +376,7 @@ struct rtl8723au_tx_desc {
__le16 txdw7;
};
-struct rtl8723bu_tx_desc {
+struct rtl8xxxu_txdesc40 {
__le16 pkt_size;
u8 pkt_offset;
u8 txdw0;
@@ -422,10 +452,10 @@ struct rtl8723bu_tx_desc {
* aggregation enable and break respectively. For 8723bu, bits 0-7 are macid.
*/
#define TXDESC_PKT_OFFSET_SZ 0
-#define TXDESC_AGG_ENABLE_8723A BIT(5)
-#define TXDESC_AGG_BREAK_8723A BIT(6)
-#define TXDESC_MACID_SHIFT_8723B 0
-#define TXDESC_MACID_MASK_8723B 0x00f0
+#define TXDESC32_AGG_ENABLE BIT(5)
+#define TXDESC32_AGG_BREAK BIT(6)
+#define TXDESC40_MACID_SHIFT 0
+#define TXDESC40_MACID_MASK 0x00f0
#define TXDESC_QUEUE_SHIFT 8
#define TXDESC_QUEUE_MASK 0x1f00
#define TXDESC_QUEUE_BK 0x2
@@ -437,9 +467,9 @@ struct rtl8723bu_tx_desc {
#define TXDESC_QUEUE_MGNT 0x12
#define TXDESC_QUEUE_CMD 0x13
#define TXDESC_QUEUE_MAX (TXDESC_QUEUE_CMD + 1)
-#define TXDESC_RDG_NAV_EXT_8723B BIT(13)
-#define TXDESC_LSIG_TXOP_ENABLE_8723B BIT(14)
-#define TXDESC_PIFS_8723B BIT(15)
+#define TXDESC40_RDG_NAV_EXT BIT(13)
+#define TXDESC40_LSIG_TXOP_ENABLE BIT(14)
+#define TXDESC40_PIFS BIT(15)
#define DESC_RATE_ID_SHIFT 16
#define DESC_RATE_ID_MASK 0xf
@@ -451,71 +481,71 @@ struct rtl8723bu_tx_desc {
#define TXDESC_HWPC BIT(31)
/* Word 2 */
-#define TXDESC_PAID_SHIFT_8723B 0
-#define TXDESC_PAID_MASK_8723B 0x1ff
-#define TXDESC_CCA_RTS_SHIFT_8723B 10
-#define TXDESC_CCA_RTS_MASK_8723B 0xc00
-#define TXDESC_AGG_ENABLE_8723B BIT(12)
-#define TXDESC_RDG_ENABLE_8723B BIT(13)
-#define TXDESC_AGG_BREAK_8723B BIT(16)
-#define TXDESC_MORE_FRAG_8723B BIT(17)
-#define TXDESC_RAW_8723B BIT(18)
-#define TXDESC_ACK_REPORT_8723A BIT(19)
-#define TXDESC_SPE_RPT_8723B BIT(19)
+#define TXDESC40_PAID_SHIFT 0
+#define TXDESC40_PAID_MASK 0x1ff
+#define TXDESC40_CCA_RTS_SHIFT 10
+#define TXDESC40_CCA_RTS_MASK 0xc00
+#define TXDESC40_AGG_ENABLE BIT(12)
+#define TXDESC40_RDG_ENABLE BIT(13)
+#define TXDESC40_AGG_BREAK BIT(16)
+#define TXDESC40_MORE_FRAG BIT(17)
+#define TXDESC40_RAW BIT(18)
+#define TXDESC32_ACK_REPORT BIT(19)
+#define TXDESC40_SPE_RPT BIT(19)
#define TXDESC_AMPDU_DENSITY_SHIFT 20
-#define TXDESC_BT_INT_8723B BIT(23)
-#define TXDESC_GID_8723B BIT(24)
+#define TXDESC40_BT_INT BIT(23)
+#define TXDESC40_GID_SHIFT 24
/* Word 3 */
-#define TXDESC_USE_DRIVER_RATE_8723B BIT(8)
-#define TXDESC_CTS_SELF_ENABLE_8723B BIT(11)
-#define TXDESC_RTS_CTS_ENABLE_8723B BIT(12)
-#define TXDESC_HW_RTS_ENABLE_8723B BIT(13)
-#define TXDESC_SEQ_SHIFT_8723A 16
-#define TXDESC_SEQ_MASK_8723A 0x0fff0000
+#define TXDESC40_USE_DRIVER_RATE BIT(8)
+#define TXDESC40_CTS_SELF_ENABLE BIT(11)
+#define TXDESC40_RTS_CTS_ENABLE BIT(12)
+#define TXDESC40_HW_RTS_ENABLE BIT(13)
+#define TXDESC32_SEQ_SHIFT 16
+#define TXDESC32_SEQ_MASK 0x0fff0000
/* Word 4 */
-#define TXDESC_RTS_RATE_SHIFT_8723A 0
-#define TXDESC_RTS_RATE_MASK_8723A 0x3f
-#define TXDESC_QOS_8723A BIT(6)
-#define TXDESC_HW_SEQ_ENABLE_8723A BIT(7)
-#define TXDESC_USE_DRIVER_RATE_8723A BIT(8)
+#define TXDESC32_RTS_RATE_SHIFT 0
+#define TXDESC32_RTS_RATE_MASK 0x3f
+#define TXDESC32_QOS BIT(6)
+#define TXDESC32_HW_SEQ_ENABLE BIT(7)
+#define TXDESC32_USE_DRIVER_RATE BIT(8)
#define TXDESC_DISABLE_DATA_FB BIT(10)
-#define TXDESC_CTS_SELF_ENABLE_8723A BIT(11)
-#define TXDESC_RTS_CTS_ENABLE_8723A BIT(12)
-#define TXDESC_HW_RTS_ENABLE_8723A BIT(13)
+#define TXDESC32_CTS_SELF_ENABLE BIT(11)
+#define TXDESC32_RTS_CTS_ENABLE BIT(12)
+#define TXDESC32_HW_RTS_ENABLE BIT(13)
#define TXDESC_PRIME_CH_OFF_LOWER BIT(20)
#define TXDESC_PRIME_CH_OFF_UPPER BIT(21)
-#define TXDESC_SHORT_PREAMBLE_8723A BIT(24)
+#define TXDESC32_SHORT_PREAMBLE BIT(24)
#define TXDESC_DATA_BW BIT(25)
#define TXDESC_RTS_DATA_BW BIT(27)
#define TXDESC_RTS_PRIME_CH_OFF_LOWER BIT(28)
#define TXDESC_RTS_PRIME_CH_OFF_UPPER BIT(29)
-#define TXDESC_DATA_RATE_FB_SHIFT_8723B 8
-#define TXDESC_DATA_RATE_FB_MASK_8723B 0x00001f00
-#define TXDESC_RETRY_LIMIT_ENABLE_8723B BIT(17)
-#define TXDESC_RETRY_LIMIT_SHIFT_8723B 18
-#define TXDESC_RETRY_LIMIT_MASK_8723B 0x00fc0000
-#define TXDESC_RTS_RATE_SHIFT_8723B 24
-#define TXDESC_RTS_RATE_MASK_8723B 0x3f000000
+#define TXDESC40_DATA_RATE_FB_SHIFT 8
+#define TXDESC40_DATA_RATE_FB_MASK 0x00001f00
+#define TXDESC40_RETRY_LIMIT_ENABLE BIT(17)
+#define TXDESC40_RETRY_LIMIT_SHIFT 18
+#define TXDESC40_RETRY_LIMIT_MASK 0x00fc0000
+#define TXDESC40_RTS_RATE_SHIFT 24
+#define TXDESC40_RTS_RATE_MASK 0x3f000000
/* Word 5 */
-#define TXDESC_SHORT_PREAMBLE_8723B BIT(4)
-#define TXDESC_SHORT_GI BIT(6)
+#define TXDESC40_SHORT_PREAMBLE BIT(4)
+#define TXDESC32_SHORT_GI BIT(6)
#define TXDESC_CCX_TAG BIT(7)
-#define TXDESC_RETRY_LIMIT_ENABLE_8723A BIT(17)
-#define TXDESC_RETRY_LIMIT_SHIFT_8723A 18
-#define TXDESC_RETRY_LIMIT_MASK_8723A 0x00fc0000
+#define TXDESC32_RETRY_LIMIT_ENABLE BIT(17)
+#define TXDESC32_RETRY_LIMIT_SHIFT 18
+#define TXDESC32_RETRY_LIMIT_MASK 0x00fc0000
/* Word 6 */
#define TXDESC_MAX_AGG_SHIFT 11
/* Word 8 */
-#define TXDESC_HW_SEQ_ENABLE_8723B BIT(15)
+#define TXDESC40_HW_SEQ_ENABLE BIT(15)
/* Word 9 */
-#define TXDESC_SEQ_SHIFT_8723B 12
-#define TXDESC_SEQ_MASK_8723B 0x00fff000
+#define TXDESC40_SEQ_SHIFT 12
+#define TXDESC40_SEQ_MASK 0x00fff000
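Editorial summary of the renames in this block: descriptor structures and their bit definitions are now keyed to the descriptor size in bytes instead of to one chip that happens to use them:

/* rtl8xxxu_rxdesc16 / rtl8xxxu_txdesc32 and TXDESC32_*: gen1 parts (8723a, 8192c)
 * rtl8xxxu_rxdesc24 / rtl8xxxu_txdesc40 and TXDESC40_*: gen2 parts (8723b, 8192e) */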
struct phy_rx_agc_info {
#ifdef __LITTLE_ENDIAN
@@ -600,6 +630,31 @@ struct rtl8xxxu_firmware_header {
};
/*
+ * Base power index offset tables required by the 8723au/8192cu/8188ru.
+ */
+struct rtl8xxxu_power_base {
+ u32 reg_0e00;
+ u32 reg_0e04;
+ u32 reg_0e08;
+ u32 reg_086c;
+
+ u32 reg_0e10;
+ u32 reg_0e14;
+ u32 reg_0e18;
+ u32 reg_0e1c;
+
+ u32 reg_0830;
+ u32 reg_0834;
+ u32 reg_0838;
+ u32 reg_086c_2;
+
+ u32 reg_083c;
+ u32 reg_0848;
+ u32 reg_084c;
+ u32 reg_0868;
+};
+
+/*
* The 8723au has 3 channel groups: 1-3, 4-9, and 10-14
*/
struct rtl8723au_idx {
@@ -763,55 +818,49 @@ struct rtl8192eu_efuse_tx_power {
u8 cck_base[6];
u8 ht40_base[5];
struct rtl8723au_idx ht20_ofdm_1s_diff;
- struct rtl8723au_idx ht40_ht20_2s_diff;
- struct rtl8723au_idx ofdm_cck_2s_diff; /* not used */
- struct rtl8723au_idx ht40_ht20_3s_diff;
- struct rtl8723au_idx ofdm_cck_3s_diff; /* not used */
- struct rtl8723au_idx ht40_ht20_4s_diff;
- struct rtl8723au_idx ofdm_cck_4s_diff; /* not used */
+ struct rtl8723bu_pwr_idx pwr_diff[3];
+ u8 dummy5g[24]; /* max channel group (14) + power diff offset (10) */
};
struct rtl8192eu_efuse {
__le16 rtl_id;
u8 res0[0x0e];
struct rtl8192eu_efuse_tx_power tx_power_index_A; /* 0x10 */
- struct rtl8192eu_efuse_tx_power tx_power_index_B; /* 0x22 */
- struct rtl8192eu_efuse_tx_power tx_power_index_C; /* 0x34 */
- struct rtl8192eu_efuse_tx_power tx_power_index_D; /* 0x46 */
- u8 res1[0x60];
+ struct rtl8192eu_efuse_tx_power tx_power_index_B; /* 0x3a */
+ u8 res2[0x54];
u8 channel_plan; /* 0xb8 */
u8 xtal_k;
u8 thermal_meter;
u8 iqk_lck;
u8 pa_type; /* 0xbc */
u8 lna_type_2g; /* 0xbd */
- u8 res2[1];
+ u8 res3[1];
u8 lna_type_5g; /* 0xbf */
- u8 res13[1];
+ u8 res4[1];
u8 rf_board_option;
u8 rf_feature_option;
u8 rf_bt_setting;
u8 eeprom_version;
u8 eeprom_customer_id;
- u8 res3[3];
+ u8 res5[3];
u8 rf_antenna_option; /* 0xc9 */
- u8 res4[6];
+ u8 res6[6];
u8 vid; /* 0xd0 */
- u8 res5[1];
+ u8 res7[1];
u8 pid; /* 0xd2 */
- u8 res6[1];
+ u8 res8[1];
u8 usb_optional_function;
- u8 res7[2];
+ u8 res9[2];
u8 mac_addr[ETH_ALEN]; /* 0xd7 */
- u8 res8[2];
+ u8 res10[2];
u8 vendor_name[7];
- u8 res9[2];
+ u8 res11[2];
u8 device_name[0x0b]; /* 0xe8 */
- u8 res10[2];
+ u8 res12[2];
u8 serial[0x0b]; /* 0xf5 */
- u8 res11[0x30];
+ u8 res13[0x30];
u8 unknown[0x0d]; /* 0x130 */
- u8 res12[0xc3];
+ u8 res14[0xc3];
};
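With the offset comments above now load-bearing (tx_power_index_B moved from 0x22 to 0x3a), a compile-time check is cheap insurance. A sketch, assuming the structure is packed exactly as laid out, placed inside any function on the probe path:

	BUILD_BUG_ON(offsetof(struct rtl8192eu_efuse, tx_power_index_B) != 0x3a);
	BUILD_BUG_ON(offsetof(struct rtl8192eu_efuse, channel_plan) != 0xb8);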
struct rtl8xxxu_reg8val {
@@ -1177,6 +1226,7 @@ struct rtl8xxxu_priv {
struct rtl8723au_idx ofdm_tx_power_diff[RTL8723B_TX_COUNT];
struct rtl8723au_idx ht20_tx_power_diff[RTL8723B_TX_COUNT];
struct rtl8723au_idx ht40_tx_power_diff[RTL8723B_TX_COUNT];
+ struct rtl8xxxu_power_base *power_base;
u32 chip_cut:4;
u32 rom_rev:4;
u32 is_multi_func:1;
@@ -1204,7 +1254,6 @@ struct rtl8xxxu_priv {
u8 rf_paths;
u8 rx_paths;
u8 tx_paths;
- u32 rf_mode_ag[2];
u32 rege94;
u32 rege9c;
u32 regeb4;
@@ -1236,8 +1285,9 @@ struct rtl8xxxu_priv {
u32 mac_backup[RTL8XXXU_MAC_REGS];
u32 bb_backup[RTL8XXXU_BB_REGS];
u32 bb_recovery_backup[RTL8XXXU_BB_REGS];
- u32 rtlchip;
+ enum rtl8xxxu_rtl_chip rtl_chip;
u8 pi_enabled:1;
+ u8 no_pape:1;
u8 int_buf[USB_INTR_CONTENT_LENGTH];
};
@@ -1260,6 +1310,8 @@ struct rtl8xxxu_fileops {
void (*power_off) (struct rtl8xxxu_priv *priv);
void (*reset_8051) (struct rtl8xxxu_priv *priv);
int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page);
+ void (*init_phy_bb) (struct rtl8xxxu_priv *priv);
+ int (*init_phy_rf) (struct rtl8xxxu_priv *priv);
void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv);
void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv);
void (*config_channel) (struct ieee80211_hw *hw);
@@ -1269,6 +1321,7 @@ struct rtl8xxxu_fileops {
void (*init_statistics) (struct rtl8xxxu_priv *priv);
void (*enable_rf) (struct rtl8xxxu_priv *priv);
void (*disable_rf) (struct rtl8xxxu_priv *priv);
+ void (*usb_quirks) (struct rtl8xxxu_priv *priv);
void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel,
bool ht40);
void (*update_rate_mask) (struct rtl8xxxu_priv *priv,
@@ -1276,12 +1329,98 @@ struct rtl8xxxu_fileops {
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
int writeN_block_size;
- u16 mbox_ext_reg;
- char mbox_ext_width;
char tx_desc_size;
+ char rx_desc_size;
char has_s0s1;
u32 adda_1t_init;
u32 adda_1t_path_on;
u32 adda_2t_path_on_a;
u32 adda_2t_path_on_b;
+ u16 trxff_boundary;
+ u8 pbp_rx;
+ u8 pbp_tx;
+ struct rtl8xxxu_reg8val *mactable;
+ u8 total_page_num;
+ u8 page_num_hi;
+ u8 page_num_lo;
+ u8 page_num_norm;
};
+
+extern int rtl8xxxu_debug;
+
+extern struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[];
+extern const u32 rtl8xxxu_iqk_phy_iq_bb_reg[];
+u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr);
+u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr);
+u32 rtl8xxxu_read32(struct rtl8xxxu_priv *priv, u16 addr);
+int rtl8xxxu_write8(struct rtl8xxxu_priv *priv, u16 addr, u8 val);
+int rtl8xxxu_write16(struct rtl8xxxu_priv *priv, u16 addr, u16 val);
+int rtl8xxxu_write32(struct rtl8xxxu_priv *priv, u16 addr, u32 val);
+u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv,
+ enum rtl8xxxu_rfpath path, u8 reg);
+int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
+ enum rtl8xxxu_rfpath path, u8 reg, u32 data);
+void rtl8xxxu_save_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+ u32 *backup, int count);
+void rtl8xxxu_restore_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+ u32 *backup, int count);
+void rtl8xxxu_save_mac_regs(struct rtl8xxxu_priv *priv,
+ const u32 *reg, u32 *backup);
+void rtl8xxxu_restore_mac_regs(struct rtl8xxxu_priv *priv,
+ const u32 *reg, u32 *backup);
+void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs,
+ bool path_a_on);
+void rtl8xxxu_mac_calibration(struct rtl8xxxu_priv *priv,
+ const u32 *regs, u32 *backup);
+void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv, bool iqk_ok,
+ int result[][8], int candidate, bool tx_only);
+void rtl8xxxu_fill_iqk_matrix_b(struct rtl8xxxu_priv *priv, bool iqk_ok,
+ int result[][8], int candidate, bool tx_only);
+int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_rfregval *table,
+ enum rtl8xxxu_rfpath path);
+int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_reg32val *array);
+int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name);
+void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv);
+int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page);
+void rtl8xxxu_gen2_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start);
+int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv);
+int rtl8xxxu_gen2_h2c_cmd(struct rtl8xxxu_priv *priv,
+ struct h2c_cmd *h2c, int len);
+int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv);
+int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page);
+void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_gen1_init_phy_bb(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv,
+ int channel, bool ht40);
+void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw);
+void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw);
+void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_gen2_usb_quirks(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
+ u32 ramask, int sgi);
+void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
+ u32 ramask, int sgi);
+void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
+ u8 macid, bool connect);
+void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
+ u8 macid, bool connect);
+void rtl8xxxu_gen1_enable_rf(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_gen1_disable_rf(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv);
+int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status);
+int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status);
+int rtl8xxxu_gen2_channel_to_group(int channel);
+bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
+ int result[][8], int c1, int c2);
+
+extern struct rtl8xxxu_fileops rtl8192cu_fops;
+extern struct rtl8xxxu_fileops rtl8192eu_fops;
+extern struct rtl8xxxu_fileops rtl8723au_fops;
+extern struct rtl8xxxu_fileops rtl8723bu_fops;
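The header now exports one fileops table per chip; the core dispatches every chip-specific step through it. A hypothetical core snippet (sketch; assumes priv->fops points at the matched table):

static int demo_init_phy(struct rtl8xxxu_priv *priv)
{
	int ret;

	priv->fops->init_phy_bb(priv);		/* chip-specific baseband init */
	ret = priv->fops->init_phy_rf(priv);	/* chip-specific RF init */
	if (!ret && priv->fops->usb_quirks)
		priv->fops->usb_quirks(priv);	/* optional per-chip USB tweaks */
	return ret;
}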
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
new file mode 100644
index 000000000..08066dec1
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
@@ -0,0 +1,586 @@
+/*
+ * RTL8XXXU mac80211 USB driver - 8188c/8188r/8192c specific subdriver
+ *
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This driver was written as a replacement for the vendor provided
+ * rtl8723au driver. As the Realtek 8xxx chips are very similar in
+ * their programming interface, I have started adding support for
+ * additional 8xxx chips like the 8192cu, 8188cus, etc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+static struct rtl8xxxu_power_base rtl8192c_power_base = {
+ .reg_0e00 = 0x07090c0c,
+ .reg_0e04 = 0x01020405,
+ .reg_0e08 = 0x00000000,
+ .reg_086c = 0x00000000,
+
+ .reg_0e10 = 0x0b0c0c0e,
+ .reg_0e14 = 0x01030506,
+ .reg_0e18 = 0x0b0c0d0e,
+ .reg_0e1c = 0x01030509,
+
+ .reg_0830 = 0x07090c0c,
+ .reg_0834 = 0x01020405,
+ .reg_0838 = 0x00000000,
+ .reg_086c_2 = 0x00000000,
+
+ .reg_083c = 0x0b0c0d0e,
+ .reg_0848 = 0x01030509,
+ .reg_084c = 0x0b0c0d0e,
+ .reg_0868 = 0x01030509,
+};
+
+static struct rtl8xxxu_power_base rtl8188r_power_base = {
+ .reg_0e00 = 0x06080808,
+ .reg_0e04 = 0x00040406,
+ .reg_0e08 = 0x00000000,
+ .reg_086c = 0x00000000,
+
+ .reg_0e10 = 0x04060608,
+ .reg_0e14 = 0x00020204,
+ .reg_0e18 = 0x04060608,
+ .reg_0e1c = 0x00020204,
+
+ .reg_0830 = 0x06080808,
+ .reg_0834 = 0x00040406,
+ .reg_0838 = 0x00000000,
+ .reg_086c_2 = 0x00000000,
+
+ .reg_083c = 0x04060608,
+ .reg_0848 = 0x00020204,
+ .reg_084c = 0x04060608,
+ .reg_0868 = 0x00020204,
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00018c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+ {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x19, 0x00000000}, {0x1a, 0x00010255},
+ {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+ {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+ {0x1f, 0x00080001}, {0x20, 0x0000b614},
+ {0x21, 0x0006c000}, {0x22, 0x00000000},
+ {0x23, 0x00001558}, {0x24, 0x00000060},
+ {0x25, 0x00000483}, {0x26, 0x0004f000},
+ {0x27, 0x000ec7d9}, {0x28, 0x000577c0},
+ {0x29, 0x00004783}, {0x2a, 0x00000001},
+ {0x2b, 0x00021334}, {0x2a, 0x00000000},
+ {0x2b, 0x00000054}, {0x2a, 0x00000001},
+ {0x2b, 0x00000808}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+ {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+ {0x2b, 0x00000808}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+ {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+ {0x2b, 0x00000808}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+ {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+ {0x2b, 0x00000709}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+ {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+ {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+ {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+ {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+ {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+ {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+ {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+ {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+ {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+ {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+ {0x10, 0x0002000f}, {0x11, 0x000203f9},
+ {0x10, 0x0003000f}, {0x11, 0x000ff500},
+ {0x10, 0x00000000}, {0x11, 0x00000000},
+ {0x10, 0x0008000f}, {0x11, 0x0003f100},
+ {0x10, 0x0009000f}, {0x11, 0x00023100},
+ {0x12, 0x00032000}, {0x12, 0x00071000},
+ {0x12, 0x000b0000}, {0x12, 0x000fc000},
+ {0x13, 0x000287b3}, {0x13, 0x000244b7},
+ {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+ {0x13, 0x00018493}, {0x13, 0x0001429b},
+ {0x13, 0x00010299}, {0x13, 0x0000c29c},
+ {0x13, 0x000081a0}, {0x13, 0x000040ac},
+ {0x13, 0x00000020}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f424},
+ {0x15, 0x0004f424}, {0x15, 0x0008f424},
+ {0x15, 0x000cf424}, {0x16, 0x000e0330},
+ {0x16, 0x000a0330}, {0x16, 0x00060330},
+ {0x16, 0x00020330}, {0x00, 0x00010159},
+ {0x18, 0x0000f401}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0x1f, 0x00080003},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0x1e, 0x00044457}, {0x1f, 0x00080000},
+ {0x00, 0x00030159},
+ {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radiob_2t_init_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00018c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+ {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x12, 0x00032000}, {0x12, 0x00071000},
+ {0x12, 0x000b0000}, {0x12, 0x000fc000},
+ {0x13, 0x000287af}, {0x13, 0x000244b7},
+ {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+ {0x13, 0x00018493}, {0x13, 0x00014297},
+ {0x13, 0x00010295}, {0x13, 0x0000c298},
+ {0x13, 0x0000819c}, {0x13, 0x000040a8},
+ {0x13, 0x0000001c}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f424},
+ {0x15, 0x0004f424}, {0x15, 0x0008f424},
+ {0x15, 0x000cf424}, {0x16, 0x000e0330},
+ {0x16, 0x000a0330}, {0x16, 0x00060330},
+ {0x16, 0x00020330},
+ {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radioa_1t_init_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00018c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+ {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x19, 0x00000000}, {0x1a, 0x00010255},
+ {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+ {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+ {0x1f, 0x00080001}, {0x20, 0x0000b614},
+ {0x21, 0x0006c000}, {0x22, 0x00000000},
+ {0x23, 0x00001558}, {0x24, 0x00000060},
+ {0x25, 0x00000483}, {0x26, 0x0004f000},
+ {0x27, 0x000ec7d9}, {0x28, 0x000577c0},
+ {0x29, 0x00004783}, {0x2a, 0x00000001},
+ {0x2b, 0x00021334}, {0x2a, 0x00000000},
+ {0x2b, 0x00000054}, {0x2a, 0x00000001},
+ {0x2b, 0x00000808}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+ {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+ {0x2b, 0x00000808}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+ {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+ {0x2b, 0x00000808}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+ {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+ {0x2b, 0x00000709}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+ {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+ {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+ {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+ {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+ {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+ {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+ {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+ {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+ {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+ {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+ {0x10, 0x0002000f}, {0x11, 0x000203f9},
+ {0x10, 0x0003000f}, {0x11, 0x000ff500},
+ {0x10, 0x00000000}, {0x11, 0x00000000},
+ {0x10, 0x0008000f}, {0x11, 0x0003f100},
+ {0x10, 0x0009000f}, {0x11, 0x00023100},
+ {0x12, 0x00032000}, {0x12, 0x00071000},
+ {0x12, 0x000b0000}, {0x12, 0x000fc000},
+ {0x13, 0x000287b3}, {0x13, 0x000244b7},
+ {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+ {0x13, 0x00018493}, {0x13, 0x0001429b},
+ {0x13, 0x00010299}, {0x13, 0x0000c29c},
+ {0x13, 0x000081a0}, {0x13, 0x000040ac},
+ {0x13, 0x00000020}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f405},
+ {0x15, 0x0004f405}, {0x15, 0x0008f405},
+ {0x15, 0x000cf405}, {0x16, 0x000e0330},
+ {0x16, 0x000a0330}, {0x16, 0x00060330},
+ {0x16, 0x00020330}, {0x00, 0x00010159},
+ {0x18, 0x0000f401}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0x1f, 0x00080003},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0x1e, 0x00044457}, {0x1f, 0x00080000},
+ {0x00, 0x00030159},
+ {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00018c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001adb0}, {0x0b, 0x00054867},
+ {0x0c, 0x0008992e}, {0x0d, 0x0000e529},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x19, 0x00000000}, {0x1a, 0x00000255},
+ {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+ {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+ {0x1f, 0x00080001}, {0x20, 0x0000b614},
+ {0x21, 0x0006c000}, {0x22, 0x0000083c},
+ {0x23, 0x00001558}, {0x24, 0x00000060},
+ {0x25, 0x00000483}, {0x26, 0x0004f000},
+ {0x27, 0x000ec7d9}, {0x28, 0x000977c0},
+ {0x29, 0x00004783}, {0x2a, 0x00000001},
+ {0x2b, 0x00021334}, {0x2a, 0x00000000},
+ {0x2b, 0x00000054}, {0x2a, 0x00000001},
+ {0x2b, 0x00000808}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+ {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+ {0x2b, 0x00000808}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+ {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+ {0x2b, 0x00000808}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+ {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+ {0x2b, 0x00000709}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+ {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+ {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+ {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+ {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+ {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+ {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+ {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+ {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+ {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+ {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+ {0x10, 0x0002000f}, {0x11, 0x000203f9},
+ {0x10, 0x0003000f}, {0x11, 0x000ff500},
+ {0x10, 0x00000000}, {0x11, 0x00000000},
+ {0x10, 0x0008000f}, {0x11, 0x0003f100},
+ {0x10, 0x0009000f}, {0x11, 0x00023100},
+ {0x12, 0x000d8000}, {0x12, 0x00090000},
+ {0x12, 0x00051000}, {0x12, 0x00012000},
+ {0x13, 0x00028fb4}, {0x13, 0x00024fa8},
+ {0x13, 0x000207a4}, {0x13, 0x0001c3b0},
+ {0x13, 0x000183a4}, {0x13, 0x00014398},
+ {0x13, 0x000101a4}, {0x13, 0x0000c198},
+ {0x13, 0x000080a4}, {0x13, 0x00004098},
+ {0x13, 0x00000000}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f405},
+ {0x15, 0x0004f405}, {0x15, 0x0008f405},
+ {0x15, 0x000cf405}, {0x16, 0x000e0330},
+ {0x16, 0x000a0330}, {0x16, 0x00060330},
+ {0x16, 0x00020330}, {0x00, 0x00010159},
+ {0x18, 0x0000f401}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0x1f, 0x00080003},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0x1e, 0x00044457}, {0x1f, 0x00080000},
+ {0x00, 0x00030159},
+ {0xff, 0xffffffff}
+};
+
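+/*
+ * Select the firmware image based on the efuse vendor (UMC or not) and
+ * chip cut; the actual file names are elided here by the linux-libre
+ * deblobbing process.
+ */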
+static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ char *fw_name;
+ int ret;
+
+ if (!priv->vendor_umc)
+ fw_name = "/*(DEBLOBBED)*/";
+ else if (priv->chip_cut || priv->rtl_chip == RTL8192C)
+ fw_name = "/*(DEBLOBBED)*/";
+ else
+ fw_name = "/*(DEBLOBBED)*/";
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+
+ return ret;
+}
+
+static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8192cu_efuse *efuse = &priv->efuse_wifi.efuse8192;
+ int i;
+
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
+
+ memcpy(priv->cck_tx_power_index_A,
+ efuse->cck_tx_power_index_A,
+ sizeof(efuse->cck_tx_power_index_A));
+ memcpy(priv->cck_tx_power_index_B,
+ efuse->cck_tx_power_index_B,
+ sizeof(efuse->cck_tx_power_index_B));
+
+ memcpy(priv->ht40_1s_tx_power_index_A,
+ efuse->ht40_1s_tx_power_index_A,
+ sizeof(efuse->ht40_1s_tx_power_index_A));
+ memcpy(priv->ht40_1s_tx_power_index_B,
+ efuse->ht40_1s_tx_power_index_B,
+ sizeof(efuse->ht40_1s_tx_power_index_B));
+ memcpy(priv->ht40_2s_tx_power_index_diff,
+ efuse->ht40_2s_tx_power_index_diff,
+ sizeof(efuse->ht40_2s_tx_power_index_diff));
+
+ memcpy(priv->ht20_tx_power_index_diff,
+ efuse->ht20_tx_power_index_diff,
+ sizeof(efuse->ht20_tx_power_index_diff));
+ memcpy(priv->ofdm_tx_power_index_diff,
+ efuse->ofdm_tx_power_index_diff,
+ sizeof(efuse->ofdm_tx_power_index_diff));
+
+ memcpy(priv->ht40_max_power_offset,
+ efuse->ht40_max_power_offset,
+ sizeof(efuse->ht40_max_power_offset));
+ memcpy(priv->ht20_max_power_offset,
+ efuse->ht20_max_power_offset,
+ sizeof(efuse->ht20_max_power_offset));
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n",
+ efuse->vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.20s\n",
+ efuse->device_name);
+
+ priv->power_base = &rtl8192c_power_base;
+
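+	/* Bit 5 of the rf_regulatory byte marks the high-PA 8188RU variant */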
+ if (efuse->rf_regulatory & 0x20) {
+ sprintf(priv->chip_name, "8188RU");
+ priv->rtl_chip = RTL8188R;
+ priv->hi_pa = 1;
+ priv->no_pape = 1;
+ priv->power_base = &rtl8188r_power_base;
+ }
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
+ unsigned char *raw = priv->efuse_wifi.raw;
+
+ dev_info(&priv->udev->dev,
+ "%s: dumping efuse (0x%02zx bytes):\n",
+ __func__, sizeof(struct rtl8192cu_efuse));
+ for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8) {
+ dev_info(&priv->udev->dev, "%02x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ raw[i], raw[i + 1], raw[i + 2],
+ raw[i + 3], raw[i + 4], raw[i + 5],
+ raw[i + 6], raw[i + 7]);
+ }
+ }
+ return 0;
+}
+
+static int rtl8192cu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8xxxu_rfregval *rftable;
+ int ret;
+
+ if (priv->rtl_chip == RTL8188R) {
+ rftable = rtl8188ru_radioa_1t_highpa_table;
+ ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+ } else if (priv->rf_paths == 1) {
+ rftable = rtl8192cu_radioa_1t_init_table;
+ ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+ } else {
+ rftable = rtl8192cu_radioa_2t_init_table;
+ ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+ if (ret)
+ goto exit;
+ rftable = rtl8192cu_radiob_2t_init_table;
+ ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B);
+ }
+
+exit:
+ return ret;
+}
+
+static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int i;
+
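+	/* Poll until the power-on autoload (PFM_ALDN) reports done */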
+ for (i = 100; i; i--) {
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO);
+ if (val8 & APS_FSMCO_PFM_ALDN)
+ break;
+ }
+
+ if (!i) {
+ pr_info("%s: Poll failed\n", __func__);
+ return -ENODEV;
+ }
+
+ /*
+ * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
+ */
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
+ rtl8xxxu_write8(priv, REG_SPS0_CTRL, 0x2b);
+ udelay(100);
+
+ val8 = rtl8xxxu_read8(priv, REG_LDOV12D_CTRL);
+ if (!(val8 & LDOV12D_ENABLE)) {
+ pr_info("%s: Enabling LDOV12D (%02x)\n", __func__, val8);
+ val8 |= LDOV12D_ENABLE;
+ rtl8xxxu_write8(priv, REG_LDOV12D_CTRL, val8);
+
+ udelay(100);
+
+ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+ val8 &= ~SYS_ISO_MD2PP;
+ rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+ }
+
+ /*
+ * Auto enable WLAN
+ */
+ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+ val16 |= APS_FSMCO_MAC_ENABLE;
+ rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+ for (i = 1000; i; i--) {
+ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+ if (!(val16 & APS_FSMCO_MAC_ENABLE))
+ break;
+ }
+ if (!i) {
+ pr_info("%s: FSMCO_MAC_ENABLE poll failed\n", __func__);
+ return -EBUSY;
+ }
+
+ /*
+ * Enable radio, GPIO, LED
+ */
+ val16 = APS_FSMCO_HW_SUSPEND | APS_FSMCO_ENABLE_POWERDOWN |
+ APS_FSMCO_PFM_ALDN;
+ rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+ /*
+ * Release RF digital isolation
+ */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_ISO_CTRL);
+ val16 &= ~SYS_ISO_DIOR;
+ rtl8xxxu_write16(priv, REG_SYS_ISO_CTRL, val16);
+
+ val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
+ val8 &= ~APSD_CTRL_OFF;
+ rtl8xxxu_write8(priv, REG_APSD_CTRL, val8);
+ for (i = 200; i; i--) {
+ val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
+ if (!(val8 & APSD_CTRL_OFF_STATUS))
+ break;
+ }
+
+ if (!i) {
+ pr_info("%s: APSD_CTRL poll failed\n", __func__);
+ return -EBUSY;
+ }
+
+ /*
+ * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+ */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | CR_PROTOCOL_ENABLE |
+ CR_SCHEDULE_ENABLE | CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE;
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ rtl8xxxu_write8(priv, 0xfe10, 0x19);
+
+ /*
+ * Workaround for 8188RU LNA power leakage problem.
+ */
+ if (priv->rtl_chip == RTL8188R) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
+ val32 &= ~BIT(1);
+ rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
+ }
+ return 0;
+}
+
+struct rtl8xxxu_fileops rtl8192cu_fops = {
+ .parse_efuse = rtl8192cu_parse_efuse,
+ .load_firmware = rtl8192cu_load_firmware,
+ .power_on = rtl8192cu_power_on,
+ .power_off = rtl8xxxu_power_off,
+ .reset_8051 = rtl8xxxu_reset_8051,
+ .llt_init = rtl8xxxu_init_llt_table,
+ .init_phy_bb = rtl8xxxu_gen1_init_phy_bb,
+ .init_phy_rf = rtl8192cu_init_phy_rf,
+ .phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate,
+ .config_channel = rtl8xxxu_gen1_config_channel,
+ .parse_rx_desc = rtl8xxxu_parse_rxdesc16,
+ .enable_rf = rtl8xxxu_gen1_enable_rf,
+ .disable_rf = rtl8xxxu_gen1_disable_rf,
+ .usb_quirks = rtl8xxxu_gen1_usb_quirks,
+ .set_tx_power = rtl8xxxu_gen1_set_tx_power,
+ .update_rate_mask = rtl8xxxu_update_rate_mask,
+ .report_connect = rtl8xxxu_gen1_report_connect,
+ .writeN_block_size = 128,
+ .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
+ .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .adda_1t_init = 0x0b1b25a0,
+ .adda_1t_path_on = 0x0bdb25a0,
+ .adda_2t_path_on_a = 0x04db25a4,
+ .adda_2t_path_on_b = 0x0b1b25a4,
+ .trxff_boundary = 0x27ff,
+ .pbp_rx = PBP_PAGE_SIZE_128,
+ .pbp_tx = PBP_PAGE_SIZE_128,
+ .mactable = rtl8xxxu_gen1_mac_init_table,
+};
+#endif
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
new file mode 100644
index 000000000..9461ecd31
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -0,0 +1,1525 @@
+/*
+ * RTL8XXXU mac80211 USB driver - 8192e specific subdriver
+ *
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This driver was written as a replacement for the vendor provided
+ * rtl8723au driver. As the Realtek 8xxx chips are very similar in
+ * their programming interface, I have started adding support for
+ * additional 8xxx chips like the 8192cu, 8188cus, etc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
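+/* MAC register init table for the 8192e; {0xffff, 0xff} terminates */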
+static struct rtl8xxxu_reg8val rtl8192e_mac_init_table[] = {
+ {0x011, 0xeb}, {0x012, 0x07}, {0x014, 0x75}, {0x303, 0xa7},
+ {0x428, 0x0a}, {0x429, 0x10}, {0x430, 0x00}, {0x431, 0x00},
+ {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+ {0x436, 0x07}, {0x437, 0x08}, {0x43c, 0x04}, {0x43d, 0x05},
+ {0x43e, 0x07}, {0x43f, 0x08}, {0x440, 0x5d}, {0x441, 0x01},
+ {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00},
+ {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xf0}, {0x44a, 0x0f},
+ {0x44b, 0x3e}, {0x44c, 0x10}, {0x44d, 0x00}, {0x44e, 0x00},
+ {0x44f, 0x00}, {0x450, 0x00}, {0x451, 0xf0}, {0x452, 0x0f},
+ {0x453, 0x00}, {0x456, 0x5e}, {0x460, 0x66}, {0x461, 0x66},
+ {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff}, {0x4cd, 0xff},
+ {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2}, {0x502, 0x2f},
+ {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3}, {0x506, 0x5e},
+ {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4}, {0x50a, 0x5e},
+ {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4}, {0x50e, 0x00},
+ {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a}, {0x516, 0x0a},
+ {0x525, 0x4f}, {0x540, 0x12}, {0x541, 0x64}, {0x550, 0x10},
+ {0x551, 0x10}, {0x559, 0x02}, {0x55c, 0x50}, {0x55d, 0xff},
+ {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a}, {0x620, 0xff},
+ {0x621, 0xff}, {0x622, 0xff}, {0x623, 0xff}, {0x624, 0xff},
+ {0x625, 0xff}, {0x626, 0xff}, {0x627, 0xff}, {0x638, 0x50},
+ {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e}, {0x63f, 0x0e},
+ {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00}, {0x652, 0xc8},
+ {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43}, {0x702, 0x65},
+ {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, {0x70a, 0x65},
+ {0x70b, 0x87},
+ {0xffff, 0xff},
+};
+
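+/*
+ * BB/PHY init table.  The EXT_PA_8192EU build option substitutes
+ * alternate values for a few registers on boards with an external
+ * PA/LNA.
+ */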
+static struct rtl8xxxu_reg32val rtl8192eu_phy_init_table[] = {
+ {0x800, 0x80040000}, {0x804, 0x00000003},
+ {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+ {0x810, 0x10001331}, {0x814, 0x020c3d10},
+ {0x818, 0x02220385}, {0x81c, 0x00000000},
+ {0x820, 0x01000100}, {0x824, 0x00390204},
+ {0x828, 0x01000100}, {0x82c, 0x00390204},
+ {0x830, 0x32323232}, {0x834, 0x30303030},
+ {0x838, 0x30303030}, {0x83c, 0x30303030},
+ {0x840, 0x00010000}, {0x844, 0x00010000},
+ {0x848, 0x28282828}, {0x84c, 0x28282828},
+ {0x850, 0x00000000}, {0x854, 0x00000000},
+ {0x858, 0x009a009a}, {0x85c, 0x01000014},
+ {0x860, 0x66f60000}, {0x864, 0x061f0000},
+ {0x868, 0x30303030}, {0x86c, 0x30303030},
+ {0x870, 0x00000000}, {0x874, 0x55004200},
+ {0x878, 0x08080808}, {0x87c, 0x00000000},
+ {0x880, 0xb0000c1c}, {0x884, 0x00000001},
+ {0x888, 0x00000000}, {0x88c, 0xcc0000c0},
+ {0x890, 0x00000800}, {0x894, 0xfffffffe},
+ {0x898, 0x40302010}, {0x900, 0x00000000},
+ {0x904, 0x00000023}, {0x908, 0x00000000},
+ {0x90c, 0x81121313}, {0x910, 0x806c0001},
+ {0x914, 0x00000001}, {0x918, 0x00000000},
+ {0x91c, 0x00010000}, {0x924, 0x00000001},
+ {0x928, 0x00000000}, {0x92c, 0x00000000},
+ {0x930, 0x00000000}, {0x934, 0x00000000},
+ {0x938, 0x00000000}, {0x93c, 0x00000000},
+ {0x940, 0x00000000}, {0x944, 0x00000000},
+ {0x94c, 0x00000008}, {0xa00, 0x00d0c7c8},
+ {0xa04, 0x81ff000c}, {0xa08, 0x8c838300},
+ {0xa0c, 0x2e68120f}, {0xa10, 0x95009b78},
+ {0xa14, 0x1114d028}, {0xa18, 0x00881117},
+ {0xa1c, 0x89140f00}, {0xa20, 0x1a1b0000},
+ {0xa24, 0x090e1317}, {0xa28, 0x00000204},
+ {0xa2c, 0x00d30000}, {0xa70, 0x101fff00},
+ {0xa74, 0x00000007}, {0xa78, 0x00000900},
+ {0xa7c, 0x225b0606}, {0xa80, 0x218075b1},
+ {0xb38, 0x00000000}, {0xc00, 0x48071d40},
+ {0xc04, 0x03a05633}, {0xc08, 0x000000e4},
+ {0xc0c, 0x6c6c6c6c}, {0xc10, 0x08800000},
+ {0xc14, 0x40000100}, {0xc18, 0x08800000},
+ {0xc1c, 0x40000100}, {0xc20, 0x00000000},
+ {0xc24, 0x00000000}, {0xc28, 0x00000000},
+ {0xc2c, 0x00000000}, {0xc30, 0x69e9ac47},
+ {0xc34, 0x469652af}, {0xc38, 0x49795994},
+ {0xc3c, 0x0a97971c}, {0xc40, 0x1f7c403f},
+ {0xc44, 0x000100b7}, {0xc48, 0xec020107},
+ {0xc4c, 0x007f037f},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0xc50, 0x00340220},
+#else
+ {0xc50, 0x00340020},
+#endif
+ {0xc54, 0x0080801f},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0xc58, 0x00000220},
+#else
+ {0xc58, 0x00000020},
+#endif
+ {0xc5c, 0x00248492}, {0xc60, 0x00000000},
+ {0xc64, 0x7112848b}, {0xc68, 0x47c00bff},
+ {0xc6c, 0x00000036}, {0xc70, 0x00000600},
+ {0xc74, 0x02013169}, {0xc78, 0x0000001f},
+ {0xc7c, 0x00b91612},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0xc80, 0x2d4000b5},
+#else
+ {0xc80, 0x40000100},
+#endif
+ {0xc84, 0x21f60000},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0xc88, 0x2d4000b5},
+#else
+ {0xc88, 0x40000100},
+#endif
+ {0xc8c, 0xa0e40000}, {0xc90, 0x00121820},
+ {0xc94, 0x00000000}, {0xc98, 0x00121820},
+ {0xc9c, 0x00007f7f}, {0xca0, 0x00000000},
+ {0xca4, 0x000300a0}, {0xca8, 0x00000000},
+ {0xcac, 0x00000000}, {0xcb0, 0x00000000},
+ {0xcb4, 0x00000000}, {0xcb8, 0x00000000},
+ {0xcbc, 0x28000000}, {0xcc0, 0x00000000},
+ {0xcc4, 0x00000000}, {0xcc8, 0x00000000},
+ {0xccc, 0x00000000}, {0xcd0, 0x00000000},
+ {0xcd4, 0x00000000}, {0xcd8, 0x64b22427},
+ {0xcdc, 0x00766932}, {0xce0, 0x00222222},
+ {0xce4, 0x00040000}, {0xce8, 0x77644302},
+ {0xcec, 0x2f97d40c}, {0xd00, 0x00080740},
+ {0xd04, 0x00020403}, {0xd08, 0x0000907f},
+ {0xd0c, 0x20010201}, {0xd10, 0xa0633333},
+ {0xd14, 0x3333bc43}, {0xd18, 0x7a8f5b6b},
+ {0xd1c, 0x0000007f}, {0xd2c, 0xcc979975},
+ {0xd30, 0x00000000}, {0xd34, 0x80608000},
+ {0xd38, 0x00000000}, {0xd3c, 0x00127353},
+ {0xd40, 0x00000000}, {0xd44, 0x00000000},
+ {0xd48, 0x00000000}, {0xd4c, 0x00000000},
+ {0xd50, 0x6437140a}, {0xd54, 0x00000000},
+ {0xd58, 0x00000282}, {0xd5c, 0x30032064},
+ {0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+ {0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+ {0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+ {0xd78, 0x000e3c24}, {0xd80, 0x01081008},
+ {0xd84, 0x00000800}, {0xd88, 0xf0b50000},
+ {0xe00, 0x30303030}, {0xe04, 0x30303030},
+ {0xe08, 0x03903030}, {0xe10, 0x30303030},
+ {0xe14, 0x30303030}, {0xe18, 0x30303030},
+ {0xe1c, 0x30303030}, {0xe28, 0x00000000},
+ {0xe30, 0x1000dc1f}, {0xe34, 0x10008c1f},
+ {0xe38, 0x02140102}, {0xe3c, 0x681604c2},
+ {0xe40, 0x01007c00}, {0xe44, 0x01004800},
+ {0xe48, 0xfb000000}, {0xe4c, 0x000028d1},
+ {0xe50, 0x1000dc1f}, {0xe54, 0x10008c1f},
+ {0xe58, 0x02140102}, {0xe5c, 0x28160d05},
+ {0xe60, 0x00000008}, {0xe68, 0x0fc05656},
+ {0xe6c, 0x03c09696}, {0xe70, 0x03c09696},
+ {0xe74, 0x0c005656}, {0xe78, 0x0c005656},
+ {0xe7c, 0x0c005656}, {0xe80, 0x0c005656},
+ {0xe84, 0x03c09696}, {0xe88, 0x0c005656},
+ {0xe8c, 0x03c09696}, {0xed0, 0x03c09696},
+ {0xed4, 0x03c09696}, {0xed8, 0x03c09696},
+ {0xedc, 0x0000d6d6}, {0xee0, 0x0000d6d6},
+ {0xeec, 0x0fc01616}, {0xee4, 0xb0000c1c},
+ {0xee8, 0x00000001}, {0xf14, 0x00000003},
+ {0xf4c, 0x00000000}, {0xf00, 0x00000300},
+ {0xffff, 0xffffffff},
+};
+
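+/*
+ * AGC gain tables, programmed one entry at a time through register
+ * 0xc78; the top byte looks to be the gain value and the next byte the
+ * table index.
+ */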
+static struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_std_table[] = {
+ {0xc78, 0xfb000001}, {0xc78, 0xfb010001},
+ {0xc78, 0xfb020001}, {0xc78, 0xfb030001},
+ {0xc78, 0xfb040001}, {0xc78, 0xfb050001},
+ {0xc78, 0xfa060001}, {0xc78, 0xf9070001},
+ {0xc78, 0xf8080001}, {0xc78, 0xf7090001},
+ {0xc78, 0xf60a0001}, {0xc78, 0xf50b0001},
+ {0xc78, 0xf40c0001}, {0xc78, 0xf30d0001},
+ {0xc78, 0xf20e0001}, {0xc78, 0xf10f0001},
+ {0xc78, 0xf0100001}, {0xc78, 0xef110001},
+ {0xc78, 0xee120001}, {0xc78, 0xed130001},
+ {0xc78, 0xec140001}, {0xc78, 0xeb150001},
+ {0xc78, 0xea160001}, {0xc78, 0xe9170001},
+ {0xc78, 0xe8180001}, {0xc78, 0xe7190001},
+ {0xc78, 0xc81a0001}, {0xc78, 0xc71b0001},
+ {0xc78, 0xc61c0001}, {0xc78, 0x071d0001},
+ {0xc78, 0x061e0001}, {0xc78, 0x051f0001},
+ {0xc78, 0x04200001}, {0xc78, 0x03210001},
+ {0xc78, 0xaa220001}, {0xc78, 0xa9230001},
+ {0xc78, 0xa8240001}, {0xc78, 0xa7250001},
+ {0xc78, 0xa6260001}, {0xc78, 0x85270001},
+ {0xc78, 0x84280001}, {0xc78, 0x83290001},
+ {0xc78, 0x252a0001}, {0xc78, 0x242b0001},
+ {0xc78, 0x232c0001}, {0xc78, 0x222d0001},
+ {0xc78, 0x672e0001}, {0xc78, 0x662f0001},
+ {0xc78, 0x65300001}, {0xc78, 0x64310001},
+ {0xc78, 0x63320001}, {0xc78, 0x62330001},
+ {0xc78, 0x61340001}, {0xc78, 0x45350001},
+ {0xc78, 0x44360001}, {0xc78, 0x43370001},
+ {0xc78, 0x42380001}, {0xc78, 0x41390001},
+ {0xc78, 0x403a0001}, {0xc78, 0x403b0001},
+ {0xc78, 0x403c0001}, {0xc78, 0x403d0001},
+ {0xc78, 0x403e0001}, {0xc78, 0x403f0001},
+ {0xc78, 0xfb400001}, {0xc78, 0xfb410001},
+ {0xc78, 0xfb420001}, {0xc78, 0xfb430001},
+ {0xc78, 0xfb440001}, {0xc78, 0xfb450001},
+ {0xc78, 0xfa460001}, {0xc78, 0xf9470001},
+ {0xc78, 0xf8480001}, {0xc78, 0xf7490001},
+ {0xc78, 0xf64a0001}, {0xc78, 0xf54b0001},
+ {0xc78, 0xf44c0001}, {0xc78, 0xf34d0001},
+ {0xc78, 0xf24e0001}, {0xc78, 0xf14f0001},
+ {0xc78, 0xf0500001}, {0xc78, 0xef510001},
+ {0xc78, 0xee520001}, {0xc78, 0xed530001},
+ {0xc78, 0xec540001}, {0xc78, 0xeb550001},
+ {0xc78, 0xea560001}, {0xc78, 0xe9570001},
+ {0xc78, 0xe8580001}, {0xc78, 0xe7590001},
+ {0xc78, 0xe65a0001}, {0xc78, 0xe55b0001},
+ {0xc78, 0xe45c0001}, {0xc78, 0xe35d0001},
+ {0xc78, 0xe25e0001}, {0xc78, 0xe15f0001},
+ {0xc78, 0x8a600001}, {0xc78, 0x89610001},
+ {0xc78, 0x88620001}, {0xc78, 0x87630001},
+ {0xc78, 0x86640001}, {0xc78, 0x85650001},
+ {0xc78, 0x84660001}, {0xc78, 0x83670001},
+ {0xc78, 0x82680001}, {0xc78, 0x6b690001},
+ {0xc78, 0x6a6a0001}, {0xc78, 0x696b0001},
+ {0xc78, 0x686c0001}, {0xc78, 0x676d0001},
+ {0xc78, 0x666e0001}, {0xc78, 0x656f0001},
+ {0xc78, 0x64700001}, {0xc78, 0x63710001},
+ {0xc78, 0x62720001}, {0xc78, 0x61730001},
+ {0xc78, 0x49740001}, {0xc78, 0x48750001},
+ {0xc78, 0x47760001}, {0xc78, 0x46770001},
+ {0xc78, 0x45780001}, {0xc78, 0x44790001},
+ {0xc78, 0x437a0001}, {0xc78, 0x427b0001},
+ {0xc78, 0x417c0001}, {0xc78, 0x407d0001},
+ {0xc78, 0x407e0001}, {0xc78, 0x407f0001},
+ {0xc50, 0x00040022}, {0xc50, 0x00040020},
+ {0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_highpa_table[] = {
+ {0xc78, 0xfa000001}, {0xc78, 0xf9010001},
+ {0xc78, 0xf8020001}, {0xc78, 0xf7030001},
+ {0xc78, 0xf6040001}, {0xc78, 0xf5050001},
+ {0xc78, 0xf4060001}, {0xc78, 0xf3070001},
+ {0xc78, 0xf2080001}, {0xc78, 0xf1090001},
+ {0xc78, 0xf00a0001}, {0xc78, 0xef0b0001},
+ {0xc78, 0xee0c0001}, {0xc78, 0xed0d0001},
+ {0xc78, 0xec0e0001}, {0xc78, 0xeb0f0001},
+ {0xc78, 0xea100001}, {0xc78, 0xe9110001},
+ {0xc78, 0xe8120001}, {0xc78, 0xe7130001},
+ {0xc78, 0xe6140001}, {0xc78, 0xe5150001},
+ {0xc78, 0xe4160001}, {0xc78, 0xe3170001},
+ {0xc78, 0xe2180001}, {0xc78, 0xe1190001},
+ {0xc78, 0x8a1a0001}, {0xc78, 0x891b0001},
+ {0xc78, 0x881c0001}, {0xc78, 0x871d0001},
+ {0xc78, 0x861e0001}, {0xc78, 0x851f0001},
+ {0xc78, 0x84200001}, {0xc78, 0x83210001},
+ {0xc78, 0x82220001}, {0xc78, 0x6a230001},
+ {0xc78, 0x69240001}, {0xc78, 0x68250001},
+ {0xc78, 0x67260001}, {0xc78, 0x66270001},
+ {0xc78, 0x65280001}, {0xc78, 0x64290001},
+ {0xc78, 0x632a0001}, {0xc78, 0x622b0001},
+ {0xc78, 0x612c0001}, {0xc78, 0x602d0001},
+ {0xc78, 0x472e0001}, {0xc78, 0x462f0001},
+ {0xc78, 0x45300001}, {0xc78, 0x44310001},
+ {0xc78, 0x43320001}, {0xc78, 0x42330001},
+ {0xc78, 0x41340001}, {0xc78, 0x40350001},
+ {0xc78, 0x40360001}, {0xc78, 0x40370001},
+ {0xc78, 0x40380001}, {0xc78, 0x40390001},
+ {0xc78, 0x403a0001}, {0xc78, 0x403b0001},
+ {0xc78, 0x403c0001}, {0xc78, 0x403d0001},
+ {0xc78, 0x403e0001}, {0xc78, 0x403f0001},
+ {0xc78, 0xfa400001}, {0xc78, 0xf9410001},
+ {0xc78, 0xf8420001}, {0xc78, 0xf7430001},
+ {0xc78, 0xf6440001}, {0xc78, 0xf5450001},
+ {0xc78, 0xf4460001}, {0xc78, 0xf3470001},
+ {0xc78, 0xf2480001}, {0xc78, 0xf1490001},
+ {0xc78, 0xf04a0001}, {0xc78, 0xef4b0001},
+ {0xc78, 0xee4c0001}, {0xc78, 0xed4d0001},
+ {0xc78, 0xec4e0001}, {0xc78, 0xeb4f0001},
+ {0xc78, 0xea500001}, {0xc78, 0xe9510001},
+ {0xc78, 0xe8520001}, {0xc78, 0xe7530001},
+ {0xc78, 0xe6540001}, {0xc78, 0xe5550001},
+ {0xc78, 0xe4560001}, {0xc78, 0xe3570001},
+ {0xc78, 0xe2580001}, {0xc78, 0xe1590001},
+ {0xc78, 0x8a5a0001}, {0xc78, 0x895b0001},
+ {0xc78, 0x885c0001}, {0xc78, 0x875d0001},
+ {0xc78, 0x865e0001}, {0xc78, 0x855f0001},
+ {0xc78, 0x84600001}, {0xc78, 0x83610001},
+ {0xc78, 0x82620001}, {0xc78, 0x6a630001},
+ {0xc78, 0x69640001}, {0xc78, 0x68650001},
+ {0xc78, 0x67660001}, {0xc78, 0x66670001},
+ {0xc78, 0x65680001}, {0xc78, 0x64690001},
+ {0xc78, 0x636a0001}, {0xc78, 0x626b0001},
+ {0xc78, 0x616c0001}, {0xc78, 0x606d0001},
+ {0xc78, 0x476e0001}, {0xc78, 0x466f0001},
+ {0xc78, 0x45700001}, {0xc78, 0x44710001},
+ {0xc78, 0x43720001}, {0xc78, 0x42730001},
+ {0xc78, 0x41740001}, {0xc78, 0x40750001},
+ {0xc78, 0x40760001}, {0xc78, 0x40770001},
+ {0xc78, 0x40780001}, {0xc78, 0x40790001},
+ {0xc78, 0x407a0001}, {0xc78, 0x407b0001},
+ {0xc78, 0x407c0001}, {0xc78, 0x407d0001},
+ {0xc78, 0x407e0001}, {0xc78, 0x407f0001},
+ {0xc50, 0x00040222}, {0xc50, 0x00040220},
+ {0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192eu_radioa_init_table[] = {
+ {0x7f, 0x00000082}, {0x81, 0x0003fc00},
+ {0x00, 0x00030000}, {0x08, 0x00008400},
+ {0x18, 0x00000407}, {0x19, 0x00000012},
+ {0x1b, 0x00000064}, {0x1e, 0x00080009},
+ {0x1f, 0x00000880}, {0x2f, 0x0001a060},
+ {0x3f, 0x00000000}, {0x42, 0x000060c0},
+ {0x57, 0x000d0000}, {0x58, 0x000be180},
+ {0x67, 0x00001552}, {0x83, 0x00000000},
+ {0xb0, 0x000ff9f1}, {0xb1, 0x00055418},
+ {0xb2, 0x0008cc00}, {0xb4, 0x00043083},
+ {0xb5, 0x00008166}, {0xb6, 0x0000803e},
+ {0xb7, 0x0001c69f}, {0xb8, 0x0000407f},
+ {0xb9, 0x00080001}, {0xba, 0x00040001},
+ {0xbb, 0x00000400}, {0xbf, 0x000c0000},
+ {0xc2, 0x00002400}, {0xc3, 0x00000009},
+ {0xc4, 0x00040c91}, {0xc5, 0x00099999},
+ {0xc6, 0x000000a3}, {0xc7, 0x00088820},
+ {0xc8, 0x00076c06}, {0xc9, 0x00000000},
+ {0xca, 0x00080000}, {0xdf, 0x00000180},
+ {0xef, 0x000001a0}, {0x51, 0x00069545},
+ {0x52, 0x0007e45e}, {0x53, 0x00000071},
+ {0x56, 0x00051ff3}, {0x35, 0x000000a8},
+ {0x35, 0x000001e2}, {0x35, 0x000002a8},
+ {0x36, 0x00001c24}, {0x36, 0x00009c24},
+ {0x36, 0x00011c24}, {0x36, 0x00019c24},
+ {0x18, 0x00000c07}, {0x5a, 0x00048000},
+ {0x19, 0x000739d0},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0x34, 0x0000a093}, {0x34, 0x0000908f},
+ {0x34, 0x0000808c}, {0x34, 0x0000704d},
+ {0x34, 0x0000604a}, {0x34, 0x00005047},
+ {0x34, 0x0000400a}, {0x34, 0x00003007},
+ {0x34, 0x00002004}, {0x34, 0x00001001},
+ {0x34, 0x00000000},
+#else
+ /* Regular */
+ {0x34, 0x0000add7}, {0x34, 0x00009dd4},
+ {0x34, 0x00008dd1}, {0x34, 0x00007dce},
+ {0x34, 0x00006dcb}, {0x34, 0x00005dc8},
+ {0x34, 0x00004dc5}, {0x34, 0x000034cc},
+ {0x34, 0x0000244f}, {0x34, 0x0000144c},
+ {0x34, 0x00000014},
+#endif
+ {0x00, 0x00030159},
+ {0x84, 0x00068180},
+ {0x86, 0x0000014e},
+ {0x87, 0x00048e00},
+ {0x8e, 0x00065540},
+ {0x8f, 0x00088000},
+ {0xef, 0x000020a0},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0x3b, 0x000f07b0},
+#else
+ {0x3b, 0x000f02b0},
+#endif
+ {0x3b, 0x000ef7b0}, {0x3b, 0x000d4fb0},
+ {0x3b, 0x000cf060}, {0x3b, 0x000b0090},
+ {0x3b, 0x000a0080}, {0x3b, 0x00090080},
+ {0x3b, 0x0008f780},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0x3b, 0x000787b0},
+#else
+ {0x3b, 0x00078730},
+#endif
+ {0x3b, 0x00060fb0}, {0x3b, 0x0005ffa0},
+ {0x3b, 0x00040620}, {0x3b, 0x00037090},
+ {0x3b, 0x00020080}, {0x3b, 0x0001f060},
+ {0x3b, 0x0000ffb0}, {0xef, 0x000000a0},
+ {0xfe, 0x00000000}, {0x18, 0x0000fc07},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0x1e, 0x00000001}, {0x1f, 0x00080000},
+ {0x00, 0x00033e70},
+ {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192eu_radiob_init_table[] = {
+ {0x7f, 0x00000082}, {0x81, 0x0003fc00},
+ {0x00, 0x00030000}, {0x08, 0x00008400},
+ {0x18, 0x00000407}, {0x19, 0x00000012},
+ {0x1b, 0x00000064}, {0x1e, 0x00080009},
+ {0x1f, 0x00000880}, {0x2f, 0x0001a060},
+ {0x3f, 0x00000000}, {0x42, 0x000060c0},
+ {0x57, 0x000d0000}, {0x58, 0x000be180},
+ {0x67, 0x00001552}, {0x7f, 0x00000082},
+ {0x81, 0x0003f000}, {0x83, 0x00000000},
+ {0xdf, 0x00000180}, {0xef, 0x000001a0},
+ {0x51, 0x00069545}, {0x52, 0x0007e42e},
+ {0x53, 0x00000071}, {0x56, 0x00051ff3},
+ {0x35, 0x000000a8}, {0x35, 0x000001e0},
+ {0x35, 0x000002a8}, {0x36, 0x00001ca8},
+ {0x36, 0x00009c24}, {0x36, 0x00011c24},
+ {0x36, 0x00019c24}, {0x18, 0x00000c07},
+ {0x5a, 0x00048000}, {0x19, 0x000739d0},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0x34, 0x0000a093}, {0x34, 0x0000908f},
+ {0x34, 0x0000808c}, {0x34, 0x0000704d},
+ {0x34, 0x0000604a}, {0x34, 0x00005047},
+ {0x34, 0x0000400a}, {0x34, 0x00003007},
+ {0x34, 0x00002004}, {0x34, 0x00001001},
+ {0x34, 0x00000000},
+#else
+ {0x34, 0x0000add7}, {0x34, 0x00009dd4},
+ {0x34, 0x00008dd1}, {0x34, 0x00007dce},
+ {0x34, 0x00006dcb}, {0x34, 0x00005dc8},
+ {0x34, 0x00004dc5}, {0x34, 0x000034cc},
+ {0x34, 0x0000244f}, {0x34, 0x0000144c},
+ {0x34, 0x00000014},
+#endif
+ {0x00, 0x00030159}, {0x84, 0x00068180},
+ {0x86, 0x000000ce}, {0x87, 0x00048a00},
+ {0x8e, 0x00065540}, {0x8f, 0x00088000},
+ {0xef, 0x000020a0},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0x3b, 0x000f07b0},
+#else
+ {0x3b, 0x000f02b0},
+#endif
+
+ {0x3b, 0x000ef7b0}, {0x3b, 0x000d4fb0},
+ {0x3b, 0x000cf060}, {0x3b, 0x000b0090},
+ {0x3b, 0x000a0080}, {0x3b, 0x00090080},
+ {0x3b, 0x0008f780},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0x3b, 0x000787b0},
+#else
+ {0x3b, 0x00078730},
+#endif
+ {0x3b, 0x00060fb0}, {0x3b, 0x0005ffa0},
+ {0x3b, 0x00040620}, {0x3b, 0x00037090},
+ {0x3b, 0x00020080}, {0x3b, 0x0001f060},
+ {0x3b, 0x0000ffb0}, {0xef, 0x000000a0},
+ {0x00, 0x00010159}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0x1e, 0x00000001},
+ {0x1f, 0x00080000}, {0x00, 0x00033e70},
+ {0xff, 0xffffffff}
+};
+
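+/*
+ * Program TX power for the given channel: the per-group CCK/OFDM/MCS
+ * power index bytes are replicated across the 32-bit TX AGC registers,
+ * with the HT20/HT40 and OFDM diffs from the efuse added on top.
+ */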
+static void
+rtl8192e_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+{
+ u32 val32, ofdm, mcs;
+ u8 cck, ofdmbase, mcsbase;
+ int group, tx_idx;
+
+ tx_idx = 0;
+ group = rtl8xxxu_gen2_channel_to_group(channel);
+
+ cck = priv->cck_tx_power_index_A[group];
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
+ val32 &= 0xffff00ff;
+ val32 |= (cck << 8);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+ val32 &= 0xff;
+ val32 |= ((cck << 8) | (cck << 16) | (cck << 24));
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+ ofdmbase = priv->ht40_1s_tx_power_index_A[group];
+ ofdmbase += priv->ofdm_tx_power_diff[tx_idx].a;
+ ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm);
+
+ mcsbase = priv->ht40_1s_tx_power_index_A[group];
+ if (ht40)
+ mcsbase += priv->ht40_tx_power_diff[tx_idx++].a;
+ else
+ mcsbase += priv->ht20_tx_power_diff[tx_idx++].a;
+ mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs);
+
+ if (priv->tx_paths > 1) {
+ cck = priv->cck_tx_power_index_B[group];
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK1_55_MCS32);
+ val32 &= 0xff;
+ val32 |= ((cck << 8) | (cck << 16) | (cck << 24));
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK1_55_MCS32, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+ val32 &= 0xffffff00;
+ val32 |= cck;
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+ ofdmbase = priv->ht40_1s_tx_power_index_B[group];
+ ofdmbase += priv->ofdm_tx_power_diff[tx_idx].b;
+ ofdm = ofdmbase | ofdmbase << 8 |
+ ofdmbase << 16 | ofdmbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm);
+
+ mcsbase = priv->ht40_1s_tx_power_index_B[group];
+ if (ht40)
+ mcsbase += priv->ht40_tx_power_diff[tx_idx++].b;
+ else
+ mcsbase += priv->ht20_tx_power_diff[tx_idx++].b;
+ mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs);
+ }
+}
+
+static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8192eu_efuse *efuse = &priv->efuse_wifi.efuse8192eu;
+ int i;
+
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
+
+ memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base,
+ sizeof(efuse->tx_power_index_A.cck_base));
+ memcpy(priv->cck_tx_power_index_B, efuse->tx_power_index_B.cck_base,
+ sizeof(efuse->tx_power_index_B.cck_base));
+
+ memcpy(priv->ht40_1s_tx_power_index_A,
+ efuse->tx_power_index_A.ht40_base,
+ sizeof(efuse->tx_power_index_A.ht40_base));
+ memcpy(priv->ht40_1s_tx_power_index_B,
+ efuse->tx_power_index_B.ht40_base,
+ sizeof(efuse->tx_power_index_B.ht40_base));
+
+ priv->ht20_tx_power_diff[0].a =
+ efuse->tx_power_index_A.ht20_ofdm_1s_diff.b;
+ priv->ht20_tx_power_diff[0].b =
+ efuse->tx_power_index_B.ht20_ofdm_1s_diff.b;
+
+ priv->ht40_tx_power_diff[0].a = 0;
+ priv->ht40_tx_power_diff[0].b = 0;
+
+ for (i = 1; i < RTL8723B_TX_COUNT; i++) {
+ priv->ofdm_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ofdm;
+ priv->ofdm_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ofdm;
+
+ priv->ht20_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ht20;
+ priv->ht20_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ht20;
+
+ priv->ht40_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ht40;
+ priv->ht40_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ht40;
+ }
+
+ priv->has_xtalk = 1;
+ priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f;
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
+ dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
+ unsigned char *raw = priv->efuse_wifi.raw;
+
+ dev_info(&priv->udev->dev,
+ "%s: dumping efuse (0x%02zx bytes):\n",
+ __func__, sizeof(struct rtl8192eu_efuse));
+ for (i = 0; i < sizeof(struct rtl8192eu_efuse); i += 8) {
+ dev_info(&priv->udev->dev, "%02x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ raw[i], raw[i + 1], raw[i + 2],
+ raw[i + 3], raw[i + 4], raw[i + 5],
+ raw[i + 6], raw[i + 7]);
+ }
+ }
+ return 0;
+}
+
+static int rtl8192eu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ char *fw_name;
+ int ret;
+
+ fw_name = "/*(DEBLOBBED)*/";
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+
+ return ret;
+}
+
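+/*
+ * Bring up the baseband: enable the BB/RF functional blocks, load the
+ * PHY init table, then the AGC table matching priv->hi_pa.
+ */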
+static void rtl8192eu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /* 6. 0x1f[7:0] = 0x07 */
+ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= (SYS_FUNC_USBA | SYS_FUNC_USBD | SYS_FUNC_DIO_RF |
+ SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB);
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+ rtl8xxxu_init_phy_regs(priv, rtl8192eu_phy_init_table);
+
+ if (priv->hi_pa)
+ rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8192eu_highpa_table);
+ else
+ rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8192eu_std_table);
+}
+
+static int rtl8192eu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+ int ret;
+
+ ret = rtl8xxxu_init_phy_rf(priv, rtl8192eu_radioa_init_table, RF_A);
+ if (ret)
+ goto exit;
+
+ ret = rtl8xxxu_init_phy_rf(priv, rtl8192eu_radiob_init_table, RF_B);
+
+exit:
+ return ret;
+}
+
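+/*
+ * Returns a bitmask with bit 0 set when path A TX IQK passed, i.e. the
+ * failure flag (bit 28 of 0xeac) is clear and the result registers are
+ * not stuck at their known failure values.
+ */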
+static int rtl8192eu_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_eac, reg_e94, reg_e9c;
+ int result = 0;
+
+ /*
+ * TX IQK
+ * PA/PAD controlled by 0x0
+ */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00180);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* Path A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82140303);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x68160000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000))
+ result |= 0x01;
+
+ return result;
+}
+
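+/*
+ * Path A RX IQK runs in two stages: a TX pass first (bit 0 of the
+ * return value), whose result is fed back through REG_TX_IQK, then the
+ * RX pass proper (bit 1).
+ */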
+static int rtl8192eu_rx_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_ea4, reg_eac, reg_e94, reg_e9c, val32;
+ int result = 0;
+
+ /* Leave IQK mode */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00);
+
+ /* Enable path A PA in TX IQK mode */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, 0x800a0);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf117b);
+
+ /* PA/PAD control by 0x56, and set = 0x0 */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00980);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_56, 0x51000);
+
+ /* Enter IQK mode */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* TX IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+	/* Path A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x68160c1f);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000)) {
+ result |= 0x01;
+ } else {
+ /* PA/PAD controlled by 0x0 */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x180);
+ goto out;
+ }
+
+ val32 = 0x80007c00 |
+ (reg_e94 & 0x03ff0000) | ((reg_e9c >> 16) & 0x03ff);
+ rtl8xxxu_write32(priv, REG_TX_IQK, val32);
+
+ /* Modify RX IQK mode table */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, 0x800a0);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7ffa);
+
+ /* PA/PAD control by 0x56, and set = 0x0 */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00980);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_56, 0x51000);
+
+ /* Enter IQK mode */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* IQK setting */
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* Path A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160c1f);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a891);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
+
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x180);
+
+ if (!(reg_eac & BIT(27)) &&
+ ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
+ ((reg_eac & 0x03ff0000) != 0x00360000))
+ result |= 0x02;
+ else
+ dev_warn(&priv->udev->dev, "%s: Path A RX IQK failed!\n",
+ __func__);
+
+out:
+ return result;
+}
+
+static int rtl8192eu_iqk_path_b(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_eac, reg_eb4, reg_ebc;
+ int result = 0;
+
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00180);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* Path B IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x821403e2);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x68160000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00492911);
+
+	/* One shot, path B LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(1);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+ reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+
+ if (!(reg_eac & BIT(31)) &&
+ ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
+ ((reg_ebc & 0x03ff0000) != 0x00420000))
+ result |= 0x01;
+ else
+ dev_warn(&priv->udev->dev, "%s: Path B IQK failed!\n",
+ __func__);
+
+ return result;
+}
+
+static int rtl8192eu_rx_iqk_path_b(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc, val32;
+ int result = 0;
+
+ /* Leave IQK mode */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
+	/* Enable path B PA in TX IQK mode */
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_WE_LUT, 0x800a0);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G2, 0xf117b);
+
+ /* PA/PAD control by 0x56, and set = 0x0 */
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00980);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_56, 0x51000);
+
+ /* Enter IQK mode */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* TX IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+	/* Path B IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82160c1f);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x68160c1f);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+	/* One shot, path B LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+ reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+
+ if (!(reg_eac & BIT(31)) &&
+ ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
+ ((reg_ebc & 0x03ff0000) != 0x00420000)) {
+ result |= 0x01;
+ } else {
+ /*
+ * PA/PAD controlled by 0x0
+	 * Vendor driver restores RF_A here, which I believe is a bug
+ */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x180);
+ goto out;
+ }
+
+ val32 = 0x80007c00 |
+ (reg_eb4 & 0x03ff0000) | ((reg_ebc >> 16) & 0x03ff);
+ rtl8xxxu_write32(priv, REG_TX_IQK, val32);
+
+ /* Modify RX IQK mode table */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_WE_LUT, 0x800a0);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G2, 0xf7ffa);
+
+ /* PA/PAD control by 0x56, and set = 0x0 */
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00980);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_56, 0x51000);
+
+ /* Enter IQK mode */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* IQK setting */
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+	/* Path B IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x18008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160c1f);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a891);
+
+	/* One shot, path B LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
+ reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
+
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x180);
+
+ if (!(reg_eac & BIT(30)) &&
+ ((reg_ec4 & 0x03ff0000) != 0x01320000) &&
+ ((reg_ecc & 0x03ff0000) != 0x00360000))
+ result |= 0x02;
+ else
+ dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n",
+ __func__);
+
+out:
+ return result;
+}
+
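+/*
+ * One IQ calibration round.  Round 0 saves the ADDA, MAC and BB
+ * registers that calibration clobbers; later rounds restore them once
+ * the per-path results have been captured in result[t][0..7].
+ */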
+static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+ int result[][8], int t)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 i, val32;
+ int path_a_ok, path_b_ok;
+ int retry = 2;
+ const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+ REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+ REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+ REG_TX_OFDM_BBON, REG_TX_TO_RX,
+ REG_TX_TO_TX, REG_RX_CCK,
+ REG_RX_OFDM, REG_RX_WAIT_RIFS,
+ REG_RX_TO_RX, REG_STANDBY,
+ REG_SLEEP, REG_PMPD_ANAEN
+ };
+ const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ REG_TXPAUSE, REG_BEACON_CTRL,
+ REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+ };
+ const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+ REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+ REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+ REG_FPGA0_XB_RF_INT_OE, REG_CCK0_AFE_SETTING
+ };
+ u8 xa_agc = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1) & 0xff;
+ u8 xb_agc = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1) & 0xff;
+
+ /*
+	 * Note: IQ calibration must be performed after loading
+	 * PHY_REG.txt and radio_a.txt/radio_b.txt
+ */
+
+ if (t == 0) {
+ /* Save ADDA parameters, turn Path A ADDA on */
+ rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+ rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+ rtl8xxxu_save_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+ }
+
+ rtl8xxxu_path_adda_on(priv, adda_regs, true);
+
+ /* MAC settings */
+ rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
+
+ val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
+ val32 |= 0x0f000000;
+ rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
+
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+ rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+ rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22208200);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
+ val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+ rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
+ val32 |= BIT(10);
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, val32);
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE);
+ val32 |= BIT(10);
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32);
+
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8192eu_iqk_path_a(priv);
+ if (path_a_ok == 0x01) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_BEFORE_IQK_A);
+ result[t][0] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_AFTER_IQK_A);
+ result[t][1] = (val32 >> 16) & 0x3ff;
+
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8192eu_rx_iqk_path_a(priv);
+ if (path_a_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_BEFORE_IQK_A_2);
+ result[t][2] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_AFTER_IQK_A_2);
+ result[t][3] = (val32 >> 16) & 0x3ff;
+
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__);
+
+ if (priv->rf_paths > 1) {
+ /* Path A into standby */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* Turn Path B ADDA on */
+ rtl8xxxu_path_adda_on(priv, adda_regs, false);
+
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ for (i = 0; i < retry; i++) {
+ path_b_ok = rtl8192eu_iqk_path_b(priv);
+ if (path_b_ok == 0x01) {
+ val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+ result[t][4] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+ result[t][5] = (val32 >> 16) & 0x3ff;
+ break;
+ }
+ }
+
+ if (!path_b_ok)
+ dev_dbg(dev, "%s: Path B IQK failed!\n", __func__);
+
+ for (i = 0; i < retry; i++) {
+ path_b_ok = rtl8192eu_rx_iqk_path_b(priv);
+ if (path_b_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_BEFORE_IQK_B_2);
+ result[t][6] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_AFTER_IQK_B_2);
+ result[t][7] = (val32 >> 16) & 0x3ff;
+ break;
+ }
+ }
+
+ if (!path_b_ok)
+ dev_dbg(dev, "%s: Path B RX IQK failed!\n", __func__);
+ }
+
+ /* Back to BB mode, load original value */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
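+
+	/*
+	 * On the later rounds (t > 0), restore the ADDA, MAC and BB state
+	 * saved during round 0 and reload the RX gain and IQC defaults.
+	 */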
+ if (t) {
+ /* Reload ADDA power saving parameters */
+ rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+
+ /* Reload MAC parameters */
+ rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+
+ /* Reload BB parameters */
+ rtl8xxxu_restore_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+
+ /* Restore RX initial gain */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+ val32 &= 0xffffff00;
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | 0x50);
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | xa_agc);
+
+ if (priv->rf_paths > 1) {
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1);
+ val32 &= 0xffffff00;
+ rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
+ val32 | 0x50);
+ rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
+ val32 | xb_agc);
+ }
+
+ /* Load 0xe30 IQC default value */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
+ }
+}
+
+static void rtl8192eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ int result[4][8]; /* last is final result */
+ int i, candidate;
+ bool path_a_ok, path_b_ok;
+ u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
+ u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ bool simu;
+
+ memset(result, 0, sizeof(result));
+ candidate = -1;
+
+ path_a_ok = false;
+ path_b_ok = false;
+
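+
+	/*
+	 * Run the full calibration up to three times and pick the first
+	 * pair of rounds whose results agree according to
+	 * rtl8xxxu_gen2_simularity_compare().
+	 */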
+ for (i = 0; i < 3; i++) {
+ rtl8192eu_phy_iqcalibrate(priv, result, i);
+
+ if (i == 1) {
+ simu = rtl8xxxu_gen2_simularity_compare(priv,
+ result, 0, 1);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+ }
+
+ if (i == 2) {
+ simu = rtl8xxxu_gen2_simularity_compare(priv,
+ result, 0, 2);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+
+ simu = rtl8xxxu_gen2_simularity_compare(priv,
+ result, 1, 2);
+ if (simu)
+ candidate = 1;
+ else
+ candidate = 3;
+ }
+ }
+
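+
+	/*
+	 * Note: this loop merely leaves reg_* holding the values from the
+	 * last result row; they are overwritten below whenever a valid
+	 * candidate was found.
+	 */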
+ for (i = 0; i < 4; i++) {
+ reg_e94 = result[i][0];
+ reg_e9c = result[i][1];
+ reg_ea4 = result[i][2];
+ reg_eac = result[i][3];
+ reg_eb4 = result[i][4];
+ reg_ebc = result[i][5];
+ reg_ec4 = result[i][6];
+ reg_ecc = result[i][7];
+ }
+
+ if (candidate >= 0) {
+ reg_e94 = result[candidate][0];
+ priv->rege94 = reg_e94;
+ reg_e9c = result[candidate][1];
+ priv->rege9c = reg_e9c;
+ reg_ea4 = result[candidate][2];
+ reg_eac = result[candidate][3];
+ reg_eb4 = result[candidate][4];
+ priv->regeb4 = reg_eb4;
+ reg_ebc = result[candidate][5];
+ priv->regebc = reg_ebc;
+ reg_ec4 = result[candidate][6];
+ reg_ecc = result[candidate][7];
+ dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
+		dev_dbg(dev,
+			"%s: e94=%x e9c=%x ea4=%x eac=%x eb4=%x "
+			"ebc=%x ec4=%x ecc=%x\n", __func__, reg_e94, reg_e9c,
+			reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc);
+ path_a_ok = true;
+ path_b_ok = true;
+ } else {
+ reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100;
+ reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0;
+ }
+
+ if (reg_e94 && candidate >= 0)
+ rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
+ candidate, (reg_ea4 == 0));
+
+ if (priv->rf_paths > 1)
+ rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
+ candidate, (reg_ec4 == 0));
+
+ rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
+ priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+}
+
+/*
+ * This is presumably needed for the 8723bu as well
+ */
+static void rtl8192e_crystal_afe_adjust(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+
+ /*
+ * 40MHz crystal source, MAC 0x28[2] = 0
+ */
+ val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+ val8 &= 0xfb;
+ rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+
+ val32 = rtl8xxxu_read32(priv, REG_AFE_CTRL4);
+ val32 &= 0xfffffc7f;
+ rtl8xxxu_write32(priv, REG_AFE_CTRL4, val32);
+
+ /*
+ * 92e AFE parameter
+ * AFE PLL KVCO selection, MAC 0x28[6] = 1
+ * Note: the mask below (0xbf) actually clears bit 6 rather than
+ * setting it.
+ */
+ val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+ val8 &= 0xbf;
+ rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+
+ /*
+ * AFE PLL KVCO selection, MAC 0x78[21]=0
+ */
+ val32 = rtl8xxxu_read32(priv, REG_AFE_CTRL4);
+ val32 &= 0xffdfffff;
+ rtl8xxxu_write32(priv, REG_AFE_CTRL4, val32);
+}
+
+static void rtl8192e_disabled_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+	/* Clear suspend enable and power down enable */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(BIT(3) | BIT(4));
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+}
+
+static int rtl8192e_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+ int count, ret = 0;
+
+	/* disable HWPDN 0x04[15] = 0 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~BIT(7);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* disable SW LPS 0x04[10] = 0 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~BIT(2);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* disable WL suspend */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(BIT(3) | BIT(4));
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* wait till 0x04[17] = 1, power ready */
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if (val32 & BIT(17))
+ break;
+
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* We should be able to optimize the following three entries into one */
+
+	/* release WLON reset 0x04[16] = 1 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+	/* set APS_FSMCO_MAC_ENABLE, then poll until the hardware clears it */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_MAC_ENABLE;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+ ret = 0;
+ break;
+ }
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv)
+{
+ u16 val16;
+ u32 val32;
+ int ret;
+
+ ret = 0;
+
+ val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
+ if (val32 & SYS_CFG_SPS_LDO_SEL) {
+ rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0xc3);
+ } else {
+ /*
+ * Raise 1.2V voltage
+ */
+ val32 = rtl8xxxu_read32(priv, REG_8192E_LDOV12_CTRL);
+ val32 &= 0xff0fffff;
+ val32 |= 0x00500000;
+ rtl8xxxu_write32(priv, REG_8192E_LDOV12_CTRL, val32);
+ rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0x83);
+ }
+
+ /*
+ * Adjust AFE before enabling PLL
+ */
+ rtl8192e_crystal_afe_adjust(priv);
+ rtl8192e_disabled_to_emu(priv);
+
+ ret = rtl8192e_emu_to_active(priv);
+ if (ret)
+ goto exit;
+
+ rtl8xxxu_write16(priv, REG_CR, 0x0000);
+
+ /*
+ * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+ * Set CR bit10 to enable 32k calibration.
+ */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+ CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+ CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
+ CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+exit:
+ return ret;
+}
+
+static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+ u8 val8;
+
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
+ val8 |= BIT(5);
+ rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
+
+ /*
+ * WLAN action by PTA
+ */
+ rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
+
+ val32 = rtl8xxxu_read32(priv, REG_PWR_DATA);
+ val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
+ rtl8xxxu_write32(priv, REG_PWR_DATA, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER);
+ val32 |= (BIT(0) | BIT(1));
+ rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32);
+
+ rtl8xxxu_write8(priv, REG_RFE_CTRL_ANTA_SRC, 0x77);
+
+ val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+ val32 &= ~BIT(24);
+ val32 |= BIT(23);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+ /*
+ * Fix external switch Main->S1, Aux->S0
+ */
+ val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
+ val8 &= ~BIT(0);
+ rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
+}
+
+struct rtl8xxxu_fileops rtl8192eu_fops = {
+ .parse_efuse = rtl8192eu_parse_efuse,
+ .load_firmware = rtl8192eu_load_firmware,
+ .power_on = rtl8192eu_power_on,
+ .power_off = rtl8xxxu_power_off,
+ .reset_8051 = rtl8xxxu_reset_8051,
+ .llt_init = rtl8xxxu_auto_llt_table,
+ .init_phy_bb = rtl8192eu_init_phy_bb,
+ .init_phy_rf = rtl8192eu_init_phy_rf,
+ .phy_iq_calibrate = rtl8192eu_phy_iq_calibrate,
+ .config_channel = rtl8xxxu_gen2_config_channel,
+ .parse_rx_desc = rtl8xxxu_parse_rxdesc24,
+ .enable_rf = rtl8192e_enable_rf,
+ .disable_rf = rtl8xxxu_gen2_disable_rf,
+ .usb_quirks = rtl8xxxu_gen2_usb_quirks,
+ .set_tx_power = rtl8192e_set_tx_power,
+ .update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
+ .report_connect = rtl8xxxu_gen2_report_connect,
+ .writeN_block_size = 128,
+ .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
+ .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
+ .has_s0s1 = 0,
+ .adda_1t_init = 0x0fc01616,
+ .adda_1t_path_on = 0x0fc01616,
+ .adda_2t_path_on_a = 0x0fc01616,
+ .adda_2t_path_on_b = 0x0fc01616,
+ .trxff_boundary = 0x3cff,
+ .mactable = rtl8192e_mac_init_table,
+ .total_page_num = TX_TOTAL_PAGE_NUM_8192E,
+ .page_num_hi = TX_PAGE_NUM_HI_PQ_8192E,
+ .page_num_lo = TX_PAGE_NUM_LO_PQ_8192E,
+ .page_num_norm = TX_PAGE_NUM_NORM_PQ_8192E,
+};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
new file mode 100644
index 000000000..cd6bf209d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
@@ -0,0 +1,397 @@
+/*
+ * RTL8XXXU mac80211 USB driver - 8723a specific subdriver
+ *
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This driver was written as a replacement for the vendor provided
+ * rtl8723au driver. As the Realtek 8xxx chips are very similar in
+ * their programming interface, I have started adding support for
+ * additional 8xxx chips like the 8192cu, 8188cus, etc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
+static struct rtl8xxxu_power_base rtl8723a_power_base = {
+ .reg_0e00 = 0x0a0c0c0c,
+ .reg_0e04 = 0x02040608,
+ .reg_0e08 = 0x00000000,
+ .reg_086c = 0x00000000,
+
+ .reg_0e10 = 0x0a0c0d0e,
+ .reg_0e14 = 0x02040608,
+ .reg_0e18 = 0x0a0c0d0e,
+ .reg_0e1c = 0x02040608,
+
+ .reg_0830 = 0x0a0c0c0c,
+ .reg_0834 = 0x02040608,
+ .reg_0838 = 0x00000000,
+ .reg_086c_2 = 0x00000000,
+
+ .reg_083c = 0x0a0c0d0e,
+ .reg_0848 = 0x02040608,
+ .reg_084c = 0x0a0c0d0e,
+ .reg_0868 = 0x02040608,
+};
+
+static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00039c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001a3f1}, {0x0b, 0x00014787},
+ {0x0c, 0x000896fe}, {0x0d, 0x0000e02c},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x19, 0x00000000}, {0x1a, 0x00030355},
+ {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+ {0x1d, 0x000a1250}, {0x1e, 0x0000024f},
+ {0x1f, 0x00000000}, {0x20, 0x0000b614},
+ {0x21, 0x0006c000}, {0x22, 0x00000000},
+ {0x23, 0x00001558}, {0x24, 0x00000060},
+ {0x25, 0x00000483}, {0x26, 0x0004f000},
+ {0x27, 0x000ec7d9}, {0x28, 0x00057730},
+ {0x29, 0x00004783}, {0x2a, 0x00000001},
+ {0x2b, 0x00021334}, {0x2a, 0x00000000},
+ {0x2b, 0x00000054}, {0x2a, 0x00000001},
+ {0x2b, 0x00000808}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+ {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+ {0x2b, 0x00000808}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+ {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+ {0x2b, 0x00000808}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+ {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+ {0x2b, 0x00000709}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+ {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+ {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+ {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+ {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+ {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+ {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+ {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+ {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+ {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+ {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+ {0x10, 0x0002000f}, {0x11, 0x000203f9},
+ {0x10, 0x0003000f}, {0x11, 0x000ff500},
+ {0x10, 0x00000000}, {0x11, 0x00000000},
+ {0x10, 0x0008000f}, {0x11, 0x0003f100},
+ {0x10, 0x0009000f}, {0x11, 0x00023100},
+ {0x12, 0x00032000}, {0x12, 0x00071000},
+ {0x12, 0x000b0000}, {0x12, 0x000fc000},
+ {0x13, 0x000287b3}, {0x13, 0x000244b7},
+ {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+ {0x13, 0x00018493}, {0x13, 0x0001429b},
+ {0x13, 0x00010299}, {0x13, 0x0000c29c},
+ {0x13, 0x000081a0}, {0x13, 0x000040ac},
+ {0x13, 0x00000020}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f474},
+ {0x15, 0x0004f477}, {0x15, 0x0008f455},
+ {0x15, 0x000cf455}, {0x16, 0x00000339},
+ {0x16, 0x00040339}, {0x16, 0x00080339},
+ {0x16, 0x000c0366}, {0x00, 0x00010159},
+ {0x18, 0x0000f401}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0x1f, 0x00000003},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0x1e, 0x00000247}, {0x1f, 0x00000000},
+ {0x00, 0x00030159},
+ {0xff, 0xffffffff}
+};
+
+static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8723au_efuse *efuse = &priv->efuse_wifi.efuse8723;
+
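+
+	/* 0x8129 appears to be the eFuse ID word common to these Realtek parts */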
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
+
+ memcpy(priv->cck_tx_power_index_A,
+ efuse->cck_tx_power_index_A,
+ sizeof(efuse->cck_tx_power_index_A));
+ memcpy(priv->cck_tx_power_index_B,
+ efuse->cck_tx_power_index_B,
+ sizeof(efuse->cck_tx_power_index_B));
+
+ memcpy(priv->ht40_1s_tx_power_index_A,
+ efuse->ht40_1s_tx_power_index_A,
+ sizeof(efuse->ht40_1s_tx_power_index_A));
+ memcpy(priv->ht40_1s_tx_power_index_B,
+ efuse->ht40_1s_tx_power_index_B,
+ sizeof(efuse->ht40_1s_tx_power_index_B));
+
+ memcpy(priv->ht20_tx_power_index_diff,
+ efuse->ht20_tx_power_index_diff,
+ sizeof(efuse->ht20_tx_power_index_diff));
+ memcpy(priv->ofdm_tx_power_index_diff,
+ efuse->ofdm_tx_power_index_diff,
+ sizeof(efuse->ofdm_tx_power_index_diff));
+
+ memcpy(priv->ht40_max_power_offset,
+ efuse->ht40_max_power_offset,
+ sizeof(efuse->ht40_max_power_offset));
+ memcpy(priv->ht20_max_power_offset,
+ efuse->ht20_max_power_offset,
+ sizeof(efuse->ht20_max_power_offset));
+
+ if (priv->efuse_wifi.efuse8723.version >= 0x01) {
+ priv->has_xtalk = 1;
+ priv->xtalk = priv->efuse_wifi.efuse8723.xtal_k & 0x3f;
+ }
+
+ priv->power_base = &rtl8723a_power_base;
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n",
+ efuse->vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.41s\n",
+ efuse->device_name);
+ return 0;
+}
+
+static int rtl8723au_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ char *fw_name;
+ int ret;
+
+ switch (priv->chip_cut) {
+ case 0:
+ fw_name = "/*(DEBLOBBED)*/";
+ break;
+ case 1:
+ if (priv->enable_bluetooth)
+ fw_name = "/*(DEBLOBBED)*/";
+ else
+ fw_name = "/*(DEBLOBBED)*/";
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+ return ret;
+}
+
+static int rtl8723au_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+ int ret;
+
+ ret = rtl8xxxu_init_phy_rf(priv, rtl8723au_radioa_1t_init_table, RF_A);
+
+	/* Reduce 80 MHz spur by toggling AFE_PLL_CTRL bit 0 */
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d);
+ rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+ rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82);
+ rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+
+ return ret;
+}
+
+static int rtl8723a_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+ int count, ret = 0;
+
+	/* 0x20[0] = 1, enable LDOA12 MACRO block for all interfaces */
+ val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+ val8 |= LDOA15_ENABLE;
+ rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+	/*
+	 * 0x67[0] = 0 to disable BT_GPS_SEL pins
+	 * Note: the code below clears bit 4, not bit 0.
+	 */
+ val8 = rtl8xxxu_read8(priv, 0x0067);
+ val8 &= ~BIT(4);
+ rtl8xxxu_write8(priv, 0x0067, val8);
+
+ mdelay(1);
+
+	/* 0x00[5] = 0, release analog IPS to digital; 1: isolation */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+ val8 &= ~SYS_ISO_ANALOG_IPS;
+ rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+	/* disable SW LPS 0x04[10] = 0 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~BIT(2);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* wait till 0x04[17] = 1, power ready */
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if (val32 & BIT(17))
+ break;
+
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* We should be able to optimize the following three entries into one */
+
+	/* release WLON reset 0x04[16] = 1 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+	/* disable HWPDN 0x04[15] = 0 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~BIT(7);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* disable WL suspend */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(BIT(3) | BIT(4));
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* set APS_FSMCO_MAC_ENABLE, then poll until the hardware clears it */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_MAC_ENABLE;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+ ret = 0;
+ break;
+ }
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* 0x4C[23] = 0x4E[7] = 1, switch DPDT_SEL_P output from WL BB */
+ /*
+	 * Note: the vendor driver actually clears this bit, despite the
+	 * documentation claiming it's being set!
+ */
+ val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+ val8 |= LEDCFG2_DPDT_SELECT;
+ val8 &= ~LEDCFG2_DPDT_SELECT;
+ rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+exit:
+ return ret;
+}
+
+static int rtl8723au_power_on(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int ret;
+
+ /*
+ * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
+ */
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
+
+ rtl8xxxu_disabled_to_emu(priv);
+
+ ret = rtl8723a_emu_to_active(priv);
+ if (ret)
+ goto exit;
+
+ /*
+ * 0x0004[19] = 1, reset 8051
+ */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+ val8 |= BIT(3);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+ /*
+ * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+ * Set CR bit10 to enable 32k calibration.
+ */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+ CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+ CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
+ CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ /* For EFuse PG */
+ val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+ val32 &= ~(BIT(28) | BIT(29) | BIT(30));
+ val32 |= (0x06 << 28);
+ rtl8xxxu_write32(priv, REG_EFUSE_CTRL, val32);
+exit:
+ return ret;
+}
+
+struct rtl8xxxu_fileops rtl8723au_fops = {
+ .parse_efuse = rtl8723au_parse_efuse,
+ .load_firmware = rtl8723au_load_firmware,
+ .power_on = rtl8723au_power_on,
+ .power_off = rtl8xxxu_power_off,
+ .reset_8051 = rtl8xxxu_reset_8051,
+ .llt_init = rtl8xxxu_init_llt_table,
+ .init_phy_bb = rtl8xxxu_gen1_init_phy_bb,
+ .init_phy_rf = rtl8723au_init_phy_rf,
+ .phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate,
+ .config_channel = rtl8xxxu_gen1_config_channel,
+ .parse_rx_desc = rtl8xxxu_parse_rxdesc16,
+ .enable_rf = rtl8xxxu_gen1_enable_rf,
+ .disable_rf = rtl8xxxu_gen1_disable_rf,
+ .usb_quirks = rtl8xxxu_gen1_usb_quirks,
+ .set_tx_power = rtl8xxxu_gen1_set_tx_power,
+ .update_rate_mask = rtl8xxxu_update_rate_mask,
+ .report_connect = rtl8xxxu_gen1_report_connect,
+ .writeN_block_size = 1024,
+ .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
+ .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .adda_1t_init = 0x0b1b25a0,
+ .adda_1t_path_on = 0x0bdb25a0,
+ .adda_2t_path_on_a = 0x04db25a4,
+ .adda_2t_path_on_b = 0x0b1b25a4,
+ .trxff_boundary = 0x27ff,
+ .pbp_rx = PBP_PAGE_SIZE_128,
+ .pbp_tx = PBP_PAGE_SIZE_128,
+ .mactable = rtl8xxxu_gen1_mac_init_table,
+};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
new file mode 100644
index 000000000..6f42f0a16
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -0,0 +1,1682 @@
+/*
+ * RTL8XXXU mac80211 USB driver - 8723b specific subdriver
+ *
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This driver was written as a replacement for the vendor provided
+ * rtl8723au driver. As the Realtek 8xxx chips are very similar in
+ * their programming interface, I have started adding support for
+ * additional 8xxx chips like the 8192cu, 8188cus, etc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
+static struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = {
+ {0x02f, 0x30}, {0x035, 0x00}, {0x039, 0x08}, {0x04e, 0xe0},
+ {0x064, 0x00}, {0x067, 0x20}, {0x428, 0x0a}, {0x429, 0x10},
+ {0x430, 0x00}, {0x431, 0x00},
+ {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+ {0x436, 0x07}, {0x437, 0x08}, {0x43c, 0x04}, {0x43d, 0x05},
+ {0x43e, 0x07}, {0x43f, 0x08}, {0x440, 0x5d}, {0x441, 0x01},
+ {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00},
+ {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xf0}, {0x44a, 0x0f},
+ {0x44b, 0x3e}, {0x44c, 0x10}, {0x44d, 0x00}, {0x44e, 0x00},
+ {0x44f, 0x00}, {0x450, 0x00}, {0x451, 0xf0}, {0x452, 0x0f},
+ {0x453, 0x00}, {0x456, 0x5e}, {0x460, 0x66}, {0x461, 0x66},
+ {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
+ {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
+ {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
+ {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
+ {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
+ {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
+ {0x516, 0x0a}, {0x525, 0x4f},
+ {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55c, 0x50},
+ {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
+ {0x620, 0xff}, {0x621, 0xff}, {0x622, 0xff}, {0x623, 0xff},
+ {0x624, 0xff}, {0x625, 0xff}, {0x626, 0xff}, {0x627, 0xff},
+ {0x638, 0x50}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e},
+ {0x63f, 0x0e}, {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00},
+ {0x652, 0xc8}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43},
+ {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43},
+ {0x70a, 0x65}, {0x70b, 0x87}, {0x765, 0x18}, {0x76e, 0x04},
+ {0xffff, 0xff},
+};
+
+static struct rtl8xxxu_reg32val rtl8723b_phy_1t_init_table[] = {
+ {0x800, 0x80040000}, {0x804, 0x00000003},
+ {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+ {0x810, 0x10001331}, {0x814, 0x020c3d10},
+ {0x818, 0x02200385}, {0x81c, 0x00000000},
+ {0x820, 0x01000100}, {0x824, 0x00190204},
+ {0x828, 0x00000000}, {0x82c, 0x00000000},
+ {0x830, 0x00000000}, {0x834, 0x00000000},
+ {0x838, 0x00000000}, {0x83c, 0x00000000},
+ {0x840, 0x00010000}, {0x844, 0x00000000},
+ {0x848, 0x00000000}, {0x84c, 0x00000000},
+ {0x850, 0x00000000}, {0x854, 0x00000000},
+ {0x858, 0x569a11a9}, {0x85c, 0x01000014},
+ {0x860, 0x66f60110}, {0x864, 0x061f0649},
+ {0x868, 0x00000000}, {0x86c, 0x27272700},
+ {0x870, 0x07000760}, {0x874, 0x25004000},
+ {0x878, 0x00000808}, {0x87c, 0x00000000},
+ {0x880, 0xb0000c1c}, {0x884, 0x00000001},
+ {0x888, 0x00000000}, {0x88c, 0xccc000c0},
+ {0x890, 0x00000800}, {0x894, 0xfffffffe},
+ {0x898, 0x40302010}, {0x89c, 0x00706050},
+ {0x900, 0x00000000}, {0x904, 0x00000023},
+ {0x908, 0x00000000}, {0x90c, 0x81121111},
+ {0x910, 0x00000002}, {0x914, 0x00000201},
+ {0xa00, 0x00d047c8}, {0xa04, 0x80ff800c},
+ {0xa08, 0x8c838300}, {0xa0c, 0x2e7f120f},
+ {0xa10, 0x9500bb78}, {0xa14, 0x1114d028},
+ {0xa18, 0x00881117}, {0xa1c, 0x89140f00},
+ {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
+ {0xa28, 0x00000204}, {0xa2c, 0x00d30000},
+ {0xa70, 0x101fbf00}, {0xa74, 0x00000007},
+ {0xa78, 0x00000900}, {0xa7c, 0x225b0606},
+ {0xa80, 0x21806490}, {0xb2c, 0x00000000},
+ {0xc00, 0x48071d40}, {0xc04, 0x03a05611},
+ {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
+ {0xc10, 0x08800000}, {0xc14, 0x40000100},
+ {0xc18, 0x08800000}, {0xc1c, 0x40000100},
+ {0xc20, 0x00000000}, {0xc24, 0x00000000},
+ {0xc28, 0x00000000}, {0xc2c, 0x00000000},
+ {0xc30, 0x69e9ac44}, {0xc34, 0x469652af},
+ {0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
+ {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
+ {0xc48, 0xec020107}, {0xc4c, 0x007f037f},
+ {0xc50, 0x69553420}, {0xc54, 0x43bc0094},
+ {0xc58, 0x00013149}, {0xc5c, 0x00250492},
+ {0xc60, 0x00000000}, {0xc64, 0x7112848b},
+ {0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
+ {0xc70, 0x2c7f000d}, {0xc74, 0x020610db},
+ {0xc78, 0x0000001f}, {0xc7c, 0x00b91612},
+ {0xc80, 0x390000e4}, {0xc84, 0x20f60000},
+ {0xc88, 0x40000100}, {0xc8c, 0x20200000},
+ {0xc90, 0x00020e1a}, {0xc94, 0x00000000},
+ {0xc98, 0x00020e1a}, {0xc9c, 0x00007f7f},
+ {0xca0, 0x00000000}, {0xca4, 0x000300a0},
+ {0xca8, 0x00000000}, {0xcac, 0x00000000},
+ {0xcb0, 0x00000000}, {0xcb4, 0x00000000},
+ {0xcb8, 0x00000000}, {0xcbc, 0x28000000},
+ {0xcc0, 0x00000000}, {0xcc4, 0x00000000},
+ {0xcc8, 0x00000000}, {0xccc, 0x00000000},
+ {0xcd0, 0x00000000}, {0xcd4, 0x00000000},
+ {0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
+ {0xce0, 0x00222222}, {0xce4, 0x00000000},
+ {0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
+ {0xd00, 0x00000740}, {0xd04, 0x40020401},
+ {0xd08, 0x0000907f}, {0xd0c, 0x20010201},
+ {0xd10, 0xa0633333}, {0xd14, 0x3333bc53},
+ {0xd18, 0x7a8f5b6f}, {0xd2c, 0xcc979975},
+ {0xd30, 0x00000000}, {0xd34, 0x80608000},
+ {0xd38, 0x00000000}, {0xd3c, 0x00127353},
+ {0xd40, 0x00000000}, {0xd44, 0x00000000},
+ {0xd48, 0x00000000}, {0xd4c, 0x00000000},
+ {0xd50, 0x6437140a}, {0xd54, 0x00000000},
+ {0xd58, 0x00000282}, {0xd5c, 0x30032064},
+ {0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+ {0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+ {0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+ {0xd78, 0x000e3c24}, {0xe00, 0x2d2d2d2d},
+ {0xe04, 0x2d2d2d2d}, {0xe08, 0x0390272d},
+ {0xe10, 0x2d2d2d2d}, {0xe14, 0x2d2d2d2d},
+ {0xe18, 0x2d2d2d2d}, {0xe1c, 0x2d2d2d2d},
+ {0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
+ {0xe34, 0x10008c1f}, {0xe38, 0x02140102},
+ {0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
+ {0xe44, 0x01004800}, {0xe48, 0xfb000000},
+ {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
+ {0xe54, 0x10008c1f}, {0xe58, 0x02140102},
+ {0xe5c, 0x28160d05}, {0xe60, 0x00000008},
+ {0xe68, 0x001b2556}, {0xe6c, 0x00c00096},
+ {0xe70, 0x00c00096}, {0xe74, 0x01000056},
+ {0xe78, 0x01000014}, {0xe7c, 0x01000056},
+ {0xe80, 0x01000014}, {0xe84, 0x00c00096},
+ {0xe88, 0x01000056}, {0xe8c, 0x00c00096},
+ {0xed0, 0x00c00096}, {0xed4, 0x00c00096},
+ {0xed8, 0x00c00096}, {0xedc, 0x000000d6},
+ {0xee0, 0x000000d6}, {0xeec, 0x01c00016},
+ {0xf14, 0x00000003}, {0xf4c, 0x00000000},
+ {0xf00, 0x00000300},
+ {0x820, 0x01000100}, {0x800, 0x83040000},
+ {0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_8723bu_table[] = {
+ {0xc78, 0xfd000001}, {0xc78, 0xfc010001},
+ {0xc78, 0xfb020001}, {0xc78, 0xfa030001},
+ {0xc78, 0xf9040001}, {0xc78, 0xf8050001},
+ {0xc78, 0xf7060001}, {0xc78, 0xf6070001},
+ {0xc78, 0xf5080001}, {0xc78, 0xf4090001},
+ {0xc78, 0xf30a0001}, {0xc78, 0xf20b0001},
+ {0xc78, 0xf10c0001}, {0xc78, 0xf00d0001},
+ {0xc78, 0xef0e0001}, {0xc78, 0xee0f0001},
+ {0xc78, 0xed100001}, {0xc78, 0xec110001},
+ {0xc78, 0xeb120001}, {0xc78, 0xea130001},
+ {0xc78, 0xe9140001}, {0xc78, 0xe8150001},
+ {0xc78, 0xe7160001}, {0xc78, 0xe6170001},
+ {0xc78, 0xe5180001}, {0xc78, 0xe4190001},
+ {0xc78, 0xe31a0001}, {0xc78, 0xa51b0001},
+ {0xc78, 0xa41c0001}, {0xc78, 0xa31d0001},
+ {0xc78, 0x671e0001}, {0xc78, 0x661f0001},
+ {0xc78, 0x65200001}, {0xc78, 0x64210001},
+ {0xc78, 0x63220001}, {0xc78, 0x4a230001},
+ {0xc78, 0x49240001}, {0xc78, 0x48250001},
+ {0xc78, 0x47260001}, {0xc78, 0x46270001},
+ {0xc78, 0x45280001}, {0xc78, 0x44290001},
+ {0xc78, 0x432a0001}, {0xc78, 0x422b0001},
+ {0xc78, 0x292c0001}, {0xc78, 0x282d0001},
+ {0xc78, 0x272e0001}, {0xc78, 0x262f0001},
+ {0xc78, 0x0a300001}, {0xc78, 0x09310001},
+ {0xc78, 0x08320001}, {0xc78, 0x07330001},
+ {0xc78, 0x06340001}, {0xc78, 0x05350001},
+ {0xc78, 0x04360001}, {0xc78, 0x03370001},
+ {0xc78, 0x02380001}, {0xc78, 0x01390001},
+ {0xc78, 0x013a0001}, {0xc78, 0x013b0001},
+ {0xc78, 0x013c0001}, {0xc78, 0x013d0001},
+ {0xc78, 0x013e0001}, {0xc78, 0x013f0001},
+ {0xc78, 0xfc400001}, {0xc78, 0xfb410001},
+ {0xc78, 0xfa420001}, {0xc78, 0xf9430001},
+ {0xc78, 0xf8440001}, {0xc78, 0xf7450001},
+ {0xc78, 0xf6460001}, {0xc78, 0xf5470001},
+ {0xc78, 0xf4480001}, {0xc78, 0xf3490001},
+ {0xc78, 0xf24a0001}, {0xc78, 0xf14b0001},
+ {0xc78, 0xf04c0001}, {0xc78, 0xef4d0001},
+ {0xc78, 0xee4e0001}, {0xc78, 0xed4f0001},
+ {0xc78, 0xec500001}, {0xc78, 0xeb510001},
+ {0xc78, 0xea520001}, {0xc78, 0xe9530001},
+ {0xc78, 0xe8540001}, {0xc78, 0xe7550001},
+ {0xc78, 0xe6560001}, {0xc78, 0xe5570001},
+ {0xc78, 0xe4580001}, {0xc78, 0xe3590001},
+ {0xc78, 0xa65a0001}, {0xc78, 0xa55b0001},
+ {0xc78, 0xa45c0001}, {0xc78, 0xa35d0001},
+ {0xc78, 0x675e0001}, {0xc78, 0x665f0001},
+ {0xc78, 0x65600001}, {0xc78, 0x64610001},
+ {0xc78, 0x63620001}, {0xc78, 0x62630001},
+ {0xc78, 0x61640001}, {0xc78, 0x48650001},
+ {0xc78, 0x47660001}, {0xc78, 0x46670001},
+ {0xc78, 0x45680001}, {0xc78, 0x44690001},
+ {0xc78, 0x436a0001}, {0xc78, 0x426b0001},
+ {0xc78, 0x286c0001}, {0xc78, 0x276d0001},
+ {0xc78, 0x266e0001}, {0xc78, 0x256f0001},
+ {0xc78, 0x24700001}, {0xc78, 0x09710001},
+ {0xc78, 0x08720001}, {0xc78, 0x07730001},
+ {0xc78, 0x06740001}, {0xc78, 0x05750001},
+ {0xc78, 0x04760001}, {0xc78, 0x03770001},
+ {0xc78, 0x02780001}, {0xc78, 0x01790001},
+ {0xc78, 0x017a0001}, {0xc78, 0x017b0001},
+ {0xc78, 0x017c0001}, {0xc78, 0x017d0001},
+ {0xc78, 0x017e0001}, {0xc78, 0x017f0001},
+ {0xc50, 0x69553422},
+ {0xc50, 0x69553420},
+ {0x824, 0x00390204},
+ {0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8723bu_radioa_1t_init_table[] = {
+ {0x00, 0x00010000}, {0xb0, 0x000dffe0},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0xb1, 0x00000018},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0xb2, 0x00084c00},
+ {0xb5, 0x0000d2cc}, {0xb6, 0x000925aa},
+ {0xb7, 0x00000010}, {0xb8, 0x0000907f},
+ {0x5c, 0x00000002}, {0x7c, 0x00000002},
+ {0x7e, 0x00000005}, {0x8b, 0x0006fc00},
+ {0xb0, 0x000ff9f0}, {0x1c, 0x000739d2},
+ {0x1e, 0x00000000}, {0xdf, 0x00000780},
+ {0x50, 0x00067435},
+ /*
+ * The 8723bu vendor driver indicates that bit 8 should be set in
+ * 0x51 for package types TFBGA90, TFBGA80, and TFBGA79. However,
+ * they never actually check the package type and just default to
+ * not setting it.
+ */
+ {0x51, 0x0006b04e},
+ {0x52, 0x000007d2}, {0x53, 0x00000000},
+ {0x54, 0x00050400}, {0x55, 0x0004026e},
+ {0xdd, 0x0000004c}, {0x70, 0x00067435},
+ /*
+ * 0x71 has same package type condition as for register 0x51
+ */
+ {0x71, 0x0006b04e},
+ {0x72, 0x000007d2}, {0x73, 0x00000000},
+ {0x74, 0x00050400}, {0x75, 0x0004026e},
+ {0xef, 0x00000100}, {0x34, 0x0000add7},
+ {0x35, 0x00005c00}, {0x34, 0x00009dd4},
+ {0x35, 0x00005000}, {0x34, 0x00008dd1},
+ {0x35, 0x00004400}, {0x34, 0x00007dce},
+ {0x35, 0x00003800}, {0x34, 0x00006cd1},
+ {0x35, 0x00004400}, {0x34, 0x00005cce},
+ {0x35, 0x00003800}, {0x34, 0x000048ce},
+ {0x35, 0x00004400}, {0x34, 0x000034ce},
+ {0x35, 0x00003800}, {0x34, 0x00002451},
+ {0x35, 0x00004400}, {0x34, 0x0000144e},
+ {0x35, 0x00003800}, {0x34, 0x00000051},
+ {0x35, 0x00004400}, {0xef, 0x00000000},
+ {0xef, 0x00000100}, {0xed, 0x00000010},
+ {0x44, 0x0000add7}, {0x44, 0x00009dd4},
+ {0x44, 0x00008dd1}, {0x44, 0x00007dce},
+ {0x44, 0x00006cc1}, {0x44, 0x00005cce},
+ {0x44, 0x000044d1}, {0x44, 0x000034ce},
+ {0x44, 0x00002451}, {0x44, 0x0000144e},
+ {0x44, 0x00000051}, {0xef, 0x00000000},
+ {0xed, 0x00000000}, {0x7f, 0x00020080},
+ {0xef, 0x00002000}, {0x3b, 0x000380ef},
+ {0x3b, 0x000302fe}, {0x3b, 0x00028ce6},
+ {0x3b, 0x000200bc}, {0x3b, 0x000188a5},
+ {0x3b, 0x00010fbc}, {0x3b, 0x00008f71},
+ {0x3b, 0x00000900}, {0xef, 0x00000000},
+ {0xed, 0x00000001}, {0x40, 0x000380ef},
+ {0x40, 0x000302fe}, {0x40, 0x00028ce6},
+ {0x40, 0x000200bc}, {0x40, 0x000188a5},
+ {0x40, 0x00010fbc}, {0x40, 0x00008f71},
+ {0x40, 0x00000900}, {0xed, 0x00000000},
+ {0x82, 0x00080000}, {0x83, 0x00008000},
+ {0x84, 0x00048d80}, {0x85, 0x00068000},
+ {0xa2, 0x00080000}, {0xa3, 0x00008000},
+ {0xa4, 0x00048d80}, {0xa5, 0x00068000},
+ {0xed, 0x00000002}, {0xef, 0x00000002},
+ {0x56, 0x00000032}, {0x76, 0x00000032},
+ {0x01, 0x00000780},
+ {0xff, 0xffffffff}
+};
+
+static void rtl8723bu_write_btreg(struct rtl8xxxu_priv *priv, u8 reg, u8 data)
+{
+ struct h2c_cmd h2c;
+ int reqnum = 0;
+
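+
+	/*
+	 * A BT register write appears to take two BT_MP_OPER H2C commands:
+	 * the first carries the data byte, the second (with the request
+	 * number bumped) carries the register address.
+	 */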
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER;
+ h2c.bt_mp_oper.operreq = 0 | (reqnum << 4);
+ h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE;
+ h2c.bt_mp_oper.data = data;
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper));
+
+ reqnum++;
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER;
+ h2c.bt_mp_oper.operreq = 0 | (reqnum << 4);
+ h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE;
+ h2c.bt_mp_oper.addr = reg;
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper));
+}
+
+static void rtl8723bu_reset_8051(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 sys_func;
+
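+
+	/*
+	 * Hold the 8051 in reset by clearing SYS_FUNC_CPU_ENABLE, then
+	 * re-enable it. The REG_RSV_CTRL writes around the sequence appear
+	 * to unlock and relock the reset control bits.
+	 */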
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL);
+ val8 &= ~BIT(1);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+ val8 &= ~BIT(0);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+ sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ sys_func &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL);
+ val8 &= ~BIT(1);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+ sys_func |= SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
+}
+
+static void
+rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+{
+ u32 val32, ofdm, mcs;
+ u8 cck, ofdmbase, mcsbase;
+ int group, tx_idx;
+
+ tx_idx = 0;
+ group = rtl8xxxu_gen2_channel_to_group(channel);
+
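+
+	/*
+	 * Each 32-bit TX AGC register packs four per-rate power indexes,
+	 * one per byte, so the same base index is replicated into every
+	 * byte lane below.
+	 */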
+ cck = priv->cck_tx_power_index_B[group];
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
+ val32 &= 0xffff00ff;
+ val32 |= (cck << 8);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+ val32 &= 0xff;
+ val32 |= ((cck << 8) | (cck << 16) | (cck << 24));
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+ ofdmbase = priv->ht40_1s_tx_power_index_B[group];
+ ofdmbase += priv->ofdm_tx_power_diff[tx_idx].b;
+ ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm);
+
+ mcsbase = priv->ht40_1s_tx_power_index_B[group];
+ if (ht40)
+ mcsbase += priv->ht40_tx_power_diff[tx_idx++].b;
+ else
+ mcsbase += priv->ht20_tx_power_diff[tx_idx++].b;
+ mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs);
+}
+
+static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8723bu_efuse *efuse = &priv->efuse_wifi.efuse8723bu;
+ int i;
+
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
+
+ memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base,
+ sizeof(efuse->tx_power_index_A.cck_base));
+ memcpy(priv->cck_tx_power_index_B, efuse->tx_power_index_B.cck_base,
+ sizeof(efuse->tx_power_index_B.cck_base));
+
+ memcpy(priv->ht40_1s_tx_power_index_A,
+ efuse->tx_power_index_A.ht40_base,
+ sizeof(efuse->tx_power_index_A.ht40_base));
+ memcpy(priv->ht40_1s_tx_power_index_B,
+ efuse->tx_power_index_B.ht40_base,
+ sizeof(efuse->tx_power_index_B.ht40_base));
+
+ priv->ofdm_tx_power_diff[0].a =
+ efuse->tx_power_index_A.ht20_ofdm_1s_diff.a;
+ priv->ofdm_tx_power_diff[0].b =
+ efuse->tx_power_index_B.ht20_ofdm_1s_diff.a;
+
+ priv->ht20_tx_power_diff[0].a =
+ efuse->tx_power_index_A.ht20_ofdm_1s_diff.b;
+ priv->ht20_tx_power_diff[0].b =
+ efuse->tx_power_index_B.ht20_ofdm_1s_diff.b;
+
+ priv->ht40_tx_power_diff[0].a = 0;
+ priv->ht40_tx_power_diff[0].b = 0;
+
+ for (i = 1; i < RTL8723B_TX_COUNT; i++) {
+ priv->ofdm_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ofdm;
+ priv->ofdm_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ofdm;
+
+ priv->ht20_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ht20;
+ priv->ht20_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ht20;
+
+ priv->ht40_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ht40;
+ priv->ht40_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ht40;
+ }
+
+ priv->has_xtalk = 1;
+ priv->xtalk = priv->efuse_wifi.efuse8723bu.xtal_k & 0x3f;
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.41s\n", efuse->device_name);
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
+ int i;
+ unsigned char *raw = priv->efuse_wifi.raw;
+
+ dev_info(&priv->udev->dev,
+ "%s: dumping efuse (0x%02zx bytes):\n",
+ __func__, sizeof(struct rtl8723bu_efuse));
+ for (i = 0; i < sizeof(struct rtl8723bu_efuse); i += 8) {
+ dev_info(&priv->udev->dev, "%02x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ raw[i], raw[i + 1], raw[i + 2],
+ raw[i + 3], raw[i + 4], raw[i + 5],
+ raw[i + 6], raw[i + 7]);
+ }
+ }
+
+ return 0;
+}
+
+static int rtl8723bu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ char *fw_name;
+ int ret;
+
+ if (priv->enable_bluetooth)
+ fw_name = "/*(DEBLOBBED)*/";
+ else
+ fw_name = "/*(DEBLOBBED)*/";
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+ return ret;
+}
+
+static void rtl8723bu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+
+ /* 6. 0x1f[7:0] = 0x07 */
+ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+ /* Why? */
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3);
+ rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80);
+ rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table);
+
+ rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table);
+}
+
+static int rtl8723bu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+ int ret;
+
+ ret = rtl8xxxu_init_phy_rf(priv, rtl8723bu_radioa_1t_init_table, RF_A);
+	/*
+	 * PHY LCK (LC calibration): toggle RF register 0xb0 around an RF
+	 * mode change and allow 200 ms for the calibration to complete.
+	 */
+ rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdfbe0);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, 0x8c01);
+ msleep(200);
+ rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0);
+
+ return ret;
+}
+
+static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+
+ val32 = rtl8xxxu_read32(priv, REG_PAD_CTRL1);
+ val32 &= ~(BIT(20) | BIT(24));
+ rtl8xxxu_write32(priv, REG_PAD_CTRL1, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG);
+ val32 &= ~BIT(4);
+ rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG);
+ val32 |= BIT(3);
+ rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+ val32 |= BIT(24);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+ val32 &= ~BIT(23);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER);
+ val32 |= (BIT(0) | BIT(1));
+ rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_RFE_CTRL_ANTA_SRC);
+ val32 &= 0xffffff00;
+ val32 |= 0x77;
+ rtl8xxxu_write32(priv, REG_RFE_CTRL_ANTA_SRC, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_PWR_DATA);
+ val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
+ rtl8xxxu_write32(priv, REG_PWR_DATA, val32);
+}
+
+static int rtl8723bu_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_eac, reg_e94, reg_e9c, path_sel, val32;
+ int result = 0;
+
+ path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * Enable path A PA in TX IQK mode
+ */
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x20000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0003f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xc7f87);
+
+ /*
+ * Tx IQK setting
+ */
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x821403ea);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000);
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911);
+
+ /*
+ * Enter IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * The vendor driver indicates the USB module is always using
+	 * S0S1 path 1 for the 8723bu. This may be different for the 8192eu.
+ */
+ if (priv->rf_paths > 1)
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
+ else
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
+
+ /*
+ * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu.
+ * No trace of this in the 8192eu or 8188eu vendor drivers.
+ */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(1);
+
+ /* Restore Ant Path */
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
+#ifdef RTL8723BU_BT
+ /* GNT_BT = 1 */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
+#endif
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+	/* Check for failure */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+ val32 = (reg_e9c >> 16) & 0x3ff;
+ if (val32 & 0x200)
+ val32 = 0x400 - val32;
+
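+
+	/*
+	 * Pass criteria inherited from the vendor driver: the IQK fail bit
+	 * (0xeac[28]) must be clear, the TX results must avoid the
+	 * known-bad 0x142/0x042 values, the e94 result must lie within
+	 * (0xf0, 0x110) of the nominal 0x100, and the offset computed
+	 * above must stay below 0xf.
+	 */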
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000) &&
+ ((reg_e94 & 0x03ff0000) < 0x01100000) &&
+ ((reg_e94 & 0x03ff0000) > 0x00f00000) &&
+ val32 < 0xf)
+ result |= 0x01;
+ else /* If TX not OK, ignore RX */
+ goto out;
+
+out:
+ return result;
+}
+
+static int rtl8723bu_rx_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_ea4, reg_eac, reg_e94, reg_e9c, path_sel, val32;
+ int result = 0;
+
+ path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * Enable path A PA in TX IQK mode
+ */
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7);
+
+ /*
+ * Tx IQK setting
+ */
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160ff0);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000);
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+ /*
+ * Enter IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * The vendor driver indicates the USB module is always using
+	 * S0S1 path 1 for the 8723bu. This may be different for the 8192eu.
+ */
+ if (priv->rf_paths > 1)
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
+ else
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
+
+ /*
+ * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu.
+ * No trace of this in the 8192eu or 8188eu vendor drivers.
+ */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(1);
+
+ /* Restore Ant Path */
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
+#ifdef RTL8723BU_BT
+ /* GNT_BT = 1 */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
+#endif
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+	/* Check for failure */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+ val32 = (reg_e9c >> 16) & 0x3ff;
+ if (val32 & 0x200)
+ val32 = 0x400 - val32;
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000) &&
+ ((reg_e94 & 0x03ff0000) < 0x01100000) &&
+ ((reg_e94 & 0x03ff0000) > 0x00f00000) &&
+ val32 < 0xf)
+ result |= 0x01;
+ else /* If TX not OK, ignore RX */
+ goto out;
+
+	val32 = 0x80007c00 | (reg_e94 & 0x3ff0000) |
+ ((reg_e9c & 0x3ff0000) >> 16);
+ rtl8xxxu_write32(priv, REG_TX_IQK, val32);
+
+ /*
+ * Modify RX IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7d77);
+
+ /*
+ * PA, PAD setting
+ */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0xf80);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_55, 0x4021f);
+
+ /*
+ * RX IQK setting
+ */
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82110000);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x2816001f);
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a8d1);
+
+ /*
+ * Enter IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ if (priv->rf_paths > 1)
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
+ else
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
+
+ /*
+ * Disable BT
+ */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(1);
+
+ /* Restore Ant Path */
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
+#ifdef RTL8723BU_BT
+ /* GNT_BT = 1 */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
+#endif
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+	/* Check for failure */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x780);
+
+ val32 = (reg_eac >> 16) & 0x3ff;
+ if (val32 & 0x200)
+ val32 = 0x400 - val32;
+
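+
+	/*
+	 * RX passes when the corresponding fail bit (0xeac[27]) is clear
+	 * and the ea4 result sits in the same window around the nominal
+	 * 0x100.
+	 */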
+ if (!(reg_eac & BIT(27)) &&
+ ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
+ ((reg_eac & 0x03ff0000) != 0x00360000) &&
+ ((reg_ea4 & 0x03ff0000) < 0x01100000) &&
+ ((reg_ea4 & 0x03ff0000) > 0x00f00000) &&
+ val32 < 0xf)
+ result |= 0x02;
+ else /* If TX not OK, ignore RX */
+ goto out;
+out:
+ return result;
+}
+
+static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+ int result[][8], int t)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 i, val32;
+ int path_a_ok /*, path_b_ok */;
+ int retry = 2;
+ const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+ REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+ REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+ REG_TX_OFDM_BBON, REG_TX_TO_RX,
+ REG_TX_TO_TX, REG_RX_CCK,
+ REG_RX_OFDM, REG_RX_WAIT_RIFS,
+ REG_RX_TO_RX, REG_STANDBY,
+ REG_SLEEP, REG_PMPD_ANAEN
+ };
+ const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ REG_TXPAUSE, REG_BEACON_CTRL,
+ REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+ };
+ const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+ REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+ REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+ REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE
+ };
+ u8 xa_agc = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1) & 0xff;
+ u8 xb_agc = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1) & 0xff;
+
+ /*
+ * Note: IQ calibration must be performed after loading
+	 * PHY_REG.txt, radio_a.txt and radio_b.txt
+ */
+
+ if (t == 0) {
+ /* Save ADDA parameters, turn Path A ADDA on */
+ rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+ rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+ rtl8xxxu_save_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+ }
+
+ rtl8xxxu_path_adda_on(priv, adda_regs, true);
+
+ /* MAC settings */
+ rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
+
+ val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
+ val32 |= 0x0f000000;
+ rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
+
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+ rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+ rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
+
+ /*
+	 * RX IQ calibration setting to work around the 8723B D-cut large
+	 * current issue when leaving IPS
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7);
+
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED);
+ val32 |= 0x20;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_43, 0x60fbd);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8723bu_iqk_path_a(priv);
+ if (path_a_ok == 0x01) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_BEFORE_IQK_A);
+ result[t][0] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_AFTER_IQK_A);
+ result[t][1] = (val32 >> 16) & 0x3ff;
+
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8723bu_rx_iqk_path_a(priv);
+ if (path_a_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_BEFORE_IQK_A_2);
+ result[t][2] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_AFTER_IQK_A_2);
+ result[t][3] = (val32 >> 16) & 0x3ff;
+
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__);
+
+ if (priv->tx_paths > 1) {
+#if 1
+ dev_warn(dev, "%s: Path B not supported\n", __func__);
+#else
+
+ /*
+ * Path A into standby
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* Turn Path B ADDA on */
+ rtl8xxxu_path_adda_on(priv, adda_regs, false);
+
+ for (i = 0; i < retry; i++) {
+ path_b_ok = rtl8xxxu_iqk_path_b(priv);
+ if (path_b_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+ result[t][4] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+ result[t][5] = (val32 >> 16) & 0x3ff;
+ break;
+ }
+ }
+
+ if (!path_b_ok)
+ dev_dbg(dev, "%s: Path B IQK failed!\n", __func__);
+
+ for (i = 0; i < retry; i++) {
+ path_b_ok = rtl8723bu_rx_iqk_path_b(priv);
+			if (path_b_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_BEFORE_IQK_B_2);
+ result[t][6] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_AFTER_IQK_B_2);
+ result[t][7] = (val32 >> 16) & 0x3ff;
+ break;
+ }
+ }
+
+ if (!path_b_ok)
+ dev_dbg(dev, "%s: Path B RX IQK failed!\n", __func__);
+#endif
+ }
+
+ /* Back to BB mode, load original value */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ if (t) {
+ /* Reload ADDA power saving parameters */
+ rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+
+ /* Reload MAC parameters */
+ rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+
+ /* Reload BB parameters */
+ rtl8xxxu_restore_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+
+ /* Restore RX initial gain */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+ val32 &= 0xffffff00;
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | 0x50);
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | xa_agc);
+
+ if (priv->tx_paths > 1) {
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1);
+ val32 &= 0xffffff00;
+ rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
+ val32 | 0x50);
+ rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
+ val32 | xb_agc);
+ }
+
+ /* Load 0xe30 IQC default value */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
+ }
+}
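+
+/*
+ * Result layout used above: for each run t, result[t][0..3] hold the
+ * path A TX before/after and RX before/after IQK readings (bits 25:16
+ * of the respective registers); result[t][4..7] hold the same four
+ * values for path B.
+ */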
+
+static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ int result[4][8]; /* last is final result */
+ int i, candidate;
+ bool path_a_ok, path_b_ok;
+ u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
+ u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ u32 val32, bt_control;
+ s32 reg_tmp = 0;
+ bool simu;
+
+ rtl8xxxu_gen2_prepare_calibrate(priv, 1);
+
+ memset(result, 0, sizeof(result));
+ candidate = -1;
+
+ path_a_ok = false;
+ path_b_ok = false;
+
+ bt_control = rtl8xxxu_read32(priv, REG_BT_CONTROL_8723BU);
+
+ for (i = 0; i < 3; i++) {
+ rtl8723bu_phy_iqcalibrate(priv, result, i);
+
+ if (i == 1) {
+ simu = rtl8xxxu_gen2_simularity_compare(priv,
+ result, 0, 1);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+ }
+
+ if (i == 2) {
+ simu = rtl8xxxu_gen2_simularity_compare(priv,
+ result, 0, 2);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+
+ simu = rtl8xxxu_gen2_simularity_compare(priv,
+ result, 1, 2);
+ if (simu) {
+ candidate = 1;
+ } else {
+ for (i = 0; i < 8; i++)
+ reg_tmp += result[3][i];
+
+ if (reg_tmp)
+ candidate = 3;
+ else
+ candidate = -1;
+ }
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ reg_e94 = result[i][0];
+ reg_e9c = result[i][1];
+ reg_ea4 = result[i][2];
+ reg_eac = result[i][3];
+ reg_eb4 = result[i][4];
+ reg_ebc = result[i][5];
+ reg_ec4 = result[i][6];
+ reg_ecc = result[i][7];
+ }
+
+ if (candidate >= 0) {
+ reg_e94 = result[candidate][0];
+ priv->rege94 = reg_e94;
+ reg_e9c = result[candidate][1];
+ priv->rege9c = reg_e9c;
+ reg_ea4 = result[candidate][2];
+ reg_eac = result[candidate][3];
+ reg_eb4 = result[candidate][4];
+ priv->regeb4 = reg_eb4;
+ reg_ebc = result[candidate][5];
+ priv->regebc = reg_ebc;
+ reg_ec4 = result[candidate][6];
+ reg_ecc = result[candidate][7];
+ dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
+ dev_dbg(dev,
+ "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x "
+ "ecc=%x\n ", __func__, reg_e94, reg_e9c,
+ reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc);
+ path_a_ok = true;
+ path_b_ok = true;
+ } else {
+ reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100;
+ reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0;
+ }
+
+ if (reg_e94 && candidate >= 0)
+ rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
+ candidate, (reg_ea4 == 0));
+
+ if (priv->tx_paths > 1 && reg_eb4)
+ rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
+ candidate, (reg_ec4 == 0));
+
+ rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
+ priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, bt_control);
+
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x18000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xe6177);
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED);
+ val32 |= 0x20;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, 0x43, 0x300bd);
+
+ if (priv->rf_paths > 1)
+ dev_dbg(dev, "%s: 8723BU 2T not supported\n", __func__);
+
+ rtl8xxxu_gen2_prepare_calibrate(priv, 0);
+}
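+
+/*
+ * Candidate selection above: the calibration runs up to three times,
+ * and the first pair of runs that compare as similar wins (candidate
+ * 0 or 1). result[3] is never filled in (only rows 0..2 are), so the
+ * reg_tmp fallback always yields candidate -1, in which case the
+ * 0x100/0x0 defaults are recorded and no IQK matrix is written.
+ */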
+
+static int rtl8723bu_active_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int count, ret = 0;
+
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+
+ /* Enable rising edge triggering interrupt */
+ val16 = rtl8xxxu_read16(priv, REG_GPIO_INTM);
+ val16 &= ~GPIO_INTM_EDGE_TRIG_IRQ;
+ rtl8xxxu_write16(priv, REG_GPIO_INTM, val16);
+
+ /* Release WLON reset 0x04[16] = 1 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_WLON_RESET;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ /* 0x0005[1] = 1 turn off MAC by HW state machine */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ if ((val8 & BIT(1)) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (!count) {
+ dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n",
+ __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* Enable BT control XTAL setting */
+ val8 = rtl8xxxu_read8(priv, REG_AFE_MISC);
+ val8 &= ~AFE_MISC_WL_XTAL_CTRL;
+ rtl8xxxu_write8(priv, REG_AFE_MISC, val8);
+
+ /* 0x0000[5] = 1: isolate analog IPS from digital */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+ val8 |= SYS_ISO_ANALOG_IPS;
+ rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+ /* 0x0020[0] = 0 disable LDOA12 MACRO block */
+ val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+ val8 &= ~LDOA15_ENABLE;
+ rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+exit:
+ return ret;
+}
+
+static int rtl8723b_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+ int count, ret = 0;
+
+ /* 0x20[0] = 1 enable LDOA12 MACRO block for all interfaces */
+ val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+ val8 |= LDOA15_ENABLE;
+ rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+ /* 0x67[4] = 0 to disable BT_GPS_SEL pins */
+ val8 = rtl8xxxu_read8(priv, 0x0067);
+ val8 &= ~BIT(4);
+ rtl8xxxu_write8(priv, 0x0067, val8);
+
+ mdelay(1);
+
+ /* 0x00[5] = 0: release analog IPS isolation from digital */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+ val8 &= ~SYS_ISO_ANALOG_IPS;
+ rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+ /* Disable SW LPS 0x04[10]= 0 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 &= ~APS_FSMCO_SW_LPS;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ /* Wait until 0x04[17] = 1 power ready */
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if (val32 & BIT(17))
+ break;
+
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* We should be able to optimize the following three entries into one */
+
+ /* Release WLON reset 0x04[16] = 1 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_WLON_RESET;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ /* Disable HWPDN 0x04[15] = 0 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 &= ~APS_FSMCO_HW_POWERDOWN;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ /* Disable WL suspend */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 &= ~(APS_FSMCO_HW_SUSPEND | APS_FSMCO_PCIE);
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ /* Set, then poll until 0 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_MAC_ENABLE;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+ ret = 0;
+ break;
+ }
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* Enable WL control XTAL setting */
+ val8 = rtl8xxxu_read8(priv, REG_AFE_MISC);
+ val8 |= AFE_MISC_WL_XTAL_CTRL;
+ rtl8xxxu_write8(priv, REG_AFE_MISC, val8);
+
+ /* Enable falling edge triggering interrupt */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 1);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_GPIO_INTM + 1, val8);
+
+ /* Enable GPIO9 interrupt mode */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2 + 1);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2 + 1, val8);
+
+ /* Enable GPIO9 input mode */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2);
+ val8 &= ~BIT(1);
+ rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2, val8);
+
+ /* Enable HSISR GPIO[C:0] interrupt */
+ val8 = rtl8xxxu_read8(priv, REG_HSIMR);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_HSIMR, val8);
+
+ /* Enable HSISR GPIO9 interrupt */
+ val8 = rtl8xxxu_read8(priv, REG_HSIMR + 2);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_HSIMR + 2, val8);
+
+ val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL);
+ val8 |= MULTI_WIFI_HW_ROF_EN;
+ rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL, val8);
+
+ /* GPIO9 internal pull-high (bit 14 of REG_MULTI_FUNC_CTRL) */
+ val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL + 1);
+ val8 |= BIT(6);
+ rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL + 1, val8);
+
+exit:
+ return ret;
+}
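+
+/*
+ * The register poll loops above (and in rtl8723bu_active_to_emu())
+ * all follow the same read/test/udelay pattern. A helper along these
+ * lines could factor that out (illustrative sketch only, not part of
+ * the original patch):
+ *
+ *	static int rtl8xxxu_poll_reg32(struct rtl8xxxu_priv *priv, u16 addr,
+ *				       u32 mask, u32 want)
+ *	{
+ *		int count;
+ *
+ *		for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ *			if ((rtl8xxxu_read32(priv, addr) & mask) == want)
+ *				return 0;
+ *			udelay(10);
+ *		}
+ *		return -EBUSY;
+ *	}
+ */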
+
+static int rtl8723bu_power_on(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int ret;
+
+ rtl8xxxu_disabled_to_emu(priv);
+
+ ret = rtl8723b_emu_to_active(priv);
+ if (ret)
+ goto exit;
+
+ /*
+ * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+ * Set CR bit10 to enable 32k calibration.
+ */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+ CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+ CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
+ CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ /*
+ * BT coexist power-on settings. These are identical for the 1- and
+ * 2-antenna parts.
+ */
+ rtl8xxxu_write8(priv, REG_PAD_CTRL1 + 3, 0x20);
+
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= SYS_FUNC_BBRSTB | SYS_FUNC_BB_GLB_RSTN;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ rtl8xxxu_write8(priv, REG_BT_CONTROL_8723BU + 1, 0x18);
+ rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+ /* Antenna inverse */
+ rtl8xxxu_write8(priv, 0xfe08, 0x01);
+
+ val16 = rtl8xxxu_read16(priv, REG_PWR_DATA);
+ val16 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
+ rtl8xxxu_write16(priv, REG_PWR_DATA, val16);
+
+ val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+ val32 |= LEDCFG0_DPDT_SELECT;
+ rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+ val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
+ val8 &= ~PAD_CTRL1_SW_DPDT_SEL_DATA;
+ rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
+exit:
+ return ret;
+}
+
+static void rtl8723bu_power_off(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ rtl8xxxu_flush_fifo(priv);
+
+ /*
+ * Disable TX report timer
+ */
+ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+ val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE;
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
+
+ rtl8xxxu_write8(priv, REG_CR, 0x0000);
+
+ rtl8xxxu_active_to_lps(priv);
+
+ /* Reset Firmware if running in RAM */
+ if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
+ rtl8xxxu_firmware_self_reset(priv);
+
+ /* Reset MCU */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /* Reset MCU ready status */
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
+
+ rtl8723bu_active_to_emu(priv);
+
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 |= BIT(3); /* APS_FSMCO_HW_SUSPEND */
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* 0x48[16] = 1 to enable GPIO9 as EXT wakeup */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8);
+}
+
+static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
+{
+ struct h2c_cmd h2c;
+ u32 val32;
+ u8 val8;
+
+ /*
+ * No indication anywhere as to what 0x0790 does. The 2 antenna
+ * vendor code preserves bits 6-7 here.
+ */
+ rtl8xxxu_write8(priv, 0x0790, 0x05);
+ /*
+ * 0x0778 seems to be related to the number of enabled antennas.
+ * In the vendor driver, halbtc8723b2ant_InitHwConfig() sets it
+ * to 0x03, while halbtc8723b1ant_InitHwConfig() sets it to 0x01.
+ */
+ rtl8xxxu_write8(priv, 0x0778, 0x01);
+
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
+ val8 |= BIT(5);
+ rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_IQADJ_G1, 0x780);
+
+ rtl8723bu_write_btreg(priv, 0x3c, 0x15); /* BT TRx Mask on */
+
+ /*
+ * Set BT grant to low
+ */
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.bt_grant.cmd = H2C_8723B_BT_GRANT;
+ h2c.bt_grant.data = 0;
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_grant));
+
+ /*
+ * WLAN action by PTA
+ */
+ rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
+
+ /*
+ * BT select S0/S1 controlled by WiFi
+ */
+ val8 = rtl8xxxu_read8(priv, 0x0067);
+ val8 |= BIT(5);
+ rtl8xxxu_write8(priv, 0x0067, val8);
+
+ val32 = rtl8xxxu_read32(priv, REG_PWR_DATA);
+ val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
+ rtl8xxxu_write32(priv, REG_PWR_DATA, val32);
+
+ /*
+ * Bits 6/7 are marked in/out ... but for what?
+ */
+ rtl8xxxu_write8(priv, 0x0974, 0xff);
+
+ val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER);
+ val32 |= (BIT(0) | BIT(1));
+ rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32);
+
+ rtl8xxxu_write8(priv, REG_RFE_CTRL_ANTA_SRC, 0x77);
+
+ val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+ val32 &= ~BIT(24);
+ val32 |= BIT(23);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+ /*
+ * Fix external switch Main->S1, Aux->S0
+ */
+ val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
+ val8 &= ~BIT(0);
+ rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.ant_sel_rsv.cmd = H2C_8723B_ANT_SEL_RSV;
+ h2c.ant_sel_rsv.ant_inverse = 1;
+ h2c.ant_sel_rsv.int_switch_type = 0;
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.ant_sel_rsv));
+
+ /*
+ * 0x280, 0x00, 0x200, 0x80 - not clear
+ */
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+
+ /*
+ * Software control, antenna at WiFi side
+ */
+#ifdef NEED_PS_TDMA
+ rtl8723bu_set_ps_tdma(priv, 0x08, 0x00, 0x00, 0x00, 0x00);
+#endif
+
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.bt_info.cmd = H2C_8723B_BT_INFO;
+ h2c.bt_info.data = BIT(0);
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_info));
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.ignore_wlan.cmd = H2C_8723B_BT_IGNORE_WLANACT;
+ h2c.ignore_wlan.data = 0;
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan));
+}
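+
+/*
+ * Note on the H2C commands above: the length passed to
+ * rtl8xxxu_gen2_h2c_cmd() is the size of the specific sub-struct, and
+ * it decides whether the extension mailbox word is written: commands
+ * longer than four bytes also go through REG_HMBOX_EXT0_8723B (see the
+ * len > sizeof(u32) check in rtl8xxxu_gen2_h2c_cmd()).
+ */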
+
+static void rtl8723bu_init_aggregation(struct rtl8xxxu_priv *priv)
+{
+ u32 agg_rx;
+ u8 agg_ctrl;
+
+ /*
+ * For now simply disable RX aggregation
+ */
+ agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL);
+ agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN;
+
+ agg_rx = rtl8xxxu_read32(priv, REG_RXDMA_AGG_PG_TH);
+ agg_rx &= ~RXDMA_USB_AGG_ENABLE;
+ agg_rx &= ~0xff0f;
+
+ rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl);
+ rtl8xxxu_write32(priv, REG_RXDMA_AGG_PG_TH, agg_rx);
+}
+
+static void rtl8723bu_init_statistics(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+
+ /* NHM time unit is 4us: 0x2710 (10000) * 4us = 40ms */
+ rtl8xxxu_write16(priv, REG_NHM_TIMER_8723B + 2, 0x2710);
+ rtl8xxxu_write16(priv, REG_NHM_TH9_TH10_8723B + 2, 0xffff);
+ rtl8xxxu_write32(priv, REG_NHM_TH3_TO_TH0_8723B, 0xffffff52);
+ rtl8xxxu_write32(priv, REG_NHM_TH7_TO_TH4_8723B, 0xffffffff);
+ /* TH8 */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 |= 0xff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+ /* Enable CCK */
+ val32 = rtl8xxxu_read32(priv, REG_NHM_TH9_TH10_8723B);
+ val32 |= BIT(8) | BIT(9) | BIT(10);
+ rtl8xxxu_write32(priv, REG_NHM_TH9_TH10_8723B, val32);
+ /* Max power amongst all RX antennas */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_FA_RSTC);
+ val32 |= BIT(7);
+ rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32);
+}
+
+struct rtl8xxxu_fileops rtl8723bu_fops = {
+ .parse_efuse = rtl8723bu_parse_efuse,
+ .load_firmware = rtl8723bu_load_firmware,
+ .power_on = rtl8723bu_power_on,
+ .power_off = rtl8723bu_power_off,
+ .reset_8051 = rtl8723bu_reset_8051,
+ .llt_init = rtl8xxxu_auto_llt_table,
+ .init_phy_bb = rtl8723bu_init_phy_bb,
+ .init_phy_rf = rtl8723bu_init_phy_rf,
+ .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection,
+ .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate,
+ .config_channel = rtl8xxxu_gen2_config_channel,
+ .parse_rx_desc = rtl8xxxu_parse_rxdesc24,
+ .init_aggregation = rtl8723bu_init_aggregation,
+ .init_statistics = rtl8723bu_init_statistics,
+ .enable_rf = rtl8723b_enable_rf,
+ .disable_rf = rtl8xxxu_gen2_disable_rf,
+ .usb_quirks = rtl8xxxu_gen2_usb_quirks,
+ .set_tx_power = rtl8723b_set_tx_power,
+ .update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
+ .report_connect = rtl8xxxu_gen2_report_connect,
+ .writeN_block_size = 1024,
+ .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
+ .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
+ .has_s0s1 = 1,
+ .adda_1t_init = 0x01c00014,
+ .adda_1t_path_on = 0x01c00014,
+ .adda_2t_path_on_a = 0x01c00014,
+ .adda_2t_path_on_b = 0x01c00014,
+ .trxff_boundary = 0x3f7f,
+ .pbp_rx = PBP_PAGE_SIZE_256,
+ .pbp_tx = PBP_PAGE_SIZE_256,
+ .mactable = rtl8723b_mac_init_table,
+};
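+
+/*
+ * rtl8xxxu_fileops is the per-chip dispatch table consumed by the
+ * generic core. A typical bring-up through it would look roughly like
+ * the following (sketch only, not literal core code):
+ *
+ *	ret = priv->fops->power_on(priv);
+ *	if (!ret) {
+ *		priv->fops->init_phy_bb(priv);
+ *		priv->fops->enable_rf(priv);
+ *	}
+ */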
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index a275ff145..05a1ff26f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1,7 +1,7 @@
/*
* RTL8XXXU mac80211 USB driver
*
- * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
*
* Portions, notably calibration code:
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
@@ -42,7 +42,7 @@
#define DRIVER_NAME "rtl8xxxu"
-static int rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE;
+int rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE;
static bool rtl8xxxu_ht40_2g;
MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
@@ -83,33 +83,33 @@ static struct ieee80211_rate rtl8xxxu_rates[] = {
};
static struct ieee80211_channel rtl8xxxu_channels_2g[] = {
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2412,
.hw_value = 1, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2417,
.hw_value = 2, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2422,
.hw_value = 3, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2427,
.hw_value = 4, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2432,
.hw_value = 5, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2437,
.hw_value = 6, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2442,
.hw_value = 7, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2447,
.hw_value = 8, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2452,
.hw_value = 9, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2457,
.hw_value = 10, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2462,
.hw_value = 11, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2467,
.hw_value = 12, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2472,
.hw_value = 13, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2484,
.hw_value = 14, .max_power = 30 }
};
@@ -120,7 +120,7 @@ static struct ieee80211_supported_band rtl8xxxu_supported_band = {
.n_bitrates = ARRAY_SIZE(rtl8xxxu_rates),
};
-static struct rtl8xxxu_reg8val rtl8723a_mac_init_table[] = {
+struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[] = {
{0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
{0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
{0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
@@ -145,37 +145,6 @@ static struct rtl8xxxu_reg8val rtl8723a_mac_init_table[] = {
{0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff},
};
-static struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = {
- {0x02f, 0x30}, {0x035, 0x00}, {0x039, 0x08}, {0x04e, 0xe0},
- {0x064, 0x00}, {0x067, 0x20}, {0x428, 0x0a}, {0x429, 0x10},
- {0x430, 0x00}, {0x431, 0x00},
- {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
- {0x436, 0x07}, {0x437, 0x08}, {0x43c, 0x04}, {0x43d, 0x05},
- {0x43e, 0x07}, {0x43f, 0x08}, {0x440, 0x5d}, {0x441, 0x01},
- {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00},
- {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xf0}, {0x44a, 0x0f},
- {0x44b, 0x3e}, {0x44c, 0x10}, {0x44d, 0x00}, {0x44e, 0x00},
- {0x44f, 0x00}, {0x450, 0x00}, {0x451, 0xf0}, {0x452, 0x0f},
- {0x453, 0x00}, {0x456, 0x5e}, {0x460, 0x66}, {0x461, 0x66},
- {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
- {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
- {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
- {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
- {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
- {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
- {0x516, 0x0a}, {0x525, 0x4f},
- {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55c, 0x50},
- {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
- {0x620, 0xff}, {0x621, 0xff}, {0x622, 0xff}, {0x623, 0xff},
- {0x624, 0xff}, {0x625, 0xff}, {0x626, 0xff}, {0x627, 0xff},
- {0x638, 0x50}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e},
- {0x63f, 0x0e}, {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00},
- {0x652, 0xc8}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43},
- {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43},
- {0x70a, 0x65}, {0x70b, 0x87}, {0x765, 0x18}, {0x76e, 0x04},
- {0xffff, 0xff},
-};
-
static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
{0x800, 0x80040000}, {0x804, 0x00000003},
{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
@@ -274,107 +243,6 @@ static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
{0xffff, 0xffffffff},
};
-static struct rtl8xxxu_reg32val rtl8723b_phy_1t_init_table[] = {
- {0x800, 0x80040000}, {0x804, 0x00000003},
- {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
- {0x810, 0x10001331}, {0x814, 0x020c3d10},
- {0x818, 0x02200385}, {0x81c, 0x00000000},
- {0x820, 0x01000100}, {0x824, 0x00190204},
- {0x828, 0x00000000}, {0x82c, 0x00000000},
- {0x830, 0x00000000}, {0x834, 0x00000000},
- {0x838, 0x00000000}, {0x83c, 0x00000000},
- {0x840, 0x00010000}, {0x844, 0x00000000},
- {0x848, 0x00000000}, {0x84c, 0x00000000},
- {0x850, 0x00000000}, {0x854, 0x00000000},
- {0x858, 0x569a11a9}, {0x85c, 0x01000014},
- {0x860, 0x66f60110}, {0x864, 0x061f0649},
- {0x868, 0x00000000}, {0x86c, 0x27272700},
- {0x870, 0x07000760}, {0x874, 0x25004000},
- {0x878, 0x00000808}, {0x87c, 0x00000000},
- {0x880, 0xb0000c1c}, {0x884, 0x00000001},
- {0x888, 0x00000000}, {0x88c, 0xccc000c0},
- {0x890, 0x00000800}, {0x894, 0xfffffffe},
- {0x898, 0x40302010}, {0x89c, 0x00706050},
- {0x900, 0x00000000}, {0x904, 0x00000023},
- {0x908, 0x00000000}, {0x90c, 0x81121111},
- {0x910, 0x00000002}, {0x914, 0x00000201},
- {0xa00, 0x00d047c8}, {0xa04, 0x80ff800c},
- {0xa08, 0x8c838300}, {0xa0c, 0x2e7f120f},
- {0xa10, 0x9500bb78}, {0xa14, 0x1114d028},
- {0xa18, 0x00881117}, {0xa1c, 0x89140f00},
- {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
- {0xa28, 0x00000204}, {0xa2c, 0x00d30000},
- {0xa70, 0x101fbf00}, {0xa74, 0x00000007},
- {0xa78, 0x00000900}, {0xa7c, 0x225b0606},
- {0xa80, 0x21806490}, {0xb2c, 0x00000000},
- {0xc00, 0x48071d40}, {0xc04, 0x03a05611},
- {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
- {0xc10, 0x08800000}, {0xc14, 0x40000100},
- {0xc18, 0x08800000}, {0xc1c, 0x40000100},
- {0xc20, 0x00000000}, {0xc24, 0x00000000},
- {0xc28, 0x00000000}, {0xc2c, 0x00000000},
- {0xc30, 0x69e9ac44}, {0xc34, 0x469652af},
- {0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
- {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
- {0xc48, 0xec020107}, {0xc4c, 0x007f037f},
- {0xc50, 0x69553420}, {0xc54, 0x43bc0094},
- {0xc58, 0x00013149}, {0xc5c, 0x00250492},
- {0xc60, 0x00000000}, {0xc64, 0x7112848b},
- {0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
- {0xc70, 0x2c7f000d}, {0xc74, 0x020610db},
- {0xc78, 0x0000001f}, {0xc7c, 0x00b91612},
- {0xc80, 0x390000e4}, {0xc84, 0x20f60000},
- {0xc88, 0x40000100}, {0xc8c, 0x20200000},
- {0xc90, 0x00020e1a}, {0xc94, 0x00000000},
- {0xc98, 0x00020e1a}, {0xc9c, 0x00007f7f},
- {0xca0, 0x00000000}, {0xca4, 0x000300a0},
- {0xca8, 0x00000000}, {0xcac, 0x00000000},
- {0xcb0, 0x00000000}, {0xcb4, 0x00000000},
- {0xcb8, 0x00000000}, {0xcbc, 0x28000000},
- {0xcc0, 0x00000000}, {0xcc4, 0x00000000},
- {0xcc8, 0x00000000}, {0xccc, 0x00000000},
- {0xcd0, 0x00000000}, {0xcd4, 0x00000000},
- {0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
- {0xce0, 0x00222222}, {0xce4, 0x00000000},
- {0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
- {0xd00, 0x00000740}, {0xd04, 0x40020401},
- {0xd08, 0x0000907f}, {0xd0c, 0x20010201},
- {0xd10, 0xa0633333}, {0xd14, 0x3333bc53},
- {0xd18, 0x7a8f5b6f}, {0xd2c, 0xcc979975},
- {0xd30, 0x00000000}, {0xd34, 0x80608000},
- {0xd38, 0x00000000}, {0xd3c, 0x00127353},
- {0xd40, 0x00000000}, {0xd44, 0x00000000},
- {0xd48, 0x00000000}, {0xd4c, 0x00000000},
- {0xd50, 0x6437140a}, {0xd54, 0x00000000},
- {0xd58, 0x00000282}, {0xd5c, 0x30032064},
- {0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
- {0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
- {0xd70, 0x1812362e}, {0xd74, 0x322c2220},
- {0xd78, 0x000e3c24}, {0xe00, 0x2d2d2d2d},
- {0xe04, 0x2d2d2d2d}, {0xe08, 0x0390272d},
- {0xe10, 0x2d2d2d2d}, {0xe14, 0x2d2d2d2d},
- {0xe18, 0x2d2d2d2d}, {0xe1c, 0x2d2d2d2d},
- {0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
- {0xe34, 0x10008c1f}, {0xe38, 0x02140102},
- {0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
- {0xe44, 0x01004800}, {0xe48, 0xfb000000},
- {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
- {0xe54, 0x10008c1f}, {0xe58, 0x02140102},
- {0xe5c, 0x28160d05}, {0xe60, 0x00000008},
- {0xe68, 0x001b2556}, {0xe6c, 0x00c00096},
- {0xe70, 0x00c00096}, {0xe74, 0x01000056},
- {0xe78, 0x01000014}, {0xe7c, 0x01000056},
- {0xe80, 0x01000014}, {0xe84, 0x00c00096},
- {0xe88, 0x01000056}, {0xe8c, 0x00c00096},
- {0xed0, 0x00c00096}, {0xed4, 0x00c00096},
- {0xed8, 0x00c00096}, {0xedc, 0x000000d6},
- {0xee0, 0x000000d6}, {0xeec, 0x01c00016},
- {0xf14, 0x00000003}, {0xf4c, 0x00000000},
- {0xf00, 0x00000300},
- {0x820, 0x01000100}, {0x800, 0x83040000},
- {0xffff, 0xffffffff},
-};
-
static struct rtl8xxxu_reg32val rtl8192cu_phy_2t_init_table[] = {
{0x024, 0x0011800f}, {0x028, 0x00ffdb83},
{0x800, 0x80040002}, {0x804, 0x00000003},
@@ -740,470 +608,6 @@ static struct rtl8xxxu_reg32val rtl8xxx_agc_highpa_table[] = {
{0xffff, 0xffffffff}
};
-static struct rtl8xxxu_reg32val rtl8xxx_agc_8723bu_table[] = {
- {0xc78, 0xfd000001}, {0xc78, 0xfc010001},
- {0xc78, 0xfb020001}, {0xc78, 0xfa030001},
- {0xc78, 0xf9040001}, {0xc78, 0xf8050001},
- {0xc78, 0xf7060001}, {0xc78, 0xf6070001},
- {0xc78, 0xf5080001}, {0xc78, 0xf4090001},
- {0xc78, 0xf30a0001}, {0xc78, 0xf20b0001},
- {0xc78, 0xf10c0001}, {0xc78, 0xf00d0001},
- {0xc78, 0xef0e0001}, {0xc78, 0xee0f0001},
- {0xc78, 0xed100001}, {0xc78, 0xec110001},
- {0xc78, 0xeb120001}, {0xc78, 0xea130001},
- {0xc78, 0xe9140001}, {0xc78, 0xe8150001},
- {0xc78, 0xe7160001}, {0xc78, 0xe6170001},
- {0xc78, 0xe5180001}, {0xc78, 0xe4190001},
- {0xc78, 0xe31a0001}, {0xc78, 0xa51b0001},
- {0xc78, 0xa41c0001}, {0xc78, 0xa31d0001},
- {0xc78, 0x671e0001}, {0xc78, 0x661f0001},
- {0xc78, 0x65200001}, {0xc78, 0x64210001},
- {0xc78, 0x63220001}, {0xc78, 0x4a230001},
- {0xc78, 0x49240001}, {0xc78, 0x48250001},
- {0xc78, 0x47260001}, {0xc78, 0x46270001},
- {0xc78, 0x45280001}, {0xc78, 0x44290001},
- {0xc78, 0x432a0001}, {0xc78, 0x422b0001},
- {0xc78, 0x292c0001}, {0xc78, 0x282d0001},
- {0xc78, 0x272e0001}, {0xc78, 0x262f0001},
- {0xc78, 0x0a300001}, {0xc78, 0x09310001},
- {0xc78, 0x08320001}, {0xc78, 0x07330001},
- {0xc78, 0x06340001}, {0xc78, 0x05350001},
- {0xc78, 0x04360001}, {0xc78, 0x03370001},
- {0xc78, 0x02380001}, {0xc78, 0x01390001},
- {0xc78, 0x013a0001}, {0xc78, 0x013b0001},
- {0xc78, 0x013c0001}, {0xc78, 0x013d0001},
- {0xc78, 0x013e0001}, {0xc78, 0x013f0001},
- {0xc78, 0xfc400001}, {0xc78, 0xfb410001},
- {0xc78, 0xfa420001}, {0xc78, 0xf9430001},
- {0xc78, 0xf8440001}, {0xc78, 0xf7450001},
- {0xc78, 0xf6460001}, {0xc78, 0xf5470001},
- {0xc78, 0xf4480001}, {0xc78, 0xf3490001},
- {0xc78, 0xf24a0001}, {0xc78, 0xf14b0001},
- {0xc78, 0xf04c0001}, {0xc78, 0xef4d0001},
- {0xc78, 0xee4e0001}, {0xc78, 0xed4f0001},
- {0xc78, 0xec500001}, {0xc78, 0xeb510001},
- {0xc78, 0xea520001}, {0xc78, 0xe9530001},
- {0xc78, 0xe8540001}, {0xc78, 0xe7550001},
- {0xc78, 0xe6560001}, {0xc78, 0xe5570001},
- {0xc78, 0xe4580001}, {0xc78, 0xe3590001},
- {0xc78, 0xa65a0001}, {0xc78, 0xa55b0001},
- {0xc78, 0xa45c0001}, {0xc78, 0xa35d0001},
- {0xc78, 0x675e0001}, {0xc78, 0x665f0001},
- {0xc78, 0x65600001}, {0xc78, 0x64610001},
- {0xc78, 0x63620001}, {0xc78, 0x62630001},
- {0xc78, 0x61640001}, {0xc78, 0x48650001},
- {0xc78, 0x47660001}, {0xc78, 0x46670001},
- {0xc78, 0x45680001}, {0xc78, 0x44690001},
- {0xc78, 0x436a0001}, {0xc78, 0x426b0001},
- {0xc78, 0x286c0001}, {0xc78, 0x276d0001},
- {0xc78, 0x266e0001}, {0xc78, 0x256f0001},
- {0xc78, 0x24700001}, {0xc78, 0x09710001},
- {0xc78, 0x08720001}, {0xc78, 0x07730001},
- {0xc78, 0x06740001}, {0xc78, 0x05750001},
- {0xc78, 0x04760001}, {0xc78, 0x03770001},
- {0xc78, 0x02780001}, {0xc78, 0x01790001},
- {0xc78, 0x017a0001}, {0xc78, 0x017b0001},
- {0xc78, 0x017c0001}, {0xc78, 0x017d0001},
- {0xc78, 0x017e0001}, {0xc78, 0x017f0001},
- {0xc50, 0x69553422},
- {0xc50, 0x69553420},
- {0x824, 0x00390204},
- {0xffff, 0xffffffff}
-};
-
-static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
- {0x00, 0x00030159}, {0x01, 0x00031284},
- {0x02, 0x00098000}, {0x03, 0x00039c63},
- {0x04, 0x000210e7}, {0x09, 0x0002044f},
- {0x0a, 0x0001a3f1}, {0x0b, 0x00014787},
- {0x0c, 0x000896fe}, {0x0d, 0x0000e02c},
- {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
- {0x19, 0x00000000}, {0x1a, 0x00030355},
- {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
- {0x1d, 0x000a1250}, {0x1e, 0x0000024f},
- {0x1f, 0x00000000}, {0x20, 0x0000b614},
- {0x21, 0x0006c000}, {0x22, 0x00000000},
- {0x23, 0x00001558}, {0x24, 0x00000060},
- {0x25, 0x00000483}, {0x26, 0x0004f000},
- {0x27, 0x000ec7d9}, {0x28, 0x00057730},
- {0x29, 0x00004783}, {0x2a, 0x00000001},
- {0x2b, 0x00021334}, {0x2a, 0x00000000},
- {0x2b, 0x00000054}, {0x2a, 0x00000001},
- {0x2b, 0x00000808}, {0x2b, 0x00053333},
- {0x2c, 0x0000000c}, {0x2a, 0x00000002},
- {0x2b, 0x00000808}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000003},
- {0x2b, 0x00000808}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000004},
- {0x2b, 0x00000808}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000005},
- {0x2b, 0x00000808}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000006},
- {0x2b, 0x00000709}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000007},
- {0x2b, 0x00000709}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000008},
- {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000009},
- {0x2b, 0x0000060a}, {0x2b, 0x00053333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
- {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
- {0x2b, 0x0000060a}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
- {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
- {0x2b, 0x0000060a}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
- {0x2b, 0x0000050b}, {0x2b, 0x00066666},
- {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
- {0x10, 0x0004000f}, {0x11, 0x000e31fc},
- {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
- {0x10, 0x0002000f}, {0x11, 0x000203f9},
- {0x10, 0x0003000f}, {0x11, 0x000ff500},
- {0x10, 0x00000000}, {0x11, 0x00000000},
- {0x10, 0x0008000f}, {0x11, 0x0003f100},
- {0x10, 0x0009000f}, {0x11, 0x00023100},
- {0x12, 0x00032000}, {0x12, 0x00071000},
- {0x12, 0x000b0000}, {0x12, 0x000fc000},
- {0x13, 0x000287b3}, {0x13, 0x000244b7},
- {0x13, 0x000204ab}, {0x13, 0x0001c49f},
- {0x13, 0x00018493}, {0x13, 0x0001429b},
- {0x13, 0x00010299}, {0x13, 0x0000c29c},
- {0x13, 0x000081a0}, {0x13, 0x000040ac},
- {0x13, 0x00000020}, {0x14, 0x0001944c},
- {0x14, 0x00059444}, {0x14, 0x0009944c},
- {0x14, 0x000d9444}, {0x15, 0x0000f474},
- {0x15, 0x0004f477}, {0x15, 0x0008f455},
- {0x15, 0x000cf455}, {0x16, 0x00000339},
- {0x16, 0x00040339}, {0x16, 0x00080339},
- {0x16, 0x000c0366}, {0x00, 0x00010159},
- {0x18, 0x0000f401}, {0xfe, 0x00000000},
- {0xfe, 0x00000000}, {0x1f, 0x00000003},
- {0xfe, 0x00000000}, {0xfe, 0x00000000},
- {0x1e, 0x00000247}, {0x1f, 0x00000000},
- {0x00, 0x00030159},
- {0xff, 0xffffffff}
-};
-
-static struct rtl8xxxu_rfregval rtl8723bu_radioa_1t_init_table[] = {
- {0x00, 0x00010000}, {0xb0, 0x000dffe0},
- {0xfe, 0x00000000}, {0xfe, 0x00000000},
- {0xfe, 0x00000000}, {0xb1, 0x00000018},
- {0xfe, 0x00000000}, {0xfe, 0x00000000},
- {0xfe, 0x00000000}, {0xb2, 0x00084c00},
- {0xb5, 0x0000d2cc}, {0xb6, 0x000925aa},
- {0xb7, 0x00000010}, {0xb8, 0x0000907f},
- {0x5c, 0x00000002}, {0x7c, 0x00000002},
- {0x7e, 0x00000005}, {0x8b, 0x0006fc00},
- {0xb0, 0x000ff9f0}, {0x1c, 0x000739d2},
- {0x1e, 0x00000000}, {0xdf, 0x00000780},
- {0x50, 0x00067435},
- /*
- * The 8723bu vendor driver indicates that bit 8 should be set in
- * 0x51 for package types TFBGA90, TFBGA80, and TFBGA79. However
- * they never actually check the package type - and just default
- * to not setting it.
- */
- {0x51, 0x0006b04e},
- {0x52, 0x000007d2}, {0x53, 0x00000000},
- {0x54, 0x00050400}, {0x55, 0x0004026e},
- {0xdd, 0x0000004c}, {0x70, 0x00067435},
- /*
- * 0x71 has same package type condition as for register 0x51
- */
- {0x71, 0x0006b04e},
- {0x72, 0x000007d2}, {0x73, 0x00000000},
- {0x74, 0x00050400}, {0x75, 0x0004026e},
- {0xef, 0x00000100}, {0x34, 0x0000add7},
- {0x35, 0x00005c00}, {0x34, 0x00009dd4},
- {0x35, 0x00005000}, {0x34, 0x00008dd1},
- {0x35, 0x00004400}, {0x34, 0x00007dce},
- {0x35, 0x00003800}, {0x34, 0x00006cd1},
- {0x35, 0x00004400}, {0x34, 0x00005cce},
- {0x35, 0x00003800}, {0x34, 0x000048ce},
- {0x35, 0x00004400}, {0x34, 0x000034ce},
- {0x35, 0x00003800}, {0x34, 0x00002451},
- {0x35, 0x00004400}, {0x34, 0x0000144e},
- {0x35, 0x00003800}, {0x34, 0x00000051},
- {0x35, 0x00004400}, {0xef, 0x00000000},
- {0xef, 0x00000100}, {0xed, 0x00000010},
- {0x44, 0x0000add7}, {0x44, 0x00009dd4},
- {0x44, 0x00008dd1}, {0x44, 0x00007dce},
- {0x44, 0x00006cc1}, {0x44, 0x00005cce},
- {0x44, 0x000044d1}, {0x44, 0x000034ce},
- {0x44, 0x00002451}, {0x44, 0x0000144e},
- {0x44, 0x00000051}, {0xef, 0x00000000},
- {0xed, 0x00000000}, {0x7f, 0x00020080},
- {0xef, 0x00002000}, {0x3b, 0x000380ef},
- {0x3b, 0x000302fe}, {0x3b, 0x00028ce6},
- {0x3b, 0x000200bc}, {0x3b, 0x000188a5},
- {0x3b, 0x00010fbc}, {0x3b, 0x00008f71},
- {0x3b, 0x00000900}, {0xef, 0x00000000},
- {0xed, 0x00000001}, {0x40, 0x000380ef},
- {0x40, 0x000302fe}, {0x40, 0x00028ce6},
- {0x40, 0x000200bc}, {0x40, 0x000188a5},
- {0x40, 0x00010fbc}, {0x40, 0x00008f71},
- {0x40, 0x00000900}, {0xed, 0x00000000},
- {0x82, 0x00080000}, {0x83, 0x00008000},
- {0x84, 0x00048d80}, {0x85, 0x00068000},
- {0xa2, 0x00080000}, {0xa3, 0x00008000},
- {0xa4, 0x00048d80}, {0xa5, 0x00068000},
- {0xed, 0x00000002}, {0xef, 0x00000002},
- {0x56, 0x00000032}, {0x76, 0x00000032},
- {0x01, 0x00000780},
- {0xff, 0xffffffff}
-};
-
-static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
- {0x00, 0x00030159}, {0x01, 0x00031284},
- {0x02, 0x00098000}, {0x03, 0x00018c63},
- {0x04, 0x000210e7}, {0x09, 0x0002044f},
- {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
- {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
- {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
- {0x19, 0x00000000}, {0x1a, 0x00010255},
- {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
- {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
- {0x1f, 0x00080001}, {0x20, 0x0000b614},
- {0x21, 0x0006c000}, {0x22, 0x00000000},
- {0x23, 0x00001558}, {0x24, 0x00000060},
- {0x25, 0x00000483}, {0x26, 0x0004f000},
- {0x27, 0x000ec7d9}, {0x28, 0x000577c0},
- {0x29, 0x00004783}, {0x2a, 0x00000001},
- {0x2b, 0x00021334}, {0x2a, 0x00000000},
- {0x2b, 0x00000054}, {0x2a, 0x00000001},
- {0x2b, 0x00000808}, {0x2b, 0x00053333},
- {0x2c, 0x0000000c}, {0x2a, 0x00000002},
- {0x2b, 0x00000808}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000003},
- {0x2b, 0x00000808}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000004},
- {0x2b, 0x00000808}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000005},
- {0x2b, 0x00000808}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000006},
- {0x2b, 0x00000709}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000007},
- {0x2b, 0x00000709}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000008},
- {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000009},
- {0x2b, 0x0000060a}, {0x2b, 0x00053333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
- {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
- {0x2b, 0x0000060a}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
- {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
- {0x2b, 0x0000060a}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
- {0x2b, 0x0000050b}, {0x2b, 0x00066666},
- {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
- {0x10, 0x0004000f}, {0x11, 0x000e31fc},
- {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
- {0x10, 0x0002000f}, {0x11, 0x000203f9},
- {0x10, 0x0003000f}, {0x11, 0x000ff500},
- {0x10, 0x00000000}, {0x11, 0x00000000},
- {0x10, 0x0008000f}, {0x11, 0x0003f100},
- {0x10, 0x0009000f}, {0x11, 0x00023100},
- {0x12, 0x00032000}, {0x12, 0x00071000},
- {0x12, 0x000b0000}, {0x12, 0x000fc000},
- {0x13, 0x000287b3}, {0x13, 0x000244b7},
- {0x13, 0x000204ab}, {0x13, 0x0001c49f},
- {0x13, 0x00018493}, {0x13, 0x0001429b},
- {0x13, 0x00010299}, {0x13, 0x0000c29c},
- {0x13, 0x000081a0}, {0x13, 0x000040ac},
- {0x13, 0x00000020}, {0x14, 0x0001944c},
- {0x14, 0x00059444}, {0x14, 0x0009944c},
- {0x14, 0x000d9444}, {0x15, 0x0000f424},
- {0x15, 0x0004f424}, {0x15, 0x0008f424},
- {0x15, 0x000cf424}, {0x16, 0x000e0330},
- {0x16, 0x000a0330}, {0x16, 0x00060330},
- {0x16, 0x00020330}, {0x00, 0x00010159},
- {0x18, 0x0000f401}, {0xfe, 0x00000000},
- {0xfe, 0x00000000}, {0x1f, 0x00080003},
- {0xfe, 0x00000000}, {0xfe, 0x00000000},
- {0x1e, 0x00044457}, {0x1f, 0x00080000},
- {0x00, 0x00030159},
- {0xff, 0xffffffff}
-};
-
-static struct rtl8xxxu_rfregval rtl8192cu_radiob_2t_init_table[] = {
- {0x00, 0x00030159}, {0x01, 0x00031284},
- {0x02, 0x00098000}, {0x03, 0x00018c63},
- {0x04, 0x000210e7}, {0x09, 0x0002044f},
- {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
- {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
- {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
- {0x12, 0x00032000}, {0x12, 0x00071000},
- {0x12, 0x000b0000}, {0x12, 0x000fc000},
- {0x13, 0x000287af}, {0x13, 0x000244b7},
- {0x13, 0x000204ab}, {0x13, 0x0001c49f},
- {0x13, 0x00018493}, {0x13, 0x00014297},
- {0x13, 0x00010295}, {0x13, 0x0000c298},
- {0x13, 0x0000819c}, {0x13, 0x000040a8},
- {0x13, 0x0000001c}, {0x14, 0x0001944c},
- {0x14, 0x00059444}, {0x14, 0x0009944c},
- {0x14, 0x000d9444}, {0x15, 0x0000f424},
- {0x15, 0x0004f424}, {0x15, 0x0008f424},
- {0x15, 0x000cf424}, {0x16, 0x000e0330},
- {0x16, 0x000a0330}, {0x16, 0x00060330},
- {0x16, 0x00020330},
- {0xff, 0xffffffff}
-};
-
-static struct rtl8xxxu_rfregval rtl8192cu_radioa_1t_init_table[] = {
- {0x00, 0x00030159}, {0x01, 0x00031284},
- {0x02, 0x00098000}, {0x03, 0x00018c63},
- {0x04, 0x000210e7}, {0x09, 0x0002044f},
- {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
- {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
- {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
- {0x19, 0x00000000}, {0x1a, 0x00010255},
- {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
- {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
- {0x1f, 0x00080001}, {0x20, 0x0000b614},
- {0x21, 0x0006c000}, {0x22, 0x00000000},
- {0x23, 0x00001558}, {0x24, 0x00000060},
- {0x25, 0x00000483}, {0x26, 0x0004f000},
- {0x27, 0x000ec7d9}, {0x28, 0x000577c0},
- {0x29, 0x00004783}, {0x2a, 0x00000001},
- {0x2b, 0x00021334}, {0x2a, 0x00000000},
- {0x2b, 0x00000054}, {0x2a, 0x00000001},
- {0x2b, 0x00000808}, {0x2b, 0x00053333},
- {0x2c, 0x0000000c}, {0x2a, 0x00000002},
- {0x2b, 0x00000808}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000003},
- {0x2b, 0x00000808}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000004},
- {0x2b, 0x00000808}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000005},
- {0x2b, 0x00000808}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000006},
- {0x2b, 0x00000709}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000007},
- {0x2b, 0x00000709}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000008},
- {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000009},
- {0x2b, 0x0000060a}, {0x2b, 0x00053333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
- {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
- {0x2b, 0x0000060a}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
- {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
- {0x2b, 0x0000060a}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
- {0x2b, 0x0000050b}, {0x2b, 0x00066666},
- {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
- {0x10, 0x0004000f}, {0x11, 0x000e31fc},
- {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
- {0x10, 0x0002000f}, {0x11, 0x000203f9},
- {0x10, 0x0003000f}, {0x11, 0x000ff500},
- {0x10, 0x00000000}, {0x11, 0x00000000},
- {0x10, 0x0008000f}, {0x11, 0x0003f100},
- {0x10, 0x0009000f}, {0x11, 0x00023100},
- {0x12, 0x00032000}, {0x12, 0x00071000},
- {0x12, 0x000b0000}, {0x12, 0x000fc000},
- {0x13, 0x000287b3}, {0x13, 0x000244b7},
- {0x13, 0x000204ab}, {0x13, 0x0001c49f},
- {0x13, 0x00018493}, {0x13, 0x0001429b},
- {0x13, 0x00010299}, {0x13, 0x0000c29c},
- {0x13, 0x000081a0}, {0x13, 0x000040ac},
- {0x13, 0x00000020}, {0x14, 0x0001944c},
- {0x14, 0x00059444}, {0x14, 0x0009944c},
- {0x14, 0x000d9444}, {0x15, 0x0000f405},
- {0x15, 0x0004f405}, {0x15, 0x0008f405},
- {0x15, 0x000cf405}, {0x16, 0x000e0330},
- {0x16, 0x000a0330}, {0x16, 0x00060330},
- {0x16, 0x00020330}, {0x00, 0x00010159},
- {0x18, 0x0000f401}, {0xfe, 0x00000000},
- {0xfe, 0x00000000}, {0x1f, 0x00080003},
- {0xfe, 0x00000000}, {0xfe, 0x00000000},
- {0x1e, 0x00044457}, {0x1f, 0x00080000},
- {0x00, 0x00030159},
- {0xff, 0xffffffff}
-};
-
-static struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = {
- {0x00, 0x00030159}, {0x01, 0x00031284},
- {0x02, 0x00098000}, {0x03, 0x00018c63},
- {0x04, 0x000210e7}, {0x09, 0x0002044f},
- {0x0a, 0x0001adb0}, {0x0b, 0x00054867},
- {0x0c, 0x0008992e}, {0x0d, 0x0000e529},
- {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
- {0x19, 0x00000000}, {0x1a, 0x00000255},
- {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
- {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
- {0x1f, 0x00080001}, {0x20, 0x0000b614},
- {0x21, 0x0006c000}, {0x22, 0x0000083c},
- {0x23, 0x00001558}, {0x24, 0x00000060},
- {0x25, 0x00000483}, {0x26, 0x0004f000},
- {0x27, 0x000ec7d9}, {0x28, 0x000977c0},
- {0x29, 0x00004783}, {0x2a, 0x00000001},
- {0x2b, 0x00021334}, {0x2a, 0x00000000},
- {0x2b, 0x00000054}, {0x2a, 0x00000001},
- {0x2b, 0x00000808}, {0x2b, 0x00053333},
- {0x2c, 0x0000000c}, {0x2a, 0x00000002},
- {0x2b, 0x00000808}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000003},
- {0x2b, 0x00000808}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000004},
- {0x2b, 0x00000808}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000005},
- {0x2b, 0x00000808}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000006},
- {0x2b, 0x00000709}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000007},
- {0x2b, 0x00000709}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000008},
- {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000009},
- {0x2b, 0x0000060a}, {0x2b, 0x00053333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
- {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
- {0x2b, 0x0000060a}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
- {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
- {0x2b, 0x0000060a}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
- {0x2b, 0x0000050b}, {0x2b, 0x00066666},
- {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
- {0x10, 0x0004000f}, {0x11, 0x000e31fc},
- {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
- {0x10, 0x0002000f}, {0x11, 0x000203f9},
- {0x10, 0x0003000f}, {0x11, 0x000ff500},
- {0x10, 0x00000000}, {0x11, 0x00000000},
- {0x10, 0x0008000f}, {0x11, 0x0003f100},
- {0x10, 0x0009000f}, {0x11, 0x00023100},
- {0x12, 0x000d8000}, {0x12, 0x00090000},
- {0x12, 0x00051000}, {0x12, 0x00012000},
- {0x13, 0x00028fb4}, {0x13, 0x00024fa8},
- {0x13, 0x000207a4}, {0x13, 0x0001c3b0},
- {0x13, 0x000183a4}, {0x13, 0x00014398},
- {0x13, 0x000101a4}, {0x13, 0x0000c198},
- {0x13, 0x000080a4}, {0x13, 0x00004098},
- {0x13, 0x00000000}, {0x14, 0x0001944c},
- {0x14, 0x00059444}, {0x14, 0x0009944c},
- {0x14, 0x000d9444}, {0x15, 0x0000f405},
- {0x15, 0x0004f405}, {0x15, 0x0008f405},
- {0x15, 0x000cf405}, {0x16, 0x000e0330},
- {0x16, 0x000a0330}, {0x16, 0x00060330},
- {0x16, 0x00020330}, {0x00, 0x00010159},
- {0x18, 0x0000f401}, {0xfe, 0x00000000},
- {0xfe, 0x00000000}, {0x1f, 0x00080003},
- {0xfe, 0x00000000}, {0xfe, 0x00000000},
- {0x1e, 0x00044457}, {0x1f, 0x00080000},
- {0x00, 0x00030159},
- {0xff, 0xffffffff}
-};
-
static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = {
{ /* RF_A */
.hssiparm1 = REG_FPGA0_XA_HSSI_PARM1,
@@ -1223,7 +627,7 @@ static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = {
},
};
-static const u32 rtl8723au_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = {
+const u32 rtl8xxxu_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = {
REG_OFDM0_XA_RX_IQ_IMBALANCE,
REG_OFDM0_XB_RX_IQ_IMBALANCE,
REG_OFDM0_ENERGY_CCA_THRES,
@@ -1235,7 +639,7 @@ static const u32 rtl8723au_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = {
REG_OFDM0_RX_IQ_EXT_ANTA
};
-static u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr)
+u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr)
{
struct usb_device *udev = priv->udev;
int len;
@@ -1255,7 +659,7 @@ static u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr)
return data;
}
-static u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr)
+u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr)
{
struct usb_device *udev = priv->udev;
int len;
@@ -1275,7 +679,7 @@ static u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr)
return data;
}
-static u32 rtl8xxxu_read32(struct rtl8xxxu_priv *priv, u16 addr)
+u32 rtl8xxxu_read32(struct rtl8xxxu_priv *priv, u16 addr)
{
struct usb_device *udev = priv->udev;
int len;
@@ -1295,7 +699,7 @@ static u32 rtl8xxxu_read32(struct rtl8xxxu_priv *priv, u16 addr)
return data;
}
-static int rtl8xxxu_write8(struct rtl8xxxu_priv *priv, u16 addr, u8 val)
+int rtl8xxxu_write8(struct rtl8xxxu_priv *priv, u16 addr, u8 val)
{
struct usb_device *udev = priv->udev;
int ret;
@@ -1315,7 +719,7 @@ static int rtl8xxxu_write8(struct rtl8xxxu_priv *priv, u16 addr, u8 val)
return ret;
}
-static int rtl8xxxu_write16(struct rtl8xxxu_priv *priv, u16 addr, u16 val)
+int rtl8xxxu_write16(struct rtl8xxxu_priv *priv, u16 addr, u16 val)
{
struct usb_device *udev = priv->udev;
int ret;
@@ -1334,7 +738,7 @@ static int rtl8xxxu_write16(struct rtl8xxxu_priv *priv, u16 addr, u16 val)
return ret;
}
-static int rtl8xxxu_write32(struct rtl8xxxu_priv *priv, u16 addr, u32 val)
+int rtl8xxxu_write32(struct rtl8xxxu_priv *priv, u16 addr, u32 val)
{
struct usb_device *udev = priv->udev;
int ret;
@@ -1393,8 +797,8 @@ write_error:
return -EAGAIN;
}
-static u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv,
- enum rtl8xxxu_rfpath path, u8 reg)
+u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv,
+ enum rtl8xxxu_rfpath path, u8 reg)
{
u32 hssia, val32, retval;
@@ -1438,11 +842,11 @@ static u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv,
* have write issues in high temperature conditions. We may have to
* retry writing them.
*/
-static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
- enum rtl8xxxu_rfpath path, u8 reg, u32 data)
+int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
+ enum rtl8xxxu_rfpath path, u8 reg, u32 data)
{
int ret, retval;
- u32 dataaddr;
+ u32 dataaddr, val32;
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_WRITE)
dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n",
@@ -1451,6 +855,12 @@ static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
data &= FPGA0_LSSI_PARM_DATA_MASK;
dataaddr = (reg << FPGA0_LSSI_PARM_ADDR_SHIFT) | data;
+ if (priv->rtl_chip == RTL8192E) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+ val32 &= ~0x20000;
+ rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+ }
+
/* Use XB for path B */
ret = rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].lssiparm, dataaddr);
if (ret != sizeof(dataaddr))
@@ -1460,11 +870,17 @@ static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
udelay(1);
+ if (priv->rtl_chip == RTL8192E) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+ val32 |= 0x20000;
+ rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+ }
+
return retval;
}
-static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv,
- struct h2c_cmd *h2c, int len)
+int
+rtl8xxxu_gen1_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c, int len)
{
struct device *dev = &priv->udev->dev;
int mbox_nr, retry, retval = 0;
@@ -1475,8 +891,7 @@ static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv,
mbox_nr = priv->next_mbox;
mbox_reg = REG_HMBOX_0 + (mbox_nr * 4);
- mbox_ext_reg = priv->fops->mbox_ext_reg +
- (mbox_nr * priv->fops->mbox_ext_width);
+ mbox_ext_reg = REG_HMBOX_EXT_0 + (mbox_nr * 2);
/*
* MBOX ready?
@@ -1498,19 +913,10 @@ static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv,
* Need to swap as it's being swapped again by rtl8xxxu_write16/32()
*/
if (len > sizeof(u32)) {
- if (priv->fops->mbox_ext_width == 4) {
- rtl8xxxu_write32(priv, mbox_ext_reg,
- le32_to_cpu(h2c->raw_wide.ext));
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
- dev_info(dev, "H2C_EXT %08x\n",
- le32_to_cpu(h2c->raw_wide.ext));
- } else {
- rtl8xxxu_write16(priv, mbox_ext_reg,
- le16_to_cpu(h2c->raw.ext));
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
- dev_info(dev, "H2C_EXT %04x\n",
- le16_to_cpu(h2c->raw.ext));
- }
+ rtl8xxxu_write16(priv, mbox_ext_reg, le16_to_cpu(h2c->raw.ext));
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+ dev_info(dev, "H2C_EXT %04x\n",
+ le16_to_cpu(h2c->raw.ext));
}
rtl8xxxu_write32(priv, mbox_reg, le32_to_cpu(h2c->raw.data));
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
@@ -1523,28 +929,58 @@ error:
return retval;
}
-static void rtl8723bu_write_btreg(struct rtl8xxxu_priv *priv, u8 reg, u8 data)
+int
+rtl8xxxu_gen2_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c, int len)
{
- struct h2c_cmd h2c;
- int reqnum = 0;
+ struct device *dev = &priv->udev->dev;
+ int mbox_nr, retry, retval = 0;
+ int mbox_reg, mbox_ext_reg;
+ u8 val8;
- memset(&h2c, 0, sizeof(struct h2c_cmd));
- h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER;
- h2c.bt_mp_oper.operreq = 0 | (reqnum << 4);
- h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE;
- h2c.bt_mp_oper.data = data;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper));
+ mutex_lock(&priv->h2c_mutex);
- reqnum++;
- memset(&h2c, 0, sizeof(struct h2c_cmd));
- h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER;
- h2c.bt_mp_oper.operreq = 0 | (reqnum << 4);
- h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE;
- h2c.bt_mp_oper.addr = reg;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper));
+ mbox_nr = priv->next_mbox;
+ mbox_reg = REG_HMBOX_0 + (mbox_nr * 4);
+ mbox_ext_reg = REG_HMBOX_EXT0_8723B + (mbox_nr * 4);
+
+ /*
+ * MBOX ready?
+ */
+ retry = 100;
+ do {
+ val8 = rtl8xxxu_read8(priv, REG_HMTFR);
+ if (!(val8 & BIT(mbox_nr)))
+ break;
+ } while (retry--);
+
+ if (!retry) {
+ dev_info(dev, "%s: Mailbox busy\n", __func__);
+ retval = -EBUSY;
+ goto error;
+ }
+
+ /*
+ * Need to swap as it's being swapped again by rtl8xxxu_write16/32()
+ */
+ if (len > sizeof(u32)) {
+ rtl8xxxu_write32(priv, mbox_ext_reg,
+ le32_to_cpu(h2c->raw_wide.ext));
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+ dev_info(dev, "H2C_EXT %08x\n",
+ le32_to_cpu(h2c->raw_wide.ext));
+ }
+ rtl8xxxu_write32(priv, mbox_reg, le32_to_cpu(h2c->raw.data));
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+ dev_info(dev, "H2C %08x\n", le32_to_cpu(h2c->raw.data));
+
+ priv->next_mbox = (mbox_nr + 1) % H2C_MAX_MBOX;
+
+error:
+ mutex_unlock(&priv->h2c_mutex);
+ return retval;
}
-static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_gen1_enable_rf(struct rtl8xxxu_priv *priv)
{
u8 val8;
u32 val32;
@@ -1566,7 +1002,7 @@ static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
val32 &= ~OFDM_RF_PATH_TX_MASK;
if (priv->tx_paths == 2)
val32 |= OFDM_RF_PATH_TX_A | OFDM_RF_PATH_TX_B;
- else if (priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c)
+ else if (priv->rtl_chip == RTL8192C || priv->rtl_chip == RTL8191C)
val32 |= OFDM_RF_PATH_TX_B;
else
val32 |= OFDM_RF_PATH_TX_A;
@@ -1588,13 +1024,11 @@ static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
}
-static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_gen1_disable_rf(struct rtl8xxxu_priv *priv)
{
u8 sps0;
u32 val32;
- rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
-
sps0 = rtl8xxxu_read8(priv, REG_SPS0_CTRL);
/* RF RX code for preamble power saving */
@@ -1629,8 +1063,7 @@ static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_SPS0_CTRL, sps0);
}
-
-static void rtl8723a_stop_tx_beacon(struct rtl8xxxu_priv *priv)
+static void rtl8xxxu_stop_tx_beacon(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -1654,7 +1087,7 @@ static void rtl8723a_stop_tx_beacon(struct rtl8xxxu_priv *priv)
*
* Note: We index from 0 in the code
*/
-static int rtl8723a_channel_to_group(int channel)
+static int rtl8xxxu_gen1_channel_to_group(int channel)
{
int group;
@@ -1668,7 +1101,10 @@ static int rtl8723a_channel_to_group(int channel)
return group;
}
-static int rtl8723b_channel_to_group(int channel)
+/*
+ * Valid for rtl8723bu and rtl8192eu
+ */
+int rtl8xxxu_gen2_channel_to_group(int channel)
{
int group;
@@ -1686,7 +1122,7 @@ static int rtl8723b_channel_to_group(int channel)
return group;
}
-static void rtl8723au_config_channel(struct ieee80211_hw *hw)
+void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
u32 val32, rsr;
@@ -1808,7 +1244,7 @@ static void rtl8723au_config_channel(struct ieee80211_hw *hw)
}
}
-static void rtl8723bu_config_channel(struct ieee80211_hw *hw)
+void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
u32 val32, rsr;
@@ -1938,22 +1374,34 @@ static void rtl8723bu_config_channel(struct ieee80211_hw *hw)
}
}
-static void
-rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+void
+rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
{
+ struct rtl8xxxu_power_base *power_base = priv->power_base;
u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS];
u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS];
u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b;
u8 val8;
int group, i;
- group = rtl8723a_channel_to_group(channel);
+ group = rtl8xxxu_gen1_channel_to_group(channel);
- cck[0] = priv->cck_tx_power_index_A[group];
- cck[1] = priv->cck_tx_power_index_B[group];
+ cck[0] = priv->cck_tx_power_index_A[group] - 1;
+ cck[1] = priv->cck_tx_power_index_B[group] - 1;
+
+ if (priv->hi_pa) {
+ if (cck[0] > 0x20)
+ cck[0] = 0x20;
+ if (cck[1] > 0x20)
+ cck[1] = 0x20;
+ }
ofdm[0] = priv->ht40_1s_tx_power_index_A[group];
ofdm[1] = priv->ht40_1s_tx_power_index_B[group];
+ if (ofdm[0])
+ ofdm[0] -= 1;
+ if (ofdm[1])
+ ofdm[1] -= 1;
ofdmbase[0] = ofdm[0] + priv->ofdm_tx_power_index_diff[group].a;
ofdmbase[1] = ofdm[1] + priv->ofdm_tx_power_index_diff[group].b;
@@ -2009,27 +1457,39 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
ofdmbase[0] << 16 | ofdmbase[0] << 24;
ofdm_b = ofdmbase[1] | ofdmbase[1] << 8 |
ofdmbase[1] << 16 | ofdmbase[1] << 24;
- rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm_a);
- rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm_b);
- rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm_a);
- rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm_b);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06,
+ ofdm_a + power_base->reg_0e00);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06,
+ ofdm_b + power_base->reg_0830);
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24,
+ ofdm_a + power_base->reg_0e04);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24,
+ ofdm_b + power_base->reg_0834);
mcs_a = mcsbase[0] | mcsbase[0] << 8 |
mcsbase[0] << 16 | mcsbase[0] << 24;
mcs_b = mcsbase[1] | mcsbase[1] << 8 |
mcsbase[1] << 16 | mcsbase[1] << 24;
- rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs_a);
- rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs_b);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00,
+ mcs_a + power_base->reg_0e10);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00,
+ mcs_b + power_base->reg_083c);
- rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs_a);
- rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs_b);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04,
+ mcs_a + power_base->reg_0e14);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04,
+ mcs_b + power_base->reg_0848);
- rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs_a);
- rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs_b);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08,
+ mcs_a + power_base->reg_0e18);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08,
+ mcs_b + power_base->reg_084c);
- rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs_a);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12,
+ mcs_a + power_base->reg_0e1c);
for (i = 0; i < 3; i++) {
if (i != 2)
val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0;
@@ -2037,7 +1497,8 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0;
rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8);
}
- rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs_b);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12,
+ mcs_b + power_base->reg_0868);
for (i = 0; i < 3; i++) {
if (i != 2)
val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0;
@@ -2047,45 +1508,6 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
}
}
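
The TX AGC writes above pack one per-rate power index into all four byte lanes of a 32-bit register, then add a per-register base from priv->power_base. A standalone sketch of that packing (the offset value is illustrative; note the plain 32-bit addition only stays per-lane while no byte overflows):

#include <stdint.h>
#include <stdio.h>

/* Replicate a per-rate power index into all four byte lanes of a
 * TX AGC register value, as with mcs_a/mcs_b/ofdm_a/ofdm_b above. */
static uint32_t agc_replicate(uint8_t idx)
{
	return idx | idx << 8 | (uint32_t)idx << 16 | (uint32_t)idx << 24;
}

int main(void)
{
	uint8_t mcsbase = 0x28;
	uint32_t reg_0e10 = 0x02020202;	/* illustrative power_base term */

	/* Equivalent to mcs_a + power_base->reg_0e10: each lane gains
	 * its own offset as long as no lane carries into the next. */
	printf("MCS03..MCS00 AGC = %08x\n",
	       (unsigned)(agc_replicate(mcsbase) + reg_0e10));
	return 0;
}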
-static void
-rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
-{
- u32 val32, ofdm, mcs;
- u8 cck, ofdmbase, mcsbase;
- int group, tx_idx;
-
- tx_idx = 0;
- group = rtl8723b_channel_to_group(channel);
-
- cck = priv->cck_tx_power_index_B[group];
- val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
- val32 &= 0xffff00ff;
- val32 |= (cck << 8);
- rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
- val32 &= 0xff;
- val32 |= ((cck << 8) | (cck << 16) | (cck << 24));
- rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
-
- ofdmbase = priv->ht40_1s_tx_power_index_B[group];
- ofdmbase += priv->ofdm_tx_power_diff[tx_idx].b;
- ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24;
-
- rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm);
- rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm);
-
- mcsbase = priv->ht40_1s_tx_power_index_B[group];
- if (ht40)
- mcsbase += priv->ht40_tx_power_diff[tx_idx++].b;
- else
- mcsbase += priv->ht20_tx_power_diff[tx_idx++].b;
- mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24;
-
- rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs);
- rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs);
-}
-
static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv,
enum nl80211_iftype linktype)
{
@@ -2191,11 +1613,11 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
if (val32 & SYS_CFG_BT_FUNC) {
if (priv->chip_cut >= 3) {
sprintf(priv->chip_name, "8723BU");
- priv->rtlchip = 0x8723b;
+ priv->rtl_chip = RTL8723B;
} else {
sprintf(priv->chip_name, "8723AU");
priv->usb_interrupts = 1;
- priv->rtlchip = 0x8723a;
+ priv->rtl_chip = RTL8723A;
}
priv->rf_paths = 1;
@@ -2213,19 +1635,20 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
} else if (val32 & SYS_CFG_TYPE_ID) {
bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
bonding &= HPON_FSM_BONDING_MASK;
- if (priv->chip_cut >= 3) {
+ if (priv->fops->tx_desc_size ==
+ sizeof(struct rtl8xxxu_txdesc40)) {
if (bonding == HPON_FSM_BONDING_1T2R) {
sprintf(priv->chip_name, "8191EU");
priv->rf_paths = 2;
priv->rx_paths = 2;
priv->tx_paths = 1;
- priv->rtlchip = 0x8191e;
+ priv->rtl_chip = RTL8191E;
} else {
sprintf(priv->chip_name, "8192EU");
priv->rf_paths = 2;
priv->rx_paths = 2;
priv->tx_paths = 2;
- priv->rtlchip = 0x8192e;
+ priv->rtl_chip = RTL8192E;
}
} else if (bonding == HPON_FSM_BONDING_1T2R) {
sprintf(priv->chip_name, "8191CU");
@@ -2233,14 +1656,14 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
priv->rx_paths = 2;
priv->tx_paths = 1;
priv->usb_interrupts = 1;
- priv->rtlchip = 0x8191c;
+ priv->rtl_chip = RTL8191C;
} else {
sprintf(priv->chip_name, "8192CU");
priv->rf_paths = 2;
priv->rx_paths = 2;
priv->tx_paths = 2;
priv->usb_interrupts = 1;
- priv->rtlchip = 0x8192c;
+ priv->rtl_chip = RTL8192C;
}
priv->has_wifi = 1;
} else {
@@ -2248,15 +1671,15 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
priv->rf_paths = 1;
priv->rx_paths = 1;
priv->tx_paths = 1;
- priv->rtlchip = 0x8188c;
+ priv->rtl_chip = RTL8188C;
priv->usb_interrupts = 1;
priv->has_wifi = 1;
}
- switch (priv->rtlchip) {
- case 0x8188e:
- case 0x8192e:
- case 0x8723b:
+ switch (priv->rtl_chip) {
+ case RTL8188E:
+ case RTL8192E:
+ case RTL8723B:
switch (val32 & SYS_CFG_VENDOR_EXT_MASK) {
case SYS_CFG_VENDOR_ID_TSMC:
sprintf(priv->chip_vendor, "TSMC");
@@ -2326,241 +1749,6 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
return 0;
}
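
The bonding branch above now keys on the fops TX descriptor size instead of the chip cut: gen2-style parts use the 40-byte descriptor. A small sketch of the idea, with stand-in struct definitions:

#include <stdio.h>

/* Stand-ins for rtl8xxxu_txdesc32/rtl8xxxu_txdesc40: the descriptor
 * size recorded in the per-chip fops identifies the generation
 * without guessing from the chip cut. */
struct txdesc32 { unsigned char raw[32]; };
struct txdesc40 { unsigned char raw[40]; };

struct fops { unsigned int tx_desc_size; };

int main(void)
{
	struct fops fops = { .tx_desc_size = sizeof(struct txdesc40) };

	if (fops.tx_desc_size == sizeof(struct txdesc40))
		printf("gen2 part (40 byte TX descriptors)\n");
	else
		printf("gen1 part (32 byte TX descriptors)\n");
	return 0;
}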
-static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
-{
- struct rtl8723au_efuse *efuse = &priv->efuse_wifi.efuse8723;
-
- if (efuse->rtl_id != cpu_to_le16(0x8129))
- return -EINVAL;
-
- ether_addr_copy(priv->mac_addr, efuse->mac_addr);
-
- memcpy(priv->cck_tx_power_index_A,
- efuse->cck_tx_power_index_A,
- sizeof(efuse->cck_tx_power_index_A));
- memcpy(priv->cck_tx_power_index_B,
- efuse->cck_tx_power_index_B,
- sizeof(efuse->cck_tx_power_index_B));
-
- memcpy(priv->ht40_1s_tx_power_index_A,
- efuse->ht40_1s_tx_power_index_A,
- sizeof(efuse->ht40_1s_tx_power_index_A));
- memcpy(priv->ht40_1s_tx_power_index_B,
- efuse->ht40_1s_tx_power_index_B,
- sizeof(efuse->ht40_1s_tx_power_index_B));
-
- memcpy(priv->ht20_tx_power_index_diff,
- efuse->ht20_tx_power_index_diff,
- sizeof(efuse->ht20_tx_power_index_diff));
- memcpy(priv->ofdm_tx_power_index_diff,
- efuse->ofdm_tx_power_index_diff,
- sizeof(efuse->ofdm_tx_power_index_diff));
-
- memcpy(priv->ht40_max_power_offset,
- efuse->ht40_max_power_offset,
- sizeof(efuse->ht40_max_power_offset));
- memcpy(priv->ht20_max_power_offset,
- efuse->ht20_max_power_offset,
- sizeof(efuse->ht20_max_power_offset));
-
- if (priv->efuse_wifi.efuse8723.version >= 0x01) {
- priv->has_xtalk = 1;
- priv->xtalk = priv->efuse_wifi.efuse8723.xtal_k & 0x3f;
- }
- dev_info(&priv->udev->dev, "Vendor: %.7s\n",
- efuse->vendor_name);
- dev_info(&priv->udev->dev, "Product: %.41s\n",
- efuse->device_name);
- return 0;
-}
-
-static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv)
-{
- struct rtl8723bu_efuse *efuse = &priv->efuse_wifi.efuse8723bu;
- int i;
-
- if (efuse->rtl_id != cpu_to_le16(0x8129))
- return -EINVAL;
-
- ether_addr_copy(priv->mac_addr, efuse->mac_addr);
-
- memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base,
- sizeof(efuse->tx_power_index_A.cck_base));
- memcpy(priv->cck_tx_power_index_B, efuse->tx_power_index_B.cck_base,
- sizeof(efuse->tx_power_index_B.cck_base));
-
- memcpy(priv->ht40_1s_tx_power_index_A,
- efuse->tx_power_index_A.ht40_base,
- sizeof(efuse->tx_power_index_A.ht40_base));
- memcpy(priv->ht40_1s_tx_power_index_B,
- efuse->tx_power_index_B.ht40_base,
- sizeof(efuse->tx_power_index_B.ht40_base));
-
- priv->ofdm_tx_power_diff[0].a =
- efuse->tx_power_index_A.ht20_ofdm_1s_diff.a;
- priv->ofdm_tx_power_diff[0].b =
- efuse->tx_power_index_B.ht20_ofdm_1s_diff.a;
-
- priv->ht20_tx_power_diff[0].a =
- efuse->tx_power_index_A.ht20_ofdm_1s_diff.b;
- priv->ht20_tx_power_diff[0].b =
- efuse->tx_power_index_B.ht20_ofdm_1s_diff.b;
-
- priv->ht40_tx_power_diff[0].a = 0;
- priv->ht40_tx_power_diff[0].b = 0;
-
- for (i = 1; i < RTL8723B_TX_COUNT; i++) {
- priv->ofdm_tx_power_diff[i].a =
- efuse->tx_power_index_A.pwr_diff[i - 1].ofdm;
- priv->ofdm_tx_power_diff[i].b =
- efuse->tx_power_index_B.pwr_diff[i - 1].ofdm;
-
- priv->ht20_tx_power_diff[i].a =
- efuse->tx_power_index_A.pwr_diff[i - 1].ht20;
- priv->ht20_tx_power_diff[i].b =
- efuse->tx_power_index_B.pwr_diff[i - 1].ht20;
-
- priv->ht40_tx_power_diff[i].a =
- efuse->tx_power_index_A.pwr_diff[i - 1].ht40;
- priv->ht40_tx_power_diff[i].b =
- efuse->tx_power_index_B.pwr_diff[i - 1].ht40;
- }
-
- priv->has_xtalk = 1;
- priv->xtalk = priv->efuse_wifi.efuse8723bu.xtal_k & 0x3f;
-
- dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
- dev_info(&priv->udev->dev, "Product: %.41s\n", efuse->device_name);
-
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
- int i;
- unsigned char *raw = priv->efuse_wifi.raw;
-
- dev_info(&priv->udev->dev,
- "%s: dumping efuse (0x%02zx bytes):\n",
- __func__, sizeof(struct rtl8723bu_efuse));
- for (i = 0; i < sizeof(struct rtl8723bu_efuse); i += 8) {
- dev_info(&priv->udev->dev, "%02x: "
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
- raw[i], raw[i + 1], raw[i + 2],
- raw[i + 3], raw[i + 4], raw[i + 5],
- raw[i + 6], raw[i + 7]);
- }
- }
-
- return 0;
-}
-
-#ifdef CONFIG_RTL8XXXU_UNTESTED
-
-static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
-{
- struct rtl8192cu_efuse *efuse = &priv->efuse_wifi.efuse8192;
- int i;
-
- if (efuse->rtl_id != cpu_to_le16(0x8129))
- return -EINVAL;
-
- ether_addr_copy(priv->mac_addr, efuse->mac_addr);
-
- memcpy(priv->cck_tx_power_index_A,
- efuse->cck_tx_power_index_A,
- sizeof(efuse->cck_tx_power_index_A));
- memcpy(priv->cck_tx_power_index_B,
- efuse->cck_tx_power_index_B,
- sizeof(efuse->cck_tx_power_index_B));
-
- memcpy(priv->ht40_1s_tx_power_index_A,
- efuse->ht40_1s_tx_power_index_A,
- sizeof(efuse->ht40_1s_tx_power_index_A));
- memcpy(priv->ht40_1s_tx_power_index_B,
- efuse->ht40_1s_tx_power_index_B,
- sizeof(efuse->ht40_1s_tx_power_index_B));
- memcpy(priv->ht40_2s_tx_power_index_diff,
- efuse->ht40_2s_tx_power_index_diff,
- sizeof(efuse->ht40_2s_tx_power_index_diff));
-
- memcpy(priv->ht20_tx_power_index_diff,
- efuse->ht20_tx_power_index_diff,
- sizeof(efuse->ht20_tx_power_index_diff));
- memcpy(priv->ofdm_tx_power_index_diff,
- efuse->ofdm_tx_power_index_diff,
- sizeof(efuse->ofdm_tx_power_index_diff));
-
- memcpy(priv->ht40_max_power_offset,
- efuse->ht40_max_power_offset,
- sizeof(efuse->ht40_max_power_offset));
- memcpy(priv->ht20_max_power_offset,
- efuse->ht20_max_power_offset,
- sizeof(efuse->ht20_max_power_offset));
-
- dev_info(&priv->udev->dev, "Vendor: %.7s\n",
- efuse->vendor_name);
- dev_info(&priv->udev->dev, "Product: %.20s\n",
- efuse->device_name);
-
- if (efuse->rf_regulatory & 0x20) {
- sprintf(priv->chip_name, "8188RU");
- priv->hi_pa = 1;
- }
-
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
- unsigned char *raw = priv->efuse_wifi.raw;
-
- dev_info(&priv->udev->dev,
- "%s: dumping efuse (0x%02zx bytes):\n",
- __func__, sizeof(struct rtl8192cu_efuse));
- for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8) {
- dev_info(&priv->udev->dev, "%02x: "
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
- raw[i], raw[i + 1], raw[i + 2],
- raw[i + 3], raw[i + 4], raw[i + 5],
- raw[i + 6], raw[i + 7]);
- }
- }
- return 0;
-}
-
-#endif
-
-static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
-{
- struct rtl8192eu_efuse *efuse = &priv->efuse_wifi.efuse8192eu;
- int i;
-
- if (efuse->rtl_id != cpu_to_le16(0x8129))
- return -EINVAL;
-
- ether_addr_copy(priv->mac_addr, efuse->mac_addr);
-
- priv->has_xtalk = 1;
- priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f;
-
- dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
- dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
- dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
-
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
- unsigned char *raw = priv->efuse_wifi.raw;
-
- dev_info(&priv->udev->dev,
- "%s: dumping efuse (0x%02zx bytes):\n",
- __func__, sizeof(struct rtl8192eu_efuse));
- for (i = 0; i < sizeof(struct rtl8192eu_efuse); i += 8) {
- dev_info(&priv->udev->dev, "%02x: "
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
- raw[i], raw[i + 1], raw[i + 2],
- raw[i + 3], raw[i + 4], raw[i + 5],
- raw[i + 6], raw[i + 7]);
- }
- }
- /*
- * Temporarily disable 8192eu support
- */
- return -EINVAL;
- return 0;
-}
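
Each of the parsers removed above ends with the same RTL8XXXU_DEBUG_EFUSE dump: offset first, then eight bytes per line. A standalone version of that pattern (assuming, as the driver does, a length that is a multiple of eight):

#include <stdio.h>

/* Dump a buffer eight bytes per line, offset first - the same layout
 * the dev_info() loops above produce for the raw efuse. */
static void dump_efuse(const unsigned char *raw, size_t len)
{
	size_t i;

	for (i = 0; i < len; i += 8)
		printf("%02zx: %02x %02x %02x %02x %02x %02x %02x %02x\n",
		       i, raw[i], raw[i + 1], raw[i + 2], raw[i + 3],
		       raw[i + 4], raw[i + 5], raw[i + 6], raw[i + 7]);
}

int main(void)
{
	unsigned char raw[16] = {
		0x29, 0x81, 0xff, 0x00, 0x12, 0x34, 0x56, 0x78,
		0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33,
	};

	dump_efuse(raw, sizeof(raw));
	return 0;
}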
-
static int
rtl8xxxu_read_efuse8(struct rtl8xxxu_priv *priv, u16 offset, u8 *data)
{
@@ -2708,36 +1896,11 @@ exit:
return ret;
}
-static void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u16 sys_func;
-
- val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
- val8 &= ~BIT(0);
- rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
-
- sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC);
- sys_func &= ~SYS_FUNC_CPU_ENABLE;
- rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
-
- val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
- val8 |= BIT(0);
- rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
-
- sys_func |= SYS_FUNC_CPU_ENABLE;
- rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
-}
-
-static void rtl8723bu_reset_8051(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv)
{
u8 val8;
u16 sys_func;
- val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL);
- val8 &= ~BIT(1);
- rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
-
val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
val8 &= ~BIT(0);
rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
@@ -2746,10 +1909,6 @@ static void rtl8723bu_reset_8051(struct rtl8xxxu_priv *priv)
sys_func &= ~SYS_FUNC_CPU_ENABLE;
rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
- val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL);
- val8 &= ~BIT(1);
- rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
-
val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
val8 |= BIT(0);
rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
@@ -2806,7 +1965,7 @@ static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv)
/*
* Init H2C command
*/
- if (priv->rtlchip == 0x8723b)
+ if (priv->rtl_chip == RTL8723B)
rtl8xxxu_write8(priv, REG_HMTFR, 0x0f);
exit:
return ret;
@@ -2893,7 +2052,7 @@ fw_abort:
return ret;
}
-static int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name)
+int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name)
{
struct device *dev = &priv->udev->dev;
const struct firmware *fw;
@@ -2942,78 +2101,7 @@ exit:
return ret;
}
-static int rtl8723au_load_firmware(struct rtl8xxxu_priv *priv)
-{
- char *fw_name;
- int ret;
-
- switch (priv->chip_cut) {
- case 0:
- fw_name = "/*(DEBLOBBED)*/";
- break;
- case 1:
- if (priv->enable_bluetooth)
- fw_name = "/*(DEBLOBBED)*/";
- else
- fw_name = "/*(DEBLOBBED)*/";
-
- break;
- default:
- return -EINVAL;
- }
-
- ret = rtl8xxxu_load_firmware(priv, fw_name);
- return ret;
-}
-
-static int rtl8723bu_load_firmware(struct rtl8xxxu_priv *priv)
-{
- char *fw_name;
- int ret;
-
- if (priv->enable_bluetooth)
- fw_name = "/*(DEBLOBBED)*/";
- else
- fw_name = "/*(DEBLOBBED)*/";
-
- ret = rtl8xxxu_load_firmware(priv, fw_name);
- return ret;
-}
-
-#ifdef CONFIG_RTL8XXXU_UNTESTED
-
-static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
-{
- char *fw_name;
- int ret;
-
- if (!priv->vendor_umc)
- fw_name = "/*(DEBLOBBED)*/";
- else if (priv->chip_cut || priv->rtlchip == 0x8192c)
- fw_name = "/*(DEBLOBBED)*/";
- else
- fw_name = "/*(DEBLOBBED)*/";
-
- ret = rtl8xxxu_load_firmware(priv, fw_name);
-
- return ret;
-}
-
-#endif
-
-static int rtl8192eu_load_firmware(struct rtl8xxxu_priv *priv)
-{
- char *fw_name;
- int ret;
-
- fw_name = "/*(DEBLOBBED)*/";
-
- ret = rtl8xxxu_load_firmware(priv, fw_name);
-
- return ret;
-}
-
-static void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv)
{
u16 val16;
int i = 100;
@@ -3040,47 +2128,10 @@ static void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv)
}
}
-static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv)
-{
- u32 val32;
-
- val32 = rtl8xxxu_read32(priv, 0x64);
- val32 &= ~(BIT(20) | BIT(24));
- rtl8xxxu_write32(priv, 0x64, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG);
- val32 &= ~BIT(4);
- rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG);
- val32 |= BIT(3);
- rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
- val32 |= BIT(24);
- rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
- val32 &= ~BIT(23);
- rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER);
- val32 |= (BIT(0) | BIT(1));
- rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_RFE_CTRL_ANTA_SRC);
- val32 &= 0xffffff00;
- val32 |= 0x77;
- rtl8xxxu_write32(priv, REG_RFE_CTRL_ANTA_SRC, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_PWR_DATA);
- val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
- rtl8xxxu_write32(priv, REG_PWR_DATA, val32);
-}
-
static int
-rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array)
+rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv)
{
+ struct rtl8xxxu_reg8val *array = priv->fops->mactable;
int i, ret;
u16 reg;
u8 val;
@@ -3095,19 +2146,20 @@ rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array)
ret = rtl8xxxu_write8(priv, reg, val);
if (ret != 1) {
dev_warn(&priv->udev->dev,
- "Failed to initialize MAC\n");
+ "Failed to initialize MAC "
+ "(reg: %04x, val %02x)\n", reg, val);
return -EAGAIN;
}
}
- if (priv->rtlchip != 0x8723b)
+ if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E)
rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a);
return 0;
}
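
rtl8xxxu_init_mac() now takes its table from priv->fops->mactable and walks it entry by entry. A sketch of such a table walk; the 0xffff/0xff terminator used here is an assumption about how these tables are ended:

#include <stdio.h>

struct reg8val {
	unsigned short reg;
	unsigned char val;
};

/* Apply register/value pairs until the terminator entry (assumed to
 * be { 0xffff, 0xff }); the real loop calls rtl8xxxu_write8() and
 * bails out with -EAGAIN when a write fails. */
static void init_mac(const struct reg8val *array)
{
	int i;

	for (i = 0; ; i++) {
		unsigned short reg = array[i].reg;
		unsigned char val = array[i].val;

		if (reg == 0xffff && val == 0xff)
			break;
		printf("write8 %04x <- %02x\n", reg, val);
	}
}

int main(void)
{
	const struct reg8val table[] = {
		{ 0x0420, 0x80 }, { 0x0421, 0x00 },	/* illustrative */
		{ 0xffff, 0xff },			/* terminator */
	};

	init_mac(table);
	return 0;
}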
-static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
- struct rtl8xxxu_reg32val *array)
+int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_reg32val *array)
{
int i, ret;
u16 reg;
@@ -3132,50 +2184,30 @@ static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
return 0;
}
-/*
- * Most of this is black magic retrieved from the old rtl8723au driver
- */
-static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_gen1_init_phy_bb(struct rtl8xxxu_priv *priv)
{
u8 val8, ldoa15, ldov12d, lpldo, ldohci12;
u16 val16;
u32 val32;
- /*
- * Todo: The vendor driver maintains a table of PHY register
- * addresses, which is initialized here. Do we need this?
- */
-
- if (priv->rtlchip == 0x8723b) {
- val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
- val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB |
- SYS_FUNC_DIO_RF;
- rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
-
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
- } else {
- val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
- udelay(2);
- val8 |= AFE_PLL_320_ENABLE;
- rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
- udelay(2);
+ val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+ udelay(2);
+ val8 |= AFE_PLL_320_ENABLE;
+ rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+ udelay(2);
- rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff);
- udelay(2);
+ rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff);
+ udelay(2);
- val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
- val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
- rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
- }
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
- if (priv->rtlchip != 0x8723b) {
- /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */
- val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
- val32 &= ~AFE_XTAL_RF_GATE;
- if (priv->has_bluetooth)
- val32 &= ~AFE_XTAL_BT_GATE;
- rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
- }
+ val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
+ val32 &= ~AFE_XTAL_RF_GATE;
+ if (priv->has_bluetooth)
+ val32 &= ~AFE_XTAL_BT_GATE;
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
/* 6. 0x1f[7:0] = 0x07 */
val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
@@ -3185,21 +2217,36 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table);
else if (priv->tx_paths == 2)
rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table);
- else if (priv->rtlchip == 0x8723b) {
- /*
- * Why?
- */
- rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3);
- rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80);
- rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table);
- } else
+ else
rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table);
-
- if (priv->rtlchip == 0x8188c && priv->hi_pa &&
+ if (priv->rtl_chip == RTL8188R && priv->hi_pa &&
priv->vendor_umc && priv->chip_cut == 1)
rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50);
+ if (priv->hi_pa)
+ rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table);
+ else
+ rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table);
+
+ ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
+ ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
+ ldohci12 = 0x57;
+ lpldo = 1;
+ val32 = (lpldo << 24) | (ldohci12 << 16) | (ldov12d << 8) | ldoa15;
+ rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32);
+}
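
The LDO write closing rtl8xxxu_gen1_init_phy_bb() packs four one-byte fields into REG_LDOA15_CTRL. A tiny worked example of that packing (field values illustrative; the real ones come from the LDOA15_*/LDOV12D_* defines):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t ldoa15 = 0x15;		/* byte 0 */
	uint8_t ldov12d = 0x4d;		/* byte 1 */
	uint8_t ldohci12 = 0x57;	/* byte 2 */
	uint8_t lpldo = 1;		/* byte 3 */
	uint32_t val32;

	val32 = (uint32_t)lpldo << 24 | (uint32_t)ldohci12 << 16 |
		(uint32_t)ldov12d << 8 | ldoa15;
	printf("REG_LDOA15_CTRL = %08x\n", (unsigned)val32);
	return 0;
}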
+
+/*
+ * Most of this is black magic retrieved from the old rtl8723au driver
+ */
+static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+
+ priv->fops->init_phy_bb(priv);
+
if (priv->tx_paths == 1 && priv->rx_paths == 2) {
/*
* For 1T2R boards, patch the registers.
@@ -3217,8 +2264,10 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_FPGA1_TX_INFO, val32);
val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
- val32 &= 0xff000000;
- val32 |= 0x45000000;
+ val32 &= ~CCK0_AFE_RX_MASK;
+ val32 &= 0x00ffffff;
+ val32 |= 0x40000000;
+ val32 |= CCK0_AFE_RX_ANT_B;
rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
@@ -3258,13 +2307,6 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_TX_TO_TX, val32);
}
- if (priv->rtlchip == 0x8723b)
- rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table);
- else if (priv->hi_pa)
- rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table);
- else
- rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table);
-
if (priv->has_xtalk) {
val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL);
@@ -3275,16 +2317,8 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32);
}
- if (priv->rtlchip != 0x8723bu) {
- ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
- ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
- ldohci12 = 0x57;
- lpldo = 1;
- val32 = (lpldo << 24) | (ldohci12 << 16) |
- (ldov12d << 8) | ldoa15;
-
- rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32);
- }
+ if (priv->rtl_chip == RTL8192E)
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x000f81fb);
return 0;
}
@@ -3337,9 +2371,9 @@ static int rtl8xxxu_init_rf_regs(struct rtl8xxxu_priv *priv,
return 0;
}
-static int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
- struct rtl8xxxu_rfregval *table,
- enum rtl8xxxu_rfpath path)
+int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_rfregval *table,
+ enum rtl8xxxu_rfpath path)
{
u32 val32;
u16 val16, rfsi_rfenv;
@@ -3423,7 +2457,7 @@ static int rtl8xxxu_llt_write(struct rtl8xxxu_priv *priv, u8 address, u8 data)
return ret;
}
-static int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
{
int ret;
int i;
@@ -3454,7 +2488,7 @@ exit:
return ret;
}
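
rtl8xxxu_init_llt_table() programs the TX buffer page link list one entry at a time via rtl8xxxu_llt_write(). A sketch of the structure it is believed to build: pages 0..last_tx_page chain linearly for the reserved queues, and the remaining pages form a ring used as the packet buffer:

#include <stdio.h>

#define LLT_LAST_ENTRY 0xff

/* Stand-in for rtl8xxxu_llt_write(): record entry -> next page. */
static void llt_write(unsigned char llt[], unsigned char addr,
		      unsigned char data)
{
	llt[addr] = data;
}

/* Assumed layout: a linear chain up to last_tx_page terminated by
 * 0xff, then a ring over the remaining pages that loops back to
 * last_tx_page + 1. */
static void init_llt_table(unsigned char llt[], unsigned char last_tx_page)
{
	unsigned int i;

	for (i = 0; i < last_tx_page; i++)
		llt_write(llt, i, i + 1);
	llt_write(llt, last_tx_page, LLT_LAST_ENTRY);

	for (i = last_tx_page + 1; i < LLT_LAST_ENTRY; i++)
		llt_write(llt, i, i + 1);
	llt_write(llt, LLT_LAST_ENTRY, last_tx_page + 1);
}

int main(void)
{
	unsigned char llt[256] = { 0 };

	init_llt_table(llt, 0xf8);
	printf("page 0xf8 -> %02x, page 0xff -> %02x\n",
	       llt[0xf8], llt[0xff]);	/* ff and f9 */
	return 0;
}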
-static int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
{
u32 val32;
int ret = 0;
@@ -3598,9 +2632,8 @@ static int rtl8xxxu_init_queue_priority(struct rtl8xxxu_priv *priv)
return ret;
}
-static void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv,
- bool iqk_ok, int result[][8],
- int candidate, bool tx_only)
+void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv, bool iqk_ok,
+ int result[][8], int candidate, bool tx_only)
{
u32 oldval, x, tx0_a, reg;
int y, tx0_c;
@@ -3676,9 +2709,8 @@ static void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv,
rtl8xxxu_write32(priv, REG_OFDM0_RX_IQ_EXT_ANTA, val32);
}
-static void rtl8xxxu_fill_iqk_matrix_b(struct rtl8xxxu_priv *priv,
- bool iqk_ok, int result[][8],
- int candidate, bool tx_only)
+void rtl8xxxu_fill_iqk_matrix_b(struct rtl8xxxu_priv *priv, bool iqk_ok,
+ int result[][8], int candidate, bool tx_only)
{
u32 oldval, x, tx1_a, reg;
int y, tx1_c;
@@ -3810,8 +2842,8 @@ static bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv,
return false;
}
-static bool rtl8723bu_simularity_compare(struct rtl8xxxu_priv *priv,
- int result[][8], int c1, int c2)
+bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
+ int result[][8], int c1, int c2)
{
u32 i, j, diff, simubitmap, bound = 0;
int candidate[2] = {-1, -1}; /* for path A and path B */
@@ -3895,7 +2927,7 @@ static bool rtl8723bu_simularity_compare(struct rtl8xxxu_priv *priv,
return false;
}
-static void
+void
rtl8xxxu_save_mac_regs(struct rtl8xxxu_priv *priv, const u32 *reg, u32 *backup)
{
int i;
@@ -3906,8 +2938,8 @@ rtl8xxxu_save_mac_regs(struct rtl8xxxu_priv *priv, const u32 *reg, u32 *backup)
backup[i] = rtl8xxxu_read32(priv, reg[i]);
}
-static void rtl8xxxu_restore_mac_regs(struct rtl8xxxu_priv *priv,
- const u32 *reg, u32 *backup)
+void rtl8xxxu_restore_mac_regs(struct rtl8xxxu_priv *priv,
+ const u32 *reg, u32 *backup)
{
int i;
@@ -3917,8 +2949,8 @@ static void rtl8xxxu_restore_mac_regs(struct rtl8xxxu_priv *priv,
rtl8xxxu_write32(priv, reg[i], backup[i]);
}
-static void rtl8xxxu_save_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
- u32 *backup, int count)
+void rtl8xxxu_save_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+ u32 *backup, int count)
{
int i;
@@ -3926,8 +2958,8 @@ static void rtl8xxxu_save_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
backup[i] = rtl8xxxu_read32(priv, regs[i]);
}
-static void rtl8xxxu_restore_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
- u32 *backup, int count)
+void rtl8xxxu_restore_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+ u32 *backup, int count)
{
int i;
@@ -3936,8 +2968,8 @@ static void rtl8xxxu_restore_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
}
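
The save/restore helpers above are symmetric array snapshots used to bracket calibration. A toy usage example with a fake register file standing in for rtl8xxxu_read32()/rtl8xxxu_write32():

#include <stdint.h>
#include <stdio.h>

static uint32_t regfile[8];	/* toy device registers */

static void save_regs(const uint32_t *regs, uint32_t *backup, int count)
{
	int i;

	for (i = 0; i < count; i++)
		backup[i] = regfile[regs[i]];
}

static void restore_regs(const uint32_t *regs, uint32_t *backup, int count)
{
	int i;

	for (i = 0; i < count; i++)
		regfile[regs[i]] = backup[i];
}

int main(void)
{
	const uint32_t cal_regs[2] = { 1, 3 };
	uint32_t backup[2];

	regfile[1] = 0xdead;
	regfile[3] = 0xbeef;
	save_regs(cal_regs, backup, 2);		/* before calibration */
	regfile[1] = regfile[3] = 0;		/* calibration clobbers */
	restore_regs(cal_regs, backup, 2);	/* after calibration */
	printf("%x %x\n", (unsigned)regfile[1], (unsigned)regfile[3]);
	return 0;
}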
-static void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs,
- bool path_a_on)
+void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs,
+ bool path_a_on)
{
u32 path_on;
int i;
@@ -3956,8 +2988,8 @@ static void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs,
rtl8xxxu_write32(priv, regs[i], path_on);
}
-static void rtl8xxxu_mac_calibration(struct rtl8xxxu_priv *priv,
- const u32 *regs, u32 *backup)
+void rtl8xxxu_mac_calibration(struct rtl8xxxu_priv *priv,
+ const u32 *regs, u32 *backup)
{
int i = 0;
@@ -4062,369 +3094,6 @@ out:
return result;
}
-static int rtl8723bu_iqk_path_a(struct rtl8xxxu_priv *priv)
-{
- u32 reg_eac, reg_e94, reg_e9c, path_sel, val32;
- int result = 0;
-
- path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
-
- /*
- * Leave IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /*
- * Enable path A PA in TX IQK mode
- */
- val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
- val32 |= 0x80000;
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x20000);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0003f);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xc7f87);
-
- /*
- * Tx IQK setting
- */
- rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
- rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
-
- /* path-A IQK setting */
- rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
- rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
- rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
- rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
-
- rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x821403ea);
- rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000);
- rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
- rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
-
- /* LO calibration setting */
- rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911);
-
- /*
- * Enter IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- val32 |= 0x80800000;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /*
- * The vendor driver indicates the USB module is always using
- * S0S1 path 1 for the 8723bu. This may be different for 8192eu
- */
- if (priv->rf_paths > 1)
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
- else
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
-
- /*
- * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu.
- * No trace of this in the 8192eu or 8188eu vendor drivers.
- */
- rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
-
- /* One shot, path A LOK & IQK */
- rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
- rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
-
- mdelay(1);
-
- /* Restore Ant Path */
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
-#ifdef RTL8723BU_BT
- /* GNT_BT = 1 */
- rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
-#endif
-
- /*
- * Leave IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /* Check failed */
- reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
- reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
- reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
-
- val32 = (reg_e9c >> 16) & 0x3ff;
- if (val32 & 0x200)
- val32 = 0x400 - val32;
-
- if (!(reg_eac & BIT(28)) &&
- ((reg_e94 & 0x03ff0000) != 0x01420000) &&
- ((reg_e9c & 0x03ff0000) != 0x00420000) &&
- ((reg_e94 & 0x03ff0000) < 0x01100000) &&
- ((reg_e94 & 0x03ff0000) > 0x00f00000) &&
- val32 < 0xf)
- result |= 0x01;
- else /* If TX not OK, ignore RX */
- goto out;
-
-out:
- return result;
-}
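
The pass/fail test above folds the 10-bit phase field of reg_e9c from two's complement into a magnitude before comparing it against 0xf. A worked standalone version of that fold:

#include <stdint.h>
#include <stdio.h>

/* Extract bits 25:16 and fold the 10-bit two's-complement value into
 * its magnitude: if bit 9 is set the value is negative, and
 * 0x400 - x recovers |x|. */
static uint32_t iqk_phase_magnitude(uint32_t reg_e9c)
{
	uint32_t val = (reg_e9c >> 16) & 0x3ff;

	if (val & 0x200)
		val = 0x400 - val;
	return val;
}

int main(void)
{
	printf("%u\n", (unsigned)iqk_phase_magnitude(0x000a0000)); /* +10 */
	printf("%u\n", (unsigned)iqk_phase_magnitude(0x03f60000)); /* -10 */
	return 0;
}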
-
-static int rtl8723bu_rx_iqk_path_a(struct rtl8xxxu_priv *priv)
-{
- u32 reg_ea4, reg_eac, reg_e94, reg_e9c, path_sel, val32;
- int result = 0;
-
- path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
-
- /*
- * Leave IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /*
- * Enable path A PA in TX IQK mode
- */
- val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
- val32 |= 0x80000;
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7);
-
- /*
- * Tx IQK setting
- */
- rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
- rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
-
- /* path-A IQK setting */
- rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
- rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
- rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
- rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
-
- rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160ff0);
- rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000);
- rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
- rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
-
- /* LO calibration setting */
- rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
-
- /*
- * Enter IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- val32 |= 0x80800000;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /*
- * The vendor driver indicates the USB module is always using
- * S0S1 path 1 for the 8723bu. This may be different for 8192eu
- */
- if (priv->rf_paths > 1)
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
- else
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
-
- /*
- * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu.
- * No trace of this in the 8192eu or 8188eu vendor drivers.
- */
- rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
-
- /* One shot, path A LOK & IQK */
- rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
- rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
-
- mdelay(1);
-
- /* Restore Ant Path */
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
-#ifdef RTL8723BU_BT
- /* GNT_BT = 1 */
- rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
-#endif
-
- /*
- * Leave IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /* Check failed */
- reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
- reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
- reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
-
- val32 = (reg_e9c >> 16) & 0x3ff;
- if (val32 & 0x200)
- val32 = 0x400 - val32;
-
- if (!(reg_eac & BIT(28)) &&
- ((reg_e94 & 0x03ff0000) != 0x01420000) &&
- ((reg_e9c & 0x03ff0000) != 0x00420000) &&
- ((reg_e94 & 0x03ff0000) < 0x01100000) &&
- ((reg_e94 & 0x03ff0000) > 0x00f00000) &&
- val32 < 0xf)
- result |= 0x01;
- else /* If TX not OK, ignore RX */
- goto out;
-
- val32 = 0x80007c00 | (reg_e94 & 0x3ff0000) |
- ((reg_e9c & 0x3ff0000) >> 16);
- rtl8xxxu_write32(priv, REG_TX_IQK, val32);
-
- /*
- * Modify RX IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
- val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
- val32 |= 0x80000;
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7d77);
-
- /*
- * PA, PAD setting
- */
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0xf80);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_55, 0x4021f);
-
- /*
- * RX IQK setting
- */
- rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
-
- /* path-A IQK setting */
- rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
- rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c);
- rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
- rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
-
- rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82110000);
- rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x2816001f);
- rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
- rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
-
- /* LO calibration setting */
- rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a8d1);
-
- /*
- * Enter IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- val32 |= 0x80800000;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- if (priv->rf_paths > 1)
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
- else
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
-
- /*
- * Disable BT
- */
- rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
-
- /* One shot, path A LOK & IQK */
- rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
- rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
-
- mdelay(1);
-
- /* Restore Ant Path */
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
-#ifdef RTL8723BU_BT
- /* GNT_BT = 1 */
- rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
-#endif
-
- /*
- * Leave IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /* Check failed */
- reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
- reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
-
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x780);
-
- val32 = (reg_eac >> 16) & 0x3ff;
- if (val32 & 0x200)
- val32 = 0x400 - val32;
-
- if (!(reg_eac & BIT(27)) &&
- ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
- ((reg_eac & 0x03ff0000) != 0x00360000) &&
- ((reg_ea4 & 0x03ff0000) < 0x01100000) &&
- ((reg_ea4 & 0x03ff0000) > 0x00f00000) &&
- val32 < 0xf)
- result |= 0x02;
- else /* If TX not OK, ignore RX */
- goto out;
-out:
- return result;
-}
-
-#ifdef RTL8723BU_PATH_B
-static int rtl8723bu_iqk_path_b(struct rtl8xxxu_priv *priv)
-{
- u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc, path_sel;
- int result = 0;
-
- path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
-
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /* One shot, path B LOK & IQK */
- rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000002);
- rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000000);
-
- mdelay(1);
-
- /* Check failed */
- reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
- reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
- reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
- reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
- reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
-
- if (!(reg_eac & BIT(31)) &&
- ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
- ((reg_ebc & 0x03ff0000) != 0x00420000))
- result |= 0x01;
- else
- goto out;
-
- if (!(reg_eac & BIT(30)) &&
- (((reg_ec4 & 0x03ff0000) >> 16) != 0x132) &&
- (((reg_ecc & 0x03ff0000) >> 16) != 0x36))
- result |= 0x02;
- else
- dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n",
- __func__);
-out:
- return result;
-}
-#endif
-
static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
int result[][8], int t)
{
@@ -4489,9 +3158,12 @@ static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
- val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
- rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+ if (!priv->no_pape) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
+ val32 |= (FPGA0_RF_PAPE |
+ (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+ rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+ }
val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
val32 &= ~BIT(10);
@@ -4627,249 +3299,18 @@ static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
}
}
-static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
- int result[][8], int t)
-{
- struct device *dev = &priv->udev->dev;
- u32 i, val32;
- int path_a_ok /*, path_b_ok */;
- int retry = 2;
- const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
- REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
- REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
- REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
- REG_TX_OFDM_BBON, REG_TX_TO_RX,
- REG_TX_TO_TX, REG_RX_CCK,
- REG_RX_OFDM, REG_RX_WAIT_RIFS,
- REG_RX_TO_RX, REG_STANDBY,
- REG_SLEEP, REG_PMPD_ANAEN
- };
- const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
- REG_TXPAUSE, REG_BEACON_CTRL,
- REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
- };
- const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
- REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
- REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
- REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
- REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE
- };
- u8 xa_agc = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1) & 0xff;
- u8 xb_agc = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1) & 0xff;
-
- /*
- * Note: IQ calibration must be performed after loading
- * PHY_REG.txt, radio_a.txt and radio_b.txt
- */
-
- if (t == 0) {
- /* Save ADDA parameters, turn Path A ADDA on */
- rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
- RTL8XXXU_ADDA_REGS);
- rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
- rtl8xxxu_save_regs(priv, iqk_bb_regs,
- priv->bb_backup, RTL8XXXU_BB_REGS);
- }
-
- rtl8xxxu_path_adda_on(priv, adda_regs, true);
-
- /* MAC settings */
- rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
-
- val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
- val32 |= 0x0f000000;
- rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
-
- rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
- rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
- rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
-
-#ifdef RTL8723BU_PATH_B
- /* Set RF mode to standby Path B */
- if (priv->tx_paths > 1)
- rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0x10000);
-#endif
-
-#if 0
- /* Page B init */
- rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x0f600000);
-
- if (priv->tx_paths > 1)
- rtl8xxxu_write32(priv, REG_CONFIG_ANT_B, 0x0f600000);
-#endif
-
- /*
- * RX IQ calibration setting for the 8723B D-cut large current
- * issue seen when leaving IPS
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
- val32 |= 0x80000;
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
-
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7);
-
- val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED);
- val32 |= 0x20;
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32);
-
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_43, 0x60fbd);
-
- for (i = 0; i < retry; i++) {
- path_a_ok = rtl8723bu_iqk_path_a(priv);
- if (path_a_ok == 0x01) {
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
-#if 0 /* Only needed in restore case, we may need this when going to suspend */
- priv->RFCalibrateInfo.TxLOK[RF_A] =
- rtl8xxxu_read_rfreg(priv, RF_A,
- RF6052_REG_TXM_IDAC);
-#endif
-
- val32 = rtl8xxxu_read32(priv,
- REG_TX_POWER_BEFORE_IQK_A);
- result[t][0] = (val32 >> 16) & 0x3ff;
- val32 = rtl8xxxu_read32(priv,
- REG_TX_POWER_AFTER_IQK_A);
- result[t][1] = (val32 >> 16) & 0x3ff;
-
- break;
- }
- }
-
- if (!path_a_ok)
- dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__);
-
- for (i = 0; i < retry; i++) {
- path_a_ok = rtl8723bu_rx_iqk_path_a(priv);
- if (path_a_ok == 0x03) {
- val32 = rtl8xxxu_read32(priv,
- REG_RX_POWER_BEFORE_IQK_A_2);
- result[t][2] = (val32 >> 16) & 0x3ff;
- val32 = rtl8xxxu_read32(priv,
- REG_RX_POWER_AFTER_IQK_A_2);
- result[t][3] = (val32 >> 16) & 0x3ff;
-
- break;
- }
- }
-
- if (!path_a_ok)
- dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__);
-
- if (priv->tx_paths > 1) {
-#if 1
- dev_warn(dev, "%s: Path B not supported\n", __func__);
-#else
-
- /*
- * Path A into standby
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000);
-
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- val32 |= 0x80800000;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /* Turn Path B ADDA on */
- rtl8xxxu_path_adda_on(priv, adda_regs, false);
-
- for (i = 0; i < retry; i++) {
- path_b_ok = rtl8xxxu_iqk_path_b(priv);
- if (path_b_ok == 0x03) {
- val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
- result[t][4] = (val32 >> 16) & 0x3ff;
- val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
- result[t][5] = (val32 >> 16) & 0x3ff;
- break;
- }
- }
-
- if (!path_b_ok)
- dev_dbg(dev, "%s: Path B IQK failed!\n", __func__);
-
- for (i = 0; i < retry; i++) {
- path_b_ok = rtl8723bu_rx_iqk_path_b(priv);
- if (path_a_ok == 0x03) {
- val32 = rtl8xxxu_read32(priv,
- REG_RX_POWER_BEFORE_IQK_B_2);
- result[t][6] = (val32 >> 16) & 0x3ff;
- val32 = rtl8xxxu_read32(priv,
- REG_RX_POWER_AFTER_IQK_B_2);
- result[t][7] = (val32 >> 16) & 0x3ff;
- break;
- }
- }
-
- if (!path_b_ok)
- dev_dbg(dev, "%s: Path B RX IQK failed!\n", __func__);
-#endif
- }
-
- /* Back to BB mode, load original value */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- if (t) {
- /* Reload ADDA power saving parameters */
- rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
- RTL8XXXU_ADDA_REGS);
-
- /* Reload MAC parameters */
- rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
-
- /* Reload BB parameters */
- rtl8xxxu_restore_regs(priv, iqk_bb_regs,
- priv->bb_backup, RTL8XXXU_BB_REGS);
-
- /* Restore RX initial gain */
- val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
- val32 &= 0xffffff00;
- rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | 0x50);
- rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | xa_agc);
-
- if (priv->tx_paths > 1) {
- val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1);
- val32 &= 0xffffff00;
- rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
- val32 | 0x50);
- rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
- val32 | xb_agc);
- }
-
- /* Load 0xe30 IQC default value */
- rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
- rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
- }
-}
-
-static void rtl8xxxu_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start)
+void rtl8xxxu_gen2_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start)
{
struct h2c_cmd h2c;
- if (priv->fops->mbox_ext_width < 4)
- return;
-
memset(&h2c, 0, sizeof(struct h2c_cmd));
h2c.bt_wlan_calibration.cmd = H2C_8723B_BT_WLAN_CALIBRATION;
h2c.bt_wlan_calibration.data = start;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_wlan_calibration));
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_wlan_calibration));
}
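
rtl8xxxu_gen2_prepare_calibrate() is the gen2 hook that tells the BT coexistence firmware when WLAN calibration starts (1) and stops (0); the removed 8723bu code below shows it bracketing the IQK passes. A standalone sketch of the command it builds (the opcode value is assumed; the real one is defined in rtl8xxxu.h):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define H2C_8723B_BT_WLAN_CALIBRATION 0x67	/* assumed opcode value */

/* Simplified view of the bt_wlan_calibration member of h2c_cmd. */
struct bt_wlan_calibration {
	uint8_t cmd;
	uint8_t data;
};

int main(void)
{
	struct bt_wlan_calibration h2c;

	/* Same construction as above: zero the command, set the
	 * opcode, flag calibration start (1) or stop (0). */
	memset(&h2c, 0, sizeof(h2c));
	h2c.cmd = H2C_8723B_BT_WLAN_CALIBRATION;
	h2c.data = 1;

	printf("h2c: %02x %02x\n", h2c.cmd, h2c.data);
	return 0;
}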
-static void rtl8723au_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
{
struct device *dev = &priv->udev->dev;
int result[4][8]; /* last is final result */
@@ -4880,8 +3321,6 @@ static void rtl8723au_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
s32 reg_tmp = 0;
bool simu;
- rtl8xxxu_prepare_calibrate(priv, 1);
-
memset(result, 0, sizeof(result));
candidate = -1;
@@ -4967,137 +3406,8 @@ static void rtl8723au_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
candidate, (reg_ec4 == 0));
- rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
- priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
-
- rtl8xxxu_prepare_calibrate(priv, 0);
-}
-
-static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
-{
- struct device *dev = &priv->udev->dev;
- int result[4][8]; /* last is final result */
- int i, candidate;
- bool path_a_ok, path_b_ok;
- u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
- u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
- u32 val32, bt_control;
- s32 reg_tmp = 0;
- bool simu;
-
- rtl8xxxu_prepare_calibrate(priv, 1);
-
- memset(result, 0, sizeof(result));
- candidate = -1;
-
- path_a_ok = false;
- path_b_ok = false;
-
- bt_control = rtl8xxxu_read32(priv, REG_BT_CONTROL_8723BU);
-
- for (i = 0; i < 3; i++) {
- rtl8723bu_phy_iqcalibrate(priv, result, i);
-
- if (i == 1) {
- simu = rtl8723bu_simularity_compare(priv, result, 0, 1);
- if (simu) {
- candidate = 0;
- break;
- }
- }
-
- if (i == 2) {
- simu = rtl8723bu_simularity_compare(priv, result, 0, 2);
- if (simu) {
- candidate = 0;
- break;
- }
-
- simu = rtl8723bu_simularity_compare(priv, result, 1, 2);
- if (simu) {
- candidate = 1;
- } else {
- for (i = 0; i < 8; i++)
- reg_tmp += result[3][i];
-
- if (reg_tmp)
- candidate = 3;
- else
- candidate = -1;
- }
- }
- }
-
- for (i = 0; i < 4; i++) {
- reg_e94 = result[i][0];
- reg_e9c = result[i][1];
- reg_ea4 = result[i][2];
- reg_eac = result[i][3];
- reg_eb4 = result[i][4];
- reg_ebc = result[i][5];
- reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
- }
-
- if (candidate >= 0) {
- reg_e94 = result[candidate][0];
- priv->rege94 = reg_e94;
- reg_e9c = result[candidate][1];
- priv->rege9c = reg_e9c;
- reg_ea4 = result[candidate][2];
- reg_eac = result[candidate][3];
- reg_eb4 = result[candidate][4];
- priv->regeb4 = reg_eb4;
- reg_ebc = result[candidate][5];
- priv->regebc = reg_ebc;
- reg_ec4 = result[candidate][6];
- reg_ecc = result[candidate][7];
- dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
- dev_dbg(dev,
- "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x "
- "ecc=%x\n ", __func__, reg_e94, reg_e9c,
- reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc);
- path_a_ok = true;
- path_b_ok = true;
- } else {
- reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100;
- reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0;
- }
-
- if (reg_e94 && candidate >= 0)
- rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
- candidate, (reg_ea4 == 0));
-
- if (priv->tx_paths > 1 && reg_eb4)
- rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
- candidate, (reg_ec4 == 0));
-
- rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+ rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
-
- rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, bt_control);
-
- val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
- val32 |= 0x80000;
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x18000);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xe6177);
- val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED);
- val32 |= 0x20;
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32);
- rtl8xxxu_write_rfreg(priv, RF_A, 0x43, 0x300bd);
-
- if (priv->rf_paths > 1) {
- dev_dbg(dev, "%s: beware 2T not yet supported\n", __func__);
-#ifdef RTL8723BU_PATH_B
- if (RF_Path == 0x0) //S1
- ODM_SetIQCbyRFpath(pDM_Odm, 0);
- else //S0
- ODM_SetIQCbyRFpath(pDM_Odm, 1);
-#endif
- }
- rtl8xxxu_prepare_calibrate(priv, 0);
}
static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
@@ -5223,7 +3533,7 @@ static void rtl8xxxu_set_ampdu_min_space(struct rtl8xxxu_priv *priv, u8 density)
static int rtl8xxxu_active_to_emu(struct rtl8xxxu_priv *priv)
{
u8 val8;
- int count, ret;
+ int count, ret = 0;
/* Start of rtl8723AU_card_enable_flow */
/* Act to Cardemu sequence*/
@@ -5268,69 +3578,11 @@ exit:
return ret;
}
-static int rtl8723bu_active_to_emu(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u16 val16;
- u32 val32;
- int count, ret;
-
- /* Turn off RF */
- rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
-
- /* Enable rising edge triggering interrupt */
- val16 = rtl8xxxu_read16(priv, REG_GPIO_INTM);
- val16 &= ~GPIO_INTM_EDGE_TRIG_IRQ;
- rtl8xxxu_write16(priv, REG_GPIO_INTM, val16);
-
- /* Release WLON reset 0x04[16]= 1*/
- val32 = rtl8xxxu_read32(priv, REG_GPIO_INTM);
- val32 |= APS_FSMCO_WLON_RESET;
- rtl8xxxu_write32(priv, REG_GPIO_INTM, val32);
-
- /* 0x0005[1] = 1 turn off MAC by HW state machine*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 |= BIT(1);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-
- for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- if ((val8 & BIT(1)) == 0)
- break;
- udelay(10);
- }
-
- if (!count) {
- dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n",
- __func__);
- ret = -EBUSY;
- goto exit;
- }
-
- /* Enable BT control XTAL setting */
- val8 = rtl8xxxu_read8(priv, REG_AFE_MISC);
- val8 &= ~AFE_MISC_WL_XTAL_CTRL;
- rtl8xxxu_write8(priv, REG_AFE_MISC, val8);
-
- /* 0x0000[5] = 1 analog Ips to digital, 1:isolation */
- val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
- val8 |= SYS_ISO_ANALOG_IPS;
- rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
-
- /* 0x0020[0] = 0 disable LDOA12 MACRO block*/
- val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
- val8 &= ~LDOA15_ENABLE;
- rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
-
-exit:
- return ret;
-}
-
-static int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv)
+int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv)
{
u8 val8;
u32 val32;
- int count, ret;
+ int count, ret = 0;
rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
@@ -5382,7 +3634,7 @@ exit:
return ret;
}
-static void rtl8723a_disabled_to_emu(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -5402,294 +3654,6 @@ static void rtl8723a_disabled_to_emu(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
}
-static void rtl8192e_disabled_to_emu(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
-
- /* Clear suspend enable and power down enable*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 &= ~(BIT(3) | BIT(4));
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-}
-
-static int rtl8192e_emu_to_active(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u32 val32;
- int count, ret = 0;
-
- /* disable HWPDN 0x04[15]=0*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 &= ~BIT(7);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-
- /* disable SW LPS 0x04[10]= 0 */
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 &= ~BIT(2);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-
- /* disable WL suspend*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 &= ~(BIT(3) | BIT(4));
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-
- /* wait till 0x04[17] = 1 power ready*/
- for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- if (val32 & BIT(17))
- break;
-
- udelay(10);
- }
-
- if (!count) {
- ret = -EBUSY;
- goto exit;
- }
-
- /* We should be able to optimize the following three entries into one */
-
- /* release WLON reset 0x04[16]= 1*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
- val8 |= BIT(0);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
-
- /* set, then poll until 0 */
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- val32 |= APS_FSMCO_MAC_ENABLE;
- rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
-
- for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
- ret = 0;
- break;
- }
- udelay(10);
- }
-
- if (!count) {
- ret = -EBUSY;
- goto exit;
- }
-
-exit:
- return ret;
-}
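
The emu_to_active sequences above repeat one pattern several times: set a bit in REG_APS_FSMCO, then poll up to RTL8XXXU_MAX_REG_POLL times for the hardware state machine to clear it. A generic standalone sketch of that pattern (the poll bound and the simulated hardware are assumptions of this example):

#include <stdint.h>
#include <stdio.h>

#define RTL8XXXU_MAX_REG_POLL 500	/* assumed bound; see rtl8xxxu.h */

static uint32_t aps_fsmco;	/* toy REG_APS_FSMCO */

/* Set 'bit', then poll until it reads back clear, as the
 * APS_FSMCO_MAC_ENABLE sequences above do; -16 mirrors -EBUSY. */
static int set_and_poll_clear(uint32_t bit)
{
	int count;

	aps_fsmco |= bit;

	for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
		aps_fsmco &= ~bit;	/* simulate hardware finishing */
		if ((aps_fsmco & bit) == 0)
			return 0;
		/* the real driver does udelay(10) here */
	}
	return -16;
}

int main(void)
{
	printf("poll result: %d\n", set_and_poll_clear(1u << 8));
	return 0;
}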
-
-static int rtl8723a_emu_to_active(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u32 val32;
- int count, ret = 0;
-
- /* 0x20[0] = 1 enable LDOA12 MACRO block for all interface*/
- val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
- val8 |= LDOA15_ENABLE;
- rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
-
- /* 0x67[0] = 0 to disable BT_GPS_SEL pins*/
- val8 = rtl8xxxu_read8(priv, 0x0067);
- val8 &= ~BIT(4);
- rtl8xxxu_write8(priv, 0x0067, val8);
-
- mdelay(1);
-
- /* 0x00[5] = 0 release analog Ips to digital, 1:isolation */
- val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
- val8 &= ~SYS_ISO_ANALOG_IPS;
- rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
-
- /* disable SW LPS 0x04[10]= 0 */
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 &= ~BIT(2);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-
- /* wait till 0x04[17] = 1 power ready*/
- for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- if (val32 & BIT(17))
- break;
-
- udelay(10);
- }
-
- if (!count) {
- ret = -EBUSY;
- goto exit;
- }
-
- /* We should be able to optimize the following three entries into one */
-
- /* release WLON reset 0x04[16]= 1*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
- val8 |= BIT(0);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
-
- /* disable HWPDN 0x04[15]= 0*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 &= ~BIT(7);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-
- /* disable WL suspend*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 &= ~(BIT(3) | BIT(4));
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-
- /* set, then poll until 0 */
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- val32 |= APS_FSMCO_MAC_ENABLE;
- rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
-
- for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
- ret = 0;
- break;
- }
- udelay(10);
- }
-
- if (!count) {
- ret = -EBUSY;
- goto exit;
- }
-
- /* 0x4C[23] = 0x4E[7] = 1, switch DPDT_SEL_P output from WL BB */
- /*
- * Note: the vendor driver actually clears this bit, even though
- * the documentation claims it should be set!
- */
- val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
- val8 |= LEDCFG2_DPDT_SELECT;
- val8 &= ~LEDCFG2_DPDT_SELECT;
- rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
-
-exit:
- return ret;
-}
-
-static int rtl8723b_emu_to_active(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u32 val32;
- int count, ret = 0;
-
-	/* 0x20[0] = 1 enable LDOA12 MACRO block for all interfaces */
- val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
- val8 |= LDOA15_ENABLE;
- rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
-
-	/* 0x67[0] = 0 to disable BT_GPS_SEL pins */
- val8 = rtl8xxxu_read8(priv, 0x0067);
- val8 &= ~BIT(4);
- rtl8xxxu_write8(priv, 0x0067, val8);
-
- mdelay(1);
-
-	/* 0x00[5] = 0: release analog ips to digital, 1: isolation */
- val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
- val8 &= ~SYS_ISO_ANALOG_IPS;
- rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
-
- /* Disable SW LPS 0x04[10]= 0 */
-	val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- val32 &= ~APS_FSMCO_SW_LPS;
- rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
-
-	/* Wait until 0x04[17] = 1, power ready */
- for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- if (val32 & BIT(17))
- break;
-
- udelay(10);
- }
-
- if (!count) {
- ret = -EBUSY;
- goto exit;
- }
-
- /* We should be able to optimize the following three entries into one */
-
-	/* Release WLON reset 0x04[16] = 1 */
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- val32 |= APS_FSMCO_WLON_RESET;
- rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
-
-	/* Disable HWPDN 0x04[15] = 0 */
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- val32 &= ~APS_FSMCO_HW_POWERDOWN;
- rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
-
-	/* Disable WL suspend */
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- val32 &= ~(APS_FSMCO_HW_SUSPEND | APS_FSMCO_PCIE);
- rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
-
- /* Set, then poll until 0 */
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- val32 |= APS_FSMCO_MAC_ENABLE;
- rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
-
- for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
- ret = 0;
- break;
- }
- udelay(10);
- }
-
- if (!count) {
- ret = -EBUSY;
- goto exit;
- }
-
- /* Enable WL control XTAL setting */
- val8 = rtl8xxxu_read8(priv, REG_AFE_MISC);
- val8 |= AFE_MISC_WL_XTAL_CTRL;
- rtl8xxxu_write8(priv, REG_AFE_MISC, val8);
-
-	/* Enable falling-edge triggered interrupt */
- val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 1);
- val8 |= BIT(1);
- rtl8xxxu_write8(priv, REG_GPIO_INTM + 1, val8);
-
- /* Enable GPIO9 interrupt mode */
- val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2 + 1);
- val8 |= BIT(1);
- rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2 + 1, val8);
-
- /* Enable GPIO9 input mode */
- val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2);
- val8 &= ~BIT(1);
- rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2, val8);
-
- /* Enable HSISR GPIO[C:0] interrupt */
- val8 = rtl8xxxu_read8(priv, REG_HSIMR);
- val8 |= BIT(0);
- rtl8xxxu_write8(priv, REG_HSIMR, val8);
-
- /* Enable HSISR GPIO9 interrupt */
- val8 = rtl8xxxu_read8(priv, REG_HSIMR + 2);
- val8 |= BIT(1);
- rtl8xxxu_write8(priv, REG_HSIMR + 2, val8);
-
- val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL);
- val8 |= MULTI_WIFI_HW_ROF_EN;
- rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL, val8);
-
-	/* GPIO9 internal pull-high setting: BIT(14) */
- val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL + 1);
- val8 |= BIT(6);
- rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL + 1, val8);
-
-exit:
- return ret;
-}
-
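
Almost every statement in the functions above is a read-modify-write triplet on a one-byte register. A hedged sketch of a helper that collapses one triplet (hypothetical name, not part of this driver):

/* Hypothetical read-modify-write helper: clear the 'clear' bits, then set 'set'. */
static void rtl8xxxu_mod8(struct rtl8xxxu_priv *priv, u16 reg, u8 clear, u8 set)
{
	u8 val8 = rtl8xxxu_read8(priv, reg);

	val8 &= ~clear;
	val8 |= set;
	rtl8xxxu_write8(priv, reg, val8);
}

/* "disable WL suspend" above would then read: */
/*	rtl8xxxu_mod8(priv, REG_APS_FSMCO + 1, BIT(3) | BIT(4), 0); */
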
static int rtl8xxxu_emu_to_disabled(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -5715,7 +3679,7 @@ static int rtl8xxxu_emu_to_disabled(struct rtl8xxxu_priv *priv)
return 0;
}
-static int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv)
+int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv)
{
struct device *dev = &priv->udev->dev;
u32 val32;
@@ -5748,262 +3712,51 @@ static int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv)
return retval;
}
-static int rtl8723au_power_on(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u16 val16;
- u32 val32;
- int ret;
-
- /*
- * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
- */
- rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
-
- rtl8723a_disabled_to_emu(priv);
-
- ret = rtl8723a_emu_to_active(priv);
- if (ret)
- goto exit;
-
- /*
- * 0x0004[19] = 1, reset 8051
- */
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
- val8 |= BIT(3);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
-
- /*
- * Enable MAC DMA/WMAC/SCHEDULE/SEC block
- * Set CR bit10 to enable 32k calibration.
- */
- val16 = rtl8xxxu_read16(priv, REG_CR);
- val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
- CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
- CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
- CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
- CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
- rtl8xxxu_write16(priv, REG_CR, val16);
-
- /* For EFuse PG */
- val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
- val32 &= ~(BIT(28) | BIT(29) | BIT(30));
- val32 |= (0x06 << 28);
- rtl8xxxu_write32(priv, REG_EFUSE_CTRL, val32);
-exit:
- return ret;
-}
-
-static int rtl8723bu_power_on(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv)
{
- u8 val8;
- u16 val16;
- u32 val32;
- int ret;
-
- rtl8723a_disabled_to_emu(priv);
-
- ret = rtl8723b_emu_to_active(priv);
- if (ret)
- goto exit;
-
+ /* Fix USB interface interference issue */
+ rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+ rtl8xxxu_write8(priv, 0xfe41, 0x8d);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
/*
- * Enable MAC DMA/WMAC/SCHEDULE/SEC block
- * Set CR bit10 to enable 32k calibration.
+ * This sets TXDMA_OFFSET_DROP_DATA_EN (bit 9) as well as bits
+ * 8 and 5, for which I have found no documentation.
*/
- val16 = rtl8xxxu_read16(priv, REG_CR);
- val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
- CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
- CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
- CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
- CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
- rtl8xxxu_write16(priv, REG_CR, val16);
+ rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320);
/*
- * BT coexist power on settings. This is identical for 1 and 2
- * antenna parts.
+	 * Solve the excessive protocol errors seen on the USB bus.
+	 * This can't be done for 8188/8192 UMC A cut parts.
*/
- rtl8xxxu_write8(priv, REG_PAD_CTRL1 + 3, 0x20);
-
- val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
- val16 |= SYS_FUNC_BBRSTB | SYS_FUNC_BB_GLB_RSTN;
- rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
-
- rtl8xxxu_write8(priv, REG_BT_CONTROL_8723BU + 1, 0x18);
- rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
- /* Antenna inverse */
- rtl8xxxu_write8(priv, 0xfe08, 0x01);
-
- val16 = rtl8xxxu_read16(priv, REG_PWR_DATA);
- val16 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
- rtl8xxxu_write16(priv, REG_PWR_DATA, val16);
-
- val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
- val32 |= LEDCFG0_DPDT_SELECT;
- rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
-
- val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
- val8 &= ~PAD_CTRL1_SW_DPDT_SEL_DATA;
- rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
-exit:
- return ret;
-}
-
-#ifdef CONFIG_RTL8XXXU_UNTESTED
-
-static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u16 val16;
- u32 val32;
- int i;
-
- for (i = 100; i; i--) {
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO);
- if (val8 & APS_FSMCO_PFM_ALDN)
- break;
- }
-
- if (!i) {
- pr_info("%s: Poll failed\n", __func__);
- return -ENODEV;
- }
-
- /*
- * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
- */
- rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
- rtl8xxxu_write8(priv, REG_SPS0_CTRL, 0x2b);
- udelay(100);
-
- val8 = rtl8xxxu_read8(priv, REG_LDOV12D_CTRL);
- if (!(val8 & LDOV12D_ENABLE)) {
- pr_info("%s: Enabling LDOV12D (%02x)\n", __func__, val8);
- val8 |= LDOV12D_ENABLE;
- rtl8xxxu_write8(priv, REG_LDOV12D_CTRL, val8);
-
- udelay(100);
-
- val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
- val8 &= ~SYS_ISO_MD2PP;
- rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
- }
-
- /*
- * Auto enable WLAN
- */
- val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
- val16 |= APS_FSMCO_MAC_ENABLE;
- rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
-
- for (i = 1000; i; i--) {
- val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
- if (!(val16 & APS_FSMCO_MAC_ENABLE))
- break;
- }
- if (!i) {
- pr_info("%s: FSMCO_MAC_ENABLE poll failed\n", __func__);
- return -EBUSY;
- }
-
- /*
- * Enable radio, GPIO, LED
- */
- val16 = APS_FSMCO_HW_SUSPEND | APS_FSMCO_ENABLE_POWERDOWN |
- APS_FSMCO_PFM_ALDN;
- rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
-
- /*
- * Release RF digital isolation
- */
- val16 = rtl8xxxu_read16(priv, REG_SYS_ISO_CTRL);
- val16 &= ~SYS_ISO_DIOR;
- rtl8xxxu_write16(priv, REG_SYS_ISO_CTRL, val16);
-
- val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
- val8 &= ~APSD_CTRL_OFF;
- rtl8xxxu_write8(priv, REG_APSD_CTRL, val8);
- for (i = 200; i; i--) {
- val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
- if (!(val8 & APSD_CTRL_OFF_STATUS))
- break;
- }
+	if (priv->chip_cut || !priv->vendor_umc) {
+ rtl8xxxu_write8(priv, 0xfe40, 0xe6);
+ rtl8xxxu_write8(priv, 0xfe41, 0x94);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
- if (!i) {
- pr_info("%s: APSD_CTRL poll failed\n", __func__);
- return -EBUSY;
- }
+ rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+ rtl8xxxu_write8(priv, 0xfe41, 0x19);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
- /*
- * Enable MAC DMA/WMAC/SCHEDULE/SEC block
- */
- val16 = rtl8xxxu_read16(priv, REG_CR);
- val16 |= CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
- CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | CR_PROTOCOL_ENABLE |
- CR_SCHEDULE_ENABLE | CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE;
- rtl8xxxu_write16(priv, REG_CR, val16);
+ rtl8xxxu_write8(priv, 0xfe40, 0xe5);
+ rtl8xxxu_write8(priv, 0xfe41, 0x91);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
- /*
- * Workaround for 8188RU LNA power leakage problem.
- */
- if (priv->rtlchip == 0x8188c && priv->hi_pa) {
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
- val32 &= ~BIT(1);
- rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
+ rtl8xxxu_write8(priv, 0xfe40, 0xe2);
+ rtl8xxxu_write8(priv, 0xfe41, 0x81);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
}
- return 0;
}
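
The 0xfe40/0xfe41/0xfe42 writes above form a fixed sequence; expressed as data instead of straight-line code, it could reuse the same register/value pair shape the MAC init tables use. A sketch (assuming struct rtl8xxxu_reg8val with u16 reg and u8 val members, as used by those tables):

static const struct rtl8xxxu_reg8val usb_quirk_seq[] = {
	{0xfe40, 0xe6}, {0xfe41, 0x94}, {0xfe42, 0x80},
	{0xfe40, 0xe0}, {0xfe41, 0x19}, {0xfe42, 0x80},
	{0xfe40, 0xe5}, {0xfe41, 0x91}, {0xfe42, 0x80},
	{0xfe40, 0xe2}, {0xfe41, 0x81}, {0xfe42, 0x80},
};

static void write_usb_quirk_seq(struct rtl8xxxu_priv *priv)
{
	int i;

	/* Replay the register/value pairs in order. */
	for (i = 0; i < ARRAY_SIZE(usb_quirk_seq); i++)
		rtl8xxxu_write8(priv, usb_quirk_seq[i].reg,
				usb_quirk_seq[i].val);
}
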
-#endif
-
-static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_gen2_usb_quirks(struct rtl8xxxu_priv *priv)
{
- u16 val16;
u32 val32;
- int ret;
-
- ret = 0;
-
- val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
- if (val32 & SYS_CFG_SPS_LDO_SEL) {
- rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0xc3);
- } else {
- /*
- * Raise 1.2V voltage
- */
- val32 = rtl8xxxu_read32(priv, REG_8192E_LDOV12_CTRL);
- val32 &= 0xff0fffff;
- val32 |= 0x00500000;
- rtl8xxxu_write32(priv, REG_8192E_LDOV12_CTRL, val32);
- rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0x83);
- }
-
- rtl8192e_disabled_to_emu(priv);
-
- ret = rtl8192e_emu_to_active(priv);
- if (ret)
- goto exit;
-
- rtl8xxxu_write16(priv, REG_CR, 0x0000);
-
- /*
- * Enable MAC DMA/WMAC/SCHEDULE/SEC block
- * Set CR bit10 to enable 32k calibration.
- */
- val16 = rtl8xxxu_read16(priv, REG_CR);
- val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
- CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
- CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
- CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
- CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
- rtl8xxxu_write16(priv, REG_CR, val16);
-exit:
- return ret;
+ val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK);
+ val32 |= TXDMA_OFFSET_DROP_DATA_EN;
+ rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32);
}
-static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
{
u8 val8;
u16 val16;
@@ -6012,7 +3765,7 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
/*
* Workaround for 8188RU LNA power leakage problem.
*/
- if (priv->rtlchip == 0x8188c && priv->hi_pa) {
+ if (priv->rtl_chip == RTL8188R) {
val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
val32 |= BIT(1);
rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
@@ -6053,40 +3806,6 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e);
}
-static void rtl8723bu_power_off(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u16 val16;
-
- rtl8xxxu_flush_fifo(priv);
-
- /*
- * Disable TX report timer
- */
- val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
- val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE;
- rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
-
- rtl8xxxu_write16(priv, REG_CR, 0x0000);
-
- rtl8xxxu_active_to_lps(priv);
-
- /* Reset Firmware if running in RAM */
- if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
- rtl8xxxu_firmware_self_reset(priv);
-
- /* Reset MCU */
- val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
- val16 &= ~SYS_FUNC_CPU_ENABLE;
- rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
-
- /* Reset MCU ready status */
- rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
-
- rtl8723bu_active_to_emu(priv);
- rtl8xxxu_emu_to_disabled(priv);
-}
-
#ifdef NEED_PS_TDMA
static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5)
@@ -6100,175 +3819,77 @@ static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
h2c.b_type_dma.data3 = arg3;
h2c.b_type_dma.data4 = arg4;
h2c.b_type_dma.data5 = arg5;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_type_dma));
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.b_type_dma));
}
#endif
-static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv)
{
- struct h2c_cmd h2c;
u32 val32;
- u8 val8;
-
- /*
-	 * No indication anywhere as to what 0x0790 does. The two-antenna
-	 * vendor code preserves bits 6-7 here.
- */
- rtl8xxxu_write8(priv, 0x0790, 0x05);
- /*
-	 * 0x0778 seems to be related to the number of enabled antennas.
-	 * In the vendor driver, halbtc8723b2ant_InitHwConfig() sets it
-	 * to 0x03, while halbtc8723b1ant_InitHwConfig() sets it to 0x01.
- */
- rtl8xxxu_write8(priv, 0x0778, 0x01);
-
- val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
- val8 |= BIT(5);
- rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
-
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_IQADJ_G1, 0x780);
- rtl8723bu_write_btreg(priv, 0x3c, 0x15); /* BT TRx Mask on */
-
- /*
- * Set BT grant to low
- */
- memset(&h2c, 0, sizeof(struct h2c_cmd));
- h2c.bt_grant.cmd = H2C_8723B_BT_GRANT;
- h2c.bt_grant.data = 0;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_grant));
-
- /*
- * WLAN action by PTA
- */
- rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
-
- /*
- * BT select S0/S1 controlled by WiFi
- */
- val8 = rtl8xxxu_read8(priv, 0x0067);
- val8 |= BIT(5);
- rtl8xxxu_write8(priv, 0x0067, val8);
-
- val32 = rtl8xxxu_read32(priv, REG_PWR_DATA);
- val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
- rtl8xxxu_write32(priv, REG_PWR_DATA, val32);
-
- /*
- * Bits 6/7 are marked in/out ... but for what?
- */
- rtl8xxxu_write8(priv, 0x0974, 0xff);
-
- val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER);
- val32 |= (BIT(0) | BIT(1));
- rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32);
-
- rtl8xxxu_write8(priv, REG_RFE_CTRL_ANTA_SRC, 0x77);
-
- val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
- val32 &= ~BIT(24);
- val32 |= BIT(23);
- rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
-
- /*
- * Fix external switch Main->S1, Aux->S0
- */
- val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
- val8 &= ~BIT(0);
- rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
+ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
+ val32 &= ~(BIT(22) | BIT(23));
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
+}
- memset(&h2c, 0, sizeof(struct h2c_cmd));
- h2c.ant_sel_rsv.cmd = H2C_8723B_ANT_SEL_RSV;
- h2c.ant_sel_rsv.ant_inverse = 1;
- h2c.ant_sel_rsv.int_switch_type = 0;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ant_sel_rsv));
+static void rtl8xxxu_old_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
- /*
- * 0x280, 0x00, 0x200, 0x80 - not clear
- */
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+ if (priv->ep_tx_normal_queue)
+ val8 = TX_PAGE_NUM_NORM_PQ;
+ else
+ val8 = 0;
- /*
- * Software control, antenna at WiFi side
- */
-#ifdef NEED_PS_TDMA
- rtl8723bu_set_ps_tdma(priv, 0x08, 0x00, 0x00, 0x00, 0x00);
-#endif
+ rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
- rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
- rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x55555555);
- rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
- rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ val32 = (TX_PAGE_NUM_PUBQ << RQPN_PUB_PQ_SHIFT) | RQPN_LOAD;
- memset(&h2c, 0, sizeof(struct h2c_cmd));
- h2c.bt_info.cmd = H2C_8723B_BT_INFO;
- h2c.bt_info.data = BIT(0);
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_info));
+ if (priv->ep_tx_high_queue)
+ val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
+ if (priv->ep_tx_low_queue)
+ val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
- memset(&h2c, 0, sizeof(struct h2c_cmd));
- h2c.ignore_wlan.cmd = H2C_8723B_BT_IGNORE_WLANACT;
- h2c.ignore_wlan.data = 0;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan));
+ rtl8xxxu_write32(priv, REG_RQPN, val32);
}
-static void rtl8723b_disable_rf(struct rtl8xxxu_priv *priv)
+static void rtl8xxxu_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
{
+ struct rtl8xxxu_fileops *fops = priv->fops;
+ u32 hq, lq, nq, eq, pubq;
u32 val32;
- rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
-
- val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
- val32 &= ~(BIT(22) | BIT(23));
- rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
-}
-
-static void rtl8723bu_init_aggregation(struct rtl8xxxu_priv *priv)
-{
- u32 agg_rx;
- u8 agg_ctrl;
+ hq = 0;
+ lq = 0;
+ nq = 0;
+ eq = 0;
+ pubq = 0;
- /*
- * For now simply disable RX aggregation
- */
- agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL);
- agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN;
+ if (priv->ep_tx_high_queue)
+ hq = fops->page_num_hi;
+ if (priv->ep_tx_low_queue)
+ lq = fops->page_num_lo;
+ if (priv->ep_tx_normal_queue)
+ nq = fops->page_num_norm;
- agg_rx = rtl8xxxu_read32(priv, REG_RXDMA_AGG_PG_TH);
- agg_rx &= ~RXDMA_USB_AGG_ENABLE;
- agg_rx &= ~0xff0f;
+ val32 = (nq << RQPN_NPQ_SHIFT) | (eq << RQPN_EPQ_SHIFT);
+ rtl8xxxu_write32(priv, REG_RQPN_NPQ, val32);
- rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl);
- rtl8xxxu_write32(priv, REG_RXDMA_AGG_PG_TH, agg_rx);
-}
+ pubq = fops->total_page_num - hq - lq - nq;
-static void rtl8723bu_init_statistics(struct rtl8xxxu_priv *priv)
-{
- u32 val32;
+ val32 = RQPN_LOAD;
+ val32 |= (hq << RQPN_HI_PQ_SHIFT);
+ val32 |= (lq << RQPN_LO_PQ_SHIFT);
+ val32 |= (pubq << RQPN_PUB_PQ_SHIFT);
-	/* Time duration unit for NHM: 4 us; 0x2710 * 4 us = 40 ms */
- rtl8xxxu_write16(priv, REG_NHM_TIMER_8723B + 2, 0x2710);
- rtl8xxxu_write16(priv, REG_NHM_TH9_TH10_8723B + 2, 0xffff);
- rtl8xxxu_write32(priv, REG_NHM_TH3_TO_TH0_8723B, 0xffffff52);
- rtl8xxxu_write32(priv, REG_NHM_TH7_TO_TH4_8723B, 0xffffffff);
- /* TH8 */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 |= 0xff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
- /* Enable CCK */
- val32 = rtl8xxxu_read32(priv, REG_NHM_TH9_TH10_8723B);
- val32 |= BIT(8) | BIT(9) | BIT(10);
- rtl8xxxu_write32(priv, REG_NHM_TH9_TH10_8723B, val32);
- /* Max power amongst all RX antennas */
- val32 = rtl8xxxu_read32(priv, REG_OFDM0_FA_RSTC);
- val32 |= BIT(7);
- rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32);
+ rtl8xxxu_write32(priv, REG_RQPN, val32);
}
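
To make the page accounting concrete, take illustrative counts (not from any specific chip): fops->page_num_hi = 0x20, page_num_lo = 0x10, page_num_norm = 0x10 and total_page_num = 0xb0. Then pubq = 0xb0 - 0x20 - 0x10 - 0x10 = 0x70; nq is loaded separately through REG_RQPN_NPQ, and REG_RQPN receives:

/* Illustrative values only; the real counts come from fops->page_num_*. */
u32 val32 = RQPN_LOAD |
	    (0x20 << RQPN_HI_PQ_SHIFT) |	/* high-queue pages */
	    (0x10 << RQPN_LO_PQ_SHIFT) |	/* low-queue pages */
	    (0x70 << RQPN_PUB_PQ_SHIFT);	/* leftover public pages */
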
static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
- struct rtl8xxxu_rfregval *rftable;
bool macpower;
int ret;
u8 val8;
@@ -6293,33 +3914,22 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
goto exit;
}
- dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
if (!macpower) {
- ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM);
- if (ret) {
- dev_warn(dev, "%s: LLT table init failed\n", __func__);
- goto exit;
- }
+ if (priv->fops->total_page_num)
+ rtl8xxxu_init_queue_reserved_page(priv);
+ else
+ rtl8xxxu_old_init_queue_reserved_page(priv);
+ }
- /*
-		 * Presumably this applies to the 8188EU as well.
-		 * Enable the TX report and TX report timer.
- */
- if (priv->rtlchip == 0x8723bu) {
- val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
- val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
- rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
- /* Set MAX RPT MACID */
- rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL + 1, 0x02);
- /* TX report Timer. Unit: 32us */
- rtl8xxxu_write16(priv, REG_TX_REPORT_TIME, 0xcdf0);
+ ret = rtl8xxxu_init_queue_priority(priv);
+ dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
+ if (ret)
+ goto exit;
- /* tmp ps ? */
- val8 = rtl8xxxu_read8(priv, 0xa3);
- val8 &= 0xf8;
- rtl8xxxu_write8(priv, 0xa3, val8);
- }
- }
+ /*
+ * Set RX page boundary
+ */
+ rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, priv->fops->trxff_boundary);
ret = rtl8xxxu_download_firmware(priv);
dev_dbg(dev, "%s: download_fiwmare %i\n", __func__, ret);
@@ -6330,41 +3940,10 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (ret)
goto exit;
-	/* Solve the excessive protocol errors seen on the USB bus */
-	/* Can't be done for 8188/8192 UMC A cut parts */
- if (priv->rtlchip == 0x8723a ||
- ((priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c ||
- priv->rtlchip == 0x8188c) &&
- (priv->chip_cut || !priv->vendor_umc))) {
- rtl8xxxu_write8(priv, 0xfe40, 0xe6);
- rtl8xxxu_write8(priv, 0xfe41, 0x94);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
- rtl8xxxu_write8(priv, 0xfe40, 0xe0);
- rtl8xxxu_write8(priv, 0xfe41, 0x19);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
- rtl8xxxu_write8(priv, 0xfe40, 0xe5);
- rtl8xxxu_write8(priv, 0xfe41, 0x91);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
- rtl8xxxu_write8(priv, 0xfe40, 0xe2);
- rtl8xxxu_write8(priv, 0xfe41, 0x81);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
- }
-
- if (priv->rtlchip == 0x8192e) {
- rtl8xxxu_write32(priv, REG_HIMR0, 0x00);
- rtl8xxxu_write32(priv, REG_HIMR1, 0x00);
- }
-
if (priv->fops->phy_init_antenna_selection)
priv->fops->phy_init_antenna_selection(priv);
- if (priv->rtlchip == 0x8723b)
- ret = rtl8xxxu_init_mac(priv, rtl8723b_mac_init_table);
- else
- ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table);
+ ret = rtl8xxxu_init_mac(priv);
dev_dbg(dev, "%s: init_mac %i\n", __func__, ret);
if (ret)
@@ -6375,92 +3954,37 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (ret)
goto exit;
- switch(priv->rtlchip) {
- case 0x8723a:
- rftable = rtl8723au_radioa_1t_init_table;
- ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
- break;
- case 0x8723b:
- rftable = rtl8723bu_radioa_1t_init_table;
- ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
- /*
- * PHY LCK
- */
- rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdfbe0);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, 0x8c01);
- msleep(200);
- rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0);
- break;
- case 0x8188c:
- if (priv->hi_pa)
- rftable = rtl8188ru_radioa_1t_highpa_table;
- else
- rftable = rtl8192cu_radioa_1t_init_table;
- ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
- break;
- case 0x8191c:
- rftable = rtl8192cu_radioa_1t_init_table;
- ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
- break;
- case 0x8192c:
- rftable = rtl8192cu_radioa_2t_init_table;
- ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
- if (ret)
- break;
- rftable = rtl8192cu_radiob_2t_init_table;
- ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B);
- break;
- default:
- ret = -EINVAL;
- }
-
+ ret = priv->fops->init_phy_rf(priv);
if (ret)
goto exit;
- /*
- * Chip specific quirks
- */
- if (priv->rtlchip == 0x8723a) {
- /* Fix USB interface interference issue */
- rtl8xxxu_write8(priv, 0xfe40, 0xe0);
- rtl8xxxu_write8(priv, 0xfe41, 0x8d);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
- rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320);
+ /* RFSW Control - clear bit 14 ?? */
+ if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E)
+ rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
- /* Reduce 80M spur */
- rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d);
- rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
- rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82);
- rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
- } else {
- val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK);
- val32 |= TXDMA_OFFSET_DROP_DATA_EN;
- rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32);
+ val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
+ FPGA0_RF_ANTSWB |
+ ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB) << FPGA0_RF_BD_CTRL_SHIFT);
+ if (!priv->no_pape) {
+ val32 |= (FPGA0_RF_PAPE |
+ (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
}
+ rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
- if (!macpower) {
- if (priv->ep_tx_normal_queue)
- val8 = TX_PAGE_NUM_NORM_PQ;
- else
- val8 = 0;
-
- rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
-
- val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD;
-
- if (priv->ep_tx_high_queue)
- val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
- if (priv->ep_tx_low_queue)
- val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
-
- rtl8xxxu_write32(priv, REG_RQPN, val32);
+	/* 0x860[6:5] = 00 - why? - this sets antenna B */
+ if (priv->rtl_chip != RTL8192E)
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66f60210);
+ if (!macpower) {
/*
* Set TX buffer boundary
*/
- val8 = TX_TOTAL_PAGE_NUM + 1;
+ if (priv->rtl_chip == RTL8192E)
+ val8 = TX_TOTAL_PAGE_NUM_8192E + 1;
+ else
+ val8 = TX_TOTAL_PAGE_NUM + 1;
- if (priv->rtlchip == 0x8723b)
+ if (priv->rtl_chip == RTL8723B)
val8 -= 1;
rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
@@ -6470,54 +3994,63 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8);
}
- ret = rtl8xxxu_init_queue_priority(priv);
- dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
- if (ret)
- goto exit;
+ /*
+	 * The vendor drivers set PBP for all devices except the 8192e.
+	 * There is no explanation for this in any of the sources.
+ */
+ val8 = (priv->fops->pbp_rx << PBP_PAGE_SIZE_RX_SHIFT) |
+ (priv->fops->pbp_tx << PBP_PAGE_SIZE_TX_SHIFT);
+ if (priv->rtl_chip != RTL8192E)
+ rtl8xxxu_write8(priv, REG_PBP, val8);
- /* RFSW Control - clear bit 14 ?? */
- if (priv->rtlchip != 0x8723b)
- rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
- /* 0x07000760 */
- val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
- FPGA0_RF_ANTSWB | FPGA0_RF_PAPE |
- ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) <<
- FPGA0_RF_BD_CTRL_SHIFT);
- rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
- /* 0x860[6:5]= 00 - why? - this sets antenna B */
- rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66F60210);
+ dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
+ if (!macpower) {
+ ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM);
+ if (ret) {
+ dev_warn(dev, "%s: LLT table init failed\n", __func__);
+ goto exit;
+ }
- priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A,
- RF6052_REG_MODE_AG);
+ /*
+ * Chip specific quirks
+ */
+ priv->fops->usb_quirks(priv);
- /*
- * Set RX page boundary
- */
- if (priv->rtlchip == 0x8723b)
- rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3f7f);
- else
- rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff);
- /*
- * Transfer page size is always 128
- */
- if (priv->rtlchip == 0x8723b)
- val8 = (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_RX_SHIFT) |
- (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_TX_SHIFT);
- else
- val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) |
- (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT);
- rtl8xxxu_write8(priv, REG_PBP, val8);
+ /*
+		 * Presumably this applies to the 8188EU as well.
+		 * Enable the TX report and TX report timer.
+ */
+ if (priv->rtl_chip == RTL8723B) {
+ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+ val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
+ /* Set MAX RPT MACID */
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL + 1, 0x02);
+ /* TX report Timer. Unit: 32us */
+ rtl8xxxu_write16(priv, REG_TX_REPORT_TIME, 0xcdf0);
+
+ /* tmp ps ? */
+ val8 = rtl8xxxu_read8(priv, 0xa3);
+ val8 &= 0xf8;
+ rtl8xxxu_write8(priv, 0xa3, val8);
+ }
+ }
/*
	 * The unit is 8 bytes; it is not obvious what it is used for.
*/
rtl8xxxu_write8(priv, REG_RX_DRVINFO_SZ, 4);
- /*
-	 * Enable all interrupts - not obvious that USB needs to do this
- */
- rtl8xxxu_write32(priv, REG_HISR, 0xffffffff);
- rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
+ if (priv->rtl_chip == RTL8192E) {
+ rtl8xxxu_write32(priv, REG_HIMR0, 0x00);
+ rtl8xxxu_write32(priv, REG_HIMR1, 0x00);
+ } else {
+ /*
+		 * Enable all interrupts - not obvious that USB needs to do this
+ */
+ rtl8xxxu_write32(priv, REG_HISR, 0xffffffff);
+ rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
+ }
rtl8xxxu_set_mac(priv);
rtl8xxxu_set_linktype(priv, NL80211_IFTYPE_STATION);
@@ -6592,7 +4125,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* Initialize burst parameters
*/
- if (priv->rtlchip == 0x8723b) {
+ if (priv->rtl_chip == RTL8723B) {
/*
		 * For USB high speed, set 512B packets
*/
@@ -6643,9 +4176,11 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
priv->fops->set_tx_power(priv, 1, false);
/* Let the 8051 take control of antenna setting */
- val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
- val8 |= LEDCFG2_DPDT_SELECT;
- rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+ if (priv->rtl_chip != RTL8192E) {
+ val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+ val8 |= LEDCFG2_DPDT_SELECT;
+ rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+ }
rtl8xxxu_write8(priv, REG_HWSEQ_CTRL, 0xff);
@@ -6657,6 +4192,20 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (priv->fops->init_statistics)
priv->fops->init_statistics(priv);
+ if (priv->rtl_chip == RTL8192E) {
+ /*
+		 * 0x4c6[3] = 1: RTS BW = data BW
+		 * 0x4c6[3] = 0: RTS BW depends on CCA / secondary CCA result.
+ */
+ val8 = rtl8xxxu_read8(priv, REG_QUEUE_CTRL);
+ val8 &= ~BIT(3);
+ rtl8xxxu_write8(priv, REG_QUEUE_CTRL, val8);
+ /*
+ * Reset USB mode switch setting
+ */
+ rtl8xxxu_write8(priv, REG_ACLK_MON, 0x00);
+ }
+
rtl8723a_phy_lc_calibrate(priv);
priv->fops->phy_iq_calibrate(priv);
@@ -6664,7 +4213,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* This should enable thermal meter
*/
- if (priv->fops->has_s0s1)
+ if (priv->fops->tx_desc_size == sizeof(struct rtl8xxxu_txdesc40))
rtl8xxxu_write_rfreg(priv,
RF_A, RF6052_REG_T_METER_8723B, 0x37cf8);
else
@@ -6674,7 +4223,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT);
rtl8xxxu_write8(priv, REG_NAV_UPPER, val8);
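
The NAV_UPPER value above is ordinary integer ceiling division. Assuming NAV_UPPER_UNIT is 128 (microseconds per register unit, per rtl8xxxu_regs.h):

/*
 * (30000 + 128 - 1) / 128 = 235, and 235 * 128 = 30080 us: the smallest
 * whole number of units that still covers the 30 ms NAV upper bound.
 */
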
- if (priv->rtlchip == 0x8723a) {
+ if (priv->rtl_chip == RTL8723A) {
/*
		 * 2011/03/09 MH: debug only. The UMC-B cut passes the 2500 S5
		 * test, but we still need to find the root cause.
@@ -6685,6 +4234,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
val32 |= FPGA_RF_MODE_CCK;
rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
}
+ } else if (priv->rtl_chip == RTL8192E) {
+ rtl8xxxu_write8(priv, REG_USB_HRPWM, 0x00);
}
val32 = rtl8xxxu_read32(priv, REG_FWHW_TXQ_CTRL);
@@ -6692,17 +4243,20 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/* ack for xmit mgmt frames. */
rtl8xxxu_write32(priv, REG_FWHW_TXQ_CTRL, val32);
+ if (priv->rtl_chip == RTL8192E) {
+ /*
+ * Fix LDPC rx hang issue.
+ */
+ val32 = rtl8xxxu_read32(priv, REG_AFE_MISC);
+ rtl8xxxu_write8(priv, REG_8192E_LDOV12_CTRL, 0x75);
+ val32 &= 0xfff00fff;
+ val32 |= 0x0007e000;
+ rtl8xxxu_write32(priv, REG_AFE_MISC, val32);
+ }
exit:
return ret;
}
-static void rtl8xxxu_disable_device(struct ieee80211_hw *hw)
-{
- struct rtl8xxxu_priv *priv = hw->priv;
-
- priv->fops->power_off(priv);
-}
-
static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
struct ieee80211_key_conf *key, const u8 *mac)
{
@@ -6767,8 +4321,7 @@ static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
}
-static void rtl8723au_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi)
+void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv, u32 ramask, int sgi)
{
struct h2c_cmd h2c;
@@ -6784,11 +4337,11 @@ static void rtl8723au_update_rate_mask(struct rtl8xxxu_priv *priv,
dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zi\n",
__func__, ramask, h2c.ramask.arg, sizeof(h2c.ramask));
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ramask));
+ rtl8xxxu_gen1_h2c_cmd(priv, &h2c, sizeof(h2c.ramask));
}
-static void rtl8723bu_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi)
+void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
+ u32 ramask, int sgi)
{
struct h2c_cmd h2c;
u8 bw = 0;
@@ -6810,11 +4363,11 @@ static void rtl8723bu_update_rate_mask(struct rtl8xxxu_priv *priv,
dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zi\n",
__func__, ramask, h2c.ramask.arg, sizeof(h2c.b_macid_cfg));
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg));
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg));
}
-static void rtl8723au_report_connect(struct rtl8xxxu_priv *priv,
- u8 macid, bool connect)
+void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
+ u8 macid, bool connect)
{
struct h2c_cmd h2c;
@@ -6827,11 +4380,11 @@ static void rtl8723au_report_connect(struct rtl8xxxu_priv *priv,
else
h2c.joinbss.data = H2C_JOIN_BSS_DISCONNECT;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.joinbss));
+ rtl8xxxu_gen1_h2c_cmd(priv, &h2c, sizeof(h2c.joinbss));
}
-static void rtl8723bu_report_connect(struct rtl8xxxu_priv *priv,
- u8 macid, bool connect)
+void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
+ u8 macid, bool connect)
{
struct h2c_cmd h2c;
@@ -6843,7 +4396,7 @@ static void rtl8723bu_report_connect(struct rtl8xxxu_priv *priv,
else
h2c.media_status_rpt.parm &= ~BIT(0);
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
}
static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
@@ -6913,7 +4466,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
- rtl8723a_stop_tx_beacon(priv);
+ rtl8xxxu_stop_tx_beacon(priv);
/* joinbss sequence */
rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
@@ -7006,7 +4559,7 @@ static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb)
* format. The descriptor checksum is still only calculated over the
* initial 32 bytes of the descriptor!
*/
-static void rtl8xxxu_calc_tx_desc_csum(struct rtl8723au_tx_desc *tx_desc)
+static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_txdesc32 *tx_desc)
{
__le16 *ptr = (__le16 *)tx_desc;
u16 csum = 0;
@@ -7018,7 +4571,7 @@ static void rtl8xxxu_calc_tx_desc_csum(struct rtl8723au_tx_desc *tx_desc)
*/
tx_desc->csum = cpu_to_le16(0);
- for (i = 0; i < (sizeof(struct rtl8723au_tx_desc) / sizeof(u16)); i++)
+ for (i = 0; i < (sizeof(struct rtl8xxxu_txdesc32) / sizeof(u16)); i++)
csum = csum ^ le16_to_cpu(ptr[i]);
tx_desc->csum |= cpu_to_le16(csum);
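
As the comment above the function notes, the checksum covers only the first 32 bytes even for the 40-byte descriptors. The same XOR fold over an explicit 32-byte window, as a standalone sketch (hypothetical helper, not in the driver):

/* XOR-fold the first 32 bytes of a descriptor as 16 little-endian words. */
static u16 txdesc_csum32(const void *desc)
{
	const __le16 *ptr = desc;
	u16 csum = 0;
	int i;

	for (i = 0; i < 16; i++)	/* 16 = 32 bytes / sizeof(u16) */
		csum ^= le16_to_cpu(ptr[i]);

	return csum;
}
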
@@ -7156,8 +4709,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
struct rtl8xxxu_priv *priv = hw->priv;
- struct rtl8723au_tx_desc *tx_desc;
- struct rtl8723bu_tx_desc *tx_desc40;
+ struct rtl8xxxu_txdesc32 *tx_desc;
+ struct rtl8xxxu_txdesc40 *tx_desc40;
struct rtl8xxxu_tx_urb *tx_urb;
struct ieee80211_sta *sta = NULL;
struct ieee80211_vif *vif = tx_info->control.vif;
@@ -7202,7 +4755,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
if (control && control->sta)
sta = control->sta;
- tx_desc = (struct rtl8723au_tx_desc *)skb_push(skb, tx_desc_size);
+ tx_desc = (struct rtl8xxxu_txdesc32 *)skb_push(skb, tx_desc_size);
memset(tx_desc, 0, tx_desc_size);
tx_desc->pkt_size = cpu_to_le16(pktlen);
@@ -7259,37 +4812,35 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
tx_desc->txdw3 =
- cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723A);
+ cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
if (ampdu_enable)
- tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE_8723A);
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE);
else
- tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_BREAK_8723A);
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK);
if (ieee80211_is_mgmt(hdr->frame_control)) {
tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value);
tx_desc->txdw4 |=
- cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723A);
+ cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
tx_desc->txdw5 |=
- cpu_to_le32(6 <<
- TXDESC_RETRY_LIMIT_SHIFT_8723A);
+ cpu_to_le32(6 << TXDESC32_RETRY_LIMIT_SHIFT);
tx_desc->txdw5 |=
- cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723A);
+ cpu_to_le32(TXDESC32_RETRY_LIMIT_ENABLE);
}
if (ieee80211_is_data_qos(hdr->frame_control))
- tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS_8723A);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_QOS);
if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
(sta && vif && vif->bss_conf.use_short_preamble))
- tx_desc->txdw4 |=
- cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723A);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_SHORT_PREAMBLE);
if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
(ieee80211_is_data_qos(hdr->frame_control) &&
sta && sta->ht_cap.cap &
(IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) {
- tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI);
+ tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
}
if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
@@ -7299,46 +4850,43 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
*/
tx_desc->txdw4 |=
cpu_to_le32(DESC_RATE_24M <<
- TXDESC_RTS_RATE_SHIFT_8723A);
+ TXDESC32_RTS_RATE_SHIFT);
tx_desc->txdw4 |=
- cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723A);
- tx_desc->txdw4 |=
- cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723A);
+ cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
}
} else {
- tx_desc40 = (struct rtl8723bu_tx_desc *)tx_desc;
+ tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc;
tx_desc40->txdw4 = cpu_to_le32(rate);
if (ieee80211_is_data(hdr->frame_control)) {
tx_desc->txdw4 |=
cpu_to_le32(0x1f <<
- TXDESC_DATA_RATE_FB_SHIFT_8723B);
+ TXDESC40_DATA_RATE_FB_SHIFT);
}
tx_desc40->txdw9 =
- cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723B);
+ cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT);
if (ampdu_enable)
- tx_desc40->txdw2 |=
- cpu_to_le32(TXDESC_AGG_ENABLE_8723B);
+ tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
else
- tx_desc40->txdw2 |= cpu_to_le32(TXDESC_AGG_BREAK_8723B);
+ tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
if (ieee80211_is_mgmt(hdr->frame_control)) {
tx_desc40->txdw4 = cpu_to_le32(tx_rate->hw_value);
tx_desc40->txdw3 |=
- cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723B);
+ cpu_to_le32(TXDESC40_USE_DRIVER_RATE);
tx_desc40->txdw4 |=
- cpu_to_le32(6 <<
- TXDESC_RETRY_LIMIT_SHIFT_8723B);
+ cpu_to_le32(6 << TXDESC40_RETRY_LIMIT_SHIFT);
tx_desc40->txdw4 |=
- cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723B);
+ cpu_to_le32(TXDESC40_RETRY_LIMIT_ENABLE);
}
if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
(sta && vif && vif->bss_conf.use_short_preamble))
tx_desc40->txdw5 |=
- cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723B);
+ cpu_to_le32(TXDESC40_SHORT_PREAMBLE);
if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
/*
@@ -7347,11 +4895,9 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
*/
tx_desc->txdw4 |=
cpu_to_le32(DESC_RATE_24M <<
- TXDESC_RTS_RATE_SHIFT_8723B);
- tx_desc->txdw3 |=
- cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723B);
- tx_desc->txdw3 |=
- cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723B);
+ TXDESC40_RTS_RATE_SHIFT);
+ tx_desc->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
+ tx_desc->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
}
}
@@ -7491,15 +5037,21 @@ static void rtl8xxxu_rx_urb_work(struct work_struct *work)
}
}
-static int rtl8723au_parse_rx_desc(struct rtl8xxxu_priv *priv,
- struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status)
+int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status)
{
- struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data;
+ struct rtl8xxxu_rxdesc16 *rx_desc =
+ (struct rtl8xxxu_rxdesc16 *)skb->data;
struct rtl8723au_phy_stats *phy_stats;
+ __le32 *_rx_desc_le = (__le32 *)skb->data;
+ u32 *_rx_desc = (u32 *)skb->data;
int drvinfo_sz, desc_shift;
+ int i;
+
+ for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc16) / sizeof(u32)); i++)
+ _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
- skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc));
+ skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc16));
phy_stats = (struct rtl8723au_phy_stats *)skb->data;
@@ -7531,16 +5083,21 @@ static int rtl8723au_parse_rx_desc(struct rtl8xxxu_priv *priv,
return RX_TYPE_DATA_PKT;
}
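
Both parse_rxdesc variants now open-code the same in-place little-endian to CPU-order conversion of the descriptor words. A shared sketch (hypothetical name):

/* Convert 'words' consecutive __le32 descriptor words to CPU order in place. */
static void rtl8xxxu_desc_to_cpu(void *desc, int words)
{
	__le32 *src = desc;
	u32 *dst = desc;
	int i;

	for (i = 0; i < words; i++)
		dst[i] = le32_to_cpu(src[i]);
}

/* Usage, e.g.: rtl8xxxu_desc_to_cpu(skb->data,
 *		sizeof(struct rtl8xxxu_rxdesc16) / sizeof(u32)); */
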
-static int rtl8723bu_parse_rx_desc(struct rtl8xxxu_priv *priv,
- struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status)
+int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status)
{
- struct rtl8723bu_rx_desc *rx_desc =
- (struct rtl8723bu_rx_desc *)skb->data;
+ struct rtl8xxxu_rxdesc24 *rx_desc =
+ (struct rtl8xxxu_rxdesc24 *)skb->data;
struct rtl8723au_phy_stats *phy_stats;
+ __le32 *_rx_desc_le = (__le32 *)skb->data;
+ u32 *_rx_desc = (u32 *)skb->data;
int drvinfo_sz, desc_shift;
+ int i;
- skb_pull(skb, sizeof(struct rtl8723bu_rx_desc));
+ for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc24) / sizeof(u32)); i++)
+ _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+
+ skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc24));
phy_stats = (struct rtl8723au_phy_stats *)skb->data;
@@ -7632,12 +5189,7 @@ static void rtl8xxxu_rx_complete(struct urb *urb)
struct sk_buff *skb = (struct sk_buff *)urb->context;
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct device *dev = &priv->udev->dev;
- __le32 *_rx_desc_le = (__le32 *)skb->data;
- u32 *_rx_desc = (u32 *)skb->data;
- int rx_type, i;
-
- for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++)
- _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+ int rx_type;
skb_put(skb, urb->actual_length);
@@ -7676,14 +5228,15 @@ static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
{
struct sk_buff *skb;
int skb_size;
- int ret;
+ int ret, rx_desc_sz;
- skb_size = sizeof(struct rtl8xxxu_rx_desc) + RTL_RX_BUFFER_SIZE;
+ rx_desc_sz = priv->fops->rx_desc_size;
+ skb_size = rx_desc_sz + RTL_RX_BUFFER_SIZE;
skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL);
if (!skb)
return -ENOMEM;
- memset(skb->data, 0, sizeof(struct rtl8xxxu_rx_desc));
+ memset(skb->data, 0, rx_desc_sz);
usb_fill_bulk_urb(&rx_urb->urb, priv->udev, priv->pipe_in, skb->data,
skb_size, rtl8xxxu_rx_complete, skb);
usb_anchor_urb(&rx_urb->urb, &priv->rx_anchor);
@@ -7749,7 +5302,7 @@ static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- rtl8723a_stop_tx_beacon(priv);
+ rtl8xxxu_stop_tx_beacon(priv);
val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE |
@@ -8153,6 +5706,8 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
if (priv->usb_interrupts)
usb_kill_anchored_urbs(&priv->int_anchor);
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
priv->fops->disable_rf(priv);
/*
@@ -8285,6 +5840,10 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
if (id->idProduct == 0x7811)
untested = 0;
break;
+ case 0x050d:
+ if (id->idProduct == 0x1004)
+ untested = 0;
+ break;
default:
break;
}
@@ -8377,7 +5936,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
dev_info(&udev->dev, "Enabling HT_20_40 on the 2.4GHz band\n");
sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
hw->wiphy->rts_threshold = 2347;
@@ -8413,13 +5972,14 @@ static void rtl8xxxu_disconnect(struct usb_interface *interface)
hw = usb_get_intfdata(interface);
priv = hw->priv;
- rtl8xxxu_disable_device(hw);
+ ieee80211_unregister_hw(hw);
+
+ priv->fops->power_off(priv);
+
usb_set_intfdata(interface, NULL);
dev_info(&priv->udev->dev, "disconnecting\n");
- ieee80211_unregister_hw(hw);
-
kfree(priv->fw_data);
mutex_destroy(&priv->usb_buf_mutex);
mutex_destroy(&priv->h2c_mutex);
@@ -8428,115 +5988,6 @@ static void rtl8xxxu_disconnect(struct usb_interface *interface)
ieee80211_free_hw(hw);
}
-static struct rtl8xxxu_fileops rtl8723au_fops = {
- .parse_efuse = rtl8723au_parse_efuse,
- .load_firmware = rtl8723au_load_firmware,
- .power_on = rtl8723au_power_on,
- .power_off = rtl8xxxu_power_off,
- .reset_8051 = rtl8xxxu_reset_8051,
- .llt_init = rtl8xxxu_init_llt_table,
- .phy_iq_calibrate = rtl8723au_phy_iq_calibrate,
- .config_channel = rtl8723au_config_channel,
- .parse_rx_desc = rtl8723au_parse_rx_desc,
- .enable_rf = rtl8723a_enable_rf,
- .disable_rf = rtl8723a_disable_rf,
- .set_tx_power = rtl8723a_set_tx_power,
- .update_rate_mask = rtl8723au_update_rate_mask,
- .report_connect = rtl8723au_report_connect,
- .writeN_block_size = 1024,
- .mbox_ext_reg = REG_HMBOX_EXT_0,
- .mbox_ext_width = 2,
- .tx_desc_size = sizeof(struct rtl8723au_tx_desc),
- .adda_1t_init = 0x0b1b25a0,
- .adda_1t_path_on = 0x0bdb25a0,
- .adda_2t_path_on_a = 0x04db25a4,
- .adda_2t_path_on_b = 0x0b1b25a4,
-};
-
-static struct rtl8xxxu_fileops rtl8723bu_fops = {
- .parse_efuse = rtl8723bu_parse_efuse,
- .load_firmware = rtl8723bu_load_firmware,
- .power_on = rtl8723bu_power_on,
- .power_off = rtl8723bu_power_off,
- .reset_8051 = rtl8723bu_reset_8051,
- .llt_init = rtl8xxxu_auto_llt_table,
- .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection,
- .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate,
- .config_channel = rtl8723bu_config_channel,
- .parse_rx_desc = rtl8723bu_parse_rx_desc,
- .init_aggregation = rtl8723bu_init_aggregation,
- .init_statistics = rtl8723bu_init_statistics,
- .enable_rf = rtl8723b_enable_rf,
- .disable_rf = rtl8723b_disable_rf,
- .set_tx_power = rtl8723b_set_tx_power,
- .update_rate_mask = rtl8723bu_update_rate_mask,
- .report_connect = rtl8723bu_report_connect,
- .writeN_block_size = 1024,
- .mbox_ext_reg = REG_HMBOX_EXT0_8723B,
- .mbox_ext_width = 4,
- .tx_desc_size = sizeof(struct rtl8723bu_tx_desc),
- .has_s0s1 = 1,
- .adda_1t_init = 0x01c00014,
- .adda_1t_path_on = 0x01c00014,
- .adda_2t_path_on_a = 0x01c00014,
- .adda_2t_path_on_b = 0x01c00014,
-};
-
-#ifdef CONFIG_RTL8XXXU_UNTESTED
-
-static struct rtl8xxxu_fileops rtl8192cu_fops = {
- .parse_efuse = rtl8192cu_parse_efuse,
- .load_firmware = rtl8192cu_load_firmware,
- .power_on = rtl8192cu_power_on,
- .power_off = rtl8xxxu_power_off,
- .reset_8051 = rtl8xxxu_reset_8051,
- .llt_init = rtl8xxxu_init_llt_table,
- .phy_iq_calibrate = rtl8723au_phy_iq_calibrate,
- .config_channel = rtl8723au_config_channel,
- .parse_rx_desc = rtl8723au_parse_rx_desc,
- .enable_rf = rtl8723a_enable_rf,
- .disable_rf = rtl8723a_disable_rf,
- .set_tx_power = rtl8723a_set_tx_power,
- .update_rate_mask = rtl8723au_update_rate_mask,
- .report_connect = rtl8723au_report_connect,
- .writeN_block_size = 128,
- .mbox_ext_reg = REG_HMBOX_EXT_0,
- .mbox_ext_width = 2,
- .tx_desc_size = sizeof(struct rtl8723au_tx_desc),
- .adda_1t_init = 0x0b1b25a0,
- .adda_1t_path_on = 0x0bdb25a0,
- .adda_2t_path_on_a = 0x04db25a4,
- .adda_2t_path_on_b = 0x0b1b25a4,
-};
-
-#endif
-
-static struct rtl8xxxu_fileops rtl8192eu_fops = {
- .parse_efuse = rtl8192eu_parse_efuse,
- .load_firmware = rtl8192eu_load_firmware,
- .power_on = rtl8192eu_power_on,
- .power_off = rtl8xxxu_power_off,
- .reset_8051 = rtl8xxxu_reset_8051,
- .llt_init = rtl8xxxu_auto_llt_table,
- .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate,
- .config_channel = rtl8723bu_config_channel,
- .parse_rx_desc = rtl8723bu_parse_rx_desc,
- .enable_rf = rtl8723b_enable_rf,
- .disable_rf = rtl8723b_disable_rf,
- .set_tx_power = rtl8723b_set_tx_power,
- .update_rate_mask = rtl8723au_update_rate_mask,
- .report_connect = rtl8723au_report_connect,
- .writeN_block_size = 128,
- .mbox_ext_reg = REG_HMBOX_EXT0_8723B,
- .mbox_ext_width = 4,
- .tx_desc_size = sizeof(struct rtl8723au_tx_desc),
- .has_s0s1 = 1,
- .adda_1t_init = 0x0fc01616,
- .adda_1t_path_on = 0x0fc01616,
- .adda_2t_path_on_a = 0x0fc01616,
- .adda_2t_path_on_b = 0x0fc01616,
-};
-
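
The ops tables deleted here move into per-chip source files; the mechanism is the usual C vtable of function pointers plus per-chip constants, dispatched through priv->fops. A reduced illustration (field subset picked for this sketch; the real struct rtl8xxxu_fileops carries many more members):

struct chip_ops_sketch {
	int (*power_on)(struct rtl8xxxu_priv *priv);
	void (*power_off)(struct rtl8xxxu_priv *priv);
	int (*parse_rx_desc)(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
			     struct ieee80211_rx_status *rx_status);
	u8 tx_desc_size;
	u8 rx_desc_size;
};

/* Generic code stays chip-agnostic: */
/*	ret = priv->fops->power_on(priv);			*/
/*	skb_size = priv->fops->rx_desc_size + RTL_RX_BUFFER_SIZE;	*/
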
static struct usb_device_id dev_table[] = {
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8724, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8723au_fops},
@@ -8559,6 +6010,9 @@ static struct usb_device_id dev_table[] = {
/* Tested by Larry Finger */
{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7811, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+/* Tested by Andrea Merello */
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
/* Currently untested 8188 series devices */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
@@ -8643,8 +6097,6 @@ static struct usb_device_id dev_table[] = {
/* Currently untested 8192 series devices */
{USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0950, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
-{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
- .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2102, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2103, 0xff, 0xff, 0xff),
@@ -8700,6 +6152,7 @@ static struct usb_driver rtl8xxxu_driver = {
.probe = rtl8xxxu_probe,
.disconnect = rtl8xxxu_disconnect,
.id_table = dev_table,
+ .no_dynamic_id = 1,
.disable_hub_initiated_lpm = 1,
};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index e545e849f..b0e0c6423 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -109,6 +109,9 @@
#define AFE_XTAL_GATE_DIG BIT(17)
#define AFE_XTAL_BT_GATE BIT(20)
+/*
+ * 0x0028 is also known as REG_AFE_CTRL2 on 8723bu/8192eu
+ */
#define REG_AFE_PLL_CTRL 0x0028
#define AFE_PLL_ENABLE BIT(0)
#define AFE_PLL_320_ENABLE BIT(1)
@@ -192,6 +195,7 @@
control */
#define MULTI_GPS_FUNC_EN BIT(22) /* GPS function enable */
+#define REG_AFE_CTRL4 0x0078 /* 8192eu/8723bu */
#define REG_LDO_SW_CTRL 0x007c /* 8192eu */
#define REG_MCU_FW_DL 0x0080
@@ -383,7 +387,7 @@
#define REG_RQPN 0x0200
#define RQPN_HI_PQ_SHIFT 0
#define RQPN_LO_PQ_SHIFT 8
-#define RQPN_NORM_PQ_SHIFT 16
+#define RQPN_PUB_PQ_SHIFT 16
#define RQPN_LOAD BIT(31)
#define REG_FIFOPAGE 0x0204
@@ -417,13 +421,20 @@
/* spec version 11 */
/* 0x0400 ~ 0x047F Protocol Configuration */
-#define REG_VOQ_INFORMATION 0x0400
-#define REG_VIQ_INFORMATION 0x0404
-#define REG_BEQ_INFORMATION 0x0408
-#define REG_BKQ_INFORMATION 0x040c
-#define REG_MGQ_INFORMATION 0x0410
-#define REG_HGQ_INFORMATION 0x0414
-#define REG_BCNQ_INFORMATION 0x0418
+/* 8192c, 8192d */
+#define REG_VOQ_INFO 0x0400
+#define REG_VIQ_INFO 0x0404
+#define REG_BEQ_INFO 0x0408
+#define REG_BKQ_INFO 0x040c
+/* 8188e, 8723a, 8812a, 8821a, 8192e, 8723b */
+#define REG_Q0_INFO 0x400
+#define REG_Q1_INFO 0x404
+#define REG_Q2_INFO 0x408
+#define REG_Q3_INFO 0x40c
+
+#define REG_MGQ_INFO 0x0410
+#define REG_HGQ_INFO 0x0414
+#define REG_BCNQ_INFO 0x0418
#define REG_CPU_MGQ_INFORMATION 0x041c
#define REG_FWHW_TXQ_CTRL 0x0420
@@ -494,6 +505,9 @@
#define REG_DATA_SUBCHANNEL 0x0483
/* 8723au */
#define REG_INIDATA_RATE_SEL 0x0484
+/* MACID_SLEEP_1/3 for 8723b, 8192e, 8812a, 8821a */
+#define REG_MACID_SLEEP_3_8723B		0x0484
+#define REG_MACID_SLEEP_1_8723B		0x0488
#define REG_POWER_STATUS 0x04a4
#define REG_POWER_STAGE1 0x04b4
@@ -502,12 +516,20 @@
#define REG_PKT_VO_VI_LIFE_TIME 0x04c0
#define REG_PKT_BE_BK_LIFE_TIME 0x04c2
#define REG_STBC_SETTING 0x04c4
+#define REG_QUEUE_CTRL 0x04c6
#define REG_HT_SINGLE_AMPDU_8723B 0x04c7
#define REG_PROT_MODE_CTRL 0x04c8
#define REG_MAX_AGGR_NUM 0x04ca
#define REG_RTS_MAX_AGGR_NUM 0x04cb
#define REG_BAR_MODE_CTRL 0x04cc
#define REG_RA_TRY_RATE_AGG_LMT 0x04cf
+/* MACID_DROP for 8723a */
+#define REG_MACID_DROP_8723A		0x04d0
+/* EARLY_MODE_CONTROL 8188e */
+#define REG_EARLY_MODE_CONTROL_8188E 0x04d0
+/* MACID_SLEEP_2 for 8723b, 8192e, 8812a, 8821a */
+#define REG_MACID_SLEEP_2_8723B		0x04d0
+#define REG_MACID_SLEEP 0x04d4
#define REG_NQOS_SEQ 0x04dc
#define REG_QOS_SEQ 0x04de
#define REG_NEED_CPU_HANDLE 0x04e0
@@ -860,6 +882,10 @@
#define CCK0_SIDEBAND BIT(4)
#define REG_CCK0_AFE_SETTING 0x0a04
+#define CCK0_AFE_RX_MASK 0x0f000000
+#define CCK0_AFE_RX_ANT_AB BIT(24)
+#define CCK0_AFE_RX_ANT_A 0
+#define CCK0_AFE_RX_ANT_B (BIT(24) | BIT(26))
#define REG_CONFIG_ANT_A 0x0b68
#define REG_CONFIG_ANT_B 0x0b6c
@@ -1026,6 +1052,7 @@
#define USB_HIMR_ROK BIT(0) /* Receive DMA OK Interrupt */
#define REG_USB_SPECIAL_OPTION 0xfe55
+#define REG_USB_HRPWM 0xfe58
#define REG_USB_DMA_AGG_TO 0xfe5b
#define REG_USB_AGG_TO 0xfe5c
#define REG_USB_AGG_TH 0xfe5d
@@ -1111,6 +1138,7 @@
#define RF6052_REG_T_METER_8723B 0x42
#define RF6052_REG_UNKNOWN_43 0x43
#define RF6052_REG_UNKNOWN_55 0x55
+#define RF6052_REG_UNKNOWN_56 0x56
#define RF6052_REG_S0S1 0xb0
#define RF6052_REG_UNKNOWN_DF 0xdf
#define RF6052_REG_UNKNOWN_ED 0xed
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 7a40d8dff..264466f59 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -131,7 +131,7 @@ static struct ieee80211_rate rtl_ratetable_5g[] = {
};
static const struct ieee80211_supported_band rtl_band_2ghz = {
- .band = IEEE80211_BAND_2GHZ,
+ .band = NL80211_BAND_2GHZ,
.channels = rtl_channeltable_2g,
.n_channels = ARRAY_SIZE(rtl_channeltable_2g),
@@ -143,7 +143,7 @@ static const struct ieee80211_supported_band rtl_band_2ghz = {
};
static struct ieee80211_supported_band rtl_band_5ghz = {
- .band = IEEE80211_BAND_5GHZ,
+ .band = NL80211_BAND_5GHZ,
.channels = rtl_channeltable_5g,
.n_channels = ARRAY_SIZE(rtl_channeltable_5g),
@@ -197,7 +197,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- /*hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ /*hw->wiphy->bands[NL80211_BAND_2GHZ]
	 *based on ant_num
	 *rx_mask: RX mask
	 *if rx_ant == 1, rx_mask[0] = 0xff ==> MCS0-MCS7
@@ -328,26 +328,26 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
rtlhal->bandset == BAND_ON_BOTH) {
/* 1: 2.4 G bands */
/* <1> use mac->bands as mem for hw->wiphy->bands */
- sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+ sband = &(rtlmac->bands[NL80211_BAND_2GHZ]);
- /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ /* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ]
* to default value(1T1R) */
- memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz,
+ memcpy(&(rtlmac->bands[NL80211_BAND_2GHZ]), &rtl_band_2ghz,
sizeof(struct ieee80211_supported_band));
		/* <3> init ht cap based on ant_num */
_rtl_init_hw_ht_capab(hw, &sband->ht_cap);
/* <4> set mac->sband to wiphy->sband */
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
/* 2: 5 G bands */
/* <1> use mac->bands as mem for hw->wiphy->bands */
- sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
+ sband = &(rtlmac->bands[NL80211_BAND_5GHZ]);
- /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
+ /* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ]
* to default value(1T1R) */
- memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), &rtl_band_5ghz,
+ memcpy(&(rtlmac->bands[NL80211_BAND_5GHZ]), &rtl_band_5ghz,
sizeof(struct ieee80211_supported_band));
/* <3> init ht cap base on ant_num */
@@ -355,15 +355,15 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
_rtl_init_hw_vht_capab(hw, &sband->vht_cap);
/* <4> set mac->sband to wiphy->sband */
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
} else {
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* <1> use mac->bands as mem for hw->wiphy->bands */
- sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+ sband = &(rtlmac->bands[NL80211_BAND_2GHZ]);
- /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ /* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ]
* to default value(1T1R) */
- memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]),
+ memcpy(&(rtlmac->bands[NL80211_BAND_2GHZ]),
&rtl_band_2ghz,
sizeof(struct ieee80211_supported_band));
@@ -371,14 +371,14 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
_rtl_init_hw_ht_capab(hw, &sband->ht_cap);
/* <4> set mac->sband to wiphy->sband */
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
} else if (rtlhal->current_bandtype == BAND_ON_5G) {
/* <1> use mac->bands as mem for hw->wiphy->bands */
- sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
+ sband = &(rtlmac->bands[NL80211_BAND_5GHZ]);
- /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
+ /* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ]
	 * to default value (1T1R) */
- memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]),
+ memcpy(&(rtlmac->bands[NL80211_BAND_5GHZ]),
&rtl_band_5ghz,
sizeof(struct ieee80211_supported_band));
@@ -387,7 +387,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
_rtl_init_hw_vht_capab(hw, &sband->vht_cap);
/* <4> set mac->sband to wiphy->sband */
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Err BAND %d\n",
rtlhal->current_bandtype);
@@ -861,7 +861,7 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
/* mac80211's rate_idx is like this:
*
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
+ * 2.4G band:rx_status->band == NL80211_BAND_2GHZ
*
* B/G rate:
* (rx_status->flag & RX_FLAG_HT) = 0,
@@ -871,7 +871,7 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
* (rx_status->flag & RX_FLAG_HT) = 1,
* DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
*
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
+ * 5G band:rx_status->band == NL80211_BAND_5GHZ
* A rate:
* (rx_status->flag & RX_FLAG_HT) = 0,
* DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
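As a worked example of the mapping spelled out in the comment above (sketch
fragments only; the function itself uses an explicit switch, as the next hunk
shows, rather than assuming the DESC_RATE* enumerators are contiguous):

	/* 2.4 GHz legacy (CCK + OFDM): DESC_RATE1M..DESC_RATE54M -> idx 0..11 */
	rate_idx = desc_rate - DESC_RATE1M;	/* e.g. DESC_RATE11M -> 3 */
	/* HT on either band: DESC_RATEMCS0..DESC_RATEMCS15 -> idx 0..15 */
	rate_idx = desc_rate - DESC_RATEMCS0;
	/* 5 GHz legacy (OFDM only): DESC_RATE6M..DESC_RATE54M -> idx 0..7 */
	rate_idx = desc_rate - DESC_RATE6M;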
@@ -958,7 +958,7 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, bool isvht,
return rate_idx;
}
if (false == isht) {
- if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
+ if (NL80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
switch (desc_rate) {
case DESC_RATE1M:
rate_idx = 0;
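The band rename running through this file (and the rest of the series) is
mechanical: the old IEEE80211_BAND_* values are replaced by the equivalent
nl80211 enumerators, so drivers index wiphy->bands with the nl80211 enum
directly. Roughly, per include/uapi/linux/nl80211.h in this kernel (abridged):

	enum nl80211_band {
		NL80211_BAND_2GHZ,	/* was IEEE80211_BAND_2GHZ */
		NL80211_BAND_5GHZ,	/* was IEEE80211_BAND_5GHZ */
		NL80211_BAND_60GHZ,

		NUM_NL80211_BANDS,	/* was IEEE80211_NUM_BANDS */
	};

The two enums carried identical values, so the substitution is purely
textual and changes no behavior.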
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 451456835..a30af6cc2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -70,83 +70,83 @@ static u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh,
if (level_num == 2) {
if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
(coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi pre state = LOW\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state = LOW\n");
if (btrssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
btrssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to High\n");
} else {
btrssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at Low\n");
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi pre state = HIGH\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state = HIGH\n");
if (btrssi < rssi_thresh) {
btrssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Low\n");
} else {
btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi thresh error!!\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
(coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi pre state = LOW\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state = LOW\n");
if (btrssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
btrssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Medium\n");
} else {
btrssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
(coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_STAY_MEDIUM)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi pre state = MEDIUM\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi pre state = MEDIUM\n");
if (btrssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
btrssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to High\n");
} else if (btrssi < rssi_thresh) {
btrssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Low\n");
} else {
btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at Medium\n");
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi pre state = HIGH\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state = HIGH\n");
if (btrssi < rssi_thresh1) {
btrssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Medium\n");
} else {
btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at High\n");
}
}
}
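The other conversion running through the btcoexist files replaces the verbose
BTC_PRINT(BTC_MSG_ALGORITHM/BTC_MSG_INTERFACE, ...) calls with the shorter
btc_alg_dbg()/btc_iface_dbg() helpers. A rough sketch of their shape, assuming
they keep the old per-message-class filtering (the authoritative definitions
live in btcoexist/halbtc_out_src.h):

	#define btc_alg_dbg(dbg_flag, fmt, ...)				\
		do {							\
			if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] &	\
				     dbg_flag))				\
				printk(fmt, ##__VA_ARGS__);		\
		} while (0)

	#define btc_iface_dbg(dbg_flag, fmt, ...)			\
		do {							\
			if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] &	\
				     dbg_flag))				\
				printk(fmt, ##__VA_ARGS__);		\
		} while (0)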
@@ -173,32 +173,28 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
if (wifirssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
wifirssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to High\n");
} else {
wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at Low\n");
}
} else {
if (wifirssi < rssi_thresh) {
wifirssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Low\n");
} else {
wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
- "wifi RSSI thresh error!!\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -209,14 +205,12 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
if (wifirssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
wifirssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Medium\n");
} else {
wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -225,31 +219,26 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
if (wifirssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
wifirssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to High\n");
} else if (wifirssi < rssi_thresh) {
wifirssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Low\n");
} else {
wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at Medium\n");
}
} else {
if (wifirssi < rssi_thresh1) {
wifirssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Medium\n");
} else {
wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at High\n");
}
}
}
@@ -284,26 +273,26 @@ static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist)
bt_disabled = false;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is enabled !!\n");
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is enabled !!\n");
} else {
bt_disable_cnt++;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], bt all counters = 0, %d times!!\n",
- bt_disable_cnt);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], bt all counters = 0, %d times!!\n",
+ bt_disable_cnt);
if (bt_disable_cnt >= 2) {
bt_disabled = true;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is disabled !!\n");
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is disabled !!\n");
}
}
if (pre_bt_disabled != bt_disabled) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is from %s to %s!!\n",
- (pre_bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
pre_bt_disabled = bt_disabled;
}
}
@@ -499,12 +488,12 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -518,9 +507,9 @@ static void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -592,8 +581,8 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
if (!bt_link_info->bt_link_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "No BT link exists!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "No BT link exists!!!\n");
return algorithm;
}
@@ -608,27 +597,27 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (numdiffprofile == 1) {
if (bt_link_info->sco_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "HID only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "HID only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "A2DP only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "A2DP only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "PAN(HS) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "PAN(HS) only\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "PAN(EDR) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "PAN(EDR) only\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR;
}
@@ -637,21 +626,21 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (numdiffprofile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + HID\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + A2DP ==> SCO\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + A2DP ==> SCO\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->pan_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
}
@@ -660,38 +649,38 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
if (stack_info->num_of_hid >= 2) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "HID*2 + A2DP\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "HID*2 + A2DP\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "HID + A2DP\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "HID + A2DP\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
}
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "HID + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "HID + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "A2DP + PAN(HS)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -701,30 +690,30 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + HID + A2DP ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + HID + A2DP ==> HID\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + HID + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + HID + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + A2DP + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -734,13 +723,13 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "HID + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "HID + A2DP + PAN(HS)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "HID + A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -752,12 +741,12 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "ErrorSCO+HID+A2DP+PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "ErrorSCO+HID+A2DP+PAN(HS)\n");
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO+HID+A2DP+PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO+HID+A2DP+PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -778,10 +767,10 @@ static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
*/
h2c_parameter[0] = dac_swinglvl;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -793,9 +782,9 @@ static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
h2c_parameter[0] = dec_btpwr_lvl;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
- dec_btpwr_lvl, h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
+ dec_btpwr_lvl, h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
@@ -803,15 +792,15 @@ static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist,
bool force_exec, u8 dec_btpwr_lvl)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s Dec BT power level = %d\n",
- (force_exec ? "force to" : ""), dec_btpwr_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s Dec BT power level = %d\n",
+ (force_exec ? "force to" : ""), dec_btpwr_lvl);
coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
- coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
}
halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr);
@@ -828,10 +817,10 @@ static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist,
if (enable_autoreport)
h2c_parameter[0] |= BIT0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
- (enable_autoreport ? "Enabled!!" : "Disabled!!"),
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+ (enable_autoreport ? "Enabled!!" : "Disabled!!"),
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
}
@@ -840,17 +829,17 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
bool force_exec,
bool enable_autoreport)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s BT Auto report = %s\n",
- (force_exec ? "force to" : ""),
- ((enable_autoreport) ? "Enabled" : "Disabled"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s BT Auto report = %s\n",
+ (force_exec ? "force to" : ""),
+ ((enable_autoreport) ? "Enabled" : "Disabled"));
coex_dm->cur_bt_auto_report = enable_autoreport;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
- coex_dm->pre_bt_auto_report,
- coex_dm->cur_bt_auto_report);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
return;
@@ -864,16 +853,16 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
bool force_exec, u8 fw_dac_swinglvl)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swinglvl);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swinglvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
- coex_dm->pre_fw_dac_swing_lvl,
- coex_dm->cur_fw_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
if (coex_dm->pre_fw_dac_swing_lvl ==
coex_dm->cur_fw_dac_swing_lvl)
@@ -891,8 +880,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
{
if (rx_rf_shrink_on) {
/* Shrink RF Rx LPF corner */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff, 0xffffc);
} else {
@@ -900,8 +889,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
	 * After initialization, we can use coex_dm->btRf0x1eBackup
*/
if (btcoexist->initilized) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Resume RF Rx LPF corner!!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff,
coex_dm->bt_rf0x1e_backup);
@@ -912,17 +901,17 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
bool force_exec, bool rx_rf_shrink_on)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn Rx RF Shrink = %s\n",
- (force_exec ? "force to" : ""),
- ((rx_rf_shrink_on) ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec ? "force to" : ""),
+ ((rx_rf_shrink_on) ? "ON" : "OFF"));
coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
- coex_dm->pre_rf_rx_lpf_shrink,
- coex_dm->cur_rf_rx_lpf_shrink);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
if (coex_dm->pre_rf_rx_lpf_shrink ==
coex_dm->cur_rf_rx_lpf_shrink)
@@ -939,8 +928,8 @@ static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist,
{
u8 val = (u8)level;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
}
@@ -958,22 +947,22 @@ static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
bool force_exec, bool dac_swingon,
u32 dac_swinglvl)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
- (force_exec ? "force to" : ""),
- ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
coex_dm->cur_dac_swing_on = dac_swingon;
coex_dm->cur_dac_swing_lvl = dac_swinglvl;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
- coex_dm->pre_dac_swing_on,
- coex_dm->pre_dac_swing_lvl);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -991,8 +980,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
{
/* BB AGC Gain Table */
if (agc_table_en) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB Agc Table On!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table On!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001);
@@ -1000,8 +989,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB Agc Table Off!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table Off!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -1014,16 +1003,17 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist,
bool force_exec, bool agc_table_en)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s %s Agc Table\n",
- (force_exec ? "force to" : ""),
- ((agc_table_en) ? "Enable" : "Disable"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s %s Agc Table\n",
+ (force_exec ? "force to" : ""),
+ ((agc_table_en) ? "Enable" : "Disable"));
coex_dm->cur_agc_table_en = agc_table_en;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
- coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en,
+ coex_dm->cur_agc_table_en);
if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
return;
@@ -1037,20 +1027,20 @@ static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -1059,30 +1049,30 @@ static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
- (force_exec ? "force to" : ""), val0x6c0);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- val0x6c4, val0x6c8, val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
+ (force_exec ? "force to" : ""), val0x6c0);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
- coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
- coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x,\n",
- coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
- coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1136,9 +1126,9 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -1146,18 +1136,18 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPreIgnoreWlanAct = %d ",
- coex_dm->pre_ignore_wlan_act);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "bCurIgnoreWlanAct = %d!!\n",
- coex_dm->cur_ignore_wlan_act);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreIgnoreWlanAct = %d ",
+ coex_dm->pre_ignore_wlan_act);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -1185,11 +1175,11 @@ static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 | h2c_parameter[4]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -1213,20 +1203,20 @@ static void btc8192e2ant_sw_mec2(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
bool force_exec, bool turn_on, u8 type)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn %s PS TDMA, type=%d\n",
- (force_exec ? "force to" : ""),
- (turn_on ? "ON" : "OFF"), type);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec ? "force to" : ""),
+ (turn_on ? "ON" : "OFF"), type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1353,8 +1343,8 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
u8 mimops = BTC_MIMO_PS_DYNAMIC;
u32 disra_mask = 0x0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], REAL set SS Type = %d\n", sstype);
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], REAL set SS Type = %d\n", sstype);
disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype,
coex_dm->curra_masktype);
@@ -1386,9 +1376,9 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist,
bool force_exec, u8 new_sstype)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], %s Switch SS Type = %d\n",
- (force_exec ? "force to" : ""), new_sstype);
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], %s Switch SS Type = %d\n",
+ (force_exec ? "force to" : ""), new_sstype);
coex_dm->cur_sstype = new_sstype;
if (!force_exec) {
@@ -1469,8 +1459,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi non-connected idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non-connected idle!!\n");
if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status) ||
@@ -1506,8 +1496,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Wifi connected + BT non connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Wifi connected + BT non connected-idle!!\n");
halbtc8192e2ant_switch_sstype(btcoexist,
NORMAL_EXEC, 2);
@@ -1534,8 +1524,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
if (bt_hson)
return false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Wifi connected + BT connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Wifi connected + BT connected-idle!!\n");
halbtc8192e2ant_switch_sstype(btcoexist,
NORMAL_EXEC, 2);
@@ -1560,12 +1550,12 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Wifi Connected-Busy + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Wifi Connected-Busy + BT Busy!!\n");
common = false;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Wifi Connected-Idle + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Wifi Connected-Idle + BT Busy!!\n");
halbtc8192e2ant_switch_sstype(btcoexist,
NORMAL_EXEC, 1);
@@ -1592,9 +1582,8 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 71) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1689,9 +1678,8 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 71);
@@ -1795,9 +1783,8 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 6);
@@ -1886,9 +1873,8 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 2);
@@ -1983,9 +1969,8 @@ static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 7);
@@ -2074,9 +2059,8 @@ static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 3);
@@ -2178,13 +2162,13 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
int result;
u8 retry_cnt = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjust()\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -2288,11 +2272,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
} else {
		/* acquire the BT TRx retry count from BT_Info byte2 */
retry_cnt = coex_sta->bt_retry_cnt;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], retry_cnt = %d\n", retry_cnt);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
- up, dn, m, n, wait_cnt);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], retry_cnt = %d\n", retry_cnt);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
+ up, dn, m, n, wait_cnt);
result = 0;
wait_cnt++;
/* no retry in the last 2-second duration */
@@ -2309,9 +2293,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex]Increase wifi duration!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex]Increase wifi duration!!\n");
}
} else if (retry_cnt <= 3) {
up--;
@@ -2334,9 +2317,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_cnt = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "Reduce wifi duration for retry<3\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "Reduce wifi duration for retry<3\n");
}
} else {
if (wait_cnt == 1)
@@ -2352,12 +2334,12 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_cnt = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "Decrease wifi duration for retryCounter>3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "Decrease wifi duration for retryCounter>3!!\n");
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], max Interval = %d\n", max_interval);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], max Interval = %d\n", max_interval);
if (max_interval == 1)
btc8192e_int1(btcoexist, tx_pause, result);
else if (max_interval == 2)
@@ -2373,11 +2355,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
bool scan = false, link = false, roam = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], PsTdma type dismatch!!!, ");
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "curPsTdma=%d, recordPsTdma=%d\n",
- coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], PsTdma type dismatch!!!, ");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "curPsTdma=%d, recordPsTdma=%d\n",
+ coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2388,9 +2370,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
true,
coex_dm->tdma_adj_type);
else
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
@@ -2594,8 +2575,8 @@ static void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
btrssi_state == BTC_RSSI_STATE_STAY_LOW) &&
(wifirssi_state == BTC_RSSI_STATE_LOW ||
wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
long_dist = true;
}
if (long_dist) {
@@ -3100,105 +3081,105 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
{
u8 algorithm = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], return for Manual CTRL <===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], return for Manual CTRL <===\n");
return;
}
if (coex_sta->under_ips) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], wifi is under IPS !!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
algorithm = halbtc8192e2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
halbtc8192e2ant_action_bt_inquiry(btcoexist);
return;
}
coex_dm->cur_algorithm = algorithm;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
if (halbtc8192e2ant_is_common_action(btcoexist)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant common.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->auto_tdma_adjust = false;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
- coex_dm->pre_algorithm,
- coex_dm->cur_algorithm);
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
+ coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
coex_dm->auto_tdma_adjust = false;
}
switch (coex_dm->cur_algorithm) {
case BT_8192E_2ANT_COEX_ALGO_SCO:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = SCO.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = SCO\n");
halbtc8192e2ant_action_sco(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_SCO_PAN:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = SCO+PAN(EDR).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
halbtc8192e2ant_action_sco_pan(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = HID\n");
halbtc8192e2ant_action_hid(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = A2DP\n");
halbtc8192e2ant_action_a2dp(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = A2DP+PAN(HS).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
halbtc8192e2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = PAN(EDR).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = PAN(EDR)\n");
halbtc8192e2ant_action_pan_edr(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = HS mode.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = HS mode\n");
halbtc8192e2ant_action_pan_hs(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = PAN+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = PAN+A2DP\n");
halbtc8192e2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = PAN(EDR)+HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = PAN(EDR)+HID\n");
halbtc8192e2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = HID+A2DP+PAN.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8192e2ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = HID+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = HID+A2DP\n");
halbtc8192e2ant_action_hid_a2dp(btcoexist);
break;
default:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = unknown!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = unknown!!\n");
/* halbtc8192e2ant_coex_alloff(btcoexist); */
break;
}
@@ -3212,8 +3193,8 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
u16 u16tmp = 0;
u8 u8tmp = 0;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], 2Ant Init HW Config!!\n");
if (backup) {
/* backup rf 0x1e value */
@@ -3296,8 +3277,8 @@ void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist)
void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
halbtc8192e2ant_init_coex_dm(btcoexist);
}
@@ -3525,13 +3506,13 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_IPS_ENTER == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
halbtc8192e2ant_coex_alloff(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
}
}
@@ -3539,12 +3520,12 @@ void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_LPS_ENABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -3552,21 +3533,21 @@ void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_SCAN_START == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
else if (BTC_SCAN_FINISH == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
}
void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_ASSOCIATE_START == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
else if (BTC_ASSOCIATE_FINISH == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
}
void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
@@ -3582,11 +3563,11 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
return;
if (BTC_MEDIA_CONNECT == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
else
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
	/* only for 2.4G do we need to inform bt of the chnl mask */
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -3606,10 +3587,10 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -3618,8 +3599,8 @@ void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
u8 type)
{
if (type == BTC_PACKET_DHCP)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], DHCP Packet notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], DHCP Packet notify\n");
}
void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
@@ -3637,19 +3618,19 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8192E_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length=%d, hex data = [",
- rsp_source, length);
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length=%d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length-1)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x]\n", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x]\n", tmp_buf[i]);
else
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x, ", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x, ", tmp_buf[i]);
}
if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) {
@@ -3666,8 +3647,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
	 * because bt is reset and the info is lost.
*/
if ((coex_sta->bt_info_ext & BIT1)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "bit1, send wifi BW&Chnl to BT!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "bit1, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -3683,8 +3664,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
if ((coex_sta->bt_info_ext & BIT3)) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "bit3, BT NOT ignore Wlan active!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "bit3, BT NOT ignore Wlan active!\n");
halbtc8192e2ant_IgnoreWlanAct(btcoexist,
FORCE_EXEC,
false);
@@ -3742,25 +3723,25 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Non-Connected idle!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Non-Connected idle!!!\n");
} else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
(bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
} else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3788,7 +3769,7 @@ void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+ btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3801,29 +3782,29 @@ void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist)
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "=======================Periodical=======================\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "=======================Periodical=======================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "************************************************\n");
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num, board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "BT stack/ hci ext ver = %s / %d\n",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ btc_iface_dbg(INTF_INIT,
+ "************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ btc_iface_dbg(INTF_INIT,
+ "BT stack/ hci ext ver = %s / %d\n",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ btc_iface_dbg(INTF_INIT,
+ "************************************************\n");
}
#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
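/*
 * Annotation (not part of the patch): every hunk above and below applies
 * one mechanical rewrite -- BTC_PRINT(BTC_MSG_ALGORITHM, flag, fmt, ...)
 * becomes btc_alg_dbg(flag, fmt, ...) and
 * BTC_PRINT(BTC_MSG_INTERFACE, flag, fmt, ...) becomes
 * btc_iface_dbg(flag, fmt, ...), with continuation lines re-indented to
 * the new open parenthesis. The wrapper definitions are not shown in this
 * diff; a minimal sketch of what they could look like in halbtc.h,
 * assuming a per-category mask array btc_dbg_type[] gates the output
 * (the array name is an assumption here):
 */
#define btc_alg_dbg(dbg_flag, fmt, ...)					\
	do {								\
		if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & dbg_flag)) \
			printk(KERN_DEBUG fmt, ##__VA_ARGS__);		\
	} while (0)
#define btc_iface_dbg(dbg_flag, fmt, ...)				\
	do {								\
		if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] & dbg_flag)) \
			printk(KERN_DEBUG fmt, ##__VA_ARGS__);		\
	} while (0)
/*
 * The do { } while (0) wrapper keeps each macro a single statement, so
 * the callers' bare if/else bodies above remain correct without braces.
 */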
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 7e239d3ce..16add42a6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -74,28 +74,28 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi thresh error!!\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -104,12 +104,12 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -118,26 +118,26 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -165,32 +165,28 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -201,14 +197,12 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -217,31 +211,26 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -435,9 +424,9 @@ static void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger*/
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -532,8 +521,8 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], No BT link exists!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -548,27 +537,27 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
if (numdiffprofile == 1) {
if (bt_link_info->sco_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO only\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID only\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = A2DP only\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = PAN(HS) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = PAN(HS) only\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = PAN(EDR) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = PAN(EDR) only\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR;
}
@@ -577,21 +566,21 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (numdiffprofile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + PAN(HS)\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
@@ -599,32 +588,32 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + A2DP\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + PAN(HS)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -634,31 +623,31 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
@@ -668,13 +657,13 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -686,11 +675,11 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
@@ -717,9 +706,9 @@ static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36 */
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -743,20 +732,20 @@ static void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -765,10 +754,10 @@ static void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c4, u32 val0x6c8,
u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
- (force_exec ? "force to" : ""),
- val0x6c0, val0x6c4, val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ val0x6c0, val0x6c4, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
@@ -839,9 +828,9 @@ static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -849,16 +838,16 @@ static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
- coex_dm->cur_ignore_wlan_act);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -882,8 +871,8 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
if (ap_enable) {
if ((byte1 & BIT4) && !(byte1 & BIT5)) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], FW for 1Ant AP mode\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], FW for 1Ant AP mode\n");
real_byte1 &= ~BIT4;
real_byte1 |= BIT5;
@@ -904,13 +893,13 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = real_byte5;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 |
- h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 |
- h2c_parameter[4]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
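/*
 * Worked example (illustrative values, not from the patch): with
 * h2c_parameter = {0x51, 0x15, 0x03, 0x10, 0x50}, the packed word is
 * 0x15 << 24 | 0x03 << 16 | 0x10 << 8 | 0x50 = 0x15031050, so the trace
 * above prints "PS-TDMA H2C cmd =0x5115031050" -- byte 1 via %x, bytes
 * 2..5 as one zero-padded %08x.
 */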
@@ -929,22 +918,22 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist,
bool force_exec,
u8 lps_val, u8 rpwm_val)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
- (force_exec ? "force to" : ""), lps_val, rpwm_val);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+ (force_exec ? "force to" : ""), lps_val, rpwm_val);
coex_dm->cur_lps = lps_val;
coex_dm->cur_rpwm = rpwm_val;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
- coex_dm->cur_lps, coex_dm->cur_rpwm);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
+ coex_dm->cur_lps, coex_dm->cur_rpwm);
if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
(coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
- coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
+ coex_dm->pre_rpwm, coex_dm->cur_rpwm);
return;
}
@@ -958,8 +947,8 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist,
static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist,
bool low_penalty_ra)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
@@ -1174,13 +1163,13 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
if (!force_exec) {
if (coex_dm->cur_ps_tdma_on)
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ******** TDMA(on, %d) *********\n",
- coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], ******** TDMA(on, %d) *********\n",
+ coex_dm->cur_ps_tdma);
else
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ******** TDMA(off, %d) ********\n",
- coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], ******** TDMA(off, %d) ********\n",
+ coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1394,45 +1383,45 @@ static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
if (!wifi_connected &&
BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else if (wifi_connected &&
(BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else if (!wifi_connected &&
(BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else if (wifi_connected &&
(BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else if (!wifi_connected &&
(BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE !=
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- ("[BTCoex], Wifi non connected-idle + BT Busy!!\n"));
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else {
if (wifi_busy)
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
else
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
commom = false;
}
@@ -1451,8 +1440,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
u8 retry_count = 0, bt_info_ext;
bool wifi_busy = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjustForAcl()\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjustForAcl()\n");
if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status)
wifi_busy = true;
@@ -1481,8 +1470,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
coex_dm->tdma_adj_type = 2;
@@ -1513,9 +1502,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Increase wifi duration!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Increase wifi duration!!\n");
}
} else if (retry_count <= 3) {
up--;
@@ -1538,9 +1526,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
}
} else {
if (wait_count == 1)
@@ -1556,8 +1543,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
}
if (result == -1) {
@@ -1602,9 +1589,9 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
}
} else { /*no change */
/*if busy / idle change */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex],********* TDMA(on, %d) ********\n",
- coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex],********* TDMA(on, %d) ********\n",
+ coex_dm->cur_ps_tdma);
}
if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
@@ -2010,15 +1997,15 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
bool scan = false, link = false, roam = false;
bool under_4way = false, ap_enable = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect()===>\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect()===>\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
&under_4way);
if (under_4way) {
halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
return;
}
@@ -2032,8 +2019,8 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
else
halbtc8723b1ant_action_wifi_connected_special_packet(
btcoexist);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
return;
}
@@ -2102,58 +2089,58 @@ static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
if (!halbtc8723b1ant_is_common_action(btcoexist)) {
switch (coex_dm->cur_algorithm) {
case BT_8723B_1ANT_COEX_ALGO_SCO:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = SCO.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = SCO\n");
halbtc8723b1ant_action_sco(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID\n");
halbtc8723b1ant_action_hid(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = A2DP\n");
halbtc8723b1ant_action_a2dp(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = A2DP+PAN(HS).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
halbtc8723b1ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN(EDR).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN(EDR)\n");
halbtc8723b1ant_action_pan_edr(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HS mode.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HS mode\n");
halbtc8723b1ant_action_pan_hs(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN+A2DP\n");
halbtc8723b1ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN(EDR)+HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
halbtc8723b1ant_action_pan_edr_hid(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HID+A2DP+PAN.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
btc8723b1ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_HID_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HID+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID+A2DP\n");
halbtc8723b1ant_action_hid_a2dp(btcoexist);
break;
default:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = coexist All Off!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = coexist All Off!!\n");
break;
}
coex_dm->pre_algorithm = coex_dm->cur_algorithm;
@@ -2171,24 +2158,24 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
u32 wifi_link_status = 0;
u32 num_of_wifi_link = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (btcoexist->stop_coex_dm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
return;
}
if (coex_sta->under_ips) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], wifi is under IPS !!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
@@ -2267,8 +2254,8 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if (!wifi_connected) {
bool scan = false, link = false, roam = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], wifi is non connected-idle !!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], wifi is non connected-idle !!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2305,8 +2292,8 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
u8 u8tmp = 0;
u32 cnt_bt_cal_chk = 0;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], 1Ant Init HW Config!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], 1Ant Init HW Config!!\n");
if (backup) {/* backup rf 0x1e value */
coex_dm->backup_arfr_cnt1 =
@@ -2333,14 +2320,14 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d);
cnt_bt_cal_chk++;
if (u32tmp & BIT0) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ########### BT calibration(cnt=%d) ###########\n",
- cnt_bt_cal_chk);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ########### BT calibration(cnt=%d) ###########\n",
+ cnt_bt_cal_chk);
mdelay(50);
} else {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n",
- cnt_bt_cal_chk);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n",
+ cnt_bt_cal_chk);
break;
}
}
@@ -2383,8 +2370,8 @@ void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist)
void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
btcoexist->stop_coex_dm = false;
@@ -2677,8 +2664,8 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
return;
if (BTC_IPS_ENTER == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
@@ -2689,8 +2676,8 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
NORMAL_EXEC, 0);
halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
halbtc8723b1ant_init_hw_config(btcoexist, false);
@@ -2705,12 +2692,12 @@ void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
return;
if (BTC_LPS_ENABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -2753,15 +2740,15 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_SCAN_START == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
if (!wifi_connected) /* non-connected scan */
btc8723b1ant_action_wifi_not_conn_scan(btcoexist);
else /* wifi is connected */
btc8723b1ant_action_wifi_conn_scan(btcoexist);
} else if (BTC_SCAN_FINISH == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
if (!wifi_connected) /* non-connected scan */
btc8723b1ant_action_wifi_not_conn(btcoexist);
else
@@ -2802,12 +2789,12 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_ASSOCIATE_START == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
btc8723b1ant_act_wifi_not_conn_asso_auth(btcoexist);
} else if (BTC_ASSOCIATE_FINISH == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
@@ -2830,11 +2817,11 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
return;
if (BTC_MEDIA_CONNECT == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
else
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
/* only 2.4G we need to inform bt the chnl mask */
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -2855,10 +2842,10 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -2900,8 +2887,8 @@ void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
if (BTC_PACKET_DHCP == type ||
BTC_PACKET_EAPOL == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], special Packet(%d) notify\n", type);
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], special Packet(%d) notify\n", type);
halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
}
}
@@ -2921,19 +2908,19 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length=%d, hex data = [",
- rsp_source, length);
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length=%d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length - 1)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x]\n", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x]\n", tmp_buf[i]);
else
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x, ", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x, ", tmp_buf[i]);
}
if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) {
@@ -2950,8 +2937,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
* because bt is reset and loss of the info.
*/
if (coex_sta->bt_info_ext & BIT1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -2965,8 +2952,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (coex_sta->bt_info_ext & BIT3) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
halbtc8723b1ant_ignore_wlan_act(btcoexist,
FORCE_EXEC,
false);
@@ -3021,30 +3008,30 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info&BT_INFO_8723B_1ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
/* connection exists but no busy */
} else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) {
if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
coex_dm->auto_tdma_adjust = false;
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status =
BT_8723B_1ANT_BT_STATUS_MAX;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
}
if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3060,7 +3047,7 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+ btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
btcoexist->stop_coex_dm = true;
@@ -3078,11 +3065,11 @@ void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Pnp notify\n");
+ btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Pnp notify\n");
if (BTC_WIFI_PNP_SLEEP == pnp_state) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Pnp notify to SLEEP\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Pnp notify to SLEEP\n");
btcoexist->stop_coex_dm = true;
halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false,
true);
@@ -3092,8 +3079,8 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Pnp notify to WAKE UP\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Pnp notify to WAKE UP\n");
btcoexist->stop_coex_dm = false;
halbtc8723b1ant_init_hw_config(btcoexist, false);
halbtc8723b1ant_init_coex_dm(btcoexist);
@@ -3103,8 +3090,8 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], *****************Coex DM Reset****************\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], *****************Coex DM Reset****************\n");
halbtc8723b1ant_init_hw_config(btcoexist, false);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
@@ -3119,31 +3106,31 @@ void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist)
static u8 dis_ver_info_cnt;
u32 fw_ver = 0, bt_patch_ver = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], ==========================Periodical===========================\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], ==========================Periodical===========================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ****************************************************************\n");
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num, board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ stack_info->profile_notified ? "Yes" : "No",
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8723b_1ant,
- glcoex_ver_8723b_1ant, fw_ver,
- bt_patch_ver, bt_patch_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ****************************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8723b_1ant,
+ glcoex_ver_8723b_1ant, fw_ver,
+ bt_patch_ver, bt_patch_ver);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
}
#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
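/*
 * Annotation (not part of the patch): both halbtc8723b* files repeat the
 * RSSI state machine that the hunks above reformat. The asymmetric
 * thresholds -- raw threshold on the way down, threshold plus
 * BTC_RSSI_COEX_THRESH_TOL_* on the way up -- give hysteresis, so the
 * reported state does not flap while the RSSI hovers near a boundary.
 * A distilled, self-contained sketch of the two-level pattern (names
 * are illustrative, not the driver's):
 */
enum rssi_state { RSSI_LOW, RSSI_HIGH };

static enum rssi_state rssi_update(enum rssi_state prev, int rssi,
				   int thresh, int tol)
{
	if (prev == RSSI_LOW)
		/* leave LOW only once rssi clears thresh + tol */
		return (rssi >= thresh + tol) ? RSSI_HIGH : RSSI_LOW;
	/* fall back to LOW only once rssi drops below the raw thresh */
	return (rssi < thresh) ? RSSI_LOW : RSSI_HIGH;
}
/*
 * The driver additionally distinguishes "switch to" from "stay at"
 * states for its traces, and the three-level variant applies the same
 * rule at each boundary -- which is why it rejects inputs where
 * rssi_thresh > rssi_thresh1 before doing anything else.
 */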
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index 77cbd10e8..5f488ecae 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -72,32 +72,28 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi thresh error!!\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -106,14 +102,12 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -122,31 +116,26 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "stay at Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -173,36 +162,28 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -213,16 +194,12 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -231,36 +208,26 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "stay at Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -292,12 +259,12 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
- reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -311,9 +278,9 @@ static void btc8723b2ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -427,8 +394,8 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], No BT link exists!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -443,27 +410,27 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], A2DP only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], A2DP only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], PAN(HS) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], PAN(HS) only\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], PAN(EDR) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], PAN(EDR) only\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR;
}
@@ -472,21 +439,21 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + A2DP ==> SCO\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + A2DP ==> SCO\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + PAN(HS)\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -494,31 +461,31 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + A2DP\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + A2DP\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + PAN(HS)\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex],A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex],A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -528,37 +495,32 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + A2DP"
- " ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP ==> HID\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + "
- "PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + "
- "PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + A2DP + "
- "PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + A2DP + "
- "PAN(EDR) ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -568,15 +530,13 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + A2DP + "
- "PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + A2DP + "
- "PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -588,13 +548,11 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Error!!! SCO + HID"
- " + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + A2DP +"
- " PAN(EDR)==>PAN(EDR)+HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -624,17 +582,15 @@ static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
if (wifi_connected) {
if (bt_hs_on) {
if (bt_hs_rssi > 37) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], Need to decrease bt "
- "power for HS mode!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt power for HS mode!!\n");
ret = true;
}
} else {
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], Need to decrease bt "
- "power for Wifi is connected!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
ret = true;
}
}
@@ -653,10 +609,10 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
*/
h2c_parameter[0] = dac_swing_lvl;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -671,9 +627,9 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
if (dec_bt_pwr)
h2c_parameter[0] |= BIT1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
- (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+ (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
@@ -681,15 +637,15 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
bool force_exec, bool dec_bt_pwr)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s Dec BT power = %s\n",
- (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s Dec BT power = %s\n",
+ force_exec ? "force to" : "", dec_bt_pwr ? "ON" : "OFF");
coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
- coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
return;
@@ -702,17 +658,16 @@ static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
bool force_exec, u8 fw_dac_swing_lvl)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swing_lvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], preFwDacSwingLvl=%d, "
- "curFwDacSwingLvl=%d\n",
- coex_dm->pre_fw_dac_swing_lvl,
- coex_dm->cur_fw_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
if (coex_dm->pre_fw_dac_swing_lvl ==
coex_dm->cur_fw_dac_swing_lvl)
@@ -729,16 +684,16 @@ static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
{
if (rx_rf_shrink_on) {
/* Shrink RF Rx LPF corner */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff, 0xffffc);
} else {
/* Resume RF Rx LPF corner */
/* After initialization, we can use coex_dm->bt_rf0x1e_backup */
if (btcoexist->initilized) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Resume RF Rx LPF corner!!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff,
coex_dm->bt_rf0x1e_backup);
@@ -749,18 +704,17 @@ static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
bool force_exec, bool rx_rf_shrink_on)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn Rx RF Shrink = %s\n",
- (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
- "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
+ "ON" : "OFF"));
coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreRfRxLpfShrink=%d, "
- "bCurRfRxLpfShrink=%d\n",
- coex_dm->pre_rf_rx_lpf_shrink,
- coex_dm->cur_rf_rx_lpf_shrink);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
if (coex_dm->pre_rf_rx_lpf_shrink ==
coex_dm->cur_rf_rx_lpf_shrink)
@@ -788,9 +742,9 @@ static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -799,18 +753,17 @@ static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
bool force_exec, bool low_penalty_ra)
{
/*return; */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn LowPenaltyRA = %s\n",
- (force_exec ? "force to" : ""), (low_penalty_ra ?
- "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec ? "force to" : ""), (low_penalty_ra ?
+ "ON" : "OFF"));
coex_dm->cur_low_penalty_ra = low_penalty_ra;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreLowPenaltyRa=%d, "
- "bCurLowPenaltyRa=%d\n",
- coex_dm->pre_low_penalty_ra,
- coex_dm->cur_low_penalty_ra);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
return;
@@ -824,8 +777,8 @@ static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
u32 level)
{
u8 val = (u8) level;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
}
@@ -843,20 +796,20 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
bool force_exec, bool dac_swing_on,
u32 dac_swing_lvl)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
- (force_exec ? "force to" : ""),
- (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+ (force_exec ? "force to" : ""),
+ (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
coex_dm->cur_dac_swing_on = dac_swing_on;
coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x,"
- " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
- coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -877,8 +830,8 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
/* BB AGC Gain Table */
if (agc_table_en) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB Agc Table On!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table On!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
@@ -887,8 +840,8 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB Agc Table Off!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table Off!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -901,15 +854,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
/* RF Gain */
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
if (agc_table_en) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Agc Table On!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table On!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
0xfffff, 0x38fff);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
0xfffff, 0x38ffe);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Agc Table Off!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table Off!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
0xfffff, 0x380c3);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
@@ -920,15 +873,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
if (agc_table_en) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Agc Table On!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table On!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
0xfffff, 0x38fff);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
0xfffff, 0x38ffe);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Agc Table Off!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table Off!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
0xfffff, 0x380c3);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
@@ -946,16 +899,17 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist,
bool force_exec, bool agc_table_en)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s %s Agc Table\n",
- (force_exec ? "force to" : ""),
- (agc_table_en ? "Enable" : "Disable"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s %s Agc Table\n",
+ (force_exec ? "force to" : ""),
+ (agc_table_en ? "Enable" : "Disable"));
coex_dm->cur_agc_table_en = agc_table_en;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
- coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en,
+ coex_dm->cur_agc_table_en);
if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
return;
@@ -969,20 +923,20 @@ static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -991,29 +945,24 @@ static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c4, u32 val0x6c8,
u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
- " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
- (force_exec ? "force to" : ""), val0x6c0,
- val0x6c4, val0x6c8, val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ force_exec ? "force to" : "",
+ val0x6c0, val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], preVal0x6c0=0x%x, "
- "preVal0x6c4=0x%x, preVal0x6c8=0x%x, "
- "preVal0x6cc=0x%x !!\n",
- coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
- coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], curVal0x6c0=0x%x, "
- "curVal0x6c4=0x%x, curVal0x6c8=0x%x, "
- "curVal0x6cc=0x%x !!\n",
- coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
- coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1099,9 +1048,9 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0;/* function enable*/
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set FW for BT Ignore Wlan_Act, "
- "FW write 0x63=0x%x\n", h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -1109,17 +1058,16 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPreIgnoreWlanAct = %d, "
- "bCurIgnoreWlanAct = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
- coex_dm->cur_ignore_wlan_act);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -1147,11 +1095,11 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 | h2c_parameter[4]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -1260,20 +1208,20 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
bool turn_on, u8 type)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn %s PS TDMA, type=%d\n",
- (force_exec ? "force to" : ""),
- (turn_on ? "ON" : "OFF"), type);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec ? "force to" : ""),
+ (turn_on ? "ON" : "OFF"), type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1471,8 +1419,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi non-connected idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non-connected idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
0x0);
@@ -1495,9 +1443,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi connected + "
- "BT non connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
0xfffff, 0x0);
@@ -1523,9 +1470,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
if (bt_hs_on)
return false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi connected + "
- "BT connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
0xfffff, 0x0);
@@ -1549,17 +1495,15 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Connected-Busy + "
- "BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
common = false;
} else {
if (bt_hs_on)
return false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Connected-Idle + "
- "BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
0x1, 0xfffff, 0x0);
@@ -1597,9 +1541,8 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
{
/* Set PS TDMA for max interval == 1 */
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 71) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1695,9 +1638,8 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71);
coex_dm->tdma_adj_type = 71;
@@ -1795,9 +1737,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
{
/* Set PS TDMA for max interval == 2 */
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
coex_dm->tdma_adj_type = 6;
@@ -1878,9 +1819,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
coex_dm->tdma_adj_type = 2;
@@ -1968,9 +1908,8 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
{
/* Set PS TDMA for max interval == 3 */
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
coex_dm->tdma_adj_type = 7;
@@ -2051,9 +1990,8 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
coex_dm->tdma_adj_type = 3;
@@ -2145,13 +2083,13 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
s32 result;
u8 retry_count = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjust()\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -2255,11 +2193,11 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
} else {
/*acquire the BT TRx retry count from BT_Info byte2*/
retry_count = coex_sta->bt_retry_cnt;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], retry_count = %d\n", retry_count);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
- up, dn, m, n, wait_count);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], retry_count = %d\n", retry_count);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+ up, dn, m, n, wait_count);
result = 0;
wait_count++;
/* no retry in the last 2-second duration */
@@ -2276,10 +2214,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Increase wifi "
- "duration!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Increase wifi duration!!\n");
} /* <=3 retry in the last 2-second duration*/
} else if (retry_count <= 3) {
up--;
@@ -2302,10 +2238,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration "
- "for retry_counter<3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
}
} else {
if (wait_count == 1)
@@ -2321,13 +2255,12 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration "
- "for retry_counter>3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], max Interval = %d\n", max_interval);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], max Interval = %d\n", max_interval);
if (max_interval == 1)
set_tdma_int1(btcoexist, tx_pause, result);
else if (max_interval == 2)
@@ -2341,10 +2274,9 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
*/
if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
bool scan = false, link = false, roam = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], PsTdma type dismatch!!!, "
- "curPsTdma=%d, recordPsTdma=%d\n",
- coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
+ coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2354,9 +2286,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
coex_dm->tdma_adj_type);
else
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], roaming/link/scan is under"
- " progress, will adjust next time!!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
@@ -2994,27 +2925,26 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
{
u8 algorithm = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), "
- "return for Manual CTRL <===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (coex_sta->under_ips) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], wifi is under IPS !!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
algorithm = btc8723b2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
btc8723b2ant_action_bt_inquiry(btcoexist);
return;
} else {
@@ -3026,84 +2956,75 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
}
coex_dm->cur_algorithm = algorithm;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
- coex_dm->cur_algorithm);
+ btc_alg_dbg(ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
+ coex_dm->cur_algorithm);
if (btc8723b2ant_is_common_action(btcoexist)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant common.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->auto_tdma_adjust = false;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], preAlgorithm=%d, "
- "curAlgorithm=%d\n", coex_dm->pre_algorithm,
- coex_dm->cur_algorithm);
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
+ coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
coex_dm->auto_tdma_adjust = false;
}
switch (coex_dm->cur_algorithm) {
case BT_8723B_2ANT_COEX_ALGO_SCO:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = SCO\n");
btc8723b2ant_action_sco(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID\n");
btc8723b2ant_action_hid(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
btc8723b2ant_action_a2dp(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = A2DP+PAN(HS).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
btc8723b2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = PAN(EDR).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
btc8723b2ant_action_pan_edr(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = HS mode.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
btc8723b2ant_action_pan_hs(btcoexist);
- break;
+ break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = PAN+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
btc8723b2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = PAN(EDR)+HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
btc8723b2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = HID+A2DP+PAN.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = HID+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
btc8723b2ant_action_hid_a2dp(btcoexist);
break;
default:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = coexist All Off!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
btc8723b2ant_coex_alloff(btcoexist);
break;
}
@@ -3131,8 +3052,8 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
{
u8 u8tmp = 0;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], 2Ant Init HW Config!!\n");
coex_dm->bt_rf0x1e_backup =
btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
@@ -3157,8 +3078,8 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
btc8723b2ant_init_coex_dm(btcoexist);
}
@@ -3393,15 +3314,15 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_IPS_ENTER == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
btc8723b2ant_wifioff_hwcfg(btcoexist);
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
btc8723b2ant_coex_alloff(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
ex_btc8723b2ant_init_hwconfig(btcoexist);
btc8723b2ant_init_coex_dm(btcoexist);
@@ -3412,12 +3333,12 @@ void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_LPS_ENABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -3425,21 +3346,21 @@ void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_SCAN_START == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
else if (BTC_SCAN_FINISH == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
}
void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_ASSOCIATE_START == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
else if (BTC_ASSOCIATE_FINISH == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
}
void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
@@ -3450,11 +3371,11 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
u8 wifi_central_chnl;
if (BTC_MEDIA_CONNECT == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
else
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
/* only for 2.4G do we need to inform bt of the chnl mask */
btcoexist->btc_get(btcoexist,
@@ -3475,10 +3396,10 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66=0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -3487,8 +3408,8 @@ void ex_btc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
u8 type)
{
if (type == BTC_PACKET_DHCP)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], DHCP Packet notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], DHCP Packet notify\n");
}
void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
@@ -3506,25 +3427,24 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length=%d, hex data=[",
- rsp_source, length);
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length=%d, hex data=[",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
if (i == 1)
bt_info = tmpbuf[i];
if (i == length-1)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x]\n", tmpbuf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x]\n", tmpbuf[i]);
else
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x, ", tmpbuf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x, ", tmpbuf[i]);
}
if (btcoexist->manual_control) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), "
- "return for Manual CTRL<===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
return;
}
@@ -3542,9 +3462,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
because bt is reset and loses the info.
*/
if ((coex_sta->bt_info_ext & BIT1)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT ext info bit1 check,"
- " send wifi BW&Chnl to BT!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -3558,9 +3477,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
}
if ((coex_sta->bt_info_ext & BIT3)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT ext info bit3 check, "
- "set BT NOT to ignore Wlan active!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
false);
} else {
@@ -3613,28 +3531,26 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), "
- "BT Non-Connected idle!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
/* connection exists but is not busy */
} else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info&BT_INFO_8723B_2ANT_B_ACL_BUSY) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), "
- "BT Non-Defined state!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3657,7 +3573,7 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+ btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
btc8723b2ant_wifioff_hwcfg(btcoexist);
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
@@ -3671,33 +3587,31 @@ void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist)
static u8 dis_ver_info_cnt;
u32 fw_ver = 0, bt_patch_ver = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], =========================="
- "Periodical===========================\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], ==========================Periodical===========================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ****************************"
- "************************************\n");
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Ant PG Num/ Ant Mech/ "
- "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num,
- board_info->btdm_ant_num, board_info->btdm_ant_pos);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num,
+ board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ stack_info->profile_notified ? "Yes" : "No",
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], *****************************"
- "***********************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
}
#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index 9cecf174a..3ce47c70b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -76,28 +76,28 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi thresh error!!\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -106,12 +106,12 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -120,26 +120,26 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -165,32 +165,28 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
if (wifi_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -201,14 +197,12 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
if (wifi_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -218,31 +212,26 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
(rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -431,9 +420,9 @@ static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger*/
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -504,8 +493,8 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], No BT link exists!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -520,26 +509,26 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = A2DP only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = PAN(HS) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = PAN(HS) only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = PAN(EDR) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = PAN(EDR) only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR;
}
}
@@ -547,50 +536,50 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + A2DP\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP;
}
}
@@ -599,29 +588,29 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -630,12 +619,12 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
}
@@ -646,12 +635,12 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -670,10 +659,10 @@ static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist,
if (enable_auto_report)
h2c_parameter[0] |= BIT0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
- (enable_auto_report ? "Enabled!!" : "Disabled!!"),
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+ (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
}
@@ -682,17 +671,16 @@ static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist,
bool force_exec,
bool enable_auto_report)
{
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n",
- (force_exec ? "force to" : ""), ((enable_auto_report) ?
- "Enabled" : "Disabled"));
+ btc_alg_dbg(ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n",
+ (force_exec ? "force to" : ""), ((enable_auto_report) ?
+ "Enabled" : "Disabled"));
coex_dm->cur_bt_auto_report = enable_auto_report;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
- coex_dm->pre_bt_auto_report,
- coex_dm->cur_bt_auto_report);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
return;
@@ -718,9 +706,9 @@ static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -743,20 +731,20 @@ static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -764,10 +752,10 @@ static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist,
bool force_exec, u32 val0x6c0,
u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- (force_exec ? "force to" : ""), val0x6c0, val0x6c4,
- val0x6c8, val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ (force_exec ? "force to" : ""), val0x6c0, val0x6c4,
+ val0x6c8, val0x6cc);
coex_dm->cur_val_0x6c0 = val0x6c0;
coex_dm->cur_val_0x6c4 = val0x6c4;
coex_dm->cur_val_0x6c8 = val0x6c8;
@@ -839,9 +827,9 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable*/
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -849,16 +837,16 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
- coex_dm->cur_ignore_wlan_act);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -887,13 +875,13 @@ static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1]<<24 |
- h2c_parameter[2]<<16 |
- h2c_parameter[3]<<8 |
- h2c_parameter[4]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -910,22 +898,22 @@ static void halbtc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist,
static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
bool force_exec, u8 lps_val, u8 rpwm_val)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
- (force_exec ? "force to" : ""), lps_val, rpwm_val);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+ (force_exec ? "force to" : ""), lps_val, rpwm_val);
coex_dm->cur_lps = lps_val;
coex_dm->cur_rpwm = rpwm_val;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
- coex_dm->cur_lps, coex_dm->cur_rpwm);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
+ coex_dm->cur_lps, coex_dm->cur_rpwm);
if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
(coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
- coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
+ coex_dm->pre_rpwm, coex_dm->cur_rpwm);
return;
}
@@ -939,8 +927,8 @@ static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
static void halbtc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist,
bool low_penalty_ra)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
halbtc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
@@ -1036,13 +1024,13 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
if (!force_exec) {
if (coex_dm->cur_ps_tdma_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ********** TDMA(on, %d) **********\n",
- coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], ********** TDMA(on, %d) **********\n",
+ coex_dm->cur_ps_tdma);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ********** TDMA(off, %d) **********\n",
- coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], ********** TDMA(off, %d) **********\n",
+ coex_dm->cur_ps_tdma);
}
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1253,50 +1241,50 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
if (!wifi_connected &&
BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (!wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (!wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE !=
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else {
if (wifi_busy) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
}
common = false;
@@ -1313,8 +1301,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
long result;
u8 retry_count = 0, bt_info_ext;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjustForAcl()\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjustForAcl()\n");
if ((BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
wifi_status) ||
@@ -1342,8 +1330,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
coex_dm->tdma_adj_type = 2;
@@ -1378,9 +1366,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Increase wifi duration!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Increase wifi duration!!\n");
}
} else if (retry_count <= 3) {
/* <=3 retry in the last 2-second duration*/
@@ -1410,9 +1397,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
}
} else {
/* retry count > 3, if retry count > 3 happens once,
@@ -1433,8 +1419,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
}
if (result == -1) {
@@ -1479,9 +1465,9 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
}
} else {
/*no change*/
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ********** TDMA(on, %d) **********\n",
- coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], ********** TDMA(on, %d) **********\n",
+ coex_dm->cur_ps_tdma);
}
if (coex_dm->cur_ps_tdma != 1 &&
@@ -1603,27 +1589,27 @@ static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
bt_disabled = false;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is enabled !!\n");
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is enabled !!\n");
} else {
bt_disable_cnt++;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], bt all counters = 0, %d times!!\n",
- bt_disable_cnt);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], bt all counters = 0, %d times!!\n",
+ bt_disable_cnt);
if (bt_disable_cnt >= 2) {
bt_disabled = true;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is disabled !!\n");
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is disabled !!\n");
halbtc8821a1ant_action_wifi_only(btcoexist);
}
}
if (pre_bt_disabled != bt_disabled) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is from %s to %s!!\n",
- (pre_bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
pre_bt_disabled = bt_disabled;
if (bt_disabled) {
btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS,
@@ -1897,15 +1883,15 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
bool scan = false, link = false, roam = false;
bool under_4way = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect()===>\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect()===>\n");
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way);
if (under_4way) {
btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
return;
}
@@ -1914,8 +1900,8 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
if (scan || link || roam) {
halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
return;
}
@@ -1976,58 +1962,58 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
if (!halbtc8821a1ant_is_common_action(btcoexist)) {
switch (coex_dm->cur_algorithm) {
case BT_8821A_1ANT_COEX_ALGO_SCO:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = SCO.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = SCO\n");
halbtc8821a1ant_action_sco(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID\n");
halbtc8821a1ant_action_hid(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = A2DP\n");
halbtc8821a1ant_action_a2dp(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = A2DP+PAN(HS).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
halbtc8821a1ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN(EDR).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN(EDR)\n");
halbtc8821a1ant_action_pan_edr(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HS mode.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HS mode\n");
halbtc8821a1ant_action_pan_hs(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN+A2DP\n");
halbtc8821a1ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN(EDR)+HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
halbtc8821a1ant_action_pan_edr_hid(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HID+A2DP+PAN.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
btc8821a1ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HID+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID+A2DP\n");
halbtc8821a1ant_action_hid_a2dp(btcoexist);
break;
default:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = coexist All Off!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = coexist All Off!!\n");
/*halbtc8821a1ant_coex_all_off(btcoexist);*/
break;
}
@@ -2045,31 +2031,31 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
bool wifi_under_5g = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (btcoexist->stop_coex_dm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
return;
}
if (coex_sta->under_ips) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], wifi is under IPS !!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
halbtc8821a1ant_coex_under_5g(btcoexist);
return;
}
@@ -2135,8 +2121,8 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if (!wifi_connected) {
bool scan = false, link = false, roam = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], wifi is non connected-idle !!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], wifi is non connected-idle !!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2168,8 +2154,8 @@ static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist,
u8 u1_tmp = 0;
bool wifi_under_5g = false;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], 1Ant Init HW Config!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], 1Ant Init HW Config!!\n");
if (back_up) {
coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist,
@@ -2220,8 +2206,8 @@ void ex_halbtc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist)
void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
btcoexist->stop_coex_dm = false;
@@ -2515,8 +2501,8 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
return;
if (BTC_IPS_ENTER == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
halbtc8821a1ant_set_ant_path(btcoexist,
BTC_ANT_PATH_BT, false, true);
@@ -2525,8 +2511,8 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
halbtc8821a1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 0);
} else if (BTC_IPS_LEAVE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
halbtc8821a1ant_run_coexist_mechanism(btcoexist);
@@ -2539,12 +2525,12 @@ void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
return;
if (BTC_LPS_ENABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_Lps = true;
} else if (BTC_LPS_DISABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_Lps = false;
}
}
@@ -2574,8 +2560,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_SCAN_START == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
if (!wifi_connected) {
/* non-connected scan*/
btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
@@ -2584,8 +2570,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
}
} else if (BTC_SCAN_FINISH == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
if (!wifi_connected) {
/* non-connected scan*/
halbtc8821a1ant_action_wifi_not_connected(btcoexist);
@@ -2614,12 +2600,12 @@ void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_ASSOCIATE_START == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
} else if (BTC_ASSOCIATE_FINISH == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
@@ -2645,11 +2631,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
return;
if (BTC_MEDIA_CONNECT == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
} else {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
}
/* only 2.4G we need to inform bt the chnl mask*/
@@ -2672,9 +2658,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 |
+ h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -2702,8 +2690,8 @@ void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
if (BTC_PACKET_DHCP == type ||
BTC_PACKET_EAPOL == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], special Packet(%d) notify\n", type);
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], special Packet(%d) notify\n", type);
btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
}
}
@@ -2727,19 +2715,19 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length = %d, hex data = [",
- rsp_source, length);
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length = %d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length-1) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x]\n", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x]\n", tmp_buf[i]);
} else {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x, ", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x, ", tmp_buf[i]);
}
}
@@ -2756,8 +2744,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
/* Here we need to resend some wifi info to BT*/
/* because bt is reset and loss of the info.*/
if (coex_sta->bt_info_ext & BIT1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
@@ -2773,8 +2761,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
if ((coex_sta->bt_info_ext & BIT3) && !wifi_under_5g) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
halbtc8821a1ant_ignore_wlan_act(btcoexist,
FORCE_EXEC,
false);
@@ -2782,8 +2770,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
}
#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
if (!(coex_sta->bt_info_ext & BIT4)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
halbtc8821a1ant_bt_auto_report(btcoexist,
FORCE_EXEC, true);
}
@@ -2828,28 +2816,28 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
} else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) {
/* connection exists but no busy*/
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info&BT_INFO_8821A_1ANT_B_SCO_ESCO) ||
(bt_info&BT_INFO_8821A_1ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_SCO_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info&BT_INFO_8821A_1ANT_B_ACL_BUSY) {
if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
coex_dm->auto_tdma_adjust = false;
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_ACL_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_MAX;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -2866,8 +2854,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Halt notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Halt notify\n");
btcoexist->stop_coex_dm = true;
@@ -2885,20 +2873,20 @@ void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Pnp notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Pnp notify\n");
if (BTC_WIFI_PNP_SLEEP == pnp_state) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Pnp notify to SLEEP\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Pnp notify to SLEEP\n");
btcoexist->stop_coex_dm = true;
halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
0x0, 0x0);
halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Pnp notify to WAKE UP\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Pnp notify to WAKE UP\n");
btcoexist->stop_coex_dm = false;
halbtc8821a1ant_init_hw_config(btcoexist, false);
halbtc8821a1ant_init_coex_dm(btcoexist);
@@ -2914,33 +2902,33 @@ ex_halbtc8821a1ant_periodical(
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], ==========================Periodical===========================\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], ==========================Periodical===========================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ****************************************************************\n");
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num,
- board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num,
+ board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ stack_info->profile_notified ? "Yes" : "No",
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8821a_1ant,
- glcoex_ver_8821a_1ant,
- fw_ver, bt_patch_ver,
- bt_patch_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ****************************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8821a_1ant,
+ glcoex_ver_8821a_1ant,
+ fw_ver, bt_patch_ver,
+ bt_patch_ver);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
}
#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
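The hunks above, and the halbtc8821a2ant.c hunks that follow, are one mechanical conversion: each BTC_PRINT(BTC_MSG_ALGORITHM, flag, fmt, ...) call becomes btc_alg_dbg(flag, fmt, ...), each BTC_PRINT(BTC_MSG_INTERFACE, flag, fmt, ...) call becomes btc_iface_dbg(flag, fmt, ...), and the continuation arguments are re-indented to the new opening parenthesis. A minimal sketch of what such wrappers could look like is below; the btc_dbg_type[] mask array and these exact definitions are assumptions for illustration, not taken from this patch (the real definitions live in the rtlwifi btcoexist headers):

/* Sketch only: assumes a per-category debug-mask array named btc_dbg_type[],
 * indexed by BTC_MSG_ALGORITHM / BTC_MSG_INTERFACE, as the backing store.
 * BTC_MSG_* and the ALGO_*/INTF_* sub-flags come from the btcoexist headers.
 */
#include <linux/kernel.h>	/* printk(), unlikely() */

extern u32 btc_dbg_type[];	/* hypothetical runtime debug-mask array */

#define btc_alg_dbg(dbgflag, fmt, ...)					\
	do {								\
		if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & (dbgflag))) \
			printk(fmt, ##__VA_ARGS__);			\
	} while (0)

#define btc_iface_dbg(dbgflag, fmt, ...)				\
	do {								\
		if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] & (dbgflag))) \
			printk(fmt, ##__VA_ARGS__);			\
	} while (0)

Folding the message category into the macro name leaves a single sub-flag argument at every call site, which is what allows the shorter, uniformly re-indented calls seen throughout these hunks.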
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index 044d91429..81f843bba 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -80,28 +80,28 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT;
if (bt_rssi >= tmp) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi thresh error!!\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -110,12 +110,12 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -125,26 +125,26 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
(rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -171,32 +171,28 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -207,14 +203,12 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -223,31 +217,26 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -279,26 +268,26 @@ static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
bt_disabled = false;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is enabled !!\n");
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is enabled !!\n");
} else {
bt_disable_cnt++;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], bt all counters = 0, %d times!!\n",
- bt_disable_cnt);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], bt all counters = 0, %d times!!\n",
+ bt_disable_cnt);
if (bt_disable_cnt >= 2) {
bt_disabled = true;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is disabled !!\n");
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is disabled !!\n");
}
}
if (pre_bt_disabled != bt_disabled) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is from %s to %s!!\n",
- (pre_bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
pre_bt_disabled = bt_disabled;
}
}
@@ -324,12 +313,12 @@ static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -343,9 +332,9 @@ static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -368,8 +357,8 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
stack_info->bt_link_exist = coex_sta->bt_link_exist;
if (!coex_sta->bt_link_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], No profile exists!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], No profile exists!!!\n");
return algorithm;
}
@@ -384,26 +373,26 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (coex_sta->sco_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
if (coex_sta->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
} else if (coex_sta->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], A2DP only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], A2DP only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP;
} else if (coex_sta->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], PAN(HS) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], PAN(HS) only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], PAN(EDR) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], PAN(EDR) only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR;
}
}
@@ -411,50 +400,50 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (coex_sta->sco_exist) {
if (coex_sta->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else if (coex_sta->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + A2DP ==> SCO\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + A2DP ==> SCO\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else if (coex_sta->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
}
} else {
if (coex_sta->hid_exist &&
coex_sta->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + A2DP\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + A2DP\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
} else if (coex_sta->hid_exist &&
coex_sta->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (coex_sta->pan_exist &&
coex_sta->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], A2DP + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], A2DP + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP;
}
}
@@ -463,29 +452,29 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
if (coex_sta->sco_exist) {
if (coex_sta->hid_exist &&
coex_sta->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + A2DP ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP ==> HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else if (coex_sta->hid_exist &&
coex_sta->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (coex_sta->pan_exist &&
coex_sta->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -494,12 +483,12 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
coex_sta->pan_exist &&
coex_sta->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + A2DP + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + A2DP + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
}
@@ -510,12 +499,12 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
coex_sta->pan_exist &&
coex_sta->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -544,15 +533,15 @@ static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
if (wifi_connected) {
if (bt_hs_on) {
if (bt_hs_rssi > 37) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], Need to decrease bt power for HS mode!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt power for HS mode!!\n");
ret = true;
}
} else {
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
ret = true;
}
}
@@ -570,10 +559,10 @@ static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist,
*/
h2c_parameter[0] = dac_swing_lvl;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -588,9 +577,9 @@ static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
if (dec_bt_pwr)
h2c_parameter[0] |= BIT1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
- (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
+ (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
@@ -598,16 +587,16 @@ static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
bool force_exec, bool dec_bt_pwr)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s Dec BT power = %s\n",
- (force_exec ? "force to" : ""),
- ((dec_bt_pwr) ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s Dec BT power = %s\n",
+ (force_exec ? "force to" : ""),
+ ((dec_bt_pwr) ? "ON" : "OFF"));
coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
- coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
return;
@@ -627,10 +616,10 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
if (bt_lna_cons_on)
h2c_parameter[1] |= BIT0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
- (bt_lna_cons_on ? "ON!!" : "OFF!!"),
- h2c_parameter[0]<<8|h2c_parameter[1]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
+ bt_lna_cons_on ? "ON!!" : "OFF!!",
+ h2c_parameter[0] << 8 | h2c_parameter[1]);
btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
}
@@ -638,17 +627,17 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist,
bool force_exec, bool bt_lna_cons_on)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s BT Constrain = %s\n",
- (force_exec ? "force" : ""),
- ((bt_lna_cons_on) ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s BT Constrain = %s\n",
+ (force_exec ? "force" : ""),
+ ((bt_lna_cons_on) ? "ON" : "OFF"));
coex_dm->cur_bt_lna_constrain = bt_lna_cons_on;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
- coex_dm->pre_bt_lna_constrain,
- coex_dm->cur_bt_lna_constrain);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
+ coex_dm->pre_bt_lna_constrain,
+ coex_dm->cur_bt_lna_constrain);
if (coex_dm->pre_bt_lna_constrain ==
coex_dm->cur_bt_lna_constrain)
@@ -669,10 +658,10 @@ static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist,
h2c_parameter[1] = bt_psd_mode;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
- h2c_parameter[1],
- h2c_parameter[0]<<8|h2c_parameter[1]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
+ h2c_parameter[1],
+ h2c_parameter[0] << 8 | h2c_parameter[1]);
btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
}
@@ -680,15 +669,15 @@ static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist,
bool force_exec, u8 bt_psd_mode)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s BT PSD mode = 0x%x\n",
- (force_exec ? "force" : ""), bt_psd_mode);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s BT PSD mode = 0x%x\n",
+ (force_exec ? "force" : ""), bt_psd_mode);
coex_dm->cur_bt_psd_mode = bt_psd_mode;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
- coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
+ coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
if (coex_dm->pre_bt_psd_mode == coex_dm->cur_bt_psd_mode)
return;
@@ -709,10 +698,10 @@ static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
if (enable_auto_report)
h2c_parameter[0] |= BIT0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
- (enable_auto_report ? "Enabled!!" : "Disabled!!"),
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+ (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
}
@@ -721,17 +710,17 @@ static void halbtc8821a2ant_bt_auto_report(struct btc_coexist *btcoexist,
bool force_exec,
bool enable_auto_report)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s BT Auto report = %s\n",
- (force_exec ? "force to" : ""),
- ((enable_auto_report) ? "Enabled" : "Disabled"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s BT Auto report = %s\n",
+ (force_exec ? "force to" : ""),
+ ((enable_auto_report) ? "Enabled" : "Disabled"));
coex_dm->cur_bt_auto_report = enable_auto_report;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
- coex_dm->pre_bt_auto_report,
- coex_dm->cur_bt_auto_report);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
return;
@@ -746,16 +735,16 @@ static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
bool force_exec,
u8 fw_dac_swing_lvl)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swing_lvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
- coex_dm->pre_fw_dac_swing_lvl,
- coex_dm->cur_fw_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
if (coex_dm->pre_fw_dac_swing_lvl ==
coex_dm->cur_fw_dac_swing_lvl)
@@ -773,8 +762,8 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
{
if (rx_rf_shrink_on) {
/* Shrink RF Rx LPF corner */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff, 0xffffc);
} else {
@@ -782,8 +771,8 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
* After initialized, we can use coex_dm->bt_rf0x1e_backup
*/
if (btcoexist->initilized) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Resume RF Rx LPF corner!!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
0x1e, 0xfffff,
coex_dm->bt_rf0x1e_backup);
@@ -794,17 +783,17 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist,
bool force_exec, bool rx_rf_shrink_on)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn Rx RF Shrink = %s\n",
- (force_exec ? "force to" : ""),
- ((rx_rf_shrink_on) ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec ? "force to" : ""),
+ ((rx_rf_shrink_on) ? "ON" : "OFF"));
coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
- coex_dm->pre_rf_rx_lpf_shrink,
- coex_dm->cur_rf_rx_lpf_shrink);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
if (coex_dm->pre_rf_rx_lpf_shrink ==
coex_dm->cur_rf_rx_lpf_shrink)
@@ -835,9 +824,9 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9;
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -846,17 +835,17 @@ static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
bool force_exec, bool low_penalty_ra)
{
/*return;*/
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn LowPenaltyRA = %s\n",
- (force_exec ? "force to" : ""),
- ((low_penalty_ra) ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec ? "force to" : ""),
+ ((low_penalty_ra) ? "ON" : "OFF"));
coex_dm->cur_low_penalty_ra = low_penalty_ra;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
- coex_dm->pre_low_penalty_ra,
- coex_dm->cur_low_penalty_ra);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
return;
@@ -872,8 +861,8 @@ static void halbtc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
{
u8 val = (u8)level;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val);
}
@@ -891,21 +880,21 @@ static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
bool force_exec, bool dac_swing_on,
u32 dac_swing_lvl)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
- (force_exec ? "force to" : ""),
- ((dac_swing_on) ? "ON" : "OFF"),
- dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ ((dac_swing_on) ? "ON" : "OFF"),
+ dac_swing_lvl);
coex_dm->cur_dac_swing_on = dac_swing_on;
coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
- coex_dm->pre_dac_swing_on,
- coex_dm->pre_dac_swing_lvl,
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl ==
@@ -924,12 +913,12 @@ static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist,
bool adc_back_off)
{
if (adc_back_off) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB BackOff Level On!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB BackOff Level On!\n");
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB BackOff Level Off!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB BackOff Level Off!\n");
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
}
}
@@ -937,16 +926,17 @@ static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_adc_back_off(struct btc_coexist *btcoexist,
bool force_exec, bool adc_back_off)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn AdcBackOff = %s\n",
- (force_exec ? "force to" : ""),
- ((adc_back_off) ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn AdcBackOff = %s\n",
+ (force_exec ? "force to" : ""),
+ ((adc_back_off) ? "ON" : "OFF"));
coex_dm->cur_adc_back_off = adc_back_off;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
- coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
+ coex_dm->pre_adc_back_off,
+ coex_dm->cur_adc_back_off);
if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
return;
@@ -960,20 +950,20 @@ static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -981,28 +971,28 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist,
bool force_exec, u32 val0x6c0,
u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- (force_exec ? "force to" : ""),
- val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ val0x6c0, val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
- coex_dm->pre_val0x6c0,
- coex_dm->pre_val0x6c4,
- coex_dm->pre_val0x6c8,
- coex_dm->pre_val0x6cc);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
- coex_dm->cur_val0x6c0,
- coex_dm->cur_val0x6c4,
- coex_dm->cur_val0x6c8,
- coex_dm->cur_val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
+ coex_dm->pre_val0x6c0,
+ coex_dm->pre_val0x6c4,
+ coex_dm->pre_val0x6c8,
+ coex_dm->pre_val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
+ coex_dm->cur_val0x6c0,
+ coex_dm->cur_val0x6c4,
+ coex_dm->cur_val0x6c8,
+ coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1027,9 +1017,9 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
if (enable)
h2c_parameter[0] |= BIT0;/* function enable */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter);
}
@@ -1037,16 +1027,16 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
- coex_dm->cur_ignore_wlan_act);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -1075,13 +1065,13 @@ static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1]<<24|
- h2c_parameter[2]<<16|
- h2c_parameter[3]<<8|
- h2c_parameter[4]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -1175,20 +1165,20 @@ static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
bool force_exec, bool turn_on, u8 type)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn %s PS TDMA, type = %d\n",
- (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
- type);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn %s PS TDMA, type = %d\n",
+ (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
+ type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1374,8 +1364,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi IPS + BT IPS!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi IPS + BT IPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1392,13 +1382,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Busy + BT IPS!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Busy + BT IPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 1);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi LPS + BT IPS!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi LPS + BT IPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 1);
}
@@ -1416,8 +1406,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi IPS + BT LPS!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi IPS + BT LPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1433,13 +1423,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
if (wifi_busy) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Busy + BT LPS!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Busy + BT LPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 1);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi LPS + BT LPS!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi LPS + BT LPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 1);
}
@@ -1458,8 +1448,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist,
BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi IPS + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi IPS + BT Busy!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1478,12 +1468,12 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Busy + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Busy + BT Busy!!\n");
common = false;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi LPS + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi LPS + BT Busy!!\n");
halbtc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 21);
@@ -1505,8 +1495,8 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 71) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1601,8 +1591,8 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 71);
@@ -1706,8 +1696,8 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 6);
@@ -1796,8 +1786,8 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 2);
@@ -1892,8 +1882,8 @@ static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 7);
@@ -1982,8 +1972,8 @@ static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 3);
@@ -2085,13 +2075,13 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
int result;
u8 retry_count = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjust()\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (coex_dm->reset_tdma_adjust) {
coex_dm->reset_tdma_adjust = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -2195,11 +2185,11 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
} else {
		/* acquire the BT TRx retry count from BT_Info byte2 */
retry_count = coex_sta->bt_retry_cnt;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], retry_count = %d\n", retry_count);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
- (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], retry_count = %d\n", retry_count);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
+ (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
result = 0;
wait_count++;
@@ -2220,9 +2210,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Increase wifi duration!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Increase wifi duration!!\n");
}
} else if (retry_count <= 3) {
/* <=3 retry in the last 2-second duration */
@@ -2251,9 +2240,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
}
} else {
/* retry count > 3, if retry count > 3 happens once,
@@ -2274,12 +2262,12 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], max Interval = %d\n", max_interval);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], max Interval = %d\n", max_interval);
if (max_interval == 1)
btc8821a2_int1(btcoexist, tx_pause, result);
else if (max_interval == 2)
@@ -2295,9 +2283,9 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
bool scan = false, link = false, roam = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
- coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
+ coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2307,8 +2295,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
coex_dm->tdma_adj_type);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
@@ -3183,8 +3171,8 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
u8 algorithm = 0;
if (btcoexist->manual_control) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Manual control!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Manual control!!!\n");
return;
}
@@ -3192,8 +3180,8 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
halbtc8821a2ant_coex_under_5g(btcoexist);
return;
}
@@ -3201,81 +3189,82 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
algorithm = halbtc8821a2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
halbtc8821a2ant_bt_inquiry_page(btcoexist);
return;
}
coex_dm->cur_algorithm = algorithm;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
if (halbtc8821a2ant_is_common_action(btcoexist)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant common.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->reset_tdma_adjust = true;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
- coex_dm->pre_algorithm, coex_dm->cur_algorithm);
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
+ coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
coex_dm->reset_tdma_adjust = true;
}
switch (coex_dm->cur_algorithm) {
case BT_8821A_2ANT_COEX_ALGO_SCO:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = SCO\n");
halbtc8821a2ant_action_sco(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID\n");
halbtc8821a2ant_action_hid(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
halbtc8821a2ant_action_a2dp(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
halbtc8821a2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
halbtc8821a2ant_action_pan_edr(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HS mode.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
halbtc8821a2ant_action_pan_hs(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
halbtc8821a2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
halbtc8821a2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8821a2ant_act_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
halbtc8821a2ant_action_hid_a2dp(btcoexist);
break;
default:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
halbtc8821a2ant_coex_all_off(btcoexist);
break;
}
@@ -3294,8 +3283,8 @@ void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
{
u8 u1tmp = 0;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], 2Ant Init HW Config!!\n");
/* backup rf 0x1e value */
coex_dm->bt_rf0x1e_backup =
@@ -3328,8 +3317,8 @@ ex_halbtc8821a2ant_init_coex_dm(
struct btc_coexist *btcoexist
)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
halbtc8821a2ant_init_coex_dm(btcoexist);
}
@@ -3574,13 +3563,13 @@ ex_halbtc8821a2ant_display_coex_info(
void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_IPS_ENTER == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
halbtc8821a2ant_coex_all_off(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
/*halbtc8821a2ant_init_coex_dm(btcoexist);*/
}
@@ -3589,12 +3578,12 @@ void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_LPS_ENABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -3602,22 +3591,22 @@ void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_SCAN_START == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
} else if (BTC_SCAN_FINISH == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
}
}
void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_ASSOCIATE_START == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
} else if (BTC_ASSOCIATE_FINISH == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
}
}
@@ -3629,11 +3618,11 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
u8 wifi_central_chnl;
if (BTC_MEDIA_CONNECT == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
} else {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
}
/* only 2.4G we need to inform bt the chnl mask*/
@@ -3654,9 +3643,11 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 |
+ h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -3664,8 +3655,8 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
u8 type) {
if (type == BTC_PACKET_DHCP) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], DHCP Packet notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], DHCP Packet notify\n");
}
}
@@ -3685,19 +3676,19 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length = %d, hex data = [",
- rsp_source, length);
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length = %d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length-1) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x]\n", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x]\n", tmp_buf[i]);
} else {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x, ", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x, ", tmp_buf[i]);
}
}
@@ -3823,8 +3814,8 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Halt notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Halt notify\n");
halbtc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
ex_halbtc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3837,31 +3828,31 @@ void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist)
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], ==========================Periodical===========================\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], ==========================Periodical===========================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ****************************************************************\n");
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num,
- board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num,
+ board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ stack_info->profile_notified ? "Yes" : "No",
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ****************************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
}
halbtc8821a2ant_query_bt_info(btcoexist);
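
Every hunk above applies the same mechanical rewrite: a BTC_PRINT() call that passed BTC_MSG_ALGORITHM becomes btc_alg_dbg(), and one that passed BTC_MSG_INTERFACE becomes btc_iface_dbg(); the per-message flag and the format arguments are unchanged, only re-indented under the new macro name. A minimal before/after pair, taken from the dec-BT-power hunk above, shows the whole pattern:

	/* before: the message class is the first argument */
	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
		  "[BTCoex], %s Dec BT power = %s\n",
		  (force_exec ? "force to" : ""),
		  ((dec_bt_pwr) ? "ON" : "OFF"));

	/* after: the class is encoded in the macro name */
	btc_alg_dbg(ALGO_TRACE_FW,
		    "[BTCoex], %s Dec BT power = %s\n",
		    (force_exec ? "force to" : ""),
		    ((dec_bt_pwr) ? "ON" : "OFF"));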
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index babd1490f..b660c214d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -141,8 +141,8 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
if (rtlphy->current_channel != 0)
chnl = rtlphy->current_channel;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "static halbtc_get_wifi_central_chnl:%d\n", chnl);
+ btc_alg_dbg(ALGO_TRACE,
+ "static halbtc_get_wifi_central_chnl:%d\n", chnl);
return chnl;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index f41ca57dd..3cbe34c53 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -116,12 +116,17 @@ extern u32 btc_dbg_type[];
#define WIFI_P2P_GO_CONNECTED BIT3
#define WIFI_P2P_GC_CONNECTED BIT4
-#define BTC_PRINT(dbgtype, dbgflag, printstr, ...) \
- do { \
- if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
- printk(printstr, ##__VA_ARGS__); \
- } \
- } while (0)
+#define btc_alg_dbg(dbgflag, fmt, ...) \
+do { \
+ if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & dbgflag)) \
+ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
+} while (0)
+#define btc_iface_dbg(dbgflag, fmt, ...) \
+do { \
+ if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] & dbgflag)) \
+ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
+} while (0)
+
#define BTC_RSSI_HIGH(_rssi_) \
((_rssi_ == BTC_RSSI_STATE_HIGH || \
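
The replacement macros hard-code the message class as the index into btc_dbg_type[] and prefix KERN_DEBUG, which the old BTC_PRINT() omitted (a bare printk() falls back to the kernel's default message log level). As a sketch of what a converted call site now expands to, assuming the definitions above:

	/* btc_alg_dbg(ALGO_TRACE_FW, "[BTCoex], ...\n") expands to roughly: */
	do {
		if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & ALGO_TRACE_FW))
			printk(KERN_DEBUG "[BTCoex], ...\n");
	} while (0);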
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index e5037d13b..d12586d4f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -359,30 +359,28 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
bool find_buddy_priv = false;
- struct rtl_priv *tpriv = NULL;
+ struct rtl_priv *tpriv;
struct rtl_pci_priv *tpcipriv = NULL;
if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
list) {
- if (tpriv) {
- tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "pcipriv->ndis_adapter.funcnumber %x\n",
- pcipriv->ndis_adapter.funcnumber);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "tpcipriv->ndis_adapter.funcnumber %x\n",
- tpcipriv->ndis_adapter.funcnumber);
-
- if ((pcipriv->ndis_adapter.busnumber ==
- tpcipriv->ndis_adapter.busnumber) &&
- (pcipriv->ndis_adapter.devnumber ==
- tpcipriv->ndis_adapter.devnumber) &&
- (pcipriv->ndis_adapter.funcnumber !=
- tpcipriv->ndis_adapter.funcnumber)) {
- find_buddy_priv = true;
- break;
- }
+ tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "pcipriv->ndis_adapter.funcnumber %x\n",
+ pcipriv->ndis_adapter.funcnumber);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "tpcipriv->ndis_adapter.funcnumber %x\n",
+ tpcipriv->ndis_adapter.funcnumber);
+
+ if ((pcipriv->ndis_adapter.busnumber ==
+ tpcipriv->ndis_adapter.busnumber) &&
+ (pcipriv->ndis_adapter.devnumber ==
+ tpcipriv->ndis_adapter.devnumber) &&
+ (pcipriv->ndis_adapter.funcnumber !=
+ tpcipriv->ndis_adapter.funcnumber)) {
+ find_buddy_priv = true;
+ break;
}
}
}
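
The dropped NULL test was dead code: list_for_each_entry() derives its cursor from container_of() on each list node, so tpriv is never NULL inside the loop body, and initializing it to NULL beforehand was equally unnecessary. Reduced to the invariant:

	struct rtl_priv *tpriv;	/* no "= NULL" needed; the macro assigns it */

	list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list, list) {
		/* tpriv points at a live entry on every iteration */
	}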
@@ -1213,7 +1211,8 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
/*Tx/Rx related var */
_rtl_pci_init_trx_var(hw);
- /*IBSS*/ mac->beacon_interval = 100;
+ /*IBSS*/
+ mac->beacon_interval = 100;
/*AMPDU*/
mac->min_space_cfg = 0;
@@ -2457,7 +2456,7 @@ int rtl_pci_resume(struct device *dev)
EXPORT_SYMBOL(rtl_pci_resume);
#endif /* CONFIG_PM_SLEEP */
-struct rtl_intf_ops rtl_pci_ops = {
+const struct rtl_intf_ops rtl_pci_ops = {
.read_efuse_byte = read_efuse_byte,
.adapter_start = rtl_pci_start,
.adapter_stop = rtl_pci_stop,
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
index 5da670394..b951ebac1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
@@ -286,7 +286,7 @@ struct rtl_pci_priv {
int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw);
-extern struct rtl_intf_ops rtl_pci_ops;
+extern const struct rtl_intf_ops rtl_pci_ops;
int rtl_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
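
Const-qualifying the interface-ops table only compiles cleanly if the definition, the extern declaration, and the pointer that stores it all agree, and this patch updates all three (pci.c and pci.h here, the struct rtl_priv member in wifi.h further below). In outline, using the declarations shown in this diff:

	const struct rtl_intf_ops rtl_pci_ops = { /* ops */ };	/* pci.c */
	extern const struct rtl_intf_ops rtl_pci_ops;		/* pci.h */
	const struct rtl_intf_ops *intf_ops;	/* wifi.h: struct rtl_priv */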
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index b69321d45..93579cac0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -443,14 +443,10 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
- /* Idle for a while if we connect to AP a while ago. */
- if (mac->cnt_after_linked >= 2) {
- if (ppsc->dot11_psmode == EACTIVE) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Enter 802.11 power save mode...\n");
-
- rtl_lps_set_psmode(hw, EAUTOPS);
- }
+ if (ppsc->dot11_psmode == EACTIVE) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Enter 802.11 power save mode...\n");
+ rtl_lps_set_psmode(hw, EAUTOPS);
}
spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
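
The rtl_lps_enter() hunk removes the cnt_after_linked >= 2 gate, so leisure power save can now be requested immediately after association instead of only once the link has been up for a couple of watchdog ticks; the EACTIVE check is all that remains. Reduced to its core:

	/* before: wait until the link has been up for a while */
	if (mac->cnt_after_linked >= 2 && ppsc->dot11_psmode == EACTIVE)
		rtl_lps_set_psmode(hw, EAUTOPS);

	/* after: enter power save whenever we are still in active mode */
	if (ppsc->dot11_psmode == EACTIVE)
		rtl_lps_set_psmode(hw, EAUTOPS);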
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 5be34118e..3524441fd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -154,13 +154,13 @@ static bool _rtl_is_radar_freq(u16 center_freq)
static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator)
{
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_supported_band *sband;
const struct ieee80211_reg_rule *reg_rule;
struct ieee80211_channel *ch;
unsigned int i;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!wiphy->bands[band])
continue;
@@ -210,9 +210,9 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
struct ieee80211_channel *ch;
const struct ieee80211_reg_rule *reg_rule;
- if (!wiphy->bands[IEEE80211_BAND_2GHZ])
+ if (!wiphy->bands[NL80211_BAND_2GHZ])
return;
- sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = wiphy->bands[NL80211_BAND_2GHZ];
/*
*If no country IE has been received always enable active scan
@@ -262,10 +262,10 @@ static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy)
struct ieee80211_channel *ch;
unsigned int i;
- if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+ if (!wiphy->bands[NL80211_BAND_5GHZ])
return;
- sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
@@ -301,12 +301,12 @@ static void _rtl_reg_apply_world_flags(struct wiphy *wiphy,
static void _rtl_dump_channel_map(struct wiphy *wiphy)
{
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
unsigned int i;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!wiphy->bands[band])
continue;
sband = wiphy->bands[band];
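
These regd.c hunks are part of the tree-wide rename of the band enum from ieee80211_band to nl80211_band (and IEEE80211_NUM_BANDS to NUM_NL80211_BANDS); the numeric values are the same, so loops over wiphy->bands[] keep their shape under the new names:

	enum nl80211_band band;
	struct ieee80211_supported_band *sband;

	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		sband = wiphy->bands[band];
		if (!sband)
			continue;
		/* per-band flag fixups as above */
	}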
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index ce4da9d79..db9a7829d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -1137,7 +1137,7 @@ void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw)
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Schedule TxPowerTracking !!\n");
- dm_txpower_track_cb_therm(hw);
+ dm_txpower_track_cb_therm(hw);
rtlpriv->dm.tm_trigger = 0;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index a2bb02c7b..416a9ba63 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -1903,8 +1903,7 @@ static void _rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
} else {
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
}
-RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
-
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
}
static void _rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index 03cbe4cf1..316be5ff6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -240,7 +240,7 @@ static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
- ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
+ ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 24eff8ea4..35e6bf7e2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -368,7 +368,7 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
status->rate = (u8)GET_RX_DESC_RXMCS(pdesc);
status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1);
- status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+ status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
status->is_cck = RTL92EE_RX_HAL_IS_CCK_RATE(status->rate);
status->macid = GET_RX_DESC_MACID(pdesc);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
index 4b4612fe2..881821f4e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
@@ -645,7 +645,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
rtlpriv->psc.state_inap);
ppsc->last_sleep_jiffies = jiffies;
_rtl92se_phy_set_rf_sleep(hw);
- break;
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not processed\n");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
index 78a81c1e3..9475aa2a8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
@@ -208,8 +208,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
"Realtek regulatory, 40MHz, writeval = 0x%x\n",
writeval);
} else {
- if (rtlphy->pwrgroup_cnt == 1)
- chnlgroup = 0;
+ chnlgroup = 0;
if (rtlphy->pwrgroup_cnt >= 3) {
if (chnl <= 3)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
index 00a0531cc..44de695dc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
@@ -134,9 +134,9 @@ static bool rtl8723e_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw)
if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"Need to decrease bt power\n");
- rtlpriv->btcoexist.cstate |=
- BT_COEX_STATE_DEC_BT_POWER;
- return true;
+ rtlpriv->btcoexist.cstate |=
+ BT_COEX_STATE_DEC_BT_POWER;
+ return true;
}
rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_DEC_BT_POWER;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index b7b73cbe3..445f681d0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -1723,8 +1723,8 @@ static u8 _rtl8723be_phy_path_a_rx_iqk(struct ieee80211_hw *hw)
/* Allen 20131125 */
tmp = (reg_eac & 0x03FF0000) >> 16;
- if ((tmp & 0x200) > 0)
- tmp = 0x400 - tmp;
+ if ((tmp & 0x200) > 0)
+ tmp = 0x400 - tmp;
/* if Tx is OK, check whether Rx is OK */
if (!(reg_eac & BIT(27)) &&
(((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
@@ -2301,8 +2301,7 @@ static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
} else {
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
}
-RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
-
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
}
static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw,
@@ -2606,8 +2605,7 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
"IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
- RT_CLEAR_PS_LEVEL(ppsc,
- RT_RF_OFF_LEVL_HALT_NIC);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
"Set ERFON sleeped:%d ms\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
index 5ed4492d3..97f5a0377 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
@@ -303,8 +303,8 @@ static void _rtl8723be_get_txpower_writeval_by_regulatory(
[chnlgroup][index + (rf ? 8 : 0)] &
(0x7f << (i * 8))) >> (i * 8));
- if (pwr_diff_limit[i] > pwr_diff)
- pwr_diff_limit[i] = pwr_diff;
+ if (pwr_diff_limit[i] > pwr_diff)
+ pwr_diff_limit[i] = pwr_diff;
}
customer_limit = (pwr_diff_limit[3] << 24) |
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 6a8245c4e..17a681788 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -1957,9 +1957,9 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->swing_idx_ofdm_base[p] =
rtldm->swing_idx_ofdm[p];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n",
- rtldm->thermalvalue, thermal_value);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value);
/*Record last Power Tracking Thermal Value*/
rtldm->thermalvalue = thermal_value;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index fe900badd..71e4dd996 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2315,14 +2315,14 @@ static void _rtl8821ae_clear_pci_pme_status(struct ieee80211_hw *hw)
pci_read_config_byte(rtlpci->pdev, 0x34, &cap_pointer);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PCI configration 0x34 = 0x%2x\n", cap_pointer);
+ "PCI configuration 0x34 = 0x%2x\n", cap_pointer);
do {
pci_read_config_word(rtlpci->pdev, cap_pointer, &cap_hdr);
cap_id = cap_hdr & 0xFF;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "in pci configration, cap_pointer%x = %x\n",
+ "in pci configuration, cap_pointer%x = %x\n",
cap_pointer, cap_id);
if (cap_id == 0x01) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 74165b3eb..0c3b9ce86 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -418,9 +418,9 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
out = 0x16A; /* -3 dB */
}
} else {
- u32 swing = 0, swing_a = 0, swing_b = 0;
+ u32 swing = 0, swing_a = 0, swing_b = 0;
- if (band == BAND_ON_2_4G) {
+ if (band == BAND_ON_2_4G) {
if (reg_swing_2g == auto_temp) {
efuse_shadow_read(hw, 1, 0xC6, (u32 *)&swing);
swing = (swing == 0xFF) ? 0x00 : swing;
@@ -514,7 +514,7 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
"<=== PHY_GetTxBBSwing_8812A, out = 0x%X\n", out);
- return out;
+ return out;
}
void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
@@ -959,7 +959,7 @@ static void _rtl8821ae_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw)
static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start,
u8 end, u8 base_val)
{
- char i = 0;
+ int i;
u8 temp_value = 0;
u32 temp_data = 0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index aac1ed3f7..41617b7b0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1049,7 +1049,7 @@ static void rtl_fill_h2c_cmd_work_callback(struct work_struct *work)
rtlpriv->cfg->ops->fill_h2c_cmd(hw, H2C_RA_MASK, 5, rtlpriv->rate_mask);
}
-static struct rtl_intf_ops rtl_usb_ops = {
+static const struct rtl_intf_ops rtl_usb_ops = {
.adapter_start = rtl_usb_start,
.adapter_stop = rtl_usb_stop,
.adapter_tx = rtl_usb_tx,
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 93bd7fcd2..4e0ab4d42 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1359,7 +1359,7 @@ struct rtl_mac {
u32 tx_ss_num;
u32 rx_ss_num;
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
struct ieee80211_hw *hw;
struct ieee80211_vif *vif;
enum nl80211_iftype opmode;
@@ -2593,7 +2593,7 @@ struct rtl_priv {
*intf_ops : for diff interrface usb/pcie
*/
struct rtl_hal_cfg *cfg;
- struct rtl_intf_ops *intf_ops;
+ const struct rtl_intf_ops *intf_ops;
/*this var will be set by set_bit,
and was used to indicate status of
@@ -2870,7 +2870,7 @@ value to host byte ordering.*/
(ppsc->cur_ps_level |= _ps_flg)
#define container_of_dwork_rtl(x, y, z) \
- container_of(container_of(x, struct delayed_work, work), y, z)
+ container_of(to_delayed_work(x), y, z)
#define FILL_OCTET_STRING(_os, _octet, _len) \
(_os).octet = (u8 *)(_octet); \
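
container_of_dwork_rtl() now reuses the generic helper instead of open-coding the inner container_of(). to_delayed_work() is the stock helper from <linux/workqueue.h>, which is essentially:

	static inline struct delayed_work *to_delayed_work(struct work_struct *work)
	{
		return container_of(work, struct delayed_work, work);
	}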
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index a13d1f2b5..569918c48 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1291,7 +1291,7 @@ static int set_channel(struct usbnet *usbdev, int channel)
return 0;
dsconfig = 1000 *
- ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+ ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
len = sizeof(config);
ret = rndis_query_oid(usbdev,
@@ -3476,7 +3476,7 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
priv->band.n_channels = ARRAY_SIZE(rndis_channels);
priv->band.bitrates = priv->rates;
priv->band.n_bitrates = ARRAY_SIZE(rndis_rates);
- wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
memcpy(priv->cipher_suites, rndis_cipher_suites,
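The IEEE80211_BAND_* → NL80211_BAND_* conversions here, and throughout the remainder of this patch, are value-preserving: before the alias enum was removed in 4.7, enum ieee80211_band was already defined in terms of the nl80211 values, approximately:

    enum ieee80211_band {
        IEEE80211_BAND_2GHZ  = NL80211_BAND_2GHZ,
        IEEE80211_BAND_5GHZ  = NL80211_BAND_5GHZ,
        IEEE80211_BAND_60GHZ = NL80211_BAND_60GHZ,

        /* keep last */
        IEEE80211_NUM_BANDS
    };

so dropping the alias changes no binary layout: arrays sized with IEEE80211_NUM_BANDS and NUM_NL80211_BANDS have the same number of elements, and band indices keep their values.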
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 4df992de7..dbb23899d 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -20,84 +20,84 @@
#include "rsi_common.h"
static const struct ieee80211_channel rsi_2ghz_channels[] = {
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2412,
.hw_value = 1 }, /* Channel 1 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2417,
.hw_value = 2 }, /* Channel 2 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2422,
.hw_value = 3 }, /* Channel 3 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2427,
.hw_value = 4 }, /* Channel 4 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2432,
.hw_value = 5 }, /* Channel 5 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2437,
.hw_value = 6 }, /* Channel 6 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2442,
.hw_value = 7 }, /* Channel 7 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2447,
.hw_value = 8 }, /* Channel 8 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2452,
.hw_value = 9 }, /* Channel 9 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2457,
.hw_value = 10 }, /* Channel 10 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2462,
.hw_value = 11 }, /* Channel 11 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2467,
.hw_value = 12 }, /* Channel 12 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2472,
.hw_value = 13 }, /* Channel 13 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2484,
.hw_value = 14 }, /* Channel 14 */
};
static const struct ieee80211_channel rsi_5ghz_channels[] = {
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5180,
.hw_value = 36, }, /* Channel 36 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5200,
.hw_value = 40, }, /* Channel 40 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5220,
.hw_value = 44, }, /* Channel 44 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5240,
.hw_value = 48, }, /* Channel 48 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5260,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5260,
.hw_value = 52, }, /* Channel 52 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5280,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5280,
.hw_value = 56, }, /* Channel 56 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5300,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5300,
.hw_value = 60, }, /* Channel 60 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5320,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5320,
.hw_value = 64, }, /* Channel 64 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5500,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5500,
.hw_value = 100, }, /* Channel 100 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5520,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5520,
.hw_value = 104, }, /* Channel 104 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5540,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5540,
.hw_value = 108, }, /* Channel 108 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5560,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5560,
.hw_value = 112, }, /* Channel 112 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5580,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5580,
.hw_value = 116, }, /* Channel 116 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5600,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5600,
.hw_value = 120, }, /* Channel 120 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5620,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5620,
.hw_value = 124, }, /* Channel 124 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5640,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5640,
.hw_value = 128, }, /* Channel 128 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5660,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5660,
.hw_value = 132, }, /* Channel 132 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5680,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5680,
.hw_value = 136, }, /* Channel 136 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5700,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5700,
.hw_value = 140, }, /* Channel 140 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5745,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5745,
.hw_value = 149, }, /* Channel 149 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5765,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5765,
.hw_value = 153, }, /* Channel 153 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5785,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5785,
.hw_value = 157, }, /* Channel 157 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5805,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5805,
.hw_value = 161, }, /* Channel 161 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5825,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5825,
.hw_value = 165, }, /* Channel 165 */
};
@@ -150,12 +150,12 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
struct ieee80211_supported_band *sbands = &adapter->sbands[band];
void *channels = NULL;
- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL);
memcpy(channels,
rsi_2ghz_channels,
sizeof(rsi_2ghz_channels));
- sbands->band = IEEE80211_BAND_2GHZ;
+ sbands->band = NL80211_BAND_2GHZ;
sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
sbands->bitrates = rsi_rates;
sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
@@ -164,7 +164,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
memcpy(channels,
rsi_5ghz_channels,
sizeof(rsi_5ghz_channels));
- sbands->band = IEEE80211_BAND_5GHZ;
+ sbands->band = NL80211_BAND_5GHZ;
sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
sbands->bitrates = &rsi_rates[4];
sbands->n_bitrates = ARRAY_SIZE(rsi_rates) - 4;
@@ -775,7 +775,7 @@ static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
- enum ieee80211_band band = hw->conf.chandef.chan->band;
+ enum nl80211_band band = hw->conf.chandef.chan->band;
mutex_lock(&common->mutex);
common->fixedrate_mask[band] = 0;
@@ -999,8 +999,8 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
mutex_lock(&common->mutex);
/* Resetting all the fields to default values */
- common->bitrate_mask[IEEE80211_BAND_2GHZ] = 0;
- common->bitrate_mask[IEEE80211_BAND_5GHZ] = 0;
+ common->bitrate_mask[NL80211_BAND_2GHZ] = 0;
+ common->bitrate_mask[NL80211_BAND_5GHZ] = 0;
common->min_rate = 0xffff;
common->vif_info[0].is_ht = false;
common->vif_info[0].sgi = false;
@@ -1070,8 +1070,8 @@ int rsi_mac80211_attach(struct rsi_common *common)
hw->max_rate_tries = MAX_RETRIES;
hw->max_tx_aggregation_subframes = 6;
- rsi_register_rates_channels(adapter, IEEE80211_BAND_2GHZ);
- rsi_register_rates_channels(adapter, IEEE80211_BAND_5GHZ);
+ rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
+ rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ);
hw->rate_control_algorithm = "AARF";
SET_IEEE80211_PERM_ADDR(hw, common->mac_addr);
@@ -1087,10 +1087,10 @@ int rsi_mac80211_attach(struct rsi_common *common)
wiphy->available_antennas_rx = 1;
wiphy->available_antennas_tx = 1;
- wiphy->bands[IEEE80211_BAND_2GHZ] =
- &adapter->sbands[IEEE80211_BAND_2GHZ];
- wiphy->bands[IEEE80211_BAND_5GHZ] =
- &adapter->sbands[IEEE80211_BAND_5GHZ];
+ wiphy->bands[NL80211_BAND_2GHZ] =
+ &adapter->sbands[NL80211_BAND_2GHZ];
+ wiphy->bands[NL80211_BAND_5GHZ] =
+ &adapter->sbands[NL80211_BAND_5GHZ];
status = ieee80211_register_hw(hw);
if (status)
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index e43b59d5b..40658b62d 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -210,7 +210,7 @@ static u16 mcs[] = {13, 26, 39, 52, 78, 104, 117, 130};
*/
static void rsi_set_default_parameters(struct rsi_common *common)
{
- common->band = IEEE80211_BAND_2GHZ;
+ common->band = NL80211_BAND_2GHZ;
common->channel_width = BW_20MHZ;
common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
common->channel = 1;
@@ -655,7 +655,7 @@ int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode)
vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold);
vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6);
- if (common->band == IEEE80211_BAND_5GHZ) {
+ if (common->band == NL80211_BAND_5GHZ) {
vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_6);
if (conf_is_ht40(&common->priv->hw->conf)) {
vap_caps->default_ctrl_rate |=
@@ -872,7 +872,7 @@ int rsi_band_check(struct rsi_common *common)
else
common->channel_width = BW_40MHZ;
- if (common->band == IEEE80211_BAND_2GHZ) {
+ if (common->band == NL80211_BAND_2GHZ) {
if (common->channel_width)
common->endpoint = EP_2GHZ_40MHZ;
else
@@ -1046,7 +1046,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common)
if (common->channel_width == BW_40MHZ)
auto_rate->desc_word[7] |= cpu_to_le16(1);
- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
min_rate = RSI_RATE_1;
rate_table_offset = 0;
} else {
diff --git a/drivers/net/wireless/rsi/rsi_91x_pkt.c b/drivers/net/wireless/rsi/rsi_91x_pkt.c
index 702593f19..02920c93e 100644
--- a/drivers/net/wireless/rsi/rsi_91x_pkt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_pkt.c
@@ -27,22 +27,24 @@
int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
{
struct rsi_hw *adapter = common->priv;
- struct ieee80211_hdr *tmp_hdr = NULL;
+ struct ieee80211_hdr *tmp_hdr;
struct ieee80211_tx_info *info;
struct skb_info *tx_params;
- struct ieee80211_bss_conf *bss = NULL;
- int status = -EINVAL;
+ struct ieee80211_bss_conf *bss;
+ int status;
u8 ieee80211_size = MIN_802_11_HDR_LEN;
- u8 extnd_size = 0;
+ u8 extnd_size;
__le16 *frame_desc;
- u16 seq_num = 0;
+ u16 seq_num;
info = IEEE80211_SKB_CB(skb);
bss = &info->control.vif->bss_conf;
tx_params = (struct skb_info *)info->driver_data;
- if (!bss->assoc)
+ if (!bss->assoc) {
+ status = -EINVAL;
goto err;
+ }
tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4);
@@ -123,15 +125,15 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
struct sk_buff *skb)
{
struct rsi_hw *adapter = common->priv;
- struct ieee80211_hdr *wh = NULL;
+ struct ieee80211_hdr *wh;
struct ieee80211_tx_info *info;
- struct ieee80211_bss_conf *bss = NULL;
+ struct ieee80211_bss_conf *bss;
struct ieee80211_hw *hw = adapter->hw;
struct ieee80211_conf *conf = &hw->conf;
struct skb_info *tx_params;
int status = -E2BIG;
- __le16 *msg = NULL;
- u8 extnd_size = 0;
+ __le16 *msg;
+ u8 extnd_size;
u8 vap_id = 0;
info = IEEE80211_SKB_CB(skb);
@@ -182,7 +184,7 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
if (wh->addr1[0] & BIT(0))
msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT);
- if (common->band == IEEE80211_BAND_2GHZ)
+ if (common->band == NL80211_BAND_2GHZ)
msg[4] = cpu_to_le16(RSI_11B_MODE);
else
msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE);
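Besides the band rename, the rsi_91x_pkt.c hunks above change the error-handling style: instead of pre-initializing status to a pessimistic -EINVAL, the errno is assigned at the failure site. That reads more directly and lets the compiler's may-be-used-uninitialized analysis flag any path that reaches the error label without setting status. In sketch form (bss_conf and send_pkt are simplified stand-ins):

    struct bss_conf { int assoc; };

    static int send_pkt(const struct bss_conf *bss)
    {
        int status;              /* deliberately left uninitialized */

        if (!bss->assoc) {
            status = -22;        /* -EINVAL, set at the failure site */
            goto err;
        }

        status = 0;              /* success path */
    err:
        return status;
    }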
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index 5baed945f..dcd095787 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -211,7 +211,7 @@ struct rsi_hw {
struct ieee80211_hw *hw;
struct ieee80211_vif *vifs[RSI_MAX_VIFS];
struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
struct device *device;
u8 sc_nvifs;
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index 0e51e27d2..dc478cedb 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -102,7 +102,7 @@ static struct ieee80211_rate cw1200_mcs_rates[] = {
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -111,7 +111,7 @@ static struct ieee80211_rate cw1200_mcs_rates[] = {
}
#define CHAN5G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -311,12 +311,12 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
hw->sta_data_size = sizeof(struct cw1200_sta_priv);
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = &cw1200_band_2ghz;
if (have_5ghz)
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = &cw1200_band_5ghz;
/* Channel params have to be cleared before registering wiphy again */
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
if (!sband)
continue;
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index bff81b8d4..983788156 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -402,7 +402,7 @@ void cw1200_probe_work(struct work_struct *work)
}
wsm = (struct wsm_tx *)frame.skb->data;
scan.max_tx_rate = wsm->max_tx_rate;
- scan.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ scan.band = (priv->channel->band == NL80211_BAND_5GHZ) ?
WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
if (priv->join_status == CW1200_JOIN_STATUS_STA ||
priv->join_status == CW1200_JOIN_STATUS_IBSS) {
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index eda75acf6..4c7fd2618 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -1278,7 +1278,7 @@ static void cw1200_do_join(struct cw1200_common *priv)
join.dtim_period = priv->join_dtim_period;
join.channel_number = priv->channel->hw_value;
- join.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ join.band = (priv->channel->band == NL80211_BAND_5GHZ) ?
WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
memcpy(join.bssid, bssid, sizeof(join.bssid));
@@ -1462,7 +1462,7 @@ int cw1200_enable_listening(struct cw1200_common *priv)
};
if (priv->channel) {
- start.band = priv->channel->band == IEEE80211_BAND_5GHZ ?
+ start.band = priv->channel->band == NL80211_BAND_5GHZ ?
WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
start.channel_number = priv->channel->hw_value;
} else {
@@ -2315,7 +2315,7 @@ static int cw1200_start_ap(struct cw1200_common *priv)
struct wsm_start start = {
.mode = priv->vif->p2p ?
WSM_START_MODE_P2P_GO : WSM_START_MODE_AP,
- .band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ .band = (priv->channel->band == NL80211_BAND_5GHZ) ?
WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G,
.channel_number = priv->channel->hw_value,
.beacon_interval = conf->beacon_int,
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index d28bd49cb..3d170287c 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -1079,7 +1079,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
hdr->band = ((arg->channel_number & 0xff00) ||
(arg->channel_number > 14)) ?
- IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+ NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
hdr->freq = ieee80211_channel_to_frequency(
arg->channel_number,
hdr->band);
diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c
index 9e0ca3048..680d60eab 100644
--- a/drivers/net/wireless/st/cw1200/wsm.c
+++ b/drivers/net/wireless/st/cw1200/wsm.c
@@ -849,9 +849,9 @@ static int wsm_startup_indication(struct cw1200_common *priv,
/* Disable unsupported frequency bands */
if (!(priv->wsm_caps.fw_cap & 0x1))
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
+ priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
if (!(priv->wsm_caps.fw_cap & 0x2))
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+ priv->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
priv->firmware_ready = 1;
wake_up(&priv->wsm_startup_done);
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index a0536aed7..e74d60dad 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1482,7 +1482,7 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
wl->hw->wiphy->max_scan_ssids = 1;
- wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
+ wl->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wl1251_band_2ghz;
wl->hw->queues = 4;
diff --git a/drivers/net/wireless/ti/wl1251/ps.c b/drivers/net/wireless/ti/wl1251/ps.c
index b9e27b98b..fa01b0a0f 100644
--- a/drivers/net/wireless/ti/wl1251/ps.c
+++ b/drivers/net/wireless/ti/wl1251/ps.c
@@ -32,7 +32,7 @@ void wl1251_elp_work(struct work_struct *work)
struct delayed_work *dwork;
struct wl1251 *wl;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wl = container_of(dwork, struct wl1251, elp_work);
wl1251_debug(DEBUG_PSM, "elp work");
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index cde0eaf99..a27d4c22b 100644
--- a/drivers/net/wireless/ti/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
@@ -53,7 +53,7 @@ static void wl1251_rx_status(struct wl1251 *wl,
memset(status, 0, sizeof(struct ieee80211_rx_status));
- status->band = IEEE80211_BAND_2GHZ;
+ status->band = NL80211_BAND_2GHZ;
status->mactime = desc->timestamp;
/*
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index adfb3e9de..29f13f890 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -469,8 +469,8 @@ static const u8 wl12xx_rate_to_idx_5ghz[] = {
};
static const u8 *wl12xx_band_rate_to_idx[] = {
- [IEEE80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz,
- [IEEE80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz
+ [NL80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz,
+ [NL80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz
};
enum wl12xx_hw_rates {
@@ -553,8 +553,8 @@ static struct wlcore_partition_set wl12xx_ptable[PART_TABLE_LEN] = {
.size = 0x00000004
},
.mem3 = {
- .start = 0x00040404,
- .size = 0x00000000
+ .start = 0x00000000,
+ .size = 0x00040404
},
},
@@ -1827,8 +1827,8 @@ static int wl12xx_setup(struct wl1271 *wl)
wl->fw_status_priv_len = 0;
wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
wl->ofdm_only_ap = true;
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
- wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ, &wl12xx_ht_cap);
+ wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ, &wl12xx_ht_cap);
wl12xx_conf_init(wl);
if (!fref_param) {
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index ebed13af9..8d475393f 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -27,7 +27,7 @@
static int wl1271_get_scan_channels(struct wl1271 *wl,
struct cfg80211_scan_request *req,
struct basic_scan_channel_params *channels,
- enum ieee80211_band band, bool passive)
+ enum nl80211_band band, bool passive)
{
struct conf_scan_settings *c = &wl->conf.scan;
int i, j;
@@ -92,7 +92,7 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
#define WL1271_NOTHING_TO_SCAN 1
static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- enum ieee80211_band band,
+ enum nl80211_band band,
bool passive, u32 basic_rate)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
@@ -144,12 +144,12 @@ static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
cmd->params.tid_trigger = CONF_TX_AC_ANY_TID;
cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
- if (band == IEEE80211_BAND_2GHZ)
+ if (band == NL80211_BAND_2GHZ)
cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ;
else
cmd->params.band = WL1271_SCAN_BAND_5_GHZ;
- if (wl->scan.ssid_len && wl->scan.ssid) {
+ if (wl->scan.ssid_len) {
cmd->params.ssid_len = wl->scan.ssid_len;
memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
}
@@ -218,7 +218,7 @@ out:
void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret = 0;
- enum ieee80211_band band;
+ enum nl80211_band band;
u32 rate, mask;
switch (wl->scan.state) {
@@ -226,7 +226,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
break;
case WL1271_SCAN_STATE_2GHZ_ACTIVE:
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
mask = wlvif->bitrate_masks[band];
if (wl->scan.req->no_cck) {
mask &= ~CONF_TX_CCK_RATES;
@@ -243,7 +243,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
break;
case WL1271_SCAN_STATE_2GHZ_PASSIVE:
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
mask = wlvif->bitrate_masks[band];
if (wl->scan.req->no_cck) {
mask &= ~CONF_TX_CCK_RATES;
@@ -263,7 +263,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
break;
case WL1271_SCAN_STATE_5GHZ_ACTIVE:
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
ret = wl1271_scan_send(wl, wlvif, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
@@ -274,7 +274,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
break;
case WL1271_SCAN_STATE_5GHZ_PASSIVE:
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
ret = wl1271_scan_send(wl, wlvif, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
@@ -378,7 +378,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
wl12xx_adjust_channels(cfg, cfg_channels);
if (!force_passive && cfg->active[0]) {
- u8 band = IEEE80211_BAND_2GHZ;
+ u8 band = NL80211_BAND_2GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
wlvif->role_id, band,
req->ssids[0].ssid,
@@ -395,7 +395,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
if (!force_passive && cfg->active[1]) {
- u8 band = IEEE80211_BAND_5GHZ;
+ u8 band = NL80211_BAND_5GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
wlvif->role_id, band,
req->ssids[0].ssid,
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.c b/drivers/net/wireless/ti/wl18xx/cmd.c
index a8d176ddc..63e95ba74 100644
--- a/drivers/net/wireless/ti/wl18xx/cmd.c
+++ b/drivers/net/wireless/ti/wl18xx/cmd.c
@@ -48,10 +48,10 @@ int wl18xx_cmd_channel_switch(struct wl1271 *wl,
cmd->stop_tx = ch_switch->block_tx;
switch (ch_switch->chandef.chan->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
cmd->band = WLCORE_BAND_2_4GHZ;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
cmd->band = WLCORE_BAND_5GHZ;
break;
default:
@@ -187,7 +187,7 @@ int wl18xx_cmd_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start)
cmd->role_id = wlvif->role_id;
cmd->channel = wlvif->channel;
- if (wlvif->band == IEEE80211_BAND_5GHZ)
+ if (wlvif->band == NL80211_BAND_5GHZ)
cmd->band = WLCORE_BAND_5GHZ;
cmd->bandwidth = wlcore_get_native_channel_type(wlvif->channel_type);
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index ff6e46dd6..ef811848d 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -64,13 +64,13 @@ static int wlcore_smart_config_sync_event(struct wl1271 *wl, u8 sync_channel,
u8 sync_band)
{
struct sk_buff *skb;
- enum ieee80211_band band;
+ enum nl80211_band band;
int freq;
if (sync_band == WLCORE_BAND_5GHZ)
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
else
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
freq = ieee80211_channel_to_frequency(sync_channel, band);
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index be21289c3..fc02c27bc 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -137,8 +137,8 @@ static const u8 wl18xx_rate_to_idx_5ghz[] = {
};
static const u8 *wl18xx_band_rate_to_idx[] = {
- [IEEE80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
- [IEEE80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
+ [NL80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
+ [NL80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
};
enum wl18xx_hw_rates {
@@ -1302,12 +1302,12 @@ static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
wl1271_debug(DEBUG_ACX, "using wide channel rate mask");
/* sanity check - we don't support this */
- if (WARN_ON(wlvif->band != IEEE80211_BAND_5GHZ))
+ if (WARN_ON(wlvif->band != NL80211_BAND_5GHZ))
return 0;
return CONF_TX_RATE_USE_WIDE_CHAN;
} else if (wl18xx_is_mimo_supported(wl) &&
- wlvif->band == IEEE80211_BAND_2GHZ) {
+ wlvif->band == NL80211_BAND_2GHZ) {
wl1271_debug(DEBUG_ACX, "using MIMO rate mask");
/*
* we don't care about HT channel here - if a peer doesn't
@@ -1996,24 +1996,24 @@ static int wl18xx_setup(struct wl1271 *wl)
* siso40.
*/
if (wl18xx_is_mimo_supported(wl))
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
&wl18xx_mimo_ht_cap_2ghz);
else
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
&wl18xx_siso40_ht_cap_2ghz);
/* 5Ghz is always wide */
- wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
&wl18xx_siso40_ht_cap_5ghz);
} else if (priv->conf.ht.mode == HT_MODE_WIDE) {
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
&wl18xx_siso40_ht_cap_2ghz);
- wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
&wl18xx_siso40_ht_cap_5ghz);
} else if (priv->conf.ht.mode == HT_MODE_SISO20) {
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
&wl18xx_siso20_ht_cap);
- wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
&wl18xx_siso20_ht_cap);
}
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
index bc15aa2c3..4e5221544 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.c
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -110,7 +110,7 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
/* TODO: per-band ies? */
if (cmd->active[0]) {
- u8 band = IEEE80211_BAND_2GHZ;
+ u8 band = NL80211_BAND_2GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
cmd->role_id, band,
req->ssids ? req->ssids[0].ssid : NULL,
@@ -127,7 +127,7 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
}
if (cmd->active[1] || cmd->dfs) {
- u8 band = IEEE80211_BAND_5GHZ;
+ u8 band = NL80211_BAND_5GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
cmd->role_id, band,
req->ssids ? req->ssids[0].ssid : NULL,
@@ -253,7 +253,7 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
cmd->terminate_on_report = 0;
if (cmd->active[0]) {
- u8 band = IEEE80211_BAND_2GHZ;
+ u8 band = NL80211_BAND_2GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
cmd->role_id, band,
req->ssids ? req->ssids[0].ssid : NULL,
@@ -270,7 +270,7 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
}
if (cmd->active[1] || cmd->dfs) {
- u8 band = IEEE80211_BAND_5GHZ;
+ u8 band = NL80211_BAND_5GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
cmd->role_id, band,
req->ssids ? req->ssids[0].ssid : NULL,
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index 3406ffb53..ebaf66ef3 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -43,7 +43,7 @@ void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
if (fw_rate <= CONF_HW_RATE_INDEX_54MBPS) {
rate->idx = fw_rate;
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
rate->idx -= CONF_HW_RATE_INDEX_6MBPS;
rate->flags = 0;
} else {
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index f01d24baf..33153565a 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -423,7 +423,7 @@ EXPORT_SYMBOL_GPL(wlcore_get_native_channel_type);
static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
- enum ieee80211_band band,
+ enum nl80211_band band,
int channel)
{
struct wl12xx_cmd_role_start *cmd;
@@ -438,7 +438,7 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
cmd->role_id = wlvif->dev_role_id;
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
cmd->band = WLCORE_BAND_5GHZ;
cmd->channel = channel;
@@ -524,7 +524,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);
cmd->role_id = wlvif->role_id;
- if (wlvif->band == IEEE80211_BAND_5GHZ)
+ if (wlvif->band == NL80211_BAND_5GHZ)
cmd->band = WLCORE_BAND_5GHZ;
cmd->channel = wlvif->channel;
cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
@@ -693,10 +693,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
cmd->ap.local_rates = cpu_to_le32(supported_rates);
switch (wlvif->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
cmd->band = WLCORE_BAND_2_4GHZ;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
cmd->band = WLCORE_BAND_5GHZ;
break;
default:
@@ -773,7 +773,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);
cmd->role_id = wlvif->role_id;
- if (wlvif->band == IEEE80211_BAND_5GHZ)
+ if (wlvif->band == NL80211_BAND_5GHZ)
cmd->band = WLCORE_BAND_5GHZ;
cmd->channel = wlvif->channel;
cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
@@ -1164,7 +1164,7 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
}
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
- if (band == IEEE80211_BAND_2GHZ)
+ if (band == NL80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, role_id,
template_id_2_4,
skb->data, skb->len, 0, rate);
@@ -1195,7 +1195,7 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
wl1271_debug(DEBUG_SCAN, "set ap probe request template");
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
- if (wlvif->band == IEEE80211_BAND_2GHZ)
+ if (wlvif->band == NL80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_CFG_PROBE_REQ_2_4,
skb->data, skb->len, 0, rate);
@@ -1628,19 +1628,19 @@ out:
return ret;
}
-static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
+static int wlcore_get_reg_conf_ch_idx(enum nl80211_band band, u16 ch)
{
/*
* map the given band/channel to the respective predefined
* bit expected by the fw
*/
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
/* channels 1..14 are mapped to 0..13 */
if (ch >= 1 && ch <= 14)
return ch - 1;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
switch (ch) {
case 8 ... 16:
/* channels 8,12,16 are mapped to 18,19,20 */
@@ -1670,7 +1670,7 @@ static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
}
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
int ch_bit_idx = 0;
@@ -1699,7 +1699,7 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
memset(tmp_ch_bitmap, 0, sizeof(tmp_ch_bitmap));
- for (b = IEEE80211_BAND_2GHZ; b <= IEEE80211_BAND_5GHZ; b++) {
+ for (b = NL80211_BAND_2GHZ; b <= NL80211_BAND_5GHZ; b++) {
band = wiphy->bands[b];
for (i = 0; i < band->n_channels; i++) {
struct ieee80211_channel *channel = &band->channels[i];
@@ -1851,7 +1851,7 @@ out:
}
static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- u8 role_id, enum ieee80211_band band, u8 channel)
+ u8 role_id, enum nl80211_band band, u8 channel)
{
struct wl12xx_cmd_roc *cmd;
int ret = 0;
@@ -1870,10 +1870,10 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
cmd->role_id = role_id;
cmd->channel = channel;
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
cmd->band = WLCORE_BAND_2_4GHZ;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
cmd->band = WLCORE_BAND_5GHZ;
break;
default:
@@ -1925,7 +1925,7 @@ out:
}
int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
- enum ieee80211_band band, u8 channel)
+ enum nl80211_band band, u8 channel)
{
int ret = 0;
@@ -1995,7 +1995,7 @@ out:
/* start dev role and roc on its channel */
int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- enum ieee80211_band band, int channel)
+ enum nl80211_band band, int channel)
{
int ret;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index e28e2f230..52c3b4860 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -40,7 +40,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- enum ieee80211_band band, int channel);
+ enum nl80211_band band, int channel);
int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
@@ -83,14 +83,14 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 hlid);
int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
- enum ieee80211_band band, u8 channel);
+ enum nl80211_band band, u8 channel);
int wl12xx_croc(struct wl1271 *wl, u8 role_id);
int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta, u8 hlid);
int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 hlid);
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
- enum ieee80211_band band);
+ enum nl80211_band band);
int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 feature, u8 enable, u8 value);
diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c
index 564ca750c..1cc6d5ab0 100644
--- a/drivers/net/wireless/ti/wlcore/io.c
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -175,14 +175,25 @@ int wlcore_set_partition(struct wl1271 *wl,
if (ret < 0)
goto out;
- /* We don't need the size of the last partition, as it is
- * automatically calculated based on the total memory size and
- * the sizes of the previous partitions.
+ /* wl12xx only: We don't need the size of the last partition,
+ * as it is automatically calculated based on the total memory
+ * size and the sizes of the previous partitions.
+ *
+ * wl18xx re-defines the HW_PART3 addresses for logger over
+ * SDIO support. wl12xx is expecting the write to
+ * HW_PART3_START_ADDR at offset 24. This creates a conflict
+ * between the addresses.
+ * To resolve it, the expected value is written to
+ * HW_PART3_SIZE_ADDR instead, which is at offset 24 after this
+ * change.
*/
ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
if (ret < 0)
goto out;
+ ret = wlcore_raw_write32(wl, HW_PART3_SIZE_ADDR, p->mem3.size);
+ if (ret < 0)
+ goto out;
+
out:
return ret;
}
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 10cf37476..704ce6467 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -36,7 +36,8 @@
#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
-#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
+#define HW_PART3_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
+#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 28)
#define HW_ACCESS_REGISTER_SIZE 4
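This io.h change completes the fix described in the wlcore_set_partition() comment above: offset 24 remains the one write wl12xx firmware expects, while wl18xx gains a separate slot for mem3.start. The resulting map, relative to HW_PARTITION_REGISTERS_ADDR and using only the definitions shown:

    /*
     *  +12  HW_PART1_START_ADDR
     *  +16  HW_PART2_SIZE_ADDR
     *  +20  HW_PART2_START_ADDR
     *  +24  HW_PART3_SIZE_ADDR    (this slot was HW_PART3_START_ADDR)
     *  +28  HW_PART3_START_ADDR   (new slot)
     *
     * The wl12xx partition-table change earlier in this patch swaps
     * mem3.start and mem3.size, so the value 0x00040404 that used to
     * be written as "start" now travels in .size and still lands at
     * offset 24.
     */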
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 1edf6eed7..2509c0050 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -243,7 +243,7 @@ static void wl12xx_tx_watchdog_work(struct work_struct *work)
struct delayed_work *dwork;
struct wl1271 *wl;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wl = container_of(dwork, struct wl1271, tx_watchdog_work);
mutex_lock(&wl->mutex);
@@ -1930,7 +1930,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
wlcore_enable_interrupts(wl);
- wl->band = IEEE80211_BAND_2GHZ;
+ wl->band = NL80211_BAND_2GHZ;
wl->rx_counter = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
@@ -2011,7 +2011,7 @@ static void wlcore_channel_switch_work(struct work_struct *work)
struct wl12xx_vif *wlvif;
int ret;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
wl = wlvif->wl;
@@ -2047,7 +2047,7 @@ static void wlcore_connection_loss_work(struct work_struct *work)
struct ieee80211_vif *vif;
struct wl12xx_vif *wlvif;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
wl = wlvif->wl;
@@ -2076,7 +2076,7 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work)
unsigned long time_spare;
int ret;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wlvif = container_of(dwork, struct wl12xx_vif,
pending_auth_complete_work);
wl = wlvif->wl;
@@ -2240,8 +2240,8 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wlvif->rate_set = CONF_TX_ENABLED_RATES;
}
- wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
- wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
+ wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
+ wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
/*
@@ -2330,7 +2330,7 @@ power_off:
* 11a channels if not supported
*/
if (!wl->enable_11a)
- wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
+ wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
wl->enable_11a ? "" : "not ");
@@ -5588,7 +5588,7 @@ static void wlcore_roc_complete_work(struct work_struct *work)
struct wl1271 *wl;
int ret;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wl = container_of(dwork, struct wl1271, roc_complete_work);
ret = wlcore_roc_completed(wl);
@@ -5871,7 +5871,7 @@ static const struct ieee80211_ops wl1271_ops = {
};
-u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
+u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
{
u8 idx;
@@ -6096,21 +6096,21 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
* We keep local copies of the band structs because we need to
* modify them on a per-device basis.
*/
- memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
+ memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
sizeof(wl1271_band_2ghz));
- memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
- &wl->ht_cap[IEEE80211_BAND_2GHZ],
+ memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
+ &wl->ht_cap[NL80211_BAND_2GHZ],
sizeof(*wl->ht_cap));
- memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
+ memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
sizeof(wl1271_band_5ghz));
- memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
- &wl->ht_cap[IEEE80211_BAND_5GHZ],
+ memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
+ &wl->ht_cap[NL80211_BAND_5GHZ],
sizeof(*wl->ht_cap));
- wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &wl->bands[IEEE80211_BAND_2GHZ];
- wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &wl->bands[IEEE80211_BAND_5GHZ];
+ wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &wl->bands[NL80211_BAND_2GHZ];
+ wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &wl->bands[NL80211_BAND_5GHZ];
/*
* allow 4 queues per mac address we support +
@@ -6205,7 +6205,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
wl->channel = 0;
wl->rx_counter = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->band = IEEE80211_BAND_2GHZ;
+ wl->band = NL80211_BAND_2GHZ;
wl->channel_type = NL80211_CHAN_NO_HT;
wl->flags = 0;
wl->sg_enabled = true;
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 4cd316e61..b36133b73 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -38,7 +38,7 @@ void wl1271_elp_work(struct work_struct *work)
struct wl12xx_vif *wlvif;
int ret;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wl = container_of(dwork, struct wl1271, elp_work);
wl1271_debug(DEBUG_PSM, "elp work");
@@ -202,7 +202,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
* enable beacon early termination.
* Not relevant for 5GHz and for high rates.
*/
- if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
+ if ((wlvif->band == NL80211_BAND_2GHZ) &&
(wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
ret = wl1271_acx_bet_enable(wl, wlvif, true);
if (ret < 0)
@@ -213,7 +213,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_debug(DEBUG_PSM, "leaving psm");
/* disable beacon early termination */
- if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
+ if ((wlvif->band == NL80211_BAND_2GHZ) &&
(wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
ret = wl1271_acx_bet_enable(wl, wlvif, false);
if (ret < 0)
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 34e7e938e..c9bd294a0 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -64,9 +64,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
memset(status, 0, sizeof(struct ieee80211_rx_status));
if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
- status->band = IEEE80211_BAND_2GHZ;
+ status->band = NL80211_BAND_2GHZ;
else
- status->band = IEEE80211_BAND_5GHZ;
+ status->band = NL80211_BAND_5GHZ;
status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band);
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index f5a7087cf..57c056563 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -146,7 +146,7 @@ struct wl1271_rx_descriptor {
} __packed;
int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status);
-u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+u8 wl1271_rate_to_idx(int rate, enum nl80211_band band);
int wl1271_rx_filter_enable(struct wl1271 *wl,
int index, bool enable,
struct wl12xx_rx_filter *filter);
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 1e3d51cd6..233436432 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -38,7 +38,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
struct wl12xx_vif *wlvif;
int ret;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wl = container_of(dwork, struct wl1271, scan_complete_work);
wl1271_debug(DEBUG_SCAN, "Scanning complete");
@@ -164,7 +164,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
u32 delta_per_probe;
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
delta_per_probe = c->dwell_time_delta_per_probe_5;
else
delta_per_probe = c->dwell_time_delta_per_probe;
@@ -215,7 +215,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
channels[j].channel = req_channels[i]->hw_value;
if (n_pactive_ch &&
- (band == IEEE80211_BAND_2GHZ) &&
+ (band == NL80211_BAND_2GHZ) &&
(channels[j].channel >= 12) &&
(channels[j].channel <= 14) &&
(flags & IEEE80211_CHAN_NO_IR) &&
@@ -266,7 +266,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
n_channels,
n_ssids,
cfg->channels_2,
- IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ,
false, true, 0,
MAX_CHANNELS_2GHZ,
&n_pactive_ch,
@@ -277,7 +277,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
n_channels,
n_ssids,
cfg->channels_2,
- IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ,
false, false,
cfg->passive[0],
MAX_CHANNELS_2GHZ,
@@ -289,7 +289,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
n_channels,
n_ssids,
cfg->channels_5,
- IEEE80211_BAND_5GHZ,
+ NL80211_BAND_5GHZ,
false, true, 0,
wl->max_channels_5,
&n_pactive_ch,
@@ -300,7 +300,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
n_channels,
n_ssids,
cfg->channels_5,
- IEEE80211_BAND_5GHZ,
+ NL80211_BAND_5GHZ,
true, true,
cfg->passive[1],
wl->max_channels_5,
@@ -312,7 +312,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
n_channels,
n_ssids,
cfg->channels_5,
- IEEE80211_BAND_5GHZ,
+ NL80211_BAND_5GHZ,
false, false,
cfg->passive[1] + cfg->dfs,
wl->max_channels_5,
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 020ac1a4b..cea9443c2 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -382,7 +382,7 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
ret = of_property_read_u32(dt_node, "ref-clock-frequency",
&pdev_data->ref_clock_freq);
- if (IS_ERR_VALUE(ret)) {
+ if (ret) {
dev_err(glue->dev,
"can't get reference clock frequency (%d)\n", ret);
return ret;
@@ -425,7 +425,7 @@ static int wl1271_probe(struct spi_device *spi)
}
ret = wlcore_probe_of(spi, glue, &pdev_data);
- if (IS_ERR_VALUE(ret)) {
+ if (ret) {
dev_err(glue->dev,
"can't get device tree parameters (%d)\n", ret);
return ret;
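The two IS_ERR_VALUE(ret) → if (ret) conversions above reflect that of_property_read_u32() returns 0 on success or a negative errno in a plain int; IS_ERR_VALUE() is meant for unsigned long values in the error-pointer range, and applying it to an int invites sign-extension surprises on 64-bit builds. The idiomatic check, in sketch form (node and dev stand in for whatever the caller has in scope):

    u32 freq;
    int ret;

    ret = of_property_read_u32(node, "ref-clock-frequency", &freq);
    if (ret) {               /* 0 == success, negative == errno */
        dev_err(dev, "can't get reference clock frequency (%d)\n", ret);
        return ret;
    }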
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index f0ac36139..c1b8e4e9d 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -453,7 +453,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
}
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
- enum ieee80211_band rate_band)
+ enum nl80211_band rate_band)
{
struct ieee80211_supported_band *band;
u32 enabled_rates = 0;
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 79cb3ff8b..e2ba62d92 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -246,9 +246,9 @@ int wlcore_tx_complete(struct wl1271 *wl);
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl12xx_tx_reset(struct wl1271 *wl);
void wl1271_tx_flush(struct wl1271 *wl);
-u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
+u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
- enum ieee80211_band rate_band);
+ enum nl80211_band rate_band);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 72c31a8ed..8f28aa022 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -342,7 +342,7 @@ struct wl1271 {
struct wl12xx_vif *sched_vif;
/* The current band */
- enum ieee80211_band band;
+ enum nl80211_band band;
struct completion *elp_compl;
struct delayed_work elp_work;
@@ -517,7 +517,7 @@ void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct wl1271_station *wl_sta, bool in_conn);
static inline void
-wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
+wlcore_set_ht_cap(struct wl1271 *wl, enum nl80211_band band,
struct ieee80211_sta_ht_cap *ht_cap)
{
memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap));
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 23507ceb4..7d43b8057 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -392,7 +392,7 @@ struct wl12xx_vif {
u8 ssid_len;
/* The current band */
- enum ieee80211_band band;
+ enum nl80211_band band;
int channel;
enum nl80211_channel_type channel_type;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index d5c371d77..13fd734b6 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1287,7 +1287,7 @@ static void wl3501_tx_timeout(struct net_device *dev)
printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n",
dev->name, rc);
else {
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
}
@@ -1454,7 +1454,7 @@ static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info,
struct wl3501_card *this = netdev_priv(dev);
wrqu->freq.m = 100000 *
- ieee80211_channel_to_frequency(this->chan, IEEE80211_BAND_2GHZ);
+ ieee80211_channel_to_frequency(this->chan, NL80211_BAND_2GHZ);
wrqu->freq.e = 1;
return 0;
}
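The dev->trans_start = jiffies → netif_trans_update(dev) substitution here (and in zd1201.c below) is part of a 4.7-wide treewide conversion: the transmit timestamp moved into the per-queue struct netdev_queue, and the helper in include/linux/netdevice.h updates queue 0's copy, roughly:

    static inline void netif_trans_update(struct net_device *dev)
    {
        struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

        if (txq->trans_start != jiffies)
            txq->trans_start = jiffies;
    }

so drivers stop touching the legacy net_device field directly.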
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index 79d6f4b88..799f70f2d 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -844,7 +844,7 @@ static void zd1201_tx_timeout(struct net_device *dev)
usb_unlink_urb(zd->tx_urb);
dev->stats.tx_errors++;
/* Restart the timeout to quiet the watchdog: */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
static int zd1201_set_mac_address(struct net_device *dev, void *p)
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index e539d9b1b..3e37a045f 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -1068,7 +1068,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
}
stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
- stats.band = IEEE80211_BAND_2GHZ;
+ stats.band = NL80211_BAND_2GHZ;
stats.signal = zd_check_signal(hw, status->signal_strength);
rate = zd_rx_rate(buffer, status);
@@ -1395,7 +1395,7 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
mac->band.n_channels = ARRAY_SIZE(zd_channels);
mac->band.channels = mac->channels;
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = &mac->band;
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile
index e346e8125..11e02be9d 100644
--- a/drivers/net/xen-netback/Makefile
+++ b/drivers/net/xen-netback/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o
-xen-netback-y := netback.o xenbus.o interface.o
+xen-netback-y := netback.o xenbus.o interface.o hash.o
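The new hash.o added here implements the receive hashing shown below, built around xen_netif_toeplitz_hash() (made available by the XEN_NETIF_DEFINE_TOEPLITZ define in hash.c). For reference, a minimal userspace sketch of the standard Toeplitz algorithm — walk the input MSB-first, XORing in a 32-bit sliding window of the key for every set bit. toeplitz_hash is a hypothetical standalone version, not the in-kernel helper:

    #include <stdint.h>

    static uint32_t toeplitz_hash(const uint8_t *key, unsigned int keylen,
                                  const uint8_t *data, unsigned int datalen)
    {
        uint32_t window = 0, hash = 0;
        unsigned int i, j, bit;

        /* Seed the window with the first 32 key bits. */
        for (i = 0; i < 4 && i < keylen; i++)
            window |= (uint32_t)key[i] << (24 - 8 * i);

        for (i = 0; i < datalen; i++) {
            for (j = 0; j < 8; j++) {
                if (data[i] & (0x80 >> j))
                    hash ^= window;

                /* Slide: shift left, pull in key bit (8*i + j + 32). */
                window <<= 1;
                bit = 8 * i + j + 32;
                if (bit / 8 < keylen &&
                    (key[bit / 8] & (0x80 >> (bit % 8))))
                    window |= 1;
            }
        }
        return hash;
    }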
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index f44b38846..84d6cbdd1 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -220,6 +220,35 @@ struct xenvif_mcast_addr {
#define XEN_NETBK_MCAST_MAX 64
+#define XEN_NETBK_MAX_HASH_KEY_SIZE 40
+#define XEN_NETBK_MAX_HASH_MAPPING_SIZE 128
+#define XEN_NETBK_HASH_TAG_SIZE 40
+
+struct xenvif_hash_cache_entry {
+ struct list_head link;
+ struct rcu_head rcu;
+ u8 tag[XEN_NETBK_HASH_TAG_SIZE];
+ unsigned int len;
+ u32 val;
+ int seq;
+};
+
+struct xenvif_hash_cache {
+ spinlock_t lock;
+ struct list_head list;
+ unsigned int count;
+ atomic_t seq;
+};
+
+struct xenvif_hash {
+ unsigned int alg;
+ u32 flags;
+ u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
+ u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
+ unsigned int size;
+ struct xenvif_hash_cache cache;
+};
+
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
@@ -251,6 +280,8 @@ struct xenvif {
unsigned int num_queues; /* active queues, resource allocated */
unsigned int stalled_queues;
+ struct xenvif_hash hash;
+
struct xenbus_watch credit_watch;
struct xenbus_watch mcast_ctrl_watch;
@@ -260,6 +291,11 @@ struct xenvif {
struct dentry *xenvif_dbg_root;
#endif
+ struct xen_netif_ctrl_back_ring ctrl;
+ struct task_struct *ctrl_task;
+ wait_queue_head_t ctrl_wq;
+ unsigned int ctrl_irq;
+
/* Miscellaneous private stuff. */
struct net_device *dev;
};
@@ -285,10 +321,15 @@ struct xenvif *xenvif_alloc(struct device *parent,
int xenvif_init_queue(struct xenvif_queue *queue);
void xenvif_deinit_queue(struct xenvif_queue *queue);
-int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
- unsigned long rx_ring_ref, unsigned int tx_evtchn,
- unsigned int rx_evtchn);
-void xenvif_disconnect(struct xenvif *vif);
+int xenvif_connect_data(struct xenvif_queue *queue,
+ unsigned long tx_ring_ref,
+ unsigned long rx_ring_ref,
+ unsigned int tx_evtchn,
+ unsigned int rx_evtchn);
+void xenvif_disconnect_data(struct xenvif *vif);
+int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
+ unsigned int evtchn);
+void xenvif_disconnect_ctrl(struct xenvif *vif);
void xenvif_free(struct xenvif *vif);
int xenvif_xenbus_init(void);
@@ -300,10 +341,10 @@ int xenvif_queue_stopped(struct xenvif_queue *queue);
void xenvif_wake_queue(struct xenvif_queue *queue);
/* (Un)Map communication rings. */
-void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
-int xenvif_map_frontend_rings(struct xenvif_queue *queue,
- grant_ref_t tx_ring_ref,
- grant_ref_t rx_ring_ref);
+void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
+int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
+ grant_ref_t tx_ring_ref,
+ grant_ref_t rx_ring_ref);
/* Check for SKBs from frontend and schedule backend processing */
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
@@ -318,6 +359,8 @@ void xenvif_kick_thread(struct xenvif_queue *queue);
int xenvif_dealloc_kthread(void *data);
+int xenvif_ctrl_kthread(void *data);
+
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
@@ -341,6 +384,7 @@ extern bool separate_tx_rx_irq;
extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;
+extern unsigned int xenvif_hash_cache_size;
#ifdef CONFIG_DEBUG_FS
extern struct dentry *xen_netback_dbg_root;
@@ -354,4 +398,18 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
void xenvif_mcast_addr_list_free(struct xenvif *vif);
+/* Hash */
+void xenvif_init_hash(struct xenvif *vif);
+void xenvif_deinit_hash(struct xenvif *vif);
+
+u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg);
+u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags);
+u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags);
+u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len);
+u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size);
+u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
+ u32 off);
+
+void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);
+
#endif /* __XEN_NETBACK__COMMON_H__ */
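common.h above declares the per-vif hash cache whose writer side (xenvif_add_hash / xenvif_flush_hash) appears in hash.c below: additions and evictions take hash.cache.lock and retire entries through call_rcu(). The matching lookup side is lock-free; in sketch form this is the general RCU list pattern, not the literal body of xenvif_find_hash():

    rcu_read_lock();
    list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
        if (entry->len == len && memcmp(entry->tag, data, len) == 0) {
            val = entry->val;    /* cached Toeplitz result */
            found = true;
            break;
        }
    }
    rcu_read_unlock();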
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
new file mode 100644
index 000000000..fb87cb39a
--- /dev/null
+++ b/drivers/net/xen-netback/hash.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2016 Citrix Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define XEN_NETIF_DEFINE_TOEPLITZ
+
+#include "common.h"
+#include <linux/vmalloc.h>
+#include <linux/rculist.h>
+
+static void xenvif_del_hash(struct rcu_head *rcu)
+{
+ struct xenvif_hash_cache_entry *entry;
+
+ entry = container_of(rcu, struct xenvif_hash_cache_entry, rcu);
+
+ kfree(entry);
+}
+
+static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
+ unsigned int len, u32 val)
+{
+ struct xenvif_hash_cache_entry *new, *entry, *oldest;
+ unsigned long flags;
+ bool found;
+
+ new = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!new)
+ return;
+
+ memcpy(new->tag, tag, len);
+ new->len = len;
+ new->val = val;
+
+ spin_lock_irqsave(&vif->hash.cache.lock, flags);
+
+ found = false;
+ oldest = NULL;
+ list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+ /* Make sure we don't add duplicate entries */
+ if (entry->len == len &&
+ memcmp(entry->tag, tag, len) == 0)
+ found = true;
+ if (!oldest || entry->seq < oldest->seq)
+ oldest = entry;
+ }
+
+ if (!found) {
+ new->seq = atomic_inc_return(&vif->hash.cache.seq);
+ list_add_rcu(&new->link, &vif->hash.cache.list);
+
+ if (++vif->hash.cache.count > xenvif_hash_cache_size) {
+ list_del_rcu(&oldest->link);
+ vif->hash.cache.count--;
+ call_rcu(&oldest->rcu, xenvif_del_hash);
+ }
+ }
+
+ spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
+
+ if (found)
+ kfree(new);
+}
+
+static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
+ unsigned int len)
+{
+ u32 val;
+
+ val = xen_netif_toeplitz_hash(vif->hash.key,
+ sizeof(vif->hash.key),
+ data, len);
+
+ if (xenvif_hash_cache_size != 0)
+ xenvif_add_hash(vif, data, len, val);
+
+ return val;
+}
+
+static void xenvif_flush_hash(struct xenvif *vif)
+{
+ struct xenvif_hash_cache_entry *entry;
+ unsigned long flags;
+
+ if (xenvif_hash_cache_size == 0)
+ return;
+
+ spin_lock_irqsave(&vif->hash.cache.lock, flags);
+
+ list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+ list_del_rcu(&entry->link);
+ vif->hash.cache.count--;
+ call_rcu(&entry->rcu, xenvif_del_hash);
+ }
+
+ spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
+}
+
+static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
+ unsigned int len)
+{
+ struct xenvif_hash_cache_entry *entry;
+ u32 val;
+ bool found;
+
+ if (len >= XEN_NETBK_HASH_TAG_SIZE)
+ return 0;
+
+ if (xenvif_hash_cache_size == 0)
+ return xenvif_new_hash(vif, data, len);
+
+ rcu_read_lock();
+
+ found = false;
+
+ list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+ if (entry->len == len &&
+ memcmp(entry->tag, data, len) == 0) {
+ val = entry->val;
+ entry->seq = atomic_inc_return(&vif->hash.cache.seq);
+ found = true;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+
+ if (!found)
+ val = xenvif_new_hash(vif, data, len);
+
+ return val;
+}
+
+void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
+{
+ struct flow_keys flow;
+ u32 hash = 0;
+ enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
+ u32 flags = vif->hash.flags;
+ bool has_tcp_hdr;
+
+ /* Quick rejection test: If the network protocol doesn't
+ * correspond to any enabled hash type then there's no point
+ * in parsing the packet header.
+ */
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV4))
+ break;
+
+ goto done;
+
+ case htons(ETH_P_IPV6):
+ if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV6))
+ break;
+
+ goto done;
+
+ default:
+ goto done;
+ }
+
+ memset(&flow, 0, sizeof(flow));
+ if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
+ goto done;
+
+ has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) &&
+ !(flow.control.flags & FLOW_DIS_IS_FRAGMENT);
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (has_tcp_hdr &&
+ (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) {
+ u8 data[12];
+
+ memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
+ memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
+ memcpy(&data[8], &flow.ports.src, 2);
+ memcpy(&data[10], &flow.ports.dst, 2);
+
+ hash = xenvif_find_hash(vif, data, sizeof(data));
+ type = PKT_HASH_TYPE_L4;
+ } else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) {
+ u8 data[8];
+
+ memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
+ memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
+
+ hash = xenvif_find_hash(vif, data, sizeof(data));
+ type = PKT_HASH_TYPE_L3;
+ }
+
+ break;
+
+ case htons(ETH_P_IPV6):
+ if (has_tcp_hdr &&
+ (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) {
+ u8 data[36];
+
+ memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
+ memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
+ memcpy(&data[32], &flow.ports.src, 2);
+ memcpy(&data[34], &flow.ports.dst, 2);
+
+ hash = xenvif_find_hash(vif, data, sizeof(data));
+ type = PKT_HASH_TYPE_L4;
+ } else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) {
+ u8 data[32];
+
+ memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
+ memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
+
+ hash = xenvif_find_hash(vif, data, sizeof(data));
+ type = PKT_HASH_TYPE_L3;
+ }
+
+ break;
+ }
+
+done:
+ if (type == PKT_HASH_TYPE_NONE)
+ skb_clear_hash(skb);
+ else
+ __skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
+}
+
+u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
+{
+ switch (alg) {
+ case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
+ case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
+ break;
+
+ default:
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+ }
+
+ vif->hash.alg = alg;
+
+ return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
+{
+ if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+ return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
+
+ *flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
+
+ return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
+{
+ if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ vif->hash.flags = flags;
+
+ return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
+{
+ u8 *key = vif->hash.key;
+ struct gnttab_copy copy_op = {
+ .source.u.ref = gref,
+ .source.domid = vif->domid,
+ .dest.u.gmfn = virt_to_gfn(key),
+ .dest.domid = DOMID_SELF,
+ .dest.offset = xen_offset_in_page(key),
+ .len = len,
+ .flags = GNTCOPY_source_gref
+ };
+
+ if (len > XEN_NETBK_MAX_HASH_KEY_SIZE)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ if (copy_op.len != 0) {
+ gnttab_batch_copy(&copy_op, 1);
+
+ if (copy_op.status != GNTST_okay)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+ }
+
+ /* Clear any remaining key octets */
+ if (len < XEN_NETBK_MAX_HASH_KEY_SIZE)
+ memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len);
+
+ xenvif_flush_hash(vif);
+
+ return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
+{
+ if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ vif->hash.size = size;
+ memset(vif->hash.mapping, 0, sizeof(u32) * size);
+
+ return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
+ u32 off)
+{
+ u32 *mapping = &vif->hash.mapping[off];
+ struct gnttab_copy copy_op = {
+ .source.u.ref = gref,
+ .source.domid = vif->domid,
+ .dest.u.gmfn = virt_to_gfn(mapping),
+ .dest.domid = DOMID_SELF,
+ .dest.offset = xen_offset_in_page(mapping),
+ .len = len * sizeof(u32),
+ .flags = GNTCOPY_source_gref
+ };
+
+ if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ while (len-- != 0)
+ if (mapping[off++] >= vif->num_queues)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ if (copy_op.len != 0) {
+ gnttab_batch_copy(&copy_op, 1);
+
+ if (copy_op.status != GNTST_okay)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+ }
+
+ return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+void xenvif_init_hash(struct xenvif *vif)
+{
+ if (xenvif_hash_cache_size == 0)
+ return;
+
+ spin_lock_init(&vif->hash.cache.lock);
+ INIT_LIST_HEAD(&vif->hash.cache.list);
+}
+
+void xenvif_deinit_hash(struct xenvif *vif)
+{
+ xenvif_flush_hash(vif);
+}
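
The hash cache and packet-tuple code above ultimately feed xen_netif_toeplitz_hash(), which the Xen public headers supply when XEN_NETIF_DEFINE_TOEPLITZ is defined before inclusion (see the top of hash.c). For reference, a minimal sketch of the standard Toeplitz construction, assuming the usual 64-bit sliding-window formulation; this is illustrative, not the header's exact code:

	#include <linux/types.h>	/* u8/u32/u64 */

	static u32 toeplitz_hash(const u8 *key, unsigned int keylen,
				 const u8 *buf, unsigned int buflen)
	{
		unsigned int keyi, bufi;
		u64 prefix = 0;
		u64 hash = 0;

		/* Pre-load the sliding window with the first 8 key bytes. */
		for (keyi = 0; keyi < 8; keyi++) {
			prefix <<= 8;
			prefix |= (keyi < keylen) ? key[keyi] : 0;
		}

		for (bufi = 0; bufi < buflen; bufi++) {
			u8 byte = buf[bufi];
			unsigned int bit;

			for (bit = 0; bit < 8; bit++) {
				if (byte & 0x80)
					hash ^= prefix;
				prefix <<= 1;
				byte <<= 1;
			}

			/* The window has shifted out 8 bits; feed in the
			 * next key byte.
			 */
			prefix |= (keyi < keylen) ? key[keyi] : 0;
			keyi++;
		}

		/* The accumulated result sits in the upper 32 bits. */
		return hash >> 32;
	}

For the IPv4 TCP case in xenvif_set_skb_hash(), buf would be the 12-byte src/dst address and port tuple, and key whatever the frontend installed via xenvif_set_hash_key().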
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f5231a2dd..83deeebfc 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -128,6 +128,15 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+irqreturn_t xenvif_ctrl_interrupt(int irq, void *dev_id)
+{
+ struct xenvif *vif = dev_id;
+
+ wake_up(&vif->ctrl_wq);
+
+ return IRQ_HANDLED;
+}
+
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
struct net_device *dev = queue->vif->dev;
@@ -142,6 +151,33 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}
+static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv,
+ select_queue_fallback_t fallback)
+{
+ struct xenvif *vif = netdev_priv(dev);
+ unsigned int size = vif->hash.size;
+
+ if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) {
+ u16 index = fallback(dev, skb) % dev->real_num_tx_queues;
+
+ /* Make sure there is no hash information in the socket
+	 * buffer, otherwise it would be incorrectly forwarded
+ * to the frontend.
+ */
+ skb_clear_hash(skb);
+
+ return index;
+ }
+
+ xenvif_set_skb_hash(vif, skb);
+
+ if (size == 0)
+ return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
+
+ return vif->hash.mapping[skb_get_hash_raw(skb) % size];
+}
+
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
@@ -386,6 +422,7 @@ static const struct ethtool_ops xenvif_ethtool_ops = {
};
static const struct net_device_ops xenvif_netdev_ops = {
+ .ndo_select_queue = xenvif_select_queue,
.ndo_start_xmit = xenvif_start_xmit,
.ndo_get_stats = xenvif_get_stats,
.ndo_open = xenvif_open,
@@ -527,9 +564,69 @@ void xenvif_carrier_on(struct xenvif *vif)
rtnl_unlock();
}
-int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
- unsigned long rx_ring_ref, unsigned int tx_evtchn,
- unsigned int rx_evtchn)
+int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
+ unsigned int evtchn)
+{
+ struct net_device *dev = vif->dev;
+ void *addr;
+ struct xen_netif_ctrl_sring *shared;
+ struct task_struct *task;
+ int err = -ENOMEM;
+
+ err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+ &ring_ref, 1, &addr);
+ if (err)
+ goto err;
+
+ shared = (struct xen_netif_ctrl_sring *)addr;
+ BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
+
+ init_waitqueue_head(&vif->ctrl_wq);
+
+ err = bind_interdomain_evtchn_to_irqhandler(vif->domid, evtchn,
+ xenvif_ctrl_interrupt,
+ 0, dev->name, vif);
+ if (err < 0)
+ goto err_unmap;
+
+ vif->ctrl_irq = err;
+
+ xenvif_init_hash(vif);
+
+ task = kthread_create(xenvif_ctrl_kthread, (void *)vif,
+ "%s-control", dev->name);
+ if (IS_ERR(task)) {
+ pr_warn("Could not allocate kthread for %s\n", dev->name);
+ err = PTR_ERR(task);
+ goto err_deinit;
+ }
+
+ get_task_struct(task);
+ vif->ctrl_task = task;
+
+ wake_up_process(vif->ctrl_task);
+
+ return 0;
+
+err_deinit:
+ xenvif_deinit_hash(vif);
+ unbind_from_irqhandler(vif->ctrl_irq, vif);
+ vif->ctrl_irq = 0;
+
+err_unmap:
+ xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+ vif->ctrl.sring);
+ vif->ctrl.sring = NULL;
+
+err:
+ return err;
+}
+
+int xenvif_connect_data(struct xenvif_queue *queue,
+ unsigned long tx_ring_ref,
+ unsigned long rx_ring_ref,
+ unsigned int tx_evtchn,
+ unsigned int rx_evtchn)
{
struct task_struct *task;
int err = -ENOMEM;
@@ -538,7 +635,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
BUG_ON(queue->task);
BUG_ON(queue->dealloc_task);
- err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
+ err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
+ rx_ring_ref);
if (err < 0)
goto err;
@@ -614,7 +712,7 @@ err_tx_unbind:
unbind_from_irqhandler(queue->tx_irq, queue);
queue->tx_irq = 0;
err_unmap:
- xenvif_unmap_frontend_rings(queue);
+ xenvif_unmap_frontend_data_rings(queue);
netif_napi_del(&queue->napi);
err:
module_put(THIS_MODULE);
@@ -634,7 +732,7 @@ void xenvif_carrier_off(struct xenvif *vif)
rtnl_unlock();
}
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_disconnect_data(struct xenvif *vif)
{
struct xenvif_queue *queue = NULL;
unsigned int num_queues = vif->num_queues;
@@ -668,12 +766,33 @@ void xenvif_disconnect(struct xenvif *vif)
queue->tx_irq = 0;
}
- xenvif_unmap_frontend_rings(queue);
+ xenvif_unmap_frontend_data_rings(queue);
}
xenvif_mcast_addr_list_free(vif);
}
+void xenvif_disconnect_ctrl(struct xenvif *vif)
+{
+ if (vif->ctrl_task) {
+ kthread_stop(vif->ctrl_task);
+ put_task_struct(vif->ctrl_task);
+ vif->ctrl_task = NULL;
+ }
+
+ if (vif->ctrl_irq) {
+ xenvif_deinit_hash(vif);
+ unbind_from_irqhandler(vif->ctrl_irq, vif);
+ vif->ctrl_irq = 0;
+ }
+
+ if (vif->ctrl.sring) {
+ xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+ vif->ctrl.sring);
+ vif->ctrl.sring = NULL;
+ }
+}
+
/* Reverse the relevant parts of xenvif_init_queue().
* Used for queue teardown from xenvif_free(), and on the
* error handling paths in xenbus.c:connect().
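
Taken together with the hash code, xenvif_select_queue() reduces to a two-level lookup once the frontend has configured hashing. Schematically (a sketch with placeholder locals; the real logic is in the hunk above):

	u32 hash = skb_get_hash_raw(skb);	/* set by xenvif_set_skb_hash() */
	u16 queue;

	if (mapping_size == 0)			/* i.e. vif->hash.size */
		queue = hash % dev->real_num_tx_queues;	/* no table: plain modulo */
	else
		queue = mapping[hash % mapping_size];	/* frontend-supplied table */

The indirection table lets the frontend rebalance flows without rekeying: it rewrites mapping[] via XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING while the hash of each flow stays stable.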
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4412a57ec..edbae0b1e 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -89,6 +89,11 @@ module_param(fatal_skb_slots, uint, 0444);
*/
#define XEN_NETBACK_TX_COPY_LEN 128
+/* This is the maximum number of flows in the hash cache. */
+#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
+unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
+module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
+MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
u8 status);
@@ -163,6 +168,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
if (skb_is_gso(skb))
needed++;
+ if (skb->sw_hash)
+ needed++;
do {
prod = queue->rx.sring->req_prod;
@@ -280,6 +287,8 @@ struct gop_frag_copy {
struct xenvif_rx_meta *meta;
int head;
int gso_type;
+ int protocol;
+ int hash_present;
struct page *page;
};
@@ -326,8 +335,15 @@ static void xenvif_setup_copy_gop(unsigned long gfn,
npo->copy_off += *len;
info->meta->size += *len;
+ if (!info->head)
+ return;
+
/* Leave a gap for the GSO descriptor. */
- if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
+ if ((1 << info->gso_type) & queue->vif->gso_mask)
+ queue->rx.req_cons++;
+
+ /* Leave a gap for the hash extra segment. */
+ if (info->hash_present)
queue->rx.req_cons++;
info->head = 0; /* There must be something in this buffer now */
@@ -362,6 +378,11 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
.npo = npo,
.head = *head,
.gso_type = XEN_NETIF_GSO_TYPE_NONE,
+ /* xenvif_set_skb_hash() will have either set a s/w
+ * hash or cleared the hash depending on
+		 * whether the frontend wants a hash for this skb.
+ */
+ .hash_present = skb->sw_hash,
};
unsigned long bytes;
@@ -550,6 +571,7 @@ void xenvif_kick_thread(struct xenvif_queue *queue)
static void xenvif_rx_action(struct xenvif_queue *queue)
{
+ struct xenvif *vif = queue->vif;
s8 status;
u16 flags;
struct xen_netif_rx_response *resp;
@@ -585,9 +607,10 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
+ struct xen_netif_extra_info *extra = NULL;
if ((1 << queue->meta[npo.meta_cons].gso_type) &
- queue->vif->gso_prefix_mask) {
+ vif->gso_prefix_mask) {
resp = RING_GET_RESPONSE(&queue->rx,
queue->rx.rsp_prod_pvt++);
@@ -605,7 +628,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
queue->stats.tx_bytes += skb->len;
queue->stats.tx_packets++;
- status = xenvif_check_gop(queue->vif,
+ status = xenvif_check_gop(vif,
XENVIF_RX_CB(skb)->meta_slots_used,
&npo);
@@ -627,21 +650,57 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
flags);
if ((1 << queue->meta[npo.meta_cons].gso_type) &
- queue->vif->gso_mask) {
- struct xen_netif_extra_info *gso =
- (struct xen_netif_extra_info *)
+ vif->gso_mask) {
+ extra = (struct xen_netif_extra_info *)
RING_GET_RESPONSE(&queue->rx,
queue->rx.rsp_prod_pvt++);
resp->flags |= XEN_NETRXF_extra_info;
- gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
- gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
- gso->u.gso.pad = 0;
- gso->u.gso.features = 0;
+ extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+ extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
+ extra->u.gso.pad = 0;
+ extra->u.gso.features = 0;
- gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
- gso->flags = 0;
+ extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
+ extra->flags = 0;
+ }
+
+ if (skb->sw_hash) {
+ /* Since the skb got here via xenvif_select_queue()
+ * we know that the hash has been re-calculated
+ * according to a configuration set by the frontend
+ * and therefore we know that it is legitimate to
+ * pass it to the frontend.
+ */
+ if (resp->flags & XEN_NETRXF_extra_info)
+ extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+ else
+ resp->flags |= XEN_NETRXF_extra_info;
+
+ extra = (struct xen_netif_extra_info *)
+ RING_GET_RESPONSE(&queue->rx,
+ queue->rx.rsp_prod_pvt++);
+
+ extra->u.hash.algorithm =
+ XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
+
+ if (skb->l4_hash)
+ extra->u.hash.type =
+ skb->protocol == htons(ETH_P_IP) ?
+ _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
+ _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
+ else
+ extra->u.hash.type =
+ skb->protocol == htons(ETH_P_IP) ?
+ _XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
+ _XEN_NETIF_CTRL_HASH_TYPE_IPV6;
+
+ *(uint32_t *)extra->u.hash.value =
+ skb_get_hash_raw(skb);
+
+ extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
+ extra->flags = 0;
}
xenvif_add_frag_responses(queue, status,
@@ -1451,6 +1510,33 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
}
}
+ if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
+ struct xen_netif_extra_info *extra;
+ enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
+
+ extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
+
+ switch (extra->u.hash.type) {
+ case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
+ case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
+ type = PKT_HASH_TYPE_L3;
+ break;
+
+ case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
+ case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
+ type = PKT_HASH_TYPE_L4;
+ break;
+
+ default:
+ break;
+ }
+
+ if (type != PKT_HASH_TYPE_NONE)
+ skb_set_hash(skb,
+ *(u32 *)extra->u.hash.value,
+ type);
+ }
+
XENVIF_TX_CB(skb)->pending_idx = pending_idx;
__skb_put(skb, data_len);
@@ -1926,7 +2012,7 @@ static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
return queue->dealloc_cons != queue->dealloc_prod;
}
-void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
+void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
{
if (queue->tx.sring)
xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
@@ -1936,9 +2022,9 @@ void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
queue->rx.sring);
}
-int xenvif_map_frontend_rings(struct xenvif_queue *queue,
- grant_ref_t tx_ring_ref,
- grant_ref_t rx_ring_ref)
+int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
+ grant_ref_t tx_ring_ref,
+ grant_ref_t rx_ring_ref)
{
void *addr;
struct xen_netif_tx_sring *txs;
@@ -1965,7 +2051,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
return 0;
err:
- xenvif_unmap_frontend_rings(queue);
+ xenvif_unmap_frontend_data_rings(queue);
return err;
}
@@ -2164,6 +2250,135 @@ int xenvif_dealloc_kthread(void *data)
return 0;
}
+static void make_ctrl_response(struct xenvif *vif,
+ const struct xen_netif_ctrl_request *req,
+ u32 status, u32 data)
+{
+ RING_IDX idx = vif->ctrl.rsp_prod_pvt;
+ struct xen_netif_ctrl_response rsp = {
+ .id = req->id,
+ .type = req->type,
+ .status = status,
+ .data = data,
+ };
+
+ *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
+ vif->ctrl.rsp_prod_pvt = ++idx;
+}
+
+static void push_ctrl_response(struct xenvif *vif)
+{
+ int notify;
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
+ if (notify)
+ notify_remote_via_irq(vif->ctrl_irq);
+}
+
+static void process_ctrl_request(struct xenvif *vif,
+ const struct xen_netif_ctrl_request *req)
+{
+ u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
+ u32 data = 0;
+
+ switch (req->type) {
+ case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
+ status = xenvif_set_hash_alg(vif, req->data[0]);
+ break;
+
+ case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
+ status = xenvif_get_hash_flags(vif, &data);
+ break;
+
+ case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
+ status = xenvif_set_hash_flags(vif, req->data[0]);
+ break;
+
+ case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
+ status = xenvif_set_hash_key(vif, req->data[0],
+ req->data[1]);
+ break;
+
+ case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
+ status = XEN_NETIF_CTRL_STATUS_SUCCESS;
+ data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
+ break;
+
+ case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
+ status = xenvif_set_hash_mapping_size(vif,
+ req->data[0]);
+ break;
+
+ case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
+ status = xenvif_set_hash_mapping(vif, req->data[0],
+ req->data[1],
+ req->data[2]);
+ break;
+
+ default:
+ break;
+ }
+
+ make_ctrl_response(vif, req, status, data);
+ push_ctrl_response(vif);
+}
+
+static void xenvif_ctrl_action(struct xenvif *vif)
+{
+ for (;;) {
+ RING_IDX req_prod, req_cons;
+
+ req_prod = vif->ctrl.sring->req_prod;
+ req_cons = vif->ctrl.req_cons;
+
+ /* Make sure we can see requests before we process them. */
+ rmb();
+
+ if (req_cons == req_prod)
+ break;
+
+ while (req_cons != req_prod) {
+ struct xen_netif_ctrl_request req;
+
+ RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
+ req_cons++;
+
+ process_ctrl_request(vif, &req);
+ }
+
+ vif->ctrl.req_cons = req_cons;
+ vif->ctrl.sring->req_event = req_cons + 1;
+ }
+}
+
+static bool xenvif_ctrl_work_todo(struct xenvif *vif)
+{
+ if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
+ return 1;
+
+ return 0;
+}
+
+int xenvif_ctrl_kthread(void *data)
+{
+ struct xenvif *vif = data;
+
+ for (;;) {
+ wait_event_interruptible(vif->ctrl_wq,
+ xenvif_ctrl_work_todo(vif) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ while (xenvif_ctrl_work_todo(vif))
+ xenvif_ctrl_action(vif);
+
+ cond_resched();
+ }
+
+ return 0;
+}
+
static int __init netback_init(void)
{
int rc = 0;
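
The loop in xenvif_ctrl_action() above follows the usual Xen shared-ring consumer discipline: read the producer index, issue a read barrier so request contents are not read ahead of it, snapshot each request out of shared memory before trusting it, and re-arm req_event so the frontend knows when to send the next notification. The skeleton of that pattern, with a placeholder handle() standing in for process_ctrl_request():

	RING_IDX prod = ring->sring->req_prod;
	RING_IDX cons = ring->req_cons;

	rmb();				/* index before request bodies */

	while (cons != prod) {
		struct xen_netif_ctrl_request req;

		/* Snapshot; never re-read a request from shared memory. */
		RING_COPY_REQUEST(ring, cons, &req);
		cons++;
		handle(&req);		/* placeholder for request dispatch */
	}

	ring->req_cons = cons;
	ring->sring->req_event = cons + 1;	/* re-arm notification */

RING_COPY_REQUEST() exists precisely because the frontend can rewrite ring contents at any time; validating a request in place would be a double-fetch (TOCTOU) hazard of the kind addressed by XSA-155.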
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index bd182cd55..6a31f2610 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -38,7 +38,8 @@ struct backend_info {
const char *hotplug_script;
};
-static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
+static int connect_data_rings(struct backend_info *be,
+ struct xenvif_queue *queue);
static void connect(struct backend_info *be);
static int read_xenbus_vif_flags(struct backend_info *be);
static int backend_create_xenvif(struct backend_info *be);
@@ -367,6 +368,12 @@ static int netback_probe(struct xenbus_device *dev,
if (err)
pr_debug("Error writing multi-queue-max-queues\n");
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "feature-ctrl-ring",
+ "%u", true);
+ if (err)
+ pr_debug("Error writing feature-ctrl-ring\n");
+
script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
if (IS_ERR(script)) {
err = PTR_ERR(script);
@@ -457,7 +464,8 @@ static void backend_disconnect(struct backend_info *be)
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(be->vif);
#endif /* CONFIG_DEBUG_FS */
- xenvif_disconnect(be->vif);
+ xenvif_disconnect_data(be->vif);
+ xenvif_disconnect_ctrl(be->vif);
}
}
@@ -825,6 +833,48 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
kfree(str);
}
+static int connect_ctrl_ring(struct backend_info *be)
+{
+ struct xenbus_device *dev = be->dev;
+ struct xenvif *vif = be->vif;
+ unsigned int val;
+ grant_ref_t ring_ref;
+ unsigned int evtchn;
+ int err;
+
+ err = xenbus_gather(XBT_NIL, dev->otherend,
+ "ctrl-ring-ref", "%u", &val, NULL);
+ if (err)
+ goto done; /* The frontend does not have a control ring */
+
+ ring_ref = val;
+
+ err = xenbus_gather(XBT_NIL, dev->otherend,
+ "event-channel-ctrl", "%u", &val, NULL);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "reading %s/event-channel-ctrl",
+ dev->otherend);
+ goto fail;
+ }
+
+ evtchn = val;
+
+ err = xenvif_connect_ctrl(vif, ring_ref, evtchn);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "mapping shared-frame %u port %u",
+ ring_ref, evtchn);
+ goto fail;
+ }
+
+done:
+ return 0;
+
+fail:
+ return err;
+}
+
static void connect(struct backend_info *be)
{
int err;
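
connect_ctrl_ring() treats the control ring as strictly optional: a missing ctrl-ring-ref simply means the frontend predates the feature, and the backend carries on without it. A frontend that does support it would publish the two nodes read above; a hypothetical sketch of that side (node names match the reads above, transaction handling elided):

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "ctrl-ring-ref", "%u", ring_ref);
	if (!err)
		err = xenbus_printf(XBT_NIL, dev->nodename,
				    "event-channel-ctrl", "%u", evtchn);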
@@ -861,6 +911,12 @@ static void connect(struct backend_info *be)
xen_register_watchers(dev, be->vif);
read_xenbus_vif_flags(be);
+ err = connect_ctrl_ring(be);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "connecting control ring");
+ return;
+ }
+
/* Use the number of queues requested by the frontend */
be->vif->queues = vzalloc(requested_num_queues *
sizeof(struct xenvif_queue));
@@ -896,11 +952,12 @@ static void connect(struct backend_info *be)
queue->remaining_credit = credit_bytes;
queue->credit_usec = credit_usec;
- err = connect_rings(be, queue);
+ err = connect_data_rings(be, queue);
if (err) {
- /* connect_rings() cleans up after itself on failure,
- * but we need to clean up after xenvif_init_queue() here,
- * and also clean up any previously initialised queues.
+ /* connect_data_rings() cleans up after itself on
+ * failure, but we need to clean up after
+ * xenvif_init_queue() here, and also clean up any
+ * previously initialised queues.
*/
xenvif_deinit_queue(queue);
be->vif->num_queues = queue_index;
@@ -935,15 +992,17 @@ static void connect(struct backend_info *be)
err:
if (be->vif->num_queues > 0)
- xenvif_disconnect(be->vif); /* Clean up existing queues */
+ xenvif_disconnect_data(be->vif); /* Clean up existing queues */
vfree(be->vif->queues);
be->vif->queues = NULL;
be->vif->num_queues = 0;
+ xenvif_disconnect_ctrl(be->vif);
return;
}
-static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
+static int connect_data_rings(struct backend_info *be,
+ struct xenvif_queue *queue)
{
struct xenbus_device *dev = be->dev;
unsigned int num_queues = queue->vif->num_queues;
@@ -1007,8 +1066,8 @@ static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
}
/* Map the shared frame, irq etc. */
- err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
- tx_evtchn, rx_evtchn);
+ err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
+ tx_evtchn, rx_evtchn);
if (err) {
xenbus_dev_fatal(dev, err,
"mapping shared-frames %lu/%lu port tx %u rx %u",
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 7437c9dfd..ea8321a48 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -5,16 +5,6 @@
menu "Near Field Communication (NFC) devices"
depends on NFC
-config NFC_PN533
- tristate "NXP PN533 USB driver"
- depends on USB
- help
- NXP PN533 USB driver.
- This driver provides support for NFC NXP PN533 devices.
-
- Say Y here to compile support for PN533 devices into the
- kernel or say M to compile it as module (pn533).
-
config NFC_WILINK
tristate "Texas Instruments NFC WiLink driver"
depends on TI_ST && NFC_NCI
@@ -70,6 +60,7 @@ config NFC_PORT100
source "drivers/nfc/fdp/Kconfig"
source "drivers/nfc/pn544/Kconfig"
+source "drivers/nfc/pn533/Kconfig"
source "drivers/nfc/microread/Kconfig"
source "drivers/nfc/nfcmrvl/Kconfig"
source "drivers/nfc/st21nfca/Kconfig"
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 0a99e67da..bab8ef06a 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_NFC_FDP) += fdp/
obj-$(CONFIG_NFC_PN544) += pn544/
obj-$(CONFIG_NFC_MICROREAD) += microread/
-obj-$(CONFIG_NFC_PN533) += pn533.o
+obj-$(CONFIG_NFC_PN533) += pn533/
obj-$(CONFIG_NFC_WILINK) += nfcwilink.o
obj-$(CONFIG_NFC_MEI_PHY) += mei_phy.o
obj-$(CONFIG_NFC_SIM) += nfcsim.o
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index 1cfc92a72..5a26a1189 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -102,7 +102,8 @@ static int fdp_nci_create_conn(struct nci_dev *ndev)
if (r)
return r;
- return nci_get_conn_info_by_id(ndev, 0);
+ return nci_get_conn_info_by_dest_type_params(ndev,
+ FDP_PATCH_CONN_DEST, NULL);
}
static inline int fdp_nci_get_versions(struct nci_dev *ndev)
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 11520f472..36099e557 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -418,7 +418,6 @@ MODULE_DEVICE_TABLE(acpi, acpi_id);
static struct i2c_driver nxp_nci_i2c_driver = {
.driver = {
.name = NXP_NCI_I2C_DRIVER_NAME,
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(acpi_id),
.of_match_table = of_match_ptr(of_nxp_nci_i2c_match),
},
diff --git a/drivers/nfc/pn533/Kconfig b/drivers/nfc/pn533/Kconfig
new file mode 100644
index 000000000..d94122dd3
--- /dev/null
+++ b/drivers/nfc/pn533/Kconfig
@@ -0,0 +1,27 @@
+config NFC_PN533
+ tristate
+ help
+ NXP PN533 core driver.
+ This driver provides core functionality for NXP PN533 NFC devices.
+
+config NFC_PN533_USB
+ tristate "NFC PN533 device support (USB)"
+ depends on USB
+ select NFC_PN533
+ ---help---
+ This module adds support for the NXP pn533 USB interface.
+ Select this if your platform is using the USB bus.
+
+ If you choose to build a module, it'll be called pn533_usb.
+ Say N if unsure.
+
+config NFC_PN533_I2C
+ tristate "NFC PN533 device support (I2C)"
+ depends on I2C
+ select NFC_PN533
+ ---help---
+ This module adds support for the NXP pn533 I2C interface.
+ Select this if your platform is using the I2C bus.
+
+ If you choose to build a module, it'll be called pn533_i2c.
+ Say N if unsure.
diff --git a/drivers/nfc/pn533/Makefile b/drivers/nfc/pn533/Makefile
new file mode 100644
index 000000000..51d24c622
--- /dev/null
+++ b/drivers/nfc/pn533/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for PN533 NFC driver
+#
+pn533_usb-objs = usb.o
+pn533_i2c-objs = i2c.o
+
+obj-$(CONFIG_NFC_PN533) += pn533.o
+obj-$(CONFIG_NFC_PN533_USB) += pn533_usb.o
+obj-$(CONFIG_NFC_PN533_I2C) += pn533_i2c.o
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
new file mode 100644
index 000000000..1dc89248e
--- /dev/null
+++ b/drivers/nfc/pn533/i2c.c
@@ -0,0 +1,281 @@
+/*
+ * Driver for NXP PN533 NFC Chip - I2C transport layer
+ *
+ * Copyright (C) 2011 Instituto Nokia de Tecnologia
+ * Copyright (C) 2012-2013 Tieto Poland
+ * Copyright (C) 2016 HALE electronic
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/nfc.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <net/nfc/nfc.h>
+#include "pn533.h"
+
+#define VERSION "0.1"
+
+#define PN533_I2C_DRIVER_NAME "pn533_i2c"
+
+struct pn533_i2c_phy {
+ struct i2c_client *i2c_dev;
+ struct pn533 *priv;
+
+ bool aborted;
+
+ int hard_fault; /*
+ * < 0 if hardware error occurred (e.g. i2c err)
+ * and prevents normal operation.
+ */
+};
+
+static int pn533_i2c_send_ack(struct pn533 *dev, gfp_t flags)
+{
+ struct pn533_i2c_phy *phy = dev->phy;
+ struct i2c_client *client = phy->i2c_dev;
+ u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
+ /* spec 6.2.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */
+ int rc;
+
+ rc = i2c_master_send(client, ack, 6);
+
+ return rc;
+}
+
+static int pn533_i2c_send_frame(struct pn533 *dev,
+ struct sk_buff *out)
+{
+ struct pn533_i2c_phy *phy = dev->phy;
+ struct i2c_client *client = phy->i2c_dev;
+ int rc;
+
+ if (phy->hard_fault != 0)
+ return phy->hard_fault;
+
+ if (phy->priv == NULL)
+ phy->priv = dev;
+
+ phy->aborted = false;
+
+ print_hex_dump_debug("PN533_i2c TX: ", DUMP_PREFIX_NONE, 16, 1,
+ out->data, out->len, false);
+
+ rc = i2c_master_send(client, out->data, out->len);
+
+ if (rc == -EREMOTEIO) { /* Retry, chip was in power down */
+ usleep_range(6000, 10000);
+ rc = i2c_master_send(client, out->data, out->len);
+ }
+
+ if (rc >= 0) {
+ if (rc != out->len)
+ rc = -EREMOTEIO;
+ else
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static void pn533_i2c_abort_cmd(struct pn533 *dev, gfp_t flags)
+{
+ struct pn533_i2c_phy *phy = dev->phy;
+
+ phy->aborted = true;
+
+ /* An ack will cancel the last issued command */
+ pn533_i2c_send_ack(dev, flags);
+
+ /* schedule cmd_complete_work to finish current command execution */
+ pn533_recv_frame(phy->priv, NULL, -ENOENT);
+}
+
+static int pn533_i2c_read(struct pn533_i2c_phy *phy, struct sk_buff **skb)
+{
+ struct i2c_client *client = phy->i2c_dev;
+ int len = PN533_EXT_FRAME_HEADER_LEN +
+ PN533_STD_FRAME_MAX_PAYLOAD_LEN +
+ PN533_STD_FRAME_TAIL_LEN + 1;
+ int r;
+
+ *skb = alloc_skb(len, GFP_KERNEL);
+ if (*skb == NULL)
+ return -ENOMEM;
+
+ r = i2c_master_recv(client, skb_put(*skb, len), len);
+ if (r != len) {
+ nfc_err(&client->dev, "cannot read. r=%d len=%d\n", r, len);
+ kfree_skb(*skb);
+ return -EREMOTEIO;
+ }
+
+ if (!((*skb)->data[0] & 0x01)) {
+ nfc_err(&client->dev, "READY flag not set");
+ kfree_skb(*skb);
+ return -EBUSY;
+ }
+
+ /* remove READY byte */
+ skb_pull(*skb, 1);
+ /* trim to frame size */
+ skb_trim(*skb, phy->priv->ops->rx_frame_size((*skb)->data));
+
+ return 0;
+}
+
+static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data)
+{
+ struct pn533_i2c_phy *phy = data;
+ struct i2c_client *client;
+ struct sk_buff *skb = NULL;
+ int r;
+
+ if (!phy || irq != phy->i2c_dev->irq) {
+ WARN_ON_ONCE(1);
+ return IRQ_NONE;
+ }
+
+ client = phy->i2c_dev;
+ dev_dbg(&client->dev, "IRQ\n");
+
+ if (phy->hard_fault != 0)
+ return IRQ_HANDLED;
+
+ r = pn533_i2c_read(phy, &skb);
+ if (r == -EREMOTEIO) {
+ phy->hard_fault = r;
+
+ pn533_recv_frame(phy->priv, NULL, -EREMOTEIO);
+
+ return IRQ_HANDLED;
+ } else if ((r == -ENOMEM) || (r == -EBADMSG) || (r == -EBUSY)) {
+ return IRQ_HANDLED;
+ }
+
+ if (!phy->aborted)
+ pn533_recv_frame(phy->priv, skb, 0);
+
+ return IRQ_HANDLED;
+}
+
+static struct pn533_phy_ops i2c_phy_ops = {
+ .send_frame = pn533_i2c_send_frame,
+ .send_ack = pn533_i2c_send_ack,
+ .abort_cmd = pn533_i2c_abort_cmd,
+};
+
+
+static int pn533_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pn533_i2c_phy *phy;
+ struct pn533 *priv;
+ int r = 0;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+ dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
+ return -ENODEV;
+ }
+
+ phy = devm_kzalloc(&client->dev, sizeof(struct pn533_i2c_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->i2c_dev = client;
+ i2c_set_clientdata(client, phy);
+
+ r = request_threaded_irq(client->irq, NULL, pn533_i2c_irq_thread_fn,
+ IRQF_TRIGGER_FALLING |
+ IRQF_SHARED | IRQF_ONESHOT,
+ PN533_I2C_DRIVER_NAME, phy);
+
+ if (r < 0)
+ nfc_err(&client->dev, "Unable to register IRQ handler\n");
+
+ priv = pn533_register_device(PN533_DEVICE_PN532,
+ PN533_NO_TYPE_B_PROTOCOLS,
+ PN533_PROTO_REQ_ACK_RESP,
+ phy, &i2c_phy_ops, NULL,
+ &phy->i2c_dev->dev,
+ &client->dev);
+
+ if (IS_ERR(priv)) {
+ r = PTR_ERR(priv);
+ goto err_register;
+ }
+
+ phy->priv = priv;
+
+ return 0;
+
+err_register:
+ free_irq(client->irq, phy);
+
+ return r;
+}
+
+static int pn533_i2c_remove(struct i2c_client *client)
+{
+ struct pn533_i2c_phy *phy = i2c_get_clientdata(client);
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ pn533_unregister_device(phy->priv);
+
+ free_irq(client->irq, phy);
+
+ return 0;
+}
+
+static const struct of_device_id of_pn533_i2c_match[] = {
+ { .compatible = "nxp,pn533-i2c", },
+ { .compatible = "nxp,pn532-i2c", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_pn533_i2c_match);
+
+static struct i2c_device_id pn533_i2c_id_table[] = {
+ { PN533_I2C_DRIVER_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pn533_i2c_id_table);
+
+static struct i2c_driver pn533_i2c_driver = {
+ .driver = {
+ .name = PN533_I2C_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(of_pn533_i2c_match),
+ },
+ .probe = pn533_i2c_probe,
+ .id_table = pn533_i2c_id_table,
+ .remove = pn533_i2c_remove,
+};
+
+module_i2c_driver(pn533_i2c_driver);
+
+MODULE_AUTHOR("Michael Thalmeier <michael.thalmeier@hale.at>");
+MODULE_DESCRIPTION("PN533 I2C driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
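
The transport split hinges on the pn533_phy_ops contract declared in pn533.h (not shown in this diff): the core builds frames and sequences commands, while each transport only moves bytes and reports completions through pn533_recv_frame(). Judging from i2c_phy_ops above, the contract is along these lines (a sketch inferred from the callbacks, not the header itself):

	struct pn533_phy_ops {
		int  (*send_frame)(struct pn533 *dev, struct sk_buff *out);
		int  (*send_ack)(struct pn533 *dev, gfp_t flags);
		void (*abort_cmd)(struct pn533 *dev, gfp_t flags);
	};

A transport fills this in, passes it to pn533_register_device(), and forwards every received frame (or error) to pn533_recv_frame(), as pn533_i2c_irq_thread_fn() does above.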
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533/pn533.c
index bb3d5ea98..d9c55830b 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1,4 +1,6 @@
/*
+ * Driver for NXP PN533 NFC Chip - core functions
+ *
* Copyright (C) 2011 Instituto Nokia de Tecnologia
* Copyright (C) 2012-2013 Tieto Poland
*
@@ -20,137 +22,18 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/usb.h>
#include <linux/nfc.h>
#include <linux/netdevice.h>
#include <net/nfc/nfc.h>
+#include "pn533.h"
-#define VERSION "0.2"
-
-#define PN533_VENDOR_ID 0x4CC
-#define PN533_PRODUCT_ID 0x2533
-
-#define SCM_VENDOR_ID 0x4E6
-#define SCL3711_PRODUCT_ID 0x5591
-
-#define SONY_VENDOR_ID 0x054c
-#define PASORI_PRODUCT_ID 0x02e1
-
-#define ACS_VENDOR_ID 0x072f
-#define ACR122U_PRODUCT_ID 0x2200
-
-#define PN533_DEVICE_STD 0x1
-#define PN533_DEVICE_PASORI 0x2
-#define PN533_DEVICE_ACR122U 0x3
-
-#define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\
- NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\
- NFC_PROTO_NFC_DEP_MASK |\
- NFC_PROTO_ISO14443_B_MASK)
-
-#define PN533_NO_TYPE_B_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
- NFC_PROTO_MIFARE_MASK | \
- NFC_PROTO_FELICA_MASK | \
- NFC_PROTO_ISO14443_MASK | \
- NFC_PROTO_NFC_DEP_MASK)
-
-static const struct usb_device_id pn533_table[] = {
- { USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID),
- .driver_info = PN533_DEVICE_STD },
- { USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID),
- .driver_info = PN533_DEVICE_STD },
- { USB_DEVICE(SONY_VENDOR_ID, PASORI_PRODUCT_ID),
- .driver_info = PN533_DEVICE_PASORI },
- { USB_DEVICE(ACS_VENDOR_ID, ACR122U_PRODUCT_ID),
- .driver_info = PN533_DEVICE_ACR122U },
- { }
-};
-MODULE_DEVICE_TABLE(usb, pn533_table);
+#define VERSION "0.3"
/* How much time we spend listening for initiators */
#define PN533_LISTEN_TIME 2
/* Delay between each poll frame (ms) */
#define PN533_POLL_INTERVAL 10
-/* Standard pn533 frame definitions (standard and extended)*/
-#define PN533_STD_FRAME_HEADER_LEN (sizeof(struct pn533_std_frame) \
- + 2) /* data[0] TFI, data[1] CC */
-#define PN533_STD_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/
-
-#define PN533_EXT_FRAME_HEADER_LEN (sizeof(struct pn533_ext_frame) \
- + 2) /* data[0] TFI, data[1] CC */
-
-#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
-#define PN533_CMD_DATAFRAME_MAXLEN 240 /* max data length (send) */
-
-/*
- * Max extended frame payload len, excluding TFI and CC
- * which are already in PN533_FRAME_HEADER_LEN.
- */
-#define PN533_STD_FRAME_MAX_PAYLOAD_LEN 263
-
-#define PN533_STD_FRAME_ACK_SIZE 6 /* Preamble (1), SoPC (2), ACK Code (2),
- Postamble (1) */
-#define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen])
-#define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
-/* Half start code (3), LEN (4) should be 0xffff for extended frame */
-#define PN533_STD_IS_EXTENDED(hdr) ((hdr)->datalen == 0xFF \
- && (hdr)->datalen_checksum == 0xFF)
-#define PN533_EXT_FRAME_CHECKSUM(f) (f->data[be16_to_cpu(f->datalen)])
-
-/* start of frame */
-#define PN533_STD_FRAME_SOF 0x00FF
-
-/* standard frame identifier: in/out/error */
-#define PN533_STD_FRAME_IDENTIFIER(f) (f->data[0]) /* TFI */
-#define PN533_STD_FRAME_DIR_OUT 0xD4
-#define PN533_STD_FRAME_DIR_IN 0xD5
-
-/* ACS ACR122 pn533 frame definitions */
-#define PN533_ACR122_TX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_tx_frame) \
- + 2)
-#define PN533_ACR122_TX_FRAME_TAIL_LEN 0
-#define PN533_ACR122_RX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_rx_frame) \
- + 2)
-#define PN533_ACR122_RX_FRAME_TAIL_LEN 2
-#define PN533_ACR122_FRAME_MAX_PAYLOAD_LEN PN533_STD_FRAME_MAX_PAYLOAD_LEN
-
-/* CCID messages types */
-#define PN533_ACR122_PC_TO_RDR_ICCPOWERON 0x62
-#define PN533_ACR122_PC_TO_RDR_ESCAPE 0x6B
-
-#define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83
-
-/* PN533 Commands */
-#define PN533_FRAME_CMD(f) (f->data[1])
-
-#define PN533_CMD_GET_FIRMWARE_VERSION 0x02
-#define PN533_CMD_RF_CONFIGURATION 0x32
-#define PN533_CMD_IN_DATA_EXCHANGE 0x40
-#define PN533_CMD_IN_COMM_THRU 0x42
-#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A
-#define PN533_CMD_IN_ATR 0x50
-#define PN533_CMD_IN_RELEASE 0x52
-#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
-
-#define PN533_CMD_TG_INIT_AS_TARGET 0x8c
-#define PN533_CMD_TG_GET_DATA 0x86
-#define PN533_CMD_TG_SET_DATA 0x8e
-#define PN533_CMD_TG_SET_META_DATA 0x94
-#define PN533_CMD_UNDEF 0xff
-
-#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
-
-/* PN533 Return codes */
-#define PN533_CMD_RET_MASK 0x3F
-#define PN533_CMD_MI_MASK 0x40
-#define PN533_CMD_RET_SUCCESS 0x00
-
-struct pn533;
-
-typedef int (*pn533_send_async_complete_t) (struct pn533 *dev, void *arg,
- struct sk_buff *resp);
-
/* structs for pn533 commands */
/* PN533_CMD_GET_FIRMWARE_VERSION */
@@ -220,19 +103,6 @@ union pn533_cmd_poll_initdata {
} __packed felica;
};
-/* Poll modulations */
-enum {
- PN533_POLL_MOD_106KBPS_A,
- PN533_POLL_MOD_212KBPS_FELICA,
- PN533_POLL_MOD_424KBPS_FELICA,
- PN533_POLL_MOD_106KBPS_JEWEL,
- PN533_POLL_MOD_847KBPS_B,
- PN533_LISTEN_MOD,
-
- __PN533_POLL_MOD_AFTER_LAST,
-};
-#define PN533_POLL_MOD_MAX (__PN533_POLL_MOD_AFTER_LAST - 1)
-
struct pn533_poll_modulations {
struct {
u8 maxtg;
@@ -336,219 +206,6 @@ struct pn533_cmd_jump_dep_response {
#define PN533_INIT_TARGET_RESP_ACTIVE 0x1
#define PN533_INIT_TARGET_RESP_DEP 0x4
-enum pn533_protocol_type {
- PN533_PROTO_REQ_ACK_RESP = 0,
- PN533_PROTO_REQ_RESP
-};
-
-struct pn533 {
- struct usb_device *udev;
- struct usb_interface *interface;
- struct nfc_dev *nfc_dev;
- u32 device_type;
- enum pn533_protocol_type protocol_type;
-
- struct urb *out_urb;
- struct urb *in_urb;
-
- struct sk_buff_head resp_q;
- struct sk_buff_head fragment_skb;
-
- struct workqueue_struct *wq;
- struct work_struct cmd_work;
- struct work_struct cmd_complete_work;
- struct delayed_work poll_work;
- struct work_struct mi_rx_work;
- struct work_struct mi_tx_work;
- struct work_struct mi_tm_rx_work;
- struct work_struct mi_tm_tx_work;
- struct work_struct tg_work;
- struct work_struct rf_work;
-
- struct list_head cmd_queue;
- struct pn533_cmd *cmd;
- u8 cmd_pending;
- struct mutex cmd_lock; /* protects cmd queue */
-
- void *cmd_complete_mi_arg;
- void *cmd_complete_dep_arg;
-
- struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
- u8 poll_mod_count;
- u8 poll_mod_curr;
- u8 poll_dep;
- u32 poll_protocols;
- u32 listen_protocols;
- struct timer_list listen_timer;
- int cancel_listen;
-
- u8 *gb;
- size_t gb_len;
-
- u8 tgt_available_prots;
- u8 tgt_active_prot;
- u8 tgt_mode;
-
- struct pn533_frame_ops *ops;
-};
-
-struct pn533_cmd {
- struct list_head queue;
- u8 code;
- int status;
- struct sk_buff *req;
- struct sk_buff *resp;
- int resp_len;
- pn533_send_async_complete_t complete_cb;
- void *complete_cb_context;
-};
-
-struct pn533_std_frame {
- u8 preamble;
- __be16 start_frame;
- u8 datalen;
- u8 datalen_checksum;
- u8 data[];
-} __packed;
-
-struct pn533_ext_frame { /* Extended Information frame */
- u8 preamble;
- __be16 start_frame;
- __be16 eif_flag; /* fixed to 0xFFFF */
- __be16 datalen;
- u8 datalen_checksum;
- u8 data[];
-} __packed;
-
-struct pn533_frame_ops {
- void (*tx_frame_init)(void *frame, u8 cmd_code);
- void (*tx_frame_finish)(void *frame);
- void (*tx_update_payload_len)(void *frame, int len);
- int tx_header_len;
- int tx_tail_len;
-
- bool (*rx_is_frame_valid)(void *frame, struct pn533 *dev);
- int (*rx_frame_size)(void *frame);
- int rx_header_len;
- int rx_tail_len;
-
- int max_payload_len;
- u8 (*get_cmd_code)(void *frame);
-};
-
-struct pn533_acr122_ccid_hdr {
- u8 type;
- u32 datalen;
- u8 slot;
- u8 seq;
- u8 params[3]; /* 3 msg specific bytes or status, error and 1 specific
- byte for reposnse msg */
- u8 data[]; /* payload */
-} __packed;
-
-struct pn533_acr122_apdu_hdr {
- u8 class;
- u8 ins;
- u8 p1;
- u8 p2;
-} __packed;
-
-struct pn533_acr122_tx_frame {
- struct pn533_acr122_ccid_hdr ccid;
- struct pn533_acr122_apdu_hdr apdu;
- u8 datalen;
- u8 data[]; /* pn533 frame: TFI ... */
-} __packed;
-
-struct pn533_acr122_rx_frame {
- struct pn533_acr122_ccid_hdr ccid;
- u8 data[]; /* pn533 frame : TFI ... */
-} __packed;
-
-static void pn533_acr122_tx_frame_init(void *_frame, u8 cmd_code)
-{
- struct pn533_acr122_tx_frame *frame = _frame;
-
- frame->ccid.type = PN533_ACR122_PC_TO_RDR_ESCAPE;
- frame->ccid.datalen = sizeof(frame->apdu) + 1; /* sizeof(apdu_hdr) +
- sizeof(datalen) */
- frame->ccid.slot = 0;
- frame->ccid.seq = 0;
- frame->ccid.params[0] = 0;
- frame->ccid.params[1] = 0;
- frame->ccid.params[2] = 0;
-
- frame->data[0] = PN533_STD_FRAME_DIR_OUT;
- frame->data[1] = cmd_code;
- frame->datalen = 2; /* data[0] + data[1] */
-
- frame->apdu.class = 0xFF;
- frame->apdu.ins = 0;
- frame->apdu.p1 = 0;
- frame->apdu.p2 = 0;
-}
-
-static void pn533_acr122_tx_frame_finish(void *_frame)
-{
- struct pn533_acr122_tx_frame *frame = _frame;
-
- frame->ccid.datalen += frame->datalen;
-}
-
-static void pn533_acr122_tx_update_payload_len(void *_frame, int len)
-{
- struct pn533_acr122_tx_frame *frame = _frame;
-
- frame->datalen += len;
-}
-
-static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev)
-{
- struct pn533_acr122_rx_frame *frame = _frame;
-
- if (frame->ccid.type != 0x83)
- return false;
-
- if (!frame->ccid.datalen)
- return false;
-
- if (frame->data[frame->ccid.datalen - 2] == 0x63)
- return false;
-
- return true;
-}
-
-static int pn533_acr122_rx_frame_size(void *frame)
-{
- struct pn533_acr122_rx_frame *f = frame;
-
- /* f->ccid.datalen already includes tail length */
- return sizeof(struct pn533_acr122_rx_frame) + f->ccid.datalen;
-}
-
-static u8 pn533_acr122_get_cmd_code(void *frame)
-{
- struct pn533_acr122_rx_frame *f = frame;
-
- return PN533_FRAME_CMD(f);
-}
-
-static struct pn533_frame_ops pn533_acr122_frame_ops = {
- .tx_frame_init = pn533_acr122_tx_frame_init,
- .tx_frame_finish = pn533_acr122_tx_frame_finish,
- .tx_update_payload_len = pn533_acr122_tx_update_payload_len,
- .tx_header_len = PN533_ACR122_TX_FRAME_HEADER_LEN,
- .tx_tail_len = PN533_ACR122_TX_FRAME_TAIL_LEN,
-
- .rx_is_frame_valid = pn533_acr122_is_rx_frame_valid,
- .rx_header_len = PN533_ACR122_RX_FRAME_HEADER_LEN,
- .rx_tail_len = PN533_ACR122_RX_FRAME_TAIL_LEN,
- .rx_frame_size = pn533_acr122_rx_frame_size,
-
- .max_payload_len = PN533_ACR122_FRAME_MAX_PAYLOAD_LEN,
- .get_cmd_code = pn533_acr122_get_cmd_code,
-};
-
/* The rule: value(high byte) + value(low byte) + checksum = 0 */
static inline u8 pn533_ext_checksum(u16 value)
{
@@ -642,8 +299,10 @@ static bool pn533_std_rx_frame_is_valid(void *_frame, struct pn533 *dev)
return true;
}
-static bool pn533_std_rx_frame_is_ack(struct pn533_std_frame *frame)
+bool pn533_rx_frame_is_ack(void *_frame)
{
+ struct pn533_std_frame *frame = _frame;
+
if (frame->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
return false;
@@ -652,6 +311,7 @@ static bool pn533_std_rx_frame_is_ack(struct pn533_std_frame *frame)
return true;
}
+EXPORT_SYMBOL_GPL(pn533_rx_frame_is_ack);
static inline int pn533_std_rx_frame_size(void *frame)
{
@@ -680,6 +340,14 @@ static u8 pn533_std_get_cmd_code(void *frame)
return PN533_FRAME_CMD(f);
}
+bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame)
+{
+ return (dev->ops->get_cmd_code(frame) ==
+ PN533_CMD_RESPONSE(dev->cmd->code));
+}
+EXPORT_SYMBOL_GPL(pn533_rx_frame_is_cmd_response);
+
+
static struct pn533_frame_ops pn533_std_frame_ops = {
.tx_frame_init = pn533_std_tx_frame_init,
.tx_frame_finish = pn533_std_tx_frame_finish,
@@ -696,172 +364,6 @@ static struct pn533_frame_ops pn533_std_frame_ops = {
.get_cmd_code = pn533_std_get_cmd_code,
};
-static bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame)
-{
- return (dev->ops->get_cmd_code(frame) ==
- PN533_CMD_RESPONSE(dev->cmd->code));
-}
-
-static void pn533_recv_response(struct urb *urb)
-{
- struct pn533 *dev = urb->context;
- struct pn533_cmd *cmd = dev->cmd;
- u8 *in_frame;
-
- cmd->status = urb->status;
-
- switch (urb->status) {
- case 0:
- break; /* success */
- case -ECONNRESET:
- case -ENOENT:
- dev_dbg(&dev->interface->dev,
- "The urb has been canceled (status %d)\n",
- urb->status);
- goto sched_wq;
- case -ESHUTDOWN:
- default:
- nfc_err(&dev->interface->dev,
- "Urb failure (status %d)\n", urb->status);
- goto sched_wq;
- }
-
- in_frame = dev->in_urb->transfer_buffer;
-
- dev_dbg(&dev->interface->dev, "Received a frame\n");
- print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
- dev->ops->rx_frame_size(in_frame), false);
-
- if (!dev->ops->rx_is_frame_valid(in_frame, dev)) {
- nfc_err(&dev->interface->dev, "Received an invalid frame\n");
- cmd->status = -EIO;
- goto sched_wq;
- }
-
- if (!pn533_rx_frame_is_cmd_response(dev, in_frame)) {
- nfc_err(&dev->interface->dev,
- "It it not the response to the last command\n");
- cmd->status = -EIO;
- goto sched_wq;
- }
-
-sched_wq:
- queue_work(dev->wq, &dev->cmd_complete_work);
-}
-
-static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags)
-{
- dev->in_urb->complete = pn533_recv_response;
-
- return usb_submit_urb(dev->in_urb, flags);
-}
-
-static void pn533_recv_ack(struct urb *urb)
-{
- struct pn533 *dev = urb->context;
- struct pn533_cmd *cmd = dev->cmd;
- struct pn533_std_frame *in_frame;
- int rc;
-
- cmd->status = urb->status;
-
- switch (urb->status) {
- case 0:
- break; /* success */
- case -ECONNRESET:
- case -ENOENT:
- dev_dbg(&dev->interface->dev,
- "The urb has been stopped (status %d)\n",
- urb->status);
- goto sched_wq;
- case -ESHUTDOWN:
- default:
- nfc_err(&dev->interface->dev,
- "Urb failure (status %d)\n", urb->status);
- goto sched_wq;
- }
-
- in_frame = dev->in_urb->transfer_buffer;
-
- if (!pn533_std_rx_frame_is_ack(in_frame)) {
- nfc_err(&dev->interface->dev, "Received an invalid ack\n");
- cmd->status = -EIO;
- goto sched_wq;
- }
-
- rc = pn533_submit_urb_for_response(dev, GFP_ATOMIC);
- if (rc) {
- nfc_err(&dev->interface->dev,
- "usb_submit_urb failed with result %d\n", rc);
- cmd->status = rc;
- goto sched_wq;
- }
-
- return;
-
-sched_wq:
- queue_work(dev->wq, &dev->cmd_complete_work);
-}
-
-static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags)
-{
- dev->in_urb->complete = pn533_recv_ack;
-
- return usb_submit_urb(dev->in_urb, flags);
-}
-
-static int pn533_send_ack(struct pn533 *dev, gfp_t flags)
-{
- u8 ack[PN533_STD_FRAME_ACK_SIZE] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
- /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */
- int rc;
-
- dev->out_urb->transfer_buffer = ack;
- dev->out_urb->transfer_buffer_length = sizeof(ack);
- rc = usb_submit_urb(dev->out_urb, flags);
-
- return rc;
-}
-
-static int __pn533_send_frame_async(struct pn533 *dev,
- struct sk_buff *out,
- struct sk_buff *in,
- int in_len)
-{
- int rc;
-
- dev->out_urb->transfer_buffer = out->data;
- dev->out_urb->transfer_buffer_length = out->len;
-
- dev->in_urb->transfer_buffer = in->data;
- dev->in_urb->transfer_buffer_length = in_len;
-
- print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
- out->data, out->len, false);
-
- rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
- if (rc)
- return rc;
-
- if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
- /* request for response for sent packet directly */
- rc = pn533_submit_urb_for_response(dev, GFP_ATOMIC);
- if (rc)
- goto error;
- } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
- /* request for ACK if that's the case */
- rc = pn533_submit_urb_for_ack(dev, GFP_KERNEL);
- if (rc)
- goto error;
- }
-
- return 0;
-
-error:
- usb_unlink_urb(dev->out_urb);
- return rc;
-}
-
static void pn533_build_cmd_frame(struct pn533 *dev, u8 cmd_code,
struct sk_buff *skb)
{
@@ -897,7 +399,6 @@ static int pn533_send_async_complete(struct pn533 *dev)
goto done;
}
- skb_put(resp, dev->ops->rx_frame_size(resp->data));
skb_pull(resp, dev->ops->rx_header_len);
skb_trim(resp, resp->len - dev->ops->rx_tail_len);
@@ -910,15 +411,14 @@ done:
}
static int __pn533_send_async(struct pn533 *dev, u8 cmd_code,
- struct sk_buff *req, struct sk_buff *resp,
- int resp_len,
+ struct sk_buff *req,
pn533_send_async_complete_t complete_cb,
void *complete_cb_context)
{
struct pn533_cmd *cmd;
int rc = 0;
- dev_dbg(&dev->interface->dev, "Sending command 0x%x\n", cmd_code);
+ dev_dbg(dev->dev, "Sending command 0x%x\n", cmd_code);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
@@ -926,8 +426,6 @@ static int __pn533_send_async(struct pn533 *dev, u8 cmd_code,
cmd->code = cmd_code;
cmd->req = req;
- cmd->resp = resp;
- cmd->resp_len = resp_len;
cmd->complete_cb = complete_cb;
cmd->complete_cb_context = complete_cb_context;
@@ -936,7 +434,7 @@ static int __pn533_send_async(struct pn533 *dev, u8 cmd_code,
mutex_lock(&dev->cmd_lock);
if (!dev->cmd_pending) {
- rc = __pn533_send_frame_async(dev, req, resp, resp_len);
+ rc = dev->phy_ops->send_frame(dev, req);
if (rc)
goto error;
@@ -945,7 +443,7 @@ static int __pn533_send_async(struct pn533 *dev, u8 cmd_code,
goto unlock;
}
- dev_dbg(&dev->interface->dev, "%s Queueing command 0x%x\n",
+ dev_dbg(dev->dev, "%s Queueing command 0x%x\n",
__func__, cmd_code);
INIT_LIST_HEAD(&cmd->queue);
@@ -965,20 +463,10 @@ static int pn533_send_data_async(struct pn533 *dev, u8 cmd_code,
pn533_send_async_complete_t complete_cb,
void *complete_cb_context)
{
- struct sk_buff *resp;
int rc;
- int resp_len = dev->ops->rx_header_len +
- dev->ops->max_payload_len +
- dev->ops->rx_tail_len;
-
- resp = nfc_alloc_recv_skb(resp_len, GFP_KERNEL);
- if (!resp)
- return -ENOMEM;
- rc = __pn533_send_async(dev, cmd_code, req, resp, resp_len, complete_cb,
+ rc = __pn533_send_async(dev, cmd_code, req, complete_cb,
complete_cb_context);
- if (rc)
- dev_kfree_skb(resp);
return rc;
}
@@ -988,20 +476,10 @@ static int pn533_send_cmd_async(struct pn533 *dev, u8 cmd_code,
pn533_send_async_complete_t complete_cb,
void *complete_cb_context)
{
- struct sk_buff *resp;
int rc;
- int resp_len = dev->ops->rx_header_len +
- dev->ops->max_payload_len +
- dev->ops->rx_tail_len;
-
- resp = alloc_skb(resp_len, GFP_KERNEL);
- if (!resp)
- return -ENOMEM;
- rc = __pn533_send_async(dev, cmd_code, req, resp, resp_len, complete_cb,
+ rc = __pn533_send_async(dev, cmd_code, req, complete_cb,
complete_cb_context);
- if (rc)
- dev_kfree_skb(resp);
return rc;
}
@@ -1019,39 +497,25 @@ static int pn533_send_cmd_direct_async(struct pn533 *dev, u8 cmd_code,
pn533_send_async_complete_t complete_cb,
void *complete_cb_context)
{
- struct sk_buff *resp;
struct pn533_cmd *cmd;
int rc;
- int resp_len = dev->ops->rx_header_len +
- dev->ops->max_payload_len +
- dev->ops->rx_tail_len;
-
- resp = alloc_skb(resp_len, GFP_KERNEL);
- if (!resp)
- return -ENOMEM;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd) {
- dev_kfree_skb(resp);
+ if (!cmd)
return -ENOMEM;
- }
cmd->code = cmd_code;
cmd->req = req;
- cmd->resp = resp;
- cmd->resp_len = resp_len;
cmd->complete_cb = complete_cb;
cmd->complete_cb_context = complete_cb_context;
pn533_build_cmd_frame(dev, cmd_code, req);
- rc = __pn533_send_frame_async(dev, req, resp, resp_len);
- if (rc < 0) {
- dev_kfree_skb(resp);
+ rc = dev->phy_ops->send_frame(dev, req);
+ if (rc < 0)
kfree(cmd);
- } else {
+ else
dev->cmd = cmd;
- }
return rc;
}
@@ -1086,10 +550,9 @@ static void pn533_wq_cmd(struct work_struct *work)
mutex_unlock(&dev->cmd_lock);
- rc = __pn533_send_frame_async(dev, cmd->req, cmd->resp, cmd->resp_len);
+ rc = dev->phy_ops->send_frame(dev, cmd->req);
if (rc < 0) {
dev_kfree_skb(cmd->req);
- dev_kfree_skb(cmd->resp);
kfree(cmd);
return;
}
@@ -1121,7 +584,7 @@ static int pn533_send_sync_complete(struct pn533 *dev, void *_arg,
* 1. negative in case of error during TX path -> req should be freed
*
* 2. negative in case of error during RX path -> req should not be freed
- * as it's been already freed at the begining of RX path by
+ * as it's been already freed at the beginning of RX path by
* async_complete_cb.
*
 * 3. valid pointer in case of successful RX path
@@ -1129,7 +592,7 @@ static int pn533_send_sync_complete(struct pn533 *dev, void *_arg,
 * A caller has to check the return value with the IS_ERR macro. If the test passes,
* the returned pointer is valid.
*
- * */
+ */
static struct sk_buff *pn533_send_cmd_sync(struct pn533 *dev, u8 cmd_code,
struct sk_buff *req)
{
@@ -1150,43 +613,6 @@ static struct sk_buff *pn533_send_cmd_sync(struct pn533 *dev, u8 cmd_code,
return arg.resp;
}
-static void pn533_send_complete(struct urb *urb)
-{
- struct pn533 *dev = urb->context;
-
- switch (urb->status) {
- case 0:
- break; /* success */
- case -ECONNRESET:
- case -ENOENT:
- dev_dbg(&dev->interface->dev,
- "The urb has been stopped (status %d)\n",
- urb->status);
- break;
- case -ESHUTDOWN:
- default:
- nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
- urb->status);
- }
-}
-
-static void pn533_abort_cmd(struct pn533 *dev, gfp_t flags)
-{
- /* ACR122U does not support any command which aborts last
- * issued command i.e. as ACK for standard PN533. Additionally,
- * it behaves stange, sending broken or incorrect responses,
- * when we cancel urb before the chip will send response.
- */
- if (dev->device_type == PN533_DEVICE_ACR122U)
- return;
-
- /* An ack will cancel the last issued command */
- pn533_send_ack(dev, flags);
-
- /* cancel the urb request */
- usb_kill_urb(dev->in_urb);
-}
-
static struct sk_buff *pn533_alloc_skb(struct pn533 *dev, unsigned int size)
{
struct sk_buff *skb;
@@ -1233,8 +659,10 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a,
if (target_data_len < sizeof(struct pn533_target_type_a))
return false;
- /* The lenght check of nfcid[] and ats[] are not being performed because
- the values are not being used */
+ /*
+ * The length checks of nfcid[] and ats[] are not being performed because
+ * the values are not being used
+ */
/* Requirement 4.6.3.3 from NFC Forum Digital Spec */
ssd = PN533_TYPE_A_SENS_RES_SSD(type_a->sens_res);
@@ -1437,13 +865,14 @@ static int pn533_target_found_type_b(struct nfc_target *nfc_tgt, u8 *tgt_data,
return 0;
}
+static void pn533_poll_reset_mod_list(struct pn533 *dev);
static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
int tgdata_len)
{
struct nfc_target nfc_tgt;
int rc;
- dev_dbg(&dev->interface->dev, "%s: modulation=%d\n",
+ dev_dbg(dev->dev, "%s: modulation=%d\n",
__func__, dev->poll_mod_curr);
if (tg != 1)
@@ -1466,7 +895,7 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
rc = pn533_target_found_type_b(&nfc_tgt, tgdata, tgdata_len);
break;
default:
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Unknown current poll modulation\n");
return -EPROTO;
}
@@ -1475,17 +904,18 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
return rc;
if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) {
- dev_dbg(&dev->interface->dev,
+ dev_dbg(dev->dev,
"The Tg found doesn't have the desired protocol\n");
return -EAGAIN;
}
- dev_dbg(&dev->interface->dev,
+ dev_dbg(dev->dev,
"Target found - supported protocols: 0x%x\n",
nfc_tgt.supported_protocols);
dev->tgt_available_prots = nfc_tgt.supported_protocols;
+ pn533_poll_reset_mod_list(dev);
nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1);
return 0;
@@ -1540,7 +970,8 @@ static int pn533_start_poll_complete(struct pn533 *dev, struct sk_buff *resp)
int rc, tgdata_len;
/* Toggle the DEP polling */
- dev->poll_dep = 1;
+ if (dev->poll_protocols & NFC_PROTO_NFC_DEP_MASK)
+ dev->poll_dep = 1;
nbtg = resp->data[0];
tg = resp->data[1];
@@ -1551,10 +982,8 @@ static int pn533_start_poll_complete(struct pn533 *dev, struct sk_buff *resp)
rc = pn533_target_found(dev, tg, tgdata, tgdata_len);
/* We must stop the poll after a valid target found */
- if (rc == 0) {
- pn533_poll_reset_mod_list(dev);
+ if (rc == 0)
return 0;
- }
}
return -EAGAIN;
@@ -1577,8 +1006,10 @@ static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev)
0x0, 0x0, 0x0,
0x40}; /* SEL_RES for DEP */
- unsigned int skb_len = 36 + /* mode (1), mifare (6),
- felica (18), nfcid3 (10), gb_len (1) */
+ unsigned int skb_len = 36 + /*
+ * mode (1), mifare (6),
+ * felica (18), nfcid3 (10), gb_len (1)
+ */
gbytes_len +
1; /* len Tk*/
@@ -1614,8 +1045,6 @@ static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev)
return skb;
}
-#define PN533_CMD_DATAEXCH_HEAD_LEN 1
-#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
static void pn533_wq_tm_mi_recv(struct work_struct *work);
static struct sk_buff *pn533_build_response(struct pn533 *dev);
@@ -1626,7 +1055,7 @@ static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg,
u8 status, ret, mi;
int rc;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
if (IS_ERR(resp)) {
skb_queue_purge(&dev->resp_q);
@@ -1675,7 +1104,7 @@ static void pn533_wq_tm_mi_recv(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, 0);
if (!skb)
@@ -1689,8 +1118,6 @@ static void pn533_wq_tm_mi_recv(struct work_struct *work)
if (rc < 0)
dev_kfree_skb(skb);
-
- return;
}
static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
@@ -1701,7 +1128,7 @@ static void pn533_wq_tm_mi_send(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
/* Grab the first skb in the queue */
skb = skb_dequeue(&dev->fragment_skb);
@@ -1723,13 +1150,13 @@ static void pn533_wq_tm_mi_send(struct work_struct *work)
if (rc == 0) /* success */
return;
- dev_err(&dev->interface->dev,
+ dev_err(dev->dev,
"Error %d when trying to perform set meta data_exchange", rc);
dev_kfree_skb(skb);
error:
- pn533_send_ack(dev, GFP_KERNEL);
+ dev->phy_ops->send_ack(dev, GFP_KERNEL);
queue_work(dev->wq, &dev->cmd_work);
}
@@ -1739,7 +1166,7 @@ static void pn533_wq_tg_get_data(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, 0);
if (!skb)
@@ -1750,8 +1177,6 @@ static void pn533_wq_tg_get_data(struct work_struct *work)
if (rc < 0)
dev_kfree_skb(skb);
-
- return;
}
#define ATR_REQ_GB_OFFSET 17
@@ -1761,7 +1186,7 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
size_t gb_len;
int rc;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
if (resp->len < ATR_REQ_GB_OFFSET + 1)
return -EINVAL;
@@ -1769,7 +1194,7 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
mode = resp->data[0];
cmd = &resp->data[1];
- dev_dbg(&dev->interface->dev, "Target mode 0x%x len %d\n",
+ dev_dbg(dev->dev, "Target mode 0x%x len %d\n",
mode, resp->len);
if ((mode & PN533_INIT_TARGET_RESP_FRAME_MASK) ==
@@ -1785,7 +1210,7 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
rc = nfc_tm_activated(dev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
comm_mode, gb, gb_len);
if (rc < 0) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Error when signaling target activation\n");
return rc;
}
@@ -1800,7 +1225,7 @@ static void pn533_listen_mode_timer(unsigned long data)
{
struct pn533 *dev = (struct pn533 *)data;
- dev_dbg(&dev->interface->dev, "Listen mode timeout\n");
+ dev_dbg(dev->dev, "Listen mode timeout\n");
dev->cancel_listen = 1;
@@ -1815,12 +1240,12 @@ static int pn533_rf_complete(struct pn533 *dev, void *arg,
{
int rc = 0;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
- nfc_err(&dev->interface->dev, "RF setting error %d\n", rc);
+ nfc_err(dev->dev, "RF setting error %d\n", rc);
return rc;
}
@@ -1838,7 +1263,7 @@ static void pn533_wq_rf(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, 2);
if (!skb)
@@ -1851,10 +1276,8 @@ static void pn533_wq_rf(struct work_struct *work)
pn533_rf_complete, NULL);
if (rc < 0) {
dev_kfree_skb(skb);
- nfc_err(&dev->interface->dev, "RF setting error %d\n", rc);
+ nfc_err(dev->dev, "RF setting error %d\n", rc);
}
-
- return;
}
static int pn533_poll_dep_complete(struct pn533 *dev, void *arg,
@@ -1879,7 +1302,7 @@ static int pn533_poll_dep_complete(struct pn533 *dev, void *arg,
return 0;
}
- dev_dbg(&dev->interface->dev, "Creating new target");
+ dev_dbg(dev->dev, "Creating new target");
nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
nfc_target.nfcid1_len = 10;
@@ -1917,7 +1340,7 @@ static int pn533_poll_dep(struct nfc_dev *nfc_dev)
u8 *next, nfcid3[NFC_NFCID3_MAXSIZE];
u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
- dev_dbg(&dev->interface->dev, "%s", __func__);
+ dev_dbg(dev->dev, "%s", __func__);
if (!dev->gb) {
dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
@@ -1974,21 +1397,20 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
struct pn533_poll_modulations *cur_mod;
int rc;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
- nfc_err(&dev->interface->dev, "%s Poll complete error %d\n",
+ nfc_err(dev->dev, "%s Poll complete error %d\n",
__func__, rc);
if (rc == -ENOENT) {
if (dev->poll_mod_count != 0)
return rc;
- else
- goto stop_poll;
+ goto stop_poll;
} else if (rc < 0) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Error %d when running poll\n", rc);
goto stop_poll;
}
@@ -2008,7 +1430,7 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
goto done;
if (!dev->poll_mod_count) {
- dev_dbg(&dev->interface->dev, "Polling has been stopped\n");
+ dev_dbg(dev->dev, "Polling has been stopped\n");
goto done;
}
@@ -2021,7 +1443,7 @@ done:
return rc;
stop_poll:
- nfc_err(&dev->interface->dev, "Polling operation has been stopped\n");
+ nfc_err(dev->dev, "Polling operation has been stopped\n");
pn533_poll_reset_mod_list(dev);
dev->poll_protocols = 0;
@@ -2051,10 +1473,10 @@ static int pn533_send_poll_frame(struct pn533 *dev)
mod = dev->poll_mod_active[dev->poll_mod_curr];
- dev_dbg(&dev->interface->dev, "%s mod len %d\n",
+ dev_dbg(dev->dev, "%s mod len %d\n",
__func__, mod->len);
- if (dev->poll_dep) {
+ if ((dev->poll_protocols & NFC_PROTO_NFC_DEP_MASK) && dev->poll_dep) {
dev->poll_dep = 0;
return pn533_poll_dep(dev->nfc_dev);
}
@@ -2068,7 +1490,7 @@ static int pn533_send_poll_frame(struct pn533 *dev)
}
if (!skb) {
- nfc_err(&dev->interface->dev, "Failed to allocate skb\n");
+ nfc_err(dev->dev, "Failed to allocate skb\n");
return -ENOMEM;
}
@@ -2076,7 +1498,7 @@ static int pn533_send_poll_frame(struct pn533 *dev)
NULL);
if (rc < 0) {
dev_kfree_skb(skb);
- nfc_err(&dev->interface->dev, "Polling loop error %d\n", rc);
+ nfc_err(dev->dev, "Polling loop error %d\n", rc);
}
return rc;
@@ -2090,13 +1512,13 @@ static void pn533_wq_poll(struct work_struct *work)
cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
- dev_dbg(&dev->interface->dev,
+ dev_dbg(dev->dev,
"%s cancel_listen %d modulation len %d\n",
__func__, dev->cancel_listen, cur_mod->len);
if (dev->cancel_listen == 1) {
dev->cancel_listen = 0;
- pn533_abort_cmd(dev, GFP_ATOMIC);
+ dev->phy_ops->abort_cmd(dev, GFP_ATOMIC);
}
rc = pn533_send_poll_frame(dev);
@@ -2105,8 +1527,6 @@ static void pn533_wq_poll(struct work_struct *work)
if (cur_mod->len == 0 && dev->poll_mod_count > 1)
mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ);
-
- return;
}
static int pn533_start_poll(struct nfc_dev *nfc_dev,
@@ -2117,18 +1537,18 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
u8 rand_mod;
int rc;
- dev_dbg(&dev->interface->dev,
+ dev_dbg(dev->dev,
"%s: im protocols 0x%x tm protocols 0x%x\n",
__func__, im_protocols, tm_protocols);
if (dev->tgt_active_prot) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Cannot poll with a target already activated\n");
return -EBUSY;
}
if (dev->tgt_mode) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Cannot poll while already being activated\n");
return -EBUSY;
}
@@ -2166,12 +1586,12 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
del_timer(&dev->listen_timer);
if (!dev->poll_mod_count) {
- dev_dbg(&dev->interface->dev,
+ dev_dbg(dev->dev,
"Polling operation was not running\n");
return;
}
- pn533_abort_cmd(dev, GFP_KERNEL);
+ dev->phy_ops->abort_cmd(dev, GFP_KERNEL);
flush_delayed_work(&dev->poll_work);
pn533_poll_reset_mod_list(dev);
}
@@ -2184,7 +1604,7 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
struct sk_buff *skb;
struct sk_buff *resp;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, sizeof(u8) * 2); /*TG + Next*/
if (!skb)
@@ -2200,7 +1620,7 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
rsp = (struct pn533_cmd_activate_response *)resp->data;
rc = rsp->status & PN533_CMD_RET_MASK;
if (rc != PN533_CMD_RET_SUCCESS) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Target activation failed (error 0x%x)\n", rc);
dev_kfree_skb(resp);
return -EIO;
@@ -2220,28 +1640,28 @@ static int pn533_activate_target(struct nfc_dev *nfc_dev,
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
int rc;
- dev_dbg(&dev->interface->dev, "%s: protocol=%u\n", __func__, protocol);
+ dev_dbg(dev->dev, "%s: protocol=%u\n", __func__, protocol);
if (dev->poll_mod_count) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Cannot activate while polling\n");
return -EBUSY;
}
if (dev->tgt_active_prot) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"There is already an active target\n");
return -EBUSY;
}
if (!dev->tgt_available_prots) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"There is no available target to activate\n");
return -EINVAL;
}
if (!(dev->tgt_available_prots & (1 << protocol))) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Target doesn't support requested proto %u\n",
protocol);
return -EINVAL;
@@ -2250,7 +1670,7 @@ static int pn533_activate_target(struct nfc_dev *nfc_dev,
if (protocol == NFC_PROTO_NFC_DEP) {
rc = pn533_activate_target_nfcdep(dev);
if (rc) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Activating target with DEP failed %d\n", rc);
return rc;
}
@@ -2262,18 +1682,41 @@ static int pn533_activate_target(struct nfc_dev *nfc_dev,
return 0;
}
+static int pn533_deactivate_target_complete(struct pn533 *dev, void *arg,
+ struct sk_buff *resp)
+{
+ int rc = 0;
+
+ dev_dbg(dev->dev, "%s\n", __func__);
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+
+ nfc_err(dev->dev, "Target release error %d\n", rc);
+
+ return rc;
+ }
+
+ rc = resp->data[0] & PN533_CMD_RET_MASK;
+ if (rc != PN533_CMD_RET_SUCCESS)
+ nfc_err(dev->dev,
+ "Error 0x%x when releasing the target\n", rc);
+
+ dev_kfree_skb(resp);
+ return rc;
+}
+
static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
struct nfc_target *target, u8 mode)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
struct sk_buff *skb;
- struct sk_buff *resp;
int rc;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
if (!dev->tgt_active_prot) {
- nfc_err(&dev->interface->dev, "There is no active target\n");
+ nfc_err(dev->dev, "There is no active target\n");
return;
}
@@ -2286,17 +1729,12 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
*skb_put(skb, 1) = 1; /* TG*/
- resp = pn533_send_cmd_sync(dev, PN533_CMD_IN_RELEASE, skb);
- if (IS_ERR(resp))
- return;
-
- rc = resp->data[0] & PN533_CMD_RET_MASK;
- if (rc != PN533_CMD_RET_SUCCESS)
- nfc_err(&dev->interface->dev,
- "Error 0x%x when releasing the target\n", rc);
-
- dev_kfree_skb(resp);
- return;
+ rc = pn533_send_cmd_async(dev, PN533_CMD_IN_RELEASE, skb,
+ pn533_deactivate_target_complete, NULL);
+ if (rc < 0) {
+ dev_kfree_skb(skb);
+ nfc_err(dev->dev, "Target release error %d\n", rc);
+ }
}
@@ -2315,7 +1753,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
if (dev->tgt_available_prots &&
!(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"The target does not support DEP\n");
rc = -EINVAL;
goto error;
@@ -2325,7 +1763,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
rc = rsp->status & PN533_CMD_RET_MASK;
if (rc != PN533_CMD_RET_SUCCESS) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Bringing DEP link up failed (error 0x%x)\n", rc);
goto error;
}
@@ -2333,7 +1771,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
if (!dev->tgt_available_prots) {
struct nfc_target nfc_target;
- dev_dbg(&dev->interface->dev, "Creating new target\n");
+ dev_dbg(dev->dev, "Creating new target\n");
nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
nfc_target.nfcid1_len = 10;
@@ -2371,16 +1809,16 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE];
u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
if (dev->poll_mod_count) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Cannot bring the DEP link up while polling\n");
return -EBUSY;
}
if (dev->tgt_active_prot) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"There is already an active target\n");
return -EBUSY;
}
@@ -2451,12 +1889,12 @@ static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
pn533_poll_reset_mod_list(dev);
if (dev->tgt_mode || dev->tgt_active_prot)
- pn533_abort_cmd(dev, GFP_KERNEL);
+ dev->phy_ops->abort_cmd(dev, GFP_KERNEL);
dev->tgt_active_prot = 0;
dev->tgt_mode = 0;
@@ -2476,7 +1914,7 @@ static struct sk_buff *pn533_build_response(struct pn533 *dev)
struct sk_buff *skb, *tmp, *t;
unsigned int skb_len = 0, tmp_len = 0;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
if (skb_queue_empty(&dev->resp_q))
return NULL;
@@ -2489,7 +1927,7 @@ static struct sk_buff *pn533_build_response(struct pn533 *dev)
skb_queue_walk_safe(&dev->resp_q, tmp, t)
skb_len += tmp->len;
- dev_dbg(&dev->interface->dev, "%s total length %d\n",
+ dev_dbg(dev->dev, "%s total length %d\n",
__func__, skb_len);
skb = alloc_skb(skb_len, GFP_KERNEL);
@@ -2517,7 +1955,7 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
int rc = 0;
u8 status, ret, mi;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
@@ -2531,7 +1969,7 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
skb_pull(resp, sizeof(status));
if (ret != PN533_CMD_RET_SUCCESS) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Exchanging data failed (error 0x%x)\n", ret);
rc = -EIO;
goto error;
@@ -2572,6 +2010,51 @@ _error:
return rc;
}
+/*
+ * Receive an incoming pn533 frame. skb contains only header and payload.
+ * If skb == NULL, it is a notification that the link below is dead.
+ */
+void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status)
+{
+ if (!dev->cmd)
+ goto sched_wq;
+
+ dev->cmd->status = status;
+
+ if (status != 0) {
+ dev_dbg(dev->dev, "%s: Error received: %d\n", __func__, status);
+ goto sched_wq;
+ }
+
+ if (skb == NULL) {
+ pr_err("NULL Frame -> link is dead\n");
+ goto sched_wq;
+ }
+
+ if (pn533_rx_frame_is_ack(skb->data)) {
+ dev_dbg(dev->dev, "%s: Received ACK frame\n", __func__);
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, skb->data,
+ dev->ops->rx_frame_size(skb->data), false);
+
+ if (!dev->ops->rx_is_frame_valid(skb->data, dev)) {
+ nfc_err(dev->dev, "Received an invalid frame\n");
+ dev->cmd->status = -EIO;
+ } else if (!pn533_rx_frame_is_cmd_response(dev, skb->data)) {
+ nfc_err(dev->dev, "It it not the response to the last command\n");
+ dev->cmd->status = -EIO;
+ }
+
+ dev->cmd->resp = skb;
+
+sched_wq:
+ queue_work(dev->wq, &dev->cmd_complete_work);
+}
+EXPORT_SYMBOL(pn533_recv_frame);
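+
+/*
+ * Illustrative sketch of a transport feeding the core; every name here is
+ * hypothetical (pn533_recv_response() in usb.c is a real caller).
+ */
+#if 0 /* example only */
+struct example_phy {
+	struct pn533 *priv; /* set after pn533_register_device() */
+};
+
+static void example_phy_recv(struct example_phy *phy, const u8 *buf,
+			     int len, int status)
+{
+	struct sk_buff *skb = NULL;
+
+	if (!status) {
+		skb = alloc_skb(len, GFP_KERNEL);
+		if (skb)
+			memcpy(skb_put(skb, len), buf, len);
+	}
+
+	/* skb == NULL tells the core the link below is dead */
+	pn533_recv_frame(phy->priv, skb, status);
+}
+#endif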
+
/* Split the Tx skb into small chunks */
static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb)
{
@@ -2627,10 +2110,10 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
struct pn533_data_exchange_arg *arg = NULL;
int rc;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
if (!dev->tgt_active_prot) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Can't exchange data if there is no active target\n");
rc = -EINVAL;
goto error;
@@ -2694,7 +2177,7 @@ static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
{
u8 status;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
if (IS_ERR(resp))
return PTR_ERR(resp);
@@ -2726,7 +2209,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
int rc;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
/* let's split in multiple chunks if size's too big */
if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
@@ -2764,7 +2247,7 @@ static void pn533_wq_mi_recv(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, PN533_CMD_DATAEXCH_HEAD_LEN);
if (!skb)
@@ -2796,14 +2279,14 @@ static void pn533_wq_mi_recv(struct work_struct *work)
if (rc == 0) /* success */
return;
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Error %d when trying to perform data_exchange\n", rc);
dev_kfree_skb(skb);
kfree(dev->cmd_complete_mi_arg);
error:
- pn533_send_ack(dev, GFP_KERNEL);
+ dev->phy_ops->send_ack(dev, GFP_KERNEL);
queue_work(dev->wq, &dev->cmd_work);
}
@@ -2813,7 +2296,7 @@ static void pn533_wq_mi_send(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
/* Grab the first skb in the queue */
skb = skb_dequeue(&dev->fragment_skb);
@@ -2840,7 +2323,8 @@ static void pn533_wq_mi_send(struct work_struct *work)
default:
/* Still some fragments? */
- rc = pn533_send_cmd_direct_async(dev,PN533_CMD_IN_DATA_EXCHANGE,
+ rc = pn533_send_cmd_direct_async(dev,
+ PN533_CMD_IN_DATA_EXCHANGE,
skb,
pn533_data_exchange_complete,
dev->cmd_complete_dep_arg);
@@ -2851,14 +2335,14 @@ static void pn533_wq_mi_send(struct work_struct *work)
if (rc == 0) /* success */
return;
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Error %d when trying to perform data_exchange\n", rc);
dev_kfree_skb(skb);
kfree(dev->cmd_complete_dep_arg);
error:
- pn533_send_ack(dev, GFP_KERNEL);
+ dev->phy_ops->send_ack(dev, GFP_KERNEL);
queue_work(dev->wq, &dev->cmd_work);
}
@@ -2869,7 +2353,7 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
struct sk_buff *resp;
int skb_len;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
skb_len = sizeof(cfgitem) + cfgdata_len; /* cfgitem + cfgdata */
@@ -2916,7 +2400,7 @@ static int pn533_pasori_fw_reset(struct pn533 *dev)
struct sk_buff *skb;
struct sk_buff *resp;
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
+ dev_dbg(dev->dev, "%s\n", __func__);
skb = pn533_alloc_skb(dev, sizeof(u8));
if (!skb)
@@ -2933,71 +2417,6 @@ static int pn533_pasori_fw_reset(struct pn533 *dev)
return 0;
}
-struct pn533_acr122_poweron_rdr_arg {
- int rc;
- struct completion done;
-};
-
-static void pn533_acr122_poweron_rdr_resp(struct urb *urb)
-{
- struct pn533_acr122_poweron_rdr_arg *arg = urb->context;
-
- dev_dbg(&urb->dev->dev, "%s\n", __func__);
-
- print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1,
- urb->transfer_buffer, urb->transfer_buffer_length,
- false);
-
- arg->rc = urb->status;
- complete(&arg->done);
-}
-
-static int pn533_acr122_poweron_rdr(struct pn533 *dev)
-{
- /* Power on th reader (CCID cmd) */
- u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON,
- 0, 0, 0, 0, 0, 0, 3, 0, 0};
- u8 buf[255];
- int rc;
- void *cntx;
- struct pn533_acr122_poweron_rdr_arg arg;
-
- dev_dbg(&dev->interface->dev, "%s\n", __func__);
-
- init_completion(&arg.done);
- cntx = dev->in_urb->context; /* backup context */
-
- dev->in_urb->transfer_buffer = buf;
- dev->in_urb->transfer_buffer_length = 255;
- dev->in_urb->complete = pn533_acr122_poweron_rdr_resp;
- dev->in_urb->context = &arg;
-
- dev->out_urb->transfer_buffer = cmd;
- dev->out_urb->transfer_buffer_length = sizeof(cmd);
-
- print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1,
- cmd, sizeof(cmd), false);
-
- rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
- if (rc) {
- nfc_err(&dev->interface->dev,
- "Reader power on cmd error %d\n", rc);
- return rc;
- }
-
- rc = usb_submit_urb(dev->in_urb, GFP_KERNEL);
- if (rc) {
- nfc_err(&dev->interface->dev,
- "Can't submit reader poweron cmd response %d\n", rc);
- return rc;
- }
-
- wait_for_completion(&arg.done);
- dev->in_urb->context = cntx; /* restore context */
-
- return arg.rc;
-}
-
static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
@@ -3009,15 +2428,44 @@ static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf)
rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD,
(u8 *)&rf_field, 1);
if (rc) {
- nfc_err(&dev->interface->dev, "Error on setting RF field\n");
+ nfc_err(dev->dev, "Error on setting RF field\n");
return rc;
}
return rc;
}
+static int pn532_sam_configuration(struct nfc_dev *nfc_dev)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ struct sk_buff *skb;
+ struct sk_buff *resp;
+
+ skb = pn533_alloc_skb(dev, 1);
+ if (!skb)
+ return -ENOMEM;
+
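+ /* mode 0x01: normal mode, the SAM is not used (PN532 SAMConfiguration) */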
+ *skb_put(skb, 1) = 0x01;
+
+ resp = pn533_send_cmd_sync(dev, PN533_CMD_SAM_CONFIGURATION, skb);
+ if (IS_ERR(resp))
+ return PTR_ERR(resp);
+
+ dev_kfree_skb(resp);
+ return 0;
+}
+
static int pn533_dev_up(struct nfc_dev *nfc_dev)
{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+
+ if (dev->device_type == PN533_DEVICE_PN532) {
+ int rc = pn532_sam_configuration(nfc_dev);
+
+ if (rc)
+ return rc;
+ }
+
return pn533_rf_field(nfc_dev, 1);
}
@@ -3050,6 +2498,7 @@ static int pn533_setup(struct pn533 *dev)
case PN533_DEVICE_STD:
case PN533_DEVICE_PASORI:
case PN533_DEVICE_ACR122U:
+ case PN533_DEVICE_PN532:
max_retries.mx_rty_atr = 0x2;
max_retries.mx_rty_psl = 0x1;
max_retries.mx_rty_passive_act =
@@ -3062,7 +2511,7 @@ static int pn533_setup(struct pn533 *dev)
break;
default:
- nfc_err(&dev->interface->dev, "Unknown device type %d\n",
+ nfc_err(dev->dev, "Unknown device type %d\n",
dev->device_type);
return -EINVAL;
}
@@ -3070,7 +2519,7 @@ static int pn533_setup(struct pn533 *dev)
rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES,
(u8 *)&max_retries, sizeof(max_retries));
if (rc) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Error on setting MAX_RETRIES config\n");
return rc;
}
@@ -3079,12 +2528,13 @@ static int pn533_setup(struct pn533 *dev)
rc = pn533_set_configuration(dev, PN533_CFGITEM_TIMING,
(u8 *)&timing, sizeof(timing));
if (rc) {
- nfc_err(&dev->interface->dev, "Error on setting RF timings\n");
+ nfc_err(dev->dev, "Error on setting RF timings\n");
return rc;
}
switch (dev->device_type) {
case PN533_DEVICE_STD:
+ case PN533_DEVICE_PN532:
break;
case PN533_DEVICE_PASORI:
@@ -3093,7 +2543,7 @@ static int pn533_setup(struct pn533 *dev)
rc = pn533_set_configuration(dev, PN533_CFGITEM_PASORI,
pasori_cfg, 3);
if (rc) {
- nfc_err(&dev->interface->dev,
+ nfc_err(dev->dev,
"Error while settings PASORI config\n");
return rc;
}
@@ -3106,208 +2556,130 @@ static int pn533_setup(struct pn533 *dev)
return 0;
}
-static int pn533_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
+struct pn533 *pn533_register_device(u32 device_type,
+ u32 protocols,
+ enum pn533_protocol_type protocol_type,
+ void *phy,
+ struct pn533_phy_ops *phy_ops,
+ struct pn533_frame_ops *fops,
+ struct device *dev,
+ struct device *parent)
{
struct pn533_fw_version fw_ver;
- struct pn533 *dev;
- struct usb_host_interface *iface_desc;
- struct usb_endpoint_descriptor *endpoint;
- int in_endpoint = 0;
- int out_endpoint = 0;
+ struct pn533 *priv;
int rc = -ENOMEM;
- int i;
- u32 protocols;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
- dev->udev = usb_get_dev(interface_to_usbdev(interface));
- dev->interface = interface;
- mutex_init(&dev->cmd_lock);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
- iface_desc = interface->cur_altsetting;
- for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
- endpoint = &iface_desc->endpoint[i].desc;
-
- if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint))
- in_endpoint = endpoint->bEndpointAddress;
-
- if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint))
- out_endpoint = endpoint->bEndpointAddress;
- }
-
- if (!in_endpoint || !out_endpoint) {
- nfc_err(&interface->dev,
- "Could not find bulk-in or bulk-out endpoint\n");
- rc = -ENODEV;
- goto error;
- }
-
- dev->in_urb = usb_alloc_urb(0, GFP_KERNEL);
- dev->out_urb = usb_alloc_urb(0, GFP_KERNEL);
-
- if (!dev->in_urb || !dev->out_urb)
- goto error;
-
- usb_fill_bulk_urb(dev->in_urb, dev->udev,
- usb_rcvbulkpipe(dev->udev, in_endpoint),
- NULL, 0, NULL, dev);
- usb_fill_bulk_urb(dev->out_urb, dev->udev,
- usb_sndbulkpipe(dev->udev, out_endpoint),
- NULL, 0, pn533_send_complete, dev);
-
- INIT_WORK(&dev->cmd_work, pn533_wq_cmd);
- INIT_WORK(&dev->cmd_complete_work, pn533_wq_cmd_complete);
- INIT_WORK(&dev->mi_rx_work, pn533_wq_mi_recv);
- INIT_WORK(&dev->mi_tx_work, pn533_wq_mi_send);
- INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
- INIT_WORK(&dev->mi_tm_rx_work, pn533_wq_tm_mi_recv);
- INIT_WORK(&dev->mi_tm_tx_work, pn533_wq_tm_mi_send);
- INIT_DELAYED_WORK(&dev->poll_work, pn533_wq_poll);
- INIT_WORK(&dev->rf_work, pn533_wq_rf);
- dev->wq = alloc_ordered_workqueue("pn533", 0);
- if (dev->wq == NULL)
+ priv->phy = phy;
+ priv->phy_ops = phy_ops;
+ priv->dev = dev;
+ if (fops != NULL)
+ priv->ops = fops;
+ else
+ priv->ops = &pn533_std_frame_ops;
+
+ priv->protocol_type = protocol_type;
+ priv->device_type = device_type;
+
+ mutex_init(&priv->cmd_lock);
+
+ INIT_WORK(&priv->cmd_work, pn533_wq_cmd);
+ INIT_WORK(&priv->cmd_complete_work, pn533_wq_cmd_complete);
+ INIT_WORK(&priv->mi_rx_work, pn533_wq_mi_recv);
+ INIT_WORK(&priv->mi_tx_work, pn533_wq_mi_send);
+ INIT_WORK(&priv->tg_work, pn533_wq_tg_get_data);
+ INIT_WORK(&priv->mi_tm_rx_work, pn533_wq_tm_mi_recv);
+ INIT_WORK(&priv->mi_tm_tx_work, pn533_wq_tm_mi_send);
+ INIT_DELAYED_WORK(&priv->poll_work, pn533_wq_poll);
+ INIT_WORK(&priv->rf_work, pn533_wq_rf);
+ priv->wq = alloc_ordered_workqueue("pn533", 0);
+ if (priv->wq == NULL)
goto error;
- init_timer(&dev->listen_timer);
- dev->listen_timer.data = (unsigned long) dev;
- dev->listen_timer.function = pn533_listen_mode_timer;
-
- skb_queue_head_init(&dev->resp_q);
- skb_queue_head_init(&dev->fragment_skb);
-
- INIT_LIST_HEAD(&dev->cmd_queue);
-
- usb_set_intfdata(interface, dev);
-
- dev->ops = &pn533_std_frame_ops;
-
- dev->protocol_type = PN533_PROTO_REQ_ACK_RESP;
- dev->device_type = id->driver_info;
- switch (dev->device_type) {
- case PN533_DEVICE_STD:
- protocols = PN533_ALL_PROTOCOLS;
- break;
-
- case PN533_DEVICE_PASORI:
- protocols = PN533_NO_TYPE_B_PROTOCOLS;
- break;
+ init_timer(&priv->listen_timer);
+ priv->listen_timer.data = (unsigned long) priv;
+ priv->listen_timer.function = pn533_listen_mode_timer;
- case PN533_DEVICE_ACR122U:
- protocols = PN533_NO_TYPE_B_PROTOCOLS;
- dev->ops = &pn533_acr122_frame_ops;
- dev->protocol_type = PN533_PROTO_REQ_RESP,
-
- rc = pn533_acr122_poweron_rdr(dev);
- if (rc < 0) {
- nfc_err(&dev->interface->dev,
- "Couldn't poweron the reader (error %d)\n", rc);
- goto destroy_wq;
- }
- break;
+ skb_queue_head_init(&priv->resp_q);
+ skb_queue_head_init(&priv->fragment_skb);
- default:
- nfc_err(&dev->interface->dev, "Unknown device type %d\n",
- dev->device_type);
- rc = -EINVAL;
- goto destroy_wq;
- }
+ INIT_LIST_HEAD(&priv->cmd_queue);
memset(&fw_ver, 0, sizeof(fw_ver));
- rc = pn533_get_firmware_version(dev, &fw_ver);
+ rc = pn533_get_firmware_version(priv, &fw_ver);
if (rc < 0)
goto destroy_wq;
- nfc_info(&dev->interface->dev,
- "NXP PN5%02X firmware ver %d.%d now attached\n",
+ nfc_info(dev, "NXP PN5%02X firmware ver %d.%d now attached\n",
fw_ver.ic, fw_ver.ver, fw_ver.rev);
- dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
- dev->ops->tx_header_len +
+ priv->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
+ priv->ops->tx_header_len +
PN533_CMD_DATAEXCH_HEAD_LEN,
- dev->ops->tx_tail_len);
- if (!dev->nfc_dev) {
+ priv->ops->tx_tail_len);
+ if (!priv->nfc_dev) {
rc = -ENOMEM;
goto destroy_wq;
}
- nfc_set_parent_dev(dev->nfc_dev, &interface->dev);
- nfc_set_drvdata(dev->nfc_dev, dev);
+ nfc_set_parent_dev(priv->nfc_dev, parent);
+ nfc_set_drvdata(priv->nfc_dev, priv);
- rc = nfc_register_device(dev->nfc_dev);
+ rc = nfc_register_device(priv->nfc_dev);
if (rc)
goto free_nfc_dev;
- rc = pn533_setup(dev);
+ rc = pn533_setup(priv);
if (rc)
goto unregister_nfc_dev;
- return 0;
+ return priv;
unregister_nfc_dev:
- nfc_unregister_device(dev->nfc_dev);
+ nfc_unregister_device(priv->nfc_dev);
free_nfc_dev:
- nfc_free_device(dev->nfc_dev);
+ nfc_free_device(priv->nfc_dev);
destroy_wq:
- destroy_workqueue(dev->wq);
+ destroy_workqueue(priv->wq);
error:
- usb_free_urb(dev->in_urb);
- usb_free_urb(dev->out_urb);
- usb_put_dev(dev->udev);
- kfree(dev);
- return rc;
+ kfree(priv);
+ return ERR_PTR(rc);
}
+EXPORT_SYMBOL_GPL(pn533_register_device);
-static void pn533_disconnect(struct usb_interface *interface)
+void pn533_unregister_device(struct pn533 *priv)
{
- struct pn533 *dev;
struct pn533_cmd *cmd, *n;
- dev = usb_get_intfdata(interface);
- usb_set_intfdata(interface, NULL);
+ nfc_unregister_device(priv->nfc_dev);
+ nfc_free_device(priv->nfc_dev);
- nfc_unregister_device(dev->nfc_dev);
- nfc_free_device(dev->nfc_dev);
+ flush_delayed_work(&priv->poll_work);
+ destroy_workqueue(priv->wq);
- usb_kill_urb(dev->in_urb);
- usb_kill_urb(dev->out_urb);
+ skb_queue_purge(&priv->resp_q);
- flush_delayed_work(&dev->poll_work);
- destroy_workqueue(dev->wq);
-
- skb_queue_purge(&dev->resp_q);
+ del_timer(&priv->listen_timer);
- del_timer(&dev->listen_timer);
-
- list_for_each_entry_safe(cmd, n, &dev->cmd_queue, queue) {
+ list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) {
list_del(&cmd->queue);
kfree(cmd);
}
- usb_free_urb(dev->in_urb);
- usb_free_urb(dev->out_urb);
- kfree(dev);
-
- nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n");
+ kfree(priv);
}
+EXPORT_SYMBOL_GPL(pn533_unregister_device);
-static struct usb_driver pn533_driver = {
- .name = "pn533",
- .probe = pn533_probe,
- .disconnect = pn533_disconnect,
- .id_table = pn533_table,
-};
-
-module_usb_driver(pn533_driver);
MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
MODULE_AUTHOR("Aloisio Almeida Jr <aloisio.almeida@openbossa.org>");
MODULE_AUTHOR("Waldemar Rymarkiewicz <waldemar.rymarkiewicz@tieto.com>");
-MODULE_DESCRIPTION("PN533 usb driver ver " VERSION);
+MODULE_DESCRIPTION("PN533 driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h
new file mode 100644
index 000000000..553c7d171
--- /dev/null
+++ b/drivers/nfc/pn533/pn533.h
@@ -0,0 +1,238 @@
+/*
+ * Driver for NXP PN533 NFC Chip
+ *
+ * Copyright (C) 2011 Instituto Nokia de Tecnologia
+ * Copyright (C) 2012-2013 Tieto Poland
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define PN533_DEVICE_STD 0x1
+#define PN533_DEVICE_PASORI 0x2
+#define PN533_DEVICE_ACR122U 0x3
+#define PN533_DEVICE_PN532 0x4
+
+#define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\
+ NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\
+ NFC_PROTO_NFC_DEP_MASK |\
+ NFC_PROTO_ISO14443_B_MASK)
+
+#define PN533_NO_TYPE_B_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
+ NFC_PROTO_MIFARE_MASK | \
+ NFC_PROTO_FELICA_MASK | \
+ NFC_PROTO_ISO14443_MASK | \
+ NFC_PROTO_NFC_DEP_MASK)
+
+/* Standard pn533 frame definitions (standard and extended)*/
+#define PN533_STD_FRAME_HEADER_LEN (sizeof(struct pn533_std_frame) \
+ + 2) /* data[0] TFI, data[1] CC */
+#define PN533_STD_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/
+
+#define PN533_EXT_FRAME_HEADER_LEN (sizeof(struct pn533_ext_frame) \
+ + 2) /* data[0] TFI, data[1] CC */
+
+#define PN533_CMD_DATAEXCH_HEAD_LEN 1
+#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
+#define PN533_CMD_DATAFRAME_MAXLEN 240 /* max data length (send) */
+
+/*
+ * Max extended frame payload len, excluding TFI and CC
+ * which are already in PN533_STD_FRAME_HEADER_LEN.
+ */
+#define PN533_STD_FRAME_MAX_PAYLOAD_LEN 263
+
+
+/* Preamble (1), SoPC (2), ACK Code (2), Postamble (1) */
+#define PN533_STD_FRAME_ACK_SIZE 6
+#define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen])
+#define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
+/* Half start code (3), LEN (4) should be 0xffff for extended frame */
+#define PN533_STD_IS_EXTENDED(hdr) ((hdr)->datalen == 0xFF \
+ && (hdr)->datalen_checksum == 0xFF)
+#define PN533_EXT_FRAME_CHECKSUM(f) (f->data[be16_to_cpu(f->datalen)])
+
+/* start of frame */
+#define PN533_STD_FRAME_SOF 0x00FF
+
+/* standard frame identifier: in/out/error */
+#define PN533_STD_FRAME_IDENTIFIER(f) (f->data[0]) /* TFI */
+#define PN533_STD_FRAME_DIR_OUT 0xD4
+#define PN533_STD_FRAME_DIR_IN 0xD5
+
+/* PN533 Commands */
+#define PN533_FRAME_CMD(f) (f->data[1])
+
+#define PN533_CMD_GET_FIRMWARE_VERSION 0x02
+#define PN533_CMD_SAM_CONFIGURATION 0x14
+#define PN533_CMD_RF_CONFIGURATION 0x32
+#define PN533_CMD_IN_DATA_EXCHANGE 0x40
+#define PN533_CMD_IN_COMM_THRU 0x42
+#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A
+#define PN533_CMD_IN_ATR 0x50
+#define PN533_CMD_IN_RELEASE 0x52
+#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
+
+#define PN533_CMD_TG_INIT_AS_TARGET 0x8c
+#define PN533_CMD_TG_GET_DATA 0x86
+#define PN533_CMD_TG_SET_DATA 0x8e
+#define PN533_CMD_TG_SET_META_DATA 0x94
+#define PN533_CMD_UNDEF 0xff
+
+#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
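+/* a response code is the command code + 1, e.g. 0x4a -> 0x4b */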
+
+/* PN533 Return codes */
+#define PN533_CMD_RET_MASK 0x3F
+#define PN533_CMD_MI_MASK 0x40
+#define PN533_CMD_RET_SUCCESS 0x00
+
+
+enum pn533_protocol_type {
+ PN533_PROTO_REQ_ACK_RESP = 0,
+ PN533_PROTO_REQ_RESP
+};
+
+/* Poll modulations */
+enum {
+ PN533_POLL_MOD_106KBPS_A,
+ PN533_POLL_MOD_212KBPS_FELICA,
+ PN533_POLL_MOD_424KBPS_FELICA,
+ PN533_POLL_MOD_106KBPS_JEWEL,
+ PN533_POLL_MOD_847KBPS_B,
+ PN533_LISTEN_MOD,
+
+ __PN533_POLL_MOD_AFTER_LAST,
+};
+#define PN533_POLL_MOD_MAX (__PN533_POLL_MOD_AFTER_LAST - 1)
+
+struct pn533_std_frame {
+ u8 preamble;
+ __be16 start_frame;
+ u8 datalen;
+ u8 datalen_checksum;
+ u8 data[];
+} __packed;
+
+struct pn533_ext_frame { /* Extended Information frame */
+ u8 preamble;
+ __be16 start_frame;
+ __be16 eif_flag; /* fixed to 0xFFFF */
+ __be16 datalen;
+ u8 datalen_checksum;
+ u8 data[];
+} __packed;
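+
+/*
+ * Illustrative sketch: both checksums of a standard frame are
+ * two's-complement sums per the PN533 user manual; the helper name
+ * below is hypothetical and only documents the format.
+ */
+static inline void pn533_std_frame_update_checksums(struct pn533_std_frame *f)
+{
+	u8 sum = 0;
+	int i;
+
+	/* LCS: LEN + LCS must equal 0 modulo 256 */
+	f->datalen_checksum = ~f->datalen + 1;
+
+	/* DCS: TFI + PD0..PDn + DCS must equal 0 modulo 256 */
+	for (i = 0; i < f->datalen; i++)
+		sum += f->data[i];
+	f->data[f->datalen] = ~sum + 1;
+}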
+
+struct pn533 {
+ struct nfc_dev *nfc_dev;
+ u32 device_type;
+ enum pn533_protocol_type protocol_type;
+
+ struct sk_buff_head resp_q;
+ struct sk_buff_head fragment_skb;
+
+ struct workqueue_struct *wq;
+ struct work_struct cmd_work;
+ struct work_struct cmd_complete_work;
+ struct delayed_work poll_work;
+ struct work_struct mi_rx_work;
+ struct work_struct mi_tx_work;
+ struct work_struct mi_tm_rx_work;
+ struct work_struct mi_tm_tx_work;
+ struct work_struct tg_work;
+ struct work_struct rf_work;
+
+ struct list_head cmd_queue;
+ struct pn533_cmd *cmd;
+ u8 cmd_pending;
+ struct mutex cmd_lock; /* protects cmd queue */
+
+ void *cmd_complete_mi_arg;
+ void *cmd_complete_dep_arg;
+
+ struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
+ u8 poll_mod_count;
+ u8 poll_mod_curr;
+ u8 poll_dep;
+ u32 poll_protocols;
+ u32 listen_protocols;
+ struct timer_list listen_timer;
+ int cancel_listen;
+
+ u8 *gb;
+ size_t gb_len;
+
+ u8 tgt_available_prots;
+ u8 tgt_active_prot;
+ u8 tgt_mode;
+
+ struct pn533_frame_ops *ops;
+
+ struct device *dev;
+ void *phy;
+ struct pn533_phy_ops *phy_ops;
+};
+
+typedef int (*pn533_send_async_complete_t) (struct pn533 *dev, void *arg,
+ struct sk_buff *resp);
+
+struct pn533_cmd {
+ struct list_head queue;
+ u8 code;
+ int status;
+ struct sk_buff *req;
+ struct sk_buff *resp;
+ pn533_send_async_complete_t complete_cb;
+ void *complete_cb_context;
+};
+
+
+struct pn533_frame_ops {
+ void (*tx_frame_init)(void *frame, u8 cmd_code);
+ void (*tx_frame_finish)(void *frame);
+ void (*tx_update_payload_len)(void *frame, int len);
+ int tx_header_len;
+ int tx_tail_len;
+
+ bool (*rx_is_frame_valid)(void *frame, struct pn533 *dev);
+ bool (*rx_frame_is_ack)(void *frame);
+ int (*rx_frame_size)(void *frame);
+ int rx_header_len;
+ int rx_tail_len;
+
+ int max_payload_len;
+ u8 (*get_cmd_code)(void *frame);
+};
+
+
+struct pn533_phy_ops {
+ int (*send_frame)(struct pn533 *priv,
+ struct sk_buff *out);
+ int (*send_ack)(struct pn533 *dev, gfp_t flags);
+ void (*abort_cmd)(struct pn533 *priv, gfp_t flags);
+};
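+
+/*
+ * Illustrative sketch of wiring a new transport; every name below is
+ * hypothetical (usb.c in this patch is the real reference). A transport
+ * implements the three callbacks and passes them to pn533_register_device().
+ */
+#if 0 /* example only */
+static int example_send_frame(struct pn533 *priv, struct sk_buff *out)
+{
+	/* write out->data / out->len to the bus */
+	return 0;
+}
+
+static int example_send_ack(struct pn533 *dev, gfp_t flags)
+{
+	/* write the fixed 6-byte ACK frame: 00 00 ff 00 ff 00 */
+	return 0;
+}
+
+static void example_abort_cmd(struct pn533 *priv, gfp_t flags)
+{
+	/* sending an ACK aborts the last issued command */
+	example_send_ack(priv, flags);
+}
+
+static struct pn533_phy_ops example_phy_ops = {
+	.send_frame = example_send_frame,
+	.send_ack = example_send_ack,
+	.abort_cmd = example_abort_cmd,
+};
+#endif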
+
+
+struct pn533 *pn533_register_device(u32 device_type,
+ u32 protocols,
+ enum pn533_protocol_type protocol_type,
+ void *phy,
+ struct pn533_phy_ops *phy_ops,
+ struct pn533_frame_ops *fops,
+ struct device *dev,
+ struct device *parent);
+
+void pn533_unregister_device(struct pn533 *priv);
+void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status);
+
+bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame);
+bool pn533_rx_frame_is_ack(void *_frame);
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
new file mode 100644
index 000000000..8ca060324
--- /dev/null
+++ b/drivers/nfc/pn533/usb.c
@@ -0,0 +1,597 @@
+/*
+ * Driver for NXP PN533 NFC Chip - USB transport layer
+ *
+ * Copyright (C) 2011 Instituto Nokia de Tecnologia
+ * Copyright (C) 2012-2013 Tieto Poland
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/nfc.h>
+#include <linux/netdevice.h>
+#include <net/nfc/nfc.h>
+#include "pn533.h"
+
+#define VERSION "0.1"
+
+#define PN533_VENDOR_ID 0x4CC
+#define PN533_PRODUCT_ID 0x2533
+
+#define SCM_VENDOR_ID 0x4E6
+#define SCL3711_PRODUCT_ID 0x5591
+
+#define SONY_VENDOR_ID 0x054c
+#define PASORI_PRODUCT_ID 0x02e1
+
+#define ACS_VENDOR_ID 0x072f
+#define ACR122U_PRODUCT_ID 0x2200
+
+static const struct usb_device_id pn533_usb_table[] = {
+ { USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID),
+ .driver_info = PN533_DEVICE_STD },
+ { USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID),
+ .driver_info = PN533_DEVICE_STD },
+ { USB_DEVICE(SONY_VENDOR_ID, PASORI_PRODUCT_ID),
+ .driver_info = PN533_DEVICE_PASORI },
+ { USB_DEVICE(ACS_VENDOR_ID, ACR122U_PRODUCT_ID),
+ .driver_info = PN533_DEVICE_ACR122U },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, pn533_usb_table);
+
+struct pn533_usb_phy {
+ struct usb_device *udev;
+ struct usb_interface *interface;
+
+ struct urb *out_urb;
+ struct urb *in_urb;
+
+ struct pn533 *priv;
+};
+
+static void pn533_recv_response(struct urb *urb)
+{
+ struct pn533_usb_phy *phy = urb->context;
+ struct sk_buff *skb = NULL;
+
+ if (!urb->status) {
+ /* URB completion handlers run in atomic context */
+ skb = alloc_skb(urb->actual_length, GFP_ATOMIC);
+ if (!skb) {
+ nfc_err(&phy->udev->dev, "failed to alloc memory\n");
+ } else {
+ memcpy(skb_put(skb, urb->actual_length),
+ urb->transfer_buffer, urb->actual_length);
+ }
+ }
+
+ pn533_recv_frame(phy->priv, skb, urb->status);
+}
+
+static int pn533_submit_urb_for_response(struct pn533_usb_phy *phy, gfp_t flags)
+{
+ phy->in_urb->complete = pn533_recv_response;
+
+ return usb_submit_urb(phy->in_urb, flags);
+}
+
+static void pn533_recv_ack(struct urb *urb)
+{
+ struct pn533_usb_phy *phy = urb->context;
+ struct pn533 *priv = phy->priv;
+ struct pn533_cmd *cmd = priv->cmd;
+ struct pn533_std_frame *in_frame;
+ int rc;
+
+ cmd->status = urb->status;
+
+ switch (urb->status) {
+ case 0:
+ break; /* success */
+ case -ECONNRESET:
+ case -ENOENT:
+ dev_dbg(&phy->udev->dev,
+ "The urb has been stopped (status %d)\n",
+ urb->status);
+ goto sched_wq;
+ case -ESHUTDOWN:
+ default:
+ nfc_err(&phy->udev->dev,
+ "Urb failure (status %d)\n", urb->status);
+ goto sched_wq;
+ }
+
+ in_frame = phy->in_urb->transfer_buffer;
+
+ if (!pn533_rx_frame_is_ack(in_frame)) {
+ nfc_err(&phy->udev->dev, "Received an invalid ack\n");
+ cmd->status = -EIO;
+ goto sched_wq;
+ }
+
+ rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC);
+ if (rc) {
+ nfc_err(&phy->udev->dev,
+ "usb_submit_urb failed with result %d\n", rc);
+ cmd->status = rc;
+ goto sched_wq;
+ }
+
+ return;
+
+sched_wq:
+ queue_work(priv->wq, &priv->cmd_complete_work);
+}
+
+static int pn533_submit_urb_for_ack(struct pn533_usb_phy *phy, gfp_t flags)
+{
+ phy->in_urb->complete = pn533_recv_ack;
+
+ return usb_submit_urb(phy->in_urb, flags);
+}
+
+static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
+{
+ struct pn533_usb_phy *phy = dev->phy;
+ static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00};
+ /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */
+ int rc;
+
+ phy->out_urb->transfer_buffer = (u8 *)ack;
+ phy->out_urb->transfer_buffer_length = sizeof(ack);
+ rc = usb_submit_urb(phy->out_urb, flags);
+
+ return rc;
+}
+
+static int pn533_usb_send_frame(struct pn533 *dev,
+ struct sk_buff *out)
+{
+ struct pn533_usb_phy *phy = dev->phy;
+ int rc;
+
+ if (phy->priv == NULL)
+ phy->priv = dev;
+
+ phy->out_urb->transfer_buffer = out->data;
+ phy->out_urb->transfer_buffer_length = out->len;
+
+ print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
+ out->data, out->len, false);
+
+ rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
+ /* request for response for sent packet directly */
+ rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC);
+ if (rc)
+ goto error;
+ } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
+ /* request for ACK if that's the case */
+ rc = pn533_submit_urb_for_ack(phy, GFP_KERNEL);
+ if (rc)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ usb_unlink_urb(phy->out_urb);
+ return rc;
+}
+
+static void pn533_usb_abort_cmd(struct pn533 *dev, gfp_t flags)
+{
+ struct pn533_usb_phy *phy = dev->phy;
+
+ /* ACR122U does not support any command which aborts the last
+ * issued command, i.e. an ACK as for a standard PN533. Additionally,
+ * it behaves strangely, sending broken or incorrect responses,
+ * when we cancel an urb before the chip has sent its response.
+ */
+ if (dev->device_type == PN533_DEVICE_ACR122U)
+ return;
+
+ /* An ack will cancel the last issued command */
+ pn533_usb_send_ack(dev, flags);
+
+ /* cancel the urb request */
+ usb_kill_urb(phy->in_urb);
+}
+
+/* ACR122-specific structs and functions */
+
+/* ACS ACR122 pn533 frame definitions */
+#define PN533_ACR122_TX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_tx_frame) \
+ + 2)
+#define PN533_ACR122_TX_FRAME_TAIL_LEN 0
+#define PN533_ACR122_RX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_rx_frame) \
+ + 2)
+#define PN533_ACR122_RX_FRAME_TAIL_LEN 2
+#define PN533_ACR122_FRAME_MAX_PAYLOAD_LEN PN533_STD_FRAME_MAX_PAYLOAD_LEN
+
+/* CCID messages types */
+#define PN533_ACR122_PC_TO_RDR_ICCPOWERON 0x62
+#define PN533_ACR122_PC_TO_RDR_ESCAPE 0x6B
+
+#define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83
+
+
+struct pn533_acr122_ccid_hdr {
+ u8 type;
+ u32 datalen;
+ u8 slot;
+ u8 seq;
+
+ /*
+ * 3 msg-specific bytes, or status, error and 1 specific
+ * byte for a response msg
+ */
+ u8 params[3];
+ u8 data[]; /* payload */
+} __packed;
+
+struct pn533_acr122_apdu_hdr {
+ u8 class;
+ u8 ins;
+ u8 p1;
+ u8 p2;
+} __packed;
+
+struct pn533_acr122_tx_frame {
+ struct pn533_acr122_ccid_hdr ccid;
+ struct pn533_acr122_apdu_hdr apdu;
+ u8 datalen;
+ u8 data[]; /* pn533 frame: TFI ... */
+} __packed;
+
+struct pn533_acr122_rx_frame {
+ struct pn533_acr122_ccid_hdr ccid;
+ u8 data[]; /* pn533 frame : TFI ... */
+} __packed;
+
+static void pn533_acr122_tx_frame_init(void *_frame, u8 cmd_code)
+{
+ struct pn533_acr122_tx_frame *frame = _frame;
+
+ frame->ccid.type = PN533_ACR122_PC_TO_RDR_ESCAPE;
+ /* sizeof(apdu_hdr) + sizeof(datalen) */
+ frame->ccid.datalen = sizeof(frame->apdu) + 1;
+ frame->ccid.slot = 0;
+ frame->ccid.seq = 0;
+ frame->ccid.params[0] = 0;
+ frame->ccid.params[1] = 0;
+ frame->ccid.params[2] = 0;
+
+ frame->data[0] = PN533_STD_FRAME_DIR_OUT;
+ frame->data[1] = cmd_code;
+ frame->datalen = 2; /* data[0] + data[1] */
+
+ frame->apdu.class = 0xFF;
+ frame->apdu.ins = 0;
+ frame->apdu.p1 = 0;
+ frame->apdu.p2 = 0;
+}
+
+static void pn533_acr122_tx_frame_finish(void *_frame)
+{
+ struct pn533_acr122_tx_frame *frame = _frame;
+
+ frame->ccid.datalen += frame->datalen;
+}
+
+static void pn533_acr122_tx_update_payload_len(void *_frame, int len)
+{
+ struct pn533_acr122_tx_frame *frame = _frame;
+
+ frame->datalen += len;
+}
+
+static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev)
+{
+ struct pn533_acr122_rx_frame *frame = _frame;
+
+ if (frame->ccid.type != PN533_ACR122_RDR_TO_PC_ESCAPE)
+ return false;
+
+ if (!frame->ccid.datalen)
+ return false;
+
+ if (frame->data[frame->ccid.datalen - 2] == 0x63)
+ return false;
+
+ return true;
+}
+
+static int pn533_acr122_rx_frame_size(void *frame)
+{
+ struct pn533_acr122_rx_frame *f = frame;
+
+ /* f->ccid.datalen already includes tail length */
+ return sizeof(struct pn533_acr122_rx_frame) + f->ccid.datalen;
+}
+
+static u8 pn533_acr122_get_cmd_code(void *frame)
+{
+ struct pn533_acr122_rx_frame *f = frame;
+
+ return PN533_FRAME_CMD(f);
+}
+
+static struct pn533_frame_ops pn533_acr122_frame_ops = {
+ .tx_frame_init = pn533_acr122_tx_frame_init,
+ .tx_frame_finish = pn533_acr122_tx_frame_finish,
+ .tx_update_payload_len = pn533_acr122_tx_update_payload_len,
+ .tx_header_len = PN533_ACR122_TX_FRAME_HEADER_LEN,
+ .tx_tail_len = PN533_ACR122_TX_FRAME_TAIL_LEN,
+
+ .rx_is_frame_valid = pn533_acr122_is_rx_frame_valid,
+ .rx_header_len = PN533_ACR122_RX_FRAME_HEADER_LEN,
+ .rx_tail_len = PN533_ACR122_RX_FRAME_TAIL_LEN,
+ .rx_frame_size = pn533_acr122_rx_frame_size,
+
+ .max_payload_len = PN533_ACR122_FRAME_MAX_PAYLOAD_LEN,
+ .get_cmd_code = pn533_acr122_get_cmd_code,
+};
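+
+/*
+ * Illustrative wire layout of a wrapped TX frame as built by the helpers
+ * above (GetFirmwareVersion shown, the datalen field is schematic):
+ *
+ *   ccid: 6b <datalen> 00 00 00 00 00   type, datalen, slot, seq, params[3]
+ *   apdu: ff 00 00 00                   class, ins, p1, p2
+ *         02                            datalen (TFI + cmd code)
+ *   data: d4 02                         PN533 payload: TFI, cmd code
+ */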
+
+struct pn533_acr122_poweron_rdr_arg {
+ int rc;
+ struct completion done;
+};
+
+static void pn533_acr122_poweron_rdr_resp(struct urb *urb)
+{
+ struct pn533_acr122_poweron_rdr_arg *arg = urb->context;
+
+ dev_dbg(&urb->dev->dev, "%s\n", __func__);
+
+ print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1,
+ urb->transfer_buffer, urb->transfer_buffer_length,
+ false);
+
+ arg->rc = urb->status;
+ complete(&arg->done);
+}
+
+static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
+{
+ /* Power on the reader (CCID cmd) */
+ u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON,
+ 0, 0, 0, 0, 0, 0, 3, 0, 0};
+ int rc;
+ void *cntx;
+ struct pn533_acr122_poweron_rdr_arg arg;
+
+ dev_dbg(&phy->udev->dev, "%s\n", __func__);
+
+ init_completion(&arg.done);
+ cntx = phy->in_urb->context; /* backup context */
+
+ phy->in_urb->complete = pn533_acr122_poweron_rdr_resp;
+ phy->in_urb->context = &arg;
+
+ phy->out_urb->transfer_buffer = cmd;
+ phy->out_urb->transfer_buffer_length = sizeof(cmd);
+
+ print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1,
+ cmd, sizeof(cmd), false);
+
+ rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
+ if (rc) {
+ nfc_err(&phy->udev->dev,
+ "Reader power on cmd error %d\n", rc);
+ return rc;
+ }
+
+ rc = usb_submit_urb(phy->in_urb, GFP_KERNEL);
+ if (rc) {
+ nfc_err(&phy->udev->dev,
+ "Can't submit reader poweron cmd response %d\n", rc);
+ return rc;
+ }
+
+ wait_for_completion(&arg.done);
+ phy->in_urb->context = cntx; /* restore context */
+
+ return arg.rc;
+}
+
+static void pn533_send_complete(struct urb *urb)
+{
+ struct pn533_usb_phy *phy = urb->context;
+
+ switch (urb->status) {
+ case 0:
+ break; /* success */
+ case -ECONNRESET:
+ case -ENOENT:
+ dev_dbg(&phy->udev->dev,
+ "The urb has been stopped (status %d)\n",
+ urb->status);
+ break;
+ case -ESHUTDOWN:
+ default:
+ nfc_err(&phy->udev->dev,
+ "Urb failure (status %d)\n",
+ urb->status);
+ }
+}
+
+static struct pn533_phy_ops usb_phy_ops = {
+ .send_frame = pn533_usb_send_frame,
+ .send_ack = pn533_usb_send_ack,
+ .abort_cmd = pn533_usb_abort_cmd,
+};
+
+static int pn533_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct pn533 *priv;
+ struct pn533_usb_phy *phy;
+ struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ int in_endpoint = 0;
+ int out_endpoint = 0;
+ int rc = -ENOMEM;
+ int i;
+ u32 protocols;
+ enum pn533_protocol_type protocol_type = PN533_PROTO_REQ_ACK_RESP;
+ struct pn533_frame_ops *fops = NULL;
+ unsigned char *in_buf;
+ int in_buf_len = PN533_EXT_FRAME_HEADER_LEN +
+ PN533_STD_FRAME_MAX_PAYLOAD_LEN +
+ PN533_STD_FRAME_TAIL_LEN;
+
+ phy = devm_kzalloc(&interface->dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ in_buf = kzalloc(in_buf_len, GFP_KERNEL);
+ if (!in_buf)
+ return -ENOMEM;
+
+ phy->udev = usb_get_dev(interface_to_usbdev(interface));
+ phy->interface = interface;
+
+ iface_desc = interface->cur_altsetting;
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+ if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint))
+ in_endpoint = endpoint->bEndpointAddress;
+
+ if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint))
+ out_endpoint = endpoint->bEndpointAddress;
+ }
+
+ if (!in_endpoint || !out_endpoint) {
+ nfc_err(&interface->dev,
+ "Could not find bulk-in or bulk-out endpoint\n");
+ rc = -ENODEV;
+ goto error;
+ }
+
+ phy->in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ phy->out_urb = usb_alloc_urb(0, GFP_KERNEL);
+
+ if (!phy->in_urb || !phy->out_urb)
+ goto error;
+
+ usb_fill_bulk_urb(phy->in_urb, phy->udev,
+ usb_rcvbulkpipe(phy->udev, in_endpoint),
+ in_buf, in_buf_len, NULL, phy);
+
+ usb_fill_bulk_urb(phy->out_urb, phy->udev,
+ usb_sndbulkpipe(phy->udev, out_endpoint),
+ NULL, 0, pn533_send_complete, phy);
+
+ switch (id->driver_info) {
+ case PN533_DEVICE_STD:
+ protocols = PN533_ALL_PROTOCOLS;
+ break;
+
+ case PN533_DEVICE_PASORI:
+ protocols = PN533_NO_TYPE_B_PROTOCOLS;
+ break;
+
+ case PN533_DEVICE_ACR122U:
+ protocols = PN533_NO_TYPE_B_PROTOCOLS;
+ fops = &pn533_acr122_frame_ops;
+ protocol_type = PN533_PROTO_REQ_RESP;
+
+ rc = pn533_acr122_poweron_rdr(phy);
+ if (rc < 0) {
+ nfc_err(&interface->dev,
+ "Couldn't poweron the reader (error %d)\n", rc);
+ goto error;
+ }
+ break;
+
+ default:
+ nfc_err(&interface->dev, "Unknown device type %lu\n",
+ id->driver_info);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ priv = pn533_register_device(id->driver_info, protocols, protocol_type,
+ phy, &usb_phy_ops, fops,
+ &phy->udev->dev, &interface->dev);
+
+ if (IS_ERR(priv)) {
+ rc = PTR_ERR(priv);
+ goto error;
+ }
+
+ phy->priv = priv;
+
+ usb_set_intfdata(interface, phy);
+
+ return 0;
+
+error:
+ usb_free_urb(phy->in_urb);
+ usb_free_urb(phy->out_urb);
+ usb_put_dev(phy->udev);
+ kfree(in_buf);
+out_free_phy:
+ kfree(phy);
+ return rc;
+}
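
Review note: the endpoint-scan loop in probe is the classic open-coded form. Later kernels (not this 4.7 tree) added a helper that performs the same bulk-endpoint discovery; shown here only as a comparison sketch:

struct usb_endpoint_descriptor *bulk_in, *bulk_out;
int rc;

/* Finds the first bulk-in and bulk-out endpoints, or fails with an error. */
rc = usb_find_common_endpoints(interface->cur_altsetting,
			       &bulk_in, &bulk_out, NULL, NULL);
if (rc) {
	nfc_err(&interface->dev, "missing bulk-in/bulk-out endpoint\n");
	return rc;
}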
+
+static void pn533_usb_disconnect(struct usb_interface *interface)
+{
+ struct pn533_usb_phy *phy = usb_get_intfdata(interface);
+
+ if (!phy)
+ return;
+
+ pn533_unregister_device(phy->priv);
+
+ usb_set_intfdata(interface, NULL);
+
+ usb_kill_urb(phy->in_urb);
+ usb_kill_urb(phy->out_urb);
+
+ kfree(phy->in_urb->transfer_buffer);
+ usb_free_urb(phy->in_urb);
+ usb_free_urb(phy->out_urb);
+
+ nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n");
+}
+
+static struct usb_driver pn533_usb_driver = {
+ .name = "pn533_usb",
+ .probe = pn533_usb_probe,
+ .disconnect = pn533_usb_disconnect,
+ .id_table = pn533_usb_table,
+};
+
+module_usb_driver(pn533_usb_driver);
+
+MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
+MODULE_AUTHOR("Aloisio Almeida Jr <aloisio.almeida@openbossa.org>");
+MODULE_AUTHOR("Waldemar Rymarkiewicz <waldemar.rymarkiewicz@tieto.com>");
+MODULE_DESCRIPTION("PN533 USB driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 5be350ddb..7c560b538 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -1106,7 +1106,6 @@ MODULE_DEVICE_TABLE(of, of_pn544_i2c_match);
static struct i2c_driver pn544_hci_i2c_driver = {
.driver = {
.name = PN544_HCI_I2C_DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_pn544_i2c_match),
.acpi_match_table = ACPI_PTR(pn544_hci_i2c_acpi_match),
},
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index 8a56b5c6e..9dfae0efa 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -42,7 +42,7 @@
#define ST_NCI_I2C_DRIVER_NAME "st_nci_i2c"
-#define ST_NCI_GPIO_NAME_RESET "clf_reset"
+#define ST_NCI_GPIO_NAME_RESET "reset"
struct st_nci_i2c_phy {
struct i2c_client *i2c_dev;
@@ -211,19 +211,9 @@ static struct nfc_phy_ops i2c_phy_ops = {
static int st_nci_i2c_acpi_request_resources(struct i2c_client *client)
{
struct st_nci_i2c_phy *phy = i2c_get_clientdata(client);
- const struct acpi_device_id *id;
struct gpio_desc *gpiod_reset;
- struct device *dev;
-
- if (!client)
- return -EINVAL;
-
- dev = &client->dev;
-
- /* Match the struct device against a given list of ACPI IDs */
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!id)
- return -ENODEV;
+ struct device *dev = &client->dev;
+ u8 tmp;
/* Get RESET GPIO from ACPI */
gpiod_reset = devm_gpiod_get_index(dev, ST_NCI_GPIO_NAME_RESET, 1,
@@ -237,10 +227,18 @@ static int st_nci_i2c_acpi_request_resources(struct i2c_client *client)
phy->irq_polarity = irq_get_trigger_type(client->irq);
- phy->se_status.is_ese_present =
- device_property_present(dev, "ese-present");
- phy->se_status.is_uicc_present =
- device_property_present(dev, "uicc-present");
+ phy->se_status.is_ese_present = false;
+ phy->se_status.is_uicc_present = false;
+
+ if (device_property_present(dev, "ese-present")) {
+ device_property_read_u8(dev, "ese-present", &tmp);
+ phy->se_status.is_ese_present = tmp;
+ }
+
+ if (device_property_present(dev, "uicc-present")) {
+ device_property_read_u8(dev, "uicc-present", &tmp);
+ phy->se_status.is_uicc_present = tmp;
+ }
return 0;
}
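
Review note: the same present-then-read-u8 pattern now appears in three drivers (st-nci i2c/spi, st21nfca i2c). A sketch of factoring it into a helper; the name is illustrative, not part of this patch:

/* Read an ACPI boolean-like property: absent -> false, else its u8 value. */
static bool st_nci_prop_bool(struct device *dev, const char *name)
{
	u8 val = 0;

	if (!device_property_present(dev, name))
		return false;
	device_property_read_u8(dev, name, &val);
	return val;
}

/* usage: phy->se_status.is_ese_present = st_nci_prop_bool(dev, "ese-present"); */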
@@ -416,7 +414,6 @@ MODULE_DEVICE_TABLE(of, of_st_nci_i2c_match);
static struct i2c_driver st_nci_i2c_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = ST_NCI_I2C_DRIVER_NAME,
.of_match_table = of_match_ptr(of_st_nci_i2c_match),
.acpi_match_table = ACPI_PTR(st_nci_i2c_acpi_match),
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index a53e5df80..56f2112e0 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -113,8 +113,6 @@ static struct nci_hci_gate st_nci_gates[] = {
{NCI_HCI_IDENTITY_MGMT_GATE, NCI_HCI_INVALID_PIPE,
ST_NCI_HOST_CONTROLLER_ID},
- {NCI_HCI_LOOPBACK_GATE, NCI_HCI_INVALID_PIPE,
- ST_NCI_HOST_CONTROLLER_ID},
/* Secure element pipes are created by secure element host */
{ST_NCI_CONNECTIVITY_GATE, NCI_HCI_DO_NOT_OPEN_PIPE,
@@ -222,7 +220,7 @@ int st_nci_hci_load_session(struct nci_dev *ndev)
*/
dm_pipe_info = (struct st_nci_pipe_info *)skb_pipe_info->data;
if (dm_pipe_info->dst_gate_id == ST_NCI_APDU_READER_GATE &&
- dm_pipe_info->src_host_id != ST_NCI_ESE_HOST_ID) {
+ dm_pipe_info->src_host_id == ST_NCI_UICC_HOST_ID) {
pr_err("Unexpected apdu_reader pipe on host %x\n",
dm_pipe_info->src_host_id);
kfree_skb(skb_pipe_info);
@@ -385,9 +383,6 @@ void st_nci_hci_event_received(struct nci_dev *ndev, u8 pipe,
case ST_NCI_CONNECTIVITY_GATE:
st_nci_hci_connectivity_event_received(ndev, host, event, skb);
break;
- case NCI_HCI_LOOPBACK_GATE:
- st_nci_hci_loopback_event_received(ndev, event, skb);
- break;
}
}
EXPORT_SYMBOL_GPL(st_nci_hci_event_received);
@@ -520,7 +515,7 @@ int st_nci_enable_se(struct nci_dev *ndev, u32 se_idx)
* Same for eSE.
*/
r = st_nci_control_se(ndev, se_idx, ST_NCI_SE_MODE_ON);
- if (r == ST_NCI_HCI_HOST_ID_ESE) {
+ if (r == ST_NCI_ESE_HOST_ID) {
st_nci_se_get_atr(ndev);
r = nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE,
ST_NCI_EVT_SE_SOFT_RESET, NULL, 0);
@@ -600,10 +595,12 @@ static int st_nci_hci_network_init(struct nci_dev *ndev)
* HCI will be used here only for proprietary commands.
*/
if (test_bit(ST_NCI_FACTORY_MODE, &info->flags))
- r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id,
+ r = nci_nfcee_mode_set(ndev,
+ ndev->hci_dev->conn_info->dest_params->id,
NCI_NFCEE_DISABLE);
else
- r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->id,
+ r = nci_nfcee_mode_set(ndev,
+ ndev->hci_dev->conn_info->dest_params->id,
NCI_NFCEE_ENABLE);
free_dest_params:
@@ -629,17 +626,10 @@ int st_nci_discover_se(struct nci_dev *ndev)
if (test_bit(ST_NCI_FACTORY_MODE, &info->flags))
return 0;
- if (info->se_info.se_status->is_ese_present &&
- info->se_info.se_status->is_uicc_present) {
+ if (info->se_info.se_status->is_uicc_present)
white_list[wl_size++] = ST_NCI_UICC_HOST_ID;
+ if (info->se_info.se_status->is_ese_present)
white_list[wl_size++] = ST_NCI_ESE_HOST_ID;
- } else if (!info->se_info.se_status->is_ese_present &&
- info->se_info.se_status->is_uicc_present) {
- white_list[wl_size++] = ST_NCI_UICC_HOST_ID;
- } else if (info->se_info.se_status->is_ese_present &&
- !info->se_info.se_status->is_uicc_present) {
- white_list[wl_size++] = ST_NCI_ESE_HOST_ID;
- }
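
For review, the two independent ifs cover the same cases as the removed if/else chain, in the same order:

/*
 * uicc only  -> white_list = { UICC }
 * ese only   -> white_list = { ESE }
 * both       -> white_list = { UICC, ESE }
 * neither    -> wl_size == 0, nci_hci_set_param() is skipped
 */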
if (wl_size) {
r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE,
@@ -672,7 +662,7 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
pr_debug("\n");
switch (se_idx) {
- case ST_NCI_HCI_HOST_ID_ESE:
+ case ST_NCI_ESE_HOST_ID:
info->se_info.cb = cb;
info->se_info.cb_context = cb_context;
mod_timer(&info->se_info.bwi_timer, jiffies +
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index 821dfa950..89e341eba 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -43,7 +43,7 @@
#define ST_NCI_SPI_DRIVER_NAME "st_nci_spi"
-#define ST_NCI_GPIO_NAME_RESET "clf_reset"
+#define ST_NCI_GPIO_NAME_RESET "reset"
struct st_nci_spi_phy {
struct spi_device *spi_dev;
@@ -226,19 +226,9 @@ static struct nfc_phy_ops spi_phy_ops = {
static int st_nci_spi_acpi_request_resources(struct spi_device *spi_dev)
{
struct st_nci_spi_phy *phy = spi_get_drvdata(spi_dev);
- const struct acpi_device_id *id;
struct gpio_desc *gpiod_reset;
- struct device *dev;
-
- if (!spi_dev)
- return -EINVAL;
-
- dev = &spi_dev->dev;
-
- /* Match the struct device against a given list of ACPI IDs */
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!id)
- return -ENODEV;
+ struct device *dev = &spi_dev->dev;
+ u8 tmp;
/* Get RESET GPIO from ACPI */
gpiod_reset = devm_gpiod_get_index(dev, ST_NCI_GPIO_NAME_RESET, 1,
@@ -252,10 +242,18 @@ static int st_nci_spi_acpi_request_resources(struct spi_device *spi_dev)
phy->irq_polarity = irq_get_trigger_type(spi_dev->irq);
- phy->se_status.is_ese_present =
- device_property_present(dev, "ese-present");
- phy->se_status.is_uicc_present =
- device_property_present(dev, "uicc-present");
+ phy->se_status.is_ese_present = false;
+ phy->se_status.is_uicc_present = false;
+
+ if (device_property_present(dev, "ese-present")) {
+ device_property_read_u8(dev, "ese-present", &tmp);
+ phy->se_status.is_ese_present = tmp;
+ }
+
+ if (device_property_present(dev, "uicc-present")) {
+ device_property_read_u8(dev, "uicc-present", &tmp);
+ phy->se_status.is_uicc_present = tmp;
+ }
return 0;
}
diff --git a/drivers/nfc/st-nci/st-nci.h b/drivers/nfc/st-nci/st-nci.h
index 8b9f77b02..afaf138b7 100644
--- a/drivers/nfc/st-nci/st-nci.h
+++ b/drivers/nfc/st-nci/st-nci.h
@@ -32,7 +32,6 @@
* sequence of at most 32 characters.
*/
#define ST_NCI_ESE_MAX_LENGTH 33
-#define ST_NCI_HCI_HOST_ID_ESE 0xc0
#define ST_NCI_DEVICE_MGNT_GATE 0x01
@@ -93,8 +92,7 @@ struct st_nci_se_info {
* white list).
* @HCI_DM_FIELD_GENERATOR: Allow to generate different kind of RF
* technology. When using this command to anti-collision is done.
- * @HCI_LOOPBACK: Allow to echo a command and test the Dh to CLF
- * connectivity.
+ * @LOOPBACK: Allow to echo a command and test the Dh to CLF connectivity.
* @HCI_DM_VDC_MEASUREMENT_VALUE: Allow to measure the field applied on the
* CLF antenna. A value between 0 and 0x0f is returned. 0 is maximum.
* @HCI_DM_FWUPD_START: Allow to put CLF into firmware update mode. It is a
@@ -116,7 +114,7 @@ enum nfc_vendor_cmds {
HCI_DM_RESET,
HCI_GET_PARAM,
HCI_DM_FIELD_GENERATOR,
- HCI_LOOPBACK,
+ LOOPBACK,
HCI_DM_FWUPD_START,
HCI_DM_FWUPD_END,
HCI_DM_VDC_MEASUREMENT_VALUE,
@@ -124,17 +122,11 @@ enum nfc_vendor_cmds {
MANUFACTURER_SPECIFIC,
};
-struct st_nci_vendor_info {
- struct completion req_completion;
- struct sk_buff *rx_skb;
-};
-
struct st_nci_info {
struct llt_ndlc *ndlc;
unsigned long flags;
struct st_nci_se_info se_info;
- struct st_nci_vendor_info vendor_info;
};
void st_nci_remove(struct nci_dev *ndev);
@@ -156,8 +148,6 @@ void st_nci_hci_event_received(struct nci_dev *ndev, u8 pipe,
void st_nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd,
struct sk_buff *skb);
-void st_nci_hci_loopback_event_received(struct nci_dev *ndev, u8 event,
- struct sk_buff *skb);
int st_nci_vendor_cmds_init(struct nci_dev *ndev);
#endif /* __LOCAL_ST_NCI_H_ */
diff --git a/drivers/nfc/st-nci/vendor_cmds.c b/drivers/nfc/st-nci/vendor_cmds.c
index b5debce4a..1a836c77c 100644
--- a/drivers/nfc/st-nci/vendor_cmds.c
+++ b/drivers/nfc/st-nci/vendor_cmds.c
@@ -333,62 +333,28 @@ exit:
return r;
}
-void st_nci_hci_loopback_event_received(struct nci_dev *ndev, u8 event,
- struct sk_buff *skb)
-{
- struct st_nci_info *info = nci_get_drvdata(ndev);
-
- switch (event) {
- case ST_NCI_EVT_POST_DATA:
- info->vendor_info.rx_skb = skb;
- break;
- default:
- nfc_err(&ndev->nfc_dev->dev, "Unexpected event on loopback gate\n");
- }
- complete(&info->vendor_info.req_completion);
-}
-EXPORT_SYMBOL(st_nci_hci_loopback_event_received);
-
-static int st_nci_hci_loopback(struct nfc_dev *dev, void *data,
- size_t data_len)
+static int st_nci_loopback(struct nfc_dev *dev, void *data,
+ size_t data_len)
{
int r;
- struct sk_buff *msg;
+ struct sk_buff *msg, *skb;
struct nci_dev *ndev = nfc_get_drvdata(dev);
- struct st_nci_info *info = nci_get_drvdata(ndev);
if (data_len <= 0)
return -EPROTO;
- reinit_completion(&info->vendor_info.req_completion);
- info->vendor_info.rx_skb = NULL;
+ r = nci_nfcc_loopback(ndev, data, data_len, &skb);
+ if (r < 0)
+ return r;
- r = nci_hci_send_event(ndev, NCI_HCI_LOOPBACK_GATE,
- ST_NCI_EVT_POST_DATA, data, data_len);
- if (r != data_len) {
- r = -EPROTO;
- goto exit;
- }
-
- wait_for_completion_interruptible(&info->vendor_info.req_completion);
-
- if (!info->vendor_info.rx_skb ||
- info->vendor_info.rx_skb->len != data_len) {
- r = -EPROTO;
- goto exit;
- }
-
- msg = nfc_vendor_cmd_alloc_reply_skb(ndev->nfc_dev,
- ST_NCI_VENDOR_OUI,
- HCI_LOOPBACK,
- info->vendor_info.rx_skb->len);
+ msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
+ LOOPBACK, skb->len);
if (!msg) {
r = -ENOMEM;
goto free_skb;
}
- if (nla_put(msg, NFC_ATTR_VENDOR_DATA, info->vendor_info.rx_skb->len,
- info->vendor_info.rx_skb->data)) {
+ if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) {
kfree_skb(msg);
r = -ENOBUFS;
goto free_skb;
@@ -396,8 +362,7 @@ static int st_nci_hci_loopback(struct nfc_dev *dev, void *data,
r = nfc_vendor_cmd_reply(msg);
free_skb:
- kfree_skb(info->vendor_info.rx_skb);
-exit:
+ kfree_skb(skb);
return r;
}
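
Review note: the loopback vendor command now delegates the echo round-trip to the NCI core. A sketch of the nci_nfcc_loopback() contract as used above, assuming the caller owns the returned buffer:

struct sk_buff *resp;
int r;

r = nci_nfcc_loopback(ndev, data, data_len, &resp);
if (r < 0)
	return r;		/* no skb is handed back on failure */

/* ... consume resp->data / resp->len ... */
kfree_skb(resp);		/* caller frees the response */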
@@ -485,8 +450,8 @@ static struct nfc_vendor_cmd st_nci_vendor_cmds[] = {
},
{
.vendor_id = ST_NCI_VENDOR_OUI,
- .subcmd = HCI_LOOPBACK,
- .doit = st_nci_hci_loopback,
+ .subcmd = LOOPBACK,
+ .doit = st_nci_loopback,
},
{
.vendor_id = ST_NCI_VENDOR_OUI,
@@ -507,9 +472,6 @@ static struct nfc_vendor_cmd st_nci_vendor_cmds[] = {
int st_nci_vendor_cmds_init(struct nci_dev *ndev)
{
- struct st_nci_info *info = nci_get_drvdata(ndev);
-
- init_completion(&info->vendor_info.req_completion);
return nfc_set_vendor_cmds(ndev->nfc_dev, st_nci_vendor_cmds,
sizeof(st_nci_vendor_cmds));
}
diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c
index dd8b150fb..dacb91660 100644
--- a/drivers/nfc/st21nfca/core.c
+++ b/drivers/nfc/st21nfca/core.c
@@ -176,7 +176,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
*/
info = (struct st21nfca_pipe_info *) skb_pipe_info->data;
if (info->dst_gate_id == ST21NFCA_APDU_READER_GATE &&
- info->src_host_id != ST21NFCA_ESE_HOST_ID) {
+ info->src_host_id == NFC_HCI_UICC_HOST_ID) {
pr_err("Unexpected apdu_reader pipe on host %x\n",
info->src_host_id);
kfree_skb(skb_pipe_info);
@@ -262,17 +262,10 @@ static int st21nfca_hci_ready(struct nfc_hci_dev *hdev)
int wl_size = 0;
int r;
- if (info->se_status->is_ese_present &&
- info->se_status->is_uicc_present) {
+ if (info->se_status->is_uicc_present)
white_list[wl_size++] = NFC_HCI_UICC_HOST_ID;
+ if (info->se_status->is_ese_present)
white_list[wl_size++] = ST21NFCA_ESE_HOST_ID;
- } else if (!info->se_status->is_ese_present &&
- info->se_status->is_uicc_present) {
- white_list[wl_size++] = NFC_HCI_UICC_HOST_ID;
- } else if (info->se_status->is_ese_present &&
- !info->se_status->is_uicc_present) {
- white_list[wl_size++] = ST21NFCA_ESE_HOST_ID;
- }
if (wl_size) {
r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index d5a099b02..5a82f5539 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -62,7 +62,7 @@
#define ST21NFCA_HCI_I2C_DRIVER_NAME "st21nfca_hci_i2c"
-#define ST21NFCA_GPIO_NAME_EN "clf_enable"
+#define ST21NFCA_GPIO_NAME_EN "enable"
struct st21nfca_i2c_phy {
struct i2c_client *i2c_dev;
@@ -507,19 +507,9 @@ static struct nfc_phy_ops i2c_phy_ops = {
static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client)
{
struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
- const struct acpi_device_id *id;
struct gpio_desc *gpiod_ena;
- struct device *dev;
-
- if (!client)
- return -EINVAL;
-
- dev = &client->dev;
-
- /* Match the struct device against a given list of ACPI IDs */
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!id)
- return -ENODEV;
+ struct device *dev = &client->dev;
+ u8 tmp;
/* Get EN GPIO from ACPI */
gpiod_ena = devm_gpiod_get_index(dev, ST21NFCA_GPIO_NAME_EN, 1,
@@ -533,10 +523,18 @@ static int st21nfca_hci_i2c_acpi_request_resources(struct i2c_client *client)
phy->irq_polarity = irq_get_trigger_type(client->irq);
- phy->se_status.is_ese_present =
- device_property_present(dev, "ese-present");
- phy->se_status.is_uicc_present =
- device_property_present(dev, "uicc-present");
+ phy->se_status.is_ese_present = false;
+ phy->se_status.is_uicc_present = false;
+
+ if (device_property_present(dev, "ese-present")) {
+ device_property_read_u8(dev, "ese-present", &tmp);
+ phy->se_status.is_ese_present = tmp;
+ }
+
+ if (device_property_present(dev, "uicc-present")) {
+ device_property_read_u8(dev, "uicc-present", &tmp);
+ phy->se_status.is_uicc_present = tmp;
+ }
return 0;
}
@@ -723,7 +721,6 @@ MODULE_DEVICE_TABLE(of, of_st21nfca_i2c_match);
static struct i2c_driver st21nfca_hci_i2c_driver = {
.driver = {
- .owner = THIS_MODULE,
.name = ST21NFCA_HCI_I2C_DRIVER_NAME,
.of_match_table = of_match_ptr(of_st21nfca_i2c_match),
.acpi_match_table = ACPI_PTR(st21nfca_hci_i2c_acpi_match),
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index bd56a16e4..3a98563d4 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -32,8 +32,6 @@
#define ST21NFCA_EVT_CONNECTIVITY 0x10
#define ST21NFCA_EVT_TRANSACTION 0x12
-#define ST21NFCA_ESE_HOST_ID 0xc0
-
#define ST21NFCA_SE_TO_HOT_PLUG 1000
/* Connectivity pipe only */
#define ST21NFCA_SE_COUNT_PIPE_UICC 0x01
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 53c11621d..7c8a3bf07 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -88,4 +88,17 @@ config NVDIMM_PFN
Select Y if unsure
+config NVDIMM_DAX
+ bool "NVDIMM DAX: Raw access to persistent memory"
+ default LIBNVDIMM
+ depends on NVDIMM_PFN
+ help
+ Support raw device dax access to a persistent memory
+ namespace. For environments that want to hard partition
+ persistent memory, this capability provides a mechanism to
+ sub-divide a namespace into character devices that can only be
+ accessed via DAX (mmap(2)).
+
+ Select Y if unsure
+
endif
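
The help text above describes character devices reachable only via DAX mmap(2). A userspace sketch of such access; the device path is illustrative, and device-dax typically requires the mapping length to be a multiple of the device alignment (2M here as an assumption):

#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2UL << 20;		/* one alignment-sized chunk */
	int fd = open("/dev/dax0.0", O_RDWR);
	void *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	memset(p, 0, len);		/* loads/stores hit pmem directly */
	munmap(p, len);
	close(fd);
	return 0;
}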
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index ea84d3c4e..909554c3f 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -23,3 +23,4 @@ libnvdimm-y += label.o
libnvdimm-$(CONFIG_ND_CLAIM) += claim.o
libnvdimm-$(CONFIG_BTT) += btt_devs.o
libnvdimm-$(CONFIG_NVDIMM_PFN) += pfn_devs.o
+libnvdimm-$(CONFIG_NVDIMM_DAX) += dax_devs.o
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index e9ff9229d..495e06d9f 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -21,19 +21,19 @@
#include <linux/sizes.h>
#include "nd.h"
-struct nd_blk_device {
- struct request_queue *queue;
- struct gendisk *disk;
- struct nd_namespace_blk *nsblk;
- struct nd_blk_region *ndbr;
- size_t disk_size;
- u32 sector_size;
- u32 internal_lbasize;
-};
+static u32 nsblk_meta_size(struct nd_namespace_blk *nsblk)
+{
+ return nsblk->lbasize - ((nsblk->lbasize >= 4096) ? 4096 : 512);
+}
+
+static u32 nsblk_internal_lbasize(struct nd_namespace_blk *nsblk)
+{
+ return roundup(nsblk->lbasize, INT_LBASIZE_ALIGNMENT);
+}
-static u32 nd_blk_meta_size(struct nd_blk_device *blk_dev)
+static u32 nsblk_sector_size(struct nd_namespace_blk *nsblk)
{
- return blk_dev->nsblk->lbasize - blk_dev->sector_size;
+ return nsblk->lbasize - nsblk_meta_size(nsblk);
}
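
Worked example of the three helpers above, assuming INT_LBASIZE_ALIGNMENT is 64 as nd.h defines it:

/*
 * lbasize = 4104 (4096 data + 8 bytes metadata):
 *   nsblk_meta_size()        = 4104 - 4096       = 8
 *   nsblk_sector_size()      = 4104 - 8          = 4096
 *   nsblk_internal_lbasize() = roundup(4104, 64) = 4160
 * lbasize = 520 (512 + 8):
 *   meta = 8, sector = 512, internal = roundup(520, 64) = 576
 */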
static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
@@ -57,20 +57,29 @@ static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
return SIZE_MAX;
}
+static struct nd_blk_region *to_ndbr(struct nd_namespace_blk *nsblk)
+{
+ struct nd_region *nd_region;
+ struct device *parent;
+
+ parent = nsblk->common.dev.parent;
+ nd_region = container_of(parent, struct nd_region, dev);
+ return container_of(nd_region, struct nd_blk_region, nd_region);
+}
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
-static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
- struct bio_integrity_payload *bip, u64 lba,
- int rw)
+static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
+ struct bio_integrity_payload *bip, u64 lba, int rw)
{
- unsigned int len = nd_blk_meta_size(blk_dev);
+ struct nd_blk_region *ndbr = to_ndbr(nsblk);
+ unsigned int len = nsblk_meta_size(nsblk);
resource_size_t dev_offset, ns_offset;
- struct nd_namespace_blk *nsblk;
- struct nd_blk_region *ndbr;
+ u32 internal_lbasize, sector_size;
int err = 0;
- nsblk = blk_dev->nsblk;
- ndbr = blk_dev->ndbr;
- ns_offset = lba * blk_dev->internal_lbasize + blk_dev->sector_size;
+ internal_lbasize = nsblk_internal_lbasize(nsblk);
+ sector_size = nsblk_sector_size(nsblk);
+ ns_offset = lba * internal_lbasize + sector_size;
dev_offset = to_dev_offset(nsblk, ns_offset, len);
if (dev_offset == SIZE_MAX)
return -EIO;
@@ -104,25 +113,26 @@ static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
-static int nd_blk_rw_integrity(struct nd_blk_device *blk_dev,
- struct bio_integrity_payload *bip, u64 lba,
- int rw)
+static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
+ struct bio_integrity_payload *bip, u64 lba, int rw)
{
return 0;
}
#endif
-static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
- struct bio_integrity_payload *bip, struct page *page,
- unsigned int len, unsigned int off, int rw,
- sector_t sector)
+static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
+ struct bio_integrity_payload *bip, struct page *page,
+ unsigned int len, unsigned int off, int rw, sector_t sector)
{
- struct nd_blk_region *ndbr = blk_dev->ndbr;
+ struct nd_blk_region *ndbr = to_ndbr(nsblk);
resource_size_t dev_offset, ns_offset;
+ u32 internal_lbasize, sector_size;
int err = 0;
void *iobuf;
u64 lba;
+ internal_lbasize = nsblk_internal_lbasize(nsblk);
+ sector_size = nsblk_sector_size(nsblk);
while (len) {
unsigned int cur_len;
@@ -132,11 +142,11 @@ static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
* Block Window setup/move steps. The do_io routine is capable
* of handling len <= PAGE_SIZE.
*/
- cur_len = bip ? min(len, blk_dev->sector_size) : len;
+ cur_len = bip ? min(len, sector_size) : len;
- lba = div_u64(sector << SECTOR_SHIFT, blk_dev->sector_size);
- ns_offset = lba * blk_dev->internal_lbasize;
- dev_offset = to_dev_offset(blk_dev->nsblk, ns_offset, cur_len);
+ lba = div_u64(sector << SECTOR_SHIFT, sector_size);
+ ns_offset = lba * internal_lbasize;
+ dev_offset = to_dev_offset(nsblk, ns_offset, cur_len);
if (dev_offset == SIZE_MAX)
return -EIO;
@@ -147,13 +157,13 @@ static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
return err;
if (bip) {
- err = nd_blk_rw_integrity(blk_dev, bip, lba, rw);
+ err = nd_blk_rw_integrity(nsblk, bip, lba, rw);
if (err)
return err;
}
len -= cur_len;
off += cur_len;
- sector += blk_dev->sector_size >> SECTOR_SHIFT;
+ sector += sector_size >> SECTOR_SHIFT;
}
return err;
@@ -161,10 +171,8 @@ static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
{
- struct block_device *bdev = bio->bi_bdev;
- struct gendisk *disk = bdev->bd_disk;
struct bio_integrity_payload *bip;
- struct nd_blk_device *blk_dev;
+ struct nd_namespace_blk *nsblk;
struct bvec_iter iter;
unsigned long start;
struct bio_vec bvec;
@@ -183,17 +191,17 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
}
bip = bio_integrity(bio);
- blk_dev = disk->private_data;
+ nsblk = q->queuedata;
rw = bio_data_dir(bio);
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
unsigned int len = bvec.bv_len;
BUG_ON(len > PAGE_SIZE);
- err = nd_blk_do_bvec(blk_dev, bip, bvec.bv_page, len,
- bvec.bv_offset, rw, iter.bi_sector);
+ err = nsblk_do_bvec(nsblk, bip, bvec.bv_page, len,
+ bvec.bv_offset, rw, iter.bi_sector);
if (err) {
- dev_info(&blk_dev->nsblk->common.dev,
+ dev_dbg(&nsblk->common.dev,
"io error in %s sector %lld, len %d,\n",
(rw == READ) ? "READ" : "WRITE",
(unsigned long long) iter.bi_sector, len);
@@ -209,17 +217,16 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
-static int nd_blk_rw_bytes(struct nd_namespace_common *ndns,
+static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
resource_size_t offset, void *iobuf, size_t n, int rw)
{
- struct nd_blk_device *blk_dev = dev_get_drvdata(ndns->claim);
- struct nd_namespace_blk *nsblk = blk_dev->nsblk;
- struct nd_blk_region *ndbr = blk_dev->ndbr;
+ struct nd_namespace_blk *nsblk = to_nd_namespace_blk(&ndns->dev);
+ struct nd_blk_region *ndbr = to_ndbr(nsblk);
resource_size_t dev_offset;
dev_offset = to_dev_offset(nsblk, offset, n);
- if (unlikely(offset + n > blk_dev->disk_size)) {
+ if (unlikely(offset + n > nsblk->size)) {
dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
return -EFAULT;
}
@@ -235,51 +242,65 @@ static const struct block_device_operations nd_blk_fops = {
.revalidate_disk = nvdimm_revalidate_disk,
};
-static int nd_blk_attach_disk(struct nd_namespace_common *ndns,
- struct nd_blk_device *blk_dev)
+static void nd_blk_release_queue(void *q)
+{
+ blk_cleanup_queue(q);
+}
+
+static void nd_blk_release_disk(void *disk)
+{
+ del_gendisk(disk);
+ put_disk(disk);
+}
+
+static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
{
+ struct device *dev = &nsblk->common.dev;
resource_size_t available_disk_size;
+ struct request_queue *q;
struct gendisk *disk;
u64 internal_nlba;
- internal_nlba = div_u64(blk_dev->disk_size, blk_dev->internal_lbasize);
- available_disk_size = internal_nlba * blk_dev->sector_size;
+ internal_nlba = div_u64(nsblk->size, nsblk_internal_lbasize(nsblk));
+ available_disk_size = internal_nlba * nsblk_sector_size(nsblk);
- blk_dev->queue = blk_alloc_queue(GFP_KERNEL);
- if (!blk_dev->queue)
+ q = blk_alloc_queue(GFP_KERNEL);
+ if (!q)
return -ENOMEM;
+ if (devm_add_action(dev, nd_blk_release_queue, q)) {
+ blk_cleanup_queue(q);
+ return -ENOMEM;
+ }
- blk_queue_make_request(blk_dev->queue, nd_blk_make_request);
- blk_queue_max_hw_sectors(blk_dev->queue, UINT_MAX);
- blk_queue_bounce_limit(blk_dev->queue, BLK_BOUNCE_ANY);
- blk_queue_logical_block_size(blk_dev->queue, blk_dev->sector_size);
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, blk_dev->queue);
+ blk_queue_make_request(q, nd_blk_make_request);
+ blk_queue_max_hw_sectors(q, UINT_MAX);
+ blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
+ blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ q->queuedata = nsblk;
- disk = blk_dev->disk = alloc_disk(0);
- if (!disk) {
- blk_cleanup_queue(blk_dev->queue);
+ disk = alloc_disk(0);
+ if (!disk)
+ return -ENOMEM;
+ if (devm_add_action(dev, nd_blk_release_disk, disk)) {
+ put_disk(disk);
return -ENOMEM;
}
- disk->driverfs_dev = &ndns->dev;
+ disk->driverfs_dev = dev;
disk->first_minor = 0;
disk->fops = &nd_blk_fops;
- disk->private_data = blk_dev;
- disk->queue = blk_dev->queue;
+ disk->queue = q;
disk->flags = GENHD_FL_EXT_DEVT;
- nvdimm_namespace_disk_name(ndns, disk->disk_name);
+ nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
set_capacity(disk, 0);
add_disk(disk);
- if (nd_blk_meta_size(blk_dev)) {
- int rc = nd_integrity_init(disk, nd_blk_meta_size(blk_dev));
+ if (nsblk_meta_size(nsblk)) {
+ int rc = nd_integrity_init(disk, nsblk_meta_size(nsblk));
- if (rc) {
- del_gendisk(disk);
- put_disk(disk);
- blk_cleanup_queue(blk_dev->queue);
+ if (rc)
return rc;
- }
}
set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
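
Review note: the add-action-then-undo-on-failure idiom above can be collapsed with devm_add_action_or_reset(), which runs the action itself when registration fails. That helper lands around this kernel generation, so this is a sketch rather than a change to the patch:

q = blk_alloc_queue(GFP_KERNEL);
if (!q)
	return -ENOMEM;
if (devm_add_action_or_reset(dev, nd_blk_release_queue, q))
	return -ENOMEM;	/* queue already torn down by the reset path */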
@@ -291,56 +312,29 @@ static int nd_blk_probe(struct device *dev)
{
struct nd_namespace_common *ndns;
struct nd_namespace_blk *nsblk;
- struct nd_blk_device *blk_dev;
- int rc;
ndns = nvdimm_namespace_common_probe(dev);
if (IS_ERR(ndns))
return PTR_ERR(ndns);
- blk_dev = kzalloc(sizeof(*blk_dev), GFP_KERNEL);
- if (!blk_dev)
- return -ENOMEM;
-
nsblk = to_nd_namespace_blk(&ndns->dev);
- blk_dev->disk_size = nvdimm_namespace_capacity(ndns);
- blk_dev->ndbr = to_nd_blk_region(dev->parent);
- blk_dev->nsblk = to_nd_namespace_blk(&ndns->dev);
- blk_dev->internal_lbasize = roundup(nsblk->lbasize,
- INT_LBASIZE_ALIGNMENT);
- blk_dev->sector_size = ((nsblk->lbasize >= 4096) ? 4096 : 512);
- dev_set_drvdata(dev, blk_dev);
-
- ndns->rw_bytes = nd_blk_rw_bytes;
+ nsblk->size = nvdimm_namespace_capacity(ndns);
+ dev_set_drvdata(dev, nsblk);
+
+ ndns->rw_bytes = nsblk_rw_bytes;
if (is_nd_btt(dev))
- rc = nvdimm_namespace_attach_btt(ndns);
- else if (nd_btt_probe(ndns, blk_dev) == 0) {
+ return nvdimm_namespace_attach_btt(ndns);
+ else if (nd_btt_probe(dev, ndns) == 0) {
/* we'll come back as btt-blk */
- rc = -ENXIO;
+ return -ENXIO;
} else
- rc = nd_blk_attach_disk(ndns, blk_dev);
- if (rc)
- kfree(blk_dev);
- return rc;
-}
-
-static void nd_blk_detach_disk(struct nd_blk_device *blk_dev)
-{
- del_gendisk(blk_dev->disk);
- put_disk(blk_dev->disk);
- blk_cleanup_queue(blk_dev->queue);
+ return nsblk_attach_disk(nsblk);
}
static int nd_blk_remove(struct device *dev)
{
- struct nd_blk_device *blk_dev = dev_get_drvdata(dev);
-
if (is_nd_btt(dev))
- nvdimm_namespace_detach_btt(to_nd_btt(dev)->ndns);
- else
- nd_blk_detach_disk(blk_dev);
- kfree(blk_dev);
-
+ nvdimm_namespace_detach_btt(to_nd_btt(dev));
return 0;
}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index f068b6513..68a7c3c1e 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1306,7 +1306,7 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
struct btt *btt;
struct device *dev = &nd_btt->dev;
- btt = kzalloc(sizeof(struct btt), GFP_KERNEL);
+ btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
if (!btt)
return NULL;
@@ -1321,13 +1321,13 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
ret = discover_arenas(btt);
if (ret) {
dev_err(dev, "init: error in arena_discover: %d\n", ret);
- goto out_free;
+ return NULL;
}
if (btt->init_state != INIT_READY && nd_region->ro) {
dev_info(dev, "%s is read-only, unable to init btt metadata\n",
dev_name(&nd_region->dev));
- goto out_free;
+ return NULL;
} else if (btt->init_state != INIT_READY) {
btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
@@ -1337,29 +1337,25 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
ret = create_arenas(btt);
if (ret) {
dev_info(dev, "init: create_arenas: %d\n", ret);
- goto out_free;
+ return NULL;
}
ret = btt_meta_init(btt);
if (ret) {
dev_err(dev, "init: error in meta_init: %d\n", ret);
- goto out_free;
+ return NULL;
}
}
ret = btt_blk_init(btt);
if (ret) {
dev_err(dev, "init: error in blk_init: %d\n", ret);
- goto out_free;
+ return NULL;
}
btt_debugfs_init(btt);
return btt;
-
- out_free:
- kfree(btt);
- return NULL;
}
/**
@@ -1377,7 +1373,6 @@ static void btt_fini(struct btt *btt)
btt_blk_cleanup(btt);
free_arenas(btt);
debugfs_remove_recursive(btt->debugfs_dir);
- kfree(btt);
}
}
@@ -1388,11 +1383,15 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
struct btt *btt;
size_t rawsize;
- if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize)
+ if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
+ dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
return -ENODEV;
+ }
rawsize = nvdimm_namespace_capacity(ndns) - SZ_4K;
if (rawsize < ARENA_MIN_SIZE) {
+ dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
+ dev_name(&ndns->dev), ARENA_MIN_SIZE + SZ_4K);
return -ENXIO;
}
nd_region = to_nd_region(nd_btt->dev.parent);
@@ -1406,9 +1405,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);
-int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns)
+int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
- struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
struct btt *btt = nd_btt->btt;
btt_fini(btt);
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index cb477518d..816d0dae6 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -273,10 +273,10 @@ static int __nd_btt_probe(struct nd_btt *nd_btt,
return 0;
}
-int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
+int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
{
int rc;
- struct device *dev;
+ struct device *btt_dev;
struct btt_sb *btt_sb;
struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
@@ -284,21 +284,19 @@ int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
return -ENODEV;
nvdimm_bus_lock(&ndns->dev);
- dev = __nd_btt_create(nd_region, 0, NULL, ndns);
+ btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns);
nvdimm_bus_unlock(&ndns->dev);
- if (!dev)
+ if (!btt_dev)
return -ENOMEM;
- dev_set_drvdata(dev, drvdata);
- btt_sb = kzalloc(sizeof(*btt_sb), GFP_KERNEL);
- rc = __nd_btt_probe(to_nd_btt(dev), ndns, btt_sb);
- kfree(btt_sb);
- dev_dbg(&ndns->dev, "%s: btt: %s\n", __func__,
- rc == 0 ? dev_name(dev) : "<none>");
+ btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
+ rc = __nd_btt_probe(to_nd_btt(btt_dev), ndns, btt_sb);
+ dev_dbg(dev, "%s: btt: %s\n", __func__,
+ rc == 0 ? dev_name(btt_dev) : "<none>");
if (rc < 0) {
- struct nd_btt *nd_btt = to_nd_btt(dev);
+ struct nd_btt *nd_btt = to_nd_btt(btt_dev);
- __nd_detach_ndns(dev, &nd_btt->ndns);
- put_device(dev);
+ __nd_detach_ndns(btt_dev, &nd_btt->ndns);
+ put_device(btt_dev);
}
return rc;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 19f822d7f..f085f8bce 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -40,6 +40,8 @@ static int to_nd_device_type(struct device *dev)
return ND_DEVICE_REGION_PMEM;
else if (is_nd_blk(dev))
return ND_DEVICE_REGION_BLK;
+ else if (is_nd_dax(dev))
+ return ND_DEVICE_DAX_PMEM;
else if (is_nd_pmem(dev->parent) || is_nd_blk(dev->parent))
return nd_region_to_nstype(to_nd_region(dev->parent));
@@ -122,9 +124,10 @@ static int nvdimm_bus_remove(struct device *dev)
struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
struct module *provider = to_bus_provider(dev);
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
- int rc;
+ int rc = 0;
- rc = nd_drv->remove(dev);
+ if (nd_drv->remove)
+ rc = nd_drv->remove(dev);
nd_region_disable(nvdimm_bus, dev);
dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
@@ -246,6 +249,8 @@ static void nd_async_device_unregister(void *d, async_cookie_t cookie)
void __nd_device_register(struct device *dev)
{
+ if (!dev)
+ return;
dev->bus = &nvdimm_bus_type;
get_device(dev);
async_schedule_domain(nd_async_device_register, dev,
@@ -292,8 +297,8 @@ int __nd_driver_register(struct nd_device_driver *nd_drv, struct module *owner,
return -EINVAL;
}
- if (!nd_drv->probe || !nd_drv->remove) {
- pr_debug("->probe() and ->remove() must be specified\n");
+ if (!nd_drv->probe) {
+ pr_debug("%s ->probe() must be specified\n", mod_name);
return -EINVAL;
}
@@ -439,6 +444,12 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = {
.out_num = 3,
.out_sizes = { 4, 4, UINT_MAX, },
},
+ [ND_CMD_CALL] = {
+ .in_num = 2,
+ .in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
+ .out_num = 1,
+ .out_sizes = { UINT_MAX, },
+ },
};
const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd)
@@ -473,6 +484,12 @@ static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
.out_num = 3,
.out_sizes = { 4, 4, 8, },
},
+ [ND_CMD_CALL] = {
+ .in_num = 2,
+ .in_sizes = { sizeof(struct nd_cmd_pkg), UINT_MAX, },
+ .out_num = 1,
+ .out_sizes = { UINT_MAX, },
+ },
};
const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd)
@@ -500,6 +517,10 @@ u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
struct nd_cmd_vendor_hdr *hdr = buf;
return hdr->in_length;
+ } else if (cmd == ND_CMD_CALL) {
+ struct nd_cmd_pkg *pkg = buf;
+
+ return pkg->nd_size_in;
}
return UINT_MAX;
@@ -522,6 +543,12 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
return out_field[1];
else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
return out_field[1] - 8;
+ else if (cmd == ND_CMD_CALL) {
+ struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
+
+ return pkg->nd_size_out;
+ }
+
return UINT_MAX;
}
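
The ND_CMD_CALL plumbing above sizes the envelope from nd_size_in/nd_size_out in the user-supplied header. A userspace sketch of building such a passthrough, assuming the ND_IOCTL_CALL definition from linux/ndctl.h and an fd open on the bus (/dev/ndctlN) or dimm (/dev/nmemN) device:

#include <linux/ndctl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

int send_call(int fd, __u64 family, __u64 func,
	      const void *in, __u32 in_len, __u32 out_len)
{
	struct nd_cmd_pkg *pkg;
	int rc;

	/* calloc keeps nd_reserved2[] zeroed; the ioctl rejects it otherwise */
	pkg = calloc(1, sizeof(*pkg) + in_len + out_len);
	if (!pkg)
		return -1;
	pkg->nd_family = family;
	pkg->nd_command = func;
	pkg->nd_size_in = in_len;	/* consumed by nd_cmd_in_size() */
	pkg->nd_size_out = out_len;	/* consumed by nd_cmd_out_size() */
	memcpy(pkg->nd_payload, in, in_len);
	rc = ioctl(fd, ND_IOCTL_CALL, pkg);
	/* on success, output starts at pkg->nd_payload + in_len */
	free(pkg);
	return rc;
}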
@@ -588,25 +615,31 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
unsigned int cmd = _IOC_NR(ioctl_cmd);
void __user *p = (void __user *) arg;
struct device *dev = &nvdimm_bus->dev;
+ struct nd_cmd_pkg pkg;
const char *cmd_name, *dimm_name;
- unsigned long dsm_mask;
+ unsigned long cmd_mask;
void *buf;
int rc, i;
if (nvdimm) {
desc = nd_cmd_dimm_desc(cmd);
cmd_name = nvdimm_cmd_name(cmd);
- dsm_mask = nvdimm->dsm_mask ? *(nvdimm->dsm_mask) : 0;
+ cmd_mask = nvdimm->cmd_mask;
dimm_name = dev_name(&nvdimm->dev);
} else {
desc = nd_cmd_bus_desc(cmd);
cmd_name = nvdimm_bus_cmd_name(cmd);
- dsm_mask = nd_desc->dsm_mask;
+ cmd_mask = nd_desc->cmd_mask;
dimm_name = "bus";
}
+ if (cmd == ND_CMD_CALL) {
+ if (copy_from_user(&pkg, p, sizeof(pkg)))
+ return -EFAULT;
+ }
+
if (!desc || (desc->out_num + desc->in_num == 0) ||
- !test_bit(cmd, &dsm_mask))
+ !test_bit(cmd, &cmd_mask))
return -ENOTTY;
/* fail write commands (when read-only) */
@@ -616,6 +649,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
case ND_CMD_SET_CONFIG_DATA:
case ND_CMD_ARS_START:
case ND_CMD_CLEAR_ERROR:
+ case ND_CMD_CALL:
dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
nvdimm ? nvdimm_cmd_name(cmd)
: nvdimm_bus_cmd_name(cmd));
@@ -643,6 +677,16 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
in_len += in_size;
}
+ if (cmd == ND_CMD_CALL) {
+ dev_dbg(dev, "%s:%s, idx: %llu, in: %zu, out: %zu, len %zu\n",
+ __func__, dimm_name, pkg.nd_command,
+ in_len, out_len, buf_len);
+
+ for (i = 0; i < ARRAY_SIZE(pkg.nd_reserved2); i++)
+ if (pkg.nd_reserved2[i])
+ return -EINVAL;
+ }
+
/* process an output envelope */
for (i = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
@@ -783,6 +827,9 @@ int __init nvdimm_bus_init(void)
{
int rc;
+ BUILD_BUG_ON(sizeof(struct nd_smart_payload) != 128);
+ BUILD_BUG_ON(sizeof(struct nd_smart_threshold_payload) != 8);
+
rc = bus_register(&nvdimm_bus_type);
if (rc)
return rc;
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index e8f03b0e9..8b2e3c4fb 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -12,6 +12,7 @@
*/
#include <linux/device.h>
#include <linux/sizes.h>
+#include <linux/pmem.h>
#include "nd-core.h"
#include "pfn.h"
#include "btt.h"
@@ -84,12 +85,33 @@ static bool is_idle(struct device *dev, struct nd_namespace_common *ndns)
seed = nd_region->btt_seed;
else if (is_nd_pfn(dev))
seed = nd_region->pfn_seed;
+ else if (is_nd_dax(dev))
+ seed = nd_region->dax_seed;
if (seed == dev || ndns || dev->driver)
return false;
return true;
}
+struct nd_pfn *to_nd_pfn_safe(struct device *dev)
+{
+ /*
+ * pfn device attributes are re-used by dax device instances, so we
+ * need to be careful to perform the correct device-to-nd_pfn
+ * conversion.
+ */
+ if (is_nd_pfn(dev))
+ return to_nd_pfn(dev);
+
+ if (is_nd_dax(dev)) {
+ struct nd_dax *nd_dax = to_nd_dax(dev);
+
+ return &nd_dax->nd_pfn;
+ }
+
+ WARN_ON(1);
+ return NULL;
+}
+
static void nd_detach_and_reset(struct device *dev,
struct nd_namespace_common **_ndns)
{
@@ -103,8 +125,8 @@ static void nd_detach_and_reset(struct device *dev,
nd_btt->lbasize = 0;
kfree(nd_btt->uuid);
nd_btt->uuid = NULL;
- } else if (is_nd_pfn(dev)) {
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ } else if (is_nd_pfn(dev) || is_nd_dax(dev)) {
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
kfree(nd_pfn->uuid);
nd_pfn->uuid = NULL;
@@ -199,3 +221,63 @@ u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
return sum;
}
EXPORT_SYMBOL(nd_sb_checksum);
+
+static int nsio_rw_bytes(struct nd_namespace_common *ndns,
+ resource_size_t offset, void *buf, size_t size, int rw)
+{
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+
+ if (unlikely(offset + size > nsio->size)) {
+ dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
+ return -EFAULT;
+ }
+
+ if (rw == READ) {
+ unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
+
+ if (unlikely(is_bad_pmem(&nsio->bb, offset / 512, sz_align)))
+ return -EIO;
+ return memcpy_from_pmem(buf, nsio->addr + offset, size);
+ } else {
+ memcpy_to_pmem(nsio->addr + offset, buf, size);
+ wmb_pmem();
+ }
+
+ return 0;
+}
+
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
+{
+ struct resource *res = &nsio->res;
+ struct nd_namespace_common *ndns = &nsio->common;
+
+ nsio->size = resource_size(res);
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(dev))) {
+ dev_warn(dev, "could not reserve region %pR\n", res);
+ return -EBUSY;
+ }
+
+ ndns->rw_bytes = nsio_rw_bytes;
+ if (devm_init_badblocks(dev, &nsio->bb))
+ return -ENOMEM;
+ nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
+ &nsio->res);
+
+ nsio->addr = devm_memremap(dev, res->start, resource_size(res),
+ ARCH_MEMREMAP_PMEM);
+ if (IS_ERR(nsio->addr))
+ return PTR_ERR(nsio->addr);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_nsio_enable);
+
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio)
+{
+ struct resource *res = &nsio->res;
+
+ devm_memunmap(dev, nsio->addr);
+ devm_exit_badblocks(dev, &nsio->bb);
+ devm_release_mem_region(dev, res->start, resource_size(res));
+}
+EXPORT_SYMBOL_GPL(devm_nsio_disable);
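
Worked example of the sz_align computation in nsio_rw_bytes() above, which widens the badblocks check to whole 512-byte sectors:

/*
 * offset = 300, size = 600:
 *   offset & (512 - 1)    = 300
 *   ALIGN(600 + 300, 512) = 1024
 * is_bad_pmem() then checks 1024 / 512 = 2 sectors starting at
 * sector offset / 512 = 0, covering the full unaligned request.
 */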
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 182a93fe3..be8976431 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -251,7 +251,7 @@ static ssize_t commands_show(struct device *dev,
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
- for_each_set_bit(cmd, &nd_desc->dsm_mask, BITS_PER_LONG)
+ for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
len += sprintf(buf + len, "\n");
return len;
@@ -648,6 +648,9 @@ static __exit void libnvdimm_exit(void)
nd_region_exit();
nvdimm_exit();
nvdimm_bus_exit();
+ nd_region_devs_exit();
+ nvdimm_devs_exit();
+ ida_destroy(&nd_ida);
}
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
new file mode 100644
index 000000000..45fa82cae
--- /dev/null
+++ b/drivers/nvdimm/dax_devs.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/device.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include "nd-core.h"
+#include "pfn.h"
+#include "nd.h"
+
+static void nd_dax_release(struct device *dev)
+{
+ struct nd_region *nd_region = to_nd_region(dev->parent);
+ struct nd_dax *nd_dax = to_nd_dax(dev);
+ struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
+
+ dev_dbg(dev, "%s\n", __func__);
+ nd_detach_ndns(dev, &nd_pfn->ndns);
+ ida_simple_remove(&nd_region->dax_ida, nd_pfn->id);
+ kfree(nd_pfn->uuid);
+ kfree(nd_dax);
+}
+
+static struct device_type nd_dax_device_type = {
+ .name = "nd_dax",
+ .release = nd_dax_release,
+};
+
+bool is_nd_dax(struct device *dev)
+{
+ return dev ? dev->type == &nd_dax_device_type : false;
+}
+EXPORT_SYMBOL(is_nd_dax);
+
+struct nd_dax *to_nd_dax(struct device *dev)
+{
+ struct nd_dax *nd_dax = container_of(dev, struct nd_dax, nd_pfn.dev);
+
+ WARN_ON(!is_nd_dax(dev));
+ return nd_dax;
+}
+EXPORT_SYMBOL(to_nd_dax);
+
+static const struct attribute_group *nd_dax_attribute_groups[] = {
+ &nd_pfn_attribute_group,
+ &nd_device_attribute_group,
+ &nd_numa_attribute_group,
+ NULL,
+};
+
+static struct nd_dax *nd_dax_alloc(struct nd_region *nd_region)
+{
+ struct nd_pfn *nd_pfn;
+ struct nd_dax *nd_dax;
+ struct device *dev;
+
+ nd_dax = kzalloc(sizeof(*nd_dax), GFP_KERNEL);
+ if (!nd_dax)
+ return NULL;
+
+ nd_pfn = &nd_dax->nd_pfn;
+ nd_pfn->id = ida_simple_get(&nd_region->dax_ida, 0, 0, GFP_KERNEL);
+ if (nd_pfn->id < 0) {
+ kfree(nd_dax);
+ return NULL;
+ }
+
+ dev = &nd_pfn->dev;
+ dev_set_name(dev, "dax%d.%d", nd_region->id, nd_pfn->id);
+ dev->groups = nd_dax_attribute_groups;
+ dev->type = &nd_dax_device_type;
+ dev->parent = &nd_region->dev;
+
+ return nd_dax;
+}
+
+struct device *nd_dax_create(struct nd_region *nd_region)
+{
+ struct device *dev = NULL;
+ struct nd_dax *nd_dax;
+
+ if (!is_nd_pmem(&nd_region->dev))
+ return NULL;
+
+ nd_dax = nd_dax_alloc(nd_region);
+ if (nd_dax)
+ dev = nd_pfn_devinit(&nd_dax->nd_pfn, NULL);
+ __nd_device_register(dev);
+ return dev;
+}
+
+int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
+{
+ int rc;
+ struct nd_dax *nd_dax;
+ struct device *dax_dev;
+ struct nd_pfn *nd_pfn;
+ struct nd_pfn_sb *pfn_sb;
+ struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
+
+ if (ndns->force_raw)
+ return -ENODEV;
+
+ nvdimm_bus_lock(&ndns->dev);
+ nd_dax = nd_dax_alloc(nd_region);
+ nd_pfn = &nd_dax->nd_pfn;
+ dax_dev = nd_pfn_devinit(nd_pfn, ndns);
+ nvdimm_bus_unlock(&ndns->dev);
+ if (!dax_dev)
+ return -ENOMEM;
+ pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ nd_pfn->pfn_sb = pfn_sb;
+ rc = nd_pfn_validate(nd_pfn, DAX_SIG);
+ dev_dbg(dev, "%s: dax: %s\n", __func__,
+ rc == 0 ? dev_name(dax_dev) : "<none>");
+ if (rc < 0) {
+ __nd_detach_ndns(dax_dev, &nd_pfn->ndns);
+ put_device(dax_dev);
+ } else
+ __nd_device_register(dax_dev);
+
+ return rc;
+}
+EXPORT_SYMBOL(nd_dax_probe);
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index c56f88217..bbde28d3d 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -37,9 +37,9 @@ static int __validate_dimm(struct nvdimm_drvdata *ndd)
nvdimm = to_nvdimm(ndd->dev);
- if (!nvdimm->dsm_mask)
+ if (!nvdimm->cmd_mask)
return -ENXIO;
- if (!test_bit(ND_CMD_GET_CONFIG_DATA, nvdimm->dsm_mask))
+ if (!test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask))
return -ENXIO;
return 0;
@@ -263,6 +263,12 @@ const char *nvdimm_name(struct nvdimm *nvdimm)
}
EXPORT_SYMBOL_GPL(nvdimm_name);
+unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
+{
+ return nvdimm->cmd_mask;
+}
+EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);
+
void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
if (nvdimm)
@@ -277,10 +283,10 @@ static ssize_t commands_show(struct device *dev,
struct nvdimm *nvdimm = to_nvdimm(dev);
int cmd, len = 0;
- if (!nvdimm->dsm_mask)
+ if (!nvdimm->cmd_mask)
return sprintf(buf, "\n");
- for_each_set_bit(cmd, nvdimm->dsm_mask, BITS_PER_LONG)
+ for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
len += sprintf(buf + len, "\n");
return len;
@@ -340,7 +346,7 @@ EXPORT_SYMBOL_GPL(nvdimm_attribute_group);
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
const struct attribute_group **groups, unsigned long flags,
- unsigned long *dsm_mask)
+ unsigned long cmd_mask)
{
struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
struct device *dev;
@@ -355,7 +361,7 @@ struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
}
nvdimm->provider_data = provider_data;
nvdimm->flags = flags;
- nvdimm->dsm_mask = dsm_mask;
+ nvdimm->cmd_mask = cmd_mask;
atomic_set(&nvdimm->busy, 0);
dev = &nvdimm->dev;
dev_set_name(dev, "nmem%d", nvdimm->id);
@@ -546,3 +552,8 @@ int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);
+
+void __exit nvdimm_devs_exit(void)
+{
+ ida_destroy(&dimm_ida);
+}
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index f5cb88601..c5e3196c4 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1288,6 +1288,8 @@ static ssize_t mode_show(struct device *dev,
mode = "safe";
else if (claim && is_nd_pfn(claim))
mode = "memory";
+ else if (claim && is_nd_dax(claim))
+ mode = "dax";
else if (!claim && pmem_should_map_pages(dev))
mode = "memory";
else
@@ -1379,21 +1381,19 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
- struct nd_namespace_common *ndns;
+ struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
+ struct nd_namespace_common *ndns = NULL;
resource_size_t size;
- if (nd_btt || nd_pfn) {
- struct device *host = NULL;
-
- if (nd_btt) {
- host = &nd_btt->dev;
+ if (nd_btt || nd_pfn || nd_dax) {
+ if (nd_btt)
ndns = nd_btt->ndns;
- } else if (nd_pfn) {
- host = &nd_pfn->dev;
+ else if (nd_pfn)
ndns = nd_pfn->ndns;
- }
+ else if (nd_dax)
+ ndns = nd_dax->nd_pfn.ndns;
- if (!ndns || !host)
+ if (!ndns)
return ERR_PTR(-ENODEV);
/*
@@ -1404,12 +1404,12 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
device_unlock(&ndns->dev);
if (ndns->dev.driver) {
dev_dbg(&ndns->dev, "is active, can't bind %s\n",
- dev_name(host));
+ dev_name(dev));
return ERR_PTR(-EBUSY);
}
- if (dev_WARN_ONCE(&ndns->dev, ndns->claim != host,
+ if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
"host (%s) vs claim (%s) mismatch\n",
- dev_name(host),
+ dev_name(dev),
dev_name(ndns->claim)))
return ERR_PTR(-ENXIO);
} else {
@@ -1784,6 +1784,18 @@ void nd_region_create_blk_seed(struct nd_region *nd_region)
nd_device_register(nd_region->ns_seed);
}
+void nd_region_create_dax_seed(struct nd_region *nd_region)
+{
+ WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
+ nd_region->dax_seed = nd_dax_create(nd_region);
+ /*
+ * Seed creation failures are not fatal; provisioning is simply
+ * disabled until memory becomes available.
+ */
+ if (!nd_region->dax_seed)
+ dev_err(&nd_region->dev, "failed to create dax namespace\n");
+}
+
void nd_region_create_pfn_seed(struct nd_region *nd_region)
{
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 1d1500f3d..284cdaa26 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -37,7 +37,7 @@ struct nvdimm_bus {
struct nvdimm {
unsigned long flags;
void *provider_data;
- unsigned long *dsm_mask;
+ unsigned long cmd_mask;
struct device dev;
atomic_t busy;
int id;
@@ -49,11 +49,14 @@ bool is_nd_blk(struct device *dev);
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
int __init nvdimm_bus_init(void);
void nvdimm_bus_exit(void);
+void nvdimm_devs_exit(void);
+void nd_region_devs_exit(void);
void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev);
struct nd_region;
void nd_region_create_blk_seed(struct nd_region *nd_region);
void nd_region_create_btt_seed(struct nd_region *nd_region);
void nd_region_create_pfn_seed(struct nd_region *nd_region);
+void nd_region_create_dax_seed(struct nd_region *nd_region);
void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
@@ -91,4 +94,5 @@ bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
ssize_t nd_namespace_store(struct device *dev,
struct nd_namespace_common **_ndns, const char *buf,
size_t len);
+struct nd_pfn *to_nd_pfn_safe(struct device *dev);
#endif /* __ND_CORE_H__ */
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 875c524fa..d0ac93c31 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -13,6 +13,7 @@
#ifndef __ND_H__
#define __ND_H__
#include <linux/libnvdimm.h>
+#include <linux/badblocks.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
@@ -100,10 +101,12 @@ struct nd_region {
struct ida ns_ida;
struct ida btt_ida;
struct ida pfn_ida;
+ struct ida dax_ida;
unsigned long flags;
struct device *ns_seed;
struct device *btt_seed;
struct device *pfn_seed;
+ struct device *dax_seed;
u16 ndr_mappings;
u64 ndr_size;
u64 ndr_start;
@@ -160,6 +163,10 @@ struct nd_pfn {
struct nd_namespace_common *ndns;
};
+struct nd_dax {
+ struct nd_pfn nd_pfn;
+};
+
enum nd_async_mode {
ND_SYNC,
ND_ASYNC,
@@ -197,11 +204,12 @@ struct nd_gen_sb {
u64 nd_sb_checksum(struct nd_gen_sb *sb);
#if IS_ENABLED(CONFIG_BTT)
-int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata);
+int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
-static inline int nd_btt_probe(struct nd_namespace_common *ndns, void *drvdata)
+static inline int nd_btt_probe(struct device *dev,
+ struct nd_namespace_common *ndns)
{
return -ENODEV;
}
@@ -219,12 +227,16 @@ static inline struct device *nd_btt_create(struct nd_region *nd_region)
struct nd_pfn *to_nd_pfn(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_PFN)
-int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata);
+int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_pfn(struct device *dev);
struct device *nd_pfn_create(struct nd_region *nd_region);
-int nd_pfn_validate(struct nd_pfn *nd_pfn);
+struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
+ struct nd_namespace_common *ndns);
+int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
+extern struct attribute_group nd_pfn_attribute_group;
#else
-static inline int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
+static inline int nd_pfn_probe(struct device *dev,
+ struct nd_namespace_common *ndns)
{
return -ENODEV;
}
@@ -239,12 +251,35 @@ static inline struct device *nd_pfn_create(struct nd_region *nd_region)
return NULL;
}
-static inline int nd_pfn_validate(struct nd_pfn *nd_pfn)
+static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
return -ENODEV;
}
#endif
+struct nd_dax *to_nd_dax(struct device *dev);
+#if IS_ENABLED(CONFIG_NVDIMM_DAX)
+int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
+bool is_nd_dax(struct device *dev);
+struct device *nd_dax_create(struct nd_region *nd_region);
+#else
+static inline int nd_dax_probe(struct device *dev,
+ struct nd_namespace_common *ndns)
+{
+ return -ENODEV;
+}
+
+static inline bool is_nd_dax(struct device *dev)
+{
+ return false;
+}
+
+static inline struct device *nd_dax_create(struct nd_region *nd_region)
+{
+ return NULL;
+}
+#endif
+
struct nd_region *to_nd_region(struct device *dev);
int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
@@ -263,11 +298,32 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
-int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns);
+int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
char *name);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
struct badblocks *bb, const struct resource *res);
+#if IS_ENABLED(CONFIG_ND_CLAIM)
+struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+ struct resource *res, struct vmem_altmap *altmap);
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
+void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
+#else
+static inline struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+ struct resource *res, struct vmem_altmap *altmap)
+{
+ return ERR_PTR(-ENXIO);
+}
+static inline int devm_nsio_enable(struct device *dev,
+ struct nd_namespace_io *nsio)
+{
+ return -ENXIO;
+}
+static inline void devm_nsio_disable(struct device *dev,
+ struct nd_namespace_io *nsio)
+{
+}
+#endif
int nd_blk_region_init(struct nd_region *nd_region);
void __nd_iostat_start(struct bio *bio, unsigned long *start);
static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
@@ -281,6 +337,19 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
return true;
}
void nd_iostat_end(struct bio *bio, unsigned long start);
+static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
+ unsigned int len)
+{
+ if (bb->count) {
+ sector_t first_bad;
+ int num_bad;
+
+ return !!badblocks_check(bb, sector, len / 512, &first_bad,
+ &num_bad);
+ }
+
+ return false;
+}
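
The helper takes the sector in 512-byte units and the length in bytes, so badblocks_check() scans len / 512 sectors. A minimal caller sketch (hypothetical 'pmem' device with a populated badblocks list):

    if (is_bad_pmem(&pmem->bb, sector, PAGE_SIZE))  /* PAGE_SIZE / 512 == 8 sectors */
            return -EIO;
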
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
const u8 *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index 8e343a3ca..dde985345 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -19,6 +19,7 @@
#define PFN_SIG_LEN 16
#define PFN_SIG "NVDIMM_PFN_INFO\0"
+#define DAX_SIG "NVDIMM_DAX_INFO\0"
struct nd_pfn_sb {
u8 signature[PFN_SIG_LEN];
@@ -33,7 +34,9 @@ struct nd_pfn_sb {
/* minor-version-1 additions for section alignment */
__le32 start_pad;
__le32 end_trunc;
- u8 padding[4004];
+ /* minor-version-2 records the base alignment of the mapping */
+ __le32 align;
+ u8 padding[4000];
__le64 checksum;
};
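
Shrinking the padding by four bytes as the __le32 'align' member is added keeps the info block at its fixed 4096 bytes, leaving the checksum as the trailing 8 bytes. A build-time check along these lines (a sketch, not part of the patch) would catch future drift:

    BUILD_BUG_ON(sizeof(struct nd_pfn_sb) != SZ_4K);  /* padding 4004 -> 4000 offsets the new field */
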
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index e071e214f..cea8350fb 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -10,6 +10,7 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
+#include <linux/memremap.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/genhd.h>
@@ -56,7 +57,7 @@ EXPORT_SYMBOL(to_nd_pfn);
static ssize_t mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
switch (nd_pfn->mode) {
case PFN_MODE_RAM:
@@ -71,7 +72,7 @@ static ssize_t mode_show(struct device *dev,
static ssize_t mode_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc = 0;
device_lock(dev);
@@ -105,7 +106,7 @@ static DEVICE_ATTR_RW(mode);
static ssize_t align_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
return sprintf(buf, "%lx\n", nd_pfn->align);
}
@@ -133,7 +134,7 @@ static ssize_t __align_store(struct nd_pfn *nd_pfn, const char *buf)
static ssize_t align_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
device_lock(dev);
@@ -151,7 +152,7 @@ static DEVICE_ATTR_RW(align);
static ssize_t uuid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
if (nd_pfn->uuid)
return sprintf(buf, "%pUb\n", nd_pfn->uuid);
@@ -161,7 +162,7 @@ static ssize_t uuid_show(struct device *dev,
static ssize_t uuid_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
device_lock(dev);
@@ -177,7 +178,7 @@ static DEVICE_ATTR_RW(uuid);
static ssize_t namespace_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
nvdimm_bus_lock(dev);
@@ -190,7 +191,7 @@ static ssize_t namespace_show(struct device *dev,
static ssize_t namespace_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
device_lock(dev);
@@ -208,7 +209,7 @@ static DEVICE_ATTR_RW(namespace);
static ssize_t resource_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
device_lock(dev);
@@ -234,7 +235,7 @@ static DEVICE_ATTR_RO(resource);
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+ struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
device_lock(dev);
@@ -269,7 +270,7 @@ static struct attribute *nd_pfn_attributes[] = {
NULL,
};
-static struct attribute_group nd_pfn_attribute_group = {
+struct attribute_group nd_pfn_attribute_group = {
.attrs = nd_pfn_attributes,
};
@@ -280,16 +281,32 @@ static const struct attribute_group *nd_pfn_attribute_groups[] = {
NULL,
};
-static struct device *__nd_pfn_create(struct nd_region *nd_region,
+struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
struct nd_namespace_common *ndns)
{
- struct nd_pfn *nd_pfn;
- struct device *dev;
+ struct device *dev = &nd_pfn->dev;
- /* we can only create pages for contiguous ranged of pmem */
- if (!is_nd_pmem(&nd_region->dev))
+ if (!nd_pfn)
return NULL;
+ nd_pfn->mode = PFN_MODE_NONE;
+ nd_pfn->align = HPAGE_SIZE;
+ dev = &nd_pfn->dev;
+ device_initialize(&nd_pfn->dev);
+ if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
+ dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
+ __func__, dev_name(ndns->claim));
+ put_device(dev);
+ return NULL;
+ }
+ return dev;
+}
+
+static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
+{
+ struct nd_pfn *nd_pfn;
+ struct device *dev;
+
nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
if (!nd_pfn)
return NULL;
@@ -300,35 +317,35 @@ static struct device *__nd_pfn_create(struct nd_region *nd_region,
return NULL;
}
- nd_pfn->mode = PFN_MODE_NONE;
- nd_pfn->align = HPAGE_SIZE;
dev = &nd_pfn->dev;
dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
- dev->parent = &nd_region->dev;
- dev->type = &nd_pfn_device_type;
dev->groups = nd_pfn_attribute_groups;
- device_initialize(&nd_pfn->dev);
- if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
- dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
- __func__, dev_name(ndns->claim));
- put_device(dev);
- return NULL;
- }
- return dev;
+ dev->type = &nd_pfn_device_type;
+ dev->parent = &nd_region->dev;
+
+ return nd_pfn;
}
struct device *nd_pfn_create(struct nd_region *nd_region)
{
- struct device *dev = __nd_pfn_create(nd_region, NULL);
+ struct nd_pfn *nd_pfn;
+ struct device *dev;
+
+ if (!is_nd_pmem(&nd_region->dev))
+ return NULL;
+
+ nd_pfn = nd_pfn_alloc(nd_region);
+ dev = nd_pfn_devinit(nd_pfn, NULL);
- if (dev)
- __nd_device_register(dev);
+ __nd_device_register(dev);
return dev;
}
-int nd_pfn_validate(struct nd_pfn *nd_pfn)
+int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
+ unsigned long align;
+ enum nd_pfn_mode mode;
struct nd_namespace_io *nsio;
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
struct nd_namespace_common *ndns = nd_pfn->ndns;
@@ -343,7 +360,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb)))
return -ENXIO;
- if (memcmp(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN) != 0)
+ if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
return -ENODEV;
checksum = le64_to_cpu(pfn_sb->checksum);
@@ -360,6 +377,9 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
pfn_sb->end_trunc = 0;
}
+ if (__le16_to_cpu(pfn_sb->version_minor) < 2)
+ pfn_sb->align = 0;
+
switch (le32_to_cpu(pfn_sb->mode)) {
case PFN_MODE_RAM:
case PFN_MODE_PMEM:
@@ -368,20 +388,50 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
return -ENXIO;
}
+ align = le32_to_cpu(pfn_sb->align);
+ offset = le64_to_cpu(pfn_sb->dataoff);
+ if (align == 0)
+ align = 1UL << ilog2(offset);
+ mode = le32_to_cpu(pfn_sb->mode);
+
if (!nd_pfn->uuid) {
- /* from probe we allocate */
+ /*
+ * When probing a namespace via nd_pfn_probe() the uuid
+ * is NULL (see: nd_pfn_devinit()), so we init settings
+ * from the pfn_sb
+ */
nd_pfn->uuid = kmemdup(pfn_sb->uuid, 16, GFP_KERNEL);
if (!nd_pfn->uuid)
return -ENOMEM;
+ nd_pfn->align = align;
+ nd_pfn->mode = mode;
} else {
- /* from init we validate */
+ /*
+ * When probing a pfn / dax instance we validate the
+ * live settings against the pfn_sb
+ */
if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0)
return -ENODEV;
+
+ /*
+ * If the uuid validates but other settings mismatch,
+ * return EINVAL because userspace has managed to change
+ * the configuration without specifying new
+ * identification.
+ */
+ if (nd_pfn->align != align || nd_pfn->mode != mode) {
+ dev_err(&nd_pfn->dev,
+ "init failed, settings mismatch\n");
+ dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
+ nd_pfn->align, align, nd_pfn->mode,
+ mode);
+ return -EINVAL;
+ }
}
- if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) {
+ if (align > nvdimm_namespace_capacity(ndns)) {
dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
- nd_pfn->align, nvdimm_namespace_capacity(ndns));
+ align, nvdimm_namespace_capacity(ndns));
return -EINVAL;
}
@@ -391,7 +441,6 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
* namespace has changed since the pfn superblock was
* established.
*/
- offset = le64_to_cpu(pfn_sb->dataoff);
nsio = to_nd_namespace_io(&ndns->dev);
if (offset >= resource_size(&nsio->res)) {
dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
@@ -399,10 +448,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
return -EBUSY;
}
- nd_pfn->align = 1UL << ilog2(offset);
- if (!is_power_of_2(offset) || offset < PAGE_SIZE) {
- dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n",
- offset);
+ if ((align && !IS_ALIGNED(offset, align))
+ || !IS_ALIGNED(offset, PAGE_SIZE)) {
+ dev_err(&nd_pfn->dev,
+ "bad offset: %#llx dax disabled align: %#lx\n",
+ offset, align);
return -ENXIO;
}
@@ -410,11 +460,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
}
EXPORT_SYMBOL(nd_pfn_validate);
-int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
+int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
{
int rc;
- struct device *dev;
struct nd_pfn *nd_pfn;
+ struct device *pfn_dev;
struct nd_pfn_sb *pfn_sb;
struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
@@ -422,25 +472,217 @@ int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
return -ENODEV;
nvdimm_bus_lock(&ndns->dev);
- dev = __nd_pfn_create(nd_region, ndns);
+ nd_pfn = nd_pfn_alloc(nd_region);
+ pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
nvdimm_bus_unlock(&ndns->dev);
- if (!dev)
+ if (!pfn_dev)
return -ENOMEM;
- dev_set_drvdata(dev, drvdata);
- pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
- nd_pfn = to_nd_pfn(dev);
+ pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+ nd_pfn = to_nd_pfn(pfn_dev);
nd_pfn->pfn_sb = pfn_sb;
- rc = nd_pfn_validate(nd_pfn);
- nd_pfn->pfn_sb = NULL;
- kfree(pfn_sb);
- dev_dbg(&ndns->dev, "%s: pfn: %s\n", __func__,
- rc == 0 ? dev_name(dev) : "<none>");
+ rc = nd_pfn_validate(nd_pfn, PFN_SIG);
+ dev_dbg(dev, "%s: pfn: %s\n", __func__,
+ rc == 0 ? dev_name(pfn_dev) : "<none>");
if (rc < 0) {
- __nd_detach_ndns(dev, &nd_pfn->ndns);
- put_device(dev);
+ __nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
+ put_device(pfn_dev);
} else
- __nd_device_register(&nd_pfn->dev);
+ __nd_device_register(pfn_dev);
return rc;
}
EXPORT_SYMBOL(nd_pfn_probe);
+
+/*
+ * We hotplug memory at section granularity, so pad the reserved area from
+ * the previous section base to the namespace base address.
+ */
+static unsigned long init_altmap_base(resource_size_t base)
+{
+ unsigned long base_pfn = PHYS_PFN(base);
+
+ return PFN_SECTION_ALIGN_DOWN(base_pfn);
+}
+
+static unsigned long init_altmap_reserve(resource_size_t base)
+{
+ unsigned long reserve = PHYS_PFN(SZ_8K);
+ unsigned long base_pfn = PHYS_PFN(base);
+
+ reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+ return reserve;
+}
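
Worked numbers for the reserve calculation, assuming x86_64 defaults (4KiB pages, 128MiB sections, i.e. 0x8000 pfns per section); the base address is illustrative only:

    /*
     * base     = 0x104200000  ->  base_pfn = 0x104200
     * aligned  = PFN_SECTION_ALIGN_DOWN(base_pfn) = 0x100000
     * reserve  = PHYS_PFN(SZ_8K) + (0x104200 - 0x100000) = 2 + 0x4200 pfns
     *
     * i.e. the altmap reserves the gap back to the section start plus
     * the 8KiB info-block area at the namespace base.
     */
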
+
+static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+ struct resource *res, struct vmem_altmap *altmap)
+{
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+ u64 offset = le64_to_cpu(pfn_sb->dataoff);
+ u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+ u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+ struct nd_namespace_common *ndns = nd_pfn->ndns;
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ resource_size_t base = nsio->res.start + start_pad;
+ struct vmem_altmap __altmap = {
+ .base_pfn = init_altmap_base(base),
+ .reserve = init_altmap_reserve(base),
+ };
+
+ memcpy(res, &nsio->res, sizeof(*res));
+ res->start += start_pad;
+ res->end -= end_trunc;
+
+ if (nd_pfn->mode == PFN_MODE_RAM) {
+ if (offset < SZ_8K)
+ return ERR_PTR(-EINVAL);
+ nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
+ altmap = NULL;
+ } else if (nd_pfn->mode == PFN_MODE_PMEM) {
+ nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
+ if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
+ dev_info(&nd_pfn->dev,
+ "number of pfns truncated from %lld to %ld\n",
+ le64_to_cpu(nd_pfn->pfn_sb->npfns),
+ nd_pfn->npfns);
+ memcpy(altmap, &__altmap, sizeof(*altmap));
+ altmap->free = PHYS_PFN(offset - SZ_8K);
+ altmap->alloc = 0;
+ } else
+ return ERR_PTR(-ENXIO);
+
+ return altmap;
+}
+
+static int nd_pfn_init(struct nd_pfn *nd_pfn)
+{
+ u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
+ struct nd_namespace_common *ndns = nd_pfn->ndns;
+ u32 start_pad = 0, end_trunc = 0;
+ resource_size_t start, size;
+ struct nd_namespace_io *nsio;
+ struct nd_region *nd_region;
+ struct nd_pfn_sb *pfn_sb;
+ unsigned long npfns;
+ phys_addr_t offset;
+ const char *sig;
+ u64 checksum;
+ int rc;
+
+ pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+ if (!pfn_sb)
+ return -ENOMEM;
+
+ nd_pfn->pfn_sb = pfn_sb;
+ if (is_nd_dax(&nd_pfn->dev))
+ sig = DAX_SIG;
+ else
+ sig = PFN_SIG;
+ rc = nd_pfn_validate(nd_pfn, sig);
+ if (rc != -ENODEV)
+ return rc;
+
+ /* no info block, do init */
+ nd_region = to_nd_region(nd_pfn->dev.parent);
+ if (nd_region->ro) {
+ dev_info(&nd_pfn->dev,
+ "%s is read-only, unable to init metadata\n",
+ dev_name(&nd_region->dev));
+ return -ENXIO;
+ }
+
+ memset(pfn_sb, 0, sizeof(*pfn_sb));
+
+ /*
+ * Check if pmem collides with 'System RAM' when section aligned and
+ * trim it accordingly
+ */
+ nsio = to_nd_namespace_io(&ndns->dev);
+ start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
+ size = resource_size(&nsio->res);
+ if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+ IORES_DESC_NONE) == REGION_MIXED) {
+ start = nsio->res.start;
+ start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+ }
+
+ start = nsio->res.start;
+ size = PHYS_SECTION_ALIGN_UP(start + size) - start;
+ if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+ IORES_DESC_NONE) == REGION_MIXED) {
+ size = resource_size(&nsio->res);
+ end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+ }
+
+ if (start_pad + end_trunc)
+ dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+ dev_name(&ndns->dev), start_pad + end_trunc);
+
+ /*
+ * Note, we use 64 here for the standard size of struct page;
+ * debugging options may cause it to be larger, in which case the
+ * implementation will limit the pfns advertised through
+ * ->direct_access() to those that are included in the memmap.
+ */
+ start += start_pad;
+ size = resource_size(&nsio->res);
+ npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
+ if (nd_pfn->mode == PFN_MODE_PMEM) {
+ unsigned long memmap_size;
+
+ /*
+ * vmemmap_populate_hugepages() allocates the memmap array in
+ * HPAGE_SIZE chunks.
+ */
+ memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
+ offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve,
+ nd_pfn->align) - start;
+ } else if (nd_pfn->mode == PFN_MODE_RAM)
+ offset = ALIGN(start + SZ_8K + dax_label_reserve,
+ nd_pfn->align) - start;
+ else
+ return -ENXIO;
+
+ if (offset + start_pad + end_trunc >= size) {
+ dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
+ dev_name(&ndns->dev));
+ return -ENXIO;
+ }
+
+ npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
+ pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
+ pfn_sb->dataoff = cpu_to_le64(offset);
+ pfn_sb->npfns = cpu_to_le64(npfns);
+ memcpy(pfn_sb->signature, sig, PFN_SIG_LEN);
+ memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
+ memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
+ pfn_sb->version_major = cpu_to_le16(1);
+ pfn_sb->version_minor = cpu_to_le16(2);
+ pfn_sb->start_pad = cpu_to_le32(start_pad);
+ pfn_sb->end_trunc = cpu_to_le32(end_trunc);
+ pfn_sb->align = cpu_to_le32(nd_pfn->align);
+ checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
+ pfn_sb->checksum = cpu_to_le64(checksum);
+
+ return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
+}
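
Rough numbers for the PFN_MODE_PMEM branch, under assumed values (a section-aligned 16GiB namespace, 2MiB alignment, the standard 64-byte struct page, no DAX label reserve):

    /*
     * npfns       ~ (16GiB - 8KiB) / 4KiB              ~ 4M pages
     * memmap_size = ALIGN(64 * npfns, HPAGE_SIZE)      = 256MiB
     * offset      = ALIGN(start + 8KiB + 256MiB, 2MiB) - start = 258MiB
     * npfns       = (16GiB - 258MiB) / 4KiB            (re-derived for pfn_sb)
     */
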
+
+/*
+ * Determine the effective resource range and vmem_altmap from an nd_pfn
+ * instance.
+ */
+struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+ struct resource *res, struct vmem_altmap *altmap)
+{
+ int rc;
+
+ if (!nd_pfn->uuid || !nd_pfn->ndns)
+ return ERR_PTR(-ENODEV);
+
+ rc = nd_pfn_init(nd_pfn);
+ if (rc)
+ return ERR_PTR(rc);
+
+ /* we need a valid pfn_sb before we can init a vmem_altmap */
+ return __nvdimm_setup_pfn(nd_pfn, res, altmap);
+}
+EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 92f536596..608fc4464 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -33,10 +33,6 @@
#include "nd.h"
struct pmem_device {
- struct request_queue *pmem_queue;
- struct gendisk *pmem_disk;
- struct nd_namespace_common *ndns;
-
/* One contiguous memory region per device */
phys_addr_t phys_addr;
/* when non-zero this device is hosting a 'pfn' instance */
@@ -50,23 +46,10 @@ struct pmem_device {
struct badblocks bb;
};
-static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
-{
- if (bb->count) {
- sector_t first_bad;
- int num_bad;
-
- return !!badblocks_check(bb, sector, len / 512, &first_bad,
- &num_bad);
- }
-
- return false;
-}
-
static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
unsigned int len)
{
- struct device *dev = disk_to_dev(pmem->pmem_disk);
+ struct device *dev = pmem->bb.dev;
sector_t sector;
long cleared;
@@ -136,8 +119,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
unsigned long start;
struct bio_vec bvec;
struct bvec_iter iter;
- struct block_device *bdev = bio->bi_bdev;
- struct pmem_device *pmem = bdev->bd_disk->private_data;
+ struct pmem_device *pmem = q->queuedata;
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
@@ -162,7 +144,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, int rw)
{
- struct pmem_device *pmem = bdev->bd_disk->private_data;
+ struct pmem_device *pmem = bdev->bd_queue->queuedata;
int rc;
rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
@@ -182,14 +164,22 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
}
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
- void __pmem **kaddr, pfn_t *pfn)
+ void __pmem **kaddr, pfn_t *pfn, long size)
{
- struct pmem_device *pmem = bdev->bd_disk->private_data;
+ struct pmem_device *pmem = bdev->bd_queue->queuedata;
resource_size_t offset = sector * 512 + pmem->data_offset;
+ if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
+ return -EIO;
*kaddr = pmem->virt_addr + offset;
*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+ /*
+ * If badblocks are present, limit known good range to the
+ * requested range.
+ */
+ if (unlikely(pmem->bb.count))
+ return size;
return pmem->size - pmem->pfn_pad - offset;
}
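
A hedged caller sketch for the extended ->direct_access() contract: the new 'size' argument lets the driver reject a request that intersects poison up front, while the return value still reports how many bytes past 'sector' are usable:

    void __pmem *kaddr;
    pfn_t pfn;
    long avail = pmem_direct_access(bdev, sector, &kaddr, &pfn, size);

    if (avail < 0)
            return avail;   /* -EIO: requested range hits known-bad pmem */
    /* 'avail' bytes at kaddr are usable; it is clamped to 'size' when badblocks exist */
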
@@ -200,104 +190,119 @@ static const struct block_device_operations pmem_fops = {
.revalidate_disk = nvdimm_revalidate_disk,
};
-static struct pmem_device *pmem_alloc(struct device *dev,
- struct resource *res, int id)
+static void pmem_release_queue(void *q)
{
+ blk_cleanup_queue(q);
+}
+
+static void pmem_release_disk(void *disk)
+{
+ del_gendisk(disk);
+ put_disk(disk);
+}
+
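
The two release callbacks rely on devres running actions in reverse registration order. A sketch of the registrations made below and the resulting unwind (assuming they all succeed):

    addr = devm_memremap_pages(dev, ...);          /* unwound last */
    devm_add_action(dev, pmem_release_queue, q);   /* unwound 2nd: queue dies before unmap */
    ...
    devm_add_action(dev, pmem_release_disk, disk); /* unwound 1st: disk goes before both */

This ordering is what guarantees the request_queue is dead before devm_memremap_pages() is torn down.
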
+static int pmem_attach_disk(struct device *dev,
+ struct nd_namespace_common *ndns)
+{
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ struct vmem_altmap __altmap, *altmap = NULL;
+ struct resource *res = &nsio->res;
+ struct nd_pfn *nd_pfn = NULL;
+ int nid = dev_to_node(dev);
+ struct nd_pfn_sb *pfn_sb;
struct pmem_device *pmem;
+ struct resource pfn_res;
struct request_queue *q;
+ struct gendisk *disk;
+ void *addr;
+
+ /* while nsio_rw_bytes is active, parse a pfn info block if present */
+ if (is_nd_pfn(dev)) {
+ nd_pfn = to_nd_pfn(dev);
+ altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
+ if (IS_ERR(altmap))
+ return PTR_ERR(altmap);
+ }
+
+ /* we're attaching a block device, disable raw namespace access */
+ devm_nsio_disable(dev, nsio);
pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
if (!pmem)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
+ dev_set_drvdata(dev, pmem);
pmem->phys_addr = res->start;
pmem->size = resource_size(res);
if (!arch_has_wmb_pmem())
dev_warn(dev, "unable to guarantee persistence of writes\n");
- if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
- dev_name(dev))) {
- dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
- &pmem->phys_addr, pmem->size);
- return ERR_PTR(-EBUSY);
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(dev))) {
+ dev_warn(dev, "could not reserve region %pR\n", res);
+ return -EBUSY;
}
q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
if (!q)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
pmem->pfn_flags = PFN_DEV;
- if (pmem_should_map_pages(dev)) {
- pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
+ if (is_nd_pfn(dev)) {
+ addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
+ altmap);
+ pfn_sb = nd_pfn->pfn_sb;
+ pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
+ pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
+ pmem->pfn_flags |= PFN_MAP;
+ res = &pfn_res; /* for badblocks populate */
+ res->start += pmem->data_offset;
+ } else if (pmem_should_map_pages(dev)) {
+ addr = devm_memremap_pages(dev, &nsio->res,
&q->q_usage_counter, NULL);
pmem->pfn_flags |= PFN_MAP;
} else
- pmem->virt_addr = (void __pmem *) devm_memremap(dev,
- pmem->phys_addr, pmem->size,
- ARCH_MEMREMAP_PMEM);
+ addr = devm_memremap(dev, pmem->phys_addr,
+ pmem->size, ARCH_MEMREMAP_PMEM);
- if (IS_ERR(pmem->virt_addr)) {
+ /*
+ * At release time the queue must be dead before
+ * devm_memremap_pages is unwound
+ */
+ if (devm_add_action(dev, pmem_release_queue, q)) {
blk_cleanup_queue(q);
- return (void __force *) pmem->virt_addr;
+ return -ENOMEM;
}
- pmem->pmem_queue = q;
- return pmem;
-}
-
-static void pmem_detach_disk(struct pmem_device *pmem)
-{
- if (!pmem->pmem_disk)
- return;
-
- del_gendisk(pmem->pmem_disk);
- put_disk(pmem->pmem_disk);
- blk_cleanup_queue(pmem->pmem_queue);
-}
-
-static int pmem_attach_disk(struct device *dev,
- struct nd_namespace_common *ndns, struct pmem_device *pmem)
-{
- struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- int nid = dev_to_node(dev);
- struct resource bb_res;
- struct gendisk *disk;
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+ pmem->virt_addr = (void __pmem *) addr;
- blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
- blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
- blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
- blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);
+ blk_queue_make_request(q, pmem_make_request);
+ blk_queue_physical_block_size(q, PAGE_SIZE);
+ blk_queue_max_hw_sectors(q, UINT_MAX);
+ blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ q->queuedata = pmem;
disk = alloc_disk_node(0, nid);
- if (!disk) {
- blk_cleanup_queue(pmem->pmem_queue);
+ if (!disk)
+ return -ENOMEM;
+ if (devm_add_action(dev, pmem_release_disk, disk)) {
+ put_disk(disk);
return -ENOMEM;
}
disk->fops = &pmem_fops;
- disk->private_data = pmem;
- disk->queue = pmem->pmem_queue;
+ disk->queue = q;
disk->flags = GENHD_FL_EXT_DEVT;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
disk->driverfs_dev = dev;
set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
/ 512);
- pmem->pmem_disk = disk;
- devm_exit_badblocks(dev, &pmem->bb);
if (devm_init_badblocks(dev, &pmem->bb))
return -ENOMEM;
- bb_res.start = nsio->res.start + pmem->data_offset;
- bb_res.end = nsio->res.end;
- if (is_nd_pfn(dev)) {
- struct nd_pfn *nd_pfn = to_nd_pfn(dev);
- struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
-
- bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
- bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
- }
- nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
- &bb_res);
+ nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, res);
disk->bb = &pmem->bb;
add_disk(disk);
revalidate_disk(disk);
@@ -305,346 +310,68 @@ static int pmem_attach_disk(struct device *dev,
return 0;
}
-static int pmem_rw_bytes(struct nd_namespace_common *ndns,
- resource_size_t offset, void *buf, size_t size, int rw)
-{
- struct pmem_device *pmem = dev_get_drvdata(ndns->claim);
-
- if (unlikely(offset + size > pmem->size)) {
- dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
- return -EFAULT;
- }
-
- if (rw == READ) {
- unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
-
- if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
- return -EIO;
- return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
- } else {
- memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
- wmb_pmem();
- }
-
- return 0;
-}
-
-static int nd_pfn_init(struct nd_pfn *nd_pfn)
-{
- struct nd_pfn_sb *pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
- struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
- struct nd_namespace_common *ndns = nd_pfn->ndns;
- u32 start_pad = 0, end_trunc = 0;
- resource_size_t start, size;
- struct nd_namespace_io *nsio;
- struct nd_region *nd_region;
- unsigned long npfns;
- phys_addr_t offset;
- u64 checksum;
- int rc;
-
- if (!pfn_sb)
- return -ENOMEM;
-
- nd_pfn->pfn_sb = pfn_sb;
- rc = nd_pfn_validate(nd_pfn);
- if (rc == -ENODEV)
- /* no info block, do init */;
- else
- return rc;
-
- nd_region = to_nd_region(nd_pfn->dev.parent);
- if (nd_region->ro) {
- dev_info(&nd_pfn->dev,
- "%s is read-only, unable to init metadata\n",
- dev_name(&nd_region->dev));
- goto err;
- }
-
- memset(pfn_sb, 0, sizeof(*pfn_sb));
-
- /*
- * Check if pmem collides with 'System RAM' when section aligned and
- * trim it accordingly
- */
- nsio = to_nd_namespace_io(&ndns->dev);
- start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
- size = resource_size(&nsio->res);
- if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED) {
-
- start = nsio->res.start;
- start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
- }
-
- start = nsio->res.start;
- size = PHYS_SECTION_ALIGN_UP(start + size) - start;
- if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED) {
- size = resource_size(&nsio->res);
- end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
- }
-
- if (start_pad + end_trunc)
- dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
- dev_name(&ndns->dev), start_pad + end_trunc);
-
- /*
- * Note, we use 64 here for the standard size of struct page,
- * debugging options may cause it to be larger in which case the
- * implementation will limit the pfns advertised through
- * ->direct_access() to those that are included in the memmap.
- */
- start += start_pad;
- npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
- if (nd_pfn->mode == PFN_MODE_PMEM) {
- unsigned long memmap_size;
-
- /*
- * vmemmap_populate_hugepages() allocates the memmap array in
- * PMD_SIZE chunks.
- */
- memmap_size = ALIGN(64 * npfns, PMD_SIZE);
- offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align)
- - start;
- } else if (nd_pfn->mode == PFN_MODE_RAM)
- offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
- else
- goto err;
-
- if (offset + start_pad + end_trunc >= pmem->size) {
- dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
- dev_name(&ndns->dev));
- goto err;
- }
-
- npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
- pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
- pfn_sb->dataoff = cpu_to_le64(offset);
- pfn_sb->npfns = cpu_to_le64(npfns);
- memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
- memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
- memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
- pfn_sb->version_major = cpu_to_le16(1);
- pfn_sb->version_minor = cpu_to_le16(1);
- pfn_sb->start_pad = cpu_to_le32(start_pad);
- pfn_sb->end_trunc = cpu_to_le32(end_trunc);
- checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
- pfn_sb->checksum = cpu_to_le64(checksum);
-
- rc = nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
- if (rc)
- goto err;
-
- return 0;
- err:
- nd_pfn->pfn_sb = NULL;
- kfree(pfn_sb);
- return -ENXIO;
-}
-
-static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
-{
- struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
- struct pmem_device *pmem;
-
- /* free pmem disk */
- pmem = dev_get_drvdata(&nd_pfn->dev);
- pmem_detach_disk(pmem);
-
- /* release nd_pfn resources */
- kfree(nd_pfn->pfn_sb);
- nd_pfn->pfn_sb = NULL;
-
- return 0;
-}
-
-/*
- * We hotplug memory at section granularity, pad the reserved area from
- * the previous section base to the namespace base address.
- */
-static unsigned long init_altmap_base(resource_size_t base)
-{
- unsigned long base_pfn = PHYS_PFN(base);
-
- return PFN_SECTION_ALIGN_DOWN(base_pfn);
-}
-
-static unsigned long init_altmap_reserve(resource_size_t base)
-{
- unsigned long reserve = PHYS_PFN(SZ_8K);
- unsigned long base_pfn = PHYS_PFN(base);
-
- reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
- return reserve;
-}
-
-static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
-{
- int rc;
- struct resource res;
- struct request_queue *q;
- struct pmem_device *pmem;
- struct vmem_altmap *altmap;
- struct device *dev = &nd_pfn->dev;
- struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
- struct nd_namespace_common *ndns = nd_pfn->ndns;
- u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
- u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
- struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- resource_size_t base = nsio->res.start + start_pad;
- struct vmem_altmap __altmap = {
- .base_pfn = init_altmap_base(base),
- .reserve = init_altmap_reserve(base),
- };
-
- pmem = dev_get_drvdata(dev);
- pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
- pmem->pfn_pad = start_pad + end_trunc;
- nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
- if (nd_pfn->mode == PFN_MODE_RAM) {
- if (pmem->data_offset < SZ_8K)
- return -EINVAL;
- nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
- altmap = NULL;
- } else if (nd_pfn->mode == PFN_MODE_PMEM) {
- nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
- / PAGE_SIZE;
- if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
- dev_info(&nd_pfn->dev,
- "number of pfns truncated from %lld to %ld\n",
- le64_to_cpu(nd_pfn->pfn_sb->npfns),
- nd_pfn->npfns);
- altmap = & __altmap;
- altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
- altmap->alloc = 0;
- } else {
- rc = -ENXIO;
- goto err;
- }
-
- /* establish pfn range for lookup, and switch to direct map */
- q = pmem->pmem_queue;
- memcpy(&res, &nsio->res, sizeof(res));
- res.start += start_pad;
- res.end -= end_trunc;
- devm_memunmap(dev, (void __force *) pmem->virt_addr);
- pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
- &q->q_usage_counter, altmap);
- pmem->pfn_flags |= PFN_MAP;
- if (IS_ERR(pmem->virt_addr)) {
- rc = PTR_ERR(pmem->virt_addr);
- goto err;
- }
-
- /* attach pmem disk in "pfn-mode" */
- rc = pmem_attach_disk(dev, ndns, pmem);
- if (rc)
- goto err;
-
- return rc;
- err:
- nvdimm_namespace_detach_pfn(ndns);
- return rc;
-
-}
-
-static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
-{
- struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
- int rc;
-
- if (!nd_pfn->uuid || !nd_pfn->ndns)
- return -ENODEV;
-
- rc = nd_pfn_init(nd_pfn);
- if (rc)
- return rc;
- /* we need a valid pfn_sb before we can init a vmem_altmap */
- return __nvdimm_namespace_attach_pfn(nd_pfn);
-}
-
static int nd_pmem_probe(struct device *dev)
{
- struct nd_region *nd_region = to_nd_region(dev->parent);
struct nd_namespace_common *ndns;
- struct nd_namespace_io *nsio;
- struct pmem_device *pmem;
ndns = nvdimm_namespace_common_probe(dev);
if (IS_ERR(ndns))
return PTR_ERR(ndns);
- nsio = to_nd_namespace_io(&ndns->dev);
- pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
- if (IS_ERR(pmem))
- return PTR_ERR(pmem);
-
- pmem->ndns = ndns;
- dev_set_drvdata(dev, pmem);
- ndns->rw_bytes = pmem_rw_bytes;
- if (devm_init_badblocks(dev, &pmem->bb))
- return -ENOMEM;
- nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);
+ if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
+ return -ENXIO;
- if (is_nd_btt(dev)) {
- /* btt allocates its own request_queue */
- blk_cleanup_queue(pmem->pmem_queue);
- pmem->pmem_queue = NULL;
+ if (is_nd_btt(dev))
return nvdimm_namespace_attach_btt(ndns);
- }
if (is_nd_pfn(dev))
- return nvdimm_namespace_attach_pfn(ndns);
+ return pmem_attach_disk(dev, ndns);
- if (nd_btt_probe(ndns, pmem) == 0 || nd_pfn_probe(ndns, pmem) == 0) {
- /*
- * We'll come back as either btt-pmem, or pfn-pmem, so
- * drop the queue allocation for now.
- */
- blk_cleanup_queue(pmem->pmem_queue);
+ /* if we find a valid info-block we'll come back as that personality */
+ if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
+ || nd_dax_probe(dev, ndns) == 0)
return -ENXIO;
- }
- return pmem_attach_disk(dev, ndns, pmem);
+ /* ...otherwise we're just a raw pmem device */
+ return pmem_attach_disk(dev, ndns);
}
static int nd_pmem_remove(struct device *dev)
{
- struct pmem_device *pmem = dev_get_drvdata(dev);
-
if (is_nd_btt(dev))
- nvdimm_namespace_detach_btt(pmem->ndns);
- else if (is_nd_pfn(dev))
- nvdimm_namespace_detach_pfn(pmem->ndns);
- else
- pmem_detach_disk(pmem);
-
+ nvdimm_namespace_detach_btt(to_nd_btt(dev));
return 0;
}
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
- struct pmem_device *pmem = dev_get_drvdata(dev);
- struct nd_namespace_common *ndns = pmem->ndns;
struct nd_region *nd_region = to_nd_region(dev->parent);
- struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- struct resource res = {
- .start = nsio->res.start + pmem->data_offset,
- .end = nsio->res.end,
- };
+ struct pmem_device *pmem = dev_get_drvdata(dev);
+ resource_size_t offset = 0, end_trunc = 0;
+ struct nd_namespace_common *ndns;
+ struct nd_namespace_io *nsio;
+ struct resource res;
if (event != NVDIMM_REVALIDATE_POISON)
return;
- if (is_nd_pfn(dev)) {
+ if (is_nd_btt(dev)) {
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+
+ ndns = nd_btt->ndns;
+ } else if (is_nd_pfn(dev)) {
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
- res.start += __le32_to_cpu(pfn_sb->start_pad);
- res.end -= __le32_to_cpu(pfn_sb->end_trunc);
- }
+ ndns = nd_pfn->ndns;
+ offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
+ end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+ } else
+ ndns = to_ndns(dev);
+ nsio = to_nd_namespace_io(&ndns->dev);
+ res.start = nsio->res.start + offset;
+ res.end = nsio->res.end - end_trunc;
nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 4b7715e29..05a912359 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -54,6 +54,7 @@ static int nd_region_probe(struct device *dev)
nd_region->btt_seed = nd_btt_create(nd_region);
nd_region->pfn_seed = nd_pfn_create(nd_region);
+ nd_region->dax_seed = nd_dax_create(nd_region);
if (err == 0)
return 0;
@@ -86,6 +87,7 @@ static int nd_region_remove(struct device *dev)
nd_region->ns_seed = NULL;
nd_region->btt_seed = NULL;
nd_region->pfn_seed = NULL;
+ nd_region->dax_seed = NULL;
dev_set_drvdata(dev, NULL);
nvdimm_bus_unlock(dev);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 139bf71ca..40fcfea26 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -306,6 +306,23 @@ static ssize_t pfn_seed_show(struct device *dev,
}
static DEVICE_ATTR_RO(pfn_seed);
+static ssize_t dax_seed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+ ssize_t rc;
+
+ nvdimm_bus_lock(dev);
+ if (nd_region->dax_seed)
+ rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
+ else
+ rc = sprintf(buf, "\n");
+ nvdimm_bus_unlock(dev);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(dax_seed);
+
static ssize_t read_only_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -335,6 +352,7 @@ static struct attribute *nd_region_attributes[] = {
&dev_attr_mappings.attr,
&dev_attr_btt_seed.attr,
&dev_attr_pfn_seed.attr,
+ &dev_attr_dax_seed.attr,
&dev_attr_read_only.attr,
&dev_attr_set_cookie.attr,
&dev_attr_available_size.attr,
@@ -353,6 +371,9 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
return 0;
+ if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
+ return 0;
+
if (a != &dev_attr_set_cookie.attr
&& a != &dev_attr_available_size.attr)
return a->mode;
@@ -441,6 +462,13 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
nd_region_create_pfn_seed(nd_region);
nvdimm_bus_unlock(dev);
}
+ if (is_nd_dax(dev) && probe) {
+ nd_region = to_nd_region(dev->parent);
+ nvdimm_bus_lock(dev);
+ if (nd_region->dax_seed == dev)
+ nd_region_create_dax_seed(nd_region);
+ nvdimm_bus_unlock(dev);
+ }
}
void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
@@ -718,6 +746,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
ida_init(&nd_region->ns_ida);
ida_init(&nd_region->btt_ida);
ida_init(&nd_region->pfn_ida);
+ ida_init(&nd_region->dax_ida);
dev = &nd_region->dev;
dev_set_name(dev, "region%d", nd_region->id);
dev->parent = &nvdimm_bus->dev;
@@ -764,3 +793,8 @@ struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
+
+void __exit nd_region_devs_exit(void)
+{
+ ida_destroy(&region_ida);
+}
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index c894841c6..d296fc3ae 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -18,7 +18,7 @@ config BLK_DEV_NVME_SCSI
depends on NVME_CORE
---help---
This adds support for the SG_IO ioctl on the NVMe character
- and block devices nodes, as well a a translation for a small
+ and block devices nodes, as well as a translation for a small
number of selected SCSI commands to NVMe commands to the NVMe
driver. If you don't know what this means you probably want
to say N here, unless you run a distro that abuses the SCSI
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 643f45713..d5fb55c0a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -58,6 +58,64 @@ static DEFINE_SPINLOCK(dev_list_lock);
static struct class *nvme_class;
+bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+ enum nvme_ctrl_state new_state)
+{
+ enum nvme_ctrl_state old_state = ctrl->state;
+ bool changed = false;
+
+ spin_lock_irq(&ctrl->lock);
+ switch (new_state) {
+ case NVME_CTRL_LIVE:
+ switch (old_state) {
+ case NVME_CTRL_RESETTING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_RESETTING:
+ switch (old_state) {
+ case NVME_CTRL_NEW:
+ case NVME_CTRL_LIVE:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_DELETING:
+ switch (old_state) {
+ case NVME_CTRL_LIVE:
+ case NVME_CTRL_RESETTING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
+ case NVME_CTRL_DEAD:
+ switch (old_state) {
+ case NVME_CTRL_DELETING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irq(&ctrl->lock);
+
+ if (changed)
+ ctrl->state = new_state;
+
+ return changed;
+}
+EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
+
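
The nested switches above encode a small transition table; any pair not listed is rejected. Summarized (derived from the code):

    /*
     *   NEW       -> RESETTING
     *   LIVE      -> RESETTING, DELETING
     *   RESETTING -> LIVE,      DELETING
     *   DELETING  -> DEAD
     */

A hypothetical caller can then use the return value to serialize competing resets:

    if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
            return -EBUSY;
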
static void nvme_free_ns(struct kref *kref)
{
struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
@@ -138,6 +196,111 @@ struct request *nvme_alloc_request(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
+static inline void nvme_setup_flush(struct nvme_ns *ns,
+ struct nvme_command *cmnd)
+{
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->common.opcode = nvme_cmd_flush;
+ cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+}
+
+static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *cmnd)
+{
+ struct nvme_dsm_range *range;
+ struct page *page;
+ int offset;
+ unsigned int nr_bytes = blk_rq_bytes(req);
+
+ range = kmalloc(sizeof(*range), GFP_ATOMIC);
+ if (!range)
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
+ range->cattr = cpu_to_le32(0);
+ range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
+ range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->dsm.opcode = nvme_cmd_dsm;
+ cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
+ cmnd->dsm.nr = 0;
+ cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+
+ req->completion_data = range;
+ page = virt_to_page(range);
+ offset = offset_in_page(range);
+ blk_add_request_payload(req, page, offset, sizeof(*range));
+
+ /*
+ * we set __data_len back to the size of the area to be discarded
+ * on disk. This allows us to report completion on the full amount
+ * of blocks described by the request.
+ */
+ req->__data_len = nr_bytes;
+
+ return 0;
+}
+
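
Worked numbers for the single-range DSM payload (hedged example: a 512-byte-LBA namespace, so lba_shift == 9), discarding 1MiB at byte offset 4MiB:

    /*
     * slba = nvme_block_nr(ns, blk_rq_pos(req)) = 4MiB / 512  = 8192
     * nlb  = blk_rq_bytes(req) >> lba_shift     = 1MiB >> 9   = 2048 blocks
     */
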
+static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *cmnd)
+{
+ u16 control = 0;
+ u32 dsmgmt = 0;
+
+ if (req->cmd_flags & REQ_FUA)
+ control |= NVME_RW_FUA;
+ if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+ control |= NVME_RW_LR;
+
+ if (req->cmd_flags & REQ_RAHEAD)
+ dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+ cmnd->rw.command_id = req->tag;
+ cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+ cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+ cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+ if (ns->ms) {
+ switch (ns->pi_type) {
+ case NVME_NS_DPS_PI_TYPE3:
+ control |= NVME_RW_PRINFO_PRCHK_GUARD;
+ break;
+ case NVME_NS_DPS_PI_TYPE1:
+ case NVME_NS_DPS_PI_TYPE2:
+ control |= NVME_RW_PRINFO_PRCHK_GUARD |
+ NVME_RW_PRINFO_PRCHK_REF;
+ cmnd->rw.reftag = cpu_to_le32(
+ nvme_block_nr(ns, blk_rq_pos(req)));
+ break;
+ }
+ if (!blk_integrity_rq(req))
+ control |= NVME_RW_PRINFO_PRACT;
+ }
+
+ cmnd->rw.control = cpu_to_le16(control);
+ cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+}
+
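
Note that 'length' is a 0's-based field: for a 4KiB read on the same hypothetical 512-byte-LBA namespace,

    cmnd->rw.length = cpu_to_le16((4096 >> 9) - 1);   /* == 7, meaning 8 blocks */
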
+int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *cmd)
+{
+ int ret = 0;
+
+ if (req->cmd_type == REQ_TYPE_DRV_PRIV)
+ memcpy(cmd, req->cmd, sizeof(*cmd));
+ else if (req->cmd_flags & REQ_FLUSH)
+ nvme_setup_flush(ns, cmd);
+ else if (req->cmd_flags & REQ_DISCARD)
+ ret = nvme_setup_discard(ns, req, cmd);
+ else
+ nvme_setup_rw(ns, req, cmd);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_setup_cmd);
+
/*
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
@@ -566,10 +729,14 @@ static void nvme_init_integrity(struct nvme_ns *ns)
switch (ns->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
integrity.profile = &t10_pi_type3_crc;
+ integrity.tag_size = sizeof(u16) + sizeof(u32);
+ integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
break;
case NVME_NS_DPS_PI_TYPE1:
case NVME_NS_DPS_PI_TYPE2:
integrity.profile = &t10_pi_type1_crc;
+ integrity.tag_size = sizeof(u16);
+ integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
break;
default:
integrity.profile = NULL;
@@ -894,6 +1061,8 @@ EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
struct request_queue *q)
{
+ bool vwc = false;
+
if (ctrl->max_hw_sectors) {
u32 max_segments =
(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
@@ -903,9 +1072,10 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
}
if (ctrl->stripe_size)
blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
- if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
- blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
blk_queue_virt_boundary(q, ctrl->page_size - 1);
+ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+ vwc = true;
+ blk_queue_write_cache(q, vwc, vwc);
}
/*
@@ -1055,6 +1225,9 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
return ctrl->ops->reset_ctrl(ctrl);
case NVME_IOCTL_SUBSYS_RESET:
return nvme_reset_subsystem(ctrl);
+ case NVME_IOCTL_RESCAN:
+ nvme_queue_scan(ctrl);
+ return 0;
default:
return -ENOTTY;
}
@@ -1082,6 +1255,17 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
+static ssize_t nvme_sysfs_rescan(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ nvme_queue_scan(ctrl);
+ return count;
+}
+static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1185,6 +1369,7 @@ nvme_show_int_function(cntlid);
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
+ &dev_attr_rescan_controller.attr,
&dev_attr_model.attr,
&dev_attr_serial.attr,
&dev_attr_firmware_rev.attr,
@@ -1209,19 +1394,22 @@ static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
return nsa->ns_id - nsb->ns_id;
}
-static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
- struct nvme_ns *ns;
-
- lockdep_assert_held(&ctrl->namespaces_mutex);
+ struct nvme_ns *ns, *ret = NULL;
+ mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->ns_id == nsid)
- return ns;
+ if (ns->ns_id == nsid) {
+ kref_get(&ns->kref);
+ ret = ns;
+ break;
+ }
if (ns->ns_id > nsid)
break;
}
- return NULL;
+ mutex_unlock(&ctrl->namespaces_mutex);
+ return ret;
}
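
Since the lookup now takes the reference under the mutex, every caller pairs it with a put, as nvme_validate_ns() below does. A minimal sketch:

    ns = nvme_find_get_ns(ctrl, nsid);
    if (ns) {
            /* ... use ns->disk / ns->queue ... */
            nvme_put_ns(ns);
    }
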
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
@@ -1230,8 +1418,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
struct gendisk *disk;
int node = dev_to_node(ctrl->dev);
- lockdep_assert_held(&ctrl->namespaces_mutex);
-
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
return;
@@ -1272,7 +1458,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
if (nvme_revalidate_disk(ns->disk))
goto out_free_disk;
+ mutex_lock(&ctrl->namespaces_mutex);
list_add_tail(&ns->list, &ctrl->namespaces);
+ mutex_unlock(&ctrl->namespaces_mutex);
+
kref_get(&ctrl->kref);
if (ns->type == NVME_NS_LIGHTNVM)
return;
@@ -1307,9 +1496,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
}
+
mutex_lock(&ns->ctrl->namespaces_mutex);
list_del_init(&ns->list);
mutex_unlock(&ns->ctrl->namespaces_mutex);
+
nvme_put_ns(ns);
}
@@ -1317,10 +1508,11 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns;
- ns = nvme_find_ns(ctrl, nsid);
+ ns = nvme_find_get_ns(ctrl, nsid);
if (ns) {
if (revalidate_disk(ns->disk))
nvme_ns_remove(ns);
+ nvme_put_ns(ns);
} else
nvme_alloc_ns(ctrl, nsid);
}
@@ -1349,9 +1541,11 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
nvme_validate_ns(ctrl, nsid);
while (++prev < nsid) {
- ns = nvme_find_ns(ctrl, prev);
- if (ns)
+ ns = nvme_find_get_ns(ctrl, prev);
+ if (ns) {
nvme_ns_remove(ns);
+ nvme_put_ns(ns);
+ }
}
}
nn -= j;
@@ -1361,13 +1555,11 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
return ret;
}
-static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
+static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
struct nvme_ns *ns, *next;
unsigned i;
- lockdep_assert_held(&ctrl->namespaces_mutex);
-
for (i = 1; i <= nn; i++)
nvme_validate_ns(ctrl, i);
@@ -1377,38 +1569,118 @@ static void __nvme_scan_namespaces(struct nvme_ctrl *ctrl, unsigned nn)
}
}
-void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
+static void nvme_scan_work(struct work_struct *work)
{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, scan_work);
struct nvme_id_ctrl *id;
unsigned nn;
+ if (ctrl->state != NVME_CTRL_LIVE)
+ return;
+
if (nvme_identify_ctrl(ctrl, &id))
return;
- mutex_lock(&ctrl->namespaces_mutex);
nn = le32_to_cpu(id->nn);
if (ctrl->vs >= NVME_VS(1, 1) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
if (!nvme_scan_ns_list(ctrl, nn))
goto done;
}
- __nvme_scan_namespaces(ctrl, le32_to_cpup(&id->nn));
+ nvme_scan_ns_sequential(ctrl, nn);
done:
+ mutex_lock(&ctrl->namespaces_mutex);
list_sort(NULL, &ctrl->namespaces, ns_cmp);
mutex_unlock(&ctrl->namespaces_mutex);
kfree(id);
+
+ if (ctrl->ops->post_scan)
+ ctrl->ops->post_scan(ctrl);
+}
+
+void nvme_queue_scan(struct nvme_ctrl *ctrl)
+{
+ /*
+ * Do not queue new scan work when a controller is reset during
+ * removal.
+ */
+ if (ctrl->state == NVME_CTRL_LIVE)
+ schedule_work(&ctrl->scan_work);
}
-EXPORT_SYMBOL_GPL(nvme_scan_namespaces);
+EXPORT_SYMBOL_GPL(nvme_queue_scan);
+/*
+ * This function iterates the namespace list unlocked to allow recovery from
+ * controller failure. It is up to the caller to ensure the namespace list is
+ * not modified by scan work while this function is executing.
+ */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns, *next;
+ /*
+ * The dead state indicates the controller was not gracefully
+ * disconnected. In that case, we won't be able to flush any data while
+ * removing the namespaces' disks; fail all the queues now to avoid
+ * potentially having to clean up the failed sync later.
+ */
+ if (ctrl->state == NVME_CTRL_DEAD)
+ nvme_kill_queues(ctrl);
+
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
+static void nvme_async_event_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, async_event_work);
+
+ spin_lock_irq(&ctrl->lock);
+ while (ctrl->event_limit > 0) {
+ int aer_idx = --ctrl->event_limit;
+
+ spin_unlock_irq(&ctrl->lock);
+ ctrl->ops->submit_async_event(ctrl, aer_idx);
+ spin_lock_irq(&ctrl->lock);
+ }
+ spin_unlock_irq(&ctrl->lock);
+}
+
+void nvme_complete_async_event(struct nvme_ctrl *ctrl,
+ struct nvme_completion *cqe)
+{
+ u16 status = le16_to_cpu(cqe->status) >> 1;
+ u32 result = le32_to_cpu(cqe->result);
+
+ if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
+ ++ctrl->event_limit;
+ schedule_work(&ctrl->async_event_work);
+ }
+
+ if (status != NVME_SC_SUCCESS)
+ return;
+
+ switch (result & 0xff07) {
+ case NVME_AER_NOTICE_NS_CHANGED:
+ dev_info(ctrl->device, "rescanning\n");
+ nvme_queue_scan(ctrl);
+ break;
+ default:
+ dev_warn(ctrl->device, "async event result %08x\n", result);
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_complete_async_event);
+
+void nvme_queue_async_events(struct nvme_ctrl *ctrl)
+{
+ ctrl->event_limit = NVME_NR_AERS;
+ schedule_work(&ctrl->async_event_work);
+}
+EXPORT_SYMBOL_GPL(nvme_queue_async_events);
+
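
The 0xff07 mask in nvme_complete_async_event() compares only the event type (result bits 2:0) and event information (bits 15:8), discarding the log page identifier (bits 23:16), per the AER completion layout in the NVMe spec:

    /*
     * result: [23:16] log page id | [15:8] event info | [2:0] event type
     * result & 0xff07 -> type + info only
     */
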
static DEFINE_IDA(nvme_instance_ida);
static int nvme_set_instance(struct nvme_ctrl *ctrl)
@@ -1440,6 +1712,10 @@ static void nvme_release_instance(struct nvme_ctrl *ctrl)
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
+ flush_work(&ctrl->async_event_work);
+ flush_work(&ctrl->scan_work);
+ nvme_remove_namespaces(ctrl);
+
device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
spin_lock(&dev_list_lock);
@@ -1475,12 +1751,16 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
{
int ret;
+ ctrl->state = NVME_CTRL_NEW;
+ spin_lock_init(&ctrl->lock);
INIT_LIST_HEAD(&ctrl->namespaces);
mutex_init(&ctrl->namespaces_mutex);
kref_init(&ctrl->kref);
ctrl->dev = dev;
ctrl->ops = ops;
ctrl->quirks = quirks;
+ INIT_WORK(&ctrl->scan_work, nvme_scan_work);
+ INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
ret = nvme_set_instance(ctrl);
if (ret)
@@ -1522,9 +1802,6 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (!kref_get_unless_zero(&ns->kref))
- continue;
-
/*
* Revalidating a dead namespace sets capacity to 0. This will
* end buffered writers dirtying pages that can't be synced.
@@ -1535,8 +1812,6 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
blk_set_queue_dying(ns->queue);
blk_mq_abort_requeue_list(ns->queue);
blk_mq_start_stopped_hw_queues(ns->queue, true);
-
- nvme_put_ns(ns);
}
mutex_unlock(&ctrl->namespaces_mutex);
}
@@ -1607,9 +1882,9 @@ int __init nvme_core_init(void)
void nvme_core_exit(void)
{
- unregister_blkdev(nvme_major, "nvme");
class_destroy(nvme_class);
__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
+ unregister_blkdev(nvme_major, "nvme");
}
MODULE_LICENSE("GPL");
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 9461dd639..a0af05583 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -367,8 +367,8 @@ static int nvme_nvm_get_l2p_tbl(struct nvm_dev *nvmdev, u64 slba, u32 nlb,
ret = nvme_submit_sync_cmd(ns->ctrl->admin_q,
(struct nvme_command *)&c, entries, len);
if (ret) {
- dev_err(ns->ctrl->dev, "L2P table transfer failed (%d)\n",
- ret);
+ dev_err(ns->ctrl->device,
+ "L2P table transfer failed (%d)\n", ret);
ret = -EIO;
goto out;
}
@@ -387,41 +387,16 @@ out:
return ret;
}
-static void nvme_nvm_bb_tbl_fold(struct nvm_dev *nvmdev,
- int nr_dst_blks, u8 *dst_blks,
- int nr_src_blks, u8 *src_blks)
-{
- int blk, offset, pl, blktype;
-
- for (blk = 0; blk < nr_dst_blks; blk++) {
- offset = blk * nvmdev->plane_mode;
- blktype = src_blks[offset];
-
- /* Bad blocks on any planes take precedence over other types */
- for (pl = 0; pl < nvmdev->plane_mode; pl++) {
- if (src_blks[offset + pl] &
- (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
- blktype = src_blks[offset + pl];
- break;
- }
- }
-
- dst_blks[blk] = blktype;
- }
-}
-
static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
- int nr_dst_blks, nvm_bb_update_fn *update_bbtbl,
- void *priv)
+ u8 *blks)
{
struct request_queue *q = nvmdev->q;
struct nvme_ns *ns = q->queuedata;
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_nvm_command c = {};
struct nvme_nvm_bb_tbl *bb_tbl;
- u8 *dst_blks = NULL;
- int nr_src_blks = nr_dst_blks * nvmdev->plane_mode;
- int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_src_blks;
+ int nr_blks = nvmdev->blks_per_lun * nvmdev->plane_mode;
+ int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blks;
int ret = 0;
c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl;
@@ -432,54 +407,43 @@ static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa,
if (!bb_tbl)
return -ENOMEM;
- dst_blks = kzalloc(nr_dst_blks, GFP_KERNEL);
- if (!dst_blks) {
- ret = -ENOMEM;
- goto out;
- }
-
ret = nvme_submit_sync_cmd(ctrl->admin_q, (struct nvme_command *)&c,
bb_tbl, tblsz);
if (ret) {
- dev_err(ctrl->dev, "get bad block table failed (%d)\n", ret);
+ dev_err(ctrl->device, "get bad block table failed (%d)\n", ret);
ret = -EIO;
goto out;
}
if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' ||
bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') {
- dev_err(ctrl->dev, "bbt format mismatch\n");
+ dev_err(ctrl->device, "bbt format mismatch\n");
ret = -EINVAL;
goto out;
}
if (le16_to_cpu(bb_tbl->verid) != 1) {
ret = -EINVAL;
- dev_err(ctrl->dev, "bbt version not supported\n");
+ dev_err(ctrl->device, "bbt version not supported\n");
goto out;
}
- if (le32_to_cpu(bb_tbl->tblks) != nr_src_blks) {
+ if (le32_to_cpu(bb_tbl->tblks) != nr_blks) {
ret = -EINVAL;
- dev_err(ctrl->dev, "bbt unsuspected blocks returned (%u!=%u)",
- le32_to_cpu(bb_tbl->tblks), nr_src_blks);
+ dev_err(ctrl->device,
+ "bbt unsuspected blocks returned (%u!=%u)",
+ le32_to_cpu(bb_tbl->tblks), nr_blks);
goto out;
}
- nvme_nvm_bb_tbl_fold(nvmdev, nr_dst_blks, dst_blks,
- nr_src_blks, bb_tbl->blk);
-
- ppa = dev_to_generic_addr(nvmdev, ppa);
- ret = update_bbtbl(ppa, nr_dst_blks, dst_blks, priv);
-
+ memcpy(blks, bb_tbl->blk, nvmdev->blks_per_lun * nvmdev->plane_mode);
out:
- kfree(dst_blks);
kfree(bb_tbl);
return ret;
}
-static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
- int type)
+static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
+ int nr_ppas, int type)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
struct nvme_nvm_command c = {};
@@ -487,14 +451,15 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct nvm_rq *rqd,
c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl;
c.set_bb.nsid = cpu_to_le32(ns->ns_id);
- c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa);
- c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1);
+ c.set_bb.spba = cpu_to_le64(ppas->ppa);
+ c.set_bb.nlb = cpu_to_le16(nr_ppas - 1);
c.set_bb.value = type;
ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, (struct nvme_command *)&c,
NULL, 0);
if (ret)
- dev_err(ns->ctrl->dev, "set bad block table failed (%d)\n", ret);
+ dev_err(ns->ctrl->device, "set bad block table failed (%d)\n",
+ ret);
return ret;
}
@@ -504,8 +469,9 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
c->ph_rw.opcode = rqd->opcode;
c->ph_rw.nsid = cpu_to_le32(ns->ns_id);
c->ph_rw.spba = cpu_to_le64(rqd->ppa_addr.ppa);
+ c->ph_rw.metadata = cpu_to_le64(rqd->dma_meta_list);
c->ph_rw.control = cpu_to_le16(rqd->flags);
- c->ph_rw.length = cpu_to_le16(rqd->nr_pages - 1);
+ c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
@@ -576,7 +542,7 @@ static int nvme_nvm_erase_block(struct nvm_dev *dev, struct nvm_rq *rqd)
c.erase.opcode = NVM_OP_ERASE;
c.erase.nsid = cpu_to_le32(ns->ns_id);
c.erase.spba = cpu_to_le64(rqd->ppa_addr.ppa);
- c.erase.length = cpu_to_le16(rqd->nr_pages - 1);
+ c.erase.length = cpu_to_le16(rqd->nr_ppas - 1);
return nvme_submit_sync_cmd(q, (struct nvme_command *)&c, NULL, 0);
}
@@ -601,10 +567,10 @@ static void *nvme_nvm_dev_dma_alloc(struct nvm_dev *dev, void *pool,
return dma_pool_alloc(pool, mem_flags, dma_handler);
}
-static void nvme_nvm_dev_dma_free(void *pool, void *ppa_list,
+static void nvme_nvm_dev_dma_free(void *pool, void *addr,
dma_addr_t dma_handler)
{
- dma_pool_free(pool, ppa_list, dma_handler);
+ dma_pool_free(pool, addr, dma_handler);
}
static struct nvm_dev_ops nvme_nvm_dev_ops = {
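
The get_bb_tbl rework above has the driver hand back the raw per-plane table (one u8 per plane per block) through `blks` instead of folding planes and calling back into the target. A minimal sketch of the fold a caller now performs, mirroring the removed nvme_nvm_bb_tbl_fold(); the flag names come from the lightnvm headers, and this is an illustration rather than the core's exact implementation:

static void fold_bb_tbl(int nr_blks, int plane_mode,
			const u8 *src, u8 *dst)
{
	int blk, pl;

	for (blk = 0; blk < nr_blks; blk++) {
		const u8 *planes = &src[blk * plane_mode];

		/* Default to plane 0, but bad blocks on any plane win. */
		dst[blk] = planes[0];
		for (pl = 0; pl < plane_mode; pl++) {
			if (planes[pl] & (NVM_BLK_T_BAD | NVM_BLK_T_GRWN_BAD)) {
				dst[blk] = planes[pl];
				break;
			}
		}
	}
}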
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index f846da4eb..1daa0482d 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -67,7 +67,17 @@ enum nvme_quirks {
NVME_QUIRK_DISCARD_ZEROES = (1 << 2),
};
+enum nvme_ctrl_state {
+ NVME_CTRL_NEW,
+ NVME_CTRL_LIVE,
+ NVME_CTRL_RESETTING,
+ NVME_CTRL_DELETING,
+ NVME_CTRL_DEAD,
+};
+
struct nvme_ctrl {
+ enum nvme_ctrl_state state;
+ spinlock_t lock;
const struct nvme_ctrl_ops *ops;
struct request_queue *admin_q;
struct device *dev;
@@ -84,7 +94,7 @@ struct nvme_ctrl {
char serial[20];
char model[40];
char firmware_rev[8];
- int cntlid;
+ u16 cntlid;
u32 ctrl_config;
@@ -99,6 +109,8 @@ struct nvme_ctrl {
u32 vs;
bool subsystem;
unsigned long quirks;
+ struct work_struct scan_work;
+ struct work_struct async_event_work;
};
/*
@@ -136,9 +148,10 @@ struct nvme_ctrl_ops {
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
- bool (*io_incapable)(struct nvme_ctrl *ctrl);
int (*reset_ctrl)(struct nvme_ctrl *ctrl);
void (*free_ctrl)(struct nvme_ctrl *ctrl);
+ void (*post_scan)(struct nvme_ctrl *ctrl);
+ void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
};
static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
@@ -150,17 +163,6 @@ static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
return val & NVME_CSTS_RDY;
}
-static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
-{
- u32 val = 0;
-
- if (ctrl->ops->io_incapable(ctrl))
- return true;
- if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
- return true;
- return val & NVME_CSTS_CFS;
-}
-
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
if (!ctrl->subsystem)
@@ -173,57 +175,20 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
return (sector >> (ns->lba_shift - 9));
}
-static inline void nvme_setup_flush(struct nvme_ns *ns,
- struct nvme_command *cmnd)
+static inline unsigned nvme_map_len(struct request *rq)
{
- memset(cmnd, 0, sizeof(*cmnd));
- cmnd->common.opcode = nvme_cmd_flush;
- cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+ if (rq->cmd_flags & REQ_DISCARD)
+ return sizeof(struct nvme_dsm_range);
+ else
+ return blk_rq_bytes(rq);
}
-static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
- struct nvme_command *cmnd)
+static inline void nvme_cleanup_cmd(struct request *req)
{
- u16 control = 0;
- u32 dsmgmt = 0;
-
- if (req->cmd_flags & REQ_FUA)
- control |= NVME_RW_FUA;
- if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
- control |= NVME_RW_LR;
-
- if (req->cmd_flags & REQ_RAHEAD)
- dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
-
- memset(cmnd, 0, sizeof(*cmnd));
- cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
- cmnd->rw.command_id = req->tag;
- cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
- cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
- cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-
- if (ns->ms) {
- switch (ns->pi_type) {
- case NVME_NS_DPS_PI_TYPE3:
- control |= NVME_RW_PRINFO_PRCHK_GUARD;
- break;
- case NVME_NS_DPS_PI_TYPE1:
- case NVME_NS_DPS_PI_TYPE2:
- control |= NVME_RW_PRINFO_PRCHK_GUARD |
- NVME_RW_PRINFO_PRCHK_REF;
- cmnd->rw.reftag = cpu_to_le32(
- nvme_block_nr(ns, blk_rq_pos(req)));
- break;
- }
- if (!blk_integrity_rq(req))
- control |= NVME_RW_PRINFO_PRACT;
- }
-
- cmnd->rw.control = cpu_to_le16(control);
- cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+ if (req->cmd_flags & REQ_DISCARD)
+ kfree(req->completion_data);
}
-
static inline int nvme_error_status(u16 status)
{
switch (status & 0x7ff) {
@@ -242,6 +207,8 @@ static inline bool nvme_req_needs_retry(struct request *req, u16 status)
(jiffies - req->start_time) < req->timeout;
}
+bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
+ enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -251,9 +218,14 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);
-void nvme_scan_namespaces(struct nvme_ctrl *ctrl);
+void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
+#define NVME_NR_AERS 1
+void nvme_complete_async_event(struct nvme_ctrl *ctrl,
+ struct nvme_completion *cqe);
+void nvme_queue_async_events(struct nvme_ctrl *ctrl);
+
void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
@@ -261,6 +233,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl);
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, unsigned int flags);
void nvme_requeue_req(struct request *req);
+int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+ struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
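
The new nvme_ctrl_state enum plus ctrl->lock replace the per-driver flag bits, and nvme_change_ctrl_state() only permits legal transitions. A hedged sketch of the idea; the actual transition table lives in core.c and is more complete, and the rules marked as assumptions below are simplifications:

static bool ctrl_state_sketch(struct nvme_ctrl *ctrl,
			      enum nvme_ctrl_state new_state)
{
	unsigned long flags;
	bool changed = false;

	spin_lock_irqsave(&ctrl->lock, flags);
	switch (new_state) {
	case NVME_CTRL_RESETTING:
		/* Assumed rule: only a live controller may enter reset. */
		changed = (ctrl->state == NVME_CTRL_LIVE);
		break;
	case NVME_CTRL_LIVE:
		changed = (ctrl->state == NVME_CTRL_NEW ||
			   ctrl->state == NVME_CTRL_RESETTING);
		break;
	default:
		/* Simplification for the sketch. */
		changed = true;
		break;
	}
	if (changed)
		ctrl->state = new_state;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return changed;
}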
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4fd733ff7..befac5b19 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -54,8 +54,7 @@
* We handle AEN commands ourselves and don't even let the
* block layer know about them.
*/
-#define NVME_NR_AEN_COMMANDS 1
-#define NVME_AQ_BLKMQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
+#define NVME_AQ_BLKMQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AERS)
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);
@@ -92,9 +91,7 @@ struct nvme_dev {
struct msix_entry *entry;
void __iomem *bar;
struct work_struct reset_work;
- struct work_struct scan_work;
struct work_struct remove_work;
- struct work_struct async_work;
struct timer_list watchdog_timer;
struct mutex shutdown_lock;
bool subsystem;
@@ -102,11 +99,6 @@ struct nvme_dev {
dma_addr_t cmb_dma_addr;
u64 cmb_size;
u32 cmbsz;
- unsigned long flags;
-
-#define NVME_CTRL_RESETTING 0
-#define NVME_CTRL_REMOVING 1
-
struct nvme_ctrl ctrl;
struct completion ioq_wait;
};
@@ -271,40 +263,6 @@ static int nvme_init_request(void *data, struct request *req,
return 0;
}
-static void nvme_queue_scan(struct nvme_dev *dev)
-{
- /*
- * Do not queue new scan work when a controller is reset during
- * removal.
- */
- if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
- return;
- queue_work(nvme_workq, &dev->scan_work);
-}
-
-static void nvme_complete_async_event(struct nvme_dev *dev,
- struct nvme_completion *cqe)
-{
- u16 status = le16_to_cpu(cqe->status) >> 1;
- u32 result = le32_to_cpu(cqe->result);
-
- if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
- ++dev->ctrl.event_limit;
- queue_work(nvme_workq, &dev->async_work);
- }
-
- if (status != NVME_SC_SUCCESS)
- return;
-
- switch (result & 0xff07) {
- case NVME_AER_NOTICE_NS_CHANGED:
- dev_info(dev->ctrl.device, "rescanning\n");
- nvme_queue_scan(dev);
- default:
- dev_warn(dev->ctrl.device, "async event result %08x\n", result);
- }
-}
-
/**
* __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
* @nvmeq: The queue to use
@@ -334,16 +292,11 @@ static __le64 **iod_list(struct request *req)
return (__le64 **)(iod->sg + req->nr_phys_segments);
}
-static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
+static int nvme_init_iod(struct request *rq, unsigned size,
+ struct nvme_dev *dev)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
int nseg = rq->nr_phys_segments;
- unsigned size;
-
- if (rq->cmd_flags & REQ_DISCARD)
- size = sizeof(struct nvme_dsm_range);
- else
- size = blk_rq_bytes(rq);
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -368,6 +321,8 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
__le64 **list = iod_list(req);
dma_addr_t prp_dma = iod->first_dma;
+ nvme_cleanup_cmd(req);
+
if (iod->npages == 0)
dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
for (i = 0; i < iod->npages; i++) {
@@ -529,7 +484,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
}
static int nvme_map_data(struct nvme_dev *dev, struct request *req,
- struct nvme_command *cmnd)
+ unsigned size, struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct request_queue *q = req->q;
@@ -546,7 +501,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir))
goto out;
- if (!nvme_setup_prps(dev, req, blk_rq_bytes(req)))
+ if (!nvme_setup_prps(dev, req, size))
goto out_unmap;
ret = BLK_MQ_RQ_QUEUE_ERROR;
@@ -596,37 +551,6 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
}
/*
- * We reuse the small pool to allocate the 16-byte range here as it is not
- * worth having a special pool for these or additional cases to handle freeing
- * the iod.
- */
-static int nvme_setup_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
- struct request *req, struct nvme_command *cmnd)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_dsm_range *range;
-
- range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
- &iod->first_dma);
- if (!range)
- return BLK_MQ_RQ_QUEUE_BUSY;
- iod_list(req)[0] = (__le64 *)range;
- iod->npages = 0;
-
- range->cattr = cpu_to_le32(0);
- range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
- range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
-
- memset(cmnd, 0, sizeof(*cmnd));
- cmnd->dsm.opcode = nvme_cmd_dsm;
- cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
- cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
- cmnd->dsm.nr = 0;
- cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
- return BLK_MQ_RQ_QUEUE_OK;
-}
-
-/*
* NOTE: ns is NULL when called on the admin queue.
*/
static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -637,6 +561,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_dev *dev = nvmeq->dev;
struct request *req = bd->rq;
struct nvme_command cmnd;
+ unsigned map_len;
int ret = BLK_MQ_RQ_QUEUE_OK;
/*
@@ -652,23 +577,17 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
}
- ret = nvme_init_iod(req, dev);
+ map_len = nvme_map_len(req);
+ ret = nvme_init_iod(req, map_len, dev);
if (ret)
return ret;
- if (req->cmd_flags & REQ_DISCARD) {
- ret = nvme_setup_discard(nvmeq, ns, req, &cmnd);
- } else {
- if (req->cmd_type == REQ_TYPE_DRV_PRIV)
- memcpy(&cmnd, req->cmd, sizeof(cmnd));
- else if (req->cmd_flags & REQ_FLUSH)
- nvme_setup_flush(ns, &cmnd);
- else
- nvme_setup_rw(ns, req, &cmnd);
+ ret = nvme_setup_cmd(ns, req, &cmnd);
+ if (ret)
+ goto out;
- if (req->nr_phys_segments)
- ret = nvme_map_data(dev, req, &cmnd);
- }
+ if (req->nr_phys_segments)
+ ret = nvme_map_data(dev, req, map_len, &cmnd);
if (ret)
goto out;
@@ -764,7 +683,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
*/
if (unlikely(nvmeq->qid == 0 &&
cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
- nvme_complete_async_event(nvmeq->dev, &cqe);
+ nvme_complete_async_event(&nvmeq->dev->ctrl, &cqe);
continue;
}
@@ -833,21 +752,18 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
return 0;
}
-static void nvme_async_event_work(struct work_struct *work)
+static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
{
- struct nvme_dev *dev = container_of(work, struct nvme_dev, async_work);
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
struct nvme_queue *nvmeq = dev->queues[0];
struct nvme_command c;
memset(&c, 0, sizeof(c));
c.common.opcode = nvme_admin_async_event;
+ c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx;
spin_lock_irq(&nvmeq->q_lock);
- while (dev->ctrl.event_limit > 0) {
- c.common.command_id = NVME_AQ_BLKMQ_DEPTH +
- --dev->ctrl.event_limit;
- __nvme_submit_cmd(nvmeq, &c);
- }
+ __nvme_submit_cmd(nvmeq, &c);
spin_unlock_irq(&nvmeq->q_lock);
}
@@ -939,7 +855,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
* cancellation error. All outstanding requests are completed on
* shutdown, so we return BLK_EH_HANDLED.
*/
- if (test_bit(NVME_CTRL_RESETTING, &dev->flags)) {
+ if (dev->ctrl.state == NVME_CTRL_RESETTING) {
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
@@ -1003,16 +919,15 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
return BLK_EH_RESET_TIMER;
}
-static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
+static void nvme_cancel_io(struct request *req, void *data, bool reserved)
{
- struct nvme_queue *nvmeq = data;
int status;
if (!blk_mq_request_started(req))
return;
- dev_dbg_ratelimited(nvmeq->dev->ctrl.device,
- "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
+ dev_dbg_ratelimited(((struct nvme_dev *) data)->ctrl.device,
+ "Cancelling I/O %d", req->tag);
status = NVME_SC_ABORT_REQ;
if (blk_queue_dying(req->q))
@@ -1069,14 +984,6 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
return 0;
}
-static void nvme_clear_queue(struct nvme_queue *nvmeq)
-{
- spin_lock_irq(&nvmeq->q_lock);
- if (nvmeq->tags && *nvmeq->tags)
- blk_mq_all_tag_busy_iter(*nvmeq->tags, nvme_cancel_queue_ios, nvmeq);
- spin_unlock_irq(&nvmeq->q_lock);
-}
-
static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
struct nvme_queue *nvmeq = dev->queues[0];
@@ -1350,22 +1257,44 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
return result;
}
+static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
+{
+
+ /* If true, indicates loss of adapter communication, possibly by an
+ * NVMe Subsystem reset.
+ */
+ bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
+
+ /* If there is a reset ongoing, we shouldn't reset again. */
+ if (work_busy(&dev->reset_work))
+ return false;
+
+ /* We shouldn't reset unless the controller is in a fatal error state
+ * _or_ if we lost the communication with it.
+ */
+ if (!(csts & NVME_CSTS_CFS) && !nssro)
+ return false;
+
+ /* If PCI error recovery process is happening, we cannot reset or
+ * the recovery mechanism will surely fail.
+ */
+ if (pci_channel_offline(to_pci_dev(dev->dev)))
+ return false;
+
+ return true;
+}
+
static void nvme_watchdog_timer(unsigned long data)
{
struct nvme_dev *dev = (struct nvme_dev *)data;
u32 csts = readl(dev->bar + NVME_REG_CSTS);
- /*
- * Skip controllers currently under reset.
- */
- if (!work_pending(&dev->reset_work) && !work_busy(&dev->reset_work) &&
- ((csts & NVME_CSTS_CFS) ||
- (dev->subsystem && (csts & NVME_CSTS_NSSRO)))) {
- if (queue_work(nvme_workq, &dev->reset_work)) {
+ /* Queue a reset only when nvme_should_reset() deems it necessary. */
+ if (nvme_should_reset(dev, csts)) {
+ if (queue_work(nvme_workq, &dev->reset_work))
dev_warn(dev->dev,
"Failed status: 0x%x, reset controller.\n",
csts);
- }
return;
}
@@ -1465,7 +1394,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
struct pci_dev *pdev = to_pci_dev(dev->dev);
int result, i, vecs, nr_io_queues, size;
- nr_io_queues = num_possible_cpus();
+ nr_io_queues = num_online_cpus();
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
if (result < 0)
return result;
@@ -1551,8 +1480,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
return result;
}
-static void nvme_set_irq_hints(struct nvme_dev *dev)
+static void nvme_pci_post_scan(struct nvme_ctrl *ctrl)
{
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
struct nvme_queue *nvmeq;
int i;
@@ -1567,16 +1497,6 @@ static void nvme_set_irq_hints(struct nvme_dev *dev)
}
}
-static void nvme_dev_scan(struct work_struct *work)
-{
- struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
-
- if (!dev->tagset.tags)
- return;
- nvme_scan_namespaces(&dev->ctrl);
- nvme_set_irq_hints(dev);
-}
-
static void nvme_del_queue_end(struct request *req, int error)
{
struct nvme_queue *nvmeq = req->end_io_data;
@@ -1592,7 +1512,13 @@ static void nvme_del_cq_end(struct request *req, int error)
if (!error) {
unsigned long flags;
- spin_lock_irqsave(&nvmeq->q_lock, flags);
+ /*
+ * We might be called with the AQ q_lock held
+ * and the I/O queue q_lock should always
+ * nest inside the AQ one.
+ */
+ spin_lock_irqsave_nested(&nvmeq->q_lock, flags,
+ SINGLE_DEPTH_NESTING);
nvme_process_cq(nvmeq);
spin_unlock_irqrestore(&nvmeq->q_lock, flags);
}
@@ -1625,12 +1551,12 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
static void nvme_disable_io_queues(struct nvme_dev *dev)
{
- int pass;
+ int pass, queues = dev->online_queues - 1;
unsigned long timeout;
u8 opcode = nvme_admin_delete_sq;
for (pass = 0; pass < 2; pass++) {
- int sent = 0, i = dev->queue_count - 1;
+ int sent = 0, i = queues;
reinit_completion(&dev->ioq_wait);
retry:
@@ -1684,7 +1610,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
nvme_free_queues(dev, dev->online_queues);
}
- nvme_queue_scan(dev);
return 0;
}
@@ -1754,9 +1679,14 @@ static int nvme_pci_enable(struct nvme_dev *dev)
static void nvme_dev_unmap(struct nvme_dev *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int bars;
+
if (dev->bar)
iounmap(dev->bar);
- pci_release_regions(to_pci_dev(dev->dev));
+
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ pci_release_selected_regions(pdev, bars);
}
static void nvme_pci_disable(struct nvme_dev *dev)
@@ -1797,8 +1727,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
}
nvme_pci_disable(dev);
- for (i = dev->queue_count - 1; i >= 0; i--)
- nvme_clear_queue(dev->queues[i]);
+ blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_io, dev);
+ blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_io, dev);
mutex_unlock(&dev->shutdown_lock);
}
@@ -1854,7 +1784,7 @@ static void nvme_reset_work(struct work_struct *work)
struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
int result = -ENODEV;
- if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags)))
+ if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
goto out;
/*
@@ -1864,11 +1794,9 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
- if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
goto out;
- set_bit(NVME_CTRL_RESETTING, &dev->flags);
-
result = nvme_pci_enable(dev);
if (result)
goto out;
@@ -1890,8 +1818,14 @@ static void nvme_reset_work(struct work_struct *work)
if (result)
goto out;
- dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
- queue_work(nvme_workq, &dev->async_work);
+ /*
+ * A controller that cannot execute IO typically requires user
+ * intervention to correct. For such degraded controllers, the driver
+ * should not submit commands the user did not request, so skip
+ * registering for asynchronous event notification on this condition.
+ */
+ if (dev->online_queues > 1)
+ nvme_queue_async_events(&dev->ctrl);
mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + HZ));
@@ -1901,13 +1835,20 @@ static void nvme_reset_work(struct work_struct *work)
*/
if (dev->online_queues < 2) {
dev_warn(dev->ctrl.device, "IO queues not created\n");
+ nvme_kill_queues(&dev->ctrl);
nvme_remove_namespaces(&dev->ctrl);
} else {
nvme_start_queues(&dev->ctrl);
nvme_dev_add(dev);
}
- clear_bit(NVME_CTRL_RESETTING, &dev->flags);
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
+ dev_warn(dev->ctrl.device, "failed to mark controller live\n");
+ goto out;
+ }
+
+ if (dev->online_queues > 1)
+ nvme_queue_scan(&dev->ctrl);
return;
out:
@@ -1921,7 +1862,7 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
nvme_kill_queues(&dev->ctrl);
if (pci_get_drvdata(pdev))
- pci_stop_and_remove_bus_device_locked(pdev);
+ device_release_driver(&pdev->dev);
nvme_put_ctrl(&dev->ctrl);
}
@@ -1955,13 +1896,6 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
return 0;
}
-static bool nvme_pci_io_incapable(struct nvme_ctrl *ctrl)
-{
- struct nvme_dev *dev = to_nvme_dev(ctrl);
-
- return !dev->bar || dev->online_queues < 2;
-}
-
static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
{
return nvme_reset(to_nvme_dev(ctrl));
@@ -1972,9 +1906,10 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64,
- .io_incapable = nvme_pci_io_incapable,
.reset_ctrl = nvme_pci_reset_ctrl,
.free_ctrl = nvme_pci_free_ctrl,
+ .post_scan = nvme_pci_post_scan,
+ .submit_async_event = nvme_pci_submit_async_event,
};
static int nvme_dev_map(struct nvme_dev *dev)
@@ -1994,7 +1929,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
return 0;
release:
- pci_release_regions(pdev);
+ pci_release_selected_regions(pdev, bars);
return -ENODEV;
}
@@ -2026,10 +1961,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto free;
- INIT_WORK(&dev->scan_work, nvme_dev_scan);
INIT_WORK(&dev->reset_work, nvme_reset_work);
INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
- INIT_WORK(&dev->async_work, nvme_async_event_work);
setup_timer(&dev->watchdog_timer, nvme_watchdog_timer,
(unsigned long)dev);
mutex_init(&dev->shutdown_lock);
@@ -2086,15 +2019,16 @@ static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- set_bit(NVME_CTRL_REMOVING, &dev->flags);
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
+
pci_set_drvdata(pdev, NULL);
- flush_work(&dev->async_work);
+
+ if (!pci_device_is_present(pdev))
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
+
flush_work(&dev->reset_work);
- flush_work(&dev->scan_work);
- nvme_remove_namespaces(&dev->ctrl);
nvme_uninit_ctrl(&dev->ctrl);
nvme_dev_disable(dev, true);
- flush_work(&dev->reset_work);
nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
nvme_release_cmb(dev);
@@ -2135,14 +2069,17 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
* shutdown the controller to quiesce. The controller will be restarted
* after the slot reset through driver's slot_reset callback.
*/
- dev_warn(dev->ctrl.device, "error detected: state:%d\n", state);
switch (state) {
case pci_channel_io_normal:
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
+ dev_warn(dev->ctrl.device,
+ "frozen state error detected, reset controller\n");
nvme_dev_disable(dev, false);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
+ dev_warn(dev->ctrl.device,
+ "failure state error detected, request disconnect\n");
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
@@ -2177,6 +2114,12 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0953),
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DISCARD_ZEROES, },
+ { PCI_VDEVICE(INTEL, 0x0a53),
+ .driver_data = NVME_QUIRK_STRIPE_SIZE |
+ NVME_QUIRK_DISCARD_ZEROES, },
+ { PCI_VDEVICE(INTEL, 0x0a54),
+ .driver_data = NVME_QUIRK_STRIPE_SIZE |
+ NVME_QUIRK_DISCARD_ZEROES, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
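
The AER rework above keys off a split of the admin queue's command-id space: blk-mq allocates tags below NVME_AQ_BLKMQ_DEPTH, and the driver stamps async event requests with ids at or above it, which is how __nvme_process_cq() recognizes them. In outline (NVME_AQ_DEPTH is assumed to come from the nvme headers):

#define NVME_NR_AERS		1
#define NVME_AQ_BLKMQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AERS)

/* Stamp an AER so its completion cannot collide with a blk-mq tag. */
c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx;

/* On the completion side, anything above the blk-mq window is an AEN. */
if (nvmeq->qid == 0 && cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)
	nvme_complete_async_event(&nvmeq->dev->ctrl, &cqe);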
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index ca52952d8..3041d48e7 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -1,6 +1,5 @@
menuconfig NVMEM
tristate "NVMEM Support"
- select REGMAP
help
Support for NVMEM (Non-Volatile Memory) devices like EEPROM, EFUSES...
@@ -28,6 +27,7 @@ config NVMEM_IMX_OCOTP
config NVMEM_LPC18XX_EEPROM
tristate "NXP LPC18XX EEPROM Memory Support"
depends on ARCH_LPC18XX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Say Y here to include support for NXP LPC18xx EEPROM memory found in
NXP LPC185x/3x and LPC435x/3x/2x/1x devices.
@@ -49,6 +49,7 @@ config NVMEM_MXS_OCOTP
config MTK_EFUSE
tristate "Mediatek SoCs EFUSE support"
depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_IOMEM
select REGMAP_MMIO
help
This is a driver to access hardware related data like sensor
@@ -61,7 +62,6 @@ config QCOM_QFPROM
tristate "QCOM QFPROM Support"
depends on ARCH_QCOM || COMPILE_TEST
depends on HAS_IOMEM
- select REGMAP_MMIO
help
Say y here to enable QFPROM support. The QFPROM provides access
functions for QFPROM data to the rest of the drivers via the nvmem interface.
@@ -83,7 +83,6 @@ config ROCKCHIP_EFUSE
config NVMEM_SUNXI_SID
tristate "Allwinner SoCs SID support"
depends on ARCH_SUNXI
- select REGMAP_MMIO
help
This is a driver for the 'security ID' available on various Allwinner
devices.
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 0de3d878c..965911d9b 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -23,12 +23,10 @@
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
struct nvmem_device {
const char *name;
- struct regmap *regmap;
struct module *owner;
struct device dev;
int stride;
@@ -41,6 +39,9 @@ struct nvmem_device {
int flags;
struct bin_attribute eeprom;
struct device *base_dev;
+ nvmem_reg_read_t reg_read;
+ nvmem_reg_write_t reg_write;
+ void *priv;
};
#define FLAG_COMPAT BIT(0)
@@ -66,6 +67,23 @@ static struct lock_class_key eeprom_lock_key;
#endif
#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
+static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
+ void *val, size_t bytes)
+{
+ if (nvmem->reg_read)
+ return nvmem->reg_read(nvmem->priv, offset, val, bytes);
+
+ return -EINVAL;
+}
+
+static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
+ void *val, size_t bytes)
+{
+ if (nvmem->reg_write)
+ return nvmem->reg_write(nvmem->priv, offset, val, bytes);
+
+ return -EINVAL;
+}
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
@@ -93,9 +111,9 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
count = round_down(count, nvmem->word_size);
- rc = regmap_raw_read(nvmem->regmap, pos, buf, count);
+ rc = nvmem_reg_read(nvmem, pos, buf, count);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return count;
@@ -127,9 +145,9 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
count = round_down(count, nvmem->word_size);
- rc = regmap_raw_write(nvmem->regmap, pos, buf, count);
+ rc = nvmem_reg_write(nvmem, pos, buf, count);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return count;
@@ -348,7 +366,7 @@ static int nvmem_add_cells(struct nvmem_device *nvmem,
}
rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
- if (IS_ERR_VALUE(rval)) {
+ if (rval) {
kfree(cells[i]);
goto err;
}
@@ -421,18 +439,11 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
struct nvmem_device *nvmem;
struct device_node *np;
- struct regmap *rm;
int rval;
if (!config->dev)
return ERR_PTR(-EINVAL);
- rm = dev_get_regmap(config->dev, NULL);
- if (!rm) {
- dev_err(config->dev, "Regmap not found\n");
- return ERR_PTR(-EINVAL);
- }
-
nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
if (!nvmem)
return ERR_PTR(-ENOMEM);
@@ -444,14 +455,16 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
}
nvmem->id = rval;
- nvmem->regmap = rm;
nvmem->owner = config->owner;
- nvmem->stride = regmap_get_reg_stride(rm);
- nvmem->word_size = regmap_get_val_bytes(rm);
- nvmem->size = regmap_get_max_register(rm) + nvmem->stride;
+ nvmem->stride = config->stride;
+ nvmem->word_size = config->word_size;
+ nvmem->size = config->size;
nvmem->dev.type = &nvmem_provider_type;
nvmem->dev.bus = &nvmem_bus_type;
nvmem->dev.parent = config->dev;
+ nvmem->priv = config->priv;
+ nvmem->reg_read = config->reg_read;
+ nvmem->reg_write = config->reg_write;
np = config->dev->of_node;
nvmem->dev.of_node = np;
dev_set_name(&nvmem->dev, "%s%d",
@@ -948,9 +961,9 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
{
int rc;
- rc = regmap_raw_read(nvmem->regmap, cell->offset, buf, cell->bytes);
+ rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
/* shift bits in-place */
@@ -977,7 +990,7 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
u8 *buf;
int rc;
- if (!nvmem || !nvmem->regmap)
+ if (!nvmem)
return ERR_PTR(-EINVAL);
buf = kzalloc(cell->bytes, GFP_KERNEL);
@@ -985,7 +998,7 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
return ERR_PTR(-ENOMEM);
rc = __nvmem_cell_read(nvmem, cell, buf, len);
- if (IS_ERR_VALUE(rc)) {
+ if (rc) {
kfree(buf);
return ERR_PTR(rc);
}
@@ -1014,7 +1027,7 @@ static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
*b <<= bit_offset;
/* setup the first byte with lsb bits from nvmem */
- rc = regmap_raw_read(nvmem->regmap, cell->offset, &v, 1);
+ rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
*b++ |= GENMASK(bit_offset - 1, 0) & v;
/* setup rest of the byte if any */
@@ -1031,7 +1044,7 @@ static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
/* if it's not end on byte boundary */
if ((nbits + bit_offset) % BITS_PER_BYTE) {
/* setup the last byte with msb bits from nvmem */
- rc = regmap_raw_read(nvmem->regmap,
+ rc = nvmem_reg_read(nvmem,
cell->offset + cell->bytes - 1, &v, 1);
*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
@@ -1054,7 +1067,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
struct nvmem_device *nvmem = cell->nvmem;
int rc;
- if (!nvmem || !nvmem->regmap || nvmem->read_only ||
+ if (!nvmem || nvmem->read_only ||
(cell->bit_offset == 0 && len != cell->bytes))
return -EINVAL;
@@ -1064,13 +1077,13 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
return PTR_ERR(buf);
}
- rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes);
+ rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
/* free the tmp buffer */
if (cell->bit_offset || cell->nbits)
kfree(buf);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return len;
@@ -1094,15 +1107,15 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
int rc;
ssize_t len;
- if (!nvmem || !nvmem->regmap)
+ if (!nvmem)
return -EINVAL;
rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return len;
@@ -1124,11 +1137,11 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell cell;
int rc;
- if (!nvmem || !nvmem->regmap)
+ if (!nvmem)
return -EINVAL;
rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return nvmem_cell_write(&cell, buf, cell.bytes);
@@ -1152,12 +1165,12 @@ int nvmem_device_read(struct nvmem_device *nvmem,
{
int rc;
- if (!nvmem || !nvmem->regmap)
+ if (!nvmem)
return -EINVAL;
- rc = regmap_raw_read(nvmem->regmap, offset, buf, bytes);
+ rc = nvmem_reg_read(nvmem, offset, buf, bytes);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return bytes;
@@ -1180,12 +1193,12 @@ int nvmem_device_write(struct nvmem_device *nvmem,
{
int rc;
- if (!nvmem || !nvmem->regmap)
+ if (!nvmem)
return -EINVAL;
- rc = regmap_raw_write(nvmem->regmap, offset, buf, bytes);
+ rc = nvmem_reg_write(nvmem, offset, buf, bytes);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
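
With regmap gone, an nvmem provider now describes itself entirely through nvmem_config: geometry (size, stride, word_size), a priv cookie, and raw reg_read/reg_write callbacks. This is exactly the pattern the provider conversions below follow. A minimal read-only sketch; the my_* names and the shadow-buffer backing are hypothetical:

static int my_reg_read(void *priv, unsigned int offset,
		       void *val, size_t bytes)
{
	struct my_chip *chip = priv;	/* hypothetical driver state */

	memcpy(val, chip->shadow + offset, bytes);
	return 0;
}

static struct nvmem_config my_config = {
	.name      = "my-otp",
	.owner     = THIS_MODULE,
	.read_only = true,
	.stride    = 4,
	.word_size = 4,
	.reg_read  = my_reg_read,
};

/* In probe: */
my_config.size = resource_size(res);
my_config.dev  = dev;
my_config.priv = chip;
nvmem = nvmem_register(&my_config);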
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index d7796eb54..75e66ef5b 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -22,7 +22,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
struct ocotp_priv {
@@ -31,59 +30,34 @@ struct ocotp_priv {
unsigned int nregs;
};
-static int imx_ocotp_read(void *context, const void *reg, size_t reg_size,
- void *val, size_t val_size)
+static int imx_ocotp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
struct ocotp_priv *priv = context;
- unsigned int offset = *(u32 *)reg;
unsigned int count;
+ u32 *buf = val;
int i;
u32 index;
index = offset >> 2;
- count = val_size >> 2;
+ count = bytes >> 2;
if (count > (priv->nregs - index))
count = priv->nregs - index;
- for (i = index; i < (index + count); i++) {
- *(u32 *)val = readl(priv->base + 0x400 + i * 0x10);
- val += 4;
- }
+ for (i = index; i < (index + count); i++)
+ *buf++ = readl(priv->base + 0x400 + i * 0x10);
return 0;
}
-static int imx_ocotp_write(void *context, const void *data, size_t count)
-{
- /* Not implemented */
- return 0;
-}
-
-static struct regmap_bus imx_ocotp_bus = {
- .read = imx_ocotp_read,
- .write = imx_ocotp_write,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
-static bool imx_ocotp_writeable_reg(struct device *dev, unsigned int reg)
-{
- return false;
-}
-
-static struct regmap_config imx_ocotp_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .writeable_reg = imx_ocotp_writeable_reg,
- .name = "imx-ocotp",
-};
-
static struct nvmem_config imx_ocotp_nvmem_config = {
.name = "imx-ocotp",
.read_only = true,
+ .word_size = 4,
+ .stride = 4,
.owner = THIS_MODULE,
+ .reg_read = imx_ocotp_read,
};
static const struct of_device_id imx_ocotp_dt_ids[] = {
@@ -99,7 +73,6 @@ static int imx_ocotp_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
struct resource *res;
- struct regmap *regmap;
struct ocotp_priv *priv;
struct nvmem_device *nvmem;
@@ -114,15 +87,9 @@ static int imx_ocotp_probe(struct platform_device *pdev)
of_id = of_match_device(imx_ocotp_dt_ids, dev);
priv->nregs = (unsigned int)of_id->data;
- imx_ocotp_regmap_config.max_register = 4 * priv->nregs - 4;
-
- regmap = devm_regmap_init(dev, &imx_ocotp_bus, priv,
- &imx_ocotp_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
+ imx_ocotp_nvmem_config.size = 4 * priv->nregs;
imx_ocotp_nvmem_config.dev = dev;
+ imx_ocotp_nvmem_config.priv = priv;
nvmem = nvmem_register(&imx_ocotp_nvmem_config);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
index 878fce789..c81ae4c6d 100644
--- a/drivers/nvmem/lpc18xx_eeprom.c
+++ b/drivers/nvmem/lpc18xx_eeprom.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include <linux/reset.h>
/* Registers */
@@ -51,12 +50,7 @@ struct lpc18xx_eeprom_dev {
struct nvmem_device *nvmem;
unsigned reg_bytes;
unsigned val_bytes;
-};
-
-static struct regmap_config lpc18xx_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
+ int size;
};
static inline void lpc18xx_eeprom_writel(struct lpc18xx_eeprom_dev *eeprom,
@@ -95,30 +89,35 @@ static int lpc18xx_eeprom_busywait_until_prog(struct lpc18xx_eeprom_dev *eeprom)
return -ETIMEDOUT;
}
-static int lpc18xx_eeprom_gather_write(void *context, const void *reg,
- size_t reg_size, const void *val,
- size_t val_size)
+static int lpc18xx_eeprom_gather_write(void *context, unsigned int reg,
+ void *val, size_t bytes)
{
struct lpc18xx_eeprom_dev *eeprom = context;
- unsigned int offset = *(u32 *)reg;
+ unsigned int offset = reg;
int ret;
- if (offset % lpc18xx_regmap_config.reg_stride)
+ /*
+ * The last page contains the EEPROM initialization data and is not
+ * writable.
+ */
+ if ((reg > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE) ||
+ (reg + bytes > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE))
return -EINVAL;
+
lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
LPC18XX_EEPROM_PWRDWN_NO);
/* Wait 100 us while the EEPROM wakes up */
usleep_range(100, 200);
- while (val_size) {
+ while (bytes) {
writel(*(u32 *)val, eeprom->mem_base + offset);
ret = lpc18xx_eeprom_busywait_until_prog(eeprom);
if (ret < 0)
return ret;
- val_size -= eeprom->val_bytes;
+ bytes -= eeprom->val_bytes;
val += eeprom->val_bytes;
offset += eeprom->val_bytes;
}
@@ -129,23 +128,10 @@ static int lpc18xx_eeprom_gather_write(void *context, const void *reg,
return 0;
}
-static int lpc18xx_eeprom_write(void *context, const void *data, size_t count)
+static int lpc18xx_eeprom_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
struct lpc18xx_eeprom_dev *eeprom = context;
- unsigned int offset = eeprom->reg_bytes;
-
- if (count <= offset)
- return -EINVAL;
-
- return lpc18xx_eeprom_gather_write(context, data, eeprom->reg_bytes,
- data + offset, count - offset);
-}
-
-static int lpc18xx_eeprom_read(void *context, const void *reg, size_t reg_size,
- void *val, size_t val_size)
-{
- struct lpc18xx_eeprom_dev *eeprom = context;
- unsigned int offset = *(u32 *)reg;
lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
LPC18XX_EEPROM_PWRDWN_NO);
@@ -153,9 +139,9 @@ static int lpc18xx_eeprom_read(void *context, const void *reg, size_t reg_size,
/* Wait 100 us while the EEPROM wakes up */
usleep_range(100, 200);
- while (val_size) {
+ while (bytes) {
*(u32 *)val = readl(eeprom->mem_base + offset);
- val_size -= eeprom->val_bytes;
+ bytes -= eeprom->val_bytes;
val += eeprom->val_bytes;
offset += eeprom->val_bytes;
}
@@ -166,31 +152,13 @@ static int lpc18xx_eeprom_read(void *context, const void *reg, size_t reg_size,
return 0;
}
-static struct regmap_bus lpc18xx_eeprom_bus = {
- .write = lpc18xx_eeprom_write,
- .gather_write = lpc18xx_eeprom_gather_write,
- .read = lpc18xx_eeprom_read,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
-static bool lpc18xx_eeprom_writeable_reg(struct device *dev, unsigned int reg)
-{
- /*
- * The last page contains the EEPROM initialization data and is not
- * writable.
- */
- return reg <= lpc18xx_regmap_config.max_register -
- LPC18XX_EEPROM_PAGE_SIZE;
-}
-
-static bool lpc18xx_eeprom_readable_reg(struct device *dev, unsigned int reg)
-{
- return reg <= lpc18xx_regmap_config.max_register;
-}
static struct nvmem_config lpc18xx_nvmem_config = {
.name = "lpc18xx-eeprom",
+ .stride = 4,
+ .word_size = 4,
+ .reg_read = lpc18xx_eeprom_read,
+ .reg_write = lpc18xx_eeprom_gather_write,
.owner = THIS_MODULE,
};
@@ -200,7 +168,6 @@ static int lpc18xx_eeprom_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct reset_control *rst;
unsigned long clk_rate;
- struct regmap *regmap;
struct resource *res;
int ret;
@@ -243,8 +210,8 @@ static int lpc18xx_eeprom_probe(struct platform_device *pdev)
goto err_clk;
}
- eeprom->val_bytes = lpc18xx_regmap_config.val_bits / BITS_PER_BYTE;
- eeprom->reg_bytes = lpc18xx_regmap_config.reg_bits / BITS_PER_BYTE;
+ eeprom->val_bytes = 4;
+ eeprom->reg_bytes = 4;
/*
* Clock rate is generated by dividing the system bus clock by the
@@ -264,19 +231,10 @@ static int lpc18xx_eeprom_probe(struct platform_device *pdev)
lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
LPC18XX_EEPROM_PWRDWN_YES);
- lpc18xx_regmap_config.max_register = resource_size(res) - 1;
- lpc18xx_regmap_config.writeable_reg = lpc18xx_eeprom_writeable_reg;
- lpc18xx_regmap_config.readable_reg = lpc18xx_eeprom_readable_reg;
-
- regmap = devm_regmap_init(dev, &lpc18xx_eeprom_bus, eeprom,
- &lpc18xx_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed: %ld\n", PTR_ERR(regmap));
- ret = PTR_ERR(regmap);
- goto err_clk;
- }
-
+ eeprom->size = resource_size(res);
+ lpc18xx_nvmem_config.size = resource_size(res);
lpc18xx_nvmem_config.dev = dev;
+ lpc18xx_nvmem_config.priv = eeprom;
eeprom->nvmem = nvmem_register(&lpc18xx_nvmem_config);
if (IS_ERR(eeprom->nvmem)) {
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 3829e5fbf..b5305f08b 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -13,21 +13,35 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
-static struct regmap_config qfprom_regmap_config = {
- .reg_bits = 32,
- .val_bits = 8,
- .reg_stride = 1,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
-};
+static int qfprom_reg_read(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ void __iomem *base = context;
+ u32 *val = _val;
+ int i = 0, words = bytes / 4;
-static struct nvmem_config econfig = {
- .name = "qfprom",
- .owner = THIS_MODULE,
-};
+ while (words--)
+ *val++ = readl(base + reg + (i++ * 4));
+
+ return 0;
+}
+
+static int qfprom_reg_write(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ void __iomem *base = context;
+ u32 *val = _val;
+ int i = 0, words = bytes / 4;
+
+ while (words--)
+ writel(*val++, base + reg + (i++ * 4));
+
+ return 0;
+}
static int qfprom_remove(struct platform_device *pdev)
{
@@ -36,12 +50,20 @@ static int qfprom_remove(struct platform_device *pdev)
return nvmem_unregister(nvmem);
}
+static struct nvmem_config econfig = {
+ .name = "qfprom",
+ .owner = THIS_MODULE,
+ .stride = 4,
+ .word_size = 1,
+ .reg_read = qfprom_reg_read,
+ .reg_write = qfprom_reg_write,
+};
+
static int qfprom_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
- struct regmap *regmap;
void __iomem *base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -49,14 +71,10 @@ static int qfprom_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- qfprom_regmap_config.max_register = resource_size(res) - 1;
-
- regmap = devm_regmap_init_mmio(dev, base, &qfprom_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
+ econfig.size = resource_size(res);
econfig.dev = dev;
+ econfig.priv = base;
+
nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index a00979511..4d3f391f0 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#define EFUSE_A_SHIFT 6
#define EFUSE_A_MASK 0x3ff
@@ -41,17 +40,9 @@ struct rockchip_efuse_chip {
struct clk *clk;
};
-static int rockchip_efuse_write(void *context, const void *data, size_t count)
+static int rockchip_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
- /* Nothing TBD, Read-Only */
- return 0;
-}
-
-static int rockchip_efuse_read(void *context,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
-{
- unsigned int offset = *(u32 *)reg;
struct rockchip_efuse_chip *efuse = context;
u8 *buf = val;
int ret;
@@ -64,12 +55,12 @@ static int rockchip_efuse_read(void *context,
writel(EFUSE_LOAD | EFUSE_PGENB, efuse->base + REG_EFUSE_CTRL);
udelay(1);
- while (val_size) {
+ while (bytes--) {
writel(readl(efuse->base + REG_EFUSE_CTRL) &
(~(EFUSE_A_MASK << EFUSE_A_SHIFT)),
efuse->base + REG_EFUSE_CTRL);
writel(readl(efuse->base + REG_EFUSE_CTRL) |
- ((offset & EFUSE_A_MASK) << EFUSE_A_SHIFT),
+ ((offset++ & EFUSE_A_MASK) << EFUSE_A_SHIFT),
efuse->base + REG_EFUSE_CTRL);
udelay(1);
writel(readl(efuse->base + REG_EFUSE_CTRL) |
@@ -79,9 +70,6 @@ static int rockchip_efuse_read(void *context,
writel(readl(efuse->base + REG_EFUSE_CTRL) &
(~EFUSE_STROBE), efuse->base + REG_EFUSE_CTRL);
udelay(1);
-
- val_size -= 1;
- offset += 1;
}
/* Switch to standby mode */
@@ -92,22 +80,11 @@ static int rockchip_efuse_read(void *context,
return 0;
}
-static struct regmap_bus rockchip_efuse_bus = {
- .read = rockchip_efuse_read,
- .write = rockchip_efuse_write,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
-static struct regmap_config rockchip_efuse_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 1,
- .val_bits = 8,
-};
-
static struct nvmem_config econfig = {
.name = "rockchip-efuse",
.owner = THIS_MODULE,
+ .stride = 1,
+ .word_size = 1,
.read_only = true,
};
@@ -121,7 +98,6 @@ static int rockchip_efuse_probe(struct platform_device *pdev)
{
struct resource *res;
struct nvmem_device *nvmem;
- struct regmap *regmap;
struct rockchip_efuse_chip *efuse;
efuse = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_efuse_chip),
@@ -139,16 +115,9 @@ static int rockchip_efuse_probe(struct platform_device *pdev)
return PTR_ERR(efuse->clk);
efuse->dev = &pdev->dev;
-
- rockchip_efuse_regmap_config.max_register = resource_size(res) - 1;
-
- regmap = devm_regmap_init(efuse->dev, &rockchip_efuse_bus,
- efuse, &rockchip_efuse_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(efuse->dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
-
+ econfig.size = resource_size(res);
+ econfig.reg_read = rockchip_efuse_read;
+ econfig.priv = efuse;
econfig.dev = efuse->dev;
nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index bc88b4084..1567ccca8 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -21,13 +21,14 @@
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/random.h>
static struct nvmem_config econfig = {
.name = "sunxi-sid",
.read_only = true,
+ .stride = 4,
+ .word_size = 1,
.owner = THIS_MODULE,
};
@@ -51,54 +52,23 @@ static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid,
return sid_key; /* Only return the last byte */
}
-static int sunxi_sid_read(void *context,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
+static int sunxi_sid_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
struct sunxi_sid *sid = context;
- unsigned int offset = *(u32 *)reg;
u8 *buf = val;
- while (val_size) {
- *buf++ = sunxi_sid_read_byte(sid, offset);
- val_size--;
- offset++;
- }
-
- return 0;
-}
+ while (bytes--)
+ *buf++ = sunxi_sid_read_byte(sid, offset++);
-static int sunxi_sid_write(void *context, const void *data, size_t count)
-{
- /* Unimplemented, dummy to keep regmap core happy */
return 0;
}
-static struct regmap_bus sunxi_sid_bus = {
- .read = sunxi_sid_read,
- .write = sunxi_sid_write,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
-static bool sunxi_sid_writeable_reg(struct device *dev, unsigned int reg)
-{
- return false;
-}
-
-static struct regmap_config sunxi_sid_regmap_config = {
- .reg_bits = 32,
- .val_bits = 8,
- .reg_stride = 1,
- .writeable_reg = sunxi_sid_writeable_reg,
-};
-
static int sunxi_sid_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
- struct regmap *regmap;
struct sunxi_sid *sid;
int ret, i, size;
char *randomness;
@@ -113,16 +83,10 @@ static int sunxi_sid_probe(struct platform_device *pdev)
return PTR_ERR(sid->base);
size = resource_size(res) - 1;
- sunxi_sid_regmap_config.max_register = size;
-
- regmap = devm_regmap_init(dev, &sunxi_sid_bus, sid,
- &sunxi_sid_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
-
+ econfig.size = resource_size(res);
econfig.dev = dev;
+ econfig.reg_read = sunxi_sid_read;
+ econfig.priv = sid;
nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
diff --git a/drivers/nvmem/vf610-ocotp.c b/drivers/nvmem/vf610-ocotp.c
index 8641319ef..72e4faabc 100644
--- a/drivers/nvmem/vf610-ocotp.c
+++ b/drivers/nvmem/vf610-ocotp.c
@@ -25,7 +25,6 @@
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
/* OCOTP Register Offsets */
@@ -152,23 +151,16 @@ static int vf610_get_fuse_address(int base_addr_offset)
return -EINVAL;
}
-static int vf610_ocotp_write(void *context, const void *data, size_t count)
-{
- return 0;
-}
-
-static int vf610_ocotp_read(void *context,
- const void *off, size_t reg_size,
- void *val, size_t val_size)
+static int vf610_ocotp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
struct vf610_ocotp *ocotp = context;
void __iomem *base = ocotp->base;
- unsigned int offset = *(u32 *)off;
u32 reg, *buf = val;
int fuse_addr;
int ret;
- while (val_size > 0) {
+ while (bytes > 0) {
fuse_addr = vf610_get_fuse_address(offset);
if (fuse_addr > 0) {
writel(ocotp->timing, base + OCOTP_TIMING);
@@ -205,29 +197,19 @@ static int vf610_ocotp_read(void *context,
}
buf++;
- val_size--;
- offset += reg_size;
+ bytes -= 4;
+ offset += 4;
}
return 0;
}
-static struct regmap_bus vf610_ocotp_bus = {
- .read = vf610_ocotp_read,
- .write = vf610_ocotp_write,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
-static struct regmap_config ocotp_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
static struct nvmem_config ocotp_config = {
.name = "ocotp",
.owner = THIS_MODULE,
+ .stride = 4,
+ .word_size = 4,
+ .reg_read = vf610_ocotp_read,
};
static const struct of_device_id ocotp_of_match[] = {
@@ -247,7 +229,6 @@ static int vf610_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
- struct regmap *regmap;
struct vf610_ocotp *ocotp_dev;
ocotp_dev = devm_kzalloc(&pdev->dev,
@@ -267,13 +248,8 @@ static int vf610_ocotp_probe(struct platform_device *pdev)
return PTR_ERR(ocotp_dev->clk);
}
- ocotp_regmap_config.max_register = resource_size(res);
- regmap = devm_regmap_init(dev,
- &vf610_ocotp_bus, ocotp_dev, &ocotp_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
+ ocotp_config.size = resource_size(res);
+ ocotp_config.priv = ocotp_dev;
ocotp_config.dev = dev;
ocotp_dev->nvmem = nvmem_register(&ocotp_config);
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index e2a48415d..b3bec3aaa 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -112,4 +112,7 @@ config OF_OVERLAY
While this option is selected automatically when needed, you can
enable it manually to improve device tree unit test coverage.
+config OF_NUMA
+ bool
+
endif # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 156c072b3..d7efd9d45 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -10,9 +10,9 @@ obj-$(CONFIG_OF_UNITTEST) += unittest.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
-obj-$(CONFIG_OF_MTD) += of_mtd.o
obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
obj-$(CONFIG_OF_RESOLVE) += resolver.o
obj-$(CONFIG_OF_OVERLAY) += overlay.o
+obj-$(CONFIG_OF_NUMA) += of_numa.o
obj-$(CONFIG_OF_UNITTEST) += unittest-data/
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 91a469d55..0a553c084 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -4,6 +4,7 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -673,121 +674,6 @@ const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
}
EXPORT_SYMBOL(of_get_address);
-#ifdef PCI_IOBASE
-struct io_range {
- struct list_head list;
- phys_addr_t start;
- resource_size_t size;
-};
-
-static LIST_HEAD(io_range_list);
-static DEFINE_SPINLOCK(io_range_lock);
-#endif
-
-/*
- * Record the PCI IO range (expressed as CPU physical address + size).
- * Return a negative value if an error has occured, zero otherwise
- */
-int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
-{
- int err = 0;
-
-#ifdef PCI_IOBASE
- struct io_range *range;
- resource_size_t allocated_size = 0;
-
- /* check if the range hasn't been previously recorded */
- spin_lock(&io_range_lock);
- list_for_each_entry(range, &io_range_list, list) {
- if (addr >= range->start && addr + size <= range->start + size) {
- /* range already registered, bail out */
- goto end_register;
- }
- allocated_size += range->size;
- }
-
- /* range not registed yet, check for available space */
- if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
- /* if it's too big check if 64K space can be reserved */
- if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
- err = -E2BIG;
- goto end_register;
- }
-
- size = SZ_64K;
- pr_warn("Requested IO range too big, new size set to 64K\n");
- }
-
- /* add the range to the list */
- range = kzalloc(sizeof(*range), GFP_ATOMIC);
- if (!range) {
- err = -ENOMEM;
- goto end_register;
- }
-
- range->start = addr;
- range->size = size;
-
- list_add_tail(&range->list, &io_range_list);
-
-end_register:
- spin_unlock(&io_range_lock);
-#endif
-
- return err;
-}
-
-phys_addr_t pci_pio_to_address(unsigned long pio)
-{
- phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
-
-#ifdef PCI_IOBASE
- struct io_range *range;
- resource_size_t allocated_size = 0;
-
- if (pio > IO_SPACE_LIMIT)
- return address;
-
- spin_lock(&io_range_lock);
- list_for_each_entry(range, &io_range_list, list) {
- if (pio >= allocated_size && pio < allocated_size + range->size) {
- address = range->start + pio - allocated_size;
- break;
- }
- allocated_size += range->size;
- }
- spin_unlock(&io_range_lock);
-#endif
-
- return address;
-}
-
-unsigned long __weak pci_address_to_pio(phys_addr_t address)
-{
-#ifdef PCI_IOBASE
- struct io_range *res;
- resource_size_t offset = 0;
- unsigned long addr = -1;
-
- spin_lock(&io_range_lock);
- list_for_each_entry(res, &io_range_list, list) {
- if (address >= res->start && address < res->start + res->size) {
- addr = address - res->start + offset;
- break;
- }
- offset += res->size;
- }
- spin_unlock(&io_range_lock);
-
- return addr;
-#else
- if (address > IO_SPACE_LIMIT)
- return (unsigned long)-1;
-
- return (unsigned long) address;
-#endif
-}
-
static int __of_address_to_resource(struct device_node *dev,
const __be32 *addrp, u64 size, unsigned int flags,
const char *name, struct resource *r)
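
The deleted block moves the PCI I/O range bookkeeping out of drivers/of (hence the new <linux/pci.h> include); the entry points themselves survive, so a host bridge driver keeps calling them the same way. Roughly:

/* Record a CPU-addressed I/O window with the PCI core... */
err = pci_register_io_range(cpu_addr, size);

/* ...then translate between port numbers and CPU addresses. */
pio = pci_address_to_pio(cpu_addr);
phys = pci_pio_to_address(pio);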
diff --git a/drivers/of/base.c b/drivers/of/base.c
index b299de2b3..ebf84e3b5 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -394,7 +394,8 @@ bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
* before booting secondary cores. This function uses arch_match_cpu_phys_id
* which can be overridden by architecture specific implementation.
*
- * Returns a node pointer for the logical cpu if found, else NULL.
+ * Returns a node pointer for the logical cpu with refcount incremented, use
+ * of_node_put() on it when done. Returns NULL if not found.
*/
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
@@ -1440,106 +1441,155 @@ void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
printk("\n");
}
-static int __of_parse_phandle_with_args(const struct device_node *np,
- const char *list_name,
- const char *cells_name,
- int cell_count, int index,
- struct of_phandle_args *out_args)
+int of_phandle_iterator_init(struct of_phandle_iterator *it,
+ const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int cell_count)
{
- const __be32 *list, *list_end;
- int rc = 0, size, cur_index = 0;
- uint32_t count = 0;
- struct device_node *node = NULL;
- phandle phandle;
+ const __be32 *list;
+ int size;
+
+ memset(it, 0, sizeof(*it));
- /* Retrieve the phandle list property */
list = of_get_property(np, list_name, &size);
if (!list)
return -ENOENT;
- list_end = list + size / sizeof(*list);
- /* Loop over the phandles until all the requested entry is found */
- while (list < list_end) {
- rc = -EINVAL;
- count = 0;
+ it->cells_name = cells_name;
+ it->cell_count = cell_count;
+ it->parent = np;
+ it->list_end = list + size / sizeof(*list);
+ it->phandle_end = list;
+ it->cur = list;
+
+ return 0;
+}
+
+int of_phandle_iterator_next(struct of_phandle_iterator *it)
+{
+ uint32_t count = 0;
+
+ if (it->node) {
+ of_node_put(it->node);
+ it->node = NULL;
+ }
+
+ if (!it->cur || it->phandle_end >= it->list_end)
+ return -ENOENT;
+
+ it->cur = it->phandle_end;
+
+ /* If phandle is 0, then it is an empty entry with no arguments. */
+ it->phandle = be32_to_cpup(it->cur++);
+
+ if (it->phandle) {
/*
- * If phandle is 0, then it is an empty entry with no
- * arguments. Skip forward to the next entry.
+ * Find the provider node and parse the #*-cells property to
+ * determine the argument length.
*/
- phandle = be32_to_cpup(list++);
- if (phandle) {
- /*
- * Find the provider node and parse the #*-cells
- * property to determine the argument length.
- *
- * This is not needed if the cell count is hard-coded
- * (i.e. cells_name not set, but cell_count is set),
- * except when we're going to return the found node
- * below.
- */
- if (cells_name || cur_index == index) {
- node = of_find_node_by_phandle(phandle);
- if (!node) {
- pr_err("%s: could not find phandle\n",
- np->full_name);
- goto err;
- }
- }
+ it->node = of_find_node_by_phandle(it->phandle);
- if (cells_name) {
- if (of_property_read_u32(node, cells_name,
- &count)) {
- pr_err("%s: could not get %s for %s\n",
- np->full_name, cells_name,
- node->full_name);
- goto err;
- }
- } else {
- count = cell_count;
+ if (it->cells_name) {
+ if (!it->node) {
+ pr_err("%s: could not find phandle\n",
+ it->parent->full_name);
+ goto err;
}
- /*
- * Make sure that the arguments actually fit in the
- * remaining property data length
- */
- if (list + count > list_end) {
- pr_err("%s: arguments longer than property\n",
- np->full_name);
+ if (of_property_read_u32(it->node, it->cells_name,
+ &count)) {
+ pr_err("%s: could not get %s for %s\n",
+ it->parent->full_name,
+ it->cells_name,
+ it->node->full_name);
goto err;
}
+ } else {
+ count = it->cell_count;
+ }
+
+ /*
+ * Make sure that the arguments actually fit in the remaining
+ * property data length
+ */
+ if (it->cur + count > it->list_end) {
+ pr_err("%s: arguments longer than property\n",
+ it->parent->full_name);
+ goto err;
}
+ }
+
+ it->phandle_end = it->cur + count;
+ it->cur_count = count;
+
+ return 0;
+
+err:
+ if (it->node) {
+ of_node_put(it->node);
+ it->node = NULL;
+ }
+
+ return -EINVAL;
+}
+
+int of_phandle_iterator_args(struct of_phandle_iterator *it,
+ uint32_t *args,
+ int size)
+{
+ int i, count;
+
+ count = it->cur_count;
+
+ if (WARN_ON(size < count))
+ count = size;
+
+ for (i = 0; i < count; i++)
+ args[i] = be32_to_cpup(it->cur++);
+
+ return count;
+}
+
+static int __of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int cell_count, int index,
+ struct of_phandle_args *out_args)
+{
+ struct of_phandle_iterator it;
+ int rc, cur_index = 0;
+	/* Loop over the phandles until the requested entry is found */
+ of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
/*
- * All of the error cases above bail out of the loop, so at
+ * All of the error cases bail out of the loop, so at
* this point, the parsing is successful. If the requested
* index matches, then fill the out_args structure and return,
* or return -ENOENT for an empty entry.
*/
rc = -ENOENT;
if (cur_index == index) {
- if (!phandle)
+ if (!it.phandle)
goto err;
if (out_args) {
- int i;
- if (WARN_ON(count > MAX_PHANDLE_ARGS))
- count = MAX_PHANDLE_ARGS;
- out_args->np = node;
- out_args->args_count = count;
- for (i = 0; i < count; i++)
- out_args->args[i] = be32_to_cpup(list++);
+ int c;
+
+ c = of_phandle_iterator_args(&it,
+ out_args->args,
+ MAX_PHANDLE_ARGS);
+ out_args->np = it.node;
+ out_args->args_count = c;
} else {
- of_node_put(node);
+ of_node_put(it.node);
}
/* Found it! return success */
return 0;
}
- of_node_put(node);
- node = NULL;
- list += count;
cur_index++;
}
@@ -1547,12 +1597,11 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
* Unlock node before returning result; will be one of:
* -ENOENT : index is for empty phandle
* -EINVAL : parsing error on data
- * [1..n] : Number of phandle (count mode; when index = -1)
*/
- rc = index < 0 ? cur_index : -ENOENT;
+
err:
- if (node)
- of_node_put(node);
+ if (it.node)
+ of_node_put(it.node);
return rc;
}
@@ -1684,8 +1733,20 @@ EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
const char *cells_name)
{
- return __of_parse_phandle_with_args(np, list_name, cells_name, 0, -1,
- NULL);
+ struct of_phandle_iterator it;
+ int rc, cur_index = 0;
+
+ rc = of_phandle_iterator_init(&it, np, list_name, cells_name, 0);
+ if (rc)
+ return rc;
+
+ while ((rc = of_phandle_iterator_next(&it)) == 0)
+ cur_index += 1;
+
+ if (rc != -ENOENT)
+ return rc;
+
+ return cur_index;
}
EXPORT_SYMBOL(of_count_phandle_with_args);
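A minimal consumer sketch of the iterator API added above (the device node
np and the "clocks"/"#clock-cells" binding are placeholders, not part of
this patch):

	struct of_phandle_iterator it;
	int err;

	of_for_each_phandle(&it, err, np, "clocks", "#clock-cells", 0) {
		/* it.node holds a reference to the provider; the next
		 * iteration (or an error) drops it automatically. */
		pr_info("provider %s, %d argument cells\n",
			of_node_full_name(it.node), it.cur_count);
	}
	/* err is -ENOENT once the list is exhausted, -EINVAL on
	 * malformed data - exactly what of_count_phandle_with_args()
	 * relies on above. */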
@@ -1777,6 +1838,9 @@ int of_remove_property(struct device_node *np, struct property *prop)
unsigned long flags;
int rc;
+ if (!prop)
+ return -ENODEV;
+
mutex_lock(&of_mutex);
raw_spin_lock_irqsave(&devtree_lock, flags);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index e5f47cec7..fd5cfad7c 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -88,7 +88,7 @@ void of_dma_configure(struct device *dev, struct device_node *np)
int ret;
bool coherent;
unsigned long offset;
- struct iommu_ops *iommu;
+ const struct iommu_ops *iommu;
/*
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index c647bd1b6..3033fa325 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -311,6 +311,7 @@ int of_detach_node(struct device_node *np)
return rc;
}
+EXPORT_SYMBOL_GPL(of_detach_node);
/**
* of_node_release() - release a dynamically allocated node
@@ -497,6 +498,11 @@ static void __of_changeset_entry_invert(struct of_changeset_entry *ce,
case OF_RECONFIG_UPDATE_PROPERTY:
rce->old_prop = ce->prop;
rce->prop = ce->old_prop;
+ /* update was used but original property did not exist */
+ if (!rce->prop) {
+ rce->action = OF_RECONFIG_REMOVE_PROPERTY;
+ rce->prop = ce->prop;
+ }
break;
}
}
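The case that hunk handles, sketched as a caller would hit it (np and the
property are illustrative; the of_mutex locking required around
apply/revert is omitted):

	static struct property okay = {
		.name = "status", .value = "okay", .length = 5,
	};
	struct of_changeset ocs;

	of_changeset_init(&ocs);
	/* UPDATE on a property that does not yet exist acts like ADD... */
	of_changeset_update_property(&ocs, np, &okay);
	of_changeset_apply(&ocs);
	/* ...so the inverse entry must REMOVE the property rather than
	 * restore a nonexistent old value. */
	of_changeset_revert(&ocs);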
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 3349d2aa6..33daffc43 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -161,39 +161,127 @@ static void *unflatten_dt_alloc(void **mem, unsigned long size,
return res;
}
-/**
- * unflatten_dt_node - Alloc and populate a device_node from the flat tree
- * @blob: The parent device tree blob
- * @mem: Memory chunk to use for allocating device nodes and properties
- * @poffset: pointer to node in flat tree
- * @dad: Parent struct device_node
- * @nodepp: The device_node tree created by the call
- * @fpsize: Size of the node path up at the current depth.
- * @dryrun: If true, do not allocate device nodes but still calculate needed
- * memory size
- */
-static void * unflatten_dt_node(const void *blob,
- void *mem,
- int *poffset,
- struct device_node *dad,
- struct device_node **nodepp,
- unsigned long fpsize,
+static void populate_properties(const void *blob,
+ int offset,
+ void **mem,
+ struct device_node *np,
+ const char *nodename,
bool dryrun)
{
- const __be32 *p;
+ struct property *pp, **pprev = NULL;
+ int cur;
+ bool has_name = false;
+
+ pprev = &np->properties;
+ for (cur = fdt_first_property_offset(blob, offset);
+ cur >= 0;
+ cur = fdt_next_property_offset(blob, cur)) {
+ const __be32 *val;
+ const char *pname;
+ u32 sz;
+
+ val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
+ if (!val) {
+ pr_warn("%s: Cannot locate property at 0x%x\n",
+ __func__, cur);
+ continue;
+ }
+
+ if (!pname) {
+ pr_warn("%s: Cannot find property name at 0x%x\n",
+ __func__, cur);
+ continue;
+ }
+
+ if (!strcmp(pname, "name"))
+ has_name = true;
+
+ pp = unflatten_dt_alloc(mem, sizeof(struct property),
+ __alignof__(struct property));
+ if (dryrun)
+ continue;
+
+ /* We accept flattened tree phandles either in
+ * ePAPR-style "phandle" properties, or the
+ * legacy "linux,phandle" properties. If both
+ * appear and have different values, things
+ * will get weird. Don't do that.
+ */
+ if (!strcmp(pname, "phandle") ||
+ !strcmp(pname, "linux,phandle")) {
+ if (!np->phandle)
+ np->phandle = be32_to_cpup(val);
+ }
+
+ /* And we process the "ibm,phandle" property
+ * used in pSeries dynamic device tree
+ * stuff
+ */
+ if (!strcmp(pname, "ibm,phandle"))
+ np->phandle = be32_to_cpup(val);
+
+ pp->name = (char *)pname;
+ pp->length = sz;
+ pp->value = (__be32 *)val;
+ *pprev = pp;
+ pprev = &pp->next;
+ }
+
+ /* With version 0x10 we may not have the name property,
+ * recreate it here from the unit name if absent
+ */
+ if (!has_name) {
+ const char *p = nodename, *ps = p, *pa = NULL;
+ int len;
+
+ while (*p) {
+ if ((*p) == '@')
+ pa = p;
+ else if ((*p) == '/')
+ ps = p + 1;
+ p++;
+ }
+
+ if (pa < ps)
+ pa = p;
+ len = (pa - ps) + 1;
+ pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
+ __alignof__(struct property));
+ if (!dryrun) {
+ pp->name = "name";
+ pp->length = len;
+ pp->value = pp + 1;
+ *pprev = pp;
+ pprev = &pp->next;
+ memcpy(pp->value, ps, len - 1);
+ ((char *)pp->value)[len - 1] = 0;
+ pr_debug("fixed up name for %s -> %s\n",
+ nodename, (char *)pp->value);
+ }
+ }
+
+ if (!dryrun)
+ *pprev = NULL;
+}
+
+static unsigned int populate_node(const void *blob,
+ int offset,
+ void **mem,
+ struct device_node *dad,
+ unsigned int fpsize,
+ struct device_node **pnp,
+ bool dryrun)
+{
struct device_node *np;
- struct property *pp, **prev_pp = NULL;
const char *pathp;
unsigned int l, allocl;
- static int depth;
- int old_depth;
- int offset;
- int has_name = 0;
int new_format = 0;
- pathp = fdt_get_name(blob, *poffset, &l);
- if (!pathp)
- return mem;
+ pathp = fdt_get_name(blob, offset, &l);
+ if (!pathp) {
+ *pnp = NULL;
+ return 0;
+ }
allocl = ++l;
@@ -223,7 +311,7 @@ static void * unflatten_dt_node(const void *blob,
}
}
- np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
+ np = unflatten_dt_alloc(mem, sizeof(struct device_node) + allocl,
__alignof__(struct device_node));
if (!dryrun) {
char *fn;
@@ -246,89 +334,15 @@ static void * unflatten_dt_node(const void *blob,
}
memcpy(fn, pathp, l);
- prev_pp = &np->properties;
if (dad != NULL) {
np->parent = dad;
np->sibling = dad->child;
dad->child = np;
}
}
- /* process properties */
- for (offset = fdt_first_property_offset(blob, *poffset);
- (offset >= 0);
- (offset = fdt_next_property_offset(blob, offset))) {
- const char *pname;
- u32 sz;
- if (!(p = fdt_getprop_by_offset(blob, offset, &pname, &sz))) {
- offset = -FDT_ERR_INTERNAL;
- break;
- }
-
- if (pname == NULL) {
- pr_info("Can't find property name in list !\n");
- break;
- }
- if (strcmp(pname, "name") == 0)
- has_name = 1;
- pp = unflatten_dt_alloc(&mem, sizeof(struct property),
- __alignof__(struct property));
- if (!dryrun) {
- /* We accept flattened tree phandles either in
- * ePAPR-style "phandle" properties, or the
- * legacy "linux,phandle" properties. If both
- * appear and have different values, things
- * will get weird. Don't do that. */
- if ((strcmp(pname, "phandle") == 0) ||
- (strcmp(pname, "linux,phandle") == 0)) {
- if (np->phandle == 0)
- np->phandle = be32_to_cpup(p);
- }
- /* And we process the "ibm,phandle" property
- * used in pSeries dynamic device tree
- * stuff */
- if (strcmp(pname, "ibm,phandle") == 0)
- np->phandle = be32_to_cpup(p);
- pp->name = (char *)pname;
- pp->length = sz;
- pp->value = (__be32 *)p;
- *prev_pp = pp;
- prev_pp = &pp->next;
- }
- }
- /* with version 0x10 we may not have the name property, recreate
- * it here from the unit name if absent
- */
- if (!has_name) {
- const char *p1 = pathp, *ps = pathp, *pa = NULL;
- int sz;
-
- while (*p1) {
- if ((*p1) == '@')
- pa = p1;
- if ((*p1) == '/')
- ps = p1 + 1;
- p1++;
- }
- if (pa < ps)
- pa = p1;
- sz = (pa - ps) + 1;
- pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
- __alignof__(struct property));
- if (!dryrun) {
- pp->name = "name";
- pp->length = sz;
- pp->value = pp + 1;
- *prev_pp = pp;
- prev_pp = &pp->next;
- memcpy(pp->value, ps, sz - 1);
- ((char *)pp->value)[sz - 1] = 0;
- pr_debug("fixed up name for %s -> %s\n", pathp,
- (char *)pp->value);
- }
- }
+ populate_properties(blob, offset, mem, np, pathp, dryrun);
if (!dryrun) {
- *prev_pp = NULL;
np->name = of_get_property(np, "name", NULL);
np->type = of_get_property(np, "device_type", NULL);
@@ -338,36 +352,105 @@ static void * unflatten_dt_node(const void *blob,
np->type = "<NULL>";
}
- old_depth = depth;
- *poffset = fdt_next_node(blob, *poffset, &depth);
- if (depth < 0)
- depth = 0;
- while (*poffset > 0 && depth > old_depth)
- mem = unflatten_dt_node(blob, mem, poffset, np, NULL,
- fpsize, dryrun);
+ *pnp = np;
+ return fpsize;
+}
+
+static void reverse_nodes(struct device_node *parent)
+{
+ struct device_node *child, *next;
+
+	/* Recurse into children first (depth first) */
+ child = parent->child;
+ while (child) {
+ reverse_nodes(child);
- if (*poffset < 0 && *poffset != -FDT_ERR_NOTFOUND)
- pr_err("unflatten: error %d processing FDT\n", *poffset);
+ child = child->sibling;
+ }
+
+ /* Reverse the nodes in the child list */
+ child = parent->child;
+ parent->child = NULL;
+ while (child) {
+ next = child->sibling;
+
+ child->sibling = parent->child;
+ parent->child = child;
+ child = next;
+ }
+}
+
+/**
+ * unflatten_dt_nodes - Alloc and populate device_nodes from the flat tree
+ * @blob: The parent device tree blob
+ * @mem: Memory chunk to use for allocating device nodes and properties
+ * @dad: Parent struct device_node
+ * @nodepp: The device_node tree created by the call
+ *
+ * Returns the size of the unflattened device tree or a negative error code
+ */
+static int unflatten_dt_nodes(const void *blob,
+ void *mem,
+ struct device_node *dad,
+ struct device_node **nodepp)
+{
+ struct device_node *root;
+ int offset = 0, depth = 0, initial_depth = 0;
+#define FDT_MAX_DEPTH 64
+ unsigned int fpsizes[FDT_MAX_DEPTH];
+ struct device_node *nps[FDT_MAX_DEPTH];
+ void *base = mem;
+ bool dryrun = !base;
+
+ if (nodepp)
+ *nodepp = NULL;
/*
- * Reverse the child list. Some drivers assumes node order matches .dts
- * node order
+	 * We're unflattening a device sub-tree if @dad is valid. There may
+	 * be multiple nodes in the first level of depth. We need to set
+	 * @depth to 1 to make fdt_next_node() happy, as it bails out
+	 * immediately when a negative @depth is found. Otherwise, the
+	 * device nodes other than the first one won't be unflattened
+	 * successfully.
*/
- if (!dryrun && np->child) {
- struct device_node *child = np->child;
- np->child = NULL;
- while (child) {
- struct device_node *next = child->sibling;
- child->sibling = np->child;
- np->child = child;
- child = next;
- }
+ if (dad)
+ depth = initial_depth = 1;
+
+ root = dad;
+ fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0;
+ nps[depth] = dad;
+
+ for (offset = 0;
+ offset >= 0 && depth >= initial_depth;
+ offset = fdt_next_node(blob, offset, &depth)) {
+ if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
+ continue;
+
+ fpsizes[depth+1] = populate_node(blob, offset, &mem,
+ nps[depth],
+ fpsizes[depth],
+ &nps[depth+1], dryrun);
+ if (!fpsizes[depth+1])
+ return mem - base;
+
+ if (!dryrun && nodepp && !*nodepp)
+ *nodepp = nps[depth+1];
+ if (!dryrun && !root)
+ root = nps[depth+1];
}
- if (nodepp)
- *nodepp = np;
+ if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
+ pr_err("%s: Error %d processing FDT\n", __func__, offset);
+ return -EINVAL;
+ }
- return mem;
+ /*
+	 * Reverse the child list. Some drivers assume node order matches the
+	 * .dts node order.
+ */
+ if (!dryrun)
+ reverse_nodes(root);
+
+ return mem - base;
}
/**
@@ -378,23 +461,27 @@ static void * unflatten_dt_node(const void *blob,
* pointers of the nodes so the normal device-tree walking functions
* can be used.
* @blob: The blob to expand
+ * @dad: Parent device node
* @mynodes: The device_node tree created by the call
* @dt_alloc: An allocator that provides a virtual address to memory
* for the resulting tree
+ *
+ * Returns NULL on failure or the memory chunk containing the unflattened
+ * device tree on success.
*/
-static void __unflatten_device_tree(const void *blob,
- struct device_node **mynodes,
- void * (*dt_alloc)(u64 size, u64 align))
+static void *__unflatten_device_tree(const void *blob,
+ struct device_node *dad,
+ struct device_node **mynodes,
+ void *(*dt_alloc)(u64 size, u64 align))
{
- unsigned long size;
- int start;
+ int size;
void *mem;
pr_debug(" -> unflatten_device_tree()\n");
if (!blob) {
pr_debug("No device tree pointer\n");
- return;
+ return NULL;
}
pr_debug("Unflattening device tree:\n");
@@ -404,15 +491,16 @@ static void __unflatten_device_tree(const void *blob,
if (fdt_check_header(blob)) {
pr_err("Invalid device tree blob header\n");
- return;
+ return NULL;
}
/* First pass, scan for size */
- start = 0;
- size = (unsigned long)unflatten_dt_node(blob, NULL, &start, NULL, NULL, 0, true);
- size = ALIGN(size, 4);
+ size = unflatten_dt_nodes(blob, NULL, dad, NULL);
+ if (size < 0)
+ return NULL;
- pr_debug(" size is %lx, allocating...\n", size);
+ size = ALIGN(size, 4);
+ pr_debug(" size is %d, allocating...\n", size);
/* Allocate memory for the expanded device tree */
mem = dt_alloc(size + 4, __alignof__(struct device_node));
@@ -423,13 +511,13 @@ static void __unflatten_device_tree(const void *blob,
pr_debug(" unflattening %p...\n", mem);
/* Second pass, do actual unflattening */
- start = 0;
- unflatten_dt_node(blob, mem, &start, NULL, mynodes, 0, false);
+ unflatten_dt_nodes(blob, mem, dad, mynodes);
if (be32_to_cpup(mem + size) != 0xdeadbeef)
pr_warning("End of tree marker overwritten: %08x\n",
be32_to_cpup(mem + size));
pr_debug(" <- unflatten_device_tree()\n");
+ return mem;
}
static void *kernel_tree_alloc(u64 size, u64 align)
@@ -441,18 +529,29 @@ static DEFINE_MUTEX(of_fdt_unflatten_mutex);
/**
* of_fdt_unflatten_tree - create tree of device_nodes from flat blob
+ * @blob: Flat device tree blob
+ * @dad: Parent device node
+ * @mynodes: The device tree created by the call
*
* unflattens the device-tree passed by the firmware, creating the
* tree of struct device_node. It also fills the "name" and "type"
* pointers of the nodes so the normal device-tree walking functions
* can be used.
+ *
+ * Returns NULL on failure or the memory chunk containing the unflattened
+ * device tree on success.
*/
-void of_fdt_unflatten_tree(const unsigned long *blob,
- struct device_node **mynodes)
+void *of_fdt_unflatten_tree(const unsigned long *blob,
+ struct device_node *dad,
+ struct device_node **mynodes)
{
+ void *mem;
+
mutex_lock(&of_fdt_unflatten_mutex);
- __unflatten_device_tree(blob, mynodes, &kernel_tree_alloc);
+ mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc);
mutex_unlock(&of_fdt_unflatten_mutex);
+
+ return mem;
}
EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
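A sketch of the new calling convention (fdt_blob is a placeholder for a
valid flattened tree already in memory):

	struct device_node *root = NULL;
	void *mem;

	mem = of_fdt_unflatten_tree(fdt_blob, NULL, &root);
	if (!mem || !root)
		return -ENODATA;	/* bad blob or allocation failure */
	/* mem backs every node and property of the new tree and must
	 * stay allocated for as long as the tree is in use. */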
@@ -969,10 +1068,16 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
* is set in which case we override whatever was found earlier.
*/
#ifdef CONFIG_CMDLINE
-#ifndef CONFIG_CMDLINE_FORCE
+#if defined(CONFIG_CMDLINE_EXTEND)
+ strlcat(data, " ", COMMAND_LINE_SIZE);
+ strlcat(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#elif defined(CONFIG_CMDLINE_FORCE)
+ strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#else
+	/* No arguments from boot loader, use kernel's cmdline */
if (!((char *)data)[0])
-#endif
strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#endif
#endif /* CONFIG_CMDLINE */
pr_debug("Command line is: %s\n", (char*)data);
@@ -1118,7 +1223,7 @@ bool __init early_init_dt_scan(void *params)
*/
void __init unflatten_device_tree(void)
{
- __unflatten_device_tree(initial_boot_params, &of_root,
+ __unflatten_device_tree(initial_boot_params, NULL, &of_root,
early_init_dt_alloc_memory_arch);
/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
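The rewritten unflattening above keeps the old two-pass shape; reduced to
its skeleton it is (names as in the code above, error handling dropped):

	/* Pass 1: mem == NULL turns every unflatten_dt_alloc() into a
	 * pure size accumulation (dryrun). */
	size = unflatten_dt_nodes(blob, NULL, dad, NULL);

	mem = dt_alloc(size + 4, __alignof__(struct device_node));
	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef); /* guard word */

	/* Pass 2: the identical walk, now bumping a cursor through mem
	 * as nodes and properties are written out. */
	unflatten_dt_nodes(blob, mem, dad, mynodes);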
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 8453f08d2..e051e1b57 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -41,8 +41,8 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id)
return -EINVAL;
}
-static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *child,
- u32 addr)
+static void of_mdiobus_register_phy(struct mii_bus *mdio,
+ struct device_node *child, u32 addr)
{
struct phy_device *phy;
bool is_c45;
@@ -56,8 +56,8 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
else
phy = get_phy_device(mdio, addr, is_c45);
- if (IS_ERR_OR_NULL(phy))
- return 1;
+ if (IS_ERR(phy))
+ return;
rc = irq_of_parse_and_map(child, 0);
if (rc > 0) {
@@ -81,25 +81,22 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
if (rc) {
phy_device_free(phy);
of_node_put(child);
- return 1;
+ return;
}
dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
child->name, addr);
-
- return 0;
}
-static int of_mdiobus_register_device(struct mii_bus *mdio,
- struct device_node *child,
- u32 addr)
+static void of_mdiobus_register_device(struct mii_bus *mdio,
+ struct device_node *child, u32 addr)
{
struct mdio_device *mdiodev;
int rc;
mdiodev = mdio_device_create(mdio, addr);
if (IS_ERR(mdiodev))
- return 1;
+ return;
/* Associate the OF node with the device structure so it
* can be looked up later.
@@ -112,13 +109,11 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
if (rc) {
mdio_device_free(mdiodev);
of_node_put(child);
- return 1;
+ return;
}
dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
child->name, addr);
-
- return 0;
}
int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
@@ -214,6 +209,10 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
bool scanphys = false;
int addr, rc;
+ /* Do not continue if the node is disabled */
+ if (!of_device_is_available(np))
+ return -ENODEV;
+
/* Mask out all PHYs from auto probing. Instead the PHYs listed in
* the device tree are populated after the bus has been registered */
mdio->phy_mask = ~0;
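A hypothetical caller's view of the new check (bus, mdio_np and dev are
placeholders):

	err = of_mdiobus_register(bus, mdio_np);
	if (err == -ENODEV)
		/* node carries status = "disabled"; no bus registered */
		dev_dbg(dev, "MDIO bus disabled in DT, skipping\n");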
diff --git a/drivers/of/of_mtd.c b/drivers/of/of_mtd.c
deleted file mode 100644
index b7361ed70..000000000
--- a/drivers/of/of_mtd.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
- *
- * OF helpers for mtd.
- *
- * This file is released under the GPLv2
- *
- */
-#include <linux/kernel.h>
-#include <linux/of_mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/export.h>
-
-/**
- * It maps 'enum nand_ecc_modes_t' found in include/linux/mtd/nand.h
- * into the device tree binding of 'nand-ecc', so that MTD
- * device driver can get nand ecc from device tree.
- */
-static const char *nand_ecc_modes[] = {
- [NAND_ECC_NONE] = "none",
- [NAND_ECC_SOFT] = "soft",
- [NAND_ECC_HW] = "hw",
- [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
- [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
- [NAND_ECC_SOFT_BCH] = "soft_bch",
-};
-
-/**
- * of_get_nand_ecc_mode - Get nand ecc mode for given device_node
- * @np: Pointer to the given device_node
- *
- * The function gets ecc mode string from property 'nand-ecc-mode',
- * and return its index in nand_ecc_modes table, or errno in error case.
- */
-int of_get_nand_ecc_mode(struct device_node *np)
-{
- const char *pm;
- int err, i;
-
- err = of_property_read_string(np, "nand-ecc-mode", &pm);
- if (err < 0)
- return err;
-
- for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
- if (!strcasecmp(pm, nand_ecc_modes[i]))
- return i;
-
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(of_get_nand_ecc_mode);
-
-/**
- * of_get_nand_ecc_step_size - Get ECC step size associated to
- * the required ECC strength (see below).
- * @np: Pointer to the given device_node
- *
- * return the ECC step size, or errno in error case.
- */
-int of_get_nand_ecc_step_size(struct device_node *np)
-{
- int ret;
- u32 val;
-
- ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
- return ret ? ret : val;
-}
-EXPORT_SYMBOL_GPL(of_get_nand_ecc_step_size);
-
-/**
- * of_get_nand_ecc_strength - Get required ECC strength over the
- * correspnding step size as defined by 'nand-ecc-size'
- * @np: Pointer to the given device_node
- *
- * return the ECC strength, or errno in error case.
- */
-int of_get_nand_ecc_strength(struct device_node *np)
-{
- int ret;
- u32 val;
-
- ret = of_property_read_u32(np, "nand-ecc-strength", &val);
- return ret ? ret : val;
-}
-EXPORT_SYMBOL_GPL(of_get_nand_ecc_strength);
-
-/**
- * of_get_nand_bus_width - Get nand bus witdh for given device_node
- * @np: Pointer to the given device_node
- *
- * return bus width option, or errno in error case.
- */
-int of_get_nand_bus_width(struct device_node *np)
-{
- u32 val;
-
- if (of_property_read_u32(np, "nand-bus-width", &val))
- return 8;
-
- switch(val) {
- case 8:
- case 16:
- return val;
- default:
- return -EIO;
- }
-}
-EXPORT_SYMBOL_GPL(of_get_nand_bus_width);
-
-/**
- * of_get_nand_on_flash_bbt - Get nand on flash bbt for given device_node
- * @np: Pointer to the given device_node
- *
- * return true if present false other wise
- */
-bool of_get_nand_on_flash_bbt(struct device_node *np)
-{
- return of_property_read_bool(np, "nand-on-flash-bbt");
-}
-EXPORT_SYMBOL_GPL(of_get_nand_on_flash_bbt);
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
new file mode 100644
index 000000000..0f2784bc1
--- /dev/null
+++ b/drivers/of/of_numa.c
@@ -0,0 +1,211 @@
+/*
+ * OF NUMA Parsing support.
+ *
+ * Copyright (C) 2015 - 2016 Cavium Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/nodemask.h>
+
+#include <asm/numa.h>
+
+/* define the default numa node as 0 */
+#define DEFAULT_NODE 0
+
+/*
+ * Even though we connect cpus to numa domains later in SMP
+ * init, we need to know the node ids now for all cpus.
+ */
+static void __init of_numa_parse_cpu_nodes(void)
+{
+ u32 nid;
+ int r;
+ struct device_node *cpus;
+ struct device_node *np = NULL;
+
+ cpus = of_find_node_by_path("/cpus");
+ if (!cpus)
+ return;
+
+ for_each_child_of_node(cpus, np) {
+ /* Skip things that are not CPUs */
+ if (of_node_cmp(np->type, "cpu") != 0)
+ continue;
+
+ r = of_property_read_u32(np, "numa-node-id", &nid);
+ if (r)
+ continue;
+
+ pr_debug("NUMA: CPU on %u\n", nid);
+ if (nid >= MAX_NUMNODES)
+ pr_warn("NUMA: Node id %u exceeds maximum value\n",
+ nid);
+ else
+ node_set(nid, numa_nodes_parsed);
+ }
+}
+
+static int __init of_numa_parse_memory_nodes(void)
+{
+ struct device_node *np = NULL;
+ struct resource rsrc;
+ u32 nid;
+ int r = 0;
+
+ for (;;) {
+ np = of_find_node_by_type(np, "memory");
+ if (!np)
+ break;
+
+ r = of_property_read_u32(np, "numa-node-id", &nid);
+ if (r == -EINVAL)
+ /*
+			 * -EINVAL means the property doesn't exist;
+			 * keep looking for more memory nodes with a
+			 * "numa-node-id" property
+ */
+ continue;
+ else if (r)
+ /* some other error */
+ break;
+
+ r = of_address_to_resource(np, 0, &rsrc);
+ if (r) {
+ pr_err("NUMA: bad reg property in memory node\n");
+ break;
+ }
+
+ pr_debug("NUMA: base = %llx len = %llx, node = %u\n",
+ rsrc.start, rsrc.end - rsrc.start + 1, nid);
+
+ r = numa_add_memblk(nid, rsrc.start,
+ rsrc.end - rsrc.start + 1);
+ if (r)
+ break;
+ }
+ of_node_put(np);
+
+ return r;
+}
+
+static int __init of_numa_parse_distance_map_v1(struct device_node *map)
+{
+ const __be32 *matrix;
+ int entry_count;
+ int i;
+
+ pr_info("NUMA: parsing numa-distance-map-v1\n");
+
+ matrix = of_get_property(map, "distance-matrix", NULL);
+ if (!matrix) {
+ pr_err("NUMA: No distance-matrix property in distance-map\n");
+ return -EINVAL;
+ }
+
+ entry_count = of_property_count_u32_elems(map, "distance-matrix");
+ if (entry_count <= 0) {
+ pr_err("NUMA: Invalid distance-matrix\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i + 2 < entry_count; i += 3) {
+ u32 nodea, nodeb, distance;
+
+ nodea = of_read_number(matrix, 1);
+ matrix++;
+ nodeb = of_read_number(matrix, 1);
+ matrix++;
+ distance = of_read_number(matrix, 1);
+ matrix++;
+
+ numa_set_distance(nodea, nodeb, distance);
+ pr_debug("NUMA: distance[node%d -> node%d] = %d\n",
+ nodea, nodeb, distance);
+
+ /* Set default distance of node B->A same as A->B */
+ if (nodeb > nodea)
+ numa_set_distance(nodeb, nodea, distance);
+ }
+
+ return 0;
+}
+
+static int __init of_numa_parse_distance_map(void)
+{
+ int ret = 0;
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL,
+ "numa-distance-map-v1");
+ if (np)
+ ret = of_numa_parse_distance_map_v1(np);
+
+ of_node_put(np);
+ return ret;
+}
+
+int of_node_to_nid(struct device_node *device)
+{
+ struct device_node *np;
+ u32 nid;
+ int r = -ENODATA;
+
+ np = of_node_get(device);
+
+ while (np) {
+ struct device_node *parent;
+
+ r = of_property_read_u32(np, "numa-node-id", &nid);
+ /*
+ * -EINVAL indicates the property was not found, and
+ * we walk up the tree trying to find a parent with a
+ * "numa-node-id". Any other type of error indicates
+ * a bad device tree and we give up.
+ */
+ if (r != -EINVAL)
+ break;
+
+ parent = of_get_parent(np);
+ of_node_put(np);
+ np = parent;
+ }
+ if (np && r)
+ pr_warn("NUMA: Invalid \"numa-node-id\" property in node %s\n",
+ np->name);
+ of_node_put(np);
+
+ if (!r) {
+ if (nid >= MAX_NUMNODES)
+ pr_warn("NUMA: Node id %u exceeds maximum value\n",
+ nid);
+ else
+ return nid;
+ }
+
+ return NUMA_NO_NODE;
+}
+EXPORT_SYMBOL(of_node_to_nid);
+
+int __init of_numa_init(void)
+{
+ int r;
+
+ of_numa_parse_cpu_nodes();
+ r = of_numa_parse_memory_nodes();
+ if (r)
+ return r;
+ return of_numa_parse_distance_map();
+}
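For reference, the distance-matrix property consumed above is a flat list
of (nodeA, nodeB, distance) triplets. A made-up two-node example:

	/* distance-matrix = <0 0 10>, <0 1 20>, <1 1 10>;
	 * drives the parsing loop above to issue:
	 *	numa_set_distance(0, 0, 10);
	 *	numa_set_distance(0, 1, 20);
	 *	numa_set_distance(1, 0, 20);	mirrored: nodeb > nodea
	 *	numa_set_distance(1, 1, 10);
	 */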
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index ed01c0172..216648233 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -127,8 +127,15 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
}
/* Need adjust the alignment to satisfy the CMA requirement */
- if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool"))
- align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+ if (IS_ENABLED(CONFIG_CMA)
+ && of_flat_dt_is_compatible(node, "shared-dma-pool")
+ && of_get_flat_dt_prop(node, "reusable", NULL)
+ && !of_get_flat_dt_prop(node, "no-map", NULL)) {
+ unsigned long order =
+ max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
+
+ align = max(align, (phys_addr_t)PAGE_SIZE << order);
+ }
prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
if (prop) {
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 8d103e496..16e8daffa 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -297,19 +297,37 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *lookup,
struct device_node *np)
{
+ const struct of_dev_auxdata *auxdata;
struct resource res;
+ int compatible = 0;
if (!lookup)
return NULL;
- for(; lookup->compatible != NULL; lookup++) {
- if (!of_device_is_compatible(np, lookup->compatible))
+ auxdata = lookup;
+ for (; auxdata->compatible; auxdata++) {
+ if (!of_device_is_compatible(np, auxdata->compatible))
continue;
+ compatible++;
if (!of_address_to_resource(np, 0, &res))
- if (res.start != lookup->phys_addr)
+ if (res.start != auxdata->phys_addr)
continue;
- pr_debug("%s: devname=%s\n", np->full_name, lookup->name);
- return lookup;
+ pr_debug("%s: devname=%s\n", np->full_name, auxdata->name);
+ return auxdata;
+ }
+
+ if (!compatible)
+ return NULL;
+
+ /* Try compatible match if no phys_addr and name are specified */
+ auxdata = lookup;
+ for (; auxdata->compatible; auxdata++) {
+ if (!of_device_is_compatible(np, auxdata->compatible))
+ continue;
+ if (!auxdata->phys_addr && !auxdata->name) {
+ pr_debug("%s: compatible match\n", np->full_name);
+ return auxdata;
+ }
}
return NULL;
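A hypothetical auxdata table exercising the new fallback pass (all names
invented): the first entry still matches on compatible plus base address,
while the second matches on compatible alone because it sets neither
phys_addr nor name:

	static const struct of_dev_auxdata acme_auxdata[] = {
		OF_DEV_AUXDATA("acme,uart", 0x10010000, "acme-uart.0", NULL),
		{ .compatible = "acme,dma" }, /* compatible-only fallback */
		{ /* sentinel */ }
	};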
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index e986e6ee5..f34ed9310 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -8,7 +8,6 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/hashtable.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
@@ -921,7 +920,7 @@ static int __init unittest_data_add(void)
"not running tests\n", __func__);
return -ENOMEM;
}
- of_fdt_unflatten_tree(unittest_data, &unittest_data_node);
+ of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
if (!unittest_data_node) {
pr_warn("%s: No tree to attach; not running tests\n", __func__);
return -ENODATA;
@@ -1692,13 +1691,7 @@ static struct i2c_driver unittest_i2c_dev_driver = {
#if IS_BUILTIN(CONFIG_I2C_MUX)
-struct unittest_i2c_mux_data {
- int nchans;
- struct i2c_adapter *adap[];
-};
-
-static int unittest_i2c_mux_select_chan(struct i2c_adapter *adap,
- void *client, u32 chan)
+static int unittest_i2c_mux_select_chan(struct i2c_mux_core *muxc, u32 chan)
{
return 0;
}
@@ -1706,11 +1699,11 @@ static int unittest_i2c_mux_select_chan(struct i2c_adapter *adap,
static int unittest_i2c_mux_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int ret, i, nchans, size;
+ int ret, i, nchans;
struct device *dev = &client->dev;
struct i2c_adapter *adap = to_i2c_adapter(dev->parent);
struct device_node *np = client->dev.of_node, *child;
- struct unittest_i2c_mux_data *stm;
+ struct i2c_mux_core *muxc;
u32 reg, max_reg;
dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
@@ -1734,25 +1727,20 @@ static int unittest_i2c_mux_probe(struct i2c_client *client,
return -EINVAL;
}
- size = offsetof(struct unittest_i2c_mux_data, adap[nchans]);
- stm = devm_kzalloc(dev, size, GFP_KERNEL);
- if (!stm) {
- dev_err(dev, "Out of memory\n");
+ muxc = i2c_mux_alloc(adap, dev, nchans, 0, 0,
+ unittest_i2c_mux_select_chan, NULL);
+ if (!muxc)
return -ENOMEM;
- }
- stm->nchans = nchans;
for (i = 0; i < nchans; i++) {
- stm->adap[i] = i2c_add_mux_adapter(adap, dev, client,
- 0, i, 0, unittest_i2c_mux_select_chan, NULL);
- if (!stm->adap[i]) {
+ ret = i2c_mux_add_adapter(muxc, 0, i, 0);
+ if (ret) {
dev_err(dev, "Failed to register mux #%d\n", i);
- for (i--; i >= 0; i--)
- i2c_del_mux_adapter(stm->adap[i]);
+ i2c_mux_del_adapters(muxc);
return -ENODEV;
}
}
- i2c_set_clientdata(client, stm);
+ i2c_set_clientdata(client, muxc);
return 0;
};
@@ -1761,12 +1749,10 @@ static int unittest_i2c_mux_remove(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = client->dev.of_node;
- struct unittest_i2c_mux_data *stm = i2c_get_clientdata(client);
- int i;
+ struct i2c_mux_core *muxc = i2c_get_clientdata(client);
dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
- for (i = stm->nchans - 1; i >= 0; i--)
- i2c_del_mux_adapter(stm->adap[i]);
+ i2c_mux_del_adapters(muxc);
return 0;
}
diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
index c776333a6..74ed3e459 100644
--- a/drivers/parport/procfs.c
+++ b/drivers/parport/procfs.c
@@ -617,5 +617,5 @@ static void __exit parport_default_proc_unregister (void)
}
#endif
-module_init(parport_default_proc_register)
+subsys_initcall(parport_default_proc_register)
module_exit(parport_default_proc_unregister)
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 209292e06..56389be5d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -83,6 +83,9 @@ config HT_IRQ
config PCI_ATS
bool
+config PCI_ECAM
+ bool
+
config PCI_IOV
bool "PCI IOV support"
depends on PCI
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 2154092dd..1fa692573 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -55,6 +55,8 @@ obj-$(CONFIG_PCI_SYSCALL) += syscall.o
obj-$(CONFIG_PCI_STUB) += pci-stub.o
+obj-$(CONFIG_PCI_ECAM) += ecam.o
+
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
obj-$(CONFIG_OF) += of.o
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
new file mode 100644
index 000000000..f9832ad8e
--- /dev/null
+++ b/drivers/pci/ecam.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "ecam.h"
+
+/*
+ * On 64-bit systems, we do a single ioremap for the whole config space
+ * since we have enough virtual address range available. On 32-bit, we
+ * ioremap the config space for each bus individually.
+ */
+static const bool per_bus_mapping = !config_enabled(CONFIG_64BIT);
+
+/*
+ * Create a PCI config space window
+ * - reserve mem region
+ * - alloc struct pci_config_window with space for all mappings
+ * - ioremap the config space
+ */
+struct pci_config_window *pci_ecam_create(struct device *dev,
+ struct resource *cfgres, struct resource *busr,
+ struct pci_ecam_ops *ops)
+{
+ struct pci_config_window *cfg;
+ unsigned int bus_range, bus_range_max, bsz;
+ struct resource *conflict;
+ int i, err;
+
+ if (busr->start > busr->end)
+ return ERR_PTR(-EINVAL);
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return ERR_PTR(-ENOMEM);
+
+ cfg->ops = ops;
+ cfg->busr.start = busr->start;
+ cfg->busr.end = busr->end;
+ cfg->busr.flags = IORESOURCE_BUS;
+ bus_range = resource_size(&cfg->busr);
+ bus_range_max = resource_size(cfgres) >> ops->bus_shift;
+ if (bus_range > bus_range_max) {
+ bus_range = bus_range_max;
+ cfg->busr.end = busr->start + bus_range - 1;
+ dev_warn(dev, "ECAM area %pR can only accommodate %pR (reduced from %pR desired)\n",
+ cfgres, &cfg->busr, busr);
+ }
+ bsz = 1 << ops->bus_shift;
+
+ cfg->res.start = cfgres->start;
+ cfg->res.end = cfgres->end;
+ cfg->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ cfg->res.name = "PCI ECAM";
+
+ conflict = request_resource_conflict(&iomem_resource, &cfg->res);
+ if (conflict) {
+ err = -EBUSY;
+ dev_err(dev, "can't claim ECAM area %pR: address conflict with %s %pR\n",
+ &cfg->res, conflict->name, conflict);
+ goto err_exit;
+ }
+
+ if (per_bus_mapping) {
+ cfg->winp = kcalloc(bus_range, sizeof(*cfg->winp), GFP_KERNEL);
+ if (!cfg->winp)
+ goto err_exit_malloc;
+ for (i = 0; i < bus_range; i++) {
+ cfg->winp[i] = ioremap(cfgres->start + i * bsz, bsz);
+ if (!cfg->winp[i])
+ goto err_exit_iomap;
+ }
+ } else {
+ cfg->win = ioremap(cfgres->start, bus_range * bsz);
+ if (!cfg->win)
+ goto err_exit_iomap;
+ }
+
+ if (ops->init) {
+ err = ops->init(dev, cfg);
+ if (err)
+ goto err_exit;
+ }
+ dev_info(dev, "ECAM at %pR for %pR\n", &cfg->res, &cfg->busr);
+ return cfg;
+
+err_exit_iomap:
+ dev_err(dev, "ECAM ioremap failed\n");
+err_exit_malloc:
+ err = -ENOMEM;
+err_exit:
+ pci_ecam_free(cfg);
+ return ERR_PTR(err);
+}
+
+void pci_ecam_free(struct pci_config_window *cfg)
+{
+ int i;
+
+ if (per_bus_mapping) {
+ if (cfg->winp) {
+ for (i = 0; i < resource_size(&cfg->busr); i++)
+ if (cfg->winp[i])
+ iounmap(cfg->winp[i]);
+ kfree(cfg->winp);
+ }
+ } else {
+ if (cfg->win)
+ iounmap(cfg->win);
+ }
+ if (cfg->res.parent)
+ release_resource(&cfg->res);
+ kfree(cfg);
+}
+
+/*
+ * Function to implement the pci_ops ->map_bus method
+ */
+void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_config_window *cfg = bus->sysdata;
+ unsigned int devfn_shift = cfg->ops->bus_shift - 8;
+ unsigned int busn = bus->number;
+ void __iomem *base;
+
+ if (busn < cfg->busr.start || busn > cfg->busr.end)
+ return NULL;
+
+ busn -= cfg->busr.start;
+ if (per_bus_mapping)
+ base = cfg->winp[busn];
+ else
+ base = cfg->win + (busn << cfg->ops->bus_shift);
+ return base + (devfn << devfn_shift) + where;
+}
+
+/* ECAM ops */
+struct pci_ecam_ops pci_generic_ecam_ops = {
+ .bus_shift = 20,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+ }
+};
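A sketch of how a host controller driver might use the new helper
(resource values invented, error unwinding trimmed):

	struct resource cfgres = DEFINE_RES_MEM(0x40000000, SZ_256M);
	struct resource busr = {
		.start = 0, .end = 255, .flags = IORESOURCE_BUS,
	};
	struct pci_config_window *cfg;

	cfg = pci_ecam_create(dev, &cfgres, &busr, &pci_generic_ecam_ops);
	if (IS_ERR(cfg))
		return PTR_ERR(cfg);
	/* With bus_shift = 20, pci_ecam_map_bus() resolves to
	 * base + (bus << 20) + (devfn << 12) + where: 1 MiB per bus,
	 * 4 KiB per function - the standard ECAM layout - so the
	 * 256 MiB window above covers exactly buses 0-255. */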
diff --git a/drivers/pci/ecam.h b/drivers/pci/ecam.h
new file mode 100644
index 000000000..9878bebd4
--- /dev/null
+++ b/drivers/pci/ecam.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+#ifndef DRIVERS_PCI_ECAM_H
+#define DRIVERS_PCI_ECAM_H
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+/*
+ * struct to hold pci ops and bus shift of the config window
+ * for a PCI controller.
+ */
+struct pci_config_window;
+struct pci_ecam_ops {
+ unsigned int bus_shift;
+ struct pci_ops pci_ops;
+ int (*init)(struct device *,
+ struct pci_config_window *);
+};
+
+/*
+ * struct to hold the mappings of a config space window. This
+ * is expected to be used as sysdata for PCI controllers that
+ * use ECAM.
+ */
+struct pci_config_window {
+ struct resource res;
+ struct resource busr;
+ void *priv;
+ struct pci_ecam_ops *ops;
+ union {
+ void __iomem *win; /* 64-bit single mapping */
+ void __iomem **winp; /* 32-bit per-bus mapping */
+ };
+};
+
+/* create and free pci_config_window */
+struct pci_config_window *pci_ecam_create(struct device *dev,
+ struct resource *cfgres, struct resource *busr,
+ struct pci_ecam_ops *ops);
+void pci_ecam_free(struct pci_config_window *cfg);
+
+/* map_bus when ->sysdata is an instance of pci_config_window */
+void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int where);
+/* default ECAM ops */
+extern struct pci_ecam_ops pci_generic_ecam_ops;
+
+#ifdef CONFIG_PCI_HOST_GENERIC
+/* for DT-based PCI controllers that support ECAM */
+int pci_host_common_probe(struct platform_device *pdev,
+ struct pci_ecam_ops *ops);
+#endif
+#endif
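A controller whose config space needs fixups can supply its own ops; a
hypothetical sketch (the acme_* functions are invented stand-ins):

	static int acme_pcie_init(struct device *dev,
				  struct pci_config_window *cfg);
	static int acme_pcie_config_read(struct pci_bus *bus,
					 unsigned int devfn, int where,
					 int size, u32 *val);

	static struct pci_ecam_ops acme_pcie_ecam_ops = {
		.bus_shift	= 20,
		.init		= acme_pcie_init, /* stash extras in cfg->priv */
		.pci_ops	= {
			.map_bus	= pci_ecam_map_bus,
			.read		= acme_pcie_config_read,
			.write		= pci_generic_config_write,
		}
	};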
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 7a0780d56..5d2374e4e 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -69,14 +69,17 @@ config PCI_RCAR_GEN2
There are 3 internal PCI controllers available with a single
built-in EHCI/OHCI host controller present on each one.
-config PCI_RCAR_GEN2_PCIE
+config PCIE_RCAR
bool "Renesas R-Car PCIe controller"
depends on ARCH_RENESAS || (ARM && COMPILE_TEST)
+ select PCI_MSI
+ select PCI_MSI_IRQ_DOMAIN
help
- Say Y here if you want PCIe controller support on R-Car Gen2 SoCs.
+ Say Y here if you want PCIe controller support on R-Car SoCs.
config PCI_HOST_COMMON
bool
+ select PCI_ECAM
config PCI_HOST_GENERIC
bool "Generic PCI host controller"
@@ -231,4 +234,15 @@ config PCI_HOST_THUNDER_ECAM
help
Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs.
+config PCIE_ARMADA_8K
+ bool "Marvell Armada-8K PCIe controller"
+ depends on ARCH_MVEBU
+ select PCIE_DW
+ select PCIEPORTBUS
+ help
+	  Say Y here if you want to enable PCIe controller support on
+	  Armada-8K SoCs. The PCIe controller on Armada-8K is based on
+	  Designware hardware, so the driver reuses the Designware core
+	  functions.
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index d85b5faf9..9c8698e89 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
-obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
+obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o
obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
@@ -28,3 +28,4 @@ obj-$(CONFIG_PCI_HISI) += pcie-hisi.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o
obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o
+obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 2ca3a1f30..f44113040 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -142,13 +142,13 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
- dw_pcie_setup_rc(pp);
-
pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR;
pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR;
+ dw_pcie_setup_rc(pp);
+
dra7xx_pcie_establish_link(pp);
if (IS_ENABLED(CONFIG_PCI_MSI))
dw_pcie_msi_init(pp);
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
index e9f850f07..8cba7ab73 100644
--- a/drivers/pci/host/pci-host-common.c
+++ b/drivers/pci/host/pci-host-common.c
@@ -22,27 +22,21 @@
#include <linux/of_pci.h>
#include <linux/platform_device.h>
-#include "pci-host-common.h"
+#include "../ecam.h"
-static void gen_pci_release_of_pci_ranges(struct gen_pci *pci)
-{
- pci_free_resource_list(&pci->resources);
-}
-
-static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
+static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
+ struct list_head *resources, struct resource **bus_range)
{
int err, res_valid = 0;
- struct device *dev = pci->host.dev.parent;
struct device_node *np = dev->of_node;
resource_size_t iobase;
struct resource_entry *win;
- err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
- &iobase);
+ err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase);
if (err)
return err;
- resource_list_for_each_entry(win, &pci->resources) {
+ resource_list_for_each_entry(win, resources) {
struct resource *parent, *res = win->res;
switch (resource_type(res)) {
@@ -60,7 +54,7 @@ static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
break;
case IORESOURCE_BUS:
- pci->cfg.bus_range = res;
+ *bus_range = res;
default:
continue;
}
@@ -79,65 +73,60 @@ static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
return 0;
out_release_res:
- gen_pci_release_of_pci_ranges(pci);
return err;
}
-static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
+static void gen_pci_unmap_cfg(void *ptr)
+{
+ pci_ecam_free((struct pci_config_window *)ptr);
+}
+
+static struct pci_config_window *gen_pci_init(struct device *dev,
+ struct list_head *resources, struct pci_ecam_ops *ops)
{
int err;
- u8 bus_max;
- resource_size_t busn;
- struct resource *bus_range;
- struct device *dev = pci->host.dev.parent;
- struct device_node *np = dev->of_node;
- u32 sz = 1 << pci->cfg.ops->bus_shift;
+ struct resource cfgres;
+ struct resource *bus_range = NULL;
+ struct pci_config_window *cfg;
- err = of_address_to_resource(np, 0, &pci->cfg.res);
+ /* Parse our PCI ranges and request their resources */
+ err = gen_pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
+ if (err)
+ goto err_out;
+
+ err = of_address_to_resource(dev->of_node, 0, &cfgres);
if (err) {
dev_err(dev, "missing \"reg\" property\n");
- return err;
+ goto err_out;
}
- /* Limit the bus-range to fit within reg */
- bus_max = pci->cfg.bus_range->start +
- (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1;
- pci->cfg.bus_range->end = min_t(resource_size_t,
- pci->cfg.bus_range->end, bus_max);
-
- pci->cfg.win = devm_kcalloc(dev, resource_size(pci->cfg.bus_range),
- sizeof(*pci->cfg.win), GFP_KERNEL);
- if (!pci->cfg.win)
- return -ENOMEM;
-
- /* Map our Configuration Space windows */
- if (!devm_request_mem_region(dev, pci->cfg.res.start,
- resource_size(&pci->cfg.res),
- "Configuration Space"))
- return -ENOMEM;
-
- bus_range = pci->cfg.bus_range;
- for (busn = bus_range->start; busn <= bus_range->end; ++busn) {
- u32 idx = busn - bus_range->start;
-
- pci->cfg.win[idx] = devm_ioremap(dev,
- pci->cfg.res.start + idx * sz,
- sz);
- if (!pci->cfg.win[idx])
- return -ENOMEM;
+ cfg = pci_ecam_create(dev, &cfgres, bus_range, ops);
+ if (IS_ERR(cfg)) {
+ err = PTR_ERR(cfg);
+ goto err_out;
}
- return 0;
+ err = devm_add_action(dev, gen_pci_unmap_cfg, cfg);
+ if (err) {
+ gen_pci_unmap_cfg(cfg);
+ goto err_out;
+ }
+ return cfg;
+
+err_out:
+ pci_free_resource_list(resources);
+ return ERR_PTR(err);
}
int pci_host_common_probe(struct platform_device *pdev,
- struct gen_pci *pci)
+ struct pci_ecam_ops *ops)
{
- int err;
const char *type;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct pci_bus *bus, *child;
+ struct pci_config_window *cfg;
+ struct list_head resources;
type = of_get_property(np, "device_type", NULL);
if (!type || strcmp(type, "pci")) {
@@ -147,29 +136,18 @@ int pci_host_common_probe(struct platform_device *pdev,
of_pci_check_probe_only();
- pci->host.dev.parent = dev;
- INIT_LIST_HEAD(&pci->host.windows);
- INIT_LIST_HEAD(&pci->resources);
-
- /* Parse our PCI ranges and request their resources */
- err = gen_pci_parse_request_of_pci_ranges(pci);
- if (err)
- return err;
-
/* Parse and map our Configuration Space windows */
- err = gen_pci_parse_map_cfg_windows(pci);
- if (err) {
- gen_pci_release_of_pci_ranges(pci);
- return err;
- }
+ INIT_LIST_HEAD(&resources);
+ cfg = gen_pci_init(dev, &resources, ops);
+ if (IS_ERR(cfg))
+ return PTR_ERR(cfg);
/* Do not reassign resources if probe only */
if (!pci_has_flag(PCI_PROBE_ONLY))
pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
-
- bus = pci_scan_root_bus(dev, pci->cfg.bus_range->start,
- &pci->cfg.ops->ops, pci, &pci->resources);
+ bus = pci_scan_root_bus(dev, cfg->busr.start, &ops->pci_ops, cfg,
+ &resources);
if (!bus) {
dev_err(dev, "Scanning rootbus failed");
return -ENODEV;
diff --git a/drivers/pci/host/pci-host-common.h b/drivers/pci/host/pci-host-common.h
deleted file mode 100644
index 09f3fa0a5..000000000
--- a/drivers/pci/host/pci-host-common.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * Copyright (C) 2014 ARM Limited
- *
- * Author: Will Deacon <will.deacon@arm.com>
- */
-
-#ifndef _PCI_HOST_COMMON_H
-#define _PCI_HOST_COMMON_H
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-
-struct gen_pci_cfg_bus_ops {
- u32 bus_shift;
- struct pci_ops ops;
-};
-
-struct gen_pci_cfg_windows {
- struct resource res;
- struct resource *bus_range;
- void __iomem **win;
-
- struct gen_pci_cfg_bus_ops *ops;
-};
-
-struct gen_pci {
- struct pci_host_bridge host;
- struct gen_pci_cfg_windows cfg;
- struct list_head resources;
-};
-
-int pci_host_common_probe(struct platform_device *pdev,
- struct gen_pci *pci);
-
-#endif /* _PCI_HOST_COMMON_H */
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index e8aa78faa..6eaceab1b 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -25,41 +25,12 @@
#include <linux/of_pci.h>
#include <linux/platform_device.h>
-#include "pci-host-common.h"
+#include "../ecam.h"
-static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
- unsigned int devfn,
- int where)
-{
- struct gen_pci *pci = bus->sysdata;
- resource_size_t idx = bus->number - pci->cfg.bus_range->start;
-
- return pci->cfg.win[idx] + ((devfn << 8) | where);
-}
-
-static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
+static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = {
.bus_shift = 16,
- .ops = {
- .map_bus = gen_pci_map_cfg_bus_cam,
- .read = pci_generic_config_read,
- .write = pci_generic_config_write,
- }
-};
-
-static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
- unsigned int devfn,
- int where)
-{
- struct gen_pci *pci = bus->sysdata;
- resource_size_t idx = bus->number - pci->cfg.bus_range->start;
-
- return pci->cfg.win[idx] + ((devfn << 12) | where);
-}
-
-static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
- .bus_shift = 20,
- .ops = {
- .map_bus = gen_pci_map_cfg_bus_ecam,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
}
@@ -70,25 +41,22 @@ static const struct of_device_id gen_pci_of_match[] = {
.data = &gen_pci_cfg_cam_bus_ops },
{ .compatible = "pci-host-ecam-generic",
- .data = &gen_pci_cfg_ecam_bus_ops },
+ .data = &pci_generic_ecam_ops },
{ },
};
+
MODULE_DEVICE_TABLE(of, gen_pci_of_match);
static int gen_pci_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
- struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
-
- if (!pci)
- return -ENOMEM;
+ struct pci_ecam_ops *ops;
- of_id = of_match_node(gen_pci_of_match, dev->of_node);
- pci->cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data;
+ of_id = of_match_node(gen_pci_of_match, pdev->dev.of_node);
+ ops = (struct pci_ecam_ops *)of_id->data;
- return pci_host_common_probe(pdev, pci);
+ return pci_host_common_probe(pdev, ops);
}
static struct platform_driver gen_pci_driver = {
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index ed651baa7..7e9b2de2a 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -553,6 +553,8 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
/* Choose the function to be read. (See comment above) */
writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
+ /* Make sure the function was chosen before we start reading. */
+ mb();
/* Read from that function's config space. */
switch (size) {
case 1:
@@ -565,6 +567,11 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
*val = readl(addr);
break;
}
+ /*
+	 * Make sure the read was done before we release the spinlock
+ * allowing consecutive reads/writes.
+ */
+ mb();
spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
} else {
dev_err(&hpdev->hbus->hdev->device,
@@ -592,6 +599,8 @@ static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
/* Choose the function to be written. (See comment above) */
writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
+ /* Make sure the function was chosen before we start writing. */
+ wmb();
/* Write to that function's config space. */
switch (size) {
case 1:
@@ -604,6 +613,11 @@ static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
writel(val, addr);
break;
}
+ /*
+ * Make sure the write was done before we release the spinlock
+ * allowing consecutive reads/writes.
+ */
+ mb();
spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
} else {
dev_err(&hpdev->hbus->hdev->device,
@@ -1795,14 +1809,14 @@ static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
if (hbus->low_mmio_space && hbus->low_mmio_res) {
hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
- release_mem_region(hbus->low_mmio_res->start,
- resource_size(hbus->low_mmio_res));
+ vmbus_free_mmio(hbus->low_mmio_res->start,
+ resource_size(hbus->low_mmio_res));
}
if (hbus->high_mmio_space && hbus->high_mmio_res) {
hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
- release_mem_region(hbus->high_mmio_res->start,
- resource_size(hbus->high_mmio_res));
+ vmbus_free_mmio(hbus->high_mmio_res->start,
+ resource_size(hbus->high_mmio_res));
}
}
@@ -1880,8 +1894,8 @@ static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
release_low_mmio:
if (hbus->low_mmio_res) {
- release_mem_region(hbus->low_mmio_res->start,
- resource_size(hbus->low_mmio_res));
+ vmbus_free_mmio(hbus->low_mmio_res->start,
+ resource_size(hbus->low_mmio_res));
}
return ret;
@@ -1924,7 +1938,7 @@ static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
static void hv_free_config_window(struct hv_pcibus_device *hbus)
{
- release_mem_region(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
+ vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
}
/**
@@ -2268,11 +2282,6 @@ static int hv_pci_remove(struct hv_device *hdev)
hbus = hv_get_drvdata(hdev);
- ret = hv_send_resources_released(hdev);
- if (ret)
- dev_err(&hdev->device,
- "Couldn't send resources released packet(s)\n");
-
memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
init_completion(&comp_pkt.host_event);
pkt.teardown_packet.completion_func = hv_pci_generic_compl;
@@ -2295,6 +2304,11 @@ static int hv_pci_remove(struct hv_device *hdev)
pci_unlock_rescan_remove();
}
+ ret = hv_send_resources_released(hdev);
+ if (ret)
+ dev_err(&hdev->device,
+ "Couldn't send resources released packet(s)\n");
+
vmbus_close(hdev->channel);
/* Delete any children which might still exist. */
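The barriers added above pair the selector write with the data access: the slot number written to cfg_addr must reach the device before the data register is touched, and the data access must complete before the lock is dropped and another CPU can reprogram the selector. The pattern, reduced to a sketch (function and parameter names hypothetical):

    static u32 indirect_cfg_read_sketch(void __iomem *select_reg,
                                        void __iomem *data_reg, u32 slot)
    {
            u32 val;

            writel(slot, select_reg);  /* choose the target function */
            mb();                      /* selector lands before the data read */
            val = readl(data_reg);
            mb();                      /* data read completes before the lock drops */
            return val;
    }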
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index 2f817fa4c..b741a36a6 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -19,6 +19,7 @@
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
+#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -31,19 +32,29 @@
#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
+enum imx6_pcie_variants {
+ IMX6Q,
+ IMX6SX,
+ IMX6QP,
+};
+
struct imx6_pcie {
int reset_gpio;
+ bool gpio_active_high;
struct clk *pcie_bus;
struct clk *pcie_phy;
+ struct clk *pcie_inbound_axi;
struct clk *pcie;
struct pcie_port pp;
struct regmap *iomuxc_gpr;
+ enum imx6_pcie_variants variant;
void __iomem *mem_base;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
u32 tx_deemph_gen2_6db;
u32 tx_swing_full;
u32 tx_swing_low;
+ int link_gen;
};
/* PCIe Root Complex registers (memory-mapped) */
@@ -236,37 +247,93 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
u32 val, gpr1, gpr12;
- /*
- * If the bootloader already enabled the link we need some special
- * handling to get the core back into a state where it is safe to
- * touch it for configuration. As there is no dedicated reset signal
- * wired up for MX6QDL, we need to manually force LTSSM into "detect"
- * state before completely disabling LTSSM, which is a prerequisite
- * for core configuration.
- *
- * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
- * indication that the bootloader activated the link.
- */
- regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
- regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
+ /* Force PCIe PHY reset */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET);
+ break;
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_SW_RST,
+ IMX6Q_GPR1_PCIE_SW_RST);
+ break;
+ case IMX6Q:
+ /*
+ * If the bootloader already enabled the link we need some
+ * special handling to get the core back into a state where
+ * it is safe to touch it for configuration. As there is
+ * no dedicated reset signal wired up for MX6QDL, we need
+ * to manually force LTSSM into "detect" state before
+ * completely disabling LTSSM, which is a prerequisite for
+ * core configuration.
+ *
+ * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we
+ * have a strong indication that the bootloader activated
+ * the link.
+ */
+ regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
+ regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
+
+ if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
+ (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
+ val = readl(pp->dbi_base + PCIE_PL_PFLR);
+ val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
+ val |= PCIE_PL_PFLR_FORCE_LINK;
+ writel(val, pp->dbi_base + PCIE_PL_PFLR);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+ }
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+ break;
+ }
+
+ return 0;
+}
+
+static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
+{
+ struct pcie_port *pp = &imx6_pcie->pp;
+ int ret = 0;
- if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
- (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
- val = readl(pp->dbi_base + PCIE_PL_PFLR);
- val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
- val |= PCIE_PL_PFLR_FORCE_LINK;
- writel(val, pp->dbi_base + PCIE_PL_PFLR);
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
+ ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable pcie_axi clock\n");
+ break;
+ }
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+ IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
+ break;
+ case IMX6QP: /* FALLTHROUGH */
+ case IMX6Q:
+ /* power up core phy and enable ref clock */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ /*
+ * The async reset input needs the ref clock to sync internally;
+ * if the ref clock arrives after the reset, the internally synced
+ * reset time is too short to meet the requirement, so add a
+ * ~10us delay here.
+ */
+ udelay(10);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+ break;
}
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
-
- return 0;
+ return ret;
}
static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
@@ -292,43 +359,60 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
goto err_pcie;
}
- /* power up core phy and enable ref clock */
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
- /*
- * the async reset input need ref clock to sync internally,
- * when the ref clock comes after reset, internal synced
- * reset time is too short, cannot meet the requirement.
- * add one ~10us delay here.
- */
- udelay(10);
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
- IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+ ret = imx6_pcie_enable_ref_clk(imx6_pcie);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable pcie ref clock\n");
+ goto err_ref_clk;
+ }
/* allow the clocks to stabilize */
usleep_range(200, 500);
/* Some boards don't have PCIe reset GPIO. */
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
- gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0);
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ imx6_pcie->gpio_active_high);
msleep(100);
- gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1);
+ gpio_set_value_cansleep(imx6_pcie->reset_gpio,
+ !imx6_pcie->gpio_active_high);
}
+
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
+ IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
+ break;
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_SW_RST, 0);
+
+ usleep_range(200, 500);
+ break;
+ case IMX6Q: /* Nothing to do */
+ break;
+ }
+
return 0;
+err_ref_clk:
+ clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
return ret;
-
}
static void imx6_pcie_init_phy(struct pcie_port *pp)
{
struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ if (imx6_pcie->variant == IMX6SX)
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6SX_GPR12_PCIE_RX_EQ_MASK,
+ IMX6SX_GPR12_PCIE_RX_EQ_2);
+
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
@@ -417,11 +501,15 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
goto err_reset_phy;
}
- /* Allow Gen2 mode after the link is up. */
- tmp = readl(pp->dbi_base + PCIE_RC_LCR);
- tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
- tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
- writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+ if (imx6_pcie->link_gen == 2) {
+ /* Allow Gen2 mode after the link is up. */
+ tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+ tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
+ tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
+ writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+ } else {
+ dev_info(pp->dev, "Link: Gen2 disabled\n");
+ }
/*
* Start Directed Speed Change so the best possible speed both link
@@ -445,8 +533,7 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
}
tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
- dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
-
+ dev_info(pp->dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
return 0;
err_reset_phy:
@@ -535,6 +622,9 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
pp = &imx6_pcie->pp;
pp->dev = &pdev->dev;
+ imx6_pcie->variant =
+ (enum imx6_pcie_variants)of_device_get_match_data(&pdev->dev);
+
/* Added for PCI abort handling */
hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
"imprecise external abort");
@@ -546,9 +636,14 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
/* Fetch GPIOs */
imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ imx6_pcie->gpio_active_high = of_property_read_bool(np,
+ "reset-gpio-active-high");
if (gpio_is_valid(imx6_pcie->reset_gpio)) {
ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
- GPIOF_OUT_INIT_LOW, "PCIe reset");
+ imx6_pcie->gpio_active_high ?
+ GPIOF_OUT_INIT_HIGH :
+ GPIOF_OUT_INIT_LOW,
+ "PCIe reset");
if (ret) {
dev_err(&pdev->dev, "unable to get reset gpio\n");
return ret;
@@ -577,6 +672,16 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->pcie);
}
+ if (imx6_pcie->variant == IMX6SX) {
+ imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev,
+ "pcie_inbound_axi");
+ if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
+ dev_err(&pdev->dev,
+ "pcie_incbound_axi clock missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie_inbound_axi);
+ }
+ }
+
/* Grab GPR config register range */
imx6_pcie->iomuxc_gpr =
syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
@@ -606,6 +711,12 @@ static int __init imx6_pcie_probe(struct platform_device *pdev)
&imx6_pcie->tx_swing_low))
imx6_pcie->tx_swing_low = 127;
+ /* Limit link speed */
+ ret = of_property_read_u32(pp->dev->of_node, "fsl,max-link-speed",
+ &imx6_pcie->link_gen);
+ if (ret)
+ imx6_pcie->link_gen = 1;
+
ret = imx6_add_pcie_port(pp, pdev);
if (ret < 0)
return ret;
@@ -623,7 +734,9 @@ static void imx6_pcie_shutdown(struct platform_device *pdev)
}
static const struct of_device_id imx6_pcie_of_match[] = {
- { .compatible = "fsl,imx6q-pcie", },
+ { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, },
+ { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
+ { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
{},
};
MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
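Each compatible entry carries its variant in .data, which the probe path recovers through of_device_get_match_data(). Open-coded, the lookup is roughly the following sketch:

    static enum imx6_pcie_variants imx6_pcie_variant_sketch(struct device *dev)
    {
            const struct of_device_id *id =
                    of_match_device(imx6_pcie_of_match, dev);

            /* probe only runs on matched devices, so id is non-NULL here */
            return (enum imx6_pcie_variants)(unsigned long)id->data;
    }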
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index 6153853ca..41515092e 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -14,6 +14,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_pci.h>
@@ -53,6 +54,21 @@
#define IRQ_STATUS 0x184
#define MSI_IRQ_OFFSET 4
+/* Error IRQ bits */
+#define ERR_AER BIT(5) /* ECRC error */
+#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
+#define ERR_CORR BIT(3) /* Correctable error */
+#define ERR_NONFATAL BIT(2) /* Non-fatal error */
+#define ERR_FATAL BIT(1) /* Fatal error */
+#define ERR_SYS BIT(0) /* System (fatal, non-fatal, or correctable) */
+#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
+ ERR_NONFATAL | ERR_FATAL | ERR_SYS)
+#define ERR_FATAL_IRQ (ERR_FATAL | ERR_AXI)
+#define ERR_IRQ_STATUS_RAW 0x1c0
+#define ERR_IRQ_STATUS 0x1c4
+#define ERR_IRQ_ENABLE_SET 0x1c8
+#define ERR_IRQ_ENABLE_CLR 0x1cc
+
/* Config space registers */
#define DEBUG0 0x728
@@ -243,6 +259,28 @@ void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
writel(offset, ks_pcie->va_app_base + IRQ_EOI);
}
+void ks_dw_pcie_enable_error_irq(void __iomem *reg_base)
+{
+ writel(ERR_IRQ_ALL, reg_base + ERR_IRQ_ENABLE_SET);
+}
+
+irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
+ void __iomem *reg_base)
+{
+ u32 status;
+
+ status = readl(reg_base + ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & ERR_FATAL_IRQ)
+ dev_err(dev, "fatal error (status %#010x)\n", status);
+
+ /* Ack the IRQ; status bits are RW1C */
+ writel(status, reg_base + ERR_IRQ_STATUS);
+ return IRQ_HANDLED;
+}
+
static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
{
}
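The error IRQ block follows the usual Keystone set/clear register convention: bits written to ERR_IRQ_ENABLE_SET unmask, the same bits written to ERR_IRQ_ENABLE_CLR mask again, and raw status is acknowledged by writing it back (RW1C). The masking side is not added by this patch; its assumed counterpart would be:

    /* Assumed counterpart of ks_dw_pcie_enable_error_irq(); not part of this patch. */
    static void ks_dw_pcie_disable_error_irq_sketch(void __iomem *reg_base)
    {
            writel(ERR_IRQ_ALL, reg_base + ERR_IRQ_ENABLE_CLR);
    }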
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index b71f55bb0..6b8301ef2 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -15,6 +15,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/msi.h>
@@ -159,7 +160,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
char *controller, int *num_irqs)
{
- int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL;
+ int temp, max_host_irqs, legacy = 1, *host_irqs;
struct device *dev = ks_pcie->pp.dev;
struct device_node *np_pcie = dev->of_node, **np_temp;
@@ -180,11 +181,15 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
*np_temp = of_find_node_by_name(np_pcie, controller);
if (!(*np_temp)) {
dev_err(dev, "Node for %s is absent\n", controller);
- goto out;
+ return -EINVAL;
}
+
temp = of_irq_count(*np_temp);
- if (!temp)
- goto out;
+ if (!temp) {
+ dev_err(dev, "No IRQ entries in %s\n", controller);
+ return -EINVAL;
+ }
+
if (temp > max_host_irqs)
dev_warn(dev, "Too many %s interrupts defined %u\n",
(legacy ? "legacy" : "MSI"), temp);
@@ -198,12 +203,13 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
if (!host_irqs[temp])
break;
}
+
if (temp) {
*num_irqs = temp;
- ret = 0;
+ return 0;
}
-out:
- return ret;
+
+ return -EINVAL;
}
static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
@@ -226,6 +232,9 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
ks_pcie);
}
}
+
+ if (ks_pcie->error_irq > 0)
+ ks_dw_pcie_enable_error_irq(ks_pcie->va_app_base);
}
/*
@@ -289,6 +298,14 @@ static struct pcie_host_ops keystone_pcie_host_ops = {
.scan_bus = ks_dw_pcie_v3_65_scan_bus,
};
+static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
+{
+ struct keystone_pcie *ks_pcie = priv;
+
+ return ks_dw_pcie_handle_error_irq(ks_pcie->pp.dev,
+ ks_pcie->va_app_base);
+}
+
static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
struct platform_device *pdev)
{
@@ -309,6 +326,22 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
return ret;
}
+ /*
+ * Index 0 is the platform interrupt for error interrupt
+ * from RC. This is optional.
+ */
+ ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
+ if (ks_pcie->error_irq <= 0)
+ dev_info(&pdev->dev, "no error IRQ defined\n");
+ else {
+ ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
+ IRQF_SHARED, "pcie-error-irq", ks_pcie);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request error IRQ %d\n",
+ ks_pcie->error_irq);
+ return ret;
+ }
+ }
+
pp->root_bus_nr = -1;
pp->ops = &keystone_pcie_host_ops;
ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
@@ -317,7 +350,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
return ret;
}
- return ret;
+ return 0;
}
static const struct of_device_id ks_pcie_of_match[] = {
@@ -346,7 +379,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *reg_p;
struct phy *phy;
- int ret = 0;
+ int ret;
ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
GFP_KERNEL);
@@ -376,6 +409,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
devm_release_mem_region(dev, res->start, resource_size(res));
pp->dev = dev;
+ ks_pcie->np = dev->of_node;
platform_set_drvdata(pdev, ks_pcie);
ks_pcie->clk = devm_clk_get(dev, "pcie");
if (IS_ERR(ks_pcie->clk)) {
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
index f0944e8c4..a5b0cb2ba 100644
--- a/drivers/pci/host/pci-keystone.h
+++ b/drivers/pci/host/pci-keystone.h
@@ -29,6 +29,9 @@ struct keystone_pcie {
int msi_host_irqs[MAX_MSI_HOST_IRQS];
struct device_node *msi_intc_np;
struct irq_domain *legacy_irq_domain;
+ struct device_node *np;
+
+ int error_irq;
/* Application register space */
void __iomem *va_app_base;
@@ -42,6 +45,9 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
/* Keystone specific PCI controller APIs */
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
+void ks_dw_pcie_enable_error_irq(void __iomem *reg_base);
+irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev,
+ void __iomem *reg_base);
int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
struct device_node *msi_intc_np);
int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 53b79c5f0..6b451df65 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -1003,6 +1003,7 @@ static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
pcie->msi->dev = &pcie->pdev->dev;
}
+#ifdef CONFIG_PM_SLEEP
static int mvebu_pcie_suspend(struct device *dev)
{
struct mvebu_pcie *pcie;
@@ -1031,6 +1032,7 @@ static int mvebu_pcie_resume(struct device *dev)
return 0;
}
+#endif
static void mvebu_pcie_port_clk_put(void *data)
{
@@ -1298,9 +1300,8 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = {
};
MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
-static struct dev_pm_ops mvebu_pcie_pm_ops = {
- .suspend_noirq = mvebu_pcie_suspend,
- .resume_noirq = mvebu_pcie_resume,
+static const struct dev_pm_ops mvebu_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};
static struct platform_driver mvebu_pcie_driver = {
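SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() only emits the callback assignments when CONFIG_PM_SLEEP is enabled, which is why the callbacks themselves are now wrapped in #ifdef CONFIG_PM_SLEEP above. Simplified, the macro behaves like this (the real definition in <linux/pm.h> also wires the freeze/thaw/poweroff/restore hooks):

    #ifdef CONFIG_PM_SLEEP
    #define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
            .suspend_noirq = suspend_fn, \
            .resume_noirq = resume_fn,
    #else
    #define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
    #endif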
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 68d1f41b3..c388468c2 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -295,6 +295,7 @@ struct tegra_pcie {
struct reset_control *afi_rst;
struct reset_control *pcie_xrst;
+ bool legacy_phy;
struct phy *phy;
struct tegra_msi msi;
@@ -311,11 +312,14 @@ struct tegra_pcie {
struct tegra_pcie_port {
struct tegra_pcie *pcie;
+ struct device_node *np;
struct list_head list;
struct resource regs;
void __iomem *base;
unsigned int index;
unsigned int lanes;
+
+ struct phy **phys;
};
struct tegra_pcie_bus {
@@ -860,6 +864,128 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
return 0;
}
+static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ u32 value;
+
+ /* disable TX/RX data */
+ value = pads_readl(pcie, PADS_CTL);
+ value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
+ pads_writel(pcie, value, PADS_CTL);
+
+ /* override IDDQ */
+ value = pads_readl(pcie, PADS_CTL);
+ value |= PADS_CTL_IDDQ_1L;
+ pads_writel(pcie, value, PADS_CTL);
+
+ /* reset PLL */
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ value &= ~PADS_PLL_CTL_RST_B4SM;
+ pads_writel(pcie, value, soc->pads_pll_ctl);
+
+ usleep_range(20, 100);
+
+ return 0;
+}
+
+static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
+{
+ struct device *dev = port->pcie->dev;
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < port->lanes; i++) {
+ err = phy_power_on(port->phys[i]);
+ if (err < 0) {
+ dev_err(dev, "failed to power on PHY#%u: %d\n", i,
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
+{
+ struct device *dev = port->pcie->dev;
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < port->lanes; i++) {
+ err = phy_power_off(port->phys[i]);
+ if (err < 0) {
+ dev_err(dev, "failed to power off PHY#%u: %d\n", i,
+ err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
+{
+ struct tegra_pcie_port *port;
+ int err;
+
+ if (pcie->legacy_phy) {
+ if (pcie->phy)
+ err = phy_power_on(pcie->phy);
+ else
+ err = tegra_pcie_phy_enable(pcie);
+
+ if (err < 0)
+ dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
+
+ return err;
+ }
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ err = tegra_pcie_port_phy_power_on(port);
+ if (err < 0) {
+ dev_err(pcie->dev,
+ "failed to power on PCIe port %u PHY: %d\n",
+ port->index, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
+{
+ struct tegra_pcie_port *port;
+ int err;
+
+ if (pcie->legacy_phy) {
+ if (pcie->phy)
+ err = phy_power_off(pcie->phy);
+ else
+ err = tegra_pcie_phy_disable(pcie);
+
+ if (err < 0)
+ dev_err(pcie->dev, "failed to power off PHY: %d\n",
+ err);
+
+ return err;
+ }
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ err = tegra_pcie_port_phy_power_off(port);
+ if (err < 0) {
+ dev_err(pcie->dev,
+ "failed to power off PCIe port %u PHY: %d\n",
+ port->index, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
const struct tegra_pcie_soc_data *soc = pcie->soc_data;
@@ -899,13 +1025,9 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
afi_writel(pcie, value, AFI_FUSE);
}
- if (!pcie->phy)
- err = tegra_pcie_phy_enable(pcie);
- else
- err = phy_power_on(pcie->phy);
-
+ err = tegra_pcie_phy_power_on(pcie);
if (err < 0) {
- dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
+ dev_err(pcie->dev, "failed to power on PHY(s): %d\n", err);
return err;
}
@@ -942,9 +1064,9 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
/* TODO: disable and unprepare clocks? */
- err = phy_power_off(pcie->phy);
+ err = tegra_pcie_phy_power_off(pcie);
if (err < 0)
- dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);
+ dev_err(pcie->dev, "failed to power off PHY(s): %d\n", err);
reset_control_assert(pcie->pcie_xrst);
reset_control_assert(pcie->afi_rst);
@@ -1049,6 +1171,100 @@ static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
return 0;
}
+static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
+{
+ int err;
+
+ pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
+ if (IS_ERR(pcie->phy)) {
+ err = PTR_ERR(pcie->phy);
+ dev_err(pcie->dev, "failed to get PHY: %d\n", err);
+ return err;
+ }
+
+ err = phy_init(pcie->phy);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to initialize PHY: %d\n", err);
+ return err;
+ }
+
+ pcie->legacy_phy = true;
+
+ return 0;
+}
+
+static struct phy *devm_of_phy_optional_get_index(struct device *dev,
+ struct device_node *np,
+ const char *consumer,
+ unsigned int index)
+{
+ struct phy *phy;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
+ if (!name)
+ return ERR_PTR(-ENOMEM);
+
+ phy = devm_of_phy_get(dev, np, name);
+ kfree(name);
+
+ if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
+ phy = NULL;
+
+ return phy;
+}
+
+static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
+{
+ struct device *dev = port->pcie->dev;
+ struct phy *phy;
+ unsigned int i;
+ int err;
+
+ port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL);
+ if (!port->phys)
+ return -ENOMEM;
+
+ for (i = 0; i < port->lanes; i++) {
+ phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to get PHY#%u: %ld\n", i,
+ PTR_ERR(phy));
+ return PTR_ERR(phy);
+ }
+
+ err = phy_init(phy);
+ if (err < 0) {
+ dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
+ err);
+ return err;
+ }
+
+ port->phys[i] = phy;
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ struct device_node *np = pcie->dev->of_node;
+ struct tegra_pcie_port *port;
+ int err;
+
+ if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
+ return tegra_pcie_phys_get_legacy(pcie);
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ err = tegra_pcie_port_get_phys(port);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
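tegra_pcie_port_get_phys() derives consumer names with the "%s-%u" format, so an n-lane root port is expected to provide per-lane PHYs named accordingly; for two lanes (an inference from the format string, not a quote of the binding):

    static const char *const tegra_lane_phy_names_sketch[] = { "pcie-0", "pcie-1" };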
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
@@ -1067,16 +1283,9 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
return err;
}
- pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
- if (IS_ERR(pcie->phy)) {
- err = PTR_ERR(pcie->phy);
- dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
- return err;
- }
-
- err = phy_init(pcie->phy);
+ err = tegra_pcie_phys_get(pcie);
if (err < 0) {
- dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
+ dev_err(&pdev->dev, "failed to get PHYs: %d\n", err);
return err;
}
@@ -1752,6 +1961,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
rp->index = index;
rp->lanes = value;
rp->pcie = pcie;
+ rp->np = port;
rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
if (IS_ERR(rp->base))
diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c
index d71935cb2..540d03061 100644
--- a/drivers/pci/host/pci-thunder-ecam.c
+++ b/drivers/pci/host/pci-thunder-ecam.c
@@ -13,18 +13,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
-#include "pci-host-common.h"
-
-/* Mapping is standard ECAM */
-static void __iomem *thunder_ecam_map_bus(struct pci_bus *bus,
- unsigned int devfn,
- int where)
-{
- struct gen_pci *pci = bus->sysdata;
- resource_size_t idx = bus->number - pci->cfg.bus_range->start;
-
- return pci->cfg.win[idx] + ((devfn << 12) | where);
-}
+#include "../ecam.h"
static void set_val(u32 v, int where, int size, u32 *val)
{
@@ -99,7 +88,7 @@ static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus,
static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- struct gen_pci *pci = bus->sysdata;
+ struct pci_config_window *cfg = bus->sysdata;
int where_a = where & ~3;
void __iomem *addr;
u32 node_bits;
@@ -129,7 +118,7 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
* the config space access window. Since we are working with
* the high-order 32 bits, shift everything down by 32 bits.
*/
- node_bits = (pci->cfg.res.start >> 32) & (1 << 12);
+ node_bits = (cfg->res.start >> 32) & (1 << 12);
v |= node_bits;
set_val(v, where, size, val);
@@ -358,36 +347,24 @@ static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn,
return pci_generic_config_write(bus, devfn, where, size, val);
}
-static struct gen_pci_cfg_bus_ops thunder_ecam_bus_ops = {
+static struct pci_ecam_ops pci_thunder_ecam_ops = {
.bus_shift = 20,
- .ops = {
- .map_bus = thunder_ecam_map_bus,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
.read = thunder_ecam_config_read,
.write = thunder_ecam_config_write,
}
};
static const struct of_device_id thunder_ecam_of_match[] = {
- { .compatible = "cavium,pci-host-thunder-ecam",
- .data = &thunder_ecam_bus_ops },
-
+ { .compatible = "cavium,pci-host-thunder-ecam" },
{ },
};
MODULE_DEVICE_TABLE(of, thunder_ecam_of_match);
static int thunder_ecam_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- const struct of_device_id *of_id;
- struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
-
- if (!pci)
- return -ENOMEM;
-
- of_id = of_match_node(thunder_ecam_of_match, dev->of_node);
- pci->cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data;
-
- return pci_host_common_probe(pdev, pci);
+ return pci_host_common_probe(pdev, &pci_thunder_ecam_ops);
}
static struct platform_driver thunder_ecam_driver = {
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c
index cabb92a51..9b8ab94f3 100644
--- a/drivers/pci/host/pci-thunder-pem.c
+++ b/drivers/pci/host/pci-thunder-pem.c
@@ -20,34 +20,22 @@
#include <linux/of_pci.h>
#include <linux/platform_device.h>
-#include "pci-host-common.h"
+#include "../ecam.h"
#define PEM_CFG_WR 0x28
#define PEM_CFG_RD 0x30
struct thunder_pem_pci {
- struct gen_pci gen_pci;
u32 ea_entry[3];
void __iomem *pem_reg_base;
};
-static void __iomem *thunder_pem_map_bus(struct pci_bus *bus,
- unsigned int devfn, int where)
-{
- struct gen_pci *pci = bus->sysdata;
- resource_size_t idx = bus->number - pci->cfg.bus_range->start;
-
- return pci->cfg.win[idx] + ((devfn << 16) | where);
-}
-
static int thunder_pem_bridge_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
u64 read_val;
- struct thunder_pem_pci *pem_pci;
- struct gen_pci *pci = bus->sysdata;
-
- pem_pci = container_of(pci, struct thunder_pem_pci, gen_pci);
+ struct pci_config_window *cfg = bus->sysdata;
+ struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv;
if (devfn != 0 || where >= 2048) {
*val = ~0;
@@ -132,17 +120,17 @@ static int thunder_pem_bridge_read(struct pci_bus *bus, unsigned int devfn,
static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- struct gen_pci *pci = bus->sysdata;
+ struct pci_config_window *cfg = bus->sysdata;
- if (bus->number < pci->cfg.bus_range->start ||
- bus->number > pci->cfg.bus_range->end)
+ if (bus->number < cfg->busr.start ||
+ bus->number > cfg->busr.end)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* The first device on the bus is the PEM PCIe bridge.
* Special case its config access.
*/
- if (bus->number == pci->cfg.bus_range->start)
+ if (bus->number == cfg->busr.start)
return thunder_pem_bridge_read(bus, devfn, where, size, val);
return pci_generic_config_read(bus, devfn, where, size, val);
@@ -153,11 +141,11 @@ static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn,
* reserved bits, this makes the code simpler and is OK as the bits
* are not affected by writing zeros to them.
*/
-static u32 thunder_pem_bridge_w1c_bits(int where)
+static u32 thunder_pem_bridge_w1c_bits(u64 where_aligned)
{
u32 w1c_bits = 0;
- switch (where & ~3) {
+ switch (where_aligned) {
case 0x04: /* Command/Status */
case 0x1c: /* Base and I/O Limit/Secondary Status */
w1c_bits = 0xff000000;
@@ -184,15 +172,36 @@ static u32 thunder_pem_bridge_w1c_bits(int where)
return w1c_bits;
}
+/* Some bits must be written to one so they appear to be read-only. */
+static u32 thunder_pem_bridge_w1_bits(u64 where_aligned)
+{
+ u32 w1_bits;
+
+ switch (where_aligned) {
+ case 0x1c: /* I/O Base / I/O Limit, Secondary Status */
+ /* Force 32-bit I/O addressing. */
+ w1_bits = 0x0101;
+ break;
+ case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */
+ /* Force 64-bit addressing */
+ w1_bits = 0x00010001;
+ break;
+ default:
+ w1_bits = 0;
+ break;
+ }
+ return w1_bits;
+}
+
static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- struct gen_pci *pci = bus->sysdata;
- struct thunder_pem_pci *pem_pci;
+ struct pci_config_window *cfg = bus->sysdata;
+ struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv;
u64 write_val, read_val;
+ u64 where_aligned = where & ~3ull;
u32 mask = 0;
- pem_pci = container_of(pci, struct thunder_pem_pci, gen_pci);
if (devfn != 0 || where >= 2048)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -205,8 +214,7 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
*/
switch (size) {
case 1:
- read_val = where & ~3ull;
- writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
+ writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
read_val >>= 32;
mask = ~(0xff << (8 * (where & 3)));
@@ -215,8 +223,7 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
val |= (u32)read_val;
break;
case 2:
- read_val = where & ~3ull;
- writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD);
+ writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD);
read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD);
read_val >>= 32;
mask = ~(0xffff << (8 * (where & 3)));
@@ -244,11 +251,17 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
}
/*
+ * Some bits must be read-only with value of one. Since the
+ * access method allows these to be cleared if a zero is
+ * written, force them to one before writing.
+ */
+ val |= thunder_pem_bridge_w1_bits(where_aligned);
+
+ /*
* Low order bits are the config address, the high order 32
* bits are the data to be written.
*/
- write_val = where & ~3ull;
- write_val |= (((u64)val) << 32);
+ write_val = (((u64)val) << 32) | where_aligned;
writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR);
return PCIBIOS_SUCCESSFUL;
}
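Because the PEM interface only performs aligned 32-bit config writes, sub-dword writes are widened: the current dword is read back, the addressed bytes are replaced, and W1C bits outside the addressed bytes are masked off so the merge cannot acknowledge status the caller never touched. Condensed to the one-byte case (a sketch of the logic above, not a verbatim excerpt):

    u32 shift = 8 * (where & 3);
    u32 keep = ~(0xffu << shift);           /* bytes merged back unchanged */
    u32 merged = ((u32)read_val & keep) | ((val & 0xff) << shift);

    /* don't let the merged-back bytes clear W1C status bits */
    merged &= ~(keep & thunder_pem_bridge_w1c_bits(where & ~3ull));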
@@ -256,53 +269,38 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn,
static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- struct gen_pci *pci = bus->sysdata;
+ struct pci_config_window *cfg = bus->sysdata;
- if (bus->number < pci->cfg.bus_range->start ||
- bus->number > pci->cfg.bus_range->end)
+ if (bus->number < cfg->busr.start ||
+ bus->number > cfg->busr.end)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* The first device on the bus is the PEM PCIe bridge.
* Special case its config access.
*/
- if (bus->number == pci->cfg.bus_range->start)
+ if (bus->number == cfg->busr.start)
return thunder_pem_bridge_write(bus, devfn, where, size, val);
return pci_generic_config_write(bus, devfn, where, size, val);
}
-static struct gen_pci_cfg_bus_ops thunder_pem_bus_ops = {
- .bus_shift = 24,
- .ops = {
- .map_bus = thunder_pem_map_bus,
- .read = thunder_pem_config_read,
- .write = thunder_pem_config_write,
- }
-};
-
-static const struct of_device_id thunder_pem_of_match[] = {
- { .compatible = "cavium,pci-host-thunder-pem",
- .data = &thunder_pem_bus_ops },
-
- { },
-};
-MODULE_DEVICE_TABLE(of, thunder_pem_of_match);
-
-static int thunder_pem_probe(struct platform_device *pdev)
+static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg)
{
- struct device *dev = &pdev->dev;
- const struct of_device_id *of_id;
resource_size_t bar4_start;
struct resource *res_pem;
struct thunder_pem_pci *pem_pci;
+ struct platform_device *pdev;
+
+ /* Only OF support for now */
+ if (!dev->of_node)
+ return -EINVAL;
pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL);
if (!pem_pci)
return -ENOMEM;
- of_id = of_match_node(thunder_pem_of_match, dev->of_node);
- pem_pci->gen_pci.cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data;
+ pdev = to_platform_device(dev);
/*
* The second register range is the PEM bridge to the PCIe
@@ -330,7 +328,29 @@ static int thunder_pem_probe(struct platform_device *pdev)
pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
- return pci_host_common_probe(pdev, &pem_pci->gen_pci);
+ cfg->priv = pem_pci;
+ return 0;
+}
+
+static struct pci_ecam_ops pci_thunder_pem_ops = {
+ .bus_shift = 24,
+ .init = thunder_pem_init,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = thunder_pem_config_read,
+ .write = thunder_pem_config_write,
+ }
+};
+
+static const struct of_device_id thunder_pem_of_match[] = {
+ { .compatible = "cavium,pci-host-thunder-pem" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, thunder_pem_of_match);
+
+static int thunder_pem_probe(struct platform_device *pdev)
+{
+ return pci_host_common_probe(pdev, &pci_thunder_pem_ops);
}
static struct platform_driver thunder_pem_driver = {
diff --git a/drivers/pci/host/pcie-armada8k.c b/drivers/pci/host/pcie-armada8k.c
new file mode 100644
index 000000000..55723567b
--- /dev/null
+++ b/drivers/pci/host/pcie-armada8k.c
@@ -0,0 +1,262 @@
+/*
+ * PCIe host controller driver for Marvell Armada-8K SoCs
+ *
+ * Armada-8K PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2016 Marvell Technology Group Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+
+#include "pcie-designware.h"
+
+struct armada8k_pcie {
+ void __iomem *base;
+ struct clk *clk;
+ struct pcie_port pp;
+};
+
+#define PCIE_VENDOR_REGS_OFFSET 0x8000
+
+#define PCIE_GLOBAL_CONTROL_REG 0x0
+#define PCIE_APP_LTSSM_EN BIT(2)
+#define PCIE_DEVICE_TYPE_SHIFT 4
+#define PCIE_DEVICE_TYPE_MASK 0xF
+#define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */
+
+#define PCIE_GLOBAL_STATUS_REG 0x8
+#define PCIE_GLB_STS_RDLH_LINK_UP BIT(1)
+#define PCIE_GLB_STS_PHY_LINK_UP BIT(9)
+
+#define PCIE_GLOBAL_INT_CAUSE1_REG 0x1C
+#define PCIE_GLOBAL_INT_MASK1_REG 0x20
+#define PCIE_INT_A_ASSERT_MASK BIT(9)
+#define PCIE_INT_B_ASSERT_MASK BIT(10)
+#define PCIE_INT_C_ASSERT_MASK BIT(11)
+#define PCIE_INT_D_ASSERT_MASK BIT(12)
+
+#define PCIE_ARCACHE_TRC_REG 0x50
+#define PCIE_AWCACHE_TRC_REG 0x54
+#define PCIE_ARUSER_REG 0x5C
+#define PCIE_AWUSER_REG 0x60
+/*
+ * AR/AW Cache defaults: Normal memory, Write-Back, Read / Write
+ * allocate
+ */
+#define ARCACHE_DEFAULT_VALUE 0x3511
+#define AWCACHE_DEFAULT_VALUE 0x5311
+
+#define DOMAIN_OUTER_SHAREABLE 0x2
+#define AX_USER_DOMAIN_MASK 0x3
+#define AX_USER_DOMAIN_SHIFT 4
+
+#define to_armada8k_pcie(x) container_of(x, struct armada8k_pcie, pp)
+
+static int armada8k_pcie_link_up(struct pcie_port *pp)
+{
+ struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
+ u32 reg;
+ u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP;
+
+ reg = readl(pcie->base + PCIE_GLOBAL_STATUS_REG);
+
+ if ((reg & mask) == mask)
+ return 1;
+
+ dev_dbg(pp->dev, "No link detected (Global-Status: 0x%08x).\n", reg);
+ return 0;
+}
+
+static void armada8k_pcie_establish_link(struct pcie_port *pp)
+{
+ struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
+ void __iomem *base = pcie->base;
+ u32 reg;
+
+ if (!dw_pcie_link_up(pp)) {
+ /* Disable LTSSM state machine to enable configuration */
+ reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+ reg &= ~(PCIE_APP_LTSSM_EN);
+ writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+ }
+
+ /* Set the device to root complex mode */
+ reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+ reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT);
+ reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT;
+ writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+
+ /* Set the PCIe master AxCache attributes */
+ writel(ARCACHE_DEFAULT_VALUE, base + PCIE_ARCACHE_TRC_REG);
+ writel(AWCACHE_DEFAULT_VALUE, base + PCIE_AWCACHE_TRC_REG);
+
+ /* Set the PCIe master AxDomain attributes */
+ reg = readl(base + PCIE_ARUSER_REG);
+ reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
+ reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
+ writel(reg, base + PCIE_ARUSER_REG);
+
+ reg = readl(base + PCIE_AWUSER_REG);
+ reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT);
+ reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT;
+ writel(reg, base + PCIE_AWUSER_REG);
+
+ /* Enable INT A-D interrupts */
+ reg = readl(base + PCIE_GLOBAL_INT_MASK1_REG);
+ reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK |
+ PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK;
+ writel(reg, base + PCIE_GLOBAL_INT_MASK1_REG);
+
+ if (!dw_pcie_link_up(pp)) {
+ /* Configuration done. Start LTSSM */
+ reg = readl(base + PCIE_GLOBAL_CONTROL_REG);
+ reg |= PCIE_APP_LTSSM_EN;
+ writel(reg, base + PCIE_GLOBAL_CONTROL_REG);
+ }
+
+ /* Wait until the link becomes active again */
+ if (dw_pcie_wait_for_link(pp))
+ dev_err(pp->dev, "Link not up after reconfiguration\n");
+}
+
+static void armada8k_pcie_host_init(struct pcie_port *pp)
+{
+ dw_pcie_setup_rc(pp);
+ armada8k_pcie_establish_link(pp);
+}
+
+static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+ struct armada8k_pcie *pcie = to_armada8k_pcie(pp);
+ void __iomem *base = pcie->base;
+ u32 val;
+
+ /*
+ * Interrupts are directly handled by the device driver of the
+ * PCI device. However, they are also latched into the PCIe
+ * controller, so we simply discard them.
+ */
+ val = readl(base + PCIE_GLOBAL_INT_CAUSE1_REG);
+ writel(val, base + PCIE_GLOBAL_INT_CAUSE1_REG);
+
+ return IRQ_HANDLED;
+}
+
+static struct pcie_host_ops armada8k_pcie_host_ops = {
+ .link_up = armada8k_pcie_link_up,
+ .host_init = armada8k_pcie_host_init,
+};
+
+static int armada8k_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pp->root_bus_nr = -1;
+ pp->ops = &armada8k_pcie_host_ops;
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (pp->irq < 0) {
+ dev_err(dev, "failed to get irq for port\n");
+ return pp->irq;
+ }
+
+ ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
+ IRQF_SHARED, "armada8k-pcie", pp);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", pp->irq);
+ return ret;
+ }
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int armada8k_pcie_probe(struct platform_device *pdev)
+{
+ struct armada8k_pcie *pcie;
+ struct pcie_port *pp;
+ struct device *dev = &pdev->dev;
+ struct resource *base;
+ int ret;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
+
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return ret;
+
+ pp = &pcie->pp;
+ pp->dev = dev;
+ platform_set_drvdata(pdev, pcie);
+
+ /* Get the dw-pcie unit configuration/control registers base. */
+ base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
+ pp->dbi_base = devm_ioremap_resource(dev, base);
+ if (IS_ERR(pp->dbi_base)) {
+ dev_err(dev, "couldn't remap regs base %p\n", base);
+ ret = PTR_ERR(pp->dbi_base);
+ goto fail;
+ }
+
+ pcie->base = pp->dbi_base + PCIE_VENDOR_REGS_OFFSET;
+
+ ret = armada8k_add_pcie_port(pp, pdev);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ if (!IS_ERR(pcie->clk))
+ clk_disable_unprepare(pcie->clk);
+
+ return ret;
+}
+
+static const struct of_device_id armada8k_pcie_of_match[] = {
+ { .compatible = "marvell,armada8k-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, armada8k_pcie_of_match);
+
+static struct platform_driver armada8k_pcie_driver = {
+ .probe = armada8k_pcie_probe,
+ .driver = {
+ .name = "armada8k-pcie",
+ .of_match_table = of_match_ptr(armada8k_pcie_of_match),
+ },
+};
+
+module_platform_driver(armada8k_pcie_driver);
+
+MODULE_DESCRIPTION("Armada 8k PCIe host controller driver");
+MODULE_AUTHOR("Yehuda Yitshak <yehuday@marvell.com>");
+MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index a4cccd356..aafd76654 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -434,7 +434,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
struct platform_device *pdev = to_platform_device(pp->dev);
struct pci_bus *bus, *child;
struct resource *cfg_res;
- u32 val;
int i, ret;
LIST_HEAD(res);
struct resource_entry *win;
@@ -544,25 +543,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
if (pp->ops->host_init)
pp->ops->host_init(pp);
- /*
- * If the platform provides ->rd_other_conf, it means the platform
- * uses its own address translation component rather than ATU, so
- * we should not program the ATU here.
- */
- if (!pp->ops->rd_other_conf)
- dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
- PCIE_ATU_TYPE_MEM, pp->mem_base,
- pp->mem_bus_addr, pp->mem_size);
-
- dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
-
- /* program correct class for RC */
- dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
-
- dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
- val |= PORT_LOGIC_SPEED_CHANGE;
- dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
-
pp->root_bus_nr = pp->busn->start;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr,
@@ -728,8 +708,6 @@ static struct pci_ops dw_pcie_ops = {
void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val;
- u32 membase;
- u32 memlimit;
/* set the number of lanes */
dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
@@ -788,18 +766,31 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
val |= 0x00010100;
dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
- /* setup memory base, memory limit */
- membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
- memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
- val = memlimit | membase;
- dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);
-
/* setup command register */
dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_rc(pp, val, PCI_COMMAND);
+
+ /*
+ * If the platform provides ->rd_other_conf, it means the platform
+ * uses its own address translation component rather than ATU, so
+ * we should not program the ATU here.
+ */
+ if (!pp->ops->rd_other_conf)
+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_TYPE_MEM, pp->mem_base,
+ pp->mem_bus_addr, pp->mem_size);
+
+ dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+
+ /* program correct class for RC */
+ dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+
+ dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
index 5139e6443..3479d30e2 100644
--- a/drivers/pci/host/pcie-xilinx-nwl.c
+++ b/drivers/pci/host/pcie-xilinx-nwl.c
@@ -819,7 +819,7 @@ static int nwl_pcie_probe(struct platform_device *pdev)
err = nwl_pcie_bridge_init(pcie);
if (err) {
- dev_err(pcie->dev, "HW Initalization failed\n");
+ dev_err(pcie->dev, "HW Initialization failed\n");
return err;
}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 2f6d3a1c1..f6221d739 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -138,6 +138,8 @@ static union apci_descriptor *ibm_slot_from_id(int id)
char *table;
size = ibm_get_table_from_acpi(&table);
+ if (size < 0)
+ return NULL;
des = (union apci_descriptor *)table;
if (memcmp(des->header.sig, "aPCI", 4) != 0)
goto ibm_slot_done;
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index b46b57d87..dc67f3977 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -175,7 +175,7 @@ static int dlpar_add_pci_slot(char *drc_name, struct device_node *dn)
struct pci_dev *dev;
struct pci_controller *phb;
- if (pcibios_find_pci_bus(dn))
+ if (pci_find_bus_by_node(dn))
return -EINVAL;
/* Add pci bus */
@@ -212,7 +212,7 @@ static int dlpar_remove_phb(char *drc_name, struct device_node *dn)
struct pci_dn *pdn;
int rc = 0;
- if (!pcibios_find_pci_bus(dn))
+ if (!pci_find_bus_by_node(dn))
return -EINVAL;
/* If pci slot is hotpluggable, use hotplug to remove it */
@@ -356,7 +356,7 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
pci_lock_rescan_remove();
- bus = pcibios_find_pci_bus(dn);
+ bus = pci_find_bus_by_node(dn);
if (!bus) {
ret = -EINVAL;
goto out;
@@ -380,7 +380,7 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
}
/* Remove all devices below slot */
- pcibios_remove_pci_devices(bus);
+ pci_hp_remove_devices(bus);
/* Unmap PCI IO space */
if (pcibios_unmap_io_space(bus)) {
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 611f60562..8d132024f 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -404,7 +404,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
if (state == PRESENT) {
pci_lock_rescan_remove();
- pcibios_add_pci_devices(slot->bus);
+ pci_hp_add_devices(slot->bus);
pci_unlock_rescan_remove();
slot->state = CONFIGURED;
} else if (state == EMPTY) {
@@ -426,7 +426,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
return -EINVAL;
pci_lock_rescan_remove();
- pcibios_remove_pci_devices(slot->bus);
+ pci_hp_remove_devices(slot->bus);
pci_unlock_rescan_remove();
vm_unmap_aliases();
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index 7836d6913..ea41ea1d3 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -93,7 +93,7 @@ int rpaphp_enable_slot(struct slot *slot)
if (rc)
return rc;
- bus = pcibios_find_pci_bus(slot->dn);
+ bus = pci_find_bus_by_node(slot->dn);
if (!bus) {
err("%s: no pci_bus for dn %s\n", __func__, slot->dn->full_name);
return -EINVAL;
@@ -116,7 +116,7 @@ int rpaphp_enable_slot(struct slot *slot)
}
if (list_empty(&bus->devices))
- pcibios_add_pci_devices(bus);
+ pci_hp_add_devices(bus);
if (!list_empty(&bus->devices)) {
info->adapter_status = CONFIGURED;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 342b6918b..d319a9ca9 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -1008,6 +1008,9 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
if (i >= PCI_ROM_RESOURCE)
return -ENODEV;
+ if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
+ return -EINVAL;
+
if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
@@ -1024,10 +1027,6 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
pci_resource_to_user(pdev, i, res, &start, &end);
vma->vm_pgoff += start >> PAGE_SHIFT;
mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
-
- if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(start))
- return -EINVAL;
-
return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 25e0327d4..c8b4dbdd1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2228,7 +2228,7 @@ void pci_pm_init(struct pci_dev *dev)
static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
{
- unsigned long flags = IORESOURCE_PCI_FIXED;
+ unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
switch (prop) {
case PCI_EA_P_MEM:
@@ -2389,7 +2389,7 @@ out:
return offset + ent_size;
}
-/* Enhanced Allocation Initalization */
+/* Enhanced Allocation Initialization */
void pci_ea_init(struct pci_dev *dev)
{
int ea;
@@ -2547,7 +2547,7 @@ void pci_request_acs(void)
* pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
* @dev: the PCI device
*/
-static int pci_std_enable_acs(struct pci_dev *dev)
+static void pci_std_enable_acs(struct pci_dev *dev)
{
int pos;
u16 cap;
@@ -2555,7 +2555,7 @@ static int pci_std_enable_acs(struct pci_dev *dev)
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
if (!pos)
- return -ENODEV;
+ return;
pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
@@ -2573,8 +2573,6 @@ static int pci_std_enable_acs(struct pci_dev *dev)
ctrl |= (cap & PCI_ACS_UF);
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
-
- return 0;
}
/**
@@ -2586,10 +2584,10 @@ void pci_enable_acs(struct pci_dev *dev)
if (!pci_acs_enable)
return;
- if (!pci_std_enable_acs(dev))
+ if (!pci_dev_specific_enable_acs(dev))
return;
- pci_dev_specific_enable_acs(dev);
+ pci_std_enable_acs(dev);
}
static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
@@ -3021,6 +3019,121 @@ int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
}
EXPORT_SYMBOL(pci_request_regions_exclusive);
+#ifdef PCI_IOBASE
+struct io_range {
+ struct list_head list;
+ phys_addr_t start;
+ resource_size_t size;
+};
+
+static LIST_HEAD(io_range_list);
+static DEFINE_SPINLOCK(io_range_lock);
+#endif
+
+/*
+ * Record the PCI IO range (expressed as CPU physical address + size).
+ * Return a negative value if an error has occurred, zero otherwise
+ */
+int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
+{
+ int err = 0;
+
+#ifdef PCI_IOBASE
+ struct io_range *range;
+ resource_size_t allocated_size = 0;
+
+ /* check if the range hasn't been previously recorded */
+ spin_lock(&io_range_lock);
+ list_for_each_entry(range, &io_range_list, list) {
+ if (addr >= range->start && addr + size <= range->start + range->size) {
+ /* range already registered, bail out */
+ goto end_register;
+ }
+ allocated_size += range->size;
+ }
+
+ /* range not registered yet, check for available space */
+ if (allocated_size + size - 1 > IO_SPACE_LIMIT) {
+ /* if it's too big check if 64K space can be reserved */
+ if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) {
+ err = -E2BIG;
+ goto end_register;
+ }
+
+ size = SZ_64K;
+ pr_warn("Requested IO range too big, new size set to 64K\n");
+ }
+
+ /* add the range to the list */
+ range = kzalloc(sizeof(*range), GFP_ATOMIC);
+ if (!range) {
+ err = -ENOMEM;
+ goto end_register;
+ }
+
+ range->start = addr;
+ range->size = size;
+
+ list_add_tail(&range->list, &io_range_list);
+
+end_register:
+ spin_unlock(&io_range_lock);
+#endif
+
+ return err;
+}
+
+phys_addr_t pci_pio_to_address(unsigned long pio)
+{
+ phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
+
+#ifdef PCI_IOBASE
+ struct io_range *range;
+ resource_size_t allocated_size = 0;
+
+ if (pio > IO_SPACE_LIMIT)
+ return address;
+
+ spin_lock(&io_range_lock);
+ list_for_each_entry(range, &io_range_list, list) {
+ if (pio >= allocated_size && pio < allocated_size + range->size) {
+ address = range->start + pio - allocated_size;
+ break;
+ }
+ allocated_size += range->size;
+ }
+ spin_unlock(&io_range_lock);
+#endif
+
+ return address;
+}
+
+unsigned long __weak pci_address_to_pio(phys_addr_t address)
+{
+#ifdef PCI_IOBASE
+ struct io_range *res;
+ resource_size_t offset = 0;
+ unsigned long addr = -1;
+
+ spin_lock(&io_range_lock);
+ list_for_each_entry(res, &io_range_list, list) {
+ if (address >= res->start && address < res->start + res->size) {
+ addr = address - res->start + offset;
+ break;
+ }
+ offset += res->size;
+ }
+ spin_unlock(&io_range_lock);
+
+ return addr;
+#else
+ if (address > IO_SPACE_LIMIT)
+ return (unsigned long)-1;
+
+ return (unsigned long) address;
+#endif
+}
+
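Host bridge drivers use these helpers in pairs: the physical window is registered once, after which the logical port-I/O cookie and the CPU physical address translate back and forth. A minimal usage sketch (the window address is hypothetical):

    phys_addr_t phys = 0xf0100000;          /* hypothetical I/O window */

    if (!pci_register_io_range(phys, SZ_64K)) {
            unsigned long pio = pci_address_to_pio(phys);

            WARN_ON(pci_pio_to_address(pio) != phys);   /* round-trips */
    }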
/**
* pci_remap_iospace - Remap the memory mapped I/O space
* @res: Resource describing the I/O space
@@ -4578,6 +4691,37 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
return 0;
}
+/**
+ * pci_add_dma_alias - Add a DMA devfn alias for a device
+ * @dev: the PCI device for which alias is added
+ * @devfn: alias slot and function
+ *
+ * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask.
+ * It should be called early, preferably as a PCI fixup header quirk.
+ */
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
+{
+ if (!dev->dma_alias_mask)
+ dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX + 1),
+ sizeof(long), GFP_KERNEL);
+ if (!dev->dma_alias_mask) {
+ dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n");
+ return;
+ }
+
+ set_bit(devfn, dev->dma_alias_mask);
+ dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
+ PCI_SLOT(devfn), PCI_FUNC(devfn));
+}
+
+bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
+{
+ return (dev1->dma_alias_mask &&
+ test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
+ (dev2->dma_alias_mask &&
+ test_bit(dev1->devfn, dev2->dma_alias_mask));
+}
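+
+/*
+ * Usage sketch for the new helper (hypothetical device and IDs, for
+ * illustration only): a header fixup for a device that issues DMA from
+ * function 7 of its own slot.
+ */
+static void example_dma_alias_quirk(struct pci_dev *pdev)
+{
+	pci_add_dma_alias(pdev, PCI_DEVFN(PCI_SLOT(pdev->devfn), 7));
+}
+DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, example_dma_alias_quirk);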
+
bool pci_device_is_present(struct pci_dev *pdev)
{
u32 v;
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 72db7f420..22ca6412b 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -81,3 +81,17 @@ endchoice
config PCIE_PME
def_bool y
depends on PCIEPORTBUS && PM
+
+config PCIE_DPC
+ tristate "PCIe Downstream Port Containment support"
+ depends on PCIEPORTBUS
+ default n
+ help
+ This enables PCI Express Downstream Port Containment (DPC)
+ driver support. DPC events from Root and Downstream ports
+ will be handled by the DPC driver. If your system doesn't
+ have this capability or you do not want to use this feature,
+ it is safe to answer N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pcie-dpc.
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
index 00c62df5a..b24525b3d 100644
--- a/drivers/pci/pcie/Makefile
+++ b/drivers/pci/pcie/Makefile
@@ -14,3 +14,5 @@ obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
obj-$(CONFIG_PCIEAER) += aer/
obj-$(CONFIG_PCIE_PME) += pme.o
+
+obj-$(CONFIG_PCIE_DPC) += pcie-dpc.o
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c
new file mode 100644
index 000000000..ab552f1bc
--- /dev/null
+++ b/drivers/pci/pcie/pcie-dpc.c
@@ -0,0 +1,163 @@
+/*
+ * PCI Express Downstream Port Containment services driver
+ * Copyright (C) 2016 Intel Corp.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pcieport_if.h>
+
+struct dpc_dev {
+ struct pcie_device *dev;
+ struct work_struct work;
+ int cap_pos;
+};
+
+static void dpc_wait_link_inactive(struct pci_dev *pdev)
+{
+ unsigned long timeout = jiffies + HZ;
+ u16 lnk_status;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ while (lnk_status & PCI_EXP_LNKSTA_DLLLA &&
+ !time_after(jiffies, timeout)) {
+ msleep(10);
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ }
+ if (lnk_status & PCI_EXP_LNKSTA_DLLLA)
+ dev_warn(&pdev->dev, "Link state not disabled for DPC event\n");
+}
+
+static void interrupt_event_handler(struct work_struct *work)
+{
+ struct dpc_dev *dpc = container_of(work, struct dpc_dev, work);
+ struct pci_dev *dev, *temp, *pdev = dpc->dev->port;
+ struct pci_bus *parent = pdev->subordinate;
+
+ pci_lock_rescan_remove();
+ list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
+ bus_list) {
+ pci_dev_get(dev);
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
+ }
+ pci_unlock_rescan_remove();
+
+ dpc_wait_link_inactive(pdev);
+ pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS,
+ PCI_EXP_DPC_STATUS_TRIGGER | PCI_EXP_DPC_STATUS_INTERRUPT);
+}
+
+static irqreturn_t dpc_irq(int irq, void *context)
+{
+ struct dpc_dev *dpc = (struct dpc_dev *)context;
+ struct pci_dev *pdev = dpc->dev->port;
+ u16 status, source;
+
+ pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status);
+ pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_SOURCE_ID,
+ &source);
+ if (!status)
+ return IRQ_NONE;
+
+ dev_info(&dpc->dev->device, "DPC containment event, status:%#06x source:%#06x\n",
+ status, source);
+
+ if (status & PCI_EXP_DPC_STATUS_TRIGGER) {
+ u16 reason = (status >> 1) & 0x3;
+
+ dev_warn(&dpc->dev->device, "DPC %s triggered, remove downstream devices\n",
+ (reason == 0) ? "unmasked uncorrectable error" :
+ (reason == 1) ? "ERR_NONFATAL" :
+ (reason == 2) ? "ERR_FATAL" : "extended error");
+ schedule_work(&dpc->work);
+ }
+ return IRQ_HANDLED;
+}
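+
+/*
+ * The reason decoding above, restated as an equivalent helper (a sketch,
+ * not part of this patch): bit 0 of the DPC status register is the
+ * trigger flag and bits 2:1 encode why containment fired.
+ */
+static const char *dpc_reason_str(u16 status)
+{
+	switch ((status >> 1) & 0x3) {
+	case 0: return "unmasked uncorrectable error";
+	case 1: return "ERR_NONFATAL";
+	case 2: return "ERR_FATAL";
+	default: return "extended error";
+	}
+}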
+
+#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
+static int dpc_probe(struct pcie_device *dev)
+{
+ struct dpc_dev *dpc;
+ struct pci_dev *pdev = dev->port;
+ int status;
+ u16 ctl, cap;
+
+ dpc = kzalloc(sizeof(*dpc), GFP_KERNEL);
+ if (!dpc)
+ return -ENOMEM;
+
+ dpc->cap_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
+ dpc->dev = dev;
+ INIT_WORK(&dpc->work, interrupt_event_handler);
+ set_service_data(dev, dpc);
+
+ status = request_irq(dev->irq, dpc_irq, IRQF_SHARED, "pcie-dpc", dpc);
+ if (status) {
+ dev_warn(&dev->device, "request IRQ%d failed: %d\n", dev->irq,
+ status);
+ goto out;
+ }
+
+ pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap);
+ pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
+
+ ctl |= PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN;
+ pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
+
+ dev_info(&dev->device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
+ cap & 0xf, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
+ FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
+ FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), (cap >> 8) & 0xf,
+ FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
+ return status;
+ out:
+ kfree(dpc);
+ return status;
+}
+
+static void dpc_remove(struct pcie_device *dev)
+{
+ struct dpc_dev *dpc = get_service_data(dev);
+ struct pci_dev *pdev = dev->port;
+ u16 ctl;
+
+ pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl);
+ ctl &= ~(PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN);
+ pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl);
+
+ free_irq(dev->irq, dpc);
+ kfree(dpc);
+}
+
+static struct pcie_port_service_driver dpcdriver = {
+ .name = "dpc",
+ .port_type = PCI_EXP_TYPE_ROOT_PORT | PCI_EXP_TYPE_DOWNSTREAM,
+ .service = PCIE_PORT_SERVICE_DPC,
+ .probe = dpc_probe,
+ .remove = dpc_remove,
+};
+
+static int __init dpc_service_init(void)
+{
+ return pcie_port_service_register(&dpcdriver);
+}
+
+static void __exit dpc_service_exit(void)
+{
+ pcie_port_service_unregister(&dpcdriver);
+}
+
+MODULE_DESCRIPTION("PCI Express Downstream Port Containment driver");
+MODULE_AUTHOR("Keith Busch <keith.busch@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1");
+
+module_init(dpc_service_init);
+module_exit(dpc_service_exit);
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index d52554840..587aef360 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -11,14 +11,14 @@
#include <linux/compiler.h>
-#define PCIE_PORT_DEVICE_MAXSERVICES 4
+#define PCIE_PORT_DEVICE_MAXSERVICES 5
/*
* According to the PCI Express Base Specification 2.0, the indices of
* the MSI-X table entries used by port services must not exceed 31
*/
#define PCIE_PORT_MAX_MSIX_ENTRIES 32
-#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
+#define get_descriptor_id(type, service) (((type - 4) << 8) | service)
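+/*
+ * Worked example of why the shift widens: service masks are single bits,
+ * and the new fifth service occupies BIT(4) = 0x10, which would collide
+ * with the old 4-bit type field. With an 8-bit shift the fields stay
+ * disjoint, e.g. for a service mask of 0x10:
+ *
+ *   root port (type 0x4):       ((0x4 - 4) << 8) | 0x10 = 0x010
+ *   downstream port (type 0x6): ((0x6 - 4) << 8) | 0x10 = 0x210
+ *
+ * This is also why portdrv_core.c below widens dev_set_name() from
+ * "%02x" to "%03x": descriptor IDs now need three hex digits.
+ */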
extern struct bus_type pcie_port_bus_type;
int pcie_port_device_register(struct pci_dev *dev);
@@ -67,17 +67,14 @@ static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
#endif /* !CONFIG_PCIE_PME */
#ifdef CONFIG_ACPI
-int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
+void pcie_port_acpi_setup(struct pci_dev *port, int *mask);
-static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+static inline void pcie_port_platform_notify(struct pci_dev *port, int *mask)
{
- return pcie_port_acpi_setup(port, mask);
+ pcie_port_acpi_setup(port, mask);
}
#else /* !CONFIG_ACPI */
-static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
-{
- return 0;
-}
+static inline void pcie_port_platform_notify(struct pci_dev *port, int *mask){}
#endif /* !CONFIG_ACPI */
#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
index b4d2894ee..6b8c2f1d0 100644
--- a/drivers/pci/pcie/portdrv_acpi.c
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -32,32 +32,30 @@
* NOTE: It turns out that we cannot do that for individual port services
* separately, because that would make some systems work incorrectly.
*/
-int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
+void pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
{
struct acpi_pci_root *root;
acpi_handle handle;
u32 flags;
if (acpi_pci_disabled)
- return 0;
+ return;
handle = acpi_find_root_bridge_handle(port);
if (!handle)
- return -EINVAL;
+ return;
root = acpi_pci_find_root(handle);
if (!root)
- return -ENODEV;
+ return;
flags = root->osc_control_set;
- *srv_mask = PCIE_PORT_SERVICE_VC;
+ *srv_mask = PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_DPC;
if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
*srv_mask |= PCIE_PORT_SERVICE_HP;
if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
*srv_mask |= PCIE_PORT_SERVICE_PME;
if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
*srv_mask |= PCIE_PORT_SERVICE_AER;
-
- return 0;
}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 88122dc2e..32d4d0a3d 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -254,38 +254,28 @@ static void cleanup_service_irqs(struct pci_dev *dev)
static int get_port_device_capability(struct pci_dev *dev)
{
int services = 0;
- u32 reg32;
int cap_mask = 0;
- int err;
if (pcie_ports_disabled)
return 0;
cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
- | PCIE_PORT_SERVICE_VC;
+ | PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_DPC;
if (pci_aer_available())
cap_mask |= PCIE_PORT_SERVICE_AER;
- if (pcie_ports_auto) {
- err = pcie_port_platform_notify(dev, &cap_mask);
- if (err)
- return 0;
- }
+ if (pcie_ports_auto)
+ pcie_port_platform_notify(dev, &cap_mask);
/* Hot-Plug Capable */
- if ((cap_mask & PCIE_PORT_SERVICE_HP) &&
- pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT) {
- pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, &reg32);
- if (reg32 & PCI_EXP_SLTCAP_HPC) {
- services |= PCIE_PORT_SERVICE_HP;
- /*
- * Disable hot-plug interrupts in case they have been
- * enabled by the BIOS and the hot-plug service driver
- * is not loaded.
- */
- pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
- PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
- }
+ if ((cap_mask & PCIE_PORT_SERVICE_HP) && dev->is_hotplug_bridge) {
+ services |= PCIE_PORT_SERVICE_HP;
+ /*
+ * Disable hot-plug interrupts in case they have been enabled
+ * by the BIOS and the hot-plug service driver is not loaded.
+ */
+ pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
}
/* AER capable */
if ((cap_mask & PCIE_PORT_SERVICE_AER)
@@ -311,6 +301,8 @@ static int get_port_device_capability(struct pci_dev *dev)
*/
pcie_pme_interrupt_enable(dev, false);
}
+ if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC))
+ services |= PCIE_PORT_SERVICE_DPC;
return services;
}
@@ -338,7 +330,7 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
device = &pcie->device;
device->bus = &pcie_port_bus_type;
device->release = release_pcie_device; /* callback to free pcie dev */
- dev_set_name(device, "%s:pcie%02x",
+ dev_set_name(device, "%s:pcie%03x",
pci_name(pdev),
get_descriptor_id(pci_pcie_type(pdev), service));
device->parent = &pdev->dev;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index bf8405fb4..8e3ef7209 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1537,6 +1537,7 @@ static void pci_release_dev(struct device *dev)
pcibios_release_device(pci_dev);
pci_bus_put(pci_dev->bus);
kfree(pci_dev->driver_override);
+ kfree(pci_dev->dma_alias_mask);
kfree(pci_dev);
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 8e678027b..ee72ebe18 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3150,6 +3150,39 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
quirk_broken_intx_masking);
+/*
+ * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking:
+ * DisINTx can be set, but the interrupt status bit is non-functional.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1572,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1574,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1580,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1581,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1583,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1584,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1585,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1586,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1587,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1588,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1589,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d0,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d1,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d2,
+ quirk_broken_intx_masking);
+
static void quirk_no_bus_reset(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
@@ -3185,6 +3218,29 @@ static void quirk_no_pm_reset(struct pci_dev *dev)
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
+/*
+ * Thunderbolt controllers with broken MSI hotplug signaling:
+ * Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part
+ * of the 2nd generation (Cactus Ridge 4C up to revision 1, Port Ridge).
+ */
+static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
+{
+ if (pdev->is_hotplug_bridge &&
+ (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
+ pdev->revision <= 1))
+ pdev->no_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
+ quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE,
+ quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK,
+ quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
+ quirk_thunderbolt_hotplug_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
+ quirk_thunderbolt_hotplug_msi);
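+
+/*
+ * A truth-table reading of the condition above: every listed controller
+ * other than Cactus Ridge 4C matches the "device !=" arm outright, so
+ * only Cactus Ridge 4C consults the revision:
+ *
+ *   Light Ridge / Eagle Ridge / Light Peak / Port Ridge -> no_msi = 1
+ *   Cactus Ridge 4C, revision 0 or 1                    -> no_msi = 1
+ *   Cactus Ridge 4C, revision >= 2                      -> MSI stays usable
+ */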
+
#ifdef CONFIG_ACPI
/*
* Apple: Shutdown Cactus Ridge Thunderbolt controller.
@@ -3232,7 +3288,8 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
acpi_execute_simple_method(SXIO, NULL, 0);
acpi_execute_simple_method(SXLV, NULL, 0);
}
-DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, 0x1547,
+DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
quirk_apple_poweroff_thunderbolt);
/*
@@ -3266,9 +3323,11 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
if (!nhi)
goto out;
if (nhi->vendor != PCI_VENDOR_ID_INTEL
- || (nhi->device != 0x1547 && nhi->device != 0x156c)
- || nhi->subsystem_vendor != 0x2222
- || nhi->subsystem_device != 0x1111)
+ || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
+ nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
+ nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
+ || nhi->subsystem_vendor != 0x2222
+ || nhi->subsystem_device != 0x1111)
goto out;
dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
device_pm_wait_for_dev(&dev->dev, &nhi->dev);
@@ -3276,9 +3335,14 @@ out:
pci_dev_put(nhi);
pci_dev_put(sibling);
}
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1547,
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
quirk_apple_wait_for_thunderbolt);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x156d,
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
+ quirk_apple_wait_for_thunderbolt);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
quirk_apple_wait_for_thunderbolt);
#endif
@@ -3610,10 +3674,8 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe)
static void quirk_dma_func0_alias(struct pci_dev *dev)
{
- if (PCI_FUNC(dev->devfn) != 0) {
- dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
- }
+ if (PCI_FUNC(dev->devfn) != 0)
+ pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
}
/*
@@ -3626,10 +3688,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
static void quirk_dma_func1_alias(struct pci_dev *dev)
{
- if (PCI_FUNC(dev->devfn) != 1) {
- dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 1);
- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
- }
+ if (PCI_FUNC(dev->devfn) != 1)
+ pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1));
}
/*
@@ -3695,13 +3755,8 @@ static void quirk_fixed_dma_alias(struct pci_dev *dev)
const struct pci_device_id *id;
id = pci_match_id(fixed_dma_alias_tbl, dev);
- if (id) {
- dev->dma_alias_devfn = id->driver_data;
- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
- dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
- PCI_SLOT(dev->dma_alias_devfn),
- PCI_FUNC(dev->dma_alias_devfn));
- }
+ if (id)
+ pci_add_dma_alias(dev, id->driver_data);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
@@ -3734,6 +3789,21 @@ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
/*
+ * The MIC x200 NTB forwards PCIe traffic using multiple alien RIDs. They have
+ * to be added as aliases to the DMA device in order to allow buffer access
+ * when the IOMMU is enabled. The following devfns have to match the RIT-LUT
+ * table programmed in the EEPROM.
+ */
+static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
+{
+ pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0));
+ pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0));
+ pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3));
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
+
+/*
* Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
* class code. Fix it.
*/
@@ -3936,6 +4006,55 @@ static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
return acs_flags & ~flags ? 0 : 1;
}
+/*
+ * Sunrise Point PCH root ports implement ACS, but unfortunately as shown in
+ * the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2,
+ * 12.1.46, 12.1.47)[1] this chipset uses dwords for the ACS capability and
+ * control registers whereas the PCIe spec packs them into words (Rev 3.0,
+ * 7.16 ACS Extended Capability). The bit definitions are correct, but the
+ * control register is at offset 8 instead of 6 and we should probably use
+ * dword accesses to them. This applies to the following PCI Device IDs, as
+ * found in volume 1 of the datasheet[2]:
+ *
+ * 0xa110-0xa11f Sunrise Point-H PCI Express Root Port #{0-16}
+ * 0xa167-0xa16a Sunrise Point-H PCI Express Root Port #{17-20}
+ *
+ * N.B. This doesn't fix what lspci shows.
+ *
+ * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
+ * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
+ */
+static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
+{
+ return pci_is_pcie(dev) &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT &&
+ ((dev->device & ~0xf) == 0xa110 ||
+ (dev->device >= 0xa167 && dev->device <= 0xa16a));
+}
+
+#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)
+
+static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ int pos;
+ u32 cap, ctrl;
+
+ if (!pci_quirk_intel_spt_pch_acs_match(dev))
+ return -ENOTTY;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return -ENOTTY;
+
+ /* see pci_acs_flags_enabled() */
+ pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
+ acs_flags &= (cap | PCI_ACS_EC);
+
+ pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
+
+ return acs_flags & ~ctrl ? 0 : 1;
+}
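+
+/*
+ * Offset arithmetic behind INTEL_SPT_ACS_CTRL, per the layouts described
+ * above (PCI_ACS_CAP is offset 0x04 within the extended capability):
+ *
+ *   PCIe spec (word-packed)        SPT datasheet (dword per register)
+ *     +0x04  ACS capability (u16)    +0x04  ACS capability (u32)
+ *     +0x06  ACS control    (u16)    +0x08  ACS control    (u32)
+ *
+ * hence PCI_ACS_CAP + 4 = 0x08 for the control register.
+ */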
+
static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
{
/*
@@ -4024,6 +4143,7 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
/* Intel PCH root ports */
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
/* Cavium ThunderX */
@@ -4159,16 +4279,44 @@ static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
return 0;
}
+static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
+{
+ int pos;
+ u32 cap, ctrl;
+
+ if (!pci_quirk_intel_spt_pch_acs_match(dev))
+ return -ENOTTY;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return -ENOTTY;
+
+ pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
+ pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
+
+ ctrl |= (cap & PCI_ACS_SV);
+ ctrl |= (cap & PCI_ACS_RR);
+ ctrl |= (cap & PCI_ACS_CR);
+ ctrl |= (cap & PCI_ACS_UF);
+
+ pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
+
+ dev_info(&dev->dev, "Intel SPT PCH root port ACS workaround enabled\n");
+
+ return 0;
+}
+
static const struct pci_dev_enable_acs {
u16 vendor;
u16 device;
int (*enable_acs)(struct pci_dev *dev);
} pci_dev_enable_acs[] = {
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_spt_pch_acs },
{ 0 }
};
-void pci_dev_specific_enable_acs(struct pci_dev *dev)
+int pci_dev_specific_enable_acs(struct pci_dev *dev)
{
const struct pci_dev_enable_acs *i;
int ret;
@@ -4180,9 +4328,11 @@ void pci_dev_specific_enable_acs(struct pci_dev *dev)
i->device == (u16)PCI_ANY_ID)) {
ret = i->enable_acs(dev);
if (ret >= 0)
- return;
+ return ret;
}
}
+
+ return -ENOTTY;
}
/*
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index a20ce7d5e..33e0f033a 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -40,11 +40,15 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
* If the device is broken and uses an alias requester ID for
* DMA, iterate over that too.
*/
- if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) {
- ret = fn(pdev, PCI_DEVID(pdev->bus->number,
- pdev->dma_alias_devfn), data);
- if (ret)
- return ret;
+ if (unlikely(pdev->dma_alias_mask)) {
+ unsigned int devfn;
+
+ for_each_set_bit(devfn, pdev->dma_alias_mask, U8_MAX + 1) {
+ ret = fn(pdev, PCI_DEVID(pdev->bus->number, devfn),
+ data);
+ if (ret)
+ return ret;
+ }
}
for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
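A brief usage sketch of the iterator this hunk extends (the callback name is hypothetical): fn is invoked for the device's own requester ID, for every alias devfn set in dma_alias_mask, and for any aliasing bridges found on the walk toward the root bus.

	static int example_print_rid(struct pci_dev *pdev, u16 rid, void *data)
	{
		dev_info(&pdev->dev, "possible DMA requester ID %04x\n", rid);
		return 0;	/* a non-zero return stops the walk */
	}

	/* pci_for_each_dma_alias(pdev, example_print_rid, NULL); */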
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 61cf61ac6..4d7bc3f41 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -228,7 +228,7 @@ static int electra_cf_probe(struct platform_device *ofdev)
if (!cf->mem_base || !cf->io_virt || !cf->gpio_base ||
(__ioremap_at(io.start, cf->io_virt, cf->io_size,
- _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL)) {
+ pgprot_val(pgprot_noncached(__pgprot(0)))) == NULL)) {
dev_err(device, "can't ioremap ranges\n");
status = -ENOMEM;
goto fail1;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 0e537fdc1..140436a04 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -847,6 +847,14 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
if (!platform_get_irq(cpu_pmu->plat_device, 0))
cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ /*
+ * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
+ * big.LITTLE). This is not an uncore PMU, and we have taken ctx
+ * sharing into account (e.g. with our pmu::filter_match callback and
+ * pmu::event_init group validation).
+ */
+ cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;
+
return 0;
out_unregister:
@@ -942,17 +950,14 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
/* For SPIs, we need to track the affinity per IRQ */
if (using_spi) {
- if (i >= pdev->num_resources) {
- of_node_put(dn);
+ if (i >= pdev->num_resources)
break;
- }
irqs[i] = cpu;
}
/* Keep track of the CPUs containing this PMU type */
cpumask_set_cpu(cpu, &pmu->supported_cpus);
- of_node_put(dn);
i++;
} while (1);
@@ -1005,8 +1010,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
if (!ret)
ret = init_fn(pmu);
} else {
- ret = probe_current_pmu(pmu, probe_table);
cpumask_setall(&pmu->supported_cpus);
+ ret = probe_current_pmu(pmu, probe_table);
}
if (ret) {
@@ -1035,6 +1040,7 @@ out_destroy:
out_free:
pr_info("%s: failed to register PMU devices!\n",
of_node_full_name(node));
+ kfree(pmu->irq_affinity);
kfree(pmu);
return ret;
}
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 26566db09..b869b9883 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -15,6 +15,15 @@ config GENERIC_PHY
phy users can obtain reference to the PHY. All the users of this
framework should select this config.
+config PHY_BCM_NS_USB2
+ tristate "Broadcom Northstar USB 2.0 PHY Driver"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ select GENERIC_PHY
+ help
+ Enable this to support the Broadcom USB 2.0 PHY connected to the
+ USB controller on the Northstar family.
+
config PHY_BERLIN_USB
tristate "Marvell Berlin USB PHY Driver"
depends on ARCH_BERLIN && RESET_CONTROLLER && HAS_IOMEM && OF
@@ -113,14 +122,15 @@ config PHY_MIPHY365X
config PHY_RCAR_GEN2
tristate "Renesas R-Car generation 2 USB PHY driver"
- depends on ARCH_SHMOBILE
+ depends on ARCH_RENESAS
depends on GENERIC_PHY
help
Support for USB PHY found on Renesas R-Car generation 2 SoCs.
config PHY_RCAR_GEN3_USB2
tristate "Renesas R-Car generation 3 USB 2.0 PHY driver"
- depends on OF && ARCH_SHMOBILE
+ depends on ARCH_RENESAS
+ depends on EXTCON
select GENERIC_PHY
help
Support for USB 2.0 PHY found on Renesas R-Car generation 3 SoCs.
@@ -218,9 +228,8 @@ config PHY_MT65XX_USB3
depends on ARCH_MEDIATEK && OF
select GENERIC_PHY
help
- Say 'Y' here to add support for Mediatek USB3.0 PHY driver
- for mt65xx SoCs. it supports two usb2.0 ports and
- one usb3.0 port.
+ Say 'Y' here to add support for the Mediatek USB 3.0 PHY driver;
+ it supports multiple USB 2.0 and USB 3.0 ports.
config PHY_HI6220_USB
tristate "hi6220 USB PHY support"
@@ -250,7 +259,8 @@ config PHY_SUN9I_USB
tristate "Allwinner sun9i SoC USB PHY driver"
depends on ARCH_SUNXI && HAS_IOMEM && OF
depends on RESET_CONTROLLER
- depends on USB_COMMON
+ depends on USB_SUPPORT
+ select USB_COMMON
select GENERIC_PHY
help
Enable this to support the transceiver that is part of Allwinner
@@ -403,14 +413,15 @@ config PHY_TUSB1210
help
Support for TI TUSB1210 USB ULPI PHY.
-config PHY_BRCMSTB_SATA
- tristate "Broadcom STB SATA PHY driver"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC
+config PHY_BRCM_SATA
+ tristate "Broadcom SATA PHY driver"
+ depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || COMPILE_TEST
depends on OF
select GENERIC_PHY
+ default ARCH_BCM_IPROC
help
- Enable this to support the SATA3 PHY on 28nm or 40nm Broadcom STB SoCs.
- Likely useful only with CONFIG_SATA_BRCMSTB enabled.
+ Enable this to support the Broadcom SATA PHY.
+ If unsure, say N.
config PHY_CYGNUS_PCIE
tristate "Broadcom Cygnus PCIe PHY driver"
@@ -421,4 +432,6 @@ config PHY_CYGNUS_PCIE
Enable this to support the Broadcom Cygnus PCIe PHY.
If unsure, say N.
+source "drivers/phy/tegra/Kconfig"
+
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 24596a96a..9c3e73cca 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_GENERIC_PHY) += phy-core.o
+obj-$(CONFIG_PHY_BCM_NS_USB2) += phy-bcm-ns-usb2.o
obj-$(CONFIG_PHY_BERLIN_USB) += phy-berlin-usb.o
obj-$(CONFIG_PHY_BERLIN_SATA) += phy-berlin-sata.o
obj-$(CONFIG_PHY_DM816X_USB) += phy-dm816x-usb.o
@@ -49,6 +50,8 @@ obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-20nm.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-14nm.o
obj-$(CONFIG_PHY_TUSB1210) += phy-tusb1210.o
-obj-$(CONFIG_PHY_BRCMSTB_SATA) += phy-brcmstb-sata.o
+obj-$(CONFIG_PHY_BRCM_SATA) += phy-brcm-sata.o
obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o
+
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
diff --git a/drivers/phy/phy-bcm-ns-usb2.c b/drivers/phy/phy-bcm-ns-usb2.c
new file mode 100644
index 000000000..58dff80e9
--- /dev/null
+++ b/drivers/phy/phy-bcm-ns-usb2.c
@@ -0,0 +1,137 @@
+/*
+ * Broadcom Northstar USB 2.0 PHY Driver
+ *
+ * Copyright (C) 2016 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bcma/bcma.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct bcm_ns_usb2 {
+ struct device *dev;
+ struct clk *ref_clk;
+ struct phy *phy;
+ void __iomem *dmu;
+};
+
+static int bcm_ns_usb2_phy_init(struct phy *phy)
+{
+ struct bcm_ns_usb2 *usb2 = phy_get_drvdata(phy);
+ struct device *dev = usb2->dev;
+ void __iomem *dmu = usb2->dmu;
+ u32 ref_clk_rate, usb2ctl, usb_pll_ndiv, usb_pll_pdiv;
+ int err = 0;
+
+ err = clk_prepare_enable(usb2->ref_clk);
+ if (err < 0) {
+ dev_err(dev, "Failed to prepare ref clock: %d\n", err);
+ goto err_out;
+ }
+
+ ref_clk_rate = clk_get_rate(usb2->ref_clk);
+ if (!ref_clk_rate) {
+ dev_err(dev, "Failed to get ref clock rate\n");
+ err = -EINVAL;
+ goto err_clk_off;
+ }
+
+ usb2ctl = readl(dmu + BCMA_DMU_CRU_USB2_CONTROL);
+
+ if (usb2ctl & BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK) {
+ usb_pll_pdiv = usb2ctl;
+ usb_pll_pdiv &= BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK;
+ usb_pll_pdiv >>= BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_SHIFT;
+ } else {
+ usb_pll_pdiv = 1 << 3;
+ }
+
+ /* Calculate ndiv for the fixed 1920 MHz rate required by the USB2 PHY */
+ usb_pll_ndiv = (1920000000 * usb_pll_pdiv) / ref_clk_rate;
+
+ /* Unlock DMU PLL settings with some magic value */
+ writel(0x0000ea68, dmu + BCMA_DMU_CRU_CLKSET_KEY);
+
+ /* Write USB 2.0 PLL control setting */
+ usb2ctl &= ~BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_MASK;
+ usb2ctl |= usb_pll_ndiv << BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_SHIFT;
+ writel(usb2ctl, dmu + BCMA_DMU_CRU_USB2_CONTROL);
+
+ /* Lock DMU PLL settings */
+ writel(0x00000000, dmu + BCMA_DMU_CRU_CLKSET_KEY);
+
+err_clk_off:
+ clk_disable_unprepare(usb2->ref_clk);
+err_out:
+ return err;
+}
+
+static const struct phy_ops ops = {
+ .init = bcm_ns_usb2_phy_init,
+ .owner = THIS_MODULE,
+};
+
+static int bcm_ns_usb2_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bcm_ns_usb2 *usb2;
+ struct resource *res;
+ struct phy_provider *phy_provider;
+
+ usb2 = devm_kzalloc(&pdev->dev, sizeof(*usb2), GFP_KERNEL);
+ if (!usb2)
+ return -ENOMEM;
+ usb2->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmu");
+ usb2->dmu = devm_ioremap_resource(dev, res);
+ if (IS_ERR(usb2->dmu)) {
+ dev_err(dev, "Failed to map DMU regs\n");
+ return PTR_ERR(usb2->dmu);
+ }
+
+ usb2->ref_clk = devm_clk_get(dev, "phy-ref-clk");
+ if (IS_ERR(usb2->ref_clk)) {
+ dev_err(dev, "Clock not defined\n");
+ return PTR_ERR(usb2->ref_clk);
+ }
+
+ usb2->phy = devm_phy_create(dev, NULL, &ops);
+ if (IS_ERR(usb2->phy))
+ return PTR_ERR(usb2->phy);
+
+ phy_set_drvdata(usb2->phy, usb2);
+ platform_set_drvdata(pdev, usb2);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
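+
+/*
+ * A device tree node this driver could bind to, assuming the names the
+ * code looks up ("dmu" memory resource, "phy-ref-clk" clock); the unit
+ * address and clock specifier are illustrative only:
+ *
+ *	usb2-phy@1800c000 {
+ *		compatible = "brcm,ns-usb2-phy";
+ *		reg = <0x1800c000 0x1000>;
+ *		reg-names = "dmu";
+ *		clocks = <&genpll 4>;
+ *		clock-names = "phy-ref-clk";
+ *		#phy-cells = <0>;
+ *	};
+ */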
+
+static const struct of_device_id bcm_ns_usb2_id_table[] = {
+ { .compatible = "brcm,ns-usb2-phy", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, bcm_ns_usb2_id_table);
+
+static struct platform_driver bcm_ns_usb2_driver = {
+ .probe = bcm_ns_usb2_probe,
+ .driver = {
+ .name = "bcm_ns_usb2",
+ .of_match_table = bcm_ns_usb2_id_table,
+ },
+};
+module_platform_driver(bcm_ns_usb2_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-brcm-sata.c b/drivers/phy/phy-brcm-sata.c
new file mode 100644
index 000000000..6c4c5cb79
--- /dev/null
+++ b/drivers/phy/phy-brcm-sata.c
@@ -0,0 +1,412 @@
+/*
+ * Broadcom SATA3 AHCI Controller PHY Driver
+ *
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#define SATA_PCB_BANK_OFFSET 0x23c
+#define SATA_PCB_REG_OFFSET(ofs) ((ofs) * 4)
+
+#define MAX_PORTS 2
+
+/* Register offset between PHYs in PCB space */
+#define SATA_PCB_REG_28NM_SPACE_SIZE 0x1000
+
+/* Older SATA PHYs duplicate the per-port registers within a single map,
+ * rather than having a separate map per port.
+ */
+#define SATA_PCB_REG_40NM_SPACE_SIZE 0x10
+
+/* Register offset between PHYs in PHY control space */
+#define SATA_PHY_CTRL_REG_28NM_SPACE_SIZE 0x8
+
+enum brcm_sata_phy_version {
+ BRCM_SATA_PHY_STB_28NM,
+ BRCM_SATA_PHY_STB_40NM,
+ BRCM_SATA_PHY_IPROC_NS2,
+};
+
+struct brcm_sata_port {
+ int portnum;
+ struct phy *phy;
+ struct brcm_sata_phy *phy_priv;
+ bool ssc_en;
+};
+
+struct brcm_sata_phy {
+ struct device *dev;
+ void __iomem *phy_base;
+ void __iomem *ctrl_base;
+ enum brcm_sata_phy_version version;
+
+ struct brcm_sata_port phys[MAX_PORTS];
+};
+
+enum sata_phy_regs {
+ BLOCK0_REG_BANK = 0x000,
+ BLOCK0_XGXSSTATUS = 0x81,
+ BLOCK0_XGXSSTATUS_PLL_LOCK = BIT(12),
+ BLOCK0_SPARE = 0x8d,
+ BLOCK0_SPARE_OOB_CLK_SEL_MASK = 0x3,
+ BLOCK0_SPARE_OOB_CLK_SEL_REFBY2 = 0x1,
+
+ PLL_REG_BANK_0 = 0x050,
+ PLL_REG_BANK_0_PLLCONTROL_0 = 0x81,
+
+ PLL1_REG_BANK = 0x060,
+ PLL1_ACTRL2 = 0x82,
+ PLL1_ACTRL3 = 0x83,
+ PLL1_ACTRL4 = 0x84,
+
+ OOB_REG_BANK = 0x150,
+ OOB_CTRL1 = 0x80,
+ OOB_CTRL1_BURST_MAX_MASK = 0xf,
+ OOB_CTRL1_BURST_MAX_SHIFT = 12,
+ OOB_CTRL1_BURST_MIN_MASK = 0xf,
+ OOB_CTRL1_BURST_MIN_SHIFT = 8,
+ OOB_CTRL1_WAKE_IDLE_MAX_MASK = 0xf,
+ OOB_CTRL1_WAKE_IDLE_MAX_SHIFT = 4,
+ OOB_CTRL1_WAKE_IDLE_MIN_MASK = 0xf,
+ OOB_CTRL1_WAKE_IDLE_MIN_SHIFT = 0,
+ OOB_CTRL2 = 0x81,
+ OOB_CTRL2_SEL_ENA_SHIFT = 15,
+ OOB_CTRL2_SEL_ENA_RC_SHIFT = 14,
+ OOB_CTRL2_RESET_IDLE_MAX_MASK = 0x3f,
+ OOB_CTRL2_RESET_IDLE_MAX_SHIFT = 8,
+ OOB_CTRL2_BURST_CNT_MASK = 0x3,
+ OOB_CTRL2_BURST_CNT_SHIFT = 6,
+ OOB_CTRL2_RESET_IDLE_MIN_MASK = 0x3f,
+ OOB_CTRL2_RESET_IDLE_MIN_SHIFT = 0,
+
+ TXPMD_REG_BANK = 0x1a0,
+ TXPMD_CONTROL1 = 0x81,
+ TXPMD_CONTROL1_TX_SSC_EN_FRC = BIT(0),
+ TXPMD_CONTROL1_TX_SSC_EN_FRC_VAL = BIT(1),
+ TXPMD_TX_FREQ_CTRL_CONTROL1 = 0x82,
+ TXPMD_TX_FREQ_CTRL_CONTROL2 = 0x83,
+ TXPMD_TX_FREQ_CTRL_CONTROL2_FMIN_MASK = 0x3ff,
+ TXPMD_TX_FREQ_CTRL_CONTROL3 = 0x84,
+ TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK = 0x3ff,
+};
+
+enum sata_phy_ctrl_regs {
+ PHY_CTRL_1 = 0x0,
+ PHY_CTRL_1_RESET = BIT(0),
+};
+
+static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port)
+{
+ struct brcm_sata_phy *priv = port->phy_priv;
+ u32 size = 0;
+
+ switch (priv->version) {
+ case BRCM_SATA_PHY_STB_28NM:
+ case BRCM_SATA_PHY_IPROC_NS2:
+ size = SATA_PCB_REG_28NM_SPACE_SIZE;
+ break;
+ case BRCM_SATA_PHY_STB_40NM:
+ size = SATA_PCB_REG_40NM_SPACE_SIZE;
+ break;
+ default:
+ dev_err(priv->dev, "invalid phy version\n");
+ break;
+ }
+
+ return priv->phy_base + (port->portnum * size);
+}
+
+static inline void __iomem *brcm_sata_ctrl_base(struct brcm_sata_port *port)
+{
+ struct brcm_sata_phy *priv = port->phy_priv;
+ u32 size = 0;
+
+ switch (priv->version) {
+ case BRCM_SATA_PHY_IPROC_NS2:
+ size = SATA_PHY_CTRL_REG_28NM_SPACE_SIZE;
+ break;
+ default:
+ dev_err(priv->dev, "invalid phy version\n");
+ break;
+ }
+
+ return priv->ctrl_base + (port->portnum * size);
+}
+
+static void brcm_sata_phy_wr(void __iomem *pcb_base, u32 bank,
+ u32 ofs, u32 msk, u32 value)
+{
+ u32 tmp;
+
+ writel(bank, pcb_base + SATA_PCB_BANK_OFFSET);
+ tmp = readl(pcb_base + SATA_PCB_REG_OFFSET(ofs));
+ tmp = (tmp & msk) | value;
+ writel(tmp, pcb_base + SATA_PCB_REG_OFFSET(ofs));
+}
+
+static u32 brcm_sata_phy_rd(void __iomem *pcb_base, u32 bank, u32 ofs)
+{
+ writel(bank, pcb_base + SATA_PCB_BANK_OFFSET);
+ return readl(pcb_base + SATA_PCB_REG_OFFSET(ofs));
+}
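+
+/*
+ * Note the mask convention in brcm_sata_phy_wr(): @msk is ANDed with the
+ * current value, so callers pass the complement of the field they are
+ * updating. For example, as brcm_stb_sata_init() does below,
+ *
+ *	brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL2,
+ *			 ~TXPMD_TX_FREQ_CTRL_CONTROL2_FMIN_MASK,
+ *			 STB_FMIN_VAL_DEFAULT);
+ *
+ * writes new = (old & ~FMIN_MASK) | 0x3df into the selected bank.
+ */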
+
+/* These defaults were characterized by the H/W group */
+#define STB_FMIN_VAL_DEFAULT 0x3df
+#define STB_FMAX_VAL_DEFAULT 0x3df
+#define STB_FMAX_VAL_SSC 0x83
+
+static int brcm_stb_sata_init(struct brcm_sata_port *port)
+{
+ void __iomem *base = brcm_sata_pcb_base(port);
+ struct brcm_sata_phy *priv = port->phy_priv;
+ u32 tmp;
+
+ /* override the TX spread spectrum setting */
+ tmp = TXPMD_CONTROL1_TX_SSC_EN_FRC_VAL | TXPMD_CONTROL1_TX_SSC_EN_FRC;
+ brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_CONTROL1, ~tmp, tmp);
+
+ /* set fixed min freq */
+ brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL2,
+ ~TXPMD_TX_FREQ_CTRL_CONTROL2_FMIN_MASK,
+ STB_FMIN_VAL_DEFAULT);
+
+ /* set fixed max freq depending on SSC config */
+ if (port->ssc_en) {
+ dev_info(priv->dev, "enabling SSC on port%d\n", port->portnum);
+ tmp = STB_FMAX_VAL_SSC;
+ } else {
+ tmp = STB_FMAX_VAL_DEFAULT;
+ }
+
+ brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL3,
+ ~TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK, tmp);
+
+ return 0;
+}
+
+/* NS2 SATA PLL1 defaults were characterized by H/W group */
+#define NS2_PLL1_ACTRL2_MAGIC 0x1df8
+#define NS2_PLL1_ACTRL3_MAGIC 0x2b00
+#define NS2_PLL1_ACTRL4_MAGIC 0x8824
+
+static int brcm_ns2_sata_init(struct brcm_sata_port *port)
+{
+ int try;
+ unsigned int val;
+ void __iomem *base = brcm_sata_pcb_base(port);
+ void __iomem *ctrl_base = brcm_sata_ctrl_base(port);
+ struct device *dev = port->phy_priv->dev;
+
+ /* Configure OOB control */
+ val = 0x0;
+ val |= (0xc << OOB_CTRL1_BURST_MAX_SHIFT);
+ val |= (0x4 << OOB_CTRL1_BURST_MIN_SHIFT);
+ val |= (0x9 << OOB_CTRL1_WAKE_IDLE_MAX_SHIFT);
+ val |= (0x3 << OOB_CTRL1_WAKE_IDLE_MIN_SHIFT);
+ brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL1, 0x0, val);
+ val = 0x0;
+ val |= (0x1b << OOB_CTRL2_RESET_IDLE_MAX_SHIFT);
+ val |= (0x2 << OOB_CTRL2_BURST_CNT_SHIFT);
+ val |= (0x9 << OOB_CTRL2_RESET_IDLE_MIN_SHIFT);
+ brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL2, 0x0, val);
+
+ /* Configure PHY PLL register bank 1 */
+ val = NS2_PLL1_ACTRL2_MAGIC;
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL2, 0x0, val);
+ val = NS2_PLL1_ACTRL3_MAGIC;
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL3, 0x0, val);
+ val = NS2_PLL1_ACTRL4_MAGIC;
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL4, 0x0, val);
+
+ /* Configure PHY BLOCK0 register bank */
+ /* Set oob_clk_sel to refclk/2 */
+ brcm_sata_phy_wr(base, BLOCK0_REG_BANK, BLOCK0_SPARE,
+ ~BLOCK0_SPARE_OOB_CLK_SEL_MASK,
+ BLOCK0_SPARE_OOB_CLK_SEL_REFBY2);
+
+ /* Strobe PHY reset using PHY control register */
+ writel(PHY_CTRL_1_RESET, ctrl_base + PHY_CTRL_1);
+ mdelay(1);
+ writel(0x0, ctrl_base + PHY_CTRL_1);
+ mdelay(1);
+
+ /* Wait for PHY PLL lock by polling pll_lock bit */
+ try = 50;
+ while (try) {
+ val = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+ BLOCK0_XGXSSTATUS);
+ if (val & BLOCK0_XGXSSTATUS_PLL_LOCK)
+ break;
+ msleep(20);
+ try--;
+ }
+ if (!try) {
+ /* PLL did not lock; give up */
+ dev_err(dev, "port%d PLL did not lock\n", port->portnum);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "port%d initialized\n", port->portnum);
+
+ return 0;
+}
+
+static int brcm_sata_phy_init(struct phy *phy)
+{
+ int rc;
+ struct brcm_sata_port *port = phy_get_drvdata(phy);
+
+ switch (port->phy_priv->version) {
+ case BRCM_SATA_PHY_STB_28NM:
+ case BRCM_SATA_PHY_STB_40NM:
+ rc = brcm_stb_sata_init(port);
+ break;
+ case BRCM_SATA_PHY_IPROC_NS2:
+ rc = brcm_ns2_sata_init(port);
+ break;
+ default:
+ rc = -ENODEV;
+ }
+
+ return rc;
+}
+
+static const struct phy_ops phy_ops = {
+ .init = brcm_sata_phy_init,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id brcm_sata_phy_of_match[] = {
+ { .compatible = "brcm,bcm7445-sata-phy",
+ .data = (void *)BRCM_SATA_PHY_STB_28NM },
+ { .compatible = "brcm,bcm7425-sata-phy",
+ .data = (void *)BRCM_SATA_PHY_STB_40NM },
+ { .compatible = "brcm,iproc-ns2-sata-phy",
+ .data = (void *)BRCM_SATA_PHY_IPROC_NS2 },
+ {},
+};
+MODULE_DEVICE_TABLE(of, brcm_sata_phy_of_match);
+
+static int brcm_sata_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dn = dev->of_node, *child;
+ const struct of_device_id *of_id;
+ struct brcm_sata_phy *priv;
+ struct resource *res;
+ struct phy_provider *provider;
+ int ret, count = 0;
+
+ if (of_get_child_count(dn) == 0)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_set_drvdata(dev, priv);
+ priv->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+ priv->phy_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->phy_base))
+ return PTR_ERR(priv->phy_base);
+
+ of_id = of_match_node(brcm_sata_phy_of_match, dn);
+ if (of_id)
+ priv->version = (enum brcm_sata_phy_version)of_id->data;
+ else
+ priv->version = BRCM_SATA_PHY_STB_28NM;
+
+ if (priv->version == BRCM_SATA_PHY_IPROC_NS2) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "phy-ctrl");
+ priv->ctrl_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->ctrl_base))
+ return PTR_ERR(priv->ctrl_base);
+ }
+
+ for_each_available_child_of_node(dn, child) {
+ unsigned int id;
+ struct brcm_sata_port *port;
+
+ if (of_property_read_u32(child, "reg", &id)) {
+ dev_err(dev, "missing reg property in node %s\n",
+ child->name);
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ if (id >= MAX_PORTS) {
+ dev_err(dev, "invalid reg: %u\n", id);
+ ret = -EINVAL;
+ goto put_child;
+ }
+ if (priv->phys[id].phy) {
+ dev_err(dev, "already registered port %u\n", id);
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ port = &priv->phys[id];
+ port->portnum = id;
+ port->phy_priv = priv;
+ port->phy = devm_phy_create(dev, child, &phy_ops);
+ port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc");
+ if (IS_ERR(port->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ ret = PTR_ERR(port->phy);
+ goto put_child;
+ }
+
+ phy_set_drvdata(port->phy, port);
+ count++;
+ }
+
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(dev, "could not register PHY provider\n");
+ return PTR_ERR(provider);
+ }
+
+ dev_info(dev, "registered %d port(s)\n", count);
+
+ return 0;
+put_child:
+ of_node_put(child);
+ return ret;
+}
+
+static struct platform_driver brcm_sata_phy_driver = {
+ .probe = brcm_sata_phy_probe,
+ .driver = {
+ .of_match_table = brcm_sata_phy_of_match,
+ .name = "brcm-sata-phy",
+ }
+};
+module_platform_driver(brcm_sata_phy_driver);
+
+MODULE_DESCRIPTION("Broadcom SATA PHY driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marc Carino");
+MODULE_AUTHOR("Brian Norris");
+MODULE_ALIAS("platform:phy-brcm-sata");
diff --git a/drivers/phy/phy-brcmstb-sata.c b/drivers/phy/phy-brcmstb-sata.c
deleted file mode 100644
index a23172ff4..000000000
--- a/drivers/phy/phy-brcmstb-sata.c
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Broadcom SATA3 AHCI Controller PHY Driver
- *
- * Copyright © 2009-2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
-
-#define SATA_MDIO_BANK_OFFSET 0x23c
-#define SATA_MDIO_REG_OFFSET(ofs) ((ofs) * 4)
-
-#define MAX_PORTS 2
-
-/* Register offset between PHYs in PCB space */
-#define SATA_MDIO_REG_28NM_SPACE_SIZE 0x1000
-
-/* The older SATA PHY registers duplicated per port registers within the map,
- * rather than having a separate map per port.
- */
-#define SATA_MDIO_REG_40NM_SPACE_SIZE 0x10
-
-enum brcm_sata_phy_version {
- BRCM_SATA_PHY_28NM,
- BRCM_SATA_PHY_40NM,
-};
-
-struct brcm_sata_port {
- int portnum;
- struct phy *phy;
- struct brcm_sata_phy *phy_priv;
- bool ssc_en;
-};
-
-struct brcm_sata_phy {
- struct device *dev;
- void __iomem *phy_base;
- enum brcm_sata_phy_version version;
-
- struct brcm_sata_port phys[MAX_PORTS];
-};
-
-enum sata_mdio_phy_regs {
- PLL_REG_BANK_0 = 0x50,
- PLL_REG_BANK_0_PLLCONTROL_0 = 0x81,
-
- TXPMD_REG_BANK = 0x1a0,
- TXPMD_CONTROL1 = 0x81,
- TXPMD_CONTROL1_TX_SSC_EN_FRC = BIT(0),
- TXPMD_CONTROL1_TX_SSC_EN_FRC_VAL = BIT(1),
- TXPMD_TX_FREQ_CTRL_CONTROL1 = 0x82,
- TXPMD_TX_FREQ_CTRL_CONTROL2 = 0x83,
- TXPMD_TX_FREQ_CTRL_CONTROL2_FMIN_MASK = 0x3ff,
- TXPMD_TX_FREQ_CTRL_CONTROL3 = 0x84,
- TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK = 0x3ff,
-};
-
-static inline void __iomem *brcm_sata_phy_base(struct brcm_sata_port *port)
-{
- struct brcm_sata_phy *priv = port->phy_priv;
- u32 offset = 0;
-
- if (priv->version == BRCM_SATA_PHY_28NM)
- offset = SATA_MDIO_REG_28NM_SPACE_SIZE;
- else if (priv->version == BRCM_SATA_PHY_40NM)
- offset = SATA_MDIO_REG_40NM_SPACE_SIZE;
- else
- dev_err(priv->dev, "invalid phy version\n");
-
- return priv->phy_base + (port->portnum * offset);
-}
-
-static void brcm_sata_mdio_wr(void __iomem *addr, u32 bank, u32 ofs,
- u32 msk, u32 value)
-{
- u32 tmp;
-
- writel(bank, addr + SATA_MDIO_BANK_OFFSET);
- tmp = readl(addr + SATA_MDIO_REG_OFFSET(ofs));
- tmp = (tmp & msk) | value;
- writel(tmp, addr + SATA_MDIO_REG_OFFSET(ofs));
-}
-
-/* These defaults were characterized by H/W group */
-#define FMIN_VAL_DEFAULT 0x3df
-#define FMAX_VAL_DEFAULT 0x3df
-#define FMAX_VAL_SSC 0x83
-
-static void brcm_sata_cfg_ssc(struct brcm_sata_port *port)
-{
- void __iomem *base = brcm_sata_phy_base(port);
- struct brcm_sata_phy *priv = port->phy_priv;
- u32 tmp;
-
- /* override the TX spread spectrum setting */
- tmp = TXPMD_CONTROL1_TX_SSC_EN_FRC_VAL | TXPMD_CONTROL1_TX_SSC_EN_FRC;
- brcm_sata_mdio_wr(base, TXPMD_REG_BANK, TXPMD_CONTROL1, ~tmp, tmp);
-
- /* set fixed min freq */
- brcm_sata_mdio_wr(base, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL2,
- ~TXPMD_TX_FREQ_CTRL_CONTROL2_FMIN_MASK,
- FMIN_VAL_DEFAULT);
-
- /* set fixed max freq depending on SSC config */
- if (port->ssc_en) {
- dev_info(priv->dev, "enabling SSC on port %d\n", port->portnum);
- tmp = FMAX_VAL_SSC;
- } else {
- tmp = FMAX_VAL_DEFAULT;
- }
-
- brcm_sata_mdio_wr(base, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL3,
- ~TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK, tmp);
-}
-
-static int brcm_sata_phy_init(struct phy *phy)
-{
- struct brcm_sata_port *port = phy_get_drvdata(phy);
-
- brcm_sata_cfg_ssc(port);
-
- return 0;
-}
-
-static const struct phy_ops phy_ops = {
- .init = brcm_sata_phy_init,
- .owner = THIS_MODULE,
-};
-
-static const struct of_device_id brcm_sata_phy_of_match[] = {
- { .compatible = "brcm,bcm7445-sata-phy",
- .data = (void *)BRCM_SATA_PHY_28NM },
- { .compatible = "brcm,bcm7425-sata-phy",
- .data = (void *)BRCM_SATA_PHY_40NM },
- {},
-};
-MODULE_DEVICE_TABLE(of, brcm_sata_phy_of_match);
-
-static int brcm_sata_phy_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *dn = dev->of_node, *child;
- const struct of_device_id *of_id;
- struct brcm_sata_phy *priv;
- struct resource *res;
- struct phy_provider *provider;
- int ret, count = 0;
-
- if (of_get_child_count(dn) == 0)
- return -ENODEV;
-
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- dev_set_drvdata(dev, priv);
- priv->dev = dev;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- priv->phy_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->phy_base))
- return PTR_ERR(priv->phy_base);
-
- of_id = of_match_node(brcm_sata_phy_of_match, dn);
- if (of_id)
- priv->version = (enum brcm_sata_phy_version)of_id->data;
- else
- priv->version = BRCM_SATA_PHY_28NM;
-
- for_each_available_child_of_node(dn, child) {
- unsigned int id;
- struct brcm_sata_port *port;
-
- if (of_property_read_u32(child, "reg", &id)) {
- dev_err(dev, "missing reg property in node %s\n",
- child->name);
- ret = -EINVAL;
- goto put_child;
- }
-
- if (id >= MAX_PORTS) {
- dev_err(dev, "invalid reg: %u\n", id);
- ret = -EINVAL;
- goto put_child;
- }
- if (priv->phys[id].phy) {
- dev_err(dev, "already registered port %u\n", id);
- ret = -EINVAL;
- goto put_child;
- }
-
- port = &priv->phys[id];
- port->portnum = id;
- port->phy_priv = priv;
- port->phy = devm_phy_create(dev, child, &phy_ops);
- port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc");
- if (IS_ERR(port->phy)) {
- dev_err(dev, "failed to create PHY\n");
- ret = PTR_ERR(port->phy);
- goto put_child;
- }
-
- phy_set_drvdata(port->phy, port);
- count++;
- }
-
- provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(provider)) {
- dev_err(dev, "could not register PHY provider\n");
- return PTR_ERR(provider);
- }
-
- dev_info(dev, "registered %d port(s)\n", count);
-
- return 0;
-put_child:
- of_node_put(child);
- return ret;
-}
-
-static struct platform_driver brcm_sata_phy_driver = {
- .probe = brcm_sata_phy_probe,
- .driver = {
- .of_match_table = brcm_sata_phy_of_match,
- .name = "brcmstb-sata-phy",
- }
-};
-module_platform_driver(brcm_sata_phy_driver);
-
-MODULE_DESCRIPTION("Broadcom STB SATA PHY driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marc Carino");
-MODULE_AUTHOR("Brian Norris");
-MODULE_ALIAS("platform:phy-brcmstb-sata");
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index e7e574dc6..b72e9a3b6 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -141,7 +141,7 @@ static struct phy_provider *of_phy_provider_lookup(struct device_node *node)
if (phy_provider->dev->of_node == node)
return phy_provider;
- for_each_child_of_node(phy_provider->dev->of_node, child)
+ for_each_child_of_node(phy_provider->children, child)
if (child == node)
return phy_provider;
}
@@ -811,24 +811,59 @@ EXPORT_SYMBOL_GPL(devm_phy_destroy);
/**
* __of_phy_provider_register() - create/register phy provider with the framework
* @dev: struct device of the phy provider
+ * @children: device node containing children (if different from dev->of_node)
* @owner: the module owner containing of_xlate
* @of_xlate: function pointer to obtain phy instance from phy provider
*
* Creates struct phy_provider from dev and of_xlate function pointer.
* This is used in the case of dt boot for finding the phy instance from
* phy provider.
+ *
+ * If the PHY provider doesn't nest children directly but uses a separate
+ * child node to contain the individual children, the @children parameter
+ * can be used to override the default. If NULL, the default (dev->of_node)
+ * will be used. If non-NULL, the device node must be a child (or further
+ * descendant) of dev->of_node. Otherwise an ERR_PTR()-encoded -EINVAL
+ * error code is returned.
*/
struct phy_provider *__of_phy_provider_register(struct device *dev,
- struct module *owner, struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ struct device_node *children, struct module *owner,
+ struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
{
struct phy_provider *phy_provider;
+ /*
+ * If specified, the device node containing the children must itself
+ * be the provider's device node or a child (or further descendant)
+ * thereof.
+ */
+ if (children) {
+ struct device_node *parent = of_node_get(children), *next;
+
+ while (parent) {
+ if (parent == dev->of_node)
+ break;
+
+ next = of_get_parent(parent);
+ of_node_put(parent);
+ parent = next;
+ }
+
+ if (!parent)
+ return ERR_PTR(-EINVAL);
+
+ of_node_put(parent);
+ } else {
+ children = dev->of_node;
+ }
+
phy_provider = kzalloc(sizeof(*phy_provider), GFP_KERNEL);
if (!phy_provider)
return ERR_PTR(-ENOMEM);
phy_provider->dev = dev;
+ phy_provider->children = of_node_get(children);
phy_provider->owner = owner;
phy_provider->of_xlate = of_xlate;
@@ -854,8 +889,9 @@ EXPORT_SYMBOL_GPL(__of_phy_provider_register);
* on the devres data, then, devres data is freed.
*/
struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
- struct module *owner, struct phy * (*of_xlate)(struct device *dev,
- struct of_phandle_args *args))
+ struct device_node *children, struct module *owner,
+ struct phy * (*of_xlate)(struct device *dev,
+ struct of_phandle_args *args))
{
struct phy_provider **ptr, *phy_provider;
@@ -863,7 +899,8 @@ struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
if (!ptr)
return ERR_PTR(-ENOMEM);
- phy_provider = __of_phy_provider_register(dev, owner, of_xlate);
+ phy_provider = __of_phy_provider_register(dev, children, owner,
+ of_xlate);
if (!IS_ERR(phy_provider)) {
*ptr = phy_provider;
devres_add(dev, ptr);
@@ -888,6 +925,7 @@ void of_phy_provider_unregister(struct phy_provider *phy_provider)
mutex_lock(&phy_provider_mutex);
list_del(&phy_provider->list);
+ of_node_put(phy_provider->children);
kfree(phy_provider);
mutex_unlock(&phy_provider_mutex);
}
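A short sketch of the new children parameter in use, assuming a provider whose PHY nodes sit under a dedicated container node (the "pads" name is hypothetical). The core takes its own reference on the node, so the caller may drop its local one:

	struct device_node *pads;
	struct phy_provider *provider;

	pads = of_get_child_by_name(dev->of_node, "pads");
	provider = __devm_of_phy_provider_register(dev, pads, THIS_MODULE,
						   of_phy_simple_xlate);
	of_node_put(pads);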
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index 2a54caba9..8b851f718 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -1,7 +1,7 @@
/*
* Samsung S5P/EXYNOS SoC series MIPI CSIS/DSIM DPHY driver
*
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Copyright (C) 2013,2016 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -13,96 +13,280 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon/exynos4-pmu.h>
+#include <linux/mfd/syscon/exynos5-pmu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <linux/mfd/syscon.h>
-/* MIPI_PHYn_CONTROL reg. offset (for base address from ioremap): n = 0..1 */
-#define EXYNOS_MIPI_PHY_CONTROL(n) ((n) * 4)
-
enum exynos_mipi_phy_id {
+ EXYNOS_MIPI_PHY_ID_NONE = -1,
EXYNOS_MIPI_PHY_ID_CSIS0,
EXYNOS_MIPI_PHY_ID_DSIM0,
EXYNOS_MIPI_PHY_ID_CSIS1,
EXYNOS_MIPI_PHY_ID_DSIM1,
+ EXYNOS_MIPI_PHY_ID_CSIS2,
EXYNOS_MIPI_PHYS_NUM
};
-#define is_mipi_dsim_phy_id(id) \
- ((id) == EXYNOS_MIPI_PHY_ID_DSIM0 || (id) == EXYNOS_MIPI_PHY_ID_DSIM1)
+enum exynos_mipi_phy_regmap_id {
+ EXYNOS_MIPI_REGMAP_PMU,
+ EXYNOS_MIPI_REGMAP_DISP,
+ EXYNOS_MIPI_REGMAP_CAM0,
+ EXYNOS_MIPI_REGMAP_CAM1,
+ EXYNOS_MIPI_REGMAPS_NUM
+};
+
+struct mipi_phy_device_desc {
+ int num_phys;
+ int num_regmaps;
+ const char *regmap_names[EXYNOS_MIPI_REGMAPS_NUM];
+ struct exynos_mipi_phy_desc {
+ enum exynos_mipi_phy_id coupled_phy_id;
+ u32 enable_val;
+ unsigned int enable_reg;
+ enum exynos_mipi_phy_regmap_id enable_map;
+ u32 resetn_val;
+ unsigned int resetn_reg;
+ enum exynos_mipi_phy_regmap_id resetn_map;
+ } phys[EXYNOS_MIPI_PHYS_NUM];
+};
+
+static const struct mipi_phy_device_desc s5pv210_mipi_phy = {
+ .num_regmaps = 1,
+ .regmap_names = {"syscon"},
+ .num_phys = 4,
+ .phys = {
+ {
+ /* EXYNOS_MIPI_PHY_ID_CSIS0 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM0,
+ .enable_val = EXYNOS4_MIPI_PHY_ENABLE,
+ .enable_reg = EXYNOS4_MIPI_PHY_CONTROL(0),
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = EXYNOS4_MIPI_PHY_SRESETN,
+ .resetn_reg = EXYNOS4_MIPI_PHY_CONTROL(0),
+ .resetn_map = EXYNOS_MIPI_REGMAP_PMU,
+ }, {
+ /* EXYNOS_MIPI_PHY_ID_DSIM0 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS0,
+ .enable_val = EXYNOS4_MIPI_PHY_ENABLE,
+ .enable_reg = EXYNOS4_MIPI_PHY_CONTROL(0),
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = EXYNOS4_MIPI_PHY_MRESETN,
+ .resetn_reg = EXYNOS4_MIPI_PHY_CONTROL(0),
+ .resetn_map = EXYNOS_MIPI_REGMAP_PMU,
+ }, {
+ /* EXYNOS_MIPI_PHY_ID_CSIS1 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM1,
+ .enable_val = EXYNOS4_MIPI_PHY_ENABLE,
+ .enable_reg = EXYNOS4_MIPI_PHY_CONTROL(1),
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = EXYNOS4_MIPI_PHY_SRESETN,
+ .resetn_reg = EXYNOS4_MIPI_PHY_CONTROL(1),
+ .resetn_map = EXYNOS_MIPI_REGMAP_PMU,
+ }, {
+ /* EXYNOS_MIPI_PHY_ID_DSIM1 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS1,
+ .enable_val = EXYNOS4_MIPI_PHY_ENABLE,
+ .enable_reg = EXYNOS4_MIPI_PHY_CONTROL(1),
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = EXYNOS4_MIPI_PHY_MRESETN,
+ .resetn_reg = EXYNOS4_MIPI_PHY_CONTROL(1),
+ .resetn_map = EXYNOS_MIPI_REGMAP_PMU,
+ },
+ },
+};
+
+static const struct mipi_phy_device_desc exynos5420_mipi_phy = {
+ .num_regmaps = 1,
+ .regmap_names = {"syscon"},
+ .num_phys = 5,
+ .phys = {
+ {
+ /* EXYNOS_MIPI_PHY_ID_CSIS0 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM0,
+ .enable_val = EXYNOS5_PHY_ENABLE,
+ .enable_reg = EXYNOS5420_MIPI_PHY0_CONTROL,
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = EXYNOS5_MIPI_PHY_S_RESETN,
+ .resetn_reg = EXYNOS5420_MIPI_PHY0_CONTROL,
+ .resetn_map = EXYNOS_MIPI_REGMAP_PMU,
+ }, {
+ /* EXYNOS_MIPI_PHY_ID_DSIM0 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS0,
+ .enable_val = EXYNOS5_PHY_ENABLE,
+ .enable_reg = EXYNOS5420_MIPI_PHY0_CONTROL,
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = EXYNOS5_MIPI_PHY_M_RESETN,
+ .resetn_reg = EXYNOS5420_MIPI_PHY0_CONTROL,
+ .resetn_map = EXYNOS_MIPI_REGMAP_PMU,
+ }, {
+ /* EXYNOS_MIPI_PHY_ID_CSIS1 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM1,
+ .enable_val = EXYNOS5_PHY_ENABLE,
+ .enable_reg = EXYNOS5420_MIPI_PHY1_CONTROL,
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = EXYNOS5_MIPI_PHY_S_RESETN,
+ .resetn_reg = EXYNOS5420_MIPI_PHY1_CONTROL,
+ .resetn_map = EXYNOS_MIPI_REGMAP_PMU,
+ }, {
+ /* EXYNOS_MIPI_PHY_ID_DSIM1 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS1,
+ .enable_val = EXYNOS5_PHY_ENABLE,
+ .enable_reg = EXYNOS5420_MIPI_PHY1_CONTROL,
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = EXYNOS5_MIPI_PHY_M_RESETN,
+ .resetn_reg = EXYNOS5420_MIPI_PHY1_CONTROL,
+ .resetn_map = EXYNOS_MIPI_REGMAP_PMU,
+ }, {
+ /* EXYNOS_MIPI_PHY_ID_CSIS2 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE,
+ .enable_val = EXYNOS5_PHY_ENABLE,
+ .enable_reg = EXYNOS5420_MIPI_PHY2_CONTROL,
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = EXYNOS5_MIPI_PHY_S_RESETN,
+ .resetn_reg = EXYNOS5420_MIPI_PHY2_CONTROL,
+ .resetn_map = EXYNOS_MIPI_REGMAP_PMU,
+ },
+ },
+};
+
+#define EXYNOS5433_SYSREG_DISP_MIPI_PHY 0x100C
+#define EXYNOS5433_SYSREG_CAM0_MIPI_DPHY_CON 0x1014
+#define EXYNOS5433_SYSREG_CAM1_MIPI_DPHY_CON 0x1020
+
+static const struct mipi_phy_device_desc exynos5433_mipi_phy = {
+ .num_regmaps = 4,
+ .regmap_names = {
+ "samsung,pmu-syscon",
+ "samsung,disp-sysreg",
+ "samsung,cam0-sysreg",
+ "samsung,cam1-sysreg"
+ },
+ .num_phys = 5,
+ .phys = {
+ {
+ /* EXYNOS_MIPI_PHY_ID_CSIS0 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_DSIM0,
+ .enable_val = EXYNOS5_PHY_ENABLE,
+ .enable_reg = EXYNOS5433_MIPI_PHY0_CONTROL,
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = BIT(0),
+ .resetn_reg = EXYNOS5433_SYSREG_CAM0_MIPI_DPHY_CON,
+ .resetn_map = EXYNOS_MIPI_REGMAP_CAM0,
+ }, {
+ /* EXYNOS_MIPI_PHY_ID_DSIM0 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_CSIS0,
+ .enable_val = EXYNOS5_PHY_ENABLE,
+ .enable_reg = EXYNOS5433_MIPI_PHY0_CONTROL,
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = BIT(0),
+ .resetn_reg = EXYNOS5433_SYSREG_DISP_MIPI_PHY,
+ .resetn_map = EXYNOS_MIPI_REGMAP_DISP,
+ }, {
+ /* EXYNOS_MIPI_PHY_ID_CSIS1 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE,
+ .enable_val = EXYNOS5_PHY_ENABLE,
+ .enable_reg = EXYNOS5433_MIPI_PHY1_CONTROL,
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = BIT(1),
+ .resetn_reg = EXYNOS5433_SYSREG_CAM0_MIPI_DPHY_CON,
+ .resetn_map = EXYNOS_MIPI_REGMAP_CAM0,
+ }, {
+ /* EXYNOS_MIPI_PHY_ID_DSIM1 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE,
+ .enable_val = EXYNOS5_PHY_ENABLE,
+ .enable_reg = EXYNOS5433_MIPI_PHY1_CONTROL,
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = BIT(1),
+ .resetn_reg = EXYNOS5433_SYSREG_DISP_MIPI_PHY,
+ .resetn_map = EXYNOS_MIPI_REGMAP_DISP,
+ }, {
+ /* EXYNOS_MIPI_PHY_ID_CSIS2 */
+ .coupled_phy_id = EXYNOS_MIPI_PHY_ID_NONE,
+ .enable_val = EXYNOS5_PHY_ENABLE,
+ .enable_reg = EXYNOS5433_MIPI_PHY2_CONTROL,
+ .enable_map = EXYNOS_MIPI_REGMAP_PMU,
+ .resetn_val = BIT(0),
+ .resetn_reg = EXYNOS5433_SYSREG_CAM1_MIPI_DPHY_CON,
+ .resetn_map = EXYNOS_MIPI_REGMAP_CAM1,
+ },
+ },
+};
struct exynos_mipi_video_phy {
+ struct regmap *regmaps[EXYNOS_MIPI_REGMAPS_NUM];
+ int num_phys;
struct video_phy_desc {
struct phy *phy;
unsigned int index;
+ const struct exynos_mipi_phy_desc *data;
} phys[EXYNOS_MIPI_PHYS_NUM];
spinlock_t slock;
- void __iomem *regs;
- struct regmap *regmap;
};
-static int __set_phy_state(struct exynos_mipi_video_phy *state,
- enum exynos_mipi_phy_id id, unsigned int on)
+static inline int __is_running(const struct exynos_mipi_phy_desc *data,
+ struct exynos_mipi_video_phy *state)
{
- const unsigned int offset = EXYNOS4_MIPI_PHY_CONTROL(id / 2);
- void __iomem *addr;
- u32 val, reset;
+ u32 val;
+ int ret;
- if (is_mipi_dsim_phy_id(id))
- reset = EXYNOS4_MIPI_PHY_MRESETN;
- else
- reset = EXYNOS4_MIPI_PHY_SRESETN;
+ ret = regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
+ if (ret)
+ return 0;
+
+ return val & data->resetn_val;
+}
+
+static int __set_phy_state(const struct exynos_mipi_phy_desc *data,
+ struct exynos_mipi_video_phy *state, unsigned int on)
+{
+ u32 val;
spin_lock(&state->slock);
- if (!IS_ERR(state->regmap)) {
- regmap_read(state->regmap, offset, &val);
- if (on)
- val |= reset;
- else
- val &= ~reset;
- regmap_write(state->regmap, offset, val);
- if (on)
- val |= EXYNOS4_MIPI_PHY_ENABLE;
- else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK))
- val &= ~EXYNOS4_MIPI_PHY_ENABLE;
- regmap_write(state->regmap, offset, val);
- } else {
- addr = state->regs + EXYNOS_MIPI_PHY_CONTROL(id / 2);
-
- val = readl(addr);
- if (on)
- val |= reset;
- else
- val &= ~reset;
- writel(val, addr);
- /* Clear ENABLE bit only if MRESETN, SRESETN bits are not set */
- if (on)
- val |= EXYNOS4_MIPI_PHY_ENABLE;
- else if (!(val & EXYNOS4_MIPI_PHY_RESET_MASK))
- val &= ~EXYNOS4_MIPI_PHY_ENABLE;
-
- writel(val, addr);
+ /* disable in PMU sysreg */
+ if (!on && data->coupled_phy_id >= 0 &&
+ !__is_running(state->phys[data->coupled_phy_id].data, state)) {
+ regmap_read(state->regmaps[data->enable_map], data->enable_reg,
+ &val);
+ val &= ~data->enable_val;
+ regmap_write(state->regmaps[data->enable_map], data->enable_reg,
+ val);
+ }
+
+ /* PHY reset */
+ regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
+ val = on ? (val | data->resetn_val) : (val & ~data->resetn_val);
+ regmap_write(state->regmaps[data->resetn_map], data->resetn_reg, val);
+
+ /* enable in PMU sysreg */
+ if (on) {
+ regmap_read(state->regmaps[data->enable_map], data->enable_reg,
+ &val);
+ val |= data->enable_val;
+ regmap_write(state->regmaps[data->enable_map], data->enable_reg,
+ val);
}
spin_unlock(&state->slock);
+
return 0;
}
#define to_mipi_video_phy(desc) \
- container_of((desc), struct exynos_mipi_video_phy, phys[(desc)->index]);
+ container_of((desc), struct exynos_mipi_video_phy, phys[(desc)->index])
static int exynos_mipi_video_phy_power_on(struct phy *phy)
{
struct video_phy_desc *phy_desc = phy_get_drvdata(phy);
struct exynos_mipi_video_phy *state = to_mipi_video_phy(phy_desc);
- return __set_phy_state(state, phy_desc->index, 1);
+ return __set_phy_state(phy_desc->data, state, 1);
}
static int exynos_mipi_video_phy_power_off(struct phy *phy)
@@ -110,7 +294,7 @@ static int exynos_mipi_video_phy_power_off(struct phy *phy)
struct video_phy_desc *phy_desc = phy_get_drvdata(phy);
struct exynos_mipi_video_phy *state = to_mipi_video_phy(phy_desc);
- return __set_phy_state(state, phy_desc->index, 0);
+ return __set_phy_state(phy_desc->data, state, 0);
}
static struct phy *exynos_mipi_video_phy_xlate(struct device *dev,
@@ -118,7 +302,7 @@ static struct phy *exynos_mipi_video_phy_xlate(struct device *dev,
{
struct exynos_mipi_video_phy *state = dev_get_drvdata(dev);
- if (WARN_ON(args->args[0] >= EXYNOS_MIPI_PHYS_NUM))
+ if (WARN_ON(args->args[0] >= state->num_phys))
return ERR_PTR(-ENODEV);
return state->phys[args->args[0]].phy;
@@ -132,32 +316,33 @@ static const struct phy_ops exynos_mipi_video_phy_ops = {
static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
{
+ const struct mipi_phy_device_desc *phy_dev;
struct exynos_mipi_video_phy *state;
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct phy_provider *phy_provider;
unsigned int i;
+ phy_dev = of_device_get_match_data(dev);
+ if (!phy_dev)
+ return -ENODEV;
+
state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
- state->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
- if (IS_ERR(state->regmap)) {
- struct resource *res;
-
- dev_info(dev, "regmap lookup failed: %ld\n",
- PTR_ERR(state->regmap));
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- state->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(state->regs))
- return PTR_ERR(state->regs);
+ for (i = 0; i < phy_dev->num_regmaps; i++) {
+ state->regmaps[i] = syscon_regmap_lookup_by_phandle(np,
+ phy_dev->regmap_names[i]);
+ if (IS_ERR(state->regmaps[i]))
+ return PTR_ERR(state->regmaps[i]);
}
+ state->num_phys = phy_dev->num_phys;
+ spin_lock_init(&state->slock);
dev_set_drvdata(dev, state);
- spin_lock_init(&state->slock);
- for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) {
+ for (i = 0; i < state->num_phys; i++) {
struct phy *phy = devm_phy_create(dev, NULL,
&exynos_mipi_video_phy_ops);
if (IS_ERR(phy)) {
@@ -167,6 +352,7 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
state->phys[i].phy = phy;
state->phys[i].index = i;
+ state->phys[i].data = &phy_dev->phys[i];
phy_set_drvdata(phy, &state->phys[i]);
}
@@ -177,8 +363,17 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
}
static const struct of_device_id exynos_mipi_video_phy_of_match[] = {
- { .compatible = "samsung,s5pv210-mipi-video-phy" },
- { },
+ {
+ .compatible = "samsung,s5pv210-mipi-video-phy",
+ .data = &s5pv210_mipi_phy,
+ }, {
+ .compatible = "samsung,exynos5420-mipi-video-phy",
+ .data = &exynos5420_mipi_phy,
+ }, {
+ .compatible = "samsung,exynos5433-mipi-video-phy",
+ .data = &exynos5433_mipi_phy,
+ },
+ { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, exynos_mipi_video_phy_of_match);
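
Each per-SoC descriptor above pairs a regmap with a register and bit value for both the enable and the reset control, which __set_phy_state() drives through open-coded regmap_read()/regmap_write() pairs. Where only a single field changes, regmap_update_bits() performs the same locked read-modify-write in one call; a sketch of the reset step expressed that way, reusing the driver's own types from this patch:

#include <linux/regmap.h>

/* Sketch only, not part of the patch: the PHY-reset step via regmap_update_bits(). */
static int exynos_mipi_phy_set_resetn(const struct exynos_mipi_phy_desc *data,
				      struct exynos_mipi_video_phy *state,
				      bool on)
{
	return regmap_update_bits(state->regmaps[data->resetn_map],
				  data->resetn_reg, data->resetn_val,
				  on ? data->resetn_val : 0);
}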
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index 3acd2a180..213e2e153 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -1143,7 +1143,8 @@ static int miphy28lp_probe_resets(struct device_node *node,
struct miphy28lp_dev *miphy_dev = miphy_phy->phydev;
int err;
- miphy_phy->miphy_rst = of_reset_control_get(node, "miphy-sw-rst");
+ miphy_phy->miphy_rst =
+ of_reset_control_get_shared(node, "miphy-sw-rst");
if (IS_ERR(miphy_phy->miphy_rst)) {
dev_err(miphy_dev->dev,
diff --git a/drivers/phy/phy-mt65xx-usb3.c b/drivers/phy/phy-mt65xx-usb3.c
index c0e7b4b0c..4d85e730c 100644
--- a/drivers/phy/phy-mt65xx-usb3.c
+++ b/drivers/phy/phy-mt65xx-usb3.c
@@ -134,6 +134,11 @@
#define U3P_SR_COEF_DIVISOR 1000
#define U3P_FM_DET_CYCLE_CNT 1024
+struct mt65xx_phy_pdata {
+ /* avoid RX sensitivity level degradation only for mt8173 */
+ bool avoid_rx_sen_degradation;
+};
+
struct mt65xx_phy_instance {
struct phy *phy;
void __iomem *port_base;
@@ -145,6 +150,7 @@ struct mt65xx_u3phy {
struct device *dev;
void __iomem *sif_base; /* include sif2, but exclude port's */
	struct clk *u3phya_ref;	/* reference clock of usb3 analog phy */
+ const struct mt65xx_phy_pdata *pdata;
struct mt65xx_phy_instance **phys;
int nphys;
};
@@ -241,22 +247,26 @@ static void phy_instance_init(struct mt65xx_u3phy *u3phy,
tmp = readl(port_base + U3P_U2PHYACR4);
tmp &= ~P2C_U2_GPIO_CTR_MSK;
writel(tmp, port_base + U3P_U2PHYACR4);
+ }
- tmp = readl(port_base + U3P_USBPHYACR2);
- tmp |= PA2_RG_SIF_U2PLL_FORCE_EN;
- writel(tmp, port_base + U3P_USBPHYACR2);
-
- tmp = readl(port_base + U3D_U2PHYDCR0);
- tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, port_base + U3D_U2PHYDCR0);
- } else {
- tmp = readl(port_base + U3D_U2PHYDCR0);
- tmp |= P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, port_base + U3D_U2PHYDCR0);
-
- tmp = readl(port_base + U3P_U2PHYDTM0);
- tmp |= P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM;
- writel(tmp, port_base + U3P_U2PHYDTM0);
+ if (u3phy->pdata->avoid_rx_sen_degradation) {
+ if (!index) {
+ tmp = readl(port_base + U3P_USBPHYACR2);
+ tmp |= PA2_RG_SIF_U2PLL_FORCE_EN;
+ writel(tmp, port_base + U3P_USBPHYACR2);
+
+ tmp = readl(port_base + U3D_U2PHYDCR0);
+ tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
+ writel(tmp, port_base + U3D_U2PHYDCR0);
+ } else {
+ tmp = readl(port_base + U3D_U2PHYDCR0);
+ tmp |= P2C_RG_SIF_U2PLL_FORCE_ON;
+ writel(tmp, port_base + U3D_U2PHYDCR0);
+
+ tmp = readl(port_base + U3P_U2PHYDTM0);
+ tmp |= P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM;
+ writel(tmp, port_base + U3P_U2PHYDTM0);
+ }
}
tmp = readl(port_base + U3P_USBPHYACR6);
@@ -318,7 +328,7 @@ static void phy_instance_power_on(struct mt65xx_u3phy *u3phy,
tmp |= XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD;
writel(tmp, u3phy->sif_base + U3P_XTALCTL3);
- /* [mt8173]switch 100uA current to SSUSB */
+ /* switch 100uA current to SSUSB */
tmp = readl(port_base + U3P_USBPHYACR5);
tmp |= PA5_RG_U2_HS_100U_U3_EN;
writel(tmp, port_base + U3P_USBPHYACR5);
@@ -335,7 +345,7 @@ static void phy_instance_power_on(struct mt65xx_u3phy *u3phy,
tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(4);
writel(tmp, port_base + U3P_USBPHYACR5);
- if (index) {
+ if (u3phy->pdata->avoid_rx_sen_degradation && index) {
tmp = readl(port_base + U3D_U2PHYDCR0);
tmp |= P2C_RG_SIF_U2PLL_FORCE_ON;
writel(tmp, port_base + U3D_U2PHYDCR0);
@@ -386,7 +396,9 @@ static void phy_instance_power_off(struct mt65xx_u3phy *u3phy,
tmp = readl(port_base + U3P_U3_PHYA_REG0);
tmp &= ~P3A_RG_U3_VUSB10_ON;
writel(tmp, port_base + U3P_U3_PHYA_REG0);
- } else {
+ }
+
+ if (u3phy->pdata->avoid_rx_sen_degradation && index) {
tmp = readl(port_base + U3D_U2PHYDCR0);
tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
writel(tmp, port_base + U3D_U2PHYDCR0);
@@ -402,7 +414,7 @@ static void phy_instance_exit(struct mt65xx_u3phy *u3phy,
u32 index = instance->index;
u32 tmp;
- if (index) {
+ if (u3phy->pdata->avoid_rx_sen_degradation && index) {
tmp = readl(port_base + U3D_U2PHYDCR0);
tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
writel(tmp, port_base + U3D_U2PHYDCR0);
@@ -502,8 +514,24 @@ static struct phy_ops mt65xx_u3phy_ops = {
.owner = THIS_MODULE,
};
+static const struct mt65xx_phy_pdata mt2701_pdata = {
+ .avoid_rx_sen_degradation = false,
+};
+
+static const struct mt65xx_phy_pdata mt8173_pdata = {
+ .avoid_rx_sen_degradation = true,
+};
+
+static const struct of_device_id mt65xx_u3phy_id_table[] = {
+ { .compatible = "mediatek,mt2701-u3phy", .data = &mt2701_pdata },
+ { .compatible = "mediatek,mt8173-u3phy", .data = &mt8173_pdata },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mt65xx_u3phy_id_table);
+
static int mt65xx_u3phy_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *child_np;
@@ -513,10 +541,15 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
struct resource res;
int port, retval;
+ match = of_match_node(mt65xx_u3phy_id_table, pdev->dev.of_node);
+ if (!match)
+ return -EINVAL;
+
u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL);
if (!u3phy)
return -ENOMEM;
+ u3phy->pdata = match->data;
u3phy->nphys = of_get_child_count(np);
u3phy->phys = devm_kcalloc(dev, u3phy->nphys,
sizeof(*u3phy->phys), GFP_KERNEL);
@@ -587,12 +620,6 @@ put_child:
return retval;
}
-static const struct of_device_id mt65xx_u3phy_id_table[] = {
- { .compatible = "mediatek,mt8173-u3phy", },
- { },
-};
-MODULE_DEVICE_TABLE(of, mt65xx_u3phy_id_table);
-
static struct platform_driver mt65xx_u3phy_driver = {
.probe = mt65xx_u3phy_probe,
.driver = {
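
Relocating the of_device_id table ahead of probe() lets each compatible entry carry per-SoC platform data; the mt2701 entry exists so the quirk flag simply reads false there. The probe pairs of_match_node() with an explicit table reference, while the generic of_device_get_match_data() helper (already used by the exynos-mipi-video change above) expresses the same lookup more compactly. A sketch under that assumption:

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Sketch only, not part of the patch: equivalent per-SoC data lookup. */
static const struct mt65xx_phy_pdata *
mt65xx_u3phy_get_pdata(struct platform_device *pdev)
{
	/* Returns NULL when no match data is attached to the compatible. */
	return of_device_get_match_data(&pdev->dev);
}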
diff --git a/drivers/phy/phy-rcar-gen2.c b/drivers/phy/phy-rcar-gen2.c
index c7a05996d..97d4dd6ea 100644
--- a/drivers/phy/phy-rcar-gen2.c
+++ b/drivers/phy/phy-rcar-gen2.c
@@ -195,6 +195,7 @@ static const struct of_device_id rcar_gen2_phy_match_table[] = {
{ .compatible = "renesas,usb-phy-r8a7790" },
{ .compatible = "renesas,usb-phy-r8a7791" },
{ .compatible = "renesas,usb-phy-r8a7794" },
+ { .compatible = "renesas,rcar-gen2-usb-phy" },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_gen2_phy_match_table);
diff --git a/drivers/phy/phy-rcar-gen3-usb2.c b/drivers/phy/phy-rcar-gen3-usb2.c
index bc4f7dd82..4be3f5dbb 100644
--- a/drivers/phy/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/phy-rcar-gen3-usb2.c
@@ -12,6 +12,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/extcon.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -19,6 +20,7 @@
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
/******* USB2.0 Host registers (original offset is +0x200) *******/
#define USB2_INT_ENABLE 0x000
@@ -74,20 +76,17 @@
#define USB2_ADPCTRL_IDPULLUP BIT(5) /* 1 = ID sampling is enabled */
#define USB2_ADPCTRL_DRVVBUS BIT(4)
-struct rcar_gen3_data {
- void __iomem *base;
- struct clk *clk;
-};
-
struct rcar_gen3_chan {
- struct rcar_gen3_data usb2;
+ void __iomem *base;
+ struct extcon_dev *extcon;
struct phy *phy;
+ struct regulator *vbus;
bool has_otg;
};
static void rcar_gen3_set_host_mode(struct rcar_gen3_chan *ch, int host)
{
- void __iomem *usb2_base = ch->usb2.base;
+ void __iomem *usb2_base = ch->base;
u32 val = readl(usb2_base + USB2_COMMCTRL);
dev_vdbg(&ch->phy->dev, "%s: %08x, %d\n", __func__, val, host);
@@ -100,7 +99,7 @@ static void rcar_gen3_set_host_mode(struct rcar_gen3_chan *ch, int host)
static void rcar_gen3_set_linectrl(struct rcar_gen3_chan *ch, int dp, int dm)
{
- void __iomem *usb2_base = ch->usb2.base;
+ void __iomem *usb2_base = ch->base;
u32 val = readl(usb2_base + USB2_LINECTRL1);
dev_vdbg(&ch->phy->dev, "%s: %08x, %d, %d\n", __func__, val, dp, dm);
@@ -114,7 +113,7 @@ static void rcar_gen3_set_linectrl(struct rcar_gen3_chan *ch, int dp, int dm)
static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus)
{
- void __iomem *usb2_base = ch->usb2.base;
+ void __iomem *usb2_base = ch->base;
u32 val = readl(usb2_base + USB2_ADPCTRL);
dev_vdbg(&ch->phy->dev, "%s: %08x, %d\n", __func__, val, vbus);
@@ -130,6 +129,9 @@ static void rcar_gen3_init_for_host(struct rcar_gen3_chan *ch)
rcar_gen3_set_linectrl(ch, 1, 1);
rcar_gen3_set_host_mode(ch, 1);
rcar_gen3_enable_vbus_ctrl(ch, 1);
+
+ extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, true);
+ extcon_set_cable_state_(ch->extcon, EXTCON_USB, false);
}
static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
@@ -137,28 +139,19 @@ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
rcar_gen3_set_linectrl(ch, 0, 1);
rcar_gen3_set_host_mode(ch, 0);
rcar_gen3_enable_vbus_ctrl(ch, 0);
-}
-static bool rcar_gen3_check_vbus(struct rcar_gen3_chan *ch)
-{
- return !!(readl(ch->usb2.base + USB2_ADPCTRL) &
- USB2_ADPCTRL_OTGSESSVLD);
+ extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, false);
+ extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
}
static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
{
- return !!(readl(ch->usb2.base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
+ return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
}
static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch)
{
- bool is_host = true;
-
- /* B-device? */
- if (rcar_gen3_check_id(ch) && rcar_gen3_check_vbus(ch))
- is_host = false;
-
- if (is_host)
+ if (!rcar_gen3_check_id(ch))
rcar_gen3_init_for_host(ch);
else
rcar_gen3_init_for_peri(ch);
@@ -166,7 +159,7 @@ static void rcar_gen3_device_recognition(struct rcar_gen3_chan *ch)
static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
{
- void __iomem *usb2_base = ch->usb2.base;
+ void __iomem *usb2_base = ch->base;
u32 val;
val = readl(usb2_base + USB2_VBCTRL);
@@ -187,7 +180,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
static int rcar_gen3_phy_usb2_init(struct phy *p)
{
struct rcar_gen3_chan *channel = phy_get_drvdata(p);
- void __iomem *usb2_base = channel->usb2.base;
+ void __iomem *usb2_base = channel->base;
/* Initialize USB2 part */
writel(USB2_INT_ENABLE_INIT, usb2_base + USB2_INT_ENABLE);
@@ -205,7 +198,7 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
{
struct rcar_gen3_chan *channel = phy_get_drvdata(p);
- writel(0, channel->usb2.base + USB2_INT_ENABLE);
+ writel(0, channel->base + USB2_INT_ENABLE);
return 0;
}
@@ -213,8 +206,15 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p)
static int rcar_gen3_phy_usb2_power_on(struct phy *p)
{
struct rcar_gen3_chan *channel = phy_get_drvdata(p);
- void __iomem *usb2_base = channel->usb2.base;
+ void __iomem *usb2_base = channel->base;
u32 val;
+ int ret;
+
+ if (channel->vbus) {
+ ret = regulator_enable(channel->vbus);
+ if (ret)
+ return ret;
+ }
val = readl(usb2_base + USB2_USBCTR);
val |= USB2_USBCTR_PLL_RST;
@@ -225,17 +225,29 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p)
return 0;
}
+static int rcar_gen3_phy_usb2_power_off(struct phy *p)
+{
+ struct rcar_gen3_chan *channel = phy_get_drvdata(p);
+ int ret = 0;
+
+ if (channel->vbus)
+ ret = regulator_disable(channel->vbus);
+
+ return ret;
+}
+
static struct phy_ops rcar_gen3_phy_usb2_ops = {
.init = rcar_gen3_phy_usb2_init,
.exit = rcar_gen3_phy_usb2_exit,
.power_on = rcar_gen3_phy_usb2_power_on,
+ .power_off = rcar_gen3_phy_usb2_power_off,
.owner = THIS_MODULE,
};
static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
{
struct rcar_gen3_chan *ch = _ch;
- void __iomem *usb2_base = ch->usb2.base;
+ void __iomem *usb2_base = ch->base;
u32 status = readl(usb2_base + USB2_OBINTSTA);
irqreturn_t ret = IRQ_NONE;
@@ -251,10 +263,17 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
{ .compatible = "renesas,usb2-phy-r8a7795" },
+ { .compatible = "renesas,rcar-gen3-usb2-phy" },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_gen3_phy_usb2_match_table);
+static const unsigned int rcar_gen3_phy_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_NONE,
+};
+
static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -273,18 +292,30 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- channel->usb2.base = devm_ioremap_resource(dev, res);
- if (IS_ERR(channel->usb2.base))
- return PTR_ERR(channel->usb2.base);
+ channel->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(channel->base))
+ return PTR_ERR(channel->base);
/* call request_irq for OTG */
irq = platform_get_irq(pdev, 0);
if (irq >= 0) {
+ int ret;
+
irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
IRQF_SHARED, dev_name(dev), channel);
if (irq < 0)
dev_err(dev, "No irq handler (%d)\n", irq);
channel->has_otg = true;
+ channel->extcon = devm_extcon_dev_allocate(dev,
+ rcar_gen3_phy_cable);
+ if (IS_ERR(channel->extcon))
+ return PTR_ERR(channel->extcon);
+
+ ret = devm_extcon_dev_register(dev, channel->extcon);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register extcon\n");
+ return ret;
+ }
}
/* devm_phy_create() will call pm_runtime_enable(dev); */
@@ -294,6 +325,13 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
return PTR_ERR(channel->phy);
}
+ channel->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (IS_ERR(channel->vbus)) {
+ if (PTR_ERR(channel->vbus) == -EPROBE_DEFER)
+ return PTR_ERR(channel->vbus);
+ channel->vbus = NULL;
+ }
+
phy_set_drvdata(channel->phy, channel);
provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
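
With the extcon device registered, the PHY broadcasts host/peripheral cable state, so a controller or charger driver can subscribe to role changes. A hedged consumer sketch; the foo_* names are illustrative, and such a consumer would typically locate the extcon device via extcon_get_edev_by_phandle():

#include <linux/extcon.h>
#include <linux/notifier.h>

/* Sketch only, not part of the patch: react to EXTCON_USB_HOST attach/detach. */
static int foo_host_notifier(struct notifier_block *nb, unsigned long event,
			     void *ptr)
{
	/* event is non-zero while the EXTCON_USB_HOST cable is attached */
	return NOTIFY_DONE;
}

static struct notifier_block foo_host_nb = {
	.notifier_call = foo_host_notifier,
};

static int foo_subscribe(struct extcon_dev *edev)
{
	return extcon_register_notifier(edev, EXTCON_USB_HOST, &foo_host_nb);
}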
diff --git a/drivers/phy/phy-rockchip-dp.c b/drivers/phy/phy-rockchip-dp.c
index 793ecb6d8..8b267a746 100644
--- a/drivers/phy/phy-rockchip-dp.c
+++ b/drivers/phy/phy-rockchip-dp.c
@@ -90,7 +90,7 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
return -ENODEV;
dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
- if (IS_ERR(dp))
+ if (!dp)
return -ENOMEM;
dp->dev = dev;
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index f62d89906..d60b149cf 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -216,7 +216,7 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
init.parent_names = &clk_name;
init.num_parents = 1;
} else {
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.parent_names = NULL;
init.num_parents = 0;
}
diff --git a/drivers/phy/phy-stih407-usb.c b/drivers/phy/phy-stih407-usb.c
index 1d5ae5f8e..b1f44ab66 100644
--- a/drivers/phy/phy-stih407-usb.c
+++ b/drivers/phy/phy-stih407-usb.c
@@ -105,13 +105,13 @@ static int stih407_usb2_picophy_probe(struct platform_device *pdev)
phy_dev->dev = dev;
dev_set_drvdata(dev, phy_dev);
- phy_dev->rstc = devm_reset_control_get(dev, "global");
+ phy_dev->rstc = devm_reset_control_get_shared(dev, "global");
if (IS_ERR(phy_dev->rstc)) {
dev_err(dev, "failed to ctrl picoPHY reset\n");
return PTR_ERR(phy_dev->rstc);
}
- phy_dev->rstport = devm_reset_control_get(dev, "port");
+ phy_dev->rstport = devm_reset_control_get_exclusive(dev, "port");
if (IS_ERR(phy_dev->rstport)) {
dev_err(dev, "failed to ctrl picoPHY reset\n");
return PTR_ERR(phy_dev->rstport);
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index bae54f7a1..de3101fbb 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -175,7 +175,7 @@ static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
{
struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
u32 temp, usbc_bit = BIT(phy->index * 2);
- void *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
+ void __iomem *phyctl = phy_data->base + phy_data->cfg->phyctl_offset;
int i;
mutex_lock(&phy_data->mutex);
@@ -514,9 +514,9 @@ static int sun4i_usb_phy_remove(struct platform_device *pdev)
if (data->vbus_power_nb_registered)
power_supply_unreg_notifier(&data->vbus_power_nb);
- if (data->id_det_irq >= 0)
+ if (data->id_det_irq > 0)
devm_free_irq(dev, data->id_det_irq, data);
- if (data->vbus_det_irq >= 0)
+ if (data->vbus_det_irq > 0)
devm_free_irq(dev, data->vbus_det_irq, data);
cancel_delayed_work_sync(&data->detect);
@@ -645,11 +645,11 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
- if ((data->id_det_gpio && data->id_det_irq < 0) ||
- (data->vbus_det_gpio && data->vbus_det_irq < 0))
+ if ((data->id_det_gpio && data->id_det_irq <= 0) ||
+ (data->vbus_det_gpio && data->vbus_det_irq <= 0))
data->phy0_poll = true;
- if (data->id_det_irq >= 0) {
+ if (data->id_det_irq > 0) {
ret = devm_request_irq(dev, data->id_det_irq,
sun4i_usb_phy0_id_vbus_det_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
@@ -660,7 +660,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
}
}
- if (data->vbus_det_irq >= 0) {
+ if (data->vbus_det_irq > 0) {
ret = devm_request_irq(dev, data->vbus_det_irq,
sun4i_usb_phy0_id_vbus_det_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
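
gpiod_to_irq() returns a negative errno on failure, and IRQ 0 is not a valid Linux interrupt number, which is what the tightened "> 0" and "<= 0" checks above encode. The pattern, sketched with illustrative names:

#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

/* Sketch only, not part of the patch: treat irq <= 0 as "no usable IRQ". */
static int foo_request_det_irq(struct device *dev, struct gpio_desc *gpiod,
			       irq_handler_t handler, void *data)
{
	int irq = gpiod_to_irq(gpiod);

	if (irq <= 0)
		return irq ? irq : -ENXIO;

	return devm_request_irq(dev, irq, handler,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				dev_name(dev), data);
}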
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 0a477d24c..bf46844dc 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -293,11 +293,18 @@ static int ti_pipe3_init(struct phy *x)
ret = ti_pipe3_dpll_wait_lock(phy);
}
- /* Program the DPLL only if not locked */
+ /* SATA has issues if re-programmed when locked */
val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
- if (!(val & PLL_LOCK))
- if (ti_pipe3_dpll_program(phy))
- return -EINVAL;
+ if ((val & PLL_LOCK) && of_device_is_compatible(phy->dev->of_node,
+ "ti,phy-pipe3-sata"))
+ return ret;
+
+ /* Program the DPLL */
+ ret = ti_pipe3_dpll_program(phy);
+ if (ret) {
+ ti_pipe3_disable_clocks(phy);
+ return -EINVAL;
+ }
return ret;
}
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 6b6af6cba..d9b10a39a 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -463,7 +463,8 @@ static int twl4030_phy_power_on(struct phy *phy)
twl4030_usb_set_mode(twl, twl->usb_mode);
if (twl->usb_mode == T2_USB_MODE_ULPI)
twl4030_i2c_access(twl, 0);
- schedule_delayed_work(&twl->id_workaround_work, 0);
+ twl->linkstat = MUSB_UNKNOWN;
+ schedule_delayed_work(&twl->id_workaround_work, HZ);
return 0;
}
@@ -537,6 +538,7 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
struct twl4030_usb *twl = _twl;
enum musb_vbus_id_status status;
bool status_changed = false;
+ int err;
status = twl4030_usb_linkstat(twl);
@@ -567,7 +569,9 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
pm_runtime_mark_last_busy(twl->dev);
pm_runtime_put_autosuspend(twl->dev);
}
- musb_mailbox(status);
+ err = musb_mailbox(status);
+ if (err)
+ twl->linkstat = MUSB_UNKNOWN;
}
/* don't schedule during sleep - irq works right then */
@@ -595,7 +599,8 @@ static int twl4030_phy_init(struct phy *phy)
struct twl4030_usb *twl = phy_get_drvdata(phy);
pm_runtime_get_sync(twl->dev);
- schedule_delayed_work(&twl->id_workaround_work, 0);
+ twl->linkstat = MUSB_UNKNOWN;
+ schedule_delayed_work(&twl->id_workaround_work, HZ);
pm_runtime_mark_last_busy(twl->dev);
pm_runtime_put_autosuspend(twl->dev);
@@ -763,7 +768,8 @@ static int twl4030_usb_remove(struct platform_device *pdev)
if (cable_present(twl->linkstat))
pm_runtime_put_noidle(twl->dev);
pm_runtime_mark_last_busy(twl->dev);
- pm_runtime_put_sync_suspend(twl->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(twl->dev);
pm_runtime_disable(twl->dev);
/* autogate 60MHz ULPI clock,
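
Disabling autosuspend before the final put ensures no delayed autosuspend fires mid-teardown, and pm_runtime_put_sync() then drops the reference immediately instead of queueing it. The resulting remove-path ordering, sketched with an illustrative name:

#include <linux/pm_runtime.h>

/* Sketch only, not part of the patch: runtime-PM teardown ordering on remove(). */
static void foo_pm_teardown(struct device *dev)
{
	pm_runtime_dont_use_autosuspend(dev);	/* cancel pending autosuspends */
	pm_runtime_put_sync(dev);		/* drop our reference synchronously */
	pm_runtime_disable(dev);
}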
diff --git a/drivers/phy/tegra/Kconfig b/drivers/phy/tegra/Kconfig
new file mode 100644
index 000000000..a3b1de953
--- /dev/null
+++ b/drivers/phy/tegra/Kconfig
@@ -0,0 +1,8 @@
+config PHY_TEGRA_XUSB
+ tristate "NVIDIA Tegra XUSB pad controller driver"
+ depends on ARCH_TEGRA
+ help
+	  Choose this option if you have an NVIDIA Tegra SoC with an XUSB pad controller.
+
+ To compile this driver as a module, choose M here: the module will
+ be called phy-tegra-xusb.
diff --git a/drivers/phy/tegra/Makefile b/drivers/phy/tegra/Makefile
new file mode 100644
index 000000000..898589238
--- /dev/null
+++ b/drivers/phy/tegra/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_PHY_TEGRA_XUSB) += phy-tegra-xusb.o
+
+phy-tegra-xusb-y += xusb.o
+phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_124_SOC) += xusb-tegra124.o
+phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_132_SOC) += xusb-tegra124.o
+phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_210_SOC) += xusb-tegra210.o
diff --git a/drivers/phy/tegra/xusb-tegra124.c b/drivers/phy/tegra/xusb-tegra124.c
new file mode 100644
index 000000000..119957249
--- /dev/null
+++ b/drivers/phy/tegra/xusb-tegra124.c
@@ -0,0 +1,1752 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "xusb.h"
+
+#define FUSE_SKU_CALIB_HS_CURR_LEVEL_PADX_SHIFT(x) ((x) ? 15 : 0)
+#define FUSE_SKU_CALIB_HS_CURR_LEVEL_PAD_MASK 0x3f
+#define FUSE_SKU_CALIB_HS_IREF_CAP_SHIFT 13
+#define FUSE_SKU_CALIB_HS_IREF_CAP_MASK 0x3
+#define FUSE_SKU_CALIB_HS_SQUELCH_LEVEL_SHIFT 11
+#define FUSE_SKU_CALIB_HS_SQUELCH_LEVEL_MASK 0x3
+#define FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_SHIFT 7
+#define FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_MASK 0xf
+
+#define XUSB_PADCTL_USB2_PORT_CAP 0x008
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_SHIFT(x) ((x) * 4)
+#define XUSB_PADCTL_USB2_PORT_CAP_PORT_CAP_MASK 0x3
+#define XUSB_PADCTL_USB2_PORT_CAP_DISABLED 0x0
+#define XUSB_PADCTL_USB2_PORT_CAP_HOST 0x1
+#define XUSB_PADCTL_USB2_PORT_CAP_DEVICE 0x2
+#define XUSB_PADCTL_USB2_PORT_CAP_OTG 0x3
+
+#define XUSB_PADCTL_SS_PORT_MAP 0x014
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(x) (1 << (((x) * 4) + 3))
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_SHIFT(x) ((x) * 4)
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(x) (0x7 << ((x) * 4))
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(x, v) (((v) & 0x7) << ((x) * 4))
+#define XUSB_PADCTL_SS_PORT_MAP_PORT_MAP_MASK 0x7
+
+#define XUSB_PADCTL_ELPG_PROGRAM 0x01c
+#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN (1 << 26)
+#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY (1 << 25)
+#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN (1 << 24)
+#define XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_VCORE_DOWN(x) (1 << (18 + (x) * 4))
+#define XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN_EARLY(x) \
+ (1 << (17 + (x) * 4))
+#define XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN(x) (1 << (16 + (x) * 4))
+
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1 0x040
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL0_LOCKDET (1 << 19)
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_REFCLK_SEL_MASK (0xf << 12)
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST (1 << 1)
+
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2 0x044
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_REFCLKBUF_EN (1 << 6)
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_EN (1 << 5)
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_SEL (1 << 4)
+
+#define XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(x) (0x058 + (x) * 4)
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_SHIFT 24
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_MASK 0xff
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_VAL 0x24
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT 16
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_MASK 0x3f
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT 8
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_MASK 0x3f
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_SHIFT 8
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_MASK 0xffff
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_VAL 0xf070
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_SHIFT 4
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_MASK 0xf
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_VAL 0xf
+
+#define XUSB_PADCTL_IOPHY_USB3_PADX_CTL4(x) (0x068 + (x) * 4)
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT 24
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_MASK 0x1f
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT 16
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_MASK 0x7f
+#define XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_VAL 0x002008ee
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL2(x) ((x) < 2 ? 0x078 + (x) * 4 : \
+ 0x0f8 + (x) * 4)
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_SHIFT 28
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_MASK 0x3
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_VAL 0x1
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL5(x) ((x) < 2 ? 0x090 + (x) * 4 : \
+ 0x11c + (x) * 4)
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL5_RX_QEYE_EN (1 << 8)
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL6(x) ((x) < 2 ? 0x098 + (x) * 4 : \
+ 0x128 + (x) * 4)
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT 24
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_G_Z_MASK 0x3f
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_TAP_MASK 0x1f
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_AMP_MASK 0x7f
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT 16
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK 0xff
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_G_Z 0x21
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_TAP 0x32
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_AMP 0x33
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_CTLE_Z 0x48
+#define XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_LATCH_G_Z 0xa1
+
+#define XUSB_PADCTL_USB2_OTG_PADX_CTL0(x) (0x0a0 + (x) * 4)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD_ZI (1 << 21)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD2 (1 << 20)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD (1 << 19)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_SHIFT 14
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_MASK 0x3
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_VAL(x) ((x) ? 0x0 : 0x3)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_SHIFT 6
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_MASK 0x3f
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_VAL 0x0e
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT 0
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_MASK 0x3f
+
+#define XUSB_PADCTL_USB2_OTG_PADX_CTL1(x) (0x0ac + (x) * 4)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_SHIFT 9
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_MASK 0x3
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT 3
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_MASK 0x7
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DR (1 << 2)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DISC_FORCE_POWERUP (1 << 1)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_CHRP_FORCE_POWERUP (1 << 0)
+
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0 0x0b8
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD (1 << 12)
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT 2
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_MASK 0x7
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_VAL 0x5
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT 0
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK 0x3
+
+#define XUSB_PADCTL_HSIC_PADX_CTL0(x) (0x0c0 + (x) * 4)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_SHIFT 12
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_MASK 0x7
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_SHIFT 8
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_MASK 0x7
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_SHIFT 4
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_MASK 0x7
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_SHIFT 0
+#define XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_MASK 0x7
+
+#define XUSB_PADCTL_HSIC_PADX_CTL1(x) (0x0c8 + (x) * 4)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_RPU_STROBE (1 << 10)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_RPU_DATA (1 << 9)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_RPD_STROBE (1 << 8)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_RPD_DATA (1 << 7)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_PD_ZI (1 << 5)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_PD_RX (1 << 4)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_PD_TRX (1 << 3)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_PD_TX (1 << 2)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_AUTO_TERM_EN (1 << 0)
+
+#define XUSB_PADCTL_HSIC_PADX_CTL2(x) (0x0d0 + (x) * 4)
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT 4
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_MASK 0x7
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT 0
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_MASK 0x7
+
+#define XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL 0x0e0
+#define XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL_STRB_TRIM_MASK 0x1f
+
+#define XUSB_PADCTL_USB3_PAD_MUX 0x134
+#define XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(x) (1 << (1 + (x)))
+#define XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(x) (1 << (6 + (x)))
+
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1 0x138
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_LOCKDET (1 << 27)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE (1 << 24)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_SHIFT 20
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_MASK 0x3
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD (1 << 3)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST (1 << 1)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ (1 << 0)
+
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2 0x13c
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_SHIFT 20
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_MASK 0xf
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_SHIFT 16
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_MASK 0xf
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_TCLKOUT_EN (1 << 12)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_TXCLKREF_SEL (1 << 4)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_SHIFT 0
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_MASK 0x7
+
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL3 0x140
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL3_RCAL_BYPASS (1 << 7)
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1 0x148
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD (1 << 1)
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ (1 << 0)
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL2 0x14c
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL5 0x158
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL6 0x15c
+
+struct tegra124_xusb_fuse_calibration {
+ u32 hs_curr_level[3];
+ u32 hs_iref_cap;
+ u32 hs_term_range_adj;
+ u32 hs_squelch_level;
+};
+
+struct tegra124_xusb_padctl {
+ struct tegra_xusb_padctl base;
+
+ struct tegra124_xusb_fuse_calibration fuse;
+};
+
+static inline struct tegra124_xusb_padctl *
+to_tegra124_xusb_padctl(struct tegra_xusb_padctl *padctl)
+{
+ return container_of(padctl, struct tegra124_xusb_padctl, base);
+}
+
+static int tegra124_xusb_padctl_enable(struct tegra_xusb_padctl *padctl)
+{
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ if (padctl->enable++ > 0)
+ goto out;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+out:
+ mutex_unlock(&padctl->lock);
+ return 0;
+}
+
+static int tegra124_xusb_padctl_disable(struct tegra_xusb_padctl *padctl)
+{
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ if (WARN_ON(padctl->enable == 0))
+ goto out;
+
+ if (--padctl->enable > 0)
+ goto out;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+out:
+ mutex_unlock(&padctl->lock);
+ return 0;
+}
+
+static int tegra124_usb3_save_context(struct tegra_xusb_padctl *padctl,
+ unsigned int index)
+{
+ struct tegra_xusb_usb3_port *port;
+ struct tegra_xusb_lane *lane;
+ u32 value, offset;
+
+ port = tegra_xusb_find_usb3_port(padctl, index);
+ if (!port)
+ return -ENODEV;
+
+ port->context_saved = true;
+ lane = port->base.lane;
+
+ if (lane->pad == padctl->pcie)
+ offset = XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL6(lane->index);
+ else
+ offset = XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL6;
+
+ value = padctl_readl(padctl, offset);
+ value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
+ value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_TAP <<
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
+ padctl_writel(padctl, value, offset);
+
+ value = padctl_readl(padctl, offset) >>
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT;
+ port->tap1 = value & XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_TAP_MASK;
+
+ value = padctl_readl(padctl, offset);
+ value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
+ value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_AMP <<
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
+ padctl_writel(padctl, value, offset);
+
+ value = padctl_readl(padctl, offset) >>
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT;
+ port->amp = value & XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_AMP_MASK;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_USB3_PADX_CTL4(index));
+ value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_MASK <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT) |
+ (XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_MASK <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT));
+ value |= (port->tap1 <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT) |
+ (port->amp <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT);
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_USB3_PADX_CTL4(index));
+
+ value = padctl_readl(padctl, offset);
+ value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
+ value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_LATCH_G_Z <<
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
+ padctl_writel(padctl, value, offset);
+
+ value = padctl_readl(padctl, offset);
+ value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
+ value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_G_Z <<
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
+ padctl_writel(padctl, value, offset);
+
+ value = padctl_readl(padctl, offset) >>
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT;
+ port->ctle_g = value &
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_G_Z_MASK;
+
+ value = padctl_readl(padctl, offset);
+ value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_MASK <<
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT);
+ value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_CTLE_Z <<
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SEL_SHIFT;
+ padctl_writel(padctl, value, offset);
+
+ value = padctl_readl(padctl, offset) >>
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_SHIFT;
+ port->ctle_z = value &
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL6_MISC_OUT_G_Z_MASK;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(index));
+ value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_MASK <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT) |
+ (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_MASK <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT));
+ value |= (port->ctle_g <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT) |
+ (port->ctle_z <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT);
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(index));
+
+ return 0;
+}
+
+static int tegra124_hsic_set_idle(struct tegra_xusb_padctl *padctl,
+ unsigned int index, bool idle)
+{
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+
+ if (idle)
+ value |= XUSB_PADCTL_HSIC_PAD_CTL1_RPD_DATA |
+ XUSB_PADCTL_HSIC_PAD_CTL1_RPU_STROBE;
+ else
+ value &= ~(XUSB_PADCTL_HSIC_PAD_CTL1_RPD_DATA |
+ XUSB_PADCTL_HSIC_PAD_CTL1_RPU_STROBE);
+
+ padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+
+ return 0;
+}
+
+#define TEGRA124_LANE(_name, _offset, _shift, _mask, _type) \
+ { \
+ .name = _name, \
+ .offset = _offset, \
+ .shift = _shift, \
+ .mask = _mask, \
+ .num_funcs = ARRAY_SIZE(tegra124_##_type##_functions), \
+ .funcs = tegra124_##_type##_functions, \
+ }
+
+static const char * const tegra124_usb2_functions[] = {
+ "snps",
+ "xusb",
+ "uart",
+};
+
+static const struct tegra_xusb_lane_soc tegra124_usb2_lanes[] = {
+ TEGRA124_LANE("usb2-0", 0x004, 0, 0x3, usb2),
+ TEGRA124_LANE("usb2-1", 0x004, 2, 0x3, usb2),
+ TEGRA124_LANE("usb2-2", 0x004, 4, 0x3, usb2),
+};
+
+static struct tegra_xusb_lane *
+tegra124_usb2_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+ unsigned int index)
+{
+ struct tegra_xusb_usb2_lane *usb2;
+ int err;
+
+ usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
+ if (!usb2)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&usb2->base.list);
+ usb2->base.soc = &pad->soc->lanes[index];
+ usb2->base.index = index;
+ usb2->base.pad = pad;
+ usb2->base.np = np;
+
+ err = tegra_xusb_lane_parse_dt(&usb2->base, np);
+ if (err < 0) {
+ kfree(usb2);
+ return ERR_PTR(err);
+ }
+
+ return &usb2->base;
+}
+
+static void tegra124_usb2_lane_remove(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
+
+ kfree(usb2);
+}
+
+static const struct tegra_xusb_lane_ops tegra124_usb2_lane_ops = {
+ .probe = tegra124_usb2_lane_probe,
+ .remove = tegra124_usb2_lane_remove,
+};
+
+static int tegra124_usb2_phy_init(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra124_xusb_padctl_enable(lane->pad->padctl);
+}
+
+static int tegra124_usb2_phy_exit(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra124_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra124_usb2_phy_power_on(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
+ struct tegra_xusb_usb2_pad *pad = to_usb2_pad(lane->pad);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra124_xusb_padctl *priv;
+ struct tegra_xusb_usb2_port *port;
+ unsigned int index = lane->index;
+ u32 value;
+ int err;
+
+ port = tegra_xusb_find_usb2_port(padctl, index);
+ if (!port) {
+ dev_err(&phy->dev, "no port found for USB2 lane %u\n", index);
+ return -ENODEV;
+ }
+
+ priv = to_tegra124_xusb_padctl(padctl);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+ value &= ~((XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK <<
+ XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT) |
+ (XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_MASK <<
+ XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT));
+ value |= (priv->fuse.hs_squelch_level <<
+ XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT) |
+ (XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_VAL <<
+ XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
+ value &= ~(XUSB_PADCTL_USB2_PORT_CAP_PORT_CAP_MASK <<
+ XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_SHIFT(index));
+ value |= XUSB_PADCTL_USB2_PORT_CAP_HOST <<
+ XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_SHIFT(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_PORT_CAP);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+ value &= ~((XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_MASK <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT) |
+ (XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_MASK <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_SHIFT) |
+ (XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_MASK <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_SHIFT) |
+ XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD |
+ XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD2 |
+ XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD_ZI);
+ value |= (priv->fuse.hs_curr_level[index] +
+ usb2->hs_curr_level_offset) <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT;
+ value |= XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_VAL <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_SLEW_SHIFT;
+ value |= XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_VAL(index) <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL0_LS_RSLEW_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+ value &= ~((XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_MASK <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT) |
+ (XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_MASK <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_SHIFT) |
+ XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DR |
+ XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_CHRP_FORCE_POWERUP |
+ XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DISC_FORCE_POWERUP);
+ value |= (priv->fuse.hs_term_range_adj <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT) |
+ (priv->fuse.hs_iref_cap <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL1_HS_IREF_CAP_SHIFT);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+
+ err = regulator_enable(port->supply);
+ if (err)
+ return err;
+
+ mutex_lock(&pad->lock);
+
+ if (pad->enable++ > 0)
+ goto out;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+ value &= ~XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+
+out:
+ mutex_unlock(&pad->lock);
+ return 0;
+}
+
+static int tegra124_usb2_phy_power_off(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_usb2_pad *pad = to_usb2_pad(lane->pad);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra_xusb_usb2_port *port;
+ u32 value;
+
+ port = tegra_xusb_find_usb2_port(padctl, lane->index);
+ if (!port) {
+ dev_err(&phy->dev, "no port found for USB2 lane %u\n",
+ lane->index);
+ return -ENODEV;
+ }
+
+ mutex_lock(&pad->lock);
+
+ if (WARN_ON(pad->enable == 0))
+ goto out;
+
+ if (--pad->enable > 0)
+ goto out;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+ value |= XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+
+out:
+ regulator_disable(port->supply);
+ mutex_unlock(&pad->lock);
+ return 0;
+}
+
+static const struct phy_ops tegra124_usb2_phy_ops = {
+ .init = tegra124_usb2_phy_init,
+ .exit = tegra124_usb2_phy_exit,
+ .power_on = tegra124_usb2_phy_power_on,
+ .power_off = tegra124_usb2_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra124_usb2_pad_probe(struct tegra_xusb_padctl *padctl,
+ const struct tegra_xusb_pad_soc *soc,
+ struct device_node *np)
+{
+ struct tegra_xusb_usb2_pad *usb2;
+ struct tegra_xusb_pad *pad;
+ int err;
+
+ usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
+ if (!usb2)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&usb2->lock);
+
+ pad = &usb2->base;
+ pad->ops = &tegra124_usb2_lane_ops;
+ pad->soc = soc;
+
+ err = tegra_xusb_pad_init(pad, padctl, np);
+ if (err < 0) {
+ kfree(usb2);
+ goto out;
+ }
+
+ err = tegra_xusb_pad_register(pad, &tegra124_usb2_phy_ops);
+ if (err < 0)
+ goto unregister;
+
+ dev_set_drvdata(&pad->dev, pad);
+
+ return pad;
+
+unregister:
+ device_unregister(&pad->dev);
+out:
+ return ERR_PTR(err);
+}
+
+static void tegra124_usb2_pad_remove(struct tegra_xusb_pad *pad)
+{
+ struct tegra_xusb_usb2_pad *usb2 = to_usb2_pad(pad);
+
+ kfree(usb2);
+}
+
+static const struct tegra_xusb_pad_ops tegra124_usb2_ops = {
+ .probe = tegra124_usb2_pad_probe,
+ .remove = tegra124_usb2_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra124_usb2_pad = {
+ .name = "usb2",
+ .num_lanes = ARRAY_SIZE(tegra124_usb2_lanes),
+ .lanes = tegra124_usb2_lanes,
+ .ops = &tegra124_usb2_ops,
+};
+
+static const char * const tegra124_ulpi_functions[] = {
+ "snps",
+ "xusb",
+};
+
+static const struct tegra_xusb_lane_soc tegra124_ulpi_lanes[] = {
+ TEGRA124_LANE("ulpi-0", 0x004, 12, 0x1, ulpi),
+};
+
+static struct tegra_xusb_lane *
+tegra124_ulpi_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+ unsigned int index)
+{
+ struct tegra_xusb_ulpi_lane *ulpi;
+ int err;
+
+ ulpi = kzalloc(sizeof(*ulpi), GFP_KERNEL);
+ if (!ulpi)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ulpi->base.list);
+ ulpi->base.soc = &pad->soc->lanes[index];
+ ulpi->base.index = index;
+ ulpi->base.pad = pad;
+ ulpi->base.np = np;
+
+ err = tegra_xusb_lane_parse_dt(&ulpi->base, np);
+ if (err < 0) {
+ kfree(ulpi);
+ return ERR_PTR(err);
+ }
+
+ return &ulpi->base;
+}
+
+static void tegra124_ulpi_lane_remove(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_ulpi_lane *ulpi = to_ulpi_lane(lane);
+
+ kfree(ulpi);
+}
+
+static const struct tegra_xusb_lane_ops tegra124_ulpi_lane_ops = {
+ .probe = tegra124_ulpi_lane_probe,
+ .remove = tegra124_ulpi_lane_remove,
+};
+
+static int tegra124_ulpi_phy_init(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra124_xusb_padctl_enable(lane->pad->padctl);
+}
+
+static int tegra124_ulpi_phy_exit(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra124_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra124_ulpi_phy_power_on(struct phy *phy)
+{
+ return 0;
+}
+
+static int tegra124_ulpi_phy_power_off(struct phy *phy)
+{
+ return 0;
+}
+
+static const struct phy_ops tegra124_ulpi_phy_ops = {
+ .init = tegra124_ulpi_phy_init,
+ .exit = tegra124_ulpi_phy_exit,
+ .power_on = tegra124_ulpi_phy_power_on,
+ .power_off = tegra124_ulpi_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra124_ulpi_pad_probe(struct tegra_xusb_padctl *padctl,
+ const struct tegra_xusb_pad_soc *soc,
+ struct device_node *np)
+{
+ struct tegra_xusb_ulpi_pad *ulpi;
+ struct tegra_xusb_pad *pad;
+ int err;
+
+ ulpi = kzalloc(sizeof(*ulpi), GFP_KERNEL);
+ if (!ulpi)
+ return ERR_PTR(-ENOMEM);
+
+ pad = &ulpi->base;
+ pad->ops = &tegra124_ulpi_lane_ops;
+ pad->soc = soc;
+
+ err = tegra_xusb_pad_init(pad, padctl, np);
+ if (err < 0) {
+ kfree(ulpi);
+ goto out;
+ }
+
+ err = tegra_xusb_pad_register(pad, &tegra124_ulpi_phy_ops);
+ if (err < 0)
+ goto unregister;
+
+ dev_set_drvdata(&pad->dev, pad);
+
+ return pad;
+
+unregister:
+ device_unregister(&pad->dev);
+out:
+ return ERR_PTR(err);
+}
+
+static void tegra124_ulpi_pad_remove(struct tegra_xusb_pad *pad)
+{
+ struct tegra_xusb_ulpi_pad *ulpi = to_ulpi_pad(pad);
+
+ kfree(ulpi);
+}
+
+static const struct tegra_xusb_pad_ops tegra124_ulpi_ops = {
+ .probe = tegra124_ulpi_pad_probe,
+ .remove = tegra124_ulpi_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra124_ulpi_pad = {
+ .name = "ulpi",
+ .num_lanes = ARRAY_SIZE(tegra124_ulpi_lanes),
+ .lanes = tegra124_ulpi_lanes,
+ .ops = &tegra124_ulpi_ops,
+};
+
+static const char * const tegra124_hsic_functions[] = {
+ "snps",
+ "xusb",
+};
+
+static const struct tegra_xusb_lane_soc tegra124_hsic_lanes[] = {
+ TEGRA124_LANE("hsic-0", 0x004, 14, 0x1, hsic),
+ TEGRA124_LANE("hsic-1", 0x004, 15, 0x1, hsic),
+};
+
+static struct tegra_xusb_lane *
+tegra124_hsic_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+ unsigned int index)
+{
+ struct tegra_xusb_hsic_lane *hsic;
+ int err;
+
+ hsic = kzalloc(sizeof(*hsic), GFP_KERNEL);
+ if (!hsic)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&hsic->base.list);
+ hsic->base.soc = &pad->soc->lanes[index];
+ hsic->base.index = index;
+ hsic->base.pad = pad;
+ hsic->base.np = np;
+
+ err = tegra_xusb_lane_parse_dt(&hsic->base, np);
+ if (err < 0) {
+ kfree(hsic);
+ return ERR_PTR(err);
+ }
+
+ return &hsic->base;
+}
+
+static void tegra124_hsic_lane_remove(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
+
+ kfree(hsic);
+}
+
+static const struct tegra_xusb_lane_ops tegra124_hsic_lane_ops = {
+ .probe = tegra124_hsic_lane_probe,
+ .remove = tegra124_hsic_lane_remove,
+};
+
+static int tegra124_hsic_phy_init(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra124_xusb_padctl_enable(lane->pad->padctl);
+}
+
+static int tegra124_hsic_phy_exit(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra124_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra124_hsic_phy_power_on(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
+ struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+ int err;
+
+ err = regulator_enable(pad->supply);
+ if (err)
+ return err;
+
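+	/* Program the strobe trim and the per-lane TX/RX tuning values. */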
+ padctl_writel(padctl, hsic->strobe_trim,
+ XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+
+ if (hsic->auto_term)
+ value |= XUSB_PADCTL_HSIC_PAD_CTL1_AUTO_TERM_EN;
+ else
+ value &= ~XUSB_PADCTL_HSIC_PAD_CTL1_AUTO_TERM_EN;
+
+ padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+ value &= ~((XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_MASK <<
+ XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_SHIFT) |
+ (XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_MASK <<
+ XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_SHIFT) |
+ (XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_MASK <<
+ XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_SHIFT) |
+ (XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_MASK <<
+ XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_SHIFT));
+ value |= (hsic->tx_rtune_n <<
+ XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEN_SHIFT) |
+ (hsic->tx_rtune_p <<
+ XUSB_PADCTL_HSIC_PAD_CTL0_TX_RTUNEP_SHIFT) |
+ (hsic->tx_rslew_n <<
+ XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWN_SHIFT) |
+ (hsic->tx_rslew_p <<
+ XUSB_PADCTL_HSIC_PAD_CTL0_TX_RSLEWP_SHIFT);
+ padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL2(index));
+ value &= ~((XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_MASK <<
+ XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT) |
+ (XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_MASK <<
+ XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT));
+ value |= (hsic->rx_strobe_trim <<
+ XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT) |
+ (hsic->rx_data_trim <<
+ XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT);
+ padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL2(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+ value &= ~(XUSB_PADCTL_HSIC_PAD_CTL1_RPD_STROBE |
+ XUSB_PADCTL_HSIC_PAD_CTL1_RPU_DATA |
+ XUSB_PADCTL_HSIC_PAD_CTL1_PD_RX |
+ XUSB_PADCTL_HSIC_PAD_CTL1_PD_ZI |
+ XUSB_PADCTL_HSIC_PAD_CTL1_PD_TRX |
+ XUSB_PADCTL_HSIC_PAD_CTL1_PD_TX);
+ value |= XUSB_PADCTL_HSIC_PAD_CTL1_RPD_DATA |
+ XUSB_PADCTL_HSIC_PAD_CTL1_RPU_STROBE;
+ padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+
+ return 0;
+}
+
+static int tegra124_hsic_phy_power_off(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+ value |= XUSB_PADCTL_HSIC_PAD_CTL1_PD_RX |
+ XUSB_PADCTL_HSIC_PAD_CTL1_PD_ZI |
+ XUSB_PADCTL_HSIC_PAD_CTL1_PD_TRX |
+ XUSB_PADCTL_HSIC_PAD_CTL1_PD_TX;
+ padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+
+ regulator_disable(pad->supply);
+
+ return 0;
+}
+
+static const struct phy_ops tegra124_hsic_phy_ops = {
+ .init = tegra124_hsic_phy_init,
+ .exit = tegra124_hsic_phy_exit,
+ .power_on = tegra124_hsic_phy_power_on,
+ .power_off = tegra124_hsic_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra124_hsic_pad_probe(struct tegra_xusb_padctl *padctl,
+ const struct tegra_xusb_pad_soc *soc,
+ struct device_node *np)
+{
+ struct tegra_xusb_hsic_pad *hsic;
+ struct tegra_xusb_pad *pad;
+ int err;
+
+ hsic = kzalloc(sizeof(*hsic), GFP_KERNEL);
+ if (!hsic)
+ return ERR_PTR(-ENOMEM);
+
+ pad = &hsic->base;
+ pad->ops = &tegra124_hsic_lane_ops;
+ pad->soc = soc;
+
+ err = tegra_xusb_pad_init(pad, padctl, np);
+ if (err < 0) {
+ kfree(hsic);
+ goto out;
+ }
+
+ err = tegra_xusb_pad_register(pad, &tegra124_hsic_phy_ops);
+ if (err < 0)
+ goto unregister;
+
+ dev_set_drvdata(&pad->dev, pad);
+
+ return pad;
+
+unregister:
+ device_unregister(&pad->dev);
+out:
+ return ERR_PTR(err);
+}
+
+static void tegra124_hsic_pad_remove(struct tegra_xusb_pad *pad)
+{
+ struct tegra_xusb_hsic_pad *hsic = to_hsic_pad(pad);
+
+ kfree(hsic);
+}
+
+static const struct tegra_xusb_pad_ops tegra124_hsic_ops = {
+ .probe = tegra124_hsic_pad_probe,
+ .remove = tegra124_hsic_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra124_hsic_pad = {
+ .name = "hsic",
+ .num_lanes = ARRAY_SIZE(tegra124_hsic_lanes),
+ .lanes = tegra124_hsic_lanes,
+ .ops = &tegra124_hsic_ops,
+};
+
+static const char * const tegra124_pcie_functions[] = {
+ "pcie",
+ "usb3-ss",
+ "sata",
+};
+
+static const struct tegra_xusb_lane_soc tegra124_pcie_lanes[] = {
+ TEGRA124_LANE("pcie-0", 0x134, 16, 0x3, pcie),
+ TEGRA124_LANE("pcie-1", 0x134, 18, 0x3, pcie),
+ TEGRA124_LANE("pcie-2", 0x134, 20, 0x3, pcie),
+ TEGRA124_LANE("pcie-3", 0x134, 22, 0x3, pcie),
+ TEGRA124_LANE("pcie-4", 0x134, 24, 0x3, pcie),
+};
+
+static struct tegra_xusb_lane *
+tegra124_pcie_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+ unsigned int index)
+{
+ struct tegra_xusb_pcie_lane *pcie;
+ int err;
+
+ pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&pcie->base.list);
+ pcie->base.soc = &pad->soc->lanes[index];
+ pcie->base.index = index;
+ pcie->base.pad = pad;
+ pcie->base.np = np;
+
+ err = tegra_xusb_lane_parse_dt(&pcie->base, np);
+ if (err < 0) {
+ kfree(pcie);
+ return ERR_PTR(err);
+ }
+
+ return &pcie->base;
+}
+
+static void tegra124_pcie_lane_remove(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_pcie_lane *pcie = to_pcie_lane(lane);
+
+ kfree(pcie);
+}
+
+static const struct tegra_xusb_lane_ops tegra124_pcie_lane_ops = {
+ .probe = tegra124_pcie_lane_probe,
+ .remove = tegra124_pcie_lane_remove,
+};
+
+static int tegra124_pcie_phy_init(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra124_xusb_padctl_enable(lane->pad->padctl);
+}
+
+static int tegra124_pcie_phy_exit(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra124_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra124_pcie_phy_power_on(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned long timeout;
+ int err = -ETIMEDOUT;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+ value &= ~XUSB_PADCTL_IOPHY_PLL_P0_CTL1_REFCLK_SEL_MASK;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL2);
+ value |= XUSB_PADCTL_IOPHY_PLL_P0_CTL2_REFCLKBUF_EN |
+ XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_EN |
+ XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_SEL;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL2);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+ value |= XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+
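+	/* Wait up to 50 ms for the PLL to lock. */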
+ timeout = jiffies + msecs_to_jiffies(50);
+
+ while (time_before(jiffies, timeout)) {
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+ if (value & XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL0_LOCKDET) {
+ err = 0;
+ break;
+ }
+
+ usleep_range(100, 200);
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+ value |= XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(lane->index);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+ return err;
+}
+
+static int tegra124_pcie_phy_power_off(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+ value &= ~XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(lane->index);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+ value &= ~XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+
+ return 0;
+}
+
+static const struct phy_ops tegra124_pcie_phy_ops = {
+ .init = tegra124_pcie_phy_init,
+ .exit = tegra124_pcie_phy_exit,
+ .power_on = tegra124_pcie_phy_power_on,
+ .power_off = tegra124_pcie_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra124_pcie_pad_probe(struct tegra_xusb_padctl *padctl,
+ const struct tegra_xusb_pad_soc *soc,
+ struct device_node *np)
+{
+ struct tegra_xusb_pcie_pad *pcie;
+ struct tegra_xusb_pad *pad;
+ int err;
+
+ pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return ERR_PTR(-ENOMEM);
+
+ pad = &pcie->base;
+ pad->ops = &tegra124_pcie_lane_ops;
+ pad->soc = soc;
+
+ err = tegra_xusb_pad_init(pad, padctl, np);
+ if (err < 0) {
+ kfree(pcie);
+ goto out;
+ }
+
+ err = tegra_xusb_pad_register(pad, &tegra124_pcie_phy_ops);
+ if (err < 0)
+ goto unregister;
+
+ dev_set_drvdata(&pad->dev, pad);
+
+ return pad;
+
+unregister:
+ device_unregister(&pad->dev);
+out:
+ return ERR_PTR(err);
+}
+
+static void tegra124_pcie_pad_remove(struct tegra_xusb_pad *pad)
+{
+ struct tegra_xusb_pcie_pad *pcie = to_pcie_pad(pad);
+
+ kfree(pcie);
+}
+
+static const struct tegra_xusb_pad_ops tegra124_pcie_ops = {
+ .probe = tegra124_pcie_pad_probe,
+ .remove = tegra124_pcie_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra124_pcie_pad = {
+ .name = "pcie",
+ .num_lanes = ARRAY_SIZE(tegra124_pcie_lanes),
+ .lanes = tegra124_pcie_lanes,
+ .ops = &tegra124_pcie_ops,
+};
+
+static const struct tegra_xusb_lane_soc tegra124_sata_lanes[] = {
+ TEGRA124_LANE("sata-0", 0x134, 26, 0x3, pcie),
+};
+
+static struct tegra_xusb_lane *
+tegra124_sata_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+ unsigned int index)
+{
+ struct tegra_xusb_sata_lane *sata;
+ int err;
+
+ sata = kzalloc(sizeof(*sata), GFP_KERNEL);
+ if (!sata)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&sata->base.list);
+ sata->base.soc = &pad->soc->lanes[index];
+ sata->base.index = index;
+ sata->base.pad = pad;
+ sata->base.np = np;
+
+ err = tegra_xusb_lane_parse_dt(&sata->base, np);
+ if (err < 0) {
+ kfree(sata);
+ return ERR_PTR(err);
+ }
+
+ return &sata->base;
+}
+
+static void tegra124_sata_lane_remove(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_sata_lane *sata = to_sata_lane(lane);
+
+ kfree(sata);
+}
+
+static const struct tegra_xusb_lane_ops tegra124_sata_lane_ops = {
+ .probe = tegra124_sata_lane_probe,
+ .remove = tegra124_sata_lane_remove,
+};
+
+static int tegra124_sata_phy_init(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra124_xusb_padctl_enable(lane->pad->padctl);
+}
+
+static int tegra124_sata_phy_exit(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra124_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra124_sata_phy_power_on(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned long timeout;
+ int err = -ETIMEDOUT;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
+ value &= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD;
+ value &= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD;
+ value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
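+	/* Wait up to 50 ms for the PLL to lock. */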
+ timeout = jiffies + msecs_to_jiffies(50);
+
+ while (time_before(jiffies, timeout)) {
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ if (value & XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_LOCKDET) {
+ err = 0;
+ break;
+ }
+
+ usleep_range(100, 200);
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+ value |= XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(lane->index);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+ return err;
+}
+
+static int tegra124_sata_phy_power_off(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+ value &= ~XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(lane->index);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD;
+ value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
+	value |= XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD;
+	value |= XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
+
+ return 0;
+}
+
+static const struct phy_ops tegra124_sata_phy_ops = {
+ .init = tegra124_sata_phy_init,
+ .exit = tegra124_sata_phy_exit,
+ .power_on = tegra124_sata_phy_power_on,
+ .power_off = tegra124_sata_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra124_sata_pad_probe(struct tegra_xusb_padctl *padctl,
+ const struct tegra_xusb_pad_soc *soc,
+ struct device_node *np)
+{
+ struct tegra_xusb_sata_pad *sata;
+ struct tegra_xusb_pad *pad;
+ int err;
+
+ sata = kzalloc(sizeof(*sata), GFP_KERNEL);
+ if (!sata)
+ return ERR_PTR(-ENOMEM);
+
+ pad = &sata->base;
+ pad->ops = &tegra124_sata_lane_ops;
+ pad->soc = soc;
+
+ err = tegra_xusb_pad_init(pad, padctl, np);
+ if (err < 0) {
+ kfree(sata);
+ goto out;
+ }
+
+ err = tegra_xusb_pad_register(pad, &tegra124_sata_phy_ops);
+ if (err < 0)
+ goto unregister;
+
+ dev_set_drvdata(&pad->dev, pad);
+
+ return pad;
+
+unregister:
+ device_unregister(&pad->dev);
+out:
+ return ERR_PTR(err);
+}
+
+static void tegra124_sata_pad_remove(struct tegra_xusb_pad *pad)
+{
+ struct tegra_xusb_sata_pad *sata = to_sata_pad(pad);
+
+ kfree(sata);
+}
+
+static const struct tegra_xusb_pad_ops tegra124_sata_ops = {
+ .probe = tegra124_sata_pad_probe,
+ .remove = tegra124_sata_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra124_sata_pad = {
+ .name = "sata",
+ .num_lanes = ARRAY_SIZE(tegra124_sata_lanes),
+ .lanes = tegra124_sata_lanes,
+ .ops = &tegra124_sata_ops,
+};
+
+static const struct tegra_xusb_pad_soc *tegra124_pads[] = {
+ &tegra124_usb2_pad,
+ &tegra124_ulpi_pad,
+ &tegra124_hsic_pad,
+ &tegra124_pcie_pad,
+ &tegra124_sata_pad,
+};
+
+static int tegra124_usb2_port_enable(struct tegra_xusb_port *port)
+{
+ return 0;
+}
+
+static void tegra124_usb2_port_disable(struct tegra_xusb_port *port)
+{
+}
+
+static struct tegra_xusb_lane *
+tegra124_usb2_port_map(struct tegra_xusb_port *port)
+{
+ return tegra_xusb_find_lane(port->padctl, "usb2", port->index);
+}
+
+static const struct tegra_xusb_port_ops tegra124_usb2_port_ops = {
+ .enable = tegra124_usb2_port_enable,
+ .disable = tegra124_usb2_port_disable,
+ .map = tegra124_usb2_port_map,
+};
+
+static int tegra124_ulpi_port_enable(struct tegra_xusb_port *port)
+{
+ return 0;
+}
+
+static void tegra124_ulpi_port_disable(struct tegra_xusb_port *port)
+{
+}
+
+static struct tegra_xusb_lane *
+tegra124_ulpi_port_map(struct tegra_xusb_port *port)
+{
+ return tegra_xusb_find_lane(port->padctl, "ulpi", port->index);
+}
+
+static const struct tegra_xusb_port_ops tegra124_ulpi_port_ops = {
+ .enable = tegra124_ulpi_port_enable,
+ .disable = tegra124_ulpi_port_disable,
+ .map = tegra124_ulpi_port_map,
+};
+
+static int tegra124_hsic_port_enable(struct tegra_xusb_port *port)
+{
+ return 0;
+}
+
+static void tegra124_hsic_port_disable(struct tegra_xusb_port *port)
+{
+}
+
+static struct tegra_xusb_lane *
+tegra124_hsic_port_map(struct tegra_xusb_port *port)
+{
+ return tegra_xusb_find_lane(port->padctl, "hsic", port->index);
+}
+
+static const struct tegra_xusb_port_ops tegra124_hsic_port_ops = {
+ .enable = tegra124_hsic_port_enable,
+ .disable = tegra124_hsic_port_disable,
+ .map = tegra124_hsic_port_map,
+};
+
+static int tegra124_usb3_port_enable(struct tegra_xusb_port *port)
+{
+ struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port);
+ struct tegra_xusb_padctl *padctl = port->padctl;
+ struct tegra_xusb_lane *lane = usb3->base.lane;
+ unsigned int index = port->index, offset;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+
+ if (!usb3->internal)
+ value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
+ else
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
+
+ value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(index);
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(index, usb3->port);
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+
+ /*
+ * TODO: move this code into the PCIe/SATA PHY ->power_on() callbacks
+ * and conditionalize based on mux function? This seems to work, but
+ * might not be the exact proper sequence.
+ */
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(index));
+ value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_MASK <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_SHIFT) |
+ (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_MASK <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_SHIFT) |
+ (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_MASK <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_SHIFT));
+ value |= (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_VAL <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_WANDER_SHIFT) |
+ (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_VAL <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_CDR_CNTL_SHIFT) |
+ (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_VAL <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_SHIFT);
+
+ if (usb3->context_saved) {
+ value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_MASK <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT) |
+ (XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_MASK <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT));
+ value |= (usb3->ctle_g <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_G_SHIFT) |
+ (usb3->ctle_z <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL2_RX_EQ_Z_SHIFT);
+ }
+
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_USB3_PADX_CTL2(index));
+
+ value = XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_VAL;
+
+ if (usb3->context_saved) {
+ value &= ~((XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_MASK <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT) |
+ (XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_MASK <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT));
+ value |= (usb3->tap1 <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_TAP_SHIFT) |
+ (usb3->amp <<
+ XUSB_PADCTL_IOPHY_USB3_PAD_CTL4_DFE_CNTL_AMP_SHIFT);
+ }
+
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_USB3_PADX_CTL4(index));
+
+ if (lane->pad == padctl->pcie)
+ offset = XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL2(lane->index);
+ else
+ offset = XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL2;
+
+ value = padctl_readl(padctl, offset);
+ value &= ~(XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_MASK <<
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_SHIFT);
+ value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_VAL <<
+ XUSB_PADCTL_IOPHY_MISC_PAD_CTL2_SPARE_IN_SHIFT;
+ padctl_writel(padctl, value, offset);
+
+ if (lane->pad == padctl->pcie)
+ offset = XUSB_PADCTL_IOPHY_MISC_PAD_PX_CTL5(lane->index);
+ else
+ offset = XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL5;
+
+ value = padctl_readl(padctl, offset);
+ value |= XUSB_PADCTL_IOPHY_MISC_PAD_CTL5_RX_QEYE_EN;
+ padctl_writel(padctl, value, offset);
+
+ /* Enable SATA PHY when SATA lane is used */
+ if (lane->pad == padctl->sata) {
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ value &= ~(XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_MASK <<
+ XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_SHIFT);
+ value |= 0x2 <<
+ XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL0_REFCLK_NDIV_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL2);
+ value &= ~((XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_MASK <<
+ XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_SHIFT) |
+ (XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_MASK <<
+ XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_SHIFT) |
+ (XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_MASK <<
+ XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_SHIFT) |
+ XUSB_PADCTL_IOPHY_PLL_S0_CTL2_TCLKOUT_EN);
+ value |= (0x7 <<
+ XUSB_PADCTL_IOPHY_PLL_S0_CTL2_XDIGCLK_SEL_SHIFT) |
+ (0x8 <<
+ XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL1_CP_CNTL_SHIFT) |
+ (0x8 <<
+ XUSB_PADCTL_IOPHY_PLL_S0_CTL2_PLL0_CP_CNTL_SHIFT) |
+ XUSB_PADCTL_IOPHY_PLL_S0_CTL2_TXCLKREF_SEL;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL2);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL3);
+ value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL3_RCAL_BYPASS;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL3);
+ }
+
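+	/* Bring the port out of ELPG: raise the core rail, then remove the clamps. */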
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_VCORE_DOWN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN_EARLY(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+	return 0;
+}
+
+static void tegra124_usb3_port_disable(struct tegra_xusb_port *port)
+{
+ struct tegra_xusb_padctl *padctl = port->padctl;
+ u32 value;
+
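+	/* Power down in reverse order: apply the clamps, then drop the core rail. */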
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value |= XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN_EARLY(port->index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value |= XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_CLAMP_EN(port->index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(250, 350);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value |= XUSB_PADCTL_ELPG_PROGRAM_SSPX_ELPG_VCORE_DOWN(port->index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+ value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(port->index);
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(port->index, 0x7);
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+}
+
+static const struct tegra_xusb_lane_map tegra124_usb3_map[] = {
+ { 0, "pcie", 0 },
+ { 1, "pcie", 1 },
+ { 1, "sata", 0 },
+ { 0, NULL, 0 },
+};
+
+static struct tegra_xusb_lane *
+tegra124_usb3_port_map(struct tegra_xusb_port *port)
+{
+ return tegra_xusb_port_find_lane(port, tegra124_usb3_map, "usb3-ss");
+}
+
+static const struct tegra_xusb_port_ops tegra124_usb3_port_ops = {
+ .enable = tegra124_usb3_port_enable,
+ .disable = tegra124_usb3_port_disable,
+ .map = tegra124_usb3_port_map,
+};
+
+static int
+tegra124_xusb_read_fuse_calibration(struct tegra124_xusb_fuse_calibration *fuse)
+{
+ unsigned int i;
+ int err;
+ u32 value;
+
+ err = tegra_fuse_readl(TEGRA_FUSE_SKU_CALIB_0, &value);
+ if (err < 0)
+ return err;
+
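+	/* Unpack the per-pad HS current levels and the shared calibration fields. */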
+ for (i = 0; i < ARRAY_SIZE(fuse->hs_curr_level); i++) {
+ fuse->hs_curr_level[i] =
+ (value >> FUSE_SKU_CALIB_HS_CURR_LEVEL_PADX_SHIFT(i)) &
+ FUSE_SKU_CALIB_HS_CURR_LEVEL_PAD_MASK;
+ }
+ fuse->hs_iref_cap =
+ (value >> FUSE_SKU_CALIB_HS_IREF_CAP_SHIFT) &
+ FUSE_SKU_CALIB_HS_IREF_CAP_MASK;
+ fuse->hs_term_range_adj =
+ (value >> FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_SHIFT) &
+ FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_MASK;
+ fuse->hs_squelch_level =
+ (value >> FUSE_SKU_CALIB_HS_SQUELCH_LEVEL_SHIFT) &
+ FUSE_SKU_CALIB_HS_SQUELCH_LEVEL_MASK;
+
+ return 0;
+}
+
+static struct tegra_xusb_padctl *
+tegra124_xusb_padctl_probe(struct device *dev,
+ const struct tegra_xusb_padctl_soc *soc)
+{
+ struct tegra124_xusb_padctl *padctl;
+ int err;
+
+ padctl = devm_kzalloc(dev, sizeof(*padctl), GFP_KERNEL);
+ if (!padctl)
+ return ERR_PTR(-ENOMEM);
+
+ padctl->base.dev = dev;
+ padctl->base.soc = soc;
+
+ err = tegra124_xusb_read_fuse_calibration(&padctl->fuse);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return &padctl->base;
+}
+
+static void tegra124_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
+{
+}
+
+static const struct tegra_xusb_padctl_ops tegra124_xusb_padctl_ops = {
+ .probe = tegra124_xusb_padctl_probe,
+ .remove = tegra124_xusb_padctl_remove,
+ .usb3_save_context = tegra124_usb3_save_context,
+ .hsic_set_idle = tegra124_hsic_set_idle,
+};
+
+const struct tegra_xusb_padctl_soc tegra124_xusb_padctl_soc = {
+ .num_pads = ARRAY_SIZE(tegra124_pads),
+ .pads = tegra124_pads,
+ .ports = {
+ .usb2 = {
+ .ops = &tegra124_usb2_port_ops,
+ .count = 3,
+ },
+ .ulpi = {
+ .ops = &tegra124_ulpi_port_ops,
+ .count = 1,
+ },
+ .hsic = {
+ .ops = &tegra124_hsic_port_ops,
+ .count = 2,
+ },
+ .usb3 = {
+ .ops = &tegra124_usb3_port_ops,
+ .count = 2,
+ },
+ },
+ .ops = &tegra124_xusb_padctl_ops,
+};
+EXPORT_SYMBOL_GPL(tegra124_xusb_padctl_soc);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra 124 XUSB Pad Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/tegra/xusb-tegra210.c b/drivers/phy/tegra/xusb-tegra210.c
new file mode 100644
index 000000000..9d0689ebd
--- /dev/null
+++ b/drivers/phy/tegra/xusb-tegra210.c
@@ -0,0 +1,2045 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "xusb.h"
+
+#define FUSE_SKU_CALIB_HS_CURR_LEVEL_PADX_SHIFT(x) \
+ ((x) ? (11 + ((x) - 1) * 6) : 0)
+#define FUSE_SKU_CALIB_HS_CURR_LEVEL_PAD_MASK 0x3f
+#define FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_SHIFT 7
+#define FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_MASK 0xf
+
+#define FUSE_USB_CALIB_EXT_RPD_CTRL_SHIFT 0
+#define FUSE_USB_CALIB_EXT_RPD_CTRL_MASK 0x1f
+
+#define XUSB_PADCTL_USB2_PAD_MUX 0x004
+#define XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_SHIFT 16
+#define XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_MASK 0x3
+#define XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_XUSB 0x1
+#define XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_SHIFT 18
+#define XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_MASK 0x3
+#define XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_XUSB 0x1
+
+#define XUSB_PADCTL_USB2_PORT_CAP 0x008
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(x) (0x1 << ((x) * 4))
+#define XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_MASK(x) (0x3 << ((x) * 4))
+
+#define XUSB_PADCTL_SS_PORT_MAP 0x014
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(x) (1 << (((x) * 5) + 4))
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_SHIFT(x) ((x) * 5)
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(x) (0x7 << ((x) * 5))
+#define XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(x, v) (((v) & 0x7) << ((x) * 5))
+
+#define XUSB_PADCTL_ELPG_PROGRAM1 0x024
+#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN (1 << 31)
+#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN_EARLY (1 << 30)
+#define XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN (1 << 29)
+#define XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(x) (1 << (2 + (x) * 3))
+#define XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(x) \
+ (1 << (1 + (x) * 3))
+#define XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(x) (1 << ((x) * 3))
+
+#define XUSB_PADCTL_USB3_PAD_MUX 0x028
+#define XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(x) (1 << (1 + (x)))
+#define XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(x) (1 << (8 + (x)))
+
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(x) (0x084 + (x) * 0x40)
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT 7
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_MASK 0x3
+#define XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18 (1 << 6)
+
+#define XUSB_PADCTL_USB2_OTG_PADX_CTL0(x) (0x088 + (x) * 0x40)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD_ZI (1 << 29)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD2 (1 << 27)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD (1 << 26)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT 0
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_MASK 0x3f
+
+#define XUSB_PADCTL_USB2_OTG_PADX_CTL1(x) (0x08c + (x) * 0x40)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_SHIFT 26
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_MASK 0x1f
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT 3
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_MASK 0xf
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DR (1 << 2)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DISC_OVRD (1 << 1)
+#define XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_CHRP_OVRD (1 << 0)
+
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0 0x284
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD (1 << 11)
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT 3
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_MASK 0x7
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_VAL 0x7
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT 0
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK 0x7
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_VAL 0x2
+
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1 0x288
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_PD_TRK (1 << 26)
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_SHIFT 19
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_MASK 0x7f
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_VAL 0x0a
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_SHIFT 12
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_MASK 0x7f
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_VAL 0x1e
+
+#define XUSB_PADCTL_HSIC_PADX_CTL0(x) (0x300 + (x) * 0x20)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_RPU_STROBE (1 << 18)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA1 (1 << 17)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA0 (1 << 16)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_RPD_STROBE (1 << 15)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA1 (1 << 14)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA0 (1 << 13)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_STROBE (1 << 9)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA1 (1 << 8)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA0 (1 << 7)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_STROBE (1 << 6)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA1 (1 << 5)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA0 (1 << 4)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_STROBE (1 << 3)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA1 (1 << 2)
+#define XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA0 (1 << 1)
+
+#define XUSB_PADCTL_HSIC_PADX_CTL1(x) (0x304 + (x) * 0x20)
+#define XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_SHIFT 0
+#define XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_MASK 0xf
+
+#define XUSB_PADCTL_HSIC_PADX_CTL2(x) (0x308 + (x) * 0x20)
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT 8
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_MASK 0xf
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT 0
+#define XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_MASK 0xff
+
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL 0x340
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_PD_TRK (1 << 19)
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_SHIFT 12
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_MASK 0x7f
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_VAL 0x0a
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_SHIFT 5
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_MASK 0x7f
+#define XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_VAL 0x1e
+
+#define XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL 0x344
+
+#define XUSB_PADCTL_UPHY_PLL_P0_CTL1 0x360
+#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT 20
+#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_MASK 0xff
+#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_USB_VAL 0x19
+#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SATA_VAL 0x1e
+#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_SHIFT 16
+#define XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_MASK 0x3
+#define XUSB_PADCTL_UPHY_PLL_CTL1_LOCKDET_STATUS (1 << 15)
+#define XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD (1 << 4)
+#define XUSB_PADCTL_UPHY_PLL_CTL1_ENABLE (1 << 3)
+#define XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_SHIFT 1
+#define XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_MASK 0x3
+#define XUSB_PADCTL_UPHY_PLL_CTL1_IDDQ (1 << 0)
+
+#define XUSB_PADCTL_UPHY_PLL_P0_CTL2 0x364
+#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT 4
+#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_MASK 0xffffff
+#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_VAL 0x136
+#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD (1 << 2)
+#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE (1 << 1)
+#define XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN (1 << 0)
+
+#define XUSB_PADCTL_UPHY_PLL_P0_CTL4 0x36c
+#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_EN (1 << 15)
+#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT 12
+#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_MASK 0x3
+#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_USB_VAL 0x2
+#define XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SATA_VAL 0x0
+#define XUSB_PADCTL_UPHY_PLL_CTL4_REFCLKBUF_EN (1 << 8)
+#define XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_SHIFT 4
+#define XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_MASK 0xf
+
+#define XUSB_PADCTL_UPHY_PLL_P0_CTL5 0x370
+#define XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT 16
+#define XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_MASK 0xff
+#define XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_VAL 0x2a
+
+#define XUSB_PADCTL_UPHY_PLL_P0_CTL8 0x37c
+#define XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE (1 << 31)
+#define XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD (1 << 15)
+#define XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN (1 << 13)
+#define XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN (1 << 12)
+
+#define XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL1(x) (0x460 + (x) * 0x40)
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_SHIFT 20
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_MASK 0x3
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_VAL 0x1
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_TERM_EN BIT(18)
+#define XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_MODE_OVRD BIT(13)
+
+#define XUSB_PADCTL_UPHY_PLL_S0_CTL1 0x860
+
+#define XUSB_PADCTL_UPHY_PLL_S0_CTL2 0x864
+
+#define XUSB_PADCTL_UPHY_PLL_S0_CTL4 0x86c
+
+#define XUSB_PADCTL_UPHY_PLL_S0_CTL5 0x870
+
+#define XUSB_PADCTL_UPHY_PLL_S0_CTL8 0x87c
+
+#define XUSB_PADCTL_UPHY_MISC_PAD_S0_CTL1 0x960
+
+#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL1(x) (0xa60 + (x) * 0x40)
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_SHIFT 16
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_MASK 0x3
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_VAL 0x2
+
+#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL2(x) (0xa64 + (x) * 0x40)
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_SHIFT 0
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_MASK 0xffff
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_VAL 0x00fc
+
+#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL3(x) (0xa68 + (x) * 0x40)
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL3_RX_DFE_VAL 0xc0077f1f
+
+#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL4(x) (0xa6c + (x) * 0x40)
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_SHIFT 16
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_MASK 0xffff
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_VAL 0x01c7
+
+#define XUSB_PADCTL_UPHY_USB3_PADX_ECTL6(x) (0xa74 + (x) * 0x40)
+#define XUSB_PADCTL_UPHY_USB3_PAD_ECTL6_RX_EQ_CTRL_H_VAL 0xfcf01368
+
+struct tegra210_xusb_fuse_calibration {
+ u32 hs_curr_level[4];
+ u32 hs_term_range_adj;
+ u32 rpd_ctrl;
+};
+
+struct tegra210_xusb_padctl {
+ struct tegra_xusb_padctl base;
+
+ struct tegra210_xusb_fuse_calibration fuse;
+};
+
+static inline struct tegra210_xusb_padctl *
+to_tegra210_xusb_padctl(struct tegra_xusb_padctl *padctl)
+{
+ return container_of(padctl, struct tegra210_xusb_padctl, base);
+}
+
+/* must be called under padctl->lock */
+static int tegra210_pex_uphy_enable(struct tegra_xusb_padctl *padctl)
+{
+ struct tegra_xusb_pcie_pad *pcie = to_pcie_pad(padctl->pcie);
+ unsigned long timeout;
+ u32 value;
+ int err;
+
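+	/* The PEX UPHY PLL is shared, so only bring it up for the first user. */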
+ if (pcie->enable > 0) {
+ pcie->enable++;
+ return 0;
+ }
+
+ err = clk_prepare_enable(pcie->pll);
+ if (err < 0)
+ return err;
+
+ err = reset_control_deassert(pcie->rst);
+ if (err < 0)
+ goto disable;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+ value &= ~(XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_MASK <<
+ XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_VAL <<
+ XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL5);
+ value &= ~(XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_MASK <<
+ XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_VAL <<
+ XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL5);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL4);
+ value &= ~((XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_MASK <<
+ XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT) |
+ (XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_MASK <<
+ XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_SHIFT));
+ value |= (XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_USB_VAL <<
+ XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT) |
+ XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL4);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+ value &= ~((XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_MASK <<
+ XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_SHIFT) |
+ (XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_MASK <<
+ XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT));
+ value |= XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_USB_VAL <<
+ XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+ value &= ~XUSB_PADCTL_UPHY_PLL_CTL1_IDDQ;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+ value &= ~(XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_MASK <<
+ XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_SHIFT);
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+
+ usleep_range(10, 20);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL4);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL4_REFCLKBUF_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL4);
+
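+	/*
+	 * Run PLL calibration: enable it, wait for completion, then disable
+	 * it again and wait for the done flag to clear.
+	 */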
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+
+ timeout = jiffies + msecs_to_jiffies(100);
+
+ while (time_before(jiffies, timeout)) {
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+ if (value & XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE)
+ break;
+
+ usleep_range(10, 20);
+ }
+
+ if (time_after_eq(jiffies, timeout)) {
+ err = -ETIMEDOUT;
+ goto reset;
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+ value &= ~XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+
+ timeout = jiffies + msecs_to_jiffies(100);
+
+ while (time_before(jiffies, timeout)) {
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+ if (!(value & XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE))
+ break;
+
+ usleep_range(10, 20);
+ }
+
+ if (time_after_eq(jiffies, timeout)) {
+ err = -ETIMEDOUT;
+ goto reset;
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL1_ENABLE;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+
+ timeout = jiffies + msecs_to_jiffies(100);
+
+ while (time_before(jiffies, timeout)) {
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+ if (value & XUSB_PADCTL_UPHY_PLL_CTL1_LOCKDET_STATUS)
+ break;
+
+ usleep_range(10, 20);
+ }
+
+ if (time_after_eq(jiffies, timeout)) {
+ err = -ETIMEDOUT;
+ goto reset;
+ }
+
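+	/* Run resistor calibration (RCAL) using the same done/not-done handshake. */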
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN |
+ XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+
+ timeout = jiffies + msecs_to_jiffies(100);
+
+ while (time_before(jiffies, timeout)) {
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+ if (value & XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE)
+ break;
+
+ usleep_range(10, 20);
+ }
+
+ if (time_after_eq(jiffies, timeout)) {
+ err = -ETIMEDOUT;
+ goto reset;
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+ value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+
+ timeout = jiffies + msecs_to_jiffies(100);
+
+ while (time_before(jiffies, timeout)) {
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+ if (!(value & XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE))
+ break;
+
+ usleep_range(10, 20);
+ }
+
+ if (time_after_eq(jiffies, timeout)) {
+ err = -ETIMEDOUT;
+ goto reset;
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+ value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+
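+	/* Hand the PLL over to hardware-controlled power sequencing. */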
+ tegra210_xusb_pll_hw_control_enable();
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+ value &= ~XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+ value &= ~XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL2);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+ value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_P0_CTL8);
+
+ usleep_range(10, 20);
+
+ tegra210_xusb_pll_hw_sequence_start();
+
+ pcie->enable++;
+
+ return 0;
+
+reset:
+ reset_control_assert(pcie->rst);
+disable:
+ clk_disable_unprepare(pcie->pll);
+ return err;
+}
+
+static void tegra210_pex_uphy_disable(struct tegra_xusb_padctl *padctl)
+{
+ struct tegra_xusb_pcie_pad *pcie = to_pcie_pad(padctl->pcie);
+
+ mutex_lock(&padctl->lock);
+
+ if (WARN_ON(pcie->enable == 0))
+ goto unlock;
+
+ if (--pcie->enable > 0)
+ goto unlock;
+
+ reset_control_assert(pcie->rst);
+ clk_disable_unprepare(pcie->pll);
+
+unlock:
+ mutex_unlock(&padctl->lock);
+}
+
+/* must be called under padctl->lock */
+static int tegra210_sata_uphy_enable(struct tegra_xusb_padctl *padctl, bool usb)
+{
+ struct tegra_xusb_sata_pad *sata = to_sata_pad(padctl->sata);
+ unsigned long timeout;
+ u32 value;
+ int err;
+
+ if (sata->enable > 0) {
+ sata->enable++;
+ return 0;
+ }
+
+ err = clk_prepare_enable(sata->pll);
+ if (err < 0)
+ return err;
+
+ err = reset_control_deassert(sata->rst);
+ if (err < 0)
+ goto disable;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+ value &= ~(XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_MASK <<
+ XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_VAL <<
+ XUSB_PADCTL_UPHY_PLL_CTL2_CAL_CTRL_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL5);
+ value &= ~(XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_MASK <<
+ XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_VAL <<
+ XUSB_PADCTL_UPHY_PLL_CTL5_DCO_CTRL_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL5);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL4);
+ value &= ~((XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_MASK <<
+ XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT) |
+ (XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_MASK <<
+ XUSB_PADCTL_UPHY_PLL_CTL4_REFCLK_SEL_SHIFT));
+ value |= XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_EN;
+
+ if (usb)
+ value |= (XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_USB_VAL <<
+ XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT);
+ else
+ value |= (XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SATA_VAL <<
+ XUSB_PADCTL_UPHY_PLL_CTL4_TXCLKREF_SEL_SHIFT);
+
+	/* XXX PLL0_XDIGCLK_EN */
+	/*
+	value &= ~(1 << 19);
+	*/
+	padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL4);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+ value &= ~((XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_MASK <<
+ XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_MDIV_SHIFT) |
+ (XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_MASK <<
+ XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT));
+
+ if (usb)
+ value |= XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_USB_VAL <<
+ XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT;
+ else
+ value |= XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SATA_VAL <<
+ XUSB_PADCTL_UPHY_PLL_CTL1_FREQ_NDIV_SHIFT;
+
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+ value &= ~XUSB_PADCTL_UPHY_PLL_CTL1_IDDQ;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+ value &= ~(XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_MASK <<
+ XUSB_PADCTL_UPHY_PLL_CTL1_SLEEP_SHIFT);
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+
+ usleep_range(10, 20);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL4);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL4_REFCLKBUF_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL4);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+
+ timeout = jiffies + msecs_to_jiffies(100);
+
+ while (time_before(jiffies, timeout)) {
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+ if (value & XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE)
+ break;
+
+ usleep_range(10, 20);
+ }
+
+ if (time_after_eq(jiffies, timeout)) {
+ err = -ETIMEDOUT;
+ goto reset;
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+ value &= ~XUSB_PADCTL_UPHY_PLL_CTL2_CAL_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+
+ timeout = jiffies + msecs_to_jiffies(100);
+
+ while (time_before(jiffies, timeout)) {
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+ if (!(value & XUSB_PADCTL_UPHY_PLL_CTL2_CAL_DONE))
+ break;
+
+ usleep_range(10, 20);
+ }
+
+ if (time_after_eq(jiffies, timeout)) {
+ err = -ETIMEDOUT;
+ goto reset;
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL1_ENABLE;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+
+ timeout = jiffies + msecs_to_jiffies(100);
+
+ while (time_before(jiffies, timeout)) {
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+ if (value & XUSB_PADCTL_UPHY_PLL_CTL1_LOCKDET_STATUS)
+ break;
+
+ usleep_range(10, 20);
+ }
+
+ if (time_after_eq(jiffies, timeout)) {
+ err = -ETIMEDOUT;
+ goto reset;
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+ value |= XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN |
+ XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+
+ timeout = jiffies + msecs_to_jiffies(100);
+
+ while (time_before(jiffies, timeout)) {
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+ if (value & XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE)
+ break;
+
+ usleep_range(10, 20);
+ }
+
+ if (time_after_eq(jiffies, timeout)) {
+ err = -ETIMEDOUT;
+ goto reset;
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+ value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+
+ timeout = jiffies + msecs_to_jiffies(100);
+
+ while (time_before(jiffies, timeout)) {
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+ if (!(value & XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_DONE))
+ break;
+
+ usleep_range(10, 20);
+ }
+
+ if (time_after_eq(jiffies, timeout)) {
+ err = -ETIMEDOUT;
+ goto reset;
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+ value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_CLK_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+
+ tegra210_sata_pll_hw_control_enable();
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+ value &= ~XUSB_PADCTL_UPHY_PLL_CTL1_PWR_OVRD;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+ value &= ~XUSB_PADCTL_UPHY_PLL_CTL2_CAL_OVRD;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL2);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+ value &= ~XUSB_PADCTL_UPHY_PLL_CTL8_RCAL_OVRD;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_PLL_S0_CTL8);
+
+ usleep_range(10, 20);
+
+ tegra210_sata_pll_hw_sequence_start();
+
+ sata->enable++;
+
+ return 0;
+
+reset:
+ reset_control_assert(sata->rst);
+disable:
+ clk_disable_unprepare(sata->pll);
+ return err;
+}
+
+static void tegra210_sata_uphy_disable(struct tegra_xusb_padctl *padctl)
+{
+ struct tegra_xusb_sata_pad *sata = to_sata_pad(padctl->sata);
+
+ mutex_lock(&padctl->lock);
+
+ if (WARN_ON(sata->enable == 0))
+ goto unlock;
+
+ if (--sata->enable > 0)
+ goto unlock;
+
+ reset_control_assert(sata->rst);
+ clk_disable_unprepare(sata->pll);
+
+unlock:
+ mutex_unlock(&padctl->lock);
+}
+
+static int tegra210_xusb_padctl_enable(struct tegra_xusb_padctl *padctl)
+{
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ if (padctl->enable++ > 0)
+ goto out;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN_EARLY;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+out:
+ mutex_unlock(&padctl->lock);
+ return 0;
+}
+
+static int tegra210_xusb_padctl_disable(struct tegra_xusb_padctl *padctl)
+{
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ if (WARN_ON(padctl->enable == 0))
+ goto out;
+
+ if (--padctl->enable > 0)
+ goto out;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_VCORE_DOWN;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN_EARLY;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_AUX_MUX_LP0_CLAMP_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+out:
+ mutex_unlock(&padctl->lock);
+ return 0;
+}
+
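+/*
+ * Put the HSIC bus into (or take it out of) the idle state by pulling the
+ * data lines down and the strobe line up.
+ */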
+static int tegra210_hsic_set_idle(struct tegra_xusb_padctl *padctl,
+ unsigned int index, bool idle)
+{
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+
+ value &= ~(XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA0 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA1 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_RPD_STROBE);
+
+ if (idle)
+ value |= XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA0 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA1 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_RPU_STROBE;
+ else
+ value &= ~(XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA0 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA1 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_RPU_STROBE);
+
+ padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+
+ return 0;
+}
+
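+/*
+ * LFPS detection is disabled by overriding the auxiliary receiver into idle
+ * mode with termination enabled; clearing the override bits enables it
+ * again.
+ */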
+static int tegra210_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
+ unsigned int index, bool enable)
+{
+ struct tegra_xusb_port *port;
+ struct tegra_xusb_lane *lane;
+ u32 value, offset;
+
+ port = tegra_xusb_find_port(padctl, "usb3", index);
+ if (!port)
+ return -ENODEV;
+
+ lane = port->lane;
+
+ if (lane->pad == padctl->pcie)
+ offset = XUSB_PADCTL_UPHY_MISC_PAD_PX_CTL1(lane->index);
+ else
+ offset = XUSB_PADCTL_UPHY_MISC_PAD_S0_CTL1;
+
+ value = padctl_readl(padctl, offset);
+
+ value &= ~((XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_MASK <<
+ XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_SHIFT) |
+ XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_TERM_EN |
+ XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_MODE_OVRD);
+
+ if (!enable) {
+ value |= (XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_VAL <<
+ XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_IDLE_MODE_SHIFT) |
+ XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_TERM_EN |
+ XUSB_PADCTL_UPHY_MISC_PAD_CTL1_AUX_RX_MODE_OVRD;
+ }
+
+ padctl_writel(padctl, value, offset);
+
+ return 0;
+}
+
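+/*
+ * Describes a lane by its pin mux register (offset, shift and mask) and the
+ * set of functions that can be selected for the given lane type.
+ */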
+#define TEGRA210_LANE(_name, _offset, _shift, _mask, _type) \
+ { \
+ .name = _name, \
+ .offset = _offset, \
+ .shift = _shift, \
+ .mask = _mask, \
+ .num_funcs = ARRAY_SIZE(tegra210_##_type##_functions), \
+ .funcs = tegra210_##_type##_functions, \
+ }
+
+static const char *tegra210_usb2_functions[] = {
+ "snps",
+ "xusb",
+ "uart"
+};
+
+static const struct tegra_xusb_lane_soc tegra210_usb2_lanes[] = {
+ TEGRA210_LANE("usb2-0", 0x004, 0, 0x3, usb2),
+ TEGRA210_LANE("usb2-1", 0x004, 2, 0x3, usb2),
+ TEGRA210_LANE("usb2-2", 0x004, 4, 0x3, usb2),
+ TEGRA210_LANE("usb2-3", 0x004, 6, 0x3, usb2),
+};
+
+static struct tegra_xusb_lane *
+tegra210_usb2_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+ unsigned int index)
+{
+ struct tegra_xusb_usb2_lane *usb2;
+ int err;
+
+ usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
+ if (!usb2)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&usb2->base.list);
+ usb2->base.soc = &pad->soc->lanes[index];
+ usb2->base.index = index;
+ usb2->base.pad = pad;
+ usb2->base.np = np;
+
+ err = tegra_xusb_lane_parse_dt(&usb2->base, np);
+ if (err < 0) {
+ kfree(usb2);
+ return ERR_PTR(err);
+ }
+
+ return &usb2->base;
+}
+
+static void tegra210_usb2_lane_remove(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
+
+ kfree(usb2);
+}
+
+static const struct tegra_xusb_lane_ops tegra210_usb2_lane_ops = {
+ .probe = tegra210_usb2_lane_probe,
+ .remove = tegra210_usb2_lane_remove,
+};
+
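+/* hand the USB2 bias pad over to the XUSB controller */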
+static int tegra210_usb2_phy_init(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_PAD_MUX);
+ value &= ~(XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_MASK <<
+ XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_SHIFT);
+ value |= XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_XUSB <<
+ XUSB_PADCTL_USB2_PAD_MUX_USB2_BIAS_PAD_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_PAD_MUX);
+
+ return tegra210_xusb_padctl_enable(padctl);
+}
+
+static int tegra210_usb2_phy_exit(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra210_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra210_usb2_phy_power_on(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_usb2_lane *usb2 = to_usb2_lane(lane);
+ struct tegra_xusb_usb2_pad *pad = to_usb2_pad(lane->pad);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra210_xusb_padctl *priv;
+ struct tegra_xusb_usb2_port *port;
+ unsigned int index = lane->index;
+ u32 value;
+ int err;
+
+ port = tegra_xusb_find_usb2_port(padctl, index);
+ if (!port) {
+ dev_err(&phy->dev, "no port found for USB2 lane %u\n", index);
+ return -ENODEV;
+ }
+
+ priv = to_tegra210_xusb_padctl(padctl);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+ value &= ~((XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_MASK <<
+ XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT) |
+ (XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_MASK <<
+ XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT));
+ value |= (XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_VAL <<
+ XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_DISCON_LEVEL_SHIFT);
+
+ if (tegra_sku_info.revision < TEGRA_REVISION_A02)
+ value |=
+ (XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_VAL <<
+ XUSB_PADCTL_USB2_BIAS_PAD_CTL0_HS_SQUELCH_LEVEL_SHIFT);
+
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_PORT_CAP);
+ value &= ~XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_MASK(index);
+ value |= XUSB_PADCTL_USB2_PORT_CAP_PORTX_CAP_HOST(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_PORT_CAP);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+ value &= ~((XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_MASK <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT) |
+ XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD |
+ XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD2 |
+ XUSB_PADCTL_USB2_OTG_PAD_CTL0_PD_ZI);
+ value |= (priv->fuse.hs_curr_level[index] +
+ usb2->hs_curr_level_offset) <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL0_HS_CURR_LEVEL_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL0(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+ value &= ~((XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_MASK <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT) |
+ (XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_MASK <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_SHIFT) |
+ XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DR |
+ XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_CHRP_OVRD |
+ XUSB_PADCTL_USB2_OTG_PAD_CTL1_PD_DISC_OVRD);
+ value |= (priv->fuse.hs_term_range_adj <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL1_TERM_RANGE_ADJ_SHIFT) |
+ (priv->fuse.rpd_ctrl <<
+ XUSB_PADCTL_USB2_OTG_PAD_CTL1_RPD_CTRL_SHIFT);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_OTG_PADX_CTL1(index));
+
+ value = padctl_readl(padctl,
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
+ value &= ~(XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_MASK <<
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_LEV_SHIFT);
+ value |= XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPAD_CTL1_VREG_FIX18;
+ padctl_writel(padctl, value,
+ XUSB_PADCTL_USB2_BATTERY_CHRG_OTGPADX_CTL1(index));
+
+ err = regulator_enable(port->supply);
+ if (err)
+ return err;
+
+ mutex_lock(&padctl->lock);
+
+ if (pad->enable > 0) {
+ pad->enable++;
+ mutex_unlock(&padctl->lock);
+ return 0;
+ }
+
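+ /*
+ * First enable of the bias pad: run the tracking sequence. The "trk"
+ * clock is needed only while the tracking circuit runs and is gated
+ * again once tracking has completed.
+ */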
+ err = clk_prepare_enable(pad->clk);
+ if (err)
+ goto disable_regulator;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+ value &= ~((XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_MASK <<
+ XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_SHIFT) |
+ (XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_MASK <<
+ XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_SHIFT));
+ value |= (XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_VAL <<
+ XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_START_TIMER_SHIFT) |
+ (XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_VAL <<
+ XUSB_PADCTL_USB2_BIAS_PAD_CTL1_TRK_DONE_RESET_TIMER_SHIFT);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+ value &= ~XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+
+ udelay(1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+ value &= ~XUSB_PADCTL_USB2_BIAS_PAD_CTL1_PD_TRK;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+
+ udelay(50);
+
+ clk_disable_unprepare(pad->clk);
+
+ pad->enable++;
+ mutex_unlock(&padctl->lock);
+
+ return 0;
+
+disable_regulator:
+ regulator_disable(port->supply);
+ mutex_unlock(&padctl->lock);
+ return err;
+}
+
+static int tegra210_usb2_phy_power_off(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_usb2_pad *pad = to_usb2_pad(lane->pad);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ struct tegra_xusb_usb2_port *port;
+ u32 value;
+
+ port = tegra_xusb_find_usb2_port(padctl, lane->index);
+ if (!port) {
+ dev_err(&phy->dev, "no port found for USB2 lane %u\n",
+ lane->index);
+ return -ENODEV;
+ }
+
+ mutex_lock(&padctl->lock);
+
+ if (WARN_ON(pad->enable == 0))
+ goto out;
+
+ if (--pad->enable > 0)
+ goto out;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+ value |= XUSB_PADCTL_USB2_BIAS_PAD_CTL0_PD;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL0);
+
+out:
+ regulator_disable(port->supply);
+ mutex_unlock(&padctl->lock);
+ return 0;
+}
+
+static const struct phy_ops tegra210_usb2_phy_ops = {
+ .init = tegra210_usb2_phy_init,
+ .exit = tegra210_usb2_phy_exit,
+ .power_on = tegra210_usb2_phy_power_on,
+ .power_off = tegra210_usb2_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra210_usb2_pad_probe(struct tegra_xusb_padctl *padctl,
+ const struct tegra_xusb_pad_soc *soc,
+ struct device_node *np)
+{
+ struct tegra_xusb_usb2_pad *usb2;
+ struct tegra_xusb_pad *pad;
+ int err;
+
+ usb2 = kzalloc(sizeof(*usb2), GFP_KERNEL);
+ if (!usb2)
+ return ERR_PTR(-ENOMEM);
+
+ pad = &usb2->base;
+ pad->ops = &tegra210_usb2_lane_ops;
+ pad->soc = soc;
+
+ err = tegra_xusb_pad_init(pad, padctl, np);
+ if (err < 0) {
+ kfree(usb2);
+ goto out;
+ }
+
+ usb2->clk = devm_clk_get(&pad->dev, "trk");
+ if (IS_ERR(usb2->clk)) {
+ err = PTR_ERR(usb2->clk);
+ dev_err(&pad->dev, "failed to get trk clock: %d\n", err);
+ goto unregister;
+ }
+
+ err = tegra_xusb_pad_register(pad, &tegra210_usb2_phy_ops);
+ if (err < 0)
+ goto unregister;
+
+ dev_set_drvdata(&pad->dev, pad);
+
+ return pad;
+
+unregister:
+ device_unregister(&pad->dev);
+out:
+ return ERR_PTR(err);
+}
+
+static void tegra210_usb2_pad_remove(struct tegra_xusb_pad *pad)
+{
+ struct tegra_xusb_usb2_pad *usb2 = to_usb2_pad(pad);
+
+ kfree(usb2);
+}
+
+static const struct tegra_xusb_pad_ops tegra210_usb2_ops = {
+ .probe = tegra210_usb2_pad_probe,
+ .remove = tegra210_usb2_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra210_usb2_pad = {
+ .name = "usb2",
+ .num_lanes = ARRAY_SIZE(tegra210_usb2_lanes),
+ .lanes = tegra210_usb2_lanes,
+ .ops = &tegra210_usb2_ops,
+};
+
+static const char *tegra210_hsic_functions[] = {
+ "snps",
+ "xusb",
+};
+
+static const struct tegra_xusb_lane_soc tegra210_hsic_lanes[] = {
+ TEGRA210_LANE("hsic-0", 0x004, 14, 0x1, hsic),
+};
+
+static struct tegra_xusb_lane *
+tegra210_hsic_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+ unsigned int index)
+{
+ struct tegra_xusb_hsic_lane *hsic;
+ int err;
+
+ hsic = kzalloc(sizeof(*hsic), GFP_KERNEL);
+ if (!hsic)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&hsic->base.list);
+ hsic->base.soc = &pad->soc->lanes[index];
+ hsic->base.index = index;
+ hsic->base.pad = pad;
+ hsic->base.np = np;
+
+ err = tegra_xusb_lane_parse_dt(&hsic->base, np);
+ if (err < 0) {
+ kfree(hsic);
+ return ERR_PTR(err);
+ }
+
+ return &hsic->base;
+}
+
+static void tegra210_hsic_lane_remove(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
+
+ kfree(hsic);
+}
+
+static const struct tegra_xusb_lane_ops tegra210_hsic_lane_ops = {
+ .probe = tegra210_hsic_lane_probe,
+ .remove = tegra210_hsic_lane_remove,
+};
+
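+/* hand the HSIC pad tracking circuit over to the XUSB controller */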
+static int tegra210_hsic_phy_init(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_PAD_MUX);
+ value &= ~(XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_MASK <<
+ XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_SHIFT);
+ value |= XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_XUSB <<
+ XUSB_PADCTL_USB2_PAD_MUX_HSIC_PAD_TRK_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_PAD_MUX);
+
+ return tegra210_xusb_padctl_enable(padctl);
+}
+
+static int tegra210_hsic_phy_exit(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra210_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra210_hsic_phy_power_on(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_hsic_lane *hsic = to_hsic_lane(lane);
+ struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+ int err;
+
+ err = regulator_enable(pad->supply);
+ if (err)
+ return err;
+
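+ /* apply the per-lane strobe and data trim values */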
+ padctl_writel(padctl, hsic->strobe_trim,
+ XUSB_PADCTL_HSIC_STRB_TRIM_CONTROL);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+ value &= ~(XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_MASK <<
+ XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_SHIFT);
+ value |= (hsic->tx_rtune_p <<
+ XUSB_PADCTL_HSIC_PAD_CTL1_TX_RTUNEP_SHIFT);
+ padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL1(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL2(index));
+ value &= ~((XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_MASK <<
+ XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT) |
+ (XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_MASK <<
+ XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT));
+ value |= (hsic->rx_strobe_trim <<
+ XUSB_PADCTL_HSIC_PAD_CTL2_RX_STROBE_TRIM_SHIFT) |
+ (hsic->rx_data_trim <<
+ XUSB_PADCTL_HSIC_PAD_CTL2_RX_DATA_TRIM_SHIFT);
+ padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL2(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+ value &= ~(XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA0 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_RPU_DATA1 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_RPU_STROBE |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA0 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA1 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_STROBE |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA0 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA1 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_STROBE |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA0 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA1 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_STROBE);
+ value |= XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA0 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_RPD_DATA1 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_RPD_STROBE;
+ padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+
+ err = clk_prepare_enable(pad->clk);
+ if (err)
+ goto disable;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PAD_TRK_CTL);
+ value &= ~((XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_MASK <<
+ XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_SHIFT) |
+ (XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_MASK <<
+ XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_SHIFT));
+ value |= (XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_VAL <<
+ XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_START_TIMER_SHIFT) |
+ (XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_VAL <<
+ XUSB_PADCTL_HSIC_PAD_TRK_CTL_TRK_DONE_RESET_TIMER_SHIFT);
+ padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PAD_TRK_CTL);
+
+ udelay(1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PAD_TRK_CTL);
+ value &= ~XUSB_PADCTL_HSIC_PAD_TRK_CTL_PD_TRK;
+ padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PAD_TRK_CTL);
+
+ udelay(50);
+
+ clk_disable_unprepare(pad->clk);
+
+ return 0;
+
+disable:
+ regulator_disable(pad->supply);
+ return err;
+}
+
+static int tegra210_hsic_phy_power_off(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_hsic_pad *pad = to_hsic_pad(lane->pad);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ unsigned int index = lane->index;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+ value |= XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA0 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_DATA1 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_RX_STROBE |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA0 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_DATA1 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_ZI_STROBE |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA0 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_DATA1 |
+ XUSB_PADCTL_HSIC_PAD_CTL0_PD_TX_STROBE;
+ padctl_writel(padctl, value, XUSB_PADCTL_HSIC_PADX_CTL0(index));
+
+ regulator_disable(pad->supply);
+
+ return 0;
+}
+
+static const struct phy_ops tegra210_hsic_phy_ops = {
+ .init = tegra210_hsic_phy_init,
+ .exit = tegra210_hsic_phy_exit,
+ .power_on = tegra210_hsic_phy_power_on,
+ .power_off = tegra210_hsic_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra210_hsic_pad_probe(struct tegra_xusb_padctl *padctl,
+ const struct tegra_xusb_pad_soc *soc,
+ struct device_node *np)
+{
+ struct tegra_xusb_hsic_pad *hsic;
+ struct tegra_xusb_pad *pad;
+ int err;
+
+ hsic = kzalloc(sizeof(*hsic), GFP_KERNEL);
+ if (!hsic)
+ return ERR_PTR(-ENOMEM);
+
+ pad = &hsic->base;
+ pad->ops = &tegra210_hsic_lane_ops;
+ pad->soc = soc;
+
+ err = tegra_xusb_pad_init(pad, padctl, np);
+ if (err < 0) {
+ kfree(hsic);
+ goto out;
+ }
+
+ hsic->clk = devm_clk_get(&pad->dev, "trk");
+ if (IS_ERR(hsic->clk)) {
+ err = PTR_ERR(hsic->clk);
+ dev_err(&pad->dev, "failed to get trk clock: %d\n", err);
+ goto unregister;
+ }
+
+ err = tegra_xusb_pad_register(pad, &tegra210_hsic_phy_ops);
+ if (err < 0)
+ goto unregister;
+
+ dev_set_drvdata(&pad->dev, pad);
+
+ return pad;
+
+unregister:
+ device_unregister(&pad->dev);
+out:
+ return ERR_PTR(err);
+}
+
+static void tegra210_hsic_pad_remove(struct tegra_xusb_pad *pad)
+{
+ struct tegra_xusb_hsic_pad *hsic = to_hsic_pad(pad);
+
+ kfree(hsic);
+}
+
+static const struct tegra_xusb_pad_ops tegra210_hsic_ops = {
+ .probe = tegra210_hsic_pad_probe,
+ .remove = tegra210_hsic_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra210_hsic_pad = {
+ .name = "hsic",
+ .num_lanes = ARRAY_SIZE(tegra210_hsic_lanes),
+ .lanes = tegra210_hsic_lanes,
+ .ops = &tegra210_hsic_ops,
+};
+
+static const char *tegra210_pcie_functions[] = {
+ "pcie-x1",
+ "usb3-ss",
+ "sata",
+ "pcie-x4",
+};
+
+static const struct tegra_xusb_lane_soc tegra210_pcie_lanes[] = {
+ TEGRA210_LANE("pcie-0", 0x028, 12, 0x3, pcie),
+ TEGRA210_LANE("pcie-1", 0x028, 14, 0x3, pcie),
+ TEGRA210_LANE("pcie-2", 0x028, 16, 0x3, pcie),
+ TEGRA210_LANE("pcie-3", 0x028, 18, 0x3, pcie),
+ TEGRA210_LANE("pcie-4", 0x028, 20, 0x3, pcie),
+ TEGRA210_LANE("pcie-5", 0x028, 22, 0x3, pcie),
+ TEGRA210_LANE("pcie-6", 0x028, 24, 0x3, pcie),
+};
+
+static struct tegra_xusb_lane *
+tegra210_pcie_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+ unsigned int index)
+{
+ struct tegra_xusb_pcie_lane *pcie;
+ int err;
+
+ pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&pcie->base.list);
+ pcie->base.soc = &pad->soc->lanes[index];
+ pcie->base.index = index;
+ pcie->base.pad = pad;
+ pcie->base.np = np;
+
+ err = tegra_xusb_lane_parse_dt(&pcie->base, np);
+ if (err < 0) {
+ kfree(pcie);
+ return ERR_PTR(err);
+ }
+
+ return &pcie->base;
+}
+
+static void tegra210_pcie_lane_remove(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_pcie_lane *pcie = to_pcie_lane(lane);
+
+ kfree(pcie);
+}
+
+static const struct tegra_xusb_lane_ops tegra210_pcie_lane_ops = {
+ .probe = tegra210_pcie_lane_probe,
+ .remove = tegra210_pcie_lane_remove,
+};
+
+static int tegra210_pcie_phy_init(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra210_xusb_padctl_enable(lane->pad->padctl);
+}
+
+static int tegra210_pcie_phy_exit(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra210_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra210_pcie_phy_power_on(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ u32 value;
+ int err;
+
+ mutex_lock(&padctl->lock);
+
+ err = tegra210_pex_uphy_enable(padctl);
+ if (err < 0)
+ goto unlock;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+ value |= XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(lane->index);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+unlock:
+ mutex_unlock(&padctl->lock);
+ return err;
+}
+
+static int tegra210_pcie_phy_power_off(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+ value &= ~XUSB_PADCTL_USB3_PAD_MUX_PCIE_IDDQ_DISABLE(lane->index);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+ tegra210_pex_uphy_disable(padctl);
+
+ return 0;
+}
+
+static const struct phy_ops tegra210_pcie_phy_ops = {
+ .init = tegra210_pcie_phy_init,
+ .exit = tegra210_pcie_phy_exit,
+ .power_on = tegra210_pcie_phy_power_on,
+ .power_off = tegra210_pcie_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra210_pcie_pad_probe(struct tegra_xusb_padctl *padctl,
+ const struct tegra_xusb_pad_soc *soc,
+ struct device_node *np)
+{
+ struct tegra_xusb_pcie_pad *pcie;
+ struct tegra_xusb_pad *pad;
+ int err;
+
+ pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return ERR_PTR(-ENOMEM);
+
+ pad = &pcie->base;
+ pad->ops = &tegra210_pcie_lane_ops;
+ pad->soc = soc;
+
+ err = tegra_xusb_pad_init(pad, padctl, np);
+ if (err < 0) {
+ kfree(pcie);
+ goto out;
+ }
+
+ pcie->pll = devm_clk_get(&pad->dev, "pll");
+ if (IS_ERR(pcie->pll)) {
+ err = PTR_ERR(pcie->pll);
+ dev_err(&pad->dev, "failed to get PLL: %d\n", err);
+ goto unregister;
+ }
+
+ pcie->rst = devm_reset_control_get(&pad->dev, "phy");
+ if (IS_ERR(pcie->rst)) {
+ err = PTR_ERR(pcie->rst);
+ dev_err(&pad->dev, "failed to get PCIe pad reset: %d\n", err);
+ goto unregister;
+ }
+
+ err = tegra_xusb_pad_register(pad, &tegra210_pcie_phy_ops);
+ if (err < 0)
+ goto unregister;
+
+ dev_set_drvdata(&pad->dev, pad);
+
+ return pad;
+
+unregister:
+ device_unregister(&pad->dev);
+out:
+ return ERR_PTR(err);
+}
+
+static void tegra210_pcie_pad_remove(struct tegra_xusb_pad *pad)
+{
+ struct tegra_xusb_pcie_pad *pcie = to_pcie_pad(pad);
+
+ kfree(pcie);
+}
+
+static const struct tegra_xusb_pad_ops tegra210_pcie_ops = {
+ .probe = tegra210_pcie_pad_probe,
+ .remove = tegra210_pcie_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra210_pcie_pad = {
+ .name = "pcie",
+ .num_lanes = ARRAY_SIZE(tegra210_pcie_lanes),
+ .lanes = tegra210_pcie_lanes,
+ .ops = &tegra210_pcie_ops,
+};
+
+static const struct tegra_xusb_lane_soc tegra210_sata_lanes[] = {
+ TEGRA210_LANE("sata-0", 0x028, 30, 0x3, pcie),
+};
+
+static struct tegra_xusb_lane *
+tegra210_sata_lane_probe(struct tegra_xusb_pad *pad, struct device_node *np,
+ unsigned int index)
+{
+ struct tegra_xusb_sata_lane *sata;
+ int err;
+
+ sata = kzalloc(sizeof(*sata), GFP_KERNEL);
+ if (!sata)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&sata->base.list);
+ sata->base.soc = &pad->soc->lanes[index];
+ sata->base.index = index;
+ sata->base.pad = pad;
+ sata->base.np = np;
+
+ err = tegra_xusb_lane_parse_dt(&sata->base, np);
+ if (err < 0) {
+ kfree(sata);
+ return ERR_PTR(err);
+ }
+
+ return &sata->base;
+}
+
+static void tegra210_sata_lane_remove(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_sata_lane *sata = to_sata_lane(lane);
+
+ kfree(sata);
+}
+
+static const struct tegra_xusb_lane_ops tegra210_sata_lane_ops = {
+ .probe = tegra210_sata_lane_probe,
+ .remove = tegra210_sata_lane_remove,
+};
+
+static int tegra210_sata_phy_init(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra210_xusb_padctl_enable(lane->pad->padctl);
+}
+
+static int tegra210_sata_phy_exit(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ return tegra210_xusb_padctl_disable(lane->pad->padctl);
+}
+
+static int tegra210_sata_phy_power_on(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ u32 value;
+ int err;
+
+ mutex_lock(&padctl->lock);
+
+ err = tegra210_sata_uphy_enable(padctl, false);
+ if (err < 0)
+ goto unlock;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+ value |= XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(lane->index);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+unlock:
+ mutex_unlock(&padctl->lock);
+ return err;
+}
+
+static int tegra210_sata_phy_power_off(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB3_PAD_MUX);
+ value &= ~XUSB_PADCTL_USB3_PAD_MUX_SATA_IDDQ_DISABLE(lane->index);
+ padctl_writel(padctl, value, XUSB_PADCTL_USB3_PAD_MUX);
+
+ tegra210_sata_uphy_disable(padctl);
+
+ return 0;
+}
+
+static const struct phy_ops tegra210_sata_phy_ops = {
+ .init = tegra210_sata_phy_init,
+ .exit = tegra210_sata_phy_exit,
+ .power_on = tegra210_sata_phy_power_on,
+ .power_off = tegra210_sata_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct tegra_xusb_pad *
+tegra210_sata_pad_probe(struct tegra_xusb_padctl *padctl,
+ const struct tegra_xusb_pad_soc *soc,
+ struct device_node *np)
+{
+ struct tegra_xusb_sata_pad *sata;
+ struct tegra_xusb_pad *pad;
+ int err;
+
+ sata = kzalloc(sizeof(*sata), GFP_KERNEL);
+ if (!sata)
+ return ERR_PTR(-ENOMEM);
+
+ pad = &sata->base;
+ pad->ops = &tegra210_sata_lane_ops;
+ pad->soc = soc;
+
+ err = tegra_xusb_pad_init(pad, padctl, np);
+ if (err < 0) {
+ kfree(sata);
+ goto out;
+ }
+
+ sata->rst = devm_reset_control_get(&pad->dev, "phy");
+ if (IS_ERR(sata->rst)) {
+ err = PTR_ERR(sata->rst);
+ dev_err(&pad->dev, "failed to get SATA pad reset: %d\n", err);
+ goto unregister;
+ }
+
+ err = tegra_xusb_pad_register(pad, &tegra210_sata_phy_ops);
+ if (err < 0)
+ goto unregister;
+
+ dev_set_drvdata(&pad->dev, pad);
+
+ return pad;
+
+unregister:
+ device_unregister(&pad->dev);
+out:
+ return ERR_PTR(err);
+}
+
+static void tegra210_sata_pad_remove(struct tegra_xusb_pad *pad)
+{
+ struct tegra_xusb_sata_pad *sata = to_sata_pad(pad);
+
+ kfree(sata);
+}
+
+static const struct tegra_xusb_pad_ops tegra210_sata_ops = {
+ .probe = tegra210_sata_pad_probe,
+ .remove = tegra210_sata_pad_remove,
+};
+
+static const struct tegra_xusb_pad_soc tegra210_sata_pad = {
+ .name = "sata",
+ .num_lanes = ARRAY_SIZE(tegra210_sata_lanes),
+ .lanes = tegra210_sata_lanes,
+ .ops = &tegra210_sata_ops,
+};
+
+static const struct tegra_xusb_pad_soc * const tegra210_pads[] = {
+ &tegra210_usb2_pad,
+ &tegra210_hsic_pad,
+ &tegra210_pcie_pad,
+ &tegra210_sata_pad,
+};
+
+static int tegra210_usb2_port_enable(struct tegra_xusb_port *port)
+{
+ return 0;
+}
+
+static void tegra210_usb2_port_disable(struct tegra_xusb_port *port)
+{
+}
+
+static struct tegra_xusb_lane *
+tegra210_usb2_port_map(struct tegra_xusb_port *port)
+{
+ return tegra_xusb_find_lane(port->padctl, "usb2", port->index);
+}
+
+static const struct tegra_xusb_port_ops tegra210_usb2_port_ops = {
+ .enable = tegra210_usb2_port_enable,
+ .disable = tegra210_usb2_port_disable,
+ .map = tegra210_usb2_port_map,
+};
+
+static int tegra210_hsic_port_enable(struct tegra_xusb_port *port)
+{
+ return 0;
+}
+
+static void tegra210_hsic_port_disable(struct tegra_xusb_port *port)
+{
+}
+
+static struct tegra_xusb_lane *
+tegra210_hsic_port_map(struct tegra_xusb_port *port)
+{
+ return tegra_xusb_find_lane(port->padctl, "hsic", port->index);
+}
+
+static const struct tegra_xusb_port_ops tegra210_hsic_port_ops = {
+ .enable = tegra210_hsic_port_enable,
+ .disable = tegra210_hsic_port_disable,
+ .map = tegra210_hsic_port_map,
+};
+
+static int tegra210_usb3_port_enable(struct tegra_xusb_port *port)
+{
+ struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port);
+ struct tegra_xusb_padctl *padctl = port->padctl;
+ struct tegra_xusb_lane *lane = usb3->base.lane;
+ unsigned int index = port->index;
+ u32 value;
+ int err;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+
+ if (!usb3->internal)
+ value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
+ else
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_INTERNAL(index);
+
+ value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(index);
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(index, usb3->port);
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+
+ /*
+ * TODO: move this code into the PCIe/SATA PHY ->power_on() callbacks
+ * and conditionalize based on mux function? This seems to work, but
+ * might not be the exact proper sequence.
+ */
+ err = regulator_enable(usb3->supply);
+ if (err < 0)
+ return err;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL1(index));
+ value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_MASK <<
+ XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_SHIFT);
+ value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_VAL <<
+ XUSB_PADCTL_UPHY_USB3_PAD_ECTL1_TX_TERM_CTRL_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL1(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL2(index));
+ value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_MASK <<
+ XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_SHIFT);
+ value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_VAL <<
+ XUSB_PADCTL_UPHY_USB3_PAD_ECTL2_RX_CTLE_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL2(index));
+
+ padctl_writel(padctl, XUSB_PADCTL_UPHY_USB3_PAD_ECTL3_RX_DFE_VAL,
+ XUSB_PADCTL_UPHY_USB3_PADX_ECTL3(index));
+
+ value = padctl_readl(padctl, XUSB_PADCTL_UPHY_USB3_PADX_ECTL4(index));
+ value &= ~(XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_MASK <<
+ XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_SHIFT);
+ value |= XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_VAL <<
+ XUSB_PADCTL_UPHY_USB3_PAD_ECTL4_RX_CDR_CTRL_SHIFT;
+ padctl_writel(padctl, value, XUSB_PADCTL_UPHY_USB3_PADX_ECTL4(index));
+
+ padctl_writel(padctl, XUSB_PADCTL_UPHY_USB3_PAD_ECTL6_RX_EQ_CTRL_H_VAL,
+ XUSB_PADCTL_UPHY_USB3_PADX_ECTL6(index));
+
+ if (lane->pad == padctl->sata)
+ err = tegra210_sata_uphy_enable(padctl, true);
+ else
+ err = tegra210_pex_uphy_enable(padctl);
+
+ if (err) {
+ dev_err(&port->dev, "%s: failed to enable UPHY: %d\n",
+ __func__, err);
+ return err;
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ return 0;
+}
+
+static void tegra210_usb3_port_disable(struct tegra_xusb_port *port)
+{
+ struct tegra_xusb_usb3_port *usb3 = to_usb3_port(port);
+ struct tegra_xusb_padctl *padctl = port->padctl;
+ struct tegra_xusb_lane *lane = port->lane;
+ unsigned int index = port->index;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN_EARLY(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_CLAMP_EN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ usleep_range(250, 350);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM1);
+ value |= XUSB_PADCTL_ELPG_PROGRAM1_SSPX_ELPG_VCORE_DOWN(index);
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM1);
+
+ if (lane->pad == padctl->sata)
+ tegra210_sata_uphy_disable(padctl);
+ else
+ tegra210_pex_uphy_disable(padctl);
+
+ regulator_disable(usb3->supply);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_SS_PORT_MAP);
+ value &= ~XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP_MASK(index);
+ value |= XUSB_PADCTL_SS_PORT_MAP_PORTX_MAP(index, 0x7);
+ padctl_writel(padctl, value, XUSB_PADCTL_SS_PORT_MAP);
+}
+
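+/*
+ * Describes which UPHY lane each USB3 port can be routed to; entries are
+ * { port, pad name, lane index }, terminated by a NULL pad name.
+ */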
+static const struct tegra_xusb_lane_map tegra210_usb3_map[] = {
+ { 0, "pcie", 6 },
+ { 1, "pcie", 5 },
+ { 2, "pcie", 0 },
+ { 2, "pcie", 3 },
+ { 3, "pcie", 4 },
+ { 3, "pcie", 4 },
+ { 0, NULL, 0 }
+};
+
+static struct tegra_xusb_lane *
+tegra210_usb3_port_map(struct tegra_xusb_port *port)
+{
+ return tegra_xusb_port_find_lane(port, tegra210_usb3_map, "usb3-ss");
+}
+
+static const struct tegra_xusb_port_ops tegra210_usb3_port_ops = {
+ .enable = tegra210_usb3_port_enable,
+ .disable = tegra210_usb3_port_disable,
+ .map = tegra210_usb3_port_map,
+};
+
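+/*
+ * Read the USB calibration values (per-pad HS current level, HS termination
+ * range adjustment and RPD control) from the on-chip fuses.
+ */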
+static int
+tegra210_xusb_read_fuse_calibration(struct tegra210_xusb_fuse_calibration *fuse)
+{
+ unsigned int i;
+ u32 value;
+ int err;
+
+ err = tegra_fuse_readl(TEGRA_FUSE_SKU_CALIB_0, &value);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(fuse->hs_curr_level); i++) {
+ fuse->hs_curr_level[i] =
+ (value >> FUSE_SKU_CALIB_HS_CURR_LEVEL_PADX_SHIFT(i)) &
+ FUSE_SKU_CALIB_HS_CURR_LEVEL_PAD_MASK;
+ }
+
+ fuse->hs_term_range_adj =
+ (value >> FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_SHIFT) &
+ FUSE_SKU_CALIB_HS_TERM_RANGE_ADJ_MASK;
+
+ err = tegra_fuse_readl(TEGRA_FUSE_USB_CALIB_EXT_0, &value);
+ if (err < 0)
+ return err;
+
+ fuse->rpd_ctrl =
+ (value >> FUSE_USB_CALIB_EXT_RPD_CTRL_SHIFT) &
+ FUSE_USB_CALIB_EXT_RPD_CTRL_MASK;
+
+ return 0;
+}
+
+static struct tegra_xusb_padctl *
+tegra210_xusb_padctl_probe(struct device *dev,
+ const struct tegra_xusb_padctl_soc *soc)
+{
+ struct tegra210_xusb_padctl *padctl;
+ int err;
+
+ padctl = devm_kzalloc(dev, sizeof(*padctl), GFP_KERNEL);
+ if (!padctl)
+ return ERR_PTR(-ENOMEM);
+
+ padctl->base.dev = dev;
+ padctl->base.soc = soc;
+
+ err = tegra210_xusb_read_fuse_calibration(&padctl->fuse);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return &padctl->base;
+}
+
+static void tegra210_xusb_padctl_remove(struct tegra_xusb_padctl *padctl)
+{
+}
+
+static const struct tegra_xusb_padctl_ops tegra210_xusb_padctl_ops = {
+ .probe = tegra210_xusb_padctl_probe,
+ .remove = tegra210_xusb_padctl_remove,
+ .usb3_set_lfps_detect = tegra210_usb3_set_lfps_detect,
+ .hsic_set_idle = tegra210_hsic_set_idle,
+};
+
+const struct tegra_xusb_padctl_soc tegra210_xusb_padctl_soc = {
+ .num_pads = ARRAY_SIZE(tegra210_pads),
+ .pads = tegra210_pads,
+ .ports = {
+ .usb2 = {
+ .ops = &tegra210_usb2_port_ops,
+ .count = 4,
+ },
+ .hsic = {
+ .ops = &tegra210_hsic_port_ops,
+ .count = 1,
+ },
+ .usb3 = {
+ .ops = &tegra210_usb3_port_ops,
+ .count = 4,
+ },
+ },
+ .ops = &tegra210_xusb_padctl_ops,
+};
+EXPORT_SYMBOL_GPL(tegra210_xusb_padctl_soc);
+
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_DESCRIPTION("NVIDIA Tegra 210 XUSB Pad Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
new file mode 100644
index 000000000..ec83dfdbc
--- /dev/null
+++ b/drivers/phy/tegra/xusb.c
@@ -0,0 +1,1021 @@
+/*
+ * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "xusb.h"
+
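+/*
+ * Translate a zero-argument PHY phandle to the PHY registered for the lane
+ * whose device tree node matches the phandle.
+ */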
+static struct phy *tegra_xusb_pad_of_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct tegra_xusb_pad *pad = dev_get_drvdata(dev);
+ struct phy *phy = NULL;
+ unsigned int i;
+
+ if (args->args_count != 0)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < pad->soc->num_lanes; i++) {
+ if (!pad->lanes[i])
+ continue;
+
+ if (pad->lanes[i]->dev.of_node == args->np) {
+ phy = pad->lanes[i];
+ break;
+ }
+ }
+
+ if (phy == NULL)
+ phy = ERR_PTR(-ENODEV);
+
+ return phy;
+}
+
+static const struct of_device_id tegra_xusb_padctl_of_match[] = {
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+ {
+ .compatible = "nvidia,tegra124-xusb-padctl",
+ .data = &tegra124_xusb_padctl_soc,
+ },
+#endif
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+ {
+ .compatible = "nvidia,tegra210-xusb-padctl",
+ .data = &tegra210_xusb_padctl_soc,
+ },
+#endif
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
+
+static struct device_node *
+tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name)
+{
+ /*
+ * of_find_node_by_name() drops a reference, so make sure to grab one.
+ */
+ struct device_node *np = of_node_get(padctl->dev->of_node);
+
+ np = of_find_node_by_name(np, "pads");
+ if (np)
+ np = of_find_node_by_name(np, name);
+
+ return np;
+}
+
+static struct device_node *
+tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index)
+{
+ /*
+ * of_find_node_by_name() drops a reference, so make sure to grab one.
+ */
+ struct device_node *np = of_node_get(pad->dev.of_node);
+
+ np = of_find_node_by_name(np, "lanes");
+ if (!np)
+ return NULL;
+
+ return of_find_node_by_name(np, pad->soc->lanes[index].name);
+}
+
+int tegra_xusb_lane_lookup_function(struct tegra_xusb_lane *lane,
+ const char *function)
+{
+ unsigned int i;
+
+ for (i = 0; i < lane->soc->num_funcs; i++)
+ if (strcmp(function, lane->soc->funcs[i]) == 0)
+ return i;
+
+ return -EINVAL;
+}
+
+int tegra_xusb_lane_parse_dt(struct tegra_xusb_lane *lane,
+ struct device_node *np)
+{
+ struct device *dev = &lane->pad->dev;
+ const char *function;
+ int err;
+
+ err = of_property_read_string(np, "nvidia,function", &function);
+ if (err < 0)
+ return err;
+
+ err = tegra_xusb_lane_lookup_function(lane, function);
+ if (err < 0) {
+ dev_err(dev, "invalid function \"%s\" for lane \"%s\"\n",
+ function, np->name);
+ return err;
+ }
+
+ lane->function = err;
+
+ return 0;
+}
+
+static void tegra_xusb_lane_destroy(struct phy *phy)
+{
+ if (phy) {
+ struct tegra_xusb_lane *lane = phy_get_drvdata(phy);
+
+ lane->pad->ops->remove(lane);
+ phy_destroy(phy);
+ }
+}
+
+static void tegra_xusb_pad_release(struct device *dev)
+{
+ struct tegra_xusb_pad *pad = to_tegra_xusb_pad(dev);
+
+ pad->soc->ops->remove(pad);
+}
+
+static struct device_type tegra_xusb_pad_type = {
+ .release = tegra_xusb_pad_release,
+};
+
+int tegra_xusb_pad_init(struct tegra_xusb_pad *pad,
+ struct tegra_xusb_padctl *padctl,
+ struct device_node *np)
+{
+ int err;
+
+ device_initialize(&pad->dev);
+ INIT_LIST_HEAD(&pad->list);
+ pad->dev.parent = padctl->dev;
+ pad->dev.type = &tegra_xusb_pad_type;
+ pad->dev.of_node = np;
+ pad->padctl = padctl;
+
+ err = dev_set_name(&pad->dev, "%s", pad->soc->name);
+ if (err < 0)
+ goto unregister;
+
+ err = device_add(&pad->dev);
+ if (err < 0)
+ goto unregister;
+
+ return 0;
+
+unregister:
+ device_unregister(&pad->dev);
+ return err;
+}
+
+int tegra_xusb_pad_register(struct tegra_xusb_pad *pad,
+ const struct phy_ops *ops)
+{
+ struct device_node *children;
+ struct phy *lane;
+ unsigned int i;
+ int err;
+
+ children = of_find_node_by_name(pad->dev.of_node, "lanes");
+ if (!children)
+ return -ENODEV;
+
+ pad->lanes = devm_kcalloc(&pad->dev, pad->soc->num_lanes, sizeof(lane),
+ GFP_KERNEL);
+ if (!pad->lanes) {
+ of_node_put(children);
+ return -ENOMEM;
+ }
+
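+ /*
+ * Create a PHY for each lane that is enabled in device tree; on
+ * failure all previously created lanes are torn down in reverse order.
+ */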
+ for (i = 0; i < pad->soc->num_lanes; i++) {
+ struct device_node *np = tegra_xusb_pad_find_phy_node(pad, i);
+ struct tegra_xusb_lane *lane;
+
+ /* skip disabled lanes */
+ if (!np || !of_device_is_available(np)) {
+ of_node_put(np);
+ continue;
+ }
+
+ pad->lanes[i] = phy_create(&pad->dev, np, ops);
+ if (IS_ERR(pad->lanes[i])) {
+ err = PTR_ERR(pad->lanes[i]);
+ of_node_put(np);
+ goto remove;
+ }
+
+ lane = pad->ops->probe(pad, np, i);
+ if (IS_ERR(lane)) {
+ phy_destroy(pad->lanes[i]);
+ err = PTR_ERR(lane);
+ goto remove;
+ }
+
+ list_add_tail(&lane->list, &pad->padctl->lanes);
+ phy_set_drvdata(pad->lanes[i], lane);
+ }
+
+ pad->provider = of_phy_provider_register_full(&pad->dev, children,
+ tegra_xusb_pad_of_xlate);
+ if (IS_ERR(pad->provider)) {
+ err = PTR_ERR(pad->provider);
+ goto remove;
+ }
+
+ return 0;
+
+remove:
+ while (i--)
+ tegra_xusb_lane_destroy(pad->lanes[i]);
+
+ of_node_put(children);
+
+ return err;
+}
+
+void tegra_xusb_pad_unregister(struct tegra_xusb_pad *pad)
+{
+ unsigned int i = pad->soc->num_lanes;
+
+ of_phy_provider_unregister(pad->provider);
+
+ while (i--)
+ tegra_xusb_lane_destroy(pad->lanes[i]);
+
+ device_unregister(&pad->dev);
+}
+
+static struct tegra_xusb_pad *
+tegra_xusb_pad_create(struct tegra_xusb_padctl *padctl,
+ const struct tegra_xusb_pad_soc *soc)
+{
+ struct tegra_xusb_pad *pad;
+ struct device_node *np;
+ int err;
+
+ np = tegra_xusb_find_pad_node(padctl, soc->name);
+ if (!np || !of_device_is_available(np))
+ return NULL;
+
+ pad = soc->ops->probe(padctl, soc, np);
+ if (IS_ERR(pad)) {
+ err = PTR_ERR(pad);
+ dev_err(padctl->dev, "failed to create pad %s: %d\n",
+ soc->name, err);
+ return ERR_PTR(err);
+ }
+
+ /* XXX move this into ->probe() to avoid string comparison */
+ if (strcmp(soc->name, "pcie") == 0)
+ padctl->pcie = pad;
+
+ if (strcmp(soc->name, "sata") == 0)
+ padctl->sata = pad;
+
+ if (strcmp(soc->name, "usb2") == 0)
+ padctl->usb2 = pad;
+
+ if (strcmp(soc->name, "ulpi") == 0)
+ padctl->ulpi = pad;
+
+ if (strcmp(soc->name, "hsic") == 0)
+ padctl->hsic = pad;
+
+ return pad;
+}
+
+static void __tegra_xusb_remove_pads(struct tegra_xusb_padctl *padctl)
+{
+ struct tegra_xusb_pad *pad, *tmp;
+
+ list_for_each_entry_safe_reverse(pad, tmp, &padctl->pads, list) {
+ list_del(&pad->list);
+ tegra_xusb_pad_unregister(pad);
+ }
+}
+
+static void tegra_xusb_remove_pads(struct tegra_xusb_padctl *padctl)
+{
+ mutex_lock(&padctl->lock);
+ __tegra_xusb_remove_pads(padctl);
+ mutex_unlock(&padctl->lock);
+}
+
+static void tegra_xusb_lane_program(struct tegra_xusb_lane *lane)
+{
+ struct tegra_xusb_padctl *padctl = lane->pad->padctl;
+ const struct tegra_xusb_lane_soc *soc = lane->soc;
+ u32 value;
+
+ /* choose function */
+ value = padctl_readl(padctl, soc->offset);
+ value &= ~(soc->mask << soc->shift);
+ value |= lane->function << soc->shift;
+ padctl_writel(padctl, value, soc->offset);
+}
+
+static void tegra_xusb_pad_program(struct tegra_xusb_pad *pad)
+{
+ unsigned int i;
+
+ for (i = 0; i < pad->soc->num_lanes; i++) {
+ struct tegra_xusb_lane *lane;
+
+ if (pad->lanes[i]) {
+ lane = phy_get_drvdata(pad->lanes[i]);
+ tegra_xusb_lane_program(lane);
+ }
+ }
+}
+
+static int tegra_xusb_setup_pads(struct tegra_xusb_padctl *padctl)
+{
+ struct tegra_xusb_pad *pad;
+ unsigned int i;
+
+ mutex_lock(&padctl->lock);
+
+ for (i = 0; i < padctl->soc->num_pads; i++) {
+ const struct tegra_xusb_pad_soc *soc = padctl->soc->pads[i];
+ int err;
+
+ pad = tegra_xusb_pad_create(padctl, soc);
+ if (IS_ERR(pad)) {
+ err = PTR_ERR(pad);
+ dev_err(padctl->dev, "failed to create pad %s: %d\n",
+ soc->name, err);
+ __tegra_xusb_remove_pads(padctl);
+ mutex_unlock(&padctl->lock);
+ return err;
+ }
+
+ if (!pad)
+ continue;
+
+ list_add_tail(&pad->list, &padctl->pads);
+ }
+
+ list_for_each_entry(pad, &padctl->pads, list)
+ tegra_xusb_pad_program(pad);
+
+ mutex_unlock(&padctl->lock);
+ return 0;
+}
+
+static bool tegra_xusb_lane_check(struct tegra_xusb_lane *lane,
+ const char *function)
+{
+ const char *func = lane->soc->funcs[lane->function];
+
+ return strcmp(function, func) == 0;
+}
+
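+/*
+ * Look up a lane by the "<type>-<index>" name it was registered under, e.g.
+ * "pcie-3" or "sata-0".
+ */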
+struct tegra_xusb_lane *tegra_xusb_find_lane(struct tegra_xusb_padctl *padctl,
+ const char *type,
+ unsigned int index)
+{
+ struct tegra_xusb_lane *lane, *hit = ERR_PTR(-ENODEV);
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
+ if (!name)
+ return ERR_PTR(-ENOMEM);
+
+ list_for_each_entry(lane, &padctl->lanes, list) {
+ if (strcmp(lane->soc->name, name) == 0) {
+ hit = lane;
+ break;
+ }
+ }
+
+ kfree(name);
+ return hit;
+}
+
+struct tegra_xusb_lane *
+tegra_xusb_port_find_lane(struct tegra_xusb_port *port,
+ const struct tegra_xusb_lane_map *map,
+ const char *function)
+{
+ struct tegra_xusb_lane *lane, *match = ERR_PTR(-ENODEV);
+
+ for (map = map; map->type; map++) {
+ if (port->index != map->port)
+ continue;
+
+ lane = tegra_xusb_find_lane(port->padctl, map->type,
+ map->index);
+ if (IS_ERR(lane))
+ continue;
+
+ if (!tegra_xusb_lane_check(lane, function))
+ continue;
+
+ if (!IS_ERR(match))
+ dev_err(&port->dev, "conflicting match: %s-%u / %s\n",
+ map->type, map->index, match->soc->name);
+ else
+ match = lane;
+ }
+
+ return match;
+}
+
+static struct device_node *
+tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
+ unsigned int index)
+{
+ /*
+ * of_find_node_by_name() drops a reference, so make sure to grab one.
+ */
+ struct device_node *np = of_node_get(padctl->dev->of_node);
+
+ np = of_find_node_by_name(np, "ports");
+ if (np) {
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
+ np = of_find_node_by_name(np, name);
+ kfree(name);
+ }
+
+ return np;
+}
+
+struct tegra_xusb_port *
+tegra_xusb_find_port(struct tegra_xusb_padctl *padctl, const char *type,
+ unsigned int index)
+{
+ struct tegra_xusb_port *port;
+ struct device_node *np;
+
+ np = tegra_xusb_find_port_node(padctl, type, index);
+ if (!np)
+ return NULL;
+
+ list_for_each_entry(port, &padctl->ports, list) {
+ if (np == port->dev.of_node) {
+ of_node_put(np);
+ return port;
+ }
+ }
+
+ of_node_put(np);
+
+ return NULL;
+}
+
+struct tegra_xusb_usb2_port *
+tegra_xusb_find_usb2_port(struct tegra_xusb_padctl *padctl, unsigned int index)
+{
+ struct tegra_xusb_port *port;
+
+ port = tegra_xusb_find_port(padctl, "usb2", index);
+ if (port)
+ return to_usb2_port(port);
+
+ return NULL;
+}
+
+struct tegra_xusb_usb3_port *
+tegra_xusb_find_usb3_port(struct tegra_xusb_padctl *padctl, unsigned int index)
+{
+ struct tegra_xusb_port *port;
+
+ port = tegra_xusb_find_port(padctl, "usb3", index);
+ if (port)
+ return to_usb3_port(port);
+
+ return NULL;
+}
+
+static void tegra_xusb_port_release(struct device *dev)
+{
+}
+
+static struct device_type tegra_xusb_port_type = {
+ .release = tegra_xusb_port_release,
+};
+
+static int tegra_xusb_port_init(struct tegra_xusb_port *port,
+ struct tegra_xusb_padctl *padctl,
+ struct device_node *np,
+ const char *name,
+ unsigned int index)
+{
+ int err;
+
+ INIT_LIST_HEAD(&port->list);
+ port->padctl = padctl;
+ port->index = index;
+
+ device_initialize(&port->dev);
+ port->dev.type = &tegra_xusb_port_type;
+ port->dev.of_node = of_node_get(np);
+ port->dev.parent = padctl->dev;
+
+ err = dev_set_name(&port->dev, "%s-%u", name, index);
+ if (err < 0)
+ goto unregister;
+
+ err = device_add(&port->dev);
+ if (err < 0)
+ goto unregister;
+
+ return 0;
+
+unregister:
+ device_unregister(&port->dev);
+ return err;
+}
+
+static void tegra_xusb_port_unregister(struct tegra_xusb_port *port)
+{
+ device_unregister(&port->dev);
+}
+
+static int tegra_xusb_usb2_port_parse_dt(struct tegra_xusb_usb2_port *usb2)
+{
+ struct tegra_xusb_port *port = &usb2->base;
+ struct device_node *np = port->dev.of_node;
+
+ usb2->internal = of_property_read_bool(np, "nvidia,internal");
+
+ usb2->supply = devm_regulator_get(&port->dev, "vbus");
+ if (IS_ERR(usb2->supply))
+ return PTR_ERR(usb2->supply);
+
+ return 0;
+}
+
+static int tegra_xusb_add_usb2_port(struct tegra_xusb_padctl *padctl,
+ unsigned int index)
+{
+ struct tegra_xusb_usb2_port *usb2;
+ struct device_node *np;
+ int err = 0;
+
+ /*
+ * USB2 ports don't require additional properties, but if the port is
+ * marked as disabled there is no reason to register it.
+ */
+ np = tegra_xusb_find_port_node(padctl, "usb2", index);
+ if (!np || !of_device_is_available(np))
+ goto out;
+
+ usb2 = devm_kzalloc(padctl->dev, sizeof(*usb2), GFP_KERNEL);
+ if (!usb2) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = tegra_xusb_port_init(&usb2->base, padctl, np, "usb2", index);
+ if (err < 0)
+ goto out;
+
+ usb2->base.ops = padctl->soc->ports.usb2.ops;
+
+ usb2->base.lane = usb2->base.ops->map(&usb2->base);
+ if (IS_ERR(usb2->base.lane)) {
+ err = PTR_ERR(usb2->base.lane);
+ goto out;
+ }
+
+ err = tegra_xusb_usb2_port_parse_dt(usb2);
+ if (err < 0) {
+ tegra_xusb_port_unregister(&usb2->base);
+ goto out;
+ }
+
+ list_add_tail(&usb2->base.list, &padctl->ports);
+
+out:
+ of_node_put(np);
+ return err;
+}
+
+static int tegra_xusb_ulpi_port_parse_dt(struct tegra_xusb_ulpi_port *ulpi)
+{
+ struct tegra_xusb_port *port = &ulpi->base;
+ struct device_node *np = port->dev.of_node;
+
+ ulpi->internal = of_property_read_bool(np, "nvidia,internal");
+
+ return 0;
+}
+
+static int tegra_xusb_add_ulpi_port(struct tegra_xusb_padctl *padctl,
+ unsigned int index)
+{
+ struct tegra_xusb_ulpi_port *ulpi;
+ struct device_node *np;
+ int err = 0;
+
+ np = tegra_xusb_find_port_node(padctl, "ulpi", index);
+ if (!np || !of_device_is_available(np))
+ goto out;
+
+ ulpi = devm_kzalloc(padctl->dev, sizeof(*ulpi), GFP_KERNEL);
+ if (!ulpi) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = tegra_xusb_port_init(&ulpi->base, padctl, np, "ulpi", index);
+ if (err < 0)
+ goto out;
+
+ ulpi->base.ops = padctl->soc->ports.ulpi.ops;
+
+ ulpi->base.lane = ulpi->base.ops->map(&ulpi->base);
+ if (IS_ERR(ulpi->base.lane)) {
+ err = PTR_ERR(ulpi->base.lane);
+ goto out;
+ }
+
+ err = tegra_xusb_ulpi_port_parse_dt(ulpi);
+ if (err < 0) {
+ tegra_xusb_port_unregister(&ulpi->base);
+ goto out;
+ }
+
+ list_add_tail(&ulpi->base.list, &padctl->ports);
+
+out:
+ of_node_put(np);
+ return err;
+}
+
+static int tegra_xusb_hsic_port_parse_dt(struct tegra_xusb_hsic_port *hsic)
+{
+ /* XXX */
+ return 0;
+}
+
+static int tegra_xusb_add_hsic_port(struct tegra_xusb_padctl *padctl,
+ unsigned int index)
+{
+ struct tegra_xusb_hsic_port *hsic;
+ struct device_node *np;
+ int err = 0;
+
+ np = tegra_xusb_find_port_node(padctl, "hsic", index);
+ if (!np || !of_device_is_available(np))
+ goto out;
+
+ hsic = devm_kzalloc(padctl->dev, sizeof(*hsic), GFP_KERNEL);
+ if (!hsic) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = tegra_xusb_port_init(&hsic->base, padctl, np, "hsic", index);
+ if (err < 0)
+ goto out;
+
+ hsic->base.ops = padctl->soc->ports.hsic.ops;
+
+ hsic->base.lane = hsic->base.ops->map(&hsic->base);
+ if (IS_ERR(hsic->base.lane)) {
+ err = PTR_ERR(hsic->base.lane);
+ goto out;
+ }
+
+ err = tegra_xusb_hsic_port_parse_dt(hsic);
+ if (err < 0) {
+ tegra_xusb_port_unregister(&hsic->base);
+ goto out;
+ }
+
+ list_add_tail(&hsic->base.list, &padctl->ports);
+
+out:
+ of_node_put(np);
+ return err;
+}
+
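+/*
+ * Parse the supplemental USB3 port properties. An illustrative example of
+ * such a port node (the property values shown are made up):
+ *
+ * usb3-0 {
+ * nvidia,usb2-companion = <1>;
+ * vbus-supply = <&vdd_usb>;
+ * };
+ */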
+static int tegra_xusb_usb3_port_parse_dt(struct tegra_xusb_usb3_port *usb3)
+{
+ struct tegra_xusb_port *port = &usb3->base;
+ struct device_node *np = port->dev.of_node;
+ u32 value;
+ int err;
+
+ err = of_property_read_u32(np, "nvidia,usb2-companion", &value);
+ if (err < 0) {
+ dev_err(&port->dev, "failed to read port: %d\n", err);
+ return err;
+ }
+
+ usb3->port = value;
+
+ usb3->internal = of_property_read_bool(np, "nvidia,internal");
+
+ usb3->supply = devm_regulator_get(&port->dev, "vbus");
+ if (IS_ERR(usb3->supply))
+ return PTR_ERR(usb3->supply);
+
+ return 0;
+}
+
+static int tegra_xusb_add_usb3_port(struct tegra_xusb_padctl *padctl,
+ unsigned int index)
+{
+ struct tegra_xusb_usb3_port *usb3;
+ struct device_node *np;
+ int err = 0;
+
+ /*
+ * If there is no supplemental configuration in the device tree the
+ * port is unusable. But it is valid to configure only a single port,
+ * hence return 0 instead of an error to allow ports to be optional.
+ */
+ np = tegra_xusb_find_port_node(padctl, "usb3", index);
+ if (!np || !of_device_is_available(np))
+ goto out;
+
+ usb3 = devm_kzalloc(padctl->dev, sizeof(*usb3), GFP_KERNEL);
+ if (!usb3) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = tegra_xusb_port_init(&usb3->base, padctl, np, "usb3", index);
+ if (err < 0)
+ goto out;
+
+ usb3->base.ops = padctl->soc->ports.usb3.ops;
+
+ usb3->base.lane = usb3->base.ops->map(&usb3->base);
+ if (IS_ERR(usb3->base.lane)) {
+ err = PTR_ERR(usb3->base.lane);
+ goto out;
+ }
+
+ err = tegra_xusb_usb3_port_parse_dt(usb3);
+ if (err < 0) {
+ tegra_xusb_port_unregister(&usb3->base);
+ goto out;
+ }
+
+ list_add_tail(&usb3->base.list, &padctl->ports);
+
+out:
+ of_node_put(np);
+ return err;
+}
+
+static void __tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
+{
+ struct tegra_xusb_port *port, *tmp;
+
+ list_for_each_entry_safe_reverse(port, tmp, &padctl->ports, list) {
+ list_del(&port->list);
+ tegra_xusb_port_unregister(port);
+ }
+}
+
+static int tegra_xusb_setup_ports(struct tegra_xusb_padctl *padctl)
+{
+ struct tegra_xusb_port *port;
+ unsigned int i;
+ int err = 0;
+
+ mutex_lock(&padctl->lock);
+
+ for (i = 0; i < padctl->soc->ports.usb2.count; i++) {
+ err = tegra_xusb_add_usb2_port(padctl, i);
+ if (err < 0)
+ goto remove_ports;
+ }
+
+ for (i = 0; i < padctl->soc->ports.ulpi.count; i++) {
+ err = tegra_xusb_add_ulpi_port(padctl, i);
+ if (err < 0)
+ goto remove_ports;
+ }
+
+ for (i = 0; i < padctl->soc->ports.hsic.count; i++) {
+ err = tegra_xusb_add_hsic_port(padctl, i);
+ if (err < 0)
+ goto remove_ports;
+ }
+
+ for (i = 0; i < padctl->soc->ports.usb3.count; i++) {
+ err = tegra_xusb_add_usb3_port(padctl, i);
+ if (err < 0)
+ goto remove_ports;
+ }
+
+ list_for_each_entry(port, &padctl->ports, list) {
+ err = port->ops->enable(port);
+ if (err < 0)
+ dev_err(padctl->dev, "failed to enable port %s: %d\n",
+ dev_name(&port->dev), err);
+ }
+
+ goto unlock;
+
+remove_ports:
+ __tegra_xusb_remove_ports(padctl);
+unlock:
+ mutex_unlock(&padctl->lock);
+ return err;
+}
+
+static void tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl)
+{
+ mutex_lock(&padctl->lock);
+ __tegra_xusb_remove_ports(padctl);
+ mutex_unlock(&padctl->lock);
+}
+
+static int tegra_xusb_padctl_probe(struct platform_device *pdev)
+{
+ struct device_node *np = of_node_get(pdev->dev.of_node);
+ const struct tegra_xusb_padctl_soc *soc;
+ struct tegra_xusb_padctl *padctl;
+ const struct of_device_id *match;
+ struct resource *res;
+ int err;
+
+ /* for backwards compatibility with old device trees */
+ np = of_find_node_by_name(np, "pads");
+ if (!np) {
+ dev_warn(&pdev->dev, "deprecated DT, using legacy driver\n");
+ return tegra_xusb_padctl_legacy_probe(pdev);
+ }
+
+ of_node_put(np);
+
+ match = of_match_node(tegra_xusb_padctl_of_match, pdev->dev.of_node);
+ soc = match->data;
+
+ padctl = soc->ops->probe(&pdev->dev, soc);
+ if (IS_ERR(padctl))
+ return PTR_ERR(padctl);
+
+ platform_set_drvdata(pdev, padctl);
+ INIT_LIST_HEAD(&padctl->ports);
+ INIT_LIST_HEAD(&padctl->lanes);
+ INIT_LIST_HEAD(&padctl->pads);
+ mutex_init(&padctl->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ padctl->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(padctl->regs)) {
+ err = PTR_ERR(padctl->regs);
+ goto remove;
+ }
+
+ padctl->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(padctl->rst)) {
+ err = PTR_ERR(padctl->rst);
+ goto remove;
+ }
+
+ err = reset_control_deassert(padctl->rst);
+ if (err < 0)
+ goto remove;
+
+ err = tegra_xusb_setup_pads(padctl);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to setup pads: %d\n", err);
+ goto reset;
+ }
+
+ err = tegra_xusb_setup_ports(padctl);
+ if (err) {
+ dev_err(&pdev->dev, "failed to setup XUSB ports: %d\n", err);
+ goto remove_pads;
+ }
+
+ return 0;
+
+remove_pads:
+ tegra_xusb_remove_pads(padctl);
+reset:
+ reset_control_assert(padctl->rst);
+remove:
+ soc->ops->remove(padctl);
+ return err;
+}
+
+static int tegra_xusb_padctl_remove(struct platform_device *pdev)
+{
+ struct tegra_xusb_padctl *padctl = platform_get_drvdata(pdev);
+ int err;
+
+ tegra_xusb_remove_ports(padctl);
+ tegra_xusb_remove_pads(padctl);
+
+ err = reset_control_assert(padctl->rst);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to assert reset: %d\n", err);
+
+ padctl->soc->ops->remove(padctl);
+
+ return err;
+}
+
+static struct platform_driver tegra_xusb_padctl_driver = {
+ .driver = {
+ .name = "tegra-xusb-padctl",
+ .of_match_table = tegra_xusb_padctl_of_match,
+ },
+ .probe = tegra_xusb_padctl_probe,
+ .remove = tegra_xusb_padctl_remove,
+};
+module_platform_driver(tegra_xusb_padctl_driver);
+
+struct tegra_xusb_padctl *tegra_xusb_padctl_get(struct device *dev)
+{
+ struct tegra_xusb_padctl *padctl;
+ struct platform_device *pdev;
+ struct device_node *np;
+
+ np = of_parse_phandle(dev->of_node, "nvidia,xusb-padctl", 0);
+ if (!np)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * This is slightly ugly. A better implementation would be to keep a
+ * registry of pad controllers, but since there will almost certainly
+ * only ever be one per SoC, that would be a little overkill.
+ */
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ of_node_put(np);
+ return ERR_PTR(-ENODEV);
+ }
+
+ of_node_put(np);
+
+ padctl = platform_get_drvdata(pdev);
+ if (!padctl) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return padctl;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get);
+
+void tegra_xusb_padctl_put(struct tegra_xusb_padctl *padctl)
+{
+ if (padctl)
+ put_device(padctl->dev);
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_put);
+
+int tegra_xusb_padctl_usb3_save_context(struct tegra_xusb_padctl *padctl,
+ unsigned int port)
+{
+ if (padctl->soc->ops->usb3_save_context)
+ return padctl->soc->ops->usb3_save_context(padctl, port);
+
+ return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_usb3_save_context);
+
+int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl,
+ unsigned int port, bool idle)
+{
+ if (padctl->soc->ops->hsic_set_idle)
+ return padctl->soc->ops->hsic_set_idle(padctl, port, idle);
+
+ return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_hsic_set_idle);
+
+int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
+ unsigned int port, bool enable)
+{
+ if (padctl->soc->ops->usb3_set_lfps_detect)
+ return padctl->soc->ops->usb3_set_lfps_detect(padctl, port,
+ enable);
+
+ return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_usb3_set_lfps_detect);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
+MODULE_LICENSE("GPL v2");
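
For reference, a minimal sketch of how a client driver (an XHCI host driver, say) might consume the API exported above. The consumer function and its device are hypothetical; only tegra_xusb_padctl_get()/_put() and tegra_xusb_padctl_hsic_set_idle() come from this patch:

    /* hypothetical consumer; assumes a "nvidia,xusb-padctl" phandle in DT */
    static int example_host_probe(struct platform_device *pdev)
    {
            struct tegra_xusb_padctl *padctl;
            int err;

            padctl = tegra_xusb_padctl_get(&pdev->dev);
            if (IS_ERR(padctl))
                    return PTR_ERR(padctl); /* may be -EPROBE_DEFER */

            /* bring HSIC port 0 out of idle before starting the controller */
            err = tegra_xusb_padctl_hsic_set_idle(padctl, 0, false);
            if (err < 0 && err != -ENOSYS) {
                    tegra_xusb_padctl_put(padctl);
                    return err;
            }

            return 0;
    }

Note that tegra_xusb_padctl_get() returns -EPROBE_DEFER while the pad controller has not finished probing, so consumers get correct probe ordering for free.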
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
new file mode 100644
index 000000000..b49dbc36e
--- /dev/null
+++ b/drivers/phy/tegra/xusb.h
@@ -0,0 +1,421 @@
+/*
+ * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015, Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __PHY_TEGRA_XUSB_H
+#define __PHY_TEGRA_XUSB_H
+
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+/* legacy entry points for backwards-compatibility */
+int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev);
+int tegra_xusb_padctl_legacy_remove(struct platform_device *pdev);
+
+struct phy;
+struct phy_provider;
+struct platform_device;
+struct regulator;
+
+/*
+ * lanes
+ */
+struct tegra_xusb_lane_soc {
+ const char *name;
+
+ unsigned int offset;
+ unsigned int shift;
+ unsigned int mask;
+
+ const char * const *funcs;
+ unsigned int num_funcs;
+};
+
+struct tegra_xusb_lane {
+ const struct tegra_xusb_lane_soc *soc;
+ struct tegra_xusb_pad *pad;
+ struct device_node *np;
+ struct list_head list;
+ unsigned int function;
+ unsigned int index;
+};
+
+int tegra_xusb_lane_parse_dt(struct tegra_xusb_lane *lane,
+ struct device_node *np);
+
+struct tegra_xusb_usb2_lane {
+ struct tegra_xusb_lane base;
+
+ u32 hs_curr_level_offset;
+};
+
+static inline struct tegra_xusb_usb2_lane *
+to_usb2_lane(struct tegra_xusb_lane *lane)
+{
+ return container_of(lane, struct tegra_xusb_usb2_lane, base);
+}
+
+struct tegra_xusb_ulpi_lane {
+ struct tegra_xusb_lane base;
+};
+
+static inline struct tegra_xusb_ulpi_lane *
+to_ulpi_lane(struct tegra_xusb_lane *lane)
+{
+ return container_of(lane, struct tegra_xusb_ulpi_lane, base);
+}
+
+struct tegra_xusb_hsic_lane {
+ struct tegra_xusb_lane base;
+
+ u32 strobe_trim;
+ u32 rx_strobe_trim;
+ u32 rx_data_trim;
+ u32 tx_rtune_n;
+ u32 tx_rtune_p;
+ u32 tx_rslew_n;
+ u32 tx_rslew_p;
+ bool auto_term;
+};
+
+static inline struct tegra_xusb_hsic_lane *
+to_hsic_lane(struct tegra_xusb_lane *lane)
+{
+ return container_of(lane, struct tegra_xusb_hsic_lane, base);
+}
+
+struct tegra_xusb_pcie_lane {
+ struct tegra_xusb_lane base;
+};
+
+static inline struct tegra_xusb_pcie_lane *
+to_pcie_lane(struct tegra_xusb_lane *lane)
+{
+ return container_of(lane, struct tegra_xusb_pcie_lane, base);
+}
+
+struct tegra_xusb_sata_lane {
+ struct tegra_xusb_lane base;
+};
+
+static inline struct tegra_xusb_sata_lane *
+to_sata_lane(struct tegra_xusb_lane *lane)
+{
+ return container_of(lane, struct tegra_xusb_sata_lane, base);
+}
+
+struct tegra_xusb_lane_ops {
+ struct tegra_xusb_lane *(*probe)(struct tegra_xusb_pad *pad,
+ struct device_node *np,
+ unsigned int index);
+ void (*remove)(struct tegra_xusb_lane *lane);
+};
+
+/*
+ * pads
+ */
+struct tegra_xusb_pad_soc;
+struct tegra_xusb_padctl;
+
+struct tegra_xusb_pad_ops {
+ struct tegra_xusb_pad *(*probe)(struct tegra_xusb_padctl *padctl,
+ const struct tegra_xusb_pad_soc *soc,
+ struct device_node *np);
+ void (*remove)(struct tegra_xusb_pad *pad);
+};
+
+struct tegra_xusb_pad_soc {
+ const char *name;
+
+ const struct tegra_xusb_lane_soc *lanes;
+ unsigned int num_lanes;
+
+ const struct tegra_xusb_pad_ops *ops;
+};
+
+struct tegra_xusb_pad {
+ const struct tegra_xusb_pad_soc *soc;
+ struct tegra_xusb_padctl *padctl;
+ struct phy_provider *provider;
+ struct phy **lanes;
+ struct device dev;
+
+ const struct tegra_xusb_lane_ops *ops;
+
+ struct list_head list;
+};
+
+static inline struct tegra_xusb_pad *to_tegra_xusb_pad(struct device *dev)
+{
+ return container_of(dev, struct tegra_xusb_pad, dev);
+}
+
+int tegra_xusb_pad_init(struct tegra_xusb_pad *pad,
+ struct tegra_xusb_padctl *padctl,
+ struct device_node *np);
+int tegra_xusb_pad_register(struct tegra_xusb_pad *pad,
+ const struct phy_ops *ops);
+void tegra_xusb_pad_unregister(struct tegra_xusb_pad *pad);
+
+struct tegra_xusb_usb2_pad {
+ struct tegra_xusb_pad base;
+
+ struct clk *clk;
+ unsigned int enable;
+ struct mutex lock;
+};
+
+static inline struct tegra_xusb_usb2_pad *
+to_usb2_pad(struct tegra_xusb_pad *pad)
+{
+ return container_of(pad, struct tegra_xusb_usb2_pad, base);
+}
+
+struct tegra_xusb_ulpi_pad {
+ struct tegra_xusb_pad base;
+};
+
+static inline struct tegra_xusb_ulpi_pad *
+to_ulpi_pad(struct tegra_xusb_pad *pad)
+{
+ return container_of(pad, struct tegra_xusb_ulpi_pad, base);
+}
+
+struct tegra_xusb_hsic_pad {
+ struct tegra_xusb_pad base;
+
+ struct regulator *supply;
+ struct clk *clk;
+};
+
+static inline struct tegra_xusb_hsic_pad *
+to_hsic_pad(struct tegra_xusb_pad *pad)
+{
+ return container_of(pad, struct tegra_xusb_hsic_pad, base);
+}
+
+struct tegra_xusb_pcie_pad {
+ struct tegra_xusb_pad base;
+
+ struct reset_control *rst;
+ struct clk *pll;
+
+ unsigned int enable;
+};
+
+static inline struct tegra_xusb_pcie_pad *
+to_pcie_pad(struct tegra_xusb_pad *pad)
+{
+ return container_of(pad, struct tegra_xusb_pcie_pad, base);
+}
+
+struct tegra_xusb_sata_pad {
+ struct tegra_xusb_pad base;
+
+ struct reset_control *rst;
+ struct clk *pll;
+
+ unsigned int enable;
+};
+
+static inline struct tegra_xusb_sata_pad *
+to_sata_pad(struct tegra_xusb_pad *pad)
+{
+ return container_of(pad, struct tegra_xusb_sata_pad, base);
+}
+
+/*
+ * ports
+ */
+struct tegra_xusb_port_ops;
+
+struct tegra_xusb_port {
+ struct tegra_xusb_padctl *padctl;
+ struct tegra_xusb_lane *lane;
+ unsigned int index;
+
+ struct list_head list;
+ struct device dev;
+
+ const struct tegra_xusb_port_ops *ops;
+};
+
+struct tegra_xusb_lane_map {
+ unsigned int port;
+ const char *type;
+ unsigned int index;
+ const char *func;
+};
+
+struct tegra_xusb_lane *
+tegra_xusb_port_find_lane(struct tegra_xusb_port *port,
+ const struct tegra_xusb_lane_map *map,
+ const char *function);
+
+struct tegra_xusb_port *
+tegra_xusb_find_port(struct tegra_xusb_padctl *padctl, const char *type,
+ unsigned int index);
+
+struct tegra_xusb_usb2_port {
+ struct tegra_xusb_port base;
+
+ struct regulator *supply;
+ bool internal;
+};
+
+static inline struct tegra_xusb_usb2_port *
+to_usb2_port(struct tegra_xusb_port *port)
+{
+ return container_of(port, struct tegra_xusb_usb2_port, base);
+}
+
+struct tegra_xusb_usb2_port *
+tegra_xusb_find_usb2_port(struct tegra_xusb_padctl *padctl,
+ unsigned int index);
+
+struct tegra_xusb_ulpi_port {
+ struct tegra_xusb_port base;
+
+ struct regulator *supply;
+ bool internal;
+};
+
+static inline struct tegra_xusb_ulpi_port *
+to_ulpi_port(struct tegra_xusb_port *port)
+{
+ return container_of(port, struct tegra_xusb_ulpi_port, base);
+}
+
+struct tegra_xusb_hsic_port {
+ struct tegra_xusb_port base;
+};
+
+static inline struct tegra_xusb_hsic_port *
+to_hsic_port(struct tegra_xusb_port *port)
+{
+ return container_of(port, struct tegra_xusb_hsic_port, base);
+}
+
+struct tegra_xusb_usb3_port {
+ struct tegra_xusb_port base;
+ struct regulator *supply;
+ bool context_saved;
+ unsigned int port;
+ bool internal;
+
+ u32 tap1;
+ u32 amp;
+ u32 ctle_z;
+ u32 ctle_g;
+};
+
+static inline struct tegra_xusb_usb3_port *
+to_usb3_port(struct tegra_xusb_port *port)
+{
+ return container_of(port, struct tegra_xusb_usb3_port, base);
+}
+
+struct tegra_xusb_usb3_port *
+tegra_xusb_find_usb3_port(struct tegra_xusb_padctl *padctl,
+ unsigned int index);
+
+struct tegra_xusb_port_ops {
+ int (*enable)(struct tegra_xusb_port *port);
+ void (*disable)(struct tegra_xusb_port *port);
+ struct tegra_xusb_lane *(*map)(struct tegra_xusb_port *port);
+};
+
+/*
+ * pad controller
+ */
+struct tegra_xusb_padctl_soc;
+
+struct tegra_xusb_padctl_ops {
+ struct tegra_xusb_padctl *
+ (*probe)(struct device *dev,
+ const struct tegra_xusb_padctl_soc *soc);
+ void (*remove)(struct tegra_xusb_padctl *padctl);
+
+ int (*usb3_save_context)(struct tegra_xusb_padctl *padctl,
+ unsigned int index);
+ int (*hsic_set_idle)(struct tegra_xusb_padctl *padctl,
+ unsigned int index, bool idle);
+ int (*usb3_set_lfps_detect)(struct tegra_xusb_padctl *padctl,
+ unsigned int index, bool enable);
+};
+
+struct tegra_xusb_padctl_soc {
+ const struct tegra_xusb_pad_soc * const *pads;
+ unsigned int num_pads;
+
+ struct {
+ struct {
+ const struct tegra_xusb_port_ops *ops;
+ unsigned int count;
+ } usb2, ulpi, hsic, usb3;
+ } ports;
+
+ const struct tegra_xusb_padctl_ops *ops;
+};
+
+struct tegra_xusb_padctl {
+ struct device *dev;
+ void __iomem *regs;
+ struct mutex lock;
+ struct reset_control *rst;
+
+ const struct tegra_xusb_padctl_soc *soc;
+
+ struct tegra_xusb_pad *pcie;
+ struct tegra_xusb_pad *sata;
+ struct tegra_xusb_pad *ulpi;
+ struct tegra_xusb_pad *usb2;
+ struct tegra_xusb_pad *hsic;
+
+ struct list_head ports;
+ struct list_head lanes;
+ struct list_head pads;
+
+ unsigned int enable;
+
+ struct clk *clk;
+};
+
+static inline void padctl_writel(struct tegra_xusb_padctl *padctl, u32 value,
+ unsigned long offset)
+{
+ dev_dbg(padctl->dev, "%08lx < %08x\n", offset, value);
+ writel(value, padctl->regs + offset);
+}
+
+static inline u32 padctl_readl(struct tegra_xusb_padctl *padctl,
+ unsigned long offset)
+{
+ u32 value = readl(padctl->regs + offset);
+ dev_dbg(padctl->dev, "%08lx > %08x\n", offset, value);
+ return value;
+}
+
+struct tegra_xusb_lane *tegra_xusb_find_lane(struct tegra_xusb_padctl *padctl,
+ const char *name,
+ unsigned int index);
+
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+extern const struct tegra_xusb_padctl_soc tegra124_xusb_padctl_soc;
+#endif
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+extern const struct tegra_xusb_padctl_soc tegra210_xusb_padctl_soc;
+#endif
+
+#endif /* __PHY_TEGRA_XUSB_H */
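
The padctl_readl()/padctl_writel() helpers above log every access with dev_dbg(), so register traffic can be traced via dynamic debug. A typical read-modify-write through them would look like this sketch (the register offset and bit are illustrative placeholders, not real XUSB registers):

    #define XUSB_PADCTL_EXAMPLE     0x004   /* placeholder offset */
    #define  EXAMPLE_ENABLE         BIT(0)  /* placeholder bit */

    static void example_enable(struct tegra_xusb_padctl *padctl)
    {
            u32 value;

            value = padctl_readl(padctl, XUSB_PADCTL_EXAMPLE);
            value |= EXAMPLE_ENABLE;
            padctl_writel(padctl, value, XUSB_PADCTL_EXAMPLE);
    }

Note the value-before-offset argument order of padctl_writel(), mirrored from the helper's definition above.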
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index e4bc1151e..42a5c1ddd 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_SIRF) += sirf/
-obj-$(CONFIG_PINCTRL_TEGRA) += tegra/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_PINCTRL_TZ1090) += pinctrl-tz1090.o
obj-$(CONFIG_PINCTRL_TZ1090_PDC) += pinctrl-tz1090-pdc.o
obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 2cc74384c..c356223e1 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -86,3 +86,16 @@ config PINCTRL_NSP_GPIO
The ChipcommonA GPIO controller supports basic PINCONF functions such
as bias pull up, pull down, and drive strength configurations, when
these pins are muxed to GPIO.
+
+config PINCTRL_NS2_MUX
+ bool "Broadcom Northstar2 pinmux driver"
+ depends on OF
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ select PINMUX
+ select GENERIC_PINCONF
+ default ARM64 && ARCH_BCM_IPROC
+ help
+ Say yes here to enable the Broadcom NS2 MUX driver.
+
+ The Broadcom Northstar2 IOMUX driver supports group-based IOMUX
+ configuration.
diff --git a/drivers/pinctrl/bcm/Makefile b/drivers/pinctrl/bcm/Makefile
index 6148367d5..3861a1c1f 100644
--- a/drivers/pinctrl/bcm/Makefile
+++ b/drivers/pinctrl/bcm/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_PINCTRL_BCM2835) += pinctrl-bcm2835.o
obj-$(CONFIG_PINCTRL_IPROC_GPIO) += pinctrl-iproc-gpio.o
obj-$(CONFIG_PINCTRL_CYGNUS_MUX) += pinctrl-cygnus-mux.o
obj-$(CONFIG_PINCTRL_NSP_GPIO) += pinctrl-nsp-gpio.o
+obj-$(CONFIG_PINCTRL_NS2_MUX) += pinctrl-ns2-mux.o
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
index c3c692e50..582f6df44 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c
@@ -1024,7 +1024,7 @@ static struct pinctrl_ops bcm281xx_pinctrl_ops = {
.get_group_pins = bcm281xx_pinctrl_get_group_pins,
.pin_dbg_show = bcm281xx_pinctrl_pin_dbg_show,
.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int bcm281xx_pinctrl_get_fcns_count(struct pinctrl_dev *pctldev)
@@ -1422,9 +1422,7 @@ static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
bcm281xx_pinctrl_desc.pins = bcm281xx_pinctrl.pins;
bcm281xx_pinctrl_desc.npins = bcm281xx_pinctrl.npins;
- pctl = pinctrl_register(&bcm281xx_pinctrl_desc,
- &pdev->dev,
- pdata);
+ pctl = devm_pinctrl_register(&pdev->dev, &bcm281xx_pinctrl_desc, pdata);
if (IS_ERR(pctl)) {
dev_err(&pdev->dev, "Failed to register pinctrl\n");
return PTR_ERR(pctl);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 08b1d93da..fa77165fa 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -342,6 +342,18 @@ static int bcm2835_gpio_get(struct gpio_chip *chip, unsigned offset)
return bcm2835_gpio_get_bit(pc, GPLEV0, offset);
}
+static int bcm2835_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
+ enum bcm2835_fsel fsel = bcm2835_pinctrl_fsel_get(pc, offset);
+
+ /* Alternative function doesn't clearly provide a direction */
+ if (fsel > BCM2835_FSEL_GPIO_OUT)
+ return -EINVAL;
+
+ return (fsel == BCM2835_FSEL_GPIO_IN);
+}
+
static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
@@ -370,6 +382,7 @@ static struct gpio_chip bcm2835_gpio_chip = {
.free = gpiochip_generic_free,
.direction_input = bcm2835_gpio_direction_input,
.direction_output = bcm2835_gpio_direction_output,
+ .get_direction = bcm2835_gpio_get_direction,
.get = bcm2835_gpio_get,
.set = bcm2835_gpio_set,
.to_irq = bcm2835_gpio_to_irq,
@@ -1027,7 +1040,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
return err;
}
- pc->pctl_dev = pinctrl_register(&bcm2835_pinctrl_desc, dev, pc);
+ pc->pctl_dev = devm_pinctrl_register(dev, &bcm2835_pinctrl_desc, pc);
if (IS_ERR(pc->pctl_dev)) {
gpiochip_remove(&pc->gpio_chip);
return PTR_ERR(pc->pctl_dev);
@@ -1045,7 +1058,6 @@ static int bcm2835_pinctrl_remove(struct platform_device *pdev)
{
struct bcm2835_pinctrl *pc = platform_get_drvdata(pdev);
- pinctrl_unregister(pc->pctl_dev);
gpiochip_remove(&pc->gpio_chip);
return 0;
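
With .get_direction now populated, gpiolib can report the real hardware state of a BCM2835 pin rather than a cached one. A consumer reaches the new callback through the standard descriptor API; a sketch, with a hypothetical "example" consumer name:

    static int example_report_direction(struct device *dev)
    {
            struct gpio_desc *desc;
            int dir;

            desc = devm_gpiod_get(dev, "example", GPIOD_ASIS);
            if (IS_ERR(desc))
                    return PTR_ERR(desc);

            /* ends up in bcm2835_gpio_get_direction(): 1 = input, 0 = output,
             * -EINVAL when the pin is muxed to an alternative function */
            dir = gpiod_get_direction(desc);
            if (dir < 0)
                    return dir;

            dev_info(dev, "example GPIO is an %s\n", dir ? "input" : "output");
            return 0;
    }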
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
index 9728f3db9..d31c95701 100644
--- a/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-mux.c
@@ -737,7 +737,7 @@ static const struct pinctrl_ops cygnus_pinctrl_ops = {
.get_group_pins = cygnus_get_group_pins,
.pin_dbg_show = cygnus_pin_dbg_show,
.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int cygnus_get_functions_count(struct pinctrl_dev *pctrl_dev)
@@ -987,7 +987,7 @@ static int cygnus_pinmux_probe(struct platform_device *pdev)
cygnus_pinctrl_desc.pins = pins;
cygnus_pinctrl_desc.npins = num_pins;
- pinctrl->pctl = pinctrl_register(&cygnus_pinctrl_desc, &pdev->dev,
+ pinctrl->pctl = devm_pinctrl_register(&pdev->dev, &cygnus_pinctrl_desc,
pinctrl);
if (IS_ERR(pinctrl->pctl)) {
dev_err(&pdev->dev, "unable to register Cygnus IOMUX pinctrl\n");
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index d530ab4b9..3670f5ea7 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -379,7 +379,7 @@ static const struct pinctrl_ops iproc_pctrl_ops = {
.get_groups_count = iproc_get_groups_count,
.get_group_name = iproc_get_group_name,
.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int iproc_gpio_set_pull(struct iproc_gpio *chip, unsigned gpio,
@@ -623,7 +623,7 @@ static int iproc_gpio_register_pinconf(struct iproc_gpio *chip)
pctldesc->npins = gc->ngpio;
pctldesc->confops = &iproc_pconf_ops;
- chip->pctl = pinctrl_register(pctldesc, chip->dev, chip);
+ chip->pctl = devm_pinctrl_register(chip->dev, pctldesc, chip);
if (IS_ERR(chip->pctl)) {
dev_err(chip->dev, "unable to register pinctrl device\n");
return PTR_ERR(chip->pctl);
@@ -632,11 +632,6 @@ static int iproc_gpio_register_pinconf(struct iproc_gpio *chip)
return 0;
}
-static void iproc_gpio_unregister_pinconf(struct iproc_gpio *chip)
-{
- pinctrl_unregister(chip->pctl);
-}
-
static const struct of_device_id iproc_gpio_of_match[] = {
{ .compatible = "brcm,cygnus-ccm-gpio" },
{ .compatible = "brcm,cygnus-asiu-gpio" },
@@ -720,7 +715,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
handle_simple_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "no GPIO irqchip\n");
- goto err_unregister_pinconf;
+ goto err_rm_gpiochip;
}
gpiochip_set_chained_irqchip(gc, &iproc_gpio_irq_chip, irq,
@@ -729,9 +724,6 @@ static int iproc_gpio_probe(struct platform_device *pdev)
return 0;
-err_unregister_pinconf:
- iproc_gpio_unregister_pinconf(chip);
-
err_rm_gpiochip:
gpiochip_remove(gc);
diff --git a/drivers/pinctrl/bcm/pinctrl-ns2-mux.c b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
new file mode 100644
index 000000000..3fefd14ac
--- /dev/null
+++ b/drivers/pinctrl/bcm/pinctrl-ns2-mux.c
@@ -0,0 +1,1117 @@
+/* Copyright (C) 2016 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This file contains the Northstar2 IOMUX driver that supports
+ * group-based PINMUX configuration. The PWM is functional only when the
+ * corresponding mfio pin group is selected as gpio.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+#define NS2_NUM_IOMUX 19
+#define NS2_NUM_PWM_MUX 4
+
+#define NS2_PIN_MUX_BASE0 0x00
+#define NS2_PIN_MUX_BASE1 0x01
+#define NS2_PIN_CONF_BASE 0x02
+#define NS2_MUX_PAD_FUNC1_OFFSET 0x04
+
+#define NS2_PIN_SRC_MASK 0x01
+#define NS2_PIN_PULL_MASK 0x03
+#define NS2_PIN_DRIVE_STRENGTH_MASK 0x07
+
+#define NS2_PIN_PULL_UP 0x01
+#define NS2_PIN_PULL_DOWN 0x02
+
+#define NS2_PIN_INPUT_EN_MASK 0x01
+
+/*
+ * Northstar2 IOMUX register description
+ *
+ * @base: base address number
+ * @offset: register offset for mux configuration of a group
+ * @shift: bit shift for mux configuration of a group
+ * @mask: mask bits
+ * @alt: alternate function to set to
+ */
+struct ns2_mux {
+ unsigned int base;
+ unsigned int offset;
+ unsigned int shift;
+ unsigned int mask;
+ unsigned int alt;
+};
+
+/*
+ * Keep track of Northstar2 IOMUX configuration and prevent double
+ * configuration
+ *
+ * @mux: Northstar2 IOMUX register description
+ * @is_configured: flag to indicate whether a mux setting has already
+ * been configured
+ */
+struct ns2_mux_log {
+ struct ns2_mux mux;
+ bool is_configured;
+};
+
+/*
+ * Group based IOMUX configuration
+ *
+ * @name: name of the group
+ * @pins: array of pins used by this group
+ * @num_pins: total number of pins used by this group
+ * @mux: Northstar2 group based IOMUX configuration
+ */
+struct ns2_pin_group {
+ const char *name;
+ const unsigned int *pins;
+ const unsigned int num_pins;
+ const struct ns2_mux mux;
+};
+
+/*
+ * Northstar2 mux function and supported pin groups
+ *
+ * @name: name of the function
+ * @groups: array of groups that can be supported by this function
+ * @num_groups: total number of groups that can be supported by function
+ */
+struct ns2_pin_function {
+ const char *name;
+ const char * const *groups;
+ const unsigned int num_groups;
+};
+
+/*
+ * Northstar2 IOMUX pinctrl core
+ *
+ * @pctl: pointer to pinctrl_dev
+ * @dev: pointer to device
+ * @base0: first IOMUX register base
+ * @base1: second IOMUX register base
+ * @pinconf_base: configuration register base
+ * @groups: pointer to array of groups
+ * @num_groups: total number of groups
+ * @functions: pointer to array of functions
+ * @num_functions: total number of functions
+ * @mux_log: pointer to the array of mux logs
+ * @lock: lock to protect register access
+ */
+struct ns2_pinctrl {
+ struct pinctrl_dev *pctl;
+ struct device *dev;
+ void __iomem *base0;
+ void __iomem *base1;
+ void __iomem *pinconf_base;
+
+ const struct ns2_pin_group *groups;
+ unsigned int num_groups;
+
+ const struct ns2_pin_function *functions;
+ unsigned int num_functions;
+
+ struct ns2_mux_log *mux_log;
+
+ spinlock_t lock;
+};
+
+/*
+ * Pin configuration info
+ *
+ * @base: base address number
+ * @offset: register offset from base
+ * @src_shift: slew rate control bit shift in the register
+ * @input_en: input enable control bit shift
+ * @pull_shift: pull-up/pull-down control bit shift in the register
+ * @drive_shift: drive strength control bit shift in the register
+ */
+struct ns2_pinconf {
+ unsigned int base;
+ unsigned int offset;
+ unsigned int src_shift;
+ unsigned int input_en;
+ unsigned int pull_shift;
+ unsigned int drive_shift;
+};
+
+/*
+ * Description of a pin in Northstar2
+ *
+ * @pin: pin number
+ * @name: pin name
+ * @pin_conf: pin configuration structure
+ */
+struct ns2_pin {
+ unsigned int pin;
+ char *name;
+ struct ns2_pinconf pin_conf;
+};
+
+#define NS2_PIN_DESC(p, n, b, o, s, i, pu, d) \
+{ \
+ .pin = p, \
+ .name = n, \
+ .pin_conf = { \
+ .base = b, \
+ .offset = o, \
+ .src_shift = s, \
+ .input_en = i, \
+ .pull_shift = pu, \
+ .drive_shift = d, \
+ } \
+}
+
+/*
+ * List of pins in Northstar2
+ */
+static struct ns2_pin ns2_pins[] = {
+ NS2_PIN_DESC(0, "mfio_0", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(1, "mfio_1", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(2, "mfio_2", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(3, "mfio_3", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(4, "mfio_4", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(5, "mfio_5", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(6, "mfio_6", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(7, "mfio_7", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(8, "mfio_8", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(9, "mfio_9", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(10, "mfio_10", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(11, "mfio_11", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(12, "mfio_12", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(13, "mfio_13", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(14, "mfio_14", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(15, "mfio_15", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(16, "mfio_16", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(17, "mfio_17", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(18, "mfio_18", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(19, "mfio_19", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(20, "mfio_20", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(21, "mfio_21", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(22, "mfio_22", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(23, "mfio_23", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(24, "mfio_24", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(25, "mfio_25", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(26, "mfio_26", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(27, "mfio_27", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(28, "mfio_28", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(29, "mfio_29", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(30, "mfio_30", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(31, "mfio_31", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(32, "mfio_32", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(33, "mfio_33", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(34, "mfio_34", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(35, "mfio_35", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(36, "mfio_36", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(37, "mfio_37", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(38, "mfio_38", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(39, "mfio_39", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(40, "mfio_40", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(41, "mfio_41", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(42, "mfio_42", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(43, "mfio_43", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(44, "mfio_44", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(45, "mfio_45", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(46, "mfio_46", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(47, "mfio_47", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(48, "mfio_48", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(49, "mfio_49", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(50, "mfio_50", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(51, "mfio_51", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(52, "mfio_52", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(53, "mfio_53", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(54, "mfio_54", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(55, "mfio_55", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(56, "mfio_56", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(57, "mfio_57", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(58, "mfio_58", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(59, "mfio_59", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(60, "mfio_60", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(61, "mfio_61", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(62, "mfio_62", -1, 0, 0, 0, 0, 0),
+ NS2_PIN_DESC(63, "qspi_wp", 2, 0x0, 31, 30, 27, 24),
+ NS2_PIN_DESC(64, "qspi_hold", 2, 0x0, 23, 22, 19, 16),
+ NS2_PIN_DESC(65, "qspi_cs", 2, 0x0, 15, 14, 11, 8),
+ NS2_PIN_DESC(66, "qspi_sck", 2, 0x0, 7, 6, 3, 0),
+ NS2_PIN_DESC(67, "uart3_sin", 2, 0x04, 31, 30, 27, 24),
+ NS2_PIN_DESC(68, "uart3_sout", 2, 0x04, 23, 22, 19, 16),
+ NS2_PIN_DESC(69, "qspi_mosi", 2, 0x04, 15, 14, 11, 8),
+ NS2_PIN_DESC(70, "qspi_miso", 2, 0x04, 7, 6, 3, 0),
+ NS2_PIN_DESC(71, "spi0_fss", 2, 0x08, 31, 30, 27, 24),
+ NS2_PIN_DESC(72, "spi0_rxd", 2, 0x08, 23, 22, 19, 16),
+ NS2_PIN_DESC(73, "spi0_txd", 2, 0x08, 15, 14, 11, 8),
+ NS2_PIN_DESC(74, "spi0_sck", 2, 0x08, 7, 6, 3, 0),
+ NS2_PIN_DESC(75, "spi1_fss", 2, 0x0c, 31, 30, 27, 24),
+ NS2_PIN_DESC(76, "spi1_rxd", 2, 0x0c, 23, 22, 19, 16),
+ NS2_PIN_DESC(77, "spi1_txd", 2, 0x0c, 15, 14, 11, 8),
+ NS2_PIN_DESC(78, "spi1_sck", 2, 0x0c, 7, 6, 3, 0),
+ NS2_PIN_DESC(79, "sdio0_data7", 2, 0x10, 31, 30, 27, 24),
+ NS2_PIN_DESC(80, "sdio0_emmc_rst", 2, 0x10, 23, 22, 19, 16),
+ NS2_PIN_DESC(81, "sdio0_led_on", 2, 0x10, 15, 14, 11, 8),
+ NS2_PIN_DESC(82, "sdio0_wp", 2, 0x10, 7, 6, 3, 0),
+ NS2_PIN_DESC(83, "sdio0_data3", 2, 0x14, 31, 30, 27, 24),
+ NS2_PIN_DESC(84, "sdio0_data4", 2, 0x14, 23, 22, 19, 16),
+ NS2_PIN_DESC(85, "sdio0_data5", 2, 0x14, 15, 14, 11, 8),
+ NS2_PIN_DESC(86, "sdio0_data6", 2, 0x14, 7, 6, 3, 0),
+ NS2_PIN_DESC(87, "sdio0_cmd", 2, 0x18, 31, 30, 27, 24),
+ NS2_PIN_DESC(88, "sdio0_data0", 2, 0x18, 23, 22, 19, 16),
+ NS2_PIN_DESC(89, "sdio0_data1", 2, 0x18, 15, 14, 11, 8),
+ NS2_PIN_DESC(90, "sdio0_data2", 2, 0x18, 7, 6, 3, 0),
+ NS2_PIN_DESC(91, "sdio1_led_on", 2, 0x1c, 31, 30, 27, 24),
+ NS2_PIN_DESC(92, "sdio1_wp", 2, 0x1c, 23, 22, 19, 16),
+ NS2_PIN_DESC(93, "sdio0_cd_l", 2, 0x1c, 15, 14, 11, 8),
+ NS2_PIN_DESC(94, "sdio0_clk", 2, 0x1c, 7, 6, 3, 0),
+ NS2_PIN_DESC(95, "sdio1_data5", 2, 0x20, 31, 30, 27, 24),
+ NS2_PIN_DESC(96, "sdio1_data6", 2, 0x20, 23, 22, 19, 16),
+ NS2_PIN_DESC(97, "sdio1_data7", 2, 0x20, 15, 14, 11, 8),
+ NS2_PIN_DESC(98, "sdio1_emmc_rst", 2, 0x20, 7, 6, 3, 0),
+ NS2_PIN_DESC(99, "sdio1_data1", 2, 0x24, 31, 30, 27, 24),
+ NS2_PIN_DESC(100, "sdio1_data2", 2, 0x24, 23, 22, 19, 16),
+ NS2_PIN_DESC(101, "sdio1_data3", 2, 0x24, 15, 14, 11, 8),
+ NS2_PIN_DESC(102, "sdio1_data4", 2, 0x24, 7, 6, 3, 0),
+ NS2_PIN_DESC(103, "sdio1_cd_l", 2, 0x28, 31, 30, 27, 24),
+ NS2_PIN_DESC(104, "sdio1_clk", 2, 0x28, 23, 22, 19, 16),
+ NS2_PIN_DESC(105, "sdio1_cmd", 2, 0x28, 15, 14, 11, 8),
+ NS2_PIN_DESC(106, "sdio1_data0", 2, 0x28, 7, 6, 3, 0),
+ NS2_PIN_DESC(107, "ext_mdio_0", 2, 0x2c, 15, 14, 11, 8),
+ NS2_PIN_DESC(108, "ext_mdc_0", 2, 0x2c, 7, 6, 3, 0),
+ NS2_PIN_DESC(109, "usb3_p1_vbus_ppc", 2, 0x34, 31, 30, 27, 24),
+ NS2_PIN_DESC(110, "usb3_p1_overcurrent", 2, 0x34, 23, 22, 19, 16),
+ NS2_PIN_DESC(111, "usb3_p0_vbus_ppc", 2, 0x34, 15, 14, 11, 8),
+ NS2_PIN_DESC(112, "usb3_p0_overcurrent", 2, 0x34, 7, 6, 3, 0),
+ NS2_PIN_DESC(113, "usb2_presence_indication", 2, 0x38, 31, 30, 27, 24),
+ NS2_PIN_DESC(114, "usb2_vbus_present", 2, 0x38, 23, 22, 19, 16),
+ NS2_PIN_DESC(115, "usb2_vbus_ppc", 2, 0x38, 15, 14, 11, 8),
+ NS2_PIN_DESC(116, "usb2_overcurrent", 2, 0x38, 7, 6, 3, 0),
+ NS2_PIN_DESC(117, "sata_led1", 2, 0x3c, 15, 14, 11, 8),
+ NS2_PIN_DESC(118, "sata_led0", 2, 0x3c, 7, 6, 3, 0),
+};
+
+/*
+ * List of groups of pins
+ */
+
+static const unsigned int nand_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+ 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23};
+static const unsigned int nor_data_pins[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25};
+
+static const unsigned int gpio_0_1_pins[] = {24, 25};
+static const unsigned int pwm_0_pins[] = {24};
+static const unsigned int pwm_1_pins[] = {25};
+
+static const unsigned int uart1_ext_clk_pins[] = {26};
+static const unsigned int nor_adv_pins[] = {26};
+
+static const unsigned int gpio_2_5_pins[] = {27, 28, 29, 30};
+static const unsigned int pcie_ab1_clk_wak_pins[] = {27, 28, 29, 30};
+static const unsigned int nor_addr_0_3_pins[] = {27, 28, 29, 30};
+static const unsigned int pwm_2_pins[] = {27};
+static const unsigned int pwm_3_pins[] = {28};
+
+static const unsigned int gpio_6_7_pins[] = {31, 32};
+static const unsigned int pcie_a3_clk_wak_pins[] = {31, 32};
+static const unsigned int nor_addr_4_5_pins[] = {31, 32};
+
+static const unsigned int gpio_8_9_pins[] = {33, 34};
+static const unsigned int pcie_b3_clk_wak_pins[] = {33, 34};
+static const unsigned int nor_addr_6_7_pins[] = {33, 34};
+
+static const unsigned int gpio_10_11_pins[] = {35, 36};
+static const unsigned int pcie_b2_clk_wak_pins[] = {35, 36};
+static const unsigned int nor_addr_8_9_pins[] = {35, 36};
+
+static const unsigned int gpio_12_13_pins[] = {37, 38};
+static const unsigned int pcie_a2_clk_wak_pins[] = {37, 38};
+static const unsigned int nor_addr_10_11_pins[] = {37, 38};
+
+static const unsigned int gpio_14_17_pins[] = {39, 40, 41, 42};
+static const unsigned int uart0_modem_pins[] = {39, 40, 41, 42};
+static const unsigned int nor_addr_12_15_pins[] = {39, 40, 41, 42};
+
+static const unsigned int gpio_18_19_pins[] = {43, 44};
+static const unsigned int uart0_rts_cts_pins[] = {43, 44};
+
+static const unsigned int gpio_20_21_pins[] = {45, 46};
+static const unsigned int uart0_in_out_pins[] = {45, 46};
+
+static const unsigned int gpio_22_23_pins[] = {47, 48};
+static const unsigned int uart1_dcd_dsr_pins[] = {47, 48};
+
+static const unsigned int gpio_24_25_pins[] = {49, 50};
+static const unsigned int uart1_ri_dtr_pins[] = {49, 50};
+
+static const unsigned int gpio_26_27_pins[] = {51, 52};
+static const unsigned int uart1_rts_cts_pins[] = {51, 52};
+
+static const unsigned int gpio_28_29_pins[] = {53, 54};
+static const unsigned int uart1_in_out_pins[] = {53, 54};
+
+static const unsigned int gpio_30_31_pins[] = {55, 56};
+static const unsigned int uart2_rts_cts_pins[] = {55, 56};
+
+#define NS2_PIN_GROUP(group_name, ba, off, sh, ma, al) \
+{ \
+ .name = __stringify(group_name) "_grp", \
+ .pins = group_name ## _pins, \
+ .num_pins = ARRAY_SIZE(group_name ## _pins), \
+ .mux = { \
+ .base = ba, \
+ .offset = off, \
+ .shift = sh, \
+ .mask = ma, \
+ .alt = al, \
+ } \
+}
+
+/*
+ * List of Northstar2 pin groups
+ */
+static const struct ns2_pin_group ns2_pin_groups[] = {
+ NS2_PIN_GROUP(nand, 0, 0, 31, 1, 0),
+ NS2_PIN_GROUP(nor_data, 0, 0, 31, 1, 1),
+ NS2_PIN_GROUP(gpio_0_1, 0, 0, 31, 1, 0),
+
+ NS2_PIN_GROUP(uart1_ext_clk, 0, 4, 30, 3, 1),
+ NS2_PIN_GROUP(nor_adv, 0, 4, 30, 3, 2),
+
+ NS2_PIN_GROUP(gpio_2_5, 0, 4, 28, 3, 0),
+ NS2_PIN_GROUP(pcie_ab1_clk_wak, 0, 4, 28, 3, 1),
+ NS2_PIN_GROUP(nor_addr_0_3, 0, 4, 28, 3, 2),
+
+ NS2_PIN_GROUP(gpio_6_7, 0, 4, 26, 3, 0),
+ NS2_PIN_GROUP(pcie_a3_clk_wak, 0, 4, 26, 3, 1),
+ NS2_PIN_GROUP(nor_addr_4_5, 0, 4, 26, 3, 2),
+
+ NS2_PIN_GROUP(gpio_8_9, 0, 4, 24, 3, 0),
+ NS2_PIN_GROUP(pcie_b3_clk_wak, 0, 4, 24, 3, 1),
+ NS2_PIN_GROUP(nor_addr_6_7, 0, 4, 24, 3, 2),
+
+ NS2_PIN_GROUP(gpio_10_11, 0, 4, 22, 3, 0),
+ NS2_PIN_GROUP(pcie_b2_clk_wak, 0, 4, 22, 3, 1),
+ NS2_PIN_GROUP(nor_addr_8_9, 0, 4, 22, 3, 2),
+
+ NS2_PIN_GROUP(gpio_12_13, 0, 4, 20, 3, 0),
+ NS2_PIN_GROUP(pcie_a2_clk_wak, 0, 4, 20, 3, 1),
+ NS2_PIN_GROUP(nor_addr_10_11, 0, 4, 20, 3, 2),
+
+ NS2_PIN_GROUP(gpio_14_17, 0, 4, 18, 3, 0),
+ NS2_PIN_GROUP(uart0_modem, 0, 4, 18, 3, 1),
+ NS2_PIN_GROUP(nor_addr_12_15, 0, 4, 18, 3, 2),
+
+ NS2_PIN_GROUP(gpio_18_19, 0, 4, 16, 3, 0),
+ NS2_PIN_GROUP(uart0_rts_cts, 0, 4, 16, 3, 1),
+
+ NS2_PIN_GROUP(gpio_20_21, 0, 4, 14, 3, 0),
+ NS2_PIN_GROUP(uart0_in_out, 0, 4, 14, 3, 1),
+
+ NS2_PIN_GROUP(gpio_22_23, 0, 4, 12, 3, 0),
+ NS2_PIN_GROUP(uart1_dcd_dsr, 0, 4, 12, 3, 1),
+
+ NS2_PIN_GROUP(gpio_24_25, 0, 4, 10, 3, 0),
+ NS2_PIN_GROUP(uart1_ri_dtr, 0, 4, 10, 3, 1),
+
+ NS2_PIN_GROUP(gpio_26_27, 0, 4, 8, 3, 0),
+ NS2_PIN_GROUP(uart1_rts_cts, 0, 4, 8, 3, 1),
+
+ NS2_PIN_GROUP(gpio_28_29, 0, 4, 6, 3, 0),
+ NS2_PIN_GROUP(uart1_in_out, 0, 4, 6, 3, 1),
+
+ NS2_PIN_GROUP(gpio_30_31, 0, 4, 4, 3, 0),
+ NS2_PIN_GROUP(uart2_rts_cts, 0, 4, 4, 3, 1),
+
+ NS2_PIN_GROUP(pwm_0, 1, 0, 0, 1, 1),
+ NS2_PIN_GROUP(pwm_1, 1, 0, 1, 1, 1),
+ NS2_PIN_GROUP(pwm_2, 1, 0, 2, 1, 1),
+ NS2_PIN_GROUP(pwm_3, 1, 0, 3, 1, 1),
+};
+
+/*
+ * List of groups supported by functions
+ */
+
+static const char * const nand_grps[] = {"nand_grp"};
+
+static const char * const nor_grps[] = {"nor_data_grp", "nor_adv_grp",
+ "nor_addr_0_3_grp", "nor_addr_4_5_grp", "nor_addr_6_7_grp",
+ "nor_addr_8_9_grp", "nor_addr_10_11_grp", "nor_addr_12_15_grp"};
+
+static const char * const gpio_grps[] = {"gpio_0_1_grp", "gpio_2_5_grp",
+ "gpio_6_7_grp", "gpio_8_9_grp", "gpio_10_11_grp", "gpio_12_13_grp",
+ "gpio_14_17_grp", "gpio_18_19_grp", "gpio_20_21_grp", "gpio_22_23_grp",
+ "gpio_24_25_grp", "gpio_26_27_grp", "gpio_28_29_grp",
+ "gpio_30_31_grp"};
+
+static const char * const pcie_grps[] = {"pcie_ab1_clk_wak_grp",
+ "pcie_a3_clk_wak_grp", "pcie_b3_clk_wak_grp", "pcie_b2_clk_wak_grp",
+ "pcie_a2_clk_wak_grp"};
+
+static const char * const uart0_grps[] = {"uart0_modem_grp",
+ "uart0_rts_cts_grp", "uart0_in_out_grp"};
+
+static const char * const uart1_grps[] = {"uart1_ext_clk_grp",
+ "uart1_dcd_dsr_grp", "uart1_ri_dtr_grp", "uart1_rts_cts_grp",
+ "uart1_in_out_grp"};
+
+static const char * const uart2_grps[] = {"uart2_rts_cts_grp"};
+
+static const char * const pwm_grps[] = {"pwm_0_grp", "pwm_1_grp",
+ "pwm_2_grp", "pwm_3_grp"};
+
+#define NS2_PIN_FUNCTION(func) \
+{ \
+ .name = #func, \
+ .groups = func ## _grps, \
+ .num_groups = ARRAY_SIZE(func ## _grps), \
+}
+
+/*
+ * List of supported functions
+ */
+static const struct ns2_pin_function ns2_pin_functions[] = {
+ NS2_PIN_FUNCTION(nand),
+ NS2_PIN_FUNCTION(nor),
+ NS2_PIN_FUNCTION(gpio),
+ NS2_PIN_FUNCTION(pcie),
+ NS2_PIN_FUNCTION(uart0),
+ NS2_PIN_FUNCTION(uart1),
+ NS2_PIN_FUNCTION(uart2),
+ NS2_PIN_FUNCTION(pwm),
+};
+
+static int ns2_get_groups_count(struct pinctrl_dev *pctrl_dev)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return pinctrl->num_groups;
+}
+
+static const char *ns2_get_group_name(struct pinctrl_dev *pctrl_dev,
+ unsigned int selector)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return pinctrl->groups[selector].name;
+}
+
+static int ns2_get_group_pins(struct pinctrl_dev *pctrl_dev,
+ unsigned int selector, const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ *pins = pinctrl->groups[selector].pins;
+ *num_pins = pinctrl->groups[selector].num_pins;
+
+ return 0;
+}
+
+static void ns2_pin_dbg_show(struct pinctrl_dev *pctrl_dev,
+ struct seq_file *s, unsigned int offset)
+{
+ seq_printf(s, " %s", dev_name(pctrl_dev->dev));
+}
+
+static struct pinctrl_ops ns2_pinctrl_ops = {
+ .get_groups_count = ns2_get_groups_count,
+ .get_group_name = ns2_get_group_name,
+ .get_group_pins = ns2_get_group_pins,
+ .pin_dbg_show = ns2_pin_dbg_show,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+static int ns2_get_functions_count(struct pinctrl_dev *pctrl_dev)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return pinctrl->num_functions;
+}
+
+static const char *ns2_get_function_name(struct pinctrl_dev *pctrl_dev,
+ unsigned int selector)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ return pinctrl->functions[selector].name;
+}
+
+static int ns2_get_function_groups(struct pinctrl_dev *pctrl_dev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int * const num_groups)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+
+ *groups = pinctrl->functions[selector].groups;
+ *num_groups = pinctrl->functions[selector].num_groups;
+
+ return 0;
+}
+
+static int ns2_pinmux_set(struct ns2_pinctrl *pinctrl,
+ const struct ns2_pin_function *func,
+ const struct ns2_pin_group *grp,
+ struct ns2_mux_log *mux_log)
+{
+ const struct ns2_mux *mux = &grp->mux;
+ int i;
+ u32 val, mask;
+ unsigned long flags;
+ void __iomem *base_address;
+
+ for (i = 0; i < NS2_NUM_IOMUX; i++) {
+ if ((mux->shift != mux_log[i].mux.shift) ||
+ (mux->base != mux_log[i].mux.base) ||
+ (mux->offset != mux_log[i].mux.offset))
+ continue;
+
+ /* if this is a new configuration, just do it! */
+ if (!mux_log[i].is_configured)
+ break;
+
+ /*
+ * IOMUX has been configured previously and the caller is now trying to
+ * configure it to a different function
+ */
+ if (mux_log[i].mux.alt != mux->alt) {
+ dev_err(pinctrl->dev,
+ "double configuration error detected!\n");
+ dev_err(pinctrl->dev, "func:%s grp:%s\n",
+ func->name, grp->name);
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+ if (i == NS2_NUM_IOMUX)
+ return -EINVAL;
+
+ mask = mux->mask;
+ mux_log[i].mux.alt = mux->alt;
+ mux_log[i].is_configured = true;
+
+ switch (mux->base) {
+ case NS2_PIN_MUX_BASE0:
+ base_address = pinctrl->base0;
+ break;
+
+ case NS2_PIN_MUX_BASE1:
+ base_address = pinctrl->base1;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pinctrl->lock, flags);
+ val = readl(base_address + grp->mux.offset);
+ val &= ~(mask << grp->mux.shift);
+ val |= grp->mux.alt << grp->mux.shift;
+ writel(val, (base_address + grp->mux.offset));
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+ return 0;
+}
+
+static int ns2_pinmux_enable(struct pinctrl_dev *pctrl_dev,
+ unsigned int func_select, unsigned int grp_select)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+ const struct ns2_pin_function *func;
+ const struct ns2_pin_group *grp;
+
+ if (grp_select >= pinctrl->num_groups ||
+ func_select >= pinctrl->num_functions)
+ return -EINVAL;
+
+ func = &pinctrl->functions[func_select];
+ grp = &pinctrl->groups[grp_select];
+
+ dev_dbg(pctrl_dev->dev, "func:%u name:%s grp:%u name:%s\n",
+ func_select, func->name, grp_select, grp->name);
+
+ dev_dbg(pctrl_dev->dev, "offset:0x%08x shift:%u alt:%u\n",
+ grp->mux.offset, grp->mux.shift, grp->mux.alt);
+
+ return ns2_pinmux_set(pinctrl, func, grp, pinctrl->mux_log);
+}
+
+static int ns2_pin_set_enable(struct pinctrl_dev *pctrldev, unsigned int pin,
+ u16 enable)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+ struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+ unsigned long flags;
+ u32 val;
+ void __iomem *base_address;
+
+ base_address = pinctrl->pinconf_base;
+ spin_lock_irqsave(&pinctrl->lock, flags);
+ val = readl(base_address + pin_data->pin_conf.offset);
+ val &= ~(NS2_PIN_INPUT_EN_MASK << pin_data->pin_conf.input_en);
+
+ if (!enable)
+ val |= NS2_PIN_INPUT_EN_MASK << pin_data->pin_conf.input_en;
+
+ writel(val, (base_address + pin_data->pin_conf.offset));
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+ dev_dbg(pctrldev->dev, "pin:%u set enable:%d\n", pin, enable);
+ return 0;
+}
+
+static int ns2_pin_get_enable(struct pinctrl_dev *pctrldev, unsigned int pin)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+ struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+ unsigned long flags;
+ int enable;
+
+ spin_lock_irqsave(&pinctrl->lock, flags);
+ enable = readl(pinctrl->pinconf_base + pin_data->pin_conf.offset);
+ enable = (enable >> pin_data->pin_conf.input_en) &
+ NS2_PIN_INPUT_EN_MASK;
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+ if (!enable)
+ enable = NS2_PIN_INPUT_EN_MASK;
+ else
+ enable = 0;
+
+ dev_dbg(pctrldev->dev, "pin:%u get disable:%d\n", pin, enable);
+ return enable;
+}
+
+static int ns2_pin_set_slew(struct pinctrl_dev *pctrldev, unsigned int pin,
+ u16 slew)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+ struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+ unsigned long flags;
+ u32 val;
+ void __iomem *base_address;
+
+ base_address = pinctrl->pinconf_base;
+ spin_lock_irqsave(&pinctrl->lock, flags);
+ val = readl(base_address + pin_data->pin_conf.offset);
+ val &= ~(NS2_PIN_SRC_MASK << pin_data->pin_conf.src_shift);
+
+ if (slew)
+ val |= NS2_PIN_SRC_MASK << pin_data->pin_conf.src_shift;
+
+ writel(val, (base_address + pin_data->pin_conf.offset));
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+ dev_dbg(pctrldev->dev, "pin:%u set slew:%d\n", pin, slew);
+ return 0;
+}
+
+static int ns2_pin_get_slew(struct pinctrl_dev *pctrldev, unsigned int pin,
+ u16 *slew)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+ struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&pinctrl->lock, flags);
+ val = readl(pinctrl->pinconf_base + pin_data->pin_conf.offset);
+ *slew = (val >> pin_data->pin_conf.src_shift) & NS2_PIN_SRC_MASK;
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+ dev_dbg(pctrldev->dev, "pin:%u get slew:%d\n", pin, *slew);
+ return 0;
+}
+
+static int ns2_pin_set_pull(struct pinctrl_dev *pctrldev, unsigned int pin,
+ bool pull_up, bool pull_down)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+ struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+ unsigned long flags;
+ u32 val;
+ void __iomem *base_address;
+
+ base_address = pinctrl->pinconf_base;
+ spin_lock_irqsave(&pinctrl->lock, flags);
+ val = readl(base_address + pin_data->pin_conf.offset);
+ val &= ~(NS2_PIN_PULL_MASK << pin_data->pin_conf.pull_shift);
+
+ if (pull_up)
+ val |= NS2_PIN_PULL_UP << pin_data->pin_conf.pull_shift;
+ if (pull_down)
+ val |= NS2_PIN_PULL_DOWN << pin_data->pin_conf.pull_shift;
+ writel(val, (base_address + pin_data->pin_conf.offset));
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+ dev_dbg(pctrldev->dev, "pin:%u set pullup:%d pulldown: %d\n",
+ pin, pull_up, pull_down);
+ return 0;
+}
+
+static void ns2_pin_get_pull(struct pinctrl_dev *pctrldev,
+ unsigned int pin, bool *pull_up,
+ bool *pull_down)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+ struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&pinctrl->lock, flags);
+ val = readl(pinctrl->pinconf_base + pin_data->pin_conf.offset);
+ val = (val >> pin_data->pin_conf.pull_shift) & NS2_PIN_PULL_MASK;
+ *pull_up = false;
+ *pull_down = false;
+
+ if (val == NS2_PIN_PULL_UP)
+ *pull_up = true;
+
+ if (val == NS2_PIN_PULL_DOWN)
+ *pull_down = true;
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+}
+
+static int ns2_pin_set_strength(struct pinctrl_dev *pctrldev, unsigned int pin,
+ u16 strength)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+ struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+ u32 val;
+ unsigned long flags;
+ void __iomem *base_address;
+
+ /* make sure drive strength is supported */
+ if (strength < 2 || strength > 16 || (strength % 2))
+ return -ENOTSUPP;
+
+ base_address = pinctrl->pinconf_base;
+ spin_lock_irqsave(&pinctrl->lock, flags);
+ val = readl(base_address + pin_data->pin_conf.offset);
+ val &= ~(NS2_PIN_DRIVE_STRENGTH_MASK << pin_data->pin_conf.drive_shift);
+ val |= ((strength / 2) - 1) << pin_data->pin_conf.drive_shift;
+ writel(val, (base_address + pin_data->pin_conf.offset));
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+ dev_dbg(pctrldev->dev, "pin:%u set drive strength:%d mA\n",
+ pin, strength);
+ return 0;
+}
+
+static int ns2_pin_get_strength(struct pinctrl_dev *pctrldev, unsigned int pin,
+ u16 *strength)
+{
+ struct ns2_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctrldev);
+ struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pinctrl->lock, flags);
+ val = readl(pinctrl->pinconf_base + pin_data->pin_conf.offset);
+ *strength = (val >> pin_data->pin_conf.drive_shift) &
+ NS2_PIN_DRIVE_STRENGTH_MASK;
+ *strength = (*strength + 1) * 2;
+ spin_unlock_irqrestore(&pinctrl->lock, flags);
+
+ dev_dbg(pctrldev->dev, "pin:%u get drive strength:%d mA\n",
+ pin, *strength);
+ return 0;
+}
+
+static int ns2_pin_config_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct ns2_pin *pin_data = pctldev->desc->pins[pin].drv_data;
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ bool pull_up, pull_down;
+ u16 arg = 0;
+ int ret;
+
+ if (pin_data->pin_conf.base == -1)
+ return -ENOTSUPP;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ ns2_pin_get_pull(pctldev, pin, &pull_up, &pull_down);
+ if (!pull_up && !pull_down)
+ return 0;
+ else
+ return -EINVAL;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ns2_pin_get_pull(pctldev, pin, &pull_up, &pull_down);
+ if (pull_up)
+ return 0;
+ else
+ return -EINVAL;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ns2_pin_get_pull(pctldev, pin, &pull_up, &pull_down);
+ if (pull_down)
+ return 0;
+ else
+ return -EINVAL;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = ns2_pin_get_strength(pctldev, pin, &arg);
+ if (ret)
+ return ret;
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+
+ case PIN_CONFIG_SLEW_RATE:
+ ret = ns2_pin_get_slew(pctldev, pin, &arg);
+ if (ret)
+ return ret;
+ *config = pinconf_to_config_packed(param, arg);
+ return 0;
+
+ case PIN_CONFIG_INPUT_ENABLE:
+ ret = ns2_pin_get_enable(pctldev, pin);
+ if (ret)
+ return 0;
+ else
+ return -EINVAL;
+
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ns2_pin_config_set(struct pinctrl_dev *pctrldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct ns2_pin *pin_data = pctrldev->desc->pins[pin].drv_data;
+ enum pin_config_param param;
+ unsigned int i;
+ u16 arg;
+ int ret = -ENOTSUPP;
+
+ if (pin_data->pin_conf.base == -1)
+ return -ENOTSUPP;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ ret = ns2_pin_set_pull(pctrldev, pin, false, false);
+ if (ret < 0)
+ goto out;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = ns2_pin_set_pull(pctrldev, pin, true, false);
+ if (ret < 0)
+ goto out;
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = ns2_pin_set_pull(pctrldev, pin, false, true);
+ if (ret < 0)
+ goto out;
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = ns2_pin_set_strength(pctrldev, pin, arg);
+ if (ret < 0)
+ goto out;
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ ret = ns2_pin_set_slew(pctrldev, pin, arg);
+ if (ret < 0)
+ goto out;
+ break;
+
+ case PIN_CONFIG_INPUT_ENABLE:
+ ret = ns2_pin_set_enable(pctrldev, pin, arg);
+ if (ret < 0)
+ goto out;
+ break;
+
+ default:
+ dev_err(pctrldev->dev, "invalid configuration\n");
+ return -ENOTSUPP;
+ }
+ }
+out:
+ return ret;
+}
+
+static struct pinmux_ops ns2_pinmux_ops = {
+ .get_functions_count = ns2_get_functions_count,
+ .get_function_name = ns2_get_function_name,
+ .get_function_groups = ns2_get_function_groups,
+ .set_mux = ns2_pinmux_enable,
+};
+
+static const struct pinconf_ops ns2_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = ns2_pin_config_get,
+ .pin_config_set = ns2_pin_config_set,
+};
+
+static struct pinctrl_desc ns2_pinctrl_desc = {
+ .name = "ns2-pinmux",
+ .pctlops = &ns2_pinctrl_ops,
+ .pmxops = &ns2_pinmux_ops,
+ .confops = &ns2_pinconf_ops,
+};
+
+static int ns2_mux_log_init(struct ns2_pinctrl *pinctrl)
+{
+ struct ns2_mux_log *log;
+ unsigned int i;
+
+ pinctrl->mux_log = devm_kcalloc(pinctrl->dev, NS2_NUM_IOMUX,
+ sizeof(struct ns2_mux_log),
+ GFP_KERNEL);
+ if (!pinctrl->mux_log)
+ return -ENOMEM;
+
+ for (i = 0; i < NS2_NUM_IOMUX; i++)
+ pinctrl->mux_log[i].is_configured = false;
+ /* Group 0 uses bit 31 in the IOMUX_PAD_FUNCTION_0 register */
+ log = &pinctrl->mux_log[0];
+ log->mux.base = NS2_PIN_MUX_BASE0;
+ log->mux.offset = 0;
+ log->mux.shift = 31;
+ log->mux.alt = 0;
+
+ /*
+ * Groups 1 through 14 use two bits each in the
+ * IOMUX_PAD_FUNCTION_1 register starting with
+ * bit position 30.
+ */
+ for (i = 1; i < (NS2_NUM_IOMUX - NS2_NUM_PWM_MUX); i++) {
+ log = &pinctrl->mux_log[i];
+ log->mux.base = NS2_PIN_MUX_BASE0;
+ log->mux.offset = NS2_MUX_PAD_FUNC1_OFFSET;
+ log->mux.shift = 32 - (i * 2);
+ log->mux.alt = 0;
+ }
+
+ /*
+ * Groups 15 through 18 use one bit each in the
+ * AUX_SEL register.
+ */
+ for (i = 0; i < NS2_NUM_PWM_MUX; i++) {
+ log = &pinctrl->mux_log[(NS2_NUM_IOMUX - NS2_NUM_PWM_MUX) + i];
+ log->mux.base = NS2_PIN_MUX_BASE1;
+ log->mux.offset = 0;
+ log->mux.shift = i;
+ log->mux.alt = 0;
+ }
+ return 0;
+}
+
+static int ns2_pinmux_probe(struct platform_device *pdev)
+{
+ struct ns2_pinctrl *pinctrl;
+ struct resource *res;
+ int i, ret;
+ struct pinctrl_pin_desc *pins;
+ unsigned int num_pins = ARRAY_SIZE(ns2_pins);
+
+ pinctrl = devm_kzalloc(&pdev->dev, sizeof(*pinctrl), GFP_KERNEL);
+ if (!pinctrl)
+ return -ENOMEM;
+
+ pinctrl->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pinctrl);
+ spin_lock_init(&pinctrl->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pinctrl->base0 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pinctrl->base0)) {
+ dev_err(&pdev->dev, "unable to map I/O space\n");
+ return PTR_ERR(pinctrl->base0);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ return -EINVAL;
+
+ pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!pinctrl->base1) {
+ dev_err(&pdev->dev, "unable to map I/O space\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ pinctrl->pinconf_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pinctrl->pinconf_base)) {
+ dev_err(&pdev->dev, "unable to map I/O space\n");
+ return PTR_ERR(pinctrl->pinconf_base);
+ }
+
+ ret = ns2_mux_log_init(pinctrl);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to initialize IOMUX log\n");
+ return ret;
+ }
+
+ pins = devm_kcalloc(&pdev->dev, num_pins, sizeof(*pins), GFP_KERNEL);
+ if (!pins)
+ return -ENOMEM;
+
+ for (i = 0; i < num_pins; i++) {
+ pins[i].number = ns2_pins[i].pin;
+ pins[i].name = ns2_pins[i].name;
+ pins[i].drv_data = &ns2_pins[i];
+ }
+
+ pinctrl->groups = ns2_pin_groups;
+ pinctrl->num_groups = ARRAY_SIZE(ns2_pin_groups);
+ pinctrl->functions = ns2_pin_functions;
+ pinctrl->num_functions = ARRAY_SIZE(ns2_pin_functions);
+ ns2_pinctrl_desc.pins = pins;
+ ns2_pinctrl_desc.npins = num_pins;
+
+ pinctrl->pctl = pinctrl_register(&ns2_pinctrl_desc, &pdev->dev,
+ pinctrl);
+ if (IS_ERR(pinctrl->pctl)) {
+ dev_err(&pdev->dev, "unable to register IOMUX pinctrl\n");
+ return PTR_ERR(pinctrl->pctl);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ns2_pinmux_of_match[] = {
+ {.compatible = "brcm,ns2-pinmux"},
+ { }
+};
+
+static struct platform_driver ns2_pinmux_driver = {
+ .driver = {
+ .name = "ns2-pinmux",
+ .of_match_table = ns2_pinmux_of_match,
+ },
+ .probe = ns2_pinmux_probe,
+};
+
+static int __init ns2_pinmux_init(void)
+{
+ return platform_driver_register(&ns2_pinmux_driver);
+}
+arch_initcall(ns2_pinmux_init);
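
The drive-strength arithmetic in ns2_pin_set_strength()/ns2_pin_get_strength() packs the milliamp value into the 3-bit field as (mA / 2) - 1, so the supported 2..16 mA range in 2 mA steps maps to register values 0..7. A standalone mirror of that encoding, for reference only (not driver code):

    /* mirror of the driver's drive-strength encoding, for reference */
    static unsigned int ns2_ma_to_field(unsigned int ma)
    {
            return (ma / 2) - 1;    /* 2 mA -> 0, 4 mA -> 1, ..., 16 mA -> 7 */
    }

    static unsigned int ns2_field_to_ma(unsigned int field)
    {
            return (field + 1) * 2; /* inverse: 0 -> 2 mA, ..., 7 -> 16 mA */
    }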
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index ac900435d..a8b37a9a8 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -363,7 +363,7 @@ static const struct pinctrl_ops nsp_pctrl_ops = {
.get_groups_count = nsp_get_groups_count,
.get_group_name = nsp_get_group_name,
.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int nsp_gpio_set_slew(struct nsp_gpio *chip, unsigned gpio, u16 slew)
@@ -609,7 +609,7 @@ static int nsp_gpio_register_pinconf(struct nsp_gpio *chip)
pctldesc->npins = gc->ngpio;
pctldesc->confops = &nsp_pconf_ops;
- chip->pctl = pinctrl_register(pctldesc, chip->dev, chip);
+ chip->pctl = devm_pinctrl_register(chip->dev, pctldesc, chip);
if (IS_ERR(chip->pctl)) {
dev_err(chip->dev, "unable to register pinctrl device\n");
return PTR_ERR(chip->pctl);
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index 46f2b4818..8f0dc02f7 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -104,7 +104,7 @@ static const struct pinctrl_ops berlin_pinctrl_ops = {
.get_groups_count = &berlin_pinctrl_get_group_count,
.get_group_name = &berlin_pinctrl_get_group_name,
.dt_node_to_map = &berlin_pinctrl_dt_node_to_map,
- .dt_free_map = &pinctrl_utils_dt_free_map,
+ .dt_free_map = &pinctrl_utils_free_map,
};
static int berlin_pinmux_get_functions_count(struct pinctrl_dev *pctrl_dev)
@@ -316,7 +316,8 @@ int berlin_pinctrl_probe_regmap(struct platform_device *pdev,
return ret;
}
- pctrl->pctrl_dev = pinctrl_register(&berlin_pctrl_desc, dev, pctrl);
+ pctrl->pctrl_dev = devm_pinctrl_register(dev, &berlin_pctrl_desc,
+ pctrl);
if (IS_ERR(pctrl->pctrl_dev)) {
dev_err(dev, "failed to register pinctrl driver\n");
return PTR_ERR(pctrl->pctrl_dev);
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index f67a8b7a4..98d2a1bb4 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1872,6 +1872,69 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
}
EXPORT_SYMBOL_GPL(pinctrl_unregister);
+static void devm_pinctrl_dev_release(struct device *dev, void *res)
+{
+ struct pinctrl_dev *pctldev = *(struct pinctrl_dev **)res;
+
+ pinctrl_unregister(pctldev);
+}
+
+static int devm_pinctrl_dev_match(struct device *dev, void *res, void *data)
+{
+ struct pinctrl_dev **r = res;
+
+ if (WARN_ON(!r || !*r))
+ return 0;
+
+ return *r == data;
+}
+
+/**
+ * devm_pinctrl_register() - Resource managed version of pinctrl_register().
+ * @dev: parent device for this pin controller
+ * @pctldesc: descriptor for this pin controller
+ * @driver_data: private pin controller data for this pin controller
+ *
+ * Returns an error pointer if pin controller registration failed. Otherwise
+ * it returns a valid pinctrl handle.
+ *
+ * The pinctrl device will be automatically released when the device is unbound.
+ */
+struct pinctrl_dev *devm_pinctrl_register(struct device *dev,
+ struct pinctrl_desc *pctldesc,
+ void *driver_data)
+{
+ struct pinctrl_dev **ptr, *pctldev;
+
+ ptr = devres_alloc(devm_pinctrl_dev_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ pctldev = pinctrl_register(pctldesc, dev, driver_data);
+ if (IS_ERR(pctldev)) {
+ devres_free(ptr);
+ return pctldev;
+ }
+
+ *ptr = pctldev;
+ devres_add(dev, ptr);
+
+ return pctldev;
+}
+EXPORT_SYMBOL_GPL(devm_pinctrl_register);
+
+/**
+ * devm_pinctrl_unregister() - Resource managed version of pinctrl_unregister().
+ * @dev: device for which the resource was allocated
+ * @pctldev: the pinctrl device to unregister.
+ */
+void devm_pinctrl_unregister(struct device *dev, struct pinctrl_dev *pctldev)
+{
+ WARN_ON(devres_release(dev, devm_pinctrl_dev_release,
+ devm_pinctrl_dev_match, pctldev));
+}
+EXPORT_SYMBOL_GPL(devm_pinctrl_unregister);
+
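For context, a minimal sketch of a consumer of the new helper (the foo names
are illustrative, not part of this patch). Because devres calls
pinctrl_unregister() automatically when the device is unbound, the driver
needs no .remove callback, which is why the imx remove hooks are deleted in
the hunks below:

 static int foo_pinctrl_probe(struct platform_device *pdev)
 {
  struct foo_pinctrl *fp;

  fp = devm_kzalloc(&pdev->dev, sizeof(*fp), GFP_KERNEL);
  if (!fp)
   return -ENOMEM;

  /* Unregistered automatically when &pdev->dev is unbound. */
  fp->pctl = devm_pinctrl_register(&pdev->dev, &foo_pinctrl_desc, fp);
  if (IS_ERR(fp->pctl))
   return PTR_ERR(fp->pctl);

  return 0;
 }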
static int __init pinctrl_init(void)
{
pr_info("initialized pinctrl subsystem\n");
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index 9cfa54407..eccb47480 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -209,9 +209,9 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
pin_reg = &info->pin_regs[pin_id];
if (pin_reg->mux_reg == -1) {
- dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
+ dev_dbg(ipctl->dev, "Pin(%s) does not support mux function\n",
info->pins[pin_id].name);
- return -EINVAL;
+ continue;
}
if (info->flags & SHARE_MUX_CONF_REG) {
@@ -789,7 +789,7 @@ int imx_pinctrl_probe(struct platform_device *pdev,
ipctl->info = info;
ipctl->dev = info->dev;
platform_set_drvdata(pdev, ipctl);
- ipctl->pctl = pinctrl_register(&imx_pinctrl_desc, &pdev->dev, ipctl);
+ ipctl->pctl = devm_pinctrl_register(&pdev->dev, &imx_pinctrl_desc, ipctl);
if (IS_ERR(ipctl->pctl)) {
dev_err(&pdev->dev, "could not register IMX pinctrl driver\n");
return PTR_ERR(ipctl->pctl);
@@ -799,12 +799,3 @@ int imx_pinctrl_probe(struct platform_device *pdev,
return 0;
}
-
-int imx_pinctrl_remove(struct platform_device *pdev)
-{
- struct imx_pinctrl *ipctl = platform_get_drvdata(pdev);
-
- pinctrl_unregister(ipctl->pctl);
-
- return 0;
-}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.h b/drivers/pinctrl/freescale/pinctrl-imx.h
index 3b8bd81a3..8af8aa289 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx.h
@@ -99,5 +99,4 @@ struct imx_pinctrl_soc_info {
int imx_pinctrl_probe(struct platform_device *pdev,
struct imx_pinctrl_soc_info *info);
-int imx_pinctrl_remove(struct platform_device *pdev);
#endif /* __DRIVERS_PINCTRL_IMX_H */
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index acaf84cad..b4400cb19 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -635,7 +635,7 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev,
ipctl->info = info;
ipctl->dev = info->dev;
platform_set_drvdata(pdev, ipctl);
- ipctl->pctl = pinctrl_register(pctl_desc, &pdev->dev, ipctl);
+ ipctl->pctl = devm_pinctrl_register(&pdev->dev, pctl_desc, ipctl);
if (IS_ERR(ipctl->pctl)) {
dev_err(&pdev->dev, "could not register IMX pinctrl driver\n");
return PTR_ERR(ipctl->pctl);
@@ -652,12 +652,3 @@ int imx1_pinctrl_core_probe(struct platform_device *pdev,
return 0;
}
-
-int imx1_pinctrl_core_remove(struct platform_device *pdev)
-{
- struct imx1_pinctrl *ipctl = platform_get_drvdata(pdev);
-
- pinctrl_unregister(ipctl->pctl);
-
- return 0;
-}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.c b/drivers/pinctrl/freescale/pinctrl-imx1.c
index d3bacb7d6..04723455d 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1.c
@@ -269,7 +269,6 @@ static struct platform_driver imx1_pinctrl_driver = {
.name = "imx1-pinctrl",
.of_match_table = imx1_pinctrl_of_match,
},
- .remove = imx1_pinctrl_core_remove,
};
module_platform_driver_probe(imx1_pinctrl_driver, imx1_pinctrl_probe);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1.h b/drivers/pinctrl/freescale/pinctrl-imx1.h
index 692a54c15..174074308 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1.h
+++ b/drivers/pinctrl/freescale/pinctrl-imx1.h
@@ -69,5 +69,4 @@ struct imx1_pinctrl_soc_info {
int imx1_pinctrl_core_probe(struct platform_device *pdev,
struct imx1_pinctrl_soc_info *info);
-int imx1_pinctrl_core_remove(struct platform_device *pdev);
#endif /* __DRIVERS_PINCTRL_IMX1_H */
diff --git a/drivers/pinctrl/freescale/pinctrl-imx21.c b/drivers/pinctrl/freescale/pinctrl-imx21.c
index 9d9aca3db..aa1221f4d 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx21.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx21.c
@@ -332,7 +332,6 @@ static struct platform_driver imx21_pinctrl_driver = {
.name = "imx21-pinctrl",
.of_match_table = imx21_pinctrl_of_match,
},
- .remove = imx1_pinctrl_core_remove,
};
module_platform_driver_probe(imx21_pinctrl_driver, imx21_pinctrl_probe);
diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
index 293ed4381..81ad546d7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
@@ -331,7 +331,6 @@ static struct platform_driver imx25_pinctrl_driver = {
.of_match_table = of_match_ptr(imx25_pinctrl_of_match),
},
.probe = imx25_pinctrl_probe,
- .remove = imx_pinctrl_remove,
};
static int __init imx25_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx27.c b/drivers/pinctrl/freescale/pinctrl-imx27.c
index a461d5881..f828fbbba 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx27.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx27.c
@@ -405,7 +405,6 @@ static struct platform_driver imx27_pinctrl_driver = {
.of_match_table = of_match_ptr(imx27_pinctrl_of_match),
},
.probe = imx27_pinctrl_probe,
- .remove = imx1_pinctrl_core_remove,
};
static int __init imx27_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx35.c b/drivers/pinctrl/freescale/pinctrl-imx35.c
index 9109c10c5..13eb224a2 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx35.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx35.c
@@ -1021,7 +1021,6 @@ static struct platform_driver imx35_pinctrl_driver = {
.of_match_table = imx35_pinctrl_of_match,
},
.probe = imx35_pinctrl_probe,
- .remove = imx_pinctrl_remove,
};
static int __init imx35_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx50.c b/drivers/pinctrl/freescale/pinctrl-imx50.c
index 8acc4d960..95a36c88b 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx50.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx50.c
@@ -408,7 +408,6 @@ static struct platform_driver imx50_pinctrl_driver = {
.of_match_table = of_match_ptr(imx50_pinctrl_of_match),
},
.probe = imx50_pinctrl_probe,
- .remove = imx_pinctrl_remove,
};
static int __init imx50_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx51.c b/drivers/pinctrl/freescale/pinctrl-imx51.c
index 8dec494aa..0863e5279 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx51.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx51.c
@@ -784,7 +784,6 @@ static struct platform_driver imx51_pinctrl_driver = {
.of_match_table = imx51_pinctrl_of_match,
},
.probe = imx51_pinctrl_probe,
- .remove = imx_pinctrl_remove,
};
static int __init imx51_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx53.c b/drivers/pinctrl/freescale/pinctrl-imx53.c
index d39dfd6a3..64c9cbe2a 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx53.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx53.c
@@ -471,7 +471,6 @@ static struct platform_driver imx53_pinctrl_driver = {
.of_match_table = imx53_pinctrl_of_match,
},
.probe = imx53_pinctrl_probe,
- .remove = imx_pinctrl_remove,
};
static int __init imx53_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6dl.c b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
index 5a2cdb054..de17bac8a 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6dl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6dl.c
@@ -477,7 +477,6 @@ static struct platform_driver imx6dl_pinctrl_driver = {
.of_match_table = imx6dl_pinctrl_of_match,
},
.probe = imx6dl_pinctrl_probe,
- .remove = imx_pinctrl_remove,
};
static int __init imx6dl_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6q.c b/drivers/pinctrl/freescale/pinctrl-imx6q.c
index 7d50a36b1..55cd8a0e3 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6q.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6q.c
@@ -483,7 +483,6 @@ static struct platform_driver imx6q_pinctrl_driver = {
.of_match_table = imx6q_pinctrl_of_match,
},
.probe = imx6q_pinctrl_probe,
- .remove = imx_pinctrl_remove,
};
static int __init imx6q_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sl.c b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
index e27d17fdc..bf455b8e7 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sl.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sl.c
@@ -384,7 +384,6 @@ static struct platform_driver imx6sl_pinctrl_driver = {
.of_match_table = imx6sl_pinctrl_of_match,
},
.probe = imx6sl_pinctrl_probe,
- .remove = imx_pinctrl_remove,
};
static int __init imx6sl_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6sx.c b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
index 117180c26..84118c388 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6sx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6sx.c
@@ -387,7 +387,6 @@ static struct platform_driver imx6sx_pinctrl_driver = {
.of_match_table = of_match_ptr(imx6sx_pinctrl_of_match),
},
.probe = imx6sx_pinctrl_probe,
- .remove = imx_pinctrl_remove,
};
static int __init imx6sx_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6ul.c b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
index 78627c70c..c707fdd93 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx6ul.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
@@ -303,7 +303,6 @@ static struct platform_driver imx6ul_pinctrl_driver = {
.of_match_table = of_match_ptr(imx6ul_pinctrl_of_match),
},
.probe = imx6ul_pinctrl_probe,
- .remove = imx_pinctrl_remove,
};
static int __init imx6ul_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-imx7d.c b/drivers/pinctrl/freescale/pinctrl-imx7d.c
index 1c89613eb..d30d91f80 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx7d.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx7d.c
@@ -395,7 +395,6 @@ static struct platform_driver imx7d_pinctrl_driver = {
.of_match_table = of_match_ptr(imx7d_pinctrl_of_match),
},
.probe = imx7d_pinctrl_probe,
- .remove = imx_pinctrl_remove,
};
static int __init imx7d_pinctrl_init(void)
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index 587d1ff62..6d81be096 100644
--- a/drivers/pinctrl/freescale/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -318,7 +318,6 @@ static struct platform_driver vf610_pinctrl_driver = {
.of_match_table = vf610_pinctrl_of_match,
},
.probe = vf610_pinctrl_probe,
- .remove = imx_pinctrl_remove,
};
static int __init vf610_pinctrl_init(void)
diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig
index 4d2efad65..1c74e038b 100644
--- a/drivers/pinctrl/intel/Kconfig
+++ b/drivers/pinctrl/intel/Kconfig
@@ -6,6 +6,9 @@ config PINCTRL_BAYTRAIL
bool "Intel Baytrail GPIO pin control"
depends on GPIOLIB && ACPI
select GPIOLIB_IRQCHIP
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
help
driver for memory mapped GPIO functionality on Intel Baytrail
platforms. Supports 3 banks with 102, 28 and 44 gpios.
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 21b79a446..7abfd42e8 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
+#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
@@ -27,6 +28,9 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
/* memory mapped register offsets */
#define BYT_CONF0_REG 0x000
@@ -34,6 +38,7 @@
#define BYT_VAL_REG 0x008
#define BYT_DFT_REG 0x00c
#define BYT_INT_STAT_REG 0x800
+#define BYT_DEBOUNCE_REG 0x9d0
/* BYT_CONF0_REG register bits */
#define BYT_IODEN BIT(31)
@@ -41,6 +46,7 @@
#define BYT_TRIG_NEG BIT(26)
#define BYT_TRIG_POS BIT(25)
#define BYT_TRIG_LVL BIT(24)
+#define BYT_DEBOUNCE_EN BIT(20)
#define BYT_PULL_STR_SHIFT 9
#define BYT_PULL_STR_MASK (3 << BYT_PULL_STR_SHIFT)
#define BYT_PULL_STR_2K (0 << BYT_PULL_STR_SHIFT)
@@ -65,6 +71,16 @@
BYT_PIN_MUX)
#define BYT_VAL_RESTORE_MASK (BYT_DIR_MASK | BYT_LEVEL)
+/* BYT_DEBOUNCE_REG bits */
+#define BYT_DEBOUNCE_PULSE_MASK 0x7
+#define BYT_DEBOUNCE_PULSE_375US 1
+#define BYT_DEBOUNCE_PULSE_750US 2
+#define BYT_DEBOUNCE_PULSE_1500US 3
+#define BYT_DEBOUNCE_PULSE_3MS 4
+#define BYT_DEBOUNCE_PULSE_6MS 5
+#define BYT_DEBOUNCE_PULSE_12MS 6
+#define BYT_DEBOUNCE_PULSE_24MS 7
+
#define BYT_NGPIO_SCORE 102
#define BYT_NGPIO_NCORE 28
#define BYT_NGPIO_SUS 44
@@ -74,18 +90,231 @@
#define BYT_SUS_ACPI_UID "3"
/*
- * Baytrail gpio controller consist of three separate sub-controllers called
- * SCORE, NCORE and SUS. The sub-controllers are identified by their acpi UID.
- *
- * GPIO numbering is _not_ ordered meaning that gpio # 0 in ACPI namespace does
- * _not_ correspond to the first gpio register at controller's gpio base.
- * There is no logic or pattern in mapping gpio numbers to registers (pads) so
- * each sub-controller needs to have its own mapping table
+ * This is the function value most pins have for GPIO muxing. If the value
+ * differs from the default one, it must be explicitly mentioned. Otherwise, the
+ * pin control implementation will set the muxing value to default GPIO if it
+ * does not find a match for the requested function.
*/
+#define BYT_DEFAULT_GPIO_MUX 0
+
+struct byt_gpio_pin_context {
+ u32 conf0;
+ u32 val;
+};
+
+struct byt_simple_func_mux {
+ const char *name;
+ unsigned short func;
+};
+
+struct byt_mixed_func_mux {
+ const char *name;
+ const unsigned short *func_values;
+};
+
+struct byt_pingroup {
+ const char *name;
+ const unsigned int *pins;
+ size_t npins;
+ unsigned short has_simple_funcs;
+ union {
+ const struct byt_simple_func_mux *simple_funcs;
+ const struct byt_mixed_func_mux *mixed_funcs;
+ };
+ size_t nfuncs;
+};
+
+struct byt_function {
+ const char *name;
+ const char * const *groups;
+ size_t ngroups;
+};
+
+struct byt_community {
+ unsigned int pin_base;
+ size_t npins;
+ const unsigned int *pad_map;
+ void __iomem *reg_base;
+};
-/* score_pins[gpio_nr] = pad_nr */
+#define SIMPLE_FUNC(n, f) \
+ { \
+ .name = (n), \
+ .func = (f), \
+ }
+#define MIXED_FUNC(n, f) \
+ { \
+ .name = (n), \
+ .func_values = (f), \
+ }
+
+#define PIN_GROUP_SIMPLE(n, p, f) \
+ { \
+ .name = (n), \
+ .pins = (p), \
+ .npins = ARRAY_SIZE((p)), \
+ .has_simple_funcs = 1, \
+ { \
+ .simple_funcs = (f), \
+ }, \
+ .nfuncs = ARRAY_SIZE((f)), \
+ }
+#define PIN_GROUP_MIXED(n, p, f) \
+ { \
+ .name = (n), \
+ .pins = (p), \
+ .npins = ARRAY_SIZE((p)), \
+ .has_simple_funcs = 0, \
+ { \
+ .mixed_funcs = (f), \
+ }, \
+ .nfuncs = ARRAY_SIZE((f)), \
+ }
+
+#define FUNCTION(n, g) \
+ { \
+ .name = (n), \
+ .groups = (g), \
+ .ngroups = ARRAY_SIZE((g)), \
+ }
+
+#define COMMUNITY(p, n, map) \
+ { \
+ .pin_base = (p), \
+ .npins = (n), \
+ .pad_map = (map),\
+ }
-static unsigned const score_pins[BYT_NGPIO_SCORE] = {
+struct byt_pinctrl_soc_data {
+ const char *uid;
+ const struct pinctrl_pin_desc *pins;
+ size_t npins;
+ const struct byt_pingroup *groups;
+ size_t ngroups;
+ const struct byt_function *functions;
+ size_t nfunctions;
+ const struct byt_community *communities;
+ size_t ncommunities;
+};
+
+struct byt_gpio {
+ struct gpio_chip chip;
+ struct platform_device *pdev;
+ struct pinctrl_dev *pctl_dev;
+ struct pinctrl_desc pctl_desc;
+ raw_spinlock_t lock;
+ const struct byt_pinctrl_soc_data *soc_data;
+ struct byt_community *communities_copy;
+ struct byt_gpio_pin_context *saved_context;
+};
+
+/* SCORE pins, aka GPIOC_<pin_no> or GPIO_S0_SC[<pin_no>] */
+static const struct pinctrl_pin_desc byt_score_pins[] = {
+ PINCTRL_PIN(0, "SATA_GP0"),
+ PINCTRL_PIN(1, "SATA_GP1"),
+ PINCTRL_PIN(2, "SATA_LED#"),
+ PINCTRL_PIN(3, "PCIE_CLKREQ0"),
+ PINCTRL_PIN(4, "PCIE_CLKREQ1"),
+ PINCTRL_PIN(5, "PCIE_CLKREQ2"),
+ PINCTRL_PIN(6, "PCIE_CLKREQ3"),
+ PINCTRL_PIN(7, "SD3_WP"),
+ PINCTRL_PIN(8, "HDA_RST"),
+ PINCTRL_PIN(9, "HDA_SYNC"),
+ PINCTRL_PIN(10, "HDA_CLK"),
+ PINCTRL_PIN(11, "HDA_SDO"),
+ PINCTRL_PIN(12, "HDA_SDI0"),
+ PINCTRL_PIN(13, "HDA_SDI1"),
+ PINCTRL_PIN(14, "GPIO_S0_SC14"),
+ PINCTRL_PIN(15, "GPIO_S0_SC15"),
+ PINCTRL_PIN(16, "MMC1_CLK"),
+ PINCTRL_PIN(17, "MMC1_D0"),
+ PINCTRL_PIN(18, "MMC1_D1"),
+ PINCTRL_PIN(19, "MMC1_D2"),
+ PINCTRL_PIN(20, "MMC1_D3"),
+ PINCTRL_PIN(21, "MMC1_D4"),
+ PINCTRL_PIN(22, "MMC1_D5"),
+ PINCTRL_PIN(23, "MMC1_D6"),
+ PINCTRL_PIN(24, "MMC1_D7"),
+ PINCTRL_PIN(25, "MMC1_CMD"),
+ PINCTRL_PIN(26, "MMC1_RST"),
+ PINCTRL_PIN(27, "SD2_CLK"),
+ PINCTRL_PIN(28, "SD2_D0"),
+ PINCTRL_PIN(29, "SD2_D1"),
+ PINCTRL_PIN(30, "SD2_D2"),
+ PINCTRL_PIN(31, "SD2_D3_CD"),
+ PINCTRL_PIN(32, "SD2_CMD"),
+ PINCTRL_PIN(33, "SD3_CLK"),
+ PINCTRL_PIN(34, "SD3_D0"),
+ PINCTRL_PIN(35, "SD3_D1"),
+ PINCTRL_PIN(36, "SD3_D2"),
+ PINCTRL_PIN(37, "SD3_D3"),
+ PINCTRL_PIN(38, "SD3_CD"),
+ PINCTRL_PIN(39, "SD3_CMD"),
+ PINCTRL_PIN(40, "SD3_1P8EN"),
+ PINCTRL_PIN(41, "SD3_PWREN#"),
+ PINCTRL_PIN(42, "ILB_LPC_AD0"),
+ PINCTRL_PIN(43, "ILB_LPC_AD1"),
+ PINCTRL_PIN(44, "ILB_LPC_AD2"),
+ PINCTRL_PIN(45, "ILB_LPC_AD3"),
+ PINCTRL_PIN(46, "ILB_LPC_FRAME"),
+ PINCTRL_PIN(47, "ILB_LPC_CLK0"),
+ PINCTRL_PIN(48, "ILB_LPC_CLK1"),
+ PINCTRL_PIN(49, "ILB_LPC_CLKRUN"),
+ PINCTRL_PIN(50, "ILB_LPC_SERIRQ"),
+ PINCTRL_PIN(51, "PCU_SMB_DATA"),
+ PINCTRL_PIN(52, "PCU_SMB_CLK"),
+ PINCTRL_PIN(53, "PCU_SMB_ALERT"),
+ PINCTRL_PIN(54, "ILB_8254_SPKR"),
+ PINCTRL_PIN(55, "GPIO_S0_SC55"),
+ PINCTRL_PIN(56, "GPIO_S0_SC56"),
+ PINCTRL_PIN(57, "GPIO_S0_SC57"),
+ PINCTRL_PIN(58, "GPIO_S0_SC58"),
+ PINCTRL_PIN(59, "GPIO_S0_SC59"),
+ PINCTRL_PIN(60, "GPIO_S0_SC60"),
+ PINCTRL_PIN(61, "GPIO_S0_SC61"),
+ PINCTRL_PIN(62, "LPE_I2S2_CLK"),
+ PINCTRL_PIN(63, "LPE_I2S2_FRM"),
+ PINCTRL_PIN(64, "LPE_I2S2_DATAIN"),
+ PINCTRL_PIN(65, "LPE_I2S2_DATAOUT"),
+ PINCTRL_PIN(66, "SIO_SPI_CS"),
+ PINCTRL_PIN(67, "SIO_SPI_MISO"),
+ PINCTRL_PIN(68, "SIO_SPI_MOSI"),
+ PINCTRL_PIN(69, "SIO_SPI_CLK"),
+ PINCTRL_PIN(70, "SIO_UART1_RXD"),
+ PINCTRL_PIN(71, "SIO_UART1_TXD"),
+ PINCTRL_PIN(72, "SIO_UART1_RTS"),
+ PINCTRL_PIN(73, "SIO_UART1_CTS"),
+ PINCTRL_PIN(74, "SIO_UART2_RXD"),
+ PINCTRL_PIN(75, "SIO_UART2_TXD"),
+ PINCTRL_PIN(76, "SIO_UART2_RTS"),
+ PINCTRL_PIN(77, "SIO_UART2_CTS"),
+ PINCTRL_PIN(78, "SIO_I2C0_DATA"),
+ PINCTRL_PIN(79, "SIO_I2C0_CLK"),
+ PINCTRL_PIN(80, "SIO_I2C1_DATA"),
+ PINCTRL_PIN(81, "SIO_I2C1_CLK"),
+ PINCTRL_PIN(82, "SIO_I2C2_DATA"),
+ PINCTRL_PIN(83, "SIO_I2C2_CLK"),
+ PINCTRL_PIN(84, "SIO_I2C3_DATA"),
+ PINCTRL_PIN(85, "SIO_I2C3_CLK"),
+ PINCTRL_PIN(86, "SIO_I2C4_DATA"),
+ PINCTRL_PIN(87, "SIO_I2C4_CLK"),
+ PINCTRL_PIN(88, "SIO_I2C5_DATA"),
+ PINCTRL_PIN(89, "SIO_I2C5_CLK"),
+ PINCTRL_PIN(90, "SIO_I2C6_DATA"),
+ PINCTRL_PIN(91, "SIO_I2C6_CLK"),
+ PINCTRL_PIN(92, "GPIO_S0_SC92"),
+ PINCTRL_PIN(93, "GPIO_S0_SC93"),
+ PINCTRL_PIN(94, "SIO_PWM0"),
+ PINCTRL_PIN(95, "SIO_PWM1"),
+ PINCTRL_PIN(96, "PMC_PLT_CLK0"),
+ PINCTRL_PIN(97, "PMC_PLT_CLK1"),
+ PINCTRL_PIN(98, "PMC_PLT_CLK2"),
+ PINCTRL_PIN(99, "PMC_PLT_CLK3"),
+ PINCTRL_PIN(100, "PMC_PLT_CLK4"),
+ PINCTRL_PIN(101, "PMC_PLT_CLK5"),
+};
+
+static const unsigned int byt_score_pins_map[BYT_NGPIO_SCORE] = {
85, 89, 93, 96, 99, 102, 98, 101, 34, 37,
36, 38, 39, 35, 40, 84, 62, 61, 64, 59,
54, 56, 60, 55, 63, 57, 51, 50, 53, 47,
@@ -99,13 +328,263 @@ static unsigned const score_pins[BYT_NGPIO_SCORE] = {
97, 100,
};
-static unsigned const ncore_pins[BYT_NGPIO_NCORE] = {
- 19, 18, 17, 20, 21, 22, 24, 25, 23, 16,
- 14, 15, 12, 26, 27, 1, 4, 8, 11, 0,
- 3, 6, 10, 13, 2, 5, 9, 7,
+/* SCORE groups */
+static const unsigned int byt_score_uart1_pins[] = { 70, 71, 72, 73 };
+static const unsigned int byt_score_uart2_pins[] = { 74, 75, 76, 77 };
+static const struct byt_simple_func_mux byt_score_uart_mux[] = {
+ SIMPLE_FUNC("uart", 1),
+};
+
+static const unsigned int byt_score_pwm0_pins[] = { 94 };
+static const unsigned int byt_score_pwm1_pins[] = { 95 };
+static const struct byt_simple_func_mux byt_score_pwm_mux[] = {
+ SIMPLE_FUNC("pwm", 1),
+};
+
+static const unsigned int byt_score_sio_spi_pins[] = { 66, 67, 68, 69 };
+static const struct byt_simple_func_mux byt_score_spi_mux[] = {
+ SIMPLE_FUNC("spi", 1),
+};
+
+static const unsigned int byt_score_i2c5_pins[] = { 88, 89 };
+static const unsigned int byt_score_i2c6_pins[] = { 90, 91 };
+static const unsigned int byt_score_i2c4_pins[] = { 86, 87 };
+static const unsigned int byt_score_i2c3_pins[] = { 84, 85 };
+static const unsigned int byt_score_i2c2_pins[] = { 82, 83 };
+static const unsigned int byt_score_i2c1_pins[] = { 80, 81 };
+static const unsigned int byt_score_i2c0_pins[] = { 78, 79 };
+static const struct byt_simple_func_mux byt_score_i2c_mux[] = {
+ SIMPLE_FUNC("i2c", 1),
+};
+
+static const unsigned int byt_score_ssp0_pins[] = { 8, 9, 10, 11 };
+static const unsigned int byt_score_ssp1_pins[] = { 12, 13, 14, 15 };
+static const unsigned int byt_score_ssp2_pins[] = { 62, 63, 64, 65 };
+static const struct byt_simple_func_mux byt_score_ssp_mux[] = {
+ SIMPLE_FUNC("ssp", 1),
+};
+
+static const unsigned int byt_score_sdcard_pins[] = {
+ 7, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+static const unsigned short byt_score_sdcard_mux_values[] = {
+ 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+};
+static const struct byt_mixed_func_mux byt_score_sdcard_mux[] = {
+ MIXED_FUNC("sdcard", byt_score_sdcard_mux_values),
+};
+
+static const unsigned int byt_score_sdio_pins[] = { 27, 28, 29, 30, 31, 32 };
+static const struct byt_simple_func_mux byt_score_sdio_mux[] = {
+ SIMPLE_FUNC("sdio", 1),
+};
+
+static const unsigned int byt_score_emmc_pins[] = {
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+};
+static const struct byt_simple_func_mux byt_score_emmc_mux[] = {
+ SIMPLE_FUNC("emmc", 1),
+};
+
+static const unsigned int byt_score_ilb_lpc_pins[] = {
+ 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+static const struct byt_simple_func_mux byt_score_lpc_mux[] = {
+ SIMPLE_FUNC("lpc", 1),
+};
+
+static const unsigned int byt_score_sata_pins[] = { 0, 1, 2 };
+static const struct byt_simple_func_mux byt_score_sata_mux[] = {
+ SIMPLE_FUNC("sata", 1),
+};
+
+static const unsigned int byt_score_plt_clk0_pins[] = { 96 };
+static const unsigned int byt_score_plt_clk1_pins[] = { 97 };
+static const unsigned int byt_score_plt_clk2_pins[] = { 98 };
+static const unsigned int byt_score_plt_clk3_pins[] = { 99 };
+static const unsigned int byt_score_plt_clk4_pins[] = { 100 };
+static const unsigned int byt_score_plt_clk5_pins[] = { 101 };
+static const struct byt_simple_func_mux byt_score_plt_clk_mux[] = {
+ SIMPLE_FUNC("plt_clk", 1),
};
-static unsigned const sus_pins[BYT_NGPIO_SUS] = {
+static const unsigned int byt_score_smbus_pins[] = { 51, 52, 53 };
+static const struct byt_simple_func_mux byt_score_smbus_mux[] = {
+ SIMPLE_FUNC("smbus", 1),
+};
+
+static const struct byt_pingroup byt_score_groups[] = {
+ PIN_GROUP_SIMPLE("uart1_grp",
+ byt_score_uart1_pins, byt_score_uart_mux),
+ PIN_GROUP_SIMPLE("uart2_grp",
+ byt_score_uart2_pins, byt_score_uart_mux),
+ PIN_GROUP_SIMPLE("pwm0_grp",
+ byt_score_pwm0_pins, byt_score_pwm_mux),
+ PIN_GROUP_SIMPLE("pwm1_grp",
+ byt_score_pwm1_pins, byt_score_pwm_mux),
+ PIN_GROUP_SIMPLE("ssp2_grp",
+ byt_score_ssp2_pins, byt_score_pwm_mux),
+ PIN_GROUP_SIMPLE("sio_spi_grp",
+ byt_score_sio_spi_pins, byt_score_spi_mux),
+ PIN_GROUP_SIMPLE("i2c5_grp",
+ byt_score_i2c5_pins, byt_score_i2c_mux),
+ PIN_GROUP_SIMPLE("i2c6_grp",
+ byt_score_i2c6_pins, byt_score_i2c_mux),
+ PIN_GROUP_SIMPLE("i2c4_grp",
+ byt_score_i2c4_pins, byt_score_i2c_mux),
+ PIN_GROUP_SIMPLE("i2c3_grp",
+ byt_score_i2c3_pins, byt_score_i2c_mux),
+ PIN_GROUP_SIMPLE("i2c2_grp",
+ byt_score_i2c2_pins, byt_score_i2c_mux),
+ PIN_GROUP_SIMPLE("i2c1_grp",
+ byt_score_i2c1_pins, byt_score_i2c_mux),
+ PIN_GROUP_SIMPLE("i2c0_grp",
+ byt_score_i2c0_pins, byt_score_i2c_mux),
+ PIN_GROUP_SIMPLE("ssp0_grp",
+ byt_score_ssp0_pins, byt_score_ssp_mux),
+ PIN_GROUP_SIMPLE("ssp1_grp",
+ byt_score_ssp1_pins, byt_score_ssp_mux),
+ PIN_GROUP_MIXED("sdcard_grp",
+ byt_score_sdcard_pins, byt_score_sdcard_mux),
+ PIN_GROUP_SIMPLE("sdio_grp",
+ byt_score_sdio_pins, byt_score_sdio_mux),
+ PIN_GROUP_SIMPLE("emmc_grp",
+ byt_score_emmc_pins, byt_score_emmc_mux),
+ PIN_GROUP_SIMPLE("lpc_grp",
+ byt_score_ilb_lpc_pins, byt_score_lpc_mux),
+ PIN_GROUP_SIMPLE("sata_grp",
+ byt_score_sata_pins, byt_score_sata_mux),
+ PIN_GROUP_SIMPLE("plt_clk0_grp",
+ byt_score_plt_clk0_pins, byt_score_plt_clk_mux),
+ PIN_GROUP_SIMPLE("plt_clk1_grp",
+ byt_score_plt_clk1_pins, byt_score_plt_clk_mux),
+ PIN_GROUP_SIMPLE("plt_clk2_grp",
+ byt_score_plt_clk2_pins, byt_score_plt_clk_mux),
+ PIN_GROUP_SIMPLE("plt_clk3_grp",
+ byt_score_plt_clk3_pins, byt_score_plt_clk_mux),
+ PIN_GROUP_SIMPLE("plt_clk4_grp",
+ byt_score_plt_clk4_pins, byt_score_plt_clk_mux),
+ PIN_GROUP_SIMPLE("plt_clk5_grp",
+ byt_score_plt_clk5_pins, byt_score_plt_clk_mux),
+ PIN_GROUP_SIMPLE("smbus_grp",
+ byt_score_smbus_pins, byt_score_smbus_mux),
+};
+
+static const char * const byt_score_uart_groups[] = {
+ "uart1_grp", "uart2_grp",
+};
+static const char * const byt_score_pwm_groups[] = {
+ "pwm0_grp", "pwm1_grp",
+};
+static const char * const byt_score_ssp_groups[] = {
+ "ssp0_grp", "ssp1_grp", "ssp2_grp",
+};
+static const char * const byt_score_spi_groups[] = { "sio_spi_grp" };
+static const char * const byt_score_i2c_groups[] = {
+ "i2c0_grp", "i2c1_grp", "i2c2_grp", "i2c3_grp", "i2c4_grp", "i2c5_grp",
+ "i2c6_grp",
+};
+static const char * const byt_score_sdcard_groups[] = { "sdcard_grp" };
+static const char * const byt_score_sdio_groups[] = { "sdio_grp" };
+static const char * const byt_score_emmc_groups[] = { "emmc_grp" };
+static const char * const byt_score_lpc_groups[] = { "lpc_grp" };
+static const char * const byt_score_sata_groups[] = { "sata_grp" };
+static const char * const byt_score_plt_clk_groups[] = {
+ "plt_clk0_grp", "plt_clk1_grp", "plt_clk2_grp", "plt_clk3_grp",
+ "plt_clk4_grp", "plt_clk5_grp",
+};
+static const char * const byt_score_smbus_groups[] = { "smbus_grp" };
+static const char * const byt_score_gpio_groups[] = {
+ "uart1_grp", "uart2_grp", "pwm0_grp", "pwm1_grp", "ssp0_grp",
+ "ssp1_grp", "ssp2_grp", "sio_spi_grp", "i2c0_grp", "i2c1_grp",
+ "i2c2_grp", "i2c3_grp", "i2c4_grp", "i2c5_grp", "i2c6_grp",
+ "sdcard_grp", "sdio_grp", "emmc_grp", "lpc_grp", "sata_grp",
+ "plt_clk0_grp", "plt_clk1_grp", "plt_clk2_grp", "plt_clk3_grp",
+ "plt_clk4_grp", "plt_clk5_grp", "smbus_grp",
+
+};
+
+static const struct byt_function byt_score_functions[] = {
+ FUNCTION("uart", byt_score_uart_groups),
+ FUNCTION("pwm", byt_score_pwm_groups),
+ FUNCTION("ssp", byt_score_ssp_groups),
+ FUNCTION("spi", byt_score_spi_groups),
+ FUNCTION("i2c", byt_score_i2c_groups),
+ FUNCTION("sdcard", byt_score_sdcard_groups),
+ FUNCTION("sdio", byt_score_sdio_groups),
+ FUNCTION("emmc", byt_score_emmc_groups),
+ FUNCTION("lpc", byt_score_lpc_groups),
+ FUNCTION("sata", byt_score_sata_groups),
+ FUNCTION("plt_clk", byt_score_plt_clk_groups),
+ FUNCTION("smbus", byt_score_smbus_groups),
+ FUNCTION("gpio", byt_score_gpio_groups),
+};
+
+static const struct byt_community byt_score_communities[] = {
+ COMMUNITY(0, BYT_NGPIO_SCORE, byt_score_pins_map),
+};
+
+static const struct byt_pinctrl_soc_data byt_score_soc_data = {
+ .uid = BYT_SCORE_ACPI_UID,
+ .pins = byt_score_pins,
+ .npins = ARRAY_SIZE(byt_score_pins),
+ .groups = byt_score_groups,
+ .ngroups = ARRAY_SIZE(byt_score_groups),
+ .functions = byt_score_functions,
+ .nfunctions = ARRAY_SIZE(byt_score_functions),
+ .communities = byt_score_communities,
+ .ncommunities = ARRAY_SIZE(byt_score_communities),
+};
+
+/* SUS pins, aka GPIOS_<pin_no> or GPIO_S5[<pin_no>] */
+static const struct pinctrl_pin_desc byt_sus_pins[] = {
+ PINCTRL_PIN(0, "GPIO_S50"),
+ PINCTRL_PIN(1, "GPIO_S51"),
+ PINCTRL_PIN(2, "GPIO_S52"),
+ PINCTRL_PIN(3, "GPIO_S53"),
+ PINCTRL_PIN(4, "GPIO_S54"),
+ PINCTRL_PIN(5, "GPIO_S55"),
+ PINCTRL_PIN(6, "GPIO_S56"),
+ PINCTRL_PIN(7, "GPIO_S57"),
+ PINCTRL_PIN(8, "GPIO_S58"),
+ PINCTRL_PIN(9, "GPIO_S59"),
+ PINCTRL_PIN(10, "GPIO_S510"),
+ PINCTRL_PIN(11, "PMC_SUSPWRDNACK"),
+ PINCTRL_PIN(12, "PMC_SUSCLK0"),
+ PINCTRL_PIN(13, "GPIO_S513"),
+ PINCTRL_PIN(14, "USB_ULPI_RST"),
+ PINCTRL_PIN(15, "PMC_WAKE_PCIE0#"),
+ PINCTRL_PIN(16, "PMC_PWRBTN"),
+ PINCTRL_PIN(17, "GPIO_S517"),
+ PINCTRL_PIN(18, "PMC_SUS_STAT"),
+ PINCTRL_PIN(19, "USB_OC0"),
+ PINCTRL_PIN(20, "USB_OC1"),
+ PINCTRL_PIN(21, "PCU_SPI_CS1"),
+ PINCTRL_PIN(22, "GPIO_S522"),
+ PINCTRL_PIN(23, "GPIO_S523"),
+ PINCTRL_PIN(24, "GPIO_S524"),
+ PINCTRL_PIN(25, "GPIO_S525"),
+ PINCTRL_PIN(26, "GPIO_S526"),
+ PINCTRL_PIN(27, "GPIO_S527"),
+ PINCTRL_PIN(28, "GPIO_S528"),
+ PINCTRL_PIN(29, "GPIO_S529"),
+ PINCTRL_PIN(30, "GPIO_S530"),
+ PINCTRL_PIN(31, "USB_ULPI_CLK"),
+ PINCTRL_PIN(32, "USB_ULPI_DATA0"),
+ PINCTRL_PIN(33, "USB_ULPI_DATA1"),
+ PINCTRL_PIN(34, "USB_ULPI_DATA2"),
+ PINCTRL_PIN(35, "USB_ULPI_DATA3"),
+ PINCTRL_PIN(36, "USB_ULPI_DATA4"),
+ PINCTRL_PIN(37, "USB_ULPI_DATA5"),
+ PINCTRL_PIN(38, "USB_ULPI_DATA6"),
+ PINCTRL_PIN(39, "USB_ULPI_DATA7"),
+ PINCTRL_PIN(40, "USB_ULPI_DIR"),
+ PINCTRL_PIN(41, "USB_ULPI_NXT"),
+ PINCTRL_PIN(42, "USB_ULPI_STP"),
+ PINCTRL_PIN(43, "USB_ULPI_REFCLK"),
+};
+
+static const unsigned int byt_sus_pins_map[BYT_NGPIO_SUS] = {
29, 33, 30, 31, 32, 34, 36, 35, 38, 37,
18, 7, 11, 20, 17, 1, 8, 10, 19, 12,
0, 2, 23, 39, 28, 27, 22, 21, 24, 25,
@@ -113,86 +592,373 @@ static unsigned const sus_pins[BYT_NGPIO_SUS] = {
52, 53, 59, 40,
};
-static struct pinctrl_gpio_range byt_ranges[] = {
- {
- .name = BYT_SCORE_ACPI_UID, /* match with acpi _UID in probe */
- .npins = BYT_NGPIO_SCORE,
- .pins = score_pins,
- },
- {
- .name = BYT_NCORE_ACPI_UID,
- .npins = BYT_NGPIO_NCORE,
- .pins = ncore_pins,
- },
- {
- .name = BYT_SUS_ACPI_UID,
- .npins = BYT_NGPIO_SUS,
- .pins = sus_pins,
- },
- {
- },
+static const unsigned int byt_sus_usb_over_current_pins[] = { 19, 20 };
+static const struct byt_simple_func_mux byt_sus_usb_oc_mux[] = {
+ SIMPLE_FUNC("usb", 0),
+ SIMPLE_FUNC("gpio", 1),
};
-struct byt_gpio_pin_context {
- u32 conf0;
- u32 val;
+static const unsigned int byt_sus_usb_ulpi_pins[] = {
+ 14, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+static const unsigned short byt_sus_usb_ulpi_mode_values[] = {
+ 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+};
+static const unsigned short byt_sus_usb_ulpi_gpio_mode_values[] = {
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+};
+static const struct byt_mixed_func_mux byt_sus_usb_ulpi_mux[] = {
+ MIXED_FUNC("usb", byt_sus_usb_ulpi_mode_values),
+ MIXED_FUNC("gpio", byt_sus_usb_ulpi_gpio_mode_values),
};
-struct byt_gpio {
- struct gpio_chip chip;
- struct platform_device *pdev;
- raw_spinlock_t lock;
- void __iomem *reg_base;
- struct pinctrl_gpio_range *range;
- struct byt_gpio_pin_context *saved_context;
+static const unsigned int byt_sus_pcu_spi_pins[] = { 21 };
+static const struct byt_simple_func_mux byt_sus_pcu_spi_mux[] = {
+ SIMPLE_FUNC("spi", 0),
+ SIMPLE_FUNC("gpio", 1),
+};
+
+static const struct byt_pingroup byt_sus_groups[] = {
+ PIN_GROUP_SIMPLE("usb_oc_grp",
+ byt_sus_usb_over_current_pins, byt_sus_usb_oc_mux),
+ PIN_GROUP_MIXED("usb_ulpi_grp",
+ byt_sus_usb_ulpi_pins, byt_sus_usb_ulpi_mux),
+ PIN_GROUP_SIMPLE("pcu_spi_grp",
+ byt_sus_pcu_spi_pins, byt_sus_pcu_spi_mux),
+};
+
+static const char * const byt_sus_usb_groups[] = {
+ "usb_oc_grp", "usb_ulpi_grp",
+};
+static const char * const byt_sus_spi_groups[] = { "pcu_spi_grp" };
+static const char * const byt_sus_gpio_groups[] = {
+ "usb_oc_grp", "usb_ulpi_grp", "pcu_spi_grp",
+};
+
+static const struct byt_function byt_sus_functions[] = {
+ FUNCTION("usb", byt_sus_usb_groups),
+ FUNCTION("spi", byt_sus_spi_groups),
+ FUNCTION("gpio", byt_sus_gpio_groups),
+};
+
+static const struct byt_community byt_sus_communities[] = {
+ COMMUNITY(0, BYT_NGPIO_SUS, byt_sus_pins_map),
+};
+
+static const struct byt_pinctrl_soc_data byt_sus_soc_data = {
+ .uid = BYT_SUS_ACPI_UID,
+ .pins = byt_sus_pins,
+ .npins = ARRAY_SIZE(byt_sus_pins),
+ .groups = byt_sus_groups,
+ .ngroups = ARRAY_SIZE(byt_sus_groups),
+ .functions = byt_sus_functions,
+ .nfunctions = ARRAY_SIZE(byt_sus_functions),
+ .communities = byt_sus_communities,
+ .ncommunities = ARRAY_SIZE(byt_sus_communities),
};
-static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset,
- int reg)
+static const struct pinctrl_pin_desc byt_ncore_pins[] = {
+ PINCTRL_PIN(0, "GPIO_NCORE0"),
+ PINCTRL_PIN(1, "GPIO_NCORE1"),
+ PINCTRL_PIN(2, "GPIO_NCORE2"),
+ PINCTRL_PIN(3, "GPIO_NCORE3"),
+ PINCTRL_PIN(4, "GPIO_NCORE4"),
+ PINCTRL_PIN(5, "GPIO_NCORE5"),
+ PINCTRL_PIN(6, "GPIO_NCORE6"),
+ PINCTRL_PIN(7, "GPIO_NCORE7"),
+ PINCTRL_PIN(8, "GPIO_NCORE8"),
+ PINCTRL_PIN(9, "GPIO_NCORE9"),
+ PINCTRL_PIN(10, "GPIO_NCORE10"),
+ PINCTRL_PIN(11, "GPIO_NCORE11"),
+ PINCTRL_PIN(12, "GPIO_NCORE12"),
+ PINCTRL_PIN(13, "GPIO_NCORE13"),
+ PINCTRL_PIN(14, "GPIO_NCORE14"),
+ PINCTRL_PIN(15, "GPIO_NCORE15"),
+ PINCTRL_PIN(16, "GPIO_NCORE16"),
+ PINCTRL_PIN(17, "GPIO_NCORE17"),
+ PINCTRL_PIN(18, "GPIO_NCORE18"),
+ PINCTRL_PIN(19, "GPIO_NCORE19"),
+ PINCTRL_PIN(20, "GPIO_NCORE20"),
+ PINCTRL_PIN(21, "GPIO_NCORE21"),
+ PINCTRL_PIN(22, "GPIO_NCORE22"),
+ PINCTRL_PIN(23, "GPIO_NCORE23"),
+ PINCTRL_PIN(24, "GPIO_NCORE24"),
+ PINCTRL_PIN(25, "GPIO_NCORE25"),
+ PINCTRL_PIN(26, "GPIO_NCORE26"),
+ PINCTRL_PIN(27, "GPIO_NCORE27"),
+};
+
+static const unsigned int byt_ncore_pins_map[BYT_NGPIO_NCORE] = {
+ 19, 18, 17, 20, 21, 22, 24, 25, 23, 16,
+ 14, 15, 12, 26, 27, 1, 4, 8, 11, 0,
+ 3, 6, 10, 13, 2, 5, 9, 7,
+};
+
+static const struct byt_community byt_ncore_communities[] = {
+ COMMUNITY(0, BYT_NGPIO_NCORE, byt_ncore_pins_map),
+};
+
+static const struct byt_pinctrl_soc_data byt_ncore_soc_data = {
+ .uid = BYT_NCORE_ACPI_UID,
+ .pins = byt_ncore_pins,
+ .npins = ARRAY_SIZE(byt_ncore_pins),
+ .communities = byt_ncore_communities,
+ .ncommunities = ARRAY_SIZE(byt_ncore_communities),
+};
+
+static const struct byt_pinctrl_soc_data *byt_soc_data[] = {
+ &byt_score_soc_data,
+ &byt_sus_soc_data,
+ &byt_ncore_soc_data,
+ NULL,
+};
+
+static struct byt_community *byt_get_community(struct byt_gpio *vg,
+ unsigned int pin)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
- u32 reg_offset;
+ struct byt_community *comm;
+ int i;
+
+ for (i = 0; i < vg->soc_data->ncommunities; i++) {
+ comm = vg->communities_copy + i;
+ if (pin < comm->pin_base + comm->npins && pin >= comm->pin_base)
+ return comm;
+ }
+ return NULL;
+}
+
+static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
+ int reg)
+{
+ struct byt_community *comm = byt_get_community(vg, offset);
+ u32 reg_offset = 0;
+
+ if (!comm)
+ return NULL;
+
+ offset -= comm->pin_base;
if (reg == BYT_INT_STAT_REG)
reg_offset = (offset / 32) * 4;
else
- reg_offset = vg->range->pins[offset] * 16;
+ reg_offset = comm->pad_map[offset] * 16;
+
+ return comm->reg_base + reg_offset + reg;
+}
+
+static int byt_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+
+ return vg->soc_data->ngroups;
+}
+
+static const char *byt_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+
+ return vg->soc_data->groups[selector].name;
+}
+
+static int byt_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const unsigned int **pins,
+ unsigned int *num_pins)
+{
+ struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = vg->soc_data->groups[selector].pins;
+ *num_pins = vg->soc_data->groups[selector].npins;
+
+ return 0;
+}
+
+static const struct pinctrl_ops byt_pinctrl_ops = {
+ .get_groups_count = byt_get_groups_count,
+ .get_group_name = byt_get_group_name,
+ .get_group_pins = byt_get_group_pins,
+};
+
+static int byt_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+
+ return vg->soc_data->nfunctions;
+}
+
+static const char *byt_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned int selector)
+{
+ struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+
+ return vg->soc_data->functions[selector].name;
+}
+
+static int byt_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned int selector,
+ const char * const **groups,
+ unsigned int *num_groups)
+{
+ struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = vg->soc_data->functions[selector].groups;
+ *num_groups = vg->soc_data->functions[selector].ngroups;
+
+ return 0;
+}
+
+static int byt_get_group_simple_mux(const struct byt_pingroup group,
+ const char *func_name,
+ unsigned short *func)
+{
+ int i;
+
+ for (i = 0; i < group.nfuncs; i++) {
+ if (!strcmp(group.simple_funcs[i].name, func_name)) {
+ *func = group.simple_funcs[i].func;
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int byt_get_group_mixed_mux(const struct byt_pingroup group,
+ const char *func_name,
+ const unsigned short **func)
+{
+ int i;
+
+ for (i = 0; i < group.nfuncs; i++) {
+ if (!strcmp(group.mixed_funcs[i].name, func_name)) {
+ *func = group.mixed_funcs[i].func_values;
+ return 0;
+ }
+ }
- return vg->reg_base + reg_offset + reg;
+ return 1;
}
-static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned offset)
+static void byt_set_group_simple_mux(struct byt_gpio *vg,
+ const struct byt_pingroup group,
+ unsigned short func)
{
- void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
unsigned long flags;
- u32 value;
+ int i;
raw_spin_lock_irqsave(&vg->lock, flags);
- value = readl(reg);
- value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
- writel(value, reg);
+
+ for (i = 0; i < group.npins; i++) {
+ void __iomem *padcfg0;
+ u32 value;
+
+ padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
+ if (!padcfg0) {
+ dev_warn(&vg->pdev->dev,
+ "Group %s, pin %i not muxed (no padcfg0)\n",
+ group.name, i);
+ continue;
+ }
+
+ value = readl(padcfg0);
+ value &= ~BYT_PIN_MUX;
+ value |= func;
+ writel(value, padcfg0);
+ }
+
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
+}
+
+static void byt_set_group_mixed_mux(struct byt_gpio *vg,
+ const struct byt_pingroup group,
+ const unsigned short *func)
+{
+ unsigned long flags;
+ int i;
+
+ raw_spin_lock_irqsave(&vg->lock, flags);
+
+ for (i = 0; i < group.npins; i++) {
+ void __iomem *padcfg0;
+ u32 value;
+
+ padcfg0 = byt_gpio_reg(vg, group.pins[i], BYT_CONF0_REG);
+ if (!padcfg0) {
+ dev_warn(&vg->pdev->dev,
+ "Group %s, pin %i not muxed (no padcfg0)\n",
+ group.name, i);
+ continue;
+ }
+
+ value = readl(padcfg0);
+ value &= ~BYT_PIN_MUX;
+ value |= func[i];
+ writel(value, padcfg0);
+ }
+
raw_spin_unlock_irqrestore(&vg->lock, flags);
}
+static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
+ unsigned int group_selector)
+{
+ struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctldev);
+ const struct byt_function func = vg->soc_data->functions[func_selector];
+ const struct byt_pingroup group = vg->soc_data->groups[group_selector];
+ const unsigned short *mixed_func;
+ unsigned short simple_func;
+ int ret = 1;
+
+ if (group.has_simple_funcs)
+ ret = byt_get_group_simple_mux(group, func.name, &simple_func);
+ else
+ ret = byt_get_group_mixed_mux(group, func.name, &mixed_func);
+
+ if (ret)
+ byt_set_group_simple_mux(vg, group, BYT_DEFAULT_GPIO_MUX);
+ else if (group.has_simple_funcs)
+ byt_set_group_simple_mux(vg, group, simple_func);
+ else
+ byt_set_group_mixed_mux(vg, group, mixed_func);
+
+ return 0;
+}
+
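When byt_set_mux() cannot find the requested function name in the group's mux
table (both lookup helpers above return 1 on a miss), it falls back to
BYT_DEFAULT_GPIO_MUX, as the comment near the top of this hunk promises. This
is how the "gpio" function works on the SCORE groups: none of their mux tables
carries a "gpio" entry, so the lookup misses and the group is programmed to
the default GPIO setting. A sketch of the fallback path:

 /* "gpio" is absent from e.g. byt_score_uart_mux, so the lookup misses
  * and the whole group is muxed to the default GPIO value (0).
  */
 byt_set_group_simple_mux(vg, group, BYT_DEFAULT_GPIO_MUX);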
static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
{
/* SCORE pin 92-93 */
- if (!strcmp(vg->range->name, BYT_SCORE_ACPI_UID) &&
- offset >= 92 && offset <= 93)
+ if (!strcmp(vg->soc_data->uid, BYT_SCORE_ACPI_UID) &&
+ offset >= 92 && offset <= 93)
return 1;
/* SUS pin 11-21 */
- if (!strcmp(vg->range->name, BYT_SUS_ACPI_UID) &&
- offset >= 11 && offset <= 21)
+ if (!strcmp(vg->soc_data->uid, BYT_SUS_ACPI_UID) &&
+ offset >= 11 && offset <= 21)
return 1;
return 0;
}
-static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
+static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
- void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
+ void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&vg->lock, flags);
+ value = readl(reg);
+ value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+ writel(value, reg);
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
+}
+
+static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
u32 value, gpio_mux;
unsigned long flags;
@@ -225,53 +991,318 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static void byt_gpio_free(struct gpio_chip *chip, unsigned offset)
+static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
+ struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
byt_gpio_clear_triggering(vg, offset);
pm_runtime_put(&vg->pdev->dev);
}
-static int byt_irq_type(struct irq_data *d, unsigned type)
+static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset,
+ bool input)
{
- struct byt_gpio *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
- u32 offset = irqd_to_hwirq(d);
- u32 value;
+ struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
unsigned long flags;
- void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+ u32 value;
- if (offset >= vg->chip.ngpio)
+ raw_spin_lock_irqsave(&vg->lock, flags);
+
+ value = readl(val_reg);
+ value &= ~BYT_DIR_MASK;
+ if (input)
+ value |= BYT_OUTPUT_EN;
+ else
+ /*
+ * Before making any direction modifications, do a check if gpio
+ * is set for direct IRQ. On baytrail, setting GPIO to output
+ * does not make sense, so let's at least warn the caller before
+ * they shoot themselves in the foot.
+ */
+ WARN(readl(conf_reg) & BYT_DIRECT_IRQ_EN,
+ "Potential Error: Setting GPIO with direct_irq_en to output");
+ writel(value, val_reg);
+
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
+
+ return 0;
+}
+
+static const struct pinmux_ops byt_pinmux_ops = {
+ .get_functions_count = byt_get_functions_count,
+ .get_function_name = byt_get_function_name,
+ .get_function_groups = byt_get_function_groups,
+ .set_mux = byt_set_mux,
+ .gpio_request_enable = byt_gpio_request_enable,
+ .gpio_disable_free = byt_gpio_disable_free,
+ .gpio_set_direction = byt_gpio_set_direction,
+};
+
+static void byt_get_pull_strength(u32 reg, u16 *strength)
+{
+ switch (reg & BYT_PULL_STR_MASK) {
+ case BYT_PULL_STR_2K:
+ *strength = 2000;
+ break;
+ case BYT_PULL_STR_10K:
+ *strength = 10000;
+ break;
+ case BYT_PULL_STR_20K:
+ *strength = 20000;
+ break;
+ case BYT_PULL_STR_40K:
+ *strength = 40000;
+ break;
+ }
+}
+
+static int byt_set_pull_strength(u32 *reg, u16 strength)
+{
+ *reg &= ~BYT_PULL_STR_MASK;
+
+ switch (strength) {
+ case 2000:
+ *reg |= BYT_PULL_STR_2K;
+ break;
+ case 10000:
+ *reg |= BYT_PULL_STR_10K;
+ break;
+ case 20000:
+ *reg |= BYT_PULL_STR_20K;
+ break;
+ case 40000:
+ *reg |= BYT_PULL_STR_40K;
+ break;
+ default:
return -EINVAL;
+ }
+
+ return 0;
+}
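The two helpers above translate the bias strength between ohms and the
BYT_PULL_STR_* register encoding. A quick round-trip sketch under those
definitions:

 u32 conf = 0;
 u16 ohms = 0;

 if (!byt_set_pull_strength(&conf, 20000)) /* conf |= BYT_PULL_STR_20K */
  byt_get_pull_strength(conf, &ohms); /* ohms == 20000 again */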
+
+static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
+ unsigned long *config)
+{
+ struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+ void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ unsigned long flags;
+ u32 conf, pull, val, debounce;
+ u16 arg = 0;
raw_spin_lock_irqsave(&vg->lock, flags);
- value = readl(reg);
+ conf = readl(conf_reg);
+ pull = conf & BYT_PULL_ASSIGN_MASK;
+ val = readl(val_reg);
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
- WARN(value & BYT_DIRECT_IRQ_EN,
- "Bad pad config for io mode, force direct_irq_en bit clearing");
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (pull)
+ return -EINVAL;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ /* Pull assignment is only applicable in input mode */
+ if ((val & BYT_INPUT_EN) || pull != BYT_PULL_ASSIGN_DOWN)
+ return -EINVAL;
- /* For level trigges the BYT_TRIG_POS and BYT_TRIG_NEG bits
- * are used to indicate high and low level triggering
- */
- value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
- BYT_TRIG_LVL);
+ byt_get_pull_strength(conf, &arg);
- writel(value, reg);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ /* Pull assignment is only applicable in input mode */
+ if ((val & BYT_INPUT_EN) || pull != BYT_PULL_ASSIGN_UP)
+ return -EINVAL;
- if (type & IRQ_TYPE_EDGE_BOTH)
- irq_set_handler_locked(d, handle_edge_irq);
- else if (type & IRQ_TYPE_LEVEL_MASK)
- irq_set_handler_locked(d, handle_level_irq);
+ byt_get_pull_strength(conf, &arg);
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ if (!(conf & BYT_DEBOUNCE_EN))
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&vg->lock, flags);
+ debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG));
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
+
+ switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
+ case BYT_DEBOUNCE_PULSE_375US:
+ arg = 375;
+ break;
+ case BYT_DEBOUNCE_PULSE_750US:
+ arg = 750;
+ break;
+ case BYT_DEBOUNCE_PULSE_1500US:
+ arg = 1500;
+ break;
+ case BYT_DEBOUNCE_PULSE_3MS:
+ arg = 3000;
+ break;
+ case BYT_DEBOUNCE_PULSE_6MS:
+ arg = 6000;
+ break;
+ case BYT_DEBOUNCE_PULSE_12MS:
+ arg = 12000;
+ break;
+ case BYT_DEBOUNCE_PULSE_24MS:
+ arg = 24000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
return 0;
}
+static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
+ unsigned int offset,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct byt_gpio *vg = pinctrl_dev_get_drvdata(pctl_dev);
+ unsigned int param, arg;
+ void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+ void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ unsigned long flags;
+ u32 conf, val, debounce;
+ int i, ret = 0;
+
+ raw_spin_lock_irqsave(&vg->lock, flags);
+
+ conf = readl(conf_reg);
+ val = readl(val_reg);
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ conf &= ~BYT_PULL_ASSIGN_MASK;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 2000;
+
+ /*
+ * Pull assignment is only applicable in input mode. If
+ * chip is not in input mode, set it and warn about it.
+ */
+ if (val & BYT_INPUT_EN) {
+ val &= ~BYT_INPUT_EN;
+ writel(val, val_reg);
+ dev_warn(&vg->pdev->dev,
+ "pin %u forcibly set to input mode\n",
+ offset);
+ }
+
+ conf &= ~BYT_PULL_ASSIGN_MASK;
+ conf |= BYT_PULL_ASSIGN_DOWN;
+ ret = byt_set_pull_strength(&conf, arg);
+
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 2000;
+
+ /*
+ * Pull assignment is only applicable in input mode. If
+ * chip is not in input mode, set it and warn about it.
+ */
+ if (val & BYT_INPUT_EN) {
+ val &= ~BYT_INPUT_EN;
+ writel(val, val_reg);
+ dev_warn(&vg->pdev->dev,
+ "pin %u forcibly set to input mode\n",
+ offset);
+ }
+
+ conf &= ~BYT_PULL_ASSIGN_MASK;
+ conf |= BYT_PULL_ASSIGN_UP;
+ ret = byt_set_pull_strength(&conf, arg);
+
+ break;
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ debounce = readl(byt_gpio_reg(vg, offset,
+ BYT_DEBOUNCE_REG));
+ conf &= ~BYT_DEBOUNCE_PULSE_MASK;
+
+ switch (arg) {
+ case 375:
+ conf |= BYT_DEBOUNCE_PULSE_375US;
+ break;
+ case 750:
+ conf |= BYT_DEBOUNCE_PULSE_750US;
+ break;
+ case 1500:
+ conf |= BYT_DEBOUNCE_PULSE_1500US;
+ break;
+ case 3000:
+ conf |= BYT_DEBOUNCE_PULSE_3MS;
+ break;
+ case 6000:
+ conf |= BYT_DEBOUNCE_PULSE_6MS;
+ break;
+ case 12000:
+ conf |= BYT_DEBOUNCE_PULSE_12MS;
+ break;
+ case 24000:
+ conf |= BYT_DEBOUNCE_PULSE_24MS;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ break;
+ default:
+ ret = -ENOTSUPP;
+ }
+
+ if (ret)
+ break;
+ }
+
+ if (!ret)
+ writel(conf, conf_reg);
+
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
+
+ return ret;
+}
+
+static const struct pinconf_ops byt_pinconf_ops = {
+ .is_generic = true,
+ .pin_config_get = byt_pin_config_get,
+ .pin_config_set = byt_pin_config_set,
+};
+
+static const struct pinctrl_desc byt_pinctrl_desc = {
+ .pctlops = &byt_pinctrl_ops,
+ .pmxops = &byt_pinmux_ops,
+ .confops = &byt_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
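As a usage illustration of the generic pinconf path wired up above, the
pinctrl core hands byt_pin_config_set() parameter/argument pairs packed with
pinconf_to_config_packed() from <linux/pinctrl/pinconf-generic.h>. A 3 ms
debounce request, per the BYT_DEBOUNCE_PULSE_* table earlier in this hunk,
would arrive as:

 unsigned long cfg = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, 3000);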
static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- void __iomem *reg = byt_gpio_reg(chip, offset, BYT_VAL_REG);
struct byt_gpio *vg = gpiochip_get_data(chip);
+ void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
unsigned long flags;
u32 val;
@@ -285,69 +1316,58 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct byt_gpio *vg = gpiochip_get_data(chip);
- void __iomem *reg = byt_gpio_reg(chip, offset, BYT_VAL_REG);
+ void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
unsigned long flags;
u32 old_val;
- raw_spin_lock_irqsave(&vg->lock, flags);
+ if (!reg)
+ return;
+ raw_spin_lock_irqsave(&vg->lock, flags);
old_val = readl(reg);
-
if (value)
writel(old_val | BYT_LEVEL, reg);
else
writel(old_val & ~BYT_LEVEL, reg);
-
raw_spin_unlock_irqrestore(&vg->lock, flags);
}
-static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct byt_gpio *vg = gpiochip_get_data(chip);
- void __iomem *reg = byt_gpio_reg(chip, offset, BYT_VAL_REG);
+ void __iomem *reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
unsigned long flags;
u32 value;
+ if (!reg)
+ return -EINVAL;
+
raw_spin_lock_irqsave(&vg->lock, flags);
+ value = readl(reg);
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
- value = readl(reg) | BYT_DIR_MASK;
- value &= ~BYT_INPUT_EN; /* active low */
- writel(value, reg);
+ if (!(value & BYT_OUTPUT_EN))
+ return GPIOF_DIR_OUT;
+ if (!(value & BYT_INPUT_EN))
+ return GPIOF_DIR_IN;
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ return -EINVAL;
+}
- return 0;
+static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ return pinctrl_gpio_direction_input(chip->base + offset);
}
static int byt_gpio_direction_output(struct gpio_chip *chip,
- unsigned gpio, int value)
+ unsigned int offset, int value)
{
- struct byt_gpio *vg = gpiochip_get_data(chip);
- void __iomem *conf_reg = byt_gpio_reg(chip, gpio, BYT_CONF0_REG);
- void __iomem *reg = byt_gpio_reg(chip, gpio, BYT_VAL_REG);
- unsigned long flags;
- u32 reg_val;
-
- raw_spin_lock_irqsave(&vg->lock, flags);
+ int ret = pinctrl_gpio_direction_output(chip->base + offset);
- /*
- * Before making any direction modifications, do a check if gpio
- * is set for direct IRQ. On baytrail, setting GPIO to output does
- * not make sense, so let's at least warn the caller before they shoot
- * themselves in the foot.
- */
- WARN(readl(conf_reg) & BYT_DIRECT_IRQ_EN,
- "Potential Error: Setting GPIO with direct_irq_en to output");
-
- reg_val = readl(reg) | BYT_DIR_MASK;
- reg_val &= ~(BYT_OUTPUT_EN | BYT_INPUT_EN);
-
- if (value)
- writel(reg_val | BYT_LEVEL, reg);
- else
- writel(reg_val & ~BYT_LEVEL, reg);
+ if (ret)
+ return ret;
- raw_spin_unlock_irqrestore(&vg->lock, flags);
+ byt_gpio_set(chip, offset, value);
return 0;
}
@@ -356,20 +1376,45 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct byt_gpio *vg = gpiochip_get_data(chip);
int i;
- u32 conf0, val, offs;
+ u32 conf0, val;
- for (i = 0; i < vg->chip.ngpio; i++) {
+ for (i = 0; i < vg->soc_data->npins; i++) {
+ const struct byt_community *comm;
const char *pull_str = NULL;
const char *pull = NULL;
+ void __iomem *reg;
unsigned long flags;
const char *label;
- offs = vg->range->pins[i] * 16;
+ unsigned int pin;
raw_spin_lock_irqsave(&vg->lock, flags);
- conf0 = readl(vg->reg_base + offs + BYT_CONF0_REG);
- val = readl(vg->reg_base + offs + BYT_VAL_REG);
+ pin = vg->soc_data->pins[i].number;
+ reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
+ if (!reg) {
+ seq_printf(s,
+ "Could not retrieve pin %i conf0 reg\n",
+ pin);
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
+ continue;
+ }
+ conf0 = readl(reg);
+
+ reg = byt_gpio_reg(vg, pin, BYT_VAL_REG);
+ if (!reg) {
+ seq_printf(s,
+ "Could not retrieve pin %i val reg\n", pin);
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
+ continue;
+ }
+ val = readl(reg);
raw_spin_unlock_irqrestore(&vg->lock, flags);
+ comm = byt_get_community(vg, pin);
+ if (!comm) {
+ seq_printf(s,
+ "Could not get community for pin %i\n", pin);
+ continue;
+ }
label = gpiochip_is_requested(chip, i);
if (!label)
label = "Unrequested";
@@ -400,12 +1445,12 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
seq_printf(s,
" gpio-%-3d (%-20.20s) %s %s %s pad-%-3d offset:0x%03x mux:%d %s%s%s",
- i,
+ pin,
label,
val & BYT_INPUT_EN ? " " : "in",
val & BYT_OUTPUT_EN ? " " : "out",
val & BYT_LEVEL ? "hi" : "lo",
- vg->range->pins[i], offs,
+ comm->pad_map[i], comm->pad_map[i] * 16,
conf0 & 0x7,
conf0 & BYT_TRIG_NEG ? " fall" : " ",
conf0 & BYT_TRIG_POS ? " rise" : " ",
@@ -423,27 +1468,17 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
}
}
-static void byt_gpio_irq_handler(struct irq_desc *desc)
-{
- struct irq_data *data = irq_desc_get_irq_data(desc);
- struct byt_gpio *vg = gpiochip_get_data(irq_desc_get_handler_data(desc));
- struct irq_chip *chip = irq_data_get_irq_chip(data);
- u32 base, pin;
- void __iomem *reg;
- unsigned long pending;
- unsigned virq;
-
- /* check from GPIO controller which pin triggered the interrupt */
- for (base = 0; base < vg->chip.ngpio; base += 32) {
- reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG);
- pending = readl(reg);
- for_each_set_bit(pin, &pending, 32) {
- virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
- generic_handle_irq(virq);
- }
- }
- chip->irq_eoi(data);
-}
+static const struct gpio_chip byt_gpio_chip = {
+ .owner = THIS_MODULE,
+ .request = gpiochip_generic_request,
+ .free = gpiochip_generic_free,
+ .get_direction = byt_gpio_get_direction,
+ .direction_input = byt_gpio_direction_input,
+ .direction_output = byt_gpio_direction_output,
+ .get = byt_gpio_get,
+ .set = byt_gpio_set,
+ .dbg_show = byt_gpio_dbg_show,
+};
static void byt_irq_ack(struct irq_data *d)
{
@@ -452,12 +1487,23 @@ static void byt_irq_ack(struct irq_data *d)
unsigned offset = irqd_to_hwirq(d);
void __iomem *reg;
+ reg = byt_gpio_reg(vg, offset, BYT_INT_STAT_REG);
+ if (!reg)
+ return;
+
raw_spin_lock(&vg->lock);
- reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG);
writel(BIT(offset % 32), reg);
raw_spin_unlock(&vg->lock);
}
+static void byt_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct byt_gpio *vg = gpiochip_get_data(gc);
+
+ byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
+}
+
static void byt_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -467,7 +1513,9 @@ static void byt_irq_unmask(struct irq_data *d)
void __iomem *reg;
u32 value;
- reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+ reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+ if (!reg)
+ return;
raw_spin_lock_irqsave(&vg->lock, flags);
value = readl(reg);
@@ -493,23 +1541,81 @@ static void byt_irq_unmask(struct irq_data *d)
raw_spin_unlock_irqrestore(&vg->lock, flags);
}
-static void byt_irq_mask(struct irq_data *d)
+static int byt_irq_type(struct irq_data *d, unsigned int type)
{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct byt_gpio *vg = gpiochip_get_data(gc);
+ struct byt_gpio *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d));
+ u32 offset = irqd_to_hwirq(d);
+ u32 value;
+ unsigned long flags;
+ void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
- byt_gpio_clear_triggering(vg, irqd_to_hwirq(d));
+ if (!reg || offset >= vg->chip.ngpio)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&vg->lock, flags);
+ value = readl(reg);
+
+ WARN(value & BYT_DIRECT_IRQ_EN,
+ "Bad pad config for io mode, force direct_irq_en bit clearing");
+
+	/* For level triggers the BYT_TRIG_POS and BYT_TRIG_NEG bits
+ * are used to indicate high and low level triggering
+ */
+ value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
+ BYT_TRIG_LVL);
+
+ writel(value, reg);
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+
+ raw_spin_unlock_irqrestore(&vg->lock, flags);
+
+ return 0;
}
static struct irq_chip byt_irqchip = {
- .name = "BYT-GPIO",
- .irq_ack = byt_irq_ack,
- .irq_mask = byt_irq_mask,
- .irq_unmask = byt_irq_unmask,
- .irq_set_type = byt_irq_type,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .name = "BYT-GPIO",
+ .irq_ack = byt_irq_ack,
+ .irq_mask = byt_irq_mask,
+ .irq_unmask = byt_irq_unmask,
+ .irq_set_type = byt_irq_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
};
+static void byt_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct irq_data *data = irq_desc_get_irq_data(desc);
+ struct byt_gpio *vg = gpiochip_get_data(
+ irq_desc_get_handler_data(desc));
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ u32 base, pin;
+ void __iomem *reg;
+ unsigned long pending;
+ unsigned int virq;
+
+ /* check from GPIO controller which pin triggered the interrupt */
+ for (base = 0; base < vg->chip.ngpio; base += 32) {
+ reg = byt_gpio_reg(vg, base, BYT_INT_STAT_REG);
+
+ if (!reg) {
+ dev_warn(&vg->pdev->dev,
+ "Pin %i: could not retrieve interrupt status register\n",
+ base);
+ continue;
+ }
+
+ pending = readl(reg);
+ for_each_set_bit(pin, &pending, 32) {
+ virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
+ generic_handle_irq(virq);
+ }
+ }
+ chip->irq_eoi(data);
+}
+
static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
{
void __iomem *reg;
@@ -521,8 +1627,18 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
* do not use direct IRQ mode. This will prevent spurious
* interrupts from misconfigured pins.
*/
- for (i = 0; i < vg->chip.ngpio; i++) {
- value = readl(byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG));
+ for (i = 0; i < vg->soc_data->npins; i++) {
+ unsigned int pin = vg->soc_data->pins[i].number;
+
+ reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
+ if (!reg) {
+ dev_warn(&vg->pdev->dev,
+ "Pin %i: could not retrieve conf0 register\n",
+ i);
+ continue;
+ }
+
+ value = readl(reg);
if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
!(value & BYT_DIRECT_IRQ_EN)) {
byt_gpio_clear_triggering(vg, i);
@@ -531,8 +1647,16 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
}
/* clear interrupt status trigger registers */
- for (base = 0; base < vg->chip.ngpio; base += 32) {
- reg = byt_gpio_reg(&vg->chip, base, BYT_INT_STAT_REG);
+ for (base = 0; base < vg->soc_data->npins; base += 32) {
+ reg = byt_gpio_reg(vg, base, BYT_INT_STAT_REG);
+
+ if (!reg) {
+ dev_warn(&vg->pdev->dev,
+ "Pin %i: could not retrieve irq status reg\n",
+ base);
+ continue;
+ }
+
writel(0xffffffff, reg);
/* make sure trigger bits are cleared, if not then a pin
might be misconfigured in bios */
@@ -543,82 +1667,47 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
}
}
-static int byt_gpio_probe(struct platform_device *pdev)
+static int byt_gpio_probe(struct byt_gpio *vg)
{
- struct byt_gpio *vg;
struct gpio_chip *gc;
- struct resource *mem_rc, *irq_rc;
- struct device *dev = &pdev->dev;
- struct acpi_device *acpi_dev;
- struct pinctrl_gpio_range *range;
- acpi_handle handle = ACPI_HANDLE(dev);
+ struct resource *irq_rc;
int ret;
- if (acpi_bus_get_device(handle, &acpi_dev))
- return -ENODEV;
-
- vg = devm_kzalloc(dev, sizeof(struct byt_gpio), GFP_KERNEL);
- if (!vg) {
- dev_err(&pdev->dev, "can't allocate byt_gpio chip data\n");
- return -ENOMEM;
- }
-
- for (range = byt_ranges; range->name; range++) {
- if (!strcmp(acpi_dev->pnp.unique_id, range->name)) {
- vg->chip.ngpio = range->npins;
- vg->range = range;
- break;
- }
- }
-
- if (!vg->chip.ngpio || !vg->range)
- return -ENODEV;
-
- vg->pdev = pdev;
- platform_set_drvdata(pdev, vg);
-
- mem_rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vg->reg_base = devm_ioremap_resource(dev, mem_rc);
- if (IS_ERR(vg->reg_base))
- return PTR_ERR(vg->reg_base);
-
- raw_spin_lock_init(&vg->lock);
-
- gc = &vg->chip;
- gc->label = dev_name(&pdev->dev);
- gc->owner = THIS_MODULE;
- gc->request = byt_gpio_request;
- gc->free = byt_gpio_free;
- gc->direction_input = byt_gpio_direction_input;
- gc->direction_output = byt_gpio_direction_output;
- gc->get = byt_gpio_get;
- gc->set = byt_gpio_set;
- gc->dbg_show = byt_gpio_dbg_show;
- gc->base = -1;
- gc->can_sleep = false;
- gc->parent = dev;
+ /* Set up gpio chip */
+ vg->chip = byt_gpio_chip;
+ gc = &vg->chip;
+ gc->label = dev_name(&vg->pdev->dev);
+ gc->base = -1;
+ gc->can_sleep = false;
+ gc->parent = &vg->pdev->dev;
+ gc->ngpio = vg->soc_data->npins;
#ifdef CONFIG_PM_SLEEP
- vg->saved_context = devm_kcalloc(&pdev->dev, gc->ngpio,
+ vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
sizeof(*vg->saved_context), GFP_KERNEL);
#endif
-
ret = gpiochip_add_data(gc, vg);
if (ret) {
- dev_err(&pdev->dev, "failed adding byt-gpio chip\n");
+ dev_err(&vg->pdev->dev, "failed adding byt-gpio chip\n");
return ret;
}
+ ret = gpiochip_add_pin_range(&vg->chip, dev_name(&vg->pdev->dev),
+ 0, 0, vg->soc_data->npins);
+ if (ret) {
+ dev_err(&vg->pdev->dev, "failed to add GPIO pin range\n");
+ goto fail;
+ }
+
/* set up interrupts */
- irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ irq_rc = platform_get_resource(vg->pdev, IORESOURCE_IRQ, 0);
if (irq_rc && irq_rc->start) {
byt_gpio_irq_init_hw(vg);
ret = gpiochip_irqchip_add(gc, &byt_irqchip, 0,
handle_simple_irq, IRQ_TYPE_NONE);
if (ret) {
- dev_err(dev, "failed to add irqchip\n");
- gpiochip_remove(gc);
- return ret;
+ dev_err(&vg->pdev->dev, "failed to add irqchip\n");
+ goto fail;
}
gpiochip_set_chained_irqchip(gc, &byt_irqchip,
@@ -626,7 +1715,120 @@ static int byt_gpio_probe(struct platform_device *pdev)
byt_gpio_irq_handler);
}
- pm_runtime_enable(dev);
+ return ret;
+
+fail:
+ gpiochip_remove(&vg->chip);
+
+ return ret;
+}
+
+static int byt_set_soc_data(struct byt_gpio *vg,
+ const struct byt_pinctrl_soc_data *soc_data)
+{
+ int i;
+
+ vg->soc_data = soc_data;
+ vg->communities_copy = devm_kcalloc(&vg->pdev->dev,
+ soc_data->ncommunities,
+ sizeof(*vg->communities_copy),
+ GFP_KERNEL);
+ if (!vg->communities_copy)
+ return -ENOMEM;
+
+ for (i = 0; i < soc_data->ncommunities; i++) {
+ struct byt_community *comm = vg->communities_copy + i;
+ struct resource *mem_rc;
+
+ *comm = vg->soc_data->communities[i];
+
+ mem_rc = platform_get_resource(vg->pdev, IORESOURCE_MEM, 0);
+ comm->reg_base = devm_ioremap_resource(&vg->pdev->dev, mem_rc);
+ if (IS_ERR(comm->reg_base))
+ return PTR_ERR(comm->reg_base);
+ }
+
+ return 0;
+}
+
+static const struct acpi_device_id byt_gpio_acpi_match[] = {
+ { "INT33B2", (kernel_ulong_t)byt_soc_data },
+ { "INT33FC", (kernel_ulong_t)byt_soc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
+
+static int byt_pinctrl_probe(struct platform_device *pdev)
+{
+ const struct byt_pinctrl_soc_data *soc_data = NULL;
+ const struct byt_pinctrl_soc_data **soc_table;
+ const struct acpi_device_id *acpi_id;
+ struct acpi_device *acpi_dev;
+ struct byt_gpio *vg;
+ int i, ret;
+
+ acpi_dev = ACPI_COMPANION(&pdev->dev);
+ if (!acpi_dev)
+ return -ENODEV;
+
+ acpi_id = acpi_match_device(byt_gpio_acpi_match, &pdev->dev);
+ if (!acpi_id)
+ return -ENODEV;
+
+ soc_table = (const struct byt_pinctrl_soc_data **)acpi_id->driver_data;
+
+ for (i = 0; soc_table[i]; i++) {
+ if (!strcmp(acpi_dev->pnp.unique_id, soc_table[i]->uid)) {
+ soc_data = soc_table[i];
+ break;
+ }
+ }
+
+ if (!soc_data)
+ return -ENODEV;
+
+ vg = devm_kzalloc(&pdev->dev, sizeof(*vg), GFP_KERNEL);
+ if (!vg)
+ return -ENOMEM;
+
+ vg->pdev = pdev;
+ ret = byt_set_soc_data(vg, soc_data);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set soc data\n");
+ return ret;
+ }
+
+ vg->pctl_desc = byt_pinctrl_desc;
+ vg->pctl_desc.name = dev_name(&pdev->dev);
+ vg->pctl_desc.pins = vg->soc_data->pins;
+ vg->pctl_desc.npins = vg->soc_data->npins;
+
+ vg->pctl_dev = pinctrl_register(&vg->pctl_desc, &pdev->dev, vg);
+ if (IS_ERR(vg->pctl_dev)) {
+ dev_err(&pdev->dev, "failed to register pinctrl driver\n");
+ return PTR_ERR(vg->pctl_dev);
+ }
+
+ ret = byt_gpio_probe(vg);
+ if (ret) {
+ pinctrl_unregister(vg->pctl_dev);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, vg);
+ raw_spin_lock_init(&vg->lock);
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+}
+
+static int byt_pinctrl_remove(struct platform_device *pdev)
+{
+ struct byt_gpio *vg = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ gpiochip_remove(&vg->chip);
+ pinctrl_unregister(vg->pctl_dev);
return 0;
}
@@ -638,15 +1840,22 @@ static int byt_gpio_suspend(struct device *dev)
struct byt_gpio *vg = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < vg->chip.ngpio; i++) {
+ for (i = 0; i < vg->soc_data->npins; i++) {
void __iomem *reg;
u32 value;
-
- reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
+ unsigned int pin = vg->soc_data->pins[i].number;
+
+ reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
+ if (!reg) {
+ dev_warn(&vg->pdev->dev,
+ "Pin %i: could not retrieve conf0 register\n",
+ i);
+ continue;
+ }
value = readl(reg) & BYT_CONF0_RESTORE_MASK;
vg->saved_context[i].conf0 = value;
- reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
+ reg = byt_gpio_reg(vg, pin, BYT_VAL_REG);
value = readl(reg) & BYT_VAL_RESTORE_MASK;
vg->saved_context[i].val = value;
}
@@ -660,11 +1869,18 @@ static int byt_gpio_resume(struct device *dev)
struct byt_gpio *vg = platform_get_drvdata(pdev);
int i;
- for (i = 0; i < vg->chip.ngpio; i++) {
+ for (i = 0; i < vg->soc_data->npins; i++) {
void __iomem *reg;
u32 value;
-
- reg = byt_gpio_reg(&vg->chip, i, BYT_CONF0_REG);
+ unsigned int pin = vg->soc_data->pins[i].number;
+
+ reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
+ if (!reg) {
+ dev_warn(&vg->pdev->dev,
+ "Pin %i: could not retrieve conf0 register\n",
+ i);
+ continue;
+ }
value = readl(reg);
if ((value & BYT_CONF0_RESTORE_MASK) !=
vg->saved_context[i].conf0) {
@@ -674,7 +1890,7 @@ static int byt_gpio_resume(struct device *dev)
dev_info(dev, "restored pin %d conf0 %#08x", i, value);
}
- reg = byt_gpio_reg(&vg->chip, i, BYT_VAL_REG);
+ reg = byt_gpio_reg(vg, pin, BYT_VAL_REG);
value = readl(reg);
if ((value & BYT_VAL_RESTORE_MASK) !=
vg->saved_context[i].val) {
@@ -712,26 +1928,9 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
NULL)
};
-static const struct acpi_device_id byt_gpio_acpi_match[] = {
- { "INT33B2", 0 },
- { "INT33FC", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
-
-static int byt_gpio_remove(struct platform_device *pdev)
-{
- struct byt_gpio *vg = platform_get_drvdata(pdev);
-
- pm_runtime_disable(&pdev->dev);
- gpiochip_remove(&vg->chip);
-
- return 0;
-}
-
static struct platform_driver byt_gpio_driver = {
- .probe = byt_gpio_probe,
- .remove = byt_gpio_remove,
+ .probe = byt_pinctrl_probe,
+ .remove = byt_pinctrl_remove,
.driver = {
.name = "byt_gpio",
.pm = &byt_gpio_pm_ops,
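
The chained handler byt_gpio_irq_handler() introduced above demultiplexes the single parent interrupt by scanning each 32-bit interrupt status bank and dispatching one virtual IRQ per set bit. A minimal standalone sketch of that scan, with a hypothetical status array standing in for the readl() of BYT_INT_STAT_REG:

	#include <stdio.h>
	#include <stdint.h>

	/* Hypothetical per-bank status words; bit n set means pin
	 * (base + n) latched an interrupt. */
	static const uint32_t int_stat[2] = { 0x00000005, 0x80000000 };

	int main(void)
	{
		unsigned int base, bit;

		for (base = 0; base < 64; base += 32) {
			uint32_t pending = int_stat[base / 32];

			/* open-coded for_each_set_bit(bit, &pending, 32) */
			for (bit = 0; bit < 32; bit++)
				if (pending & (1u << bit))
					printf("dispatch hwirq %u\n", base + bit);
		}
		return 0;
	}

In the driver the dispatch is irq_find_mapping() on the gpiochip's irqdomain followed by generic_handle_irq(), and the parent interrupt is only EOI'd once every bank has been drained.
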
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 4251e0747..ac4f564f1 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1526,17 +1526,16 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
pctrl->pctldesc.pins = pctrl->community->pins;
pctrl->pctldesc.npins = pctrl->community->npins;
- pctrl->pctldev = pinctrl_register(&pctrl->pctldesc, &pdev->dev, pctrl);
+ pctrl->pctldev = devm_pinctrl_register(&pdev->dev, &pctrl->pctldesc,
+ pctrl);
if (IS_ERR(pctrl->pctldev)) {
dev_err(&pdev->dev, "failed to register pinctrl driver\n");
return PTR_ERR(pctrl->pctldev);
}
ret = chv_gpio_probe(pctrl, irq);
- if (ret) {
- pinctrl_unregister(pctrl->pctldev);
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, pctrl);
@@ -1548,7 +1547,6 @@ static int chv_pinctrl_remove(struct platform_device *pdev)
struct chv_pinctrl *pctrl = platform_get_drvdata(pdev);
gpiochip_remove(&pctrl->chip);
- pinctrl_unregister(pctrl->pctldev);
return 0;
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 6c2c816f8..3584e50fa 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -1045,17 +1045,16 @@ int intel_pinctrl_probe(struct platform_device *pdev,
pctrl->pctldesc.pins = pctrl->soc->pins;
pctrl->pctldesc.npins = pctrl->soc->npins;
- pctrl->pctldev = pinctrl_register(&pctrl->pctldesc, &pdev->dev, pctrl);
+ pctrl->pctldev = devm_pinctrl_register(&pdev->dev, &pctrl->pctldesc,
+ pctrl);
if (IS_ERR(pctrl->pctldev)) {
dev_err(&pdev->dev, "failed to register pinctrl driver\n");
return PTR_ERR(pctrl->pctldev);
}
ret = intel_gpio_probe(pctrl, irq);
- if (ret) {
- pinctrl_unregister(pctrl->pctldev);
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, pctrl);
@@ -1068,7 +1067,6 @@ int intel_pinctrl_remove(struct platform_device *pdev)
struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
gpiochip_remove(&pctrl->chip);
- pinctrl_unregister(pctrl->pctldev);
return 0;
}
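
The cherryview and intel hunks above are the same conversion: pinctrl_register() plus a manual pinctrl_unregister() on every error path and in .remove() is replaced by device-managed registration, which unwinds automatically on probe failure or driver detach. A condensed sketch of the resulting probe shape ("foo" is a placeholder driver, not code from this diff; descriptor setup and later steps elided):

	#include <linux/platform_device.h>
	#include <linux/pinctrl/pinctrl.h>
	#include <linux/slab.h>
	#include <linux/err.h>

	struct foo_pinctrl {
		struct pinctrl_desc desc;
		struct pinctrl_dev *pctldev;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_pinctrl *p;

		p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		/* desc.name, .pins, .npins would be filled in here */

		/* unregistered automatically on detach; no matching
		 * pinctrl_unregister() in .remove() or on error paths */
		p->pctldev = devm_pinctrl_register(&pdev->dev, &p->desc, p);
		if (IS_ERR(p->pctldev))
			return PTR_ERR(p->pctldev);

		return 0;
	}
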
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index fba2dd99e..a607655d7 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -605,7 +605,7 @@ static int mtk_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
ret = mtk_pctrl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
- pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+ pinctrl_utils_free_map(pctldev, *map, *num_maps);
of_node_put(np);
return ret;
}
@@ -644,7 +644,7 @@ static int mtk_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
static const struct pinctrl_ops mtk_pctrl_ops = {
.dt_node_to_map = mtk_pctrl_dt_node_to_map,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
.get_groups_count = mtk_pctrl_get_groups_count,
.get_group_name = mtk_pctrl_get_group_name,
.get_group_pins = mtk_pctrl_get_group_pins,
@@ -1397,17 +1397,16 @@ int mtk_pctrl_init(struct platform_device *pdev,
pctl->pctl_desc.pmxops = &mtk_pmx_ops;
pctl->dev = &pdev->dev;
- pctl->pctl_dev = pinctrl_register(&pctl->pctl_desc, &pdev->dev, pctl);
+ pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, &pctl->pctl_desc,
+ pctl);
if (IS_ERR(pctl->pctl_dev)) {
dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
return PTR_ERR(pctl->pctl_dev);
}
pctl->chip = devm_kzalloc(&pdev->dev, sizeof(*pctl->chip), GFP_KERNEL);
- if (!pctl->chip) {
- ret = -ENOMEM;
- goto pctrl_error;
- }
+ if (!pctl->chip)
+ return -ENOMEM;
*pctl->chip = mtk_gpio_chip;
pctl->chip->ngpio = pctl->devdata->npins;
@@ -1416,10 +1415,8 @@ int mtk_pctrl_init(struct platform_device *pdev,
pctl->chip->base = -1;
ret = gpiochip_add_data(pctl->chip, pctl);
- if (ret) {
- ret = -EINVAL;
- goto pctrl_error;
- }
+ if (ret)
+ return -EINVAL;
/* Register the GPIO to pin mappings. */
ret = gpiochip_add_pin_range(pctl->chip, dev_name(&pdev->dev),
@@ -1497,8 +1494,6 @@ int mtk_pctrl_init(struct platform_device *pdev,
chip_error:
gpiochip_remove(pctl->chip);
-pctrl_error:
- pinctrl_unregister(pctl->pctl_dev);
return ret;
}
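
The mediatek changes are part of a tree-wide rename of pinctrl_utils_dt_free_map() to pinctrl_utils_free_map(); the helper is not devicetree-specific, so the "dt_" was dropped. Its signature and behavior are unchanged, and the pinctrl_ops field keeps its old name, so each caller updates in two places: the direct call and the ops assignment, as in this condensed pattern ("foo" is hypothetical and the other ops are elided):

	static const struct pinctrl_ops foo_pctrl_ops = {
		.dt_node_to_map	= foo_dt_node_to_map,
		.dt_free_map	= pinctrl_utils_free_map, /* was ..._dt_free_map */
	};
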
diff --git a/drivers/pinctrl/meson/Makefile b/drivers/pinctrl/meson/Makefile
index c751d22fd..24434f139 100644
--- a/drivers/pinctrl/meson/Makefile
+++ b/drivers/pinctrl/meson/Makefile
@@ -1,2 +1,2 @@
-obj-y += pinctrl-meson8.o pinctrl-meson8b.o
+obj-y += pinctrl-meson8.o pinctrl-meson8b.o pinctrl-meson-gxbb.o
obj-y += pinctrl-meson.o
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
new file mode 100644
index 000000000..eeabafbbf
--- /dev/null
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -0,0 +1,432 @@
+/*
+ * Pin controller and GPIO driver for Amlogic Meson GXBB.
+ *
+ * Copyright (C) 2016 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <dt-bindings/gpio/meson-gxbb-gpio.h>
+#include "pinctrl-meson.h"
+
+#define EE_OFF 14
+
+static const struct pinctrl_pin_desc meson_gxbb_periphs_pins[] = {
+ MESON_PIN(GPIOZ_0, EE_OFF),
+ MESON_PIN(GPIOZ_1, EE_OFF),
+ MESON_PIN(GPIOZ_2, EE_OFF),
+ MESON_PIN(GPIOZ_3, EE_OFF),
+ MESON_PIN(GPIOZ_4, EE_OFF),
+ MESON_PIN(GPIOZ_5, EE_OFF),
+ MESON_PIN(GPIOZ_6, EE_OFF),
+ MESON_PIN(GPIOZ_7, EE_OFF),
+ MESON_PIN(GPIOZ_8, EE_OFF),
+ MESON_PIN(GPIOZ_9, EE_OFF),
+ MESON_PIN(GPIOZ_10, EE_OFF),
+ MESON_PIN(GPIOZ_11, EE_OFF),
+ MESON_PIN(GPIOZ_12, EE_OFF),
+ MESON_PIN(GPIOZ_13, EE_OFF),
+ MESON_PIN(GPIOZ_14, EE_OFF),
+ MESON_PIN(GPIOZ_15, EE_OFF),
+
+ MESON_PIN(GPIOH_0, EE_OFF),
+ MESON_PIN(GPIOH_1, EE_OFF),
+ MESON_PIN(GPIOH_2, EE_OFF),
+ MESON_PIN(GPIOH_3, EE_OFF),
+
+ MESON_PIN(BOOT_0, EE_OFF),
+ MESON_PIN(BOOT_1, EE_OFF),
+ MESON_PIN(BOOT_2, EE_OFF),
+ MESON_PIN(BOOT_3, EE_OFF),
+ MESON_PIN(BOOT_4, EE_OFF),
+ MESON_PIN(BOOT_5, EE_OFF),
+ MESON_PIN(BOOT_6, EE_OFF),
+ MESON_PIN(BOOT_7, EE_OFF),
+ MESON_PIN(BOOT_8, EE_OFF),
+ MESON_PIN(BOOT_9, EE_OFF),
+ MESON_PIN(BOOT_10, EE_OFF),
+ MESON_PIN(BOOT_11, EE_OFF),
+ MESON_PIN(BOOT_12, EE_OFF),
+ MESON_PIN(BOOT_13, EE_OFF),
+ MESON_PIN(BOOT_14, EE_OFF),
+ MESON_PIN(BOOT_15, EE_OFF),
+ MESON_PIN(BOOT_16, EE_OFF),
+ MESON_PIN(BOOT_17, EE_OFF),
+
+ MESON_PIN(CARD_0, EE_OFF),
+ MESON_PIN(CARD_1, EE_OFF),
+ MESON_PIN(CARD_2, EE_OFF),
+ MESON_PIN(CARD_3, EE_OFF),
+ MESON_PIN(CARD_4, EE_OFF),
+ MESON_PIN(CARD_5, EE_OFF),
+ MESON_PIN(CARD_6, EE_OFF),
+
+ MESON_PIN(GPIODV_0, EE_OFF),
+ MESON_PIN(GPIODV_1, EE_OFF),
+ MESON_PIN(GPIODV_2, EE_OFF),
+ MESON_PIN(GPIODV_3, EE_OFF),
+ MESON_PIN(GPIODV_4, EE_OFF),
+ MESON_PIN(GPIODV_5, EE_OFF),
+ MESON_PIN(GPIODV_6, EE_OFF),
+ MESON_PIN(GPIODV_7, EE_OFF),
+ MESON_PIN(GPIODV_8, EE_OFF),
+ MESON_PIN(GPIODV_9, EE_OFF),
+ MESON_PIN(GPIODV_10, EE_OFF),
+ MESON_PIN(GPIODV_11, EE_OFF),
+ MESON_PIN(GPIODV_12, EE_OFF),
+ MESON_PIN(GPIODV_13, EE_OFF),
+ MESON_PIN(GPIODV_14, EE_OFF),
+ MESON_PIN(GPIODV_15, EE_OFF),
+ MESON_PIN(GPIODV_16, EE_OFF),
+ MESON_PIN(GPIODV_17, EE_OFF),
+ MESON_PIN(GPIODV_19, EE_OFF),
+ MESON_PIN(GPIODV_20, EE_OFF),
+ MESON_PIN(GPIODV_21, EE_OFF),
+ MESON_PIN(GPIODV_22, EE_OFF),
+ MESON_PIN(GPIODV_23, EE_OFF),
+ MESON_PIN(GPIODV_24, EE_OFF),
+ MESON_PIN(GPIODV_25, EE_OFF),
+ MESON_PIN(GPIODV_26, EE_OFF),
+ MESON_PIN(GPIODV_27, EE_OFF),
+ MESON_PIN(GPIODV_28, EE_OFF),
+ MESON_PIN(GPIODV_29, EE_OFF),
+
+ MESON_PIN(GPIOY_0, EE_OFF),
+ MESON_PIN(GPIOY_1, EE_OFF),
+ MESON_PIN(GPIOY_2, EE_OFF),
+ MESON_PIN(GPIOY_3, EE_OFF),
+ MESON_PIN(GPIOY_4, EE_OFF),
+ MESON_PIN(GPIOY_5, EE_OFF),
+ MESON_PIN(GPIOY_6, EE_OFF),
+ MESON_PIN(GPIOY_7, EE_OFF),
+ MESON_PIN(GPIOY_8, EE_OFF),
+ MESON_PIN(GPIOY_9, EE_OFF),
+ MESON_PIN(GPIOY_10, EE_OFF),
+ MESON_PIN(GPIOY_11, EE_OFF),
+ MESON_PIN(GPIOY_12, EE_OFF),
+ MESON_PIN(GPIOY_13, EE_OFF),
+ MESON_PIN(GPIOY_14, EE_OFF),
+ MESON_PIN(GPIOY_15, EE_OFF),
+ MESON_PIN(GPIOY_16, EE_OFF),
+
+ MESON_PIN(GPIOX_0, EE_OFF),
+ MESON_PIN(GPIOX_1, EE_OFF),
+ MESON_PIN(GPIOX_2, EE_OFF),
+ MESON_PIN(GPIOX_3, EE_OFF),
+ MESON_PIN(GPIOX_4, EE_OFF),
+ MESON_PIN(GPIOX_5, EE_OFF),
+ MESON_PIN(GPIOX_6, EE_OFF),
+ MESON_PIN(GPIOX_7, EE_OFF),
+ MESON_PIN(GPIOX_8, EE_OFF),
+ MESON_PIN(GPIOX_9, EE_OFF),
+ MESON_PIN(GPIOX_10, EE_OFF),
+ MESON_PIN(GPIOX_11, EE_OFF),
+ MESON_PIN(GPIOX_12, EE_OFF),
+ MESON_PIN(GPIOX_13, EE_OFF),
+ MESON_PIN(GPIOX_14, EE_OFF),
+ MESON_PIN(GPIOX_15, EE_OFF),
+ MESON_PIN(GPIOX_16, EE_OFF),
+ MESON_PIN(GPIOX_17, EE_OFF),
+ MESON_PIN(GPIOX_18, EE_OFF),
+ MESON_PIN(GPIOX_19, EE_OFF),
+ MESON_PIN(GPIOX_20, EE_OFF),
+ MESON_PIN(GPIOX_21, EE_OFF),
+ MESON_PIN(GPIOX_22, EE_OFF),
+
+ MESON_PIN(GPIOCLK_0, EE_OFF),
+ MESON_PIN(GPIOCLK_1, EE_OFF),
+ MESON_PIN(GPIOCLK_2, EE_OFF),
+ MESON_PIN(GPIOCLK_3, EE_OFF),
+
+ MESON_PIN(GPIO_TEST_N, EE_OFF),
+};
+
+static const struct pinctrl_pin_desc meson_gxbb_aobus_pins[] = {
+ MESON_PIN(GPIOAO_0, 0),
+ MESON_PIN(GPIOAO_1, 0),
+ MESON_PIN(GPIOAO_2, 0),
+ MESON_PIN(GPIOAO_3, 0),
+ MESON_PIN(GPIOAO_4, 0),
+ MESON_PIN(GPIOAO_5, 0),
+ MESON_PIN(GPIOAO_6, 0),
+ MESON_PIN(GPIOAO_7, 0),
+ MESON_PIN(GPIOAO_8, 0),
+ MESON_PIN(GPIOAO_9, 0),
+ MESON_PIN(GPIOAO_10, 0),
+ MESON_PIN(GPIOAO_11, 0),
+ MESON_PIN(GPIOAO_12, 0),
+ MESON_PIN(GPIOAO_13, 0),
+};
+
+static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
+static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
+static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
+static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
+
+static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
+ GPIO_GROUP(GPIOZ_0, EE_OFF),
+ GPIO_GROUP(GPIOZ_1, EE_OFF),
+ GPIO_GROUP(GPIOZ_2, EE_OFF),
+ GPIO_GROUP(GPIOZ_3, EE_OFF),
+ GPIO_GROUP(GPIOZ_4, EE_OFF),
+ GPIO_GROUP(GPIOZ_5, EE_OFF),
+ GPIO_GROUP(GPIOZ_6, EE_OFF),
+ GPIO_GROUP(GPIOZ_7, EE_OFF),
+ GPIO_GROUP(GPIOZ_8, EE_OFF),
+ GPIO_GROUP(GPIOZ_9, EE_OFF),
+ GPIO_GROUP(GPIOZ_10, EE_OFF),
+ GPIO_GROUP(GPIOZ_11, EE_OFF),
+ GPIO_GROUP(GPIOZ_12, EE_OFF),
+ GPIO_GROUP(GPIOZ_13, EE_OFF),
+ GPIO_GROUP(GPIOZ_14, EE_OFF),
+ GPIO_GROUP(GPIOZ_15, EE_OFF),
+
+ GPIO_GROUP(GPIOH_0, EE_OFF),
+ GPIO_GROUP(GPIOH_1, EE_OFF),
+ GPIO_GROUP(GPIOH_2, EE_OFF),
+ GPIO_GROUP(GPIOH_3, EE_OFF),
+
+ GPIO_GROUP(BOOT_0, EE_OFF),
+ GPIO_GROUP(BOOT_1, EE_OFF),
+ GPIO_GROUP(BOOT_2, EE_OFF),
+ GPIO_GROUP(BOOT_3, EE_OFF),
+ GPIO_GROUP(BOOT_4, EE_OFF),
+ GPIO_GROUP(BOOT_5, EE_OFF),
+ GPIO_GROUP(BOOT_6, EE_OFF),
+ GPIO_GROUP(BOOT_7, EE_OFF),
+ GPIO_GROUP(BOOT_8, EE_OFF),
+ GPIO_GROUP(BOOT_9, EE_OFF),
+ GPIO_GROUP(BOOT_10, EE_OFF),
+ GPIO_GROUP(BOOT_11, EE_OFF),
+ GPIO_GROUP(BOOT_12, EE_OFF),
+ GPIO_GROUP(BOOT_13, EE_OFF),
+ GPIO_GROUP(BOOT_14, EE_OFF),
+ GPIO_GROUP(BOOT_15, EE_OFF),
+ GPIO_GROUP(BOOT_16, EE_OFF),
+ GPIO_GROUP(BOOT_17, EE_OFF),
+
+ GPIO_GROUP(CARD_0, EE_OFF),
+ GPIO_GROUP(CARD_1, EE_OFF),
+ GPIO_GROUP(CARD_2, EE_OFF),
+ GPIO_GROUP(CARD_3, EE_OFF),
+ GPIO_GROUP(CARD_4, EE_OFF),
+ GPIO_GROUP(CARD_5, EE_OFF),
+ GPIO_GROUP(CARD_6, EE_OFF),
+
+ GPIO_GROUP(GPIODV_0, EE_OFF),
+ GPIO_GROUP(GPIODV_1, EE_OFF),
+ GPIO_GROUP(GPIODV_2, EE_OFF),
+ GPIO_GROUP(GPIODV_3, EE_OFF),
+ GPIO_GROUP(GPIODV_4, EE_OFF),
+ GPIO_GROUP(GPIODV_5, EE_OFF),
+ GPIO_GROUP(GPIODV_6, EE_OFF),
+ GPIO_GROUP(GPIODV_7, EE_OFF),
+ GPIO_GROUP(GPIODV_8, EE_OFF),
+ GPIO_GROUP(GPIODV_9, EE_OFF),
+ GPIO_GROUP(GPIODV_10, EE_OFF),
+ GPIO_GROUP(GPIODV_11, EE_OFF),
+ GPIO_GROUP(GPIODV_12, EE_OFF),
+ GPIO_GROUP(GPIODV_13, EE_OFF),
+ GPIO_GROUP(GPIODV_14, EE_OFF),
+ GPIO_GROUP(GPIODV_15, EE_OFF),
+ GPIO_GROUP(GPIODV_16, EE_OFF),
+ GPIO_GROUP(GPIODV_17, EE_OFF),
+ GPIO_GROUP(GPIODV_19, EE_OFF),
+ GPIO_GROUP(GPIODV_20, EE_OFF),
+ GPIO_GROUP(GPIODV_21, EE_OFF),
+ GPIO_GROUP(GPIODV_22, EE_OFF),
+ GPIO_GROUP(GPIODV_23, EE_OFF),
+ GPIO_GROUP(GPIODV_24, EE_OFF),
+ GPIO_GROUP(GPIODV_25, EE_OFF),
+ GPIO_GROUP(GPIODV_26, EE_OFF),
+ GPIO_GROUP(GPIODV_27, EE_OFF),
+ GPIO_GROUP(GPIODV_28, EE_OFF),
+ GPIO_GROUP(GPIODV_29, EE_OFF),
+
+ GPIO_GROUP(GPIOY_0, EE_OFF),
+ GPIO_GROUP(GPIOY_1, EE_OFF),
+ GPIO_GROUP(GPIOY_2, EE_OFF),
+ GPIO_GROUP(GPIOY_3, EE_OFF),
+ GPIO_GROUP(GPIOY_4, EE_OFF),
+ GPIO_GROUP(GPIOY_5, EE_OFF),
+ GPIO_GROUP(GPIOY_6, EE_OFF),
+ GPIO_GROUP(GPIOY_7, EE_OFF),
+ GPIO_GROUP(GPIOY_8, EE_OFF),
+ GPIO_GROUP(GPIOY_9, EE_OFF),
+ GPIO_GROUP(GPIOY_10, EE_OFF),
+ GPIO_GROUP(GPIOY_11, EE_OFF),
+ GPIO_GROUP(GPIOY_12, EE_OFF),
+ GPIO_GROUP(GPIOY_13, EE_OFF),
+ GPIO_GROUP(GPIOY_14, EE_OFF),
+ GPIO_GROUP(GPIOY_15, EE_OFF),
+ GPIO_GROUP(GPIOY_16, EE_OFF),
+
+ GPIO_GROUP(GPIOX_0, EE_OFF),
+ GPIO_GROUP(GPIOX_1, EE_OFF),
+ GPIO_GROUP(GPIOX_2, EE_OFF),
+ GPIO_GROUP(GPIOX_3, EE_OFF),
+ GPIO_GROUP(GPIOX_4, EE_OFF),
+ GPIO_GROUP(GPIOX_5, EE_OFF),
+ GPIO_GROUP(GPIOX_6, EE_OFF),
+ GPIO_GROUP(GPIOX_7, EE_OFF),
+ GPIO_GROUP(GPIOX_8, EE_OFF),
+ GPIO_GROUP(GPIOX_9, EE_OFF),
+ GPIO_GROUP(GPIOX_10, EE_OFF),
+ GPIO_GROUP(GPIOX_11, EE_OFF),
+ GPIO_GROUP(GPIOX_12, EE_OFF),
+ GPIO_GROUP(GPIOX_13, EE_OFF),
+ GPIO_GROUP(GPIOX_14, EE_OFF),
+ GPIO_GROUP(GPIOX_15, EE_OFF),
+ GPIO_GROUP(GPIOX_16, EE_OFF),
+ GPIO_GROUP(GPIOX_17, EE_OFF),
+ GPIO_GROUP(GPIOX_18, EE_OFF),
+ GPIO_GROUP(GPIOX_19, EE_OFF),
+ GPIO_GROUP(GPIOX_20, EE_OFF),
+ GPIO_GROUP(GPIOX_21, EE_OFF),
+ GPIO_GROUP(GPIOX_22, EE_OFF),
+
+ GPIO_GROUP(GPIOCLK_0, EE_OFF),
+ GPIO_GROUP(GPIOCLK_1, EE_OFF),
+ GPIO_GROUP(GPIOCLK_2, EE_OFF),
+ GPIO_GROUP(GPIOCLK_3, EE_OFF),
+
+ GPIO_GROUP(GPIO_TEST_N, EE_OFF),
+};
+
+static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
+ GPIO_GROUP(GPIOAO_0, 0),
+ GPIO_GROUP(GPIOAO_1, 0),
+ GPIO_GROUP(GPIOAO_2, 0),
+ GPIO_GROUP(GPIOAO_3, 0),
+ GPIO_GROUP(GPIOAO_4, 0),
+ GPIO_GROUP(GPIOAO_5, 0),
+ GPIO_GROUP(GPIOAO_6, 0),
+ GPIO_GROUP(GPIOAO_7, 0),
+ GPIO_GROUP(GPIOAO_8, 0),
+ GPIO_GROUP(GPIOAO_9, 0),
+ GPIO_GROUP(GPIOAO_10, 0),
+ GPIO_GROUP(GPIOAO_11, 0),
+ GPIO_GROUP(GPIOAO_12, 0),
+ GPIO_GROUP(GPIOAO_13, 0),
+
+ /* bank AO */
+ GROUP(uart_tx_ao_a, 0, 12),
+ GROUP(uart_rx_ao_a, 0, 11),
+ GROUP(uart_cts_ao_a, 0, 10),
+ GROUP(uart_rts_ao_a, 0, 9),
+};
+
+static const char * const gpio_periphs_groups[] = {
+ "GPIOZ_0", "GPIOZ_1", "GPIOZ_2", "GPIOZ_3", "GPIOZ_4",
+ "GPIOZ_5", "GPIOZ_6", "GPIOZ_7", "GPIOZ_8", "GPIOZ_9",
+ "GPIOZ_10", "GPIOZ_11", "GPIOZ_12", "GPIOZ_13", "GPIOZ_14",
+ "GPIOZ_15",
+
+ "GPIOH_0", "GPIOH_1", "GPIOH_2", "GPIOH_3",
+
+ "BOOT_0", "BOOT_1", "BOOT_2", "BOOT_3", "BOOT_4",
+ "BOOT_5", "BOOT_6", "BOOT_7", "BOOT_8", "BOOT_9",
+ "BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
+ "BOOT_15", "BOOT_16", "BOOT_17",
+
+ "CARD_0", "CARD_1", "CARD_2", "CARD_3", "CARD_4",
+ "CARD_5", "CARD_6",
+
+ "GPIODV_0", "GPIODV_1", "GPIODV_2", "GPIODV_3", "GPIODV_4",
+ "GPIODV_5", "GPIODV_6", "GPIODV_7", "GPIODV_8", "GPIODV_9",
+ "GPIODV_10", "GPIODV_11", "GPIODV_12", "GPIODV_13", "GPIODV_14",
+ "GPIODV_15", "GPIODV_16", "GPIODV_17", "GPIODV_18", "GPIODV_19",
+ "GPIODV_20", "GPIODV_21", "GPIODV_22", "GPIODV_23", "GPIODV_24",
+ "GPIODV_25", "GPIODV_26", "GPIODV_27", "GPIODV_28", "GPIODV_29",
+
+ "GPIOY_0", "GPIOY_1", "GPIOY_2", "GPIOY_3", "GPIOY_4",
+ "GPIOY_5", "GPIOY_6", "GPIOY_7", "GPIOY_8", "GPIOY_9",
+ "GPIOY_10", "GPIOY_11", "GPIOY_12", "GPIOY_13", "GPIOY_14",
+ "GPIOY_15", "GPIOY_16",
+
+ "GPIOX_0", "GPIOX_1", "GPIOX_2", "GPIOX_3", "GPIOX_4",
+ "GPIOX_5", "GPIOX_6", "GPIOX_7", "GPIOX_8", "GPIOX_9",
+ "GPIOX_10", "GPIOX_11", "GPIOX_12", "GPIOX_13", "GPIOX_14",
+ "GPIOX_15", "GPIOX_16", "GPIOX_17", "GPIOX_18", "GPIOX_19",
+ "GPIOX_20", "GPIOX_21", "GPIOX_22",
+
+ "GPIO_TEST_N",
+};
+
+static const char * const gpio_aobus_groups[] = {
+ "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3", "GPIOAO_4",
+ "GPIOAO_5", "GPIOAO_6", "GPIOAO_7", "GPIOAO_8", "GPIOAO_9",
+ "GPIOAO_10", "GPIOAO_11", "GPIOAO_12", "GPIOAO_13",
+};
+
+static const char * const uart_ao_groups[] = {
+ "uart_tx_ao_a", "uart_rx_ao_a", "uart_cts_ao_a", "uart_rts_ao_a"
+};
+
+static struct meson_pmx_func meson_gxbb_periphs_functions[] = {
+ FUNCTION(gpio_periphs),
+};
+
+static struct meson_pmx_func meson_gxbb_aobus_functions[] = {
+ FUNCTION(gpio_aobus),
+ FUNCTION(uart_ao),
+};
+
+static struct meson_bank meson_gxbb_periphs_banks[] = {
+ /* name first last pullen pull dir out in */
+ BANK("X", PIN(GPIOX_0, EE_OFF), PIN(GPIOX_22, EE_OFF), 4, 0, 4, 0, 12, 0, 13, 0, 14, 0),
+ BANK("Y", PIN(GPIOY_0, EE_OFF), PIN(GPIOY_16, EE_OFF), 1, 0, 1, 0, 3, 0, 4, 0, 5, 0),
+ BANK("DV", PIN(GPIODV_0, EE_OFF), PIN(GPIODV_29, EE_OFF), 0, 0, 0, 0, 0, 0, 1, 0, 2, 0),
+ BANK("H", PIN(GPIOH_0, EE_OFF), PIN(GPIOH_3, EE_OFF), 1, 20, 1, 20, 3, 20, 4, 20, 5, 20),
+ BANK("Z", PIN(GPIOZ_0, EE_OFF), PIN(GPIOZ_15, EE_OFF), 3, 0, 3, 0, 9, 0, 10, 0, 11, 0),
+ BANK("CARD", PIN(CARD_0, EE_OFF), PIN(CARD_6, EE_OFF), 2, 20, 2, 20, 6, 20, 7, 20, 8, 20),
+ BANK("BOOT", PIN(BOOT_0, EE_OFF), PIN(BOOT_17, EE_OFF), 2, 0, 2, 0, 6, 0, 7, 0, 8, 0),
+ BANK("CLK", PIN(GPIOCLK_0, EE_OFF), PIN(GPIOCLK_3, EE_OFF), 3, 28, 3, 28, 9, 28, 10, 28, 11, 28),
+};
+
+static struct meson_bank meson_gxbb_aobus_banks[] = {
+ /* name first last pullen pull dir out in */
+ BANK("AO", PIN(GPIOAO_0, 0), PIN(GPIOAO_13, 0), 0, 0, 0, 16, 0, 0, 0, 16, 1, 0),
+};
+
+static struct meson_domain_data meson_gxbb_periphs_domain_data = {
+ .name = "periphs-banks",
+ .banks = meson_gxbb_periphs_banks,
+ .num_banks = ARRAY_SIZE(meson_gxbb_periphs_banks),
+ .pin_base = 14,
+ .num_pins = 120,
+};
+
+static struct meson_domain_data meson_gxbb_aobus_domain_data = {
+ .name = "aobus-banks",
+ .banks = meson_gxbb_aobus_banks,
+ .num_banks = ARRAY_SIZE(meson_gxbb_aobus_banks),
+ .pin_base = 0,
+ .num_pins = 14,
+};
+
+struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
+ .pins = meson_gxbb_periphs_pins,
+ .groups = meson_gxbb_periphs_groups,
+ .funcs = meson_gxbb_periphs_functions,
+ .domain_data = &meson_gxbb_periphs_domain_data,
+ .num_pins = ARRAY_SIZE(meson_gxbb_periphs_pins),
+ .num_groups = ARRAY_SIZE(meson_gxbb_periphs_groups),
+ .num_funcs = ARRAY_SIZE(meson_gxbb_periphs_functions),
+};
+
+struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data = {
+ .pins = meson_gxbb_aobus_pins,
+ .groups = meson_gxbb_aobus_groups,
+ .funcs = meson_gxbb_aobus_functions,
+ .domain_data = &meson_gxbb_aobus_domain_data,
+ .num_pins = ARRAY_SIZE(meson_gxbb_aobus_pins),
+ .num_groups = ARRAY_SIZE(meson_gxbb_aobus_groups),
+ .num_funcs = ARRAY_SIZE(meson_gxbb_aobus_functions),
+};
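
Each GROUP(name, reg, bit) entry above ties one function group to a single bit in one mux register (uart_tx_ao_a, for instance, is mux register 0, bit 12), and each BANK() row supplies a (register, bit) pair for each of the five per-bank controls named in the comment line. Read as a worked example under that assumption about the macro's parameter order, the "AO" row places pull-enable at reg 0 bit 0, pull at reg 0 bit 16, direction at reg 0 bit 0, output at reg 0 bit 16, and input at reg 1 bit 0.
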
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 0bdb8fd3a..11623c6b0 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -171,7 +171,7 @@ static const struct pinctrl_ops meson_pctrl_ops = {
.get_group_name = meson_get_group_name,
.get_group_pins = meson_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_all,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
.pin_dbg_show = meson_pin_dbg_show,
};
@@ -549,6 +549,14 @@ static const struct of_device_id meson_pinctrl_dt_match[] = {
.compatible = "amlogic,meson8b-aobus-pinctrl",
.data = &meson8b_aobus_pinctrl_data,
},
+ {
+ .compatible = "amlogic,meson-gxbb-periphs-pinctrl",
+ .data = &meson_gxbb_periphs_pinctrl_data,
+ },
+ {
+ .compatible = "amlogic,meson-gxbb-aobus-pinctrl",
+ .data = &meson_gxbb_aobus_pinctrl_data,
+ },
{ },
};
@@ -713,7 +721,7 @@ static int meson_pinctrl_probe(struct platform_device *pdev)
pc->desc.pins = pc->data->pins;
pc->desc.npins = pc->data->num_pins;
- pc->pcdev = pinctrl_register(&pc->desc, pc->dev, pc);
+ pc->pcdev = devm_pinctrl_register(pc->dev, &pc->desc, pc);
if (IS_ERR(pc->pcdev)) {
dev_err(pc->dev, "can't register pinctrl device");
return PTR_ERR(pc->pcdev);
diff --git a/drivers/pinctrl/meson/pinctrl-meson.h b/drivers/pinctrl/meson/pinctrl-meson.h
index 9c93e0d49..d89442ea4 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.h
+++ b/drivers/pinctrl/meson/pinctrl-meson.h
@@ -199,3 +199,5 @@ extern struct meson_pinctrl_data meson8_cbus_pinctrl_data;
extern struct meson_pinctrl_data meson8_aobus_pinctrl_data;
extern struct meson_pinctrl_data meson8b_cbus_pinctrl_data;
extern struct meson_pinctrl_data meson8b_aobus_pinctrl_data;
+extern struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data;
+extern struct meson_pinctrl_data meson_gxbb_aobus_pinctrl_data;
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index a100bcf4b..874f2edf8 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -564,7 +564,7 @@ static struct meson_pmx_group meson8b_cbus_groups[] = {
GROUP(eth_rx_clk, 6, 3),
GROUP(eth_txd0_1, 6, 4),
GROUP(eth_txd1_1, 6, 5),
- GROUP(eth_tx_en, 6, 0),
+ GROUP(eth_tx_en, 6, 6),
GROUP(eth_ref_clk, 6, 8),
GROUP(eth_mdc, 6, 9),
GROUP(eth_mdio_en, 6, 10),
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
index 73dc1bc5f..9cc1cc3f5 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
@@ -417,18 +417,12 @@ static int armada_370_pinctrl_probe(struct platform_device *pdev)
return mvebu_pinctrl_probe(pdev);
}
-static int armada_370_pinctrl_remove(struct platform_device *pdev)
-{
- return mvebu_pinctrl_remove(pdev);
-}
-
static struct platform_driver armada_370_pinctrl_driver = {
.driver = {
.name = "armada-370-pinctrl",
.of_match_table = armada_370_pinctrl_of_match,
},
.probe = armada_370_pinctrl_probe,
- .remove = armada_370_pinctrl_remove,
};
module_platform_driver(armada_370_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-375.c b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
index 54e9fbd01..070651431 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-375.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
@@ -435,18 +435,12 @@ static int armada_375_pinctrl_probe(struct platform_device *pdev)
return mvebu_pinctrl_probe(pdev);
}
-static int armada_375_pinctrl_remove(struct platform_device *pdev)
-{
- return mvebu_pinctrl_remove(pdev);
-}
-
static struct platform_driver armada_375_pinctrl_driver = {
.driver = {
.name = "armada-375-pinctrl",
.of_match_table = of_match_ptr(armada_375_pinctrl_of_match),
},
.probe = armada_375_pinctrl_probe,
- .remove = armada_375_pinctrl_remove,
};
module_platform_driver(armada_375_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
index 6ec82c62d..4e84c8e49 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
@@ -446,18 +446,12 @@ static int armada_38x_pinctrl_probe(struct platform_device *pdev)
return mvebu_pinctrl_probe(pdev);
}
-static int armada_38x_pinctrl_remove(struct platform_device *pdev)
-{
- return mvebu_pinctrl_remove(pdev);
-}
-
static struct platform_driver armada_38x_pinctrl_driver = {
.driver = {
.name = "armada-38x-pinctrl",
.of_match_table = of_match_ptr(armada_38x_pinctrl_of_match),
},
.probe = armada_38x_pinctrl_probe,
- .remove = armada_38x_pinctrl_remove,
};
module_platform_driver(armada_38x_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
index fcfe9b478..e288f8ba0 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
@@ -428,18 +428,12 @@ static int armada_39x_pinctrl_probe(struct platform_device *pdev)
return mvebu_pinctrl_probe(pdev);
}
-static int armada_39x_pinctrl_remove(struct platform_device *pdev)
-{
- return mvebu_pinctrl_remove(pdev);
-}
-
static struct platform_driver armada_39x_pinctrl_driver = {
.driver = {
.name = "armada-39x-pinctrl",
.of_match_table = of_match_ptr(armada_39x_pinctrl_of_match),
},
.probe = armada_39x_pinctrl_probe,
- .remove = armada_39x_pinctrl_remove,
};
module_platform_driver(armada_39x_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
index bf70e0953..e4ea71a9d 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
@@ -502,18 +502,12 @@ static int armada_xp_pinctrl_probe(struct platform_device *pdev)
return mvebu_pinctrl_probe(pdev);
}
-static int armada_xp_pinctrl_remove(struct platform_device *pdev)
-{
- return mvebu_pinctrl_remove(pdev);
-}
-
static struct platform_driver armada_xp_pinctrl_driver = {
.driver = {
.name = "armada-xp-pinctrl",
.of_match_table = armada_xp_pinctrl_of_match,
},
.probe = armada_xp_pinctrl_probe,
- .remove = armada_xp_pinctrl_remove,
.suspend = armada_xp_pinctrl_suspend,
.resume = armada_xp_pinctrl_resume,
};
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index 95bfd0653..f93ae0dce 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -840,12 +840,9 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
static int dove_pinctrl_remove(struct platform_device *pdev)
{
- int ret;
-
- ret = mvebu_pinctrl_remove(pdev);
if (!IS_ERR(clk))
clk_disable_unprepare(clk);
- return ret;
+ return 0;
}
static struct platform_driver dove_pinctrl_driver = {
diff --git a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
index 0f07dc554..a78e9a499 100644
--- a/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
+++ b/drivers/pinctrl/mvebu/pinctrl-kirkwood.c
@@ -481,18 +481,12 @@ static int kirkwood_pinctrl_probe(struct platform_device *pdev)
return mvebu_pinctrl_probe(pdev);
}
-static int kirkwood_pinctrl_remove(struct platform_device *pdev)
-{
- return mvebu_pinctrl_remove(pdev);
-}
-
static struct platform_driver kirkwood_pinctrl_driver = {
.driver = {
.name = "kirkwood-pinctrl",
.of_match_table = kirkwood_pinctrl_of_match,
},
.probe = kirkwood_pinctrl_probe,
- .remove = kirkwood_pinctrl_remove,
};
module_platform_driver(kirkwood_pinctrl_driver);
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index 3ef798fac..b6ec6db78 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -711,7 +711,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
return ret;
}
- pctl->pctldev = pinctrl_register(&pctl->desc, &pdev->dev, pctl);
+ pctl->pctldev = devm_pinctrl_register(&pdev->dev, &pctl->desc, pctl);
if (IS_ERR(pctl->pctldev)) {
dev_err(&pdev->dev, "unable to register pinctrl driver\n");
return PTR_ERR(pctl->pctldev);
@@ -725,10 +725,3 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-
-int mvebu_pinctrl_remove(struct platform_device *pdev)
-{
- struct mvebu_pinctrl *pctl = platform_get_drvdata(pdev);
- pinctrl_unregister(pctl->pctldev);
- return 0;
-}
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.h b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
index 65a98e6f7..b75a5f4ad 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.h
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
@@ -202,6 +202,5 @@ static inline int default_mpp_ctrl_set(void __iomem *base, unsigned int pid,
}
int mvebu_pinctrl_probe(struct platform_device *pdev);
-int mvebu_pinctrl_remove(struct platform_device *pdev);
#endif
diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c
index 3b7122d82..345c3df66 100644
--- a/drivers/pinctrl/mvebu/pinctrl-orion.c
+++ b/drivers/pinctrl/mvebu/pinctrl-orion.c
@@ -239,18 +239,12 @@ static int orion_pinctrl_probe(struct platform_device *pdev)
return mvebu_pinctrl_probe(pdev);
}
-static int orion_pinctrl_remove(struct platform_device *pdev)
-{
- return mvebu_pinctrl_remove(pdev);
-}
-
static struct platform_driver orion_pinctrl_driver = {
.driver = {
.name = "orion-pinctrl",
.of_match_table = of_match_ptr(orion_pinctrl_of_match),
},
.probe = orion_pinctrl_probe,
- .remove = orion_pinctrl_remove,
};
module_platform_driver(orion_pinctrl_driver);
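
Across the mvebu drivers the pattern is uniform: once mvebu_pinctrl_probe() uses devm_pinctrl_register(), mvebu_pinctrl_remove() has nothing left to do, so it is deleted along with every per-SoC wrapper and .remove hook; only dove keeps a .remove(), to disable its clock. A sketch of the resulting driver declaration ("bar" is a placeholder SoC, not a driver in this diff):

	static struct platform_driver bar_pinctrl_driver = {
		.driver = {
			.name = "bar-pinctrl",
			.of_match_table = bar_pinctrl_of_match,
		},
		.probe = bar_pinctrl_probe,
		/* no .remove: devm tears the pin controller down on detach */
	};
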
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 1f7469c98..7d343c22c 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -937,7 +937,7 @@ static int abx500_dt_node_to_map(struct pinctrl_dev *pctldev,
ret = abx500_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
- pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+ pinctrl_utils_free_map(pctldev, *map, *num_maps);
return ret;
}
}
@@ -951,7 +951,7 @@ static const struct pinctrl_ops abx500_pinctrl_ops = {
.get_group_pins = abx500_get_group_pins,
.pin_dbg_show = abx500_pin_dbg_show,
.dt_node_to_map = abx500_dt_node_to_map,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int abx500_pin_config_get(struct pinctrl_dev *pctldev,
@@ -1212,7 +1212,8 @@ static int abx500_gpio_probe(struct platform_device *pdev)
abx500_pinctrl_desc.pins = pct->soc->pins;
abx500_pinctrl_desc.npins = pct->soc->npins;
- pct->pctldev = pinctrl_register(&abx500_pinctrl_desc, &pdev->dev, pct);
+ pct->pctldev = devm_pinctrl_register(&pdev->dev, &abx500_pinctrl_desc,
+ pct);
if (IS_ERR(pct->pctldev)) {
dev_err(&pdev->dev,
"could not register abx500 pinctrl driver\n");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index c8969dd49..38faceff2 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
+#include <linux/bitops.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
@@ -292,15 +293,14 @@ static DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
static void __nmk_gpio_set_mode(struct nmk_gpio_chip *nmk_chip,
unsigned offset, int gpio_mode)
{
- u32 bit = 1 << offset;
u32 afunc, bfunc;
- afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & ~bit;
- bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & ~bit;
+ afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & ~BIT(offset);
+ bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & ~BIT(offset);
if (gpio_mode & NMK_GPIO_ALT_A)
- afunc |= bit;
+ afunc |= BIT(offset);
if (gpio_mode & NMK_GPIO_ALT_B)
- bfunc |= bit;
+ bfunc |= BIT(offset);
writel(afunc, nmk_chip->addr + NMK_GPIO_AFSLA);
writel(bfunc, nmk_chip->addr + NMK_GPIO_AFSLB);
}
@@ -308,55 +308,52 @@ static void __nmk_gpio_set_mode(struct nmk_gpio_chip *nmk_chip,
static void __nmk_gpio_set_slpm(struct nmk_gpio_chip *nmk_chip,
unsigned offset, enum nmk_gpio_slpm mode)
{
- u32 bit = 1 << offset;
u32 slpm;
slpm = readl(nmk_chip->addr + NMK_GPIO_SLPC);
if (mode == NMK_GPIO_SLPM_NOCHANGE)
- slpm |= bit;
+ slpm |= BIT(offset);
else
- slpm &= ~bit;
+ slpm &= ~BIT(offset);
writel(slpm, nmk_chip->addr + NMK_GPIO_SLPC);
}
static void __nmk_gpio_set_pull(struct nmk_gpio_chip *nmk_chip,
unsigned offset, enum nmk_gpio_pull pull)
{
- u32 bit = 1 << offset;
u32 pdis;
pdis = readl(nmk_chip->addr + NMK_GPIO_PDIS);
if (pull == NMK_GPIO_PULL_NONE) {
- pdis |= bit;
- nmk_chip->pull_up &= ~bit;
+ pdis |= BIT(offset);
+ nmk_chip->pull_up &= ~BIT(offset);
} else {
- pdis &= ~bit;
+ pdis &= ~BIT(offset);
}
writel(pdis, nmk_chip->addr + NMK_GPIO_PDIS);
if (pull == NMK_GPIO_PULL_UP) {
- nmk_chip->pull_up |= bit;
- writel(bit, nmk_chip->addr + NMK_GPIO_DATS);
+ nmk_chip->pull_up |= BIT(offset);
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATS);
} else if (pull == NMK_GPIO_PULL_DOWN) {
- nmk_chip->pull_up &= ~bit;
- writel(bit, nmk_chip->addr + NMK_GPIO_DATC);
+ nmk_chip->pull_up &= ~BIT(offset);
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATC);
}
}
static void __nmk_gpio_set_lowemi(struct nmk_gpio_chip *nmk_chip,
unsigned offset, bool lowemi)
{
- u32 bit = BIT(offset);
- bool enabled = nmk_chip->lowemi & bit;
+ bool enabled = nmk_chip->lowemi & BIT(offset);
if (lowemi == enabled)
return;
if (lowemi)
- nmk_chip->lowemi |= bit;
+ nmk_chip->lowemi |= BIT(offset);
else
- nmk_chip->lowemi &= ~bit;
+ nmk_chip->lowemi &= ~BIT(offset);
writel_relaxed(nmk_chip->lowemi,
nmk_chip->addr + NMK_GPIO_LOWEMI);
@@ -365,22 +362,22 @@ static void __nmk_gpio_set_lowemi(struct nmk_gpio_chip *nmk_chip,
static void __nmk_gpio_make_input(struct nmk_gpio_chip *nmk_chip,
unsigned offset)
{
- writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC);
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRC);
}
static void __nmk_gpio_set_output(struct nmk_gpio_chip *nmk_chip,
unsigned offset, int val)
{
if (val)
- writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATS);
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATS);
else
- writel(1 << offset, nmk_chip->addr + NMK_GPIO_DATC);
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DATC);
}
static void __nmk_gpio_make_output(struct nmk_gpio_chip *nmk_chip,
unsigned offset, int val)
{
- writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRS);
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRS);
__nmk_gpio_set_output(nmk_chip, offset, val);
}
@@ -614,34 +611,7 @@ static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev,
return NMK_GPIO_ALT_C;
}
-int nmk_gpio_get_mode(int gpio)
-{
- struct nmk_gpio_chip *nmk_chip;
- u32 afunc, bfunc, bit;
-
- nmk_chip = nmk_gpio_chips[gpio / NMK_GPIO_PER_CHIP];
- if (!nmk_chip)
- return -EINVAL;
-
- bit = 1 << (gpio % NMK_GPIO_PER_CHIP);
-
- clk_enable(nmk_chip->clk);
-
- afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & bit;
- bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & bit;
-
- clk_disable(nmk_chip->clk);
-
- return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
-}
-EXPORT_SYMBOL(nmk_gpio_get_mode);
-
-
/* IRQ functions */
-static inline int nmk_gpio_get_bitmask(int gpio)
-{
- return 1 << (gpio % NMK_GPIO_PER_CHIP);
-}
static void nmk_gpio_irq_ack(struct irq_data *d)
{
@@ -649,7 +619,7 @@ static void nmk_gpio_irq_ack(struct irq_data *d)
struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
clk_enable(nmk_chip->clk);
- writel(nmk_gpio_get_bitmask(d->hwirq), nmk_chip->addr + NMK_GPIO_IC);
+ writel(BIT(d->hwirq), nmk_chip->addr + NMK_GPIO_IC);
clk_disable(nmk_chip->clk);
}
@@ -659,10 +629,9 @@ enum nmk_gpio_irq_type {
};
static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
- int gpio, enum nmk_gpio_irq_type which,
+ int offset, enum nmk_gpio_irq_type which,
bool enable)
{
- u32 bitmask = nmk_gpio_get_bitmask(gpio);
u32 *rimscval;
u32 *fimscval;
u32 rimscreg;
@@ -681,24 +650,24 @@ static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
}
/* we must individually set/clear the two edges */
- if (nmk_chip->edge_rising & bitmask) {
+ if (nmk_chip->edge_rising & BIT(offset)) {
if (enable)
- *rimscval |= bitmask;
+ *rimscval |= BIT(offset);
else
- *rimscval &= ~bitmask;
+ *rimscval &= ~BIT(offset);
writel(*rimscval, nmk_chip->addr + rimscreg);
}
- if (nmk_chip->edge_falling & bitmask) {
+ if (nmk_chip->edge_falling & BIT(offset)) {
if (enable)
- *fimscval |= bitmask;
+ *fimscval |= BIT(offset);
else
- *fimscval &= ~bitmask;
+ *fimscval &= ~BIT(offset);
writel(*fimscval, nmk_chip->addr + fimscreg);
}
}
static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
- int gpio, bool on)
+ int offset, bool on)
{
/*
* Ensure WAKEUP_ENABLE is on. No need to disable it if wakeup is
@@ -706,21 +675,19 @@ static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
* wakeup is anyhow controlled by the RIMSC and FIMSC registers.
*/
if (nmk_chip->sleepmode && on) {
- __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP,
+ __nmk_gpio_set_slpm(nmk_chip, offset,
NMK_GPIO_SLPM_WAKEUP_ENABLE);
}
- __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on);
+ __nmk_gpio_irq_modify(nmk_chip, offset, WAKE, on);
}
static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable)
{
struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
- u32 bitmask;
nmk_chip = irq_data_get_irq_chip_data(d);
- bitmask = nmk_gpio_get_bitmask(d->hwirq);
if (!nmk_chip)
return -EINVAL;
@@ -730,7 +697,7 @@ static int nmk_gpio_irq_maskunmask(struct irq_data *d, bool enable)
__nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, enable);
- if (!(nmk_chip->real_wake & bitmask))
+ if (!(nmk_chip->real_wake & BIT(d->hwirq)))
__nmk_gpio_set_wake(nmk_chip, d->hwirq, enable);
spin_unlock(&nmk_chip->lock);
@@ -754,12 +721,10 @@ static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
- u32 bitmask;
nmk_chip = irq_data_get_irq_chip_data(d);
if (!nmk_chip)
return -EINVAL;
- bitmask = nmk_gpio_get_bitmask(d->hwirq);
clk_enable(nmk_chip->clk);
spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
@@ -769,9 +734,9 @@ static int nmk_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
__nmk_gpio_set_wake(nmk_chip, d->hwirq, on);
if (on)
- nmk_chip->real_wake |= bitmask;
+ nmk_chip->real_wake |= BIT(d->hwirq);
else
- nmk_chip->real_wake &= ~bitmask;
+ nmk_chip->real_wake &= ~BIT(d->hwirq);
spin_unlock(&nmk_chip->lock);
spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
@@ -786,10 +751,8 @@ static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
bool wake = irqd_is_wakeup_set(d);
struct nmk_gpio_chip *nmk_chip;
unsigned long flags;
- u32 bitmask;
nmk_chip = irq_data_get_irq_chip_data(d);
- bitmask = nmk_gpio_get_bitmask(d->hwirq);
if (!nmk_chip)
return -EINVAL;
if (type & IRQ_TYPE_LEVEL_HIGH)
@@ -806,13 +769,13 @@ static int nmk_gpio_irq_set_type(struct irq_data *d, unsigned int type)
if (enabled || wake)
__nmk_gpio_irq_modify(nmk_chip, d->hwirq, WAKE, false);
- nmk_chip->edge_rising &= ~bitmask;
+ nmk_chip->edge_rising &= ~BIT(d->hwirq);
if (type & IRQ_TYPE_EDGE_RISING)
- nmk_chip->edge_rising |= bitmask;
+ nmk_chip->edge_rising |= BIT(d->hwirq);
- nmk_chip->edge_falling &= ~bitmask;
+ nmk_chip->edge_falling &= ~BIT(d->hwirq);
if (type & IRQ_TYPE_EDGE_FALLING)
- nmk_chip->edge_falling |= bitmask;
+ nmk_chip->edge_falling |= BIT(d->hwirq);
if (enabled)
__nmk_gpio_irq_modify(nmk_chip, d->hwirq, NORMAL, true);
@@ -884,13 +847,27 @@ static void nmk_gpio_latent_irq_handler(struct irq_desc *desc)
/* I/O Functions */
+static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset)
+{
+ struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+ int dir;
+
+ clk_enable(nmk_chip->clk);
+
+ dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
+
+ clk_disable(nmk_chip->clk);
+
+ return dir;
+}
+
static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset)
{
struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
clk_enable(nmk_chip->clk);
- writel(1 << offset, nmk_chip->addr + NMK_GPIO_DIRC);
+ writel(BIT(offset), nmk_chip->addr + NMK_GPIO_DIRC);
clk_disable(nmk_chip->clk);
@@ -900,12 +877,11 @@ static int nmk_gpio_make_input(struct gpio_chip *chip, unsigned offset)
static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned offset)
{
struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
- u32 bit = 1 << offset;
int value;
clk_enable(nmk_chip->clk);
- value = (readl(nmk_chip->addr + NMK_GPIO_DAT) & bit) != 0;
+ value = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
clk_disable(nmk_chip->clk);
@@ -939,6 +915,19 @@ static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned offset,
}
#ifdef CONFIG_DEBUG_FS
+static int nmk_gpio_get_mode(struct nmk_gpio_chip *nmk_chip, int offset)
+{
+ u32 afunc, bfunc;
+
+ clk_enable(nmk_chip->clk);
+
+ afunc = readl(nmk_chip->addr + NMK_GPIO_AFSLA) & BIT(offset);
+ bfunc = readl(nmk_chip->addr + NMK_GPIO_AFSLB) & BIT(offset);
+
+ clk_disable(nmk_chip->clk);
+
+ return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
+}
#include <linux/seq_file.h>
@@ -952,7 +941,6 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
bool is_out;
bool data_out;
bool pull;
- u32 bit = 1 << offset;
const char *modes[] = {
[NMK_GPIO_ALT_GPIO] = "gpio",
[NMK_GPIO_ALT_A] = "altA",
@@ -970,10 +958,10 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
};
clk_enable(nmk_chip->clk);
- is_out = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & bit);
- pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & bit);
- data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & bit);
- mode = nmk_gpio_get_mode(gpio);
+ is_out = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset));
+ pull = !(readl(nmk_chip->addr + NMK_GPIO_PDIS) & BIT(offset));
+ data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
+ mode = nmk_gpio_get_mode(nmk_chip, offset);
if ((mode == NMK_GPIO_ALT_C) && pctldev)
mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio);
@@ -1007,11 +995,10 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
*/
if (irq > 0 && desc && desc->action) {
char *trigger;
- u32 bitmask = nmk_gpio_get_bitmask(gpio);
- if (nmk_chip->edge_rising & bitmask)
+ if (nmk_chip->edge_rising & BIT(offset))
trigger = "edge-rising";
- else if (nmk_chip->edge_falling & bitmask)
+ else if (nmk_chip->edge_falling & BIT(offset))
trigger = "edge-falling";
else
trigger = "edge-undefined";
@@ -1246,6 +1233,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
chip = &nmk_chip->chip;
chip->request = gpiochip_generic_request;
chip->free = gpiochip_generic_free;
+ chip->get_direction = nmk_gpio_get_dir;
chip->direction_input = nmk_gpio_make_input;
chip->get = nmk_gpio_get_input;
chip->direction_output = nmk_gpio_make_output;
@@ -1612,7 +1600,7 @@ static int nmk_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
ret = nmk_pinctrl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
- pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+ pinctrl_utils_free_map(pctldev, *map, *num_maps);
return ret;
}
}
@@ -1626,7 +1614,7 @@ static const struct pinctrl_ops nmk_pinctrl_ops = {
.get_group_pins = nmk_get_group_pins,
.pin_dbg_show = nmk_pin_dbg_show,
.dt_node_to_map = nmk_pinctrl_dt_node_to_map,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int nmk_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
@@ -2044,7 +2032,7 @@ static int nmk_pinctrl_probe(struct platform_device *pdev)
nmk_pinctrl_desc.npins = npct->soc->npins;
npct->dev = &pdev->dev;
- npct->pctl = pinctrl_register(&nmk_pinctrl_desc, &pdev->dev, npct);
+ npct->pctl = devm_pinctrl_register(&pdev->dev, &nmk_pinctrl_desc, npct);
if (IS_ERR(npct->pctl)) {
dev_err(&pdev->dev, "could not register Nomadik pinctrl driver\n");
return PTR_ERR(npct->pctl);
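
The Nomadik hunks replace open-coded "1 << offset" masks (and the nmk_gpio_get_bitmask() helper) with the BIT() macro, which the new <linux/bitops.h> include provides. BIT(nr) expands to (1UL << (nr)), so the mask is computed as unsigned long rather than signed int. A self-contained userspace illustration of the set/clear idiom used throughout, assuming 32-bit registers:

	#include <stdio.h>

	#define BIT(nr)	(1UL << (nr))	/* same as <linux/bitops.h> */

	int main(void)
	{
		unsigned int slpm = 0;
		unsigned int offset = 5;

		slpm |= BIT(offset);	/* set, as in __nmk_gpio_set_slpm() */
		printf("set:   %#010x\n", slpm);

		slpm &= ~BIT(offset);	/* clear */
		printf("clear: %#010x\n", slpm);
		return 0;
	}
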
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 79e615971..d5bf9fae2 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -386,7 +386,7 @@ int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
return 0;
exit:
- pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+ pinctrl_utils_free_map(pctldev, *map, *num_maps);
return ret;
}
EXPORT_SYMBOL_GPL(pinconf_generic_dt_node_to_map);
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
index ecb57635a..54569a7ea 100644
--- a/drivers/pinctrl/pinctrl-adi2.c
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -1058,7 +1058,8 @@ static int adi_pinctrl_probe(struct platform_device *pdev)
adi_pinmux_desc.npins = pinctrl->soc->npins;
/* Now register the pin controller and all pins it handles */
- pinctrl->pctl = pinctrl_register(&adi_pinmux_desc, &pdev->dev, pinctrl);
+ pinctrl->pctl = devm_pinctrl_register(&pdev->dev, &adi_pinmux_desc,
+ pinctrl);
if (IS_ERR(pinctrl->pctl)) {
dev_err(&pdev->dev, "could not register pinctrl ADI2 driver\n");
return PTR_ERR(pinctrl->pctl);
@@ -1069,18 +1070,8 @@ static int adi_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int adi_pinctrl_remove(struct platform_device *pdev)
-{
- struct adi_pinctrl *pinctrl = platform_get_drvdata(pdev);
-
- pinctrl_unregister(pinctrl->pctl);
-
- return 0;
-}
-
static struct platform_driver adi_pinctrl_driver = {
.probe = adi_pinctrl_probe,
- .remove = adi_pinctrl_remove,
.driver = {
.name = DRIVER_NAME,
},
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 5c025f5b5..634b4d30e 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -580,7 +580,7 @@ static const struct pinctrl_ops amd_pinctrl_ops = {
.get_group_pins = amd_get_group_pins,
#ifdef CONFIG_OF
.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
#endif
};
@@ -783,8 +783,8 @@ static int amd_gpio_probe(struct platform_device *pdev)
gpio_dev->ngroups = ARRAY_SIZE(kerncz_groups);
amd_pinctrl_desc.name = dev_name(&pdev->dev);
- gpio_dev->pctrl = pinctrl_register(&amd_pinctrl_desc,
- &pdev->dev, gpio_dev);
+ gpio_dev->pctrl = devm_pinctrl_register(&pdev->dev, &amd_pinctrl_desc,
+ gpio_dev);
if (IS_ERR(gpio_dev->pctrl)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
return PTR_ERR(gpio_dev->pctrl);
@@ -792,7 +792,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
ret = gpiochip_add_data(&gpio_dev->gc, gpio_dev);
if (ret)
- goto out1;
+ return ret;
ret = gpiochip_add_pin_range(&gpio_dev->gc, dev_name(&pdev->dev),
0, 0, TOTAL_NUMBER_OF_PINS);
@@ -825,8 +825,6 @@ static int amd_gpio_probe(struct platform_device *pdev)
out2:
gpiochip_remove(&gpio_dev->gc);
-out1:
- pinctrl_unregister(gpio_dev->pctrl);
return ret;
}
@@ -837,13 +835,13 @@ static int amd_gpio_remove(struct platform_device *pdev)
gpio_dev = platform_get_drvdata(pdev);
gpiochip_remove(&gpio_dev->gc);
- pinctrl_unregister(gpio_dev->pctrl);
return 0;
}
static const struct acpi_device_id amd_gpio_acpi_match[] = {
{ "AMD0030", 0 },
+ { "AMDI0030", 0},
{ },
};
MODULE_DEVICE_TABLE(acpi, amd_gpio_acpi_match);
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index e844fdc6d..4e9fe7854 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -201,7 +201,7 @@ static const struct pinctrl_ops as3722_pinctrl_ops = {
.get_group_name = as3722_pinctrl_get_group_name,
.get_group_pins = as3722_pinctrl_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int as3722_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
@@ -569,8 +569,8 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
as3722_pinctrl_desc.name = dev_name(&pdev->dev);
as3722_pinctrl_desc.pins = as3722_pins_desc;
as3722_pinctrl_desc.npins = ARRAY_SIZE(as3722_pins_desc);
- as_pci->pctl = pinctrl_register(&as3722_pinctrl_desc,
- &pdev->dev, as_pci);
+ as_pci->pctl = devm_pinctrl_register(&pdev->dev, &as3722_pinctrl_desc,
+ as_pci);
if (IS_ERR(as_pci->pctl)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
return PTR_ERR(as_pci->pctl);
@@ -582,7 +582,7 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
ret = gpiochip_add_data(&as_pci->gpio_chip, as_pci);
if (ret < 0) {
dev_err(&pdev->dev, "Couldn't register gpiochip, %d\n", ret);
- goto fail_chip_add;
+ return ret;
}
ret = gpiochip_add_pin_range(&as_pci->gpio_chip, dev_name(&pdev->dev),
@@ -596,8 +596,6 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
fail_range_add:
gpiochip_remove(&as_pci->gpio_chip);
-fail_chip_add:
- pinctrl_unregister(as_pci->pctl);
return ret;
}
@@ -606,7 +604,6 @@ static int as3722_pinctrl_remove(struct platform_device *pdev)
struct as3722_pctrl_info *as_pci = platform_get_drvdata(pdev);
gpiochip_remove(&as_pci->gpio_chip);
- pinctrl_unregister(as_pci->pctl);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 2c447130b..a025b40d2 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -579,7 +579,7 @@ static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
}
if (ret < 0) {
- pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+ pinctrl_utils_free_map(pctldev, *map, *num_maps);
dev_err(pctldev->dev, "can't create maps for node %s\n",
np_config->full_name);
}
@@ -592,7 +592,7 @@ static const struct pinctrl_ops atmel_pctlops = {
.get_group_name = atmel_pctl_get_group_name,
.get_group_pins = atmel_pctl_get_group_pins,
.dt_node_to_map = atmel_pctl_dt_node_to_map,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int atmel_pmx_get_functions_count(struct pinctrl_dev *pctldev)
@@ -1036,18 +1036,19 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
goto clk_prepare_enable_error;
}
- atmel_pioctrl->pinctrl_dev = pinctrl_register(&atmel_pinctrl_desc,
- &pdev->dev,
- atmel_pioctrl);
- if (!atmel_pioctrl->pinctrl_dev) {
+ atmel_pioctrl->pinctrl_dev = devm_pinctrl_register(&pdev->dev,
+ &atmel_pinctrl_desc,
+ atmel_pioctrl);
+ if (IS_ERR(atmel_pioctrl->pinctrl_dev)) {
+ ret = PTR_ERR(atmel_pioctrl->pinctrl_dev);
dev_err(dev, "pinctrl registration failed\n");
- goto pinctrl_register_error;
+ goto clk_unprep;
}
ret = gpiochip_add_data(atmel_pioctrl->gpio_chip, atmel_pioctrl);
if (ret) {
dev_err(dev, "failed to add gpiochip\n");
- goto gpiochip_add_error;
+ goto clk_unprep;
}
ret = gpiochip_add_pin_range(atmel_pioctrl->gpio_chip, dev_name(dev),
@@ -1061,15 +1062,15 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
return 0;
-clk_prepare_enable_error:
- irq_domain_remove(atmel_pioctrl->irq_domain);
-pinctrl_register_error:
- clk_disable_unprepare(atmel_pioctrl->clk);
-gpiochip_add_error:
- pinctrl_unregister(atmel_pioctrl->pinctrl_dev);
gpiochip_add_pin_range_error:
gpiochip_remove(atmel_pioctrl->gpio_chip);
+clk_unprep:
+ clk_disable_unprepare(atmel_pioctrl->clk);
+
+clk_prepare_enable_error:
+ irq_domain_remove(atmel_pioctrl->irq_domain);
+
return ret;
}
@@ -1079,7 +1080,6 @@ int atmel_pinctrl_remove(struct platform_device *pdev)
irq_domain_remove(atmel_pioctrl->irq_domain);
clk_disable_unprepare(atmel_pioctrl->clk);
- pinctrl_unregister(atmel_pioctrl->pinctrl_dev);
gpiochip_remove(atmel_pioctrl->gpio_chip);
return 0;
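
Besides the devm conversion, the atmel probe gains a properly ordered unwind: each goto label now releases exactly what was acquired before the failure point, in reverse (LIFO) order of acquisition. Schematically, for the irq_domain → clk → gpiochip sequence above:

    /* Sketch of the LIFO unwind the relabelled error path implements. */
    gpiochip_add_pin_range_error:
    	gpiochip_remove(atmel_pioctrl->gpio_chip);	/* added third */
    clk_unprep:
    	clk_disable_unprepare(atmel_pioctrl->clk);	/* enabled second */
    clk_prepare_enable_error:
    	irq_domain_remove(atmel_pioctrl->irq_domain);	/* created first */
    	return ret;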
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 523b6b794..b7c0d6f7c 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1252,7 +1252,8 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- info->pctl = pinctrl_register(&at91_pinctrl_desc, &pdev->dev, info);
+ info->pctl = devm_pinctrl_register(&pdev->dev, &at91_pinctrl_desc,
+ info);
if (IS_ERR(info->pctl)) {
dev_err(&pdev->dev, "could not register AT91 pinctrl driver\n");
@@ -1269,15 +1270,6 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int at91_pinctrl_remove(struct platform_device *pdev)
-{
- struct at91_pinctrl *info = platform_get_drvdata(pdev);
-
- pinctrl_unregister(info->pctl);
-
- return 0;
-}
-
static int at91_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
struct at91_gpio_chip *at91_gpio = gpiochip_get_data(chip);
@@ -1660,7 +1652,7 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
}
/* This structure is replicated for each GPIO block allocated at probe time */
-static struct gpio_chip at91_gpio_template = {
+static const struct gpio_chip at91_gpio_template = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
.get_direction = at91_gpio_get_direction,
@@ -1730,14 +1722,9 @@ static int at91_gpio_probe(struct platform_device *pdev)
goto err;
}
- ret = clk_prepare(at91_chip->clock);
- if (ret)
- goto clk_prepare_err;
-
- /* enable PIO controller's clock */
- ret = clk_enable(at91_chip->clock);
+ ret = clk_prepare_enable(at91_chip->clock);
if (ret) {
- dev_err(&pdev->dev, "failed to enable clock, ignoring.\n");
+ dev_err(&pdev->dev, "failed to prepare and enable clock, ignoring.\n");
goto clk_enable_err;
}
@@ -1797,10 +1784,8 @@ static int at91_gpio_probe(struct platform_device *pdev)
irq_setup_err:
gpiochip_remove(chip);
gpiochip_add_err:
- clk_disable(at91_chip->clock);
clk_enable_err:
- clk_unprepare(at91_chip->clock);
-clk_prepare_err:
+ clk_disable_unprepare(at91_chip->clock);
err:
dev_err(&pdev->dev, "Failure %i for GPIO %i\n", ret, alias_idx);
@@ -1821,7 +1806,6 @@ static struct platform_driver at91_pinctrl_driver = {
.of_match_table = at91_pinctrl_of_match,
},
.probe = at91_pinctrl_probe,
- .remove = at91_pinctrl_remove,
};
static struct platform_driver * const drivers[] = {
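
The at91 clock handling above also collapses the separate clk_prepare()/clk_enable() pair into the combined helper, with a single clk_disable_unprepare() in the unwind. The helpers are equivalent to calling the two halves back to back, roughly:

    /* Sketch: the combined helpers replace the two-step sequences. */
    ret = clk_prepare_enable(at91_chip->clock);	/* prepare + enable */
    if (ret)
    	goto clk_enable_err;
    /* ... rest of probe ... */
    clk_enable_err:
    	clk_disable_unprepare(at91_chip->clock);	/* disable + unprepare */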
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index f1343d6ca..30ee56427 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -84,7 +84,7 @@ static struct pinctrl_ops dc_pinctrl_ops = {
.get_group_name = dc_get_group_name,
.get_group_pins = dc_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static const char *const dc_functions[] = {
@@ -280,7 +280,7 @@ static int dc_pinctrl_probe(struct platform_device *pdev)
struct pinctrl_desc *pctl_desc;
char *pin_names;
int name_len = strlen("GP_xx") + 1;
- int i, j, ret;
+ int i, j;
pmap = devm_kzalloc(&pdev->dev, sizeof(*pmap), GFP_KERNEL);
if (!pmap)
@@ -326,26 +326,19 @@ static int dc_pinctrl_probe(struct platform_device *pdev)
pmap->dev = &pdev->dev;
- pmap->pctl = pinctrl_register(pctl_desc, &pdev->dev, pmap);
+ pmap->pctl = devm_pinctrl_register(&pdev->dev, pctl_desc, pmap);
if (IS_ERR(pmap->pctl)) {
dev_err(&pdev->dev, "pinctrl driver registration failed\n");
return PTR_ERR(pmap->pctl);
}
- ret = dc_gpiochip_add(pmap, pdev->dev.of_node);
- if (ret < 0) {
- pinctrl_unregister(pmap->pctl);
- return ret;
- }
-
- return 0;
+ return dc_gpiochip_add(pmap, pdev->dev.of_node);
}
static int dc_pinctrl_remove(struct platform_device *pdev)
{
struct dc_pinmap *pmap = platform_get_drvdata(pdev);
- pinctrl_unregister(pmap->pctl);
gpiochip_remove(&pmap->chip);
return 0;
diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c
index fc38a8540..a4d647424 100644
--- a/drivers/pinctrl/pinctrl-lantiq.c
+++ b/drivers/pinctrl/pinctrl-lantiq.c
@@ -336,7 +336,7 @@ int ltq_pinctrl_register(struct platform_device *pdev,
desc->pmxops = &ltq_pmx_ops;
info->dev = &pdev->dev;
- info->pctrl = pinctrl_register(desc, &pdev->dev, info);
+ info->pctrl = devm_pinctrl_register(&pdev->dev, desc, info);
if (IS_ERR(info->pctrl)) {
dev_err(&pdev->dev, "failed to register LTQ pinmux driver\n");
return PTR_ERR(info->pctrl);
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index b1767f7e4..8a931c7ba 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -1252,7 +1252,7 @@ static const struct pinctrl_ops lpc18xx_pctl_ops = {
.get_group_name = lpc18xx_pctl_get_group_name,
.get_group_pins = lpc18xx_pctl_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static struct pinctrl_desc lpc18xx_scu_desc = {
@@ -1355,7 +1355,7 @@ static int lpc18xx_scu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, scu);
- scu->pctl = pinctrl_register(&lpc18xx_scu_desc, &pdev->dev, scu);
+ scu->pctl = devm_pinctrl_register(&pdev->dev, &lpc18xx_scu_desc, scu);
if (IS_ERR(scu->pctl)) {
dev_err(&pdev->dev, "Could not register pinctrl driver\n");
clk_disable_unprepare(scu->clk);
@@ -1369,7 +1369,6 @@ static int lpc18xx_scu_remove(struct platform_device *pdev)
{
struct lpc18xx_scu_data *scu = platform_get_drvdata(pdev);
- pinctrl_unregister(scu->pctl);
clk_disable_unprepare(scu->clk);
return 0;
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
index f7e168044..8edb3f8c7 100644
--- a/drivers/pinctrl/pinctrl-palmas.c
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -656,7 +656,7 @@ static const struct pinctrl_ops palmas_pinctrl_ops = {
.get_group_name = palmas_pinctrl_get_group_name,
.get_group_pins = palmas_pinctrl_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int palmas_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
@@ -1043,7 +1043,8 @@ static int palmas_pinctrl_probe(struct platform_device *pdev)
palmas_pinctrl_desc.name = dev_name(&pdev->dev);
palmas_pinctrl_desc.pins = palmas_pins_desc;
palmas_pinctrl_desc.npins = ARRAY_SIZE(palmas_pins_desc);
- pci->pctl = pinctrl_register(&palmas_pinctrl_desc, &pdev->dev, pci);
+ pci->pctl = devm_pinctrl_register(&pdev->dev, &palmas_pinctrl_desc,
+ pci);
if (IS_ERR(pci->pctl)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
return PTR_ERR(pci->pctl);
@@ -1051,21 +1052,12 @@ static int palmas_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int palmas_pinctrl_remove(struct platform_device *pdev)
-{
- struct palmas_pctrl_chip_info *pci = platform_get_drvdata(pdev);
-
- pinctrl_unregister(pci->pctl);
- return 0;
-}
-
static struct platform_driver palmas_pinctrl_driver = {
.driver = {
.name = "palmas-pinctrl",
.of_match_table = palmas_pinctrl_of_match,
},
.probe = palmas_pinctrl_probe,
- .remove = palmas_pinctrl_remove,
};
module_platform_driver(palmas_pinctrl_driver);
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index 0b07d4bda..31ceb958b 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -1743,7 +1743,7 @@ static const struct pinctrl_ops pic32_pinctrl_ops = {
.get_group_name = pic32_pinctrl_get_group_name,
.get_group_pins = pic32_pinctrl_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int pic32_pinmux_get_functions_count(struct pinctrl_dev *pctldev)
@@ -2194,7 +2194,8 @@ static int pic32_pinctrl_probe(struct platform_device *pdev)
pic32_pinctrl_desc.custom_params = pic32_mpp_bindings;
pic32_pinctrl_desc.num_custom_params = ARRAY_SIZE(pic32_mpp_bindings);
- pctl->pctldev = pinctrl_register(&pic32_pinctrl_desc, &pdev->dev, pctl);
+ pctl->pctldev = devm_pinctrl_register(&pdev->dev, &pic32_pinctrl_desc,
+ pctl);
if (IS_ERR(pctl->pctldev)) {
dev_err(&pdev->dev, "Failed to register pinctrl device\n");
return PTR_ERR(pctl->pctldev);
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 2673cd9d1..c6d410ef8 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -913,7 +913,7 @@ static const struct pinctrl_ops pistachio_pinctrl_ops = {
.get_group_name = pistachio_pinctrl_get_group_name,
.get_group_pins = pistachio_pinctrl_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int pistachio_pinmux_get_functions_count(struct pinctrl_dev *pctldev)
@@ -1457,8 +1457,8 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev)
pistachio_pinctrl_desc.pins = pctl->pins;
pistachio_pinctrl_desc.npins = pctl->npins;
- pctl->pctldev = pinctrl_register(&pistachio_pinctrl_desc, &pdev->dev,
- pctl);
+ pctl->pctldev = devm_pinctrl_register(&pdev->dev, &pistachio_pinctrl_desc,
+ pctl);
if (IS_ERR(pctl->pctldev)) {
dev_err(&pdev->dev, "Failed to register pinctrl device\n");
return PTR_ERR(pctl->pctldev);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index bf032b9b4..a91026e8c 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -99,6 +99,15 @@ enum rockchip_pin_drv_type {
};
/**
+ * enum rockchip_pin_pull_type - index into the rockchip_pull_list array.
+ */
+enum rockchip_pin_pull_type {
+ PULL_TYPE_IO_DEFAULT = 0,
+ PULL_TYPE_IO_1V8_ONLY,
+ PULL_TYPE_MAX
+};
+
+/**
* @drv_type: drive strength variant using rockchip_perpin_drv_type
* @offset: if initialized to -1 it will be autocalculated, by specifying
* an initial offset value the relevant source offset can be reset
@@ -123,6 +132,7 @@ struct rockchip_drv {
* @bank_num: number of the bank, to account for holes
* @iomux: array describing the 4 iomux sources of the bank
* @drv: array describing the 4 drive strength sources of the bank
+ * @pull_type: array describing the 4 pull type sources of the bank
* @valid: is all necessary information present
* @of_node: dt node of this bank
* @drvdata: common pinctrl basedata
@@ -143,6 +153,7 @@ struct rockchip_pin_bank {
u8 bank_num;
struct rockchip_iomux iomux[4];
struct rockchip_drv drv[4];
+ enum rockchip_pin_pull_type pull_type[4];
bool valid;
struct device_node *of_node;
struct rockchip_pinctrl *drvdata;
@@ -198,6 +209,31 @@ struct rockchip_pin_bank {
}, \
}
+#define PIN_BANK_DRV_FLAGS_PULL_FLAGS(id, pins, label, drv0, drv1, \
+ drv2, drv3, pull0, pull1, \
+ pull2, pull3) \
+ { \
+ .bank_num = id, \
+ .nr_pins = pins, \
+ .name = label, \
+ .iomux = { \
+ { .offset = -1 }, \
+ { .offset = -1 }, \
+ { .offset = -1 }, \
+ { .offset = -1 }, \
+ }, \
+ .drv = { \
+ { .drv_type = drv0, .offset = -1 }, \
+ { .drv_type = drv1, .offset = -1 }, \
+ { .drv_type = drv2, .offset = -1 }, \
+ { .drv_type = drv3, .offset = -1 }, \
+ }, \
+ .pull_type[0] = pull0, \
+ .pull_type[1] = pull1, \
+ .pull_type[2] = pull2, \
+ .pull_type[3] = pull3, \
+ }
+
#define PIN_BANK_IOMUX_DRV_FLAGS_OFFSET(id, pins, label, iom0, iom1, \
iom2, iom3, drv0, drv1, drv2, \
drv3, offset0, offset1, \
@@ -220,6 +256,34 @@ struct rockchip_pin_bank {
}, \
}
+#define PIN_BANK_IOMUX_FLAGS_DRV_FLAGS_OFFSET_PULL_FLAGS(id, pins, \
+ label, iom0, iom1, iom2, \
+ iom3, drv0, drv1, drv2, \
+ drv3, offset0, offset1, \
+ offset2, offset3, pull0, \
+ pull1, pull2, pull3) \
+ { \
+ .bank_num = id, \
+ .nr_pins = pins, \
+ .name = label, \
+ .iomux = { \
+ { .type = iom0, .offset = -1 }, \
+ { .type = iom1, .offset = -1 }, \
+ { .type = iom2, .offset = -1 }, \
+ { .type = iom3, .offset = -1 }, \
+ }, \
+ .drv = { \
+ { .drv_type = drv0, .offset = offset0 }, \
+ { .drv_type = drv1, .offset = offset1 }, \
+ { .drv_type = drv2, .offset = offset2 }, \
+ { .drv_type = drv3, .offset = offset3 }, \
+ }, \
+ .pull_type[0] = pull0, \
+ .pull_type[1] = pull1, \
+ .pull_type[2] = pull2, \
+ .pull_type[3] = pull3, \
+ }
+
/**
*/
struct rockchip_pin_ctrl {
@@ -1020,12 +1084,27 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
return ret;
}
+static int rockchip_pull_list[PULL_TYPE_MAX][4] = {
+ {
+ PIN_CONFIG_BIAS_DISABLE,
+ PIN_CONFIG_BIAS_PULL_UP,
+ PIN_CONFIG_BIAS_PULL_DOWN,
+ PIN_CONFIG_BIAS_BUS_HOLD
+ },
+ {
+ PIN_CONFIG_BIAS_DISABLE,
+ PIN_CONFIG_BIAS_PULL_DOWN,
+ PIN_CONFIG_BIAS_DISABLE,
+ PIN_CONFIG_BIAS_PULL_UP
+ },
+};
+
static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
{
struct rockchip_pinctrl *info = bank->drvdata;
struct rockchip_pin_ctrl *ctrl = info->ctrl;
struct regmap *regmap;
- int reg, ret;
+ int reg, ret, pull_type;
u8 bit;
u32 data;
@@ -1048,22 +1127,11 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
case RK3288:
case RK3368:
case RK3399:
+ pull_type = bank->pull_type[pin_num / 8];
data >>= bit;
data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
- switch (data) {
- case 0:
- return PIN_CONFIG_BIAS_DISABLE;
- case 1:
- return PIN_CONFIG_BIAS_PULL_UP;
- case 2:
- return PIN_CONFIG_BIAS_PULL_DOWN;
- case 3:
- return PIN_CONFIG_BIAS_BUS_HOLD;
- }
-
- dev_err(info->dev, "unknown pull setting\n");
- return -EIO;
+ return rockchip_pull_list[pull_type][data];
default:
dev_err(info->dev, "unsupported pinctrl type\n");
return -EINVAL;
@@ -1076,7 +1144,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
struct rockchip_pinctrl *info = bank->drvdata;
struct rockchip_pin_ctrl *ctrl = info->ctrl;
struct regmap *regmap;
- int reg, ret;
+ int reg, ret, i, pull_type;
unsigned long flags;
u8 bit;
u32 data, rmask;
@@ -1105,30 +1173,28 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
case RK3288:
case RK3368:
case RK3399:
+ pull_type = bank->pull_type[pin_num / 8];
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(rockchip_pull_list[pull_type]);
+ i++) {
+ if (rockchip_pull_list[pull_type][i] == pull) {
+ ret = i;
+ break;
+ }
+ }
+
+ if (ret < 0) {
+ dev_err(info->dev, "unsupported pull setting %d\n",
+ pull);
+ return ret;
+ }
+
spin_lock_irqsave(&bank->slock, flags);
/* enable the write to the equivalent lower bits */
data = ((1 << RK3188_PULL_BITS_PER_PIN) - 1) << (bit + 16);
rmask = data | (data >> 16);
-
- switch (pull) {
- case PIN_CONFIG_BIAS_DISABLE:
- break;
- case PIN_CONFIG_BIAS_PULL_UP:
- data |= (1 << bit);
- break;
- case PIN_CONFIG_BIAS_PULL_DOWN:
- data |= (2 << bit);
- break;
- case PIN_CONFIG_BIAS_BUS_HOLD:
- data |= (3 << bit);
- break;
- default:
- spin_unlock_irqrestore(&bank->slock, flags);
- dev_err(info->dev, "unsupported pull setting %d\n",
- pull);
- return -EINVAL;
- }
+ data |= (ret << bit);
ret = regmap_update_bits(regmap, reg, rmask, data);
@@ -1208,6 +1274,16 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
+static int rockchip_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+{
+ struct rockchip_pin_bank *bank = gpiochip_get_data(chip);
+ u32 data;
+
+ data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
+
+ return !(data & BIT(offset));
+}
+
/*
* The calls to gpio_direction_output() and gpio_direction_input()
* leads to this function call (via the pinctrl_gpio_direction_{input|output}()
@@ -1636,7 +1712,7 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
if (ret)
return ret;
- info->pctl_dev = pinctrl_register(ctrldesc, &pdev->dev, info);
+ info->pctl_dev = devm_pinctrl_register(&pdev->dev, ctrldesc, info);
if (IS_ERR(info->pctl_dev)) {
dev_err(&pdev->dev, "could not register pinctrl driver\n");
return PTR_ERR(info->pctl_dev);
@@ -1741,6 +1817,7 @@ static const struct gpio_chip rockchip_gpiolib_chip = {
.free = gpiochip_generic_free,
.set = rockchip_gpio_set,
.get = rockchip_gpio_get,
+ .get_direction = rockchip_gpio_get_direction,
.direction_input = rockchip_gpio_direction_input,
.direction_output = rockchip_gpio_direction_output,
.to_irq = rockchip_gpio_to_irq,
@@ -2541,19 +2618,24 @@ static struct rockchip_pin_ctrl rk3368_pin_ctrl = {
};
static struct rockchip_pin_bank rk3399_pin_banks[] = {
- PIN_BANK_IOMUX_DRV_FLAGS_OFFSET(0, 32, "gpio0", IOMUX_SOURCE_PMU,
- IOMUX_SOURCE_PMU,
- IOMUX_SOURCE_PMU,
- IOMUX_SOURCE_PMU,
- DRV_TYPE_IO_1V8_ONLY,
- DRV_TYPE_IO_1V8_ONLY,
- DRV_TYPE_IO_DEFAULT,
- DRV_TYPE_IO_DEFAULT,
- 0x0,
- 0x8,
- -1,
- -1
- ),
+ PIN_BANK_IOMUX_FLAGS_DRV_FLAGS_OFFSET_PULL_FLAGS(0, 32, "gpio0",
+ IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ DRV_TYPE_IO_1V8_ONLY,
+ DRV_TYPE_IO_1V8_ONLY,
+ DRV_TYPE_IO_DEFAULT,
+ DRV_TYPE_IO_DEFAULT,
+ 0x0,
+ 0x8,
+ -1,
+ -1,
+ PULL_TYPE_IO_1V8_ONLY,
+ PULL_TYPE_IO_1V8_ONLY,
+ PULL_TYPE_IO_DEFAULT,
+ PULL_TYPE_IO_DEFAULT
+ ),
PIN_BANK_IOMUX_DRV_FLAGS_OFFSET(1, 32, "gpio1", IOMUX_SOURCE_PMU,
IOMUX_SOURCE_PMU,
IOMUX_SOURCE_PMU,
@@ -2567,11 +2649,15 @@ static struct rockchip_pin_bank rk3399_pin_banks[] = {
0x30,
0x38
),
- PIN_BANK_DRV_FLAGS(2, 32, "gpio2", DRV_TYPE_IO_1V8_OR_3V0,
- DRV_TYPE_IO_1V8_OR_3V0,
- DRV_TYPE_IO_1V8_ONLY,
- DRV_TYPE_IO_1V8_ONLY
- ),
+ PIN_BANK_DRV_FLAGS_PULL_FLAGS(2, 32, "gpio2", DRV_TYPE_IO_1V8_OR_3V0,
+ DRV_TYPE_IO_1V8_OR_3V0,
+ DRV_TYPE_IO_1V8_ONLY,
+ DRV_TYPE_IO_1V8_ONLY,
+ PULL_TYPE_IO_DEFAULT,
+ PULL_TYPE_IO_DEFAULT,
+ PULL_TYPE_IO_1V8_ONLY,
+ PULL_TYPE_IO_1V8_ONLY
+ ),
PIN_BANK_DRV_FLAGS(3, 32, "gpio3", DRV_TYPE_IO_3V3_ONLY,
DRV_TYPE_IO_3V3_ONLY,
DRV_TYPE_IO_3V3_ONLY,
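
The rockchip pull rework above replaces two open-coded switch statements with the rockchip_pull_list table, indexed first by the bank's per-8-pin pull_type and then by the 2-bit register field: decoding a register value becomes a plain lookup, and encoding a requested bias becomes a search for its index (banks with PULL_TYPE_IO_1V8_ONLY have no encoding for PIN_CONFIG_BIAS_BUS_HOLD, so such a request now fails with -EINVAL instead of silently writing value 3). The same file also gains rockchip_gpio_get_direction(), which negates the DDR bit because the hardware encodes 1 = output while gpiolib expects 1 = input. A sketch of the two table directions:

    /* Sketch: both directions through rockchip_pull_list. */
    int pull_type = bank->pull_type[pin_num / 8];

    /* Decode: the 2-bit register field indexes the table directly. */
    int bias = rockchip_pull_list[pull_type][data & 0x3];

    /* Encode: search for the requested bias; -EINVAL when this bank's
     * pull_type cannot express it (e.g. bus-hold on a 1V8-only bank). */
    int i, val = -EINVAL;
    for (i = 0; i < ARRAY_SIZE(rockchip_pull_list[pull_type]); i++) {
    	if (rockchip_pull_list[pull_type][i] == pull) {
    		val = i;
    		break;
    	}
    }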
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index cf9bafa10..bfdf720db 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1580,6 +1580,9 @@ static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc,
else
mask &= ~soc_mask;
pcs->write(mask, pcswi->reg);
+
+ /* flush posted write */
+ mask = pcs->read(pcswi->reg);
raw_spin_unlock(&pcs->lock);
}
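
The one-line pinctrl-single change above is a posted-write flush: on the interconnects these controllers typically sit behind, an MMIO write can still be sitting in a write buffer when the CPU moves on, so a just-masked interrupt may still fire. Reading the same register back forces the write to reach the hardware before the lock is dropped. In outline:

    raw_spin_lock(&pcs->lock);
    pcs->write(mask, pcswi->reg);	/* may be posted/buffered */
    mask = pcs->read(pcswi->reg);	/* read-back flushes the posted write */
    raw_spin_unlock(&pcs->lock);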
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index cab66c641..d0ba968af 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1724,7 +1724,7 @@ static int st_pctl_probe(struct platform_device *pdev)
pctl_desc->confops = &st_confops;
pctl_desc->name = dev_name(&pdev->dev);
- info->pctl = pinctrl_register(pctl_desc, &pdev->dev, info);
+ info->pctl = devm_pinctrl_register(&pdev->dev, pctl_desc, info);
if (IS_ERR(info->pctl)) {
dev_err(&pdev->dev, "Failed pinctrl registration\n");
return PTR_ERR(info->pctl);
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index 6546b9bb2..edfba506e 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -582,7 +582,7 @@ static struct pinctrl_ops tb10x_pinctrl_ops = {
.get_group_name = tb10x_get_group_name,
.get_group_pins = tb10x_get_group_pins,
.dt_node_to_map = tb10x_dt_node_to_map,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int tb10x_get_functions_count(struct pinctrl_dev *pctl)
@@ -806,7 +806,7 @@ static int tb10x_pinctrl_probe(struct platform_device *pdev)
}
}
- state->pctl = pinctrl_register(&tb10x_pindesc, dev, state);
+ state->pctl = devm_pinctrl_register(dev, &tb10x_pindesc, state);
if (IS_ERR(state->pctl)) {
dev_err(dev, "could not register TB10x pin driver\n");
ret = PTR_ERR(state->pctl);
@@ -824,7 +824,6 @@ static int tb10x_pinctrl_remove(struct platform_device *pdev)
{
struct tb10x_pinctrl *state = platform_get_drvdata(pdev);
- pinctrl_unregister(state->pctl);
mutex_destroy(&state->mutex);
return 0;
diff --git a/drivers/pinctrl/pinctrl-tz1090-pdc.c b/drivers/pinctrl/pinctrl-tz1090-pdc.c
index b89ad3c0c..e70e36283 100644
--- a/drivers/pinctrl/pinctrl-tz1090-pdc.c
+++ b/drivers/pinctrl/pinctrl-tz1090-pdc.c
@@ -947,7 +947,8 @@ static int tz1090_pdc_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(pmx->regs))
return PTR_ERR(pmx->regs);
- pmx->pctl = pinctrl_register(&tz1090_pdc_pinctrl_desc, &pdev->dev, pmx);
+ pmx->pctl = devm_pinctrl_register(&pdev->dev, &tz1090_pdc_pinctrl_desc,
+ pmx);
if (IS_ERR(pmx->pctl)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
return PTR_ERR(pmx->pctl);
@@ -960,15 +961,6 @@ static int tz1090_pdc_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int tz1090_pdc_pinctrl_remove(struct platform_device *pdev)
-{
- struct tz1090_pdc_pmx *pmx = platform_get_drvdata(pdev);
-
- pinctrl_unregister(pmx->pctl);
-
- return 0;
-}
-
static const struct of_device_id tz1090_pdc_pinctrl_of_match[] = {
{ .compatible = "img,tz1090-pdc-pinctrl", },
{ },
@@ -980,7 +972,6 @@ static struct platform_driver tz1090_pdc_pinctrl_driver = {
.of_match_table = tz1090_pdc_pinctrl_of_match,
},
.probe = tz1090_pdc_pinctrl_probe,
- .remove = tz1090_pdc_pinctrl_remove,
};
static int __init tz1090_pdc_pinctrl_init(void)
diff --git a/drivers/pinctrl/pinctrl-tz1090.c b/drivers/pinctrl/pinctrl-tz1090.c
index 5425299d7..04cbe530b 100644
--- a/drivers/pinctrl/pinctrl-tz1090.c
+++ b/drivers/pinctrl/pinctrl-tz1090.c
@@ -1962,7 +1962,8 @@ static int tz1090_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(pmx->regs))
return PTR_ERR(pmx->regs);
- pmx->pctl = pinctrl_register(&tz1090_pinctrl_desc, &pdev->dev, pmx);
+ pmx->pctl = devm_pinctrl_register(&pdev->dev, &tz1090_pinctrl_desc,
+ pmx);
if (IS_ERR(pmx->pctl)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
return PTR_ERR(pmx->pctl);
@@ -1975,15 +1976,6 @@ static int tz1090_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int tz1090_pinctrl_remove(struct platform_device *pdev)
-{
- struct tz1090_pmx *pmx = platform_get_drvdata(pdev);
-
- pinctrl_unregister(pmx->pctl);
-
- return 0;
-}
-
static const struct of_device_id tz1090_pinctrl_of_match[] = {
{ .compatible = "img,tz1090-pinctrl", },
{ },
@@ -1995,7 +1987,6 @@ static struct platform_driver tz1090_pinctrl_driver = {
.of_match_table = tz1090_pinctrl_of_match,
},
.probe = tz1090_pinctrl_probe,
- .remove = tz1090_pinctrl_remove,
};
static int __init tz1090_pinctrl_init(void)
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index c076021f3..d1af908a7 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1067,7 +1067,7 @@ static int u300_pmx_probe(struct platform_device *pdev)
if (IS_ERR(upmx->virtbase))
return PTR_ERR(upmx->virtbase);
- upmx->pctl = pinctrl_register(&u300_pmx_desc, &pdev->dev, upmx);
+ upmx->pctl = devm_pinctrl_register(&pdev->dev, &u300_pmx_desc, upmx);
if (IS_ERR(upmx->pctl)) {
dev_err(&pdev->dev, "could not register U300 pinmux driver\n");
return PTR_ERR(upmx->pctl);
@@ -1080,15 +1080,6 @@ static int u300_pmx_probe(struct platform_device *pdev)
return 0;
}
-static int u300_pmx_remove(struct platform_device *pdev)
-{
- struct u300_pmx *upmx = platform_get_drvdata(pdev);
-
- pinctrl_unregister(upmx->pctl);
-
- return 0;
-}
-
static const struct of_device_id u300_pinctrl_match[] = {
{ .compatible = "stericsson,pinctrl-u300" },
{},
@@ -1101,7 +1092,6 @@ static struct platform_driver u300_pmx_driver = {
.of_match_table = u300_pinctrl_match,
},
.probe = u300_pmx_probe,
- .remove = u300_pmx_remove,
};
static int __init u300_pmx_init(void)
diff --git a/drivers/pinctrl/pinctrl-utils.c b/drivers/pinctrl/pinctrl-utils.c
index d77693f2c..9189fbafb 100644
--- a/drivers/pinctrl/pinctrl-utils.c
+++ b/drivers/pinctrl/pinctrl-utils.c
@@ -122,7 +122,7 @@ int pinctrl_utils_add_config(struct pinctrl_dev *pctldev,
}
EXPORT_SYMBOL_GPL(pinctrl_utils_add_config);
-void pinctrl_utils_dt_free_map(struct pinctrl_dev *pctldev,
+void pinctrl_utils_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, unsigned num_maps)
{
int i;
@@ -139,4 +139,4 @@ void pinctrl_utils_dt_free_map(struct pinctrl_dev *pctldev,
}
kfree(map);
}
-EXPORT_SYMBOL_GPL(pinctrl_utils_dt_free_map);
+EXPORT_SYMBOL_GPL(pinctrl_utils_free_map);
diff --git a/drivers/pinctrl/pinctrl-utils.h b/drivers/pinctrl/pinctrl-utils.h
index d0ffe1ce2..8f9f2d28c 100644
--- a/drivers/pinctrl/pinctrl-utils.h
+++ b/drivers/pinctrl/pinctrl-utils.h
@@ -37,7 +37,7 @@ int pinctrl_utils_add_map_configs(struct pinctrl_dev *pctldev,
int pinctrl_utils_add_config(struct pinctrl_dev *pctldev,
unsigned long **configs, unsigned *num_configs,
unsigned long config);
-void pinctrl_utils_dt_free_map(struct pinctrl_dev *pctldev,
+void pinctrl_utils_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, unsigned num_maps);
#endif /* __PINCTRL_UTILS_H__ */
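
This rename is the anchor for all the .dt_free_map updates in this series: freeing a pinctrl_map array is not device-tree specific, so the helper loses its dt_ prefix while keeping its role as the stock .dt_free_map callback. Typical wiring, as seen throughout the hunks here (foo_pctrl_ops is a hypothetical example):

    static const struct pinctrl_ops foo_pctrl_ops = {	/* hypothetical */
    	.dt_node_to_map	= pinconf_generic_dt_node_to_map_group,
    	.dt_free_map	= pinctrl_utils_free_map,	/* renamed helper */
    };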
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index 76f1abd71..8fdc60c5a 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -862,7 +862,7 @@ static const struct pinctrl_ops zynq_pctrl_ops = {
.get_group_name = zynq_pctrl_get_group_name,
.get_group_pins = zynq_pctrl_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_all,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
/* pinmux */
@@ -1195,7 +1195,7 @@ static int zynq_pinctrl_probe(struct platform_device *pdev)
pctrl->funcs = zynq_pmux_functions;
pctrl->nfuncs = ARRAY_SIZE(zynq_pmux_functions);
- pctrl->pctrl = pinctrl_register(&zynq_desc, &pdev->dev, pctrl);
+ pctrl->pctrl = devm_pinctrl_register(&pdev->dev, &zynq_desc, pctrl);
if (IS_ERR(pctrl->pctrl))
return PTR_ERR(pctrl->pctrl);
@@ -1206,15 +1206,6 @@ static int zynq_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int zynq_pinctrl_remove(struct platform_device *pdev)
-{
- struct zynq_pinctrl *pctrl = platform_get_drvdata(pdev);
-
- pinctrl_unregister(pctrl->pctrl);
-
- return 0;
-}
-
static const struct of_device_id zynq_pinctrl_of_match[] = {
{ .compatible = "xlnx,pinctrl-zynq" },
{ }
@@ -1227,7 +1218,6 @@ static struct platform_driver zynq_pinctrl_driver = {
.of_match_table = zynq_pinctrl_of_match,
},
.probe = zynq_pinctrl_probe,
- .remove = zynq_pinctrl_remove,
};
static int __init zynq_pinctrl_init(void)
diff --git a/drivers/pinctrl/pxa/Kconfig b/drivers/pinctrl/pxa/Kconfig
index 990667ff7..c29bdcfa8 100644
--- a/drivers/pinctrl/pxa/Kconfig
+++ b/drivers/pinctrl/pxa/Kconfig
@@ -6,12 +6,20 @@ config PINCTRL_PXA
select PINCONF
select GENERIC_PINCONF
+config PINCTRL_PXA25X
+ tristate "Marvell PXA25x pin controller driver"
+ select PINCTRL_PXA
+ default y if PXA25x
+ help
+ This is the pinctrl, pinmux, pinconf driver for the Marvell
+ PXA2xx block found in the pxa25x platforms.
+
config PINCTRL_PXA27X
tristate "Marvell PXA27x pin controller driver"
select PINCTRL_PXA
default y if PXA27x
help
This is the pinctrl, pinmux, pinconf driver for the Marvell
- PXA2xx block found in the pxa25x and pxa27x platforms.
+ PXA2xx block found in the pxa27x platforms.
endif
diff --git a/drivers/pinctrl/pxa/Makefile b/drivers/pinctrl/pxa/Makefile
index f1d56af2b..ca2ade1a1 100644
--- a/drivers/pinctrl/pxa/Makefile
+++ b/drivers/pinctrl/pxa/Makefile
@@ -1,2 +1,3 @@
# Marvell PXA pin control drivers
+obj-$(CONFIG_PINCTRL_PXA25X) += pinctrl-pxa2xx.o pinctrl-pxa25x.o
obj-$(CONFIG_PINCTRL_PXA27X) += pinctrl-pxa2xx.o pinctrl-pxa27x.o
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa25x.c b/drivers/pinctrl/pxa/pinctrl-pxa25x.c
new file mode 100644
index 000000000..b98ecb3c0
--- /dev/null
+++ b/drivers/pinctrl/pxa/pinctrl-pxa25x.c
@@ -0,0 +1,274 @@
+/*
+ * Marvell PXA25x family pin control
+ *
+ * Copyright (C) 2016 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-pxa2xx.h"
+
+static const struct pxa_desc_pin pxa25x_pins[] = {
+ PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(0)),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(1),
+ PXA_FUNCTION(0, 1, "GP_RST")),
+ PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(2)),
+ PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(3)),
+ PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(4)),
+ PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(5)),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(6),
+ PXA_FUNCTION(1, 1, "MMCCLK")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(7),
+ PXA_FUNCTION(1, 1, "48_MHz")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(8),
+ PXA_FUNCTION(1, 1, "MMCCS0")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(9),
+ PXA_FUNCTION(1, 1, "MMCCS1")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(10),
+ PXA_FUNCTION(1, 1, "RTCCLK")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(11),
+ PXA_FUNCTION(1, 1, "3_6_MHz")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(12),
+ PXA_FUNCTION(1, 1, "32_kHz")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(13),
+ PXA_FUNCTION(1, 2, "MBGNT")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(14),
+ PXA_FUNCTION(0, 1, "MBREQ")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(15),
+ PXA_FUNCTION(1, 2, "nCS_1")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(16),
+ PXA_FUNCTION(1, 2, "PWM0")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(17),
+ PXA_FUNCTION(1, 2, "PWM1")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(18),
+ PXA_FUNCTION(0, 1, "RDY")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(19),
+ PXA_FUNCTION(0, 1, "DREQ[1]")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(20),
+ PXA_FUNCTION(0, 1, "DREQ[0]")),
+ PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(21)),
+ PXA_GPIO_ONLY_PIN(PXA_PINCTRL_PIN(22)),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(23),
+ PXA_FUNCTION(1, 2, "SCLK")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(24),
+ PXA_FUNCTION(1, 2, "SFRM")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(25),
+ PXA_FUNCTION(1, 2, "TXD")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(26),
+ PXA_FUNCTION(0, 1, "RXD")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(27),
+ PXA_FUNCTION(0, 1, "EXTCLK")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(28),
+ PXA_FUNCTION(0, 1, "BITCLK"),
+ PXA_FUNCTION(0, 2, "BITCLK"),
+ PXA_FUNCTION(1, 1, "BITCLK")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(29),
+ PXA_FUNCTION(0, 1, "SDATA_IN0"),
+ PXA_FUNCTION(0, 2, "SDATA_IN")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(30),
+ PXA_FUNCTION(1, 1, "SDATA_OUT"),
+ PXA_FUNCTION(1, 2, "SDATA_OUT")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(31),
+ PXA_FUNCTION(1, 1, "SYNC"),
+ PXA_FUNCTION(1, 2, "SYNC")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(32),
+ PXA_FUNCTION(0, 1, "SDATA_IN1"),
+ PXA_FUNCTION(1, 1, "SYSCLK")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(33),
+ PXA_FUNCTION(1, 2, "nCS[5]")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(34),
+ PXA_FUNCTION(0, 1, "FFRXD"),
+ PXA_FUNCTION(1, 2, "MMCCS0")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(35),
+ PXA_FUNCTION(0, 1, "CTS")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(36),
+ PXA_FUNCTION(0, 1, "DCD")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(37),
+ PXA_FUNCTION(0, 1, "DSR")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(38),
+ PXA_FUNCTION(0, 1, "RI")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(39),
+ PXA_FUNCTION(1, 1, "MMCC1"),
+ PXA_FUNCTION(1, 2, "FFTXD")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(40),
+ PXA_FUNCTION(1, 2, "DTR")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(41),
+ PXA_FUNCTION(1, 2, "RTS")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(42),
+ PXA_FUNCTION(0, 1, "BTRXD"),
+ PXA_FUNCTION(0, 3, "HWRXD")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(43),
+ PXA_FUNCTION(1, 2, "BTTXD"),
+ PXA_FUNCTION(1, 3, "HWTXD")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(44),
+ PXA_FUNCTION(0, 1, "BTCTS"),
+ PXA_FUNCTION(0, 3, "HWCTS")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(45),
+ PXA_FUNCTION(1, 2, "BTRTS"),
+ PXA_FUNCTION(1, 3, "HWRTS")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(46),
+ PXA_FUNCTION(0, 1, "ICP_RXD"),
+ PXA_FUNCTION(0, 2, "RXD")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(47),
+ PXA_FUNCTION(1, 1, "TXD"),
+ PXA_FUNCTION(1, 2, "ICP_TXD")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(48),
+ PXA_FUNCTION(1, 1, "HWTXD"),
+ PXA_FUNCTION(1, 2, "nPOE")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(49),
+ PXA_FUNCTION(0, 1, "HWRXD"),
+ PXA_FUNCTION(1, 2, "nPWE")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(50),
+ PXA_FUNCTION(0, 1, "HWCTS"),
+ PXA_FUNCTION(1, 2, "nPIOR")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(51),
+ PXA_FUNCTION(1, 1, "HWRTS"),
+ PXA_FUNCTION(1, 2, "nPIOW")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(52),
+ PXA_FUNCTION(1, 2, "nPCE[1]")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(53),
+ PXA_FUNCTION(1, 1, "MMCCLK"),
+ PXA_FUNCTION(1, 2, "nPCE[2]")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(54),
+ PXA_FUNCTION(1, 1, "MMCCLK"),
+ PXA_FUNCTION(1, 2, "nPSKTSEL")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(55),
+ PXA_FUNCTION(1, 2, "nPREG")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(56),
+ PXA_FUNCTION(0, 1, "nPWAIT")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(57),
+ PXA_FUNCTION(0, 1, "nIOIS16")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(58),
+ PXA_FUNCTION(1, 2, "LDD<0>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(59),
+ PXA_FUNCTION(1, 2, "LDD<1>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(60),
+ PXA_FUNCTION(1, 2, "LDD<2>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(61),
+ PXA_FUNCTION(1, 2, "LDD<3>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(62),
+ PXA_FUNCTION(1, 2, "LDD<4>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(63),
+ PXA_FUNCTION(1, 2, "LDD<5>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(64),
+ PXA_FUNCTION(1, 2, "LDD<6>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(65),
+ PXA_FUNCTION(1, 2, "LDD<7>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(66),
+ PXA_FUNCTION(0, 1, "MBREQ"),
+ PXA_FUNCTION(1, 2, "LDD<8>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(67),
+ PXA_FUNCTION(1, 1, "MMCCS0"),
+ PXA_FUNCTION(1, 2, "LDD<9>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(68),
+ PXA_FUNCTION(1, 1, "MMCCS1"),
+ PXA_FUNCTION(1, 2, "LDD<10>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(69),
+ PXA_FUNCTION(1, 1, "MMCCLK"),
+ PXA_FUNCTION(1, 2, "LDD<11>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(70),
+ PXA_FUNCTION(1, 1, "RTCCLK"),
+ PXA_FUNCTION(1, 2, "LDD<12>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(71),
+ PXA_FUNCTION(1, 1, "3_6_MHz"),
+ PXA_FUNCTION(1, 2, "LDD<13>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(72),
+ PXA_FUNCTION(1, 1, "32_kHz"),
+ PXA_FUNCTION(1, 2, "LDD<14>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(73),
+ PXA_FUNCTION(1, 1, "MBGNT"),
+ PXA_FUNCTION(1, 2, "LDD<15>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(74),
+ PXA_FUNCTION(1, 2, "LCD_FCLK")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(75),
+ PXA_FUNCTION(1, 2, "LCD_LCLK")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(76),
+ PXA_FUNCTION(1, 2, "LCD_PCLK")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(77),
+ PXA_FUNCTION(1, 2, "LCD_ACBIAS")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(78),
+ PXA_FUNCTION(1, 2, "nCS<2>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(79),
+ PXA_FUNCTION(1, 2, "nCS<3>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(80),
+ PXA_FUNCTION(1, 2, "nCS<4>")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(81),
+ PXA_FUNCTION(0, 1, "NSSPSCLK"),
+ PXA_FUNCTION(1, 1, "NSSPSCLK")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(82),
+ PXA_FUNCTION(0, 1, "NSSPSFRM"),
+ PXA_FUNCTION(1, 1, "NSSPSFRM")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(83),
+ PXA_FUNCTION(0, 2, "NSSPRXD"),
+ PXA_FUNCTION(1, 1, "NSSPTXD")),
+ PXA_GPIO_PIN(PXA_PINCTRL_PIN(84),
+ PXA_FUNCTION(0, 2, "NSSPRXD"),
+ PXA_FUNCTION(1, 1, "NSSPTXD")),
+};
+
+static int pxa25x_pinctrl_probe(struct platform_device *pdev)
+{
+ int ret, i;
+ void __iomem *base_af[8];
+ void __iomem *base_dir[4];
+ void __iomem *base_sleep[4];
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base_af[0] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base_af[0]))
+ return PTR_ERR(base_af[0]);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ base_dir[0] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base_dir[0]))
+ return PTR_ERR(base_dir[0]);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ base_dir[3] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base_dir[3]))
+ return PTR_ERR(base_dir[3]);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ base_sleep[0] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base_sleep[0]))
+ return PTR_ERR(base_sleep[0]);
+
+ for (i = 0; i < ARRAY_SIZE(base_af); i++)
+ base_af[i] = base_af[0] + sizeof(base_af[0]) * i;
+ for (i = 0; i < 3; i++)
+ base_dir[i] = base_dir[0] + sizeof(base_dir[0]) * i;
+ for (i = 0; i < ARRAY_SIZE(base_sleep); i++)
+ base_sleep[i] = base_sleep[0] + sizeof(base_af[0]) * i;
+
+ ret = pxa2xx_pinctrl_init(pdev, pxa25x_pins, ARRAY_SIZE(pxa25x_pins),
+ base_af, base_dir, base_sleep);
+ return ret;
+}
+
+static const struct of_device_id pxa25x_pinctrl_match[] = {
+ { .compatible = "marvell,pxa25x-pinctrl", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pxa25x_pinctrl_match);
+
+static struct platform_driver pxa25x_pinctrl_driver = {
+ .probe = pxa25x_pinctrl_probe,
+ .driver = {
+ .name = "pxa25x-pinctrl",
+ .of_match_table = pxa25x_pinctrl_match,
+ },
+};
+module_platform_driver(pxa25x_pinctrl_driver);
+
+MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
+MODULE_DESCRIPTION("Marvell PXA25x pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index f553313bc..866aa3ce1 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -57,7 +57,7 @@ static int pxa2xx_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
static const struct pinctrl_ops pxa2xx_pctl_ops = {
#ifdef CONFIG_OF
.dt_node_to_map = pinconf_generic_dt_node_to_map_all,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
#endif
.get_groups_count = pxa2xx_pctrl_get_groups_count,
.get_group_name = pxa2xx_pctrl_get_group_name,
@@ -416,7 +416,7 @@ int pxa2xx_pinctrl_init(struct platform_device *pdev,
if (ret)
return ret;
- pctl->pctl_dev = pinctrl_register(&pctl->desc, &pdev->dev, pctl);
+ pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, &pctl->desc, pctl);
if (IS_ERR(pctl->pctl_dev)) {
dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
return PTR_ERR(pctl->pctl_dev);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 8777cf083..1a44e1d03 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -101,7 +101,7 @@ static const struct pinctrl_ops msm_pinctrl_ops = {
.get_group_name = msm_get_group_name,
.get_group_pins = msm_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int msm_get_functions_count(struct pinctrl_dev *pctldev)
@@ -898,17 +898,16 @@ int msm_pinctrl_probe(struct platform_device *pdev,
msm_pinctrl_desc.name = dev_name(&pdev->dev);
msm_pinctrl_desc.pins = pctrl->soc->pins;
msm_pinctrl_desc.npins = pctrl->soc->npins;
- pctrl->pctrl = pinctrl_register(&msm_pinctrl_desc, &pdev->dev, pctrl);
+ pctrl->pctrl = devm_pinctrl_register(&pdev->dev, &msm_pinctrl_desc,
+ pctrl);
if (IS_ERR(pctrl->pctrl)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
return PTR_ERR(pctrl->pctrl);
}
ret = msm_gpio_init(pctrl);
- if (ret) {
- pinctrl_unregister(pctrl->pctrl);
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, pctrl);
@@ -923,7 +922,6 @@ int msm_pinctrl_remove(struct platform_device *pdev)
struct msm_pinctrl *pctrl = platform_get_drvdata(pdev);
gpiochip_remove(&pctrl->chip);
- pinctrl_unregister(pctrl->pctrl);
unregister_restart_handler(&pctrl->restart_nb);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 4e12ded3c..686accb89 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -212,7 +212,7 @@ static const struct pinctrl_ops pmic_gpio_pinctrl_ops = {
.get_group_name = pmic_gpio_get_group_name,
.get_group_pins = pmic_gpio_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int pmic_gpio_get_functions_count(struct pinctrl_dev *pctldev)
@@ -764,14 +764,14 @@ static int pmic_gpio_probe(struct platform_device *pdev)
state->chip.of_gpio_n_cells = 2;
state->chip.can_sleep = false;
- state->ctrl = pinctrl_register(pctrldesc, dev, state);
+ state->ctrl = devm_pinctrl_register(dev, pctrldesc, state);
if (IS_ERR(state->ctrl))
return PTR_ERR(state->ctrl);
ret = gpiochip_add_data(&state->chip, state);
if (ret) {
dev_err(state->dev, "can't add gpio chip\n");
- goto err_chip;
+ return ret;
}
ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
@@ -784,8 +784,6 @@ static int pmic_gpio_probe(struct platform_device *pdev)
err_range:
gpiochip_remove(&state->chip);
-err_chip:
- pinctrl_unregister(state->ctrl);
return ret;
}
@@ -794,7 +792,6 @@ static int pmic_gpio_remove(struct platform_device *pdev)
struct pmic_gpio_state *state = platform_get_drvdata(pdev);
gpiochip_remove(&state->chip);
- pinctrl_unregister(state->ctrl);
return 0;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 2a3e5490a..1735ffef9 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -235,7 +235,7 @@ static const struct pinctrl_ops pmic_mpp_pinctrl_ops = {
.get_group_name = pmic_mpp_get_group_name,
.get_group_pins = pmic_mpp_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int pmic_mpp_get_functions_count(struct pinctrl_dev *pctldev)
@@ -877,14 +877,14 @@ static int pmic_mpp_probe(struct platform_device *pdev)
state->chip.of_gpio_n_cells = 2;
state->chip.can_sleep = false;
- state->ctrl = pinctrl_register(pctrldesc, dev, state);
+ state->ctrl = devm_pinctrl_register(dev, pctrldesc, state);
if (IS_ERR(state->ctrl))
return PTR_ERR(state->ctrl);
ret = gpiochip_add_data(&state->chip, state);
if (ret) {
dev_err(state->dev, "can't add gpio chip\n");
- goto err_chip;
+ return ret;
}
ret = gpiochip_add_pin_range(&state->chip, dev_name(dev), 0, 0, npins);
@@ -897,8 +897,6 @@ static int pmic_mpp_probe(struct platform_device *pdev)
err_range:
gpiochip_remove(&state->chip);
-err_chip:
- pinctrl_unregister(state->ctrl);
return ret;
}
@@ -907,7 +905,6 @@ static int pmic_mpp_remove(struct platform_device *pdev)
struct pmic_mpp_state *state = platform_get_drvdata(pdev);
gpiochip_remove(&state->chip);
- pinctrl_unregister(state->ctrl);
return 0;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index cd8580d97..d3f5501d1 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -200,7 +200,7 @@ static const struct pinctrl_ops pm8xxx_pinctrl_ops = {
.get_group_name = pm8xxx_get_group_name,
.get_group_pins = pm8xxx_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int pm8xxx_get_functions_count(struct pinctrl_dev *pctldev)
@@ -729,7 +729,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
pctrl->desc.custom_conf_items = pm8xxx_conf_items;
#endif
- pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
+ pctrl->pctrl = devm_pinctrl_register(&pdev->dev, &pctrl->desc, pctrl);
if (IS_ERR(pctrl->pctrl)) {
dev_err(&pdev->dev, "couldn't register pm8xxx gpio driver\n");
return PTR_ERR(pctrl->pctrl);
@@ -745,7 +745,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
ret = gpiochip_add_data(&pctrl->chip, pctrl);
if (ret) {
dev_err(&pdev->dev, "failed register gpiochip\n");
- goto unregister_pinctrl;
+ return ret;
}
ret = gpiochip_add_pin_range(&pctrl->chip,
@@ -765,9 +765,6 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
unregister_gpiochip:
gpiochip_remove(&pctrl->chip);
-unregister_pinctrl:
- pinctrl_unregister(pctrl->pctrl);
-
return ret;
}
@@ -777,8 +774,6 @@ static int pm8xxx_gpio_remove(struct platform_device *pdev)
gpiochip_remove(&pctrl->chip);
- pinctrl_unregister(pctrl->pctrl);
-
return 0;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 54a5402a9..9191727af 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -277,7 +277,7 @@ static const struct pinctrl_ops pm8xxx_pinctrl_ops = {
.get_group_name = pm8xxx_get_group_name,
.get_group_pins = pm8xxx_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int pm8xxx_get_functions_count(struct pinctrl_dev *pctldev)
@@ -820,7 +820,7 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev)
pctrl->desc.custom_conf_items = pm8xxx_conf_items;
#endif
- pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
+ pctrl->pctrl = devm_pinctrl_register(&pdev->dev, &pctrl->desc, pctrl);
if (IS_ERR(pctrl->pctrl)) {
dev_err(&pdev->dev, "couldn't register pm8xxx mpp driver\n");
return PTR_ERR(pctrl->pctrl);
@@ -836,7 +836,7 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev)
ret = gpiochip_add_data(&pctrl->chip, pctrl);
if (ret) {
dev_err(&pdev->dev, "failed register gpiochip\n");
- goto unregister_pinctrl;
+ return ret;
}
ret = gpiochip_add_pin_range(&pctrl->chip,
@@ -856,9 +856,6 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev)
unregister_gpiochip:
gpiochip_remove(&pctrl->chip);
-unregister_pinctrl:
- pinctrl_unregister(pctrl->pctrl);
-
return ret;
}
@@ -868,8 +865,6 @@ static int pm8xxx_mpp_remove(struct platform_device *pdev)
gpiochip_remove(&pctrl->chip);
- pinctrl_unregister(pctrl->pctrl);
-
return 0;
}
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
index dbbdf652c..fb71fc3e5 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
@@ -789,7 +789,7 @@ static int exynos5440_pinctrl_register(struct platform_device *pdev,
if (ret)
return ret;
- pctl_dev = pinctrl_register(ctrldesc, &pdev->dev, priv);
+ pctl_dev = devm_pinctrl_register(&pdev->dev, ctrldesc, priv);
if (IS_ERR(pctl_dev)) {
dev_err(&pdev->dev, "could not register pinctrl driver\n");
return PTR_ERR(pctl_dev);
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 5cc97f85d..ed0b70881 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -884,7 +884,8 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
if (ret)
return ret;
- drvdata->pctl_dev = pinctrl_register(ctrldesc, &pdev->dev, drvdata);
+ drvdata->pctl_dev = devm_pinctrl_register(&pdev->dev, ctrldesc,
+ drvdata);
if (IS_ERR(drvdata->pctl_dev)) {
dev_err(&pdev->dev, "could not register pinctrl driver\n");
return PTR_ERR(drvdata->pctl_dev);
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index ee0c1f256..9b9cee06e 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -175,6 +175,21 @@ void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned int reg_width,
BUG();
}
+u32 sh_pfc_read_reg(struct sh_pfc *pfc, u32 reg, unsigned int width)
+{
+ return sh_pfc_read_raw_reg(sh_pfc_phys_to_virt(pfc, reg), width);
+}
+
+void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width, u32 data)
+{
+ if (pfc->info->unlock_reg)
+ sh_pfc_write_raw_reg(
+ sh_pfc_phys_to_virt(pfc, pfc->info->unlock_reg), 32,
+ ~data);
+
+ sh_pfc_write_raw_reg(sh_pfc_phys_to_virt(pfc, reg), width, data);
+}
+
static void sh_pfc_config_reg_helper(struct sh_pfc *pfc,
const struct pinmux_cfg_reg *crp,
unsigned int in_pos,
@@ -585,12 +600,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
static int sh_pfc_remove(struct platform_device *pdev)
{
- struct sh_pfc *pfc = platform_get_drvdata(pdev);
-
#ifdef CONFIG_PINCTRL_SH_PFC_GPIO
- sh_pfc_unregister_gpiochip(pfc);
+ sh_pfc_unregister_gpiochip(platform_get_drvdata(pdev));
#endif
- sh_pfc_unregister_pinctrl(pfc);
return 0;
}
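
The new sh_pfc_write_reg() centralises the R-Car write-protection protocol: when the SoC defines an unlock register (PMMR on r8a7790, per the .unlock_reg field below), each protected write must be immediately preceded by writing the bitwise inverse of the data to that register. The helper is equivalent to the open-coded pair:

    /* Open-coded equivalent of sh_pfc_write_reg() with unlock_reg set. */
    sh_pfc_write_raw_reg(sh_pfc_phys_to_virt(pfc, pfc->info->unlock_reg),
    		     32, ~data);			/* arm PMMR */
    sh_pfc_write_raw_reg(sh_pfc_phys_to_virt(pfc, reg), width, data);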
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index 62f53b22a..dc1b2adb2 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -50,18 +50,19 @@ struct sh_pfc {
struct sh_pfc_chip *func;
#endif
- struct sh_pfc_pinctrl *pinctrl;
};
int sh_pfc_register_gpiochip(struct sh_pfc *pfc);
int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc);
int sh_pfc_register_pinctrl(struct sh_pfc *pfc);
-int sh_pfc_unregister_pinctrl(struct sh_pfc *pfc);
u32 sh_pfc_read_raw_reg(void __iomem *mapped_reg, unsigned int reg_width);
void sh_pfc_write_raw_reg(void __iomem *mapped_reg, unsigned int reg_width,
u32 data);
+u32 sh_pfc_read_reg(struct sh_pfc *pfc, u32 reg, unsigned int width);
+void sh_pfc_write_reg(struct sh_pfc *pfc, u32 reg, unsigned int width,
+ u32 data);
int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin);
int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type);
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index a6681b8b1..97dff6a09 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -212,7 +212,7 @@ static int gpio_pin_to_irq(struct gpio_chip *gc, unsigned offset)
}
}
- return -ENOSYS;
+ return 0;
found:
return pfc->irqs[i];
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 0f4d48f94..eed8daa46 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -21,16 +21,21 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <linux/io.h>
#include <linux/kernel.h>
#include "core.h"
#include "sh_pfc.h"
+/*
+ * All pins assigned to GPIO bank 3 can be used for SD interfaces in
+ * which case they support both 3.3V and 1.8V signalling.
+ */
#define CPU_ALL_PORT(fn, sfx) \
PORT_GP_32(0, fn, sfx), \
PORT_GP_30(1, fn, sfx), \
PORT_GP_30(2, fn, sfx), \
- PORT_GP_32(3, fn, sfx), \
+ PORT_GP_CFG_32(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
PORT_GP_32(4, fn, sfx), \
PORT_GP_32(5, fn, sfx)
@@ -4691,6 +4696,47 @@ static const char * const vin3_groups[] = {
"vin3_clk",
};
+#define IOCTRL6 0x8c
+
+static int r8a7790_get_io_voltage(struct sh_pfc *pfc, unsigned int pin)
+{
+ u32 data, mask;
+
+ if (WARN(pin < RCAR_GP_PIN(3, 0) || pin > RCAR_GP_PIN(3, 31), "invalid pin %#x", pin))
+ return -EINVAL;
+
+ data = ioread32(pfc->windows->virt + IOCTRL6);
+ /* Bits in IOCTRL6 are numbered in opposite order to pins */
+ mask = 0x80000000 >> (pin & 0x1f);
+
+ return (data & mask) ? 3300 : 1800;
+}
+
+static int r8a7790_set_io_voltage(struct sh_pfc *pfc, unsigned int pin, u16 mV)
+{
+ u32 data, mask;
+
+ if (WARN(pin < RCAR_GP_PIN(3, 0) || pin > RCAR_GP_PIN(3, 31), "invalid pin %#x", pin))
+ return -EINVAL;
+
+ if (mV != 1800 && mV != 3300)
+ return -EINVAL;
+
+ data = ioread32(pfc->windows->virt + IOCTRL6);
+ /* Bits in IOCTRL6 are numbered in opposite order to pins */
+ mask = 0x80000000 >> (pin & 0x1f);
+
+ if (mV == 3300)
+ data |= mask;
+ else
+ data &= ~mask;
+
+ iowrite32(~data, pfc->windows->virt); /* unlock reg */
+ iowrite32(data, pfc->windows->virt + IOCTRL6);
+
+ return 0;
+}
+
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
@@ -5690,8 +5736,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
+static const struct sh_pfc_soc_operations pinmux_ops = {
+ .get_io_voltage = r8a7790_get_io_voltage,
+ .set_io_voltage = r8a7790_set_io_voltage,
+};
+
const struct sh_pfc_soc_info r8a7790_pinmux_info = {
.name = "r8a77900_pfc",
+ .ops = &pinmux_ops,
.unlock_reg = 0xe6060000, /* PMMR */
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
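
The r8a7790 voltage accessors above hinge on one quirk: IOCTRL6 numbers its bits in the opposite order to the pins, so GP3_0 is controlled by bit 31 and GP3_31 by bit 0, hence mask = 0x80000000 >> (pin & 0x1f); a set bit selects 3.3V, a clear bit 1.8V, and the update is a PMMR-protected read-modify-write. A worked example for GP3_2 (an arbitrary pin chosen for illustration):

    u32 mask = 0x80000000 >> 2;			/* bit 29 <-> GP3_2 */
    data |= mask;				/* request 3.3V signalling */
    iowrite32(~data, pfc->windows->virt);	/* PMMR unlock at window offset 0 */
    iowrite32(data, pfc->windows->virt + IOCTRL6);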
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index 38912cff5..8bc2cf0c5 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -1682,6 +1682,179 @@ static const unsigned int avb_avtp_match_b_pins[] = {
static const unsigned int avb_avtp_match_b_mux[] = {
AVB_AVTP_MATCH_B_MARK,
};
+/* - DU --------------------------------------------------------------------- */
+static const unsigned int du0_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 5),
+ RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 3), RCAR_GP_PIN(2, 2),
+ RCAR_GP_PIN(2, 15), RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 13),
+ RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 10),
+ RCAR_GP_PIN(2, 23), RCAR_GP_PIN(2, 22), RCAR_GP_PIN(2, 21),
+ RCAR_GP_PIN(2, 20), RCAR_GP_PIN(2, 19), RCAR_GP_PIN(2, 18),
+};
+static const unsigned int du0_rgb666_mux[] = {
+ DU0_DR7_MARK, DU0_DR6_MARK, DU0_DR5_MARK, DU0_DR4_MARK,
+ DU0_DR3_MARK, DU0_DR2_MARK,
+ DU0_DG7_MARK, DU0_DG6_MARK, DU0_DG5_MARK, DU0_DG4_MARK,
+ DU0_DG3_MARK, DU0_DG2_MARK,
+ DU0_DB7_MARK, DU0_DB6_MARK, DU0_DB5_MARK, DU0_DB4_MARK,
+ DU0_DB3_MARK, DU0_DB2_MARK,
+};
+static const unsigned int du0_rgb888_pins[] = {
+ /* R[7:0], G[7:0], B[7:0] */
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 5),
+ RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 3), RCAR_GP_PIN(2, 2),
+ RCAR_GP_PIN(2, 1), RCAR_GP_PIN(2, 0),
+ RCAR_GP_PIN(2, 15), RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 13),
+ RCAR_GP_PIN(2, 12), RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 10),
+ RCAR_GP_PIN(2, 9), RCAR_GP_PIN(2, 8),
+ RCAR_GP_PIN(2, 23), RCAR_GP_PIN(2, 22), RCAR_GP_PIN(2, 21),
+ RCAR_GP_PIN(2, 20), RCAR_GP_PIN(2, 19), RCAR_GP_PIN(2, 18),
+ RCAR_GP_PIN(2, 17), RCAR_GP_PIN(2, 16),
+};
+static const unsigned int du0_rgb888_mux[] = {
+ DU0_DR7_MARK, DU0_DR6_MARK, DU0_DR5_MARK, DU0_DR4_MARK,
+ DU0_DR3_MARK, DU0_DR2_MARK, DU0_DR1_MARK, DU0_DR0_MARK,
+ DU0_DG7_MARK, DU0_DG6_MARK, DU0_DG5_MARK, DU0_DG4_MARK,
+ DU0_DG3_MARK, DU0_DG2_MARK, DU0_DG1_MARK, DU0_DG0_MARK,
+ DU0_DB7_MARK, DU0_DB6_MARK, DU0_DB5_MARK, DU0_DB4_MARK,
+ DU0_DB3_MARK, DU0_DB2_MARK, DU0_DB1_MARK, DU0_DB0_MARK,
+};
+static const unsigned int du0_clk0_out_pins[] = {
+ /* DOTCLKOUT0 */
+ RCAR_GP_PIN(2, 25),
+};
+static const unsigned int du0_clk0_out_mux[] = {
+ DU0_DOTCLKOUT0_MARK
+};
+static const unsigned int du0_clk1_out_pins[] = {
+ /* DOTCLKOUT1 */
+ RCAR_GP_PIN(2, 26),
+};
+static const unsigned int du0_clk1_out_mux[] = {
+ DU0_DOTCLKOUT1_MARK
+};
+static const unsigned int du0_clk_in_pins[] = {
+ /* CLKIN */
+ RCAR_GP_PIN(2, 24),
+};
+static const unsigned int du0_clk_in_mux[] = {
+ DU0_DOTCLKIN_MARK
+};
+static const unsigned int du0_sync_pins[] = {
+ /* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
+ RCAR_GP_PIN(2, 28), RCAR_GP_PIN(2, 27),
+};
+static const unsigned int du0_sync_mux[] = {
+ DU0_EXVSYNC_DU0_VSYNC_MARK, DU0_EXHSYNC_DU0_HSYNC_MARK
+};
+static const unsigned int du0_oddf_pins[] = {
+ /* EXODDF/ODDF/DISP/CDE */
+ RCAR_GP_PIN(2, 29),
+};
+static const unsigned int du0_oddf_mux[] = {
+ DU0_EXODDF_DU0_ODDF_DISP_CDE_MARK,
+};
+static const unsigned int du0_cde_pins[] = {
+ /* CDE */
+ RCAR_GP_PIN(2, 31),
+};
+static const unsigned int du0_cde_mux[] = {
+ DU0_CDE_MARK,
+};
+static const unsigned int du0_disp_pins[] = {
+ /* DISP */
+ RCAR_GP_PIN(2, 30),
+};
+static const unsigned int du0_disp_mux[] = {
+ DU0_DISP_MARK
+};
+static const unsigned int du1_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 6), RCAR_GP_PIN(4, 5),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 3), RCAR_GP_PIN(4, 2),
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 13),
+ RCAR_GP_PIN(4, 12), RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 23), RCAR_GP_PIN(4, 22), RCAR_GP_PIN(4, 21),
+ RCAR_GP_PIN(4, 20), RCAR_GP_PIN(4, 19), RCAR_GP_PIN(4, 18),
+};
+static const unsigned int du1_rgb666_mux[] = {
+ DU1_DR7_MARK, DU1_DR6_MARK, DU1_DR5_MARK, DU1_DR4_MARK,
+ DU1_DR3_MARK, DU1_DR2_MARK,
+ DU1_DG7_MARK, DU1_DG6_MARK, DU1_DG5_MARK, DU1_DG4_MARK,
+ DU1_DG3_MARK, DU1_DG2_MARK,
+ DU1_DB7_MARK, DU1_DB6_MARK, DU1_DB5_MARK, DU1_DB4_MARK,
+ DU1_DB3_MARK, DU1_DB2_MARK,
+};
+static const unsigned int du1_rgb888_pins[] = {
+ /* R[7:0], G[7:0], B[7:0] */
+ RCAR_GP_PIN(4, 7), RCAR_GP_PIN(4, 6), RCAR_GP_PIN(4, 5),
+ RCAR_GP_PIN(4, 4), RCAR_GP_PIN(4, 3), RCAR_GP_PIN(4, 2),
+ RCAR_GP_PIN(4, 1), RCAR_GP_PIN(4, 0),
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 13),
+ RCAR_GP_PIN(4, 12), RCAR_GP_PIN(4, 11), RCAR_GP_PIN(4, 10),
+ RCAR_GP_PIN(4, 9), RCAR_GP_PIN(4, 8),
+ RCAR_GP_PIN(4, 23), RCAR_GP_PIN(4, 22), RCAR_GP_PIN(4, 21),
+ RCAR_GP_PIN(4, 20), RCAR_GP_PIN(4, 19), RCAR_GP_PIN(4, 18),
+ RCAR_GP_PIN(4, 17), RCAR_GP_PIN(4, 16),
+};
+static const unsigned int du1_rgb888_mux[] = {
+ DU1_DR7_MARK, DU1_DR6_MARK, DU1_DR5_MARK, DU1_DR4_MARK,
+ DU1_DR3_MARK, DU1_DR2_MARK, DU1_DR1_MARK, DU1_DR0_MARK,
+ DU1_DG7_MARK, DU1_DG6_MARK, DU1_DG5_MARK, DU1_DG4_MARK,
+ DU1_DG3_MARK, DU1_DG2_MARK, DU1_DG1_MARK, DU1_DG0_MARK,
+ DU1_DB7_MARK, DU1_DB6_MARK, DU1_DB5_MARK, DU1_DB4_MARK,
+ DU1_DB3_MARK, DU1_DB2_MARK, DU1_DB1_MARK, DU1_DB0_MARK,
+};
+static const unsigned int du1_clk0_out_pins[] = {
+ /* DOTCLKOUT0 */
+ RCAR_GP_PIN(4, 25),
+};
+static const unsigned int du1_clk0_out_mux[] = {
+ DU1_DOTCLKOUT0_MARK
+};
+static const unsigned int du1_clk1_out_pins[] = {
+ /* DOTCLKOUT1 */
+ RCAR_GP_PIN(4, 26),
+};
+static const unsigned int du1_clk1_out_mux[] = {
+ DU1_DOTCLKOUT1_MARK
+};
+static const unsigned int du1_clk_in_pins[] = {
+ /* DOTCLKIN */
+ RCAR_GP_PIN(4, 24),
+};
+static const unsigned int du1_clk_in_mux[] = {
+ DU1_DOTCLKIN_MARK
+};
+static const unsigned int du1_sync_pins[] = {
+ /* EXVSYNC/VSYNC, EXHSYNC/HSYNC */
+ RCAR_GP_PIN(4, 28), RCAR_GP_PIN(4, 27),
+};
+static const unsigned int du1_sync_mux[] = {
+ DU1_EXVSYNC_DU1_VSYNC_MARK, DU1_EXHSYNC_DU1_HSYNC_MARK
+};
+static const unsigned int du1_oddf_pins[] = {
+ /* EXODDF/ODDF/DISP/CDE */
+ RCAR_GP_PIN(4, 29),
+};
+static const unsigned int du1_oddf_mux[] = {
+ DU1_EXODDF_DU1_ODDF_DISP_CDE_MARK,
+};
+static const unsigned int du1_cde_pins[] = {
+ /* CDE */
+ RCAR_GP_PIN(4, 31),
+};
+static const unsigned int du1_cde_mux[] = {
+ DU1_CDE_MARK
+};
+static const unsigned int du1_disp_pins[] = {
+ /* DISP */
+ RCAR_GP_PIN(4, 30),
+};
+static const unsigned int du1_disp_mux[] = {
+ DU1_DISP_MARK
+};
/* - ETH -------------------------------------------------------------------- */
static const unsigned int eth_link_pins[] = {
/* LINK */
@@ -3364,6 +3537,24 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(avb_avtp_match),
SH_PFC_PIN_GROUP(avb_avtp_capture_b),
SH_PFC_PIN_GROUP(avb_avtp_match_b),
+ SH_PFC_PIN_GROUP(du0_rgb666),
+ SH_PFC_PIN_GROUP(du0_rgb888),
+ SH_PFC_PIN_GROUP(du0_clk0_out),
+ SH_PFC_PIN_GROUP(du0_clk1_out),
+ SH_PFC_PIN_GROUP(du0_clk_in),
+ SH_PFC_PIN_GROUP(du0_sync),
+ SH_PFC_PIN_GROUP(du0_oddf),
+ SH_PFC_PIN_GROUP(du0_cde),
+ SH_PFC_PIN_GROUP(du0_disp),
+ SH_PFC_PIN_GROUP(du1_rgb666),
+ SH_PFC_PIN_GROUP(du1_rgb888),
+ SH_PFC_PIN_GROUP(du1_clk0_out),
+ SH_PFC_PIN_GROUP(du1_clk1_out),
+ SH_PFC_PIN_GROUP(du1_clk_in),
+ SH_PFC_PIN_GROUP(du1_sync),
+ SH_PFC_PIN_GROUP(du1_oddf),
+ SH_PFC_PIN_GROUP(du1_cde),
+ SH_PFC_PIN_GROUP(du1_disp),
SH_PFC_PIN_GROUP(eth_link),
SH_PFC_PIN_GROUP(eth_magic),
SH_PFC_PIN_GROUP(eth_mdio),
@@ -3622,6 +3813,30 @@ static const char * const avb_groups[] = {
"avb_avtp_match_b",
};
+static const char * const du0_groups[] = {
+ "du0_rgb666",
+ "du0_rgb888",
+ "du0_clk0_out",
+ "du0_clk1_out",
+ "du0_clk_in",
+ "du0_sync",
+ "du0_oddf",
+ "du0_cde",
+ "du0_disp",
+};
+
+static const char * const du1_groups[] = {
+ "du1_rgb666",
+ "du1_rgb888",
+ "du1_clk0_out",
+ "du1_clk1_out",
+ "du1_clk_in",
+ "du1_sync",
+ "du1_oddf",
+ "du1_cde",
+ "du1_disp",
+};
+
static const char * const eth_groups[] = {
"eth_link",
"eth_magic",
@@ -3969,6 +4184,8 @@ static const char * const vin1_groups[] = {
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
+ SH_PFC_FUNCTION(du0),
+ SH_PFC_FUNCTION(du1),
SH_PFC_FUNCTION(eth),
SH_PFC_FUNCTION(hscif0),
SH_PFC_FUNCTION(hscif1),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 5979dabc0..44632b1a5 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -14,14 +14,14 @@
#include "sh_pfc.h"
#define CPU_ALL_PORT(fn, sfx) \
- PORT_GP_16(0, fn, sfx), \
- PORT_GP_28(1, fn, sfx), \
- PORT_GP_15(2, fn, sfx), \
- PORT_GP_16(3, fn, sfx), \
- PORT_GP_18(4, fn, sfx), \
- PORT_GP_26(5, fn, sfx), \
- PORT_GP_32(6, fn, sfx), \
- PORT_GP_4(7, fn, sfx)
+ PORT_GP_CFG_16(0, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
+ PORT_GP_CFG_28(1, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
+ PORT_GP_CFG_15(2, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
+ PORT_GP_CFG_16(3, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
+ PORT_GP_CFG_18(4, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
+ PORT_GP_CFG_26(5, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
+ PORT_GP_CFG_32(6, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH), \
+ PORT_GP_CFG_4(7, fn, sfx, SH_PFC_PIN_CFG_DRIVE_STRENGTH)
/*
* F_() : just information
* FM() : macro for FN_xxx / xxx_MARK
@@ -4564,6 +4564,207 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ },
};
+static const struct pinmux_drive_reg pinmux_drive_regs[] = {
+ { PINMUX_DRIVE_REG("DRVCTRL3", 0xe606030c) {
+ { RCAR_GP_PIN(2, 9), 8, 3 }, /* AVB_MDC */
+ { RCAR_GP_PIN(2, 10), 4, 3 }, /* AVB_MAGIC */
+ { RCAR_GP_PIN(2, 11), 0, 3 }, /* AVB_PHY_INT */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL4", 0xe6060310) {
+ { RCAR_GP_PIN(2, 12), 28, 3 }, /* AVB_LINK */
+ { RCAR_GP_PIN(2, 13), 24, 3 }, /* AVB_AVTP_MATCH */
+ { RCAR_GP_PIN(2, 14), 20, 3 }, /* AVB_AVTP_CAPTURE */
+ { RCAR_GP_PIN(2, 0), 16, 3 }, /* IRQ0 */
+ { RCAR_GP_PIN(2, 1), 12, 3 }, /* IRQ1 */
+ { RCAR_GP_PIN(2, 2), 8, 3 }, /* IRQ2 */
+ { RCAR_GP_PIN(2, 3), 4, 3 }, /* IRQ3 */
+ { RCAR_GP_PIN(2, 4), 0, 3 }, /* IRQ4 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL5", 0xe6060314) {
+ { RCAR_GP_PIN(2, 5), 28, 3 }, /* IRQ5 */
+ { RCAR_GP_PIN(2, 6), 24, 3 }, /* PWM0 */
+ { RCAR_GP_PIN(2, 7), 20, 3 }, /* PWM1 */
+ { RCAR_GP_PIN(2, 8), 16, 3 }, /* PWM2 */
+ { RCAR_GP_PIN(1, 0), 12, 3 }, /* A0 */
+ { RCAR_GP_PIN(1, 1), 8, 3 }, /* A1 */
+ { RCAR_GP_PIN(1, 2), 4, 3 }, /* A2 */
+ { RCAR_GP_PIN(1, 3), 0, 3 }, /* A3 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL6", 0xe6060318) {
+ { RCAR_GP_PIN(1, 4), 28, 3 }, /* A4 */
+ { RCAR_GP_PIN(1, 5), 24, 3 }, /* A5 */
+ { RCAR_GP_PIN(1, 6), 20, 3 }, /* A6 */
+ { RCAR_GP_PIN(1, 7), 16, 3 }, /* A7 */
+ { RCAR_GP_PIN(1, 8), 12, 3 }, /* A8 */
+ { RCAR_GP_PIN(1, 9), 8, 3 }, /* A9 */
+ { RCAR_GP_PIN(1, 10), 4, 3 }, /* A10 */
+ { RCAR_GP_PIN(1, 11), 0, 3 }, /* A11 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL7", 0xe606031c) {
+ { RCAR_GP_PIN(1, 12), 28, 3 }, /* A12 */
+ { RCAR_GP_PIN(1, 13), 24, 3 }, /* A13 */
+ { RCAR_GP_PIN(1, 14), 20, 3 }, /* A14 */
+ { RCAR_GP_PIN(1, 15), 16, 3 }, /* A15 */
+ { RCAR_GP_PIN(1, 16), 12, 3 }, /* A16 */
+ { RCAR_GP_PIN(1, 17), 8, 3 }, /* A17 */
+ { RCAR_GP_PIN(1, 18), 4, 3 }, /* A18 */
+ { RCAR_GP_PIN(1, 19), 0, 3 }, /* A19 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL8", 0xe6060320) {
+ { RCAR_GP_PIN(1, 20), 24, 3 }, /* CS0 */
+ { RCAR_GP_PIN(1, 21), 20, 3 }, /* CS1_A26 */
+ { RCAR_GP_PIN(1, 22), 16, 3 }, /* BS */
+ { RCAR_GP_PIN(1, 23), 12, 3 }, /* RD */
+ { RCAR_GP_PIN(1, 24), 8, 3 }, /* RD_WR */
+ { RCAR_GP_PIN(1, 25), 4, 3 }, /* WE0 */
+ { RCAR_GP_PIN(1, 26), 0, 3 }, /* WE1 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL9", 0xe6060324) {
+ { RCAR_GP_PIN(1, 27), 28, 3 }, /* EX_WAIT0 */
+ { RCAR_GP_PIN(0, 0), 20, 3 }, /* D0 */
+ { RCAR_GP_PIN(0, 1), 16, 3 }, /* D1 */
+ { RCAR_GP_PIN(0, 2), 12, 3 }, /* D2 */
+ { RCAR_GP_PIN(0, 3), 8, 3 }, /* D3 */
+ { RCAR_GP_PIN(0, 4), 4, 3 }, /* D4 */
+ { RCAR_GP_PIN(0, 5), 0, 3 }, /* D5 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL10", 0xe6060328) {
+ { RCAR_GP_PIN(0, 6), 28, 3 }, /* D6 */
+ { RCAR_GP_PIN(0, 7), 24, 3 }, /* D7 */
+ { RCAR_GP_PIN(0, 8), 20, 3 }, /* D8 */
+ { RCAR_GP_PIN(0, 9), 16, 3 }, /* D9 */
+ { RCAR_GP_PIN(0, 10), 12, 3 }, /* D10 */
+ { RCAR_GP_PIN(0, 11), 8, 3 }, /* D11 */
+ { RCAR_GP_PIN(0, 12), 4, 3 }, /* D12 */
+ { RCAR_GP_PIN(0, 13), 0, 3 }, /* D13 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL11", 0xe606032c) {
+ { RCAR_GP_PIN(0, 14), 28, 3 }, /* D14 */
+ { RCAR_GP_PIN(0, 15), 24, 3 }, /* D15 */
+ { RCAR_GP_PIN(7, 0), 20, 3 }, /* AVS1 */
+ { RCAR_GP_PIN(7, 1), 16, 3 }, /* AVS2 */
+ { RCAR_GP_PIN(7, 2), 12, 3 }, /* HDMI0_CEC */
+ { RCAR_GP_PIN(7, 3), 8, 3 }, /* HDMI1_CEC */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL13", 0xe6060334) {
+ { RCAR_GP_PIN(3, 0), 20, 3 }, /* SD0_CLK */
+ { RCAR_GP_PIN(3, 1), 16, 3 }, /* SD0_CMD */
+ { RCAR_GP_PIN(3, 2), 12, 3 }, /* SD0_DAT0 */
+ { RCAR_GP_PIN(3, 3), 8, 3 }, /* SD0_DAT1 */
+ { RCAR_GP_PIN(3, 4), 4, 3 }, /* SD0_DAT2 */
+ { RCAR_GP_PIN(3, 5), 0, 3 }, /* SD0_DAT3 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL14", 0xe6060338) {
+ { RCAR_GP_PIN(3, 6), 28, 3 }, /* SD1_CLK */
+ { RCAR_GP_PIN(3, 7), 24, 3 }, /* SD1_CMD */
+ { RCAR_GP_PIN(3, 8), 20, 3 }, /* SD1_DAT0 */
+ { RCAR_GP_PIN(3, 9), 16, 3 }, /* SD1_DAT1 */
+ { RCAR_GP_PIN(3, 10), 12, 3 }, /* SD1_DAT2 */
+ { RCAR_GP_PIN(3, 11), 8, 3 }, /* SD1_DAT3 */
+ { RCAR_GP_PIN(4, 0), 4, 3 }, /* SD2_CLK */
+ { RCAR_GP_PIN(4, 1), 0, 3 }, /* SD2_CMD */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL15", 0xe606033c) {
+ { RCAR_GP_PIN(4, 2), 28, 3 }, /* SD2_DAT0 */
+ { RCAR_GP_PIN(4, 3), 24, 3 }, /* SD2_DAT1 */
+ { RCAR_GP_PIN(4, 4), 20, 3 }, /* SD2_DAT2 */
+ { RCAR_GP_PIN(4, 5), 16, 3 }, /* SD2_DAT3 */
+ { RCAR_GP_PIN(4, 6), 12, 3 }, /* SD2_DS */
+ { RCAR_GP_PIN(4, 7), 8, 3 }, /* SD3_CLK */
+ { RCAR_GP_PIN(4, 8), 4, 3 }, /* SD3_CMD */
+ { RCAR_GP_PIN(4, 9), 0, 3 }, /* SD3_DAT0 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL16", 0xe6060340) {
+ { RCAR_GP_PIN(4, 10), 28, 3 }, /* SD3_DAT1 */
+ { RCAR_GP_PIN(4, 11), 24, 3 }, /* SD3_DAT2 */
+ { RCAR_GP_PIN(4, 12), 20, 3 }, /* SD3_DAT3 */
+ { RCAR_GP_PIN(4, 13), 16, 3 }, /* SD3_DAT4 */
+ { RCAR_GP_PIN(4, 14), 12, 3 }, /* SD3_DAT5 */
+ { RCAR_GP_PIN(4, 15), 8, 3 }, /* SD3_DAT6 */
+ { RCAR_GP_PIN(4, 16), 4, 3 }, /* SD3_DAT7 */
+ { RCAR_GP_PIN(4, 17), 0, 3 }, /* SD3_DS */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL17", 0xe6060344) {
+ { RCAR_GP_PIN(3, 12), 28, 3 }, /* SD0_CD */
+ { RCAR_GP_PIN(3, 13), 24, 3 }, /* SD0_WP */
+ { RCAR_GP_PIN(3, 14), 20, 3 }, /* SD1_CD */
+ { RCAR_GP_PIN(3, 15), 16, 3 }, /* SD1_WP */
+ { RCAR_GP_PIN(5, 0), 12, 3 }, /* SCK0 */
+ { RCAR_GP_PIN(5, 1), 8, 3 }, /* RX0 */
+ { RCAR_GP_PIN(5, 2), 4, 3 }, /* TX0 */
+ { RCAR_GP_PIN(5, 3), 0, 3 }, /* CTS0 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL18", 0xe6060348) {
+ { RCAR_GP_PIN(5, 4), 28, 3 }, /* RTS0_TANS */
+ { RCAR_GP_PIN(5, 5), 24, 3 }, /* RX1 */
+ { RCAR_GP_PIN(5, 6), 20, 3 }, /* TX1 */
+ { RCAR_GP_PIN(5, 7), 16, 3 }, /* CTS1 */
+ { RCAR_GP_PIN(5, 8), 12, 3 }, /* RTS1_TANS */
+ { RCAR_GP_PIN(5, 9), 8, 3 }, /* SCK2 */
+ { RCAR_GP_PIN(5, 10), 4, 3 }, /* TX2 */
+ { RCAR_GP_PIN(5, 11), 0, 3 }, /* RX2 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL19", 0xe606034c) {
+ { RCAR_GP_PIN(5, 12), 28, 3 }, /* HSCK0 */
+ { RCAR_GP_PIN(5, 13), 24, 3 }, /* HRX0 */
+ { RCAR_GP_PIN(5, 14), 20, 3 }, /* HTX0 */
+ { RCAR_GP_PIN(5, 15), 16, 3 }, /* HCTS0 */
+ { RCAR_GP_PIN(5, 16), 12, 3 }, /* HRTS0 */
+ { RCAR_GP_PIN(5, 17), 8, 3 }, /* MSIOF0_SCK */
+ { RCAR_GP_PIN(5, 18), 4, 3 }, /* MSIOF0_SYNC */
+ { RCAR_GP_PIN(5, 19), 0, 3 }, /* MSIOF0_SS1 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL20", 0xe6060350) {
+ { RCAR_GP_PIN(5, 20), 28, 3 }, /* MSIOF0_TXD */
+ { RCAR_GP_PIN(5, 21), 24, 3 }, /* MSIOF0_SS2 */
+ { RCAR_GP_PIN(5, 22), 20, 3 }, /* MSIOF0_RXD */
+ { RCAR_GP_PIN(5, 23), 16, 3 }, /* MLB_CLK */
+ { RCAR_GP_PIN(5, 24), 12, 3 }, /* MLB_SIG */
+ { RCAR_GP_PIN(5, 25), 8, 3 }, /* MLB_DAT */
+ { RCAR_GP_PIN(6, 0), 0, 3 }, /* SSI_SCK01239 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL21", 0xe6060354) {
+ { RCAR_GP_PIN(6, 1), 28, 3 }, /* SSI_WS01239 */
+ { RCAR_GP_PIN(6, 2), 24, 3 }, /* SSI_SDATA0 */
+ { RCAR_GP_PIN(6, 3), 20, 3 }, /* SSI_SDATA1 */
+ { RCAR_GP_PIN(6, 4), 16, 3 }, /* SSI_SDATA2 */
+ { RCAR_GP_PIN(6, 5), 12, 3 }, /* SSI_SCK34 */
+ { RCAR_GP_PIN(6, 6), 8, 3 }, /* SSI_WS34 */
+ { RCAR_GP_PIN(6, 7), 4, 3 }, /* SSI_SDATA3 */
+ { RCAR_GP_PIN(6, 8), 0, 3 }, /* SSI_SCK4 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL22", 0xe6060358) {
+ { RCAR_GP_PIN(6, 9), 28, 3 }, /* SSI_WS4 */
+ { RCAR_GP_PIN(6, 10), 24, 3 }, /* SSI_SDATA4 */
+ { RCAR_GP_PIN(6, 11), 20, 3 }, /* SSI_SCK5 */
+ { RCAR_GP_PIN(6, 12), 16, 3 }, /* SSI_WS5 */
+ { RCAR_GP_PIN(6, 13), 12, 3 }, /* SSI_SDATA5 */
+ { RCAR_GP_PIN(6, 14), 8, 3 }, /* SSI_SCK6 */
+ { RCAR_GP_PIN(6, 15), 4, 3 }, /* SSI_WS6 */
+ { RCAR_GP_PIN(6, 16), 0, 3 }, /* SSI_SDATA6 */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL23", 0xe606035c) {
+ { RCAR_GP_PIN(6, 17), 28, 3 }, /* SSI_SCK78 */
+ { RCAR_GP_PIN(6, 18), 24, 3 }, /* SSI_WS78 */
+ { RCAR_GP_PIN(6, 19), 20, 3 }, /* SSI_SDATA7 */
+ { RCAR_GP_PIN(6, 20), 16, 3 }, /* SSI_SDATA8 */
+ { RCAR_GP_PIN(6, 21), 12, 3 }, /* SSI_SDATA9 */
+ { RCAR_GP_PIN(6, 22), 8, 3 }, /* AUDIO_CLKA */
+ { RCAR_GP_PIN(6, 23), 4, 3 }, /* AUDIO_CLKB */
+ { RCAR_GP_PIN(6, 24), 0, 3 }, /* USB0_PWEN */
+ } },
+ { PINMUX_DRIVE_REG("DRVCTRL24", 0xe6060360) {
+ { RCAR_GP_PIN(6, 25), 28, 3 }, /* USB0_OVC */
+ { RCAR_GP_PIN(6, 26), 24, 3 }, /* USB1_PWEN */
+ { RCAR_GP_PIN(6, 27), 20, 3 }, /* USB1_OVC */
+ { RCAR_GP_PIN(6, 28), 16, 3 }, /* USB30_PWEN */
+ { RCAR_GP_PIN(6, 29), 12, 3 }, /* USB30_OVC */
+ { RCAR_GP_PIN(6, 30), 8, 3 }, /* USB31_PWEN */
+ { RCAR_GP_PIN(6, 31), 4, 3 }, /* USB31_OVC */
+ } },
+ { },
+};
+
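Each DRVCTRL register above packs up to eight per-pin fields at 4-bit strides, with each field 2 or 3 bits wide (only 3-bit fields appear in this table). A stand-alone sketch of the mask covered by one { pin, offset, size } entry (illustrative, not part of the patch):

#include <stdio.h>

/* SD0_CLK in DRVCTRL13 is { RCAR_GP_PIN(3, 0), 20, 3 },
 * i.e. a 3-bit field occupying bits [22:20]. */
int main(void)
{
	unsigned int offset = 20, size = 3;
	unsigned int mask = ((1u << size) - 1) << offset;

	printf("mask = %#010x\n", mask); /* 0x00700000 */
	return 0;
}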
const struct sh_pfc_soc_info r8a7795_pinmux_info = {
.name = "r8a77950_pfc",
.unlock_reg = 0xe6060000, /* PMMR */
@@ -4578,6 +4779,7 @@ const struct sh_pfc_soc_info r8a7795_pinmux_info = {
.nr_functions = ARRAY_SIZE(pinmux_functions),
.cfg_regs = pinmux_config_regs,
+ .drive_regs = pinmux_drive_regs,
.pinmux_data = pinmux_data,
.pinmux_data_size = ARRAY_SIZE(pinmux_data),
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index 87b0a599a..fdb445d68 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -476,6 +476,91 @@ static const struct pinmux_ops sh_pfc_pinmux_ops = {
.gpio_set_direction = sh_pfc_gpio_set_direction,
};
+static u32 sh_pfc_pinconf_find_drive_strength_reg(struct sh_pfc *pfc,
+ unsigned int pin, unsigned int *offset, unsigned int *size)
+{
+ const struct pinmux_drive_reg_field *field;
+ const struct pinmux_drive_reg *reg;
+ unsigned int i;
+
+ for (reg = pfc->info->drive_regs; reg->reg; ++reg) {
+ for (i = 0; i < ARRAY_SIZE(reg->fields); ++i) {
+ field = &reg->fields[i];
+
+ if (field->size && field->pin == pin) {
+ *offset = field->offset;
+ *size = field->size;
+
+ return reg->reg;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int sh_pfc_pinconf_get_drive_strength(struct sh_pfc *pfc,
+ unsigned int pin)
+{
+ unsigned long flags;
+ unsigned int offset;
+ unsigned int size;
+ u32 reg;
+ u32 val;
+
+ reg = sh_pfc_pinconf_find_drive_strength_reg(pfc, pin, &offset, &size);
+ if (!reg)
+ return -EINVAL;
+
+ spin_lock_irqsave(&pfc->lock, flags);
+ val = sh_pfc_read_reg(pfc, reg, 32);
+ spin_unlock_irqrestore(&pfc->lock, flags);
+
+ val = (val >> offset) & GENMASK(size - 1, 0);
+
+ /* Convert the value to mA based on a full drive strength value of 24mA.
+ * We can make the full value configurable later if needed.
+ */
+ return (val + 1) * (size == 2 ? 6 : 3);
+}
+
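The conversion above maps the raw field value to milliamps, assuming the 24 mA full-scale drive the comment mentions: a 2-bit field steps in 6 mA (6, 12, 18, 24) and a 3-bit field in 3 mA (3 through 24). A stand-alone sketch of the mapping (illustrative, not part of the patch):

#include <stdio.h>

/* Mirror of the value-to-mA conversion used by the get/set
 * drive strength helpers, for a 3-bit field. */
int main(void)
{
	unsigned int size = 3;                 /* 3-bit register field */
	unsigned int step = size == 2 ? 6 : 3; /* mA per register LSB  */
	unsigned int val;

	for (val = 0; val < (1u << size); val++)
		printf("reg=%u -> %u mA\n", val, (val + 1) * step);
	return 0;
}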
+static int sh_pfc_pinconf_set_drive_strength(struct sh_pfc *pfc,
+ unsigned int pin, u16 strength)
+{
+ unsigned long flags;
+ unsigned int offset;
+ unsigned int size;
+ unsigned int step;
+ u32 reg;
+ u32 val;
+
+ reg = sh_pfc_pinconf_find_drive_strength_reg(pfc, pin, &offset, &size);
+ if (!reg)
+ return -EINVAL;
+
+ step = size == 2 ? 6 : 3;
+
+ if (strength < step || strength > 24)
+ return -EINVAL;
+
+ /* Convert the value from mA based on a full drive strength value of
+ * 24mA. We can make the full value configurable later if needed.
+ */
+ strength = strength / step - 1;
+
+ spin_lock_irqsave(&pfc->lock, flags);
+
+ val = sh_pfc_read_reg(pfc, reg, 32);
+ val &= ~GENMASK(offset + size - 1, offset);
+ val |= strength << offset;
+
+ sh_pfc_write_reg(pfc, reg, 32, val);
+
+ spin_unlock_irqrestore(&pfc->lock, flags);
+
+ return 0;
+}
+
/* Check whether the requested parameter is supported for a pin. */
static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin,
enum pin_config_param param)
@@ -493,6 +578,9 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin,
case PIN_CONFIG_BIAS_PULL_DOWN:
return pin->configs & SH_PFC_PIN_CFG_PULL_DOWN;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ return pin->configs & SH_PFC_PIN_CFG_DRIVE_STRENGTH;
+
case PIN_CONFIG_POWER_SOURCE:
return pin->configs & SH_PFC_PIN_CFG_IO_VOLTAGE;
@@ -532,6 +620,17 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
break;
}
+ case PIN_CONFIG_DRIVE_STRENGTH: {
+ int ret;
+
+ ret = sh_pfc_pinconf_get_drive_strength(pfc, _pin);
+ if (ret < 0)
+ return ret;
+
+ *config = ret;
+ break;
+ }
+
case PIN_CONFIG_POWER_SOURCE: {
int ret;
@@ -584,6 +683,18 @@ static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned _pin,
break;
+ case PIN_CONFIG_DRIVE_STRENGTH: {
+ unsigned int arg =
+ pinconf_to_config_argument(configs[i]);
+ int ret;
+
+ ret = sh_pfc_pinconf_set_drive_strength(pfc, _pin, arg);
+ if (ret < 0)
+ return ret;
+
+ break;
+ }
+
case PIN_CONFIG_POWER_SOURCE: {
unsigned int arg =
pinconf_to_config_argument(configs[i]);
@@ -678,7 +789,6 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
return -ENOMEM;
pmx->pfc = pfc;
- pfc->pinctrl = pmx;
ret = sh_pfc_map_pins(pfc, pmx);
if (ret < 0)
@@ -692,19 +802,9 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
pmx->pctl_desc.pins = pmx->pins;
pmx->pctl_desc.npins = pfc->info->nr_pins;
- pmx->pctl = pinctrl_register(&pmx->pctl_desc, pfc->dev, pmx);
+ pmx->pctl = devm_pinctrl_register(pfc->dev, &pmx->pctl_desc, pmx);
if (IS_ERR(pmx->pctl))
return PTR_ERR(pmx->pctl);
return 0;
}
-
-int sh_pfc_unregister_pinctrl(struct sh_pfc *pfc)
-{
- struct sh_pfc_pinctrl *pmx = pfc->pinctrl;
-
- pinctrl_unregister(pmx->pctl);
-
- pfc->pinctrl = NULL;
- return 0;
-}
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index a490834e2..656ea32f7 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -28,6 +28,7 @@ enum {
#define SH_PFC_PIN_CFG_PULL_UP (1 << 2)
#define SH_PFC_PIN_CFG_PULL_DOWN (1 << 3)
#define SH_PFC_PIN_CFG_IO_VOLTAGE (1 << 4)
+#define SH_PFC_PIN_CFG_DRIVE_STRENGTH (1 << 5)
#define SH_PFC_PIN_CFG_NO_GPIO (1 << 31)
struct sh_pfc_pin {
@@ -131,6 +132,21 @@ struct pinmux_cfg_reg {
{ var_fw0, var_fwn, 0 }, \
.enum_ids = (const u16 [])
+struct pinmux_drive_reg_field {
+ u16 pin;
+ u8 offset;
+ u8 size;
+};
+
+struct pinmux_drive_reg {
+ u32 reg;
+ const struct pinmux_drive_reg_field fields[8];
+};
+
+#define PINMUX_DRIVE_REG(name, r) \
+ .reg = r, \
+ .fields =
+
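Note that the name argument of PINMUX_DRIVE_REG is purely documentary; the macro drops it and expands into designated initializers. A stand-alone sketch of what one table entry becomes, using minimal stand-in types (RCAR_GP_PIN(2, 9) evaluates to 73 assuming the usual 32 pins per bank):

#include <stdint.h>

/* Stand-ins for pinmux_drive_reg_field / pinmux_drive_reg above. */
struct drive_field { uint16_t pin; uint8_t offset; uint8_t size; };
struct drive_reg   { uint32_t reg; struct drive_field fields[8]; };

/* { PINMUX_DRIVE_REG("DRVCTRL3", 0xe606030c) { ... } } expands to: */
static const struct drive_reg drvctrl3 = {
	.reg = 0xe606030c,	/* the "DRVCTRL3" string never reaches the code */
	.fields = {
		{ 73, 8, 3 },	/* AVB_MDC, pin number per the assumption above */
	},
};

int main(void)
{
	return drvctrl3.fields[0].size == 3 ? 0 : 1;
}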
struct pinmux_data_reg {
u32 reg;
u8 reg_width;
@@ -199,6 +215,7 @@ struct sh_pfc_soc_info {
#endif
const struct pinmux_cfg_reg *cfg_regs;
+ const struct pinmux_drive_reg *drive_regs;
const struct pinmux_data_reg *data_regs;
const u16 *pinmux_data;
@@ -276,7 +293,7 @@ struct sh_pfc_soc_info {
* - msel: Module selector
*/
#define PINMUX_IPSR_MSEL(ipsr, fn, msel) \
- PINMUX_DATA(fn##_MARK, FN_##msel, FN_##ipsr, FN_##fn)
+ PINMUX_DATA(fn##_MARK, FN_##msel, FN_##fn, FN_##ipsr)
/*
* Describe a pinmux configuration for a single-function pin with GPIO
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 3d233fc34..168c0f5d4 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -5798,7 +5798,7 @@ static void atlas7_gpio_handle_irq(struct irq_desc *desc)
status = readl(ATLAS7_GPIO_INT_STATUS(bank));
if (!status) {
- pr_warn("%s: gpio [%s] status %#x no interrupt is flaged\n",
+ pr_warn("%s: gpio [%s] status %#x no interrupt is flagged\n",
__func__, gc->label, status);
handle_bad_irq(desc);
return;
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 0afaf79a4..4db52ba38 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -395,7 +395,7 @@ int spear_pinctrl_probe(struct platform_device *pdev,
spear_pinctrl_desc.pins = machdata->pins;
spear_pinctrl_desc.npins = machdata->npins;
- pmx->pctl = pinctrl_register(&spear_pinctrl_desc, &pdev->dev, pmx);
+ pmx->pctl = devm_pinctrl_register(&pdev->dev, &spear_pinctrl_desc, pmx);
if (IS_ERR(pmx->pctl)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
return PTR_ERR(pmx->pctl);
@@ -403,12 +403,3 @@ int spear_pinctrl_probe(struct platform_device *pdev,
return 0;
}
-
-int spear_pinctrl_remove(struct platform_device *pdev)
-{
- struct spear_pmx *pmx = platform_get_drvdata(pdev);
-
- pinctrl_unregister(pmx->pctl);
-
- return 0;
-}
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index 27c2cc8d8..aa5cf7032 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -197,7 +197,6 @@ void pmx_init_gpio_pingroup_addr(struct spear_gpio_pingroup *gpio_pingroup,
unsigned count, u16 reg);
int spear_pinctrl_probe(struct platform_device *pdev,
struct spear_pinctrl_machdata *machdata);
-int spear_pinctrl_remove(struct platform_device *pdev);
#define SPEAR_PIN_0_TO_101 \
PINCTRL_PIN(0, "PLGPIO0"), \
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index 92611bb75..18210681c 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -2704,18 +2704,12 @@ static int spear1310_pinctrl_probe(struct platform_device *pdev)
return spear_pinctrl_probe(pdev, &spear1310_machdata);
}
-static int spear1310_pinctrl_remove(struct platform_device *pdev)
-{
- return spear_pinctrl_remove(pdev);
-}
-
static struct platform_driver spear1310_pinctrl_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = spear1310_pinctrl_of_match,
},
.probe = spear1310_pinctrl_probe,
- .remove = spear1310_pinctrl_remove,
};
static int __init spear1310_pinctrl_init(void)
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index f842e9dc4..c01fb23ee 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -2020,18 +2020,12 @@ static int spear1340_pinctrl_probe(struct platform_device *pdev)
return spear_pinctrl_probe(pdev, &spear1340_machdata);
}
-static int spear1340_pinctrl_remove(struct platform_device *pdev)
-{
- return spear_pinctrl_remove(pdev);
-}
-
static struct platform_driver spear1340_pinctrl_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = spear1340_pinctrl_of_match,
},
.probe = spear1340_pinctrl_probe,
- .remove = spear1340_pinctrl_remove,
};
static int __init spear1340_pinctrl_init(void)
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index d998a2ccf..111148daa 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -677,18 +677,12 @@ static int spear300_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int spear300_pinctrl_remove(struct platform_device *pdev)
-{
- return spear_pinctrl_remove(pdev);
-}
-
static struct platform_driver spear300_pinctrl_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = spear300_pinctrl_of_match,
},
.probe = spear300_pinctrl_probe,
- .remove = spear300_pinctrl_remove,
};
static int __init spear300_pinctrl_init(void)
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 609b18ace..a7b000062 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -400,18 +400,12 @@ static int spear310_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int spear310_pinctrl_remove(struct platform_device *pdev)
-{
- return spear_pinctrl_remove(pdev);
-}
-
static struct platform_driver spear310_pinctrl_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = spear310_pinctrl_of_match,
},
.probe = spear310_pinctrl_probe,
- .remove = spear310_pinctrl_remove,
};
static int __init spear310_pinctrl_init(void)
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index c07114431..e2b381770 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -3441,18 +3441,12 @@ static int spear320_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int spear320_pinctrl_remove(struct platform_device *pdev)
-{
- return spear_pinctrl_remove(pdev);
-}
-
static struct platform_driver spear320_pinctrl_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = spear320_pinctrl_of_match,
},
.probe = spear320_pinctrl_probe,
- .remove = spear320_pinctrl_remove,
};
static int __init spear320_pinctrl_init(void)
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 8deb566ed..ae9fab82a 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -358,7 +358,7 @@ static int stm32_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
ret = stm32_pctrl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
- pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+ pinctrl_utils_free_map(pctldev, *map, *num_maps);
return ret;
}
}
@@ -396,7 +396,7 @@ static int stm32_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
static const struct pinctrl_ops stm32_pctrl_ops = {
.dt_node_to_map = stm32_pctrl_dt_node_to_map,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
.get_groups_count = stm32_pctrl_get_groups_count,
.get_group_name = stm32_pctrl_get_group_name,
.get_group_pins = stm32_pctrl_get_group_pins,
@@ -454,6 +454,29 @@ static void stm32_pmx_set_mode(struct stm32_gpio_bank *bank,
clk_disable(bank->clk);
}
+static void stm32_pmx_get_mode(struct stm32_gpio_bank *bank,
+ int pin, u32 *mode, u32 *alt)
+{
+ u32 val;
+ int alt_shift = (pin % 8) * 4;
+ int alt_offset = STM32_GPIO_AFRL + (pin / 8) * 4;
+ unsigned long flags;
+
+ clk_enable(bank->clk);
+ spin_lock_irqsave(&bank->lock, flags);
+
+ val = readl_relaxed(bank->base + alt_offset);
+ val &= GENMASK(alt_shift + 3, alt_shift);
+ *alt = val >> alt_shift;
+
+ val = readl_relaxed(bank->base + STM32_GPIO_MODER);
+ val &= GENMASK(pin * 2 + 1, pin * 2);
+ *mode = val >> (pin * 2);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+ clk_disable(bank->clk);
+}
+
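stm32_pmx_get_mode() splits the alternate-function bits across AFRL/AFRH (4 bits per pin, 8 pins per register) and reads 2 mode bits per pin from MODER. A stand-alone sketch of the offset math (illustrative; assumes the conventional STM32 GPIO layout with AFRL at 0x20 and AFRH at 0x24):

#include <stdio.h>

/* Recompute the register offset and shifts for an example pin. */
int main(void)
{
	int pin = 13;

	printf("AFR offset: %#x\n", 0x20 + (pin / 8) * 4);      /* 0x24 (AFRH) */
	printf("AFR shift:  %d\n", (pin % 8) * 4);              /* 20 */
	printf("MODER bits: [%d:%d]\n", pin * 2 + 1, pin * 2);  /* [27:26] */
	return 0;
}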
static int stm32_pmx_set_mux(struct pinctrl_dev *pctldev,
unsigned function,
unsigned group)
@@ -525,6 +548,24 @@ static void stm32_pconf_set_driving(struct stm32_gpio_bank *bank,
clk_disable(bank->clk);
}
+static u32 stm32_pconf_get_driving(struct stm32_gpio_bank *bank,
+ unsigned int offset)
+{
+ unsigned long flags;
+ u32 val;
+
+ clk_enable(bank->clk);
+ spin_lock_irqsave(&bank->lock, flags);
+
+ val = readl_relaxed(bank->base + STM32_GPIO_TYPER);
+ val &= BIT(offset);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+ clk_disable(bank->clk);
+
+ return (val >> offset);
+}
+
static void stm32_pconf_set_speed(struct stm32_gpio_bank *bank,
unsigned offset, u32 speed)
{
@@ -543,6 +584,24 @@ static void stm32_pconf_set_speed(struct stm32_gpio_bank *bank,
clk_disable(bank->clk);
}
+static u32 stm32_pconf_get_speed(struct stm32_gpio_bank *bank,
+ unsigned int offset)
+{
+ unsigned long flags;
+ u32 val;
+
+ clk_enable(bank->clk);
+ spin_lock_irqsave(&bank->lock, flags);
+
+ val = readl_relaxed(bank->base + STM32_GPIO_SPEEDR);
+ val &= GENMASK(offset * 2 + 1, offset * 2);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+ clk_disable(bank->clk);
+
+ return (val >> (offset * 2));
+}
+
static void stm32_pconf_set_bias(struct stm32_gpio_bank *bank,
unsigned offset, u32 bias)
{
@@ -561,6 +620,57 @@ static void stm32_pconf_set_bias(struct stm32_gpio_bank *bank,
clk_disable(bank->clk);
}
+static u32 stm32_pconf_get_bias(struct stm32_gpio_bank *bank,
+ unsigned int offset)
+{
+ unsigned long flags;
+ u32 val;
+
+ clk_enable(bank->clk);
+ spin_lock_irqsave(&bank->lock, flags);
+
+ val = readl_relaxed(bank->base + STM32_GPIO_PUPDR);
+ val &= GENMASK(offset * 2 + 1, offset * 2);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+ clk_disable(bank->clk);
+
+ return (val >> (offset * 2));
+}
+
+static bool stm32_pconf_input_get(struct stm32_gpio_bank *bank,
+ unsigned int offset)
+{
+ unsigned long flags;
+ u32 val;
+
+ clk_enable(bank->clk);
+ spin_lock_irqsave(&bank->lock, flags);
+
+ val = !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+ clk_disable(bank->clk);
+
+ return val;
+}
+
+static bool stm32_pconf_output_get(struct stm32_gpio_bank *bank,
+ unsigned int offset)
+{
+ unsigned long flags;
+ u32 val;
+
+ clk_enable(bank->clk);
+ spin_lock_irqsave(&bank->lock, flags);
+ val = !!(readl_relaxed(bank->base + STM32_GPIO_ODR) & BIT(offset));
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+ clk_disable(bank->clk);
+
+ return val;
+}
+
static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev,
unsigned int pin, enum pin_config_param param,
enum pin_config_param arg)
@@ -634,9 +744,73 @@ static int stm32_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
return 0;
}
+static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned int pin)
+{
+ struct pinctrl_gpio_range *range;
+ struct stm32_gpio_bank *bank;
+ int offset;
+ u32 mode, alt, drive, speed, bias;
+ static const char * const modes[] = {
+ "input", "output", "alternate", "analog" };
+ static const char * const speeds[] = {
+ "low", "medium", "high", "very high" };
+ static const char * const biasing[] = {
+ "floating", "pull up", "pull down", "" };
+ bool val;
+
+ range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin);
+ bank = gpio_range_to_bank(range);
+ offset = stm32_gpio_pin(pin);
+
+ stm32_pmx_get_mode(bank, offset, &mode, &alt);
+ bias = stm32_pconf_get_bias(bank, offset);
+
+ seq_printf(s, "%s ", modes[mode]);
+
+ switch (mode) {
+ /* input */
+ case 0:
+ val = stm32_pconf_input_get(bank, offset);
+ seq_printf(s, "- %s - %s",
+ val ? "high" : "low",
+ biasing[bias]);
+ break;
+
+ /* output */
+ case 1:
+ drive = stm32_pconf_get_driving(bank, offset);
+ speed = stm32_pconf_get_speed(bank, offset);
+ val = stm32_pconf_output_get(bank, offset);
+ seq_printf(s, "- %s - %s - %s - %s %s",
+ val ? "high" : "low",
+ drive ? "open drain" : "push pull",
+ biasing[bias],
+ speeds[speed], "speed");
+ break;
+
+ /* alternate */
+ case 2:
+ drive = stm32_pconf_get_driving(bank, offset);
+ speed = stm32_pconf_get_speed(bank, offset);
+ seq_printf(s, "%d - %s - %s - %s %s", alt,
+ drive ? "open drain" : "push pull",
+ biasing[bias],
+ speeds[speed], "speed");
+ break;
+
+ /* analog */
+ case 3:
+ break;
+ }
+}
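With the dbg_show hook in place, each pin in the pinctrl debugfs listing gains a state summary built from the format strings above. Illustrative output only, with made-up pins and values:

	input - low - pull down
	output - high - push pull - floating - medium speed
	alternate 2 - push pull - pull up - high speed
	analog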
+
static const struct pinconf_ops stm32_pconf_ops = {
.pin_config_group_get = stm32_pconf_group_get,
.pin_config_group_set = stm32_pconf_group_set,
+ .pin_config_dbg_show = stm32_pconf_dbg_show,
};
static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
@@ -813,10 +987,11 @@ int stm32_pctl_probe(struct platform_device *pdev)
pctl->pctl_desc.pmxops = &stm32_pmx_ops;
pctl->dev = &pdev->dev;
- pctl->pctl_dev = pinctrl_register(&pctl->pctl_desc, &pdev->dev, pctl);
- if (!pctl->pctl_dev) {
+ pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, &pctl->pctl_desc,
+ pctl);
+ if (IS_ERR(pctl->pctl_dev)) {
dev_err(&pdev->dev, "Failed pinctrl registration\n");
- return -EINVAL;
+ return PTR_ERR(pctl->pctl_dev);
}
for (i = 0; i < pctl->nbanks; i++)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 3b017dbd2..54455af56 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -933,18 +933,15 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
pctrl_desc->pctlops = &sunxi_pctrl_ops;
pctrl_desc->pmxops = &sunxi_pmx_ops;
- pctl->pctl_dev = pinctrl_register(pctrl_desc,
- &pdev->dev, pctl);
+ pctl->pctl_dev = devm_pinctrl_register(&pdev->dev, pctrl_desc, pctl);
if (IS_ERR(pctl->pctl_dev)) {
dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
return PTR_ERR(pctl->pctl_dev);
}
pctl->chip = devm_kzalloc(&pdev->dev, sizeof(*pctl->chip), GFP_KERNEL);
- if (!pctl->chip) {
- ret = -ENOMEM;
- goto pinctrl_error;
- }
+ if (!pctl->chip)
+ return -ENOMEM;
last_pin = pctl->desc->pins[pctl->desc->npins - 1].pin.number;
pctl->chip->owner = THIS_MODULE;
@@ -966,7 +963,7 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
ret = gpiochip_add_data(pctl->chip, pctl);
if (ret)
- goto pinctrl_error;
+ return ret;
for (i = 0; i < pctl->desc->npins; i++) {
const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
@@ -1044,7 +1041,5 @@ clk_error:
clk_disable_unprepare(clk);
gpiochip_error:
gpiochip_remove(pctl->chip);
-pinctrl_error:
- pinctrl_unregister(pctl->pctl_dev);
return ret;
}
diff --git a/drivers/pinctrl/tegra/Makefile b/drivers/pinctrl/tegra/Makefile
index a927379b6..d9ea2be69 100644
--- a/drivers/pinctrl/tegra/Makefile
+++ b/drivers/pinctrl/tegra/Makefile
@@ -1,4 +1,4 @@
-obj-y += pinctrl-tegra.o
+obj-$(CONFIG_PINCTRL_TEGRA) += pinctrl-tegra.o
obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o
obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
index 2f06029c9..6f68a9eeb 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra-xusb.c
@@ -267,7 +267,7 @@ static const struct pinctrl_ops tegra_xusb_padctl_pinctrl_ops = {
.get_group_name = tegra_xusb_padctl_get_group_name,
.get_group_pins = tegra_xusb_padctl_get_group_pins,
.dt_node_to_map = tegra_xusb_padctl_dt_node_to_map,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int tegra_xusb_padctl_get_functions_count(struct pinctrl_dev *pinctrl)
@@ -873,7 +873,7 @@ static const struct of_device_id tegra_xusb_padctl_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
-static int tegra_xusb_padctl_probe(struct platform_device *pdev)
+int tegra_xusb_padctl_legacy_probe(struct platform_device *pdev)
{
struct tegra_xusb_padctl *padctl;
const struct of_device_id *match;
@@ -914,7 +914,8 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
padctl->desc.confops = &tegra_xusb_padctl_pinconf_ops;
padctl->desc.owner = THIS_MODULE;
- padctl->pinctrl = pinctrl_register(&padctl->desc, &pdev->dev, padctl);
+ padctl->pinctrl = devm_pinctrl_register(&pdev->dev, &padctl->desc,
+ padctl);
if (IS_ERR(padctl->pinctrl)) {
dev_err(&pdev->dev, "failed to register pincontrol\n");
err = PTR_ERR(padctl->pinctrl);
@@ -924,7 +925,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
phy = devm_phy_create(&pdev->dev, NULL, &pcie_phy_ops);
if (IS_ERR(phy)) {
err = PTR_ERR(phy);
- goto unregister;
+ goto reset;
}
padctl->phys[TEGRA_XUSB_PADCTL_PCIE] = phy;
@@ -933,7 +934,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
phy = devm_phy_create(&pdev->dev, NULL, &sata_phy_ops);
if (IS_ERR(phy)) {
err = PTR_ERR(phy);
- goto unregister;
+ goto reset;
}
padctl->phys[TEGRA_XUSB_PADCTL_SATA] = phy;
@@ -944,42 +945,26 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev)
if (IS_ERR(padctl->provider)) {
err = PTR_ERR(padctl->provider);
dev_err(&pdev->dev, "failed to register PHYs: %d\n", err);
- goto unregister;
+ goto reset;
}
return 0;
-unregister:
- pinctrl_unregister(padctl->pinctrl);
reset:
reset_control_assert(padctl->rst);
return err;
}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_legacy_probe);
-static int tegra_xusb_padctl_remove(struct platform_device *pdev)
+int tegra_xusb_padctl_legacy_remove(struct platform_device *pdev)
{
struct tegra_xusb_padctl *padctl = platform_get_drvdata(pdev);
int err;
- pinctrl_unregister(padctl->pinctrl);
-
err = reset_control_assert(padctl->rst);
if (err < 0)
dev_err(&pdev->dev, "failed to assert reset: %d\n", err);
return err;
}
-
-static struct platform_driver tegra_xusb_padctl_driver = {
- .driver = {
- .name = "tegra-xusb-padctl",
- .of_match_table = tegra_xusb_padctl_of_match,
- },
- .probe = tegra_xusb_padctl_probe,
- .remove = tegra_xusb_padctl_remove,
-};
-module_platform_driver(tegra_xusb_padctl_driver);
-
-MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
-MODULE_DESCRIPTION("Tegra 124 XUSB Pad Control driver");
-MODULE_LICENSE("GPL v2");
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_legacy_remove);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 49388822c..6e82b290c 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -215,7 +215,7 @@ static int tegra_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
ret = tegra_pinctrl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
- pinctrl_utils_dt_free_map(pctldev, *map,
+ pinctrl_utils_free_map(pctldev, *map,
*num_maps);
of_node_put(np);
return ret;
@@ -233,7 +233,7 @@ static const struct pinctrl_ops tegra_pinctrl_ops = {
.pin_dbg_show = tegra_pinctrl_pin_dbg_show,
#endif
.dt_node_to_map = tegra_pinctrl_dt_node_to_map,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int tegra_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
@@ -417,7 +417,7 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
return -ENOTSUPP;
}
- if (*reg < 0 || *bit > 31) {
+ if (*reg < 0 || *bit < 0) {
if (report_err) {
const char *prop = "unknown";
int i;
@@ -625,6 +625,22 @@ static struct pinctrl_desc tegra_pinctrl_desc = {
.owner = THIS_MODULE,
};
+static void tegra_pinctrl_clear_parked_bits(struct tegra_pmx *pmx)
+{
+ int i = 0;
+ const struct tegra_pingroup *g;
+ u32 val;
+
+ for (i = 0; i < pmx->soc->ngroups; ++i) {
+ if (pmx->soc->groups[i].parked_reg >= 0) {
+ g = &pmx->soc->groups[i];
+ val = pmx_readl(pmx, g->parked_bank, g->parked_reg);
+ val &= ~(1 << g->parked_bit);
+ pmx_writel(pmx, val, g->parked_bank, g->parked_reg);
+ }
+ }
+}
+
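Of the SoCs touched in this series, only Tegra210 actually defines a parked region (parked_reg = PINGROUP_REG(r), bit 5 in bank 1); every other variant sets parked_reg to -1, so the loop above is a no-op for them. The parked bit appears to latch a pad in its boot-time state, so clearing it at probe hands the pad over to the pinmux configuration. The clear itself is a plain read-modify-write (illustrative sketch, not part of the patch):

#include <stdio.h>

/* Clear the Tegra210 "parked" bit (bit 5) in a register value. */
int main(void)
{
	unsigned int val = 0x10e0;	/* hypothetical register contents */

	val &= ~(1u << 5);
	printf("%#x\n", val);		/* 0x10c0 */
	return 0;
}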
static bool gpio_node_has_range(void)
{
struct device_node *np;
@@ -719,12 +735,14 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
return PTR_ERR(pmx->regs[i]);
}
- pmx->pctl = pinctrl_register(&tegra_pinctrl_desc, &pdev->dev, pmx);
+ pmx->pctl = devm_pinctrl_register(&pdev->dev, &tegra_pinctrl_desc, pmx);
if (IS_ERR(pmx->pctl)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
return PTR_ERR(pmx->pctl);
}
+ tegra_pinctrl_clear_parked_bits(pmx);
+
if (!gpio_node_has_range())
pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
@@ -735,13 +753,3 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
return 0;
}
EXPORT_SYMBOL_GPL(tegra_pinctrl_probe);
-
-int tegra_pinctrl_remove(struct platform_device *pdev)
-{
- struct tegra_pmx *pmx = platform_get_drvdata(pdev);
-
- pinctrl_unregister(pmx->pctl);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(tegra_pinctrl_remove);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h
index 1615db7e3..d2ced1738 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.h
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.h
@@ -93,6 +93,9 @@ struct tegra_function {
* @tri_reg: Tri-state register offset.
* @tri_bank: Tri-state register bank.
* @tri_bit: Tri-state register bit.
+ * @parked_reg: Parked register offset. -1 if unsupported.
+ * @parked_bank: Parked register bank. 0 if unsupported.
+ * @parked_bit: Parked register bit. 0 if unsupported.
* @einput_bit: Enable-input register bit.
* @odrain_bit: Open-drain register bit.
* @lock_bit: Lock register bit.
@@ -135,13 +138,16 @@ struct tegra_pingroup {
s16 pupd_reg;
s16 tri_reg;
s16 drv_reg;
+ s16 parked_reg;
u32 mux_bank:2;
u32 pupd_bank:2;
u32 tri_bank:2;
u32 drv_bank:2;
+ u32 parked_bank:2;
s32 mux_bit:6;
s32 pupd_bit:6;
s32 tri_bit:6;
+ s32 parked_bit:6;
s32 einput_bit:6;
s32 odrain_bit:6;
s32 lock_bit:6;
@@ -189,6 +195,4 @@ struct tegra_pinctrl_soc_data {
int tegra_pinctrl_probe(struct platform_device *pdev,
const struct tegra_pinctrl_soc_data *soc_data);
-int tegra_pinctrl_remove(struct platform_device *pdev);
-
#endif
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra114.c b/drivers/pinctrl/tegra/pinctrl-tegra114.c
index 05e49d513..4851d169f 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra114.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra114.c
@@ -1578,6 +1578,7 @@ static struct tegra_function tegra114_functions[] = {
.lock_bit = 7, \
.ioreset_bit = PINGROUP_BIT_##ior(8), \
.rcv_sel_bit = PINGROUP_BIT_##rcv_sel(9), \
+ .parked_reg = -1, \
.drv_reg = -1, \
}
@@ -1598,6 +1599,7 @@ static struct tegra_function tegra114_functions[] = {
.rcv_sel_bit = -1, \
.drv_reg = DRV_PINGROUP_REG(r), \
.drv_bank = 0, \
+ .parked_reg = -1, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
.lpmd_bit = lpmd_b, \
@@ -1863,7 +1865,6 @@ static struct platform_driver tegra114_pinctrl_driver = {
.of_match_table = tegra114_pinctrl_of_match,
},
.probe = tegra114_pinctrl_probe,
- .remove = tegra_pinctrl_remove,
};
module_platform_driver(tegra114_pinctrl_driver);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra124.c b/drivers/pinctrl/tegra/pinctrl-tegra124.c
index 7cd44c7c2..a0ce723a9 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra124.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra124.c
@@ -1747,6 +1747,7 @@ static struct tegra_function tegra124_functions[] = {
.lock_bit = 7, \
.ioreset_bit = PINGROUP_BIT_##ior(8), \
.rcv_sel_bit = PINGROUP_BIT_##rcv_sel(9), \
+ .parked_reg = -1, \
.drv_reg = -1, \
}
@@ -1767,6 +1768,7 @@ static struct tegra_function tegra124_functions[] = {
.rcv_sel_bit = -1, \
.drv_reg = DRV_PINGROUP_REG(r), \
.drv_bank = 0, \
+ .parked_reg = -1, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
.lpmd_bit = lpmd_b, \
@@ -2075,7 +2077,6 @@ static struct platform_driver tegra124_pinctrl_driver = {
.of_match_table = tegra124_pinctrl_of_match,
},
.probe = tegra124_pinctrl_probe,
- .remove = tegra_pinctrl_remove,
};
module_platform_driver(tegra124_pinctrl_driver);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra20.c b/drivers/pinctrl/tegra/pinctrl-tegra20.c
index 4833db443..09bad6980 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra20.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra20.c
@@ -1994,6 +1994,7 @@ static struct tegra_function tegra20_functions[] = {
.tri_reg = ((tri_r) - TRISTATE_REG_A), \
.tri_bank = 0, \
.tri_bit = tri_b, \
+ .parked_reg = -1, \
.einput_bit = -1, \
.odrain_bit = -1, \
.lock_bit = -1, \
@@ -2013,6 +2014,7 @@ static struct tegra_function tegra20_functions[] = {
.pupd_bank = 2, \
.pupd_bit = pupd_b, \
.drv_reg = -1, \
+ .parked_reg = -1, \
}
/* Pin groups for drive strength registers (configurable version) */
@@ -2028,6 +2030,7 @@ static struct tegra_function tegra20_functions[] = {
.tri_reg = -1, \
.drv_reg = ((r) - PINGROUP_REG_A), \
.drv_bank = 3, \
+ .parked_reg = -1, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
.lpmd_bit = lpmd_b, \
@@ -2242,7 +2245,6 @@ static struct platform_driver tegra20_pinctrl_driver = {
.of_match_table = tegra20_pinctrl_of_match,
},
.probe = tegra20_pinctrl_probe,
- .remove = tegra_pinctrl_remove,
};
module_platform_driver(tegra20_pinctrl_driver);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra210.c b/drivers/pinctrl/tegra/pinctrl-tegra210.c
index 252b46490..2d856af38 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra210.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra210.c
@@ -1310,6 +1310,9 @@ static struct tegra_function tegra210_functions[] = {
.lock_bit = 7, \
.ioreset_bit = -1, \
.rcv_sel_bit = PINGROUP_BIT_##e_io_hv(10), \
+ .parked_reg = PINGROUP_REG(r), \
+ .parked_bank = 1, \
+ .parked_bit = 5, \
.hsm_bit = PINGROUP_BIT_##hsm(9), \
.schmitt_bit = 12, \
.drvtype_bit = PINGROUP_BIT_##drvtype(13), \
@@ -1342,6 +1345,7 @@ static struct tegra_function tegra210_functions[] = {
.rcv_sel_bit = -1, \
.drv_reg = DRV_PINGROUP_REG(r), \
.drv_bank = 0, \
+ .parked_reg = -1, \
.hsm_bit = -1, \
.schmitt_bit = -1, \
.lpmd_bit = -1, \
@@ -1579,7 +1583,6 @@ static struct platform_driver tegra210_pinctrl_driver = {
.of_match_table = tegra210_pinctrl_of_match,
},
.probe = tegra210_pinctrl_probe,
- .remove = tegra_pinctrl_remove,
};
module_platform_driver(tegra210_pinctrl_driver);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra30.c b/drivers/pinctrl/tegra/pinctrl-tegra30.c
index 47b2fd8bb..fb7817fea 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra30.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra30.c
@@ -2139,6 +2139,7 @@ static struct tegra_function tegra30_functions[] = {
.lock_bit = 7, \
.ioreset_bit = PINGROUP_BIT_##ior(8), \
.rcv_sel_bit = -1, \
+ .parked_reg = -1, \
.drv_reg = -1, \
}
@@ -2159,6 +2160,7 @@ static struct tegra_function tegra30_functions[] = {
.rcv_sel_bit = -1, \
.drv_reg = DRV_PINGROUP_REG(r), \
.drv_bank = 0, \
+ .parked_reg = -1, \
.hsm_bit = hsm_b, \
.schmitt_bit = schmitt_b, \
.lpmd_bit = lpmd_b, \
@@ -2498,7 +2500,6 @@ static struct platform_driver tegra30_pinctrl_driver = {
.of_match_table = tegra30_pinctrl_of_match,
},
.probe = tegra30_pinctrl_probe,
- .remove = tegra_pinctrl_remove,
};
module_platform_driver(tegra30_pinctrl_driver);
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
index 589872cc8..967400971 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -115,7 +115,7 @@ static const struct pinctrl_ops uniphier_pctlops = {
.pin_dbg_show = uniphier_pctl_pin_dbg_show,
#endif
.dt_node_to_map = pinconf_generic_dt_node_to_map_all,
- .dt_free_map = pinctrl_utils_dt_free_map,
+ .dt_free_map = pinctrl_utils_free_map,
};
static int uniphier_conf_pin_bias_get(struct pinctrl_dev *pctldev,
@@ -665,7 +665,7 @@ int uniphier_pinctrl_probe(struct platform_device *pdev,
desc->pmxops = &uniphier_pmxops;
desc->confops = &uniphier_confops;
- priv->pctldev = pinctrl_register(desc, dev, priv);
+ priv->pctldev = devm_pinctrl_register(dev, desc, priv);
if (IS_ERR(priv->pctldev)) {
dev_err(dev, "failed to register UniPhier pinctrl driver\n");
return PTR_ERR(priv->pctldev);
@@ -676,13 +676,3 @@ int uniphier_pinctrl_probe(struct platform_device *pdev,
return 0;
}
EXPORT_SYMBOL_GPL(uniphier_pinctrl_probe);
-
-int uniphier_pinctrl_remove(struct platform_device *pdev)
-{
- struct uniphier_pinctrl_priv *priv = platform_get_drvdata(pdev);
-
- pinctrl_unregister(priv->pctldev);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(uniphier_pinctrl_remove);
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
index a7056dccf..4a0439c80 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c
@@ -878,7 +878,6 @@ MODULE_DEVICE_TABLE(of, ph1_ld4_pinctrl_match);
static struct platform_driver ph1_ld4_pinctrl_driver = {
.probe = ph1_ld4_pinctrl_probe,
- .remove = uniphier_pinctrl_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = ph1_ld4_pinctrl_match,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
index 1824831bb..150d33928 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c
@@ -1266,7 +1266,6 @@ MODULE_DEVICE_TABLE(of, ph1_ld6b_pinctrl_match);
static struct platform_driver ph1_ld6b_pinctrl_driver = {
.probe = ph1_ld6b_pinctrl_probe,
- .remove = uniphier_pinctrl_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = ph1_ld6b_pinctrl_match,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
index ec8e92dfa..b1f09e68f 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c
@@ -1552,7 +1552,6 @@ MODULE_DEVICE_TABLE(of, ph1_pro4_pinctrl_match);
static struct platform_driver ph1_pro4_pinctrl_driver = {
.probe = ph1_pro4_pinctrl_probe,
- .remove = uniphier_pinctrl_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = ph1_pro4_pinctrl_match,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
index e3d648eae..3087f7675 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c
@@ -1343,7 +1343,6 @@ MODULE_DEVICE_TABLE(of, ph1_pro5_pinctrl_match);
static struct platform_driver ph1_pro5_pinctrl_driver = {
.probe = ph1_pro5_pinctrl_probe,
- .remove = uniphier_pinctrl_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = ph1_pro5_pinctrl_match,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
index bc00d7591..e868030ff 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c
@@ -1261,7 +1261,6 @@ MODULE_DEVICE_TABLE(of, proxstream2_pinctrl_match);
static struct platform_driver proxstream2_pinctrl_driver = {
.probe = proxstream2_pinctrl_probe,
- .remove = uniphier_pinctrl_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = proxstream2_pinctrl_match,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
index c3700a33a..ceb7a9899 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c
@@ -786,7 +786,6 @@ MODULE_DEVICE_TABLE(of, ph1_sld8_pinctrl_match);
static struct platform_driver ph1_sld8_pinctrl_driver = {
.probe = ph1_sld8_pinctrl_probe,
- .remove = uniphier_pinctrl_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = ph1_sld8_pinctrl_match,
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier.h b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
index e1e98b868..a21154f4b 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier.h
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
@@ -212,6 +212,4 @@ int uniphier_pinctrl_probe(struct platform_device *pdev,
struct pinctrl_desc *desc,
struct uniphier_pinctrl_socdata *socdata);
-int uniphier_pinctrl_remove(struct platform_device *pdev);
-
#endif /* __PINCTRL_UNIPHIER_H__ */
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 5c261bf55..cbc638631 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -583,7 +583,7 @@ int wmt_pinctrl_probe(struct platform_device *pdev,
data->dev = &pdev->dev;
- data->pctl_dev = pinctrl_register(&wmt_desc, &pdev->dev, data);
+ data->pctl_dev = devm_pinctrl_register(&pdev->dev, &wmt_desc, data);
if (IS_ERR(data->pctl_dev)) {
dev_err(&pdev->dev, "Failed to register pinctrl\n");
return PTR_ERR(data->pctl_dev);
@@ -592,7 +592,7 @@ int wmt_pinctrl_probe(struct platform_device *pdev,
err = gpiochip_add_data(&data->gpio_chip, data);
if (err) {
dev_err(&pdev->dev, "could not add GPIO chip\n");
- goto fail_gpio;
+ return err;
}
err = gpiochip_add_pin_range(&data->gpio_chip, dev_name(data->dev),
@@ -606,8 +606,6 @@ int wmt_pinctrl_probe(struct platform_device *pdev,
fail_range:
gpiochip_remove(&data->gpio_chip);
-fail_gpio:
- pinctrl_unregister(data->pctl_dev);
return err;
}
@@ -616,7 +614,6 @@ int wmt_pinctrl_remove(struct platform_device *pdev)
struct wmt_pinctrl_data *data = platform_get_drvdata(pdev);
gpiochip_remove(&data->gpio_chip);
- pinctrl_unregister(data->pctl_dev);
return 0;
}
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index d03df4a60..76bdae1a9 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -64,4 +64,14 @@ config CROS_EC_PROTO
help
ChromeOS EC communication protocol helpers.
+config CROS_KBD_LED_BACKLIGHT
+ tristate "Backlight LED support for Chrome OS keyboards"
+ depends on LEDS_CLASS && ACPI
+ help
+ This option enables support for the keyboard backlight LEDs on
+ select Chrome OS systems.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_kbd_led_backlight.
+
endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index bc498bda8..4f3462783 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -1,8 +1,9 @@
-obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
-obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
-cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \
- cros_ec_lightbar.o cros_ec_vbc.o
-obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o
-obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpc.o
-obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o
+obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
+obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
+cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \
+ cros_ec_lightbar.o cros_ec_vbc.o
+obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o
+obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpc.o
+obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o
+obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 2b441e9ae..e8a44a9bc 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -34,6 +34,7 @@
#define ATMEL_TS_I2C_ADDR 0x4a
#define ATMEL_TS_I2C_BL_ADDR 0x26
#define CYAPA_TP_I2C_ADDR 0x67
+#define ELAN_TP_I2C_ADDR 0x15
#define ISL_ALS_I2C_ADDR 0x44
#define TAOS_ALS_I2C_ADDR 0x29
@@ -73,7 +74,7 @@ struct i2c_peripheral {
int tries;
};
-#define MAX_I2C_PERIPHERALS 3
+#define MAX_I2C_PERIPHERALS 4
struct chromeos_laptop {
struct i2c_peripheral i2c_peripherals[MAX_I2C_PERIPHERALS];
@@ -86,6 +87,11 @@ static struct i2c_board_info cyapa_device = {
.flags = I2C_CLIENT_WAKE,
};
+static struct i2c_board_info elantech_device = {
+ I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+};
+
static struct i2c_board_info isl_als_device = {
I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
};
@@ -306,6 +312,16 @@ static int setup_atmel_224s_tp(enum i2c_adapter_type type)
return (!tp) ? -EAGAIN : 0;
}
+static int setup_elantech_tp(enum i2c_adapter_type type)
+{
+ if (tp)
+ return 0;
+
+ /* add elantech touchpad */
+ tp = add_i2c_device("trackpad", type, &elantech_device);
+ return (!tp) ? -EAGAIN : 0;
+}
+
static int setup_atmel_1664s_ts(enum i2c_adapter_type type)
{
const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
@@ -445,6 +461,8 @@ static struct chromeos_laptop dell_chromebook_11 = {
.i2c_peripherals = {
/* Touchpad. */
{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
+ /* Elan Touchpad option. */
+ { .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 },
},
};
@@ -475,6 +493,8 @@ static struct chromeos_laptop acer_c720 = {
{ .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 },
/* Touchpad. */
{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
+ /* Elan Touchpad option. */
+ { .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 },
/* Light Sensor. */
{ .add = setup_isl29018_als, I2C_ADAPTER_DESIGNWARE_1 },
},
diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
index 34749200e..308a853ac 100644
--- a/drivers/platform/chrome/chromeos_pstore.c
+++ b/drivers/platform/chrome/chromeos_pstore.c
@@ -8,6 +8,7 @@
* the Free Software Foundation, version 2 of the License.
*/
+#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -58,7 +59,7 @@ MODULE_DEVICE_TABLE(dmi, chromeos_pstore_dmi_table);
static struct ramoops_platform_data chromeos_ramoops_data = {
.mem_size = 0x100000,
.mem_address = 0xf00000,
- .record_size = 0x20000,
+ .record_size = 0x40000,
.console_size = 0x20000,
.ftrace_size = 0x20000,
.dump_oops = 1,
@@ -71,9 +72,59 @@ static struct platform_device chromeos_ramoops = {
},
};
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_ramoops_acpi_match[] = {
+ { "GOOG9999", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ramoops_acpi_match);
+
+static struct platform_driver chromeos_ramoops_acpi = {
+ .driver = {
+ .name = "chromeos_pstore",
+ .acpi_match_table = ACPI_PTR(cros_ramoops_acpi_match),
+ },
+};
+
+static int __init chromeos_probe_acpi(struct platform_device *pdev)
+{
+ struct resource *res;
+ resource_size_t len;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ len = resource_size(res);
+ if (!res->start || !len)
+ return -ENOMEM;
+
+ pr_info("chromeos ramoops using acpi device.\n");
+
+ chromeos_ramoops_data.mem_size = len;
+ chromeos_ramoops_data.mem_address = res->start;
+
+ return 0;
+}
+
+static bool __init chromeos_check_acpi(void)
+{
+ if (!platform_driver_probe(&chromeos_ramoops_acpi, chromeos_probe_acpi))
+ return true;
+ return false;
+}
+#else
+static inline bool chromeos_check_acpi(void) { return false; }
+#endif
+
static int __init chromeos_pstore_init(void)
{
- if (dmi_check_system(chromeos_pstore_dmi_table))
+ bool acpi_dev_found;
+
+ /* First check ACPI for non-hardcoded values from firmware. */
+ acpi_dev_found = chromeos_check_acpi();
+
+ if (acpi_dev_found || dmi_check_system(chromeos_pstore_dmi_table))
return platform_device_register(&chromeos_ramoops);
return -ENODEV;
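
chromeos_check_acpi() relies on a property of platform_driver_probe(): it binds at most once, at registration time, and returns 0 only if a matching device actually probed successfully, so the return value doubles as a presence test and the probe routine may live in __init memory. A hedged sketch of the same idiom, with a hypothetical "foo" device:

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int __init foo_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res || !res->start || !resource_size(res))
		return -ENOMEM;

	/* consume res->start / resource_size(res) here */
	return 0;
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
	},
};

static bool __init foo_device_found(void)
{
	/*
	 * Returns 0 iff a device bound; on failure the driver is
	 * unregistered again, so no cleanup is needed here.
	 */
	return platform_driver_probe(&foo_driver, foo_probe) == 0;
}
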
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index d45cd254e..8abd80dbc 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -137,6 +137,10 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
if (copy_from_user(&u_cmd, arg, sizeof(u_cmd)))
return -EFAULT;
+ if ((u_cmd.outsize > EC_MAX_MSG_BYTES) ||
+ (u_cmd.insize > EC_MAX_MSG_BYTES))
+ return -EINVAL;
+
s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
GFP_KERNEL);
if (!s_cmd)
@@ -147,13 +151,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
goto exit;
}
+ if (u_cmd.outsize != s_cmd->outsize ||
+ u_cmd.insize != s_cmd->insize) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
s_cmd->command += ec->cmd_offset;
ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;
- if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
+ if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
ret = -EFAULT;
exit:
kfree(s_cmd);
@@ -208,6 +218,9 @@ static const struct file_operations fops = {
.release = ec_device_release,
.read = ec_device_read,
.unlocked_ioctl = ec_device_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ec_device_ioctl,
+#endif
};
static void __remove(struct device *dev)
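
The size checks added to ec_device_ioctl_xcmd() are double-fetch hardening: the header is read once to bound the allocation, the full structure is then copied again, and the embedded sizes are compared with the first read so a racing user thread cannot inflate them between the two copy_from_user() calls; the copy_to_user() likewise uses the kernel's validated size rather than the user-controlled one. A minimal sketch of the pattern (struct layout and bound are illustrative, not the cros_ec ABI):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#define FOO_MAX_BYTES	544	/* illustrative, like EC_MAX_MSG_BYTES */

struct foo_cmd {
	u32 outsize;
	u32 insize;
	u8 data[];
};

static long foo_ioctl_xcmd(void __user *arg)
{
	struct foo_cmd u_cmd, *s_cmd;
	long ret = 0;

	/* First fetch: header only, to size the allocation safely. */
	if (copy_from_user(&u_cmd, arg, sizeof(u_cmd)))
		return -EFAULT;
	if (u_cmd.outsize > FOO_MAX_BYTES || u_cmd.insize > FOO_MAX_BYTES)
		return -EINVAL;

	s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
			GFP_KERNEL);
	if (!s_cmd)
		return -ENOMEM;

	/* Second fetch: full buffer; userspace may have raced us. */
	if (copy_from_user(s_cmd, arg, sizeof(*s_cmd) + u_cmd.outsize)) {
		ret = -EFAULT;
		goto exit;
	}

	/* Re-check: reject if the sizes changed between the fetches. */
	if (u_cmd.outsize != s_cmd->outsize ||
	    u_cmd.insize != s_cmd->insize) {
		ret = -EINVAL;
		goto exit;
	}

	/* ... perform the transfer using only s_cmd fields ... */
exit:
	kfree(s_cmd);
	return ret;
}
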
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index ff7640575..8df3d447c 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -412,9 +412,13 @@ static umode_t cros_ec_lightbar_attrs_are_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct cros_ec_dev *ec = container_of(dev,
struct cros_ec_dev, class_dev);
- struct platform_device *pdev = container_of(ec->dev,
- struct platform_device, dev);
- if (pdev->id != 0)
+ struct platform_device *pdev = to_platform_device(ec->dev);
+ struct cros_ec_platform *pdata = pdev->dev.platform_data;
+ int is_cros_ec;
+
+ is_cros_ec = strcmp(pdata->ec_name, CROS_EC_DEV_NAME);
+
+ if (is_cros_ec != 0)
return 0;
/* Only instantiate this stuff if the EC has a lightbar */
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 990308ca3..b6e161f71 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -298,8 +298,8 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE;
ec_dev->max_passthru = 0;
ec_dev->pkt_xfer = NULL;
- ec_dev->din_size = EC_MSG_BYTES;
- ec_dev->dout_size = EC_MSG_BYTES;
+ ec_dev->din_size = EC_PROTO2_MSG_BYTES;
+ ec_dev->dout_size = EC_PROTO2_MSG_BYTES;
} else {
/*
* It's possible for a test to occur too early when
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
new file mode 100644
index 000000000..ca3e4da85
--- /dev/null
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -0,0 +1,122 @@
+/*
+ * Keyboard backlight LED driver for Chrome OS.
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/leds.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Keyboard LED ACPI Device must be defined in firmware */
+#define ACPI_KEYBOARD_BACKLIGHT_DEVICE "\\_SB.KBLT"
+#define ACPI_KEYBOARD_BACKLIGHT_READ ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBQC"
+#define ACPI_KEYBOARD_BACKLIGHT_WRITE ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBCM"
+
+#define ACPI_KEYBOARD_BACKLIGHT_MAX 100
+
+static void keyboard_led_set_brightness(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ union acpi_object param;
+ struct acpi_object_list input;
+ acpi_status status;
+
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = brightness;
+ input.count = 1;
+ input.pointer = &param;
+
+ status = acpi_evaluate_object(NULL, ACPI_KEYBOARD_BACKLIGHT_WRITE,
+ &input, NULL);
+ if (ACPI_FAILURE(status))
+ dev_err(cdev->dev, "Error setting keyboard LED value: %d\n",
+ status);
+}
+
+static enum led_brightness
+keyboard_led_get_brightness(struct led_classdev *cdev)
+{
+ unsigned long long brightness;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(NULL, ACPI_KEYBOARD_BACKLIGHT_READ,
+ NULL, &brightness);
+ if (ACPI_FAILURE(status)) {
+ dev_err(cdev->dev, "Error getting keyboard LED value: %d\n",
+ status);
+ return -EIO;
+ }
+
+ return brightness;
+}
+
+static int keyboard_led_probe(struct platform_device *pdev)
+{
+ struct led_classdev *cdev;
+ acpi_handle handle;
+ acpi_status status;
+ int error;
+
+ /* Look for the keyboard LED ACPI Device */
+ status = acpi_get_handle(ACPI_ROOT_OBJECT,
+ ACPI_KEYBOARD_BACKLIGHT_DEVICE,
+ &handle);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&pdev->dev, "Unable to find ACPI device %s: %d\n",
+ ACPI_KEYBOARD_BACKLIGHT_DEVICE, status);
+ return -ENXIO;
+ }
+
+ cdev = devm_kzalloc(&pdev->dev, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ cdev->name = "chromeos::kbd_backlight";
+ cdev->max_brightness = ACPI_KEYBOARD_BACKLIGHT_MAX;
+ cdev->flags |= LED_CORE_SUSPENDRESUME;
+ cdev->brightness_set = keyboard_led_set_brightness;
+ cdev->brightness_get = keyboard_led_get_brightness;
+
+ error = devm_led_classdev_register(&pdev->dev, cdev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct acpi_device_id keyboard_led_id[] = {
+ { "GOOG0002", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, keyboard_led_id);
+
+static struct platform_driver keyboard_led_driver = {
+ .driver = {
+ .name = "chromeos-keyboard-leds",
+ .acpi_match_table = ACPI_PTR(keyboard_led_id),
+ },
+ .probe = keyboard_led_probe,
+};
+module_platform_driver(keyboard_led_driver);
+
+MODULE_AUTHOR("Simon Que <sque@chromium.org>");
+MODULE_DESCRIPTION("ChromeOS Keyboard backlight LED Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:chromeos-keyboard-leds");
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
index 125e56901..b3ae30a4c 100644
--- a/drivers/platform/mips/Kconfig
+++ b/drivers/platform/mips/Kconfig
@@ -15,10 +15,6 @@ menuconfig MIPS_PLATFORM_DEVICES
if MIPS_PLATFORM_DEVICES
-config MIPS_ACPI
- bool
- default y if LOONGSON_MACH3X
-
config CPU_HWMON
tristate "Loongson CPU HWMon Driver"
depends on LOONGSON_MACH3X
diff --git a/drivers/platform/mips/Makefile b/drivers/platform/mips/Makefile
index 43412849b..8dfd03924 100644
--- a/drivers/platform/mips/Makefile
+++ b/drivers/platform/mips/Makefile
@@ -1,2 +1 @@
-obj-$(CONFIG_MIPS_ACPI) += acpi_init.o
obj-$(CONFIG_CPU_HWMON) += cpu_hwmon.o
diff --git a/drivers/platform/mips/acpi_init.c b/drivers/platform/mips/acpi_init.c
deleted file mode 100644
index dbdad79ea..000000000
--- a/drivers/platform/mips/acpi_init.c
+++ /dev/null
@@ -1,150 +0,0 @@
-#include <linux/io.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/export.h>
-
-#define SBX00_ACPI_IO_BASE 0x800
-#define SBX00_ACPI_IO_SIZE 0x100
-
-#define ACPI_PM_EVT_BLK (SBX00_ACPI_IO_BASE + 0x00) /* 4 bytes */
-#define ACPI_PM_CNT_BLK (SBX00_ACPI_IO_BASE + 0x04) /* 2 bytes */
-#define ACPI_PMA_CNT_BLK (SBX00_ACPI_IO_BASE + 0x0F) /* 1 byte */
-#define ACPI_PM_TMR_BLK (SBX00_ACPI_IO_BASE + 0x18) /* 4 bytes */
-#define ACPI_GPE0_BLK (SBX00_ACPI_IO_BASE + 0x10) /* 8 bytes */
-#define ACPI_END (SBX00_ACPI_IO_BASE + 0x80)
-
-#define PM_INDEX 0xCD6
-#define PM_DATA 0xCD7
-#define PM2_INDEX 0xCD0
-#define PM2_DATA 0xCD1
-
-/*
- * SCI interrupt need acpi space, allocate here
- */
-
-static int __init register_acpi_resource(void)
-{
- request_region(SBX00_ACPI_IO_BASE, SBX00_ACPI_IO_SIZE, "acpi");
- return 0;
-}
-
-static void pmio_write_index(u16 index, u8 reg, u8 value)
-{
- outb(reg, index);
- outb(value, index + 1);
-}
-
-static u8 pmio_read_index(u16 index, u8 reg)
-{
- outb(reg, index);
- return inb(index + 1);
-}
-
-void pm_iowrite(u8 reg, u8 value)
-{
- pmio_write_index(PM_INDEX, reg, value);
-}
-EXPORT_SYMBOL(pm_iowrite);
-
-u8 pm_ioread(u8 reg)
-{
- return pmio_read_index(PM_INDEX, reg);
-}
-EXPORT_SYMBOL(pm_ioread);
-
-void pm2_iowrite(u8 reg, u8 value)
-{
- pmio_write_index(PM2_INDEX, reg, value);
-}
-EXPORT_SYMBOL(pm2_iowrite);
-
-u8 pm2_ioread(u8 reg)
-{
- return pmio_read_index(PM2_INDEX, reg);
-}
-EXPORT_SYMBOL(pm2_ioread);
-
-static void acpi_hw_clear_status(void)
-{
- u16 value;
-
- /* PMStatus: Clear WakeStatus/PwrBtnStatus */
- value = inw(ACPI_PM_EVT_BLK);
- value |= (1 << 8 | 1 << 15);
- outw(value, ACPI_PM_EVT_BLK);
-
- /* GPEStatus: Clear all generated events */
- outl(inl(ACPI_GPE0_BLK), ACPI_GPE0_BLK);
-}
-
-void acpi_registers_setup(void)
-{
- u32 value;
-
- /* PM Status Base */
- pm_iowrite(0x20, ACPI_PM_EVT_BLK & 0xff);
- pm_iowrite(0x21, ACPI_PM_EVT_BLK >> 8);
-
- /* PM Control Base */
- pm_iowrite(0x22, ACPI_PM_CNT_BLK & 0xff);
- pm_iowrite(0x23, ACPI_PM_CNT_BLK >> 8);
-
- /* GPM Base */
- pm_iowrite(0x28, ACPI_GPE0_BLK & 0xff);
- pm_iowrite(0x29, ACPI_GPE0_BLK >> 8);
-
- /* ACPI End */
- pm_iowrite(0x2e, ACPI_END & 0xff);
- pm_iowrite(0x2f, ACPI_END >> 8);
-
- /* IO Decode: When AcpiDecodeEnable set, South-Bridge uses the contents
- * of the PM registers at index 0x20~0x2B to decode ACPI I/O address. */
- pm_iowrite(0x0e, 1 << 3);
-
- /* SCI_EN set */
- outw(1, ACPI_PM_CNT_BLK);
-
- /* Enable to generate SCI */
- pm_iowrite(0x10, pm_ioread(0x10) | 1);
-
- /* GPM3/GPM9 enable */
- value = inl(ACPI_GPE0_BLK + 4);
- outl(value | (1 << 14) | (1 << 22), ACPI_GPE0_BLK + 4);
-
- /* Set GPM9 as input */
- pm_iowrite(0x8d, pm_ioread(0x8d) & (~(1 << 1)));
-
- /* Set GPM9 as non-output */
- pm_iowrite(0x94, pm_ioread(0x94) | (1 << 3));
-
- /* GPM3 config ACPI trigger SCIOUT */
- pm_iowrite(0x33, pm_ioread(0x33) & (~(3 << 4)));
-
- /* GPM9 config ACPI trigger SCIOUT */
- pm_iowrite(0x3d, pm_ioread(0x3d) & (~(3 << 2)));
-
- /* GPM3 config falling edge trigger */
- pm_iowrite(0x37, pm_ioread(0x37) & (~(1 << 6)));
-
- /* No wait for STPGNT# in ACPI Sx state */
- pm_iowrite(0x7c, pm_ioread(0x7c) | (1 << 6));
-
- /* Set GPM3 pull-down enable */
- value = pm2_ioread(0xf6);
- value |= ((1 << 7) | (1 << 3));
- pm2_iowrite(0xf6, value);
-
- /* Set GPM9 pull-down enable */
- value = pm2_ioread(0xf8);
- value |= ((1 << 5) | (1 << 1));
- pm2_iowrite(0xf8, value);
-}
-
-int __init sbx00_acpi_init(void)
-{
- register_acpi_resource();
- acpi_registers_setup();
- acpi_hw_clear_status();
-
- return 0;
-}
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index 0f6c63e17..4300a558d 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -20,9 +20,9 @@ int loongson3_cpu_temp(int cpu)
u32 reg;
reg = LOONGSON_CHIPTEMP(cpu);
- if (loongson_sysconf.cputype == Loongson_3A)
+ if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1)
reg = (reg >> 8) & 0xff;
- else if (loongson_sysconf.cputype == Loongson_3B)
+ else
reg = ((reg >> 8) & 0xff) - 100;
return (int)reg * 1000;
@@ -80,13 +80,13 @@ static const struct attribute *hwmon_cputemp2[] = {
static ssize_t cpu0_temp_label(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "CPU 0 Temprature\n");
+ return sprintf(buf, "CPU 0 Temperature\n");
}
static ssize_t cpu1_temp_label(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "CPU 1 Temprature\n");
+ return sprintf(buf, "CPU 1 Temperature\n");
}
static ssize_t get_cpu0_temp(struct device *dev,
@@ -169,7 +169,7 @@ static int __init loongson_hwmon_init(void)
ret = create_sysfs_cputemp_files(&cpu_hwmon_dev->kobj);
if (ret) {
- pr_err("fail to create cpu temprature interface!\n");
+ pr_err("fail to create cpu temperature interface!\n");
goto fail_create_sysfs_cputemp_files;
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 0410f693c..fc7c29c93 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -103,7 +103,6 @@ config DELL_SMBIOS
config DELL_LAPTOP
tristate "Dell Laptop Extras"
- depends on X86
depends on DELL_SMBIOS
depends on DMI
depends on BACKLIGHT_CLASS_DEVICE
@@ -505,7 +504,6 @@ config THINKPAD_ACPI_HOTKEY_POLL
config THINKPAD_EC
tristate
- depends on X86
---help---
This is a low-level driver for accessing the ThinkPad H8S embedded
controller over the LPC bus (not to be confused with the ACPI Embedded
@@ -513,7 +511,6 @@ config THINKPAD_EC
config TP_SMAPI
tristate "ThinkPad SMAPI Support"
- depends on X86
select THINKPAD_EC
default n
help
@@ -525,7 +522,7 @@ config TP_SMAPI
config SENSORS_HDAPS
tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
- depends on INPUT && X86
+ depends on INPUT
select THINKPAD_EC
select INPUT_POLLDEV
default n
@@ -770,7 +767,7 @@ config TOSHIBA_WMI
config ACPI_CMPC
tristate "CMPC Laptop Extras"
- depends on X86 && ACPI
+ depends on ACPI
depends on RFKILL || RFKILL=n
select INPUT
select BACKLIGHT_CLASS_DEVICE
@@ -867,9 +864,21 @@ config INTEL_IMR
If you are running on a Galileo/Quark say Y here.
+config INTEL_PMC_CORE
+ bool "Intel PMC Core driver"
+ depends on PCI
+ ---help---
+ The Intel Platform Controller Hub for Intel Core SoCs provides access
+ to Power Management Controller registers via a PCI interface. This
+ driver can utilize debugging capabilities and supported features as
+ exposed by the Power Management Controller.
+
+ Supported features:
+ - SLP_S0_RESIDENCY counter.
+
config IBM_RTL
tristate "Device driver to enable PRTL support"
- depends on X86 && PCI
+ depends on PCI
---help---
Enable support for IBM Premium Real Time Mode (PRTM).
	  This module will allow you to enter and exit PRTM in the BIOS via
@@ -903,7 +912,6 @@ config XO15_EBOOK
config SAMSUNG_LAPTOP
tristate "Samsung Laptop driver"
- depends on X86
depends on RFKILL || RFKILL = n
depends on ACPI_VIDEO || ACPI_VIDEO = n
depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index a265ca3a3..829f52154 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -71,3 +71,4 @@ obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \
intel_telemetry_pltdrv.o \
intel_telemetry_debugfs.o
+obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 1062fa42f..79d64ea00 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -793,15 +793,6 @@ static acpi_status __init AMW0_find_mailled(void)
return AE_OK;
}
-static int AMW0_set_cap_acpi_check_device_found __initdata;
-
-static acpi_status __init AMW0_set_cap_acpi_check_device_cb(acpi_handle handle,
- u32 level, void *context, void **retval)
-{
- AMW0_set_cap_acpi_check_device_found = 1;
- return AE_OK;
-}
-
static const struct acpi_device_id norfkill_ids[] __initconst = {
{ "VPC2004", 0},
{ "IBM0068", 0},
@@ -816,9 +807,10 @@ static int __init AMW0_set_cap_acpi_check_device(void)
const struct acpi_device_id *id;
for (id = norfkill_ids; id->id[0]; id++)
- acpi_get_devices(id->id, AMW0_set_cap_acpi_check_device_cb,
- NULL, NULL);
- return AMW0_set_cap_acpi_check_device_found;
+ if (acpi_dev_found(id->id))
+ return true;
+
+ return false;
}
static acpi_status __init AMW0_set_capabilities(void)
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index f2b5d0a8a..15f131146 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -771,12 +771,14 @@ static int asus_read_brightness(struct backlight_device *bd)
{
struct asus_laptop *asus = bl_get_data(bd);
unsigned long long value;
- acpi_status rv = AE_OK;
+ acpi_status rv;
rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
NULL, &value);
- if (ACPI_FAILURE(rv))
+ if (ACPI_FAILURE(rv)) {
pr_warn("Error reading brightness\n");
+ return 0;
+ }
return value;
}
@@ -865,7 +867,7 @@ static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
int len = 0;
unsigned long long temp;
char buf[16]; /* enough for all info */
- acpi_status rv = AE_OK;
+ acpi_status rv;
/*
	 * We use the easy way, we don't care about off and count,
@@ -946,11 +948,10 @@ static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
const char *method)
{
int rv, value;
- int out = 0;
rv = parse_arg(buf, count, &value);
- if (rv > 0)
- out = value ? 1 : 0;
+ if (rv <= 0)
+ return rv;
if (write_acpi_int(asus->handle, method, value))
return -ENODEV;
@@ -1265,7 +1266,7 @@ static DEVICE_ATTR_RO(ls_value);
static int asus_gps_status(struct asus_laptop *asus)
{
unsigned long long status;
- acpi_status rv = AE_OK;
+ acpi_status rv;
rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
NULL, &status);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index a96630d52..a26dca364 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -114,6 +114,7 @@ MODULE_LICENSE("GPL");
#define ASUS_WMI_DEVID_LED6 0x00020016
/* Backlight and Brightness */
+#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012
#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
@@ -1730,6 +1731,7 @@ ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD);
ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA);
ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME);
+ASUS_WMI_CREATE_DEVICE_ATTR(als_enable, 0644, ASUS_WMI_DEVID_ALS_ENABLE);
static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1756,6 +1758,7 @@ static struct attribute *platform_attributes[] = {
&dev_attr_cardr.attr,
&dev_attr_touchpad.attr,
&dev_attr_lid_resume.attr,
+ &dev_attr_als_enable.attr,
NULL
};
@@ -1776,6 +1779,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
devid = ASUS_WMI_DEVID_TOUCHPAD;
else if (attr == &dev_attr_lid_resume.attr)
devid = ASUS_WMI_DEVID_LID_RESUME;
+ else if (attr == &dev_attr_als_enable.attr)
+ devid = ASUS_WMI_DEVID_ALS_ENABLE;
if (devid != -1)
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 14fd2ecb0..17b365f26 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -204,30 +204,10 @@ static void eeepc_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
}
}
-static acpi_status eeepc_wmi_parse_device(acpi_handle handle, u32 level,
- void *context, void **retval)
-{
- pr_warn("Found legacy ATKD device (%s)\n", EEEPC_ACPI_HID);
- *(bool *)context = true;
- return AE_CTRL_TERMINATE;
-}
-
-static int eeepc_wmi_check_atkd(void)
-{
- acpi_status status;
- bool found = false;
-
- status = acpi_get_devices(EEEPC_ACPI_HID, eeepc_wmi_parse_device,
- &found, NULL);
-
- if (ACPI_FAILURE(status) || !found)
- return 0;
- return -1;
-}
-
static int eeepc_wmi_probe(struct platform_device *pdev)
{
- if (eeepc_wmi_check_atkd()) {
+ if (acpi_dev_found(EEEPC_ACPI_HID)) {
+ pr_warn("Found legacy ATKD device (%s)\n", EEEPC_ACPI_HID);
pr_warn("WMI device present, but legacy ATKD device is also "
"present and enabled\n");
pr_warn("You probably booted with acpi_osi=\"Linux\" or "
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index ffc84cc7b..ce41bc342 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -69,7 +69,7 @@
#include <linux/kfifo.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
#include <linux/leds.h>
#endif
#include <acpi/video.h>
@@ -100,13 +100,14 @@
/* FUNC interface - responses */
#define UNSUPPORTED_CMD 0x80000000
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
/* FUNC interface - LED control */
#define FUNC_LED_OFF 0x1
#define FUNC_LED_ON 0x30001
#define KEYBOARD_LAMPS 0x100
#define LOGOLAMP_POWERON 0x2000
#define LOGOLAMP_ALWAYS 0x4000
+#define RADIO_LED_ON 0x20
#endif
/* Hotkey details */
@@ -174,13 +175,14 @@ struct fujitsu_hotkey_t {
int rfkill_state;
int logolamp_registered;
int kblamps_registered;
+ int radio_led_registered;
};
static struct fujitsu_hotkey_t *fujitsu_hotkey;
static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
static enum led_brightness logolamp_get(struct led_classdev *cdev);
static void logolamp_set(struct led_classdev *cdev,
enum led_brightness brightness);
@@ -200,6 +202,16 @@ static struct led_classdev kblamps_led = {
.brightness_get = kblamps_get,
.brightness_set = kblamps_set
};
+
+static enum led_brightness radio_led_get(struct led_classdev *cdev);
+static void radio_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness);
+
+static struct led_classdev radio_led = {
+ .name = "fujitsu::radio_led",
+ .brightness_get = radio_led_get,
+ .brightness_set = radio_led_set
+};
#endif
#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
@@ -249,7 +261,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
return value;
}
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
/* LED class callbacks */
static void logolamp_set(struct led_classdev *cdev,
@@ -275,6 +287,15 @@ static void kblamps_set(struct led_classdev *cdev,
call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
}
+static void radio_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ if (brightness >= LED_FULL)
+ call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
+ else
+ call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
+}
+
static enum led_brightness logolamp_get(struct led_classdev *cdev)
{
enum led_brightness brightness = LED_OFF;
@@ -299,6 +320,16 @@ static enum led_brightness kblamps_get(struct led_classdev *cdev)
return brightness;
}
+
+static enum led_brightness radio_led_get(struct led_classdev *cdev)
+{
+ enum led_brightness brightness = LED_OFF;
+
+ if (call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0) & RADIO_LED_ON)
+ brightness = LED_FULL;
+
+ return brightness;
+}
#endif
/* Hardware access for LCD brightness control */
@@ -872,7 +903,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
/* Suspect this is a keymap of the application panel, print it */
pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
result = led_classdev_register(&fujitsu->pf_device->dev,
&logolamp_led);
@@ -895,6 +926,23 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
result);
}
}
+
+ /*
+ * BTNI bit 24 seems to indicate the presence of a radio toggle
+ * button in place of a slide switch, and all such machines appear
+ * to also have an RF LED. Therefore use bit 24 as an indicator
+ * that an RF LED is present.
+ */
+ if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
+ result = led_classdev_register(&fujitsu->pf_device->dev,
+ &radio_led);
+ if (result == 0) {
+ fujitsu_hotkey->radio_led_registered = 1;
+ } else {
+ pr_err("Could not register LED handler for radio LED, error %i\n",
+ result);
+ }
+ }
#endif
return result;
@@ -915,12 +963,15 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device);
struct input_dev *input = fujitsu_hotkey->input;
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
if (fujitsu_hotkey->logolamp_registered)
led_classdev_unregister(&logolamp_led);
if (fujitsu_hotkey->kblamps_registered)
led_classdev_unregister(&kblamps_led);
+
+ if (fujitsu_hotkey->radio_led_registered)
+ led_classdev_unregister(&radio_led);
#endif
input_unregister_device(input);
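
Every defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) pair in fujitsu-laptop becomes IS_ENABLED(CONFIG_LEDS_CLASS), which covers both the built-in and the modular configuration in one macro and, unlike #ifdef, can also be used in ordinary C expressions:

#include <linux/kconfig.h>

/* Old spelling: both the builtin and the module symbol by name. */
#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
/* ... LED support ... */
#endif

/* New spelling: true for CONFIG_LEDS_CLASS=y and =m alike. */
#if IS_ENABLED(CONFIG_LEDS_CLASS)
/* ... LED support ... */
#endif
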
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index be3bc2f4e..d1a091b93 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -48,7 +48,10 @@
#define CFG_CAMERA_BIT (19)
#if IS_ENABLED(CONFIG_ACPI_WMI)
-static const char ideapad_wmi_fnesc_event[] = "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6";
+static const char *const ideapad_wmi_fnesc_events[] = {
+ "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6", /* Yoga 3 */
+ "56322276-8493-4CE8-A783-98C991274F5E", /* Yoga 700 */
+};
#endif
enum {
@@ -93,6 +96,7 @@ struct ideapad_private {
struct dentry *debug;
unsigned long cfg;
bool has_hw_rfkill_switch;
+ const char *fnesc_guid;
};
static bool no_bt_rfkill;
@@ -563,6 +567,7 @@ static void ideapad_sysfs_exit(struct ideapad_private *priv)
static const struct key_entry ideapad_keymap[] = {
{ KE_KEY, 6, { KEY_SWITCHVIDEOMODE } },
{ KE_KEY, 7, { KEY_CAMERA } },
+ { KE_KEY, 8, { KEY_MICMUTE } },
{ KE_KEY, 11, { KEY_F16 } },
{ KE_KEY, 13, { KEY_WLAN } },
{ KE_KEY, 16, { KEY_PROG1 } },
@@ -805,6 +810,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
break;
case 13:
case 11:
+ case 8:
case 7:
case 6:
ideapad_input_report(priv, vpc_bit);
@@ -989,8 +995,16 @@ static int ideapad_acpi_add(struct platform_device *pdev)
ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv);
if (ret)
goto notification_failed;
+
#if IS_ENABLED(CONFIG_ACPI_WMI)
- ret = wmi_install_notify_handler(ideapad_wmi_fnesc_event, ideapad_wmi_notify, priv);
+ for (i = 0; i < ARRAY_SIZE(ideapad_wmi_fnesc_events); i++) {
+ ret = wmi_install_notify_handler(ideapad_wmi_fnesc_events[i],
+ ideapad_wmi_notify, priv);
+ if (ret == AE_OK) {
+ priv->fnesc_guid = ideapad_wmi_fnesc_events[i];
+ break;
+ }
+ }
if (ret != AE_OK && ret != AE_NOT_EXIST)
goto notification_failed_wmi;
#endif
@@ -1020,7 +1034,8 @@ static int ideapad_acpi_remove(struct platform_device *pdev)
int i;
#if IS_ENABLED(CONFIG_ACPI_WMI)
- wmi_remove_notify_handler(ideapad_wmi_fnesc_event);
+ if (priv->fnesc_guid)
+ wmi_remove_notify_handler(priv->fnesc_guid);
#endif
acpi_remove_notify_handler(priv->adev->handle,
ACPI_DEVICE_NOTIFY, ideapad_acpi_notify);
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index 0a919d816..cbe01021c 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -306,33 +306,32 @@ static int sensor_set_auxtrip(acpi_handle handle, int index, int value)
#define to_intel_menlow_attr(_attr) \
container_of(_attr, struct intel_menlow_attribute, attr)
-static ssize_t aux0_show(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static ssize_t aux_show(struct device *dev, struct device_attribute *dev_attr,
+ char *buf, int idx)
{
struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
unsigned long long value;
int result;
- result = sensor_get_auxtrip(attr->handle, 0, &value);
+ result = sensor_get_auxtrip(attr->handle, idx, &value);
return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value));
}
-static ssize_t aux1_show(struct device *dev,
+static ssize_t aux0_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
- struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
- unsigned long long value;
- int result;
-
- result = sensor_get_auxtrip(attr->handle, 1, &value);
+ return aux_show(dev, dev_attr, buf, 0);
+}
- return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value));
+static ssize_t aux1_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return aux_show(dev, dev_attr, buf, 1);
}
-static ssize_t aux0_store(struct device *dev,
- struct device_attribute *dev_attr,
- const char *buf, size_t count)
+static ssize_t aux_store(struct device *dev, struct device_attribute *dev_attr,
+ const char *buf, size_t count, int idx)
{
struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
int value;
@@ -345,27 +344,23 @@ static ssize_t aux0_store(struct device *dev,
if (value < 0)
return -EINVAL;
- result = sensor_set_auxtrip(attr->handle, 0, CELSIUS_TO_DECI_KELVIN(value));
+ result = sensor_set_auxtrip(attr->handle, idx,
+ CELSIUS_TO_DECI_KELVIN(value));
return result ? result : count;
}
-static ssize_t aux1_store(struct device *dev,
+static ssize_t aux0_store(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
- struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
- int value;
- int result;
-
- /*Sanity check; should be a positive integer */
- if (!sscanf(buf, "%d", &value))
- return -EINVAL;
-
- if (value < 0)
- return -EINVAL;
+ return aux_store(dev, dev_attr, buf, count, 0);
+}
- result = sensor_set_auxtrip(attr->handle, 1, CELSIUS_TO_DECI_KELVIN(value));
- return result ? result : count;
+static ssize_t aux1_store(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ return aux_store(dev, dev_attr, buf, count, 1);
}
/* BIOS can enable/disable the thermal user application in dabney platform */
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
new file mode 100644
index 000000000..2776bec89
--- /dev/null
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -0,0 +1,200 @@
+/*
+ * Intel Core SoC Power Management Controller Driver
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ * Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/pmc_core.h>
+
+#include "intel_pmc_core.h"
+
+static struct pmc_dev pmc;
+
+static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), (kernel_ulong_t)NULL },
+ { 0, },
+};
+
+static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
+{
+ return readl(pmcdev->regbase + reg_offset);
+}
+
+static inline u32 pmc_core_adjust_slp_s0_step(u32 value)
+{
+ return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
+}
+
+/**
+ * intel_pmc_slp_s0_counter_read() - Read SLP_S0 residency.
+ * @data: Out param that contains current SLP_S0 count.
+ *
+ * This API currently supports Intel Skylake SoC and Sunrise
+ * Point Platform Controller Hub. Future platform support
+ * should be added for platforms that support low power modes
+ * beyond Package C10 state.
+ *
+ * The SLP_S0_RESIDENCY counter increments in steps of
+ * 100 us, so this function stores the scaled value in
+ * the out parameter @data.
+ *
+ * Return: an error code or 0 on success.
+ */
+int intel_pmc_slp_s0_counter_read(u32 *data)
+{
+ struct pmc_dev *pmcdev = &pmc;
+ u32 value;
+
+ if (!pmcdev->has_slp_s0_res)
+ return -EACCES;
+
+ value = pmc_core_reg_read(pmcdev, SPT_PMC_SLP_S0_RES_COUNTER_OFFSET);
+ *data = pmc_core_adjust_slp_s0_step(value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_pmc_slp_s0_counter_read);
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static int pmc_core_dev_state_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ u32 counter_val;
+
+ counter_val = pmc_core_reg_read(pmcdev,
+ SPT_PMC_SLP_S0_RES_COUNTER_OFFSET);
+ seq_printf(s, "%u\n", pmc_core_adjust_slp_s0_step(counter_val));
+
+ return 0;
+}
+
+static int pmc_core_dev_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_core_dev_state_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_dev_state_ops = {
+ .open = pmc_core_dev_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
+{
+ debugfs_remove_recursive(pmcdev->dbgfs_dir);
+}
+
+static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+{
+ struct dentry *dir, *file;
+
+ dir = debugfs_create_dir("pmc_core", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ pmcdev->dbgfs_dir = dir;
+ file = debugfs_create_file("slp_s0_residency_usec", S_IFREG | S_IRUGO,
+ dir, pmcdev, &pmc_core_dev_state_ops);
+
+ if (!file) {
+ pmc_core_dbgfs_unregister(pmcdev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+#else
+static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+{
+ return 0;
+}
+
+static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static const struct x86_cpu_id intel_pmc_core_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0x4e, X86_FEATURE_MWAIT,
+ (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
+ { X86_VENDOR_INTEL, 6, 0x5e, X86_FEATURE_MWAIT,
+ (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
+ {}
+};
+
+static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct device *ptr_dev = &dev->dev;
+ struct pmc_dev *pmcdev = &pmc;
+ const struct x86_cpu_id *cpu_id;
+ int err;
+
+ cpu_id = x86_match_cpu(intel_pmc_core_ids);
+ if (!cpu_id) {
+ dev_dbg(&dev->dev, "PMC Core: cpuid mismatch.\n");
+ return -EINVAL;
+ }
+
+ err = pcim_enable_device(dev);
+ if (err < 0) {
+ dev_dbg(&dev->dev, "PMC Core: failed to enable Power Management Controller.\n");
+ return err;
+ }
+
+ err = pci_read_config_dword(dev,
+ SPT_PMC_BASE_ADDR_OFFSET,
+ &pmcdev->base_addr);
+ if (err < 0) {
+ dev_dbg(&dev->dev, "PMC Core: failed to read PCI config space.\n");
+ return err;
+ }
+ dev_dbg(&dev->dev, "PMC Core: PWRMBASE is %#x\n", pmcdev->base_addr);
+
+ pmcdev->regbase = devm_ioremap_nocache(ptr_dev,
+ pmcdev->base_addr,
+ SPT_PMC_MMIO_REG_LEN);
+ if (!pmcdev->regbase) {
+ dev_dbg(&dev->dev, "PMC Core: ioremap failed.\n");
+ return -ENOMEM;
+ }
+
+ err = pmc_core_dbgfs_register(pmcdev);
+ if (err < 0) {
+ dev_err(&dev->dev, "PMC Core: debugfs register failed.\n");
+ return err;
+ }
+
+ pmc.has_slp_s0_res = true;
+ return 0;
+}
+
+static struct pci_driver intel_pmc_core_driver = {
+ .name = "intel_pmc_core",
+ .id_table = pmc_pci_ids,
+ .probe = pmc_core_probe,
+};
+
+builtin_pci_driver(intel_pmc_core_driver);
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
new file mode 100644
index 000000000..a9dadaf78
--- /dev/null
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -0,0 +1,51 @@
+/*
+ * Intel Core SoC Power Management Controller Header File
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ * Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef PMC_CORE_H
+#define PMC_CORE_H
+
+/* Sunrise Point Power Management Controller PCI Device ID */
+#define SPT_PMC_PCI_DEVICE_ID 0x9d21
+#define SPT_PMC_BASE_ADDR_OFFSET 0x48
+#define SPT_PMC_SLP_S0_RES_COUNTER_OFFSET 0x13c
+#define SPT_PMC_MMIO_REG_LEN 0x100
+#define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64
+
+/**
+ * struct pmc_dev - pmc device structure
+ * @base_addr: contains pmc base address
+ * @regbase: pointer to io-remapped memory location
+ * @dbgfs_dir: path to debug fs interface
+ * @has_slp_s0_res: flag to indicate whether
+ * SLP_S0 residency counting is available
+ * on a particular platform.
+ *
+ * pmc_dev contains info about power management controller device.
+ */
+struct pmc_dev {
+ u32 base_addr;
+ void __iomem *regbase;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ struct dentry *dbgfs_dir;
+#endif /* CONFIG_DEBUG_FS */
+ bool has_slp_s0_res;
+};
+
+#endif /* PMC_CORE_H */
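
A hedged sketch of a kernel-side consumer of the new export; the caller names are hypothetical, and the helper fails with -EACCES until the PCI probe above has located the PMC:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>

#include <asm/pmc_core.h>

static void foo_report_slp_s0(void)
{
	u32 usec;
	int err;

	/* The returned value is already scaled by the 100 us step. */
	err = intel_pmc_slp_s0_counter_read(&usec);
	if (err)
		pr_debug("SLP_S0 residency unavailable: %d\n", err);
	else
		pr_info("SLP_S0 residency: %u usec\n", usec);
}
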
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 0e73fd10b..63b371d6e 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -30,7 +30,7 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <asm/intel_scu_ipc.h>
#include <linux/device.h>
#include <linux/intel_pmic_gpio.h>
@@ -174,7 +174,7 @@ static int pmic_irq_type(struct irq_data *data, unsigned type)
static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
- struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
+ struct pmic_gpio *pg = gpiochip_get_data(chip);
return pg->irq_base + offset;
}
@@ -279,7 +279,7 @@ static int platform_pmic_gpio_probe(struct platform_device *pdev)
mutex_init(&pg->buslock);
pg->chip.parent = dev;
- retval = gpiochip_add(&pg->chip);
+ retval = gpiochip_add_data(&pg->chip, pg);
if (retval) {
pr_err("Can not add pmic gpio chip\n");
goto err;
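
intel_pmic_gpio is part of the tree-wide move away from container_of(chip, ...) toward the driver-data pointer that gpiochip_add_data() registers and gpiochip_get_data() retrieves, which keeps working even if the gpio_chip is not the first member of the private struct. A minimal sketch with hypothetical names:

#include <linux/gpio/driver.h>

struct foo_gpio {
	struct gpio_chip chip;
	int irq_base;
};

static int foo_to_irq(struct gpio_chip *chip, unsigned int offset)
{
	/* Returns the pointer passed to gpiochip_add_data() below. */
	struct foo_gpio *fg = gpiochip_get_data(chip);

	return fg->irq_base + offset;
}

static int foo_register(struct foo_gpio *fg)
{
	fg->chip.to_irq = foo_to_irq;
	return gpiochip_add_data(&fg->chip, fg);
}
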
diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c
index a695a436a..0d4c3808a 100644
--- a/drivers/platform/x86/intel_telemetry_core.c
+++ b/drivers/platform/x86/intel_telemetry_core.c
@@ -25,7 +25,7 @@
struct telemetry_core_config {
struct telemetry_plt_config *plt_config;
- struct telemetry_core_ops *telem_ops;
+ const struct telemetry_core_ops *telem_ops;
};
static struct telemetry_core_config telm_core_conf;
@@ -95,7 +95,7 @@ static int telemetry_def_reset_events(void)
return 0;
}
-static struct telemetry_core_ops telm_defpltops = {
+static const struct telemetry_core_ops telm_defpltops = {
.set_sampling_period = telemetry_def_set_sampling_period,
.get_sampling_period = telemetry_def_get_sampling_period,
.get_trace_verbosity = telemetry_def_get_trace_verbosity,
@@ -332,7 +332,7 @@ EXPORT_SYMBOL_GPL(telemetry_set_trace_verbosity);
*
* Return: 0 success, < 0 for failure
*/
-int telemetry_set_pltdata(struct telemetry_core_ops *ops,
+int telemetry_set_pltdata(const struct telemetry_core_ops *ops,
struct telemetry_plt_config *pltconfig)
{
if (ops)
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 781bd10ca..09c84a2b1 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -1081,7 +1081,7 @@ out:
return ret;
}
-static struct telemetry_core_ops telm_pltops = {
+static const struct telemetry_core_ops telm_pltops = {
.get_trace_verbosity = telemetry_plt_get_trace_verbosity,
.set_trace_verbosity = telemetry_plt_set_trace_verbosity,
.set_sampling_period = telemetry_plt_set_sampling_period,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index e9caa347a..1dba3598c 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1446,6 +1446,9 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
{
unsigned int i, result, bitmask, handle;
+ if (!handles)
+ return;
+
/* get enabled events and disable them */
sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask);
sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result);
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c
index 700e0fa0e..6505c9770 100644
--- a/drivers/platform/x86/surfacepro3_button.c
+++ b/drivers/platform/x86/surfacepro3_button.c
@@ -24,6 +24,8 @@
#define SURFACE_BUTTON_OBJ_NAME "VGBI"
#define SURFACE_BUTTON_DEVICE_NAME "Surface Pro 3/4 Buttons"
+#define SURFACE_BUTTON_NOTIFY_TABLET_MODE 0xc8
+
#define SURFACE_BUTTON_NOTIFY_PRESS_POWER 0xc6
#define SURFACE_BUTTON_NOTIFY_RELEASE_POWER 0xc7
@@ -33,7 +35,7 @@
#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP 0xc0
#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP 0xc1
-#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2
+#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2
#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN 0xc3
ACPI_MODULE_NAME("surface pro 3 button");
@@ -105,9 +107,12 @@ static void surface_button_notify(struct acpi_device *device, u32 event)
case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN:
key_code = KEY_VOLUMEDOWN;
break;
+ case SURFACE_BUTTON_NOTIFY_TABLET_MODE:
+ dev_warn_once(&device->dev, "Tablet mode is not supported\n");
+ break;
default:
dev_info_ratelimited(&device->dev,
- "Unsupported event [0x%x]\n", event);
+ "Unsupported event [0x%x]\n", event);
break;
}
input = button->input;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 9255ff3ee..b65ce7519 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2043,6 +2043,7 @@ static int hotkey_autosleep_ack;
static u32 hotkey_orig_mask; /* events the BIOS had enabled */
static u32 hotkey_all_mask; /* all events supported in fw */
+static u32 hotkey_adaptive_all_mask; /* all adaptive events supported in fw */
static u32 hotkey_reserved_mask; /* events better left disabled */
static u32 hotkey_driver_mask; /* events needed by the driver */
static u32 hotkey_user_mask; /* events visible to userspace */
@@ -2742,6 +2743,17 @@ static ssize_t hotkey_all_mask_show(struct device *dev,
static DEVICE_ATTR_RO(hotkey_all_mask);
+/* sysfs hotkey all_mask ----------------------------------------------- */
+static ssize_t hotkey_adaptive_all_mask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n",
+ hotkey_adaptive_all_mask | hotkey_source_mask);
+}
+
+static DEVICE_ATTR_RO(hotkey_adaptive_all_mask);
+
/* sysfs hotkey recommended_mask --------------------------------------- */
static ssize_t hotkey_recommended_mask_show(struct device *dev,
struct device_attribute *attr,
@@ -2985,6 +2997,7 @@ static struct attribute *hotkey_attributes[] __initdata = {
&dev_attr_wakeup_hotunplug_complete.attr,
&dev_attr_hotkey_mask.attr,
&dev_attr_hotkey_all_mask.attr,
+ &dev_attr_hotkey_adaptive_all_mask.attr,
&dev_attr_hotkey_recommended_mask.attr,
#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
&dev_attr_hotkey_source_mask.attr,
@@ -3321,20 +3334,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (!tp_features.hotkey)
return 1;
- /*
- * Check if we have an adaptive keyboard, like on the
- * Lenovo Carbon X1 2014 (2nd Gen).
- */
- if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
- if ((hkeyv >> 8) == 2) {
- tp_features.has_adaptive_kbd = true;
- res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
- &adaptive_kbd_attr_group);
- if (res)
- goto err_exit;
- }
- }
-
quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
ARRAY_SIZE(tpacpi_hotkey_qtable));
@@ -3357,30 +3356,70 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking
for HKEY interface version 0x100 */
if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
- if ((hkeyv >> 8) != 1) {
- pr_err("unknown version of the HKEY interface: 0x%x\n",
- hkeyv);
- pr_err("please report this to %s\n", TPACPI_MAIL);
- } else {
+ vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+ "firmware HKEY interface version: 0x%x\n",
+ hkeyv);
+
+ switch (hkeyv >> 8) {
+ case 1:
/*
* MHKV 0x100 in A31, R40, R40e,
* T4x, X31, and later
*/
- vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
- "firmware HKEY interface version: 0x%x\n",
- hkeyv);
/* Paranoia check AND init hotkey_all_mask */
if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
"MHKA", "qd")) {
- pr_err("missing MHKA handler, "
- "please report this to %s\n",
+ pr_err("missing MHKA handler, please report this to %s\n",
+ TPACPI_MAIL);
+ /* Fallback: pre-init for FN+F3,F4,F12 */
+ hotkey_all_mask = 0x080cU;
+ } else {
+ tp_features.hotkey_mask = 1;
+ }
+ break;
+
+ case 2:
+ /*
+ * MHKV 0x200 in X1, T460s, X260, T560, X1 Tablet (2016)
+ */
+
+ /* Paranoia check AND init hotkey_all_mask */
+ if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
+ "MHKA", "dd", 1)) {
+ pr_err("missing MHKA handler, please report this to %s\n",
TPACPI_MAIL);
/* Fallback: pre-init for FN+F3,F4,F12 */
hotkey_all_mask = 0x080cU;
} else {
tp_features.hotkey_mask = 1;
}
+
+ /*
+ * Check if we have an adaptive keyboard, like on the
+ * Lenovo Carbon X1 2014 (2nd Gen).
+ */
+ if (acpi_evalf(hkey_handle, &hotkey_adaptive_all_mask,
+ "MHKA", "dd", 2)) {
+ if (hotkey_adaptive_all_mask != 0) {
+ tp_features.has_adaptive_kbd = true;
+ res = sysfs_create_group(
+ &tpacpi_pdev->dev.kobj,
+ &adaptive_kbd_attr_group);
+ if (res)
+ goto err_exit;
+ }
+ } else {
+ tp_features.has_adaptive_kbd = false;
+ hotkey_adaptive_all_mask = 0x0U;
+ }
+ break;
+
+ default:
+ pr_err("unknown version of the HKEY interface: 0x%x\n",
+ hkeyv);
+ pr_err("please report this to %s\n", TPACPI_MAIL);
+ break;
}
}
@@ -5001,6 +5040,8 @@ static int kbdlight_set_level(int level)
return 0;
}
+static int kbdlight_set_level_and_update(int level);
+
static int kbdlight_get_level(void)
{
int status = 0;
@@ -5068,7 +5109,7 @@ static void kbdlight_set_worker(struct work_struct *work)
container_of(work, struct tpacpi_led_classdev, work);
if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
- kbdlight_set_level(data->new_state);
+ kbdlight_set_level_and_update(data->new_state);
}
static void kbdlight_sysfs_set(struct led_classdev *led_cdev,
@@ -5099,7 +5140,6 @@ static struct tpacpi_led_classdev tpacpi_led_kbdlight = {
.max_brightness = 2,
.brightness_set = &kbdlight_sysfs_set,
.brightness_get = &kbdlight_sysfs_get,
- .flags = LED_CORE_SUSPENDRESUME,
}
};
@@ -5137,6 +5177,20 @@ static void kbdlight_exit(void)
flush_workqueue(tpacpi_wq);
}
+static int kbdlight_set_level_and_update(int level)
+{
+ int ret;
+ struct led_classdev *led_cdev;
+
+ ret = kbdlight_set_level(level);
+ led_cdev = &tpacpi_led_kbdlight.led_classdev;
+
+ if (ret == 0 && !(led_cdev->flags & LED_SUSPENDED))
+ led_cdev->brightness = level;
+
+ return ret;
+}
+
static int kbdlight_read(struct seq_file *m)
{
int level;
@@ -5177,13 +5231,35 @@ static int kbdlight_write(char *buf)
if (level == -1)
return -EINVAL;
- return kbdlight_set_level(level);
+ return kbdlight_set_level_and_update(level);
+}
+
+static void kbdlight_suspend(void)
+{
+ struct led_classdev *led_cdev;
+
+ if (!tp_features.kbdlight)
+ return;
+
+ led_cdev = &tpacpi_led_kbdlight.led_classdev;
+ led_update_brightness(led_cdev);
+ led_classdev_suspend(led_cdev);
+}
+
+static void kbdlight_resume(void)
+{
+ if (!tp_features.kbdlight)
+ return;
+
+ led_classdev_resume(&tpacpi_led_kbdlight.led_classdev);
}
static struct ibm_struct kbdlight_driver_data = {
.name = "kbdlight",
.read = kbdlight_read,
.write = kbdlight_write,
+ .suspend = kbdlight_suspend,
+ .resume = kbdlight_resume,
.exit = kbdlight_exit,
};
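
Dropping LED_CORE_SUSPENDRESUME in favour of explicit hooks lets thinkpad_acpi snapshot the firmware-owned brightness before the LED core forces the LED off for suspend, while kbdlight_set_level_and_update() keeps the cached brightness honest when the level changes behind the core's back. A sketch of the suspend/resume pairing, assuming a hypothetical firmware-managed LED:

#include <linux/leds.h>

static struct led_classdev foo_led;	/* registered elsewhere */

static void foo_suspend(void)
{
	/* Pull the current hardware state into the cached brightness... */
	led_update_brightness(&foo_led);
	/* ...then let the core switch the LED off and set LED_SUSPENDED. */
	led_classdev_suspend(&foo_led);
}

static void foo_resume(void)
{
	/* Restores the brightness captured at suspend time. */
	led_classdev_resume(&foo_led);
}
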
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index eb391a281..ceeb8c188 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -37,6 +37,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/uuid.h>
ACPI_MODULE_NAME("wmi");
MODULE_AUTHOR("Carlos Corbacho");
@@ -115,100 +116,21 @@ static struct acpi_driver acpi_wmi_driver = {
* GUID parsing functions
*/
-/**
- * wmi_parse_hexbyte - Convert a ASCII hex number to a byte
- * @src: Pointer to at least 2 characters to convert.
- *
- * Convert a two character ASCII hex string to a number.
- *
- * Return: 0-255 Success, the byte was parsed correctly
- * -1 Error, an invalid character was supplied
- */
-static int wmi_parse_hexbyte(const u8 *src)
-{
- int h;
- int value;
-
- /* high part */
- h = value = hex_to_bin(src[0]);
- if (value < 0)
- return -1;
-
- /* low part */
- value = hex_to_bin(src[1]);
- if (value >= 0)
- return (h << 4) | value;
- return -1;
-}
-
-/**
- * wmi_swap_bytes - Rearrange GUID bytes to match GUID binary
- * @src: Memory block holding binary GUID (16 bytes)
- * @dest: Memory block to hold byte swapped binary GUID (16 bytes)
- *
- * Byte swap a binary GUID to match it's real GUID value
- */
-static void wmi_swap_bytes(u8 *src, u8 *dest)
-{
- int i;
-
- for (i = 0; i <= 3; i++)
- memcpy(dest + i, src + (3 - i), 1);
-
- for (i = 0; i <= 1; i++)
- memcpy(dest + 4 + i, src + (5 - i), 1);
-
- for (i = 0; i <= 1; i++)
- memcpy(dest + 6 + i, src + (7 - i), 1);
-
- memcpy(dest + 8, src + 8, 8);
-}
-
-/**
- * wmi_parse_guid - Convert GUID from ASCII to binary
- * @src: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
- * @dest: Memory block to hold binary GUID (16 bytes)
- *
- * N.B. The GUID need not be NULL terminated.
- *
- * Return: 'true' @dest contains binary GUID
- * 'false' @dest contents are undefined
- */
-static bool wmi_parse_guid(const u8 *src, u8 *dest)
-{
- static const int size[] = { 4, 2, 2, 2, 6 };
- int i, j, v;
-
- if (src[8] != '-' || src[13] != '-' ||
- src[18] != '-' || src[23] != '-')
- return false;
-
- for (j = 0; j < 5; j++, src++) {
- for (i = 0; i < size[j]; i++, src += 2, *dest++ = v) {
- v = wmi_parse_hexbyte(src);
- if (v < 0)
- return false;
- }
- }
-
- return true;
-}
-
static bool find_guid(const char *guid_string, struct wmi_block **out)
{
- char tmp[16], guid_input[16];
+ uuid_le guid_input;
struct wmi_block *wblock;
struct guid_block *block;
struct list_head *p;
- wmi_parse_guid(guid_string, tmp);
- wmi_swap_bytes(tmp, guid_input);
+ if (uuid_le_to_bin(guid_string, &guid_input))
+ return false;
list_for_each(p, &wmi_block_list) {
wblock = list_entry(p, struct wmi_block, list);
block = &wblock->gblock;
- if (memcmp(block->guid, guid_input, 16) == 0) {
+ if (memcmp(block->guid, &guid_input, 16) == 0) {
if (out)
*out = wblock;
return true;
@@ -498,20 +420,20 @@ wmi_notify_handler handler, void *data)
{
struct wmi_block *block;
acpi_status status = AE_NOT_EXIST;
- char tmp[16], guid_input[16];
+ uuid_le guid_input;
struct list_head *p;
if (!guid || !handler)
return AE_BAD_PARAMETER;
- wmi_parse_guid(guid, tmp);
- wmi_swap_bytes(tmp, guid_input);
+ if (uuid_le_to_bin(guid, &guid_input))
+ return AE_BAD_PARAMETER;
list_for_each(p, &wmi_block_list) {
acpi_status wmi_status;
block = list_entry(p, struct wmi_block, list);
- if (memcmp(block->gblock.guid, guid_input, 16) == 0) {
+ if (memcmp(block->gblock.guid, &guid_input, 16) == 0) {
if (block->handler &&
block->handler != wmi_notify_debug)
return AE_ALREADY_ACQUIRED;
@@ -539,20 +461,20 @@ acpi_status wmi_remove_notify_handler(const char *guid)
{
struct wmi_block *block;
acpi_status status = AE_NOT_EXIST;
- char tmp[16], guid_input[16];
+ uuid_le guid_input;
struct list_head *p;
if (!guid)
return AE_BAD_PARAMETER;
- wmi_parse_guid(guid, tmp);
- wmi_swap_bytes(tmp, guid_input);
+ if (uuid_le_to_bin(guid, &guid_input))
+ return AE_BAD_PARAMETER;
list_for_each(p, &wmi_block_list) {
acpi_status wmi_status;
block = list_entry(p, struct wmi_block, list);
- if (memcmp(block->gblock.guid, guid_input, 16) == 0) {
+ if (memcmp(block->gblock.guid, &guid_input, 16) == 0) {
if (!block->handler ||
block->handler == wmi_notify_debug)
return AE_NULL_ENTRY;
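
All three hand-rolled helpers in wmi.c fall away because lib/uuid now provides uuid_le_to_bin(), which parses the canonical 36-character string directly into the byte order WMI GUID blocks store and returns -EINVAL on malformed input instead of leaving the output undefined. Hedged usage:

#include <linux/string.h>
#include <linux/uuid.h>

static bool guid_matches(const char *guid_string, const u8 stored[16])
{
	uuid_le guid;

	/* Rejects anything that is not canonical GUID form. */
	if (uuid_le_to_bin(guid_string, &guid))
		return false;

	return memcmp(stored, &guid, sizeof(guid)) == 0;
}
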
diff --git a/drivers/pnp/pnpbios/Kconfig b/drivers/pnp/pnpbios/Kconfig
index 50c3dd065..a786086b2 100644
--- a/drivers/pnp/pnpbios/Kconfig
+++ b/drivers/pnp/pnpbios/Kconfig
@@ -3,7 +3,7 @@
#
config PNPBIOS
bool "Plug and Play BIOS support"
- depends on ISA && X86
+ depends on ISA && X86_32
default n
---help---
Linux uses the PNPBIOS as defined in "Plug and Play BIOS
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index facd43b85..81603d990 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -521,10 +521,11 @@ static int __init pnpbios_init(void)
int ret;
if (pnpbios_disabled || dmi_check_system(pnpbios_dmi_table) ||
- paravirt_enabled()) {
+ arch_pnpbios_disabled()) {
printk(KERN_INFO "PnPBIOS: Disabled\n");
return -ENODEV;
}
+
#ifdef CONFIG_PNPACPI
if (!acpi_disabled && !pnpacpi_disabled) {
pnpbios_disabled = 1;
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 898638271..01b6d3f9b 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -336,6 +336,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
struct rockchip_iodomain *iod;
+ struct device *parent;
int i, ret = 0;
if (!np)
@@ -351,7 +352,14 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
match = of_match_node(rockchip_iodomain_match, np);
iod->soc_data = (struct rockchip_iodomain_soc_data *)match->data;
- iod->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ parent = pdev->dev.parent;
+ if (parent && parent->of_node) {
+ iod->grf = syscon_node_to_regmap(parent->of_node);
+ } else {
+ dev_dbg(&pdev->dev, "falling back to old binding\n");
+ iod->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ }
+
if (IS_ERR(iod->grf)) {
dev_err(&pdev->dev, "couldn't find grf regmap\n");
return PTR_ERR(iod->grf);
diff --git a/drivers/power/ipaq_micro_battery.c b/drivers/power/ipaq_micro_battery.c
index 3f314b1a3..35b01c7d7 100644
--- a/drivers/power/ipaq_micro_battery.c
+++ b/drivers/power/ipaq_micro_battery.c
@@ -261,7 +261,7 @@ static int micro_batt_probe(struct platform_device *pdev)
return 0;
ac_err:
- power_supply_unregister(micro_ac_power);
+ power_supply_unregister(micro_batt_power);
batt_err:
cancel_delayed_work_sync(&mb->update);
destroy_workqueue(mb->wq);
diff --git a/drivers/power/max8925_power.c b/drivers/power/max8925_power.c
index 57eb5c2bf..3b94620ce 100644
--- a/drivers/power/max8925_power.c
+++ b/drivers/power/max8925_power.c
@@ -540,14 +540,14 @@ static int max8925_power_probe(struct platform_device *pdev)
info->usb = power_supply_register(&pdev->dev, &usb_desc, &psy_cfg);
if (IS_ERR(info->usb)) {
ret = PTR_ERR(info->usb);
- goto out_usb;
+ goto out_unregister_ac;
}
info->usb->dev.parent = &pdev->dev;
info->battery = power_supply_register(&pdev->dev, &battery_desc, NULL);
if (IS_ERR(info->battery)) {
ret = PTR_ERR(info->battery);
- goto out_battery;
+ goto out_unregister_usb;
}
info->battery->dev.parent = &pdev->dev;
@@ -560,9 +560,9 @@ static int max8925_power_probe(struct platform_device *pdev)
max8925_init_charger(chip, info);
return 0;
-out_battery:
- power_supply_unregister(info->battery);
-out_usb:
+out_unregister_usb:
+ power_supply_unregister(info->usb);
+out_unregister_ac:
power_supply_unregister(info->ac);
out:
return ret;
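The relabeling above follows the usual probe-unwinding idiom: each error label undoes only what was successfully acquired before the failing step, so labels are best named after the action they perform rather than the step that failed (the old out_usb/out_battery names pointed at the wrong resources). A generic sketch of the idiom with hypothetical helpers (acquire_a()/release_a() and friends are placeholders, not driver functions):

	static int acquire_a(void);
	static int acquire_b(void);
	static int acquire_c(void);
	static void release_a(void);
	static void release_b(void);

	static int example_probe(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			goto out;

		ret = acquire_b();
		if (ret)
			goto out_release_a;	/* only 'a' exists so far */

		ret = acquire_c();
		if (ret)
			goto out_release_b;	/* undo 'b', then fall into 'a' */

		return 0;

	out_release_b:
		release_b();
	out_release_a:
		release_a();
	out:
		return ret;
	}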
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 456987c88..b13cd074c 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -565,11 +565,12 @@ static int power_supply_read_temp(struct thermal_zone_device *tzd,
WARN_ON(tzd == NULL);
psy = tzd->devdata;
- ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+ ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
+ if (ret)
+ return ret;
/* Convert tenths of degree Celsius to milli degree Celsius. */
- if (!ret)
- *temp = val.intval * 100;
+ *temp = val.intval * 100;
return ret;
}
@@ -612,10 +613,12 @@ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
int ret;
psy = tcd->devdata;
- ret = psy->desc->get_property(psy,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
- if (!ret)
- *state = val.intval;
+ ret = power_supply_get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+ if (ret)
+ return ret;
+
+ *state = val.intval;
return ret;
}
@@ -628,10 +631,12 @@ static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd,
int ret;
psy = tcd->devdata;
- ret = psy->desc->get_property(psy,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
- if (!ret)
- *state = val.intval;
+ ret = power_supply_get_property(psy,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+ if (ret)
+ return ret;
+
+ *state = val.intval;
return ret;
}
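The thermal callbacks above switch from calling the driver's ->get_property() directly to the power_supply_get_property() accessor, which refuses to touch a supply whose registration is gone and returns -ENODEV instead of calling into a half-torn-down driver. A hedged sketch of the pattern (example_read_temp_mC is illustrative only):

	#include <linux/power_supply.h>

	/* Sketch: read the temperature through the core accessor and
	 * propagate any error instead of consuming a stale value.
	 */
	static int example_read_temp_mC(struct power_supply *psy, int *temp)
	{
		union power_supply_propval val;
		int ret;

		ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
		if (ret)
			return ret;

		*temp = val.intval * 100;	/* tenths of a degree C -> milli degree C */
		return 0;
	}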
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 0a6408a39..9bb2622c2 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -30,6 +30,14 @@ config POWER_RESET_AT91_RESET
This driver supports restart for Atmel AT91SAM9 and SAMA5
SoCs
+config POWER_RESET_AT91_SAMA5D2_SHDWC
+ tristate "Atmel AT91 SAMA5D2-Compatible shutdown controller driver"
+ depends on ARCH_AT91 || COMPILE_TEST
+ default SOC_SAMA5
+ help
+ This driver supports the alternate shutdown controller for some Atmel
+	  SAMA5 SoCs. It is present, for example, on the SAMA5D2 SoC.
+
config POWER_RESET_AXXIA
bool "LSI Axxia reset driver"
depends on ARCH_AXXIA
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 096fa6704..ab7aa8614 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_POWER_RESET_AS3722) += as3722-poweroff.o
obj-$(CONFIG_POWER_RESET_AT91_POWEROFF) += at91-poweroff.o
obj-$(CONFIG_POWER_RESET_AT91_RESET) += at91-reset.o
+obj-$(CONFIG_POWER_RESET_AT91_SAMA5D2_SHDWC) += at91-sama5d2_shdwc.o
obj-$(CONFIG_POWER_RESET_AXXIA) += axxia-reset.o
obj-$(CONFIG_POWER_RESET_BRCMSTB) += brcmstb-reboot.o
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
new file mode 100644
index 000000000..8a5ac9706
--- /dev/null
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -0,0 +1,282 @@
+/*
+ * Atmel SAMA5D2-Compatible Shutdown Controller (SHDWC) driver.
+ * Found on some SoCs, such as the SAMA5D2 (hence the name).
+ *
+ * Copyright (C) 2015 Atmel Corporation,
+ * Nicolas Ferre <nicolas.ferre@atmel.com>
+ *
+ * Evolved from driver at91-poweroff.c.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * TODO:
+ * - add status reporting for the other wake-up inputs [1 - 15]
+ * - Analog Comparator wake-up alarm
+ * - Serial RX wake-up alarm
+ * - low power debouncer
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+
+#define SLOW_CLOCK_FREQ 32768
+
+#define AT91_SHDW_CR 0x00 /* Shut Down Control Register */
+#define AT91_SHDW_SHDW BIT(0) /* Shut Down command */
+#define AT91_SHDW_KEY (0xa5UL << 24) /* KEY Password */
+
+#define AT91_SHDW_MR 0x04 /* Shut Down Mode Register */
+#define AT91_SHDW_WKUPDBC_SHIFT 24
+#define AT91_SHDW_WKUPDBC_MASK GENMASK(31, 16)
+#define AT91_SHDW_WKUPDBC(x) (((x) << AT91_SHDW_WKUPDBC_SHIFT) \
+ & AT91_SHDW_WKUPDBC_MASK)
+
+#define AT91_SHDW_SR 0x08 /* Shut Down Status Register */
+#define AT91_SHDW_WKUPIS_SHIFT 16
+#define AT91_SHDW_WKUPIS_MASK GENMASK(31, 16)
+#define AT91_SHDW_WKUPIS(x) ((1 << (x)) << AT91_SHDW_WKUPIS_SHIFT \
+ & AT91_SHDW_WKUPIS_MASK)
+
+#define AT91_SHDW_WUIR 0x0c /* Shutdown Wake-up Inputs Register */
+#define AT91_SHDW_WKUPEN_MASK GENMASK(15, 0)
+#define AT91_SHDW_WKUPEN(x) ((1 << (x)) & AT91_SHDW_WKUPEN_MASK)
+#define AT91_SHDW_WKUPT_SHIFT 16
+#define AT91_SHDW_WKUPT_MASK GENMASK(31, 16)
+#define AT91_SHDW_WKUPT(x) ((1 << (x)) << AT91_SHDW_WKUPT_SHIFT \
+ & AT91_SHDW_WKUPT_MASK)
+
+#define SHDW_WK_PIN(reg, cfg) ((reg) & AT91_SHDW_WKUPIS((cfg)->wkup_pin_input))
+#define SHDW_RTCWK(reg, cfg) (((reg) >> ((cfg)->sr_rtcwk_shift)) & 0x1)
+#define SHDW_RTCWKEN(cfg) (1 << ((cfg)->mr_rtcwk_shift))
+
+#define DBC_PERIOD_US(x) DIV_ROUND_UP_ULL((1000000 * (x)), \
+ SLOW_CLOCK_FREQ)
+
+struct shdwc_config {
+ u8 wkup_pin_input;
+ u8 mr_rtcwk_shift;
+ u8 sr_rtcwk_shift;
+};
+
+struct shdwc {
+ struct shdwc_config *cfg;
+ void __iomem *at91_shdwc_base;
+};
+
+/*
+ * Hold configuration here, cannot be more than one instance of the driver
+ * since pm_power_off itself is global.
+ */
+static struct shdwc *at91_shdwc;
+static struct clk *sclk;
+
+static const unsigned long long sdwc_dbc_period[] = {
+ 0, 3, 32, 512, 4096, 32768,
+};
+
+static void __init at91_wakeup_status(struct platform_device *pdev)
+{
+ struct shdwc *shdw = platform_get_drvdata(pdev);
+ u32 reg;
+ char *reason = "unknown";
+
+ reg = readl(shdw->at91_shdwc_base + AT91_SHDW_SR);
+
+ dev_dbg(&pdev->dev, "%s: status = %#x\n", __func__, reg);
+
+ /* Simple power-on, just bail out */
+ if (!reg)
+ return;
+
+ if (SHDW_WK_PIN(reg, shdw->cfg))
+ reason = "WKUP pin";
+ else if (SHDW_RTCWK(reg, shdw->cfg))
+ reason = "RTC";
+
+ pr_info("AT91: Wake-Up source: %s\n", reason);
+}
+
+static void at91_poweroff(void)
+{
+ writel(AT91_SHDW_KEY | AT91_SHDW_SHDW,
+ at91_shdwc->at91_shdwc_base + AT91_SHDW_CR);
+}
+
+static u32 at91_shdwc_debouncer_value(struct platform_device *pdev,
+ u32 in_period_us)
+{
+ int i;
+ int max_idx = ARRAY_SIZE(sdwc_dbc_period) - 1;
+ unsigned long long period_us;
+ unsigned long long max_period_us = DBC_PERIOD_US(sdwc_dbc_period[max_idx]);
+
+ if (in_period_us > max_period_us) {
+ dev_warn(&pdev->dev,
+ "debouncer period %u too big, reduced to %llu us\n",
+ in_period_us, max_period_us);
+ return max_idx;
+ }
+
+ for (i = max_idx - 1; i > 0; i--) {
+ period_us = DBC_PERIOD_US(sdwc_dbc_period[i]);
+ dev_dbg(&pdev->dev, "%s: ref[%d] = %llu\n",
+ __func__, i, period_us);
+ if (in_period_us > period_us)
+ break;
+ }
+
+ return i + 1;
+}
+
+static u32 at91_shdwc_get_wakeup_input(struct platform_device *pdev,
+ struct device_node *np)
+{
+ struct device_node *cnp;
+ u32 wk_input_mask;
+ u32 wuir = 0;
+ u32 wk_input;
+
+ for_each_child_of_node(np, cnp) {
+ if (of_property_read_u32(cnp, "reg", &wk_input)) {
+ dev_warn(&pdev->dev, "reg property is missing for %s\n",
+ cnp->full_name);
+ continue;
+ }
+
+ wk_input_mask = 1 << wk_input;
+ if (!(wk_input_mask & AT91_SHDW_WKUPEN_MASK)) {
+ dev_warn(&pdev->dev,
+				 "wake-up input %d out of bounds, ignoring\n",
+ wk_input);
+ continue;
+ }
+ wuir |= wk_input_mask;
+
+ if (of_property_read_bool(cnp, "atmel,wakeup-active-high"))
+ wuir |= AT91_SHDW_WKUPT(wk_input);
+
+ dev_dbg(&pdev->dev, "%s: (child %d) wuir = %#x\n",
+ __func__, wk_input, wuir);
+ }
+
+ return wuir;
+}
+
+static void at91_shdwc_dt_configure(struct platform_device *pdev)
+{
+ struct shdwc *shdw = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ u32 mode = 0, tmp, input;
+
+ if (!np) {
+ dev_err(&pdev->dev, "device node not found\n");
+ return;
+ }
+
+ if (!of_property_read_u32(np, "debounce-delay-us", &tmp))
+ mode |= AT91_SHDW_WKUPDBC(at91_shdwc_debouncer_value(pdev, tmp));
+
+ if (of_property_read_bool(np, "atmel,wakeup-rtc-timer"))
+ mode |= SHDW_RTCWKEN(shdw->cfg);
+
+ dev_dbg(&pdev->dev, "%s: mode = %#x\n", __func__, mode);
+ writel(mode, shdw->at91_shdwc_base + AT91_SHDW_MR);
+
+ input = at91_shdwc_get_wakeup_input(pdev, np);
+ writel(input, shdw->at91_shdwc_base + AT91_SHDW_WUIR);
+}
+
+static const struct shdwc_config sama5d2_shdwc_config = {
+ .wkup_pin_input = 0,
+ .mr_rtcwk_shift = 17,
+ .sr_rtcwk_shift = 5,
+};
+
+static const struct of_device_id at91_shdwc_of_match[] = {
+ {
+ .compatible = "atmel,sama5d2-shdwc",
+ .data = &sama5d2_shdwc_config,
+ }, {
/* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, at91_shdwc_of_match);
+
+static int __init at91_shdwc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ const struct of_device_id *match;
+ int ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ at91_shdwc = devm_kzalloc(&pdev->dev, sizeof(*at91_shdwc), GFP_KERNEL);
+ if (!at91_shdwc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, at91_shdwc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ at91_shdwc->at91_shdwc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(at91_shdwc->at91_shdwc_base)) {
+ dev_err(&pdev->dev, "Could not map reset controller address\n");
+ return PTR_ERR(at91_shdwc->at91_shdwc_base);
+ }
+
+ match = of_match_node(at91_shdwc_of_match, pdev->dev.of_node);
+ at91_shdwc->cfg = (struct shdwc_config *)(match->data);
+
+ sclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sclk))
+ return PTR_ERR(sclk);
+
+ ret = clk_prepare_enable(sclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not enable slow clock\n");
+ return ret;
+ }
+
+ at91_wakeup_status(pdev);
+
+ at91_shdwc_dt_configure(pdev);
+
+ pm_power_off = at91_poweroff;
+
+ return 0;
+}
+
+static int __exit at91_shdwc_remove(struct platform_device *pdev)
+{
+ struct shdwc *shdw = platform_get_drvdata(pdev);
+
+ if (pm_power_off == at91_poweroff)
+ pm_power_off = NULL;
+
+ /* Reset values to disable wake-up features */
+ writel(0, shdw->at91_shdwc_base + AT91_SHDW_MR);
+ writel(0, shdw->at91_shdwc_base + AT91_SHDW_WUIR);
+
+ clk_disable_unprepare(sclk);
+
+ return 0;
+}
+
+static struct platform_driver at91_shdwc_driver = {
+ .remove = __exit_p(at91_shdwc_remove),
+ .driver = {
+ .name = "at91-shdwc",
+ .of_match_table = at91_shdwc_of_match,
+ },
+};
+module_platform_driver_probe(at91_shdwc_driver, at91_shdwc_probe);
+
+MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
+MODULE_DESCRIPTION("Atmel shutdown controller driver");
+MODULE_LICENSE("GPL v2");
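The debouncer slot selection above is easiest to see with numbers: DBC_PERIOD_US() converts each entry of sdwc_dbc_period[] (slow-clock cycles at 32.768 kHz) into microseconds, giving roughly 0, 92, 977, 15625, 125000 and 1000000 us, and at91_shdwc_debouncer_value() returns the smallest slot whose period covers the requested debounce-delay-us (clamping to the largest slot with a warning). A standalone userspace sketch of the same arithmetic — the 100 ms request value is made up:

	#include <stdio.h>

	#define SLOW_CLOCK_FREQ 32768ULL
	#define DBC_PERIOD_US(x) (((1000000ULL * (x)) + SLOW_CLOCK_FREQ - 1) / \
				  SLOW_CLOCK_FREQ)

	static const unsigned long long dbc_period[] = { 0, 3, 32, 512, 4096, 32768 };

	int main(void)
	{
		unsigned int request_us = 100000;	/* e.g. 100 ms from DT */
		int i, max_idx = sizeof(dbc_period) / sizeof(dbc_period[0]) - 1;

		if (request_us > DBC_PERIOD_US(dbc_period[max_idx])) {
			printf("clamped to slot %d\n", max_idx);
			return 0;
		}
		for (i = max_idx - 1; i > 0; i--)
			if (request_us > DBC_PERIOD_US(dbc_period[i]))
				break;
		/* 100000 us selects slot 4: 4096 cycles = 125000 us */
		printf("slot %d (%llu us)\n", i + 1, DBC_PERIOD_US(dbc_period[i + 1]));
		return 0;
	}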
diff --git a/drivers/power/sbs-battery.c b/drivers/power/sbs-battery.c
index d6226d68b..768b9fcb5 100644
--- a/drivers/power/sbs-battery.c
+++ b/drivers/power/sbs-battery.c
@@ -382,8 +382,6 @@ static int sbs_get_battery_property(struct i2c_client *client,
if (ret & BATTERY_FULL_CHARGED)
val->intval = POWER_SUPPLY_STATUS_FULL;
- else if (ret & BATTERY_FULL_DISCHARGED)
- val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
else if (ret & BATTERY_DISCHARGING)
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
else
@@ -702,8 +700,6 @@ static void sbs_delayed_work(struct work_struct *work)
if (ret & BATTERY_FULL_CHARGED)
ret = POWER_SUPPLY_STATUS_FULL;
- else if (ret & BATTERY_FULL_DISCHARGED)
- ret = POWER_SUPPLY_STATUS_NOT_CHARGING;
else if (ret & BATTERY_DISCHARGING)
ret = POWER_SUPPLY_STATUS_DISCHARGING;
else
diff --git a/drivers/power/tps65217_charger.c b/drivers/power/tps65217_charger.c
index d9f56730c..73dfae41d 100644
--- a/drivers/power/tps65217_charger.c
+++ b/drivers/power/tps65217_charger.c
@@ -197,6 +197,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
{
struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
struct tps65217_charger *charger;
+ struct power_supply_config cfg = {};
int ret;
dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -208,9 +209,12 @@ static int tps65217_charger_probe(struct platform_device *pdev)
charger->tps = tps;
charger->dev = &pdev->dev;
+ cfg.of_node = pdev->dev.of_node;
+ cfg.drv_data = charger;
+
charger->ac = devm_power_supply_register(&pdev->dev,
&tps65217_charger_desc,
- NULL);
+ &cfg);
if (IS_ERR(charger->ac)) {
dev_err(&pdev->dev, "failed: power supply register\n");
return PTR_ERR(charger->ac);
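Passing a populated power_supply_config means the registered supply now carries its OF node (so DT consumers can reference it) and the driver context as drv_data; the latter lets callbacks recover their state through the core instead of a container_of() dance. A hedged sketch (example_get_property is illustrative, not the driver's actual callback):

	#include <linux/power_supply.h>

	static int example_get_property(struct power_supply *psy,
					enum power_supply_property psp,
					union power_supply_propval *val)
	{
		/* retrieves the pointer stored via cfg.drv_data at registration */
		struct tps65217_charger *charger = power_supply_get_drvdata(psy);

		if (!charger)
			return -EINVAL;

		val->intval = 0;	/* placeholder; real property handling elided */
		return 0;
	}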
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 8fad0a704..b2766b867 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -34,6 +34,9 @@
#include <asm/processor.h>
#include <asm/cpu_device_id.h>
+/* Local defines */
+#define MSR_PLATFORM_POWER_LIMIT 0x0000065C
+
/* bitmasks for RAPL MSRs, used by primitive access functions */
#define ENERGY_STATUS_MASK 0xffffffff
@@ -86,6 +89,7 @@ enum rapl_domain_type {
RAPL_DOMAIN_PP0, /* core power plane */
RAPL_DOMAIN_PP1, /* graphics uncore */
RAPL_DOMAIN_DRAM,/* DRAM control_type */
+ RAPL_DOMAIN_PLATFORM, /* PSys control_type */
RAPL_DOMAIN_MAX,
};
@@ -251,9 +255,11 @@ static const char * const rapl_domain_names[] = {
"core",
"uncore",
"dram",
+ "psys",
};
static struct powercap_control_type *control_type; /* PowerCap Controller */
+static struct rapl_domain *platform_rapl_domain; /* Platform (PSys) domain */
/* caller to ensure CPU hotplug lock is held */
static struct rapl_package *find_package_by_id(int id)
@@ -409,6 +415,14 @@ static const struct powercap_zone_ops zone_ops[] = {
.set_enable = set_domain_enable,
.get_enable = get_domain_enable,
},
+ /* RAPL_DOMAIN_PLATFORM */
+ {
+ .get_energy_uj = get_energy_counter,
+ .get_max_energy_range_uj = get_max_energy_counter,
+ .release = release_zone,
+ .set_enable = set_domain_enable,
+ .get_enable = get_domain_enable,
+ },
};
static int set_power_limit(struct powercap_zone *power_zone, int id,
@@ -1101,6 +1115,8 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
RAPL_CPU(0X5C, rapl_defaults_core),/* Broxton */
RAPL_CPU(0x5E, rapl_defaults_core),/* Skylake-H/S */
RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */
+ RAPL_CPU(0x8E, rapl_defaults_core),/* Kabylake */
+ RAPL_CPU(0x9E, rapl_defaults_core),/* Kabylake */
{}
};
MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
@@ -1160,6 +1176,13 @@ static int rapl_unregister_powercap(void)
powercap_unregister_zone(control_type,
&rd_package->power_zone);
}
+
+ if (platform_rapl_domain) {
+ powercap_unregister_zone(control_type,
+ &platform_rapl_domain->power_zone);
+ kfree(platform_rapl_domain);
+ }
+
powercap_unregister_control_type(control_type);
return 0;
@@ -1239,6 +1262,47 @@ err_cleanup:
return ret;
}
+static int rapl_register_psys(void)
+{
+ struct rapl_domain *rd;
+ struct powercap_zone *power_zone;
+ u64 val;
+
+ if (rdmsrl_safe_on_cpu(0, MSR_PLATFORM_ENERGY_STATUS, &val) || !val)
+ return -ENODEV;
+
+ if (rdmsrl_safe_on_cpu(0, MSR_PLATFORM_POWER_LIMIT, &val) || !val)
+ return -ENODEV;
+
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+
+ rd->name = rapl_domain_names[RAPL_DOMAIN_PLATFORM];
+ rd->id = RAPL_DOMAIN_PLATFORM;
+ rd->msrs[0] = MSR_PLATFORM_POWER_LIMIT;
+ rd->msrs[1] = MSR_PLATFORM_ENERGY_STATUS;
+ rd->rpl[0].prim_id = PL1_ENABLE;
+ rd->rpl[0].name = pl1_name;
+ rd->rpl[1].prim_id = PL2_ENABLE;
+ rd->rpl[1].name = pl2_name;
+ rd->rp = find_package_by_id(0);
+
+ power_zone = powercap_register_zone(&rd->power_zone, control_type,
+ "psys", NULL,
+ &zone_ops[RAPL_DOMAIN_PLATFORM],
+ 2, &constraint_ops);
+
+ if (IS_ERR(power_zone)) {
+ kfree(rd);
+ return PTR_ERR(power_zone);
+ }
+
+ platform_rapl_domain = rd;
+
+ return 0;
+}
+
static int rapl_register_powercap(void)
{
struct rapl_domain *rd;
@@ -1255,6 +1319,10 @@ static int rapl_register_powercap(void)
list_for_each_entry(rp, &rapl_packages, plist)
if (rapl_package_register_powercap(rp))
goto err_cleanup_package;
+
+ /* Don't bail out if PSys is not supported */
+ rapl_register_psys();
+
return ret;
err_cleanup_package:
@@ -1289,6 +1357,9 @@ static int rapl_check_domain(int cpu, int domain)
case RAPL_DOMAIN_DRAM:
msr = MSR_DRAM_ENERGY_STATUS;
break;
+ case RAPL_DOMAIN_PLATFORM:
+		/* PSYS(PLATFORM) is not a CPU domain, so avoid printing an error */
+ return -EINVAL;
default:
pr_err("invalid domain id %d\n", domain);
return -EINVAL;
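rapl_register_psys() above gates the new domain on a probe-before-use test: if either PSys MSR faults on read, or the energy counter reads back as zero, the platform domain is simply not registered. A minimal sketch of that test (MSR_PLATFORM_ENERGY_STATUS is the PSys energy counter referenced in the hunk above):

	#include <asm/msr.h>
	#include <linux/types.h>

	static bool example_psys_supported(void)
	{
		u64 val;

		/* rdmsrl_safe_on_cpu() returns non-zero if the RDMSR faults;
		 * a present-but-zero energy counter is also treated as absent.
		 */
		if (rdmsrl_safe_on_cpu(0, MSR_PLATFORM_ENERGY_STATUS, &val) || !val)
			return false;

		return true;
	}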
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index 38a8bbe74..83797d89c 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -195,7 +195,7 @@ static void parport_detach(struct parport *port)
struct pps_client_pp *device;
/* FIXME: oooh, this is ugly! */
- if (strcmp(pardev->name, KBUILD_MODNAME))
+ if (!pardev || strcmp(pardev->name, KBUILD_MODNAME))
/* not our port */
return;
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 579fd6529..d637c933c 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -208,14 +208,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
break;
case PTP_SYS_OFFSET:
- sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL);
- if (!sysoff) {
- err = -ENOMEM;
- break;
- }
- if (copy_from_user(sysoff, (void __user *)arg,
- sizeof(*sysoff))) {
- err = -EFAULT;
+ sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
+ if (IS_ERR(sysoff)) {
+ err = PTR_ERR(sysoff);
+ sysoff = NULL;
break;
}
if (sysoff->n_samples > PTP_MAX_SAMPLES) {
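memdup_user() collapses the kmalloc()-plus-copy_from_user() pair into one call; note that it returns an ERR_PTR() on failure, never NULL, which is why the handler resets sysoff before breaking out to the shared kfree() at the end of the ioctl. A hedged sketch of the pattern in isolation:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static long example_copy_in(void __user *uptr, size_t len)
	{
		void *buf;

		buf = memdup_user(uptr, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);	/* nothing to free on this path */

		/* ... use buf ... */

		kfree(buf);
		return 0;
	}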
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 7831bc6b5..ed337a8c3 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -75,6 +75,7 @@ static void free_pwms(struct pwm_chip *chip)
for (i = 0; i < chip->npwm; i++) {
struct pwm_device *pwm = &chip->pwms[i];
+
radix_tree_delete(&pwm_tree, pwm->pwm);
}
@@ -146,12 +147,12 @@ of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args)
if (IS_ERR(pwm))
return pwm;
- pwm_set_period(pwm, args->args[1]);
+ pwm->args.period = args->args[1];
if (args->args[2] & PWM_POLARITY_INVERTED)
- pwm_set_polarity(pwm, PWM_POLARITY_INVERSED);
+ pwm->args.polarity = PWM_POLARITY_INVERSED;
else
- pwm_set_polarity(pwm, PWM_POLARITY_NORMAL);
+ pwm->args.polarity = PWM_POLARITY_NORMAL;
return pwm;
}
@@ -172,7 +173,7 @@ of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
if (IS_ERR(pwm))
return pwm;
- pwm_set_period(pwm, args->args[1]);
+ pwm->args.period = args->args[1];
return pwm;
}
@@ -226,6 +227,19 @@ void *pwm_get_chip_data(struct pwm_device *pwm)
}
EXPORT_SYMBOL_GPL(pwm_get_chip_data);
+static bool pwm_ops_check(const struct pwm_ops *ops)
+{
+ /* driver supports legacy, non-atomic operation */
+ if (ops->config && ops->enable && ops->disable)
+ return true;
+
+ /* driver supports atomic operation */
+ if (ops->apply)
+ return true;
+
+ return false;
+}
+
/**
* pwmchip_add_with_polarity() - register a new PWM chip
* @chip: the PWM chip to add
@@ -244,8 +258,10 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
unsigned int i;
int ret;
- if (!chip || !chip->dev || !chip->ops || !chip->ops->config ||
- !chip->ops->enable || !chip->ops->disable || !chip->npwm)
+ if (!chip || !chip->dev || !chip->ops || !chip->npwm)
+ return -EINVAL;
+
+ if (!pwm_ops_check(chip->ops))
return -EINVAL;
mutex_lock(&pwm_lock);
@@ -254,7 +270,7 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
if (ret < 0)
goto out;
- chip->pwms = kzalloc(chip->npwm * sizeof(*pwm), GFP_KERNEL);
+ chip->pwms = kcalloc(chip->npwm, sizeof(*pwm), GFP_KERNEL);
if (!chip->pwms) {
ret = -ENOMEM;
goto out;
@@ -268,8 +284,10 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
pwm->chip = chip;
pwm->pwm = chip->base + i;
pwm->hwpwm = i;
- pwm->polarity = polarity;
- mutex_init(&pwm->lock);
+ pwm->state.polarity = polarity;
+
+ if (chip->ops->get_state)
+ chip->ops->get_state(chip, pwm, &pwm->state);
radix_tree_insert(&pwm_tree, pwm->pwm, pwm);
}
@@ -429,107 +447,139 @@ void pwm_free(struct pwm_device *pwm)
EXPORT_SYMBOL_GPL(pwm_free);
/**
- * pwm_config() - change a PWM device configuration
+ * pwm_apply_state() - atomically apply a new state to a PWM device
* @pwm: PWM device
- * @duty_ns: "on" time (in nanoseconds)
- * @period_ns: duration (in nanoseconds) of one cycle
- *
- * Returns: 0 on success or a negative error code on failure.
+ * @state: new state to apply. This can be adjusted by the PWM driver
+ * if the requested config is not achievable, for example,
+ * ->duty_cycle and ->period might be approximated.
*/
-int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
{
int err;
- if (!pwm || duty_ns < 0 || period_ns <= 0 || duty_ns > period_ns)
+ if (!pwm || !state || !state->period ||
+ state->duty_cycle > state->period)
return -EINVAL;
- err = pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns);
- if (err)
- return err;
-
- pwm->duty_cycle = duty_ns;
- pwm->period = period_ns;
+ if (!memcmp(state, &pwm->state, sizeof(*state)))
+ return 0;
- return 0;
-}
-EXPORT_SYMBOL_GPL(pwm_config);
+ if (pwm->chip->ops->apply) {
+ err = pwm->chip->ops->apply(pwm->chip, pwm, state);
+ if (err)
+ return err;
-/**
- * pwm_set_polarity() - configure the polarity of a PWM signal
- * @pwm: PWM device
- * @polarity: new polarity of the PWM signal
- *
- * Note that the polarity cannot be configured while the PWM device is
- * enabled.
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity)
-{
- int err;
+ pwm->state = *state;
+ } else {
+ /*
+ * FIXME: restore the initial state in case of error.
+ */
+ if (state->polarity != pwm->state.polarity) {
+ if (!pwm->chip->ops->set_polarity)
+ return -ENOTSUPP;
+
+ /*
+ * Changing the polarity of a running PWM is
+ * only allowed when the PWM driver implements
+ * ->apply().
+ */
+ if (pwm->state.enabled) {
+ pwm->chip->ops->disable(pwm->chip, pwm);
+ pwm->state.enabled = false;
+ }
+
+ err = pwm->chip->ops->set_polarity(pwm->chip, pwm,
+ state->polarity);
+ if (err)
+ return err;
+
+ pwm->state.polarity = state->polarity;
+ }
- if (!pwm || !pwm->chip->ops)
- return -EINVAL;
+ if (state->period != pwm->state.period ||
+ state->duty_cycle != pwm->state.duty_cycle) {
+ err = pwm->chip->ops->config(pwm->chip, pwm,
+ state->duty_cycle,
+ state->period);
+ if (err)
+ return err;
- if (!pwm->chip->ops->set_polarity)
- return -ENOSYS;
+ pwm->state.duty_cycle = state->duty_cycle;
+ pwm->state.period = state->period;
+ }
- mutex_lock(&pwm->lock);
+ if (state->enabled != pwm->state.enabled) {
+ if (state->enabled) {
+ err = pwm->chip->ops->enable(pwm->chip, pwm);
+ if (err)
+ return err;
+ } else {
+ pwm->chip->ops->disable(pwm->chip, pwm);
+ }
- if (pwm_is_enabled(pwm)) {
- err = -EBUSY;
- goto unlock;
+ pwm->state.enabled = state->enabled;
+ }
}
- err = pwm->chip->ops->set_polarity(pwm->chip, pwm, polarity);
- if (err)
- goto unlock;
-
- pwm->polarity = polarity;
-
-unlock:
- mutex_unlock(&pwm->lock);
- return err;
+ return 0;
}
-EXPORT_SYMBOL_GPL(pwm_set_polarity);
+EXPORT_SYMBOL_GPL(pwm_apply_state);
/**
- * pwm_enable() - start a PWM output toggling
+ * pwm_adjust_config() - adjust the current PWM config to the PWM arguments
* @pwm: PWM device
*
- * Returns: 0 on success or a negative error code on failure.
+ * This function will adjust the PWM config to the PWM arguments provided
+ * by the DT or PWM lookup table. This is particularly useful to adapt
+ * the bootloader config to the Linux one.
*/
-int pwm_enable(struct pwm_device *pwm)
+int pwm_adjust_config(struct pwm_device *pwm)
{
- int err = 0;
+ struct pwm_state state;
+ struct pwm_args pargs;
- if (!pwm)
- return -EINVAL;
+ pwm_get_args(pwm, &pargs);
+ pwm_get_state(pwm, &state);
- mutex_lock(&pwm->lock);
+ /*
+ * If the current period is zero it means that either the PWM driver
+ * does not support initial state retrieval or the PWM has not yet
+ * been configured.
+ *
+ * In either case, we setup the new period and polarity, and assign a
+ * duty cycle of 0.
+ */
+ if (!state.period) {
+ state.duty_cycle = 0;
+ state.period = pargs.period;
+ state.polarity = pargs.polarity;
- if (!test_and_set_bit(PWMF_ENABLED, &pwm->flags)) {
- err = pwm->chip->ops->enable(pwm->chip, pwm);
- if (err)
- clear_bit(PWMF_ENABLED, &pwm->flags);
+ return pwm_apply_state(pwm, &state);
}
- mutex_unlock(&pwm->lock);
+ /*
+ * Adjust the PWM duty cycle/period based on the period value provided
+ * in PWM args.
+ */
+ if (pargs.period != state.period) {
+ u64 dutycycle = (u64)state.duty_cycle * pargs.period;
- return err;
-}
-EXPORT_SYMBOL_GPL(pwm_enable);
+ do_div(dutycycle, state.period);
+ state.duty_cycle = dutycycle;
+ state.period = pargs.period;
+ }
-/**
- * pwm_disable() - stop a PWM output toggling
- * @pwm: PWM device
- */
-void pwm_disable(struct pwm_device *pwm)
-{
- if (pwm && test_and_clear_bit(PWMF_ENABLED, &pwm->flags))
- pwm->chip->ops->disable(pwm->chip, pwm);
+ /*
+ * If the polarity changed, we should also change the duty cycle.
+ */
+ if (pargs.polarity != state.polarity) {
+ state.polarity = pargs.polarity;
+ state.duty_cycle = state.period - state.duty_cycle;
+ }
+
+ return pwm_apply_state(pwm, &state);
}
-EXPORT_SYMBOL_GPL(pwm_disable);
+EXPORT_SYMBOL_GPL(pwm_adjust_config);
static struct pwm_chip *of_node_to_pwmchip(struct device_node *np)
{
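Worked example of the rescaling in pwm_adjust_config() above (hypothetical numbers): suppose the bootloader left the PWM running at period = 10000000 ns with duty_cycle = 2500000 ns (25%), while the DT args specify a 5000000 ns period. The function computes

	duty_cycle = 2500000 * 5000000 / 10000000 = 1250000 ns

preserving the 25% ratio at the new period. If the polarity argument also differs, the duty cycle is additionally mirrored to period - duty_cycle = 3750000 ns, which describes the same waveform as seen from the inverted output.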
@@ -751,8 +801,8 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
if (IS_ERR(pwm))
goto out;
- pwm_set_period(pwm, chosen->period);
- pwm_set_polarity(pwm, chosen->polarity);
+ pwm->args.period = chosen->period;
+ pwm->args.polarity = chosen->polarity;
out:
mutex_unlock(&pwm_lookup_lock);
@@ -900,15 +950,23 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
for (i = 0; i < chip->npwm; i++) {
struct pwm_device *pwm = &chip->pwms[i];
+ struct pwm_state state;
+
+ pwm_get_state(pwm, &state);
seq_printf(s, " pwm-%-3d (%-20.20s):", i, pwm->label);
if (test_bit(PWMF_REQUESTED, &pwm->flags))
seq_puts(s, " requested");
- if (pwm_is_enabled(pwm))
+ if (state.enabled)
seq_puts(s, " enabled");
+ seq_printf(s, " period: %u ns", state.period);
+ seq_printf(s, " duty: %u ns", state.duty_cycle);
+ seq_printf(s, " polarity: %s",
+ state.polarity ? "inverse" : "normal");
+
seq_puts(s, "\n");
}
}
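With pwm_apply_state() in place, consumers describe the whole waveform in one pwm_state and commit it in a single call instead of sequencing pwm_config(), pwm_set_polarity() and pwm_enable(). A minimal consumer sketch (the 1 kHz / 50% values are made up):

	#include <linux/pwm.h>

	static int example_configure(struct pwm_device *pwm)
	{
		struct pwm_state state;

		pwm_get_state(pwm, &state);	/* start from the current state */

		state.period = 1000000;		/* 1 ms period, in nanoseconds */
		state.duty_cycle = 500000;	/* 50% duty */
		state.polarity = PWM_POLARITY_NORMAL;
		state.enabled = true;

		return pwm_apply_state(pwm, &state);	/* single atomic commit */
	}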
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index f994c7eaf..14fc011fa 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -272,7 +272,7 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
chip->chip.of_pwm_n_cells = 3;
chip->chip.can_sleep = 1;
- ret = pwmchip_add(&chip->chip);
+ ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED);
if (ret) {
clk_disable_unprepare(hlcdc->periph_clk);
return ret;
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index a80c10803..7d335422c 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -60,7 +60,7 @@ static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
return -EINVAL;
/* Store constant period value */
- pwm_set_period(pwm, DIV_ROUND_CLOSEST(NSEC_PER_SEC, freq));
+ pwm->args.period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, freq);
return 0;
}
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
index 7101c7020..bd0ebd048 100644
--- a/drivers/pwm/pwm-crc.c
+++ b/drivers/pwm/pwm-crc.c
@@ -75,7 +75,7 @@ static int crc_pwm_config(struct pwm_chip *c, struct pwm_device *pwm,
return -EINVAL;
}
- if (pwm->period != period_ns) {
+ if (pwm_get_period(pwm) != period_ns) {
int clk_div;
/* changing the clk divisor, need to disable first */
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index 9861fed4e..19dc64cab 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -249,7 +249,7 @@ static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
LPC18XX_PWM_EVSTATEMSK(lpc18xx_data->duty_event),
LPC18XX_PWM_EVSTATEMSK_ALL);
- if (pwm->polarity == PWM_POLARITY_NORMAL) {
+ if (pwm_get_polarity(pwm) == PWM_POLARITY_NORMAL) {
set_event = lpc18xx_pwm->period_event;
clear_event = lpc18xx_data->duty_event;
res_action = LPC18XX_PWM_RES_SET;
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index b7e6ecba7..3e95090cd 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -192,7 +192,7 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
load_value, load_value, match_value, match_value);
omap->pdata->set_pwm(omap->dm_timer,
- pwm->polarity == PWM_POLARITY_INVERSED,
+ pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED,
true,
PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE);
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index cb2f7024c..58b709f29 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -160,7 +160,7 @@ pxa_pwm_of_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
if (IS_ERR(pwm))
return pwm;
- pwm_set_period(pwm, args->args[0]);
+ pwm->args.period = args->args[0];
return pwm;
}
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 7b8ac0678..1c85ecc9e 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -157,7 +157,7 @@ static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
return div;
/* Let the core driver set pwm->period if disabled and duty_ns == 0 */
- if (!test_bit(PWMF_ENABLED, &pwm->flags) && !duty_ns)
+ if (!pwm_is_enabled(pwm) && !duty_ns)
return 0;
rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR);
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 67af9f623..03a99a53c 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -354,7 +354,8 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
val = sun4i_pwm_readl(pwm, PWM_CTRL_REG);
for (i = 0; i < pwm->chip.npwm; i++)
if (!(val & BIT_CH(PWM_ACT_STATE, i)))
- pwm->chip.pwms[i].polarity = PWM_POLARITY_INVERSED;
+ pwm_set_polarity(&pwm->chip.pwms[i],
+ PWM_POLARITY_INVERSED);
clk_disable_unprepare(pwm->clk);
return 0;
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 9c90886f4..01695d48d 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -26,6 +26,7 @@
struct pwm_export {
struct device child;
struct pwm_device *pwm;
+ struct mutex lock;
};
static struct pwm_export *child_to_pwm_export(struct device *child)
@@ -45,15 +46,20 @@ static ssize_t period_show(struct device *child,
char *buf)
{
const struct pwm_device *pwm = child_to_pwm_device(child);
+ struct pwm_state state;
- return sprintf(buf, "%u\n", pwm_get_period(pwm));
+ pwm_get_state(pwm, &state);
+
+ return sprintf(buf, "%u\n", state.period);
}
static ssize_t period_store(struct device *child,
struct device_attribute *attr,
const char *buf, size_t size)
{
- struct pwm_device *pwm = child_to_pwm_device(child);
+ struct pwm_export *export = child_to_pwm_export(child);
+ struct pwm_device *pwm = export->pwm;
+ struct pwm_state state;
unsigned int val;
int ret;
@@ -61,7 +67,11 @@ static ssize_t period_store(struct device *child,
if (ret)
return ret;
- ret = pwm_config(pwm, pwm_get_duty_cycle(pwm), val);
+ mutex_lock(&export->lock);
+ pwm_get_state(pwm, &state);
+ state.period = val;
+ ret = pwm_apply_state(pwm, &state);
+ mutex_unlock(&export->lock);
return ret ? : size;
}
@@ -71,15 +81,20 @@ static ssize_t duty_cycle_show(struct device *child,
char *buf)
{
const struct pwm_device *pwm = child_to_pwm_device(child);
+ struct pwm_state state;
+
+ pwm_get_state(pwm, &state);
- return sprintf(buf, "%u\n", pwm_get_duty_cycle(pwm));
+ return sprintf(buf, "%u\n", state.duty_cycle);
}
static ssize_t duty_cycle_store(struct device *child,
struct device_attribute *attr,
const char *buf, size_t size)
{
- struct pwm_device *pwm = child_to_pwm_device(child);
+ struct pwm_export *export = child_to_pwm_export(child);
+ struct pwm_device *pwm = export->pwm;
+ struct pwm_state state;
unsigned int val;
int ret;
@@ -87,7 +102,11 @@ static ssize_t duty_cycle_store(struct device *child,
if (ret)
return ret;
- ret = pwm_config(pwm, val, pwm_get_period(pwm));
+ mutex_lock(&export->lock);
+ pwm_get_state(pwm, &state);
+ state.duty_cycle = val;
+ ret = pwm_apply_state(pwm, &state);
+ mutex_unlock(&export->lock);
return ret ? : size;
}
@@ -97,33 +116,46 @@ static ssize_t enable_show(struct device *child,
char *buf)
{
const struct pwm_device *pwm = child_to_pwm_device(child);
+ struct pwm_state state;
+
+ pwm_get_state(pwm, &state);
- return sprintf(buf, "%d\n", pwm_is_enabled(pwm));
+ return sprintf(buf, "%d\n", state.enabled);
}
static ssize_t enable_store(struct device *child,
struct device_attribute *attr,
const char *buf, size_t size)
{
- struct pwm_device *pwm = child_to_pwm_device(child);
+ struct pwm_export *export = child_to_pwm_export(child);
+ struct pwm_device *pwm = export->pwm;
+ struct pwm_state state;
int val, ret;
ret = kstrtoint(buf, 0, &val);
if (ret)
return ret;
+ mutex_lock(&export->lock);
+
+ pwm_get_state(pwm, &state);
+
switch (val) {
case 0:
- pwm_disable(pwm);
+ state.enabled = false;
break;
case 1:
- ret = pwm_enable(pwm);
+ state.enabled = true;
break;
default:
ret = -EINVAL;
- break;
+ goto unlock;
}
+ ret = pwm_apply_state(pwm, &state);
+
+unlock:
+ mutex_unlock(&export->lock);
return ret ? : size;
}
@@ -133,8 +165,11 @@ static ssize_t polarity_show(struct device *child,
{
const struct pwm_device *pwm = child_to_pwm_device(child);
const char *polarity = "unknown";
+ struct pwm_state state;
+
+ pwm_get_state(pwm, &state);
- switch (pwm_get_polarity(pwm)) {
+ switch (state.polarity) {
case PWM_POLARITY_NORMAL:
polarity = "normal";
break;
@@ -151,8 +186,10 @@ static ssize_t polarity_store(struct device *child,
struct device_attribute *attr,
const char *buf, size_t size)
{
- struct pwm_device *pwm = child_to_pwm_device(child);
+ struct pwm_export *export = child_to_pwm_export(child);
+ struct pwm_device *pwm = export->pwm;
enum pwm_polarity polarity;
+ struct pwm_state state;
int ret;
if (sysfs_streq(buf, "normal"))
@@ -162,7 +199,11 @@ static ssize_t polarity_store(struct device *child,
else
return -EINVAL;
- ret = pwm_set_polarity(pwm, polarity);
+ mutex_lock(&export->lock);
+ pwm_get_state(pwm, &state);
+ state.polarity = polarity;
+ ret = pwm_apply_state(pwm, &state);
+ mutex_unlock(&export->lock);
return ret ? : size;
}
@@ -203,6 +244,7 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
}
export->pwm = pwm;
+ mutex_init(&export->lock);
export->child.release = pwm_export_release;
export->child.parent = parent;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index c77dc08b1..144cbf5b3 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -321,6 +321,15 @@ config REGULATOR_LP872X
help
This driver supports LP8720/LP8725 PMIC
+config REGULATOR_LP873X
+ tristate "TI LP873X Power regulators"
+ depends on MFD_LP873X && OF
+ help
+ This driver supports LP873X voltage regulator chips. LP873X
+ provides two step-down converters and two general-purpose LDO
+	  voltage regulators. It supports software-based voltage control
+	  for different voltage domains.
+
config REGULATOR_LP8755
tristate "TI LP8755 High Performance PMU driver"
depends on I2C
@@ -409,6 +418,7 @@ config REGULATOR_MAX8952
config REGULATOR_MAX8973
tristate "Maxim MAX8973 voltage regulator "
depends on I2C
+ depends on THERMAL && THERMAL_OF
select REGMAP_I2C
help
The MAXIM MAX8973 high-efficiency, three-phase, DC-DC step-down
@@ -548,6 +558,13 @@ config REGULATOR_PV88060
Say y here to support the voltage regulators and converters
PV88060
+config REGULATOR_PV88080
+ tristate "Powerventure Semiconductor PV88080 regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+	  Say y here to support the buck converters on the PV88080
+
config REGULATOR_PV88090
tristate "Powerventure Semiconductor PV88090 regulator"
depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 61bfbb9d4..85a1d44a3 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -42,11 +42,12 @@ obj-$(CONFIG_REGULATOR_LM363X) += lm363x-regulator.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o
obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o
+obj-$(CONFIG_REGULATOR_LP873X) += lp873x-regulator.o
obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o
obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o
obj-$(CONFIG_REGULATOR_LP8755) += lp8755.o
obj-$(CONFIG_REGULATOR_LTC3589) += ltc3589.o
-obj-$(CONFIG_REGULATOR_MAX14577) += max14577.o
+obj-$(CONFIG_REGULATOR_MAX14577) += max14577-regulator.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_MAX77620) += max77620-regulator.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
@@ -55,10 +56,10 @@ obj-$(CONFIG_REGULATOR_MAX8907) += max8907-regulator.o
obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o
obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o
obj-$(CONFIG_REGULATOR_MAX8973) += max8973-regulator.o
-obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o
+obj-$(CONFIG_REGULATOR_MAX8997) += max8997-regulator.o
obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
obj-$(CONFIG_REGULATOR_MAX77686) += max77686-regulator.o
-obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
+obj-$(CONFIG_REGULATOR_MAX77693) += max77693-regulator.o
obj-$(CONFIG_REGULATOR_MAX77802) += max77802-regulator.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
@@ -71,6 +72,7 @@ obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
obj-$(CONFIG_REGULATOR_PV88060) += pv88060-regulator.o
+obj-$(CONFIG_REGULATOR_PV88080) += pv88080-regulator.o
obj-$(CONFIG_REGULATOR_PV88090) += pv88090-regulator.o
obj-$(CONFIG_REGULATOR_PWM) += pwm-regulator.o
obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 000d566e3..a1cd0d4f8 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -139,6 +139,74 @@ struct act8865 {
int off_mask;
};
+static const struct regmap_range act8600_reg_ranges[] = {
+ regmap_reg_range(0x00, 0x01),
+ regmap_reg_range(0x10, 0x10),
+ regmap_reg_range(0x12, 0x12),
+ regmap_reg_range(0x20, 0x20),
+ regmap_reg_range(0x22, 0x22),
+ regmap_reg_range(0x30, 0x30),
+ regmap_reg_range(0x32, 0x32),
+ regmap_reg_range(0x40, 0x41),
+ regmap_reg_range(0x50, 0x51),
+ regmap_reg_range(0x60, 0x61),
+ regmap_reg_range(0x70, 0x71),
+ regmap_reg_range(0x80, 0x81),
+ regmap_reg_range(0x91, 0x91),
+ regmap_reg_range(0xA1, 0xA1),
+ regmap_reg_range(0xA8, 0xAA),
+ regmap_reg_range(0xB0, 0xB0),
+ regmap_reg_range(0xB2, 0xB2),
+ regmap_reg_range(0xC1, 0xC1),
+};
+
+static const struct regmap_range act8600_reg_ro_ranges[] = {
+ regmap_reg_range(0xAA, 0xAA),
+ regmap_reg_range(0xC1, 0xC1),
+};
+
+static const struct regmap_range act8600_reg_volatile_ranges[] = {
+ regmap_reg_range(0x00, 0x01),
+ regmap_reg_range(0x12, 0x12),
+ regmap_reg_range(0x22, 0x22),
+ regmap_reg_range(0x32, 0x32),
+ regmap_reg_range(0x41, 0x41),
+ regmap_reg_range(0x51, 0x51),
+ regmap_reg_range(0x61, 0x61),
+ regmap_reg_range(0x71, 0x71),
+ regmap_reg_range(0x81, 0x81),
+ regmap_reg_range(0xA8, 0xA8),
+ regmap_reg_range(0xAA, 0xAA),
+ regmap_reg_range(0xB0, 0xB0),
+ regmap_reg_range(0xC1, 0xC1),
+};
+
+static const struct regmap_access_table act8600_write_ranges_table = {
+ .yes_ranges = act8600_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(act8600_reg_ranges),
+ .no_ranges = act8600_reg_ro_ranges,
+ .n_no_ranges = ARRAY_SIZE(act8600_reg_ro_ranges),
+};
+
+static const struct regmap_access_table act8600_read_ranges_table = {
+ .yes_ranges = act8600_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(act8600_reg_ranges),
+};
+
+static const struct regmap_access_table act8600_volatile_ranges_table = {
+ .yes_ranges = act8600_reg_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(act8600_reg_volatile_ranges),
+};
+
+static const struct regmap_config act8600_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xFF,
+ .wr_table = &act8600_write_ranges_table,
+ .rd_table = &act8600_read_ranges_table,
+ .volatile_table = &act8600_volatile_ranges_table,
+};
+
static const struct regmap_config act8865_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -319,7 +387,6 @@ static struct of_regulator_match act8600_matches[] = {
};
static int act8865_pdata_from_dt(struct device *dev,
- struct device_node **of_node,
struct act8865_platform_data *pdata,
unsigned long type)
{
@@ -370,7 +437,7 @@ static int act8865_pdata_from_dt(struct device *dev,
regulator->id = i;
regulator->name = matches[i].name;
regulator->init_data = matches[i].init_data;
- of_node[i] = matches[i].of_node;
+ regulator->of_node = matches[i].of_node;
regulator++;
}
@@ -378,7 +445,6 @@ static int act8865_pdata_from_dt(struct device *dev,
}
#else
static inline int act8865_pdata_from_dt(struct device *dev,
- struct device_node **of_node,
struct act8865_platform_data *pdata,
unsigned long type)
{
@@ -386,8 +452,8 @@ static inline int act8865_pdata_from_dt(struct device *dev,
}
#endif
-static struct regulator_init_data
-*act8865_get_init_data(int id, struct act8865_platform_data *pdata)
+static struct act8865_regulator_data *act8865_get_regulator_data(
+ int id, struct act8865_platform_data *pdata)
{
int i;
@@ -396,7 +462,7 @@ static struct regulator_init_data
for (i = 0; i < pdata->num_regulators; i++) {
if (pdata->regulators[i].id == id)
- return pdata->regulators[i].init_data;
+ return &pdata->regulators[i];
}
return NULL;
@@ -418,9 +484,9 @@ static int act8865_pmic_probe(struct i2c_client *client,
const struct regulator_desc *regulators;
struct act8865_platform_data pdata_of, *pdata;
struct device *dev = &client->dev;
- struct device_node **of_node;
int i, ret, num_regulators;
struct act8865 *act8865;
+ const struct regmap_config *regmap_config;
unsigned long type;
int off_reg, off_mask;
int voltage_select = 0;
@@ -447,12 +513,14 @@ static int act8865_pmic_probe(struct i2c_client *client,
case ACT8600:
regulators = act8600_regulators;
num_regulators = ARRAY_SIZE(act8600_regulators);
+ regmap_config = &act8600_regmap_config;
off_reg = -1;
off_mask = -1;
break;
case ACT8846:
regulators = act8846_regulators;
num_regulators = ARRAY_SIZE(act8846_regulators);
+ regmap_config = &act8865_regmap_config;
off_reg = ACT8846_GLB_OFF_CTRL;
off_mask = ACT8846_OFF_SYSMASK;
break;
@@ -464,6 +532,7 @@ static int act8865_pmic_probe(struct i2c_client *client,
regulators = act8865_regulators;
num_regulators = ARRAY_SIZE(act8865_regulators);
}
+ regmap_config = &act8865_regmap_config;
off_reg = ACT8865_SYS_CTRL;
off_mask = ACT8865_MSTROFF;
break;
@@ -472,34 +541,22 @@ static int act8865_pmic_probe(struct i2c_client *client,
return -EINVAL;
}
- of_node = devm_kzalloc(dev, sizeof(struct device_node *) *
- num_regulators, GFP_KERNEL);
- if (!of_node)
- return -ENOMEM;
-
if (dev->of_node && !pdata) {
- ret = act8865_pdata_from_dt(dev, of_node, &pdata_of, type);
+ ret = act8865_pdata_from_dt(dev, &pdata_of, type);
if (ret < 0)
return ret;
pdata = &pdata_of;
}
- if (pdata->num_regulators > num_regulators) {
- dev_err(dev, "too many regulators: %d\n",
- pdata->num_regulators);
- return -EINVAL;
- }
-
act8865 = devm_kzalloc(dev, sizeof(struct act8865), GFP_KERNEL);
if (!act8865)
return -ENOMEM;
- act8865->regmap = devm_regmap_init_i2c(client, &act8865_regmap_config);
+ act8865->regmap = devm_regmap_init_i2c(client, regmap_config);
if (IS_ERR(act8865->regmap)) {
ret = PTR_ERR(act8865->regmap);
- dev_err(&client->dev, "Failed to allocate register map: %d\n",
- ret);
+ dev_err(dev, "Failed to allocate register map: %d\n", ret);
return ret;
}
@@ -518,15 +575,20 @@ static int act8865_pmic_probe(struct i2c_client *client,
for (i = 0; i < num_regulators; i++) {
const struct regulator_desc *desc = &regulators[i];
struct regulator_config config = { };
+ struct act8865_regulator_data *rdata;
struct regulator_dev *rdev;
config.dev = dev;
- config.init_data = act8865_get_init_data(desc->id, pdata);
- config.of_node = of_node[i];
config.driver_data = act8865;
config.regmap = act8865->regmap;
- rdev = devm_regulator_register(&client->dev, desc, &config);
+ rdata = act8865_get_regulator_data(desc->id, pdata);
+ if (rdata) {
+ config.init_data = rdata->init_data;
+ config.of_node = rdata->of_node;
+ }
+
+ rdev = devm_regulator_register(dev, desc, &config);
if (IS_ERR(rdev)) {
dev_err(dev, "failed to register %s\n", desc->name);
return PTR_ERR(rdev);
@@ -534,7 +596,6 @@ static int act8865_pmic_probe(struct i2c_client *client,
}
i2c_set_clientdata(client, act8865);
- devm_kfree(dev, of_node);
return 0;
}
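The access tables added above let the regmap core police register accesses itself: writes to a register listed in the read-only no_ranges and reads outside the yes_ranges fail inside regmap before ever reaching the I2C bus, and registers in the volatile table bypass the cache. A hedged sketch of the resulting behaviour (the error values come from regmap, not the driver):

	#include <linux/regmap.h>

	static void example_access(struct regmap *map)
	{
		unsigned int val;
		int ret;

		ret = regmap_read(map, 0x00, &val);	/* 0: readable and volatile */
		ret = regmap_read(map, 0x13, &val);	/* -EIO: not a readable register */
		ret = regmap_write(map, 0xAA, 0x01);	/* -EIO: 0xAA is read-only */
		(void)ret;
	}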
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 63cd5e68c..3a6d0290c 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -296,7 +296,7 @@ static int anatop_regulator_probe(struct platform_device *pdev)
if (!sreg->sel && !strcmp(sreg->name, "vddpu"))
sreg->sel = 22;
- if (!sreg->sel) {
+ if (!sreg->bypass && !sreg->sel) {
dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n");
return -EINVAL;
}
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index 8b046eec6..66337e127 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -372,7 +372,7 @@ static int as3722_ldo_set_current_limit(struct regulator_dev *rdev,
AS3722_LDO_ILIMIT_MASK, reg);
}
-static struct regulator_ops as3722_ldo0_ops = {
+static const struct regulator_ops as3722_ldo0_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -383,7 +383,7 @@ static struct regulator_ops as3722_ldo0_ops = {
.set_current_limit = as3722_ldo_set_current_limit,
};
-static struct regulator_ops as3722_ldo0_extcntrl_ops = {
+static const struct regulator_ops as3722_ldo0_extcntrl_ops = {
.list_voltage = regulator_list_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -415,7 +415,7 @@ static int as3722_ldo3_get_current_limit(struct regulator_dev *rdev)
return 150000;
}
-static struct regulator_ops as3722_ldo3_ops = {
+static const struct regulator_ops as3722_ldo3_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -425,20 +425,45 @@ static struct regulator_ops as3722_ldo3_ops = {
.get_current_limit = as3722_ldo3_get_current_limit,
};
-static struct regulator_ops as3722_ldo3_extcntrl_ops = {
+static const struct regulator_ops as3722_ldo3_extcntrl_ops = {
.list_voltage = regulator_list_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_current_limit = as3722_ldo3_get_current_limit,
};
+static const struct regulator_ops as3722_ldo6_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .get_current_limit = as3722_ldo_get_current_limit,
+ .set_current_limit = as3722_ldo_set_current_limit,
+ .get_bypass = regulator_get_bypass_regmap,
+ .set_bypass = regulator_set_bypass_regmap,
+};
+
+static const struct regulator_ops as3722_ldo6_extcntrl_ops = {
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .get_current_limit = as3722_ldo_get_current_limit,
+ .set_current_limit = as3722_ldo_set_current_limit,
+ .get_bypass = regulator_get_bypass_regmap,
+ .set_bypass = regulator_set_bypass_regmap,
+};
+
static const struct regulator_linear_range as3722_ldo_ranges[] = {
REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0),
REGULATOR_LINEAR_RANGE(825000, 0x01, 0x24, 25000),
REGULATOR_LINEAR_RANGE(1725000, 0x40, 0x7F, 25000),
};
-static struct regulator_ops as3722_ldo_ops = {
+static const struct regulator_ops as3722_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -450,7 +475,7 @@ static struct regulator_ops as3722_ldo_ops = {
.set_current_limit = as3722_ldo_set_current_limit,
};
-static struct regulator_ops as3722_ldo_extcntrl_ops = {
+static const struct regulator_ops as3722_ldo_extcntrl_ops = {
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -616,7 +641,7 @@ static const struct regulator_linear_range as3722_sd2345_ranges[] = {
REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7F, 50000),
};
-static struct regulator_ops as3722_sd016_ops = {
+static const struct regulator_ops as3722_sd016_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -630,7 +655,7 @@ static struct regulator_ops as3722_sd016_ops = {
.set_mode = as3722_sd_set_mode,
};
-static struct regulator_ops as3722_sd016_extcntrl_ops = {
+static const struct regulator_ops as3722_sd016_extcntrl_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -641,7 +666,7 @@ static struct regulator_ops as3722_sd016_extcntrl_ops = {
.set_mode = as3722_sd_set_mode,
};
-static struct regulator_ops as3722_sd2345_ops = {
+static const struct regulator_ops as3722_sd2345_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
@@ -653,7 +678,7 @@ static struct regulator_ops as3722_sd2345_ops = {
.set_mode = as3722_sd_set_mode,
};
-static struct regulator_ops as3722_sd2345_extcntrl_ops = {
+static const struct regulator_ops as3722_sd2345_extcntrl_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -760,7 +785,7 @@ static int as3722_regulator_probe(struct platform_device *pdev)
struct as3722_regulator_config_data *reg_config;
struct regulator_dev *rdev;
struct regulator_config config = { };
- struct regulator_ops *ops;
+ const struct regulator_ops *ops;
int id;
int ret;
@@ -829,6 +854,24 @@ static int as3722_regulator_probe(struct platform_device *pdev)
}
}
break;
+ case AS3722_REGULATOR_ID_LDO6:
+ if (reg_config->ext_control)
+ ops = &as3722_ldo6_extcntrl_ops;
+ else
+ ops = &as3722_ldo6_ops;
+ as3722_regs->desc[id].enable_time = 500;
+ as3722_regs->desc[id].bypass_reg =
+ AS3722_LDO6_VOLTAGE_REG;
+ as3722_regs->desc[id].bypass_mask =
+ AS3722_LDO_VSEL_MASK;
+ as3722_regs->desc[id].bypass_val_on =
+ AS3722_LDO6_VSEL_BYPASS;
+ as3722_regs->desc[id].bypass_val_off =
+ AS3722_LDO6_VSEL_BYPASS;
+ as3722_regs->desc[id].linear_ranges = as3722_ldo_ranges;
+ as3722_regs->desc[id].n_linear_ranges =
+ ARRAY_SIZE(as3722_ldo_ranges);
+ break;
case AS3722_REGULATOR_ID_SD0:
case AS3722_REGULATOR_ID_SD1:
case AS3722_REGULATOR_ID_SD6:
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index fd0e4e37f..ec8184d53 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -132,6 +132,19 @@ static bool have_full_constraints(void)
return has_full_constraints || of_have_populated_dt();
}
+static bool regulator_ops_is_valid(struct regulator_dev *rdev, int ops)
+{
+ if (!rdev->constraints) {
+ rdev_err(rdev, "no constraints\n");
+ return false;
+ }
+
+ if (rdev->constraints->valid_ops_mask & ops)
+ return true;
+
+ return false;
+}
+
static inline struct regulator_dev *rdev_get_supply(struct regulator_dev *rdev)
{
if (rdev && rdev->supply)
@@ -198,28 +211,13 @@ static struct device_node *of_get_regulator(struct device *dev, const char *supp
return regnode;
}
-static int _regulator_can_change_status(struct regulator_dev *rdev)
-{
- if (!rdev->constraints)
- return 0;
-
- if (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_STATUS)
- return 1;
- else
- return 0;
-}
-
/* Platform voltage constraint check */
static int regulator_check_voltage(struct regulator_dev *rdev,
int *min_uV, int *max_uV)
{
BUG_ON(*min_uV > *max_uV);
- if (!rdev->constraints) {
- rdev_err(rdev, "no constraints\n");
- return -ENODEV;
- }
- if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
rdev_err(rdev, "voltage operation not allowed\n");
return -EPERM;
}
@@ -275,11 +273,7 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
{
BUG_ON(*min_uA > *max_uA);
- if (!rdev->constraints) {
- rdev_err(rdev, "no constraints\n");
- return -ENODEV;
- }
- if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_CURRENT)) {
rdev_err(rdev, "current operation not allowed\n");
return -EPERM;
}
@@ -312,11 +306,7 @@ static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
return -EINVAL;
}
- if (!rdev->constraints) {
- rdev_err(rdev, "no constraints\n");
- return -ENODEV;
- }
- if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_MODE)) {
rdev_err(rdev, "mode operation not allowed\n");
return -EPERM;
}
@@ -333,20 +323,6 @@ static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
return -EINVAL;
}
-/* dynamic regulator mode switching constraint check */
-static int regulator_check_drms(struct regulator_dev *rdev)
-{
- if (!rdev->constraints) {
- rdev_err(rdev, "no constraints\n");
- return -ENODEV;
- }
- if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
- rdev_dbg(rdev, "drms operation not allowed\n");
- return -EPERM;
- }
- return 0;
-}
-
static ssize_t regulator_uV_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -692,8 +668,7 @@ static int drms_uA_update(struct regulator_dev *rdev)
* first check to see if we can set modes at all, otherwise just
* tell the consumer everything is OK.
*/
- err = regulator_check_drms(rdev);
- if (err < 0)
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS))
return 0;
if (!rdev->desc->ops->get_optimum_mode &&
@@ -808,8 +783,6 @@ static int suspend_set_state(struct regulator_dev *rdev,
/* locks held by caller */
static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
{
- lockdep_assert_held_once(&rdev->mutex);
-
if (!rdev->constraints)
return -EINVAL;
@@ -893,7 +866,7 @@ static void print_constraints(struct regulator_dev *rdev)
rdev_dbg(rdev, "%s\n", buf);
if ((constraints->min_uV != constraints->max_uV) &&
- !(constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE))
+ !regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE))
rdev_warn(rdev,
"Voltage range but no REGULATOR_CHANGE_VOLTAGE\n");
}
@@ -906,7 +879,8 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
/* do we need to apply the constraint voltage */
if (rdev->constraints->apply_uV &&
- rdev->constraints->min_uV == rdev->constraints->max_uV) {
+ rdev->constraints->min_uV && rdev->constraints->max_uV) {
+ int target_min, target_max;
int current_uV = _regulator_get_voltage(rdev);
if (current_uV < 0) {
rdev_err(rdev,
@@ -914,15 +888,34 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
current_uV);
return current_uV;
}
- if (current_uV < rdev->constraints->min_uV ||
- current_uV > rdev->constraints->max_uV) {
+
+ /*
+ * If we're below the minimum voltage move up to the
+ * minimum voltage, if we're above the maximum voltage
+ * then move down to the maximum.
+ */
+ target_min = current_uV;
+ target_max = current_uV;
+
+ if (current_uV < rdev->constraints->min_uV) {
+ target_min = rdev->constraints->min_uV;
+ target_max = rdev->constraints->min_uV;
+ }
+
+ if (current_uV > rdev->constraints->max_uV) {
+ target_min = rdev->constraints->max_uV;
+ target_max = rdev->constraints->max_uV;
+ }
+
+ if (target_min != current_uV || target_max != current_uV) {
+ rdev_info(rdev, "Bringing %duV into %d-%duV\n",
+ current_uV, target_min, target_max);
ret = _regulator_do_set_voltage(
- rdev, rdev->constraints->min_uV,
- rdev->constraints->max_uV);
+ rdev, target_min, target_max);
if (ret < 0) {
rdev_err(rdev,
- "failed to apply %duV constraint(%d)\n",
- rdev->constraints->min_uV, ret);
+ "failed to apply %d-%duV constraint(%d)\n",
+ target_min, target_max, ret);
return ret;
}
}
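The hunk above changes the apply_uV behaviour from "out-of-range is applied as min..max" to an explicit clamp toward the nearest constraint edge: a voltage below the minimum is raised to the minimum, one above the maximum is lowered to the maximum, and an in-range voltage is left alone. A standalone sketch of that arithmetic with made-up voltages (a 3.3 V boot value pulled into a 1.8-2.8 V constraint):

#include <stdio.h>

static void clamp_into_range(int current_uV, int min_uV, int max_uV,
                             int *target_min, int *target_max)
{
        *target_min = current_uV;
        *target_max = current_uV;

        if (current_uV < min_uV)
                *target_min = *target_max = min_uV;
        if (current_uV > max_uV)
                *target_min = *target_max = max_uV;
}

int main(void)
{
        int tmin, tmax;

        clamp_into_range(3300000, 1800000, 2800000, &tmin, &tmax);
        printf("Bringing 3300000uV into %d-%duV\n", tmin, tmax);
        /* prints: Bringing 3300000uV into 2800000-2800000uV */
        return 0;
}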
@@ -1150,17 +1143,6 @@ static int set_machine_constraints(struct regulator_dev *rdev,
}
}
- if (rdev->constraints->active_discharge && ops->set_active_discharge) {
- bool ad_state = (rdev->constraints->active_discharge ==
- REGULATOR_ACTIVE_DISCHARGE_ENABLE) ? true : false;
-
- ret = ops->set_active_discharge(rdev, ad_state);
- if (ret < 0) {
- rdev_err(rdev, "failed to set active discharge\n");
- return ret;
- }
- }
-
print_constraints(rdev);
return 0;
}
@@ -1272,6 +1254,55 @@ static void unset_regulator_supplies(struct regulator_dev *rdev)
}
}
+#ifdef CONFIG_DEBUG_FS
+static ssize_t constraint_flags_read_file(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const struct regulator *regulator = file->private_data;
+ const struct regulation_constraints *c = regulator->rdev->constraints;
+ char *buf;
+ ssize_t ret;
+
+ if (!c)
+ return 0;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = snprintf(buf, PAGE_SIZE,
+ "always_on: %u\n"
+ "boot_on: %u\n"
+ "apply_uV: %u\n"
+ "ramp_disable: %u\n"
+ "soft_start: %u\n"
+ "pull_down: %u\n"
+ "over_current_protection: %u\n",
+ c->always_on,
+ c->boot_on,
+ c->apply_uV,
+ c->ramp_disable,
+ c->soft_start,
+ c->pull_down,
+ c->over_current_protection);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+ kfree(buf);
+
+ return ret;
+}
+
+#endif
+
+static const struct file_operations constraint_flags_fops = {
+#ifdef CONFIG_DEBUG_FS
+ .open = simple_open,
+ .read = constraint_flags_read_file,
+ .llseek = default_llseek,
+#endif
+};
+
#define REG_STR_SIZE 64
static struct regulator *create_regulator(struct regulator_dev *rdev,
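constraint_flags_read_file() above follows the usual debugfs pattern: render a complete snapshot of the flags into a temporary buffer, then let simple_read_from_buffer() hand out the slice at *ppos. A userspace sketch of that windowed copy; read_from_snapshot() is a hypothetical stand-in, not a kernel API:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t read_from_snapshot(const char *snap, size_t snap_len,
                                  char *out, size_t count, long *ppos)
{
        if ((size_t)*ppos >= snap_len)
                return 0;                       /* EOF */
        if (count > snap_len - (size_t)*ppos)
                count = snap_len - (size_t)*ppos;
        memcpy(out, snap + *ppos, count);       /* copy the window at *ppos */
        *ppos += count;
        return count;
}

int main(void)
{
        char snap[64], out[8];
        long pos = 0;
        ssize_t got;
        int n = snprintf(snap, sizeof(snap), "always_on: 1\nboot_on: 0\n");

        while ((got = read_from_snapshot(snap, n, out, sizeof(out), &pos)) > 0)
                fwrite(out, 1, got, stdout);
        return 0;
}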
@@ -1327,6 +1358,9 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
&regulator->min_uV);
debugfs_create_u32("max_uV", 0444, regulator->debugfs,
&regulator->max_uV);
+ debugfs_create_file("constraint_flags", 0444,
+ regulator->debugfs, regulator,
+ &constraint_flags_fops);
}
/*
@@ -1334,7 +1368,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
* it is then we don't need to do nearly so much work for
* enable/disable calls.
*/
- if (!_regulator_can_change_status(rdev) &&
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS) &&
_regulator_is_enabled(rdev))
regulator->always_on = true;
@@ -1532,10 +1566,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
}
/* Cascade always-on state to supply */
- if (_regulator_is_enabled(rdev) && rdev->supply) {
+ if (_regulator_is_enabled(rdev)) {
ret = regulator_enable(rdev->supply);
if (ret < 0) {
_regulator_put(rdev->supply);
+ rdev->supply = NULL;
return ret;
}
}
@@ -2111,15 +2146,15 @@ static int _regulator_enable(struct regulator_dev *rdev)
lockdep_assert_held_once(&rdev->mutex);
/* check voltage and requested load before enabling */
- if (rdev->constraints &&
- (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS))
+ if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS))
drms_uA_update(rdev);
if (rdev->use_count == 0) {
/* The regulator may be on if it's not switchable or was left on */
ret = _regulator_is_enabled(rdev);
if (ret == -EINVAL || ret == 0) {
- if (!_regulator_can_change_status(rdev))
+ if (!regulator_ops_is_valid(rdev,
+ REGULATOR_CHANGE_STATUS))
return -EPERM;
ret = _regulator_do_enable(rdev);
@@ -2221,7 +2256,7 @@ static int _regulator_disable(struct regulator_dev *rdev)
(rdev->constraints && !rdev->constraints->always_on)) {
/* we are last user */
- if (_regulator_can_change_status(rdev)) {
+ if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS)) {
ret = _notifier_call_chain(rdev,
REGULATOR_EVENT_PRE_DISABLE,
NULL);
@@ -2242,10 +2277,7 @@ static int _regulator_disable(struct regulator_dev *rdev)
rdev->use_count = 0;
} else if (rdev->use_count > 1) {
-
- if (rdev->constraints &&
- (rdev->constraints->valid_ops_mask &
- REGULATOR_CHANGE_DRMS))
+ if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS))
drms_uA_update(rdev);
rdev->use_count--;
@@ -2489,8 +2521,7 @@ int regulator_can_change_voltage(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
- if (rdev->constraints &&
- (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+ if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
if (rdev->desc->n_voltages - rdev->desc->linear_min_sel > 1)
return 1;
@@ -2644,7 +2675,7 @@ int regulator_is_supported_voltage(struct regulator *regulator,
int i, voltages, ret;
/* If we can't change voltage check the current voltage */
- if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
ret = regulator_get_voltage(regulator);
if (ret >= 0)
return min_uV <= ret && ret <= max_uV;
@@ -2850,7 +2881,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
* return successfully even though the regulator does not support
* changing the voltage.
*/
- if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE)) {
current_uV = _regulator_get_voltage(rdev);
if (min_uV <= current_uV && current_uV <= max_uV) {
regulator->min_uV = min_uV;
@@ -3109,6 +3140,23 @@ EXPORT_SYMBOL_GPL(regulator_sync_voltage);
static int _regulator_get_voltage(struct regulator_dev *rdev)
{
int sel, ret;
+ bool bypassed;
+
+ if (rdev->desc->ops->get_bypass) {
+ ret = rdev->desc->ops->get_bypass(rdev, &bypassed);
+ if (ret < 0)
+ return ret;
+ if (bypassed) {
+ /* if bypassed the regulator must have a supply */
+ if (!rdev->supply) {
+ rdev_err(rdev,
+ "bypassed regulator has no supply!\n");
+ return -EPROBE_DEFER;
+ }
+
+ return _regulator_get_voltage(rdev->supply->rdev);
+ }
+ }
if (rdev->desc->ops->get_voltage_sel) {
sel = rdev->desc->ops->get_voltage_sel(rdev);
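The bypass walk added at the top of _regulator_get_voltage() above makes a bypassed regulator report its supply's voltage rather than its own selector, deferring probe if the supply is not resolved yet. A toy model of the recursion, assuming an illustrative two-regulator chain:

#include <stdio.h>

struct reg {
        const char *name;
        int bypassed;
        int own_uV;
        struct reg *supply;
};

static int get_voltage(const struct reg *r)
{
        if (r->bypassed && r->supply)
                return get_voltage(r->supply); /* walk up the supply chain */
        return r->own_uV;
}

int main(void)
{
        struct reg root = { "vsys", 0, 3300000, NULL };
        struct reg ldo  = { "ldo6", 1, 1800000, &root };

        printf("%s -> %duV\n", ldo.name, get_voltage(&ldo)); /* 3300000 */
        return 0;
}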
@@ -3365,8 +3413,7 @@ int regulator_allow_bypass(struct regulator *regulator, bool enable)
if (!rdev->desc->ops->set_bypass)
return 0;
- if (rdev->constraints &&
- !(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_BYPASS))
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_BYPASS))
return 0;
mutex_lock(&rdev->mutex);
@@ -3842,7 +3889,12 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
static int regulator_register_resolve_supply(struct device *dev, void *data)
{
- return regulator_resolve_supply(dev_to_rdev(dev));
+ struct regulator_dev *rdev = dev_to_rdev(dev);
+
+ if (regulator_resolve_supply(rdev))
+ rdev_dbg(rdev, "unable to resolve supply\n");
+
+ return 0;
}
/**
@@ -3916,8 +3968,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
rdev->dev.of_node = of_node_get(config->of_node);
}
- mutex_lock(&regulator_list_mutex);
-
mutex_init(&rdev->mutex);
rdev->reg_data = config->driver_data;
rdev->owner = regulator_desc->owner;
@@ -3942,7 +3992,9 @@ regulator_register(const struct regulator_desc *regulator_desc,
if ((config->ena_gpio || config->ena_gpio_initialized) &&
gpio_is_valid(config->ena_gpio)) {
+ mutex_lock(&regulator_list_mutex);
ret = regulator_ena_gpio_request(rdev, config);
+ mutex_unlock(&regulator_list_mutex);
if (ret != 0) {
rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
config->ena_gpio, ret);
@@ -3955,43 +4007,53 @@ regulator_register(const struct regulator_desc *regulator_desc,
rdev->dev.parent = dev;
dev_set_name(&rdev->dev, "regulator.%lu",
(unsigned long) atomic_inc_return(&regulator_no));
- ret = device_register(&rdev->dev);
- if (ret != 0) {
- put_device(&rdev->dev);
- goto wash;
- }
-
- dev_set_drvdata(&rdev->dev, rdev);
/* set regulator constraints */
if (init_data)
constraints = &init_data->constraints;
- ret = set_machine_constraints(rdev, constraints);
- if (ret < 0)
- goto scrub;
-
if (init_data && init_data->supply_regulator)
rdev->supply_name = init_data->supply_regulator;
else if (regulator_desc->supply_name)
rdev->supply_name = regulator_desc->supply_name;
+ /*
+ * Attempt to resolve the regulator supply, if specified,
+ * but don't return an error if we fail because we will try
+ * to resolve it again later as more regulators are added.
+ */
+ if (regulator_resolve_supply(rdev))
+ rdev_dbg(rdev, "unable to resolve supply\n");
+
+ ret = set_machine_constraints(rdev, constraints);
+ if (ret < 0)
+ goto wash;
+
/* add consumers devices */
if (init_data) {
+ mutex_lock(&regulator_list_mutex);
for (i = 0; i < init_data->num_consumer_supplies; i++) {
ret = set_consumer_device_supply(rdev,
init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
if (ret < 0) {
+ mutex_unlock(&regulator_list_mutex);
dev_err(dev, "Failed to set supply %s\n",
init_data->consumer_supplies[i].supply);
goto unset_supplies;
}
}
+ mutex_unlock(&regulator_list_mutex);
}
+ ret = device_register(&rdev->dev);
+ if (ret != 0) {
+ put_device(&rdev->dev);
+ goto unset_supplies;
+ }
+
+ dev_set_drvdata(&rdev->dev, rdev);
rdev_init_debugfs(rdev);
- mutex_unlock(&regulator_list_mutex);
/* try to resolve regulators supply since a new one was registered */
class_for_each_device(&regulator_class, NULL, NULL,
@@ -4000,20 +4062,16 @@ regulator_register(const struct regulator_desc *regulator_desc,
return rdev;
unset_supplies:
+ mutex_lock(&regulator_list_mutex);
unset_regulator_supplies(rdev);
-
-scrub:
- regulator_ena_gpio_free(rdev);
- device_unregister(&rdev->dev);
- /* device core frees rdev */
- goto out;
-
+ mutex_unlock(&regulator_list_mutex);
wash:
+ kfree(rdev->constraints);
+ mutex_lock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
+ mutex_unlock(&regulator_list_mutex);
clean:
kfree(rdev);
-out:
- mutex_unlock(&regulator_list_mutex);
kfree(config);
return ERR_PTR(ret);
}
@@ -4041,8 +4099,8 @@ void regulator_unregister(struct regulator_dev *rdev)
WARN_ON(rdev->open_count);
unset_regulator_supplies(rdev);
list_del(&rdev->list);
- mutex_unlock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
+ mutex_unlock(&regulator_list_mutex);
device_unregister(&rdev->dev);
}
EXPORT_SYMBOL_GPL(regulator_unregister);
@@ -4395,7 +4453,7 @@ static int __init regulator_late_cleanup(struct device *dev, void *data)
if (c && c->always_on)
return 0;
- if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS))
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_STATUS))
return 0;
mutex_lock(&rdev->mutex);
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 2cb5cc311..d7da81a87 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -65,6 +65,13 @@ enum {
FAN53555_CHIP_ID_03,
FAN53555_CHIP_ID_04,
FAN53555_CHIP_ID_05,
+ FAN53555_CHIP_ID_08 = 8,
+};
+
+/* IC mask revision */
+enum {
+ FAN53555_CHIP_REV_00 = 0x3,
+ FAN53555_CHIP_REV_13 = 0xf,
};
enum {
@@ -217,9 +224,26 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
/* Init voltage range and step */
switch (di->chip_id) {
case FAN53555_CHIP_ID_00:
+ switch (di->chip_rev) {
+ case FAN53555_CHIP_REV_00:
+ di->vsel_min = 600000;
+ di->vsel_step = 10000;
+ break;
+ case FAN53555_CHIP_REV_13:
+ di->vsel_min = 800000;
+ di->vsel_step = 10000;
+ break;
+ default:
+ dev_err(di->dev,
+ "Chip ID %d with rev %d not supported!\n",
+ di->chip_id, di->chip_rev);
+ return -EINVAL;
+ }
+ break;
case FAN53555_CHIP_ID_01:
case FAN53555_CHIP_ID_03:
case FAN53555_CHIP_ID_05:
+ case FAN53555_CHIP_ID_08:
di->vsel_min = 600000;
di->vsel_step = 10000;
break;
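The fan53555 hunk above keys the voltage table on both the chip ID and, for ID 00, the IC mask revision. The same dispatch can be pictured as a small lookup table; values mirror the hunk, and -1 is used here to mean "any revision":

#include <stdio.h>

struct vsel_map { int id, rev, vsel_min, vsel_step; };

static const struct vsel_map maps[] = {
        { 0, 0x3, 600000, 10000 },      /* ID_00, REV_00 */
        { 0, 0xf, 800000, 10000 },      /* ID_00, REV_13 */
        { 1,  -1, 600000, 10000 },      /* ID_01, any revision */
        { 3,  -1, 600000, 10000 },
        { 5,  -1, 600000, 10000 },
        { 8,  -1, 600000, 10000 },      /* ID_08 */
};

static const struct vsel_map *lookup(int id, int rev)
{
        size_t i;

        for (i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
                if (maps[i].id == id && (maps[i].rev < 0 || maps[i].rev == rev))
                        return &maps[i];
        return NULL;    /* unsupported combination -> -EINVAL in the driver */
}

int main(void)
{
        const struct vsel_map *m = lookup(0, 0xf);

        if (m)
                printf("vsel_min=%d step=%d\n", m->vsel_min, m->vsel_step);
        return 0;
}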
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index b1e32e748..bcf38fd51 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -460,7 +460,7 @@ int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable)
if (ret != 0)
return ret;
- *enable = val & rdev->desc->bypass_mask;
+ *enable = (val & rdev->desc->bypass_mask) == rdev->desc->bypass_val_on;
return 0;
}
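This one-line helpers.c fix matters whenever the bypass field is wider than one bit, as in the as3722 LDO6 change earlier in this diff, where bypass_mask covers the whole VSEL field and bypass_val_on is one specific code. "Any bit under the mask is set" is then not the same as "bypassed". A minimal demonstration with illustrative values:

#include <stdio.h>

#define BYPASS_MASK   0x7f      /* whole VSEL field */
#define BYPASS_VAL_ON 0x3f      /* the one code that means "bypass" */

int main(void)
{
        unsigned int val = 0x20;        /* an ordinary voltage selector */

        printf("old test: %d\n", !!(val & BYPASS_MASK));                /* 1: wrong   */
        printf("new test: %d\n", (val & BYPASS_MASK) == BYPASS_VAL_ON); /* 0: correct */
        return 0;
}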
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 15c25c622..204b5c527 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -365,8 +365,8 @@ static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val)
mutex_lock(&lp3971->io_lock);
ret = lp3971_i2c_read(lp3971->i2c, reg, 1, &tmp);
- tmp = (tmp & ~mask) | val;
if (ret == 0) {
+ tmp = (tmp & ~mask) | val;
ret = lp3971_i2c_write(lp3971->i2c, reg, 1, &tmp);
dev_dbg(lp3971->dev, "reg write 0x%02x -> 0x%02x\n", (int)reg,
(unsigned)val&0xff);
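The lp3971 fix above (repeated for lp3972 below) is the classic read-modify-write rule: fold the new bits into the read-back value only after the read has succeeded, otherwise stale data would be written back to the register. A compact standalone sketch, with read_reg() as a hypothetical fallible read:

#include <stdio.h>

static int read_reg(int fail, unsigned char *out)
{
        if (fail)
                return -5;      /* -EIO */
        *out = 0xa5;
        return 0;
}

static int set_bits(int fail, unsigned char mask, unsigned char val,
                    unsigned char *written)
{
        unsigned char tmp = 0;  /* would hold stale data on a failed read */
        int ret = read_reg(fail, &tmp);

        if (ret == 0) {
                tmp = (tmp & ~mask) | val;      /* modify only on success */
                *written = tmp;
        }
        return ret;
}

int main(void)
{
        unsigned char w = 0;
        int ret;

        ret = set_bits(1, 0x0f, 0x03, &w);
        printf("fail: ret=%d, register untouched (0x%02x)\n", ret, w);
        ret = set_bits(0, 0x0f, 0x03, &w);
        printf("ok:   ret=%d, wrote 0x%02x\n", ret, w); /* 0xa3 */
        return 0;
}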
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index 3a7e96e2c..ff0c275f9 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -211,8 +211,8 @@ static int lp3972_set_bits(struct lp3972 *lp3972, u8 reg, u16 mask, u16 val)
mutex_lock(&lp3972->io_lock);
ret = lp3972_i2c_read(lp3972->i2c, reg, 1, &tmp);
- tmp = (tmp & ~mask) | val;
if (ret == 0) {
+ tmp = (tmp & ~mask) | val;
ret = lp3972_i2c_write(lp3972->i2c, reg, 1, &tmp);
dev_dbg(lp3972->dev, "reg write 0x%02x -> 0x%02x\n", (int)reg,
(unsigned)val & 0xff);
diff --git a/drivers/regulator/lp873x-regulator.c b/drivers/regulator/lp873x-regulator.c
new file mode 100644
index 000000000..b4ffd113b
--- /dev/null
+++ b/drivers/regulator/lp873x-regulator.c
@@ -0,0 +1,241 @@
+/*
+ * Regulator driver for LP873X PMIC
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License version 2 for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <linux/mfd/lp873x.h>
+
+#define LP873X_REGULATOR(_name, _id, _of, _ops, _n, _vr, _vm, _er, _em, \
+ _delay, _lr, _nlr, _cr) \
+ [_id] = { \
+ .desc = { \
+ .name = _name, \
+ .id = _id, \
+ .of_match = of_match_ptr(_of), \
+ .regulators_node = of_match_ptr("regulators"),\
+ .ops = &_ops, \
+ .n_voltages = _n, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .vsel_reg = _vr, \
+ .vsel_mask = _vm, \
+ .enable_reg = _er, \
+ .enable_mask = _em, \
+ .ramp_delay = _delay, \
+ .linear_ranges = _lr, \
+ .n_linear_ranges = _nlr, \
+ }, \
+ .ctrl2_reg = _cr, \
+ }
+
+struct lp873x_regulator {
+ struct regulator_desc desc;
+ unsigned int ctrl2_reg;
+};
+
+static const struct lp873x_regulator regulators[];
+
+static const struct regulator_linear_range buck0_buck1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(0, 0x0, 0x13, 0),
+ REGULATOR_LINEAR_RANGE(700000, 0x14, 0x17, 10000),
+ REGULATOR_LINEAR_RANGE(735000, 0x18, 0x9d, 5000),
+ REGULATOR_LINEAR_RANGE(1420000, 0x9e, 0xff, 20000),
+};
+
+static const struct regulator_linear_range ldo0_ldo1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(800000, 0x0, 0x19, 100000),
+};
+
+static unsigned int lp873x_buck_ramp_delay[] = {
+ 30000, 15000, 10000, 7500, 3800, 1900, 940, 470
+};
+
+/* LP873X BUCK current limit */
+static const unsigned int lp873x_buck_uA[] = {
+ 1500000, 2000000, 2500000, 3000000, 3500000, 4000000,
+};
+
+static int lp873x_buck_set_ramp_delay(struct regulator_dev *rdev,
+ int ramp_delay)
+{
+ int id = rdev_get_id(rdev);
+ struct lp873x *lp873 = rdev_get_drvdata(rdev);
+ unsigned int reg;
+ int ret;
+
+ if (ramp_delay <= 470)
+ reg = 7;
+ else if (ramp_delay <= 940)
+ reg = 6;
+ else if (ramp_delay <= 1900)
+ reg = 5;
+ else if (ramp_delay <= 3800)
+ reg = 4;
+ else if (ramp_delay <= 7500)
+ reg = 3;
+ else if (ramp_delay <= 10000)
+ reg = 2;
+ else if (ramp_delay <= 15000)
+ reg = 1;
+ else
+ reg = 0;
+
+ ret = regmap_update_bits(lp873->regmap, regulators[id].ctrl2_reg,
+ LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE,
+ reg << __ffs(LP873X_BUCK0_CTRL_2_BUCK0_SLEW_RATE));
+ if (ret) {
+ dev_err(lp873->dev, "SLEW RATE write failed: %d\n", ret);
+ return ret;
+ }
+
+ rdev->constraints->ramp_delay = lp873x_buck_ramp_delay[reg];
+
+ return 0;
+}
+
+static int lp873x_buck_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ int id = rdev_get_id(rdev);
+ struct lp873x *lp873 = rdev_get_drvdata(rdev);
+ int i;
+
+ for (i = ARRAY_SIZE(lp873x_buck_uA) - 1; i >= 0; i--) {
+ if (lp873x_buck_uA[i] >= min_uA &&
+ lp873x_buck_uA[i] <= max_uA)
+ return regmap_update_bits(lp873->regmap,
+ regulators[id].ctrl2_reg,
+ LP873X_BUCK0_CTRL_2_BUCK0_ILIM,
+ i << __ffs(LP873X_BUCK0_CTRL_2_BUCK0_ILIM));
+ }
+
+ return -EINVAL;
+}
+
+static int lp873x_buck_get_current_limit(struct regulator_dev *rdev)
+{
+ int id = rdev_get_id(rdev);
+ struct lp873x *lp873 = rdev_get_drvdata(rdev);
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(lp873->regmap, regulators[id].ctrl2_reg, &val);
+ if (ret)
+ return ret;
+
+ val = (val & LP873X_BUCK0_CTRL_2_BUCK0_ILIM) >>
+ __ffs(LP873X_BUCK0_CTRL_2_BUCK0_ILIM);
+
+ return (val < ARRAY_SIZE(lp873x_buck_uA)) ?
+ lp873x_buck_uA[val] : -EINVAL;
+}
+
+/* Operations permitted on BUCK0, BUCK1 */
+static struct regulator_ops lp873x_buck01_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = lp873x_buck_set_ramp_delay,
+ .set_current_limit = lp873x_buck_set_current_limit,
+ .get_current_limit = lp873x_buck_get_current_limit,
+};
+
+/* Operations permitted on LDO0 and LDO1 */
+static struct regulator_ops lp873x_ldo01_ops = {
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+static const struct lp873x_regulator regulators[] = {
+ LP873X_REGULATOR("BUCK0", LP873X_BUCK_0, "buck0", lp873x_buck01_ops,
+ 256, LP873X_REG_BUCK0_VOUT,
+ LP873X_BUCK0_VOUT_BUCK0_VSET, LP873X_REG_BUCK0_CTRL_1,
+ LP873X_BUCK0_CTRL_1_BUCK0_EN, 10000,
+ buck0_buck1_ranges, 4, LP873X_REG_BUCK0_CTRL_2),
+ LP873X_REGULATOR("BUCK1", LP873X_BUCK_1, "buck1", lp873x_buck01_ops,
+ 256, LP873X_REG_BUCK1_VOUT,
+ LP873X_BUCK1_VOUT_BUCK1_VSET, LP873X_REG_BUCK1_CTRL_1,
+ LP873X_BUCK1_CTRL_1_BUCK1_EN, 10000,
+ buck0_buck1_ranges, 4, LP873X_REG_BUCK1_CTRL_2),
+ LP873X_REGULATOR("LDO0", LP873X_LDO_0, "ldo0", lp873x_ldo01_ops, 26,
+ LP873X_REG_LDO0_VOUT, LP873X_LDO0_VOUT_LDO0_VSET,
+ LP873X_REG_LDO0_CTRL,
+ LP873X_LDO0_CTRL_LDO0_EN, 0, ldo0_ldo1_ranges, 1,
+ 0xFF),
+ LP873X_REGULATOR("LDO1", LP873X_LDO_1, "ldo1", lp873x_ldo01_ops, 26,
+ LP873X_REG_LDO1_VOUT, LP873X_LDO1_VOUT_LDO1_VSET,
+ LP873X_REG_LDO1_CTRL,
+ LP873X_LDO1_CTRL_LDO1_EN, 0, ldo0_ldo1_ranges, 1,
+ 0xFF),
+};
+
+static int lp873x_regulator_probe(struct platform_device *pdev)
+{
+ struct lp873x *lp873 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+ int i;
+
+ platform_set_drvdata(pdev, lp873);
+
+ config.dev = &pdev->dev;
+ config.dev->of_node = lp873->dev->of_node;
+ config.driver_data = lp873;
+ config.regmap = lp873->regmap;
+
+ for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i].desc,
+ &config);
+ if (IS_ERR(rdev)) {
+ dev_err(lp873->dev, "failed to register %s regulator\n",
+ pdev->name);
+ return PTR_ERR(rdev);
+ }
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id lp873x_regulator_id_table[] = {
+ { "lp873x-regulator", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, lp873x_regulator_id_table);
+
+static struct platform_driver lp873x_regulator_driver = {
+ .driver = {
+ .name = "lp873x-pmic",
+ },
+ .probe = lp873x_regulator_probe,
+ .id_table = lp873x_regulator_id_table,
+};
+module_platform_driver(lp873x_regulator_driver);
+
+MODULE_AUTHOR("J Keerthy <j-keerthy@ti.com>");
+MODULE_DESCRIPTION("LP873X voltage regulator driver");
+MODULE_ALIAS("platform:lp873x-pmic");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577-regulator.c
index b2daa6641..b2daa6641 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577-regulator.c
diff --git a/drivers/regulator/max77620-regulator.c b/drivers/regulator/max77620-regulator.c
index 73a3356a5..a1b49a6d5 100644
--- a/drivers/regulator/max77620-regulator.c
+++ b/drivers/regulator/max77620-regulator.c
@@ -81,6 +81,7 @@ struct max77620_regulator_pdata {
int suspend_fps_pd_slot;
int suspend_fps_pu_slot;
int current_mode;
+ int ramp_rate_setting;
};
struct max77620_regulator {
@@ -122,6 +123,9 @@ static int max77620_regulator_set_fps_src(struct max77620_regulator *pmic,
unsigned int val;
int ret;
+ if (!rinfo)
+ return 0;
+
switch (fps_src) {
case MAX77620_FPS_SRC_0:
case MAX77620_FPS_SRC_1:
@@ -170,6 +174,9 @@ static int max77620_regulator_set_fps_slots(struct max77620_regulator *pmic,
int pd = rpdata->active_fps_pd_slot;
int ret = 0;
+ if (!rinfo)
+ return 0;
+
if (is_suspend) {
pu = rpdata->suspend_fps_pu_slot;
pd = rpdata->suspend_fps_pd_slot;
@@ -307,6 +314,43 @@ static int max77620_read_slew_rate(struct max77620_regulator *pmic, int id)
return 0;
}
+static int max77620_set_slew_rate(struct max77620_regulator *pmic, int id,
+ int slew_rate)
+{
+ struct max77620_regulator_info *rinfo = pmic->rinfo[id];
+ unsigned int val;
+ int ret;
+ u8 mask;
+
+ if (rinfo->type == MAX77620_REGULATOR_TYPE_SD) {
+ if (slew_rate <= 13750)
+ val = 0;
+ else if (slew_rate <= 27500)
+ val = 1;
+ else if (slew_rate <= 55000)
+ val = 2;
+ else
+ val = 3;
+ val <<= MAX77620_SD_SR_SHIFT;
+ mask = MAX77620_SD_SR_MASK;
+ } else {
+ if (slew_rate <= 5000)
+ val = 1;
+ else
+ val = 0;
+ mask = MAX77620_LDO_SLEW_RATE_MASK;
+ }
+
+ ret = regmap_update_bits(pmic->rmap, rinfo->cfg_addr, mask, val);
+ if (ret < 0) {
+ dev_err(pmic->dev, "Regulator %d slew rate set failed: %d\n",
+ id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int max77620_init_pmic(struct max77620_regulator *pmic, int id)
{
struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id];
@@ -351,6 +395,13 @@ static int max77620_init_pmic(struct max77620_regulator *pmic, int id)
if (ret < 0)
return ret;
+ if (rpdata->ramp_rate_setting) {
+ ret = max77620_set_slew_rate(pmic, id,
+ rpdata->ramp_rate_setting);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -502,35 +553,16 @@ static int max77620_regulator_set_ramp_delay(struct regulator_dev *rdev,
{
struct max77620_regulator *pmic = rdev_get_drvdata(rdev);
int id = rdev_get_id(rdev);
- struct max77620_regulator_info *rinfo = pmic->rinfo[id];
- int ret, val;
- u8 mask;
-
- if (rinfo->type == MAX77620_REGULATOR_TYPE_SD) {
- if (ramp_delay <= 13750)
- val = 0;
- else if (ramp_delay <= 27500)
- val = 1;
- else if (ramp_delay <= 55000)
- val = 2;
- else
- val = 3;
- val <<= MAX77620_SD_SR_SHIFT;
- mask = MAX77620_SD_SR_MASK;
- } else {
- if (ramp_delay <= 5000)
- val = 1;
- else
- val = 0;
- mask = MAX77620_LDO_SLEW_RATE_MASK;
- }
+ struct max77620_regulator_pdata *rpdata = &pmic->reg_pdata[id];
- ret = regmap_update_bits(pmic->rmap, rinfo->cfg_addr, mask, val);
- if (ret < 0)
- dev_err(pmic->dev, "Reg 0x%02x update failed: %d\n",
- rinfo->cfg_addr, ret);
+	/* A device-specific ramp rate setting means the platform's
+	 * actual ramp rate differs from the advertised value. In that
+	 * case, do not configure anything and just return success.
+	 */
+ if (rpdata->ramp_rate_setting)
+ return 0;
- return ret;
+ return max77620_set_slew_rate(pmic, id, ramp_delay);
}
static int max77620_of_parse_cb(struct device_node *np,
@@ -563,6 +595,9 @@ static int max77620_of_parse_cb(struct device_node *np,
np, "maxim,suspend-fps-power-down-slot", &pval);
rpdata->suspend_fps_pd_slot = (!ret) ? pval : -1;
+ ret = of_property_read_u32(np, "maxim,ramp-rate-setting", &pval);
+ rpdata->ramp_rate_setting = (!ret) ? pval : 0;
+
return max77620_init_pmic(pmic, desc->id);
}
@@ -651,7 +686,6 @@ static struct max77620_regulator_info max77620_regs_info[MAX77620_NUM_REGS] = {
RAIL_SD(SD1, sd1, "in-sd1", SD1, 600000, 1550000, 12500, 0x22, SD1),
RAIL_SD(SD2, sd2, "in-sd2", SDX, 600000, 3787500, 12500, 0xFF, NONE),
RAIL_SD(SD3, sd3, "in-sd3", SDX, 600000, 3787500, 12500, 0xFF, NONE),
- RAIL_SD(SD4, sd4, "in-sd4", SDX, 600000, 3787500, 12500, 0xFF, NONE),
RAIL_LDO(LDO0, ldo0, "in-ldo0-1", N, 800000, 2375000, 25000),
RAIL_LDO(LDO1, ldo1, "in-ldo0-1", N, 800000, 2375000, 25000),
diff --git a/drivers/regulator/max77686-regulator.c b/drivers/regulator/max77686-regulator.c
index 17ccf365a..ac4fa581e 100644
--- a/drivers/regulator/max77686-regulator.c
+++ b/drivers/regulator/max77686-regulator.c
@@ -41,6 +41,8 @@
#define MAX77686_LDO_LOW_UVSTEP 25000
#define MAX77686_BUCK_MINUV 750000
#define MAX77686_BUCK_UVSTEP 50000
+#define MAX77686_BUCK_ENABLE_TIME 40 /* us */
+#define MAX77686_DVS_ENABLE_TIME 22 /* us */
#define MAX77686_RAMP_DELAY 100000 /* uV/us */
#define MAX77686_DVS_RAMP_DELAY 27500 /* uV/us */
#define MAX77686_DVS_MINUV 600000
@@ -422,6 +424,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
.min_uV = MAX77686_BUCK_MINUV, \
.uV_step = MAX77686_BUCK_UVSTEP, \
.ramp_delay = MAX77686_RAMP_DELAY, \
+ .enable_time = MAX77686_BUCK_ENABLE_TIME, \
.n_voltages = MAX77686_VSEL_MASK + 1, \
.vsel_reg = MAX77686_REG_BUCK5OUT + (num - 5) * 2, \
.vsel_mask = MAX77686_VSEL_MASK, \
@@ -439,6 +442,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
.min_uV = MAX77686_BUCK_MINUV, \
.uV_step = MAX77686_BUCK_UVSTEP, \
.ramp_delay = MAX77686_RAMP_DELAY, \
+ .enable_time = MAX77686_BUCK_ENABLE_TIME, \
.n_voltages = MAX77686_VSEL_MASK + 1, \
.vsel_reg = MAX77686_REG_BUCK1OUT, \
.vsel_mask = MAX77686_VSEL_MASK, \
@@ -456,6 +460,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
.min_uV = MAX77686_DVS_MINUV, \
.uV_step = MAX77686_DVS_UVSTEP, \
.ramp_delay = MAX77686_DVS_RAMP_DELAY, \
+ .enable_time = MAX77686_DVS_ENABLE_TIME, \
.n_voltages = MAX77686_DVS_VSEL_MASK + 1, \
.vsel_reg = MAX77686_REG_BUCK2DVS1 + (num - 2) * 10, \
.vsel_mask = MAX77686_DVS_VSEL_MASK, \
@@ -553,17 +558,7 @@ static struct platform_driver max77686_pmic_driver = {
.id_table = max77686_pmic_id,
};
-static int __init max77686_pmic_init(void)
-{
- return platform_driver_register(&max77686_pmic_driver);
-}
-subsys_initcall(max77686_pmic_init);
-
-static void __exit max77686_pmic_cleanup(void)
-{
- platform_driver_unregister(&max77686_pmic_driver);
-}
-module_exit(max77686_pmic_cleanup);
+module_platform_driver(max77686_pmic_driver);
MODULE_DESCRIPTION("MAXIM 77686 Regulator Driver");
MODULE_AUTHOR("Chiwoong Byun <woong.byun@samsung.com>");
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693-regulator.c
index de730fd3f..de730fd3f 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693-regulator.c
diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
index c07ee13bd..1d3539324 100644
--- a/drivers/regulator/max77802-regulator.c
+++ b/drivers/regulator/max77802-regulator.c
@@ -5,7 +5,7 @@
* Simon Glass <sjg@chromium.org>
*
* Copyright (C) 2012 Samsung Electronics
- * Chiwoong Byun <woong.byun@smasung.com>
+ * Chiwoong Byun <woong.byun@samsung.com>
* Jonghwa Lee <jonghwa3.lee@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 5b75b7c2e..08d2f13ec 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -38,6 +38,9 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
+#include <linux/thermal.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
/* Register definitions */
#define MAX8973_VOUT 0x0
@@ -74,6 +77,7 @@
#define MAX8973_WDTMR_ENABLE BIT(6)
#define MAX8973_DISCH_ENBABLE BIT(5)
#define MAX8973_FT_ENABLE BIT(4)
+#define MAX77621_T_JUNCTION_120 BIT(7)
#define MAX8973_CKKADV_TRIP_MASK 0xC
#define MAX8973_CKKADV_TRIP_DISABLE 0xC
@@ -93,6 +97,12 @@
#define MAX8973_VOLATGE_STEP 6250
#define MAX8973_BUCK_N_VOLTAGE 0x80
+#define MAX77621_CHIPID_TJINT_S BIT(0)
+
+#define MAX77621_NORMAL_OPERATING_TEMP 100000
+#define MAX77621_TJINT_WARNING_TEMP_120 120000
+#define MAX77621_TJINT_WARNING_TEMP_140 140000
+
enum device_id {
MAX8973,
MAX77621
@@ -112,6 +122,9 @@ struct max8973_chip {
int curr_gpio_val;
struct regulator_ops ops;
enum device_id id;
+ int junction_temp_warning;
+ int irq;
+ struct thermal_zone_device *tz_device;
};
/*
@@ -391,6 +404,10 @@ static int max8973_init_dcdc(struct max8973_chip *max,
if (pdata->control_flags & MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE)
control1 |= MAX8973_FREQSHIFT_9PER;
+ if ((pdata->junction_temp_warning == MAX77621_TJINT_WARNING_TEMP_120) &&
+ (max->id == MAX77621))
+ control2 |= MAX77621_T_JUNCTION_120;
+
if (!(pdata->control_flags & MAX8973_CONTROL_PULL_DOWN_ENABLE))
control2 |= MAX8973_DISCH_ENBABLE;
@@ -457,6 +474,79 @@ static int max8973_init_dcdc(struct max8973_chip *max,
return ret;
}
+static int max8973_thermal_read_temp(void *data, int *temp)
+{
+ struct max8973_chip *mchip = data;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(mchip->regmap, MAX8973_CHIPID1, &val);
+ if (ret < 0) {
+ dev_err(mchip->dev, "Failed to read register CHIPID1, %d", ret);
+ return ret;
+ }
+
+	/* +1 degC to trigger cooling device */
+ if (val & MAX77621_CHIPID_TJINT_S)
+ *temp = mchip->junction_temp_warning + 1000;
+ else
+ *temp = MAX77621_NORMAL_OPERATING_TEMP;
+
+ return 0;
+}
+
+static irqreturn_t max8973_thermal_irq(int irq, void *data)
+{
+ struct max8973_chip *mchip = data;
+
+ thermal_zone_device_update(mchip->tz_device);
+
+ return IRQ_HANDLED;
+}
+
+static const struct thermal_zone_of_device_ops max77621_tz_ops = {
+ .get_temp = max8973_thermal_read_temp,
+};
+
+static int max8973_thermal_init(struct max8973_chip *mchip)
+{
+ struct thermal_zone_device *tzd;
+ struct irq_data *irq_data;
+ unsigned long irq_flags = 0;
+ int ret;
+
+ if (mchip->id != MAX77621)
+ return 0;
+
+ tzd = devm_thermal_zone_of_sensor_register(mchip->dev, 0, mchip,
+ &max77621_tz_ops);
+ if (IS_ERR(tzd)) {
+ ret = PTR_ERR(tzd);
+ dev_err(mchip->dev, "Failed to register thermal sensor: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (mchip->irq <= 0)
+ return 0;
+
+ irq_data = irq_get_irq_data(mchip->irq);
+ if (irq_data)
+ irq_flags = irqd_get_trigger_type(irq_data);
+
+ ret = devm_request_threaded_irq(mchip->dev, mchip->irq, NULL,
+ max8973_thermal_irq,
+ IRQF_ONESHOT | IRQF_SHARED | irq_flags,
+ dev_name(mchip->dev), mchip);
+ if (ret < 0) {
+ dev_err(mchip->dev, "Failed to request irq %d, %d\n",
+ mchip->irq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
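max8973_thermal_read_temp() above has to synthesize a temperature because the MAX77621 only exposes a junction-warning status bit: when the bit is set, the driver reports the warning threshold plus 1 degC, which is enough to make the thermal core start cooling. A sketch of that translation, with constants mirroring the hunk:

#include <stdio.h>

#define TJINT_S         0x01            /* junction warning status bit */
#define NORMAL_TEMP     100000          /* millicelsius */
#define WARN_TEMP_140   140000

static int read_temp(unsigned int chipid_reg, int warn_temp, int *temp)
{
        if (chipid_reg & TJINT_S)
                *temp = warn_temp + 1000;       /* trip the thermal zone */
        else
                *temp = NORMAL_TEMP;
        return 0;
}

int main(void)
{
        int t;

        read_temp(TJINT_S, WARN_TEMP_140, &t);
        printf("reported %d mC\n", t);  /* 141000 */
        return 0;
}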
static const struct regmap_config max8973_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -521,6 +611,11 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
pdata->control_flags |= MAX8973_CONTROL_CLKADV_TRIP_DISABLED;
}
+ pdata->junction_temp_warning = MAX77621_TJINT_WARNING_TEMP_140;
+ ret = of_property_read_u32(np, "junction-warn-millicelsius", &pval);
+ if (!ret && (pval <= MAX77621_TJINT_WARNING_TEMP_120))
+ pdata->junction_temp_warning = MAX77621_TJINT_WARNING_TEMP_120;
+
return pdata;
}
@@ -608,6 +703,7 @@ static int max8973_probe(struct i2c_client *client,
max->enable_external_control = pdata->enable_ext_control;
max->curr_gpio_val = pdata->dvs_def_state;
max->curr_vout_reg = MAX8973_VOUT + pdata->dvs_def_state;
+ max->junction_temp_warning = pdata->junction_temp_warning;
if (gpio_is_valid(max->enable_gpio))
max->enable_external_control = true;
@@ -718,6 +814,7 @@ static int max8973_probe(struct i2c_client *client,
return ret;
}
+ max8973_thermal_init(max);
return 0;
}
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997-regulator.c
index ea0196d44..efabc0ea0 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997-regulator.c
@@ -2,7 +2,7 @@
* max8997.c - Regulator driver for the Maxim 8997/8966
*
* Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@smasung.com>
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 6b0aa80b2..cd828dbf9 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -45,9 +45,9 @@ static void of_get_regulation_constraints(struct device_node *np,
/* Voltage change possible? */
if (constraints->min_uV != constraints->max_uV)
constraints->valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;
- /* Only one voltage? Then make sure it's set. */
- if (constraints->min_uV && constraints->max_uV &&
- constraints->min_uV == constraints->max_uV)
+
+	/* Do we have a voltage range? If so, try to apply it. */
+ if (constraints->min_uV && constraints->max_uV)
constraints->apply_uV = true;
if (!of_property_read_u32(np, "regulator-microvolt-offset", &pval))
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 6efc7ee8a..f11d41dad 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -944,6 +944,8 @@ static int palmas_ldo_registration(struct palmas_pmic *pmic,
if (id == PALMAS_REG_LDO9) {
desc->ops = &palmas_ops_ldo9;
desc->bypass_reg = desc->enable_reg;
+ desc->bypass_val_on =
+ PALMAS_LDO9_CTRL_LDO_BYPASS_EN;
desc->bypass_mask =
PALMAS_LDO9_CTRL_LDO_BYPASS_EN;
}
@@ -1055,6 +1057,8 @@ static int tps65917_ldo_registration(struct palmas_pmic *pmic,
id == TPS65917_REG_LDO2) {
desc->ops = &tps65917_ops_ldo_1_2;
desc->bypass_reg = desc->enable_reg;
+ desc->bypass_val_on =
+ TPS65917_LDO1_CTRL_BYPASS_EN;
desc->bypass_mask =
TPS65917_LDO1_CTRL_BYPASS_EN;
}
@@ -1206,6 +1210,7 @@ static int palmas_smps_registration(struct palmas_pmic *pmic,
desc->enable_mask = SMPS10_BOOST_EN;
desc->bypass_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
PALMAS_SMPS10_CTRL);
+ desc->bypass_val_on = SMPS10_BYPASS_EN;
desc->bypass_mask = SMPS10_BYPASS_EN;
desc->min_uV = 3750000;
desc->uV_step = 1250000;
@@ -1462,10 +1467,10 @@ static struct palmas_pmic_driver_data tps65917_ddata = {
.ldo_register = tps65917_ldo_registration,
};
-static void palmas_dt_to_pdata(struct device *dev,
- struct device_node *node,
- struct palmas_pmic_platform_data *pdata,
- struct palmas_pmic_driver_data *ddata)
+static int palmas_dt_to_pdata(struct device *dev,
+ struct device_node *node,
+ struct palmas_pmic_platform_data *pdata,
+ struct palmas_pmic_driver_data *ddata)
{
struct device_node *regulators;
u32 prop;
@@ -1474,7 +1479,7 @@ static void palmas_dt_to_pdata(struct device *dev,
regulators = of_get_child_by_name(node, "regulators");
if (!regulators) {
dev_info(dev, "regulator node not found\n");
- return;
+ return 0;
}
ret = of_regulator_match(dev, regulators, ddata->palmas_matches,
@@ -1482,25 +1487,29 @@ static void palmas_dt_to_pdata(struct device *dev,
of_node_put(regulators);
if (ret < 0) {
dev_err(dev, "Error parsing regulator init data: %d\n", ret);
- return;
+ return 0;
}
for (idx = 0; idx < ddata->max_reg; idx++) {
- if (!ddata->palmas_matches[idx].init_data ||
- !ddata->palmas_matches[idx].of_node)
- continue;
+		struct of_regulator_match *match;
+ struct palmas_reg_init *rinit;
+ struct device_node *np;
- pdata->reg_data[idx] = ddata->palmas_matches[idx].init_data;
+ match = &ddata->palmas_matches[idx];
+ np = match->of_node;
- pdata->reg_init[idx] = devm_kzalloc(dev,
- sizeof(struct palmas_reg_init), GFP_KERNEL);
+ if (!match->init_data || !np)
+ continue;
+
+ rinit = devm_kzalloc(dev, sizeof(*rinit), GFP_KERNEL);
+ if (!rinit)
+ return -ENOMEM;
- pdata->reg_init[idx]->warm_reset =
- of_property_read_bool(ddata->palmas_matches[idx].of_node,
- "ti,warm-reset");
+ pdata->reg_data[idx] = match->init_data;
+ pdata->reg_init[idx] = rinit;
- ret = of_property_read_u32(ddata->palmas_matches[idx].of_node,
- "ti,roof-floor", &prop);
+ rinit->warm_reset = of_property_read_bool(np, "ti,warm-reset");
+ ret = of_property_read_u32(np, "ti,roof-floor", &prop);
/* EINVAL: Property not found */
if (ret != -EINVAL) {
int econtrol;
@@ -1522,31 +1531,29 @@ static void palmas_dt_to_pdata(struct device *dev,
WARN_ON(1);
dev_warn(dev,
"%s: Invalid roof-floor option: %u\n",
- palmas_matches[idx].name, prop);
+ match->name, prop);
break;
}
}
- pdata->reg_init[idx]->roof_floor = econtrol;
+ rinit->roof_floor = econtrol;
}
- ret = of_property_read_u32(ddata->palmas_matches[idx].of_node,
- "ti,mode-sleep", &prop);
+ ret = of_property_read_u32(np, "ti,mode-sleep", &prop);
if (!ret)
- pdata->reg_init[idx]->mode_sleep = prop;
+ rinit->mode_sleep = prop;
- ret = of_property_read_bool(ddata->palmas_matches[idx].of_node,
- "ti,smps-range");
+ ret = of_property_read_bool(np, "ti,smps-range");
if (ret)
- pdata->reg_init[idx]->vsel =
- PALMAS_SMPS12_VOLTAGE_RANGE;
+ rinit->vsel = PALMAS_SMPS12_VOLTAGE_RANGE;
if (idx == PALMAS_REG_LDO8)
pdata->enable_ldo8_tracking = of_property_read_bool(
- ddata->palmas_matches[idx].of_node,
- "ti,enable-ldo8-tracking");
+ np, "ti,enable-ldo8-tracking");
}
pdata->ldo6_vibrator = of_property_read_bool(node, "ti,ldo6-vibrator");
+
+ return 0;
}
static const struct of_device_id of_palmas_match_tbl[] = {
@@ -1628,7 +1635,9 @@ static int palmas_regulators_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmic);
pmic->palmas->pmic_ddata = driver_data;
- palmas_dt_to_pdata(&pdev->dev, node, pdata, driver_data);
+ ret = palmas_dt_to_pdata(&pdev->dev, node, pdata, driver_data);
+ if (ret)
+ return ret;
ret = palmas_smps_read(palmas, PALMAS_SMPS_CTRL, &reg);
if (ret)
diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
new file mode 100644
index 000000000..d7107566c
--- /dev/null
+++ b/drivers/regulator/pv88080-regulator.c
@@ -0,0 +1,419 @@
+/*
+ * pv88080-regulator.c - Regulator device driver for PV88080
+ * Copyright (C) 2016 Powerventure Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include "pv88080-regulator.h"
+
+#define PV88080_MAX_REGULATORS 3
+
+/* PV88080 REGULATOR IDs */
+enum {
+ /* BUCKs */
+ PV88080_ID_BUCK1,
+ PV88080_ID_BUCK2,
+ PV88080_ID_BUCK3,
+};
+
+struct pv88080_regulator {
+ struct regulator_desc desc;
+ /* Current limiting */
+ unsigned int n_current_limits;
+ const int *current_limits;
+ unsigned int limit_mask;
+ unsigned int conf;
+ unsigned int conf2;
+ unsigned int conf5;
+};
+
+struct pv88080 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator_dev *rdev[PV88080_MAX_REGULATORS];
+};
+
+struct pv88080_buck_voltage {
+ int min_uV;
+ int max_uV;
+ int uV_step;
+};
+
+static const struct regmap_config pv88080_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+/* Current limit arrays (in uA) for BUCK1, BUCK2 and BUCK3.
+ * Entry indices correspond to register values.
+ */
+
+static const int pv88080_buck1_limits[] = {
+ 3230000, 5130000, 6960000, 8790000
+};
+
+static const int pv88080_buck23_limits[] = {
+ 1496000, 2393000, 3291000, 4189000
+};
+
+static const struct pv88080_buck_voltage pv88080_buck_vol[2] = {
+ {
+ .min_uV = 600000,
+ .max_uV = 1393750,
+ .uV_step = 6250,
+ },
+ {
+ .min_uV = 1400000,
+ .max_uV = 2193750,
+ .uV_step = 6250,
+ },
+};
+
+static unsigned int pv88080_buck_get_mode(struct regulator_dev *rdev)
+{
+ struct pv88080_regulator *info = rdev_get_drvdata(rdev);
+ unsigned int data;
+ int ret, mode = 0;
+
+ ret = regmap_read(rdev->regmap, info->conf, &data);
+ if (ret < 0)
+ return ret;
+
+ switch (data & PV88080_BUCK1_MODE_MASK) {
+ case PV88080_BUCK_MODE_SYNC:
+ mode = REGULATOR_MODE_FAST;
+ break;
+ case PV88080_BUCK_MODE_AUTO:
+ mode = REGULATOR_MODE_NORMAL;
+ break;
+ case PV88080_BUCK_MODE_SLEEP:
+ mode = REGULATOR_MODE_STANDBY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mode;
+}
+
+static int pv88080_buck_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct pv88080_regulator *info = rdev_get_drvdata(rdev);
+ int val = 0;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = PV88080_BUCK_MODE_SYNC;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = PV88080_BUCK_MODE_AUTO;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ val = PV88080_BUCK_MODE_SLEEP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(rdev->regmap, info->conf,
+ PV88080_BUCK1_MODE_MASK, val);
+}
+
+static int pv88080_set_current_limit(struct regulator_dev *rdev, int min,
+ int max)
+{
+ struct pv88080_regulator *info = rdev_get_drvdata(rdev);
+ int i;
+
+	/* search downwards for the largest limit inside [min, max] */
+	for (i = info->n_current_limits - 1; i >= 0; i--) {
+ if (min <= info->current_limits[i]
+ && max >= info->current_limits[i]) {
+ return regmap_update_bits(rdev->regmap,
+ info->conf,
+ info->limit_mask,
+ i << PV88080_BUCK1_ILIM_SHIFT);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int pv88080_get_current_limit(struct regulator_dev *rdev)
+{
+ struct pv88080_regulator *info = rdev_get_drvdata(rdev);
+ unsigned int data;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, info->conf, &data);
+ if (ret < 0)
+ return ret;
+
+ data = (data & info->limit_mask) >> PV88080_BUCK1_ILIM_SHIFT;
+ return info->current_limits[data];
+}
+
+static struct regulator_ops pv88080_buck_ops = {
+ .get_mode = pv88080_buck_get_mode,
+ .set_mode = pv88080_buck_set_mode,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_current_limit = pv88080_set_current_limit,
+ .get_current_limit = pv88080_get_current_limit,
+};
+
+#define PV88080_BUCK(chip, regl_name, min, step, max, limits_array) \
+{\
+ .desc = {\
+ .id = chip##_ID_##regl_name,\
+ .name = __stringify(chip##_##regl_name),\
+ .of_match = of_match_ptr(#regl_name),\
+ .regulators_node = of_match_ptr("regulators"),\
+ .type = REGULATOR_VOLTAGE,\
+ .owner = THIS_MODULE,\
+ .ops = &pv88080_buck_ops,\
+ .min_uV = min, \
+ .uV_step = step, \
+ .n_voltages = ((max) - (min))/(step) + 1, \
+ .enable_reg = PV88080_REG_##regl_name##_CONF0, \
+ .enable_mask = PV88080_##regl_name##_EN, \
+ .vsel_reg = PV88080_REG_##regl_name##_CONF0, \
+ .vsel_mask = PV88080_V##regl_name##_MASK, \
+ },\
+ .current_limits = limits_array, \
+ .n_current_limits = ARRAY_SIZE(limits_array), \
+ .limit_mask = PV88080_##regl_name##_ILIM_MASK, \
+ .conf = PV88080_REG_##regl_name##_CONF1, \
+ .conf2 = PV88080_REG_##regl_name##_CONF2, \
+ .conf5 = PV88080_REG_##regl_name##_CONF5, \
+}
+
+static struct pv88080_regulator pv88080_regulator_info[] = {
+ PV88080_BUCK(PV88080, BUCK1, 600000, 6250, 1393750,
+ pv88080_buck1_limits),
+ PV88080_BUCK(PV88080, BUCK2, 600000, 6250, 1393750,
+ pv88080_buck23_limits),
+ PV88080_BUCK(PV88080, BUCK3, 600000, 6250, 1393750,
+ pv88080_buck23_limits),
+};
+
+static irqreturn_t pv88080_irq_handler(int irq, void *data)
+{
+ struct pv88080 *chip = data;
+ int i, reg_val, err, ret = IRQ_NONE;
+
+ err = regmap_read(chip->regmap, PV88080_REG_EVENT_A, &reg_val);
+ if (err < 0)
+ goto error_i2c;
+
+ if (reg_val & PV88080_E_VDD_FLT) {
+ for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
+ if (chip->rdev[i] != NULL) {
+ regulator_notifier_call_chain(chip->rdev[i],
+ REGULATOR_EVENT_UNDER_VOLTAGE,
+ NULL);
+ }
+ }
+
+ err = regmap_write(chip->regmap, PV88080_REG_EVENT_A,
+ PV88080_E_VDD_FLT);
+ if (err < 0)
+ goto error_i2c;
+
+ ret = IRQ_HANDLED;
+ }
+
+ if (reg_val & PV88080_E_OVER_TEMP) {
+ for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
+ if (chip->rdev[i] != NULL) {
+ regulator_notifier_call_chain(chip->rdev[i],
+ REGULATOR_EVENT_OVER_TEMP,
+ NULL);
+ }
+ }
+
+ err = regmap_write(chip->regmap, PV88080_REG_EVENT_A,
+ PV88080_E_OVER_TEMP);
+ if (err < 0)
+ goto error_i2c;
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+
+error_i2c:
+ dev_err(chip->dev, "I2C error : %d\n", err);
+ return IRQ_NONE;
+}
+
+/*
+ * I2C driver interface functions
+ */
+static int pv88080_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
+ struct pv88080 *chip;
+ struct regulator_config config = { };
+ int i, error, ret;
+ unsigned int conf2, conf5;
+
+ chip = devm_kzalloc(&i2c->dev, sizeof(struct pv88080), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = &i2c->dev;
+ chip->regmap = devm_regmap_init_i2c(i2c, &pv88080_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ error = PTR_ERR(chip->regmap);
+ dev_err(chip->dev, "Failed to allocate register map: %d\n",
+ error);
+ return error;
+ }
+
+ i2c_set_clientdata(i2c, chip);
+
+ if (i2c->irq != 0) {
+ ret = regmap_write(chip->regmap, PV88080_REG_MASK_A, 0xFF);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to mask A reg: %d\n", ret);
+ return ret;
+ }
+ ret = regmap_write(chip->regmap, PV88080_REG_MASK_B, 0xFF);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to mask B reg: %d\n", ret);
+ return ret;
+ }
+ ret = regmap_write(chip->regmap, PV88080_REG_MASK_C, 0xFF);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to mask C reg: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+ pv88080_irq_handler,
+ IRQF_TRIGGER_LOW|IRQF_ONESHOT,
+ "pv88080", chip);
+ if (ret != 0) {
+ dev_err(chip->dev, "Failed to request IRQ: %d\n",
+ i2c->irq);
+ return ret;
+ }
+
+ ret = regmap_update_bits(chip->regmap, PV88080_REG_MASK_A,
+ PV88080_M_VDD_FLT | PV88080_M_OVER_TEMP, 0);
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "Failed to update mask reg: %d\n", ret);
+ return ret;
+ }
+
+ } else {
+ dev_warn(chip->dev, "No IRQ configured\n");
+ }
+
+ config.dev = chip->dev;
+ config.regmap = chip->regmap;
+
+ for (i = 0; i < PV88080_MAX_REGULATORS; i++) {
+ if (init_data)
+ config.init_data = &init_data[i];
+
+ ret = regmap_read(chip->regmap,
+ pv88080_regulator_info[i].conf2, &conf2);
+ if (ret < 0)
+ return ret;
+
+ conf2 = ((conf2 >> PV88080_BUCK_VDAC_RANGE_SHIFT) &
+ PV88080_BUCK_VDAC_RANGE_MASK);
+
+ ret = regmap_read(chip->regmap,
+ pv88080_regulator_info[i].conf5, &conf5);
+ if (ret < 0)
+ return ret;
+
+ conf5 = ((conf5 >> PV88080_BUCK_VRANGE_GAIN_SHIFT) &
+ PV88080_BUCK_VRANGE_GAIN_MASK);
+
+ pv88080_regulator_info[i].desc.min_uV =
+ pv88080_buck_vol[conf2].min_uV * (conf5+1);
+ pv88080_regulator_info[i].desc.uV_step =
+ pv88080_buck_vol[conf2].uV_step * (conf5+1);
+ pv88080_regulator_info[i].desc.n_voltages =
+ ((pv88080_buck_vol[conf2].max_uV * (conf5+1))
+ - (pv88080_regulator_info[i].desc.min_uV))
+ /(pv88080_regulator_info[i].desc.uV_step) + 1;
+
+ config.driver_data = (void *)&pv88080_regulator_info[i];
+ chip->rdev[i] = devm_regulator_register(chip->dev,
+ &pv88080_regulator_info[i].desc, &config);
+ if (IS_ERR(chip->rdev[i])) {
+ dev_err(chip->dev,
+ "Failed to register PV88080 regulator\n");
+ return PTR_ERR(chip->rdev[i]);
+ }
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id pv88080_i2c_id[] = {
+ {"pv88080", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, pv88080_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id pv88080_dt_ids[] = {
+ { .compatible = "pvs,pv88080", .data = &pv88080_i2c_id[0] },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pv88080_dt_ids);
+#endif
+
+static struct i2c_driver pv88080_regulator_driver = {
+ .driver = {
+ .name = "pv88080",
+ .of_match_table = of_match_ptr(pv88080_dt_ids),
+ },
+ .probe = pv88080_i2c_probe,
+ .id_table = pv88080_i2c_id,
+};
+
+module_i2c_driver(pv88080_regulator_driver);
+
+MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>");
+MODULE_DESCRIPTION("Regulator device driver for Powerventure PV88080");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/pv88080-regulator.h b/drivers/regulator/pv88080-regulator.h
new file mode 100644
index 000000000..5e9afde60
--- /dev/null
+++ b/drivers/regulator/pv88080-regulator.h
@@ -0,0 +1,92 @@
+/*
+ * pv88080-regulator.h - Regulator definitions for PV88080
+ * Copyright (C) 2016 Powerventure Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PV88080_REGISTERS_H__
+#define __PV88080_REGISTERS_H__
+
+/* System Control and Event Registers */
+#define PV88080_REG_EVENT_A 0x04
+#define PV88080_REG_MASK_A 0x09
+#define PV88080_REG_MASK_B 0x0a
+#define PV88080_REG_MASK_C 0x0b
+
+/* Regulator Registers */
+#define PV88080_REG_BUCK1_CONF0 0x27
+#define PV88080_REG_BUCK1_CONF1 0x28
+#define PV88080_REG_BUCK1_CONF2 0x59
+#define PV88080_REG_BUCK1_CONF5 0x5c
+#define PV88080_REG_BUCK2_CONF0 0x29
+#define PV88080_REG_BUCK2_CONF1 0x2a
+#define PV88080_REG_BUCK2_CONF2 0x61
+#define PV88080_REG_BUCK2_CONF5 0x64
+#define PV88080_REG_BUCK3_CONF0 0x2b
+#define PV88080_REG_BUCK3_CONF1 0x2c
+#define PV88080_REG_BUCK3_CONF2 0x69
+#define PV88080_REG_BUCK3_CONF5 0x6c
+
+/* PV88080_REG_EVENT_A (addr=0x04) */
+#define PV88080_E_VDD_FLT 0x01
+#define PV88080_E_OVER_TEMP 0x02
+
+/* PV88080_REG_MASK_A (addr=0x09) */
+#define PV88080_M_VDD_FLT 0x01
+#define PV88080_M_OVER_TEMP 0x02
+
+/* PV88080_REG_BUCK1_CONF0 (addr=0x27) */
+#define PV88080_BUCK1_EN 0x80
+#define PV88080_VBUCK1_MASK 0x7F
+/* PV88080_REG_BUCK2_CONF0 (addr=0x29) */
+#define PV88080_BUCK2_EN 0x80
+#define PV88080_VBUCK2_MASK 0x7F
+/* PV88080_REG_BUCK3_CONF0 (addr=0x2b) */
+#define PV88080_BUCK3_EN 0x80
+#define PV88080_VBUCK3_MASK 0x7F
+
+/* PV88080_REG_BUCK1_CONF1 (addr=0x28) */
+#define PV88080_BUCK1_ILIM_SHIFT 2
+#define PV88080_BUCK1_ILIM_MASK 0x0C
+#define PV88080_BUCK1_MODE_MASK 0x03
+
+/* PV88080_REG_BUCK2_CONF1 (addr=0x2a) */
+#define PV88080_BUCK2_ILIM_SHIFT 2
+#define PV88080_BUCK2_ILIM_MASK 0x0C
+#define PV88080_BUCK2_MODE_MASK 0x03
+
+/* PV88080_REG_BUCK3_CONF1 (addr=0x2c) */
+#define PV88080_BUCK3_ILIM_SHIFT 2
+#define PV88080_BUCK3_ILIM_MASK 0x0C
+#define PV88080_BUCK3_MODE_MASK 0x03
+
+#define PV88080_BUCK_MODE_SLEEP 0x00
+#define PV88080_BUCK_MODE_AUTO 0x01
+#define PV88080_BUCK_MODE_SYNC 0x02
+
+/* PV88080_REG_BUCK2_CONF2 (addr=0x61) */
+/* PV88080_REG_BUCK3_CONF2 (addr=0x69) */
+#define PV88080_BUCK_VDAC_RANGE_SHIFT 7
+#define PV88080_BUCK_VDAC_RANGE_MASK 0x01
+
+#define PV88080_BUCK_VDAC_RANGE_1 0x00
+#define PV88080_BUCK_VDAC_RANGE_2 0x01
+
+/* PV88080_REG_BUCK2_CONF5 (addr=0x64) */
+/* PV88080_REG_BUCK3_CONF5 (addr=0x6c) */
+#define PV88080_BUCK_VRANGE_GAIN_SHIFT 0
+#define PV88080_BUCK_VRANGE_GAIN_MASK 0x01
+
+#define PV88080_BUCK_VRANGE_GAIN_1 0x00
+#define PV88080_BUCK_VRANGE_GAIN_2 0x01
+
+#endif /* __PV88080_REGISTERS_H__ */
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 4689d62f4..fafa3488e 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -59,18 +59,18 @@ static int pwm_regulator_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
- unsigned int pwm_reg_period;
+ struct pwm_args pargs;
int dutycycle;
int ret;
- pwm_reg_period = pwm_get_period(drvdata->pwm);
+ pwm_get_args(drvdata->pwm, &pargs);
- dutycycle = (pwm_reg_period *
+ dutycycle = (pargs.period *
drvdata->duty_cycle_table[selector].dutycycle) / 100;
- ret = pwm_config(drvdata->pwm, dutycycle, pwm_reg_period);
+ ret = pwm_config(drvdata->pwm, dutycycle, pargs.period);
if (ret) {
- dev_err(&rdev->dev, "Failed to configure PWM\n");
+ dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
return ret;
}
@@ -113,18 +113,6 @@ static int pwm_regulator_is_enabled(struct regulator_dev *dev)
return pwm_is_enabled(drvdata->pwm);
}
-/**
- * Continuous voltage call-backs
- */
-static int pwm_voltage_to_duty_cycle_percentage(struct regulator_dev *rdev, int req_uV)
-{
- int min_uV = rdev->constraints->min_uV;
- int max_uV = rdev->constraints->max_uV;
- int diff = max_uV - min_uV;
-
- return ((req_uV * 100) - (min_uV * 100)) / diff;
-}
-
static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
@@ -138,21 +126,42 @@ static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
{
struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
unsigned int ramp_delay = rdev->constraints->ramp_delay;
- unsigned int period = pwm_get_period(drvdata->pwm);
- int duty_cycle;
+ struct pwm_args pargs;
+ unsigned int req_diff = min_uV - rdev->constraints->min_uV;
+ unsigned int diff;
+ unsigned int duty_pulse;
+ u64 req_period;
+ u32 rem;
int ret;
- duty_cycle = pwm_voltage_to_duty_cycle_percentage(rdev, min_uV);
+ pwm_get_args(drvdata->pwm, &pargs);
+ diff = rdev->constraints->max_uV - rdev->constraints->min_uV;
+
+	/* First check whether a duty cycle time that divides the PWM
+	 * period exactly can be used. If (request_diff_to_min * pwm_period)
+	 * is evenly divisible by voltage_range_diff, the duty cycle time
+	 * can be computed without rounding, which brings the output
+	 * voltage closer to the requested value since no precision is
+	 * lost in the calculation.
+	 */
+ req_period = req_diff * pargs.period;
+ div_u64_rem(req_period, diff, &rem);
+ if (!rem) {
+ do_div(req_period, diff);
+ duty_pulse = (unsigned int)req_period;
+ } else {
+ duty_pulse = (pargs.period / 100) * ((req_diff * 100) / diff);
+ }
- ret = pwm_config(drvdata->pwm, (period / 100) * duty_cycle, period);
+ ret = pwm_config(drvdata->pwm, duty_pulse, pargs.period);
if (ret) {
- dev_err(&rdev->dev, "Failed to configure PWM\n");
+ dev_err(&rdev->dev, "Failed to configure PWM: %d\n", ret);
return ret;
}
ret = pwm_enable(drvdata->pwm);
if (ret) {
- dev_err(&rdev->dev, "Failed to enable PWM\n");
+ dev_err(&rdev->dev, "Failed to enable PWM: %d\n", ret);
return ret;
}
drvdata->volt_uV = min_uV;
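
The hunk above prefers an exact duty pulse: when req_diff * period divides evenly by the constraint range, the quotient is used directly, avoiding the truncation of the integer-percent fallback. A userspace sketch of the same arithmetic, with plain 64-bit division standing in for div_u64_rem()/do_div() and made-up sample values:

#include <stdint.h>
#include <stdio.h>

static unsigned int duty_pulse(unsigned int period_ns,
			       unsigned int req_diff_uV,
			       unsigned int range_uV)
{
	uint64_t req_period = (uint64_t)req_diff_uV * period_ns;

	if (req_period % range_uV == 0)	/* exact: no rounding loss */
		return (unsigned int)(req_period / range_uV);

	/* fallback: integer-percent approximation, as in the driver */
	return (period_ns / 100) * ((req_diff_uV * 100) / range_uV);
}

int main(void)
{
	/* 1.00 V requested in a 0.90..1.40 V range, 3000 ns period */
	printf("%u ns\n", duty_pulse(3000, 100000, 500000));
	return 0;
}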
@@ -200,8 +209,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
if ((length < sizeof(*duty_cycle_table)) ||
(length % sizeof(*duty_cycle_table))) {
- dev_err(&pdev->dev,
- "voltage-table length(%d) is invalid\n",
+ dev_err(&pdev->dev, "voltage-table length(%d) is invalid\n",
length);
return -EINVAL;
}
@@ -214,7 +222,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
(u32 *)duty_cycle_table,
length / sizeof(u32));
if (ret) {
- dev_err(&pdev->dev, "Failed to read voltage-table\n");
+ dev_err(&pdev->dev, "Failed to read voltage-table: %d\n", ret);
return ret;
}
@@ -277,16 +285,24 @@ static int pwm_regulator_probe(struct platform_device *pdev)
drvdata->pwm = devm_pwm_get(&pdev->dev, NULL);
if (IS_ERR(drvdata->pwm)) {
- dev_err(&pdev->dev, "Failed to get PWM\n");
- return PTR_ERR(drvdata->pwm);
+ ret = PTR_ERR(drvdata->pwm);
+ dev_err(&pdev->dev, "Failed to get PWM: %d\n", ret);
+ return ret;
}
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to the
+ * atomic PWM API.
+ */
+ pwm_apply_args(drvdata->pwm);
+
regulator = devm_regulator_register(&pdev->dev,
&drvdata->desc, &config);
if (IS_ERR(regulator)) {
- dev_err(&pdev->dev, "Failed to register regulator %s\n",
- drvdata->desc.name);
- return PTR_ERR(regulator);
+ ret = PTR_ERR(regulator);
+ dev_err(&pdev->dev, "Failed to register regulator %s: %d\n",
+ drvdata->desc.name, ret);
+ return ret;
}
return 0;
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 88a5dc88b..84cce21e9 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -246,6 +246,7 @@ enum spmi_common_control_register_index {
/* Minimum voltage stepper delay for each step. */
#define SPMI_FTSMPS_STEP_DELAY 8
+#define SPMI_DEFAULT_STEP_DELAY 20
/*
* The ratio SPMI_FTSMPS_STEP_MARGIN_NUM/SPMI_FTSMPS_STEP_MARGIN_DEN is used to
@@ -254,13 +255,6 @@ enum spmi_common_control_register_index {
#define SPMI_FTSMPS_STEP_MARGIN_NUM 4
#define SPMI_FTSMPS_STEP_MARGIN_DEN 5
-/*
- * This voltage in uV is returned by get_voltage functions when there is no way
- * to determine the current voltage level. It is needed because the regulator
- * framework treats a 0 uV voltage as an error.
- */
-#define VOLTAGE_UNKNOWN 1
-
/* VSET value to decide the range of ULT SMPS */
#define ULT_SMPS_RANGE_SPLIT 0x60
@@ -539,12 +533,12 @@ static int spmi_regulator_common_disable(struct regulator_dev *rdev)
}
static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
- int min_uV, int max_uV, u8 *range_sel, u8 *voltage_sel,
- unsigned *selector)
+ int min_uV, int max_uV)
{
const struct spmi_voltage_range *range;
int uV = min_uV;
int lim_min_uV, lim_max_uV, i, range_id, range_max_uV;
+ int selector, voltage_sel;
/* Check if request voltage is outside of physically settable range. */
lim_min_uV = vreg->set_points->range[0].set_point_min_uV;
@@ -570,14 +564,13 @@ static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
range_id = i;
range = &vreg->set_points->range[range_id];
- *range_sel = range->range_sel;
/*
* Force uV to be an allowed set point by applying a ceiling function to
* the uV value.
*/
- *voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
- uV = *voltage_sel * range->step_uV + range->min_uV;
+ voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
+ uV = voltage_sel * range->step_uV + range->min_uV;
if (uV > max_uV) {
dev_err(vreg->dev,
@@ -587,12 +580,48 @@ static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
return -EINVAL;
}
- *selector = 0;
+ selector = 0;
for (i = 0; i < range_id; i++)
- *selector += vreg->set_points->range[i].n_voltages;
- *selector += (uV - range->set_point_min_uV) / range->step_uV;
+ selector += vreg->set_points->range[i].n_voltages;
+ selector += (uV - range->set_point_min_uV) / range->step_uV;
- return 0;
+ return selector;
+}
+
+static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg,
+ unsigned selector, u8 *range_sel,
+ u8 *voltage_sel)
+{
+ const struct spmi_voltage_range *range, *end;
+
+ range = vreg->set_points->range;
+ end = range + vreg->set_points->count;
+
+ for (; range < end; range++) {
+ if (selector < range->n_voltages) {
+ *voltage_sel = selector;
+ *range_sel = range->range_sel;
+ return 0;
+ }
+
+ selector -= range->n_voltages;
+ }
+
+ return -EINVAL;
+}
+
+static int spmi_hw_selector_to_sw(struct spmi_regulator *vreg, u8 hw_sel,
+ const struct spmi_voltage_range *range)
+{
+ int sw_sel = hw_sel;
+ const struct spmi_voltage_range *r = vreg->set_points->range;
+
+ while (r != range) {
+ sw_sel += r->n_voltages;
+ r++;
+ }
+
+ return sw_sel;
}
static const struct spmi_voltage_range *
@@ -614,12 +643,11 @@ spmi_regulator_find_range(struct spmi_regulator *vreg)
}
static int spmi_regulator_select_voltage_same_range(struct spmi_regulator *vreg,
- int min_uV, int max_uV, u8 *range_sel, u8 *voltage_sel,
- unsigned *selector)
+ int min_uV, int max_uV)
{
const struct spmi_voltage_range *range;
int uV = min_uV;
- int i;
+ int i, selector;
range = spmi_regulator_find_range(vreg);
if (!range)
@@ -637,8 +665,8 @@ static int spmi_regulator_select_voltage_same_range(struct spmi_regulator *vreg,
* Force uV to be an allowed set point by applying a ceiling function to
* the uV value.
*/
- *voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
- uV = *voltage_sel * range->step_uV + range->min_uV;
+ uV = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
+ uV = uV * range->step_uV + range->min_uV;
if (uV > max_uV) {
/*
@@ -648,43 +676,49 @@ static int spmi_regulator_select_voltage_same_range(struct spmi_regulator *vreg,
goto different_range;
}
- *selector = 0;
+ selector = 0;
for (i = 0; i < vreg->set_points->count; i++) {
if (uV >= vreg->set_points->range[i].set_point_min_uV
&& uV <= vreg->set_points->range[i].set_point_max_uV) {
- *selector +=
+ selector +=
(uV - vreg->set_points->range[i].set_point_min_uV)
/ vreg->set_points->range[i].step_uV;
break;
}
- *selector += vreg->set_points->range[i].n_voltages;
+ selector += vreg->set_points->range[i].n_voltages;
}
- if (*selector >= vreg->set_points->n_voltages)
+ if (selector >= vreg->set_points->n_voltages)
goto different_range;
- return 0;
+ return selector;
different_range:
- return spmi_regulator_select_voltage(vreg, min_uV, max_uV,
- range_sel, voltage_sel, selector);
+ return spmi_regulator_select_voltage(vreg, min_uV, max_uV);
}
-static int spmi_regulator_common_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int spmi_regulator_common_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- int ret;
- u8 buf[2];
- u8 range_sel, voltage_sel;
/*
* Favor staying in the current voltage range if possible. This avoids
* voltage spikes that occur when changing the voltage range.
*/
- ret = spmi_regulator_select_voltage_same_range(vreg, min_uV, max_uV,
- &range_sel, &voltage_sel, selector);
+ return spmi_regulator_select_voltage_same_range(vreg, min_uV, max_uV);
+}
+
+static int
+spmi_regulator_common_set_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ int ret;
+ u8 buf[2];
+ u8 range_sel, voltage_sel;
+
+ ret = spmi_sw_selector_to_hw(vreg, selector, &range_sel, &voltage_sel);
if (ret)
return ret;
@@ -719,24 +753,24 @@ static int spmi_regulator_common_get_voltage(struct regulator_dev *rdev)
range = spmi_regulator_find_range(vreg);
if (!range)
- return VOLTAGE_UNKNOWN;
+ return -EINVAL;
- return range->step_uV * voltage_sel + range->min_uV;
+ return spmi_hw_selector_to_sw(vreg, voltage_sel, range);
}
-static int spmi_regulator_single_range_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int spmi_regulator_single_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- int ret;
- u8 range_sel, sel;
- ret = spmi_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
- &sel, selector);
- if (ret) {
- dev_err(vreg->dev, "could not set voltage, ret=%d\n", ret);
- return ret;
- }
+ return spmi_regulator_select_voltage(vreg, min_uV, max_uV);
+}
+
+static int spmi_regulator_single_range_set_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ u8 sel = selector;
/*
* Certain types of regulators do not have a range select register so
@@ -748,27 +782,24 @@ static int spmi_regulator_single_range_set_voltage(struct regulator_dev *rdev,
static int spmi_regulator_single_range_get_voltage(struct regulator_dev *rdev)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- const struct spmi_voltage_range *range = vreg->set_points->range;
- u8 voltage_sel;
+ u8 selector;
+ int ret;
- spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_SET, &voltage_sel, 1);
+ ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_SET, &selector, 1);
+ if (ret)
+ return ret;
- return range->step_uV * voltage_sel + range->min_uV;
+ return selector;
}
static int spmi_regulator_ult_lo_smps_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+ unsigned selector)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
int ret;
u8 range_sel, voltage_sel;
- /*
- * Favor staying in the current voltage range if possible. This avoids
- * voltage spikes that occur when changing the voltage range.
- */
- ret = spmi_regulator_select_voltage_same_range(vreg, min_uV, max_uV,
- &range_sel, &voltage_sel, selector);
+ ret = spmi_sw_selector_to_hw(vreg, selector, &range_sel, &voltage_sel);
if (ret)
return ret;
@@ -783,7 +814,7 @@ static int spmi_regulator_ult_lo_smps_set_voltage(struct regulator_dev *rdev,
voltage_sel |= ULT_SMPS_RANGE_SPLIT;
return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_VOLTAGE_SET,
- voltage_sel, 0xff);
+ voltage_sel, 0xff);
}
static int spmi_regulator_ult_lo_smps_get_voltage(struct regulator_dev *rdev)
@@ -796,12 +827,12 @@ static int spmi_regulator_ult_lo_smps_get_voltage(struct regulator_dev *rdev)
range = spmi_regulator_find_range(vreg);
if (!range)
- return VOLTAGE_UNKNOWN;
+ return -EINVAL;
if (range->range_sel == 1)
voltage_sel &= ~ULT_SMPS_RANGE_SPLIT;
- return range->step_uV * voltage_sel + range->min_uV;
+ return spmi_hw_selector_to_sw(vreg, voltage_sel, range);
}
static int spmi_regulator_common_list_voltage(struct regulator_dev *rdev,
@@ -1007,8 +1038,10 @@ static struct regulator_ops spmi_smps_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_common_set_voltage,
- .get_voltage = spmi_regulator_common_get_voltage,
+ .set_voltage_sel = spmi_regulator_common_set_voltage,
+ .set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
+ .get_voltage_sel = spmi_regulator_common_get_voltage,
+ .map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1020,8 +1053,9 @@ static struct regulator_ops spmi_ldo_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_common_set_voltage,
- .get_voltage = spmi_regulator_common_get_voltage,
+ .set_voltage_sel = spmi_regulator_common_set_voltage,
+ .get_voltage_sel = spmi_regulator_common_get_voltage,
+ .map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1036,8 +1070,9 @@ static struct regulator_ops spmi_ln_ldo_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_common_set_voltage,
- .get_voltage = spmi_regulator_common_get_voltage,
+ .set_voltage_sel = spmi_regulator_common_set_voltage,
+ .get_voltage_sel = spmi_regulator_common_get_voltage,
+ .map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_bypass = spmi_regulator_common_set_bypass,
.get_bypass = spmi_regulator_common_get_bypass,
@@ -1056,8 +1091,9 @@ static struct regulator_ops spmi_boost_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_single_range_set_voltage,
- .get_voltage = spmi_regulator_single_range_get_voltage,
+ .set_voltage_sel = spmi_regulator_single_range_set_voltage,
+ .get_voltage_sel = spmi_regulator_single_range_get_voltage,
+ .map_voltage = spmi_regulator_single_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_input_current_limit = spmi_regulator_set_ilim,
};
@@ -1066,9 +1102,10 @@ static struct regulator_ops spmi_ftsmps_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_common_set_voltage,
+ .set_voltage_sel = spmi_regulator_common_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
- .get_voltage = spmi_regulator_common_get_voltage,
+ .get_voltage_sel = spmi_regulator_common_get_voltage,
+ .map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1080,8 +1117,9 @@ static struct regulator_ops spmi_ult_lo_smps_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_ult_lo_smps_set_voltage,
- .get_voltage = spmi_regulator_ult_lo_smps_get_voltage,
+ .set_voltage_sel = spmi_regulator_ult_lo_smps_set_voltage,
+ .set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
+ .get_voltage_sel = spmi_regulator_ult_lo_smps_get_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1093,8 +1131,10 @@ static struct regulator_ops spmi_ult_ho_smps_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_single_range_set_voltage,
- .get_voltage = spmi_regulator_single_range_get_voltage,
+ .set_voltage_sel = spmi_regulator_single_range_set_voltage,
+ .set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
+ .get_voltage_sel = spmi_regulator_single_range_get_voltage,
+ .map_voltage = spmi_regulator_single_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1106,8 +1146,9 @@ static struct regulator_ops spmi_ult_ldo_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_single_range_set_voltage,
- .get_voltage = spmi_regulator_single_range_get_voltage,
+ .set_voltage_sel = spmi_regulator_single_range_set_voltage,
+ .get_voltage_sel = spmi_regulator_single_range_get_voltage,
+ .map_voltage = spmi_regulator_single_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1201,7 +1242,7 @@ static int spmi_regulator_match(struct spmi_regulator *vreg, u16 force_type)
ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_DIG_MAJOR_REV, version,
ARRAY_SIZE(version));
if (ret) {
- dev_err(vreg->dev, "could not read version registers\n");
+ dev_dbg(vreg->dev, "could not read version registers\n");
return ret;
}
dig_major_rev = version[SPMI_COMMON_REG_DIG_MAJOR_REV
@@ -1245,11 +1286,11 @@ found:
return 0;
}
-static int spmi_regulator_ftsmps_init_slew_rate(struct spmi_regulator *vreg)
+static int spmi_regulator_init_slew_rate(struct spmi_regulator *vreg)
{
int ret;
u8 reg = 0;
- int step, delay, slew_rate;
+ int step, delay, slew_rate, step_delay;
const struct spmi_voltage_range *range;
ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_STEP_CTRL, &reg, 1);
@@ -1262,6 +1303,15 @@ static int spmi_regulator_ftsmps_init_slew_rate(struct spmi_regulator *vreg)
if (!range)
return -EINVAL;
+ switch (vreg->logical_type) {
+ case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS:
+ step_delay = SPMI_FTSMPS_STEP_DELAY;
+ break;
+ default:
+ step_delay = SPMI_DEFAULT_STEP_DELAY;
+ break;
+ }
+
step = reg & SPMI_FTSMPS_STEP_CTRL_STEP_MASK;
step >>= SPMI_FTSMPS_STEP_CTRL_STEP_SHIFT;
@@ -1270,7 +1320,7 @@ static int spmi_regulator_ftsmps_init_slew_rate(struct spmi_regulator *vreg)
/* slew_rate has units of uV/us */
slew_rate = SPMI_FTSMPS_CLOCK_RATE * range->step_uV * (1 << step);
- slew_rate /= 1000 * (SPMI_FTSMPS_STEP_DELAY << delay);
+ slew_rate /= 1000 * (step_delay << delay);
slew_rate *= SPMI_FTSMPS_STEP_MARGIN_NUM;
slew_rate /= SPMI_FTSMPS_STEP_MARGIN_DEN;
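
With the new SPMI_DEFAULT_STEP_DELAY, the formula above is slew = clock * step_uV * 2^step / (1000 * (step_delay << delay)), derated by the 4/5 margin. A worked example; the clock-rate constant is outside this hunk, so the value here is an assumption for illustration only:

#include <stdio.h>

int main(void)
{
	int clock_rate = 19200;	/* kHz, assumed for illustration */
	int step_uV = 12500, step = 0, delay = 0;
	int step_delay = 20;	/* SPMI_DEFAULT_STEP_DELAY */
	int slew;

	slew = clock_rate * step_uV * (1 << step);
	slew /= 1000 * (step_delay << delay);
	slew = slew * 4 / 5;	/* SPMI_FTSMPS_STEP_MARGIN_NUM/DEN */

	printf("slew rate: %d uV/us\n", slew);	/* 9600 */
	return 0;
}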
@@ -1411,10 +1461,16 @@ static int spmi_regulator_of_parse(struct device_node *node,
return ret;
}
- if (vreg->logical_type == SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS) {
- ret = spmi_regulator_ftsmps_init_slew_rate(vreg);
+ switch (vreg->logical_type) {
+ case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_SMPS:
+ ret = spmi_regulator_init_slew_rate(vreg);
if (ret)
return ret;
+ default:
+ break;
}
if (vreg->logical_type != SPMI_REGULATOR_LOGICAL_TYPE_VS)
@@ -1510,10 +1566,61 @@ static const struct spmi_regulator_data pm8916_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pm8994_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s4", },
+ { "s5", 0x2000, "vdd_s5", },
+ { "s6", 0x2300, "vdd_s6", },
+ { "s7", 0x2600, "vdd_s7", },
+ { "s8", 0x2900, "vdd_s8", },
+ { "s9", 0x2c00, "vdd_s9", },
+ { "s10", 0x2f00, "vdd_s10", },
+ { "s11", 0x3200, "vdd_s11", },
+ { "s12", 0x3500, "vdd_s12", },
+ { "l1", 0x4000, "vdd_l1", },
+ { "l2", 0x4100, "vdd_l2_l26_l28", },
+ { "l3", 0x4200, "vdd_l3_l11", },
+ { "l4", 0x4300, "vdd_l4_l27_l31", },
+ { "l5", 0x4400, "vdd_l5_l7", },
+ { "l6", 0x4500, "vdd_l6_l12_l32", },
+ { "l7", 0x4600, "vdd_l5_l7", },
+ { "l8", 0x4700, "vdd_l8_l16_l30", },
+ { "l9", 0x4800, "vdd_l9_l10_l18_l22", },
+ { "l10", 0x4900, "vdd_l9_l10_l18_l22", },
+ { "l11", 0x4a00, "vdd_l3_l11", },
+ { "l12", 0x4b00, "vdd_l6_l12_l32", },
+ { "l13", 0x4c00, "vdd_l13_l19_l23_l24", },
+ { "l14", 0x4d00, "vdd_l14_l15", },
+ { "l15", 0x4e00, "vdd_l14_l15", },
+ { "l16", 0x4f00, "vdd_l8_l16_l30", },
+ { "l17", 0x5000, "vdd_l17_l29", },
+ { "l18", 0x5100, "vdd_l9_l10_l18_l22", },
+ { "l19", 0x5200, "vdd_l13_l19_l23_l24", },
+ { "l20", 0x5300, "vdd_l20_l21", },
+ { "l21", 0x5400, "vdd_l20_l21", },
+ { "l22", 0x5500, "vdd_l9_l10_l18_l22", },
+ { "l23", 0x5600, "vdd_l13_l19_l23_l24", },
+ { "l24", 0x5700, "vdd_l13_l19_l23_l24", },
+ { "l25", 0x5800, "vdd_l25", },
+ { "l26", 0x5900, "vdd_l2_l26_l28", },
+ { "l27", 0x5a00, "vdd_l4_l27_l31", },
+ { "l28", 0x5b00, "vdd_l2_l26_l28", },
+ { "l29", 0x5c00, "vdd_l17_l29", },
+ { "l30", 0x5d00, "vdd_l8_l16_l30", },
+ { "l31", 0x5e00, "vdd_l4_l27_l31", },
+ { "l32", 0x5f00, "vdd_l6_l12_l32", },
+ { "lvs1", 0x8000, "vdd_lvs_1_2", },
+ { "lvs2", 0x8100, "vdd_lvs_1_2", },
+ { }
+};
+
static const struct of_device_id qcom_spmi_regulator_match[] = {
{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
+ { .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_spmi_regulator_match);
@@ -1573,7 +1680,7 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
ret = spmi_regulator_match(vreg, reg->force_type);
if (ret)
- goto err;
+ continue;
config.dev = dev;
config.driver_data = vreg;
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index d86a3dcd6..40d07ba03 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -55,6 +55,42 @@
/* max steps for increasing the voltage of Buck1/2, equal to 100mV */
#define MAX_STEPS_ONE_TIME 8
+#define RK8XX_DESC(_id, _match, _supply, _min, _max, _step, _vreg, \
+ _vmask, _ereg, _emask, _etime) \
+ [_id] = { \
+ .name = (_match), \
+ .supply_name = (_supply), \
+ .of_match = of_match_ptr(_match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .id = (_id), \
+ .n_voltages = (((_max) - (_min)) / (_step) + 1), \
+ .owner = THIS_MODULE, \
+ .min_uV = (_min) * 1000, \
+ .uV_step = (_step) * 1000, \
+ .vsel_reg = (_vreg), \
+ .vsel_mask = (_vmask), \
+ .enable_reg = (_ereg), \
+ .enable_mask = (_emask), \
+ .enable_time = (_etime), \
+ .ops = &rk808_reg_ops, \
+ }
+
+#define RK8XX_DESC_SWITCH(_id, _match, _supply, _ereg, _emask) \
+ [_id] = { \
+ .name = (_match), \
+ .supply_name = (_supply), \
+ .of_match = of_match_ptr(_match), \
+ .regulators_node = of_match_ptr("regulators"), \
+ .type = REGULATOR_VOLTAGE, \
+ .id = (_id), \
+ .enable_reg = (_ereg), \
+ .enable_mask = (_emask), \
+ .owner = THIS_MODULE, \
+ .ops = &rk808_switch_ops \
+ }
+
+
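
RK8XX_DESC derives n_voltages from the millivolt bounds and step, so the table entries below stay consistent with the open-coded descriptors they replace (LDO1 at 1800..3400 mV in 100 mV steps is 17 voltages, DCDC4 at 1800..3300 mV is 16). A quick check of the arithmetic:

#include <stdio.h>

/* Mirrors the n_voltages computation in RK8XX_DESC above. */
static int n_voltages(int min_mV, int max_mV, int step_mV)
{
	return (max_mV - min_mV) / step_mV + 1;
}

int main(void)
{
	printf("LDO1: %d\n", n_voltages(1800, 3400, 100));	/* 17 */
	printf("DCDC4: %d\n", n_voltages(1800, 3300, 100));	/* 16 */
	return 0;
}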
struct rk808_regulator_data {
struct gpio_desc *dvs_gpio[2];
};
@@ -66,27 +102,11 @@ static const int rk808_buck_config_regs[] = {
RK808_BUCK4_CONFIG_REG,
};
-static const struct regulator_linear_range rk808_buck_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(712500, 0, 63, 12500),
-};
-
-static const struct regulator_linear_range rk808_buck4_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(1800000, 0, 15, 100000),
-};
-
-static const struct regulator_linear_range rk808_ldo_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(1800000, 0, 16, 100000),
-};
-
static const struct regulator_linear_range rk808_ldo3_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(800000, 0, 13, 100000),
REGULATOR_LINEAR_RANGE(2500000, 15, 15, 0),
};
-static const struct regulator_linear_range rk808_ldo6_voltage_ranges[] = {
- REGULATOR_LINEAR_RANGE(800000, 0, 17, 100000),
-};
-
static int rk808_buck1_2_get_voltage_sel_regmap(struct regulator_dev *rdev)
{
struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
@@ -242,6 +262,21 @@ static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
{
unsigned int reg;
+ int sel = regulator_map_voltage_linear(rdev, uv, uv);
+
+ if (sel < 0)
+ return -EINVAL;
+
+ reg = rdev->desc->vsel_reg + RK808_SLP_REG_OFFSET;
+
+ return regmap_update_bits(rdev->regmap, reg,
+ rdev->desc->vsel_mask,
+ sel);
+}
+
+static int rk808_set_suspend_voltage_range(struct regulator_dev *rdev, int uv)
+{
+ unsigned int reg;
int sel = regulator_map_voltage_linear_range(rdev, uv, uv);
if (sel < 0)
@@ -277,8 +312,8 @@ static int rk808_set_suspend_disable(struct regulator_dev *rdev)
}
static struct regulator_ops rk808_buck1_2_ops = {
- .list_voltage = regulator_list_voltage_linear_range,
- .map_voltage = regulator_map_voltage_linear_range,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = rk808_buck1_2_get_voltage_sel_regmap,
.set_voltage_sel = rk808_buck1_2_set_voltage_sel,
.set_voltage_time_sel = rk808_buck1_2_set_voltage_time_sel,
@@ -292,6 +327,19 @@ static struct regulator_ops rk808_buck1_2_ops = {
};
static struct regulator_ops rk808_reg_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_suspend_voltage = rk808_set_suspend_voltage,
+ .set_suspend_enable = rk808_set_suspend_enable,
+ .set_suspend_disable = rk808_set_suspend_disable,
+};
+
+static struct regulator_ops rk808_reg_ops_ranges = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -299,7 +347,7 @@ static struct regulator_ops rk808_reg_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
- .set_suspend_voltage = rk808_set_suspend_voltage,
+ .set_suspend_voltage = rk808_set_suspend_voltage_range,
.set_suspend_enable = rk808_set_suspend_enable,
.set_suspend_disable = rk808_set_suspend_disable,
};
@@ -316,12 +364,14 @@ static const struct regulator_desc rk808_reg[] = {
{
.name = "DCDC_REG1",
.supply_name = "vcc1",
+ .of_match = of_match_ptr("DCDC_REG1"),
+ .regulators_node = of_match_ptr("regulators"),
.id = RK808_ID_DCDC1,
.ops = &rk808_buck1_2_ops,
.type = REGULATOR_VOLTAGE,
+ .min_uV = 712500,
+ .uV_step = 12500,
.n_voltages = 64,
- .linear_ranges = rk808_buck_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_buck_voltage_ranges),
.vsel_reg = RK808_BUCK1_ON_VSEL_REG,
.vsel_mask = RK808_BUCK_VSEL_MASK,
.enable_reg = RK808_DCDC_EN_REG,
@@ -330,12 +380,14 @@ static const struct regulator_desc rk808_reg[] = {
}, {
.name = "DCDC_REG2",
.supply_name = "vcc2",
+ .of_match = of_match_ptr("DCDC_REG2"),
+ .regulators_node = of_match_ptr("regulators"),
.id = RK808_ID_DCDC2,
.ops = &rk808_buck1_2_ops,
.type = REGULATOR_VOLTAGE,
+ .min_uV = 712500,
+ .uV_step = 12500,
.n_voltages = 64,
- .linear_ranges = rk808_buck_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_buck_voltage_ranges),
.vsel_reg = RK808_BUCK2_ON_VSEL_REG,
.vsel_mask = RK808_BUCK_VSEL_MASK,
.enable_reg = RK808_DCDC_EN_REG,
@@ -344,6 +396,8 @@ static const struct regulator_desc rk808_reg[] = {
}, {
.name = "DCDC_REG3",
.supply_name = "vcc3",
+ .of_match = of_match_ptr("DCDC_REG3"),
+ .regulators_node = of_match_ptr("regulators"),
.id = RK808_ID_DCDC3,
.ops = &rk808_switch_ops,
.type = REGULATOR_VOLTAGE,
@@ -351,55 +405,23 @@ static const struct regulator_desc rk808_reg[] = {
.enable_reg = RK808_DCDC_EN_REG,
.enable_mask = BIT(2),
.owner = THIS_MODULE,
- }, {
- .name = "DCDC_REG4",
- .supply_name = "vcc4",
- .id = RK808_ID_DCDC4,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 16,
- .linear_ranges = rk808_buck4_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_buck4_voltage_ranges),
- .vsel_reg = RK808_BUCK4_ON_VSEL_REG,
- .vsel_mask = RK808_BUCK4_VSEL_MASK,
- .enable_reg = RK808_DCDC_EN_REG,
- .enable_mask = BIT(3),
- .owner = THIS_MODULE,
- }, {
- .name = "LDO_REG1",
- .supply_name = "vcc6",
- .id = RK808_ID_LDO1,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 17,
- .linear_ranges = rk808_ldo_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
- .vsel_reg = RK808_LDO1_ON_VSEL_REG,
- .vsel_mask = RK808_LDO_VSEL_MASK,
- .enable_reg = RK808_LDO_EN_REG,
- .enable_mask = BIT(0),
- .enable_time = 400,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO_REG2",
- .supply_name = "vcc6",
- .id = RK808_ID_LDO2,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 17,
- .linear_ranges = rk808_ldo_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
- .vsel_reg = RK808_LDO2_ON_VSEL_REG,
- .vsel_mask = RK808_LDO_VSEL_MASK,
- .enable_reg = RK808_LDO_EN_REG,
- .enable_mask = BIT(1),
- .enable_time = 400,
- .owner = THIS_MODULE,
- }, {
+ },
+ RK8XX_DESC(RK808_ID_DCDC4, "DCDC_REG4", "vcc4", 1800, 3300, 100,
+ RK808_BUCK4_ON_VSEL_REG, RK808_BUCK4_VSEL_MASK,
+ RK808_DCDC_EN_REG, BIT(3), 0),
+ RK8XX_DESC(RK808_ID_LDO1, "LDO_REG1", "vcc6", 1800, 3400, 100,
+ RK808_LDO1_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+ BIT(0), 400),
+ RK8XX_DESC(RK808_ID_LDO2, "LDO_REG2", "vcc6", 1800, 3400, 100,
+ RK808_LDO2_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+ BIT(1), 400),
+ {
.name = "LDO_REG3",
.supply_name = "vcc7",
+ .of_match = of_match_ptr("LDO_REG3"),
+ .regulators_node = of_match_ptr("regulators"),
.id = RK808_ID_LDO3,
- .ops = &rk808_reg_ops,
+ .ops = &rk808_reg_ops_ranges,
.type = REGULATOR_VOLTAGE,
.n_voltages = 16,
.linear_ranges = rk808_ldo3_voltage_ranges,
@@ -410,117 +432,26 @@ static const struct regulator_desc rk808_reg[] = {
.enable_mask = BIT(2),
.enable_time = 400,
.owner = THIS_MODULE,
- }, {
- .name = "LDO_REG4",
- .supply_name = "vcc9",
- .id = RK808_ID_LDO4,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 17,
- .linear_ranges = rk808_ldo_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
- .vsel_reg = RK808_LDO4_ON_VSEL_REG,
- .vsel_mask = RK808_LDO_VSEL_MASK,
- .enable_reg = RK808_LDO_EN_REG,
- .enable_mask = BIT(3),
- .enable_time = 400,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO_REG5",
- .supply_name = "vcc9",
- .id = RK808_ID_LDO5,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 17,
- .linear_ranges = rk808_ldo_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
- .vsel_reg = RK808_LDO5_ON_VSEL_REG,
- .vsel_mask = RK808_LDO_VSEL_MASK,
- .enable_reg = RK808_LDO_EN_REG,
- .enable_mask = BIT(4),
- .enable_time = 400,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO_REG6",
- .supply_name = "vcc10",
- .id = RK808_ID_LDO6,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 18,
- .linear_ranges = rk808_ldo6_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_ldo6_voltage_ranges),
- .vsel_reg = RK808_LDO6_ON_VSEL_REG,
- .vsel_mask = RK808_LDO_VSEL_MASK,
- .enable_reg = RK808_LDO_EN_REG,
- .enable_mask = BIT(5),
- .enable_time = 400,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO_REG7",
- .supply_name = "vcc7",
- .id = RK808_ID_LDO7,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 18,
- .linear_ranges = rk808_ldo6_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_ldo6_voltage_ranges),
- .vsel_reg = RK808_LDO7_ON_VSEL_REG,
- .vsel_mask = RK808_LDO_VSEL_MASK,
- .enable_reg = RK808_LDO_EN_REG,
- .enable_mask = BIT(6),
- .enable_time = 400,
- .owner = THIS_MODULE,
- }, {
- .name = "LDO_REG8",
- .supply_name = "vcc11",
- .id = RK808_ID_LDO8,
- .ops = &rk808_reg_ops,
- .type = REGULATOR_VOLTAGE,
- .n_voltages = 17,
- .linear_ranges = rk808_ldo_voltage_ranges,
- .n_linear_ranges = ARRAY_SIZE(rk808_ldo_voltage_ranges),
- .vsel_reg = RK808_LDO8_ON_VSEL_REG,
- .vsel_mask = RK808_LDO_VSEL_MASK,
- .enable_reg = RK808_LDO_EN_REG,
- .enable_mask = BIT(7),
- .enable_time = 400,
- .owner = THIS_MODULE,
- }, {
- .name = "SWITCH_REG1",
- .supply_name = "vcc8",
- .id = RK808_ID_SWITCH1,
- .ops = &rk808_switch_ops,
- .type = REGULATOR_VOLTAGE,
- .enable_reg = RK808_DCDC_EN_REG,
- .enable_mask = BIT(5),
- .owner = THIS_MODULE,
- }, {
- .name = "SWITCH_REG2",
- .supply_name = "vcc12",
- .id = RK808_ID_SWITCH2,
- .ops = &rk808_switch_ops,
- .type = REGULATOR_VOLTAGE,
- .enable_reg = RK808_DCDC_EN_REG,
- .enable_mask = BIT(6),
- .owner = THIS_MODULE,
},
-};
-
-static struct of_regulator_match rk808_reg_matches[] = {
- [RK808_ID_DCDC1] = { .name = "DCDC_REG1" },
- [RK808_ID_DCDC2] = { .name = "DCDC_REG2" },
- [RK808_ID_DCDC3] = { .name = "DCDC_REG3" },
- [RK808_ID_DCDC4] = { .name = "DCDC_REG4" },
- [RK808_ID_LDO1] = { .name = "LDO_REG1" },
- [RK808_ID_LDO2] = { .name = "LDO_REG2" },
- [RK808_ID_LDO3] = { .name = "LDO_REG3" },
- [RK808_ID_LDO4] = { .name = "LDO_REG4" },
- [RK808_ID_LDO5] = { .name = "LDO_REG5" },
- [RK808_ID_LDO6] = { .name = "LDO_REG6" },
- [RK808_ID_LDO7] = { .name = "LDO_REG7" },
- [RK808_ID_LDO8] = { .name = "LDO_REG8" },
- [RK808_ID_SWITCH1] = { .name = "SWITCH_REG1" },
- [RK808_ID_SWITCH2] = { .name = "SWITCH_REG2" },
+ RK8XX_DESC(RK808_ID_LDO4, "LDO_REG4", "vcc9", 1800, 3400, 100,
+ RK808_LDO4_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+ BIT(3), 400),
+ RK8XX_DESC(RK808_ID_LDO5, "LDO_REG5", "vcc9", 1800, 3400, 100,
+ RK808_LDO5_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+ BIT(4), 400),
+ RK8XX_DESC(RK808_ID_LDO6, "LDO_REG6", "vcc10", 800, 2500, 100,
+ RK808_LDO6_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+ BIT(5), 400),
+ RK8XX_DESC(RK808_ID_LDO7, "LDO_REG7", "vcc7", 800, 2500, 100,
+ RK808_LDO7_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+ BIT(6), 400),
+ RK8XX_DESC(RK808_ID_LDO8, "LDO_REG8", "vcc11", 1800, 3400, 100,
+ RK808_LDO8_ON_VSEL_REG, RK808_LDO_VSEL_MASK, RK808_LDO_EN_REG,
+ BIT(7), 400),
+ RK8XX_DESC_SWITCH(RK808_ID_SWITCH1, "SWITCH_REG1", "vcc8",
+ RK808_DCDC_EN_REG, BIT(5)),
+ RK8XX_DESC_SWITCH(RK808_ID_SWITCH2, "SWITCH_REG2", "vcc12",
+ RK808_DCDC_EN_REG, BIT(6)),
};
static int rk808_regulator_dt_parse_pdata(struct device *dev,
@@ -529,17 +460,12 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev,
struct rk808_regulator_data *pdata)
{
struct device_node *np;
- int tmp, ret, i;
+ int tmp, ret = 0, i;
np = of_get_child_by_name(client_dev->of_node, "regulators");
if (!np)
return -ENXIO;
- ret = of_regulator_match(dev, np, rk808_reg_matches,
- RK808_NUM_REGULATORS);
- if (ret < 0)
- goto dt_parse_end;
-
for (i = 0; i < ARRAY_SIZE(pdata->dvs_gpio); i++) {
pdata->dvs_gpio[i] =
devm_gpiod_get_index_optional(client_dev, "dvs", i,
@@ -586,18 +512,12 @@ static int rk808_regulator_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pdata);
+ config.dev = &client->dev;
+ config.driver_data = pdata;
+ config.regmap = rk808->regmap;
+
/* Instantiate the regulators */
for (i = 0; i < RK808_NUM_REGULATORS; i++) {
- if (!rk808_reg_matches[i].init_data ||
- !rk808_reg_matches[i].of_node)
- continue;
-
- config.dev = &client->dev;
- config.driver_data = pdata;
- config.regmap = rk808->regmap;
- config.of_node = rk808_reg_matches[i].of_node;
- config.init_data = rk808_reg_matches[i].init_data;
-
rk808_rdev = devm_regulator_register(&pdev->dev,
&rk808_reg[i], &config);
if (IS_ERR(rk808_rdev)) {
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 6dfa3502e..02fb6b4ea 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -267,6 +267,7 @@ static struct regulator_ops s2mps11_buck_ops = {
.ops = &s2mps11_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
+ .ramp_delay = RAMP_DELAY_12_MVUS, \
.min_uV = MIN_800_MV, \
.uV_step = step, \
.n_voltages = S2MPS11_LDO_N_VOLTAGES, \
@@ -1237,17 +1238,7 @@ static struct platform_driver s2mps11_pmic_driver = {
.id_table = s2mps11_pmic_id,
};
-static int __init s2mps11_pmic_init(void)
-{
- return platform_driver_register(&s2mps11_pmic_driver);
-}
-subsys_initcall(s2mps11_pmic_init);
-
-static void __exit s2mps11_pmic_exit(void)
-{
- platform_driver_unregister(&s2mps11_pmic_driver);
-}
-module_exit(s2mps11_pmic_exit);
+module_platform_driver(s2mps11_pmic_driver);
/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index 572816e30..c139890c1 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -94,11 +94,14 @@ static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev,
int ramp_delay)
{
struct tps51632_chip *tps = rdev_get_drvdata(rdev);
- int bit = ramp_delay/6000;
+ int bit;
int ret;
- if (bit)
- bit--;
+ if (ramp_delay == 0)
+ bit = 0;
+ else
+ bit = DIV_ROUND_UP(ramp_delay, 6000) - 1;
+
ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit));
if (ret < 0)
dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret);
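
The tps51632 fix replaces the truncating ramp_delay/6000 with DIV_ROUND_UP(), so the selected slew step meets or exceeds the requested ramp rate (assuming each step is worth 6000 uV/us), and ramp_delay == 0 is handled explicitly instead of underflowing. A userspace sketch of the mapping:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Sketch of the ramp_delay -> slew-bit mapping after this fix. */
static int slew_bit(int ramp_delay)
{
	if (ramp_delay == 0)
		return 0;
	return DIV_ROUND_UP(ramp_delay, 6000) - 1;
}

int main(void)
{
	int delays[] = { 0, 5000, 6000, 6001, 12000 };
	int i;

	for (i = 0; i < 5; i++)
		printf("%d uV/us -> bit %d\n", delays[i], slew_bit(delays[i]));
	return 0;
}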
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 9d6ea3a4d..67cac2682 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -600,7 +600,7 @@ static int pmic_probe(struct spi_device *spi)
memset(hw, 0, sizeof(struct tps6524x));
hw->dev = dev;
- hw->spi = spi_dev_get(spi);
+ hw->spi = spi;
mutex_init(&hw->lock);
for (i = 0; i < N_REGULATORS; i++, info++, init_data++) {
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 955a6fb13..faeb5ee92 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -21,7 +21,7 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/i2c/twl.h>
-
+#include <linux/delay.h>
/*
* The TWL4030/TWL5030/TPS659x0/TWL6030 family chips include power management, a
@@ -188,6 +188,74 @@ static int twl6030reg_is_enabled(struct regulator_dev *rdev)
return grp && (val == TWL6030_CFG_STATE_ON);
}
+#define PB_I2C_BUSY BIT(0)
+#define PB_I2C_BWEN BIT(1)
+
+/* Wait until buffer empty/ready to send a word on power bus. */
+static int twl4030_wait_pb_ready(void)
+{
+ int ret;
+ int timeout = 10;
+ u8 val;
+
+ do {
+ ret = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &val,
+ TWL4030_PM_MASTER_PB_CFG);
+ if (ret < 0)
+ return ret;
+
+ if (!(val & PB_I2C_BUSY))
+ return 0;
+
+ mdelay(1);
+ timeout--;
+ } while (timeout);
+
+ return -ETIMEDOUT;
+}
+
+/* Send a word over the powerbus */
+static int twl4030_send_pb_msg(unsigned msg)
+{
+ u8 val;
+ int ret;
+
+ /* save powerbus configuration */
+ ret = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &val,
+ TWL4030_PM_MASTER_PB_CFG);
+ if (ret < 0)
+ return ret;
+
+ /* Enable i2c access to powerbus */
+ ret = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, val | PB_I2C_BWEN,
+ TWL4030_PM_MASTER_PB_CFG);
+ if (ret < 0)
+ return ret;
+
+ ret = twl4030_wait_pb_ready();
+ if (ret < 0)
+ return ret;
+
+ ret = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, msg >> 8,
+ TWL4030_PM_MASTER_PB_WORD_MSB);
+ if (ret < 0)
+ return ret;
+
+ ret = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, msg & 0xff,
+ TWL4030_PM_MASTER_PB_WORD_LSB);
+ if (ret < 0)
+ return ret;
+
+ ret = twl4030_wait_pb_ready();
+ if (ret < 0)
+ return ret;
+
+ /* Restore powerbus configuration */
+ return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, val,
+ TWL4030_PM_MASTER_PB_CFG);
+}
+
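
twl4030_wait_pb_ready() above is a bounded busy-bit poll: propagate I2C errors, succeed as soon as PB_I2C_BUSY clears, give up after ten 1 ms polls. The same shape in a self-contained sketch, with a stub standing in for twl_i2c_read_u8():

#include <errno.h>
#include <stdio.h>

#define PB_I2C_BUSY	0x01

/* Stub for twl_i2c_read_u8(); clears the busy bit on the 3rd poll. */
static int read_status(unsigned char *val)
{
	static int polls;

	*val = (++polls < 3) ? PB_I2C_BUSY : 0;
	return 0;
}

static int wait_ready(void)
{
	int timeout = 10;
	unsigned char val;
	int ret;

	do {
		ret = read_status(&val);
		if (ret < 0)
			return ret;	/* bus error: bail out */
		if (!(val & PB_I2C_BUSY))
			return 0;	/* ready */
		/* the driver does mdelay(1) here */
	} while (--timeout);

	return -ETIMEDOUT;
}

int main(void)
{
	printf("wait_ready() = %d\n", wait_ready());
	return 0;
}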
static int twl4030reg_enable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -303,7 +371,6 @@ static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
unsigned message;
- int status;
/* We can only set the mode through state machine commands... */
switch (mode) {
@@ -317,20 +384,19 @@ static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
return -EINVAL;
}
- /* Ensure the resource is associated with some group */
- status = twlreg_grp(rdev);
- if (status < 0)
- return status;
- if (!(status & (P3_GRP_4030 | P2_GRP_4030 | P1_GRP_4030)))
- return -EACCES;
-
- status = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
- message >> 8, TWL4030_PM_MASTER_PB_WORD_MSB);
- if (status < 0)
- return status;
+ return twl4030_send_pb_msg(message);
+}
- return twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
- message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB);
+static inline unsigned int twl4030reg_map_mode(unsigned int mode)
+{
+ switch (mode) {
+ case RES_STATE_ACTIVE:
+ return REGULATOR_MODE_NORMAL;
+ case RES_STATE_SLEEP:
+ return REGULATOR_MODE_STANDBY;
+ default:
+ return -EINVAL;
+ }
}
static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
@@ -835,10 +901,11 @@ static struct regulator_ops twlsmps_ops = {
#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
remap_conf) \
TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
- remap_conf, TWL4030, twl4030fixed_ops)
+ remap_conf, TWL4030, twl4030fixed_ops, \
+ twl4030reg_map_mode)
#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \
TWL_FIXED_LDO(label, offset, mVolts, 0x0, turnon_delay, \
- 0x0, TWL6030, twl6030fixed_ops)
+ 0x0, TWL6030, twl6030fixed_ops, 0x0)
#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \
static const struct twlreg_info TWL4030_INFO_##label = { \
@@ -855,6 +922,7 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.enable_time = turnon_delay, \
+ .of_map_mode = twl4030reg_map_mode, \
}, \
}
@@ -870,6 +938,7 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
.enable_time = turnon_delay, \
+ .of_map_mode = twl4030reg_map_mode, \
}, \
}
@@ -915,7 +984,7 @@ static const struct twlreg_info TWL6032_INFO_##label = { \
}
#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
- family, operations) \
+ family, operations, map_mode) \
static const struct twlreg_info TWLFIXED_INFO_##label = { \
.base = offset, \
.id = num, \
@@ -930,6 +999,7 @@ static const struct twlreg_info TWLFIXED_INFO_##label = { \
.owner = THIS_MODULE, \
.min_uV = mVolts * 1000, \
.enable_time = turnon_delay, \
+ .of_map_mode = map_mode, \
}, \
}
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index c4a13a984..a8958b8fb 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -57,6 +57,8 @@ static DEFINE_IDA(rproc_dev_index);
static const char * const rproc_crash_names[] = {
[RPROC_MMUFAULT] = "mmufault",
+ [RPROC_WATCHDOG] = "watchdog",
+ [RPROC_FATAL_ERROR] = "fatal error",
};
/* translate rproc_crash_type to string */
@@ -856,12 +858,8 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
* copy this information to device memory.
*/
loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
- if (!loaded_table) {
- ret = -EINVAL;
- goto clean_up;
- }
-
- memcpy(loaded_table, rproc->cached_table, tablesz);
+ if (loaded_table)
+ memcpy(loaded_table, rproc->cached_table, tablesz);
/* power up the remote processor */
ret = rproc->ops->start(rproc);
@@ -1030,8 +1028,9 @@ static void rproc_crash_handler_work(struct work_struct *work)
}
/**
- * rproc_boot() - boot a remote processor
+ * __rproc_boot() - boot a remote processor
* @rproc: handle of a remote processor
+ * @wait: wait for rproc registration completion
*
* Boot a remote processor (i.e. load its firmware, power it on, ...).
*
@@ -1040,7 +1039,7 @@ static void rproc_crash_handler_work(struct work_struct *work)
*
* Returns 0 on success, and an appropriate error value otherwise.
*/
-int rproc_boot(struct rproc *rproc)
+static int __rproc_boot(struct rproc *rproc, bool wait)
{
const struct firmware *firmware_p;
struct device *dev;
@@ -1088,6 +1087,10 @@ int rproc_boot(struct rproc *rproc)
goto downref_rproc;
}
+ /* if rproc virtio is not yet configured, wait */
+ if (wait)
+ wait_for_completion(&rproc->firmware_loading_complete);
+
ret = rproc_fw_boot(rproc, firmware_p);
release_firmware(firmware_p);
@@ -1101,9 +1104,29 @@ unlock_mutex:
mutex_unlock(&rproc->lock);
return ret;
}
+
+/**
+ * rproc_boot() - boot a remote processor
+ * @rproc: handle of a remote processor
+ */
+int rproc_boot(struct rproc *rproc)
+{
+ return __rproc_boot(rproc, true);
+}
EXPORT_SYMBOL(rproc_boot);
/**
+ * rproc_boot_nowait() - boot a remote processor
+ * @rproc: handle of a remote processor
+ *
+ * Same as rproc_boot(), but does not wait for rproc registration completion.
+ */
+int rproc_boot_nowait(struct rproc *rproc)
+{
+ return __rproc_boot(rproc, false);
+}
+
+/**
* rproc_shutdown() - power off the remote processor
* @rproc: the remote processor
*
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 8041b95cb..57e1de59b 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -48,6 +48,7 @@ struct rproc_fw_ops {
/* from remoteproc_core.c */
void rproc_release(struct kref *kref);
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
+int rproc_boot_nowait(struct rproc *rproc);
/* from remoteproc_virtio.c */
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id);
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index e44872fb9..cc9155631 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -161,7 +161,7 @@ static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
}
/* now that the vqs are all set, boot the remote processor */
- ret = rproc_boot(rproc);
+ ret = rproc_boot_nowait(rproc);
if (ret) {
dev_err(&rproc->dev, "rproc_boot() failed %d\n", ret);
goto error;
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index df37212a5..0b2733db0 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -12,5 +12,8 @@ menuconfig RESET_CONTROLLER
If unsure, say no.
+config RESET_OXNAS
+ bool
+
source "drivers/reset/sti/Kconfig"
source "drivers/reset/hisilicon/Kconfig"
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index a1fc8eda7..f173fc384 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_ARCH_STI) += sti/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-$(CONFIG_ARCH_ZYNQ) += reset-zynq.o
obj-$(CONFIG_ATH79) += reset-ath79.o
+obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index f15f150b7..72b32bd15 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -8,6 +8,7 @@
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
+#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -18,19 +19,27 @@
#include <linux/reset-controller.h>
#include <linux/slab.h>
-static DEFINE_MUTEX(reset_controller_list_mutex);
+static DEFINE_MUTEX(reset_list_mutex);
static LIST_HEAD(reset_controller_list);
/**
* struct reset_control - a reset control
* @rcdev: a pointer to the reset controller device
* this reset control belongs to
+ * @list: list entry for the rcdev's reset controller list
* @id: ID of the reset controller in the reset
* controller device
+ * @refcnt: Number of gets of this reset_control
+ * @shared: Is this a shared (1), or an exclusive (0) reset_control?
+ * @deassert_cnt: Number of times this reset line has been deasserted
*/
struct reset_control {
struct reset_controller_dev *rcdev;
+ struct list_head list;
unsigned int id;
+ unsigned int refcnt;
+ int shared;
+ atomic_t deassert_count;
};
/**
@@ -62,9 +71,11 @@ int reset_controller_register(struct reset_controller_dev *rcdev)
rcdev->of_xlate = of_reset_simple_xlate;
}
- mutex_lock(&reset_controller_list_mutex);
+ INIT_LIST_HEAD(&rcdev->reset_control_head);
+
+ mutex_lock(&reset_list_mutex);
list_add(&rcdev->list, &reset_controller_list);
- mutex_unlock(&reset_controller_list_mutex);
+ mutex_unlock(&reset_list_mutex);
return 0;
}
@@ -76,18 +87,23 @@ EXPORT_SYMBOL_GPL(reset_controller_register);
*/
void reset_controller_unregister(struct reset_controller_dev *rcdev)
{
- mutex_lock(&reset_controller_list_mutex);
+ mutex_lock(&reset_list_mutex);
list_del(&rcdev->list);
- mutex_unlock(&reset_controller_list_mutex);
+ mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_unregister);
/**
* reset_control_reset - reset the controlled device
* @rstc: reset controller
+ *
+ * Calling this on a shared reset controller is an error.
*/
int reset_control_reset(struct reset_control *rstc)
{
+ if (WARN_ON(rstc->shared))
+ return -EINVAL;
+
if (rstc->rcdev->ops->reset)
return rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
@@ -98,26 +114,48 @@ EXPORT_SYMBOL_GPL(reset_control_reset);
/**
* reset_control_assert - asserts the reset line
* @rstc: reset controller
+ *
+ * Calling this on an exclusive reset controller guarantees that the reset
+ * will be asserted. When called on a shared reset controller the line may
+ * still be deasserted, as long as other users keep it so.
+ *
+ * For shared reset controls a driver cannot expect the hw's registers and
+ * internal state to be reset, but must be prepared for this to happen.
*/
int reset_control_assert(struct reset_control *rstc)
{
- if (rstc->rcdev->ops->assert)
- return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id);
+ if (!rstc->rcdev->ops->assert)
+ return -ENOTSUPP;
- return -ENOTSUPP;
+ if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
+ return -EINVAL;
+
+ if (atomic_dec_return(&rstc->deassert_count) != 0)
+ return 0;
+ }
+
+ return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_assert);
/**
* reset_control_deassert - deasserts the reset line
* @rstc: reset controller
+ *
+ * After calling this function, the reset is guaranteed to be deasserted.
*/
int reset_control_deassert(struct reset_control *rstc)
{
- if (rstc->rcdev->ops->deassert)
- return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id);
+ if (!rstc->rcdev->ops->deassert)
+ return -ENOTSUPP;
- return -ENOTSUPP;
+ if (rstc->shared) {
+ if (atomic_inc_return(&rstc->deassert_count) != 1)
+ return 0;
+ }
+
+ return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_deassert);
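
The shared-reset bookkeeping above is a balanced counter: deassert touches the hardware only on the 0 -> 1 transition, assert only on the 1 -> 0 transition, and an assert without a matching deassert trips the WARN_ON. A userspace sketch of the logic, with a plain int in place of atomic_t:

#include <stdio.h>

static int deassert_count;

static int shared_deassert(void)
{
	if (++deassert_count != 1)
		return 0;	/* already deasserted by another user */
	printf("hw: deassert line\n");
	return 0;
}

static int shared_assert(void)
{
	if (deassert_count == 0)
		return -1;	/* unbalanced: WARN_ON in the driver */
	if (--deassert_count != 0)
		return 0;	/* other users still need it deasserted */
	printf("hw: assert line\n");
	return 0;
}

int main(void)
{
	shared_deassert();	/* touches hw */
	shared_deassert();	/* no-op, count = 2 */
	shared_assert();	/* no-op, count = 1 */
	shared_assert();	/* touches hw */
	return 0;
}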
@@ -136,18 +174,54 @@ int reset_control_status(struct reset_control *rstc)
}
EXPORT_SYMBOL_GPL(reset_control_status);
-/**
- * of_reset_control_get_by_index - Lookup and obtain a reference to a reset
- * controller by index.
- * @node: device to be reset by the controller
- * @index: index of the reset controller
- *
- * This is to be used to perform a list of resets for a device or power domain
- * in whatever order. Returns a struct reset_control or IS_ERR() condition
- * containing errno.
- */
-struct reset_control *of_reset_control_get_by_index(struct device_node *node,
- int index)
+static struct reset_control *__reset_control_get(
+ struct reset_controller_dev *rcdev,
+ unsigned int index, int shared)
+{
+ struct reset_control *rstc;
+
+ lockdep_assert_held(&reset_list_mutex);
+
+ list_for_each_entry(rstc, &rcdev->reset_control_head, list) {
+ if (rstc->id == index) {
+ if (WARN_ON(!rstc->shared || !shared))
+ return ERR_PTR(-EBUSY);
+
+ rstc->refcnt++;
+ return rstc;
+ }
+ }
+
+ rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
+ if (!rstc)
+ return ERR_PTR(-ENOMEM);
+
+ try_module_get(rcdev->owner);
+
+ rstc->rcdev = rcdev;
+ list_add(&rstc->list, &rcdev->reset_control_head);
+ rstc->id = index;
+ rstc->refcnt = 1;
+ rstc->shared = shared;
+
+ return rstc;
+}
+
+static void __reset_control_put(struct reset_control *rstc)
+{
+ lockdep_assert_held(&reset_list_mutex);
+
+ if (--rstc->refcnt)
+ return;
+
+ module_put(rstc->rcdev->owner);
+
+ list_del(&rstc->list);
+ kfree(rstc);
+}
+
+struct reset_control *__of_reset_control_get(struct device_node *node,
+ const char *id, int index, int shared)
{
struct reset_control *rstc;
struct reset_controller_dev *r, *rcdev;
@@ -155,12 +229,22 @@ struct reset_control *of_reset_control_get_by_index(struct device_node *node,
int rstc_id;
int ret;
+ if (!node)
+ return ERR_PTR(-EINVAL);
+
+ if (id) {
+ index = of_property_match_string(node,
+ "reset-names", id);
+ if (index < 0)
+ return ERR_PTR(-ENOENT);
+ }
+
ret = of_parse_phandle_with_args(node, "resets", "#reset-cells",
index, &args);
if (ret)
return ERR_PTR(ret);
- mutex_lock(&reset_controller_list_mutex);
+ mutex_lock(&reset_list_mutex);
rcdev = NULL;
list_for_each_entry(r, &reset_controller_list, list) {
if (args.np == r->of_node) {
@@ -171,78 +255,29 @@ struct reset_control *of_reset_control_get_by_index(struct device_node *node,
of_node_put(args.np);
if (!rcdev) {
- mutex_unlock(&reset_controller_list_mutex);
+ mutex_unlock(&reset_list_mutex);
return ERR_PTR(-EPROBE_DEFER);
}
if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
- mutex_unlock(&reset_controller_list_mutex);
+ mutex_unlock(&reset_list_mutex);
return ERR_PTR(-EINVAL);
}
rstc_id = rcdev->of_xlate(rcdev, &args);
if (rstc_id < 0) {
- mutex_unlock(&reset_controller_list_mutex);
+ mutex_unlock(&reset_list_mutex);
return ERR_PTR(rstc_id);
}
- try_module_get(rcdev->owner);
- mutex_unlock(&reset_controller_list_mutex);
-
- rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
- if (!rstc) {
- module_put(rcdev->owner);
- return ERR_PTR(-ENOMEM);
- }
+ /* reset_list_mutex also protects the rcdev's reset_control list */
+ rstc = __reset_control_get(rcdev, rstc_id, shared);
- rstc->rcdev = rcdev;
- rstc->id = rstc_id;
+ mutex_unlock(&reset_list_mutex);
return rstc;
}
-EXPORT_SYMBOL_GPL(of_reset_control_get_by_index);
-
-/**
- * of_reset_control_get - Lookup and obtain a reference to a reset controller.
- * @node: device to be reset by the controller
- * @id: reset line name
- *
- * Returns a struct reset_control or IS_ERR() condition containing errno.
- *
- * Use of id names is optional.
- */
-struct reset_control *of_reset_control_get(struct device_node *node,
- const char *id)
-{
- int index = 0;
-
- if (id) {
- index = of_property_match_string(node,
- "reset-names", id);
- if (index < 0)
- return ERR_PTR(-ENOENT);
- }
- return of_reset_control_get_by_index(node, index);
-}
-EXPORT_SYMBOL_GPL(of_reset_control_get);
-
-/**
- * reset_control_get - Lookup and obtain a reference to a reset controller.
- * @dev: device to be reset by the controller
- * @id: reset line name
- *
- * Returns a struct reset_control or IS_ERR() condition containing errno.
- *
- * Use of id names is optional.
- */
-struct reset_control *reset_control_get(struct device *dev, const char *id)
-{
- if (!dev)
- return ERR_PTR(-EINVAL);
-
- return of_reset_control_get(dev->of_node, id);
-}
-EXPORT_SYMBOL_GPL(reset_control_get);
+EXPORT_SYMBOL_GPL(__of_reset_control_get);
/**
* reset_control_put - free the reset controller
@@ -254,8 +289,9 @@ void reset_control_put(struct reset_control *rstc)
if (IS_ERR(rstc))
return;
- module_put(rstc->rcdev->owner);
- kfree(rstc);
+ mutex_lock(&reset_list_mutex);
+ __reset_control_put(rstc);
+ mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_put);
@@ -264,16 +300,8 @@ static void devm_reset_control_release(struct device *dev, void *res)
reset_control_put(*(struct reset_control **)res);
}
-/**
- * devm_reset_control_get - resource managed reset_control_get()
- * @dev: device to be reset by the controller
- * @id: reset line name
- *
- * Managed reset_control_get(). For reset controllers returned from this
- * function, reset_control_put() is called automatically on driver detach.
- * See reset_control_get() for more information.
- */
-struct reset_control *devm_reset_control_get(struct device *dev, const char *id)
+struct reset_control *__devm_reset_control_get(struct device *dev,
+ const char *id, int index, int shared)
{
struct reset_control **ptr, *rstc;
@@ -282,7 +310,8 @@ struct reset_control *devm_reset_control_get(struct device *dev, const char *id)
if (!ptr)
return ERR_PTR(-ENOMEM);
- rstc = reset_control_get(dev, id);
+ rstc = __of_reset_control_get(dev ? dev->of_node : NULL,
+ id, index, shared);
if (!IS_ERR(rstc)) {
*ptr = rstc;
devres_add(dev, ptr);
@@ -292,7 +321,7 @@ struct reset_control *devm_reset_control_get(struct device *dev, const char *id)
return rstc;
}
-EXPORT_SYMBOL_GPL(devm_reset_control_get);
+EXPORT_SYMBOL_GPL(__devm_reset_control_get);
/**
* device_reset - find reset controller associated with the device
diff --git a/drivers/reset/reset-lpc18xx.c b/drivers/reset/reset-lpc18xx.c
index 3b8a4f5a1..54cca0055 100644
--- a/drivers/reset/reset-lpc18xx.c
+++ b/drivers/reset/reset-lpc18xx.c
@@ -35,6 +35,7 @@
struct lpc18xx_rgu_data {
struct reset_controller_dev rcdev;
+ struct notifier_block restart_nb;
struct clk *clk_delay;
struct clk *clk_reg;
void __iomem *base;
@@ -44,12 +45,13 @@ struct lpc18xx_rgu_data {
#define to_rgu_data(p) container_of(p, struct lpc18xx_rgu_data, rcdev)
-static void __iomem *rgu_base;
-
-static int lpc18xx_rgu_restart(struct notifier_block *this, unsigned long mode,
+static int lpc18xx_rgu_restart(struct notifier_block *nb, unsigned long mode,
void *cmd)
{
- writel(BIT(LPC18XX_RGU_CORE_RST), rgu_base + LPC18XX_RGU_CTRL0);
+ struct lpc18xx_rgu_data *rc = container_of(nb, struct lpc18xx_rgu_data,
+ restart_nb);
+
+ writel(BIT(LPC18XX_RGU_CORE_RST), rc->base + LPC18XX_RGU_CTRL0);
mdelay(2000);
pr_emerg("%s: unable to restart system\n", __func__);
@@ -57,11 +59,6 @@ static int lpc18xx_rgu_restart(struct notifier_block *this, unsigned long mode,
return NOTIFY_DONE;
}
-static struct notifier_block lpc18xx_rgu_restart_nb = {
- .notifier_call = lpc18xx_rgu_restart,
- .priority = 192,
-};
-
/*
* The LPC18xx RGU has mostly self-deasserting resets except for the
* two reset lines going to the internal Cortex-M0 cores.
@@ -205,8 +202,9 @@ static int lpc18xx_rgu_probe(struct platform_device *pdev)
goto dis_clks;
}
- rgu_base = rc->base;
- ret = register_restart_handler(&lpc18xx_rgu_restart_nb);
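+ /* Priority 192 runs this handler ahead of the default (128) restart handlers. */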
+ rc->restart_nb.priority = 192;
+ rc->restart_nb.notifier_call = lpc18xx_rgu_restart;
+ ret = register_restart_handler(&rc->restart_nb);
if (ret)
dev_warn(&pdev->dev, "failed to register restart handler\n");
@@ -225,7 +223,7 @@ static int lpc18xx_rgu_remove(struct platform_device *pdev)
struct lpc18xx_rgu_data *rc = platform_get_drvdata(pdev);
int ret;
- ret = unregister_restart_handler(&lpc18xx_rgu_restart_nb);
+ ret = unregister_restart_handler(&rc->restart_nb);
if (ret)
dev_warn(&pdev->dev, "failed to unregister restart handler\n");
diff --git a/drivers/reset/reset-oxnas.c b/drivers/reset/reset-oxnas.c
new file mode 100644
index 000000000..c60fb2dac
--- /dev/null
+++ b/drivers/reset/reset-oxnas.c
@@ -0,0 +1,136 @@
+/*
+ * drivers/reset/reset-oxnas.c
+ *
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (C) 2009 Oxford Semiconductor Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+/* Regmap offsets */
+#define RST_SET_REGOFFSET 0x34
+#define RST_CLR_REGOFFSET 0x38
+
+struct oxnas_reset {
+ struct regmap *regmap;
+ struct reset_controller_dev rcdev;
+};
+
+static int oxnas_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct oxnas_reset *data =
+ container_of(rcdev, struct oxnas_reset, rcdev);
+
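+ /* Pulse the reset line: assert via the SET register, hold, then release via CLR. */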
+ regmap_write(data->regmap, RST_SET_REGOFFSET, BIT(id));
+ msleep(50);
+ regmap_write(data->regmap, RST_CLR_REGOFFSET, BIT(id));
+
+ return 0;
+}
+
+static int oxnas_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct oxnas_reset *data =
+ container_of(rcdev, struct oxnas_reset, rcdev);
+
+ regmap_write(data->regmap, RST_SET_REGOFFSET, BIT(id));
+
+ return 0;
+}
+
+static int oxnas_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct oxnas_reset *data =
+ container_of(rcdev, struct oxnas_reset, rcdev);
+
+ regmap_write(data->regmap, RST_CLR_REGOFFSET, BIT(id));
+
+ return 0;
+}
+
+static const struct reset_control_ops oxnas_reset_ops = {
+ .reset = oxnas_reset_reset,
+ .assert = oxnas_reset_assert,
+ .deassert = oxnas_reset_deassert,
+};
+
+static const struct of_device_id oxnas_reset_dt_ids[] = {
+ { .compatible = "oxsemi,ox810se-reset", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, oxnas_reset_dt_ids);
+
+static int oxnas_reset_probe(struct platform_device *pdev)
+{
+ struct oxnas_reset *data;
+ struct device *parent;
+
+ parent = pdev->dev.parent;
+ if (!parent) {
+ dev_err(&pdev->dev, "no parent\n");
+ return -ENODEV;
+ }
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
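+ /* The reset registers live in the parent syscon node, so reuse its regmap. */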
+ data->regmap = syscon_node_to_regmap(parent->of_node);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&pdev->dev, "failed to get parent regmap\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ data->rcdev.owner = THIS_MODULE;
+ data->rcdev.nr_resets = 32;
+ data->rcdev.ops = &oxnas_reset_ops;
+ data->rcdev.of_node = pdev->dev.of_node;
+
+ return reset_controller_register(&data->rcdev);
+}
+
+static int oxnas_reset_remove(struct platform_device *pdev)
+{
+ struct oxnas_reset *data = platform_get_drvdata(pdev);
+
+ reset_controller_unregister(&data->rcdev);
+
+ return 0;
+}
+
+static struct platform_driver oxnas_reset_driver = {
+ .probe = oxnas_reset_probe,
+ .remove = oxnas_reset_remove,
+ .driver = {
+ .name = "oxnas-reset",
+ .of_match_table = oxnas_reset_dt_ids,
+ },
+};
+
+module_platform_driver(oxnas_reset_driver);
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 1fcd27c1f..fe03b2aef 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -436,17 +436,19 @@ static struct bus_type rpmsg_bus = {
};
/**
- * register_rpmsg_driver() - register an rpmsg driver with the rpmsg bus
+ * __register_rpmsg_driver() - register an rpmsg driver with the rpmsg bus
* @rpdrv: pointer to a struct rpmsg_driver
+ * @owner: owning module/driver
*
* Returns 0 on success, and an appropriate error value on failure.
*/
-int register_rpmsg_driver(struct rpmsg_driver *rpdrv)
+int __register_rpmsg_driver(struct rpmsg_driver *rpdrv, struct module *owner)
{
rpdrv->drv.bus = &rpmsg_bus;
+ rpdrv->drv.owner = owner;
return driver_register(&rpdrv->drv);
}
-EXPORT_SYMBOL(register_rpmsg_driver);
+EXPORT_SYMBOL(__register_rpmsg_driver);
/**
* unregister_rpmsg_driver() - unregister an rpmsg driver from the rpmsg bus
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 3e84315c6..18639e0cb 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -573,24 +573,6 @@ config RTC_DRV_EM3027
This driver can also be built as a module. If so, the module
will be called rtc-em3027.
-config RTC_DRV_RV3029C2
- tristate "Micro Crystal RV3029"
- help
- If you say yes here you get support for the Micro Crystal
- RV3029 RTC chips.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-rv3029c2.
-
-config RTC_DRV_RV3029_HWMON
- bool "HWMON support for RV3029"
- depends on RTC_DRV_RV3029C2 && HWMON
- depends on !(RTC_DRV_RV3029C2=y && HWMON=m)
- default y
- help
- Say Y here if you want to expose temperature sensor data on
- rtc-rv3029.
-
config RTC_DRV_RV8803
tristate "Micro Crystal RV8803"
help
@@ -634,6 +616,15 @@ config RTC_DRV_M41T94
This driver can also be built as a module. If so, the module
will be called rtc-m41t94.
+config RTC_DRV_DS1302
+ tristate "Dallas/Maxim DS1302"
+ depends on SPI
+ help
+ If you say yes here you get support for the Dallas DS1302 RTC chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-ds1302.
+
config RTC_DRV_DS1305
tristate "Dallas/Maxim DS1305/DS1306"
help
@@ -777,6 +768,25 @@ config RTC_DRV_PCF2127
This driver can also be built as a module. If so, the module
will be called rtc-pcf2127.
+config RTC_DRV_RV3029C2
+ tristate "Micro Crystal RV3029/3049"
+ depends on RTC_I2C_AND_SPI
+ help
+ If you say yes here you get support for the Micro Crystal
+ RV3029 and RV3049 RTC chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-rv3029c2.
+
+config RTC_DRV_RV3029_HWMON
+ bool "HWMON support for RV3029/3049"
+ depends on RTC_DRV_RV3029C2 && HWMON
+ depends on !(RTC_DRV_RV3029C2=y && HWMON=m)
+ default y
+ help
+ Say Y here if you want to expose temperature sensor data on
+ rtc-rv3029.
+
comment "Platform RTC drivers"
# this 'CMOS' RTC driver is arch dependent because <asm-generic/rtc.h>
@@ -834,12 +844,6 @@ config RTC_DRV_DS1286
help
If you say yes here you get support for the Dallas DS1286 RTC chips.
-config RTC_DRV_DS1302
- tristate "Dallas DS1302"
- depends on SH_SECUREEDGE5410
- help
- If you say yes here you get support for the Dallas DS1302 RTC chips.
-
config RTC_DRV_DS1511
tristate "Dallas DS1511"
depends on HAS_IOMEM
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 7206e2fa4..99732e6f8 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -268,7 +268,7 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct sam9_rtc *rtc = dev_get_drvdata(dev);
- u32 mr = mr = rtt_readl(rtc, MR);
+ u32 mr = rtt_readl(rtc, MR);
seq_printf(seq, "update_IRQ\t: %s\n",
(mr & AT91_RTT_RTTINCIEN) ? "yes" : "no");
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 84fb54103..fbe9c7243 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -401,7 +401,7 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
+#if IS_ENABLED(CONFIG_RTC_INTF_PROC)
static int cmos_procfs(struct device *dev, struct seq_file *seq)
{
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index 1ba4371cb..a20bcf0e3 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -302,6 +302,13 @@ static int da9052_rtc_probe(struct platform_device *pdev)
if (ret != 0)
rtc_err(rtc, "Failed to disable TICKS: %d\n", ret);
+ device_init_wakeup(&pdev->dev, true);
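+ /* Register the rtc device before requesting the alarm IRQ, so the handler never sees a half-initialized device. */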
+ rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
+ &da9052_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rtc->rtc))
+ return PTR_ERR(rtc->rtc);
+
ret = da9052_request_irq(rtc->da9052, DA9052_IRQ_ALARM, "ALM",
da9052_rtc_irq, rtc);
if (ret != 0) {
@@ -309,11 +316,7 @@ static int da9052_rtc_probe(struct platform_device *pdev)
return ret;
}
- device_init_wakeup(&pdev->dev, true);
-
- rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
- &da9052_rtc_ops, THIS_MODULE);
- return PTR_ERR_OR_ZERO(rtc->rtc);
+ return 0;
}
static struct platform_driver da9052_rtc_driver = {
diff --git a/drivers/rtc/rtc-ds1216.c b/drivers/rtc/rtc-ds1216.c
index 12dbd7085..9c82b1da2 100644
--- a/drivers/rtc/rtc-ds1216.c
+++ b/drivers/rtc/rtc-ds1216.c
@@ -11,8 +11,6 @@
#include <linux/bcd.h>
#include <linux/slab.h>
-#define DRV_VERSION "0.2"
-
struct ds1216_regs {
u8 tsec;
u8 sec;
@@ -176,5 +174,4 @@ module_platform_driver_probe(ds1216_rtc_platform_driver, ds1216_rtc_probe);
MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_DESCRIPTION("DS1216 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-ds1216");
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index 8247a29a4..756e509f6 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -20,8 +20,6 @@
#include <linux/io.h>
#include <linux/slab.h>
-#define DRV_VERSION "1.0"
-
struct ds1286_priv {
struct rtc_device *rtc;
u32 __iomem *rtcregs;
@@ -363,5 +361,4 @@ module_platform_driver(ds1286_platform_driver);
MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_DESCRIPTION("DS1286 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-ds1286");
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 6bef7a523..f5dd09fe5 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -9,16 +9,16 @@
* this archive for more details.
*/
+#include <linux/bcd.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
#include <linux/rtc.h>
-#include <linux/io.h>
-#include <linux/bcd.h>
+#include <linux/spi/spi.h>
#define DRV_NAME "rtc-ds1302"
-#define DRV_VERSION "0.1.1"
#define RTC_CMD_READ 0x81 /* Read command */
#define RTC_CMD_WRITE 0x80 /* Write command */
@@ -28,6 +28,8 @@
#define RTC_ADDR_RAM0 0x20 /* Address of RAM0 */
#define RTC_ADDR_TCR 0x08 /* Address of trickle charge register */
+#define RTC_CLCK_BURST 0x1F /* Address of clock burst */
+#define RTC_CLCK_LEN 0x08 /* Size of clock burst */
#define RTC_ADDR_CTRL 0x07 /* Address of control register */
#define RTC_ADDR_YEAR 0x06 /* Address of year register */
#define RTC_ADDR_DAY 0x05 /* Address of day of week register */
@@ -37,219 +39,181 @@
#define RTC_ADDR_MIN 0x01 /* Address of minute register */
#define RTC_ADDR_SEC 0x00 /* Address of second register */
-#ifdef CONFIG_SH_SECUREEDGE5410
-#include <asm/rtc.h>
-#include <mach/secureedge5410.h>
-
-#define RTC_RESET 0x1000
-#define RTC_IODATA 0x0800
-#define RTC_SCLK 0x0400
-
-#define set_dp(x) SECUREEDGE_WRITE_IOPORT(x, 0x1c00)
-#define get_dp() SECUREEDGE_READ_IOPORT()
-#define ds1302_set_tx()
-#define ds1302_set_rx()
-
-static inline int ds1302_hw_init(void)
+static int ds1302_rtc_set_time(struct device *dev, struct rtc_time *time)
{
- return 0;
+ struct spi_device *spi = dev_get_drvdata(dev);
+ u8 buf[1 + RTC_CLCK_LEN];
+ u8 *bp = buf;
+ int status;
+
+ /* Enable writing */
+ *bp++ = RTC_ADDR_CTRL << 1 | RTC_CMD_WRITE;
+ *bp++ = RTC_CMD_WRITE_ENABLE;
+
+ status = spi_write_then_read(spi, buf, 2,
+ NULL, 0);
+ if (status)
+ return status;
+
+ /* Write registers starting at the first time/date address. */
+ bp = buf;
+ *bp++ = RTC_CLCK_BURST << 1 | RTC_CMD_WRITE;
+
+ *bp++ = bin2bcd(time->tm_sec);
+ *bp++ = bin2bcd(time->tm_min);
+ *bp++ = bin2bcd(time->tm_hour);
+ *bp++ = bin2bcd(time->tm_mday);
+ *bp++ = bin2bcd(time->tm_mon + 1);
+ *bp++ = time->tm_wday + 1;
+ *bp++ = bin2bcd(time->tm_year % 100);
+ *bp++ = RTC_CMD_WRITE_DISABLE;
+
+ /* use write-then-read since dma from stack is nonportable */
+ return spi_write_then_read(spi, buf, sizeof(buf),
+ NULL, 0);
}
-static inline void ds1302_reset(void)
+static int ds1302_rtc_get_time(struct device *dev, struct rtc_time *time)
{
- set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
+ struct spi_device *spi = dev_get_drvdata(dev);
+ u8 addr = RTC_CLCK_BURST << 1 | RTC_CMD_READ;
+ u8 buf[RTC_CLCK_LEN - 1];
+ int status;
+
+ /* Use write-then-read to get all the date/time registers
+ * since dma from stack is nonportable
+ */
+ status = spi_write_then_read(spi, &addr, sizeof(addr),
+ buf, sizeof(buf));
+ if (status < 0)
+ return status;
+
+ /* Decode the registers */
+ time->tm_sec = bcd2bin(buf[RTC_ADDR_SEC]);
+ time->tm_min = bcd2bin(buf[RTC_ADDR_MIN]);
+ time->tm_hour = bcd2bin(buf[RTC_ADDR_HOUR]);
+ time->tm_wday = buf[RTC_ADDR_DAY] - 1;
+ time->tm_mday = bcd2bin(buf[RTC_ADDR_DATE]);
+ time->tm_mon = bcd2bin(buf[RTC_ADDR_MON]) - 1;
+ time->tm_year = bcd2bin(buf[RTC_ADDR_YEAR]) + 100;
+
+ /* Time may not be set */
+ return rtc_valid_tm(time);
}
-static inline void ds1302_clock(void)
-{
- set_dp(get_dp() | RTC_SCLK); /* clock high */
- set_dp(get_dp() & ~RTC_SCLK); /* clock low */
-}
-
-static inline void ds1302_start(void)
-{
- set_dp(get_dp() | RTC_RESET);
-}
-
-static inline void ds1302_stop(void)
-{
- set_dp(get_dp() & ~RTC_RESET);
-}
-
-static inline void ds1302_txbit(int bit)
-{
- set_dp((get_dp() & ~RTC_IODATA) | (bit ? RTC_IODATA : 0));
-}
-
-static inline int ds1302_rxbit(void)
-{
- return !!(get_dp() & RTC_IODATA);
-}
-
-#else
-#error "Add support for your platform"
-#endif
+static struct rtc_class_ops ds1302_rtc_ops = {
+ .read_time = ds1302_rtc_get_time,
+ .set_time = ds1302_rtc_set_time,
+};
-static void ds1302_sendbits(unsigned int val)
+static int ds1302_probe(struct spi_device *spi)
{
- int i;
-
- ds1302_set_tx();
-
- for (i = 8; (i); i--, val >>= 1) {
- ds1302_txbit(val & 0x1);
- ds1302_clock();
+ struct rtc_device *rtc;
+ u8 addr;
+ u8 buf[4];
+ u8 *bp = buf;
+ int status;
+
+ /* Sanity check board setup data. This may be hooked up
+ * in 3wire mode, but we don't care. Note that unless
+ * there's an inverter in place, this needs SPI_CS_HIGH!
+ */
+ if (spi->bits_per_word && (spi->bits_per_word != 8)) {
+ dev_err(&spi->dev, "bad word length\n");
+ return -EINVAL;
+ } else if (spi->max_speed_hz > 2000000) {
+ dev_err(&spi->dev, "speed is too high\n");
+ return -EINVAL;
+ } else if (spi->mode & SPI_CPHA) {
+ dev_err(&spi->dev, "bad mode\n");
+ return -EINVAL;
}
-}
-
-static unsigned int ds1302_recvbits(void)
-{
- unsigned int val;
- int i;
-
- ds1302_set_rx();
- for (i = 0, val = 0; (i < 8); i++) {
- val |= (ds1302_rxbit() << i);
- ds1302_clock();
+ addr = RTC_ADDR_CTRL << 1 | RTC_CMD_READ;
+ status = spi_write_then_read(spi, &addr, sizeof(addr), buf, 1);
+ if (status < 0) {
+ dev_err(&spi->dev, "control register read error %d\n",
+ status);
+ return status;
}
- return val;
-}
-
-static unsigned int ds1302_readbyte(unsigned int addr)
-{
- unsigned int val;
-
- ds1302_reset();
-
- ds1302_start();
- ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_READ);
- val = ds1302_recvbits();
- ds1302_stop();
-
- return val;
-}
-
-static void ds1302_writebyte(unsigned int addr, unsigned int val)
-{
- ds1302_reset();
-
- ds1302_start();
- ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_WRITE);
- ds1302_sendbits(val);
- ds1302_stop();
-}
-
-static int ds1302_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- tm->tm_sec = bcd2bin(ds1302_readbyte(RTC_ADDR_SEC));
- tm->tm_min = bcd2bin(ds1302_readbyte(RTC_ADDR_MIN));
- tm->tm_hour = bcd2bin(ds1302_readbyte(RTC_ADDR_HOUR));
- tm->tm_wday = bcd2bin(ds1302_readbyte(RTC_ADDR_DAY));
- tm->tm_mday = bcd2bin(ds1302_readbyte(RTC_ADDR_DATE));
- tm->tm_mon = bcd2bin(ds1302_readbyte(RTC_ADDR_MON)) - 1;
- tm->tm_year = bcd2bin(ds1302_readbyte(RTC_ADDR_YEAR));
-
- if (tm->tm_year < 70)
- tm->tm_year += 100;
-
- dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
- "mday=%d, mon=%d, year=%d, wday=%d\n",
- __func__,
- tm->tm_sec, tm->tm_min, tm->tm_hour,
- tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday);
-
- return rtc_valid_tm(tm);
-}
-
-static int ds1302_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
- ds1302_writebyte(RTC_ADDR_CTRL, RTC_CMD_WRITE_ENABLE);
- /* Stop RTC */
- ds1302_writebyte(RTC_ADDR_SEC, ds1302_readbyte(RTC_ADDR_SEC) | 0x80);
-
- ds1302_writebyte(RTC_ADDR_SEC, bin2bcd(tm->tm_sec));
- ds1302_writebyte(RTC_ADDR_MIN, bin2bcd(tm->tm_min));
- ds1302_writebyte(RTC_ADDR_HOUR, bin2bcd(tm->tm_hour));
- ds1302_writebyte(RTC_ADDR_DAY, bin2bcd(tm->tm_wday));
- ds1302_writebyte(RTC_ADDR_DATE, bin2bcd(tm->tm_mday));
- ds1302_writebyte(RTC_ADDR_MON, bin2bcd(tm->tm_mon + 1));
- ds1302_writebyte(RTC_ADDR_YEAR, bin2bcd(tm->tm_year % 100));
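+ /* Anything besides the write-protect bit set may be a transient misread; retry the read once before giving up. */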
+ if ((buf[0] & ~RTC_CMD_WRITE_DISABLE) != 0) {
+ status = spi_write_then_read(spi, &addr, sizeof(addr), buf, 1);
+ if (status < 0) {
+ dev_err(&spi->dev, "control register read error %d\n",
+ status);
+ return status;
+ }
+
+ if ((buf[0] & ~RTC_CMD_WRITE_DISABLE) != 0) {
+ dev_err(&spi->dev, "junk in control register\n");
+ return -ENODEV;
+ }
+ }
+ if (buf[0] == 0) {
+ bp = buf;
+ *bp++ = RTC_ADDR_CTRL << 1 | RTC_CMD_WRITE;
+ *bp++ = RTC_CMD_WRITE_DISABLE;
+
+ status = spi_write_then_read(spi, buf, 2, NULL, 0);
+ if (status < 0) {
+ dev_err(&spi->dev, "control register write error %d\n",
+ status);
+ return status;
+ }
+
+ addr = RTC_ADDR_CTRL << 1 | RTC_CMD_READ;
+ status = spi_write_then_read(spi, &addr, sizeof(addr), buf, 1);
+ if (status < 0) {
+ dev_err(&spi->dev,
+ "error %d reading control register\n",
+ status);
+ return status;
+ }
+
+ if (buf[0] != RTC_CMD_WRITE_DISABLE) {
+ dev_err(&spi->dev, "failed to detect chip\n");
+ return -ENODEV;
+ }
+ }
- /* Start RTC */
- ds1302_writebyte(RTC_ADDR_SEC, ds1302_readbyte(RTC_ADDR_SEC) & ~0x80);
+ spi_set_drvdata(spi, spi);
- ds1302_writebyte(RTC_ADDR_CTRL, RTC_CMD_WRITE_DISABLE);
+ rtc = devm_rtc_device_register(&spi->dev, "ds1302",
+ &ds1302_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ status = PTR_ERR(rtc);
+ dev_err(&spi->dev, "error %d registering rtc\n", status);
+ return status;
+ }
return 0;
}
-static int ds1302_rtc_ioctl(struct device *dev, unsigned int cmd,
- unsigned long arg)
+static int ds1302_remove(struct spi_device *spi)
{
- switch (cmd) {
-#ifdef RTC_SET_CHARGE
- case RTC_SET_CHARGE:
- {
- int tcs_val;
-
- if (copy_from_user(&tcs_val, (int __user *)arg, sizeof(int)))
- return -EFAULT;
-
- ds1302_writebyte(RTC_ADDR_TCR, (0xa0 | tcs_val * 0xf));
- return 0;
- }
-#endif
- }
-
- return -ENOIOCTLCMD;
+ spi_set_drvdata(spi, NULL);
+ return 0;
}
-static struct rtc_class_ops ds1302_rtc_ops = {
- .read_time = ds1302_rtc_read_time,
- .set_time = ds1302_rtc_set_time,
- .ioctl = ds1302_rtc_ioctl,
+#ifdef CONFIG_OF
+static const struct of_device_id ds1302_dt_ids[] = {
+ { .compatible = "maxim,ds1302", },
+ { /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, ds1302_dt_ids);
+#endif
-static int __init ds1302_rtc_probe(struct platform_device *pdev)
-{
- struct rtc_device *rtc;
-
- if (ds1302_hw_init()) {
- dev_err(&pdev->dev, "Failed to init communication channel");
- return -EINVAL;
- }
-
- /* Reset */
- ds1302_reset();
-
- /* Write a magic value to the DS1302 RAM, and see if it sticks. */
- ds1302_writebyte(RTC_ADDR_RAM0, 0x42);
- if (ds1302_readbyte(RTC_ADDR_RAM0) != 0x42) {
- dev_err(&pdev->dev, "Failed to probe");
- return -ENODEV;
- }
-
- rtc = devm_rtc_device_register(&pdev->dev, "ds1302",
- &ds1302_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
-
- platform_set_drvdata(pdev, rtc);
-
- return 0;
-}
-
-static struct platform_driver ds1302_platform_driver = {
- .driver = {
- .name = DRV_NAME,
- },
+static struct spi_driver ds1302_driver = {
+ .driver.name = "rtc-ds1302",
+ .driver.of_match_table = of_match_ptr(ds1302_dt_ids),
+ .probe = ds1302_probe,
+ .remove = ds1302_remove,
};
-module_platform_driver_probe(ds1302_platform_driver, ds1302_rtc_probe);
+module_spi_driver(ds1302_driver);
MODULE_DESCRIPTION("Dallas DS1302 RTC driver");
-MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("Paul Mundt, David McCullough");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index ecb7dbae9..821d9c089 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -275,9 +275,13 @@ static s32 ds1307_native_smbus_write_block_data(const struct i2c_client *client,
{
u8 suboffset = 0;
- if (length <= I2C_SMBUS_BLOCK_MAX)
- return i2c_smbus_write_i2c_block_data(client,
+ if (length <= I2C_SMBUS_BLOCK_MAX) {
+ s32 retval = i2c_smbus_write_i2c_block_data(client,
command, length, values);
+ if (retval < 0)
+ return retval;
+ return length;
+ }
while (suboffset < length) {
s32 retval = i2c_smbus_write_i2c_block_data(client,
@@ -538,12 +542,8 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
buf[5] = 0;
buf[6] = 0;
- /* optionally enable ALARM1 */
+ /* disable alarms */
buf[7] = control & ~(DS1337_BIT_A1IE | DS1337_BIT_A2IE);
- if (t->enabled) {
- dev_dbg(dev, "alarm IRQ armed\n");
- buf[7] |= DS1337_BIT_A1IE; /* only ALARM1 is used */
- }
buf[8] = status & ~(DS1337_BIT_A1I | DS1337_BIT_A2I);
ret = ds1307->write_block_data(client,
@@ -553,6 +553,13 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
return ret;
}
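+ /* Arm ALARM1 only after its time registers are written, so a stale compare cannot fire early. */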
+ /* optionally enable ALARM1 */
+ if (t->enabled) {
+ dev_dbg(dev, "alarm IRQ armed\n");
+ buf[7] |= DS1337_BIT_A1IE; /* only ALARM1 is used */
+ i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, buf[7]);
+ }
+
return 0;
}
@@ -1144,12 +1151,10 @@ static struct clk_init_data ds3231_clks_init[] = {
[DS3231_CLK_SQW] = {
.name = "ds3231_clk_sqw",
.ops = &ds3231_clk_sqw_ops,
- .flags = CLK_IS_ROOT,
},
[DS3231_CLK_32KHZ] = {
.name = "ds3231_clk_32khz",
.ops = &ds3231_clk_32khz_ops,
- .flags = CLK_IS_ROOT,
},
};
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index 3d389bd8a..23fa9f0cb 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -24,7 +24,6 @@
#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
-#define DS1343_DRV_VERSION "01.00"
#define DALLAS_MAXIM_DS1343 0
#define DALLAS_MAXIM_DS1344 1
@@ -747,4 +746,3 @@ MODULE_DESCRIPTION("DS1343 RTC SPI Driver");
MODULE_AUTHOR("Raghavendra Chandra Ganiga <ravi23ganiga@gmail.com>,"
"Ankur Srivastava <sankurece@gmail.com>");
MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DS1343_DRV_VERSION);
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index da3d04ce8..1b2dcb58c 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -25,8 +25,6 @@
#include <linux/io.h>
#include <linux/module.h>
-#define DRV_VERSION "0.6"
-
enum ds1511reg {
DS1511_SEC = 0x0,
DS1511_MIN = 0x1,
@@ -537,4 +535,3 @@ module_platform_driver(ds1511_rtc_driver);
MODULE_AUTHOR("Andrew Sharp <andy.sharp@lsi.com>");
MODULE_DESCRIPTION("Dallas DS1511 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 38422ab4e..9961ec646 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -20,8 +20,6 @@
#include <linux/io.h>
#include <linux/module.h>
-#define DRV_VERSION "0.3"
-
#define RTC_REG_SIZE 0x2000
#define RTC_OFFSET 0x1ff0
@@ -359,4 +357,3 @@ module_platform_driver(ds1553_rtc_driver);
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("Dallas DS1553 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 92b1cbf2c..5c18ac739 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -13,8 +13,6 @@
#include <linux/rtc.h>
#include <linux/module.h>
-#define DRV_VERSION "0.4"
-
/* Registers */
#define DS1672_REG_CNT_BASE 0
@@ -165,8 +163,6 @@ static int ds1672_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
- dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
-
rtc = devm_rtc_device_register(&client->dev, ds1672_driver.driver.name,
&ds1672_rtc_ops, THIS_MODULE);
@@ -213,4 +209,3 @@ module_i2c_driver(ds1672_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("Dallas/Maxim DS1672 timekeeper driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index 1e6cfc84b..b3ce3c652 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -32,8 +32,6 @@
#include <linux/proc_fs.h>
#endif
-#define DRV_VERSION "0.42.0"
-
/* ----------------------------------------------------------------------- */
/* Standard read/write functions if platform does not provide overrides */
@@ -2213,6 +2211,7 @@ ds1685_rtc_poweroff(struct platform_device *pdev)
(ctrl4a | RTC_CTRL_4A_PAB));
/* Spin ... we do not switch back to bank0. */
+ while (1);
unreachable();
}
}
@@ -2224,5 +2223,4 @@ MODULE_AUTHOR("Joshua Kinard <kumba@gentoo.org>");
MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd-electronics.com>");
MODULE_DESCRIPTION("Dallas/Maxim DS1685/DS1687-series RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-ds1685");
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index c5168b3bc..3abf1cbfb 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -24,8 +24,6 @@
#include <linux/io.h>
#include <linux/module.h>
-#define DRV_VERSION "0.4"
-
#define RTC_SIZE 8
#define RTC_CONTROL 0
@@ -239,5 +237,4 @@ module_platform_driver(ds1742_rtc_driver);
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("Dallas DS1742 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-ds1742");
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 7edc88972..04fbd7fff 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -369,6 +369,11 @@ static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq,
if (ret)
return ret;
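+ /* Register the rtc device before the IRQ can fire, so the handler never dereferences an unregistered device. */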
+ ds3232->rtc = devm_rtc_device_register(dev, name, &ds3232_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(ds3232->rtc))
+ return PTR_ERR(ds3232->rtc);
+
if (ds3232->irq > 0) {
ret = devm_request_threaded_irq(dev, ds3232->irq, NULL,
ds3232_irq,
@@ -380,10 +385,8 @@ static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq,
} else
device_init_wakeup(dev, 1);
}
- ds3232->rtc = devm_rtc_device_register(dev, name, &ds3232_rtc_ops,
- THIS_MODULE);
- return PTR_ERR_OR_ZERO(ds3232->rtc);
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index a1628adf9..694038208 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -28,8 +28,6 @@
#define EP93XX_RTC_SWCOMP_INT_MASK 0x0000ffff
#define EP93XX_RTC_SWCOMP_INT_SHIFT 0
-#define DRV_VERSION "0.3"
-
/*
* struct device dev.platform_data is used to store our private data
* because struct rtc_device does not have a variable to hold it.
@@ -184,5 +182,4 @@ module_platform_driver(ep93xx_rtc_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("EP93XX RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:ep93xx-rtc");
diff --git a/drivers/rtc/rtc-gemini.c b/drivers/rtc/rtc-gemini.c
index f46b6d46a..b57505efa 100644
--- a/drivers/rtc/rtc-gemini.c
+++ b/drivers/rtc/rtc-gemini.c
@@ -28,7 +28,6 @@
#include <linux/module.h>
#define DRV_NAME "rtc-gemini"
-#define DRV_VERSION "0.2"
MODULE_AUTHOR("Hans Ulli Kroll <ulli.kroll@googlemail.com>");
MODULE_DESCRIPTION("RTC driver for Gemini SoC");
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index b1b4746a0..207270376 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -413,7 +413,7 @@ static struct clk *hym8563_clkout_register_clk(struct hym8563 *hym8563)
init.name = "hym8563-clkout";
init.ops = &hym8563_clkout_ops;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.parent_names = NULL;
init.num_parents = 0;
hym8563->clkout_hw.init = &init;
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index 839d1fd63..38586a024 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -20,8 +20,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
-#define DRV_VERSION "0.1"
-
/* ISL register offsets */
#define ISL12022_REG_SC 0x00
#define ISL12022_REG_MN 0x01
@@ -258,8 +256,6 @@ static int isl12022_probe(struct i2c_client *client,
if (!isl12022)
return -ENOMEM;
- dev_dbg(&client->dev, "chip found, driver version " DRV_VERSION "\n");
-
i2c_set_clientdata(client, isl12022);
isl12022->rtc = devm_rtc_device_register(&client->dev,
@@ -299,4 +295,3 @@ module_i2c_driver(isl12022_driver);
MODULE_AUTHOR("roman.fietze@telemotive.de");
MODULE_DESCRIPTION("ISL 12022 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index b57a304ff..2893785f0 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -15,8 +15,6 @@
#include <linux/bcd.h>
#include <linux/rtc.h>
-#define DRV_VERSION "0.3"
-
/* Register map */
/* rtc section */
#define ISL1208_REG_SC 0x00
@@ -632,9 +630,6 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (isl1208_i2c_validate_client(client) < 0)
return -ENODEV;
- dev_info(&client->dev,
- "chip found, driver version " DRV_VERSION "\n");
-
if (client->irq > 0) {
rc = devm_request_threaded_irq(&client->dev, client->irq, NULL,
isl1208_rtc_interrupt,
@@ -706,4 +701,3 @@ module_i2c_driver(isl1208_driver);
MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>");
MODULE_DESCRIPTION("Intersil ISL1208 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index d107a8e72..d1bf93a87 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -32,41 +32,42 @@
#include <linux/watchdog.h>
#endif
-#define M41T80_REG_SSEC 0
-#define M41T80_REG_SEC 1
-#define M41T80_REG_MIN 2
-#define M41T80_REG_HOUR 3
-#define M41T80_REG_WDAY 4
-#define M41T80_REG_DAY 5
-#define M41T80_REG_MON 6
-#define M41T80_REG_YEAR 7
-#define M41T80_REG_ALARM_MON 0xa
-#define M41T80_REG_ALARM_DAY 0xb
-#define M41T80_REG_ALARM_HOUR 0xc
-#define M41T80_REG_ALARM_MIN 0xd
-#define M41T80_REG_ALARM_SEC 0xe
-#define M41T80_REG_FLAGS 0xf
-#define M41T80_REG_SQW 0x13
+#define M41T80_REG_SSEC 0x00
+#define M41T80_REG_SEC 0x01
+#define M41T80_REG_MIN 0x02
+#define M41T80_REG_HOUR 0x03
+#define M41T80_REG_WDAY 0x04
+#define M41T80_REG_DAY 0x05
+#define M41T80_REG_MON 0x06
+#define M41T80_REG_YEAR 0x07
+#define M41T80_REG_ALARM_MON 0x0a
+#define M41T80_REG_ALARM_DAY 0x0b
+#define M41T80_REG_ALARM_HOUR 0x0c
+#define M41T80_REG_ALARM_MIN 0x0d
+#define M41T80_REG_ALARM_SEC 0x0e
+#define M41T80_REG_FLAGS 0x0f
+#define M41T80_REG_SQW 0x13
#define M41T80_DATETIME_REG_SIZE (M41T80_REG_YEAR + 1)
#define M41T80_ALARM_REG_SIZE \
(M41T80_REG_ALARM_SEC + 1 - M41T80_REG_ALARM_MON)
-#define M41T80_SEC_ST (1 << 7) /* ST: Stop Bit */
-#define M41T80_ALMON_AFE (1 << 7) /* AFE: AF Enable Bit */
-#define M41T80_ALMON_SQWE (1 << 6) /* SQWE: SQW Enable Bit */
-#define M41T80_ALHOUR_HT (1 << 6) /* HT: Halt Update Bit */
-#define M41T80_FLAGS_AF (1 << 6) /* AF: Alarm Flag Bit */
-#define M41T80_FLAGS_BATT_LOW (1 << 4) /* BL: Battery Low Bit */
-#define M41T80_WATCHDOG_RB2 (1 << 7) /* RB: Watchdog resolution */
-#define M41T80_WATCHDOG_RB1 (1 << 1) /* RB: Watchdog resolution */
-#define M41T80_WATCHDOG_RB0 (1 << 0) /* RB: Watchdog resolution */
-
-#define M41T80_FEATURE_HT (1 << 0) /* Halt feature */
-#define M41T80_FEATURE_BL (1 << 1) /* Battery low indicator */
-#define M41T80_FEATURE_SQ (1 << 2) /* Squarewave feature */
-#define M41T80_FEATURE_WD (1 << 3) /* Extra watchdog resolution */
-#define M41T80_FEATURE_SQ_ALT (1 << 4) /* RSx bits are in reg 4 */
+#define M41T80_SEC_ST BIT(7) /* ST: Stop Bit */
+#define M41T80_ALMON_AFE BIT(7) /* AFE: AF Enable Bit */
+#define M41T80_ALMON_SQWE BIT(6) /* SQWE: SQW Enable Bit */
+#define M41T80_ALHOUR_HT BIT(6) /* HT: Halt Update Bit */
+#define M41T80_FLAGS_OF BIT(2) /* OF: Oscillator Failure Bit */
+#define M41T80_FLAGS_AF BIT(6) /* AF: Alarm Flag Bit */
+#define M41T80_FLAGS_BATT_LOW BIT(4) /* BL: Battery Low Bit */
+#define M41T80_WATCHDOG_RB2 BIT(7) /* RB: Watchdog resolution */
+#define M41T80_WATCHDOG_RB1 BIT(1) /* RB: Watchdog resolution */
+#define M41T80_WATCHDOG_RB0 BIT(0) /* RB: Watchdog resolution */
+
+#define M41T80_FEATURE_HT BIT(0) /* Halt feature */
+#define M41T80_FEATURE_BL BIT(1) /* Battery low indicator */
+#define M41T80_FEATURE_SQ BIT(2) /* Squarewave feature */
+#define M41T80_FEATURE_WD BIT(3) /* Extra watchdog resolution */
+#define M41T80_FEATURE_SQ_ALT BIT(4) /* RSx bits are in reg 4 */
static DEFINE_MUTEX(m41t80_rtc_mutex);
static const struct i2c_device_id m41t80_id[] = {
@@ -90,27 +91,65 @@ struct m41t80_data {
struct rtc_device *rtc;
};
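+
+/*
+ * Threaded alarm IRQ handler: report RTC_AF to the rtc core, then clear
+ * both the alarm flag and its enable bit so the interrupt line deasserts.
+ */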
+static irqreturn_t m41t80_handle_irq(int irq, void *dev_id)
+{
+ struct i2c_client *client = dev_id;
+ struct m41t80_data *m41t80 = i2c_get_clientdata(client);
+ struct mutex *lock = &m41t80->rtc->ops_lock;
+ unsigned long events = 0;
+ int flags, flags_afe;
+
+ mutex_lock(lock);
+
+ flags_afe = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
+ if (flags_afe < 0) {
+ mutex_unlock(lock);
+ return IRQ_NONE;
+ }
+
+ flags = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
+ if (flags <= 0) {
+ mutex_unlock(lock);
+ return IRQ_NONE;
+ }
+
+ if (flags & M41T80_FLAGS_AF) {
+ flags &= ~M41T80_FLAGS_AF;
+ flags_afe &= ~M41T80_ALMON_AFE;
+ events |= RTC_AF;
+ }
+
+ if (events) {
+ rtc_update_irq(m41t80->rtc, 1, events);
+ i2c_smbus_write_byte_data(client, M41T80_REG_FLAGS, flags);
+ i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
+ flags_afe);
+ }
+
+ mutex_unlock(lock);
+
+ return IRQ_HANDLED;
+}
+
static int m41t80_get_datetime(struct i2c_client *client,
struct rtc_time *tm)
{
- u8 buf[M41T80_DATETIME_REG_SIZE], dt_addr[1] = { M41T80_REG_SEC };
- struct i2c_msg msgs[] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = 1,
- .buf = dt_addr,
- },
- {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = M41T80_DATETIME_REG_SIZE - M41T80_REG_SEC,
- .buf = buf + M41T80_REG_SEC,
- },
- };
+ unsigned char buf[8];
+ int err, flags;
+
+ flags = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
+ if (flags < 0)
+ return flags;
- if (i2c_transfer(client->adapter, msgs, 2) < 0) {
- dev_err(&client->dev, "read error\n");
+ if (flags & M41T80_FLAGS_OF) {
+ dev_err(&client->dev, "Oscillator failure, data is invalid.\n");
+ return -EINVAL;
+ }
+
+ err = i2c_smbus_read_i2c_block_data(client, M41T80_REG_SSEC,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to read date\n");
return -EIO;
}
@@ -129,70 +168,42 @@ static int m41t80_get_datetime(struct i2c_client *client,
/* Sets the given date and time to the real time clock. */
static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
- u8 wbuf[1 + M41T80_DATETIME_REG_SIZE];
- u8 *buf = &wbuf[1];
- u8 dt_addr[1] = { M41T80_REG_SEC };
- struct i2c_msg msgs_in[] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = 1,
- .buf = dt_addr,
- },
- {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .len = M41T80_DATETIME_REG_SIZE - M41T80_REG_SEC,
- .buf = buf + M41T80_REG_SEC,
- },
- };
- struct i2c_msg msgs[] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = 1 + M41T80_DATETIME_REG_SIZE,
- .buf = wbuf,
- },
- };
+ unsigned char buf[8];
+ int err, flags;
- /* Read current reg values into buf[1..7] */
- if (i2c_transfer(client->adapter, msgs_in, 2) < 0) {
- dev_err(&client->dev, "read error\n");
- return -EIO;
- }
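+ /* The chip stores only two year digits, so accept 2000-2099 only. */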
+ if (tm->tm_year < 100 || tm->tm_year > 199)
+ return -EINVAL;
- wbuf[0] = 0; /* offset into rtc's regs */
- /* Merge time-data and register flags into buf[0..7] */
buf[M41T80_REG_SSEC] = 0;
- buf[M41T80_REG_SEC] =
- bin2bcd(tm->tm_sec) | (buf[M41T80_REG_SEC] & ~0x7f);
- buf[M41T80_REG_MIN] =
- bin2bcd(tm->tm_min) | (buf[M41T80_REG_MIN] & ~0x7f);
- buf[M41T80_REG_HOUR] =
- bin2bcd(tm->tm_hour) | (buf[M41T80_REG_HOUR] & ~0x3f);
- buf[M41T80_REG_WDAY] =
- (tm->tm_wday & 0x07) | (buf[M41T80_REG_WDAY] & ~0x07);
- buf[M41T80_REG_DAY] =
- bin2bcd(tm->tm_mday) | (buf[M41T80_REG_DAY] & ~0x3f);
- buf[M41T80_REG_MON] =
- bin2bcd(tm->tm_mon + 1) | (buf[M41T80_REG_MON] & ~0x1f);
-
- /* assume 20YY not 19YY */
- if (tm->tm_year < 100 || tm->tm_year > 199) {
- dev_err(&client->dev, "Year must be between 2000 and 2099. It's %d.\n",
- tm->tm_year + 1900);
- return -EINVAL;
+ buf[M41T80_REG_SEC] = bin2bcd(tm->tm_sec);
+ buf[M41T80_REG_MIN] = bin2bcd(tm->tm_min);
+ buf[M41T80_REG_HOUR] = bin2bcd(tm->tm_hour);
+ buf[M41T80_REG_DAY] = bin2bcd(tm->tm_mday);
+ buf[M41T80_REG_MON] = bin2bcd(tm->tm_mon + 1);
+ buf[M41T80_REG_YEAR] = bin2bcd(tm->tm_year - 100);
+ buf[M41T80_REG_WDAY] = tm->tm_wday;
+
+ err = i2c_smbus_write_i2c_block_data(client, M41T80_REG_SSEC,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to write to date registers\n");
+ return err;
}
- buf[M41T80_REG_YEAR] = bin2bcd(tm->tm_year % 100);
- if (i2c_transfer(client->adapter, msgs, 1) != 1) {
- dev_err(&client->dev, "write error\n");
+ /* Clear the OF bit of Flags Register */
+ flags = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
+ if (flags < 0)
+ return flags;
+
+ if (i2c_smbus_write_byte_data(client, M41T80_REG_FLAGS,
+ flags & ~M41T80_FLAGS_OF)) {
+ dev_err(&client->dev, "Unable to write flags register\n");
return -EIO;
}
- return 0;
+
+ return err;
}
-#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
static int m41t80_rtc_proc(struct device *dev, struct seq_file *seq)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -206,9 +217,6 @@ static int m41t80_rtc_proc(struct device *dev, struct seq_file *seq)
}
return 0;
}
-#else
-#define m41t80_rtc_proc NULL
-#endif
static int m41t80_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
@@ -220,19 +228,117 @@ static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
return m41t80_set_datetime(to_i2c_client(dev), tm);
}
-/*
- * XXX - m41t80 alarm functionality is reported broken.
- * until it is fixed, don't register alarm functions.
- */
+static int m41t80_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int flags, retval;
+
+ flags = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
+ if (flags < 0)
+ return flags;
+
+ if (enabled)
+ flags |= M41T80_ALMON_AFE;
+ else
+ flags &= ~M41T80_ALMON_AFE;
+
+ retval = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, flags);
+ if (retval < 0) {
+ dev_info(dev, "Unable to enable alarm IRQ %d\n", retval);
+ return retval;
+ }
+ return 0;
+}
+
+static int m41t80_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 alarmvals[5];
+ int ret, err;
+
+ alarmvals[0] = bin2bcd(alrm->time.tm_mon + 1);
+ alarmvals[1] = bin2bcd(alrm->time.tm_mday);
+ alarmvals[2] = bin2bcd(alrm->time.tm_hour);
+ alarmvals[3] = bin2bcd(alrm->time.tm_min);
+ alarmvals[4] = bin2bcd(alrm->time.tm_sec);
+
+ /* Clear AF and AFE flags */
+ ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
+ if (ret < 0)
+ return ret;
+ err = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
+ ret & ~(M41T80_ALMON_AFE));
+ if (err < 0) {
+ dev_err(dev, "Unable to clear AFE bit\n");
+ return err;
+ }
+
+ ret = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
+ if (ret < 0)
+ return ret;
+
+ err = i2c_smbus_write_byte_data(client, M41T80_REG_FLAGS,
+ ret & ~(M41T80_FLAGS_AF));
+ if (err < 0) {
+ dev_err(dev, "Unable to clear AF bit\n");
+ return err;
+ }
+
+ /* Write the alarm */
+ err = i2c_smbus_write_i2c_block_data(client, M41T80_REG_ALARM_MON,
+ 5, alarmvals);
+ if (err)
+ return err;
+
+ /* Enable the alarm interrupt */
+ if (alrm->enabled) {
+ alarmvals[0] |= M41T80_ALMON_AFE;
+ err = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
+ alarmvals[0]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int m41t80_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ u8 alarmvals[5];
+ int flags, ret;
+
+ ret = i2c_smbus_read_i2c_block_data(client, M41T80_REG_ALARM_MON,
+ 5, alarmvals);
+ if (ret != 5)
+ return ret < 0 ? ret : -EIO;
+
+ flags = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
+ if (flags < 0)
+ return flags;
+
+ alrm->time.tm_sec = bcd2bin(alarmvals[4] & 0x7f);
+ alrm->time.tm_min = bcd2bin(alarmvals[3] & 0x7f);
+ alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f);
+ alrm->time.tm_wday = -1;
+ alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f);
+ alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f);
+ alrm->time.tm_year = -1;
+
+ alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE);
+ alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled;
+
+ return 0;
+}
+
static struct rtc_class_ops m41t80_rtc_ops = {
.read_time = m41t80_rtc_read_time,
.set_time = m41t80_rtc_set_time,
.proc = m41t80_rtc_proc,
};
-#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
-static ssize_t m41t80_sysfs_show_flags(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
int val;
@@ -242,10 +348,10 @@ static ssize_t m41t80_sysfs_show_flags(struct device *dev,
return val;
return sprintf(buf, "%#x\n", val);
}
-static DEVICE_ATTR(flags, S_IRUGO, m41t80_sysfs_show_flags, NULL);
+static DEVICE_ATTR_RO(flags);
-static ssize_t m41t80_sysfs_show_sqwfreq(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t sqwfreq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
struct m41t80_data *clientdata = i2c_get_clientdata(client);
@@ -272,14 +378,19 @@ static ssize_t m41t80_sysfs_show_sqwfreq(struct device *dev,
}
return sprintf(buf, "%d\n", val);
}
-static ssize_t m41t80_sysfs_set_sqwfreq(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+
+static ssize_t sqwfreq_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct m41t80_data *clientdata = i2c_get_clientdata(client);
int almon, sqw, reg_sqw, rc;
- int val = simple_strtoul(buf, NULL, 0);
+ unsigned long val;
+
+ rc = kstrtoul(buf, 0, &val);
+ if (rc < 0)
+ return rc;
if (!(clientdata->features & M41T80_FEATURE_SQ))
return -EINVAL;
@@ -308,7 +419,7 @@ static ssize_t m41t80_sysfs_set_sqwfreq(struct device *dev,
sqw = (sqw & 0x0f) | (val << 4);
rc = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
- almon & ~M41T80_ALMON_SQWE);
+ almon & ~M41T80_ALMON_SQWE);
if (rc < 0)
return rc;
@@ -318,35 +429,24 @@ static ssize_t m41t80_sysfs_set_sqwfreq(struct device *dev,
return rc;
rc = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
- almon | M41T80_ALMON_SQWE);
- if (rc <0)
+ almon | M41T80_ALMON_SQWE);
+ if (rc < 0)
return rc;
}
return count;
}
-static DEVICE_ATTR(sqwfreq, S_IRUGO | S_IWUSR,
- m41t80_sysfs_show_sqwfreq, m41t80_sysfs_set_sqwfreq);
+static DEVICE_ATTR_RW(sqwfreq);
static struct attribute *attrs[] = {
&dev_attr_flags.attr,
&dev_attr_sqwfreq.attr,
NULL,
};
+
static struct attribute_group attr_group = {
.attrs = attrs,
};
-static int m41t80_sysfs_register(struct device *dev)
-{
- return sysfs_create_group(&dev->kobj, &attr_group);
-}
-#else
-static int m41t80_sysfs_register(struct device *dev)
-{
- return 0;
-}
-#endif
-
#ifdef CONFIG_RTC_DRV_M41T80_WDT
/*
*****************************************************************************
@@ -394,7 +494,7 @@ static void wdt_ping(void)
/*
 * WDS = 1 (0x80), multiplier = WD_TIMO, resolution = 1s (0x02)
*/
- i2c_data[1] = wdt_margin<<2 | 0x82;
+ i2c_data[1] = wdt_margin << 2 | 0x82;
/*
* M41T65 has three bits for watchdog resolution. Don't set bit 7, as
@@ -636,49 +736,76 @@ static struct notifier_block wdt_notifier = {
*
*****************************************************************************
*/
+
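+/* devm action: drop the sysfs group again when the device is unbound. */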
+static void m41t80_remove_sysfs_group(void *_dev)
+{
+ struct device *dev = _dev;
+
+ sysfs_remove_group(&dev->kobj, &attr_group);
+}
+
static int m41t80_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int rc = 0;
struct rtc_device *rtc = NULL;
struct rtc_time tm;
- struct m41t80_data *clientdata = NULL;
+ struct m41t80_data *m41t80_data = NULL;
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C
- | I2C_FUNC_SMBUS_BYTE_DATA))
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK |
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&adapter->dev, "doesn't support I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_I2C_BLOCK\n");
return -ENODEV;
+ }
- clientdata = devm_kzalloc(&client->dev, sizeof(*clientdata),
- GFP_KERNEL);
- if (!clientdata)
+ m41t80_data = devm_kzalloc(&client->dev, sizeof(*m41t80_data),
+ GFP_KERNEL);
+ if (!m41t80_data)
return -ENOMEM;
- clientdata->features = id->driver_data;
- i2c_set_clientdata(client, clientdata);
+ m41t80_data->features = id->driver_data;
+ i2c_set_clientdata(client, m41t80_data);
+
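+ /* Alarm callbacks are wired up only when a usable IRQ line is present. */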
+ if (client->irq > 0) {
+ rc = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, m41t80_handle_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "m41t80", client);
+ if (rc) {
+ dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
+ client->irq = 0;
+ } else {
+ m41t80_rtc_ops.read_alarm = m41t80_read_alarm;
+ m41t80_rtc_ops.set_alarm = m41t80_set_alarm;
+ m41t80_rtc_ops.alarm_irq_enable = m41t80_alarm_irq_enable;
+ /* Enable the wakealarm */
+ device_init_wakeup(&client->dev, true);
+ }
+ }
rtc = devm_rtc_device_register(&client->dev, client->name,
- &m41t80_rtc_ops, THIS_MODULE);
+ &m41t80_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
- clientdata->rtc = rtc;
+ m41t80_data->rtc = rtc;
/* Make sure HT (Halt Update) bit is cleared */
rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
if (rc >= 0 && rc & M41T80_ALHOUR_HT) {
- if (clientdata->features & M41T80_FEATURE_HT) {
+ if (m41t80_data->features & M41T80_FEATURE_HT) {
m41t80_get_datetime(client, &tm);
dev_info(&client->dev, "HT bit was set!\n");
dev_info(&client->dev,
- "Power Down at "
- "%04i-%02i-%02i %02i:%02i:%02i\n",
+ "Power Down at %04i-%02i-%02i %02i:%02i:%02i\n",
tm.tm_year + 1900,
tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
tm.tm_min, tm.tm_sec);
}
rc = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_HOUR,
- rc & ~M41T80_ALHOUR_HT);
+ rc & ~M41T80_ALHOUR_HT);
}
if (rc < 0) {
@@ -691,18 +818,30 @@ static int m41t80_probe(struct i2c_client *client,
if (rc >= 0 && rc & M41T80_SEC_ST)
rc = i2c_smbus_write_byte_data(client, M41T80_REG_SEC,
- rc & ~M41T80_SEC_ST);
+ rc & ~M41T80_SEC_ST);
if (rc < 0) {
dev_err(&client->dev, "Can't clear ST bit\n");
return rc;
}
- rc = m41t80_sysfs_register(&client->dev);
- if (rc)
+ /* Export sysfs entries */
+ rc = sysfs_create_group(&client->dev.kobj, &attr_group);
+ if (rc) {
+ dev_err(&client->dev, "Failed to create sysfs group: %d\n", rc);
+ return rc;
+ }
+
+ rc = devm_add_action(&client->dev, m41t80_remove_sysfs_group,
+ &client->dev);
+ if (rc) {
+ m41t80_remove_sysfs_group(&client->dev);
+ dev_err(&client->dev,
+ "Failed to add sysfs cleanup action: %d\n", rc);
return rc;
+ }
#ifdef CONFIG_RTC_DRV_M41T80_WDT
- if (clientdata->features & M41T80_FEATURE_HT) {
+ if (m41t80_data->features & M41T80_FEATURE_HT) {
save_client = client;
rc = misc_register(&wdt_dev);
if (rc)
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index c62b51217..810f4ea48 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -22,8 +22,6 @@
#include <linux/io.h>
#include <linux/err.h>
-#define DRV_VERSION "1.0"
-
struct m48t35_rtc {
u8 pad[0x7ff8]; /* starts at 0x7ff8 */
u8 control;
@@ -190,5 +188,4 @@ module_platform_driver(m48t35_platform_driver);
MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_DESCRIPTION("M48T35 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-m48t35");
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index a17b7a3ce..f72b91f25 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -39,9 +39,6 @@
#define M48T86_REG_B_SET (1 << 7)
#define M48T86_REG_D_VRT (1 << 7)
-#define DRV_VERSION "0.1"
-
-
static int m48t86_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
unsigned char reg;
@@ -178,5 +175,4 @@ module_platform_driver(m48t86_rtc_platform_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("M48T86 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-m48t86");
diff --git a/drivers/rtc/rtc-max6900.c b/drivers/rtc/rtc-max6900.c
index b2a76077b..48b6b411f 100644
--- a/drivers/rtc/rtc-max6900.c
+++ b/drivers/rtc/rtc-max6900.c
@@ -17,8 +17,6 @@
#include <linux/rtc.h>
#include <linux/delay.h>
-#define DRV_VERSION "0.2"
-
/*
* register indices
*/
@@ -218,8 +216,6 @@ max6900_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
- dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
-
rtc = devm_rtc_device_register(&client->dev, max6900_driver.driver.name,
&max6900_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
@@ -249,4 +245,3 @@ module_i2c_driver(max6900_driver);
MODULE_DESCRIPTION("Maxim MAX6900 RTC driver");
MODULE_AUTHOR("Dale Farnsworth <dale@farnsworth.org>");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index a65868065..30b8ef6a3 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -250,18 +250,6 @@ static irqreturn_t mc13xxx_rtc_alarm_handler(int irq, void *dev)
return IRQ_HANDLED;
}
-static irqreturn_t mc13xxx_rtc_update_handler(int irq, void *dev)
-{
- struct mc13xxx_rtc *priv = dev;
- struct mc13xxx *mc13xxx = priv->mc13xxx;
-
- rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_UF);
-
- mc13xxx_irq_ack(mc13xxx, irq);
-
- return IRQ_HANDLED;
-}
-
static const struct rtc_class_ops mc13xxx_rtc_ops = {
.read_time = mc13xxx_rtc_read_time,
.set_mmss64 = mc13xxx_rtc_set_mmss,
@@ -307,11 +295,6 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev)
if (ret)
goto err_irq_request;
- ret = mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_1HZ,
- mc13xxx_rtc_update_handler, DRIVER_NAME, priv);
- if (ret)
- goto err_irq_request;
-
ret = mc13xxx_irq_request_nounmask(mc13xxx, MC13XXX_IRQ_TODA,
mc13xxx_rtc_alarm_handler, DRIVER_NAME, priv);
if (ret)
@@ -326,7 +309,6 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev)
err_irq_request:
mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv);
- mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_1HZ, priv);
mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv);
mc13xxx_unlock(mc13xxx);
@@ -341,7 +323,6 @@ static int mc13xxx_rtc_remove(struct platform_device *pdev)
mc13xxx_lock(priv->mc13xxx);
mc13xxx_irq_free(priv->mc13xxx, MC13XXX_IRQ_TODA, priv);
- mc13xxx_irq_free(priv->mc13xxx, MC13XXX_IRQ_1HZ, priv);
mc13xxx_irq_free(priv->mc13xxx, MC13XXX_IRQ_RTCRST, priv);
mc13xxx_unlock(priv->mc13xxx);
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 548ea6f6f..0094d9bdd 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -266,7 +266,7 @@ static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
}
-#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
+#if IS_ENABLED(CONFIG_RTC_INTF_PROC)
static int mrst_procfs(struct device *dev, struct seq_file *seq)
{
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 7bd89d900..359876a88 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -240,9 +240,6 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
mxc_rtc_irq_enable(&pdev->dev, RTC_ALM_BIT, 0);
}
- if (status & RTC_1HZ_BIT)
- events |= (RTC_UF | RTC_IRQF);
-
if (status & PIT_ALL_ON)
events |= (RTC_PF | RTC_IRQF);
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index da27738b1..f22e06070 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -46,8 +46,6 @@
#include <linux/module.h>
#include <linux/sysfs.h>
-#define DRV_VERSION "0.6"
-
/* REGISTERS */
#define PCF2123_REG_CTRL1 (0x00) /* Control Register 1 */
#define PCF2123_REG_CTRL2 (0x01) /* Control Register 2 */
@@ -395,7 +393,6 @@ static int pcf2123_probe(struct spi_device *spi)
}
}
- dev_info(&spi->dev, "chip found, driver version " DRV_VERSION "\n");
dev_info(&spi->dev, "spiclk %u KHz.\n",
(spi->max_speed_hz + 500) / 1000);
@@ -474,4 +471,3 @@ module_spi_driver(pcf2123_driver);
MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>");
MODULE_DESCRIPTION("NXP PCF2123 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index c8f95b8e4..b9ddbb001 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -23,8 +23,6 @@
#include <linux/of.h>
#include <linux/err.h>
-#define DRV_VERSION "0.4.4"
-
#define PCF8563_REG_ST1 0x00 /* status */
#define PCF8563_REG_ST2 0x01
#define PCF8563_BIT_AIE (1 << 1)
@@ -535,7 +533,7 @@ static struct clk *pcf8563_clkout_register_clk(struct pcf8563 *pcf8563)
init.name = "pcf8563-clkout";
init.ops = &pcf8563_clkout_ops;
- init.flags = CLK_IS_ROOT;
+ init.flags = 0;
init.parent_names = NULL;
init.num_parents = 0;
pcf8563->clkout_hw.init = &init;
@@ -580,8 +578,6 @@ static int pcf8563_probe(struct i2c_client *client,
if (!pcf8563)
return -ENOMEM;
- dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
-
i2c_set_clientdata(client, pcf8563);
pcf8563->client = client;
device_set_wakeup_capable(&client->dev, 1);
@@ -662,4 +658,3 @@ module_i2c_driver(pcf8563_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("Philips PCF8563/Epson RTC8564 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-rs5c313.c b/drivers/rtc/rtc-rs5c313.c
index 5f48167c8..89f38e3e9 100644
--- a/drivers/rtc/rtc-rs5c313.c
+++ b/drivers/rtc/rtc-rs5c313.c
@@ -50,7 +50,6 @@
#include <linux/io.h>
#define DRV_NAME "rs5c313"
-#define DRV_VERSION "1.13"
#ifdef CONFIG_SH_LANDISK
/*****************************************************/
@@ -407,7 +406,6 @@ static void __exit rs5c313_rtc_exit(void)
module_init(rs5c313_rtc_init);
module_exit(rs5c313_rtc_exit);
-MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("kogiidena , Nobuhiro Iwamatsu <iwamatsu@nigauri.org>");
MODULE_DESCRIPTION("Ricoh RS5C313 RTC device driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index 1162fecab..9a306983a 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -25,8 +25,6 @@
#include <linux/spi/spi.h>
#include <linux/module.h>
-#define DRV_VERSION "0.2"
-
#define RS5C348_REG_SECS 0
#define RS5C348_REG_MINS 1
#define RS5C348_REG_HOURS 2
@@ -171,7 +169,6 @@ static int rs5c348_probe(struct spi_device *spi)
goto kfree_exit;
}
- dev_info(&spi->dev, "chip found, driver version " DRV_VERSION "\n");
dev_info(&spi->dev, "spiclk %u KHz.\n",
(spi->max_speed_hz + 500) / 1000);
@@ -230,5 +227,4 @@ module_spi_driver(rs5c348_driver);
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("Ricoh RS5C348 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("spi:rtc-rs5c348");
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 28871cd7e..ef8622942 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -16,9 +16,6 @@
#include <linux/slab.h>
#include <linux/module.h>
-#define DRV_VERSION "0.6"
-
-
/*
* Ricoh has a family of I2C based RTCs, which differ only slightly from
* each other. Differences center on pinout (e.g. how many interrupts,
@@ -240,11 +237,11 @@ static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
return 0;
}
-#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
+#if IS_ENABLED(CONFIG_RTC_INTF_PROC)
#define NEED_TRIM
#endif
-#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
+#if IS_ENABLED(CONFIG_RTC_INTF_SYSFS)
#define NEED_TRIM
#endif
@@ -412,7 +409,7 @@ static int rs5c_set_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
-#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
+#if IS_ENABLED(CONFIG_RTC_INTF_PROC)
static int rs5c372_rtc_proc(struct device *dev, struct seq_file *seq)
{
@@ -441,7 +438,7 @@ static const struct rtc_class_ops rs5c372_rtc_ops = {
.alarm_irq_enable = rs5c_rtc_alarm_irq_enable,
};
-#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
+#if IS_ENABLED(CONFIG_RTC_INTF_SYSFS)
static ssize_t rs5c372_sysfs_show_trim(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -509,9 +506,9 @@ static int rs5c_oscillator_setup(struct rs5c372 *rs5c372)
int addr, i, ret = 0;
if (rs5c372->type == rtc_r2025sd) {
- if (!(rs5c372->regs[RS5C_REG_CTRL2] & R2025_CTRL2_XST))
+ if (rs5c372->regs[RS5C_REG_CTRL2] & R2025_CTRL2_XST)
return ret;
- rs5c372->regs[RS5C_REG_CTRL2] &= ~R2025_CTRL2_XST;
+ rs5c372->regs[RS5C_REG_CTRL2] |= R2025_CTRL2_XST;
} else {
if (!(rs5c372->regs[RS5C_REG_CTRL2] & RS5C_CTRL2_XSTP))
return ret;
@@ -640,7 +637,7 @@ static int rs5c372_probe(struct i2c_client *client,
if (rs5c372_get_datetime(client, &tm) < 0)
dev_warn(&client->dev, "clock needs to be set\n");
- dev_info(&client->dev, "%s found, %s, driver version " DRV_VERSION "\n",
+ dev_info(&client->dev, "%s found, %s\n",
({ char *s; switch (rs5c372->type) {
case rtc_r2025sd: s = "r2025sd"; break;
case rtc_r2221tl: s = "r2221tl"; break;
@@ -696,4 +693,3 @@ MODULE_AUTHOR(
"Paul Mundt <lethal@linux-sh.org>");
MODULE_DESCRIPTION("Ricoh RS5C372 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
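
The interesting part of the rs5c372 hunk is the r2025sd branch: judging from the change, the R2025SD's XST bit has the opposite polarity of the XSTP stop flag used by its siblings, i.e. XST set means the crystal is oscillating normally. A condensed, hedged reading of the fixed logic (ctrl2 stands in for rs5c372->regs[RS5C_REG_CTRL2]):

/* r2025sd: XST is active-good (1 = oscillation confirmed), unlike the
 * XSTP stop flag on the other chips, so bail out when it is already
 * set and set it - rather than clear it - when recovering. */
if (ctrl2 & R2025_CTRL2_XST)
	return 0;		/* nothing to do, oscillator is fine */
ctrl2 |= R2025_CTRL2_XST;	/* mark oscillation as re-established */
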
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index d0cbf0804..1f9f7b4bf 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -1,5 +1,5 @@
/*
- * Micro Crystal RV-3029 rtc class driver
+ * Micro Crystal RV-3029 / RV-3049 rtc class driver
*
* Author: Gregory Hermant <gregory.hermant@calao-systems.com>
* Michael Buesch <m@bues.ch>
@@ -14,13 +14,14 @@
#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/spi/spi.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-
+#include <linux/regmap.h>
/* Register map */
/* control section */
@@ -75,6 +76,7 @@
#define RV3029_A_DW 0x14
#define RV3029_A_MO 0x15
#define RV3029_A_YR 0x16
+#define RV3029_A_AE_X BIT(7)
#define RV3029_ALARM_SECTION_LEN 0x07
/* timer section */
@@ -116,85 +118,84 @@
#define RV3029_USR2_RAM_PAGE 0x3C
#define RV3029_USR2_SECTION_LEN 0x04
-static int
-rv3029_i2c_read_regs(struct i2c_client *client, u8 reg, u8 *buf,
- unsigned len)
+struct rv3029_data {
+ struct device *dev;
+ struct rtc_device *rtc;
+ struct regmap *regmap;
+ int irq;
+};
+
+static int rv3029_read_regs(struct device *dev, u8 reg, u8 *buf,
+ unsigned int len)
{
- int ret;
+ struct rv3029_data *rv3029 = dev_get_drvdata(dev);
if ((reg > RV3029_USR1_RAM_PAGE + 7) ||
- (reg + len > RV3029_USR1_RAM_PAGE + 8))
+ (reg + len > RV3029_USR1_RAM_PAGE + 8))
return -EINVAL;
- ret = i2c_smbus_read_i2c_block_data(client, reg, len, buf);
- if (ret < 0)
- return ret;
- if (ret < len)
- return -EIO;
- return 0;
+ return regmap_bulk_read(rv3029->regmap, reg, buf, len);
}
-static int
-rv3029_i2c_write_regs(struct i2c_client *client, u8 reg, u8 const buf[],
- unsigned len)
+static int rv3029_write_regs(struct device *dev, u8 reg, u8 const buf[],
+ unsigned int len)
{
+ struct rv3029_data *rv3029 = dev_get_drvdata(dev);
+
if ((reg > RV3029_USR1_RAM_PAGE + 7) ||
- (reg + len > RV3029_USR1_RAM_PAGE + 8))
+ (reg + len > RV3029_USR1_RAM_PAGE + 8))
return -EINVAL;
- return i2c_smbus_write_i2c_block_data(client, reg, len, buf);
+ return regmap_bulk_write(rv3029->regmap, reg, buf, len);
}
-static int
-rv3029_i2c_update_bits(struct i2c_client *client, u8 reg, u8 mask, u8 set)
+static int rv3029_update_bits(struct device *dev, u8 reg, u8 mask, u8 set)
{
u8 buf;
int ret;
- ret = rv3029_i2c_read_regs(client, reg, &buf, 1);
+ ret = rv3029_read_regs(dev, reg, &buf, 1);
if (ret < 0)
return ret;
buf &= ~mask;
buf |= set & mask;
- ret = rv3029_i2c_write_regs(client, reg, &buf, 1);
+ ret = rv3029_write_regs(dev, reg, &buf, 1);
if (ret < 0)
return ret;
return 0;
}
-static int
-rv3029_i2c_get_sr(struct i2c_client *client, u8 *buf)
+static int rv3029_get_sr(struct device *dev, u8 *buf)
{
- int ret = rv3029_i2c_read_regs(client, RV3029_STATUS, buf, 1);
+ int ret = rv3029_read_regs(dev, RV3029_STATUS, buf, 1);
if (ret < 0)
return -EIO;
- dev_dbg(&client->dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
+ dev_dbg(dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
return 0;
}
-static int
-rv3029_i2c_set_sr(struct i2c_client *client, u8 val)
+static int rv3029_set_sr(struct device *dev, u8 val)
{
u8 buf[1];
int sr;
buf[0] = val;
- sr = rv3029_i2c_write_regs(client, RV3029_STATUS, buf, 1);
- dev_dbg(&client->dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
+ sr = rv3029_write_regs(dev, RV3029_STATUS, buf, 1);
+ dev_dbg(dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
if (sr < 0)
return -EIO;
return 0;
}
-static int rv3029_eeprom_busywait(struct i2c_client *client)
+static int rv3029_eeprom_busywait(struct device *dev)
{
int i, ret;
u8 sr;
for (i = 100; i > 0; i--) {
- ret = rv3029_i2c_get_sr(client, &sr);
+ ret = rv3029_get_sr(dev, &sr);
if (ret < 0)
break;
if (!(sr & RV3029_STATUS_EEBUSY))
@@ -202,28 +203,28 @@ static int rv3029_eeprom_busywait(struct i2c_client *client)
usleep_range(1000, 10000);
}
if (i <= 0) {
- dev_err(&client->dev, "EEPROM busy wait timeout.\n");
+ dev_err(dev, "EEPROM busy wait timeout.\n");
return -ETIMEDOUT;
}
return ret;
}
-static int rv3029_eeprom_exit(struct i2c_client *client)
+static int rv3029_eeprom_exit(struct device *dev)
{
/* Re-enable eeprom refresh */
- return rv3029_i2c_update_bits(client, RV3029_ONOFF_CTRL,
- RV3029_ONOFF_CTRL_EERE,
- RV3029_ONOFF_CTRL_EERE);
+ return rv3029_update_bits(dev, RV3029_ONOFF_CTRL,
+ RV3029_ONOFF_CTRL_EERE,
+ RV3029_ONOFF_CTRL_EERE);
}
-static int rv3029_eeprom_enter(struct i2c_client *client)
+static int rv3029_eeprom_enter(struct device *dev)
{
int ret;
u8 sr;
/* Check whether we are in the allowed voltage range. */
- ret = rv3029_i2c_get_sr(client, &sr);
+ ret = rv3029_get_sr(dev, &sr);
if (ret < 0)
return ret;
if (sr & (RV3029_STATUS_VLOW1 | RV3029_STATUS_VLOW2)) {
@@ -232,129 +233,168 @@ static int rv3029_eeprom_enter(struct i2c_client *client)
*/
sr &= ~RV3029_STATUS_VLOW1;
sr &= ~RV3029_STATUS_VLOW2;
- ret = rv3029_i2c_set_sr(client, sr);
+ ret = rv3029_set_sr(dev, sr);
if (ret < 0)
return ret;
usleep_range(1000, 10000);
- ret = rv3029_i2c_get_sr(client, &sr);
+ ret = rv3029_get_sr(dev, &sr);
if (ret < 0)
return ret;
if (sr & (RV3029_STATUS_VLOW1 | RV3029_STATUS_VLOW2)) {
- dev_err(&client->dev,
+ dev_err(dev,
"Supply voltage is too low to safely access the EEPROM.\n");
return -ENODEV;
}
}
/* Disable eeprom refresh. */
- ret = rv3029_i2c_update_bits(client, RV3029_ONOFF_CTRL,
- RV3029_ONOFF_CTRL_EERE, 0);
+ ret = rv3029_update_bits(dev, RV3029_ONOFF_CTRL, RV3029_ONOFF_CTRL_EERE,
+ 0);
if (ret < 0)
return ret;
/* Wait for any previous eeprom accesses to finish. */
- ret = rv3029_eeprom_busywait(client);
+ ret = rv3029_eeprom_busywait(dev);
if (ret < 0)
- rv3029_eeprom_exit(client);
+ rv3029_eeprom_exit(dev);
return ret;
}
-static int rv3029_eeprom_read(struct i2c_client *client, u8 reg,
+static int rv3029_eeprom_read(struct device *dev, u8 reg,
u8 buf[], size_t len)
{
int ret, err;
- err = rv3029_eeprom_enter(client);
+ err = rv3029_eeprom_enter(dev);
if (err < 0)
return err;
- ret = rv3029_i2c_read_regs(client, reg, buf, len);
+ ret = rv3029_read_regs(dev, reg, buf, len);
- err = rv3029_eeprom_exit(client);
+ err = rv3029_eeprom_exit(dev);
if (err < 0)
return err;
return ret;
}
-static int rv3029_eeprom_write(struct i2c_client *client, u8 reg,
+static int rv3029_eeprom_write(struct device *dev, u8 reg,
u8 const buf[], size_t len)
{
int ret, err;
size_t i;
u8 tmp;
- err = rv3029_eeprom_enter(client);
+ err = rv3029_eeprom_enter(dev);
if (err < 0)
return err;
for (i = 0; i < len; i++, reg++) {
- ret = rv3029_i2c_read_regs(client, reg, &tmp, 1);
+ ret = rv3029_read_regs(dev, reg, &tmp, 1);
if (ret < 0)
break;
if (tmp != buf[i]) {
- ret = rv3029_i2c_write_regs(client, reg, &buf[i], 1);
+ ret = rv3029_write_regs(dev, reg, &buf[i], 1);
if (ret < 0)
break;
}
- ret = rv3029_eeprom_busywait(client);
+ ret = rv3029_eeprom_busywait(dev);
if (ret < 0)
break;
}
- err = rv3029_eeprom_exit(client);
+ err = rv3029_eeprom_exit(dev);
if (err < 0)
return err;
return ret;
}
-static int rv3029_eeprom_update_bits(struct i2c_client *client,
+static int rv3029_eeprom_update_bits(struct device *dev,
u8 reg, u8 mask, u8 set)
{
u8 buf;
int ret;
- ret = rv3029_eeprom_read(client, reg, &buf, 1);
+ ret = rv3029_eeprom_read(dev, reg, &buf, 1);
if (ret < 0)
return ret;
buf &= ~mask;
buf |= set & mask;
- ret = rv3029_eeprom_write(client, reg, &buf, 1);
+ ret = rv3029_eeprom_write(dev, reg, &buf, 1);
if (ret < 0)
return ret;
return 0;
}
-static int
-rv3029_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
+static irqreturn_t rv3029_handle_irq(int irq, void *dev_id)
+{
+ struct device *dev = dev_id;
+ struct rv3029_data *rv3029 = dev_get_drvdata(dev);
+ struct mutex *lock = &rv3029->rtc->ops_lock;
+ unsigned long events = 0;
+ u8 flags, controls;
+ int ret;
+
+ mutex_lock(lock);
+
+ ret = rv3029_read_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
+ if (ret) {
+ dev_warn(dev, "Read IRQ Control Register error %d\n", ret);
+ mutex_unlock(lock);
+ return IRQ_NONE;
+ }
+
+ ret = rv3029_read_regs(dev, RV3029_IRQ_FLAGS, &flags, 1);
+ if (ret) {
+ dev_warn(dev, "Read IRQ Flags Register error %d\n", ret);
+ mutex_unlock(lock);
+ return IRQ_NONE;
+ }
+
+ if (flags & RV3029_IRQ_FLAGS_AF) {
+ flags &= ~RV3029_IRQ_FLAGS_AF;
+ controls &= ~RV3029_IRQ_CTRL_AIE;
+ events |= RTC_AF;
+ }
+
+ if (events) {
+ rtc_update_irq(rv3029->rtc, 1, events);
+ rv3029_write_regs(dev, RV3029_IRQ_FLAGS, &flags, 1);
+ rv3029_write_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
+ }
+ mutex_unlock(lock);
+
+ return IRQ_HANDLED;
+}
+
+static int rv3029_read_time(struct device *dev, struct rtc_time *tm)
{
u8 buf[1];
int ret;
u8 regs[RV3029_WATCH_SECTION_LEN] = { 0, };
- ret = rv3029_i2c_get_sr(client, buf);
+ ret = rv3029_get_sr(dev, buf);
if (ret < 0) {
- dev_err(&client->dev, "%s: reading SR failed\n", __func__);
+ dev_err(dev, "%s: reading SR failed\n", __func__);
return -EIO;
}
- ret = rv3029_i2c_read_regs(client, RV3029_W_SEC, regs,
- RV3029_WATCH_SECTION_LEN);
+ ret = rv3029_read_regs(dev, RV3029_W_SEC, regs,
+ RV3029_WATCH_SECTION_LEN);
if (ret < 0) {
- dev_err(&client->dev, "%s: reading RTC section failed\n",
- __func__);
+ dev_err(dev, "%s: reading RTC section failed\n", __func__);
return ret;
}
- tm->tm_sec = bcd2bin(regs[RV3029_W_SEC-RV3029_W_SEC]);
- tm->tm_min = bcd2bin(regs[RV3029_W_MINUTES-RV3029_W_SEC]);
+ tm->tm_sec = bcd2bin(regs[RV3029_W_SEC - RV3029_W_SEC]);
+ tm->tm_min = bcd2bin(regs[RV3029_W_MINUTES - RV3029_W_SEC]);
/* HR field has a more complex interpretation */
{
- const u8 _hr = regs[RV3029_W_HOURS-RV3029_W_SEC];
+ const u8 _hr = regs[RV3029_W_HOURS - RV3029_W_SEC];
if (_hr & RV3029_REG_HR_12_24) {
/* 12h format */
@@ -365,77 +405,86 @@ rv3029_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
tm->tm_hour = bcd2bin(_hr & 0x3f);
}
- tm->tm_mday = bcd2bin(regs[RV3029_W_DATE-RV3029_W_SEC]);
- tm->tm_mon = bcd2bin(regs[RV3029_W_MONTHS-RV3029_W_SEC]) - 1;
- tm->tm_year = bcd2bin(regs[RV3029_W_YEARS-RV3029_W_SEC]) + 100;
- tm->tm_wday = bcd2bin(regs[RV3029_W_DAYS-RV3029_W_SEC]) - 1;
+ tm->tm_mday = bcd2bin(regs[RV3029_W_DATE - RV3029_W_SEC]);
+ tm->tm_mon = bcd2bin(regs[RV3029_W_MONTHS - RV3029_W_SEC]) - 1;
+ tm->tm_year = bcd2bin(regs[RV3029_W_YEARS - RV3029_W_SEC]) + 100;
+ tm->tm_wday = bcd2bin(regs[RV3029_W_DAYS - RV3029_W_SEC]) - 1;
return 0;
}
-static int rv3029_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
- return rv3029_i2c_read_time(to_i2c_client(dev), tm);
-}
-
-static int
-rv3029_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
+static int rv3029_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct rtc_time *const tm = &alarm->time;
int ret;
- u8 regs[8];
+ u8 regs[8], controls, flags;
- ret = rv3029_i2c_get_sr(client, regs);
+ ret = rv3029_get_sr(dev, regs);
if (ret < 0) {
- dev_err(&client->dev, "%s: reading SR failed\n", __func__);
+ dev_err(dev, "%s: reading SR failed\n", __func__);
return -EIO;
}
- ret = rv3029_i2c_read_regs(client, RV3029_A_SC, regs,
- RV3029_ALARM_SECTION_LEN);
+ ret = rv3029_read_regs(dev, RV3029_A_SC, regs,
+ RV3029_ALARM_SECTION_LEN);
if (ret < 0) {
- dev_err(&client->dev, "%s: reading alarm section failed\n",
- __func__);
+ dev_err(dev, "%s: reading alarm section failed\n", __func__);
return ret;
}
- tm->tm_sec = bcd2bin(regs[RV3029_A_SC-RV3029_A_SC] & 0x7f);
- tm->tm_min = bcd2bin(regs[RV3029_A_MN-RV3029_A_SC] & 0x7f);
- tm->tm_hour = bcd2bin(regs[RV3029_A_HR-RV3029_A_SC] & 0x3f);
- tm->tm_mday = bcd2bin(regs[RV3029_A_DT-RV3029_A_SC] & 0x3f);
- tm->tm_mon = bcd2bin(regs[RV3029_A_MO-RV3029_A_SC] & 0x1f) - 1;
- tm->tm_year = bcd2bin(regs[RV3029_A_YR-RV3029_A_SC] & 0x7f) + 100;
- tm->tm_wday = bcd2bin(regs[RV3029_A_DW-RV3029_A_SC] & 0x07) - 1;
+ ret = rv3029_read_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
+ if (ret) {
+ dev_err(dev, "Read IRQ Control Register error %d\n", ret);
+ return ret;
+ }
+ ret = rv3029_read_regs(dev, RV3029_IRQ_FLAGS, &flags, 1);
+ if (ret < 0) {
+ dev_err(dev, "Read IRQ Flags Register error %d\n", ret);
+ return ret;
+ }
- return 0;
-}
+ tm->tm_sec = bcd2bin(regs[RV3029_A_SC - RV3029_A_SC] & 0x7f);
+ tm->tm_min = bcd2bin(regs[RV3029_A_MN - RV3029_A_SC] & 0x7f);
+ tm->tm_hour = bcd2bin(regs[RV3029_A_HR - RV3029_A_SC] & 0x3f);
+ tm->tm_mday = bcd2bin(regs[RV3029_A_DT - RV3029_A_SC] & 0x3f);
+ tm->tm_mon = bcd2bin(regs[RV3029_A_MO - RV3029_A_SC] & 0x1f) - 1;
+ tm->tm_year = bcd2bin(regs[RV3029_A_YR - RV3029_A_SC] & 0x7f) + 100;
+ tm->tm_wday = bcd2bin(regs[RV3029_A_DW - RV3029_A_SC] & 0x07) - 1;
-static int
-rv3029_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- return rv3029_i2c_read_alarm(to_i2c_client(dev), alarm);
+ alarm->enabled = !!(controls & RV3029_IRQ_CTRL_AIE);
+ alarm->pending = (flags & RV3029_IRQ_FLAGS_AF) && alarm->enabled;
+
+ return 0;
}
-static int rv3029_rtc_i2c_alarm_set_irq(struct i2c_client *client,
- int enable)
+static int rv3029_alarm_irq_enable(struct device *dev, unsigned int enable)
{
int ret;
+ u8 controls;
+
+ ret = rv3029_read_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
+ if (ret < 0) {
+ dev_warn(dev, "Read IRQ Control Register error %d\n", ret);
+ return ret;
+ }
/* enable/disable AIE irq */
- ret = rv3029_i2c_update_bits(client, RV3029_IRQ_CTRL,
- RV3029_IRQ_CTRL_AIE,
- (enable ? RV3029_IRQ_CTRL_AIE : 0));
+ if (enable)
+ controls |= RV3029_IRQ_CTRL_AIE;
+ else
+ controls &= ~RV3029_IRQ_CTRL_AIE;
+
+ ret = rv3029_write_regs(dev, RV3029_IRQ_CTRL, &controls, 1);
if (ret < 0) {
- dev_err(&client->dev, "can't update INT reg\n");
+ dev_err(dev, "can't update INT reg\n");
return ret;
}
return 0;
}
-static int rv3029_rtc_i2c_set_alarm(struct i2c_client *client,
- struct rtc_wkalrm *alarm)
+static int rv3029_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct rtc_time *const tm = &alarm->time;
int ret;
@@ -449,57 +498,48 @@ static int rv3029_rtc_i2c_set_alarm(struct i2c_client *client,
if (tm->tm_year < 100)
return -EINVAL;
- ret = rv3029_i2c_get_sr(client, regs);
+ ret = rv3029_get_sr(dev, regs);
if (ret < 0) {
- dev_err(&client->dev, "%s: reading SR failed\n", __func__);
+ dev_err(dev, "%s: reading SR failed\n", __func__);
return -EIO;
}
- regs[RV3029_A_SC-RV3029_A_SC] = bin2bcd(tm->tm_sec & 0x7f);
- regs[RV3029_A_MN-RV3029_A_SC] = bin2bcd(tm->tm_min & 0x7f);
- regs[RV3029_A_HR-RV3029_A_SC] = bin2bcd(tm->tm_hour & 0x3f);
- regs[RV3029_A_DT-RV3029_A_SC] = bin2bcd(tm->tm_mday & 0x3f);
- regs[RV3029_A_MO-RV3029_A_SC] = bin2bcd((tm->tm_mon & 0x1f) - 1);
- regs[RV3029_A_DW-RV3029_A_SC] = bin2bcd((tm->tm_wday & 7) - 1);
- regs[RV3029_A_YR-RV3029_A_SC] = bin2bcd((tm->tm_year & 0x7f) - 100);
- ret = rv3029_i2c_write_regs(client, RV3029_A_SC, regs,
- RV3029_ALARM_SECTION_LEN);
+ /* Activate all the alarms with AE_x bit */
+ regs[RV3029_A_SC - RV3029_A_SC] = bin2bcd(tm->tm_sec) | RV3029_A_AE_X;
+ regs[RV3029_A_MN - RV3029_A_SC] = bin2bcd(tm->tm_min) | RV3029_A_AE_X;
+ regs[RV3029_A_HR - RV3029_A_SC] = (bin2bcd(tm->tm_hour) & 0x3f)
+ | RV3029_A_AE_X;
+ regs[RV3029_A_DT - RV3029_A_SC] = (bin2bcd(tm->tm_mday) & 0x3f)
+ | RV3029_A_AE_X;
+ regs[RV3029_A_MO - RV3029_A_SC] = (bin2bcd(tm->tm_mon + 1) & 0x1f)
+ | RV3029_A_AE_X;
+ regs[RV3029_A_DW - RV3029_A_SC] = (bin2bcd(tm->tm_wday + 1) & 0x7)
+ | RV3029_A_AE_X;
+ regs[RV3029_A_YR - RV3029_A_SC] = (bin2bcd(tm->tm_year - 100))
+ | RV3029_A_AE_X;
+
+ /* Write the alarm */
+ ret = rv3029_write_regs(dev, RV3029_A_SC, regs,
+ RV3029_ALARM_SECTION_LEN);
if (ret < 0)
return ret;
if (alarm->enabled) {
- /* clear AF flag */
- ret = rv3029_i2c_update_bits(client, RV3029_IRQ_FLAGS,
- RV3029_IRQ_FLAGS_AF, 0);
- if (ret < 0) {
- dev_err(&client->dev, "can't clear alarm flag\n");
- return ret;
- }
/* enable AIE irq */
- ret = rv3029_rtc_i2c_alarm_set_irq(client, 1);
+ ret = rv3029_alarm_irq_enable(dev, 1);
if (ret)
return ret;
-
- dev_dbg(&client->dev, "alarm IRQ armed\n");
} else {
/* disable AIE irq */
- ret = rv3029_rtc_i2c_alarm_set_irq(client, 0);
+ ret = rv3029_alarm_irq_enable(dev, 0);
if (ret)
return ret;
-
- dev_dbg(&client->dev, "alarm IRQ disabled\n");
}
return 0;
}
-static int rv3029_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
-{
- return rv3029_rtc_i2c_set_alarm(to_i2c_client(dev), alarm);
-}
-
-static int
-rv3029_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
+static int rv3029_set_time(struct device *dev, struct rtc_time *tm)
{
u8 regs[8];
int ret;
@@ -512,39 +552,34 @@ rv3029_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
if (tm->tm_year < 100)
return -EINVAL;
- regs[RV3029_W_SEC-RV3029_W_SEC] = bin2bcd(tm->tm_sec);
- regs[RV3029_W_MINUTES-RV3029_W_SEC] = bin2bcd(tm->tm_min);
- regs[RV3029_W_HOURS-RV3029_W_SEC] = bin2bcd(tm->tm_hour);
- regs[RV3029_W_DATE-RV3029_W_SEC] = bin2bcd(tm->tm_mday);
- regs[RV3029_W_MONTHS-RV3029_W_SEC] = bin2bcd(tm->tm_mon+1);
- regs[RV3029_W_DAYS-RV3029_W_SEC] = bin2bcd((tm->tm_wday & 7)+1);
- regs[RV3029_W_YEARS-RV3029_W_SEC] = bin2bcd(tm->tm_year - 100);
+ regs[RV3029_W_SEC - RV3029_W_SEC] = bin2bcd(tm->tm_sec);
+ regs[RV3029_W_MINUTES - RV3029_W_SEC] = bin2bcd(tm->tm_min);
+ regs[RV3029_W_HOURS - RV3029_W_SEC] = bin2bcd(tm->tm_hour);
+ regs[RV3029_W_DATE - RV3029_W_SEC] = bin2bcd(tm->tm_mday);
+ regs[RV3029_W_MONTHS - RV3029_W_SEC] = bin2bcd(tm->tm_mon + 1);
+ regs[RV3029_W_DAYS - RV3029_W_SEC] = bin2bcd(tm->tm_wday + 1) & 0x7;
+ regs[RV3029_W_YEARS - RV3029_W_SEC] = bin2bcd(tm->tm_year - 100);
- ret = rv3029_i2c_write_regs(client, RV3029_W_SEC, regs,
- RV3029_WATCH_SECTION_LEN);
+ ret = rv3029_write_regs(dev, RV3029_W_SEC, regs,
+ RV3029_WATCH_SECTION_LEN);
if (ret < 0)
return ret;
- ret = rv3029_i2c_get_sr(client, regs);
+ ret = rv3029_get_sr(dev, regs);
if (ret < 0) {
- dev_err(&client->dev, "%s: reading SR failed\n", __func__);
+ dev_err(dev, "%s: reading SR failed\n", __func__);
return ret;
}
/* clear PON bit */
- ret = rv3029_i2c_set_sr(client, (regs[0] & ~RV3029_STATUS_PON));
+ ret = rv3029_set_sr(dev, (regs[0] & ~RV3029_STATUS_PON));
if (ret < 0) {
- dev_err(&client->dev, "%s: reading SR failed\n", __func__);
+ dev_err(dev, "%s: reading SR failed\n", __func__);
return ret;
}
return 0;
}
-static int rv3029_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
- return rv3029_i2c_set_time(to_i2c_client(dev), tm);
-}
-
static const struct rv3029_trickle_tab_elem {
u32 r; /* resistance in ohms */
u8 conf; /* trickle config bits */
@@ -602,9 +637,9 @@ static const struct rv3029_trickle_tab_elem {
},
};
-static void rv3029_trickle_config(struct i2c_client *client)
+static void rv3029_trickle_config(struct device *dev)
{
- struct device_node *of_node = client->dev.of_node;
+ struct device_node *of_node = dev->of_node;
const struct rv3029_trickle_tab_elem *elem;
int i, err;
u32 ohms;
@@ -626,27 +661,25 @@ static void rv3029_trickle_config(struct i2c_client *client)
break;
}
trickle_set_bits = elem->conf;
- dev_info(&client->dev,
+ dev_info(dev,
"Trickle charger enabled at %d ohms resistance.\n",
elem->r);
}
- err = rv3029_eeprom_update_bits(client, RV3029_CONTROL_E2P_EECTRL,
+ err = rv3029_eeprom_update_bits(dev, RV3029_CONTROL_E2P_EECTRL,
RV3029_TRICKLE_MASK,
trickle_set_bits);
- if (err < 0) {
- dev_err(&client->dev,
- "Failed to update trickle charger config\n");
- }
+ if (err < 0)
+ dev_err(dev, "Failed to update trickle charger config\n");
}
#ifdef CONFIG_RTC_DRV_RV3029_HWMON
-static int rv3029_read_temp(struct i2c_client *client, int *temp_mC)
+static int rv3029_read_temp(struct device *dev, int *temp_mC)
{
int ret;
u8 temp;
- ret = rv3029_i2c_read_regs(client, RV3029_TEMP_PAGE, &temp, 1);
+ ret = rv3029_read_regs(dev, RV3029_TEMP_PAGE, &temp, 1);
if (ret < 0)
return ret;
@@ -659,10 +692,9 @@ static ssize_t rv3029_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = dev_get_drvdata(dev);
int ret, temp_mC;
- ret = rv3029_read_temp(client, &temp_mC);
+ ret = rv3029_read_temp(dev, &temp_mC);
if (ret < 0)
return ret;
@@ -674,7 +706,6 @@ static ssize_t rv3029_hwmon_set_update_interval(struct device *dev,
const char *buf,
size_t count)
{
- struct i2c_client *client = dev_get_drvdata(dev);
unsigned long interval_ms;
int ret;
u8 th_set_bits = 0;
@@ -688,7 +719,7 @@ static ssize_t rv3029_hwmon_set_update_interval(struct device *dev,
if (interval_ms >= 16000)
th_set_bits |= RV3029_EECTRL_THP;
}
- ret = rv3029_eeprom_update_bits(client, RV3029_CONTROL_E2P_EECTRL,
+ ret = rv3029_eeprom_update_bits(dev, RV3029_CONTROL_E2P_EECTRL,
RV3029_EECTRL_THE | RV3029_EECTRL_THP,
th_set_bits);
if (ret < 0)
@@ -701,11 +732,10 @@ static ssize_t rv3029_hwmon_show_update_interval(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct i2c_client *client = dev_get_drvdata(dev);
int ret, interval_ms;
u8 eectrl;
- ret = rv3029_eeprom_read(client, RV3029_CONTROL_E2P_EECTRL,
+ ret = rv3029_eeprom_read(dev, RV3029_CONTROL_E2P_EECTRL,
&eectrl, 1);
if (ret < 0)
return ret;
@@ -735,82 +765,226 @@ static struct attribute *rv3029_hwmon_attrs[] = {
};
ATTRIBUTE_GROUPS(rv3029_hwmon);
-static void rv3029_hwmon_register(struct i2c_client *client)
+static void rv3029_hwmon_register(struct device *dev, const char *name)
{
+ struct rv3029_data *rv3029 = dev_get_drvdata(dev);
struct device *hwmon_dev;
- hwmon_dev = devm_hwmon_device_register_with_groups(
- &client->dev, client->name, client, rv3029_hwmon_groups);
+ hwmon_dev = devm_hwmon_device_register_with_groups(dev, name, rv3029,
+ rv3029_hwmon_groups);
if (IS_ERR(hwmon_dev)) {
- dev_warn(&client->dev,
- "unable to register hwmon device %ld\n",
- PTR_ERR(hwmon_dev));
+ dev_warn(dev, "unable to register hwmon device %ld\n",
+ PTR_ERR(hwmon_dev));
}
}
#else /* CONFIG_RTC_DRV_RV3029_HWMON */
-static void rv3029_hwmon_register(struct i2c_client *client)
+static void rv3029_hwmon_register(struct device *dev, const char *name)
{
}
#endif /* CONFIG_RTC_DRV_RV3029_HWMON */
-static const struct rtc_class_ops rv3029_rtc_ops = {
- .read_time = rv3029_rtc_read_time,
- .set_time = rv3029_rtc_set_time,
- .read_alarm = rv3029_rtc_read_alarm,
- .set_alarm = rv3029_rtc_set_alarm,
+static struct rtc_class_ops rv3029_rtc_ops = {
+ .read_time = rv3029_read_time,
+ .set_time = rv3029_set_time,
};
-static struct i2c_device_id rv3029_id[] = {
- { "rv3029", 0 },
- { "rv3029c2", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, rv3029_id);
-
-static int rv3029_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
+ const char *name)
{
- struct rtc_device *rtc;
+ struct rv3029_data *rv3029;
int rc = 0;
u8 buf[1];
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_EMUL))
- return -ENODEV;
+ rv3029 = devm_kzalloc(dev, sizeof(*rv3029), GFP_KERNEL);
+ if (!rv3029)
+ return -ENOMEM;
- rc = rv3029_i2c_get_sr(client, buf);
+ rv3029->regmap = regmap;
+ rv3029->irq = irq;
+ rv3029->dev = dev;
+ dev_set_drvdata(dev, rv3029);
+
+ rc = rv3029_get_sr(dev, buf);
if (rc < 0) {
- dev_err(&client->dev, "reading status failed\n");
+ dev_err(dev, "reading status failed\n");
return rc;
}
- rv3029_trickle_config(client);
- rv3029_hwmon_register(client);
-
- rtc = devm_rtc_device_register(&client->dev, client->name,
- &rv3029_rtc_ops, THIS_MODULE);
+ rv3029_trickle_config(dev);
+ rv3029_hwmon_register(dev, name);
- if (IS_ERR(rtc))
- return PTR_ERR(rtc);
+ rv3029->rtc = devm_rtc_device_register(dev, name, &rv3029_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rv3029->rtc)) {
+ dev_err(dev, "unable to register the class device\n");
+ return PTR_ERR(rv3029->rtc);
+ }
- i2c_set_clientdata(client, rtc);
+ if (rv3029->irq > 0) {
+ rc = devm_request_threaded_irq(dev, rv3029->irq,
+ NULL, rv3029_handle_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "rv3029", dev);
+ if (rc) {
+ dev_warn(dev, "unable to request IRQ, alarms disabled\n");
+ rv3029->irq = 0;
+ } else {
+ rv3029_rtc_ops.read_alarm = rv3029_read_alarm;
+ rv3029_rtc_ops.set_alarm = rv3029_set_alarm;
+ rv3029_rtc_ops.alarm_irq_enable = rv3029_alarm_irq_enable;
+ }
+ }
return 0;
}
+#if IS_ENABLED(CONFIG_I2C)
+
+static int rv3029_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK |
+ I2C_FUNC_SMBUS_BYTE)) {
+ dev_err(&client->dev, "Adapter does not support SMBUS_I2C_BLOCK or SMBUS_I2C_BYTE\n");
+ return -ENODEV;
+ }
+
+ regmap = devm_regmap_init_i2c(client, &config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "%s: regmap allocation failed: %ld\n",
+ __func__, PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return rv3029_probe(&client->dev, regmap, client->irq, client->name);
+}
+
+static struct i2c_device_id rv3029_id[] = {
+ { "rv3029", 0 },
+ { "rv3029c2", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rv3029_id);
+
static struct i2c_driver rv3029_driver = {
.driver = {
.name = "rtc-rv3029c2",
},
- .probe = rv3029_probe,
+ .probe = rv3029_i2c_probe,
.id_table = rv3029_id,
};
-module_i2c_driver(rv3029_driver);
+static int rv3029_register_driver(void)
+{
+ return i2c_add_driver(&rv3029_driver);
+}
+
+static void rv3029_unregister_driver(void)
+{
+ i2c_del_driver(&rv3029_driver);
+}
+
+#else
+
+static int rv3029_register_driver(void)
+{
+ return 0;
+}
+
+static void rv3029_unregister_driver(void)
+{
+}
+
+#endif
+
+#if IS_ENABLED(CONFIG_SPI_MASTER)
+
+static int rv3049_probe(struct spi_device *spi)
+{
+ static const struct regmap_config config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ };
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_spi(spi, &config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "%s: regmap allocation failed: %ld\n",
+ __func__, PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ return rv3029_probe(&spi->dev, regmap, spi->irq, "rv3049");
+}
+
+static struct spi_driver rv3049_driver = {
+ .driver = {
+ .name = "rv3049",
+ },
+ .probe = rv3049_probe,
+};
+
+static int rv3049_register_driver(void)
+{
+ return spi_register_driver(&rv3049_driver);
+}
+
+static void rv3049_unregister_driver(void)
+{
+ spi_unregister_driver(&rv3049_driver);
+}
+
+#else
+
+static int rv3049_register_driver(void)
+{
+ return 0;
+}
+
+static void rv3049_unregister_driver(void)
+{
+}
+
+#endif
+
+static int __init rv30x9_init(void)
+{
+ int ret;
+
+ ret = rv3029_register_driver();
+ if (ret) {
+ pr_err("Failed to register rv3029 driver: %d\n", ret);
+ return ret;
+ }
+
+ ret = rv3049_register_driver();
+ if (ret) {
+ pr_err("Failed to register rv3049 driver: %d\n", ret);
+ rv3029_unregister_driver();
+ }
+
+ return ret;
+}
+module_init(rv30x9_init)
+
+static void __exit rv30x9_exit(void)
+{
+ rv3049_unregister_driver();
+ rv3029_unregister_driver();
+}
+module_exit(rv30x9_exit)
MODULE_AUTHOR("Gregory Hermant <gregory.hermant@calao-systems.com>");
MODULE_AUTHOR("Michael Buesch <m@bues.ch>");
-MODULE_DESCRIPTION("Micro Crystal RV3029 RTC driver");
+MODULE_DESCRIPTION("Micro Crystal RV3029/RV3049 RTC driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:rv3049");
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index 161e25d01..0c362a3d1 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -18,8 +18,6 @@
#include <linux/rtc.h>
#include <linux/log2.h>
-#define DRV_VERSION "0.1"
-
#define RX8581_REG_SC 0x00 /* Second in BCD */
#define RX8581_REG_MN 0x01 /* Minute in BCD */
#define RX8581_REG_HR 0x02 /* Hour in BCD */
@@ -292,8 +290,6 @@ static int rx8581_probe(struct i2c_client *client,
rx8581->write_block_data = rx8581_write_block_data;
}
- dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
-
rx8581->rtc = devm_rtc_device_register(&client->dev,
rx8581_driver.driver.name, &rx8581_rtc_ops, THIS_MODULE);
@@ -325,4 +321,3 @@ module_i2c_driver(rx8581_driver);
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_DESCRIPTION("Epson RX-8581 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 2b81dd4ba..a45845a57 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -30,7 +30,6 @@
#include <asm/rtc.h>
#define DRV_NAME "sh-rtc"
-#define DRV_VERSION "0.2.3"
#define RTC_REG(r) ((r) * rtc_reg_size)
@@ -790,7 +789,6 @@ static struct platform_driver sh_rtc_platform_driver = {
module_platform_driver_probe(sh_rtc_platform_driver, sh_rtc_probe);
MODULE_DESCRIPTION("SuperH on-chip RTC driver");
-MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, "
"Jamie Lenehan <lenehan@twibble.org>, "
"Angelo Castello <angelo.castello@st.com>");
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 950c5d0b6..0f11c2a22 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -322,7 +322,7 @@ static int snvs_rtc_suspend(struct device *dev)
struct snvs_rtc_data *data = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
- enable_irq_wake(data->irq);
+ return enable_irq_wake(data->irq);
return 0;
}
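
The snvs one-liner stops discarding the return value of enable_irq_wake(). A hedged sketch of the symmetric suspend/resume pair this implies; the struct and its irq field are stand-ins for the driver's data:

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct sketch_rtc { int irq; };	/* stand-in for the driver's data */

static int sketch_rtc_suspend(struct device *dev)
{
	struct sketch_rtc *data = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		return enable_irq_wake(data->irq);	/* may fail: propagate */
	return 0;
}

static int sketch_rtc_resume(struct device *dev)
{
	struct sketch_rtc *data = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		return disable_irq_wake(data->irq);
	return 0;
}
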
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index ba6a83b5b..a456cb617 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -23,8 +23,6 @@
#include <linux/io.h>
#include <linux/module.h>
-#define DRV_VERSION "0.1"
-
#define RTC_REG_SIZE 0x20000
#define RTC_OFFSET 0x1fff0
@@ -366,4 +364,3 @@ module_platform_driver(stk17ta8_rtc_driver);
MODULE_AUTHOR("Thomas Hommel <thomas.hommel@ge.com>");
MODULE_DESCRIPTION("Simtek STK17TA8 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index ca54d039d..e6aaaa52e 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -107,14 +107,19 @@ static struct stmp3xxx_wdt_pdata wdt_pdata = {
static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
{
+ int rc = -1;
struct platform_device *wdt_pdev =
platform_device_alloc("stmp3xxx_rtc_wdt", rtc_pdev->id);
if (wdt_pdev) {
wdt_pdev->dev.parent = &rtc_pdev->dev;
wdt_pdev->dev.platform_data = &wdt_pdata;
- platform_device_add(wdt_pdev);
+ rc = platform_device_add(wdt_pdev);
}
+
+ if (rc)
+ dev_err(&rtc_pdev->dev,
+ "failed to register stmp3xxx_rtc_wdt\n");
}
#else
static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
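
The stmp3xxx hunk makes the child-device registration report failures from platform_device_add() instead of ignoring them. A fuller hedged variant would also drop the reference on failure; a sketch, with a hypothetical child name:

#include <linux/platform_device.h>

static void sketch_register_child(struct platform_device *parent)
{
	struct platform_device *child =
		platform_device_alloc("sketch-child", parent->id);
	int rc = -ENOMEM;

	if (child) {
		child->dev.parent = &parent->dev;
		rc = platform_device_add(child);
		if (rc)
			platform_device_put(child);	/* undo the alloc */
	}
	if (rc)
		dev_err(&parent->dev,
			"failed to register sketch-child: %d\n", rc);
}
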
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index e404faac6..a3418a8a3 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -344,7 +344,7 @@ static struct platform_driver tps6586x_rtc_driver = {
};
module_platform_driver(tps6586x_rtc_driver);
-MODULE_ALIAS("platform:rtc-tps6586x");
+MODULE_ALIAS("platform:tps6586x-rtc");
MODULE_DESCRIPTION("TI TPS6586x RTC driver");
MODULE_AUTHOR("Laxman dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-x1205.c b/drivers/rtc/rtc-x1205.c
index 5638b7ba8..f08f18e4f 100644
--- a/drivers/rtc/rtc-x1205.c
+++ b/drivers/rtc/rtc-x1205.c
@@ -24,8 +24,6 @@
#include <linux/module.h>
#include <linux/bitops.h>
-#define DRV_VERSION "1.0.8"
-
/* offsets into CCR area */
#define CCR_SEC 0
@@ -634,8 +632,6 @@ static int x1205_probe(struct i2c_client *client,
if (x1205_validate_client(client) < 0)
return -ENODEV;
- dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
-
rtc = devm_rtc_device_register(&client->dev, x1205_driver.driver.name,
&x1205_rtc_ops, THIS_MODULE);
@@ -693,4 +689,3 @@ MODULE_AUTHOR(
"Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("Xicor/Intersil X1205 RTC driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index 8b28762f0..da18a8ae3 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -45,6 +45,7 @@
#define RTC_INT_SEC BIT(0)
#define RTC_INT_ALRM BIT(1)
#define RTC_OSC_EN BIT(24)
+#define RTC_BATT_EN BIT(31)
#define RTC_CALIB_DEF 0x198233
#define RTC_CALIB_MASK 0x1FFFFF
@@ -55,6 +56,7 @@ struct xlnx_rtc_dev {
void __iomem *reg_base;
int alarm_irq;
int sec_irq;
+ int calibval;
};
static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
@@ -62,21 +64,63 @@ static int xlnx_rtc_set_time(struct device *dev, struct rtc_time *tm)
struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
unsigned long new_time;
- new_time = rtc_tm_to_time64(tm);
+ /*
+ * The written value only reaches the seconds read register
+ * after 1 sec, so program time +1 sec to get the correct
+ * time on read.
+ */
+ new_time = rtc_tm_to_time64(tm) + 1;
if (new_time > RTC_SEC_MAX_VAL)
return -EINVAL;
+ /*
+ * Writing into calibration register will clear the Tick Counter and
+ * force the next second to be signaled exactly in 1 second period
+ */
+ xrtcdev->calibval &= RTC_CALIB_MASK;
+ writel(xrtcdev->calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
+
writel(new_time, xrtcdev->reg_base + RTC_SET_TM_WR);
+ /*
+ * Clear the rtc interrupt status register after setting the
+ * time. During a read_time function, the code should read the
+ * RTC_INT_STATUS register and if bit 0 is still 0, it means
+ * that one second has not elapsed yet since RTC was set and
+ * the current time should be read from SET_TIME_READ register;
+ * otherwise, CURRENT_TIME register is read to report the time
+ */
+ writel(RTC_INT_SEC, xrtcdev->reg_base + RTC_INT_STS);
+
return 0;
}
static int xlnx_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
+ u32 status;
+ unsigned long read_time;
struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev);
- rtc_time64_to_tm(readl(xrtcdev->reg_base + RTC_CUR_TM), tm);
+ status = readl(xrtcdev->reg_base + RTC_INT_STS);
+
+ if (status & RTC_INT_SEC) {
+ /*
+ * RTC has updated the CURRENT_TIME with the time written into
+ * SET_TIME_WRITE register.
+ */
+ rtc_time64_to_tm(readl(xrtcdev->reg_base + RTC_CUR_TM), tm);
+ } else {
+ /*
+ * The time written to SET_TIME_WRITE has not yet propagated
+ * to the seconds read register, so read the time from the
+ * SET_TIME_READ register instead of CURRENT_TIME.
+ * Since we add +1 sec while writing, subtract 1 sec while
+ * reading.
+ */
+ read_time = readl(xrtcdev->reg_base + RTC_SET_TM_RD) - 1;
+ rtc_time64_to_tm(read_time, tm);
+ }
return rtc_valid_tm(tm);
}
@@ -120,16 +164,23 @@ static int xlnx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return 0;
}
-static void xlnx_init_rtc(struct xlnx_rtc_dev *xrtcdev, u32 calibval)
+static void xlnx_init_rtc(struct xlnx_rtc_dev *xrtcdev)
{
+ u32 rtc_ctrl;
+
+ /* Enable RTC switch to battery when VCC_PSAUX is not available */
+ rtc_ctrl = readl(xrtcdev->reg_base + RTC_CTRL);
+ rtc_ctrl |= RTC_BATT_EN;
+ writel(rtc_ctrl, xrtcdev->reg_base + RTC_CTRL);
+
/*
* Based on crystal freq of 33.330 KHz
* set the seconds counter and enable, set fractions counter
* to default value suggested as per design spec
* to correct RTC delay in frequency over period of time.
*/
- calibval &= RTC_CALIB_MASK;
- writel(calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
+ xrtcdev->calibval &= RTC_CALIB_MASK;
+ writel(xrtcdev->calibval, (xrtcdev->reg_base + RTC_CALIB_WR));
}
static const struct rtc_class_ops xlnx_rtc_ops = {
@@ -150,11 +201,9 @@ static irqreturn_t xlnx_rtc_interrupt(int irq, void *id)
if (!(status & (RTC_INT_SEC | RTC_INT_ALRM)))
return IRQ_NONE;
- /* Clear interrupt */
- writel(status, xrtcdev->reg_base + RTC_INT_STS);
+ /* Clear RTC_INT_ALRM interrupt only */
+ writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_STS);
- if (status & RTC_INT_SEC)
- rtc_update_irq(xrtcdev->rtc, 1, RTC_IRQF | RTC_UF);
if (status & RTC_INT_ALRM)
rtc_update_irq(xrtcdev->rtc, 1, RTC_IRQF | RTC_AF);
@@ -166,7 +215,6 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
struct xlnx_rtc_dev *xrtcdev;
struct resource *res;
int ret;
- unsigned int calibvalue;
xrtcdev = devm_kzalloc(&pdev->dev, sizeof(*xrtcdev), GFP_KERNEL);
if (!xrtcdev)
@@ -207,11 +255,11 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
}
ret = of_property_read_u32(pdev->dev.of_node, "calibration",
- &calibvalue);
+ &xrtcdev->calibval);
if (ret)
- calibvalue = RTC_CALIB_DEF;
+ xrtcdev->calibval = RTC_CALIB_DEF;
- xlnx_init_rtc(xrtcdev, calibvalue);
+ xlnx_init_rtc(xrtcdev);
device_init_wakeup(&pdev->dev, 1);
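
The zynqmp set_time/read_time pair compensates for the controller's one-second load latency: set_time writes time +1 and clears the seconds interrupt, and read_time uses that same status bit to decide which register currently holds the truth. A condensed, hedged sketch of the read side, using the register macros from the hunk above:

#include <linux/io.h>
#include <linux/rtc.h>

static int sketch_read_time(void __iomem *base, struct rtc_time *tm)
{
	u32 status = readl(base + RTC_INT_STS);	/* macros as in the hunk */
	unsigned long secs;

	if (status & RTC_INT_SEC)		/* counter has caught up */
		secs = readl(base + RTC_CUR_TM);
	else					/* undo set_time's +1 */
		secs = readl(base + RTC_SET_TM_RD) - 1;

	rtc_time64_to_tm(secs, tm);
	return rtc_valid_tm(tm);
}
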
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index c78db05e7..8973d34ce 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -75,6 +75,8 @@ static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
+static void dasd_hosts_init(struct dentry *, struct dasd_device *);
+static void dasd_hosts_exit(struct dasd_device *);
/*
* SECTION: Operations on the device structure.
@@ -267,6 +269,7 @@ static int dasd_state_known_to_basic(struct dasd_device *device)
dasd_debugfs_setup(dev_name(&device->cdev->dev),
dasd_debugfs_root_entry);
dasd_profile_init(&device->profile, device->debugfs_dentry);
+ dasd_hosts_init(device->debugfs_dentry, device);
/* register 'device' debug area, used for all DBF_DEV_XXX calls */
device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
@@ -304,6 +307,7 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
return rc;
dasd_device_clear_timer(device);
dasd_profile_exit(&device->profile);
+ dasd_hosts_exit(device);
debugfs_remove(device->debugfs_dentry);
DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
if (device->debug_area != NULL) {
@@ -1150,6 +1154,58 @@ int dasd_profile_on(struct dasd_profile *profile)
#endif /* CONFIG_DASD_PROFILE */
+static int dasd_hosts_show(struct seq_file *m, void *v)
+{
+ struct dasd_device *device;
+ int rc = -EOPNOTSUPP;
+
+ device = m->private;
+ dasd_get_device(device);
+
+ if (device->discipline->hosts_print)
+ rc = device->discipline->hosts_print(device, m);
+
+ dasd_put_device(device);
+ return rc;
+}
+
+static int dasd_hosts_open(struct inode *inode, struct file *file)
+{
+ struct dasd_device *device = inode->i_private;
+
+ return single_open(file, dasd_hosts_show, device);
+}
+
+static const struct file_operations dasd_hosts_fops = {
+ .owner = THIS_MODULE,
+ .open = dasd_hosts_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void dasd_hosts_exit(struct dasd_device *device)
+{
+ debugfs_remove(device->hosts_dentry);
+ device->hosts_dentry = NULL;
+}
+
+static void dasd_hosts_init(struct dentry *base_dentry,
+ struct dasd_device *device)
+{
+ struct dentry *pde;
+ umode_t mode;
+
+ if (!base_dentry)
+ return;
+
+ mode = S_IRUSR | S_IFREG;
+ pde = debugfs_create_file("host_access_list", mode, base_dentry,
+ device, &dasd_hosts_fops);
+ if (pde && !IS_ERR(pde))
+ device->hosts_dentry = pde;
+}
+
/*
* Allocate memory for a channel program with 'cplength' channel
* command words and 'datasize' additional space. There are two
@@ -1582,6 +1638,9 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct dasd_ccw_req *cqr, *next;
struct dasd_device *device;
unsigned long long now;
+ int nrf_suppressed = 0;
+ int fp_suppressed = 0;
+ u8 *sense = NULL;
int expires;
if (IS_ERR(irb)) {
@@ -1617,7 +1676,23 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
dasd_put_device(device);
return;
}
- device->discipline->dump_sense_dbf(device, irb, "int");
+
+ /*
+ * In some cases 'File Protected' or 'No Record Found' errors
+ * might be expected and debug log messages for the
+ * corresponding interrupts shouldn't be written then.
+ * Check if either of the corresponding suppress bits is set.
+ */
+ sense = dasd_get_sense(irb);
+ if (sense) {
+ fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
+ test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+ nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
+ test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+ }
+ if (!(fp_suppressed || nrf_suppressed))
+ device->discipline->dump_sense_dbf(device, irb, "int");
+
if (device->features & DASD_FEATURE_ERPLOG)
device->discipline->dump_sense(device, cqr, irb);
device->discipline->check_for_device_change(device, cqr, irb);
@@ -2256,6 +2331,7 @@ static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
{
struct dasd_device *device;
struct dasd_ccw_req *cqr, *n;
+ u8 *sense = NULL;
int rc;
retry:
@@ -2302,6 +2378,20 @@ retry:
rc = 0;
list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
/*
+ * In some cases the 'File Protected' or 'Incorrect Length'
+ * error might be expected and error recovery would be
+ * unnecessary in these cases. Check if the corresponding
+ * suppress bit is set.
+ */
+ sense = dasd_get_sense(&cqr->irb);
+ if (sense && sense[1] & SNS1_FILE_PROTECTED &&
+ test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
+ continue;
+ if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
+ test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
+ continue;
+
+ /*
* for alias devices simplify error recovery and
* return to upper layer
* do not skip ERP requests
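
The dasd.c changes thread an "expected error, keep quiet" hint through the request flags: the code that builds a request sets a DASD_CQR_SUPPRESS_* bit, and the interrupt and ERP paths test the same bit before logging. Both halves, condensed into a hedged fragment; the surrounding locals (cqr, irb, device, sense) are assumed from the driver context:

/* Builder side: this request may legitimately hit 'No Record Found'. */
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

/* Consumer side: suppress the debug dump for the expected error only. */
sense = dasd_get_sense(irb);
suppressed = sense && (sense[1] & SNS1_NO_REC_FOUND) &&
	     test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
if (!suppressed)
	device->discipline->dump_sense_dbf(device, irb, "int");
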
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index d26134713..8305ab688 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1367,8 +1367,14 @@ dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
struct dasd_device *device = default_erp->startdev;
- dev_err(&device->cdev->dev,
- "The specified record was not found\n");
+ /*
+ * In some cases the 'No Record Found' error might be expected and
+ * log messages shouldn't be written then.
+ * Check if the corresponding suppress bit is set.
+ */
+ if (!test_bit(DASD_CQR_SUPPRESS_NRF, &default_erp->flags))
+ dev_err(&device->cdev->dev,
+ "The specified record was not found\n");
return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
@@ -1393,8 +1399,14 @@ dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
struct dasd_device *device = erp->startdev;
- dev_err(&device->cdev->dev, "Accessing the DASD failed because of "
- "a hardware error\n");
+ /*
+ * In some cases the 'File Protected' error might be expected and
+ * log messages shouldn't be written then.
+ * Check if the corresponding suppress bit is set.
+ */
+ if (!test_bit(DASD_CQR_SUPPRESS_FP, &erp->flags))
+ dev_err(&device->cdev->dev,
+ "Accessing the DASD failed because of a hardware error\n");
return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 2f18f6109..3cdbce45e 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -982,6 +982,32 @@ out:
static DEVICE_ATTR(safe_offline, 0200, NULL, dasd_safe_offline_store);
static ssize_t
+dasd_access_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ccw_device *cdev = to_ccwdev(dev);
+ struct dasd_device *device;
+ int count;
+
+ device = dasd_device_from_cdev(cdev);
+ if (IS_ERR(device))
+ return PTR_ERR(device);
+
+ if (device->discipline->host_access_count)
+ count = device->discipline->host_access_count(device);
+ else
+ count = -EOPNOTSUPP;
+
+ dasd_put_device(device);
+ if (count < 0)
+ return count;
+
+ return sprintf(buf, "%d\n", count);
+}
+
+static DEVICE_ATTR(host_access_count, 0444, dasd_access_show, NULL);
+
+static ssize_t
dasd_discipline_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1471,6 +1497,7 @@ static struct attribute * dasd_attrs[] = {
&dev_attr_reservation_policy.attr,
&dev_attr_last_known_reservation_state.attr,
&dev_attr_safe_offline.attr,
+ &dev_attr_host_access_count.attr,
&dev_attr_path_masks.attr,
NULL,
};
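
host_access_count is exported with the standard read-only device-attribute pattern: a show() callback formats one value, DEVICE_ATTR() binds it with 0444 permissions, and the attribute is listed in the device's attribute array. A hedged sketch with a hypothetical counter helper:

#include <linux/device.h>

static int sketch_get_count(struct device *dev);	/* hypothetical */

static ssize_t sketch_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	int count = sketch_get_count(dev);

	if (count < 0)
		return count;		/* errno propagates to read(2) */
	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR(sketch_count, 0444, sketch_count_show, NULL);

static struct attribute *sketch_attrs[] = {
	&dev_attr_sketch_count.attr,
	NULL,
};
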
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index c1b4ae55e..42b34cd1f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
+#include <linux/seq_file.h>
#include <asm/css_chars.h>
#include <asm/debug.h>
@@ -120,6 +121,11 @@ struct check_attention_work_data {
__u8 lpum;
};
+static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
+ struct dasd_device *, struct dasd_device *,
+ unsigned int, int, unsigned int, unsigned int,
+ unsigned int, unsigned int);
+
/* initial attempt at a probe function. this can be simplified once
* the other detection code is gone */
static int
@@ -256,10 +262,13 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
case DASD_ECKD_CCW_READ_CKD_MT:
case DASD_ECKD_CCW_READ_KD:
case DASD_ECKD_CCW_READ_KD_MT:
- case DASD_ECKD_CCW_READ_COUNT:
data->mask.perm = 0x1;
data->attributes.operation = private->attrib.operation;
break;
+ case DASD_ECKD_CCW_READ_COUNT:
+ data->mask.perm = 0x1;
+ data->attributes.operation = DASD_BYPASS_CACHE;
+ break;
case DASD_ECKD_CCW_WRITE:
case DASD_ECKD_CCW_WRITE_MT:
case DASD_ECKD_CCW_WRITE_KD:
@@ -528,10 +537,13 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
case DASD_ECKD_CCW_READ_CKD_MT:
case DASD_ECKD_CCW_READ_KD:
case DASD_ECKD_CCW_READ_KD_MT:
- case DASD_ECKD_CCW_READ_COUNT:
dedata->mask.perm = 0x1;
dedata->attributes.operation = basepriv->attrib.operation;
break;
+ case DASD_ECKD_CCW_READ_COUNT:
+ dedata->mask.perm = 0x1;
+ dedata->attributes.operation = DASD_BYPASS_CACHE;
+ break;
case DASD_ECKD_CCW_READ_TRACK:
case DASD_ECKD_CCW_READ_TRACK_DATA:
dedata->mask.perm = 0x1;
@@ -2095,6 +2107,180 @@ dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
return 0;
}
+/*
+ * Build the TCW request for the format check
+ */
+static struct dasd_ccw_req *
+dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
+ int enable_pav, struct eckd_count *fmt_buffer,
+ int rpt)
+{
+ struct dasd_eckd_private *start_priv;
+ struct dasd_device *startdev = NULL;
+ struct tidaw *last_tidaw = NULL;
+ struct dasd_ccw_req *cqr;
+ struct itcw *itcw;
+ int itcw_size;
+ int count;
+ int rc;
+ int i;
+
+ if (enable_pav)
+ startdev = dasd_alias_get_start_dev(base);
+
+ if (!startdev)
+ startdev = base;
+
+ start_priv = startdev->private;
+
+ count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
+
+ /*
+ * we're adding 'count' amount of tidaw to the itcw.
+ * calculate the corresponding itcw_size
+ */
+ itcw_size = itcw_calc_size(0, count, 0);
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+ if (IS_ERR(cqr))
+ return cqr;
+
+ start_priv->count++;
+
+ itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
+ if (IS_ERR(itcw)) {
+ rc = -EINVAL;
+ goto out_err;
+ }
+
+ cqr->cpaddr = itcw_get_tcw(itcw);
+ rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
+ sizeof(struct eckd_count),
+ count * sizeof(struct eckd_count), 0, rpt);
+ if (rc)
+ goto out_err;
+
+ for (i = 0; i < count; i++) {
+ last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
+ sizeof(struct eckd_count));
+ if (IS_ERR(last_tidaw)) {
+ rc = -EINVAL;
+ goto out_err;
+ }
+ }
+
+ last_tidaw->flags |= TIDAW_FLAGS_LAST;
+ itcw_finalize(itcw);
+
+ cqr->cpmode = 1;
+ cqr->startdev = startdev;
+ cqr->memdev = startdev;
+ cqr->basedev = base;
+ cqr->retries = startdev->default_retries;
+ cqr->expires = startdev->default_expires * HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ /* Set flags to suppress output for expected errors */
+ set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
+ set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
+
+ return cqr;
+
+out_err:
+ dasd_sfree_request(cqr, startdev);
+
+ return ERR_PTR(rc);
+}
+
+/*
+ * Build the CCW request for the format check
+ */
+static struct dasd_ccw_req *
+dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
+ int enable_pav, struct eckd_count *fmt_buffer, int rpt)
+{
+ struct dasd_eckd_private *start_priv;
+ struct dasd_eckd_private *base_priv;
+ struct dasd_device *startdev = NULL;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ void *data;
+ int cplength, datasize;
+ int use_prefix;
+ int count;
+ int i;
+
+ if (enable_pav)
+ startdev = dasd_alias_get_start_dev(base);
+
+ if (!startdev)
+ startdev = base;
+
+ start_priv = startdev->private;
+ base_priv = base->private;
+
+ count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
+
+ use_prefix = base_priv->features.feature[8] & 0x01;
+
+ if (use_prefix) {
+ cplength = 1;
+ datasize = sizeof(struct PFX_eckd_data);
+ } else {
+ cplength = 2;
+ datasize = sizeof(struct DE_eckd_data) +
+ sizeof(struct LO_eckd_data);
+ }
+ cplength += count;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
+ startdev);
+ if (IS_ERR(cqr))
+ return cqr;
+
+ start_priv->count++;
+ data = cqr->data;
+ ccw = cqr->cpaddr;
+
+ if (use_prefix) {
+ prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
+ count, 0, 0);
+ } else {
+ define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
+ DASD_ECKD_CCW_READ_COUNT, startdev);
+
+ data += sizeof(struct DE_eckd_data);
+ ccw[-1].flags |= CCW_FLAG_CC;
+
+ locate_record(ccw++, data, fdata->start_unit, 0, count,
+ DASD_ECKD_CCW_READ_COUNT, base, 0);
+ }
+
+ for (i = 0; i < count; i++) {
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
+ ccw->flags = CCW_FLAG_SLI;
+ ccw->count = 8;
+ ccw->cda = (__u32)(addr_t) fmt_buffer;
+ ccw++;
+ fmt_buffer++;
+ }
+
+ cqr->startdev = startdev;
+ cqr->memdev = startdev;
+ cqr->basedev = base;
+ cqr->retries = DASD_RETRIES;
+ cqr->expires = startdev->default_expires * HZ;
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ /* Set flags to suppress output for expected errors */
+ set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
+
+ return cqr;
+}
+
static struct dasd_ccw_req *
dasd_eckd_build_format(struct dasd_device *base,
struct format_data_t *fdata,
@@ -2362,9 +2548,24 @@ dasd_eckd_build_format(struct dasd_device *base,
*/
static struct dasd_ccw_req *
dasd_eckd_format_build_ccw_req(struct dasd_device *base,
- struct format_data_t *fdata, int enable_pav)
+ struct format_data_t *fdata, int enable_pav,
+ int tpm, struct eckd_count *fmt_buffer, int rpt)
{
- return dasd_eckd_build_format(base, fdata, enable_pav);
+ struct dasd_ccw_req *ccw_req;
+
+ if (!fmt_buffer) {
+ ccw_req = dasd_eckd_build_format(base, fdata, enable_pav);
+ } else {
+ if (tpm)
+ ccw_req = dasd_eckd_build_check_tcw(base, fdata,
+ enable_pav,
+ fmt_buffer, rpt);
+ else
+ ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
+ fmt_buffer, rpt);
+ }
+
+ return ccw_req;
}
/*
@@ -2409,12 +2610,15 @@ static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
*/
static int dasd_eckd_format_process_data(struct dasd_device *base,
struct format_data_t *fdata,
- int enable_pav)
+ int enable_pav, int tpm,
+ struct eckd_count *fmt_buffer, int rpt,
+ struct irb *irb)
{
struct dasd_eckd_private *private = base->private;
struct dasd_ccw_req *cqr, *n;
struct list_head format_queue;
struct dasd_device *device;
+ char *sense = NULL;
int old_start, old_stop, format_step;
int step, retry;
int rc;
@@ -2428,8 +2632,18 @@ static int dasd_eckd_format_process_data(struct dasd_device *base,
old_start = fdata->start_unit;
old_stop = fdata->stop_unit;
- format_step = DASD_CQR_MAX_CCW / recs_per_track(&private->rdc_data, 0,
- fdata->blksize);
+ if (!tpm && fmt_buffer != NULL) {
+ /* Command Mode / Format Check */
+ format_step = 1;
+ } else if (tpm && fmt_buffer != NULL) {
+ /* Transport Mode / Format Check */
+ format_step = DASD_CQR_MAX_CCW / rpt;
+ } else {
+ /* Normal Formatting */
+ format_step = DASD_CQR_MAX_CCW /
+ recs_per_track(&private->rdc_data, 0, fdata->blksize);
+ }
+
do {
retry = 0;
while (fdata->start_unit <= old_stop) {
@@ -2440,7 +2654,8 @@ static int dasd_eckd_format_process_data(struct dasd_device *base,
}
cqr = dasd_eckd_format_build_ccw_req(base, fdata,
- enable_pav);
+ enable_pav, tpm,
+ fmt_buffer, rpt);
if (IS_ERR(cqr)) {
rc = PTR_ERR(cqr);
if (rc == -ENOMEM) {
@@ -2458,6 +2673,10 @@ static int dasd_eckd_format_process_data(struct dasd_device *base,
}
list_add_tail(&cqr->blocklist, &format_queue);
+ if (fmt_buffer) {
+ step = fdata->stop_unit - fdata->start_unit + 1;
+ fmt_buffer += rpt * step;
+ }
fdata->start_unit = fdata->stop_unit + 1;
fdata->stop_unit = old_stop;
}
@@ -2468,15 +2687,41 @@ out_err:
list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
device = cqr->startdev;
private = device->private;
- if (cqr->status == DASD_CQR_FAILED)
+
+ if (cqr->status == DASD_CQR_FAILED) {
+ /*
+ * Only get sense data if called by format check.
+ */
+ if (fmt_buffer && irb) {
+ sense = dasd_get_sense(&cqr->irb);
+ memcpy(irb, &cqr->irb, sizeof(*irb));
+ }
rc = -EIO;
+ }
list_del_init(&cqr->blocklist);
dasd_sfree_request(cqr, device);
private->count--;
}
- if (rc)
+ if (rc && rc != -EIO)
goto out;
+ if (rc == -EIO) {
+ /*
+ * In case fewer than the expected records are on the
+ * track, we will most likely get a 'No Record Found'
+ * error (in command mode) or a 'File Protected' error
+ * (in transport mode). Those particular cases shouldn't
+ * pass the -EIO to the IOCTL, therefore reset the rc
+ * and continue.
+ */
+ if (sense &&
+ (sense[1] & SNS1_NO_REC_FOUND ||
+ sense[1] & SNS1_FILE_PROTECTED))
+ retry = 1;
+ else
+ goto out;
+ }
} while (retry);
@@ -2490,7 +2735,225 @@ out:
static int dasd_eckd_format_device(struct dasd_device *base,
struct format_data_t *fdata, int enable_pav)
{
- return dasd_eckd_format_process_data(base, fdata, enable_pav);
+ return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
+ 0, NULL);
+}
+
+/*
+ * Helper function to count consecutive records of a single track.
+ */
+static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
+ int max)
+{
+ int head;
+ int i;
+
+ head = fmt_buffer[start].head;
+
+ /*
+ * There are 3 conditions where we stop counting:
+ * - if data reoccurs (same head and record may reoccur), which may
+ * happen due to the way DASD_ECKD_CCW_READ_COUNT works
+ * - when the head changes, because we're iterating over several tracks
+ * then (DASD_ECKD_CCW_READ_COUNT_MT)
+ * - when we've reached the end of sensible data in the buffer (the
+ * record will be 0 then)
+ */
+ for (i = start; i < max; i++) {
+ if (i > start) {
+ if ((fmt_buffer[i].head == head &&
+ fmt_buffer[i].record == 1) ||
+ fmt_buffer[i].head != head ||
+ fmt_buffer[i].record == 0)
+ break;
+ }
+ }
+
+ return i - start;
+}
+
+/*
+ * Evaluate a given range of tracks. Data like number of records, blocksize,
+ * record ids, and key length are compared with expected data.
+ *
+ * If a mismatch occurs, the corresponding error bit is set, as well as
+ * additional information, depending on the error.
+ */
+static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
+ struct format_check_t *cdata,
+ int rpt_max, int rpt_exp,
+ int trk_per_cyl, int tpm)
+{
+ struct ch_t geo;
+ int max_entries;
+ int count = 0;
+ int trkcount;
+ int blksize;
+ int pos = 0;
+ int i, j;
+ int kl;
+
+ trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
+ max_entries = trkcount * rpt_max;
+
+ for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
+ /* Calculate the correct next starting position in the buffer */
+ if (tpm) {
+ while (fmt_buffer[pos].record == 0 &&
+ fmt_buffer[pos].dl == 0) {
+ if (pos++ > max_entries)
+ break;
+ }
+ } else {
+ if (i != cdata->expect.start_unit)
+ pos += rpt_max - count;
+ }
+
+ /* Calculate the expected geo values for the current track */
+ set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
+
+ /* Count and check number of records */
+ count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
+
+ if (count < rpt_exp) {
+ cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
+ break;
+ }
+ if (count > rpt_exp) {
+ cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
+ break;
+ }
+
+ for (j = 0; j < count; j++, pos++) {
+ blksize = cdata->expect.blksize;
+ kl = 0;
+
+ /*
+ * Set special values when checking CDL formatted
+ * devices.
+ */
+ if ((cdata->expect.intensity & 0x08) &&
+ geo.cyl == 0 && geo.head == 0) {
+ if (j < 3) {
+ blksize = sizes_trk0[j] - 4;
+ kl = 4;
+ }
+ }
+ if ((cdata->expect.intensity & 0x08) &&
+ geo.cyl == 0 && geo.head == 1) {
+ blksize = LABEL_SIZE - 44;
+ kl = 44;
+ }
+
+ /* Check blocksize */
+ if (fmt_buffer[pos].dl != blksize) {
+ cdata->result = DASD_FMT_ERR_BLKSIZE;
+ goto out;
+ }
+ /* Check if key length is 0 */
+ if (fmt_buffer[pos].kl != kl) {
+ cdata->result = DASD_FMT_ERR_KEY_LENGTH;
+ goto out;
+ }
+ /* Check if record_id is correct */
+ if (fmt_buffer[pos].cyl != geo.cyl ||
+ fmt_buffer[pos].head != geo.head ||
+ fmt_buffer[pos].record != (j + 1)) {
+ cdata->result = DASD_FMT_ERR_RECORD_ID;
+ goto out;
+ }
+ }
+ }
+
+out:
+ /*
+ * In case of no errors, we need to decrease by one
+ * to get the correct positions.
+ */
+ if (!cdata->result) {
+ i--;
+ pos--;
+ }
+
+ cdata->unit = i;
+ cdata->num_records = count;
+ cdata->rec = fmt_buffer[pos].record;
+ cdata->blksize = fmt_buffer[pos].dl;
+ cdata->key_length = fmt_buffer[pos].kl;
+}
+
+/*
+ * Check the format of a range of tracks of a DASD.
+ */
+static int dasd_eckd_check_device_format(struct dasd_device *base,
+ struct format_check_t *cdata,
+ int enable_pav)
+{
+ struct dasd_eckd_private *private = base->private;
+ struct eckd_count *fmt_buffer;
+ struct irb irb;
+ int rpt_max, rpt_exp;
+ int fmt_buffer_size;
+ int trk_per_cyl;
+ int trkcount;
+ int tpm = 0;
+ int rc;
+
+ trk_per_cyl = private->rdc_data.trk_per_cyl;
+
+ /* Get maximum and expected amount of records per track */
+ rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
+ rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
+
+ trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
+ fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
+
+ fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
+ if (!fmt_buffer)
+ return -ENOMEM;
+
+ /*
+ * A certain FICON feature subset is needed to operate in transport
+ * mode. Additionally, the support for transport mode is implicitly
+ * checked by comparing the buffer size with fcx_max_data. As long as
+ * the buffer size is smaller, we can operate in transport mode and
+ * process multiple tracks. If not, only one track at a time is
+ * processed using command mode.
+ */
+ if ((private->features.feature[40] & 0x04) &&
+ fmt_buffer_size <= private->fcx_max_data)
+ tpm = 1;
+
+ rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
+ tpm, fmt_buffer, rpt_max, &irb);
+ if (rc && rc != -EIO)
+ goto out;
+ if (rc == -EIO) {
+ /*
+ * If our first attempt with transport mode enabled comes back
+ * with an incorrect length error, we're going to retry the
+ * check with command mode.
+ */
+ if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
+ tpm = 0;
+ rc = dasd_eckd_format_process_data(base, &cdata->expect,
+ enable_pav, tpm,
+ fmt_buffer, rpt_max,
+ &irb);
+ if (rc)
+ goto out;
+ } else {
+ goto out;
+ }
+ }
+
+ dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
+ trk_per_cyl, tpm);
+
+out:
+ kfree(fmt_buffer);
+
+ return rc;
}
static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
@@ -3037,6 +3500,16 @@ static int prepare_itcw(struct itcw *itcw,
lredata->auxiliary.check_bytes = 0x2;
pfx_cmd = DASD_ECKD_CCW_PFX;
break;
+ case DASD_ECKD_CCW_READ_COUNT_MT:
+ dedata->mask.perm = 0x1;
+ dedata->attributes.operation = DASD_BYPASS_CACHE;
+ dedata->ga_extended |= 0x42;
+ dedata->blk_size = blksize;
+ lredata->operation.orientation = 0x2;
+ lredata->operation.operation = 0x16;
+ lredata->auxiliary.check_bytes = 0x01;
+ pfx_cmd = DASD_ECKD_CCW_PFX_READ;
+ break;
default:
DBF_DEV_EVENT(DBF_ERR, basedev,
"prepare itcw, unknown opcode 0x%x", cmd);
@@ -3084,13 +3557,19 @@ static int prepare_itcw(struct itcw *itcw,
}
}
- lredata->auxiliary.length_valid = 1;
- lredata->auxiliary.length_scope = 1;
+ if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
+ lredata->auxiliary.length_valid = 0;
+ lredata->auxiliary.length_scope = 0;
+ lredata->sector = 0xff;
+ } else {
+ lredata->auxiliary.length_valid = 1;
+ lredata->auxiliary.length_scope = 1;
+ lredata->sector = sector;
+ }
lredata->auxiliary.imbedded_ccw_valid = 1;
lredata->length = tlf;
lredata->imbedded_ccw = cmd;
lredata->count = count;
- lredata->sector = sector;
set_ch_t(&lredata->seek_addr, begcyl, beghead);
lredata->search_arg.cyl = lredata->seek_addr.cyl;
lredata->search_arg.head = lredata->seek_addr.head;
@@ -4412,10 +4891,34 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
static void dasd_eckd_dump_sense(struct dasd_device *device,
struct dasd_ccw_req *req, struct irb *irb)
{
- if (scsw_is_tm(&irb->scsw))
+ u8 *sense = dasd_get_sense(irb);
+
+ if (scsw_is_tm(&irb->scsw)) {
+ /*
+ * In some cases the 'File Protected' or 'Incorrect Length'
+ * error might be expected and log messages shouldn't be written
+ * then. Check if the according suppress bit is set.
+ */
+ if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
+ test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
+ return;
+ if (scsw_cstat(&irb->scsw) == 0x40 &&
+ test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
+ return;
+
dasd_eckd_dump_sense_tcw(device, req, irb);
- else
+ } else {
+ /*
+ * In some cases the 'No Record Found' error might be expected
+ * and log messages shouldn't be written then. Check if the
+ * corresponding suppress bit is set.
+ */
+ if (sense && sense[1] & SNS1_NO_REC_FOUND &&
+ test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
+ return;
+
dasd_eckd_dump_sense_ccw(device, req, irb);
+ }
}
static int dasd_eckd_pm_freeze(struct dasd_device *device)
@@ -4627,6 +5130,167 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
return rc;
}
+static int dasd_eckd_query_host_access(struct dasd_device *device,
+ struct dasd_psf_query_host_access *data)
+{
+ struct dasd_eckd_private *private = device->private;
+ struct dasd_psf_query_host_access *host_access;
+ struct dasd_psf_prssd_data *prssdp;
+ struct dasd_ccw_req *cqr;
+ struct ccw1 *ccw;
+ int rc;
+
+ /* not available for HYPER PAV alias devices */
+ if (!device->block && private->lcu->pav == HYPER_PAV)
+ return -EOPNOTSUPP;
+
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+ sizeof(struct dasd_psf_prssd_data) + 1,
+ device);
+ if (IS_ERR(cqr)) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate read message buffer request");
+ return PTR_ERR(cqr);
+ }
+ host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
+ if (!host_access) {
+ dasd_sfree_request(cqr, device);
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate host_access buffer");
+ return -ENOMEM;
+ }
+ cqr->startdev = device;
+ cqr->memdev = device;
+ cqr->block = NULL;
+ cqr->retries = 256;
+ cqr->expires = 10 * HZ;
+
+ /* Prepare for Read Subsystem Data */
+ prssdp = (struct dasd_psf_prssd_data *) cqr->data;
+ memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
+ prssdp->order = PSF_ORDER_PRSSD;
+ prssdp->suborder = PSF_SUBORDER_QHA; /* query host access */
+ /* LSS and Volume that will be queried */
+ prssdp->lss = private->ned->ID;
+ prssdp->volume = private->ned->unit_addr;
+ /* all other bytes of prssdp must be zero */
+
+ ccw = cqr->cpaddr;
+ ccw->cmd_code = DASD_ECKD_CCW_PSF;
+ ccw->count = sizeof(struct dasd_psf_prssd_data);
+ ccw->flags |= CCW_FLAG_CC;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)(addr_t) prssdp;
+
+ /* Read Subsystem Data - query host access */
+ ccw++;
+ ccw->cmd_code = DASD_ECKD_CCW_RSSD;
+ ccw->count = sizeof(struct dasd_psf_query_host_access);
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw->cda = (__u32)(addr_t) host_access;
+
+ cqr->buildclk = get_tod_clock();
+ cqr->status = DASD_CQR_FILLED;
+ rc = dasd_sleep_on(cqr);
+ if (rc == 0) {
+ *data = *host_access;
+ } else {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Reading host access data failed with rc=%d\n",
+ rc);
+ rc = -EOPNOTSUPP;
+ }
+
+ dasd_sfree_request(cqr, cqr->memdev);
+ kfree(host_access);
+ return rc;
+}
+/*
+ * return number of grouped devices
+ */
+static int dasd_eckd_host_access_count(struct dasd_device *device)
+{
+ struct dasd_psf_query_host_access *access;
+ struct dasd_ckd_path_group_entry *entry;
+ struct dasd_ckd_host_information *info;
+ int count = 0;
+ int rc, i;
+
+ access = kzalloc(sizeof(*access), GFP_NOIO);
+ if (!access) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate access buffer");
+ return -ENOMEM;
+ }
+ rc = dasd_eckd_query_host_access(device, access);
+ if (rc) {
+ kfree(access);
+ return rc;
+ }
+
+ info = (struct dasd_ckd_host_information *)
+ access->host_access_information;
+ for (i = 0; i < info->entry_count; i++) {
+ entry = (struct dasd_ckd_path_group_entry *)
+ (info->entry + i * info->entry_size);
+ if (entry->status_flags & DASD_ECKD_PG_GROUPED)
+ count++;
+ }
+
+ kfree(access);
+ return count;
+}
+
+/*
+ * write host access information to a sequential file
+ */
+static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
+{
+ struct dasd_psf_query_host_access *access;
+ struct dasd_ckd_path_group_entry *entry;
+ struct dasd_ckd_host_information *info;
+ char sysplex[9] = "";
+ int rc, i, j;
+
+ access = kzalloc(sizeof(*access), GFP_NOIO);
+ if (!access) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate access buffer");
+ return -ENOMEM;
+ }
+ rc = dasd_eckd_query_host_access(device, access);
+ if (rc) {
+ kfree(access);
+ return rc;
+ }
+
+ info = (struct dasd_ckd_host_information *)
+ access->host_access_information;
+ for (i = 0; i < info->entry_count; i++) {
+ entry = (struct dasd_ckd_path_group_entry *)
+ (info->entry + i * info->entry_size);
+ /* PGID */
+ seq_puts(m, "pgid ");
+ for (j = 0; j < 11; j++)
+ seq_printf(m, "%02x", entry->pgid[j]);
+ seq_putc(m, '\n');
+ /* FLAGS */
+ seq_printf(m, "status_flags %02x\n", entry->status_flags);
+ /* SYSPLEX NAME */
+ memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
+ EBCASC(sysplex, sizeof(sysplex));
+ seq_printf(m, "sysplex_name %8s\n", sysplex);
+ /* SUPPORTED CYLINDER */
+ seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
+ /* TIMESTAMP */
+ seq_printf(m, "timestamp %lu\n", (unsigned long)
+ entry->timestamp);
+ }
+ kfree(access);
+
+ return 0;
+}
+
/*
* Perform Subsystem Function - CUIR response
*/
@@ -5084,6 +5748,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
.term_IO = dasd_term_IO,
.handle_terminated_request = dasd_eckd_handle_terminated_request,
.format_device = dasd_eckd_format_device,
+ .check_device_format = dasd_eckd_check_device_format,
.erp_action = dasd_eckd_erp_action,
.erp_postaction = dasd_eckd_erp_postaction,
.check_for_device_change = dasd_eckd_check_for_device_change,
@@ -5099,6 +5764,8 @@ static struct dasd_discipline dasd_eckd_discipline = {
.get_uid = dasd_eckd_get_uid,
.kick_validate = dasd_eckd_kick_validate_server,
.check_attention = dasd_eckd_check_attention,
+ .host_access_count = dasd_eckd_host_access_count,
+ .hosts_print = dasd_hosts_print,
};
static int __init
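The three-way format_step selection earlier in this file's diff reduces to a small decision. As a sketch outside the driver — DASD_CQR_MAX_CCW and recs_per_track() are the driver's own symbols, while the function and parameter names here are illustrative only:

    /* Sketch: how many tracks one CCW request may cover, depending on the mode. */
    static int format_step_for_mode(int check, int tpm, int rpt, int rpt_fmt)
    {
            if (check && !tpm)
                    return 1;                      /* command-mode format check */
            if (check && tpm)
                    return DASD_CQR_MAX_CCW / rpt; /* transport-mode format check */
            return DASD_CQR_MAX_CCW / rpt_fmt;     /* normal formatting */
    }

Here rpt is the records-per-track count handed to dasd_eckd_format_process_data() and rpt_fmt stands for recs_per_track(&private->rdc_data, 0, fdata->blksize), matching the hunk above.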
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 6d9a6d351..59803626e 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -35,6 +35,7 @@
#define DASD_ECKD_CCW_READ_MT 0x86
#define DASD_ECKD_CCW_WRITE_KD_MT 0x8d
#define DASD_ECKD_CCW_READ_KD_MT 0x8e
+#define DASD_ECKD_CCW_READ_COUNT_MT 0x92
#define DASD_ECKD_CCW_RELEASE 0x94
#define DASD_ECKD_CCW_WRITE_FULL_TRACK 0x95
#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
@@ -53,6 +54,7 @@
*/
#define PSF_ORDER_PRSSD 0x18
#define PSF_ORDER_CUIR_RESPONSE 0x1A
+#define PSF_SUBORDER_QHA 0x1C
#define PSF_ORDER_SSC 0x1D
/*
@@ -81,6 +83,8 @@
#define ATTENTION_LENGTH_CUIR 0x0e
#define ATTENTION_FORMAT_CUIR 0x01
+#define DASD_ECKD_PG_GROUPED 0x10
+
/*
* Size that is reported for large volumes in the old 16-bit no_cyl field
*/
@@ -403,13 +407,41 @@ struct dasd_psf_cuir_response {
__u8 ssid;
} __packed;
+struct dasd_ckd_path_group_entry {
+ __u8 status_flags;
+ __u8 pgid[11];
+ __u8 sysplex_name[8];
+ __u32 timestamp;
+ __u32 cylinder;
+ __u8 reserved[4];
+} __packed;
+
+struct dasd_ckd_host_information {
+ __u8 access_flags;
+ __u8 entry_size;
+ __u16 entry_count;
+ __u8 entry[16390];
+} __packed;
+
+struct dasd_psf_query_host_access {
+ __u8 access_flag;
+ __u8 version;
+ __u16 CKD_length;
+ __u16 SCSI_length;
+ __u8 unused[10];
+ __u8 host_access_information[16394];
+} __packed;
+
/*
* Perform Subsystem Function - Prepare for Read Subsystem Data
*/
struct dasd_psf_prssd_data {
unsigned char order;
unsigned char flags;
- unsigned char reserved[4];
+ unsigned char reserved1;
+ unsigned char reserved2;
+ unsigned char lss;
+ unsigned char volume;
unsigned char suborder;
unsigned char varies[5];
} __attribute__ ((packed));
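The packed layouts above are internally consistent: struct dasd_ckd_host_information occupies 1 + 1 + 2 + 16390 = 16394 bytes, which exactly fills the host_access_information payload of struct dasd_psf_query_host_access (16410 bytes in total). A compile-time check one could place next to the definitions — a sketch, not part of the patch (the kernel itself would typically use BUILD_BUG_ON):

    _Static_assert(sizeof(struct dasd_ckd_host_information) == 16394,
                   "host information must fill the QHA payload");
    _Static_assert(sizeof(struct dasd_psf_query_host_access) == 16410,
                   "1 + 1 + 2 + 2 + 10 + 16394 bytes, packed");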
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 0f0add932..ac7027e6d 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -236,6 +236,13 @@ struct dasd_ccw_req {
* stolen. Should not be combined with
* DASD_CQR_FLAGS_USE_ERP
*/
+/*
+ * The following flags are used to suppress output of certain errors.
+ * These flags should only be used for format checks!
+ */
+#define DASD_CQR_SUPPRESS_NRF 4 /* Suppress 'No Record Found' error */
+#define DASD_CQR_SUPPRESS_FP 5 /* Suppress 'File Protected' error */
+#define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */
/* Signature for error recovery functions. */
typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
@@ -318,7 +325,8 @@ struct dasd_discipline {
* Device operation functions. build_cp creates a ccw chain for
* a block device request, start_io starts the request and
* term_IO cancels it (e.g. in case of a timeout). format_device
- * returns a ccw chain to be used to format the device.
+ * formats the device and check_device_format compares the format of
+ * a device with the expected format_data.
* handle_terminated_request allows to examine a cqr and prepare
* it for retry.
*/
@@ -329,7 +337,9 @@ struct dasd_discipline {
int (*term_IO) (struct dasd_ccw_req *);
void (*handle_terminated_request) (struct dasd_ccw_req *);
int (*format_device) (struct dasd_device *,
- struct format_data_t *, int enable_pav);
+ struct format_data_t *, int);
+ int (*check_device_format)(struct dasd_device *,
+ struct format_check_t *, int);
int (*free_cp) (struct dasd_ccw_req *, struct request *);
/*
@@ -365,6 +375,8 @@ struct dasd_discipline {
int (*get_uid) (struct dasd_device *, struct dasd_uid *);
void (*kick_validate) (struct dasd_device *);
int (*check_attention)(struct dasd_device *, __u8);
+ int (*host_access_count)(struct dasd_device *);
+ int (*hosts_print)(struct dasd_device *, struct seq_file *);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -487,6 +499,7 @@ struct dasd_device {
unsigned long blk_timeout;
struct dentry *debugfs_dentry;
+ struct dentry *hosts_dentry;
struct dasd_profile profile;
};
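The suppress flags defined above are plain bit numbers in cqr->flags, meant for the set_bit()/test_bit() bitops, as dasd_eckd_dump_sense() does in the dasd_eckd.c hunk. A format-check request that expects 'No Record Found' on sparsely formatted tracks would arm the bit like this (a sketch; the actual call site is not part of this hunk):

    /* Suppress the expected 'No Record Found' message for a check request. */
    set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);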
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 90f30cc31..9dfbd972f 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -238,6 +238,23 @@ dasd_format(struct dasd_block *block, struct format_data_t *fdata)
return rc;
}
+static int dasd_check_format(struct dasd_block *block,
+ struct format_check_t *cdata)
+{
+ struct dasd_device *base;
+ int rc;
+
+ base = block->base;
+ if (!base->discipline->check_device_format)
+ return -ENOTTY;
+
+ rc = base->discipline->check_device_format(base, cdata, 1);
+ if (rc == -EAGAIN)
+ rc = base->discipline->check_device_format(base, cdata, 0);
+
+ return rc;
+}
+
/*
* Format device.
*/
@@ -272,6 +289,47 @@ dasd_ioctl_format(struct block_device *bdev, void __user *argp)
}
rc = dasd_format(base->block, &fdata);
dasd_put_device(base);
+
+ return rc;
+}
+
+/*
+ * Check device format
+ */
+static int dasd_ioctl_check_format(struct block_device *bdev, void __user *argp)
+{
+ struct format_check_t cdata;
+ struct dasd_device *base;
+ int rc = 0;
+
+ if (!argp)
+ return -EINVAL;
+
+ base = dasd_device_from_gendisk(bdev->bd_disk);
+ if (!base)
+ return -ENODEV;
+ if (bdev != bdev->bd_contains) {
+ pr_warn("%s: The specified DASD is a partition and cannot be checked\n",
+ dev_name(&base->cdev->dev));
+ rc = -EINVAL;
+ goto out_err;
+ }
+
+ if (copy_from_user(&cdata, argp, sizeof(cdata))) {
+ rc = -EFAULT;
+ goto out_err;
+ }
+
+ rc = dasd_check_format(base->block, &cdata);
+ if (rc)
+ goto out_err;
+
+ if (copy_to_user(argp, &cdata, sizeof(cdata)))
+ rc = -EFAULT;
+
+out_err:
+ dasd_put_device(base);
+
return rc;
}
@@ -519,6 +577,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
case BIODASDFMT:
rc = dasd_ioctl_format(bdev, argp);
break;
+ case BIODASDCHECKFMT:
+ rc = dasd_ioctl_check_format(bdev, argp);
+ break;
case BIODASDINFO:
rc = dasd_ioctl_information(block, cmd, argp);
break;
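From user space the new BIODASDCHECKFMT ioctl is driven much like BIODASDFMT: fill the expected geometry in the expect member, issue the ioctl on the whole-disk node, and inspect the result fields the driver wrote back. A minimal sketch, assuming the format_check_t definition from the dasd UAPI header and omitting error handling:

    struct format_check_t cdata = { 0 };

    cdata.expect.start_unit = 0;     /* first track to verify */
    cdata.expect.stop_unit  = 14;    /* last track to verify */
    cdata.expect.blksize    = 4096;
    cdata.expect.intensity  = 0;     /* no CDL */

    if (ioctl(fd, BIODASDCHECKFMT, &cdata) == 0 && cdata.result)
        fprintf(stderr, "mismatch on track %u: error %d\n",
                cdata.unit, cdata.result);

On a mismatch, cdata.unit holds the offending track and cdata.num_records, cdata.rec, cdata.blksize and cdata.key_length describe what was actually found, as filled in by dasd_eckd_format_evaluate_tracks().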
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index b83908670..bed53c46d 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -31,7 +31,7 @@ static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static blk_qc_t dcssblk_make_request(struct request_queue *q,
struct bio *bio);
static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
- void __pmem **kaddr, pfn_t *pfn);
+ void __pmem **kaddr, pfn_t *pfn, long size);
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
@@ -884,7 +884,7 @@ fail:
static long
dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
- void __pmem **kaddr, pfn_t *pfn)
+ void __pmem **kaddr, pfn_t *pfn, long size)
{
struct dcssblk_dev_info *dev_info;
unsigned long offset, dev_sz;
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index dd2f7c832..41e28b23b 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -18,6 +18,8 @@ obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o
obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
+obj-$(CONFIG_PCI) += sclp_pci.o
+
obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
obj-$(CONFIG_VMCP) += vmcp.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index e7e078b3c..931d10e86 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -289,7 +289,7 @@ static void raw3215_timeout(unsigned long __data)
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw->flags &= ~RAW3215_TIMER_RUNS;
- if (!(raw->port.flags & ASYNC_SUSPENDED)) {
+ if (!tty_port_suspended(&raw->port)) {
raw3215_mk_write_req(raw);
raw3215_start_io(raw);
if ((raw->queued_read || raw->queued_write) &&
@@ -311,8 +311,7 @@ static void raw3215_timeout(unsigned long __data)
*/
static inline void raw3215_try_io(struct raw3215_info *raw)
{
- if (!(raw->port.flags & ASYNC_INITIALIZED) ||
- (raw->port.flags & ASYNC_SUSPENDED))
+ if (!tty_port_initialized(&raw->port) || tty_port_suspended(&raw->port))
return;
if (raw->queued_read != NULL)
raw3215_start_io(raw);
@@ -494,7 +493,7 @@ static void raw3215_make_room(struct raw3215_info *raw, unsigned int length)
/* While console is frozen for suspend we have no other
* choice but to drop message from the buffer to make
* room for even more messages. */
- if (raw->port.flags & ASYNC_SUSPENDED) {
+ if (tty_port_suspended(&raw->port)) {
raw3215_drop_line(raw);
continue;
}
@@ -616,10 +615,10 @@ static int raw3215_startup(struct raw3215_info *raw)
{
unsigned long flags;
- if (raw->port.flags & ASYNC_INITIALIZED)
+ if (tty_port_initialized(&raw->port))
return 0;
raw->line_pos = 0;
- raw->port.flags |= ASYNC_INITIALIZED;
+ tty_port_set_initialized(&raw->port, 1);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_try_io(raw);
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
@@ -635,8 +634,7 @@ static void raw3215_shutdown(struct raw3215_info *raw)
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
- if (!(raw->port.flags & ASYNC_INITIALIZED) ||
- (raw->flags & RAW3215_FIXED))
+ if (!tty_port_initialized(&raw->port) || (raw->flags & RAW3215_FIXED))
return;
/* Wait for outstanding requests, then free irq */
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
@@ -650,7 +648,7 @@ static void raw3215_shutdown(struct raw3215_info *raw)
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
remove_wait_queue(&raw->empty_wait, &wait);
set_current_state(TASK_RUNNING);
- raw->port.flags &= ~ASYNC_INITIALIZED;
+ tty_port_set_initialized(&raw->port, 0);
}
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
@@ -773,7 +771,7 @@ static int raw3215_pm_stop(struct ccw_device *cdev)
raw = dev_get_drvdata(&cdev->dev);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
- raw->port.flags |= ASYNC_SUSPENDED;
+ tty_port_set_suspended(&raw->port, 1);
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
return 0;
}
@@ -786,7 +784,7 @@ static int raw3215_pm_start(struct ccw_device *cdev)
/* Allow I/O again and flush output buffer. */
raw = dev_get_drvdata(&cdev->dev);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
- raw->port.flags &= ~ASYNC_SUSPENDED;
+ tty_port_set_suspended(&raw->port, 0);
raw->flags |= RAW3215_FLUSHING;
raw3215_try_io(raw);
raw->flags &= ~RAW3215_FLUSHING;
@@ -859,7 +857,7 @@ static void con3215_flush(void)
unsigned long flags;
raw = raw3215[0]; /* console 3215 is the first one */
- if (raw->port.flags & ASYNC_SUSPENDED)
+ if (tty_port_suspended(&raw->port))
/* The console is still frozen for suspend. */
if (ccw_device_force_console(raw->cdev))
/* Forcing didn't work, no panic message .. */
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 4d7a9badf..6b1577c73 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -400,7 +400,7 @@ con3270_deactivate(struct raw3270_view *view)
del_timer(&cp->timer);
}
-static int
+static void
con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
@@ -418,7 +418,6 @@ con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
cp->update_flags = CON_UPDATE_ALL;
con3270_set_timer(cp, 1);
}
- return RAW3270_IO_DONE;
}
/* Console view to a 3270 device. */
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 71e974738..85eca1cef 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -217,7 +217,7 @@ fs3270_deactivate(struct raw3270_view *view)
fp->init->callback(fp->init, NULL);
}
-static int
+static void
fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Set indication and wake waiters for attention. */
@@ -233,7 +233,6 @@ fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
}
- return RAW3270_IO_DONE;
}
/*
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 220acb4cb..a2da898ce 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -90,6 +90,8 @@ module_param(tubxcorrect, bool, 0);
*/
DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
+static void __raw3270_disconnect(struct raw3270 *rp);
+
/*
* Encode array for 12 bit 3270 addresses.
*/
@@ -229,29 +231,6 @@ raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
}
/*
- * Stop running ccw.
- */
-static int
-__raw3270_halt_io(struct raw3270 *rp, struct raw3270_request *rq)
-{
- int retries;
- int rc;
-
- if (raw3270_request_final(rq))
- return 0;
- /* Check if interrupt has already been processed */
- for (retries = 0; retries < 5; retries++) {
- if (retries < 2)
- rc = ccw_device_halt(rp->cdev, (long) rq);
- else
- rc = ccw_device_clear(rp->cdev, (long) rq);
- if (rc == 0)
- break; /* termination successful */
- }
- return rc;
-}
-
-/*
* Add the request to the request queue, try to start it if the
* 3270 device is idle. Return without waiting for end of i/o.
*/
@@ -342,7 +321,6 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
struct raw3270 *rp;
struct raw3270_view *view;
struct raw3270_request *rq;
- int rc;
rp = dev_get_drvdata(&cdev->dev);
if (!rp)
@@ -350,57 +328,31 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
rq = (struct raw3270_request *) intparm;
view = rq ? rq->view : rp->view;
- if (IS_ERR(irb))
- rc = RAW3270_IO_RETRY;
- else if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
- rq->rc = -EIO;
- rc = RAW3270_IO_DONE;
- } else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END |
- DEV_STAT_UNIT_EXCEP)) {
+ if (!IS_ERR(irb)) {
/* Handle CE-DE-UE and subsequent UDE */
- set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
- rc = RAW3270_IO_BUSY;
- } else if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
- /* Wait for UDE if busy flag is set. */
- if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+ if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END)
clear_bit(RAW3270_FLAGS_BUSY, &rp->flags);
- /* Got it, now retry. */
- rc = RAW3270_IO_RETRY;
- } else
- rc = RAW3270_IO_BUSY;
- } else if (view)
- rc = view->fn->intv(view, rq, irb);
- else
- rc = RAW3270_IO_DONE;
+ if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END |
+ DEV_STAT_DEV_END |
+ DEV_STAT_UNIT_EXCEP))
+ set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
+ /* Handle disconnected devices */
+ if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+ (irb->ecw[0] & SNS0_INTERVENTION_REQ)) {
+ set_bit(RAW3270_FLAGS_BUSY, &rp->flags);
+ if (rp->state > RAW3270_STATE_RESET)
+ __raw3270_disconnect(rp);
+ }
+ /* Call interrupt handler of the view */
+ if (view)
+ view->fn->intv(view, rq, irb);
+ }
- switch (rc) {
- case RAW3270_IO_DONE:
- break;
- case RAW3270_IO_BUSY:
- /*
- * Intervention required by the operator. We have to wait
- * for unsolicited device end.
- */
+ if (test_bit(RAW3270_FLAGS_BUSY, &rp->flags))
+ /* Device busy, do not start I/O */
return;
- case RAW3270_IO_RETRY:
- if (!rq)
- break;
- rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
- (unsigned long) rq, 0, 0);
- if (rq->rc == 0)
- return; /* Successfully restarted. */
- break;
- case RAW3270_IO_STOP:
- if (!rq)
- break;
- __raw3270_halt_io(rp, rq);
- rq->rc = -EIO;
- break;
- default:
- BUG();
- }
- if (rq) {
- BUG_ON(list_empty(&rq->list));
+
+ if (rq && !list_empty(&rq->list)) {
/* The request completed, remove from queue and do callback. */
list_del_init(&rq->list);
if (rq->callback)
@@ -408,6 +360,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
/* Do put_device for get_device in raw3270_start. */
raw3270_put_view(view);
}
+
/*
* Try to start each request on request queue until one is
* started successful.
@@ -685,23 +638,34 @@ raw3270_reset(struct raw3270_view *view)
return rc;
}
-static int
+static void
+__raw3270_disconnect(struct raw3270 *rp)
+{
+ struct raw3270_request *rq;
+ struct raw3270_view *view;
+
+ rp->state = RAW3270_STATE_INIT;
+ rp->view = &rp->init_view;
+ /* Cancel all queued requests */
+ while (!list_empty(&rp->req_queue)) {
+ rq = list_entry(rp->req_queue.next, struct raw3270_request, list);
+ view = rq->view;
+ rq->rc = -EACCES;
+ list_del_init(&rq->list);
+ if (rq->callback)
+ rq->callback(rq, rq->callback_data);
+ raw3270_put_view(view);
+ }
+ /* Start from scratch */
+ __raw3270_reset_device(rp);
+}
+
+static void
raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
struct irb *irb)
{
struct raw3270 *rp;
- /*
- * Unit-Check Processing:
- * Expect Command Reject or Intervention Required.
- */
- if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
- /* Request finished abnormally. */
- if (irb->ecw[0] & SNS0_INTERVENTION_REQ) {
- set_bit(RAW3270_FLAGS_BUSY, &view->dev->flags);
- return RAW3270_IO_BUSY;
- }
- }
if (rq) {
if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
if (irb->ecw[0] & SNS0_CMD_REJECT)
@@ -715,7 +679,6 @@ raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
rp = view->dev;
raw3270_read_modified(rp);
}
- return RAW3270_IO_DONE;
}
static struct raw3270_fn raw3270_init_fn = {
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index e1e41c286..56519cbb1 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -125,19 +125,13 @@ raw3270_request_final(struct raw3270_request *rq)
void raw3270_buffer_address(struct raw3270 *, char *, unsigned short);
-/* Return value of *intv (see raw3270_fn below) can be one of the following: */
-#define RAW3270_IO_DONE 0 /* request finished */
-#define RAW3270_IO_BUSY 1 /* request still active */
-#define RAW3270_IO_RETRY 2 /* retry current request */
-#define RAW3270_IO_STOP 3 /* kill current request */
-
/*
* Functions of a 3270 view.
*/
struct raw3270_fn {
int (*activate)(struct raw3270_view *);
void (*deactivate)(struct raw3270_view *);
- int (*intv)(struct raw3270_view *,
+ void (*intv)(struct raw3270_view *,
struct raw3270_request *, struct irb *);
void (*release)(struct raw3270_view *);
void (*free)(struct raw3270_view *);
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 026e38990..7a10c5633 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -17,33 +17,35 @@
#define MAX_KMEM_PAGES (sizeof(unsigned long) << 3)
#define SCLP_CONSOLE_PAGES 6
+#define SCLP_EVTYP_MASK(T) (1U << (32 - (T)))
+
#define EVTYP_OPCMD 0x01
#define EVTYP_MSG 0x02
+#define EVTYP_CONFMGMDATA 0x04
#define EVTYP_DIAG_TEST 0x07
#define EVTYP_STATECHANGE 0x08
#define EVTYP_PMSGCMD 0x09
-#define EVTYP_CNTLPROGOPCMD 0x20
-#define EVTYP_CNTLPROGIDENT 0x0B
-#define EVTYP_SIGQUIESCE 0x1D
+#define EVTYP_ASYNC 0x0A
+#define EVTYP_CTLPROGIDENT 0x0B
+#define EVTYP_ERRNOTIFY 0x18
#define EVTYP_VT220MSG 0x1A
-#define EVTYP_CONFMGMDATA 0x04
#define EVTYP_SDIAS 0x1C
-#define EVTYP_ASYNC 0x0A
+#define EVTYP_SIGQUIESCE 0x1D
#define EVTYP_OCF 0x1E
-#define EVTYP_OPCMD_MASK 0x80000000
-#define EVTYP_MSG_MASK 0x40000000
-#define EVTYP_DIAG_TEST_MASK 0x02000000
-#define EVTYP_STATECHANGE_MASK 0x01000000
-#define EVTYP_PMSGCMD_MASK 0x00800000
-#define EVTYP_CTLPROGOPCMD_MASK 0x00000001
-#define EVTYP_CTLPROGIDENT_MASK 0x00200000
-#define EVTYP_SIGQUIESCE_MASK 0x00000008
-#define EVTYP_VT220MSG_MASK 0x00000040
-#define EVTYP_CONFMGMDATA_MASK 0x10000000
-#define EVTYP_SDIAS_MASK 0x00000010
-#define EVTYP_ASYNC_MASK 0x00400000
-#define EVTYP_OCF_MASK 0x00000004
+#define EVTYP_OPCMD_MASK SCLP_EVTYP_MASK(EVTYP_OPCMD)
+#define EVTYP_MSG_MASK SCLP_EVTYP_MASK(EVTYP_MSG)
+#define EVTYP_CONFMGMDATA_MASK SCLP_EVTYP_MASK(EVTYP_CONFMGMDATA)
+#define EVTYP_DIAG_TEST_MASK SCLP_EVTYP_MASK(EVTYP_DIAG_TEST)
+#define EVTYP_STATECHANGE_MASK SCLP_EVTYP_MASK(EVTYP_STATECHANGE)
+#define EVTYP_PMSGCMD_MASK SCLP_EVTYP_MASK(EVTYP_PMSGCMD)
+#define EVTYP_ASYNC_MASK SCLP_EVTYP_MASK(EVTYP_ASYNC)
+#define EVTYP_CTLPROGIDENT_MASK SCLP_EVTYP_MASK(EVTYP_CTLPROGIDENT)
+#define EVTYP_ERRNOTIFY_MASK SCLP_EVTYP_MASK(EVTYP_ERRNOTIFY)
+#define EVTYP_VT220MSG_MASK SCLP_EVTYP_MASK(EVTYP_VT220MSG)
+#define EVTYP_SDIAS_MASK SCLP_EVTYP_MASK(EVTYP_SDIAS)
+#define EVTYP_SIGQUIESCE_MASK SCLP_EVTYP_MASK(EVTYP_SIGQUIESCE)
+#define EVTYP_OCF_MASK SCLP_EVTYP_MASK(EVTYP_OCF)
#define GNRLMSGFLGS_DOM 0x8000
#define GNRLMSGFLGS_SNDALRM 0x4000
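SCLP_EVTYP_MASK encodes each event type as a single bit counted from the most significant bit of a 32-bit mask, which is why the rewritten mask list reproduces the old literals exactly. For example:

    SCLP_EVTYP_MASK(EVTYP_OPCMD)      /* 1U << (32 - 0x01) = 0x80000000 */
    SCLP_EVTYP_MASK(EVTYP_SDIAS)      /* 1U << (32 - 0x1C) = 0x00000010 */
    SCLP_EVTYP_MASK(EVTYP_SIGQUIESCE) /* 1U << (32 - 0x1D) = 0x00000008 */

Each value matches the corresponding hard-coded EVTYP_*_MASK constant removed above.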
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index d3947ea3e..e3fc75391 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -576,67 +576,6 @@ __initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
/*
- * PCI I/O adapter configuration related functions.
- */
-#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
-#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
-
-#define SCLP_RECONFIG_PCI_ATPYE 2
-
-struct pci_cfg_sccb {
- struct sccb_header header;
- u8 atype; /* adapter type */
- u8 reserved1;
- u16 reserved2;
- u32 aid; /* adapter identifier */
-} __packed;
-
-static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
-{
- struct pci_cfg_sccb *sccb;
- int rc;
-
- if (!SCLP_HAS_PCI_RECONFIG)
- return -EOPNOTSUPP;
-
- sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!sccb)
- return -ENOMEM;
-
- sccb->header.length = PAGE_SIZE;
- sccb->atype = SCLP_RECONFIG_PCI_ATPYE;
- sccb->aid = fid;
- rc = sclp_sync_request(cmd, sccb);
- if (rc)
- goto out;
- switch (sccb->header.response_code) {
- case 0x0020:
- case 0x0120:
- break;
- default:
- pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
- cmd, sccb->header.response_code);
- rc = -EIO;
- break;
- }
-out:
- free_page((unsigned long) sccb);
- return rc;
-}
-
-int sclp_pci_configure(u32 fid)
-{
- return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
-}
-EXPORT_SYMBOL(sclp_pci_configure);
-
-int sclp_pci_deconfigure(u32 fid)
-{
- return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
-}
-EXPORT_SYMBOL(sclp_pci_deconfigure);
-
-/*
* Channel path configuration related functions.
*/
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index f344e5bd2..90d92fbe7 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -93,7 +93,7 @@ static struct sclp_req *cpi_prepare_req(void)
/* setup SCCB for Control-Program Identification */
sccb->header.length = sizeof(struct cpi_sccb);
sccb->cpi_evbuf.header.length = sizeof(struct cpi_evbuf);
- sccb->cpi_evbuf.header.type = 0x0b;
+ sccb->cpi_evbuf.header.type = EVTYP_CTLPROGIDENT;
evb = &sccb->cpi_evbuf;
/* set system type */
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index 6804354c4..0ac520dd1 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -49,7 +49,9 @@ struct read_info_sccb {
u8 _pad_117[119 - 117]; /* 117-118 */
u8 fac119; /* 119 */
u16 hcpua; /* 120-121 */
- u8 _pad_122[4096 - 122]; /* 122-4095 */
+ u8 _pad_122[124 - 122]; /* 122-123 */
+ u32 hmfai; /* 124-127 */
+ u8 _pad_128[4096 - 128]; /* 128-4095 */
} __packed __aligned(PAGE_SIZE);
static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
@@ -155,6 +157,8 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
sclp.mtid = (sccb->fac42 & 0x80) ? (sccb->fac42 & 31) : 0;
sclp.mtid_cp = (sccb->fac42 & 0x80) ? (sccb->fac43 & 31) : 0;
sclp.mtid_prev = (sccb->fac42 & 0x80) ? (sccb->fac66 & 31) : 0;
+
+ sclp.hmfai = sccb->hmfai;
}
/*
diff --git a/drivers/s390/char/sclp_pci.c b/drivers/s390/char/sclp_pci.c
new file mode 100644
index 000000000..4dbb3dfd4
--- /dev/null
+++ b/drivers/s390/char/sclp_pci.c
@@ -0,0 +1,193 @@
+/*
+ * PCI I/O adapter configuration related functions.
+ *
+ * Copyright IBM Corp. 2016
+ */
+#define KMSG_COMPONENT "sclp_cmd"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+
+#include <asm/sclp.h>
+
+#include "sclp.h"
+
+#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
+#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
+
+#define SCLP_ATYPE_PCI 2
+
+#define SCLP_ERRNOTIFY_AQ_REPAIR 1
+#define SCLP_ERRNOTIFY_AQ_INFO_LOG 2
+
+static DEFINE_MUTEX(sclp_pci_mutex);
+static struct sclp_register sclp_pci_event = {
+ .send_mask = EVTYP_ERRNOTIFY_MASK,
+};
+
+struct err_notify_evbuf {
+ struct evbuf_header header;
+ u8 action;
+ u8 atype;
+ u32 fh;
+ u32 fid;
+ u8 data[0];
+} __packed;
+
+struct err_notify_sccb {
+ struct sccb_header header;
+ struct err_notify_evbuf evbuf;
+} __packed;
+
+struct pci_cfg_sccb {
+ struct sccb_header header;
+ u8 atype; /* adapter type */
+ u8 reserved1;
+ u16 reserved2;
+ u32 aid; /* adapter identifier */
+} __packed;
+
+static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
+{
+ struct pci_cfg_sccb *sccb;
+ int rc;
+
+ if (!SCLP_HAS_PCI_RECONFIG)
+ return -EOPNOTSUPP;
+
+ sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ return -ENOMEM;
+
+ sccb->header.length = PAGE_SIZE;
+ sccb->atype = SCLP_ATYPE_PCI;
+ sccb->aid = fid;
+ rc = sclp_sync_request(cmd, sccb);
+ if (rc)
+ goto out;
+ switch (sccb->header.response_code) {
+ case 0x0020:
+ case 0x0120:
+ break;
+ default:
+ pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
+ cmd, sccb->header.response_code);
+ rc = -EIO;
+ break;
+ }
+out:
+ free_page((unsigned long) sccb);
+ return rc;
+}
+
+int sclp_pci_configure(u32 fid)
+{
+ return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
+}
+EXPORT_SYMBOL(sclp_pci_configure);
+
+int sclp_pci_deconfigure(u32 fid)
+{
+ return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
+}
+EXPORT_SYMBOL(sclp_pci_deconfigure);
+
+static void sclp_pci_callback(struct sclp_req *req, void *data)
+{
+ struct completion *completion = data;
+
+ complete(completion);
+}
+
+static int sclp_pci_check_report(struct zpci_report_error_header *report)
+{
+ if (report->version != 1)
+ return -EINVAL;
+
+ if (report->action != SCLP_ERRNOTIFY_AQ_REPAIR &&
+ report->action != SCLP_ERRNOTIFY_AQ_INFO_LOG)
+ return -EINVAL;
+
+ if (report->length > (PAGE_SIZE - sizeof(struct err_notify_sccb)))
+ return -EINVAL;
+
+ return 0;
+}
+
+int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct err_notify_sccb *sccb;
+ struct sclp_req req;
+ int ret;
+
+ ret = sclp_pci_check_report(report);
+ if (ret)
+ return ret;
+
+ mutex_lock(&sclp_pci_mutex);
+ ret = sclp_register(&sclp_pci_event);
+ if (ret)
+ goto out_unlock;
+
+ if (!(sclp_pci_event.sclp_receive_mask & EVTYP_ERRNOTIFY_MASK)) {
+ ret = -EOPNOTSUPP;
+ goto out_unregister;
+ }
+
+ sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb) {
+ ret = -ENOMEM;
+ goto out_unregister;
+ }
+
+ memset(&req, 0, sizeof(req));
+ req.callback_data = &completion;
+ req.callback = sclp_pci_callback;
+ req.command = SCLP_CMDW_WRITE_EVENT_DATA;
+ req.status = SCLP_REQ_FILLED;
+ req.sccb = sccb;
+
+ sccb->evbuf.header.length = sizeof(sccb->evbuf) + report->length;
+ sccb->evbuf.header.type = EVTYP_ERRNOTIFY;
+ sccb->header.length = sizeof(sccb->header) + sccb->evbuf.header.length;
+
+ sccb->evbuf.action = report->action;
+ sccb->evbuf.atype = SCLP_ATYPE_PCI;
+ sccb->evbuf.fh = fh;
+ sccb->evbuf.fid = fid;
+
+ memcpy(sccb->evbuf.data, report->data, report->length);
+
+ ret = sclp_add_request(&req);
+ if (ret)
+ goto out_free_req;
+
+ wait_for_completion(&completion);
+ if (req.status != SCLP_REQ_DONE) {
+ pr_warn("request failed (status=0x%02x)\n",
+ req.status);
+ ret = -EIO;
+ goto out_free_req;
+ }
+
+ if (sccb->header.response_code != 0x0020) {
+ pr_warn("request failed with response code 0x%x\n",
+ sccb->header.response_code);
+ ret = -EIO;
+ }
+
+out_free_req:
+ free_page((unsigned long) sccb);
+out_unregister:
+ sclp_unregister(&sclp_pci_event);
+out_unlock:
+ mutex_unlock(&sclp_pci_mutex);
+ return ret;
+}
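A caller of sclp_pci_report() supplies a zpci_report_error_header with version 1, one of the two action codes, the payload length, and the payload itself, plus the PCI function handle and id. A minimal sketch, assuming report points at a caller-provided buffer and that data, data_len, fh and fid are supplied by the caller:

    /* Sketch: file a repair-action error report for PCI function fh/fid. */
    report->version = 1;
    report->action  = SCLP_ERRNOTIFY_AQ_REPAIR;
    report->length  = data_len;              /* bytes following the header */
    memcpy(report->data, data, data_len);

    rc = sclp_pci_report(report, fh, fid);   /* 0 on success, -errno on failure */

Note that sclp_pci_check_report() rejects payloads larger than PAGE_SIZE minus the SCCB and event-buffer headers, so data_len must fit within one page.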
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index e96fc7fd9..272cb6cd1 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -92,6 +92,7 @@ struct tty3270 {
unsigned char inattr; /* Visible/invisible input. */
int throttle, attn; /* tty throttle/unthrottle. */
struct tasklet_struct readlet; /* Tasklet to issue read request. */
+ struct tasklet_struct hanglet; /* Tasklet to hang up the tty. */
struct kbd_data *kbd; /* key_maps stuff. */
/* Escape sequence parsing. */
@@ -319,6 +320,27 @@ tty3270_blank_line(struct tty3270 *tp)
}
/*
+ * Create a blank screen and remove all lines from the history.
+ */
+static void
+tty3270_blank_screen(struct tty3270 *tp)
+{
+ struct string *s, *n;
+ int i;
+
+ for (i = 0; i < tp->view.rows - 2; i++)
+ tp->screen[i].len = 0;
+ tp->nr_up = 0;
+ list_for_each_entry_safe(s, n, &tp->lines, list) {
+ list_del(&s->list);
+ if (!list_empty(&s->update))
+ list_del(&s->update);
+ tp->nr_lines--;
+ free_string(&tp->freemem, s);
+ }
+}
+
+/*
* Write request completion callback.
*/
static void
@@ -405,7 +427,10 @@ tty3270_update(struct tty3270 *tp)
if (raw3270_request_add_data(wrq, str, len) != 0)
break;
list_del_init(&s->update);
- sba = s->string + s->len - 3;
+ if (s->string[s->len - 4] == TO_RA)
+ sba = s->string + s->len - 3;
+ else
+ sba = invalid_sba;
}
if (list_empty(&tp->update))
updated |= TTY_UPDATE_LIST;
@@ -622,6 +647,16 @@ tty3270_issue_read(struct tty3270 *tp, int lock)
}
/*
+ * Hang up the tty
+ */
+static void
+tty3270_hangup_tasklet(struct tty3270 *tp)
+{
+ tty_port_tty_hangup(&tp->port, true);
+ raw3270_put_view(&tp->view);
+}
+
+/*
* Switch to the tty view.
*/
static int
@@ -642,7 +677,7 @@ tty3270_deactivate(struct raw3270_view *view)
del_timer(&tp->timer);
}
-static int
+static void
tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
@@ -654,17 +689,19 @@ tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
}
if (rq) {
- if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+ if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
rq->rc = -EIO;
- else
+ raw3270_get_view(&tp->view);
+ tasklet_schedule(&tp->hanglet);
+ } else {
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
+ }
} else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
/* Interrupt without an outstanding request -> update all */
tp->update_flags = TTY_UPDATE_ALL;
tty3270_set_timer(tp, 1);
}
- return RAW3270_IO_DONE;
}
/*
@@ -716,6 +753,9 @@ tty3270_alloc_view(void)
tasklet_init(&tp->readlet,
(void (*)(unsigned long)) tty3270_read_tasklet,
(unsigned long) tp->read);
+ tasklet_init(&tp->hanglet,
+ (void (*)(unsigned long)) tty3270_hangup_tasklet,
+ (unsigned long) tp);
INIT_WORK(&tp->resize_work, tty3270_resize_work);
return tp;
@@ -814,6 +854,7 @@ static void tty3270_resize_work(struct work_struct *work)
return;
/* Switch to new output size */
spin_lock_bh(&tp->view.lock);
+ tty3270_blank_screen(tp);
oscreen = tp->screen;
orows = tp->view.rows;
tp->view.model = tp->n_model;
@@ -824,7 +865,6 @@ static void tty3270_resize_work(struct work_struct *work)
free_string(&tp->freemem, tp->status);
tty3270_create_prompt(tp);
tty3270_create_status(tp);
- tp->nr_up = 0;
while (tp->nr_lines < tp->view.rows - 2)
tty3270_blank_line(tp);
tp->update_flags = TTY_UPDATE_ALL;
@@ -838,6 +878,7 @@ static void tty3270_resize_work(struct work_struct *work)
ws.ws_row = tp->view.rows - 2;
ws.ws_col = tp->view.cols;
tty_do_resize(tty, &ws);
+ tty_kref_put(tty);
}
static void
@@ -845,6 +886,8 @@ tty3270_resize(struct raw3270_view *view, int model, int rows, int cols)
{
struct tty3270 *tp = container_of(view, struct tty3270, view);
+ if (tp->n_model == model && tp->n_rows == rows && tp->n_cols == cols)
+ return;
tp->n_model = model;
tp->n_rows = rows;
tp->n_cols = cols;
@@ -923,10 +966,8 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
tp->port.low_latency = 0;
- /* why to reassign? */
- tty_port_tty_set(&tp->port, tty);
tp->inattr = TF_INPUT;
- return tty_port_install(&tp->port, driver, tty);
+ goto port_install;
}
if (tty3270_max_index < tty->index + 1)
tty3270_max_index = tty->index + 1;
@@ -952,7 +993,6 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
return rc;
}
- tty_port_tty_set(&tp->port, tty);
tp->port.low_latency = 0;
tty->winsize.ws_row = tp->view.rows - 2;
tty->winsize.ws_col = tp->view.cols;
@@ -974,6 +1014,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
raw3270_activate_view(&tp->view);
+port_install:
rc = tty_port_install(&tp->port, driver, tty);
if (rc) {
raw3270_put_view(&tp->view);
@@ -1010,18 +1051,18 @@ tty3270_close(struct tty_struct *tty, struct file * filp)
if (tty->count > 1)
return;
- if (tp) {
- tty->driver_data = NULL;
+ if (tp)
tty_port_tty_set(&tp->port, NULL);
- }
}
static void tty3270_cleanup(struct tty_struct *tty)
{
struct tty3270 *tp = tty->driver_data;
- if (tp)
+ if (tp) {
+ tty->driver_data = NULL;
raw3270_put_view(&tp->view);
+ }
}
/*
@@ -1788,7 +1829,22 @@ tty3270_unthrottle(struct tty_struct * tty)
static void
tty3270_hangup(struct tty_struct *tty)
{
- // FIXME: implement
+ struct tty3270 *tp;
+
+ tp = tty->driver_data;
+ if (!tp)
+ return;
+ spin_lock_bh(&tp->view.lock);
+ tp->cx = tp->saved_cx = 0;
+ tp->cy = tp->saved_cy = 0;
+ tp->highlight = tp->saved_highlight = TAX_RESET;
+ tp->f_color = tp->saved_f_color = TAC_RESET;
+ tty3270_blank_screen(tp);
+ while (tp->nr_lines < tp->view.rows - 2)
+ tty3270_blank_line(tp);
+ tp->update_flags = TTY_UPDATE_ALL;
+ spin_unlock_bh(&tp->view.lock);
+ tty3270_set_timer(tp, 1);
}
static void
@@ -1804,7 +1860,7 @@ static int tty3270_ioctl(struct tty_struct *tty, unsigned int cmd,
tp = tty->driver_data;
if (!tp)
return -ENODEV;
- if (tty->flags & (1 << TTY_IO_ERROR))
+ if (tty_io_error(tty))
return -EIO;
return kbd_ioctl(tp->kbd, cmd, arg);
}
@@ -1818,7 +1874,7 @@ static long tty3270_compat_ioctl(struct tty_struct *tty,
tp = tty->driver_data;
if (!tp)
return -ENODEV;
- if (tty->flags & (1 << TTY_IO_ERROR))
+ if (tty_io_error(tty))
return -EIO;
return kbd_ioctl(tp->kbd, cmd, (unsigned long)compat_ptr(arg));
}
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 24ec282e1..327255da1 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -787,7 +787,7 @@ static enum ap_wait ap_sm_setirq_wait(struct ap_device *ap_dev)
/*
* AP state machine jump table
*/
-ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
+static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
[AP_STATE_RESET_START] = {
[AP_EVENT_POLL] = ap_sm_reset,
[AP_EVENT_TIMEOUT] = ap_sm_nop,
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index c3e22523f..ad17fc588 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -642,7 +642,7 @@ static void ctcmpc_send_sweep_req(struct channel *rch)
kfree(header);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
skb_queue_tail(&ch->sweep_queue, sweep_skb);
fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
@@ -911,7 +911,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
if (ctcm_test_and_set_busy(dev))
return NETDEV_TX_BUSY;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
return NETDEV_TX_BUSY;
return NETDEV_TX_OK;
@@ -994,7 +994,7 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
goto done;
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): device error - dropped",
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index edf16bfba..c103fc7ef 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -671,7 +671,7 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
kfree(header);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
skb_queue_tail(&ch->sweep_queue, sweep_skb);
fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 0ba3a2f81..b0e8ffdf8 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1407,7 +1407,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
return NETDEV_TX_BUSY;
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
rc = netiucv_transmit_skb(privptr->conn, skb);
netiucv_clear_busy(dev);
return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 787153764..b7b74776e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3481,7 +3481,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
}
}
- queue->card->dev->trans_start = jiffies;
+ netif_trans_update(queue->card->dev);
if (queue->card->options.performance_stats) {
queue->card->perf_stats.outbound_do_qdio_cnt++;
queue->card->perf_stats.outbound_do_qdio_start_time =
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 80b1979e8..df036b872 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1051,6 +1051,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
qeth_l2_set_offline(cgdev);
if (card->dev) {
+ netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ac544330d..709b52339 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3226,6 +3226,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_set_offline(cgdev);
if (card->dev) {
+ netif_napi_del(&card->napi);
unregister_netdev(card->dev);
card->dev = NULL;
}
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 157d3d203..9310a547b 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -26,7 +26,8 @@ void zfcp_unit_scsi_scan(struct zfcp_unit *unit)
lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
- scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1);
+ scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun,
+ SCSI_SCAN_MANUAL);
}
static void zfcp_unit_scsi_scan_work(struct work_struct *work)
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index e077ebd89..4612691c6 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -383,20 +383,12 @@ static struct device_node *get_node(phandle n, DATA *data)
}
/* Copy in a whole string from userspace into kernelspace. */
-static int copyin_string(char __user *user, size_t len, char **ptr)
+static char *copyin_string(char __user *user, size_t len)
{
- char *tmp;
-
if ((ssize_t)len < 0 || (ssize_t)(len + 1) < 0)
- return -EINVAL;
-
- tmp = memdup_user_nul(user, len);
- if (IS_ERR(tmp))
- return PTR_ERR(tmp);
+ return ERR_PTR(-EINVAL);
- *ptr = tmp;
-
- return 0;
+ return memdup_user_nul(user, len);
}
/*
@@ -415,9 +407,9 @@ static int opiocget(void __user *argp, DATA *data)
dp = get_node(op.op_nodeid, data);
- err = copyin_string(op.op_name, op.op_namelen, &str);
- if (err)
- return err;
+ str = copyin_string(op.op_name, op.op_namelen);
+ if (IS_ERR(str))
+ return PTR_ERR(str);
pval = of_get_property(dp, str, &len);
err = 0;
@@ -440,7 +432,7 @@ static int opiocnextprop(void __user *argp, DATA *data)
struct device_node *dp;
struct property *prop;
char *str;
- int err, len;
+ int len;
if (copy_from_user(&op, argp, sizeof(op)))
return -EFAULT;
@@ -449,9 +441,9 @@ static int opiocnextprop(void __user *argp, DATA *data)
if (!dp)
return -EINVAL;
- err = copyin_string(op.op_name, op.op_namelen, &str);
- if (err)
- return err;
+ str = copyin_string(op.op_name, op.op_namelen);
+ if (IS_ERR(str))
+ return PTR_ERR(str);
if (str[0] == '\0') {
prop = dp->properties;
@@ -494,14 +486,14 @@ static int opiocset(void __user *argp, DATA *data)
if (!dp)
return -EINVAL;
- err = copyin_string(op.op_name, op.op_namelen, &str);
- if (err)
- return err;
+ str = copyin_string(op.op_name, op.op_namelen);
+ if (IS_ERR(str))
+ return PTR_ERR(str);
- err = copyin_string(op.op_buf, op.op_buflen, &tmp);
- if (err) {
+ tmp = copyin_string(op.op_buf, op.op_buflen);
+ if (IS_ERR(tmp)) {
kfree(str);
- return err;
+ return PTR_ERR(tmp);
}
err = of_set_property(dp, str, tmp, op.op_buflen);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index e80768f8e..98e5d51a3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -17,6 +17,7 @@ config SCSI
tristate "SCSI device support"
depends on BLOCK
select SCSI_DMA if HAS_DMA
+ select SG_POOL
---help---
If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or
any other SCSI device under Linux, say Y and make sure that you know
@@ -202,12 +203,12 @@ config SCSI_ENCLOSURE
certain enclosure conditions to be reported and is not required.
config SCSI_CONSTANTS
- bool "Verbose SCSI error reporting (kernel size +=75K)"
+ bool "Verbose SCSI error reporting (kernel size += 36K)"
depends on SCSI
help
The error messages regarding your SCSI hardware will be easier to
understand if you say Y here; it will enlarge your kernel by about
- 75 KB. If in doubt, say Y.
+ 36 KB. If in doubt, say Y.
config SCSI_LOGGING
bool "SCSI logging facility"
@@ -813,17 +814,6 @@ config SCSI_GENERIC_NCR5380_MMIO
To compile this driver as a module, choose M here: the
module will be called g_NCR5380_mmio.
-config SCSI_GENERIC_NCR53C400
- bool "Enable NCR53c400 extensions"
- depends on SCSI_GENERIC_NCR5380
- help
- This enables certain optimizations for the NCR53c400 SCSI cards.
- You might as well try it out. Note that this driver will only probe
- for the Trantor T130B in its default configuration; you might have
- to pass a command line option to the kernel at boot time if it does
- not detect your card. See the file
- <file:Documentation/scsi/g_NCR5380.txt> for details.
-
config SCSI_IPS
tristate "IBM ServeRAID support"
depends on PCI && SCSI
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 3eff2a69f..43908bbb3 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -29,29 +29,9 @@
* Ronald van Cuijlenborg, Alan Cox and others.
*/
-/*
- * Further development / testing that should be done :
- * 1. Cleanup the NCR5380_transfer_dma function and DMA operation complete
- * code so that everything does the same thing that's done at the
- * end of a pseudo-DMA read operation.
- *
- * 2. Fix REAL_DMA (interrupt driven, polled works fine) -
- * basically, transfer size needs to be reduced by one
- * and the last byte read as is done with PSEUDO_DMA.
- *
- * 4. Test SCSI-II tagged queueing (I have no devices which support
- * tagged queueing)
- */
+/* Ported to Atari by Roman Hodek and others. */
-#ifndef notyet
-#undef REAL_DMA
-#endif
-
-#ifdef BOARD_REQUIRES_NO_DELAY
-#define io_recovery_delay(x)
-#else
-#define io_recovery_delay(x) udelay(x)
-#endif
+/* Adapted for the Sun 3 by Sam Creasey. */
/*
* Design
@@ -126,17 +106,10 @@
* DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
* transceivers.
*
- * DONT_USE_INTR - if defined, never use interrupts, even if we probe or
- * override-configure an IRQ.
- *
* PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases.
*
* REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
*
- * REAL_DMA_POLL - if defined, REAL DMA is used but the driver doesn't
- * rely on phase mismatch and EOP interrupts to determine end
- * of phase.
- *
* These macros MUST be defined :
*
* NCR5380_read(register) - read from the specified register
@@ -147,29 +120,29 @@
* specific implementation of the NCR5380
*
* Either real DMA *or* pseudo DMA may be implemented
- * REAL functions :
- * NCR5380_REAL_DMA should be defined if real DMA is to be used.
- * Note that the DMA setup functions should return the number of bytes
- * that they were able to program the controller for.
- *
- * Also note that generic i386/PC versions of these macros are
- * available as NCR5380_i386_dma_write_setup,
- * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
*
* NCR5380_dma_write_setup(instance, src, count) - initialize
* NCR5380_dma_read_setup(instance, dst, count) - initialize
* NCR5380_dma_residual(instance); - residual count
*
- * PSEUDO functions :
- * NCR5380_pwrite(instance, src, count)
- * NCR5380_pread(instance, dst, count);
- *
* The generic driver is initialized by calling NCR5380_init(instance),
* after setting the appropriate host specific fields and ID. If the
* driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
* possible) function may be used.
*/
+#ifndef NCR5380_io_delay
+#define NCR5380_io_delay(x)
+#endif
+
+#ifndef NCR5380_acquire_dma_irq
+#define NCR5380_acquire_dma_irq(x) (1)
+#endif
+
+#ifndef NCR5380_release_dma_irq
+#define NCR5380_release_dma_irq(x)
+#endif
+
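These defaults make the new hooks no-ops so the core can call them unconditionally; a board wrapper that must share or lazily request its DMA IRQ overrides them before including NCR5380.c. An illustrative override (mydrv_grab_irq()/mydrv_drop_irq() are hypothetical board helpers, not names from this patch):

	/* Hypothetical board header, included before NCR5380.c */
	#define NCR5380_acquire_dma_irq(instance)	mydrv_grab_irq(instance)
	#define NCR5380_release_dma_irq(instance)	mydrv_drop_irq(instance)

NCR5380_acquire_dma_irq() must return nonzero on success; the default of (1) is what lets boards without a shared IRQ always proceed.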
static int do_abort(struct Scsi_Host *);
static void do_reset(struct Scsi_Host *);
@@ -280,12 +253,20 @@ static struct {
{0, NULL}
},
basrs[] = {
+ {BASR_END_DMA_TRANSFER, "END OF DMA"},
+ {BASR_DRQ, "DRQ"},
+ {BASR_PARITY_ERROR, "PARITY ERROR"},
+ {BASR_IRQ, "IRQ"},
+ {BASR_PHASE_MATCH, "PHASE MATCH"},
+ {BASR_BUSY_ERROR, "BUSY ERROR"},
{BASR_ATN, "ATN"},
{BASR_ACK, "ACK"},
{0, NULL}
},
icrs[] = {
{ICR_ASSERT_RST, "ASSERT RST"},
+ {ICR_ARBITRATION_PROGRESS, "ARB. IN PROGRESS"},
+ {ICR_ARBITRATION_LOST, "LOST ARB."},
{ICR_ASSERT_ACK, "ASSERT ACK"},
{ICR_ASSERT_BSY, "ASSERT BSY"},
{ICR_ASSERT_SEL, "ASSERT SEL"},
@@ -294,14 +275,14 @@ icrs[] = {
{0, NULL}
},
mrs[] = {
- {MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"},
- {MR_TARGET, "MODE TARGET"},
- {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"},
- {MR_ENABLE_PAR_INTR, "MODE PARITY INTR"},
- {MR_ENABLE_EOP_INTR, "MODE EOP INTR"},
- {MR_MONITOR_BSY, "MODE MONITOR BSY"},
- {MR_DMA_MODE, "MODE DMA"},
- {MR_ARBITRATE, "MODE ARBITRATION"},
+ {MR_BLOCK_DMA_MODE, "BLOCK DMA MODE"},
+ {MR_TARGET, "TARGET"},
+ {MR_ENABLE_PAR_CHECK, "PARITY CHECK"},
+ {MR_ENABLE_PAR_INTR, "PARITY INTR"},
+ {MR_ENABLE_EOP_INTR, "EOP INTR"},
+ {MR_MONITOR_BSY, "MONITOR BSY"},
+ {MR_DMA_MODE, "DMA MODE"},
+ {MR_ARBITRATE, "ARBITRATE"},
{0, NULL}
};
@@ -322,23 +303,23 @@ static void NCR5380_print(struct Scsi_Host *instance)
icr = NCR5380_read(INITIATOR_COMMAND_REG);
basr = NCR5380_read(BUS_AND_STATUS_REG);
- printk("STATUS_REG: %02x ", status);
+ printk(KERN_DEBUG "SR = 0x%02x : ", status);
for (i = 0; signals[i].mask; ++i)
if (status & signals[i].mask)
- printk(",%s", signals[i].name);
- printk("\nBASR: %02x ", basr);
+ printk(KERN_CONT "%s, ", signals[i].name);
+ printk(KERN_CONT "\nBASR = 0x%02x : ", basr);
for (i = 0; basrs[i].mask; ++i)
if (basr & basrs[i].mask)
- printk(",%s", basrs[i].name);
- printk("\nICR: %02x ", icr);
+ printk(KERN_CONT "%s, ", basrs[i].name);
+ printk(KERN_CONT "\nICR = 0x%02x : ", icr);
for (i = 0; icrs[i].mask; ++i)
if (icr & icrs[i].mask)
- printk(",%s", icrs[i].name);
- printk("\nMODE: %02x ", mr);
+ printk(KERN_CONT "%s, ", icrs[i].name);
+ printk(KERN_CONT "\nMR = 0x%02x : ", mr);
for (i = 0; mrs[i].mask; ++i)
if (mr & mrs[i].mask)
- printk(",%s", mrs[i].name);
- printk("\n");
+ printk(KERN_CONT "%s, ", mrs[i].name);
+ printk(KERN_CONT "\n");
}
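The printk() rework above is needed because each printk() that lacks KERN_CONT starts a new log record, so the old one-fragment-per-flag output would no longer stay on a single line. The pattern reduces to: the first fragment carries the real log level, and continuations are marked explicitly. A minimal example in the same style:

	printk(KERN_DEBUG "SR = 0x%02x : ", status);
	if (status & SR_REQ)
		printk(KERN_CONT "%s, ", "REQ");
	printk(KERN_CONT "\n");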
static struct {
@@ -477,52 +458,18 @@ static void prepare_info(struct Scsi_Host *instance)
instance->base, instance->irq,
instance->can_queue, instance->cmd_per_lun,
instance->sg_tablesize, instance->this_id,
- hostdata->flags & FLAG_NO_DMA_FIXUP ? "NO_DMA_FIXUP " : "",
+ hostdata->flags & FLAG_DMA_FIXUP ? "DMA_FIXUP " : "",
hostdata->flags & FLAG_NO_PSEUDO_DMA ? "NO_PSEUDO_DMA " : "",
hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "",
-#ifdef AUTOPROBE_IRQ
- "AUTOPROBE_IRQ "
-#endif
#ifdef DIFFERENTIAL
"DIFFERENTIAL "
#endif
-#ifdef REAL_DMA
- "REAL_DMA "
-#endif
-#ifdef REAL_DMA_POLL
- "REAL_DMA_POLL "
-#endif
#ifdef PARITY
"PARITY "
#endif
-#ifdef PSEUDO_DMA
- "PSEUDO_DMA "
-#endif
"");
}
-#ifdef PSEUDO_DMA
-static int __maybe_unused NCR5380_write_info(struct Scsi_Host *instance,
- char *buffer, int length)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- hostdata->spin_max_r = 0;
- hostdata->spin_max_w = 0;
- return 0;
-}
-
-static int __maybe_unused NCR5380_show_info(struct seq_file *m,
- struct Scsi_Host *instance)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- seq_printf(m, "Highwater I/O busy spin counts: write %d, read %d\n",
- hostdata->spin_max_w, hostdata->spin_max_r);
- return 0;
-}
-#endif
-
/**
* NCR5380_init - initialise an NCR5380
* @instance: adapter to configure
@@ -543,6 +490,8 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
int i;
unsigned long deadline;
+ instance->max_lun = 7;
+
hostdata->host = instance;
hostdata->id_mask = 1 << instance->this_id;
hostdata->id_higher_mask = 0;
@@ -551,9 +500,8 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags)
hostdata->id_higher_mask |= i;
for (i = 0; i < 8; ++i)
hostdata->busy[i] = 0;
-#ifdef REAL_DMA
- hostdata->dmalen = 0;
-#endif
+ hostdata->dma_len = 0;
+
spin_lock_init(&hostdata->lock);
hostdata->connected = NULL;
hostdata->sensing = NULL;
@@ -719,6 +667,9 @@ static int NCR5380_queue_command(struct Scsi_Host *instance,
cmd->result = 0;
+ if (!NCR5380_acquire_dma_irq(instance))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
spin_lock_irqsave(&hostdata->lock, flags);
/*
@@ -743,6 +694,19 @@ static int NCR5380_queue_command(struct Scsi_Host *instance,
return 0;
}
+static inline void maybe_release_dma_irq(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+
+ /* Caller does the locking needed to set & test these data atomically */
+ if (list_empty(&hostdata->disconnected) &&
+ list_empty(&hostdata->unissued) &&
+ list_empty(&hostdata->autosense) &&
+ !hostdata->connected &&
+ !hostdata->selecting)
+ NCR5380_release_dma_irq(instance);
+}
+
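As its comment says, maybe_release_dma_irq() expects the caller to hold hostdata->lock so that the five idleness tests are evaluated atomically against list updates; every call site added in this patch sits inside such a locked region. A sketch of the calling pattern (not an additional call site from the patch):

	/* Sketch: completion path invoking the helper under the host lock. */
	static void complete_and_maybe_release(struct Scsi_Host *instance)
	{
		struct NCR5380_hostdata *hostdata = shost_priv(instance);
		unsigned long flags;

		spin_lock_irqsave(&hostdata->lock, flags);
		/* ... updates that may leave all queues empty ... */
		maybe_release_dma_irq(instance);
		spin_unlock_irqrestore(&hostdata->lock, flags);
	}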
/**
* dequeue_next_cmd - dequeue a command for processing
* @instance: the scsi host instance
@@ -844,17 +808,14 @@ static void NCR5380_main(struct work_struct *work)
if (!NCR5380_select(instance, cmd)) {
dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
+ maybe_release_dma_irq(instance);
} else {
dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
"main: select failed, returning %p to queue\n", cmd);
requeue_cmd(instance, cmd);
}
}
- if (hostdata->connected
-#ifdef REAL_DMA
- && !hostdata->dmalen
-#endif
- ) {
+ if (hostdata->connected && !hostdata->dma_len) {
dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n");
NCR5380_information_transfer(instance);
done = 0;
@@ -865,7 +826,88 @@ static void NCR5380_main(struct work_struct *work)
} while (!done);
}
-#ifndef DONT_USE_INTR
+/*
+ * NCR5380_dma_complete - finish DMA transfer
+ * @instance: the scsi host instance
+ *
+ * Called by the interrupt handler when DMA finishes or a phase
+ * mismatch occurs (which would end the DMA transfer).
+ */
+
+static void NCR5380_dma_complete(struct Scsi_Host *instance)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ int transferred;
+ unsigned char **data;
+ int *count;
+ int saved_data = 0, overrun = 0;
+ unsigned char p;
+
+ if (hostdata->read_overruns) {
+ p = hostdata->connected->SCp.phase;
+ if (p & SR_IO) {
+ udelay(10);
+ if ((NCR5380_read(BUS_AND_STATUS_REG) &
+ (BASR_PHASE_MATCH | BASR_ACK)) ==
+ (BASR_PHASE_MATCH | BASR_ACK)) {
+ saved_data = NCR5380_read(INPUT_DATA_REG);
+ overrun = 1;
+ dsprintk(NDEBUG_DMA, instance, "read overrun handled\n");
+ }
+ }
+ }
+
+#ifdef CONFIG_SUN3
+ if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
+ pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
+ instance->host_no);
+ BUG();
+ }
+
+ if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
+ (BASR_PHASE_MATCH | BASR_ACK)) {
+ pr_err("scsi%d: BASR %02x\n", instance->host_no,
+ NCR5380_read(BUS_AND_STATUS_REG));
+ pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n",
+ instance->host_no);
+ BUG();
+ }
+#endif
+
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+
+ transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
+ hostdata->dma_len = 0;
+
+ data = (unsigned char **)&hostdata->connected->SCp.ptr;
+ count = &hostdata->connected->SCp.this_residual;
+ *data += transferred;
+ *count -= transferred;
+
+ if (hostdata->read_overruns) {
+ int cnt, toPIO;
+
+ if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
+ cnt = toPIO = hostdata->read_overruns;
+ if (overrun) {
+ dsprintk(NDEBUG_DMA, instance,
+ "Got an input overrun, using saved byte\n");
+ *(*data)++ = saved_data;
+ (*count)--;
+ cnt--;
+ toPIO--;
+ }
+ if (toPIO > 0) {
+ dsprintk(NDEBUG_DMA, instance,
+ "Doing %d byte PIO to 0x%p\n", cnt, *data);
+ NCR5380_transfer_pio(instance, &p, &cnt, data);
+ *count -= toPIO - cnt;
+ }
+ }
+ }
+}
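NCR5380_dma_complete() centralizes the read-overrun workaround: when read_overruns is nonzero, NCR5380_transfer_dma() (below) programs the DMA for that many bytes fewer than requested, and the tail is finished by PIO here, including one byte the chip may already have latched. Restating the byte accounting as plain C (requested is an illustrative name for the caller's original count):

	int programmed = requested - hostdata->read_overruns;	/* via DMA */
	int transferred = programmed - NCR5380_dma_residual(instance);
	int tail = hostdata->read_overruns;			/* via PIO */

	if (overrun) {		/* chip already latched one tail byte */
		*(*data)++ = saved_data;
		tail--;
	}
	/* remaining tail bytes move through NCR5380_transfer_pio() */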
/**
* NCR5380_intr - generic NCR5380 irq handler
@@ -901,7 +943,7 @@ static void NCR5380_main(struct work_struct *work)
* the Busy Monitor interrupt is enabled together with DMA Mode.
*/
-static irqreturn_t NCR5380_intr(int irq, void *dev_id)
+static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
{
struct Scsi_Host *instance = dev_id;
struct NCR5380_hostdata *hostdata = shost_priv(instance);
@@ -919,7 +961,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n",
irq, basr, sr, mr);
-#if defined(REAL_DMA)
if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) {
/* Probably End of DMA, Phase Mismatch or Loss of BSY.
* We ack IRQ after clearing Mode Register. Workarounds
@@ -928,26 +969,14 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n");
- int transferred;
-
- if (!hostdata->connected)
- panic("scsi%d : DMA interrupt with no connected cmd\n",
- instance->hostno);
-
- transferred = hostdata->dmalen - NCR5380_dma_residual(instance);
- hostdata->connected->SCp.this_residual -= transferred;
- hostdata->connected->SCp.ptr += transferred;
- hostdata->dmalen = 0;
-
- /* FIXME: we need to poll briefly then defer a workqueue task ! */
- NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_ACK, 0, 2 * HZ);
-
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- NCR5380_write(MODE_REG, MR_BASE);
- NCR5380_read(RESET_PARITY_INTERRUPT_REG);
- } else
-#endif /* REAL_DMA */
- if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) &&
+ if (hostdata->connected) {
+ NCR5380_dma_complete(instance);
+ queue_work(hostdata->work_q, &hostdata->main_task);
+ } else {
+ NCR5380_write(MODE_REG, MR_BASE);
+ NCR5380_read(RESET_PARITY_INTERRUPT_REG);
+ }
+ } else if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) &&
(sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) {
/* Probably reselected */
NCR5380_write(SELECT_ENABLE_REG, 0);
@@ -966,10 +995,16 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
NCR5380_read(RESET_PARITY_INTERRUPT_REG);
dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_DMA_ENABLE;
+#endif
}
handled = 1;
} else {
shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n");
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_DMA_ENABLE;
+#endif
}
spin_unlock_irqrestore(&hostdata->lock, flags);
@@ -977,8 +1012,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-#endif
-
/*
* Function : int NCR5380_select(struct Scsi_Host *instance,
* struct scsi_cmnd *cmd)
@@ -1217,14 +1250,6 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
* was true but before BSY was false during selection, the information
* transfer phase should be a MESSAGE OUT phase so that we can send the
* IDENTIFY message.
- *
- * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
- * message (2 bytes) with a tag ID that we increment with every command
- * until it wraps back to 0.
- *
- * XXX - it turns out that there are some broken SCSI-II devices,
- * which claim to support tagged queuing but fail when more than
- * some number of commands are issued at once.
*/
/* Wait for start of REQ/ACK handshake */
@@ -1247,9 +1272,6 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun);
len = 1;
- cmd->tag = 0;
-
- /* Send message(s) */
data = tmp;
phase = PHASE_MSGOUT;
NCR5380_transfer_pio(instance, &phase, &len, &data);
@@ -1259,6 +1281,10 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
hostdata->connected = cmd;
hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun;
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_INTR;
+#endif
+
initialize_SCp(cmd);
cmd = NULL;
@@ -1495,7 +1521,6 @@ timeout:
return -1;
}
-#if defined(REAL_DMA) || defined(PSEUDO_DMA) || defined (REAL_DMA_POLL)
/*
* Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
* unsigned char *phase, int *count, unsigned char **data)
@@ -1520,53 +1545,47 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
unsigned char **data)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
- register int c = *count;
- register unsigned char p = *phase;
- register unsigned char *d = *data;
+ int c = *count;
+ unsigned char p = *phase;
+ unsigned char *d = *data;
unsigned char tmp;
- int foo;
-#if defined(REAL_DMA_POLL)
- int cnt, toPIO;
- unsigned char saved_data = 0, overrun = 0, residue;
-#endif
+ int result = 0;
if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
*phase = tmp;
return -1;
}
-#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
+
+ hostdata->connected->SCp.phase = p;
+
if (p & SR_IO) {
- if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS))
- c -= 2;
+ if (hostdata->read_overruns)
+ c -= hostdata->read_overruns;
+ else if (hostdata->flags & FLAG_DMA_FIXUP)
+ --c;
}
- hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c);
dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n",
- (p & SR_IO) ? "receive" : "send", c, *data);
+ (p & SR_IO) ? "receive" : "send", c, d);
+
+#ifdef CONFIG_SUN3
+ /* send start chain */
+ sun3scsi_dma_start(c, *data);
#endif
NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
-
-#ifdef REAL_DMA
NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY |
MR_ENABLE_EOP_INTR);
-#elif defined(REAL_DMA_POLL)
- NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY);
-#else
- /*
- * Note : on my sample board, watch-dog timeouts occurred when interrupts
- * were not disabled for the duration of a single DMA transfer, from
- * before the setting of DMA mode to after transfer of the last byte.
- */
-
- if (hostdata->flags & FLAG_NO_DMA_FIXUP)
- NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY |
- MR_ENABLE_EOP_INTR);
- else
- NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY);
-#endif /* def REAL_DMA */
- dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG));
+ if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) {
+ /* On the Medusa, it is a must to initialize the DMA before
+ * starting the NCR. This is also the cleaner way for the TT.
+ */
+ if (p & SR_IO)
+ result = NCR5380_dma_recv_setup(instance, d, c);
+ else
+ result = NCR5380_dma_send_setup(instance, d, c);
+ }
/*
* On the PAS16 at least I/O recovery delays are not needed here.
@@ -1574,24 +1593,49 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
*/
if (p & SR_IO) {
- io_recovery_delay(1);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
+ NCR5380_io_delay(1);
NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
} else {
- io_recovery_delay(1);
+ NCR5380_io_delay(1);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
- io_recovery_delay(1);
+ NCR5380_io_delay(1);
NCR5380_write(START_DMA_SEND_REG, 0);
- io_recovery_delay(1);
+ NCR5380_io_delay(1);
}
-#if defined(REAL_DMA_POLL)
- do {
- tmp = NCR5380_read(BUS_AND_STATUS_REG);
- } while ((tmp & BASR_PHASE_MATCH) && !(tmp & (BASR_BUSY_ERROR | BASR_END_DMA_TRANSFER)));
+#ifdef CONFIG_SUN3
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_DMA_ENABLE;
+#endif
+ sun3_dma_active = 1;
+#endif
+
+ if (hostdata->flags & FLAG_LATE_DMA_SETUP) {
+ /* On the Falcon, the DMA setup must be done after the last
+ * NCR access, else the DMA setup gets trashed!
+ */
+ if (p & SR_IO)
+ result = NCR5380_dma_recv_setup(instance, d, c);
+ else
+ result = NCR5380_dma_send_setup(instance, d, c);
+ }
+
+ /* On failure, NCR5380_dma_xxxx_setup() returns a negative int. */
+ if (result < 0)
+ return result;
+
+ /* For real DMA, result is the byte count. DMA interrupt is expected. */
+ if (result > 0) {
+ hostdata->dma_len = result;
+ return 0;
+ }
+
+ /* The result is zero iff pseudo DMA send/receive was completed. */
+ hostdata->dma_len = c;
/*
- * At this point, either we've completed DMA, or we have a phase mismatch,
- * or we've unexpectedly lost BUSY (which is a real error).
+ * A note regarding the DMA errata workarounds for early NMOS silicon.
*
* For DMA sends, we want to wait until the last byte has been
* transferred out over the bus before we turn off DMA mode. Alas, there
@@ -1618,79 +1662,16 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
* properly, or the target switches to MESSAGE IN phase to signal a
* disconnection (either operation bringing the DMA to a clean halt).
* However, in order to handle scatter-receive, we must work around the
- * problem. The chosen fix is to DMA N-2 bytes, then check for the
+ * problem. The chosen fix is to DMA fewer bytes, then check for the
* condition before taking the NCR5380 out of DMA mode. One or two extra
* bytes are transferred via PIO as necessary to fill out the original
* request.
*/
- if (p & SR_IO) {
- if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS)) {
- udelay(10);
- if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
- (BASR_PHASE_MATCH | BASR_ACK)) {
- saved_data = NCR5380_read(INPUT_DATA_REGISTER);
- overrun = 1;
- }
- }
- } else {
- int limit = 100;
- while (((tmp = NCR5380_read(BUS_AND_STATUS_REG)) & BASR_ACK) || (NCR5380_read(STATUS_REG) & SR_REQ)) {
- if (!(tmp & BASR_PHASE_MATCH))
- break;
- if (--limit < 0)
- break;
- }
- }
-
- dsprintk(NDEBUG_DMA, "polled DMA transfer complete, basr 0x%02x, sr 0x%02x\n",
- tmp, NCR5380_read(STATUS_REG));
-
- NCR5380_write(MODE_REG, MR_BASE);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
- residue = NCR5380_dma_residual(instance);
- c -= residue;
- *count -= c;
- *data += c;
- *phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
-
- if (!(hostdata->flags & FLAG_NO_DMA_FIXUPS) &&
- *phase == p && (p & SR_IO) && residue == 0) {
- if (overrun) {
- dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
- **data = saved_data;
- *data += 1;
- *count -= 1;
- cnt = toPIO = 1;
- } else {
- printk("No overrun??\n");
- cnt = toPIO = 2;
- }
- dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%X\n", cnt, *data);
- NCR5380_transfer_pio(instance, phase, &cnt, data);
- *count -= toPIO - cnt;
- }
-
- dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count));
- return 0;
-
-#elif defined(REAL_DMA)
- return 0;
-#else /* defined(REAL_DMA_POLL) */
- if (p & SR_IO) {
- foo = NCR5380_pread(instance, d,
- hostdata->flags & FLAG_NO_DMA_FIXUP ? c : c - 1);
- if (!foo && !(hostdata->flags & FLAG_NO_DMA_FIXUP)) {
+ if (hostdata->flags & FLAG_DMA_FIXUP) {
+ if (p & SR_IO) {
/*
- * We can't disable DMA mode after successfully transferring
- * what we plan to be the last byte, since that would open up
- * a race condition where if the target asserted REQ before
- * we got the DMA mode reset, the NCR5380 would have latched
- * an additional byte into the INPUT DATA register and we'd
- * have dropped it.
- *
- * The workaround was to transfer one fewer bytes than we
+ * The workaround was to transfer fewer bytes than we
* intended to with the pseudo-DMA read function, wait for
* the chip to latch the last byte, read it, and then disable
* pseudo-DMA mode.
@@ -1706,19 +1687,16 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
BASR_DRQ, BASR_DRQ, HZ) < 0) {
- foo = -1;
+ result = -1;
shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");
}
if (NCR5380_poll_politely(instance, STATUS_REG,
SR_REQ, 0, HZ) < 0) {
- foo = -1;
+ result = -1;
shost_printk(KERN_ERR, instance, "PDMA read: !REQ timeout\n");
}
- d[c - 1] = NCR5380_read(INPUT_DATA_REG);
- }
- } else {
- foo = NCR5380_pwrite(instance, d, c);
- if (!foo && !(hostdata->flags & FLAG_NO_DMA_FIXUP)) {
+ d[*count - 1] = NCR5380_read(INPUT_DATA_REG);
+ } else {
/*
* Wait for the last byte to be sent. If REQ is being asserted for
* the byte we're interested, we'll ACK it and it will go false.
@@ -1726,21 +1704,15 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
if (NCR5380_poll_politely2(instance,
BUS_AND_STATUS_REG, BASR_DRQ, BASR_DRQ,
BUS_AND_STATUS_REG, BASR_PHASE_MATCH, 0, HZ) < 0) {
- foo = -1;
+ result = -1;
shost_printk(KERN_ERR, instance, "PDMA write: DRQ and phase timeout\n");
}
}
}
- NCR5380_write(MODE_REG, MR_BASE);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- NCR5380_read(RESET_PARITY_INTERRUPT_REG);
- *data = d + c;
- *count = 0;
- *phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
- return foo;
-#endif /* def REAL_DMA */
+
+ NCR5380_dma_complete(instance);
+ return result;
}
-#endif /* defined(REAL_DMA) | defined(PSEUDO_DMA) */
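With the #ifdef maze gone, the transfer mode is carried entirely by the return value of NCR5380_dma_recv_setup()/NCR5380_dma_send_setup(), exactly as the comments above state: negative means failure, a positive byte count means real DMA was programmed and an interrupt will finish it, and zero means a pseudo-DMA transfer already completed inline. A hypothetical pseudo-DMA implementation therefore looks like this (board_pio_in() is an illustrative helper, not from this patch):

	static inline int board_dma_recv_setup(struct Scsi_Host *instance,
					       unsigned char *dst, int len)
	{
		if (board_pio_in(instance, dst, len))	/* hypothetical PIO loop */
			return -1;	/* failure */
		return 0;		/* pseudo DMA: transfer already done */
	}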
/*
* Function : NCR5380_information_transfer (struct Scsi_Host *instance)
@@ -1770,6 +1742,10 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
struct scsi_cmnd *cmd;
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_INTR;
+#endif
+
while ((cmd = hostdata->connected)) {
struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
@@ -1781,6 +1757,31 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
old_phase = phase;
NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
}
+#ifdef CONFIG_SUN3
+ if (phase == PHASE_CMDOUT) {
+ void *d;
+ unsigned long count;
+
+ if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
+ count = cmd->SCp.buffer->length;
+ d = sg_virt(cmd->SCp.buffer);
+ } else {
+ count = cmd->SCp.this_residual;
+ d = cmd->SCp.ptr;
+ }
+
+ if (sun3_dma_setup_done != cmd &&
+ sun3scsi_dma_xfer_len(count, cmd) > 0) {
+ sun3scsi_dma_setup(instance, d, count,
+ rq_data_dir(cmd->request));
+ sun3_dma_setup_done = cmd;
+ }
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_INTR;
+#endif
+ }
+#endif /* CONFIG_SUN3 */
+
if (sink && (phase != PHASE_MSGOUT)) {
NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
@@ -1831,13 +1832,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
* in an unconditional loop.
*/
-#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
transfersize = 0;
- if (!cmd->device->borken &&
- !(hostdata->flags & FLAG_NO_PSEUDO_DMA))
+ if (!cmd->device->borken)
transfersize = NCR5380_dma_xfer_len(instance, cmd, phase);
- if (transfersize) {
+ if (transfersize > 0) {
len = transfersize;
if (NCR5380_transfer_dma(instance, &phase,
&len, (unsigned char **)&cmd->SCp.ptr)) {
@@ -1853,11 +1852,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
do_abort(instance);
cmd->result = DID_ERROR << 16;
/* XXX - need to source or sink data here, as appropriate */
- } else
- cmd->SCp.this_residual -= transfersize - len;
- } else
-#endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
- {
+ }
+ } else {
/* Break up transfer into 3 ms chunks,
* presuming 6 accesses per handshake.
*/
@@ -1868,6 +1864,10 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
(unsigned char **)&cmd->SCp.ptr);
cmd->SCp.this_residual -= transfersize - len;
}
+#ifdef CONFIG_SUN3
+ if (sun3_dma_setup_done == cmd)
+ sun3_dma_setup_done = NULL;
+#endif
return;
case PHASE_MSGIN:
len = 1;
@@ -1912,6 +1912,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
/* Enable reselect interrupts */
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+
+ maybe_release_dma_irq(instance);
return;
case MESSAGE_REJECT:
/* Accept message by clearing ACK */
@@ -1944,6 +1946,9 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
/* Enable reselect interrupts */
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+#ifdef SUN3_SCSI_VME
+ dregs->csr |= CSR_DMA_ENABLE;
+#endif
return;
/*
* The SCSI data pointer is *IMPLICITLY* saved on a disconnect
@@ -2047,6 +2052,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
hostdata->connected = NULL;
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
+ maybe_release_dma_irq(instance);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return;
}
@@ -2094,10 +2100,8 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
unsigned char target_mask;
- unsigned char lun, phase;
- int len;
+ unsigned char lun;
unsigned char msg[3];
- unsigned char *data;
struct NCR5380_cmd *ncmd;
struct scsi_cmnd *tmp;
@@ -2139,15 +2143,26 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
return;
}
- len = 1;
- data = msg;
- phase = PHASE_MSGIN;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
+#ifdef CONFIG_SUN3
+ /* acknowledge toggle to MSGIN */
+ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN));
- if (len) {
- do_abort(instance);
- return;
+ /* peek at the byte without really hitting the bus */
+ msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG);
+#else
+ {
+ int len = 1;
+ unsigned char *data = msg;
+ unsigned char phase = PHASE_MSGIN;
+
+ NCR5380_transfer_pio(instance, &phase, &len, &data);
+
+ if (len) {
+ do_abort(instance);
+ return;
+ }
}
+#endif /* CONFIG_SUN3 */
if (!(msg[0] & 0x80)) {
shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got ");
@@ -2195,59 +2210,37 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
return;
}
- /* Accept message by clearing ACK */
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
- hostdata->connected = tmp;
- dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu, tag %d\n",
- scmd_id(tmp), tmp->device->lun, tmp->tag);
-}
+#ifdef CONFIG_SUN3
+ {
+ void *d;
+ unsigned long count;
-/*
- * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
- *
- * Purpose : called by interrupt handler when DMA finishes or a phase
- * mismatch occurs (which would finish the DMA transfer).
- *
- * Inputs : instance - this instance of the NCR5380.
- *
- * Returns : pointer to the scsi_cmnd structure for which the I_T_L
- * nexus has been reestablished, on failure NULL is returned.
- */
-
-#ifdef REAL_DMA
-static void NCR5380_dma_complete(NCR5380_instance * instance) {
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- int transferred;
+ if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
+ count = tmp->SCp.buffer->length;
+ d = sg_virt(tmp->SCp.buffer);
+ } else {
+ count = tmp->SCp.this_residual;
+ d = tmp->SCp.ptr;
+ }
- /*
- * XXX this might not be right.
- *
- * Wait for final byte to transfer, ie wait for ACK to go false.
- *
- * We should use the Last Byte Sent bit, unfortunately this is
- * not available on the 5380/5381 (only the various CMOS chips)
- *
- * FIXME: timeout, and need to handle long timeout/irq case
- */
+ if (sun3_dma_setup_done != tmp &&
+ sun3scsi_dma_xfer_len(count, tmp) > 0) {
+ sun3scsi_dma_setup(instance, d, count,
+ rq_data_dir(tmp->request));
+ sun3_dma_setup_done = tmp;
+ }
+ }
- NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, BASR_ACK, 0, 5*HZ);
+ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
+#endif /* CONFIG_SUN3 */
+ /* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- /*
- * The only places we should see a phase mismatch and have to send
- * data from the same set of pointers will be the data transfer
- * phases. So, residual, requested length are only important here.
- */
-
- if (!(hostdata->connected->SCp.phase & SR_CD)) {
- transferred = instance->dmalen - NCR5380_dma_residual();
- hostdata->connected->SCp.this_residual -= transferred;
- hostdata->connected->SCp.ptr += transferred;
- }
+ hostdata->connected = tmp;
+ dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu\n",
+ scmd_id(tmp), tmp->device->lun);
}
-#endif /* def REAL_DMA */
/**
* list_find_cmd - test for presence of a command in a linked list
@@ -2360,9 +2353,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (hostdata->connected == cmd) {
dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
hostdata->connected = NULL;
-#ifdef REAL_DMA
hostdata->dma_len = 0;
-#endif
if (do_abort(instance)) {
set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
@@ -2388,6 +2379,7 @@ out:
dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd);
queue_work(hostdata->work_q, &hostdata->main_task);
+ maybe_release_dma_irq(instance);
spin_unlock_irqrestore(&hostdata->lock, flags);
return result;
@@ -2445,7 +2437,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
set_host_byte(cmd, DID_RESET);
- cmd->scsi_done(cmd);
+ complete_cmd(instance, cmd);
}
INIT_LIST_HEAD(&hostdata->disconnected);
@@ -2465,11 +2457,10 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
for (i = 0; i < 8; ++i)
hostdata->busy[i] = 0;
-#ifdef REAL_DMA
hostdata->dma_len = 0;
-#endif
queue_work(hostdata->work_q, &hostdata->main_task);
+ maybe_release_dma_irq(instance);
spin_unlock_irqrestore(&hostdata->lock, flags);
return SUCCESS;
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index a79288682..c60728785 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -199,13 +199,6 @@
#define PHASE_SR_TO_TCR(phase) ((phase) >> 2)
-/*
- * "Special" value for the (unsigned char) command tag, to indicate
- * I_T_L nexus instead of I_T_L_Q.
- */
-
-#define TAG_NONE 0xff
-
/*
* These are "special" values for the irq and dma_channel fields of the
* Scsi_Host structure
@@ -220,28 +213,17 @@
#define NO_IRQ 0
#endif
-#define FLAG_NO_DMA_FIXUP 1 /* No DMA errata workarounds */
+#define FLAG_DMA_FIXUP 1 /* Use DMA errata workarounds */
#define FLAG_NO_PSEUDO_DMA 8 /* Inhibit DMA */
#define FLAG_LATE_DMA_SETUP 32 /* Setup NCR before DMA H/W */
-#define FLAG_TAGGED_QUEUING 64 /* as X3T9.2 spelled it */
#define FLAG_TOSHIBA_DELAY 128 /* Allow for borken CD-ROMs */
-#ifdef SUPPORT_TAGS
-struct tag_alloc {
- DECLARE_BITMAP(allocated, MAX_TAGS);
- int nr_allocated;
- int queue_size;
-};
-#endif
-
struct NCR5380_hostdata {
NCR5380_implementation_fields; /* implementation specific */
struct Scsi_Host *host; /* Host backpointer */
unsigned char id_mask, id_higher_mask; /* 1 << id, all bits greater */
unsigned char busy[8]; /* index = target, bit = lun */
-#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
int dma_len; /* requested length of DMA */
-#endif
unsigned char last_message; /* last message OUT */
struct scsi_cmnd *connected; /* currently connected cmnd */
struct scsi_cmnd *selecting; /* cmnd to be connected */
@@ -256,13 +238,6 @@ struct NCR5380_hostdata {
int read_overruns; /* number of bytes to cut from a
* transfer to handle chip overruns */
struct work_struct main_task;
-#ifdef SUPPORT_TAGS
- struct tag_alloc TagAlloc[8][8]; /* 8 targets and 8 LUNs */
-#endif
-#ifdef PSEUDO_DMA
- unsigned spin_max_r;
- unsigned spin_max_w;
-#endif
struct workqueue_struct *work_q;
unsigned long accesses_per_ms; /* chip register accesses per ms */
};
@@ -305,132 +280,20 @@ static void NCR5380_print(struct Scsi_Host *instance);
#define NCR5380_dprint_phase(flg, arg) do {} while (0)
#endif
-#if defined(AUTOPROBE_IRQ)
static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible);
-#endif
static int NCR5380_init(struct Scsi_Host *instance, int flags);
static int NCR5380_maybe_reset_bus(struct Scsi_Host *);
static void NCR5380_exit(struct Scsi_Host *instance);
static void NCR5380_information_transfer(struct Scsi_Host *instance);
-#ifndef DONT_USE_INTR
static irqreturn_t NCR5380_intr(int irq, void *dev_id);
-#endif
static void NCR5380_main(struct work_struct *work);
static const char *NCR5380_info(struct Scsi_Host *instance);
static void NCR5380_reselect(struct Scsi_Host *instance);
static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
-#if defined(PSEUDO_DMA) || defined(REAL_DMA) || defined(REAL_DMA_POLL)
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-#endif
static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
+static int NCR5380_poll_politely(struct Scsi_Host *, int, int, int, int);
+static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int);
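These declarations are now unconditional because the reworked DMA paths use them in every configuration. As the call sites in this patch show, NCR5380_poll_politely(instance, reg, bit, val, wait) repeatedly reads reg until the masked value equals val or the timeout expires, returning a negative value on timeout, and NCR5380_poll_politely2() waits for either of two such register conditions. Typical use, taken from the PDMA fixup path above:

	/* Wait up to one second for DRQ to assert; report a timeout. */
	if (NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
				  BASR_DRQ, BASR_DRQ, HZ) < 0)
		shost_printk(KERN_ERR, instance, "PDMA read: DRQ timeout\n");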
-#if (defined(REAL_DMA) || defined(REAL_DMA_POLL))
-
-#if defined(i386) || defined(__alpha__)
-
-/**
- * NCR5380_pc_dma_setup - setup ISA DMA
- * @instance: adapter to set up
- * @ptr: block to transfer (virtual address)
- * @count: number of bytes to transfer
- * @mode: DMA controller mode to use
- *
- * Program the DMA controller ready to perform an ISA DMA transfer
- * on this chip.
- *
- * Locks: takes and releases the ISA DMA lock.
- */
-
-static __inline__ int NCR5380_pc_dma_setup(struct Scsi_Host *instance, unsigned char *ptr, unsigned int count, unsigned char mode)
-{
- unsigned limit;
- unsigned long bus_addr = virt_to_bus(ptr);
- unsigned long flags;
-
- if (instance->dma_channel <= 3) {
- if (count > 65536)
- count = 65536;
- limit = 65536 - (bus_addr & 0xFFFF);
- } else {
- if (count > 65536 * 2)
- count = 65536 * 2;
- limit = 65536 * 2 - (bus_addr & 0x1FFFF);
- }
-
- if (count > limit)
- count = limit;
-
- if ((count & 1) || (bus_addr & 1))
- panic("scsi%d : attempted unaligned DMA transfer\n", instance->host_no);
-
- flags=claim_dma_lock();
- disable_dma(instance->dma_channel);
- clear_dma_ff(instance->dma_channel);
- set_dma_addr(instance->dma_channel, bus_addr);
- set_dma_count(instance->dma_channel, count);
- set_dma_mode(instance->dma_channel, mode);
- enable_dma(instance->dma_channel);
- release_dma_lock(flags);
-
- return count;
-}
-
-/**
- * NCR5380_pc_dma_write_setup - setup ISA DMA write
- * @instance: adapter to set up
- * @ptr: block to transfer (virtual address)
- * @count: number of bytes to transfer
- *
- * Program the DMA controller ready to perform an ISA DMA write to the
- * SCSI controller.
- *
- * Locks: called routines take and release the ISA DMA lock.
- */
-
-static __inline__ int NCR5380_pc_dma_write_setup(struct Scsi_Host *instance, unsigned char *src, unsigned int count)
-{
- return NCR5380_pc_dma_setup(instance, src, count, DMA_MODE_WRITE);
-}
-
-/**
- * NCR5380_pc_dma_read_setup - setup ISA DMA read
- * @instance: adapter to set up
- * @ptr: block to transfer (virtual address)
- * @count: number of bytes to transfer
- *
- * Program the DMA controller ready to perform an ISA DMA read from the
- * SCSI controller.
- *
- * Locks: called routines take and release the ISA DMA lock.
- */
-
-static __inline__ int NCR5380_pc_dma_read_setup(struct Scsi_Host *instance, unsigned char *src, unsigned int count)
-{
- return NCR5380_pc_dma_setup(instance, src, count, DMA_MODE_READ);
-}
-
-/**
- * NCR5380_pc_dma_residual - return bytes left
- * @instance: adapter
- *
- * Reports the number of bytes left over after the DMA was terminated.
- *
- * Locks: takes and releases the ISA DMA lock.
- */
-
-static __inline__ int NCR5380_pc_dma_residual(struct Scsi_Host *instance)
-{
- unsigned long flags;
- int tmp;
-
- flags = claim_dma_lock();
- clear_dma_ff(instance->dma_channel);
- tmp = get_dma_residue(instance->dma_channel);
- release_dma_lock(flags);
-
- return tmp;
-}
-#endif /* defined(i386) || defined(__alpha__) */
-#endif /* defined(REAL_DMA) */
#endif /* __KERNEL__ */
#endif /* NCR5380_H */
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 7dfd0fa27..6678d1fd8 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -555,8 +555,6 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
- if (!cmd_fibcontext)
- return -ENOMEM;
aac_fib_init(cmd_fibcontext);
dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
@@ -1037,8 +1035,6 @@ static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
dev = (struct aac_dev *)scsicmd->device->host->hostdata;
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
- if (!cmd_fibcontext)
- return -ENOMEM;
aac_fib_init(cmd_fibcontext);
dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);
@@ -1950,10 +1946,6 @@ static int aac_read(struct scsi_cmnd * scsicmd)
* Allocate and initialize a Fib
*/
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
- if (!cmd_fibcontext) {
- printk(KERN_WARNING "aac_read: fib allocation failed\n");
- return -1;
- }
status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
@@ -2048,16 +2040,6 @@ static int aac_write(struct scsi_cmnd * scsicmd)
* Allocate and initialize a Fib then setup a BlockWrite command
*/
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
- if (!cmd_fibcontext) {
- /* FIB temporarily unavailable,not catastrophic failure */
-
- /* scsicmd->result = DID_ERROR << 16;
- * scsicmd->scsi_done(scsicmd);
- * return 0;
- */
- printk(KERN_WARNING "aac_write: fib allocation failed\n");
- return -1;
- }
status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
@@ -2283,8 +2265,6 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
* Allocate and initialize a Fib
*/
cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
- if (!cmd_fibcontext)
- return SCSI_MLQUEUE_HOST_BUSY;
aac_fib_init(cmd_fibcontext);
@@ -3184,8 +3164,6 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
* Allocate and initialize a Fib then setup a BlockWrite command
*/
cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
- if (!cmd_fibcontext)
- return -1;
status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
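The deleted NULL checks lean on aac_fib_alloc_tag() never failing: unlike aac_fib_alloc(), it returns an entry from a preallocated fib array indexed by the command's tag, so there is no allocation step that could return NULL. A rough sketch of that lookup (field names are approximations, not the driver's exact code):

	struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
	{
		/* preallocated per-tag entry: lookup cannot fail */
		struct fib *fibptr = &dev->fibs[scmd->request->tag];

		return fibptr;
	}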
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index edc2643df..969c312de 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -63,7 +63,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 41052
+# define AAC_DRIVER_BUILD 41066
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -621,6 +621,11 @@ struct aac_driver_ident
#define AAC_QUIRK_SCSI_32 0x0020
/*
+ * SRC based adapters support the AifReqEvent functions
+ */
+#define AAC_QUIRK_SRC 0x0040
+
+/*
* The adapter interface specs all queues to be located in the same
* physically contiguous block. The host structure that defines the
* communication queues will assume they are each a separate physically
@@ -721,7 +726,7 @@ struct sa_registers {
};
-#define Sa_MINIPORT_REVISION 1
+#define SA_INIT_NUM_MSIXVECTORS 1
#define sa_readw(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
#define sa_readl(AEP, CSR) readl(&((AEP)->regs.sa->CSR))
@@ -2066,6 +2071,10 @@ extern struct aac_common aac_config;
#define AifEnAddJBOD 30 /* JBOD created */
#define AifEnDeleteJBOD 31 /* JBOD deleted */
+#define AifBuManagerEvent 42 /* Bu management*/
+#define AifBuCacheDataLoss 10
+#define AifBuCacheDataRecover 11
+
#define AifCmdJobProgress 2 /* Progress report */
#define AifJobCtrZero 101 /* Array Zero progress */
#define AifJobStsSuccess 1 /* Job completes */
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 87397deff..341ea327a 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -106,7 +106,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
if (dev->max_fib_size != sizeof(struct hw_fib))
init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
- init->Sa_MSIXVectors = cpu_to_le32(Sa_MINIPORT_REVISION);
+ init->Sa_MSIXVectors = cpu_to_le32(SA_INIT_NUM_MSIXVECTORS);
init->fsrev = cpu_to_le32(dev->fsrev);
/*
@@ -393,21 +393,8 @@ void aac_define_int_mode(struct aac_dev *dev)
msi_count = i;
} else {
dev->msi_enabled = 0;
- printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n",
- dev->name, dev->id, i);
- }
- }
-
- if (!dev->msi_enabled) {
- msi_count = 1;
- i = pci_enable_msi(dev->pdev);
-
- if (!i) {
- dev->msi_enabled = 1;
- dev->msi = 1;
- } else {
- printk(KERN_ERR "%s%d: MSI not supported!! Will try INTx 0x%x.\n",
- dev->name, dev->id, i);
+ dev_err(&dev->pdev->dev,
+ "MSIX not supported!! Will try INTX 0x%x.\n", i);
}
}
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index bb7988d53..0aeecec1f 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -901,6 +901,31 @@ void aac_printf(struct aac_dev *dev, u32 val)
memset(cp, 0, 256);
}
+static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index)
+{
+ return le32_to_cpu(((__le32 *)aifcmd->data)[index]);
+}
+
+
+static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
+{
+ switch (aac_aif_data(aifcmd, 1)) {
+ case AifBuCacheDataLoss:
+ if (aac_aif_data(aifcmd, 2))
+ dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n",
+ aac_aif_data(aifcmd, 2));
+ else
+ dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n");
+ break;
+ case AifBuCacheDataRecover:
+ if (aac_aif_data(aifcmd, 2))
+ dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n",
+ aac_aif_data(aifcmd, 2));
+ else
+ dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n");
+ break;
+ }
+}
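aac_aif_data() hides the wire format: AIF payload words are little-endian 32-bit values, so every access goes through le32_to_cpu(). Usage mirrors the handler above, where word 1 selects the backup-unit event and word 2 carries an optional detail code:

	int subtype = aac_aif_data(aifcmd, 1);
	int detail = aac_aif_data(aifcmd, 2);

	if (subtype == AifBuCacheDataLoss && detail)
		dev_info(&dev->pdev->dev,
			 "Backup unit had cache data loss - [%d]\n", detail);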
/**
* aac_handle_aif - Handle a message from the firmware
@@ -1154,6 +1179,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
ADD : DELETE;
break;
}
+ case AifBuManagerEvent:
+ aac_handle_aif_bu(dev, aifcmd);
break;
}
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index d677b5286..7e836205a 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -392,9 +392,10 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
if (likely(fib->callback && fib->callback_data)) {
fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
fib->callback(fib->callback_data, fib);
- } else {
- aac_fib_complete(fib);
- }
+ } else
+ dev_info(&dev->pdev->dev,
+ "Invalid callback_fib[%d] (*%p)(%p)\n",
+ index, fib->callback, fib->callback_data);
} else {
unsigned long flagv;
dprintk((KERN_INFO "event_wait up\n"));
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 79a1cec1a..79871f351 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -236,10 +236,10 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
- { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */
+ { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
};
/**
@@ -1299,6 +1299,9 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
shost->this_id = shost->max_id;
+ if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+ aac_intr_normal(aac, 0, 2, 0, NULL);
+
/*
* dmb - we may need to move the setting of these parms somewhere else once
* we get a fib that can report the actual numbers
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index bc0203f3d..28f8b8a1b 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -135,7 +135,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
if (mode & AAC_INT_MODE_AIF) {
/* handle AIF */
- aac_intr_normal(dev, 0, 2, 0, NULL);
+ if (dev->aif_thread && dev->fsa_dev)
+ aac_intr_normal(dev, 0, 2, 0, NULL);
if (dev->msi_enabled)
aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
mode = 0;
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 9f636a34d..0fdc98bc2 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -477,7 +477,7 @@ static int asd_init_chip(struct asd_ha_struct *asd_ha)
err = asd_start_seqs(asd_ha);
if (err) {
- asd_printk("coudln't start seqs for %s\n",
+ asd_printk("couldn't start seqs for %s\n",
pci_name(asd_ha->pcidev));
goto out;
}
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
index eb041d680..af94cf06f 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.c
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -1352,7 +1352,7 @@ int asd_start_seqs(struct asd_ha_struct *asd_ha)
for_each_sequencer(lseq_mask, lseq_mask, lseq) {
err = asd_seq_start_lseq(asd_ha, lseq);
if (err) {
- asd_printk("coudln't start LSEQ %d for %s\n", lseq,
+ asd_printk("couldn't start LSEQ %d for %s\n", lseq,
pci_name(asd_ha->pcidev));
return err;
}
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index 221f18c5d..8e9cfe8f2 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -13,13 +13,14 @@
#include <scsi/scsi_host.h>
-#define PSEUDO_DMA
-
#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
#define NCR5380_read(reg) cumanascsi_read(instance, reg)
#define NCR5380_write(reg, value) cumanascsi_write(instance, reg, value)
#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize)
+#define NCR5380_dma_recv_setup cumanascsi_pread
+#define NCR5380_dma_send_setup cumanascsi_pwrite
+#define NCR5380_dma_residual(instance) (0)
#define NCR5380_intr cumanascsi_intr
#define NCR5380_queue_command cumanascsi_queue_command
@@ -41,8 +42,8 @@ void cumanascsi_setup(char *str, int *ints)
#define L(v) (((v)<<16)|((v) & 0x0000ffff))
#define H(v) (((v)>>16)|((v) & 0xffff0000))
-static inline int
-NCR5380_pwrite(struct Scsi_Host *host, unsigned char *addr, int len)
+static inline int cumanascsi_pwrite(struct Scsi_Host *host,
+ unsigned char *addr, int len)
{
unsigned long *laddr;
void __iomem *dma = priv(host)->dma + 0x2000;
@@ -101,11 +102,14 @@ NCR5380_pwrite(struct Scsi_Host *host, unsigned char *addr, int len)
}
end:
writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
- return len;
+
+ if (len)
+ return -1;
+ return 0;
}
-static inline int
-NCR5380_pread(struct Scsi_Host *host, unsigned char *addr, int len)
+static inline int cumanascsi_pread(struct Scsi_Host *host,
+ unsigned char *addr, int len)
{
unsigned long *laddr;
void __iomem *dma = priv(host)->dma + 0x2000;
@@ -163,7 +167,10 @@ NCR5380_pread(struct Scsi_Host *host, unsigned char *addr, int len)
}
end:
writeb(priv(host)->ctrl | 0x40, priv(host)->base + CTRL);
- return len;
+
+ if (len)
+ return -1;
+ return 0;
}
static unsigned char cumanascsi_read(struct Scsi_Host *host, unsigned int reg)
@@ -239,7 +246,7 @@ static int cumanascsi1_probe(struct expansion_card *ec,
host->irq = ec->irq;
- ret = NCR5380_init(host, 0);
+ ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP);
if (ret)
goto out_unmap;
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index faa1bee07..edce5f3cf 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -365,7 +365,7 @@ static struct scsi_host_template cumanascsi2_template = {
.eh_abort_handler = fas216_eh_abort,
.can_queue = 1,
.this_id = 7,
- .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+ .sg_tablesize = SG_MAX_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "cumanascsi2",
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index a8ad6880d..e93e047f4 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -484,7 +484,7 @@ static struct scsi_host_template eesox_template = {
.eh_abort_handler = fas216_eh_abort,
.can_queue = 1,
.this_id = 7,
- .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+ .sg_tablesize = SG_MAX_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "eesox",
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 1fab1d189..a396024a3 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -14,9 +14,6 @@
#include <scsi/scsi_host.h>
-/*#define PSEUDO_DMA*/
-#define DONT_USE_INTR
-
#define priv(host) ((struct NCR5380_hostdata *)(host)->hostdata)
#define NCR5380_read(reg) \
@@ -24,7 +21,10 @@
#define NCR5380_write(reg, value) \
writeb(value, priv(instance)->base + ((reg) << 2))
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize)
+#define NCR5380_dma_xfer_len(instance, cmd, phase) (0)
+#define NCR5380_dma_recv_setup oakscsi_pread
+#define NCR5380_dma_send_setup oakscsi_pwrite
+#define NCR5380_dma_residual(instance) (0)
#define NCR5380_queue_command oakscsi_queue_command
#define NCR5380_info oakscsi_info
@@ -40,23 +40,23 @@
#define STAT ((128 + 16) << 2)
#define DATA ((128 + 8) << 2)
-static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *addr,
- int len)
+static inline int oakscsi_pwrite(struct Scsi_Host *instance,
+ unsigned char *addr, int len)
{
void __iomem *base = priv(instance)->base;
printk("writing %p len %d\n",addr, len);
- if(!len) return -1;
while(1)
{
int status;
while (((status = readw(base + STAT)) & 0x100)==0);
}
+ return 0;
}
-static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *addr,
- int len)
+static inline int oakscsi_pread(struct Scsi_Host *instance,
+ unsigned char *addr, int len)
{
void __iomem *base = priv(instance)->base;
printk("reading %p len %d\n", addr, len);
@@ -73,7 +73,7 @@ printk("reading %p len %d\n", addr, len);
if(status & 0x200 || !timeout)
{
printk("status = %08X\n", status);
- return 1;
+ return -1;
}
}
@@ -143,7 +143,7 @@ static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
host->irq = NO_IRQ;
host->n_io_port = 255;
- ret = NCR5380_init(host, 0);
+ ret = NCR5380_init(host, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP);
if (ret)
goto out_unmap;
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index 5e1b73e1b..79aa88911 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -291,7 +291,7 @@ static struct scsi_host_template powertecscsi_template = {
.can_queue = 8,
.this_id = 7,
- .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+ .sg_tablesize = SG_MAX_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
.cmd_per_lun = 2,
.use_clustering = ENABLE_CLUSTERING,
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
deleted file mode 100644
index 389825ba5..000000000
--- a/drivers/scsi/atari_NCR5380.c
+++ /dev/null
@@ -1,2676 +0,0 @@
-/*
- * NCR 5380 generic driver routines. These should make it *trivial*
- * to implement 5380 SCSI drivers under Linux with a non-trantor
- * architecture.
- *
- * Note that these routines also work with NR53c400 family chips.
- *
- * Copyright 1993, Drew Eckhardt
- * Visionary Computing
- * (Unix and Linux consulting and custom programming)
- * drew@colorado.edu
- * +1 (303) 666-5836
- *
- * For more information, please consult
- *
- * NCR 5380 Family
- * SCSI Protocol Controller
- * Databook
- *
- * NCR Microelectronics
- * 1635 Aeroplaza Drive
- * Colorado Springs, CO 80916
- * 1+ (719) 578-3400
- * 1+ (800) 334-5454
- */
-
-/* Ported to Atari by Roman Hodek and others. */
-
-/* Adapted for the sun3 by Sam Creasey. */
-
-/*
- * Design
- *
- * This is a generic 5380 driver. To use it on a different platform,
- * one simply writes appropriate system specific macros (ie, data
- * transfer - some PC's will use the I/O bus, 68K's must use
- * memory mapped) and drops this file in their 'C' wrapper.
- *
- * As far as command queueing, two queues are maintained for
- * each 5380 in the system - commands that haven't been issued yet,
- * and commands that are currently executing. This means that an
- * unlimited number of commands may be queued, letting
- * more commands propagate from the higher driver levels giving higher
- * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported,
- * allowing multiple commands to propagate all the way to a SCSI-II device
- * while a command is already executing.
- *
- *
- * Issues specific to the NCR5380 :
- *
- * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead
- * piece of hardware that requires you to sit in a loop polling for
- * the REQ signal as long as you are connected. Some devices are
- * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect
- * while doing long seek operations. [...] These
- * broken devices are the exception rather than the rule and I'd rather
- * spend my time optimizing for the normal case.
- *
- * Architecture :
- *
- * At the heart of the design is a coroutine, NCR5380_main,
- * which is started from a workqueue for each NCR5380 host in the
- * system. It attempts to establish I_T_L or I_T_L_Q nexuses by
- * removing the commands from the issue queue and calling
- * NCR5380_select() if a nexus is not established.
- *
- * Once a nexus is established, the NCR5380_information_transfer()
- * phase goes through the various phases as instructed by the target.
- * if the target goes into MSG IN and sends a DISCONNECT message,
- * the command structure is placed into the per instance disconnected
- * queue, and NCR5380_main tries to find more work. If the target is
- * idle for too long, the system will try to sleep.
- *
- * If a command has disconnected, eventually an interrupt will trigger,
- * calling NCR5380_intr() which will in turn call NCR5380_reselect
- * to reestablish a nexus. This will run main if necessary.
- *
- * On command termination, the done function will be called as
- * appropriate.
- *
- * SCSI pointers are maintained in the SCp field of SCSI command
- * structures, being initialized after the command is connected
- * in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
- * Note that in violation of the standard, an implicit SAVE POINTERS operation
- * is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
- */
-
-/*
- * Using this file :
- * This file a skeleton Linux SCSI driver for the NCR 5380 series
- * of chips. To use it, you write an architecture specific functions
- * and macros and include this file in your driver.
- *
- * These macros control options :
- * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
- * for commands that return with a CHECK CONDITION status.
- *
- * DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
- * transceivers.
- *
- * REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
- *
- * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible
- *
- * These macros MUST be defined :
- *
- * NCR5380_read(register) - read from the specified register
- *
- * NCR5380_write(register, value) - write to the specific register
- *
- * NCR5380_implementation_fields - additional fields needed for this
- * specific implementation of the NCR5380
- *
- * Either real DMA *or* pseudo DMA may be implemented
- * REAL functions :
- * NCR5380_REAL_DMA should be defined if real DMA is to be used.
- * Note that the DMA setup functions should return the number of bytes
- * that they were able to program the controller for.
- *
- * Also note that generic i386/PC versions of these macros are
- * available as NCR5380_i386_dma_write_setup,
- * NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
- *
- * NCR5380_dma_write_setup(instance, src, count) - initialize a DMA send
- * NCR5380_dma_read_setup(instance, dst, count) - initialize a DMA receive
- * NCR5380_dma_residual(instance); - return the residual byte count
- *
- * PSEUDO functions :
- * NCR5380_pwrite(instance, src, count)
- * NCR5380_pread(instance, dst, count);
- *
- * The generic driver is initialized by calling NCR5380_init(instance),
- * after setting the appropriate host specific fields and ID. If the
- * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
- * possible) function may be used.
- */
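-
-/*
- * For illustration only; not part of this driver. A minimal sketch of
- * the board-specific glue described above, assuming a hypothetical
- * memory-mapped card: hypothetical_base and hypothetical_probe() are
- * invented names, and the #if 0 keeps the sketch out of any real
- * build. In-tree board drivers follow the same pattern with their
- * own register accessors.
- */
-#if 0
-static void __iomem *hypothetical_base; /* chip register window */
-
-#define NCR5380_implementation_fields /* no extra hostdata members */
-
-#define NCR5380_read(reg) readb(hypothetical_base + (reg))
-#define NCR5380_write(reg, value) writeb((value), hypothetical_base + (reg))
-
-#include "NCR5380.c" /* i.e. this file */
-
-static int hypothetical_probe(struct Scsi_Host *instance)
-{
- /* Set the host-specific fields and SCSI ID, then init the core. */
- instance->this_id = 7;
- return NCR5380_init(instance, 0);
-}
-#endif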
-
-static int do_abort(struct Scsi_Host *);
-static void do_reset(struct Scsi_Host *);
-
-#ifdef SUPPORT_TAGS
-
-/*
- * Functions for handling tagged queuing
- * =====================================
- *
- * ++roman (01/96): Now I've implemented SCSI-2 tagged queuing. Some notes:
- *
- * Using consecutive numbers for the tags is not a good idea in my eyes.
- * Tags could be wrongly reused if the counter (8 bit!) wraps while some
- * early command has been preempted for a long time. My solution: a
- * bitfield for remembering used tags.
- *
- * There's also the problem that each target has a certain queue size, but we
- * cannot know it in advance :-( We just see a QUEUE_FULL status being
- * returned. So, in this case, the driver's internal queue-size assumption is
- * reduced to the number of currently active tags whenever the target returns
- * QUEUE_FULL.
- *
- * We're also not allowed to run tagged commands as long as an untagged
- * command is active. And REQUEST SENSE commands after a contingent allegiance
- * condition _must_ be untagged. To keep track of whether an untagged command
- * has been issued, the host->busy array is still employed, as it is without
- * support for tagged queuing.
- *
- * One could suspect that there are possible race conditions between
- * is_lun_busy(), cmd_get_tag() and cmd_free_tag(). But I think this isn't the
- * case: is_lun_busy() and cmd_get_tag() are both called from NCR5380_main(),
- * which is already guaranteed to be running at most once. It is also the only
- * place where tags/LUNs are allocated. So no other allocation can slip in
- * between that pair; at most a reselection could happen, which can free a
- * tag, but that doesn't hurt. Only the sequence in cmd_free_tag() is
- * important: the tag bit must be cleared before 'nr_allocated' is decreased.
- */
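-
-/*
- * For orientation, a sketch of the per-LUN bookkeeping these notes
- * assume; the real struct tag_alloc is defined in the board header,
- * and the fields below simply mirror their uses in the functions
- * that follow:
- *
- *	struct tag_alloc {
- *		DECLARE_BITMAP(allocated, MAX_TAGS);
- *		int nr_allocated;
- *		int queue_size;
- *	};
- *
- * 'allocated' remembers which tag numbers are in use, 'nr_allocated'
- * counts them, and 'queue_size' starts at MAX_TAGS and is reduced
- * whenever a target returns QUEUE_FULL.
- */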
-
-static void __init init_tags(struct NCR5380_hostdata *hostdata)
-{
- int target, lun;
- struct tag_alloc *ta;
-
- if (!(hostdata->flags & FLAG_TAGGED_QUEUING))
- return;
-
- for (target = 0; target < 8; ++target) {
- for (lun = 0; lun < 8; ++lun) {
- ta = &hostdata->TagAlloc[target][lun];
- bitmap_zero(ta->allocated, MAX_TAGS);
- ta->nr_allocated = 0;
- /* At the beginning, assume the maximum queue size we could
- * support (MAX_TAGS). This value will be decreased if the target
- * returns QUEUE_FULL status.
- */
- ta->queue_size = MAX_TAGS;
- }
- }
-}
-
-
-/* Check if we can issue a command to this LUN: First see if the LUN is marked
- * busy by an untagged command. If the command should use tagged queuing, also
- * check that there is a free tag and the target's queue won't overflow. This
- * function should be called with interrupts disabled to avoid race
- * conditions.
- */
-
-static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
-{
- u8 lun = cmd->device->lun;
- struct Scsi_Host *instance = cmd->device->host;
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- if (hostdata->busy[cmd->device->id] & (1 << lun))
- return 1;
- if (!should_be_tagged ||
- !(hostdata->flags & FLAG_TAGGED_QUEUING) ||
- !cmd->device->tagged_supported)
- return 0;
- if (hostdata->TagAlloc[scmd_id(cmd)][lun].nr_allocated >=
- hostdata->TagAlloc[scmd_id(cmd)][lun].queue_size) {
- dsprintk(NDEBUG_TAGS, instance, "target %d lun %d: no free tags\n",
- scmd_id(cmd), lun);
- return 1;
- }
- return 0;
-}
-
-
-/* Allocate a tag for a command (there are no checks anymore, is_lun_busy()
- * must be called beforehand!), or reserve the LUN in 'busy' if the command is
- * untagged.
- */
-
-static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
-{
- u8 lun = cmd->device->lun;
- struct Scsi_Host *instance = cmd->device->host;
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- /* If we or the target don't support tagged queuing, allocate the LUN for
- * an untagged command.
- */
- if (!should_be_tagged ||
- !(hostdata->flags & FLAG_TAGGED_QUEUING) ||
- !cmd->device->tagged_supported) {
- cmd->tag = TAG_NONE;
- hostdata->busy[cmd->device->id] |= (1 << lun);
- dsprintk(NDEBUG_TAGS, instance, "target %d lun %d now allocated by untagged command\n",
- scmd_id(cmd), lun);
- } else {
- struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];
-
- cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
- set_bit(cmd->tag, ta->allocated);
- ta->nr_allocated++;
- dsprintk(NDEBUG_TAGS, instance, "using tag %d for target %d lun %d (%d tags allocated)\n",
- cmd->tag, scmd_id(cmd), lun, ta->nr_allocated);
- }
-}
-
-
-/* Mark the tag of command 'cmd' as free, or in case of an untagged command,
- * unlock the LUN.
- */
-
-static void cmd_free_tag(struct scsi_cmnd *cmd)
-{
- u8 lun = cmd->device->lun;
- struct Scsi_Host *instance = cmd->device->host;
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- if (cmd->tag == TAG_NONE) {
- hostdata->busy[cmd->device->id] &= ~(1 << lun);
- dsprintk(NDEBUG_TAGS, instance, "target %d lun %d untagged cmd freed\n",
- scmd_id(cmd), lun);
- } else if (cmd->tag >= MAX_TAGS) {
- shost_printk(KERN_NOTICE, instance,
- "trying to free bad tag %d!\n", cmd->tag);
- } else {
- struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];
- clear_bit(cmd->tag, ta->allocated);
- ta->nr_allocated--;
- dsprintk(NDEBUG_TAGS, instance, "freed tag %d for target %d lun %d\n",
- cmd->tag, scmd_id(cmd), lun);
- }
-}
-
-
-static void free_all_tags(struct NCR5380_hostdata *hostdata)
-{
- int target, lun;
- struct tag_alloc *ta;
-
- if (!(hostdata->flags & FLAG_TAGGED_QUEUING))
- return;
-
- for (target = 0; target < 8; ++target) {
- for (lun = 0; lun < 8; ++lun) {
- ta = &hostdata->TagAlloc[target][lun];
- bitmap_zero(ta->allocated, MAX_TAGS);
- ta->nr_allocated = 0;
- }
- }
-}
-
-#endif /* SUPPORT_TAGS */
-
-/**
- * merge_contiguous_buffers - coalesce scatter-gather list entries
- * @cmd: command requesting IO
- *
- * Try to merge several scatter-gather buffers into one DMA transfer.
- * This is possible if the scatter buffers lie at physically
- * contiguous addresses. The first scatter-gather buffer is assumed
- * to be already set up in cmd->SCp.ptr and cmd->SCp.this_residual.
- * Every buffer merged avoids an interrupt and a DMA setup operation.
- */
-
-static void merge_contiguous_buffers(struct scsi_cmnd *cmd)
-{
-#if !defined(CONFIG_SUN3)
- unsigned long endaddr;
-#if (NDEBUG & NDEBUG_MERGING)
- unsigned long oldlen = cmd->SCp.this_residual;
- int cnt = 1;
-#endif
-
- for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
- cmd->SCp.buffers_residual &&
- virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) {
- dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n",
- page_address(sg_page(&cmd->SCp.buffer[1])), endaddr);
-#if (NDEBUG & NDEBUG_MERGING)
- ++cnt;
-#endif
- ++cmd->SCp.buffer;
- --cmd->SCp.buffers_residual;
- cmd->SCp.this_residual += cmd->SCp.buffer->length;
- endaddr += cmd->SCp.buffer->length;
- }
-#if (NDEBUG & NDEBUG_MERGING)
- if (oldlen != cmd->SCp.this_residual)
- dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n",
- cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
-#endif
-#endif /* !defined(CONFIG_SUN3) */
-}
-
-/**
- * initialize_SCp - init the scsi pointer field
- * @cmd: command block to set up
- *
- * Set up the internal fields in the SCSI command.
- */
-
-static inline void initialize_SCp(struct scsi_cmnd *cmd)
-{
- /*
- * Initialize the Scsi Pointer field so that all of the commands in the
- * various queues are valid.
- */
-
- if (scsi_bufflen(cmd)) {
- cmd->SCp.buffer = scsi_sglist(cmd);
- cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
-
- merge_contiguous_buffers(cmd);
- } else {
- cmd->SCp.buffer = NULL;
- cmd->SCp.buffers_residual = 0;
- cmd->SCp.ptr = NULL;
- cmd->SCp.this_residual = 0;
- }
-
- cmd->SCp.Status = 0;
- cmd->SCp.Message = 0;
-}
-
-/**
- * NCR5380_poll_politely2 - wait for two chip register values
- * @instance: controller to poll
- * @reg1: 5380 register to poll
- * @bit1: Bitmask to check
- * @val1: Expected value
- * @reg2: Second 5380 register to poll
- * @bit2: Second bitmask to check
- * @val2: Second expected value
- * @wait: Time-out in jiffies
- *
- * Polls the chip in a reasonably efficient manner waiting for an
- * event to occur. After a short quick poll we begin to yield the CPU
- * (if possible). In irq contexts the time-out is arbitrarily limited.
- * Callers may hold locks as long as they are held in irq mode.
- *
- * Returns 0 if either or both events occurred, otherwise -ETIMEDOUT.
- */
-
-static int NCR5380_poll_politely2(struct Scsi_Host *instance,
- int reg1, int bit1, int val1,
- int reg2, int bit2, int val2, int wait)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned long deadline = jiffies + wait;
- unsigned long n;
-
- /* Busy-wait for up to 10 ms */
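- /* accesses_per_ms is the calibrated number of register reads per
- * millisecond; each poll iteration below performs up to two reads,
- * so the iteration count is usecs * accesses_per_ms / 1000 / 2.
- */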
- n = min(10000U, jiffies_to_usecs(wait));
- n *= hostdata->accesses_per_ms;
- n /= 2000;
- do {
- if ((NCR5380_read(reg1) & bit1) == val1)
- return 0;
- if ((NCR5380_read(reg2) & bit2) == val2)
- return 0;
- cpu_relax();
- } while (n--);
-
- if (irqs_disabled() || in_interrupt())
- return -ETIMEDOUT;
-
- /* Repeatedly sleep for 1 ms until deadline */
- while (time_is_after_jiffies(deadline)) {
- schedule_timeout_uninterruptible(1);
- if ((NCR5380_read(reg1) & bit1) == val1)
- return 0;
- if ((NCR5380_read(reg2) & bit2) == val2)
- return 0;
- }
-
- return -ETIMEDOUT;
-}
-
-static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
- int reg, int bit, int val, int wait)
-{
- return NCR5380_poll_politely2(instance, reg, bit, val,
- reg, bit, val, wait);
-}
-
-#if NDEBUG
-static struct {
- unsigned char mask;
- const char *name;
-} signals[] = {
- {SR_DBP, "PARITY"},
- {SR_RST, "RST"},
- {SR_BSY, "BSY"},
- {SR_REQ, "REQ"},
- {SR_MSG, "MSG"},
- {SR_CD, "CD"},
- {SR_IO, "IO"},
- {SR_SEL, "SEL"},
- {0, NULL}
-},
-basrs[] = {
- {BASR_ATN, "ATN"},
- {BASR_ACK, "ACK"},
- {0, NULL}
-},
-icrs[] = {
- {ICR_ASSERT_RST, "ASSERT RST"},
- {ICR_ASSERT_ACK, "ASSERT ACK"},
- {ICR_ASSERT_BSY, "ASSERT BSY"},
- {ICR_ASSERT_SEL, "ASSERT SEL"},
- {ICR_ASSERT_ATN, "ASSERT ATN"},
- {ICR_ASSERT_DATA, "ASSERT DATA"},
- {0, NULL}
-},
-mrs[] = {
- {MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"},
- {MR_TARGET, "MODE TARGET"},
- {MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"},
- {MR_ENABLE_PAR_INTR, "MODE PARITY INTR"},
- {MR_ENABLE_EOP_INTR, "MODE EOP INTR"},
- {MR_MONITOR_BSY, "MODE MONITOR BSY"},
- {MR_DMA_MODE, "MODE DMA"},
- {MR_ARBITRATE, "MODE ARBITRATION"},
- {0, NULL}
-};
-
-/**
- * NCR5380_print - print scsi bus signals
- * @instance: adapter state to dump
- *
- * Print the SCSI bus signals for debugging purposes
- */
-
-static void NCR5380_print(struct Scsi_Host *instance)
-{
- unsigned char status, data, basr, mr, icr, i;
-
- data = NCR5380_read(CURRENT_SCSI_DATA_REG);
- status = NCR5380_read(STATUS_REG);
- mr = NCR5380_read(MODE_REG);
- icr = NCR5380_read(INITIATOR_COMMAND_REG);
- basr = NCR5380_read(BUS_AND_STATUS_REG);
-
- printk("STATUS_REG: %02x ", status);
- for (i = 0; signals[i].mask; ++i)
- if (status & signals[i].mask)
- printk(",%s", signals[i].name);
- printk("\nBASR: %02x ", basr);
- for (i = 0; basrs[i].mask; ++i)
- if (basr & basrs[i].mask)
- printk(",%s", basrs[i].name);
- printk("\nICR: %02x ", icr);
- for (i = 0; icrs[i].mask; ++i)
- if (icr & icrs[i].mask)
- printk(",%s", icrs[i].name);
- printk("\nMODE: %02x ", mr);
- for (i = 0; mrs[i].mask; ++i)
- if (mr & mrs[i].mask)
- printk(",%s", mrs[i].name);
- printk("\n");
-}
-
-static struct {
- unsigned char value;
- const char *name;
-} phases[] = {
- {PHASE_DATAOUT, "DATAOUT"},
- {PHASE_DATAIN, "DATAIN"},
- {PHASE_CMDOUT, "CMDOUT"},
- {PHASE_STATIN, "STATIN"},
- {PHASE_MSGOUT, "MSGOUT"},
- {PHASE_MSGIN, "MSGIN"},
- {PHASE_UNKNOWN, "UNKNOWN"}
-};
-
-/**
- * NCR5380_print_phase - show SCSI phase
- * @instance: adapter to dump
- *
- * Print the current SCSI phase for debugging purposes
- */
-
-static void NCR5380_print_phase(struct Scsi_Host *instance)
-{
- unsigned char status;
- int i;
-
- status = NCR5380_read(STATUS_REG);
- if (!(status & SR_REQ))
- shost_printk(KERN_DEBUG, instance, "REQ not asserted, phase unknown.\n");
- else {
- for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
- (phases[i].value != (status & PHASE_MASK)); ++i)
- ;
- shost_printk(KERN_DEBUG, instance, "phase %s\n", phases[i].name);
- }
-}
-#endif
-
-/**
- * NCR5380_info - report driver and host information
- * @instance: relevant scsi host instance
- *
- * For use as the host template info() handler.
- */
-
-static const char *NCR5380_info(struct Scsi_Host *instance)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- return hostdata->info;
-}
-
-static void prepare_info(struct Scsi_Host *instance)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- snprintf(hostdata->info, sizeof(hostdata->info),
- "%s, io_port 0x%lx, n_io_port %d, "
- "base 0x%lx, irq %d, "
- "can_queue %d, cmd_per_lun %d, "
- "sg_tablesize %d, this_id %d, "
- "flags { %s%s}, "
- "options { %s} ",
- instance->hostt->name, instance->io_port, instance->n_io_port,
- instance->base, instance->irq,
- instance->can_queue, instance->cmd_per_lun,
- instance->sg_tablesize, instance->this_id,
- hostdata->flags & FLAG_TAGGED_QUEUING ? "TAGGED_QUEUING " : "",
- hostdata->flags & FLAG_TOSHIBA_DELAY ? "TOSHIBA_DELAY " : "",
-#ifdef DIFFERENTIAL
- "DIFFERENTIAL "
-#endif
-#ifdef REAL_DMA
- "REAL_DMA "
-#endif
-#ifdef PARITY
- "PARITY "
-#endif
-#ifdef SUPPORT_TAGS
- "SUPPORT_TAGS "
-#endif
- "");
-}
-
-/**
- * NCR5380_init - initialise an NCR5380
- * @instance: adapter to configure
- * @flags: control flags
- *
- * Initializes *instance and corresponding 5380 chip,
- * with flags OR'd into the initial flags value.
- *
- * Notes : I assume that the host, hostno, and id bits have been
- * set correctly. I don't care about the irq and other fields.
- *
- * Returns 0 for success
- */
-
-static int __init NCR5380_init(struct Scsi_Host *instance, int flags)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- int i;
- unsigned long deadline;
-
- hostdata->host = instance;
- hostdata->id_mask = 1 << instance->this_id;
- hostdata->id_higher_mask = 0;
- for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
- if (i > hostdata->id_mask)
- hostdata->id_higher_mask |= i;
- for (i = 0; i < 8; ++i)
- hostdata->busy[i] = 0;
-#ifdef SUPPORT_TAGS
- init_tags(hostdata);
-#endif
-#if defined (REAL_DMA)
- hostdata->dma_len = 0;
-#endif
- spin_lock_init(&hostdata->lock);
- hostdata->connected = NULL;
- hostdata->sensing = NULL;
- INIT_LIST_HEAD(&hostdata->autosense);
- INIT_LIST_HEAD(&hostdata->unissued);
- INIT_LIST_HEAD(&hostdata->disconnected);
-
- hostdata->flags = flags;
-
- INIT_WORK(&hostdata->main_task, NCR5380_main);
- hostdata->work_q = alloc_workqueue("ncr5380_%d",
- WQ_UNBOUND | WQ_MEM_RECLAIM,
- 1, instance->host_no);
- if (!hostdata->work_q)
- return -ENOMEM;
-
- prepare_info(instance);
-
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- NCR5380_write(MODE_REG, MR_BASE);
- NCR5380_write(TARGET_COMMAND_REG, 0);
- NCR5380_write(SELECT_ENABLE_REG, 0);
-
- /* Calibrate register polling loop */
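- /* First spin to the next jiffy boundary so the sample window starts
- * on a tick edge, then count how many STATUS_REG reads fit into the
- * following 256 ms; accesses_per_ms is that count divided by 256.
- */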
- i = 0;
- deadline = jiffies + 1;
- do {
- cpu_relax();
- } while (time_is_after_jiffies(deadline));
- deadline += msecs_to_jiffies(256);
- do {
- NCR5380_read(STATUS_REG);
- ++i;
- cpu_relax();
- } while (time_is_after_jiffies(deadline));
- hostdata->accesses_per_ms = i / 256;
-
- return 0;
-}
-
-/**
- * NCR5380_maybe_reset_bus - Detect and correct bus wedge problems.
- * @instance: adapter to check
- *
- * If the system crashed, it may have crashed with a connected target and
- * the SCSI bus busy. Check for BUS FREE phase. If not, try to abort the
- * currently established nexus, which we know nothing about. Failing that
- * do a bus reset.
- *
- * Note that a bus reset will cause the chip to assert IRQ.
- *
- * Returns 0 if successful, otherwise -ENXIO.
- */
-
-static int NCR5380_maybe_reset_bus(struct Scsi_Host *instance)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- int pass;
-
- for (pass = 1; (NCR5380_read(STATUS_REG) & SR_BSY) && pass <= 6; ++pass) {
- switch (pass) {
- case 1:
- case 3:
- case 5:
- shost_printk(KERN_ERR, instance, "SCSI bus busy, waiting up to five seconds\n");
- NCR5380_poll_politely(instance,
- STATUS_REG, SR_BSY, 0, 5 * HZ);
- break;
- case 2:
- shost_printk(KERN_ERR, instance, "bus busy, attempting abort\n");
- do_abort(instance);
- break;
- case 4:
- shost_printk(KERN_ERR, instance, "bus busy, attempting reset\n");
- do_reset(instance);
- /* Wait after a reset; the SCSI standard calls for
- * 250 ms; we wait 500 ms to be on the safe side.
- * But some Toshiba CD-ROMs need ten times the standard delay.
- */
- if (hostdata->flags & FLAG_TOSHIBA_DELAY)
- msleep(2500);
- else
- msleep(500);
- break;
- case 6:
- shost_printk(KERN_ERR, instance, "bus locked solid\n");
- return -ENXIO;
- }
- }
- return 0;
-}
-
-/**
- * NCR5380_exit - remove an NCR5380
- * @instance: adapter to remove
- *
- * Assumes that no more work can be queued (e.g. by NCR5380_intr).
- */
-
-static void NCR5380_exit(struct Scsi_Host *instance)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- cancel_work_sync(&hostdata->main_task);
- destroy_workqueue(hostdata->work_q);
-}
-
-/**
- * complete_cmd - finish processing a command and return it to the SCSI ML
- * @instance: the host instance
- * @cmd: command to complete
- */
-
-static void complete_cmd(struct Scsi_Host *instance,
- struct scsi_cmnd *cmd)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- dsprintk(NDEBUG_QUEUES, instance, "complete_cmd: cmd %p\n", cmd);
-
- if (hostdata->sensing == cmd) {
- /* Autosense processing ends here */
- if ((cmd->result & 0xff) != SAM_STAT_GOOD) {
- scsi_eh_restore_cmnd(cmd, &hostdata->ses);
- set_host_byte(cmd, DID_ERROR);
- } else
- scsi_eh_restore_cmnd(cmd, &hostdata->ses);
- hostdata->sensing = NULL;
- }
-
-#ifdef SUPPORT_TAGS
- cmd_free_tag(cmd);
-#else
- hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
-#endif
- cmd->scsi_done(cmd);
-}
-
-/**
- * NCR5380_queue_command - queue a command
- * @instance: the relevant SCSI adapter
- * @cmd: SCSI command
- *
- * cmd is added to the per-instance issue queue, with minor
- * twiddling done to the host specific fields of cmd. If the
- * main coroutine is not running, it is restarted.
- */
-
-static int NCR5380_queue_command(struct Scsi_Host *instance,
- struct scsi_cmnd *cmd)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
- unsigned long flags;
-
-#if (NDEBUG & NDEBUG_NO_WRITE)
- switch (cmd->cmnd[0]) {
- case WRITE_6:
- case WRITE_10:
- shost_printk(KERN_DEBUG, instance, "WRITE attempted with NDEBUG_NO_WRITE set\n");
- cmd->result = (DID_ERROR << 16);
- cmd->scsi_done(cmd);
- return 0;
- }
-#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
-
- cmd->result = 0;
-
- /*
- * ++roman: Just disabling the NCR interrupt isn't sufficient here,
- * because a timer interrupt can also trigger an abort or reset, which
- * would alter the queues and touch the lock.
- */
- if (!NCR5380_acquire_dma_irq(instance))
- return SCSI_MLQUEUE_HOST_BUSY;
-
- spin_lock_irqsave(&hostdata->lock, flags);
-
- /*
- * Insert the cmd into the issue queue. Note that REQUEST SENSE
- * commands are added to the head of the queue since any command will
- * clear the contingent allegiance condition that exists and the
- * sense data is only guaranteed to be valid while the condition exists.
- */
-
- if (cmd->cmnd[0] == REQUEST_SENSE)
- list_add(&ncmd->list, &hostdata->unissued);
- else
- list_add_tail(&ncmd->list, &hostdata->unissued);
-
- spin_unlock_irqrestore(&hostdata->lock, flags);
-
- dsprintk(NDEBUG_QUEUES, instance, "command %p added to %s of queue\n",
- cmd, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
-
- /* Kick off command processing */
- queue_work(hostdata->work_q, &hostdata->main_task);
- return 0;
-}
-
-static inline void maybe_release_dma_irq(struct Scsi_Host *instance)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
- /* Caller does the locking needed to set & test these data atomically */
- if (list_empty(&hostdata->disconnected) &&
- list_empty(&hostdata->unissued) &&
- list_empty(&hostdata->autosense) &&
- !hostdata->connected &&
- !hostdata->selecting)
- NCR5380_release_dma_irq(instance);
-}
-
-/**
- * dequeue_next_cmd - dequeue a command for processing
- * @instance: the scsi host instance
- *
- * Priority is given to commands on the autosense queue. These commands
- * need autosense because of a CHECK CONDITION result.
- *
- * Returns a command pointer if a command is found for a target that is
- * not already busy. Otherwise returns NULL.
- */
-
-static struct scsi_cmnd *dequeue_next_cmd(struct Scsi_Host *instance)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- struct NCR5380_cmd *ncmd;
- struct scsi_cmnd *cmd;
-
- if (hostdata->sensing || list_empty(&hostdata->autosense)) {
- list_for_each_entry(ncmd, &hostdata->unissued, list) {
- cmd = NCR5380_to_scmd(ncmd);
- dsprintk(NDEBUG_QUEUES, instance, "dequeue: cmd=%p target=%d busy=0x%02x lun=%llu\n",
- cmd, scmd_id(cmd), hostdata->busy[scmd_id(cmd)], cmd->device->lun);
-
- if (
-#ifdef SUPPORT_TAGS
- !is_lun_busy(cmd, 1)
-#else
- !(hostdata->busy[scmd_id(cmd)] & (1 << cmd->device->lun))
-#endif
- ) {
- list_del(&ncmd->list);
- dsprintk(NDEBUG_QUEUES, instance,
- "dequeue: removed %p from issue queue\n", cmd);
- return cmd;
- }
- }
- } else {
- /* Autosense processing begins here */
- ncmd = list_first_entry(&hostdata->autosense,
- struct NCR5380_cmd, list);
- list_del(&ncmd->list);
- cmd = NCR5380_to_scmd(ncmd);
- dsprintk(NDEBUG_QUEUES, instance,
- "dequeue: removed %p from autosense queue\n", cmd);
- scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
- hostdata->sensing = cmd;
- return cmd;
- }
- return NULL;
-}
-
-static void requeue_cmd(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
-
- if (hostdata->sensing == cmd) {
- scsi_eh_restore_cmnd(cmd, &hostdata->ses);
- list_add(&ncmd->list, &hostdata->autosense);
- hostdata->sensing = NULL;
- } else
- list_add(&ncmd->list, &hostdata->unissued);
-}
-
-/**
- * NCR5380_main - NCR state machines
- * @work: work item, embedded in the adapter's hostdata
- *
- * NCR5380_main is a coroutine that runs as long as more work can
- * be done on the NCR5380 host adapters in a system. Both
- * NCR5380_queue_command() and NCR5380_intr() will try to start it
- * in case it is not running.
- */
-
-static void NCR5380_main(struct work_struct *work)
-{
- struct NCR5380_hostdata *hostdata =
- container_of(work, struct NCR5380_hostdata, main_task);
- struct Scsi_Host *instance = hostdata->host;
- int done;
-
- /*
- * ++roman: Just disabling the NCR interrupt isn't sufficient here,
- * because a timer interrupt can also trigger an abort or reset, which
- * can alter the queues and touch the Falcon lock.
- */
-
- do {
- done = 1;
-
- spin_lock_irq(&hostdata->lock);
- while (!hostdata->connected && !hostdata->selecting) {
- struct scsi_cmnd *cmd = dequeue_next_cmd(instance);
-
- if (!cmd)
- break;
-
- dsprintk(NDEBUG_MAIN, instance, "main: dequeued %p\n", cmd);
-
- /*
- * Attempt to establish an I_T_L nexus here.
- * On success, instance->hostdata->connected is set.
- * On failure, we must add the command back to the
- * issue queue so we can keep trying.
- */
- /*
- * REQUEST SENSE commands are issued without tagged
- * queueing, even on SCSI-II devices because the
- * contingent allegiance condition exists for the
- * entire unit.
- */
- /* ++roman: ...and the standard also requires that
- * REQUEST SENSE command are untagged.
- */
-
-#ifdef SUPPORT_TAGS
- cmd_get_tag(cmd, cmd->cmnd[0] != REQUEST_SENSE);
-#endif
- if (!NCR5380_select(instance, cmd)) {
- dsprintk(NDEBUG_MAIN, instance, "main: select complete\n");
- maybe_release_dma_irq(instance);
- } else {
- dsprintk(NDEBUG_MAIN | NDEBUG_QUEUES, instance,
- "main: select failed, returning %p to queue\n", cmd);
- requeue_cmd(instance, cmd);
-#ifdef SUPPORT_TAGS
- cmd_free_tag(cmd);
-#endif
- }
- }
- if (hostdata->connected
-#ifdef REAL_DMA
- && !hostdata->dma_len
-#endif
- ) {
- dsprintk(NDEBUG_MAIN, instance, "main: performing information transfer\n");
- NCR5380_information_transfer(instance);
- done = 0;
- }
- spin_unlock_irq(&hostdata->lock);
- if (!done)
- cond_resched();
- } while (!done);
-}
-
-
-#ifdef REAL_DMA
-/*
- * Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
- *
- * Purpose : Called by interrupt handler when DMA finishes or a phase
- * mismatch occurs (which would finish the DMA transfer).
- *
- * Inputs : instance - this instance of the NCR5380.
- */
-
-static void NCR5380_dma_complete(struct Scsi_Host *instance)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- int transferred;
- unsigned char **data;
- int *count;
- int saved_data = 0, overrun = 0;
- unsigned char p;
-
- if (hostdata->read_overruns) {
- p = hostdata->connected->SCp.phase;
- if (p & SR_IO) {
- udelay(10);
- if ((NCR5380_read(BUS_AND_STATUS_REG) &
- (BASR_PHASE_MATCH|BASR_ACK)) ==
- (BASR_PHASE_MATCH|BASR_ACK)) {
- saved_data = NCR5380_read(INPUT_DATA_REG);
- overrun = 1;
- dsprintk(NDEBUG_DMA, instance, "read overrun handled\n");
- }
- }
- }
-
-#if defined(CONFIG_SUN3)
- if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
- pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n",
- instance->host_no);
- BUG();
- }
-
- /* make sure we're not stuck in a data phase */
- if ((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) ==
- (BASR_PHASE_MATCH | BASR_ACK)) {
- pr_err("scsi%d: BASR %02x\n", instance->host_no,
- NCR5380_read(BUS_AND_STATUS_REG));
- pr_err("scsi%d: bus stuck in data phase -- probably a single byte overrun!\n",
- instance->host_no);
- BUG();
- }
-#endif
-
- NCR5380_write(MODE_REG, MR_BASE);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- NCR5380_read(RESET_PARITY_INTERRUPT_REG);
-
- transferred = hostdata->dma_len - NCR5380_dma_residual(instance);
- hostdata->dma_len = 0;
-
- data = (unsigned char **)&hostdata->connected->SCp.ptr;
- count = &hostdata->connected->SCp.this_residual;
- *data += transferred;
- *count -= transferred;
-
- if (hostdata->read_overruns) {
- int cnt, toPIO;
-
- if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
- cnt = toPIO = hostdata->read_overruns;
- if (overrun) {
- dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
- *(*data)++ = saved_data;
- (*count)--;
- cnt--;
- toPIO--;
- }
- dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
- NCR5380_transfer_pio(instance, &p, &cnt, data);
- *count -= toPIO - cnt;
- }
- }
-}
-#endif /* REAL_DMA */
-
-
-/**
- * NCR5380_intr - generic NCR5380 irq handler
- * @irq: interrupt number
- * @dev_id: device info
- *
- * Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
- * from the disconnected queue, and restarting NCR5380_main()
- * as required.
- *
- * The chip can assert IRQ in any of six different conditions. The IRQ flag
- * is then cleared by reading the Reset Parity/Interrupt Register (RPIR).
- * Three of these six conditions are latched in the Bus and Status Register:
- * - End of DMA (cleared by ending DMA Mode)
- * - Parity error (cleared by reading RPIR)
- * - Loss of BSY (cleared by reading RPIR)
- * Two conditions have flag bits that are not latched:
- * - Bus phase mismatch (non-maskable in DMA Mode, cleared by ending DMA Mode)
- * - Bus reset (non-maskable)
- * The remaining condition has no flag bit at all:
- * - Selection/reselection
- *
- * Hence, establishing the cause(s) of any interrupt is partly guesswork.
- * In "The DP8490 and DP5380 Comparison Guide", National Semiconductor
- * claimed that "the design of the [DP8490] interrupt logic ensures
- * interrupts will not be lost (they can be on the DP5380)."
- * The L5380/53C80 datasheet from LOGIC Devices has more details.
- *
- * Checking for bus reset by reading RST is futile because of interrupt
- * latency, but a bus reset will reset chip logic. Checking for parity error
- * is unnecessary because that interrupt is never enabled. A Loss of BSY
- * condition will clear DMA Mode. We can tell when this occurs because
- * the Busy Monitor interrupt is enabled together with DMA Mode.
- */
-
-static irqreturn_t NCR5380_intr(int irq, void *dev_id)
-{
- struct Scsi_Host *instance = dev_id;
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- int handled = 0;
- unsigned char basr;
- unsigned long flags;
-
- spin_lock_irqsave(&hostdata->lock, flags);
-
- basr = NCR5380_read(BUS_AND_STATUS_REG);
- if (basr & BASR_IRQ) {
- unsigned char mr = NCR5380_read(MODE_REG);
- unsigned char sr = NCR5380_read(STATUS_REG);
-
- dsprintk(NDEBUG_INTR, instance, "IRQ %d, BASR 0x%02x, SR 0x%02x, MR 0x%02x\n",
- irq, basr, sr, mr);
-
-#if defined(REAL_DMA)
- if ((mr & MR_DMA_MODE) || (mr & MR_MONITOR_BSY)) {
- /* Probably End of DMA, Phase Mismatch or Loss of BSY.
- * We ack IRQ after clearing Mode Register. Workarounds
- * for End of DMA errata need to happen in DMA Mode.
- */
-
- dsprintk(NDEBUG_INTR, instance, "interrupt in DMA mode\n");
-
- if (hostdata->connected) {
- NCR5380_dma_complete(instance);
- queue_work(hostdata->work_q, &hostdata->main_task);
- } else {
- NCR5380_write(MODE_REG, MR_BASE);
- NCR5380_read(RESET_PARITY_INTERRUPT_REG);
- }
- } else
-#endif /* REAL_DMA */
- if ((NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_mask) &&
- (sr & (SR_SEL | SR_IO | SR_BSY | SR_RST)) == (SR_SEL | SR_IO)) {
- /* Probably reselected */
- NCR5380_write(SELECT_ENABLE_REG, 0);
- NCR5380_read(RESET_PARITY_INTERRUPT_REG);
-
- dsprintk(NDEBUG_INTR, instance, "interrupt with SEL and IO\n");
-
- if (!hostdata->connected) {
- NCR5380_reselect(instance);
- queue_work(hostdata->work_q, &hostdata->main_task);
- }
- if (!hostdata->connected)
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
- } else {
- /* Probably Bus Reset */
- NCR5380_read(RESET_PARITY_INTERRUPT_REG);
-
- dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
-#ifdef SUN3_SCSI_VME
- dregs->csr |= CSR_DMA_ENABLE;
-#endif
- }
- handled = 1;
- } else {
- shost_printk(KERN_NOTICE, instance, "interrupt without IRQ bit\n");
-#ifdef SUN3_SCSI_VME
- dregs->csr |= CSR_DMA_ENABLE;
-#endif
- }
-
- spin_unlock_irqrestore(&hostdata->lock, flags);
-
- return IRQ_RETVAL(handled);
-}
-
-/*
- * Function : int NCR5380_select(struct Scsi_Host *instance,
- * struct scsi_cmnd *cmd)
- *
- * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
- * including ARBITRATION, SELECTION, and initial message out for
- * IDENTIFY and queue messages.
- *
- * Inputs : instance - instantiation of the 5380 driver on which this
- * target lives, cmd - SCSI command to execute.
- *
- * Returns cmd if selection failed but should be retried,
- * NULL if selection failed and should not be retried, or
- * NULL if selection succeeded (hostdata->connected == cmd).
- *
- * Side effects :
- * If the bus is busy, arbitration failed, etc., NCR5380_select() will exit
- * with registers as they should have been on entry - i.e.
- * SELECT_ENABLE will be set appropriately, and the NCR5380
- * will cease to drive any SCSI bus signals.
- *
- * If successful : I_T_L or I_T_L_Q nexus will be established,
- * instance->connected will be set to cmd.
- * SELECT interrupt will be disabled.
- *
- * If failed (no target) : cmd->scsi_done() will be called, and the
- * cmd->result host byte set to DID_BAD_TARGET.
- */
-
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
- struct scsi_cmnd *cmd)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned char tmp[3], phase;
- unsigned char *data;
- int len;
- int err;
-
- NCR5380_dprint(NDEBUG_ARBITRATION, instance);
- dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
- instance->this_id);
-
- /*
- * Arbitration and selection phases are slow and involve dropping the
- * lock, so we have to watch out for EH. An exception handler may
- * change 'selecting' to NULL. This function will then return NULL
- * so that the caller will forget about 'cmd'. (During information
- * transfer phases, EH may change 'connected' to NULL.)
- */
- hostdata->selecting = cmd;
-
- /*
- * Set the phase bits to 0, otherwise the NCR5380 won't drive the
- * data bus during SELECTION.
- */
-
- NCR5380_write(TARGET_COMMAND_REG, 0);
-
- /*
- * Start arbitration.
- */
-
- NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
- NCR5380_write(MODE_REG, MR_ARBITRATE);
-
- /* The chip now waits for BUS FREE phase. Then after the 800 ns
- * Bus Free Delay, arbitration will begin.
- */
-
- spin_unlock_irq(&hostdata->lock);
- err = NCR5380_poll_politely2(instance, MODE_REG, MR_ARBITRATE, 0,
- INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS,
- ICR_ARBITRATION_PROGRESS, HZ);
- spin_lock_irq(&hostdata->lock);
- if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE)) {
- /* Reselection interrupt */
- goto out;
- }
- if (!hostdata->selecting) {
- /* Command was aborted */
- NCR5380_write(MODE_REG, MR_BASE);
- goto out;
- }
- if (err < 0) {
- NCR5380_write(MODE_REG, MR_BASE);
- shost_printk(KERN_ERR, instance,
- "select: arbitration timeout\n");
- goto out;
- }
- spin_unlock_irq(&hostdata->lock);
-
- /* The SCSI-2 arbitration delay is 2.4 us */
- udelay(3);
-
- /* Check for lost arbitration */
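- /* ICR_ARBITRATION_LOST is sampled twice, with a check for a
- * higher-priority ID on the data bus in between, to narrow the window
- * in which arbitration could be lost just after a check has passed.
- */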
- if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
- (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) ||
- (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
- NCR5380_write(MODE_REG, MR_BASE);
- dsprintk(NDEBUG_ARBITRATION, instance, "lost arbitration, deasserting MR_ARBITRATE\n");
- spin_lock_irq(&hostdata->lock);
- goto out;
- }
-
- /* After/during arbitration, BSY should be asserted.
- * IBM DPES-31080 Version S31Q works now
- * Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman)
- */
- NCR5380_write(INITIATOR_COMMAND_REG,
- ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY);
-
- /*
- * Again, bus clear + bus settle time is 1.2 us; however, this is
- * a minimum, so we'll udelay ceil(1.2)
- */
-
- if (hostdata->flags & FLAG_TOSHIBA_DELAY)
- udelay(15);
- else
- udelay(2);
-
- spin_lock_irq(&hostdata->lock);
-
- /* NCR5380_reselect() clears MODE_REG after a reselection interrupt */
- if (!(NCR5380_read(MODE_REG) & MR_ARBITRATE))
- goto out;
-
- if (!hostdata->selecting) {
- NCR5380_write(MODE_REG, MR_BASE);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- goto out;
- }
-
- dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n");
-
- /*
- * Now that we have won arbitration, start Selection process, asserting
- * the host and target ID's on the SCSI bus.
- */
-
- NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask | (1 << scmd_id(cmd)));
-
- /*
- * Raise ATN while SEL is true before BSY goes false from arbitration,
- * since this is the only way to guarantee that we'll get a MESSAGE OUT
- * phase immediately after selection.
- */
-
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY |
- ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL);
- NCR5380_write(MODE_REG, MR_BASE);
-
- /*
- * Reselect interrupts must be turned off prior to the dropping of BSY,
- * otherwise we will trigger an interrupt.
- */
- NCR5380_write(SELECT_ENABLE_REG, 0);
-
- spin_unlock_irq(&hostdata->lock);
-
- /*
- * The initiator shall then wait at least two deskew delays and release
- * the BSY signal.
- */
- udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */
-
- /* Reset BSY */
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA |
- ICR_ASSERT_ATN | ICR_ASSERT_SEL);
-
- /*
- * Something weird happens when we cease to drive BSY - looks
- * like the board/chip is letting us do another read before the
- * appropriate propagation delay has expired, and we're confusing
- * a BSY signal from ourselves as the target's response to SELECTION.
- *
- * A small delay (the 'C++' frontend breaks the pipeline with an
- * unnecessary jump, making it work on my 386-33/Trantor T128, the
- * tighter 'C' code breaks and requires this) solves the problem -
- * the 1 us delay is arbitrary, and only used because this delay will
- * be the same on other platforms and since it works here, it should
- * work there.
- *
- * wingel suggests that this could be due to failing to wait
- * one deskew delay.
- */
-
- udelay(1);
-
- dsprintk(NDEBUG_SELECTION, instance, "selecting target %d\n", scmd_id(cmd));
-
- /*
- * The SCSI specification calls for a 250 ms timeout for the actual
- * selection.
- */
-
- err = NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, SR_BSY,
- msecs_to_jiffies(250));
-
- if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
- spin_lock_irq(&hostdata->lock);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- NCR5380_reselect(instance);
- if (!hostdata->connected)
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
- shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n");
- goto out;
- }
-
- if (err < 0) {
- spin_lock_irq(&hostdata->lock);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
- /* Can't touch cmd if it has been reclaimed by the scsi ML */
- if (hostdata->selecting) {
- cmd->result = DID_BAD_TARGET << 16;
- complete_cmd(instance, cmd);
- dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n");
- cmd = NULL;
- }
- goto out;
- }
-
- /*
- * No less than two deskew delays after the initiator detects the
- * BSY signal is true, it shall release the SEL signal and may
- * change the DATA BUS. -wingel
- */
-
- udelay(1);
-
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
-
- /*
- * Since we followed the SCSI spec, and raised ATN while SEL
- * was true but before BSY was false during selection, the information
- * transfer phase should be a MESSAGE OUT phase so that we can send the
- * IDENTIFY message.
- *
- * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
- * message (2 bytes) with a tag ID that we increment with every command
- * until it wraps back to 0.
- *
- * XXX - it turns out that there are some broken SCSI-II devices,
- * which claim to support tagged queuing but fail when more than
- * some number of commands are issued at once.
- */
-
- /* Wait for start of REQ/ACK handshake */
-
- err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
- spin_lock_irq(&hostdata->lock);
- if (err < 0) {
- shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
- goto out;
- }
- if (!hostdata->selecting) {
- do_abort(instance);
- goto out;
- }
-
- dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n",
- scmd_id(cmd));
- tmp[0] = IDENTIFY(1, cmd->device->lun);
-
-#ifdef SUPPORT_TAGS
- if (cmd->tag != TAG_NONE) {
- tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG;
- tmp[2] = cmd->tag;
- len = 3;
- } else
- len = 1;
-#else
- len = 1;
- cmd->tag = 0;
-#endif /* SUPPORT_TAGS */
-
- /* Send message(s) */
- data = tmp;
- phase = PHASE_MSGOUT;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
- dsprintk(NDEBUG_SELECTION, instance, "nexus established.\n");
- /* XXX need to handle errors here */
-
- hostdata->connected = cmd;
-#ifndef SUPPORT_TAGS
- hostdata->busy[cmd->device->id] |= 1 << cmd->device->lun;
-#endif
-#ifdef SUN3_SCSI_VME
- dregs->csr |= CSR_INTR;
-#endif
-
- initialize_SCp(cmd);
-
- cmd = NULL;
-
-out:
- if (!hostdata->selecting)
- return NULL;
- hostdata->selecting = NULL;
- return cmd;
-}
-
-/*
- * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
- * unsigned char *phase, int *count, unsigned char **data)
- *
- * Purpose : transfers data in given phase using polled I/O
- *
- * Inputs : instance - instance of driver, *phase - pointer to
- * what phase is expected, *count - pointer to number of
- * bytes to transfer, **data - pointer to data pointer.
- *
- * Returns : -1 when a different phase is entered without transferring
- * the maximum number of bytes, 0 if all bytes are transferred or exit
- * is in the same phase.
- *
- * Also, *phase, *count, *data are modified in place.
- *
- * XXX Note : handling for bus free may be useful.
- */
-
-/*
- * Note : this code is not as quick as it could be; however, it
- * IS 100% reliable, and for the actual data transfer where speed
- * counts, we will always do a pseudo DMA or DMA transfer.
- */
-
-static int NCR5380_transfer_pio(struct Scsi_Host *instance,
- unsigned char *phase, int *count,
- unsigned char **data)
-{
- unsigned char p = *phase, tmp;
- int c = *count;
- unsigned char *d = *data;
-
- /*
- * The NCR5380 chip will only drive the SCSI bus when the
- * phase specified in the appropriate bits of the TARGET COMMAND
- * REGISTER matches the phase bits in the STATUS REGISTER.
- */
-
- NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
-
- do {
- /*
- * Wait for assertion of REQ, after which the phase bits will be
- * valid
- */
-
- if (NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ) < 0)
- break;
-
- dsprintk(NDEBUG_HANDSHAKE, instance, "REQ asserted\n");
-
- /* Check for phase mismatch */
- if ((NCR5380_read(STATUS_REG) & PHASE_MASK) != p) {
- dsprintk(NDEBUG_PIO, instance, "phase mismatch\n");
- NCR5380_dprint_phase(NDEBUG_PIO, instance);
- break;
- }
-
- /* Do actual transfer from SCSI bus to / from memory */
- if (!(p & SR_IO))
- NCR5380_write(OUTPUT_DATA_REG, *d);
- else
- *d = NCR5380_read(CURRENT_SCSI_DATA_REG);
-
- ++d;
-
- /*
- * The SCSI standard suggests that in MSGOUT phase, the initiator
- * should drop ATN on the last byte of the message,
- * after REQ has been asserted for the handshake but before
- * the initiator raises ACK.
- */
-
- if (!(p & SR_IO)) {
- if (!((p & SR_MSG) && c > 1)) {
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
- NCR5380_dprint(NDEBUG_PIO, instance);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
- ICR_ASSERT_DATA | ICR_ASSERT_ACK);
- } else {
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
- ICR_ASSERT_DATA | ICR_ASSERT_ATN);
- NCR5380_dprint(NDEBUG_PIO, instance);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
- ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
- }
- } else {
- NCR5380_dprint(NDEBUG_PIO, instance);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
- }
-
- if (NCR5380_poll_politely(instance,
- STATUS_REG, SR_REQ, 0, 5 * HZ) < 0)
- break;
-
- dsprintk(NDEBUG_HANDSHAKE, instance, "REQ negated, handshake complete\n");
-
-/*
- * We have several special cases to consider during REQ/ACK handshaking :
- * 1. We were in MSGOUT phase, and we are on the last byte of the
- * message. ATN must be dropped as ACK is dropped.
- *
- * 2. We are in a MSGIN phase, and we are on the last byte of the
- * message. We must exit with ACK asserted, so that the calling
- * code may raise ATN before dropping ACK to reject the message.
- *
- * 3. ACK and ATN are clear and the target may proceed as normal.
- */
- if (!(p == PHASE_MSGIN && c == 1)) {
- if (p == PHASE_MSGOUT && c > 1)
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
- else
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- }
- } while (--c);
-
- dsprintk(NDEBUG_PIO, instance, "residual %d\n", c);
-
- *count = c;
- *data = d;
- tmp = NCR5380_read(STATUS_REG);
- /* The phase read from the bus is valid if either REQ is (already)
- * asserted or if ACK hasn't been released yet. The latter applies if
- * we're in MSG IN, DATA IN or STATUS and all bytes have been received.
- */
- if ((tmp & SR_REQ) || ((tmp & SR_IO) && c == 0))
- *phase = tmp & PHASE_MASK;
- else
- *phase = PHASE_UNKNOWN;
-
- if (!c || (*phase == p))
- return 0;
- else
- return -1;
-}
-
-/**
- * do_reset - issue a reset command
- * @instance: adapter to reset
- *
- * Issue a reset sequence to the NCR5380 and try to get the bus
- * back into sane shape.
- *
- * This clears the reset interrupt flag because there may be no handler for
- * it. When the driver is initialized, the NCR5380_intr() handler has not yet
- * been installed. And when in EH we may have released the ST DMA interrupt.
- */
-
-static void do_reset(struct Scsi_Host *instance)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- NCR5380_write(TARGET_COMMAND_REG,
- PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
- udelay(50);
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
- local_irq_restore(flags);
-}
-
-/**
- * do_abort - abort the currently established nexus by going to
- * MESSAGE OUT phase and sending an ABORT message.
- * @instance: relevant scsi host instance
- *
- * Returns 0 on success, -1 on failure.
- */
-
-static int do_abort(struct Scsi_Host *instance)
-{
- unsigned char *msgptr, phase, tmp;
- int len;
- int rc;
-
- /* Request message out phase */
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
-
- /*
- * Wait for the target to indicate a valid phase by asserting
- * REQ. Once this happens, we'll have either a MSGOUT phase
- * and can immediately send the ABORT message, or we'll have some
- * other phase and will have to source/sink data.
- *
- * We really don't care what value was on the bus or what value
- * the target sees, so we just handshake.
- */
-
- rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
- if (rc < 0)
- goto timeout;
-
- tmp = NCR5380_read(STATUS_REG) & PHASE_MASK;
-
- NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
-
- if (tmp != PHASE_MSGOUT) {
- NCR5380_write(INITIATOR_COMMAND_REG,
- ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
- rc = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 3 * HZ);
- if (rc < 0)
- goto timeout;
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
- }
-
- tmp = ABORT;
- msgptr = &tmp;
- len = 1;
- phase = PHASE_MSGOUT;
- NCR5380_transfer_pio(instance, &phase, &len, &msgptr);
-
- /*
- * If we got here and the ABORT message went out successfully,
- * the target is about to go into BUS FREE state.
- */
-
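- /* A non-zero residual means the handshake broke down before the
- * ABORT byte went out, so report failure.
- */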
- return len ? -1 : 0;
-
-timeout:
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- return -1;
-}
-
-#if defined(REAL_DMA)
-/*
- * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
- * unsigned char *phase, int *count, unsigned char **data)
- *
- * Purpose : transfers data in given phase using either real
- * or pseudo DMA.
- *
- * Inputs : instance - instance of driver, *phase - pointer to
- * what phase is expected, *count - pointer to number of
- * bytes to transfer, **data - pointer to data pointer.
- *
- * Returns : -1 when a different phase is entered without transferring
- * the maximum number of bytes, 0 if all bytes are transferred or exit
- * is in the same phase.
- *
- * Also, *phase, *count, *data are modified in place.
- */
-
-
-static int NCR5380_transfer_dma(struct Scsi_Host *instance,
- unsigned char *phase, int *count,
- unsigned char **data)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- register int c = *count;
- register unsigned char p = *phase;
-
-#if defined(CONFIG_SUN3)
- /* sanity check */
- if (!sun3_dma_setup_done) {
- pr_err("scsi%d: transfer_dma without setup!\n",
- instance->host_no);
- BUG();
- }
- hostdata->dma_len = c;
-
- dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n",
- (p & SR_IO) ? "receive" : "send", c, *data);
-
- /* netbsd turns off ints here, why not be safe and do it too */
-
- /* send start chain */
- sun3scsi_dma_start(c, *data);
-
- NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
- NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY |
- MR_ENABLE_EOP_INTR);
- if (p & SR_IO) {
- NCR5380_write(INITIATOR_COMMAND_REG, 0);
- NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
- } else {
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_DATA);
- NCR5380_write(START_DMA_SEND_REG, 0);
- }
-
-#ifdef SUN3_SCSI_VME
- dregs->csr |= CSR_DMA_ENABLE;
-#endif
-
- sun3_dma_active = 1;
-
-#else /* !defined(CONFIG_SUN3) */
- register unsigned char *d = *data;
- unsigned char tmp;
-
- if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
- *phase = tmp;
- return -1;
- }
-
- if (hostdata->read_overruns && (p & SR_IO))
- c -= hostdata->read_overruns;
-
- dsprintk(NDEBUG_DMA, instance, "initializing DMA %s: length %d, address %p\n",
- (p & SR_IO) ? "receive" : "send", c, d);
-
- NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
- NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_MONITOR_BSY |
- MR_ENABLE_EOP_INTR);
-
- if (!(hostdata->flags & FLAG_LATE_DMA_SETUP)) {
- /* On the Medusa, the DMA must be initialized before
- * starting the NCR. This is also the cleaner way for the TT.
- */
- hostdata->dma_len = (p & SR_IO) ?
- NCR5380_dma_read_setup(instance, d, c) :
- NCR5380_dma_write_setup(instance, d, c);
- }
-
- if (p & SR_IO)
- NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
- else {
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
- NCR5380_write(START_DMA_SEND_REG, 0);
- }
-
- if (hostdata->flags & FLAG_LATE_DMA_SETUP) {
- /* On the Falcon, the DMA setup must be done after the last
- * NCR access, else the DMA setup gets trashed!
- */
- hostdata->dma_len = (p & SR_IO) ?
- NCR5380_dma_read_setup(instance, d, c) :
- NCR5380_dma_write_setup(instance, d, c);
- }
-#endif /* !defined(CONFIG_SUN3) */
-
- return 0;
-}
-#endif /* defined(REAL_DMA) */
-
-/*
- * Function : NCR5380_information_transfer (struct Scsi_Host *instance)
- *
- * Purpose : run through the various SCSI phases and do as the target
- * directs us to. Operates on the currently connected command,
- * instance->connected.
- *
- * Inputs : instance, instance for which we are doing commands
- *
- * Side effects : SCSI things happen, the disconnected queue will be
- * modified if a command disconnects, *instance->connected will
- * change.
- *
- * XXX Note : we need to watch for a bus free or reset condition here
- * so that we can recover from an unexpected bus free.
- */
-
-static void NCR5380_information_transfer(struct Scsi_Host *instance)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned char msgout = NOP;
- int sink = 0;
- int len;
- int transfersize;
- unsigned char *data;
- unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
- struct scsi_cmnd *cmd;
-
-#ifdef SUN3_SCSI_VME
- dregs->csr |= CSR_INTR;
-#endif
-
- while ((cmd = hostdata->connected)) {
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(cmd);
-
- tmp = NCR5380_read(STATUS_REG);
- /* We only have a valid SCSI phase when REQ is asserted */
- if (tmp & SR_REQ) {
- phase = (tmp & PHASE_MASK);
- if (phase != old_phase) {
- old_phase = phase;
- NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
- }
-#if defined(CONFIG_SUN3)
- if (phase == PHASE_CMDOUT) {
-#if defined(REAL_DMA)
- void *d;
- unsigned long count;
-
- if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
- count = cmd->SCp.buffer->length;
- d = sg_virt(cmd->SCp.buffer);
- } else {
- count = cmd->SCp.this_residual;
- d = cmd->SCp.ptr;
- }
- /* Has this command been set up for DMA yet? */
- if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != cmd)) {
- if (cmd->request->cmd_type == REQ_TYPE_FS) {
- sun3scsi_dma_setup(instance, d, count,
- rq_data_dir(cmd->request));
- sun3_dma_setup_done = cmd;
- }
- }
-#endif
-#ifdef SUN3_SCSI_VME
- dregs->csr |= CSR_INTR;
-#endif
- }
-#endif /* CONFIG_SUN3 */
-
- if (sink && (phase != PHASE_MSGOUT)) {
- NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
-
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN |
- ICR_ASSERT_ACK);
- while (NCR5380_read(STATUS_REG) & SR_REQ)
- ;
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
- ICR_ASSERT_ATN);
- sink = 0;
- continue;
- }
-
- switch (phase) {
- case PHASE_DATAOUT:
-#if (NDEBUG & NDEBUG_NO_DATAOUT)
- shost_printk(KERN_DEBUG, instance, "NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n");
- sink = 1;
- do_abort(instance);
- cmd->result = DID_ERROR << 16;
- complete_cmd(instance, cmd);
- hostdata->connected = NULL;
- return;
-#endif
- case PHASE_DATAIN:
- /*
- * If there is no room left in the current buffer in the
- * scatter-gather list, move onto the next one.
- */
-
- if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
- ++cmd->SCp.buffer;
- --cmd->SCp.buffers_residual;
- cmd->SCp.this_residual = cmd->SCp.buffer->length;
- cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
- merge_contiguous_buffers(cmd);
- dsprintk(NDEBUG_INFORMATION, instance, "%d bytes and %d buffers left\n",
- cmd->SCp.this_residual,
- cmd->SCp.buffers_residual);
- }
-
- /*
- * The preferred transfer method is going to be
- * PSEUDO-DMA for systems that are strictly PIO,
- * since we can let the hardware do the handshaking.
- *
- * For this to work, we need to know the transfersize
- * ahead of time, since the pseudo-DMA code will sit
- * in an unconditional loop.
- */
-
- /* ++roman: I suggest this should be
- * #ifdef REAL_DMA
- * instead of leaving REAL_DMA out.
- */
-
-#if defined(REAL_DMA)
-#if !defined(CONFIG_SUN3)
- transfersize = 0;
- if (!cmd->device->borken)
-#endif
- transfersize = NCR5380_dma_xfer_len(instance, cmd, phase);
-
- if (transfersize >= DMA_MIN_SIZE) {
- len = transfersize;
- cmd->SCp.phase = phase;
- if (NCR5380_transfer_dma(instance, &phase,
- &len, (unsigned char **)&cmd->SCp.ptr)) {
- /*
- * If the watchdog timer fires, all future
- * accesses to this device will use
- * polled I/O.
- */
- scmd_printk(KERN_INFO, cmd,
- "switching to slow handshake\n");
- cmd->device->borken = 1;
- sink = 1;
- do_abort(instance);
- cmd->result = DID_ERROR << 16;
- /* XXX - need to source or sink data here, as appropriate */
- } else {
-#ifdef REAL_DMA
- /* ++roman: When using real DMA,
- * information_transfer() should return after
- * starting DMA since it has nothing more to
- * do.
- */
- return;
-#else
- cmd->SCp.this_residual -= transfersize - len;
-#endif
- }
- } else
-#endif /* defined(REAL_DMA) */
- {
- /* Break up transfer into 3 ms chunks,
- * presuming 6 accesses per handshake.
- */
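- /* accesses_per_ms / 2 bytes at roughly 6 register accesses
- * per byte comes to about 3 ms of handshaking per chunk.
- */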
- transfersize = min((unsigned long)cmd->SCp.this_residual,
- hostdata->accesses_per_ms / 2);
- len = transfersize;
- NCR5380_transfer_pio(instance, &phase, &len,
- (unsigned char **)&cmd->SCp.ptr);
- cmd->SCp.this_residual -= transfersize - len;
- }
-#if defined(CONFIG_SUN3) && defined(REAL_DMA)
- /* If we had intended to DMA that command, clear it. */
- if (sun3_dma_setup_done == cmd)
- sun3_dma_setup_done = NULL;
-#endif
- return;
- case PHASE_MSGIN:
- len = 1;
- data = &tmp;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
- cmd->SCp.Message = tmp;
-
- switch (tmp) {
- case ABORT:
- case COMMAND_COMPLETE:
- /* Accept message by clearing ACK */
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- dsprintk(NDEBUG_QUEUES, instance,
- "COMMAND COMPLETE %p target %d lun %llu\n",
- cmd, scmd_id(cmd), cmd->device->lun);
-
- hostdata->connected = NULL;
-#ifdef SUPPORT_TAGS
- cmd_free_tag(cmd);
- if (status_byte(cmd->SCp.Status) == QUEUE_FULL) {
- u8 lun = cmd->device->lun;
- struct tag_alloc *ta = &hostdata->TagAlloc[scmd_id(cmd)][lun];
-
- dsprintk(NDEBUG_TAGS, instance,
- "QUEUE_FULL %p target %d lun %d nr_allocated %d\n",
- cmd, scmd_id(cmd), lun, ta->nr_allocated);
- if (ta->queue_size > ta->nr_allocated)
- ta->queue_size = ta->nr_allocated;
- }
-#endif
-
- cmd->result &= ~0xffff;
- cmd->result |= cmd->SCp.Status;
- cmd->result |= cmd->SCp.Message << 8;
-
- if (cmd->cmnd[0] == REQUEST_SENSE)
- complete_cmd(instance, cmd);
- else {
- if (cmd->SCp.Status == SAM_STAT_CHECK_CONDITION ||
- cmd->SCp.Status == SAM_STAT_COMMAND_TERMINATED) {
- dsprintk(NDEBUG_QUEUES, instance, "autosense: adding cmd %p to tail of autosense queue\n",
- cmd);
- list_add_tail(&ncmd->list,
- &hostdata->autosense);
- } else
- complete_cmd(instance, cmd);
- }
-
- /*
- * Restore phase bits to 0 so an interrupted selection or
- * arbitration can resume.
- */
- NCR5380_write(TARGET_COMMAND_REG, 0);
-
- /* Enable reselect interrupts */
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-
- maybe_release_dma_irq(instance);
- return;
- case MESSAGE_REJECT:
- /* Accept message by clearing ACK */
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- switch (hostdata->last_message) {
- case HEAD_OF_QUEUE_TAG:
- case ORDERED_QUEUE_TAG:
- case SIMPLE_QUEUE_TAG:
- /* The target evidently doesn't support tagged
- * queuing, even though it announced this ability in
- * its INQUIRY data (perhaps only for this LUN?).
- * Clear 'tagged_supported' and lock the LUN, since
- * the command is treated as untagged from here on.
- */
- cmd->device->tagged_supported = 0;
- hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
- cmd->tag = TAG_NONE;
- dsprintk(NDEBUG_TAGS, instance, "target %d lun %llu rejected QUEUE_TAG message; tagged queuing disabled\n",
- scmd_id(cmd), cmd->device->lun);
- break;
- }
- break;
- case DISCONNECT:
- /* Accept message by clearing ACK */
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- hostdata->connected = NULL;
- list_add(&ncmd->list, &hostdata->disconnected);
- dsprintk(NDEBUG_INFORMATION | NDEBUG_QUEUES,
- instance, "connected command %p for target %d lun %llu moved to disconnected queue\n",
- cmd, scmd_id(cmd), cmd->device->lun);
-
- /*
- * Restore phase bits to 0 so an interrupted selection
- * or arbitration can resume.
- */
- NCR5380_write(TARGET_COMMAND_REG, 0);
-
- /* Enable reselect interrupts */
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-#ifdef SUN3_SCSI_VME
- dregs->csr |= CSR_DMA_ENABLE;
-#endif
- return;
- /*
- * The SCSI data pointer is *IMPLICITLY* saved on a disconnect
- * operation, in violation of the SCSI spec, so we can safely
- * ignore SAVE/RESTORE POINTERS messages.
- *
- * Unfortunately, some disks violate the SCSI spec and
- * don't issue the required SAVE_POINTERS message before
- * disconnecting, so we have to break the spec to remain
- * compatible.
- */
- case SAVE_POINTERS:
- case RESTORE_POINTERS:
- /* Accept message by clearing ACK */
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- break;
- case EXTENDED_MESSAGE:
- /*
- * Start the message buffer with the EXTENDED_MESSAGE
- * byte, since spi_print_msg() wants the whole thing.
- */
- extended_msg[0] = EXTENDED_MESSAGE;
- /* Accept first byte by clearing ACK */
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
- spin_unlock_irq(&hostdata->lock);
-
- dsprintk(NDEBUG_EXTENDED, instance, "receiving extended message\n");
-
- len = 2;
- data = extended_msg + 1;
- phase = PHASE_MSGIN;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
- dsprintk(NDEBUG_EXTENDED, instance, "length %d, code 0x%02x\n",
- (int)extended_msg[1],
- (int)extended_msg[2]);
-
- if (!len && extended_msg[1] > 0 &&
- extended_msg[1] <= sizeof(extended_msg) - 2) {
- /* Accept third byte by clearing ACK */
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- len = extended_msg[1] - 1;
- data = extended_msg + 3;
- phase = PHASE_MSGIN;
-
- NCR5380_transfer_pio(instance, &phase, &len, &data);
- dsprintk(NDEBUG_EXTENDED, instance, "message received, residual %d\n",
- len);
-
- switch (extended_msg[2]) {
- case EXTENDED_SDTR:
- case EXTENDED_WDTR:
- case EXTENDED_MODIFY_DATA_POINTER:
- case EXTENDED_EXTENDED_IDENTIFY:
- tmp = 0;
- }
- } else if (len) {
- shost_printk(KERN_ERR, instance, "error receiving extended message\n");
- tmp = 0;
- } else {
- shost_printk(KERN_NOTICE, instance, "extended message code %02x length %d is too long\n",
- extended_msg[2], extended_msg[1]);
- tmp = 0;
- }
-
- spin_lock_irq(&hostdata->lock);
- if (!hostdata->connected)
- return;
-
- /* Fall through to reject message */
-
- /*
- * If we get something weird that we aren't expecting,
- * reject it.
- */
- default:
- if (!tmp) {
- shost_printk(KERN_ERR, instance, "rejecting message ");
- spi_print_msg(extended_msg);
- printk("\n");
- } else if (tmp != EXTENDED_MESSAGE)
- scmd_printk(KERN_INFO, cmd,
- "rejecting unknown message %02x\n",
- tmp);
- else
- scmd_printk(KERN_INFO, cmd,
- "rejecting unknown extended message code %02x, length %d\n",
- extended_msg[2], extended_msg[1]);
-
- msgout = MESSAGE_REJECT;
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
- break;
- } /* switch (tmp) */
- break;
- case PHASE_MSGOUT:
- len = 1;
- data = &msgout;
- hostdata->last_message = msgout;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
- if (msgout == ABORT) {
- hostdata->connected = NULL;
- cmd->result = DID_ERROR << 16;
- complete_cmd(instance, cmd);
- maybe_release_dma_irq(instance);
- NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
- return;
- }
- msgout = NOP;
- break;
- case PHASE_CMDOUT:
- len = cmd->cmd_len;
- data = cmd->cmnd;
- /*
- * XXX for performance reasons, on machines with a
- * PSEUDO-DMA architecture we should probably
- * use the dma transfer function.
- */
- NCR5380_transfer_pio(instance, &phase, &len, &data);
- break;
- case PHASE_STATIN:
- len = 1;
- data = &tmp;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
- cmd->SCp.Status = tmp;
- break;
- default:
- shost_printk(KERN_ERR, instance, "unknown phase\n");
- NCR5380_dprint(NDEBUG_ANY, instance);
- } /* switch(phase) */
- } else {
- spin_unlock_irq(&hostdata->lock);
- NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
- spin_lock_irq(&hostdata->lock);
- }
- }
-}
-
-/*
- * Function : void NCR5380_reselect (struct Scsi_Host *instance)
- *
- * Purpose : does reselection, initializing the instance->connected
- * field to point to the scsi_cmnd for which the I_T_L or I_T_L_Q
- * nexus has been reestablished.
- *
- * Inputs : instance - this instance of the NCR5380.
- */
-
-
-/* it might eventually prove necessary to do a dma setup on
- reselection, but it doesn't seem to be needed now -- sam */
-
-static void NCR5380_reselect(struct Scsi_Host *instance)
-{
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned char target_mask;
- unsigned char lun;
-#ifdef SUPPORT_TAGS
- unsigned char tag;
-#endif
- unsigned char msg[3];
- int __maybe_unused len;
- unsigned char __maybe_unused *data, __maybe_unused phase;
- struct NCR5380_cmd *ncmd;
- struct scsi_cmnd *tmp;
-
- /*
- * Disable arbitration, etc. since the host adapter obviously
- * lost, and tell an interrupted NCR5380_select() to restart.
- */
-
- NCR5380_write(MODE_REG, MR_BASE);
-
- target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
-
- dsprintk(NDEBUG_RESELECTION, instance, "reselect\n");
-
- /*
- * At this point, we have detected that our SCSI ID is on the bus,
- * SEL is true and BSY was false for at least one bus settle delay
- * (400 ns).
- *
- * We must assert BSY ourselves, until the target drops the SEL
- * signal.
- */
-
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
- if (NCR5380_poll_politely(instance,
- STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- return;
- }
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
- /*
- * Wait for target to go into MSGIN.
- */
-
- if (NCR5380_poll_politely(instance,
- STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
- do_abort(instance);
- return;
- }
-
-#if defined(CONFIG_SUN3) && defined(REAL_DMA)
- /* acknowledge toggle to MSGIN */
- NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(PHASE_MSGIN));
-
- /* peek at the byte without really hitting the bus */
- msg[0] = NCR5380_read(CURRENT_SCSI_DATA_REG);
-#else
- len = 1;
- data = msg;
- phase = PHASE_MSGIN;
- NCR5380_transfer_pio(instance, &phase, &len, &data);
-
- if (len) {
- do_abort(instance);
- return;
- }
-#endif
-
- if (!(msg[0] & 0x80)) {
- shost_printk(KERN_ERR, instance, "expecting IDENTIFY message, got ");
- spi_print_msg(msg);
- printk("\n");
- do_abort(instance);
- return;
- }
- lun = msg[0] & 0x07;
-
-#if defined(SUPPORT_TAGS) && !defined(CONFIG_SUN3)
- /* If the phase is still MSGIN, the target wants to send some more
- * messages. If it supports tagged queuing, this is probably a
- * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
- */
- tag = TAG_NONE;
- if (phase == PHASE_MSGIN && (hostdata->flags & FLAG_TAGGED_QUEUING)) {
- /* Accept previous IDENTIFY message by clearing ACK */
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- len = 2;
- data = msg + 1;
- if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
- msg[1] == SIMPLE_QUEUE_TAG)
- tag = msg[2];
- dsprintk(NDEBUG_TAGS, instance, "reselect: target mask %02x, lun %d sent tag %d\n",
- target_mask, lun, tag);
- }
-#endif
-
- /*
- * Find the command corresponding to the I_T_L or I_T_L_Q nexus we
- * just reestablished, and remove it from the disconnected queue.
- */
-
- tmp = NULL;
- list_for_each_entry(ncmd, &hostdata->disconnected, list) {
- struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
-
- if (target_mask == (1 << scmd_id(cmd)) &&
- lun == (u8)cmd->device->lun
-#ifdef SUPPORT_TAGS
- && (tag == cmd->tag)
-#endif
- ) {
- list_del(&ncmd->list);
- tmp = cmd;
- break;
- }
- }
-
- if (tmp) {
- dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance,
- "reselect: removed %p from disconnected queue\n", tmp);
- } else {
-
-#ifdef SUPPORT_TAGS
- shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d tag %d not in disconnected queue.\n",
- target_mask, lun, tag);
-#else
- shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n",
- target_mask, lun);
-#endif
- /*
- * Since we have an established nexus that we can't do anything
- * with, we must abort it.
- */
- do_abort(instance);
- return;
- }
-
-#if defined(CONFIG_SUN3) && defined(REAL_DMA)
- /* engage dma setup for the command we just saw */
- {
- void *d;
- unsigned long count;
-
- if (!tmp->SCp.this_residual && tmp->SCp.buffers_residual) {
- count = tmp->SCp.buffer->length;
- d = sg_virt(tmp->SCp.buffer);
- } else {
- count = tmp->SCp.this_residual;
- d = tmp->SCp.ptr;
- }
- /* setup this command for dma if not already */
- if ((count >= DMA_MIN_SIZE) && (sun3_dma_setup_done != tmp)) {
- sun3scsi_dma_setup(instance, d, count,
- rq_data_dir(tmp->request));
- sun3_dma_setup_done = tmp;
- }
- }
-
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
-#endif
-
- /* Accept message by clearing ACK */
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-
-#if defined(SUPPORT_TAGS) && defined(CONFIG_SUN3)
- /* If the phase is still MSGIN, the target wants to send some more
- * messages. If it supports tagged queuing, this is probably a
- * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus.
- */
- tag = TAG_NONE;
- if (phase == PHASE_MSGIN && setup_use_tagged_queuing) {
- /* Accept previous IDENTIFY message by clearing ACK */
- NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- len = 2;
- data = msg + 1;
- if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
- msg[1] == SIMPLE_QUEUE_TAG)
- tag = msg[2];
- dsprintk(NDEBUG_TAGS, instance, "reselect: target mask %02x, lun %d sent tag %d\n",
- target_mask, lun, tag);
- }
-#endif
-
- hostdata->connected = tmp;
- dsprintk(NDEBUG_RESELECTION, instance, "nexus established, target %d, lun %llu, tag %d\n",
- scmd_id(tmp), tmp->device->lun, tmp->tag);
-}
-
-
-/**
- * list_find_cmd - test for presence of a command in a linked list
- * @haystack: list of commands
- * @needle: command to search for
- */
-
-static bool list_find_cmd(struct list_head *haystack,
- struct scsi_cmnd *needle)
-{
- struct NCR5380_cmd *ncmd;
-
- list_for_each_entry(ncmd, haystack, list)
- if (NCR5380_to_scmd(ncmd) == needle)
- return true;
- return false;
-}
-
-/**
- * list_del_cmd - remove a command from a linked list
- * @haystack: list of commands
- * @needle: command to remove
- */
-
-static bool list_del_cmd(struct list_head *haystack,
- struct scsi_cmnd *needle)
-{
- if (list_find_cmd(haystack, needle)) {
- struct NCR5380_cmd *ncmd = scsi_cmd_priv(needle);
-
- list_del(&ncmd->list);
- return true;
- }
- return false;
-}
-
-/**
- * NCR5380_abort - scsi host eh_abort_handler() method
- * @cmd: the command to be aborted
- *
- * Try to abort a given command by removing it from queues and/or sending
- * the target an abort message. This may not succeed in causing a target
- * to abort the command. Nonetheless, the low-level driver must forget about
- * the command because the mid-layer reclaims it and it may be re-issued.
- *
- * The normal path taken by a command is as follows. For EH we trace this
- * same path to locate and abort the command.
- *
- * unissued -> selecting -> [unissued -> selecting ->]... connected ->
- * [disconnected -> connected ->]...
- * [autosense -> connected ->] done
- *
- * If cmd was not found at all then presumably it has already been completed,
- * in which case return SUCCESS to try to avoid further EH measures.
- *
- * If the command has not completed yet, we must not fail to find it.
- * We have no option but to forget the aborted command (even if it still
- * lacks sense data). The mid-layer may re-issue a command that is in error
- * recovery (see scsi_send_eh_cmnd), but the logic and data structures in
- * this driver are such that a command can appear on one queue only.
- *
- * The lock protects driver data structures, but EH handlers also use it
- * to serialize their own execution and prevent their own re-entry.
- */
-
-static int NCR5380_abort(struct scsi_cmnd *cmd)
-{
- struct Scsi_Host *instance = cmd->device->host;
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned long flags;
- int result = SUCCESS;
-
- spin_lock_irqsave(&hostdata->lock, flags);
-
-#if (NDEBUG & NDEBUG_ANY)
- scmd_printk(KERN_INFO, cmd, __func__);
-#endif
- NCR5380_dprint(NDEBUG_ANY, instance);
- NCR5380_dprint_phase(NDEBUG_ANY, instance);
-
- if (list_del_cmd(&hostdata->unissued, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: removed %p from issue queue\n", cmd);
- cmd->result = DID_ABORT << 16;
- cmd->scsi_done(cmd); /* No tag or busy flag to worry about */
- goto out;
- }
-
- if (hostdata->selecting == cmd) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: cmd %p == selecting\n", cmd);
- hostdata->selecting = NULL;
- cmd->result = DID_ABORT << 16;
- complete_cmd(instance, cmd);
- goto out;
- }
-
- if (list_del_cmd(&hostdata->disconnected, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: removed %p from disconnected list\n", cmd);
- /* Can't call NCR5380_select() and send ABORT because that
- * means releasing the lock. Need a bus reset.
- */
- set_host_byte(cmd, DID_ERROR);
- complete_cmd(instance, cmd);
- result = FAILED;
- goto out;
- }
-
- if (hostdata->connected == cmd) {
- dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
- hostdata->connected = NULL;
-#ifdef REAL_DMA
- hostdata->dma_len = 0;
-#endif
- if (do_abort(instance)) {
- set_host_byte(cmd, DID_ERROR);
- complete_cmd(instance, cmd);
- result = FAILED;
- goto out;
- }
- set_host_byte(cmd, DID_ABORT);
- complete_cmd(instance, cmd);
- goto out;
- }
-
- if (list_del_cmd(&hostdata->autosense, cmd)) {
- dsprintk(NDEBUG_ABORT, instance,
- "abort: removed %p from sense queue\n", cmd);
- set_host_byte(cmd, DID_ERROR);
- complete_cmd(instance, cmd);
- }
-
-out:
- if (result == FAILED)
- dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd);
- else
- dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd);
-
- queue_work(hostdata->work_q, &hostdata->main_task);
- maybe_release_dma_irq(instance);
- spin_unlock_irqrestore(&hostdata->lock, flags);
-
- return result;
-}
-
-
-/**
- * NCR5380_bus_reset - reset the SCSI bus
- * @cmd: SCSI command undergoing EH
- *
- * Returns SUCCESS
- */
-
-static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
-{
- struct Scsi_Host *instance = cmd->device->host;
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
- int i;
- unsigned long flags;
- struct NCR5380_cmd *ncmd;
-
- spin_lock_irqsave(&hostdata->lock, flags);
-
-#if (NDEBUG & NDEBUG_ANY)
- scmd_printk(KERN_INFO, cmd, __func__);
-#endif
- NCR5380_dprint(NDEBUG_ANY, instance);
- NCR5380_dprint_phase(NDEBUG_ANY, instance);
-
- do_reset(instance);
-
- /* reset NCR registers */
- NCR5380_write(MODE_REG, MR_BASE);
- NCR5380_write(TARGET_COMMAND_REG, 0);
- NCR5380_write(SELECT_ENABLE_REG, 0);
-
- /* After the reset, there are no more connected or disconnected commands
- * and no busy units, so clear the low-level status here to avoid
- * conflicts when the mid-level code tries to wake up the affected
- * commands.
- */
-
- if (list_del_cmd(&hostdata->unissued, cmd)) {
- cmd->result = DID_RESET << 16;
- cmd->scsi_done(cmd);
- }
-
- if (hostdata->selecting) {
- hostdata->selecting->result = DID_RESET << 16;
- complete_cmd(instance, hostdata->selecting);
- hostdata->selecting = NULL;
- }
-
- list_for_each_entry(ncmd, &hostdata->disconnected, list) {
- struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
-
- set_host_byte(cmd, DID_RESET);
- cmd->scsi_done(cmd);
- }
- INIT_LIST_HEAD(&hostdata->disconnected);
-
- list_for_each_entry(ncmd, &hostdata->autosense, list) {
- struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
-
- set_host_byte(cmd, DID_RESET);
- cmd->scsi_done(cmd);
- }
- INIT_LIST_HEAD(&hostdata->autosense);
-
- if (hostdata->connected) {
- set_host_byte(hostdata->connected, DID_RESET);
- complete_cmd(instance, hostdata->connected);
- hostdata->connected = NULL;
- }
-
-#ifdef SUPPORT_TAGS
- free_all_tags(hostdata);
-#endif
- for (i = 0; i < 8; ++i)
- hostdata->busy[i] = 0;
-#ifdef REAL_DMA
- hostdata->dma_len = 0;
-#endif
-
- queue_work(hostdata->work_q, &hostdata->main_task);
- maybe_release_dma_irq(instance);
- spin_unlock_irqrestore(&hostdata->lock, flags);
-
- return SUCCESS;
-}
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index 78d1b2963..a59ad94ea 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -14,55 +14,23 @@
*
*/
-
-/**************************************************************************/
-/* */
-/* Notes for Falcon SCSI: */
-/* ---------------------- */
-/* */
-/* Since the Falcon SCSI uses the ST-DMA chip, that is shared among */
-/* several device drivers, locking and unlocking the access to this */
-/* chip is required. But locking is not possible from an interrupt, */
-/* since it puts the process to sleep if the lock is not available. */
-/* This prevents "late" locking of the DMA chip, i.e. locking it just */
-/* before using it, since in case of disconnection-reconnection */
-/* commands, the DMA is started from the reselection interrupt. */
-/* */
-/* Two possible schemes for ST-DMA-locking would be: */
-/* 1) The lock is taken for each command separately and disconnecting */
-/* is forbidden (i.e. can_queue = 1). */
-/* 2) The DMA chip is locked when the first command comes in and */
-/* released when the last command is finished and all queues are */
-/* empty. */
-/* The first alternative would result in bad performance, since the */
-/* interleaving of commands would not be used. The second is unfair to */
-/* other drivers using the ST-DMA, because the queues will seldom be */
-/* totally empty if there is a lot of disk traffic. */
-/* */
-/* For these reasons I decided to employ a more elaborate scheme:   */
-/* - First, we give up the lock every time we can (for fairness), this */
-/* means every time a command finishes and there are no other commands */
-/* on the disconnected queue. */
-/* - If there are others waiting to lock the DMA chip, we stop */
-/* issuing commands, i.e. moving them onto the issue queue. */
-/* Because of that, the disconnected queue will run empty in a */
-/* while. Instead we go to sleep on a 'fairness_queue'. */
-/* - If the lock is released, all processes waiting on the fairness */
-/* queue will be woken. The first of them tries to re-lock the DMA, */
-/* the others wait for the first to finish this task. After that, */
-/* they can all run on and do their commands... */
-/* This sounds complicated (and it is :-(), but it seems to be a    */
-/* good compromise between fairness and performance: As long as no one */
-/* else wants to work with the ST-DMA chip, SCSI can go along as */
-/* usual. If now someone else comes, this behaviour is changed to a */
-/* "fairness mode": just already initiated commands are finished and */
-/* then the lock is released. The other one waiting will probably win */
-/* the race for locking the DMA, since it was waiting for longer. And */
-/* after it has finished, SCSI can go ahead again. Finally: I hope I */
-/* have not produced any deadlock possibilities! */
-/* */
-/**************************************************************************/
-
+/*
+ * Notes for Falcon SCSI DMA
+ *
+ * The 5380 device is one of several that all share the DMA chip. Hence
+ * "locking" and "unlocking" access to this chip is required.
+ *
+ * Two possible schemes for ST DMA acquisition by atari_scsi are:
+ * 1) The lock is taken for each command separately (i.e. can_queue == 1).
+ * 2) The lock is taken when the first command arrives and released
+ * when the last command is finished (i.e. can_queue > 1).
+ *
+ * The first alternative limits SCSI bus utilization, since interleaving
+ * commands is not possible. The second gives better performance but is
+ * unfair to other drivers needing to use the ST DMA chip. In order to
+ * allow the IDE and floppy drivers equal access to the ST DMA chip,
+ * the default is can_queue == 1.
+ */
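As a minimal sketch of the acquisition pattern described above (illustrative only; the helper name is hypothetical, and falcon_get_lock() further down holds the real logic), using the stdma_*() interfaces this file already calls: interrupt context may only try-lock, while process context may sleep until the ST DMA chip is free.

	/* Hypothetical helper, not part of this driver. */
	static int example_get_st_dma(struct Scsi_Host *instance)
	{
		/* Reuse the lock if this driver already holds it. */
		if (stdma_is_locked_by(scsi_falcon_intr))
			return 1;

		/* Interrupt context must not sleep: lock only if free. */
		if (in_interrupt())
			return stdma_try_lock(scsi_falcon_intr, instance);

		/* Process context may block until the chip is released. */
		stdma_lock(scsi_falcon_intr, instance);
		return 1;
	}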
#include <linux/module.h>
#include <linux/types.h>
@@ -83,13 +51,10 @@
#include <scsi/scsi_host.h>
-/* Definitions for the core NCR5380 driver. */
-
-#define REAL_DMA
-#define SUPPORT_TAGS
-#define MAX_TAGS 32
#define DMA_MIN_SIZE 32
+/* Definitions for the core NCR5380 driver. */
+
#define NCR5380_implementation_fields /* none */
#define NCR5380_read(reg) atari_scsi_reg_read(reg)
@@ -99,9 +64,9 @@
#define NCR5380_abort atari_scsi_abort
#define NCR5380_info atari_scsi_info
-#define NCR5380_dma_read_setup(instance, data, count) \
+#define NCR5380_dma_recv_setup(instance, data, count) \
atari_scsi_dma_setup(instance, data, count, 0)
-#define NCR5380_dma_write_setup(instance, data, count) \
+#define NCR5380_dma_send_setup(instance, data, count) \
atari_scsi_dma_setup(instance, data, count, 1)
#define NCR5380_dma_residual(instance) \
atari_scsi_dma_residual(instance)
@@ -159,14 +124,11 @@ static inline unsigned long SCSI_DMA_GETADR(void)
return adr;
}
-#ifdef REAL_DMA
static void atari_scsi_fetch_restbytes(void);
-#endif
static unsigned char (*atari_scsi_reg_read)(unsigned char reg);
static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value);
-#ifdef REAL_DMA
static unsigned long atari_dma_residual, atari_dma_startaddr;
static short atari_dma_active;
/* pointer to the dribble buffer */
@@ -185,7 +147,6 @@ static char *atari_dma_orig_addr;
/* mask for address bits that can't be used with the ST-DMA */
static unsigned long atari_dma_stram_mask;
#define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0)
-#endif
static int setup_can_queue = -1;
module_param(setup_can_queue, int, 0);
@@ -193,16 +154,12 @@ static int setup_cmd_per_lun = -1;
module_param(setup_cmd_per_lun, int, 0);
static int setup_sg_tablesize = -1;
module_param(setup_sg_tablesize, int, 0);
-static int setup_use_tagged_queuing = -1;
-module_param(setup_use_tagged_queuing, int, 0);
static int setup_hostid = -1;
module_param(setup_hostid, int, 0);
static int setup_toshiba_delay = -1;
module_param(setup_toshiba_delay, int, 0);
-#if defined(REAL_DMA)
-
static int scsi_dma_is_ignored_buserr(unsigned char dma_stat)
{
int i;
@@ -255,12 +212,9 @@ static void scsi_dma_buserr(int irq, void *dummy)
}
#endif
-#endif
-
static irqreturn_t scsi_tt_intr(int irq, void *dev)
{
-#ifdef REAL_DMA
struct Scsi_Host *instance = dev;
struct NCR5380_hostdata *hostdata = shost_priv(instance);
int dma_stat;
@@ -342,8 +296,6 @@ static irqreturn_t scsi_tt_intr(int irq, void *dev)
tt_scsi_dma.dma_ctrl = 0;
}
-#endif /* REAL_DMA */
-
NCR5380_intr(irq, dev);
return IRQ_HANDLED;
@@ -352,7 +304,6 @@ static irqreturn_t scsi_tt_intr(int irq, void *dev)
static irqreturn_t scsi_falcon_intr(int irq, void *dev)
{
-#ifdef REAL_DMA
struct Scsi_Host *instance = dev;
struct NCR5380_hostdata *hostdata = shost_priv(instance);
int dma_stat;
@@ -405,15 +356,12 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dev)
atari_dma_orig_addr = NULL;
}
-#endif /* REAL_DMA */
-
NCR5380_intr(irq, dev);
return IRQ_HANDLED;
}
-#ifdef REAL_DMA
static void atari_scsi_fetch_restbytes(void)
{
int nr;
@@ -436,7 +384,6 @@ static void atari_scsi_fetch_restbytes(void)
*dst++ = *src++;
}
}
-#endif /* REAL_DMA */
/* This function releases the lock on the DMA chip if there is no
@@ -464,6 +411,10 @@ static int falcon_get_lock(struct Scsi_Host *instance)
if (IS_A_TT())
return 1;
+ if (stdma_is_locked_by(scsi_falcon_intr) &&
+ instance->hostt->can_queue > 1)
+ return 1;
+
if (in_interrupt())
return stdma_try_lock(scsi_falcon_intr, instance);
@@ -495,8 +446,7 @@ static int __init atari_scsi_setup(char *str)
setup_sg_tablesize = ints[3];
if (ints[0] >= 4)
setup_hostid = ints[4];
- if (ints[0] >= 5)
- setup_use_tagged_queuing = ints[5];
+ /* ints[5] (use_tagged_queuing) is ignored */
/* ints[6] (use_pdma) is ignored */
if (ints[0] >= 7)
setup_toshiba_delay = ints[7];
@@ -508,8 +458,6 @@ __setup("atascsi=", atari_scsi_setup);
#endif /* !MODULE */
-#if defined(REAL_DMA)
-
static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
void *data, unsigned long count,
int dir)
@@ -545,9 +493,6 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
*/
dma_cache_maintenance(addr, count, dir);
- if (count == 0)
- printk(KERN_NOTICE "SCSI warning: DMA programmed for 0 bytes !\n");
-
if (IS_A_TT()) {
tt_scsi_dma.dma_ctrl = dir;
SCSI_DMA_WRITE_P(dma_addr, addr);
@@ -624,6 +569,9 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
{
unsigned long possible_len, limit;
+ if (wanted_len < DMA_MIN_SIZE)
+ return 0;
+
if (IS_A_TT())
/* TT SCSI DMA can transfer arbitrary #bytes */
return wanted_len;
@@ -703,9 +651,6 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
}
-#endif /* REAL_DMA */
-
-
/* NCR5380 register access functions
*
* There are separate functions for TT and Falcon, because the access
@@ -736,7 +681,7 @@ static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value)
}
-#include "atari_NCR5380.c"
+#include "NCR5380.c"
static int atari_scsi_bus_reset(struct scsi_cmnd *cmd)
{
@@ -745,7 +690,6 @@ static int atari_scsi_bus_reset(struct scsi_cmnd *cmd)
local_irq_save(flags);
-#ifdef REAL_DMA
/* Abort a maybe active DMA transfer */
if (IS_A_TT()) {
tt_scsi_dma.dma_ctrl = 0;
@@ -754,7 +698,6 @@ static int atari_scsi_bus_reset(struct scsi_cmnd *cmd)
atari_dma_active = 0;
atari_dma_orig_addr = NULL;
}
-#endif
rv = NCR5380_bus_reset(cmd);
@@ -781,6 +724,7 @@ static struct scsi_host_template atari_scsi_template = {
.eh_abort_handler = atari_scsi_abort,
.eh_bus_reset_handler = atari_scsi_bus_reset,
.this_id = 7,
+ .cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
.cmd_size = NCR5380_CMD_SIZE,
};
@@ -804,24 +748,11 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
atari_scsi_reg_write = atari_scsi_falcon_reg_write;
}
- /* The values for CMD_PER_LUN and CAN_QUEUE are somewhat arbitrary.
- * Higher values should work, too; try it!
- * (But cmd_per_lun costs memory!)
- *
- * But there seems to be a bug somewhere that requires CAN_QUEUE to be
- * 2*CMD_PER_LUN. At least on a TT, no spurious timeouts have been
- * seen since changing CMD_PER_LUN...
- *
- * Note: The Falcon currently uses an 8/1 setting due to unsolved
- * problems with cmd_per_lun != 1.
- */
if (ATARIHW_PRESENT(TT_SCSI)) {
atari_scsi_template.can_queue = 16;
- atari_scsi_template.cmd_per_lun = 8;
atari_scsi_template.sg_tablesize = SG_ALL;
} else {
- atari_scsi_template.can_queue = 8;
- atari_scsi_template.cmd_per_lun = 1;
+ atari_scsi_template.can_queue = 1;
atari_scsi_template.sg_tablesize = SG_NONE;
}
@@ -850,8 +781,6 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
}
}
-
-#ifdef REAL_DMA
/* If running on a Falcon and if there's TT-Ram (i.e., more than one
* memory block, since there's always ST-Ram in a Falcon), then
* allocate a STRAM_BUFFER_SIZE byte dribble buffer for transfers
@@ -867,7 +796,6 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer);
atari_dma_orig_addr = 0;
}
-#endif
instance = scsi_host_alloc(&atari_scsi_template,
sizeof(struct NCR5380_hostdata));
@@ -879,9 +807,6 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
instance->irq = irq->start;
host_flags |= IS_A_TT() ? 0 : FLAG_LATE_DMA_SETUP;
-#ifdef SUPPORT_TAGS
- host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
-#endif
host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0;
error = NCR5380_init(instance, host_flags);
@@ -897,7 +822,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
goto fail_irq;
}
tt_mfp.active_edge |= 0x80; /* SCSI int on L->H */
-#ifdef REAL_DMA
+
tt_scsi_dma.dma_ctrl = 0;
atari_dma_residual = 0;
@@ -919,17 +844,14 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
hostdata->read_overruns = 4;
}
-#endif
} else {
/* Nothing to do for the interrupt: the ST-DMA is initialized
* already.
*/
-#ifdef REAL_DMA
atari_dma_residual = 0;
atari_dma_active = 0;
atari_dma_stram_mask = (ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000
: 0xff000000);
-#endif
}
NCR5380_maybe_reset_bus(instance);
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index 06dc215ea..0f797a55d 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -874,8 +874,8 @@ bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
/*
* itnim callbacks
*/
-void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
- struct bfad_itnim_s **itnim_drv);
+int bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
+ struct bfad_itnim_s **itnim_drv);
void bfa_fcb_itnim_free(struct bfad_s *bfad,
struct bfad_itnim_s *itnim_drv);
void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 4f089d76a..2e3b19e7e 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -588,12 +588,13 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
struct bfa_fcs_lport_s *port = rport->port;
struct bfa_fcs_itnim_s *itnim;
struct bfad_itnim_s *itnim_drv;
+ int ret;
/*
* call bfad to allocate the itnim
*/
- bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv);
- if (itnim == NULL) {
+ ret = bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv);
+ if (ret) {
bfa_trc(port->fcs, rport->pwwn);
return NULL;
}
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 6c805e13f..02d806012 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -440,13 +440,13 @@ bfad_im_slave_destroy(struct scsi_device *sdev)
* BFA FCS itnim alloc callback, after successful PRLI
* Context: Interrupt
*/
-void
+int
bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
struct bfad_itnim_s **itnim_drv)
{
*itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC);
if (*itnim_drv == NULL)
- return;
+ return -ENOMEM;
(*itnim_drv)->im = bfad->im;
*itnim = &(*itnim_drv)->fcs_itnim;
@@ -457,6 +457,7 @@ bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
*/
INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler);
bfad->bfad_flags |= BFAD_RPORT_ONLINE;
+ return 0;
}
/*
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 97600dcec..5f698d038 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -356,7 +356,7 @@ struct bfi_ioc_image_hdr_s {
u8 port0_mode; /* device mode for port 0 */
u8 port1_mode; /* device mode for port 1 */
u32 exec; /* exec vector */
- u32 bootenv; /* fimware boot env */
+ u32 bootenv; /* firmware boot env */
u32 rsvd_b[2];
struct bfi_ioc_fwver_s fwver;
u32 md5sum[BFI_IOC_MD5SUM_SZ];
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 499e369ea..fdd4eb4e4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -65,7 +65,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "2.9.6"
+#define BNX2FC_VERSION "2.10.3"
#define PFX "bnx2fc: "
@@ -261,6 +261,7 @@ struct bnx2fc_interface {
u8 vlan_enabled;
int vlan_id;
bool enabled;
+ u8 tm_timeout;
};
#define bnx2fc_from_ctlr(x) \
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index d7029ea5d..a18819939 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -107,6 +107,26 @@ MODULE_PARM_DESC(debug_logging,
"\t\t0x10 - fcoe L2 fame related logs.\n"
"\t\t0xff - LOG all messages.");
+uint bnx2fc_devloss_tmo;
+module_param_named(devloss_tmo, bnx2fc_devloss_tmo, uint, S_IRUGO);
+MODULE_PARM_DESC(devloss_tmo, " Change devloss_tmo for the remote ports "
+ "attached via bnx2fc.");
+
+uint bnx2fc_max_luns = BNX2FC_MAX_LUN;
+module_param_named(max_luns, bnx2fc_max_luns, uint, S_IRUGO);
+MODULE_PARM_DESC(max_luns, " Change the default max_lun per SCSI host. Default "
+ "0xffff.");
+
+uint bnx2fc_queue_depth;
+module_param_named(queue_depth, bnx2fc_queue_depth, uint, S_IRUGO);
+MODULE_PARM_DESC(queue_depth, " Change the default queue depth of SCSI devices "
+ "attached via bnx2fc.");
+
+uint bnx2fc_log_fka;
+module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(log_fka, " Print a message to the kernel log when fcoe "
+	"initiates a FIP keep alive, if debug logging is enabled.");
+
static int bnx2fc_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu);
/* notification function for CPU hotplug events */
@@ -692,7 +712,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
int rc = 0;
shost->max_cmd_len = BNX2FC_MAX_CMD_LEN;
- shost->max_lun = BNX2FC_MAX_LUN;
+ shost->max_lun = bnx2fc_max_luns;
shost->max_id = BNX2FC_MAX_FCP_TGT;
shost->max_channel = 0;
if (lport->vport)
@@ -1061,6 +1081,20 @@ static u8 *bnx2fc_get_src_mac(struct fc_lport *lport)
*/
static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
+ struct fip_header *fiph;
+ struct ethhdr *eth_hdr;
+ u16 op;
+ u8 sub;
+
+ fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
+ eth_hdr = (struct ethhdr *)skb_mac_header(skb);
+ op = ntohs(fiph->fip_op);
+ sub = fiph->fip_subcode;
+
+ if (op == FIP_OP_CTRL && sub == FIP_SC_SOL && bnx2fc_log_fka)
+ BNX2FC_MISC_DBG("Sending FKA from %pM to %pM.\n",
+ eth_hdr->h_source, eth_hdr->h_dest);
+
skb->dev = bnx2fc_from_ctlr(fip)->netdev;
dev_queue_xmit(skb);
}
@@ -1102,6 +1136,9 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
return -EIO;
}
+ if (bnx2fc_devloss_tmo)
+ fc_host_dev_loss_tmo(vn_port->host) = bnx2fc_devloss_tmo;
+
if (disabled) {
fc_vport_set_state(vport, FC_VPORT_DISABLED);
} else {
@@ -1495,6 +1532,9 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
}
fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+ if (bnx2fc_devloss_tmo)
+ fc_host_dev_loss_tmo(shost) = bnx2fc_devloss_tmo;
+
/* Allocate exchange manager */
if (!npiv)
rc = bnx2fc_em_config(lport, hba);
@@ -1999,6 +2039,8 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
return;
}
+ pr_info(PFX "FCoE initialized for %s.\n", dev->netdev->name);
+
/* Add HBA to the adapter list */
mutex_lock(&bnx2fc_dev_lock);
list_add_tail(&hba->list, &adapter_list);
@@ -2293,6 +2335,7 @@ static int _bnx2fc_create(struct net_device *netdev,
ctlr = bnx2fc_to_ctlr(interface);
cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
interface->vlan_id = vlan_id;
+ interface->tm_timeout = BNX2FC_TM_TIMEOUT;
interface->timer_work_queue =
create_singlethread_workqueue("bnx2fc_timer_wq");
@@ -2612,6 +2655,15 @@ static int bnx2fc_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
+static int bnx2fc_slave_configure(struct scsi_device *sdev)
+{
+ if (!bnx2fc_queue_depth)
+ return 0;
+
+ scsi_change_queue_depth(sdev, bnx2fc_queue_depth);
+ return 0;
+}
+
/**
* bnx2fc_mod_init - module init entry point
*
@@ -2858,6 +2910,50 @@ static struct fc_function_template bnx2fc_vport_xport_function = {
.bsg_request = fc_lport_bsg_request,
};
+/*
+ * Additional scsi_host attributes.
+ */
+static ssize_t
+bnx2fc_tm_timeout_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct fc_lport *lport = shost_priv(shost);
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_interface *interface = port->priv;
+
+ sprintf(buf, "%u\n", interface->tm_timeout);
+ return strlen(buf);
+}
+
+static ssize_t
+bnx2fc_tm_timeout_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct fc_lport *lport = shost_priv(shost);
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_interface *interface = port->priv;
+ int rval, val;
+
+ rval = kstrtouint(buf, 10, &val);
+ if (rval)
+ return rval;
+ if (val > 255)
+ return -ERANGE;
+
+ interface->tm_timeout = (u8)val;
+ return strlen(buf);
+}
+
+static DEVICE_ATTR(tm_timeout, S_IRUGO|S_IWUSR, bnx2fc_tm_timeout_show,
+ bnx2fc_tm_timeout_store);
+
+static struct device_attribute *bnx2fc_host_attrs[] = {
+ &dev_attr_tm_timeout,
+ NULL,
+};
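For reference, the new attribute appears as a per-host sysfs file once the template below registers bnx2fc_host_attrs; a hedged usage sketch (the host number is illustrative):

	echo 30 > /sys/class/scsi_host/host0/tm_timeout

This sets the task-management timeout for that host to 30 seconds; the store handler above rejects values larger than 255 with -ERANGE.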
+
/**
* scsi_host_template structure used while registering with SCSI-ml
*/
@@ -2877,6 +2973,8 @@ static struct scsi_host_template bnx2fc_shost_template = {
.sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
.max_sectors = 1024,
.track_queue_depth = 1,
+ .slave_configure = bnx2fc_slave_configure,
+ .shost_attrs = bnx2fc_host_attrs,
};
static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 2230dab67..026f394a3 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -179,12 +179,24 @@ static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
bnx2fc_unmap_sg_list(io_req);
io_req->sc_cmd = NULL;
+
+ /* Sanity checks before returning command to mid-layer */
if (!sc_cmd) {
printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
"IO(0x%x) already cleaned up\n",
io_req->xid);
return;
}
+ if (!sc_cmd->device) {
+ pr_err(PFX "0x%x: sc_cmd->device is NULL.\n", io_req->xid);
+ return;
+ }
+ if (!sc_cmd->device->host) {
+ pr_err(PFX "0x%x: sc_cmd->device->host is NULL.\n",
+ io_req->xid);
+ return;
+ }
+
sc_cmd->result = err_code << 16;
BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
@@ -770,7 +782,7 @@ retry_tmf:
spin_unlock_bh(&tgt->tgt_lock);
rc = wait_for_completion_timeout(&io_req->tm_done,
- BNX2FC_TM_TIMEOUT * HZ);
+ interface->tm_timeout * HZ);
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 72894378f..133901fd3 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -675,7 +675,7 @@ bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
{
struct list_head *list;
struct list_head *tmp;
- struct bnx2i_endpoint *ep;
+ struct bnx2i_endpoint *ep = NULL;
read_lock_bh(&hba->ep_rdwr_lock);
list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
@@ -703,7 +703,7 @@ bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
{
struct list_head *list;
struct list_head *tmp;
- struct bnx2i_endpoint *ep;
+ struct bnx2i_endpoint *ep = NULL;
read_lock_bh(&hba->ep_rdwr_lock);
list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index fa09d4be2..83458f7a2 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -292,850 +292,30 @@ bool scsi_opcode_sa_name(int opcode, int service_action,
struct error_info {
unsigned short code12; /* 0x0302 looks better than 0x03,0x02 */
- const char * text;
+ unsigned short size;
};
/*
- * The canonical list of T10 Additional Sense Codes is available at:
- * http://www.t10.org/lists/asc-num.txt [most recent: 20141221]
+ * There are 700+ entries in this table. To save space, we don't store
+ * (code, pointer) pairs, which would make sizeof(struct
+ * error_info) == 16 on 64-bit systems. Rather, the second element just
+ * stores the size (including \0) of the corresponding string, and we
+ * use the running sum of these sizes as the offset into
+ * additional_text, defined below. This approach saves 12 bytes per entry.
*/
-
static const struct error_info additional[] =
{
- {0x0000, "No additional sense information"},
- {0x0001, "Filemark detected"},
- {0x0002, "End-of-partition/medium detected"},
- {0x0003, "Setmark detected"},
- {0x0004, "Beginning-of-partition/medium detected"},
- {0x0005, "End-of-data detected"},
- {0x0006, "I/O process terminated"},
- {0x0007, "Programmable early warning detected"},
- {0x0011, "Audio play operation in progress"},
- {0x0012, "Audio play operation paused"},
- {0x0013, "Audio play operation successfully completed"},
- {0x0014, "Audio play operation stopped due to error"},
- {0x0015, "No current audio status to return"},
- {0x0016, "Operation in progress"},
- {0x0017, "Cleaning requested"},
- {0x0018, "Erase operation in progress"},
- {0x0019, "Locate operation in progress"},
- {0x001A, "Rewind operation in progress"},
- {0x001B, "Set capacity operation in progress"},
- {0x001C, "Verify operation in progress"},
- {0x001D, "ATA pass through information available"},
- {0x001E, "Conflicting SA creation request"},
- {0x001F, "Logical unit transitioning to another power condition"},
- {0x0020, "Extended copy information available"},
- {0x0021, "Atomic command aborted due to ACA"},
-
- {0x0100, "No index/sector signal"},
-
- {0x0200, "No seek complete"},
-
- {0x0300, "Peripheral device write fault"},
- {0x0301, "No write current"},
- {0x0302, "Excessive write errors"},
-
- {0x0400, "Logical unit not ready, cause not reportable"},
- {0x0401, "Logical unit is in process of becoming ready"},
- {0x0402, "Logical unit not ready, initializing command required"},
- {0x0403, "Logical unit not ready, manual intervention required"},
- {0x0404, "Logical unit not ready, format in progress"},
- {0x0405, "Logical unit not ready, rebuild in progress"},
- {0x0406, "Logical unit not ready, recalculation in progress"},
- {0x0407, "Logical unit not ready, operation in progress"},
- {0x0408, "Logical unit not ready, long write in progress"},
- {0x0409, "Logical unit not ready, self-test in progress"},
- {0x040A, "Logical unit not accessible, asymmetric access state "
- "transition"},
- {0x040B, "Logical unit not accessible, target port in standby state"},
- {0x040C, "Logical unit not accessible, target port in unavailable "
- "state"},
- {0x040D, "Logical unit not ready, structure check required"},
- {0x040E, "Logical unit not ready, security session in progress"},
- {0x0410, "Logical unit not ready, auxiliary memory not accessible"},
- {0x0411, "Logical unit not ready, notify (enable spinup) required"},
- {0x0412, "Logical unit not ready, offline"},
- {0x0413, "Logical unit not ready, SA creation in progress"},
- {0x0414, "Logical unit not ready, space allocation in progress"},
- {0x0415, "Logical unit not ready, robotics disabled"},
- {0x0416, "Logical unit not ready, configuration required"},
- {0x0417, "Logical unit not ready, calibration required"},
- {0x0418, "Logical unit not ready, a door is open"},
- {0x0419, "Logical unit not ready, operating in sequential mode"},
- {0x041A, "Logical unit not ready, start stop unit command in "
- "progress"},
- {0x041B, "Logical unit not ready, sanitize in progress"},
- {0x041C, "Logical unit not ready, additional power use not yet "
- "granted"},
- {0x041D, "Logical unit not ready, configuration in progress"},
- {0x041E, "Logical unit not ready, microcode activation required"},
- {0x041F, "Logical unit not ready, microcode download required"},
- {0x0420, "Logical unit not ready, logical unit reset required"},
- {0x0421, "Logical unit not ready, hard reset required"},
- {0x0422, "Logical unit not ready, power cycle required"},
-
- {0x0500, "Logical unit does not respond to selection"},
-
- {0x0600, "No reference position found"},
-
- {0x0700, "Multiple peripheral devices selected"},
-
- {0x0800, "Logical unit communication failure"},
- {0x0801, "Logical unit communication time-out"},
- {0x0802, "Logical unit communication parity error"},
- {0x0803, "Logical unit communication CRC error (Ultra-DMA/32)"},
- {0x0804, "Unreachable copy target"},
-
- {0x0900, "Track following error"},
- {0x0901, "Tracking servo failure"},
- {0x0902, "Focus servo failure"},
- {0x0903, "Spindle servo failure"},
- {0x0904, "Head select fault"},
- {0x0905, "Vibration induced tracking error"},
-
- {0x0A00, "Error log overflow"},
-
- {0x0B00, "Warning"},
- {0x0B01, "Warning - specified temperature exceeded"},
- {0x0B02, "Warning - enclosure degraded"},
- {0x0B03, "Warning - background self-test failed"},
- {0x0B04, "Warning - background pre-scan detected medium error"},
- {0x0B05, "Warning - background medium scan detected medium error"},
- {0x0B06, "Warning - non-volatile cache now volatile"},
- {0x0B07, "Warning - degraded power to non-volatile cache"},
- {0x0B08, "Warning - power loss expected"},
- {0x0B09, "Warning - device statistics notification active"},
-
- {0x0C00, "Write error"},
- {0x0C01, "Write error - recovered with auto reallocation"},
- {0x0C02, "Write error - auto reallocation failed"},
- {0x0C03, "Write error - recommend reassignment"},
- {0x0C04, "Compression check miscompare error"},
- {0x0C05, "Data expansion occurred during compression"},
- {0x0C06, "Block not compressible"},
- {0x0C07, "Write error - recovery needed"},
- {0x0C08, "Write error - recovery failed"},
- {0x0C09, "Write error - loss of streaming"},
- {0x0C0A, "Write error - padding blocks added"},
- {0x0C0B, "Auxiliary memory write error"},
- {0x0C0C, "Write error - unexpected unsolicited data"},
- {0x0C0D, "Write error - not enough unsolicited data"},
- {0x0C0E, "Multiple write errors"},
- {0x0C0F, "Defects in error window"},
- {0x0C10, "Incomplete multiple atomic write operations"},
-
- {0x0D00, "Error detected by third party temporary initiator"},
- {0x0D01, "Third party device failure"},
- {0x0D02, "Copy target device not reachable"},
- {0x0D03, "Incorrect copy target device type"},
- {0x0D04, "Copy target device data underrun"},
- {0x0D05, "Copy target device data overrun"},
-
- {0x0E00, "Invalid information unit"},
- {0x0E01, "Information unit too short"},
- {0x0E02, "Information unit too long"},
- {0x0E03, "Invalid field in command information unit"},
-
- {0x1000, "Id CRC or ECC error"},
- {0x1001, "Logical block guard check failed"},
- {0x1002, "Logical block application tag check failed"},
- {0x1003, "Logical block reference tag check failed"},
- {0x1004, "Logical block protection error on recover buffered data"},
- {0x1005, "Logical block protection method error"},
-
- {0x1100, "Unrecovered read error"},
- {0x1101, "Read retries exhausted"},
- {0x1102, "Error too long to correct"},
- {0x1103, "Multiple read errors"},
- {0x1104, "Unrecovered read error - auto reallocate failed"},
- {0x1105, "L-EC uncorrectable error"},
- {0x1106, "CIRC unrecovered error"},
- {0x1107, "Data re-synchronization error"},
- {0x1108, "Incomplete block read"},
- {0x1109, "No gap found"},
- {0x110A, "Miscorrected error"},
- {0x110B, "Unrecovered read error - recommend reassignment"},
- {0x110C, "Unrecovered read error - recommend rewrite the data"},
- {0x110D, "De-compression CRC error"},
- {0x110E, "Cannot decompress using declared algorithm"},
- {0x110F, "Error reading UPC/EAN number"},
- {0x1110, "Error reading ISRC number"},
- {0x1111, "Read error - loss of streaming"},
- {0x1112, "Auxiliary memory read error"},
- {0x1113, "Read error - failed retransmission request"},
- {0x1114, "Read error - lba marked bad by application client"},
- {0x1115, "Write after sanitize required"},
-
- {0x1200, "Address mark not found for id field"},
-
- {0x1300, "Address mark not found for data field"},
-
- {0x1400, "Recorded entity not found"},
- {0x1401, "Record not found"},
- {0x1402, "Filemark or setmark not found"},
- {0x1403, "End-of-data not found"},
- {0x1404, "Block sequence error"},
- {0x1405, "Record not found - recommend reassignment"},
- {0x1406, "Record not found - data auto-reallocated"},
- {0x1407, "Locate operation failure"},
-
- {0x1500, "Random positioning error"},
- {0x1501, "Mechanical positioning error"},
- {0x1502, "Positioning error detected by read of medium"},
-
- {0x1600, "Data synchronization mark error"},
- {0x1601, "Data sync error - data rewritten"},
- {0x1602, "Data sync error - recommend rewrite"},
- {0x1603, "Data sync error - data auto-reallocated"},
- {0x1604, "Data sync error - recommend reassignment"},
-
- {0x1700, "Recovered data with no error correction applied"},
- {0x1701, "Recovered data with retries"},
- {0x1702, "Recovered data with positive head offset"},
- {0x1703, "Recovered data with negative head offset"},
- {0x1704, "Recovered data with retries and/or circ applied"},
- {0x1705, "Recovered data using previous sector id"},
- {0x1706, "Recovered data without ECC - data auto-reallocated"},
- {0x1707, "Recovered data without ECC - recommend reassignment"},
- {0x1708, "Recovered data without ECC - recommend rewrite"},
- {0x1709, "Recovered data without ECC - data rewritten"},
-
- {0x1800, "Recovered data with error correction applied"},
- {0x1801, "Recovered data with error corr. & retries applied"},
- {0x1802, "Recovered data - data auto-reallocated"},
- {0x1803, "Recovered data with CIRC"},
- {0x1804, "Recovered data with L-EC"},
- {0x1805, "Recovered data - recommend reassignment"},
- {0x1806, "Recovered data - recommend rewrite"},
- {0x1807, "Recovered data with ECC - data rewritten"},
- {0x1808, "Recovered data with linking"},
-
- {0x1900, "Defect list error"},
- {0x1901, "Defect list not available"},
- {0x1902, "Defect list error in primary list"},
- {0x1903, "Defect list error in grown list"},
-
- {0x1A00, "Parameter list length error"},
-
- {0x1B00, "Synchronous data transfer error"},
-
- {0x1C00, "Defect list not found"},
- {0x1C01, "Primary defect list not found"},
- {0x1C02, "Grown defect list not found"},
-
- {0x1D00, "Miscompare during verify operation"},
- {0x1D01, "Miscompare verify of unmapped LBA"},
-
- {0x1E00, "Recovered id with ECC correction"},
-
- {0x1F00, "Partial defect list transfer"},
-
- {0x2000, "Invalid command operation code"},
- {0x2001, "Access denied - initiator pending-enrolled"},
- {0x2002, "Access denied - no access rights"},
- {0x2003, "Access denied - invalid mgmt id key"},
- {0x2004, "Illegal command while in write capable state"},
- {0x2005, "Obsolete"},
- {0x2006, "Illegal command while in explicit address mode"},
- {0x2007, "Illegal command while in implicit address mode"},
- {0x2008, "Access denied - enrollment conflict"},
- {0x2009, "Access denied - invalid LU identifier"},
- {0x200A, "Access denied - invalid proxy token"},
- {0x200B, "Access denied - ACL LUN conflict"},
- {0x200C, "Illegal command when not in append-only mode"},
-
- {0x2100, "Logical block address out of range"},
- {0x2101, "Invalid element address"},
- {0x2102, "Invalid address for write"},
- {0x2103, "Invalid write crossing layer jump"},
- {0x2104, "Unaligned write command"},
- {0x2105, "Write boundary violation"},
- {0x2106, "Attempt to read invalid data"},
- {0x2107, "Read boundary violation"},
-
- {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"},
-
- {0x2300, "Invalid token operation, cause not reportable"},
- {0x2301, "Invalid token operation, unsupported token type"},
- {0x2302, "Invalid token operation, remote token usage not supported"},
- {0x2303, "Invalid token operation, remote rod token creation not "
- "supported"},
- {0x2304, "Invalid token operation, token unknown"},
- {0x2305, "Invalid token operation, token corrupt"},
- {0x2306, "Invalid token operation, token revoked"},
- {0x2307, "Invalid token operation, token expired"},
- {0x2308, "Invalid token operation, token cancelled"},
- {0x2309, "Invalid token operation, token deleted"},
- {0x230A, "Invalid token operation, invalid token length"},
-
- {0x2400, "Invalid field in cdb"},
- {0x2401, "CDB decryption error"},
- {0x2402, "Obsolete"},
- {0x2403, "Obsolete"},
- {0x2404, "Security audit value frozen"},
- {0x2405, "Security working key frozen"},
- {0x2406, "Nonce not unique"},
- {0x2407, "Nonce timestamp out of range"},
- {0x2408, "Invalid XCDB"},
-
- {0x2500, "Logical unit not supported"},
-
- {0x2600, "Invalid field in parameter list"},
- {0x2601, "Parameter not supported"},
- {0x2602, "Parameter value invalid"},
- {0x2603, "Threshold parameters not supported"},
- {0x2604, "Invalid release of persistent reservation"},
- {0x2605, "Data decryption error"},
- {0x2606, "Too many target descriptors"},
- {0x2607, "Unsupported target descriptor type code"},
- {0x2608, "Too many segment descriptors"},
- {0x2609, "Unsupported segment descriptor type code"},
- {0x260A, "Unexpected inexact segment"},
- {0x260B, "Inline data length exceeded"},
- {0x260C, "Invalid operation for copy source or destination"},
- {0x260D, "Copy segment granularity violation"},
- {0x260E, "Invalid parameter while port is enabled"},
- {0x260F, "Invalid data-out buffer integrity check value"},
- {0x2610, "Data decryption key fail limit reached"},
- {0x2611, "Incomplete key-associated data set"},
- {0x2612, "Vendor specific key reference not found"},
-
- {0x2700, "Write protected"},
- {0x2701, "Hardware write protected"},
- {0x2702, "Logical unit software write protected"},
- {0x2703, "Associated write protect"},
- {0x2704, "Persistent write protect"},
- {0x2705, "Permanent write protect"},
- {0x2706, "Conditional write protect"},
- {0x2707, "Space allocation failed write protect"},
- {0x2708, "Zone is read only"},
-
- {0x2800, "Not ready to ready change, medium may have changed"},
- {0x2801, "Import or export element accessed"},
- {0x2802, "Format-layer may have changed"},
- {0x2803, "Import/export element accessed, medium changed"},
-
- {0x2900, "Power on, reset, or bus device reset occurred"},
- {0x2901, "Power on occurred"},
- {0x2902, "Scsi bus reset occurred"},
- {0x2903, "Bus device reset function occurred"},
- {0x2904, "Device internal reset"},
- {0x2905, "Transceiver mode changed to single-ended"},
- {0x2906, "Transceiver mode changed to lvd"},
- {0x2907, "I_T nexus loss occurred"},
-
- {0x2A00, "Parameters changed"},
- {0x2A01, "Mode parameters changed"},
- {0x2A02, "Log parameters changed"},
- {0x2A03, "Reservations preempted"},
- {0x2A04, "Reservations released"},
- {0x2A05, "Registrations preempted"},
- {0x2A06, "Asymmetric access state changed"},
- {0x2A07, "Implicit asymmetric access state transition failed"},
- {0x2A08, "Priority changed"},
- {0x2A09, "Capacity data has changed"},
- {0x2A0A, "Error history I_T nexus cleared"},
- {0x2A0B, "Error history snapshot released"},
- {0x2A0C, "Error recovery attributes have changed"},
- {0x2A0D, "Data encryption capabilities changed"},
- {0x2A10, "Timestamp changed"},
- {0x2A11, "Data encryption parameters changed by another i_t nexus"},
- {0x2A12, "Data encryption parameters changed by vendor specific "
- "event"},
- {0x2A13, "Data encryption key instance counter has changed"},
- {0x2A14, "SA creation capabilities data has changed"},
- {0x2A15, "Medium removal prevention preempted"},
-
- {0x2B00, "Copy cannot execute since host cannot disconnect"},
-
- {0x2C00, "Command sequence error"},
- {0x2C01, "Too many windows specified"},
- {0x2C02, "Invalid combination of windows specified"},
- {0x2C03, "Current program area is not empty"},
- {0x2C04, "Current program area is empty"},
- {0x2C05, "Illegal power condition request"},
- {0x2C06, "Persistent prevent conflict"},
- {0x2C07, "Previous busy status"},
- {0x2C08, "Previous task set full status"},
- {0x2C09, "Previous reservation conflict status"},
- {0x2C0A, "Partition or collection contains user objects"},
- {0x2C0B, "Not reserved"},
- {0x2C0C, "Orwrite generation does not match"},
- {0x2C0D, "Reset write pointer not allowed"},
- {0x2C0E, "Zone is offline"},
-
- {0x2D00, "Overwrite error on update in place"},
-
- {0x2E00, "Insufficient time for operation"},
- {0x2E01, "Command timeout before processing"},
- {0x2E02, "Command timeout during processing"},
- {0x2E03, "Command timeout during processing due to error recovery"},
-
- {0x2F00, "Commands cleared by another initiator"},
- {0x2F01, "Commands cleared by power loss notification"},
- {0x2F02, "Commands cleared by device server"},
- {0x2F03, "Some commands cleared by queuing layer event"},
-
- {0x3000, "Incompatible medium installed"},
- {0x3001, "Cannot read medium - unknown format"},
- {0x3002, "Cannot read medium - incompatible format"},
- {0x3003, "Cleaning cartridge installed"},
- {0x3004, "Cannot write medium - unknown format"},
- {0x3005, "Cannot write medium - incompatible format"},
- {0x3006, "Cannot format medium - incompatible medium"},
- {0x3007, "Cleaning failure"},
- {0x3008, "Cannot write - application code mismatch"},
- {0x3009, "Current session not fixated for append"},
- {0x300A, "Cleaning request rejected"},
- {0x300C, "WORM medium - overwrite attempted"},
- {0x300D, "WORM medium - integrity check"},
- {0x3010, "Medium not formatted"},
- {0x3011, "Incompatible volume type"},
- {0x3012, "Incompatible volume qualifier"},
- {0x3013, "Cleaning volume expired"},
-
- {0x3100, "Medium format corrupted"},
- {0x3101, "Format command failed"},
- {0x3102, "Zoned formatting failed due to spare linking"},
- {0x3103, "Sanitize command failed"},
-
- {0x3200, "No defect spare location available"},
- {0x3201, "Defect list update failure"},
-
- {0x3300, "Tape length error"},
-
- {0x3400, "Enclosure failure"},
-
- {0x3500, "Enclosure services failure"},
- {0x3501, "Unsupported enclosure function"},
- {0x3502, "Enclosure services unavailable"},
- {0x3503, "Enclosure services transfer failure"},
- {0x3504, "Enclosure services transfer refused"},
- {0x3505, "Enclosure services checksum error"},
-
- {0x3600, "Ribbon, ink, or toner failure"},
-
- {0x3700, "Rounded parameter"},
-
- {0x3800, "Event status notification"},
- {0x3802, "Esn - power management class event"},
- {0x3804, "Esn - media class event"},
- {0x3806, "Esn - device busy class event"},
- {0x3807, "Thin Provisioning soft threshold reached"},
-
- {0x3900, "Saving parameters not supported"},
-
- {0x3A00, "Medium not present"},
- {0x3A01, "Medium not present - tray closed"},
- {0x3A02, "Medium not present - tray open"},
- {0x3A03, "Medium not present - loadable"},
- {0x3A04, "Medium not present - medium auxiliary memory accessible"},
-
- {0x3B00, "Sequential positioning error"},
- {0x3B01, "Tape position error at beginning-of-medium"},
- {0x3B02, "Tape position error at end-of-medium"},
- {0x3B03, "Tape or electronic vertical forms unit not ready"},
- {0x3B04, "Slew failure"},
- {0x3B05, "Paper jam"},
- {0x3B06, "Failed to sense top-of-form"},
- {0x3B07, "Failed to sense bottom-of-form"},
- {0x3B08, "Reposition error"},
- {0x3B09, "Read past end of medium"},
- {0x3B0A, "Read past beginning of medium"},
- {0x3B0B, "Position past end of medium"},
- {0x3B0C, "Position past beginning of medium"},
- {0x3B0D, "Medium destination element full"},
- {0x3B0E, "Medium source element empty"},
- {0x3B0F, "End of medium reached"},
- {0x3B11, "Medium magazine not accessible"},
- {0x3B12, "Medium magazine removed"},
- {0x3B13, "Medium magazine inserted"},
- {0x3B14, "Medium magazine locked"},
- {0x3B15, "Medium magazine unlocked"},
- {0x3B16, "Mechanical positioning or changer error"},
- {0x3B17, "Read past end of user object"},
- {0x3B18, "Element disabled"},
- {0x3B19, "Element enabled"},
- {0x3B1A, "Data transfer device removed"},
- {0x3B1B, "Data transfer device inserted"},
- {0x3B1C, "Too many logical objects on partition to support "
- "operation"},
-
- {0x3D00, "Invalid bits in identify message"},
-
- {0x3E00, "Logical unit has not self-configured yet"},
- {0x3E01, "Logical unit failure"},
- {0x3E02, "Timeout on logical unit"},
- {0x3E03, "Logical unit failed self-test"},
- {0x3E04, "Logical unit unable to update self-test log"},
-
- {0x3F00, "Target operating conditions have changed"},
- {0x3F01, "Microcode has been changed"},
- {0x3F02, "Changed operating definition"},
- {0x3F03, "Inquiry data has changed"},
- {0x3F04, "Component device attached"},
- {0x3F05, "Device identifier changed"},
- {0x3F06, "Redundancy group created or modified"},
- {0x3F07, "Redundancy group deleted"},
- {0x3F08, "Spare created or modified"},
- {0x3F09, "Spare deleted"},
- {0x3F0A, "Volume set created or modified"},
- {0x3F0B, "Volume set deleted"},
- {0x3F0C, "Volume set deassigned"},
- {0x3F0D, "Volume set reassigned"},
- {0x3F0E, "Reported luns data has changed"},
- {0x3F0F, "Echo buffer overwritten"},
- {0x3F10, "Medium loadable"},
- {0x3F11, "Medium auxiliary memory accessible"},
- {0x3F12, "iSCSI IP address added"},
- {0x3F13, "iSCSI IP address removed"},
- {0x3F14, "iSCSI IP address changed"},
- {0x3F15, "Inspect referrals sense descriptors"},
- {0x3F16, "Microcode has been changed without reset"},
-/*
- * {0x40NN, "Ram failure"},
- * {0x40NN, "Diagnostic failure on component nn"},
- * {0x41NN, "Data path failure"},
- * {0x42NN, "Power-on or self-test failure"},
- */
- {0x4300, "Message error"},
-
- {0x4400, "Internal target failure"},
- {0x4401, "Persistent reservation information lost"},
- {0x4471, "ATA device failed set features"},
-
- {0x4500, "Select or reselect failure"},
-
- {0x4600, "Unsuccessful soft reset"},
-
- {0x4700, "Scsi parity error"},
- {0x4701, "Data phase CRC error detected"},
- {0x4702, "Scsi parity error detected during st data phase"},
- {0x4703, "Information unit iuCRC error detected"},
- {0x4704, "Asynchronous information protection error detected"},
- {0x4705, "Protocol service CRC error"},
- {0x4706, "Phy test function in progress"},
- {0x477f, "Some commands cleared by iSCSI Protocol event"},
-
- {0x4800, "Initiator detected error message received"},
-
- {0x4900, "Invalid message error"},
-
- {0x4A00, "Command phase error"},
-
- {0x4B00, "Data phase error"},
- {0x4B01, "Invalid target port transfer tag received"},
- {0x4B02, "Too much write data"},
- {0x4B03, "Ack/nak timeout"},
- {0x4B04, "Nak received"},
- {0x4B05, "Data offset error"},
- {0x4B06, "Initiator response timeout"},
- {0x4B07, "Connection lost"},
- {0x4B08, "Data-in buffer overflow - data buffer size"},
- {0x4B09, "Data-in buffer overflow - data buffer descriptor area"},
- {0x4B0A, "Data-in buffer error"},
- {0x4B0B, "Data-out buffer overflow - data buffer size"},
- {0x4B0C, "Data-out buffer overflow - data buffer descriptor area"},
- {0x4B0D, "Data-out buffer error"},
- {0x4B0E, "PCIe fabric error"},
- {0x4B0F, "PCIe completion timeout"},
- {0x4B10, "PCIe completer abort"},
- {0x4B11, "PCIe poisoned tlp received"},
- {0x4B12, "PCIe eCRC check failed"},
- {0x4B13, "PCIe unsupported request"},
- {0x4B14, "PCIe acs violation"},
- {0x4B15, "PCIe tlp prefix blocked"},
-
- {0x4C00, "Logical unit failed self-configuration"},
-/*
- * {0x4DNN, "Tagged overlapped commands (nn = queue tag)"},
- */
- {0x4E00, "Overlapped commands attempted"},
-
- {0x5000, "Write append error"},
- {0x5001, "Write append position error"},
- {0x5002, "Position error related to timing"},
-
- {0x5100, "Erase failure"},
- {0x5101, "Erase failure - incomplete erase operation detected"},
-
- {0x5200, "Cartridge fault"},
-
- {0x5300, "Media load or eject failed"},
- {0x5301, "Unload tape failure"},
- {0x5302, "Medium removal prevented"},
- {0x5303, "Medium removal prevented by data transfer element"},
- {0x5304, "Medium thread or unthread failure"},
- {0x5305, "Volume identifier invalid"},
- {0x5306, "Volume identifier missing"},
- {0x5307, "Duplicate volume identifier"},
- {0x5308, "Element status unknown"},
- {0x5309, "Data transfer device error - load failed"},
- {0x530a, "Data transfer device error - unload failed"},
- {0x530b, "Data transfer device error - unload missing"},
- {0x530c, "Data transfer device error - eject failed"},
- {0x530d, "Data transfer device error - library communication failed"},
-
- {0x5400, "Scsi to host system interface failure"},
-
- {0x5500, "System resource failure"},
- {0x5501, "System buffer full"},
- {0x5502, "Insufficient reservation resources"},
- {0x5503, "Insufficient resources"},
- {0x5504, "Insufficient registration resources"},
- {0x5505, "Insufficient access control resources"},
- {0x5506, "Auxiliary memory out of space"},
- {0x5507, "Quota error"},
- {0x5508, "Maximum number of supplemental decryption keys exceeded"},
- {0x5509, "Medium auxiliary memory not accessible"},
- {0x550A, "Data currently unavailable"},
- {0x550B, "Insufficient power for operation"},
- {0x550C, "Insufficient resources to create rod"},
- {0x550D, "Insufficient resources to create rod token"},
- {0x550E, "Insufficient zone resources"},
-
- {0x5700, "Unable to recover table-of-contents"},
-
- {0x5800, "Generation does not exist"},
-
- {0x5900, "Updated block read"},
-
- {0x5A00, "Operator request or state change input"},
- {0x5A01, "Operator medium removal request"},
- {0x5A02, "Operator selected write protect"},
- {0x5A03, "Operator selected write permit"},
-
- {0x5B00, "Log exception"},
- {0x5B01, "Threshold condition met"},
- {0x5B02, "Log counter at maximum"},
- {0x5B03, "Log list codes exhausted"},
-
- {0x5C00, "Rpl status change"},
- {0x5C01, "Spindles synchronized"},
- {0x5C02, "Spindles not synchronized"},
-
- {0x5D00, "Failure prediction threshold exceeded"},
- {0x5D01, "Media failure prediction threshold exceeded"},
- {0x5D02, "Logical unit failure prediction threshold exceeded"},
- {0x5D03, "Spare area exhaustion prediction threshold exceeded"},
- {0x5D10, "Hardware impending failure general hard drive failure"},
- {0x5D11, "Hardware impending failure drive error rate too high"},
- {0x5D12, "Hardware impending failure data error rate too high"},
- {0x5D13, "Hardware impending failure seek error rate too high"},
- {0x5D14, "Hardware impending failure too many block reassigns"},
- {0x5D15, "Hardware impending failure access times too high"},
- {0x5D16, "Hardware impending failure start unit times too high"},
- {0x5D17, "Hardware impending failure channel parametrics"},
- {0x5D18, "Hardware impending failure controller detected"},
- {0x5D19, "Hardware impending failure throughput performance"},
- {0x5D1A, "Hardware impending failure seek time performance"},
- {0x5D1B, "Hardware impending failure spin-up retry count"},
- {0x5D1C, "Hardware impending failure drive calibration retry count"},
- {0x5D20, "Controller impending failure general hard drive failure"},
- {0x5D21, "Controller impending failure drive error rate too high"},
- {0x5D22, "Controller impending failure data error rate too high"},
- {0x5D23, "Controller impending failure seek error rate too high"},
- {0x5D24, "Controller impending failure too many block reassigns"},
- {0x5D25, "Controller impending failure access times too high"},
- {0x5D26, "Controller impending failure start unit times too high"},
- {0x5D27, "Controller impending failure channel parametrics"},
- {0x5D28, "Controller impending failure controller detected"},
- {0x5D29, "Controller impending failure throughput performance"},
- {0x5D2A, "Controller impending failure seek time performance"},
- {0x5D2B, "Controller impending failure spin-up retry count"},
- {0x5D2C, "Controller impending failure drive calibration retry count"},
- {0x5D30, "Data channel impending failure general hard drive failure"},
- {0x5D31, "Data channel impending failure drive error rate too high"},
- {0x5D32, "Data channel impending failure data error rate too high"},
- {0x5D33, "Data channel impending failure seek error rate too high"},
- {0x5D34, "Data channel impending failure too many block reassigns"},
- {0x5D35, "Data channel impending failure access times too high"},
- {0x5D36, "Data channel impending failure start unit times too high"},
- {0x5D37, "Data channel impending failure channel parametrics"},
- {0x5D38, "Data channel impending failure controller detected"},
- {0x5D39, "Data channel impending failure throughput performance"},
- {0x5D3A, "Data channel impending failure seek time performance"},
- {0x5D3B, "Data channel impending failure spin-up retry count"},
- {0x5D3C, "Data channel impending failure drive calibration retry "
- "count"},
- {0x5D40, "Servo impending failure general hard drive failure"},
- {0x5D41, "Servo impending failure drive error rate too high"},
- {0x5D42, "Servo impending failure data error rate too high"},
- {0x5D43, "Servo impending failure seek error rate too high"},
- {0x5D44, "Servo impending failure too many block reassigns"},
- {0x5D45, "Servo impending failure access times too high"},
- {0x5D46, "Servo impending failure start unit times too high"},
- {0x5D47, "Servo impending failure channel parametrics"},
- {0x5D48, "Servo impending failure controller detected"},
- {0x5D49, "Servo impending failure throughput performance"},
- {0x5D4A, "Servo impending failure seek time performance"},
- {0x5D4B, "Servo impending failure spin-up retry count"},
- {0x5D4C, "Servo impending failure drive calibration retry count"},
- {0x5D50, "Spindle impending failure general hard drive failure"},
- {0x5D51, "Spindle impending failure drive error rate too high"},
- {0x5D52, "Spindle impending failure data error rate too high"},
- {0x5D53, "Spindle impending failure seek error rate too high"},
- {0x5D54, "Spindle impending failure too many block reassigns"},
- {0x5D55, "Spindle impending failure access times too high"},
- {0x5D56, "Spindle impending failure start unit times too high"},
- {0x5D57, "Spindle impending failure channel parametrics"},
- {0x5D58, "Spindle impending failure controller detected"},
- {0x5D59, "Spindle impending failure throughput performance"},
- {0x5D5A, "Spindle impending failure seek time performance"},
- {0x5D5B, "Spindle impending failure spin-up retry count"},
- {0x5D5C, "Spindle impending failure drive calibration retry count"},
- {0x5D60, "Firmware impending failure general hard drive failure"},
- {0x5D61, "Firmware impending failure drive error rate too high"},
- {0x5D62, "Firmware impending failure data error rate too high"},
- {0x5D63, "Firmware impending failure seek error rate too high"},
- {0x5D64, "Firmware impending failure too many block reassigns"},
- {0x5D65, "Firmware impending failure access times too high"},
- {0x5D66, "Firmware impending failure start unit times too high"},
- {0x5D67, "Firmware impending failure channel parametrics"},
- {0x5D68, "Firmware impending failure controller detected"},
- {0x5D69, "Firmware impending failure throughput performance"},
- {0x5D6A, "Firmware impending failure seek time performance"},
- {0x5D6B, "Firmware impending failure spin-up retry count"},
- {0x5D6C, "Firmware impending failure drive calibration retry count"},
- {0x5DFF, "Failure prediction threshold exceeded (false)"},
-
- {0x5E00, "Low power condition on"},
- {0x5E01, "Idle condition activated by timer"},
- {0x5E02, "Standby condition activated by timer"},
- {0x5E03, "Idle condition activated by command"},
- {0x5E04, "Standby condition activated by command"},
- {0x5E05, "Idle_b condition activated by timer"},
- {0x5E06, "Idle_b condition activated by command"},
- {0x5E07, "Idle_c condition activated by timer"},
- {0x5E08, "Idle_c condition activated by command"},
- {0x5E09, "Standby_y condition activated by timer"},
- {0x5E0A, "Standby_y condition activated by command"},
- {0x5E41, "Power state change to active"},
- {0x5E42, "Power state change to idle"},
- {0x5E43, "Power state change to standby"},
- {0x5E45, "Power state change to sleep"},
- {0x5E47, "Power state change to device control"},
-
- {0x6000, "Lamp failure"},
-
- {0x6100, "Video acquisition error"},
- {0x6101, "Unable to acquire video"},
- {0x6102, "Out of focus"},
-
- {0x6200, "Scan head positioning error"},
-
- {0x6300, "End of user area encountered on this track"},
- {0x6301, "Packet does not fit in available space"},
-
- {0x6400, "Illegal mode for this track"},
- {0x6401, "Invalid packet size"},
-
- {0x6500, "Voltage fault"},
-
- {0x6600, "Automatic document feeder cover up"},
- {0x6601, "Automatic document feeder lift up"},
- {0x6602, "Document jam in automatic document feeder"},
- {0x6603, "Document miss feed automatic in document feeder"},
-
- {0x6700, "Configuration failure"},
- {0x6701, "Configuration of incapable logical units failed"},
- {0x6702, "Add logical unit failed"},
- {0x6703, "Modification of logical unit failed"},
- {0x6704, "Exchange of logical unit failed"},
- {0x6705, "Remove of logical unit failed"},
- {0x6706, "Attachment of logical unit failed"},
- {0x6707, "Creation of logical unit failed"},
- {0x6708, "Assign failure occurred"},
- {0x6709, "Multiply assigned logical unit"},
- {0x670A, "Set target port groups command failed"},
- {0x670B, "ATA device feature not enabled"},
-
- {0x6800, "Logical unit not configured"},
- {0x6801, "Subsidiary logical unit not configured"},
-
- {0x6900, "Data loss on logical unit"},
- {0x6901, "Multiple logical unit failures"},
- {0x6902, "Parity/data mismatch"},
-
- {0x6A00, "Informational, refer to log"},
-
- {0x6B00, "State change has occurred"},
- {0x6B01, "Redundancy level got better"},
- {0x6B02, "Redundancy level got worse"},
-
- {0x6C00, "Rebuild failure occurred"},
-
- {0x6D00, "Recalculate failure occurred"},
-
- {0x6E00, "Command to logical unit failed"},
-
- {0x6F00, "Copy protection key exchange failure - authentication "
- "failure"},
- {0x6F01, "Copy protection key exchange failure - key not present"},
- {0x6F02, "Copy protection key exchange failure - key not established"},
- {0x6F03, "Read of scrambled sector without authentication"},
- {0x6F04, "Media region code is mismatched to logical unit region"},
- {0x6F05, "Drive region must be permanent/region reset count error"},
- {0x6F06, "Insufficient block count for binding nonce recording"},
- {0x6F07, "Conflict in binding nonce recording"},
-/*
- * {0x70NN, "Decompression exception short algorithm id of nn"},
- */
- {0x7100, "Decompression exception long algorithm id"},
-
- {0x7200, "Session fixation error"},
- {0x7201, "Session fixation error writing lead-in"},
- {0x7202, "Session fixation error writing lead-out"},
- {0x7203, "Session fixation error - incomplete track in session"},
- {0x7204, "Empty or partially written reserved track"},
- {0x7205, "No more track reservations allowed"},
- {0x7206, "RMZ extension is not allowed"},
- {0x7207, "No more test zone extensions are allowed"},
-
- {0x7300, "Cd control error"},
- {0x7301, "Power calibration area almost full"},
- {0x7302, "Power calibration area is full"},
- {0x7303, "Power calibration area error"},
- {0x7304, "Program memory area update failure"},
- {0x7305, "Program memory area is full"},
- {0x7306, "RMA/PMA is almost full"},
- {0x7310, "Current power calibration area almost full"},
- {0x7311, "Current power calibration area is full"},
- {0x7317, "RDZ is full"},
-
- {0x7400, "Security error"},
- {0x7401, "Unable to decrypt data"},
- {0x7402, "Unencrypted data encountered while decrypting"},
- {0x7403, "Incorrect data encryption key"},
- {0x7404, "Cryptographic integrity validation failed"},
- {0x7405, "Error decrypting data"},
- {0x7406, "Unknown signature verification key"},
- {0x7407, "Encryption parameters not useable"},
- {0x7408, "Digital signature validation failure"},
- {0x7409, "Encryption mode mismatch on read"},
- {0x740A, "Encrypted block not raw read enabled"},
- {0x740B, "Incorrect Encryption parameters"},
- {0x740C, "Unable to decrypt parameter list"},
- {0x740D, "Encryption algorithm disabled"},
- {0x7410, "SA creation parameter value invalid"},
- {0x7411, "SA creation parameter value rejected"},
- {0x7412, "Invalid SA usage"},
- {0x7421, "Data Encryption configuration prevented"},
- {0x7430, "SA creation parameter not supported"},
- {0x7440, "Authentication failed"},
- {0x7461, "External data encryption key manager access error"},
- {0x7462, "External data encryption key manager error"},
- {0x7463, "External data encryption key not found"},
- {0x7464, "External data encryption request not authorized"},
- {0x746E, "External data encryption control timeout"},
- {0x746F, "External data encryption control error"},
- {0x7471, "Logical unit access not authorized"},
- {0x7479, "Security conflict in translated device"},
-
- {0, NULL}
+#define SENSE_CODE(c, s) {c, sizeof(s)},
+#include "sense_codes.h"
+#undef SENSE_CODE
};
+static const char *additional_text =
+#define SENSE_CODE(c, s) s "\0"
+#include "sense_codes.h"
+#undef SENSE_CODE
+ ;
+
struct error_info2 {
unsigned char code1, code2_min, code2_max;
const char * str;
@@ -1197,11 +377,14 @@ scsi_extd_sense_format(unsigned char asc, unsigned char ascq, const char **fmt)
{
int i;
unsigned short code = ((asc << 8) | ascq);
+ unsigned offset = 0;
*fmt = NULL;
- for (i = 0; additional[i].text; i++)
+ for (i = 0; i < ARRAY_SIZE(additional); i++) {
if (additional[i].code12 == code)
- return additional[i].text;
+ return additional_text + offset;
+ offset += additional[i].size;
+ }
for (i = 0; additional2[i].fmt; i++) {
if (additional2[i].code1 == asc &&
ascq >= additional2[i].code2_min &&
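Editorial note: the conversion above is the classic X-macro pattern. The new sense_codes.h header is included twice with two different definitions of SENSE_CODE(): once to emit a table of {code, string-size} pairs, and once to concatenate every description into a single NUL-separated blob, so the lookup walks the table summing sizes instead of storing a pointer per entry. A minimal self-contained userspace model of the same trick (the list is inlined here and the entries are illustrative, not the kernel's full table):

#include <stdio.h>

/* Stand-in for sense_codes.h: in the kernel this list lives in its
 * own header and is #included twice. Entries are illustrative. */
#define SENSE_LIST \
	SENSE_CODE(0x2700, "Write protected") \
	SENSE_CODE(0x3A00, "Medium not present") \
	SENSE_CODE(0x5200, "Cartridge fault")

struct error_info {
	unsigned short code12;	/* (asc << 8) | ascq */
	unsigned short size;	/* strlen(text) + 1 */
};

/* First expansion: code plus string size, no pointer stored. */
#define SENSE_CODE(c, s) { c, sizeof(s) },
static const struct error_info additional[] = { SENSE_LIST };
#undef SENSE_CODE

/* Second expansion: all texts packed into one NUL-separated blob. */
#define SENSE_CODE(c, s) s "\0"
static const char additional_text[] = SENSE_LIST;
#undef SENSE_CODE

static const char *lookup(unsigned short code)
{
	unsigned int i, offset = 0;

	for (i = 0; i < sizeof(additional) / sizeof(additional[0]); i++) {
		if (additional[i].code12 == code)
			return additional_text + offset;
		offset += additional[i].size;	/* skip this entry's text */
	}
	return NULL;
}

int main(void)
{
	printf("%s\n", lookup(0x3A00));	/* prints "Medium not present" */
	return 0;
}

On a 64-bit build this trades an 8-byte pointer per entry for a 2-byte length, which is the point of the change.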
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index d8a5cb3cd..ce1507023 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -1615,6 +1615,13 @@ err1:
* place at the same time and the failure was due to CXL services being
* unable to keep up.
*
+ * As this routine is called on ioctl context, it holds the ioctl r/w
+ * semaphore that is used to drain ioctls in recovery scenarios. The
+ * implementation to achieve the pacing described above (a local mutex)
+ * requires that the ioctl r/w semaphore be dropped and reacquired to
+ * avoid a 3-way deadlock when multiple process recoveries operate in
+ * parallel.
+ *
* Because a user can detect an error condition before the kernel, it is
* quite possible for this routine to act as the kernel's EEH detection
* source (MMIO read of mbox_r). Because of this, there is a window of
@@ -1642,9 +1649,17 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
int rc = 0;
atomic_inc(&cfg->recovery_threads);
+ up_read(&cfg->ioctl_rwsem);
rc = mutex_lock_interruptible(mutex);
+ down_read(&cfg->ioctl_rwsem);
if (rc)
goto out;
+ rc = check_state(cfg);
+ if (rc) {
+ dev_err(dev, "%s: Failed state! rc=%d\n", __func__, rc);
+ rc = -ENODEV;
+ goto out;
+ }
dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
__func__, recover->reason, rctxid);
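The comment added above spells out the reasoning: blocking on the recovery mutex while holding the ioctl read-semaphore can deadlock against a writer draining ioctls, so the semaphore is dropped across the lock and retaken, after which the adapter state must be revalidated because it may have changed in the window. A rough pthread model of that drop/retake/recheck shape (names and the userspace primitives are stand-ins, not the cxlflash API):

#include <errno.h>
#include <pthread.h>

static pthread_rwlock_t ioctl_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t recovery_mutex = PTHREAD_MUTEX_INITIALIZER;
static int adapter_failed;		/* set by an EEH/reset path */

/* Called with ioctl_rwsem held for read, like an ioctl handler. */
static int afu_recover(void)
{
	int rc;

	/* Drop the semaphore across the blocking lock to avoid the
	 * 3-way deadlock described in the comment, then retake it. */
	pthread_rwlock_unlock(&ioctl_rwsem);
	rc = pthread_mutex_lock(&recovery_mutex);
	pthread_rwlock_rdlock(&ioctl_rwsem);
	if (rc)
		return rc;

	/* The state may have changed while the semaphore was dropped. */
	if (adapter_failed) {
		rc = -ENODEV;
		goto out;
	}

	/* ... actual recovery work would go here ... */
out:
	pthread_mutex_unlock(&recovery_mutex);
	return rc;
}

int main(void)
{
	pthread_rwlock_rdlock(&ioctl_rwsem);
	int rc = afu_recover();
	pthread_rwlock_unlock(&ioctl_rwsem);
	return rc ? 1 : 0;
}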
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index a655cf29c..752b5c9d1 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -190,15 +190,18 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
ALUA_FAILOVER_RETRIES, NULL, req_flags);
}
-struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
- int group_id)
+static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
+ int group_id)
{
struct alua_port_group *pg;
+ if (!id_str || !id_size || !strlen(id_str))
+ return NULL;
+
list_for_each_entry(pg, &port_group_list, node) {
if (pg->group_id != group_id)
continue;
- if (pg->device_id_len != id_size)
+ if (!pg->device_id_len || pg->device_id_len != id_size)
continue;
if (strncmp(pg->device_id_str, id_str, id_size))
continue;
@@ -219,8 +222,8 @@ struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
* Allocate a new port_group structure for a given
* device.
*/
-struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
- int group_id, int tpgs)
+static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
+ int group_id, int tpgs)
{
struct alua_port_group *pg, *tmp_pg;
@@ -232,14 +235,14 @@ struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
sizeof(pg->device_id_str));
if (pg->device_id_len <= 0) {
/*
- * Internal error: TPGS supported but no device
- * identifcation found. Disable ALUA support.
+ * TPGS supported but no device identification found.
+ * Generate private device identification.
*/
- kfree(pg);
sdev_printk(KERN_INFO, sdev,
"%s: No device descriptors found\n",
ALUA_DH_NAME);
- return ERR_PTR(-ENXIO);
+ pg->device_id_str[0] = '\0';
+ pg->device_id_len = 0;
}
pg->group_id = group_id;
pg->tpgs = tpgs;
@@ -354,9 +357,15 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
return SCSI_DH_NOMEM;
return SCSI_DH_DEV_UNSUPP;
}
- sdev_printk(KERN_INFO, sdev,
- "%s: device %s port group %x rel port %x\n",
- ALUA_DH_NAME, pg->device_id_str, group_id, rel_port);
+ if (pg->device_id_len)
+ sdev_printk(KERN_INFO, sdev,
+ "%s: device %s port group %x rel port %x\n",
+ ALUA_DH_NAME, pg->device_id_str,
+ group_id, rel_port);
+ else
+ sdev_printk(KERN_INFO, sdev,
+ "%s: port group %x rel port %x\n",
+ ALUA_DH_NAME, group_id, rel_port);
/* Check for existing port group references */
spin_lock(&h->pg_lock);
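Worth noting why the lookup above refuses empty IDs: a port group created without VPD device identification now survives with a zero-length ID instead of failing, but two such ID-less devices must never match each other's group, so both the caller guard and the !pg->device_id_len test keep those groups private. A condensed model of the matching rule (simplified types, not the driver's structures):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct port_group {
	char device_id_str[64];
	int device_id_len;	/* 0 for devices without VPD identification */
	int group_id;
};

/* Empty IDs never match: ID-less groups stay private to one device. */
static struct port_group *find_pg(struct port_group *pgs, size_t n,
				  const char *id_str, size_t id_size,
				  int group_id)
{
	size_t i;

	if (!id_str || !id_size || !strlen(id_str))
		return NULL;

	for (i = 0; i < n; i++) {
		if (pgs[i].group_id != group_id)
			continue;
		if (!pgs[i].device_id_len ||
		    (size_t)pgs[i].device_id_len != id_size)
			continue;
		if (strncmp(pgs[i].device_id_str, id_str, id_size))
			continue;
		return &pgs[i];
	}
	return NULL;
}

int main(void)
{
	struct port_group pgs[] = {
		{ "", 0, 1 },			/* ID-less, private */
		{ "naa.5000c500", 12, 1 },
	};

	/* An empty query never aliases the ID-less group. */
	printf("%p\n", (void *)find_pg(pgs, 2, "", 0, 1));
	printf("%s\n", find_pg(pgs, 2, "naa.5000c500", 12, 1)->device_id_str);
	return 0;
}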
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 6c14e68b9..9b5a457d4 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -34,11 +34,14 @@
* Definitions for the generic 5380 driver.
*/
-#define DONT_USE_INTR
-
#define NCR5380_read(reg) inb(instance->io_port + reg)
#define NCR5380_write(reg, value) outb(value, instance->io_port + reg)
+#define NCR5380_dma_xfer_len(instance, cmd, phase) (0)
+#define NCR5380_dma_recv_setup(instance, dst, len) (0)
+#define NCR5380_dma_send_setup(instance, src, len) (0)
+#define NCR5380_dma_residual(instance) (0)
+
#define NCR5380_implementation_fields /* none */
#include "NCR5380.h"
@@ -62,7 +65,6 @@ static struct scsi_host_template dmx3191d_driver_template = {
.cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
.cmd_size = NCR5380_CMD_SIZE,
- .max_sectors = 128,
};
static int dmx3191d_probe_one(struct pci_dev *pdev,
@@ -93,7 +95,7 @@ static int dmx3191d_probe_one(struct pci_dev *pdev,
*/
shost->irq = NO_IRQ;
- error = NCR5380_init(shost, FLAG_NO_PSEUDO_DMA);
+ error = NCR5380_init(shost, 0);
if (error)
goto out_host_put;
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index 6c736b071..459863f94 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -1,6 +1,3 @@
-#define PSEUDO_DMA
-#define DONT_USE_INTR
-
/*
* DTC 3180/3280 driver, by
* Ray Van Tassle rayvt@comm.mot.com
@@ -54,7 +51,6 @@
#include <scsi/scsi_host.h>
#include "dtc.h"
-#define AUTOPROBE_IRQ
#include "NCR5380.h"
/*
@@ -229,7 +225,7 @@ found:
instance->base = addr;
((struct NCR5380_hostdata *)(instance)->hostdata)->base = base;
- if (NCR5380_init(instance, FLAG_NO_DMA_FIXUP))
+ if (NCR5380_init(instance, FLAG_LATE_DMA_SETUP))
goto out_unregister;
NCR5380_maybe_reset_bus(instance);
@@ -244,9 +240,10 @@ found:
if (instance->irq == 255)
instance->irq = NO_IRQ;
-#ifndef DONT_USE_INTR
/* With interrupts enabled, it will sometimes hang when doing heavy
 * reads. So better not enable them until I figure it out. */
+ instance->irq = NO_IRQ;
+
if (instance->irq != NO_IRQ)
if (request_irq(instance->irq, dtc_intr, 0,
"dtc", instance)) {
@@ -258,11 +255,7 @@ found:
printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
}
-#else
- if (instance->irq != NO_IRQ)
- printk(KERN_WARNING "scsi%d : interrupts not used. Might as well not jumper it.\n", instance->host_no);
- instance->irq = NO_IRQ;
-#endif
+
dprintk(NDEBUG_INIT, "scsi%d : irq = %d\n",
instance->host_no, instance->irq);
@@ -323,7 +316,8 @@ static int dtc_biosparam(struct scsi_device *sdev, struct block_device *dev,
* timeout.
*/
-static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len)
+static inline int dtc_pread(struct Scsi_Host *instance,
+ unsigned char *dst, int len)
{
unsigned char *d = dst;
int i; /* For counting time spent in the poll-loop */
@@ -352,8 +346,6 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst,
while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS))
++i;
rtrc(0);
- if (i > hostdata->spin_max_r)
- hostdata->spin_max_r = i;
return (0);
}
@@ -370,7 +362,8 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst,
* timeout.
*/
-static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len)
+static inline int dtc_pwrite(struct Scsi_Host *instance,
+ unsigned char *src, int len)
{
int i;
struct NCR5380_hostdata *hostdata = shost_priv(instance);
@@ -400,8 +393,6 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src,
rtrc(7);
/* Check for parity error here. fixme. */
rtrc(0);
- if (i > hostdata->spin_max_w)
- hostdata->spin_max_w = i;
return (0);
}
@@ -440,8 +431,6 @@ static struct scsi_host_template driver_template = {
.detect = dtc_detect,
.release = dtc_release,
.proc_name = "dtc3x80",
- .show_info = dtc_show_info,
- .write_info = dtc_write_info,
.info = dtc_info,
.queuecommand = dtc_queue_command,
.eh_abort_handler = dtc_abort,
diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h
index 56732cba8..fcb0a8ea7 100644
--- a/drivers/scsi/dtc.h
+++ b/drivers/scsi/dtc.h
@@ -21,14 +21,17 @@
#define NCR5380_dma_xfer_len(instance, cmd, phase) \
dtc_dma_xfer_len(cmd)
+#define NCR5380_dma_recv_setup dtc_pread
+#define NCR5380_dma_send_setup dtc_pwrite
+#define NCR5380_dma_residual(instance) (0)
#define NCR5380_intr dtc_intr
#define NCR5380_queue_command dtc_queue_command
#define NCR5380_abort dtc_abort
#define NCR5380_bus_reset dtc_bus_reset
#define NCR5380_info dtc_info
-#define NCR5380_show_info dtc_show_info
-#define NCR5380_write_info dtc_write_info
+
+#define NCR5380_io_delay(x) udelay(x)
/* 15 12 11 10
1001 1100 0000 0000 */
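The dtc.h hunk shows the convention the whole series moves to: instead of compile-time switches such as PSEUDO_DMA inside the core, each board header maps a fixed set of hook names (NCR5380_dma_recv_setup and friends) onto its own helpers before including NCR5380.h, and boards without a feature supply a trivial (0) stub. A toy model of that hook-by-macro layout (the "core" is inlined here for brevity; the real code splits it into a shared header):

#include <stdio.h>

/* Board-specific helper, analogous to dtc_pread(). */
static int board_pread(unsigned char *dst, int len)
{
	for (int i = 0; i < len; i++)
		dst[i] = 0;	/* a real board would poll its data port */
	return 0;		/* 0 == success, as in the drivers above */
}

/* Map the core's fixed hook names onto this board's helpers. */
#define CORE_dma_recv_setup(dst, len)	board_pread(dst, len)
#define CORE_dma_residual()		(0)	/* nothing left over */

/* ---- stand-in for the shared core (NCR5380.h's role) ---- */
static int core_receive(unsigned char *dst, int len)
{
	if (CORE_dma_recv_setup(dst, len))
		return -1;
	return len - CORE_dma_residual();
}
/* ---------------------------------------------------------- */

int main(void)
{
	unsigned char buf[128];

	printf("received %d bytes\n", core_receive(buf, sizeof(buf)));
	return 0;
}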
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index ca8003f0d..4299fa485 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -729,6 +729,7 @@ static int register_pio_HBA(long base, struct get_conf *gc, struct pci_dev *pdev
break;
case 0x24:
SD(sh)->EATA_revision = 'z';
+ break;
default:
SD(sh)->EATA_revision = '?';
}
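The eata_pio hunk is a one-line fallthrough fix: without the break, the 'z' assigned in case 0x24 fell straight through into default and was immediately overwritten with '?'. Reduced to its essence (hypothetical helper, same control flow):

#include <stdio.h>

static char eata_revision(int sub)
{
	char rev;

	switch (sub) {
	case 0x24:
		rev = 'z';
		break;		/* the missing break the patch adds */
	default:
		rev = '?';
	}
	return rev;
}

int main(void)
{
	printf("0x24 -> %c\n", eata_revision(0x24));	/* 'z', not '?' */
	return 0;
}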
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 33581ba43..2aca4d16f 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -246,7 +246,7 @@ static struct scsi_host_template driver_template = {
.eh_target_reset_handler = esas2r_target_reset,
.can_queue = 128,
.this_id = -1,
- .sg_tablesize = SCSI_MAX_SG_SEGMENTS,
+ .sg_tablesize = SG_CHUNK_SIZE,
.cmd_per_lun =
ESAS2R_DEFAULT_CMD_PER_LUN,
.present = 0,
@@ -271,7 +271,7 @@ module_param(num_sg_lists, int, 0);
MODULE_PARM_DESC(num_sg_lists,
"Number of scatter/gather lists. Default 1024.");
-int sg_tablesize = SCSI_MAX_SG_SEGMENTS;
+int sg_tablesize = SG_CHUNK_SIZE;
module_param(sg_tablesize, int, 0);
MODULE_PARM_DESC(sg_tablesize,
"Maximum number of entries in a scatter/gather table.");
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index ce129e595..9ddc9200e 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.17a"
+#define DRV_VERSION "1.6.0.21"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index f3032ca50..d9fd2f841 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -439,7 +439,6 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
int sg_count = 0;
unsigned long flags = 0;
unsigned long ptr;
- struct fc_rport_priv *rdata;
spinlock_t *io_lock = NULL;
int io_lock_acquired = 0;
@@ -455,14 +454,17 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
return 0;
}
- rdata = lp->tt.rport_lookup(lp, rport->port_id);
- if (!rdata || (rdata->rp_state == RPORT_ST_DELETE)) {
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "returning IO as rport is removed\n");
- atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
- sc->result = DID_NO_CONNECT;
- done(sc);
- return 0;
+ if (rport) {
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+
+ if (!rp || rp->rp_state != RPORT_ST_READY) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "returning DID_NO_CONNECT for IO as rport is removed\n");
+ atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+ sc->result = DID_NO_CONNECT<<16;
+ done(sc);
+ return 0;
+ }
}
if (lp->state != LPORT_ST_READY || !(lp->link_up))
@@ -1091,6 +1093,11 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
atomic64_inc(
&term_stats->terminate_fw_timeouts);
break;
+ case FCPIO_ITMF_REJECTED:
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "abort reject recd. id %d\n",
+ (int)(id & FNIC_TAG_MASK));
+ break;
case FCPIO_IO_NOT_FOUND:
if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_io_not_found);
@@ -1111,9 +1118,15 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
spin_unlock_irqrestore(io_lock, flags);
return;
}
- CMD_ABTS_STATUS(sc) = hdr_status;
+
CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
+ /* If the status is IO not found consider it as success */
+ if (hdr_status == FCPIO_IO_NOT_FOUND)
+ CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;
+ else
+ CMD_ABTS_STATUS(sc) = hdr_status;
+
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
@@ -1926,21 +1939,31 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ start_time = io_req->start_time;
/*
* firmware completed the abort, check the status,
- * free the io_req irrespective of failure or success
+ * free the io_req if successful. If abort fails,
+	 * device reset will clean the I/O.
*/
- if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
+ if (CMD_ABTS_STATUS(sc) == FCPIO_SUCCESS)
+ CMD_SP(sc) = NULL;
+ else {
ret = FAILED;
-
- CMD_SP(sc) = NULL;
+ spin_unlock_irqrestore(io_lock, flags);
+ goto fnic_abort_cmd_end;
+ }
spin_unlock_irqrestore(io_lock, flags);
- start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
+ if (sc->scsi_done) {
+ /* Call SCSI completion function to complete the IO */
+ sc->result = (DID_ABORT << 16);
+ sc->scsi_done(sc);
+ }
+
fnic_abort_cmd_end:
FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
sc->request->tag, sc,
@@ -2018,7 +2041,9 @@ lr_io_req_end:
* successfully aborted, 1 otherwise
*/
static int fnic_clean_pending_aborts(struct fnic *fnic,
- struct scsi_cmnd *lr_sc)
+ struct scsi_cmnd *lr_sc,
+ bool new_sc)
+
{
int tag, abt_tag;
struct fnic_io_req *io_req;
@@ -2036,10 +2061,10 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
spin_lock_irqsave(io_lock, flags);
sc = scsi_host_find_tag(fnic->lport->host, tag);
/*
- * ignore this lun reset cmd or cmds that do not belong to
- * this lun
+ * ignore this lun reset cmd if issued using new SC
+ * or cmds that do not belong to this lun
*/
- if (!sc || sc == lr_sc || sc->device != lun_dev) {
+ if (!sc || ((sc == lr_sc) && new_sc) || sc->device != lun_dev) {
spin_unlock_irqrestore(io_lock, flags);
continue;
}
@@ -2145,11 +2170,27 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
goto clean_pending_aborts_end;
}
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
- CMD_SP(sc) = NULL;
+
+ /* original sc used for lr is handled by dev reset code */
+ if (sc != lr_sc)
+ CMD_SP(sc) = NULL;
spin_unlock_irqrestore(io_lock, flags);
- fnic_release_ioreq_buf(fnic, io_req, sc);
- mempool_free(io_req, fnic->io_req_pool);
+ /* original sc used for lr is handled by dev reset code */
+ if (sc != lr_sc) {
+ fnic_release_ioreq_buf(fnic, io_req, sc);
+ mempool_free(io_req, fnic->io_req_pool);
+ }
+
+ /*
+	 * Any IO returned during reset needs to call scsi_done
+	 * to return the scsi_cmnd to the upper layer.
+ */
+ if (sc->scsi_done) {
+ /* Set result to let upper SCSI layer retry */
+ sc->result = DID_RESET << 16;
+ sc->scsi_done(sc);
+ }
}
schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
@@ -2243,6 +2284,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
int tag = 0;
DECLARE_COMPLETION_ONSTACK(tm_done);
int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
+ bool new_sc = 0;
/* Wait for rport to unblock */
fc_block_scsi_eh(sc);
@@ -2288,13 +2330,12 @@ int fnic_device_reset(struct scsi_cmnd *sc)
* fix the way the EH ioctls work for real, but until
* that happens we fail these explicit requests here.
*/
- if (shost_use_blk_mq(sc->device->host))
- goto fnic_device_reset_end;
tag = fnic_scsi_host_start_tag(fnic, sc);
if (unlikely(tag == SCSI_NO_TAG))
goto fnic_device_reset_end;
tag_gen_flag = 1;
+ new_sc = 1;
}
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
@@ -2429,7 +2470,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
* the lun reset cmd. If all cmds get cleaned, the lun reset
* succeeds
*/
- if (fnic_clean_pending_aborts(fnic, sc)) {
+ if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
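Two behavioral points in the fnic hunks above deserve a gloss: an abort that firmware answers with FCPIO_IO_NOT_FOUND is now counted as success (the command already left the hardware), and only a successful abort frees the io_req and completes the command with DID_ABORT, while a failed one keeps the io_req so the later device reset can clean it up. A stripped-down model of that completion decision (simplified types, hypothetical names):

#include <stdio.h>
#include <stdlib.h>

enum { FCPIO_SUCCESS, FCPIO_IO_NOT_FOUND, FCPIO_TIMEOUT };

struct cmd {
	void *io_req;			/* driver-private request state */
	int result;
	void (*scsi_done)(struct cmd *);
};

static void upper_layer_done(struct cmd *sc)
{
	printf("completed, result=0x%x\n", sc->result);
}

/* "IO not found" means the command already left the hardware, so
 * the abort effectively succeeded. */
static int effective_abts_status(int hdr_status)
{
	return hdr_status == FCPIO_IO_NOT_FOUND ? FCPIO_SUCCESS : hdr_status;
}

static int finish_abort(struct cmd *sc, int hdr_status)
{
	if (effective_abts_status(hdr_status) != FCPIO_SUCCESS)
		return -1;	/* keep io_req; device reset cleans it up */

	free(sc->io_req);	/* abort succeeded: release resources ... */
	sc->io_req = NULL;
	sc->result = 0x05 << 16;	/* DID_ABORT << 16 */
	if (sc->scsi_done)
		sc->scsi_done(sc);	/* ... and return the command */
	return 0;
}

int main(void)
{
	struct cmd sc = { malloc(16), 0, upper_layer_done };

	return finish_abort(&sc, FCPIO_IO_NOT_FOUND) ? 1 : 0;
}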
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 90091e693..516bd6c4f 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -18,50 +18,10 @@
*
* Added ISAPNP support for DTC436 adapters,
* Thomas Sailer, sailer@ife.ee.ethz.ch
- */
-
-/*
- * TODO : flesh out DMA support, find some one actually using this (I have
- * a memory mapped Trantor board that works fine)
- */
-
-/*
- * The card is detected and initialized in one of several ways :
- * 1. With command line overrides - NCR5380=port,irq may be
- * used on the LILO command line to override the defaults.
- *
- * 2. With the GENERIC_NCR5380_OVERRIDE compile time define. This is
- * specified as an array of address, irq, dma, board tuples. Ie, for
- * one board at 0x350, IRQ5, no dma, I could say
- * -DGENERIC_NCR5380_OVERRIDE={{0xcc000, 5, DMA_NONE, BOARD_NCR5380}}
- *
- * -1 should be specified for no or DMA interrupt, -2 to autoprobe for an
- * IRQ line if overridden on the command line.
*
- * 3. When included as a module, with arguments passed on the command line:
- * ncr_irq=xx the interrupt
- * ncr_addr=xx the port or base address (for port or memory
- * mapped, resp.)
- * ncr_dma=xx the DMA
- * ncr_5380=1 to set up for a NCR5380 board
- * ncr_53c400=1 to set up for a NCR53C400 board
- * e.g.
- * modprobe g_NCR5380 ncr_irq=5 ncr_addr=0x350 ncr_5380=1
- * for a port mapped NCR5380 board or
- * modprobe g_NCR5380 ncr_irq=255 ncr_addr=0xc8000 ncr_53c400=1
- * for a memory mapped NCR53C400 board with interrupts disabled.
- *
- * 255 should be specified for no or DMA interrupt, 254 to autoprobe for an
- * IRQ line if overridden on the command line.
- *
+ * See Documentation/scsi/g_NCR5380.txt for more info.
*/
-#define AUTOPROBE_IRQ
-
-#ifdef CONFIG_SCSI_GENERIC_NCR53C400
-#define PSEUDO_DMA
-#endif
-
#include <asm/io.h>
#include <linux/blkdev.h>
#include <linux/module.h>
@@ -270,7 +230,7 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
#ifndef SCSI_G_NCR5380_MEM
int i;
int port_idx = -1;
- unsigned long region_size = 16;
+ unsigned long region_size;
#endif
static unsigned int __initdata ncr_53c400a_ports[] = {
0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0
@@ -290,6 +250,7 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
#ifdef SCSI_G_NCR5380_MEM
unsigned long base;
void __iomem *iomem;
+ resource_size_t iomem_size;
#endif
if (ncr_irq)
@@ -350,25 +311,17 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
flags = 0;
switch (overrides[current_override].board) {
case BOARD_NCR5380:
- flags = FLAG_NO_PSEUDO_DMA;
- break;
- case BOARD_NCR53C400:
-#ifdef PSEUDO_DMA
- flags = FLAG_NO_DMA_FIXUP;
-#endif
+ flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
break;
case BOARD_NCR53C400A:
- flags = FLAG_NO_DMA_FIXUP;
ports = ncr_53c400a_ports;
magic = ncr_53c400a_magic;
break;
case BOARD_HP_C2502:
- flags = FLAG_NO_DMA_FIXUP;
ports = ncr_53c400a_ports;
magic = hp_c2502_magic;
break;
case BOARD_DTC3181E:
- flags = FLAG_NO_DMA_FIXUP;
ports = dtc_3181e_ports;
magic = ncr_53c400a_magic;
break;
@@ -381,20 +334,22 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
/* Disable the adapter and look for a free io port */
magic_configure(-1, 0, magic);
+ region_size = 16;
+
if (overrides[current_override].NCR5380_map_name != PORT_AUTO)
for (i = 0; ports[i]; i++) {
- if (!request_region(ports[i], 16, "ncr53c80"))
+ if (!request_region(ports[i], region_size, "ncr53c80"))
continue;
if (overrides[current_override].NCR5380_map_name == ports[i])
break;
- release_region(ports[i], 16);
+ release_region(ports[i], region_size);
} else
for (i = 0; ports[i]; i++) {
- if (!request_region(ports[i], 16, "ncr53c80"))
+ if (!request_region(ports[i], region_size, "ncr53c80"))
continue;
if (inb(ports[i]) == 0xff)
break;
- release_region(ports[i], 16);
+ release_region(ports[i], region_size);
}
if (ports[i]) {
/* At this point we have our region reserved */
@@ -410,17 +365,19 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
else
{
/* Not a 53C400A style setup - just grab */
- if(!(request_region(overrides[current_override].NCR5380_map_name, NCR5380_region_size, "ncr5380")))
+ region_size = 8;
+ if (!request_region(overrides[current_override].NCR5380_map_name,
+ region_size, "ncr5380"))
continue;
- region_size = NCR5380_region_size;
}
#else
base = overrides[current_override].NCR5380_map_name;
- if (!request_mem_region(base, NCR5380_region_size, "ncr5380"))
+ iomem_size = NCR53C400_region_size;
+ if (!request_mem_region(base, iomem_size, "ncr5380"))
continue;
- iomem = ioremap(base, NCR5380_region_size);
+ iomem = ioremap(base, iomem_size);
if (!iomem) {
- release_mem_region(base, NCR5380_region_size);
+ release_mem_region(base, iomem_size);
continue;
}
#endif
@@ -458,6 +415,7 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
#else
instance->base = overrides[current_override].NCR5380_map_name;
hostdata->iomem = iomem;
+ hostdata->iomem_size = iomem_size;
switch (overrides[current_override].board) {
case BOARD_NCR53C400:
hostdata->c400_ctl_status = 0x100;
@@ -472,7 +430,7 @@ static int __init generic_NCR5380_detect(struct scsi_host_template *tpnt)
}
#endif
- if (NCR5380_init(instance, flags))
+ if (NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP))
goto out_unregister;
switch (overrides[current_override].board) {
@@ -524,7 +482,7 @@ out_release:
release_region(overrides[current_override].NCR5380_map_name, region_size);
#else
iounmap(iomem);
- release_mem_region(base, NCR5380_region_size);
+ release_mem_region(base, iomem_size);
#endif
return count;
}
@@ -546,45 +504,18 @@ static int generic_NCR5380_release_resources(struct Scsi_Host *instance)
#ifndef SCSI_G_NCR5380_MEM
release_region(instance->io_port, instance->n_io_port);
#else
- iounmap(((struct NCR5380_hostdata *)instance->hostdata)->iomem);
- release_mem_region(instance->base, NCR5380_region_size);
-#endif
- return 0;
-}
-
-#ifdef BIOSPARAM
-/**
- * generic_NCR5380_biosparam
- * @disk: disk to compute geometry for
- * @dev: device identifier for this disk
- * @ip: sizes to fill in
- *
- * Generates a BIOS / DOS compatible H-C-S mapping for the specified
- * device / size.
- *
- * XXX Most SCSI boards use this mapping, I could be incorrect. Someone
- * using hard disks on a trantor should verify that this mapping
- * corresponds to that used by the BIOS / ASPI driver by running the linux
- * fdisk program and matching the H_C_S coordinates to what DOS uses.
- *
- * Locks: none
- */
+ {
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
-static int
-generic_NCR5380_biosparam(struct scsi_device *sdev, struct block_device *bdev,
- sector_t capacity, int *ip)
-{
- ip[0] = 64;
- ip[1] = 32;
- ip[2] = capacity >> 11;
+ iounmap(hostdata->iomem);
+ release_mem_region(instance->base, hostdata->iomem_size);
+ }
+#endif
return 0;
}
-#endif
-
-#ifdef PSEUDO_DMA
/**
- * NCR5380_pread - pseudo DMA read
+ * generic_NCR5380_pread - pseudo DMA read
* @instance: adapter to read from
* @dst: buffer to read into
* @len: buffer length
@@ -593,7 +524,8 @@ generic_NCR5380_biosparam(struct scsi_device *sdev, struct block_device *bdev,
* controller
*/
-static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len)
+static inline int generic_NCR5380_pread(struct Scsi_Host *instance,
+ unsigned char *dst, int len)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
int blocks = len / 128;
@@ -661,7 +593,7 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst,
}
/**
- * NCR5380_write - pseudo DMA write
+ * generic_NCR5380_pwrite - pseudo DMA write
 * @instance: adapter to write to
 * @src: buffer to write from
* @len: buffer length
@@ -670,7 +602,8 @@ static inline int NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst,
* controller
*/
-static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len)
+static inline int generic_NCR5380_pwrite(struct Scsi_Host *instance,
+ unsigned char *src, int len)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
int blocks = len / 128;
@@ -738,10 +671,15 @@ static inline int NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src,
return 0;
}
-static int generic_NCR5380_dma_xfer_len(struct scsi_cmnd *cmd)
+static int generic_NCR5380_dma_xfer_len(struct Scsi_Host *instance,
+ struct scsi_cmnd *cmd)
{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
int transfersize = cmd->transfersize;
+ if (hostdata->flags & FLAG_NO_PSEUDO_DMA)
+ return 0;
+
/* Limit transfers to 32K, for xx400 & xx406
* pseudoDMA that transfers in 128 bytes blocks.
*/
@@ -756,8 +694,6 @@ static int generic_NCR5380_dma_xfer_len(struct scsi_cmnd *cmd)
return transfersize;
}
-#endif /* PSEUDO_DMA */
-
/*
* Include the NCR5380 core code that we build our driver around
*/
@@ -773,7 +709,6 @@ static struct scsi_host_template driver_template = {
.queuecommand = generic_NCR5380_queue_command,
.eh_abort_handler = generic_NCR5380_abort,
.eh_bus_reset_handler = generic_NCR5380_bus_reset,
- .bios_param = NCR5380_BIOSPARAM,
.can_queue = 16,
.this_id = 7,
.sg_tablesize = SG_ALL,
diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h
index 6f3d2ac4f..595177428 100644
--- a/drivers/scsi/g_NCR5380.h
+++ b/drivers/scsi/g_NCR5380.h
@@ -14,13 +14,6 @@
#ifndef GENERIC_NCR5380_H
#define GENERIC_NCR5380_H
-#ifdef CONFIG_SCSI_GENERIC_NCR53C400
-#define BIOSPARAM
-#define NCR5380_BIOSPARAM generic_NCR5380_biosparam
-#else
-#define NCR5380_BIOSPARAM NULL
-#endif
-
#define __STRVAL(x) #x
#define STRVAL(x) __STRVAL(x)
@@ -30,12 +23,6 @@
#define NCR5380_map_type int
#define NCR5380_map_name port
-#ifdef CONFIG_SCSI_GENERIC_NCR53C400
-#define NCR5380_region_size 16
-#else
-#define NCR5380_region_size 8
-#endif
-
#define NCR5380_read(reg) \
inb(instance->io_port + (reg))
#define NCR5380_write(reg, value) \
@@ -55,7 +42,7 @@
#define NCR5380_map_name base
#define NCR53C400_mem_base 0x3880
#define NCR53C400_host_buffer 0x3900
-#define NCR5380_region_size 0x3a00
+#define NCR53C400_region_size 0x3a00
#define NCR5380_read(reg) \
readb(((struct NCR5380_hostdata *)shost_priv(instance))->iomem + \
@@ -66,6 +53,7 @@
#define NCR5380_implementation_fields \
void __iomem *iomem; \
+ resource_size_t iomem_size; \
int c400_ctl_status; \
int c400_blk_cnt; \
int c400_host_buf;
@@ -73,16 +61,18 @@
#endif
#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- generic_NCR5380_dma_xfer_len(cmd)
+ generic_NCR5380_dma_xfer_len(instance, cmd)
+#define NCR5380_dma_recv_setup generic_NCR5380_pread
+#define NCR5380_dma_send_setup generic_NCR5380_pwrite
+#define NCR5380_dma_residual(instance) (0)
#define NCR5380_intr generic_NCR5380_intr
#define NCR5380_queue_command generic_NCR5380_queue_command
#define NCR5380_abort generic_NCR5380_abort
#define NCR5380_bus_reset generic_NCR5380_bus_reset
-#define NCR5380_pread generic_NCR5380_pread
-#define NCR5380_pwrite generic_NCR5380_pwrite
#define NCR5380_info generic_NCR5380_info
-#define NCR5380_show_info generic_NCR5380_show_info
+
+#define NCR5380_io_delay(x) udelay(x)
#define BOARD_NCR5380 0
#define BOARD_NCR53C400 1
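Across g_NCR5380 the compile-time PSEUDO_DMA / DONT_USE_INTR switches become runtime behavior: generic_NCR5380_dma_xfer_len() simply returns 0 when FLAG_NO_PSEUDO_DMA is set, which the core takes as "fall back to PIO". A small sketch of that gating (flag name reused from the diff; the 32K cap matches the comment in the hunk above):

#include <stdio.h>

#define FLAG_NO_PSEUDO_DMA 1

struct host {
	unsigned int flags;
};

/* Returning 0 tells the core to use PIO for this command, turning
 * the old compile-time #ifdef PSEUDO_DMA into a per-host decision. */
static int dma_xfer_len(const struct host *h, int transfersize)
{
	if (h->flags & FLAG_NO_PSEUDO_DMA)
		return 0;
	/* xx400/xx406 pseudo-DMA moves 128-byte blocks, capped at 32K. */
	if (transfersize > 32 * 1024)
		transfersize = 32 * 1024;
	return transfersize;
}

int main(void)
{
	struct host plain = { FLAG_NO_PSEUDO_DMA };
	struct host c400 = { 0 };

	printf("%d %d\n", dma_xfer_len(&plain, 4096),
	       dma_xfer_len(&c400, 65536));	/* 0 32768 */
	return 0;
}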
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 29e89f340..d7cab724f 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -23,7 +23,7 @@
#include <scsi/sas_ata.h>
#include <scsi/libsas.h>
-#define DRV_VERSION "v1.3"
+#define DRV_VERSION "v1.4"
#define HISI_SAS_MAX_PHYS 9
#define HISI_SAS_MAX_QUEUES 32
@@ -133,6 +133,9 @@ struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *device);
+ int (*slot_index_alloc)(struct hisi_hba *hisi_hba, int *slot_idx,
+ struct domain_device *device);
+ struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
int (*get_free_slot)(struct hisi_hba *hisi_hba, int *q, int *s);
void (*start_delivery)(struct hisi_hba *hisi_hba);
@@ -298,7 +301,7 @@ struct hisi_sas_command_table_stp {
u8 atapi_cdb[ATAPI_CDB_LEN];
};
-#define HISI_SAS_SGE_PAGE_CNT SCSI_MAX_SG_SEGMENTS
+#define HISI_SAS_SGE_PAGE_CNT SG_CHUNK_SIZE
struct hisi_sas_sge_page {
struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT];
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 097ab4f27..18dd5ea2c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -227,7 +227,11 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
} else
n_elem = task->num_scatter;
- rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
+ if (hisi_hba->hw->slot_index_alloc)
+ rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
+ device);
+ else
+ rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
if (rc)
goto err_out;
rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
@@ -417,7 +421,10 @@ static int hisi_sas_dev_found(struct domain_device *device)
struct hisi_sas_device *sas_dev;
struct device *dev = &hisi_hba->pdev->dev;
- sas_dev = hisi_sas_alloc_dev(device);
+ if (hisi_hba->hw->alloc_dev)
+ sas_dev = hisi_hba->hw->alloc_dev(device);
+ else
+ sas_dev = hisi_sas_alloc_dev(device);
if (!sas_dev) {
dev_err(dev, "fail alloc dev: max support %d devices\n",
HISI_SAS_MAX_DEVICES);
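hisi_sas_main.c above adds optional per-hardware ops with a generic fallback: if the hw table supplies slot_index_alloc or alloc_dev the core calls it, otherwise it uses the stock implementation. The "optional op, else default" shape in miniature (hypothetical names):

#include <stdio.h>

struct hba;

/* Optional per-hardware overrides; NULL means use the generic path. */
struct hw_ops {
	int (*slot_index_alloc)(struct hba *h, int *idx);
};

struct hba {
	const struct hw_ops *hw;
	int next;
};

static int generic_slot_alloc(struct hba *h, int *idx)
{
	*idx = h->next++;
	return 0;
}

static int slot_alloc(struct hba *h, int *idx)
{
	if (h->hw && h->hw->slot_index_alloc)
		return h->hw->slot_index_alloc(h, idx);	/* quirked hw */
	return generic_slot_alloc(h, idx);		/* default */
}

int main(void)
{
	struct hba h = { NULL, 0 };
	int idx;

	slot_alloc(&h, &idx);
	printf("slot %d\n", idx);
	return 0;
}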
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index b73374764..bd20c5488 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -465,6 +465,62 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
return readl(regs);
}
+/* This function needs to be protected from pre-emption. */
+static int
+slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
+ struct domain_device *device)
+{
+ unsigned int index = 0;
+ void *bitmap = hisi_hba->slot_index_tags;
+ int sata_dev = dev_is_sata(device);
+
+ while (1) {
+ index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
+ index);
+ if (index >= hisi_hba->slot_index_count)
+ return -SAS_QUEUE_FULL;
+ /*
+ * SAS IPTT bit0 should be 1
+ */
+ if (sata_dev || (index & 1))
+ break;
+ index++;
+ }
+
+ set_bit(index, bitmap);
+ *slot_idx = index;
+ return 0;
+}
+
+static struct
+hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
+{
+ struct hisi_hba *hisi_hba = device->port->ha->lldd_ha;
+ struct hisi_sas_device *sas_dev = NULL;
+ int i, sata_dev = dev_is_sata(device);
+
+ spin_lock(&hisi_hba->lock);
+ for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
+ /*
+ * SATA device id bit0 should be 0
+ */
+ if (sata_dev && (i & 1))
+ continue;
+ if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
+ hisi_hba->devices[i].device_id = i;
+ sas_dev = &hisi_hba->devices[i];
+ sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
+ sas_dev->dev_type = device->dev_type;
+ sas_dev->hisi_hba = hisi_hba;
+ sas_dev->sas_device = device;
+ break;
+ }
+ }
+ spin_unlock(&hisi_hba->lock);
+
+ return sas_dev;
+}
+
static void config_phy_opt_mode_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
@@ -544,7 +600,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
}
qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
- (device->max_linkrate << ITCT_HDR_MCR_OFF) |
+ (device->linkrate << ITCT_HDR_MCR_OFF) |
(1 << ITCT_HDR_VLN_OFF) |
(port->id << ITCT_HDR_PORT_ID_OFF));
itct->qw0 = cpu_to_le64(qw0);
@@ -554,10 +610,11 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
itct->sas_addr = __swab64(itct->sas_addr);
/* qw2 */
- itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) |
- (0xff00ULL << ITCT_HDR_BITLT_OFF) |
- (0xff00ULL << ITCT_HDR_MCTLT_OFF) |
- (0xff00ULL << ITCT_HDR_RTOLT_OFF));
+ if (!dev_is_sata(device))
+ itct->qw2 = cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF) |
+ (0x1ULL << ITCT_HDR_BITLT_OFF) |
+ (0x32ULL << ITCT_HDR_MCTLT_OFF) |
+ (0x1ULL << ITCT_HDR_RTOLT_OFF));
}
static void free_device_v2_hw(struct hisi_hba *hisi_hba,
@@ -715,7 +772,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF);
hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1);
hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4);
- hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x4E20);
+ hisi_sas_write32(hisi_hba, MAX_CON_TIME_LIMIT_TIME, 0x32);
hisi_sas_write32(hisi_hba, BUS_INACTIVE_LIMIT_TIME, 0x1);
hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1);
@@ -1573,6 +1630,9 @@ static u8 get_ata_protocol(u8 cmd, int direction)
switch (cmd) {
case ATA_CMD_FPDMA_WRITE:
case ATA_CMD_FPDMA_READ:
+ case ATA_CMD_FPDMA_RECV:
+ case ATA_CMD_FPDMA_SEND:
+ case ATA_CMD_NCQ_NON_DATA:
return SATA_PROTOCOL_FPDMA;
case ATA_CMD_ID_ATA:
@@ -1993,22 +2053,23 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
irqreturn_t res = IRQ_HANDLED;
u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
- int phy_no;
+ int phy_no, offset;
phy_no = sas_phy->id;
initial_fis = &hisi_hba->initial_fis[phy_no];
fis = &initial_fis->fis;
- ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1);
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk | 1 << phy_no);
+ offset = 4 * (phy_no / 4);
+ ent_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK1 + offset);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset,
+ ent_msk | 1 << ((phy_no % 4) * 8));
- ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1);
- ent_tmp = ent_int;
+ ent_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC1 + offset);
+ ent_tmp = ent_int & (1 << (ENT_INT_SRC1_D2H_FIS_CH1_OFF *
+ (phy_no % 4)));
ent_int >>= ENT_INT_SRC1_D2H_FIS_CH1_OFF * (phy_no % 4);
if ((ent_int & ENT_INT_SRC1_D2H_FIS_CH0_MSK) == 0) {
dev_warn(dev, "sata int: phy%d did not receive FIS\n", phy_no);
- hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp);
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk);
res = IRQ_NONE;
goto end;
}
@@ -2056,8 +2117,8 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
queue_work(hisi_hba->wq, &phy->phyup_ws);
end:
- hisi_sas_write32(hisi_hba, ENT_INT_SRC1, ent_tmp);
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, ent_msk);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC1 + offset, ent_tmp);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1 + offset, ent_msk);
return res;
}
@@ -2165,6 +2226,8 @@ static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
.setup_itct = setup_itct_v2_hw,
+ .slot_index_alloc = slot_index_alloc_quirk_v2_hw,
+ .alloc_dev = alloc_dev_quirk_v2_hw,
.sl_notify = sl_notify_v2_hw,
.get_wideport_bitmap = get_wideport_bitmap_v2_hw,
.free_device = free_device_v2_hw,
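The v2 quirk functions added above encode a tag-parity rule: SAS commands need an IPTT with bit 0 set while SATA device IDs need bit 0 clear, so both allocators walk their space skipping entries of the wrong parity. The slot side reduced to a loop (plain array instead of the kernel bitmap API):

#include <stdio.h>

#define SLOT_COUNT 16
static unsigned char bitmap[SLOT_COUNT];	/* 0 = free, 1 = in use */

/* SAS IPTTs must have bit 0 set, so non-SATA devices skip even
 * indices; SATA commands may take any free slot. */
static int slot_alloc(int sata_dev, int *slot_idx)
{
	for (int index = 0; index < SLOT_COUNT; index++) {
		if (bitmap[index])
			continue;
		if (!sata_dev && !(index & 1))
			continue;	/* wrong parity for a SAS tag */
		bitmap[index] = 1;
		*slot_idx = index;
		return 0;
	}
	return -1;			/* queue full */
}

int main(void)
{
	int idx;

	slot_alloc(0, &idx);
	printf("SAS slot:  %d\n", idx);	/* 1: first odd free index */
	slot_alloc(1, &idx);
	printf("SATA slot: %d\n", idx);	/* 0 */
	return 0;
}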
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 5be944c8b..ff8dcd5b0 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -60,7 +60,7 @@
* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
* with an optional trailing '-' followed by a byte value (0-255).
*/
-#define HPSA_DRIVER_VERSION "3.4.14-0"
+#define HPSA_DRIVER_VERSION "3.4.16-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
@@ -294,6 +294,9 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
struct ReportExtendedLUNdata *buf, int bufsize);
static int hpsa_luns_changed(struct ctlr_info *h);
+static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
+ struct hpsa_scsi_dev_t *dev,
+ unsigned char *scsi3addr);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
@@ -728,6 +731,29 @@ static ssize_t unique_id_show(struct device *dev,
sn[12], sn[13], sn[14], sn[15]);
}
+static ssize_t sas_address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+ u64 sas_address;
+
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->lock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return -ENODEV;
+ }
+ sas_address = hdev->sas_address;
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
+}
+
static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -840,6 +866,7 @@ static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
@@ -865,6 +892,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = {
&dev_attr_unique_id,
&dev_attr_hp_ssd_smart_path_enabled,
&dev_attr_path_info,
+ &dev_attr_sas_address,
NULL,
};
@@ -1637,9 +1665,8 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
for (j = 0; j < ndevices; j++) {
if (dev[j] == NULL)
continue;
- if (dev[j]->devtype != TYPE_DISK)
- continue;
- if (dev[j]->devtype != TYPE_ZBC)
+ if (dev[j]->devtype != TYPE_DISK &&
+ dev[j]->devtype != TYPE_ZBC)
continue;
if (is_logical_device(dev[j]))
continue;
@@ -1684,9 +1711,8 @@ static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
for (i = 0; i < ndevices; i++) {
if (dev[i] == NULL)
continue;
- if (dev[i]->devtype != TYPE_DISK)
- continue;
- if (dev[i]->devtype != TYPE_ZBC)
+ if (dev[i]->devtype != TYPE_DISK &&
+ dev[i]->devtype != TYPE_ZBC)
continue;
if (!is_logical_device(dev[i]))
continue;
@@ -1720,6 +1746,51 @@ static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
return rc;
}
+static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *dev)
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < h->nr_cmds; i++) {
+ struct CommandList *c = h->cmd_pool + i;
+ int refcount = atomic_inc_return(&c->refcount);
+
+ if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
+ dev->scsi3addr)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->lock, flags); /* Implied MB */
+ if (!hpsa_is_cmd_idle(c))
+ ++count;
+ spin_unlock_irqrestore(&h->lock, flags);
+ }
+
+ cmd_free(h, c);
+ }
+
+ return count;
+}
+
+static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *device)
+{
+ int cmds = 0;
+ int waits = 0;
+
+ while (1) {
+ cmds = hpsa_find_outstanding_commands_for_dev(h, device);
+ if (cmds == 0)
+ break;
+ if (++waits > 20)
+ break;
+ dev_warn(&h->pdev->dev,
+ "%s: removing device with %d outstanding commands!\n",
+ __func__, cmds);
+ msleep(1000);
+ }
+}
+
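
The scan above uses the command pool's per-slot refcount as a probe:
atomic_inc_return() > 1 means another owner holds the slot, so it may be
in flight, and cmd_free() drops the probe reference again. The wait loop
then caps removal latency at roughly 20 x 1 s. A stripped-down sketch of
the probe idiom (illustrative, not a drop-in):

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;

		if (atomic_inc_return(&c->refcount) > 1) {
			/* slot has another owner: inspect it under h->lock */
		}
		cmd_free(h, c);  /* drop the probe reference */
	}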
static void hpsa_remove_device(struct ctlr_info *h,
struct hpsa_scsi_dev_t *device)
{
@@ -1743,8 +1814,13 @@ static void hpsa_remove_device(struct ctlr_info *h,
hpsa_show_dev_msg(KERN_WARNING, h, device,
"didn't find device for removal.");
}
- } else /* HBA */
+ } else { /* HBA */
+
+ device->removed = 1;
+ hpsa_wait_for_outstanding_commands_for_dev(h, device);
+
hpsa_remove_sas_device(device);
+ }
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h,
@@ -2146,7 +2222,8 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
struct CommandList *c,
struct scsi_cmnd *cmd,
- struct io_accel2_cmd *c2)
+ struct io_accel2_cmd *c2,
+ struct hpsa_scsi_dev_t *dev)
{
int data_len;
int retry = 0;
@@ -2210,8 +2287,27 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
case IOACCEL2_STATUS_SR_INVALID_DEVICE:
case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
- /* We will get an event from ctlr to trigger rescan */
- retry = 1;
+ /*
+ * Did an HBA disk disappear? We will eventually
+ * get a state-change event from the controller, but
+ * in the meantime we need to tell the OS that the
+ * HBA disk is gone and stop sending I/O down to it.
+ * This lets a potential re-insert of the disk reclaim
+ * the same device node.
+ */
+ if (dev->physical_device && dev->expose_device) {
+ cmd->result = DID_NO_CONNECT << 16;
+ dev->removed = 1;
+ h->drv_req_rescan = 1;
+ dev_warn(&h->pdev->dev,
+ "%s: device is gone!\n", __func__);
+ } else
+ /*
+ * Retry by sending down the RAID path.
+ * We will get an event from ctlr to
+ * trigger rescan regardless.
+ */
+ retry = 1;
break;
default:
retry = 1;
@@ -2335,13 +2431,15 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
c2->error_data.serv_response ==
IOACCEL2_SERV_RESPONSE_FAILURE) {
if (c2->error_data.status ==
- IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
+ IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
dev->offload_enabled = 0;
+ dev->offload_to_be_enabled = 0;
+ }
return hpsa_retry_cmd(h, c);
}
- if (handle_ioaccel_mode2_error(h, c, cmd, c2))
+ if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
return hpsa_retry_cmd(h, c);
return hpsa_cmd_free_and_done(h, c, cmd);
@@ -2806,7 +2904,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
goto out;
}
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -2832,7 +2930,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
/* fill_cmd can't fail here, no data buffer to map. */
(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
scsi3addr, TYPE_MSG);
- rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
if (rc) {
dev_warn(&h->pdev->dev, "Failed to send reset command\n");
goto out;
@@ -3080,7 +3178,7 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
return -1;
}
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3123,7 +3221,7 @@ static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3151,7 +3249,7 @@ static int hpsa_bmic_id_controller(struct ctlr_info *h,
goto out;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3182,7 +3280,7 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
- NO_TIMEOUT);
+ DEFAULT_TIMEOUT);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(h, c);
@@ -3250,7 +3348,7 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
c->Request.CDB[5] = 0;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
- NO_TIMEOUT);
+ DEFAULT_TIMEOUT);
if (rc)
goto out;
@@ -3462,7 +3560,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
if (extended_response)
c->Request.CDB[1] = extended_response;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3569,7 +3667,8 @@ static int hpsa_volume_offline(struct ctlr_info *h,
c = cmd_alloc(h);
(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
- rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
+ DEFAULT_TIMEOUT);
if (rc) {
cmd_free(h, c);
return 0;
@@ -3644,7 +3743,8 @@ static int hpsa_device_supports_aborts(struct ctlr_info *h,
c = cmd_alloc(h);
(void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
- (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
+ (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
+ DEFAULT_TIMEOUT);
/* no unmap needed here because no data xfer. */
ei = c->err_info;
switch (ei->CommandStatus) {
@@ -5234,6 +5334,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
dev = cmd->device->hostdata;
if (!dev) {
+ cmd->result = NOT_READY << 16; /* host byte */
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+
+ if (dev->removed) {
cmd->result = DID_NO_CONNECT << 16;
cmd->scsi_done(cmd);
return 0;
@@ -5414,7 +5520,7 @@ static int hpsa_send_test_unit_ready(struct ctlr_info *h,
/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
(void) fill_cmd(c, TEST_UNIT_READY, h,
NULL, 0, 0, lunaddr, TYPE_CMD);
- rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
if (rc)
return rc;
/* no unmap needed here because no data xfer. */
@@ -5638,7 +5744,7 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
0, 0, scsi3addr, TYPE_MSG);
if (h->needs_abort_tags_swizzled)
swizzle_abort_tag(&c->Request.CDB[4]);
- (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
+ (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
hpsa_get_tag(h, abort, &taglower, &tagupper);
dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
__func__, tagupper, taglower);
@@ -5803,7 +5909,7 @@ static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
c = cmd_alloc(h);
setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
- (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
+ (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
hpsa_get_tag(h, abort, &taglower, &tagupper);
dev_dbg(&h->pdev->dev,
"%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
@@ -6348,7 +6454,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
}
- rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
+ DEFAULT_TIMEOUT);
if (iocommand.buf_size > 0)
hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
@@ -6480,7 +6587,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
}
- status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
+ status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
+ DEFAULT_TIMEOUT);
if (sg_used)
hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
@@ -8254,8 +8362,10 @@ static void hpsa_ack_ctlr_events(struct ctlr_info *h)
event_type = "configuration change";
/* Stop sending new RAID offload reqs via the IO accelerator */
scsi_block_requests(h->scsi_host);
- for (i = 0; i < h->ndevices; i++)
+ for (i = 0; i < h->ndevices; i++) {
h->dev[i]->offload_enabled = 0;
+ h->dev[i]->offload_to_be_enabled = 0;
+ }
hpsa_drain_accel_commands(h);
/* Set 'accelerator path config change' bit */
dev_warn(&h->pdev->dev,
@@ -8541,11 +8651,6 @@ reinit_after_soft_reset:
if (rc)
goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
- /* hook into SCSI subsystem */
- rc = hpsa_scsi_add_host(h);
- if (rc)
- goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
-
/* create the resubmit workqueue */
h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
if (!h->rescan_ctlr_wq) {
@@ -8642,6 +8747,11 @@ reinit_after_soft_reset:
dev_info(&h->pdev->dev,
"Can't track change to report lun data\n");
+ /* hook into SCSI subsystem */
+ rc = hpsa_scsi_add_host(h);
+ if (rc)
+ goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
+
/* Monitor the controller for firmware lockups */
h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
@@ -8703,7 +8813,7 @@ static void hpsa_flush_cache(struct ctlr_info *h)
goto out;
}
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_TODEVICE, NO_TIMEOUT);
+ PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
if (rc)
goto out;
if (c->err_info->CommandStatus != 0)
@@ -8742,7 +8852,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -8754,7 +8864,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_TODEVICE, NO_TIMEOUT);
+ PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -8764,7 +8874,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
goto errout;
rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -9602,6 +9712,7 @@ hpsa_sas_get_linkerrors(struct sas_phy *phy)
static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
+ *identifier = 0;
return 0;
}
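
A recurring change through this file replaces NO_TIMEOUT with
DEFAULT_TIMEOUT on internally issued commands, so a controller that stops
responding can no longer pin the issuing thread indefinitely. Call sites
must therefore treat a timeout as an ordinary failure; the shape is
(DEFAULT_TIMEOUT's value is not visible in this diff, so it is left
symbolic):

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
				PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if (rc)		/* now includes the timeout case */
		goto out;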
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index d06bb7417..a1487e67f 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -63,6 +63,7 @@ struct hpsa_scsi_dev_t {
unsigned char scsi3addr[8]; /* as presented to the HW */
u8 physical_device : 1;
u8 expose_device;
+ u8 removed : 1; /* device is marked for death */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
unsigned char device_id[16]; /* from inquiry pg. 0x83 */
u64 sas_address;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d6a691e27..d6803a9e5 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -10093,6 +10093,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg->intr_flag = IPR_USE_MSI;
else {
ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->clear_isr = 1;
ioa_cfg->nvectors = 1;
dev_info(&pdev->dev, "Cannot enable MSI.\n");
}
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 13098b09a..a4dd5c915 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -794,7 +794,7 @@ static void port_timeout(unsigned long data)
* case stay in the stopped state.
*/
dev_err(sciport_to_dev(iport),
- "%s: SCIC Port 0x%p failed to stop before tiemout.\n",
+ "%s: SCIC Port 0x%p failed to stop before timeout.\n",
__func__,
iport);
} else if (current_state == SCI_PORT_STOPPING) {
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index cfd0084f1..b709d2b20 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -3169,7 +3169,10 @@ static enum sci_status isci_request_stp_request_construct(struct isci_request *i
status = sci_io_request_construct_basic_sata(ireq);
if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ)) {
+ qc->tf.command == ATA_CMD_FPDMA_READ ||
+ qc->tf.command == ATA_CMD_FPDMA_RECV ||
+ qc->tf.command == ATA_CMD_FPDMA_SEND ||
+ qc->tf.command == ATA_CMD_NCQ_NON_DATA)) {
fis->sector_count = qc->tag << 3;
ireq->tc->type.stp.ncq_tag = qc->tag;
}
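
This hunk, and the matching one in libsas further down, widen the "NCQ
command that carries its tag in the FIS" test to the auxiliary opcodes
(RECEIVE/SEND FPDMA QUEUED, NCQ NON-DATA). Pulled into a helper for
clarity — hypothetical, neither driver defines it:

	static bool tf_is_ncq_cmd(const struct ata_taskfile *tf)
	{
		return tf->command == ATA_CMD_FPDMA_WRITE ||
		       tf->command == ATA_CMD_FPDMA_READ ||
		       tf->command == ATA_CMD_FPDMA_RECV ||
		       tf->command == ATA_CMD_FPDMA_SEND ||
		       tf->command == ATA_CMD_NCQ_NON_DATA;
	}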
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index 8f0ea97cf..d45366761 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -306,6 +306,42 @@ static struct attribute_group iscsi_boot_initiator_attr_group = {
.is_visible = iscsi_boot_ini_attr_is_visible,
};
+/* iBFT ACPI Table attributes */
+iscsi_boot_rd_attr(acpitbl_signature, signature, ISCSI_BOOT_ACPITBL_SIGNATURE);
+iscsi_boot_rd_attr(acpitbl_oem_id, oem_id, ISCSI_BOOT_ACPITBL_OEM_ID);
+iscsi_boot_rd_attr(acpitbl_oem_table_id, oem_table_id,
+ ISCSI_BOOT_ACPITBL_OEM_TABLE_ID);
+
+static struct attribute *acpitbl_attrs[] = {
+ &iscsi_boot_attr_acpitbl_signature.attr,
+ &iscsi_boot_attr_acpitbl_oem_id.attr,
+ &iscsi_boot_attr_acpitbl_oem_table_id.attr,
+ NULL
+};
+
+static umode_t iscsi_boot_acpitbl_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct iscsi_boot_kobj *boot_kobj =
+ container_of(kobj, struct iscsi_boot_kobj, kobj);
+
+ if (attr == &iscsi_boot_attr_acpitbl_signature.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ACPITBL_SIGNATURE);
+ if (attr == &iscsi_boot_attr_acpitbl_oem_id.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ACPITBL_OEM_ID);
+ if (attr == &iscsi_boot_attr_acpitbl_oem_table_id.attr)
+ return boot_kobj->is_visible(boot_kobj->data,
+ ISCSI_BOOT_ACPITBL_OEM_TABLE_ID);
+ return 0;
+}
+
+static struct attribute_group iscsi_boot_acpitbl_attr_group = {
+ .attrs = acpitbl_attrs,
+ .is_visible = iscsi_boot_acpitbl_attr_is_visible,
+};
+
static struct iscsi_boot_kobj *
iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
struct attribute_group *attr_group,
@@ -436,6 +472,32 @@ iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
EXPORT_SYMBOL_GPL(iscsi_boot_create_ethernet);
/**
+ * iscsi_boot_create_acpitbl() - create boot acpi table sysfs dir
+ * @boot_kset: boot kset
+ * @index: not used
+ * @data: driver specific data
+ * @show: attr show function
+ * @is_visible: attr visibility function
+ * @release: release function
+ *
+ * Note: The boot sysfs lib will free the data passed in for the caller
+ * when all refs to the acpitbl kobject have been released.
+ */
+struct iscsi_boot_kobj *
+iscsi_boot_create_acpitbl(struct iscsi_boot_kset *boot_kset, int index,
+ void *data,
+ ssize_t (*show)(void *data, int type, char *buf),
+ umode_t (*is_visible)(void *data, int type),
+ void (*release)(void *data))
+{
+ return iscsi_boot_create_kobj(boot_kset,
+ &iscsi_boot_acpitbl_attr_group,
+ "acpi_header", index, data, show,
+ is_visible, release);
+}
+EXPORT_SYMBOL_GPL(iscsi_boot_create_acpitbl);
+
+/**
* iscsi_boot_create_kset() - creates root sysfs tree
* @set_name: name of root dir
*/
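
A sketch of how a boot-firmware driver might consume the new export added
above; ibft_kset, ibft_data and the three callbacks are hypothetical
caller-side names:

	boot_kobj = iscsi_boot_create_acpitbl(ibft_kset, 0, ibft_data,
					      ibft_show, ibft_visible,
					      ibft_release);
	if (!boot_kobj)
		return -ENOMEM;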
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 2e4c82f83..ace4f1f41 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -131,10 +131,10 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk)
struct iscsi_tcp_conn *tcp_conn;
read_descriptor_t rd_desc;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
if (!conn) {
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
return;
}
tcp_conn = conn->dd_data;
@@ -154,7 +154,7 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk)
/* If we had to (atomically) map a highmem page,
* unmap it now. */
iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static void iscsi_sw_tcp_state_change(struct sock *sk)
@@ -165,10 +165,10 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
struct iscsi_session *session;
void (*old_state_change)(struct sock *);
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
if (!conn) {
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
return;
}
session = conn->session;
@@ -179,7 +179,7 @@ static void iscsi_sw_tcp_state_change(struct sock *sk)
tcp_sw_conn = tcp_conn->dd_data;
old_state_change = tcp_sw_conn->old_state_change;
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
old_state_change(sk);
}
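
The _bh conversions above matter because sk_callback_lock can be taken
from softirq context on the socket's data and state paths; a plain
read_lock() in process context can then deadlock against a writer on the
same CPU. The safe pattern, as now used in both callbacks:

	read_lock_bh(&sk->sk_callback_lock);	/* masks softirqs locally */
	conn = sk->sk_user_data;
	/* ... use conn ... */
	read_unlock_bh(&sk->sk_callback_lock);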
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 6bffd91b9..c051694bf 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2127,7 +2127,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
struct iscsi_conn *conn;
struct iscsi_task *task;
struct iscsi_tm *hdr;
- int rc, age;
+ int age;
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
@@ -2188,10 +2188,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
hdr = &conn->tmhdr;
iscsi_prep_abort_task_pdu(task, hdr);
- if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
- rc = FAILED;
+ if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout))
goto failed;
- }
switch (conn->tmf_state) {
case TMF_SUCCESS:
@@ -2423,7 +2421,7 @@ static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
*
* This will attempt to send a warm target reset.
*/
-int iscsi_eh_target_reset(struct scsi_cmnd *sc)
+static int iscsi_eh_target_reset(struct scsi_cmnd *sc)
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
@@ -2495,7 +2493,6 @@ done:
mutex_unlock(&session->eh_mutex);
return rc;
}
-EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
/**
* iscsi_eh_recover_target - reset target and possibly the session
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 9c706d8c1..935c43095 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -205,7 +205,10 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
task->task_done = sas_ata_task_done;
if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ) {
+ qc->tf.command == ATA_CMD_FPDMA_READ ||
+ qc->tf.command == ATA_CMD_FPDMA_RECV ||
+ qc->tf.command == ATA_CMD_FPDMA_SEND ||
+ qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
/* Need to zero out the tag libata assigned us */
qc->tf.nsect = 0;
}
@@ -548,7 +551,7 @@ static struct ata_port_operations sas_sata_ops = {
static struct ata_port_info sata_port_info = {
.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
- ATA_FLAG_SAS_HOST,
+ ATA_FLAG_SAS_HOST | ATA_FLAG_FPDMA_AUX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 90a3ca5a4..d5bd42059 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -694,6 +694,7 @@ struct lpfc_hba {
uint8_t wwnn[8];
uint8_t wwpn[8];
uint32_t RandomData[7];
+ uint32_t fcp_embed_io;
/* HBA Config Parameters */
uint32_t cfg_ack0;
@@ -757,7 +758,6 @@ struct lpfc_hba {
uint32_t cfg_fdmi_on;
#define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */
#define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */
-#define LPFC_FDMI_SMART_SAN 2 /* SmartSAN supported */
uint32_t cfg_enable_SmartSAN;
lpfc_vpd_t vpd; /* vital product data */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 343ae9482..cfec2eca4 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -4584,15 +4584,14 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
# lpfc_fdmi_on: Controls FDMI support.
# 0 No FDMI support (default)
# 1 Traditional FDMI support
-# 2 Smart SAN support
-# If lpfc_enable_SmartSAN is set 1, the driver sets lpfc_fdmi_on to value 2
-# overwriting the current value. If lpfc_enable_SmartSAN is set 0, the
-# driver uses the current value of lpfc_fdmi_on provided it has value 0 or 1.
-# A value of 2 with lpfc_enable_SmartSAN set to 0 causes the driver to
-# set lpfc_fdmi_on back to 1.
-# Value range [0,2]. Default value is 0.
+# Traditional FDMI support means the driver will assume FDMI-2 support;
+# however, if that fails, it will fall back to FDMI-1.
+# If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
+# If lpfc_enable_SmartSAN is set to 0, the driver uses the current value
+# of lpfc_fdmi_on.
+# Value range [0,1]. Default value is 0.
*/
-LPFC_ATTR_R(fdmi_on, 0, 0, 2, "Enable FDMI support");
+LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support");
/*
# Specifies the maximum number of ELS cmds we can have outstanding (for
@@ -5150,7 +5149,6 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport)
sysfs_remove_bin_file(&shost->shost_dev.kobj, &sysfs_ctlreg_attr);
}
-
/*
* Dynamic FC Host Attributes Support
*/
@@ -5857,14 +5855,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
else
phba->cfg_poll = lpfc_poll;
- /* Ensure fdmi_on and enable_SmartSAN don't conflict */
- if (phba->cfg_enable_SmartSAN) {
- phba->cfg_fdmi_on = LPFC_FDMI_SMART_SAN;
- } else {
- if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN)
- phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
- }
-
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 79e261d2a..a38816e96 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -2322,7 +2322,7 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
memset(ae, 0, 256);
- strncpy(ae->un.AttrString, "Smart SAN Version 1.0",
+ strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
sizeof(ae->un.AttrString));
len = strnlen(ae->un.AttrString,
sizeof(ae->un.AttrString));
@@ -2397,7 +2397,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
uint32_t size;
ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
- ae->un.AttrInt = cpu_to_be32(0);
+ ae->un.AttrInt = cpu_to_be32(1);
size = FOURBYTES + sizeof(uint32_t);
ad->AttrLen = cpu_to_be16(size);
ad->AttrType = cpu_to_be16(RPRT_SMART_SECURITY);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 7f5abb8f5..0498f5760 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -690,16 +690,17 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
if (fabric_param_changed) {
/* Reset FDMI attribute masks based on config parameter */
- if (phba->cfg_fdmi_on == LPFC_FDMI_NO_SUPPORT) {
- vport->fdmi_hba_mask = 0;
- vport->fdmi_port_mask = 0;
- } else {
+ if (phba->cfg_enable_SmartSAN ||
+ (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
/* Setup appropriate attribute masks */
vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
- if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN)
+ if (phba->cfg_enable_SmartSAN)
vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
else
vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
+ } else {
+ vport->fdmi_hba_mask = 0;
+ vport->fdmi_port_mask = 0;
}
}
@@ -1069,7 +1070,10 @@ stop_rr_fcf_flogi:
lpfc_sli4_unreg_all_rpis(vport);
}
}
- lpfc_issue_reg_vfi(vport);
+
+ /* Do not register VFI if the driver aborted FLOGI */
+ if (!lpfc_error_lost_link(irsp))
+ lpfc_issue_reg_vfi(vport);
lpfc_nlp_put(ndlp);
goto out;
}
@@ -4705,6 +4709,144 @@ lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
desc->length = cpu_to_be32(sizeof(desc->info));
}
+void
+lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
+ struct lpfc_vport *vport)
+{
+ desc->tag = cpu_to_be32(RDP_BBC_DESC_TAG);
+
+ desc->bbc_info.port_bbc = cpu_to_be32(
+ vport->fc_sparam.cmn.bbCreditMsb |
+ vport->fc_sparam.cmn.bbCreditlsb << 8);
+ if (vport->phba->fc_topology != LPFC_TOPOLOGY_LOOP)
+ desc->bbc_info.attached_port_bbc = cpu_to_be32(
+ vport->phba->fc_fabparam.cmn.bbCreditMsb |
+ vport->phba->fc_fabparam.cmn.bbCreditlsb << 8);
+ else
+ desc->bbc_info.attached_port_bbc = 0;
+
+ desc->bbc_info.rtt = 0;
+ desc->length = cpu_to_be32(sizeof(desc->bbc_info));
+}
+
+void
+lpfc_rdp_res_oed_temp_desc(struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
+{
+ uint32_t flags;
+
+ desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+ desc->oed_info.hi_alarm =
+ cpu_to_be16(page_a2[SSF_TEMP_HIGH_ALARM]);
+ desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TEMP_LOW_ALARM]);
+ desc->oed_info.hi_warning =
+ cpu_to_be16(page_a2[SSF_TEMP_HIGH_WARNING]);
+ desc->oed_info.lo_warning =
+ cpu_to_be16(page_a2[SSF_TEMP_LOW_WARNING]);
+ flags = 0xf; /* All four are valid */
+ flags |= ((0xf & RDP_OED_TEMPERATURE) << RDP_OED_TYPE_SHIFT);
+ desc->oed_info.function_flags = cpu_to_be32(flags);
+ desc->length = cpu_to_be32(sizeof(desc->oed_info));
+}
+
+void
+lpfc_rdp_res_oed_voltage_desc(struct fc_rdp_oed_sfp_desc *desc,
+ uint8_t *page_a2)
+{
+ uint32_t flags;
+
+ desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+ desc->oed_info.hi_alarm =
+ cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_ALARM]);
+ desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_ALARM]);
+ desc->oed_info.hi_warning =
+ cpu_to_be16(page_a2[SSF_VOLTAGE_HIGH_WARNING]);
+ desc->oed_info.lo_warning =
+ cpu_to_be16(page_a2[SSF_VOLTAGE_LOW_WARNING]);
+ flags = 0xf; /* All four are valid */
+ flags |= ((0xf & RDP_OED_VOLTAGE) << RDP_OED_TYPE_SHIFT);
+ desc->oed_info.function_flags = cpu_to_be32(flags);
+ desc->length = cpu_to_be32(sizeof(desc->oed_info));
+}
+
+void
+lpfc_rdp_res_oed_txbias_desc(struct fc_rdp_oed_sfp_desc *desc,
+ uint8_t *page_a2)
+{
+ uint32_t flags;
+
+ desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+ desc->oed_info.hi_alarm =
+ cpu_to_be16(page_a2[SSF_BIAS_HIGH_ALARM]);
+ desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_BIAS_LOW_ALARM]);
+ desc->oed_info.hi_warning =
+ cpu_to_be16(page_a2[SSF_BIAS_HIGH_WARNING]);
+ desc->oed_info.lo_warning =
+ cpu_to_be16(page_a2[SSF_BIAS_LOW_WARNING]);
+ flags = 0xf; /* All four are valid */
+ flags |= ((0xf & RDP_OED_TXBIAS) << RDP_OED_TYPE_SHIFT);
+ desc->oed_info.function_flags = cpu_to_be32(flags);
+ desc->length = cpu_to_be32(sizeof(desc->oed_info));
+}
+
+void
+lpfc_rdp_res_oed_txpower_desc(struct fc_rdp_oed_sfp_desc *desc,
+ uint8_t *page_a2)
+{
+ uint32_t flags;
+
+ desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+ desc->oed_info.hi_alarm =
+ cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_ALARM]);
+ desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_TXPOWER_LOW_ALARM]);
+ desc->oed_info.hi_warning =
+ cpu_to_be16(page_a2[SSF_TXPOWER_HIGH_WARNING]);
+ desc->oed_info.lo_warning =
+ cpu_to_be16(page_a2[SSF_TXPOWER_LOW_WARNING]);
+ flags = 0xf; /* All four are valid */
+ flags |= ((0xf & RDP_OED_TXPOWER) << RDP_OED_TYPE_SHIFT);
+ desc->oed_info.function_flags = cpu_to_be32(flags);
+ desc->length = cpu_to_be32(sizeof(desc->oed_info));
+}
+
+
+void
+lpfc_rdp_res_oed_rxpower_desc(struct fc_rdp_oed_sfp_desc *desc,
+ uint8_t *page_a2)
+{
+ uint32_t flags;
+
+ desc->tag = cpu_to_be32(RDP_OED_DESC_TAG);
+
+ desc->oed_info.hi_alarm =
+ cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_ALARM]);
+ desc->oed_info.lo_alarm = cpu_to_be16(page_a2[SSF_RXPOWER_LOW_ALARM]);
+ desc->oed_info.hi_warning =
+ cpu_to_be16(page_a2[SSF_RXPOWER_HIGH_WARNING]);
+ desc->oed_info.lo_warning =
+ cpu_to_be16(page_a2[SSF_RXPOWER_LOW_WARNING]);
+ flags = 0xf; /* All four are valid */
+ flags |= ((0xf & RDP_OED_RXPOWER) << RDP_OED_TYPE_SHIFT);
+ desc->oed_info.function_flags = cpu_to_be32(flags);
+ desc->length = cpu_to_be32(sizeof(desc->oed_info));
+}
+
+void
+lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
+ uint8_t *page_a0, struct lpfc_vport *vport)
+{
+ desc->tag = cpu_to_be32(RDP_OPD_DESC_TAG);
+ memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
+ memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
+ memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
+ memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 2);
+ memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
+ desc->length = cpu_to_be32(sizeof(desc->opd_info));
+}
+
int
lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
{
@@ -4776,6 +4918,8 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
if (rdp_cap == 0)
rdp_cap = RDP_CAP_UNKNOWN;
+ if (phba->cfg_link_speed != LPFC_USER_LINK_SPEED_AUTO)
+ rdp_cap |= RDP_CAP_USER_CONFIGURED;
desc->info.port_speed.capabilities = cpu_to_be16(rdp_cap);
desc->length = cpu_to_be32(sizeof(desc->info));
@@ -4875,6 +5019,19 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
lpfc_rdp_res_diag_port_names(&rdp_res->diag_port_names_desc, phba);
lpfc_rdp_res_attach_port_names(&rdp_res->attached_port_names_desc,
vport, ndlp);
+ lpfc_rdp_res_bbc_desc(&rdp_res->bbc_desc, &rdp_context->link_stat,
+ vport);
+ lpfc_rdp_res_oed_temp_desc(&rdp_res->oed_temp_desc,
+ rdp_context->page_a2);
+ lpfc_rdp_res_oed_voltage_desc(&rdp_res->oed_voltage_desc,
+ rdp_context->page_a2);
+ lpfc_rdp_res_oed_txbias_desc(&rdp_res->oed_txbias_desc,
+ rdp_context->page_a2);
+ lpfc_rdp_res_oed_txpower_desc(&rdp_res->oed_txpower_desc,
+ rdp_context->page_a2);
+ lpfc_rdp_res_oed_rxpower_desc(&rdp_res->oed_rxpower_desc,
+ rdp_context->page_a2);
+ lpfc_rdp_res_opd_desc(&rdp_res->opd_desc, rdp_context->page_a0, vport);
fec_size = lpfc_rdp_res_fec_desc(&rdp_res->fec_desc,
&rdp_context->link_stat);
rdp_res->length = cpu_to_be32(fec_size + RDP_DESC_PAYLOAD_SIZE);
@@ -7849,8 +8006,9 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
return;
}
- if ((phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) &&
- (vport->load_flag & FC_ALLOW_FDMI))
+ if ((phba->cfg_enable_SmartSAN ||
+ (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) &&
+ (vport->load_flag & FC_ALLOW_FDMI))
lpfc_start_fdmi(vport);
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 25b5dcd1a..ed2239377 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -4545,7 +4545,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
(!(vport->load_flag & FC_UNLOADING)) &&
(bf_get(lpfc_sli_intf_if_type,
&phba->sli4_hba.sli_intf) ==
- LPFC_SLI_INTF_IF_TYPE_2)) {
+ LPFC_SLI_INTF_IF_TYPE_2) &&
+ (atomic_read(&ndlp->kref.refcount) > 0)) {
mbox->context1 = lpfc_nlp_get(ndlp);
mbox->mbox_cmpl =
lpfc_sli4_unreg_rpi_cmpl_clr;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index dd20412c7..39f0fd000 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -1134,9 +1134,10 @@ struct fc_rdp_link_error_status_desc {
#define RDP_PS_16GB 0x0400
#define RDP_PS_32GB 0x0200
-#define RDP_CAP_UNKNOWN 0x0001
-#define RDP_PS_UNKNOWN 0x0002
-#define RDP_PS_NOT_ESTABLISHED 0x0001
+#define RDP_CAP_USER_CONFIGURED 0x0002
+#define RDP_CAP_UNKNOWN 0x0001
+#define RDP_PS_UNKNOWN 0x0002
+#define RDP_PS_NOT_ESTABLISHED 0x0001
struct fc_rdp_port_speed {
uint16_t capabilities;
@@ -1192,6 +1193,58 @@ struct fc_rdp_sfp_desc {
struct fc_rdp_sfp_info sfp_info;
};
+/* Buffer Credit Descriptor */
+struct fc_rdp_bbc_info {
+ uint32_t port_bbc; /* FC_Port buffer-to-buffer credit */
+ uint32_t attached_port_bbc;
+ uint32_t rtt; /* Round trip time */
+};
+#define RDP_BBC_DESC_TAG 0x00010006
+struct fc_rdp_bbc_desc {
+ uint32_t tag;
+ uint32_t length;
+ struct fc_rdp_bbc_info bbc_info;
+};
+
+#define RDP_OED_TEMPERATURE 0x1
+#define RDP_OED_VOLTAGE 0x2
+#define RDP_OED_TXBIAS 0x3
+#define RDP_OED_TXPOWER 0x4
+#define RDP_OED_RXPOWER 0x5
+
+#define RDP_OED_TYPE_SHIFT 28
+/* Optical Element Data descriptor */
+struct fc_rdp_oed_info {
+ uint16_t hi_alarm;
+ uint16_t lo_alarm;
+ uint16_t hi_warning;
+ uint16_t lo_warning;
+ uint32_t function_flags;
+};
+#define RDP_OED_DESC_TAG 0x00010007
+struct fc_rdp_oed_sfp_desc {
+ uint32_t tag;
+ uint32_t length;
+ struct fc_rdp_oed_info oed_info;
+};
+
+/* Optical Product Data descriptor */
+struct fc_rdp_opd_sfp_info {
+ uint8_t vendor_name[16];
+ uint8_t model_number[16];
+ uint8_t serial_number[16];
+ uint8_t reserved[2];
+ uint8_t revision[2];
+ uint8_t date[8];
+};
+
+#define RDP_OPD_DESC_TAG 0x00010008
+struct fc_rdp_opd_sfp_desc {
+ uint32_t tag;
+ uint32_t length;
+ struct fc_rdp_opd_sfp_info opd_info;
+};
+
struct fc_rdp_req_frame {
uint32_t rdp_command; /* ELS command opcode (0x18)*/
uint32_t rdp_des_length; /* RDP Payload Word 1 */
@@ -1208,7 +1261,14 @@ struct fc_rdp_res_frame {
struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */
struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */
struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */
- struct fc_fec_rdp_desc fec_desc; /* FC Word 34 - 37 */
+ struct fc_rdp_bbc_desc bbc_desc; /* FC Word 34-38*/
+ struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 39-43*/
+ struct fc_rdp_oed_sfp_desc oed_voltage_desc; /* FC word 44-48*/
+ struct fc_rdp_oed_sfp_desc oed_txbias_desc; /* FC word 49-53*/
+ struct fc_rdp_oed_sfp_desc oed_txpower_desc; /* FC word 54-58*/
+ struct fc_rdp_oed_sfp_desc oed_rxpower_desc; /* FC word 59-63*/
+ struct fc_rdp_opd_sfp_desc opd_desc; /* FC word 64-80*/
+ struct fc_fec_rdp_desc fec_desc; /* FC word 81-84*/
};
@@ -1216,7 +1276,10 @@ struct fc_rdp_res_frame {
+ sizeof(struct fc_rdp_sfp_desc) \
+ sizeof(struct fc_rdp_port_speed_desc) \
+ sizeof(struct fc_rdp_link_error_status_desc) \
- + (sizeof(struct fc_rdp_port_name_desc) * 2))
+ + (sizeof(struct fc_rdp_port_name_desc) * 2) \
+ + sizeof(struct fc_rdp_bbc_desc) \
+ + (sizeof(struct fc_rdp_oed_sfp_desc) * 5) \
+ + sizeof(struct fc_rdp_opd_sfp_desc))
/******** FDMI ********/
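
A worked tally of the descriptors just added to RDP_DESC_PAYLOAD_SIZE,
assuming the packed layout that the FC word-range comments in
fc_rdp_res_frame imply (no compiler padding):

	fc_rdp_bbc_desc:          2 + 3 words       =  20 bytes
	fc_rdp_oed_sfp_desc (x5): (2 + 3 words) * 5 = 100 bytes
	fc_rdp_opd_sfp_desc:      2 + 15 words      =  68 bytes
	                          total added       = 188 bytes (47 words)

which matches words 34-80 annotated above, ahead of the relocated FEC
descriptor at words 81-84.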
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 608f9415f..0c7070bf2 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -2557,7 +2557,26 @@ struct lpfc_mbx_memory_dump_type3 {
/* SFF-8472 Table 3.1a Diagnostics: Data Fields Address/Page A2 */
-#define SSF_AW_THRESHOLDS 0
+#define SSF_TEMP_HIGH_ALARM 0
+#define SSF_TEMP_LOW_ALARM 2
+#define SSF_TEMP_HIGH_WARNING 4
+#define SSF_TEMP_LOW_WARNING 6
+#define SSF_VOLTAGE_HIGH_ALARM 8
+#define SSF_VOLTAGE_LOW_ALARM 10
+#define SSF_VOLTAGE_HIGH_WARNING 12
+#define SSF_VOLTAGE_LOW_WARNING 14
+#define SSF_BIAS_HIGH_ALARM 16
+#define SSF_BIAS_LOW_ALARM 18
+#define SSF_BIAS_HIGH_WARNING 20
+#define SSF_BIAS_LOW_WARNING 22
+#define SSF_TXPOWER_HIGH_ALARM 24
+#define SSF_TXPOWER_LOW_ALARM 26
+#define SSF_TXPOWER_HIGH_WARNING 28
+#define SSF_TXPOWER_LOW_WARNING 30
+#define SSF_RXPOWER_HIGH_ALARM 32
+#define SSF_RXPOWER_LOW_ALARM 34
+#define SSF_RXPOWER_HIGH_WARNING 36
+#define SSF_RXPOWER_LOW_WARNING 38
#define SSF_EXT_CAL_CONSTANTS 56
#define SSF_CC_DMI 95
#define SFF_TEMPERATURE_B1 96
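
Each SSF_* value above is the byte offset of a big-endian 16-bit
alarm/warning threshold in the SFF-8472 page A2 diagnostics block, which
is why consecutive defines step by two. Reading one back might look like
this (assuming page_a2 holds the raw page):

	#include <asm/unaligned.h>

	u16 temp_hi_alarm = get_unaligned_be16(&page_a2[SSF_TEMP_HIGH_ALARM]);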
@@ -2865,6 +2884,9 @@ struct lpfc_sli4_parameters {
uint32_t word17;
uint32_t word18;
uint32_t word19;
+#define cfg_ext_embed_cb_SHIFT 0
+#define cfg_ext_embed_cb_MASK 0x00000001
+#define cfg_ext_embed_cb_WORD word19
};
struct lpfc_mbx_get_sli4_parameters {
@@ -3919,6 +3941,9 @@ union lpfc_wqe {
union lpfc_wqe128 {
uint32_t words[32];
struct lpfc_wqe_generic generic;
+ struct fcp_icmnd64_wqe fcp_icmd;
+ struct fcp_iread64_wqe fcp_iread;
+ struct fcp_iwrite64_wqe fcp_iwrite;
struct xmit_seq64_wqe xmit_sequence;
struct gen_req64_wqe gen_req;
};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 90d7b42b4..6029c4839 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -6158,11 +6158,12 @@ lpfc_create_shost(struct lpfc_hba *phba)
* any initial discovery should be completed.
*/
vport->load_flag |= FC_ALLOW_FDMI;
- if (phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) {
+ if (phba->cfg_enable_SmartSAN ||
+ (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
/* Setup appropriate attribute masks */
vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
- if (phba->cfg_fdmi_on == LPFC_FDMI_SMART_SAN)
+ if (phba->cfg_enable_SmartSAN)
vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
else
vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
@@ -7264,8 +7265,15 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.fcp_cq[idx] = qdesc;
/* Create Fast Path FCP WQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
- phba->sli4_hba.wq_ecount);
+ if (phba->fcp_embed_io) {
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ LPFC_WQE128_SIZE,
+ LPFC_WQE128_DEF_COUNT);
+ } else {
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
+ }
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0503 Failed allocate fast-path FCP "
@@ -9510,6 +9518,15 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+ /*
+ * Issue IOs with the CDB embedded in the WQE to minimize the number
+ * of DMAs the firmware has to do. Setting this to 1 also forces
+ * the driver to use 128-byte WQEs for FCP IOs.
+ */
+ if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
+ phba->fcp_embed_io = 1;
+ else
+ phba->fcp_embed_io = 0;
return 0;
}
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index f87f90e9b..12dbe99cc 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -2145,10 +2145,12 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
reg_vfi->e_d_tov = phba->fc_edtov;
reg_vfi->r_a_tov = phba->fc_ratov;
- reg_vfi->bde.addrHigh = putPaddrHigh(phys);
- reg_vfi->bde.addrLow = putPaddrLow(phys);
- reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
- reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ if (phys) {
+ reg_vfi->bde.addrHigh = putPaddrHigh(phys);
+ reg_vfi->bde.addrLow = putPaddrLow(phys);
+ reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
+ reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ }
bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
/* Only FC supports upd bit */
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 193733e8c..56a3df4fd 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -1512,6 +1512,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
if ((mb = phba->sli.mbox_active)) {
if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
(ndlp == (struct lpfc_nodelist *) mb->context2)) {
+ ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
lpfc_nlp_put(ndlp);
mb->context2 = NULL;
mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -1527,6 +1528,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
__lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
+ ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
lpfc_nlp_put(ndlp);
list_del(&mb->list);
phba->sli.mboxq_cnt--;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 2207726b8..70edf21ae 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -2000,10 +2000,9 @@ lpfc_sli_hbqbuf_get(struct list_head *rb_list)
* @phba: Pointer to HBA context object.
* @tag: Tag of the hbq buffer.
*
- * This function is called with hbalock held. This function searches
- * for the hbq buffer associated with the given tag in the hbq buffer
- * list. If it finds the hbq buffer, it returns the hbq_buffer other wise
- * it returns NULL.
+ * This function searches for the hbq buffer associated with the given tag in
+ * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
+ * otherwise it returns NULL.
**/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
@@ -2012,8 +2011,6 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
struct hbq_dmabuf *hbq_buf;
uint32_t hbqno;
- lockdep_assert_held(&phba->hbalock);
-
hbqno = tag >> 16;
if (hbqno >= LPFC_MAX_HBQS)
return NULL;
@@ -2211,6 +2208,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
rpi = pmb->u.mb.un.varWords[0];
vpi = pmb->u.mb.un.varRegLogin.vpi;
lpfc_unreg_login(phba, vpi, rpi, pmb);
+ pmb->vport = vport;
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_NOT_FINISHED)
@@ -4688,6 +4686,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
break;
}
+ phba->fcp_embed_io = 0; /* SLI4 FC support only */
rc = lpfc_sli_config_port(phba, mode);
@@ -6320,10 +6319,12 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
mqe = &mboxq->u.mqe;
phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
- if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
+ if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
phba->hba_flag |= HBA_FCOE_MODE;
- else
+ phba->fcp_embed_io = 0; /* SLI4 FC support only */
+ } else {
phba->hba_flag &= ~HBA_FCOE_MODE;
+ }
if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
LPFC_DCBX_CEE_MODE)
@@ -8218,12 +8219,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
else
command_type = ELS_COMMAND_NON_FIP;
+ if (phba->fcp_embed_io)
+ memset(wqe, 0, sizeof(union lpfc_wqe128));
/* Some of the fields are in the right position already */
memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
- abort_tag = (uint32_t) iocbq->iotag;
- xritag = iocbq->sli4_xritag;
wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
wqe->generic.wqe_com.word10 = 0;
+
+ abort_tag = (uint32_t) iocbq->iotag;
+ xritag = iocbq->sli4_xritag;
/* words0-2 bpl convert bde */
if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
@@ -8372,11 +8376,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.ulpFCP2Rcvy);
bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
/* Always open the exchange */
- bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
if (iocbq->iocb_flag & LPFC_IO_OAS) {
@@ -8387,6 +8389,35 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
(phba->cfg_XLanePriority << 1));
}
}
+ /* Note, word 10 is already initialized to 0 */
+
+ if (phba->fcp_embed_io) {
+ struct lpfc_scsi_buf *lpfc_cmd;
+ struct sli4_sge *sgl;
+ union lpfc_wqe128 *wqe128;
+ struct fcp_cmnd *fcp_cmnd;
+ uint32_t *ptr;
+
+ /* 128 byte wqe support here */
+ wqe128 = (union lpfc_wqe128 *)wqe;
+
+ lpfc_cmd = iocbq->context1;
+ sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ fcp_cmnd = lpfc_cmd->fcp_cmnd;
+
+ /* Word 0-2 - FCP_CMND */
+ wqe128->generic.bde.tus.f.bdeFlags =
+ BUFF_TYPE_BDE_IMMED;
+ wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
+ wqe128->generic.bde.addrHigh = 0;
+ wqe128->generic.bde.addrLow = 88; /* Word 22 */
+
+ bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);
+
+ /* Word 22-29 FCP CMND Payload */
+ ptr = &wqe128->words[22];
+ memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
+ }
break;
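
A worked detail of the embedded-CDB block above (repeated for the IREAD
and ICMND cases below): the BDE is marked immediate and its "address" is
an offset into the WQE itself — word 22, i.e. 22 * 4 = 88 bytes — where
the FCP_CMND payload is then copied. Spelled out (macro names are
illustrative only):

	#define FCP_CMND_WQE_WORD	22			/* payload start */
	#define FCP_CMND_WQE_OFFSET	(FCP_CMND_WQE_WORD * 4)	/* = 88 bytes */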
case CMD_FCP_IREAD64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
@@ -8401,11 +8432,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.ulpFCP2Rcvy);
bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
/* Always open the exchange */
- bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
LPFC_WQE_LENLOC_WORD4);
- bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
if (iocbq->iocb_flag & LPFC_IO_OAS) {
@@ -8416,6 +8445,35 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
(phba->cfg_XLanePriority << 1));
}
}
+ /* Note, word 10 is already initialized to 0 */
+
+ if (phba->fcp_embed_io) {
+ struct lpfc_scsi_buf *lpfc_cmd;
+ struct sli4_sge *sgl;
+ union lpfc_wqe128 *wqe128;
+ struct fcp_cmnd *fcp_cmnd;
+ uint32_t *ptr;
+
+ /* 128 byte wqe support here */
+ wqe128 = (union lpfc_wqe128 *)wqe;
+
+ lpfc_cmd = iocbq->context1;
+ sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ fcp_cmnd = lpfc_cmd->fcp_cmnd;
+
+ /* Word 0-2 - FCP_CMND */
+ wqe128->generic.bde.tus.f.bdeFlags =
+ BUFF_TYPE_BDE_IMMED;
+ wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
+ wqe128->generic.bde.addrHigh = 0;
+ wqe128->generic.bde.addrLow = 88; /* Word 22 */
+
+ bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);
+
+ /* Word 22-29 FCP CMND Payload */
+ ptr = &wqe128->words[22];
+ memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
+ }
break;
case CMD_FCP_ICMND64_CR:
/* word3 iocb=iotag wqe=payload_offset_len */
@@ -8427,13 +8485,11 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
/* word3 iocb=IO_TAG wqe=reserved */
bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
/* Always open the exchange */
- bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
LPFC_WQE_LENLOC_NONE);
- bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
iocbq->iocb.ulpFCP2Rcvy);
if (iocbq->iocb_flag & LPFC_IO_OAS) {
@@ -8444,6 +8500,35 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
(phba->cfg_XLanePriority << 1));
}
}
+ /* Note, word 10 is already initialized to 0 */
+
+ if (phba->fcp_embed_io) {
+ struct lpfc_scsi_buf *lpfc_cmd;
+ struct sli4_sge *sgl;
+ union lpfc_wqe128 *wqe128;
+ struct fcp_cmnd *fcp_cmnd;
+ uint32_t *ptr;
+
+ /* 128 byte wqe support here */
+ wqe128 = (union lpfc_wqe128 *)wqe;
+
+ lpfc_cmd = iocbq->context1;
+ sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ fcp_cmnd = lpfc_cmd->fcp_cmnd;
+
+ /* Word 0-2 - FCP_CMND */
+ wqe128->generic.bde.tus.f.bdeFlags =
+ BUFF_TYPE_BDE_IMMED;
+ wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
+ wqe128->generic.bde.addrHigh = 0;
+ wqe128->generic.bde.addrLow = 88; /* Word 22 */
+
+ bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);
+
+ /* Word 22-29 FCP CMND Payload */
+ ptr = &wqe128->words[22];
+ memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
+ }
break;
case CMD_GEN_REQUEST64_CR:
/* For this command calculate the xmit length of the
@@ -8675,12 +8760,19 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_sglq *sglq;
- union lpfc_wqe wqe;
+ union lpfc_wqe *wqe;
+ union lpfc_wqe128 wqe128;
struct lpfc_queue *wq;
struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
lockdep_assert_held(&phba->hbalock);
+ /*
+ * The WQE can be either 64 or 128 bytes,
+ * so allocate space on the stack assuming the largest.
+ */
+ wqe = (union lpfc_wqe *)&wqe128;
+
if (piocb->sli4_xritag == NO_XRI) {
if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
@@ -8727,7 +8819,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_ERROR;
}
- if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
+ if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
return IOCB_ERROR;
if ((piocb->iocb_flag & LPFC_IO_FCP) ||
@@ -8737,12 +8829,12 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
} else {
wq = phba->sli4_hba.oas_wq;
}
- if (lpfc_sli4_wq_put(wq, &wqe))
+ if (lpfc_sli4_wq_put(wq, wqe))
return IOCB_ERROR;
} else {
if (unlikely(!phba->sli4_hba.els_wq))
return IOCB_ERROR;
- if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+ if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
return IOCB_ERROR;
}
lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
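
The stack-WQE change above relies on a simple aliasing idiom: reserve the
largest (128-byte) WQE on the stack and hand callees a pointer typed as
the common 64-byte view, which works because union lpfc_wqe is the leading
portion of union lpfc_wqe128. In isolation:

	union lpfc_wqe128 wqe128;
	union lpfc_wqe *wqe = (union lpfc_wqe *)&wqe128;

	/* callees may fill either 64 or 128 bytes through 'wqe' */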
@@ -8757,9 +8849,9 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
* pointer from the lpfc_hba struct.
*
* Return codes:
- * IOCB_ERROR - Error
- * IOCB_SUCCESS - Success
- * IOCB_BUSY - Busy
+ * IOCB_ERROR - Error
+ * IOCB_SUCCESS - Success
+ * IOCB_BUSY - Busy
**/
int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 4dc22562a..fa0d531bf 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.0.0.10."
+#define LPFC_DRIVER_VERSION "11.1.0.0."
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -30,4 +30,4 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright(c) 2004-2015 Emulex. All rights reserved."
+#define LPFC_COPYRIGHT "Copyright(c) 2004-2016 Emulex. All rights reserved."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index b3f85def1..c27f4b724 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2013 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -395,7 +395,8 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
/* At this point we are fully registered with SCSI Layer. */
vport->load_flag |= FC_ALLOW_FDMI;
- if (phba->cfg_fdmi_on > LPFC_FDMI_NO_SUPPORT) {
+ if (phba->cfg_enable_SmartSAN ||
+ (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {
/* Setup appropriate attribute masks */
vport->fdmi_hba_mask = phba->pport->fdmi_hba_mask;
vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index bb2381314..a590089b9 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -28,24 +28,23 @@
/* Definitions for the core NCR5380 driver. */
-#define PSEUDO_DMA
-
-#define NCR5380_implementation_fields unsigned char *pdma_base
+#define NCR5380_implementation_fields unsigned char *pdma_base; \
+ int pdma_residual
#define NCR5380_read(reg) macscsi_read(instance, reg)
#define NCR5380_write(reg, value) macscsi_write(instance, reg, value)
-#define NCR5380_pread macscsi_pread
-#define NCR5380_pwrite macscsi_pwrite
-#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize)
+#define NCR5380_dma_xfer_len(instance, cmd, phase) \
+ macscsi_dma_xfer_len(instance, cmd)
+#define NCR5380_dma_recv_setup macscsi_pread
+#define NCR5380_dma_send_setup macscsi_pwrite
+#define NCR5380_dma_residual(instance) (hostdata->pdma_residual)
#define NCR5380_intr macscsi_intr
#define NCR5380_queue_command macscsi_queue_command
#define NCR5380_abort macscsi_abort
#define NCR5380_bus_reset macscsi_bus_reset
#define NCR5380_info macscsi_info
-#define NCR5380_show_info macscsi_show_info
-#define NCR5380_write_info macscsi_write_info
#include "NCR5380.h"
@@ -57,8 +56,6 @@ static int setup_sg_tablesize = -1;
module_param(setup_sg_tablesize, int, 0);
static int setup_use_pdma = -1;
module_param(setup_use_pdma, int, 0);
-static int setup_use_tagged_queuing = -1;
-module_param(setup_use_tagged_queuing, int, 0);
static int setup_hostid = -1;
module_param(setup_hostid, int, 0);
static int setup_toshiba_delay = -1;
@@ -97,8 +94,7 @@ static int __init mac_scsi_setup(char *str)
setup_sg_tablesize = ints[3];
if (ints[0] >= 4)
setup_hostid = ints[4];
- if (ints[0] >= 5)
- setup_use_tagged_queuing = ints[5];
+ /* ints[5] (use_tagged_queuing) is ignored */
if (ints[0] >= 6)
setup_use_pdma = ints[6];
if (ints[0] >= 7)
@@ -109,19 +105,9 @@ static int __init mac_scsi_setup(char *str)
__setup("mac5380=", mac_scsi_setup);
#endif /* !MODULE */
-#ifdef PSEUDO_DMA
-/*
- Pseudo-DMA: (Ove Edlund)
- The code attempts to catch bus errors that occur if one for example
- "trips over the cable".
- XXX: Since bus errors in the PDMA routines never happen on my
- computer, the bus error code is untested.
- If the code works as intended, a bus error results in Pseudo-DMA
- being disabled, meaning that the driver switches to slow handshake.
- If bus errors are NOT extremely rare, this has to be changed.
-*/
-
-#define CP_IO_TO_MEM(s,d,len) \
+/* Pseudo DMA asm originally by Ove Edlund */
+
+#define CP_IO_TO_MEM(s,d,n) \
__asm__ __volatile__ \
(" cmp.w #4,%2\n" \
" bls 8f\n" \
@@ -158,61 +144,73 @@ __asm__ __volatile__ \
" 9: \n" \
".section .fixup,\"ax\"\n" \
" .even\n" \
- "90: moveq.l #1, %2\n" \
+ "91: moveq.l #1, %2\n" \
+ " jra 9b\n" \
+ "94: moveq.l #4, %2\n" \
" jra 9b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
- " .long 1b,90b\n" \
- " .long 3b,90b\n" \
- " .long 31b,90b\n" \
- " .long 32b,90b\n" \
- " .long 33b,90b\n" \
- " .long 34b,90b\n" \
- " .long 35b,90b\n" \
- " .long 36b,90b\n" \
- " .long 37b,90b\n" \
- " .long 5b,90b\n" \
- " .long 7b,90b\n" \
+ " .long 1b,91b\n" \
+ " .long 3b,94b\n" \
+ " .long 31b,94b\n" \
+ " .long 32b,94b\n" \
+ " .long 33b,94b\n" \
+ " .long 34b,94b\n" \
+ " .long 35b,94b\n" \
+ " .long 36b,94b\n" \
+ " .long 37b,94b\n" \
+ " .long 5b,94b\n" \
+ " .long 7b,91b\n" \
".previous" \
- : "=a"(s), "=a"(d), "=d"(len) \
- : "0"(s), "1"(d), "2"(len) \
+ : "=a"(s), "=a"(d), "=d"(n) \
+ : "0"(s), "1"(d), "2"(n) \
: "d0")
static int macscsi_pread(struct Scsi_Host *instance,
unsigned char *dst, int len)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned char *d;
- unsigned char *s;
-
- s = hostdata->pdma_base + (INPUT_DATA_REG << 4);
- d = dst;
-
- /* These conditions are derived from MacOS */
-
- while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
- !(NCR5380_read(STATUS_REG) & SR_REQ))
- ;
-
- if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
- (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)) {
- pr_err("Error in macscsi_pread\n");
- return -1;
+ unsigned char *s = hostdata->pdma_base + (INPUT_DATA_REG << 4);
+ unsigned char *d = dst;
+ int n = len;
+ int transferred;
+
+ while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+ BASR_DRQ | BASR_PHASE_MATCH,
+ BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
+ CP_IO_TO_MEM(s, d, n);
+
+ transferred = d - dst - n;
+ hostdata->pdma_residual = len - transferred;
+
+ /* No bus error. */
+ if (n == 0)
+ return 0;
+
+ /* Target changed phase early? */
+ if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ,
+ BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
+ scmd_printk(KERN_ERR, hostdata->connected,
+ "%s: !REQ and !ACK\n", __func__);
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
+ return 0;
+
+ dsprintk(NDEBUG_PSEUDO_DMA, instance,
+ "%s: bus error (%d/%d)\n", __func__, transferred, len);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ d = dst + transferred;
+ n = len - transferred;
}
- CP_IO_TO_MEM(s, d, len);
-
- if (len != 0) {
- pr_notice("Bus error in macscsi_pread\n");
- return -1;
- }
-
- return 0;
+ scmd_printk(KERN_ERR, hostdata->connected,
+ "%s: phase mismatch or !DRQ\n", __func__);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ return -1;
}
-#define CP_MEM_TO_IO(s,d,len) \
+#define CP_MEM_TO_IO(s,d,n) \
__asm__ __volatile__ \
(" cmp.w #4,%2\n" \
" bls 8f\n" \
@@ -249,59 +247,89 @@ __asm__ __volatile__ \
" 9: \n" \
".section .fixup,\"ax\"\n" \
" .even\n" \
- "90: moveq.l #1, %2\n" \
+ "91: moveq.l #1, %2\n" \
+ " jra 9b\n" \
+ "94: moveq.l #4, %2\n" \
" jra 9b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
- " .long 1b,90b\n" \
- " .long 3b,90b\n" \
- " .long 31b,90b\n" \
- " .long 32b,90b\n" \
- " .long 33b,90b\n" \
- " .long 34b,90b\n" \
- " .long 35b,90b\n" \
- " .long 36b,90b\n" \
- " .long 37b,90b\n" \
- " .long 5b,90b\n" \
- " .long 7b,90b\n" \
+ " .long 1b,91b\n" \
+ " .long 3b,94b\n" \
+ " .long 31b,94b\n" \
+ " .long 32b,94b\n" \
+ " .long 33b,94b\n" \
+ " .long 34b,94b\n" \
+ " .long 35b,94b\n" \
+ " .long 36b,94b\n" \
+ " .long 37b,94b\n" \
+ " .long 5b,94b\n" \
+ " .long 7b,91b\n" \
".previous" \
- : "=a"(s), "=a"(d), "=d"(len) \
- : "0"(s), "1"(d), "2"(len) \
+ : "=a"(s), "=a"(d), "=d"(n) \
+ : "0"(s), "1"(d), "2"(n) \
: "d0")
static int macscsi_pwrite(struct Scsi_Host *instance,
unsigned char *src, int len)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
- unsigned char *s;
- unsigned char *d;
-
- s = src;
- d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4);
-
- /* These conditions are derived from MacOS */
+ unsigned char *s = src;
+ unsigned char *d = hostdata->pdma_base + (OUTPUT_DATA_REG << 4);
+ int n = len;
+ int transferred;
+
+ while (!NCR5380_poll_politely(instance, BUS_AND_STATUS_REG,
+ BASR_DRQ | BASR_PHASE_MATCH,
+ BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
+ CP_MEM_TO_IO(s, d, n);
+
+ transferred = s - src - n;
+ hostdata->pdma_residual = len - transferred;
+
+ /* Target changed phase early? */
+ if (NCR5380_poll_politely2(instance, STATUS_REG, SR_REQ, SR_REQ,
+ BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
+ scmd_printk(KERN_ERR, hostdata->connected,
+ "%s: !REQ and !ACK\n", __func__);
+ if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
+ return 0;
+
+ /* No bus error. */
+ if (n == 0) {
+ if (NCR5380_poll_politely(instance, TARGET_COMMAND_REG,
+ TCR_LAST_BYTE_SENT,
+ TCR_LAST_BYTE_SENT, HZ / 64) < 0)
+ scmd_printk(KERN_ERR, hostdata->connected,
+ "%s: Last Byte Sent timeout\n", __func__);
+ return 0;
+ }
+
+ dsprintk(NDEBUG_PSEUDO_DMA, instance,
+ "%s: bus error (%d/%d)\n", __func__, transferred, len);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
+ s = src + transferred;
+ n = len - transferred;
+ }
- while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) &&
- (!(NCR5380_read(STATUS_REG) & SR_REQ) ||
- (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)))
- ;
+ scmd_printk(KERN_ERR, hostdata->connected,
+ "%s: phase mismatch or !DRQ\n", __func__);
+ NCR5380_dprint(NDEBUG_PSEUDO_DMA, instance);
- if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ)) {
- pr_err("Error in macscsi_pwrite\n");
- return -1;
- }
+ return -1;
+}
- CP_MEM_TO_IO(s, d, len);
+static int macscsi_dma_xfer_len(struct Scsi_Host *instance,
+ struct scsi_cmnd *cmd)
+{
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
- if (len != 0) {
- pr_notice("Bus error in macscsi_pwrite\n");
- return -1;
- }
+ if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
+ cmd->SCp.this_residual < 16)
+ return 0;
- return 0;
+ return cmd->SCp.this_residual;
}
-#endif
#include "NCR5380.c"
@@ -311,8 +339,6 @@ static int macscsi_pwrite(struct Scsi_Host *instance,
static struct scsi_host_template mac_scsi_template = {
.module = THIS_MODULE,
.proc_name = DRV_MODULE_NAME,
- .show_info = macscsi_show_info,
- .write_info = macscsi_write_info,
.name = "Macintosh NCR5380 SCSI",
.info = macscsi_info,
.queuecommand = macscsi_queue_command,
@@ -320,7 +346,7 @@ static struct scsi_host_template mac_scsi_template = {
.eh_bus_reset_handler = macscsi_bus_reset,
.can_queue = 16,
.this_id = 7,
- .sg_tablesize = SG_ALL,
+ .sg_tablesize = 1,
.cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
.cmd_size = NCR5380_CMD_SIZE,
@@ -338,9 +364,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
if (!pio_mem)
return -ENODEV;
-#ifdef PSEUDO_DMA
pdma_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-#endif
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -358,8 +382,6 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
mac_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
mac_scsi_template.this_id = setup_hostid & 7;
- if (setup_use_pdma < 0)
- setup_use_pdma = 0;
instance = scsi_host_alloc(&mac_scsi_template,
sizeof(struct NCR5380_hostdata));
@@ -379,12 +401,9 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
} else
host_flags |= FLAG_NO_PSEUDO_DMA;
-#ifdef SUPPORT_TAGS
- host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
-#endif
host_flags |= setup_toshiba_delay > 0 ? FLAG_TOSHIBA_DELAY : 0;
- error = NCR5380_init(instance, host_flags);
+ error = NCR5380_init(instance, host_flags | FLAG_LATE_DMA_SETUP);
if (error)
goto fail_init;
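The reworked macscsi_pread()/macscsi_pwrite() above no longer give up on the first bus error: they record the shortfall in pdma_residual and resume the transfer where the fault handler stopped. A stripped-down sketch of that retry loop, with trivial stand-ins for NCR5380_poll_politely() and the CP_IO_TO_MEM asm:

/* Stand-in for NCR5380_poll_politely(): report the bus ready here. */
static int drq_and_phase_match(void) { return 1; }

/* Stand-in for the CP_IO_TO_MEM asm: advance *d past the bytes copied and
 * leave the not-yet-copied count in *n (nonzero means a bus error hit). */
static void xfer(unsigned char **d, int *n) { *d += *n; *n = 0; }

static int pdma_read_sketch(unsigned char *dst, int len, int *residual)
{
        unsigned char *d = dst;
        int n = len;

        while (drq_and_phase_match()) {
                xfer(&d, &n);
                *residual = n;          /* bytes still owed to the caller */
                if (n == 0)
                        return 0;       /* clean completion */
                d = dst + (len - n);    /* bus error: resume past what moved */
        }
        return -1;                      /* target dropped DRQ or changed phase */
}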
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index fce414a2c..ca86c885d 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.810.09.00-rc1"
-#define MEGASAS_RELDATE "Jan. 28, 2016"
+#define MEGASAS_VERSION "06.811.02.00-rc1"
+#define MEGASAS_RELDATE "April 12, 2016"
/*
* Device IDs
@@ -1344,6 +1344,8 @@ struct megasas_ctrl_info {
#define SCAN_PD_CHANNEL 0x1
#define SCAN_VD_CHANNEL 0x2
+#define MEGASAS_KDUMP_QUEUE_DEPTH 100
+
enum MR_SCSI_CMD_TYPE {
READ_WRITE_LDIO = 0,
NON_READ_WRITE_LDIO = 1,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index e6ebc7ae2..f4b069045 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2670,17 +2670,6 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
}
/**
- * megasas_reset_device - Device reset handler entry point
- */
-static int megasas_reset_device(struct scsi_cmnd *scmd)
-{
- /*
- * First wait for all commands to complete
- */
- return megasas_generic_reset(scmd);
-}
-
-/**
* megasas_reset_bus_host - Bus & host reset handler entry point
*/
static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
@@ -2702,6 +2691,50 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
}
/**
+ * megasas_task_abort - Issues task abort request to firmware
+ * (supported only for fusion adapters)
+ * @scmd: SCSI command pointer
+ */
+static int megasas_task_abort(struct scsi_cmnd *scmd)
+{
+ int ret;
+ struct megasas_instance *instance;
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+ if (instance->ctrl_context)
+ ret = megasas_task_abort_fusion(scmd);
+ else {
+ sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
+ ret = FAILED;
+ }
+
+ return ret;
+}
+
+/**
+ * megasas_reset_target - Issues target reset request to firmware
+ * (supported only for fusion adapters)
+ * @scmd: SCSI command pointer
+ */
+static int megasas_reset_target(struct scsi_cmnd *scmd)
+{
+ int ret;
+ struct megasas_instance *instance;
+
+ instance = (struct megasas_instance *)scmd->device->host->hostdata;
+
+ if (instance->ctrl_context)
+ ret = megasas_reset_target_fusion(scmd);
+ else {
+ sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
+ ret = FAILED;
+ }
+
+ return ret;
+}
+
+/**
* megasas_bios_param - Returns disk geometry for a disk
* @sdev: device handle
* @bdev: block device
@@ -2969,8 +3002,8 @@ static struct scsi_host_template megasas_template = {
.slave_alloc = megasas_slave_alloc,
.slave_destroy = megasas_slave_destroy,
.queuecommand = megasas_queue_command,
- .eh_device_reset_handler = megasas_reset_device,
- .eh_bus_reset_handler = megasas_reset_bus_host,
+ .eh_target_reset_handler = megasas_reset_target,
+ .eh_abort_handler = megasas_task_abort,
.eh_host_reset_handler = megasas_reset_bus_host,
.eh_timed_out = megasas_reset_timer,
.shost_attrs = megaraid_host_attrs,
@@ -5152,7 +5185,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->instancet->enable_intr(instance);
- dev_err(&instance->pdev->dev, "INIT adapter done\n");
+ dev_info(&instance->pdev->dev, "INIT adapter done\n");
megasas_setup_jbod_map(instance);
@@ -5598,14 +5631,6 @@ static int megasas_io_attach(struct megasas_instance *instance)
host->max_lun = MEGASAS_MAX_LUN;
host->max_cmd_len = 16;
- /* Fusion only supports host reset */
- if (instance->ctrl_context) {
- host->hostt->eh_device_reset_handler = NULL;
- host->hostt->eh_bus_reset_handler = NULL;
- host->hostt->eh_target_reset_handler = megasas_reset_target_fusion;
- host->hostt->eh_abort_handler = megasas_task_abort_fusion;
- }
-
/*
* Notify the mid-layer about the new controller
*/
@@ -5761,13 +5786,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
break;
}
- instance->system_info_buf = pci_zalloc_consistent(pdev,
- sizeof(struct MR_DRV_SYSTEM_INFO),
- &instance->system_info_h);
-
- if (!instance->system_info_buf)
- dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
-
/* Crash dump feature related initialisation*/
instance->drv_buf_index = 0;
instance->drv_buf_alloc = 0;
@@ -5777,14 +5795,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
spin_lock_init(&instance->crashdump_lock);
instance->crash_dump_buf = NULL;
- if (!reset_devices)
- instance->crash_dump_buf = pci_alloc_consistent(pdev,
- CRASH_DMA_BUF_SIZE,
- &instance->crash_dump_h);
- if (!instance->crash_dump_buf)
- dev_err(&pdev->dev, "Can't allocate Firmware "
- "crash dump DMA buffer\n");
-
megasas_poll_wait_aen = 0;
instance->flag_ieee = 0;
instance->ev = NULL;
@@ -5803,11 +5813,26 @@ static int megasas_probe_one(struct pci_dev *pdev,
goto fail_alloc_dma_buf;
}
- instance->pd_info = pci_alloc_consistent(pdev,
- sizeof(struct MR_PD_INFO), &instance->pd_info_h);
+ if (!reset_devices) {
+ instance->system_info_buf = pci_zalloc_consistent(pdev,
+ sizeof(struct MR_DRV_SYSTEM_INFO),
+ &instance->system_info_h);
+ if (!instance->system_info_buf)
+ dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
+
+ instance->pd_info = pci_alloc_consistent(pdev,
+ sizeof(struct MR_PD_INFO), &instance->pd_info_h);
- if (!instance->pd_info)
- dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
+ if (!instance->pd_info)
+ dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
+
+ instance->crash_dump_buf = pci_alloc_consistent(pdev,
+ CRASH_DMA_BUF_SIZE,
+ &instance->crash_dump_h);
+ if (!instance->crash_dump_buf)
+ dev_err(&pdev->dev, "Can't allocate Firmware "
+ "crash dump DMA buffer\n");
+ }
/*
* Initialize locks and queues
@@ -7174,6 +7199,16 @@ static int __init megasas_init(void)
int rval;
/*
* When booted in a kdump kernel, minimize the memory footprint by
* disabling a few features
+ */
+ if (reset_devices) {
+ msix_vectors = 1;
+ rdpq_enable = 0;
+ dual_qdepth_disable = 1;
+ }
+
+ /*
* Announce driver version and other information
*/
pr_info("megasas: %s\n", MEGASAS_VERSION);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 98a848bdf..ec837544f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -257,6 +257,9 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
if (!instance->is_rdpq)
instance->max_fw_cmds = min_t(u16, instance->max_fw_cmds, 1024);
+ if (reset_devices)
+ instance->max_fw_cmds = min(instance->max_fw_cmds,
+ (u16)MEGASAS_KDUMP_QUEUE_DEPTH);
/*
* Reduce the max supported cmds by 1. This is to ensure that the
* reply_q_sz (1 more than the max cmd that driver may send)
@@ -851,7 +854,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
ret = 1;
goto fail_fw_init;
}
- dev_err(&instance->pdev->dev, "Init cmd success\n");
+ dev_info(&instance->pdev->dev, "Init cmd success\n");
ret = 0;
@@ -2759,6 +2762,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
" will reset adapter scsi%d.\n",
instance->host->host_no);
+ megasas_complete_cmd_dpc_fusion((unsigned long)instance);
retval = 1;
goto out;
}
@@ -2766,6 +2770,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
if (reason == MFI_IO_TIMEOUT_OCR) {
dev_info(&instance->pdev->dev,
"MFI IO is timed out, initiating OCR\n");
+ megasas_complete_cmd_dpc_fusion((unsigned long)instance);
retval = 1;
goto out;
}
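Together with MEGASAS_KDUMP_QUEUE_DEPTH and the megasas_init() hunk above, the fusion change caps firmware commands when running in a crash (kdump) kernel. A hedged sketch of the policy; the struct and field names here are illustrative, not the driver's:

#include <stdbool.h>

#define KDUMP_QUEUE_DEPTH 100   /* mirrors MEGASAS_KDUMP_QUEUE_DEPTH */

/* Illustrative adapter state; field names are not the driver's. */
struct adapter_sketch {
        unsigned short max_fw_cmds;
        int msix_vectors;
        bool rdpq_enable;
        bool dual_qdepth_disable;
};

/* In a crash kernel (reset_devices set), trade throughput for footprint. */
static void apply_kdump_limits(struct adapter_sketch *a, bool reset_devices)
{
        if (!reset_devices)
                return;
        a->msix_vectors = 1;            /* a single interrupt vector suffices */
        a->rdpq_enable = false;         /* skip reply-descriptor post queues */
        a->dual_qdepth_disable = true;
        if (a->max_fw_cmds > KDUMP_QUEUE_DEPTH)
                a->max_fw_cmds = KDUMP_QUEUE_DEPTH;
}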
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index dfad5b8c1..a9a659fc2 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.39
+ * mpi2.h Version: 02.00.42
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -100,6 +100,9 @@
* Added MPI2_DIAG_SBR_RELOAD.
* 03-19-15 02.00.38 Bumped MPI2_HEADER_VERSION_UNIT.
* 05-25-15 02.00.39 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-25-15 02.00.40 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 12-15-15 02.00.41 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 01-01-16 02.00.42 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -139,7 +142,7 @@
#define MPI2_VERSION_02_06 (0x0206)
/*Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x27)
+#define MPI2_HEADER_VERSION_UNIT (0x2A)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 9cf09bf7c..95356a82e 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.33
+ * mpi2_cnfg.h Version: 02.00.35
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -183,9 +183,12 @@
* Added MPI2_BIOSPAGE1_OPTIONS_ADVANCED_CONFIG.
* Added AdapterOrderAux fields to BIOS Page 3.
* 03-16-15 02.00.31 Updated for MPI v2.6.
+ * Added Flags field to IO Unit Page 7.
* Added new SAS Phy Event codes
* 05-25-15 02.00.33 Added more defines for the BiosOptions field of
* MPI2_CONFIG_PAGE_BIOS_1.
+ * 08-25-15 02.00.34 Bumped Header Version.
+ * 12-18-15 02.00.35 Added SATADeviceWaitTime to SAS IO Unit Page 4.
* --------------------------------------------------------------------------
*/
@@ -958,13 +961,16 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
U8 Reserved3; /*0x17 */
U32 BoardPowerRequirement; /*0x18 */
U32 PCISlotPowerAllocation; /*0x1C */
- U32 Reserved6; /* 0x20 */
- U32 Reserved7; /* 0x24 */
+/* reserved prior to MPI v2.6 */
+ U8 Flags; /* 0x20 */
+ U8 Reserved6; /* 0x21 */
+ U16 Reserved7; /* 0x22 */
+ U32 Reserved8; /* 0x24 */
} MPI2_CONFIG_PAGE_IO_UNIT_7,
*PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t;
-#define MPI2_IOUNITPAGE7_PAGEVERSION (0x04)
+#define MPI2_IOUNITPAGE7_PAGEVERSION (0x05)
/*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */
#define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0)
@@ -1045,6 +1051,8 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01)
#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02)
+/* defines for IO Unit Page 7 Flags field */
+#define MPI2_IOUNITPAGE7_FLAG_CABLE_POWER_EXC (0x01)
/*IO Unit Page 8 */
@@ -2271,7 +2279,7 @@ typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 {
U8
BootDeviceWaitTime; /*0x24 */
U8
- Reserved4; /*0x25 */
+ SATADeviceWaitTime; /*0x25 */
U16
Reserved5; /*0x26 */
U8
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index c38f624b8..bba56b61d 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -6,7 +6,7 @@
* Title: MPI SCSI initiator mode messages and structures
* Creation Date: June 23, 2006
*
- * mpi2_init.h Version: 02.00.17
+ * mpi2_init.h Version: 02.00.20
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -51,6 +51,9 @@
* Added MPI26_SCSIIO_IOFLAGS_ESCAPE_PASSTHROUGH.
* Added MPI2_SEP_REQ_SLOTSTATUS_DEV_OFF and
* MPI2_SEP_REPLY_SLOTSTATUS_DEV_OFF.
+ * 08-26-15 02.00.18 Added SCSITASKMGMT_MSGFLAGS for Target Reset.
+ * 12-18-15 02.00.19 Added EEDPObservedValue added to SCSI IO Reply message.
+ * 01-04-16 02.00.20 Modified EEDP reported values in SCSI IO Reply message.
* --------------------------------------------------------------------------
*/
@@ -359,8 +362,14 @@ typedef struct _MPI2_SCSI_IO_REPLY {
U16 TaskTag; /*0x20 */
U16 SCSIStatusQualifier; /* 0x22 */
U32 BidirectionalTransferCount; /*0x24 */
- U32 EEDPErrorOffset; /*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/
- U32 Reserved6; /*0x2C */
+ /* MPI 2.5+ only; Reserved in MPI 2.0 */
+ U32 EEDPErrorOffset; /* 0x28 */
+ /* MPI 2.5+ only; Reserved in MPI 2.0 */
+ U16 EEDPObservedAppTag; /* 0x2C */
+ /* MPI 2.5+ only; Reserved in MPI 2.0 */
+ U16 EEDPObservedGuard; /* 0x2E */
+ /* MPI 2.5+ only; Reserved in MPI 2.0 */
+ U32 EEDPObservedRefTag; /* 0x30 */
} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY,
Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t;
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index cf510ed91..8bae305bc 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.26
+ * mpi2_ioc.h Version: 02.00.27
*
* NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
* prefix are for use only on MPI v2.5 products, and must not be used
@@ -134,9 +134,13 @@
* Added Encrypted Hash Extended Image.
* 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS.
* 11-18-14 02.00.25 Updated copyright information.
- * 03-16-15 02.00.26 Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and
+ * 03-16-15 02.00.26 Updated for MPI v2.6.
+ * Added MPI2_EVENT_ACTIVE_CABLE_EXCEPTION and
+ * MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT.
+ * Added MPI26_FW_HEADER_PID_FAMILY_3324_SAS and
* MPI26_FW_HEADER_PID_FAMILY_3516_SAS.
* Added MPI26_CTRL_OP_SHUTDOWN.
+ * 08-25-15 02.00.27 Added IC ARCH Class based signature defines
* --------------------------------------------------------------------------
*/
@@ -168,7 +172,7 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
U16 MsgVersion; /*0x0C */
U16 HeaderVersion; /*0x0E */
U32 Reserved5; /*0x10 */
- U16 Reserved6; /*0x14 */
+ U16 ConfigurationFlags; /* 0x14 */
U8 HostPageSize; /*0x16 */
U8 HostMSIxVectors; /*0x17 */
U16 Reserved8; /*0x18 */
@@ -516,6 +520,7 @@ typedef struct _MPI2_EVENT_NOTIFICATION_REPLY {
#define MPI2_EVENT_TEMP_THRESHOLD (0x0027)
#define MPI2_EVENT_HOST_MESSAGE (0x0028)
#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029)
+#define MPI2_EVENT_ACTIVE_CABLE_EXCEPTION (0x0034)
#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E)
#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F)
@@ -580,7 +585,7 @@ typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE {
} MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE,
Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t;
-/*Power Performance Change Event */
+/*Power Performance Change Event data */
typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE {
U8 CurrentPowerMode; /*0x00 */
@@ -605,6 +610,21 @@ typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE {
#define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05)
#define MPI2_EVENT_PM_MODE_STANDBY (0x06)
+/* Active Cable Exception Event data */
+
+typedef struct _MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT {
+ U32 ActiveCablePowerRequirement; /* 0x00 */
+ U8 ReasonCode; /* 0x04 */
+ U8 ReceptacleID; /* 0x05 */
+ U16 Reserved1; /* 0x06 */
+} MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
+ *PTR_MPI26_EVENT_DATA_ACTIVE_CABLE_EXCEPT,
+ Mpi26EventDataActiveCableExcept_t,
+ *pMpi26EventDataActiveCableExcept_t;
+
+/* defines for ReasonCode field */
+#define MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER (0x00)
+
/*Hard Reset Received Event data */
typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED {
@@ -1366,7 +1386,16 @@ typedef struct _MPI2_FW_IMAGE_HEADER {
/*Signature0 field */
#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
-#define MPI26_FW_HEADER_SIGNATURE0 (0x5AEAA55A)
+/* Last byte is defined by architecture */
+#define MPI26_FW_HEADER_SIGNATURE0_BASE (0x5AEAA500)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_0 (0x5A)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_1 (0x00)
+#define MPI26_FW_HEADER_SIGNATURE0_ARC_2 (0x01)
+/* legacy (0x5AEAA55A) */
+#define MPI26_FW_HEADER_SIGNATURE0 \
+ (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_0)
+#define MPI26_FW_HEADER_SIGNATURE0_3516 \
+ (MPI26_FW_HEADER_SIGNATURE0_BASE+MPI26_FW_HEADER_SIGNATURE0_ARC_1)
/*Signature1 field */
#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
@@ -1778,6 +1807,7 @@ typedef struct _MPI26_IOUNIT_CONTROL_REQUEST {
#define MPI26_CTRL_OP_SAS_PHY_LINK_RESET (0x06)
#define MPI26_CTRL_OP_SAS_PHY_HARD_RESET (0x07)
#define MPI26_CTRL_OP_PHY_CLEAR_ERROR_LOG (0x08)
+#define MPI26_CTRL_OP_LINK_CLEAR_ERROR_LOG (0x09)
#define MPI26_CTRL_OP_SAS_SEND_PRIMITIVE (0x0A)
#define MPI26_CTRL_OP_FORCE_FULL_DISCOVERY (0x0B)
#define MPI26_CTRL_OP_REMOVE_DEVICE (0x0D)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8c44b9c42..751f13ede 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -57,6 +57,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
+#include <linux/ktime.h>
#include <linux/kthread.h>
#include <linux/aer.h>
@@ -654,6 +655,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
case MPI2_EVENT_TEMP_THRESHOLD:
desc = "Temperature Threshold";
break;
+ case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
+ desc = "Active cable exception";
+ break;
}
if (!desc)
@@ -1100,18 +1104,16 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
+ * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
* @ioc: per adapter object
- * Context: ISR conext
+ * Context: non-ISR context
*
- * Called when a Task Management request has completed. We want
- * to flush the other reply queues so all the outstanding IO has been
- * completed back to OS before we process the TM completetion.
+ * Called when a Task Management request has completed.
*
* Return nothing.
*/
void
-mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
+mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
{
struct adapter_reply_queue *reply_q;
@@ -1122,12 +1124,13 @@ mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
return;
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
- if (ioc->shost_recovery)
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery)
return;
/* TMs are on msix_index == 0 */
if (reply_q->msix_index == 0)
continue;
- _base_interrupt(reply_q->vector, (void *)reply_q);
+ synchronize_irq(reply_q->vector);
}
}
@@ -3207,10 +3210,10 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
sg_tablesize = min_t(unsigned short, sg_tablesize,
- SCSI_MAX_SG_CHAIN_SEGMENTS);
+ SG_MAX_SEGMENTS);
pr_warn(MPT3SAS_FMT
"sg_tablesize(%u) is bigger than kernel"
- " defined SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name,
+ " defined SG_CHUNK_SIZE(%u)\n", ioc->name,
sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
}
ioc->shost->sg_tablesize = sg_tablesize;
@@ -4387,7 +4390,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
Mpi2IOCInitRequest_t mpi_request;
Mpi2IOCInitReply_t mpi_reply;
int i, r = 0;
- struct timeval current_time;
+ ktime_t current_time;
u16 ioc_status;
u32 reply_post_free_array_sz = 0;
Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
@@ -4449,9 +4452,8 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
/* This time stamp specifies number of milliseconds
* since epoch ~ midnight January 1, 1970.
*/
- do_gettimeofday(&current_time);
- mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
- (current_time.tv_usec / 1000));
+ current_time = ktime_get_real();
+ mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
if (ioc->logging_level & MPT_DEBUG_INIT) {
__le32 *mfp;
@@ -5424,6 +5426,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
+ if (ioc->hba_mpi_version_belonged == MPI26_VERSION)
+ _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
r = _base_make_ioc_operational(ioc, CAN_SLEEP);
if (r)
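The mpt3sas_base_flush_reply_queues() to mpt3sas_base_sync_reply_irqs() rename also changes behavior: rather than re-running every MSI-X reply handler from the TM path, the driver now waits for in-flight handlers with synchronize_irq() from process context. A sketch of the pattern with illustrative bookkeeping:

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/types.h>

/* Illustrative reply-queue bookkeeping; the real driver keeps more state. */
struct reply_queue_sketch {
        struct list_head list;
        unsigned int msix_index;
        unsigned int vector;
};

/* Wait out in-flight reply-queue handlers instead of re-invoking them.
 * Process context only: synchronize_irq() can sleep. */
static void sync_reply_irqs_sketch(struct list_head *queues, bool bailing_out)
{
        struct reply_queue_sketch *q;

        list_for_each_entry(q, queues, list) {
                if (bailing_out)        /* host reset, removal, PCI error */
                        return;
                if (q->msix_index == 0) /* TMs complete on queue 0 itself */
                        continue;
                synchronize_irq(q->vector);
        }
}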
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 32580b514..892c9be00 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -73,8 +73,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "12.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 12
+#define MPT3SAS_DRIVER_VERSION "13.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 13
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -90,7 +90,7 @@
/*
* Set MPT3SAS_SG_DEPTH value based on user input.
*/
-#define MPT_MAX_PHYS_SEGMENTS SCSI_MAX_SG_SEGMENTS
+#define MPT_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE
#define MPT_MIN_PHYS_SEGMENTS 16
#ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE
@@ -112,6 +112,8 @@
#define MPT3SAS_SAS_QUEUE_DEPTH 254
#define MPT3SAS_RAID_QUEUE_DEPTH 128
+#define MPT3SAS_RAID_MAX_SECTORS 8192
+
#define MPT_NAME_LENGTH 32 /* generic length of strings */
#define MPT_STRING_LENGTH 64
@@ -1234,7 +1236,8 @@ void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid);
void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid);
__le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc,
u16 smid);
-void mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc);
+
+void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
/* hi-priority queue */
u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index e0e4920d0..6bff13e7a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -174,13 +174,13 @@ struct sense_info {
* struct fw_event_work - firmware event struct
* @list: link list framework
* @work: work object (ioc->fault_reset_work_q)
- * @cancel_pending_work: flag set during reset handling
* @ioc: per adapter object
* @device_handle: device handle
* @VF_ID: virtual function id
* @VP_ID: virtual port id
* @ignore: flag meaning this event has been marked to ignore
- * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h
+ * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
+ * @refcount: kref for this event
* @event_data: reply event data payload follows
*
* This object stored on ioc->fw_event_list.
@@ -188,8 +188,6 @@ struct sense_info {
struct fw_event_work {
struct list_head list;
struct work_struct work;
- u8 cancel_pending_work;
- struct delayed_work delayed_work;
struct MPT3SAS_ADAPTER *ioc;
u16 device_handle;
@@ -1911,6 +1909,14 @@ scsih_slave_configure(struct scsi_device *sdev)
(unsigned long long)raid_device->wwid,
raid_device->num_pds, ds);
+ if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ MPT3SAS_RAID_MAX_SECTORS);
+ sdev_printk(KERN_INFO, sdev,
+ "Set queue's max_sector to: %u\n",
+ MPT3SAS_RAID_MAX_SECTORS);
+ }
+
scsih_change_queue_depth(sdev, qdepth);
/* raid transport support */
@@ -2118,7 +2124,6 @@ _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
return 1;
if (ioc->tm_cmds.smid != smid)
return 1;
- mpt3sas_base_flush_reply_queues(ioc);
ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (mpi_reply) {
@@ -2303,6 +2308,9 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
}
}
+ /* sync IRQs in case they were busy during the flush. */
+ mpt3sas_base_sync_reply_irqs(ioc);
+
if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
mpi_reply = ioc->tm_cmds.reply;
@@ -2804,12 +2812,12 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
/*
* Wait on the fw_event to complete. If this returns 1, then
* the event was never executed, and we need a put for the
- * reference the delayed_work had on the fw_event.
+ * reference the work had on the fw_event.
*
* If it did execute, we wait for it to finish, and the put will
* happen from _firmware_event_work()
*/
- if (cancel_delayed_work_sync(&fw_event->delayed_work))
+ if (cancel_work_sync(&fw_event->work))
fw_event_work_put(fw_event);
fw_event_work_put(fw_event);
@@ -3961,7 +3969,7 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
mpi_request->CDB.EEDP32.PrimaryReferenceTag =
- cpu_to_be32(scsi_get_lba(scmd));
+ cpu_to_be32(scsi_prot_ref_tag(scmd));
break;
case SCSI_PROT_DIF_TYPE3:
@@ -7850,6 +7858,7 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
Mpi2EventNotificationReply_t *mpi_reply;
u16 event;
u16 sz;
+ Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
/* events turned off due to host reset or driver unloading */
if (ioc->remove_host || ioc->pci_error_recovery)
@@ -7962,6 +7971,19 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
(Mpi2EventDataTemperature_t *)
mpi_reply->EventData);
break;
+ case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
+ ActiveCableEventData =
+ (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
+ if (ActiveCableEventData->ReasonCode ==
+ MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) {
+ pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d",
+ ioc->name, ActiveCableEventData->ReceptacleID);
+ pr_info("cannot be powered and devices connected to this active cable");
+ pr_info("will not be seen. This active cable");
+ pr_info("requires %d mW of power",
+ ActiveCableEventData->ActiveCablePowerRequirement);
+ }
+ break;
default: /* ignore the rest */
return 1;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index c7c250519..8280046fd 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -704,24 +704,7 @@ static struct pci_device_id mvs_pci_table[] = {
.class_mask = 0,
.driver_data = chip_9445,
},
- {
- .vendor = PCI_VENDOR_ID_MARVELL_EXT,
- .device = 0x9485,
- .subvendor = PCI_ANY_ID,
- .subdevice = 0x9480,
- .class = 0,
- .class_mask = 0,
- .driver_data = chip_9485,
- },
- {
- .vendor = PCI_VENDOR_ID_MARVELL_EXT,
- .device = 0x9485,
- .subvendor = PCI_ANY_ID,
- .subdevice = 0x9485,
- .class = 0,
- .class_mask = 0,
- .driver_data = chip_9485,
- },
+ { PCI_VDEVICE(MARVELL_EXT, 0x9485), chip_9485 }, /* Marvell 9480/9485 (any vendor/model) */
{ PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */
{ PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
{ PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 83cd3ea2d..5b9fcff6c 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -429,7 +429,10 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
if (qc) {
if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ) {
+ qc->tf.command == ATA_CMD_FPDMA_READ ||
+ qc->tf.command == ATA_CMD_FPDMA_RECV ||
+ qc->tf.command == ATA_CMD_FPDMA_SEND ||
+ qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
*tag = qc->tag;
return 1;
}
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index 512037e27..2f689ae7a 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -1,5 +1,3 @@
-#define PSEUDO_DMA
-
/*
* This driver adapted from Drew Eckhardt's Trantor T128 driver
*
@@ -77,7 +75,6 @@
#include <scsi/scsi_host.h>
#include "pas16.h"
-#define AUTOPROBE_IRQ
#include "NCR5380.h"
@@ -377,7 +374,7 @@ static int __init pas16_detect(struct scsi_host_template *tpnt)
instance->io_port = io_port;
- if (NCR5380_init(instance, 0))
+ if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP))
goto out_unregister;
NCR5380_maybe_reset_bus(instance);
@@ -460,7 +457,7 @@ static int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev,
}
/*
- * Function : int NCR5380_pread (struct Scsi_Host *instance,
+ * Function : int pas16_pread (struct Scsi_Host *instance,
* unsigned char *dst, int len)
*
* Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
@@ -472,14 +469,14 @@ static int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev,
* timeout.
*/
-static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
- int len) {
+static inline int pas16_pread(struct Scsi_Host *instance,
+ unsigned char *dst, int len)
+{
register unsigned char *d = dst;
register unsigned short reg = (unsigned short) (instance->io_port +
P_DATA_REG_OFFSET);
register int i = len;
int ii = 0;
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) )
++ii;
@@ -492,13 +489,11 @@ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
instance->host_no);
return -1;
}
- if (ii > hostdata->spin_max_r)
- hostdata->spin_max_r = ii;
return 0;
}
/*
- * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
+ * Function : int pas16_pwrite (struct Scsi_Host *instance,
* unsigned char *src, int len)
*
* Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
@@ -510,13 +505,13 @@ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
* timeout.
*/
-static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src,
- int len) {
+static inline int pas16_pwrite(struct Scsi_Host *instance,
+ unsigned char *src, int len)
+{
register unsigned char *s = src;
register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET);
register int i = len;
int ii = 0;
- struct NCR5380_hostdata *hostdata = shost_priv(instance);
while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) )
++ii;
@@ -529,8 +524,6 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src
instance->host_no);
return -1;
}
- if (ii > hostdata->spin_max_w)
- hostdata->spin_max_w = ii;
return 0;
}
@@ -550,8 +543,6 @@ static struct scsi_host_template driver_template = {
.detect = pas16_detect,
.release = pas16_release,
.proc_name = "pas16",
- .show_info = pas16_show_info,
- .write_info = pas16_write_info,
.info = pas16_info,
.queuecommand = pas16_queue_command,
.eh_abort_handler = pas16_abort,
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h
index d37527717..9fe7f3366 100644
--- a/drivers/scsi/pas16.h
+++ b/drivers/scsi/pas16.h
@@ -103,14 +103,15 @@
#define NCR5380_write(reg, value) ( outb((value),PAS16_io_port(reg)) )
#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize)
+#define NCR5380_dma_recv_setup pas16_pread
+#define NCR5380_dma_send_setup pas16_pwrite
+#define NCR5380_dma_residual(instance) (0)
#define NCR5380_intr pas16_intr
#define NCR5380_queue_command pas16_queue_command
#define NCR5380_abort pas16_abort
#define NCR5380_bus_reset pas16_bus_reset
#define NCR5380_info pas16_info
-#define NCR5380_show_info pas16_show_info
-#define NCR5380_write_info pas16_write_info
/* 15 14 12 10 7 5 3
1101 0100 1010 1000 */
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 062ab34b8..6bd7bf4f4 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -418,8 +418,6 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
pm8001_ha->io_mem[logicalBar].membase =
pci_resource_start(pdev, bar);
- pm8001_ha->io_mem[logicalBar].membase &=
- (u32)PCI_BASE_ADDRESS_MEM_MASK;
pm8001_ha->io_mem[logicalBar].memsize =
pci_resource_len(pdev, bar);
pm8001_ha->io_mem[logicalBar].memvirtaddr =
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 949198c01..dc33dfa8f 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -280,7 +280,10 @@ u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
struct ata_queued_cmd *qc = task->uldd_task;
if (qc) {
if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
- qc->tf.command == ATA_CMD_FPDMA_READ) {
+ qc->tf.command == ATA_CMD_FPDMA_READ ||
+ qc->tf.command == ATA_CMD_FPDMA_RECV ||
+ qc->tf.command == ATA_CMD_FPDMA_SEND ||
+ qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
*tag = qc->tag;
return 1;
}
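The mvsas and pm8001 hunks above make the same fix: the newer queued ATA opcodes also carry NCQ tags, so the tag lookup must accept them. The shared test, sketched against the constants in <linux/ata.h>:

#include <linux/ata.h>
#include <linux/types.h>

/* Any NCQ opcode carries a tag, including the SATA 3.x queued variants. */
static bool ata_cmd_is_ncq_sketch(u8 command)
{
        switch (command) {
        case ATA_CMD_FPDMA_READ:
        case ATA_CMD_FPDMA_WRITE:
        case ATA_CMD_FPDMA_RECV:        /* RECEIVE/SEND FPDMA QUEUED */
        case ATA_CMD_FPDMA_SEND:
        case ATA_CMD_NCQ_NON_DATA:
                return true;
        default:
                return false;
        }
}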
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 8ca50db17..3d5b0a3c2 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -17,3 +17,12 @@ config TCM_QLA2XXX
default n
---help---
Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs
+
+if TCM_QLA2XXX
+config TCM_QLA2XXX_DEBUG
+ bool "TCM_QLA2XXX fabric module DEBUG mode for QLogic 24xx+ series target mode HBAs"
+ default n
+ ---help---
+ Say Y here to enable the TCM_QLA2XXX fabric module DEBUG for QLogic 24xx+ series target mode HBAs.
+ This will include code to enable the SCSI command jammer.
+endif
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5649c200d..a92a62dea 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2548,7 +2548,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (!vha->flags.online)
return;
- if (rsp->msix->cpuid != smp_processor_id()) {
+ if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
/* if kernel does not notify qla of IRQ's CPU change,
* then set it here.
*/
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index b5029e543..15dff7099 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -6,6 +6,7 @@
*/
#include "qla_def.h"
#include <linux/delay.h>
+#include <linux/ktime.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
@@ -1812,7 +1813,6 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
struct host_system_info *phost_info;
struct register_host_info *preg_hsi;
struct new_utsname *p_sysid = NULL;
- struct timeval tv;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
@@ -1886,8 +1886,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
p_sysid->domainname, DOMNAME_LENGTH);
strncpy(phost_info->hostdriver,
QLA2XXX_VERSION, VERSION_LENGTH);
- do_gettimeofday(&tv);
- preg_hsi->utc = (uint64_t)tv.tv_sec;
+ preg_hsi->utc = (uint64_t)ktime_get_real_seconds();
ql_dbg(ql_dbg_init, vha, 0x0149,
"ISP%04X: Host registration with firmware\n",
ha->pdev->device);
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index b6b4cfdd7..54380b434 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1229,7 +1229,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
if (buf == NULL) {
ql_log(ql_log_fatal, vha, 0x010c,
"Unable to allocate memory.\n");
- return -1;
+ return -ENOMEM;
}
for (i = 0; i < n; i++) {
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 5e9392316..9f6012b78 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -3222,7 +3222,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
if (!ha->fcp_prio_cfg) {
ql_log(ql_log_warn, vha, 0x00d5,
- "Unable to allocate memory for fcp priorty data (%x).\n",
+ "Unable to allocate memory for fcp priority data (%x).\n",
FCP_PRIO_CFG_SIZE);
return QLA_FUNCTION_FAILED;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8a44d1541..ca39deb4f 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -637,8 +637,10 @@ static void qlt_free_session_done(struct work_struct *work)
}
/* ha->tgt.sess_lock supposed to be held on entry */
-void qlt_unreg_sess(struct qla_tgt_sess *sess)
+static void qlt_release_session(struct kref *kref)
{
+ struct qla_tgt_sess *sess =
+ container_of(kref, struct qla_tgt_sess, sess_kref);
struct scsi_qla_host *vha = sess->vha;
if (sess->se_sess)
@@ -651,8 +653,16 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
INIT_WORK(&sess->free_work, qlt_free_session_done);
schedule_work(&sess->free_work);
}
-EXPORT_SYMBOL(qlt_unreg_sess);
+void qlt_put_sess(struct qla_tgt_sess *sess)
+{
+ if (!sess)
+ return;
+
+ assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
+ kref_put(&sess->sess_kref, qlt_release_session);
+}
+EXPORT_SYMBOL(qlt_put_sess);
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
@@ -857,12 +867,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
"Timeout: sess %p about to be deleted\n",
sess);
- if (sess->se_sess) {
+ if (sess->se_sess)
ha->tgt.tgt_ops->shutdown_sess(sess);
- ha->tgt.tgt_ops->put_sess(sess);
- } else {
- qlt_unreg_sess(sess);
- }
+ qlt_put_sess(sess);
} else {
schedule_delayed_work(&tgt->sess_del_work,
sess->expires - elapsed);
@@ -917,7 +924,7 @@ static struct qla_tgt_sess *qlt_create_sess(
}
}
- kref_get(&sess->se_sess->sess_kref);
+ kref_get(&sess->sess_kref);
ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
(fcport->flags & FCF_CONF_COMP_SUPPORTED));
@@ -947,6 +954,7 @@ static struct qla_tgt_sess *qlt_create_sess(
sess->s_id = fcport->d_id;
sess->loop_id = fcport->loop_id;
sess->local = local;
+ kref_init(&sess->sess_kref);
INIT_LIST_HEAD(&sess->del_list_entry);
/* Under normal circumstances we want to logout from firmware when
@@ -991,7 +999,7 @@ static struct qla_tgt_sess *qlt_create_sess(
* Take an extra reference to ->sess_kref here to handle qla_tgt_sess
* access across ->tgt.sess_lock reaquire.
*/
- kref_get(&sess->se_sess->sess_kref);
+ kref_get(&sess->sess_kref);
}
return sess;
@@ -1035,7 +1043,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
} else {
- kref_get(&sess->se_sess->sess_kref);
+ kref_get(&sess->sess_kref);
if (sess->deleted) {
qlt_undelete_sess(sess);
@@ -1060,7 +1068,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
fcport->port_name, sess->loop_id);
sess->local = 0;
}
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
@@ -3817,7 +3825,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
* Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
*/
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
@@ -3836,7 +3844,7 @@ out_term:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
@@ -3936,13 +3944,13 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
if (!cmd) {
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(op);
return;
}
/*
- * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
+ * __qlt_do_work() will call qlt_put_sess() to release
* the extra reference taken above by qlt_make_local_sess()
*/
__qlt_do_work(cmd);
@@ -4003,13 +4011,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
/*
* Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
*/
- kref_get(&sess->se_sess->sess_kref);
+ kref_get(&sess->sess_kref);
cmd = qlt_get_tag(vha, sess, atio);
if (!cmd) {
ql_dbg(ql_dbg_io, vha, 0x3062,
"qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
return -ENOMEM;
}
@@ -5911,7 +5919,7 @@ static void qlt_abort_work(struct qla_tgt *tgt,
goto out_term2;
}
- kref_get(&sess->se_sess->sess_kref);
+ kref_get(&sess->sess_kref);
}
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -5924,7 +5932,7 @@ static void qlt_abort_work(struct qla_tgt *tgt,
goto out_term;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
return;
@@ -5935,8 +5943,7 @@ out_term:
qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (sess)
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
}
@@ -5976,7 +5983,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
goto out_term;
}
- kref_get(&sess->se_sess->sess_kref);
+ kref_get(&sess->sess_kref);
}
iocb = a;
@@ -5988,14 +5995,13 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
if (rc != 0)
goto out_term;
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
out_term:
qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
- if (sess)
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
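The qla_target refactor above moves session lifetime tracking off the se_session kref and onto a kref embedded in qla_tgt_sess, with qlt_put_sess() accepting NULL so error paths can drop their "if (sess)" guards. The underlying idiom, sketched with generic names:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct session_sketch {
        struct kref ref;
        /* ... fabric session state ... */
};

static void session_release(struct kref *kref)
{
        struct session_sketch *s = container_of(kref, struct session_sketch, ref);

        kfree(s);               /* the real code schedules teardown work */
}

static void session_put(struct session_sketch *s)
{
        if (!s)                 /* tolerating NULL removes caller checks */
                return;
        kref_put(&s->ref, session_release);
}

static struct session_sketch *session_create(void)
{
        struct session_sketch *s = kzalloc(sizeof(*s), GFP_KERNEL);

        if (s)
                kref_init(&s->ref);     /* refcount starts at 1 */
        return s;
}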
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index d857feeb6..f26c5f60e 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -738,7 +738,6 @@ struct qla_tgt_func_tmpl {
struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
const uint8_t *);
void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
- void (*put_sess)(struct qla_tgt_sess *);
void (*shutdown_sess)(struct qla_tgt_sess *);
};
@@ -930,6 +929,7 @@ struct qla_tgt_sess {
int generation;
struct se_session *se_sess;
+ struct kref sess_kref;
struct scsi_qla_host *vha;
struct qla_tgt *tgt;
@@ -1101,7 +1101,7 @@ extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
extern int qlt_lport_register(void *, u64, u64, u64,
int (*callback)(struct scsi_qla_host *, void *, u64, u64));
extern void qlt_lport_deregister(struct scsi_qla_host *);
-extern void qlt_unreg_sess(struct qla_tgt_sess *);
+void qlt_put_sess(struct qla_tgt_sess *sess);
extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
extern int __init qlt_init(void);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index c1461d225..6643f6fc7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -339,22 +339,6 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
qlt_free_cmd(cmd);
}
-static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
-{
- struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
- struct scsi_qla_host *vha;
- unsigned long flags;
-
- BUG_ON(!sess);
- vha = sess->vha;
-
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- target_sess_cmd_list_set_waiting(se_sess);
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
-
- return 1;
-}
-
static void tcm_qla2xxx_close_session(struct se_session *se_sess)
{
struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
@@ -365,7 +349,8 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
vha = sess->vha;
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- qlt_unreg_sess(sess);
+ target_sess_cmd_list_set_waiting(se_sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
@@ -457,6 +442,10 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
struct se_cmd *se_cmd = &cmd->se_cmd;
struct se_session *se_sess;
struct qla_tgt_sess *sess;
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+ struct se_portal_group *se_tpg;
+ struct tcm_qla2xxx_tpg *tpg;
+#endif
int flags = TARGET_SCF_ACK_KREF;
if (bidi)
@@ -477,6 +466,15 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
return -EINVAL;
}
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+ se_tpg = se_sess->se_tpg;
+ tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg);
+ if (unlikely(tpg->tpg_attrib.jam_host)) {
+ /* return, and don't run target_submit_cmd(), discarding the command */
+ return 0;
+ }
+#endif
+
cmd->vha->tgt_counters.qla_core_sbt_cmd++;
return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
cmd->unpacked_lun, data_length, fcp_task_attr,
@@ -758,23 +756,6 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
}
-static void tcm_qla2xxx_release_session(struct kref *kref)
-{
- struct se_session *se_sess = container_of(kref,
- struct se_session, sess_kref);
-
- qlt_unreg_sess(se_sess->fabric_sess_ptr);
-}
-
-static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
-{
- if (!sess)
- return;
-
- assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
- kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
-}
-
static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
{
assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
@@ -844,6 +825,9 @@ DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+DEF_QLA_TPG_ATTRIB(jam_host);
+#endif
static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
&tcm_qla2xxx_tpg_attrib_attr_generate_node_acls,
@@ -851,6 +835,9 @@ static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
&tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect,
&tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect,
&tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only,
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+ &tcm_qla2xxx_tpg_attrib_attr_jam_host,
+#endif
NULL,
};
@@ -1023,6 +1010,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
tpg->tpg_attrib.demo_mode_write_protect = 1;
tpg->tpg_attrib.cache_dynamic_acls = 1;
tpg->tpg_attrib.demo_mode_login_only = 1;
+ tpg->tpg_attrib.jam_host = 0;
ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
if (ret < 0) {
@@ -1579,7 +1567,6 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
.find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
- .put_sess = tcm_qla2xxx_put_sess,
.shutdown_sess = tcm_qla2xxx_shutdown_sess,
};
@@ -1847,7 +1834,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
.check_stop_free = tcm_qla2xxx_check_stop_free,
.release_cmd = tcm_qla2xxx_release_cmd,
- .shutdown_session = tcm_qla2xxx_shutdown_session,
.close_session = tcm_qla2xxx_close_session,
.sess_get_index = tcm_qla2xxx_sess_get_index,
.sess_get_initiator_sid = NULL,
@@ -1890,7 +1876,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
.check_stop_free = tcm_qla2xxx_check_stop_free,
.release_cmd = tcm_qla2xxx_release_cmd,
- .shutdown_session = tcm_qla2xxx_shutdown_session,
.close_session = tcm_qla2xxx_close_session,
.sess_get_index = tcm_qla2xxx_sess_get_index,
.sess_get_initiator_sid = NULL,
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 3bbf4cb6f..37e026a48 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -34,6 +34,7 @@ struct tcm_qla2xxx_tpg_attrib {
int prod_mode_write_protect;
int demo_mode_login_only;
int fabric_prot_type;
+ int jam_host;
};
struct tcm_qla2xxx_tpg {
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1deb6adc4..75455d4da 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -621,6 +621,9 @@ int scsi_change_queue_depth(struct scsi_device *sdev, int depth)
wmb();
}
+ if (sdev->request_queue)
+ blk_set_queue_depth(sdev->request_queue, depth);
+
return sdev->queue_depth;
}
EXPORT_SYMBOL(scsi_change_queue_depth);
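
A minimal usage sketch for the change above (not part of the patch): a
low-level driver's ->change_queue_depth handler clamping to a hypothetical
per-HBA limit (MY_HW_MAX_QD) before calling the helper, which now also
propagates the depth to the block layer via blk_set_queue_depth():

	#include <scsi/scsi_device.h>

	static int my_change_queue_depth(struct scsi_device *sdev, int qdepth)
	{
		if (qdepth > MY_HW_MAX_QD)	/* hypothetical HBA limit */
			qdepth = MY_HW_MAX_QD;
		/* updates sdev->queue_depth and the request queue depth */
		return scsi_change_queue_depth(sdev, qdepth);
	}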
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index ce79de822..b1383a714 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -293,3 +293,56 @@ int scsi_set_sense_information(u8 *buf, int buf_len, u64 info)
return 0;
}
EXPORT_SYMBOL(scsi_set_sense_information);
+
+/**
+ * scsi_set_sense_field_pointer - set the field pointer sense key
+ * specific information in a formatted sense data buffer
+ * @buf: Where to build sense data
+ * @buf_len: buffer length
+ * @fp: field pointer to be set
+ * @bp: bit pointer to be set
+ * @cd: command/data bit
+ *
+ * Return value:
+ * 0 on success or -EINVAL for invalid sense buffer length
+ */
+int scsi_set_sense_field_pointer(u8 *buf, int buf_len, u16 fp, u8 bp, bool cd)
+{
+ u8 *ucp, len;
+
+ if ((buf[0] & 0x7f) == 0x72) {
+ len = buf[7];
+		ucp = (u8 *)scsi_sense_desc_find(buf, len + 8, 2);
+ if (!ucp) {
+ buf[7] = len + 8;
+ ucp = buf + 8 + len;
+ }
+
+ if (buf_len < len + 8)
+ /* Not enough room for info */
+ return -EINVAL;
+
+ ucp[0] = 2;
+ ucp[1] = 6;
+ ucp[4] = 0x80; /* Valid bit */
+ if (cd)
+ ucp[4] |= 0x40;
+ if (bp < 0x8)
+ ucp[4] |= 0x8 | bp;
+ put_unaligned_be16(fp, &ucp[5]);
+ } else if ((buf[0] & 0x7f) == 0x70) {
+ len = buf[7];
+ if (len < 18)
+ buf[7] = 18;
+
+ buf[15] = 0x80;
+ if (cd)
+ buf[15] |= 0x40;
+ if (bp < 0x8)
+ buf[15] |= 0x8 | bp;
+ put_unaligned_be16(fp, &buf[16]);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(scsi_set_sense_field_pointer);
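
A usage sketch for the new helper (illustrative, assuming the usual SCSI
headers): build descriptor-format sense data with the existing
scsi_build_sense_buffer() helper, then flag CDB byte 2, bit 5 as the
offending field:

	u8 sense[SCSI_SENSE_BUFFERSIZE];

	memset(sense, 0, sizeof(sense));
	/* 1 => descriptor format (0x72); 0x24/0x00 => INVALID FIELD IN CDB */
	scsi_build_sense_buffer(1, sense, ILLEGAL_REQUEST, 0x24, 0x0);
	/* fp=2, bp=5, cd=true: the error is in the CDB, byte 2 bit 5 */
	scsi_set_sense_field_pointer(sense, sizeof(sense), 2, 5, true);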
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index f3d69a98c..0f9ba41e2 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6,23 +6,15 @@
* anything out of the ordinary is seen.
* ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
*
- * This version is more generic, simulating a variable number of disk
- * (or disk like devices) sharing a common amount of RAM. To be more
- * realistic, the simulated devices have the transport attributes of
- * SAS disks.
+ * Copyright (C) 2001 - 2016 Douglas Gilbert
*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
*
* For documentation see http://sg.danny.cz/sg/sdebug26.html
*
- * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
- * dpg: work for devfs large number of disks [20010809]
- * forked for lk 2.5 series [20011216, 20020101]
- * use vmalloc() more inquiry+mode_sense [20020302]
- * add timers for delayed responses [20020721]
- * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
- * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
- * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
- * module options to "modprobe scsi_debug num_tgts=2" [20021221]
*/
@@ -32,7 +24,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/timer.h>
+#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -49,6 +41,7 @@
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
+#include <linux/uuid.h>
#include <net/checksum.h>
@@ -66,8 +59,9 @@
#include "sd.h"
#include "scsi_logging.h"
-#define SCSI_DEBUG_VERSION "1.85"
-static const char *scsi_debug_version_date = "20141022";
+/* make sure inq_product_rev string corresponds to this version */
+#define SDEBUG_VERSION "1.86"
+static const char *sdebug_version_date = "20160430";
#define MY_NAME "scsi_debug"
@@ -102,7 +96,6 @@ static const char *scsi_debug_version_date = "20141022";
/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
-
/* Default values for driver parameters */
#define DEF_NUM_HOST 1
#define DEF_NUM_TGTS 1
@@ -111,7 +104,7 @@ static const char *scsi_debug_version_date = "20141022";
* (id 0) containing 1 logical unit (lun 0). That is 1 device.
*/
#define DEF_ATO 1
-#define DEF_DELAY 1 /* if > 0 unit is a jiffy */
+#define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_MB 8
#define DEF_DIF 0
#define DEF_DIX 0
@@ -131,9 +124,9 @@ static const char *scsi_debug_version_date = "20141022";
#define DEF_OPTS 0
#define DEF_OPT_BLKS 1024
#define DEF_PHYSBLK_EXP 0
-#define DEF_PTYPE 0
+#define DEF_PTYPE TYPE_DISK
#define DEF_REMOVABLE false
-#define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */
+#define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
#define DEF_SECTOR_SIZE 512
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
@@ -143,43 +136,54 @@ static const char *scsi_debug_version_date = "20141022";
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
#define DEF_STRICT 0
-#define DELAY_OVERRIDDEN -9999
-
-/* bit mask values for scsi_debug_opts */
-#define SCSI_DEBUG_OPT_NOISE 1
-#define SCSI_DEBUG_OPT_MEDIUM_ERR 2
-#define SCSI_DEBUG_OPT_TIMEOUT 4
-#define SCSI_DEBUG_OPT_RECOVERED_ERR 8
-#define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
-#define SCSI_DEBUG_OPT_DIF_ERR 32
-#define SCSI_DEBUG_OPT_DIX_ERR 64
-#define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
-#define SCSI_DEBUG_OPT_SHORT_TRANSFER 0x100
-#define SCSI_DEBUG_OPT_Q_NOISE 0x200
-#define SCSI_DEBUG_OPT_ALL_TSF 0x400
-#define SCSI_DEBUG_OPT_RARE_TSF 0x800
-#define SCSI_DEBUG_OPT_N_WCE 0x1000
-#define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
-#define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
-#define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
+#define DEF_STATISTICS false
+#define DEF_SUBMIT_QUEUES 1
+#define DEF_UUID_CTL 0
+#define JDELAY_OVERRIDDEN -9999
+
+#define SDEBUG_LUN_0_VAL 0
+
+/* bit mask values for sdebug_opts */
+#define SDEBUG_OPT_NOISE 1
+#define SDEBUG_OPT_MEDIUM_ERR 2
+#define SDEBUG_OPT_TIMEOUT 4
+#define SDEBUG_OPT_RECOVERED_ERR 8
+#define SDEBUG_OPT_TRANSPORT_ERR 16
+#define SDEBUG_OPT_DIF_ERR 32
+#define SDEBUG_OPT_DIX_ERR 64
+#define SDEBUG_OPT_MAC_TIMEOUT 128
+#define SDEBUG_OPT_SHORT_TRANSFER 0x100
+#define SDEBUG_OPT_Q_NOISE 0x200
+#define SDEBUG_OPT_ALL_TSF 0x400
+#define SDEBUG_OPT_RARE_TSF 0x800
+#define SDEBUG_OPT_N_WCE 0x1000
+#define SDEBUG_OPT_RESET_NOISE 0x2000
+#define SDEBUG_OPT_NO_CDB_NOISE 0x4000
+#define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
+ SDEBUG_OPT_RESET_NOISE)
+#define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
+ SDEBUG_OPT_TRANSPORT_ERR | \
+ SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
+ SDEBUG_OPT_SHORT_TRANSFER)
/* When "every_nth" > 0 then modulo "every_nth" commands:
- * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
+ * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
* - a RECOVERED_ERROR is simulated on successful read and write
- * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
+ * commands if SDEBUG_OPT_RECOVERED_ERR is set.
* - a TRANSPORT_ERROR is simulated on successful read and write
- * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
+ * commands if SDEBUG_OPT_TRANSPORT_ERR is set.
*
* When "every_nth" < 0 then after "- every_nth" commands:
- * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
+ * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set
* - a RECOVERED_ERROR is simulated on successful read and write
- * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
+ * commands if SDEBUG_OPT_RECOVERED_ERR is set.
* - a TRANSPORT_ERROR is simulated on successful read and write
- * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
- * This will continue until some other action occurs (e.g. the user
- * writing a new value (other than -1 or 1) to every_nth via sysfs).
+ * commands if SDEBUG_OPT_TRANSPORT_ERR is set.
+ * This will continue on every subsequent command until some other action
+ * occurs (e.g. the user writing a new value (other than -1 or 1) to
+ * every_nth via sysfs).
*/
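
A sketch of the cadence the comment above describes (hypothetical helper
name, not in the patch):

	static inline bool sdeb_nth_trigger(int every_nth, int cmnd_count)
	{
		if (every_nth > 0)	/* every Nth command */
			return (cmnd_count % every_nth) == 0;
		if (every_nth < 0)	/* every command after the first -N */
			return cmnd_count >= -every_nth;
		return false;
	}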
-/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs)are returned in
+/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
* priority order. In the subset implemented here lower numbers have higher
* priority. The UA numbers should be a sequence starting from 0 with
* SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
@@ -192,11 +196,7 @@ static const char *scsi_debug_version_date = "20141022";
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
#define SDEBUG_NUM_UAS 7
-/* for check_readiness() */
-#define UAS_ONLY 1 /* check for UAs only */
-#define UAS_TUR 0 /* if no UAs then check if media access possible */
-
-/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
+/* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
* sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
@@ -205,21 +205,108 @@ static const char *scsi_debug_version_date = "20141022";
* or "peripheral device" addressing (value 0) */
#define SAM2_LUN_ADDRESS_METHOD 0
-/* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
- * (for response) at one time. Can be reduced by max_queue option. Command
- * responses are not queued when delay=0 and ndelay=0. The per-device
- * DEF_CMD_PER_LUN can be changed via sysfs:
- * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
- * SCSI_DEBUG_CANQUEUE. */
-#define SCSI_DEBUG_CANQUEUE_WORDS 9 /* a WORD is bits in a long */
-#define SCSI_DEBUG_CANQUEUE (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
+/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
+ * (for response) per submit queue at one time. Can be reduced by max_queue
+ * option. Command responses are not queued when jdelay=0 and ndelay=0. The
+ * per-device DEF_CMD_PER_LUN can be changed via sysfs:
+ * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
+ * but cannot exceed SDEBUG_CANQUEUE.
+ */
+#define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
+#define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
#define DEF_CMD_PER_LUN 255
-#if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
-#warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
-#endif
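
Worked numbers for the new per-queue limit (illustrative): with 64-bit
longs, SDEBUG_CANQUEUE = 3 * BITS_PER_LONG = 192 command slots per submit
queue (96 on 32-bit builds), so DEF_CMD_PER_LUN (255) can now exceed it,
which is presumably why the old compile-time #warning is dropped.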
+#define F_D_IN 1
+#define F_D_OUT 2
+#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
+#define F_D_UNKN 8
+#define F_RL_WLUN_OK 0x10
+#define F_SKIP_UA 0x20
+#define F_DELAY_OVERR 0x40
+#define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
+#define F_SA_HIGH 0x100 /* as used by variable length cdbs */
+#define F_INV_OP 0x200
+#define F_FAKE_RW 0x400
+#define F_M_ACCESS 0x800 /* media access */
+
+#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
+#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
+#define FF_SA (F_SA_HIGH | F_SA_LOW)
+
+#define SDEBUG_MAX_PARTS 4
+
+#define SDEBUG_MAX_CMD_LEN 32
+
+
+struct sdebug_dev_info {
+ struct list_head dev_list;
+ unsigned int channel;
+ unsigned int target;
+ u64 lun;
+ uuid_be lu_name;
+ struct sdebug_host_info *sdbg_host;
+ unsigned long uas_bm[1];
+ atomic_t num_in_q;
+ atomic_t stopped;
+ bool used;
+};
+
+struct sdebug_host_info {
+ struct list_head host_list;
+ struct Scsi_Host *shost;
+ struct device dev;
+ struct list_head dev_info_list;
+};
+
+#define to_sdebug_host(d) \
+ container_of(d, struct sdebug_host_info, dev)
+
+struct sdebug_defer {
+ struct hrtimer hrt;
+ struct execute_work ew;
+ int sqa_idx; /* index of sdebug_queue array */
+ int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
+ int issuing_cpu;
+};
+
+struct sdebug_queued_cmd {
+ /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
+ * instance indicates this slot is in use.
+ */
+ struct sdebug_defer *sd_dp;
+ struct scsi_cmnd *a_cmnd;
+ unsigned int inj_recovered:1;
+ unsigned int inj_transport:1;
+ unsigned int inj_dif:1;
+ unsigned int inj_dix:1;
+ unsigned int inj_short:1;
+};
+
+struct sdebug_queue {
+ struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
+ unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
+ spinlock_t qc_lock;
+ atomic_t blocked; /* to temporarily stop more being queued */
+};
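
A hedged sketch of how a free slot in qc_arr[] might be claimed under
qc_lock; sdeb_claim_slot() is a hypothetical name, and the real driver's
bookkeeping (retired_max_queue, num_in_q accounting) is more involved:

	static int sdeb_claim_slot(struct sdebug_queue *sqp)
	{
		unsigned long iflags;
		int k;

		spin_lock_irqsave(&sqp->qc_lock, iflags);
		k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
		if (k < sdebug_max_queue)
			set_bit(k, sqp->in_use_bm);	/* qc_arr[k] is now ours */
		spin_unlock_irqrestore(&sqp->qc_lock, iflags);
		return k < sdebug_max_queue ? k : -1;
	}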
+
+static atomic_t sdebug_cmnd_count; /* number of incoming commands */
+static atomic_t sdebug_completions; /* count of deferred completions */
+static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
+static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
+
+struct opcode_info_t {
+ u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
+ /* for terminating element */
+ u8 opcode; /* if num_attached > 0, preferred */
+ u16 sa; /* service action */
+ u32 flags; /* OR-ed set of SDEB_F_* */
+ int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
+ const struct opcode_info_t *arrp; /* num_attached elements or NULL */
+ u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
+ /* ignore cdb bytes after position 15 */
+};
-/* SCSI opcodes (first byte of cdb) mapped onto these indexes */
+/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {
SDEB_I_INVALID_OPCODE = 0,
SDEB_I_INQUIRY = 1,
@@ -254,6 +341,7 @@ enum sdeb_opcode_index {
SDEB_I_LAST_ELEMENT = 30, /* keep this last */
};
+
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
@@ -274,7 +362,7 @@ static const unsigned char opcode_ind_arr[256] = {
0, 0, 0, SDEB_I_XDWRITEREAD, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
SDEB_I_RELEASE,
0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
-/* 0x60; 0x60->0x7d are reserved */
+/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, SDEB_I_VARIABLE_LEN,
@@ -297,24 +385,6 @@ static const unsigned char opcode_ind_arr[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
-#define F_D_IN 1
-#define F_D_OUT 2
-#define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
-#define F_D_UNKN 8
-#define F_RL_WLUN_OK 0x10
-#define F_SKIP_UA 0x20
-#define F_DELAY_OVERR 0x40
-#define F_SA_LOW 0x80 /* cdb byte 1, bits 4 to 0 */
-#define F_SA_HIGH 0x100 /* as used by variable length cdbs */
-#define F_INV_OP 0x200
-#define F_FAKE_RW 0x400
-#define F_M_ACCESS 0x800 /* media access */
-
-#define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
-#define FF_DIRECT_IO (F_M_ACCESS | F_FAKE_RW)
-#define FF_SA (F_SA_HIGH | F_SA_LOW)
-
-struct sdebug_dev_info;
static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -337,18 +407,6 @@ static int resp_xdwriteread_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
-struct opcode_info_t {
- u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff
- * for terminating element */
- u8 opcode; /* if num_attached > 0, preferred */
- u16 sa; /* service action */
- u32 flags; /* OR-ed set of SDEB_F_* */
- int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
- const struct opcode_info_t *arrp; /* num_attached elements or NULL */
- u8 len_mask[16]; /* len=len_mask[0], then mask for cdb[1]... */
- /* ignore cdb bytes after position 15 */
-};
-
static const struct opcode_info_t msense_iarr[1] = {
{0, 0x1a, 0, F_D_IN, NULL, NULL,
{6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
@@ -509,61 +567,52 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
-struct sdebug_scmd_extra_t {
- bool inj_recovered;
- bool inj_transport;
- bool inj_dif;
- bool inj_dix;
- bool inj_short;
-};
-
-static int scsi_debug_add_host = DEF_NUM_HOST;
-static int scsi_debug_ato = DEF_ATO;
-static int scsi_debug_delay = DEF_DELAY;
-static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
-static int scsi_debug_dif = DEF_DIF;
-static int scsi_debug_dix = DEF_DIX;
-static int scsi_debug_dsense = DEF_D_SENSE;
-static int scsi_debug_every_nth = DEF_EVERY_NTH;
-static int scsi_debug_fake_rw = DEF_FAKE_RW;
-static unsigned int scsi_debug_guard = DEF_GUARD;
-static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
-static int scsi_debug_max_luns = DEF_MAX_LUNS;
-static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
+static int sdebug_add_host = DEF_NUM_HOST;
+static int sdebug_ato = DEF_ATO;
+static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
+static int sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
+static int sdebug_dif = DEF_DIF;
+static int sdebug_dix = DEF_DIX;
+static int sdebug_dsense = DEF_D_SENSE;
+static int sdebug_every_nth = DEF_EVERY_NTH;
+static int sdebug_fake_rw = DEF_FAKE_RW;
+static unsigned int sdebug_guard = DEF_GUARD;
+static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
+static int sdebug_max_luns = DEF_MAX_LUNS;
+static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
-static int scsi_debug_ndelay = DEF_NDELAY;
-static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
-static int scsi_debug_no_uld = 0;
-static int scsi_debug_num_parts = DEF_NUM_PARTS;
-static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
-static int scsi_debug_opt_blks = DEF_OPT_BLKS;
-static int scsi_debug_opts = DEF_OPTS;
-static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
-static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
-static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
-static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
-static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
-static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
-static unsigned int scsi_debug_lbpu = DEF_LBPU;
-static unsigned int scsi_debug_lbpws = DEF_LBPWS;
-static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
-static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
-static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
-static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
-static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
-static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
-static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
-static bool scsi_debug_removable = DEF_REMOVABLE;
-static bool scsi_debug_clustering;
-static bool scsi_debug_host_lock = DEF_HOST_LOCK;
-static bool scsi_debug_strict = DEF_STRICT;
+static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
+static int sdebug_no_lun_0 = DEF_NO_LUN_0;
+static int sdebug_no_uld;
+static int sdebug_num_parts = DEF_NUM_PARTS;
+static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
+static int sdebug_opt_blks = DEF_OPT_BLKS;
+static int sdebug_opts = DEF_OPTS;
+static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
+static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
+static int sdebug_scsi_level = DEF_SCSI_LEVEL;
+static int sdebug_sector_size = DEF_SECTOR_SIZE;
+static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
+static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
+static unsigned int sdebug_lbpu = DEF_LBPU;
+static unsigned int sdebug_lbpws = DEF_LBPWS;
+static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
+static unsigned int sdebug_lbprz = DEF_LBPRZ;
+static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
+static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
+static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
+static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
+static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
+static int sdebug_uuid_ctl = DEF_UUID_CTL;
+static bool sdebug_removable = DEF_REMOVABLE;
+static bool sdebug_clustering;
+static bool sdebug_host_lock = DEF_HOST_LOCK;
+static bool sdebug_strict = DEF_STRICT;
static bool sdebug_any_injecting_opt;
-
-static atomic_t sdebug_cmnd_count;
-static atomic_t sdebug_completions;
-static atomic_t sdebug_a_tsf; /* counter of 'almost' TSFs */
-
-#define DEV_READONLY(TGT) (0)
+static bool sdebug_verbose;
+static bool have_dif_prot;
+static bool sdebug_statistics = DEF_STATISTICS;
+static bool sdebug_mq_active;
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity; /* in sectors */
@@ -574,59 +623,10 @@ static int sdebug_heads; /* heads per disk */
static int sdebug_cylinders_per; /* cylinders per surface */
static int sdebug_sectors_per; /* sectors per cylinder */
-#define SDEBUG_MAX_PARTS 4
-
-#define SCSI_DEBUG_MAX_CMD_LEN 32
-
-static unsigned int scsi_debug_lbp(void)
-{
- return ((0 == scsi_debug_fake_rw) &&
- (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
-}
-
-struct sdebug_dev_info {
- struct list_head dev_list;
- unsigned int channel;
- unsigned int target;
- u64 lun;
- struct sdebug_host_info *sdbg_host;
- unsigned long uas_bm[1];
- atomic_t num_in_q;
- char stopped; /* TODO: should be atomic */
- bool used;
-};
-
-struct sdebug_host_info {
- struct list_head host_list;
- struct Scsi_Host *shost;
- struct device dev;
- struct list_head dev_info_list;
-};
-
-#define to_sdebug_host(d) \
- container_of(d, struct sdebug_host_info, dev)
-
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);
-
-struct sdebug_hrtimer { /* ... is derived from hrtimer */
- struct hrtimer hrt; /* must be first element */
- int qa_indx;
-};
-
-struct sdebug_queued_cmd {
- /* in_use flagged by a bit in queued_in_use_bm[] */
- struct timer_list *cmnd_timerp;
- struct tasklet_struct *tletp;
- struct sdebug_hrtimer *sd_hrtp;
- struct scsi_cmnd * a_cmnd;
-};
-static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
-static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
-
-
-static unsigned char * fake_storep; /* ramdisk storage */
+static unsigned char *fake_storep; /* ramdisk storage */
static struct sd_dif_tuple *dif_storep; /* protection info */
static void *map_storep; /* provisioning map */
@@ -640,7 +640,9 @@ static int dix_writes;
static int dix_reads;
static int dif_errors;
-static DEFINE_SPINLOCK(queued_arr_lock);
+static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
+static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
+
static DEFINE_RWLOCK(atomic_rw);
static char sdebug_proc_name[] = MY_NAME;
@@ -662,19 +664,22 @@ static const int illegal_condition_result =
static const int device_qfull_result =
(DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
-static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
- 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
- 0, 0, 0, 0};
-static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
- 0, 0, 0x2, 0x4b};
-static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
- 0, 0, 0x0, 0x0};
+
+/* Only do the extra work involved in logical block provisioning if one or
+ * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
+ * real reads and writes (i.e. not skipping them for speed).
+ */
+static inline bool scsi_debug_lbp(void)
+{
+ return 0 == sdebug_fake_rw &&
+ (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
+}
static void *fake_store(unsigned long long lba)
{
lba = do_div(lba, sdebug_store_sectors);
- return fake_storep + lba * scsi_debug_sector_size;
+ return fake_storep + lba * sdebug_sector_size;
}
static struct sd_dif_tuple *dif_store(sector_t sector)
@@ -684,9 +689,6 @@ static struct sd_dif_tuple *dif_store(sector_t sector)
return dif_storep + sector;
}
-static int sdebug_add_adapter(void);
-static void sdebug_remove_adapter(void);
-
static void sdebug_max_tgts_luns(void)
{
struct sdebug_host_info *sdbg_host;
@@ -696,11 +698,11 @@ static void sdebug_max_tgts_luns(void)
list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
hpnt = sdbg_host->shost;
if ((hpnt->this_id >= 0) &&
- (scsi_debug_num_tgts > hpnt->this_id))
- hpnt->max_id = scsi_debug_num_tgts + 1;
+ (sdebug_num_tgts > hpnt->this_id))
+ hpnt->max_id = sdebug_num_tgts + 1;
else
- hpnt->max_id = scsi_debug_num_tgts;
- /* scsi_debug_max_luns; */
+ hpnt->max_id = sdebug_num_tgts;
+ /* sdebug_max_luns; */
hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
}
spin_unlock(&sdebug_host_list_lock);
@@ -709,9 +711,9 @@ static void sdebug_max_tgts_luns(void)
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
/* Set in_bit to -1 to indicate no bit position of invalid field */
-static void
-mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
- int in_byte, int in_bit)
+static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
+ enum sdeb_cmd_data c_d,
+ int in_byte, int in_bit)
{
unsigned char *sbuff;
u8 sks[4];
@@ -725,8 +727,7 @@ mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
}
asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
- scsi_build_sense_buffer(scsi_debug_dsense, sbuff, ILLEGAL_REQUEST,
- asc, 0);
+ scsi_build_sense_buffer(sdebug_dsense, sbuff, ILLEGAL_REQUEST, asc, 0);
memset(sks, 0, sizeof(sks));
sks[0] = 0x80;
if (c_d)
@@ -736,7 +737,7 @@ mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
sks[0] |= 0x7 & in_bit;
}
put_unaligned_be16(in_byte, sks + 1);
- if (scsi_debug_dsense) {
+ if (sdebug_dsense) {
sl = sbuff[7] + 8;
sbuff[7] = sl;
sbuff[sl] = 0x2;
@@ -744,7 +745,7 @@ mk_sense_invalid_fld(struct scsi_cmnd *scp, enum sdeb_cmd_data c_d,
memcpy(sbuff + sl + 4, sks, 3);
} else
memcpy(sbuff + 15, sks, 3);
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+ if (sdebug_verbose)
sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
"]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
@@ -762,23 +763,22 @@ static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
}
memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
- scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
+ scsi_build_sense_buffer(sdebug_dsense, sbuff, key, asc, asq);
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+ if (sdebug_verbose)
sdev_printk(KERN_INFO, scp->device,
"%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
my_name, key, asc, asq);
}
-static void
-mk_sense_invalid_opcode(struct scsi_cmnd *scp)
+static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
+ if (sdebug_verbose) {
if (0x1261 == cmd)
sdev_printk(KERN_INFO, dev,
"%s: BLKFLSBUF [0x1261]\n", __func__);
@@ -810,11 +810,9 @@ static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
spin_unlock(&sdebug_host_list_lock);
}
-static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
- struct sdebug_dev_info * devip)
+static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
int k;
- bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
if (k != SDEBUG_NUM_UAS) {
@@ -822,40 +820,41 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
switch (k) {
case SDEBUG_UA_POR:
- mk_sense_buffer(SCpnt, UNIT_ATTENTION,
- UA_RESET_ASC, POWER_ON_RESET_ASCQ);
- if (debug)
+ mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
+ POWER_ON_RESET_ASCQ);
+ if (sdebug_verbose)
cp = "power on reset";
break;
case SDEBUG_UA_BUS_RESET:
- mk_sense_buffer(SCpnt, UNIT_ATTENTION,
- UA_RESET_ASC, BUS_RESET_ASCQ);
- if (debug)
+ mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
+ BUS_RESET_ASCQ);
+ if (sdebug_verbose)
cp = "bus reset";
break;
case SDEBUG_UA_MODE_CHANGED:
- mk_sense_buffer(SCpnt, UNIT_ATTENTION,
- UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
- if (debug)
+ mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
+ MODE_CHANGED_ASCQ);
+ if (sdebug_verbose)
cp = "mode parameters changed";
break;
case SDEBUG_UA_CAPACITY_CHANGED:
- mk_sense_buffer(SCpnt, UNIT_ATTENTION,
- UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
- if (debug)
+ mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
+ CAPACITY_CHANGED_ASCQ);
+ if (sdebug_verbose)
cp = "capacity data changed";
break;
case SDEBUG_UA_MICROCODE_CHANGED:
- mk_sense_buffer(SCpnt, UNIT_ATTENTION,
- TARGET_CHANGED_ASC, MICROCODE_CHANGED_ASCQ);
- if (debug)
+ mk_sense_buffer(scp, UNIT_ATTENTION,
+ TARGET_CHANGED_ASC,
+ MICROCODE_CHANGED_ASCQ);
+ if (sdebug_verbose)
cp = "microcode has been changed";
break;
case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
- mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ mk_sense_buffer(scp, UNIT_ATTENTION,
TARGET_CHANGED_ASC,
MICROCODE_CHANGED_WO_RESET_ASCQ);
- if (debug)
+ if (sdebug_verbose)
cp = "microcode has been changed without reset";
break;
case SDEBUG_UA_LUNS_CHANGED:
@@ -864,40 +863,30 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
* ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
* on the target, until a REPORT LUNS command is
* received. SPC-4 behavior is to report it only once.
- * NOTE: scsi_debug_scsi_level does not use the same
+ * NOTE: sdebug_scsi_level does not use the same
* values as struct scsi_device->scsi_level.
*/
- if (scsi_debug_scsi_level >= 6) /* SPC-4 and above */
+ if (sdebug_scsi_level >= 6) /* SPC-4 and above */
clear_luns_changed_on_target(devip);
- mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ mk_sense_buffer(scp, UNIT_ATTENTION,
TARGET_CHANGED_ASC,
LUNS_CHANGED_ASCQ);
- if (debug)
+ if (sdebug_verbose)
cp = "reported luns data has changed";
break;
default:
- pr_warn("%s: unexpected unit attention code=%d\n",
- __func__, k);
- if (debug)
+ pr_warn("unexpected unit attention code=%d\n", k);
+ if (sdebug_verbose)
cp = "unknown";
break;
}
clear_bit(k, devip->uas_bm);
- if (debug)
- sdev_printk(KERN_INFO, SCpnt->device,
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, scp->device,
"%s reports: Unit attention: %s\n",
my_name, cp);
return check_condition_result;
}
- if ((UAS_TUR == uas_only) && devip->stopped) {
- mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
- 0x2);
- if (debug)
- sdev_printk(KERN_INFO, SCpnt->device,
- "%s reports: Not ready: %s\n", my_name,
- "initializing command required");
- return check_condition_result;
- }
return 0;
}
@@ -911,7 +900,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
if (!sdb->length)
return 0;
if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
- return (DID_ERROR << 16);
+ return DID_ERROR << 16;
act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
arr, arr_len);
@@ -935,13 +924,17 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
static const char * inq_vendor_id = "Linux ";
static const char * inq_product_id = "scsi_debug ";
-static const char *inq_product_rev = "0184"; /* version less '.' */
+static const char *inq_product_rev = "0186"; /* version less '.' */
+/* Use some locally assigned NAAs for SAS addresses. */
+static const u64 naa3_comp_a = 0x3222222000000000ULL;
+static const u64 naa3_comp_b = 0x3333333000000000ULL;
+static const u64 naa3_comp_c = 0x3111111000000000ULL;
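
Illustration (not part of the patch) of how these constants compose an
8-byte NAA-3 designator: the top nibble 0x3 is the NAA field (locally
assigned) and the low bits carry a small device number, e.g.:

	u8 desig[8];

	/* dev_id_num = 0x1234 -> 33 33 33 30 00 00 12 34 */
	put_unaligned_be64(naa3_comp_b + 0x1234, desig);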
/* Device identification VPD page. Returns number of bytes placed in arr */
-static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
- int target_dev_id, int dev_id_num,
- const char * dev_id_str,
- int dev_id_str_len)
+static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
+ int target_dev_id, int dev_id_num,
+ const char *dev_id_str, int dev_id_str_len,
+ const uuid_be *lu_name)
{
int num, port_a;
char b[32];
@@ -958,19 +951,25 @@ static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
arr[3] = num;
num += 4;
if (dev_id_num >= 0) {
- /* NAA-5, Logical unit identifier (binary) */
- arr[num++] = 0x1; /* binary (not necessarily sas) */
- arr[num++] = 0x3; /* PIV=0, lu, naa */
- arr[num++] = 0x0;
- arr[num++] = 0x8;
- arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
- arr[num++] = 0x33;
- arr[num++] = 0x33;
- arr[num++] = 0x30;
- arr[num++] = (dev_id_num >> 24);
- arr[num++] = (dev_id_num >> 16) & 0xff;
- arr[num++] = (dev_id_num >> 8) & 0xff;
- arr[num++] = dev_id_num & 0xff;
+ if (sdebug_uuid_ctl) {
+ /* Locally assigned UUID */
+ arr[num++] = 0x1; /* binary (not necessarily sas) */
+			arr[num++] = 0xa;	/* PIV=0, lu, uuid */
+ arr[num++] = 0x0;
+ arr[num++] = 0x12;
+ arr[num++] = 0x10; /* uuid type=1, locally assigned */
+ arr[num++] = 0x0;
+ memcpy(arr + num, lu_name, 16);
+ num += 16;
+ } else {
+ /* NAA-3, Logical unit identifier (binary) */
+ arr[num++] = 0x1; /* binary (not necessarily sas) */
+ arr[num++] = 0x3; /* PIV=0, lu, naa */
+ arr[num++] = 0x0;
+ arr[num++] = 0x8;
+ put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
+ num += 8;
+ }
/* Target relative port number */
arr[num++] = 0x61; /* proto=sas, binary */
arr[num++] = 0x94; /* PIV=1, target port, rel port */
@@ -981,47 +980,35 @@ static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
arr[num++] = 0x0;
arr[num++] = 0x1; /* relative port A */
}
- /* NAA-5, Target port identifier */
+ /* NAA-3, Target port identifier */
arr[num++] = 0x61; /* proto=sas, binary */
arr[num++] = 0x93; /* piv=1, target port, naa */
arr[num++] = 0x0;
arr[num++] = 0x8;
- arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
- arr[num++] = 0x22;
- arr[num++] = 0x22;
- arr[num++] = 0x20;
- arr[num++] = (port_a >> 24);
- arr[num++] = (port_a >> 16) & 0xff;
- arr[num++] = (port_a >> 8) & 0xff;
- arr[num++] = port_a & 0xff;
- /* NAA-5, Target port group identifier */
+ put_unaligned_be64(naa3_comp_a + port_a, arr + num);
+ num += 8;
+ /* NAA-3, Target port group identifier */
arr[num++] = 0x61; /* proto=sas, binary */
arr[num++] = 0x95; /* piv=1, target port group id */
arr[num++] = 0x0;
arr[num++] = 0x4;
arr[num++] = 0;
arr[num++] = 0;
- arr[num++] = (port_group_id >> 8) & 0xff;
- arr[num++] = port_group_id & 0xff;
- /* NAA-5, Target device identifier */
+ put_unaligned_be16(port_group_id, arr + num);
+ num += 2;
+ /* NAA-3, Target device identifier */
arr[num++] = 0x61; /* proto=sas, binary */
arr[num++] = 0xa3; /* piv=1, target device, naa */
arr[num++] = 0x0;
arr[num++] = 0x8;
- arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
- arr[num++] = 0x22;
- arr[num++] = 0x22;
- arr[num++] = 0x20;
- arr[num++] = (target_dev_id >> 24);
- arr[num++] = (target_dev_id >> 16) & 0xff;
- arr[num++] = (target_dev_id >> 8) & 0xff;
- arr[num++] = target_dev_id & 0xff;
+ put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
+ num += 8;
/* SCSI name string: Target device identifier */
arr[num++] = 0x63; /* proto=sas, UTF-8 */
arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
arr[num++] = 0x0;
arr[num++] = 24;
- memcpy(arr + num, "naa.52222220", 12);
+ memcpy(arr + num, "naa.32222220", 12);
num += 12;
snprintf(b, sizeof(b), "%08X", target_dev_id);
memcpy(arr + num, b, 8);
@@ -1031,7 +1018,6 @@ static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
return num;
}
-
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
0x22,0x22,0x22,0x0,0xbb,0x1,
@@ -1039,14 +1025,14 @@ static unsigned char vpd84_data[] = {
};
/* Software interface identification VPD page */
-static int inquiry_evpd_84(unsigned char * arr)
+static int inquiry_vpd_84(unsigned char *arr)
{
memcpy(arr, vpd84_data, sizeof(vpd84_data));
return sizeof(vpd84_data);
}
/* Management network addresses VPD page */
-static int inquiry_evpd_85(unsigned char * arr)
+static int inquiry_vpd_85(unsigned char *arr)
{
int num = 0;
const char * na1 = "https://www.kernel.org/config";
@@ -1081,7 +1067,7 @@ static int inquiry_evpd_85(unsigned char * arr)
}
/* SCSI ports VPD page */
-static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
+static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{
int num = 0;
int port_a, port_b;
@@ -1101,15 +1087,8 @@ static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
arr[num++] = 0x93; /* PIV=1, target port, NAA */
arr[num++] = 0x0; /* reserved */
arr[num++] = 0x8; /* length */
- arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
- arr[num++] = 0x22;
- arr[num++] = 0x22;
- arr[num++] = 0x20;
- arr[num++] = (port_a >> 24);
- arr[num++] = (port_a >> 16) & 0xff;
- arr[num++] = (port_a >> 8) & 0xff;
- arr[num++] = port_a & 0xff;
-
+ put_unaligned_be64(naa3_comp_a + port_a, arr + num);
+ num += 8;
arr[num++] = 0x0; /* reserved */
arr[num++] = 0x0; /* reserved */
arr[num++] = 0x0;
@@ -1123,14 +1102,8 @@ static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
arr[num++] = 0x93; /* PIV=1, target port, NAA */
arr[num++] = 0x0; /* reserved */
arr[num++] = 0x8; /* length */
- arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
- arr[num++] = 0x22;
- arr[num++] = 0x22;
- arr[num++] = 0x20;
- arr[num++] = (port_b >> 24);
- arr[num++] = (port_b >> 16) & 0xff;
- arr[num++] = (port_b >> 8) & 0xff;
- arr[num++] = port_b & 0xff;
+ put_unaligned_be64(naa3_comp_a + port_b, arr + num);
+ num += 8;
return num;
}
@@ -1181,7 +1154,7 @@ static unsigned char vpd89_data[] = {
};
/* ATA Information VPD page */
-static int inquiry_evpd_89(unsigned char * arr)
+static int inquiry_vpd_89(unsigned char *arr)
{
memcpy(arr, vpd89_data, sizeof(vpd89_data));
return sizeof(vpd89_data);
@@ -1196,47 +1169,42 @@ static unsigned char vpdb0_data[] = {
};
/* Block limits VPD page (SBC-3) */
-static int inquiry_evpd_b0(unsigned char * arr)
+static int inquiry_vpd_b0(unsigned char *arr)
{
unsigned int gran;
memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
/* Optimal transfer length granularity */
- gran = 1 << scsi_debug_physblk_exp;
- arr[2] = (gran >> 8) & 0xff;
- arr[3] = gran & 0xff;
+ gran = 1 << sdebug_physblk_exp;
+ put_unaligned_be16(gran, arr + 2);
/* Maximum Transfer Length */
- if (sdebug_store_sectors > 0x400) {
- arr[4] = (sdebug_store_sectors >> 24) & 0xff;
- arr[5] = (sdebug_store_sectors >> 16) & 0xff;
- arr[6] = (sdebug_store_sectors >> 8) & 0xff;
- arr[7] = sdebug_store_sectors & 0xff;
- }
+ if (sdebug_store_sectors > 0x400)
+ put_unaligned_be32(sdebug_store_sectors, arr + 4);
/* Optimal Transfer Length */
- put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
+ put_unaligned_be32(sdebug_opt_blks, &arr[8]);
- if (scsi_debug_lbpu) {
+ if (sdebug_lbpu) {
/* Maximum Unmap LBA Count */
- put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
+ put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
/* Maximum Unmap Block Descriptor Count */
- put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
+ put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
}
/* Unmap Granularity Alignment */
- if (scsi_debug_unmap_alignment) {
- put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
+ if (sdebug_unmap_alignment) {
+ put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
arr[28] |= 0x80; /* UGAVALID */
}
/* Optimal Unmap Granularity */
- put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
+ put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
/* Maximum WRITE SAME Length */
- put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
+ put_unaligned_be64(sdebug_write_same_length, &arr[32]);
return 0x3c; /* Mandatory page length for Logical Block Provisioning */
@@ -1244,7 +1212,7 @@ static int inquiry_evpd_b0(unsigned char * arr)
}
/* Block device characteristics VPD page (SBC-3) */
-static int inquiry_evpd_b1(unsigned char *arr)
+static int inquiry_vpd_b1(unsigned char *arr)
{
memset(arr, 0, 0x3c);
arr[0] = 0;
@@ -1255,24 +1223,22 @@ static int inquiry_evpd_b1(unsigned char *arr)
return 0x3c;
}
-/* Logical block provisioning VPD page (SBC-3) */
-static int inquiry_evpd_b2(unsigned char *arr)
+/* Logical block provisioning VPD page (SBC-4) */
+static int inquiry_vpd_b2(unsigned char *arr)
{
memset(arr, 0, 0x4);
arr[0] = 0; /* threshold exponent */
-
- if (scsi_debug_lbpu)
+ if (sdebug_lbpu)
arr[1] = 1 << 7;
-
- if (scsi_debug_lbpws)
+ if (sdebug_lbpws)
arr[1] |= 1 << 6;
-
- if (scsi_debug_lbpws10)
+ if (sdebug_lbpws10)
arr[1] |= 1 << 5;
-
- if (scsi_debug_lbprz)
- arr[1] |= 1 << 2;
-
+ if (sdebug_lbprz && scsi_debug_lbp())
+ arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
+ /* anc_sup=0; dp=0 (no provisioning group descriptor) */
+ /* minimum_percentage=0; provisioning_type=0 (unknown) */
+ /* threshold_percentage=0 */
return 0x4;
}
@@ -1285,19 +1251,20 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
unsigned char * arr;
unsigned char *cmd = scp->cmnd;
int alloc_len, n, ret;
- bool have_wlun;
+ bool have_wlun, is_disk;
- alloc_len = (cmd[3] << 8) + cmd[4];
+ alloc_len = get_unaligned_be16(cmd + 3);
arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
if (! arr)
return DID_REQUEUE << 16;
- have_wlun = (scp->device->lun == SCSI_W_LUN_REPORT_LUNS);
+ is_disk = (sdebug_ptype == TYPE_DISK);
+ have_wlun = scsi_is_wlun(scp->device->lun);
if (have_wlun)
- pq_pdt = 0x1e; /* present, wlun */
- else if (scsi_debug_no_lun_0 && (0 == devip->lun))
- pq_pdt = 0x7f; /* not present, no device type */
+ pq_pdt = TYPE_WLUN; /* present, wlun */
+ else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
+ pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
else
- pq_pdt = (scsi_debug_ptype & 0x1f);
+ pq_pdt = (sdebug_ptype & 0x1f);
arr[0] = pq_pdt;
if (0x2 & cmd[1]) { /* CMDDT bit set */
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
@@ -1310,7 +1277,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
port_group_id = (((host_no + 1) & 0x7f) << 8) +
(devip->channel & 0x7f);
- if (0 == scsi_debug_vpd_use_hostno)
+ if (sdebug_vpd_use_hostno == 0)
host_no = 0;
lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
(devip->target * 1000) + devip->lun);
@@ -1328,11 +1295,12 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[n++] = 0x86; /* extended inquiry */
arr[n++] = 0x87; /* mode page policy */
arr[n++] = 0x88; /* SCSI ports */
- arr[n++] = 0x89; /* ATA information */
- arr[n++] = 0xb0; /* Block limits (SBC) */
- arr[n++] = 0xb1; /* Block characteristics (SBC) */
- if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
- arr[n++] = 0xb2;
+ if (is_disk) { /* SBC only */
+ arr[n++] = 0x89; /* ATA information */
+ arr[n++] = 0xb0; /* Block limits */
+ arr[n++] = 0xb1; /* Block characteristics */
+ arr[n++] = 0xb2; /* Logical Block Prov */
+ }
arr[3] = n - 4; /* number of supported VPD pages */
} else if (0x80 == cmd[2]) { /* unit serial number */
arr[1] = cmd[2]; /*sanity */
@@ -1340,21 +1308,22 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
memcpy(&arr[4], lu_id_str, len);
} else if (0x83 == cmd[2]) { /* device identification */
arr[1] = cmd[2]; /*sanity */
- arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
- target_dev_id, lu_id_num,
- lu_id_str, len);
+ arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
+ target_dev_id, lu_id_num,
+ lu_id_str, len,
+ &devip->lu_name);
} else if (0x84 == cmd[2]) { /* Software interface ident. */
arr[1] = cmd[2]; /*sanity */
- arr[3] = inquiry_evpd_84(&arr[4]);
+ arr[3] = inquiry_vpd_84(&arr[4]);
} else if (0x85 == cmd[2]) { /* Management network addresses */
arr[1] = cmd[2]; /*sanity */
- arr[3] = inquiry_evpd_85(&arr[4]);
+ arr[3] = inquiry_vpd_85(&arr[4]);
} else if (0x86 == cmd[2]) { /* extended inquiry */
arr[1] = cmd[2]; /*sanity */
arr[3] = 0x3c; /* number of following entries */
- if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
+ if (sdebug_dif == SD_DIF_TYPE3_PROTECTION)
arr[4] = 0x4; /* SPT: GRD_CHK:1 */
- else if (scsi_debug_dif)
+ else if (have_dif_prot)
arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
else
arr[4] = 0x0; /* no protection stuff */
@@ -1368,39 +1337,38 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[10] = 0x82; /* mlus, per initiator port */
} else if (0x88 == cmd[2]) { /* SCSI Ports */
arr[1] = cmd[2]; /*sanity */
- arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
- } else if (0x89 == cmd[2]) { /* ATA information */
+ arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
+ } else if (is_disk && 0x89 == cmd[2]) { /* ATA information */
arr[1] = cmd[2]; /*sanity */
- n = inquiry_evpd_89(&arr[4]);
- arr[2] = (n >> 8);
- arr[3] = (n & 0xff);
- } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
+ n = inquiry_vpd_89(&arr[4]);
+ put_unaligned_be16(n, arr + 2);
+ } else if (is_disk && 0xb0 == cmd[2]) { /* Block limits */
arr[1] = cmd[2]; /*sanity */
- arr[3] = inquiry_evpd_b0(&arr[4]);
- } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
+ arr[3] = inquiry_vpd_b0(&arr[4]);
+ } else if (is_disk && 0xb1 == cmd[2]) { /* Block char. */
arr[1] = cmd[2]; /*sanity */
- arr[3] = inquiry_evpd_b1(&arr[4]);
- } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
+ arr[3] = inquiry_vpd_b1(&arr[4]);
+ } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
arr[1] = cmd[2]; /*sanity */
- arr[3] = inquiry_evpd_b2(&arr[4]);
+ arr[3] = inquiry_vpd_b2(&arr[4]);
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
kfree(arr);
return check_condition_result;
}
- len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
+ len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
ret = fill_from_dev_buffer(scp, arr,
min(len, SDEBUG_MAX_INQ_ARR_SZ));
kfree(arr);
return ret;
}
/* drops through here for a standard inquiry */
- arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
- arr[2] = scsi_debug_scsi_level;
+ arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
+ arr[2] = sdebug_scsi_level;
arr[3] = 2; /* response_data_format==2 */
arr[4] = SDEBUG_LONG_INQ_SZ - 5;
- arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
- if (0 == scsi_debug_vpd_use_hostno)
+ arr[5] = (int)have_dif_prot; /* PROTECT bit */
+ if (sdebug_vpd_use_hostno == 0)
arr[5] = 0x10; /* claim: implicit TGPS */
arr[6] = 0x10; /* claim: MultiP */
/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
@@ -1409,21 +1377,26 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
memcpy(&arr[16], inq_product_id, 16);
memcpy(&arr[32], inq_product_rev, 4);
/* version descriptors (2 bytes each) follow */
- arr[58] = 0x0; arr[59] = 0xa2; /* SAM-5 rev 4 */
- arr[60] = 0x4; arr[61] = 0x68; /* SPC-4 rev 37 */
+ put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
+ put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
n = 62;
- if (scsi_debug_ptype == 0) {
- arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
- } else if (scsi_debug_ptype == 1) {
- arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
- }
- arr[n++] = 0x20; arr[n++] = 0xe6; /* SPL-3 rev 7 */
+ if (is_disk) { /* SBC-4 no version claimed */
+ put_unaligned_be16(0x600, arr + n);
+ n += 2;
+ } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
+ put_unaligned_be16(0x525, arr + n);
+ n += 2;
+ }
+ put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
ret = fill_from_dev_buffer(scp, arr,
min(alloc_len, SDEBUG_LONG_INQ_SZ));
kfree(arr);
return ret;
}
+static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
+ 0, 0, 0x0, 0x0};
+
static int resp_requests(struct scsi_cmnd * scp,
struct sdebug_dev_info * devip)
{
@@ -1452,7 +1425,7 @@ static int resp_requests(struct scsi_cmnd * scp,
}
} else {
memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
- if (arr[0] >= 0x70 && dsense == scsi_debug_dsense)
+ if (arr[0] >= 0x70 && dsense == sdebug_dsense)
; /* have sense and formats match */
else if (arr[0] <= 0x70) {
if (dsense) {
@@ -1489,24 +1462,25 @@ static int resp_start_stop(struct scsi_cmnd * scp,
struct sdebug_dev_info * devip)
{
unsigned char *cmd = scp->cmnd;
- int power_cond, start;
+ int power_cond, stop;
power_cond = (cmd[4] & 0xf0) >> 4;
if (power_cond) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
return check_condition_result;
}
- start = cmd[4] & 1;
- if (start == devip->stopped)
- devip->stopped = !start;
+ stop = !(cmd[4] & 1);
+ atomic_xchg(&devip->stopped, stop);
return 0;
}
static sector_t get_sdebug_capacity(void)
{
- if (scsi_debug_virtual_gb > 0)
- return (sector_t)scsi_debug_virtual_gb *
- (1073741824 / scsi_debug_sector_size);
+ static const unsigned int gibibyte = 1073741824;
+
+ if (sdebug_virtual_gb > 0)
+ return (sector_t)sdebug_virtual_gb *
+ (gibibyte / sdebug_sector_size);
else
return sdebug_store_sectors;
}
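
Worked example for get_sdebug_capacity() (illustrative): virtual_gb=8
with the default 512-byte sectors yields 8 * (1073741824 / 512) =
16777216 logical blocks, regardless of the smaller backing-store size.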
@@ -1523,18 +1497,10 @@ static int resp_readcap(struct scsi_cmnd * scp,
memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
if (sdebug_capacity < 0xffffffff) {
capac = (unsigned int)sdebug_capacity - 1;
- arr[0] = (capac >> 24);
- arr[1] = (capac >> 16) & 0xff;
- arr[2] = (capac >> 8) & 0xff;
- arr[3] = capac & 0xff;
- } else {
- arr[0] = 0xff;
- arr[1] = 0xff;
- arr[2] = 0xff;
- arr[3] = 0xff;
- }
- arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
- arr[7] = scsi_debug_sector_size & 0xff;
+ put_unaligned_be32(capac, arr + 0);
+ } else
+ put_unaligned_be32(0xffffffff, arr + 0);
+ put_unaligned_be16(sdebug_sector_size, arr + 6);
return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
}
@@ -1544,34 +1510,31 @@ static int resp_readcap16(struct scsi_cmnd * scp,
{
unsigned char *cmd = scp->cmnd;
unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
- unsigned long long capac;
- int k, alloc_len;
+ int alloc_len;
- alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
- + cmd[13]);
+ alloc_len = get_unaligned_be32(cmd + 10);
/* following just in case virtual_gb changed */
sdebug_capacity = get_sdebug_capacity();
memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
- capac = sdebug_capacity - 1;
- for (k = 0; k < 8; ++k, capac >>= 8)
- arr[7 - k] = capac & 0xff;
- arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
- arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
- arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
- arr[11] = scsi_debug_sector_size & 0xff;
- arr[13] = scsi_debug_physblk_exp & 0xf;
- arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
+ put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
+ put_unaligned_be32(sdebug_sector_size, arr + 8);
+ arr[13] = sdebug_physblk_exp & 0xf;
+ arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
if (scsi_debug_lbp()) {
arr[14] |= 0x80; /* LBPME */
- if (scsi_debug_lbprz)
- arr[14] |= 0x40; /* LBPRZ */
+ /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
+ * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
+ * in the wider field maps to 0 in this field.
+ */
+ if (sdebug_lbprz & 1) /* precisely what the draft requires */
+ arr[14] |= 0x40;
}
- arr[15] = scsi_debug_lowest_aligned & 0xff;
+ arr[15] = sdebug_lowest_aligned & 0xff;
- if (scsi_debug_dif) {
- arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
+ if (have_dif_prot) {
+ arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
arr[12] |= 1; /* PROT_EN */
}
@@ -1590,9 +1553,7 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp,
int n, ret, alen, rlen;
int port_group_a, port_group_b, port_a, port_b;
- alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
- + cmd[9]);
-
+ alen = get_unaligned_be32(cmd + 6);
arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
if (! arr)
return DID_REQUEUE << 16;
@@ -1605,49 +1566,46 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp,
port_a = 0x1; /* relative port A */
port_b = 0x2; /* relative port B */
port_group_a = (((host_no + 1) & 0x7f) << 8) +
- (devip->channel & 0x7f);
+ (devip->channel & 0x7f);
port_group_b = (((host_no + 1) & 0x7f) << 8) +
- (devip->channel & 0x7f) + 0x80;
+ (devip->channel & 0x7f) + 0x80;
/*
* The asymmetric access state is cycled according to the host_id.
*/
n = 4;
- if (0 == scsi_debug_vpd_use_hostno) {
- arr[n++] = host_no % 3; /* Asymm access state */
- arr[n++] = 0x0F; /* claim: all states are supported */
+ if (sdebug_vpd_use_hostno == 0) {
+ arr[n++] = host_no % 3; /* Asymm access state */
+ arr[n++] = 0x0F; /* claim: all states are supported */
} else {
- arr[n++] = 0x0; /* Active/Optimized path */
- arr[n++] = 0x01; /* claim: only support active/optimized paths */
+ arr[n++] = 0x0; /* Active/Optimized path */
+ arr[n++] = 0x01; /* only support active/optimized paths */
}
- arr[n++] = (port_group_a >> 8) & 0xff;
- arr[n++] = port_group_a & 0xff;
+ put_unaligned_be16(port_group_a, arr + n);
+ n += 2;
arr[n++] = 0; /* Reserved */
arr[n++] = 0; /* Status code */
arr[n++] = 0; /* Vendor unique */
arr[n++] = 0x1; /* One port per group */
arr[n++] = 0; /* Reserved */
arr[n++] = 0; /* Reserved */
- arr[n++] = (port_a >> 8) & 0xff;
- arr[n++] = port_a & 0xff;
+ put_unaligned_be16(port_a, arr + n);
+ n += 2;
arr[n++] = 3; /* Port unavailable */
arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
- arr[n++] = (port_group_b >> 8) & 0xff;
- arr[n++] = port_group_b & 0xff;
+ put_unaligned_be16(port_group_b, arr + n);
+ n += 2;
arr[n++] = 0; /* Reserved */
arr[n++] = 0; /* Status code */
arr[n++] = 0; /* Vendor unique */
arr[n++] = 0x1; /* One port per group */
arr[n++] = 0; /* Reserved */
arr[n++] = 0; /* Reserved */
- arr[n++] = (port_b >> 8) & 0xff;
- arr[n++] = port_b & 0xff;
+ put_unaligned_be16(port_b, arr + n);
+ n += 2;
rlen = n - 4;
- arr[0] = (rlen >> 24) & 0xff;
- arr[1] = (rlen >> 16) & 0xff;
- arr[2] = (rlen >> 8) & 0xff;
- arr[3] = rlen & 0xff;
+ put_unaligned_be32(rlen, arr + 0);
/*
* Return the smallest value of either
@@ -1662,8 +1620,8 @@ static int resp_report_tgtpgs(struct scsi_cmnd * scp,
return ret;
}
-static int
-resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_rsup_opcodes(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
bool rctd;
u8 reporting_opts, req_opcode, sdeb_i, supp;
@@ -1813,8 +1771,8 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return errsts;
}
-static int
-resp_rsup_tmfs(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_rsup_tmfs(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
bool repd;
u32 alloc_len, len;
@@ -1871,17 +1829,19 @@ static int resp_format_pg(unsigned char * p, int pcontrol, int target)
0, 0, 0, 0, 0x40, 0, 0, 0};
memcpy(p, format_pg, sizeof(format_pg));
- p[10] = (sdebug_sectors_per >> 8) & 0xff;
- p[11] = sdebug_sectors_per & 0xff;
- p[12] = (scsi_debug_sector_size >> 8) & 0xff;
- p[13] = scsi_debug_sector_size & 0xff;
- if (scsi_debug_removable)
+ put_unaligned_be16(sdebug_sectors_per, p + 10);
+ put_unaligned_be16(sdebug_sector_size, p + 12);
+ if (sdebug_removable)
p[20] |= 0x20; /* should agree with INQUIRY */
if (1 == pcontrol)
memset(p + 2, 0, sizeof(format_pg) - 2);
return sizeof(format_pg);
}
+static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
+ 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
+ 0, 0, 0, 0};
+
static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
{ /* Caching page for mode_sense */
unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
@@ -1889,7 +1849,7 @@ static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
- if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
+ if (SDEBUG_OPT_N_WCE & sdebug_opts)
caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
memcpy(p, caching_pg, sizeof(caching_pg));
if (1 == pcontrol)
@@ -1899,6 +1859,9 @@ static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
return sizeof(caching_pg);
}
+static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
+ 0, 0, 0x2, 0x4b};
+
static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
{ /* Control mode page for mode_sense */
unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
@@ -1906,12 +1869,12 @@ static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
0, 0, 0x2, 0x4b};
- if (scsi_debug_dsense)
+ if (sdebug_dsense)
ctrl_m_pg[2] |= 0x4;
else
ctrl_m_pg[2] &= ~0x4;
- if (scsi_debug_ato)
+ if (sdebug_ato)
ctrl_m_pg[5] |= 0x80; /* ATO=1 */
memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
@@ -1955,31 +1918,29 @@ static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
{ /* SAS phy control and discover mode page for mode_sense */
unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
- 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
- 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
+ 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
0x2, 0, 0, 0, 0, 0, 0, 0,
0x88, 0x99, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
- 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
- 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
+ 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
0x3, 0, 0, 0, 0, 0, 0, 0,
0x88, 0x99, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
};
int port_a, port_b;
+ put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
+ put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
+ put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
+ put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
port_a = target_dev_id + 1;
port_b = port_a + 1;
memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
- p[20] = (port_a >> 24);
- p[21] = (port_a >> 16) & 0xff;
- p[22] = (port_a >> 8) & 0xff;
- p[23] = port_a & 0xff;
- p[48 + 20] = (port_b >> 24);
- p[48 + 21] = (port_b >> 16) & 0xff;
- p[48 + 22] = (port_b >> 8) & 0xff;
- p[48 + 23] = port_b & 0xff;
+ put_unaligned_be32(port_a, p + 20);
+ put_unaligned_be32(port_b, p + 48 + 20);
if (1 == pcontrol)
memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
return sizeof(sas_pcd_m_pg);
@@ -1999,29 +1960,30 @@ static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
#define SDEBUG_MAX_MSENSE_SZ 256
-static int
-resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_mode_sense(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
- unsigned char dbd, llbaa;
int pcontrol, pcode, subpcode, bd_len;
unsigned char dev_spec;
- int k, alloc_len, msense_6, offset, len, target_dev_id;
+ int alloc_len, offset, len, target_dev_id;
int target = scp->device->id;
unsigned char * ap;
unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
unsigned char *cmd = scp->cmnd;
+ bool dbd, llbaa, msense_6, is_disk, bad_pcode;
- dbd = !!(cmd[1] & 0x8);
+ dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
pcontrol = (cmd[2] & 0xc0) >> 6;
pcode = cmd[2] & 0x3f;
subpcode = cmd[3];
msense_6 = (MODE_SENSE == cmd[0]);
- llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
- if ((0 == scsi_debug_ptype) && (0 == dbd))
+ llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
+ is_disk = (sdebug_ptype == TYPE_DISK);
+ if (is_disk && !dbd)
bd_len = llbaa ? 16 : 8;
else
bd_len = 0;
- alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
+ alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
if (0x3 == pcontrol) { /* Saving values not supported */
mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
@@ -2029,9 +1991,9 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
(devip->target * 1000) - 3;
- /* set DPOFUA bit for disks */
- if (0 == scsi_debug_ptype)
- dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
+ /* for disks set DPOFUA bit and clear write protect (WP) bit */
+ if (is_disk)
+		dev_spec = 0x10;	/* would be 0x90 if WP=1 (read-only) */
else
dev_spec = 0x0;
if (msense_6) {
@@ -2050,30 +2012,16 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
sdebug_capacity = get_sdebug_capacity();
if (8 == bd_len) {
- if (sdebug_capacity > 0xfffffffe) {
- ap[0] = 0xff;
- ap[1] = 0xff;
- ap[2] = 0xff;
- ap[3] = 0xff;
- } else {
- ap[0] = (sdebug_capacity >> 24) & 0xff;
- ap[1] = (sdebug_capacity >> 16) & 0xff;
- ap[2] = (sdebug_capacity >> 8) & 0xff;
- ap[3] = sdebug_capacity & 0xff;
- }
- ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
- ap[7] = scsi_debug_sector_size & 0xff;
+ if (sdebug_capacity > 0xfffffffe)
+ put_unaligned_be32(0xffffffff, ap + 0);
+ else
+ put_unaligned_be32(sdebug_capacity, ap + 0);
+ put_unaligned_be16(sdebug_sector_size, ap + 6);
offset += bd_len;
ap = arr + offset;
} else if (16 == bd_len) {
- unsigned long long capac = sdebug_capacity;
-
- for (k = 0; k < 8; ++k, capac >>= 8)
- ap[7 - k] = capac & 0xff;
- ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
- ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
- ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
- ap[15] = scsi_debug_sector_size & 0xff;
+ put_unaligned_be64((u64)sdebug_capacity, ap + 0);
+ put_unaligned_be32(sdebug_sector_size, ap + 12);
offset += bd_len;
ap = arr + offset;
}
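/* Block descriptor layouts filled in above (direct-access, per SBC):
 *   short (8 byte):  bytes 0..3 number of blocks, bytes 5..7 block length
 *                    (the be16 store at offset 6 covers sector sizes that
 *                    fit in 16 bits, leaving byte 5 zero)
 *   long (16 byte):  bytes 0..7 number of blocks, bytes 12..15 block length
 */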
@@ -2083,6 +2031,8 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
}
+ bad_pcode = false;
+
switch (pcode) {
case 0x1: /* Read-Write error recovery page, direct access */
len = resp_err_recov_pg(ap, pcontrol, target);
@@ -2093,12 +2043,18 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
offset += len;
break;
case 0x3: /* Format device page, direct access */
- len = resp_format_pg(ap, pcontrol, target);
- offset += len;
+ if (is_disk) {
+ len = resp_format_pg(ap, pcontrol, target);
+ offset += len;
+ } else
+ bad_pcode = true;
break;
case 0x8: /* Caching page, direct access */
- len = resp_caching_pg(ap, pcontrol, target);
- offset += len;
+ if (is_disk) {
+ len = resp_caching_pg(ap, pcontrol, target);
+ offset += len;
+ } else
+ bad_pcode = true;
break;
case 0xa: /* Control Mode page, all devices */
len = resp_ctrl_m_pg(ap, pcontrol, target);
@@ -2127,8 +2083,12 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if ((0 == subpcode) || (0xff == subpcode)) {
len = resp_err_recov_pg(ap, pcontrol, target);
len += resp_disconnect_pg(ap + len, pcontrol, target);
- len += resp_format_pg(ap + len, pcontrol, target);
- len += resp_caching_pg(ap + len, pcontrol, target);
+ if (is_disk) {
+ len += resp_format_pg(ap + len, pcontrol,
+ target);
+ len += resp_caching_pg(ap + len, pcontrol,
+ target);
+ }
len += resp_ctrl_m_pg(ap + len, pcontrol, target);
len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
if (0xff == subpcode) {
@@ -2137,29 +2097,31 @@ resp_mode_sense(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
len += resp_sas_sha_m_spg(ap + len, pcontrol);
}
len += resp_iec_m_pg(ap + len, pcontrol, target);
+ offset += len;
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
}
- offset += len;
break;
default:
+ bad_pcode = true;
+ break;
+ }
+ if (bad_pcode) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
return check_condition_result;
}
if (msense_6)
arr[0] = offset - 1;
- else {
- arr[0] = ((offset - 2) >> 8) & 0xff;
- arr[1] = (offset - 2) & 0xff;
- }
+ else
+ put_unaligned_be16((offset - 2), arr + 0);
return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
}
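/* Header length fields set above: MODE SENSE(6) carries a one byte "mode
 * data length" that excludes itself (offset - 1), while MODE SENSE(10)
 * uses a two byte field that excludes both of its bytes (offset - 2).
 */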
#define SDEBUG_MAX_MSELECT_SZ 512
-static int
-resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_mode_select(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
int param_len, res, mpage;
@@ -2170,21 +2132,20 @@ resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
memset(arr, 0, sizeof(arr));
pf = cmd[1] & 0x10;
sp = cmd[1] & 0x1;
- param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
+ param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
return check_condition_result;
}
res = fetch_to_dev_buffer(scp, arr, param_len);
if (-1 == res)
- return (DID_ERROR << 16);
- else if ((res < param_len) &&
- (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+ return DID_ERROR << 16;
+ else if (sdebug_verbose && (res < param_len))
sdev_printk(KERN_INFO, scp->device,
"%s: cdb indicated=%d, IO sent=%d bytes\n",
__func__, param_len, res);
- md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
- bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
+ md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
+ bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
if (md_len > 2) {
mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
return check_condition_result;
@@ -2197,7 +2158,7 @@ resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return check_condition_result;
}
spf = !!(arr[off] & 0x40);
- pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
+ pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
(arr[off + 1] + 2);
if ((pg_len + off) > param_len) {
mk_sense_buffer(scp, ILLEGAL_REQUEST,
@@ -2216,7 +2177,7 @@ resp_mode_select(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (ctrl_m_pg[1] == arr[off + 1]) {
memcpy(ctrl_m_pg + 2, arr + off + 2,
sizeof(ctrl_m_pg) - 2);
- scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
+ sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
goto set_mode_changed_ua;
}
break;
@@ -2279,7 +2240,7 @@ static int resp_log_sense(struct scsi_cmnd * scp,
pcontrol = (cmd[2] & 0xc0) >> 6;
pcode = cmd[2] & 0x3f;
subpcode = cmd[3] & 0xff;
- alloc_len = (cmd[7] << 8) + cmd[8];
+ alloc_len = get_unaligned_be16(cmd + 7);
arr[0] = pcode;
if (0 == subpcode) {
switch (pcode) {
@@ -2336,7 +2297,7 @@ static int resp_log_sense(struct scsi_cmnd * scp,
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
return check_condition_result;
}
- len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
+ len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
return fill_from_dev_buffer(scp, arr,
min(len, SDEBUG_MAX_INQ_ARR_SZ));
}
@@ -2358,8 +2319,8 @@ static int check_device_access_params(struct scsi_cmnd *scp,
}
/* Returns number of bytes copied or -1 if error. */
-static int
-do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
+static int do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num,
+ bool do_write)
{
int ret;
u64 block, rest = 0;
@@ -2384,15 +2345,15 @@ do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
rest = block + num - sdebug_store_sectors;
ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
- fake_storep + (block * scsi_debug_sector_size),
- (num - rest) * scsi_debug_sector_size, 0, do_write);
- if (ret != (num - rest) * scsi_debug_sector_size)
+ fake_storep + (block * sdebug_sector_size),
+ (num - rest) * sdebug_sector_size, 0, do_write);
+ if (ret != (num - rest) * sdebug_sector_size)
return ret;
if (rest) {
ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
- fake_storep, rest * scsi_debug_sector_size,
- (num - rest) * scsi_debug_sector_size, do_write);
+ fake_storep, rest * sdebug_sector_size,
+ (num - rest) * sdebug_sector_size, do_write);
}
return ret;
@@ -2401,13 +2362,12 @@ do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
* arr into fake_store(lba,num) and return true. If comparison fails then
* return false. */
-static bool
-comp_write_worker(u64 lba, u32 num, const u8 *arr)
+static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
{
bool res;
u64 block, rest = 0;
u32 store_blks = sdebug_store_sectors;
- u32 lb_size = scsi_debug_sector_size;
+ u32 lb_size = sdebug_sector_size;
block = do_div(lba, store_blks);
if (block + num > store_blks)
@@ -2434,7 +2394,7 @@ static __be16 dif_compute_csum(const void *buf, int len)
{
__be16 csum;
- if (scsi_debug_guard)
+ if (sdebug_guard)
csum = (__force __be16)ip_compute_csum(buf, len);
else
csum = cpu_to_be16(crc_t10dif(buf, len));
@@ -2445,7 +2405,7 @@ static __be16 dif_compute_csum(const void *buf, int len)
static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
sector_t sector, u32 ei_lba)
{
- __be16 csum = dif_compute_csum(data, scsi_debug_sector_size);
+ __be16 csum = dif_compute_csum(data, sdebug_sector_size);
if (sdt->guard_tag != csum) {
pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
@@ -2454,13 +2414,13 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
be16_to_cpu(csum));
return 0x01;
}
- if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
+ if (sdebug_dif == SD_DIF_TYPE1_PROTECTION &&
be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
pr_err("REF check failed on sector %lu\n",
(unsigned long)sector);
return 0x03;
}
- if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
+ if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
be32_to_cpu(sdt->ref_tag) != ei_lba) {
pr_err("REF check failed on sector %lu\n",
(unsigned long)sector);
@@ -2541,10 +2501,10 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
return 0;
}
-static int
-resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
u8 *cmd = scp->cmnd;
+ struct sdebug_queued_cmd *sqcp;
u64 lba;
u32 num;
u32 ei_lba;
@@ -2591,40 +2551,43 @@ resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
check_prot = false;
break;
}
- if (check_prot) {
- if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
+ if (unlikely(have_dif_prot && check_prot)) {
+ if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
(cmd[1] & 0xe0)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
- scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
+ if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
+ sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
(cmd[1] & 0xe0) == 0)
sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
"to DIF device\n");
}
- if (sdebug_any_injecting_opt) {
- struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
+ if (unlikely(sdebug_any_injecting_opt)) {
+ sqcp = (struct sdebug_queued_cmd *)scp->host_scribble;
- if (ep->inj_short)
- num /= 2;
- }
+ if (sqcp) {
+ if (sqcp->inj_short)
+ num /= 2;
+ }
+ } else
+ sqcp = NULL;
/* inline check_device_access_params() */
- if (lba + num > sdebug_capacity) {
+ if (unlikely(lba + num > sdebug_capacity)) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
return check_condition_result;
}
/* transfer length excessive (tie in to block limits VPD page) */
- if (num > sdebug_store_sectors) {
+ if (unlikely(num > sdebug_store_sectors)) {
/* needs work to find which cdb byte 'num' comes from */
mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
return check_condition_result;
}
- if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
- (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
- ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
+ if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
+ (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
+ ((lba + num) > OPT_MEDIUM_ERR_ADDR))) {
/* claim unrecoverable read error */
mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
/* set info field and valid bit for fixed descriptor */
@@ -2641,7 +2604,7 @@ resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
read_lock_irqsave(&atomic_rw, iflags);
/* DIX + T10 DIF */
- if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
+ if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
int prot_ret = prot_verify_read(scp, lba, num, ei_lba);
if (prot_ret) {
@@ -2653,27 +2616,25 @@ resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
ret = do_device_access(scp, lba, num, false);
read_unlock_irqrestore(&atomic_rw, iflags);
- if (ret == -1)
+ if (unlikely(ret == -1))
return DID_ERROR << 16;
scsi_in(scp)->resid = scsi_bufflen(scp) - ret;
- if (sdebug_any_injecting_opt) {
- struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
-
- if (ep->inj_recovered) {
+ if (unlikely(sqcp)) {
+ if (sqcp->inj_recovered) {
mk_sense_buffer(scp, RECOVERED_ERROR,
THRESHOLD_EXCEEDED, 0);
return check_condition_result;
- } else if (ep->inj_transport) {
+ } else if (sqcp->inj_transport) {
mk_sense_buffer(scp, ABORTED_COMMAND,
TRANSPORT_PROBLEM, ACK_NAK_TO);
return check_condition_result;
- } else if (ep->inj_dif) {
+ } else if (sqcp->inj_dif) {
/* Logical block guard check failed */
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
return illegal_condition_result;
- } else if (ep->inj_dix) {
+ } else if (sqcp->inj_dix) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
return illegal_condition_result;
}
@@ -2750,13 +2711,13 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
ret = dif_verify(sdt, daddr, sector, ei_lba);
if (ret) {
- dump_sector(daddr, scsi_debug_sector_size);
+ dump_sector(daddr, sdebug_sector_size);
goto out;
}
sector++;
ei_lba++;
- dpage_offset += scsi_debug_sector_size;
+ dpage_offset += sdebug_sector_size;
}
diter.consumed = dpage_offset;
sg_miter_stop(&diter);
@@ -2777,24 +2738,18 @@ out:
static unsigned long lba_to_map_index(sector_t lba)
{
- if (scsi_debug_unmap_alignment) {
- lba += scsi_debug_unmap_granularity -
- scsi_debug_unmap_alignment;
- }
- sector_div(lba, scsi_debug_unmap_granularity);
-
+ if (sdebug_unmap_alignment)
+ lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
+ sector_div(lba, sdebug_unmap_granularity);
return lba;
}
static sector_t map_index_to_lba(unsigned long index)
{
- sector_t lba = index * scsi_debug_unmap_granularity;
-
- if (scsi_debug_unmap_alignment) {
- lba -= scsi_debug_unmap_granularity -
- scsi_debug_unmap_alignment;
- }
+ sector_t lba = index * sdebug_unmap_granularity;
+ if (sdebug_unmap_alignment)
+ lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
return lba;
}
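/* Worked example of the two mappings above, assuming
 * sdebug_unmap_granularity = 8 and sdebug_unmap_alignment = 4:
 *   lba_to_map_index(4) = (4 + (8 - 4)) / 8 = 1
 *   map_index_to_lba(1) = 1 * 8 - (8 - 4)  = 4
 * so map bit 1 covers the eight LBAs starting at LBA 4.
 */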
@@ -2815,7 +2770,6 @@ static unsigned int map_state(sector_t lba, unsigned int *num)
end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
*num = end - lba;
-
return mapped;
}
@@ -2841,27 +2795,27 @@ static void unmap_region(sector_t lba, unsigned int len)
unsigned long index = lba_to_map_index(lba);
if (lba == map_index_to_lba(index) &&
- lba + scsi_debug_unmap_granularity <= end &&
+ lba + sdebug_unmap_granularity <= end &&
index < map_size) {
clear_bit(index, map_storep);
- if (scsi_debug_lbprz) {
+			if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff bytes */
memset(fake_storep +
- lba * scsi_debug_sector_size, 0,
- scsi_debug_sector_size *
- scsi_debug_unmap_granularity);
+ lba * sdebug_sector_size,
+ (sdebug_lbprz & 1) ? 0 : 0xff,
+ sdebug_sector_size *
+ sdebug_unmap_granularity);
}
if (dif_storep) {
memset(dif_storep + lba, 0xff,
sizeof(*dif_storep) *
- scsi_debug_unmap_granularity);
+ sdebug_unmap_granularity);
}
}
lba = map_index_to_lba(index + 1);
}
}
-static int
-resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
u8 *cmd = scp->cmnd;
u64 lba;
@@ -2910,26 +2864,26 @@ resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
check_prot = false;
break;
}
- if (check_prot) {
- if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
+ if (unlikely(have_dif_prot && check_prot)) {
+ if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
(cmd[1] & 0xe0)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
- scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
+ if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
+ sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
(cmd[1] & 0xe0) == 0)
sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
"to DIF device\n");
}
/* inline check_device_access_params() */
- if (lba + num > sdebug_capacity) {
+ if (unlikely(lba + num > sdebug_capacity)) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
return check_condition_result;
}
/* transfer length excessive (tie in to block limits VPD page) */
- if (num > sdebug_store_sectors) {
+ if (unlikely(num > sdebug_store_sectors)) {
/* needs work to find which cdb byte 'num' comes from */
mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
return check_condition_result;
@@ -2938,7 +2892,7 @@ resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
write_lock_irqsave(&atomic_rw, iflags);
/* DIX + T10 DIF */
- if (scsi_debug_dix && scsi_prot_sg_count(scp)) {
+ if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
int prot_ret = prot_verify_write(scp, lba, num, ei_lba);
if (prot_ret) {
@@ -2949,43 +2903,46 @@ resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
ret = do_device_access(scp, lba, num, true);
- if (scsi_debug_lbp())
+ if (unlikely(scsi_debug_lbp()))
map_region(lba, num);
write_unlock_irqrestore(&atomic_rw, iflags);
- if (-1 == ret)
- return (DID_ERROR << 16);
- else if ((ret < (num * scsi_debug_sector_size)) &&
- (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+ if (unlikely(-1 == ret))
+ return DID_ERROR << 16;
+ else if (unlikely(sdebug_verbose &&
+ (ret < (num * sdebug_sector_size))))
sdev_printk(KERN_INFO, scp->device,
"%s: write: cdb indicated=%u, IO sent=%d bytes\n",
- my_name, num * scsi_debug_sector_size, ret);
+ my_name, num * sdebug_sector_size, ret);
- if (sdebug_any_injecting_opt) {
- struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
+ if (unlikely(sdebug_any_injecting_opt)) {
+ struct sdebug_queued_cmd *sqcp =
+ (struct sdebug_queued_cmd *)scp->host_scribble;
- if (ep->inj_recovered) {
- mk_sense_buffer(scp, RECOVERED_ERROR,
- THRESHOLD_EXCEEDED, 0);
- return check_condition_result;
- } else if (ep->inj_dif) {
- /* Logical block guard check failed */
- mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
- return illegal_condition_result;
- } else if (ep->inj_dix) {
- mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
- return illegal_condition_result;
+ if (sqcp) {
+ if (sqcp->inj_recovered) {
+ mk_sense_buffer(scp, RECOVERED_ERROR,
+ THRESHOLD_EXCEEDED, 0);
+ return check_condition_result;
+ } else if (sqcp->inj_dif) {
+ /* Logical block guard check failed */
+ mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
+ return illegal_condition_result;
+ } else if (sqcp->inj_dix) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
+ return illegal_condition_result;
+ }
}
}
return 0;
}
-static int
-resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
- bool unmap, bool ndob)
+static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
+ u32 ei_lba, bool unmap, bool ndob)
{
unsigned long iflags;
unsigned long long i;
int ret;
+ u64 lba_off;
ret = check_device_access_params(scp, lba, num);
if (ret)
@@ -2998,31 +2955,29 @@ resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, u32 ei_lba,
goto out;
}
+ lba_off = lba * sdebug_sector_size;
/* if ndob then zero 1 logical block, else fetch 1 logical block */
if (ndob) {
- memset(fake_storep + (lba * scsi_debug_sector_size), 0,
- scsi_debug_sector_size);
+ memset(fake_storep + lba_off, 0, sdebug_sector_size);
ret = 0;
} else
- ret = fetch_to_dev_buffer(scp, fake_storep +
- (lba * scsi_debug_sector_size),
- scsi_debug_sector_size);
+ ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
+ sdebug_sector_size);
if (-1 == ret) {
write_unlock_irqrestore(&atomic_rw, iflags);
- return (DID_ERROR << 16);
- } else if ((ret < (num * scsi_debug_sector_size)) &&
- (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+ return DID_ERROR << 16;
+ } else if (sdebug_verbose && (ret < (num * sdebug_sector_size)))
sdev_printk(KERN_INFO, scp->device,
"%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
my_name, "write same",
- num * scsi_debug_sector_size, ret);
+ num * sdebug_sector_size, ret);
/* Copy first sector to remaining blocks */
for (i = 1 ; i < num ; i++)
- memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
- fake_storep + (lba * scsi_debug_sector_size),
- scsi_debug_sector_size);
+ memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
+ fake_storep + lba_off,
+ sdebug_sector_size);
if (scsi_debug_lbp())
map_region(lba, num);
@@ -3032,8 +2987,8 @@ out:
return 0;
}
-static int
-resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_write_same_10(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
u8 *cmd = scp->cmnd;
u32 lba;
@@ -3042,7 +2997,7 @@ resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
bool unmap = false;
if (cmd[1] & 0x8) {
- if (scsi_debug_lbpws10 == 0) {
+ if (sdebug_lbpws10 == 0) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
return check_condition_result;
} else
@@ -3050,15 +3005,15 @@ resp_write_same_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
lba = get_unaligned_be32(cmd + 2);
num = get_unaligned_be16(cmd + 7);
- if (num > scsi_debug_write_same_length) {
+ if (num > sdebug_write_same_length) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
return check_condition_result;
}
return resp_write_same(scp, lba, num, ei_lba, unmap, false);
}
-static int
-resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_write_same_16(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
u8 *cmd = scp->cmnd;
u64 lba;
@@ -3068,7 +3023,7 @@ resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
bool ndob = false;
if (cmd[1] & 0x8) { /* UNMAP */
- if (scsi_debug_lbpws == 0) {
+ if (sdebug_lbpws == 0) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
return check_condition_result;
} else
@@ -3078,7 +3033,7 @@ resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
ndob = true;
lba = get_unaligned_be64(cmd + 2);
num = get_unaligned_be32(cmd + 10);
- if (num > scsi_debug_write_same_length) {
+ if (num > sdebug_write_same_length) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
return check_condition_result;
}
@@ -3088,8 +3043,8 @@ resp_write_same_16(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
/* Note the mode field is in the same position as the (lower) service action
* field. For the Report supported operation codes command, SPC-4 suggests
 * each mode of this command should be reported separately; left for the future. */
-static int
-resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_write_buffer(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
u8 *cmd = scp->cmnd;
struct scsi_device *sdp = scp->device;
@@ -3134,15 +3089,15 @@ resp_write_buffer(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return 0;
}
-static int
-resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_comp_write(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
u8 *cmd = scp->cmnd;
u8 *arr;
u8 *fake_storep_hold;
u64 lba;
u32 dnum;
- u32 lb_size = scsi_debug_sector_size;
+ u32 lb_size = sdebug_sector_size;
u8 num;
unsigned long iflags;
int ret;
@@ -3152,13 +3107,13 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
if (0 == num)
return 0; /* degenerate case, not an error */
- if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
+ if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
(cmd[1] & 0xe0)) {
mk_sense_invalid_opcode(scp);
return check_condition_result;
}
- if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
- scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
+ if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
+ sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
(cmd[1] & 0xe0) == 0)
sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
"to DIF device\n");
@@ -3193,8 +3148,7 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
if (ret == -1) {
retval = DID_ERROR << 16;
goto cleanup;
- } else if ((ret < (dnum * lb_size)) &&
- (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+ } else if (sdebug_verbose && (ret < (dnum * lb_size)))
sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
"indicated=%u, IO sent=%d bytes\n", my_name,
dnum * lb_size, ret);
@@ -3217,8 +3171,7 @@ struct unmap_block_desc {
__be32 __reserved;
};
-static int
-resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
unsigned char *buf;
struct unmap_block_desc *desc;
@@ -3233,12 +3186,12 @@ resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
BUG_ON(scsi_bufflen(scp) != payload_len);
descriptors = (payload_len - 8) / 16;
- if (descriptors > scsi_debug_unmap_max_desc) {
+ if (descriptors > sdebug_unmap_max_desc) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
return check_condition_result;
}
- buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
+ buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
if (!buf) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
INSUFF_RES_ASCQ);
@@ -3276,8 +3229,8 @@ out:
#define SDEBUG_GET_LBA_STATUS_LEN 32
-static int
-resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_get_lba_status(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
u8 *cmd = scp->cmnd;
u64 lba;
@@ -3316,63 +3269,94 @@ resp_get_lba_status(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
-#define SDEBUG_RLUN_ARR_SZ 256
-
-static int resp_report_luns(struct scsi_cmnd * scp,
- struct sdebug_dev_info * devip)
+/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
+ * (W-LUN), the normal Linux scanning logic does not associate it with a
+ * device (e.g. /dev/sg7). The following magic will make that association:
+ * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
+ * where <n> is a host number. If there are multiple targets in a host then
+ * the above will associate a W-LUN with each target. To get a W-LUN for
+ * target 2 only, use "echo '- 2 49409' > scan".
+ */
+static int resp_report_luns(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
+ unsigned char *cmd = scp->cmnd;
unsigned int alloc_len;
- int lun_cnt, i, upper, num, n, want_wlun, shortish;
+ unsigned char select_report;
u64 lun;
- unsigned char *cmd = scp->cmnd;
- int select_report = (int)cmd[2];
- struct scsi_lun *one_lun;
- unsigned char arr[SDEBUG_RLUN_ARR_SZ];
- unsigned char * max_addr;
+ struct scsi_lun *lun_p;
+ u8 *arr;
+ unsigned int lun_cnt; /* normal LUN count (max: 256) */
+ unsigned int wlun_cnt; /* report luns W-LUN count */
+ unsigned int tlun_cnt; /* total LUN count */
+ unsigned int rlen; /* response length (in bytes) */
+ int i, res;
clear_luns_changed_on_target(devip);
- alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
- shortish = (alloc_len < 4);
- if (shortish || (select_report > 2)) {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, shortish ? 6 : 2, -1);
+
+ select_report = cmd[2];
+ alloc_len = get_unaligned_be32(cmd + 6);
+
+ if (alloc_len < 4) {
+ pr_err("alloc len too small %d\n", alloc_len);
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
return check_condition_result;
}
- /* can produce response with up to 16k luns (lun 0 to lun 16383) */
- memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
- lun_cnt = scsi_debug_max_luns;
- if (1 == select_report)
+
+ switch (select_report) {
+ case 0: /* all LUNs apart from W-LUNs */
+ lun_cnt = sdebug_max_luns;
+ wlun_cnt = 0;
+ break;
+ case 1: /* only W-LUNs */
lun_cnt = 0;
- else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
+ wlun_cnt = 1;
+ break;
+ case 2: /* all LUNs */
+ lun_cnt = sdebug_max_luns;
+ wlun_cnt = 1;
+ break;
+ case 0x10: /* only administrative LUs */
+ case 0x11: /* see SPC-5 */
+	case 0x12: /* only subsidiary LUs owned by referenced LU */
+ default:
+ pr_debug("select report invalid %d\n", select_report);
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
+ return check_condition_result;
+ }
+
+ if (sdebug_no_lun_0 && (lun_cnt > 0))
--lun_cnt;
- want_wlun = (select_report > 0) ? 1 : 0;
- num = lun_cnt + want_wlun;
- arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
- arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
- n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
- sizeof(struct scsi_lun)), num);
- if (n < num) {
- want_wlun = 0;
- lun_cnt = n;
- }
- one_lun = (struct scsi_lun *) &arr[8];
- max_addr = arr + SDEBUG_RLUN_ARR_SZ;
- for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
- ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
- i++, lun++) {
- upper = (lun >> 8) & 0x3f;
- if (upper)
- one_lun[i].scsi_lun[0] =
- (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
- one_lun[i].scsi_lun[1] = lun & 0xff;
- }
- if (want_wlun) {
- one_lun[i].scsi_lun[0] = (SCSI_W_LUN_REPORT_LUNS >> 8) & 0xff;
- one_lun[i].scsi_lun[1] = SCSI_W_LUN_REPORT_LUNS & 0xff;
- i++;
- }
- alloc_len = (unsigned char *)(one_lun + i) - arr;
- return fill_from_dev_buffer(scp, arr,
- min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
+
+ tlun_cnt = lun_cnt + wlun_cnt;
+
+ rlen = (tlun_cnt * sizeof(struct scsi_lun)) + 8;
+ arr = vmalloc(rlen);
+ if (!arr) {
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+ INSUFF_RES_ASCQ);
+ return check_condition_result;
+ }
+ memset(arr, 0, rlen);
+ pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
+ select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
+
+ /* luns start at byte 8 in response following the header */
+ lun_p = (struct scsi_lun *)&arr[8];
+
+ /* LUNs use single level peripheral device addressing method */
+ lun = sdebug_no_lun_0 ? 1 : 0;
+ for (i = 0; i < lun_cnt; i++)
+ int_to_scsilun(lun++, lun_p++);
+
+ if (wlun_cnt)
+ int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p++);
+
+ put_unaligned_be32(rlen - 8, &arr[0]);
+
+ res = fill_from_dev_buffer(scp, arr, rlen);
+ vfree(arr);
+ return res;
}
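/* Sketch of the REPORT LUNS parameter data built above (per SPC), with
 * n = tlun_cnt:
 *   bytes 0..3   LUN list length = n * 8  (i.e. rlen - 8)
 *   bytes 4..7   reserved
 *   bytes 8..    n LUN entries of 8 bytes each (struct scsi_lun)
 */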
static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
@@ -3385,7 +3369,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
struct sg_mapping_iter miter;
/* better not to use temporary buffer. */
- buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
+ buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
if (!buf) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
INSUFF_RES_ASCQ);
@@ -3411,8 +3395,8 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
return 0;
}
-static int
-resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
+static int resp_xdwriteread_10(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
{
u8 *cmd = scp->cmnd;
u64 lba;
@@ -3437,41 +3421,66 @@ resp_xdwriteread_10(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
return resp_xdwriteread(scp, lba, num, devip);
}
-/* When timer or tasklet goes off this function is called. */
-static void sdebug_q_cmd_complete(unsigned long indx)
+static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
{
- int qa_indx;
+ struct sdebug_queue *sqp = sdebug_q_arr;
+
+ if (sdebug_mq_active) {
+ u32 tag = blk_mq_unique_tag(cmnd->request);
+ u16 hwq = blk_mq_unique_tag_to_hwq(tag);
+
+ if (unlikely(hwq >= submit_queues)) {
+ pr_warn("Unexpected hwq=%d, apply modulo\n", hwq);
+ hwq %= submit_queues;
+ }
+ pr_debug("tag=%u, hwq=%d\n", tag, hwq);
+ return sqp + hwq;
+ } else
+ return sqp;
+}
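+/* For reference: blk_mq_unique_tag() returns the hardware queue index in
+ * its upper 16 bits, so blk_mq_unique_tag_to_hwq() above amounts to
+ * "tag >> 16" (a sketch; see BLK_MQ_UNIQUE_TAG_BITS in <linux/blk-mq.h>).
+ */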
+
+/* Queued (deferred) command completions converge here. */
+static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
+{
+ int qc_idx;
int retiring = 0;
unsigned long iflags;
+ struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct scsi_cmnd *scp;
struct sdebug_dev_info *devip;
- atomic_inc(&sdebug_completions);
- qa_indx = indx;
- if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
- pr_err("wild qa_indx=%d\n", qa_indx);
+ qc_idx = sd_dp->qc_idx;
+ sqp = sdebug_q_arr + sd_dp->sqa_idx;
+ if (sdebug_statistics) {
+ atomic_inc(&sdebug_completions);
+ if (raw_smp_processor_id() != sd_dp->issuing_cpu)
+ atomic_inc(&sdebug_miss_cpus);
+ }
+ if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
+ pr_err("wild qc_idx=%d\n", qc_idx);
return;
}
- spin_lock_irqsave(&queued_arr_lock, iflags);
- sqcp = &queued_arr[qa_indx];
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+ sqcp = &sqp->qc_arr[qc_idx];
scp = sqcp->a_cmnd;
- if (NULL == scp) {
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
- pr_err("scp is NULL\n");
+ if (unlikely(scp == NULL)) {
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d\n",
+ sd_dp->sqa_idx, qc_idx);
return;
}
devip = (struct sdebug_dev_info *)scp->device->hostdata;
- if (devip)
+ if (likely(devip))
atomic_dec(&devip->num_in_q);
else
pr_err("devip=NULL\n");
- if (atomic_read(&retired_max_queue) > 0)
+ if (unlikely(atomic_read(&retired_max_queue) > 0))
retiring = 1;
sqcp->a_cmnd = NULL;
- if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
pr_err("Unexpected completion\n");
return;
}
@@ -3480,105 +3489,71 @@ static void sdebug_q_cmd_complete(unsigned long indx)
int k, retval;
retval = atomic_read(&retired_max_queue);
- if (qa_indx >= retval) {
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ if (qc_idx >= retval) {
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
pr_err("index %d too large\n", retval);
return;
}
- k = find_last_bit(queued_in_use_bm, retval);
- if ((k < scsi_debug_max_queue) || (k == retval))
+ k = find_last_bit(sqp->in_use_bm, retval);
+ if ((k < sdebug_max_queue) || (k == retval))
atomic_set(&retired_max_queue, 0);
else
atomic_set(&retired_max_queue, k + 1);
}
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
scp->scsi_done(scp); /* callback to mid level */
}
/* When the high-resolution timer goes off, this function is called. */
-static enum hrtimer_restart
-sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
+static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
{
- int qa_indx;
- int retiring = 0;
- unsigned long iflags;
- struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
- struct sdebug_queued_cmd *sqcp;
- struct scsi_cmnd *scp;
- struct sdebug_dev_info *devip;
-
- atomic_inc(&sdebug_completions);
- qa_indx = sd_hrtp->qa_indx;
- if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
- pr_err("wild qa_indx=%d\n", qa_indx);
- goto the_end;
- }
- spin_lock_irqsave(&queued_arr_lock, iflags);
- sqcp = &queued_arr[qa_indx];
- scp = sqcp->a_cmnd;
- if (NULL == scp) {
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
- pr_err("scp is NULL\n");
- goto the_end;
- }
- devip = (struct sdebug_dev_info *)scp->device->hostdata;
- if (devip)
- atomic_dec(&devip->num_in_q);
- else
- pr_err("devip=NULL\n");
- if (atomic_read(&retired_max_queue) > 0)
- retiring = 1;
-
- sqcp->a_cmnd = NULL;
- if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
- pr_err("Unexpected completion\n");
- goto the_end;
- }
-
- if (unlikely(retiring)) { /* user has reduced max_queue */
- int k, retval;
-
- retval = atomic_read(&retired_max_queue);
- if (qa_indx >= retval) {
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
- pr_err("index %d too large\n", retval);
- goto the_end;
- }
- k = find_last_bit(queued_in_use_bm, retval);
- if ((k < scsi_debug_max_queue) || (k == retval))
- atomic_set(&retired_max_queue, 0);
- else
- atomic_set(&retired_max_queue, k + 1);
- }
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
- scp->scsi_done(scp); /* callback to mid level */
-the_end:
+ struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
+ hrt);
+ sdebug_q_cmd_complete(sd_dp);
return HRTIMER_NORESTART;
}
-static struct sdebug_dev_info *
-sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
+/* When the work queue runs the deferred work, this function is called. */
+static void sdebug_q_cmd_wq_complete(struct work_struct *work)
+{
+ struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
+ ew.work);
+ sdebug_q_cmd_complete(sd_dp);
+}
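+/* Both completion paths above recover their state with container_of().
+ * A sketch of struct sdebug_defer as used in this patch (fields inferred
+ * from the code here, not a verbatim copy of the definition):
+ *
+ *	struct sdebug_defer {
+ *		struct hrtimer hrt;	// jdelay > 0 or ndelay > 0
+ *		struct execute_work ew;	// jdelay < 0: work queue path
+ *		int sqa_idx;		// index into sdebug_q_arr
+ *		int qc_idx;		// index into sqp->qc_arr
+ *		int issuing_cpu;	// for sdebug_statistics
+ *	};
+ */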
+
+static bool got_shared_uuid;
+static uuid_be shared_uuid;
+
+static struct sdebug_dev_info *sdebug_device_create(
+ struct sdebug_host_info *sdbg_host, gfp_t flags)
{
struct sdebug_dev_info *devip;
devip = kzalloc(sizeof(*devip), flags);
if (devip) {
+ if (sdebug_uuid_ctl == 1)
+ uuid_be_gen(&devip->lu_name);
+ else if (sdebug_uuid_ctl == 2) {
+ if (got_shared_uuid)
+ devip->lu_name = shared_uuid;
+ else {
+ uuid_be_gen(&shared_uuid);
+ got_shared_uuid = true;
+ devip->lu_name = shared_uuid;
+ }
+ }
devip->sdbg_host = sdbg_host;
list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
}
return devip;
}
-static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
+static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
{
- struct sdebug_host_info * sdbg_host;
- struct sdebug_dev_info * open_devip = NULL;
- struct sdebug_dev_info * devip =
- (struct sdebug_dev_info *)sdev->hostdata;
+ struct sdebug_host_info *sdbg_host;
+ struct sdebug_dev_info *open_devip = NULL;
+ struct sdebug_dev_info *devip;
- if (devip)
- return devip;
sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
if (!sdbg_host) {
pr_err("Host info NULL\n");
@@ -3614,7 +3589,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+ if (sdebug_verbose)
pr_info("slave_alloc <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
@@ -3623,19 +3598,22 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp)
static int scsi_debug_slave_configure(struct scsi_device *sdp)
{
- struct sdebug_dev_info *devip;
+ struct sdebug_dev_info *devip =
+ (struct sdebug_dev_info *)sdp->hostdata;
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+ if (sdebug_verbose)
pr_info("slave_configure <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
- if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
- sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
- devip = devInfoReg(sdp);
- if (NULL == devip)
- return 1; /* no resources, will be marked offline */
+ if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
+ sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
+ if (devip == NULL) {
+ devip = find_build_dev_info(sdp);
+ if (devip == NULL)
+ return 1; /* no resources, will be marked offline */
+ }
sdp->hostdata = devip;
blk_queue_max_segment_size(sdp->request_queue, -1U);
- if (scsi_debug_no_uld)
+ if (sdebug_no_uld)
sdp->no_uld_attach = 1;
return 0;
}
@@ -3645,7 +3623,7 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
struct sdebug_dev_info *devip =
(struct sdebug_dev_info *)sdp->hostdata;
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+ if (sdebug_verbose)
pr_info("slave_destroy <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (devip) {
@@ -3655,135 +3633,130 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
}
}
-/* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
-static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
+static void stop_qc_helper(struct sdebug_defer *sd_dp)
+{
+ if (!sd_dp)
+ return;
+ if ((sdebug_jdelay > 0) || (sdebug_ndelay > 0))
+ hrtimer_cancel(&sd_dp->hrt);
+ else if (sdebug_jdelay < 0)
+ cancel_work_sync(&sd_dp->ew.work);
+}
+
+/* If @cmnd is found, deletes its timer or work queue and returns true;
+   else returns false. */
+static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
unsigned long iflags;
- int k, qmax, r_qmax;
+ int j, k, qmax, r_qmax;
+ struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
-
- spin_lock_irqsave(&queued_arr_lock, iflags);
- qmax = scsi_debug_max_queue;
- r_qmax = atomic_read(&retired_max_queue);
- if (r_qmax > qmax)
- qmax = r_qmax;
- for (k = 0; k < qmax; ++k) {
- if (test_bit(k, queued_in_use_bm)) {
- sqcp = &queued_arr[k];
- if (cmnd == sqcp->a_cmnd) {
+ struct sdebug_defer *sd_dp;
+
+ for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+ qmax = sdebug_max_queue;
+ r_qmax = atomic_read(&retired_max_queue);
+ if (r_qmax > qmax)
+ qmax = r_qmax;
+ for (k = 0; k < qmax; ++k) {
+ if (test_bit(k, sqp->in_use_bm)) {
+ sqcp = &sqp->qc_arr[k];
+ if (cmnd != sqcp->a_cmnd)
+ continue;
+ /* found */
devip = (struct sdebug_dev_info *)
- cmnd->device->hostdata;
+ cmnd->device->hostdata;
if (devip)
atomic_dec(&devip->num_in_q);
sqcp->a_cmnd = NULL;
- spin_unlock_irqrestore(&queued_arr_lock,
- iflags);
- if (scsi_debug_ndelay > 0) {
- if (sqcp->sd_hrtp)
- hrtimer_cancel(
- &sqcp->sd_hrtp->hrt);
- } else if (scsi_debug_delay > 0) {
- if (sqcp->cmnd_timerp)
- del_timer_sync(
- sqcp->cmnd_timerp);
- } else if (scsi_debug_delay < 0) {
- if (sqcp->tletp)
- tasklet_kill(sqcp->tletp);
- }
- clear_bit(k, queued_in_use_bm);
- return 1;
+ sd_dp = sqcp->sd_dp;
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ stop_qc_helper(sd_dp);
+ clear_bit(k, sqp->in_use_bm);
+ return true;
}
}
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
}
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
- return 0;
+ return false;
}
-/* Deletes (stops) timers or tasklets of all queued commands */
+/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{
unsigned long iflags;
- int k;
+ int j, k;
+ struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct sdebug_dev_info *devip;
-
- spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
- if (test_bit(k, queued_in_use_bm)) {
- sqcp = &queued_arr[k];
- if (sqcp->a_cmnd) {
+ struct sdebug_defer *sd_dp;
+
+ for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+ for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
+ if (test_bit(k, sqp->in_use_bm)) {
+ sqcp = &sqp->qc_arr[k];
+ if (sqcp->a_cmnd == NULL)
+ continue;
devip = (struct sdebug_dev_info *)
sqcp->a_cmnd->device->hostdata;
if (devip)
atomic_dec(&devip->num_in_q);
sqcp->a_cmnd = NULL;
- spin_unlock_irqrestore(&queued_arr_lock,
- iflags);
- if (scsi_debug_ndelay > 0) {
- if (sqcp->sd_hrtp)
- hrtimer_cancel(
- &sqcp->sd_hrtp->hrt);
- } else if (scsi_debug_delay > 0) {
- if (sqcp->cmnd_timerp)
- del_timer_sync(
- sqcp->cmnd_timerp);
- } else if (scsi_debug_delay < 0) {
- if (sqcp->tletp)
- tasklet_kill(sqcp->tletp);
- }
- clear_bit(k, queued_in_use_bm);
- spin_lock_irqsave(&queued_arr_lock, iflags);
+ sd_dp = sqcp->sd_dp;
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ stop_qc_helper(sd_dp);
+ clear_bit(k, sqp->in_use_bm);
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
}
}
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
}
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
/* Free queued command memory on heap */
static void free_all_queued(void)
{
- unsigned long iflags;
- int k;
+ int j, k;
+ struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
- spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
- sqcp = &queued_arr[k];
- kfree(sqcp->cmnd_timerp);
- sqcp->cmnd_timerp = NULL;
- kfree(sqcp->tletp);
- sqcp->tletp = NULL;
- kfree(sqcp->sd_hrtp);
- sqcp->sd_hrtp = NULL;
+ for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
+ for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
+ sqcp = &sqp->qc_arr[k];
+ kfree(sqcp->sd_dp);
+ sqcp->sd_dp = NULL;
+ }
}
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{
+ bool ok;
+
++num_aborts;
if (SCpnt) {
- if (SCpnt->device &&
- (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
- sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
- __func__);
- stop_queued_cmnd(SCpnt);
+ ok = stop_queued_cmnd(SCpnt);
+ if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
+ sdev_printk(KERN_INFO, SCpnt->device,
+ "%s: command%s found\n", __func__,
+ ok ? "" : " not");
}
return SUCCESS;
}
static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
{
- struct sdebug_dev_info * devip;
-
++num_dev_resets;
if (SCpnt && SCpnt->device) {
struct scsi_device *sdp = SCpnt->device;
+ struct sdebug_dev_info *devip =
+ (struct sdebug_dev_info *)sdp->hostdata;
- if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
+ if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
- devip = devInfoReg(sdp);
if (devip)
set_bit(SDEBUG_UA_POR, devip->uas_bm);
}
@@ -3804,7 +3777,7 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
sdp = SCpnt->device;
if (!sdp)
goto lie;
- if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
+ if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
hp = sdp->host;
if (!hp)
@@ -3819,7 +3792,7 @@ static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
++k;
}
}
- if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
+ if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp,
"%s: %d device(s) found in target\n", __func__, k);
lie:
@@ -3838,7 +3811,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
if (!(SCpnt && SCpnt->device))
goto lie;
sdp = SCpnt->device;
- if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
+ if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
hp = sdp->host;
if (hp) {
@@ -3852,7 +3825,7 @@ static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
}
}
}
- if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
+ if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp,
"%s: %d device(s) found in host\n", __func__, k);
lie:
@@ -3866,7 +3839,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
int k = 0;
++num_host_resets;
- if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
+ if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
spin_lock(&sdebug_host_list_lock);
list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
@@ -3878,7 +3851,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
}
spin_unlock(&sdebug_host_list_lock);
stop_all_queued();
- if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
+ if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
"%s: %d device(s) found\n", __func__, k);
return SUCCESS;
@@ -3893,22 +3866,22 @@ static void __init sdebug_build_parts(unsigned char *ramp,
int heads_by_sects, start_sec, end_sec;
/* assume partition table already zeroed */
- if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
+ if ((sdebug_num_parts < 1) || (store_size < 1048576))
return;
- if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
- scsi_debug_num_parts = SDEBUG_MAX_PARTS;
+ if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
+ sdebug_num_parts = SDEBUG_MAX_PARTS;
pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
}
num_sectors = (int)sdebug_store_sectors;
sectors_per_part = (num_sectors - sdebug_sectors_per)
- / scsi_debug_num_parts;
+ / sdebug_num_parts;
heads_by_sects = sdebug_heads * sdebug_sectors_per;
starts[0] = sdebug_sectors_per;
- for (k = 1; k < scsi_debug_num_parts; ++k)
+ for (k = 1; k < sdebug_num_parts; ++k)
starts[k] = ((k * sectors_per_part) / heads_by_sects)
* heads_by_sects;
- starts[scsi_debug_num_parts] = num_sectors;
- starts[scsi_debug_num_parts + 1] = 0;
+ starts[sdebug_num_parts] = num_sectors;
+ starts[sdebug_num_parts + 1] = 0;
ramp[510] = 0x55; /* magic partition markings */
ramp[511] = 0xAA;
@@ -3934,67 +3907,118 @@ static void __init sdebug_build_parts(unsigned char *ramp,
}
}
-static int
-schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
- int scsi_result, int delta_jiff)
+static void block_unblock_all_queues(bool block)
+{
+ int j;
+ struct sdebug_queue *sqp;
+
+ for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
+ atomic_set(&sqp->blocked, (int)block);
+}
+
+/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
+ * commands will be processed normally before triggers occur.
+ */
+static void tweak_cmnd_count(void)
+{
+ int count, modulo;
+
+ modulo = abs(sdebug_every_nth);
+ if (modulo < 2)
+ return;
+ block_unblock_all_queues(true);
+ count = atomic_read(&sdebug_cmnd_count);
+ atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
+ block_unblock_all_queues(false);
+}
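+/* Example, assuming sdebug_every_nth = 100 and a command count of 437:
+ * (437 / 100) * 100 rounds the count down to 400, so 99 further commands
+ * complete normally before the next every_nth trigger point.
+ */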
+
+static void clear_queue_stats(void)
+{
+ atomic_set(&sdebug_cmnd_count, 0);
+ atomic_set(&sdebug_completions, 0);
+ atomic_set(&sdebug_miss_cpus, 0);
+ atomic_set(&sdebug_a_tsf, 0);
+}
+
+static void setup_inject(struct sdebug_queue *sqp,
+ struct sdebug_queued_cmd *sqcp)
+{
+ if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0)
+ return;
+ sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
+ sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
+ sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
+ sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
+ sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
+}
+
+/* Complete the processing of the thread that queued a SCSI command to this
+ * driver. It either completes the command by calling cmnd_done() or
+ * schedules an hrtimer or a work item and then returns 0. Returns
+ * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
+ */
+static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
+ int scsi_result, int delta_jiff)
{
unsigned long iflags;
int k, num_in_q, qdepth, inject;
- struct sdebug_queued_cmd *sqcp = NULL;
+ struct sdebug_queue *sqp;
+ struct sdebug_queued_cmd *sqcp;
struct scsi_device *sdp;
+ struct sdebug_defer *sd_dp;
- /* this should never happen */
- if (WARN_ON(!cmnd))
- return SCSI_MLQUEUE_HOST_BUSY;
-
- if (NULL == devip) {
- pr_warn("called devip == NULL\n");
- /* no particularly good error to report back */
- return SCSI_MLQUEUE_HOST_BUSY;
+ if (unlikely(devip == NULL)) {
+ if (scsi_result == 0)
+ scsi_result = DID_NO_CONNECT << 16;
+ goto respond_in_thread;
}
-
sdp = cmnd->device;
- if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+ if (unlikely(sdebug_verbose && scsi_result))
sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
__func__, scsi_result);
if (delta_jiff == 0)
goto respond_in_thread;
/* schedule the response at a later time if resources permit */
- spin_lock_irqsave(&queued_arr_lock, iflags);
+ sqp = get_queue(cmnd);
+ spin_lock_irqsave(&sqp->qc_lock, iflags);
+ if (unlikely(atomic_read(&sqp->blocked))) {
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
num_in_q = atomic_read(&devip->num_in_q);
qdepth = cmnd->device->queue_depth;
inject = 0;
- if ((qdepth > 0) && (num_in_q >= qdepth)) {
+ if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
if (scsi_result) {
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
goto respond_in_thread;
} else
scsi_result = device_qfull_result;
- } else if ((scsi_debug_every_nth != 0) &&
- (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
- (scsi_result == 0)) {
+ } else if (unlikely(sdebug_every_nth &&
+ (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
+ (scsi_result == 0))) {
if ((num_in_q == (qdepth - 1)) &&
(atomic_inc_return(&sdebug_a_tsf) >=
- abs(scsi_debug_every_nth))) {
+ abs(sdebug_every_nth))) {
atomic_set(&sdebug_a_tsf, 0);
inject = 1;
scsi_result = device_qfull_result;
}
}
- k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
- if (k >= scsi_debug_max_queue) {
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
+ if (unlikely(k >= sdebug_max_queue)) {
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (scsi_result)
goto respond_in_thread;
- else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
+ else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
scsi_result = device_qfull_result;
- if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
+ if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdp,
"%s: max_queue=%d exceeded, %s\n",
- __func__, scsi_debug_max_queue,
+ __func__, sdebug_max_queue,
(scsi_result ? "status: TASK SET FULL" :
"report: host busy"));
if (scsi_result)
@@ -4002,55 +4026,56 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
else
return SCSI_MLQUEUE_HOST_BUSY;
}
- __set_bit(k, queued_in_use_bm);
+ __set_bit(k, sqp->in_use_bm);
atomic_inc(&devip->num_in_q);
- sqcp = &queued_arr[k];
+ sqcp = &sqp->qc_arr[k];
sqcp->a_cmnd = cmnd;
+ cmnd->host_scribble = (unsigned char *)sqcp;
cmnd->result = scsi_result;
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
- if (delta_jiff > 0) {
- if (NULL == sqcp->cmnd_timerp) {
- sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
- GFP_ATOMIC);
- if (NULL == sqcp->cmnd_timerp)
- return SCSI_MLQUEUE_HOST_BUSY;
- init_timer(sqcp->cmnd_timerp);
- }
- sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
- sqcp->cmnd_timerp->data = k;
- sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
- add_timer(sqcp->cmnd_timerp);
- } else if (scsi_debug_ndelay > 0) {
- ktime_t kt = ktime_set(0, scsi_debug_ndelay);
- struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
-
- if (NULL == sd_hp) {
- sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
- if (NULL == sd_hp)
+ sd_dp = sqcp->sd_dp;
+ spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
+ setup_inject(sqp, sqcp);
+ if (delta_jiff > 0 || sdebug_ndelay > 0) {
+ ktime_t kt;
+
+ if (delta_jiff > 0) {
+ struct timespec ts;
+
+ jiffies_to_timespec(delta_jiff, &ts);
+ kt = ktime_set(ts.tv_sec, ts.tv_nsec);
+ } else
+ kt = ktime_set(0, sdebug_ndelay);
+ if (NULL == sd_dp) {
+ sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
+ if (NULL == sd_dp)
return SCSI_MLQUEUE_HOST_BUSY;
- sqcp->sd_hrtp = sd_hp;
- hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
- sd_hp->qa_indx = k;
+ sqcp->sd_dp = sd_dp;
+ hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
+ sd_dp->sqa_idx = sqp - sdebug_q_arr;
+ sd_dp->qc_idx = k;
}
- hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
- } else { /* delay < 0 */
- if (NULL == sqcp->tletp) {
- sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
- GFP_ATOMIC);
- if (NULL == sqcp->tletp)
+ if (sdebug_statistics)
+ sd_dp->issuing_cpu = raw_smp_processor_id();
+ hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
+ } else { /* jdelay < 0, use work queue */
+ if (NULL == sd_dp) {
+ sd_dp = kzalloc(sizeof(*sqcp->sd_dp), GFP_ATOMIC);
+ if (NULL == sd_dp)
return SCSI_MLQUEUE_HOST_BUSY;
- tasklet_init(sqcp->tletp,
- sdebug_q_cmd_complete, k);
+ sqcp->sd_dp = sd_dp;
+ sd_dp->sqa_idx = sqp - sdebug_q_arr;
+ sd_dp->qc_idx = k;
+ INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
}
- if (-1 == delta_jiff)
- tasklet_hi_schedule(sqcp->tletp);
- else
- tasklet_schedule(sqcp->tletp);
+ if (sdebug_statistics)
+ sd_dp->issuing_cpu = raw_smp_processor_id();
+ schedule_work(&sd_dp->ew.work);
}
- if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
- (scsi_result == device_qfull_result))
+ if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) &&
+ (scsi_result == device_qfull_result)))
sdev_printk(KERN_INFO, sdp,
"%s: num_in_q=%d +1, %s%s\n", __func__,
num_in_q, (inject ? "<inject> " : ""),
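The rewritten schedule_resp() above keeps one sdebug_defer object per queued command: a positive delay (in jiffies) or a positive ndelay (in nanoseconds) arms a CPU-pinned hrtimer, while a negative delay defers completion to a work queue instead of the old tasklet. A condensed, illustrative sketch, with error paths omitted and jiffies_to_nsecs() assumed as an equivalent of the timespec conversion the hunk actually uses:

static void demo_defer(struct sdebug_defer *sd_dp, int delta_jiff)
{
	if (delta_jiff > 0 || sdebug_ndelay > 0) {
		ktime_t kt = (delta_jiff > 0) ?
			ns_to_ktime(jiffies_to_nsecs(delta_jiff)) :
			ktime_set(0, sdebug_ndelay);

		hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
	} else {
		/* delta_jiff < 0: complete from process context */
		schedule_work(&sd_dp->ew.work);
	}
}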
@@ -4069,52 +4094,55 @@ respond_in_thread: /* call back to mid-layer using invocation thread */
as it can when the corresponding attribute in the
/sys/bus/pseudo/drivers/scsi_debug directory is changed.
*/
-module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
-module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
-module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);
-module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
-module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
-module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
-module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
-module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
-module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
-module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
-module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
-module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
-module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
-module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
-module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
-module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
-module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
-module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
-module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
-module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
-module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
-module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
-module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
-module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
-module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
-module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
-module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
-module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
-module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
-module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
-module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
-module_param_named(strict, scsi_debug_strict, bool, S_IRUGO | S_IWUSR);
-module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
-module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
-module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
-module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
-module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
-module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
+module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
+module_param_named(ato, sdebug_ato, int, S_IRUGO);
+module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
+module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
+module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
+module_param_named(dif, sdebug_dif, int, S_IRUGO);
+module_param_named(dix, sdebug_dix, int, S_IRUGO);
+module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
+module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
+module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
+module_param_named(guard, sdebug_guard, uint, S_IRUGO);
+module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
+module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
+module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
+module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
+module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
+module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
+module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
+module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
+module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
+module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
+module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
+module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
+module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
+module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
+module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
+module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
+module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
+module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
+module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
+module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
+module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
+module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
+module_param_named(submit_queues, submit_queues, int, S_IRUGO);
+module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
+module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
+module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
+module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
+module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
+module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
+module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
S_IRUGO | S_IWUSR);
-module_param_named(write_same_length, scsi_debug_write_same_length, int,
+module_param_named(write_same_length, sdebug_write_same_length, int,
S_IRUGO | S_IWUSR);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(SCSI_DEBUG_VERSION);
+MODULE_VERSION(SDEBUG_VERSION);
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
@@ -4127,11 +4155,12 @@ MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
-MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
+MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
-MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
+MODULE_PARM_DESC(lbprz,
+ "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
@@ -4145,30 +4174,42 @@ MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err...
MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
-MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
+MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
+MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
+MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
+MODULE_PARM_DESC(uuid_ctl,
+ "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
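The wholesale rename above (scsi_debug_* to sdebug_*) is invisible to users because module_param_named() decouples the user-visible parameter name from the C identifier; "delay" even maps to a variable with a different suffix (sdebug_jdelay) to record its unit. A minimal sketch of the mechanism, where the demo_ identifier is hypothetical:

static int demo_jdelay = 1;	/* internal name; unit is jiffies */
module_param_named(delay, demo_jdelay, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(delay, "response delay in jiffies (def=1)");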
-static char sdebug_info[256];
+#define SDEBUG_INFO_LEN 256
+static char sdebug_info[SDEBUG_INFO_LEN];
static const char * scsi_debug_info(struct Scsi_Host * shp)
{
- sprintf(sdebug_info, "scsi_debug, version %s [%s], "
- "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
- scsi_debug_version_date, scsi_debug_dev_size_mb,
- scsi_debug_opts);
+ int k;
+
+ k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
+ my_name, SDEBUG_VERSION, sdebug_version_date);
+ if (k >= (SDEBUG_INFO_LEN - 1))
+ return sdebug_info;
+ scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
+ " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
+ sdebug_dev_size_mb, sdebug_opts, submit_queues,
+ "statistics", (int)sdebug_statistics);
return sdebug_info;
}
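The rewrite of scsi_debug_info() relies on scnprintf() returning the number of characters actually written (excluding the trailing NUL) and never more than size - 1, so the offset arithmetic cannot overrun the buffer. A stand-alone sketch of the same append idiom, with hypothetical field names:

static int demo_fill(char *buf, int a, int b)
{
	int k;

	k = scnprintf(buf, SDEBUG_INFO_LEN, "first=%d\n", a);
	if (k >= SDEBUG_INFO_LEN - 1)
		return k;	/* buffer full, stop appending */
	k += scnprintf(buf + k, SDEBUG_INFO_LEN - k, "second=%d", b);
	return k;
}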
/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
-static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
+static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
+ int length)
{
char arr[16];
int opts;
@@ -4180,9 +4221,11 @@ static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int lengt
arr[minLen] = '\0';
if (1 != sscanf(arr, "%d", &opts))
return -EINVAL;
- scsi_debug_opts = opts;
- if (scsi_debug_every_nth != 0)
- atomic_set(&sdebug_cmnd_count, 0);
+ sdebug_opts = opts;
+ sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
+ sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
+ if (sdebug_every_nth != 0)
+ tweak_cmnd_count();
return length;
}
@@ -4191,69 +4234,83 @@ static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int lengt
* output are not atomic, so they might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
- int f, l;
- char b[32];
-
- if (scsi_debug_every_nth > 0)
- snprintf(b, sizeof(b), " (curr:%d)",
- ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
- atomic_read(&sdebug_a_tsf) :
- atomic_read(&sdebug_cmnd_count)));
- else
- b[0] = '\0';
-
- seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
- "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
- "every_nth=%d%s\n"
- "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
- "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
- "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
- "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
- "usec_in_jiffy=%lu\n",
- SCSI_DEBUG_VERSION, scsi_debug_version_date,
- scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
- scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
- scsi_debug_max_luns, atomic_read(&sdebug_completions),
- scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
- sdebug_sectors_per, num_aborts, num_dev_resets,
- num_target_resets, num_bus_resets, num_host_resets,
- dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
-
- f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
- if (f != scsi_debug_max_queue) {
- l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
- seq_printf(m, " %s BUSY: first,last bits set: %d,%d\n",
- "queued_in_use_bm", f, l);
+ int f, j, l;
+ struct sdebug_queue *sqp;
+
+ seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
+ SDEBUG_VERSION, sdebug_version_date);
+ seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
+ sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
+ sdebug_opts, sdebug_every_nth);
+ seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
+ sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
+ sdebug_sector_size, "bytes");
+ seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
+ sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
+ num_aborts);
+ seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
+ num_dev_resets, num_target_resets, num_bus_resets,
+ num_host_resets);
+ seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
+ dix_reads, dix_writes, dif_errors);
+ seq_printf(m, "usec_in_jiffy=%lu, %s=%d, mq_active=%d\n",
+ TICK_NSEC / 1000, "statistics", sdebug_statistics,
+ sdebug_mq_active);
+ seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d\n",
+ atomic_read(&sdebug_cmnd_count),
+ atomic_read(&sdebug_completions),
+ "miss_cpus", atomic_read(&sdebug_miss_cpus),
+ atomic_read(&sdebug_a_tsf));
+
+ seq_printf(m, "submit_queues=%d\n", submit_queues);
+ for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
+ seq_printf(m, " queue %d:\n", j);
+ f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+ if (f != sdebug_max_queue) {
+ l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
+ seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
+ "first,last bits", f, l);
+ }
}
return 0;
}
static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
}
-/* Returns -EBUSY if delay is being changed and commands are queued */
+/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
+ * of delay is jiffies.
+ */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int delay, res;
+ int jdelay, res;
- if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
+ if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
res = count;
- if (scsi_debug_delay != delay) {
- unsigned long iflags;
- int k;
-
- spin_lock_irqsave(&queued_arr_lock, iflags);
- k = find_first_bit(queued_in_use_bm,
- scsi_debug_max_queue);
- if (k != scsi_debug_max_queue)
- res = -EBUSY; /* have queued commands */
- else {
- scsi_debug_delay = delay;
- scsi_debug_ndelay = 0;
+ if (sdebug_jdelay != jdelay) {
+ int j, k;
+ struct sdebug_queue *sqp;
+
+ block_unblock_all_queues(true);
+ for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
+ ++j, ++sqp) {
+ k = find_first_bit(sqp->in_use_bm,
+ sdebug_max_queue);
+ if (k != sdebug_max_queue) {
+ res = -EBUSY; /* queued commands */
+ break;
+ }
+ }
+ if (res > 0) {
+ /* make sure sdebug_defer instances get
+ * re-allocated for new delay variant */
+ free_all_queued();
+ sdebug_jdelay = jdelay;
+ sdebug_ndelay = 0;
}
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ block_unblock_all_queues(false);
}
return res;
}
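delay_store() above (and ndelay_store() below) replace the single queued_arr_lock with a quiesce pattern: block all submit queues, verify that no command is in flight on any of them, apply the new setting, then unblock. A hedged generic sketch, assuming the sdebug_queue layout introduced by this patch:

static int demo_change_delay(int new_val, int *setting)
{
	int j, k, res = 0;
	struct sdebug_queue *sqp;

	block_unblock_all_queues(true);
	for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
		k = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
		if (k != sdebug_max_queue) {
			res = -EBUSY;	/* commands still queued */
			break;
		}
	}
	if (res == 0) {
		free_all_queued();	/* force sdebug_defer re-allocation */
		*setting = new_val;
	}
	block_unblock_all_queues(false);
	return res;
}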
@@ -4263,31 +4320,41 @@ static DRIVER_ATTR_RW(delay);
static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
-/* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
+/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
- size_t count)
+ size_t count)
{
- unsigned long iflags;
- int ndelay, res, k;
+ int ndelay, res;
if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
- (ndelay >= 0) && (ndelay < 1000000000)) {
+ (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
res = count;
- if (scsi_debug_ndelay != ndelay) {
- spin_lock_irqsave(&queued_arr_lock, iflags);
- k = find_first_bit(queued_in_use_bm,
- scsi_debug_max_queue);
- if (k != scsi_debug_max_queue)
- res = -EBUSY; /* have queued commands */
- else {
- scsi_debug_ndelay = ndelay;
- scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
- : DEF_DELAY;
+ if (sdebug_ndelay != ndelay) {
+ int j, k;
+ struct sdebug_queue *sqp;
+
+ block_unblock_all_queues(true);
+ for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
+ ++j, ++sqp) {
+ k = find_first_bit(sqp->in_use_bm,
+ sdebug_max_queue);
+ if (k != sdebug_max_queue) {
+ res = -EBUSY; /* queued commands */
+ break;
+ }
+ }
+ if (res > 0) {
+ /* make sure sdebug_defer instances get
+ * re-allocated for new delay variant */
+ free_all_queued();
+ sdebug_ndelay = ndelay;
+ sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
+ : DEF_JDELAY;
}
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ block_unblock_all_queues(false);
}
return res;
}
@@ -4297,7 +4364,7 @@ static DRIVER_ATTR_RW(ndelay);
static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
}
static ssize_t opts_store(struct device_driver *ddp, const char *buf,
@@ -4317,26 +4384,17 @@ static ssize_t opts_store(struct device_driver *ddp, const char *buf,
}
return -EINVAL;
opts_done:
- scsi_debug_opts = opts;
- if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
- sdebug_any_injecting_opt = true;
- else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
- sdebug_any_injecting_opt = true;
- else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
- sdebug_any_injecting_opt = true;
- else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
- sdebug_any_injecting_opt = true;
- else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
- sdebug_any_injecting_opt = true;
- atomic_set(&sdebug_cmnd_count, 0);
- atomic_set(&sdebug_a_tsf, 0);
+ sdebug_opts = opts;
+ sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
+ sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
+ tweak_cmnd_count();
return count;
}
static DRIVER_ATTR_RW(opts);
static ssize_t ptype_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
}
static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
size_t count)
@@ -4344,7 +4402,7 @@ static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
- scsi_debug_ptype = n;
+ sdebug_ptype = n;
return count;
}
return -EINVAL;
@@ -4353,7 +4411,7 @@ static DRIVER_ATTR_RW(ptype);
static ssize_t dsense_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
}
static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
size_t count)
@@ -4361,7 +4419,7 @@ static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
- scsi_debug_dsense = n;
+ sdebug_dsense = n;
return count;
}
return -EINVAL;
@@ -4370,7 +4428,7 @@ static DRIVER_ATTR_RW(dsense);
static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
}
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
size_t count)
@@ -4379,11 +4437,11 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
n = (n > 0);
- scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
- if (scsi_debug_fake_rw != n) {
+ sdebug_fake_rw = (sdebug_fake_rw > 0);
+ if (sdebug_fake_rw != n) {
if ((0 == n) && (NULL == fake_storep)) {
unsigned long sz =
- (unsigned long)scsi_debug_dev_size_mb *
+ (unsigned long)sdebug_dev_size_mb *
1048576;
fake_storep = vmalloc(sz);
@@ -4393,7 +4451,7 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
}
memset(fake_storep, 0, sz);
}
- scsi_debug_fake_rw = n;
+ sdebug_fake_rw = n;
}
return count;
}
@@ -4403,7 +4461,7 @@ static DRIVER_ATTR_RW(fake_rw);
static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
}
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
size_t count)
@@ -4411,7 +4469,7 @@ static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
- scsi_debug_no_lun_0 = n;
+ sdebug_no_lun_0 = n;
return count;
}
return -EINVAL;
@@ -4420,7 +4478,7 @@ static DRIVER_ATTR_RW(no_lun_0);
static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
}
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
size_t count)
@@ -4428,7 +4486,7 @@ static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
- scsi_debug_num_tgts = n;
+ sdebug_num_tgts = n;
sdebug_max_tgts_luns();
return count;
}
@@ -4438,19 +4496,19 @@ static DRIVER_ATTR_RW(num_tgts);
static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
}
static DRIVER_ATTR_RO(dev_size_mb);
static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
}
static DRIVER_ATTR_RO(num_parts);
static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
}
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
size_t count)
@@ -4458,8 +4516,12 @@ static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
int nth;
if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
- scsi_debug_every_nth = nth;
- atomic_set(&sdebug_cmnd_count, 0);
+ sdebug_every_nth = nth;
+ if (nth && !sdebug_statistics) {
+ pr_info("every_nth needs statistics=1, set it\n");
+ sdebug_statistics = true;
+ }
+ tweak_cmnd_count();
return count;
}
return -EINVAL;
@@ -4468,7 +4530,7 @@ static DRIVER_ATTR_RW(every_nth);
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
}
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
size_t count)
@@ -4477,10 +4539,14 @@ static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
bool changed;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
- changed = (scsi_debug_max_luns != n);
- scsi_debug_max_luns = n;
+ if (n > 256) {
+ pr_warn("max_luns can be no more than 256\n");
+ return -EINVAL;
+ }
+ changed = (sdebug_max_luns != n);
+ sdebug_max_luns = n;
sdebug_max_tgts_luns();
- if (changed && (scsi_debug_scsi_level >= 5)) { /* >= SPC-3 */
+ if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
struct sdebug_host_info *sdhp;
struct sdebug_dev_info *dp;
@@ -4503,28 +4569,34 @@ static DRIVER_ATTR_RW(max_luns);
static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
}
/* N.B. max_queue can be changed while there are queued commands. In-flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- unsigned long iflags;
- int n, k;
+ int j, n, k, a;
+ struct sdebug_queue *sqp;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
- (n <= SCSI_DEBUG_CANQUEUE)) {
- spin_lock_irqsave(&queued_arr_lock, iflags);
- k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
- scsi_debug_max_queue = n;
- if (SCSI_DEBUG_CANQUEUE == k)
+ (n <= SDEBUG_CANQUEUE)) {
+ block_unblock_all_queues(true);
+ k = 0;
+ for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
+ ++j, ++sqp) {
+ a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
+ if (a > k)
+ k = a;
+ }
+ sdebug_max_queue = n;
+ if (k == SDEBUG_CANQUEUE)
atomic_set(&retired_max_queue, 0);
else if (k >= n)
atomic_set(&retired_max_queue, k + 1);
else
atomic_set(&retired_max_queue, 0);
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ block_unblock_all_queues(false);
return count;
}
return -EINVAL;
@@ -4533,19 +4605,19 @@ static DRIVER_ATTR_RW(max_queue);
static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
}
static DRIVER_ATTR_RO(no_uld);
static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
}
static DRIVER_ATTR_RO(scsi_level);
static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
}
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
size_t count)
@@ -4554,8 +4626,8 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
bool changed;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
- changed = (scsi_debug_virtual_gb != n);
- scsi_debug_virtual_gb = n;
+ changed = (sdebug_virtual_gb != n);
+ sdebug_virtual_gb = n;
sdebug_capacity = get_sdebug_capacity();
if (changed) {
struct sdebug_host_info *sdhp;
@@ -4580,9 +4652,12 @@ static DRIVER_ATTR_RW(virtual_gb);
static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_add_host);
}
+static int sdebug_add_adapter(void);
+static void sdebug_remove_adapter(void);
+
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
size_t count)
{
@@ -4605,7 +4680,7 @@ static DRIVER_ATTR_RW(add_host);
static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
}
static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
size_t count)
@@ -4613,40 +4688,68 @@ static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
- scsi_debug_vpd_use_hostno = n;
+ sdebug_vpd_use_hostno = n;
return count;
}
return -EINVAL;
}
static DRIVER_ATTR_RW(vpd_use_hostno);
+static ssize_t statistics_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
+}
+static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n;
+
+ if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
+ if (n > 0)
+ sdebug_statistics = true;
+ else {
+ clear_queue_stats();
+ sdebug_statistics = false;
+ }
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(statistics);
+
static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
}
static DRIVER_ATTR_RO(sector_size);
+static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
+}
+static DRIVER_ATTR_RO(submit_queues);
+
static ssize_t dix_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
}
static DRIVER_ATTR_RO(dix);
static ssize_t dif_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
}
static DRIVER_ATTR_RO(dif);
static ssize_t guard_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
}
static DRIVER_ATTR_RO(guard);
static ssize_t ato_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
}
static DRIVER_ATTR_RO(ato);
@@ -4669,7 +4772,7 @@ static DRIVER_ATTR_RO(map);
static ssize_t removable_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
}
static ssize_t removable_store(struct device_driver *ddp, const char *buf,
size_t count)
@@ -4677,7 +4780,7 @@ static ssize_t removable_store(struct device_driver *ddp, const char *buf,
int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
- scsi_debug_removable = (n > 0);
+ sdebug_removable = (n > 0);
return count;
}
return -EINVAL;
@@ -4686,32 +4789,17 @@ static DRIVER_ATTR_RW(removable);
static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
}
-/* Returns -EBUSY if host_lock is being changed and commands are queued */
+/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n, res;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
- bool new_host_lock = (n > 0);
-
- res = count;
- if (new_host_lock != scsi_debug_host_lock) {
- unsigned long iflags;
- int k;
-
- spin_lock_irqsave(&queued_arr_lock, iflags);
- k = find_first_bit(queued_in_use_bm,
- scsi_debug_max_queue);
- if (k != scsi_debug_max_queue)
- res = -EBUSY; /* have queued commands */
- else
- scsi_debug_host_lock = new_host_lock;
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
- }
- return res;
+ sdebug_host_lock = (n > 0);
+ return count;
}
return -EINVAL;
}
@@ -4719,7 +4807,7 @@ static DRIVER_ATTR_RW(host_lock);
static ssize_t strict_show(struct device_driver *ddp, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_strict);
+ return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
}
static ssize_t strict_store(struct device_driver *ddp, const char *buf,
size_t count)
@@ -4727,13 +4815,19 @@ static ssize_t strict_store(struct device_driver *ddp, const char *buf,
int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
- scsi_debug_strict = (n > 0);
+ sdebug_strict = (n > 0);
return count;
}
return -EINVAL;
}
static DRIVER_ATTR_RW(strict);
+static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
+}
+static DRIVER_ATTR_RO(uuid_ctl);
+
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
@@ -4761,6 +4855,8 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_add_host.attr,
&driver_attr_vpd_use_hostno.attr,
&driver_attr_sector_size.attr,
+ &driver_attr_statistics.attr,
+ &driver_attr_submit_queues.attr,
&driver_attr_dix.attr,
&driver_attr_dif.attr,
&driver_attr_guard.attr,
@@ -4770,6 +4866,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_host_lock.attr,
&driver_attr_ndelay.attr,
&driver_attr_strict.attr,
+ &driver_attr_uuid_ctl.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
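The new statistics, submit_queues and uuid_ctl attributes follow the file's standard sysfs plumbing: DRIVER_ATTR_RO(name) expects a name_show() function and emits a driver_attr_name object whose .attr member is then listed in the attrs array. A minimal sketch with a hypothetical attribute:

static ssize_t demo_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
}
static DRIVER_ATTR_RO(demo);

static struct attribute *demo_attrs[] = {
	&driver_attr_demo.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);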
@@ -4783,33 +4880,33 @@ static int __init scsi_debug_init(void)
int k;
int ret;
- atomic_set(&sdebug_cmnd_count, 0);
- atomic_set(&sdebug_completions, 0);
atomic_set(&retired_max_queue, 0);
- if (scsi_debug_ndelay >= 1000000000) {
+ if (sdebug_ndelay >= 1000 * 1000 * 1000) {
pr_warn("ndelay must be less than 1 second, ignored\n");
- scsi_debug_ndelay = 0;
- } else if (scsi_debug_ndelay > 0)
- scsi_debug_delay = DELAY_OVERRIDDEN;
+ sdebug_ndelay = 0;
+ } else if (sdebug_ndelay > 0)
+ sdebug_jdelay = JDELAY_OVERRIDDEN;
- switch (scsi_debug_sector_size) {
+ switch (sdebug_sector_size) {
case 512:
case 1024:
case 2048:
case 4096:
break;
default:
- pr_err("invalid sector_size %d\n", scsi_debug_sector_size);
+ pr_err("invalid sector_size %d\n", sdebug_sector_size);
return -EINVAL;
}
- switch (scsi_debug_dif) {
+ switch (sdebug_dif) {
case SD_DIF_TYPE0_PROTECTION:
+ break;
case SD_DIF_TYPE1_PROTECTION:
case SD_DIF_TYPE2_PROTECTION:
case SD_DIF_TYPE3_PROTECTION:
+ have_dif_prot = true;
break;
default:
@@ -4817,39 +4914,53 @@ static int __init scsi_debug_init(void)
return -EINVAL;
}
- if (scsi_debug_guard > 1) {
+ if (sdebug_guard > 1) {
pr_err("guard must be 0 or 1\n");
return -EINVAL;
}
- if (scsi_debug_ato > 1) {
+ if (sdebug_ato > 1) {
pr_err("ato must be 0 or 1\n");
return -EINVAL;
}
- if (scsi_debug_physblk_exp > 15) {
- pr_err("invalid physblk_exp %u\n", scsi_debug_physblk_exp);
+ if (sdebug_physblk_exp > 15) {
+ pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
return -EINVAL;
}
+ if (sdebug_max_luns > 256) {
+ pr_warn("max_luns can be no more than 256, use default\n");
+ sdebug_max_luns = DEF_MAX_LUNS;
+ }
- if (scsi_debug_lowest_aligned > 0x3fff) {
- pr_err("lowest_aligned too big: %u\n",
- scsi_debug_lowest_aligned);
+ if (sdebug_lowest_aligned > 0x3fff) {
+ pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
return -EINVAL;
}
- if (scsi_debug_dev_size_mb < 1)
- scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
- sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
- sdebug_store_sectors = sz / scsi_debug_sector_size;
+ if (submit_queues < 1) {
+ pr_err("submit_queues must be 1 or more\n");
+ return -EINVAL;
+ }
+ sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
+ GFP_KERNEL);
+ if (sdebug_q_arr == NULL)
+ return -ENOMEM;
+ for (k = 0; k < submit_queues; ++k)
+ spin_lock_init(&sdebug_q_arr[k].qc_lock);
+
+ if (sdebug_dev_size_mb < 1)
+ sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
+ sz = (unsigned long)sdebug_dev_size_mb * 1048576;
+ sdebug_store_sectors = sz / sdebug_sector_size;
sdebug_capacity = get_sdebug_capacity();
/* play around with geometry, don't waste too much on track 0 */
sdebug_heads = 8;
sdebug_sectors_per = 32;
- if (scsi_debug_dev_size_mb >= 256)
+ if (sdebug_dev_size_mb >= 256)
sdebug_heads = 64;
- else if (scsi_debug_dev_size_mb >= 16)
+ else if (sdebug_dev_size_mb >= 16)
sdebug_heads = 32;
sdebug_cylinders_per = (unsigned long)sdebug_capacity /
(sdebug_sectors_per * sdebug_heads);
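scsi_debug_init() now allocates the per-queue array before the ramdisk, so that every later failure path can unwind through the free_q_arr label added at the end of this function. A condensed sketch of the ordering, not the full init sequence:

static int demo_init(void)
{
	unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	int k, ret;

	sdebug_q_arr = kcalloc(submit_queues,
			       sizeof(struct sdebug_queue), GFP_KERNEL);
	if (sdebug_q_arr == NULL)
		return -ENOMEM;
	for (k = 0; k < submit_queues; ++k)
		spin_lock_init(&sdebug_q_arr[k].qc_lock);

	fake_storep = vmalloc(sz);
	if (fake_storep == NULL) {
		ret = -ENOMEM;
		goto free_q_arr;	/* later failures unwind here too */
	}
	return 0;
free_q_arr:
	kfree(sdebug_q_arr);
	return ret;
}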
@@ -4861,18 +4972,19 @@ static int __init scsi_debug_init(void)
(sdebug_sectors_per * sdebug_heads);
}
- if (0 == scsi_debug_fake_rw) {
+ if (sdebug_fake_rw == 0) {
fake_storep = vmalloc(sz);
if (NULL == fake_storep) {
pr_err("out of memory, 1\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_q_arr;
}
memset(fake_storep, 0, sz);
- if (scsi_debug_num_parts > 0)
+ if (sdebug_num_parts > 0)
sdebug_build_parts(fake_storep, sz);
}
- if (scsi_debug_dix) {
+ if (sdebug_dix) {
int dif_size;
dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
@@ -4891,20 +5003,21 @@ static int __init scsi_debug_init(void)
/* Logical Block Provisioning */
if (scsi_debug_lbp()) {
- scsi_debug_unmap_max_blocks =
- clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
+ sdebug_unmap_max_blocks =
+ clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
- scsi_debug_unmap_max_desc =
- clamp(scsi_debug_unmap_max_desc, 0U, 256U);
+ sdebug_unmap_max_desc =
+ clamp(sdebug_unmap_max_desc, 0U, 256U);
- scsi_debug_unmap_granularity =
- clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
+ sdebug_unmap_granularity =
+ clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
- if (scsi_debug_unmap_alignment &&
- scsi_debug_unmap_granularity <=
- scsi_debug_unmap_alignment) {
+ if (sdebug_unmap_alignment &&
+ sdebug_unmap_granularity <=
+ sdebug_unmap_alignment) {
pr_err("ERR: unmap_granularity <= unmap_alignment\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_vm;
}
map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
@@ -4921,7 +5034,7 @@ static int __init scsi_debug_init(void)
bitmap_zero(map_storep, map_size);
/* Map first 1KB for partition table */
- if (scsi_debug_num_parts)
+ if (sdebug_num_parts)
map_region(0, 2);
}
@@ -4942,8 +5055,8 @@ static int __init scsi_debug_init(void)
goto bus_unreg;
}
- host_to_add = scsi_debug_add_host;
- scsi_debug_add_host = 0;
+ host_to_add = sdebug_add_host;
+ sdebug_add_host = 0;
for (k = 0; k < host_to_add; k++) {
if (sdebug_add_adapter()) {
@@ -4952,8 +5065,8 @@ static int __init scsi_debug_init(void)
}
}
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- pr_info("built %d host(s)\n", scsi_debug_add_host);
+ if (sdebug_verbose)
+ pr_info("built %d host(s)\n", sdebug_add_host);
return 0;
@@ -4965,13 +5078,14 @@ free_vm:
vfree(map_storep);
vfree(dif_storep);
vfree(fake_storep);
-
+free_q_arr:
+ kfree(sdebug_q_arr);
return ret;
}
static void __exit scsi_debug_exit(void)
{
- int k = scsi_debug_add_host;
+ int k = sdebug_add_host;
stop_all_queued();
free_all_queued();
@@ -4983,6 +5097,7 @@ static void __exit scsi_debug_exit(void)
vfree(dif_storep);
vfree(fake_storep);
+ kfree(sdebug_q_arr);
}
device_initcall(scsi_debug_init);
@@ -5011,7 +5126,7 @@ static int sdebug_add_adapter(void)
INIT_LIST_HEAD(&sdbg_host->dev_info_list);
- devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
+ devs_per_host = sdebug_num_tgts * sdebug_max_luns;
for (k = 0; k < devs_per_host; k++) {
sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
if (!sdbg_devinfo) {
@@ -5028,14 +5143,14 @@ static int sdebug_add_adapter(void)
sdbg_host->dev.bus = &pseudo_lld_bus;
sdbg_host->dev.parent = pseudo_primary;
sdbg_host->dev.release = &sdebug_release_adapter;
- dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
+ dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_add_host);
error = device_register(&sdbg_host->dev);
if (error)
goto clean;
- ++scsi_debug_add_host;
+ ++sdebug_add_host;
return error;
clean:
@@ -5064,78 +5179,54 @@ static void sdebug_remove_adapter(void)
if (!sdbg_host)
return;
- device_unregister(&sdbg_host->dev);
- --scsi_debug_add_host;
+ device_unregister(&sdbg_host->dev);
+ --sdebug_add_host;
}
-static int
-sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
+static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{
int num_in_q = 0;
- unsigned long iflags;
struct sdebug_dev_info *devip;
- spin_lock_irqsave(&queued_arr_lock, iflags);
+ block_unblock_all_queues(true);
devip = (struct sdebug_dev_info *)sdev->hostdata;
if (NULL == devip) {
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ block_unblock_all_queues(false);
return -ENODEV;
}
num_in_q = atomic_read(&devip->num_in_q);
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
if (qdepth < 1)
qdepth = 1;
- /* allow to exceed max host queued_arr elements for testing */
- if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
- qdepth = SCSI_DEBUG_CANQUEUE + 10;
+ /* allow qdepth to exceed max host qc_arr elements for testing */
+ if (qdepth > SDEBUG_CANQUEUE + 10)
+ qdepth = SDEBUG_CANQUEUE + 10;
scsi_change_queue_depth(sdev, qdepth);
- if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
- sdev_printk(KERN_INFO, sdev,
- "%s: qdepth=%d, num_in_q=%d\n",
+ if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
+ sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
__func__, qdepth, num_in_q);
}
+ block_unblock_all_queues(false);
return sdev->queue_depth;
}
-static int
-check_inject(struct scsi_cmnd *scp)
+static bool fake_timeout(struct scsi_cmnd *scp)
{
- struct sdebug_scmd_extra_t *ep = scsi_cmd_priv(scp);
-
- memset(ep, 0, sizeof(struct sdebug_scmd_extra_t));
-
- if (atomic_inc_return(&sdebug_cmnd_count) >=
- abs(scsi_debug_every_nth)) {
- atomic_set(&sdebug_cmnd_count, 0);
- if (scsi_debug_every_nth < -1)
- scsi_debug_every_nth = -1;
- if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
- return 1; /* ignore command causing timeout */
- else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
+ if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
+ if (sdebug_every_nth < -1)
+ sdebug_every_nth = -1;
+ if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
+ return true; /* ignore command causing timeout */
+ else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
scsi_medium_access_command(scp))
- return 1; /* time out reads and writes */
- if (sdebug_any_injecting_opt) {
- int opts = scsi_debug_opts;
-
- if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
- ep->inj_recovered = true;
- else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
- ep->inj_transport = true;
- else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
- ep->inj_dif = true;
- else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
- ep->inj_dix = true;
- else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
- ep->inj_short = true;
- }
+ return true; /* time out reads and writes */
}
- return 0;
+ return false;
}
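fake_timeout() can use a plain modulo because scsi_debug_queuecommand() now increments sdebug_cmnd_count once per command when statistics are enabled, which is why every_nth_store() above forces statistics on. The trigger reduces to this check (callers guarantee every_nth is non-zero, avoiding a division by zero):

static bool demo_nth_fire(int count, int every_nth)
{
	return every_nth && (count % abs(every_nth)) == 0;
}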
-static int
-scsi_debug_queuecommand(struct scsi_cmnd *scp)
+static int scsi_debug_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scp)
{
u8 sdeb_i;
struct scsi_device *sdp = scp->device;
@@ -5146,15 +5237,16 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp)
int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
int k, na;
int errsts = 0;
- int errsts_no_connect = DID_NO_CONNECT << 16;
u32 flags;
u16 sa;
u8 opcode = cmd[0];
bool has_wlun_rl;
- bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
scsi_set_resid(scp, 0);
- if (debug && !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
+ if (sdebug_statistics)
+ atomic_inc(&sdebug_cmnd_count);
+ if (unlikely(sdebug_verbose &&
+ !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
char b[120];
int n, len, sb;
@@ -5167,19 +5259,25 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp)
n += scnprintf(b + n, sb - n, "%02x ",
(u32)cmd[k]);
}
- sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name, b);
+ if (sdebug_mq_active)
+ sdev_printk(KERN_INFO, sdp, "%s: tag=%u, cmd %s\n",
+ my_name, blk_mq_unique_tag(scp->request),
+ b);
+ else
+ sdev_printk(KERN_INFO, sdp, "%s: cmd %s\n", my_name,
+ b);
}
has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
- if ((sdp->lun >= scsi_debug_max_luns) && !has_wlun_rl)
- return schedule_resp(scp, NULL, errsts_no_connect, 0);
+ if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
+ goto err_out;
sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
devip = (struct sdebug_dev_info *)sdp->hostdata;
- if (!devip) {
- devip = devInfoReg(sdp);
+ if (unlikely(!devip)) {
+ devip = find_build_dev_info(sdp);
if (NULL == devip)
- return schedule_resp(scp, NULL, errsts_no_connect, 0);
+ goto err_out;
}
na = oip->num_attached;
r_pfp = oip->pfp;
@@ -5211,18 +5309,18 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp)
}
} /* else (when na==0) we assume the oip is a match */
flags = oip->flags;
- if (F_INV_OP & flags) {
+ if (unlikely(F_INV_OP & flags)) {
mk_sense_invalid_opcode(scp);
goto check_cond;
}
- if (has_wlun_rl && !(F_RL_WLUN_OK & flags)) {
- if (debug)
- sdev_printk(KERN_INFO, sdp, "scsi_debug: Opcode: "
- "0x%x not supported for wlun\n", opcode);
+ if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
+ if (sdebug_verbose)
+ sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
+ my_name, opcode, " supported for wlun");
mk_sense_invalid_opcode(scp);
goto check_cond;
}
- if (scsi_debug_strict) { /* check cdb against mask */
+ if (unlikely(sdebug_strict)) { /* check cdb against mask */
u8 rem;
int j;
@@ -5238,52 +5336,40 @@ scsi_debug_queuecommand(struct scsi_cmnd *scp)
}
}
}
- if (!(F_SKIP_UA & flags) &&
- SDEBUG_NUM_UAS != find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS)) {
- errsts = check_readiness(scp, UAS_ONLY, devip);
+ if (unlikely(!(F_SKIP_UA & flags) &&
+ find_first_bit(devip->uas_bm,
+ SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
+ errsts = make_ua(scp, devip);
if (errsts)
goto check_cond;
}
- if ((F_M_ACCESS & flags) && devip->stopped) {
+ if (unlikely((F_M_ACCESS & flags) && atomic_read(&devip->stopped))) {
mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
- if (debug)
+ if (sdebug_verbose)
sdev_printk(KERN_INFO, sdp, "%s reports: Not ready: "
"%s\n", my_name, "initializing command "
"required");
errsts = check_condition_result;
goto fini;
}
- if (scsi_debug_fake_rw && (F_FAKE_RW & flags))
+ if (sdebug_fake_rw && (F_FAKE_RW & flags))
goto fini;
- if (scsi_debug_every_nth) {
- if (check_inject(scp))
+ if (unlikely(sdebug_every_nth)) {
+ if (fake_timeout(scp))
return 0; /* ignore command: make trouble */
}
- if (oip->pfp) /* if this command has a resp_* function, call it */
- errsts = oip->pfp(scp, devip);
+ if (likely(oip->pfp))
+ errsts = oip->pfp(scp, devip); /* calls a resp_* function */
else if (r_pfp) /* if leaf function ptr NULL, try the root's */
errsts = r_pfp(scp, devip);
fini:
return schedule_resp(scp, devip, errsts,
- ((F_DELAY_OVERR & flags) ? 0 : scsi_debug_delay));
+ ((F_DELAY_OVERR & flags) ? 0 : sdebug_jdelay));
check_cond:
return schedule_resp(scp, devip, check_condition_result, 0);
-}
-
-static int
-sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
-{
- if (scsi_debug_host_lock) {
- unsigned long iflags;
- int rc;
-
- spin_lock_irqsave(shost->host_lock, iflags);
- rc = scsi_debug_queuecommand(cmd);
- spin_unlock_irqrestore(shost->host_lock, iflags);
- return rc;
- } else
- return scsi_debug_queuecommand(cmd);
+err_out:
+ return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, 0);
}
static struct scsi_host_template sdebug_driver_template = {
@@ -5296,36 +5382,34 @@ static struct scsi_host_template sdebug_driver_template = {
.slave_configure = scsi_debug_slave_configure,
.slave_destroy = scsi_debug_slave_destroy,
.ioctl = scsi_debug_ioctl,
- .queuecommand = sdebug_queuecommand_lock_or_not,
+ .queuecommand = scsi_debug_queuecommand,
.change_queue_depth = sdebug_change_qdepth,
.eh_abort_handler = scsi_debug_abort,
.eh_device_reset_handler = scsi_debug_device_reset,
.eh_target_reset_handler = scsi_debug_target_reset,
.eh_bus_reset_handler = scsi_debug_bus_reset,
.eh_host_reset_handler = scsi_debug_host_reset,
- .can_queue = SCSI_DEBUG_CANQUEUE,
+ .can_queue = SDEBUG_CANQUEUE,
.this_id = 7,
- .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+ .sg_tablesize = SG_MAX_SEGMENTS,
.cmd_per_lun = DEF_CMD_PER_LUN,
.max_sectors = -1U,
.use_clustering = DISABLE_CLUSTERING,
.module = THIS_MODULE,
.track_queue_depth = 1,
- .cmd_size = sizeof(struct sdebug_scmd_extra_t),
};
static int sdebug_driver_probe(struct device * dev)
{
int error = 0;
- int opts;
struct sdebug_host_info *sdbg_host;
struct Scsi_Host *hpnt;
- int host_prot;
+ int hprot;
sdbg_host = to_sdebug_host(dev);
- sdebug_driver_template.can_queue = scsi_debug_max_queue;
- if (scsi_debug_clustering)
+ sdebug_driver_template.can_queue = sdebug_max_queue;
+ if (sdebug_clustering)
sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;
hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
if (NULL == hpnt) {
@@ -5333,72 +5417,75 @@ static int sdebug_driver_probe(struct device * dev)
error = -ENODEV;
return error;
}
+ if (submit_queues > nr_cpu_ids) {
+ pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%d\n",
+ my_name, submit_queues, nr_cpu_ids);
+ submit_queues = nr_cpu_ids;
+ }
+ /* Decide whether to tell the SCSI subsystem that we want mq */
+ /* The following should give the same answer for each host */
+ sdebug_mq_active = shost_use_blk_mq(hpnt) && (submit_queues > 1);
+ if (sdebug_mq_active)
+ hpnt->nr_hw_queues = submit_queues;
sdbg_host->shost = hpnt;
*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
- if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
- hpnt->max_id = scsi_debug_num_tgts + 1;
+ if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
+ hpnt->max_id = sdebug_num_tgts + 1;
else
- hpnt->max_id = scsi_debug_num_tgts;
- /* = scsi_debug_max_luns; */
+ hpnt->max_id = sdebug_num_tgts;
+ /* = sdebug_max_luns; */
hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
- host_prot = 0;
+ hprot = 0;
- switch (scsi_debug_dif) {
+ switch (sdebug_dif) {
case SD_DIF_TYPE1_PROTECTION:
- host_prot = SHOST_DIF_TYPE1_PROTECTION;
- if (scsi_debug_dix)
- host_prot |= SHOST_DIX_TYPE1_PROTECTION;
+ hprot = SHOST_DIF_TYPE1_PROTECTION;
+ if (sdebug_dix)
+ hprot |= SHOST_DIX_TYPE1_PROTECTION;
break;
case SD_DIF_TYPE2_PROTECTION:
- host_prot = SHOST_DIF_TYPE2_PROTECTION;
- if (scsi_debug_dix)
- host_prot |= SHOST_DIX_TYPE2_PROTECTION;
+ hprot = SHOST_DIF_TYPE2_PROTECTION;
+ if (sdebug_dix)
+ hprot |= SHOST_DIX_TYPE2_PROTECTION;
break;
case SD_DIF_TYPE3_PROTECTION:
- host_prot = SHOST_DIF_TYPE3_PROTECTION;
- if (scsi_debug_dix)
- host_prot |= SHOST_DIX_TYPE3_PROTECTION;
+ hprot = SHOST_DIF_TYPE3_PROTECTION;
+ if (sdebug_dix)
+ hprot |= SHOST_DIX_TYPE3_PROTECTION;
break;
default:
- if (scsi_debug_dix)
- host_prot |= SHOST_DIX_TYPE0_PROTECTION;
+ if (sdebug_dix)
+ hprot |= SHOST_DIX_TYPE0_PROTECTION;
break;
}
- scsi_host_set_prot(hpnt, host_prot);
+ scsi_host_set_prot(hpnt, hprot);
- pr_info("host protection%s%s%s%s%s%s%s\n",
- (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
- (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
- (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
- (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
- (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
- (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
- (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
+ if (have_dif_prot || sdebug_dix)
+ pr_info("host protection%s%s%s%s%s%s%s\n",
+ (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
+ (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
+ (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
+ (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
+ (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
+ (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
+ (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
- if (scsi_debug_guard == 1)
+ if (sdebug_guard == 1)
scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
else
scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
- opts = scsi_debug_opts;
- if (SCSI_DEBUG_OPT_RECOVERED_ERR & opts)
- sdebug_any_injecting_opt = true;
- else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & opts)
- sdebug_any_injecting_opt = true;
- else if (SCSI_DEBUG_OPT_DIF_ERR & opts)
- sdebug_any_injecting_opt = true;
- else if (SCSI_DEBUG_OPT_DIX_ERR & opts)
- sdebug_any_injecting_opt = true;
- else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & opts)
- sdebug_any_injecting_opt = true;
-
+ sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
+ sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
+ if (sdebug_every_nth) /* need stats counters for every_nth */
+ sdebug_statistics = true;
error = scsi_add_host(hpnt, &sdbg_host->dev);
if (error) {
pr_err("scsi_add_host failed\n");
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index ff41c310c..eaccd651c 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -429,7 +429,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
* here, and we don't know what device it is
* trying to work with, leave it as-is.
*/
- vmax = 8; /* max length of vendor */
+ vmax = sizeof(devinfo->vendor);
vskip = vendor;
while (vmax > 0 && *vskip == ' ') {
vmax--;
@@ -439,7 +439,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
while (vmax > 0 && vskip[vmax - 1] == ' ')
--vmax;
- mmax = 16; /* max length of model */
+ mmax = sizeof(devinfo->model);
mskip = model;
while (mmax > 0 && *mskip == ' ') {
mmax--;
@@ -455,10 +455,12 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
* Behave like the older version of get_device_flags.
*/
if (memcmp(devinfo->vendor, vskip, vmax) ||
- devinfo->vendor[vmax])
+ (vmax < sizeof(devinfo->vendor) &&
+ devinfo->vendor[vmax]))
continue;
if (memcmp(devinfo->model, mskip, mmax) ||
- devinfo->model[mmax])
+ (mmax < sizeof(devinfo->model) &&
+ devinfo->model[mmax]))
continue;
return devinfo;
} else {
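The scsi_devinfo.c hunks fix an out-of-bounds read: devinfo->vendor and devinfo->model are fixed-size fields with no guaranteed NUL terminator, so the terminating byte may only be examined when the match length is shorter than the field. A hedged stand-alone equivalent:

static bool demo_vendor_match(const char *field, size_t field_len,
			      const char *key, size_t klen)
{
	if (klen > field_len || memcmp(field, key, klen))
		return false;
	/* a full-width key leaves no room for a terminator */
	return klen == field_len || field[klen] == '\0';
}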
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1b9c049bd..106a6adbd 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -452,7 +452,7 @@ static void scsi_report_sense(struct scsi_device *sdev,
* When a deferred error is detected the current command has
* not been executed and needs retrying.
*/
-static int scsi_check_sense(struct scsi_cmnd *scmd)
+int scsi_check_sense(struct scsi_cmnd *scmd)
{
struct scsi_device *sdev = scmd->device;
struct scsi_sense_hdr sshdr;
@@ -602,6 +602,7 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
return SUCCESS;
}
}
+EXPORT_SYMBOL_GPL(scsi_check_sense);
static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
{
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f704d0264..c71344aeb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -14,8 +14,6 @@
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
-#include <linux/mempool.h>
-#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -40,39 +38,6 @@
#include "scsi_logging.h"
-#define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
-#define SG_MEMPOOL_SIZE 2
-
-struct scsi_host_sg_pool {
- size_t size;
- char *name;
- struct kmem_cache *slab;
- mempool_t *pool;
-};
-
-#define SP(x) { .size = x, "sgpool-" __stringify(x) }
-#if (SCSI_MAX_SG_SEGMENTS < 32)
-#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
-#endif
-static struct scsi_host_sg_pool scsi_sg_pools[] = {
- SP(8),
- SP(16),
-#if (SCSI_MAX_SG_SEGMENTS > 32)
- SP(32),
-#if (SCSI_MAX_SG_SEGMENTS > 64)
- SP(64),
-#if (SCSI_MAX_SG_SEGMENTS > 128)
- SP(128),
-#if (SCSI_MAX_SG_SEGMENTS > 256)
-#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
-#endif
-#endif
-#endif
-#endif
- SP(SCSI_MAX_SG_SEGMENTS)
-};
-#undef SP
-
struct kmem_cache *scsi_sdb_cache;
/*
@@ -553,66 +518,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
scsi_run_queue(sdev->request_queue);
}
-static inline unsigned int scsi_sgtable_index(unsigned short nents)
-{
- unsigned int index;
-
- BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
-
- if (nents <= 8)
- index = 0;
- else
- index = get_count_order(nents) - 3;
-
- return index;
-}
-
-static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
-{
- struct scsi_host_sg_pool *sgp;
-
- sgp = scsi_sg_pools + scsi_sgtable_index(nents);
- mempool_free(sgl, sgp->pool);
-}
-
-static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
-{
- struct scsi_host_sg_pool *sgp;
-
- sgp = scsi_sg_pools + scsi_sgtable_index(nents);
- return mempool_alloc(sgp->pool, gfp_mask);
-}
-
-static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
-{
- if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
- return;
- __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
-}
-
-static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
-{
- struct scatterlist *first_chunk = NULL;
- int ret;
-
- BUG_ON(!nents);
-
- if (mq) {
- if (nents <= SCSI_MAX_SG_SEGMENTS) {
- sdb->table.nents = sdb->table.orig_nents = nents;
- sg_init_table(sdb->table.sgl, nents);
- return 0;
- }
- first_chunk = sdb->table.sgl;
- }
-
- ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
- first_chunk, GFP_ATOMIC, scsi_sg_alloc);
- if (unlikely(ret))
- scsi_free_sgtable(sdb, mq);
- return ret;
-}
-
static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
if (cmd->request->cmd_type == REQ_TYPE_FS) {
@@ -625,12 +530,17 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
+ struct scsi_data_buffer *sdb;
+
if (cmd->sdb.table.nents)
- scsi_free_sgtable(&cmd->sdb, true);
- if (cmd->request->next_rq && cmd->request->next_rq->special)
- scsi_free_sgtable(cmd->request->next_rq->special, true);
+ sg_free_table_chained(&cmd->sdb.table, true);
+ if (cmd->request->next_rq) {
+ sdb = cmd->request->next_rq->special;
+ if (sdb)
+ sg_free_table_chained(&sdb->table, true);
+ }
if (scsi_prot_sg_count(cmd))
- scsi_free_sgtable(cmd->prot_sdb, true);
+ sg_free_table_chained(&cmd->prot_sdb->table, true);
}
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
@@ -669,19 +579,19 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
- scsi_free_sgtable(&cmd->sdb, false);
+ sg_free_table_chained(&cmd->sdb.table, false);
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
if (scsi_prot_sg_count(cmd))
- scsi_free_sgtable(cmd->prot_sdb, false);
+ sg_free_table_chained(&cmd->prot_sdb->table, false);
}
static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
- scsi_free_sgtable(bidi_sdb, false);
+ sg_free_table_chained(&bidi_sdb->table, false);
kmem_cache_free(scsi_sdb_cache, bidi_sdb);
cmd->request->next_rq->special = NULL;
}
@@ -1088,8 +998,8 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
/*
* If sg table allocation fails, requeue request later.
*/
- if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
- req->mq_ctx != NULL)))
+ if (unlikely(sg_alloc_table_chained(&sdb->table, req->nr_phys_segments,
+ sdb->table.sgl)))
return BLKPREP_DEFER;
/*
@@ -1161,7 +1071,8 @@ int scsi_init_io(struct scsi_cmnd *cmd)
ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
- if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
+ if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
+ prot_sdb->table.sgl)) {
error = BLKPREP_DEFER;
goto err_exit;
}
@@ -1935,7 +1846,7 @@ static int scsi_mq_prep_fn(struct request *req)
if (scsi_host_get_prot(shost)) {
cmd->prot_sdb = (void *)sg +
min_t(unsigned int,
- shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
+ shost->sg_tablesize, SG_CHUNK_SIZE) *
sizeof(struct scatterlist);
memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
@@ -2108,7 +2019,7 @@ static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
* this limit is imposed by hardware restrictions
*/
blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
- SCSI_MAX_SG_CHAIN_SEGMENTS));
+ SG_MAX_SEGMENTS));
if (scsi_host_prot_dma(shost)) {
shost->sg_prot_tablesize =
@@ -2190,8 +2101,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
unsigned int cmd_size, sgl_size, tbl_size;
tbl_size = shost->sg_tablesize;
- if (tbl_size > SCSI_MAX_SG_SEGMENTS)
- tbl_size = SCSI_MAX_SG_SEGMENTS;
+ if (tbl_size > SG_CHUNK_SIZE)
+ tbl_size = SG_CHUNK_SIZE;
sgl_size = tbl_size * sizeof(struct scatterlist);
cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
if (scsi_host_get_prot(shost))
@@ -2267,8 +2178,6 @@ EXPORT_SYMBOL(scsi_unblock_requests);
int __init scsi_init_queue(void)
{
- int i;
-
scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
sizeof(struct scsi_data_buffer),
0, 0, NULL);
@@ -2277,53 +2186,12 @@ int __init scsi_init_queue(void)
return -ENOMEM;
}
- for (i = 0; i < SG_MEMPOOL_NR; i++) {
- struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
- int size = sgp->size * sizeof(struct scatterlist);
-
- sgp->slab = kmem_cache_create(sgp->name, size, 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!sgp->slab) {
- printk(KERN_ERR "SCSI: can't init sg slab %s\n",
- sgp->name);
- goto cleanup_sdb;
- }
-
- sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
- sgp->slab);
- if (!sgp->pool) {
- printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
- sgp->name);
- goto cleanup_sdb;
- }
- }
-
return 0;
-
-cleanup_sdb:
- for (i = 0; i < SG_MEMPOOL_NR; i++) {
- struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
- if (sgp->pool)
- mempool_destroy(sgp->pool);
- if (sgp->slab)
- kmem_cache_destroy(sgp->slab);
- }
- kmem_cache_destroy(scsi_sdb_cache);
-
- return -ENOMEM;
}
void scsi_exit_queue(void)
{
- int i;
-
kmem_cache_destroy(scsi_sdb_cache);
-
- for (i = 0; i < SG_MEMPOOL_NR; i++) {
- struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
- mempool_destroy(sgp->pool);
- kmem_cache_destroy(sgp->slab);
- }
}
/**
@@ -3199,6 +3067,7 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
* - EUI-64 based 12-byte
* - NAA IEEE Registered
* - NAA IEEE Extended
+ * - T10 Vendor ID
* as longer descriptors reduce the likelihood
* of identification clashes.
*/
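The new type-0x1 case below follows the same pattern as the other designators: each descriptor in VPD page 0x83 starts with a 4-byte header whose byte 1 low nibble is the designator type and whose byte 3 is the payload length. A compact sketch of the T10 vendor ID formatting step, assuming d points at the descriptor and using the kernel's %*pE escaped-string specifier as the hunk does:

#include <linux/kernel.h>

/*
 * d[1] & 0xf = designator type (0x1 == T10 vendor ID), d[3] = payload
 * length; the vendor + vendor-specific ID is printable ASCII at d[4].
 * Sketch only: the real code also ranks this against designators it
 * has already seen.
 */
static int demo_format_t10(char *id, size_t id_len, const unsigned char *d)
{
	size_t len = d[3];

	if (len + 4 > id_len)		/* clamp to the caller's buffer */
		len = id_len - 4;

	return snprintf(id, id_len, "t10.%*pE", (int)len, d + 4);
}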
@@ -3217,6 +3086,21 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
goto next_desig;
switch (d[1] & 0xf) {
+ case 0x1:
+ /* T10 Vendor ID */
+ if (cur_id_size > d[3])
+ break;
+ /* Prefer any other designator type over a T10 vendor ID */
+ if (cur_id_type > 0x01 && cur_id_type != 0xff)
+ break;
+ cur_id_size = d[3];
+ if (cur_id_size + 4 > id_len)
+ cur_id_size = id_len - 4;
+ cur_id_str = d + 4;
+ cur_id_type = d[1] & 0xf;
+ id_size = snprintf(id, id_len, "t10.%*pE",
+ cur_id_size, cur_id_str);
+ break;
case 0x2:
/* EUI-64 */
if (cur_id_size > d[3])
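All of the sgpool machinery removed above is replaced by the generic chained scatterlist allocator: the first SG_CHUNK_SIZE entries can live inline in the command (the array carved out in scsi_mq_prep_fn()), and further chunks are chained on demand. A minimal pairing sketch using the signatures visible in this hunk:

#include <linux/errno.h>
#include <linux/scatterlist.h>

/* 'inline_sgl' is assumed to point at a preallocated chunk of
 * SG_CHUNK_SIZE entries embedded in the command. */
static int demo_map(struct sg_table *table, int nents,
		    struct scatterlist *inline_sgl)
{
	if (sg_alloc_table_chained(table, nents, inline_sgl))
		return -ENOMEM;		/* caller requeues (BLKPREP_DEFER) */

	/* ... dma_map_sg() and issue the command ... */

	sg_free_table_chained(table, true);	/* true: first chunk inline */
	return 0;
}

Dropping the per-size mempools removes a SCSI-private allocator entirely; the inline chunk bounds the common case and the shared pools behind sg_alloc_table_chained() cover the rest.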
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 27b4d0a6a..57a4b9973 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -116,7 +116,7 @@ extern void scsi_exit_procfs(void);
extern char scsi_scan_type[];
extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
- unsigned int, u64, int);
+ unsigned int, u64, enum scsi_scan_mode);
extern void scsi_forget_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 251598eb3..7a74b82e8 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -251,7 +251,8 @@ static int scsi_add_single_device(uint host, uint channel, uint id, uint lun)
if (shost->transportt->user_scan)
error = shost->transportt->user_scan(shost, channel, id, lun);
else
- error = scsi_scan_host_selected(shost, channel, id, lun, 1);
+ error = scsi_scan_host_selected(shost, channel, id, lun,
+ SCSI_SCAN_MANUAL);
scsi_host_put(shost);
return error;
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6b5811de6..e0a78f53d 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -96,10 +96,13 @@ MODULE_PARM_DESC(max_luns,
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif
-char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
+char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
-module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
-MODULE_PARM_DESC(scan, "sync, async or none");
+module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
+ S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
+ "Setting to 'manual' disables automatic scanning, but allows "
+ "for manual device scan via the 'scan' sysfs attribute.");
static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
@@ -1041,7 +1044,8 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
* @lun: LUN of target device
* @bflagsp: store bflags here if not NULL
* @sdevp: probe the LUN corresponding to this scsi_device
- * @rescan: if nonzero skip some code only needed on first scan
+ * @rescan: if not equal to SCSI_SCAN_INITIAL, skip code that is
+ * only needed on the first scan
* @hostdata: passed to scsi_alloc_sdev()
*
* Description:
@@ -1056,7 +1060,8 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
**/
static int scsi_probe_and_add_lun(struct scsi_target *starget,
u64 lun, int *bflagsp,
- struct scsi_device **sdevp, int rescan,
+ struct scsi_device **sdevp,
+ enum scsi_scan_mode rescan,
void *hostdata)
{
struct scsi_device *sdev;
@@ -1070,7 +1075,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
*/
sdev = scsi_device_lookup_by_target(starget, lun);
if (sdev) {
- if (rescan || !scsi_device_created(sdev)) {
+ if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
"scsi scan: device exists on %s\n",
dev_name(&sdev->sdev_gendev)));
@@ -1206,7 +1211,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* Modifies sdevscan->lun.
**/
static void scsi_sequential_lun_scan(struct scsi_target *starget,
- int bflags, int scsi_level, int rescan)
+ int bflags, int scsi_level,
+ enum scsi_scan_mode rescan)
{
uint max_dev_lun;
u64 sparse_lun, lun;
@@ -1301,7 +1307,7 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
* 1: could not scan with REPORT LUN
**/
static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
- int rescan)
+ enum scsi_scan_mode rescan)
{
char devname[64];
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
@@ -1547,7 +1553,7 @@ void scsi_rescan_device(struct device *dev)
EXPORT_SYMBOL(scsi_rescan_device);
static void __scsi_scan_target(struct device *parent, unsigned int channel,
- unsigned int id, u64 lun, int rescan)
+ unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
struct Scsi_Host *shost = dev_to_shost(parent);
int bflags = 0;
@@ -1605,7 +1611,10 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
* @channel: channel to scan
* @id: target id to scan
* @lun: Specific LUN to scan or SCAN_WILD_CARD
- * @rescan: passed to LUN scanning routines
+ * @rescan: passed to LUN scanning routines; SCSI_SCAN_INITIAL for
+ * no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
+ * and SCSI_SCAN_MANUAL to force scanning even if
+ * 'scan=manual' is set.
*
* Description:
* Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
@@ -1615,13 +1624,17 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
* sequential scan of LUNs on the target id.
**/
void scsi_scan_target(struct device *parent, unsigned int channel,
- unsigned int id, u64 lun, int rescan)
+ unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
struct Scsi_Host *shost = dev_to_shost(parent);
if (strncmp(scsi_scan_type, "none", 4) == 0)
return;
+ if (rescan != SCSI_SCAN_MANUAL &&
+ strncmp(scsi_scan_type, "manual", 6) == 0)
+ return;
+
mutex_lock(&shost->scan_mutex);
if (!shost->async_scan)
scsi_complete_async_scans();
@@ -1635,7 +1648,8 @@ void scsi_scan_target(struct device *parent, unsigned int channel,
EXPORT_SYMBOL(scsi_scan_target);
static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
- unsigned int id, u64 lun, int rescan)
+ unsigned int id, u64 lun,
+ enum scsi_scan_mode rescan)
{
uint order_id;
@@ -1666,7 +1680,8 @@ static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
}
int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
- unsigned int id, u64 lun, int rescan)
+ unsigned int id, u64 lun,
+ enum scsi_scan_mode rescan)
{
SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
"%s: <%u:%u:%llu>\n",
@@ -1845,7 +1860,8 @@ void scsi_scan_host(struct Scsi_Host *shost)
{
struct async_scan_data *data;
- if (strncmp(scsi_scan_type, "none", 4) == 0)
+ if (strncmp(scsi_scan_type, "none", 4) == 0 ||
+ strncmp(scsi_scan_type, "manual", 6) == 0)
return;
if (scsi_autopm_get_host(shost) < 0)
return;
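Taken together, the scan= additions give three caller intents: SCSI_SCAN_INITIAL (first discovery), SCSI_SCAN_RESCAN (transport-driven rediscovery, blocked by scan=manual), and SCSI_SCAN_MANUAL (explicit user request, always allowed). A hedged sketch of how a transport might pick the mode, using the scsi_scan_target() signature from this patch:

#include <scsi/scsi_device.h>	/* scsi_scan_target(), enum scsi_scan_mode */
#include <scsi/scsi_host.h>	/* SCAN_WILD_CARD */

/* Hypothetical: 'user' distinguishes a sysfs-initiated scan from a
 * link-event rescan. */
static void demo_scan(struct device *parent, unsigned int channel,
		      unsigned int id, bool user)
{
	enum scsi_scan_mode mode = user ? SCSI_SCAN_MANUAL
					: SCSI_SCAN_RESCAN;

	scsi_scan_target(parent, channel, id, SCAN_WILD_CARD, mode);
}

The scsi_sysfs.c hunk below takes the SCSI_SCAN_MANUAL branch, so writing to a host's scan attribute still works when scan=manual is set.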
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index c7e4b5e0b..073492705 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -145,7 +145,8 @@ static int scsi_scan(struct Scsi_Host *shost, const char *str)
if (shost->transportt->user_scan)
res = shost->transportt->user_scan(shost, channel, id, lun);
else
- res = scsi_scan_host_selected(shost, channel, id, lun, 1);
+ res = scsi_scan_host_selected(shost, channel, id, lun,
+ SCSI_SCAN_MANUAL);
return res;
}
diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c
index 08bb47b53..0ff083bbf 100644
--- a/drivers/scsi/scsi_trace.c
+++ b/drivers/scsi/scsi_trace.c
@@ -17,6 +17,7 @@
*/
#include <linux/kernel.h>
#include <linux/trace_seq.h>
+#include <asm/unaligned.h>
#include <trace/events/scsi.h>
#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
@@ -231,6 +232,158 @@ out:
}
static const char *
+scsi_trace_maintenance_in(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = trace_seq_buffer_ptr(p), *cmd;
+ u32 alloc_len;
+
+ switch (SERVICE_ACTION16(cdb)) {
+ case MI_REPORT_IDENTIFYING_INFORMATION:
+ cmd = "REPORT_IDENTIFYING_INFORMATION";
+ break;
+ case MI_REPORT_TARGET_PGS:
+ cmd = "REPORT_TARGET_PORT_GROUPS";
+ break;
+ case MI_REPORT_ALIASES:
+ cmd = "REPORT_ALIASES";
+ break;
+ case MI_REPORT_SUPPORTED_OPERATION_CODES:
+ cmd = "REPORT_SUPPORTED_OPERATION_CODES";
+ break;
+ case MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS:
+ cmd = "REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS";
+ break;
+ case MI_REPORT_PRIORITY:
+ cmd = "REPORT_PRIORITY";
+ break;
+ case MI_REPORT_TIMESTAMP:
+ cmd = "REPORT_TIMESTAMP";
+ break;
+ case MI_MANAGEMENT_PROTOCOL_IN:
+ cmd = "MANAGEMENT_PROTOCOL_IN";
+ break;
+ default:
+ trace_seq_puts(p, "UNKNOWN");
+ goto out;
+ }
+
+ alloc_len = get_unaligned_be32(&cdb[6]);
+
+ trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len);
+
+out:
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_maintenance_out(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = trace_seq_buffer_ptr(p), *cmd;
+ u32 alloc_len;
+
+ switch (SERVICE_ACTION16(cdb)) {
+ case MO_SET_IDENTIFYING_INFORMATION:
+ cmd = "SET_IDENTIFYING_INFORMATION";
+ break;
+ case MO_SET_TARGET_PGS:
+ cmd = "SET_TARGET_PORT_GROUPS";
+ break;
+ case MO_CHANGE_ALIASES:
+ cmd = "CHANGE_ALIASES";
+ break;
+ case MO_SET_PRIORITY:
+ cmd = "SET_PRIORITY";
+ break;
+ case MO_SET_TIMESTAMP:
+ cmd = "SET_TIMESTAMP";
+ break;
+ case MO_MANAGEMENT_PROTOCOL_OUT:
+ cmd = "MANAGEMENT_PROTOCOL_OUT";
+ break;
+ default:
+ trace_seq_puts(p, "UNKNOWN");
+ goto out;
+ }
+
+ alloc_len = get_unaligned_be32(&cdb[6]);
+
+ trace_seq_printf(p, "%s alloc_len=%u", cmd, alloc_len);
+
+out:
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_zbc_in(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = trace_seq_buffer_ptr(p), *cmd;
+ u64 zone_id;
+ u32 alloc_len;
+ u8 options;
+
+ switch (SERVICE_ACTION16(cdb)) {
+ case ZI_REPORT_ZONES:
+ cmd = "REPORT_ZONES";
+ break;
+ default:
+ trace_seq_puts(p, "UNKNOWN");
+ goto out;
+ }
+
+ zone_id = get_unaligned_be64(&cdb[2]);
+ alloc_len = get_unaligned_be32(&cdb[10]);
+ options = cdb[14] & 0x3f;
+
+ trace_seq_printf(p, "%s zone=%llu alloc_len=%u options=%u partial=%u",
+ cmd, (unsigned long long)zone_id, alloc_len,
+ options, (cdb[14] >> 7) & 1);
+
+out:
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
+scsi_trace_zbc_out(struct trace_seq *p, unsigned char *cdb, int len)
+{
+ const char *ret = trace_seq_buffer_ptr(p), *cmd;
+ u64 zone_id;
+
+ switch (SERVICE_ACTION16(cdb)) {
+ case ZO_CLOSE_ZONE:
+ cmd = "CLOSE_ZONE";
+ break;
+ case ZO_FINISH_ZONE:
+ cmd = "FINISH_ZONE";
+ break;
+ case ZO_OPEN_ZONE:
+ cmd = "OPEN_ZONE";
+ break;
+ case ZO_RESET_WRITE_POINTER:
+ cmd = "RESET_WRITE_POINTER";
+ break;
+ default:
+ trace_seq_puts(p, "UNKNOWN");
+ goto out;
+ }
+
+ zone_id = get_unaligned_be64(&cdb[2]);
+
+ trace_seq_printf(p, "%s zone=%llu all=%u", cmd,
+ (unsigned long long)zone_id, cdb[14] & 1);
+
+out:
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *
scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
{
switch (SERVICE_ACTION32(cdb)) {
@@ -282,6 +435,14 @@ scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len)
return scsi_trace_service_action_in(p, cdb, len);
case VARIABLE_LENGTH_CMD:
return scsi_trace_varlen(p, cdb, len);
+ case MAINTENANCE_IN:
+ return scsi_trace_maintenance_in(p, cdb, len);
+ case MAINTENANCE_OUT:
+ return scsi_trace_maintenance_out(p, cdb, len);
+ case ZBC_IN:
+ return scsi_trace_zbc_in(p, cdb, len);
+ case ZBC_OUT:
+ return scsi_trace_zbc_out(p, cdb, len);
default:
return scsi_trace_misc(p, cdb, len);
}
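The tracers above only pull fields from fixed CDB offsets; for REPORT ZONES (ZBC_IN, service action ZI_REPORT_ZONES) that is a big-endian zone start LBA at bytes 2-9, an allocation length at bytes 10-13, reporting options in byte 14 bits 5:0, and the PARTIAL bit in bit 7. The same decode, restated as a standalone sketch:

#include <linux/types.h>
#include <asm/unaligned.h>

struct demo_report_zones {
	u64 zone_start_lba;	/* CDB[2..9], big-endian */
	u32 alloc_len;		/* CDB[10..13], big-endian */
	u8 reporting_options;	/* CDB[14] bits 5:0 */
	bool partial;		/* CDB[14] bit 7 */
};

static void demo_decode_report_zones(const unsigned char *cdb,
				     struct demo_report_zones *r)
{
	r->zone_start_lba = get_unaligned_be64(&cdb[2]);
	r->alloc_len = get_unaligned_be32(&cdb[10]);
	r->reporting_options = cdb[14] & 0x3f;
	r->partial = (cdb[14] >> 7) & 1;
}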
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 8a8822641..0f3a38695 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2027,11 +2027,10 @@ static void fc_vport_dev_release(struct device *dev)
kfree(vport);
}
-int scsi_is_fc_vport(const struct device *dev)
+static int scsi_is_fc_vport(const struct device *dev)
{
return dev->release == fc_vport_dev_release;
}
-EXPORT_SYMBOL(scsi_is_fc_vport);
static int fc_vport_match(struct attribute_container *cont,
struct device *dev)
@@ -2110,7 +2109,8 @@ fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
if ((channel == rport->channel) &&
(id == rport->scsi_target_id)) {
spin_unlock_irqrestore(shost->host_lock, flags);
- scsi_scan_target(&rport->dev, channel, id, lun, 1);
+ scsi_scan_target(&rport->dev, channel, id, lun,
+ SCSI_SCAN_MANUAL);
return;
}
}
@@ -3277,7 +3277,8 @@ fc_scsi_scan_rport(struct work_struct *work)
(rport->roles & FC_PORT_ROLE_FCP_TARGET) &&
!(i->f->disable_target_scan)) {
scsi_scan_target(&rport->dev, rport->channel,
- rport->scsi_target_id, SCAN_WILD_CARD, 1);
+ rport->scsi_target_id, SCAN_WILD_CARD,
+ SCSI_SCAN_RESCAN);
}
spin_lock_irqsave(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 441481623..42bca619f 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1009,7 +1009,7 @@ static void iscsi_flashnode_sess_release(struct device *dev)
kfree(fnode_sess);
}
-struct device_type iscsi_flashnode_sess_dev_type = {
+static struct device_type iscsi_flashnode_sess_dev_type = {
.name = "iscsi_flashnode_sess_dev_type",
.groups = iscsi_flashnode_sess_attr_groups,
.release = iscsi_flashnode_sess_release,
@@ -1195,13 +1195,13 @@ static void iscsi_flashnode_conn_release(struct device *dev)
kfree(fnode_conn);
}
-struct device_type iscsi_flashnode_conn_dev_type = {
+static struct device_type iscsi_flashnode_conn_dev_type = {
.name = "iscsi_flashnode_conn_dev_type",
.groups = iscsi_flashnode_conn_attr_groups,
.release = iscsi_flashnode_conn_release,
};
-struct bus_type iscsi_flashnode_bus;
+static struct bus_type iscsi_flashnode_bus;
int iscsi_flashnode_bus_match(struct device *dev,
struct device_driver *drv)
@@ -1212,7 +1212,7 @@ int iscsi_flashnode_bus_match(struct device *dev,
}
EXPORT_SYMBOL_GPL(iscsi_flashnode_bus_match);
-struct bus_type iscsi_flashnode_bus = {
+static struct bus_type iscsi_flashnode_bus = {
.name = "iscsi_flashnode",
.match = &iscsi_flashnode_bus_match,
};
@@ -1324,11 +1324,10 @@ EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn);
* 1 on success
* 0 on failure
*/
-int iscsi_is_flashnode_conn_dev(struct device *dev, void *data)
+static int iscsi_is_flashnode_conn_dev(struct device *dev, void *data)
{
return dev->bus == &iscsi_flashnode_bus;
}
-EXPORT_SYMBOL_GPL(iscsi_is_flashnode_conn_dev);
static int iscsi_destroy_flashnode_conn(struct iscsi_bus_flash_conn *fnode_conn)
{
@@ -1783,6 +1782,7 @@ struct iscsi_scan_data {
unsigned int channel;
unsigned int id;
u64 lun;
+ enum scsi_scan_mode rescan;
};
static int iscsi_user_scan_session(struct device *dev, void *data)
@@ -1819,7 +1819,7 @@ static int iscsi_user_scan_session(struct device *dev, void *data)
(scan_data->id == SCAN_WILD_CARD ||
scan_data->id == id))
scsi_scan_target(&session->dev, 0, id,
- scan_data->lun, 1);
+ scan_data->lun, scan_data->rescan);
}
user_scan_exit:
@@ -1836,6 +1836,7 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
scan_data.channel = channel;
scan_data.id = id;
scan_data.lun = lun;
+ scan_data.rescan = SCSI_SCAN_MANUAL;
return device_for_each_child(&shost->shost_gendev, &scan_data,
iscsi_user_scan_session);
@@ -1852,6 +1853,7 @@ static void iscsi_scan_session(struct work_struct *work)
scan_data.channel = 0;
scan_data.id = SCAN_WILD_CARD;
scan_data.lun = SCAN_WILD_CARD;
+ scan_data.rescan = SCSI_SCAN_RESCAN;
iscsi_user_scan_session(&session->dev, &scan_data);
atomic_dec(&ihost->nr_scans);
@@ -2067,13 +2069,10 @@ EXPORT_SYMBOL_GPL(iscsi_alloc_session);
int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
{
- struct Scsi_Host *shost = iscsi_session_to_shost(session);
- struct iscsi_cls_host *ihost;
unsigned long flags;
int id = 0;
int err;
- ihost = shost->shost_data;
session->sid = atomic_add_return(1, &iscsi_session_nr);
if (target_id == ISCSI_MAX_TARGET) {
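The rescan field added to iscsi_scan_data is what lets one walker serve both paths: user scans stamp SCSI_SCAN_MANUAL, session-recovery scans stamp SCSI_SCAN_RESCAN, and the per-session callback forwards whichever it finds. A stripped-down sketch of that pattern (names hypothetical, shape following the hunks above):

#include <linux/device.h>
#include <linux/types.h>
#include <scsi/scsi_device.h>

struct demo_scan_data {
	unsigned int channel, id;
	u64 lun;
	enum scsi_scan_mode rescan;
};

static int demo_scan_session(struct device *dev, void *data)
{
	struct demo_scan_data *sd = data;

	/* The real walker first checks iscsi_is_session_dev(dev) and
	 * maps the session to its target id. */
	scsi_scan_target(dev, sd->channel, sd->id, sd->lun, sd->rescan);
	return 0;
}

static int demo_user_scan(struct device *shost_gendev,
			  struct demo_scan_data *sd)
{
	sd->rescan = SCSI_SCAN_MANUAL;	/* RESCAN for recovery-driven scans */
	return device_for_each_child(shost_gendev, sd, demo_scan_session);
}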
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index b6f958193..3f0ff0721 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1614,7 +1614,8 @@ int sas_rphy_add(struct sas_rphy *rphy)
else
lun = 0;
- scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, 0);
+ scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun,
+ SCSI_SCAN_INITIAL);
}
return 0;
@@ -1739,8 +1740,8 @@ static int sas_user_scan(struct Scsi_Host *shost, uint channel,
if ((channel == SCAN_WILD_CARD || channel == 0) &&
(id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) {
- scsi_scan_target(&rphy->dev, 0,
- rphy->scsi_target_id, lun, 1);
+ scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id,
+ lun, SCSI_SCAN_MANUAL);
}
}
mutex_unlock(&sas_host->lock);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 41c3a2c4f..60bff78e9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -137,15 +137,15 @@ static const char *sd_cache_types[] = {
static void sd_set_flush_flag(struct scsi_disk *sdkp)
{
- unsigned flush = 0;
+ bool wc = false, fua = false;
if (sdkp->WCE) {
- flush |= REQ_FLUSH;
+ wc = true;
if (sdkp->DPOFUA)
- flush |= REQ_FUA;
+ fua = true;
}
- blk_queue_flush(sdkp->disk->queue, flush);
+ blk_queue_write_cache(sdkp->disk->queue, wc, fua);
}
static ssize_t
@@ -779,7 +779,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
* discarded on disk. This allows us to report completion on the full
* amount of blocks described by the request.
*/
- blk_add_request_payload(rq, page, len);
+ blk_add_request_payload(rq, page, 0, len);
ret = scsi_init_io(cmd);
rq->__data_len = nr_bytes;
@@ -1398,11 +1398,15 @@ static int media_not_present(struct scsi_disk *sdkp,
**/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
- struct scsi_disk *sdkp = scsi_disk(disk);
- struct scsi_device *sdp = sdkp->device;
+ struct scsi_disk *sdkp = scsi_disk_get(disk);
+ struct scsi_device *sdp;
struct scsi_sense_hdr *sshdr = NULL;
int retval;
+ if (!sdkp)
+ return 0;
+
+ sdp = sdkp->device;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
/*
@@ -1459,6 +1463,7 @@ out:
kfree(sshdr);
retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
sdp->changed = 0;
+ scsi_disk_put(sdkp);
return retval;
}
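sd_set_flush_flag() now describes the cache with two booleans instead of request flags: whether a volatile write cache exists (so the block layer must issue flushes) and whether FUA writes are honored. A minimal sketch for a hypothetical driver, with the inputs standing in for sdkp->WCE and sdkp->DPOFUA:

#include <linux/blkdev.h>

static void demo_set_flush(struct request_queue *q, bool wce, bool dpofua)
{
	/* FUA is only meaningful while a volatile cache is enabled */
	blk_queue_write_cache(q, wce, wce && dpofua);
}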
diff --git a/drivers/scsi/sense_codes.h b/drivers/scsi/sense_codes.h
new file mode 100644
index 000000000..e4e1dccd1
--- /dev/null
+++ b/drivers/scsi/sense_codes.h
@@ -0,0 +1,826 @@
+/*
+ * The canonical list of T10 Additional Sense Codes is available at:
+ * http://www.t10.org/lists/asc-num.txt [most recent: 20141221]
+ */
+
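sense_codes.h deliberately ships without a SENSE_CODE() definition: it is an X-macro list, and each includer expands the same table as it needs (the in-tree consumer is drivers/scsi/constants.c). A minimal sketch of the usual lookup-table expansion, with hypothetical field names:

#include <stddef.h>

struct demo_sense_entry {
	unsigned short code;		/* (ASC << 8) | ASCQ */
	const char *text;
};

static const struct demo_sense_entry demo_sense_table[] = {
#define SENSE_CODE(c, s) { c, s },
#include "sense_codes.h"
#undef SENSE_CODE
};

static const char *demo_sense_text(unsigned char asc, unsigned char ascq)
{
	unsigned short code = (asc << 8) | ascq;
	size_t i;

	for (i = 0; i < sizeof(demo_sense_table) / sizeof(demo_sense_table[0]); i++)
		if (demo_sense_table[i].code == code)
			return demo_sense_table[i].text;
	return NULL;
}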
+SENSE_CODE(0x0000, "No additional sense information")
+SENSE_CODE(0x0001, "Filemark detected")
+SENSE_CODE(0x0002, "End-of-partition/medium detected")
+SENSE_CODE(0x0003, "Setmark detected")
+SENSE_CODE(0x0004, "Beginning-of-partition/medium detected")
+SENSE_CODE(0x0005, "End-of-data detected")
+SENSE_CODE(0x0006, "I/O process terminated")
+SENSE_CODE(0x0007, "Programmable early warning detected")
+SENSE_CODE(0x0011, "Audio play operation in progress")
+SENSE_CODE(0x0012, "Audio play operation paused")
+SENSE_CODE(0x0013, "Audio play operation successfully completed")
+SENSE_CODE(0x0014, "Audio play operation stopped due to error")
+SENSE_CODE(0x0015, "No current audio status to return")
+SENSE_CODE(0x0016, "Operation in progress")
+SENSE_CODE(0x0017, "Cleaning requested")
+SENSE_CODE(0x0018, "Erase operation in progress")
+SENSE_CODE(0x0019, "Locate operation in progress")
+SENSE_CODE(0x001A, "Rewind operation in progress")
+SENSE_CODE(0x001B, "Set capacity operation in progress")
+SENSE_CODE(0x001C, "Verify operation in progress")
+SENSE_CODE(0x001D, "ATA pass through information available")
+SENSE_CODE(0x001E, "Conflicting SA creation request")
+SENSE_CODE(0x001F, "Logical unit transitioning to another power condition")
+SENSE_CODE(0x0020, "Extended copy information available")
+SENSE_CODE(0x0021, "Atomic command aborted due to ACA")
+
+SENSE_CODE(0x0100, "No index/sector signal")
+
+SENSE_CODE(0x0200, "No seek complete")
+
+SENSE_CODE(0x0300, "Peripheral device write fault")
+SENSE_CODE(0x0301, "No write current")
+SENSE_CODE(0x0302, "Excessive write errors")
+
+SENSE_CODE(0x0400, "Logical unit not ready, cause not reportable")
+SENSE_CODE(0x0401, "Logical unit is in process of becoming ready")
+SENSE_CODE(0x0402, "Logical unit not ready, initializing command required")
+SENSE_CODE(0x0403, "Logical unit not ready, manual intervention required")
+SENSE_CODE(0x0404, "Logical unit not ready, format in progress")
+SENSE_CODE(0x0405, "Logical unit not ready, rebuild in progress")
+SENSE_CODE(0x0406, "Logical unit not ready, recalculation in progress")
+SENSE_CODE(0x0407, "Logical unit not ready, operation in progress")
+SENSE_CODE(0x0408, "Logical unit not ready, long write in progress")
+SENSE_CODE(0x0409, "Logical unit not ready, self-test in progress")
+SENSE_CODE(0x040A, "Logical unit not accessible, asymmetric access state transition")
+SENSE_CODE(0x040B, "Logical unit not accessible, target port in standby state")
+SENSE_CODE(0x040C, "Logical unit not accessible, target port in unavailable state")
+SENSE_CODE(0x040D, "Logical unit not ready, structure check required")
+SENSE_CODE(0x040E, "Logical unit not ready, security session in progress")
+SENSE_CODE(0x0410, "Logical unit not ready, auxiliary memory not accessible")
+SENSE_CODE(0x0411, "Logical unit not ready, notify (enable spinup) required")
+SENSE_CODE(0x0412, "Logical unit not ready, offline")
+SENSE_CODE(0x0413, "Logical unit not ready, SA creation in progress")
+SENSE_CODE(0x0414, "Logical unit not ready, space allocation in progress")
+SENSE_CODE(0x0415, "Logical unit not ready, robotics disabled")
+SENSE_CODE(0x0416, "Logical unit not ready, configuration required")
+SENSE_CODE(0x0417, "Logical unit not ready, calibration required")
+SENSE_CODE(0x0418, "Logical unit not ready, a door is open")
+SENSE_CODE(0x0419, "Logical unit not ready, operating in sequential mode")
+SENSE_CODE(0x041A, "Logical unit not ready, start stop unit command in progress")
+SENSE_CODE(0x041B, "Logical unit not ready, sanitize in progress")
+SENSE_CODE(0x041C, "Logical unit not ready, additional power use not yet granted")
+SENSE_CODE(0x041D, "Logical unit not ready, configuration in progress")
+SENSE_CODE(0x041E, "Logical unit not ready, microcode activation required")
+SENSE_CODE(0x041F, "Logical unit not ready, microcode download required")
+SENSE_CODE(0x0420, "Logical unit not ready, logical unit reset required")
+SENSE_CODE(0x0421, "Logical unit not ready, hard reset required")
+SENSE_CODE(0x0422, "Logical unit not ready, power cycle required")
+
+SENSE_CODE(0x0500, "Logical unit does not respond to selection")
+
+SENSE_CODE(0x0600, "No reference position found")
+
+SENSE_CODE(0x0700, "Multiple peripheral devices selected")
+
+SENSE_CODE(0x0800, "Logical unit communication failure")
+SENSE_CODE(0x0801, "Logical unit communication time-out")
+SENSE_CODE(0x0802, "Logical unit communication parity error")
+SENSE_CODE(0x0803, "Logical unit communication CRC error (Ultra-DMA/32)")
+SENSE_CODE(0x0804, "Unreachable copy target")
+
+SENSE_CODE(0x0900, "Track following error")
+SENSE_CODE(0x0901, "Tracking servo failure")
+SENSE_CODE(0x0902, "Focus servo failure")
+SENSE_CODE(0x0903, "Spindle servo failure")
+SENSE_CODE(0x0904, "Head select fault")
+SENSE_CODE(0x0905, "Vibration induced tracking error")
+
+SENSE_CODE(0x0A00, "Error log overflow")
+
+SENSE_CODE(0x0B00, "Warning")
+SENSE_CODE(0x0B01, "Warning - specified temperature exceeded")
+SENSE_CODE(0x0B02, "Warning - enclosure degraded")
+SENSE_CODE(0x0B03, "Warning - background self-test failed")
+SENSE_CODE(0x0B04, "Warning - background pre-scan detected medium error")
+SENSE_CODE(0x0B05, "Warning - background medium scan detected medium error")
+SENSE_CODE(0x0B06, "Warning - non-volatile cache now volatile")
+SENSE_CODE(0x0B07, "Warning - degraded power to non-volatile cache")
+SENSE_CODE(0x0B08, "Warning - power loss expected")
+SENSE_CODE(0x0B09, "Warning - device statistics notification active")
+
+SENSE_CODE(0x0C00, "Write error")
+SENSE_CODE(0x0C01, "Write error - recovered with auto reallocation")
+SENSE_CODE(0x0C02, "Write error - auto reallocation failed")
+SENSE_CODE(0x0C03, "Write error - recommend reassignment")
+SENSE_CODE(0x0C04, "Compression check miscompare error")
+SENSE_CODE(0x0C05, "Data expansion occurred during compression")
+SENSE_CODE(0x0C06, "Block not compressible")
+SENSE_CODE(0x0C07, "Write error - recovery needed")
+SENSE_CODE(0x0C08, "Write error - recovery failed")
+SENSE_CODE(0x0C09, "Write error - loss of streaming")
+SENSE_CODE(0x0C0A, "Write error - padding blocks added")
+SENSE_CODE(0x0C0B, "Auxiliary memory write error")
+SENSE_CODE(0x0C0C, "Write error - unexpected unsolicited data")
+SENSE_CODE(0x0C0D, "Write error - not enough unsolicited data")
+SENSE_CODE(0x0C0E, "Multiple write errors")
+SENSE_CODE(0x0C0F, "Defects in error window")
+SENSE_CODE(0x0C10, "Incomplete multiple atomic write operations")
+
+SENSE_CODE(0x0D00, "Error detected by third party temporary initiator")
+SENSE_CODE(0x0D01, "Third party device failure")
+SENSE_CODE(0x0D02, "Copy target device not reachable")
+SENSE_CODE(0x0D03, "Incorrect copy target device type")
+SENSE_CODE(0x0D04, "Copy target device data underrun")
+SENSE_CODE(0x0D05, "Copy target device data overrun")
+
+SENSE_CODE(0x0E00, "Invalid information unit")
+SENSE_CODE(0x0E01, "Information unit too short")
+SENSE_CODE(0x0E02, "Information unit too long")
+SENSE_CODE(0x0E03, "Invalid field in command information unit")
+
+SENSE_CODE(0x1000, "Id CRC or ECC error")
+SENSE_CODE(0x1001, "Logical block guard check failed")
+SENSE_CODE(0x1002, "Logical block application tag check failed")
+SENSE_CODE(0x1003, "Logical block reference tag check failed")
+SENSE_CODE(0x1004, "Logical block protection error on recover buffered data")
+SENSE_CODE(0x1005, "Logical block protection method error")
+
+SENSE_CODE(0x1100, "Unrecovered read error")
+SENSE_CODE(0x1101, "Read retries exhausted")
+SENSE_CODE(0x1102, "Error too long to correct")
+SENSE_CODE(0x1103, "Multiple read errors")
+SENSE_CODE(0x1104, "Unrecovered read error - auto reallocate failed")
+SENSE_CODE(0x1105, "L-EC uncorrectable error")
+SENSE_CODE(0x1106, "CIRC unrecovered error")
+SENSE_CODE(0x1107, "Data re-synchronization error")
+SENSE_CODE(0x1108, "Incomplete block read")
+SENSE_CODE(0x1109, "No gap found")
+SENSE_CODE(0x110A, "Miscorrected error")
+SENSE_CODE(0x110B, "Unrecovered read error - recommend reassignment")
+SENSE_CODE(0x110C, "Unrecovered read error - recommend rewrite the data")
+SENSE_CODE(0x110D, "De-compression CRC error")
+SENSE_CODE(0x110E, "Cannot decompress using declared algorithm")
+SENSE_CODE(0x110F, "Error reading UPC/EAN number")
+SENSE_CODE(0x1110, "Error reading ISRC number")
+SENSE_CODE(0x1111, "Read error - loss of streaming")
+SENSE_CODE(0x1112, "Auxiliary memory read error")
+SENSE_CODE(0x1113, "Read error - failed retransmission request")
+SENSE_CODE(0x1114, "Read error - lba marked bad by application client")
+SENSE_CODE(0x1115, "Write after sanitize required")
+
+SENSE_CODE(0x1200, "Address mark not found for id field")
+
+SENSE_CODE(0x1300, "Address mark not found for data field")
+
+SENSE_CODE(0x1400, "Recorded entity not found")
+SENSE_CODE(0x1401, "Record not found")
+SENSE_CODE(0x1402, "Filemark or setmark not found")
+SENSE_CODE(0x1403, "End-of-data not found")
+SENSE_CODE(0x1404, "Block sequence error")
+SENSE_CODE(0x1405, "Record not found - recommend reassignment")
+SENSE_CODE(0x1406, "Record not found - data auto-reallocated")
+SENSE_CODE(0x1407, "Locate operation failure")
+
+SENSE_CODE(0x1500, "Random positioning error")
+SENSE_CODE(0x1501, "Mechanical positioning error")
+SENSE_CODE(0x1502, "Positioning error detected by read of medium")
+
+SENSE_CODE(0x1600, "Data synchronization mark error")
+SENSE_CODE(0x1601, "Data sync error - data rewritten")
+SENSE_CODE(0x1602, "Data sync error - recommend rewrite")
+SENSE_CODE(0x1603, "Data sync error - data auto-reallocated")
+SENSE_CODE(0x1604, "Data sync error - recommend reassignment")
+
+SENSE_CODE(0x1700, "Recovered data with no error correction applied")
+SENSE_CODE(0x1701, "Recovered data with retries")
+SENSE_CODE(0x1702, "Recovered data with positive head offset")
+SENSE_CODE(0x1703, "Recovered data with negative head offset")
+SENSE_CODE(0x1704, "Recovered data with retries and/or circ applied")
+SENSE_CODE(0x1705, "Recovered data using previous sector id")
+SENSE_CODE(0x1706, "Recovered data without ECC - data auto-reallocated")
+SENSE_CODE(0x1707, "Recovered data without ECC - recommend reassignment")
+SENSE_CODE(0x1708, "Recovered data without ECC - recommend rewrite")
+SENSE_CODE(0x1709, "Recovered data without ECC - data rewritten")
+
+SENSE_CODE(0x1800, "Recovered data with error correction applied")
+SENSE_CODE(0x1801, "Recovered data with error corr. & retries applied")
+SENSE_CODE(0x1802, "Recovered data - data auto-reallocated")
+SENSE_CODE(0x1803, "Recovered data with CIRC")
+SENSE_CODE(0x1804, "Recovered data with L-EC")
+SENSE_CODE(0x1805, "Recovered data - recommend reassignment")
+SENSE_CODE(0x1806, "Recovered data - recommend rewrite")
+SENSE_CODE(0x1807, "Recovered data with ECC - data rewritten")
+SENSE_CODE(0x1808, "Recovered data with linking")
+
+SENSE_CODE(0x1900, "Defect list error")
+SENSE_CODE(0x1901, "Defect list not available")
+SENSE_CODE(0x1902, "Defect list error in primary list")
+SENSE_CODE(0x1903, "Defect list error in grown list")
+
+SENSE_CODE(0x1A00, "Parameter list length error")
+
+SENSE_CODE(0x1B00, "Synchronous data transfer error")
+
+SENSE_CODE(0x1C00, "Defect list not found")
+SENSE_CODE(0x1C01, "Primary defect list not found")
+SENSE_CODE(0x1C02, "Grown defect list not found")
+
+SENSE_CODE(0x1D00, "Miscompare during verify operation")
+SENSE_CODE(0x1D01, "Miscompare verify of unmapped LBA")
+
+SENSE_CODE(0x1E00, "Recovered id with ECC correction")
+
+SENSE_CODE(0x1F00, "Partial defect list transfer")
+
+SENSE_CODE(0x2000, "Invalid command operation code")
+SENSE_CODE(0x2001, "Access denied - initiator pending-enrolled")
+SENSE_CODE(0x2002, "Access denied - no access rights")
+SENSE_CODE(0x2003, "Access denied - invalid mgmt id key")
+SENSE_CODE(0x2004, "Illegal command while in write capable state")
+SENSE_CODE(0x2005, "Obsolete")
+SENSE_CODE(0x2006, "Illegal command while in explicit address mode")
+SENSE_CODE(0x2007, "Illegal command while in implicit address mode")
+SENSE_CODE(0x2008, "Access denied - enrollment conflict")
+SENSE_CODE(0x2009, "Access denied - invalid LU identifier")
+SENSE_CODE(0x200A, "Access denied - invalid proxy token")
+SENSE_CODE(0x200B, "Access denied - ACL LUN conflict")
+SENSE_CODE(0x200C, "Illegal command when not in append-only mode")
+
+SENSE_CODE(0x2100, "Logical block address out of range")
+SENSE_CODE(0x2101, "Invalid element address")
+SENSE_CODE(0x2102, "Invalid address for write")
+SENSE_CODE(0x2103, "Invalid write crossing layer jump")
+SENSE_CODE(0x2104, "Unaligned write command")
+SENSE_CODE(0x2105, "Write boundary violation")
+SENSE_CODE(0x2106, "Attempt to read invalid data")
+SENSE_CODE(0x2107, "Read boundary violation")
+
+SENSE_CODE(0x2200, "Illegal function (use 20 00, 24 00, or 26 00)")
+
+SENSE_CODE(0x2300, "Invalid token operation, cause not reportable")
+SENSE_CODE(0x2301, "Invalid token operation, unsupported token type")
+SENSE_CODE(0x2302, "Invalid token operation, remote token usage not supported")
+SENSE_CODE(0x2303, "Invalid token operation, remote rod token creation not supported")
+SENSE_CODE(0x2304, "Invalid token operation, token unknown")
+SENSE_CODE(0x2305, "Invalid token operation, token corrupt")
+SENSE_CODE(0x2306, "Invalid token operation, token revoked")
+SENSE_CODE(0x2307, "Invalid token operation, token expired")
+SENSE_CODE(0x2308, "Invalid token operation, token cancelled")
+SENSE_CODE(0x2309, "Invalid token operation, token deleted")
+SENSE_CODE(0x230A, "Invalid token operation, invalid token length")
+
+SENSE_CODE(0x2400, "Invalid field in cdb")
+SENSE_CODE(0x2401, "CDB decryption error")
+SENSE_CODE(0x2402, "Obsolete")
+SENSE_CODE(0x2403, "Obsolete")
+SENSE_CODE(0x2404, "Security audit value frozen")
+SENSE_CODE(0x2405, "Security working key frozen")
+SENSE_CODE(0x2406, "Nonce not unique")
+SENSE_CODE(0x2407, "Nonce timestamp out of range")
+SENSE_CODE(0x2408, "Invalid XCDB")
+
+SENSE_CODE(0x2500, "Logical unit not supported")
+
+SENSE_CODE(0x2600, "Invalid field in parameter list")
+SENSE_CODE(0x2601, "Parameter not supported")
+SENSE_CODE(0x2602, "Parameter value invalid")
+SENSE_CODE(0x2603, "Threshold parameters not supported")
+SENSE_CODE(0x2604, "Invalid release of persistent reservation")
+SENSE_CODE(0x2605, "Data decryption error")
+SENSE_CODE(0x2606, "Too many target descriptors")
+SENSE_CODE(0x2607, "Unsupported target descriptor type code")
+SENSE_CODE(0x2608, "Too many segment descriptors")
+SENSE_CODE(0x2609, "Unsupported segment descriptor type code")
+SENSE_CODE(0x260A, "Unexpected inexact segment")
+SENSE_CODE(0x260B, "Inline data length exceeded")
+SENSE_CODE(0x260C, "Invalid operation for copy source or destination")
+SENSE_CODE(0x260D, "Copy segment granularity violation")
+SENSE_CODE(0x260E, "Invalid parameter while port is enabled")
+SENSE_CODE(0x260F, "Invalid data-out buffer integrity check value")
+SENSE_CODE(0x2610, "Data decryption key fail limit reached")
+SENSE_CODE(0x2611, "Incomplete key-associated data set")
+SENSE_CODE(0x2612, "Vendor specific key reference not found")
+
+SENSE_CODE(0x2700, "Write protected")
+SENSE_CODE(0x2701, "Hardware write protected")
+SENSE_CODE(0x2702, "Logical unit software write protected")
+SENSE_CODE(0x2703, "Associated write protect")
+SENSE_CODE(0x2704, "Persistent write protect")
+SENSE_CODE(0x2705, "Permanent write protect")
+SENSE_CODE(0x2706, "Conditional write protect")
+SENSE_CODE(0x2707, "Space allocation failed write protect")
+SENSE_CODE(0x2708, "Zone is read only")
+
+SENSE_CODE(0x2800, "Not ready to ready change, medium may have changed")
+SENSE_CODE(0x2801, "Import or export element accessed")
+SENSE_CODE(0x2802, "Format-layer may have changed")
+SENSE_CODE(0x2803, "Import/export element accessed, medium changed")
+
+SENSE_CODE(0x2900, "Power on, reset, or bus device reset occurred")
+SENSE_CODE(0x2901, "Power on occurred")
+SENSE_CODE(0x2902, "Scsi bus reset occurred")
+SENSE_CODE(0x2903, "Bus device reset function occurred")
+SENSE_CODE(0x2904, "Device internal reset")
+SENSE_CODE(0x2905, "Transceiver mode changed to single-ended")
+SENSE_CODE(0x2906, "Transceiver mode changed to lvd")
+SENSE_CODE(0x2907, "I_T nexus loss occurred")
+
+SENSE_CODE(0x2A00, "Parameters changed")
+SENSE_CODE(0x2A01, "Mode parameters changed")
+SENSE_CODE(0x2A02, "Log parameters changed")
+SENSE_CODE(0x2A03, "Reservations preempted")
+SENSE_CODE(0x2A04, "Reservations released")
+SENSE_CODE(0x2A05, "Registrations preempted")
+SENSE_CODE(0x2A06, "Asymmetric access state changed")
+SENSE_CODE(0x2A07, "Implicit asymmetric access state transition failed")
+SENSE_CODE(0x2A08, "Priority changed")
+SENSE_CODE(0x2A09, "Capacity data has changed")
+SENSE_CODE(0x2A0A, "Error history I_T nexus cleared")
+SENSE_CODE(0x2A0B, "Error history snapshot released")
+SENSE_CODE(0x2A0C, "Error recovery attributes have changed")
+SENSE_CODE(0x2A0D, "Data encryption capabilities changed")
+SENSE_CODE(0x2A10, "Timestamp changed")
+SENSE_CODE(0x2A11, "Data encryption parameters changed by another i_t nexus")
+SENSE_CODE(0x2A12, "Data encryption parameters changed by vendor specific event")
+SENSE_CODE(0x2A13, "Data encryption key instance counter has changed")
+SENSE_CODE(0x2A14, "SA creation capabilities data has changed")
+SENSE_CODE(0x2A15, "Medium removal prevention preempted")
+
+SENSE_CODE(0x2B00, "Copy cannot execute since host cannot disconnect")
+
+SENSE_CODE(0x2C00, "Command sequence error")
+SENSE_CODE(0x2C01, "Too many windows specified")
+SENSE_CODE(0x2C02, "Invalid combination of windows specified")
+SENSE_CODE(0x2C03, "Current program area is not empty")
+SENSE_CODE(0x2C04, "Current program area is empty")
+SENSE_CODE(0x2C05, "Illegal power condition request")
+SENSE_CODE(0x2C06, "Persistent prevent conflict")
+SENSE_CODE(0x2C07, "Previous busy status")
+SENSE_CODE(0x2C08, "Previous task set full status")
+SENSE_CODE(0x2C09, "Previous reservation conflict status")
+SENSE_CODE(0x2C0A, "Partition or collection contains user objects")
+SENSE_CODE(0x2C0B, "Not reserved")
+SENSE_CODE(0x2C0C, "Orwrite generation does not match")
+SENSE_CODE(0x2C0D, "Reset write pointer not allowed")
+SENSE_CODE(0x2C0E, "Zone is offline")
+
+SENSE_CODE(0x2D00, "Overwrite error on update in place")
+
+SENSE_CODE(0x2E00, "Insufficient time for operation")
+SENSE_CODE(0x2E01, "Command timeout before processing")
+SENSE_CODE(0x2E02, "Command timeout during processing")
+SENSE_CODE(0x2E03, "Command timeout during processing due to error recovery")
+
+SENSE_CODE(0x2F00, "Commands cleared by another initiator")
+SENSE_CODE(0x2F01, "Commands cleared by power loss notification")
+SENSE_CODE(0x2F02, "Commands cleared by device server")
+SENSE_CODE(0x2F03, "Some commands cleared by queuing layer event")
+
+SENSE_CODE(0x3000, "Incompatible medium installed")
+SENSE_CODE(0x3001, "Cannot read medium - unknown format")
+SENSE_CODE(0x3002, "Cannot read medium - incompatible format")
+SENSE_CODE(0x3003, "Cleaning cartridge installed")
+SENSE_CODE(0x3004, "Cannot write medium - unknown format")
+SENSE_CODE(0x3005, "Cannot write medium - incompatible format")
+SENSE_CODE(0x3006, "Cannot format medium - incompatible medium")
+SENSE_CODE(0x3007, "Cleaning failure")
+SENSE_CODE(0x3008, "Cannot write - application code mismatch")
+SENSE_CODE(0x3009, "Current session not fixated for append")
+SENSE_CODE(0x300A, "Cleaning request rejected")
+SENSE_CODE(0x300C, "WORM medium - overwrite attempted")
+SENSE_CODE(0x300D, "WORM medium - integrity check")
+SENSE_CODE(0x3010, "Medium not formatted")
+SENSE_CODE(0x3011, "Incompatible volume type")
+SENSE_CODE(0x3012, "Incompatible volume qualifier")
+SENSE_CODE(0x3013, "Cleaning volume expired")
+
+SENSE_CODE(0x3100, "Medium format corrupted")
+SENSE_CODE(0x3101, "Format command failed")
+SENSE_CODE(0x3102, "Zoned formatting failed due to spare linking")
+SENSE_CODE(0x3103, "Sanitize command failed")
+
+SENSE_CODE(0x3200, "No defect spare location available")
+SENSE_CODE(0x3201, "Defect list update failure")
+
+SENSE_CODE(0x3300, "Tape length error")
+
+SENSE_CODE(0x3400, "Enclosure failure")
+
+SENSE_CODE(0x3500, "Enclosure services failure")
+SENSE_CODE(0x3501, "Unsupported enclosure function")
+SENSE_CODE(0x3502, "Enclosure services unavailable")
+SENSE_CODE(0x3503, "Enclosure services transfer failure")
+SENSE_CODE(0x3504, "Enclosure services transfer refused")
+SENSE_CODE(0x3505, "Enclosure services checksum error")
+
+SENSE_CODE(0x3600, "Ribbon, ink, or toner failure")
+
+SENSE_CODE(0x3700, "Rounded parameter")
+
+SENSE_CODE(0x3800, "Event status notification")
+SENSE_CODE(0x3802, "Esn - power management class event")
+SENSE_CODE(0x3804, "Esn - media class event")
+SENSE_CODE(0x3806, "Esn - device busy class event")
+SENSE_CODE(0x3807, "Thin Provisioning soft threshold reached")
+
+SENSE_CODE(0x3900, "Saving parameters not supported")
+
+SENSE_CODE(0x3A00, "Medium not present")
+SENSE_CODE(0x3A01, "Medium not present - tray closed")
+SENSE_CODE(0x3A02, "Medium not present - tray open")
+SENSE_CODE(0x3A03, "Medium not present - loadable")
+SENSE_CODE(0x3A04, "Medium not present - medium auxiliary memory accessible")
+
+SENSE_CODE(0x3B00, "Sequential positioning error")
+SENSE_CODE(0x3B01, "Tape position error at beginning-of-medium")
+SENSE_CODE(0x3B02, "Tape position error at end-of-medium")
+SENSE_CODE(0x3B03, "Tape or electronic vertical forms unit not ready")
+SENSE_CODE(0x3B04, "Slew failure")
+SENSE_CODE(0x3B05, "Paper jam")
+SENSE_CODE(0x3B06, "Failed to sense top-of-form")
+SENSE_CODE(0x3B07, "Failed to sense bottom-of-form")
+SENSE_CODE(0x3B08, "Reposition error")
+SENSE_CODE(0x3B09, "Read past end of medium")
+SENSE_CODE(0x3B0A, "Read past beginning of medium")
+SENSE_CODE(0x3B0B, "Position past end of medium")
+SENSE_CODE(0x3B0C, "Position past beginning of medium")
+SENSE_CODE(0x3B0D, "Medium destination element full")
+SENSE_CODE(0x3B0E, "Medium source element empty")
+SENSE_CODE(0x3B0F, "End of medium reached")
+SENSE_CODE(0x3B11, "Medium magazine not accessible")
+SENSE_CODE(0x3B12, "Medium magazine removed")
+SENSE_CODE(0x3B13, "Medium magazine inserted")
+SENSE_CODE(0x3B14, "Medium magazine locked")
+SENSE_CODE(0x3B15, "Medium magazine unlocked")
+SENSE_CODE(0x3B16, "Mechanical positioning or changer error")
+SENSE_CODE(0x3B17, "Read past end of user object")
+SENSE_CODE(0x3B18, "Element disabled")
+SENSE_CODE(0x3B19, "Element enabled")
+SENSE_CODE(0x3B1A, "Data transfer device removed")
+SENSE_CODE(0x3B1B, "Data transfer device inserted")
+SENSE_CODE(0x3B1C, "Too many logical objects on partition to support operation")
+
+SENSE_CODE(0x3D00, "Invalid bits in identify message")
+
+SENSE_CODE(0x3E00, "Logical unit has not self-configured yet")
+SENSE_CODE(0x3E01, "Logical unit failure")
+SENSE_CODE(0x3E02, "Timeout on logical unit")
+SENSE_CODE(0x3E03, "Logical unit failed self-test")
+SENSE_CODE(0x3E04, "Logical unit unable to update self-test log")
+
+SENSE_CODE(0x3F00, "Target operating conditions have changed")
+SENSE_CODE(0x3F01, "Microcode has been changed")
+SENSE_CODE(0x3F02, "Changed operating definition")
+SENSE_CODE(0x3F03, "Inquiry data has changed")
+SENSE_CODE(0x3F04, "Component device attached")
+SENSE_CODE(0x3F05, "Device identifier changed")
+SENSE_CODE(0x3F06, "Redundancy group created or modified")
+SENSE_CODE(0x3F07, "Redundancy group deleted")
+SENSE_CODE(0x3F08, "Spare created or modified")
+SENSE_CODE(0x3F09, "Spare deleted")
+SENSE_CODE(0x3F0A, "Volume set created or modified")
+SENSE_CODE(0x3F0B, "Volume set deleted")
+SENSE_CODE(0x3F0C, "Volume set deassigned")
+SENSE_CODE(0x3F0D, "Volume set reassigned")
+SENSE_CODE(0x3F0E, "Reported luns data has changed")
+SENSE_CODE(0x3F0F, "Echo buffer overwritten")
+SENSE_CODE(0x3F10, "Medium loadable")
+SENSE_CODE(0x3F11, "Medium auxiliary memory accessible")
+SENSE_CODE(0x3F12, "iSCSI IP address added")
+SENSE_CODE(0x3F13, "iSCSI IP address removed")
+SENSE_CODE(0x3F14, "iSCSI IP address changed")
+SENSE_CODE(0x3F15, "Inspect referrals sense descriptors")
+SENSE_CODE(0x3F16, "Microcode has been changed without reset")
+/*
+ * SENSE_CODE(0x40NN, "Ram failure")
+ * SENSE_CODE(0x40NN, "Diagnostic failure on component nn")
+ * SENSE_CODE(0x41NN, "Data path failure")
+ * SENSE_CODE(0x42NN, "Power-on or self-test failure")
+ */
+SENSE_CODE(0x4300, "Message error")
+
+SENSE_CODE(0x4400, "Internal target failure")
+SENSE_CODE(0x4401, "Persistent reservation information lost")
+SENSE_CODE(0x4471, "ATA device failed set features")
+
+SENSE_CODE(0x4500, "Select or reselect failure")
+
+SENSE_CODE(0x4600, "Unsuccessful soft reset")
+
+SENSE_CODE(0x4700, "Scsi parity error")
+SENSE_CODE(0x4701, "Data phase CRC error detected")
+SENSE_CODE(0x4702, "Scsi parity error detected during st data phase")
+SENSE_CODE(0x4703, "Information unit iuCRC error detected")
+SENSE_CODE(0x4704, "Asynchronous information protection error detected")
+SENSE_CODE(0x4705, "Protocol service CRC error")
+SENSE_CODE(0x4706, "Phy test function in progress")
+SENSE_CODE(0x477F, "Some commands cleared by iSCSI Protocol event")
+
+SENSE_CODE(0x4800, "Initiator detected error message received")
+
+SENSE_CODE(0x4900, "Invalid message error")
+
+SENSE_CODE(0x4A00, "Command phase error")
+
+SENSE_CODE(0x4B00, "Data phase error")
+SENSE_CODE(0x4B01, "Invalid target port transfer tag received")
+SENSE_CODE(0x4B02, "Too much write data")
+SENSE_CODE(0x4B03, "Ack/nak timeout")
+SENSE_CODE(0x4B04, "Nak received")
+SENSE_CODE(0x4B05, "Data offset error")
+SENSE_CODE(0x4B06, "Initiator response timeout")
+SENSE_CODE(0x4B07, "Connection lost")
+SENSE_CODE(0x4B08, "Data-in buffer overflow - data buffer size")
+SENSE_CODE(0x4B09, "Data-in buffer overflow - data buffer descriptor area")
+SENSE_CODE(0x4B0A, "Data-in buffer error")
+SENSE_CODE(0x4B0B, "Data-out buffer overflow - data buffer size")
+SENSE_CODE(0x4B0C, "Data-out buffer overflow - data buffer descriptor area")
+SENSE_CODE(0x4B0D, "Data-out buffer error")
+SENSE_CODE(0x4B0E, "PCIe fabric error")
+SENSE_CODE(0x4B0F, "PCIe completion timeout")
+SENSE_CODE(0x4B10, "PCIe completer abort")
+SENSE_CODE(0x4B11, "PCIe poisoned tlp received")
+SENSE_CODE(0x4B12, "PCIe eCRC check failed")
+SENSE_CODE(0x4B13, "PCIe unsupported request")
+SENSE_CODE(0x4B14, "PCIe acs violation")
+SENSE_CODE(0x4B15, "PCIe tlp prefix blocked")
+
+SENSE_CODE(0x4C00, "Logical unit failed self-configuration")
+/*
+ * SENSE_CODE(0x4DNN, "Tagged overlapped commands (nn = queue tag)")
+ */
+SENSE_CODE(0x4E00, "Overlapped commands attempted")
+
+SENSE_CODE(0x5000, "Write append error")
+SENSE_CODE(0x5001, "Write append position error")
+SENSE_CODE(0x5002, "Position error related to timing")
+
+SENSE_CODE(0x5100, "Erase failure")
+SENSE_CODE(0x5101, "Erase failure - incomplete erase operation detected")
+
+SENSE_CODE(0x5200, "Cartridge fault")
+
+SENSE_CODE(0x5300, "Media load or eject failed")
+SENSE_CODE(0x5301, "Unload tape failure")
+SENSE_CODE(0x5302, "Medium removal prevented")
+SENSE_CODE(0x5303, "Medium removal prevented by data transfer element")
+SENSE_CODE(0x5304, "Medium thread or unthread failure")
+SENSE_CODE(0x5305, "Volume identifier invalid")
+SENSE_CODE(0x5306, "Volume identifier missing")
+SENSE_CODE(0x5307, "Duplicate volume identifier")
+SENSE_CODE(0x5308, "Element status unknown")
+SENSE_CODE(0x5309, "Data transfer device error - load failed")
+SENSE_CODE(0x530A, "Data transfer device error - unload failed")
+SENSE_CODE(0x530B, "Data transfer device error - unload missing")
+SENSE_CODE(0x530C, "Data transfer device error - eject failed")
+SENSE_CODE(0x530D, "Data transfer device error - library communication failed")
+
+SENSE_CODE(0x5400, "Scsi to host system interface failure")
+
+SENSE_CODE(0x5500, "System resource failure")
+SENSE_CODE(0x5501, "System buffer full")
+SENSE_CODE(0x5502, "Insufficient reservation resources")
+SENSE_CODE(0x5503, "Insufficient resources")
+SENSE_CODE(0x5504, "Insufficient registration resources")
+SENSE_CODE(0x5505, "Insufficient access control resources")
+SENSE_CODE(0x5506, "Auxiliary memory out of space")
+SENSE_CODE(0x5507, "Quota error")
+SENSE_CODE(0x5508, "Maximum number of supplemental decryption keys exceeded")
+SENSE_CODE(0x5509, "Medium auxiliary memory not accessible")
+SENSE_CODE(0x550A, "Data currently unavailable")
+SENSE_CODE(0x550B, "Insufficient power for operation")
+SENSE_CODE(0x550C, "Insufficient resources to create rod")
+SENSE_CODE(0x550D, "Insufficient resources to create rod token")
+SENSE_CODE(0x550E, "Insufficient zone resources")
+
+SENSE_CODE(0x5700, "Unable to recover table-of-contents")
+
+SENSE_CODE(0x5800, "Generation does not exist")
+
+SENSE_CODE(0x5900, "Updated block read")
+
+SENSE_CODE(0x5A00, "Operator request or state change input")
+SENSE_CODE(0x5A01, "Operator medium removal request")
+SENSE_CODE(0x5A02, "Operator selected write protect")
+SENSE_CODE(0x5A03, "Operator selected write permit")
+
+SENSE_CODE(0x5B00, "Log exception")
+SENSE_CODE(0x5B01, "Threshold condition met")
+SENSE_CODE(0x5B02, "Log counter at maximum")
+SENSE_CODE(0x5B03, "Log list codes exhausted")
+
+SENSE_CODE(0x5C00, "Rpl status change")
+SENSE_CODE(0x5C01, "Spindles synchronized")
+SENSE_CODE(0x5C02, "Spindles not synchronized")
+
+SENSE_CODE(0x5D00, "Failure prediction threshold exceeded")
+SENSE_CODE(0x5D01, "Media failure prediction threshold exceeded")
+SENSE_CODE(0x5D02, "Logical unit failure prediction threshold exceeded")
+SENSE_CODE(0x5D03, "Spare area exhaustion prediction threshold exceeded")
+SENSE_CODE(0x5D10, "Hardware impending failure general hard drive failure")
+SENSE_CODE(0x5D11, "Hardware impending failure drive error rate too high")
+SENSE_CODE(0x5D12, "Hardware impending failure data error rate too high")
+SENSE_CODE(0x5D13, "Hardware impending failure seek error rate too high")
+SENSE_CODE(0x5D14, "Hardware impending failure too many block reassigns")
+SENSE_CODE(0x5D15, "Hardware impending failure access times too high")
+SENSE_CODE(0x5D16, "Hardware impending failure start unit times too high")
+SENSE_CODE(0x5D17, "Hardware impending failure channel parametrics")
+SENSE_CODE(0x5D18, "Hardware impending failure controller detected")
+SENSE_CODE(0x5D19, "Hardware impending failure throughput performance")
+SENSE_CODE(0x5D1A, "Hardware impending failure seek time performance")
+SENSE_CODE(0x5D1B, "Hardware impending failure spin-up retry count")
+SENSE_CODE(0x5D1C, "Hardware impending failure drive calibration retry count")
+SENSE_CODE(0x5D20, "Controller impending failure general hard drive failure")
+SENSE_CODE(0x5D21, "Controller impending failure drive error rate too high")
+SENSE_CODE(0x5D22, "Controller impending failure data error rate too high")
+SENSE_CODE(0x5D23, "Controller impending failure seek error rate too high")
+SENSE_CODE(0x5D24, "Controller impending failure too many block reassigns")
+SENSE_CODE(0x5D25, "Controller impending failure access times too high")
+SENSE_CODE(0x5D26, "Controller impending failure start unit times too high")
+SENSE_CODE(0x5D27, "Controller impending failure channel parametrics")
+SENSE_CODE(0x5D28, "Controller impending failure controller detected")
+SENSE_CODE(0x5D29, "Controller impending failure throughput performance")
+SENSE_CODE(0x5D2A, "Controller impending failure seek time performance")
+SENSE_CODE(0x5D2B, "Controller impending failure spin-up retry count")
+SENSE_CODE(0x5D2C, "Controller impending failure drive calibration retry count")
+SENSE_CODE(0x5D30, "Data channel impending failure general hard drive failure")
+SENSE_CODE(0x5D31, "Data channel impending failure drive error rate too high")
+SENSE_CODE(0x5D32, "Data channel impending failure data error rate too high")
+SENSE_CODE(0x5D33, "Data channel impending failure seek error rate too high")
+SENSE_CODE(0x5D34, "Data channel impending failure too many block reassigns")
+SENSE_CODE(0x5D35, "Data channel impending failure access times too high")
+SENSE_CODE(0x5D36, "Data channel impending failure start unit times too high")
+SENSE_CODE(0x5D37, "Data channel impending failure channel parametrics")
+SENSE_CODE(0x5D38, "Data channel impending failure controller detected")
+SENSE_CODE(0x5D39, "Data channel impending failure throughput performance")
+SENSE_CODE(0x5D3A, "Data channel impending failure seek time performance")
+SENSE_CODE(0x5D3B, "Data channel impending failure spin-up retry count")
+SENSE_CODE(0x5D3C, "Data channel impending failure drive calibration retry count")
+SENSE_CODE(0x5D40, "Servo impending failure general hard drive failure")
+SENSE_CODE(0x5D41, "Servo impending failure drive error rate too high")
+SENSE_CODE(0x5D42, "Servo impending failure data error rate too high")
+SENSE_CODE(0x5D43, "Servo impending failure seek error rate too high")
+SENSE_CODE(0x5D44, "Servo impending failure too many block reassigns")
+SENSE_CODE(0x5D45, "Servo impending failure access times too high")
+SENSE_CODE(0x5D46, "Servo impending failure start unit times too high")
+SENSE_CODE(0x5D47, "Servo impending failure channel parametrics")
+SENSE_CODE(0x5D48, "Servo impending failure controller detected")
+SENSE_CODE(0x5D49, "Servo impending failure throughput performance")
+SENSE_CODE(0x5D4A, "Servo impending failure seek time performance")
+SENSE_CODE(0x5D4B, "Servo impending failure spin-up retry count")
+SENSE_CODE(0x5D4C, "Servo impending failure drive calibration retry count")
+SENSE_CODE(0x5D50, "Spindle impending failure general hard drive failure")
+SENSE_CODE(0x5D51, "Spindle impending failure drive error rate too high")
+SENSE_CODE(0x5D52, "Spindle impending failure data error rate too high")
+SENSE_CODE(0x5D53, "Spindle impending failure seek error rate too high")
+SENSE_CODE(0x5D54, "Spindle impending failure too many block reassigns")
+SENSE_CODE(0x5D55, "Spindle impending failure access times too high")
+SENSE_CODE(0x5D56, "Spindle impending failure start unit times too high")
+SENSE_CODE(0x5D57, "Spindle impending failure channel parametrics")
+SENSE_CODE(0x5D58, "Spindle impending failure controller detected")
+SENSE_CODE(0x5D59, "Spindle impending failure throughput performance")
+SENSE_CODE(0x5D5A, "Spindle impending failure seek time performance")
+SENSE_CODE(0x5D5B, "Spindle impending failure spin-up retry count")
+SENSE_CODE(0x5D5C, "Spindle impending failure drive calibration retry count")
+SENSE_CODE(0x5D60, "Firmware impending failure general hard drive failure")
+SENSE_CODE(0x5D61, "Firmware impending failure drive error rate too high")
+SENSE_CODE(0x5D62, "Firmware impending failure data error rate too high")
+SENSE_CODE(0x5D63, "Firmware impending failure seek error rate too high")
+SENSE_CODE(0x5D64, "Firmware impending failure too many block reassigns")
+SENSE_CODE(0x5D65, "Firmware impending failure access times too high")
+SENSE_CODE(0x5D66, "Firmware impending failure start unit times too high")
+SENSE_CODE(0x5D67, "Firmware impending failure channel parametrics")
+SENSE_CODE(0x5D68, "Firmware impending failure controller detected")
+SENSE_CODE(0x5D69, "Firmware impending failure throughput performance")
+SENSE_CODE(0x5D6A, "Firmware impending failure seek time performance")
+SENSE_CODE(0x5D6B, "Firmware impending failure spin-up retry count")
+SENSE_CODE(0x5D6C, "Firmware impending failure drive calibration retry count")
+SENSE_CODE(0x5DFF, "Failure prediction threshold exceeded (false)")
+
+SENSE_CODE(0x5E00, "Low power condition on")
+SENSE_CODE(0x5E01, "Idle condition activated by timer")
+SENSE_CODE(0x5E02, "Standby condition activated by timer")
+SENSE_CODE(0x5E03, "Idle condition activated by command")
+SENSE_CODE(0x5E04, "Standby condition activated by command")
+SENSE_CODE(0x5E05, "Idle_b condition activated by timer")
+SENSE_CODE(0x5E06, "Idle_b condition activated by command")
+SENSE_CODE(0x5E07, "Idle_c condition activated by timer")
+SENSE_CODE(0x5E08, "Idle_c condition activated by command")
+SENSE_CODE(0x5E09, "Standby_y condition activated by timer")
+SENSE_CODE(0x5E0A, "Standby_y condition activated by command")
+SENSE_CODE(0x5E41, "Power state change to active")
+SENSE_CODE(0x5E42, "Power state change to idle")
+SENSE_CODE(0x5E43, "Power state change to standby")
+SENSE_CODE(0x5E45, "Power state change to sleep")
+SENSE_CODE(0x5E47, "Power state change to device control")
+
+SENSE_CODE(0x6000, "Lamp failure")
+
+SENSE_CODE(0x6100, "Video acquisition error")
+SENSE_CODE(0x6101, "Unable to acquire video")
+SENSE_CODE(0x6102, "Out of focus")
+
+SENSE_CODE(0x6200, "Scan head positioning error")
+
+SENSE_CODE(0x6300, "End of user area encountered on this track")
+SENSE_CODE(0x6301, "Packet does not fit in available space")
+
+SENSE_CODE(0x6400, "Illegal mode for this track")
+SENSE_CODE(0x6401, "Invalid packet size")
+
+SENSE_CODE(0x6500, "Voltage fault")
+
+SENSE_CODE(0x6600, "Automatic document feeder cover up")
+SENSE_CODE(0x6601, "Automatic document feeder lift up")
+SENSE_CODE(0x6602, "Document jam in automatic document feeder")
+SENSE_CODE(0x6603, "Document miss feed automatic in document feeder")
+
+SENSE_CODE(0x6700, "Configuration failure")
+SENSE_CODE(0x6701, "Configuration of incapable logical units failed")
+SENSE_CODE(0x6702, "Add logical unit failed")
+SENSE_CODE(0x6703, "Modification of logical unit failed")
+SENSE_CODE(0x6704, "Exchange of logical unit failed")
+SENSE_CODE(0x6705, "Remove of logical unit failed")
+SENSE_CODE(0x6706, "Attachment of logical unit failed")
+SENSE_CODE(0x6707, "Creation of logical unit failed")
+SENSE_CODE(0x6708, "Assign failure occurred")
+SENSE_CODE(0x6709, "Multiply assigned logical unit")
+SENSE_CODE(0x670A, "Set target port groups command failed")
+SENSE_CODE(0x670B, "ATA device feature not enabled")
+
+SENSE_CODE(0x6800, "Logical unit not configured")
+SENSE_CODE(0x6801, "Subsidiary logical unit not configured")
+
+SENSE_CODE(0x6900, "Data loss on logical unit")
+SENSE_CODE(0x6901, "Multiple logical unit failures")
+SENSE_CODE(0x6902, "Parity/data mismatch")
+
+SENSE_CODE(0x6A00, "Informational, refer to log")
+
+SENSE_CODE(0x6B00, "State change has occurred")
+SENSE_CODE(0x6B01, "Redundancy level got better")
+SENSE_CODE(0x6B02, "Redundancy level got worse")
+
+SENSE_CODE(0x6C00, "Rebuild failure occurred")
+
+SENSE_CODE(0x6D00, "Recalculate failure occurred")
+
+SENSE_CODE(0x6E00, "Command to logical unit failed")
+
+SENSE_CODE(0x6F00, "Copy protection key exchange failure - authentication failure")
+SENSE_CODE(0x6F01, "Copy protection key exchange failure - key not present")
+SENSE_CODE(0x6F02, "Copy protection key exchange failure - key not established")
+SENSE_CODE(0x6F03, "Read of scrambled sector without authentication")
+SENSE_CODE(0x6F04, "Media region code is mismatched to logical unit region")
+SENSE_CODE(0x6F05, "Drive region must be permanent/region reset count error")
+SENSE_CODE(0x6F06, "Insufficient block count for binding nonce recording")
+SENSE_CODE(0x6F07, "Conflict in binding nonce recording")
+/*
+ * SENSE_CODE(0x70NN, "Decompression exception short algorithm id of nn")
+ */
+SENSE_CODE(0x7100, "Decompression exception long algorithm id")
+
+SENSE_CODE(0x7200, "Session fixation error")
+SENSE_CODE(0x7201, "Session fixation error writing lead-in")
+SENSE_CODE(0x7202, "Session fixation error writing lead-out")
+SENSE_CODE(0x7203, "Session fixation error - incomplete track in session")
+SENSE_CODE(0x7204, "Empty or partially written reserved track")
+SENSE_CODE(0x7205, "No more track reservations allowed")
+SENSE_CODE(0x7206, "RMZ extension is not allowed")
+SENSE_CODE(0x7207, "No more test zone extensions are allowed")
+
+SENSE_CODE(0x7300, "Cd control error")
+SENSE_CODE(0x7301, "Power calibration area almost full")
+SENSE_CODE(0x7302, "Power calibration area is full")
+SENSE_CODE(0x7303, "Power calibration area error")
+SENSE_CODE(0x7304, "Program memory area update failure")
+SENSE_CODE(0x7305, "Program memory area is full")
+SENSE_CODE(0x7306, "RMA/PMA is almost full")
+SENSE_CODE(0x7310, "Current power calibration area almost full")
+SENSE_CODE(0x7311, "Current power calibration area is full")
+SENSE_CODE(0x7317, "RDZ is full")
+
+SENSE_CODE(0x7400, "Security error")
+SENSE_CODE(0x7401, "Unable to decrypt data")
+SENSE_CODE(0x7402, "Unencrypted data encountered while decrypting")
+SENSE_CODE(0x7403, "Incorrect data encryption key")
+SENSE_CODE(0x7404, "Cryptographic integrity validation failed")
+SENSE_CODE(0x7405, "Error decrypting data")
+SENSE_CODE(0x7406, "Unknown signature verification key")
+SENSE_CODE(0x7407, "Encryption parameters not useable")
+SENSE_CODE(0x7408, "Digital signature validation failure")
+SENSE_CODE(0x7409, "Encryption mode mismatch on read")
+SENSE_CODE(0x740A, "Encrypted block not raw read enabled")
+SENSE_CODE(0x740B, "Incorrect Encryption parameters")
+SENSE_CODE(0x740C, "Unable to decrypt parameter list")
+SENSE_CODE(0x740D, "Encryption algorithm disabled")
+SENSE_CODE(0x7410, "SA creation parameter value invalid")
+SENSE_CODE(0x7411, "SA creation parameter value rejected")
+SENSE_CODE(0x7412, "Invalid SA usage")
+SENSE_CODE(0x7421, "Data Encryption configuration prevented")
+SENSE_CODE(0x7430, "SA creation parameter not supported")
+SENSE_CODE(0x7440, "Authentication failed")
+SENSE_CODE(0x7461, "External data encryption key manager access error")
+SENSE_CODE(0x7462, "External data encryption key manager error")
+SENSE_CODE(0x7463, "External data encryption key not found")
+SENSE_CODE(0x7464, "External data encryption request not authorized")
+SENSE_CODE(0x746E, "External data encryption control timeout")
+SENSE_CODE(0x746F, "External data encryption control error")
+SENSE_CODE(0x7471, "Logical unit access not authorized")
+SENSE_CODE(0x7479, "Security conflict in translated device")
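The SENSE_CODE() entries above form an X-macro table: the file defines no SENSE_CODE macro of its own, so each consumer defines it before including the header and gets the whole ASC/ASCQ list expanded in place. A minimal sketch of the usual consumption pattern, assuming a header name of sense_codes.h and a hypothetical entry struct:

/* Sketch only: header path and struct layout are assumptions. */
struct sense_entry {
	unsigned short code;	/* ASC in the high byte, ASCQ in the low byte */
	const char *text;	/* human-readable description */
};

static const struct sense_entry sense_table[] = {
#define SENSE_CODE(c, s)	{ (c), (s) },
#include "sense_codes.h"
#undef SENSE_CODE
};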
diff --git a/drivers/scsi/snic/snic.h b/drivers/scsi/snic/snic.h
index d7f5ba6ba..8ed778d4d 100644
--- a/drivers/scsi/snic/snic.h
+++ b/drivers/scsi/snic/snic.h
@@ -95,6 +95,8 @@
#define SNIC_DEV_RST_NOTSUP BIT(25)
#define SNIC_SCSI_CLEANUP BIT(26)
#define SNIC_HOST_RESET_ISSUED BIT(27)
+#define SNIC_HOST_RESET_CMD_TERM \
+ (SNIC_DEV_RST_NOTSUP | SNIC_SCSI_CLEANUP | SNIC_HOST_RESET_ISSUED)
#define SNIC_ABTS_TIMEOUT 30000 /* msec */
#define SNIC_LUN_RESET_TIMEOUT 30000 /* msec */
@@ -216,9 +218,10 @@ enum snic_msix_intr_index {
SNIC_MSIX_INTR_MAX,
};
+#define SNIC_INTRHDLR_NAMSZ (2 * IFNAMSIZ)
struct snic_msix_entry {
int requested;
- char devname[IFNAMSIZ];
+ char devname[SNIC_INTRHDLR_NAMSZ];
irqreturn_t (*isr)(int, void *);
void *devid;
};
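SNIC_HOST_RESET_CMD_TERM above folds the three termination-related flags into one mask, so completion handlers can reject stale completions with a single test. The snic_scsi.c hunks later in this patch use it exactly like this sketch:

/* Drop completions for commands already terminated by reset/cleanup. */
if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) {
	spin_unlock_irqrestore(io_lock, flags);

	return;
}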
diff --git a/drivers/scsi/snic/snic_ctl.c b/drivers/scsi/snic/snic_ctl.c
index ab0e06b0b..449b03f3b 100644
--- a/drivers/scsi/snic/snic_ctl.c
+++ b/drivers/scsi/snic/snic_ctl.c
@@ -39,17 +39,15 @@ snic_handle_link(struct work_struct *work)
{
struct snic *snic = container_of(work, struct snic, link_work);
- if (snic->config.xpt_type != SNIC_DAS) {
- SNIC_HOST_INFO(snic->shost, "Link Event Received.\n");
- SNIC_ASSERT_NOT_IMPL(1);
-
+ if (snic->config.xpt_type == SNIC_DAS)
return;
- }
snic->link_status = svnic_dev_link_status(snic->vdev);
snic->link_down_cnt = svnic_dev_link_down_cnt(snic->vdev);
SNIC_HOST_INFO(snic->shost, "Link Event: Link %s.\n",
((snic->link_status) ? "Up" : "Down"));
+
+ SNIC_ASSERT_NOT_IMPL(1);
}
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 1686f0196..d30280326 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -264,12 +264,14 @@ snic_stats_show(struct seq_file *sfp, void *data)
"Aborts Fail : %lld\n"
"Aborts Driver Timeout : %lld\n"
"Abort FW Timeout : %lld\n"
- "Abort IO NOT Found : %lld\n",
+ "Abort IO NOT Found : %lld\n"
+ "Abort Queuing Failed : %lld\n",
(u64) atomic64_read(&stats->abts.num),
(u64) atomic64_read(&stats->abts.fail),
(u64) atomic64_read(&stats->abts.drv_tmo),
(u64) atomic64_read(&stats->abts.fw_tmo),
- (u64) atomic64_read(&stats->abts.io_not_found));
+ (u64) atomic64_read(&stats->abts.io_not_found),
+ (u64) atomic64_read(&stats->abts.q_fail));
/* Dump Reset Stats */
seq_printf(sfp,
@@ -316,7 +318,9 @@ snic_stats_show(struct seq_file *sfp, void *data)
seq_printf(sfp,
"Last ISR Time : %llu (%8lu.%8lu)\n"
"Last Ack Time : %llu (%8lu.%8lu)\n"
- "ISRs : %llu\n"
+ "Ack ISRs : %llu\n"
+ "IO Cmpl ISRs : %llu\n"
+ "Err Notify ISRs : %llu\n"
"Max CQ Entries : %lld\n"
"Data Count Mismatch : %lld\n"
"IOs w/ Timeout Status : %lld\n"
@@ -324,12 +328,17 @@ snic_stats_show(struct seq_file *sfp, void *data)
"IOs w/ SGL Invalid Stat : %lld\n"
"WQ Desc Alloc Fail : %lld\n"
"Queue Full : %lld\n"
+ "Queue Ramp Up : %lld\n"
+ "Queue Ramp Down : %lld\n"
+ "Queue Last Queue Depth : %lld\n"
"Target Not Ready : %lld\n",
(u64) stats->misc.last_isr_time,
last_isr_tms.tv_sec, last_isr_tms.tv_nsec,
(u64)stats->misc.last_ack_time,
last_ack_tms.tv_sec, last_ack_tms.tv_nsec,
- (u64) atomic64_read(&stats->misc.isr_cnt),
+ (u64) atomic64_read(&stats->misc.ack_isr_cnt),
+ (u64) atomic64_read(&stats->misc.cmpl_isr_cnt),
+ (u64) atomic64_read(&stats->misc.errnotify_isr_cnt),
(u64) atomic64_read(&stats->misc.max_cq_ents),
(u64) atomic64_read(&stats->misc.data_cnt_mismat),
(u64) atomic64_read(&stats->misc.io_tmo),
@@ -337,6 +346,9 @@ snic_stats_show(struct seq_file *sfp, void *data)
(u64) atomic64_read(&stats->misc.sgl_inval),
(u64) atomic64_read(&stats->misc.wq_alloc_fail),
(u64) atomic64_read(&stats->misc.qfull),
+ (u64) atomic64_read(&stats->misc.qsz_rampup),
+ (u64) atomic64_read(&stats->misc.qsz_rampdown),
+ (u64) atomic64_read(&stats->misc.last_qsz),
(u64) atomic64_read(&stats->misc.tgt_not_rdy));
return 0;
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index 5f6321759..b0fefd67c 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -171,7 +171,7 @@ snic_scsi_scan_tgt(struct work_struct *work)
tgt->channel,
tgt->scsi_tgt_id,
SCAN_WILD_CARD,
- 1);
+ SCSI_SCAN_RESCAN);
spin_lock_irqsave(shost->host_lock, flags);
tgt->flags &= ~SNIC_TGT_SCAN_PENDING;
@@ -480,10 +480,21 @@ int
snic_disc_start(struct snic *snic)
{
struct snic_disc *disc = &snic->disc;
+ unsigned long flags;
int ret = 0;
SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n");
+ spin_lock_irqsave(&snic->snic_lock, flags);
+ if (snic->in_remove) {
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+ SNIC_ERR("snic driver removal in progress ...\n");
+ ret = 0;
+
+ return ret;
+ }
+ spin_unlock_irqrestore(&snic->snic_lock, flags);
+
mutex_lock(&disc->mutex);
if (disc->state == SNIC_DISC_PENDING) {
disc->req_cnt++;
@@ -533,6 +544,8 @@ snic_tgt_del_all(struct snic *snic)
struct list_head *cur, *nxt;
unsigned long flags;
+ scsi_flush_work(snic->shost);
+
mutex_lock(&snic->disc.mutex);
spin_lock_irqsave(snic->shost->host_lock, flags);
@@ -545,7 +558,7 @@ snic_tgt_del_all(struct snic *snic)
tgt = NULL;
}
spin_unlock_irqrestore(snic->shost->host_lock, flags);
-
- scsi_flush_work(snic->shost);
mutex_unlock(&snic->disc.mutex);
+
+ flush_workqueue(snic_glob->event_q);
} /* end of snic_tgt_del_all */
diff --git a/drivers/scsi/snic/snic_fwint.h b/drivers/scsi/snic/snic_fwint.h
index 2cfaf2dc9..c5f9e1917 100644
--- a/drivers/scsi/snic/snic_fwint.h
+++ b/drivers/scsi/snic/snic_fwint.h
@@ -414,7 +414,7 @@ enum snic_ev_type {
/* Payload 88 bytes = 128 - 24 - 16 */
#define SNIC_HOST_REQ_PAYLOAD ((int)(SNIC_HOST_REQ_LEN - \
sizeof(struct snic_io_hdr) - \
- (2 * sizeof(u64))))
+ (2 * sizeof(u64)) - sizeof(ulong)))
/*
* snic_host_req: host -> firmware request
@@ -448,6 +448,8 @@ struct snic_host_req {
/* hba reset */
struct snic_hba_reset reset;
} u;
+
+ ulong req_pa;
}; /* end of snic_host_req structure */
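With req_pa appended to snic_host_req, SNIC_HOST_REQ_PAYLOAD shrinks by sizeof(ulong); taking the context comment's figures (128-byte request, 24-byte header), a 64-bit build now leaves 128 - 24 - 16 - 8 = 80 payload bytes rather than 88. A compile-time check of that arithmetic could look like this sketch (the literal sizes come from the comment, not from verified headers):

/* Sketch: belongs inside an init function; assumes a 64-bit build. */
BUILD_BUG_ON(SNIC_HOST_REQ_PAYLOAD !=
	     128 - 24 - (int)(2 * sizeof(u64) + sizeof(ulong)));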
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
index 993db7de4..8e6954839 100644
--- a/drivers/scsi/snic/snic_io.c
+++ b/drivers/scsi/snic/snic_io.c
@@ -48,7 +48,7 @@ snic_wq_cmpl_frame_send(struct vnic_wq *wq,
SNIC_TRC(snic->shost->host_no, 0, 0,
((ulong)(buf->os_buf) - sizeof(struct snic_req_info)), 0, 0,
0);
- pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
+
buf->os_buf = NULL;
}
@@ -137,13 +137,36 @@ snic_select_wq(struct snic *snic)
return 0;
}
+static int
+snic_wqdesc_avail(struct snic *snic, int q_num, int req_type)
+{
+ int nr_wqdesc = snic->config.wq_enet_desc_count;
+
+ if (q_num > 0) {
+		/*
+		 * Multi-queue case: additional care is required, since
+		 * active requests must be tracked per WQ.
+		 */
+ SNIC_HOST_INFO(snic->shost, "desc_avail: Multi Queue case.\n");
+ SNIC_BUG_ON(q_num > 0);
+
+ return -1;
+ }
+
+ nr_wqdesc -= atomic64_read(&snic->s_stats.fw.actv_reqs);
+
+ return ((req_type == SNIC_REQ_HBA_RESET) ? nr_wqdesc : nr_wqdesc - 1);
+}
+
int
snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
{
dma_addr_t pa = 0;
unsigned long flags;
struct snic_fw_stats *fwstats = &snic->s_stats.fw;
+ struct snic_host_req *req = (struct snic_host_req *) os_buf;
long act_reqs;
+ long desc_avail = 0;
int q_num = 0;
snic_print_desc(__func__, os_buf, len);
@@ -156,11 +179,15 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
return -ENOMEM;
}
+ req->req_pa = (ulong)pa;
+
q_num = snic_select_wq(snic);
spin_lock_irqsave(&snic->wq_lock[q_num], flags);
- if (!svnic_wq_desc_avail(snic->wq)) {
+ desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
+ if (desc_avail <= 0) {
pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
+ req->req_pa = 0;
spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
SNIC_DBG("host = %d, WQ is Full\n", snic->shost->host_no);
@@ -169,10 +196,13 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
}
snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);
+	/*
+	 * Update stats.
+	 * Note: when multi-queue is enabled, fw actv_reqs should be
+	 * tracked per queue.
+	 */
+ act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
- /* Update stats */
- act_reqs = atomic64_inc_return(&fwstats->actv_reqs);
if (act_reqs > atomic64_read(&fwstats->max_actv_reqs))
atomic64_set(&fwstats->max_actv_reqs, act_reqs);
@@ -318,11 +348,31 @@ snic_req_free(struct snic *snic, struct snic_req_info *rqi)
"Req_free:rqi %p:ioreq %p:abt %p:dr %p\n",
rqi, rqi->req, rqi->abort_req, rqi->dr_req);
- if (rqi->abort_req)
+ if (rqi->abort_req) {
+ if (rqi->abort_req->req_pa)
+ pci_unmap_single(snic->pdev,
+ rqi->abort_req->req_pa,
+ sizeof(struct snic_host_req),
+ PCI_DMA_TODEVICE);
+
mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
+ }
+
+ if (rqi->dr_req) {
+ if (rqi->dr_req->req_pa)
+ pci_unmap_single(snic->pdev,
+ rqi->dr_req->req_pa,
+ sizeof(struct snic_host_req),
+ PCI_DMA_TODEVICE);
- if (rqi->dr_req)
mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
+ }
+
+ if (rqi->req->req_pa)
+ pci_unmap_single(snic->pdev,
+ rqi->req->req_pa,
+ rqi->req_len,
+ PCI_DMA_TODEVICE);
mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}
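The snic_io.c changes above make DMA mappings self-describing: the bus address returned by pci_map_single() is stored in the request itself (req->req_pa), so snic_req_free() can unmap exactly the mappings that were created, even on error paths. The pairing rule these hunks implement, condensed (names from the diff; error handling trimmed):

/* Map once when queuing and record the handle for teardown. */
pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
req->req_pa = (ulong) pa;

/* On free, unmap only if a mapping was actually recorded. */
if (req->req_pa) {
	pci_unmap_single(snic->pdev, req->req_pa, len, PCI_DMA_TODEVICE);
	req->req_pa = 0;
}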
diff --git a/drivers/scsi/snic/snic_isr.c b/drivers/scsi/snic/snic_isr.c
index a85fae25e..f55200312 100644
--- a/drivers/scsi/snic/snic_isr.c
+++ b/drivers/scsi/snic/snic_isr.c
@@ -38,7 +38,7 @@ snic_isr_msix_wq(int irq, void *data)
unsigned long wq_work_done = 0;
snic->s_stats.misc.last_isr_time = jiffies;
- atomic64_inc(&snic->s_stats.misc.isr_cnt);
+ atomic64_inc(&snic->s_stats.misc.ack_isr_cnt);
wq_work_done = snic_wq_cmpl_handler(snic, -1);
svnic_intr_return_credits(&snic->intr[SNIC_MSIX_WQ],
@@ -56,7 +56,7 @@ snic_isr_msix_io_cmpl(int irq, void *data)
unsigned long iocmpl_work_done = 0;
snic->s_stats.misc.last_isr_time = jiffies;
- atomic64_inc(&snic->s_stats.misc.isr_cnt);
+ atomic64_inc(&snic->s_stats.misc.cmpl_isr_cnt);
iocmpl_work_done = snic_fwcq_cmpl_handler(snic, -1);
svnic_intr_return_credits(&snic->intr[SNIC_MSIX_IO_CMPL],
@@ -73,7 +73,7 @@ snic_isr_msix_err_notify(int irq, void *data)
struct snic *snic = data;
snic->s_stats.misc.last_isr_time = jiffies;
- atomic64_inc(&snic->s_stats.misc.isr_cnt);
+ atomic64_inc(&snic->s_stats.misc.errnotify_isr_cnt);
svnic_intr_return_all_credits(&snic->intr[SNIC_MSIX_ERR_NOTIFY]);
snic_log_q_error(snic);
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 2b3c25371..396b32dca 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -98,11 +98,18 @@ snic_slave_configure(struct scsi_device *sdev)
static int
snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
+ struct snic *snic = shost_priv(sdev->host);
int qsz = 0;
qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);
+ if (qsz < sdev->queue_depth)
+ atomic64_inc(&snic->s_stats.misc.qsz_rampdown);
+ else if (qsz > sdev->queue_depth)
+ atomic64_inc(&snic->s_stats.misc.qsz_rampup);
+
+ atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth);
+
scsi_change_queue_depth(sdev, qsz);
- SNIC_INFO("QDepth Changed to %d\n", sdev->queue_depth);
return sdev->queue_depth;
}
@@ -624,19 +631,6 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_tmreq_pool;
}
- /*
- * Initialization done with PCI system, hardware, firmware.
- * Add shost to SCSI
- */
- ret = snic_add_host(shost, pdev);
- if (ret) {
- SNIC_HOST_ERR(shost,
- "Adding scsi host Failed ... exiting. %d\n",
- ret);
-
- goto err_notify_unset;
- }
-
spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
list_add_tail(&snic->list, &snic_glob->snic_list);
spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);
@@ -669,8 +663,6 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < snic->intr_count; i++)
svnic_intr_unmask(&snic->intr[i]);
- snic_set_state(snic, SNIC_ONLINE);
-
/* Get snic params */
ret = snic_get_conf(snic);
if (ret) {
@@ -681,6 +673,21 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_get_conf;
}
+ /*
+ * Initialization done with PCI system, hardware, firmware.
+ * Add shost to SCSI
+ */
+ ret = snic_add_host(shost, pdev);
+ if (ret) {
+ SNIC_HOST_ERR(shost,
+ "Adding scsi host Failed ... exiting. %d\n",
+ ret);
+
+ goto err_get_conf;
+ }
+
+ snic_set_state(snic, SNIC_ONLINE);
+
ret = snic_disc_start(snic);
if (ret) {
SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
@@ -705,6 +712,8 @@ err_req_intr:
svnic_dev_disable(snic->vdev);
err_vdev_enable:
+ svnic_dev_notify_unset(snic->vdev);
+
for (i = 0; i < snic->wq_count; i++) {
int rc = 0;
@@ -718,9 +727,6 @@ err_vdev_enable:
}
snic_del_host(snic->shost);
-err_notify_unset:
- svnic_dev_notify_unset(snic->vdev);
-
err_free_tmreq_pool:
mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);
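The snic_probe() hunks reorder initialization so the SCSI host is added only after firmware configuration succeeds, and move svnic_dev_notify_unset() so teardown again mirrors setup in reverse. The unwind idiom being preserved, in miniature (a sketch, not driver code):

ret = setup_a();
if (ret)
	goto err_out;

ret = setup_b();
if (ret)
	goto err_undo_a;	/* undo strictly in reverse order of setup */

return 0;

err_undo_a:
	teardown_a();
err_out:
	return ret;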
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index 2c7b4c321..abada16b3 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -221,11 +221,15 @@ snic_queue_icmnd_req(struct snic *snic,
pa, /* sense buffer pa */
SCSI_SENSE_BUFFERSIZE);
+ atomic64_inc(&snic->s_stats.io.active);
ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
- if (ret)
+ if (ret) {
+ atomic64_dec(&snic->s_stats.io.active);
SNIC_HOST_ERR(snic->shost,
"QIcmnd: Queuing Icmnd Failed. ret = %d\n",
ret);
+ } else
+ snic_stats_update_active_ios(&snic->s_stats);
return ret;
} /* end of snic_queue_icmnd_req */
@@ -361,8 +365,7 @@ snic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
if (ret) {
SNIC_HOST_ERR(shost, "Failed to Q, Scsi Req w/ err %d.\n", ret);
ret = SCSI_MLQUEUE_HOST_BUSY;
- } else
- snic_stats_update_active_ios(&snic->s_stats);
+ }
atomic_dec(&snic->ios_inflight);
@@ -598,6 +601,12 @@ snic_icmnd_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
sc->device->lun, sc, sc->cmnd[0], snic_cmd_tag(sc),
CMD_FLAGS(sc), rqi);
+ if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) {
+ spin_unlock_irqrestore(io_lock, flags);
+
+ return;
+ }
+
SNIC_BUG_ON(rqi != (struct snic_req_info *)ctx);
WARN_ON_ONCE(req);
if (!rqi) {
@@ -779,6 +788,11 @@ snic_process_itmf_cmpl(struct snic *snic,
io_lock = snic_io_lock_hash(snic, sc);
spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & SNIC_HOST_RESET_CMD_TERM) {
+ spin_unlock_irqrestore(io_lock, flags);
+
+ return ret;
+ }
rqi = (struct snic_req_info *) CMD_SP(sc);
WARN_ON_ONCE(!rqi);
@@ -1001,10 +1015,11 @@ snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
unsigned long flags, gflags;
int ret = 0;
+ snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
SNIC_HOST_INFO(snic->shost,
- "reset_cmpl:HBA Reset Completion received.\n");
+ "reset_cmpl:Tag %d ctx %lx cmpl status %s HBA Reset Completion received.\n",
+ cmnd_id, ctx, snic_io_status_to_str(hdr_stat));
- snic_io_hdr_dec(&fwreq->hdr, &typ, &hdr_stat, &cmnd_id, &hid, &ctx);
SNIC_SCSI_DBG(snic->shost,
"reset_cmpl: type = %x, hdr_stat = %x, cmnd_id = %x, hid = %x, ctx = %lx\n",
typ, hdr_stat, cmnd_id, hid, ctx);
@@ -1012,6 +1027,9 @@ snic_hba_reset_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
/* spl case, host reset issued through ioctl */
if (cmnd_id == SCSI_NO_TAG) {
rqi = (struct snic_req_info *) ctx;
+ SNIC_HOST_INFO(snic->shost,
+ "reset_cmpl:Tag %d ctx %lx cmpl stat %s\n",
+ cmnd_id, ctx, snic_io_status_to_str(hdr_stat));
sc = rqi->sc;
goto ioctl_hba_rst;
@@ -1038,6 +1056,10 @@ ioctl_hba_rst:
return ret;
}
+ SNIC_HOST_INFO(snic->shost,
+ "reset_cmpl: sc %p rqi %p Tag %d flags 0x%llx\n",
+ sc, rqi, cmnd_id, CMD_FLAGS(sc));
+
io_lock = snic_io_lock_hash(snic, sc);
spin_lock_irqsave(io_lock, flags);
@@ -1454,11 +1476,19 @@ snic_abort_finish(struct snic *snic, struct scsi_cmnd *sc)
case SNIC_STAT_IO_SUCCESS:
case SNIC_STAT_IO_NOT_FOUND:
ret = SUCCESS;
+		/*
+		 * If the abort path doesn't call scsi_done(), two IO
+		 * timeouts in a row will take the LUN offline.
+		 * Call scsi_done() here to complete the IO.
+		 */
+ sc->result = (DID_ERROR << 16);
+ sc->scsi_done(sc);
break;
default:
/* Firmware completed abort with error */
ret = FAILED;
+ rqi = NULL;
break;
}
@@ -1554,6 +1584,7 @@ snic_send_abort_and_wait(struct snic *snic, struct scsi_cmnd *sc)
/* Now Queue the abort command to firmware */
ret = snic_queue_abort_req(snic, rqi, sc, tmf);
if (ret) {
+ atomic64_inc(&snic->s_stats.abts.q_fail);
SNIC_HOST_ERR(snic->shost,
"send_abt_cmd: IO w/ Tag 0x%x fail w/ err %d flags 0x%llx\n",
tag, ret, CMD_FLAGS(sc));
@@ -1830,6 +1861,9 @@ snic_dr_clean_single_req(struct snic *snic,
snic_release_req_buf(snic, rqi, sc);
+ sc->result = (DID_ERROR << 16);
+ sc->scsi_done(sc);
+
ret = 0;
return ret;
@@ -2384,6 +2418,13 @@ snic_cmpl_pending_tmreq(struct snic *snic, struct scsi_cmnd *sc)
"Completing Pending TM Req sc %p, state %s flags 0x%llx\n",
sc, snic_io_status_to_str(CMD_STATE(sc)), CMD_FLAGS(sc));
+	/*
+	 * Case: FW didn't post the itmf completion due to PCIe errors.
+	 * Mark the abort status as success so that scsi completion is
+	 * invoked in snic_abort_finish().
+	 */
+ CMD_ABTS_STATUS(sc) = SNIC_STAT_IO_SUCCESS;
+
rqi = (struct snic_req_info *) CMD_SP(sc);
if (!rqi)
return;
@@ -2459,8 +2500,9 @@ snic_scsi_cleanup(struct snic *snic, int ex_tag)
cleanup:
sc->result = DID_TRANSPORT_DISRUPTED << 16;
SNIC_HOST_INFO(snic->shost,
- "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p. rqi %p duration %llu msecs\n",
- sc, rqi, (jiffies - st_time));
+ "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n",
+ sc, sc->request->tag, CMD_FLAGS(sc), rqi,
+ jiffies_to_msecs(jiffies - st_time));
/* Update IO stats */
snic_stats_update_io_cmpl(&snic->s_stats);
diff --git a/drivers/scsi/snic/snic_stats.h b/drivers/scsi/snic/snic_stats.h
index 11e614849..fd1066b1c 100644
--- a/drivers/scsi/snic/snic_stats.h
+++ b/drivers/scsi/snic/snic_stats.h
@@ -42,6 +42,7 @@ struct snic_abort_stats {
atomic64_t drv_tmo; /* Abort Driver Timeouts */
atomic64_t fw_tmo; /* Abort Firmware Timeouts */
atomic64_t io_not_found;/* Abort IO Not Found */
+ atomic64_t q_fail; /* Abort Queuing Failed */
};
struct snic_reset_stats {
@@ -69,7 +70,9 @@ struct snic_fw_stats {
struct snic_misc_stats {
u64 last_isr_time;
u64 last_ack_time;
- atomic64_t isr_cnt;
+ atomic64_t ack_isr_cnt;
+ atomic64_t cmpl_isr_cnt;
+ atomic64_t errnotify_isr_cnt;
atomic64_t max_cq_ents; /* Max CQ Entries */
atomic64_t data_cnt_mismat; /* Data Count Mismatch */
atomic64_t io_tmo;
@@ -81,6 +84,9 @@ struct snic_misc_stats {
atomic64_t no_icmnd_itmf_cmpls;
atomic64_t io_under_run;
atomic64_t qfull;
+ atomic64_t qsz_rampup;
+ atomic64_t qsz_rampdown;
+ atomic64_t last_qsz;
atomic64_t tgt_not_rdy;
};
@@ -101,9 +107,9 @@ static inline void
snic_stats_update_active_ios(struct snic_stats *s_stats)
{
struct snic_io_stats *io = &s_stats->io;
- u32 nr_active_ios;
+ int nr_active_ios;
- nr_active_ios = atomic64_inc_return(&io->active);
+ nr_active_ios = atomic64_read(&io->active);
if (atomic64_read(&io->max_active) < nr_active_ios)
atomic64_set(&io->max_active, nr_active_ios);
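snic_stats_update_active_ios() above now tracks the high-water mark with a plain read-compare-set, which can lose an occasional update under contention but avoids a retry loop on the hot path. If an exact maximum were required, a strict variant might use atomic64_cmpxchg() as in this sketch (not part of the driver):

static inline void stats_track_max(atomic64_t *max, s64 val)
{
	s64 cur = atomic64_read(max);

	/* Retry until val is installed or a larger value is seen. */
	while (val > cur) {
		s64 old = atomic64_cmpxchg(max, cur, val);

		if (old == cur)
			break;
		cur = old;
	}
}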
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c
index e0b5549bc..dad5fc66e 100644
--- a/drivers/scsi/snic/vnic_dev.c
+++ b/drivers/scsi/snic/vnic_dev.c
@@ -263,12 +263,20 @@ static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
int wait)
{
struct devcmd2_controller *dc2c = vdev->devcmd2;
- struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+ struct devcmd2_result *result = NULL;
unsigned int i;
int delay;
int err;
u32 posted;
+ u32 fetch_idx;
u32 new_posted;
+ u8 color;
+
+ fetch_idx = ioread32(&dc2c->wq_ctrl->fetch_index);
+ if (fetch_idx == 0xFFFFFFFF) { /* check for hardware gone */
+ /* Hardware surprise removal: return error */
+ return -ENODEV;
+ }
posted = ioread32(&dc2c->wq_ctrl->posted_index);
@@ -278,6 +286,13 @@ static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
}
new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
+ if (new_posted == fetch_idx) {
+ pr_err("%s: wq is full while issuing devcmd2 command %d, fetch index: %u, posted index: %u\n",
+ pci_name(vdev->pdev), _CMD_N(cmd), fetch_idx, posted);
+
+ return -EBUSY;
+ }
+
dc2c->cmd_ring[posted].cmd = cmd;
dc2c->cmd_ring[posted].flags = 0;
@@ -299,14 +314,22 @@ static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
return 0;
+ result = dc2c->result + dc2c->next_result;
+ color = dc2c->color;
+
+	/*
+	 * Increment next_result after posting the devcmd, irrespective
+	 * of the devcmd result; this must happen exactly once.
+	 */
+ dc2c->next_result++;
+ if (dc2c->next_result == dc2c->result_size) {
+ dc2c->next_result = 0;
+ dc2c->color = dc2c->color ? 0 : 1;
+ }
+
for (delay = 0; delay < wait; delay++) {
udelay(100);
- if (result->color == dc2c->color) {
- dc2c->next_result++;
- if (dc2c->next_result == dc2c->result_size) {
- dc2c->next_result = 0;
- dc2c->color = dc2c->color ? 0 : 1;
- }
+ if (result->color == color) {
if (result->error) {
err = (int) result->error;
if (err != ERR_ECMDUNKNOWN ||
@@ -317,13 +340,6 @@ static int _svnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
return err;
}
if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
- /*
- * Adding the rmb() prevents the compiler
- * and/or CPU from reordering the reads which
- * would potentially result in reading stale
- * values.
- */
- rmb();
for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
vdev->args[i] = result->results[i];
}
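The _svnic_dev_cmd2() hunks add two guards before posting: a fetch index of 0xFFFFFFFF means the PCI device has been surprise-removed (reads of an unmapped BAR return all-ones), and a posted index that would advance onto the fetch index means the ring is full. The full check sacrifices one slot to distinguish full from empty, as in this sketch:

/* Full when the producer would catch up with the consumer. */
static inline int devcmd2_ring_full(u32 posted, u32 fetch_idx)
{
	return ((posted + 1) % DEVCMD2_RING_SIZE) == fetch_idx;
}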
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index dbf1882cf..7af5226aa 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1974,9 +1974,12 @@ static long read_tape(struct scsi_tape *STp, long count,
transfer = (int)cmdstatp->uremainder64;
else
transfer = 0;
- if (STp->block_size == 0 &&
- cmdstatp->sense_hdr.sense_key == MEDIUM_ERROR)
- transfer = bytes;
+ if (cmdstatp->sense_hdr.sense_key == MEDIUM_ERROR) {
+ if (STp->block_size == 0)
+ transfer = bytes;
+ /* Some drives set ILI with MEDIUM ERROR */
+ cmdstatp->flags &= ~SENSE_ILI;
+ }
if (cmdstatp->flags & SENSE_ILI) { /* ILI */
if (STp->block_size == 0 &&
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index b9de487bb..3c4c07038 100644
--- a/drivers/scsi/sun3_scsi.c
+++ b/drivers/scsi/sun3_scsi.c
@@ -36,14 +36,10 @@
#include <scsi/scsi_host.h>
#include "sun3_scsi.h"
-/* Definitions for the core NCR5380 driver. */
-
-#define REAL_DMA
-/* #define SUPPORT_TAGS */
/* minimum number of bytes to do dma on */
#define DMA_MIN_SIZE 129
-/* #define MAX_TAGS 32 */
+/* Definitions for the core NCR5380 driver. */
#define NCR5380_implementation_fields /* none */
@@ -55,14 +51,12 @@
#define NCR5380_abort sun3scsi_abort
#define NCR5380_info sun3scsi_info
-#define NCR5380_dma_read_setup(instance, data, count) \
- sun3scsi_dma_setup(instance, data, count, 0)
-#define NCR5380_dma_write_setup(instance, data, count) \
- sun3scsi_dma_setup(instance, data, count, 1)
+#define NCR5380_dma_recv_setup(instance, data, count) (count)
+#define NCR5380_dma_send_setup(instance, data, count) (count)
#define NCR5380_dma_residual(instance) \
sun3scsi_dma_residual(instance)
#define NCR5380_dma_xfer_len(instance, cmd, phase) \
- sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd, !((phase) & SR_IO))
+ sun3scsi_dma_xfer_len(cmd->SCp.this_residual, cmd)
#define NCR5380_acquire_dma_irq(instance) (1)
#define NCR5380_release_dma_irq(instance)
@@ -78,10 +72,6 @@ static int setup_cmd_per_lun = -1;
module_param(setup_cmd_per_lun, int, 0);
static int setup_sg_tablesize = -1;
module_param(setup_sg_tablesize, int, 0);
-#ifdef SUPPORT_TAGS
-static int setup_use_tagged_queuing = -1;
-module_param(setup_use_tagged_queuing, int, 0);
-#endif
static int setup_hostid = -1;
module_param(setup_hostid, int, 0);
@@ -263,14 +253,13 @@ static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
return last_residual;
}
-static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,
- struct scsi_cmnd *cmd,
- int write_flag)
+static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted_len,
+ struct scsi_cmnd *cmd)
{
- if (cmd->request->cmd_type == REQ_TYPE_FS)
- return wanted;
- else
+ if (wanted_len < DMA_MIN_SIZE || cmd->request->cmd_type != REQ_TYPE_FS)
return 0;
+
+ return wanted_len;
}
static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)
@@ -408,7 +397,7 @@ static int sun3scsi_dma_finish(int write_flag)
}
-#include "atari_NCR5380.c"
+#include "NCR5380.c"
#ifdef SUN3_SCSI_VME
#define SUN3_SCSI_NAME "Sun3 NCR5380 VME SCSI"
@@ -516,10 +505,6 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
instance->io_port = (unsigned long)ioaddr;
instance->irq = irq->start;
-#ifdef SUPPORT_TAGS
- host_flags |= setup_use_tagged_queuing > 0 ? FLAG_TAGGED_QUEUING : 0;
-#endif
-
error = NCR5380_init(instance, host_flags);
if (error)
goto fail_init;
@@ -527,15 +512,9 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
error = request_irq(instance->irq, scsi_sun3_intr, 0,
"NCR5380", instance);
if (error) {
-#ifdef REAL_DMA
pr_err(PFX "scsi%d: IRQ %d not free, bailing out\n",
instance->host_no, instance->irq);
goto fail_irq;
-#else
- pr_warn(PFX "scsi%d: IRQ %d not free, interrupts disabled\n",
- instance->host_no, instance->irq);
- instance->irq = NO_IRQ;
-#endif
}
dregs->csr = 0;
@@ -565,8 +544,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
return 0;
fail_host:
- if (instance->irq != NO_IRQ)
- free_irq(instance->irq, instance);
+ free_irq(instance->irq, instance);
fail_irq:
NCR5380_exit(instance);
fail_init:
@@ -583,8 +561,7 @@ static int __exit sun3_scsi_remove(struct platform_device *pdev)
struct Scsi_Host *instance = platform_get_drvdata(pdev);
scsi_remove_host(instance);
- if (instance->irq != NO_IRQ)
- free_irq(instance->irq, instance);
+ free_irq(instance->irq, instance);
NCR5380_exit(instance);
scsi_host_put(instance);
if (udc_regs)
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index 4615fda60..8a8608ac6 100644
--- a/drivers/scsi/t128.c
+++ b/drivers/scsi/t128.c
@@ -1,5 +1,3 @@
-#define PSEUDO_DMA
-
/*
* Trantor T128/T128F/T228 driver
* Note : architecturally, the T100 and T130 are different and won't
@@ -76,7 +74,6 @@
#include <scsi/scsi_host.h>
#include "t128.h"
-#define AUTOPROBE_IRQ
#include "NCR5380.h"
static struct override {
@@ -210,7 +207,7 @@ found:
instance->base = base;
((struct NCR5380_hostdata *)instance->hostdata)->base = p;
- if (NCR5380_init(instance, 0))
+ if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP))
goto out_unregister;
NCR5380_maybe_reset_bus(instance);
@@ -294,7 +291,7 @@ static int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev,
}
/*
- * Function : int NCR5380_pread (struct Scsi_Host *instance,
+ * Function : int t128_pread (struct Scsi_Host *instance,
* unsigned char *dst, int len)
*
* Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
@@ -306,8 +303,8 @@ static int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev,
* timeout.
*/
-static inline int
-NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len)
+static inline int t128_pread(struct Scsi_Host *instance,
+ unsigned char *dst, int len)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
void __iomem *reg, *base = hostdata->base;
@@ -340,7 +337,7 @@ NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len)
}
/*
- * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
+ * Function : int t128_pwrite (struct Scsi_Host *instance,
* unsigned char *src, int len)
*
* Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
@@ -352,8 +349,8 @@ NCR5380_pread(struct Scsi_Host *instance, unsigned char *dst, int len)
* timeout.
*/
-static inline int
-NCR5380_pwrite(struct Scsi_Host *instance, unsigned char *src, int len)
+static inline int t128_pwrite(struct Scsi_Host *instance,
+ unsigned char *src, int len)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
void __iomem *reg, *base = hostdata->base;
@@ -394,8 +391,6 @@ static struct scsi_host_template driver_template = {
.detect = t128_detect,
.release = t128_release,
.proc_name = "t128",
- .show_info = t128_show_info,
- .write_info = t128_write_info,
.info = t128_info,
.queuecommand = t128_queue_command,
.eh_abort_handler = t128_abort,
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
index dd16d8549..c95bcd839 100644
--- a/drivers/scsi/t128.h
+++ b/drivers/scsi/t128.h
@@ -77,14 +77,17 @@
#define NCR5380_write(reg, value) writeb((value),(T128_address(reg)))
#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize)
+#define NCR5380_dma_recv_setup t128_pread
+#define NCR5380_dma_send_setup t128_pwrite
+#define NCR5380_dma_residual(instance) (0)
#define NCR5380_intr t128_intr
#define NCR5380_queue_command t128_queue_command
#define NCR5380_abort t128_abort
#define NCR5380_bus_reset t128_bus_reset
#define NCR5380_info t128_info
-#define NCR5380_show_info t128_show_info
-#define NCR5380_write_info t128_write_info
+
+#define NCR5380_io_delay(x) udelay(x)
/* 15 14 12 10 7 5 3
1101 0100 1010 1000 */
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 5ade71306..380230f03 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -9,7 +9,8 @@ obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
obj-$(CONFIG_ARCH_QCOM) += qcom/
-obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
+obj-$(CONFIG_ARCH_RENESAS) += renesas/
+obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_SOC_SAMSUNG) += samsung/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
diff --git a/drivers/soc/brcmstb/Kconfig b/drivers/soc/brcmstb/Kconfig
index 39cab3bd5..7fec3b4c8 100644
--- a/drivers/soc/brcmstb/Kconfig
+++ b/drivers/soc/brcmstb/Kconfig
@@ -1,6 +1,7 @@
menuconfig SOC_BRCMSTB
bool "Broadcom STB SoC drivers"
depends on ARM
+ select SOC_BUS
help
Enables drivers for the Broadcom Set-Top Box (STB) series of chips.
This option alone enables only some support code, while the drivers
diff --git a/drivers/soc/brcmstb/common.c b/drivers/soc/brcmstb/common.c
index c262c029b..94e733555 100644
--- a/drivers/soc/brcmstb/common.c
+++ b/drivers/soc/brcmstb/common.c
@@ -12,10 +12,18 @@
* GNU General Public License for more details.
*/
+#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/soc/brcmstb/brcmstb.h>
+#include <linux/sys_soc.h>
#include <soc/brcmstb/common.h>
+static u32 family_id;
+static u32 product_id;
+
static const struct of_device_id brcmstb_machine_match[] = {
{ .compatible = "brcm,brcmstb", },
{ }
@@ -31,3 +39,61 @@ bool soc_is_brcmstb(void)
return of_match_node(brcmstb_machine_match, root) != NULL;
}
+
+static const struct of_device_id sun_top_ctrl_match[] = {
+ { .compatible = "brcm,brcmstb-sun-top-ctrl", },
+ { }
+};
+
+static int __init brcmstb_soc_device_init(void)
+{
+ struct soc_device_attribute *soc_dev_attr;
+ struct soc_device *soc_dev;
+ struct device_node *sun_top_ctrl;
+ void __iomem *sun_top_ctrl_base;
+ int ret = 0;
+
+ sun_top_ctrl = of_find_matching_node(NULL, sun_top_ctrl_match);
+ if (!sun_top_ctrl)
+ return -ENODEV;
+
+ sun_top_ctrl_base = of_iomap(sun_top_ctrl, 0);
+ if (!sun_top_ctrl_base)
+ return -ENODEV;
+
+ family_id = readl(sun_top_ctrl_base);
+ product_id = readl(sun_top_ctrl_base + 0x4);
+
+ soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+ if (!soc_dev_attr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ soc_dev_attr->family = kasprintf(GFP_KERNEL, "%x",
+ family_id >> 28 ?
+ family_id >> 16 : family_id >> 8);
+ soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%x",
+ product_id >> 28 ?
+ product_id >> 16 : product_id >> 8);
+ soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c%d",
+ ((product_id & 0xf0) >> 4) + 'A',
+ product_id & 0xf);
+
+ soc_dev = soc_device_register(soc_dev_attr);
+ if (IS_ERR(soc_dev)) {
+ kfree(soc_dev_attr->family);
+ kfree(soc_dev_attr->soc_id);
+ kfree(soc_dev_attr->revision);
+ kfree(soc_dev_attr);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ iounmap(sun_top_ctrl_base);
+ return ret;
+}
+arch_initcall(brcmstb_soc_device_init);
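brcmstb_soc_device_init() above derives the soc_device strings from the first two SUN_TOP_CTRL words. The ternary shift appears to cover two id layouts: parts whose top nibble is set carry a four-digit chip id in the upper 16 bits, while parts with a zero top nibble carry a five-digit id in the upper 24 bits. A sketch of the decode, with example values assumed for illustration:

static u32 brcmstb_chip_id(u32 reg)
{
	/* Top nibble set: four-digit id, e.g. 0x74450000 -> 0x7445. */
	if (reg >> 28)
		return reg >> 16;

	/* Otherwise a five-digit id, e.g. 0x07437100 -> 0x74371. */
	return reg >> 8;
}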
diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
index 658457125..333eb2215 100644
--- a/drivers/soc/fsl/qe/gpio.c
+++ b/drivers/soc/fsl/qe/gpio.c
@@ -18,6 +18,8 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/gpio/driver.h>
+/* FIXME: needed for gpio_to_chip(); get rid of this */
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -37,15 +39,9 @@ struct qe_gpio_chip {
struct qe_pio_regs saved_regs;
};
-static inline struct qe_gpio_chip *
-to_qe_gpio_chip(struct of_mm_gpio_chip *mm_gc)
-{
- return container_of(mm_gc, struct qe_gpio_chip, mm_gc);
-}
-
static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
{
- struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(&mm_gc->gc);
struct qe_pio_regs __iomem *regs = mm_gc->regs;
qe_gc->cpdata = in_be32(&regs->cpdata);
@@ -69,7 +65,7 @@ static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
struct qe_pio_regs __iomem *regs = mm_gc->regs;
unsigned long flags;
u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
@@ -89,7 +85,7 @@ static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
unsigned long flags;
spin_lock_irqsave(&qe_gc->lock, flags);
@@ -104,7 +100,7 @@ static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+ struct qe_gpio_chip *qe_gc = gpiochip_get_data(gc);
unsigned long flags;
qe_gpio_set(gc, gpio, val);
@@ -165,7 +161,7 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index)
}
mm_gc = to_of_mm_gpio_chip(gc);
- qe_gc = to_qe_gpio_chip(mm_gc);
+ qe_gc = gpiochip_get_data(gc);
spin_lock_irqsave(&qe_gc->lock, flags);
@@ -302,7 +298,7 @@ static int __init qe_add_gpiochips(void)
gc->get = qe_gpio_get;
gc->set = qe_gpio_set;
- ret = of_mm_gpiochip_add(np, mm_gc);
+ ret = of_mm_gpiochip_add_data(np, mm_gc, qe_gc);
if (ret)
goto err;
continue;
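The qe/gpio.c hunks retire the open-coded container_of() helper in favor of gpiolib's data pointer: of_mm_gpiochip_add_data() associates the private structure with the chip at registration, and gpiochip_get_data() retrieves it in every callback. The shape of the conversion, sketched with a hypothetical driver struct:

struct my_gpio_chip {
	struct of_mm_gpio_chip mm_gc;	/* embedded; the gpio_chip is mm_gc.gc */
	spinlock_t lock;
};

static int my_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
	struct my_gpio_chip *priv = gpiochip_get_data(gc);	/* no container_of() */

	/* ... read hardware state under priv->lock ... */
	return 0;
}

/* At registration time, hand the private pointer to gpiolib: */
ret = of_mm_gpiochip_add_data(np, &priv->mm_gc, priv);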
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index 0d9b19a78..a003ba26c 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -52,6 +52,7 @@
#define PWRAP_DEW_WRITE_TEST_VAL 0xa55a
/* macro for manual command */
+#define PWRAP_MAN_CMD_SPI_WRITE_NEW (1 << 14)
#define PWRAP_MAN_CMD_SPI_WRITE (1 << 13)
#define PWRAP_MAN_CMD_OP_CSH (0x0 << 8)
#define PWRAP_MAN_CMD_OP_CSL (0x1 << 8)
@@ -69,33 +70,75 @@
PWRAP_WDT_SRC_EN_HARB_STAUPD_DLE | \
PWRAP_WDT_SRC_EN_HARB_STAUPD_ALE)
-/* macro for slave device wrapper registers */
-#define PWRAP_DEW_BASE 0xbc00
-#define PWRAP_DEW_EVENT_OUT_EN (PWRAP_DEW_BASE + 0x0)
-#define PWRAP_DEW_DIO_EN (PWRAP_DEW_BASE + 0x2)
-#define PWRAP_DEW_EVENT_SRC_EN (PWRAP_DEW_BASE + 0x4)
-#define PWRAP_DEW_EVENT_SRC (PWRAP_DEW_BASE + 0x6)
-#define PWRAP_DEW_EVENT_FLAG (PWRAP_DEW_BASE + 0x8)
-#define PWRAP_DEW_READ_TEST (PWRAP_DEW_BASE + 0xa)
-#define PWRAP_DEW_WRITE_TEST (PWRAP_DEW_BASE + 0xc)
-#define PWRAP_DEW_CRC_EN (PWRAP_DEW_BASE + 0xe)
-#define PWRAP_DEW_CRC_VAL (PWRAP_DEW_BASE + 0x10)
-#define PWRAP_DEW_MON_GRP_SEL (PWRAP_DEW_BASE + 0x12)
-#define PWRAP_DEW_MON_FLAG_SEL (PWRAP_DEW_BASE + 0x14)
-#define PWRAP_DEW_EVENT_TEST (PWRAP_DEW_BASE + 0x16)
-#define PWRAP_DEW_CIPHER_KEY_SEL (PWRAP_DEW_BASE + 0x18)
-#define PWRAP_DEW_CIPHER_IV_SEL (PWRAP_DEW_BASE + 0x1a)
-#define PWRAP_DEW_CIPHER_LOAD (PWRAP_DEW_BASE + 0x1c)
-#define PWRAP_DEW_CIPHER_START (PWRAP_DEW_BASE + 0x1e)
-#define PWRAP_DEW_CIPHER_RDY (PWRAP_DEW_BASE + 0x20)
-#define PWRAP_DEW_CIPHER_MODE (PWRAP_DEW_BASE + 0x22)
-#define PWRAP_DEW_CIPHER_SWRST (PWRAP_DEW_BASE + 0x24)
-#define PWRAP_MT8173_DEW_CIPHER_IV0 (PWRAP_DEW_BASE + 0x26)
-#define PWRAP_MT8173_DEW_CIPHER_IV1 (PWRAP_DEW_BASE + 0x28)
-#define PWRAP_MT8173_DEW_CIPHER_IV2 (PWRAP_DEW_BASE + 0x2a)
-#define PWRAP_MT8173_DEW_CIPHER_IV3 (PWRAP_DEW_BASE + 0x2c)
-#define PWRAP_MT8173_DEW_CIPHER_IV4 (PWRAP_DEW_BASE + 0x2e)
-#define PWRAP_MT8173_DEW_CIPHER_IV5 (PWRAP_DEW_BASE + 0x30)
+/* defines for slave device wrapper registers */
+enum dew_regs {
+ PWRAP_DEW_BASE,
+ PWRAP_DEW_DIO_EN,
+ PWRAP_DEW_READ_TEST,
+ PWRAP_DEW_WRITE_TEST,
+ PWRAP_DEW_CRC_EN,
+ PWRAP_DEW_CRC_VAL,
+ PWRAP_DEW_MON_GRP_SEL,
+ PWRAP_DEW_CIPHER_KEY_SEL,
+ PWRAP_DEW_CIPHER_IV_SEL,
+ PWRAP_DEW_CIPHER_RDY,
+ PWRAP_DEW_CIPHER_MODE,
+ PWRAP_DEW_CIPHER_SWRST,
+
+ /* MT6397 only regs */
+ PWRAP_DEW_EVENT_OUT_EN,
+ PWRAP_DEW_EVENT_SRC_EN,
+ PWRAP_DEW_EVENT_SRC,
+ PWRAP_DEW_EVENT_FLAG,
+ PWRAP_DEW_MON_FLAG_SEL,
+ PWRAP_DEW_EVENT_TEST,
+ PWRAP_DEW_CIPHER_LOAD,
+ PWRAP_DEW_CIPHER_START,
+
+ /* MT6323 only regs */
+ PWRAP_DEW_CIPHER_EN,
+ PWRAP_DEW_RDDMY_NO,
+};
+
+static const u32 mt6323_regs[] = {
+ [PWRAP_DEW_BASE] = 0x0000,
+ [PWRAP_DEW_DIO_EN] = 0x018a,
+ [PWRAP_DEW_READ_TEST] = 0x018c,
+ [PWRAP_DEW_WRITE_TEST] = 0x018e,
+ [PWRAP_DEW_CRC_EN] = 0x0192,
+ [PWRAP_DEW_CRC_VAL] = 0x0194,
+ [PWRAP_DEW_MON_GRP_SEL] = 0x0196,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0x0198,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0x019a,
+ [PWRAP_DEW_CIPHER_EN] = 0x019c,
+ [PWRAP_DEW_CIPHER_RDY] = 0x019e,
+ [PWRAP_DEW_CIPHER_MODE] = 0x01a0,
+ [PWRAP_DEW_CIPHER_SWRST] = 0x01a2,
+ [PWRAP_DEW_RDDMY_NO] = 0x01a4,
+};
+
+static const u32 mt6397_regs[] = {
+ [PWRAP_DEW_BASE] = 0xbc00,
+ [PWRAP_DEW_EVENT_OUT_EN] = 0xbc00,
+ [PWRAP_DEW_DIO_EN] = 0xbc02,
+ [PWRAP_DEW_EVENT_SRC_EN] = 0xbc04,
+ [PWRAP_DEW_EVENT_SRC] = 0xbc06,
+ [PWRAP_DEW_EVENT_FLAG] = 0xbc08,
+ [PWRAP_DEW_READ_TEST] = 0xbc0a,
+ [PWRAP_DEW_WRITE_TEST] = 0xbc0c,
+ [PWRAP_DEW_CRC_EN] = 0xbc0e,
+ [PWRAP_DEW_CRC_VAL] = 0xbc10,
+ [PWRAP_DEW_MON_GRP_SEL] = 0xbc12,
+ [PWRAP_DEW_MON_FLAG_SEL] = 0xbc14,
+ [PWRAP_DEW_EVENT_TEST] = 0xbc16,
+ [PWRAP_DEW_CIPHER_KEY_SEL] = 0xbc18,
+ [PWRAP_DEW_CIPHER_IV_SEL] = 0xbc1a,
+ [PWRAP_DEW_CIPHER_LOAD] = 0xbc1c,
+ [PWRAP_DEW_CIPHER_START] = 0xbc1e,
+ [PWRAP_DEW_CIPHER_RDY] = 0xbc20,
+ [PWRAP_DEW_CIPHER_MODE] = 0xbc22,
+ [PWRAP_DEW_CIPHER_SWRST] = 0xbc24,
+};
enum pwrap_regs {
PWRAP_MUX_SEL,
@@ -158,6 +201,13 @@ enum pwrap_regs {
PWRAP_DCM_EN,
PWRAP_DCM_DBC_PRD,
+ /* MT2701 only regs */
+ PWRAP_ADC_CMD_ADDR,
+ PWRAP_PWRAP_ADC_CMD,
+ PWRAP_ADC_RDY_ADDR,
+ PWRAP_ADC_RDATA_ADDR1,
+ PWRAP_ADC_RDATA_ADDR2,
+
/* MT8135 only regs */
PWRAP_CSHEXT,
PWRAP_EVENT_IN_EN,
@@ -194,6 +244,92 @@ enum pwrap_regs {
PWRAP_CIPHER_EN,
};
+static int mt2701_regs[] = {
+ [PWRAP_MUX_SEL] = 0x0,
+ [PWRAP_WRAP_EN] = 0x4,
+ [PWRAP_DIO_EN] = 0x8,
+ [PWRAP_SIDLY] = 0xc,
+ [PWRAP_RDDMY] = 0x18,
+ [PWRAP_SI_CK_CON] = 0x1c,
+ [PWRAP_CSHEXT_WRITE] = 0x20,
+ [PWRAP_CSHEXT_READ] = 0x24,
+ [PWRAP_CSLEXT_START] = 0x28,
+ [PWRAP_CSLEXT_END] = 0x2c,
+ [PWRAP_STAUPD_PRD] = 0x30,
+ [PWRAP_STAUPD_GRPEN] = 0x34,
+ [PWRAP_STAUPD_MAN_TRIG] = 0x38,
+ [PWRAP_STAUPD_STA] = 0x3c,
+ [PWRAP_WRAP_STA] = 0x44,
+ [PWRAP_HARB_INIT] = 0x48,
+ [PWRAP_HARB_HPRIO] = 0x4c,
+ [PWRAP_HIPRIO_ARB_EN] = 0x50,
+ [PWRAP_HARB_STA0] = 0x54,
+ [PWRAP_HARB_STA1] = 0x58,
+ [PWRAP_MAN_EN] = 0x5c,
+ [PWRAP_MAN_CMD] = 0x60,
+ [PWRAP_MAN_RDATA] = 0x64,
+ [PWRAP_MAN_VLDCLR] = 0x68,
+ [PWRAP_WACS0_EN] = 0x6c,
+ [PWRAP_INIT_DONE0] = 0x70,
+ [PWRAP_WACS0_CMD] = 0x74,
+ [PWRAP_WACS0_RDATA] = 0x78,
+ [PWRAP_WACS0_VLDCLR] = 0x7c,
+ [PWRAP_WACS1_EN] = 0x80,
+ [PWRAP_INIT_DONE1] = 0x84,
+ [PWRAP_WACS1_CMD] = 0x88,
+ [PWRAP_WACS1_RDATA] = 0x8c,
+ [PWRAP_WACS1_VLDCLR] = 0x90,
+ [PWRAP_WACS2_EN] = 0x94,
+ [PWRAP_INIT_DONE2] = 0x98,
+ [PWRAP_WACS2_CMD] = 0x9c,
+ [PWRAP_WACS2_RDATA] = 0xa0,
+ [PWRAP_WACS2_VLDCLR] = 0xa4,
+ [PWRAP_INT_EN] = 0xa8,
+ [PWRAP_INT_FLG_RAW] = 0xac,
+ [PWRAP_INT_FLG] = 0xb0,
+ [PWRAP_INT_CLR] = 0xb4,
+ [PWRAP_SIG_ADR] = 0xb8,
+ [PWRAP_SIG_MODE] = 0xbc,
+ [PWRAP_SIG_VALUE] = 0xc0,
+ [PWRAP_SIG_ERRVAL] = 0xc4,
+ [PWRAP_CRC_EN] = 0xc8,
+ [PWRAP_TIMER_EN] = 0xcc,
+ [PWRAP_TIMER_STA] = 0xd0,
+ [PWRAP_WDT_UNIT] = 0xd4,
+ [PWRAP_WDT_SRC_EN] = 0xd8,
+ [PWRAP_WDT_FLG] = 0xdc,
+ [PWRAP_DEBUG_INT_SEL] = 0xe0,
+ [PWRAP_DVFS_ADR0] = 0xe4,
+ [PWRAP_DVFS_WDATA0] = 0xe8,
+ [PWRAP_DVFS_ADR1] = 0xec,
+ [PWRAP_DVFS_WDATA1] = 0xf0,
+ [PWRAP_DVFS_ADR2] = 0xf4,
+ [PWRAP_DVFS_WDATA2] = 0xf8,
+ [PWRAP_DVFS_ADR3] = 0xfc,
+ [PWRAP_DVFS_WDATA3] = 0x100,
+ [PWRAP_DVFS_ADR4] = 0x104,
+ [PWRAP_DVFS_WDATA4] = 0x108,
+ [PWRAP_DVFS_ADR5] = 0x10c,
+ [PWRAP_DVFS_WDATA5] = 0x110,
+ [PWRAP_DVFS_ADR6] = 0x114,
+ [PWRAP_DVFS_WDATA6] = 0x118,
+ [PWRAP_DVFS_ADR7] = 0x11c,
+ [PWRAP_DVFS_WDATA7] = 0x120,
+ [PWRAP_CIPHER_KEY_SEL] = 0x124,
+ [PWRAP_CIPHER_IV_SEL] = 0x128,
+ [PWRAP_CIPHER_EN] = 0x12c,
+ [PWRAP_CIPHER_RDY] = 0x130,
+ [PWRAP_CIPHER_MODE] = 0x134,
+ [PWRAP_CIPHER_SWRST] = 0x138,
+ [PWRAP_DCM_EN] = 0x13c,
+ [PWRAP_DCM_DBC_PRD] = 0x140,
+ [PWRAP_ADC_CMD_ADDR] = 0x144,
+ [PWRAP_PWRAP_ADC_CMD] = 0x148,
+ [PWRAP_ADC_RDY_ADDR] = 0x14c,
+ [PWRAP_ADC_RDATA_ADDR1] = 0x150,
+ [PWRAP_ADC_RDATA_ADDR2] = 0x154,
+};
+
static int mt8173_regs[] = {
[PWRAP_MUX_SEL] = 0x0,
[PWRAP_WRAP_EN] = 0x4,
@@ -349,36 +485,28 @@ static int mt8135_regs[] = {
[PWRAP_DCM_DBC_PRD] = 0x160,
};
+enum pmic_type {
+ PMIC_MT6323,
+ PMIC_MT6397,
+};
+
enum pwrap_type {
+ PWRAP_MT2701,
PWRAP_MT8135,
PWRAP_MT8173,
};
-struct pmic_wrapper_type {
- int *regs;
- enum pwrap_type type;
- u32 arb_en_all;
-};
-
-static struct pmic_wrapper_type pwrap_mt8135 = {
- .regs = mt8135_regs,
- .type = PWRAP_MT8135,
- .arb_en_all = 0x1ff,
-};
-
-static struct pmic_wrapper_type pwrap_mt8173 = {
- .regs = mt8173_regs,
- .type = PWRAP_MT8173,
- .arb_en_all = 0x3f,
+struct pwrap_slv_type {
+ const u32 *dew_regs;
+ enum pmic_type type;
};
struct pmic_wrapper {
struct device *dev;
void __iomem *base;
struct regmap *regmap;
- int *regs;
- enum pwrap_type type;
- u32 arb_en_all;
+ const struct pmic_wrapper_type *master;
+ const struct pwrap_slv_type *slave;
struct clk *clk_spi;
struct clk *clk_wrap;
struct reset_control *rstc;
@@ -387,24 +515,26 @@ struct pmic_wrapper {
void __iomem *bridge_base;
};
-static inline int pwrap_is_mt8135(struct pmic_wrapper *wrp)
-{
- return wrp->type == PWRAP_MT8135;
-}
-
-static inline int pwrap_is_mt8173(struct pmic_wrapper *wrp)
-{
- return wrp->type == PWRAP_MT8173;
-}
+struct pmic_wrapper_type {
+ int *regs;
+ enum pwrap_type type;
+ u32 arb_en_all;
+ u32 int_en_all;
+ u32 spi_w;
+ u32 wdt_src;
+ int has_bridge:1;
+ int (*init_reg_clock)(struct pmic_wrapper *wrp);
+ int (*init_soc_specific)(struct pmic_wrapper *wrp);
+};
static u32 pwrap_readl(struct pmic_wrapper *wrp, enum pwrap_regs reg)
{
- return readl(wrp->base + wrp->regs[reg]);
+ return readl(wrp->base + wrp->master->regs[reg]);
}
static void pwrap_writel(struct pmic_wrapper *wrp, u32 val, enum pwrap_regs reg)
{
- writel(val, wrp->base + wrp->regs[reg]);
+ writel(val, wrp->base + wrp->master->regs[reg]);
}
static bool pwrap_is_fsm_idle(struct pmic_wrapper *wrp)
@@ -522,15 +652,15 @@ static int pwrap_reset_spislave(struct pmic_wrapper *wrp)
pwrap_writel(wrp, 1, PWRAP_MAN_EN);
pwrap_writel(wrp, 0, PWRAP_DIO_EN);
- pwrap_writel(wrp, PWRAP_MAN_CMD_SPI_WRITE | PWRAP_MAN_CMD_OP_CSL,
+ pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_CSL,
PWRAP_MAN_CMD);
- pwrap_writel(wrp, PWRAP_MAN_CMD_SPI_WRITE | PWRAP_MAN_CMD_OP_OUTS,
+ pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_OUTS,
PWRAP_MAN_CMD);
- pwrap_writel(wrp, PWRAP_MAN_CMD_SPI_WRITE | PWRAP_MAN_CMD_OP_CSH,
+ pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_CSH,
PWRAP_MAN_CMD);
for (i = 0; i < 4; i++)
- pwrap_writel(wrp, PWRAP_MAN_CMD_SPI_WRITE | PWRAP_MAN_CMD_OP_OUTS,
+ pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_OUTS,
PWRAP_MAN_CMD);
ret = pwrap_wait_for_state(wrp, pwrap_is_sync_idle);
@@ -562,7 +692,8 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp)
for (i = 0; i < 4; i++) {
pwrap_writel(wrp, i, PWRAP_SIDLY);
- pwrap_read(wrp, PWRAP_DEW_READ_TEST, &rdata);
+ pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_READ_TEST],
+ &rdata);
if (rdata == PWRAP_DEW_READ_TEST_VAL) {
dev_dbg(wrp->dev, "[Read Test] pass, SIDLY=%x\n", i);
pass |= 1 << i;
@@ -580,19 +711,47 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp)
return 0;
}
-static int pwrap_init_reg_clock(struct pmic_wrapper *wrp)
+static int pwrap_mt8135_init_reg_clock(struct pmic_wrapper *wrp)
+{
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
+ pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
+ pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
+ pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
+
+ return 0;
+}
+
+static int pwrap_mt8173_init_reg_clock(struct pmic_wrapper *wrp)
{
- if (pwrap_is_mt8135(wrp)) {
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
- pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
- pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
- pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
- } else {
- pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
- pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
+ pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
+ pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
+ pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
+
+ return 0;
+}
+
+static int pwrap_mt2701_init_reg_clock(struct pmic_wrapper *wrp)
+{
+ switch (wrp->slave->type) {
+ case PMIC_MT6397:
+ pwrap_writel(wrp, 0xc, PWRAP_RDDMY);
+ pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE);
+ pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_READ);
+ pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
+ pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
+ break;
+
+ case PMIC_MT6323:
+ pwrap_writel(wrp, 0x8, PWRAP_RDDMY);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_RDDMY_NO],
+ 0x8);
+ pwrap_writel(wrp, 0x5, PWRAP_CSHEXT_WRITE);
+ pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_READ);
pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
+ break;
}
return 0;
@@ -608,7 +767,8 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp)
u32 rdata;
int ret;
- ret = pwrap_read(wrp, PWRAP_DEW_CIPHER_RDY, &rdata);
+ ret = pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_RDY],
+ &rdata);
if (ret)
return 0;
@@ -625,20 +785,37 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
pwrap_writel(wrp, 0x1, PWRAP_CIPHER_KEY_SEL);
pwrap_writel(wrp, 0x2, PWRAP_CIPHER_IV_SEL);
- if (pwrap_is_mt8135(wrp)) {
+ switch (wrp->master->type) {
+ case PWRAP_MT8135:
pwrap_writel(wrp, 1, PWRAP_CIPHER_LOAD);
pwrap_writel(wrp, 1, PWRAP_CIPHER_START);
- } else {
+ break;
+ case PWRAP_MT2701:
+ case PWRAP_MT8173:
pwrap_writel(wrp, 1, PWRAP_CIPHER_EN);
+ break;
}
/* Config cipher mode @PMIC */
- pwrap_write(wrp, PWRAP_DEW_CIPHER_SWRST, 0x1);
- pwrap_write(wrp, PWRAP_DEW_CIPHER_SWRST, 0x0);
- pwrap_write(wrp, PWRAP_DEW_CIPHER_KEY_SEL, 0x1);
- pwrap_write(wrp, PWRAP_DEW_CIPHER_IV_SEL, 0x2);
- pwrap_write(wrp, PWRAP_DEW_CIPHER_LOAD, 0x1);
- pwrap_write(wrp, PWRAP_DEW_CIPHER_START, 0x1);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_SWRST], 0x1);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_SWRST], 0x0);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_KEY_SEL], 0x1);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_IV_SEL], 0x2);
+
+ switch (wrp->slave->type) {
+ case PMIC_MT6397:
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_LOAD],
+ 0x1);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_START],
+ 0x1);
+ break;
+ case PMIC_MT6323:
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_EN],
+ 0x1);
+ break;
+ }
/* wait for cipher data ready@AP */
ret = pwrap_wait_for_state(wrp, pwrap_is_cipher_ready);
@@ -655,7 +832,7 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
}
/* wait for cipher mode idle */
- pwrap_write(wrp, PWRAP_DEW_CIPHER_MODE, 0x1);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_MODE], 0x1);
ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle);
if (ret) {
dev_err(wrp->dev, "cipher mode idle fail, ret=%d\n", ret);
@@ -665,9 +842,11 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
pwrap_writel(wrp, 1, PWRAP_CIPHER_MODE);
/* Write Test */
- if (pwrap_write(wrp, PWRAP_DEW_WRITE_TEST, PWRAP_DEW_WRITE_TEST_VAL) ||
- pwrap_read(wrp, PWRAP_DEW_WRITE_TEST, &rdata) ||
- (rdata != PWRAP_DEW_WRITE_TEST_VAL)) {
+ if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_WRITE_TEST],
+ PWRAP_DEW_WRITE_TEST_VAL) ||
+ pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_WRITE_TEST],
+ &rdata) ||
+ (rdata != PWRAP_DEW_WRITE_TEST_VAL)) {
dev_err(wrp->dev, "rdata=0x%04X\n", rdata);
return -EFAULT;
}
@@ -675,6 +854,63 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp)
return 0;
}
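
[editor's note] The write test closing pwrap_init_cipher() is a common bring-up pattern: push a known value through the newly configured path, read it back, and abort on any mismatch. A compilable sketch of that pattern; pmic_write()/pmic_read() and the test value are illustrative stand-ins, not the driver's API:

	#include <stdio.h>

	#define WRITE_TEST_VAL 0xa55a	/* illustrative known pattern */

	static unsigned int reg;	/* stand-in for the PMIC test register */

	static int pmic_write(unsigned int val) { reg = val; return 0; }
	static int pmic_read(unsigned int *val) { *val = reg; return 0; }

	int main(void)
	{
		unsigned int rdata = 0;

		if (pmic_write(WRITE_TEST_VAL) || pmic_read(&rdata) ||
		    rdata != WRITE_TEST_VAL) {
			fprintf(stderr, "rdata=0x%04x\n", rdata);
			return 1;	/* the driver returns -EFAULT here */
		}
		printf("write test passed\n");
		return 0;
	}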
+static int pwrap_mt8135_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ /* enable pwrap events and pwrap bridge in AP side */
+ pwrap_writel(wrp, 0x1, PWRAP_EVENT_IN_EN);
+ pwrap_writel(wrp, 0xffff, PWRAP_EVENT_DST_EN);
+ writel(0x7f, wrp->bridge_base + PWRAP_MT8135_BRIDGE_IORD_ARB_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WACS3_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WACS4_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WDT_UNIT);
+ writel(0xffff, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WDT_SRC_EN);
+ writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_TIMER_EN);
+ writel(0x7ff, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INT_EN);
+
+ /* enable PMIC event out and sources */
+ if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_OUT_EN],
+ 0x1) ||
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_SRC_EN],
+ 0xffff)) {
+ dev_err(wrp->dev, "enable dewrap fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int pwrap_mt8173_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ /* PMIC_DEWRAP enables */
+ if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_OUT_EN],
+ 0x1) ||
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_EVENT_SRC_EN],
+ 0xffff)) {
+ dev_err(wrp->dev, "enable dewrap fail\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int pwrap_mt2701_init_soc_specific(struct pmic_wrapper *wrp)
+{
+ /* GPS_INTF initialization */
+ switch (wrp->slave->type) {
+ case PMIC_MT6323:
+ pwrap_writel(wrp, 0x076c, PWRAP_ADC_CMD_ADDR);
+ pwrap_writel(wrp, 0x8000, PWRAP_PWRAP_ADC_CMD);
+ pwrap_writel(wrp, 0x072c, PWRAP_ADC_RDY_ADDR);
+ pwrap_writel(wrp, 0x072e, PWRAP_ADC_RDATA_ADDR1);
+ pwrap_writel(wrp, 0x0730, PWRAP_ADC_RDATA_ADDR2);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int pwrap_init(struct pmic_wrapper *wrp)
{
int ret;
@@ -684,7 +920,7 @@ static int pwrap_init(struct pmic_wrapper *wrp)
if (wrp->rstc_bridge)
reset_control_reset(wrp->rstc_bridge);
- if (pwrap_is_mt8173(wrp)) {
+ if (wrp->master->type == PWRAP_MT8173) {
/* Enable DCM */
pwrap_writel(wrp, 3, PWRAP_DCM_EN);
pwrap_writel(wrp, 0, PWRAP_DCM_DBC_PRD);
@@ -697,11 +933,11 @@ static int pwrap_init(struct pmic_wrapper *wrp)
pwrap_writel(wrp, 1, PWRAP_WRAP_EN);
- pwrap_writel(wrp, wrp->arb_en_all, PWRAP_HIPRIO_ARB_EN);
+ pwrap_writel(wrp, wrp->master->arb_en_all, PWRAP_HIPRIO_ARB_EN);
pwrap_writel(wrp, 1, PWRAP_WACS2_EN);
- ret = pwrap_init_reg_clock(wrp);
+ ret = wrp->master->init_reg_clock(wrp);
if (ret)
return ret;
@@ -711,7 +947,7 @@ static int pwrap_init(struct pmic_wrapper *wrp)
return ret;
/* Enable dual IO mode */
- pwrap_write(wrp, PWRAP_DEW_DIO_EN, 1);
+ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1);
/* Check IDLE & INIT_DONE in advance */
ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle);
@@ -723,7 +959,7 @@ static int pwrap_init(struct pmic_wrapper *wrp)
pwrap_writel(wrp, 1, PWRAP_DIO_EN);
/* Read Test */
- pwrap_read(wrp, PWRAP_DEW_READ_TEST, &rdata);
+ pwrap_read(wrp, wrp->slave->dew_regs[PWRAP_DEW_READ_TEST], &rdata);
if (rdata != PWRAP_DEW_READ_TEST_VAL) {
dev_err(wrp->dev, "Read test failed after switch to DIO mode: 0x%04x != 0x%04x\n",
PWRAP_DEW_READ_TEST_VAL, rdata);
@@ -736,15 +972,16 @@ static int pwrap_init(struct pmic_wrapper *wrp)
return ret;
/* Signature checking - using CRC */
- if (pwrap_write(wrp, PWRAP_DEW_CRC_EN, 0x1))
+ if (pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_EN], 0x1))
return -EFAULT;
pwrap_writel(wrp, 0x1, PWRAP_CRC_EN);
pwrap_writel(wrp, 0x0, PWRAP_SIG_MODE);
- pwrap_writel(wrp, PWRAP_DEW_CRC_VAL, PWRAP_SIG_ADR);
- pwrap_writel(wrp, wrp->arb_en_all, PWRAP_HIPRIO_ARB_EN);
+ pwrap_writel(wrp, wrp->slave->dew_regs[PWRAP_DEW_CRC_VAL],
+ PWRAP_SIG_ADR);
+ pwrap_writel(wrp, wrp->master->arb_en_all, PWRAP_HIPRIO_ARB_EN);
- if (pwrap_is_mt8135(wrp))
+ if (wrp->master->type == PWRAP_MT8135)
pwrap_writel(wrp, 0x7, PWRAP_RRARB_EN);
pwrap_writel(wrp, 0x1, PWRAP_WACS0_EN);
@@ -753,31 +990,10 @@ static int pwrap_init(struct pmic_wrapper *wrp)
pwrap_writel(wrp, 0x5, PWRAP_STAUPD_PRD);
pwrap_writel(wrp, 0xff, PWRAP_STAUPD_GRPEN);
- if (pwrap_is_mt8135(wrp)) {
- /* enable pwrap events and pwrap bridge in AP side */
- pwrap_writel(wrp, 0x1, PWRAP_EVENT_IN_EN);
- pwrap_writel(wrp, 0xffff, PWRAP_EVENT_DST_EN);
- writel(0x7f, wrp->bridge_base + PWRAP_MT8135_BRIDGE_IORD_ARB_EN);
- writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WACS3_EN);
- writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WACS4_EN);
- writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WDT_UNIT);
- writel(0xffff, wrp->bridge_base + PWRAP_MT8135_BRIDGE_WDT_SRC_EN);
- writel(0x1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_TIMER_EN);
- writel(0x7ff, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INT_EN);
-
- /* enable PMIC event out and sources */
- if (pwrap_write(wrp, PWRAP_DEW_EVENT_OUT_EN, 0x1) ||
- pwrap_write(wrp, PWRAP_DEW_EVENT_SRC_EN, 0xffff)) {
- dev_err(wrp->dev, "enable dewrap fail\n");
- return -EFAULT;
- }
- } else {
- /* PMIC_DEWRAP enables */
- if (pwrap_write(wrp, PWRAP_DEW_EVENT_OUT_EN, 0x1) ||
- pwrap_write(wrp, PWRAP_DEW_EVENT_SRC_EN, 0xffff)) {
- dev_err(wrp->dev, "enable dewrap fail\n");
- return -EFAULT;
- }
+ if (wrp->master->init_soc_specific) {
+ ret = wrp->master->init_soc_specific(wrp);
+ if (ret)
+ return ret;
}
/* Setup the init done registers */
@@ -785,7 +1001,7 @@ static int pwrap_init(struct pmic_wrapper *wrp)
pwrap_writel(wrp, 1, PWRAP_INIT_DONE0);
pwrap_writel(wrp, 1, PWRAP_INIT_DONE1);
- if (pwrap_is_mt8135(wrp)) {
+ if (wrp->master->has_bridge) {
writel(1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INIT_DONE3);
writel(1, wrp->bridge_base + PWRAP_MT8135_BRIDGE_INIT_DONE4);
}
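
[editor's note] The pwrap hunks above replace pwrap_is_mt8135()/pwrap_is_mt8173() checks with per-SoC ops tables: each pmic_wrapper_type carries data fields plus an init_reg_clock callback and an optional init_soc_specific hook that pwrap_init() calls only when present. A userspace sketch of that dispatch shape, with hypothetical names:

	#include <stdio.h>

	struct soc_ops {
		unsigned int arb_en_all;
		int (*init_reg_clock)(void);
		int (*init_soc_specific)(void);	/* optional, may be NULL */
	};

	static int soc_a_init_reg_clock(void)
	{
		printf("program CSHEXT/CSLEXT timings\n");
		return 0;
	}

	static const struct soc_ops soc_a_ops = {
		.arb_en_all = 0x3f,
		.init_reg_clock = soc_a_init_reg_clock,
		/* .init_soc_specific left NULL: no extra work on this SoC */
	};

	/* analogue of pwrap_init(): dispatch through the table, call the
	 * optional hook only when the SoC provides one */
	static int generic_init(const struct soc_ops *ops)
	{
		int ret = ops->init_reg_clock();

		if (ret)
			return ret;
		if (ops->init_soc_specific)
			ret = ops->init_soc_specific();
		return ret;
	}

	int main(void)
	{
		return generic_init(&soc_a_ops);
	}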
@@ -816,8 +1032,70 @@ static const struct regmap_config pwrap_regmap_config = {
.max_register = 0xffff,
};
+static const struct pwrap_slv_type pmic_mt6323 = {
+ .dew_regs = mt6323_regs,
+ .type = PMIC_MT6323,
+};
+
+static const struct pwrap_slv_type pmic_mt6397 = {
+ .dew_regs = mt6397_regs,
+ .type = PMIC_MT6397,
+};
+
+static const struct of_device_id of_slave_match_tbl[] = {
+ {
+ .compatible = "mediatek,mt6323",
+ .data = &pmic_mt6323,
+ }, {
+ .compatible = "mediatek,mt6397",
+ .data = &pmic_mt6397,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, of_slave_match_tbl);
+
+static const struct pmic_wrapper_type pwrap_mt2701 = {
+ .regs = mt2701_regs,
+ .type = PWRAP_MT2701,
+ .arb_en_all = 0x3f,
+ .int_en_all = ~(u32)(BIT(31) | BIT(2)),
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE_NEW,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .has_bridge = 0,
+ .init_reg_clock = pwrap_mt2701_init_reg_clock,
+ .init_soc_specific = pwrap_mt2701_init_soc_specific,
+};
+
+static struct pmic_wrapper_type pwrap_mt8135 = {
+ .regs = mt8135_regs,
+ .type = PWRAP_MT8135,
+ .arb_en_all = 0x1ff,
+ .int_en_all = ~(u32)(BIT(31) | BIT(1)),
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_ALL,
+ .has_bridge = 1,
+ .init_reg_clock = pwrap_mt8135_init_reg_clock,
+ .init_soc_specific = pwrap_mt8135_init_soc_specific,
+};
+
+static struct pmic_wrapper_type pwrap_mt8173 = {
+ .regs = mt8173_regs,
+ .type = PWRAP_MT8173,
+ .arb_en_all = 0x3f,
+ .int_en_all = ~(u32)(BIT(31) | BIT(1)),
+ .spi_w = PWRAP_MAN_CMD_SPI_WRITE,
+ .wdt_src = PWRAP_WDT_SRC_MASK_NO_STAUPD,
+ .has_bridge = 0,
+ .init_reg_clock = pwrap_mt8173_init_reg_clock,
+ .init_soc_specific = pwrap_mt8173_init_soc_specific,
+};
+
static struct of_device_id of_pwrap_match_tbl[] = {
{
+ .compatible = "mediatek,mt2701-pwrap",
+ .data = &pwrap_mt2701,
+ }, {
.compatible = "mediatek,mt8135-pwrap",
.data = &pwrap_mt8135,
}, {
@@ -831,24 +1109,30 @@ MODULE_DEVICE_TABLE(of, of_pwrap_match_tbl);
static int pwrap_probe(struct platform_device *pdev)
{
- int ret, irq, wdt_src;
+ int ret, irq;
struct pmic_wrapper *wrp;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_id =
of_match_device(of_pwrap_match_tbl, &pdev->dev);
- const struct pmic_wrapper_type *type;
+ const struct of_device_id *of_slave_id = NULL;
struct resource *res;
+ if (pdev->dev.of_node->child)
+ of_slave_id = of_match_node(of_slave_match_tbl,
+ pdev->dev.of_node->child);
+ if (!of_slave_id) {
+ dev_dbg(&pdev->dev, "slave pmic should be defined in dts\n");
+ return -EINVAL;
+ }
+
wrp = devm_kzalloc(&pdev->dev, sizeof(*wrp), GFP_KERNEL);
if (!wrp)
return -ENOMEM;
platform_set_drvdata(pdev, wrp);
- type = of_id->data;
- wrp->regs = type->regs;
- wrp->type = type->type;
- wrp->arb_en_all = type->arb_en_all;
+ wrp->master = of_id->data;
+ wrp->slave = of_slave_id->data;
wrp->dev = &pdev->dev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwrap");
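
[editor's note] pwrap_probe() now resolves the slave PMIC by matching the wrapper's first child DT node against of_slave_match_tbl and bailing out with -EINVAL when nothing matches. The lookup itself is a sentinel-terminated table scan, sketched here in userspace with strcmp() standing in for of_match_node():

	#include <stdio.h>
	#include <string.h>

	struct slv_type { const char *compatible; int type; };

	static const struct slv_type slaves[] = {
		{ "mediatek,mt6323", 6323 },
		{ "mediatek,mt6397", 6397 },
		{ NULL, 0 }	/* sentinel, like the of_device_id tables above */
	};

	static const struct slv_type *match_slave(const char *compat)
	{
		const struct slv_type *s;

		for (s = slaves; s->compatible; s++)
			if (!strcmp(s->compatible, compat))
				return s;
		return NULL;
	}

	int main(void)
	{
		const struct slv_type *s = match_slave("mediatek,mt6397");

		if (!s)
			return 1;	/* probe would bail with -EINVAL */
		printf("slave type %d\n", s->type);
		return 0;
	}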
@@ -863,7 +1147,7 @@ static int pwrap_probe(struct platform_device *pdev)
return ret;
}
- if (pwrap_is_mt8135(wrp)) {
+ if (wrp->master->has_bridge) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"pwrap-bridge");
wrp->bridge_base = devm_ioremap_resource(wrp->dev, res);
@@ -925,11 +1209,9 @@ static int pwrap_probe(struct platform_device *pdev)
 * Since STAUPD is not used on the mt8173 platform,
 * the STAUPD bit of WDT_SRC should be turned off
*/
- wdt_src = pwrap_is_mt8173(wrp) ?
- PWRAP_WDT_SRC_MASK_NO_STAUPD : PWRAP_WDT_SRC_MASK_ALL;
- pwrap_writel(wrp, wdt_src, PWRAP_WDT_SRC_EN);
+ pwrap_writel(wrp, wrp->master->wdt_src, PWRAP_WDT_SRC_EN);
pwrap_writel(wrp, 0x1, PWRAP_TIMER_EN);
- pwrap_writel(wrp, ~((1 << 31) | (1 << 1)), PWRAP_INT_EN);
+ pwrap_writel(wrp, wrp->master->int_en_all, PWRAP_INT_EN);
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt, IRQF_TRIGGER_HIGH,
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index 731fa066f..6609d7e0e 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -33,6 +33,7 @@
*/
struct qcom_smd_rpm {
struct qcom_smd_channel *rpm_channel;
+ struct device *dev;
struct completion ack;
struct mutex lock;
@@ -149,14 +150,14 @@ out:
}
EXPORT_SYMBOL(qcom_rpm_smd_write);
-static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
+static int qcom_smd_rpm_callback(struct qcom_smd_channel *channel,
const void *data,
size_t count)
{
const struct qcom_rpm_header *hdr = data;
size_t hdr_length = le32_to_cpu(hdr->length);
const struct qcom_rpm_message *msg;
- struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev);
+ struct qcom_smd_rpm *rpm = qcom_smd_get_drvdata(channel);
const u8 *buf = data + sizeof(struct qcom_rpm_header);
const u8 *end = buf + hdr_length;
char msgbuf[32];
@@ -165,7 +166,7 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST ||
hdr_length < sizeof(struct qcom_rpm_message)) {
- dev_err(&qsdev->dev, "invalid request\n");
+ dev_err(rpm->dev, "invalid request\n");
return 0;
}
@@ -206,7 +207,9 @@ static int qcom_smd_rpm_probe(struct qcom_smd_device *sdev)
mutex_init(&rpm->lock);
init_completion(&rpm->ack);
+ rpm->dev = &sdev->dev;
rpm->rpm_channel = sdev->channel;
+ qcom_smd_set_drvdata(sdev->channel, rpm);
dev_set_drvdata(&sdev->dev, rpm);
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
index 498fd0581..ac1957dfd 100644
--- a/drivers/soc/qcom/smd.c
+++ b/drivers/soc/qcom/smd.c
@@ -106,9 +106,9 @@ static const struct {
* @channels: list of all channels detected on this edge
* @channels_lock: guard for modifications of @channels
* @allocated: array of bitmaps representing already allocated channels
- * @need_rescan: flag that the @work needs to scan smem for new channels
* @smem_available: last available amount of smem triggering a channel scan
- * @work: work item for edge house keeping
+ * @scan_work: work item for discovering new channels
+ * @state_work: work item for edge state changes
*/
struct qcom_smd_edge {
struct qcom_smd *smd;
@@ -127,10 +127,12 @@ struct qcom_smd_edge {
DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);
- bool need_rescan;
unsigned smem_available;
- struct work_struct work;
+ wait_queue_head_t new_channel_event;
+
+ struct work_struct scan_work;
+ struct work_struct state_work;
};
/*
@@ -186,13 +188,16 @@ struct qcom_smd_channel {
int fifo_size;
void *bounce_buffer;
- int (*cb)(struct qcom_smd_device *, const void *, size_t);
+ qcom_smd_cb_t cb;
spinlock_t recv_lock;
int pkt_size;
+ void *drvdata;
+
struct list_head list;
+ struct list_head dev_list;
};
/**
@@ -378,6 +383,19 @@ static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
}
/*
+ * Set the callback for a channel, with appropriate locking
+ */
+static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel,
+ qcom_smd_cb_t cb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->recv_lock, flags);
+ channel->cb = cb;
+ spin_unlock_irqrestore(&channel->recv_lock, flags);
+}
+
+/*
* Calculate the amount of data available in the rx fifo
*/
static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
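
[editor's note] qcom_smd_channel_set_callback() updates the callback under recv_lock, the same lock the receive path holds while invoking it, so a cleared callback can never run afterwards. A small pthread sketch of that discipline (names and the mutex are illustrative):

	#include <pthread.h>
	#include <stddef.h>
	#include <stdio.h>

	typedef int (*cb_t)(const void *data, size_t len);

	static pthread_mutex_t recv_lock = PTHREAD_MUTEX_INITIALIZER;
	static cb_t channel_cb;

	static void set_callback(cb_t cb)
	{
		pthread_mutex_lock(&recv_lock);	/* same lock the rx path holds */
		channel_cb = cb;
		pthread_mutex_unlock(&recv_lock);
	}

	static int my_cb(const void *data, size_t len)
	{
		(void)data;
		printf("got %zu bytes\n", len);
		return 0;
	}

	int main(void)
	{
		set_callback(my_cb);
		set_callback(NULL);	/* clearing is what close/remove rely on */
		return 0;
	}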
@@ -497,7 +515,6 @@ static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
*/
static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
{
- struct qcom_smd_device *qsdev = channel->qsdev;
unsigned tail;
size_t len;
void *ptr;
@@ -517,7 +534,7 @@ static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
len = channel->pkt_size;
}
- ret = channel->cb(qsdev, ptr, len);
+ ret = channel->cb(channel, ptr, len);
if (ret < 0)
return ret;
@@ -601,7 +618,8 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
struct qcom_smd_edge *edge = data;
struct qcom_smd_channel *channel;
unsigned available;
- bool kick_worker = false;
+ bool kick_scanner = false;
+ bool kick_state = false;
/*
* Handle state changes or data on each of the channels on this edge
@@ -609,7 +627,7 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
spin_lock(&edge->channels_lock);
list_for_each_entry(channel, &edge->channels, list) {
spin_lock(&channel->recv_lock);
- kick_worker |= qcom_smd_channel_intr(channel);
+ kick_state |= qcom_smd_channel_intr(channel);
spin_unlock(&channel->recv_lock);
}
spin_unlock(&edge->channels_lock);
@@ -622,12 +640,13 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
available = qcom_smem_get_free_space(edge->remote_pid);
if (available != edge->smem_available) {
edge->smem_available = available;
- edge->need_rescan = true;
- kick_worker = true;
+ kick_scanner = true;
}
- if (kick_worker)
- schedule_work(&edge->work);
+ if (kick_scanner)
+ schedule_work(&edge->scan_work);
+ if (kick_state)
+ schedule_work(&edge->state_work);
return IRQ_HANDLED;
}
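
[editor's note] The interrupt handler now keeps two independent kick flags and schedules two separate work items, removing the shared need_rescan flag. A toy sketch of that split, with direct calls standing in for schedule_work():

	#include <stdbool.h>
	#include <stdio.h>

	static void scan_work(void)  { printf("scan smem for new channels\n"); }
	static void state_work(void) { printf("handle channel state changes\n"); }

	static void edge_intr(bool more_smem, bool channel_event)
	{
		bool kick_scanner = more_smem;
		bool kick_state = channel_event;

		if (kick_scanner)
			scan_work();	/* schedule_work(&edge->scan_work) */
		if (kick_state)
			state_work();	/* schedule_work(&edge->state_work) */
	}

	int main(void)
	{
		edge_intr(true, false);
		edge_intr(false, true);
		return 0;
	}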
@@ -793,18 +812,12 @@ static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
}
/*
- * Probe the smd client.
- *
- * The remote side have indicated that it want the channel to be opened, so
- * complete the state handshake and probe our client driver.
+ * Helper for opening a channel
*/
-static int qcom_smd_dev_probe(struct device *dev)
+static int qcom_smd_channel_open(struct qcom_smd_channel *channel,
+ qcom_smd_cb_t cb)
{
- struct qcom_smd_device *qsdev = to_smd_device(dev);
- struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
- struct qcom_smd_channel *channel = qsdev->channel;
size_t bb_size;
- int ret;
/*
* Packets are maximum 4k, but reduce if the fifo is smaller
@@ -814,12 +827,44 @@ static int qcom_smd_dev_probe(struct device *dev)
if (!channel->bounce_buffer)
return -ENOMEM;
- channel->cb = qsdrv->callback;
-
+ qcom_smd_channel_set_callback(channel, cb);
qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);
-
qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);
+ return 0;
+}
+
+/*
+ * Helper for closing and resetting a channel
+ */
+static void qcom_smd_channel_close(struct qcom_smd_channel *channel)
+{
+ qcom_smd_channel_set_callback(channel, NULL);
+
+ kfree(channel->bounce_buffer);
+ channel->bounce_buffer = NULL;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+ qcom_smd_channel_reset(channel);
+}
+
+/*
+ * Probe the smd client.
+ *
+ * The remote side has indicated that it wants the channel to be opened, so
+ * complete the state handshake and probe our client driver.
+ */
+static int qcom_smd_dev_probe(struct device *dev)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+ struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
+ struct qcom_smd_channel *channel = qsdev->channel;
+ int ret;
+
+ ret = qcom_smd_channel_open(channel, qsdrv->callback);
+ if (ret)
+ return ret;
+
ret = qsdrv->probe(qsdev);
if (ret)
goto err;
@@ -831,11 +876,7 @@ static int qcom_smd_dev_probe(struct device *dev)
err:
dev_err(&qsdev->dev, "probe failed\n");
- channel->cb = NULL;
- kfree(channel->bounce_buffer);
- channel->bounce_buffer = NULL;
-
- qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+ qcom_smd_channel_close(channel);
return ret;
}
@@ -850,16 +891,15 @@ static int qcom_smd_dev_remove(struct device *dev)
struct qcom_smd_device *qsdev = to_smd_device(dev);
struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
struct qcom_smd_channel *channel = qsdev->channel;
- unsigned long flags;
+ struct qcom_smd_channel *tmp;
+ struct qcom_smd_channel *ch;
qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);
/*
* Make sure we don't race with the code receiving data.
*/
- spin_lock_irqsave(&channel->recv_lock, flags);
- channel->cb = NULL;
- spin_unlock_irqrestore(&channel->recv_lock, flags);
+ qcom_smd_channel_set_callback(channel, NULL);
/* Wake up any sleepers in qcom_smd_send() */
wake_up_interruptible(&channel->fblockread_event);
@@ -872,15 +912,14 @@ static int qcom_smd_dev_remove(struct device *dev)
qsdrv->remove(qsdev);
/*
- * The client is now gone, cleanup and reset the channel state.
+ * The client is now gone, close and release all channels associated
+ * with this sdev
*/
- channel->qsdev = NULL;
- kfree(channel->bounce_buffer);
- channel->bounce_buffer = NULL;
-
- qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
-
- qcom_smd_channel_reset(channel);
+ list_for_each_entry_safe(ch, tmp, &channel->dev_list, dev_list) {
+ qcom_smd_channel_close(ch);
+ list_del(&ch->dev_list);
+ ch->qsdev = NULL;
+ }
return 0;
}
@@ -996,6 +1035,18 @@ int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv)
}
EXPORT_SYMBOL(qcom_smd_driver_register);
+void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel)
+{
+ return channel->drvdata;
+}
+EXPORT_SYMBOL(qcom_smd_get_drvdata);
+
+void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data)
+{
+ channel->drvdata = data;
+}
+EXPORT_SYMBOL(qcom_smd_set_drvdata);
+
/**
* qcom_smd_driver_unregister - unregister a smd driver
* @qsdrv: qcom_smd_driver struct
@@ -1006,6 +1057,78 @@ void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv)
}
EXPORT_SYMBOL(qcom_smd_driver_unregister);
+static struct qcom_smd_channel *
+qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_channel *ret = NULL;
+ unsigned long flags;
+ unsigned state;
+
+ spin_lock_irqsave(&edge->channels_lock, flags);
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (strcmp(channel->name, name))
+ continue;
+
+ state = GET_RX_CHANNEL_INFO(channel, state);
+ if (state != SMD_CHANNEL_OPENING &&
+ state != SMD_CHANNEL_OPENED)
+ continue;
+
+ ret = channel;
+ break;
+ }
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
+
+ return ret;
+}
+
+/**
+ * qcom_smd_open_channel() - claim additional channels on the same edge
+ * @parent: channel on the edge where the new channel should be opened
+ * @name: channel name
+ * @cb: callback method to use for incoming data
+ *
+ * Returns a channel handle on success, or an ERR_PTR() encoded error:
+ * -ETIMEDOUT if the channel does not appear, -EBUSY if it is already open.
+ */
+struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent,
+ const char *name,
+ qcom_smd_cb_t cb)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_device *sdev = parent->qsdev;
+ struct qcom_smd_edge *edge = parent->edge;
+ int ret;
+
+ /* Wait up to HZ for the channel to appear */
+ ret = wait_event_interruptible_timeout(edge->new_channel_event,
+ (channel = qcom_smd_find_channel(edge, name)) != NULL,
+ HZ);
+ if (!ret)
+ return ERR_PTR(-ETIMEDOUT);
+
+ if (channel->state != SMD_CHANNEL_CLOSED) {
+ dev_err(&sdev->dev, "channel %s is busy\n", channel->name);
+ return ERR_PTR(-EBUSY);
+ }
+
+ channel->qsdev = sdev;
+ ret = qcom_smd_channel_open(channel, cb);
+ if (ret) {
+ channel->qsdev = NULL;
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * Append this channel to the list of channels associated with the sdev
+ */
+ list_add_tail(&channel->dev_list, &sdev->channel->dev_list);
+
+ return channel;
+}
+EXPORT_SYMBOL(qcom_smd_open_channel);
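
[editor's note] qcom_smd_open_channel() reports failure through the kernel's ERR_PTR() convention: the error code is encoded into the pointer value itself and recovered with IS_ERR()/PTR_ERR(). A userspace re-implementation of that convention, assuming the kernel's usual MAX_ERRNO of 4095:

	#include <errno.h>
	#include <stdio.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error)
	{
		return (void *)error;
	}

	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static inline long PTR_ERR(const void *ptr)
	{
		return (long)ptr;
	}

	int main(void)
	{
		void *channel = ERR_PTR(-ETIMEDOUT);	/* channel never appeared */

		if (IS_ERR(channel))
			printf("open failed: %ld\n", PTR_ERR(channel));
		return 0;
	}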
+
/*
* Allocate the qcom_smd_channel object for a newly found smd channel,
* retrieving and validating the smem items involved.
@@ -1027,6 +1150,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
if (!channel)
return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&channel->dev_list);
channel->edge = edge;
channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
if (!channel->name)
@@ -1089,8 +1213,9 @@ free_name_and_channel:
* qcom_smd_create_channel() to create representations of these and add
* them to the edge's list of channels.
*/
-static void qcom_discover_channels(struct qcom_smd_edge *edge)
+static void qcom_channel_scan_worker(struct work_struct *work)
{
+ struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work);
struct qcom_smd_alloc_entry *alloc_tbl;
struct qcom_smd_alloc_entry *entry;
struct qcom_smd_channel *channel;
@@ -1140,10 +1265,12 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
set_bit(i, edge->allocated[tbl]);
+
+ wake_up_interruptible(&edge->new_channel_event);
}
}
- schedule_work(&edge->work);
+ schedule_work(&edge->state_work);
}
/*
@@ -1151,29 +1278,23 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
* then scans all registered channels for state changes that should be handled
* by creating or destroying smd client devices for the registered channels.
*
- * LOCKING: edge->channels_lock is not needed to be held during the traversal
- * of the channels list as it's done synchronously with the only writer.
+ * LOCKING: edge->channels_lock only needs to cover the list operations, as the
+ * worker is killed before any channels are deallocated
*/
static void qcom_channel_state_worker(struct work_struct *work)
{
struct qcom_smd_channel *channel;
struct qcom_smd_edge *edge = container_of(work,
struct qcom_smd_edge,
- work);
+ state_work);
unsigned remote_state;
-
- /*
- * Rescan smem if we have reason to belive that there are new channels.
- */
- if (edge->need_rescan) {
- edge->need_rescan = false;
- qcom_discover_channels(edge);
- }
+ unsigned long flags;
/*
* Register a device for any closed channel where the remote processor
* is showing interest in opening the channel.
*/
+ spin_lock_irqsave(&edge->channels_lock, flags);
list_for_each_entry(channel, &edge->channels, list) {
if (channel->state != SMD_CHANNEL_CLOSED)
continue;
@@ -1183,7 +1304,9 @@ static void qcom_channel_state_worker(struct work_struct *work)
remote_state != SMD_CHANNEL_OPENED)
continue;
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
qcom_smd_create_device(channel);
+ spin_lock_irqsave(&edge->channels_lock, flags);
}
/*
@@ -1200,8 +1323,11 @@ static void qcom_channel_state_worker(struct work_struct *work)
remote_state == SMD_CHANNEL_OPENED)
continue;
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
qcom_smd_destroy_device(channel);
+ spin_lock_irqsave(&edge->channels_lock, flags);
}
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
}
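
[editor's note] The reworked state worker holds channels_lock only across list traversal and drops it around qcom_smd_create_device()/destroy_device(), which may sleep; as the updated LOCKING comment notes, this is safe only because the worker is the sole writer that removes list nodes. A pthread sketch of the drop-and-reacquire pattern:

	#include <pthread.h>

	struct channel { struct channel *next; int state; };

	static pthread_mutex_t channels_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct channel *channels;	/* list head; empty in this sketch */

	static void create_device(struct channel *ch)
	{
		(void)ch;	/* may sleep, so must run unlocked */
	}

	static void state_worker(void)
	{
		struct channel *ch;

		pthread_mutex_lock(&channels_lock);
		for (ch = channels; ch; ch = ch->next) {
			if (ch->state != 0)
				continue;
			/* drop the lock around the sleeping call; safe only
			 * because this worker is the sole remover of nodes */
			pthread_mutex_unlock(&channels_lock);
			create_device(ch);
			pthread_mutex_lock(&channels_lock);
		}
		pthread_mutex_unlock(&channels_lock);
	}

	int main(void)
	{
		state_worker();
		return 0;
	}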
/*
@@ -1219,7 +1345,8 @@ static int qcom_smd_parse_edge(struct device *dev,
INIT_LIST_HEAD(&edge->channels);
spin_lock_init(&edge->channels_lock);
- INIT_WORK(&edge->work, qcom_channel_state_worker);
+ INIT_WORK(&edge->scan_work, qcom_channel_scan_worker);
+ INIT_WORK(&edge->state_work, qcom_channel_state_worker);
edge->of_node = of_node_get(node);
@@ -1303,13 +1430,13 @@ static int qcom_smd_probe(struct platform_device *pdev)
for_each_available_child_of_node(pdev->dev.of_node, node) {
edge = &smd->edges[i++];
edge->smd = smd;
+ init_waitqueue_head(&edge->new_channel_event);
ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
if (ret)
continue;
- edge->need_rescan = true;
- schedule_work(&edge->work);
+ schedule_work(&edge->scan_work);
}
platform_set_drvdata(pdev, smd);
@@ -1332,8 +1459,10 @@ static int qcom_smd_remove(struct platform_device *pdev)
edge = &smd->edges[i];
disable_irq(edge->irq);
- cancel_work_sync(&edge->work);
+ cancel_work_sync(&edge->scan_work);
+ cancel_work_sync(&edge->state_work);
+ /* No need to lock here, because the writer is gone */
list_for_each_entry(channel, &edge->channels, list) {
if (!channel->qsdev)
continue;
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 19019aa09..2e1aa9f13 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -684,8 +684,7 @@ static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
smem->regions[i].aux_base = (u32)r.start;
smem->regions[i].size = resource_size(&r);
- smem->regions[i].virt_base = devm_ioremap_nocache(dev, r.start,
- resource_size(&r));
+ smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, resource_size(&r));
if (!smem->regions[i].virt_base)
return -ENOMEM;
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
index 5548a31e1..f9d7a85b2 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/soc/qcom/spm.c
@@ -2,6 +2,8 @@
* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
* Copyright (c) 2014,2015, Linaro Ltd.
*
+ * SAW power controller driver
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
@@ -12,7 +14,6 @@
* GNU General Public License for more details.
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -274,7 +275,7 @@ check_spm:
return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO;
}
-static struct cpuidle_ops qcom_cpuidle_ops __initdata = {
+static const struct cpuidle_ops qcom_cpuidle_ops __initconst = {
.suspend = qcom_idle_enter,
.init = qcom_cpuidle_init,
};
@@ -378,8 +379,5 @@ static struct platform_driver spm_driver = {
.of_match_table = spm_match_table,
},
};
-module_platform_driver(spm_driver);
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("SAW power controller driver");
-MODULE_ALIAS("platform:saw");
+builtin_platform_driver(spm_driver);
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index 4ca82a270..f633dcc96 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -100,17 +100,17 @@ struct wcnss_download_nv_resp {
/**
* wcnss_ctrl_smd_callback() - handler from SMD responses
- * @qsdev: smd device handle
+ * @channel: smd channel handle
* @data: pointer to the incoming data packet
* @count: size of the incoming data packet
*
* Handles any incoming packets from the remote WCNSS_CTRL service.
*/
-static int wcnss_ctrl_smd_callback(struct qcom_smd_device *qsdev,
+static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel,
const void *data,
size_t count)
{
- struct wcnss_ctrl *wcnss = dev_get_drvdata(&qsdev->dev);
+ struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(channel);
const struct wcnss_download_nv_resp *nvresp;
const struct wcnss_version_resp *version;
const struct wcnss_msg_hdr *hdr = data;
@@ -246,7 +246,7 @@ static int wcnss_ctrl_probe(struct qcom_smd_device *sdev)
init_completion(&wcnss->ack);
INIT_WORK(&wcnss->download_nv_work, wcnss_download_nv);
- dev_set_drvdata(&sdev->dev, wcnss);
+ qcom_smd_set_drvdata(sdev->channel, wcnss);
return wcnss_request_version(wcnss);
}
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
new file mode 100644
index 000000000..151fcd3f0
--- /dev/null
+++ b/drivers/soc/renesas/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_ARCH_R8A7779) += rcar-sysc.o r8a7779-sysc.o
+obj-$(CONFIG_ARCH_R8A7790) += rcar-sysc.o r8a7790-sysc.o
+obj-$(CONFIG_ARCH_R8A7791) += rcar-sysc.o r8a7791-sysc.o
+# R-Car M2-N is identical to R-Car M2-W w.r.t. power domains.
+obj-$(CONFIG_ARCH_R8A7793) += rcar-sysc.o r8a7791-sysc.o
+obj-$(CONFIG_ARCH_R8A7794) += rcar-sysc.o r8a7794-sysc.o
+obj-$(CONFIG_ARCH_R8A7795) += rcar-sysc.o r8a7795-sysc.o
diff --git a/drivers/soc/renesas/r8a7779-sysc.c b/drivers/soc/renesas/r8a7779-sysc.c
new file mode 100644
index 000000000..9e8e6b7fa
--- /dev/null
+++ b/drivers/soc/renesas/r8a7779-sysc.c
@@ -0,0 +1,34 @@
+/*
+ * Renesas R-Car H1 System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7779-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7779_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7779_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "arm1", 0x40, 1, R8A7779_PD_ARM1, R8A7779_PD_ALWAYS_ON,
+ PD_CPU_CR },
+ { "arm2", 0x40, 2, R8A7779_PD_ARM2, R8A7779_PD_ALWAYS_ON,
+ PD_CPU_CR },
+ { "arm3", 0x40, 3, R8A7779_PD_ARM3, R8A7779_PD_ALWAYS_ON,
+ PD_CPU_CR },
+ { "sgx", 0xc0, 0, R8A7779_PD_SGX, R8A7779_PD_ALWAYS_ON },
+ { "vdp", 0x100, 0, R8A7779_PD_VDP, R8A7779_PD_ALWAYS_ON },
+ { "imp", 0x140, 0, R8A7779_PD_IMP, R8A7779_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7779_sysc_info __initconst = {
+ .areas = r8a7779_areas,
+ .num_areas = ARRAY_SIZE(r8a7779_areas),
+};
diff --git a/drivers/soc/renesas/r8a7790-sysc.c b/drivers/soc/renesas/r8a7790-sysc.c
new file mode 100644
index 000000000..7a567ad0f
--- /dev/null
+++ b/drivers/soc/renesas/r8a7790-sysc.c
@@ -0,0 +1,48 @@
+/*
+ * Renesas R-Car H2 System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7790-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7790_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7790_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7790_PD_CA15_SCU, R8A7790_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7790_PD_CA15_CPU0, R8A7790_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7790_PD_CA15_CPU1, R8A7790_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu2", 0x40, 2, R8A7790_PD_CA15_CPU2, R8A7790_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu3", 0x40, 3, R8A7790_PD_CA15_CPU3, R8A7790_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca7-scu", 0x100, 0, R8A7790_PD_CA7_SCU, R8A7790_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca7-cpu0", 0x1c0, 0, R8A7790_PD_CA7_CPU0, R8A7790_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu1", 0x1c0, 1, R8A7790_PD_CA7_CPU1, R8A7790_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu2", 0x1c0, 2, R8A7790_PD_CA7_CPU2, R8A7790_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu3", 0x1c0, 3, R8A7790_PD_CA7_CPU3, R8A7790_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "sh-4a", 0x80, 0, R8A7790_PD_SH_4A, R8A7790_PD_ALWAYS_ON },
+ { "rgx", 0xc0, 0, R8A7790_PD_RGX, R8A7790_PD_ALWAYS_ON },
+ { "imp", 0x140, 0, R8A7790_PD_IMP, R8A7790_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7790_sysc_info __initconst = {
+ .areas = r8a7790_areas,
+ .num_areas = ARRAY_SIZE(r8a7790_areas),
+};
diff --git a/drivers/soc/renesas/r8a7791-sysc.c b/drivers/soc/renesas/r8a7791-sysc.c
new file mode 100644
index 000000000..03b9f41a3
--- /dev/null
+++ b/drivers/soc/renesas/r8a7791-sysc.c
@@ -0,0 +1,33 @@
+/*
+ * Renesas R-Car M2-W/N System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7791-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7791_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7791_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7791_PD_CA15_SCU, R8A7791_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7791_PD_CA15_CPU0, R8A7791_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7791_PD_CA15_CPU1, R8A7791_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "sh-4a", 0x80, 0, R8A7791_PD_SH_4A, R8A7791_PD_ALWAYS_ON },
+ { "sgx", 0xc0, 0, R8A7791_PD_SGX, R8A7791_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7791_sysc_info __initconst = {
+ .areas = r8a7791_areas,
+ .num_areas = ARRAY_SIZE(r8a7791_areas),
+};
diff --git a/drivers/soc/renesas/r8a7794-sysc.c b/drivers/soc/renesas/r8a7794-sysc.c
new file mode 100644
index 000000000..c4da2941e
--- /dev/null
+++ b/drivers/soc/renesas/r8a7794-sysc.c
@@ -0,0 +1,33 @@
+/*
+ * Renesas R-Car E2 System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7794-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7794_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7794_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca7-scu", 0x100, 0, R8A7794_PD_CA7_SCU, R8A7794_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca7-cpu0", 0x1c0, 0, R8A7794_PD_CA7_CPU0, R8A7794_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "ca7-cpu1", 0x1c0, 1, R8A7794_PD_CA7_CPU1, R8A7794_PD_CA7_SCU,
+ PD_CPU_NOCR },
+ { "sh-4a", 0x80, 0, R8A7794_PD_SH_4A, R8A7794_PD_ALWAYS_ON },
+ { "sgx", 0xc0, 0, R8A7794_PD_SGX, R8A7794_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7794_sysc_info __initconst = {
+ .areas = r8a7794_areas,
+ .num_areas = ARRAY_SIZE(r8a7794_areas),
+};
diff --git a/drivers/soc/renesas/r8a7795-sysc.c b/drivers/soc/renesas/r8a7795-sysc.c
new file mode 100644
index 000000000..5e7537c96
--- /dev/null
+++ b/drivers/soc/renesas/r8a7795-sysc.c
@@ -0,0 +1,56 @@
+/*
+ * Renesas R-Car H3 System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7795-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7795_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7795_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca57-scu", 0x1c0, 0, R8A7795_PD_CA57_SCU, R8A7795_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca57-cpu0", 0x80, 0, R8A7795_PD_CA57_CPU0, R8A7795_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca57-cpu1", 0x80, 1, R8A7795_PD_CA57_CPU1, R8A7795_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca57-cpu2", 0x80, 2, R8A7795_PD_CA57_CPU2, R8A7795_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca57-cpu3", 0x80, 3, R8A7795_PD_CA57_CPU3, R8A7795_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca53-scu", 0x140, 0, R8A7795_PD_CA53_SCU, R8A7795_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A7795_PD_CA53_CPU0, R8A7795_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu1", 0x200, 1, R8A7795_PD_CA53_CPU1, R8A7795_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu2", 0x200, 2, R8A7795_PD_CA53_CPU2, R8A7795_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu3", 0x200, 3, R8A7795_PD_CA53_CPU3, R8A7795_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "a3vp", 0x340, 0, R8A7795_PD_A3VP, R8A7795_PD_ALWAYS_ON },
+ { "cr7", 0x240, 0, R8A7795_PD_CR7, R8A7795_PD_ALWAYS_ON },
+ { "a3vc", 0x380, 0, R8A7795_PD_A3VC, R8A7795_PD_ALWAYS_ON },
+ { "a2vc0", 0x3c0, 0, R8A7795_PD_A2VC0, R8A7795_PD_A3VC },
+ { "a2vc1", 0x3c0, 1, R8A7795_PD_A2VC1, R8A7795_PD_A3VC },
+ { "3dg-a", 0x100, 0, R8A7795_PD_3DG_A, R8A7795_PD_ALWAYS_ON },
+ { "3dg-b", 0x100, 1, R8A7795_PD_3DG_B, R8A7795_PD_3DG_A },
+ { "3dg-c", 0x100, 2, R8A7795_PD_3DG_C, R8A7795_PD_3DG_B },
+ { "3dg-d", 0x100, 3, R8A7795_PD_3DG_D, R8A7795_PD_3DG_C },
+ { "3dg-e", 0x100, 4, R8A7795_PD_3DG_E, R8A7795_PD_3DG_D },
+ { "a3ir", 0x180, 0, R8A7795_PD_A3IR, R8A7795_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7795_sysc_info __initconst = {
+ .areas = r8a7795_areas,
+ .num_areas = ARRAY_SIZE(r8a7795_areas),
+};
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
new file mode 100644
index 000000000..79dbc7708
--- /dev/null
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -0,0 +1,401 @@
+/*
+ * R-Car SYSC Power management support
+ *
+ * Copyright (C) 2014 Magnus Damm
+ * Copyright (C) 2015-2016 Glider bvba
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/clk/renesas.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/soc/renesas/rcar-sysc.h>
+
+#include "rcar-sysc.h"
+
+/* SYSC Common */
+#define SYSCSR 0x00 /* SYSC Status Register */
+#define SYSCISR 0x04 /* Interrupt Status Register */
+#define SYSCISCR 0x08 /* Interrupt Status Clear Register */
+#define SYSCIER 0x0c /* Interrupt Enable Register */
+#define SYSCIMR 0x10 /* Interrupt Mask Register */
+
+/* SYSC Status Register */
+#define SYSCSR_PONENB 1 /* Ready for power resume requests */
+#define SYSCSR_POFFENB 0 /* Ready for power shutoff requests */
+
+/*
+ * Power Control Register Offsets inside the register block for each domain
+ * Note: The "CR" registers for ARM cores exist on H1 only
+ * Use WFI to power off, CPG/APMU to resume ARM cores on R-Car Gen2
+ * Use PSCI on R-Car Gen3
+ */
+#define PWRSR_OFFS 0x00 /* Power Status Register */
+#define PWROFFCR_OFFS 0x04 /* Power Shutoff Control Register */
+#define PWROFFSR_OFFS 0x08 /* Power Shutoff Status Register */
+#define PWRONCR_OFFS 0x0c /* Power Resume Control Register */
+#define PWRONSR_OFFS 0x10 /* Power Resume Status Register */
+#define PWRER_OFFS 0x14 /* Power Shutoff/Resume Error */
+
+
+#define SYSCSR_RETRIES 100
+#define SYSCSR_DELAY_US 1
+
+#define PWRER_RETRIES 100
+#define PWRER_DELAY_US 1
+
+#define SYSCISR_RETRIES 1000
+#define SYSCISR_DELAY_US 1
+
+#define RCAR_PD_ALWAYS_ON 32 /* Always-on power area */
+
+static void __iomem *rcar_sysc_base;
+static DEFINE_SPINLOCK(rcar_sysc_lock); /* SMP CPUs + I/O devices */
+
+static int rcar_sysc_pwr_on_off(const struct rcar_sysc_ch *sysc_ch, bool on)
+{
+ unsigned int sr_bit, reg_offs;
+ int k;
+
+ if (on) {
+ sr_bit = SYSCSR_PONENB;
+ reg_offs = PWRONCR_OFFS;
+ } else {
+ sr_bit = SYSCSR_POFFENB;
+ reg_offs = PWROFFCR_OFFS;
+ }
+
+ /* Wait until SYSC is ready to accept a power request */
+ for (k = 0; k < SYSCSR_RETRIES; k++) {
+ if (ioread32(rcar_sysc_base + SYSCSR) & BIT(sr_bit))
+ break;
+ udelay(SYSCSR_DELAY_US);
+ }
+
+ if (k == SYSCSR_RETRIES)
+ return -EAGAIN;
+
+ /* Submit power shutoff or power resume request */
+ iowrite32(BIT(sysc_ch->chan_bit),
+ rcar_sysc_base + sysc_ch->chan_offs + reg_offs);
+
+ return 0;
+}
+
+static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on)
+{
+ unsigned int isr_mask = BIT(sysc_ch->isr_bit);
+ unsigned int chan_mask = BIT(sysc_ch->chan_bit);
+ unsigned int status;
+ unsigned long flags;
+ int ret = 0;
+ int k;
+
+ spin_lock_irqsave(&rcar_sysc_lock, flags);
+
+ iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);
+
+	/* Submit power shutoff or resume request until it is accepted */
+ for (k = 0; k < PWRER_RETRIES; k++) {
+ ret = rcar_sysc_pwr_on_off(sysc_ch, on);
+ if (ret)
+ goto out;
+
+ status = ioread32(rcar_sysc_base +
+ sysc_ch->chan_offs + PWRER_OFFS);
+ if (!(status & chan_mask))
+ break;
+
+ udelay(PWRER_DELAY_US);
+ }
+
+ if (k == PWRER_RETRIES) {
+ ret = -EIO;
+ goto out;
+ }
+
+	/* Wait until the power shutoff or resume request has completed */
+ for (k = 0; k < SYSCISR_RETRIES; k++) {
+ if (ioread32(rcar_sysc_base + SYSCISR) & isr_mask)
+ break;
+ udelay(SYSCISR_DELAY_US);
+ }
+
+ if (k == SYSCISR_RETRIES)
+ ret = -EIO;
+
+ iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);
+
+ out:
+ spin_unlock_irqrestore(&rcar_sysc_lock, flags);
+
+ pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
+ sysc_ch->isr_bit, ioread32(rcar_sysc_base + SYSCISR), ret);
+ return ret;
+}
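
[editor's note] rcar_sysc_power() is a two-stage handshake: resubmit the shutoff/resume request until the PWRER error register reads clear, then poll SYSCISR until the request completes, giving up with -EIO after a bounded number of tries in either stage. A compilable sketch with fake readiness predicates in place of the register reads:

	#include <stdio.h>

	#define RETRIES 100

	static int submit_busy(int attempt)    { return attempt < 2; }	/* busy twice */
	static int request_complete(int polls) { return polls >= 5; }

	int main(void)
	{
		int k;

		/* stage 1: resubmit until the power error register reads clear */
		for (k = 0; k < RETRIES; k++)
			if (!submit_busy(k))
				break;	/* accepted; the driver udelay()s between tries */
		if (k == RETRIES)
			return 1;	/* -EIO in rcar_sysc_power() */

		/* stage 2: poll the interrupt status register for completion */
		for (k = 0; k < RETRIES; k++)
			if (request_complete(k))
				break;
		if (k == RETRIES)
			return 1;

		printf("power request accepted and completed\n");
		return 0;
	}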
+
+int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch)
+{
+ return rcar_sysc_power(sysc_ch, false);
+}
+
+int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch)
+{
+ return rcar_sysc_power(sysc_ch, true);
+}
+
+static bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch)
+{
+ unsigned int st;
+
+ st = ioread32(rcar_sysc_base + sysc_ch->chan_offs + PWRSR_OFFS);
+ if (st & BIT(sysc_ch->chan_bit))
+ return true;
+
+ return false;
+}
+
+void __iomem *rcar_sysc_init(phys_addr_t base)
+{
+ rcar_sysc_base = ioremap_nocache(base, PAGE_SIZE);
+ if (!rcar_sysc_base)
+ panic("unable to ioremap R-Car SYSC hardware block\n");
+
+ return rcar_sysc_base;
+}
+
+struct rcar_sysc_pd {
+ struct generic_pm_domain genpd;
+ struct rcar_sysc_ch ch;
+ unsigned int flags;
+ char name[0];
+};
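
[editor's note] struct rcar_sysc_pd stores the domain name inline after the struct through a zero-length array, so each domain needs exactly one allocation sized sizeof(*pd) + strlen(name) + 1, as rcar_sysc_pd_init() does below. The same idiom in standard C99, using a flexible array member:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct pd {
		unsigned int flags;
		char name[];		/* C99 spelling of the driver's name[0] */
	};

	int main(void)
	{
		const char *area = "ca57-scu";
		struct pd *pd = calloc(1, sizeof(*pd) + strlen(area) + 1);

		if (!pd)
			return 1;	/* -ENOMEM in the driver */
		strcpy(pd->name, area);	/* name lives in the same allocation */
		printf("%s\n", pd->name);
		free(pd);
		return 0;
	}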
+
+static inline struct rcar_sysc_pd *to_rcar_pd(struct generic_pm_domain *d)
+{
+ return container_of(d, struct rcar_sysc_pd, genpd);
+}
+
+static int rcar_sysc_pd_power_off(struct generic_pm_domain *genpd)
+{
+ struct rcar_sysc_pd *pd = to_rcar_pd(genpd);
+
+ pr_debug("%s: %s\n", __func__, genpd->name);
+
+ if (pd->flags & PD_NO_CR) {
+ pr_debug("%s: Cannot control %s\n", __func__, genpd->name);
+ return -EBUSY;
+ }
+
+ if (pd->flags & PD_BUSY) {
+ pr_debug("%s: %s busy\n", __func__, genpd->name);
+ return -EBUSY;
+ }
+
+ return rcar_sysc_power_down(&pd->ch);
+}
+
+static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd)
+{
+ struct rcar_sysc_pd *pd = to_rcar_pd(genpd);
+
+ pr_debug("%s: %s\n", __func__, genpd->name);
+
+ if (pd->flags & PD_NO_CR) {
+ pr_debug("%s: Cannot control %s\n", __func__, genpd->name);
+ return 0;
+ }
+
+ return rcar_sysc_power_up(&pd->ch);
+}
+
+static bool has_cpg_mstp;
+
+static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
+{
+ struct generic_pm_domain *genpd = &pd->genpd;
+ const char *name = pd->genpd.name;
+ struct dev_power_governor *gov = &simple_qos_governor;
+
+ if (pd->flags & PD_CPU) {
+ /*
+ * This domain contains a CPU core and therefore it should
+ * only be turned off if the CPU is not in use.
+ */
+ pr_debug("PM domain %s contains %s\n", name, "CPU");
+ pd->flags |= PD_BUSY;
+ gov = &pm_domain_always_on_gov;
+ } else if (pd->flags & PD_SCU) {
+ /*
+ * This domain contains an SCU and cache-controller, and
+ * therefore it should only be turned off if the CPU cores are
+ * not in use.
+ */
+ pr_debug("PM domain %s contains %s\n", name, "SCU");
+ pd->flags |= PD_BUSY;
+ gov = &pm_domain_always_on_gov;
+ } else if (pd->flags & PD_NO_CR) {
+ /*
+ * This domain cannot be turned off.
+ */
+ pd->flags |= PD_BUSY;
+ gov = &pm_domain_always_on_gov;
+ }
+
+ if (!(pd->flags & (PD_CPU | PD_SCU))) {
+ /* Enable Clock Domain for I/O devices */
+ genpd->flags = GENPD_FLAG_PM_CLK;
+ if (has_cpg_mstp) {
+ genpd->attach_dev = cpg_mstp_attach_dev;
+ genpd->detach_dev = cpg_mstp_detach_dev;
+ } else {
+ genpd->attach_dev = cpg_mssr_attach_dev;
+ genpd->detach_dev = cpg_mssr_detach_dev;
+ }
+ }
+
+ genpd->power_off = rcar_sysc_pd_power_off;
+ genpd->power_on = rcar_sysc_pd_power_on;
+
+ if (pd->flags & (PD_CPU | PD_NO_CR)) {
+ /* Skip CPUs (handled by SMP code) and areas without control */
+ pr_debug("%s: Not touching %s\n", __func__, genpd->name);
+ goto finalize;
+ }
+
+ if (!rcar_sysc_power_is_off(&pd->ch)) {
+ pr_debug("%s: %s is already powered\n", __func__, genpd->name);
+ goto finalize;
+ }
+
+ rcar_sysc_power_up(&pd->ch);
+
+finalize:
+ pm_genpd_init(genpd, gov, false);
+}
+
+static const struct of_device_id rcar_sysc_matches[] = {
+#ifdef CONFIG_ARCH_R8A7779
+ { .compatible = "renesas,r8a7779-sysc", .data = &r8a7779_sysc_info },
+#endif
+#ifdef CONFIG_ARCH_R8A7790
+ { .compatible = "renesas,r8a7790-sysc", .data = &r8a7790_sysc_info },
+#endif
+#ifdef CONFIG_ARCH_R8A7791
+ { .compatible = "renesas,r8a7791-sysc", .data = &r8a7791_sysc_info },
+#endif
+#ifdef CONFIG_ARCH_R8A7793
+ /* R-Car M2-N is identical to R-Car M2-W w.r.t. power domains. */
+ { .compatible = "renesas,r8a7793-sysc", .data = &r8a7791_sysc_info },
+#endif
+#ifdef CONFIG_ARCH_R8A7794
+ { .compatible = "renesas,r8a7794-sysc", .data = &r8a7794_sysc_info },
+#endif
+#ifdef CONFIG_ARCH_R8A7795
+ { .compatible = "renesas,r8a7795-sysc", .data = &r8a7795_sysc_info },
+#endif
+ { /* sentinel */ }
+};
+
+struct rcar_pm_domains {
+ struct genpd_onecell_data onecell_data;
+ struct generic_pm_domain *domains[RCAR_PD_ALWAYS_ON + 1];
+};
+
+static int __init rcar_sysc_pd_init(void)
+{
+ const struct rcar_sysc_info *info;
+ const struct of_device_id *match;
+ struct rcar_pm_domains *domains;
+ struct device_node *np;
+ u32 syscier, syscimr;
+ void __iomem *base;
+ unsigned int i;
+ int error;
+
+ np = of_find_matching_node_and_match(NULL, rcar_sysc_matches, &match);
+ if (!np)
+ return -ENODEV;
+
+ info = match->data;
+
+ has_cpg_mstp = of_find_compatible_node(NULL, NULL,
+ "renesas,cpg-mstp-clocks");
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_warn("%s: Cannot map regs\n", np->full_name);
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ rcar_sysc_base = base;
+
+ domains = kzalloc(sizeof(*domains), GFP_KERNEL);
+ if (!domains) {
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ domains->onecell_data.domains = domains->domains;
+ domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
+
+ for (i = 0, syscier = 0; i < info->num_areas; i++)
+ syscier |= BIT(info->areas[i].isr_bit);
+
+ /*
+ * Mask all interrupt sources to prevent the CPU from receiving them.
+ * Make sure not to clear reserved bits that were set before.
+ */
+ syscimr = ioread32(base + SYSCIMR);
+ syscimr |= syscier;
+ pr_debug("%s: syscimr = 0x%08x\n", np->full_name, syscimr);
+ iowrite32(syscimr, base + SYSCIMR);
+
+ /*
+ * SYSC needs all interrupt sources enabled to control power.
+ */
+ pr_debug("%s: syscier = 0x%08x\n", np->full_name, syscier);
+ iowrite32(syscier, base + SYSCIER);
+
+ for (i = 0; i < info->num_areas; i++) {
+ const struct rcar_sysc_area *area = &info->areas[i];
+ struct rcar_sysc_pd *pd;
+
+ pd = kzalloc(sizeof(*pd) + strlen(area->name) + 1, GFP_KERNEL);
+ if (!pd) {
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ strcpy(pd->name, area->name);
+ pd->genpd.name = pd->name;
+ pd->ch.chan_offs = area->chan_offs;
+ pd->ch.chan_bit = area->chan_bit;
+ pd->ch.isr_bit = area->isr_bit;
+ pd->flags = area->flags;
+
+ rcar_sysc_pd_setup(pd);
+ if (area->parent >= 0)
+ pm_genpd_add_subdomain(domains->domains[area->parent],
+ &pd->genpd);
+
+ domains->domains[area->isr_bit] = &pd->genpd;
+ }
+
+	error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
+
+out_put:
+ of_node_put(np);
+ return error;
+}
+early_initcall(rcar_sysc_pd_init);
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
new file mode 100644
index 000000000..5e766174c
--- /dev/null
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -0,0 +1,58 @@
+/*
+ * Renesas R-Car System Controller
+ *
+ * Copyright (C) 2016 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+#ifndef __SOC_RENESAS_RCAR_SYSC_H__
+#define __SOC_RENESAS_RCAR_SYSC_H__
+
+#include <linux/types.h>
+
+
+/*
+ * Power Domain flags
+ */
+#define PD_CPU BIT(0) /* Area contains main CPU core */
+#define PD_SCU BIT(1) /* Area contains SCU and L2 cache */
+#define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */
+
+#define PD_BUSY BIT(3) /* Busy, for internal use only */
+
+#define PD_CPU_CR PD_CPU /* CPU area has CR (R-Car H1) */
+#define PD_CPU_NOCR	(PD_CPU | PD_NO_CR) /* CPU area lacks CR (R-Car Gen2/3) */
+#define PD_ALWAYS_ON PD_NO_CR /* Always-on area */
+
+
+/*
+ * Description of a Power Area
+ */
+
+struct rcar_sysc_area {
+ const char *name;
+ u16 chan_offs; /* Offset of PWRSR register for this area */
+ u8 chan_bit; /* Bit in PWR* (except for PWRUP in PWRSR) */
+ u8 isr_bit; /* Bit in SYSCI*R */
+ int parent; /* -1 if none */
+ unsigned int flags; /* See PD_* */
+};
+
+
+/*
+ * SoC-specific Power Area Description
+ */
+
+struct rcar_sysc_info {
+ const struct rcar_sysc_area *areas;
+ unsigned int num_areas;
+};
+
+extern const struct rcar_sysc_info r8a7779_sysc_info;
+extern const struct rcar_sysc_info r8a7790_sysc_info;
+extern const struct rcar_sysc_info r8a7791_sysc_info;
+extern const struct rcar_sysc_info r8a7794_sysc_info;
+extern const struct rcar_sysc_info r8a7795_sysc_info;
+#endif /* __SOC_RENESAS_RCAR_SYSC_H__ */
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 43155e1f9..44842a205 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -19,6 +19,7 @@
#include <linux/mfd/syscon.h>
#include <dt-bindings/power/rk3288-power.h>
#include <dt-bindings/power/rk3368-power.h>
+#include <dt-bindings/power/rk3399-power.h>
struct rockchip_domain_info {
int pwr_mask;
@@ -45,10 +46,20 @@ struct rockchip_pmu_info {
const struct rockchip_domain_info *domain_info;
};
+#define MAX_QOS_REGS_NUM 5
+#define QOS_PRIORITY 0x08
+#define QOS_MODE 0x0c
+#define QOS_BANDWIDTH 0x10
+#define QOS_SATURATION 0x14
+#define QOS_EXTCONTROL 0x18
+
struct rockchip_pm_domain {
struct generic_pm_domain genpd;
const struct rockchip_domain_info *info;
struct rockchip_pmu *pmu;
+ int num_qos;
+ struct regmap **qos_regmap;
+ u32 *qos_save_regs[MAX_QOS_REGS_NUM];
int num_clks;
struct clk *clks[];
};
@@ -66,11 +77,11 @@ struct rockchip_pmu {
#define DOMAIN(pwr, status, req, idle, ack) \
{ \
- .pwr_mask = BIT(pwr), \
- .status_mask = BIT(status), \
- .req_mask = BIT(req), \
- .idle_mask = BIT(idle), \
- .ack_mask = BIT(ack), \
+ .pwr_mask = (pwr >= 0) ? BIT(pwr) : 0, \
+ .status_mask = (status >= 0) ? BIT(status) : 0, \
+ .req_mask = (req >= 0) ? BIT(req) : 0, \
+ .idle_mask = (idle >= 0) ? BIT(idle) : 0, \
+ .ack_mask = (ack >= 0) ? BIT(ack) : 0, \
}
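
[editor's note] The reworked DOMAIN() macro treats a negative bit index as "this SoC has no such control bit" and expands to an empty mask rather than evaluating BIT() on a negative shift count, which is what lets the rk3399 table below pass -1 for absent pwr/status/req bits. The guard in isolation:

	#include <stdio.h>

	#define BIT(n)		(1u << (n))
	/* a negative index means the bit does not exist; expand to an empty
	 * mask instead of an out-of-range shift */
	#define OPT_BIT(n)	((n) >= 0 ? BIT(n) : 0)

	int main(void)
	{
		printf("0x%x 0x%x\n", OPT_BIT(15), OPT_BIT(-1));	/* 0x8000 0x0 */
		return 0;
	}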
#define DOMAIN_RK3288(pwr, status, req) \
@@ -79,6 +90,9 @@ struct rockchip_pmu {
#define DOMAIN_RK3368(pwr, status, req) \
DOMAIN(pwr, status, req, (req) + 16, req)
+#define DOMAIN_RK3399(pwr, status, req) \
+ DOMAIN(pwr, status, req, req, req)
+
static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
{
struct rockchip_pmu *pmu = pd->pmu;
@@ -96,6 +110,9 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
struct rockchip_pmu *pmu = pd->pmu;
unsigned int val;
+ if (pd_info->req_mask == 0)
+ return 0;
+
regmap_update_bits(pmu->regmap, pmu->info->req_offset,
pd_info->req_mask, idle ? -1U : 0);
@@ -111,11 +128,64 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
return 0;
}
+static int rockchip_pmu_save_qos(struct rockchip_pm_domain *pd)
+{
+ int i;
+
+ for (i = 0; i < pd->num_qos; i++) {
+ regmap_read(pd->qos_regmap[i],
+ QOS_PRIORITY,
+ &pd->qos_save_regs[0][i]);
+ regmap_read(pd->qos_regmap[i],
+ QOS_MODE,
+ &pd->qos_save_regs[1][i]);
+ regmap_read(pd->qos_regmap[i],
+ QOS_BANDWIDTH,
+ &pd->qos_save_regs[2][i]);
+ regmap_read(pd->qos_regmap[i],
+ QOS_SATURATION,
+ &pd->qos_save_regs[3][i]);
+ regmap_read(pd->qos_regmap[i],
+ QOS_EXTCONTROL,
+ &pd->qos_save_regs[4][i]);
+ }
+ return 0;
+}
+
+static int rockchip_pmu_restore_qos(struct rockchip_pm_domain *pd)
+{
+ int i;
+
+ for (i = 0; i < pd->num_qos; i++) {
+ regmap_write(pd->qos_regmap[i],
+ QOS_PRIORITY,
+ pd->qos_save_regs[0][i]);
+ regmap_write(pd->qos_regmap[i],
+ QOS_MODE,
+ pd->qos_save_regs[1][i]);
+ regmap_write(pd->qos_regmap[i],
+ QOS_BANDWIDTH,
+ pd->qos_save_regs[2][i]);
+ regmap_write(pd->qos_regmap[i],
+ QOS_SATURATION,
+ pd->qos_save_regs[3][i]);
+ regmap_write(pd->qos_regmap[i],
+ QOS_EXTCONTROL,
+ pd->qos_save_regs[4][i]);
+ }
+
+ return 0;
+}
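
[editor's note] rockchip_pmu_save_qos()/restore_qos() snapshot the five QoS registers of every port attached to a domain before power-off and replay them after power-on, since the registers lose their contents with the domain. A sketch of the save/replay shape for a single port; the register array and values are illustrative, not the regmap API:

	#include <stdio.h>

	#define NUM_QOS_REGS 5

	static unsigned int port_regs[NUM_QOS_REGS];	/* one QoS port */
	static unsigned int saved[NUM_QOS_REGS];

	static void save_qos(void)
	{
		int i;
		for (i = 0; i < NUM_QOS_REGS; i++)
			saved[i] = port_regs[i];	/* regmap_read() per register */
	}

	static void restore_qos(void)
	{
		int i;
		for (i = 0; i < NUM_QOS_REGS; i++)
			port_regs[i] = saved[i];	/* regmap_write() per register */
	}

	int main(void)
	{
		port_regs[0] = 0x80;	/* e.g. QOS_PRIORITY */
		save_qos();
		port_regs[0] = 0;	/* domain power-off resets the register */
		restore_qos();
		printf("priority restored to 0x%x\n", port_regs[0]);
		return 0;
	}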
+
static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
{
struct rockchip_pmu *pmu = pd->pmu;
unsigned int val;
+ /* check idle status for idle-only domains */
+ if (pd->info->status_mask == 0)
+ return !rockchip_pmu_domain_is_idle(pd);
+
regmap_read(pmu->regmap, pmu->info->status_offset, &val);
/* 1'b0: power on, 1'b1: power off */
@@ -127,6 +197,9 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
{
struct rockchip_pmu *pmu = pd->pmu;
+ if (pd->info->pwr_mask == 0)
+ return;
+
regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
pd->info->pwr_mask, on ? 0 : -1U);
@@ -147,7 +220,7 @@ static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
clk_enable(pd->clks[i]);
if (!power_on) {
- /* FIXME: add code to save AXI_QOS */
+ rockchip_pmu_save_qos(pd);
/* if powering down, idle request to NIU first */
rockchip_pmu_set_idle_request(pd, true);
@@ -159,7 +232,7 @@ static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
/* if powering up, leave idle mode */
rockchip_pmu_set_idle_request(pd, false);
- /* FIXME: add code to restore AXI_QOS */
+ rockchip_pmu_restore_qos(pd);
}
for (i = pd->num_clks - 1; i >= 0; i--)
@@ -227,9 +300,10 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
{
const struct rockchip_domain_info *pd_info;
struct rockchip_pm_domain *pd;
+ struct device_node *qos_node;
struct clk *clk;
int clk_cnt;
- int i;
+ int i, j;
u32 id;
int error;
@@ -289,6 +363,45 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
clk, node->name);
}
+ pd->num_qos = of_count_phandle_with_args(node, "pm_qos",
+ NULL);
+
+ if (pd->num_qos > 0) {
+ pd->qos_regmap = devm_kcalloc(pmu->dev, pd->num_qos,
+ sizeof(*pd->qos_regmap),
+ GFP_KERNEL);
+ if (!pd->qos_regmap) {
+ error = -ENOMEM;
+ goto err_out;
+ }
+
+ for (j = 0; j < MAX_QOS_REGS_NUM; j++) {
+ pd->qos_save_regs[j] = devm_kcalloc(pmu->dev,
+ pd->num_qos,
+ sizeof(u32),
+ GFP_KERNEL);
+ if (!pd->qos_save_regs[j]) {
+ error = -ENOMEM;
+ goto err_out;
+ }
+ }
+
+ for (j = 0; j < pd->num_qos; j++) {
+ qos_node = of_parse_phandle(node, "pm_qos", j);
+ if (!qos_node) {
+ error = -ENODEV;
+ goto err_out;
+ }
+ pd->qos_regmap[j] = syscon_node_to_regmap(qos_node);
+ if (IS_ERR(pd->qos_regmap[j])) {
+ error = -ENODEV;
+ of_node_put(qos_node);
+ goto err_out;
+ }
+ of_node_put(qos_node);
+ }
+ }
+
error = rockchip_pd_power(pd, true);
if (error) {
dev_err(pmu->dev,
@@ -360,6 +473,61 @@ static void rockchip_configure_pd_cnt(struct rockchip_pmu *pmu,
regmap_write(pmu->regmap, domain_reg_offset + 4, count);
}
+static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
+ struct device_node *parent)
+{
+ struct device_node *np;
+ struct generic_pm_domain *child_domain, *parent_domain;
+ int error;
+
+ for_each_child_of_node(parent, np) {
+ u32 idx;
+
+ error = of_property_read_u32(parent, "reg", &idx);
+ if (error) {
+ dev_err(pmu->dev,
+ "%s: failed to retrieve domain id (reg): %d\n",
+ parent->name, error);
+ goto err_out;
+ }
+ parent_domain = pmu->genpd_data.domains[idx];
+
+ error = rockchip_pm_add_one_domain(pmu, np);
+ if (error) {
+ dev_err(pmu->dev, "failed to handle node %s: %d\n",
+ np->name, error);
+ goto err_out;
+ }
+
+ error = of_property_read_u32(np, "reg", &idx);
+ if (error) {
+ dev_err(pmu->dev,
+ "%s: failed to retrieve domain id (reg): %d\n",
+ np->name, error);
+ goto err_out;
+ }
+ child_domain = pmu->genpd_data.domains[idx];
+
+ error = pm_genpd_add_subdomain(parent_domain, child_domain);
+ if (error) {
+ dev_err(pmu->dev, "%s failed to add subdomain %s: %d\n",
+ parent_domain->name, child_domain->name, error);
+ goto err_out;
+ } else {
+ dev_dbg(pmu->dev, "%s add subdomain: %s\n",
+ parent_domain->name, child_domain->name);
+ }
+
+ rockchip_pm_add_subdomain(pmu, np);
+ }
+
+ return 0;
+
+err_out:
+ of_node_put(np);
+ return error;
+}
+
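[annotation] A note on node references in the loop above: for_each_child_of_node() takes a reference on each child it visits and drops it when advancing, so only early exits from the loop body need an explicit of_node_put(), which is what the err_out label provides. A minimal sketch of the pattern (some_error is a placeholder condition, not from this patch):

	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (some_error) {
			of_node_put(child);	/* drop the loop's reference */
			return -EINVAL;
		}
	}
	/* normal loop termination needs no of_node_put() */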
static int rockchip_pm_domain_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -406,6 +574,10 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
}
pmu->regmap = syscon_node_to_regmap(parent->of_node);
+ if (IS_ERR(pmu->regmap)) {
+ dev_err(dev, "no regmap available\n");
+ return PTR_ERR(pmu->regmap);
+ }
/*
* Configure power up and down transition delays for CORE
@@ -426,6 +598,14 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
of_node_put(node);
goto err_out;
}
+
+ error = rockchip_pm_add_subdomain(pmu, node);
+ if (error < 0) {
+ dev_err(dev, "failed to handle subdomain node %s: %d\n",
+ node->name, error);
+ of_node_put(node);
+ goto err_out;
+ }
}
if (error) {
@@ -457,6 +637,36 @@ static const struct rockchip_domain_info rk3368_pm_domains[] = {
[RK3368_PD_GPU_1] = DOMAIN_RK3368(17, 16, 2),
};
+static const struct rockchip_domain_info rk3399_pm_domains[] = {
+ [RK3399_PD_TCPD0] = DOMAIN_RK3399(8, 8, -1),
+ [RK3399_PD_TCPD1] = DOMAIN_RK3399(9, 9, -1),
+ [RK3399_PD_CCI] = DOMAIN_RK3399(10, 10, -1),
+ [RK3399_PD_CCI0] = DOMAIN_RK3399(-1, -1, 15),
+ [RK3399_PD_CCI1] = DOMAIN_RK3399(-1, -1, 16),
+ [RK3399_PD_PERILP] = DOMAIN_RK3399(11, 11, 1),
+ [RK3399_PD_PERIHP] = DOMAIN_RK3399(12, 12, 2),
+ [RK3399_PD_CENTER] = DOMAIN_RK3399(13, 13, 14),
+ [RK3399_PD_VIO] = DOMAIN_RK3399(14, 14, 17),
+ [RK3399_PD_GPU] = DOMAIN_RK3399(15, 15, 0),
+ [RK3399_PD_VCODEC] = DOMAIN_RK3399(16, 16, 3),
+ [RK3399_PD_VDU] = DOMAIN_RK3399(17, 17, 4),
+ [RK3399_PD_RGA] = DOMAIN_RK3399(18, 18, 5),
+ [RK3399_PD_IEP] = DOMAIN_RK3399(19, 19, 6),
+ [RK3399_PD_VO] = DOMAIN_RK3399(20, 20, -1),
+ [RK3399_PD_VOPB] = DOMAIN_RK3399(-1, -1, 7),
+ [RK3399_PD_VOPL] = DOMAIN_RK3399(-1, -1, 8),
+ [RK3399_PD_ISP0] = DOMAIN_RK3399(22, 22, 9),
+ [RK3399_PD_ISP1] = DOMAIN_RK3399(23, 23, 10),
+ [RK3399_PD_HDCP] = DOMAIN_RK3399(24, 24, 11),
+ [RK3399_PD_GMAC] = DOMAIN_RK3399(25, 25, 23),
+ [RK3399_PD_EMMC] = DOMAIN_RK3399(26, 26, 24),
+ [RK3399_PD_USB3] = DOMAIN_RK3399(27, 27, 12),
+ [RK3399_PD_EDP] = DOMAIN_RK3399(28, 28, 22),
+ [RK3399_PD_GIC] = DOMAIN_RK3399(29, 29, 27),
+ [RK3399_PD_SD] = DOMAIN_RK3399(30, 30, 28),
+ [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(31, 31, 29),
+};
+
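[annotation] The -1 placeholders in this table mark domains that have no bit in the corresponding PMU register; they are what the new status_mask == 0 and pwr_mask == 0 guards earlier in the patch exist for. A hedged sketch of how a DOMAIN-style macro can translate them into empty masks (the real macro sits near the top of this file and may differ in detail):

#define DOMAIN_SKETCH(pwr, status, req)					\
{									\
	.pwr_mask    = ((pwr)    >= 0) ? BIT(pwr)    : 0,		\
	.status_mask = ((status) >= 0) ? BIT(status) : 0,		\
	.req_mask    = ((req)    >= 0) ? BIT(req)    : 0,		\
	.idle_mask   = ((req)    >= 0) ? BIT(req)    : 0,		\
	.ack_mask    = ((req)    >= 0) ? BIT(req)    : 0,		\
}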
static const struct rockchip_pmu_info rk3288_pmu = {
.pwr_offset = 0x08,
.status_offset = 0x0c,
@@ -491,6 +701,23 @@ static const struct rockchip_pmu_info rk3368_pmu = {
.domain_info = rk3368_pm_domains,
};
+static const struct rockchip_pmu_info rk3399_pmu = {
+ .pwr_offset = 0x14,
+ .status_offset = 0x18,
+ .req_offset = 0x60,
+ .idle_offset = 0x64,
+ .ack_offset = 0x68,
+
+ .core_pwrcnt_offset = 0x9c,
+ .gpu_pwrcnt_offset = 0xa4,
+
+ .core_power_transition_time = 24,
+ .gpu_power_transition_time = 24,
+
+ .num_domains = ARRAY_SIZE(rk3399_pm_domains),
+ .domain_info = rk3399_pm_domains,
+};
+
static const struct of_device_id rockchip_pm_domain_dt_match[] = {
{
.compatible = "rockchip,rk3288-power-controller",
@@ -500,6 +727,10 @@ static const struct of_device_id rockchip_pm_domain_dt_match[] = {
.compatible = "rockchip,rk3368-power-controller",
.data = (void *)&rk3368_pmu,
},
+ {
+ .compatible = "rockchip,rk3399-power-controller",
+ .data = (void *)&rk3399_pmu,
+ },
{ /* sentinel */ },
};
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index d0c3c3e08..03089ad2f 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -31,7 +31,6 @@ config ARCH_TEGRA_3x_SOC
config ARCH_TEGRA_114_SOC
bool "Enable support for Tegra114 family"
select ARM_ERRATA_798181 if SMP
- select ARM_L1_CACHE_SHIFT_6
select HAVE_ARM_ARCH_TIMER
select PINCTRL_TEGRA114
select TEGRA_TIMER
@@ -41,7 +40,6 @@ config ARCH_TEGRA_114_SOC
config ARCH_TEGRA_124_SOC
bool "Enable support for Tegra124 family"
- select ARM_L1_CACHE_SHIFT_6
select HAVE_ARM_ARCH_TIMER
select PINCTRL_TEGRA124
select TEGRA_TIMER
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index bc34cf748..bb173456b 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -28,12 +28,16 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/reboot.h>
#include <linux/reset.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/tegra/common.h>
@@ -101,6 +105,16 @@
#define GPU_RG_CNTRL 0x2d4
+struct tegra_powergate {
+ struct generic_pm_domain genpd;
+ struct tegra_pmc *pmc;
+ unsigned int id;
+ struct clk **clks;
+ unsigned int num_clks;
+ struct reset_control **resets;
+ unsigned int num_resets;
+};
+
struct tegra_pmc_soc {
unsigned int num_powergates;
const char *const *powergates;
@@ -113,8 +127,11 @@ struct tegra_pmc_soc {
/**
* struct tegra_pmc - NVIDIA Tegra PMC
+ * @dev: pointer to PMC device structure
* @base: pointer to I/O remapped register region
* @clk: pointer to pclk clock
+ * @soc: pointer to SoC data structure
+ * @debugfs: pointer to debugfs entry
* @rate: currently configured rate of pclk
* @suspend_mode: lowest suspend mode available
* @cpu_good_time: CPU power good time (in microseconds)
@@ -128,12 +145,14 @@ struct tegra_pmc_soc {
* @cpu_pwr_good_en: CPU power good signal is enabled
* @lp0_vec_phys: physical base address of the LP0 warm boot code
* @lp0_vec_size: size of the LP0 warm boot code
+ * @powergates_available: Bitmap of available power gates
* @powergates_lock: mutex for power gate register access
*/
struct tegra_pmc {
struct device *dev;
void __iomem *base;
struct clk *clk;
+ struct dentry *debugfs;
const struct tegra_pmc_soc *soc;
@@ -151,6 +170,7 @@ struct tegra_pmc {
bool cpu_pwr_good_en;
u32 lp0_vec_phys;
u32 lp0_vec_size;
+ DECLARE_BITMAP(powergates_available, TEGRA_POWERGATE_MAX);
struct mutex powergates_lock;
};
@@ -160,6 +180,12 @@ static struct tegra_pmc *pmc = &(struct tegra_pmc) {
.suspend_mode = TEGRA_SUSPEND_NONE,
};
+static inline struct tegra_powergate *
+to_powergate(struct generic_pm_domain *domain)
+{
+ return container_of(domain, struct tegra_powergate, genpd);
+}
+
static u32 tegra_pmc_readl(unsigned long offset)
{
return readl(pmc->base + offset);
@@ -170,38 +196,287 @@ static void tegra_pmc_writel(u32 value, unsigned long offset)
writel(value, pmc->base + offset);
}
+static inline bool tegra_powergate_state(int id)
+{
+ if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
+ return (tegra_pmc_readl(GPU_RG_CNTRL) & 0x1) == 0;
+ else
+ return (tegra_pmc_readl(PWRGATE_STATUS) & BIT(id)) != 0;
+}
+
+static inline bool tegra_powergate_is_valid(int id)
+{
+ return (pmc->soc && pmc->soc->powergates[id]);
+}
+
+static inline bool tegra_powergate_is_available(int id)
+{
+ return test_bit(id, pmc->powergates_available);
+}
+
+static int tegra_powergate_lookup(struct tegra_pmc *pmc, const char *name)
+{
+ unsigned int i;
+
+ if (!pmc || !pmc->soc || !name)
+ return -EINVAL;
+
+ for (i = 0; i < pmc->soc->num_powergates; i++) {
+ if (!tegra_powergate_is_valid(i))
+ continue;
+
+ if (!strcmp(name, pmc->soc->powergates[i]))
+ return i;
+ }
+
+ dev_err(pmc->dev, "powergate %s not found\n", name);
+
+ return -ENODEV;
+}
+
/**
* tegra_powergate_set() - set the state of a partition
* @id: partition ID
* @new_state: new state of the partition
*/
-static int tegra_powergate_set(int id, bool new_state)
+static int tegra_powergate_set(unsigned int id, bool new_state)
{
bool status;
+ int err;
- mutex_lock(&pmc->powergates_lock);
+ if (id == TEGRA_POWERGATE_3D && pmc->soc->has_gpu_clamps)
+ return -EINVAL;
- status = tegra_pmc_readl(PWRGATE_STATUS) & (1 << id);
+ mutex_lock(&pmc->powergates_lock);
- if (status == new_state) {
+ if (tegra_powergate_state(id) == new_state) {
mutex_unlock(&pmc->powergates_lock);
return 0;
}
tegra_pmc_writel(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+ err = readx_poll_timeout(tegra_powergate_state, id, status,
+ status == new_state, 10, 100000);
+
+ mutex_unlock(&pmc->powergates_lock);
+
+ return err;
+}
+
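[annotation] readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> repeatedly evaluates val = op(addr) until cond holds, sleeping between reads, and returns -ETIMEDOUT after one final re-check past the deadline. A rough open-coded equivalent of the call above (approximate; the real macro computes its sleep range slightly differently):

	ktime_t timeout = ktime_add_us(ktime_get(), 100000);

	err = 0;
	for (;;) {
		status = tegra_powergate_state(id);
		if (status == new_state)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			/* one last read after the deadline */
			status = tegra_powergate_state(id);
			if (status != new_state)
				err = -ETIMEDOUT;
			break;
		}
		usleep_range(10, 20);
	}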
+static int __tegra_powergate_remove_clamping(unsigned int id)
+{
+ u32 mask;
+
+ mutex_lock(&pmc->powergates_lock);
+
+ /*
+ * On Tegra124 and later, the clamps for the GPU are controlled by a
+ * separate register (with different semantics).
+ */
+ if (id == TEGRA_POWERGATE_3D) {
+ if (pmc->soc->has_gpu_clamps) {
+ tegra_pmc_writel(0, GPU_RG_CNTRL);
+ goto out;
+ }
+ }
+
+ /*
+ * Tegra 2 has a bug where the PCIE and VDE clamping masks are
+ * swapped relative to the partition IDs.
+ */
+ if (id == TEGRA_POWERGATE_VDEC)
+ mask = (1 << TEGRA_POWERGATE_PCIE);
+ else if (id == TEGRA_POWERGATE_PCIE)
+ mask = (1 << TEGRA_POWERGATE_VDEC);
+ else
+ mask = (1 << id);
+
+ tegra_pmc_writel(mask, REMOVE_CLAMPING);
+
+out:
mutex_unlock(&pmc->powergates_lock);
return 0;
}
+static void tegra_powergate_disable_clocks(struct tegra_powergate *pg)
+{
+ unsigned int i;
+
+ for (i = 0; i < pg->num_clks; i++)
+ clk_disable_unprepare(pg->clks[i]);
+}
+
+static int tegra_powergate_enable_clocks(struct tegra_powergate *pg)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < pg->num_clks; i++) {
+ err = clk_prepare_enable(pg->clks[i]);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ while (i--)
+ clk_disable_unprepare(pg->clks[i]);
+
+ return err;
+}
+
+static int tegra_powergate_reset_assert(struct tegra_powergate *pg)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < pg->num_resets; i++) {
+ err = reset_control_assert(pg->resets[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_powergate_reset_deassert(struct tegra_powergate *pg)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < pg->num_resets; i++) {
+ err = reset_control_deassert(pg->resets[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_powergate_power_up(struct tegra_powergate *pg,
+ bool disable_clocks)
+{
+ int err;
+
+ err = tegra_powergate_reset_assert(pg);
+ if (err)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = tegra_powergate_set(pg->id, true);
+ if (err < 0)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = tegra_powergate_enable_clocks(pg);
+ if (err)
+ goto disable_clks;
+
+ usleep_range(10, 20);
+
+ err = __tegra_powergate_remove_clamping(pg->id);
+ if (err)
+ goto disable_clks;
+
+ usleep_range(10, 20);
+
+ err = tegra_powergate_reset_deassert(pg);
+ if (err)
+ goto powergate_off;
+
+ usleep_range(10, 20);
+
+ if (disable_clocks)
+ tegra_powergate_disable_clocks(pg);
+
+ return 0;
+
+disable_clks:
+ tegra_powergate_disable_clocks(pg);
+ usleep_range(10, 20);
+powergate_off:
+ tegra_powergate_set(pg->id, false);
+
+ return err;
+}
+
+static int tegra_powergate_power_down(struct tegra_powergate *pg)
+{
+ int err;
+
+ err = tegra_powergate_enable_clocks(pg);
+ if (err)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = tegra_powergate_reset_assert(pg);
+ if (err)
+ goto disable_clks;
+
+ usleep_range(10, 20);
+
+ tegra_powergate_disable_clocks(pg);
+
+ usleep_range(10, 20);
+
+ err = tegra_powergate_set(pg->id, false);
+ if (err)
+ goto assert_resets;
+
+ return 0;
+
+assert_resets:
+ tegra_powergate_enable_clocks(pg);
+ usleep_range(10, 20);
+ tegra_powergate_reset_deassert(pg);
+ usleep_range(10, 20);
+disable_clks:
+ tegra_powergate_disable_clocks(pg);
+
+ return err;
+}
+
+static int tegra_genpd_power_on(struct generic_pm_domain *domain)
+{
+ struct tegra_powergate *pg = to_powergate(domain);
+ struct tegra_pmc *pmc = pg->pmc;
+ int err;
+
+ err = tegra_powergate_power_up(pg, true);
+ if (err)
+ dev_err(pmc->dev, "failed to turn on PM domain %s: %d\n",
+ pg->genpd.name, err);
+
+ return err;
+}
+
+static int tegra_genpd_power_off(struct generic_pm_domain *domain)
+{
+ struct tegra_powergate *pg = to_powergate(domain);
+ struct tegra_pmc *pmc = pg->pmc;
+ int err;
+
+ err = tegra_powergate_power_down(pg);
+ if (err)
+ dev_err(pmc->dev, "failed to turn off PM domain %s: %d\n",
+ pg->genpd.name, err);
+
+ return err;
+}
+
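[annotation] Consumer-side view of the two callbacks above: once a domain is registered as a genpd provider (see of_genpd_add_provider_simple() further down), a device referencing it through a "power-domains" DT property reaches these functions transparently via runtime PM. A hedged sketch with illustrative names:

static int foo_do_work(struct device *dev)
{
	int err;

	err = pm_runtime_get_sync(dev);	/* may end up in tegra_genpd_power_on() */
	if (err < 0) {
		pm_runtime_put_noidle(dev);
		return err;
	}

	/* ... touch hardware inside the now-powered domain ... */

	pm_runtime_put(dev);		/* may end up in tegra_genpd_power_off() */
	return 0;
}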
/**
* tegra_powergate_power_on() - power on partition
* @id: partition ID
*/
-int tegra_powergate_power_on(int id)
+int tegra_powergate_power_on(unsigned int id)
{
- if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates)
+ if (!tegra_powergate_is_available(id))
return -EINVAL;
return tegra_powergate_set(id, true);
@@ -211,9 +486,9 @@ int tegra_powergate_power_on(int id)
* tegra_powergate_power_off() - power off partition
* @id: partition ID
*/
-int tegra_powergate_power_off(int id)
+int tegra_powergate_power_off(unsigned int id)
{
- if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates)
+ if (!tegra_powergate_is_available(id))
return -EINVAL;
return tegra_powergate_set(id, false);
@@ -224,53 +499,30 @@ EXPORT_SYMBOL(tegra_powergate_power_off);
* tegra_powergate_is_powered() - check if partition is powered
* @id: partition ID
*/
-int tegra_powergate_is_powered(int id)
+int tegra_powergate_is_powered(unsigned int id)
{
- u32 status;
+ int status;
- if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates)
+ if (!tegra_powergate_is_valid(id))
return -EINVAL;
- status = tegra_pmc_readl(PWRGATE_STATUS) & (1 << id);
- return !!status;
+ mutex_lock(&pmc->powergates_lock);
+ status = tegra_powergate_state(id);
+ mutex_unlock(&pmc->powergates_lock);
+
+ return status;
}
/**
* tegra_powergate_remove_clamping() - remove power clamps for partition
* @id: partition ID
*/
-int tegra_powergate_remove_clamping(int id)
+int tegra_powergate_remove_clamping(unsigned int id)
{
- u32 mask;
-
- if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates)
+ if (!tegra_powergate_is_available(id))
return -EINVAL;
- /*
- * On Tegra124 and later, the clamps for the GPU are controlled by a
- * separate register (with different semantics).
- */
- if (id == TEGRA_POWERGATE_3D) {
- if (pmc->soc->has_gpu_clamps) {
- tegra_pmc_writel(0, GPU_RG_CNTRL);
- return 0;
- }
- }
-
- /*
- * Tegra 2 has a bug where PCIE and VDE clamping masks are
- * swapped relatively to the partition ids
- */
- if (id == TEGRA_POWERGATE_VDEC)
- mask = (1 << TEGRA_POWERGATE_PCIE);
- else if (id == TEGRA_POWERGATE_PCIE)
- mask = (1 << TEGRA_POWERGATE_VDEC);
- else
- mask = (1 << id);
-
- tegra_pmc_writel(mask, REMOVE_CLAMPING);
-
- return 0;
+ return __tegra_powergate_remove_clamping(id);
}
EXPORT_SYMBOL(tegra_powergate_remove_clamping);
@@ -282,38 +534,23 @@ EXPORT_SYMBOL(tegra_powergate_remove_clamping);
*
* Must be called with clk disabled, and returns with clk enabled.
*/
-int tegra_powergate_sequence_power_up(int id, struct clk *clk,
+int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
struct reset_control *rst)
{
- int ret;
-
- reset_control_assert(rst);
-
- ret = tegra_powergate_power_on(id);
- if (ret)
- goto err_power;
-
- ret = clk_prepare_enable(clk);
- if (ret)
- goto err_clk;
-
- usleep_range(10, 20);
-
- ret = tegra_powergate_remove_clamping(id);
- if (ret)
- goto err_clamp;
+ struct tegra_powergate pg;
+ int err;
- usleep_range(10, 20);
- reset_control_deassert(rst);
+ pg.id = id;
+ pg.clks = &clk;
+ pg.num_clks = 1;
+ pg.resets = &rst;
+ pg.num_resets = 1;
- return 0;
+ err = tegra_powergate_power_up(&pg, false);
+ if (err)
+ pr_err("failed to turn on partition %d: %d\n", id, err);
-err_clamp:
- clk_disable_unprepare(clk);
-err_clk:
- tegra_powergate_power_off(id);
-err_power:
- return ret;
+ return err;
}
EXPORT_SYMBOL(tegra_powergate_sequence_power_up);
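[annotation] Hedged usage sketch for the reworked helper above: callers such as the PCIe or SATA drivers pass exactly one clock and one reset, which the on-stack tegra_powergate wraps for tegra_powergate_power_up(). Identifiers and error handling below are illustrative:

	struct clk *clk = devm_clk_get(dev, "pex");		/* illustrative names */
	struct reset_control *rst = devm_reset_control_get(dev, "pex");
	int err;

	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE, clk, rst);
	if (err < 0)
		return err;
	/* partition powered, clock prepared and enabled, reset deasserted */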
@@ -325,9 +562,9 @@ EXPORT_SYMBOL(tegra_powergate_sequence_power_up);
* Returns the partition ID corresponding to the CPU partition ID or a
* negative error code on failure.
*/
-static int tegra_get_cpu_powergate_id(int cpuid)
+static int tegra_get_cpu_powergate_id(unsigned int cpuid)
{
- if (pmc->soc && cpuid > 0 && cpuid < pmc->soc->num_cpu_powergates)
+ if (pmc->soc && cpuid < pmc->soc->num_cpu_powergates)
return pmc->soc->cpu_powergates[cpuid];
return -EINVAL;
@@ -337,7 +574,7 @@ static int tegra_get_cpu_powergate_id(int cpuid)
* tegra_pmc_cpu_is_powered() - check if CPU partition is powered
* @cpuid: CPU partition ID
*/
-bool tegra_pmc_cpu_is_powered(int cpuid)
+bool tegra_pmc_cpu_is_powered(unsigned int cpuid)
{
int id;
@@ -352,7 +589,7 @@ bool tegra_pmc_cpu_is_powered(int cpuid)
* tegra_pmc_cpu_power_on() - power on CPU partition
* @cpuid: CPU partition ID
*/
-int tegra_pmc_cpu_power_on(int cpuid)
+int tegra_pmc_cpu_power_on(unsigned int cpuid)
{
int id;
@@ -367,7 +604,7 @@ int tegra_pmc_cpu_power_on(int cpuid)
* tegra_pmc_cpu_remove_clamping() - remove power clamps for CPU partition
* @cpuid: CPU partition ID
*/
-int tegra_pmc_cpu_remove_clamping(int cpuid)
+int tegra_pmc_cpu_remove_clamping(unsigned int cpuid)
{
int id;
@@ -416,16 +653,18 @@ static struct notifier_block tegra_pmc_restart_handler = {
static int powergate_show(struct seq_file *s, void *data)
{
unsigned int i;
+ int status;
seq_printf(s, " powergate powered\n");
seq_printf(s, "------------------\n");
for (i = 0; i < pmc->soc->num_powergates; i++) {
- if (!pmc->soc->powergates[i])
+ status = tegra_powergate_is_powered(i);
+ if (status < 0)
continue;
seq_printf(s, " %9s %7s\n", pmc->soc->powergates[i],
- tegra_powergate_is_powered(i) ? "yes" : "no");
+ status ? "yes" : "no");
}
return 0;
@@ -445,17 +684,164 @@ static const struct file_operations powergate_fops = {
static int tegra_powergate_debugfs_init(void)
{
- struct dentry *d;
+ pmc->debugfs = debugfs_create_file("powergate", S_IRUGO, NULL, NULL,
+ &powergate_fops);
+ if (!pmc->debugfs)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int tegra_powergate_of_get_clks(struct tegra_powergate *pg,
+ struct device_node *np)
+{
+ struct clk *clk;
+ unsigned int i, count;
+ int err;
+
+ count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+ if (count == 0)
+ return -ENODEV;
+
+ pg->clks = kcalloc(count, sizeof(clk), GFP_KERNEL);
+ if (!pg->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ pg->clks[i] = of_clk_get(np, i);
+ if (IS_ERR(pg->clks[i])) {
+ err = PTR_ERR(pg->clks[i]);
+ goto err;
+ }
+ }
+
+ pg->num_clks = count;
+
+ return 0;
+
+err:
+ while (i--)
+ clk_put(pg->clks[i]);
+ kfree(pg->clks);
+
+ return err;
+}
+
+static int tegra_powergate_of_get_resets(struct tegra_powergate *pg,
+ struct device_node *np)
+{
+ struct reset_control *rst;
+ unsigned int i, count;
+ int err;
+
+ count = of_count_phandle_with_args(np, "resets", "#reset-cells");
+ if (count == 0)
+ return -ENODEV;
- d = debugfs_create_file("powergate", S_IRUGO, NULL, NULL,
- &powergate_fops);
- if (!d)
+ pg->resets = kcalloc(count, sizeof(rst), GFP_KERNEL);
+ if (!pg->resets)
return -ENOMEM;
+ for (i = 0; i < count; i++) {
+ pg->resets[i] = of_reset_control_get_by_index(np, i);
+ if (IS_ERR(pg->resets[i])) {
+ err = PTR_ERR(pg->resets[i]);
+ goto error;
+ }
+ }
+
+ pg->num_resets = count;
+
return 0;
+
+error:
+ while (i--)
+ reset_control_put(pg->resets[i]);
+ kfree(pg->resets);
+
+ return err;
+}
+
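[annotation] One caveat with the two of_get helpers above: of_count_phandle_with_args() returns a negative errno for a missing or malformed property, and storing that in an unsigned count makes the == 0 test miss it. A defensive variant (sketch, not what the patch does):

	int count;

	count = of_count_phandle_with_args(np, "resets", "#reset-cells");
	if (count <= 0)
		return count ? count : -ENODEV;	/* propagate errno, map 0 to -ENODEV */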
+static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
+{
+ struct tegra_powergate *pg;
+ bool off;
+ int id;
+
+ pg = kzalloc(sizeof(*pg), GFP_KERNEL);
+ if (!pg)
+ goto error;
+
+ id = tegra_powergate_lookup(pmc, np->name);
+ if (id < 0)
+ goto free_mem;
+
+ /*
+ * Clear the bit for this powergate so it cannot be managed
+ * directly via the legacy APIs for controlling powergates.
+ */
+ clear_bit(id, pmc->powergates_available);
+
+ pg->id = id;
+ pg->genpd.name = np->name;
+ pg->genpd.power_off = tegra_genpd_power_off;
+ pg->genpd.power_on = tegra_genpd_power_on;
+ pg->pmc = pmc;
+
+ if (tegra_powergate_of_get_clks(pg, np))
+ goto set_available;
+
+ if (tegra_powergate_of_get_resets(pg, np))
+ goto remove_clks;
+
+ off = !tegra_powergate_is_powered(pg->id);
+
+ pm_genpd_init(&pg->genpd, NULL, off);
+
+ if (of_genpd_add_provider_simple(np, &pg->genpd))
+ goto remove_resets;
+
+ dev_dbg(pmc->dev, "added power domain %s\n", pg->genpd.name);
+
+ return;
+
+remove_resets:
+ while (pg->num_resets--)
+ reset_control_put(pg->resets[pg->num_resets]);
+ kfree(pg->resets);
+
+remove_clks:
+ while (pg->num_clks--)
+ clk_put(pg->clks[pg->num_clks]);
+ kfree(pg->clks);
+
+set_available:
+ set_bit(id, pmc->powergates_available);
+
+free_mem:
+ kfree(pg);
+
+error:
+ dev_err(pmc->dev, "failed to create power domain for %s\n", np->name);
+}
+
+static void tegra_powergate_init(struct tegra_pmc *pmc)
+{
+ struct device_node *np, *child;
+
+ np = of_get_child_by_name(pmc->dev->of_node, "powergates");
+ if (!np)
+ return;
+
+ for_each_child_of_node(np, child) {
+ tegra_powergate_add(pmc, child);
+ of_node_put(child);
+ }
+
+ of_node_put(np);
}
-static int tegra_io_rail_prepare(int id, unsigned long *request,
+static int tegra_io_rail_prepare(unsigned int id, unsigned long *request,
unsigned long *status, unsigned int *bit)
{
unsigned long rate, value;
@@ -512,15 +898,17 @@ static void tegra_io_rail_unprepare(void)
tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE);
}
-int tegra_io_rail_power_on(int id)
+int tegra_io_rail_power_on(unsigned int id)
{
unsigned long request, status, value;
unsigned int bit, mask;
int err;
+ mutex_lock(&pmc->powergates_lock);
+
err = tegra_io_rail_prepare(id, &request, &status, &bit);
- if (err < 0)
- return err;
+ if (err)
+ goto error;
mask = 1 << bit;
@@ -531,27 +919,32 @@ int tegra_io_rail_power_on(int id)
tegra_pmc_writel(value, request);
err = tegra_io_rail_poll(status, mask, 0, 250);
- if (err < 0) {
+ if (err) {
pr_info("tegra_io_rail_poll() failed: %d\n", err);
- return err;
+ goto error;
}
tegra_io_rail_unprepare();
- return 0;
+error:
+ mutex_unlock(&pmc->powergates_lock);
+
+ return err;
}
EXPORT_SYMBOL(tegra_io_rail_power_on);
-int tegra_io_rail_power_off(int id)
+int tegra_io_rail_power_off(unsigned int id)
{
unsigned long request, status, value;
unsigned int bit, mask;
int err;
+ mutex_lock(&pmc->powergates_lock);
+
err = tegra_io_rail_prepare(id, &request, &status, &bit);
- if (err < 0) {
+ if (err) {
pr_info("tegra_io_rail_prepare() failed: %d\n", err);
- return err;
+ goto error;
}
mask = 1 << bit;
@@ -563,12 +956,15 @@ int tegra_io_rail_power_off(int id)
tegra_pmc_writel(value, request);
err = tegra_io_rail_poll(status, mask, mask, 250);
- if (err < 0)
- return err;
+ if (err)
+ goto error;
tegra_io_rail_unprepare();
- return 0;
+error:
+ mutex_unlock(&pmc->powergates_lock);
+
+ return err;
}
EXPORT_SYMBOL(tegra_io_rail_power_off);
@@ -727,7 +1123,7 @@ static void tegra_pmc_init(struct tegra_pmc *pmc)
tegra_pmc_writel(value, PMC_CNTRL);
}
-void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
+static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
{
static const char disabled[] = "emergency thermal reset disabled";
u32 pmu_addr, ctrl_id, reg_addr, reg_data, pinmux;
@@ -805,7 +1201,7 @@ out:
static int tegra_pmc_probe(struct platform_device *pdev)
{
- void __iomem *base = pmc->base;
+ void __iomem *base;
struct resource *res;
int err;
@@ -815,11 +1211,9 @@ static int tegra_pmc_probe(struct platform_device *pdev)
/* take over the memory region from the early initialization */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmc->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(pmc->base))
- return PTR_ERR(pmc->base);
-
- iounmap(base);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
pmc->clk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(pmc->clk)) {
@@ -842,11 +1236,19 @@ static int tegra_pmc_probe(struct platform_device *pdev)
err = register_restart_handler(&tegra_pmc_restart_handler);
if (err) {
+ debugfs_remove(pmc->debugfs);
dev_err(&pdev->dev, "unable to register restart handler, %d\n",
err);
return err;
}
+ tegra_powergate_init(pmc);
+
+ mutex_lock(&pmc->powergates_lock);
+ iounmap(pmc->base);
+ pmc->base = base;
+ mutex_unlock(&pmc->powergates_lock);
+
return 0;
}
@@ -964,7 +1366,6 @@ static const char * const tegra124_powergates[] = {
[TEGRA_POWERGATE_VENC] = "venc",
[TEGRA_POWERGATE_PCIE] = "pcie",
[TEGRA_POWERGATE_VDEC] = "vdec",
- [TEGRA_POWERGATE_L2] = "l2",
[TEGRA_POWERGATE_MPE] = "mpe",
[TEGRA_POWERGATE_HEG] = "heg",
[TEGRA_POWERGATE_SATA] = "sata",
@@ -1006,17 +1407,13 @@ static const char * const tegra210_powergates[] = {
[TEGRA_POWERGATE_3D] = "3d",
[TEGRA_POWERGATE_VENC] = "venc",
[TEGRA_POWERGATE_PCIE] = "pcie",
- [TEGRA_POWERGATE_L2] = "l2",
[TEGRA_POWERGATE_MPE] = "mpe",
- [TEGRA_POWERGATE_HEG] = "heg",
[TEGRA_POWERGATE_SATA] = "sata",
[TEGRA_POWERGATE_CPU1] = "cpu1",
[TEGRA_POWERGATE_CPU2] = "cpu2",
[TEGRA_POWERGATE_CPU3] = "cpu3",
- [TEGRA_POWERGATE_CELP] = "celp",
[TEGRA_POWERGATE_CPU0] = "cpu0",
[TEGRA_POWERGATE_C0NC] = "c0nc",
- [TEGRA_POWERGATE_C1NC] = "c1nc",
[TEGRA_POWERGATE_SOR] = "sor",
[TEGRA_POWERGATE_DIS] = "dis",
[TEGRA_POWERGATE_DISB] = "disb",
@@ -1080,6 +1477,7 @@ static int __init tegra_pmc_early_init(void)
const struct of_device_id *match;
struct device_node *np;
struct resource regs;
+ unsigned int i;
bool invert;
u32 value;
@@ -1129,6 +1527,11 @@ static int __init tegra_pmc_early_init(void)
return -ENXIO;
}
+ /* Create a bitmap of the available and valid partitions */
+ for (i = 0; i < pmc->soc->num_powergates; i++)
+ if (pmc->soc->powergates[i])
+ set_bit(i, pmc->powergates_available);
+
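[annotation] With this bitmap in place, the legacy entry points reduce to one test_bit() each. A slightly defensive variant of the helper added earlier in the patch, with an explicit range check that the patch's version leaves to its callers (sketch only):

static inline bool tegra_powergate_available(unsigned int id)
{
	return id < TEGRA_POWERGATE_MAX &&
	       test_bit(id, pmc->powergates_available);
}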
mutex_init(&pmc->powergates_lock);
/*
diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c
index c337764de..282e37137 100644
--- a/drivers/soc/versatile/soc-realview.c
+++ b/drivers/soc/versatile/soc-realview.c
@@ -31,18 +31,6 @@ static const struct of_device_id realview_soc_of_match[] = {
static u32 realview_coreid;
-static const char *realview_board_str(u32 id)
-{
- switch ((id >> 16) & 0xfff) {
- case 0x0147:
- return "HBI-0147";
- case 0x0159:
- return "HBI-0159";
- default:
- return "Unknown";
- }
-}
-
static const char *realview_arch_str(u32 id)
{
switch ((id >> 8) & 0xf) {
@@ -69,7 +57,7 @@ static ssize_t realview_get_board(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%s\n", realview_board_str(realview_coreid));
+ return sprintf(buf, "HBI-%03x\n", ((realview_coreid >> 16) & 0xfff));
}
static struct device_attribute realview_board_attr =
@@ -133,8 +121,9 @@ static int realview_soc_probe(struct platform_device *pdev)
device_create_file(soc_device_to_device(soc_dev), &realview_arch_attr);
device_create_file(soc_device_to_device(soc_dev), &realview_build_attr);
- dev_info(&pdev->dev, "RealView Syscon Core ID: 0x%08x\n",
- realview_coreid);
+ dev_info(&pdev->dev, "RealView Syscon Core ID: 0x%08x, HBI-%03x\n",
+ realview_coreid,
+ ((realview_coreid >> 16) & 0xfff));
/* FIXME: add attributes for SoC to sysfs */
return 0;
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 9d8c84bb1..4b931ec8d 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -410,7 +410,6 @@ config SPI_OMAP_UWIRE
config SPI_OMAP24XX
tristate "McSPI driver for OMAP"
depends on HAS_DMA
- depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH
depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
SPI master controller for OMAP24XX and later Multichannel SPI
@@ -432,10 +431,23 @@ config SPI_OMAP_100K
config SPI_ORION
tristate "Orion SPI master"
- depends on PLAT_ORION || COMPILE_TEST
+ depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
help
This enables using the SPI master controller on the Orion chips.
+config SPI_PIC32
+ tristate "Microchip PIC32 series SPI"
+ depends on MACH_PIC32 || COMPILE_TEST
+ help
+ SPI driver for Microchip PIC32 SPI master controller.
+
+config SPI_PIC32_SQI
+ tristate "Microchip PIC32 Quad SPI driver"
+ depends on MACH_PIC32 || COMPILE_TEST
+ depends on HAS_DMA
+ help
+ SPI driver for PIC32 Quad SPI controller.
+
config SPI_PL022
tristate "ARM AMBA PL022 SSP controller"
depends on ARM_AMBA
@@ -469,7 +481,6 @@ config SPI_PXA2XX_PCI
config SPI_ROCKCHIP
tristate "Rockchip SPI controller driver"
- depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH
help
This selects a driver for Rockchip SPI controller.
@@ -569,7 +580,7 @@ config SPI_SIRF
config SPI_ST_SSC4
tristate "STMicroelectronics SPI SSC-based driver"
- depends on ARCH_STI
+ depends on ARCH_STI || COMPILE_TEST
help
STMicroelectronics SoCs support for SPI. If you say yes to
this option, support will be included for the SSC driven SPI.
@@ -656,7 +667,7 @@ config SPI_XILINX
config SPI_XLP
tristate "Netlogic XLP SPI controller driver"
- depends on CPU_XLP || COMPILE_TEST
+ depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST
help
Enable support for the SPI controller on the Netlogic XLP SoCs.
Currently supported XLP variants are XLP8XX, XLP3XX, XLP2XX, XLP9XX
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index fbb255c5a..3c74d0035 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -62,6 +62,8 @@ obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o
obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
obj-$(CONFIG_SPI_TI_QSPI) += spi-ti-qspi.o
obj-$(CONFIG_SPI_ORION) += spi-orion.o
+obj-$(CONFIG_SPI_PIC32) += spi-pic32.o
+obj-$(CONFIG_SPI_PIC32_SQI) += spi-pic32-sqi.o
obj-$(CONFIG_SPI_PL022) += spi-pl022.o
obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-dma.o
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index c968ab210..2b1456e5e 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -525,7 +525,6 @@ static int spi_engine_probe(struct platform_device *pdev)
if (ret)
goto err_ref_clk_disable;
- master->dev.parent = &pdev->dev;
master->dev.of_node = pdev->dev.of_node;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
master->bits_per_word_mask = SPI_BPW_MASK(8);
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c
index cc3f938f0..afb51699d 100644
--- a/drivers/spi/spi-bcm53xx.c
+++ b/drivers/spi/spi-bcm53xx.c
@@ -10,6 +10,7 @@
#include "spi-bcm53xx.h"
#define BCM53XXSPI_MAX_SPI_BAUD 13500000 /* 216 MHz? */
+#define BCM53XXSPI_FLASH_WINDOW SZ_32M
/* The longest observed required wait was 19 ms */
#define BCM53XXSPI_SPE_TIMEOUT_MS 80
@@ -17,8 +18,10 @@
struct bcm53xxspi {
struct bcma_device *core;
struct spi_master *master;
+ void __iomem *mmio_base;
size_t read_offset;
+ bool bspi; /* Boot SPI mode with memory mapping */
};
static inline u32 bcm53xxspi_read(struct bcm53xxspi *b53spi, u16 offset)
@@ -32,6 +35,50 @@ static inline void bcm53xxspi_write(struct bcm53xxspi *b53spi, u16 offset,
bcma_write32(b53spi->core, offset, value);
}
+static void bcm53xxspi_disable_bspi(struct bcm53xxspi *b53spi)
+{
+ struct device *dev = &b53spi->core->dev;
+ unsigned long deadline;
+ u32 tmp;
+
+ if (!b53spi->bspi)
+ return;
+
+ tmp = bcm53xxspi_read(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL);
+ if (tmp & 0x1)
+ return;
+
+ deadline = jiffies + usecs_to_jiffies(200);
+ do {
+ tmp = bcm53xxspi_read(b53spi, B53SPI_BSPI_BUSY_STATUS);
+ if (!(tmp & 0x1)) {
+ bcm53xxspi_write(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL,
+ 0x1);
+ ndelay(200);
+ b53spi->bspi = false;
+ return;
+ }
+ udelay(1);
+ } while (!time_after_eq(jiffies, deadline));
+
+ dev_warn(dev, "Timeout disabling BSPI\n");
+}
+
+static void bcm53xxspi_enable_bspi(struct bcm53xxspi *b53spi)
+{
+ u32 tmp;
+
+ if (b53spi->bspi)
+ return;
+
+ tmp = bcm53xxspi_read(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL);
+ if (!(tmp & 0x1))
+ return;
+
+ bcm53xxspi_write(b53spi, B53SPI_BSPI_MAST_N_BOOT_CTRL, 0x0);
+ b53spi->bspi = true;
+}
+
static inline unsigned int bcm53xxspi_calc_timeout(size_t len)
{
/* Do some magic calculation based on length and baud. Add 10% and 1. */
@@ -176,6 +223,8 @@ static int bcm53xxspi_transfer_one(struct spi_master *master,
u8 *buf;
size_t left;
+ bcm53xxspi_disable_bspi(b53spi);
+
if (t->tx_buf) {
buf = (u8 *)t->tx_buf;
left = t->len;
@@ -206,6 +255,22 @@ static int bcm53xxspi_transfer_one(struct spi_master *master,
return 0;
}
+static int bcm53xxspi_flash_read(struct spi_device *spi,
+ struct spi_flash_read_message *msg)
+{
+ struct bcm53xxspi *b53spi = spi_master_get_devdata(spi->master);
+ int ret = 0;
+
+ if (msg->from + msg->len > BCM53XXSPI_FLASH_WINDOW)
+ return -EINVAL;
+
+ bcm53xxspi_enable_bspi(b53spi);
+ memcpy_fromio(msg->buf, b53spi->mmio_base + msg->from, msg->len);
+ msg->retlen = msg->len;
+
+ return ret;
+}
+
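[annotation] Caller-side sketch of the accelerated read path added above: the SPI core invokes master->spi_flash_read via spi_flash_read() when a flash driver fills in a spi_flash_read_message; this loosely follows how m25p80 drove the interface in this kernel generation (only the buf/from/len/retlen fields are shown):

	struct spi_flash_read_message msg = {
		.buf  = buf,		/* destination buffer */
		.from = offset,		/* must stay inside the 32M window */
		.len  = length,
	};
	int err;

	err = spi_flash_read(spi, &msg);
	/* on success, msg.retlen bytes were copied from the mapped window */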
/**************************************************
* BCMA
**************************************************/
@@ -222,6 +287,7 @@ MODULE_DEVICE_TABLE(bcma, bcm53xxspi_bcma_tbl);
static int bcm53xxspi_bcma_probe(struct bcma_device *core)
{
+ struct device *dev = &core->dev;
struct bcm53xxspi *b53spi;
struct spi_master *master;
int err;
@@ -231,7 +297,7 @@ static int bcm53xxspi_bcma_probe(struct bcma_device *core)
return -ENOTSUPP;
}
- master = spi_alloc_master(&core->dev, sizeof(*b53spi));
+ master = spi_alloc_master(dev, sizeof(*b53spi));
if (!master)
return -ENOMEM;
@@ -239,11 +305,19 @@ static int bcm53xxspi_bcma_probe(struct bcma_device *core)
b53spi->master = master;
b53spi->core = core;
+ if (core->addr_s[0])
+ b53spi->mmio_base = devm_ioremap(dev, core->addr_s[0],
+ BCM53XXSPI_FLASH_WINDOW);
+ b53spi->bspi = true;
+ bcm53xxspi_disable_bspi(b53spi);
+
master->transfer_one = bcm53xxspi_transfer_one;
+ if (b53spi->mmio_base)
+ master->spi_flash_read = bcm53xxspi_flash_read;
bcma_set_drvdata(core, b53spi);
- err = devm_spi_register_master(&core->dev, master);
+ err = devm_spi_register_master(dev, master);
if (err) {
spi_master_put(master);
bcma_set_drvdata(core, NULL);
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 121a4135b..1c57ce64a 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -19,44 +19,46 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
/* Name of this driver */
#define CDNS_SPI_NAME "cdns-spi"
/* Register offset definitions */
-#define CDNS_SPI_CR_OFFSET 0x00 /* Configuration Register, RW */
-#define CDNS_SPI_ISR_OFFSET 0x04 /* Interrupt Status Register, RO */
-#define CDNS_SPI_IER_OFFSET 0x08 /* Interrupt Enable Register, WO */
-#define CDNS_SPI_IDR_OFFSET 0x0c /* Interrupt Disable Register, WO */
-#define CDNS_SPI_IMR_OFFSET 0x10 /* Interrupt Enabled Mask Register, RO */
-#define CDNS_SPI_ER_OFFSET 0x14 /* Enable/Disable Register, RW */
-#define CDNS_SPI_DR_OFFSET 0x18 /* Delay Register, RW */
-#define CDNS_SPI_TXD_OFFSET 0x1C /* Data Transmit Register, WO */
-#define CDNS_SPI_RXD_OFFSET 0x20 /* Data Receive Register, RO */
-#define CDNS_SPI_SICR_OFFSET 0x24 /* Slave Idle Count Register, RW */
-#define CDNS_SPI_THLD_OFFSET 0x28 /* Transmit FIFO Watermark Register,RW */
-
+#define CDNS_SPI_CR 0x00 /* Configuration Register, RW */
+#define CDNS_SPI_ISR 0x04 /* Interrupt Status Register, RO */
+#define CDNS_SPI_IER 0x08 /* Interrupt Enable Register, WO */
+#define CDNS_SPI_IDR 0x0c /* Interrupt Disable Register, WO */
+#define CDNS_SPI_IMR 0x10 /* Interrupt Enabled Mask Register, RO */
+#define CDNS_SPI_ER 0x14 /* Enable/Disable Register, RW */
+#define CDNS_SPI_DR 0x18 /* Delay Register, RW */
+#define CDNS_SPI_TXD 0x1C /* Data Transmit Register, WO */
+#define CDNS_SPI_RXD 0x20 /* Data Receive Register, RO */
+#define CDNS_SPI_SICR 0x24 /* Slave Idle Count Register, RW */
+#define CDNS_SPI_THLD 0x28 /* Transmit FIFO Watermark Register,RW */
+
+#define SPI_AUTOSUSPEND_TIMEOUT 3000
/*
* SPI Configuration Register bit Masks
*
* This register contains various control bits that affect the operation
* of the SPI controller
*/
-#define CDNS_SPI_CR_MANSTRT_MASK 0x00010000 /* Manual TX Start */
-#define CDNS_SPI_CR_CPHA_MASK 0x00000004 /* Clock Phase Control */
-#define CDNS_SPI_CR_CPOL_MASK 0x00000002 /* Clock Polarity Control */
-#define CDNS_SPI_CR_SSCTRL_MASK 0x00003C00 /* Slave Select Mask */
-#define CDNS_SPI_CR_PERI_SEL_MASK 0x00000200 /* Peripheral Select Decode */
-#define CDNS_SPI_CR_BAUD_DIV_MASK 0x00000038 /* Baud Rate Divisor Mask */
-#define CDNS_SPI_CR_MSTREN_MASK 0x00000001 /* Master Enable Mask */
-#define CDNS_SPI_CR_MANSTRTEN_MASK 0x00008000 /* Manual TX Enable Mask */
-#define CDNS_SPI_CR_SSFORCE_MASK 0x00004000 /* Manual SS Enable Mask */
-#define CDNS_SPI_CR_BAUD_DIV_4_MASK 0x00000008 /* Default Baud Div Mask */
-#define CDNS_SPI_CR_DEFAULT_MASK (CDNS_SPI_CR_MSTREN_MASK | \
- CDNS_SPI_CR_SSCTRL_MASK | \
- CDNS_SPI_CR_SSFORCE_MASK | \
- CDNS_SPI_CR_BAUD_DIV_4_MASK)
+#define CDNS_SPI_CR_MANSTRT 0x00010000 /* Manual TX Start */
+#define CDNS_SPI_CR_CPHA 0x00000004 /* Clock Phase Control */
+#define CDNS_SPI_CR_CPOL 0x00000002 /* Clock Polarity Control */
+#define CDNS_SPI_CR_SSCTRL 0x00003C00 /* Slave Select Mask */
+#define CDNS_SPI_CR_PERI_SEL 0x00000200 /* Peripheral Select Decode */
+#define CDNS_SPI_CR_BAUD_DIV 0x00000038 /* Baud Rate Divisor Mask */
+#define CDNS_SPI_CR_MSTREN 0x00000001 /* Master Enable Mask */
+#define CDNS_SPI_CR_MANSTRTEN 0x00008000 /* Manual TX Enable Mask */
+#define CDNS_SPI_CR_SSFORCE 0x00004000 /* Manual SS Enable Mask */
+#define CDNS_SPI_CR_BAUD_DIV_4 0x00000008 /* Default Baud Div Mask */
+#define CDNS_SPI_CR_DEFAULT (CDNS_SPI_CR_MSTREN | \
+ CDNS_SPI_CR_SSCTRL | \
+ CDNS_SPI_CR_SSFORCE | \
+ CDNS_SPI_CR_BAUD_DIV_4)
/*
* SPI Configuration Register - Baud rate and slave select
@@ -77,21 +79,21 @@
 * All four interrupt registers (Status/Mask/Enable/Disable) have the same
* bit definitions.
*/
-#define CDNS_SPI_IXR_TXOW_MASK 0x00000004 /* SPI TX FIFO Overwater */
-#define CDNS_SPI_IXR_MODF_MASK 0x00000002 /* SPI Mode Fault */
-#define CDNS_SPI_IXR_RXNEMTY_MASK 0x00000010 /* SPI RX FIFO Not Empty */
-#define CDNS_SPI_IXR_DEFAULT_MASK (CDNS_SPI_IXR_TXOW_MASK | \
- CDNS_SPI_IXR_MODF_MASK)
-#define CDNS_SPI_IXR_TXFULL_MASK 0x00000008 /* SPI TX Full */
-#define CDNS_SPI_IXR_ALL_MASK 0x0000007F /* SPI all interrupts */
+#define CDNS_SPI_IXR_TXOW 0x00000004 /* SPI TX FIFO Overwater */
+#define CDNS_SPI_IXR_MODF 0x00000002 /* SPI Mode Fault */
+#define CDNS_SPI_IXR_RXNEMTY 0x00000010 /* SPI RX FIFO Not Empty */
+#define CDNS_SPI_IXR_DEFAULT (CDNS_SPI_IXR_TXOW | \
+ CDNS_SPI_IXR_MODF)
+#define CDNS_SPI_IXR_TXFULL 0x00000008 /* SPI TX Full */
+#define CDNS_SPI_IXR_ALL 0x0000007F /* SPI all interrupts */
/*
* SPI Enable Register bit Masks
*
* This register is used to enable or disable the SPI controller
*/
-#define CDNS_SPI_ER_ENABLE_MASK 0x00000001 /* SPI Enable Bit Mask */
-#define CDNS_SPI_ER_DISABLE_MASK 0x0 /* SPI Disable Bit Mask */
+#define CDNS_SPI_ER_ENABLE 0x00000001 /* SPI Enable Bit Mask */
+#define CDNS_SPI_ER_DISABLE 0x0 /* SPI Disable Bit Mask */
/* SPI FIFO depth in bytes */
#define CDNS_SPI_FIFO_DEPTH 128
@@ -149,56 +151,51 @@ static inline void cdns_spi_write(struct cdns_spi *xspi, u32 offset, u32 val)
*/
static void cdns_spi_init_hw(struct cdns_spi *xspi)
{
- u32 ctrl_reg = CDNS_SPI_CR_DEFAULT_MASK;
+ u32 ctrl_reg = CDNS_SPI_CR_DEFAULT;
if (xspi->is_decoded_cs)
- ctrl_reg |= CDNS_SPI_CR_PERI_SEL_MASK;
+ ctrl_reg |= CDNS_SPI_CR_PERI_SEL;
- cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
- CDNS_SPI_ER_DISABLE_MASK);
- cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
- CDNS_SPI_IXR_ALL_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
+ cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_ALL);
/* Clear the RX FIFO */
- while (cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET) &
- CDNS_SPI_IXR_RXNEMTY_MASK)
- cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
-
- cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET,
- CDNS_SPI_IXR_ALL_MASK);
- cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
- cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
- CDNS_SPI_ER_ENABLE_MASK);
+ while (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_RXNEMTY)
+ cdns_spi_read(xspi, CDNS_SPI_RXD);
+
+ cdns_spi_write(xspi, CDNS_SPI_ISR, CDNS_SPI_IXR_ALL);
+ cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg);
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE);
}
/**
* cdns_spi_chipselect - Select or deselect the chip select line
* @spi: Pointer to the spi_device structure
- * @is_on: Select(0) or deselect (1) the chip select line
+ * @is_high: Select(0) or deselect (1) the chip select line
*/
static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
{
struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
u32 ctrl_reg;
- ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
+ ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
if (is_high) {
/* Deselect the slave */
- ctrl_reg |= CDNS_SPI_CR_SSCTRL_MASK;
+ ctrl_reg |= CDNS_SPI_CR_SSCTRL;
} else {
/* Select the slave */
- ctrl_reg &= ~CDNS_SPI_CR_SSCTRL_MASK;
+ ctrl_reg &= ~CDNS_SPI_CR_SSCTRL;
if (!(xspi->is_decoded_cs))
ctrl_reg |= ((~(CDNS_SPI_SS0 << spi->chip_select)) <<
CDNS_SPI_SS_SHIFT) &
- CDNS_SPI_CR_SSCTRL_MASK;
+ CDNS_SPI_CR_SSCTRL;
else
ctrl_reg |= (spi->chip_select << CDNS_SPI_SS_SHIFT) &
- CDNS_SPI_CR_SSCTRL_MASK;
+ CDNS_SPI_CR_SSCTRL;
}
- cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
+ cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg);
}
/**
@@ -212,14 +209,15 @@ static void cdns_spi_config_clock_mode(struct spi_device *spi)
struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
u32 ctrl_reg, new_ctrl_reg;
- new_ctrl_reg = ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
+ new_ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
+ ctrl_reg = new_ctrl_reg;
/* Set the SPI clock phase and clock polarity */
- new_ctrl_reg &= ~(CDNS_SPI_CR_CPHA_MASK | CDNS_SPI_CR_CPOL_MASK);
+ new_ctrl_reg &= ~(CDNS_SPI_CR_CPHA | CDNS_SPI_CR_CPOL);
if (spi->mode & SPI_CPHA)
- new_ctrl_reg |= CDNS_SPI_CR_CPHA_MASK;
+ new_ctrl_reg |= CDNS_SPI_CR_CPHA;
if (spi->mode & SPI_CPOL)
- new_ctrl_reg |= CDNS_SPI_CR_CPOL_MASK;
+ new_ctrl_reg |= CDNS_SPI_CR_CPOL;
if (new_ctrl_reg != ctrl_reg) {
/*
@@ -228,11 +226,9 @@ static void cdns_spi_config_clock_mode(struct spi_device *spi)
* polarity as it will cause the SPI slave to see spurious clock
* transitions. To workaround the issue toggle the ER register.
*/
- cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
- CDNS_SPI_ER_DISABLE_MASK);
- cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, new_ctrl_reg);
- cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
- CDNS_SPI_ER_ENABLE_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
+ cdns_spi_write(xspi, CDNS_SPI_CR, new_ctrl_reg);
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE);
}
}
@@ -251,7 +247,7 @@ static void cdns_spi_config_clock_mode(struct spi_device *spi)
* controller.
*/
static void cdns_spi_config_clock_freq(struct spi_device *spi,
- struct spi_transfer *transfer)
+ struct spi_transfer *transfer)
{
struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
u32 ctrl_reg, baud_rate_val;
@@ -259,7 +255,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi,
frequency = clk_get_rate(xspi->ref_clk);
- ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
+ ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
/* Set the clock frequency */
if (xspi->speed_hz != transfer->speed_hz) {
@@ -269,12 +265,12 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi,
(frequency / (2 << baud_rate_val)) > transfer->speed_hz)
baud_rate_val++;
- ctrl_reg &= ~CDNS_SPI_CR_BAUD_DIV_MASK;
+ ctrl_reg &= ~CDNS_SPI_CR_BAUD_DIV;
ctrl_reg |= baud_rate_val << CDNS_SPI_BAUD_DIV_SHIFT;
xspi->speed_hz = frequency / (2 << baud_rate_val);
}
- cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
+ cdns_spi_write(xspi, CDNS_SPI_CR, ctrl_reg);
}
/**
@@ -313,10 +309,9 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
(xspi->tx_bytes > 0)) {
if (xspi->txbuf)
- cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET,
- *xspi->txbuf++);
+ cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
else
- cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET, 0);
+ cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
xspi->tx_bytes--;
trans_cnt++;
@@ -344,19 +339,18 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
u32 intr_status, status;
status = IRQ_NONE;
- intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET);
- cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET, intr_status);
+ intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR);
+ cdns_spi_write(xspi, CDNS_SPI_ISR, intr_status);
- if (intr_status & CDNS_SPI_IXR_MODF_MASK) {
+ if (intr_status & CDNS_SPI_IXR_MODF) {
/* Indicate that the transfer is complete; the SPI subsystem
 * will identify the error because the number of remaining
 * bytes to be transferred is non-zero.
*/
- cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
- CDNS_SPI_IXR_DEFAULT_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_IDR, CDNS_SPI_IXR_DEFAULT);
spi_finalize_current_transfer(master);
status = IRQ_HANDLED;
- } else if (intr_status & CDNS_SPI_IXR_TXOW_MASK) {
+ } else if (intr_status & CDNS_SPI_IXR_TXOW) {
unsigned long trans_cnt;
trans_cnt = xspi->rx_bytes - xspi->tx_bytes;
@@ -365,7 +359,7 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
while (trans_cnt) {
u8 data;
- data = cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
+ data = cdns_spi_read(xspi, CDNS_SPI_RXD);
if (xspi->rxbuf)
*xspi->rxbuf++ = data;
@@ -378,8 +372,8 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
cdns_spi_fill_tx_fifo(xspi);
} else {
/* Transfer is completed */
- cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
- CDNS_SPI_IXR_DEFAULT_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_IDR,
+ CDNS_SPI_IXR_DEFAULT);
spi_finalize_current_transfer(master);
}
status = IRQ_HANDLED;
@@ -387,6 +381,7 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
return status;
}
+
static int cdns_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -421,8 +416,7 @@ static int cdns_transfer_one(struct spi_master *master,
cdns_spi_fill_tx_fifo(xspi);
- cdns_spi_write(xspi, CDNS_SPI_IER_OFFSET,
- CDNS_SPI_IXR_DEFAULT_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
return transfer->len;
}
@@ -439,8 +433,7 @@ static int cdns_prepare_transfer_hardware(struct spi_master *master)
{
struct cdns_spi *xspi = spi_master_get_devdata(master);
- cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
- CDNS_SPI_ER_ENABLE_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_ENABLE);
return 0;
}
@@ -458,8 +451,7 @@ static int cdns_unprepare_transfer_hardware(struct spi_master *master)
{
struct cdns_spi *xspi = spi_master_get_devdata(master);
- cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
- CDNS_SPI_ER_DISABLE_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
return 0;
}
@@ -481,7 +473,7 @@ static int cdns_spi_probe(struct platform_device *pdev)
u32 num_cs;
master = spi_alloc_master(&pdev->dev, sizeof(*xspi));
- if (master == NULL)
+ if (!master)
return -ENOMEM;
xspi = spi_master_get_devdata(master);
@@ -521,6 +513,11 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto clk_dis_apb;
}
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_set_active(&pdev->dev);
+
ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
if (ret < 0)
master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
@@ -535,11 +532,14 @@ static int cdns_spi_probe(struct platform_device *pdev)
/* SPI controller initializations */
cdns_spi_init_hw(xspi);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
ret = -ENXIO;
dev_err(&pdev->dev, "irq number is invalid\n");
- goto remove_master;
+ goto clk_dis_all;
}
ret = devm_request_irq(&pdev->dev, irq, cdns_spi_irq,
@@ -547,7 +547,7 @@ static int cdns_spi_probe(struct platform_device *pdev)
if (ret != 0) {
ret = -ENXIO;
dev_err(&pdev->dev, "request_irq failed\n");
- goto remove_master;
+ goto clk_dis_all;
}
master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
@@ -555,6 +555,7 @@ static int cdns_spi_probe(struct platform_device *pdev)
master->transfer_one = cdns_transfer_one;
master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware;
master->set_cs = cdns_spi_chipselect;
+ master->auto_runtime_pm = true;
master->mode_bits = SPI_CPOL | SPI_CPHA;
/* Set to default valid value */
@@ -572,6 +573,8 @@ static int cdns_spi_probe(struct platform_device *pdev)
return ret;
clk_dis_all:
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(xspi->ref_clk);
clk_dis_apb:
clk_disable_unprepare(xspi->pclk);
@@ -595,11 +598,12 @@ static int cdns_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct cdns_spi *xspi = spi_master_get_devdata(master);
- cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
- CDNS_SPI_ER_DISABLE_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE);
clk_disable_unprepare(xspi->ref_clk);
clk_disable_unprepare(xspi->pclk);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
spi_unregister_master(master);
@@ -613,21 +617,14 @@ static int cdns_spi_remove(struct platform_device *pdev)
* This function disables the SPI controller and
* changes the driver state to "suspend"
*
- * Return: Always 0
+ * Return: 0 on success, a negative error code on failure
*/
static int __maybe_unused cdns_spi_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct spi_master *master = platform_get_drvdata(pdev);
- struct cdns_spi *xspi = spi_master_get_devdata(master);
-
- spi_master_suspend(master);
-
- clk_disable_unprepare(xspi->ref_clk);
-
- clk_disable_unprepare(xspi->pclk);
- return 0;
+ return spi_master_suspend(master);
}
/**
@@ -642,8 +639,23 @@ static int __maybe_unused cdns_spi_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct spi_master *master = platform_get_drvdata(pdev);
+
+ return spi_master_resume(master);
+}
+
+/**
+ * cdns_spi_runtime_resume - Runtime resume method for the SPI driver
+ * @dev: Address of the device structure
+ *
+ * This function enables the clocks.
+ *
+ * Return: 0 on success, a negative error code on failure
+ */
+static int __maybe_unused cdns_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
struct cdns_spi *xspi = spi_master_get_devdata(master);
- int ret = 0;
+ int ret;
ret = clk_prepare_enable(xspi->pclk);
if (ret) {
@@ -657,13 +669,33 @@ static int __maybe_unused cdns_spi_resume(struct device *dev)
clk_disable(xspi->pclk);
return ret;
}
- spi_master_resume(master);
+ return 0;
+}
+
+/**
+ * cdns_spi_runtime_suspend - Runtime suspend method for the SPI driver
+ * @dev: Address of the device structure
+ *
+ * This function disables the clocks.
+ *
+ * Return: Always 0
+ */
+static int __maybe_unused cdns_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(xspi->ref_clk);
+ clk_disable_unprepare(xspi->pclk);
return 0;
}
-static SIMPLE_DEV_PM_OPS(cdns_spi_dev_pm_ops, cdns_spi_suspend,
- cdns_spi_resume);
+static const struct dev_pm_ops cdns_spi_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(cdns_spi_runtime_suspend,
+ cdns_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(cdns_spi_suspend, cdns_spi_resume)
+};
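[annotation] Because the probe path also sets master->auto_runtime_pm, the SPI core brackets each message pump with the runtime PM calls itself; roughly the following, with the autosuspend delay set to SPI_AUTOSUSPEND_TIMEOUT above (illustrative sketch, not code from this patch):

	int err;

	err = pm_runtime_get_sync(dev);		/* clocks on via cdns_spi_runtime_resume() */
	if (err < 0)
		return err;

	/* ... pump the queued spi_message ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* clocks gated after the idle timeout */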
static const struct of_device_id cdns_spi_of_match[] = {
{ .compatible = "xlnx,zynq-spi-r1p6" },
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index fddb7a3be..d36c11b73 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -23,7 +23,6 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
-#include <linux/edma.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
@@ -33,8 +32,6 @@
#include <linux/platform_data/spi-davinci.h>
-#define SPI_NO_RESOURCE ((resource_size_t)-1)
-
#define CS_DEFAULT 0xFF
#define SPIFMT_PHASE_MASK BIT(16)
@@ -130,8 +127,6 @@ struct davinci_spi {
struct dma_chan *dma_rx;
struct dma_chan *dma_tx;
- int dma_rx_chnum;
- int dma_tx_chnum;
struct davinci_spi_platform_data pdata;
@@ -797,35 +792,19 @@ static irqreturn_t davinci_spi_irq(s32 irq, void *data)
static int davinci_spi_request_dma(struct davinci_spi *dspi)
{
- dma_cap_mask_t mask;
struct device *sdev = dspi->bitbang.master->dev.parent;
- int r;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- dspi->dma_rx = dma_request_channel(mask, edma_filter_fn,
- &dspi->dma_rx_chnum);
- if (!dspi->dma_rx) {
- dev_err(sdev, "request RX DMA channel failed\n");
- r = -ENODEV;
- goto rx_dma_failed;
- }
+ dspi->dma_rx = dma_request_chan(sdev, "rx");
+ if (IS_ERR(dspi->dma_rx))
+ return PTR_ERR(dspi->dma_rx);
- dspi->dma_tx = dma_request_channel(mask, edma_filter_fn,
- &dspi->dma_tx_chnum);
- if (!dspi->dma_tx) {
- dev_err(sdev, "request TX DMA channel failed\n");
- r = -ENODEV;
- goto tx_dma_failed;
+ dspi->dma_tx = dma_request_chan(sdev, "tx");
+ if (IS_ERR(dspi->dma_tx)) {
+ dma_release_channel(dspi->dma_rx);
+ return PTR_ERR(dspi->dma_tx);
}
return 0;
-
-tx_dma_failed:
- dma_release_channel(dspi->dma_rx);
-rx_dma_failed:
- return r;
}
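[annotation] The contract change worth noting above: dma_request_chan() never returns NULL; errors, including -EPROBE_DEFER when the DMA controller has not probed yet, travel as ERR_PTR() values, which is what the probe-path hunk below checks for. Illustrative shape of the idiom:

	struct dma_chan *ch;

	ch = dma_request_chan(dev, "rx");
	if (IS_ERR(ch)) {
		if (PTR_ERR(ch) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* retry the whole probe later */
		ch = NULL;			/* otherwise fall back to PIO */
	}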
#if defined(CONFIG_OF)
@@ -936,8 +915,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
struct davinci_spi *dspi;
struct davinci_spi_platform_data *pdata;
struct resource *r;
- resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
- resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
int ret = 0;
u32 spipc0;
@@ -1044,27 +1021,15 @@ static int davinci_spi_probe(struct platform_device *pdev)
}
}
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (r)
- dma_rx_chan = r->start;
- r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (r)
- dma_tx_chan = r->start;
-
dspi->bitbang.txrx_bufs = davinci_spi_bufs;
- if (dma_rx_chan != SPI_NO_RESOURCE &&
- dma_tx_chan != SPI_NO_RESOURCE) {
- dspi->dma_rx_chnum = dma_rx_chan;
- dspi->dma_tx_chnum = dma_tx_chan;
-
- ret = davinci_spi_request_dma(dspi);
- if (ret)
- goto free_clk;
-
- dev_info(&pdev->dev, "DMA: supported\n");
- dev_info(&pdev->dev, "DMA: RX channel: %pa, TX channel: %pa, event queue: %d\n",
- &dma_rx_chan, &dma_tx_chan,
- pdata->dma_event_q);
+
+ ret = davinci_spi_request_dma(dspi);
+ if (ret == -EPROBE_DEFER) {
+ goto free_clk;
+ } else if (ret) {
+ dev_info(&pdev->dev, "DMA is not supported (%d)\n", ret);
+ dspi->dma_rx = NULL;
+ dspi->dma_tx = NULL;
}
dspi->get_rx = davinci_spi_rx_buf_u8;
@@ -1102,8 +1067,10 @@ static int davinci_spi_probe(struct platform_device *pdev)
return ret;
free_dma:
- dma_release_channel(dspi->dma_rx);
- dma_release_channel(dspi->dma_tx);
+ if (dspi->dma_rx) {
+ dma_release_channel(dspi->dma_rx);
+ dma_release_channel(dspi->dma_tx);
+ }
free_clk:
clk_disable_unprepare(dspi->clk);
free_master:
@@ -1134,6 +1101,11 @@ static int davinci_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(dspi->clk);
spi_master_put(master);
+ if (dspi->dma_rx) {
+ dma_release_channel(dspi->dma_rx);
+ dma_release_channel(dspi->dma_tx);
+ }
+
return 0;
}
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
index 3b7d91d94..b62a99caa 100644
--- a/drivers/spi/spi-dln2.c
+++ b/drivers/spi/spi-dln2.c
@@ -683,6 +683,7 @@ static int dln2_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct dln2_spi *dln2;
struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
int ret;
master = spi_alloc_master(&pdev->dev, sizeof(*dln2));
@@ -700,6 +701,7 @@ static int dln2_spi_probe(struct platform_device *pdev)
}
dln2->master = master;
+ dln2->master->dev.of_node = dev->of_node;
dln2->pdev = pdev;
dln2->port = pdata->port;
/* cs/mode can never be 0xff, so the first transfer will set them */
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index 332ccb053..ef7db75c9 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -67,7 +67,7 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dws->irq = pdev->irq;
/*
- * Specific handling for paltforms, like dma setup,
+ * Specific handling for platforms, like dma setup,
* clock rate, FIFO depth.
*/
if (desc) {
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index bb00be8d1..17a6387e2 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -567,7 +567,7 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
if (IS_ERR(txd)) {
ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
- dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
+ dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
msg->status = PTR_ERR(txd);
return;
}
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index c1a2d747b..9e9dadb52 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -121,18 +121,22 @@ enum dspi_trans_mode {
struct fsl_dspi_devtype_data {
enum dspi_trans_mode trans_mode;
+ u8 max_clock_factor;
};
static const struct fsl_dspi_devtype_data vf610_data = {
.trans_mode = DSPI_EOQ_MODE,
+ .max_clock_factor = 2,
};
static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
.trans_mode = DSPI_TCFQ_MODE,
+ .max_clock_factor = 8,
};
static const struct fsl_dspi_devtype_data ls2085a_data = {
.trans_mode = DSPI_TCFQ_MODE,
+ .max_clock_factor = 8,
};
struct fsl_dspi {
@@ -726,6 +730,9 @@ static int dspi_probe(struct platform_device *pdev)
}
clk_prepare_enable(dspi->clk);
+ master->max_speed_hz =
+ clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
+
init_waitqueue_head(&dspi->waitq);
platform_set_drvdata(pdev, master);
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7cb0c1921..8d85a3c34 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -245,7 +245,12 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
if (ret)
return ret;
- wait_for_completion(&mpc8xxx_spi->done);
+ /* Won't hang forever; the SPI bus sometimes loses interrupts... */
+ ret = wait_for_completion_timeout(&mpc8xxx_spi->done, 2 * HZ);
+ if (ret == 0)
+ dev_err(mpc8xxx_spi->dev,
+ "Transaction hanging up (left %d bytes)\n",
+ mpc8xxx_spi->count);
/* disable rx ints */
mpc8xxx_spi_write_reg(&reg_base->mask, 0);
@@ -539,16 +544,31 @@ void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
if (events & SPIE_NE) {
u32 rx_data, tmp;
u8 rx_data_8;
+ int rx_nr_bytes = 4;
+ int ret;
/* Spin until RX is done */
- while (SPIE_RXCNT(events) < min(4, mspi->len)) {
- cpu_relax();
- events = mpc8xxx_spi_read_reg(&reg_base->event);
+ if (SPIE_RXCNT(events) < min(4, mspi->len)) {
+ ret = spin_event_timeout(
+ !(SPIE_RXCNT(events =
+ mpc8xxx_spi_read_reg(&reg_base->event)) <
+ min(4, mspi->len)),
+ 10000, 0); /* 10 msec */
+ if (!ret)
+ dev_err(mspi->dev,
+ "tired waiting for SPIE_RXCNT\n");
}
if (mspi->len >= 4) {
rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
+ } else if (mspi->len <= 0) {
+ dev_err(mspi->dev,
+ "unexpected RX(SPIE_NE) interrupt occurred,\n"
+ "(local rxlen %d bytes, reg rxlen %d bytes)\n",
+ min(4, mspi->len), SPIE_RXCNT(events));
+ rx_nr_bytes = 0;
} else {
+ rx_nr_bytes = mspi->len;
tmp = mspi->len;
rx_data = 0;
while (tmp--) {
@@ -559,7 +579,7 @@ void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
rx_data <<= (4 - mspi->len) * 8;
}
- mspi->len -= 4;
+ mspi->len -= rx_nr_bytes;
if (mspi->rx)
mspi->get_rx(rx_data, mspi);
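Both fsl-espi hunks above replace unbounded waits with bounded ones. A hedged sketch of the completion-side pattern, assuming the same 2 second cap; foo_wait_done() is illustrative only:

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int foo_wait_done(struct device *dev, struct completion *done,
			 int bytes_left)
{
	unsigned long left = wait_for_completion_timeout(done, 2 * HZ);

	if (!left) {
		/* lost interrupt or stuck bus: report and bail out */
		dev_err(dev, "transaction timed out (%d bytes left)\n",
			bytes_left);
		return -ETIMEDOUT;
	}

	return 0;
}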
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
index 07e4ce827..3b1700939 100644
--- a/drivers/spi/spi-octeon.c
+++ b/drivers/spi/spi-octeon.c
@@ -175,6 +175,7 @@ err:
static int octeon_spi_probe(struct platform_device *pdev)
{
struct resource *res_mem;
+ void __iomem *reg_base;
struct spi_master *master;
struct octeon_spi *p;
int err = -ENOENT;
@@ -186,19 +187,13 @@ static int octeon_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- if (res_mem == NULL) {
- dev_err(&pdev->dev, "found no memory resource\n");
- err = -ENXIO;
- goto fail;
- }
- if (!devm_request_mem_region(&pdev->dev, res_mem->start,
- resource_size(res_mem), res_mem->name)) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
+ reg_base = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(reg_base)) {
+ err = PTR_ERR(reg_base);
goto fail;
}
- p->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start,
- resource_size(res_mem));
+
+ p->register_base = (u64)reg_base;
master->num_chipselect = 4;
master->mode_bits = SPI_CPHA |
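The octeon conversion collapses the resource check, region request and ioremap into a single devm_ioremap_resource() call. A minimal sketch of the idiom, under the assumption of one MEM resource; foo_map_regs() is hypothetical:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *foo_map_regs(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* a NULL res, a busy region and a failed map all become ERR_PTRs */
	return devm_ioremap_resource(&pdev->dev, res);
}

The caller only needs IS_ERR()/PTR_ERR(), and the mapping is released automatically on driver detach.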
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 0caa3c8be..1d237e93a 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/omap-dma.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/err.h>
@@ -103,9 +102,6 @@ struct omap2_mcspi_dma {
struct dma_chan *dma_tx;
struct dma_chan *dma_rx;
- int dma_tx_sync_dev;
- int dma_rx_sync_dev;
-
struct completion dma_tx_completion;
struct completion dma_rx_completion;
@@ -964,8 +960,7 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
struct spi_master *master = spi->master;
struct omap2_mcspi *mcspi;
struct omap2_mcspi_dma *mcspi_dma;
- dma_cap_mask_t mask;
- unsigned sig;
+ int ret = 0;
mcspi = spi_master_get_devdata(master);
mcspi_dma = mcspi->dma_channels + spi->chip_select;
@@ -973,34 +968,25 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
init_completion(&mcspi_dma->dma_rx_completion);
init_completion(&mcspi_dma->dma_tx_completion);
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- sig = mcspi_dma->dma_rx_sync_dev;
-
- mcspi_dma->dma_rx =
- dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
- &sig, &master->dev,
- mcspi_dma->dma_rx_ch_name);
- if (!mcspi_dma->dma_rx)
+ mcspi_dma->dma_rx = dma_request_chan(&master->dev,
+ mcspi_dma->dma_rx_ch_name);
+ if (IS_ERR(mcspi_dma->dma_rx)) {
+ ret = PTR_ERR(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
goto no_dma;
+ }
- sig = mcspi_dma->dma_tx_sync_dev;
- mcspi_dma->dma_tx =
- dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
- &sig, &master->dev,
- mcspi_dma->dma_tx_ch_name);
-
- if (!mcspi_dma->dma_tx) {
+ mcspi_dma->dma_tx = dma_request_chan(&master->dev,
+ mcspi_dma->dma_tx_ch_name);
+ if (IS_ERR(mcspi_dma->dma_tx)) {
+ ret = PTR_ERR(mcspi_dma->dma_tx);
+ mcspi_dma->dma_tx = NULL;
dma_release_channel(mcspi_dma->dma_rx);
mcspi_dma->dma_rx = NULL;
- goto no_dma;
}
- return 0;
-
no_dma:
- dev_warn(&spi->dev, "not using DMA for McSPI\n");
- return -EAGAIN;
+ return ret;
}
static int omap2_mcspi_setup(struct spi_device *spi)
@@ -1039,8 +1025,9 @@ static int omap2_mcspi_setup(struct spi_device *spi)
if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
ret = omap2_mcspi_request_dma(spi);
- if (ret < 0 && ret != -EAGAIN)
- return ret;
+ if (ret)
+ dev_warn(&spi->dev, "not using DMA for McSPI (%d)\n",
+ ret);
}
ret = pm_runtime_get_sync(mcspi->dev);
@@ -1434,42 +1421,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
}
for (i = 0; i < master->num_chipselect; i++) {
- char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name;
- char *dma_tx_ch_name = mcspi->dma_channels[i].dma_tx_ch_name;
- struct resource *dma_res;
-
- sprintf(dma_rx_ch_name, "rx%d", i);
- if (!pdev->dev.of_node) {
- dma_res =
- platform_get_resource_byname(pdev,
- IORESOURCE_DMA,
- dma_rx_ch_name);
- if (!dma_res) {
- dev_dbg(&pdev->dev,
- "cannot get DMA RX channel\n");
- status = -ENODEV;
- break;
- }
-
- mcspi->dma_channels[i].dma_rx_sync_dev =
- dma_res->start;
- }
- sprintf(dma_tx_ch_name, "tx%d", i);
- if (!pdev->dev.of_node) {
- dma_res =
- platform_get_resource_byname(pdev,
- IORESOURCE_DMA,
- dma_tx_ch_name);
- if (!dma_res) {
- dev_dbg(&pdev->dev,
- "cannot get DMA TX channel\n");
- status = -ENODEV;
- break;
- }
-
- mcspi->dma_channels[i].dma_tx_sync_dev =
- dma_res->start;
- }
+ sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
+ sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
}
if (status < 0)
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
new file mode 100644
index 000000000..ca3c8d94b
--- /dev/null
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -0,0 +1,727 @@
+/*
+ * PIC32 Quad SPI controller driver.
+ *
+ * Purna Chandra Mandal <purna.mandal@microchip.com>
+ * Copyright (c) 2016, Microchip Technology Inc.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+/* SQI registers */
+#define PESQI_XIP_CONF1_REG 0x00
+#define PESQI_XIP_CONF2_REG 0x04
+#define PESQI_CONF_REG 0x08
+#define PESQI_CTRL_REG 0x0C
+#define PESQI_CLK_CTRL_REG 0x10
+#define PESQI_CMD_THRES_REG 0x14
+#define PESQI_INT_THRES_REG 0x18
+#define PESQI_INT_ENABLE_REG 0x1C
+#define PESQI_INT_STAT_REG 0x20
+#define PESQI_TX_DATA_REG 0x24
+#define PESQI_RX_DATA_REG 0x28
+#define PESQI_STAT1_REG 0x2C
+#define PESQI_STAT2_REG 0x30
+#define PESQI_BD_CTRL_REG 0x34
+#define PESQI_BD_CUR_ADDR_REG 0x38
+#define PESQI_BD_BASE_ADDR_REG 0x40
+#define PESQI_BD_STAT_REG 0x44
+#define PESQI_BD_POLL_CTRL_REG 0x48
+#define PESQI_BD_TX_DMA_STAT_REG 0x4C
+#define PESQI_BD_RX_DMA_STAT_REG 0x50
+#define PESQI_THRES_REG 0x54
+#define PESQI_INT_SIGEN_REG 0x58
+
+/* PESQI_CONF_REG fields */
+#define PESQI_MODE 0x7
+#define PESQI_MODE_BOOT 0
+#define PESQI_MODE_PIO 1
+#define PESQI_MODE_DMA 2
+#define PESQI_MODE_XIP 3
+#define PESQI_MODE_SHIFT 0
+#define PESQI_CPHA BIT(3)
+#define PESQI_CPOL BIT(4)
+#define PESQI_LSBF BIT(5)
+#define PESQI_RXLATCH BIT(7)
+#define PESQI_SERMODE BIT(8)
+#define PESQI_WP_EN BIT(9)
+#define PESQI_HOLD_EN BIT(10)
+#define PESQI_BURST_EN BIT(12)
+#define PESQI_CS_CTRL_HW BIT(15)
+#define PESQI_SOFT_RESET BIT(16)
+#define PESQI_LANES_SHIFT 20
+#define PESQI_SINGLE_LANE 0
+#define PESQI_DUAL_LANE 1
+#define PESQI_QUAD_LANE 2
+#define PESQI_CSEN_SHIFT 24
+#define PESQI_EN BIT(23)
+
+/* PESQI_CLK_CTRL_REG fields */
+#define PESQI_CLK_EN BIT(0)
+#define PESQI_CLK_STABLE BIT(1)
+#define PESQI_CLKDIV_SHIFT 8
+#define PESQI_CLKDIV 0xff
+
+/* PESQI_INT_THR/CMD_THR_REG */
+#define PESQI_TXTHR_MASK 0x1f
+#define PESQI_TXTHR_SHIFT 8
+#define PESQI_RXTHR_MASK 0x1f
+#define PESQI_RXTHR_SHIFT 0
+
+/* PESQI_INT_EN/INT_STAT/INT_SIG_EN_REG */
+#define PESQI_TXEMPTY BIT(0)
+#define PESQI_TXFULL BIT(1)
+#define PESQI_TXTHR BIT(2)
+#define PESQI_RXEMPTY BIT(3)
+#define PESQI_RXFULL BIT(4)
+#define PESQI_RXTHR BIT(5)
+#define PESQI_BDDONE BIT(9) /* BD processing complete */
+#define PESQI_PKTCOMP BIT(10) /* packet processing complete */
+#define PESQI_DMAERR BIT(11) /* error */
+
+/* PESQI_BD_CTRL_REG */
+#define PESQI_DMA_EN BIT(0) /* enable DMA engine */
+#define PESQI_POLL_EN BIT(1) /* enable polling */
+#define PESQI_BDP_START BIT(2) /* start BD processor */
+
+/* PESQI controller buffer descriptor */
+struct buf_desc {
+ u32 bd_ctrl; /* control */
+ u32 bd_status; /* reserved */
+ u32 bd_addr; /* DMA buffer addr */
+ u32 bd_nextp; /* next item in chain */
+};
+
+/* bd_ctrl */
+#define BD_BUFLEN 0x1ff
+#define BD_CBD_INT_EN BIT(16) /* Current BD is processed */
+#define BD_PKT_INT_EN BIT(17) /* All BDs of PKT processed */
+#define BD_LIFM BIT(18) /* last data of pkt */
+#define BD_LAST BIT(19) /* end of list */
+#define BD_DATA_RECV BIT(20) /* receive data */
+#define BD_DDR BIT(21) /* DDR mode */
+#define BD_DUAL BIT(22) /* Dual SPI */
+#define BD_QUAD BIT(23) /* Quad SPI */
+#define BD_LSBF BIT(25) /* LSB First */
+#define BD_STAT_CHECK BIT(27) /* Status poll */
+#define BD_DEVSEL_SHIFT 28 /* CS */
+#define BD_CS_DEASSERT BIT(30) /* de-assert CS after current BD */
+#define BD_EN BIT(31) /* BD owned by H/W */
+
+/**
+ * struct ring_desc - Representation of SQI ring descriptor
+ * @list: list element to add to free or used list.
+ * @bd: PESQI controller buffer descriptor
+ * @bd_dma: DMA address of PESQI controller buffer descriptor
+ * @xfer_len: transfer length
+ */
+struct ring_desc {
+ struct list_head list;
+ struct buf_desc *bd;
+ dma_addr_t bd_dma;
+ u32 xfer_len;
+};
+
+/* Global constants */
+#define PESQI_BD_BUF_LEN_MAX 256
+#define PESQI_BD_COUNT 256 /* max 64KB data per spi message */
+
+struct pic32_sqi {
+ void __iomem *regs;
+ struct clk *sys_clk;
+ struct clk *base_clk; /* drives spi clock */
+ struct spi_master *master;
+ int irq;
+ struct completion xfer_done;
+ struct ring_desc *ring;
+ void *bd;
+ dma_addr_t bd_dma;
+ struct list_head bd_list_free; /* free */
+ struct list_head bd_list_used; /* allocated */
+ struct spi_device *cur_spi;
+ u32 cur_speed;
+ u8 cur_mode;
+};
+
+static inline void pic32_setbits(void __iomem *reg, u32 set)
+{
+ writel(readl(reg) | set, reg);
+}
+
+static inline void pic32_clrbits(void __iomem *reg, u32 clr)
+{
+ writel(readl(reg) & ~clr, reg);
+}
+
+static int pic32_sqi_set_clk_rate(struct pic32_sqi *sqi, u32 sck)
+{
+ u32 val, div;
+
+ /* div = base_clk / (2 * spi_clk) */
+ div = clk_get_rate(sqi->base_clk) / (2 * sck);
+ div &= PESQI_CLKDIV;
+
+ val = readl(sqi->regs + PESQI_CLK_CTRL_REG);
+ /* apply new divider */
+ val &= ~(PESQI_CLK_STABLE | (PESQI_CLKDIV << PESQI_CLKDIV_SHIFT));
+ val |= div << PESQI_CLKDIV_SHIFT;
+ writel(val, sqi->regs + PESQI_CLK_CTRL_REG);
+
+ /* wait for stability */
+ return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val,
+ val & PESQI_CLK_STABLE, 1, 5000);
+}
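+
+/* Worked example (hypothetical rates, not from a datasheet): with
+ * base_clk at 200 MHz and a requested sck of 50 MHz, the divider is
+ * 200000000 / (2 * 50000000) = 2, which yields exactly 50 MHz on the
+ * bus; masking with PESQI_CLKDIV bounds the divider at 255.
+ */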
+
+static inline void pic32_sqi_enable_int(struct pic32_sqi *sqi)
+{
+ u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP;
+
+ writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
+ /* INT_SIGEN works as interrupt-gate to INTR line */
+ writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);
+}
+
+static inline void pic32_sqi_disable_int(struct pic32_sqi *sqi)
+{
+ writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
+ writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
+}
+
+static irqreturn_t pic32_sqi_isr(int irq, void *dev_id)
+{
+ struct pic32_sqi *sqi = dev_id;
+ u32 enable, status;
+
+ enable = readl(sqi->regs + PESQI_INT_ENABLE_REG);
+ status = readl(sqi->regs + PESQI_INT_STAT_REG);
+
+ /* check for a spurious interrupt */
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & PESQI_DMAERR) {
+ enable = 0;
+ goto irq_done;
+ }
+
+ if (status & PESQI_TXTHR)
+ enable &= ~(PESQI_TXTHR | PESQI_TXFULL | PESQI_TXEMPTY);
+
+ if (status & PESQI_RXTHR)
+ enable &= ~(PESQI_RXTHR | PESQI_RXFULL | PESQI_RXEMPTY);
+
+ if (status & PESQI_BDDONE)
+ enable &= ~PESQI_BDDONE;
+
+ /* packet processing completed */
+ if (status & PESQI_PKTCOMP) {
+ /* mask all interrupts */
+ enable = 0;
+ /* complete transaction */
+ complete(&sqi->xfer_done);
+ }
+
+irq_done:
+ /* interrupts are sticky, so mask when handled */
+ writel(enable, sqi->regs + PESQI_INT_ENABLE_REG);
+
+ return IRQ_HANDLED;
+}
+
+static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
+{
+ struct ring_desc *rdesc;
+
+ if (list_empty(&sqi->bd_list_free))
+ return NULL;
+
+ rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
+ list_del(&rdesc->list);
+ list_add_tail(&rdesc->list, &sqi->bd_list_used);
+ return rdesc;
+}
+
+static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
+{
+ list_del(&rdesc->list);
+ list_add(&rdesc->list, &sqi->bd_list_free);
+}
+
+static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
+ struct spi_message *mesg,
+ struct spi_transfer *xfer)
+{
+ struct spi_device *spi = mesg->spi;
+ struct scatterlist *sg, *sgl;
+ struct ring_desc *rdesc;
+ struct buf_desc *bd;
+ int nents, i;
+ u32 bd_ctrl;
+ u32 nbits;
+
+ /* Device selection */
+ bd_ctrl = spi->chip_select << BD_DEVSEL_SHIFT;
+
+ /* half-duplex: select transfer buffer, direction and lane */
+ if (xfer->rx_buf) {
+ bd_ctrl |= BD_DATA_RECV;
+ nbits = xfer->rx_nbits;
+ sgl = xfer->rx_sg.sgl;
+ nents = xfer->rx_sg.nents;
+ } else {
+ nbits = xfer->tx_nbits;
+ sgl = xfer->tx_sg.sgl;
+ nents = xfer->tx_sg.nents;
+ }
+
+ if (nbits & SPI_NBITS_QUAD)
+ bd_ctrl |= BD_QUAD;
+ else if (nbits & SPI_NBITS_DUAL)
+ bd_ctrl |= BD_DUAL;
+
+ /* LSB first */
+ if (spi->mode & SPI_LSB_FIRST)
+ bd_ctrl |= BD_LSBF;
+
+ /* ownership to hardware */
+ bd_ctrl |= BD_EN;
+
+ for_each_sg(sgl, sg, nents, i) {
+ /* get ring descriptor */
+ rdesc = ring_desc_get(sqi);
+ if (!rdesc)
+ break;
+
+ bd = rdesc->bd;
+
+ /* BD CTRL: length */
+ rdesc->xfer_len = sg_dma_len(sg);
+ bd->bd_ctrl = bd_ctrl;
+ bd->bd_ctrl |= rdesc->xfer_len;
+
+ /* BD STAT */
+ bd->bd_status = 0;
+
+ /* BD BUFFER ADDRESS */
+ bd->bd_addr = sg->dma_address;
+ }
+
+ return 0;
+}
+
+static int pic32_sqi_prepare_hardware(struct spi_master *master)
+{
+ struct pic32_sqi *sqi = spi_master_get_devdata(master);
+
+ /* enable spi interface */
+ pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
+ /* enable spi clk */
+ pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
+
+ return 0;
+}
+
+static bool pic32_sqi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *x)
+{
+ /* Do DMA irrespective of transfer size */
+ return true;
+}
+
+static int pic32_sqi_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_device *spi = msg->spi;
+ struct ring_desc *rdesc, *next;
+ struct spi_transfer *xfer;
+ struct pic32_sqi *sqi;
+ int ret = 0, mode;
+ u32 val;
+
+ sqi = spi_master_get_devdata(master);
+
+ reinit_completion(&sqi->xfer_done);
+ msg->actual_length = 0;
+
+ /* We can't handle spi_transfer specific "speed_hz", "bits_per_word"
+ * and "delay_usecs". But spi_device specific speed and mode change
+ * can be handled at best during spi chip-select switch.
+ */
+ if (sqi->cur_spi != spi) {
+ /* set spi speed */
+ if (sqi->cur_speed != spi->max_speed_hz) {
+ sqi->cur_speed = spi->max_speed_hz;
+ ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz);
+ if (ret)
+ dev_warn(&spi->dev, "set_clk, %d\n", ret);
+ }
+
+ /* set spi mode */
+ mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST);
+ if (sqi->cur_mode != mode) {
+ val = readl(sqi->regs + PESQI_CONF_REG);
+ val &= ~(PESQI_CPOL | PESQI_CPHA | PESQI_LSBF);
+ if (mode & SPI_CPOL)
+ val |= PESQI_CPOL;
+ if (mode & SPI_LSB_FIRST)
+ val |= PESQI_LSBF;
+ val |= PESQI_CPHA;
+ writel(val, sqi->regs + PESQI_CONF_REG);
+
+ sqi->cur_mode = mode;
+ }
+ sqi->cur_spi = spi;
+ }
+
+ /* prepare hardware desc-list(BD) for transfer(s) */
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ ret = pic32_sqi_one_transfer(sqi, msg, xfer);
+ if (ret) {
+ dev_err(&spi->dev, "xfer %p err\n", xfer);
+ goto xfer_out;
+ }
+ }
+
+ /* BDs are prepared and chained. Now mark LAST_BD and CS_DEASSERT on
+ * the last element of the list.
+ */
+ rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list);
+ rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT |
+ BD_LIFM | BD_PKT_INT_EN;
+
+ /* set base address BD list for DMA engine */
+ rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list);
+ writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG);
+
+ /* enable interrupt */
+ pic32_sqi_enable_int(sqi);
+
+ /* enable DMA engine */
+ val = PESQI_DMA_EN | PESQI_POLL_EN | PESQI_BDP_START;
+ writel(val, sqi->regs + PESQI_BD_CTRL_REG);
+
+ /* wait for xfer completion */
+ ret = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
+ if (ret <= 0) {
+ dev_err(&sqi->master->dev, "wait timedout/interrupted\n");
+ ret = -EIO;
+ msg->status = ret;
+ } else {
+ /* success */
+ msg->status = 0;
+ ret = 0;
+ }
+
+ /* disable DMA */
+ writel(0, sqi->regs + PESQI_BD_CTRL_REG);
+
+ pic32_sqi_disable_int(sqi);
+
+xfer_out:
+ list_for_each_entry_safe_reverse(rdesc, next,
+ &sqi->bd_list_used, list) {
+ /* Update total byte transferred */
+ msg->actual_length += rdesc->xfer_len;
+ /* release ring descr */
+ ring_desc_put(sqi, rdesc);
+ }
+ spi_finalize_current_message(spi->master);
+
+ return ret;
+}
+
+static int pic32_sqi_unprepare_hardware(struct spi_master *master)
+{
+ struct pic32_sqi *sqi = spi_master_get_devdata(master);
+
+ /* disable clk */
+ pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
+ /* disable spi */
+ pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
+
+ return 0;
+}
+
+static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
+{
+ struct ring_desc *rdesc;
+ struct buf_desc *bd;
+ int i;
+
+ /* allocate coherent DMAable memory for hardware buffer descriptors. */
+ sqi->bd = dma_zalloc_coherent(&sqi->master->dev,
+ sizeof(*bd) * PESQI_BD_COUNT,
+ &sqi->bd_dma, GFP_DMA32);
+ if (!sqi->bd) {
+ dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
+ return -ENOMEM;
+ }
+
+ /* allocate software ring descriptors */
+ sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
+ if (!sqi->ring) {
+ dma_free_coherent(&sqi->master->dev,
+ sizeof(*bd) * PESQI_BD_COUNT,
+ sqi->bd, sqi->bd_dma);
+ return -ENOMEM;
+ }
+
+ bd = (struct buf_desc *)sqi->bd;
+
+ INIT_LIST_HEAD(&sqi->bd_list_free);
+ INIT_LIST_HEAD(&sqi->bd_list_used);
+
+ /* initialize ring-desc */
+ for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
+ INIT_LIST_HEAD(&rdesc->list);
+ rdesc->bd = &bd[i];
+ rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd;
+ list_add_tail(&rdesc->list, &sqi->bd_list_free);
+ }
+
+ /* Prepare BD: chain to next BD(s) */
+ for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++)
+ bd[i].bd_nextp = rdesc[i + 1].bd_dma;
+ bd[PESQI_BD_COUNT - 1].bd_nextp = 0;
+
+ return 0;
+}
+
+static void ring_desc_ring_free(struct pic32_sqi *sqi)
+{
+ dma_free_coherent(&sqi->master->dev,
+ sizeof(struct buf_desc) * PESQI_BD_COUNT,
+ sqi->bd, sqi->bd_dma);
+ kfree(sqi->ring);
+}
+
+static void pic32_sqi_hw_init(struct pic32_sqi *sqi)
+{
+ unsigned long flags;
+ u32 val;
+
+ /* Soft-reset of the PESQI controller triggers interrupts.
+ * We are not yet ready to handle them, so disable CPU
+ * interrupts for the time being.
+ */
+ local_irq_save(flags);
+
+ /* assert soft-reset */
+ writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG);
+
+ /* wait until clear */
+ readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val,
+ !(val & PESQI_SOFT_RESET), 1, 5000);
+
+ /* disable all interrupts */
+ pic32_sqi_disable_int(sqi);
+
+ /* Now it is safe to re-enable CPU interrupts */
+ local_irq_restore(flags);
+
+ /* tx and rx fifo interrupt threshold */
+ val = readl(sqi->regs + PESQI_CMD_THRES_REG);
+ val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
+ val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
+ val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
+ writel(val, sqi->regs + PESQI_CMD_THRES_REG);
+
+ val = readl(sqi->regs + PESQI_INT_THRES_REG);
+ val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
+ val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
+ val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
+ writel(val, sqi->regs + PESQI_INT_THRES_REG);
+
+ /* default configuration */
+ val = readl(sqi->regs + PESQI_CONF_REG);
+
+ /* set mode: DMA */
+ val &= ~PESQI_MODE;
+ val |= PESQI_MODE_DMA << PESQI_MODE_SHIFT;
+ writel(val, sqi->regs + PESQI_CONF_REG);
+
+ /* DATAEN - SQIID0-ID3 */
+ val |= PESQI_QUAD_LANE << PESQI_LANES_SHIFT;
+
+ /* burst/INCR4 enable */
+ val |= PESQI_BURST_EN;
+
+ /* CSEN - all CS */
+ val |= 3U << PESQI_CSEN_SHIFT;
+ writel(val, sqi->regs + PESQI_CONF_REG);
+
+ /* write poll count */
+ writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG);
+
+ sqi->cur_speed = 0;
+ sqi->cur_mode = -1;
+}
+
+static int pic32_sqi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct pic32_sqi *sqi;
+ struct resource *reg;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
+ if (!master)
+ return -ENOMEM;
+
+ sqi = spi_master_get_devdata(master);
+ sqi->master = master;
+
+ reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sqi->regs = devm_ioremap_resource(&pdev->dev, reg);
+ if (IS_ERR(sqi->regs)) {
+ ret = PTR_ERR(sqi->regs);
+ goto err_free_master;
+ }
+
+ /* irq */
+ sqi->irq = platform_get_irq(pdev, 0);
+ if (sqi->irq < 0) {
+ dev_err(&pdev->dev, "no irq found\n");
+ ret = sqi->irq;
+ goto err_free_master;
+ }
+
+ /* clocks */
+ sqi->sys_clk = devm_clk_get(&pdev->dev, "reg_ck");
+ if (IS_ERR(sqi->sys_clk)) {
+ ret = PTR_ERR(sqi->sys_clk);
+ dev_err(&pdev->dev, "no sys_clk ?\n");
+ goto err_free_master;
+ }
+
+ sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck");
+ if (IS_ERR(sqi->base_clk)) {
+ ret = PTR_ERR(sqi->base_clk);
+ dev_err(&pdev->dev, "no base clk ?\n");
+ goto err_free_master;
+ }
+
+ ret = clk_prepare_enable(sqi->sys_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "sys clk enable failed\n");
+ goto err_free_master;
+ }
+
+ ret = clk_prepare_enable(sqi->base_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "base clk enable failed\n");
+ clk_disable_unprepare(sqi->sys_clk);
+ goto err_free_master;
+ }
+
+ init_completion(&sqi->xfer_done);
+
+ /* initialize hardware */
+ pic32_sqi_hw_init(sqi);
+
+ /* allocate buffers & descriptors */
+ ret = ring_desc_ring_alloc(sqi);
+ if (ret) {
+ dev_err(&pdev->dev, "ring alloc failed\n");
+ goto err_disable_clk;
+ }
+
+ /* install irq handlers */
+ ret = request_irq(sqi->irq, pic32_sqi_isr, 0,
+ dev_name(&pdev->dev), sqi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq(%d), failed\n", sqi->irq);
+ goto err_free_ring;
+ }
+
+ /* register master */
+ master->num_chipselect = 2;
+ master->max_speed_hz = clk_get_rate(sqi->base_clk);
+ master->dma_alignment = 32;
+ master->max_dma_len = PESQI_BD_BUF_LEN_MAX;
+ master->dev.of_node = of_node_get(pdev->dev.of_node);
+ master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
+ SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->can_dma = pic32_sqi_can_dma;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ master->transfer_one_message = pic32_sqi_one_message;
+ master->prepare_transfer_hardware = pic32_sqi_prepare_hardware;
+ master->unprepare_transfer_hardware = pic32_sqi_unprepare_hardware;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&master->dev, "failed registering spi master\n");
+ free_irq(sqi->irq, sqi);
+ goto err_free_ring;
+ }
+
+ platform_set_drvdata(pdev, sqi);
+
+ return 0;
+
+err_free_ring:
+ ring_desc_ring_free(sqi);
+
+err_disable_clk:
+ clk_disable_unprepare(sqi->base_clk);
+ clk_disable_unprepare(sqi->sys_clk);
+
+err_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int pic32_sqi_remove(struct platform_device *pdev)
+{
+ struct pic32_sqi *sqi = platform_get_drvdata(pdev);
+
+ /* release resources */
+ free_irq(sqi->irq, sqi);
+ ring_desc_ring_free(sqi);
+
+ /* disable clk */
+ clk_disable_unprepare(sqi->base_clk);
+ clk_disable_unprepare(sqi->sys_clk);
+
+ return 0;
+}
+
+static const struct of_device_id pic32_sqi_of_ids[] = {
+ {.compatible = "microchip,pic32mzda-sqi",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids);
+
+static struct platform_driver pic32_sqi_driver = {
+ .driver = {
+ .name = "sqi-pic32",
+ .of_match_table = of_match_ptr(pic32_sqi_of_ids),
+ },
+ .probe = pic32_sqi_probe,
+ .remove = pic32_sqi_remove,
+};
+
+module_platform_driver(pic32_sqi_driver);
+
+MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
+MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SQI controller.");
+MODULE_LICENSE("GPL v2");
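The bd_list_free/bd_list_used pair above implements a simple descriptor ring: ring_desc_get() migrates a descriptor from the free to the used list and ring_desc_put() returns it. A hedged usage sketch meant to sit alongside the driver code above; foo_fill_one() is hypothetical:

static int foo_fill_one(struct pic32_sqi *sqi, dma_addr_t buf, u32 len)
{
	struct ring_desc *rdesc = ring_desc_get(sqi);	/* free -> used */

	if (!rdesc)
		return -EBUSY;	/* all PESQI_BD_COUNT descriptors in flight */

	rdesc->xfer_len = len;
	rdesc->bd->bd_addr = buf;
	rdesc->bd->bd_status = 0;
	rdesc->bd->bd_ctrl = BD_EN | (len & BD_BUFLEN);

	return 0;	/* ring_desc_put() later returns it to the free list */
}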
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
new file mode 100644
index 000000000..73db87f80
--- /dev/null
+++ b/drivers/spi/spi-pic32.c
@@ -0,0 +1,878 @@
+/*
+ * Microchip PIC32 SPI controller driver.
+ *
+ * Purna Chandra Mandal <purna.mandal@microchip.com>
+ * Copyright (c) 2016, Microchip Technology Inc.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+/* SPI controller registers */
+struct pic32_spi_regs {
+ u32 ctrl;
+ u32 ctrl_clr;
+ u32 ctrl_set;
+ u32 ctrl_inv;
+ u32 status;
+ u32 status_clr;
+ u32 status_set;
+ u32 status_inv;
+ u32 buf;
+ u32 dontuse[3];
+ u32 baud;
+ u32 dontuse2[3];
+ u32 ctrl2;
+ u32 ctrl2_clr;
+ u32 ctrl2_set;
+ u32 ctrl2_inv;
+};
+
+/* Bit fields of SPI Control Register */
+#define CTRL_RX_INT_SHIFT 0 /* Rx interrupt generation */
+#define RX_FIFO_EMPTY 0
+#define RX_FIFO_NOT_EMPTY 1 /* not empty */
+#define RX_FIFO_HALF_FULL 2 /* at least half full */
+#define RX_FIFO_FULL 3 /* completely full */
+
+#define CTRL_TX_INT_SHIFT 2 /* TX interrupt generation */
+#define TX_FIFO_ALL_EMPTY 0 /* completely empty */
+#define TX_FIFO_EMPTY 1 /* empty */
+#define TX_FIFO_HALF_EMPTY 2 /* at least half empty */
+#define TX_FIFO_NOT_FULL 3 /* at least one empty slot */
+
+#define CTRL_MSTEN BIT(5) /* enable master mode */
+#define CTRL_CKP BIT(6) /* active low */
+#define CTRL_CKE BIT(8) /* Tx on falling edge */
+#define CTRL_SMP BIT(9) /* Rx at middle or end of tx */
+#define CTRL_BPW_MASK 0x03 /* bits per word/sample */
+#define CTRL_BPW_SHIFT 10
+#define PIC32_BPW_8 0
+#define PIC32_BPW_16 1
+#define PIC32_BPW_32 2
+#define CTRL_SIDL BIT(13) /* sleep when idle */
+#define CTRL_ON BIT(15) /* enable macro */
+#define CTRL_ENHBUF BIT(16) /* enable enhanced buffering */
+#define CTRL_MCLKSEL BIT(23) /* select clock source */
+#define CTRL_MSSEN BIT(28) /* macro driven /SS */
+#define CTRL_FRMEN BIT(31) /* enable framing mode */
+
+/* Bit fields of SPI Status Register */
+#define STAT_RF_EMPTY BIT(5) /* RX Fifo empty */
+#define STAT_RX_OV BIT(6) /* err, s/w needs to clear */
+#define STAT_TX_UR BIT(8) /* UR in Framed SPI modes */
+#define STAT_FRM_ERR BIT(12) /* Multiple Frame Sync pulse */
+#define STAT_TF_LVL_MASK 0x1F
+#define STAT_TF_LVL_SHIFT 16
+#define STAT_RF_LVL_MASK 0x1F
+#define STAT_RF_LVL_SHIFT 24
+
+/* Bit fields of SPI Baud Register */
+#define BAUD_MASK 0x1ff
+
+/* Bit fields of SPI Control2 Register */
+#define CTRL2_TX_UR_EN BIT(10) /* Enable int on Tx under-run */
+#define CTRL2_RX_OV_EN BIT(11) /* Enable int on Rx over-run */
+#define CTRL2_FRM_ERR_EN BIT(12) /* Enable frame err int */
+
+/* Minimum DMA transfer size */
+#define PIC32_DMA_LEN_MIN 64
+
+struct pic32_spi {
+ dma_addr_t dma_base;
+ struct pic32_spi_regs __iomem *regs;
+ int fault_irq;
+ int rx_irq;
+ int tx_irq;
+ u32 fifo_n_byte; /* FIFO depth in bytes */
+ struct clk *clk;
+ struct spi_master *master;
+ /* Current controller setting */
+ u32 speed_hz; /* spi-clk rate */
+ u32 mode;
+ u32 bits_per_word;
+ u32 fifo_n_elm; /* FIFO depth in words */
+#define PIC32F_DMA_PREP 0 /* DMA chnls configured */
+ unsigned long flags;
+ /* Current transfer state */
+ struct completion xfer_done;
+ /* PIO transfer specific */
+ const void *tx;
+ const void *tx_end;
+ const void *rx;
+ const void *rx_end;
+ int len;
+ void (*rx_fifo)(struct pic32_spi *);
+ void (*tx_fifo)(struct pic32_spi *);
+};
+
+static inline void pic32_spi_enable(struct pic32_spi *pic32s)
+{
+ writel(CTRL_ON | CTRL_SIDL, &pic32s->regs->ctrl_set);
+}
+
+static inline void pic32_spi_disable(struct pic32_spi *pic32s)
+{
+ writel(CTRL_ON | CTRL_SIDL, &pic32s->regs->ctrl_clr);
+
+ /* avoid SPI register reads/writes on the immediately following CPU clock */
+ ndelay(20);
+}
+
+static void pic32_spi_set_clk_rate(struct pic32_spi *pic32s, u32 spi_ck)
+{
+ u32 div;
+
+ /* div = clk_in / (2 * spi_ck) - 1 */
+ div = DIV_ROUND_CLOSEST(clk_get_rate(pic32s->clk), 2 * spi_ck) - 1;
+
+ writel(div & BAUD_MASK, &pic32s->regs->baud);
+}
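+
+/* Worked example (hypothetical rates): with a 100 MHz peripheral clock
+ * and a requested 25 MHz spi_ck, div = 100000000 / (2 * 25000000) - 1 = 1,
+ * and the resulting rate is clk / (2 * (div + 1)) = 25 MHz.
+ */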
+
+static inline u32 pic32_rx_fifo_level(struct pic32_spi *pic32s)
+{
+ u32 sr = readl(&pic32s->regs->status);
+
+ return (sr >> STAT_RF_LVL_SHIFT) & STAT_RF_LVL_MASK;
+}
+
+static inline u32 pic32_tx_fifo_level(struct pic32_spi *pic32s)
+{
+ u32 sr = readl(&pic32s->regs->status);
+
+ return (sr >> STAT_TF_LVL_SHIFT) & STAT_TF_LVL_MASK;
+}
+
+/* Return the max entries we can fill into tx fifo */
+static u32 pic32_tx_max(struct pic32_spi *pic32s, int n_bytes)
+{
+ u32 tx_left, tx_room, rxtx_gap;
+
+ tx_left = (pic32s->tx_end - pic32s->tx) / n_bytes;
+ tx_room = pic32s->fifo_n_elm - pic32_tx_fifo_level(pic32s);
+
+ /*
+ * Another concern is the tx/rx mismatch: we
+ * thought of using (pic32s->fifo_n_byte - rxfl - txfl) as
+ * the maximum value for tx, but it doesn't cover the
+ * data that is outside the tx/rx fifos and inside the
+ * shift registers. So the limit is enforced from the
+ * software side instead.
+ */
+ rxtx_gap = ((pic32s->rx_end - pic32s->rx) -
+ (pic32s->tx_end - pic32s->tx)) / n_bytes;
+ return min3(tx_left, tx_room, (u32)(pic32s->fifo_n_elm - rxtx_gap));
+}
+
+/* Return the max entries we should read out of rx fifo */
+static u32 pic32_rx_max(struct pic32_spi *pic32s, int n_bytes)
+{
+ u32 rx_left = (pic32s->rx_end - pic32s->rx) / n_bytes;
+
+ return min_t(u32, rx_left, pic32_rx_fifo_level(pic32s));
+}
+
+#define BUILD_SPI_FIFO_RW(__name, __type, __bwl) \
+static void pic32_spi_rx_##__name(struct pic32_spi *pic32s) \
+{ \
+ __type v; \
+ u32 mx = pic32_rx_max(pic32s, sizeof(__type)); \
+ for (; mx; mx--) { \
+ v = read##__bwl(&pic32s->regs->buf); \
+ if (pic32s->rx_end - pic32s->len) \
+ *(__type *)(pic32s->rx) = v; \
+ pic32s->rx += sizeof(__type); \
+ } \
+} \
+ \
+static void pic32_spi_tx_##__name(struct pic32_spi *pic32s) \
+{ \
+ __type v; \
+ u32 mx = pic32_tx_max(pic32s, sizeof(__type)); \
+ for (; mx ; mx--) { \
+ v = (__type)~0U; \
+ if (pic32s->tx_end - pic32s->len) \
+ v = *(__type *)(pic32s->tx); \
+ write##__bwl(v, &pic32s->regs->buf); \
+ pic32s->tx += sizeof(__type); \
+ } \
+}
+
+BUILD_SPI_FIFO_RW(byte, u8, b);
+BUILD_SPI_FIFO_RW(word, u16, w);
+BUILD_SPI_FIFO_RW(dword, u32, l);
+
+static void pic32_err_stop(struct pic32_spi *pic32s, const char *msg)
+{
+ /* disable all interrupts */
+ disable_irq_nosync(pic32s->fault_irq);
+ disable_irq_nosync(pic32s->rx_irq);
+ disable_irq_nosync(pic32s->tx_irq);
+
+ /* Show err message and abort xfer with err */
+ dev_err(&pic32s->master->dev, "%s\n", msg);
+ if (pic32s->master->cur_msg)
+ pic32s->master->cur_msg->status = -EIO;
+ complete(&pic32s->xfer_done);
+}
+
+static irqreturn_t pic32_spi_fault_irq(int irq, void *dev_id)
+{
+ struct pic32_spi *pic32s = dev_id;
+ u32 status;
+
+ status = readl(&pic32s->regs->status);
+
+ /* Error handling */
+ if (status & (STAT_RX_OV | STAT_TX_UR)) {
+ writel(STAT_RX_OV, &pic32s->regs->status_clr);
+ writel(STAT_TX_UR, &pic32s->regs->status_clr);
+ pic32_err_stop(pic32s, "err_irq: fifo ov/ur-run\n");
+ return IRQ_HANDLED;
+ }
+
+ if (status & STAT_FRM_ERR) {
+ pic32_err_stop(pic32s, "err_irq: frame error");
+ return IRQ_HANDLED;
+ }
+
+ if (!pic32s->master->cur_msg) {
+ pic32_err_stop(pic32s, "err_irq: no mesg");
+ return IRQ_NONE;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t pic32_spi_rx_irq(int irq, void *dev_id)
+{
+ struct pic32_spi *pic32s = dev_id;
+
+ pic32s->rx_fifo(pic32s);
+
+ /* rx complete? */
+ if (pic32s->rx_end == pic32s->rx) {
+ /* disable all interrupts */
+ disable_irq_nosync(pic32s->fault_irq);
+ disable_irq_nosync(pic32s->rx_irq);
+
+ /* complete current xfer */
+ complete(&pic32s->xfer_done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t pic32_spi_tx_irq(int irq, void *dev_id)
+{
+ struct pic32_spi *pic32s = dev_id;
+
+ pic32s->tx_fifo(pic32s);
+
+ /* tx complete? disable tx interrupt */
+ if (pic32s->tx_end == pic32s->tx)
+ disable_irq_nosync(pic32s->tx_irq);
+
+ return IRQ_HANDLED;
+}
+
+static void pic32_spi_dma_rx_notify(void *data)
+{
+ struct pic32_spi *pic32s = data;
+
+ complete(&pic32s->xfer_done);
+}
+
+static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
+ struct spi_transfer *xfer)
+{
+ struct spi_master *master = pic32s->master;
+ struct dma_async_tx_descriptor *desc_rx;
+ struct dma_async_tx_descriptor *desc_tx;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (!master->dma_rx || !master->dma_tx)
+ return -ENODEV;
+
+ desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
+ xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx) {
+ ret = -EINVAL;
+ goto err_dma;
+ }
+
+ desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
+ xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ ret = -EINVAL;
+ goto err_dma;
+ }
+
+ /* Put the callback on the RX transfer; it should finish last */
+ desc_rx->callback = pic32_spi_dma_rx_notify;
+ desc_rx->callback_param = pic32s;
+
+ cookie = dmaengine_submit(desc_rx);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ goto err_dma;
+
+ cookie = dmaengine_submit(desc_tx);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ goto err_dma_tx;
+
+ dma_async_issue_pending(master->dma_rx);
+ dma_async_issue_pending(master->dma_tx);
+
+ return 0;
+
+err_dma_tx:
+ dmaengine_terminate_all(master->dma_rx);
+err_dma:
+ return ret;
+}
+
+static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
+{
+ int buf_offset = offsetof(struct pic32_spi_regs, buf);
+ struct spi_master *master = pic32s->master;
+ struct dma_slave_config cfg;
+ int ret;
+
+ cfg.device_fc = true;
+ cfg.src_addr = pic32s->dma_base + buf_offset;
+ cfg.dst_addr = pic32s->dma_base + buf_offset;
+ cfg.src_maxburst = pic32s->fifo_n_elm / 2; /* fill one-half */
+ cfg.dst_maxburst = pic32s->fifo_n_elm / 2; /* drain one-half */
+ cfg.src_addr_width = dma_width;
+ cfg.dst_addr_width = dma_width;
+ /* tx channel */
+ cfg.slave_id = pic32s->tx_irq;
+ cfg.direction = DMA_MEM_TO_DEV;
+ ret = dmaengine_slave_config(master->dma_tx, &cfg);
+ if (ret) {
+ dev_err(&master->dev, "tx channel setup failed\n");
+ return ret;
+ }
+ /* rx channel */
+ cfg.slave_id = pic32s->rx_irq;
+ cfg.direction = DMA_DEV_TO_MEM;
+ ret = dmaengine_slave_config(master->dma_rx, &cfg);
+ if (ret)
+ dev_err(&master->dev, "rx channel setup failed\n");
+
+ return ret;
+}
+
+static int pic32_spi_set_word_size(struct pic32_spi *pic32s, u8 bits_per_word)
+{
+ enum dma_slave_buswidth dmawidth;
+ u32 buswidth, v;
+
+ switch (bits_per_word) {
+ case 8:
+ pic32s->rx_fifo = pic32_spi_rx_byte;
+ pic32s->tx_fifo = pic32_spi_tx_byte;
+ buswidth = PIC32_BPW_8;
+ dmawidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ case 16:
+ pic32s->rx_fifo = pic32_spi_rx_word;
+ pic32s->tx_fifo = pic32_spi_tx_word;
+ buswidth = PIC32_BPW_16;
+ dmawidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case 32:
+ pic32s->rx_fifo = pic32_spi_rx_dword;
+ pic32s->tx_fifo = pic32_spi_tx_dword;
+ buswidth = PIC32_BPW_32;
+ dmawidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ default:
+ /* not supported */
+ return -EINVAL;
+ }
+
+ /* calculate maximum number of words fifos can hold */
+ pic32s->fifo_n_elm = DIV_ROUND_UP(pic32s->fifo_n_byte,
+ bits_per_word / 8);
+ /* set word size */
+ v = readl(&pic32s->regs->ctrl);
+ v &= ~(CTRL_BPW_MASK << CTRL_BPW_SHIFT);
+ v |= buswidth << CTRL_BPW_SHIFT;
+ writel(v, &pic32s->regs->ctrl);
+
+ /* re-configure dma width, if required */
+ if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
+ pic32_spi_dma_config(pic32s, dmawidth);
+
+ return 0;
+}
+
+static int pic32_spi_prepare_hardware(struct spi_master *master)
+{
+ struct pic32_spi *pic32s = spi_master_get_devdata(master);
+
+ pic32_spi_enable(pic32s);
+
+ return 0;
+}
+
+static int pic32_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct pic32_spi *pic32s = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+ u32 val;
+
+ /* set device specific bits_per_word */
+ if (pic32s->bits_per_word != spi->bits_per_word) {
+ pic32_spi_set_word_size(pic32s, spi->bits_per_word);
+ pic32s->bits_per_word = spi->bits_per_word;
+ }
+
+ /* device specific speed change */
+ if (pic32s->speed_hz != spi->max_speed_hz) {
+ pic32_spi_set_clk_rate(pic32s, spi->max_speed_hz);
+ pic32s->speed_hz = spi->max_speed_hz;
+ }
+
+ /* device specific mode change */
+ if (pic32s->mode != spi->mode) {
+ val = readl(&pic32s->regs->ctrl);
+ /* active low */
+ if (spi->mode & SPI_CPOL)
+ val |= CTRL_CKP;
+ else
+ val &= ~CTRL_CKP;
+ /* tx on rising edge */
+ if (spi->mode & SPI_CPHA)
+ val &= ~CTRL_CKE;
+ else
+ val |= CTRL_CKE;
+
+ /* rx at end of tx */
+ val |= CTRL_SMP;
+ writel(val, &pic32s->regs->ctrl);
+ pic32s->mode = spi->mode;
+ }
+
+ return 0;
+}
+
+static bool pic32_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct pic32_spi *pic32s = spi_master_get_devdata(master);
+
+ /* skip DMA on small transfers to avoid the setup overhead */
+ return (xfer->len >= PIC32_DMA_LEN_MIN) &&
+ test_bit(PIC32F_DMA_PREP, &pic32s->flags);
+}
+
+static int pic32_spi_one_transfer(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct pic32_spi *pic32s;
+ bool dma_issued = false;
+ int ret;
+
+ pic32s = spi_master_get_devdata(master);
+
+ /* handle transfer specific word size change */
+ if (transfer->bits_per_word &&
+ (transfer->bits_per_word != pic32s->bits_per_word)) {
+ ret = pic32_spi_set_word_size(pic32s, transfer->bits_per_word);
+ if (ret)
+ return ret;
+ pic32s->bits_per_word = transfer->bits_per_word;
+ }
+
+ /* handle transfer specific speed change */
+ if (transfer->speed_hz && (transfer->speed_hz != pic32s->speed_hz)) {
+ pic32_spi_set_clk_rate(pic32s, transfer->speed_hz);
+ pic32s->speed_hz = transfer->speed_hz;
+ }
+
+ reinit_completion(&pic32s->xfer_done);
+
+ /* transact by DMA mode */
+ if (transfer->rx_sg.nents && transfer->tx_sg.nents) {
+ ret = pic32_spi_dma_transfer(pic32s, transfer);
+ if (ret) {
+ dev_err(&spi->dev, "dma submit error\n");
+ return ret;
+ }
+
+ /* DMA issued */
+ dma_issued = true;
+ } else {
+ /* set current transfer information */
+ pic32s->tx = (const void *)transfer->tx_buf;
+ pic32s->rx = (const void *)transfer->rx_buf;
+ pic32s->tx_end = pic32s->tx + transfer->len;
+ pic32s->rx_end = pic32s->rx + transfer->len;
+ pic32s->len = transfer->len;
+
+ /* transact by interrupt driven PIO */
+ enable_irq(pic32s->fault_irq);
+ enable_irq(pic32s->rx_irq);
+ enable_irq(pic32s->tx_irq);
+ }
+
+ /* wait for completion */
+ ret = wait_for_completion_timeout(&pic32s->xfer_done, 2 * HZ);
+ if (ret <= 0) {
+ dev_err(&spi->dev, "wait error/timedout\n");
+ if (dma_issued) {
+ dmaengine_terminate_all(master->dma_rx);
+ dmaengine_terminate_all(master->dma_tx);
+ }
+ ret = -ETIMEDOUT;
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int pic32_spi_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ /* nothing to do */
+ return 0;
+}
+
+static int pic32_spi_unprepare_hardware(struct spi_master *master)
+{
+ struct pic32_spi *pic32s = spi_master_get_devdata(master);
+
+ pic32_spi_disable(pic32s);
+
+ return 0;
+}
+
+/* This may be called multiple times by same spi dev */
+static int pic32_spi_setup(struct spi_device *spi)
+{
+ if (!spi->max_speed_hz) {
+ dev_err(&spi->dev, "No max speed HZ parameter\n");
+ return -EINVAL;
+ }
+
+ /* PIC32 spi controller can drive /CS during transfer depending
+ * on the tx fifo fill-level. /CS stays asserted as long as the TX
+ * fifo is non-empty and is deasserted otherwise, indicating
+ * completion of the ongoing transfer. This might result in
+ * unreliable/erroneous SPI transactions.
+ * To avoid that we always handle /CS by toggling a GPIO.
+ */
+ if (!gpio_is_valid(spi->cs_gpio))
+ return -EINVAL;
+
+ gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+
+ return 0;
+}
+
+static void pic32_spi_cleanup(struct spi_device *spi)
+{
+ /* de-activate cs-gpio */
+ gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+}
+
+static void pic32_spi_dma_prep(struct pic32_spi *pic32s, struct device *dev)
+{
+ struct spi_master *master = pic32s->master;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ master->dma_rx = dma_request_slave_channel_compat(mask, NULL, NULL,
+ dev, "spi-rx");
+ if (!master->dma_rx) {
+ dev_warn(dev, "RX channel not found.\n");
+ goto out_err;
+ }
+
+ master->dma_tx = dma_request_slave_channel_compat(mask, NULL, NULL,
+ dev, "spi-tx");
+ if (!master->dma_tx) {
+ dev_warn(dev, "TX channel not found.\n");
+ goto out_err;
+ }
+
+ if (pic32_spi_dma_config(pic32s, DMA_SLAVE_BUSWIDTH_1_BYTE))
+ goto out_err;
+
+ /* DMA channels allocated and prepared */
+ set_bit(PIC32F_DMA_PREP, &pic32s->flags);
+
+ return;
+
+out_err:
+ if (master->dma_rx)
+ dma_release_channel(master->dma_rx);
+
+ if (master->dma_tx)
+ dma_release_channel(master->dma_tx);
+}
+
+static void pic32_spi_dma_unprep(struct pic32_spi *pic32s)
+{
+ if (!test_bit(PIC32F_DMA_PREP, &pic32s->flags))
+ return;
+
+ clear_bit(PIC32F_DMA_PREP, &pic32s->flags);
+ if (pic32s->master->dma_rx)
+ dma_release_channel(pic32s->master->dma_rx);
+
+ if (pic32s->master->dma_tx)
+ dma_release_channel(pic32s->master->dma_tx);
+}
+
+static void pic32_spi_hw_init(struct pic32_spi *pic32s)
+{
+ u32 ctrl;
+
+ /* disable hardware */
+ pic32_spi_disable(pic32s);
+
+ ctrl = readl(&pic32s->regs->ctrl);
+ /* enable enhanced fifo, 128 bits (16 bytes) deep */
+ ctrl |= CTRL_ENHBUF;
+ pic32s->fifo_n_byte = 16;
+
+ /* disable framing mode */
+ ctrl &= ~CTRL_FRMEN;
+
+ /* enable master mode while disabled */
+ ctrl |= CTRL_MSTEN;
+
+ /* set tx fifo threshold interrupt */
+ ctrl &= ~(0x3 << CTRL_TX_INT_SHIFT);
+ ctrl |= (TX_FIFO_HALF_EMPTY << CTRL_TX_INT_SHIFT);
+
+ /* set rx fifo threshold interrupt */
+ ctrl &= ~(0x3 << CTRL_RX_INT_SHIFT);
+ ctrl |= (RX_FIFO_NOT_EMPTY << CTRL_RX_INT_SHIFT);
+
+ /* select clk source */
+ ctrl &= ~CTRL_MCLKSEL;
+
+ /* set manual /CS mode */
+ ctrl &= ~CTRL_MSSEN;
+
+ writel(ctrl, &pic32s->regs->ctrl);
+
+ /* enable error reporting */
+ ctrl = CTRL2_TX_UR_EN | CTRL2_RX_OV_EN | CTRL2_FRM_ERR_EN;
+ writel(ctrl, &pic32s->regs->ctrl2_set);
+}
+
+static int pic32_spi_hw_probe(struct platform_device *pdev,
+ struct pic32_spi *pic32s)
+{
+ struct resource *mem;
+ int ret;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pic32s->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(pic32s->regs))
+ return PTR_ERR(pic32s->regs);
+
+ pic32s->dma_base = mem->start;
+
+ /* get irq resources: err-irq, rx-irq, tx-irq */
+ pic32s->fault_irq = platform_get_irq_byname(pdev, "fault");
+ if (pic32s->fault_irq < 0) {
+ dev_err(&pdev->dev, "fault-irq not found\n");
+ return pic32s->fault_irq;
+ }
+
+ pic32s->rx_irq = platform_get_irq_byname(pdev, "rx");
+ if (pic32s->rx_irq < 0) {
+ dev_err(&pdev->dev, "rx-irq not found\n");
+ return pic32s->rx_irq;
+ }
+
+ pic32s->tx_irq = platform_get_irq_byname(pdev, "tx");
+ if (pic32s->tx_irq < 0) {
+ dev_err(&pdev->dev, "tx-irq not found\n");
+ return pic32s->tx_irq;
+ }
+
+ /* get clock */
+ pic32s->clk = devm_clk_get(&pdev->dev, "mck0");
+ if (IS_ERR(pic32s->clk)) {
+ dev_err(&pdev->dev, "clk not found\n");
+ ret = PTR_ERR(pic32s->clk);
+ goto err_unmap_mem;
+ }
+
+ ret = clk_prepare_enable(pic32s->clk);
+ if (ret)
+ goto err_unmap_mem;
+
+ pic32_spi_hw_init(pic32s);
+
+ return 0;
+
+err_unmap_mem:
+ dev_err(&pdev->dev, "%s failed, err %d\n", __func__, ret);
+ return ret;
+}
+
+static int pic32_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct pic32_spi *pic32s;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*pic32s));
+ if (!master)
+ return -ENOMEM;
+
+ pic32s = spi_master_get_devdata(master);
+ pic32s->master = master;
+
+ ret = pic32_spi_hw_probe(pdev, pic32s);
+ if (ret)
+ goto err_master;
+
+ master->dev.of_node = of_node_get(pdev->dev.of_node);
+ master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_CS_HIGH;
+ master->num_chipselect = 1; /* single chip-select */
+ master->max_speed_hz = clk_get_rate(pic32s->clk);
+ master->setup = pic32_spi_setup;
+ master->cleanup = pic32_spi_cleanup;
+ master->flags = SPI_MASTER_MUST_TX | SPI_MASTER_MUST_RX;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(32);
+ master->transfer_one = pic32_spi_one_transfer;
+ master->prepare_message = pic32_spi_prepare_message;
+ master->unprepare_message = pic32_spi_unprepare_message;
+ master->prepare_transfer_hardware = pic32_spi_prepare_hardware;
+ master->unprepare_transfer_hardware = pic32_spi_unprepare_hardware;
+
+ /* optional DMA support */
+ pic32_spi_dma_prep(pic32s, &pdev->dev);
+ if (test_bit(PIC32F_DMA_PREP, &pic32s->flags))
+ master->can_dma = pic32_spi_can_dma;
+
+ init_completion(&pic32s->xfer_done);
+ pic32s->mode = -1;
+
+ /* install irq handlers (with irq-disabled) */
+ irq_set_status_flags(pic32s->fault_irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(&pdev->dev, pic32s->fault_irq,
+ pic32_spi_fault_irq, IRQF_NO_THREAD,
+ dev_name(&pdev->dev), pic32s);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request fault-irq %d\n", pic32s->rx_irq);
+ goto err_bailout;
+ }
+
+ /* receive interrupt handler */
+ irq_set_status_flags(pic32s->rx_irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(&pdev->dev, pic32s->rx_irq,
+ pic32_spi_rx_irq, IRQF_NO_THREAD,
+ dev_name(&pdev->dev), pic32s);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request rx-irq %d\n", pic32s->rx_irq);
+ goto err_bailout;
+ }
+
+ /* transmit interrupt handler */
+ irq_set_status_flags(pic32s->tx_irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(&pdev->dev, pic32s->tx_irq,
+ pic32_spi_tx_irq, IRQF_NO_THREAD,
+ dev_name(&pdev->dev), pic32s);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request tx-irq %d\n", pic32s->tx_irq);
+ goto err_bailout;
+ }
+
+ /* register master */
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&master->dev, "failed registering spi master\n");
+ goto err_bailout;
+ }
+
+ platform_set_drvdata(pdev, pic32s);
+
+ return 0;
+
+err_bailout:
+ clk_disable_unprepare(pic32s->clk);
+err_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int pic32_spi_remove(struct platform_device *pdev)
+{
+ struct pic32_spi *pic32s;
+
+ pic32s = platform_get_drvdata(pdev);
+ pic32_spi_disable(pic32s);
+ clk_disable_unprepare(pic32s->clk);
+ pic32_spi_dma_unprep(pic32s);
+
+ return 0;
+}
+
+static const struct of_device_id pic32_spi_of_match[] = {
+ {.compatible = "microchip,pic32mzda-spi",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, pic32_spi_of_match);
+
+static struct platform_driver pic32_spi_driver = {
+ .driver = {
+ .name = "spi-pic32",
+ .of_match_table = of_match_ptr(pic32_spi_of_match),
+ },
+ .probe = pic32_spi_probe,
+ .remove = pic32_spi_remove,
+};
+
+module_platform_driver(pic32_spi_driver);
+
+MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
+MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SPI controller.");
+MODULE_LICENSE("GPL v2");
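For readers tracing the FIFO helpers above, this is (modulo whitespace) what BUILD_SPI_FIFO_RW(byte, u8, b) expands to; it is shown purely for illustration and adds nothing new:

static void pic32_spi_rx_byte(struct pic32_spi *pic32s)
{
	u8 v;
	u32 mx = pic32_rx_max(pic32s, sizeof(u8));

	for (; mx; mx--) {
		v = readb(&pic32s->regs->buf);
		/* rx_end - len recovers the original rx pointer, so this
		 * tests whether a receive buffer was supplied at all */
		if (pic32s->rx_end - pic32s->len)
			*(u8 *)(pic32s->rx) = v;
		pic32s->rx += sizeof(u8);
	}
}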
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 365fc22c3..a18a03d0a 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -33,12 +33,10 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
dmadev = drv_data->tx_chan->device->dev;
sgt = &drv_data->tx_sgt;
buf = drv_data->tx;
- drv_data->tx_map_len = len;
} else {
dmadev = drv_data->rx_chan->device->dev;
sgt = &drv_data->rx_sgt;
buf = drv_data->rx;
- drv_data->rx_map_len = len;
}
nents = DIV_ROUND_UP(len, SZ_2K);
@@ -55,11 +53,7 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes = min_t(size_t, len, SZ_2K);
- if (buf)
- sg_set_buf(sg, pbuf, bytes);
- else
- sg_set_buf(sg, drv_data->dummy, bytes);
-
+ sg_set_buf(sg, pbuf, bytes);
pbuf += bytes;
len -= bytes;
}
@@ -133,9 +127,6 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
if (!error) {
pxa2xx_spi_unmap_dma_buffers(drv_data);
- drv_data->tx += drv_data->tx_map_len;
- drv_data->rx += drv_data->rx_map_len;
-
msg->actual_length += drv_data->len;
msg->state = pxa2xx_spi_next_transfer(drv_data);
} else {
@@ -267,19 +258,22 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
{
struct dma_async_tx_descriptor *tx_desc, *rx_desc;
+ int err = 0;
tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
if (!tx_desc) {
dev_err(&drv_data->pdev->dev,
"failed to get DMA TX descriptor\n");
- return -EBUSY;
+ err = -EBUSY;
+ goto err_tx;
}
rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM);
if (!rx_desc) {
dev_err(&drv_data->pdev->dev,
"failed to get DMA RX descriptor\n");
- return -EBUSY;
+ err = -EBUSY;
+ goto err_rx;
}
/* We are ready when RX completes */
@@ -289,6 +283,12 @@ int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
dmaengine_submit(rx_desc);
dmaengine_submit(tx_desc);
return 0;
+
+err_rx:
+ dmaengine_terminate_async(drv_data->tx_chan);
+err_tx:
+ pxa2xx_spi_unmap_dma_buffers(drv_data);
+ return err;
}
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
@@ -308,10 +308,6 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- drv_data->dummy = devm_kzalloc(dev, SZ_2K, GFP_KERNEL);
- if (!drv_data->dummy)
- return -ENOMEM;
-
drv_data->tx_chan = dma_request_slave_channel_compat(mask,
pdata->dma_filter, pdata->tx_param, dev, "tx");
if (!drv_data->tx_chan)
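The pxa2xx-dma rework above makes descriptor preparation unwind cleanly instead of leaking a prepared half. A hedged generic sketch of the duplex submit order the driver relies on (prepare both sides, complete on RX, unwind on failure); all foo_ and parameter names are illustrative:

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int foo_submit_duplex(struct dma_chan *rx_ch, struct dma_chan *tx_ch,
			     struct scatterlist *rx_sg, unsigned int rx_nents,
			     struct scatterlist *tx_sg, unsigned int tx_nents,
			     dma_async_tx_callback done, void *arg)
{
	struct dma_async_tx_descriptor *rxd, *txd;

	rxd = dmaengine_prep_slave_sg(rx_ch, rx_sg, rx_nents, DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxd)
		return -EBUSY;

	txd = dmaengine_prep_slave_sg(tx_ch, tx_sg, tx_nents, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd) {
		dmaengine_terminate_async(rx_ch);
		return -EBUSY;
	}

	rxd->callback = done;		/* a duplex xfer ends when RX ends */
	rxd->callback_param = arg;

	dmaengine_submit(rxd);
	dmaengine_submit(txd);
	dma_async_issue_pending(rx_ch);
	dma_async_issue_pending(tx_ch);

	return 0;
}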
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 520ed1dd5..5202de94f 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -144,16 +144,16 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
struct dw_dma_slave *slave = c->tx_param;
slave->dma_dev = &dma_dev->dev;
- slave->src_master = 1;
- slave->dst_master = 0;
+ slave->m_master = 0;
+ slave->p_master = 1;
}
if (c->rx_param) {
struct dw_dma_slave *slave = c->rx_param;
slave->dma_dev = &dma_dev->dev;
- slave->src_master = 1;
- slave->dst_master = 0;
+ slave->m_master = 0;
+ slave->p_master = 1;
}
spi_pdata.dma_filter = lpss_dma_filter;
@@ -173,8 +173,8 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
ssp->type = c->type;
snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
- ssp->clk = clk_register_fixed_rate(&dev->dev, buf , NULL,
- CLK_IS_ROOT, c->max_clk_rate);
+ ssp->clk = clk_register_fixed_rate(&dev->dev, buf, NULL, 0,
+ c->max_clk_rate);
if (IS_ERR(ssp->clk))
return PTR_ERR(ssp->clk);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 86138e410..fe07c0592 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -570,9 +570,8 @@ static void giveback(struct driver_data *drv_data)
/* see if the next and current messages point
* to the same chip
*/
- if (next_msg && next_msg->spi != msg->spi)
- next_msg = NULL;
- if (!next_msg || msg->state == ERROR_STATE)
+ if ((next_msg && next_msg->spi != msg->spi) ||
+ msg->state == ERROR_STATE)
cs_deassert(drv_data);
}
@@ -928,6 +927,7 @@ static void pump_transfers(unsigned long data)
u32 dma_thresh = drv_data->cur_chip->dma_threshold;
u32 dma_burst = drv_data->cur_chip->dma_burst_size;
u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
+ int err;
/* Get current state information */
message = drv_data->cur_msg;
@@ -1047,7 +1047,12 @@ static void pump_transfers(unsigned long data)
/* Ensure we have the correct interrupt handler */
drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
- pxa2xx_spi_dma_prepare(drv_data, dma_burst);
+ err = pxa2xx_spi_dma_prepare(drv_data, dma_burst);
+ if (err) {
+ message->status = err;
+ giveback(drv_data);
+ return;
+ }
/* Clear status and start DMA engine */
cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
@@ -1543,7 +1548,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->pdev = pdev;
drv_data->ssp = ssp;
- master->dev.parent = &pdev->dev;
master->dev.of_node = pdev->dev.of_node;
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
@@ -1556,6 +1560,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
master->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
master->auto_runtime_pm = true;
+ master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
drv_data->ssp_type = ssp->type;
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index a1ef88948..e6b09000f 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -56,7 +56,6 @@ struct driver_data {
struct sg_table tx_sgt;
int rx_nents;
int tx_nents;
- void *dummy;
atomic_t dma_running;
/* Current message transfer state info */
@@ -69,8 +68,6 @@ struct driver_data {
void *rx;
void *rx_end;
int dma_mapped;
- size_t rx_map_len;
- size_t tx_map_len;
u8 n_bytes;
int (*write)(struct driver_data *drv_data);
int (*read)(struct driver_data *drv_data);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 810a7fae3..c338ef113 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -937,6 +937,10 @@ static int spi_qup_pm_suspend_runtime(struct device *device)
config = readl(controller->base + QUP_CONFIG);
config |= QUP_CONFIG_CLOCK_AUTO_GATE;
writel_relaxed(config, controller->base + QUP_CONFIG);
+
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+
return 0;
}
@@ -945,6 +949,15 @@ static int spi_qup_pm_resume_runtime(struct device *device)
struct spi_master *master = dev_get_drvdata(device);
struct spi_qup *controller = spi_master_get_devdata(master);
u32 config;
+ int ret;
+
+ ret = clk_prepare_enable(controller->iclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(controller->cclk);
+ if (ret)
+ return ret;
/* Disable clocks auto gaiting */
config = readl_relaxed(controller->base + QUP_CONFIG);
@@ -1017,6 +1030,8 @@ static int spi_qup_remove(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ spi_master_put(master);
+
return 0;
}
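
Note: the spi-qup changes make the runtime-PM callbacks actually gate the clocks. Suspend turns on the block's auto-gating and then drops the core (cclk) and interface (iclk) clocks; resume re-enables them before touching QUP_CONFIG, since a register access with the interface clock off could hang the bus. The remove path also gains the spi_master_put() needed to balance the allocation in probe. A sketch of the suspend half, with a hypothetical driver-private struct:

    static int foo_spi_runtime_suspend(struct device *dev)
    {
        struct foo_spi *priv = dev_get_drvdata(dev);

        /* quiesce the block first, then release both clocks */
        clk_disable_unprepare(priv->cclk);
        clk_disable_unprepare(priv->iclk);
        return 0;
    }
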
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 6c6c0013e..1026e180e 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -578,7 +578,7 @@ static int rockchip_spi_transfer_one(
struct spi_device *spi,
struct spi_transfer *xfer)
{
- int ret = 1;
+ int ret = 0;
struct rockchip_spi *rs = spi_master_get_devdata(master);
WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
@@ -627,6 +627,8 @@ static int rockchip_spi_transfer_one(
spi_enable_chip(rs, 1);
ret = rockchip_spi_prepare_dma(rs);
}
+ /* successful DMA prepare means the transfer is in progress */
+ ret = ret ? ret : 1;
} else {
spi_enable_chip(rs, 1);
ret = rockchip_spi_pio_transfer(rs);
@@ -744,10 +746,8 @@ static int rockchip_spi_probe(struct platform_device *pdev)
rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
if (IS_ERR(rs->dma_rx.ch)) {
if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
- dma_release_channel(rs->dma_tx.ch);
- rs->dma_tx.ch = NULL;
ret = -EPROBE_DEFER;
- goto err_get_fifo_len;
+ goto err_free_dma_tx;
}
dev_warn(rs->dev, "Failed to request RX DMA channel\n");
rs->dma_rx.ch = NULL;
@@ -775,10 +775,11 @@ static int rockchip_spi_probe(struct platform_device *pdev)
err_register_master:
pm_runtime_disable(&pdev->dev);
- if (rs->dma_tx.ch)
- dma_release_channel(rs->dma_tx.ch);
if (rs->dma_rx.ch)
dma_release_channel(rs->dma_rx.ch);
+err_free_dma_tx:
+ if (rs->dma_tx.ch)
+ dma_release_channel(rs->dma_tx.ch);
err_get_fifo_len:
clk_disable_unprepare(rs->spiclk);
err_spiclk_enable:
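
Note: two fixes land in spi-rockchip.c. First, transfer_one() honours the SPI core's contract: a negative return reports an error, 0 means the transfer already completed, and 1 means it is in flight and spi_finalize_current_transfer() will be called later. Initializing ret to 0 and only promoting it to 1 after a successful DMA prepare stops a failed prepare from being silently treated as "in progress". Second, the probe error handling for a deferred RX channel request now jumps to a dedicated unwind label instead of open-coding the TX-channel release. A sketch of the return-value contract, with a hypothetical DMA helper:

    static int foo_transfer_one(struct spi_master *master,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
    {
        int ret = foo_start_dma(xfer);  /* hypothetical: 0 or -errno */

        /* 1 tells the core to wait for spi_finalize_current_transfer() */
        return ret ? ret : 1;
    }
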
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
index f17c0abe2..d5adf9f31 100644
--- a/drivers/spi/spi-st-ssc4.c
+++ b/drivers/spi/spi-st-ssc4.c
@@ -345,12 +345,13 @@ static int spi_st_probe(struct platform_device *pdev)
spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
if (IS_ERR(spi_st->clk)) {
dev_err(&pdev->dev, "Unable to request clock\n");
- return PTR_ERR(spi_st->clk);
+ ret = PTR_ERR(spi_st->clk);
+ goto put_master;
}
ret = spi_st_clk_enable(spi_st);
if (ret)
- return ret;
+ goto put_master;
init_completion(&spi_st->done);
@@ -408,7 +409,8 @@ static int spi_st_probe(struct platform_device *pdev)
clk_disable:
spi_st_clk_disable(spi_st);
-
+put_master:
+ spi_master_put(master);
return ret;
}
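
Note: spi-st-ssc4.c plugs a reference leak in spi_st_probe(). Every early exit after spi_alloc_master() must drop the device reference the allocation took, so the clock-lookup and clock-enable failures now route through a put_master label. The pairing in isolation, with a hypothetical setup step:

    master = spi_alloc_master(&pdev->dev, sizeof(*spi_st));
    if (!master)
        return -ENOMEM;

    ret = foo_setup(pdev);          /* hypothetical step that can fail */
    if (ret)
        goto put_master;

    return 0;

put_master:
    spi_master_put(master);         /* balances spi_alloc_master() */
    return ret;
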
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index 1ddd9e230..cf007f3b8 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -173,13 +173,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
{
struct sun4i_spi *sspi = spi_master_get_devdata(master);
unsigned int mclk_rate, div, timeout;
+ unsigned int start, end, tx_time;
unsigned int tx_len = 0;
int ret = 0;
u32 reg;
/* We don't support transfer larger than the FIFO */
if (tfr->len > SUN4I_FIFO_DEPTH)
- return -EINVAL;
+ return -EMSGSIZE;
+
+ if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
+ return -EMSGSIZE;
reinit_completion(&sspi->done);
sspi->tx_buf = tfr->tx_buf;
@@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
- /* Fill the TX FIFO */
- sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+ /*
+ * Fill the TX FIFO
+ * Filling the FIFO fully causes timeout for some reason
+ * at least on spi2 on A10s
+ */
+ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
/* Enable the interrupts */
sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
@@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
+ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+ start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(tx_time));
+ end = jiffies;
if (!timeout) {
+ dev_warn(&master->dev,
+ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+ jiffies_to_msecs(end - start), tx_time);
ret = -ETIMEDOUT;
goto out;
}
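
Note: the sun4i changes replace the flat one-second completion timeout with one scaled to the transfer. len bytes take len*8 bit-times on the wire, so at speed_hz the nominal time in milliseconds is len*8*1000/speed_hz; the code doubles that as len * 8 * 2 / (speed_hz / 1000), clamps it to at least 100 ms, and logs the measured and budgeted times on expiry. For example, 64 bytes at 100 kHz gives 64*8*2/100 = 10 ms, which the 100 ms floor then covers. The size checks also switch to -EMSGSIZE, and since completely filling the FIFO was observed to time out (at least on the A10s spi2), only SUN4I_FIFO_DEPTH - 1 bytes are preloaded, which is why a full-FIFO TX buffer is rejected up front. The identical timeout change is applied to spi-sun6i.c below. The heuristic in isolation:

    unsigned int tx_time;

    /* twice the nominal wire time in ms, never below 100 ms */
    tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
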
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 42e2c4bd6..7fce79a60 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
{
struct sun6i_spi *sspi = spi_master_get_devdata(master);
unsigned int mclk_rate, div, timeout;
+ unsigned int start, end, tx_time;
unsigned int tx_len = 0;
int ret = 0;
u32 reg;
@@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+ tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
+ start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(tx_time));
+ end = jiffies;
if (!timeout) {
+ dev_warn(&master->dev,
+ "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
+ dev_name(&spi->dev), tfr->len, tfr->speed_hz,
+ jiffies_to_msecs(end - start), tx_time);
ret = -ETIMEDOUT;
goto out;
}
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 443f66453..29ea8d2f9 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -646,6 +646,13 @@ free_master:
static int ti_qspi_remove(struct platform_device *pdev)
{
+ struct ti_qspi *qspi = platform_get_drvdata(pdev);
+ int rc;
+
+ rc = spi_master_suspend(qspi->master);
+ if (rc)
+ return rc;
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
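
Note: ti_qspi_remove() now quiesces the SPI core's message pump with spi_master_suspend(), the same helper the system-suspend path uses to stop the queue, before cutting power; a failure to stop the queue aborts the removal so no transfer is in flight while the controller disappears. The general remove() shape this follows, sketched with hypothetical names:

    static int foo_remove(struct platform_device *pdev)
    {
        struct foo *priv = platform_get_drvdata(pdev);
        int rc;

        rc = spi_master_suspend(priv->master);  /* stop the queue first */
        if (rc)
            return rc;

        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
    }
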
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index aab9b492c..18aeaceee 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -360,7 +360,7 @@ static int zynqmp_prepare_transfer_hardware(struct spi_master *master)
ret = clk_enable(xqspi->refclk);
if (ret)
- goto clk_err;
+ return ret;
ret = clk_enable(xqspi->pclk);
if (ret)
@@ -369,6 +369,7 @@ static int zynqmp_prepare_transfer_hardware(struct spi_master *master)
zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
return 0;
clk_err:
+ clk_disable(xqspi->refclk);
return ret;
}
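
Note: zynqmp_prepare_transfer_hardware() had an unbalanced error path. Before this fix, a pclk enable failure jumped to clk_err and returned with refclk still running, leaking the enable; and a refclk failure took the same label needlessly. Now the refclk failure returns directly (nothing to undo), and clk_err serves only the pclk failure, disabling the one clock that did come on. The general ladder:

    ret = clk_enable(refclk);
    if (ret)
        return ret;             /* nothing enabled yet */

    ret = clk_enable(pclk);
    if (ret)
        goto err_refclk;

    return 0;

err_refclk:
    clk_disable(refclk);        /* undo only what succeeded */
    return ret;
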
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 0239b45ee..77e6e4595 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -717,9 +717,11 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
if (vmalloced_buf) {
desc_len = min_t(int, max_seg_size, PAGE_SIZE);
sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
- } else {
+ } else if (virt_addr_valid(buf)) {
desc_len = min_t(int, max_seg_size, master->max_dma_len);
sgs = DIV_ROUND_UP(len, desc_len);
+ } else {
+ return -EINVAL;
}
ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
@@ -933,7 +935,7 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
* spi_transfer_one_message - Default implementation of transfer_one_message()
*
* This is a standard implementation of transfer_one_message() for
- * drivers which impelment a transfer_one() operation. It provides
+ * drivers which implement a transfer_one() operation. It provides
* standard handling of delays and chip select management.
*/
static int spi_transfer_one_message(struct spi_master *master,
@@ -1764,6 +1766,7 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
master->num_chipselect = 1;
master->dev.class = &spi_master_class;
master->dev.parent = dev;
+ pm_suspend_ignore_children(&master->dev, true);
spi_master_set_devdata(master, &master[1]);
return master;
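
Note: two core changes here. spi_map_buf() now classifies the buffer before building a scatterlist: vmalloc memory is split per page, ordinary lowmem per DMA segment, and anything else (a buffer on a vmapped stack, for instance) is rejected with -EINVAL rather than mapped bogusly. And spi_alloc_master() marks the controller device with pm_suspend_ignore_children(), so the PM core does not hold the controller active merely on account of its child spi devices. A sketch of the classification, assuming vmalloced_buf was computed with is_vmalloc_addr() as in mainline:

    if (is_vmalloc_addr(buf))
        desc_len = min_t(int, max_seg_size, PAGE_SIZE);
    else if (virt_addr_valid(buf))
        desc_len = min_t(int, max_seg_size, master->max_dma_len);
    else
        return -EINVAL;         /* not DMA-mappable */
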
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 6b3da1bb0..2b9b0941d 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -25,6 +25,7 @@
#define CREATE_TRACE_POINTS
#include <trace/events/spmi.h>
+static bool is_registered;
static DEFINE_IDA(ctrl_ida);
static void spmi_dev_release(struct device *dev)
@@ -507,7 +508,7 @@ int spmi_controller_add(struct spmi_controller *ctrl)
int ret;
/* Can't register until after driver model init */
- if (WARN_ON(!spmi_bus_type.p))
+ if (WARN_ON(!is_registered))
return -EAGAIN;
ret = device_add(&ctrl->dev);
@@ -576,7 +577,14 @@ module_exit(spmi_exit);
static int __init spmi_init(void)
{
- return bus_register(&spmi_bus_type);
+ int ret;
+
+ ret = bus_register(&spmi_bus_type);
+ if (ret)
+ return ret;
+
+ is_registered = true;
+ return 0;
}
postcore_initcall(spmi_init);
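
Note: the spmi fix stops spmi_controller_add() from dereferencing spmi_bus_type.p, a driver-core private pointer, to learn whether the bus was registered; spmi_init() instead records a successful bus_register() in a module-local flag. The pattern in isolation, with a hypothetical bus:

    static bool is_registered;

    static int __init foo_bus_init(void)
    {
        int ret = bus_register(&foo_bus_type);

        if (ret)
            return ret;

        is_registered = true;   /* safe to add controllers now */
        return 0;
    }
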
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index f92e266d4..180e027b1 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -8,7 +8,7 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
@@ -22,15 +22,10 @@
* Shared
**************************************************/
-static struct ssb_bus *ssb_gpio_get_bus(struct gpio_chip *chip)
-{
- return container_of(chip, struct ssb_bus, gpio);
-}
-
#if IS_ENABLED(CONFIG_SSB_EMBEDDED)
static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = gpiochip_get_data(chip);
if (bus->bustype == SSB_BUSTYPE_SSB)
return irq_find_mapping(bus->irq_domain, gpio);
@@ -45,7 +40,7 @@ static int ssb_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = gpiochip_get_data(chip);
return !!ssb_chipco_gpio_in(&bus->chipco, 1 << gpio);
}
@@ -53,7 +48,7 @@ static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio)
static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio,
int value)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = gpiochip_get_data(chip);
ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 1 << gpio : 0);
}
@@ -61,7 +56,7 @@ static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio,
static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip,
unsigned gpio)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = gpiochip_get_data(chip);
ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 0);
return 0;
@@ -70,7 +65,7 @@ static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip,
static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip,
unsigned gpio, int value)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = gpiochip_get_data(chip);
ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 1 << gpio);
ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 1 << gpio : 0);
@@ -79,7 +74,7 @@ static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip,
static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = gpiochip_get_data(chip);
ssb_chipco_gpio_control(&bus->chipco, 1 << gpio, 0);
/* clear pulldown */
@@ -92,7 +87,7 @@ static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio)
static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = gpiochip_get_data(chip);
/* clear pullup */
ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 0);
@@ -246,7 +241,7 @@ static int ssb_gpio_chipco_init(struct ssb_bus *bus)
if (err)
return err;
- err = gpiochip_add(chip);
+ err = gpiochip_add_data(chip, bus);
if (err) {
ssb_gpio_irq_chipco_domain_exit(bus);
return err;
@@ -263,7 +258,7 @@ static int ssb_gpio_chipco_init(struct ssb_bus *bus)
static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = gpiochip_get_data(chip);
return !!ssb_extif_gpio_in(&bus->extif, 1 << gpio);
}
@@ -271,7 +266,7 @@ static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio)
static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio,
int value)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = gpiochip_get_data(chip);
ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 1 << gpio : 0);
}
@@ -279,7 +274,7 @@ static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio,
static int ssb_gpio_extif_direction_input(struct gpio_chip *chip,
unsigned gpio)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = gpiochip_get_data(chip);
ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 0);
return 0;
@@ -288,7 +283,7 @@ static int ssb_gpio_extif_direction_input(struct gpio_chip *chip,
static int ssb_gpio_extif_direction_output(struct gpio_chip *chip,
unsigned gpio, int value)
{
- struct ssb_bus *bus = ssb_gpio_get_bus(chip);
+ struct ssb_bus *bus = gpiochip_get_data(chip);
ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 1 << gpio);
ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 1 << gpio : 0);
@@ -439,7 +434,7 @@ static int ssb_gpio_extif_init(struct ssb_bus *bus)
if (err)
return err;
- err = gpiochip_add(chip);
+ err = gpiochip_add_data(chip, bus);
if (err) {
ssb_gpio_irq_extif_domain_exit(bus);
return err;
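
Note: all of driver_gpio.c is converted from the container_of idiom, which only works because the gpio_chip happens to be embedded in struct ssb_bus, to the gpiochip data pointer: the bus is handed to the core once via gpiochip_add_data(chip, bus) and each callback retrieves it with gpiochip_get_data(chip). The include also narrows from <linux/gpio.h> to <linux/gpio/driver.h>, the provider-side header. The idiom in miniature, with a hypothetical chip:

    static int foo_gpio_get(struct gpio_chip *chip, unsigned int offset)
    {
        struct foo *priv = gpiochip_get_data(chip);

        return !!foo_read_pin(priv, offset);    /* hypothetical read */
    }

    /* at registration time: */
    err = gpiochip_add_data(&priv->chip, priv);
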
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 45251e8fd..a81bdb894 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -66,8 +66,6 @@ source "drivers/staging/nvec/Kconfig"
source "drivers/staging/media/Kconfig"
-source "drivers/staging/rdma/Kconfig"
-
source "drivers/staging/android/Kconfig"
source "drivers/staging/board/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 3184844eb..d112e0819 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -24,7 +24,6 @@ obj-$(CONFIG_VHBA) += vhba/
obj-$(CONFIG_USB_EMXX) += emxx_udc/
obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_MFD_NVEC) += nvec/
-obj-$(CONFIG_STAGING_RDMA) += rdma/
obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index bd90d2002..6480f60eb 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -14,27 +14,13 @@ config ASHMEM
It is, in theory, a good memory allocator for low-memory devices,
because it can discard shared memory units when under memory pressure.
-config ANDROID_TIMED_OUTPUT
- bool "Timed output class driver"
- default y
-
-config ANDROID_TIMED_GPIO
- tristate "Android timed gpio driver"
- depends on GPIOLIB || COMPILE_TEST
- depends on ANDROID_TIMED_OUTPUT
- default n
- ---help---
- Unlike generic gpio is to allow programs to access and manipulate gpio
- registers from user space, timed output/gpio is a system to allow changing
- a gpio pin and restore it automatically after a specified timeout.
-
config ANDROID_LOW_MEMORY_KILLER
bool "Android Low Memory Killer"
---help---
Registers processes to be killed when low memory conditions, this is useful
as there is no particular swap space on android.
- The registered process will kills according to the priorities in android init
+ The registered process will kill according to the priorities in android init
scripts (/init.rc), and it defines priority values with minimum free memory size
for each priority.
@@ -52,6 +38,7 @@ config SW_SYNC
bool "Software synchronization objects"
default n
depends on SYNC
+ depends on SYNC_FILE
---help---
A sync object driver that uses a 32bit counter to coordinate
synchronization. Useful when there is no hardware primitive backing
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index c7b6c99cc..980d6dc4b 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -3,8 +3,6 @@ ccflags-y += -I$(src) # needed for trace events
obj-y += ion/
obj-$(CONFIG_ASHMEM) += ashmem.o
-obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
-obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
obj-$(CONFIG_SYNC) += sync.o sync_debug.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 85365672c..a2cf93b59 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -184,7 +184,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct scatterlist *sg;
int i, ret;
- buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
@@ -341,7 +341,7 @@ static struct ion_handle *ion_handle_create(struct ion_client *client,
{
struct ion_handle *handle;
- handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return ERR_PTR(-ENOMEM);
kref_init(&handle->ref);
@@ -396,7 +396,7 @@ static int ion_handle_put_nolock(struct ion_handle *handle)
return ret;
}
-int ion_handle_put(struct ion_handle *handle)
+static int ion_handle_put(struct ion_handle *handle)
{
struct ion_client *client = handle->client;
int ret;
@@ -438,8 +438,8 @@ static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
return handle ? handle : ERR_PTR(-EINVAL);
}
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id)
+static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+ int id)
{
struct ion_handle *handle;
@@ -827,7 +827,7 @@ struct ion_client *ion_client_create(struct ion_device *dev,
}
task_unlock(current->group_leader);
- client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
goto err_put_task_struct;
@@ -1035,7 +1035,7 @@ static void ion_vm_open(struct vm_area_struct *vma)
struct ion_buffer *buffer = vma->vm_private_data;
struct ion_vma_list *vma_list;
- vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+ vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
if (!vma_list)
return;
vma_list->vma = vma;
@@ -1650,7 +1650,7 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
struct ion_device *idev;
int ret;
- idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+ idev = kzalloc(sizeof(*idev), GFP_KERNEL);
if (!idev)
return ERR_PTR(-ENOMEM);
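
Note: the ion.c changes are style and scoping cleanups. Allocations switch from sizeof(struct foo) to sizeof(*ptr), which cannot go stale if the variable's type ever changes, and ion_handle_put()/ion_handle_get_by_id() become static since all callers live in this file. ion_dummy_driver.c below additionally checks ion_device_create() for an error pointer before using the result. The allocation idiom:

    struct ion_buffer *buffer;

    /* sizeof(*buffer) tracks the pointee type automatically */
    buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
    if (!buffer)
        return ERR_PTR(-ENOMEM);
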
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 0813163f9..e0553fee9 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -55,7 +55,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
if (allocated_size > chunk_heap->size - chunk_heap->allocated)
return -ENOMEM;
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ table = kmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
@@ -154,7 +154,7 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
if (ret)
return ERR_PTR(ret);
- chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
+ chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
if (!chunk_heap)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index 5678870bf..814a3c92a 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -68,6 +68,8 @@ static int __init ion_dummy_init(void)
int i, err;
idev = ion_device_create(NULL);
+ if (IS_ERR(idev))
+ return PTR_ERR(idev);
heaps = kcalloc(dummy_ion_pdata.nr, sizeof(struct ion_heap *),
GFP_KERNEL);
if (!heaps)
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index 83a3af06d..5a396a1a8 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -208,7 +208,7 @@ static int ion_test_open(struct inode *inode, struct file *file)
struct ion_test_data *data;
struct miscdevice *miscdev = file->private_data;
- data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 2509e5df7..24d2745e9 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -131,7 +131,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
if (!p)
continue;
- if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
+ if (task_lmk_waiting(p) &&
time_before_eq(jiffies, lowmem_deathpending_timeout)) {
task_unlock(p);
rcu_read_unlock();
@@ -162,13 +162,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
if (selected) {
task_lock(selected);
send_sig(SIGKILL, selected, 0);
- /*
- * FIXME: lowmemorykiller shouldn't abuse global OOM killer
- * infrastructure. There is no real reason why the selected
- * task should have access to the memory reserves.
- */
if (selected->mm)
- mark_oom_victim(selected);
+ task_set_lmk_waiting(selected);
task_unlock(selected);
lowmem_print(1, "Killing '%s' (%d), adj %hd,\n"
" to free %ldkB on behalf of '%s' (%d) because\n"
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 3a8f21031..1d14c83c7 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -16,10 +16,7 @@
#include <linux/debugfs.h>
#include <linux/export.h>
-#include <linux/file.h>
-#include <linux/fs.h>
#include <linux/kernel.h>
-#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -32,7 +29,6 @@
#include "trace/sync.h"
static const struct fence_ops android_fence_ops;
-static const struct file_operations sync_file_fops;
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
int size, const char *name)
@@ -136,170 +132,6 @@ struct fence *sync_pt_create(struct sync_timeline *obj, int size)
}
EXPORT_SYMBOL(sync_pt_create);
-static struct sync_file *sync_file_alloc(int size, const char *name)
-{
- struct sync_file *sync_file;
-
- sync_file = kzalloc(size, GFP_KERNEL);
- if (!sync_file)
- return NULL;
-
- sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops,
- sync_file, 0);
- if (IS_ERR(sync_file->file))
- goto err;
-
- kref_init(&sync_file->kref);
- strlcpy(sync_file->name, name, sizeof(sync_file->name));
-
- init_waitqueue_head(&sync_file->wq);
-
- return sync_file;
-
-err:
- kfree(sync_file);
- return NULL;
-}
-
-static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
-{
- struct sync_file_cb *check;
- struct sync_file *sync_file;
-
- check = container_of(cb, struct sync_file_cb, cb);
- sync_file = check->sync_file;
-
- if (atomic_dec_and_test(&sync_file->status))
- wake_up_all(&sync_file->wq);
-}
-
-/* TODO: implement a create which takes more that one fence */
-struct sync_file *sync_file_create(const char *name, struct fence *fence)
-{
- struct sync_file *sync_file;
-
- sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]),
- name);
- if (!sync_file)
- return NULL;
-
- sync_file->num_fences = 1;
- atomic_set(&sync_file->status, 1);
-
- sync_file->cbs[0].fence = fence;
- sync_file->cbs[0].sync_file = sync_file;
- if (fence_add_callback(fence, &sync_file->cbs[0].cb,
- fence_check_cb_func))
- atomic_dec(&sync_file->status);
-
- sync_file_debug_add(sync_file);
-
- return sync_file;
-}
-EXPORT_SYMBOL(sync_file_create);
-
-struct sync_file *sync_file_fdget(int fd)
-{
- struct file *file = fget(fd);
-
- if (!file)
- return NULL;
-
- if (file->f_op != &sync_file_fops)
- goto err;
-
- return file->private_data;
-
-err:
- fput(file);
- return NULL;
-}
-EXPORT_SYMBOL(sync_file_fdget);
-
-void sync_file_put(struct sync_file *sync_file)
-{
- fput(sync_file->file);
-}
-EXPORT_SYMBOL(sync_file_put);
-
-void sync_file_install(struct sync_file *sync_file, int fd)
-{
- fd_install(fd, sync_file->file);
-}
-EXPORT_SYMBOL(sync_file_install);
-
-static void sync_file_add_pt(struct sync_file *sync_file, int *i,
- struct fence *fence)
-{
- sync_file->cbs[*i].fence = fence;
- sync_file->cbs[*i].sync_file = sync_file;
-
- if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
- fence_check_cb_func)) {
- fence_get(fence);
- (*i)++;
- }
-}
-
-struct sync_file *sync_file_merge(const char *name,
- struct sync_file *a, struct sync_file *b)
-{
- int num_fences = a->num_fences + b->num_fences;
- struct sync_file *sync_file;
- int i, i_a, i_b;
- unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
-
- sync_file = sync_file_alloc(size, name);
- if (!sync_file)
- return NULL;
-
- atomic_set(&sync_file->status, num_fences);
-
- /*
- * Assume sync_file a and b are both ordered and have no
- * duplicates with the same context.
- *
- * If a sync_file can only be created with sync_file_merge
- * and sync_file_create, this is a reasonable assumption.
- */
- for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
- struct fence *pt_a = a->cbs[i_a].fence;
- struct fence *pt_b = b->cbs[i_b].fence;
-
- if (pt_a->context < pt_b->context) {
- sync_file_add_pt(sync_file, &i, pt_a);
-
- i_a++;
- } else if (pt_a->context > pt_b->context) {
- sync_file_add_pt(sync_file, &i, pt_b);
-
- i_b++;
- } else {
- if (pt_a->seqno - pt_b->seqno <= INT_MAX)
- sync_file_add_pt(sync_file, &i, pt_a);
- else
- sync_file_add_pt(sync_file, &i, pt_b);
-
- i_a++;
- i_b++;
- }
- }
-
- for (; i_a < a->num_fences; i_a++)
- sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
-
- for (; i_b < b->num_fences; i_b++)
- sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
-
- if (num_fences > i)
- atomic_sub(num_fences - i, &sync_file->status);
- sync_file->num_fences = i;
-
- sync_file_debug_add(sync_file);
- return sync_file;
-}
-EXPORT_SYMBOL(sync_file_merge);
-
static const char *android_fence_get_driver_name(struct fence *fence)
{
struct sync_timeline *parent = fence_parent(fence);
@@ -387,191 +219,3 @@ static const struct fence_ops android_fence_ops = {
.fence_value_str = android_fence_value_str,
.timeline_value_str = android_fence_timeline_value_str,
};
-
-static void sync_file_free(struct kref *kref)
-{
- struct sync_file *sync_file = container_of(kref, struct sync_file,
- kref);
- int i;
-
- for (i = 0; i < sync_file->num_fences; ++i) {
- fence_remove_callback(sync_file->cbs[i].fence,
- &sync_file->cbs[i].cb);
- fence_put(sync_file->cbs[i].fence);
- }
-
- kfree(sync_file);
-}
-
-static int sync_file_release(struct inode *inode, struct file *file)
-{
- struct sync_file *sync_file = file->private_data;
-
- sync_file_debug_remove(sync_file);
-
- kref_put(&sync_file->kref, sync_file_free);
- return 0;
-}
-
-static unsigned int sync_file_poll(struct file *file, poll_table *wait)
-{
- struct sync_file *sync_file = file->private_data;
- int status;
-
- poll_wait(file, &sync_file->wq, wait);
-
- status = atomic_read(&sync_file->status);
-
- if (!status)
- return POLLIN;
- if (status < 0)
- return POLLERR;
- return 0;
-}
-
-static long sync_file_ioctl_merge(struct sync_file *sync_file,
- unsigned long arg)
-{
- int fd = get_unused_fd_flags(O_CLOEXEC);
- int err;
- struct sync_file *fence2, *fence3;
- struct sync_merge_data data;
-
- if (fd < 0)
- return fd;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
- err = -EFAULT;
- goto err_put_fd;
- }
-
- fence2 = sync_file_fdget(data.fd2);
- if (!fence2) {
- err = -ENOENT;
- goto err_put_fd;
- }
-
- data.name[sizeof(data.name) - 1] = '\0';
- fence3 = sync_file_merge(data.name, sync_file, fence2);
- if (!fence3) {
- err = -ENOMEM;
- goto err_put_fence2;
- }
-
- data.fence = fd;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- err = -EFAULT;
- goto err_put_fence3;
- }
-
- sync_file_install(fence3, fd);
- sync_file_put(fence2);
- return 0;
-
-err_put_fence3:
- sync_file_put(fence3);
-
-err_put_fence2:
- sync_file_put(fence2);
-
-err_put_fd:
- put_unused_fd(fd);
- return err;
-}
-
-static int sync_fill_fence_info(struct fence *fence, void *data, int size)
-{
- struct sync_fence_info *info = data;
-
- if (size < sizeof(*info))
- return -ENOMEM;
-
- strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
- sizeof(info->obj_name));
- strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
- sizeof(info->driver_name));
- if (fence_is_signaled(fence))
- info->status = fence->status >= 0 ? 1 : fence->status;
- else
- info->status = 0;
- info->timestamp_ns = ktime_to_ns(fence->timestamp);
-
- return sizeof(*info);
-}
-
-static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
- unsigned long arg)
-{
- struct sync_file_info *info;
- __u32 size;
- __u32 len = 0;
- int ret, i;
-
- if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
- return -EFAULT;
-
- if (size < sizeof(struct sync_file_info))
- return -EINVAL;
-
- if (size > 4096)
- size = 4096;
-
- info = kzalloc(size, GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- strlcpy(info->name, sync_file->name, sizeof(info->name));
- info->status = atomic_read(&sync_file->status);
- if (info->status >= 0)
- info->status = !info->status;
-
- len = sizeof(struct sync_file_info);
-
- for (i = 0; i < sync_file->num_fences; ++i) {
- struct fence *fence = sync_file->cbs[i].fence;
-
- ret = sync_fill_fence_info(fence, (u8 *)info + len, size - len);
-
- if (ret < 0)
- goto out;
-
- len += ret;
- }
-
- info->len = len;
-
- if (copy_to_user((void __user *)arg, info, len))
- ret = -EFAULT;
- else
- ret = 0;
-
-out:
- kfree(info);
-
- return ret;
-}
-
-static long sync_file_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct sync_file *sync_file = file->private_data;
-
- switch (cmd) {
- case SYNC_IOC_MERGE:
- return sync_file_ioctl_merge(sync_file, arg);
-
- case SYNC_IOC_FENCE_INFO:
- return sync_file_ioctl_fence_info(sync_file, arg);
-
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations sync_file_fops = {
- .release = sync_file_release,
- .poll = sync_file_poll,
- .unlocked_ioctl = sync_file_ioctl,
- .compat_ioctl = sync_file_ioctl,
-};
-
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index d2a173433..b56885c14 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -20,10 +20,10 @@
#include <linux/spinlock.h>
#include <linux/fence.h>
-#include "uapi/sync.h"
+#include <linux/sync_file.h>
+#include <uapi/linux/sync_file.h>
struct sync_timeline;
-struct sync_file;
/**
* struct sync_timeline_ops - sync object implementation ops
@@ -86,38 +86,6 @@ static inline struct sync_timeline *fence_parent(struct fence *fence)
child_list_lock);
}
-struct sync_file_cb {
- struct fence_cb cb;
- struct fence *fence;
- struct sync_file *sync_file;
-};
-
-/**
- * struct sync_file - sync file to export to the userspace
- * @file: file representing this fence
- * @kref: reference count on fence.
- * @name: name of sync_file. Useful for debugging
- * @sync_file_list: membership in global file list
- * @num_fences number of sync_pts in the fence
- * @wq: wait queue for fence signaling
- * @status: 0: signaled, >0:active, <0: error
- * @cbs: sync_pts callback information
- */
-struct sync_file {
- struct file *file;
- struct kref kref;
- char name[32];
-#ifdef CONFIG_DEBUG_FS
- struct list_head sync_file_list;
-#endif
- int num_fences;
-
- wait_queue_head_t wq;
- atomic_t status;
-
- struct sync_file_cb cbs[];
-};
-
/*
* API for sync_timeline implementers
*/
@@ -167,61 +135,6 @@ void sync_timeline_signal(struct sync_timeline *obj);
*/
struct fence *sync_pt_create(struct sync_timeline *parent, int size);
-/**
- * sync_fence_create() - creates a sync fence
- * @name: name of fence to create
- * @fence: fence to add to the sync_fence
- *
- * Creates a sync_file containg @fence. Once this is called, the sync_file
- * takes ownership of @fence.
- */
-struct sync_file *sync_file_create(const char *name, struct fence *fence);
-
-/*
- * API for sync_file consumers
- */
-
-/**
- * sync_file_merge() - merge two sync_files
- * @name: name of new fence
- * @a: sync_file a
- * @b: sync_file b
- *
- * Creates a new sync_file which contains copies of all the fences in both
- * @a and @b. @a and @b remain valid, independent sync_file. Returns the
- * new merged sync_file or NULL in case of error.
- */
-struct sync_file *sync_file_merge(const char *name,
- struct sync_file *a, struct sync_file *b);
-
-/**
- * sync_file_fdget() - get a sync_file from an fd
- * @fd: fd referencing a fence
- *
- * Ensures @fd references a valid sync_file, increments the refcount of the
- * backing file. Returns the sync_file or NULL in case of error.
- */
-struct sync_file *sync_file_fdget(int fd);
-
-/**
- * sync_file_put() - puts a reference of a sync_file
- * @sync_file: sync_file to put
- *
- * Puts a reference on @sync_fence. If this is the last reference, the
- * sync_fil and all it's sync_pts will be freed
- */
-void sync_file_put(struct sync_file *sync_file);
-
-/**
- * sync_file_install() - installs a sync_file into a file descriptor
- * @sync_file: sync_file to install
- * @fd: file descriptor in which to install the fence
- *
- * Installs @sync_file into @fd. @fd's should be acquired through
- * get_unused_fd_flags(O_CLOEXEC).
- */
-void sync_file_install(struct sync_file *sync_file, int fd);
-
#ifdef CONFIG_DEBUG_FS
void sync_timeline_debug_add(struct sync_timeline *obj);
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
index 5a7ec58fb..5f57499c9 100644
--- a/drivers/staging/android/sync_debug.c
+++ b/drivers/staging/android/sync_debug.c
@@ -26,6 +26,7 @@
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
#include <linux/time64.h>
+#include <linux/sync_file.h>
#include "sw_sync.h"
#ifdef CONFIG_DEBUG_FS
@@ -262,8 +263,7 @@ static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
goto err;
}
- data.name[sizeof(data.name) - 1] = '\0';
- sync_file = sync_file_create(data.name, fence);
+ sync_file = sync_file_create(fence);
if (!sync_file) {
fence_put(fence);
err = -ENOMEM;
@@ -272,12 +272,12 @@ static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
data.fence = fd;
if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- sync_file_put(sync_file);
+ fput(sync_file->file);
err = -EFAULT;
goto err;
}
- sync_file_install(sync_file, fd);
+ fd_install(fd, sync_file->file);
return 0;
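
Note: the bulk of the sync changes record sync_file's graduation out of staging. The fd-backed fence container (sync_file_alloc/create/merge, the poll and ioctl handlers, and the uapi header deleted further below) now lives in the core tree behind <linux/sync_file.h> and <uapi/linux/sync_file.h>, so sync.c/sync.h keep only the timeline and fence-point machinery for the sw_sync test driver. Callers adapt as sync_debug.c does: sync_file_create() takes just the fence (names are gone), and the staging wrappers sync_file_install()/sync_file_put() give way to plain fd_install()/fput() on sync_file->file. The consumer-side shape, assuming a fence already in hand:

    int fd = get_unused_fd_flags(O_CLOEXEC);
    struct sync_file *sync_file;

    if (fd < 0)
        return fd;

    sync_file = sync_file_create(fence);    /* no name argument anymore */
    if (!sync_file) {
        put_unused_fd(fd);
        return -ENOMEM;
    }

    fd_install(fd, sync_file->file);        /* hand the fd to userspace */
    return fd;
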
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
deleted file mode 100644
index 914fd1005..000000000
--- a/drivers/staging/android/timed_gpio.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/* drivers/misc/timed_gpio.c
- *
- * Copyright (C) 2008 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/hrtimer.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/ktime.h>
-
-#include "timed_output.h"
-#include "timed_gpio.h"
-
-struct timed_gpio_data {
- struct timed_output_dev dev;
- struct hrtimer timer;
- spinlock_t lock;
- unsigned gpio;
- int max_timeout;
- u8 active_low;
-};
-
-static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
-{
- struct timed_gpio_data *data =
- container_of(timer, struct timed_gpio_data, timer);
-
- gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
- return HRTIMER_NORESTART;
-}
-
-static int gpio_get_time(struct timed_output_dev *dev)
-{
- struct timed_gpio_data *data;
- ktime_t t;
-
- data = container_of(dev, struct timed_gpio_data, dev);
-
- if (!hrtimer_active(&data->timer))
- return 0;
-
- t = hrtimer_get_remaining(&data->timer);
-
- return ktime_to_ms(t);
-}
-
-static void gpio_enable(struct timed_output_dev *dev, int value)
-{
- struct timed_gpio_data *data =
- container_of(dev, struct timed_gpio_data, dev);
- unsigned long flags;
-
- spin_lock_irqsave(&data->lock, flags);
-
- /* cancel previous timer and set GPIO according to value */
- hrtimer_cancel(&data->timer);
- gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
-
- if (value > 0) {
- if (value > data->max_timeout)
- value = data->max_timeout;
-
- hrtimer_start(&data->timer,
- ktime_set(value / 1000, (value % 1000) * 1000000),
- HRTIMER_MODE_REL);
- }
-
- spin_unlock_irqrestore(&data->lock, flags);
-}
-
-static int timed_gpio_probe(struct platform_device *pdev)
-{
- struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
- struct timed_gpio *cur_gpio;
- struct timed_gpio_data *gpio_data, *gpio_dat;
- int i, ret;
-
- if (!pdata)
- return -EBUSY;
-
- gpio_data = devm_kcalloc(&pdev->dev, pdata->num_gpios,
- sizeof(*gpio_data), GFP_KERNEL);
- if (!gpio_data)
- return -ENOMEM;
-
- for (i = 0; i < pdata->num_gpios; i++) {
- cur_gpio = &pdata->gpios[i];
- gpio_dat = &gpio_data[i];
-
- hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- gpio_dat->timer.function = gpio_timer_func;
- spin_lock_init(&gpio_dat->lock);
-
- gpio_dat->dev.name = cur_gpio->name;
- gpio_dat->dev.get_time = gpio_get_time;
- gpio_dat->dev.enable = gpio_enable;
- ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
- if (ret < 0)
- goto err_out;
- ret = timed_output_dev_register(&gpio_dat->dev);
- if (ret < 0) {
- gpio_free(cur_gpio->gpio);
- goto err_out;
- }
-
- gpio_dat->gpio = cur_gpio->gpio;
- gpio_dat->max_timeout = cur_gpio->max_timeout;
- gpio_dat->active_low = cur_gpio->active_low;
- gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
- }
-
- platform_set_drvdata(pdev, gpio_data);
-
- return 0;
-
-err_out:
- while (--i >= 0) {
- timed_output_dev_unregister(&gpio_data[i].dev);
- gpio_free(gpio_data[i].gpio);
- }
-
- return ret;
-}
-
-static int timed_gpio_remove(struct platform_device *pdev)
-{
- struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
- struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < pdata->num_gpios; i++) {
- timed_output_dev_unregister(&gpio_data[i].dev);
- gpio_free(gpio_data[i].gpio);
- }
-
- return 0;
-}
-
-static struct platform_driver timed_gpio_driver = {
- .probe = timed_gpio_probe,
- .remove = timed_gpio_remove,
- .driver = {
- .name = TIMED_GPIO_NAME,
- },
-};
-
-module_platform_driver(timed_gpio_driver);
-
-MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
-MODULE_DESCRIPTION("timed gpio driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h
deleted file mode 100644
index d29e169d7..000000000
--- a/drivers/staging/android/timed_gpio.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* include/linux/timed_gpio.h
- *
- * Copyright (C) 2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
-*/
-
-#ifndef _LINUX_TIMED_GPIO_H
-#define _LINUX_TIMED_GPIO_H
-
-#define TIMED_GPIO_NAME "timed-gpio"
-
-struct timed_gpio {
- const char *name;
- unsigned gpio;
- int max_timeout;
- u8 active_low;
-};
-
-struct timed_gpio_platform_data {
- int num_gpios;
- struct timed_gpio *gpios;
-};
-
-#endif
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
deleted file mode 100644
index aff9cdb00..000000000
--- a/drivers/staging/android/timed_output.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/* drivers/misc/timed_output.c
- *
- * Copyright (C) 2009 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "timed_output: " fmt
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/err.h>
-
-#include "timed_output.h"
-
-static struct class *timed_output_class;
-static atomic_t device_count;
-
-static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct timed_output_dev *tdev = dev_get_drvdata(dev);
- int remaining = tdev->get_time(tdev);
-
- return sprintf(buf, "%d\n", remaining);
-}
-
-static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct timed_output_dev *tdev = dev_get_drvdata(dev);
- int value;
- int rc;
-
- rc = kstrtoint(buf, 0, &value);
- if (rc != 0)
- return -EINVAL;
-
- tdev->enable(tdev, value);
-
- return size;
-}
-static DEVICE_ATTR_RW(enable);
-
-static struct attribute *timed_output_attrs[] = {
- &dev_attr_enable.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(timed_output);
-
-static int create_timed_output_class(void)
-{
- if (!timed_output_class) {
- timed_output_class = class_create(THIS_MODULE, "timed_output");
- if (IS_ERR(timed_output_class))
- return PTR_ERR(timed_output_class);
- atomic_set(&device_count, 0);
- timed_output_class->dev_groups = timed_output_groups;
- }
-
- return 0;
-}
-
-int timed_output_dev_register(struct timed_output_dev *tdev)
-{
- int ret;
-
- if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time)
- return -EINVAL;
-
- ret = create_timed_output_class();
- if (ret < 0)
- return ret;
-
- tdev->index = atomic_inc_return(&device_count);
- tdev->dev = device_create(timed_output_class, NULL,
- MKDEV(0, tdev->index), NULL, "%s", tdev->name);
- if (IS_ERR(tdev->dev))
- return PTR_ERR(tdev->dev);
-
- dev_set_drvdata(tdev->dev, tdev);
- tdev->state = 0;
- return 0;
-}
-EXPORT_SYMBOL_GPL(timed_output_dev_register);
-
-void timed_output_dev_unregister(struct timed_output_dev *tdev)
-{
- tdev->enable(tdev, 0);
- device_destroy(timed_output_class, MKDEV(0, tdev->index));
-}
-EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
-
-static int __init timed_output_init(void)
-{
- return create_timed_output_class();
-}
-device_initcall(timed_output_init);
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
deleted file mode 100644
index 13d2ca51c..000000000
--- a/drivers/staging/android/timed_output.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* include/linux/timed_output.h
- *
- * Copyright (C) 2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
-*/
-
-#ifndef _LINUX_TIMED_OUTPUT_H
-#define _LINUX_TIMED_OUTPUT_H
-
-struct timed_output_dev {
- const char *name;
-
- /* enable the output and set the timer */
- void (*enable)(struct timed_output_dev *sdev, int timeout);
-
- /* returns the current number of milliseconds remaining on the timer */
- int (*get_time)(struct timed_output_dev *sdev);
-
- /* private data */
- struct device *dev;
- int index;
- int state;
-};
-
-int timed_output_dev_register(struct timed_output_dev *dev);
-void timed_output_dev_unregister(struct timed_output_dev *dev);
-
-#endif
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
deleted file mode 100644
index a0cf357e5..000000000
--- a/drivers/staging/android/uapi/sync.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2012 Google, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _UAPI_LINUX_SYNC_H
-#define _UAPI_LINUX_SYNC_H
-
-#include <linux/ioctl.h>
-#include <linux/types.h>
-
-/**
- * struct sync_merge_data - data passed to merge ioctl
- * @fd2: file descriptor of second fence
- * @name: name of new fence
- * @fence: returns the fd of the new fence to userspace
- */
-struct sync_merge_data {
- __s32 fd2; /* fd of second fence */
- char name[32]; /* name of new fence */
- __s32 fence; /* fd on newly created fence */
-};
-
-/**
- * struct sync_fence_info - detailed fence information
- * @obj_name: name of parent sync_timeline
- * @driver_name: name of driver implementing the parent
- * @status: status of the fence 0:active 1:signaled <0:error
- * @timestamp_ns: timestamp of status change in nanoseconds
- */
-struct sync_fence_info {
- char obj_name[32];
- char driver_name[32];
- __s32 status;
- __u64 timestamp_ns;
-};
-
-/**
- * struct sync_file_info - data returned from fence info ioctl
- * @len: ioctl caller writes the size of the buffer its passing in.
- * ioctl returns length of sync_file_info returned to
- * userspace including pt_info.
- * @name: name of fence
- * @status: status of fence. 1: signaled 0:active <0:error
- * @sync_fence_info: array of sync_fence_info for every fence in the sync_file
- */
-struct sync_file_info {
- __u32 len;
- char name[32];
- __s32 status;
-
- __u8 sync_fence_info[0];
-};
-
-#define SYNC_IOC_MAGIC '>'
-
-/**
- * DOC: SYNC_IOC_MERGE - merge two fences
- *
- * Takes a struct sync_merge_data. Creates a new fence containing copies of
- * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
- * new fence's fd in sync_merge_data.fence
- */
-#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
-
-/**
- * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
- *
- * Takes a struct sync_file_info_data with extra space allocated for pt_info.
- * Caller should write the size of the buffer into len. On return, len is
- * updated to reflect the total size of the sync_file_info_data including
- * pt_info.
- *
- * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
- * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
- */
-#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2, struct sync_file_info)
-
-#endif /* _UAPI_LINUX_SYNC_H */
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index bb63ece4d..4de4fd06e 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -87,10 +87,10 @@ static const struct board_staging_clk lcdc0_clocks[] __initconst = {
static const struct board_staging_dev armadillo800eva_devices[] __initconst = {
{
- .pdev = &lcdc0_device,
- .clocks = lcdc0_clocks,
- .nclocks = ARRAY_SIZE(lcdc0_clocks),
- .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
+ .pdev = &lcdc0_device,
+ .clocks = lcdc0_clocks,
+ .nclocks = ARRAY_SIZE(lcdc0_clocks),
+ .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
},
};
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index 90c28016c..c7d7682b1 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -80,14 +80,14 @@ static void __comedi_buf_free(struct comedi_device *dev,
static void __comedi_buf_alloc(struct comedi_device *dev,
struct comedi_subdevice *s,
- unsigned n_pages)
+ unsigned int n_pages)
{
struct comedi_async *async = s->async;
struct page **pages = NULL;
struct comedi_buf_map *bm;
struct comedi_buf_page *buf;
unsigned long flags;
- unsigned i;
+ unsigned int i;
if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
dev_err(dev->class_dev,
@@ -208,7 +208,7 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
/* allocate new buffer */
if (new_size) {
- unsigned n_pages = new_size >> PAGE_SHIFT;
+ unsigned int n_pages = new_size >> PAGE_SHIFT;
__comedi_buf_alloc(dev, s, n_pages);
@@ -302,7 +302,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
{
struct comedi_async *async = s->async;
unsigned int count = 0;
- const unsigned num_sample_bytes = comedi_bytes_per_sample(s);
+ const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
async->munge_count += num_bytes;
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(comedi_buf_write_free);
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
- unsigned num_bytes;
+ unsigned int num_bytes;
if (!async)
return 0;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 7c7b477b0..629080f39 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -186,7 +186,7 @@ static bool comedi_clear_board_dev(struct comedi_device *dev)
return cleared;
}
-static struct comedi_device *comedi_clear_board_minor(unsigned minor)
+static struct comedi_device *comedi_clear_board_minor(unsigned int minor)
{
struct comedi_device *dev;
@@ -209,8 +209,8 @@ static void comedi_free_board_dev(struct comedi_device *dev)
}
}
-static struct comedi_subdevice
-*comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned minor)
+static struct comedi_subdevice *
+comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned int minor)
{
struct comedi_subdevice *s;
unsigned int i = minor - COMEDI_NUM_BOARD_MINORS;
@@ -223,7 +223,7 @@ static struct comedi_subdevice
return s;
}
-static struct comedi_device *comedi_dev_get_from_board_minor(unsigned minor)
+static struct comedi_device *comedi_dev_get_from_board_minor(unsigned int minor)
{
struct comedi_device *dev;
@@ -233,7 +233,8 @@ static struct comedi_device *comedi_dev_get_from_board_minor(unsigned minor)
return dev;
}
-static struct comedi_device *comedi_dev_get_from_subdevice_minor(unsigned minor)
+static struct comedi_device *
+comedi_dev_get_from_subdevice_minor(unsigned int minor)
{
struct comedi_device *dev;
struct comedi_subdevice *s;
@@ -258,7 +259,7 @@ static struct comedi_device *comedi_dev_get_from_subdevice_minor(unsigned minor)
* reference incremented. Return NULL if no COMEDI device exists with the
* specified minor device number.
*/
-struct comedi_device *comedi_dev_get_from_minor(unsigned minor)
+struct comedi_device *comedi_dev_get_from_minor(unsigned int minor)
{
if (minor < COMEDI_NUM_BOARD_MINORS)
return comedi_dev_get_from_board_minor(minor);
@@ -342,7 +343,8 @@ static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file)
}
static int resize_async_buffer(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned new_size)
+ struct comedi_subdevice *s,
+ unsigned int new_size)
{
struct comedi_async *async = s->async;
int retval;
@@ -616,19 +618,20 @@ static struct attribute *comedi_dev_attrs[] = {
ATTRIBUTE_GROUPS(comedi_dev);
static void __comedi_clear_subdevice_runflags(struct comedi_subdevice *s,
- unsigned bits)
+ unsigned int bits)
{
s->runflags &= ~bits;
}
static void __comedi_set_subdevice_runflags(struct comedi_subdevice *s,
- unsigned bits)
+ unsigned int bits)
{
s->runflags |= bits;
}
static void comedi_update_subdevice_runflags(struct comedi_subdevice *s,
- unsigned mask, unsigned bits)
+ unsigned int mask,
+ unsigned int bits)
{
unsigned long flags;
@@ -638,15 +641,15 @@ static void comedi_update_subdevice_runflags(struct comedi_subdevice *s,
spin_unlock_irqrestore(&s->spin_lock, flags);
}
-static unsigned __comedi_get_subdevice_runflags(struct comedi_subdevice *s)
+static unsigned int __comedi_get_subdevice_runflags(struct comedi_subdevice *s)
{
return s->runflags;
}
-static unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s)
+static unsigned int comedi_get_subdevice_runflags(struct comedi_subdevice *s)
{
unsigned long flags;
- unsigned runflags;
+ unsigned int runflags;
spin_lock_irqsave(&s->spin_lock, flags);
runflags = __comedi_get_subdevice_runflags(s);
@@ -654,12 +657,12 @@ static unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s)
return runflags;
}
-static bool comedi_is_runflags_running(unsigned runflags)
+static bool comedi_is_runflags_running(unsigned int runflags)
{
return runflags & COMEDI_SRF_RUNNING;
}
-static bool comedi_is_runflags_in_error(unsigned runflags)
+static bool comedi_is_runflags_in_error(unsigned int runflags)
{
return runflags & COMEDI_SRF_ERROR;
}
@@ -673,7 +676,7 @@ static bool comedi_is_runflags_in_error(unsigned runflags)
*/
bool comedi_is_subdevice_running(struct comedi_subdevice *s)
{
- unsigned runflags = comedi_get_subdevice_runflags(s);
+ unsigned int runflags = comedi_get_subdevice_runflags(s);
return comedi_is_runflags_running(runflags);
}
@@ -681,14 +684,14 @@ EXPORT_SYMBOL_GPL(comedi_is_subdevice_running);
static bool __comedi_is_subdevice_running(struct comedi_subdevice *s)
{
- unsigned runflags = __comedi_get_subdevice_runflags(s);
+ unsigned int runflags = __comedi_get_subdevice_runflags(s);
return comedi_is_runflags_running(runflags);
}
bool comedi_can_auto_free_spriv(struct comedi_subdevice *s)
{
- unsigned runflags = __comedi_get_subdevice_runflags(s);
+ unsigned int runflags = __comedi_get_subdevice_runflags(s);
return runflags & COMEDI_SRF_FREE_SPRIV;
}
@@ -2038,7 +2041,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- unsigned minor = iminor(file_inode(file));
+ unsigned int minor = iminor(file_inode(file));
struct comedi_file *cfp = file->private_data;
struct comedi_device *dev = cfp->dev;
int rc;
@@ -2342,7 +2345,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
add_wait_queue(&async->wait_head, &wait);
while (count == 0 && !retval) {
- unsigned runflags;
+ unsigned int runflags;
unsigned int wp, n1, n2;
set_current_state(TASK_INTERRUPTIBLE);
@@ -2485,7 +2488,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
n = min_t(size_t, m, nbytes);
if (n == 0) {
- unsigned runflags = comedi_get_subdevice_runflags(s);
+ unsigned int runflags =
+ comedi_get_subdevice_runflags(s);
if (!comedi_is_runflags_running(runflags)) {
if (comedi_is_runflags_in_error(runflags))
@@ -2573,7 +2577,7 @@ out:
static int comedi_open(struct inode *inode, struct file *file)
{
- const unsigned minor = iminor(inode);
+ const unsigned int minor = iminor(inode);
struct comedi_file *cfp;
struct comedi_device *dev = comedi_dev_get_from_minor(minor);
int rc;
@@ -2733,7 +2737,7 @@ struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device)
{
struct comedi_device *dev;
struct device *csdev;
- unsigned i;
+ unsigned int i;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -2791,7 +2795,7 @@ int comedi_alloc_subdevice_minor(struct comedi_subdevice *s)
{
struct comedi_device *dev = s->device;
struct device *csdev;
- unsigned i;
+ unsigned int i;
mutex_lock(&comedi_subdevice_minor_table_lock);
for (i = 0; i < COMEDI_NUM_SUBDEVICE_MINORS; ++i) {
@@ -2841,7 +2845,7 @@ void comedi_free_subdevice_minor(struct comedi_subdevice *s)
static void comedi_cleanup_board_minors(void)
{
struct comedi_device *dev;
- unsigned i;
+ unsigned int i;
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
dev = comedi_clear_board_minor(i);
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 115807215..dcb637665 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -173,7 +173,7 @@ struct comedi_subdevice {
void *lock;
void *busy;
- unsigned runflags;
+ unsigned int runflags;
spinlock_t spin_lock; /* generic spin-lock for COMEDI and drivers */
unsigned int io_bits;
@@ -566,7 +566,7 @@ struct comedi_device {
void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s);
-struct comedi_device *comedi_dev_get_from_minor(unsigned minor);
+struct comedi_device *comedi_dev_get_from_minor(unsigned int minor);
int comedi_dev_put(struct comedi_device *dev);
bool comedi_is_subdevice_running(struct comedi_subdevice *s);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index fc153c705..f092e5037 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -564,7 +564,7 @@ unsigned int comedi_handle_events(struct comedi_device *dev,
if (events == 0)
return events;
- if (events & COMEDI_CB_CANCEL_MASK)
+ if ((events & COMEDI_CB_CANCEL_MASK) && s->cancel)
s->cancel(dev, s);
comedi_event(dev, s);
@@ -575,38 +575,35 @@ EXPORT_SYMBOL_GPL(comedi_handle_events);
static int insn_rw_emulate_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct comedi_insn new_insn;
+ struct comedi_insn _insn;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int base_chan = (chan < 32) ? 0 : chan;
+ unsigned int _data[2];
int ret;
- static const unsigned channels_per_bitfield = 32;
-
- unsigned chan = CR_CHAN(insn->chanspec);
- const unsigned base_bitfield_channel =
- (chan < channels_per_bitfield) ? 0 : chan;
- unsigned int new_data[2];
- memset(new_data, 0, sizeof(new_data));
- memset(&new_insn, 0, sizeof(new_insn));
- new_insn.insn = INSN_BITS;
- new_insn.chanspec = base_bitfield_channel;
- new_insn.n = 2;
- new_insn.subdev = insn->subdev;
+ memset(_data, 0, sizeof(_data));
+ memset(&_insn, 0, sizeof(_insn));
+ _insn.insn = INSN_BITS;
+ _insn.chanspec = base_chan;
+ _insn.n = 2;
+ _insn.subdev = insn->subdev;
if (insn->insn == INSN_WRITE) {
if (!(s->subdev_flags & SDF_WRITABLE))
return -EINVAL;
- new_data[0] = 1 << (chan - base_bitfield_channel); /* mask */
- new_data[1] = data[0] ? (1 << (chan - base_bitfield_channel))
- : 0; /* bits */
+ _data[0] = 1 << (chan - base_chan); /* mask */
+ _data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */
}
- ret = s->insn_bits(dev, s, &new_insn, new_data);
+ ret = s->insn_bits(dev, s, &_insn, _data);
if (ret < 0)
return ret;
if (insn->insn == INSN_READ)
- data[0] = (new_data[1] >> (chan - base_bitfield_channel)) & 1;
+ data[0] = (_data[1] >> (chan - base_chan)) & 1;
return 1;
}
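
The emulation above leans on the INSN_BITS convention: _data[0] is a mask of
channels to update and _data[1] carries the new bit values, both relative to a
base channel. A worked example with illustrative values, writing 1 to channel 5
of a bit-oriented subdevice:

	unsigned int chan = 5;
	unsigned int base_chan = 0;		/* chan < 32 */
	unsigned int _data[2];

	_data[0] = 1 << (chan - base_chan);	/* mask: update bit 5 only */
	_data[1] = 1 << (chan - base_chan);	/* bits: drive bit 5 high */
	/* s->insn_bits() applies the masked update, returns all bit states */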
@@ -628,6 +625,9 @@ static int __comedi_device_postconfig_async(struct comedi_device *dev,
"async subdevices must have a do_cmdtest() function\n");
return -EINVAL;
}
+ if (!s->cancel)
+ dev_warn(dev->class_dev,
+ "async subdevices should have a cancel() function\n");
async = kzalloc(sizeof(*async), GFP_KERNEL);
if (!async)
diff --git a/drivers/staging/comedi/drivers/amcc_s5933.h b/drivers/staging/comedi/drivers/amcc_s5933.h
index d4b8c0195..f03e4c8c2 100644
--- a/drivers/staging/comedi/drivers/amcc_s5933.h
+++ b/drivers/staging/comedi/drivers/amcc_s5933.h
@@ -1,16 +1,14 @@
/*
- comedi/drivers/amcc_s5933.h
-
- Stuff for AMCC S5933 PCI Controller
-
- Author: Michal Dobes <dobes@tesnet.cz>
-
- Inspirated from general-purpose AMCC S5933 PCI Matchmaker driver
- made by Andrea Cisternino <acister@pcape1.pi.infn.it>
- and as result of espionage from MITE code made by David A. Schleef.
- Thanks to AMCC for their on-line documentation and bus master DMA
- example.
-*/
+ * Stuff for AMCC S5933 PCI Controller
+ *
+ * Author: Michal Dobes <dobes@tesnet.cz>
+ *
+ * Inspired by the general-purpose AMCC S5933 PCI Matchmaker driver
+ * made by Andrea Cisternino <acister@pcape1.pi.infn.it>
+ * and as a result of espionage from MITE code made by David A. Schleef.
+ * Thanks to AMCC for their on-line documentation and bus master DMA
+ * example.
+ */
#ifndef _AMCC_S5933_H_
#define _AMCC_S5933_H_
@@ -58,7 +56,7 @@
#define INTCSR_INTR_ASSERTED 0x800000
/****************************************************************************/
-/* AMCC - PCI non-volatile ram command register (byte 3 of master control/status register) */
+/* AMCC - PCI non-volatile ram command register (byte 3 of AMCC_OP_REG_MCSR) */
/****************************************************************************/
#define MCSR_NV_LOAD_LOW_ADDR 0x0
#define MCSR_NV_LOAD_HIGH_ADDR 0x20
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index d1539e798..f6e4e9842 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -101,7 +101,7 @@ struct dio200_subdev_8255 {
};
struct dio200_subdev_intr {
- spinlock_t spinlock;
+ spinlock_t spinlock; /* protects the 'active' flag */
unsigned int ofs;
unsigned int valid_isns;
unsigned int enabled_isns;
@@ -221,7 +221,7 @@ static void dio200_start_intr(struct comedi_device *dev,
struct dio200_subdev_intr *subpriv = s->private;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int n;
- unsigned isn_bits;
+ unsigned int isn_bits;
/* Determine interrupt sources to enable. */
isn_bits = 0;
@@ -284,9 +284,9 @@ static int dio200_handle_read_intr(struct comedi_device *dev,
{
const struct dio200_board *board = dev->board_ptr;
struct dio200_subdev_intr *subpriv = s->private;
- unsigned triggered;
- unsigned intstat;
- unsigned cur_enabled;
+ unsigned int triggered;
+ unsigned int intstat;
+ unsigned int cur_enabled;
unsigned long flags;
triggered = 0;
@@ -439,7 +439,7 @@ static int dio200_subdev_intr_cmd(struct comedi_device *dev,
static int dio200_subdev_intr_init(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int offset,
- unsigned valid_isns)
+ unsigned int valid_isns)
{
const struct dio200_board *board = dev->board_ptr;
struct dio200_subdev_intr *subpriv;
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index b1946ce6e..58b0b6b1a 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -1,46 +1,44 @@
/*
- comedi/drivers/amplc_pc263.c
- Driver for Amplicon PC263 and PCI263 relay boards.
-
- Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ * Driver for Amplicon PC263 relay board.
+ *
+ * Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
/*
-Driver: amplc_pc263
-Description: Amplicon PC263
-Author: Ian Abbott <abbotti@mev.co.uk>
-Devices: [Amplicon] PC263 (pc263)
-Updated: Fri, 12 Apr 2013 15:19:36 +0100
-Status: works
-
-Configuration options:
- [0] - I/O port base address
-
-The board appears as one subdevice, with 16 digital outputs, each
-connected to a reed-relay. Relay contacts are closed when output is 1.
-The state of the outputs can be read.
-*/
+ * Driver: amplc_pc263
+ * Description: Amplicon PC263
+ * Author: Ian Abbott <abbotti@mev.co.uk>
+ * Devices: [Amplicon] PC263 (pc263)
+ * Updated: Fri, 12 Apr 2013 15:19:36 +0100
+ * Status: works
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ *
+ * The board appears as one subdevice, with 16 digital outputs, each
+ * connected to a reed-relay. Relay contacts are closed when output is 1.
+ * The state of the outputs can be read.
+ */
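
As a usage illustration (not part of this patch), a userspace program could
drive one relay through comedilib, assuming the board is /dev/comedi0 and the
digital output subdevice is index 0:

#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");

	if (!dev)
		return 1;
	comedi_dio_write(dev, 0, 3, 1);	/* close relay 3's contacts */
	comedi_close(dev);
	return 0;
}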
#include <linux/module.h>
#include "../comedidev.h"
/* PC263 registers */
-
-/*
- * Board descriptions for Amplicon PC263.
- */
+#define PC263_DO_0_7_REG 0x00
+#define PC263_DO_8_15_REG 0x01
struct pc263_board {
const char *name;
@@ -58,8 +56,8 @@ static int pc263_do_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase);
- outb((s->state >> 8) & 0xff, dev->iobase + 1);
+ outb(s->state & 0xff, dev->iobase + PC263_DO_0_7_REG);
+ outb((s->state >> 8) & 0xff, dev->iobase + PC263_DO_8_15_REG);
}
data[1] = s->state;
@@ -80,28 +78,30 @@ static int pc263_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
+ /* Digital Output subdevice */
s = &dev->subdevices[0];
- /* digital output subdevice */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pc263_do_insn_bits;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pc263_do_insn_bits;
+
/* read initial relay state */
- s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8);
+ s->state = inb(dev->iobase + PC263_DO_0_7_REG) |
+ (inb(dev->iobase + PC263_DO_8_15_REG) << 8);
return 0;
}
static struct comedi_driver amplc_pc263_driver = {
- .driver_name = "amplc_pc263",
- .module = THIS_MODULE,
- .attach = pc263_attach,
- .detach = comedi_legacy_detach,
- .board_name = &pc263_boards[0].name,
- .offset = sizeof(struct pc263_board),
- .num_names = ARRAY_SIZE(pc263_boards),
+ .driver_name = "amplc_pc263",
+ .module = THIS_MODULE,
+ .attach = pc263_attach,
+ .detach = comedi_legacy_detach,
+ .board_name = &pc263_boards[0].name,
+ .offset = sizeof(struct pc263_board),
+ .num_names = ARRAY_SIZE(pc263_boards),
};
module_comedi_driver(amplc_pc263_driver);
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index cac011fdd..2e6decf1b 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -132,48 +132,53 @@
* DACCON values.
*/
/* (r/w) Scan trigger. */
-#define PCI224_DACCON_TRIG_MASK (7 << 0)
-#define PCI224_DACCON_TRIG_NONE (0 << 0) /* none */
-#define PCI224_DACCON_TRIG_SW (1 << 0) /* software trig */
-#define PCI224_DACCON_TRIG_EXTP (2 << 0) /* ext +ve edge */
-#define PCI224_DACCON_TRIG_EXTN (3 << 0) /* ext -ve edge */
-#define PCI224_DACCON_TRIG_Z2CT0 (4 << 0) /* Z2 CT0 out */
-#define PCI224_DACCON_TRIG_Z2CT1 (5 << 0) /* Z2 CT1 out */
-#define PCI224_DACCON_TRIG_Z2CT2 (6 << 0) /* Z2 CT2 out */
+#define PCI224_DACCON_TRIG(x) (((x) & 0x7) << 0)
+#define PCI224_DACCON_TRIG_MASK PCI224_DACCON_TRIG(7)
+#define PCI224_DACCON_TRIG_NONE PCI224_DACCON_TRIG(0) /* none */
+#define PCI224_DACCON_TRIG_SW PCI224_DACCON_TRIG(1) /* soft trig */
+#define PCI224_DACCON_TRIG_EXTP PCI224_DACCON_TRIG(2) /* ext + edge */
+#define PCI224_DACCON_TRIG_EXTN PCI224_DACCON_TRIG(3) /* ext - edge */
+#define PCI224_DACCON_TRIG_Z2CT0 PCI224_DACCON_TRIG(4) /* Z2 CT0 out */
+#define PCI224_DACCON_TRIG_Z2CT1 PCI224_DACCON_TRIG(5) /* Z2 CT1 out */
+#define PCI224_DACCON_TRIG_Z2CT2 PCI224_DACCON_TRIG(6) /* Z2 CT2 out */
/* (r/w) Polarity (PCI224 only, PCI234 always bipolar!). */
-#define PCI224_DACCON_POLAR_MASK (1 << 3)
-#define PCI224_DACCON_POLAR_UNI (0 << 3) /* range [0,Vref] */
-#define PCI224_DACCON_POLAR_BI (1 << 3) /* range [-Vref,Vref] */
+#define PCI224_DACCON_POLAR(x) (((x) & 0x1) << 3)
+#define PCI224_DACCON_POLAR_MASK PCI224_DACCON_POLAR(1)
+#define PCI224_DACCON_POLAR_UNI PCI224_DACCON_POLAR(0) /* [0,+V] */
+#define PCI224_DACCON_POLAR_BI PCI224_DACCON_POLAR(1) /* [-V,+V] */
/* (r/w) Internal Vref (PCI224 only, when LK1 in position 1-2). */
-#define PCI224_DACCON_VREF_MASK (3 << 4)
-#define PCI224_DACCON_VREF_1_25 (0 << 4) /* Vref = 1.25V */
-#define PCI224_DACCON_VREF_2_5 (1 << 4) /* Vref = 2.5V */
-#define PCI224_DACCON_VREF_5 (2 << 4) /* Vref = 5V */
-#define PCI224_DACCON_VREF_10 (3 << 4) /* Vref = 10V */
+#define PCI224_DACCON_VREF(x) (((x) & 0x3) << 4)
+#define PCI224_DACCON_VREF_MASK PCI224_DACCON_VREF(3)
+#define PCI224_DACCON_VREF_1_25 PCI224_DACCON_VREF(0) /* 1.25V */
+#define PCI224_DACCON_VREF_2_5 PCI224_DACCON_VREF(1) /* 2.5V */
+#define PCI224_DACCON_VREF_5 PCI224_DACCON_VREF(2) /* 5V */
+#define PCI224_DACCON_VREF_10 PCI224_DACCON_VREF(3) /* 10V */
/* (r/w) Wraparound mode enable (to play back stored waveform). */
-#define PCI224_DACCON_FIFOWRAP (1 << 7)
+#define PCI224_DACCON_FIFOWRAP BIT(7)
/* (r/w) FIFO enable. It MUST be set! */
-#define PCI224_DACCON_FIFOENAB (1 << 8)
+#define PCI224_DACCON_FIFOENAB BIT(8)
/* (r/w) FIFO interrupt trigger level (most values are not very useful). */
-#define PCI224_DACCON_FIFOINTR_MASK (7 << 9)
-#define PCI224_DACCON_FIFOINTR_EMPTY (0 << 9) /* when empty */
-#define PCI224_DACCON_FIFOINTR_NEMPTY (1 << 9) /* when not empty */
-#define PCI224_DACCON_FIFOINTR_NHALF (2 << 9) /* when not half full */
-#define PCI224_DACCON_FIFOINTR_HALF (3 << 9) /* when half full */
-#define PCI224_DACCON_FIFOINTR_NFULL (4 << 9) /* when not full */
-#define PCI224_DACCON_FIFOINTR_FULL (5 << 9) /* when full */
+#define PCI224_DACCON_FIFOINTR(x) (((x) & 0x7) << 9)
+#define PCI224_DACCON_FIFOINTR_MASK PCI224_DACCON_FIFOINTR(7)
+#define PCI224_DACCON_FIFOINTR_EMPTY PCI224_DACCON_FIFOINTR(0) /* empty */
+#define PCI224_DACCON_FIFOINTR_NEMPTY PCI224_DACCON_FIFOINTR(1) /* !empty */
+#define PCI224_DACCON_FIFOINTR_NHALF PCI224_DACCON_FIFOINTR(2) /* !half */
+#define PCI224_DACCON_FIFOINTR_HALF PCI224_DACCON_FIFOINTR(3) /* half */
+#define PCI224_DACCON_FIFOINTR_NFULL PCI224_DACCON_FIFOINTR(4) /* !full */
+#define PCI224_DACCON_FIFOINTR_FULL PCI224_DACCON_FIFOINTR(5) /* full */
/* (r-o) FIFO fill level. */
-#define PCI224_DACCON_FIFOFL_MASK (7 << 12)
-#define PCI224_DACCON_FIFOFL_EMPTY (1 << 12) /* 0 */
-#define PCI224_DACCON_FIFOFL_ONETOHALF (0 << 12) /* [1,2048] */
-#define PCI224_DACCON_FIFOFL_HALFTOFULL (4 << 12) /* [2049,4095] */
-#define PCI224_DACCON_FIFOFL_FULL (6 << 12) /* 4096 */
+#define PCI224_DACCON_FIFOFL(x) (((x) & 0x7) << 12)
+#define PCI224_DACCON_FIFOFL_MASK PCI224_DACCON_FIFOFL(7)
+#define PCI224_DACCON_FIFOFL_EMPTY PCI224_DACCON_FIFOFL(1) /* 0 */
+#define PCI224_DACCON_FIFOFL_ONETOHALF PCI224_DACCON_FIFOFL(0) /* 1-2048 */
+#define PCI224_DACCON_FIFOFL_HALFTOFULL PCI224_DACCON_FIFOFL(4) /* 2049-4095 */
+#define PCI224_DACCON_FIFOFL_FULL PCI224_DACCON_FIFOFL(6) /* 4096 */
/* (r-o) DAC busy flag. */
-#define PCI224_DACCON_BUSY (1 << 15)
+#define PCI224_DACCON_BUSY BIT(15)
/* (w-o) FIFO reset. */
-#define PCI224_DACCON_FIFORESET (1 << 12)
+#define PCI224_DACCON_FIFORESET BIT(12)
/* (w-o) Global reset (not sure what it does). */
-#define PCI224_DACCON_GLOBALRESET (1 << 13)
+#define PCI224_DACCON_GLOBALRESET BIT(13)
/*
* DAC FIFO size.
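
The parameterised field helpers above keep each shift and width in one place;
the named values are built by passing the field value through the helper. An
illustrative composition of a DACCON value (this particular combination is an
example, not taken from the patch):

	unsigned short daccon = PCI224_DACCON_TRIG_Z2CT0 |
				PCI224_DACCON_POLAR_BI |
				PCI224_DACCON_FIFOENAB |
				PCI224_DACCON_FIFOINTR_NHALF;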
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 907c39cc8..42945de31 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -237,47 +237,50 @@
/*
* DACCON read-write values.
*/
-#define PCI230_DAC_OR_UNI (0 << 0) /* Output range unipolar */
-#define PCI230_DAC_OR_BIP (1 << 0) /* Output range bipolar */
-#define PCI230_DAC_OR_MASK (1 << 0)
+#define PCI230_DAC_OR(x) (((x) & 0x1) << 0)
+#define PCI230_DAC_OR_UNI PCI230_DAC_OR(0) /* Output unipolar */
+#define PCI230_DAC_OR_BIP PCI230_DAC_OR(1) /* Output bipolar */
+#define PCI230_DAC_OR_MASK PCI230_DAC_OR(1)
/*
* The following applies only if DAC FIFO support is enabled in the EXTFUNC
* register (and only for PCI230+ hardware version 2 onwards).
*/
-#define PCI230P2_DAC_FIFO_EN (1 << 8) /* FIFO enable */
+#define PCI230P2_DAC_FIFO_EN BIT(8) /* FIFO enable */
/*
* The following apply only if the DAC FIFO is enabled (and only for PCI230+
* hardware version 2 onwards).
*/
-#define PCI230P2_DAC_TRIG_NONE (0 << 2) /* No trigger */
-#define PCI230P2_DAC_TRIG_SW (1 << 2) /* Software trigger trigger */
-#define PCI230P2_DAC_TRIG_EXTP (2 << 2) /* EXTTRIG +ve edge trigger */
-#define PCI230P2_DAC_TRIG_EXTN (3 << 2) /* EXTTRIG -ve edge trigger */
-#define PCI230P2_DAC_TRIG_Z2CT0 (4 << 2) /* CT0-OUT +ve edge trigger */
-#define PCI230P2_DAC_TRIG_Z2CT1 (5 << 2) /* CT1-OUT +ve edge trigger */
-#define PCI230P2_DAC_TRIG_Z2CT2 (6 << 2) /* CT2-OUT +ve edge trigger */
-#define PCI230P2_DAC_TRIG_MASK (7 << 2)
-#define PCI230P2_DAC_FIFO_WRAP (1 << 7) /* FIFO wraparound mode */
-#define PCI230P2_DAC_INT_FIFO_EMPTY (0 << 9) /* FIFO interrupt empty */
-#define PCI230P2_DAC_INT_FIFO_NEMPTY (1 << 9)
-#define PCI230P2_DAC_INT_FIFO_NHALF (2 << 9) /* FIFO intr not half full */
-#define PCI230P2_DAC_INT_FIFO_HALF (3 << 9)
-#define PCI230P2_DAC_INT_FIFO_NFULL (4 << 9) /* FIFO interrupt not full */
-#define PCI230P2_DAC_INT_FIFO_FULL (5 << 9)
-#define PCI230P2_DAC_INT_FIFO_MASK (7 << 9)
+#define PCI230P2_DAC_TRIG(x) (((x) & 0x7) << 2)
+#define PCI230P2_DAC_TRIG_NONE PCI230P2_DAC_TRIG(0) /* none */
+#define PCI230P2_DAC_TRIG_SW PCI230P2_DAC_TRIG(1) /* soft trig */
+#define PCI230P2_DAC_TRIG_EXTP PCI230P2_DAC_TRIG(2) /* ext + edge */
+#define PCI230P2_DAC_TRIG_EXTN PCI230P2_DAC_TRIG(3) /* ext - edge */
+#define PCI230P2_DAC_TRIG_Z2CT0 PCI230P2_DAC_TRIG(4) /* Z2 CT0 out */
+#define PCI230P2_DAC_TRIG_Z2CT1 PCI230P2_DAC_TRIG(5) /* Z2 CT1 out */
+#define PCI230P2_DAC_TRIG_Z2CT2 PCI230P2_DAC_TRIG(6) /* Z2 CT2 out */
+#define PCI230P2_DAC_TRIG_MASK PCI230P2_DAC_TRIG(7)
+#define PCI230P2_DAC_FIFO_WRAP BIT(7) /* FIFO wraparound mode */
+#define PCI230P2_DAC_INT_FIFO(x) (((x) & 7) << 9)
+#define PCI230P2_DAC_INT_FIFO_EMPTY PCI230P2_DAC_INT_FIFO(0) /* empty */
+#define PCI230P2_DAC_INT_FIFO_NEMPTY PCI230P2_DAC_INT_FIFO(1) /* !empty */
+#define PCI230P2_DAC_INT_FIFO_NHALF PCI230P2_DAC_INT_FIFO(2) /* !half */
+#define PCI230P2_DAC_INT_FIFO_HALF PCI230P2_DAC_INT_FIFO(3) /* half */
+#define PCI230P2_DAC_INT_FIFO_NFULL PCI230P2_DAC_INT_FIFO(4) /* !full */
+#define PCI230P2_DAC_INT_FIFO_FULL PCI230P2_DAC_INT_FIFO(5) /* full */
+#define PCI230P2_DAC_INT_FIFO_MASK PCI230P2_DAC_INT_FIFO(7)
/*
* DACCON read-only values.
*/
-#define PCI230_DAC_BUSY (1 << 1) /* DAC busy. */
+#define PCI230_DAC_BUSY BIT(1) /* DAC busy. */
/*
* The following apply only if the DAC FIFO is enabled (and only for PCI230+
* hardware version 2 onwards).
*/
-#define PCI230P2_DAC_FIFO_UNDERRUN_LATCHED (1 << 5) /* Underrun error */
-#define PCI230P2_DAC_FIFO_EMPTY (1 << 13) /* FIFO empty */
-#define PCI230P2_DAC_FIFO_FULL (1 << 14) /* FIFO full */
-#define PCI230P2_DAC_FIFO_HALF (1 << 15) /* FIFO half full */
+#define PCI230P2_DAC_FIFO_UNDERRUN_LATCHED BIT(5) /* Underrun error */
+#define PCI230P2_DAC_FIFO_EMPTY BIT(13) /* FIFO empty */
+#define PCI230P2_DAC_FIFO_FULL BIT(14) /* FIFO full */
+#define PCI230P2_DAC_FIFO_HALF BIT(15) /* FIFO half full */
/*
* DACCON write-only, transient values.
@@ -286,8 +289,8 @@
* The following apply only if the DAC FIFO is enabled (and only for PCI230+
* hardware version 2 onwards).
*/
-#define PCI230P2_DAC_FIFO_UNDERRUN_CLEAR (1 << 5) /* Clear underrun */
-#define PCI230P2_DAC_FIFO_RESET (1 << 12) /* FIFO reset */
+#define PCI230P2_DAC_FIFO_UNDERRUN_CLEAR BIT(5) /* Clear underrun */
+#define PCI230P2_DAC_FIFO_RESET BIT(12) /* FIFO reset */
/*
* PCI230+ hardware version 2 DAC FIFO levels.
@@ -304,44 +307,48 @@
/*
* ADCCON read/write values.
*/
-#define PCI230_ADC_TRIG_NONE (0 << 0) /* No trigger */
-#define PCI230_ADC_TRIG_SW (1 << 0) /* Software trigger trigger */
-#define PCI230_ADC_TRIG_EXTP (2 << 0) /* EXTTRIG +ve edge trigger */
-#define PCI230_ADC_TRIG_EXTN (3 << 0) /* EXTTRIG -ve edge trigger */
-#define PCI230_ADC_TRIG_Z2CT0 (4 << 0) /* CT0-OUT +ve edge trigger */
-#define PCI230_ADC_TRIG_Z2CT1 (5 << 0) /* CT1-OUT +ve edge trigger */
-#define PCI230_ADC_TRIG_Z2CT2 (6 << 0) /* CT2-OUT +ve edge trigger */
-#define PCI230_ADC_TRIG_MASK (7 << 0)
-#define PCI230_ADC_IR_UNI (0 << 3) /* Input range unipolar */
-#define PCI230_ADC_IR_BIP (1 << 3) /* Input range bipolar */
-#define PCI230_ADC_IR_MASK (1 << 3)
-#define PCI230_ADC_IM_SE (0 << 4) /* Input mode single ended */
-#define PCI230_ADC_IM_DIF (1 << 4) /* Input mode differential */
-#define PCI230_ADC_IM_MASK (1 << 4)
-#define PCI230_ADC_FIFO_EN (1 << 8) /* FIFO enable */
-#define PCI230_ADC_INT_FIFO_EMPTY (0 << 9)
-#define PCI230_ADC_INT_FIFO_NEMPTY (1 << 9) /* FIFO interrupt not empty */
-#define PCI230_ADC_INT_FIFO_NHALF (2 << 9)
-#define PCI230_ADC_INT_FIFO_HALF (3 << 9) /* FIFO interrupt half full */
-#define PCI230_ADC_INT_FIFO_NFULL (4 << 9)
-#define PCI230_ADC_INT_FIFO_FULL (5 << 9) /* FIFO interrupt full */
-#define PCI230P_ADC_INT_FIFO_THRESH (7 << 9) /* FIFO interrupt threshold */
-#define PCI230_ADC_INT_FIFO_MASK (7 << 9)
+#define PCI230_ADC_TRIG(x) (((x) & 0x7) << 0)
+#define PCI230_ADC_TRIG_NONE PCI230_ADC_TRIG(0) /* none */
+#define PCI230_ADC_TRIG_SW PCI230_ADC_TRIG(1) /* soft trig */
+#define PCI230_ADC_TRIG_EXTP PCI230_ADC_TRIG(2) /* ext + edge */
+#define PCI230_ADC_TRIG_EXTN PCI230_ADC_TRIG(3) /* ext - edge */
+#define PCI230_ADC_TRIG_Z2CT0 PCI230_ADC_TRIG(4) /* Z2 CT0 out */
+#define PCI230_ADC_TRIG_Z2CT1 PCI230_ADC_TRIG(5) /* Z2 CT1 out */
+#define PCI230_ADC_TRIG_Z2CT2 PCI230_ADC_TRIG(6) /* Z2 CT2 out */
+#define PCI230_ADC_TRIG_MASK PCI230_ADC_TRIG(7)
+#define PCI230_ADC_IR(x) (((x) & 0x1) << 3)
+#define PCI230_ADC_IR_UNI PCI230_ADC_IR(0) /* Input unipolar */
+#define PCI230_ADC_IR_BIP PCI230_ADC_IR(1) /* Input bipolar */
+#define PCI230_ADC_IR_MASK PCI230_ADC_IR(1)
+#define PCI230_ADC_IM(x) (((x) & 0x1) << 4)
+#define PCI230_ADC_IM_SE PCI230_ADC_IM(0) /* single ended */
+#define PCI230_ADC_IM_DIF PCI230_ADC_IM(1) /* differential */
+#define PCI230_ADC_IM_MASK PCI230_ADC_IM(1)
+#define PCI230_ADC_FIFO_EN BIT(8) /* FIFO enable */
+#define PCI230_ADC_INT_FIFO(x) (((x) & 0x7) << 9)
+#define PCI230_ADC_INT_FIFO_EMPTY PCI230_ADC_INT_FIFO(0) /* empty */
+#define PCI230_ADC_INT_FIFO_NEMPTY PCI230_ADC_INT_FIFO(1) /* !empty */
+#define PCI230_ADC_INT_FIFO_NHALF PCI230_ADC_INT_FIFO(2) /* !half */
+#define PCI230_ADC_INT_FIFO_HALF PCI230_ADC_INT_FIFO(3) /* half */
+#define PCI230_ADC_INT_FIFO_NFULL PCI230_ADC_INT_FIFO(4) /* !full */
+#define PCI230_ADC_INT_FIFO_FULL PCI230_ADC_INT_FIFO(5) /* full */
+#define PCI230P_ADC_INT_FIFO_THRESH PCI230_ADC_INT_FIFO(7) /* threshold */
+#define PCI230_ADC_INT_FIFO_MASK PCI230_ADC_INT_FIFO(7)
/*
* ADCCON write-only, transient values.
*/
-#define PCI230_ADC_FIFO_RESET (1 << 12) /* FIFO reset */
-#define PCI230_ADC_GLOB_RESET (1 << 13) /* Global reset */
+#define PCI230_ADC_FIFO_RESET BIT(12) /* FIFO reset */
+#define PCI230_ADC_GLOB_RESET BIT(13) /* Global reset */
/*
* ADCCON read-only values.
*/
-#define PCI230_ADC_BUSY (1 << 15) /* ADC busy */
-#define PCI230_ADC_FIFO_EMPTY (1 << 12) /* FIFO empty */
-#define PCI230_ADC_FIFO_FULL (1 << 13) /* FIFO full */
-#define PCI230_ADC_FIFO_HALF (1 << 14) /* FIFO half full */
-#define PCI230_ADC_FIFO_FULL_LATCHED (1 << 5) /* FIFO overrun occurred */
+#define PCI230_ADC_BUSY BIT(15) /* ADC busy */
+#define PCI230_ADC_FIFO_EMPTY BIT(12) /* FIFO empty */
+#define PCI230_ADC_FIFO_FULL BIT(13) /* FIFO full */
+#define PCI230_ADC_FIFO_HALF BIT(14) /* FIFO half full */
+#define PCI230_ADC_FIFO_FULL_LATCHED BIT(5) /* FIFO overrun occurred */
/*
* PCI230 ADC FIFO levels.
@@ -353,10 +360,10 @@
* PCI230+ EXTFUNC values.
*/
/* Route EXTTRIG pin to external gate inputs. */
-#define PCI230P_EXTFUNC_GAT_EXTTRIG (1 << 0)
+#define PCI230P_EXTFUNC_GAT_EXTTRIG BIT(0)
/* PCI230+ hardware version 2 values. */
/* Allow DAC FIFO to be enabled. */
-#define PCI230P2_EXTFUNC_DACFIFO (1 << 1)
+#define PCI230P2_EXTFUNC_DACFIFO BIT(1)
/*
* Counter/timer clock input configuration sources.
@@ -379,8 +386,12 @@
#define GAT_GND 1 /* GND (i.e. disabled) */
#define GAT_EXT 2 /* external gate input (PPCn on PCI230) */
#define GAT_NOUTNM2 3 /* inverted output of channel-2 modulo total */
-/* Macro to construct gate input configuration register value. */
-#define GAT_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7))
+
+static inline unsigned int pci230_gat_config(unsigned int chan,
+ unsigned int src)
+{
+ return ((chan & 3) << 3) | (src & 7);
+}
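
Converting the GAT_CONFIG() macro to an inline function gives the arguments
type checking while producing the same register value. A usage sketch
mirroring the call sites updated below:

	unsigned char zgat;

	/* gate counter/timer 1 on (channel field = 1, source = GAT_VCC) */
	zgat = pci230_gat_config(1, GAT_VCC);
	outb(zgat, dev->iobase + PCI230_ZGAT_SCE);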
/*
* Summary of CLK_OUTNM1 and GAT_NOUTNM2 connections for PCI230 and PCI260:
@@ -398,20 +409,20 @@
* Interrupt enables/status register values.
*/
#define PCI230_INT_DISABLE 0
-#define PCI230_INT_PPI_C0 (1 << 0)
-#define PCI230_INT_PPI_C3 (1 << 1)
-#define PCI230_INT_ADC (1 << 2)
-#define PCI230_INT_ZCLK_CT1 (1 << 5)
+#define PCI230_INT_PPI_C0 BIT(0)
+#define PCI230_INT_PPI_C3 BIT(1)
+#define PCI230_INT_ADC BIT(2)
+#define PCI230_INT_ZCLK_CT1 BIT(5)
/* For PCI230+ hardware version 2 when DAC FIFO enabled. */
-#define PCI230P2_INT_DAC (1 << 4)
+#define PCI230P2_INT_DAC BIT(4)
/*
* (Potentially) shared resources and their owners
*/
enum {
- RES_Z2CT0 = (1U << 0), /* Z2-CT0 */
- RES_Z2CT1 = (1U << 1), /* Z2-CT1 */
- RES_Z2CT2 = (1U << 2) /* Z2-CT2 */
+ RES_Z2CT0 = BIT(0), /* Z2-CT0 */
+ RES_Z2CT1 = BIT(1), /* Z2-CT1 */
+ RES_Z2CT2 = BIT(2) /* Z2-CT2 */
};
enum {
@@ -626,10 +637,10 @@ static void pci230_release_all_resources(struct comedi_device *dev,
pci230_release_shared(dev, (unsigned char)~0, owner);
}
-static unsigned int pci230_divide_ns(uint64_t ns, unsigned int timebase,
+static unsigned int pci230_divide_ns(u64 ns, unsigned int timebase,
unsigned int flags)
{
- uint64_t div;
+ u64 div;
unsigned int rem;
div = ns;
@@ -652,7 +663,7 @@ static unsigned int pci230_divide_ns(uint64_t ns, unsigned int timebase,
* Given desired period in ns, returns the required internal clock source
* and gets the initial count.
*/
-static unsigned int pci230_choose_clk_count(uint64_t ns, unsigned int *count,
+static unsigned int pci230_choose_clk_count(u64 ns, unsigned int *count,
unsigned int flags)
{
unsigned int clk_src, cnt;
@@ -676,7 +687,7 @@ static void pci230_ns_to_single_timer(unsigned int *ns, unsigned int flags)
}
static void pci230_ct_setup_ns_mode(struct comedi_device *dev, unsigned int ct,
- unsigned int mode, uint64_t ns,
+ unsigned int mode, u64 ns,
unsigned int flags)
{
unsigned int clk_src;
@@ -1263,7 +1274,8 @@ static void pci230_ao_start(struct comedi_device *dev,
irqflags);
}
/* Set CT1 gate high to start counting. */
- outb(GAT_CONFIG(1, GAT_VCC), dev->iobase + PCI230_ZGAT_SCE);
+ outb(pci230_gat_config(1, GAT_VCC),
+ dev->iobase + PCI230_ZGAT_SCE);
break;
case TRIG_INT:
async->inttrig = pci230_ao_inttrig_scan_begin;
@@ -1351,7 +1363,8 @@ static int pci230_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
* cmd->scan_begin_arg is sampling period in ns.
* Gate it off for now.
*/
- outb(GAT_CONFIG(1, GAT_GND), dev->iobase + PCI230_ZGAT_SCE);
+ outb(pci230_gat_config(1, GAT_GND),
+ dev->iobase + PCI230_ZGAT_SCE);
pci230_ct_setup_ns_mode(dev, 1, I8254_MODE3,
cmd->scan_begin_arg,
cmd->flags);
@@ -1792,9 +1805,9 @@ static int pci230_ai_inttrig_scan_begin(struct comedi_device *dev,
spin_lock_irqsave(&devpriv->ai_stop_spinlock, irqflags);
if (devpriv->ai_cmd_started) {
/* Trigger scan by waggling CT0 gate source. */
- zgat = GAT_CONFIG(0, GAT_GND);
+ zgat = pci230_gat_config(0, GAT_GND);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
- zgat = GAT_CONFIG(0, GAT_VCC);
+ zgat = pci230_gat_config(0, GAT_VCC);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
}
spin_unlock_irqrestore(&devpriv->ai_stop_spinlock, irqflags);
@@ -1926,20 +1939,20 @@ static void pci230_ai_start(struct comedi_device *dev,
* Conversion timer CT2 needs to be gated by
* inverted output of monostable CT2.
*/
- zgat = GAT_CONFIG(2, GAT_NOUTNM2);
+ zgat = pci230_gat_config(2, GAT_NOUTNM2);
} else {
/*
* Conversion timer CT2 needs to be gated on
* continuously.
*/
- zgat = GAT_CONFIG(2, GAT_VCC);
+ zgat = pci230_gat_config(2, GAT_VCC);
}
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
if (cmd->scan_begin_src != TRIG_FOLLOW) {
/* Set monostable CT0 trigger source. */
switch (cmd->scan_begin_src) {
default:
- zgat = GAT_CONFIG(0, GAT_VCC);
+ zgat = pci230_gat_config(0, GAT_VCC);
break;
case TRIG_EXT:
/*
@@ -1950,21 +1963,21 @@ static void pci230_ai_start(struct comedi_device *dev,
* input in order to use it as an external scan
* trigger.
*/
- zgat = GAT_CONFIG(0, GAT_EXT);
+ zgat = pci230_gat_config(0, GAT_EXT);
break;
case TRIG_TIMER:
/*
* Monostable CT0 triggered by rising edge on
* inverted output of CT1 (falling edge on CT1).
*/
- zgat = GAT_CONFIG(0, GAT_NOUTNM2);
+ zgat = pci230_gat_config(0, GAT_NOUTNM2);
break;
case TRIG_INT:
/*
* Monostable CT0 is triggered by inttrig
* function waggling the CT0 gate source.
*/
- zgat = GAT_CONFIG(0, GAT_VCC);
+ zgat = pci230_gat_config(0, GAT_VCC);
break;
}
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
@@ -1974,7 +1987,7 @@ static void pci230_ai_start(struct comedi_device *dev,
* Scan period timer CT1 needs to be
* gated on to start counting.
*/
- zgat = GAT_CONFIG(1, GAT_VCC);
+ zgat = pci230_gat_config(1, GAT_VCC);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
break;
case TRIG_INT:
@@ -2216,7 +2229,7 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
* Note, counter/timer output 2 can be monitored on the
* connector: PCI230 pin 21, PCI260 pin 18.
*/
- zgat = GAT_CONFIG(2, GAT_GND);
+ zgat = pci230_gat_config(2, GAT_GND);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
/* Set counter/timer 2 to the specified conversion period. */
pci230_ct_setup_ns_mode(dev, 2, I8254_MODE3, cmd->convert_arg,
@@ -2234,10 +2247,10 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
* monostable to stop it triggering. The trigger
* source will be changed later.
*/
- zgat = GAT_CONFIG(0, GAT_VCC);
+ zgat = pci230_gat_config(0, GAT_VCC);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
pci230_ct_setup_ns_mode(dev, 0, I8254_MODE1,
- ((uint64_t)cmd->convert_arg *
+ ((u64)cmd->convert_arg *
cmd->scan_end_arg),
CMDF_ROUND_UP);
if (cmd->scan_begin_src == TRIG_TIMER) {
@@ -2247,7 +2260,7 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
*
* Set up CT1 but gate it off for now.
*/
- zgat = GAT_CONFIG(1, GAT_GND);
+ zgat = pci230_gat_config(1, GAT_GND);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
pci230_ct_setup_ns_mode(dev, 1, I8254_MODE3,
cmd->scan_begin_arg,
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
index b6768aa90..8d4069bc5 100644
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ b/drivers/staging/comedi/drivers/amplc_pci263.c
@@ -1,49 +1,53 @@
/*
- comedi/drivers/amplc_pci263.c
- Driver for Amplicon PCI263 relay board.
+ * Driver for Amplicon PCI263 relay board.
+ *
+ * Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
/*
-Driver: amplc_pci263
-Description: Amplicon PCI263
-Author: Ian Abbott <abbotti@mev.co.uk>
-Devices: [Amplicon] PCI263 (amplc_pci263)
-Updated: Fri, 12 Apr 2013 15:19:36 +0100
-Status: works
-
-Configuration options: not applicable, uses PCI auto config
-
-The board appears as one subdevice, with 16 digital outputs, each
-connected to a reed-relay. Relay contacts are closed when output is 1.
-The state of the outputs can be read.
-*/
+ * Driver: amplc_pci263
+ * Description: Amplicon PCI263
+ * Author: Ian Abbott <abbotti@mev.co.uk>
+ * Devices: [Amplicon] PCI263 (amplc_pci263)
+ * Updated: Fri, 12 Apr 2013 15:19:36 +0100
+ * Status: works
+ *
+ * Configuration options: not applicable, uses PCI auto config
+ *
+ * The board appears as one subdevice, with 16 digital outputs, each
+ * connected to a reed-relay. Relay contacts are closed when output is 1.
+ * The state of the outputs can be read.
+ */
#include <linux/module.h>
#include "../comedi_pci.h"
+/* PCI263 registers */
+#define PCI263_DO_0_7_REG 0x00
+#define PCI263_DO_8_15_REG 0x01
+
static int pci263_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase);
- outb((s->state >> 8) & 0xff, dev->iobase + 1);
+ outb(s->state & 0xff, dev->iobase + PCI263_DO_0_7_REG);
+ outb((s->state >> 8) & 0xff, dev->iobase + PCI263_DO_8_15_REG);
}
data[1] = s->state;
@@ -67,16 +71,18 @@ static int pci263_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
+ /* Digital Output subdevice */
s = &dev->subdevices[0];
- /* digital output subdevice */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pci263_do_insn_bits;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pci263_do_insn_bits;
+
/* read initial relay state */
- s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8);
+ s->state = inb(dev->iobase + PCI263_DO_0_7_REG) |
+ (inb(dev->iobase + PCI263_DO_8_15_REG) << 8);
return 0;
}
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index 1a109e30d..8ee732571 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -47,8 +47,8 @@
*/
#define C6XDIGIO_DATA_REG 0x00
#define C6XDIGIO_DATA_CHAN(x) (((x) + 1) << 4)
-#define C6XDIGIO_DATA_PWM (1 << 5)
-#define C6XDIGIO_DATA_ENCODER (1 << 6)
+#define C6XDIGIO_DATA_PWM BIT(5)
+#define C6XDIGIO_DATA_ENCODER BIT(6)
#define C6XDIGIO_STATUS_REG 0x01
#define C6XDIGIO_CTRL_REG 0x02
diff --git a/drivers/staging/comedi/drivers/comedi_8254.h b/drivers/staging/comedi/drivers/comedi_8254.h
index f4610ead6..a12c29455 100644
--- a/drivers/staging/comedi/drivers/comedi_8254.h
+++ b/drivers/staging/comedi/drivers/comedi_8254.h
@@ -53,13 +53,15 @@ struct comedi_subdevice;
#define I8254_COUNTER2_REG 0x02
#define I8254_CTRL_REG 0x03
#define I8254_CTRL_SEL_CTR(x) ((x) << 6)
-#define I8254_CTRL_READBACK_COUNT ((3 << 6) | (1 << 4))
-#define I8254_CTRL_READBACK_STATUS ((3 << 6) | (1 << 5))
+#define I8254_CTRL_READBACK(x) (I8254_CTRL_SEL_CTR(3) | BIT(x))
+#define I8254_CTRL_READBACK_COUNT I8254_CTRL_READBACK(4)
+#define I8254_CTRL_READBACK_STATUS I8254_CTRL_READBACK(5)
#define I8254_CTRL_READBACK_SEL_CTR(x) (2 << (x))
-#define I8254_CTRL_LATCH (0 << 4)
-#define I8254_CTRL_LSB_ONLY (1 << 4)
-#define I8254_CTRL_MSB_ONLY (2 << 4)
-#define I8254_CTRL_LSB_MSB (3 << 4)
+#define I8254_CTRL_RW(x) (((x) & 0x3) << 4)
+#define I8254_CTRL_LATCH I8254_CTRL_RW(0)
+#define I8254_CTRL_LSB_ONLY I8254_CTRL_RW(1)
+#define I8254_CTRL_MSB_ONLY I8254_CTRL_RW(2)
+#define I8254_CTRL_LSB_MSB I8254_CTRL_RW(3)
/* counter maps zero to 0x10000 */
#define I8254_MAX_COUNT 0x10000
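
Illustrative control words built from the reworked macros; each expands to the
same value the old literal defines produced:

	unsigned int latch_ctr2 = I8254_CTRL_SEL_CTR(2) | I8254_CTRL_LATCH;
	unsigned int readback0 = I8254_CTRL_READBACK_STATUS |
				 I8254_CTRL_READBACK_SEL_CTR(0);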
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 87d86130d..2cd5aa687 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -26,7 +26,7 @@
* Much of the functionality of this driver was determined from reading
* the source code for the Windows driver.
*
- * The FPGA on the board requires fimware, which is available from
+ * The FPGA on the board requires firmware, which is available from
* http://www.comedi.org in the comedi_nonfree_firmware tarball.
*
* Configuration options: not applicable, uses PCI auto config
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index 3be10963f..e0a34c268 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -1,98 +1,82 @@
/*
- comedi/drivers/das1800.c
- Driver for Keitley das1700/das1800 series boards
- Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: das1800
-Description: Keithley Metrabyte DAS1800 (& compatibles)
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [Keithley Metrabyte] DAS-1701ST (das-1701st),
- DAS-1701ST-DA (das-1701st-da), DAS-1701/AO (das-1701ao),
- DAS-1702ST (das-1702st), DAS-1702ST-DA (das-1702st-da),
- DAS-1702HR (das-1702hr), DAS-1702HR-DA (das-1702hr-da),
- DAS-1702/AO (das-1702ao), DAS-1801ST (das-1801st),
- DAS-1801ST-DA (das-1801st-da), DAS-1801HC (das-1801hc),
- DAS-1801AO (das-1801ao), DAS-1802ST (das-1802st),
- DAS-1802ST-DA (das-1802st-da), DAS-1802HR (das-1802hr),
- DAS-1802HR-DA (das-1802hr-da), DAS-1802HC (das-1802hc),
- DAS-1802AO (das-1802ao)
-Status: works
-
-The waveform analog output on the 'ao' cards is not supported.
-If you need it, send me (Frank Hess) an email.
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ (optional, required for timed or externally triggered conversions)
- [2] - DMA0 (optional, requires irq)
- [3] - DMA1 (optional, requires irq and dma0)
-*/
-/*
+ * Comedi driver for Keithley DAS-1700/DAS-1800 series boards
+ * Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
-This driver supports the following Keithley boards:
-
-das-1701st
-das-1701st-da
-das-1701ao
-das-1702st
-das-1702st-da
-das-1702hr
-das-1702hr-da
-das-1702ao
-das-1801st
-das-1801st-da
-das-1801hc
-das-1801ao
-das-1802st
-das-1802st-da
-das-1802hr
-das-1802hr-da
-das-1802hc
-das-1802ao
-
-Options:
- [0] - base io address
- [1] - irq (optional, required for timed or externally triggered conversions)
- [2] - dma0 (optional, requires irq)
- [3] - dma1 (optional, requires irq and dma0)
-
-irq can be omitted, although the cmd interface will not work without it.
-
-analog input cmd triggers supported:
- start_src: TRIG_NOW | TRIG_EXT
- scan_begin_src: TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT
- scan_end_src: TRIG_COUNT
- convert_src: TRIG_TIMER | TRIG_EXT (TRIG_EXT requires scan_begin_src == TRIG_FOLLOW)
- stop_src: TRIG_COUNT | TRIG_EXT | TRIG_NONE
-
-scan_begin_src triggers TRIG_TIMER and TRIG_EXT use the card's
-'burst mode' which limits the valid conversion time to 64 microseconds
-(convert_arg <= 64000). This limitation does not apply if scan_begin_src
-is TRIG_FOLLOW.
-
-NOTES:
-Only the DAS-1801ST has been tested by me.
-Unipolar and bipolar ranges cannot be mixed in the channel/gain list.
-
-TODO:
- Make it automatically allocate irq and dma channels if they are not specified
- Add support for analog out on 'ao' cards
- read insn for analog out
-*/
+/*
+ * Driver: das1800
+ * Description: Keithley Metrabyte DAS1800 (& compatibles)
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Devices: [Keithley Metrabyte] DAS-1701ST (das-1701st),
+ * DAS-1701ST-DA (das-1701st-da), DAS-1701/AO (das-1701ao),
+ * DAS-1702ST (das-1702st), DAS-1702ST-DA (das-1702st-da),
+ * DAS-1702HR (das-1702hr), DAS-1702HR-DA (das-1702hr-da),
+ * DAS-1702/AO (das-1702ao), DAS-1801ST (das-1801st),
+ * DAS-1801ST-DA (das-1801st-da), DAS-1801HC (das-1801hc),
+ * DAS-1801AO (das-1801ao), DAS-1802ST (das-1802st),
+ * DAS-1802ST-DA (das-1802st-da), DAS-1802HR (das-1802hr),
+ * DAS-1802HR-DA (das-1802hr-da), DAS-1802HC (das-1802hc),
+ * DAS-1802AO (das-1802ao)
+ * Status: works
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - IRQ (optional, required for analog input cmd support)
+ * [2] - DMA0 (optional, requires irq)
+ * [3] - DMA1 (optional, requires irq and dma0)
+ *
+ * analog input cmd triggers supported:
+ *
+ * start_src TRIG_NOW command starts immediately
+ * TRIG_EXT command starts on external pin TGIN
+ *
+ * scan_begin_src TRIG_FOLLOW paced/external scans start immediately
+ * TRIG_TIMER burst scans start periodically
+ * TRIG_EXT burst scans start on external pin XPCLK
+ *
+ * scan_end_src TRIG_COUNT scan ends after last channel
+ *
+ * convert_src TRIG_TIMER paced/burst conversions are timed
+ * TRIG_EXT conversions on external pin XPCLK
+ * (requires scan_begin_src == TRIG_FOLLOW)
+ *
+ * stop_src TRIG_COUNT command stops after stop_arg scans
+ * TRIG_EXT command stops on external pin TGIN
+ * TRIG_NONE command runs until canceled
+ *
+ * If TRIG_EXT is used for both the start_src and stop_src, the first TGIN
+ * trigger starts the command, and the second trigger will stop it. If only
+ * one of them is TRIG_EXT, the trigger either starts or stops the command.
+ * The external pin TGIN is normally set for negative edge triggering. It
+ * can be set to positive edge with the CR_INVERT flag. If TRIG_EXT is used
+ * for both the start_src and stop_src they must have the same polarity.
+ *
+ * For 'burst' scans, the conversion period is limited to a maximum of
+ * 64 microseconds (convert_arg <= 64000); the limitation does not apply
+ * to 'paced' scans. The minimum conversion period is limited by the
+ * board (convert_arg >= ai_speed). The fastest periods are not always
+ * achievable depending on the board setup (see user manual).
+ *
+ * NOTES:
+ * Only the DAS-1801ST has been tested by me.
+ * Unipolar and bipolar ranges cannot be mixed in the channel/gain list.
+ *
+ * The waveform analog output on the 'ao' cards is not supported.
+ * If you need it, send me (Frank Hess) an email.
+ */
#include <linux/module.h>
#include <linux/interrupt.h>
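
A sketch of an analog input command consistent with the trigger menu
documented above (the argument values are examples only):

	struct comedi_cmd cmd = {
		.subdev		= 0,
		.start_src	= TRIG_NOW,	/* start immediately */
		.scan_begin_src	= TRIG_FOLLOW,	/* 'paced' scans */
		.convert_src	= TRIG_TIMER,	/* timed conversions */
		.convert_arg	= 10000,	/* 10 us between conversions */
		.scan_end_src	= TRIG_COUNT,
		.scan_end_arg	= 4,		/* == chanlist_len */
		.stop_src	= TRIG_NONE,	/* runs until cancelled */
	};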
@@ -107,7 +91,6 @@ TODO:
/* misc. defines */
#define DAS1800_SIZE 16 /* uses 16 io addresses */
#define FIFO_SIZE 1024 /* 1024 sample fifo */
-#define UNIPOLAR 0x4 /* bit that determines whether input range is uni/bipolar */
#define DMA_BUF_SIZE 0x1ff00 /* size in bytes of dma buffers */
/* Registers for the das1800 */
@@ -125,6 +108,7 @@ TODO:
#define CGSL 0x8
#define TGEN 0x10
#define TGSL 0x20
+#define TGPL 0x40
#define ATEN 0x80
#define DAS1800_CONTROL_B 0x5
#define DMA_CH5 0x1
@@ -133,7 +117,7 @@ TODO:
#define DMA_CH5_CH6 0x5
#define DMA_CH6_CH7 0x6
#define DMA_CH7_CH5 0x7
-#define DMA_ENABLED 0x3 /* mask used to determine if dma is enabled */
+#define DMA_ENABLED 0x3
#define DMA_DUAL 0x4
#define IRQ3 0x8
#define IRQ5 0x10
@@ -151,319 +135,214 @@ TODO:
#define SD 0x40
#define UB 0x80
#define DAS1800_STATUS 0x7
-/* bits that prevent interrupt status bits (and CVEN) from being cleared on write */
-#define CLEAR_INTR_MASK (CVEN_MASK | 0x1f)
#define INT 0x1
#define DMATC 0x2
#define CT0TC 0x8
#define OVF 0x10
#define FHF 0x20
#define FNE 0x40
-#define CVEN_MASK 0x40 /* masks CVEN on write */
#define CVEN 0x80
+#define CVEN_MASK 0x40
+#define CLEAR_INTR_MASK (CVEN_MASK | 0x1f)
#define DAS1800_BURST_LENGTH 0x8
#define DAS1800_BURST_RATE 0x9
#define DAS1800_QRAM_ADDRESS 0xa
#define DAS1800_COUNTER 0xc
-#define IOBASE2 0x400 /* offset of additional ioports used on 'ao' cards */
+#define IOBASE2 0x400
-enum {
- das1701st, das1701st_da, das1702st, das1702st_da, das1702hr,
- das1702hr_da,
- das1701ao, das1702ao, das1801st, das1801st_da, das1802st, das1802st_da,
- das1802hr, das1802hr_da, das1801hc, das1802hc, das1801ao, das1802ao
-};
-
-/* analog input ranges */
-static const struct comedi_lrange range_ai_das1801 = {
+static const struct comedi_lrange das1801_ai_range = {
8, {
- BIP_RANGE(5),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.02),
- UNI_RANGE(5),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.02)
+ BIP_RANGE(5), /* bipolar gain = 1 */
+ BIP_RANGE(1), /* bipolar gain = 10 */
+ BIP_RANGE(0.1), /* bipolar gain = 50 */
+ BIP_RANGE(0.02), /* bipolar gain = 250 */
+ UNI_RANGE(5), /* unipolar gain = 1 */
+ UNI_RANGE(1), /* unipolar gain = 10 */
+ UNI_RANGE(0.1), /* unipolar gain = 50 */
+ UNI_RANGE(0.02) /* unipolar gain = 250 */
}
};
-static const struct comedi_lrange range_ai_das1802 = {
+static const struct comedi_lrange das1802_ai_range = {
8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
+ BIP_RANGE(10), /* bipolar gain = 1 */
+ BIP_RANGE(5), /* bipolar gain = 2 */
+ BIP_RANGE(2.5), /* bipolar gain = 4 */
+ BIP_RANGE(1.25), /* bipolar gain = 8 */
+ UNI_RANGE(10), /* unipolar gain = 1 */
+ UNI_RANGE(5), /* unipolar gain = 2 */
+ UNI_RANGE(2.5), /* unipolar gain = 4 */
+ UNI_RANGE(1.25) /* unipolar gain = 8 */
}
};
+/*
+ * The waveform analog outputs on the 'ao' boards are not currently
+ * supported. They have a comedi_lrange of:
+ * { 2, { BIP_RANGE(10), BIP_RANGE(5) } }
+ */
+
+enum das1800_boardid {
+ BOARD_DAS1701ST,
+ BOARD_DAS1701ST_DA,
+ BOARD_DAS1702ST,
+ BOARD_DAS1702ST_DA,
+ BOARD_DAS1702HR,
+ BOARD_DAS1702HR_DA,
+ BOARD_DAS1701AO,
+ BOARD_DAS1702AO,
+ BOARD_DAS1801ST,
+ BOARD_DAS1801ST_DA,
+ BOARD_DAS1802ST,
+ BOARD_DAS1802ST_DA,
+ BOARD_DAS1802HR,
+ BOARD_DAS1802HR_DA,
+ BOARD_DAS1801HC,
+ BOARD_DAS1802HC,
+ BOARD_DAS1801AO,
+ BOARD_DAS1802AO
+};
+
+/* board probe id values (hi byte of the digital input register) */
+#define DAS1800_ID_ST_DA 0x3
+#define DAS1800_ID_HR_DA 0x4
+#define DAS1800_ID_AO 0x5
+#define DAS1800_ID_HR 0x6
+#define DAS1800_ID_ST 0x7
+#define DAS1800_ID_HC 0x8
+
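The id values let the attach code check the configured board type against the
hardware. A probe sketch (the digital input register name is assumed from the
existing driver, not shown in this hunk):

	unsigned char id = (inb(dev->iobase + DAS1800_DIGITAL) >> 4) & 0xf;

	if (id != board->id)
		dev_err(dev->class_dev,
			"probed id 0x%x does not match board\n", id);
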
struct das1800_board {
const char *name;
- int ai_speed; /* max conversion period in nanoseconds */
- int resolution; /* bits of ai resolution */
- int qram_len; /* length of card's channel / gain queue */
- int common; /* supports AREF_COMMON flag */
- int do_n_chan; /* number of digital output channels */
- int ao_ability; /* 0 == no analog out, 1 == basic analog out, 2 == waveform analog out */
- int ao_n_chan; /* number of analog out channels */
- const struct comedi_lrange *range_ai; /* available input ranges */
+ unsigned char id;
+ unsigned int ai_speed;
+ unsigned int is_01_series:1;
};
-/* Warning: the maximum conversion speeds listed below are
- * not always achievable depending on board setup (see
- * user manual.)
- */
static const struct das1800_board das1800_boards[] = {
- {
- .name = "das-1701st",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1701st-da",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1702st",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1702st-da",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1702hr",
- .ai_speed = 20000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1702hr-da",
- .ai_speed = 20000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1701ao",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1702ao",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1801st",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1801st-da",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1802st",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1802st-da",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1802hr",
- .ai_speed = 10000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1802hr-da",
- .ai_speed = 10000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1801hc",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 64,
- .common = 0,
- .do_n_chan = 8,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1802hc",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 64,
- .common = 0,
- .do_n_chan = 8,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1801ao",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1802ao",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
+ [BOARD_DAS1701ST] = {
+ .name = "das-1701st",
+ .id = DAS1800_ID_ST,
+ .ai_speed = 6250,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1701ST_DA] = {
+ .name = "das-1701st-da",
+ .id = DAS1800_ID_ST_DA,
+ .ai_speed = 6250,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1702ST] = {
+ .name = "das-1702st",
+ .id = DAS1800_ID_ST,
+ .ai_speed = 6250,
+ },
+ [BOARD_DAS1702ST_DA] = {
+ .name = "das-1702st-da",
+ .id = DAS1800_ID_ST_DA,
+ .ai_speed = 6250,
+ },
+ [BOARD_DAS1702HR] = {
+ .name = "das-1702hr",
+ .id = DAS1800_ID_HR,
+ .ai_speed = 20000,
+ },
+ [BOARD_DAS1702HR_DA] = {
+ .name = "das-1702hr-da",
+ .id = DAS1800_ID_HR_DA,
+ .ai_speed = 20000,
+ },
+ [BOARD_DAS1701AO] = {
+ .name = "das-1701ao",
+ .id = DAS1800_ID_AO,
+ .ai_speed = 6250,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1702AO] = {
+ .name = "das-1702ao",
+ .id = DAS1800_ID_AO,
+ .ai_speed = 6250,
+ },
+ [BOARD_DAS1801ST] = {
+ .name = "das-1801st",
+ .id = DAS1800_ID_ST,
+ .ai_speed = 3000,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1801ST_DA] = {
+ .name = "das-1801st-da",
+ .id = DAS1800_ID_ST_DA,
+ .ai_speed = 3000,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1802ST] = {
+ .name = "das-1802st",
+ .id = DAS1800_ID_ST,
+ .ai_speed = 3000,
+ },
+ [BOARD_DAS1802ST_DA] = {
+ .name = "das-1802st-da",
+ .id = DAS1800_ID_ST_DA,
+ .ai_speed = 3000,
+ },
+ [BOARD_DAS1802HR] = {
+ .name = "das-1802hr",
+ .id = DAS1800_ID_HR,
+ .ai_speed = 10000,
+ },
+ [BOARD_DAS1802HR_DA] = {
+ .name = "das-1802hr-da",
+ .id = DAS1800_ID_HR_DA,
+ .ai_speed = 10000,
+ },
+ [BOARD_DAS1801HC] = {
+ .name = "das-1801hc",
+ .id = DAS1800_ID_HC,
+ .ai_speed = 3000,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1802HC] = {
+ .name = "das-1802hc",
+ .id = DAS1800_ID_HC,
+ .ai_speed = 3000,
+ },
+ [BOARD_DAS1801AO] = {
+ .name = "das-1801ao",
+ .id = DAS1800_ID_AO,
+ .ai_speed = 3000,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1802AO] = {
+ .name = "das-1802ao",
+ .id = DAS1800_ID_AO,
+ .ai_speed = 3000,
+ },
};
struct das1800_private {
struct comedi_isadma *dma;
- int irq_dma_bits; /* bits for control register b */
- /* dma bits for control register b, stored so that dma can be
- * turned on and off */
+ int irq_dma_bits;
int dma_bits;
- uint16_t *fifo_buf; /* bounce buffer for analog input FIFO */
- unsigned long iobase2; /* secondary io address used for analog out on 'ao' boards */
- unsigned short ao_update_bits; /* remembers the last write to the
- * 'update' dac */
-};
-
-/* analog out range for 'ao' boards */
-/*
-static const struct comedi_lrange range_ao_2 = {
- 2, {
- BIP_RANGE(10),
- BIP_RANGE(5)
- }
+ unsigned short *fifo_buf;
+ unsigned long iobase2;
+ bool ai_is_unipolar;
};
-*/
-
-static inline uint16_t munge_bipolar_sample(const struct comedi_device *dev,
- uint16_t sample)
-{
- const struct das1800_board *board = dev->board_ptr;
-
- sample += 1 << (board->resolution - 1);
- return sample;
-}
-static void munge_data(struct comedi_device *dev, uint16_t *array,
- unsigned int num_elements)
+static void das1800_ai_munge(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ void *data, unsigned int num_bytes,
+ unsigned int start_chan_index)
{
+ struct das1800_private *devpriv = dev->private;
+ unsigned short *array = data;
+ unsigned int num_samples = comedi_bytes_to_samples(s, num_bytes);
unsigned int i;
- int unipolar;
- /* see if card is using a unipolar or bipolar range so we can munge data correctly */
- unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB;
+ if (devpriv->ai_is_unipolar)
+ return;
- /* convert to unsigned type if we are in a bipolar mode */
- if (!unipolar) {
- for (i = 0; i < num_elements; i++)
- array[i] = munge_bipolar_sample(dev, array[i]);
- }
+ for (i = 0; i < num_samples; i++)
+ array[i] = comedi_offset_munge(s, array[i]);
}
static void das1800_handle_fifo_half_full(struct comedi_device *dev,
@@ -473,7 +352,6 @@ static void das1800_handle_fifo_half_full(struct comedi_device *dev,
unsigned int nsamples = comedi_nsamples_left(s, FIFO_SIZE / 2);
insw(dev->iobase + DAS1800_FIFO, devpriv->fifo_buf, nsamples);
- munge_data(dev, devpriv->fifo_buf, nsamples);
comedi_buf_write_samples(s, devpriv->fifo_buf, nsamples);
}
@@ -482,14 +360,9 @@ static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
{
struct comedi_cmd *cmd = &s->async->cmd;
unsigned short dpnt;
- int unipolar;
-
- unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB;
while (inb(dev->iobase + DAS1800_STATUS) & FNE) {
dpnt = inw(dev->iobase + DAS1800_FIFO);
- /* convert to unsigned type */
- dpnt = munge_bipolar_sample(dev, dpnt);
comedi_buf_write_samples(s, &dpnt, 1);
if (cmd->stop_src == TRIG_COUNT &&
@@ -498,7 +371,6 @@ static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
}
}
-/* Utility function used by das1800_flush_dma() and das1800_handle_dma() */
static void das1800_flush_dma_channel(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_isadma_desc *desc)
@@ -511,12 +383,9 @@ static void das1800_flush_dma_channel(struct comedi_device *dev,
nsamples = comedi_bytes_to_samples(s, nbytes);
nsamples = comedi_nsamples_left(s, nsamples);
- munge_data(dev, desc->virt_addr, nsamples);
comedi_buf_write_samples(s, desc->virt_addr, nsamples);
}
-/* flushes remaining data from board when external trigger has stopped acquisition
- * and we are using dma transfers */
static void das1800_flush_dma(struct comedi_device *dev,
struct comedi_subdevice *s)
{
@@ -560,7 +429,8 @@ static void das1800_handle_dma(struct comedi_device *dev,
}
}
-static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
+static int das1800_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct das1800_private *devpriv = dev->private;
struct comedi_isadma *dma = devpriv->dma;
@@ -583,7 +453,6 @@ static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
-/* the guts of the interrupt handler, that is shared with das1800_ai_poll */
static void das1800_ai_handler(struct comedi_device *dev)
{
struct das1800_private *devpriv = dev->private;
@@ -592,17 +461,16 @@ static void das1800_ai_handler(struct comedi_device *dev)
struct comedi_cmd *cmd = &async->cmd;
unsigned int status = inb(dev->iobase + DAS1800_STATUS);
- /* select adc for base address + 0 */
+ /* select adc register (spinlock is already held) */
outb(ADC, dev->iobase + DAS1800_SELECT);
- /* dma buffer full */
- if (devpriv->irq_dma_bits & DMA_ENABLED) {
- /* look for data from dma transfer even if dma terminal count hasn't happened yet */
+
+ /* get samples with dma, fifo, or polled as necessary */
+ if (devpriv->irq_dma_bits & DMA_ENABLED)
das1800_handle_dma(dev, s, status);
- } else if (status & FHF) { /* if fifo half full */
+ else if (status & FHF)
das1800_handle_fifo_half_full(dev, s);
- } else if (status & FNE) { /* if fifo not empty */
+ else if (status & FNE)
das1800_handle_fifo_not_empty(dev, s);
- }
/* if the card's fifo has overflowed */
if (status & OVF) {
@@ -618,7 +486,7 @@ static void das1800_ai_handler(struct comedi_device *dev)
if (status & CT0TC) {
/* clear CT0TC interrupt bit */
outb(CLEAR_INTR_MASK & ~CT0TC, dev->iobase + DAS1800_STATUS);
- /* make sure we get all remaining data from board before quitting */
+ /* get all remaining samples before quitting */
if (devpriv->irq_dma_bits & DMA_ENABLED)
das1800_flush_dma(dev, s);
else
@@ -637,9 +505,14 @@ static int das1800_ai_poll(struct comedi_device *dev,
{
unsigned long flags;
- /* prevent race with interrupt handler */
+ /*
+ * Protects the indirect addressing selected by DAS1800_SELECT
+ * in das1800_ai_handler(); it also prevents a race with
+ * das1800_interrupt().
+ */
spin_lock_irqsave(&dev->spinlock, flags);
+
das1800_ai_handler(dev);
+
spin_unlock_irqrestore(&dev->spinlock, flags);
return comedi_buf_n_bytes_ready(s);
@@ -655,9 +528,12 @@ static irqreturn_t das1800_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
- /* Prevent race with das1800_ai_poll() on multi processor systems.
- * Also protects indirect addressing in das1800_ai_handler */
+ /*
+ * Protects the indirect addressing selected by DAS1800_SELECT
+ * in das1800_ai_handler(); it also prevents a race with
+ * das1800_ai_poll().
+ */
spin_lock(&dev->spinlock);
+
status = inb(dev->iobase + DAS1800_STATUS);
/* if interrupt was not caused by das-1800 */
@@ -674,46 +550,87 @@ static irqreturn_t das1800_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
-/* converts requested conversion timing to timing compatible with
- * hardware, used only when card is in 'burst mode'
- */
-static unsigned int burst_convert_arg(unsigned int convert_arg, int flags)
+static int das1800_ai_fixup_paced_timing(struct comedi_device *dev,
+ struct comedi_cmd *cmd)
{
- unsigned int micro_sec;
+ unsigned int arg = cmd->convert_arg;
+
+ /*
+ * Paced mode:
+ * scan_begin_src is TRIG_FOLLOW
+ * convert_src is TRIG_TIMER
+ *
+ * The convert_arg sets the pacer sample acquisition time.
+ * The max acquisition speed is limited to the board's
+ * 'ai_speed' (this was already verified). The min speed is
+ * limited by the cascaded 8254 timer.
+ */
+ comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
+ return comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+}
- /* in burst mode, the maximum conversion time is 64 microseconds */
- if (convert_arg > 64000)
- convert_arg = 64000;
+static int das1800_ai_fixup_burst_timing(struct comedi_device *dev,
+ struct comedi_cmd *cmd)
+{
+ unsigned int arg = cmd->convert_arg;
+ int err = 0;
- /* the conversion time must be an integral number of microseconds */
- switch (flags & CMDF_ROUND_MASK) {
+ /*
+ * Burst mode:
+ * scan_begin_src is TRIG_TIMER or TRIG_EXT
+ * convert_src is TRIG_TIMER
+ *
+ * The convert_arg sets burst sample acquisition time.
+ * The max acquisition speed is limited to the board's
+ * 'ai_speed' (this was already verified). The min speed is
+ * limited to 64 microseconds.
+ */
+ err |= comedi_check_trigger_arg_max(&arg, 64000);
+
+ /* round to microseconds then verify */
+ switch (cmd->flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
- micro_sec = (convert_arg + 500) / 1000;
+ arg = DIV_ROUND_CLOSEST(arg, 1000);
break;
case CMDF_ROUND_DOWN:
- micro_sec = convert_arg / 1000;
+ arg = arg / 1000;
break;
case CMDF_ROUND_UP:
- micro_sec = (convert_arg - 1) / 1000 + 1;
+ arg = DIV_ROUND_UP(arg, 1000);
break;
}
+ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg * 1000);
+
+ /*
+ * The pacer can be used to set the scan sample rate. The max scan
+ * speed is limited by the conversion speed and the number of channels
+ * to convert. The min speed is limited by the cascaded 8254 timer.
+ */
+ if (cmd->scan_begin_src == TRIG_TIMER) {
+ arg = cmd->convert_arg * cmd->chanlist_len;
+ err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, arg);
+
+ arg = cmd->scan_begin_arg;
+ comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
+ err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
+ }
- /* return number of nanoseconds */
- return micro_sec * 1000;
+ return err;
}
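The rounding step above first reduces the requested period to whole microseconds, then verifies the scaled-back value, so convert_arg always ends up an exact multiple of 1000 ns. A stand-alone sketch of the three rounding modes, using user-space equivalents of the kernel helpers (the 12600 ns request is an arbitrary example):

#include <stdio.h>

/* user-space equivalents of the kernel's rounding helpers */
#define DIV_ROUND_CLOSEST(n, d)	(((n) + (d) / 2) / (d))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int ns = 12600;	/* requested convert_arg in ns */

	printf("nearest: %u us\n", DIV_ROUND_CLOSEST(ns, 1000));	/* 13 */
	printf("down:    %u us\n", ns / 1000);				/* 12 */
	printf("up:      %u us\n", DIV_ROUND_UP(ns, 1000));		/* 13 */
	return 0;
}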
static int das1800_ai_check_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
- unsigned int unipolar0 = CR_RANGE(cmd->chanlist[0]) & UNIPOLAR;
+ unsigned int range = CR_RANGE(cmd->chanlist[0]);
+ bool unipolar0 = comedi_range_is_unipolar(s, range);
int i;
for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int unipolar = CR_RANGE(cmd->chanlist[i]) & UNIPOLAR;
+ range = CR_RANGE(cmd->chanlist[i]);
- if (unipolar != unipolar0) {
+ if (unipolar0 != comedi_range_is_unipolar(s, range)) {
dev_dbg(dev->class_dev,
"unipolar and bipolar ranges cannot be mixed in the chanlist\n");
return -EINVAL;
@@ -723,14 +640,12 @@ static int das1800_ai_check_chanlist(struct comedi_device *dev,
return 0;
}
-/* test analog input cmd */
-static int das1800_ai_do_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
+static int das1800_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
const struct das1800_board *board = dev->board_ptr;
int err = 0;
- unsigned int arg;
/* Step 1 : check if triggers are trivially valid */
@@ -755,16 +670,23 @@ static int das1800_ai_do_cmdtest(struct comedi_device *dev,
/* Step 2b : and mutually compatible */
+ /* burst scans must use timed conversions */
if (cmd->scan_begin_src != TRIG_FOLLOW &&
cmd->convert_src != TRIG_TIMER)
err |= -EINVAL;
+ /* the external pin TGIN must use the same polarity */
+ if (cmd->start_src == TRIG_EXT && cmd->stop_src == TRIG_EXT)
+ err |= comedi_check_trigger_arg_is(&cmd->start_arg,
+ cmd->stop_arg);
+
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
+ if (cmd->start_src == TRIG_NOW)
+ err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
if (cmd->convert_src == TRIG_TIMER) {
err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
@@ -789,31 +711,13 @@ static int das1800_ai_do_cmdtest(struct comedi_device *dev,
if (err)
return 3;
- /* step 4: fix up any arguments */
+ /* Step 4: fix up any arguments */
- if (cmd->scan_begin_src == TRIG_FOLLOW &&
- cmd->convert_src == TRIG_TIMER) {
- /* we are not in burst mode */
- arg = cmd->convert_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- } else if (cmd->convert_src == TRIG_TIMER) {
- /* we are in burst mode */
- arg = burst_convert_arg(cmd->convert_arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->convert_arg * cmd->chanlist_len;
- err |= comedi_check_trigger_arg_max(&cmd->
- scan_begin_arg,
- arg);
-
- arg = cmd->scan_begin_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg,
- cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg,
- arg);
- }
+ if (cmd->convert_src == TRIG_TIMER) {
+ if (cmd->scan_begin_src == TRIG_FOLLOW)
+ err |= das1800_ai_fixup_paced_timing(dev, cmd);
+ else /* TRIG_TIMER or TRIG_EXT */
+ err |= das1800_ai_fixup_burst_timing(dev, cmd);
}
if (err)
@@ -829,74 +733,22 @@ static int das1800_ai_do_cmdtest(struct comedi_device *dev,
return 0;
}
-/* returns appropriate bits for control register a, depending on command */
-static int control_a_bits(const struct comedi_cmd *cmd)
+static unsigned char das1800_ai_chanspec_bits(struct comedi_subdevice *s,
+ unsigned int chanspec)
{
- int control_a;
-
- control_a = FFEN; /* enable fifo */
- if (cmd->stop_src == TRIG_EXT)
- control_a |= ATEN;
- switch (cmd->start_src) {
- case TRIG_EXT:
- control_a |= TGEN | CGSL;
- break;
- case TRIG_NOW:
- control_a |= CGEN;
- break;
- default:
- break;
- }
-
- return control_a;
-}
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int aref = CR_AREF(chanspec);
+ unsigned char bits;
-/* returns appropriate bits for control register c, depending on command */
-static int control_c_bits(const struct comedi_cmd *cmd)
-{
- int control_c;
- int aref;
-
- /* set clock source to internal or external, select analog reference,
- * select unipolar / bipolar
- */
- aref = CR_AREF(cmd->chanlist[0]);
- control_c = UQEN; /* enable upper qram addresses */
+ bits = UQEN;
if (aref != AREF_DIFF)
- control_c |= SD;
+ bits |= SD;
if (aref == AREF_COMMON)
- control_c |= CMEN;
- /* if a unipolar range was selected */
- if (CR_RANGE(cmd->chanlist[0]) & UNIPOLAR)
- control_c |= UB;
- switch (cmd->scan_begin_src) {
- case TRIG_FOLLOW: /* not in burst mode */
- switch (cmd->convert_src) {
- case TRIG_TIMER:
- /* trig on cascaded counters */
- control_c |= IPCLK;
- break;
- case TRIG_EXT:
- /* trig on falling edge of external trigger */
- control_c |= XPCLK;
- break;
- default:
- break;
- }
- break;
- case TRIG_TIMER:
- /* burst mode with internal pacer clock */
- control_c |= BMDE | IPCLK;
- break;
- case TRIG_EXT:
- /* burst mode with external trigger */
- control_c |= BMDE | XPCLK;
- break;
- default:
- break;
- }
+ bits |= CMEN;
+ if (comedi_range_is_unipolar(s, range))
+ bits |= UB;
- return control_c;
+ return bits;
}
static unsigned int das1800_ai_transfer_size(struct comedi_device *dev,
@@ -960,43 +812,48 @@ static void das1800_ai_setup_dma(struct comedi_device *dev,
}
}
-/* programs channel/gain list into card */
-static void program_chanlist(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
+static void das1800_ai_set_chanlist(struct comedi_device *dev,
+ unsigned int *chanlist, unsigned int len)
{
- int i, n, chan_range;
- unsigned long irq_flags;
- const int range_mask = 0x3; /* masks unipolar/bipolar bit off range */
- const int range_bitshift = 8;
-
- n = cmd->chanlist_len;
- /* spinlock protects indirect addressing */
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- outb(QRAM, dev->iobase + DAS1800_SELECT); /* select QRAM for baseAddress + 0x0 */
- outb(n - 1, dev->iobase + DAS1800_QRAM_ADDRESS); /*set QRAM address start */
+ unsigned long flags;
+ unsigned int i;
+
+ /* protects the indirect addressing selected by DAS1800_SELECT */
+ spin_lock_irqsave(&dev->spinlock, flags);
+
+ /* select QRAM register and set start address */
+ outb(QRAM, dev->iobase + DAS1800_SELECT);
+ outb(len - 1, dev->iobase + DAS1800_QRAM_ADDRESS);
+
/* make channel / gain list */
- for (i = 0; i < n; i++) {
- chan_range =
- CR_CHAN(cmd->chanlist[i]) |
- ((CR_RANGE(cmd->chanlist[i]) & range_mask) <<
- range_bitshift);
- outw(chan_range, dev->iobase + DAS1800_QRAM);
+ for (i = 0; i < len; i++) {
+ unsigned int chan = CR_CHAN(chanlist[i]);
+ unsigned int range = CR_RANGE(chanlist[i]);
+ unsigned short val;
+
+ val = chan | ((range & 0x3) << 8);
+ outw(val, dev->iobase + DAS1800_QRAM);
}
- outb(n - 1, dev->iobase + DAS1800_QRAM_ADDRESS); /*finish write to QRAM */
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+
+ /* finish write to QRAM */
+ outb(len - 1, dev->iobase + DAS1800_QRAM_ADDRESS);
+
+ spin_unlock_irqrestore(&dev->spinlock, flags);
}
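Each QRAM entry packs the channel number into the low byte and the two-bit gain code into bits 8..9; the range's unipolar/bipolar selection travels separately via the UB bit in control register C. A small sketch of the packing (qram_entry() is our illustration, not a driver helper):

#include <stdio.h>

/* pack one QRAM entry: channel in the low byte, gain code in bits 8..9 */
static unsigned short qram_entry(unsigned int chan, unsigned int range)
{
	return (unsigned short)(chan | ((range & 0x3) << 8));
}

int main(void)
{
	/* channel 5 with gain code 2 -> 0x0205 */
	printf("0x%04x\n", qram_entry(5, 2));
	return 0;
}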
-/* analog input do_cmd */
-static int das1800_ai_do_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int das1800_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct das1800_private *devpriv = dev->private;
int control_a, control_c;
struct comedi_async *async = s->async;
const struct comedi_cmd *cmd = &async->cmd;
+ unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
- /* disable dma on CMDF_WAKE_EOS, or CMDF_PRIORITY
- * (because dma in handler is unsafe at hard real-time priority) */
+ /*
+ * Disable dma on CMDF_WAKE_EOS or CMDF_PRIORITY (because dma in
+ * handler is unsafe at hard real-time priority).
+ */
if (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY))
devpriv->irq_dma_bits &= ~DMA_ENABLED;
else
@@ -1010,14 +867,42 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
devpriv->irq_dma_bits |= FIMD;
}
- das1800_cancel(dev, s);
+ das1800_ai_cancel(dev, s);
- /* determine proper bits for control registers */
- control_a = control_a_bits(cmd);
- control_c = control_c_bits(cmd);
+ devpriv->ai_is_unipolar = comedi_range_is_unipolar(s, range0);
+
+ control_a = FFEN;
+ if (cmd->stop_src == TRIG_EXT)
+ control_a |= ATEN;
+ if (cmd->start_src == TRIG_EXT)
+ control_a |= TGEN | CGSL;
+ else /* TRIG_NOW */
+ control_a |= CGEN;
+ if (control_a & (ATEN | TGEN)) {
+ if ((cmd->start_arg & CR_INVERT) || (cmd->stop_arg & CR_INVERT))
+ control_a |= TGPL;
+ }
+
+ control_c = das1800_ai_chanspec_bits(s, cmd->chanlist[0]);
+ /* set clock source to internal or external */
+ if (cmd->scan_begin_src == TRIG_FOLLOW) {
+ /* not in burst mode */
+ if (cmd->convert_src == TRIG_TIMER) {
+ /* trig on cascaded counters */
+ control_c |= IPCLK;
+ } else { /* TRIG_EXT */
+ /* trig on falling edge of external trigger */
+ control_c |= XPCLK;
+ }
+ } else if (cmd->scan_begin_src == TRIG_TIMER) {
+ /* burst mode with internal pacer clock */
+ control_c |= BMDE | IPCLK;
+ } else { /* TRIG_EXT */
+ /* burst mode with external trigger */
+ control_c |= BMDE | XPCLK;
+ }
- /* setup card and start */
- program_chanlist(dev, cmd);
+ das1800_ai_set_chanlist(dev, cmd->chanlist, cmd->chanlist_len);
/* setup cascaded counters for conversion/scan frequency */
if ((cmd->scan_begin_src == TRIG_FOLLOW ||
@@ -1035,118 +920,117 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
outb(control_c, dev->iobase + DAS1800_CONTROL_C);
/* set conversion rate and length for burst mode */
if (control_c & BMDE) {
- /* program conversion period with number of microseconds minus 1 */
- outb(cmd->convert_arg / 1000 - 1,
+ outb(cmd->convert_arg / 1000 - 1, /* microseconds - 1 */
dev->iobase + DAS1800_BURST_RATE);
outb(cmd->chanlist_len - 1, dev->iobase + DAS1800_BURST_LENGTH);
}
- outb(devpriv->irq_dma_bits, dev->iobase + DAS1800_CONTROL_B); /* enable irq/dma */
- outb(control_a, dev->iobase + DAS1800_CONTROL_A); /* enable fifo and triggering */
- outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */
+
+ /* enable and start conversions */
+ outb(devpriv->irq_dma_bits, dev->iobase + DAS1800_CONTROL_B);
+ outb(control_a, dev->iobase + DAS1800_CONTROL_A);
+ outb(CVEN, dev->iobase + DAS1800_STATUS);
return 0;
}
-/* read analog input */
-static int das1800_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das1800_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- const struct das1800_board *board = dev->board_ptr;
- int i, n;
- int chan, range, aref, chan_range;
- int timeout = 1000;
- unsigned short dpnt;
- int conv_flags = 0;
- unsigned long irq_flags;
+ unsigned char status;
- /* set up analog reference and unipolar / bipolar mode */
- aref = CR_AREF(insn->chanspec);
- conv_flags |= UQEN;
- if (aref != AREF_DIFF)
- conv_flags |= SD;
- if (aref == AREF_COMMON)
- conv_flags |= CMEN;
- /* if a unipolar range was selected */
- if (CR_RANGE(insn->chanspec) & UNIPOLAR)
- conv_flags |= UB;
+ status = inb(dev->iobase + DAS1800_STATUS);
+ if (status & FNE)
+ return 0;
+ return -EBUSY;
+}
+
+static int das1800_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int range = CR_RANGE(insn->chanspec);
+ bool is_unipolar = comedi_range_is_unipolar(s, range);
+ int ret = 0;
+ int n;
+ unsigned short dpnt;
+ unsigned long flags;
- outb(conv_flags, dev->iobase + DAS1800_CONTROL_C); /* software conversion enabled */
+ outb(das1800_ai_chanspec_bits(s, insn->chanspec),
+ dev->iobase + DAS1800_CONTROL_C); /* software pacer */
outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */
outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* reset fifo */
outb(FFEN, dev->iobase + DAS1800_CONTROL_A);
- chan = CR_CHAN(insn->chanspec);
- /* mask of unipolar/bipolar bit from range */
- range = CR_RANGE(insn->chanspec) & 0x3;
- chan_range = chan | (range << 8);
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- outb(QRAM, dev->iobase + DAS1800_SELECT); /* select QRAM for baseAddress + 0x0 */
- outb(0x0, dev->iobase + DAS1800_QRAM_ADDRESS); /* set QRAM address start */
- outw(chan_range, dev->iobase + DAS1800_QRAM);
- outb(0x0, dev->iobase + DAS1800_QRAM_ADDRESS); /*finish write to QRAM */
- outb(ADC, dev->iobase + DAS1800_SELECT); /* select ADC for baseAddress + 0x0 */
+ das1800_ai_set_chanlist(dev, &insn->chanspec, 1);
+
+ /* protects the indirect addressing selected by DAS1800_SELECT */
+ spin_lock_irqsave(&dev->spinlock, flags);
+
+ /* select ai fifo register */
+ outb(ADC, dev->iobase + DAS1800_SELECT);
for (n = 0; n < insn->n; n++) {
/* trigger conversion */
outb(0, dev->iobase + DAS1800_FIFO);
- for (i = 0; i < timeout; i++) {
- if (inb(dev->iobase + DAS1800_STATUS) & FNE)
- break;
- }
- if (i == timeout) {
- dev_err(dev->class_dev, "timeout\n");
- n = -ETIME;
- goto exit;
- }
+
+ ret = comedi_timeout(dev, s, insn, das1800_ai_eoc, 0);
+ if (ret)
+ break;
+
dpnt = inw(dev->iobase + DAS1800_FIFO);
- /* shift data to offset binary for bipolar ranges */
- if ((conv_flags & UB) == 0)
- dpnt += 1 << (board->resolution - 1);
+ if (!is_unipolar)
+ dpnt = comedi_offset_munge(s, dpnt);
data[n] = dpnt;
}
-exit:
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+ spin_unlock_irqrestore(&dev->spinlock, flags);
- return n;
+ return ret ? ret : insn->n;
}
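comedi_timeout() repeatedly invokes the das1800_ai_eoc() callback until it returns 0 (here, FNE set: a sample is in the fifo) or gives up with a timeout error. Roughly, and only as a user-space sketch (poll_timeout(), the 1 ms tick, and always_ready() are our assumptions, not the comedi API):

#include <stdio.h>
#include <time.h>
#include <errno.h>

/* poll a readiness callback until it returns 0 or the deadline passes */
static int poll_timeout(int (*cond)(void *), void *ctx, unsigned int timeout_ms)
{
	struct timespec tick = { 0, 1000000 };	/* 1 ms */
	unsigned int elapsed;

	for (elapsed = 0; elapsed < timeout_ms; elapsed++) {
		if (cond(ctx) == 0)
			return 0;
		nanosleep(&tick, NULL);
	}
	return -ETIMEDOUT;
}

static int always_ready(void *ctx)
{
	(void)ctx;
	return 0;	/* pretend FNE is set */
}

int main(void)
{
	printf("%d\n", poll_timeout(always_ready, NULL, 1000));
	return 0;
}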
-/* writes to an analog output channel */
-static int das1800_ao_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das1800_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- const struct das1800_board *board = dev->board_ptr;
- struct das1800_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
-/* int range = CR_RANGE(insn->chanspec); */
- int update_chan = board->ao_n_chan - 1;
- unsigned short output;
- unsigned long irq_flags;
-
- /* card expects two's complement data */
- output = data[0] - (1 << (board->resolution - 1));
- /* if the write is to the 'update' channel, we need to remember its value */
- if (chan == update_chan)
- devpriv->ao_update_bits = output;
- /* write to channel */
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- outb(DAC(chan), dev->iobase + DAS1800_SELECT); /* select dac channel for baseAddress + 0x0 */
- outw(output, dev->iobase + DAS1800_DAC);
- /* now we need to write to 'update' channel to update all dac channels */
- if (chan != update_chan) {
- outb(DAC(update_chan), dev->iobase + DAS1800_SELECT); /* select 'update' channel for baseAddress + 0x0 */
- outw(devpriv->ao_update_bits, dev->iobase + DAS1800_DAC);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int update_chan = s->n_chan - 1;
+ unsigned long flags;
+ int i;
+
+ /* protects the indirect addressing selected by DAS1800_SELECT */
+ spin_lock_irqsave(&dev->spinlock, flags);
+
+ for (i = 0; i < insn->n; i++) {
+ unsigned int val = data[i];
+
+ s->readback[chan] = val;
+
+ val = comedi_offset_munge(s, val);
+
+ /* load this channel (and update if it's the last channel) */
+ outb(DAC(chan), dev->iobase + DAS1800_SELECT);
+ outw(val, dev->iobase + DAS1800_DAC);
+
+ /* update all channels */
+ if (chan != update_chan) {
+ val = comedi_offset_munge(s, s->readback[update_chan]);
+
+ outb(DAC(update_chan), dev->iobase + DAS1800_SELECT);
+ outw(val, dev->iobase + DAS1800_DAC);
+ }
}
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+ spin_unlock_irqrestore(&dev->spinlock, flags);
- return 1;
+ return insn->n;
}
-/* reads from digital input channels */
-static int das1800_di_rbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das1800_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
data[1] = inb(dev->iobase + DAS1800_DIGITAL) & 0xf;
data[0] = 0;
@@ -1154,10 +1038,10 @@ static int das1800_di_rbits(struct comedi_device *dev,
return insn->n;
}
-static int das1800_do_wbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int das1800_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
if (comedi_dio_update_state(s, data))
outb(s->state, dev->iobase + DAS1800_DIGITAL);
@@ -1220,68 +1104,68 @@ static void das1800_free_dma(struct comedi_device *dev)
comedi_isadma_free(devpriv->dma);
}
-static const struct das1800_board *das1800_probe(struct comedi_device *dev)
+static int das1800_probe(struct comedi_device *dev)
{
const struct das1800_board *board = dev->board_ptr;
- int index = board ? board - das1800_boards : -EINVAL;
- int id;
+ unsigned char id;
+
+ id = (inb(dev->iobase + DAS1800_DIGITAL) >> 4) & 0xf;
/*
* The dev->board_ptr will be set by comedi_device_attach() if the
* board name provided by the user matches a board->name in this
* driver. If so, this function sanity checks the id to verify that
* the board is correct.
- *
- * If the dev->board_ptr is not set, the user is trying to attach
- * an unspecified board to this driver. In this case the id is used
- * to 'probe' for the correct dev->board_ptr.
*/
- id = (inb(dev->iobase + DAS1800_DIGITAL) >> 4) & 0xf;
+ if (board) {
+ if (board->id == id)
+ return 0;
+ dev_err(dev->class_dev,
+ "probed id does not match board id (0x%x != 0x%x)\n",
+ id, board->id);
+ return -ENODEV;
+ }
+
+ /*
+ * If the dev->board_ptr is not set, the user is trying to attach
+ * an unspecified board to this driver. In this case the id is used
+ * to 'probe' for the dev->board_ptr.
+ */
switch (id) {
- case 0x3:
- if (index == das1801st_da || index == das1802st_da ||
- index == das1701st_da || index == das1702st_da)
- return board;
- index = das1801st;
+ case DAS1800_ID_ST_DA:
+ /* das-1701st-da, das-1702st-da, das-1801st-da, das-1802st-da */
+ board = &das1800_boards[BOARD_DAS1801ST_DA];
break;
- case 0x4:
- if (index == das1802hr_da || index == das1702hr_da)
- return board;
- index = das1802hr;
+ case DAS1800_ID_HR_DA:
+ /* das-1702hr-da, das-1802hr-da */
+ board = &das1800_boards[BOARD_DAS1802HR_DA];
break;
- case 0x5:
- if (index == das1801ao || index == das1802ao ||
- index == das1701ao || index == das1702ao)
- return board;
- index = das1801ao;
+ case DAS1800_ID_AO:
+ /* das-1701ao, das-1702ao, das-1801ao, das-1802ao */
+ board = &das1800_boards[BOARD_DAS1801AO];
break;
- case 0x6:
- if (index == das1802hr || index == das1702hr)
- return board;
- index = das1802hr;
+ case DAS1800_ID_HR:
+ /* das-1702hr, das-1802hr */
+ board = &das1800_boards[BOARD_DAS1802HR];
break;
- case 0x7:
- if (index == das1801st || index == das1802st ||
- index == das1701st || index == das1702st)
- return board;
- index = das1801st;
+ case DAS1800_ID_ST:
+ /* das-1701st, das-1702st, das-1801st, das-1802st */
+ board = &das1800_boards[BOARD_DAS1801ST];
break;
- case 0x8:
- if (index == das1801hc || index == das1802hc)
- return board;
- index = das1801hc;
+ case DAS1800_ID_HC:
+ /* das-1801hc, das-1802hc */
+ board = &das1800_boards[BOARD_DAS1801HC];
break;
default:
- dev_err(dev->class_dev,
- "Board model: probe returned 0x%x (unknown, please report)\n",
- id);
- return NULL;
+ dev_err(dev->class_dev, "invalid probe id 0x%x\n", id);
+ return -ENODEV;
}
- dev_err(dev->class_dev,
- "Board model (probed, not recommended): %s series\n",
- das1800_boards[index].name);
-
- return &das1800_boards[index];
+ dev->board_ptr = board;
+ dev->board_name = board->name;
+ dev_warn(dev->class_dev,
+ "probed id 0x%0x: %s series (not recommended)\n",
+ id, board->name);
+ return 0;
}
static int das1800_attach(struct comedi_device *dev,
@@ -1291,7 +1175,9 @@ static int das1800_attach(struct comedi_device *dev,
struct das1800_private *devpriv;
struct comedi_subdevice *s;
unsigned int irq = it->options[1];
+ bool is_16bit;
int ret;
+ int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -1301,16 +1187,15 @@ static int das1800_attach(struct comedi_device *dev,
if (ret)
return ret;
- board = das1800_probe(dev);
- if (!board) {
- dev_err(dev->class_dev, "unable to determine board type\n");
- return -ENODEV;
- }
- dev->board_ptr = board;
- dev->board_name = board->name;
+ ret = das1800_probe(dev);
+ if (ret)
+ return ret;
+ board = dev->board_ptr;
- /* if it is an 'ao' board with fancy analog out then we need extra io ports */
- if (board->ao_ability == 2) {
+ is_16bit = board->id == DAS1800_ID_HR || board->id == DAS1800_ID_HR_DA;
+
+ /* waveform 'ao' boards have additional io ports */
+ if (board->id == DAS1800_ID_AO) {
unsigned long iobase2 = dev->iobase + IOBASE2;
ret = __comedi_request_region(dev, iobase2, DAS1800_SIZE);
@@ -1353,7 +1238,9 @@ static int das1800_attach(struct comedi_device *dev,
if (dev->irq & it->options[2])
das1800_init_dma(dev, it);
- devpriv->fifo_buf = kmalloc_array(FIFO_SIZE, sizeof(uint16_t), GFP_KERNEL);
+ devpriv->fifo_buf = kmalloc_array(FIFO_SIZE,
+ sizeof(*devpriv->fifo_buf),
+ GFP_KERNEL);
if (!devpriv->fifo_buf)
return -ENOMEM;
@@ -1366,70 +1253,94 @@ static int das1800_attach(struct comedi_device *dev,
if (ret)
return ret;
- /* analog input subdevice */
+ /*
+ * Analog Input subdevice
+ *
+ * The "hc" type boards have 64 analog input channels and a 64
+ * entry QRAM fifo.
+ *
+ * All the other board types have 16 on-board channels. Each channel
+ * can be expanded to 16 channels with the addition of an EXP-1800
+ * expansion board for a total of 256 channels. The QRAM fifo on
+ * these boards has 256 entries.
+ *
+ * From the datasheets it's not clear how the comedi channels map
+ * to the physical channels when EXP-1800 boards are used.
+ */
s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND;
- if (board->common)
- s->subdev_flags |= SDF_COMMON;
- s->n_chan = board->qram_len;
- s->maxdata = (1 << board->resolution) - 1;
- s->range_table = board->range_ai;
- s->insn_read = das1800_ai_rinsn;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND;
+ if (board->id != DAS1800_ID_HC)
+ s->subdev_flags |= SDF_COMMON;
+ s->n_chan = (board->id == DAS1800_ID_HC) ? 64 : 256;
+ s->maxdata = is_16bit ? 0xffff : 0x0fff;
+ s->range_table = board->is_01_series ? &das1801_ai_range
+ : &das1802_ai_range;
+ s->insn_read = das1800_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmd = das1800_ai_do_cmd;
- s->do_cmdtest = das1800_ai_do_cmdtest;
- s->poll = das1800_ai_poll;
- s->cancel = das1800_cancel;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmd = das1800_ai_cmd;
+ s->do_cmdtest = das1800_ai_cmdtest;
+ s->poll = das1800_ai_poll;
+ s->cancel = das1800_ai_cancel;
+ s->munge = das1800_ai_munge;
}
- /* analog out */
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
- if (board->ao_ability == 1) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->ao_n_chan;
- s->maxdata = (1 << board->resolution) - 1;
- s->range_table = &range_bipolar10;
- s->insn_write = das1800_ao_winsn;
+ if (board->id == DAS1800_ID_ST_DA || board->id == DAS1800_ID_HR_DA) {
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = (board->id == DAS1800_ID_ST_DA) ? 4 : 2;
+ s->maxdata = is_16bit ? 0xffff : 0x0fff;
+ s->range_table = &range_bipolar10;
+ s->insn_write = das1800_ao_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ /* initialize all channels to 0V */
+ for (i = 0; i < s->n_chan; i++) {
+ /* spinlock is not necessary during the attach */
+ outb(DAC(i), dev->iobase + DAS1800_SELECT);
+ outw(0, dev->iobase + DAS1800_DAC);
+ }
+ } else if (board->id == DAS1800_ID_AO) {
+ /*
+ * 'ao' boards have waveform analog outputs that are not
+ * currently supported.
+ */
+ s->type = COMEDI_SUBD_UNUSED;
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_UNUSED;
}
- /* di */
+ /* Digital Input subdevice */
s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das1800_di_rbits;
-
- /* do */
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das1800_di_insn_bits;
+
+ /* Digital Output subdevice */
s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->do_n_chan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das1800_do_wbits;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = (board->id == DAS1800_ID_HC) ? 8 : 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das1800_do_insn_bits;
- das1800_cancel(dev, dev->read_subdev);
+ das1800_ai_cancel(dev, dev->read_subdev);
/* initialize digital out channels */
outb(0, dev->iobase + DAS1800_DIGITAL);
- /* initialize analog out channels */
- if (board->ao_ability == 1) {
- /* select 'update' dac channel for baseAddress + 0x0 */
- outb(DAC(board->ao_n_chan - 1),
- dev->iobase + DAS1800_SELECT);
- outw(devpriv->ao_update_bits, dev->iobase + DAS1800_DAC);
- }
-
return 0;
};
@@ -1458,5 +1369,5 @@ static struct comedi_driver das1800_driver = {
module_comedi_driver(das1800_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for DAS1800 compatible ISA boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index 40bf00984..d5295bbdd 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -69,49 +69,61 @@
* Register map
*/
#define DT2821_ADCSR_REG 0x00
-#define DT2821_ADCSR_ADERR (1 << 15)
-#define DT2821_ADCSR_ADCLK (1 << 9)
-#define DT2821_ADCSR_MUXBUSY (1 << 8)
-#define DT2821_ADCSR_ADDONE (1 << 7)
-#define DT2821_ADCSR_IADDONE (1 << 6)
+#define DT2821_ADCSR_ADERR BIT(15)
+#define DT2821_ADCSR_ADCLK BIT(9)
+#define DT2821_ADCSR_MUXBUSY BIT(8)
+#define DT2821_ADCSR_ADDONE BIT(7)
+#define DT2821_ADCSR_IADDONE BIT(6)
#define DT2821_ADCSR_GS(x) (((x) & 0x3) << 4)
#define DT2821_ADCSR_CHAN(x) (((x) & 0xf) << 0)
#define DT2821_CHANCSR_REG 0x02
-#define DT2821_CHANCSR_LLE (1 << 15)
-#define DT2821_CHANCSR_PRESLA(x) (((x) & 0xf) >> 8)
+#define DT2821_CHANCSR_LLE BIT(15)
+#define DT2821_CHANCSR_TO_PRESLA(x) (((x) >> 8) & 0xf)
#define DT2821_CHANCSR_NUMB(x) ((((x) - 1) & 0xf) << 0)
#define DT2821_ADDAT_REG 0x04
#define DT2821_DACSR_REG 0x06
-#define DT2821_DACSR_DAERR (1 << 15)
+#define DT2821_DACSR_DAERR BIT(15)
#define DT2821_DACSR_YSEL(x) ((x) << 9)
-#define DT2821_DACSR_SSEL (1 << 8)
-#define DT2821_DACSR_DACRDY (1 << 7)
-#define DT2821_DACSR_IDARDY (1 << 6)
-#define DT2821_DACSR_DACLK (1 << 5)
-#define DT2821_DACSR_HBOE (1 << 1)
-#define DT2821_DACSR_LBOE (1 << 0)
+#define DT2821_DACSR_SSEL BIT(8)
+#define DT2821_DACSR_DACRDY BIT(7)
+#define DT2821_DACSR_IDARDY BIT(6)
+#define DT2821_DACSR_DACLK BIT(5)
+#define DT2821_DACSR_HBOE BIT(1)
+#define DT2821_DACSR_LBOE BIT(0)
#define DT2821_DADAT_REG 0x08
#define DT2821_DIODAT_REG 0x0a
#define DT2821_SUPCSR_REG 0x0c
-#define DT2821_SUPCSR_DMAD (1 << 15)
-#define DT2821_SUPCSR_ERRINTEN (1 << 14)
-#define DT2821_SUPCSR_CLRDMADNE (1 << 13)
-#define DT2821_SUPCSR_DDMA (1 << 12)
-#define DT2821_SUPCSR_DS_PIO (0 << 10)
-#define DT2821_SUPCSR_DS_AD_CLK (1 << 10)
-#define DT2821_SUPCSR_DS_DA_CLK (2 << 10)
-#define DT2821_SUPCSR_DS_AD_TRIG (3 << 10)
-#define DT2821_SUPCSR_BUFFB (1 << 9)
-#define DT2821_SUPCSR_SCDN (1 << 8)
-#define DT2821_SUPCSR_DACON (1 << 7)
-#define DT2821_SUPCSR_ADCINIT (1 << 6)
-#define DT2821_SUPCSR_DACINIT (1 << 5)
-#define DT2821_SUPCSR_PRLD (1 << 4)
-#define DT2821_SUPCSR_STRIG (1 << 3)
-#define DT2821_SUPCSR_XTRIG (1 << 2)
-#define DT2821_SUPCSR_XCLK (1 << 1)
-#define DT2821_SUPCSR_BDINIT (1 << 0)
+#define DT2821_SUPCSR_DMAD BIT(15)
+#define DT2821_SUPCSR_ERRINTEN BIT(14)
+#define DT2821_SUPCSR_CLRDMADNE BIT(13)
+#define DT2821_SUPCSR_DDMA BIT(12)
+#define DT2821_SUPCSR_DS(x) (((x) & 0x3) << 10)
+#define DT2821_SUPCSR_DS_PIO DT2821_SUPCSR_DS(0)
+#define DT2821_SUPCSR_DS_AD_CLK DT2821_SUPCSR_DS(1)
+#define DT2821_SUPCSR_DS_DA_CLK DT2821_SUPCSR_DS(2)
+#define DT2821_SUPCSR_DS_AD_TRIG DT2821_SUPCSR_DS(3)
+#define DT2821_SUPCSR_BUFFB BIT(9)
+#define DT2821_SUPCSR_SCDN BIT(8)
+#define DT2821_SUPCSR_DACON BIT(7)
+#define DT2821_SUPCSR_ADCINIT BIT(6)
+#define DT2821_SUPCSR_DACINIT BIT(5)
+#define DT2821_SUPCSR_PRLD BIT(4)
+#define DT2821_SUPCSR_STRIG BIT(3)
+#define DT2821_SUPCSR_XTRIG BIT(2)
+#define DT2821_SUPCSR_XCLK BIT(1)
+#define DT2821_SUPCSR_BDINIT BIT(0)
#define DT2821_TMRCTR_REG 0x0e
+#define DT2821_TMRCTR_PRESCALE(x) (((x) & 0xf) << 8)
+#define DT2821_TMRCTR_DIVIDER(x) ((255 - ((x) & 0xff)) << 0)
+
+/* Pacer Clock */
+#define DT2821_OSC_BASE 250 /* 4 MHz oscillator period, in ns */
+#define DT2821_PRESCALE(x) BIT(x)
+#define DT2821_PRESCALE_MAX 15
+#define DT2821_DIVIDER_MAX 255
+#define DT2821_OSC_MAX (DT2821_OSC_BASE * \
+ DT2821_PRESCALE(DT2821_PRESCALE_MAX) * \
+ DT2821_DIVIDER_MAX)
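The DT2821_OSC_MAX arithmetic is easy to check by hand: 250 ns x 2^15 x 255 = 2,088,960,000 ns, i.e. a slowest conversion period of roughly 2.09 s. A one-line verification:

#include <stdio.h>

int main(void)
{
	unsigned long base = 250;		/* 4 MHz oscillator period, ns */
	unsigned long prescale = 1UL << 15;	/* largest prescaler */
	unsigned long divider = 255;		/* largest divider */

	printf("%lu ns (~%.2f s)\n", base * prescale * divider,
	       base * prescale * divider / 1e9);
	return 0;
}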
static const struct comedi_lrange range_dt282x_ai_lo_bipolar = {
4, {
@@ -364,10 +376,10 @@ static unsigned int dt282x_ns_to_timer(unsigned int *ns, unsigned int flags)
{
unsigned int prescale, base, divider;
- for (prescale = 0; prescale < 16; prescale++) {
- if (prescale == 1)
+ for (prescale = 0; prescale <= DT2821_PRESCALE_MAX; prescale++) {
+ if (prescale == 1) /* 0 and 1 both divide by 1 */
continue;
- base = 250 * (1 << prescale);
+ base = DT2821_OSC_BASE * DT2821_PRESCALE(prescale);
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
@@ -380,15 +392,17 @@ static unsigned int dt282x_ns_to_timer(unsigned int *ns, unsigned int flags)
divider = DIV_ROUND_UP(*ns, base);
break;
}
- if (divider < 256) {
- *ns = divider * base;
- return (prescale << 8) | (255 - divider);
- }
+ if (divider <= DT2821_DIVIDER_MAX)
+ break;
+ }
+ if (divider > DT2821_DIVIDER_MAX) {
+ prescale = DT2821_PRESCALE_MAX;
+ divider = DT2821_DIVIDER_MAX;
+ base = DT2821_OSC_BASE * DT2821_PRESCALE(prescale);
}
- base = 250 * (1 << 15);
- divider = 255;
*ns = divider * base;
- return (15 << 8) | (255 - divider);
+ return DT2821_TMRCTR_PRESCALE(prescale) |
+ DT2821_TMRCTR_DIVIDER(divider);
}
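The search walks the prescaler from the fastest base period upward and falls back to the slowest possible setting when even that cannot reach the request. A user-space transcription, restricted to the CMDF_ROUND_NEAREST case (the constants mirror the macros above; ns_to_timer() is our copy, not the driver's):

#include <stdio.h>

#define OSC_BASE	250	/* ns */
#define PRESCALE_MAX	15
#define DIVIDER_MAX	255

/* round *ns to the nearest achievable period; return the TMRCTR bits */
static unsigned int ns_to_timer(unsigned int *ns)
{
	unsigned int prescale, base = OSC_BASE, divider = 0;

	for (prescale = 0; prescale <= PRESCALE_MAX; prescale++) {
		if (prescale == 1)	/* 0 and 1 both divide by 1 */
			continue;
		base = OSC_BASE * (1U << prescale);
		divider = (*ns + base / 2) / base;
		if (divider <= DIVIDER_MAX)
			break;
	}
	if (divider > DIVIDER_MAX) {	/* clamp to the slowest setting */
		prescale = PRESCALE_MAX;
		divider = DIVIDER_MAX;
		base = OSC_BASE * (1U << prescale);
	}
	*ns = divider * base;
	return (prescale << 8) | (255 - divider);
}

int main(void)
{
	unsigned int ns = 100000;	/* request 100 us */
	unsigned int bits = ns_to_timer(&ns);

	printf("actual %u ns, TMRCTR bits 0x%04x\n", ns, bits);
	return 0;
}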
static void dt282x_munge(struct comedi_device *dev,
@@ -683,13 +697,8 @@ static int dt282x_ai_cmdtest(struct comedi_device *dev,
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 4000);
-
-#define SLOWEST_TIMER (250*(1<<15)*255)
- err |= comedi_check_trigger_arg_max(&cmd->convert_arg, SLOWEST_TIMER);
+ err |= comedi_check_trigger_arg_max(&cmd->convert_arg, DT2821_OSC_MAX);
err |= comedi_check_trigger_arg_min(&cmd->convert_arg, board->ai_speed);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
@@ -1084,20 +1093,6 @@ static int dt282x_initialize(struct comedi_device *dev)
return 0;
}
-/*
- options:
- 0 i/o base
- 1 irq
- 2 dma1
- 3 dma2
- 4 0=single ended, 1=differential
- 5 ai 0=straight binary, 1=2's comp
- 6 ao0 0=straight binary, 1=2's comp
- 7 ao1 0=straight binary, 1=2's comp
- 8 ai 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V
- 9 ao0 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V, 4=±2.5 V
- 10 ao1 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V, 4=±2.5 V
- */
static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct dt282x_board *board = dev->board_ptr;
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 8f24702c3..b1c086013 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -46,355 +46,451 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/log2.h>
#include "../comedi_pci.h"
#include "mite.h"
-#define TOP_OF_PAGE(x) ((x)|(~(PAGE_MASK)))
+/*
+ * Mite registers
+ */
+#define MITE_UNKNOWN_DMA_BURST_REG 0x28
+#define UNKNOWN_DMA_BURST_ENABLE_BITS 0x600
+
+#define MITE_PCI_CONFIG_OFFSET 0x300
+#define MITE_CSIGR 0x460 /* chip signature */
+#define CSIGR_TO_IOWINS(x) (((x) >> 29) & 0x7)
+#define CSIGR_TO_WINS(x) (((x) >> 24) & 0x1f)
+#define CSIGR_TO_WPDEP(x) (((x) >> 20) & 0x7)
+#define CSIGR_TO_DMAC(x) (((x) >> 16) & 0xf)
+#define CSIGR_TO_IMODE(x) (((x) >> 12) & 0x3) /* pci=0x3 */
+#define CSIGR_TO_MMODE(x) (((x) >> 8) & 0x3) /* minimite=1 */
+#define CSIGR_TO_TYPE(x) (((x) >> 4) & 0xf) /* mite=0, minimite=1 */
+#define CSIGR_TO_VER(x) (((x) >> 0) & 0xf)
+
+#define MITE_CHAN(x) (0x500 + 0x100 * (x))
+#define MITE_CHOR(x) (0x00 + MITE_CHAN(x)) /* channel operation */
+#define CHOR_DMARESET BIT(31)
+#define CHOR_SET_SEND_TC BIT(11)
+#define CHOR_CLR_SEND_TC BIT(10)
+#define CHOR_SET_LPAUSE BIT(9)
+#define CHOR_CLR_LPAUSE BIT(8)
+#define CHOR_CLRDONE BIT(7)
+#define CHOR_CLRRB BIT(6)
+#define CHOR_CLRLC BIT(5)
+#define CHOR_FRESET BIT(4)
+#define CHOR_ABORT BIT(3) /* stop without emptying fifo */
+#define CHOR_STOP BIT(2) /* stop after emptying fifo */
+#define CHOR_CONT BIT(1)
+#define CHOR_START BIT(0)
+#define MITE_CHCR(x) (0x04 + MITE_CHAN(x)) /* channel control */
+#define CHCR_SET_DMA_IE BIT(31)
+#define CHCR_CLR_DMA_IE BIT(30)
+#define CHCR_SET_LINKP_IE BIT(29)
+#define CHCR_CLR_LINKP_IE BIT(28)
+#define CHCR_SET_SAR_IE BIT(27)
+#define CHCR_CLR_SAR_IE BIT(26)
+#define CHCR_SET_DONE_IE BIT(25)
+#define CHCR_CLR_DONE_IE BIT(24)
+#define CHCR_SET_MRDY_IE BIT(23)
+#define CHCR_CLR_MRDY_IE BIT(22)
+#define CHCR_SET_DRDY_IE BIT(21)
+#define CHCR_CLR_DRDY_IE BIT(20)
+#define CHCR_SET_LC_IE BIT(19)
+#define CHCR_CLR_LC_IE BIT(18)
+#define CHCR_SET_CONT_RB_IE BIT(17)
+#define CHCR_CLR_CONT_RB_IE BIT(16)
+#define CHCR_FIFO(x) (((x) & 0x1) << 15)
+#define CHCR_FIFODIS CHCR_FIFO(1)
+#define CHCR_FIFO_ON CHCR_FIFO(0)
+#define CHCR_BURST(x) (((x) & 0x1) << 14)
+#define CHCR_BURSTEN CHCR_BURST(1)
+#define CHCR_NO_BURSTEN CHCR_BURST(0)
+#define CHCR_BYTE_SWAP_DEVICE BIT(6)
+#define CHCR_BYTE_SWAP_MEMORY BIT(4)
+#define CHCR_DIR(x) (((x) & 0x1) << 3)
+#define CHCR_DEV_TO_MEM CHCR_DIR(1)
+#define CHCR_MEM_TO_DEV CHCR_DIR(0)
+#define CHCR_MODE(x) (((x) & 0x7) << 0)
+#define CHCR_NORMAL CHCR_MODE(0)
+#define CHCR_CONTINUE CHCR_MODE(1)
+#define CHCR_RINGBUFF CHCR_MODE(2)
+#define CHCR_LINKSHORT CHCR_MODE(4)
+#define CHCR_LINKLONG CHCR_MODE(5)
+#define MITE_TCR(x) (0x08 + MITE_CHAN(x)) /* transfer count */
+#define MITE_MCR(x) (0x0c + MITE_CHAN(x)) /* memory config */
+#define MITE_MAR(x) (0x10 + MITE_CHAN(x)) /* memory address */
+#define MITE_DCR(x) (0x14 + MITE_CHAN(x)) /* device config */
+#define DCR_NORMAL BIT(29)
+#define MITE_DAR(x) (0x18 + MITE_CHAN(x)) /* device address */
+#define MITE_LKCR(x) (0x1c + MITE_CHAN(x)) /* link config */
+#define MITE_LKAR(x) (0x20 + MITE_CHAN(x)) /* link address */
+#define MITE_LLKAR(x) (0x24 + MITE_CHAN(x)) /* see tnt5002 manual */
+#define MITE_BAR(x) (0x28 + MITE_CHAN(x)) /* base address */
+#define MITE_BCR(x) (0x2c + MITE_CHAN(x)) /* base count */
+#define MITE_SAR(x) (0x30 + MITE_CHAN(x)) /* ? address */
+#define MITE_WSCR(x) (0x34 + MITE_CHAN(x)) /* ? */
+#define MITE_WSER(x) (0x38 + MITE_CHAN(x)) /* ? */
+#define MITE_CHSR(x) (0x3c + MITE_CHAN(x)) /* channel status */
+#define CHSR_INT BIT(31)
+#define CHSR_LPAUSES BIT(29)
+#define CHSR_SARS BIT(27)
+#define CHSR_DONE BIT(25)
+#define CHSR_MRDY BIT(23)
+#define CHSR_DRDY BIT(21)
+#define CHSR_LINKC BIT(19)
+#define CHSR_CONTS_RB BIT(17)
+#define CHSR_ERROR BIT(15)
+#define CHSR_SABORT BIT(14)
+#define CHSR_HABORT BIT(13)
+#define CHSR_STOPS BIT(12)
+#define CHSR_OPERR(x) (((x) & 0x3) << 10)
+#define CHSR_OPERR_MASK CHSR_OPERR(3)
+#define CHSR_OPERR_NOERROR CHSR_OPERR(0)
+#define CHSR_OPERR_FIFOERROR CHSR_OPERR(1)
+#define CHSR_OPERR_LINKERROR CHSR_OPERR(1) /* ??? */
+#define CHSR_XFERR BIT(9)
+#define CHSR_END BIT(8)
+#define CHSR_DRQ1 BIT(7)
+#define CHSR_DRQ0 BIT(6)
+#define CHSR_LERR(x) (((x) & 0x3) << 4)
+#define CHSR_LERR_MASK CHSR_LERR(3)
+#define CHSR_LBERR CHSR_LERR(1)
+#define CHSR_LRERR CHSR_LERR(2)
+#define CHSR_LOERR CHSR_LERR(3)
+#define CHSR_MERR(x) (((x) & 0x3) << 2)
+#define CHSR_MERR_MASK CHSR_MERR(3)
+#define CHSR_MBERR CHSR_MERR(1)
+#define CHSR_MRERR CHSR_MERR(2)
+#define CHSR_MOERR CHSR_MERR(3)
+#define CHSR_DERR(x) (((x) & 0x3) << 0)
+#define CHSR_DERR_MASK CHSR_DERR(3)
+#define CHSR_DBERR CHSR_DERR(1)
+#define CHSR_DRERR CHSR_DERR(2)
+#define CHSR_DOERR CHSR_DERR(3)
+#define MITE_FCR(x) (0x40 + MITE_CHAN(x)) /* fifo count */
+
+/* common bits for the memory/device/link config registers */
+#define CR_RL(x) (((x) & 0x7) << 21)
+#define CR_REQS(x) (((x) & 0x7) << 16)
+#define CR_REQS_MASK CR_REQS(7)
+#define CR_ASEQ(x) (((x) & 0x3) << 10)
+#define CR_ASEQDONT CR_ASEQ(0)
+#define CR_ASEQUP CR_ASEQ(1)
+#define CR_ASEQDOWN CR_ASEQ(2)
+#define CR_ASEQ_MASK CR_ASEQ(3)
+#define CR_PSIZE(x) (((x) & 0x3) << 8)
+#define CR_PSIZE8 CR_PSIZE(1)
+#define CR_PSIZE16 CR_PSIZE(2)
+#define CR_PSIZE32 CR_PSIZE(3)
+#define CR_PORT(x) (((x) & 0x3) << 6)
+#define CR_PORTCPU CR_PORT(0)
+#define CR_PORTIO CR_PORT(1)
+#define CR_PORTVXI CR_PORT(2)
+#define CR_PORTMXI CR_PORT(3)
+#define CR_AMDEVICE BIT(0)
+
+static unsigned int MITE_IODWBSR_1_WSIZE_bits(unsigned int size)
+{
+ return (ilog2(size) - 1) & 0x1f;
+}
-struct mite_struct *mite_alloc(struct pci_dev *pcidev)
+static unsigned int mite_retry_limit(unsigned int retry_limit)
{
- struct mite_struct *mite;
- unsigned int i;
+ unsigned int value = 0;
- mite = kzalloc(sizeof(*mite), GFP_KERNEL);
- if (mite) {
- spin_lock_init(&mite->lock);
- mite->pcidev = pcidev;
- for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
- mite->channels[i].mite = mite;
- mite->channels[i].channel = i;
- mite->channels[i].done = 1;
- }
- }
- return mite;
+ if (retry_limit)
+ value = 1 + ilog2(retry_limit);
+ if (value > 0x7)
+ value = 0x7;
+ return CR_RL(value);
}
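Both helpers above squeeze a magnitude into a small log-scale field: the I/O window size as ilog2(size) - 1, and the retry limit as 1 + ilog2(limit) capped at 7 before being shifted into place by CR_RL(). A user-space sketch of the retry encoding (ilog2_u() stands in for the kernel's ilog2()):

#include <stdio.h>

static unsigned int ilog2_u(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

/* mirrors mite_retry_limit(): 0 -> 0, otherwise 1 + ilog2(), capped at 7 */
static unsigned int retry_field(unsigned int retry_limit)
{
	unsigned int value = 0;

	if (retry_limit)
		value = 1 + ilog2_u(retry_limit);
	return value > 7 ? 7 : value;
}

int main(void)
{
	unsigned int limits[] = { 0, 1, 2, 64, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(limits) / sizeof(limits[0]); i++)
		printf("%u -> %u\n", limits[i], retry_field(limits[i]));
	return 0;
}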
-EXPORT_SYMBOL_GPL(mite_alloc);
-static void dump_chip_signature(u32 csigr_bits)
+static unsigned int mite_drq_reqs(unsigned int drq_line)
{
- pr_info("version = %i, type = %i, mite mode = %i, interface mode = %i\n",
- mite_csigr_version(csigr_bits), mite_csigr_type(csigr_bits),
- mite_csigr_mmode(csigr_bits), mite_csigr_imode(csigr_bits));
- pr_info("num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
- mite_csigr_dmac(csigr_bits), mite_csigr_wpdep(csigr_bits),
- mite_csigr_wins(csigr_bits), mite_csigr_iowins(csigr_bits));
+ /* This also works on m-series when using channels (drq_line) 4 or 5. */
+ return CR_REQS((drq_line & 0x3) | 0x4);
}
-static unsigned mite_fifo_size(struct mite_struct *mite, unsigned channel)
+static unsigned int mite_fifo_size(struct mite *mite, unsigned int channel)
{
- unsigned fcr_bits = readl(mite->mite_io_addr + MITE_FCR(channel));
- unsigned empty_count = (fcr_bits >> 16) & 0xff;
- unsigned full_count = fcr_bits & 0xff;
+ unsigned int fcr_bits = readl(mite->mmio + MITE_FCR(channel));
+ unsigned int empty_count = (fcr_bits >> 16) & 0xff;
+ unsigned int full_count = fcr_bits & 0xff;
return empty_count + full_count;
}
-int mite_setup2(struct comedi_device *dev,
- struct mite_struct *mite, bool use_win1)
+static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
{
- unsigned long length;
- int i;
- u32 csigr_bits;
- unsigned unknown_dma_burst_bits;
+ struct mite *mite = mite_chan->mite;
- pci_set_master(mite->pcidev);
+ return readl(mite->mmio + MITE_DAR(mite_chan->channel));
+}
- mite->mite_io_addr = pci_ioremap_bar(mite->pcidev, 0);
- if (!mite->mite_io_addr) {
- dev_err(dev->class_dev,
- "Failed to remap mite io memory address\n");
- return -ENOMEM;
- }
- mite->mite_phys_addr = pci_resource_start(mite->pcidev, 0);
+/**
+ * mite_bytes_in_transit() - Returns the number of unread bytes in the fifo.
+ * @mite_chan: MITE dma channel.
+ */
+u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
+{
+ struct mite *mite = mite_chan->mite;
- dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
- if (!dev->mmio) {
- dev_err(dev->class_dev,
- "Failed to remap daq io memory address\n");
- return -ENOMEM;
- }
- mite->daq_phys_addr = pci_resource_start(mite->pcidev, 1);
- length = pci_resource_len(mite->pcidev, 1);
+ return readl(mite->mmio + MITE_FCR(mite_chan->channel)) & 0xff;
+}
+EXPORT_SYMBOL_GPL(mite_bytes_in_transit);
- if (use_win1) {
- writel(0, mite->mite_io_addr + MITE_IODWBSR);
- dev_info(dev->class_dev,
- "using I/O Window Base Size register 1\n");
- writel(mite->daq_phys_addr | WENAB |
- MITE_IODWBSR_1_WSIZE_bits(length),
- mite->mite_io_addr + MITE_IODWBSR_1);
- writel(0, mite->mite_io_addr + MITE_IODWCR_1);
- } else {
- writel(mite->daq_phys_addr | WENAB,
- mite->mite_io_addr + MITE_IODWBSR);
- }
- /*
- * Make sure dma bursts work. I got this from running a bus analyzer
- * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
- * of 0x61f and bursts worked. 6281 powered up with register value of
- * 0x1f and bursts didn't work. The NI windows driver reads the
- * register, then does a bitwise-or of 0x600 with it and writes it back.
- */
- unknown_dma_burst_bits =
- readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
- unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
- writel(unknown_dma_burst_bits,
- mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
+/* returns lower bound for number of bytes transferred from device to memory */
+static u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
+{
+ u32 device_byte_count;
- csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR);
- mite->num_channels = mite_csigr_dmac(csigr_bits);
- if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
- dev_warn(dev->class_dev,
- "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
- mite->num_channels, MAX_MITE_DMA_CHANNELS);
- mite->num_channels = MAX_MITE_DMA_CHANNELS;
- }
- dump_chip_signature(csigr_bits);
- for (i = 0; i < mite->num_channels; i++) {
- writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
- /* disable interrupts */
- writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
- CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
- CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
- mite->mite_io_addr + MITE_CHCR(i));
- }
- mite->fifo_size = mite_fifo_size(mite, 0);
- dev_info(dev->class_dev, "fifo size is %i.\n", mite->fifo_size);
- return 0;
+ device_byte_count = mite_device_bytes_transferred(mite_chan);
+ return device_byte_count - mite_bytes_in_transit(mite_chan);
}
-EXPORT_SYMBOL_GPL(mite_setup2);
-void mite_detach(struct mite_struct *mite)
+/* returns upper bound for number of bytes transferred from device to memory */
+static u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
{
- if (!mite)
- return;
-
- if (mite->mite_io_addr)
- iounmap(mite->mite_io_addr);
+ u32 in_transit_count;
- kfree(mite);
+ in_transit_count = mite_bytes_in_transit(mite_chan);
+ return mite_device_bytes_transferred(mite_chan) - in_transit_count;
}
-EXPORT_SYMBOL_GPL(mite_detach);
-struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite)
+/* returns lower bound for number of bytes read from memory to device */
+static u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
{
- struct mite_dma_descriptor_ring *ring =
- kmalloc(sizeof(struct mite_dma_descriptor_ring), GFP_KERNEL);
+ u32 device_byte_count;
- if (!ring)
- return NULL;
- ring->hw_dev = get_device(&mite->pcidev->dev);
- if (!ring->hw_dev) {
- kfree(ring);
- return NULL;
- }
- ring->n_links = 0;
- ring->descriptors = NULL;
- ring->descriptors_dma_addr = 0;
- return ring;
-};
-EXPORT_SYMBOL_GPL(mite_alloc_ring);
+ device_byte_count = mite_device_bytes_transferred(mite_chan);
+ return device_byte_count + mite_bytes_in_transit(mite_chan);
+}
-void mite_free_ring(struct mite_dma_descriptor_ring *ring)
+/* returns upper bound for number of bytes read from memory to device */
+static u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
{
- if (ring) {
- if (ring->descriptors) {
- dma_free_coherent(ring->hw_dev,
- ring->n_links *
- sizeof(struct mite_dma_descriptor),
- ring->descriptors,
- ring->descriptors_dma_addr);
- }
- put_device(ring->hw_dev);
- kfree(ring);
- }
-};
-EXPORT_SYMBOL_GPL(mite_free_ring);
+ u32 in_transit_count;
+
+ in_transit_count = mite_bytes_in_transit(mite_chan);
+ return mite_device_bytes_transferred(mite_chan) + in_transit_count;
+}
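The lb/ub pairs exist because the device byte counter (DAR) and the fifo residue (FCR) cannot be sampled atomically; the two helpers read them in opposite orders, which brackets the true count. Numerically, with a hypothetical snapshot:

#include <stdio.h>

int main(void)
{
	unsigned int dar = 4096;	/* device bytes transferred */
	unsigned int fifo = 64;		/* bytes still in transit in the fifo */

	/* device-to-memory: at least dar - fifo bytes are safely in memory */
	printf("written to memory >= %u\n", dar - fifo);
	/* memory-to-device: at most dar + fifo bytes were consumed */
	printf("read from memory  <= %u\n", dar + fifo);
	return 0;
}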
-struct mite_channel *mite_request_channel_in_range(struct mite_struct *mite,
- struct
- mite_dma_descriptor_ring
- *ring, unsigned min_channel,
- unsigned max_channel)
+static void mite_sync_input_dma(struct mite_channel *mite_chan,
+ struct comedi_subdevice *s)
{
- int i;
- unsigned long flags;
- struct mite_channel *channel = NULL;
+ struct comedi_async *async = s->async;
+ int count;
+ unsigned int nbytes, old_alloc_count;
+
+ old_alloc_count = async->buf_write_alloc_count;
+ /* write alloc as much as we can */
+ comedi_buf_write_alloc(s, async->prealloc_bufsz);
+ nbytes = mite_bytes_written_to_memory_lb(mite_chan);
+ if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
+ old_alloc_count) > 0) {
+ dev_warn(s->device->class_dev,
+ "mite: DMA overwrite of free area\n");
+ async->events |= COMEDI_CB_OVERFLOW;
+ return;
+ }
+
+ count = nbytes - async->buf_write_count;
/*
- * spin lock so mite_release_channel can be called safely
- * from interrupts
+ * It's possible count will be negative due to the conservative
+ * value returned by mite_bytes_written_to_memory_lb().
*/
- spin_lock_irqsave(&mite->lock, flags);
- for (i = min_channel; i <= max_channel; ++i) {
- if (mite->channel_allocated[i] == 0) {
- mite->channel_allocated[i] = 1;
- channel = &mite->channels[i];
- channel->ring = ring;
- break;
- }
+ if (count > 0) {
+ comedi_buf_write_free(s, count);
+ comedi_inc_scan_progress(s, count);
+ async->events |= COMEDI_CB_BLOCK;
}
- spin_unlock_irqrestore(&mite->lock, flags);
- return channel;
}
-EXPORT_SYMBOL_GPL(mite_request_channel_in_range);
-void mite_release_channel(struct mite_channel *mite_chan)
+static void mite_sync_output_dma(struct mite_channel *mite_chan,
+ struct comedi_subdevice *s)
{
- struct mite_struct *mite = mite_chan->mite;
- unsigned long flags;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
+ unsigned int old_alloc_count = async->buf_read_alloc_count;
+ u32 nbytes_ub, nbytes_lb;
+ int count;
+ bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0);
- /* spin lock to prevent races with mite_request_channel */
- spin_lock_irqsave(&mite->lock, flags);
- if (mite->channel_allocated[mite_chan->channel]) {
- mite_dma_disarm(mite_chan);
- mite_dma_reset(mite_chan);
+ /* read alloc as much as we can */
+ comedi_buf_read_alloc(s, async->prealloc_bufsz);
+ nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
+ if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
+ nbytes_lb = stop_count;
+ nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
+ if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
+ nbytes_ub = stop_count;
+
+ if ((!finite_regen || stop_count > old_alloc_count) &&
+ ((int)(nbytes_ub - old_alloc_count) > 0)) {
+ dev_warn(s->device->class_dev, "mite: DMA underrun\n");
+ async->events |= COMEDI_CB_OVERFLOW;
+ return;
+ }
+
+ if (finite_regen) {
/*
- * disable all channel's interrupts (do it after disarm/reset so
- * MITE_CHCR reg isn't changed while dma is still active!)
+ * This is a special case where we continuously output a finite
+ * buffer. In this case, we do not free any of the memory,
+ * hence we expect that old_alloc_count will reach a maximum of
+ * stop_count bytes.
*/
- writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
- CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
- CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
- CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
- mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
- mite->channel_allocated[mite_chan->channel] = 0;
- mite_chan->ring = NULL;
- mmiowb();
+ return;
+ }
+
+ count = nbytes_lb - async->buf_read_count;
+ if (count > 0) {
+ comedi_buf_read_free(s, count);
+ async->events |= COMEDI_CB_BLOCK;
}
- spin_unlock_irqrestore(&mite->lock, flags);
}
-EXPORT_SYMBOL_GPL(mite_release_channel);
-void mite_dma_arm(struct mite_channel *mite_chan)
+/**
+ * mite_sync_dma() - Sync the MITE dma with the COMEDI async buffer.
+ * @mite_chan: MITE dma channel.
+ * @s: COMEDI subdevice.
+ */
+void mite_sync_dma(struct mite_channel *mite_chan, struct comedi_subdevice *s)
+{
+ if (mite_chan->dir == COMEDI_INPUT)
+ mite_sync_input_dma(mite_chan, s);
+ else
+ mite_sync_output_dma(mite_chan, s);
+}
+EXPORT_SYMBOL_GPL(mite_sync_dma);
+
+static unsigned int mite_get_status(struct mite_channel *mite_chan)
{
- struct mite_struct *mite = mite_chan->mite;
- int chor;
+ struct mite *mite = mite_chan->mite;
+ unsigned int status;
unsigned long flags;
- /*
- * memory barrier is intended to insure any twiddling with the buffer
- * is done before writing to the mite to arm dma transfer
- */
- smp_mb();
- /* arm */
- chor = CHOR_START;
spin_lock_irqsave(&mite->lock, flags);
- mite_chan->done = 0;
- writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+ status = readl(mite->mmio + MITE_CHSR(mite_chan->channel));
+ if (status & CHSR_DONE) {
+ mite_chan->done = 1;
+ writel(CHOR_CLRDONE,
+ mite->mmio + MITE_CHOR(mite_chan->channel));
+ }
mmiowb();
spin_unlock_irqrestore(&mite->lock, flags);
- /* mite_dma_tcr(mite, channel); */
+ return status;
}
-EXPORT_SYMBOL_GPL(mite_dma_arm);
-/**************************************/
-
-int mite_buf_change(struct mite_dma_descriptor_ring *ring,
- struct comedi_subdevice *s)
+/**
+ * mite_ack_linkc() - Check and ack the LINKC interrupt.
+ * @mite_chan: MITE dma channel.
+ * @s: COMEDI subdevice.
+ * @sync: flag to force a mite_sync_dma().
+ *
+ * This will also ack the DONE interrupt if active.
+ */
+void mite_ack_linkc(struct mite_channel *mite_chan,
+ struct comedi_subdevice *s,
+ bool sync)
{
- struct comedi_async *async = s->async;
- unsigned int n_links;
+ struct mite *mite = mite_chan->mite;
+ unsigned int status;
- if (ring->descriptors) {
- dma_free_coherent(ring->hw_dev,
- ring->n_links *
- sizeof(struct mite_dma_descriptor),
- ring->descriptors,
- ring->descriptors_dma_addr);
+ status = mite_get_status(mite_chan);
+ if (status & CHSR_LINKC) {
+ writel(CHOR_CLRLC, mite->mmio + MITE_CHOR(mite_chan->channel));
+ sync = true;
}
- ring->descriptors = NULL;
- ring->descriptors_dma_addr = 0;
- ring->n_links = 0;
+ if (sync)
+ mite_sync_dma(mite_chan, s);
- if (async->prealloc_bufsz == 0)
- return 0;
-
- n_links = async->prealloc_bufsz >> PAGE_SHIFT;
-
- ring->descriptors =
- dma_alloc_coherent(ring->hw_dev,
- n_links * sizeof(struct mite_dma_descriptor),
- &ring->descriptors_dma_addr, GFP_KERNEL);
- if (!ring->descriptors) {
+ if (status & CHSR_XFERR) {
dev_err(s->device->class_dev,
- "mite: ring buffer allocation failed\n");
- return -ENOMEM;
+ "mite: transfer error %08x\n", status);
+ s->async->events |= COMEDI_CB_ERROR;
}
- ring->n_links = n_links;
-
- return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
}
-EXPORT_SYMBOL_GPL(mite_buf_change);
+EXPORT_SYMBOL_GPL(mite_ack_linkc);
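As a usage sketch only (the xxx_ names, the driver private struct, and its ai_mite_chan pointer are assumptions, not part of this patch), an interrupt handler would typically ack LINKC and let the comedi core dispatch the resulting events:

static irqreturn_t xxx_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct xxx_private *devpriv = dev->private;
	struct comedi_subdevice *s = dev->read_subdev;

	/* ack LINKC (and DONE, if set) and force a dma/buffer sync */
	if (devpriv->ai_mite_chan)
		mite_ack_linkc(devpriv->ai_mite_chan, s, true);

	comedi_handle_events(dev, s);
	return IRQ_HANDLED;
}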
-/*
- * initializes the ring buffer descriptors to provide correct DMA transfer links
- * to the exact amount of memory required. When the ring buffer is allocated in
- * mite_buf_change, the default is to initialize the ring to refer to the entire
- * DMA data buffer. A command may call this function later to re-initialize and
- * shorten the amount of memory that will be transferred.
+/**
+ * mite_done() - Check if a MITE dma transfer is complete.
+ * @mite_chan: MITE dma channel.
+ *
+ * This will also ack the DONE interrupt if active.
*/
-int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring,
- struct comedi_subdevice *s,
- unsigned int nbytes)
+int mite_done(struct mite_channel *mite_chan)
{
- struct comedi_async *async = s->async;
- unsigned int n_full_links = nbytes >> PAGE_SHIFT;
- unsigned int remainder = nbytes % PAGE_SIZE;
- int i;
-
- dev_dbg(s->device->class_dev,
- "mite: init ring buffer to %u bytes\n", nbytes);
-
- if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
- dev_err(s->device->class_dev,
- "mite: ring buffer too small for requested init\n");
- return -ENOMEM;
- }
+ struct mite *mite = mite_chan->mite;
+ unsigned long flags;
+ int done;
- /* We set the descriptors for all full links. */
- for (i = 0; i < n_full_links; ++i) {
- ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
- ring->descriptors[i].addr =
- cpu_to_le32(async->buf_map->page_list[i].dma_addr);
- ring->descriptors[i].next =
- cpu_to_le32(ring->descriptors_dma_addr +
- (i + 1) * sizeof(struct mite_dma_descriptor));
- }
+ mite_get_status(mite_chan);
+ spin_lock_irqsave(&mite->lock, flags);
+ done = mite_chan->done;
+ spin_unlock_irqrestore(&mite->lock, flags);
+ return done;
+}
+EXPORT_SYMBOL_GPL(mite_done);
- /* the last link is either a remainder or was a full link. */
- if (remainder > 0) {
- /* set the lesser count for the remainder link */
- ring->descriptors[i].count = cpu_to_le32(remainder);
- ring->descriptors[i].addr =
- cpu_to_le32(async->buf_map->page_list[i].dma_addr);
- /* increment i so that assignment below refs last link */
- ++i;
- }
+static void mite_dma_reset(struct mite_channel *mite_chan)
+{
+ writel(CHOR_DMARESET | CHOR_FRESET,
+ mite_chan->mite->mmio + MITE_CHOR(mite_chan->channel));
+}
- /* Assign the last link->next to point back to the head of the list. */
- ring->descriptors[i - 1].next = cpu_to_le32(ring->descriptors_dma_addr);
+/**
+ * mite_dma_arm() - Start a MITE dma transfer.
+ * @mite_chan: MITE dma channel.
+ */
+void mite_dma_arm(struct mite_channel *mite_chan)
+{
+ struct mite *mite = mite_chan->mite;
+ unsigned long flags;
/*
- * barrier is meant to insure that all the writes to the dma descriptors
- * have completed before the dma controller is commanded to read them
+	 * memory barrier is intended to ensure any twiddling with the buffer
+	 * is done before writing to the mite to arm the dma transfer
*/
- smp_wmb();
- return 0;
+ smp_mb();
+ spin_lock_irqsave(&mite->lock, flags);
+ mite_chan->done = 0;
+ /* arm */
+ writel(CHOR_START, mite->mmio + MITE_CHOR(mite_chan->channel));
+ mmiowb();
+ spin_unlock_irqrestore(&mite->lock, flags);
}
-EXPORT_SYMBOL_GPL(mite_init_ring_descriptors);
+EXPORT_SYMBOL_GPL(mite_dma_arm);
+
+/**
+ * mite_dma_disarm() - Stop a MITE dma transfer.
+ * @mite_chan: MITE dma channel.
+ */
+void mite_dma_disarm(struct mite_channel *mite_chan)
+{
+ struct mite *mite = mite_chan->mite;
+
+ /* disarm */
+ writel(CHOR_ABORT, mite->mmio + MITE_CHOR(mite_chan->channel));
+}
+EXPORT_SYMBOL_GPL(mite_dma_disarm);
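The arm/disarm pair belongs in the subdevice command handlers; a minimal sketch, assuming a hypothetical devpriv->mite_chan:

/* in the (*cmd) handler, after mite_prep_dma(): */
mite_dma_arm(devpriv->mite_chan);

/* in the (*cancel) handler: */
mite_dma_disarm(devpriv->mite_chan);
mite_sync_dma(devpriv->mite_chan, s);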
+/**
+ * mite_prep_dma() - Prepare a MITE dma channel for transfers.
+ * @mite_chan: MITE dma channel.
+ * @num_device_bits: device transfer size (8, 16, or 32-bits).
+ * @num_memory_bits: memory transfer size (8, 16, or 32-bits).
+ */
void mite_prep_dma(struct mite_channel *mite_chan,
unsigned int num_device_bits, unsigned int num_memory_bits)
{
- unsigned int chor, chcr, mcr, dcr, lkcr;
- struct mite_struct *mite = mite_chan->mite;
+ struct mite *mite = mite_chan->mite;
+ unsigned int chcr, mcr, dcr, lkcr;
- /* reset DMA and FIFO */
- chor = CHOR_DMARESET | CHOR_FRESET;
- writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+ mite_dma_reset(mite_chan);
/* short link chaining mode */
chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
@@ -421,10 +517,10 @@ void mite_prep_dma(struct mite_channel *mite_chan,
if (mite_chan->dir == COMEDI_INPUT)
chcr |= CHCR_DEV_TO_MEM;
- writel(chcr, mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
+ writel(chcr, mite->mmio + MITE_CHCR(mite_chan->channel));
/* to/from memory */
- mcr = CR_RL(64) | CR_ASEQUP;
+ mcr = mite_retry_limit(64) | CR_ASEQUP;
switch (num_memory_bits) {
case 8:
mcr |= CR_PSIZE8;
@@ -439,11 +535,11 @@ void mite_prep_dma(struct mite_channel *mite_chan,
pr_warn("bug! invalid mem bit width for dma transfer\n");
break;
}
- writel(mcr, mite->mite_io_addr + MITE_MCR(mite_chan->channel));
+ writel(mcr, mite->mmio + MITE_MCR(mite_chan->channel));
/* from/to device */
- dcr = CR_RL(64) | CR_ASEQUP;
- dcr |= CR_PORTIO | CR_AMDEVICE | CR_REQSDRQ(mite_chan->channel);
+ dcr = mite_retry_limit(64) | CR_ASEQUP;
+ dcr |= CR_PORTIO | CR_AMDEVICE | mite_drq_reqs(mite_chan->channel);
switch (num_device_bits) {
case 8:
dcr |= CR_PSIZE8;
@@ -458,223 +554,402 @@ void mite_prep_dma(struct mite_channel *mite_chan,
pr_warn("bug! invalid dev bit width for dma transfer\n");
break;
}
- writel(dcr, mite->mite_io_addr + MITE_DCR(mite_chan->channel));
+ writel(dcr, mite->mmio + MITE_DCR(mite_chan->channel));
/* reset the DAR */
- writel(0, mite->mite_io_addr + MITE_DAR(mite_chan->channel));
+ writel(0, mite->mmio + MITE_DAR(mite_chan->channel));
/* the link is 32bits */
- lkcr = CR_RL(64) | CR_ASEQUP | CR_PSIZE32;
- writel(lkcr, mite->mite_io_addr + MITE_LKCR(mite_chan->channel));
+ lkcr = mite_retry_limit(64) | CR_ASEQUP | CR_PSIZE32;
+ writel(lkcr, mite->mmio + MITE_LKCR(mite_chan->channel));
/* starting address for link chaining */
- writel(mite_chan->ring->descriptors_dma_addr,
- mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
+ writel(mite_chan->ring->dma_addr,
+ mite->mmio + MITE_LKAR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_prep_dma);
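A sketch of picking the transfer widths; the assumption here is that the memory side follows the subdevice sample size, where SDF_LSAMPL marks 32-bit samples:

/* sketch: 32-bit dma for LSAMPL subdevices, 16-bit otherwise */
unsigned int nbits = (s->subdev_flags & SDF_LSAMPL) ? 32 : 16;

mite_prep_dma(mite_chan, nbits, nbits);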
-static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
+static struct mite_channel *__mite_request_channel(struct mite *mite,
+ struct mite_ring *ring,
+ unsigned int min_channel,
+ unsigned int max_channel)
{
- struct mite_struct *mite = mite_chan->mite;
+ struct mite_channel *mite_chan = NULL;
+ unsigned long flags;
+ int i;
- return readl(mite->mite_io_addr + MITE_DAR(mite_chan->channel));
+ /*
+ * spin lock so mite_release_channel can be called safely
+ * from interrupts
+ */
+ spin_lock_irqsave(&mite->lock, flags);
+ for (i = min_channel; i <= max_channel; ++i) {
+ mite_chan = &mite->channels[i];
+ if (!mite_chan->ring) {
+ mite_chan->ring = ring;
+ break;
+ }
+ mite_chan = NULL;
+ }
+ spin_unlock_irqrestore(&mite->lock, flags);
+ return mite_chan;
}
-u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
+/**
+ * mite_request_channel_in_range() - Request a MITE dma channel.
+ * @mite: MITE device.
+ * @ring: MITE dma ring.
+ * @min_channel: minimum channel index to use.
+ * @max_channel: maximum channel index to use.
+ */
+struct mite_channel *mite_request_channel_in_range(struct mite *mite,
+ struct mite_ring *ring,
+ unsigned int min_channel,
+ unsigned int max_channel)
{
- struct mite_struct *mite = mite_chan->mite;
-
- return readl(mite->mite_io_addr +
- MITE_FCR(mite_chan->channel)) & 0x000000FF;
+ return __mite_request_channel(mite, ring, min_channel, max_channel);
}
-EXPORT_SYMBOL_GPL(mite_bytes_in_transit);
+EXPORT_SYMBOL_GPL(mite_request_channel_in_range);
-/* returns lower bound for number of bytes transferred from device to memory */
-u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
+/**
+ * mite_request_channel() - Request a MITE dma channel.
+ * @mite: MITE device.
+ * @ring: MITE dma ring.
+ */
+struct mite_channel *mite_request_channel(struct mite *mite,
+ struct mite_ring *ring)
{
- u32 device_byte_count;
-
- device_byte_count = mite_device_bytes_transferred(mite_chan);
- return device_byte_count - mite_bytes_in_transit(mite_chan);
+ return __mite_request_channel(mite, ring, 0, mite->num_channels - 1);
}
-EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_lb);
+EXPORT_SYMBOL_GPL(mite_request_channel);
-/* returns upper bound for number of bytes transferred from device to memory */
-u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
+/**
+ * mite_release_channel() - Release a MITE dma channel.
+ * @mite_chan: MITE dma channel.
+ */
+void mite_release_channel(struct mite_channel *mite_chan)
{
- u32 in_transit_count;
+ struct mite *mite = mite_chan->mite;
+ unsigned long flags;
- in_transit_count = mite_bytes_in_transit(mite_chan);
- return mite_device_bytes_transferred(mite_chan) - in_transit_count;
+ /* spin lock to prevent races with mite_request_channel */
+ spin_lock_irqsave(&mite->lock, flags);
+ if (mite_chan->ring) {
+ mite_dma_disarm(mite_chan);
+ mite_dma_reset(mite_chan);
+ /*
+		 * disable all the channel's interrupts (do it after disarm/reset so
+ * MITE_CHCR reg isn't changed while dma is still active!)
+ */
+ writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
+ CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
+ CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+ CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
+ mite->mmio + MITE_CHCR(mite_chan->channel));
+ mite_chan->ring = NULL;
+ mmiowb();
+ }
+ spin_unlock_irqrestore(&mite->lock, flags);
}
-EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_ub);
+EXPORT_SYMBOL_GPL(mite_release_channel);
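A request/release sketch; devpriv, its mite_channel_lock, and its ring field mirror what the NI drivers typically keep and are assumptions here:

unsigned long flags;

spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
devpriv->mite_chan = mite_request_channel(devpriv->mite, devpriv->ring);
if (!devpriv->mite_chan) {
	spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
	dev_err(dev->class_dev, "failed to reserve mite dma channel\n");
	return -EBUSY;
}
devpriv->mite_chan->dir = COMEDI_INPUT;
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);

/* ... and later, from the (*cancel) or (*detach) path: */
mite_release_channel(devpriv->mite_chan);
devpriv->mite_chan = NULL;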
-/* returns lower bound for number of bytes read from memory to device */
-u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
+/**
+ * mite_init_ring_descriptors() - Initialize a MITE dma ring's descriptors.
+ * @ring: MITE dma ring.
+ * @s: COMEDI subdevice.
+ * @nbytes: the size of the dma ring (in bytes).
+ *
+ * Initializes the ring buffer descriptors to provide correct DMA transfer
+ * links to the exact amount of memory required. When the ring buffer is
+ * allocated by mite_buf_change(), the default is to initialize the ring
+ * to refer to the entire DMA data buffer. A command may call this function
+ * later to re-initialize and shorten the amount of memory that will be
+ * transferred.
+ */
+int mite_init_ring_descriptors(struct mite_ring *ring,
+ struct comedi_subdevice *s,
+ unsigned int nbytes)
{
- u32 device_byte_count;
+ struct comedi_async *async = s->async;
+ struct mite_dma_desc *desc = NULL;
+ unsigned int n_full_links = nbytes >> PAGE_SHIFT;
+ unsigned int remainder = nbytes % PAGE_SIZE;
+ int i;
- device_byte_count = mite_device_bytes_transferred(mite_chan);
- return device_byte_count + mite_bytes_in_transit(mite_chan);
-}
-EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_lb);
+ dev_dbg(s->device->class_dev,
+ "mite: init ring buffer to %u bytes\n", nbytes);
-/* returns upper bound for number of bytes read from memory to device */
-u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
-{
- u32 in_transit_count;
+ if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
+ dev_err(s->device->class_dev,
+ "mite: ring buffer too small for requested init\n");
+ return -ENOMEM;
+ }
- in_transit_count = mite_bytes_in_transit(mite_chan);
- return mite_device_bytes_transferred(mite_chan) + in_transit_count;
-}
-EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_ub);
+ /* We set the descriptors for all full links. */
+ for (i = 0; i < n_full_links; ++i) {
+ desc = &ring->descs[i];
+ desc->count = cpu_to_le32(PAGE_SIZE);
+ desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
+ desc->next = cpu_to_le32(ring->dma_addr +
+ (i + 1) * sizeof(*desc));
+ }
-unsigned mite_dma_tcr(struct mite_channel *mite_chan)
-{
- struct mite_struct *mite = mite_chan->mite;
+ /* the last link is either a remainder or was a full link. */
+	/* The last link is either a remainder or was a full link. */
+ desc = &ring->descs[i];
+ /* set the lesser count for the remainder link */
+ desc->count = cpu_to_le32(remainder);
+ desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
+ }
+
+ /* Assign the last link->next to point back to the head of the list. */
+ desc->next = cpu_to_le32(ring->dma_addr);
- return readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
+ /*
+	 * barrier is meant to ensure that all the writes to the dma descriptors
+	 * have completed before the dma controller is commanded to read them
+ */
+ smp_wmb();
+ return 0;
}
-EXPORT_SYMBOL_GPL(mite_dma_tcr);
+EXPORT_SYMBOL_GPL(mite_init_ring_descriptors);
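For example, with 4096-byte pages a request for nbytes = 10000 gives n_full_links = 2 and remainder = 1808, so three descriptors are written and the third descriptor's next pointer wraps back to ring->dma_addr. A call sketch, shrinking an already-allocated ring to a finite command's byte count (stop_arg scaling as in mite_sync_output_dma() above):

ret = mite_init_ring_descriptors(ring, s,
				 cmd->stop_arg * comedi_bytes_per_scan(s));
if (ret)
	return ret;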
-void mite_dma_disarm(struct mite_channel *mite_chan)
+static void mite_free_dma_descs(struct mite_ring *ring)
{
- struct mite_struct *mite = mite_chan->mite;
- unsigned chor;
+ struct mite_dma_desc *descs = ring->descs;
- /* disarm */
- chor = CHOR_ABORT;
- writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+ if (descs) {
+ dma_free_coherent(ring->hw_dev,
+ ring->n_links * sizeof(*descs),
+ descs, ring->dma_addr);
+ ring->descs = NULL;
+ ring->dma_addr = 0;
+ ring->n_links = 0;
+ }
}
-EXPORT_SYMBOL_GPL(mite_dma_disarm);
-int mite_sync_input_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s)
+/**
+ * mite_buf_change() - COMEDI subdevice (*buf_change) for a MITE dma ring.
+ * @ring: MITE dma ring.
+ * @s: COMEDI subdevice.
+ */
+int mite_buf_change(struct mite_ring *ring, struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
- int count;
- unsigned int nbytes, old_alloc_count;
+ struct mite_dma_desc *descs;
+ unsigned int n_links;
- old_alloc_count = async->buf_write_alloc_count;
- /* write alloc as much as we can */
- comedi_buf_write_alloc(s, async->prealloc_bufsz);
+ mite_free_dma_descs(ring);
- nbytes = mite_bytes_written_to_memory_lb(mite_chan);
- if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
- old_alloc_count) > 0) {
- dev_warn(s->device->class_dev,
- "mite: DMA overwrite of free area\n");
- async->events |= COMEDI_CB_OVERFLOW;
- return -1;
+ if (async->prealloc_bufsz == 0)
+ return 0;
+
+ n_links = async->prealloc_bufsz >> PAGE_SHIFT;
+
+ descs = dma_alloc_coherent(ring->hw_dev,
+ n_links * sizeof(*descs),
+ &ring->dma_addr, GFP_KERNEL);
+ if (!descs) {
+ dev_err(s->device->class_dev,
+ "mite: ring buffer allocation failed\n");
+ return -ENOMEM;
}
+ ring->descs = descs;
+ ring->n_links = n_links;
- count = nbytes - async->buf_write_count;
- /*
- * it's possible count will be negative due to conservative value
- * returned by mite_bytes_written_to_memory_lb
- */
- if (count <= 0)
- return 0;
+ return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(mite_buf_change);
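A sketch of wiring this up as the subdevice (*buf_change) hook; the xxx_ names and the per-subdevice ring pointer are assumptions:

static int xxx_buf_change(struct comedi_device *dev,
			  struct comedi_subdevice *s)
{
	struct xxx_private *devpriv = dev->private;

	return mite_buf_change(devpriv->ring, s);
}

/* in (*auto_attach): s->buf_change = xxx_buf_change; */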
- comedi_buf_write_free(s, count);
- comedi_inc_scan_progress(s, count);
- async->events |= COMEDI_CB_BLOCK;
- return 0;
+/**
+ * mite_alloc_ring() - Allocate a MITE dma ring.
+ * @mite: MITE device.
+ */
+struct mite_ring *mite_alloc_ring(struct mite *mite)
+{
+ struct mite_ring *ring;
+
+ ring = kmalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return NULL;
+ ring->hw_dev = get_device(&mite->pcidev->dev);
+ if (!ring->hw_dev) {
+ kfree(ring);
+ return NULL;
+ }
+ ring->n_links = 0;
+ ring->descs = NULL;
+ ring->dma_addr = 0;
+ return ring;
}
-EXPORT_SYMBOL_GPL(mite_sync_input_dma);
+EXPORT_SYMBOL_GPL(mite_alloc_ring);
-int mite_sync_output_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s)
+/**
+ * mite_free_ring() - Free a MITE dma ring and its descriptors.
+ * @ring: MITE dma ring.
+ */
+void mite_free_ring(struct mite_ring *ring)
{
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
- unsigned int old_alloc_count = async->buf_read_alloc_count;
- u32 nbytes_ub, nbytes_lb;
- int count;
- bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0);
+ if (ring) {
+ mite_free_dma_descs(ring);
+ put_device(ring->hw_dev);
+ kfree(ring);
+ }
+}
+EXPORT_SYMBOL_GPL(mite_free_ring);
- /* read alloc as much as we can */
- comedi_buf_read_alloc(s, async->prealloc_bufsz);
- nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
- if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
- nbytes_lb = stop_count;
- nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
- if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
- nbytes_ub = stop_count;
+static int mite_setup(struct comedi_device *dev, struct mite *mite,
+ bool use_win1)
+{
+ resource_size_t daq_phys_addr;
+ unsigned long length;
+ int i;
+ u32 csigr_bits;
+ unsigned int unknown_dma_burst_bits;
+ unsigned int wpdep;
- if ((!finite_regen || stop_count > old_alloc_count) &&
- ((int)(nbytes_ub - old_alloc_count) > 0)) {
- dev_warn(s->device->class_dev, "mite: DMA underrun\n");
- async->events |= COMEDI_CB_OVERFLOW;
- return -1;
+ pci_set_master(mite->pcidev);
+
+ mite->mmio = pci_ioremap_bar(mite->pcidev, 0);
+ if (!mite->mmio)
+ return -ENOMEM;
+
+ dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
+ if (!dev->mmio)
+ return -ENOMEM;
+ daq_phys_addr = pci_resource_start(mite->pcidev, 1);
+ length = pci_resource_len(mite->pcidev, 1);
+
+ if (use_win1) {
+ writel(0, mite->mmio + MITE_IODWBSR);
+ dev_dbg(dev->class_dev,
+ "mite: using I/O Window Base Size register 1\n");
+ writel(daq_phys_addr | WENAB |
+ MITE_IODWBSR_1_WSIZE_bits(length),
+ mite->mmio + MITE_IODWBSR_1);
+ writel(0, mite->mmio + MITE_IODWCR_1);
+ } else {
+ writel(daq_phys_addr | WENAB, mite->mmio + MITE_IODWBSR);
}
+ /*
+ * Make sure dma bursts work. I got this from running a bus analyzer
+ * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
+ * of 0x61f and bursts worked. 6281 powered up with register value of
+ * 0x1f and bursts didn't work. The NI windows driver reads the
+ * register, then does a bitwise-or of 0x600 with it and writes it back.
+ *
+ * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
+ * written and read back. The bits 0x1f always read as 1.
+ * The rest always read as zero.
+ */
+ unknown_dma_burst_bits = readl(mite->mmio + MITE_UNKNOWN_DMA_BURST_REG);
+ unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
+ writel(unknown_dma_burst_bits, mite->mmio + MITE_UNKNOWN_DMA_BURST_REG);
- if (finite_regen) {
- /*
- * This is a special case where we continuously output a finite
- * buffer. In this case, we do not free any of the memory,
- * hence we expect that old_alloc_count will reach a maximum of
- * stop_count bytes.
- */
- return 0;
+ csigr_bits = readl(mite->mmio + MITE_CSIGR);
+ mite->num_channels = CSIGR_TO_DMAC(csigr_bits);
+ if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
+ dev_warn(dev->class_dev,
+ "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
+ mite->num_channels, MAX_MITE_DMA_CHANNELS);
+ mite->num_channels = MAX_MITE_DMA_CHANNELS;
}
- count = nbytes_lb - async->buf_read_count;
- if (count <= 0)
- return 0;
+	/* get the wpdep bits and convert them to the write post fifo depth */
+ wpdep = CSIGR_TO_WPDEP(csigr_bits);
+ if (wpdep)
+ wpdep = BIT(wpdep);
- if (count) {
- comedi_buf_read_free(s, count);
- async->events |= COMEDI_CB_BLOCK;
+ dev_dbg(dev->class_dev,
+ "mite: version = %i, type = %i, mite mode = %i, interface mode = %i\n",
+ CSIGR_TO_VER(csigr_bits), CSIGR_TO_TYPE(csigr_bits),
+ CSIGR_TO_MMODE(csigr_bits), CSIGR_TO_IMODE(csigr_bits));
+ dev_dbg(dev->class_dev,
+ "mite: num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
+ CSIGR_TO_DMAC(csigr_bits), wpdep,
+ CSIGR_TO_WINS(csigr_bits), CSIGR_TO_IOWINS(csigr_bits));
+
+ for (i = 0; i < mite->num_channels; i++) {
+ writel(CHOR_DMARESET, mite->mmio + MITE_CHOR(i));
+ /* disable interrupts */
+ writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
+ CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+ CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
+ mite->mmio + MITE_CHCR(i));
}
+ mite->fifo_size = mite_fifo_size(mite, 0);
+ dev_dbg(dev->class_dev, "mite: fifo size is %i.\n", mite->fifo_size);
return 0;
}
-EXPORT_SYMBOL_GPL(mite_sync_output_dma);
-unsigned mite_get_status(struct mite_channel *mite_chan)
+/**
+ * mite_attach() - Allocate and initialize a MITE device for a comedi driver.
+ * @dev: COMEDI device.
+ * @use_win1: flag to use I/O Window 1 instead of I/O Window 0.
+ *
+ * Called by a COMEDI driver's (*auto_attach).
+ *
+ * Returns a pointer to the MITE device on success, or NULL if the MITE cannot
+ * be allocated or remapped.
+ */
+struct mite *mite_attach(struct comedi_device *dev, bool use_win1)
{
- struct mite_struct *mite = mite_chan->mite;
- unsigned status;
- unsigned long flags;
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ struct mite *mite;
+ unsigned int i;
+ int ret;
- spin_lock_irqsave(&mite->lock, flags);
- status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel));
- if (status & CHSR_DONE) {
- mite_chan->done = 1;
- writel(CHOR_CLRDONE,
- mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+ mite = kzalloc(sizeof(*mite), GFP_KERNEL);
+ if (!mite)
+ return NULL;
+
+ spin_lock_init(&mite->lock);
+ mite->pcidev = pcidev;
+ for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
+ mite->channels[i].mite = mite;
+ mite->channels[i].channel = i;
+ mite->channels[i].done = 1;
}
- mmiowb();
- spin_unlock_irqrestore(&mite->lock, flags);
- return status;
+
+ ret = mite_setup(dev, mite, use_win1);
+ if (ret) {
+ if (mite->mmio)
+ iounmap(mite->mmio);
+ kfree(mite);
+ return NULL;
+ }
+
+ return mite;
}
-EXPORT_SYMBOL_GPL(mite_get_status);
+EXPORT_SYMBOL_GPL(mite_attach);
-int mite_done(struct mite_channel *mite_chan)
+/**
+ * mite_detach() - Unmap and free a MITE device for a comedi driver.
+ * @mite: MITE device.
+ *
+ * Called by a COMEDI driver's (*detach).
+ */
+void mite_detach(struct mite *mite)
{
- struct mite_struct *mite = mite_chan->mite;
- unsigned long flags;
- int done;
+ if (!mite)
+ return;
- mite_get_status(mite_chan);
- spin_lock_irqsave(&mite->lock, flags);
- done = mite_chan->done;
- spin_unlock_irqrestore(&mite->lock, flags);
- return done;
+ if (mite->mmio)
+ iounmap(mite->mmio);
+
+ kfree(mite);
}
-EXPORT_SYMBOL_GPL(mite_done);
+EXPORT_SYMBOL_GPL(mite_detach);
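A lifecycle sketch pairing the two calls in a PCI comedi driver; everything outside the mite_* calls is an assumption:

static int xxx_auto_attach(struct comedi_device *dev, unsigned long context)
{
	struct xxx_private *devpriv = dev->private;

	devpriv->mite = mite_attach(dev, false);	/* I/O window 0 */
	if (!devpriv->mite)
		return -ENOMEM;
	/* ... allocate rings, request channels, set up subdevices ... */
	return 0;
}

static void xxx_detach(struct comedi_device *dev)
{
	struct xxx_private *devpriv = dev->private;

	if (devpriv)
		mite_detach(devpriv->mite);
	comedi_pci_detach(dev);
}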
static int __init mite_module_init(void)
{
return 0;
}
+module_init(mite_module_init);
static void __exit mite_module_exit(void)
{
}
-
-module_init(mite_module_init);
module_exit(mite_module_exit);
MODULE_AUTHOR("Comedi http://www.comedi.org");
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
index 87534b07e..b6349aed9 100644
--- a/drivers/staging/comedi/drivers/mite.h
+++ b/drivers/staging/comedi/drivers/mite.h
@@ -19,8 +19,6 @@
#ifndef _MITE_H_
#define _MITE_H_
-#include <linux/io.h>
-#include <linux/log2.h>
#include <linux/spinlock.h>
#define MAX_MITE_DMA_CHANNELS 8
@@ -30,323 +28,74 @@ struct comedi_subdevice;
struct device;
struct pci_dev;
-struct mite_dma_descriptor {
+struct mite_dma_desc {
__le32 count;
__le32 addr;
__le32 next;
u32 dar;
};
-struct mite_dma_descriptor_ring {
+struct mite_ring {
struct device *hw_dev;
unsigned int n_links;
- struct mite_dma_descriptor *descriptors;
- dma_addr_t descriptors_dma_addr;
+ struct mite_dma_desc *descs;
+ dma_addr_t dma_addr;
};
struct mite_channel {
- struct mite_struct *mite;
- unsigned channel;
+ struct mite *mite;
+ unsigned int channel;
int dir;
int done;
- struct mite_dma_descriptor_ring *ring;
+ struct mite_ring *ring;
};
-struct mite_struct {
+struct mite {
struct pci_dev *pcidev;
- resource_size_t mite_phys_addr;
- void __iomem *mite_io_addr;
- resource_size_t daq_phys_addr;
+ void __iomem *mmio;
struct mite_channel channels[MAX_MITE_DMA_CHANNELS];
- short channel_allocated[MAX_MITE_DMA_CHANNELS];
int num_channels;
- unsigned fifo_size;
+ unsigned int fifo_size;
+ /* protects mite_channel from being released by the driver */
spinlock_t lock;
};
-struct mite_struct *mite_alloc(struct pci_dev *pcidev);
+u32 mite_bytes_in_transit(struct mite_channel *);
-int mite_setup2(struct comedi_device *, struct mite_struct *, bool use_win1);
+void mite_sync_dma(struct mite_channel *, struct comedi_subdevice *);
+void mite_ack_linkc(struct mite_channel *, struct comedi_subdevice *s,
+ bool sync);
+int mite_done(struct mite_channel *);
-static inline int mite_setup(struct comedi_device *dev,
- struct mite_struct *mite)
-{
- return mite_setup2(dev, mite, false);
-}
+void mite_dma_arm(struct mite_channel *);
+void mite_dma_disarm(struct mite_channel *);
-void mite_detach(struct mite_struct *mite);
-struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite);
-void mite_free_ring(struct mite_dma_descriptor_ring *ring);
-struct mite_channel *
-mite_request_channel_in_range(struct mite_struct *mite,
- struct mite_dma_descriptor_ring *ring,
- unsigned min_channel, unsigned max_channel);
-static inline struct mite_channel *
-mite_request_channel(struct mite_struct *mite,
- struct mite_dma_descriptor_ring *ring)
-{
- return mite_request_channel_in_range(mite, ring, 0,
- mite->num_channels - 1);
-}
-
-void mite_release_channel(struct mite_channel *mite_chan);
-
-unsigned mite_dma_tcr(struct mite_channel *mite_chan);
-void mite_dma_arm(struct mite_channel *mite_chan);
-void mite_dma_disarm(struct mite_channel *mite_chan);
-int mite_sync_input_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s);
-int mite_sync_output_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s);
-u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan);
-u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan);
-u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan);
-u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan);
-u32 mite_bytes_in_transit(struct mite_channel *mite_chan);
-unsigned mite_get_status(struct mite_channel *mite_chan);
-int mite_done(struct mite_channel *mite_chan);
-
-void mite_prep_dma(struct mite_channel *mite_chan,
+void mite_prep_dma(struct mite_channel *,
unsigned int num_device_bits, unsigned int num_memory_bits);
-int mite_buf_change(struct mite_dma_descriptor_ring *ring,
- struct comedi_subdevice *s);
-int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring,
- struct comedi_subdevice *s,
- unsigned int nbytes);
-
-enum mite_registers {
- /*
- * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
- * written and read back. The bits 0x1f always read as 1.
- * The rest always read as zero.
- */
- MITE_UNKNOWN_DMA_BURST_REG = 0x28,
- MITE_IODWBSR = 0xc0, /* IO Device Window Base Size Register */
- MITE_IODWBSR_1 = 0xc4, /* IO Device Window Base Size Register 1 */
- MITE_IODWCR_1 = 0xf4,
- MITE_PCI_CONFIG_OFFSET = 0x300,
- MITE_CSIGR = 0x460 /* chip signature */
-};
-
-#define MITE_CHAN(x) (0x500 + 0x100 * (x))
-#define MITE_CHOR(x) (0x00 + MITE_CHAN(x)) /* channel operation */
-#define MITE_CHCR(x) (0x04 + MITE_CHAN(x)) /* channel control */
-#define MITE_TCR(x) (0x08 + MITE_CHAN(x)) /* transfer count */
-#define MITE_MCR(x) (0x0c + MITE_CHAN(x)) /* memory configuration */
-#define MITE_MAR(x) (0x10 + MITE_CHAN(x)) /* memory address */
-#define MITE_DCR(x) (0x14 + MITE_CHAN(x)) /* device configuration */
-#define MITE_DAR(x) (0x18 + MITE_CHAN(x)) /* device address */
-#define MITE_LKCR(x) (0x1c + MITE_CHAN(x)) /* link configuration */
-#define MITE_LKAR(x) (0x20 + MITE_CHAN(x)) /* link address */
-#define MITE_LLKAR(x) (0x24 + MITE_CHAN(x)) /* see tnt5002 manual */
-#define MITE_BAR(x) (0x28 + MITE_CHAN(x)) /* base address */
-#define MITE_BCR(x) (0x2c + MITE_CHAN(x)) /* base count */
-#define MITE_SAR(x) (0x30 + MITE_CHAN(x)) /* ? address */
-#define MITE_WSCR(x) (0x34 + MITE_CHAN(x)) /* ? */
-#define MITE_WSER(x) (0x38 + MITE_CHAN(x)) /* ? */
-#define MITE_CHSR(x) (0x3c + MITE_CHAN(x)) /* channel status */
-#define MITE_FCR(x) (0x40 + MITE_CHAN(x)) /* fifo count */
-
-enum MITE_IODWBSR_bits {
- WENAB = 0x80, /* window enable */
-};
-
-static inline unsigned MITE_IODWBSR_1_WSIZE_bits(unsigned size)
-{
- unsigned order = 0;
-
- BUG_ON(size == 0);
- order = ilog2(size);
- BUG_ON(order < 1);
- return (order - 1) & 0x1f;
-}
-
-enum MITE_UNKNOWN_DMA_BURST_bits {
- UNKNOWN_DMA_BURST_ENABLE_BITS = 0x600
-};
-
-static inline int mite_csigr_version(u32 csigr_bits)
-{
- return csigr_bits & 0xf;
-};
-
-static inline int mite_csigr_type(u32 csigr_bits)
-{ /* original mite = 0, minimite = 1 */
- return (csigr_bits >> 4) & 0xf;
-};
-
-static inline int mite_csigr_mmode(u32 csigr_bits)
-{ /* mite mode, minimite = 1 */
- return (csigr_bits >> 8) & 0x3;
-};
-
-static inline int mite_csigr_imode(u32 csigr_bits)
-{ /* cpu port interface mode, pci = 0x3 */
- return (csigr_bits >> 12) & 0x3;
-};
-
-static inline int mite_csigr_dmac(u32 csigr_bits)
-{ /* number of dma channels */
- return (csigr_bits >> 16) & 0xf;
-};
-static inline int mite_csigr_wpdep(u32 csigr_bits)
-{ /* write post fifo depth */
- unsigned int wpdep_bits = (csigr_bits >> 20) & 0x7;
+struct mite_channel *mite_request_channel_in_range(struct mite *,
+ struct mite_ring *,
+ unsigned int min_channel,
+ unsigned int max_channel);
+struct mite_channel *mite_request_channel(struct mite *, struct mite_ring *);
+void mite_release_channel(struct mite_channel *);
- return (wpdep_bits) ? (1 << (wpdep_bits - 1)) : 0;
-}
-
-static inline int mite_csigr_wins(u32 csigr_bits)
-{
- return (csigr_bits >> 24) & 0x1f;
-};
-
-static inline int mite_csigr_iowins(u32 csigr_bits)
-{ /* number of io windows */
- return (csigr_bits >> 29) & 0x7;
-};
-
-enum MITE_MCR_bits {
- MCRPON = 0,
-};
-
-enum MITE_DCR_bits {
- DCR_NORMAL = (1 << 29),
- DCRPON = 0,
-};
-
-enum MITE_CHOR_bits {
- CHOR_DMARESET = (1 << 31),
- CHOR_SET_SEND_TC = (1 << 11),
- CHOR_CLR_SEND_TC = (1 << 10),
- CHOR_SET_LPAUSE = (1 << 9),
- CHOR_CLR_LPAUSE = (1 << 8),
- CHOR_CLRDONE = (1 << 7),
- CHOR_CLRRB = (1 << 6),
- CHOR_CLRLC = (1 << 5),
- CHOR_FRESET = (1 << 4),
- CHOR_ABORT = (1 << 3), /* stop without emptying fifo */
- CHOR_STOP = (1 << 2), /* stop after emptying fifo */
- CHOR_CONT = (1 << 1),
- CHOR_START = (1 << 0),
- CHOR_PON = (CHOR_CLR_SEND_TC | CHOR_CLR_LPAUSE),
-};
-
-enum MITE_CHCR_bits {
- CHCR_SET_DMA_IE = (1 << 31),
- CHCR_CLR_DMA_IE = (1 << 30),
- CHCR_SET_LINKP_IE = (1 << 29),
- CHCR_CLR_LINKP_IE = (1 << 28),
- CHCR_SET_SAR_IE = (1 << 27),
- CHCR_CLR_SAR_IE = (1 << 26),
- CHCR_SET_DONE_IE = (1 << 25),
- CHCR_CLR_DONE_IE = (1 << 24),
- CHCR_SET_MRDY_IE = (1 << 23),
- CHCR_CLR_MRDY_IE = (1 << 22),
- CHCR_SET_DRDY_IE = (1 << 21),
- CHCR_CLR_DRDY_IE = (1 << 20),
- CHCR_SET_LC_IE = (1 << 19),
- CHCR_CLR_LC_IE = (1 << 18),
- CHCR_SET_CONT_RB_IE = (1 << 17),
- CHCR_CLR_CONT_RB_IE = (1 << 16),
- CHCR_FIFODIS = (1 << 15),
- CHCR_FIFO_ON = 0,
- CHCR_BURSTEN = (1 << 14),
- CHCR_NO_BURSTEN = 0,
- CHCR_BYTE_SWAP_DEVICE = (1 << 6),
- CHCR_BYTE_SWAP_MEMORY = (1 << 4),
- CHCR_DIR = (1 << 3),
- CHCR_DEV_TO_MEM = CHCR_DIR,
- CHCR_MEM_TO_DEV = 0,
- CHCR_NORMAL = (0 << 0),
- CHCR_CONTINUE = (1 << 0),
- CHCR_RINGBUFF = (2 << 0),
- CHCR_LINKSHORT = (4 << 0),
- CHCR_LINKLONG = (5 << 0),
- CHCRPON =
- (CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
- CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
- CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE),
-};
-
-enum ConfigRegister_bits {
- CR_REQS_MASK = 0x7 << 16,
- CR_ASEQDONT = 0x0 << 10,
- CR_ASEQUP = 0x1 << 10,
- CR_ASEQDOWN = 0x2 << 10,
- CR_ASEQ_MASK = 0x3 << 10,
- CR_PSIZE8 = (1 << 8),
- CR_PSIZE16 = (2 << 8),
- CR_PSIZE32 = (3 << 8),
- CR_PORTCPU = (0 << 6),
- CR_PORTIO = (1 << 6),
- CR_PORTVXI = (2 << 6),
- CR_PORTMXI = (3 << 6),
- CR_AMDEVICE = (1 << 0),
-};
-
-static inline int CR_REQS(int source)
-{
- return (source & 0x7) << 16;
-};
-
-static inline int CR_REQSDRQ(unsigned drq_line)
-{
- /* This also works on m-series when using channels (drq_line) 4 or 5. */
- return CR_REQS((drq_line & 0x3) | 0x4);
-}
-
-static inline int CR_RL(unsigned int retry_limit)
-{
- int value = 0;
+int mite_init_ring_descriptors(struct mite_ring *, struct comedi_subdevice *,
+ unsigned int nbytes);
+int mite_buf_change(struct mite_ring *, struct comedi_subdevice *);
- if (retry_limit)
- value = 1 + ilog2(retry_limit);
- if (value > 0x7)
- value = 0x7;
- return (value & 0x7) << 21;
-}
+struct mite_ring *mite_alloc_ring(struct mite *);
+void mite_free_ring(struct mite_ring *);
-enum CHSR_bits {
- CHSR_INT = (1 << 31),
- CHSR_LPAUSES = (1 << 29),
- CHSR_SARS = (1 << 27),
- CHSR_DONE = (1 << 25),
- CHSR_MRDY = (1 << 23),
- CHSR_DRDY = (1 << 21),
- CHSR_LINKC = (1 << 19),
- CHSR_CONTS_RB = (1 << 17),
- CHSR_ERROR = (1 << 15),
- CHSR_SABORT = (1 << 14),
- CHSR_HABORT = (1 << 13),
- CHSR_STOPS = (1 << 12),
- CHSR_OPERR_mask = (3 << 10),
- CHSR_OPERR_NOERROR = (0 << 10),
- CHSR_OPERR_FIFOERROR = (1 << 10),
- CHSR_OPERR_LINKERROR = (1 << 10), /* ??? */
- CHSR_XFERR = (1 << 9),
- CHSR_END = (1 << 8),
- CHSR_DRQ1 = (1 << 7),
- CHSR_DRQ0 = (1 << 6),
- CHSR_LxERR_mask = (3 << 4),
- CHSR_LBERR = (1 << 4),
- CHSR_LRERR = (2 << 4),
- CHSR_LOERR = (3 << 4),
- CHSR_MxERR_mask = (3 << 2),
- CHSR_MBERR = (1 << 2),
- CHSR_MRERR = (2 << 2),
- CHSR_MOERR = (3 << 2),
- CHSR_DxERR_mask = (3 << 0),
- CHSR_DBERR = (1 << 0),
- CHSR_DRERR = (2 << 0),
- CHSR_DOERR = (3 << 0),
-};
+struct mite *mite_attach(struct comedi_device *, bool use_win1);
+void mite_detach(struct mite *);
-static inline void mite_dma_reset(struct mite_channel *mite_chan)
-{
- writel(CHOR_DMARESET | CHOR_FRESET,
- mite_chan->mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
-};
+/*
+ * Mite registers (used outside of the mite driver)
+ */
+#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size */
+#define MITE_IODWBSR_1 0xc4 /* IO Device Window1 Base Size */
+#define WENAB BIT(7) /* window enable */
+#define MITE_IODWCR_1 0xf4
#endif
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 46647c64f..0dcb826a9 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -1,17 +1,16 @@
/*
- comedi/drivers/ni_660x.c
- Hardware driver for NI 660x devices
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Hardware driver for NI 660x devices
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
* Driver: ni_660x
@@ -42,91 +41,13 @@
#include "mite.h"
#include "ni_tio.h"
-enum ni_660x_constants {
- min_counter_pfi_chan = 8,
- max_dio_pfi_chan = 31,
- counters_per_chip = 4
-};
-
-#define NUM_PFI_CHANNELS 40
-/* really there are only up to 3 dma channels, but the register layout allows
-for 4 */
-#define MAX_DMA_CHANNEL 4
-
/* See Register-Level Programmer Manual page 3.1 */
enum ni_660x_register {
- NI660X_G0_INT_ACK,
- NI660X_G0_STATUS,
- NI660X_G1_INT_ACK,
- NI660X_G1_STATUS,
- NI660X_G01_STATUS,
- NI660X_G0_CMD,
- NI660X_STC_DIO_PARALLEL_INPUT,
- NI660X_G1_CMD,
- NI660X_G0_HW_SAVE,
- NI660X_G1_HW_SAVE,
+ /* see enum ni_gpct_register */
+ NI660X_STC_DIO_PARALLEL_INPUT = NITIO_NUM_REGS,
NI660X_STC_DIO_OUTPUT,
NI660X_STC_DIO_CONTROL,
- NI660X_G0_SW_SAVE,
- NI660X_G1_SW_SAVE,
- NI660X_G0_MODE,
- NI660X_G01_STATUS1,
- NI660X_G1_MODE,
NI660X_STC_DIO_SERIAL_INPUT,
- NI660X_G0_LOADA,
- NI660X_G01_STATUS2,
- NI660X_G0_LOADB,
- NI660X_G1_LOADA,
- NI660X_G1_LOADB,
- NI660X_G0_INPUT_SEL,
- NI660X_G1_INPUT_SEL,
- NI660X_G0_AUTO_INC,
- NI660X_G1_AUTO_INC,
- NI660X_G01_RESET,
- NI660X_G0_INT_ENA,
- NI660X_G1_INT_ENA,
- NI660X_G0_CNT_MODE,
- NI660X_G1_CNT_MODE,
- NI660X_G0_GATE2,
- NI660X_G1_GATE2,
- NI660X_G0_DMA_CFG,
- NI660X_G0_DMA_STATUS,
- NI660X_G1_DMA_CFG,
- NI660X_G1_DMA_STATUS,
- NI660X_G2_INT_ACK,
- NI660X_G2_STATUS,
- NI660X_G3_INT_ACK,
- NI660X_G3_STATUS,
- NI660X_G23_STATUS,
- NI660X_G2_CMD,
- NI660X_G3_CMD,
- NI660X_G2_HW_SAVE,
- NI660X_G3_HW_SAVE,
- NI660X_G2_SW_SAVE,
- NI660X_G3_SW_SAVE,
- NI660X_G2_MODE,
- NI660X_G23_STATUS1,
- NI660X_G3_MODE,
- NI660X_G2_LOADA,
- NI660X_G23_STATUS2,
- NI660X_G2_LOADB,
- NI660X_G3_LOADA,
- NI660X_G3_LOADB,
- NI660X_G2_INPUT_SEL,
- NI660X_G3_INPUT_SEL,
- NI660X_G2_AUTO_INC,
- NI660X_G3_AUTO_INC,
- NI660X_G23_RESET,
- NI660X_G2_INT_ENA,
- NI660X_G3_INT_ENA,
- NI660X_G2_CNT_MODE,
- NI660X_G3_CNT_MODE,
- NI660X_G3_GATE2,
- NI660X_G2_GATE2,
- NI660X_G2_DMA_CFG,
- NI660X_G2_DMA_STATUS,
- NI660X_G3_DMA_CFG,
- NI660X_G3_DMA_STATUS,
NI660X_DIO32_INPUT,
NI660X_DIO32_OUTPUT,
NI660X_CLK_CFG,
@@ -156,224 +77,134 @@ enum ni_660x_register {
NI660X_NUM_REGS,
};
-static inline unsigned IOConfigReg(unsigned pfi_channel)
-{
- unsigned reg = NI660X_IO_CFG_0_1 + pfi_channel / 2;
-
- BUG_ON(reg > NI660X_IO_CFG_38_39);
- return reg;
-}
-
-enum ni_660x_register_width {
- DATA_1B,
- DATA_2B,
- DATA_4B
-};
+#define NI660X_CLK_CFG_COUNTER_SWAP BIT(21)
-enum ni_660x_register_direction {
- NI_660x_READ,
- NI_660x_WRITE,
- NI_660x_READ_WRITE
-};
+#define NI660X_GLOBAL_INT_COUNTER0 BIT(8)
+#define NI660X_GLOBAL_INT_COUNTER1 BIT(9)
+#define NI660X_GLOBAL_INT_COUNTER2 BIT(10)
+#define NI660X_GLOBAL_INT_COUNTER3 BIT(11)
+#define NI660X_GLOBAL_INT_CASCADE BIT(29)
+#define NI660X_GLOBAL_INT_GLOBAL_POL BIT(30)
+#define NI660X_GLOBAL_INT_GLOBAL BIT(31)
-enum ni_660x_pfi_output_select {
- pfi_output_select_high_Z = 0,
- pfi_output_select_counter = 1,
- pfi_output_select_do = 2,
- num_pfi_output_selects
-};
+#define NI660X_DMA_CFG_SEL(_c, _s) (((_s) & 0x1f) << (8 * (_c)))
+#define NI660X_DMA_CFG_SEL_MASK(_c) NI660X_DMA_CFG_SEL((_c), 0x1f)
+#define NI660X_DMA_CFG_SEL_NONE(_c) NI660X_DMA_CFG_SEL((_c), 0x1f)
+#define NI660X_DMA_CFG_RESET(_c) NI660X_DMA_CFG_SEL((_c), 0x80)
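A sketch of how these helpers maintain a soft copy of the DMA configuration register when routing a mite channel to a counter; devpriv->dma_cfg is the soft copy kept in the private struct below, and ni_660x_write() is the register helper added later in this patch:

devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel);
devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL(mite_channel, counter_index);
ni_660x_write(dev, chip, devpriv->dma_cfg[chip] |
	      NI660X_DMA_CFG_RESET(mite_channel),
	      NI660X_DMA_CFG);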
-enum ni_660x_subdevices {
- NI_660X_DIO_SUBDEV = 1,
- NI_660X_GPCT_SUBDEV_0 = 2
-};
-static inline unsigned NI_660X_GPCT_SUBDEV(unsigned index)
-{
- return NI_660X_GPCT_SUBDEV_0 + index;
-}
+#define NI660X_IO_CFG(x) (NI660X_IO_CFG_0_1 + ((x) / 2))
+#define NI660X_IO_CFG_OUT_SEL(_c, _s) (((_s) & 0x3) << (((_c) % 2) ? 0 : 8))
+#define NI660X_IO_CFG_OUT_SEL_MASK(_c) NI660X_IO_CFG_OUT_SEL((_c), 0x3)
+#define NI660X_IO_CFG_IN_SEL(_c, _s) (((_s) & 0x7) << (((_c) % 2) ? 4 : 12))
+#define NI660X_IO_CFG_IN_SEL_MASK(_c) NI660X_IO_CFG_IN_SEL((_c), 0x7)
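Similarly for the I/O config helpers, a sketch of selecting a PFI channel's output source; the io_cfg soft-copy indexing is an assumption:

devpriv->io_cfg[chan] &= ~NI660X_IO_CFG_OUT_SEL_MASK(chan);
devpriv->io_cfg[chan] |= NI660X_IO_CFG_OUT_SEL(chan, out_sel);
ni_660x_write(dev, 0, devpriv->io_cfg[chan], NI660X_IO_CFG(chan));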
-struct NI_660xRegisterData {
- const char *name; /* Register Name */
+struct ni_660x_register_data {
int offset; /* Offset from base address from GPCT chip */
- enum ni_660x_register_direction direction;
- enum ni_660x_register_width size; /* 1 byte, 2 bytes, or 4 bytes */
-};
-
-static const struct NI_660xRegisterData registerData[NI660X_NUM_REGS] = {
- {"G0 Interrupt Acknowledge", 0x004, NI_660x_WRITE, DATA_2B},
- {"G0 Status Register", 0x004, NI_660x_READ, DATA_2B},
- {"G1 Interrupt Acknowledge", 0x006, NI_660x_WRITE, DATA_2B},
- {"G1 Status Register", 0x006, NI_660x_READ, DATA_2B},
- {"G01 Status Register ", 0x008, NI_660x_READ, DATA_2B},
- {"G0 Command Register", 0x00C, NI_660x_WRITE, DATA_2B},
- {"STC DIO Parallel Input", 0x00E, NI_660x_READ, DATA_2B},
- {"G1 Command Register", 0x00E, NI_660x_WRITE, DATA_2B},
- {"G0 HW Save Register", 0x010, NI_660x_READ, DATA_4B},
- {"G1 HW Save Register", 0x014, NI_660x_READ, DATA_4B},
- {"STC DIO Output", 0x014, NI_660x_WRITE, DATA_2B},
- {"STC DIO Control", 0x016, NI_660x_WRITE, DATA_2B},
- {"G0 SW Save Register", 0x018, NI_660x_READ, DATA_4B},
- {"G1 SW Save Register", 0x01C, NI_660x_READ, DATA_4B},
- {"G0 Mode Register", 0x034, NI_660x_WRITE, DATA_2B},
- {"G01 Joint Status 1 Register", 0x036, NI_660x_READ, DATA_2B},
- {"G1 Mode Register", 0x036, NI_660x_WRITE, DATA_2B},
- {"STC DIO Serial Input", 0x038, NI_660x_READ, DATA_2B},
- {"G0 Load A Register", 0x038, NI_660x_WRITE, DATA_4B},
- {"G01 Joint Status 2 Register", 0x03A, NI_660x_READ, DATA_2B},
- {"G0 Load B Register", 0x03C, NI_660x_WRITE, DATA_4B},
- {"G1 Load A Register", 0x040, NI_660x_WRITE, DATA_4B},
- {"G1 Load B Register", 0x044, NI_660x_WRITE, DATA_4B},
- {"G0 Input Select Register", 0x048, NI_660x_WRITE, DATA_2B},
- {"G1 Input Select Register", 0x04A, NI_660x_WRITE, DATA_2B},
- {"G0 Autoincrement Register", 0x088, NI_660x_WRITE, DATA_2B},
- {"G1 Autoincrement Register", 0x08A, NI_660x_WRITE, DATA_2B},
- {"G01 Joint Reset Register", 0x090, NI_660x_WRITE, DATA_2B},
- {"G0 Interrupt Enable", 0x092, NI_660x_WRITE, DATA_2B},
- {"G1 Interrupt Enable", 0x096, NI_660x_WRITE, DATA_2B},
- {"G0 Counting Mode Register", 0x0B0, NI_660x_WRITE, DATA_2B},
- {"G1 Counting Mode Register", 0x0B2, NI_660x_WRITE, DATA_2B},
- {"G0 Second Gate Register", 0x0B4, NI_660x_WRITE, DATA_2B},
- {"G1 Second Gate Register", 0x0B6, NI_660x_WRITE, DATA_2B},
- {"G0 DMA Config Register", 0x0B8, NI_660x_WRITE, DATA_2B},
- {"G0 DMA Status Register", 0x0B8, NI_660x_READ, DATA_2B},
- {"G1 DMA Config Register", 0x0BA, NI_660x_WRITE, DATA_2B},
- {"G1 DMA Status Register", 0x0BA, NI_660x_READ, DATA_2B},
- {"G2 Interrupt Acknowledge", 0x104, NI_660x_WRITE, DATA_2B},
- {"G2 Status Register", 0x104, NI_660x_READ, DATA_2B},
- {"G3 Interrupt Acknowledge", 0x106, NI_660x_WRITE, DATA_2B},
- {"G3 Status Register", 0x106, NI_660x_READ, DATA_2B},
- {"G23 Status Register", 0x108, NI_660x_READ, DATA_2B},
- {"G2 Command Register", 0x10C, NI_660x_WRITE, DATA_2B},
- {"G3 Command Register", 0x10E, NI_660x_WRITE, DATA_2B},
- {"G2 HW Save Register", 0x110, NI_660x_READ, DATA_4B},
- {"G3 HW Save Register", 0x114, NI_660x_READ, DATA_4B},
- {"G2 SW Save Register", 0x118, NI_660x_READ, DATA_4B},
- {"G3 SW Save Register", 0x11C, NI_660x_READ, DATA_4B},
- {"G2 Mode Register", 0x134, NI_660x_WRITE, DATA_2B},
- {"G23 Joint Status 1 Register", 0x136, NI_660x_READ, DATA_2B},
- {"G3 Mode Register", 0x136, NI_660x_WRITE, DATA_2B},
- {"G2 Load A Register", 0x138, NI_660x_WRITE, DATA_4B},
- {"G23 Joint Status 2 Register", 0x13A, NI_660x_READ, DATA_2B},
- {"G2 Load B Register", 0x13C, NI_660x_WRITE, DATA_4B},
- {"G3 Load A Register", 0x140, NI_660x_WRITE, DATA_4B},
- {"G3 Load B Register", 0x144, NI_660x_WRITE, DATA_4B},
- {"G2 Input Select Register", 0x148, NI_660x_WRITE, DATA_2B},
- {"G3 Input Select Register", 0x14A, NI_660x_WRITE, DATA_2B},
- {"G2 Autoincrement Register", 0x188, NI_660x_WRITE, DATA_2B},
- {"G3 Autoincrement Register", 0x18A, NI_660x_WRITE, DATA_2B},
- {"G23 Joint Reset Register", 0x190, NI_660x_WRITE, DATA_2B},
- {"G2 Interrupt Enable", 0x192, NI_660x_WRITE, DATA_2B},
- {"G3 Interrupt Enable", 0x196, NI_660x_WRITE, DATA_2B},
- {"G2 Counting Mode Register", 0x1B0, NI_660x_WRITE, DATA_2B},
- {"G3 Counting Mode Register", 0x1B2, NI_660x_WRITE, DATA_2B},
- {"G3 Second Gate Register", 0x1B6, NI_660x_WRITE, DATA_2B},
- {"G2 Second Gate Register", 0x1B4, NI_660x_WRITE, DATA_2B},
- {"G2 DMA Config Register", 0x1B8, NI_660x_WRITE, DATA_2B},
- {"G2 DMA Status Register", 0x1B8, NI_660x_READ, DATA_2B},
- {"G3 DMA Config Register", 0x1BA, NI_660x_WRITE, DATA_2B},
- {"G3 DMA Status Register", 0x1BA, NI_660x_READ, DATA_2B},
- {"32 bit Digital Input", 0x414, NI_660x_READ, DATA_4B},
- {"32 bit Digital Output", 0x510, NI_660x_WRITE, DATA_4B},
- {"Clock Config Register", 0x73C, NI_660x_WRITE, DATA_4B},
- {"Global Interrupt Status Register", 0x754, NI_660x_READ, DATA_4B},
- {"DMA Configuration Register", 0x76C, NI_660x_WRITE, DATA_4B},
- {"Global Interrupt Config Register", 0x770, NI_660x_WRITE, DATA_4B},
- {"IO Config Register 0-1", 0x77C, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 2-3", 0x77E, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 4-5", 0x780, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 6-7", 0x782, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 8-9", 0x784, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 10-11", 0x786, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 12-13", 0x788, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 14-15", 0x78A, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 16-17", 0x78C, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 18-19", 0x78E, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 20-21", 0x790, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 22-23", 0x792, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 24-25", 0x794, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 26-27", 0x796, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 28-29", 0x798, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 30-31", 0x79A, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 32-33", 0x79C, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 34-35", 0x79E, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 36-37", 0x7A0, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 38-39", 0x7A2, NI_660x_READ_WRITE, DATA_2B}
-};
-
-/* kind of ENABLE for the second counter */
-enum clock_config_register_bits {
- CounterSwap = 0x1 << 21
-};
-
-/* ioconfigreg */
-static inline unsigned ioconfig_bitshift(unsigned pfi_channel)
-{
- return (pfi_channel % 2) ? 0 : 8;
-}
-
-static inline unsigned pfi_output_select_mask(unsigned pfi_channel)
-{
- return 0x3 << ioconfig_bitshift(pfi_channel);
-}
-
-static inline unsigned pfi_output_select_bits(unsigned pfi_channel,
- unsigned output_select)
-{
- return (output_select & 0x3) << ioconfig_bitshift(pfi_channel);
-}
-
-static inline unsigned pfi_input_select_mask(unsigned pfi_channel)
-{
- return 0x7 << (4 + ioconfig_bitshift(pfi_channel));
-}
-
-static inline unsigned pfi_input_select_bits(unsigned pfi_channel,
- unsigned input_select)
-{
- return (input_select & 0x7) << (4 + ioconfig_bitshift(pfi_channel));
-}
-
-/* dma configuration register bits */
-static inline unsigned dma_select_mask(unsigned dma_channel)
-{
- BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
- return 0x1f << (8 * dma_channel);
-}
-
-enum dma_selection {
- dma_selection_none = 0x1f,
-};
-
-static inline unsigned dma_select_bits(unsigned dma_channel, unsigned selection)
-{
- BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
- return (selection << (8 * dma_channel)) & dma_select_mask(dma_channel);
-}
-
-static inline unsigned dma_reset_bit(unsigned dma_channel)
-{
- BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
- return 0x80 << (8 * dma_channel);
-}
-
-enum global_interrupt_status_register_bits {
- Counter_0_Int_Bit = 0x100,
- Counter_1_Int_Bit = 0x200,
- Counter_2_Int_Bit = 0x400,
- Counter_3_Int_Bit = 0x800,
- Cascade_Int_Bit = 0x20000000,
- Global_Int_Bit = 0x80000000
+ char size; /* 2 or 4 bytes */
};
-enum global_interrupt_config_register_bits {
- Cascade_Int_Enable_Bit = 0x20000000,
- Global_Int_Polarity_Bit = 0x40000000,
- Global_Int_Enable_Bit = 0x80000000
+static const struct ni_660x_register_data ni_660x_reg_data[NI660X_NUM_REGS] = {
+ [NITIO_G0_INT_ACK] = { 0x004, 2 }, /* write */
+ [NITIO_G0_STATUS] = { 0x004, 2 }, /* read */
+ [NITIO_G1_INT_ACK] = { 0x006, 2 }, /* write */
+ [NITIO_G1_STATUS] = { 0x006, 2 }, /* read */
+ [NITIO_G01_STATUS] = { 0x008, 2 }, /* read */
+ [NITIO_G0_CMD] = { 0x00c, 2 }, /* write */
+ [NI660X_STC_DIO_PARALLEL_INPUT] = { 0x00e, 2 }, /* read */
+ [NITIO_G1_CMD] = { 0x00e, 2 }, /* write */
+ [NITIO_G0_HW_SAVE] = { 0x010, 4 }, /* read */
+ [NITIO_G1_HW_SAVE] = { 0x014, 4 }, /* read */
+ [NI660X_STC_DIO_OUTPUT] = { 0x014, 2 }, /* write */
+ [NI660X_STC_DIO_CONTROL] = { 0x016, 2 }, /* write */
+ [NITIO_G0_SW_SAVE] = { 0x018, 4 }, /* read */
+ [NITIO_G1_SW_SAVE] = { 0x01c, 4 }, /* read */
+ [NITIO_G0_MODE] = { 0x034, 2 }, /* write */
+ [NITIO_G01_STATUS1] = { 0x036, 2 }, /* read */
+ [NITIO_G1_MODE] = { 0x036, 2 }, /* write */
+ [NI660X_STC_DIO_SERIAL_INPUT] = { 0x038, 2 }, /* read */
+ [NITIO_G0_LOADA] = { 0x038, 4 }, /* write */
+ [NITIO_G01_STATUS2] = { 0x03a, 2 }, /* read */
+ [NITIO_G0_LOADB] = { 0x03c, 4 }, /* write */
+ [NITIO_G1_LOADA] = { 0x040, 4 }, /* write */
+ [NITIO_G1_LOADB] = { 0x044, 4 }, /* write */
+ [NITIO_G0_INPUT_SEL] = { 0x048, 2 }, /* write */
+ [NITIO_G1_INPUT_SEL] = { 0x04a, 2 }, /* write */
+ [NITIO_G0_AUTO_INC] = { 0x088, 2 }, /* write */
+ [NITIO_G1_AUTO_INC] = { 0x08a, 2 }, /* write */
+ [NITIO_G01_RESET] = { 0x090, 2 }, /* write */
+ [NITIO_G0_INT_ENA] = { 0x092, 2 }, /* write */
+ [NITIO_G1_INT_ENA] = { 0x096, 2 }, /* write */
+ [NITIO_G0_CNT_MODE] = { 0x0b0, 2 }, /* write */
+ [NITIO_G1_CNT_MODE] = { 0x0b2, 2 }, /* write */
+ [NITIO_G0_GATE2] = { 0x0b4, 2 }, /* write */
+ [NITIO_G1_GATE2] = { 0x0b6, 2 }, /* write */
+ [NITIO_G0_DMA_CFG] = { 0x0b8, 2 }, /* write */
+ [NITIO_G0_DMA_STATUS] = { 0x0b8, 2 }, /* read */
+ [NITIO_G1_DMA_CFG] = { 0x0ba, 2 }, /* write */
+ [NITIO_G1_DMA_STATUS] = { 0x0ba, 2 }, /* read */
+ [NITIO_G2_INT_ACK] = { 0x104, 2 }, /* write */
+ [NITIO_G2_STATUS] = { 0x104, 2 }, /* read */
+ [NITIO_G3_INT_ACK] = { 0x106, 2 }, /* write */
+ [NITIO_G3_STATUS] = { 0x106, 2 }, /* read */
+ [NITIO_G23_STATUS] = { 0x108, 2 }, /* read */
+ [NITIO_G2_CMD] = { 0x10c, 2 }, /* write */
+ [NITIO_G3_CMD] = { 0x10e, 2 }, /* write */
+ [NITIO_G2_HW_SAVE] = { 0x110, 4 }, /* read */
+ [NITIO_G3_HW_SAVE] = { 0x114, 4 }, /* read */
+ [NITIO_G2_SW_SAVE] = { 0x118, 4 }, /* read */
+ [NITIO_G3_SW_SAVE] = { 0x11c, 4 }, /* read */
+ [NITIO_G2_MODE] = { 0x134, 2 }, /* write */
+ [NITIO_G23_STATUS1] = { 0x136, 2 }, /* read */
+ [NITIO_G3_MODE] = { 0x136, 2 }, /* write */
+ [NITIO_G2_LOADA] = { 0x138, 4 }, /* write */
+ [NITIO_G23_STATUS2] = { 0x13a, 2 }, /* read */
+ [NITIO_G2_LOADB] = { 0x13c, 4 }, /* write */
+ [NITIO_G3_LOADA] = { 0x140, 4 }, /* write */
+ [NITIO_G3_LOADB] = { 0x144, 4 }, /* write */
+ [NITIO_G2_INPUT_SEL] = { 0x148, 2 }, /* write */
+ [NITIO_G3_INPUT_SEL] = { 0x14a, 2 }, /* write */
+ [NITIO_G2_AUTO_INC] = { 0x188, 2 }, /* write */
+ [NITIO_G3_AUTO_INC] = { 0x18a, 2 }, /* write */
+ [NITIO_G23_RESET] = { 0x190, 2 }, /* write */
+ [NITIO_G2_INT_ENA] = { 0x192, 2 }, /* write */
+ [NITIO_G3_INT_ENA] = { 0x196, 2 }, /* write */
+ [NITIO_G2_CNT_MODE] = { 0x1b0, 2 }, /* write */
+ [NITIO_G3_CNT_MODE] = { 0x1b2, 2 }, /* write */
+ [NITIO_G2_GATE2] = { 0x1b4, 2 }, /* write */
+ [NITIO_G3_GATE2] = { 0x1b6, 2 }, /* write */
+ [NITIO_G2_DMA_CFG] = { 0x1b8, 2 }, /* write */
+ [NITIO_G2_DMA_STATUS] = { 0x1b8, 2 }, /* read */
+ [NITIO_G3_DMA_CFG] = { 0x1ba, 2 }, /* write */
+ [NITIO_G3_DMA_STATUS] = { 0x1ba, 2 }, /* read */
+ [NI660X_DIO32_INPUT] = { 0x414, 4 }, /* read */
+ [NI660X_DIO32_OUTPUT] = { 0x510, 4 }, /* write */
+ [NI660X_CLK_CFG] = { 0x73c, 4 }, /* write */
+ [NI660X_GLOBAL_INT_STATUS] = { 0x754, 4 }, /* read */
+ [NI660X_DMA_CFG] = { 0x76c, 4 }, /* write */
+ [NI660X_GLOBAL_INT_CFG] = { 0x770, 4 }, /* write */
+ [NI660X_IO_CFG_0_1] = { 0x77c, 2 }, /* read/write */
+ [NI660X_IO_CFG_2_3] = { 0x77e, 2 }, /* read/write */
+ [NI660X_IO_CFG_4_5] = { 0x780, 2 }, /* read/write */
+ [NI660X_IO_CFG_6_7] = { 0x782, 2 }, /* read/write */
+ [NI660X_IO_CFG_8_9] = { 0x784, 2 }, /* read/write */
+ [NI660X_IO_CFG_10_11] = { 0x786, 2 }, /* read/write */
+ [NI660X_IO_CFG_12_13] = { 0x788, 2 }, /* read/write */
+ [NI660X_IO_CFG_14_15] = { 0x78a, 2 }, /* read/write */
+ [NI660X_IO_CFG_16_17] = { 0x78c, 2 }, /* read/write */
+ [NI660X_IO_CFG_18_19] = { 0x78e, 2 }, /* read/write */
+ [NI660X_IO_CFG_20_21] = { 0x790, 2 }, /* read/write */
+ [NI660X_IO_CFG_22_23] = { 0x792, 2 }, /* read/write */
+ [NI660X_IO_CFG_24_25] = { 0x794, 2 }, /* read/write */
+ [NI660X_IO_CFG_26_27] = { 0x796, 2 }, /* read/write */
+ [NI660X_IO_CFG_28_29] = { 0x798, 2 }, /* read/write */
+ [NI660X_IO_CFG_30_31] = { 0x79a, 2 }, /* read/write */
+ [NI660X_IO_CFG_32_33] = { 0x79c, 2 }, /* read/write */
+ [NI660X_IO_CFG_34_35] = { 0x79e, 2 }, /* read/write */
+ [NI660X_IO_CFG_36_37] = { 0x7a0, 2 }, /* read/write */
+ [NI660X_IO_CFG_38_39] = { 0x7a2, 2 } /* read/write */
};
-/* Offset of the GPCT chips from the base-address of the card */
-/* First chip is at base-address + 0x00, etc. */
-static const unsigned GPCT_OFFSET[2] = { 0x0, 0x800 };
+#define NI660X_CHIP_OFFSET 0x800
enum ni_660x_boardid {
BOARD_PCI6601,
@@ -385,7 +216,7 @@ enum ni_660x_boardid {
struct ni_660x_board {
const char *name;
- unsigned n_chips; /* total number of TIO chips */
+ unsigned int n_chips; /* total number of TIO chips */
};
static const struct ni_660x_board ni_660x_boards[] = {
@@ -411,280 +242,95 @@ static const struct ni_660x_board ni_660x_boards[] = {
},
};
-#define NI_660X_MAX_NUM_CHIPS 2
-#define NI_660X_MAX_NUM_COUNTERS (NI_660X_MAX_NUM_CHIPS * counters_per_chip)
+#define NI660X_NUM_PFI_CHANNELS 40
+
+/* there are only up to 3 dma channels, but the register layout allows for 4 */
+#define NI660X_MAX_DMA_CHANNEL 4
+
+#define NI660X_COUNTERS_PER_CHIP 4
+#define NI660X_MAX_CHIPS 2
+#define NI660X_MAX_COUNTERS (NI660X_MAX_CHIPS * \
+ NI660X_COUNTERS_PER_CHIP)
struct ni_660x_private {
- struct mite_struct *mite;
+ struct mite *mite;
struct ni_gpct_device *counter_dev;
- uint64_t pfi_direction_bits;
- struct mite_dma_descriptor_ring
- *mite_rings[NI_660X_MAX_NUM_CHIPS][counters_per_chip];
+ struct mite_ring *ring[NI660X_MAX_CHIPS][NI660X_COUNTERS_PER_CHIP];
+ /* protects mite channel request/release */
spinlock_t mite_channel_lock;
- /* interrupt_lock prevents races between interrupt and comedi_poll */
+ /* prevents races between interrupt and comedi_poll */
spinlock_t interrupt_lock;
- unsigned dma_configuration_soft_copies[NI_660X_MAX_NUM_CHIPS];
- spinlock_t soft_reg_copy_lock;
- unsigned short pfi_output_selects[NUM_PFI_CHANNELS];
+ unsigned int dma_cfg[NI660X_MAX_CHIPS];
+ unsigned int io_cfg[NI660X_NUM_PFI_CHANNELS];
+ u64 io_dir;
};
-static inline unsigned ni_660x_num_counters(struct comedi_device *dev)
-{
- const struct ni_660x_board *board = dev->board_ptr;
-
- return board->n_chips * counters_per_chip;
-}
-
-static enum ni_660x_register ni_gpct_to_660x_register(enum ni_gpct_register reg)
-{
- switch (reg) {
- case NITIO_G0_AUTO_INC:
- return NI660X_G0_AUTO_INC;
- case NITIO_G1_AUTO_INC:
- return NI660X_G1_AUTO_INC;
- case NITIO_G2_AUTO_INC:
- return NI660X_G2_AUTO_INC;
- case NITIO_G3_AUTO_INC:
- return NI660X_G3_AUTO_INC;
- case NITIO_G0_CMD:
- return NI660X_G0_CMD;
- case NITIO_G1_CMD:
- return NI660X_G1_CMD;
- case NITIO_G2_CMD:
- return NI660X_G2_CMD;
- case NITIO_G3_CMD:
- return NI660X_G3_CMD;
- case NITIO_G0_HW_SAVE:
- return NI660X_G0_HW_SAVE;
- case NITIO_G1_HW_SAVE:
- return NI660X_G1_HW_SAVE;
- case NITIO_G2_HW_SAVE:
- return NI660X_G2_HW_SAVE;
- case NITIO_G3_HW_SAVE:
- return NI660X_G3_HW_SAVE;
- case NITIO_G0_SW_SAVE:
- return NI660X_G0_SW_SAVE;
- case NITIO_G1_SW_SAVE:
- return NI660X_G1_SW_SAVE;
- case NITIO_G2_SW_SAVE:
- return NI660X_G2_SW_SAVE;
- case NITIO_G3_SW_SAVE:
- return NI660X_G3_SW_SAVE;
- case NITIO_G0_MODE:
- return NI660X_G0_MODE;
- case NITIO_G1_MODE:
- return NI660X_G1_MODE;
- case NITIO_G2_MODE:
- return NI660X_G2_MODE;
- case NITIO_G3_MODE:
- return NI660X_G3_MODE;
- case NITIO_G0_LOADA:
- return NI660X_G0_LOADA;
- case NITIO_G1_LOADA:
- return NI660X_G1_LOADA;
- case NITIO_G2_LOADA:
- return NI660X_G2_LOADA;
- case NITIO_G3_LOADA:
- return NI660X_G3_LOADA;
- case NITIO_G0_LOADB:
- return NI660X_G0_LOADB;
- case NITIO_G1_LOADB:
- return NI660X_G1_LOADB;
- case NITIO_G2_LOADB:
- return NI660X_G2_LOADB;
- case NITIO_G3_LOADB:
- return NI660X_G3_LOADB;
- case NITIO_G0_INPUT_SEL:
- return NI660X_G0_INPUT_SEL;
- case NITIO_G1_INPUT_SEL:
- return NI660X_G1_INPUT_SEL;
- case NITIO_G2_INPUT_SEL:
- return NI660X_G2_INPUT_SEL;
- case NITIO_G3_INPUT_SEL:
- return NI660X_G3_INPUT_SEL;
- case NITIO_G01_STATUS:
- return NI660X_G01_STATUS;
- case NITIO_G23_STATUS:
- return NI660X_G23_STATUS;
- case NITIO_G01_RESET:
- return NI660X_G01_RESET;
- case NITIO_G23_RESET:
- return NI660X_G23_RESET;
- case NITIO_G01_STATUS1:
- return NI660X_G01_STATUS1;
- case NITIO_G23_STATUS1:
- return NI660X_G23_STATUS1;
- case NITIO_G01_STATUS2:
- return NI660X_G01_STATUS2;
- case NITIO_G23_STATUS2:
- return NI660X_G23_STATUS2;
- case NITIO_G0_CNT_MODE:
- return NI660X_G0_CNT_MODE;
- case NITIO_G1_CNT_MODE:
- return NI660X_G1_CNT_MODE;
- case NITIO_G2_CNT_MODE:
- return NI660X_G2_CNT_MODE;
- case NITIO_G3_CNT_MODE:
- return NI660X_G3_CNT_MODE;
- case NITIO_G0_GATE2:
- return NI660X_G0_GATE2;
- case NITIO_G1_GATE2:
- return NI660X_G1_GATE2;
- case NITIO_G2_GATE2:
- return NI660X_G2_GATE2;
- case NITIO_G3_GATE2:
- return NI660X_G3_GATE2;
- case NITIO_G0_DMA_CFG:
- return NI660X_G0_DMA_CFG;
- case NITIO_G0_DMA_STATUS:
- return NI660X_G0_DMA_STATUS;
- case NITIO_G1_DMA_CFG:
- return NI660X_G1_DMA_CFG;
- case NITIO_G1_DMA_STATUS:
- return NI660X_G1_DMA_STATUS;
- case NITIO_G2_DMA_CFG:
- return NI660X_G2_DMA_CFG;
- case NITIO_G2_DMA_STATUS:
- return NI660X_G2_DMA_STATUS;
- case NITIO_G3_DMA_CFG:
- return NI660X_G3_DMA_CFG;
- case NITIO_G3_DMA_STATUS:
- return NI660X_G3_DMA_STATUS;
- case NITIO_G0_INT_ACK:
- return NI660X_G0_INT_ACK;
- case NITIO_G1_INT_ACK:
- return NI660X_G1_INT_ACK;
- case NITIO_G2_INT_ACK:
- return NI660X_G2_INT_ACK;
- case NITIO_G3_INT_ACK:
- return NI660X_G3_INT_ACK;
- case NITIO_G0_STATUS:
- return NI660X_G0_STATUS;
- case NITIO_G1_STATUS:
- return NI660X_G1_STATUS;
- case NITIO_G2_STATUS:
- return NI660X_G2_STATUS;
- case NITIO_G3_STATUS:
- return NI660X_G3_STATUS;
- case NITIO_G0_INT_ENA:
- return NI660X_G0_INT_ENA;
- case NITIO_G1_INT_ENA:
- return NI660X_G1_INT_ENA;
- case NITIO_G2_INT_ENA:
- return NI660X_G2_INT_ENA;
- case NITIO_G3_INT_ENA:
- return NI660X_G3_INT_ENA;
- default:
- BUG();
- return 0;
- }
-}
-
-static inline void ni_660x_write_register(struct comedi_device *dev,
- unsigned chip, unsigned bits,
- enum ni_660x_register reg)
+static void ni_660x_write(struct comedi_device *dev, unsigned int chip,
+ unsigned int bits, unsigned int reg)
{
- unsigned int addr = GPCT_OFFSET[chip] + registerData[reg].offset;
+ unsigned int addr = (chip * NI660X_CHIP_OFFSET) +
+ ni_660x_reg_data[reg].offset;
- switch (registerData[reg].size) {
- case DATA_2B:
+ if (ni_660x_reg_data[reg].size == 2)
writew(bits, dev->mmio + addr);
- break;
- case DATA_4B:
+ else
writel(bits, dev->mmio + addr);
- break;
- default:
- BUG();
- break;
- }
}
-static inline unsigned ni_660x_read_register(struct comedi_device *dev,
- unsigned chip,
- enum ni_660x_register reg)
+static unsigned int ni_660x_read(struct comedi_device *dev,
+ unsigned int chip, unsigned int reg)
{
- unsigned int addr = GPCT_OFFSET[chip] + registerData[reg].offset;
+ unsigned int addr = (chip * NI660X_CHIP_OFFSET) +
+ ni_660x_reg_data[reg].offset;
- switch (registerData[reg].size) {
- case DATA_2B:
+ if (ni_660x_reg_data[reg].size == 2)
return readw(dev->mmio + addr);
- case DATA_4B:
- return readl(dev->mmio + addr);
- default:
- BUG();
- break;
- }
- return 0;
+ return readl(dev->mmio + addr);
}
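+/*
+ * Worked example (a sketch derived from the helpers above, not from the
+ * register-level manual): with NI660X_CHIP_OFFSET of 0x800, an access to
+ * a register on chip 1 lands at 0x800 + ni_660x_reg_data[reg].offset;
+ * registers described with size 2 go through writew()/readw(), all
+ * others through writel()/readl().
+ */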
-static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg)
+static void ni_660x_gpct_write(struct ni_gpct *counter, unsigned int bits,
+ enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
- enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg);
- unsigned chip = counter->chip_index;
- ni_660x_write_register(dev, chip, bits, ni_660x_register);
+ ni_660x_write(dev, counter->chip_index, bits, reg);
}
-static unsigned ni_gpct_read_register(struct ni_gpct *counter,
+static unsigned int ni_660x_gpct_read(struct ni_gpct *counter,
enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
- enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg);
- unsigned chip = counter->chip_index;
-
- return ni_660x_read_register(dev, chip, ni_660x_register);
-}
-
-static inline struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private
- *priv,
- struct ni_gpct
- *counter)
-{
- unsigned chip = counter->chip_index;
- return priv->mite_rings[chip][counter->counter_index];
+ return ni_660x_read(dev, counter->chip_index, reg);
}
static inline void ni_660x_set_dma_channel(struct comedi_device *dev,
- unsigned mite_channel,
+ unsigned int mite_channel,
struct ni_gpct *counter)
{
struct ni_660x_private *devpriv = dev->private;
- unsigned chip = counter->chip_index;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->dma_configuration_soft_copies[chip] &=
- ~dma_select_mask(mite_channel);
- devpriv->dma_configuration_soft_copies[chip] |=
- dma_select_bits(mite_channel, counter->counter_index);
- ni_660x_write_register(dev, chip,
- devpriv->dma_configuration_soft_copies[chip] |
- dma_reset_bit(mite_channel), NI660X_DMA_CFG);
+ unsigned int chip = counter->chip_index;
+
+ devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel);
+ devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL(mite_channel,
+ counter->counter_index);
+ ni_660x_write(dev, chip, devpriv->dma_cfg[chip] |
+ NI660X_DMA_CFG_RESET(mite_channel),
+ NI660X_DMA_CFG);
mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
static inline void ni_660x_unset_dma_channel(struct comedi_device *dev,
- unsigned mite_channel,
+ unsigned int mite_channel,
struct ni_gpct *counter)
{
struct ni_660x_private *devpriv = dev->private;
- unsigned chip = counter->chip_index;
- unsigned long flags;
+ unsigned int chip = counter->chip_index;
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->dma_configuration_soft_copies[chip] &=
- ~dma_select_mask(mite_channel);
- devpriv->dma_configuration_soft_copies[chip] |=
- dma_select_bits(mite_channel, dma_selection_none);
- ni_660x_write_register(dev, chip,
- devpriv->dma_configuration_soft_copies[chip],
- NI660X_DMA_CFG);
+ devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel);
+ devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL_NONE(mite_channel);
+ ni_660x_write(dev, chip, devpriv->dma_cfg[chip], NI660X_DMA_CFG);
mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
static int ni_660x_request_mite_channel(struct comedi_device *dev,
@@ -692,13 +338,13 @@ static int ni_660x_request_mite_channel(struct comedi_device *dev,
enum comedi_io_direction direction)
{
struct ni_660x_private *devpriv = dev->private;
- unsigned long flags;
+ struct mite_ring *ring;
struct mite_channel *mite_chan;
+ unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(counter->mite_chan);
- mite_chan = mite_request_channel(devpriv->mite,
- mite_ring(devpriv, counter));
+ ring = devpriv->ring[counter->chip_index][counter->counter_index];
+ mite_chan = mite_request_channel(devpriv->mite, ring);
if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
@@ -757,7 +403,7 @@ static int ni_660x_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
static void set_tio_counterswap(struct comedi_device *dev, int chip)
{
- unsigned bits = 0;
+ unsigned int bits = 0;
/*
* See P. 3.5 of the Register-Level Programming manual.
@@ -766,9 +412,9 @@ static void set_tio_counterswap(struct comedi_device *dev, int chip)
* first chip.
*/
if (chip)
- bits = CounterSwap;
+ bits = NI660X_CLK_CFG_COUNTER_SWAP;
- ni_660x_write_register(dev, chip, bits, NI660X_CLK_CFG);
+ ni_660x_write(dev, chip, bits, NI660X_CLK_CFG);
}
static void ni_660x_handle_gpct_interrupt(struct comedi_device *dev,
@@ -785,17 +431,20 @@ static irqreturn_t ni_660x_interrupt(int irq, void *d)
struct comedi_device *dev = d;
struct ni_660x_private *devpriv = dev->private;
struct comedi_subdevice *s;
- unsigned i;
+ unsigned int i;
unsigned long flags;
if (!dev->attached)
return IRQ_NONE;
+ /* make sure dev->attached is checked before doing anything else */
+ smp_mb();
+
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&devpriv->interrupt_lock, flags);
- smp_mb();
- for (i = 0; i < ni_660x_num_counters(dev); ++i) {
- s = &dev->subdevices[NI_660X_GPCT_SUBDEV(i)];
- ni_660x_handle_gpct_interrupt(dev, s);
+ for (i = 0; i < dev->n_subdevices; ++i) {
+ s = &dev->subdevices[i];
+ if (s->type == COMEDI_SUBD_COUNTER)
+ ni_660x_handle_gpct_interrupt(dev, s);
}
spin_unlock_irqrestore(&devpriv->interrupt_lock, flags);
return IRQ_HANDLED;
@@ -810,7 +459,7 @@ static int ni_660x_input_poll(struct comedi_device *dev,
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&devpriv->interrupt_lock, flags);
- mite_sync_input_dma(counter->mite_chan, s);
+ mite_sync_dma(counter->mite_chan, s);
spin_unlock_irqrestore(&devpriv->interrupt_lock, flags);
return comedi_buf_read_n_available(s);
}
@@ -820,9 +469,11 @@ static int ni_660x_buf_change(struct comedi_device *dev,
{
struct ni_660x_private *devpriv = dev->private;
struct ni_gpct *counter = s->private;
+ struct mite_ring *ring;
int ret;
- ret = mite_buf_change(mite_ring(devpriv, counter), s);
+ ring = devpriv->ring[counter->chip_index][counter->counter_index];
+ ret = mite_buf_change(ring, s);
if (ret < 0)
return ret;
@@ -832,7 +483,7 @@ static int ni_660x_buf_change(struct comedi_device *dev,
static int ni_660x_allocate_private(struct comedi_device *dev)
{
struct ni_660x_private *devpriv;
- unsigned i;
+ unsigned int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -840,9 +491,8 @@ static int ni_660x_allocate_private(struct comedi_device *dev)
spin_lock_init(&devpriv->mite_channel_lock);
spin_lock_init(&devpriv->interrupt_lock);
- spin_lock_init(&devpriv->soft_reg_copy_lock);
- for (i = 0; i < NUM_PFI_CHANNELS; ++i)
- devpriv->pfi_output_selects[i] = pfi_output_select_counter;
+ for (i = 0; i < NI660X_NUM_PFI_CHANNELS; ++i)
+ devpriv->io_cfg[i] = NI_660X_PFI_OUTPUT_COUNTER;
return 0;
}
@@ -851,14 +501,13 @@ static int ni_660x_alloc_mite_rings(struct comedi_device *dev)
{
const struct ni_660x_board *board = dev->board_ptr;
struct ni_660x_private *devpriv = dev->private;
- unsigned i;
- unsigned j;
+ unsigned int i;
+ unsigned int j;
for (i = 0; i < board->n_chips; ++i) {
- for (j = 0; j < counters_per_chip; ++j) {
- devpriv->mite_rings[i][j] =
- mite_alloc_ring(devpriv->mite);
- if (!devpriv->mite_rings[i][j])
+ for (j = 0; j < NI660X_COUNTERS_PER_CHIP; ++j) {
+ devpriv->ring[i][j] = mite_alloc_ring(devpriv->mite);
+ if (!devpriv->ring[i][j])
return -ENOMEM;
}
}
@@ -869,120 +518,101 @@ static void ni_660x_free_mite_rings(struct comedi_device *dev)
{
const struct ni_660x_board *board = dev->board_ptr;
struct ni_660x_private *devpriv = dev->private;
- unsigned i;
- unsigned j;
+ unsigned int i;
+ unsigned int j;
for (i = 0; i < board->n_chips; ++i) {
- for (j = 0; j < counters_per_chip; ++j)
- mite_free_ring(devpriv->mite_rings[i][j]);
- }
-}
-
-static void init_tio_chip(struct comedi_device *dev, int chipset)
-{
- struct ni_660x_private *devpriv = dev->private;
- unsigned i;
-
- /* init dma configuration register */
- devpriv->dma_configuration_soft_copies[chipset] = 0;
- for (i = 0; i < MAX_DMA_CHANNEL; ++i) {
- devpriv->dma_configuration_soft_copies[chipset] |=
- dma_select_bits(i, dma_selection_none) & dma_select_mask(i);
+ for (j = 0; j < NI660X_COUNTERS_PER_CHIP; ++j)
+ mite_free_ring(devpriv->ring[i][j]);
}
- ni_660x_write_register(dev, chipset,
- devpriv->dma_configuration_soft_copies[chipset],
- NI660X_DMA_CFG);
- for (i = 0; i < NUM_PFI_CHANNELS; ++i)
- ni_660x_write_register(dev, chipset, 0, IOConfigReg(i));
}
static int ni_660x_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned base_bitfield_channel = CR_CHAN(insn->chanspec);
-
- /* Check if we have to write some bits */
- if (data[0]) {
- s->state &= ~(data[0] << base_bitfield_channel);
- s->state |= (data[0] & data[1]) << base_bitfield_channel;
- /* Write out the new digital output lines */
- ni_660x_write_register(dev, 0, s->state, NI660X_DIO32_OUTPUT);
+ unsigned int shift = CR_CHAN(insn->chanspec);
+ unsigned int mask = data[0] << shift;
+ unsigned int bits = data[1] << shift;
+
+ /*
+ * There are 40 channels in this subdevice but only 32 are usable
+ * as DIO. The shift adjusts the mask/bits to account for the base
+ * channel in insn->chanspec. The state update can then be handled
+ * normally for the 32 usable channels.
+ */
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
+ ni_660x_write(dev, 0, s->state, NI660X_DIO32_OUTPUT);
}
- /* on return, data[1] contains the value of the digital
- * input and output lines. */
- data[1] = (ni_660x_read_register(dev, 0, NI660X_DIO32_INPUT) >>
- base_bitfield_channel);
+
+ /*
+ * Return the input channels, shifted back to account for the base
+ * channel.
+ */
+ data[1] = ni_660x_read(dev, 0, NI660X_DIO32_INPUT) >> shift;
return insn->n;
}
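+/*
+ * Worked example (illustrative only, values assumed): with a base
+ * channel of 8 in insn->chanspec, data[0] = 0x0f and data[1] = 0x05,
+ * the code above computes mask = 0x0f00 and bits = 0x0500, so DIO
+ * channels 8 and 10 are set, channels 9 and 11 are cleared, and the
+ * remaining output channels keep their previous state; data[1] then
+ * returns the input register shifted back down by 8.
+ */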
static void ni_660x_select_pfi_output(struct comedi_device *dev,
- unsigned pfi_channel,
- unsigned output_select)
+ unsigned int chan, unsigned int out_sel)
{
const struct ni_660x_board *board = dev->board_ptr;
- static const unsigned counter_4_7_first_pfi = 8;
- static const unsigned counter_4_7_last_pfi = 23;
- unsigned active_chipset = 0;
- unsigned idle_chipset = 0;
- unsigned active_bits;
- unsigned idle_bits;
+ unsigned int active_chip = 0;
+ unsigned int idle_chip = 0;
+ unsigned int bits;
if (board->n_chips > 1) {
- if (output_select == pfi_output_select_counter &&
- pfi_channel >= counter_4_7_first_pfi &&
- pfi_channel <= counter_4_7_last_pfi) {
- active_chipset = 1;
- idle_chipset = 0;
+ if (out_sel == NI_660X_PFI_OUTPUT_COUNTER &&
+ chan >= 8 && chan <= 23) {
+ /* counters 4-7 pfi channels */
+ active_chip = 1;
+ idle_chip = 0;
} else {
- active_chipset = 0;
- idle_chipset = 1;
+ /* counters 0-3 pfi channels */
+ active_chip = 0;
+ idle_chip = 1;
}
}
- if (idle_chipset != active_chipset) {
- idle_bits =
- ni_660x_read_register(dev, idle_chipset,
- IOConfigReg(pfi_channel));
- idle_bits &= ~pfi_output_select_mask(pfi_channel);
- idle_bits |=
- pfi_output_select_bits(pfi_channel,
- pfi_output_select_high_Z);
- ni_660x_write_register(dev, idle_chipset, idle_bits,
- IOConfigReg(pfi_channel));
+ if (idle_chip != active_chip) {
+ /* set the pfi channel to high-z on the inactive chip */
+ bits = ni_660x_read(dev, idle_chip, NI660X_IO_CFG(chan));
+ bits &= ~NI660X_IO_CFG_OUT_SEL_MASK(chan);
+ bits |= NI660X_IO_CFG_OUT_SEL(chan, 0); /* high-z */
+ ni_660x_write(dev, idle_chip, bits, NI660X_IO_CFG(chan));
}
- active_bits =
- ni_660x_read_register(dev, active_chipset,
- IOConfigReg(pfi_channel));
- active_bits &= ~pfi_output_select_mask(pfi_channel);
- active_bits |= pfi_output_select_bits(pfi_channel, output_select);
- ni_660x_write_register(dev, active_chipset, active_bits,
- IOConfigReg(pfi_channel));
+ /* set the pfi channel output on the active chip */
+ bits = ni_660x_read(dev, active_chip, NI660X_IO_CFG(chan));
+ bits &= ~NI660X_IO_CFG_OUT_SEL_MASK(chan);
+ bits |= NI660X_IO_CFG_OUT_SEL(chan, out_sel);
+ ni_660x_write(dev, active_chip, bits, NI660X_IO_CFG(chan));
}
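+/*
+ * Example (a sketch based on the routing logic above): on a two-chip
+ * board, routing chan 10 to NI_660X_PFI_OUTPUT_COUNTER selects
+ * active_chip = 1 and idle_chip = 0, so chip 0's IO_CFG(10) output is
+ * parked at high-z while chip 1 drives the pin (per the DIO channel
+ * table further below, PFI 10 carries the CTR 7 GATE signal).
+ */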
-static int ni_660x_set_pfi_routing(struct comedi_device *dev, unsigned chan,
- unsigned source)
+static int ni_660x_set_pfi_routing(struct comedi_device *dev,
+ unsigned int chan, unsigned int source)
{
struct ni_660x_private *devpriv = dev->private;
- if (source > num_pfi_output_selects)
- return -EINVAL;
- if (source == pfi_output_select_high_Z)
- return -EINVAL;
- if (chan < min_counter_pfi_chan) {
- if (source == pfi_output_select_counter)
+ switch (source) {
+ case NI_660X_PFI_OUTPUT_COUNTER:
+ if (chan < 8)
return -EINVAL;
- } else if (chan > max_dio_pfi_chan) {
- if (source == pfi_output_select_do)
+ break;
+ case NI_660X_PFI_OUTPUT_DIO:
+ if (chan > 31)
return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
}
- devpriv->pfi_output_selects[chan] = source;
- if (devpriv->pfi_direction_bits & (((uint64_t) 1) << chan))
- ni_660x_select_pfi_output(dev, chan,
- devpriv->pfi_output_selects[chan]);
+ devpriv->io_cfg[chan] = source;
+ if (devpriv->io_dir & (1ULL << chan))
+ ni_660x_select_pfi_output(dev, chan, devpriv->io_cfg[chan]);
return 0;
}
@@ -993,25 +623,24 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
{
struct ni_660x_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
- uint64_t bit = 1ULL << chan;
+ u64 bit = 1ULL << chan;
unsigned int val;
int ret;
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
- devpriv->pfi_direction_bits |= bit;
- ni_660x_select_pfi_output(dev, chan,
- devpriv->pfi_output_selects[chan]);
+ devpriv->io_dir |= bit;
+ ni_660x_select_pfi_output(dev, chan, devpriv->io_cfg[chan]);
break;
case INSN_CONFIG_DIO_INPUT:
- devpriv->pfi_direction_bits &= ~bit;
- ni_660x_select_pfi_output(dev, chan, pfi_output_select_high_Z);
+ devpriv->io_dir &= ~bit;
+ ni_660x_select_pfi_output(dev, chan, 0); /* high-z */
break;
case INSN_CONFIG_DIO_QUERY:
- data[1] = (devpriv->pfi_direction_bits & bit) ? COMEDI_OUTPUT
- : COMEDI_INPUT;
+ data[1] = (devpriv->io_dir & bit) ? COMEDI_OUTPUT
+ : COMEDI_INPUT;
break;
case INSN_CONFIG_SET_ROUTING:
@@ -1021,14 +650,14 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
break;
case INSN_CONFIG_GET_ROUTING:
- data[1] = devpriv->pfi_output_selects[chan];
+ data[1] = devpriv->io_cfg[chan];
break;
case INSN_CONFIG_FILTER:
- val = ni_660x_read_register(dev, 0, IOConfigReg(chan));
- val &= ~pfi_input_select_mask(chan);
- val |= pfi_input_select_bits(chan, data[1]);
- ni_660x_write_register(dev, 0, val, IOConfigReg(chan));
+ val = ni_660x_read(dev, 0, NI660X_IO_CFG(chan));
+ val &= ~NI660X_IO_CFG_IN_SEL_MASK(chan);
+ val |= NI660X_IO_CFG_IN_SEL(chan, data[1]);
+ ni_660x_write(dev, 0, val, NI660X_IO_CFG(chan));
break;
default:
@@ -1038,6 +667,33 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
return insn->n;
}
+static void ni_660x_init_tio_chips(struct comedi_device *dev,
+ unsigned int n_chips)
+{
+ struct ni_660x_private *devpriv = dev->private;
+ unsigned int chip;
+ unsigned int chan;
+
+ /*
+ * We use the ioconfig registers to control dio direction, so zero
+ * output enables in stc dio control reg.
+ */
+ ni_660x_write(dev, 0, 0, NI660X_STC_DIO_CONTROL);
+
+ for (chip = 0; chip < n_chips; ++chip) {
+ /* init dma configuration register */
+ devpriv->dma_cfg[chip] = 0;
+ for (chan = 0; chan < NI660X_MAX_DMA_CHANNEL; ++chan)
+ devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL_NONE(chan);
+ ni_660x_write(dev, chip, devpriv->dma_cfg[chip],
+ NI660X_DMA_CFG);
+
+ /* init ioconfig registers */
+ for (chan = 0; chan < NI660X_NUM_PFI_CHANNELS; ++chan)
+ ni_660x_write(dev, chip, 0, NI660X_IO_CFG(chan));
+ }
+}
+
static int ni_660x_auto_attach(struct comedi_device *dev,
unsigned long context)
{
@@ -1045,9 +701,12 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
const struct ni_660x_board *board = NULL;
struct ni_660x_private *devpriv;
struct comedi_subdevice *s;
+ struct ni_gpct_device *gpct_dev;
+ unsigned int n_counters;
+ int subdev;
int ret;
- unsigned i;
- unsigned global_interrupt_config_bits;
+ unsigned int i;
+ unsigned int global_interrupt_config_bits;
if (context < ARRAY_SIZE(ni_660x_boards))
board = &ni_660x_boards[context];
@@ -1065,91 +724,147 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
return ret;
devpriv = dev->private;
- devpriv->mite = mite_alloc(pcidev);
+ devpriv->mite = mite_attach(dev, true); /* use win1 */
if (!devpriv->mite)
return -ENOMEM;
- ret = mite_setup2(dev, devpriv->mite, true);
- if (ret < 0)
- return ret;
-
ret = ni_660x_alloc_mite_rings(dev);
if (ret < 0)
return ret;
- ret = comedi_alloc_subdevices(dev, 2 + NI_660X_MAX_NUM_COUNTERS);
+ ni_660x_init_tio_chips(dev, board->n_chips);
+
+ n_counters = board->n_chips * NI660X_COUNTERS_PER_CHIP;
+ gpct_dev = ni_gpct_device_construct(dev,
+ ni_660x_gpct_write,
+ ni_660x_gpct_read,
+ ni_gpct_variant_660x,
+ n_counters);
+ if (!gpct_dev)
+ return -ENOMEM;
+ devpriv->counter_dev = gpct_dev;
+
+ ret = comedi_alloc_subdevices(dev, 2 + NI660X_MAX_COUNTERS);
if (ret)
return ret;
- s = &dev->subdevices[0];
+ subdev = 0;
+
+ s = &dev->subdevices[subdev++];
/* Old GENERAL-PURPOSE COUNTER/TIMER (GPCT) subdevice, no longer used */
s->type = COMEDI_SUBD_UNUSED;
- s = &dev->subdevices[NI_660X_DIO_SUBDEV];
- /* DIGITAL I/O SUBDEVICE */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = NUM_PFI_CHANNELS;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = ni_660x_dio_insn_bits;
- s->insn_config = ni_660x_dio_insn_config;
- /* we use the ioconfig registers to control dio direction, so zero
- output enables in stc dio control reg */
- ni_660x_write_register(dev, 0, 0, NI660X_STC_DIO_CONTROL);
-
- devpriv->counter_dev = ni_gpct_device_construct(dev,
- &ni_gpct_write_register,
- &ni_gpct_read_register,
- ni_gpct_variant_660x,
- ni_660x_num_counters
- (dev));
- if (!devpriv->counter_dev)
- return -ENOMEM;
- for (i = 0; i < NI_660X_MAX_NUM_COUNTERS; ++i) {
- s = &dev->subdevices[NI_660X_GPCT_SUBDEV(i)];
- if (i < ni_660x_num_counters(dev)) {
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE |
+ /*
+ * Digital I/O subdevice
+ *
+ * There are 40 channels but only the first 32 can be digital I/Os.
+ * The last 8 are dedicated to counters 0 and 1.
+ *
+ * Counter 0-3 signals are from the first TIO chip.
+ * Counter 4-7 signals are from the second TIO chip.
+ *
+ * Comedi External
+ * PFI Chan DIO Chan Counter Signal
+ * ------- -------- --------------
+ * 0 0
+ * 1 1
+ * 2 2
+ * 3 3
+ * 4 4
+ * 5 5
+ * 6 6
+ * 7 7
+ * 8 8 CTR 7 OUT
+ * 9 9 CTR 7 AUX
+ * 10 10 CTR 7 GATE
+ * 11 11 CTR 7 SOURCE
+ * 12 12 CTR 6 OUT
+ * 13 13 CTR 6 AUX
+ * 14 14 CTR 6 GATE
+ * 15 15 CTR 6 SOURCE
+ * 16 16 CTR 5 OUT
+ * 17 17 CTR 5 AUX
+ * 18 18 CTR 5 GATE
+ * 19 19 CTR 5 SOURCE
+ * 20 20 CTR 4 OUT
+ * 21 21 CTR 4 AUX
+ * 22 22 CTR 4 GATE
+ * 23 23 CTR 4 SOURCE
+ * 24 24 CTR 3 OUT
+ * 25 25 CTR 3 AUX
+ * 26 26 CTR 3 GATE
+ * 27 27 CTR 3 SOURCE
+ * 28 28 CTR 2 OUT
+ * 29 29 CTR 2 AUX
+ * 30 30 CTR 2 GATE
+ * 31 31 CTR 2 SOURCE
+ * 32 CTR 1 OUT
+ * 33 CTR 1 AUX
+ * 34 CTR 1 GATE
+ * 35 CTR 1 SOURCE
+ * 36 CTR 0 OUT
+ * 37 CTR 0 AUX
+ * 38 CTR 0 GATE
+ * 39 CTR 0 SOURCE
+ */
+ s = &dev->subdevices[subdev++];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = NI660X_NUM_PFI_CHANNELS;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = ni_660x_dio_insn_bits;
+ s->insn_config = ni_660x_dio_insn_config;
+
+ /*
+ * Default the DIO channels as:
+ * chan 0-7: DIO inputs
+ * chan 8-39: counter signal inputs
+ */
+ for (i = 0; i < s->n_chan; ++i) {
+ unsigned int source = (i < 8) ? NI_660X_PFI_OUTPUT_DIO
+ : NI_660X_PFI_OUTPUT_COUNTER;
+
+ ni_660x_set_pfi_routing(dev, i, source);
+ ni_660x_select_pfi_output(dev, i, 0); /* high-z */
+ }
+
+ /* Counter subdevices (4 NI TIO General Purpose Counters per chip) */
+ for (i = 0; i < NI660X_MAX_COUNTERS; ++i) {
+ s = &dev->subdevices[subdev++];
+ if (i < n_counters) {
+ struct ni_gpct *counter = &gpct_dev->counters[i];
+
+ counter->chip_index = i / NI660X_COUNTERS_PER_CHIP;
+ counter->counter_index = i % NI660X_COUNTERS_PER_CHIP;
+
+ s->type = COMEDI_SUBD_COUNTER;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE |
SDF_LSAMPL | SDF_CMD_READ;
- s->n_chan = 3;
- s->maxdata = 0xffffffff;
- s->insn_read = ni_tio_insn_read;
- s->insn_write = ni_tio_insn_write;
- s->insn_config = ni_tio_insn_config;
- s->do_cmd = &ni_660x_cmd;
- s->len_chanlist = 1;
- s->do_cmdtest = ni_tio_cmdtest;
- s->cancel = &ni_660x_cancel;
- s->poll = &ni_660x_input_poll;
+ s->n_chan = 3;
+ s->maxdata = 0xffffffff;
+ s->insn_read = ni_tio_insn_read;
+ s->insn_write = ni_tio_insn_write;
+ s->insn_config = ni_tio_insn_config;
+ s->len_chanlist = 1;
+ s->do_cmd = ni_660x_cmd;
+ s->do_cmdtest = ni_tio_cmdtest;
+ s->cancel = ni_660x_cancel;
+ s->poll = ni_660x_input_poll;
+ s->buf_change = ni_660x_buf_change;
s->async_dma_dir = DMA_BIDIRECTIONAL;
- s->buf_change = &ni_660x_buf_change;
- s->private = &devpriv->counter_dev->counters[i];
+ s->private = counter;
- devpriv->counter_dev->counters[i].chip_index =
- i / counters_per_chip;
- devpriv->counter_dev->counters[i].counter_index =
- i % counters_per_chip;
+ ni_tio_init_counter(counter);
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_UNUSED;
}
}
- for (i = 0; i < board->n_chips; ++i)
- init_tio_chip(dev, i);
-
- for (i = 0; i < ni_660x_num_counters(dev); ++i)
- ni_tio_init_counter(&devpriv->counter_dev->counters[i]);
-
- for (i = 0; i < NUM_PFI_CHANNELS; ++i) {
- if (i < min_counter_pfi_chan)
- ni_660x_set_pfi_routing(dev, i, pfi_output_select_do);
- else
- ni_660x_set_pfi_routing(dev, i,
- pfi_output_select_counter);
- ni_660x_select_pfi_output(dev, i, pfi_output_select_high_Z);
- }
- /* to be safe, set counterswap bits on tio chips after all the counter
- outputs have been set to high impedance mode */
+
+ /*
+ * To be safe, set counterswap bits on tio chips after all the counter
+ * outputs have been set to high impedance mode.
+ */
for (i = 0; i < board->n_chips; ++i)
set_tio_counterswap(dev, i);
@@ -1160,11 +875,11 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
return ret;
}
dev->irq = pcidev->irq;
- global_interrupt_config_bits = Global_Int_Enable_Bit;
+ global_interrupt_config_bits = NI660X_GLOBAL_INT_GLOBAL;
if (board->n_chips > 1)
- global_interrupt_config_bits |= Cascade_Int_Enable_Bit;
- ni_660x_write_register(dev, 0, global_interrupt_config_bits,
- NI660X_GLOBAL_INT_CFG);
+ global_interrupt_config_bits |= NI660X_GLOBAL_INT_CASCADE;
+ ni_660x_write(dev, 0, global_interrupt_config_bits,
+ NI660X_GLOBAL_INT_CFG);
return 0;
}
@@ -1173,11 +888,12 @@ static void ni_660x_detach(struct comedi_device *dev)
{
struct ni_660x_private *devpriv = dev->private;
- if (dev->irq)
+ if (dev->irq) {
+ ni_660x_write(dev, 0, 0, NI660X_GLOBAL_INT_CFG);
free_irq(dev->irq, dev);
+ }
if (devpriv) {
- if (devpriv->counter_dev)
- ni_gpct_device_destroy(devpriv->counter_dev);
+ ni_gpct_device_destroy(devpriv->counter_dev);
ni_660x_free_mite_rings(dev);
mite_detach(devpriv->mite);
}
@@ -1218,5 +934,5 @@ static struct pci_driver ni_660x_pci_driver = {
module_comedi_pci_driver(ni_660x_driver, ni_660x_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for NI 660x counter/timer boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc.h b/drivers/staging/comedi/drivers/ni_labpc.h
index 83f878adb..be8d5cd3f 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.h
+++ b/drivers/staging/comedi/drivers/ni_labpc.h
@@ -1,27 +1,22 @@
/*
- ni_labpc.h
-
- Header for ni_labpc.c and ni_labpc_cs.c
-
- Copyright (C) 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Header for ni_labpc ISA/PCMCIA/PCI drivers
+ *
+ * Copyright (C) 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _NI_LABPC_H
#define _NI_LABPC_H
-#define EEPROM_SIZE 256 /* 256 byte eeprom */
-#define NUM_AO_CHAN 2 /* boards have two analog output channels */
-
enum transfer_type { fifo_not_empty_transfer, fifo_half_full_transfer,
isa_dma_transfer
};
diff --git a/drivers/staging/comedi/drivers/ni_labpc_common.c b/drivers/staging/comedi/drivers/ni_labpc_common.c
index 863afb28e..b0dfb8eed 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_common.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_common.c
@@ -84,8 +84,10 @@ static const struct comedi_lrange range_labpc_ao = {
}
};
-/* functions that do inb/outb and readb/writeb so we can use
- * function pointers to decide which to use */
+/*
+ * functions that do inb/outb and readb/writeb so we can use
+ * function pointers to decide which to use
+ */
static unsigned int labpc_inb(struct comedi_device *dev, unsigned long reg)
{
return inb(dev->iobase + reg);
@@ -656,19 +658,24 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* figure out what method we will use to transfer data */
if (devpriv->dma &&
- /* dma unsafe at RT priority,
- * and too much setup time for CMDF_WAKE_EOS */
- (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY)) == 0)
+ (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY)) == 0) {
+ /*
+ * dma unsafe at RT priority,
+ * and too much setup time for CMDF_WAKE_EOS
+ */
xfer = isa_dma_transfer;
- else if (/* pc-plus has no fifo-half full interrupt */
- board->is_labpc1200 &&
- /* wake-end-of-scan should interrupt on fifo not empty */
- (cmd->flags & CMDF_WAKE_EOS) == 0 &&
- /* make sure we are taking more than just a few points */
- (cmd->stop_src != TRIG_COUNT || devpriv->count > 256))
+ } else if (board->is_labpc1200 &&
+ (cmd->flags & CMDF_WAKE_EOS) == 0 &&
+ (cmd->stop_src != TRIG_COUNT || devpriv->count > 256)) {
+ /*
+ * pc-plus has no fifo-half full interrupt
+ * wake-end-of-scan should interrupt on fifo not empty
+ * make sure we are taking more than just a few points
+ */
xfer = fifo_half_full_transfer;
- else
+ } else {
xfer = fifo_not_empty_transfer;
+ }
devpriv->current_transfer = xfer;
labpc_ai_set_chan_and_gain(dev, mode, chan, range, aref);
@@ -679,9 +686,11 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* manual says to set scan enable bit on second pass */
if (mode == MODE_MULT_CHAN_UP || mode == MODE_MULT_CHAN_DOWN) {
devpriv->cmd1 |= CMD1_SCANEN;
- /* need a brief delay before enabling scan, or scan
- * list will get screwed when you switch
- * between scan up to scan down mode - dunno why */
+ /*
+ * Need a brief delay before enabling scan, or scan
+ * list will get screwed when you switch between
+ * scan up to scan down mode - dunno why.
+ */
udelay(1);
devpriv->write_byte(dev, devpriv->cmd1, CMD1_REG);
}
@@ -728,8 +737,10 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->cmd4 = 0;
if (cmd->convert_src != TRIG_EXT)
devpriv->cmd4 |= CMD4_ECLKRCV;
- /* XXX should discard first scan when using interval scanning
- * since manual says it is not synced with scan clock */
+ /*
+ * XXX should discard first scan when using interval scanning
+ * since manual says it is not synced with scan clock.
+ */
if (!labpc_use_continuous_mode(cmd, mode)) {
devpriv->cmd4 |= CMD4_INTSCAN;
if (cmd->scan_begin_src == TRIG_EXT)
@@ -795,8 +806,10 @@ static int labpc_drain_fifo(struct comedi_device *dev)
return 0;
}
-/* makes sure all data acquired by board is transferred to comedi (used
- * when acquisition is terminated by stop_src == TRIG_EXT). */
+/*
+ * Makes sure all data acquired by board is transferred to comedi (used
+ * when acquisition is terminated by stop_src == TRIG_EXT).
+ */
static void labpc_drain_dregs(struct comedi_device *dev)
{
struct labpc_private *devpriv = dev->private;
@@ -907,9 +920,11 @@ static int labpc_ao_insn_write(struct comedi_device *dev,
channel = CR_CHAN(insn->chanspec);
- /* turn off pacing of analog output channel */
- /* note: hardware bug in daqcard-1200 means pacing cannot
- * be independently enabled/disabled for its the two channels */
+ /*
+ * Turn off pacing of analog output channel.
+ * NOTE: hardware bug in daqcard-1200 means pacing cannot
+ * be independently enabled/disabled for its two channels.
+ */
spin_lock_irqsave(&dev->spinlock, flags);
devpriv->cmd2 &= ~CMD2_LDAC(channel);
devpriv->write_byte(dev, devpriv->cmd2, CMD2_REG);
@@ -1261,7 +1276,7 @@ int labpc_common_attach(struct comedi_device *dev,
if (board->has_ao) {
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND;
- s->n_chan = NUM_AO_CHAN;
+ s->n_chan = 2;
s->maxdata = 0x0fff;
s->range_table = &range_labpc_ao;
s->insn_write = labpc_ao_insn_write;
@@ -1307,12 +1322,12 @@ int labpc_common_attach(struct comedi_device *dev,
s->type = COMEDI_SUBD_UNUSED;
}
- /* EEPROM */
+ /* EEPROM (256 bytes) */
s = &dev->subdevices[4];
if (board->is_labpc1200) {
s->type = COMEDI_SUBD_MEMORY;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = EEPROM_SIZE;
+ s->n_chan = 256;
s->maxdata = 0xff;
s->insn_write = labpc_eeprom_insn_write;
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index a1c69ac07..3d4d0b9ad 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -1,57 +1,50 @@
/*
- comedi/drivers/ni_labpc_cs.c
- Driver for National Instruments daqcard-1200 boards
- Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- PCMCIA crap is adapted from dummy_cs.c 1.31 2001/08/24 12:13:13
- from the pcmcia package.
- The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_labpc_cs
-Description: National Instruments Lab-PC (& compatibles)
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [National Instruments] DAQCard-1200 (daqcard-1200)
-Status: works
-
-Thanks go to Fredrik Lingvall for much testing and perseverance in
-helping to debug daqcard-1200 support.
-
-The 1200 series boards have onboard calibration dacs for correcting
-analog input/output offsets and gains. The proper settings for these
-caldacs are stored on the board's eeprom. To read the caldac values
-from the eeprom and store them into a file that can be then be used by
-comedilib, use the comedi_calibrate program.
-
-Configuration options:
- none
-
-The daqcard-1200 has quirky chanlist requirements
-when scanning multiple channels. Multiple channel scan
-sequence must start at highest channel, then decrement down to
-channel 0. Chanlists consisting of all one channel
-are also legal, and allow you to pace conversions in bursts.
-
-*/
+ * Driver for National Instruments daqcard-1200 boards
+ * Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * PCMCIA crap is adapted from dummy_cs.c 1.31 2001/08/24 12:13:13
+ * from the pcmcia package.
+ * The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-
-NI manuals:
-340988a (daqcard-1200)
-
-*/
+ * Driver: ni_labpc_cs
+ * Description: National Instruments Lab-PC (& compatibles)
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Devices: [National Instruments] DAQCard-1200 (daqcard-1200)
+ * Status: works
+ *
+ * Thanks go to Fredrik Lingvall for much testing and perseverance in
+ * helping to debug daqcard-1200 support.
+ *
+ * The 1200 series boards have onboard calibration dacs for correcting
+ * analog input/output offsets and gains. The proper settings for these
+ * caldacs are stored on the board's eeprom. To read the caldac values
+ * from the eeprom and store them into a file that can then be used by
+ * comedilib, use the comedi_calibrate program.
+ *
+ * Configuration options: none
+ *
+ * The daqcard-1200 has quirky chanlist requirements when scanning multiple
+ * channels. Multiple channel scan sequence must start at highest channel,
+ * then decrement down to channel 0. Chanlists consisting of all one channel
+ * are also legal, and allow you to pace conversions in bursts.
+ *
+ * NI manuals:
+ * 340988a (daqcard-1200)
+ */
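+/*
+ * Example of a legal daqcard-1200 multi-channel scan list (a sketch
+ * based on the constraint above, not from the NI manual): channels
+ * 3, 2, 1, 0 -- starting at the highest channel and decrementing down
+ * to channel 0. A chanlist repeating a single channel, e.g. 2, 2, 2, 2,
+ * is also legal and paces conversions in bursts.
+ */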
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/ni_labpc_pci.c b/drivers/staging/comedi/drivers/ni_labpc_pci.c
index 77d403801..cac089193 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_pci.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_pci.c
@@ -51,8 +51,8 @@ static const struct labpc_boardinfo labpc_pci_boards[] = {
};
/* ripped from mite.h and mite_setup2() to avoid mite dependency */
-#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
-#define WENAB (1 << 7) /* window enable */
+#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
+#define WENAB BIT(7) /* window enable */
static int labpc_pci_mite_init(struct pci_dev *pcidev)
{
diff --git a/drivers/staging/comedi/drivers/ni_labpc_regs.h b/drivers/staging/comedi/drivers/ni_labpc_regs.h
index 2a274a3e4..8c52179e3 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_regs.h
+++ b/drivers/staging/comedi/drivers/ni_labpc_regs.h
@@ -9,32 +9,32 @@
* Register map (all registers are 8-bit)
*/
#define STAT1_REG 0x00 /* R: Status 1 reg */
-#define STAT1_DAVAIL (1 << 0)
-#define STAT1_OVERRUN (1 << 1)
-#define STAT1_OVERFLOW (1 << 2)
-#define STAT1_CNTINT (1 << 3)
-#define STAT1_GATA0 (1 << 5)
-#define STAT1_EXTGATA0 (1 << 6)
+#define STAT1_DAVAIL BIT(0)
+#define STAT1_OVERRUN BIT(1)
+#define STAT1_OVERFLOW BIT(2)
+#define STAT1_CNTINT BIT(3)
+#define STAT1_GATA0 BIT(5)
+#define STAT1_EXTGATA0 BIT(6)
#define CMD1_REG 0x00 /* W: Command 1 reg */
#define CMD1_MA(x) (((x) & 0x7) << 0)
-#define CMD1_TWOSCMP (1 << 3)
+#define CMD1_TWOSCMP BIT(3)
#define CMD1_GAIN(x) (((x) & 0x7) << 4)
-#define CMD1_SCANEN (1 << 7)
+#define CMD1_SCANEN BIT(7)
#define CMD2_REG 0x01 /* W: Command 2 reg */
-#define CMD2_PRETRIG (1 << 0)
-#define CMD2_HWTRIG (1 << 1)
-#define CMD2_SWTRIG (1 << 2)
-#define CMD2_TBSEL (1 << 3)
-#define CMD2_2SDAC0 (1 << 4)
-#define CMD2_2SDAC1 (1 << 5)
-#define CMD2_LDAC(x) (1 << (6 + (x)))
+#define CMD2_PRETRIG BIT(0)
+#define CMD2_HWTRIG BIT(1)
+#define CMD2_SWTRIG BIT(2)
+#define CMD2_TBSEL BIT(3)
+#define CMD2_2SDAC0 BIT(4)
+#define CMD2_2SDAC1 BIT(5)
+#define CMD2_LDAC(x) BIT(6 + ((x) & 0x1))
#define CMD3_REG 0x02 /* W: Command 3 reg */
-#define CMD3_DMAEN (1 << 0)
-#define CMD3_DIOINTEN (1 << 1)
-#define CMD3_DMATCINTEN (1 << 2)
-#define CMD3_CNTINTEN (1 << 3)
-#define CMD3_ERRINTEN (1 << 4)
-#define CMD3_FIFOINTEN (1 << 5)
+#define CMD3_DMAEN BIT(0)
+#define CMD3_DIOINTEN BIT(1)
+#define CMD3_DMATCINTEN BIT(2)
+#define CMD3_CNTINTEN BIT(3)
+#define CMD3_ERRINTEN BIT(4)
+#define CMD3_FIFOINTEN BIT(5)
#define ADC_START_CONVERT_REG 0x03 /* W: Start Convert reg */
#define DAC_LSB_REG(x) (0x04 + 2 * (x)) /* W: DAC0/1 LSB reg */
#define DAC_MSB_REG(x) (0x05 + 2 * (x)) /* W: DAC0/1 MSB reg */
@@ -43,32 +43,32 @@
#define DMATC_CLEAR_REG 0x0a /* W: DMA Interrupt Clear reg */
#define TIMER_CLEAR_REG 0x0c /* W: Timer Interrupt Clear reg */
#define CMD6_REG 0x0e /* W: Command 6 reg */
-#define CMD6_NRSE (1 << 0)
-#define CMD6_ADCUNI (1 << 1)
-#define CMD6_DACUNI(x) (1 << (2 + (x)))
-#define CMD6_HFINTEN (1 << 5)
-#define CMD6_DQINTEN (1 << 6)
-#define CMD6_SCANUP (1 << 7)
+#define CMD6_NRSE BIT(0)
+#define CMD6_ADCUNI BIT(1)
+#define CMD6_DACUNI(x) BIT(2 + ((x) & 0x1))
+#define CMD6_HFINTEN BIT(5)
+#define CMD6_DQINTEN BIT(6)
+#define CMD6_SCANUP BIT(7)
#define CMD4_REG 0x0f /* W: Command 4 reg */
-#define CMD4_INTSCAN (1 << 0)
-#define CMD4_EOIRCV (1 << 1)
-#define CMD4_ECLKDRV (1 << 2)
-#define CMD4_SEDIFF (1 << 3)
-#define CMD4_ECLKRCV (1 << 4)
+#define CMD4_INTSCAN BIT(0)
+#define CMD4_EOIRCV BIT(1)
+#define CMD4_ECLKDRV BIT(2)
+#define CMD4_SEDIFF BIT(3)
+#define CMD4_ECLKRCV BIT(4)
#define DIO_BASE_REG 0x10 /* R/W: 8255 DIO base reg */
#define COUNTER_A_BASE_REG 0x14 /* R/W: 8253 Counter A base reg */
#define COUNTER_B_BASE_REG 0x18 /* R/W: 8253 Counter B base reg */
#define CMD5_REG 0x1c /* W: Command 5 reg */
-#define CMD5_WRTPRT (1 << 2)
-#define CMD5_DITHEREN (1 << 3)
-#define CMD5_CALDACLD (1 << 4)
-#define CMD5_SCLK (1 << 5)
-#define CMD5_SDATA (1 << 6)
-#define CMD5_EEPROMCS (1 << 7)
+#define CMD5_WRTPRT BIT(2)
+#define CMD5_DITHEREN BIT(3)
+#define CMD5_CALDACLD BIT(4)
+#define CMD5_SCLK BIT(5)
+#define CMD5_SDATA BIT(6)
+#define CMD5_EEPROMCS BIT(7)
#define STAT2_REG 0x1d /* R: Status 2 reg */
-#define STAT2_PROMOUT (1 << 0)
-#define STAT2_OUTA1 (1 << 1)
-#define STAT2_FIFONHF (1 << 2)
+#define STAT2_PROMOUT BIT(0)
+#define STAT2_OUTA1 BIT(1)
+#define STAT2_FIFONHF BIT(2)
#define INTERVAL_COUNT_REG 0x1e /* W: Interval Counter Data reg */
#define INTERVAL_STROBE_REG 0x1f /* W: Interval Counter Strobe reg */
diff --git a/drivers/staging/comedi/drivers/ni_mio_c_common.c b/drivers/staging/comedi/drivers/ni_mio_c_common.c
deleted file mode 100644
index e69de29bb..000000000
--- a/drivers/staging/comedi/drivers/ni_mio_c_common.c
+++ /dev/null
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index dcaf7e89f..8dabb1951 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1,56 +1,53 @@
/*
- comedi/drivers/ni_mio_common.c
- Hardware driver for DAQ-STC based boards
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
- Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Hardware driver for DAQ-STC based boards
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
- This file is meant to be included by another file, e.g.,
- ni_atmio.c or ni_pcimio.c.
-
- Interrupt support originally added by Truxton Fulton
- <trux@truxton.com>
-
- References (from ftp://ftp.natinst.com/support/manuals):
-
- 340747b.pdf AT-MIO E series Register Level Programmer Manual
- 341079b.pdf PCI E Series RLPM
- 340934b.pdf DAQ-STC reference manual
- 67xx and 611x registers (from ftp://ftp.ni.com/support/daq/mhddk/documentation/)
- release_ni611x.pdf
- release_ni67xx.pdf
- Other possibly relevant info:
-
- 320517c.pdf User manual (obsolete)
- 320517f.pdf User manual (new)
- 320889a.pdf delete
- 320906c.pdf maximum signal ratings
- 321066a.pdf about 16x
- 321791a.pdf discontinuation of at-mio-16e-10 rev. c
- 321808a.pdf about at-mio-16e-10 rev P
- 321837a.pdf discontinuation of at-mio-16de-10 rev d
- 321838a.pdf about at-mio-16de-10 rev N
-
- ISSUES:
-
- - the interrupt routine needs to be cleaned up
-
- 2006-02-07: S-Series PCI-6143: Support has been added but is not
- fully tested as yet. Terry Barnaby, BEAM Ltd.
-*/
+ * This file is meant to be included by another file, e.g.,
+ * ni_atmio.c or ni_pcimio.c.
+ *
+ * Interrupt support originally added by Truxton Fulton <trux@truxton.com>
+ *
+ * References (ftp://ftp.natinst.com/support/manuals):
+ * 340747b.pdf AT-MIO E series Register Level Programmer Manual
+ * 341079b.pdf PCI E Series RLPM
+ * 340934b.pdf DAQ-STC reference manual
+ *
+ * 67xx and 611x registers (ftp://ftp.ni.com/support/daq/mhddk/documentation/)
+ * release_ni611x.pdf
+ * release_ni67xx.pdf
+ *
+ * Other possibly relevant info:
+ * 320517c.pdf User manual (obsolete)
+ * 320517f.pdf User manual (new)
+ * 320889a.pdf delete
+ * 320906c.pdf maximum signal ratings
+ * 321066a.pdf about 16x
+ * 321791a.pdf discontinuation of at-mio-16e-10 rev. c
+ * 321808a.pdf about at-mio-16e-10 rev P
+ * 321837a.pdf discontinuation of at-mio-16de-10 rev d
+ * 321838a.pdf about at-mio-16de-10 rev N
+ *
+ * ISSUES:
+ * - the interrupt routine needs to be cleaned up
+ *
+ * 2006-02-07: S-Series PCI-6143: Support has been added but is not
+ * fully tested as yet. Terry Barnaby, BEAM Ltd.
+ */
#include <linux/interrupt.h>
#include <linux/sched.h>
@@ -216,19 +213,8 @@ enum ni_common_subdevices {
NI_FREQ_OUT_SUBDEV,
NI_NUM_SUBDEVICES
};
-static inline unsigned NI_GPCT_SUBDEV(unsigned counter_index)
-{
- switch (counter_index) {
- case 0:
- return NI_GPCT0_SUBDEV;
- case 1:
- return NI_GPCT1_SUBDEV;
- default:
- break;
- }
- BUG();
- return NI_GPCT0_SUBDEV;
-}
+
+#define NI_GPCT_SUBDEV(x) (NI_GPCT0_SUBDEV + (x))
enum timebase_nanoseconds {
TIMEBASE_1_NS = 50,
@@ -242,7 +228,7 @@ enum timebase_nanoseconds {
static const int num_adc_stages_611x = 3;
-static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
+static void ni_writel(struct comedi_device *dev, unsigned int data, int reg)
{
if (dev->mmio)
writel(data, dev->mmio + reg);
@@ -250,7 +236,7 @@ static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
outl(data, dev->iobase + reg);
}
-static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
+static void ni_writew(struct comedi_device *dev, unsigned int data, int reg)
{
if (dev->mmio)
writew(data, dev->mmio + reg);
@@ -258,7 +244,7 @@ static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
outw(data, dev->iobase + reg);
}
-static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
+static void ni_writeb(struct comedi_device *dev, unsigned int data, int reg)
{
if (dev->mmio)
writeb(data, dev->mmio + reg);
@@ -266,7 +252,7 @@ static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
outb(data, dev->iobase + reg);
}
-static uint32_t ni_readl(struct comedi_device *dev, int reg)
+static unsigned int ni_readl(struct comedi_device *dev, int reg)
{
if (dev->mmio)
return readl(dev->mmio + reg);
@@ -274,7 +260,7 @@ static uint32_t ni_readl(struct comedi_device *dev, int reg)
return inl(dev->iobase + reg);
}
-static uint16_t ni_readw(struct comedi_device *dev, int reg)
+static unsigned int ni_readw(struct comedi_device *dev, int reg)
{
if (dev->mmio)
return readw(dev->mmio + reg);
@@ -282,7 +268,7 @@ static uint16_t ni_readw(struct comedi_device *dev, int reg)
return inw(dev->iobase + reg);
}
-static uint8_t ni_readb(struct comedi_device *dev, int reg)
+static unsigned int ni_readb(struct comedi_device *dev, int reg)
{
if (dev->mmio)
return readb(dev->mmio + reg);
@@ -457,7 +443,8 @@ static unsigned int m_series_stc_read(struct comedi_device *dev,
}
}
-static void ni_stc_writew(struct comedi_device *dev, uint16_t data, int reg)
+static void ni_stc_writew(struct comedi_device *dev,
+ unsigned int data, int reg)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -476,7 +463,8 @@ static void ni_stc_writew(struct comedi_device *dev, uint16_t data, int reg)
}
}
-static void ni_stc_writel(struct comedi_device *dev, uint32_t data, int reg)
+static void ni_stc_writel(struct comedi_device *dev,
+ unsigned int data, int reg)
{
struct ni_private *devpriv = dev->private;
@@ -488,11 +476,11 @@ static void ni_stc_writel(struct comedi_device *dev, uint32_t data, int reg)
}
}
-static uint16_t ni_stc_readw(struct comedi_device *dev, int reg)
+static unsigned int ni_stc_readw(struct comedi_device *dev, int reg)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
- uint16_t val;
+ unsigned int val;
if (devpriv->is_m_series) {
val = m_series_stc_read(dev, reg);
@@ -509,10 +497,10 @@ static uint16_t ni_stc_readw(struct comedi_device *dev, int reg)
return val;
}
-static uint32_t ni_stc_readl(struct comedi_device *dev, int reg)
+static unsigned int ni_stc_readl(struct comedi_device *dev, int reg)
{
struct ni_private *devpriv = dev->private;
- uint32_t val;
+ unsigned int val;
if (devpriv->is_m_series) {
val = m_series_stc_read(dev, reg);
@@ -524,7 +512,8 @@ static uint32_t ni_stc_readl(struct comedi_device *dev, int reg)
}
static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
- unsigned bit_mask, unsigned bit_values)
+ unsigned int bit_mask,
+ unsigned int bit_values)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -556,6 +545,11 @@ static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
devpriv->g0_g1_select_reg |= bit_values & bit_mask;
ni_writeb(dev, devpriv->g0_g1_select_reg, reg);
break;
+ case NI_M_CDIO_DMA_SEL_REG:
+ devpriv->cdio_dma_select_reg &= ~bit_mask;
+ devpriv->cdio_dma_select_reg |= bit_values & bit_mask;
+ ni_writeb(dev, devpriv->cdio_dma_select_reg, reg);
+ break;
default:
dev_err(dev->class_dev, "called with invalid register %d\n",
reg);
@@ -566,116 +560,35 @@ static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
}
#ifdef PCIDMA
-/* DMA channel setup */
-static inline unsigned ni_stc_dma_channel_select_bitfield(unsigned channel)
-{
- if (channel < 4)
- return 1 << channel;
- if (channel == 4)
- return 0x3;
- if (channel == 5)
- return 0x5;
- BUG();
- return 0;
-}
-
-static inline void ni_set_ai_dma_channel(struct comedi_device *dev,
- unsigned channel)
-{
- unsigned bits = ni_stc_dma_channel_select_bitfield(channel);
-
- ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
- NI_E_DMA_AI_SEL_MASK, NI_E_DMA_AI_SEL(bits));
-}
-
-static inline void ni_set_ai_dma_no_channel(struct comedi_device *dev)
-{
- ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG, NI_E_DMA_AI_SEL_MASK, 0);
-}
-
-static inline void ni_set_ao_dma_channel(struct comedi_device *dev,
- unsigned channel)
-{
- unsigned bits = ni_stc_dma_channel_select_bitfield(channel);
-
- ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
- NI_E_DMA_AO_SEL_MASK, NI_E_DMA_AO_SEL(bits));
-}
-
-static inline void ni_set_ao_dma_no_channel(struct comedi_device *dev)
-{
- ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG, NI_E_DMA_AO_SEL_MASK, 0);
-}
-
-static inline void ni_set_gpct_dma_channel(struct comedi_device *dev,
- unsigned gpct_index,
- unsigned channel)
-{
- unsigned bits = ni_stc_dma_channel_select_bitfield(channel);
-
- ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
- NI_E_DMA_G0_G1_SEL_MASK(gpct_index),
- NI_E_DMA_G0_G1_SEL(gpct_index, bits));
-}
-
-static inline void ni_set_gpct_dma_no_channel(struct comedi_device *dev,
- unsigned gpct_index)
-{
- ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
- NI_E_DMA_G0_G1_SEL_MASK(gpct_index), 0);
-}
-
-static inline void ni_set_cdo_dma_channel(struct comedi_device *dev,
- unsigned mite_channel)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
- unsigned bits;
-
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->cdio_dma_select_reg &= ~NI_M_CDIO_DMA_SEL_CDO_MASK;
- /*
- * XXX just guessing ni_stc_dma_channel_select_bitfield()
- * returns the right bits, under the assumption the cdio dma
- * selection works just like ai/ao/gpct.
- * Definitely works for dma channels 0 and 1.
- */
- bits = ni_stc_dma_channel_select_bitfield(mite_channel);
- devpriv->cdio_dma_select_reg |= NI_M_CDIO_DMA_SEL_CDO(bits);
- ni_writeb(dev, devpriv->cdio_dma_select_reg, NI_M_CDIO_DMA_SEL_REG);
- mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
-}
-
-static inline void ni_set_cdo_dma_no_channel(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->cdio_dma_select_reg &= ~NI_M_CDIO_DMA_SEL_CDO_MASK;
- ni_writeb(dev, devpriv->cdio_dma_select_reg, NI_M_CDIO_DMA_SEL_REG);
- mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
-}
+/* selects the MITE channel to use for DMA */
+#define NI_STC_DMA_CHAN_SEL(x) (((x) < 4) ? BIT(x) : \
+ ((x) == 4) ? 0x3 : \
+ ((x) == 5) ? 0x5 : 0x0)
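+/*
+ * Resulting select values (derived from the macro above, shown here
+ * for illustration): channel 0 -> 0x1, 1 -> 0x2, 2 -> 0x4, 3 -> 0x8,
+ * channel 4 -> 0x3, channel 5 -> 0x5, anything else -> 0x0.
+ */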
+/* DMA channel setup */
static int ni_request_ai_mite_channel(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
+ struct mite_channel *mite_chan;
unsigned long flags;
+ unsigned int bits;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->ai_mite_chan);
- devpriv->ai_mite_chan =
- mite_request_channel(devpriv->mite, devpriv->ai_mite_ring);
- if (!devpriv->ai_mite_chan) {
+ mite_chan = mite_request_channel(devpriv->mite, devpriv->ai_mite_ring);
+ if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
"failed to reserve mite dma channel for analog input\n");
return -EBUSY;
}
- devpriv->ai_mite_chan->dir = COMEDI_INPUT;
- ni_set_ai_dma_channel(dev, devpriv->ai_mite_chan->channel);
+ mite_chan->dir = COMEDI_INPUT;
+ devpriv->ai_mite_chan = mite_chan;
+
+ bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
+ ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
+ NI_E_DMA_AI_SEL_MASK, NI_E_DMA_AI_SEL(bits));
+
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
@@ -683,37 +596,42 @@ static int ni_request_ai_mite_channel(struct comedi_device *dev)
static int ni_request_ao_mite_channel(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
+ struct mite_channel *mite_chan;
unsigned long flags;
+ unsigned int bits;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->ao_mite_chan);
- devpriv->ao_mite_chan =
- mite_request_channel(devpriv->mite, devpriv->ao_mite_ring);
- if (!devpriv->ao_mite_chan) {
+ mite_chan = mite_request_channel(devpriv->mite, devpriv->ao_mite_ring);
+ if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
"failed to reserve mite dma channel for analog outut\n");
return -EBUSY;
}
- devpriv->ao_mite_chan->dir = COMEDI_OUTPUT;
- ni_set_ao_dma_channel(dev, devpriv->ao_mite_chan->channel);
+ mite_chan->dir = COMEDI_OUTPUT;
+ devpriv->ao_mite_chan = mite_chan;
+
+ bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
+ ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
+ NI_E_DMA_AO_SEL_MASK, NI_E_DMA_AO_SEL(bits));
+
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
static int ni_request_gpct_mite_channel(struct comedi_device *dev,
- unsigned gpct_index,
+ unsigned int gpct_index,
enum comedi_io_direction direction)
{
struct ni_private *devpriv = dev->private;
- unsigned long flags;
+ struct ni_gpct *counter = &devpriv->counter_dev->counters[gpct_index];
struct mite_channel *mite_chan;
+ unsigned long flags;
+ unsigned int bits;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->counter_dev->counters[gpct_index].mite_chan);
- mite_chan =
- mite_request_channel(devpriv->mite,
- devpriv->gpct_mite_ring[gpct_index]);
+ mite_chan = mite_request_channel(devpriv->mite,
+ devpriv->gpct_mite_ring[gpct_index]);
if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
@@ -721,37 +639,50 @@ static int ni_request_gpct_mite_channel(struct comedi_device *dev,
return -EBUSY;
}
mite_chan->dir = direction;
- ni_tio_set_mite_channel(&devpriv->counter_dev->counters[gpct_index],
- mite_chan);
- ni_set_gpct_dma_channel(dev, gpct_index, mite_chan->channel);
+ ni_tio_set_mite_channel(counter, mite_chan);
+
+ bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
+ ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
+ NI_E_DMA_G0_G1_SEL_MASK(gpct_index),
+ NI_E_DMA_G0_G1_SEL(gpct_index, bits));
+
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
-#endif /* PCIDMA */
-
static int ni_request_cdo_mite_channel(struct comedi_device *dev)
{
-#ifdef PCIDMA
struct ni_private *devpriv = dev->private;
+ struct mite_channel *mite_chan;
unsigned long flags;
+ unsigned int bits;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->cdo_mite_chan);
- devpriv->cdo_mite_chan =
- mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring);
- if (!devpriv->cdo_mite_chan) {
+ mite_chan = mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring);
+ if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
"failed to reserve mite dma channel for correlated digital output\n");
return -EBUSY;
}
- devpriv->cdo_mite_chan->dir = COMEDI_OUTPUT;
- ni_set_cdo_dma_channel(dev, devpriv->cdo_mite_chan->channel);
+ mite_chan->dir = COMEDI_OUTPUT;
+ devpriv->cdo_mite_chan = mite_chan;
+
+ /*
+ * XXX just guessing NI_STC_DMA_CHAN_SEL()
+ * returns the right bits, under the assumption that the cdio dma
+ * selection works just like ai/ao/gpct.
+ * Definitely works for dma channels 0 and 1.
+ */
+ bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
+ ni_set_bitfield(dev, NI_M_CDIO_DMA_SEL_REG,
+ NI_M_CDIO_DMA_SEL_CDO_MASK,
+ NI_M_CDIO_DMA_SEL_CDO(bits));
+
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-#endif /* PCIDMA */
return 0;
}
+#endif /* PCIDMA */
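All four request helpers above follow the same locking shape; a condensed sketch (generic names sel_reg/sel_mask/ring, for illustration only):

	spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
	mite_chan = mite_request_channel(devpriv->mite, ring);
	if (!mite_chan) {
		spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
		return -EBUSY;	/* channel already reserved */
	}
	mite_chan->dir = direction;	/* COMEDI_INPUT or COMEDI_OUTPUT */
	/* shift the encoded channel bits into the subsystem's select field */
	ni_set_bitfield(dev, sel_reg, sel_mask,
			NI_STC_DMA_CHAN_SEL(mite_chan->channel));
	spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
	return 0;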
static void ni_release_ai_mite_channel(struct comedi_device *dev)
{
@@ -761,7 +692,8 @@ static void ni_release_ai_mite_channel(struct comedi_device *dev)
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ai_mite_chan) {
- ni_set_ai_dma_no_channel(dev);
+ ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
+ NI_E_DMA_AI_SEL_MASK, 0);
mite_release_channel(devpriv->ai_mite_chan);
devpriv->ai_mite_chan = NULL;
}
@@ -777,7 +709,8 @@ static void ni_release_ao_mite_channel(struct comedi_device *dev)
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ao_mite_chan) {
- ni_set_ao_dma_no_channel(dev);
+ ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
+ NI_E_DMA_AO_SEL_MASK, 0);
mite_release_channel(devpriv->ao_mite_chan);
devpriv->ao_mite_chan = NULL;
}
@@ -787,7 +720,7 @@ static void ni_release_ao_mite_channel(struct comedi_device *dev)
#ifdef PCIDMA
static void ni_release_gpct_mite_channel(struct comedi_device *dev,
- unsigned gpct_index)
+ unsigned int gpct_index)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -797,7 +730,8 @@ static void ni_release_gpct_mite_channel(struct comedi_device *dev,
struct mite_channel *mite_chan =
devpriv->counter_dev->counters[gpct_index].mite_chan;
- ni_set_gpct_dma_no_channel(dev, gpct_index);
+ ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
+ NI_E_DMA_G0_G1_SEL_MASK(gpct_index), 0);
ni_tio_set_mite_channel(&devpriv->
counter_dev->counters[gpct_index],
NULL);
@@ -805,30 +739,27 @@ static void ni_release_gpct_mite_channel(struct comedi_device *dev,
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
-#endif /* PCIDMA */
static void ni_release_cdo_mite_channel(struct comedi_device *dev)
{
-#ifdef PCIDMA
struct ni_private *devpriv = dev->private;
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->cdo_mite_chan) {
- ni_set_cdo_dma_no_channel(dev);
+ ni_set_bitfield(dev, NI_M_CDIO_DMA_SEL_REG,
+ NI_M_CDIO_DMA_SEL_CDO_MASK, 0);
mite_release_channel(devpriv->cdo_mite_chan);
devpriv->cdo_mite_chan = NULL;
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-#endif /* PCIDMA */
}
-#ifdef PCIDMA
static void ni_e_series_enable_second_irq(struct comedi_device *dev,
- unsigned gpct_index, short enable)
+ unsigned int gpct_index, short enable)
{
struct ni_private *devpriv = dev->private;
- uint16_t val = 0;
+ unsigned int val = 0;
int reg;
if (devpriv->is_m_series || gpct_index > 1)
@@ -875,8 +806,10 @@ static void ni_clear_ai_fifo(struct comedi_device *dev)
ni_writeb(dev, 0, NI_M_STATIC_AI_CTRL_REG(0));
ni_writeb(dev, 1, NI_M_STATIC_AI_CTRL_REG(0));
#if 0
- /* the NI example code does 3 convert pulses for 625x boards,
- but that appears to be wrong in practice. */
+ /*
+ * The NI example code does 3 convert pulses for 625x
+ * boards, but that appears to be wrong in practice.
+ */
ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
NISTC_AI_CMD1_REG);
ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
@@ -888,8 +821,8 @@ static void ni_clear_ai_fifo(struct comedi_device *dev)
}
}
-static inline void ni_ao_win_outw(struct comedi_device *dev, uint16_t data,
- int addr)
+static inline void ni_ao_win_outw(struct comedi_device *dev,
+ unsigned int data, int addr)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -900,8 +833,8 @@ static inline void ni_ao_win_outw(struct comedi_device *dev, uint16_t data,
spin_unlock_irqrestore(&devpriv->window_lock, flags);
}
-static inline void ni_ao_win_outl(struct comedi_device *dev, uint32_t data,
- int addr)
+static inline void ni_ao_win_outl(struct comedi_device *dev,
+ unsigned int data, int addr)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -925,20 +858,21 @@ static inline unsigned short ni_ao_win_inw(struct comedi_device *dev, int addr)
return data;
}
-/* ni_set_bits( ) allows different parts of the ni_mio_common driver to
-* share registers (such as Interrupt_A_Register) without interfering with
-* each other.
-*
-* NOTE: the switch/case statements are optimized out for a constant argument
-* so this is actually quite fast--- If you must wrap another function around this
-* make it inline to avoid a large speed penalty.
-*
-* value should only be 1 or 0.
-*/
+/*
+ * ni_set_bits( ) allows different parts of the ni_mio_common driver to
+ * share registers (such as Interrupt_A_Register) without interfering with
+ * each other.
+ *
+ * NOTE: the switch/case statements are optimized out for a constant argument
+ * so this is actually quite fast. If you must wrap another function around
+ * this, make it inline to avoid a large speed penalty.
+ *
+ * value should only be 1 or 0.
+ */
static inline void ni_set_bits(struct comedi_device *dev, int reg,
- unsigned bits, unsigned value)
+ unsigned int bits, unsigned int value)
{
- unsigned bit_values;
+ unsigned int bit_values;
if (value)
bit_values = bits;
@@ -956,7 +890,7 @@ static void ni_sync_ai_dma(struct comedi_device *dev)
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ai_mite_chan)
- mite_sync_input_dma(devpriv->ai_mite_chan, s);
+ mite_sync_dma(devpriv->ai_mite_chan, s);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
@@ -972,9 +906,8 @@ static int ni_ai_drain_dma(struct comedi_device *dev)
if (devpriv->ai_mite_chan) {
for (i = 0; i < timeout; i++) {
if ((ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E)
- && mite_bytes_in_transit(devpriv->ai_mite_chan) ==
- 0)
+ NISTC_AI_STATUS1_FIFO_E) &&
+ mite_bytes_in_transit(devpriv->ai_mite_chan) == 0)
break;
udelay(5);
}
@@ -994,19 +927,6 @@ static int ni_ai_drain_dma(struct comedi_device *dev)
return retval;
}
-static void mite_handle_b_linkc(struct mite_struct *mite,
- struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->write_subdev;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->ao_mite_chan)
- mite_sync_output_dma(devpriv->ao_mite_chan, s);
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-}
-
static int ni_ao_wait_for_dma_load(struct comedi_device *dev)
{
static const int timeout = 10000;
@@ -1018,9 +938,11 @@ static int ni_ao_wait_for_dma_load(struct comedi_device *dev)
b_status = ni_stc_readw(dev, NISTC_AO_STATUS1_REG);
if (b_status & NISTC_AO_STATUS1_FIFO_HF)
break;
- /* if we poll too often, the pci bus activity seems
- to slow the dma transfer down */
- udelay(10);
+ /*
+ * If we poll too often, the pci bus activity seems
+ * to slow the dma transfer down.
+ */
+ usleep_range(10, 100);
}
if (i == timeout) {
dev_err(dev->class_dev, "timed out waiting for dma load\n");
@@ -1038,7 +960,7 @@ static void ni_ao_fifo_load(struct comedi_device *dev,
struct ni_private *devpriv = dev->private;
int i;
unsigned short d;
- u32 packed_data;
+ unsigned int packed_data;
for (i = 0; i < n; i++) {
comedi_buf_read_samples(s, &d, 1);
@@ -1128,7 +1050,7 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
{
struct ni_private *devpriv = dev->private;
struct comedi_async *async = s->async;
- u32 dl;
+ unsigned int dl;
unsigned short data;
int i;
@@ -1148,7 +1070,10 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
comedi_buf_write_samples(s, &data, 1);
}
} else if (devpriv->is_6143) {
- /* This just reads the FIFO assuming the data is present, no checks on the FIFO status are performed */
+ /*
+ * This just reads the FIFO assuming the data is present,
+ * no checks on the FIFO status are performed.
+ */
for (i = 0; i < n / 2; i++) {
dl = ni_readl(dev, NI6143_AI_FIFO_DATA_REG);
@@ -1192,16 +1117,13 @@ static void ni_handle_fifo_half_full(struct comedi_device *dev)
}
#endif
-/*
- Empties the AI fifo
-*/
+/* Empties the AI fifo */
static void ni_handle_fifo_dregs(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
- u32 dl;
+ unsigned int dl;
unsigned short data;
- unsigned short fifo_empty;
int i;
if (devpriv->is_611x) {
@@ -1237,15 +1159,16 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
}
} else {
- fifo_empty = ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E;
- while (fifo_empty == 0) {
+ unsigned short fe; /* fifo empty */
+
+ fe = ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
+ NISTC_AI_STATUS1_FIFO_E;
+ while (fe == 0) {
for (i = 0;
i < ARRAY_SIZE(devpriv->ai_fifo_buffer); i++) {
- fifo_empty = ni_stc_readw(dev,
- NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E;
- if (fifo_empty)
+ fe = ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
+ NISTC_AI_STATUS1_FIFO_E;
+ if (fe)
break;
devpriv->ai_fifo_buffer[i] =
ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
@@ -1260,7 +1183,7 @@ static void get_last_sample_611x(struct comedi_device *dev)
struct ni_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
unsigned short data;
- u32 dl;
+ unsigned int dl;
if (!devpriv->is_611x)
return;
@@ -1278,7 +1201,7 @@ static void get_last_sample_6143(struct comedi_device *dev)
struct ni_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
unsigned short data;
- u32 dl;
+ unsigned int dl;
if (!devpriv->is_6143)
return;
@@ -1365,42 +1288,23 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status)
ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG);
}
-static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
- unsigned ai_mite_status)
+static void handle_a_interrupt(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned short status)
{
- struct comedi_subdevice *s = dev->read_subdev;
struct comedi_cmd *cmd = &s->async->cmd;
- /* 67xx boards don't have ai subdevice, but their gpct0 might generate an a interrupt */
- if (s->type == COMEDI_SUBD_UNUSED)
- return;
-
-#ifdef PCIDMA
- if (ai_mite_status & CHSR_LINKC)
- ni_sync_ai_dma(dev);
-
- if (ai_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
- CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
- CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
- dev_err(dev->class_dev,
- "unknown mite interrupt (ai_mite_status=%08x)\n",
- ai_mite_status);
- s->async->events |= COMEDI_CB_ERROR;
- /* disable_irq(dev->irq); */
- }
-#endif
-
/* test for all uncommon interrupt events at the same time */
if (status & (NISTC_AI_STATUS1_ERR |
NISTC_AI_STATUS1_SC_TC | NISTC_AI_STATUS1_START1)) {
if (status == 0xffff) {
dev_err(dev->class_dev, "Card removed?\n");
- /* we probably aren't even running a command now,
- * so it's a good idea to be careful. */
- if (comedi_is_subdevice_running(s)) {
+ /*
+ * We probably aren't even running a command now,
+ * so it's a good idea to be careful.
+ */
+ if (comedi_is_subdevice_running(s))
s->async->events |= COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
- }
return;
}
if (status & NISTC_AI_STATUS1_ERR) {
@@ -1412,8 +1316,6 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
s->async->events |= COMEDI_CB_ERROR;
if (status & NISTC_AI_STATUS1_OVER)
s->async->events |= COMEDI_CB_OVERFLOW;
-
- comedi_handle_events(dev, s);
return;
}
if (status & NISTC_AI_STATUS1_SC_TC) {
@@ -1425,8 +1327,11 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (status & NISTC_AI_STATUS1_FIFO_HF) {
int i;
static const int timeout = 10;
- /* pcmcia cards (at least 6036) seem to stop producing interrupts if we
- *fail to get the fifo less than half full, so loop to be sure.*/
+ /*
+ * PCMCIA cards (at least 6036) seem to stop producing
+ * interrupts if we fail to get the fifo less than half
+ * full, so loop to be sure.
+ */
for (i = 0; i < timeout; ++i) {
ni_handle_fifo_half_full(dev);
if ((ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
@@ -1438,8 +1343,6 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (status & NISTC_AI_STATUS1_STOP)
ni_handle_eos(dev, s);
-
- comedi_handle_events(dev, s);
}
static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
@@ -1465,29 +1368,9 @@ static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
}
static void handle_b_interrupt(struct comedi_device *dev,
- unsigned short b_status, unsigned ao_mite_status)
+ struct comedi_subdevice *s,
+ unsigned short b_status)
{
- struct comedi_subdevice *s = dev->write_subdev;
- /* unsigned short ack=0; */
-
-#ifdef PCIDMA
- /* Currently, mite.c requires us to handle LINKC */
- if (ao_mite_status & CHSR_LINKC) {
- struct ni_private *devpriv = dev->private;
-
- mite_handle_b_linkc(devpriv->mite, dev);
- }
-
- if (ao_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
- CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
- CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
- dev_err(dev->class_dev,
- "unknown mite interrupt (ao_mite_status=%08x)\n",
- ao_mite_status);
- s->async->events |= COMEDI_CB_ERROR;
- }
-#endif
-
if (b_status == 0xffff)
return;
if (b_status & NISTC_AO_STATUS1_OVERRUN) {
@@ -1515,8 +1398,6 @@ static void handle_b_interrupt(struct comedi_device *dev,
}
}
#endif
-
- comedi_handle_events(dev, s);
}
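After this refactor, handle_a_interrupt() and handle_b_interrupt() only accumulate s->async->events; the single dispatch point moves into ni_E_interrupt() (see the interrupt-handler hunk below), roughly:

	if (a_status & NISTC_AI_STATUS1_INTA)
		handle_a_interrupt(dev, s_ai, a_status);  /* sets events */
	comedi_handle_events(dev, s_ai);  /* one dispatch per subdevice */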
static void ni_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s,
@@ -1606,8 +1487,11 @@ static int ni_ao_setup_MITE_dma(struct comedi_device *dev)
if (devpriv->is_611x || devpriv->is_6713) {
mite_prep_dma(devpriv->ao_mite_chan, 32, 32);
} else {
- /* doing 32 instead of 16 bit wide transfers from memory
- makes the mite do 32 bit pci transfers, doubling pci bandwidth. */
+ /*
+ * Doing 32 instead of 16 bit wide transfers from
+ * memory makes the mite do 32 bit pci transfers,
+ * doubling pci bandwidth.
+ */
mite_prep_dma(devpriv->ao_mite_chan, 16, 32);
}
mite_dma_arm(devpriv->ao_mite_chan);
@@ -1622,16 +1506,15 @@ static int ni_ao_setup_MITE_dma(struct comedi_device *dev)
#endif /* PCIDMA */
/*
- used for both cancel ioctl and board initialization
-
- this is pretty harsh for a cancel, but it works...
+ * used for both cancel ioctl and board initialization
+ *
+ * this is pretty harsh for a cancel, but it works...
*/
-
static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct ni_private *devpriv = dev->private;
- unsigned ai_personal;
- unsigned ai_out_ctrl;
+ unsigned int ai_personal;
+ unsigned int ai_out_ctrl;
ni_release_ai_mite_channel(dev);
/* ai configuration */
@@ -1736,12 +1619,12 @@ static void ni_m_series_load_channelgain_list(struct comedi_device *dev,
unsigned int chan, range, aref;
unsigned int i;
unsigned int dither;
- unsigned range_code;
+ unsigned int range_code;
ni_stc_writew(dev, 1, NISTC_CFG_MEM_CLR_REG);
if ((list[0] & CR_ALT_SOURCE)) {
- unsigned bypass_bits;
+ unsigned int bypass_bits;
chan = CR_CHAN(list[0]);
range = CR_RANGE(list[0]);
@@ -1760,7 +1643,7 @@ static void ni_m_series_load_channelgain_list(struct comedi_device *dev,
ni_writel(dev, 0, NI_M_CFG_BYPASS_FIFO_REG);
}
for (i = 0; i < n_chan; i++) {
- unsigned config_bits = 0;
+ unsigned int config_bits = 0;
chan = CR_CHAN(list[i]);
aref = CR_AREF(list[i]);
@@ -1842,8 +1725,8 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
return;
}
if (n_chan == 1 && !devpriv->is_611x && !devpriv->is_6143) {
- if (devpriv->changain_state
- && devpriv->changain_spec == list[0]) {
+ if (devpriv->changain_state &&
+ devpriv->changain_spec == list[0]) {
/* ready to go. */
return;
}
@@ -1857,8 +1740,8 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
/* Set up Calibration mode if required */
if (devpriv->is_6143) {
- if ((list[0] & CR_ALT_SOURCE)
- && !devpriv->ai_calib_source_enabled) {
+ if ((list[0] & CR_ALT_SOURCE) &&
+ !devpriv->ai_calib_source_enabled) {
/* Strobe Relay enable bit */
ni_writew(dev, devpriv->ai_calib_source |
NI6143_CALIB_CHAN_RELAY_ON,
@@ -1866,9 +1749,10 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
ni_writew(dev, devpriv->ai_calib_source,
NI6143_CALIB_CHAN_REG);
devpriv->ai_calib_source_enabled = 1;
- msleep_interruptible(100); /* Allow relays to change */
- } else if (!(list[0] & CR_ALT_SOURCE)
- && devpriv->ai_calib_source_enabled) {
+ /* Allow relays to change */
+ msleep_interruptible(100);
+ } else if (!(list[0] & CR_ALT_SOURCE) &&
+ devpriv->ai_calib_source_enabled) {
/* Strobe Relay disable bit */
ni_writew(dev, devpriv->ai_calib_source |
NI6143_CALIB_CHAN_RELAY_OFF,
@@ -1876,7 +1760,8 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
ni_writew(dev, devpriv->ai_calib_source,
NI6143_CALIB_CHAN_REG);
devpriv->ai_calib_source_enabled = 0;
- msleep_interruptible(100); /* Allow relays to change */
+ /* Allow relays to change */
+ msleep_interruptible(100);
}
}
@@ -1949,7 +1834,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
struct ni_private *devpriv = dev->private;
unsigned int mask = (s->maxdata + 1) >> 1;
int i, n;
- unsigned signbits;
+ unsigned int signbits;
unsigned int d;
unsigned long dl;
@@ -1997,7 +1882,11 @@ static int ni_ai_insn_read(struct comedi_device *dev,
ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
NISTC_AI_CMD1_REG);
- /* The 6143 has 32-bit FIFOs. You need to strobe a bit to move a single 16bit stranded sample into the FIFO */
+ /*
+ * The 6143 has 32-bit FIFOs. You need to strobe a
+ * bit to move a single 16-bit stranded sample into
+ * the FIFO.
+ */
dl = 0;
for (i = 0; i < NI_TIMEOUT; i++) {
if (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) &
@@ -2035,7 +1924,8 @@ static int ni_ai_insn_read(struct comedi_device *dev,
data[n] = dl;
} else {
d = ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
- d += signbits; /* subtle: needs to be short addition */
+ /* subtle: needs to be short addition */
+ d += signbits;
data[n] = d;
}
}
@@ -2043,8 +1933,8 @@ static int ni_ai_insn_read(struct comedi_device *dev,
return insn->n;
}
-static int ni_ns_to_timer(const struct comedi_device *dev, unsigned nanosec,
- unsigned int flags)
+static int ni_ns_to_timer(const struct comedi_device *dev,
+ unsigned int nanosec, unsigned int flags)
{
struct ni_private *devpriv = dev->private;
int divider;
@@ -2064,14 +1954,14 @@ static int ni_ns_to_timer(const struct comedi_device *dev, unsigned nanosec,
return divider - 1;
}
-static unsigned ni_timer_to_ns(const struct comedi_device *dev, int timer)
+static unsigned int ni_timer_to_ns(const struct comedi_device *dev, int timer)
{
struct ni_private *devpriv = dev->private;
return devpriv->clock_ns * (timer + 1);
}
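A round-trip sanity check, assuming the common 50 ns (20 MHz) timebase in devpriv->clock_ns:

	/* 50000 ns period / 50 ns clock -> divider 1000, timer value 999 */
	timer = ni_ns_to_timer(dev, 50000, CMDF_ROUND_NEAREST);	/* 999 */
	ns = ni_timer_to_ns(dev, timer);	/* 50 * (999 + 1) = 50000 */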
-static void ni_cmd_set_mite_transfer(struct mite_dma_descriptor_ring *ring,
+static void ni_cmd_set_mite_transfer(struct mite_ring *ring,
struct comedi_subdevice *sdev,
const struct comedi_cmd *cmd,
unsigned int max_count) {
@@ -2102,8 +1992,8 @@ static void ni_cmd_set_mite_transfer(struct mite_dma_descriptor_ring *ring,
#endif
}
-static unsigned ni_min_ai_scan_period_ns(struct comedi_device *dev,
- unsigned num_channels)
+static unsigned int ni_min_ai_scan_period_ns(struct comedi_device *dev,
+ unsigned int num_channels)
{
const struct ni_board_struct *board = dev->board_ptr;
struct ni_private *devpriv = dev->private;
@@ -2294,7 +2184,7 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
int start_stop_select = 0;
unsigned int stop_count;
int interrupt_a_enable = 0;
- unsigned ai_trig;
+ unsigned int ai_trig;
if (dev->irq == 0) {
dev_err(dev->class_dev, "cannot run command without an irq\n");
@@ -2307,8 +2197,10 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* start configuration */
ni_stc_writew(dev, NISTC_RESET_AI_CFG_START, NISTC_RESET_REG);
- /* disable analog triggering for now, since it
- * interferes with the use of pfi0 */
+ /*
+ * Disable analog triggering for now, since it interferes
+ * with the use of pfi0.
+ */
devpriv->an_trig_etc_reg &= ~NISTC_ATRIG_ETC_ENA;
ni_stc_writew(dev, devpriv->an_trig_etc_reg, NISTC_ATRIG_ETC_REG);
@@ -2369,7 +2261,10 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (stop_count == 0) {
devpriv->ai_cmd2 |= NISTC_AI_CMD2_END_ON_EOS;
interrupt_a_enable |= NISTC_INTA_ENA_AI_STOP;
- /* this is required to get the last sample for chanlist_len > 1, not sure why */
+ /*
+ * This is required to get the last sample for
+ * chanlist_len > 1, not sure why.
+ */
if (cmd->chanlist_len > 1)
start_stop_select |= NISTC_AI_STOP_POLARITY |
NISTC_AI_STOP_EDGE;
@@ -2489,7 +2384,7 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
switch (devpriv->aimode) {
case AIMODE_HALF_FULL:
- /*generate FIFO interrupts and DMA requests on half-full */
+ /* FIFO interrupts and DMA requests on half-full */
#ifdef PCIDMA
ni_stc_writew(dev, NISTC_AI_MODE3_FIFO_MODE_HF_E,
NISTC_AI_MODE3_REG);
@@ -2880,9 +2775,11 @@ static int ni_ao_inttrig(struct comedi_device *dev,
if (trig_num != cmd->start_arg)
return -EINVAL;
- /* Null trig at beginning prevent ao start trigger from executing more than
- once per command (and doing things like trying to allocate the ao dma channel
- multiple times) */
+ /*
+ * A null trig at the beginning prevents the ao start trigger from
+ * executing more than once per command (and doing things like trying
+ * to allocate the ao dma channel multiple times).
+ */
s->async->inttrig = NULL;
ni_set_bits(dev, NISTC_INTB_ENA_REG,
@@ -2951,7 +2848,7 @@ static void ni_ao_cmd_personalize(struct comedi_device *dev,
const struct comedi_cmd *cmd)
{
const struct ni_board_struct *board = dev->board_ptr;
- unsigned bits;
+ unsigned int bits;
ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
@@ -2999,6 +2896,7 @@ static void ni_ao_cmd_set_trigger(struct comedi_device *dev,
const struct comedi_cmd *cmd)
{
struct ni_private *devpriv = dev->private;
+ unsigned int trigsel;
ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
@@ -3012,39 +2910,20 @@ static void ni_ao_cmd_set_trigger(struct comedi_device *dev,
}
ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
- {
- unsigned int trigsel = devpriv->ao_trigger_select;
-
- switch (cmd->start_src) {
- case TRIG_INT:
- case TRIG_NOW:
- trigsel &= ~(NISTC_AO_TRIG_START1_POLARITY |
- NISTC_AO_TRIG_START1_SEL_MASK);
- trigsel |= NISTC_AO_TRIG_START1_EDGE |
- NISTC_AO_TRIG_START1_SYNC;
- break;
- case TRIG_EXT:
- trigsel = NISTC_AO_TRIG_START1_SEL(
- CR_CHAN(cmd->start_arg) + 1);
- if (cmd->start_arg & CR_INVERT)
- /*
- * 0=active high, 1=active low.
- * see daq-stc 3-24 (p186)
- */
- trigsel |= NISTC_AO_TRIG_START1_POLARITY;
- if (cmd->start_arg & CR_EDGE)
- /* 0=edge detection disabled, 1=enabled */
- trigsel |= NISTC_AO_TRIG_START1_EDGE;
- break;
- default:
- BUG();
- break;
- }
-
- devpriv->ao_trigger_select = trigsel;
- ni_stc_writew(dev, devpriv->ao_trigger_select,
- NISTC_AO_TRIG_SEL_REG);
+ if (cmd->start_src == TRIG_INT) {
+ trigsel = NISTC_AO_TRIG_START1_EDGE |
+ NISTC_AO_TRIG_START1_SYNC;
+ } else { /* TRIG_EXT */
+ trigsel = NISTC_AO_TRIG_START1_SEL(CR_CHAN(cmd->start_arg) + 1);
+ /* 0=active high, 1=active low. see daq-stc 3-24 (p186) */
+ if (cmd->start_arg & CR_INVERT)
+ trigsel |= NISTC_AO_TRIG_START1_POLARITY;
+ /* 0=edge detection disabled, 1=enabled */
+ if (cmd->start_arg & CR_EDGE)
+ trigsel |= NISTC_AO_TRIG_START1_EDGE;
}
+ ni_stc_writew(dev, trigsel, NISTC_AO_TRIG_SEL_REG);
+
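As a usage illustration (hypothetical chanspec; which PFI line channel 0 selects depends on the board's trigger routing), a falling-edge external start trigger would be requested as:

	cmd->start_src = TRIG_EXT;
	cmd->start_arg = CR_CHAN(0) | CR_EDGE | CR_INVERT;
	/*
	 * which the code above encodes as NISTC_AO_TRIG_START1_SEL(1) |
	 * NISTC_AO_TRIG_START1_EDGE | NISTC_AO_TRIG_START1_POLARITY
	 */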
/* AO_Delayed_START1 = 0, we do not support delayed start...yet */
/* sync */
@@ -3149,8 +3028,9 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
NISTC_AO_MODE1_UPDATE_SRC_POLARITY
);
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER:
+ if (cmd->scan_begin_src == TRIG_TIMER) {
+ unsigned int trigvar;
+
devpriv->ao_cmd2 &= ~NISTC_AO_CMD2_BC_GATE_ENA;
/*
@@ -3181,34 +3061,25 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
* eseries/ni67xx and tMSeries.h for mseries.
*/
- {
- unsigned trigvar = ni_ns_to_timer(dev,
- cmd->scan_begin_arg,
- CMDF_ROUND_NEAREST);
+ trigvar = ni_ns_to_timer(dev, cmd->scan_begin_arg,
+ CMDF_ROUND_NEAREST);
- /*
- * Wait N TB3 ticks after the start trigger before
- * clocking(N must be >=2).
- */
- /* following line: 2-1 per STC */
- ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD,
- NISTC_AO_CMD1_REG);
- /* following line: N-1 per STC */
- ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
- }
- break;
- case TRIG_EXT:
+ /*
+ * Wait N TB3 ticks after the start trigger before
+ * clocking (N must be >=2).
+ */
+ /* following line: 2-1 per STC */
+ ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
+ ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
+ /* following line: N-1 per STC */
+ ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
+ } else { /* TRIG_EXT */
/* FIXME: assert scan_begin_arg != 0, ret failure otherwise */
devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC(
CR_CHAN(cmd->scan_begin_arg));
if (cmd->scan_begin_arg & CR_INVERT)
devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC_POLARITY;
- break;
- default:
- BUG();
- break;
}
ni_stc_writew(dev, devpriv->ao_cmd2, NISTC_AO_CMD2_REG);
@@ -3231,7 +3102,7 @@ static void ni_ao_cmd_set_channels(struct comedi_device *dev,
{
struct ni_private *devpriv = dev->private;
const struct comedi_cmd *cmd = &s->async->cmd;
- unsigned bits = 0;
+ unsigned int bits = 0;
ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
@@ -3474,7 +3345,6 @@ static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ao_mode3 = NISTC_AO_MODE3_LAST_GATE_DISABLE;
else
devpriv->ao_mode3 = 0;
- devpriv->ao_trigger_select = 0;
ni_stc_writew(dev, 0, NISTC_AO_PERSONAL_REG);
ni_stc_writew(dev, 0, NISTC_AO_CMD1_REG);
@@ -3550,6 +3420,7 @@ static int ni_dio_insn_bits(struct comedi_device *dev,
return insn->n;
}
+#ifdef PCIDMA
static int ni_m_series_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -3652,13 +3523,11 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
unsigned int trig_num)
{
struct comedi_cmd *cmd = &s->async->cmd;
- const unsigned timeout = 1000;
+ const unsigned int timeout = 1000;
int retval = 0;
- unsigned i;
-#ifdef PCIDMA
+ unsigned int i;
struct ni_private *devpriv = dev->private;
unsigned long flags;
-#endif
if (trig_num != cmd->start_arg)
return -EINVAL;
@@ -3668,7 +3537,6 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
/* read alloc the entire buffer */
comedi_buf_read_alloc(s, s->async->prealloc_bufsz);
-#ifdef PCIDMA
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->cdo_mite_chan) {
mite_prep_dma(devpriv->cdo_mite_chan, 32, 32);
@@ -3680,7 +3548,7 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
if (retval < 0)
return retval;
-#endif
+
/*
* XXX not sure what interrupt C group does
* wait for dma to fill output fifo
@@ -3690,7 +3558,7 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
if (ni_readl(dev, NI_M_CDIO_STATUS_REG) &
NI_M_CDIO_STATUS_CDO_FIFO_FULL)
break;
- udelay(10);
+ usleep_range(10, 100);
}
if (i == timeout) {
dev_err(dev->class_dev, "dma failed to fill cdo fifo!\n");
@@ -3708,7 +3576,7 @@ static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct ni_private *devpriv = dev->private;
const struct comedi_cmd *cmd = &s->async->cmd;
- unsigned cdo_mode_bits;
+ unsigned int cdo_mode_bits;
int retval;
ni_writel(dev, NI_M_CDO_CMD_RESET, NI_M_CDIO_CMD_REG);
@@ -3759,28 +3627,14 @@ static int ni_cdio_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
static void handle_cdio_interrupt(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
- unsigned cdio_status;
+ unsigned int cdio_status;
struct comedi_subdevice *s = &dev->subdevices[NI_DIO_SUBDEV];
-#ifdef PCIDMA
unsigned long flags;
-#endif
- if (!devpriv->is_m_series)
- return;
-#ifdef PCIDMA
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->cdo_mite_chan) {
- unsigned cdo_mite_status =
- mite_get_status(devpriv->cdo_mite_chan);
- if (cdo_mite_status & CHSR_LINKC) {
- writel(CHOR_CLRLC,
- devpriv->mite->mite_io_addr +
- MITE_CHOR(devpriv->cdo_mite_chan->channel));
- }
- mite_sync_output_dma(devpriv->cdo_mite_chan, s);
- }
+ if (devpriv->cdo_mite_chan)
+ mite_ack_linkc(devpriv->cdo_mite_chan, s, true);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-#endif
cdio_status = ni_readl(dev, NI_M_CDIO_STATUS_REG);
if (cdio_status & NI_M_CDIO_STATUS_CDO_ERROR) {
@@ -3796,6 +3650,7 @@ static void handle_cdio_interrupt(struct comedi_device *dev)
}
comedi_handle_events(dev, s);
}
+#endif /* PCIDMA */
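The new mite_ack_linkc() helper consolidates the open-coded sequence it replaces here and in the interrupt handlers below; inferred from the removed code (a sketch, not the helper's actual body), it behaves roughly like:

	status = mite_get_status(mite_chan);
	if (status & CHSR_LINKC)
		writel(CHOR_CLRLC,
		       mite->mmio + MITE_CHOR(mite_chan->channel));
	if (sync)	/* third argument: also sync the dma buffer */
		mite_sync_dma(mite_chan, s);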
static int ni_serial_hw_readwrite8(struct comedi_device *dev,
struct comedi_subdevice *s,
@@ -3813,7 +3668,7 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
status1 = ni_stc_readw(dev, NISTC_STATUS1_REG);
if (status1 & NISTC_STATUS1_SERIO_IN_PROG) {
err = -EBUSY;
- goto Error;
+ goto error;
}
devpriv->dio_control |= NISTC_DIO_CTRL_HW_SER_START;
@@ -3829,7 +3684,7 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
dev_err(dev->class_dev,
"SPI serial I/O didn't finish in time!\n");
err = -ETIME;
- goto Error;
+ goto error;
}
}
@@ -3842,7 +3697,7 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
if (data_in)
*data_in = ni_stc_readw(dev, NISTC_DIO_SERIAL_IN_REG);
-Error:
+error:
ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
return err;
@@ -3860,16 +3715,20 @@ static int ni_serial_sw_readwrite8(struct comedi_device *dev,
udelay((devpriv->serial_interval_ns + 999) / 1000);
for (mask = 0x80; mask; mask >>= 1) {
- /* Output current bit; note that we cannot touch s->state
- because it is a per-subdevice field, and serial is
- a separate subdevice from DIO. */
+ /*
+ * Output current bit; note that we cannot touch s->state
+ * because it is a per-subdevice field, and serial is
+ * a separate subdevice from DIO.
+ */
devpriv->dio_output &= ~NISTC_DIO_SDOUT;
if (data_out & mask)
devpriv->dio_output |= NISTC_DIO_SDOUT;
ni_stc_writew(dev, devpriv->dio_output, NISTC_DIO_OUT_REG);
- /* Assert SDCLK (active low, inverted), wait for half of
- the delay, deassert SDCLK, and wait for the other half. */
+ /*
+ * Assert SDCLK (active low, inverted), wait for half of
+ * the delay, deassert SDCLK, and wait for the other half.
+ */
devpriv->dio_control |= NISTC_DIO_SDCLK;
ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
@@ -3897,7 +3756,7 @@ static int ni_serial_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv = dev->private;
- unsigned clk_fout = devpriv->clock_and_fout;
+ unsigned int clk_fout = devpriv->clock_and_fout;
int err = insn->n;
unsigned char byte_out, byte_in = 0;
@@ -3916,8 +3775,10 @@ static int ni_serial_insn_config(struct comedi_device *dev,
data[1] = SERIAL_DISABLED;
devpriv->serial_interval_ns = data[1];
} else if (data[1] <= SERIAL_600NS) {
- /* Warning: this clock speed is too fast to reliably
- control SCXI. */
+ /*
+ * Warning: this clock speed is too fast to reliably
+ * control SCXI.
+ */
devpriv->dio_control &= ~NISTC_DIO_CTRL_HW_SER_TIMEBASE;
clk_fout |= NISTC_CLK_FOUT_SLOW_TIMEBASE;
clk_fout &= ~NISTC_CLK_FOUT_DIO_SER_OUT_DIV2;
@@ -3933,10 +3794,12 @@ static int ni_serial_insn_config(struct comedi_device *dev,
devpriv->dio_control |= NISTC_DIO_CTRL_HW_SER_TIMEBASE;
clk_fout |= NISTC_CLK_FOUT_SLOW_TIMEBASE |
NISTC_CLK_FOUT_DIO_SER_OUT_DIV2;
- /* Note: NISTC_CLK_FOUT_DIO_SER_OUT_DIV2 only affects
- 600ns/1.2us. If you turn divide_by_2 off with the
- slow clock, you will still get 10us, except then
- all your delays are wrong. */
+ /*
+ * Note: NISTC_CLK_FOUT_DIO_SER_OUT_DIV2 only affects
+ * 600ns/1.2us. If you turn divide_by_2 off with the
+ * slow clock, you will still get 10us, except then
+ * all your delays are wrong.
+ */
data[1] = SERIAL_10US;
devpriv->serial_interval_ns = data[1];
} else {
@@ -4046,15 +3909,11 @@ static unsigned int ni_gpct_to_stc_register(struct comedi_device *dev,
return regmap->mio_reg;
}
-static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
+static void ni_gpct_write_register(struct ni_gpct *counter, unsigned int bits,
enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
unsigned int stc_register = ni_gpct_to_stc_register(dev, reg);
- static const unsigned gpct_interrupt_a_enable_mask =
- NISTC_INTA_ENA_G0_GATE | NISTC_INTA_ENA_G0_TC;
- static const unsigned gpct_interrupt_b_enable_mask =
- NISTC_INTB_ENA_G1_GATE | NISTC_INTB_ENA_G1_TC;
if (stc_register == 0)
return;
@@ -4082,25 +3941,22 @@ static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
/* 16 bit registers */
case NITIO_G0_INT_ENA:
- BUG_ON(bits & ~gpct_interrupt_a_enable_mask);
ni_set_bitfield(dev, stc_register,
- gpct_interrupt_a_enable_mask, bits);
+ NISTC_INTA_ENA_G0_GATE | NISTC_INTA_ENA_G0_TC,
+ bits);
break;
case NITIO_G1_INT_ENA:
- BUG_ON(bits & ~gpct_interrupt_b_enable_mask);
ni_set_bitfield(dev, stc_register,
- gpct_interrupt_b_enable_mask, bits);
+ NISTC_INTB_ENA_G1_GATE | NISTC_INTB_ENA_G1_TC,
+ bits);
break;
- case NITIO_G01_RESET:
- BUG_ON(bits & ~(NISTC_RESET_G0 | NISTC_RESET_G1));
- /* fall-through */
default:
ni_stc_writew(dev, bits, stc_register);
}
}
-static unsigned ni_gpct_read_register(struct ni_gpct *counter,
- enum ni_gpct_register reg)
+static unsigned int ni_gpct_read_register(struct ni_gpct *counter,
+ enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
unsigned int stc_register = ni_gpct_to_stc_register(dev, reg);
@@ -4227,7 +4083,7 @@ static int ni_m_series_pwm_config(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv = dev->private;
- unsigned up_count, down_count;
+ unsigned int up_count, down_count;
switch (data[0]) {
case INSN_CONFIG_PWM_OUTPUT:
@@ -4287,7 +4143,7 @@ static int ni_6143_pwm_config(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv = dev->private;
- unsigned up_count, down_count;
+ unsigned int up_count, down_count;
switch (data[0]) {
case INSN_CONFIG_PWM_OUTPUT:
@@ -4343,13 +4199,13 @@ static int ni_6143_pwm_config(struct comedi_device *dev,
static int pack_mb88341(int addr, int val, int *bitstring)
{
/*
- Fujitsu MB 88341
- Note that address bits are reversed. Thanks to
- Ingo Keen for noticing this.
-
- Note also that the 88341 expects address values from
- 1-12, whereas we use channel numbers 0-11. The NI
- docs use 1-12, also, so be careful here.
+ * Fujitsu MB 88341
+ * Note that address bits are reversed. Thanks to
+ * Ingo Keen for noticing this.
+ *
+ * Note also that the 88341 expects address values from
+ * 1-12, whereas we use channel numbers 0-11. The NI
+ * docs also use 1-12, so be careful here.
*/
addr++;
*bitstring = ((addr & 0x1) << 11) |
@@ -4495,12 +4351,12 @@ static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s)
s->n_chan = n_chans;
if (diffbits) {
- unsigned int *maxdata_list;
+ unsigned int *maxdata_list = devpriv->caldac_maxdata_list;
if (n_chans > MAX_N_CALDACS)
dev_err(dev->class_dev,
"BUG! MAX_N_CALDACS too small\n");
- s->maxdata_list = maxdata_list = devpriv->caldac_maxdata_list;
+ s->maxdata_list = maxdata_list;
chan = 0;
for (i = 0; i < n_dacs; i++) {
type = board->caldac[i];
@@ -4574,8 +4430,8 @@ static int ni_m_series_eeprom_insn_read(struct comedi_device *dev,
return 1;
}
-static unsigned ni_old_get_pfi_routing(struct comedi_device *dev,
- unsigned chan)
+static unsigned int ni_old_get_pfi_routing(struct comedi_device *dev,
+ unsigned int chan)
{
/* pre-m-series boards have fixed signals on pfi pins */
switch (chan) {
@@ -4607,7 +4463,7 @@ static unsigned ni_old_get_pfi_routing(struct comedi_device *dev,
}
static int ni_old_set_pfi_routing(struct comedi_device *dev,
- unsigned chan, unsigned source)
+ unsigned int chan, unsigned int source)
{
/* pre-m-series boards have fixed signals on pfi pins */
if (source != ni_old_get_pfi_routing(dev, chan))
@@ -4615,21 +4471,21 @@ static int ni_old_set_pfi_routing(struct comedi_device *dev,
return 2;
}
-static unsigned ni_m_series_get_pfi_routing(struct comedi_device *dev,
- unsigned chan)
+static unsigned int ni_m_series_get_pfi_routing(struct comedi_device *dev,
+ unsigned int chan)
{
struct ni_private *devpriv = dev->private;
- const unsigned array_offset = chan / 3;
+ const unsigned int array_offset = chan / 3;
return NI_M_PFI_OUT_SEL_TO_SRC(chan,
devpriv->pfi_output_select_reg[array_offset]);
}
static int ni_m_series_set_pfi_routing(struct comedi_device *dev,
- unsigned chan, unsigned source)
+ unsigned int chan, unsigned int source)
{
struct ni_private *devpriv = dev->private;
- unsigned index = chan / 3;
+ unsigned int index = chan / 3;
unsigned short val = devpriv->pfi_output_select_reg[index];
if ((source & 0x1f) != source)
@@ -4643,7 +4499,8 @@ static int ni_m_series_set_pfi_routing(struct comedi_device *dev,
return 2;
}
-static unsigned ni_get_pfi_routing(struct comedi_device *dev, unsigned chan)
+static unsigned int ni_get_pfi_routing(struct comedi_device *dev,
+ unsigned int chan)
{
struct ni_private *devpriv = dev->private;
@@ -4652,8 +4509,8 @@ static unsigned ni_get_pfi_routing(struct comedi_device *dev, unsigned chan)
: ni_old_get_pfi_routing(dev, chan);
}
-static int ni_set_pfi_routing(struct comedi_device *dev, unsigned chan,
- unsigned source)
+static int ni_set_pfi_routing(struct comedi_device *dev,
+ unsigned int chan, unsigned int source)
{
struct ni_private *devpriv = dev->private;
@@ -4663,11 +4520,11 @@ static int ni_set_pfi_routing(struct comedi_device *dev, unsigned chan,
}
static int ni_config_filter(struct comedi_device *dev,
- unsigned pfi_channel,
+ unsigned int pfi_channel,
enum ni_pfi_filter_select filter)
{
struct ni_private *devpriv = dev->private;
- unsigned bits;
+ unsigned int bits;
if (!devpriv->is_m_series)
return -ENOTSUPP;
@@ -4818,9 +4675,12 @@ static int cs5529_ai_insn_read(struct comedi_device *dev,
unsigned int channel_select;
const unsigned int INTERNAL_REF = 0x1000;
- /* Set calibration adc source. Docs lie, reference select bits 8 to 11
+ /*
+ * Set calibration adc source. Docs lie, reference select bits 8 to 11
* do nothing. Bit 12 seems to choose the internal reference voltage; bit
- * 13 causes the adc input to go overrange (maybe reads external reference?) */
+ * 13 causes the adc input to go overrange (maybe reads external
+ * reference?)
+ */
if (insn->chanspec & CR_ALT_SOURCE)
channel_select = INTERNAL_REF;
else
@@ -4875,27 +4735,28 @@ static int init_cs5529(struct comedi_device *dev)
* Find best multiplier/divider to try and get the PLL running at 80 MHz
* given an arbitrary frequency input clock.
*/
-static int ni_mseries_get_pll_parameters(unsigned reference_period_ns,
- unsigned *freq_divider,
- unsigned *freq_multiplier,
- unsigned *actual_period_ns)
-{
- unsigned div;
- unsigned best_div = 1;
- unsigned mult;
- unsigned best_mult = 1;
- static const unsigned pico_per_nano = 1000;
-
- const unsigned reference_picosec = reference_period_ns * pico_per_nano;
- /* m-series wants the phased-locked loop to output 80MHz, which is divided by 4 to
- * 20 MHz for most timing clocks */
- static const unsigned target_picosec = 12500;
- static const unsigned fudge_factor_80_to_20Mhz = 4;
+static int ni_mseries_get_pll_parameters(unsigned int reference_period_ns,
+ unsigned int *freq_divider,
+ unsigned int *freq_multiplier,
+ unsigned int *actual_period_ns)
+{
+ unsigned int div;
+ unsigned int best_div = 1;
+ unsigned int mult;
+ unsigned int best_mult = 1;
+ static const unsigned int pico_per_nano = 1000;
+ const unsigned int reference_picosec = reference_period_ns *
+ pico_per_nano;
+ /*
+ * m-series wants the phase-locked loop to output 80 MHz, which is
+ * divided by 4 to 20 MHz for most timing clocks.
+ */
+ static const unsigned int target_picosec = 12500;
int best_period_picosec = 0;
for (div = 1; div <= NI_M_PLL_MAX_DIVISOR; ++div) {
for (mult = 1; mult <= NI_M_PLL_MAX_MULTIPLIER; ++mult) {
- unsigned new_period_ps =
+ unsigned int new_period_ps =
(reference_picosec * div) / mult;
if (abs(new_period_ps - target_picosec) <
abs(best_period_picosec - target_picosec)) {
@@ -4910,29 +4771,33 @@ static int ni_mseries_get_pll_parameters(unsigned reference_period_ns,
*freq_divider = best_div;
*freq_multiplier = best_mult;
- *actual_period_ns = DIV_ROUND_CLOSEST(best_period_picosec *
- fudge_factor_80_to_20Mhz,
+ /* return the actual period (x4 fudge factor scales 80 MHz to 20 MHz) */
+ *actual_period_ns = DIV_ROUND_CLOSEST(best_period_picosec * 4,
pico_per_nano);
return 0;
}
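A worked example with the 10 MHz PXI10 backplane clock (reference_period_ns = 100): reference_picosec = 100000, and div = 1, mult = 8 hits the 80 MHz target exactly (100000 * 1 / 8 = 12500 ps), so:

	unsigned int fd, fm, actual_ns;

	ni_mseries_get_pll_parameters(100, &fd, &fm, &actual_ns);
	/* fd == 1, fm == 8, actual_ns == 12500 * 4 / 1000 == 50 (20 MHz) */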
static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
- unsigned source, unsigned period_ns)
+ unsigned int source,
+ unsigned int period_ns)
{
struct ni_private *devpriv = dev->private;
- static const unsigned min_period_ns = 50;
- static const unsigned max_period_ns = 1000;
- static const unsigned timeout = 1000;
- unsigned pll_control_bits;
- unsigned freq_divider;
- unsigned freq_multiplier;
- unsigned rtsi;
- unsigned i;
+ static const unsigned int min_period_ns = 50;
+ static const unsigned int max_period_ns = 1000;
+ static const unsigned int timeout = 1000;
+ unsigned int pll_control_bits;
+ unsigned int freq_divider;
+ unsigned int freq_multiplier;
+ unsigned int rtsi;
+ unsigned int i;
int retval;
if (source == NI_MIO_PLL_PXI10_CLOCK)
period_ns = 100;
- /* these limits are somewhat arbitrary, but NI advertises 1 to 20MHz range so we'll use that */
+ /*
+ * These limits are somewhat arbitrary, but NI advertises a 1 to 20 MHz
+ * range, so we'll use that.
+ */
if (period_ns < min_period_ns || period_ns > max_period_ns) {
dev_err(dev->class_dev,
"%s: you must specify an input clock frequency between %i and %i nanosec for the phased-lock loop\n",
@@ -4982,7 +4847,7 @@ static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
ni_writew(dev, pll_control_bits, NI_M_PLL_CTRL_REG);
devpriv->clock_source = source;
- /* it seems to typically take a few hundred microseconds for PLL to lock */
+ /* it takes a few hundred microseconds for PLL to lock */
for (i = 0; i < timeout; ++i) {
if (ni_readw(dev, NI_M_PLL_STATUS_REG) & NI_M_PLL_STATUS_LOCKED)
break;
@@ -4998,7 +4863,7 @@ static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
}
static int ni_set_master_clock(struct comedi_device *dev,
- unsigned source, unsigned period_ns)
+ unsigned int source, unsigned int period_ns)
{
struct ni_private *devpriv = dev->private;
@@ -5043,7 +4908,7 @@ static int ni_set_master_clock(struct comedi_device *dev,
}
static int ni_valid_rtsi_output_source(struct comedi_device *dev,
- unsigned chan, unsigned source)
+ unsigned int chan, unsigned int source)
{
struct ni_private *devpriv = dev->private;
@@ -5078,7 +4943,7 @@ static int ni_valid_rtsi_output_source(struct comedi_device *dev,
}
static int ni_set_rtsi_routing(struct comedi_device *dev,
- unsigned chan, unsigned src)
+ unsigned int chan, unsigned int src)
{
struct ni_private *devpriv = dev->private;
@@ -5098,7 +4963,8 @@ static int ni_set_rtsi_routing(struct comedi_device *dev,
return 2;
}
-static unsigned ni_get_rtsi_routing(struct comedi_device *dev, unsigned chan)
+static unsigned int ni_get_rtsi_routing(struct comedi_device *dev,
+ unsigned int chan)
{
struct ni_private *devpriv = dev->private;
@@ -5262,10 +5128,10 @@ static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
static irqreturn_t ni_E_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
+ struct comedi_subdevice *s_ai = dev->read_subdev;
+ struct comedi_subdevice *s_ao = dev->write_subdev;
unsigned short a_status;
unsigned short b_status;
- unsigned int ai_mite_status = 0;
- unsigned int ao_mite_status = 0;
unsigned long flags;
#ifdef PCIDMA
struct ni_private *devpriv = dev->private;
@@ -5273,7 +5139,7 @@ static irqreturn_t ni_E_interrupt(int irq, void *d)
if (!dev->attached)
return IRQ_NONE;
- smp_mb(); /* make sure dev->attached is checked before handler does anything else. */
+ smp_mb(); /* make sure dev->attached is checked */
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&dev->spinlock, flags);
@@ -5284,34 +5150,33 @@ static irqreturn_t ni_E_interrupt(int irq, void *d)
unsigned long flags_too;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags_too);
- if (devpriv->ai_mite_chan) {
- ai_mite_status = mite_get_status(devpriv->ai_mite_chan);
- if (ai_mite_status & CHSR_LINKC)
- writel(CHOR_CLRLC,
- devpriv->mite->mite_io_addr +
- MITE_CHOR(devpriv->
- ai_mite_chan->channel));
- }
- if (devpriv->ao_mite_chan) {
- ao_mite_status = mite_get_status(devpriv->ao_mite_chan);
- if (ao_mite_status & CHSR_LINKC)
- writel(CHOR_CLRLC,
- devpriv->mite->mite_io_addr +
- MITE_CHOR(devpriv->
- ao_mite_chan->channel));
- }
+ if (s_ai && devpriv->ai_mite_chan)
+ mite_ack_linkc(devpriv->ai_mite_chan, s_ai, false);
+ if (s_ao && devpriv->ao_mite_chan)
+ mite_ack_linkc(devpriv->ao_mite_chan, s_ao, false);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags_too);
}
#endif
ack_a_interrupt(dev, a_status);
ack_b_interrupt(dev, b_status);
- if ((a_status & NISTC_AI_STATUS1_INTA) || (ai_mite_status & CHSR_INT))
- handle_a_interrupt(dev, a_status, ai_mite_status);
- if ((b_status & NISTC_AO_STATUS1_INTB) || (ao_mite_status & CHSR_INT))
- handle_b_interrupt(dev, b_status, ao_mite_status);
+ if (s_ai) {
+ if (a_status & NISTC_AI_STATUS1_INTA)
+ handle_a_interrupt(dev, s_ai, a_status);
+ /* handle any interrupt or dma events */
+ comedi_handle_events(dev, s_ai);
+ }
+ if (s_ao) {
+ if (b_status & NISTC_AO_STATUS1_INTB)
+ handle_b_interrupt(dev, s_ao, b_status);
+ /* handle any interrupt or dma events */
+ comedi_handle_events(dev, s_ao);
+ }
handle_gpct_interrupt(dev, 0);
handle_gpct_interrupt(dev, 1);
- handle_cdio_interrupt(dev);
+#ifdef PCIDMA
+ if (devpriv->is_m_series)
+ handle_cdio_interrupt(dev);
+#endif
spin_unlock_irqrestore(&dev->spinlock, flags);
return IRQ_HANDLED;
@@ -5333,7 +5198,7 @@ static int ni_alloc_private(struct comedi_device *dev)
}
static int ni_E_init(struct comedi_device *dev,
- unsigned interrupt_pin, unsigned irq_polarity)
+ unsigned int interrupt_pin, unsigned int irq_polarity)
{
const struct ni_board_struct *board = dev->board_ptr;
struct ni_private *devpriv = dev->private;
@@ -5450,6 +5315,7 @@ static int ni_E_init(struct comedi_device *dev,
s->maxdata = 1;
s->range_table = &range_digital;
if (devpriv->is_m_series) {
+#ifdef PCIDMA
s->subdev_flags |= SDF_LSAMPL;
s->insn_bits = ni_m_series_dio_insn_bits;
s->insn_config = ni_m_series_dio_insn_config;
@@ -5469,6 +5335,7 @@ static int ni_E_init(struct comedi_device *dev,
NI_M_CDI_CMD_RESET,
NI_M_CDIO_CMD_REG);
ni_writel(dev, s->io_bits, NI_M_DIO_DIR_REG);
+#endif /* PCIDMA */
} else {
s->insn_bits = ni_dio_insn_bits;
s->insn_config = ni_dio_insn_config;
@@ -5675,8 +5542,6 @@ static void mio_common_detach(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
- if (devpriv) {
- if (devpriv->counter_dev)
- ni_gpct_device_destroy(devpriv->counter_dev);
- }
+ if (devpriv)
+ ni_gpct_device_destroy(devpriv->counter_dev);
}
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 4a5aee058..ed04dea91 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -282,12 +282,12 @@ static const struct nidio_board nidio_boards[] = {
};
struct nidio96_private {
- struct mite_struct *mite;
+ struct mite *mite;
int boardtype;
int dio;
unsigned short OpModeBits;
struct mite_channel *di_mite_chan;
- struct mite_dma_descriptor_ring *di_mite_ring;
+ struct mite_ring *di_mite_ring;
spinlock_t mite_channel_lock;
};
@@ -322,8 +322,6 @@ static void ni_pcidio_release_di_mite_channel(struct comedi_device *dev)
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->di_mite_chan) {
- mite_dma_disarm(devpriv->di_mite_chan);
- mite_dma_reset(devpriv->di_mite_chan);
mite_release_channel(devpriv->di_mite_chan);
devpriv->di_mite_chan = NULL;
writeb(primary_DMAChannel_bits(0) |
@@ -368,7 +366,7 @@ static int ni_pcidio_poll(struct comedi_device *dev, struct comedi_subdevice *s)
spin_lock_irqsave(&dev->spinlock, irq_flags);
spin_lock(&devpriv->mite_channel_lock);
if (devpriv->di_mite_chan)
- mite_sync_input_dma(devpriv->di_mite_chan, s);
+ mite_sync_dma(devpriv->di_mite_chan, s);
spin_unlock(&devpriv->mite_channel_lock);
count = comedi_buf_n_bytes_ready(s);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
@@ -381,12 +379,10 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
struct nidio96_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
- struct mite_struct *mite = devpriv->mite;
unsigned int auxdata;
int flags;
int status;
int work = 0;
- unsigned int m_status = 0;
/* parasitic interrupts */
if (!dev->attached) {
@@ -401,24 +397,9 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
flags = readb(dev->mmio + Group_1_Flags);
spin_lock(&devpriv->mite_channel_lock);
- if (devpriv->di_mite_chan)
- m_status = mite_get_status(devpriv->di_mite_chan);
-
- if (m_status & CHSR_INT) {
- if (m_status & CHSR_LINKC) {
- writel(CHOR_CLRLC,
- mite->mite_io_addr +
- MITE_CHOR(devpriv->di_mite_chan->channel));
- mite_sync_input_dma(devpriv->di_mite_chan, s);
- /* XXX need to byteswap */
- }
- if (m_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_DRDY |
- CHSR_DRQ1 | CHSR_MRDY)) {
- dev_dbg(dev->class_dev,
- "unknown mite interrupt, disabling IRQ\n");
- async->events |= COMEDI_CB_ERROR;
- disable_irq(dev->irq);
- }
+ if (devpriv->di_mite_chan) {
+ mite_ack_linkc(devpriv->di_mite_chan, s, false);
+ /* XXX need to byteswap sync'ed dma */
}
spin_unlock(&devpriv->mite_channel_lock);
@@ -914,14 +895,10 @@ static int nidio_auto_attach(struct comedi_device *dev,
spin_lock_init(&devpriv->mite_channel_lock);
- devpriv->mite = mite_alloc(pcidev);
+ devpriv->mite = mite_attach(dev, false); /* use win0 */
if (!devpriv->mite)
return -ENOMEM;
- ret = mite_setup(dev, devpriv->mite);
- if (ret < 0)
- return ret;
-
devpriv->di_mite_ring = mite_alloc_ring(devpriv->mite);
if (!devpriv->di_mite_ring)
return -ENOMEM;
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 231e37d6b..344aa343e 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1061,6 +1061,8 @@ static int pcimio_dio_change(struct comedi_device *dev,
static void m_series_init_eeprom_buffer(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
+ struct mite *mite = devpriv->mite;
+ resource_size_t daq_phys_addr;
static const int Start_Cal_EEPROM = 0x400;
static const unsigned window_size = 10;
static const int serial_number_eeprom_offset = 0x4;
@@ -1070,15 +1072,17 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
unsigned old_iodwcr1_bits;
int i;
- old_iodwbsr_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR);
- old_iodwbsr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
- old_iodwcr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWCR_1);
- writel(0x0, devpriv->mite->mite_io_addr + MITE_IODWBSR);
- writel(((0x80 | window_size) | devpriv->mite->daq_phys_addr),
- devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
- writel(0x1 | old_iodwcr1_bits,
- devpriv->mite->mite_io_addr + MITE_IODWCR_1);
- writel(0xf, devpriv->mite->mite_io_addr + 0x30);
+ /* IO Window 1 needs to be temporarily mapped to read the eeprom */
+ daq_phys_addr = pci_resource_start(mite->pcidev, 1);
+
+ old_iodwbsr_bits = readl(mite->mmio + MITE_IODWBSR);
+ old_iodwbsr1_bits = readl(mite->mmio + MITE_IODWBSR_1);
+ old_iodwcr1_bits = readl(mite->mmio + MITE_IODWCR_1);
+ writel(0x0, mite->mmio + MITE_IODWBSR);
+ writel(((0x80 | window_size) | daq_phys_addr),
+ mite->mmio + MITE_IODWBSR_1);
+ writel(0x1 | old_iodwcr1_bits, mite->mmio + MITE_IODWCR_1);
+ writel(0xf, mite->mmio + 0x30);
BUG_ON(serial_number_eeprom_length > sizeof(devpriv->serial_number));
for (i = 0; i < serial_number_eeprom_length; ++i) {
@@ -1090,10 +1094,10 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i)
devpriv->eeprom_buffer[i] = ni_readb(dev, Start_Cal_EEPROM + i);
- writel(old_iodwbsr1_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
- writel(old_iodwbsr_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR);
- writel(old_iodwcr1_bits, devpriv->mite->mite_io_addr + MITE_IODWCR_1);
- writel(0x0, devpriv->mite->mite_io_addr + 0x30);
+ writel(old_iodwbsr1_bits, mite->mmio + MITE_IODWBSR_1);
+ writel(old_iodwbsr_bits, mite->mmio + MITE_IODWBSR);
+ writel(old_iodwcr1_bits, mite->mmio + MITE_IODWCR_1);
+ writel(0x0, mite->mmio + 0x30);
}
static void init_6143(struct comedi_device *dev)
@@ -1168,7 +1172,7 @@ static int pcimio_auto_attach(struct comedi_device *dev,
return ret;
devpriv = dev->private;
- devpriv->mite = mite_alloc(pcidev);
+ devpriv->mite = mite_attach(dev, false); /* use win0 */
if (!devpriv->mite)
return -ENOMEM;
@@ -1193,10 +1197,6 @@ static int pcimio_auto_attach(struct comedi_device *dev,
if (board->reg_type == ni_reg_6713)
devpriv->is_6713 = 1;
- ret = mite_setup(dev, devpriv->mite);
- if (ret < 0)
- return ret;
-
devpriv->ai_mite_ring = mite_alloc_ring(devpriv->mite);
if (!devpriv->ai_mite_ring)
return -ENOMEM;
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
index 1d5af25b9..1966519cb 100644
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ b/drivers/staging/comedi/drivers/ni_stc.h
@@ -1,24 +1,23 @@
/*
- module/ni_stc.h
- Register descriptions for NI DAQ-STC chip
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998-9 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Register descriptions for NI DAQ-STC chip
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998-9 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
- References:
- DAQ-STC Technical Reference Manual
+ * References:
+ * DAQ-STC Technical Reference Manual
*/
#ifndef _COMEDI_NI_STC_H
@@ -958,7 +957,7 @@ struct ni_board_struct {
unsigned int ao_maxdata;
int ao_fifo_depth;
const struct comedi_lrange *ao_range_table;
- unsigned ao_speed;
+ unsigned int ao_speed;
int reg_type;
unsigned int has_8255:1;
@@ -1002,12 +1001,11 @@ struct ni_private {
unsigned short ao_mode3;
unsigned short ao_cmd1;
unsigned short ao_cmd2;
- unsigned short ao_trigger_select;
struct ni_gpct_device *counter_dev;
unsigned short an_trig_etc_reg;
- unsigned ai_offset[512];
+ unsigned int ai_offset[512];
unsigned long serial_interval_ns;
unsigned char serial_hw_mode;
@@ -1025,24 +1023,24 @@ struct ni_private {
unsigned short g0_g1_select_reg;
unsigned short cdio_dma_select_reg;
- unsigned clock_ns;
- unsigned clock_source;
+ unsigned int clock_ns;
+ unsigned int clock_source;
unsigned short pwm_up_count;
unsigned short pwm_down_count;
unsigned short ai_fifo_buffer[0x2000];
- uint8_t eeprom_buffer[M_SERIES_EEPROM_SIZE];
+ u8 eeprom_buffer[M_SERIES_EEPROM_SIZE];
__be32 serial_number;
- struct mite_struct *mite;
+ struct mite *mite;
struct mite_channel *ai_mite_chan;
struct mite_channel *ao_mite_chan;
struct mite_channel *cdo_mite_chan;
- struct mite_dma_descriptor_ring *ai_mite_ring;
- struct mite_dma_descriptor_ring *ao_mite_ring;
- struct mite_dma_descriptor_ring *cdo_mite_ring;
- struct mite_dma_descriptor_ring *gpct_mite_ring[NUM_GPCT];
+ struct mite_ring *ai_mite_ring;
+ struct mite_ring *ao_mite_ring;
+ struct mite_ring *cdo_mite_ring;
+ struct mite_ring *gpct_mite_ring[NUM_GPCT];
/* ni_pcimio board type flags (based on the boardinfo reg_type) */
unsigned int is_m_series:1;
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index b74e44ec5..7043eb054 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -1,19 +1,18 @@
/*
- comedi/drivers/ni_tio.c
- Support for NI general purpose counters
-
- Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Support for NI general purpose counters
+ *
+ * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
* Module: ni_tio
@@ -36,13 +35,10 @@
* DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
* DAQ 6601/6602 User Manual (NI 322137B-01)
* 340934b.pdf DAQ-STC reference manual
+ *
+ * TODO: Support use of both banks X and Y
*/
-/*
-TODO:
- Support use of both banks X and Y
-*/
-
#include <linux/module.h>
#include <linux/slab.h>
@@ -115,20 +111,7 @@ TODO:
#define NI_660X_LOGIC_LOW_GATE2_SEL 0x1f
#define NI_660X_MAX_UP_DOWN_PIN 7
-static inline unsigned GI_ALT_SYNC(enum ni_gpct_variant variant)
-{
- switch (variant) {
- case ni_gpct_variant_e_series:
- default:
- return 0;
- case ni_gpct_variant_m_series:
- return GI_M_ALT_SYNC;
- case ni_gpct_variant_660x:
- return GI_660X_ALT_SYNC;
- }
-}
-
-static inline unsigned GI_PRESCALE_X2(enum ni_gpct_variant variant)
+static inline unsigned int GI_PRESCALE_X2(enum ni_gpct_variant variant)
{
switch (variant) {
case ni_gpct_variant_e_series:
@@ -141,7 +124,7 @@ static inline unsigned GI_PRESCALE_X2(enum ni_gpct_variant variant)
}
}
-static inline unsigned GI_PRESCALE_X8(enum ni_gpct_variant variant)
+static inline unsigned int GI_PRESCALE_X8(enum ni_gpct_variant variant)
{
switch (variant) {
case ni_gpct_variant_e_series:
@@ -154,19 +137,6 @@ static inline unsigned GI_PRESCALE_X8(enum ni_gpct_variant variant)
}
}
-static inline unsigned GI_HW_ARM_SEL_MASK(enum ni_gpct_variant variant)
-{
- switch (variant) {
- case ni_gpct_variant_e_series:
- default:
- return 0;
- case ni_gpct_variant_m_series:
- return GI_M_HW_ARM_SEL_MASK;
- case ni_gpct_variant_660x:
- return GI_660X_HW_ARM_SEL_MASK;
- }
-}
-
static bool ni_tio_has_gate2_registers(const struct ni_gpct_device *counter_dev)
{
switch (counter_dev->variant) {
@@ -179,17 +149,45 @@ static bool ni_tio_has_gate2_registers(const struct ni_gpct_device *counter_dev)
}
}
+/**
+ * ni_tio_write() - Write a TIO register using the driver-provided callback.
+ * @counter: struct ni_gpct counter.
+ * @value: the value to write.
+ * @reg: the register to write.
+ */
+void ni_tio_write(struct ni_gpct *counter, unsigned int value,
+ enum ni_gpct_register reg)
+{
+ if (reg < NITIO_NUM_REGS)
+ counter->counter_dev->write(counter, value, reg);
+}
+EXPORT_SYMBOL_GPL(ni_tio_write);
+
+/**
+ * ni_tio_read() - Read a TIO register using the driver-provided callback.
+ * @counter: struct ni_gpct counter.
+ * @reg: the register to read.
+ */
+unsigned int ni_tio_read(struct ni_gpct *counter, enum ni_gpct_register reg)
+{
+ if (reg < NITIO_NUM_REGS)
+ return counter->counter_dev->read(counter, reg);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ni_tio_read);
+
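
The two exported accessors above replace the old BUG_ON()-guarded inline wrappers: an out-of-range register index is now silently ignored on write and reads back as zero instead of crashing the kernel. A minimal usage sketch (example_load() is hypothetical; the NITIO_* macros come from ni_tio_internal.h):

static void example_load(struct ni_gpct *counter, unsigned int value)
{
	unsigned int cidx = counter->counter_index;

	/* a bad register enum becomes a no-op rather than a BUG() */
	ni_tio_write(counter, value, NITIO_LOADA_REG(cidx));
}
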
static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter)
{
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
- write_register(counter, GI_RESET(cidx), NITIO_RESET_REG(cidx));
+ ni_tio_write(counter, GI_RESET(cidx), NITIO_RESET_REG(cidx));
}
-static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
- unsigned generic_clock_source)
+static int ni_tio_clock_period_ps(const struct ni_gpct *counter,
+ unsigned int generic_clock_source,
+ u64 *period_ps)
{
- uint64_t clock_period_ps;
+ u64 clock_period_ps;
switch (generic_clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) {
case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
@@ -222,19 +220,80 @@ static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
clock_period_ps *= 8;
break;
default:
- BUG();
- break;
+ return -EINVAL;
}
- return clock_period_ps;
+ *period_ps = clock_period_ps;
+ return 0;
}
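
ni_tio_clock_period_ps() is the template for the BUG() removals throughout this patch: helpers that used to crash on an invalid selector now return -EINVAL and hand the result back through an output parameter. A caller-side sketch under that convention (example_get_period() is hypothetical):

static int example_get_period(struct ni_gpct *counter, u64 *ps)
{
	unsigned int clk_src;
	int ret;

	ret = ni_tio_generic_clock_src_select(counter, &clk_src);
	if (ret)
		return ret;
	/* *ps is only valid when 0 is returned */
	return ni_tio_clock_period_ps(counter, clk_src, ps);
}
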
-static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
+static void ni_tio_set_bits_transient(struct ni_gpct *counter,
+ enum ni_gpct_register reg,
+ unsigned int mask, unsigned int value,
+ unsigned int transient)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned counting_mode_bits =
+ unsigned long flags;
+
+ if (reg < NITIO_NUM_REGS) {
+ spin_lock_irqsave(&counter_dev->regs_lock, flags);
+ counter_dev->regs[reg] &= ~mask;
+ counter_dev->regs[reg] |= (value & mask);
+ ni_tio_write(counter, counter_dev->regs[reg] | transient, reg);
+ mmiowb();
+ spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
+ }
+}
+
+/**
+ * ni_tio_set_bits() - Safely write a counter register.
+ * @counter: struct ni_gpct counter.
+ * @reg: the register to write.
+ * @mask: the bits to change.
+ * @value: the new bits value.
+ *
+ * Used to write to, and update the software copy of, a register whose bits
+ * may be twiddled in interrupt context, or whose software copy may be read
+ * in interrupt context.
+ */
+void ni_tio_set_bits(struct ni_gpct *counter, enum ni_gpct_register reg,
+ unsigned int mask, unsigned int value)
+{
+ ni_tio_set_bits_transient(counter, reg, mask, value, 0x0);
+}
+EXPORT_SYMBOL_GPL(ni_tio_set_bits);
+
+/**
+ * ni_tio_get_soft_copy() - Safely read the software copy of a counter register.
+ * @counter: struct ni_gpct counter.
+ * @reg: the register to read.
+ *
+ * Used to get the software copy of a register whose bits might be modified
+ * in interrupt context, or whose software copy might need to be read in
+ * interrupt context.
+ */
+unsigned int ni_tio_get_soft_copy(const struct ni_gpct *counter,
+ enum ni_gpct_register reg)
+{
+ struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned int value = 0;
+ unsigned long flags;
+
+ if (reg < NITIO_NUM_REGS) {
+ spin_lock_irqsave(&counter_dev->regs_lock, flags);
+ value = counter_dev->regs[reg];
+ spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
+ }
+ return value;
+}
+EXPORT_SYMBOL_GPL(ni_tio_get_soft_copy);
+
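
ni_tio_set_bits() and ni_tio_get_soft_copy() maintain a locked software shadow of registers the hardware cannot read back. A sketch of a read-modify-write using the pair (example_invert_gate() is hypothetical; callers are assumed to be serialized against each other, since the lock is dropped between the two calls):

static void example_invert_gate(struct ni_gpct *counter)
{
	unsigned int cidx = counter->counter_index;
	unsigned int mode;

	/* toggle one field; regs_lock in the helpers guards the shadow */
	mode = ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx));
	ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), GI_GATE_POL_INVERT,
			(mode & GI_GATE_POL_INVERT) ? 0 : GI_GATE_POL_INVERT);
}
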
+static unsigned int ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
+{
+ struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned int cidx = counter->counter_index;
+ unsigned int counting_mode_bits =
ni_tio_get_soft_copy(counter, NITIO_CNT_MODE_REG(cidx));
- unsigned bits = 0;
+ unsigned int bits = 0;
if (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) &
GI_SRC_POL_INVERT)
@@ -246,14 +305,15 @@ static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
return bits;
}
-static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter)
+static int ni_m_series_clock_src_select(const struct ni_gpct *counter,
+ unsigned int *clk_src)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
- unsigned clock_source = 0;
- unsigned src;
- unsigned i;
+ unsigned int cidx = counter->counter_index;
+ unsigned int second_gate_reg = NITIO_GATE2_REG(cidx);
+ unsigned int clock_source = 0;
+ unsigned int src;
+ unsigned int i;
src = GI_BITS_TO_SRC(ni_tio_get_soft_copy(counter,
NITIO_INPUT_SEL_REG(cidx)));
@@ -304,19 +364,20 @@ static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter)
}
if (i <= NI_M_MAX_PFI_CHAN)
break;
- BUG();
- break;
+ return -EINVAL;
}
clock_source |= ni_tio_clock_src_modifiers(counter);
- return clock_source;
+ *clk_src = clock_source;
+ return 0;
}
-static unsigned ni_660x_clock_src_select(const struct ni_gpct *counter)
+static int ni_660x_clock_src_select(const struct ni_gpct *counter,
+ unsigned int *clk_src)
{
- unsigned clock_source = 0;
- unsigned cidx = counter->counter_index;
- unsigned src;
- unsigned i;
+ unsigned int clock_source = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int src;
+ unsigned int i;
src = GI_BITS_TO_SRC(ni_tio_get_soft_copy(counter,
NITIO_INPUT_SEL_REG(cidx)));
@@ -361,78 +422,88 @@ static unsigned ni_660x_clock_src_select(const struct ni_gpct *counter)
}
if (i <= NI_660X_MAX_SRC_PIN)
break;
- BUG();
- break;
+ return -EINVAL;
}
clock_source |= ni_tio_clock_src_modifiers(counter);
- return clock_source;
+ *clk_src = clock_source;
+ return 0;
}
-static unsigned ni_tio_generic_clock_src_select(const struct ni_gpct *counter)
+static int ni_tio_generic_clock_src_select(const struct ni_gpct *counter,
+ unsigned int *clk_src)
{
switch (counter->counter_dev->variant) {
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- return ni_m_series_clock_src_select(counter);
+ return ni_m_series_clock_src_select(counter, clk_src);
case ni_gpct_variant_660x:
- return ni_660x_clock_src_select(counter);
+ return ni_660x_clock_src_select(counter, clk_src);
}
}
-static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync)
+static void ni_tio_set_sync_mode(struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned counting_mode_reg = NITIO_CNT_MODE_REG(cidx);
- static const uint64_t min_normal_sync_period_ps = 25000;
- unsigned mode;
- uint64_t clock_period_ps;
-
- if (ni_tio_counting_mode_registers_present(counter_dev) == 0)
+ unsigned int cidx = counter->counter_index;
+ static const u64 min_normal_sync_period_ps = 25000;
+ unsigned int mask = 0;
+ unsigned int bits = 0;
+ unsigned int reg;
+ unsigned int mode;
+ unsigned int clk_src;
+ u64 ps;
+ bool force_alt_sync;
+
+ /* only m series and 660x variants have counting mode registers */
+ switch (counter_dev->variant) {
+ case ni_gpct_variant_e_series:
+ default:
return;
+ case ni_gpct_variant_m_series:
+ mask = GI_M_ALT_SYNC;
+ break;
+ case ni_gpct_variant_660x:
+ mask = GI_660X_ALT_SYNC;
+ break;
+ }
- mode = ni_tio_get_soft_copy(counter, counting_mode_reg);
+ reg = NITIO_CNT_MODE_REG(cidx);
+ mode = ni_tio_get_soft_copy(counter, reg);
switch (mode & GI_CNT_MODE_MASK) {
case GI_CNT_MODE_QUADX1:
case GI_CNT_MODE_QUADX2:
case GI_CNT_MODE_QUADX4:
case GI_CNT_MODE_SYNC_SRC:
- force_alt_sync = 1;
+ force_alt_sync = true;
break;
default:
+ force_alt_sync = false;
break;
}
- clock_period_ps = ni_tio_clock_period_ps(counter,
- ni_tio_generic_clock_src_select(counter));
+	if (ni_tio_generic_clock_src_select(counter, &clk_src))
+		return;
+	if (ni_tio_clock_period_ps(counter, clk_src, &ps))
+		ps = 0;	/* unknown period; see the comment below */
/*
* It's not clear what we should do if clock_period is unknown, so we
- * are not using the alt sync bit in that case, but allow the caller
- * to decide by using the force_alt_sync parameter.
+ * are not using the alt sync bit in that case.
*/
- if (force_alt_sync ||
- (clock_period_ps && clock_period_ps < min_normal_sync_period_ps)) {
- ni_tio_set_bits(counter, counting_mode_reg,
- GI_ALT_SYNC(counter_dev->variant),
- GI_ALT_SYNC(counter_dev->variant));
- } else {
- ni_tio_set_bits(counter, counting_mode_reg,
- GI_ALT_SYNC(counter_dev->variant),
- 0x0);
- }
+ if (force_alt_sync || (ps && ps < min_normal_sync_period_ps))
+ bits = mask;
+
+ ni_tio_set_bits(counter, reg, mask, bits);
}
-static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
+static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned int mode)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned mode_reg_mask;
- unsigned mode_reg_values;
- unsigned input_select_bits = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int mode_reg_mask;
+ unsigned int mode_reg_values;
+ unsigned int input_select_bits = 0;
/* these bits map directly on to the mode register */
- static const unsigned mode_reg_direct_mask =
+ static const unsigned int mode_reg_direct_mask =
NI_GPCT_GATE_ON_BOTH_EDGES_BIT | NI_GPCT_EDGE_GATE_MODE_MASK |
NI_GPCT_STOP_MODE_MASK | NI_GPCT_OUTPUT_MODE_MASK |
NI_GPCT_HARDWARE_DISARM_MASK | NI_GPCT_LOADING_ON_TC_BIT |
@@ -458,7 +529,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
mode_reg_mask, mode_reg_values);
if (ni_tio_counting_mode_registers_present(counter_dev)) {
- unsigned bits = 0;
+ unsigned int bits = 0;
bits |= GI_CNT_MODE(mode >> NI_GPCT_COUNTING_MODE_SHIFT);
bits |= GI_INDEX_PHASE((mode >> NI_GPCT_INDEX_PHASE_BITSHIFT));
@@ -467,7 +538,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
GI_CNT_MODE_MASK | GI_INDEX_PHASE_MASK |
GI_INDEX_MODE, bits);
- ni_tio_set_sync_mode(counter, 0);
+ ni_tio_set_sync_mode(counter);
}
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_CNT_DIR_MASK,
@@ -484,65 +555,68 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
return 0;
}
-int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger)
+int ni_tio_arm(struct ni_gpct *counter, bool arm, unsigned int start_trigger)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned command_transient_bits = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int transient_bits = 0;
if (arm) {
+ unsigned int mask = 0;
+ unsigned int bits = 0;
+
+ /* only m series and 660x have counting mode registers */
+ switch (counter_dev->variant) {
+ case ni_gpct_variant_e_series:
+ default:
+ break;
+ case ni_gpct_variant_m_series:
+ mask = GI_M_HW_ARM_SEL_MASK;
+ break;
+ case ni_gpct_variant_660x:
+ mask = GI_660X_HW_ARM_SEL_MASK;
+ break;
+ }
+
switch (start_trigger) {
case NI_GPCT_ARM_IMMEDIATE:
- command_transient_bits |= GI_ARM;
+ transient_bits |= GI_ARM;
break;
case NI_GPCT_ARM_PAIRED_IMMEDIATE:
- command_transient_bits |= GI_ARM | GI_ARM_COPY;
+ transient_bits |= GI_ARM | GI_ARM_COPY;
break;
default:
+			/*
+			 * For m series and 660x, pass through the least
+			 * significant bits so we can figure out which
+			 * select was used later.
+			 */
+ if (mask && (start_trigger & NI_GPCT_ARM_UNKNOWN)) {
+ bits |= GI_HW_ARM_ENA |
+ (GI_HW_ARM_SEL(start_trigger) & mask);
+ } else {
+ return -EINVAL;
+ }
break;
}
- if (ni_tio_counting_mode_registers_present(counter_dev)) {
- unsigned bits = 0;
- unsigned sel_mask;
- sel_mask = GI_HW_ARM_SEL_MASK(counter_dev->variant);
-
- switch (start_trigger) {
- case NI_GPCT_ARM_IMMEDIATE:
- case NI_GPCT_ARM_PAIRED_IMMEDIATE:
- break;
- default:
- if (start_trigger & NI_GPCT_ARM_UNKNOWN) {
- /*
- * pass-through the least significant
- * bits so we can figure out what
- * select later
- */
- bits |= GI_HW_ARM_ENA |
- (GI_HW_ARM_SEL(start_trigger) &
- sel_mask);
- } else {
- return -EINVAL;
- }
- break;
- }
+ if (mask)
ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
- GI_HW_ARM_ENA | sel_mask, bits);
- }
+ GI_HW_ARM_ENA | mask, bits);
} else {
- command_transient_bits |= GI_DISARM;
+ transient_bits |= GI_DISARM;
}
ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
- 0, 0, command_transient_bits);
+ 0, 0, transient_bits);
return 0;
}
EXPORT_SYMBOL_GPL(ni_tio_arm);
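
ni_tio_arm() now takes a bool and validates hardware-arm triggers up front instead of BUG()ing later. A sketch of a software arm/disarm cycle (example_arm_cycle() is hypothetical):

static int example_arm_cycle(struct ni_gpct *counter)
{
	int ret;

	ret = ni_tio_arm(counter, true, NI_GPCT_ARM_IMMEDIATE);
	if (ret)
		return ret;
	/* ... let the counter run ... */
	return ni_tio_arm(counter, false, 0);	/* trigger ignored on disarm */
}
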
-static unsigned ni_660x_clk_src(unsigned int clock_source)
+static int ni_660x_clk_src(unsigned int clock_source, unsigned int *bits)
{
- unsigned clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
- unsigned ni_660x_clock;
- unsigned i;
+ unsigned int clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
+ unsigned int ni_660x_clock;
+ unsigned int i;
switch (clk_src) {
case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
@@ -583,18 +657,17 @@ static unsigned ni_660x_clk_src(unsigned int clock_source)
}
if (i <= NI_660X_MAX_SRC_PIN)
break;
- ni_660x_clock = 0;
- BUG();
- break;
+ return -EINVAL;
}
- return GI_SRC_SEL(ni_660x_clock);
+ *bits = GI_SRC_SEL(ni_660x_clock);
+ return 0;
}
-static unsigned ni_m_clk_src(unsigned int clock_source)
+static int ni_m_clk_src(unsigned int clock_source, unsigned int *bits)
{
- unsigned clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
- unsigned ni_m_series_clock;
- unsigned i;
+ unsigned int clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
+ unsigned int ni_m_series_clock;
+ unsigned int i;
switch (clk_src) {
case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
@@ -641,21 +714,18 @@ static unsigned ni_m_clk_src(unsigned int clock_source)
}
if (i <= NI_M_MAX_PFI_CHAN)
break;
- pr_err("invalid clock source 0x%lx\n",
- (unsigned long)clock_source);
- BUG();
- ni_m_series_clock = 0;
- break;
+ return -EINVAL;
}
- return GI_SRC_SEL(ni_m_series_clock);
+ *bits = GI_SRC_SEL(ni_m_series_clock);
+ return 0;
};
static void ni_tio_set_source_subselect(struct ni_gpct *counter,
unsigned int clock_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
+ unsigned int cidx = counter->counter_index;
+ unsigned int second_gate_reg = NITIO_GATE2_REG(cidx);
if (counter_dev->variant != ni_gpct_variant_m_series)
return;
@@ -674,8 +744,8 @@ static void ni_tio_set_source_subselect(struct ni_gpct *counter,
default:
return;
}
- write_register(counter, counter_dev->regs[second_gate_reg],
- second_gate_reg);
+ ni_tio_write(counter, counter_dev->regs[second_gate_reg],
+ second_gate_reg);
}
static int ni_tio_set_clock_src(struct ni_gpct *counter,
@@ -683,20 +753,28 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
unsigned int period_ns)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned bits = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int bits = 0;
+ int ret;
- /* FIXME: validate clock source */
switch (counter_dev->variant) {
case ni_gpct_variant_660x:
- bits |= ni_660x_clk_src(clock_source);
+ ret = ni_660x_clk_src(clock_source, &bits);
break;
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- bits |= ni_m_clk_src(clock_source);
+ ret = ni_m_clk_src(clock_source, &bits);
break;
}
+ if (ret) {
+ struct comedi_device *dev = counter_dev->dev;
+
+ dev_err(dev->class_dev, "invalid clock source 0x%x\n",
+ clock_source);
+ return ret;
+ }
+
if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT)
bits |= GI_SRC_POL_INVERT;
ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
@@ -722,28 +800,34 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
GI_PRESCALE_X8(counter_dev->variant), bits);
}
counter->clock_period_ps = period_ns * 1000;
- ni_tio_set_sync_mode(counter, 0);
+ ni_tio_set_sync_mode(counter);
return 0;
}
-static void ni_tio_get_clock_src(struct ni_gpct *counter,
- unsigned int *clock_source,
- unsigned int *period_ns)
+static int ni_tio_get_clock_src(struct ni_gpct *counter,
+ unsigned int *clock_source,
+ unsigned int *period_ns)
{
- uint64_t temp64;
-
- *clock_source = ni_tio_generic_clock_src_select(counter);
- temp64 = ni_tio_clock_period_ps(counter, *clock_source);
+ u64 temp64;
+ int ret;
+
+ ret = ni_tio_generic_clock_src_select(counter, clock_source);
+ if (ret)
+ return ret;
+ ret = ni_tio_clock_period_ps(counter, *clock_source, &temp64);
+ if (ret)
+ return ret;
do_div(temp64, 1000); /* ps to ns */
*period_ns = temp64;
+ return 0;
}
static int ni_660x_set_gate(struct ni_gpct *counter, unsigned int gate_source)
{
unsigned int chan = CR_CHAN(gate_source);
- unsigned cidx = counter->counter_index;
- unsigned gate_sel;
- unsigned i;
+ unsigned int cidx = counter->counter_index;
+ unsigned int gate_sel;
+ unsigned int i;
switch (chan) {
case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
@@ -782,9 +866,9 @@ static int ni_660x_set_gate(struct ni_gpct *counter, unsigned int gate_source)
static int ni_m_set_gate(struct ni_gpct *counter, unsigned int gate_source)
{
unsigned int chan = CR_CHAN(gate_source);
- unsigned cidx = counter->counter_index;
- unsigned gate_sel;
- unsigned i;
+ unsigned int cidx = counter->counter_index;
+ unsigned int gate_sel;
+ unsigned int i;
switch (chan) {
case NI_GPCT_TIMESTAMP_MUX_GATE_SELECT:
@@ -824,11 +908,11 @@ static int ni_m_set_gate(struct ni_gpct *counter, unsigned int gate_source)
static int ni_660x_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned int chan = CR_CHAN(gate_source);
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned gate2_sel;
- unsigned i;
+ unsigned int gate2_reg = NITIO_GATE2_REG(cidx);
+ unsigned int gate2_sel;
+ unsigned int i;
switch (chan) {
case NI_GPCT_SOURCE_PIN_i_GATE_SELECT:
@@ -863,17 +947,17 @@ static int ni_660x_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
counter_dev->regs[gate2_reg] |= GI_GATE2_MODE;
counter_dev->regs[gate2_reg] &= ~GI_GATE2_SEL_MASK;
counter_dev->regs[gate2_reg] |= GI_GATE2_SEL(gate2_sel);
- write_register(counter, counter_dev->regs[gate2_reg], gate2_reg);
+ ni_tio_write(counter, counter_dev->regs[gate2_reg], gate2_reg);
return 0;
}
static int ni_m_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned int chan = CR_CHAN(gate_source);
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned gate2_sel;
+ unsigned int gate2_reg = NITIO_GATE2_REG(cidx);
+ unsigned int gate2_sel;
/*
* FIXME: We don't know what the m-series second gate codes are,
@@ -887,20 +971,20 @@ static int ni_m_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
counter_dev->regs[gate2_reg] |= GI_GATE2_MODE;
counter_dev->regs[gate2_reg] &= ~GI_GATE2_SEL_MASK;
counter_dev->regs[gate2_reg] |= GI_GATE2_SEL(gate2_sel);
- write_register(counter, counter_dev->regs[gate2_reg], gate2_reg);
+ ni_tio_write(counter, counter_dev->regs[gate2_reg], gate2_reg);
return 0;
}
-int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
- unsigned int gate_source)
+int ni_tio_set_gate_src(struct ni_gpct *counter,
+ unsigned int gate, unsigned int src)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned int chan = CR_CHAN(gate_source);
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned mode = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int chan = CR_CHAN(src);
+ unsigned int gate2_reg = NITIO_GATE2_REG(cidx);
+ unsigned int mode = 0;
- switch (gate_index) {
+ switch (gate) {
case 0:
if (chan == NI_GPCT_DISABLED_GATE_SELECT) {
ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
@@ -908,9 +992,9 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
GI_GATING_DISABLED);
return 0;
}
- if (gate_source & CR_INVERT)
+ if (src & CR_INVERT)
mode |= GI_GATE_POL_INVERT;
- if (gate_source & CR_EDGE)
+ if (src & CR_EDGE)
mode |= GI_RISING_EDGE_GATING;
else
mode |= GI_LEVEL_GATING;
@@ -921,9 +1005,9 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- return ni_m_set_gate(counter, gate_source);
+ return ni_m_set_gate(counter, src);
case ni_gpct_variant_660x:
- return ni_660x_set_gate(counter, gate_source);
+ return ni_660x_set_gate(counter, src);
}
break;
case 1:
@@ -932,22 +1016,21 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
if (chan == NI_GPCT_DISABLED_GATE_SELECT) {
counter_dev->regs[gate2_reg] &= ~GI_GATE2_MODE;
- write_register(counter, counter_dev->regs[gate2_reg],
- gate2_reg);
+ ni_tio_write(counter, counter_dev->regs[gate2_reg],
+ gate2_reg);
return 0;
}
- if (gate_source & CR_INVERT)
+ if (src & CR_INVERT)
counter_dev->regs[gate2_reg] |= GI_GATE2_POL_INVERT;
else
counter_dev->regs[gate2_reg] &= ~GI_GATE2_POL_INVERT;
switch (counter_dev->variant) {
case ni_gpct_variant_m_series:
- return ni_m_set_gate2(counter, gate_source);
+ return ni_m_set_gate2(counter, src);
case ni_gpct_variant_660x:
- return ni_660x_set_gate2(counter, gate_source);
+ return ni_660x_set_gate2(counter, src);
default:
- BUG();
- break;
+ return -EINVAL;
}
break;
default:
@@ -957,11 +1040,11 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
}
EXPORT_SYMBOL_GPL(ni_tio_set_gate_src);
-static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index,
+static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned int index,
unsigned int source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned int abz_reg, shift, mask;
if (counter_dev->variant != ni_gpct_variant_m_series)
@@ -987,175 +1070,221 @@ static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index,
counter_dev->regs[abz_reg] &= ~mask;
counter_dev->regs[abz_reg] |= (source << shift) & mask;
- write_register(counter, counter_dev->regs[abz_reg], abz_reg);
+ ni_tio_write(counter, counter_dev->regs[abz_reg], abz_reg);
return 0;
}
-static unsigned ni_660x_gate_to_generic_gate(unsigned gate)
+static int ni_660x_gate_to_generic_gate(unsigned int gate, unsigned int *src)
{
- unsigned i;
+ unsigned int source;
+ unsigned int i;
switch (gate) {
case NI_660X_SRC_PIN_I_GATE_SEL:
- return NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+ source = NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+ break;
case NI_660X_GATE_PIN_I_GATE_SEL:
- return NI_GPCT_GATE_PIN_i_GATE_SELECT;
+ source = NI_GPCT_GATE_PIN_i_GATE_SELECT;
+ break;
case NI_660X_NEXT_SRC_GATE_SEL:
- return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ break;
case NI_660X_NEXT_OUT_GATE_SEL:
- return NI_GPCT_NEXT_OUT_GATE_SELECT;
+ source = NI_GPCT_NEXT_OUT_GATE_SELECT;
+ break;
case NI_660X_LOGIC_LOW_GATE_SEL:
- return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ break;
default:
for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
- if (gate == NI_660X_RTSI_GATE_SEL(i))
- return NI_GPCT_RTSI_GATE_SELECT(i);
+ if (gate == NI_660X_RTSI_GATE_SEL(i)) {
+ source = NI_GPCT_RTSI_GATE_SELECT(i);
+ break;
+ }
}
+ if (i <= NI_660X_MAX_RTSI_CHAN)
+ break;
for (i = 0; i <= NI_660X_MAX_GATE_PIN; ++i) {
- if (gate == NI_660X_PIN_GATE_SEL(i))
- return NI_GPCT_GATE_PIN_GATE_SELECT(i);
+ if (gate == NI_660X_PIN_GATE_SEL(i)) {
+ source = NI_GPCT_GATE_PIN_GATE_SELECT(i);
+ break;
+ }
}
- BUG();
- break;
+ if (i <= NI_660X_MAX_GATE_PIN)
+ break;
+ return -EINVAL;
}
+ *src = source;
return 0;
};
-static unsigned ni_m_gate_to_generic_gate(unsigned gate)
+static int ni_m_gate_to_generic_gate(unsigned int gate, unsigned int *src)
{
- unsigned i;
+ unsigned int source;
+ unsigned int i;
switch (gate) {
case NI_M_TIMESTAMP_MUX_GATE_SEL:
- return NI_GPCT_TIMESTAMP_MUX_GATE_SELECT;
+ source = NI_GPCT_TIMESTAMP_MUX_GATE_SELECT;
+ break;
case NI_M_AI_START2_GATE_SEL:
- return NI_GPCT_AI_START2_GATE_SELECT;
+ source = NI_GPCT_AI_START2_GATE_SELECT;
+ break;
case NI_M_PXI_STAR_TRIGGER_GATE_SEL:
- return NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT;
+ source = NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT;
+ break;
case NI_M_NEXT_OUT_GATE_SEL:
- return NI_GPCT_NEXT_OUT_GATE_SELECT;
+ source = NI_GPCT_NEXT_OUT_GATE_SELECT;
+ break;
case NI_M_AI_START1_GATE_SEL:
- return NI_GPCT_AI_START1_GATE_SELECT;
+ source = NI_GPCT_AI_START1_GATE_SELECT;
+ break;
case NI_M_NEXT_SRC_GATE_SEL:
- return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ break;
case NI_M_ANALOG_TRIG_OUT_GATE_SEL:
- return NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT;
+ source = NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT;
+ break;
case NI_M_LOGIC_LOW_GATE_SEL:
- return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ break;
default:
for (i = 0; i <= NI_M_MAX_RTSI_CHAN; ++i) {
- if (gate == NI_M_RTSI_GATE_SEL(i))
- return NI_GPCT_RTSI_GATE_SELECT(i);
+ if (gate == NI_M_RTSI_GATE_SEL(i)) {
+ source = NI_GPCT_RTSI_GATE_SELECT(i);
+ break;
+ }
}
+ if (i <= NI_M_MAX_RTSI_CHAN)
+ break;
for (i = 0; i <= NI_M_MAX_PFI_CHAN; ++i) {
- if (gate == NI_M_PFI_GATE_SEL(i))
- return NI_GPCT_PFI_GATE_SELECT(i);
+ if (gate == NI_M_PFI_GATE_SEL(i)) {
+ source = NI_GPCT_PFI_GATE_SELECT(i);
+ break;
+ }
}
- BUG();
- break;
+ if (i <= NI_M_MAX_PFI_CHAN)
+ break;
+ return -EINVAL;
}
+ *src = source;
return 0;
};
-static unsigned ni_660x_gate2_to_generic_gate(unsigned gate)
+static int ni_660x_gate2_to_generic_gate(unsigned int gate, unsigned int *src)
{
- unsigned i;
+ unsigned int source;
+ unsigned int i;
switch (gate) {
case NI_660X_SRC_PIN_I_GATE2_SEL:
- return NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+ source = NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+ break;
case NI_660X_UD_PIN_I_GATE2_SEL:
- return NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT;
+ source = NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT;
+ break;
case NI_660X_NEXT_SRC_GATE2_SEL:
- return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ break;
case NI_660X_NEXT_OUT_GATE2_SEL:
- return NI_GPCT_NEXT_OUT_GATE_SELECT;
+ source = NI_GPCT_NEXT_OUT_GATE_SELECT;
+ break;
case NI_660X_SELECTED_GATE2_SEL:
- return NI_GPCT_SELECTED_GATE_GATE_SELECT;
+ source = NI_GPCT_SELECTED_GATE_GATE_SELECT;
+ break;
case NI_660X_LOGIC_LOW_GATE2_SEL:
- return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ break;
default:
for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
- if (gate == NI_660X_RTSI_GATE2_SEL(i))
- return NI_GPCT_RTSI_GATE_SELECT(i);
+ if (gate == NI_660X_RTSI_GATE2_SEL(i)) {
+ source = NI_GPCT_RTSI_GATE_SELECT(i);
+ break;
+ }
}
+ if (i <= NI_660X_MAX_RTSI_CHAN)
+ break;
for (i = 0; i <= NI_660X_MAX_UP_DOWN_PIN; ++i) {
- if (gate == NI_660X_UD_PIN_GATE2_SEL(i))
- return NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i);
+ if (gate == NI_660X_UD_PIN_GATE2_SEL(i)) {
+ source = NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i);
+ break;
+ }
}
- BUG();
- break;
+ if (i <= NI_660X_MAX_UP_DOWN_PIN)
+ break;
+ return -EINVAL;
}
+ *src = source;
return 0;
};
-static unsigned ni_m_gate2_to_generic_gate(unsigned gate)
+static int ni_m_gate2_to_generic_gate(unsigned int gate, unsigned int *src)
{
/*
* FIXME: the second gate sources for the m series are undocumented,
* so we just return the raw bits for now.
*/
- switch (gate) {
- default:
- return gate;
- }
+ *src = gate;
return 0;
};
-static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index,
+static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned int gate_index,
unsigned int *gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned mode = ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx));
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned gate;
+ unsigned int cidx = counter->counter_index;
+ unsigned int mode;
+ unsigned int reg;
+ unsigned int gate;
+ int ret;
+
+ mode = ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx));
+ if (((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED) ||
+ (gate_index == 1 &&
+ !(counter_dev->regs[NITIO_GATE2_REG(cidx)] & GI_GATE2_MODE))) {
+ *gate_source = NI_GPCT_DISABLED_GATE_SELECT;
+ return 0;
+ }
switch (gate_index) {
case 0:
- if ((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED) {
- *gate_source = NI_GPCT_DISABLED_GATE_SELECT;
- return 0;
- }
-
- gate = GI_BITS_TO_GATE(ni_tio_get_soft_copy(counter,
- NITIO_INPUT_SEL_REG(cidx)));
+ reg = NITIO_INPUT_SEL_REG(cidx);
+ gate = GI_BITS_TO_GATE(ni_tio_get_soft_copy(counter, reg));
switch (counter_dev->variant) {
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- *gate_source = ni_m_gate_to_generic_gate(gate);
+ ret = ni_m_gate_to_generic_gate(gate, gate_source);
break;
case ni_gpct_variant_660x:
- *gate_source = ni_660x_gate_to_generic_gate(gate);
+ ret = ni_660x_gate_to_generic_gate(gate, gate_source);
break;
}
+ if (ret)
+ return ret;
if (mode & GI_GATE_POL_INVERT)
*gate_source |= CR_INVERT;
if ((mode & GI_GATING_MODE_MASK) != GI_LEVEL_GATING)
*gate_source |= CR_EDGE;
break;
case 1:
- if ((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED ||
- !(counter_dev->regs[gate2_reg] & GI_GATE2_MODE)) {
- *gate_source = NI_GPCT_DISABLED_GATE_SELECT;
- return 0;
- }
-
- gate = GI_BITS_TO_GATE2(counter_dev->regs[gate2_reg]);
+ reg = NITIO_GATE2_REG(cidx);
+ gate = GI_BITS_TO_GATE2(counter_dev->regs[reg]);
switch (counter_dev->variant) {
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- *gate_source = ni_m_gate2_to_generic_gate(gate);
+ ret = ni_m_gate2_to_generic_gate(gate, gate_source);
break;
case ni_gpct_variant_660x:
- *gate_source = ni_660x_gate2_to_generic_gate(gate);
+ ret = ni_660x_gate2_to_generic_gate(gate, gate_source);
break;
}
- if (counter_dev->regs[gate2_reg] & GI_GATE2_POL_INVERT)
+ if (ret)
+ return ret;
+ if (counter_dev->regs[reg] & GI_GATE2_POL_INVERT)
*gate_source |= CR_INVERT;
/* second gate can't have edge/level mode set independently */
if ((mode & GI_GATING_MODE_MASK) != GI_LEVEL_GATING)
@@ -1173,45 +1302,52 @@ int ni_tio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct ni_gpct *counter = s->private;
- unsigned cidx = counter->counter_index;
- unsigned status;
+ unsigned int cidx = counter->counter_index;
+ unsigned int status;
+ int ret = 0;
switch (data[0]) {
case INSN_CONFIG_SET_COUNTER_MODE:
- return ni_tio_set_counter_mode(counter, data[1]);
+ ret = ni_tio_set_counter_mode(counter, data[1]);
+ break;
case INSN_CONFIG_ARM:
- return ni_tio_arm(counter, 1, data[1]);
+ ret = ni_tio_arm(counter, true, data[1]);
+ break;
case INSN_CONFIG_DISARM:
- ni_tio_arm(counter, 0, 0);
- return 0;
+ ret = ni_tio_arm(counter, false, 0);
+ break;
case INSN_CONFIG_GET_COUNTER_STATUS:
data[1] = 0;
- status = read_register(counter, NITIO_SHARED_STATUS_REG(cidx));
+ status = ni_tio_read(counter, NITIO_SHARED_STATUS_REG(cidx));
if (status & GI_ARMED(cidx)) {
data[1] |= COMEDI_COUNTER_ARMED;
if (status & GI_COUNTING(cidx))
data[1] |= COMEDI_COUNTER_COUNTING;
}
data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING;
- return 0;
+ break;
case INSN_CONFIG_SET_CLOCK_SRC:
- return ni_tio_set_clock_src(counter, data[1], data[2]);
+ ret = ni_tio_set_clock_src(counter, data[1], data[2]);
+ break;
case INSN_CONFIG_GET_CLOCK_SRC:
- ni_tio_get_clock_src(counter, &data[1], &data[2]);
- return 0;
+ ret = ni_tio_get_clock_src(counter, &data[1], &data[2]);
+ break;
case INSN_CONFIG_SET_GATE_SRC:
- return ni_tio_set_gate_src(counter, data[1], data[2]);
+ ret = ni_tio_set_gate_src(counter, data[1], data[2]);
+ break;
case INSN_CONFIG_GET_GATE_SRC:
- return ni_tio_get_gate_src(counter, data[1], &data[2]);
+ ret = ni_tio_get_gate_src(counter, data[1], &data[2]);
+ break;
case INSN_CONFIG_SET_OTHER_SRC:
- return ni_tio_set_other_src(counter, data[1], data[2]);
+ ret = ni_tio_set_other_src(counter, data[1], data[2]);
+ break;
case INSN_CONFIG_RESET:
ni_tio_reset_count_and_disarm(counter);
- return 0;
- default:
break;
+ default:
+ return -EINVAL;
}
- return -EINVAL;
+ return ret ? ret : insn->n;
}
EXPORT_SYMBOL_GPL(ni_tio_insn_config);
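
The rewritten dispatch funnels every branch through ret so the function can honour the comedi convention of returning insn->n (the number of data elements processed) on success and a negative errno on failure. A hypothetical caller-side view (example_disarm() is not part of the patch):

/* data[0] selects the operation; data[1..] carry its arguments */
static int example_disarm(struct comedi_device *dev,
			  struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	data[0] = INSN_CONFIG_DISARM;
	return ni_tio_insn_config(dev, s, insn, data); /* insn->n or -errno */
}
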
@@ -1219,7 +1355,7 @@ static unsigned int ni_tio_read_sw_save_reg(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct ni_gpct *counter = s->private;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned int val;
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_SAVE_TRACE, 0);
@@ -1235,9 +1371,9 @@ static unsigned int ni_tio_read_sw_save_reg(struct comedi_device *dev,
* will be correct since the count value will definitely have latched
* by then.
*/
- val = read_register(counter, NITIO_SW_SAVE_REG(cidx));
- if (val != read_register(counter, NITIO_SW_SAVE_REG(cidx)))
- val = read_register(counter, NITIO_SW_SAVE_REG(cidx));
+ val = ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx));
+ if (val != ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx)))
+ val = ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx));
return val;
}
@@ -1250,7 +1386,7 @@ int ni_tio_insn_read(struct comedi_device *dev,
struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int channel = CR_CHAN(insn->chanspec);
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
int i;
for (i = 0; i < insn->n; i++) {
@@ -1270,11 +1406,10 @@ int ni_tio_insn_read(struct comedi_device *dev,
}
EXPORT_SYMBOL_GPL(ni_tio_insn_read);
-static unsigned ni_tio_next_load_register(struct ni_gpct *counter)
+static unsigned int ni_tio_next_load_register(struct ni_gpct *counter)
{
- unsigned cidx = counter->counter_index;
- const unsigned bits =
- read_register(counter, NITIO_SHARED_STATUS_REG(cidx));
+ unsigned int cidx = counter->counter_index;
+ unsigned int bits = ni_tio_read(counter, NITIO_SHARED_STATUS_REG(cidx));
return (bits & GI_NEXT_LOAD_SRC(cidx))
? NITIO_LOADB_REG(cidx)
@@ -1288,9 +1423,9 @@ int ni_tio_insn_write(struct comedi_device *dev,
{
struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned channel = CR_CHAN(insn->chanspec);
- unsigned cidx = counter->counter_index;
- unsigned load_reg;
+ unsigned int channel = CR_CHAN(insn->chanspec);
+ unsigned int cidx = counter->counter_index;
+ unsigned int load_reg;
if (insn->n < 1)
return 0;
@@ -1306,19 +1441,19 @@ int ni_tio_insn_write(struct comedi_device *dev,
* load register is already selected.
*/
load_reg = ni_tio_next_load_register(counter);
- write_register(counter, data[0], load_reg);
+ ni_tio_write(counter, data[0], load_reg);
ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
0, 0, GI_LOAD);
/* restore load reg */
- write_register(counter, counter_dev->regs[load_reg], load_reg);
+ ni_tio_write(counter, counter_dev->regs[load_reg], load_reg);
break;
case 1:
counter_dev->regs[NITIO_LOADA_REG(cidx)] = data[0];
- write_register(counter, data[0], NITIO_LOADA_REG(cidx));
+ ni_tio_write(counter, data[0], NITIO_LOADA_REG(cidx));
break;
case 2:
counter_dev->regs[NITIO_LOADB_REG(cidx)] = data[0];
- write_register(counter, data[0], NITIO_LOADB_REG(cidx));
+ ni_tio_write(counter, data[0], NITIO_LOADB_REG(cidx));
break;
default:
return -EINVAL;
@@ -1330,13 +1465,13 @@ EXPORT_SYMBOL_GPL(ni_tio_insn_write);
void ni_tio_init_counter(struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
ni_tio_reset_count_and_disarm(counter);
/* initialize counter registers */
counter_dev->regs[NITIO_AUTO_INC_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_AUTO_INC_REG(cidx));
+ ni_tio_write(counter, 0x0, NITIO_AUTO_INC_REG(cidx));
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
~0, GI_SYNC_GATE);
@@ -1344,10 +1479,10 @@ void ni_tio_init_counter(struct ni_gpct *counter)
ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), ~0, 0);
counter_dev->regs[NITIO_LOADA_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_LOADA_REG(cidx));
+ ni_tio_write(counter, 0x0, NITIO_LOADA_REG(cidx));
counter_dev->regs[NITIO_LOADB_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_LOADB_REG(cidx));
+ ni_tio_write(counter, 0x0, NITIO_LOADB_REG(cidx));
ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), ~0, 0);
@@ -1356,7 +1491,7 @@ void ni_tio_init_counter(struct ni_gpct *counter)
if (ni_tio_has_gate2_registers(counter_dev)) {
counter_dev->regs[NITIO_GATE2_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_GATE2_REG(cidx));
+ ni_tio_write(counter, 0x0, NITIO_GATE2_REG(cidx));
}
ni_tio_set_bits(counter, NITIO_DMA_CFG_REG(cidx), ~0, 0x0);
@@ -1367,17 +1502,17 @@ EXPORT_SYMBOL_GPL(ni_tio_init_counter);
struct ni_gpct_device *
ni_gpct_device_construct(struct comedi_device *dev,
- void (*write_register)(struct ni_gpct *counter,
- unsigned bits,
- enum ni_gpct_register reg),
- unsigned (*read_register)(struct ni_gpct *counter,
- enum ni_gpct_register reg),
+ void (*write)(struct ni_gpct *counter,
+ unsigned int value,
+ enum ni_gpct_register reg),
+ unsigned int (*read)(struct ni_gpct *counter,
+ enum ni_gpct_register reg),
enum ni_gpct_variant variant,
- unsigned num_counters)
+ unsigned int num_counters)
{
struct ni_gpct_device *counter_dev;
struct ni_gpct *counter;
- unsigned i;
+ unsigned int i;
if (num_counters == 0)
return NULL;
@@ -1387,8 +1522,8 @@ ni_gpct_device_construct(struct comedi_device *dev,
return NULL;
counter_dev->dev = dev;
- counter_dev->write_register = write_register;
- counter_dev->read_register = read_register;
+ counter_dev->write = write;
+ counter_dev->read = read;
counter_dev->variant = variant;
spin_lock_init(&counter_dev->regs_lock);
@@ -1413,7 +1548,7 @@ EXPORT_SYMBOL_GPL(ni_gpct_device_construct);
void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev)
{
- if (!counter_dev->counters)
+ if (!counter_dev)
return;
kfree(counter_dev->counters);
kfree(counter_dev);
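
ni_gpct_device_destroy() now guards against a NULL device rather than a NULL counters array, so error paths may call it unconditionally. A lifecycle sketch with the shortened callback names (my_write(), my_read(), and example_setup() are hypothetical board-driver accessors):

static void my_write(struct ni_gpct *counter, unsigned int value,
		     enum ni_gpct_register reg)
{
	/* board-specific register write */
}

static unsigned int my_read(struct ni_gpct *counter,
			    enum ni_gpct_register reg)
{
	return 0;	/* board-specific register read */
}

static int example_setup(struct comedi_device *dev)
{
	struct ni_gpct_device *gpct;

	gpct = ni_gpct_device_construct(dev, my_write, my_read,
					ni_gpct_variant_m_series, 4);
	if (!gpct)
		return -ENOMEM;
	/* ... use the counters ... */
	ni_gpct_device_destroy(gpct);	/* NULL-safe after this patch */
	return 0;
}
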
diff --git a/drivers/staging/comedi/drivers/ni_tio.h b/drivers/staging/comedi/drivers/ni_tio.h
index 25aedd0e5..4978358f9 100644
--- a/drivers/staging/comedi/drivers/ni_tio.h
+++ b/drivers/staging/comedi/drivers/ni_tio.h
@@ -1,29 +1,24 @@
/*
- drivers/ni_tio.h
- Header file for NI general purpose counter support code (ni_tio.c)
-
- COMEDI - Linux Control and Measurement Device Interface
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Header file for NI general purpose counter support code (ni_tio.c)
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _COMEDI_NI_TIO_H
#define _COMEDI_NI_TIO_H
#include "../comedidev.h"
-/* forward declarations */
-struct mite_struct;
-struct ni_gpct_device;
-
enum ni_gpct_register {
NITIO_G0_AUTO_INC,
NITIO_G1_AUTO_INC,
@@ -106,35 +101,34 @@ enum ni_gpct_variant {
struct ni_gpct {
struct ni_gpct_device *counter_dev;
- unsigned counter_index;
- unsigned chip_index;
- uint64_t clock_period_ps; /* clock period in picoseconds */
+ unsigned int counter_index;
+ unsigned int chip_index;
+ u64 clock_period_ps; /* clock period in picoseconds */
struct mite_channel *mite_chan;
- spinlock_t lock;
+ spinlock_t lock; /* protects 'mite_chan' */
};
struct ni_gpct_device {
struct comedi_device *dev;
- void (*write_register)(struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg);
- unsigned (*read_register)(struct ni_gpct *counter,
- enum ni_gpct_register reg);
+ void (*write)(struct ni_gpct *, unsigned int value,
+ enum ni_gpct_register);
+ unsigned int (*read)(struct ni_gpct *, enum ni_gpct_register);
enum ni_gpct_variant variant;
struct ni_gpct *counters;
- unsigned num_counters;
- unsigned regs[NITIO_NUM_REGS];
- spinlock_t regs_lock;
+ unsigned int num_counters;
+ unsigned int regs[NITIO_NUM_REGS];
+ spinlock_t regs_lock; /* protects 'regs' */
};
struct ni_gpct_device *
ni_gpct_device_construct(struct comedi_device *,
- void (*write_register)(struct ni_gpct *,
- unsigned bits,
- enum ni_gpct_register),
- unsigned (*read_register)(struct ni_gpct *,
- enum ni_gpct_register),
+ void (*write)(struct ni_gpct *,
+ unsigned int value,
+ enum ni_gpct_register),
+ unsigned int (*read)(struct ni_gpct *,
+ enum ni_gpct_register),
enum ni_gpct_variant,
- unsigned num_counters);
+ unsigned int num_counters);
void ni_gpct_device_destroy(struct ni_gpct_device *);
void ni_tio_init_counter(struct ni_gpct *);
int ni_tio_insn_read(struct comedi_device *, struct comedi_subdevice *,
diff --git a/drivers/staging/comedi/drivers/ni_tio_internal.h b/drivers/staging/comedi/drivers/ni_tio_internal.h
index 2bceae493..b15b10833 100644
--- a/drivers/staging/comedi/drivers/ni_tio_internal.h
+++ b/drivers/staging/comedi/drivers/ni_tio_internal.h
@@ -1,20 +1,19 @@
/*
- drivers/ni_tio_internal.h
- Header file for NI general purpose counter support code (ni_tio.c and
- ni_tiocmd.c)
-
- COMEDI - Linux Control and Measurement Device Interface
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Header file for NI general purpose counter support code (ni_tio.c and
+ * ni_tiocmd.c)
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _COMEDI_NI_TIO_INTERNAL_H
#define _COMEDI_NI_TIO_INTERNAL_H
@@ -24,68 +23,73 @@
#define NITIO_AUTO_INC_REG(x) (NITIO_G0_AUTO_INC + (x))
#define GI_AUTO_INC_MASK 0xff
#define NITIO_CMD_REG(x) (NITIO_G0_CMD + (x))
-#define GI_ARM (1 << 0)
-#define GI_SAVE_TRACE (1 << 1)
-#define GI_LOAD (1 << 2)
-#define GI_DISARM (1 << 4)
+#define GI_ARM BIT(0)
+#define GI_SAVE_TRACE BIT(1)
+#define GI_LOAD BIT(2)
+#define GI_DISARM BIT(4)
#define GI_CNT_DIR(x) (((x) & 0x3) << 5)
-#define GI_CNT_DIR_MASK (3 << 5)
-#define GI_WRITE_SWITCH (1 << 7)
-#define GI_SYNC_GATE (1 << 8)
-#define GI_LITTLE_BIG_ENDIAN (1 << 9)
-#define GI_BANK_SWITCH_START (1 << 10)
-#define GI_BANK_SWITCH_MODE (1 << 11)
-#define GI_BANK_SWITCH_ENABLE (1 << 12)
-#define GI_ARM_COPY (1 << 13)
-#define GI_SAVE_TRACE_COPY (1 << 14)
-#define GI_DISARM_COPY (1 << 15)
+#define GI_CNT_DIR_MASK GI_CNT_DIR(3)
+#define GI_WRITE_SWITCH BIT(7)
+#define GI_SYNC_GATE BIT(8)
+#define GI_LITTLE_BIG_ENDIAN BIT(9)
+#define GI_BANK_SWITCH_START BIT(10)
+#define GI_BANK_SWITCH_MODE BIT(11)
+#define GI_BANK_SWITCH_ENABLE BIT(12)
+#define GI_ARM_COPY BIT(13)
+#define GI_SAVE_TRACE_COPY BIT(14)
+#define GI_DISARM_COPY BIT(15)
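
BIT(n) from <linux/bitops.h> expands to (1UL << (n)), so the conversions in this header are value-preserving, and deriving each mask from its field macro (e.g. GI_CNT_DIR_MASK as GI_CNT_DIR(3)) keeps every shift written exactly once. A compile-time sanity sketch, assuming BUILD_BUG_ON from <linux/bug.h>:

static inline void gi_cmd_bits_sanity(void)
{
	BUILD_BUG_ON(GI_ARM != (1 << 0));
	BUILD_BUG_ON(GI_DISARM != (1 << 4));
	BUILD_BUG_ON(GI_CNT_DIR_MASK != (3 << 5));
}
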
#define NITIO_HW_SAVE_REG(x) (NITIO_G0_HW_SAVE + (x))
#define NITIO_SW_SAVE_REG(x) (NITIO_G0_SW_SAVE + (x))
#define NITIO_MODE_REG(x) (NITIO_G0_MODE + (x))
-#define GI_GATING_DISABLED (0 << 0)
-#define GI_LEVEL_GATING (1 << 0)
-#define GI_RISING_EDGE_GATING (2 << 0)
-#define GI_FALLING_EDGE_GATING (3 << 0)
-#define GI_GATING_MODE_MASK (3 << 0)
-#define GI_GATE_ON_BOTH_EDGES (1 << 2)
-#define GI_EDGE_GATE_STARTS_STOPS (0 << 3)
-#define GI_EDGE_GATE_STOPS_STARTS (1 << 3)
-#define GI_EDGE_GATE_STARTS (2 << 3)
-#define GI_EDGE_GATE_NO_STARTS_OR_STOPS (3 << 3)
-#define GI_EDGE_GATE_MODE_MASK (3 << 3)
-#define GI_STOP_ON_GATE (0 << 5)
-#define GI_STOP_ON_GATE_OR_TC (1 << 5)
-#define GI_STOP_ON_GATE_OR_SECOND_TC (2 << 5)
-#define GI_STOP_MODE_MASK (3 << 5)
-#define GI_LOAD_SRC_SEL (1 << 7)
-#define GI_OUTPUT_TC_PULSE (1 << 8)
-#define GI_OUTPUT_TC_TOGGLE (2 << 8)
-#define GI_OUTPUT_TC_OR_GATE_TOGGLE (3 << 8)
-#define GI_OUTPUT_MODE_MASK (3 << 8)
-#define GI_NO_HARDWARE_DISARM (0 << 10)
-#define GI_DISARM_AT_TC (1 << 10)
-#define GI_DISARM_AT_GATE (2 << 10)
-#define GI_DISARM_AT_TC_OR_GATE (3 << 10)
-#define GI_COUNTING_ONCE_MASK (3 << 10)
-#define GI_LOADING_ON_TC (1 << 12)
-#define GI_GATE_POL_INVERT (1 << 13)
-#define GI_LOADING_ON_GATE (1 << 14)
-#define GI_RELOAD_SRC_SWITCHING (1 << 15)
+#define GI_GATING_MODE(x) (((x) & 0x3) << 0)
+#define GI_GATING_DISABLED GI_GATING_MODE(0)
+#define GI_LEVEL_GATING GI_GATING_MODE(1)
+#define GI_RISING_EDGE_GATING GI_GATING_MODE(2)
+#define GI_FALLING_EDGE_GATING GI_GATING_MODE(3)
+#define GI_GATING_MODE_MASK GI_GATING_MODE(3)
+#define GI_GATE_ON_BOTH_EDGES BIT(2)
+#define GI_EDGE_GATE_MODE(x) (((x) & 0x3) << 3)
+#define GI_EDGE_GATE_STARTS_STOPS GI_EDGE_GATE_MODE(0)
+#define GI_EDGE_GATE_STOPS_STARTS GI_EDGE_GATE_MODE(1)
+#define GI_EDGE_GATE_STARTS GI_EDGE_GATE_MODE(2)
+#define GI_EDGE_GATE_NO_STARTS_OR_STOPS GI_EDGE_GATE_MODE(3)
+#define GI_EDGE_GATE_MODE_MASK GI_EDGE_GATE_MODE(3)
+#define GI_STOP_MODE(x) (((x) & 0x3) << 5)
+#define GI_STOP_ON_GATE GI_STOP_MODE(0)
+#define GI_STOP_ON_GATE_OR_TC GI_STOP_MODE(1)
+#define GI_STOP_ON_GATE_OR_SECOND_TC GI_STOP_MODE(2)
+#define GI_STOP_MODE_MASK GI_STOP_MODE(3)
+#define GI_LOAD_SRC_SEL BIT(7)
+#define GI_OUTPUT_MODE(x) (((x) & 0x3) << 8)
+#define GI_OUTPUT_TC_PULSE GI_OUTPUT_MODE(1)
+#define GI_OUTPUT_TC_TOGGLE GI_OUTPUT_MODE(2)
+#define GI_OUTPUT_TC_OR_GATE_TOGGLE GI_OUTPUT_MODE(3)
+#define GI_OUTPUT_MODE_MASK GI_OUTPUT_MODE(3)
+#define GI_COUNTING_ONCE(x) (((x) & 0x3) << 10)
+#define GI_NO_HARDWARE_DISARM GI_COUNTING_ONCE(0)
+#define GI_DISARM_AT_TC GI_COUNTING_ONCE(1)
+#define GI_DISARM_AT_GATE GI_COUNTING_ONCE(2)
+#define GI_DISARM_AT_TC_OR_GATE GI_COUNTING_ONCE(3)
+#define GI_COUNTING_ONCE_MASK GI_COUNTING_ONCE(3)
+#define GI_LOADING_ON_TC BIT(12)
+#define GI_GATE_POL_INVERT BIT(13)
+#define GI_LOADING_ON_GATE BIT(14)
+#define GI_RELOAD_SRC_SWITCHING BIT(15)
#define NITIO_LOADA_REG(x) (NITIO_G0_LOADA + (x))
#define NITIO_LOADB_REG(x) (NITIO_G0_LOADB + (x))
#define NITIO_INPUT_SEL_REG(x) (NITIO_G0_INPUT_SEL + (x))
-#define GI_READ_ACKS_IRQ (1 << 0)
-#define GI_WRITE_ACKS_IRQ (1 << 1)
+#define GI_READ_ACKS_IRQ BIT(0)
+#define GI_WRITE_ACKS_IRQ BIT(1)
#define GI_BITS_TO_SRC(x) (((x) >> 2) & 0x1f)
#define GI_SRC_SEL(x) (((x) & 0x1f) << 2)
-#define GI_SRC_SEL_MASK (0x1f << 2)
+#define GI_SRC_SEL_MASK GI_SRC_SEL(0x1f)
#define GI_BITS_TO_GATE(x) (((x) >> 7) & 0x1f)
#define GI_GATE_SEL(x) (((x) & 0x1f) << 7)
-#define GI_GATE_SEL_MASK (0x1f << 7)
-#define GI_GATE_SEL_LOAD_SRC (1 << 12)
-#define GI_OR_GATE (1 << 13)
-#define GI_OUTPUT_POL_INVERT (1 << 14)
-#define GI_SRC_POL_INVERT (1 << 15)
+#define GI_GATE_SEL_MASK GI_GATE_SEL(0x1f)
+#define GI_GATE_SEL_LOAD_SRC BIT(12)
+#define GI_OR_GATE BIT(13)
+#define GI_OUTPUT_POL_INVERT BIT(14)
+#define GI_SRC_POL_INVERT BIT(15)
#define NITIO_CNT_MODE_REG(x) (NITIO_G0_CNT_MODE + (x))
#define GI_CNT_MODE(x) (((x) & 0x7) << 0)
#define GI_CNT_MODE_NORMAL GI_CNT_MODE(0)
@@ -94,152 +98,84 @@
#define GI_CNT_MODE_QUADX4 GI_CNT_MODE(3)
#define GI_CNT_MODE_TWO_PULSE GI_CNT_MODE(4)
#define GI_CNT_MODE_SYNC_SRC GI_CNT_MODE(6)
-#define GI_CNT_MODE_MASK (7 << 0)
-#define GI_INDEX_MODE (1 << 4)
+#define GI_CNT_MODE_MASK GI_CNT_MODE(7)
+#define GI_INDEX_MODE BIT(4)
#define GI_INDEX_PHASE(x) (((x) & 0x3) << 5)
-#define GI_INDEX_PHASE_MASK (3 << 5)
-#define GI_HW_ARM_ENA (1 << 7)
+#define GI_INDEX_PHASE_MASK GI_INDEX_PHASE(3)
+#define GI_HW_ARM_ENA BIT(7)
#define GI_HW_ARM_SEL(x) ((x) << 8)
-#define GI_660X_HW_ARM_SEL_MASK (0x7 << 8)
-#define GI_M_HW_ARM_SEL_MASK (0x1f << 8)
-#define GI_660X_PRESCALE_X8 (1 << 12)
-#define GI_M_PRESCALE_X8 (1 << 13)
-#define GI_660X_ALT_SYNC (1 << 13)
-#define GI_M_ALT_SYNC (1 << 14)
-#define GI_660X_PRESCALE_X2 (1 << 14)
-#define GI_M_PRESCALE_X2 (1 << 15)
+#define GI_660X_HW_ARM_SEL_MASK GI_HW_ARM_SEL(0x7)
+#define GI_M_HW_ARM_SEL_MASK GI_HW_ARM_SEL(0x1f)
+#define GI_660X_PRESCALE_X8 BIT(12)
+#define GI_M_PRESCALE_X8 BIT(13)
+#define GI_660X_ALT_SYNC BIT(13)
+#define GI_M_ALT_SYNC BIT(14)
+#define GI_660X_PRESCALE_X2 BIT(14)
+#define GI_M_PRESCALE_X2 BIT(15)
#define NITIO_GATE2_REG(x) (NITIO_G0_GATE2 + (x))
-#define GI_GATE2_MODE (1 << 0)
+#define GI_GATE2_MODE BIT(0)
#define GI_BITS_TO_GATE2(x) (((x) >> 7) & 0x1f)
#define GI_GATE2_SEL(x) (((x) & 0x1f) << 7)
-#define GI_GATE2_SEL_MASK (0x1f << 7)
-#define GI_GATE2_POL_INVERT (1 << 13)
-#define GI_GATE2_SUBSEL (1 << 14)
-#define GI_SRC_SUBSEL (1 << 15)
+#define GI_GATE2_SEL_MASK GI_GATE2_SEL(0x1f)
+#define GI_GATE2_POL_INVERT BIT(13)
+#define GI_GATE2_SUBSEL BIT(14)
+#define GI_SRC_SUBSEL BIT(15)
#define NITIO_SHARED_STATUS_REG(x) (NITIO_G01_STATUS + ((x) / 2))
-#define GI_SAVE(x) (((x) % 2) ? (1 << 1) : (1 << 0))
-#define GI_COUNTING(x) (((x) % 2) ? (1 << 3) : (1 << 2))
-#define GI_NEXT_LOAD_SRC(x) (((x) % 2) ? (1 << 5) : (1 << 4))
-#define GI_STALE_DATA(x) (((x) % 2) ? (1 << 7) : (1 << 6))
-#define GI_ARMED(x) (((x) % 2) ? (1 << 9) : (1 << 8))
-#define GI_NO_LOAD_BETWEEN_GATES(x) (((x) % 2) ? (1 << 11) : (1 << 10))
-#define GI_TC_ERROR(x) (((x) % 2) ? (1 << 13) : (1 << 12))
-#define GI_GATE_ERROR(x) (((x) % 2) ? (1 << 15) : (1 << 14))
+#define GI_SAVE(x) (((x) % 2) ? BIT(1) : BIT(0))
+#define GI_COUNTING(x) (((x) % 2) ? BIT(3) : BIT(2))
+#define GI_NEXT_LOAD_SRC(x) (((x) % 2) ? BIT(5) : BIT(4))
+#define GI_STALE_DATA(x) (((x) % 2) ? BIT(7) : BIT(6))
+#define GI_ARMED(x) (((x) % 2) ? BIT(9) : BIT(8))
+#define GI_NO_LOAD_BETWEEN_GATES(x) (((x) % 2) ? BIT(11) : BIT(10))
+#define GI_TC_ERROR(x) (((x) % 2) ? BIT(13) : BIT(12))
+#define GI_GATE_ERROR(x) (((x) % 2) ? BIT(15) : BIT(14))
#define NITIO_RESET_REG(x) (NITIO_G01_RESET + ((x) / 2))
-#define GI_RESET(x) (1 << (2 + ((x) % 2)))
+#define GI_RESET(x) BIT(2 + ((x) % 2))
#define NITIO_STATUS1_REG(x) (NITIO_G01_STATUS1 + ((x) / 2))
#define NITIO_STATUS2_REG(x) (NITIO_G01_STATUS2 + ((x) / 2))
-#define GI_OUTPUT(x) (((x) % 2) ? (1 << 1) : (1 << 0))
-#define GI_HW_SAVE(x) (((x) % 2) ? (1 << 13) : (1 << 12))
-#define GI_PERMANENT_STALE(x) (((x) % 2) ? (1 << 15) : (1 << 14))
+#define GI_OUTPUT(x) (((x) % 2) ? BIT(1) : BIT(0))
+#define GI_HW_SAVE(x) (((x) % 2) ? BIT(13) : BIT(12))
+#define GI_PERMANENT_STALE(x) (((x) % 2) ? BIT(15) : BIT(14))
#define NITIO_DMA_CFG_REG(x) (NITIO_G0_DMA_CFG + (x))
-#define GI_DMA_ENABLE (1 << 0)
-#define GI_DMA_WRITE (1 << 1)
-#define GI_DMA_INT_ENA (1 << 2)
-#define GI_DMA_RESET (1 << 3)
-#define GI_DMA_BANKSW_ERROR (1 << 4)
+#define GI_DMA_ENABLE BIT(0)
+#define GI_DMA_WRITE BIT(1)
+#define GI_DMA_INT_ENA BIT(2)
+#define GI_DMA_RESET BIT(3)
+#define GI_DMA_BANKSW_ERROR BIT(4)
#define NITIO_DMA_STATUS_REG(x) (NITIO_G0_DMA_STATUS + (x))
-#define GI_DMA_READBANK (1 << 13)
-#define GI_DRQ_ERROR (1 << 14)
-#define GI_DRQ_STATUS (1 << 15)
+#define GI_DMA_READBANK BIT(13)
+#define GI_DRQ_ERROR BIT(14)
+#define GI_DRQ_STATUS BIT(15)
#define NITIO_ABZ_REG(x) (NITIO_G0_ABZ + (x))
#define NITIO_INT_ACK_REG(x) (NITIO_G0_INT_ACK + (x))
-#define GI_GATE_ERROR_CONFIRM(x) (((x) % 2) ? (1 << 1) : (1 << 5))
-#define GI_TC_ERROR_CONFIRM(x) (((x) % 2) ? (1 << 2) : (1 << 6))
-#define GI_TC_INTERRUPT_ACK (1 << 14)
-#define GI_GATE_INTERRUPT_ACK (1 << 15)
+#define GI_GATE_ERROR_CONFIRM(x) (((x) % 2) ? BIT(1) : BIT(5))
+#define GI_TC_ERROR_CONFIRM(x) (((x) % 2) ? BIT(2) : BIT(6))
+#define GI_TC_INTERRUPT_ACK BIT(14)
+#define GI_GATE_INTERRUPT_ACK BIT(15)
#define NITIO_STATUS_REG(x) (NITIO_G0_STATUS + (x))
-#define GI_GATE_INTERRUPT (1 << 2)
-#define GI_TC (1 << 3)
-#define GI_INTERRUPT (1 << 15)
+#define GI_GATE_INTERRUPT BIT(2)
+#define GI_TC BIT(3)
+#define GI_INTERRUPT BIT(15)
#define NITIO_INT_ENA_REG(x) (NITIO_G0_INT_ENA + (x))
-#define GI_TC_INTERRUPT_ENABLE(x) (((x) % 2) ? (1 << 9) : (1 << 6))
-#define GI_GATE_INTERRUPT_ENABLE(x) (((x) % 2) ? (1 << 10) : (1 << 8))
-
-static inline void write_register(struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg)
-{
- BUG_ON(reg >= NITIO_NUM_REGS);
- counter->counter_dev->write_register(counter, bits, reg);
-}
-
-static inline unsigned read_register(struct ni_gpct *counter,
- enum ni_gpct_register reg)
-{
- BUG_ON(reg >= NITIO_NUM_REGS);
- return counter->counter_dev->read_register(counter, reg);
-}
+#define GI_TC_INTERRUPT_ENABLE(x) (((x) % 2) ? BIT(9) : BIT(6))
+#define GI_GATE_INTERRUPT_ENABLE(x) (((x) % 2) ? BIT(10) : BIT(8))
-static inline int ni_tio_counting_mode_registers_present(const struct
- ni_gpct_device
- *counter_dev)
-{
- switch (counter_dev->variant) {
- case ni_gpct_variant_e_series:
- return 0;
- case ni_gpct_variant_m_series:
- case ni_gpct_variant_660x:
- return 1;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline void ni_tio_set_bits_transient(struct ni_gpct *counter,
- enum ni_gpct_register
- register_index, unsigned bit_mask,
- unsigned bit_values,
- unsigned transient_bit_values)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned long flags;
-
- BUG_ON(register_index >= NITIO_NUM_REGS);
- spin_lock_irqsave(&counter_dev->regs_lock, flags);
- counter_dev->regs[register_index] &= ~bit_mask;
- counter_dev->regs[register_index] |= (bit_values & bit_mask);
- write_register(counter,
- counter_dev->regs[register_index] | transient_bit_values,
- register_index);
- mmiowb();
- spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
-}
+void ni_tio_write(struct ni_gpct *, unsigned int value, enum ni_gpct_register);
+unsigned int ni_tio_read(struct ni_gpct *, enum ni_gpct_register);
-/* ni_tio_set_bits( ) is for safely writing to registers whose bits may be
- * twiddled in interrupt context, or whose software copy may be read in
- * interrupt context.
- */
-static inline void ni_tio_set_bits(struct ni_gpct *counter,
- enum ni_gpct_register register_index,
- unsigned bit_mask, unsigned bit_values)
+static inline bool
+ni_tio_counting_mode_registers_present(const struct ni_gpct_device *counter_dev)
{
- ni_tio_set_bits_transient(counter, register_index, bit_mask, bit_values,
- 0x0);
+ /* m series and 660x variants have counting mode registers */
+ return counter_dev->variant != ni_gpct_variant_e_series;
}
-/* ni_tio_get_soft_copy( ) is for safely reading the software copy of a register
-whose bits might be modified in interrupt context, or whose software copy
-might need to be read in interrupt context.
-*/
-static inline unsigned ni_tio_get_soft_copy(const struct ni_gpct *counter,
- enum ni_gpct_register
- register_index)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned long flags;
- unsigned value;
-
- BUG_ON(register_index >= NITIO_NUM_REGS);
- spin_lock_irqsave(&counter_dev->regs_lock, flags);
- value = counter_dev->regs[register_index];
- spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
- return value;
-}
+void ni_tio_set_bits(struct ni_gpct *, enum ni_gpct_register reg,
+ unsigned int mask, unsigned int value);
+unsigned int ni_tio_get_soft_copy(const struct ni_gpct *,
+ enum ni_gpct_register reg);
-int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger);
-int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
- unsigned int gate_source);
+int ni_tio_arm(struct ni_gpct *, bool arm, unsigned int start_trigger);
+int ni_tio_set_gate_src(struct ni_gpct *, unsigned int gate, unsigned int src);
#endif /* _COMEDI_NI_TIO_INTERNAL_H */
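
Note: the header cleanup above is mechanical — every open-coded (1 << n) becomes the kernel's BIT(n), and derived masks are re-expressed through the field helpers they belong to. The (x) % 2 macros encode the fact that counters 2n and 2n+1 share one status register, with the odd counter's flags one bit above the even counter's. A minimal user-space sketch of that pairing (the BIT definition mirrors include/linux/bitops.h; the register snapshot is invented for illustration):

#include <stdio.h>

/* same definition the kernel uses for unsigned-long bitmasks */
#define BIT(nr)		(1UL << (nr))

/* counters 2n and 2n+1 share one status register; the odd counter's
 * flags sit one bit above the even counter's */
#define GI_SAVE(x)	(((x) % 2) ? BIT(1) : BIT(0))
#define GI_ARMED(x)	(((x) % 2) ? BIT(9) : BIT(8))

int main(void)
{
	unsigned int status = 0x0302;	/* hypothetical register snapshot */

	for (int counter = 0; counter < 2; counter++)
		printf("counter %d: save=%d armed=%d\n", counter,
		       !!(status & GI_SAVE(counter)),
		       !!(status & GI_ARMED(counter)));
	return 0;
}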
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 823e47910..9007c5754 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -1,19 +1,18 @@
/*
- comedi/drivers/ni_tiocmd.c
- Command support for NI general purpose counters
-
- Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Command support for NI general purpose counters
+ *
+ * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
* Module: ni_tiocmd
@@ -36,13 +35,10 @@
* DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
* DAQ 6601/6602 User Manual (NI 322137B-01)
* 340934b.pdf DAQ-STC reference manual
+ *
+ * TODO: Support use of both banks X and Y
*/
-/*
-TODO:
- Support use of both banks X and Y
-*/
-
#include <linux/module.h>
#include "ni_tio_internal.h"
#include "mite.h"
@@ -51,9 +47,9 @@ static void ni_tio_configure_dma(struct ni_gpct *counter,
bool enable, bool read)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned mask;
- unsigned bits;
+ unsigned int cidx = counter->counter_index;
+ unsigned int mask;
+ unsigned int bits;
mask = GI_READ_ACKS_IRQ | GI_WRITE_ACKS_IRQ;
bits = 0;
@@ -103,7 +99,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
spin_unlock_irqrestore(&counter->lock, flags);
if (ret < 0)
return ret;
- ret = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
+ ret = ni_tio_arm(counter, true, NI_GPCT_ARM_IMMEDIATE);
s->async->inttrig = NULL;
return ret;
@@ -113,7 +109,7 @@ static int ni_tio_input_cmd(struct comedi_subdevice *s)
{
struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
int ret = 0;
@@ -129,9 +125,6 @@ static int ni_tio_input_cmd(struct comedi_subdevice *s)
case ni_gpct_variant_e_series:
mite_prep_dma(counter->mite_chan, 16, 32);
break;
- default:
- BUG();
- break;
}
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_SAVE_TRACE, 0);
ni_tio_configure_dma(counter, true, true);
@@ -143,9 +136,9 @@ static int ni_tio_input_cmd(struct comedi_subdevice *s)
mite_dma_arm(counter->mite_chan);
if (cmd->start_src == TRIG_NOW)
- ret = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
+ ret = ni_tio_arm(counter, true, NI_GPCT_ARM_IMMEDIATE);
else if (cmd->start_src == TRIG_EXT)
- ret = ni_tio_arm(counter, 1, cmd->start_arg);
+ ret = ni_tio_arm(counter, true, cmd->start_arg);
}
return ret;
}
@@ -163,9 +156,9 @@ static int ni_tio_cmd_setup(struct comedi_subdevice *s)
{
struct comedi_cmd *cmd = &s->async->cmd;
struct ni_gpct *counter = s->private;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
int set_gate_source = 0;
- unsigned gate_source;
+ unsigned int gate_source;
int retval = 0;
if (cmd->scan_begin_src == TRIG_EXT) {
@@ -289,10 +282,10 @@ EXPORT_SYMBOL_GPL(ni_tio_cmdtest);
int ni_tio_cancel(struct ni_gpct *counter)
{
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned long flags;
- ni_tio_arm(counter, 0, 0);
+ ni_tio_arm(counter, false, 0);
spin_lock_irqsave(&counter->lock, flags);
if (counter->mite_chan)
mite_dma_disarm(counter->mite_chan);
@@ -305,9 +298,6 @@ int ni_tio_cancel(struct ni_gpct *counter)
}
EXPORT_SYMBOL_GPL(ni_tio_cancel);
- /* During buffered input counter operation for e-series, the gate
- interrupt is acked automatically by the dma controller, due to the
- Gi_Read/Write_Acknowledges_IRQ bits in the input select register. */
static int should_ack_gate(struct ni_gpct *counter)
{
unsigned long flags;
@@ -315,12 +305,19 @@ static int should_ack_gate(struct ni_gpct *counter)
switch (counter->counter_dev->variant) {
case ni_gpct_variant_m_series:
- /* not sure if 660x really supports gate
- interrupts (the bits are not listed
- in register-level manual) */
case ni_gpct_variant_660x:
+ /*
+ * not sure if 660x really supports gate interrupts
+ * (the bits are not listed in register-level manual)
+ */
return 1;
case ni_gpct_variant_e_series:
+ /*
+ * During buffered input counter operation for e-series,
+ * the gate interrupt is acked automatically by the dma
+ * controller, due to the Gi_Read/Write_Acknowledges_IRQ
+ * bits in the input select register.
+ */
spin_lock_irqsave(&counter->lock, flags);
{
if (!counter->mite_chan ||
@@ -338,15 +335,14 @@ static int should_ack_gate(struct ni_gpct *counter)
static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
int *gate_error,
int *tc_error,
- int *perm_stale_data,
- int *stale_data)
+ int *perm_stale_data)
{
- unsigned cidx = counter->counter_index;
- const unsigned short gxx_status = read_register(counter,
+ unsigned int cidx = counter->counter_index;
+ const unsigned short gxx_status = ni_tio_read(counter,
NITIO_SHARED_STATUS_REG(cidx));
- const unsigned short gi_status = read_register(counter,
+ const unsigned short gi_status = ni_tio_read(counter,
NITIO_STATUS_REG(cidx));
- unsigned ack = 0;
+ unsigned int ack = 0;
if (gate_error)
*gate_error = 0;
@@ -354,15 +350,15 @@ static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
*tc_error = 0;
if (perm_stale_data)
*perm_stale_data = 0;
- if (stale_data)
- *stale_data = 0;
if (gxx_status & GI_GATE_ERROR(cidx)) {
ack |= GI_GATE_ERROR_CONFIRM(cidx);
if (gate_error) {
- /*660x don't support automatic acknowledgment
- of gate interrupt via dma read/write
- and report bogus gate errors */
+ /*
+ * 660x don't support automatic acknowledgment
+ * of gate interrupt via dma read/write
+ * and report bogus gate errors
+ */
if (counter->counter_dev->variant !=
ni_gpct_variant_660x)
*gate_error = 1;
@@ -380,14 +376,10 @@ static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
ack |= GI_GATE_INTERRUPT_ACK;
}
if (ack)
- write_register(counter, ack, NITIO_INT_ACK_REG(cidx));
+ ni_tio_write(counter, ack, NITIO_INT_ACK_REG(cidx));
if (ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx)) &
GI_LOADING_ON_GATE) {
- if (gxx_status & GI_STALE_DATA(cidx)) {
- if (stale_data)
- *stale_data = 1;
- }
- if (read_register(counter, NITIO_STATUS2_REG(cidx)) &
+ if (ni_tio_read(counter, NITIO_STATUS2_REG(cidx)) &
GI_PERMANENT_STALE(cidx)) {
dev_info(counter->counter_dev->dev->class_dev,
"%s: Gi_Permanent_Stale_Data detected.\n",
@@ -400,22 +392,21 @@ static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
void ni_tio_acknowledge(struct ni_gpct *counter)
{
- ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
+ ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL);
}
EXPORT_SYMBOL_GPL(ni_tio_acknowledge);
void ni_tio_handle_interrupt(struct ni_gpct *counter,
struct comedi_subdevice *s)
{
- unsigned cidx = counter->counter_index;
- unsigned gpct_mite_status;
+ unsigned int cidx = counter->counter_index;
unsigned long flags;
int gate_error;
int tc_error;
int perm_stale_data;
ni_tio_acknowledge_and_confirm(counter, &gate_error, &tc_error,
- &perm_stale_data, NULL);
+ &perm_stale_data);
if (gate_error) {
dev_notice(counter->counter_dev->dev->class_dev,
"%s: Gi_Gate_Error detected.\n", __func__);
@@ -426,7 +417,7 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter,
switch (counter->counter_dev->variant) {
case ni_gpct_variant_m_series:
case ni_gpct_variant_660x:
- if (read_register(counter, NITIO_DMA_STATUS_REG(cidx)) &
+ if (ni_tio_read(counter, NITIO_DMA_STATUS_REG(cidx)) &
GI_DRQ_ERROR) {
dev_notice(counter->counter_dev->dev->class_dev,
"%s: Gi_DRQ_Error detected.\n", __func__);
@@ -437,16 +428,8 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter,
break;
}
spin_lock_irqsave(&counter->lock, flags);
- if (!counter->mite_chan) {
- spin_unlock_irqrestore(&counter->lock, flags);
- return;
- }
- gpct_mite_status = mite_get_status(counter->mite_chan);
- if (gpct_mite_status & CHSR_LINKC)
- writel(CHOR_CLRLC,
- counter->mite_chan->mite->mite_io_addr +
- MITE_CHOR(counter->mite_chan->channel));
- mite_sync_input_dma(counter->mite_chan, s);
+ if (counter->mite_chan)
+ mite_ack_linkc(counter->mite_chan, s, true);
spin_unlock_irqrestore(&counter->lock, flags);
}
EXPORT_SYMBOL_GPL(ni_tio_handle_interrupt);
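
Note: the consolidation at the end of ni_tio_handle_interrupt() replaces open-coded MITE channel handling with a single mite_ack_linkc() call. Reconstructed from the deleted lines, the helper presumably wraps the same three steps — read the channel status, clear a pending LINKC condition, and sync the input DMA. A sketch assembled from those removed lines, not the actual mite.c implementation:

static void mite_ack_linkc_sketch(struct mite_channel *mite_chan,
				  struct comedi_subdevice *s, bool sync)
{
	unsigned int status = mite_get_status(mite_chan);

	if (status & CHSR_LINKC)
		writel(CHOR_CLRLC, mite_chan->mite->mite_io_addr +
		       MITE_CHOR(mite_chan->channel));
	if (sync)
		mite_sync_input_dma(mite_chan, s);
}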
diff --git a/drivers/staging/comedi/drivers/plx9052.h b/drivers/staging/comedi/drivers/plx9052.h
index fbcf25069..2892e6528 100644
--- a/drivers/staging/comedi/drivers/plx9052.h
+++ b/drivers/staging/comedi/drivers/plx9052.h
@@ -1,22 +1,21 @@
/*
- comedi/drivers/plx9052.h
- Definitions for the PLX-9052 PCI interface chip
-
- Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Definitions for the PLX-9052 PCI interface chip
+ *
+ * Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _PLX9052_H_
#define _PLX9052_H_
@@ -25,55 +24,56 @@
* INTCSR - Interrupt Control/Status register
*/
#define PLX9052_INTCSR 0x4c
-#define PLX9052_INTCSR_LI1ENAB (1 << 0) /* LI1 enabled */
-#define PLX9052_INTCSR_LI1POL (1 << 1) /* LI1 active high */
-#define PLX9052_INTCSR_LI1STAT (1 << 2) /* LI1 active */
-#define PLX9052_INTCSR_LI2ENAB (1 << 3) /* LI2 enabled */
-#define PLX9052_INTCSR_LI2POL (1 << 4) /* LI2 active high */
-#define PLX9052_INTCSR_LI2STAT (1 << 5) /* LI2 active */
-#define PLX9052_INTCSR_PCIENAB (1 << 6) /* PCIINT enabled */
-#define PLX9052_INTCSR_SOFTINT (1 << 7) /* generate soft int */
-#define PLX9052_INTCSR_LI1SEL (1 << 8) /* LI1 edge */
-#define PLX9052_INTCSR_LI2SEL (1 << 9) /* LI2 edge */
-#define PLX9052_INTCSR_LI1CLRINT (1 << 10) /* LI1 clear int */
-#define PLX9052_INTCSR_LI2CLRINT (1 << 11) /* LI2 clear int */
-#define PLX9052_INTCSR_ISAMODE (1 << 12) /* ISA interface mode */
+#define PLX9052_INTCSR_LI1ENAB BIT(0) /* LI1 enabled */
+#define PLX9052_INTCSR_LI1POL BIT(1) /* LI1 active high */
+#define PLX9052_INTCSR_LI1STAT BIT(2) /* LI1 active */
+#define PLX9052_INTCSR_LI2ENAB BIT(3) /* LI2 enabled */
+#define PLX9052_INTCSR_LI2POL BIT(4) /* LI2 active high */
+#define PLX9052_INTCSR_LI2STAT BIT(5) /* LI2 active */
+#define PLX9052_INTCSR_PCIENAB BIT(6) /* PCIINT enabled */
+#define PLX9052_INTCSR_SOFTINT BIT(7) /* generate soft int */
+#define PLX9052_INTCSR_LI1SEL BIT(8) /* LI1 edge */
+#define PLX9052_INTCSR_LI2SEL BIT(9) /* LI2 edge */
+#define PLX9052_INTCSR_LI1CLRINT BIT(10) /* LI1 clear int */
+#define PLX9052_INTCSR_LI2CLRINT BIT(11) /* LI2 clear int */
+#define PLX9052_INTCSR_ISAMODE BIT(12) /* ISA interface mode */
/*
* CNTRL - User I/O, Direct Slave Response, Serial EEPROM, and
* Initialization Control register
*/
#define PLX9052_CNTRL 0x50
-#define PLX9052_CNTRL_WAITO (1 << 0) /* UIO0 or WAITO# select */
-#define PLX9052_CNTRL_UIO0_DIR (1 << 1) /* UIO0 direction */
-#define PLX9052_CNTRL_UIO0_DATA (1 << 2) /* UIO0 data */
-#define PLX9052_CNTRL_LLOCKO (1 << 3) /* UIO1 or LLOCKo# select */
-#define PLX9052_CNTRL_UIO1_DIR (1 << 4) /* UIO1 direction */
-#define PLX9052_CNTRL_UIO1_DATA (1 << 5) /* UIO1 data */
-#define PLX9052_CNTRL_CS2 (1 << 6) /* UIO2 or CS2# select */
-#define PLX9052_CNTRL_UIO2_DIR (1 << 7) /* UIO2 direction */
-#define PLX9052_CNTRL_UIO2_DATA (1 << 8) /* UIO2 data */
-#define PLX9052_CNTRL_CS3 (1 << 9) /* UIO3 or CS3# select */
-#define PLX9052_CNTRL_UIO3_DIR (1 << 10) /* UIO3 direction */
-#define PLX9052_CNTRL_UIO3_DATA (1 << 11) /* UIO3 data */
-#define PLX9052_CNTRL_PCIBAR01 (0 << 12) /* bar 0 (mem) and 1 (I/O) */
-#define PLX9052_CNTRL_PCIBAR0 (1 << 12) /* bar 0 (mem) only */
-#define PLX9052_CNTRL_PCIBAR1 (2 << 12) /* bar 1 (I/O) only */
-#define PLX9052_CNTRL_PCI2_1_FEATURES (1 << 14) /* PCI r2.1 features enabled */
-#define PLX9052_CNTRL_PCI_R_W_FLUSH (1 << 15) /* read w/write flush mode */
-#define PLX9052_CNTRL_PCI_R_NO_FLUSH (1 << 16) /* read no flush mode */
-#define PLX9052_CNTRL_PCI_R_NO_WRITE (1 << 17) /* read no write mode */
-#define PLX9052_CNTRL_PCI_W_RELEASE (1 << 18) /* write release bus mode */
-#define PLX9052_CNTRL_RETRY_CLKS(x) (((x) & 0xf) << 19) /* slave retry clks */
-#define PLX9052_CNTRL_LOCK_ENAB (1 << 23) /* slave LOCK# enable */
+#define PLX9052_CNTRL_WAITO BIT(0) /* UIO0 or WAITO# select */
+#define PLX9052_CNTRL_UIO0_DIR BIT(1) /* UIO0 direction */
+#define PLX9052_CNTRL_UIO0_DATA BIT(2) /* UIO0 data */
+#define PLX9052_CNTRL_LLOCKO BIT(3) /* UIO1 or LLOCKo# select */
+#define PLX9052_CNTRL_UIO1_DIR BIT(4) /* UIO1 direction */
+#define PLX9052_CNTRL_UIO1_DATA BIT(5) /* UIO1 data */
+#define PLX9052_CNTRL_CS2 BIT(6) /* UIO2 or CS2# select */
+#define PLX9052_CNTRL_UIO2_DIR BIT(7) /* UIO2 direction */
+#define PLX9052_CNTRL_UIO2_DATA BIT(8) /* UIO2 data */
+#define PLX9052_CNTRL_CS3 BIT(9) /* UIO3 or CS3# select */
+#define PLX9052_CNTRL_UIO3_DIR BIT(10) /* UIO3 direction */
+#define PLX9052_CNTRL_UIO3_DATA BIT(11) /* UIO3 data */
+#define PLX9052_CNTRL_PCIBAR(x) (((x) & 0x3) << 12)
+#define PLX9052_CNTRL_PCIBAR01 PLX9052_CNTRL_PCIBAR(0) /* mem and IO */
+#define PLX9052_CNTRL_PCIBAR0 PLX9052_CNTRL_PCIBAR(1) /* mem only */
+#define PLX9052_CNTRL_PCIBAR1 PLX9052_CNTRL_PCIBAR(2) /* IO only */
+#define PLX9052_CNTRL_PCI2_1_FEATURES BIT(14) /* PCI v2.1 features enabled */
+#define PLX9052_CNTRL_PCI_R_W_FLUSH BIT(15) /* read w/write flush mode */
+#define PLX9052_CNTRL_PCI_R_NO_FLUSH BIT(16) /* read no flush mode */
+#define PLX9052_CNTRL_PCI_R_NO_WRITE BIT(17) /* read no write mode */
+#define PLX9052_CNTRL_PCI_W_RELEASE BIT(18) /* write release bus mode */
+#define PLX9052_CNTRL_RETRY_CLKS(x) (((x) & 0xf) << 19) /* retry clks */
+#define PLX9052_CNTRL_LOCK_ENAB BIT(23) /* slave LOCK# enable */
#define PLX9052_CNTRL_EEPROM_MASK (0x1f << 24) /* EEPROM bits */
-#define PLX9052_CNTRL_EEPROM_CLK (1 << 24) /* EEPROM clock */
-#define PLX9052_CNTRL_EEPROM_CS (1 << 25) /* EEPROM chip select */
-#define PLX9052_CNTRL_EEPROM_DOUT (1 << 26) /* EEPROM write bit */
-#define PLX9052_CNTRL_EEPROM_DIN (1 << 27) /* EEPROM read bit */
-#define PLX9052_CNTRL_EEPROM_PRESENT (1 << 28) /* EEPROM present */
-#define PLX9052_CNTRL_RELOAD_CFG (1 << 29) /* reload configuration */
-#define PLX9052_CNTRL_PCI_RESET (1 << 30) /* PCI adapter reset */
-#define PLX9052_CNTRL_MASK_REV (1 << 31) /* mask revision */
+#define PLX9052_CNTRL_EEPROM_CLK BIT(24) /* EEPROM clock */
+#define PLX9052_CNTRL_EEPROM_CS BIT(25) /* EEPROM chip select */
+#define PLX9052_CNTRL_EEPROM_DOUT BIT(26) /* EEPROM write bit */
+#define PLX9052_CNTRL_EEPROM_DIN BIT(27) /* EEPROM read bit */
+#define PLX9052_CNTRL_EEPROM_PRESENT BIT(28) /* EEPROM present */
+#define PLX9052_CNTRL_RELOAD_CFG BIT(29) /* reload configuration */
+#define PLX9052_CNTRL_PCI_RESET BIT(30) /* PCI adapter reset */
+#define PLX9052_CNTRL_MASK_REV BIT(31) /* mask revision */
#endif /* _PLX9052_H_ */
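
Note: the PCIBAR change above illustrates the pattern used throughout this series — a multi-bit field gets a parameterized helper, and the named values are derived from it, so the shift and mask live in exactly one place. A quick user-space check of the behavior (macro values taken from the definitions above):

#include <stdio.h>

#define PLX9052_CNTRL_PCIBAR(x)	(((x) & 0x3) << 12)
#define PLX9052_CNTRL_PCIBAR0	PLX9052_CNTRL_PCIBAR(1)	/* bar 0 (mem) only */

int main(void)
{
	/* the derived constant now tracks the field helper */
	printf("PCIBAR0   = 0x%x\n", PLX9052_CNTRL_PCIBAR0);	/* 0x1000 */
	/* out-of-range selectors are clipped by the field mask */
	printf("PCIBAR(5) = 0x%x\n", PLX9052_CNTRL_PCIBAR(5));	/* 0x1000 */
	return 0;
}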
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
index f5cd6d500..8d1aee00b 100644
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ b/drivers/staging/comedi/drivers/plx9080.h
@@ -88,7 +88,7 @@ enum marb_bits {
/* direct slave LLOCKo# enable */
MARB_DS_LLOCK_ENABLE = 0x00400000,
MARB_PCI_REQUEST_MODE = 0x00800000,
- MARB_PCIv21_MODE = 0x01000000, /* pci specification v2.1 mode */
+ MARB_PCIV21_MODE = 0x01000000, /* pci specification v2.1 mode */
MARB_PCI_READ_NO_WRITE_MODE = 0x02000000,
MARB_PCI_READ_WITH_WRITE_FLUSH_MODE = 0x04000000,
/* gate local bus latency timer with BREQ */
diff --git a/drivers/staging/comedi/drivers/z8536.h b/drivers/staging/comedi/drivers/z8536.h
index 7be53109c..47eadbf4d 100644
--- a/drivers/staging/comedi/drivers/z8536.h
+++ b/drivers/staging/comedi/drivers/z8536.h
@@ -24,11 +24,12 @@
#define Z8536_CFG_CTRL_PCE_CT3E BIT(4) /* Port C & C/T 3 Enable */
#define Z8536_CFG_CTRL_PLC BIT(3) /* Port A/B Link Control */
#define Z8536_CFG_CTRL_PAE BIT(2) /* Port A Enable */
-#define Z8536_CFG_CTRL_LC_INDEP (0 << 0)/* C/Ts Independent */
-#define Z8536_CFG_CTRL_LC_GATE (1 << 0)/* C/T 1 Out Gates C/T 2 */
-#define Z8536_CFG_CTRL_LC_TRIG (2 << 0)/* C/T 1 Out Triggers C/T 2 */
-#define Z8536_CFG_CTRL_LC_CLK (3 << 0)/* C/T 1 Out Clocks C/T 2 */
-#define Z8536_CFG_CTRL_LC_MASK (3 << 0)/* C/T Link Control mask */
+#define Z8536_CFG_CTRL_LC(x) (((x) & 0x3) << 0) /* Link Control */
+#define Z8536_CFG_CTRL_LC_INDEP Z8536_CFG_CTRL_LC(0)/* Independent */
+#define Z8536_CFG_CTRL_LC_GATE Z8536_CFG_CTRL_LC(1)/* 1 Gates 2 */
+#define Z8536_CFG_CTRL_LC_TRIG Z8536_CFG_CTRL_LC(2)/* 1 Triggers 2 */
+#define Z8536_CFG_CTRL_LC_CLK Z8536_CFG_CTRL_LC(3)/* 1 Clocks 2 */
+#define Z8536_CFG_CTRL_LC_MASK Z8536_CFG_CTRL_LC(3)
/* Interrupt Vector registers */
#define Z8536_PA_INT_VECT_REG 0x02
@@ -43,15 +44,16 @@
#define Z8536_CT2_CMDSTAT_REG 0x0b
#define Z8536_CT3_CMDSTAT_REG 0x0c
#define Z8536_CT_CMDSTAT_REG(x) (0x0a + (x))
-#define Z8536_CMD_NULL (0 << 5)/* Null Code */
-#define Z8536_CMD_CLR_IP_IUS (1 << 5)/* Clear IP & IUS */
-#define Z8536_CMD_SET_IUS (2 << 5)/* Set IUS */
-#define Z8536_CMD_CLR_IUS (3 << 5)/* Clear IUS */
-#define Z8536_CMD_SET_IP (4 << 5)/* Set IP */
-#define Z8536_CMD_CLR_IP (5 << 5)/* Clear IP */
-#define Z8536_CMD_SET_IE (6 << 5)/* Set IE */
-#define Z8536_CMD_CLR_IE (7 << 5)/* Clear IE */
-#define Z8536_CMD_MASK (7 << 5)
+#define Z8536_CMD(x) (((x) & 0x7) << 5)
+#define Z8536_CMD_NULL Z8536_CMD(0) /* Null Code */
+#define Z8536_CMD_CLR_IP_IUS Z8536_CMD(1) /* Clear IP & IUS */
+#define Z8536_CMD_SET_IUS Z8536_CMD(2) /* Set IUS */
+#define Z8536_CMD_CLR_IUS Z8536_CMD(3) /* Clear IUS */
+#define Z8536_CMD_SET_IP Z8536_CMD(4) /* Set IP */
+#define Z8536_CMD_CLR_IP Z8536_CMD(5) /* Clear IP */
+#define Z8536_CMD_SET_IE Z8536_CMD(6) /* Set IE */
+#define Z8536_CMD_CLR_IE Z8536_CMD(7) /* Clear IE */
+#define Z8536_CMD_MASK Z8536_CMD(7)
#define Z8536_STAT_IUS BIT(7) /* Interrupt Under Service */
#define Z8536_STAT_IE BIT(6) /* Interrupt Enable */
@@ -105,46 +107,51 @@
#define Z8536_CT_MODE_ETE BIT(4) /* External Trigger Enable */
#define Z8536_CT_MODE_EGE BIT(3) /* External Gate Enable */
#define Z8536_CT_MODE_REB BIT(2) /* Retrigger Enable Bit */
-#define Z8536_CT_MODE_DCS_PULSE (0 << 0)/* Duty Cycle - Pulse */
-#define Z8536_CT_MODE_DCS_ONESHOT (1 << 0)/* Duty Cycle - One-Shot */
-#define Z8536_CT_MODE_DCS_SQRWAVE (2 << 0)/* Duty Cycle - Square Wave */
-#define Z8536_CT_MODE_DCS_DO_NOT_USE (3 << 0)/* Duty Cycle - Do Not Use */
-#define Z8536_CT_MODE_DCS_MASK (3 << 0)/* Duty Cycle mask */
+#define Z8536_CT_MODE_DCS(x) (((x) & 0x3) << 0) /* Duty Cycle */
+#define Z8536_CT_MODE_DCS_PULSE Z8536_CT_MODE_DCS(0) /* Pulse */
+#define Z8536_CT_MODE_DCS_ONESHOT Z8536_CT_MODE_DCS(1) /* One-Shot */
+#define Z8536_CT_MODE_DCS_SQRWAVE Z8536_CT_MODE_DCS(2) /* Square Wave */
+#define Z8536_CT_MODE_DCS_DO_NOT_USE Z8536_CT_MODE_DCS(3) /* Do Not Use */
+#define Z8536_CT_MODE_DCS_MASK Z8536_CT_MODE_DCS(3)
/* Port A/B Mode Specification registers */
#define Z8536_PA_MODE_REG 0x20
#define Z8536_PB_MODE_REG 0x28
-#define Z8536_PAB_MODE_PTS_BIT (0 << 6)/* Bit Port */
-#define Z8536_PAB_MODE_PTS_INPUT (1 << 6)/* Input Port */
-#define Z8536_PAB_MODE_PTS_OUTPUT (2 << 6)/* Output Port */
-#define Z8536_PAB_MODE_PTS_BIDIR (3 << 6)/* Bidirectional Port */
-#define Z8536_PAB_MODE_PTS_MASK (3 << 6)/* Port Type Select mask */
+#define Z8536_PAB_MODE_PTS(x) (((x) & 0x3) << 6) /* Port type */
+#define Z8536_PAB_MODE_PTS_BIT Z8536_PAB_MODE_PTS(0)/* Bit */
+#define Z8536_PAB_MODE_PTS_INPUT Z8536_PAB_MODE_PTS(1)/* Input */
+#define Z8536_PAB_MODE_PTS_OUTPUT Z8536_PAB_MODE_PTS(2)/* Output */
+#define Z8536_PAB_MODE_PTS_BIDIR Z8536_PAB_MODE_PTS(3)/* Bidir */
+#define Z8536_PAB_MODE_PTS_MASK Z8536_PAB_MODE_PTS(3)
#define Z8536_PAB_MODE_ITB BIT(5) /* Interrupt on Two Bytes */
#define Z8536_PAB_MODE_SB BIT(4) /* Single Buffered mode */
#define Z8536_PAB_MODE_IMO BIT(3) /* Interrupt on Match Only */
-#define Z8536_PAB_MODE_PMS_DISABLE (0 << 1)/* Disable Pattern Match */
-#define Z8536_PAB_MODE_PMS_AND (1 << 1)/* "AND" mode */
-#define Z8536_PAB_MODE_PMS_OR (2 << 1)/* "OR" mode */
-#define Z8536_PAB_MODE_PMS_OR_PEV (3 << 1)/* "OR-Priority" mode */
-#define Z8536_PAB_MODE_PMS_MASK (3 << 1)/* Pattern Mode mask */
+#define Z8536_PAB_MODE_PMS(x) (((x) & 0x3) << 1) /* Pattern Mode */
+#define Z8536_PAB_MODE_PMS_DISABLE Z8536_PAB_MODE_PMS(0)/* Disabled */
+#define Z8536_PAB_MODE_PMS_AND Z8536_PAB_MODE_PMS(1)/* "AND" */
+#define Z8536_PAB_MODE_PMS_OR Z8536_PAB_MODE_PMS(2)/* "OR" */
+#define Z8536_PAB_MODE_PMS_OR_PEV Z8536_PAB_MODE_PMS(3)/* "OR-Priority" */
+#define Z8536_PAB_MODE_PMS_MASK Z8536_PAB_MODE_PMS(3)
#define Z8536_PAB_MODE_LPM BIT(0) /* Latch on Pattern Match */
#define Z8536_PAB_MODE_DTE BIT(0) /* Deskew Timer Enabled */
/* Port A/B Handshake Specification registers */
#define Z8536_PA_HANDSHAKE_REG 0x21
#define Z8536_PB_HANDSHAKE_REG 0x29
-#define Z8536_PAB_HANDSHAKE_HST_INTER (0 << 6)/* Interlocked Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_STROBED (1 << 6)/* Strobed Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_PULSED (2 << 6)/* Pulsed Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_3WIRE (3 << 6)/* Three-Wire Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_MASK (3 << 6)/* Handshake Type mask */
-#define Z8536_PAB_HANDSHAKE_RWS_DISABLE (0 << 3)/* Req/Wait Disabled */
-#define Z8536_PAB_HANDSHAKE_RWS_OUTWAIT (1 << 3)/* Output Wait */
-#define Z8536_PAB_HANDSHAKE_RWS_INWAIT (3 << 3)/* Input Wait */
-#define Z8536_PAB_HANDSHAKE_RWS_SPREQ (4 << 3)/* Special Request */
-#define Z8536_PAB_HANDSHAKE_RWS_OUTREQ (5 << 4)/* Output Request */
-#define Z8536_PAB_HANDSHAKE_RWS_INREQ (7 << 3)/* Input Request */
-#define Z8536_PAB_HANDSHAKE_RWS_MASK (7 << 3)/* Req/Wait mask */
+#define Z8536_PAB_HANDSHAKE_HST(x) (((x) & 0x3) << 6) /* Handshake Type */
+#define Z8536_PAB_HANDSHAKE_HST_INTER Z8536_PAB_HANDSHAKE_HST(0)/*Interlock*/
+#define Z8536_PAB_HANDSHAKE_HST_STROBED Z8536_PAB_HANDSHAKE_HST(1)/* Strobed */
+#define Z8536_PAB_HANDSHAKE_HST_PULSED Z8536_PAB_HANDSHAKE_HST(2)/* Pulsed */
+#define Z8536_PAB_HANDSHAKE_HST_3WIRE Z8536_PAB_HANDSHAKE_HST(3)/* 3-Wire */
+#define Z8536_PAB_HANDSHAKE_HST_MASK Z8536_PAB_HANDSHAKE_HST(3)
+#define Z8536_PAB_HANDSHAKE_RWS(x) (((x) & 0x7) << 3) /* Req/Wait */
+#define Z8536_PAB_HANDSHAKE_RWS_DISABLE Z8536_PAB_HANDSHAKE_RWS(0)/* Disabled */
+#define Z8536_PAB_HANDSHAKE_RWS_OUTWAIT Z8536_PAB_HANDSHAKE_RWS(1)/* Out Wait */
+#define Z8536_PAB_HANDSHAKE_RWS_INWAIT Z8536_PAB_HANDSHAKE_RWS(3)/* In Wait */
+#define Z8536_PAB_HANDSHAKE_RWS_SPREQ Z8536_PAB_HANDSHAKE_RWS(4)/* Special */
+#define Z8536_PAB_HANDSHAKE_RWS_OUTREQ Z8536_PAB_HANDSHAKE_RWS(5)/* Out Req */
+#define Z8536_PAB_HANDSHAKE_RWS_INREQ Z8536_PAB_HANDSHAKE_RWS(7)/* In Req */
+#define Z8536_PAB_HANDSHAKE_RWS_MASK Z8536_PAB_HANDSHAKE_RWS(7)
#define Z8536_PAB_HANDSHAKE_DESKEW(x) ((x) << 0)/* Deskew Time */
#define Z8536_PAB_HANDSHAKE_DESKEW_MASK (3 << 0)/* Deskew Time mask */
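
Note: one caution with these field helpers, and the reason the PTS definitions above pass the raw selector rather than a pre-shifted value — the helper masks its argument before shifting, so handing it 1 << 6 would be clipped to zero. A user-space demonstration:

#include <stdio.h>

#define Z8536_PAB_MODE_PTS(x)	(((x) & 0x3) << 6)	/* Port type */

int main(void)
{
	/* correct: raw field value, the macro applies the shift */
	printf("PTS(1)      = 0x%02x\n", Z8536_PAB_MODE_PTS(1));      /* 0x40 */
	/* wrong: a pre-shifted value is masked away before the shift */
	printf("PTS(1 << 6) = 0x%02x\n", Z8536_PAB_MODE_PTS(1 << 6)); /* 0x00 */
	return 0;
}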
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index 0ff3139e5..46c050cc7 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -1168,7 +1168,7 @@ static void cls_uart_init(struct channel_t *ch)
/* Clear out UART and FIFO */
readb(&ch->ch_cls_uart->txrx);
- writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT),
+ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
&ch->ch_cls_uart->isr_fcr);
udelay(10);
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index 4eb410e09..af2e835ef 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -48,7 +48,7 @@ static void dgnc_do_remap(struct dgnc_board *brd);
/*
* File operations permitted on Control/Management major.
*/
-static const struct file_operations dgnc_BoardFops = {
+static const struct file_operations dgnc_board_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = dgnc_mgmt_ioctl,
.open = dgnc_mgmt_open,
@@ -58,11 +58,11 @@ static const struct file_operations dgnc_BoardFops = {
/*
* Globals
*/
-uint dgnc_NumBoards;
-struct dgnc_board *dgnc_Board[MAXBOARDS];
+uint dgnc_num_boards;
+struct dgnc_board *dgnc_board[MAXBOARDS];
DEFINE_SPINLOCK(dgnc_global_lock);
DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */
-uint dgnc_Major;
+uint dgnc_major;
int dgnc_poll_tick = 20; /* Poll interval - 20 ms */
/*
@@ -92,7 +92,7 @@ struct board_id {
unsigned int is_pci_express;
};
-static struct board_id dgnc_Ids[] = {
+static struct board_id dgnc_ids[] = {
{ PCI_DEVICE_CLASSIC_4_PCI_NAME, 4, 0 },
{ PCI_DEVICE_CLASSIC_4_422_PCI_NAME, 4, 0 },
{ PCI_DEVICE_CLASSIC_8_PCI_NAME, 8, 0 },
@@ -140,14 +140,14 @@ static void cleanup(bool sysfiles)
if (sysfiles)
dgnc_remove_driver_sysfiles(&dgnc_driver);
- device_destroy(dgnc_class, MKDEV(dgnc_Major, 0));
+ device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
class_destroy(dgnc_class);
- unregister_chrdev(dgnc_Major, "dgnc");
+ unregister_chrdev(dgnc_major, "dgnc");
- for (i = 0; i < dgnc_NumBoards; ++i) {
- dgnc_remove_ports_sysfiles(dgnc_Board[i]);
- dgnc_tty_uninit(dgnc_Board[i]);
- dgnc_cleanup_board(dgnc_Board[i]);
+ for (i = 0; i < dgnc_num_boards; ++i) {
+ dgnc_remove_ports_sysfiles(dgnc_board[i]);
+ dgnc_tty_uninit(dgnc_board[i]);
+ dgnc_cleanup_board(dgnc_board[i]);
}
dgnc_tty_post_uninit();
@@ -217,12 +217,12 @@ static int dgnc_start(void)
*
* Register management/dpa devices
*/
- rc = register_chrdev(0, "dgnc", &dgnc_BoardFops);
+ rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
if (rc < 0) {
pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
return rc;
}
- dgnc_Major = rc;
+ dgnc_major = rc;
dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
if (IS_ERR(dgnc_class)) {
@@ -232,7 +232,7 @@ static int dgnc_start(void)
}
dev = device_create(dgnc_class, NULL,
- MKDEV(dgnc_Major, 0),
+ MKDEV(dgnc_major, 0),
NULL, "dgnc_mgmt");
if (IS_ERR(dev)) {
rc = PTR_ERR(dev);
@@ -262,11 +262,11 @@ static int dgnc_start(void)
return 0;
failed_tty:
- device_destroy(dgnc_class, MKDEV(dgnc_Major, 0));
+ device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
failed_device:
class_destroy(dgnc_class);
failed_class:
- unregister_chrdev(dgnc_Major, "dgnc");
+ unregister_chrdev(dgnc_major, "dgnc");
return rc;
}
@@ -283,7 +283,7 @@ static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = dgnc_found_board(pdev, ent->driver_data);
if (rc == 0)
- dgnc_NumBoards++;
+ dgnc_num_boards++;
return rc;
}
@@ -346,7 +346,7 @@ static void dgnc_cleanup_board(struct dgnc_board *brd)
}
}
- dgnc_Board[brd->boardnum] = NULL;
+ dgnc_board[brd->boardnum] = NULL;
kfree(brd);
}
@@ -365,8 +365,8 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
unsigned long flags;
/* get the board structure and prep it */
- dgnc_Board[dgnc_NumBoards] = kzalloc(sizeof(*brd), GFP_KERNEL);
- brd = dgnc_Board[dgnc_NumBoards];
+ dgnc_board[dgnc_num_boards] = kzalloc(sizeof(*brd), GFP_KERNEL);
+ brd = dgnc_board[dgnc_num_boards];
if (!brd)
return -ENOMEM;
@@ -382,15 +382,15 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
/* store the info for the board we've found */
brd->magic = DGNC_BOARD_MAGIC;
- brd->boardnum = dgnc_NumBoards;
+ brd->boardnum = dgnc_num_boards;
brd->vendor = dgnc_pci_tbl[id].vendor;
brd->device = dgnc_pci_tbl[id].device;
brd->pdev = pdev;
brd->pci_bus = pdev->bus->number;
brd->pci_slot = PCI_SLOT(pdev->devfn);
- brd->name = dgnc_Ids[id].name;
- brd->maxports = dgnc_Ids[id].maxports;
- if (dgnc_Ids[i].is_pci_express)
+ brd->name = dgnc_ids[id].name;
+ brd->maxports = dgnc_ids[id].maxports;
+ if (dgnc_ids[id].is_pci_express)
brd->bd_flags |= BD_IS_PCI_EXPRESS;
brd->dpastatus = BD_NOFEP;
init_waitqueue_head(&brd->state_wait);
@@ -642,8 +642,8 @@ static void dgnc_poll_handler(ulong dummy)
unsigned long new_time;
/* Go thru each board, kicking off a tasklet for each if needed */
- for (i = 0; i < dgnc_NumBoards; i++) {
- brd = dgnc_Board[i];
+ for (i = 0; i < dgnc_num_boards; i++) {
+ brd = dgnc_board[i];
spin_lock_irqsave(&brd->bd_lock, flags);
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index e4be81b66..95ec729fa 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -202,18 +202,13 @@ struct dgnc_board {
* to our channels.
*/
- struct tty_driver SerialDriver;
- char SerialName[200];
- struct tty_driver PrintDriver;
- char PrintName[200];
+ struct tty_driver *serial_driver;
+ char serial_name[200];
+ struct tty_driver *print_driver;
+ char print_name[200];
- bool dgnc_Major_Serial_Registered;
- bool dgnc_Major_TransparentPrint_Registered;
-
- uint dgnc_Serial_Major;
- uint dgnc_TransparentPrint_Major;
-
- uint TtyRefCnt;
+ bool dgnc_major_serial_registered;
+ bool dgnc_major_transparent_print_registered;
u16 dpatype; /* The board "type",
* as defined by DPA
@@ -399,12 +394,12 @@ struct channel_t {
/*
* Our Global Variables.
*/
-extern uint dgnc_Major; /* Our driver/mgmt major */
+extern uint dgnc_major; /* Our driver/mgmt major */
extern int dgnc_poll_tick; /* Poll interval - 20 ms */
extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
extern spinlock_t dgnc_poll_lock; /* Poll scheduling lock */
-extern uint dgnc_NumBoards; /* Total number of boards */
-extern struct dgnc_board *dgnc_Board[MAXBOARDS]; /* Array of board
+extern uint dgnc_num_boards; /* Total number of boards */
+extern struct dgnc_board *dgnc_board[MAXBOARDS]; /* Array of board
* structs
*/
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
index ba29a8d91..683c09839 100644
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -111,7 +111,7 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
spin_lock_irqsave(&dgnc_global_lock, flags);
memset(&ddi, 0, sizeof(ddi));
- ddi.dinfo_nboards = dgnc_NumBoards;
+ ddi.dinfo_nboards = dgnc_num_boards;
sprintf(ddi.dinfo_version, "%s", DG_PART);
spin_unlock_irqrestore(&dgnc_global_lock, flags);
@@ -131,27 +131,27 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(&brd, uarg, sizeof(int)))
return -EFAULT;
- if (brd < 0 || brd >= dgnc_NumBoards)
+ if (brd < 0 || brd >= dgnc_num_boards)
return -ENODEV;
memset(&di, 0, sizeof(di));
di.info_bdnum = brd;
- spin_lock_irqsave(&dgnc_Board[brd]->bd_lock, flags);
+ spin_lock_irqsave(&dgnc_board[brd]->bd_lock, flags);
- di.info_bdtype = dgnc_Board[brd]->dpatype;
- di.info_bdstate = dgnc_Board[brd]->dpastatus;
+ di.info_bdtype = dgnc_board[brd]->dpatype;
+ di.info_bdstate = dgnc_board[brd]->dpastatus;
di.info_ioport = 0;
- di.info_physaddr = (ulong)dgnc_Board[brd]->membase;
- di.info_physsize = (ulong)dgnc_Board[brd]->membase
- - dgnc_Board[brd]->membase_end;
- if (dgnc_Board[brd]->state != BOARD_FAILED)
- di.info_nports = dgnc_Board[brd]->nasync;
+ di.info_physaddr = (ulong)dgnc_board[brd]->membase;
+ di.info_physsize = (ulong)dgnc_board[brd]->membase
+ - dgnc_board[brd]->membase_end;
+ if (dgnc_board[brd]->state != BOARD_FAILED)
+ di.info_nports = dgnc_board[brd]->nasync;
else
di.info_nports = 0;
- spin_unlock_irqrestore(&dgnc_Board[brd]->bd_lock, flags);
+ spin_unlock_irqrestore(&dgnc_board[brd]->bd_lock, flags);
if (copy_to_user(uarg, &di, sizeof(di)))
return -EFAULT;
@@ -174,14 +174,14 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
channel = ni.channel;
/* Verify boundaries on board */
- if (board >= dgnc_NumBoards)
+ if (board >= dgnc_num_boards)
return -ENODEV;
/* Verify boundaries on channel */
- if (channel >= dgnc_Board[board]->nasync)
+ if (channel >= dgnc_board[board]->nasync)
return -ENODEV;
- ch = dgnc_Board[board]->channels[channel];
+ ch = dgnc_board[board]->channels[channel];
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return -ENODEV;
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index 31ac437cb..ba57e9546 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -77,8 +77,6 @@ struct board_ops dgnc_neo_ops = {
.send_immediate_char = neo_send_immediate_char
};
-static uint dgnc_offset_table[8] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
-
/*
* This function allows calls to ensure that all outstanding
* PCI writes have been completed, by doing a PCI read against
@@ -116,7 +114,8 @@ static inline void neo_set_cts_flow_control(struct channel_t *ch)
writeb(efr, &ch->ch_neo_uart->efr);
/* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY,
+ &ch->ch_neo_uart->fctr);
/* Feed the UART our trigger levels */
writeb(8, &ch->ch_neo_uart->tfifo);
@@ -150,7 +149,8 @@ static inline void neo_set_rts_flow_control(struct channel_t *ch)
/* Turn on UART enhanced bits */
writeb(efr, &ch->ch_neo_uart->efr);
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY,
+ &ch->ch_neo_uart->fctr);
ch->ch_r_watermark = 4;
writeb(32, &ch->ch_neo_uart->rfifo);
@@ -187,7 +187,8 @@ static inline void neo_set_ixon_flow_control(struct channel_t *ch)
/* Turn on UART enhanced bits */
writeb(efr, &ch->ch_neo_uart->efr);
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
+ &ch->ch_neo_uart->fctr);
ch->ch_r_watermark = 4;
writeb(32, &ch->ch_neo_uart->rfifo);
@@ -225,7 +226,8 @@ static inline void neo_set_ixoff_flow_control(struct channel_t *ch)
writeb(efr, &ch->ch_neo_uart->efr);
/* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
+ &ch->ch_neo_uart->fctr);
writeb(8, &ch->ch_neo_uart->tfifo);
ch->ch_t_tlevel = 8;
@@ -265,7 +267,8 @@ static inline void neo_set_no_input_flow_control(struct channel_t *ch)
writeb(efr, &ch->ch_neo_uart->efr);
/* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
+ &ch->ch_neo_uart->fctr);
ch->ch_r_watermark = 0;
@@ -302,7 +305,8 @@ static inline void neo_set_no_output_flow_control(struct channel_t *ch)
writeb(efr, &ch->ch_neo_uart->efr);
/* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
+ &ch->ch_neo_uart->fctr);
ch->ch_r_watermark = 0;
@@ -321,7 +325,8 @@ static inline void neo_set_no_output_flow_control(struct channel_t *ch)
static inline void neo_set_new_start_stop_chars(struct channel_t *ch)
{
/* if hardware flow control is set, then skip this whole thing */
- if (ch->ch_digi.digi_flags & (CTSPACE | RTSPACE) || ch->ch_c_cflag & CRTSCTS)
+ if (ch->ch_digi.digi_flags & (CTSPACE | RTSPACE) ||
+ ch->ch_c_cflag & CRTSCTS)
return;
/* Tell UART what start/stop chars it should be looking for */
@@ -351,8 +356,8 @@ static inline void neo_clear_break(struct channel_t *ch, int force)
/* Turn break off, and unset some variables */
if (ch->ch_flags & CH_BREAK_SENDING) {
- if (time_after_eq(jiffies, ch->ch_stop_sending_break)
- || force) {
+ if (force ||
+ time_after_eq(jiffies, ch->ch_stop_sending_break)) {
unsigned char temp = readb(&ch->ch_neo_uart->lcr);
writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr);
@@ -374,14 +379,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
unsigned char cause;
unsigned long flags;
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return;
-
- if (port >= brd->maxports)
- return;
-
ch = brd->channels[port];
- if (ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
/* Here we try to figure out what caused the interrupt to happen */
@@ -393,7 +392,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
break;
/*
- * Yank off the upper 2 bits, which just show that the FIFO's are enabled.
+ * Yank off the upper 2 bits,
+ * which just show that the FIFOs are enabled.
*/
isr &= ~(UART_17158_IIR_FIFO_ENABLED);
@@ -666,7 +666,8 @@ static void neo_param(struct tty_struct *tty)
};
/* Only use the TXPrint baud rate if the terminal unit is NOT open */
- if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGNC_PRINT))
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
+ (un->un_type == DGNC_PRINT))
baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
else
baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
@@ -679,7 +680,8 @@ static void neo_param(struct tty_struct *tty)
jindex = baud;
- if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16))
+ if ((iindex >= 0) && (iindex < 4) &&
+ (jindex >= 0) && (jindex < 16))
baud = bauds[iindex][jindex];
else
baud = 0;
@@ -787,7 +789,8 @@ static void neo_param(struct tty_struct *tty)
neo_set_cts_flow_control(ch);
} else if (ch->ch_c_iflag & IXON) {
/* If start/stop is set to disable, then we should disable flow control */
- if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ if ((ch->ch_startc == _POSIX_VDISABLE) ||
+ (ch->ch_stopc == _POSIX_VDISABLE))
neo_set_no_output_flow_control(ch);
else
neo_set_ixon_flow_control(ch);
@@ -799,7 +802,8 @@ static void neo_param(struct tty_struct *tty)
neo_set_rts_flow_control(ch);
} else if (ch->ch_c_iflag & IXOFF) {
/* If start/stop is set to disable, then we should disable flow control */
- if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ if ((ch->ch_startc == _POSIX_VDISABLE) ||
+ (ch->ch_stopc == _POSIX_VDISABLE))
neo_set_no_input_flow_control(ch);
else
neo_set_ixoff_flow_control(ch);
@@ -910,9 +914,7 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
struct dgnc_board *brd = voidbrd;
struct channel_t *ch;
int port = 0;
- int type = 0;
- int current_port;
- u32 tmp;
+ int type;
u32 uart_poll;
unsigned long flags;
unsigned long flags2;
@@ -947,29 +949,12 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
/* At this point, we have at least SOMETHING to service, dig further... */
- current_port = 0;
-
/* Loop on each port */
while ((uart_poll & 0xff) != 0) {
- tmp = uart_poll;
-
- /* Check current port to see if it has interrupt pending */
- if ((tmp & dgnc_offset_table[current_port]) != 0) {
- port = current_port;
- type = tmp >> (8 + (port * 3));
- type &= 0x7;
- } else {
- current_port++;
- continue;
- }
+ type = uart_poll >> (8 + (port * 3));
+ type &= 0x7;
- /* Remove this port + type from uart_poll */
- uart_poll &= ~(dgnc_offset_table[port]);
-
- if (!type) {
- /* If no type, just ignore it, and move onto next port */
- continue;
- }
+ uart_poll &= ~(0x01 << port);
/* Switch on type of interrupt we have */
switch (type) {
@@ -981,7 +966,7 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
/* Verify the port is in range. */
if (port >= brd->nasync)
- continue;
+ break;
ch = brd->channels[port];
neo_copy_data_from_uart_to_queue(ch);
@@ -991,14 +976,14 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
dgnc_check_queue_flow_control(ch);
spin_unlock_irqrestore(&ch->ch_lock, flags2);
- continue;
+ break;
case UART_17158_RX_LINE_STATUS:
/*
* RXRDY and RX LINE Status (logic OR of LSR[4:1])
*/
neo_parse_lsr(brd, port);
- continue;
+ break;
case UART_17158_TXRDY:
/*
@@ -1014,14 +999,14 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
* it should be, I was getting things like RXDY too. Weird.
*/
neo_parse_isr(brd, port);
- continue;
+ break;
case UART_17158_MSR:
/*
* MSR or flow control was seen.
*/
neo_parse_isr(brd, port);
- continue;
+ break;
default:
/*
@@ -1030,8 +1015,10 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
 * these once in a while.
 * It's harmless, just ignore it and move on.
*/
- continue;
+ break;
}
+
+ port++;
}
/*
@@ -1172,7 +1159,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
linestatus = 0;
/* Copy data from uart to the queue */
- memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n);
+ memcpy_fromio(ch->ch_rqueue + head,
+ &ch->ch_neo_uart->txrxburst, n);
/*
* Since RX_FIFO_DATA_ERROR was 0, we are guaranteed
@@ -1225,7 +1213,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
* we don't miss our TX FIFO emptys.
*/
if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) {
- linestatus &= ~(UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR);
+ linestatus &= ~(UART_LSR_THRE |
+ UART_17158_TX_AND_FIFO_CLR);
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
}
@@ -1255,7 +1244,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
qleft++;
}
- memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1);
+ memcpy_fromio(ch->ch_rqueue + head,
+ &ch->ch_neo_uart->txrxburst, 1);
ch->ch_equeue[head] = (unsigned char)linestatus;
/* Ditch any remaining linestatus value. */
@@ -1328,7 +1318,8 @@ static void neo_flush_uart_write(struct channel_t *ch)
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr);
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT),
+ &ch->ch_neo_uart->isr_fcr);
neo_pci_posting_flush(ch->ch_bd);
for (i = 0; i < 10; i++) {
@@ -1356,7 +1347,8 @@ static void neo_flush_uart_read(struct channel_t *ch)
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_neo_uart->isr_fcr);
+ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR,
+ &ch->ch_neo_uart->isr_fcr);
neo_pci_posting_flush(ch->ch_bd);
for (i = 0; i < 10; i++) {
@@ -1427,7 +1419,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
ch->ch_tun.un_flags |= (UN_EMPTY);
}
- writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_neo_uart->txrx);
+ writeb(ch->ch_wqueue[ch->ch_w_tail],
+ &ch->ch_neo_uart->txrx);
ch->ch_w_tail++;
ch->ch_w_tail &= WQUEUEMASK;
ch->ch_txcount++;
@@ -1494,7 +1487,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
ch->ch_tun.un_flags |= (UN_EMPTY);
}
- memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s);
+ memcpy_toio(&ch->ch_neo_uart->txrxburst,
+ ch->ch_wqueue + tail, s);
/* Add and flip queue if needed */
tail = (tail + s) & WQUEUEMASK;
@@ -1628,7 +1622,8 @@ static void neo_uart_init(struct channel_t *ch)
/* Clear out UART and FIFO */
readb(&ch->ch_neo_uart->txrx);
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr);
+ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
+ &ch->ch_neo_uart->isr_fcr);
readb(&ch->ch_neo_uart->lsr);
readb(&ch->ch_neo_uart->msr);
@@ -1725,7 +1720,8 @@ static void neo_send_immediate_char(struct channel_t *ch, unsigned char c)
neo_pci_posting_flush(ch->ch_bd);
}
-static unsigned int neo_read_eeprom(unsigned char __iomem *base, unsigned int address)
+static unsigned int neo_read_eeprom(unsigned char __iomem *base,
+ unsigned int address)
{
unsigned int enable;
unsigned int bits;
@@ -1783,10 +1779,15 @@ static void neo_vpd(struct dgnc_board *brd)
brd->vpd[(i * 2) + 1] = (a >> 8) & 0xff;
}
- if (((brd->vpd[0x08] != 0x82) /* long resource name tag */
- && (brd->vpd[0x10] != 0x82)) /* long resource name tag (PCI-66 files)*/
- || (brd->vpd[0x7F] != 0x78)) { /* small resource end tag */
-
+ /*
+ * brd->vpd carries resource name tags at these offsets:
+ * 0x08 : long resource name tag
+ * 0x10 : long resource name tag (PCI-66 files)
+ * 0x7F : small resource end tag
+ */
+ if (((brd->vpd[0x08] != 0x82) &&
+ (brd->vpd[0x10] != 0x82)) ||
+ (brd->vpd[0x7F] != 0x78)) {
memset(brd->vpd, '\0', NEO_VPD_IMAGESIZE);
} else {
/* Search for the serial number */
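
Note: the neo_intr() rewrite above drops the dgnc_offset_table lookup and walks the poll register directly — the low byte flags which ports need service, and each port owns a 3-bit interrupt-type field starting at bit 8. A user-space walk-through of that decode, mirroring the new loop (the register value is invented for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int uart_poll = 0x0b03;	/* hypothetical snapshot */
	int port = 0;

	while ((uart_poll & 0xff) != 0) {
		/* 3-bit type field for this port, as in the new loop */
		int type = (uart_poll >> (8 + (port * 3))) & 0x7;

		uart_poll &= ~(0x01 << port);
		printf("port %d: type %d\n", port, type);
		port++;
	}
	return 0;	/* prints: port 0: type 3, then port 1: type 1 */
}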
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
index 74a072599..b8d41c561 100644
--- a/drivers/staging/dgnc/dgnc_sysfs.c
+++ b/drivers/staging/dgnc/dgnc_sysfs.c
@@ -33,7 +33,7 @@ static DRIVER_ATTR(version, S_IRUSR, dgnc_driver_version_show, NULL);
static ssize_t dgnc_driver_boards_show(struct device_driver *ddp, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_NumBoards);
+ return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_num_boards);
}
static DRIVER_ATTR(boards, S_IRUSR, dgnc_driver_boards_show, NULL);
@@ -189,19 +189,21 @@ static ssize_t dgnc_ports_msignals_show(struct device *p,
DGNC_VERIFY_BOARD(p, bd);
for (i = 0; i < bd->nasync; i++) {
- if (bd->channels[i]->ch_open_count) {
+ struct channel_t *ch = bd->channels[i];
+
+ if (ch->ch_open_count) {
count += snprintf(buf + count, PAGE_SIZE - count,
"%d %s %s %s %s %s %s\n",
- bd->channels[i]->ch_portnum,
- (bd->channels[i]->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (bd->channels[i]->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_RI) ? "RI" : "");
+ ch->ch_portnum,
+ (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
+ (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
+ (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
+ (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
+ (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
+ (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
} else {
count += snprintf(buf + count, PAGE_SIZE - count,
- "%d\n", bd->channels[i]->ch_portnum);
+ "%d\n", ch->ch_portnum);
}
}
return count;
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index bcd2bdfb9..4eeecc992 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -176,57 +176,42 @@ int dgnc_tty_preinit(void)
*/
int dgnc_tty_register(struct dgnc_board *brd)
{
- int rc = 0;
-
- brd->SerialDriver.magic = TTY_DRIVER_MAGIC;
+ int rc;
- snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgnc_%d_", brd->boardnum);
+ brd->serial_driver = tty_alloc_driver(brd->maxports,
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_HARDWARE_BREAK);
- brd->SerialDriver.name = brd->SerialName;
- brd->SerialDriver.name_base = 0;
- brd->SerialDriver.major = 0;
- brd->SerialDriver.minor_start = 0;
- brd->SerialDriver.num = brd->maxports;
- brd->SerialDriver.type = TTY_DRIVER_TYPE_SERIAL;
- brd->SerialDriver.subtype = SERIAL_TYPE_NORMAL;
- brd->SerialDriver.init_termios = DgncDefaultTermios;
- brd->SerialDriver.driver_name = DRVSTR;
- brd->SerialDriver.flags = (TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_HARDWARE_BREAK);
+ if (IS_ERR(brd->serial_driver))
+ return PTR_ERR(brd->serial_driver);
- /*
- * The kernel wants space to store pointers to
- * tty_struct's and termios's.
- */
- brd->SerialDriver.ttys = kcalloc(brd->maxports,
- sizeof(*brd->SerialDriver.ttys),
- GFP_KERNEL);
- if (!brd->SerialDriver.ttys)
- return -ENOMEM;
+ snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgnc_%d_", brd->boardnum);
- kref_init(&brd->SerialDriver.kref);
- brd->SerialDriver.termios = kcalloc(brd->maxports,
- sizeof(*brd->SerialDriver.termios),
- GFP_KERNEL);
- if (!brd->SerialDriver.termios)
- return -ENOMEM;
+ brd->serial_driver->name = brd->serial_name;
+ brd->serial_driver->name_base = 0;
+ brd->serial_driver->major = 0;
+ brd->serial_driver->minor_start = 0;
+ brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ brd->serial_driver->init_termios = DgncDefaultTermios;
+ brd->serial_driver->driver_name = DRVSTR;
/*
* Entry points for driver. Called by the kernel from
* tty_io.c and n_tty.c.
*/
- tty_set_operations(&brd->SerialDriver, &dgnc_tty_ops);
+ tty_set_operations(brd->serial_driver, &dgnc_tty_ops);
- if (!brd->dgnc_Major_Serial_Registered) {
+ if (!brd->dgnc_major_serial_registered) {
/* Register tty devices */
- rc = tty_register_driver(&brd->SerialDriver);
+ rc = tty_register_driver(brd->serial_driver);
if (rc < 0) {
dev_dbg(&brd->pdev->dev,
"Can't register tty device (%d)\n", rc);
- return rc;
+ goto free_serial_driver;
}
- brd->dgnc_Major_Serial_Registered = true;
+ brd->dgnc_major_serial_registered = true;
}
/*
@@ -234,60 +219,55 @@ int dgnc_tty_register(struct dgnc_board *brd)
* again, separately so we don't get the LD confused about what major
* we are when we get into the dgnc_tty_open() routine.
*/
- brd->PrintDriver.magic = TTY_DRIVER_MAGIC;
- snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgnc_%d_", brd->boardnum);
-
- brd->PrintDriver.name = brd->PrintName;
- brd->PrintDriver.name_base = 0;
- brd->PrintDriver.major = brd->SerialDriver.major;
- brd->PrintDriver.minor_start = 0x80;
- brd->PrintDriver.num = brd->maxports;
- brd->PrintDriver.type = TTY_DRIVER_TYPE_SERIAL;
- brd->PrintDriver.subtype = SERIAL_TYPE_NORMAL;
- brd->PrintDriver.init_termios = DgncDefaultTermios;
- brd->PrintDriver.driver_name = DRVSTR;
- brd->PrintDriver.flags = (TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_HARDWARE_BREAK);
+ brd->print_driver = tty_alloc_driver(brd->maxports,
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_HARDWARE_BREAK);
+
+ if (IS_ERR(brd->print_driver)) {
+ rc = PTR_ERR(brd->print_driver);
+ goto unregister_serial_driver;
+ }
- /*
- * The kernel wants space to store pointers to
- * tty_struct's and termios's. Must be separated from
- * the Serial Driver so we don't get confused
- */
- brd->PrintDriver.ttys = kcalloc(brd->maxports,
- sizeof(*brd->PrintDriver.ttys),
- GFP_KERNEL);
- if (!brd->PrintDriver.ttys)
- return -ENOMEM;
- kref_init(&brd->PrintDriver.kref);
- brd->PrintDriver.termios = kcalloc(brd->maxports,
- sizeof(*brd->PrintDriver.termios),
- GFP_KERNEL);
- if (!brd->PrintDriver.termios)
- return -ENOMEM;
+ snprintf(brd->print_name, MAXTTYNAMELEN, "pr_dgnc_%d_", brd->boardnum);
+
+ brd->print_driver->name = brd->print_name;
+ brd->print_driver->name_base = 0;
+ brd->print_driver->major = brd->serial_driver->major;
+ brd->print_driver->minor_start = 0x80;
+ brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
+ brd->print_driver->init_termios = DgncDefaultTermios;
+ brd->print_driver->driver_name = DRVSTR;
/*
* Entry points for driver. Called by the kernel from
* tty_io.c and n_tty.c.
*/
- tty_set_operations(&brd->PrintDriver, &dgnc_tty_ops);
+ tty_set_operations(brd->print_driver, &dgnc_tty_ops);
- if (!brd->dgnc_Major_TransparentPrint_Registered) {
+ if (!brd->dgnc_major_transparent_print_registered) {
/* Register Transparent Print devices */
- rc = tty_register_driver(&brd->PrintDriver);
+ rc = tty_register_driver(brd->print_driver);
if (rc < 0) {
dev_dbg(&brd->pdev->dev,
"Can't register Transparent Print device(%d)\n",
rc);
- return rc;
+ goto free_print_driver;
}
- brd->dgnc_Major_TransparentPrint_Registered = true;
+ brd->dgnc_major_transparent_print_registered = true;
}
- dgnc_BoardsByMajor[brd->SerialDriver.major] = brd;
- brd->dgnc_Serial_Major = brd->SerialDriver.major;
- brd->dgnc_TransparentPrint_Major = brd->PrintDriver.major;
+ dgnc_BoardsByMajor[brd->serial_driver->major] = brd;
+
+ return 0;
+
+free_print_driver:
+ put_tty_driver(brd->print_driver);
+unregister_serial_driver:
+ tty_unregister_driver(brd->serial_driver);
+free_serial_driver:
+ put_tty_driver(brd->serial_driver);
return rc;
}
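
The hunk above converts the driver from embedded struct tty_driver
instances (with hand-allocated ttys/termios arrays) to the refcounted
tty_alloc_driver()/put_tty_driver() API, unwinding through goto labels
on failure. A minimal sketch of that pattern, with illustrative names
(the port count and ops table are placeholders, not from this driver):

	struct tty_driver *drv;
	int rc;

	drv = tty_alloc_driver(nports, TTY_DRIVER_REAL_RAW |
			       TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(drv))
		return PTR_ERR(drv);	/* allocation errors come back as ERR_PTR */

	drv->driver_name = "example";
	tty_set_operations(drv, &example_tty_ops);

	rc = tty_register_driver(drv);
	if (rc < 0) {
		put_tty_driver(drv);	/* drop the reference on failure */
		return rc;
	}
	return 0;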
@@ -364,12 +344,12 @@ int dgnc_tty_init(struct dgnc_board *brd)
{
struct device *classp;
- classp = tty_register_device(&brd->SerialDriver, i,
+ classp = tty_register_device(brd->serial_driver, i,
&ch->ch_bd->pdev->dev);
ch->ch_tun.un_sysfs = classp;
dgnc_create_tty_sysfs(&ch->ch_tun, classp);
- classp = tty_register_device(&brd->PrintDriver, i,
+ classp = tty_register_device(brd->print_driver, i,
&ch->ch_bd->pdev->dev);
ch->ch_pun.un_sysfs = classp;
dgnc_create_tty_sysfs(&ch->ch_pun, classp);
@@ -407,40 +387,32 @@ void dgnc_tty_uninit(struct dgnc_board *brd)
{
int i = 0;
- if (brd->dgnc_Major_Serial_Registered) {
- dgnc_BoardsByMajor[brd->SerialDriver.major] = NULL;
- brd->dgnc_Serial_Major = 0;
+ if (brd->dgnc_major_serial_registered) {
+ dgnc_BoardsByMajor[brd->serial_driver->major] = NULL;
for (i = 0; i < brd->nasync; i++) {
if (brd->channels[i])
dgnc_remove_tty_sysfs(brd->channels[i]->
ch_tun.un_sysfs);
- tty_unregister_device(&brd->SerialDriver, i);
+ tty_unregister_device(brd->serial_driver, i);
}
- tty_unregister_driver(&brd->SerialDriver);
- brd->dgnc_Major_Serial_Registered = false;
+ tty_unregister_driver(brd->serial_driver);
+ brd->dgnc_major_serial_registered = false;
}
- if (brd->dgnc_Major_TransparentPrint_Registered) {
- dgnc_BoardsByMajor[brd->PrintDriver.major] = NULL;
- brd->dgnc_TransparentPrint_Major = 0;
+ if (brd->dgnc_major_transparent_print_registered) {
+ dgnc_BoardsByMajor[brd->print_driver->major] = NULL;
for (i = 0; i < brd->nasync; i++) {
if (brd->channels[i])
dgnc_remove_tty_sysfs(brd->channels[i]->
ch_pun.un_sysfs);
- tty_unregister_device(&brd->PrintDriver, i);
+ tty_unregister_device(brd->print_driver, i);
}
- tty_unregister_driver(&brd->PrintDriver);
- brd->dgnc_Major_TransparentPrint_Registered = false;
+ tty_unregister_driver(brd->print_driver);
+ brd->dgnc_major_transparent_print_registered = false;
}
- kfree(brd->SerialDriver.ttys);
- brd->SerialDriver.ttys = NULL;
- kfree(brd->SerialDriver.termios);
- brd->SerialDriver.termios = NULL;
- kfree(brd->PrintDriver.ttys);
- brd->PrintDriver.ttys = NULL;
- kfree(brd->PrintDriver.termios);
- brd->PrintDriver.termios = NULL;
+ put_tty_driver(brd->serial_driver);
+ put_tty_driver(brd->print_driver);
}
/*
@@ -606,6 +578,8 @@ void dgnc_input(struct channel_t *ch)
* or the amount of data the card actually has pending...
*/
while (n) {
+ unsigned char *rqueue_pos = ch->ch_rqueue + tail;
+ unsigned char *equeue_pos = ch->ch_equeue + tail;
+
s = ((head >= tail) ? head : RQUEUESIZE) - tail;
s = min(s, n);
@@ -620,29 +594,20 @@ void dgnc_input(struct channel_t *ch)
*/
if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
for (i = 0; i < s; i++) {
- if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
- tty_insert_flip_char(tp->port,
- *(ch->ch_rqueue + tail + i),
- TTY_BREAK);
- else if (*(ch->ch_equeue + tail + i) &
- UART_LSR_PE)
- tty_insert_flip_char(tp->port,
- *(ch->ch_rqueue + tail + i),
- TTY_PARITY);
- else if (*(ch->ch_equeue + tail + i) &
- UART_LSR_FE)
- tty_insert_flip_char(tp->port,
- *(ch->ch_rqueue + tail + i),
- TTY_FRAME);
- else
- tty_insert_flip_char(tp->port,
- *(ch->ch_rqueue + tail + i),
- TTY_NORMAL);
+ unsigned char c = *(rqueue_pos + i);
+ unsigned char err = *(equeue_pos + i);
+ char flag = TTY_NORMAL;
+
+ if (err & UART_LSR_BI)
+ flag = TTY_BREAK;
+ else if (err & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (err & UART_LSR_FE)
+ flag = TTY_FRAME;
+
+ tty_insert_flip_char(tp->port, c, flag);
}
} else {
- tty_insert_flip_string(tp->port,
- ch->ch_rqueue + tail,
- s);
+ tty_insert_flip_string(tp->port, rqueue_pos, s);
}
tail += s;
@@ -1117,6 +1082,14 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
if (!ch->ch_wqueue)
ch->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL);
+ if (!ch->ch_rqueue || !ch->ch_equeue || !ch->ch_wqueue) {
+ /* Reset the pointers so a later open retries the allocation */
+ kfree(ch->ch_rqueue);
+ ch->ch_rqueue = NULL;
+ kfree(ch->ch_equeue);
+ ch->ch_equeue = NULL;
+ kfree(ch->ch_wqueue);
+ ch->ch_wqueue = NULL;
+
+ return -ENOMEM;
+ }
+
spin_lock_irqsave(&ch->ch_lock, flags);
ch->ch_flags &= ~(CH_OPENING);
@@ -1255,7 +1228,7 @@ static int dgnc_block_til_ready(struct tty_struct *tty,
if (file->f_flags & O_NONBLOCK)
break;
- if (tty->flags & (1 << TTY_IO_ERROR)) {
+ if (tty_io_error(tty)) {
retval = -EIO;
break;
}
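
The open-coded flag test above becomes the tty_io_error() helper, which
in this kernel is essentially (sketch of the helper from
include/linux/tty.h, not part of this driver):

	static inline bool tty_io_error(struct tty_struct *tty)
	{
		return test_bit(TTY_IO_ERROR, &tty->flags);
	}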
@@ -1539,19 +1512,8 @@ static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)
*/
static int dgnc_maxcps_room(struct tty_struct *tty, int bytes_available)
{
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
-
- if (!tty)
- return bytes_available;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return bytes_available;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return bytes_available;
+ struct un_t *un = tty->driver_data;
+ struct channel_t *ch = un->un_ch;
/*
* If its not the Transparent print device, return
@@ -2058,17 +2020,7 @@ static inline int dgnc_get_mstat(struct channel_t *ch)
static int dgnc_get_modem_info(struct channel_t *ch,
unsigned int __user *value)
{
- int result;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return -ENXIO;
-
- result = dgnc_get_mstat(ch);
-
- if (result < 0)
- return -ENXIO;
-
- return put_user(result, value);
+ return put_user(dgnc_get_mstat(ch), value);
}
/*
@@ -2529,6 +2481,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct dgnc_board *bd;
+ struct board_ops *ch_bd_ops;
struct channel_t *ch;
struct un_t *un;
int rc;
@@ -2550,6 +2503,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
return -ENODEV;
+ ch_bd_ops = bd->bd_ops;
+
spin_lock_irqsave(&ch->ch_lock, flags);
if (un->un_open_count <= 0) {
@@ -2574,7 +2529,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (rc)
return rc;
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
@@ -2582,7 +2537,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
spin_lock_irqsave(&ch->ch_lock, flags);
if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP))
- ch->ch_bd->bd_ops->send_break(ch, 250);
+ ch_bd_ops->send_break(ch, 250);
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -2599,13 +2554,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (rc)
return rc;
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_bd->bd_ops->send_break(ch, 250);
+ ch_bd_ops->send_break(ch, 250);
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -2617,13 +2572,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (rc)
return rc;
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_bd->bd_ops->send_break(ch, 250);
+ ch_bd_ops->send_break(ch, 250);
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -2652,7 +2607,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
spin_lock_irqsave(&ch->ch_lock, flags);
tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) |
(arg ? CLOCAL : 0));
- ch->ch_bd->bd_ops->param(tty);
+ ch_bd_ops->param(tty);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
@@ -2689,7 +2644,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
ch->ch_r_head = ch->ch_r_tail;
- ch->ch_bd->bd_ops->flush_uart_read(ch);
+ ch_bd_ops->flush_uart_read(ch);
/* Force queue flow control to be released, if needed */
dgnc_check_queue_flow_control(ch);
}
@@ -2697,9 +2652,9 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if ((arg == TCOFLUSH) || (arg == TCIOFLUSH)) {
if (!(un->un_type == DGNC_PRINT)) {
ch->ch_w_head = ch->ch_w_tail;
- ch->ch_bd->bd_ops->flush_uart_write(ch);
+ ch_bd_ops->flush_uart_write(ch);
- if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) {
ch->ch_tun.un_flags &=
~(UN_LOW | UN_EMPTY);
wake_up_interruptible(&ch->ch_tun.un_flags_wait);
@@ -2731,14 +2686,14 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/* flush rx */
ch->ch_flags &= ~CH_STOP;
ch->ch_r_head = ch->ch_r_tail;
- ch->ch_bd->bd_ops->flush_uart_read(ch);
+ ch_bd_ops->flush_uart_read(ch);
/* Force queue flow control to be released, if needed */
dgnc_check_queue_flow_control(ch);
}
/* now wait for all the output to drain */
spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
@@ -2748,7 +2703,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
case TCSETAW:
spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
@@ -2771,7 +2726,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/* set information for ditty */
if (cmd == (DIGI_SETAW)) {
spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
@@ -2804,7 +2759,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
else
ch->ch_flags &= ~(CH_LOOPBACK);
- ch->ch_bd->bd_ops->param(tty);
+ ch_bd_ops->param(tty);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
}
@@ -2824,7 +2779,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
return rc;
spin_lock_irqsave(&ch->ch_lock, flags);
dgnc_set_custom_speed(ch, new_rate);
- ch->ch_bd->bd_ops->param(tty);
+ ch_bd_ops->param(tty);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
}
@@ -2845,7 +2800,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (rc)
return rc;
spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_bd->bd_ops->send_immediate_char(ch, c);
+ ch_bd_ops->send_immediate_char(ch, c);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
}
@@ -2933,13 +2888,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/*
* Is the UART empty? Add that value to whats in our TX queue.
*/
- count = buf.txbuf + ch->ch_bd->bd_ops->get_uart_bytes_left(ch);
+ count = buf.txbuf + ch_bd_ops->get_uart_bytes_left(ch);
/*
* Figure out how much data the RealPort Server believes should
* be in our TX queue.
*/
- tdist = (buf.tIn - buf.tOut) & 0xffff;
+ tdist = (buf.tx_in - buf.tx_out) & 0xffff;
/*
* If we have more data than the RealPort Server believes we
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
index 523a2d34f..5b983e6f5 100644
--- a/drivers/staging/dgnc/digi.h
+++ b/drivers/staging/dgnc/digi.h
@@ -109,8 +109,8 @@ struct digi_info {
struct digi_getbuffer /* Struct for holding buffer use counts */
{
- unsigned long tIn;
- unsigned long tOut;
+ unsigned long tx_in;
+ unsigned long tx_out;
unsigned long rxbuf;
unsigned long txbuf;
unsigned long txdone;
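
The tx_in/tx_out counters above are free-running, so masking their
difference with 0xffff yields the amount of data in flight even across a
16-bit wrap. A worked example with illustrative values:

	unsigned long tx_in = 0x0005;	/* producer has wrapped past 0xffff */
	unsigned long tx_out = 0xfffe;	/* consumer has not wrapped yet */
	unsigned long tdist = (tx_in - tx_out) & 0xffff;	/* = 7 */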
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index e8cacaecf..3bd91758b 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -418,9 +418,9 @@ static void _nbu2ss_ep_dma_abort(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
{
struct fc_regs *preg = udc->p_regs;
- _nbu2ss_bitclr(&preg->EP_DCR[ep->epnum-1].EP_DCR1, DCR1_EPn_REQEN);
+ _nbu2ss_bitclr(&preg->EP_DCR[ep->epnum - 1].EP_DCR1, DCR1_EPn_REQEN);
mdelay(DMA_DISABLE_TIME); /* DCR1_EPn_REQEN Clear */
- _nbu2ss_bitclr(&preg->EP_REGS[ep->epnum-1].EP_DMA_CTRL, EPn_DMA_EN);
+ _nbu2ss_bitclr(&preg->EP_REGS[ep->epnum - 1].EP_DMA_CTRL, EPn_DMA_EN);
}
/*-------------------------------------------------------------------------*/
@@ -909,7 +909,7 @@ static int _nbu2ss_epn_out_pio(
/* Copy of every four bytes */
for (i = 0; i < iWordLength; i++) {
pBuf32->dw =
- _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_READ);
+ _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
pBuf32++;
}
result = iWordLength * sizeof(u32);
@@ -919,7 +919,7 @@ static int _nbu2ss_epn_out_pio(
if (data > 0) {
/*---------------------------------------------------------*/
/* Copy of fraction byte */
- Temp32.dw = _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_READ);
+ Temp32.dw = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
for (i = 0 ; i < data ; i++)
pBuf32->byte.DATA[i] = Temp32.byte.DATA[i];
result += data;
@@ -1128,7 +1128,7 @@ static int _nbu2ss_epn_in_pio(
if (iWordLength > 0) {
for (i = 0; i < iWordLength; i++) {
_nbu2ss_writel(
- &preg->EP_REGS[ep->epnum-1].EP_WRITE
+ &preg->EP_REGS[ep->epnum - 1].EP_WRITE
, pBuf32->dw
);
@@ -1290,7 +1290,7 @@ static void _nbu2ss_restert_transfer(struct nbu2ss_ep *ep)
if (ep->epnum > 0) {
length = _nbu2ss_readl(
- &ep->udc->p_regs->EP_REGS[ep->epnum-1].EP_LEN_DCNT);
+ &ep->udc->p_regs->EP_REGS[ep->epnum - 1].EP_LEN_DCNT);
length &= EPn_LDATA;
if (length < ep->ep.maxpacket)
@@ -1463,7 +1463,7 @@ static int _nbu2ss_get_ep_stall(struct nbu2ss_udc *udc, u8 ep_adrs)
bit_data = EP0_STL;
} else {
- data = _nbu2ss_readl(&preg->EP_REGS[epnum-1].EP_CONTROL);
+ data = _nbu2ss_readl(&preg->EP_REGS[epnum - 1].EP_CONTROL);
if ((data & EPn_EN) == 0)
return -1;
@@ -1558,7 +1558,7 @@ static void _nbu2ss_epn_set_stall(
; limit_cnt++) {
regdata = _nbu2ss_readl(
- &preg->EP_REGS[ep->epnum-1].EP_STATUS);
+ &preg->EP_REGS[ep->epnum - 1].EP_STATUS);
if ((regdata & EPn_IN_DATA) == 0)
break;
@@ -1983,7 +1983,7 @@ static inline void _nbu2ss_epn_in_int(
if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) {
status =
- _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_STATUS);
+ _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
if ((status & EPn_IN_FULL) == 0) {
/*-----------------------------------------*/
@@ -2894,7 +2894,7 @@ static int nbu2ss_ep_fifo_status(struct usb_ep *_ep)
data = _nbu2ss_readl(&preg->EP0_LENGTH) & EP0_LDATA;
} else {
- data = _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_LEN_DCNT)
+ data = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_LEN_DCNT)
& EPn_LDATA;
}
@@ -3051,7 +3051,7 @@ static int nbu2ss_gad_vbus_session(struct usb_gadget *pgadget, int is_active)
}
/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_vbus_draw(struct usb_gadget *pgadget, unsigned mA)
+static int nbu2ss_gad_vbus_draw(struct usb_gadget *pgadget, unsigned int mA)
{
struct nbu2ss_udc *udc;
unsigned long flags;
@@ -3101,7 +3101,7 @@ static int nbu2ss_gad_pullup(struct usb_gadget *pgadget, int is_on)
/*-------------------------------------------------------------------------*/
static int nbu2ss_gad_ioctl(
struct usb_gadget *pgadget,
- unsigned code,
+ unsigned int code,
unsigned long param)
{
return 0;
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
index 4a2cc38de..39769e3a8 100644
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ b/drivers/staging/emxx_udc/emxx_udc.h
@@ -97,7 +97,7 @@
#define BIT30 0x40000000
#define BIT31 0x80000000
-#define TEST_FORCE_ENABLE (BIT18+BIT16)
+#define TEST_FORCE_ENABLE (BIT18 + BIT16)
#define INT_SEL BIT10
#define CONSTFS BIT09
@@ -125,15 +125,15 @@
/*------- (0x0008) USB Address Register */
#define USB_ADDR 0x007F0000
#define SOF_STATUS BIT15
-#define UFRAME (BIT14+BIT13+BIT12)
+#define UFRAME (BIT14 + BIT13 + BIT12)
#define FRAME 0x000007FF
#define USB_ADRS_SHIFT 16
/*------- (0x000C) UTMI Characteristic 1 Register */
-#define SQUSET (BIT07+BIT06+BIT05+BIT04)
+#define SQUSET (BIT07 + BIT06 + BIT05 + BIT04)
-#define USB_SQUSET (BIT06+BIT05+BIT04)
+#define USB_SQUSET (BIT06 + BIT05 + BIT04)
/*------- (0x0010) TEST Control Register */
#define FORCEHS BIT02
@@ -196,7 +196,7 @@
#define RSUM_EN BIT01
#define USB_INT_EN_BIT \
- (EP0_EN|SPEED_MODE_EN|USB_RST_EN|SPND_EN|RSUM_EN)
+ (EP0_EN | SPEED_MODE_EN | USB_RST_EN | SPND_EN | RSUM_EN)
/*------- (0x0028) EP0 Control Register */
#define EP0_STGSEL BIT18
@@ -205,9 +205,9 @@
#define EP0_PIDCLR BIT09
#define EP0_BCLR BIT08
#define EP0_DEND BIT07
-#define EP0_DW (BIT06+BIT05)
+#define EP0_DW (BIT06 + BIT05)
#define EP0_DW4 0
-#define EP0_DW3 (BIT06+BIT05)
+#define EP0_DW3 (BIT06 + BIT05)
#define EP0_DW2 BIT06
#define EP0_DW1 BIT05
@@ -238,7 +238,7 @@
#define STG_START_INT BIT01
#define SETUP_INT BIT00
-#define EP0_STATUS_RW_BIT (BIT16|BIT15|BIT11|0xFF)
+#define EP0_STATUS_RW_BIT (BIT16 | BIT15 | BIT11 | 0xFF)
/*------- (0x0030) EP0 Interrupt Enable Register */
#define EP0_PERR_NAK_EN BIT16
@@ -256,7 +256,7 @@
#define SETUP_EN BIT00
#define EP0_INT_EN_BIT \
- (EP0_OUT_OR_EN|EP0_OUT_EN|EP0_IN_EN|STG_END_EN|SETUP_EN)
+ (EP0_OUT_OR_EN | EP0_OUT_EN | EP0_IN_EN | STG_END_EN | SETUP_EN)
/*------- (0x0034) EP0 Length Register */
#define EP0_LDATA 0x0000007F
@@ -270,7 +270,7 @@
#define EPn_BUF_SINGLE BIT30
#define EPn_DIR0 BIT26
-#define EPn_MODE (BIT25+BIT24)
+#define EPn_MODE (BIT25 + BIT24)
#define EPn_BULK 0
#define EPn_INTERRUPT BIT24
#define EPn_ISO BIT25
@@ -283,9 +283,9 @@
#define EPn_BCLR BIT09
#define EPn_CBCLR BIT08
#define EPn_DEND BIT07
-#define EPn_DW (BIT06+BIT05)
+#define EPn_DW (BIT06 + BIT05)
#define EPn_DW4 0
-#define EPn_DW3 (BIT06+BIT05)
+#define EPn_DW3 (BIT06 + BIT05)
#define EPn_DW2 BIT06
#define EPn_DW1 BIT05
@@ -324,7 +324,7 @@
#define EPn_IN_EMPTY BIT00 /* R */
#define EPn_INT_EN \
- (EPn_OUT_END_INT|EPn_OUT_INT|EPn_IN_END_INT|EPn_IN_INT)
+ (EPn_OUT_END_INT | EPn_OUT_INT | EPn_IN_END_INT | EPn_IN_INT)
/*------- (0x0048:) EPn Interrupt Enable Register */
#define EPn_OUT_END_EN BIT23 /* RW */
@@ -368,7 +368,7 @@
#define ARBITER_CTR BIT31 /* RW */
#define MCYCLE_RST BIT12 /* RW */
-#define ENDIAN_CTR (BIT09+BIT08) /* RW */
+#define ENDIAN_CTR (BIT09 + BIT08) /* RW */
#define ENDIAN_BYTE_SWAP BIT09
#define ENDIAN_HALF_WORD_SWAP ENDIAN_CTR
@@ -376,7 +376,7 @@
#define HTRANS_MODE BIT04 /* RW */
#define WBURST_TYPE BIT02 /* RW */
-#define BURST_TYPE (BIT01+BIT00) /* RW */
+#define BURST_TYPE (BIT01 + BIT00) /* RW */
#define BURST_MAX_16 0
#define BURST_MAX_8 BIT00
#define BURST_MAX_4 BIT01
@@ -412,7 +412,7 @@
#define EPC_RST BIT00 /* RW */
/*------- (0x1014) USBF_EPTEST Register */
-#define LINESTATE (BIT09+BIT08) /* R */
+#define LINESTATE (BIT09 + BIT08) /* R */
#define DM_LEVEL BIT09 /* R */
#define DP_LEVEL BIT08 /* R */
@@ -485,7 +485,7 @@ struct fc_regs {
struct ep_regs EP_REGS[REG_EP_NUM]; /* Endpoint Register */
- u8 Reserved220[0x1000-0x220]; /* (0x0220:0x0FFF) Reserved */
+ u8 Reserved220[0x1000 - 0x220]; /* (0x0220:0x0FFF) Reserved */
u32 AHBSCTR; /* (0x1000) AHBSCTR */
u32 AHBMCTR; /* (0x1004) AHBMCTR */
@@ -494,16 +494,16 @@ struct fc_regs {
u32 EPCTR; /* (0x1010) EPCTR */
u32 USBF_EPTEST; /* (0x1014) USBF_EPTEST */
- u8 Reserved1018[0x20-0x18]; /* (0x1018:0x101F) Reserved */
+ u8 Reserved1018[0x20 - 0x18]; /* (0x1018:0x101F) Reserved */
u32 USBSSVER; /* (0x1020) USBSSVER */
u32 USBSSCONF; /* (0x1024) USBSSCONF */
- u8 Reserved1028[0x110-0x28]; /* (0x1028:0x110F) Reserved */
+ u8 Reserved1028[0x110 - 0x28]; /* (0x1028:0x110F) Reserved */
struct ep_dcr EP_DCR[REG_EP_NUM]; /* */
- u8 Reserved1200[0x1000-0x200]; /* Reserved */
+ u8 Reserved1200[0x1000 - 0x200]; /* Reserved */
} __aligned(32);
#define EP0_PACKETSIZE 64
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index ba9fc444b..82b46cd27 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -414,7 +414,7 @@ static int write(struct fbtft_par *par, void *buf, size_t len)
while (len--) {
u8 i, data;
- data = *(u8 *) buf++;
+ data = *(u8 *)buf++;
/* set data bus */
for (i = 0; i < 8; ++i)
diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
index a6f091fb9..4dcea2e0b 100644
--- a/drivers/staging/fbtft/fbtft-io.c
+++ b/drivers/staging/fbtft/fbtft-io.c
@@ -141,7 +141,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
"%s(len=%d): ", __func__, len);
while (len--) {
- data = *(u8 *) buf;
+ data = *(u8 *)buf;
/* Start writing by pulling down /WR */
gpio_set_value(par->gpio.wr, 0);
@@ -170,7 +170,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
gpio_set_value(par->gpio.wr, 1);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- prev_data = *(u8 *) buf;
+ prev_data = *(u8 *)buf;
#endif
buf++;
}
@@ -191,7 +191,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
"%s(len=%d): ", __func__, len);
while (len) {
- data = *(u16 *) buf;
+ data = *(u16 *)buf;
/* Start writing by pulling down /WR */
gpio_set_value(par->gpio.wr, 0);
@@ -220,7 +220,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
gpio_set_value(par->gpio.wr, 1);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- prev_data = *(u16 *) buf;
+ prev_data = *(u16 *)buf;
#endif
buf += 2;
len -= 2;
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index 241d7c6be..e4a355aef 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -1254,7 +1254,7 @@ static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
"%s(len=%d): ", __func__, len);
while (len) {
- data = *(u16 *) buf;
+ data = *(u16 *)buf;
/* Start writing by pulling down /WR */
gpio_set_value(par->gpio.wr, 0);
@@ -1283,7 +1283,7 @@ static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
gpio_set_value(par->gpio.wr, 1);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- prev_data = *(u16 *) buf;
+ prev_data = *(u16 *)buf;
#endif
buf += 2;
len -= 2;
@@ -1436,7 +1436,7 @@ static int __init fbtft_device_init(void)
}
strncpy(fbtft_device_param_gpios[i].name, p_name,
FBTFT_GPIO_NAME_SIZE - 1);
- fbtft_device_param_gpios[i++].gpio = (int) val;
+ fbtft_device_param_gpios[i++].gpio = (int)val;
if (i == MAX_GPIOS) {
pr_err("gpios parameter: exceeded max array size: %d\n",
MAX_GPIOS);
diff --git a/drivers/staging/fsl-mc/README.txt b/drivers/staging/fsl-mc/README.txt
index 8214102f1..179536a9b 100644
--- a/drivers/staging/fsl-mc/README.txt
+++ b/drivers/staging/fsl-mc/README.txt
@@ -11,11 +11,11 @@ Contents summary
-Overview of DPAA2 objects
-DPAA2 Linux driver architecture overview
-bus driver
- -dprc driver
+ -DPRC driver
-allocator
- -dpio driver
+ -DPIO driver
-Ethernet
- -mac
+ -MAC
DPAA2 Overview
--------------
@@ -37,6 +37,9 @@ interfaces, an L2 switch, or accelerator instances.
The MC provides memory-mapped I/O command interfaces (MC portals)
which DPAA2 software drivers use to operate on DPAA2 objects:
+The diagram below shows an overview of the DPAA2 resource management
+architecture:
+
+--------------------------------------+
| OS |
| DPAA2 drivers |
@@ -77,13 +80,13 @@ DPIO objects.
Overview of DPAA2 Objects
-------------------------
-The section provides a brief overview of some key objects
-in the DPAA2 hardware. A simple scenario is described illustrating
-the objects involved in creating a network interfaces.
+This section provides a brief overview of some key DPAA2 objects.
+A simple scenario is described illustrating the objects involved
+in creating a network interface.
-DPRC (Datapath Resource Container)
- A DPRC is an container object that holds all the other
+ A DPRC is a container object that holds all the other
types of DPAA2 objects. In the example diagram below there
are 8 objects of 5 types (DPMCP, DPIO, DPBP, DPNI, and DPMAC)
in the container.
@@ -101,23 +104,23 @@ the objects involved in creating a network interfaces.
| |
+---------------------------------------------------------+
- From the point of view of an OS, a DPRC is bus-like. Like
- a plug-and-play bus, such as PCI, DPRC commands can be used to
- enumerate the contents of the DPRC, discover the hardware
- objects present (including mappable regions and interrupts).
+ From the point of view of an OS, a DPRC behaves similarly to a plug and
+ play bus, like PCI. DPRC commands can be used to enumerate the contents
+ of the DPRC and discover the hardware objects present (including
+ mappable regions and interrupts).
- dprc.1 (bus)
+ DPRC.1 (bus)
|
+--+--------+-------+-------+-------+
| | | | |
- dpmcp.1 dpio.1 dpbp.1 dpni.1 dpmac.1
- dpmcp.2 dpio.2
- dpmcp.3
+ DPMCP.1 DPIO.1 DPBP.1 DPNI.1 DPMAC.1
+ DPMCP.2 DPIO.2
+ DPMCP.3
Hardware objects can be created and destroyed dynamically, providing
the ability to hot plug/unplug objects in and out of the DPRC.
- A DPRC has a mappable mmio region (an MC portal) that can be used
+ A DPRC has a mappable MMIO region (an MC portal) that can be used
to send MC commands. It has an interrupt for status events (like
hotplug).
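
The bus-like enumeration described above maps onto the DPRC command
wrappers seen elsewhere in this patch (dprc_open(), dprc_get_obj()). A
minimal sketch of a container scan, with error handling elided and
assuming the wrapper signatures from this driver's dprc.c:

	u16 token;
	int i, obj_count;
	struct dprc_obj_desc obj_desc;

	dprc_open(mc_io, 0, container_id, &token);
	dprc_get_obj_count(mc_io, 0, token, &obj_count);

	for (i = 0; i < obj_count; i++) {
		/* each descriptor names an object, e.g. type "dpni", id 1 */
		dprc_get_obj(mc_io, 0, token, i, &obj_desc);
	}

	dprc_close(mc_io, 0, token);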
@@ -137,10 +140,11 @@ the objects involved in creating a network interfaces.
A typical Ethernet NIC is monolithic-- the NIC device contains TX/RX
queuing mechanisms, configuration mechanisms, buffer management,
physical ports, and interrupts. DPAA2 uses a more granular approach
- utilizing multiple hardware objects. Each object has specialized
- functions, and are used together by software to provide Ethernet network
- interface functionality. This approach provides efficient use of finite
- hardware resources, flexibility, and performance advantages.
+ utilizing multiple hardware objects. Each object provides specialized
+ functions. Groups of these objects are used by software to provide
+ Ethernet network interface functionality. This approach provides
+ efficient use of finite hardware resources, flexibility, and
+ performance advantages.
The diagram below shows the objects needed for a simple
network interface configuration on a system with 2 CPUs.
@@ -168,46 +172,52 @@ the objects involved in creating a network interfaces.
Below the objects are described. For each object a brief description
is provided along with a summary of the kinds of operations the object
- supports and a summary of key resources of the object (mmio regions
- and irqs).
+ supports and a summary of key resources of the object (MMIO regions
+ and IRQs).
-DPMAC (Datapath Ethernet MAC): represents an Ethernet MAC, a
hardware device that connects to an Ethernet PHY and allows
physical transmission and reception of Ethernet frames.
- -mmio regions: none
- -irqs: dpni link change
+ -MMIO regions: none
+ -IRQs: DPNI link change
-commands: set link up/down, link config, get stats,
- irq config, enable, reset
+ IRQ config, enable, reset
-DPNI (Datapath Network Interface): contains TX/RX queues,
- network interface configuration, and rx buffer pool configuration
- mechanisms.
- -mmio regions: none
- -irqs: link state
+ network interface configuration, and RX buffer pool configuration
+ mechanisms. The TX/RX queues are in memory and are identified by
+ queue number.
+ -MMIO regions: none
+ -IRQs: link state
-commands: port config, offload config, queue config,
- parse/classify config, irq config, enable, reset
+ parse/classify config, IRQ config, enable, reset
-DPIO (Datapath I/O): provides interfaces to enqueue and dequeue
- packets and do hardware buffer pool management operations. For
- optimum performance there is typically DPIO per CPU. This allows
- each CPU to perform simultaneous enqueue/dequeue operations.
- -mmio regions: queue operations, buffer mgmt
- -irqs: data availability, congestion notification, buffer
+ packets and do hardware buffer pool management operations. The DPAA2
+ architecture separates the mechanism to access queues (the DPIO object)
+ from the queues themselves. The DPIO provides an MMIO interface to
+ enqueue/dequeue packets. To enqueue something a descriptor is written
+ to the DPIO MMIO region, which includes the target queue number.
+ There will typically be one DPIO assigned to each CPU. This allows all
+ CPUs to simultaneously perform enqueue/dequeue operations. DPIOs are
+ expected to be shared by different DPAA2 drivers.
+ -MMIO regions: queue operations, buffer management
+ -IRQs: data availability, congestion notification, buffer
pool depletion
- -commands: irq config, enable, reset
+ -commands: IRQ config, enable, reset
-DPBP (Datapath Buffer Pool): represents a hardware buffer
pool.
- -mmio regions: none
- -irqs: none
+ -MMIO regions: none
+ -IRQs: none
-commands: enable, reset
-DPMCP (Datapath MC Portal): provides an MC command portal.
Used by drivers to send commands to the MC to manage
objects.
- -mmio regions: MC command portal
- -irqs: command completion
- -commands: irq config, enable, reset
+ -MMIO regions: MC command portal
+ -IRQs: command completion
+ -commands: IRQ config, enable, reset
Object Connections
------------------
@@ -268,22 +278,22 @@ of each driver follows.
| Stack |
+------------+ +------------+
| Allocator |. . . . . . . | Ethernet |
- |(dpmcp,dpbp)| | (dpni) |
+ |(DPMCP,DPBP)| | (DPNI) |
+-.----------+ +---+---+----+
. . ^ |
. . <data avail, | |<enqueue,
. . tx confirm> | | dequeue>
+-------------+ . | |
| DPRC driver | . +---+---V----+ +---------+
- | (dprc) | . . . . . .| DPIO driver| | MAC |
- +----------+--+ | (dpio) | | (dpmac) |
+ | (DPRC) | . . . . . .| DPIO driver| | MAC |
+ +----------+--+ | (DPIO) | | (DPMAC) |
| +------+-----+ +-----+---+
|<dev add/remove> | |
| | |
+----+--------------+ | +--+---+
- | mc-bus driver | | | PHY |
+ | MC-bus driver | | | PHY |
| | | |driver|
- | /fsl-mc@80c000000 | | +--+---+
+ | /soc/fsl-mc | | +--+---+
+-------------------+ | |
| |
================================ HARDWARE =========|=================|======
@@ -298,25 +308,27 @@ of each driver follows.
A brief description of each driver is provided below.
- mc-bus driver
+ MC-bus driver
-------------
- The mc-bus driver is a platform driver and is probed from an
- "/fsl-mc@xxxx" node in the device tree passed in by boot firmware.
- It is responsible for bootstrapping the DPAA2 kernel infrastructure.
+ The MC-bus driver is a platform driver and is probed from a
+ node in the device tree (compatible "fsl,qoriq-mc") passed in by boot
+ firmware. It is responsible for bootstrapping the DPAA2 kernel
+ infrastructure.
Key functions include:
-registering a new bus type named "fsl-mc" with the kernel,
and implementing bus call-backs (e.g. match/uevent/dev_groups)
- -implemeting APIs for DPAA2 driver registration and for device
+ -implementing APIs for DPAA2 driver registration and for device
add/remove
- -creates an MSI irq domain
- -do a device add of the 'root' DPRC device, which is needed
- to bootstrap things
+ -creating an MSI IRQ domain
+ -doing a 'device add' to expose the 'root' DPRC, in turn triggering
+ a bind of the root DPRC to the DPRC driver
DPRC driver
-----------
- The dprc-driver is bound DPRC objects and does runtime management
+ The DPRC driver is bound to DPRC objects and does runtime management
of a bus instance. It performs the initial bus scan of the DPRC
- and handles interrupts for container events such as hot plug.
+ and handles interrupts for container events such as hot plug by
+ re-scanning the DPRC.
Allocator
----------
@@ -334,14 +346,20 @@ A brief description of each driver is provided below.
DPIO driver
-----------
The DPIO driver is bound to DPIO objects and provides services that allow
- other drivers such as the Ethernet driver to receive and transmit data.
+ other drivers such as the Ethernet driver to enqueue and dequeue data for
+ their respective objects.
Key services include:
-data availability notifications
-hardware queuing operations (enqueue and dequeue of data)
-hardware buffer pool management
+ To transmit a packet the Ethernet driver puts data on a queue and
+ invokes a DPIO API. For receive, the Ethernet driver registers
+ a data availability notification callback. To dequeue a packet
+ a DPIO API is used.
+
There is typically one DPIO object per physical CPU for optimum
- performance, allowing each CPU to simultaneously enqueue
+ performance, allowing different CPUs to simultaneously enqueue
and dequeue data.
The DPIO driver operates on behalf of all DPAA2 drivers
@@ -362,3 +380,7 @@ A brief description of each driver is provided below.
by the appropriate PHY driver via an mdio bus. The MAC driver
plays a role of being a proxy between the PHY driver and the
MC. It does this proxy via the MC commands to a DPMAC object.
+ If the PHY driver signals a link change, the MAC driver notifies
+ the MC via a DPMAC command. If a network interface is brought
+ up or down, the MC notifies the DPMAC driver via an interrupt and
+ the driver can take appropriate action.
diff --git a/drivers/staging/fsl-mc/TODO b/drivers/staging/fsl-mc/TODO
index 389436891..54a8bc692 100644
--- a/drivers/staging/fsl-mc/TODO
+++ b/drivers/staging/fsl-mc/TODO
@@ -1,21 +1,8 @@
-* Decide if multiple root fsl-mc buses will be supported per Linux instance,
- and if so add support for this.
-
* Add at least one device driver for a DPAA2 object (child device of the
fsl-mc bus). Most likely candidate for this is adding DPAA2 Ethernet
driver support, which depends on drivers for several objects: DPNI,
DPIO, DPMAC. Other pre-requisites include:
- * interrupt support. for meaningful driver support we need
- interrupts, and thus need message interrupt support by the bus
- driver.
- -Note: this has dependencies on generic MSI support work
- in process upstream, see [1] and [2].
-
- * Management Complex (MC) command serialization. locking mechanisms
- are needed by drivers to serialize commands sent to the MC, including
- from atomic context.
-
* MC firmware uprev. The MC firmware upon which the fsl-mc
bus driver and DPAA2 object drivers are based is continuing
to evolve, so minor updates are needed to keep in sync with binary
diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
index 2d97173f8..c31fe1bca 100644
--- a/drivers/staging/fsl-mc/bus/dpbp.c
+++ b/drivers/staging/fsl-mc/bus/dpbp.c
@@ -293,7 +293,7 @@ int dpbp_set_irq(struct fsl_mc_io *mc_io,
cmd.params[0] |= mc_enc(0, 8, irq_index);
cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -334,7 +334,7 @@ int dpbp_get_irq(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
irq_cfg->addr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
*type = (int)mc_dec(cmd.params[2], 32, 32);
return 0;
}
@@ -502,6 +502,7 @@ int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
cmd_flags, token);
+ cmd.params[0] |= mc_enc(0, 32, *status);
cmd.params[0] |= mc_enc(32, 8, irq_index);
/* send command to mc*/
@@ -580,3 +581,75 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
return 0;
}
EXPORT_SYMBOL(dpbp_get_attributes);
+
+/**
+ * dpbp_set_notifications() - Set notifications towards software
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @cfg: notifications configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS,
+ cmd_flags,
+ token);
+
+ cmd.params[0] |= mc_enc(0, 32, cfg->depletion_entry);
+ cmd.params[0] |= mc_enc(32, 32, cfg->depletion_exit);
+ cmd.params[1] |= mc_enc(0, 32, cfg->surplus_entry);
+ cmd.params[1] |= mc_enc(32, 32, cfg->surplus_exit);
+ cmd.params[2] |= mc_enc(0, 16, cfg->options);
+ cmd.params[3] |= mc_enc(0, 64, cfg->message_ctx);
+ cmd.params[4] |= mc_enc(0, 64, cfg->message_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpbp_get_notifications() - Get the notifications configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @cfg: notifications configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ cfg->depletion_entry = (u32)mc_dec(cmd.params[0], 0, 32);
+ cfg->depletion_exit = (u32)mc_dec(cmd.params[0], 32, 32);
+ cfg->surplus_entry = (u32)mc_dec(cmd.params[1], 0, 32);
+ cfg->surplus_exit = (u32)mc_dec(cmd.params[1], 32, 32);
+ cfg->options = (u16)mc_dec(cmd.params[2], 0, 16);
+ cfg->message_ctx = (u64)mc_dec(cmd.params[3], 0, 64);
+ cfg->message_iova = (u64)mc_dec(cmd.params[4], 0, 64);
+
+ return 0;
+}
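
The mc_enc()/mc_dec() calls used by these wrappers pack and unpack bit
fields of the 64-bit words in struct mc_command. They behave roughly as
below (a sketch; the real helpers live in the fsl-mc command headers and
special-case the full 64-bit width):

	static inline u64 mc_enc(int lsoffset, int width, u64 val)
	{
		u64 mask = (width < 64) ? (BIT_ULL(width) - 1) : ~0ULL;

		return (val & mask) << lsoffset;
	}

	static inline u64 mc_dec(u64 val, int lsoffset, int width)
	{
		u64 mask = (width < 64) ? (BIT_ULL(width) - 1) : ~0ULL;

		return (val >> lsoffset) & mask;
	}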
diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
index a87e9f84f..c9b52dd7b 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
@@ -32,9 +32,9 @@
#ifndef _FSL_DPMCP_CMD_H
#define _FSL_DPMCP_CMD_H
-/* DPMCP Version */
-#define DPMCP_VER_MAJOR 2
-#define DPMCP_VER_MINOR 1
+/* Minimal supported DPMCP Version */
+#define DPMCP_MIN_VER_MAJOR 3
+#define DPMCP_MIN_VER_MINOR 0
/* Command IDs */
#define DPMCP_CMDID_CLOSE 0x800
@@ -52,6 +52,5 @@
#define DPMCP_CMDID_SET_IRQ_MASK 0x014
#define DPMCP_CMDID_GET_IRQ_MASK 0x015
#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
-#define DPMCP_CMDID_CLEAR_IRQ_STATUS 0x017
#endif /* _FSL_DPMCP_CMD_H */
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
index b0248f574..fd6dd4e07 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.c
+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
@@ -213,7 +213,7 @@ int dpmcp_set_irq(struct fsl_mc_io *mc_io,
cmd.params[0] |= mc_enc(0, 8, irq_index);
cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -254,7 +254,7 @@ int dpmcp_get_irq(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
*type = (int)mc_dec(cmd.params[2], 32, 32);
return 0;
}
@@ -435,37 +435,6 @@ int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
}
/**
- * dpmcp_clear_irq_status() - Clear a pending interrupt's status
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- * @irq_index: The interrupt index to configure
- * @status: Bits to clear (W1C) - one bit per cause:
- * 0 = don't change
- * 1 = clear status bit
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLEAR_IRQ_STATUS,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, status);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
* dpmcp_get_attributes - Retrieve DPMCP attributes.
*
* @mc_io: Pointer to MC portal's I/O object
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h
index 6df351f0c..fe79d4d92 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp.h
@@ -82,12 +82,12 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
* struct dpmcp_irq_cfg - IRQ configuration
* @paddr: Address that must be written to signal a message-based interrupt
* @val: Value to write into irq_addr address
- * @user_irq_id: A user defined number associated with this IRQ
+ * @irq_num: A user defined number associated with this IRQ
*/
struct dpmcp_irq_cfg {
uint64_t paddr;
uint32_t val;
- int user_irq_id;
+ int irq_num;
};
int dpmcp_set_irq(struct fsl_mc_io *mc_io,
@@ -133,12 +133,6 @@ int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
uint8_t irq_index,
uint32_t *status);
-int dpmcp_clear_irq_status(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t status);
-
/**
* struct dpmcp_attr - Structure representing DPMCP attributes
* @id: DPMCP object ID
diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
index 6552c2034..9b854fa8e 100644
--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
@@ -40,9 +40,9 @@
#ifndef _FSL_DPRC_CMD_H
#define _FSL_DPRC_CMD_H
-/* DPRC Version */
-#define DPRC_VER_MAJOR 4
-#define DPRC_VER_MINOR 0
+/* Minimal supported DPRC Version */
+#define DPRC_MIN_VER_MAJOR 5
+#define DPRC_MIN_VER_MINOR 0
/* Command IDs */
#define DPRC_CMDID_CLOSE 0x800
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index 31488a7b9..7fc47173c 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -312,6 +312,15 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
continue;
}
+ /*
+ * Add a quirk for all versions of dpseci < 4.0; none
+ * are coherent regardless of what the MC reports.
+ */
+ if ((strcmp(obj_desc->type, "dpseci") == 0) &&
+ (obj_desc->ver_major < 4))
+ obj_desc->flags |=
+ DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY;
+
irq_count += obj_desc->irq_count;
dev_dbg(&mc_bus_dev->dev,
"Discovered object: type %s, id %d\n",
@@ -423,6 +432,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num))
goto out;
+ status = 0;
error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
&status);
if (error < 0) {
@@ -692,6 +702,25 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
goto error_cleanup_msi_domain;
}
+ error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ &mc_bus->dprc_attr);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n",
+ error);
+ goto error_cleanup_open;
+ }
+
+ if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR ||
+ (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR &&
+ mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) {
+ dev_err(&mc_dev->dev,
+ "ERROR: DPRC version %d.%d not supported\n",
+ mc_bus->dprc_attr.version.major,
+ mc_bus->dprc_attr.version.minor);
+ error = -ENOTSUPP;
+ goto error_cleanup_open;
+ }
+
mutex_init(&mc_bus->scan_mutex);
/*
@@ -779,9 +808,7 @@ static int dprc_remove(struct fsl_mc_device *mc_dev)
static const struct fsl_mc_device_match_id match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
- .obj_type = "dprc",
- .ver_major = DPRC_VER_MAJOR,
- .ver_minor = DPRC_VER_MINOR},
+ .obj_type = "dprc"},
{.vendor = 0x0},
};
diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
index 381b9a96a..a2c47377c 100644
--- a/drivers/staging/fsl-mc/bus/dprc.c
+++ b/drivers/staging/fsl-mc/bus/dprc.c
@@ -265,7 +265,7 @@ int dprc_get_irq(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
irq_cfg->val = mc_dec(cmd.params[0], 0, 32);
irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = mc_dec(cmd.params[2], 0, 32);
+ irq_cfg->irq_num = mc_dec(cmd.params[2], 0, 32);
*type = mc_dec(cmd.params[2], 32, 32);
return 0;
@@ -296,7 +296,7 @@ int dprc_set_irq(struct fsl_mc_io *mc_io,
cmd.params[0] |= mc_enc(32, 8, irq_index);
cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -466,6 +466,7 @@ int dprc_get_irq_status(struct fsl_mc_io *mc_io,
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
cmd_flags, token);
+ cmd.params[0] |= mc_enc(0, 32, *status);
cmd.params[0] |= mc_enc(32, 8, irq_index);
/* send command to mc*/
@@ -948,6 +949,7 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
obj_desc->state = mc_dec(cmd.params[1], 32, 32);
obj_desc->ver_major = mc_dec(cmd.params[2], 0, 16);
obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16);
+ obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8);
obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8);
obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8);
@@ -1042,6 +1044,7 @@ int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
obj_desc->state = (u32)mc_dec(cmd.params[1], 32, 32);
obj_desc->ver_major = (u16)mc_dec(cmd.params[2], 0, 16);
obj_desc->ver_minor = (u16)mc_dec(cmd.params[2], 16, 16);
+ obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8);
obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8);
obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8);
@@ -1108,7 +1111,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
cmd.params[0] |= mc_enc(32, 8, irq_index);
cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
cmd.params[2] |= mc_enc(32, 32, obj_id);
cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
@@ -1189,7 +1192,7 @@ int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
*type = (int)mc_dec(cmd.params[2], 32, 32);
return 0;
@@ -1437,14 +1440,8 @@ EXPORT_SYMBOL(dprc_set_obj_label);
* @endpoint1: Endpoint 1 configuration parameters
* @endpoint2: Endpoint 2 configuration parameters
* @cfg: Connection configuration. The connection configuration is ignored for
- * connections made to DPMAC objects, where rate is set according to
- * MAC configuration.
- * The committed rate is the guaranteed rate for the connection.
- * The maximum rate is an upper limit allowed for the connection; it is
- * expected to be equal or higher than the committed rate.
- * When committed and maximum rates are both zero, the connection is set
- * to "best effort" mode, having lower priority compared to connections
- * with committed or maximum rates.
+ * connections made to DPMAC objects, where rate is retrieved from the
+ * MAC configuration.
*
* Return: '0' on Success; Error code otherwise.
*/
@@ -1555,7 +1552,10 @@ int dprc_disconnect(struct fsl_mc_io *mc_io,
* @token: Token of DPRC object
* @endpoint1: Endpoint 1 configuration parameters
* @endpoint2: Returned endpoint 2 configuration parameters
-* @state: Returned link state: 1 - link is up, 0 - link is down
+* @state: Returned link state:
+* 1 - link is up;
+* 0 - link is down;
+* -1 - no connection (endpoint2 information is irrelevant)
*
* Return: '0' on Success; -ENAVAIL if connection does not exist.
*/
diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c
index 86f8543c2..fb08f22a7 100644
--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
@@ -39,7 +39,6 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
struct fsl_mc_resource *resource;
struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
int error = -EINVAL;
- bool mutex_locked = false;
if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
goto out;
@@ -55,13 +54,12 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
goto out;
mutex_lock(&res_pool->mutex);
- mutex_locked = true;
if (WARN_ON(res_pool->max_count < 0))
- goto out;
+ goto out_unlock;
if (WARN_ON(res_pool->free_count < 0 ||
res_pool->free_count > res_pool->max_count))
- goto out;
+ goto out_unlock;
resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
GFP_KERNEL);
@@ -69,7 +67,7 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
error = -ENOMEM;
dev_err(&mc_bus_dev->dev,
"Failed to allocate memory for fsl_mc_resource\n");
- goto out;
+ goto out_unlock;
}
resource->type = pool_type;
@@ -82,10 +80,9 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
res_pool->free_count++;
res_pool->max_count++;
error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
out:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
-
return error;
}
@@ -106,7 +103,6 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
struct fsl_mc_resource_pool *res_pool;
struct fsl_mc_resource *resource;
int error = -EINVAL;
- bool mutex_locked = false;
if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
goto out;
@@ -122,13 +118,12 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
goto out;
mutex_lock(&res_pool->mutex);
- mutex_locked = true;
if (WARN_ON(res_pool->max_count <= 0))
- goto out;
+ goto out_unlock;
if (WARN_ON(res_pool->free_count <= 0 ||
res_pool->free_count > res_pool->max_count))
- goto out;
+ goto out_unlock;
/*
* If the device is currently allocated, its resource is not
@@ -139,7 +134,7 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
dev_err(&mc_bus_dev->dev,
"Device %s cannot be removed from resource pool\n",
dev_name(&mc_dev->dev));
- goto out;
+ goto out_unlock;
}
list_del(&resource->node);
@@ -150,10 +145,9 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
devm_kfree(&mc_bus_dev->dev, resource);
mc_dev->resource = NULL;
error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
out:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
-
return error;
}
@@ -188,21 +182,19 @@ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
struct fsl_mc_resource *resource;
struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
int error = -EINVAL;
- bool mutex_locked = false;
BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
FSL_MC_NUM_POOL_TYPES);
*new_resource = NULL;
if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
- goto error;
+ goto out;
res_pool = &mc_bus->resource_pools[pool_type];
if (WARN_ON(res_pool->mc_bus != mc_bus))
- goto error;
+ goto out;
mutex_lock(&res_pool->mutex);
- mutex_locked = true;
resource = list_first_entry_or_null(&res_pool->free_list,
struct fsl_mc_resource, node);
@@ -212,28 +204,26 @@ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
dev_err(&mc_bus_dev->dev,
"No more resources of type %s left\n",
fsl_mc_pool_type_strings[pool_type]);
- goto error;
+ goto out_unlock;
}
if (WARN_ON(resource->type != pool_type))
- goto error;
+ goto out_unlock;
if (WARN_ON(resource->parent_pool != res_pool))
- goto error;
+ goto out_unlock;
if (WARN_ON(res_pool->free_count <= 0 ||
res_pool->free_count > res_pool->max_count))
- goto error;
+ goto out_unlock;
list_del(&resource->node);
INIT_LIST_HEAD(&resource->node);
res_pool->free_count--;
+ error = 0;
+out_unlock:
mutex_unlock(&res_pool->mutex);
*new_resource = resource;
- return 0;
-error:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
-
+out:
return error;
}
EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
@@ -241,26 +231,23 @@ EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
void fsl_mc_resource_free(struct fsl_mc_resource *resource)
{
struct fsl_mc_resource_pool *res_pool;
- bool mutex_locked = false;
res_pool = resource->parent_pool;
if (WARN_ON(resource->type != res_pool->type))
- goto out;
+ return;
mutex_lock(&res_pool->mutex);
- mutex_locked = true;
if (WARN_ON(res_pool->free_count < 0 ||
res_pool->free_count >= res_pool->max_count))
- goto out;
+ goto out_unlock;
if (WARN_ON(!list_empty(&resource->node)))
- goto out;
+ goto out_unlock;
list_add_tail(&resource->node, &res_pool->free_list);
res_pool->free_count++;
-out:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
}
EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
@@ -306,10 +293,22 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
if (error < 0)
return error;
+ error = -EINVAL;
dpmcp_dev = resource->data;
if (WARN_ON(!dpmcp_dev))
goto error_cleanup_resource;
+ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
+ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
+ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
+ dev_err(&dpmcp_dev->dev,
+ "ERROR: Version %d.%d of DPMCP not supported.\n",
+ dpmcp_dev->obj_desc.ver_major,
+ dpmcp_dev->obj_desc.ver_minor);
+ error = -ENOTSUPP;
+ goto error_cleanup_resource;
+ }
+
if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0))
goto error_cleanup_resource;
@@ -722,20 +721,14 @@ static const struct fsl_mc_device_match_id match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpbp",
- .ver_major = DPBP_VER_MAJOR,
- .ver_minor = DPBP_VER_MINOR
},
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpmcp",
- .ver_major = DPMCP_VER_MAJOR,
- .ver_minor = DPMCP_VER_MINOR
},
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpcon",
- .ver_major = DPCON_VER_MAJOR,
- .ver_minor = DPCON_VER_MINOR
},
{.vendor = 0x0},
};
diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c
index b59455661..405364307 100644
--- a/drivers/staging/fsl-mc/bus/mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/mc-bus.c
@@ -40,8 +40,6 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
bool found = false;
- bool major_version_mismatch = false;
- bool minor_version_mismatch = false;
if (WARN_ON(!fsl_mc_bus_exists()))
goto out;
@@ -64,32 +62,12 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
if (id->vendor == mc_dev->obj_desc.vendor &&
strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
- if (id->ver_major == mc_dev->obj_desc.ver_major) {
- found = true;
- if (id->ver_minor != mc_dev->obj_desc.ver_minor)
- minor_version_mismatch = true;
- } else {
- major_version_mismatch = true;
- }
+ found = true;
break;
}
}
- if (major_version_mismatch) {
- dev_warn(dev,
- "Major version mismatch: driver version %u.%u, MC object version %u.%u\n",
- id->ver_major, id->ver_minor,
- mc_dev->obj_desc.ver_major,
- mc_dev->obj_desc.ver_minor);
- } else if (minor_version_mismatch) {
- dev_warn(dev,
- "Minor version mismatch: driver version %u.%u, MC object version %u.%u\n",
- id->ver_major, id->ver_minor,
- mc_dev->obj_desc.ver_major,
- mc_dev->obj_desc.ver_minor);
- }
-
out:
dev_dbg(dev, "%smatched\n", found ? "" : "not ");
return found;
@@ -251,11 +229,10 @@ static bool fsl_mc_is_root_dprc(struct device *dev)
return dev == root_dprc_dev;
}
-static int get_dprc_icid(struct fsl_mc_io *mc_io,
- int container_id, u16 *icid)
+static int get_dprc_attr(struct fsl_mc_io *mc_io,
+ int container_id, struct dprc_attributes *attr)
{
u16 dprc_handle;
- struct dprc_attributes attr;
int error;
error = dprc_open(mc_io, 0, container_id, &dprc_handle);
@@ -264,15 +241,14 @@ static int get_dprc_icid(struct fsl_mc_io *mc_io,
return error;
}
- memset(&attr, 0, sizeof(attr));
- error = dprc_get_attributes(mc_io, 0, dprc_handle, &attr);
+ memset(attr, 0, sizeof(struct dprc_attributes));
+ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr);
if (error < 0) {
dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
error);
goto common_cleanup;
}
- *icid = attr.icid;
error = 0;
common_cleanup:
@@ -280,6 +256,34 @@ common_cleanup:
return error;
}
+static int get_dprc_icid(struct fsl_mc_io *mc_io,
+ int container_id, u16 *icid)
+{
+ struct dprc_attributes attr;
+ int error;
+
+ error = get_dprc_attr(mc_io, container_id, &attr);
+ if (error == 0)
+ *icid = attr.icid;
+
+ return error;
+}
+
+static int get_dprc_version(struct fsl_mc_io *mc_io,
+ int container_id, u16 *major, u16 *minor)
+{
+ struct dprc_attributes attr;
+ int error;
+
+ error = get_dprc_attr(mc_io, container_id, &attr);
+ if (error == 0) {
+ *major = attr.version.major;
+ *minor = attr.version.minor;
+ }
+
+ return error;
+}
+
static int translate_mc_addr(struct fsl_mc_device *mc_dev,
enum dprc_region_type mc_region_type,
u64 mc_offset, phys_addr_t *phys_addr)
@@ -376,6 +380,8 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
regions[i].end = regions[i].start + region_desc.size - 1;
regions[i].name = "fsl-mc object MMIO region";
regions[i].flags = IORESOURCE_IO;
+ if (region_desc.flags & DPRC_REGION_CACHEABLE)
+ regions[i].flags |= IORESOURCE_CACHEABLE;
}
mc_dev->regions = regions;
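A region the DPRC flags as cacheable now carries IORESOURCE_CACHEABLE, so consumers can pick their mapping accordingly. A minimal sketch, assuming a hypothetical helper built on the stock ioremap()/ioremap_cache()/resource_size() APIs of this kernel generation:

	#include <linux/io.h>
	#include <linux/ioport.h>

	static void __iomem *map_mc_region(struct resource *res)
	{
		/* honor the hint propagated from DPRC_REGION_CACHEABLE */
		if (res->flags & IORESOURCE_CACHEABLE)
			return ioremap_cache(res->start, resource_size(res));
		return ioremap(res->start, resource_size(res));
	}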
@@ -491,6 +497,10 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
goto error_cleanup_dev;
}
+ /* Objects are coherent, unless 'no shareability' flag set. */
+ if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY))
+ arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
+
/*
* The device-specific probe callback will get invoked by device_add()
*/
@@ -722,20 +732,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
"Freescale Management Complex Firmware version: %u.%u.%u\n",
mc_version.major, mc_version.minor, mc_version.revision);
- if (mc_version.major < MC_VER_MAJOR) {
- dev_err(&pdev->dev,
- "ERROR: MC firmware version not supported by driver (driver version: %u.%u)\n",
- MC_VER_MAJOR, MC_VER_MINOR);
- error = -ENOTSUPP;
- goto error_cleanup_mc_io;
- }
-
- if (mc_version.major > MC_VER_MAJOR) {
- dev_warn(&pdev->dev,
- "WARNING: driver may not support newer MC firmware features (driver version: %u.%u)\n",
- MC_VER_MAJOR, MC_VER_MINOR);
- }
-
error = get_mc_addr_translation_ranges(&pdev->dev,
&mc->translation_ranges,
&mc->num_translation_ranges);
@@ -749,11 +745,15 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
goto error_cleanup_mc_io;
}
+ memset(&obj_desc, 0, sizeof(struct dprc_obj_desc));
+ error = get_dprc_version(mc_io, container_id,
+ &obj_desc.ver_major, &obj_desc.ver_minor);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
strcpy(obj_desc.type, "dprc");
obj_desc.id = container_id;
- obj_desc.ver_major = DPRC_VER_MAJOR;
- obj_desc.ver_minor = DPRC_VER_MINOR;
obj_desc.irq_count = 1;
obj_desc.region_count = 0;
diff --git a/drivers/staging/fsl-mc/bus/mc-msi.c b/drivers/staging/fsl-mc/bus/mc-msi.c
index 3a8258ff4..e202b2b88 100644
--- a/drivers/staging/fsl-mc/bus/mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/mc-msi.c
@@ -37,10 +37,8 @@ static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
/*
* set_desc should not be set by the caller
*/
- if (WARN_ON(ops->set_desc))
- return;
-
- ops->set_desc = fsl_mc_msi_set_desc;
+ if (ops->set_desc == NULL)
+ ops->set_desc = fsl_mc_msi_set_desc;
}
static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
@@ -65,7 +63,7 @@ static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
msi_desc->msg.address_lo;
irq_cfg.val = msi_desc->msg.data;
- irq_cfg.user_irq_id = msi_desc->irq;
+ irq_cfg.irq_num = msi_desc->irq;
if (owner_mc_dev == mc_bus_dev) {
/*
@@ -129,10 +127,8 @@ static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
/*
* irq_write_msi_msg should not be set by the caller
*/
- if (WARN_ON(chip->irq_write_msi_msg))
- return;
-
- chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
+ if (chip->irq_write_msi_msg == NULL)
+ chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
}
/**
diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h
index efa9bf33c..c57b454a2 100644
--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
+++ b/drivers/staging/fsl-mc/include/dpbp-cmd.h
@@ -34,7 +34,7 @@
/* DPBP Version */
#define DPBP_VER_MAJOR 2
-#define DPBP_VER_MINOR 1
+#define DPBP_VER_MINOR 2
/* Command IDs */
#define DPBP_CMDID_CLOSE 0x800
@@ -57,4 +57,6 @@
#define DPBP_CMDID_GET_IRQ_STATUS 0x016
#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017
+#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
+#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
#endif /* _FSL_DPBP_CMD_H */
diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h
index 37ed95143..e14e85a5d 100644
--- a/drivers/staging/fsl-mc/include/dpbp.h
+++ b/drivers/staging/fsl-mc/include/dpbp.h
@@ -85,12 +85,12 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
* struct dpbp_irq_cfg - IRQ configuration
* @addr: Address that must be written to signal a message-based interrupt
* @val: Value to write into irq_addr address
- * @user_irq_id: A user defined number associated with this IRQ
+ * @irq_num: A user-defined number associated with this IRQ
*/
struct dpbp_irq_cfg {
u64 addr;
u32 val;
- int user_irq_id;
+ int irq_num;
};
int dpbp_set_irq(struct fsl_mc_io *mc_io,
@@ -168,6 +168,53 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
u16 token,
struct dpbp_attr *attr);
+/**
+ * DPBP notifications options
+ */
+
+/**
+ * BPSCN write will attempt to allocate into a cache (coherent write)
+ */
+#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001
+
+/**
+ * struct dpbp_notification_cfg - Structure representing DPBP notifications
+ * towards software
+ * @depletion_entry: below this threshold the pool is "depleted";
+ * set it to '0' to disable it
+ * @depletion_exit: at or above this threshold the pool exits its
+ * "depleted" state
+ * @surplus_entry: above this threshold the pool is in "surplus" state;
+ * set it to '0' to disable it
+ * @surplus_exit: at or below this threshold the pool exits its
+ * "surplus" state
+ * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry'
+ * is not '0' (i.e., enabled); I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned.
+ * @message_ctx: The context that will be part of the BPSCN message and will
+ * be written to 'message_iova'
+ * @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
+ */
+struct dpbp_notification_cfg {
+ u32 depletion_entry;
+ u32 depletion_exit;
+ u32 surplus_entry;
+ u32 surplus_exit;
+ u64 message_iova;
+ u64 message_ctx;
+ u16 options;
+};
+
+int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg);
+
+int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg);
+
/** @} */
#endif /* __FSL_DPBP_H */
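A minimal caller sketch for the new notification API (hypothetical function; assumes the DPBP was already opened via dpbp_open() so mc_io and token are valid, and that msg_iova points at 16-byte-aligned, DMA-able memory as the kernel-doc above requires):

	static int example_enable_depletion_bpscn(struct fsl_mc_io *mc_io,
						  u16 token, u64 msg_iova)
	{
		struct dpbp_notification_cfg cfg = {
			.depletion_entry = 64,	/* "depleted" below 64 buffers */
			.depletion_exit = 128,	/* leaves "depleted" at >= 128 */
			.surplus_entry = 0,	/* surplus tracking disabled */
			.surplus_exit = 0,
			.message_iova = msg_iova,
			.message_ctx = 0xdeadbeef,	/* echoed back in the BPSCN message */
			.options = DPBP_NOTIF_OPT_COHERENT_WRITE,
		};

		return dpbp_set_notifications(mc_io, 0, token, &cfg);
	}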
diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h
index 94c492706..593b2bbe7 100644
--- a/drivers/staging/fsl-mc/include/dprc.h
+++ b/drivers/staging/fsl-mc/include/dprc.h
@@ -94,11 +94,6 @@ int dprc_close(struct fsl_mc_io *mc_io,
*/
#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008
-/* IOMMU bypass - indicates whether objects of this container are permitted
- * to bypass the IOMMU.
- */
-#define DPRC_CFG_OPT_IOMMU_BYPASS 0x00000010
-
/* AIOP - Indicates that container belongs to AIOP. */
#define DPRC_CFG_OPT_AIOP 0x00000020
@@ -173,12 +168,12 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
* struct dprc_irq_cfg - IRQ configuration
* @paddr: Address that must be written to signal a message-based interrupt
* @val: Value to write into irq_addr address
- * @user_irq_id: A user defined number associated with this IRQ
+ * @irq_num: A user-defined number associated with this IRQ
*/
struct dprc_irq_cfg {
phys_addr_t paddr;
u32 val;
- int user_irq_id;
+ int irq_num;
};
int dprc_set_irq(struct fsl_mc_io *mc_io,
@@ -353,6 +348,14 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
#define DPRC_OBJ_STATE_PLUGGED 0x00000002
/**
+ * Shareability flag - Object flag indicating no memory shareability.
+ * The object generates memory accesses that are non-coherent with other
+ * masters; the user is responsible for proper memory handling through
+ * IOMMU configuration.
+ */
+#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
+
+/**
* struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
* @type: Type of object: NULL terminated string
* @id: ID of logical object resource
@@ -363,6 +366,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
* @region_count: Number of mappable regions supported by the object
* @state: Object state: combination of DPRC_OBJ_STATE_ states
* @label: Object label
+ * @flags: Object's flags
*/
struct dprc_obj_desc {
char type[16];
@@ -374,6 +378,7 @@ struct dprc_obj_desc {
u8 region_count;
u32 state;
char label[16];
+ u16 flags;
};
int dprc_get_obj(struct fsl_mc_io *mc_io,
diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h
index ee5f1d2bf..cab1ae90f 100644
--- a/drivers/staging/fsl-mc/include/mc-private.h
+++ b/drivers/staging/fsl-mc/include/mc-private.h
@@ -94,12 +94,14 @@ struct fsl_mc_resource_pool {
* from the physical DPRC.
* @irq_resources: Pointer to array of IRQ objects for the IRQ pool
* @scan_mutex: Serializes bus scanning
+ * @dprc_attr: DPRC attributes
*/
struct fsl_mc_bus {
struct fsl_mc_device mc_dev;
struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
struct fsl_mc_device_irq *irq_resources;
struct mutex scan_mutex; /* serializes bus scanning */
+ struct dprc_attributes dprc_attr;
};
#define to_fsl_mc_bus(_mc_dev) \
diff --git a/drivers/staging/fwserial/dma_fifo.c b/drivers/staging/fwserial/dma_fifo.c
index 4cd3ed3ee..8b23a553f 100644
--- a/drivers/staging/fwserial/dma_fifo.c
+++ b/drivers/staging/fwserial/dma_fifo.c
@@ -35,7 +35,7 @@
/*
* private helper fn to determine if check is in open interval (lo,hi)
*/
-static bool addr_check(unsigned check, unsigned lo, unsigned hi)
+static bool addr_check(unsigned int check, unsigned int lo, unsigned int hi)
{
return check - (lo + 1) < (hi - 1) - lo;
}
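The single unsigned comparison folds both bound checks into one test: for check inside (lo, hi), check - (lo + 1) lands in [0, hi - lo - 2], while anything outside wraps around to a huge unsigned value and fails against (hi - 1) - lo. A standalone demonstration of the idiom (editorial sketch, not part of the patch):

	#include <assert.h>

	/* true iff lo < check < hi, exploiting unsigned wraparound */
	static int in_open_interval(unsigned int check, unsigned int lo,
				    unsigned int hi)
	{
		return check - (lo + 1) < (hi - 1) - lo;
	}

	int main(void)
	{
		assert(in_open_interval(5, 4, 6));	/* strictly inside */
		assert(!in_open_interval(4, 4, 6));	/* lower bound excluded */
		assert(!in_open_interval(6, 4, 6));	/* upper bound excluded */
		return 0;
	}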
@@ -64,7 +64,7 @@ void dma_fifo_init(struct dma_fifo *fifo)
* The 'apparent' size will be rounded up to next greater aligned size.
* Returns 0 if no error, otherwise an error code
*/
-int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned align,
+int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned int align,
int tx_limit, int open_limit, gfp_t gfp_mask)
{
int capacity;
@@ -190,7 +190,7 @@ int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n)
*/
int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended)
{
- unsigned len, n, ofs, l, limit;
+ unsigned int len, n, ofs, l, limit;
if (!fifo->data)
return -ENOENT;
@@ -210,7 +210,7 @@ int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended)
n = len;
ofs = fifo->out % fifo->capacity;
l = fifo->capacity - ofs;
- limit = min_t(unsigned, l, fifo->tx_limit);
+ limit = min_t(unsigned int, l, fifo->tx_limit);
if (n > limit) {
n = limit;
fifo->out += limit;
diff --git a/drivers/staging/fwserial/dma_fifo.h b/drivers/staging/fwserial/dma_fifo.h
index 410988224..37a91c6a1 100644
--- a/drivers/staging/fwserial/dma_fifo.h
+++ b/drivers/staging/fwserial/dma_fifo.h
@@ -45,9 +45,9 @@
#define DMA_FIFO_GUARD 3 /* # of cache lines to reserve for the guard area */
struct dma_fifo {
- unsigned in;
- unsigned out; /* updated when dma is pended */
- unsigned done; /* updated upon dma completion */
+ unsigned int in;
+ unsigned int out; /* updated when dma is pended */
+ unsigned int done; /* updated upon dma completion */
struct {
unsigned corrupt:1;
};
@@ -55,7 +55,7 @@ struct dma_fifo {
int guard; /* ofs of guard area */
int capacity; /* size + reserved */
int avail; /* # of unused bytes in fifo */
- unsigned align; /* must be power of 2 */
+ unsigned int align; /* must be power of 2 */
int tx_limit; /* max # of bytes per dma transaction */
int open_limit; /* max # of outstanding allowed */
int open; /* # of outstanding dma transactions */
@@ -66,9 +66,9 @@ struct dma_fifo {
struct dma_pending {
struct list_head link;
void *data;
- unsigned len;
- unsigned next;
- unsigned out;
+ unsigned int len;
+ unsigned int next;
+ unsigned int out;
};
static inline void dp_mark_completed(struct dma_pending *dp)
@@ -82,7 +82,7 @@ static inline bool dp_is_completed(struct dma_pending *dp)
}
void dma_fifo_init(struct dma_fifo *fifo);
-int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned align,
+int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned int align,
int tx_limit, int open_limit, gfp_t gfp_mask);
void dma_fifo_free(struct dma_fifo *fifo);
void dma_fifo_reset(struct dma_fifo *fifo);
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 9b23b5c95..c241c0ae3 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -132,7 +132,7 @@ static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
#ifdef FWTTY_PROFILING
-static void fwtty_profile_fifo(struct fwtty_port *port, unsigned *stat)
+static void fwtty_profile_fifo(struct fwtty_port *port, unsigned int *stat)
{
spin_lock_bh(&port->lock);
fwtty_profile_data(stat, dma_fifo_avail(&port->tx_fifo));
@@ -143,7 +143,7 @@ static void fwtty_dump_profile(struct seq_file *m, struct stats *stats)
{
/* for each stat, print sum of 0 to 2^k, then individually */
int k = 4;
- unsigned sum;
+ unsigned int sum;
int j;
char t[10];
@@ -303,9 +303,10 @@ static void fwtty_restart_tx(struct fwtty_port *port)
* Note: in loopback, the port->lock is being held. Only use functions that
* don't attempt to reclaim the port->lock.
*/
-static void fwtty_update_port_status(struct fwtty_port *port, unsigned status)
+static void fwtty_update_port_status(struct fwtty_port *port,
+ unsigned int status)
{
- unsigned delta;
+ unsigned int delta;
struct tty_struct *tty;
/* simulated LSR/MSR status from remote */
@@ -396,9 +397,9 @@ static void fwtty_update_port_status(struct fwtty_port *port, unsigned status)
*
* Note: caller must be holding port lock
*/
-static unsigned __fwtty_port_line_status(struct fwtty_port *port)
+static unsigned int __fwtty_port_line_status(struct fwtty_port *port)
{
- unsigned status = 0;
+ unsigned int status = 0;
/* TODO: add module param to tie RNG to DTR as well */
@@ -424,7 +425,7 @@ static int __fwtty_write_port_status(struct fwtty_port *port)
{
struct fwtty_peer *peer;
int err = -ENOENT;
- unsigned status = __fwtty_port_line_status(port);
+ unsigned int status = __fwtty_port_line_status(port);
rcu_read_lock();
peer = rcu_dereference(port->peer);
@@ -454,7 +455,7 @@ static int fwtty_write_port_status(struct fwtty_port *port)
static void fwtty_throttle_port(struct fwtty_port *port)
{
struct tty_struct *tty;
- unsigned old;
+ unsigned int old;
tty = tty_port_tty_get(&port->port);
if (!tty)
@@ -540,7 +541,7 @@ static void fwtty_emit_breaks(struct work_struct *work)
static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
{
int c, n = len;
- unsigned lsr;
+ unsigned int lsr;
int err = 0;
fwtty_dbg(port, "%d\n", n);
@@ -635,7 +636,7 @@ static void fwtty_port_handler(struct fw_card *card,
if (addr != port->rx_handler.offset || len != 4) {
rcode = RCODE_ADDRESS_ERROR;
} else {
- fwtty_update_port_status(port, *(unsigned *)data);
+ fwtty_update_port_status(port, *(unsigned int *)data);
rcode = RCODE_COMPLETE;
}
break;
@@ -828,7 +829,7 @@ static void fwtty_write_xchar(struct fwtty_port *port, char ch)
rcu_read_unlock();
}
-static struct fwtty_port *fwtty_port_get(unsigned index)
+static struct fwtty_port *fwtty_port_get(unsigned int index)
{
struct fwtty_port *port;
@@ -934,9 +935,9 @@ static int fwtty_port_carrier_raised(struct tty_port *tty_port)
return rc;
}
-static unsigned set_termios(struct fwtty_port *port, struct tty_struct *tty)
+static unsigned int set_termios(struct fwtty_port *port, struct tty_struct *tty)
{
- unsigned baud, frame;
+ unsigned int baud, frame;
baud = tty_termios_baud_rate(&tty->termios);
tty_termios_encode_baud_rate(&tty->termios, baud, baud);
@@ -988,7 +989,7 @@ static int fwtty_port_activate(struct tty_port *tty_port,
struct tty_struct *tty)
{
struct fwtty_port *port = to_port(tty_port, port);
- unsigned baud;
+ unsigned int baud;
int err;
set_bit(TTY_IO_ERROR, &tty->flags);
@@ -1264,7 +1265,7 @@ static int set_serial_info(struct fwtty_port *port,
return 0;
}
-static int fwtty_ioctl(struct tty_struct *tty, unsigned cmd,
+static int fwtty_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct fwtty_port *port = tty->driver_data;
@@ -1297,7 +1298,7 @@ static int fwtty_ioctl(struct tty_struct *tty, unsigned cmd,
static void fwtty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct fwtty_port *port = tty->driver_data;
- unsigned baud;
+ unsigned int baud;
spin_lock_bh(&port->lock);
baud = set_termios(port, tty);
@@ -1305,7 +1306,7 @@ static void fwtty_set_termios(struct tty_struct *tty, struct ktermios *old)
if ((baud == 0) && (old->c_cflag & CBAUD)) {
port->mctrl &= ~(TIOCM_DTR | TIOCM_RTS);
} else if ((baud != 0) && !(old->c_cflag & CBAUD)) {
- if (C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
+ if (C_CRTSCTS(tty) || !tty_throttled(tty))
port->mctrl |= TIOCM_DTR | TIOCM_RTS;
else
port->mctrl |= TIOCM_DTR;
@@ -1369,7 +1370,7 @@ static int fwtty_break_ctl(struct tty_struct *tty, int state)
static int fwtty_tiocmget(struct tty_struct *tty)
{
struct fwtty_port *port = tty->driver_data;
- unsigned tiocm;
+ unsigned int tiocm;
spin_lock_bh(&port->lock);
tiocm = (port->mctrl & MCTRL_MASK) | (port->mstatus & ~MCTRL_MASK);
@@ -1380,7 +1381,8 @@ static int fwtty_tiocmget(struct tty_struct *tty)
return tiocm;
}
-static int fwtty_tiocmset(struct tty_struct *tty, unsigned set, unsigned clear)
+static int fwtty_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
{
struct fwtty_port *port = tty->driver_data;
@@ -1699,7 +1701,7 @@ static void fwserial_virt_plug_complete(struct fwtty_peer *peer,
dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload);
spin_unlock_bh(&peer->port->lock);
- if (port->port.console && port->fwcon_ops->notify != NULL)
+ if (port->port.console && port->fwcon_ops->notify)
(*port->fwcon_ops->notify)(FWCON_NOTIFY_ATTACH, port->con_data);
fwtty_info(&peer->unit, "peer (guid:%016llx) connected on %s\n",
@@ -1806,7 +1808,7 @@ static void fwserial_release_port(struct fwtty_port *port, bool reset)
RCU_INIT_POINTER(port->peer, NULL);
spin_unlock_bh(&port->lock);
- if (port->port.console && port->fwcon_ops->notify != NULL)
+ if (port->port.console && port->fwcon_ops->notify)
(*port->fwcon_ops->notify)(FWCON_NOTIFY_DETACH, port->con_data);
}
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index 6fa936501..30b2481fe 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -22,7 +22,7 @@
#ifdef FWTTY_PROFILING
#define DISTRIBUTION_MAX_SIZE 8192
#define DISTRIBUTION_MAX_INDEX (ilog2(DISTRIBUTION_MAX_SIZE) + 1)
-static inline void fwtty_profile_data(unsigned stat[], unsigned val)
+static inline void fwtty_profile_data(unsigned int stat[], unsigned int val)
{
int n = (val) ? min(ilog2(val) + 1, DISTRIBUTION_MAX_INDEX) : 0;
++stat[n];
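The profiling counters bucket values by powers of two: ilog2(val) + 1 sends 1 to bucket 1, 2-3 to bucket 2, 4-7 to bucket 3, and so on, capped at DISTRIBUTION_MAX_INDEX, with 0 kept in bucket 0. A plain-C sketch of the same mapping (editorial, for illustration only):

	/* equivalent of min(ilog2(val) + 1, max_index), with 0 -> bucket 0 */
	static int bucket(unsigned int val, int max_index)
	{
		int n = 0;

		if (!val)
			return 0;
		while (val >>= 1)
			n++;	/* n ends as ilog2 of the input */
		return n + 1 < max_index ? n + 1 : max_index;
	}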
@@ -78,7 +78,7 @@ struct fwtty_peer {
u64 guid;
int generation;
int node_id;
- unsigned speed;
+ unsigned int speed;
int max_payload;
u64 mgmt_addr;
@@ -160,17 +160,17 @@ struct fwserial_mgmt_pkt {
#define VIRT_CABLE_PLUG_TIMEOUT (60 * HZ)
struct stats {
- unsigned xchars;
- unsigned dropped;
- unsigned tx_stall;
- unsigned fifo_errs;
- unsigned sent;
- unsigned lost;
- unsigned throttled;
- unsigned reads[DISTRIBUTION_MAX_INDEX + 1];
- unsigned writes[DISTRIBUTION_MAX_INDEX + 1];
- unsigned txns[DISTRIBUTION_MAX_INDEX + 1];
- unsigned unthrottle[DISTRIBUTION_MAX_INDEX + 1];
+ unsigned int xchars;
+ unsigned int dropped;
+ unsigned int tx_stall;
+ unsigned int fifo_errs;
+ unsigned int sent;
+ unsigned int lost;
+ unsigned int throttled;
+ unsigned int reads[DISTRIBUTION_MAX_INDEX + 1];
+ unsigned int writes[DISTRIBUTION_MAX_INDEX + 1];
+ unsigned int txns[DISTRIBUTION_MAX_INDEX + 1];
+ unsigned int unthrottle[DISTRIBUTION_MAX_INDEX + 1];
};
struct fwconsole_ops {
@@ -237,7 +237,7 @@ struct fwconsole_ops {
struct fwtty_port {
struct tty_port port;
struct device *device;
- unsigned index;
+ unsigned int index;
struct fw_serial *serial;
struct fw_address_handler rx_handler;
@@ -246,21 +246,21 @@ struct fwtty_port {
wait_queue_head_t wait_tx;
struct delayed_work emit_breaks;
- unsigned cps;
+ unsigned int cps;
unsigned long break_last;
struct work_struct hangup;
- unsigned mstatus;
+ unsigned int mstatus;
spinlock_t lock;
- unsigned mctrl;
+ unsigned int mctrl;
struct delayed_work drain;
struct dma_fifo tx_fifo;
int max_payload;
- unsigned status_mask;
- unsigned ignore_mask;
- unsigned break_ctl:1,
+ unsigned int status_mask;
+ unsigned int ignore_mask;
+ unsigned int break_ctl:1,
write_only:1,
overrun:1,
loopback:1;
@@ -349,7 +349,7 @@ extern struct tty_driver *fwtty_driver;
* being used for isochronous traffic)
* 2) isochronous arbitration always wins.
*/
-static inline int link_speed_to_max_payload(unsigned speed)
+static inline int link_speed_to_max_payload(unsigned int speed)
{
/* Max async payload is 4096 - see IEEE 1394-2008 tables 6-4, 16-18 */
return min(512 << speed, 4096);
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 6bedd6683..400969170 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -278,8 +278,9 @@ static void gdm_mux_rcv_complete(struct urb *urb)
}
}
-static int gdm_mux_recv(void *priv_dev, int (*cb)(void *data, int len,
- int tty_index, struct tty_dev *tty_dev, int complete))
+static int gdm_mux_recv(void *priv_dev,
+ int (*cb)(void *data, int len, int tty_index,
+ struct tty_dev *tty_dev, int complete))
{
struct mux_dev *mux_dev = priv_dev;
struct usb_device *usbdev = mux_dev->usbdev;
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index 9db9b903f..d650d7720 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -708,7 +708,7 @@ static void do_tx(struct work_struct *work)
#define SDU_PARAM_LEN 12
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
- unsigned int dftEpsId, unsigned int epsId,
+ unsigned int dft_eps_ID, unsigned int eps_ID,
void (*cb)(void *data), void *cb_data,
int dev_idx, int nic_type)
{
@@ -746,8 +746,8 @@ static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
}
sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
- sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId);
- sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId);
+ sdu->dft_eps_ID = gdm_cpu_to_dev32(&udev->gdm_ed, dft_eps_ID);
+ sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, eps_ID);
sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);
t_sdu->len = send_len + HCI_HEADER_SIZE;
diff --git a/drivers/staging/gdm724x/hci_packet.h b/drivers/staging/gdm724x/hci_packet.h
index 7fba8a687..dbc4446cf 100644
--- a/drivers/staging/gdm724x/hci_packet.h
+++ b/drivers/staging/gdm724x/hci_packet.h
@@ -58,7 +58,7 @@ struct sdu_header {
struct sdu {
u16 cmd_evt;
u16 len;
- u32 dftEpsId;
+ u32 dft_eps_ID;
u32 bearer_ID;
u32 nic_type;
u8 data[0];
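The trailing data[0] member is the old GNU zero-length-array idiom for a variable-size payload placed at the end of the struct; the C99 equivalent, which current kernel style prefers, is a flexible array member. A sketch of the same layout (illustrative only; u8/u16/u32 come from linux/types.h):

	struct sdu {
		u16 cmd_evt;
		u16 len;
		u32 dft_eps_ID;
		u32 bearer_ID;
		u32 nic_type;
		u8 data[];	/* payload, sized at allocation time */
	};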
diff --git a/drivers/staging/gdm724x/netlink_k.c b/drivers/staging/gdm724x/netlink_k.c
index 9d8347769..a0232e8ae 100644
--- a/drivers/staging/gdm724x/netlink_k.c
+++ b/drivers/staging/gdm724x/netlink_k.c
@@ -88,7 +88,8 @@ static void netlink_rcv(struct sk_buff *skb)
}
struct sock *netlink_init(int unit,
- void (*cb)(struct net_device *dev, u16 type, void *msg, int len))
+ void (*cb)(struct net_device *dev, u16 type,
+ void *msg, int len))
{
struct sock *sock;
struct netlink_kernel_cfg cfg = {
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index 7b7c9786c..a221f261c 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -93,7 +93,6 @@ static int readlength_bitstream(char *bitdata, int *lendata, int *offset)
return 0;
}
-
/*
* read first 13 bytes to check bitstream magic number
*/
@@ -201,7 +200,7 @@ static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes)
#endif /* DEBUG_FPGA */
if (!xl_supported_prog_bus_width(bus_bytes)) {
pr_err("unsupported program bus width %d\n",
- bus_bytes);
+ bus_bytes);
return -1;
}
@@ -222,7 +221,7 @@ static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes)
pr_info("device init done\n");
for (i = 0; i < size; i += bus_bytes)
- xl_shift_bytes_out(bus_bytes, bitdata+i);
+ xl_shift_bytes_out(bus_bytes, bitdata + i);
pr_info("program done\n");
@@ -277,7 +276,7 @@ static int gs_set_download_method(struct fpgaimage *fimage)
static int init_driver(void)
{
firmware_pdev = platform_device_register_simple("fpgaboot", -1,
- NULL, 0);
+ NULL, 0);
return PTR_ERR_OR_ZERO(firmware_pdev);
}
@@ -331,7 +330,6 @@ err_out1:
kfree(fimage);
return -1;
-
}
static int __init gs_fpgaboot_init(void)
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
index f41f4cc79..8cc32555d 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
@@ -51,6 +51,6 @@ struct fpgaimage {
char part[MAX_STR];
char date[MAX_STR];
char time[MAX_STR];
- int32_t lendata;
+ int lendata;
char *fpgadata;
};
diff --git a/drivers/staging/gs_fpgaboot/io.c b/drivers/staging/gs_fpgaboot/io.c
index 819db53da..c9391198f 100644
--- a/drivers/staging/gs_fpgaboot/io.c
+++ b/drivers/staging/gs_fpgaboot/io.c
@@ -35,7 +35,6 @@ static inline void byte0_out(unsigned char data);
static inline void byte1_out(unsigned char data);
static inline void xl_cclk_b(int32_t i);
-
/* Assert and Deassert CCLK */
void xl_shift_cclk(int count)
{
diff --git a/drivers/staging/i4l/act2000/act2000_isa.c b/drivers/staging/i4l/act2000/act2000_isa.c
index b5fad29a9..f0eb8441d 100644
--- a/drivers/staging/i4l/act2000/act2000_isa.c
+++ b/drivers/staging/i4l/act2000/act2000_isa.c
@@ -31,7 +31,8 @@ act2000_isa_reset(unsigned short portbase)
int serial = 0;
found = 0;
- if ((reg = inb(portbase + ISA_COR)) != 0xff) {
+ reg = inb(portbase + ISA_COR);
+ if (reg != 0xff) {
outb(reg | ISA_COR_RESET, portbase + ISA_COR);
mdelay(10);
outb(reg, portbase + ISA_COR);
@@ -232,7 +233,7 @@ act2000_isa_receive(act2000_card *card)
{
u_char c;
- if (test_and_set_bit(ACT2000_LOCK_RX, (void *) &card->ilock) != 0)
+ if (test_and_set_bit(ACT2000_LOCK_RX, (void *)&card->ilock) != 0)
return;
while (!act2000_isa_readb(card, &c)) {
if (card->idat.isa.rcvidx < 8) {
@@ -247,7 +248,7 @@ act2000_isa_receive(act2000_card *card)
card->idat.isa.rcvignore = 1;
printk(KERN_WARNING
"act2000_isa_receive: no memory\n");
- test_and_clear_bit(ACT2000_LOCK_RX, (void *) &card->ilock);
+ test_and_clear_bit(ACT2000_LOCK_RX, (void *)&card->ilock);
return;
}
memcpy(skb_put(card->idat.isa.rcvskb, 8), card->idat.isa.rcvhdr, 8);
@@ -287,7 +288,7 @@ act2000_isa_receive(act2000_card *card)
(card->idat.isa.rcvidx < card->idat.isa.rcvlen)))
act2000_schedule_poll(card);
}
- test_and_clear_bit(ACT2000_LOCK_RX, (void *) &card->ilock);
+ test_and_clear_bit(ACT2000_LOCK_RX, (void *)&card->ilock);
}
void
@@ -298,12 +299,13 @@ act2000_isa_send(act2000_card *card)
actcapi_msg *msg;
int l;
- if (test_and_set_bit(ACT2000_LOCK_TX, (void *) &card->ilock) != 0)
+ if (test_and_set_bit(ACT2000_LOCK_TX, (void *)&card->ilock) != 0)
return;
while (1) {
spin_lock_irqsave(&card->lock, flags);
if (!(card->sbuf)) {
- if ((card->sbuf = skb_dequeue(&card->sndq))) {
+ card->sbuf = skb_dequeue(&card->sndq);
+ if (card->sbuf) {
card->ack_msg = card->sbuf->data;
msg = (actcapi_msg *)card->sbuf->data;
if ((msg->hdr.cmd.cmd == 0x86) &&
@@ -317,7 +319,7 @@ act2000_isa_send(act2000_card *card)
spin_unlock_irqrestore(&card->lock, flags);
if (!(card->sbuf)) {
/* No more data to send */
- test_and_clear_bit(ACT2000_LOCK_TX, (void *) &card->ilock);
+ test_and_clear_bit(ACT2000_LOCK_TX, (void *)&card->ilock);
return;
}
skb = card->sbuf;
@@ -325,7 +327,7 @@ act2000_isa_send(act2000_card *card)
while (skb->len) {
if (act2000_isa_writeb(card, *(skb->data))) {
/* Fifo is full, but more data to send */
- test_and_clear_bit(ACT2000_LOCK_TX, (void *) &card->ilock);
+ test_and_clear_bit(ACT2000_LOCK_TX, (void *)&card->ilock);
/* Schedule myself */
act2000_schedule_tx(card);
return;
@@ -356,7 +358,6 @@ act2000_isa_send(act2000_card *card)
static int
act2000_isa_getid(act2000_card *card)
{
-
act2000_fwid fid;
u_char *p = (u_char *)&fid;
int count = 0;
@@ -378,7 +379,8 @@ act2000_isa_getid(act2000_card *card)
printk(KERN_WARNING "act2000: Wrong Firmware-ID!\n");
return -EPROTO;
}
- if ((p = strchr(fid.revision, '\n')))
+ p = strchr(fid.revision, '\n');
+ if (p)
*p = '\0';
printk(KERN_INFO "act2000: Firmware-ID: %s\n", fid.revision);
if (card->flags & ACT2000_FLAGS_IVALID) {
@@ -439,5 +441,5 @@ act2000_isa_download(act2000_card *card, act2000_ddef __user *cb)
}
kfree(buf);
msleep_interruptible(500);
- return (act2000_isa_getid(card));
+ return act2000_isa_getid(card);
}
diff --git a/drivers/staging/i4l/pcbit/capi.h b/drivers/staging/i4l/pcbit/capi.h
index 635f63476..6f6f4dd07 100644
--- a/drivers/staging/i4l/pcbit/capi.h
+++ b/drivers/staging/i4l/pcbit/capi.h
@@ -17,7 +17,7 @@
#define REQ_DISPLAY 0x04
#define REQ_USER_TO_USER 0x08
-#define AppInfoMask REQ_CAUSE | REQ_DISPLAY | REQ_USER_TO_USER
+#define AppInfoMask (REQ_CAUSE | REQ_DISPLAY | REQ_USER_TO_USER)
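The added parentheses matter because '&' binds tighter than '|', so any bitwise test against the unparenthesized mask misparses. A worked example, assuming REQ_CAUSE is 0x02 (its definition sits above this hunk): for x = 0x10, the old 'x & AppInfoMask' expanded to (0x10 & 0x02) | 0x04 | 0x08 == 0x0c, whereas the intended 0x10 & (0x02 | 0x04 | 0x08) == 0x10 & 0x0e == 0x00.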
/* Connection Setup */
extern int capi_conn_req(const char *calledPN, struct sk_buff **buf,
diff --git a/drivers/staging/i4l/pcbit/drv.c b/drivers/staging/i4l/pcbit/drv.c
index 4172e22ae..c5270e229 100644
--- a/drivers/staging/i4l/pcbit/drv.c
+++ b/drivers/staging/i4l/pcbit/drv.c
@@ -284,7 +284,7 @@ static int pcbit_command(isdn_ctrl *ctl)
default:
printk(KERN_DEBUG "pcbit_command: unknown command\n");
break;
- };
+ }
return 0;
}
@@ -699,8 +699,8 @@ void pcbit_l3_receive(struct pcbit_dev *dev, ulong msg,
*/
static char statbuf[STATBUF_LEN];
-static int stat_st = 0;
-static int stat_end = 0;
+static int stat_st;
+static int stat_end;
static int pcbit_stat(u_char __user *buf, int len, int driver, int channel)
{
@@ -968,7 +968,7 @@ static int pcbit_ioctl(isdn_ctrl *ctl)
default:
printk("error: unknown ioctl\n");
break;
- };
+ }
return 0;
}
diff --git a/drivers/staging/i4l/pcbit/edss1.c b/drivers/staging/i4l/pcbit/edss1.c
index b2262ba6f..e72c16420 100644
--- a/drivers/staging/i4l/pcbit/edss1.c
+++ b/drivers/staging/i4l/pcbit/edss1.c
@@ -254,7 +254,7 @@ static void pcbit_fsm_timer(unsigned long data)
dev = chan2dev(chan);
- if (dev == NULL) {
+ if (!dev) {
printk(KERN_WARNING "pcbit: timer for unknown device\n");
return;
}
diff --git a/drivers/staging/i4l/pcbit/layer2.h b/drivers/staging/i4l/pcbit/layer2.h
index be1327bc1..6b9063e38 100644
--- a/drivers/staging/i4l/pcbit/layer2.h
+++ b/drivers/staging/i4l/pcbit/layer2.h
@@ -109,7 +109,7 @@
#define SCHED_READ 0x01
#define SCHED_WRITE 0x02
-#define SET_RUN_TIMEOUT 2 * HZ /* 2 seconds */
+#define SET_RUN_TIMEOUT (2 * HZ) /* 2 seconds */
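Same parenthesization rationale as the capi.h fix above. With HZ = 100 (HZ is config-dependent; 100 is assumed only for the arithmetic), an expression such as 1000 / SET_RUN_TIMEOUT would previously have expanded left to right as 1000 / 2 * HZ == 50000, instead of the intended 1000 / (2 * HZ) == 5.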
struct frame_buf {
ulong msg;
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index fa67da940..f066aa30f 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -27,18 +27,6 @@ config ADIS16203
To compile this driver as a module, say M here: the module will be
called adis16203.
-config ADIS16204
- tristate "Analog Devices ADIS16204 Programmable High-g Digital Impact Sensor and Recorder"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16204 Programmable
- High-g Digital Impact Sensor and Recorder.
-
- To compile this driver as a module, say M here: the module will be
- called adis16204.
-
config ADIS16209
tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
depends on SPI
@@ -51,17 +39,6 @@ config ADIS16209
To compile this driver as a module, say M here: the module will be
called adis16209.
-config ADIS16220
- tristate "Analog Devices ADIS16220 Programmable Digital Vibration Sensor"
- depends on SPI
- select IIO_ADIS_LIB
- help
- Say Y here to build support for Analog Devices adis16220 programmable
- digital vibration sensor.
-
- To compile this driver as a module, say M here: the module will be
- called adis16220.
-
config ADIS16240
tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder"
depends on SPI
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 1ed137f1a..415329c96 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -8,15 +8,9 @@ obj-$(CONFIG_ADIS16201) += adis16201.o
adis16203-y := adis16203_core.o
obj-$(CONFIG_ADIS16203) += adis16203.o
-adis16204-y := adis16204_core.o
-obj-$(CONFIG_ADIS16204) += adis16204.o
-
adis16209-y := adis16209_core.o
obj-$(CONFIG_ADIS16209) += adis16209.o
-adis16220-y := adis16220_core.o
-obj-$(CONFIG_ADIS16220) += adis16220.o
-
adis16240-y := adis16240_core.o
obj-$(CONFIG_ADIS16240) += adis16240.o
diff --git a/drivers/staging/iio/accel/adis16201.h b/drivers/staging/iio/accel/adis16201.h
index e6b8c9af6..64844adca 100644
--- a/drivers/staging/iio/accel/adis16201.h
+++ b/drivers/staging/iio/accel/adis16201.h
@@ -3,51 +3,129 @@
#define ADIS16201_STARTUP_DELAY 220 /* ms */
-#define ADIS16201_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16201_SUPPLY_OUT 0x02 /* Output, power supply */
-#define ADIS16201_XACCL_OUT 0x04 /* Output, x-axis accelerometer */
-#define ADIS16201_YACCL_OUT 0x06 /* Output, y-axis accelerometer */
-#define ADIS16201_AUX_ADC 0x08 /* Output, auxiliary ADC input */
-#define ADIS16201_TEMP_OUT 0x0A /* Output, temperature */
-#define ADIS16201_XINCL_OUT 0x0C /* Output, x-axis inclination */
-#define ADIS16201_YINCL_OUT 0x0E /* Output, y-axis inclination */
-#define ADIS16201_XACCL_OFFS 0x10 /* Calibration, x-axis acceleration offset */
-#define ADIS16201_YACCL_OFFS 0x12 /* Calibration, y-axis acceleration offset */
-#define ADIS16201_XACCL_SCALE 0x14 /* x-axis acceleration scale factor */
-#define ADIS16201_YACCL_SCALE 0x16 /* y-axis acceleration scale factor */
-#define ADIS16201_XINCL_OFFS 0x18 /* Calibration, x-axis inclination offset */
-#define ADIS16201_YINCL_OFFS 0x1A /* Calibration, y-axis inclination offset */
-#define ADIS16201_XINCL_SCALE 0x1C /* x-axis inclination scale factor */
-#define ADIS16201_YINCL_SCALE 0x1E /* y-axis inclination scale factor */
-#define ADIS16201_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
-#define ADIS16201_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
-#define ADIS16201_ALM_SMPL1 0x24 /* Alarm 1, sample period */
-#define ADIS16201_ALM_SMPL2 0x26 /* Alarm 2, sample period */
-#define ADIS16201_ALM_CTRL 0x28 /* Alarm control */
-#define ADIS16201_AUX_DAC 0x30 /* Auxiliary DAC data */
-#define ADIS16201_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
-#define ADIS16201_MSC_CTRL 0x34 /* Miscellaneous control */
-#define ADIS16201_SMPL_PRD 0x36 /* Internal sample period (rate) control */
-#define ADIS16201_AVG_CNT 0x38 /* Operation, filter configuration */
-#define ADIS16201_SLP_CNT 0x3A /* Operation, sleep mode control */
-#define ADIS16201_DIAG_STAT 0x3C /* Diagnostics, system status register */
-#define ADIS16201_GLOB_CMD 0x3E /* Operation, system command register */
+/* Flash memory write count */
+#define ADIS16201_FLASH_CNT 0x00
+
+/* Output, power supply */
+#define ADIS16201_SUPPLY_OUT 0x02
+
+/* Output, x-axis accelerometer */
+#define ADIS16201_XACCL_OUT 0x04
+
+/* Output, y-axis accelerometer */
+#define ADIS16201_YACCL_OUT 0x06
+
+/* Output, auxiliary ADC input */
+#define ADIS16201_AUX_ADC 0x08
+
+/* Output, temperature */
+#define ADIS16201_TEMP_OUT 0x0A
+
+/* Output, x-axis inclination */
+#define ADIS16201_XINCL_OUT 0x0C
+
+/* Output, y-axis inclination */
+#define ADIS16201_YINCL_OUT 0x0E
+
+/* Calibration, x-axis acceleration offset */
+#define ADIS16201_XACCL_OFFS 0x10
+
+/* Calibration, y-axis acceleration offset */
+#define ADIS16201_YACCL_OFFS 0x12
+
+/* x-axis acceleration scale factor */
+#define ADIS16201_XACCL_SCALE 0x14
+
+/* y-axis acceleration scale factor */
+#define ADIS16201_YACCL_SCALE 0x16
+
+/* Calibration, x-axis inclination offset */
+#define ADIS16201_XINCL_OFFS 0x18
+
+/* Calibration, y-axis inclination offset */
+#define ADIS16201_YINCL_OFFS 0x1A
+
+/* x-axis inclination scale factor */
+#define ADIS16201_XINCL_SCALE 0x1C
+
+/* y-axis inclination scale factor */
+#define ADIS16201_YINCL_SCALE 0x1E
+
+/* Alarm 1 amplitude threshold */
+#define ADIS16201_ALM_MAG1 0x20
+
+/* Alarm 2 amplitude threshold */
+#define ADIS16201_ALM_MAG2 0x22
+
+/* Alarm 1, sample period */
+#define ADIS16201_ALM_SMPL1 0x24
+
+/* Alarm 2, sample period */
+#define ADIS16201_ALM_SMPL2 0x26
+
+/* Alarm control */
+#define ADIS16201_ALM_CTRL 0x28
+
+/* Auxiliary DAC data */
+#define ADIS16201_AUX_DAC 0x30
+
+/* General-purpose digital input/output control */
+#define ADIS16201_GPIO_CTRL 0x32
+
+/* Miscellaneous control */
+#define ADIS16201_MSC_CTRL 0x34
+
+/* Internal sample period (rate) control */
+#define ADIS16201_SMPL_PRD 0x36
+
+/* Operation, filter configuration */
+#define ADIS16201_AVG_CNT 0x38
+
+/* Operation, sleep mode control */
+#define ADIS16201_SLP_CNT 0x3A
+
+/* Diagnostics, system status register */
+#define ADIS16201_DIAG_STAT 0x3C
+
+/* Operation, system command register */
+#define ADIS16201_GLOB_CMD 0x3E
/* MSC_CTRL */
-#define ADIS16201_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */
-#define ADIS16201_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16201_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1 BIT(0) /* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+
+/* Self-test enable */
+#define ADIS16201_MSC_CTRL_SELF_TEST_EN BIT(8)
+
+/* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16201_MSC_CTRL_DATA_RDY_EN BIT(2)
+
+/* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16201_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
+/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
/* DIAG_STAT */
-#define ADIS16201_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16201_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16201_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */
-#define ADIS16201_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */
-#define ADIS16201_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */
-#define ADIS16201_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 3.15 V */
+
+/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16201_DIAG_STAT_ALARM2 BIT(9)
+
+/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16201_DIAG_STAT_ALARM1 BIT(8)
+
+/* SPI communications failure */
+#define ADIS16201_DIAG_STAT_SPI_FAIL_BIT 3
+
+/* Flash update failure */
+#define ADIS16201_DIAG_STAT_FLASH_UPT_BIT 2
+
+/* Power supply above 3.625 V */
+#define ADIS16201_DIAG_STAT_POWER_HIGH_BIT 1
+
+/* Power supply below 3.15 V */
+#define ADIS16201_DIAG_STAT_POWER_LOW_BIT 0
/* GLOB_CMD */
+
#define ADIS16201_GLOB_CMD_SW_RESET BIT(7)
#define ADIS16201_GLOB_CMD_FACTORY_CAL BIT(1)
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
index 06c0b75ed..6f3f8ff2a 100644
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -167,6 +167,7 @@ static const struct adis_data adis16201_data = {
.diag_stat_reg = ADIS16201_DIAG_STAT,
.self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
.startup_delay = ADIS16201_STARTUP_DELAY,
.status_error_msgs = adis16201_status_error_msgs,
diff --git a/drivers/staging/iio/accel/adis16203.h b/drivers/staging/iio/accel/adis16203.h
index 6426e38bf..b483e4e64 100644
--- a/drivers/staging/iio/accel/adis16203.h
+++ b/drivers/staging/iio/accel/adis16203.h
@@ -3,45 +3,111 @@
#define ADIS16203_STARTUP_DELAY 220 /* ms */
-#define ADIS16203_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16203_SUPPLY_OUT 0x02 /* Output, power supply */
-#define ADIS16203_AUX_ADC 0x08 /* Output, auxiliary ADC input */
-#define ADIS16203_TEMP_OUT 0x0A /* Output, temperature */
-#define ADIS16203_XINCL_OUT 0x0C /* Output, x-axis inclination */
-#define ADIS16203_YINCL_OUT 0x0E /* Output, y-axis inclination */
-#define ADIS16203_INCL_NULL 0x18 /* Incline null calibration */
-#define ADIS16203_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
-#define ADIS16203_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
-#define ADIS16203_ALM_SMPL1 0x24 /* Alarm 1, sample period */
-#define ADIS16203_ALM_SMPL2 0x26 /* Alarm 2, sample period */
-#define ADIS16203_ALM_CTRL 0x28 /* Alarm control */
-#define ADIS16203_AUX_DAC 0x30 /* Auxiliary DAC data */
-#define ADIS16203_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
-#define ADIS16203_MSC_CTRL 0x34 /* Miscellaneous control */
-#define ADIS16203_SMPL_PRD 0x36 /* Internal sample period (rate) control */
-#define ADIS16203_AVG_CNT 0x38 /* Operation, filter configuration */
-#define ADIS16203_SLP_CNT 0x3A /* Operation, sleep mode control */
-#define ADIS16203_DIAG_STAT 0x3C /* Diagnostics, system status register */
-#define ADIS16203_GLOB_CMD 0x3E /* Operation, system command register */
+/* Flash memory write count */
+#define ADIS16203_FLASH_CNT 0x00
+
+/* Output, power supply */
+#define ADIS16203_SUPPLY_OUT 0x02
+
+/* Output, auxiliary ADC input */
+#define ADIS16203_AUX_ADC 0x08
+
+/* Output, temperature */
+#define ADIS16203_TEMP_OUT 0x0A
+
+/* Output, x-axis inclination */
+#define ADIS16203_XINCL_OUT 0x0C
+
+/* Output, y-axis inclination */
+#define ADIS16203_YINCL_OUT 0x0E
+
+/* Incline null calibration */
+#define ADIS16203_INCL_NULL 0x18
+
+/* Alarm 1 amplitude threshold */
+#define ADIS16203_ALM_MAG1 0x20
+
+/* Alarm 2 amplitude threshold */
+#define ADIS16203_ALM_MAG2 0x22
+
+/* Alarm 1, sample period */
+#define ADIS16203_ALM_SMPL1 0x24
+
+/* Alarm 2, sample period */
+#define ADIS16203_ALM_SMPL2 0x26
+
+/* Alarm control */
+#define ADIS16203_ALM_CTRL 0x28
+
+/* Auxiliary DAC data */
+#define ADIS16203_AUX_DAC 0x30
+
+/* General-purpose digital input/output control */
+#define ADIS16203_GPIO_CTRL 0x32
+
+/* Miscellaneous control */
+#define ADIS16203_MSC_CTRL 0x34
+
+/* Internal sample period (rate) control */
+#define ADIS16203_SMPL_PRD 0x36
+
+/* Operation, filter configuration */
+#define ADIS16203_AVG_CNT 0x38
+
+/* Operation, sleep mode control */
+#define ADIS16203_SLP_CNT 0x3A
+
+/* Diagnostics, system status register */
+#define ADIS16203_DIAG_STAT 0x3C
+
+/* Operation, system command register */
+#define ADIS16203_GLOB_CMD 0x3E
/* MSC_CTRL */
-#define ADIS16203_MSC_CTRL_PWRUP_SELF_TEST BIT(10) /* Self-test at power-on: 1 = disabled, 0 = enabled */
-#define ADIS16203_MSC_CTRL_REVERSE_ROT_EN BIT(9) /* Reverses rotation of both inclination outputs */
-#define ADIS16203_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */
-#define ADIS16203_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16203_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16203_MSC_CTRL_DATA_RDY_DIO1 BIT(0) /* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+
+/* Self-test at power-on: 1 = disabled, 0 = enabled */
+#define ADIS16203_MSC_CTRL_PWRUP_SELF_TEST BIT(10)
+
+/* Reverses rotation of both inclination outputs */
+#define ADIS16203_MSC_CTRL_REVERSE_ROT_EN BIT(9)
+
+/* Self-test enable */
+#define ADIS16203_MSC_CTRL_SELF_TEST_EN BIT(8)
+
+/* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16203_MSC_CTRL_DATA_RDY_EN BIT(2)
+
+/* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16203_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
+/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+#define ADIS16203_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
/* DIAG_STAT */
-#define ADIS16203_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16203_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT 5 /* Self-test diagnostic error flag */
-#define ADIS16203_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */
-#define ADIS16203_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */
-#define ADIS16203_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */
-#define ADIS16203_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 3.15 V */
+
+/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16203_DIAG_STAT_ALARM2 BIT(9)
+
+/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16203_DIAG_STAT_ALARM1 BIT(8)
+
+/* Self-test diagnostic error flag */
+#define ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT 5
+
+/* SPI communications failure */
+#define ADIS16203_DIAG_STAT_SPI_FAIL_BIT 3
+
+/* Flash update failure */
+#define ADIS16203_DIAG_STAT_FLASH_UPT_BIT 2
+
+/* Power supply above 3.625 V */
+#define ADIS16203_DIAG_STAT_POWER_HIGH_BIT 1
+
+/* Power supply below 3.15 V */
+#define ADIS16203_DIAG_STAT_POWER_LOW_BIT 0
/* GLOB_CMD */
+
#define ADIS16203_GLOB_CMD_SW_RESET BIT(7)
#define ADIS16203_GLOB_CMD_CLEAR_STAT BIT(4)
#define ADIS16203_GLOB_CMD_FACTORY_CAL BIT(1)
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index de5b84ac8..c70671778 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -134,6 +134,7 @@ static const struct adis_data adis16203_data = {
.diag_stat_reg = ADIS16203_DIAG_STAT,
.self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
.startup_delay = ADIS16203_STARTUP_DELAY,
.status_error_msgs = adis16203_status_error_msgs,
diff --git a/drivers/staging/iio/accel/adis16204.h b/drivers/staging/iio/accel/adis16204.h
deleted file mode 100644
index 0b23f0b5c..000000000
--- a/drivers/staging/iio/accel/adis16204.h
+++ /dev/null
@@ -1,68 +0,0 @@
-#ifndef SPI_ADIS16204_H_
-#define SPI_ADIS16204_H_
-
-#define ADIS16204_STARTUP_DELAY 220 /* ms */
-
-#define ADIS16204_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16204_SUPPLY_OUT 0x02 /* Output, power supply */
-#define ADIS16204_XACCL_OUT 0x04 /* Output, x-axis accelerometer */
-#define ADIS16204_YACCL_OUT 0x06 /* Output, y-axis accelerometer */
-#define ADIS16204_AUX_ADC 0x08 /* Output, auxiliary ADC input */
-#define ADIS16204_TEMP_OUT 0x0A /* Output, temperature */
-#define ADIS16204_X_PEAK_OUT 0x0C /* Twos complement */
-#define ADIS16204_Y_PEAK_OUT 0x0E /* Twos complement */
-#define ADIS16204_XACCL_NULL 0x10 /* Calibration, x-axis acceleration offset null */
-#define ADIS16204_YACCL_NULL 0x12 /* Calibration, y-axis acceleration offset null */
-#define ADIS16204_XACCL_SCALE 0x14 /* X-axis scale factor calibration register */
-#define ADIS16204_YACCL_SCALE 0x16 /* Y-axis scale factor calibration register */
-#define ADIS16204_XY_RSS_OUT 0x18 /* XY combined acceleration (RSS) */
-#define ADIS16204_XY_PEAK_OUT 0x1A /* Peak, XY combined output (RSS) */
-#define ADIS16204_CAP_BUF_1 0x1C /* Capture buffer output register 1 */
-#define ADIS16204_CAP_BUF_2 0x1E /* Capture buffer output register 2 */
-#define ADIS16204_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
-#define ADIS16204_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
-#define ADIS16204_ALM_CTRL 0x28 /* Alarm control */
-#define ADIS16204_CAPT_PNTR 0x2A /* Capture register address pointer */
-#define ADIS16204_AUX_DAC 0x30 /* Auxiliary DAC data */
-#define ADIS16204_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
-#define ADIS16204_MSC_CTRL 0x34 /* Miscellaneous control */
-#define ADIS16204_SMPL_PRD 0x36 /* Internal sample period (rate) control */
-#define ADIS16204_AVG_CNT 0x38 /* Operation, filter configuration */
-#define ADIS16204_SLP_CNT 0x3A /* Operation, sleep mode control */
-#define ADIS16204_DIAG_STAT 0x3C /* Diagnostics, system status register */
-#define ADIS16204_GLOB_CMD 0x3E /* Operation, system command register */
-
-/* MSC_CTRL */
-#define ADIS16204_MSC_CTRL_PWRUP_SELF_TEST BIT(10) /* Self-test at power-on: 1 = disabled, 0 = enabled */
-#define ADIS16204_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */
-#define ADIS16204_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16204_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16204_MSC_CTRL_DATA_RDY_DIO2 BIT(0) /* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
-
-/* DIAG_STAT */
-#define ADIS16204_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16204_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT 5 /* Self-test diagnostic error flag: 1 = error condition,
- 0 = normal operation */
-#define ADIS16204_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */
-#define ADIS16204_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */
-#define ADIS16204_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */
-#define ADIS16204_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 2.975 V */
-
-/* GLOB_CMD */
-#define ADIS16204_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16204_GLOB_CMD_CLEAR_STAT BIT(4)
-#define ADIS16204_GLOB_CMD_FACTORY_CAL BIT(1)
-
-#define ADIS16204_ERROR_ACTIVE BIT(14)
-
-enum adis16204_scan {
- ADIS16204_SCAN_ACC_X,
- ADIS16204_SCAN_ACC_Y,
- ADIS16204_SCAN_ACC_XY,
- ADIS16204_SCAN_SUPPLY,
- ADIS16204_SCAN_AUX_ADC,
- ADIS16204_SCAN_TEMP,
-};
-
-#endif /* SPI_ADIS16204_H_ */
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
deleted file mode 100644
index 20a9df64f..000000000
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * ADIS16204 Programmable High-g Digital Impact Sensor and Recorder
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/imu/adis.h>
-
-#include "adis16204.h"
-
-/* Unique to this driver currently */
-
-static const u8 adis16204_addresses[][2] = {
- [ADIS16204_SCAN_ACC_X] = { ADIS16204_XACCL_NULL, ADIS16204_X_PEAK_OUT },
- [ADIS16204_SCAN_ACC_Y] = { ADIS16204_YACCL_NULL, ADIS16204_Y_PEAK_OUT },
- [ADIS16204_SCAN_ACC_XY] = { 0, ADIS16204_XY_PEAK_OUT },
-};
-
-static int adis16204_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- int bits;
- u8 addr;
- s16 val16;
- int addrind;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16204_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 1;
- *val2 = 220000; /* 1.22 mV */
- } else {
- *val = 0;
- *val2 = 610000; /* 0.61 mV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_TEMP:
- *val = -470; /* 0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val = 0;
- switch (chan->channel2) {
- case IIO_MOD_X:
- case IIO_MOD_ROOT_SUM_SQUARED_X_Y:
- *val2 = IIO_G_TO_M_S_2(17125); /* 17.125 mg */
- break;
- case IIO_MOD_Y:
- case IIO_MOD_Z:
- *val2 = IIO_G_TO_M_S_2(8407); /* 8.407 mg */
- break;
- }
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / -470 - 1278; /* 25 C = 1278 */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- case IIO_CHAN_INFO_PEAK:
- if (mask == IIO_CHAN_INFO_CALIBBIAS) {
- bits = 12;
- addrind = 0;
- } else { /* PEAK_SEPARATE */
- bits = 14;
- addrind = 1;
- }
- mutex_lock(&indio_dev->mlock);
- addr = adis16204_addresses[chan->scan_index][addrind];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- }
- return -EINVAL;
-}
-
-static int adis16204_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int bits;
- s16 val16;
- u8 addr;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- bits = 12;
- break;
- default:
- return -EINVAL;
- }
- val16 = val & ((1 << bits) - 1);
- addr = adis16204_addresses[chan->scan_index][1];
- return adis_write_reg_16(st, addr, val16);
- }
- return -EINVAL;
-}
-
-static const struct iio_chan_spec adis16204_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16204_SUPPLY_OUT, ADIS16204_SCAN_SUPPLY, 0, 12),
- ADIS_AUX_ADC_CHAN(ADIS16204_AUX_ADC, ADIS16204_SCAN_AUX_ADC, 0, 12),
- ADIS_TEMP_CHAN(ADIS16204_TEMP_OUT, ADIS16204_SCAN_TEMP, 0, 12),
- ADIS_ACCEL_CHAN(X, ADIS16204_XACCL_OUT, ADIS16204_SCAN_ACC_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 14),
- ADIS_ACCEL_CHAN(Y, ADIS16204_YACCL_OUT, ADIS16204_SCAN_ACC_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 14),
- ADIS_ACCEL_CHAN(ROOT_SUM_SQUARED_X_Y, ADIS16204_XY_RSS_OUT,
- ADIS16204_SCAN_ACC_XY, BIT(IIO_CHAN_INFO_PEAK), 0, 14),
- IIO_CHAN_SOFT_TIMESTAMP(5),
-};
-
-static const struct iio_info adis16204_info = {
- .read_raw = &adis16204_read_raw,
- .write_raw = &adis16204_write_raw,
- .update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
-};
-
-static const char * const adis16204_status_error_msgs[] = {
- [ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT] = "Self test failure",
- [ADIS16204_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16204_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16204_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16204_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
-};
-
-static const struct adis_data adis16204_data = {
- .read_delay = 20,
- .msc_ctrl_reg = ADIS16204_MSC_CTRL,
- .glob_cmd_reg = ADIS16204_GLOB_CMD,
- .diag_stat_reg = ADIS16204_DIAG_STAT,
-
- .self_test_mask = ADIS16204_MSC_CTRL_SELF_TEST_EN,
- .startup_delay = ADIS16204_STARTUP_DELAY,
-
- .status_error_msgs = adis16204_status_error_msgs,
- .status_error_mask = BIT(ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT) |
- BIT(ADIS16204_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16204_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16204_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16204_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16204_probe(struct spi_device *spi)
-{
- int ret;
- struct adis *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16204_info;
- indio_dev->channels = adis16204_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16204_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis_init(st, indio_dev, spi, &adis16204_data);
- if (ret)
- return ret;
-
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(st);
- if (ret)
- goto error_cleanup_buffer_trigger;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16204_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
-
- return 0;
-}
-
-static struct spi_driver adis16204_driver = {
- .driver = {
- .name = "adis16204",
- },
- .probe = adis16204_probe,
- .remove = adis16204_remove,
-};
-module_spi_driver(adis16204_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("ADIS16204 High-g Digital Impact Sensor and Recorder");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16204");
diff --git a/drivers/staging/iio/accel/adis16209.h b/drivers/staging/iio/accel/adis16209.h
index 813698d18..315f1c0c4 100644
--- a/drivers/staging/iio/accel/adis16209.h
+++ b/drivers/staging/iio/accel/adis16209.h
@@ -5,88 +5,127 @@
/* Flash memory write count */
#define ADIS16209_FLASH_CNT 0x00
+
/* Output, power supply */
#define ADIS16209_SUPPLY_OUT 0x02
+
/* Output, x-axis accelerometer */
#define ADIS16209_XACCL_OUT 0x04
+
/* Output, y-axis accelerometer */
#define ADIS16209_YACCL_OUT 0x06
+
/* Output, auxiliary ADC input */
#define ADIS16209_AUX_ADC 0x08
+
/* Output, temperature */
#define ADIS16209_TEMP_OUT 0x0A
+
/* Output, x-axis inclination */
#define ADIS16209_XINCL_OUT 0x0C
+
/* Output, y-axis inclination */
#define ADIS16209_YINCL_OUT 0x0E
+
/* Output, +/-180 vertical rotational position */
#define ADIS16209_ROT_OUT 0x10
+
/* Calibration, x-axis acceleration offset null */
#define ADIS16209_XACCL_NULL 0x12
+
/* Calibration, y-axis acceleration offset null */
#define ADIS16209_YACCL_NULL 0x14
+
/* Calibration, x-axis inclination offset null */
#define ADIS16209_XINCL_NULL 0x16
+
/* Calibration, y-axis inclination offset null */
#define ADIS16209_YINCL_NULL 0x18
+
/* Calibration, vertical rotation offset null */
#define ADIS16209_ROT_NULL 0x1A
+
/* Alarm 1 amplitude threshold */
#define ADIS16209_ALM_MAG1 0x20
+
/* Alarm 2 amplitude threshold */
#define ADIS16209_ALM_MAG2 0x22
+
/* Alarm 1, sample period */
#define ADIS16209_ALM_SMPL1 0x24
+
/* Alarm 2, sample period */
#define ADIS16209_ALM_SMPL2 0x26
+
/* Alarm control */
#define ADIS16209_ALM_CTRL 0x28
+
/* Auxiliary DAC data */
#define ADIS16209_AUX_DAC 0x30
+
/* General-purpose digital input/output control */
#define ADIS16209_GPIO_CTRL 0x32
+
/* Miscellaneous control */
#define ADIS16209_MSC_CTRL 0x34
+
/* Internal sample period (rate) control */
#define ADIS16209_SMPL_PRD 0x36
+
/* Operation, filter configuration */
#define ADIS16209_AVG_CNT 0x38
+
/* Operation, sleep mode control */
#define ADIS16209_SLP_CNT 0x3A
+
/* Diagnostics, system status register */
#define ADIS16209_DIAG_STAT 0x3C
+
/* Operation, system command register */
#define ADIS16209_GLOB_CMD 0x3E
/* MSC_CTRL */
+
/* Self-test at power-on: 1 = disabled, 0 = enabled */
#define ADIS16209_MSC_CTRL_PWRUP_SELF_TEST BIT(10)
+
/* Self-test enable */
#define ADIS16209_MSC_CTRL_SELF_TEST_EN BIT(8)
+
/* Data-ready enable: 1 = enabled, 0 = disabled */
#define ADIS16209_MSC_CTRL_DATA_RDY_EN BIT(2)
+
/* Data-ready polarity: 1 = active high, 0 = active low */
#define ADIS16209_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
#define ADIS16209_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
/* DIAG_STAT */
+
/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
#define ADIS16209_DIAG_STAT_ALARM2 BIT(9)
+
/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
#define ADIS16209_DIAG_STAT_ALARM1 BIT(8)
+
/* Self-test diagnostic error flag: 1 = error condition, 0 = normal operation */
#define ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT 5
+
/* SPI communications failure */
#define ADIS16209_DIAG_STAT_SPI_FAIL_BIT 3
+
/* Flash update failure */
#define ADIS16209_DIAG_STAT_FLASH_UPT_BIT 2
+
/* Power supply above 3.625 V */
#define ADIS16209_DIAG_STAT_POWER_HIGH_BIT 1
+
/* Power supply below 3.15 V */
#define ADIS16209_DIAG_STAT_POWER_LOW_BIT 0
/* GLOB_CMD */
+
#define ADIS16209_GLOB_CMD_SW_RESET BIT(7)
#define ADIS16209_GLOB_CMD_CLEAR_STAT BIT(4)
#define ADIS16209_GLOB_CMD_FACTORY_CAL BIT(1)
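
Two conventions coexist in this header: control flags are defined directly as masks via BIT(n), while the DIAG_STAT error flags are plain bit numbers suffixed _BIT. That is deliberate: the shared adis core indexes its error-message table by bit number and derives the mask itself, as the adis16204 tables removed above illustrate. A condensed sketch:

/*
 * Sketch: the _BIT defines serve double duty, as an index into the
 * message table and, wrapped in BIT(), as part of the error mask.
 */
static const char * const status_error_msgs[] = {
	[ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT] = "Self test failure",
	[ADIS16209_DIAG_STAT_SPI_FAIL_BIT]	= "SPI failure",
};

#define STATUS_ERROR_MASK \
	(BIT(ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT) | \
	 BIT(ADIS16209_DIAG_STAT_SPI_FAIL_BIT))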
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index 8b42bf8c3..8dbad5862 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -168,6 +168,7 @@ static const struct adis_data adis16209_data = {
.diag_stat_reg = ADIS16209_DIAG_STAT,
.self_test_mask = ADIS16209_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
.startup_delay = ADIS16209_STARTUP_DELAY,
.status_error_msgs = adis16209_status_error_msgs,
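
The one functional change here is the new self_test_no_autoclear flag. It tells the shared adis library that this part does not clear the self-test enable bit by itself, so the library must write MSC_CTRL back to zero once the test result has been read. Roughly, as a sketch of the core's behaviour rather than its exact code:

/* Sketch: how the adis core honours self_test_no_autoclear. */
static int adis_self_test_sketch(struct adis *adis)
{
	int ret;

	ret = adis_write_reg_16(adis, adis->data->msc_ctrl_reg,
				adis->data->self_test_mask);
	if (ret)
		return ret;

	msleep(adis->data->startup_delay);
	ret = adis_check_status(adis);

	/* parts like the ADIS16209 leave the enable bit set */
	if (adis->data->self_test_no_autoclear)
		adis_write_reg_16(adis, adis->data->msc_ctrl_reg, 0);

	return ret;
}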
diff --git a/drivers/staging/iio/accel/adis16220.h b/drivers/staging/iio/accel/adis16220.h
deleted file mode 100644
index eab863311..000000000
--- a/drivers/staging/iio/accel/adis16220.h
+++ /dev/null
@@ -1,140 +0,0 @@
-#ifndef SPI_ADIS16220_H_
-#define SPI_ADIS16220_H_
-
-#include <linux/iio/imu/adis.h>
-
-#define ADIS16220_STARTUP_DELAY 220 /* ms */
-
-/* Flash memory write count */
-#define ADIS16220_FLASH_CNT 0x00
-/* Control, acceleration offset adjustment control */
-#define ADIS16220_ACCL_NULL 0x02
-/* Control, AIN1 offset adjustment control */
-#define ADIS16220_AIN1_NULL 0x04
-/* Control, AIN2 offset adjustment control */
-#define ADIS16220_AIN2_NULL 0x06
-/* Output, power supply during capture */
-#define ADIS16220_CAPT_SUPPLY 0x0A
-/* Output, temperature during capture */
-#define ADIS16220_CAPT_TEMP 0x0C
-/* Output, peak acceleration during capture */
-#define ADIS16220_CAPT_PEAKA 0x0E
-/* Output, peak AIN1 level during capture */
-#define ADIS16220_CAPT_PEAK1 0x10
-/* Output, peak AIN2 level during capture */
-#define ADIS16220_CAPT_PEAK2 0x12
-/* Output, capture buffer for acceleration */
-#define ADIS16220_CAPT_BUFA 0x14
-/* Output, capture buffer for AIN1 */
-#define ADIS16220_CAPT_BUF1 0x16
-/* Output, capture buffer for AIN2 */
-#define ADIS16220_CAPT_BUF2 0x18
-/* Control, capture buffer address pointer */
-#define ADIS16220_CAPT_PNTR 0x1A
-/* Control, capture control register */
-#define ADIS16220_CAPT_CTRL 0x1C
-/* Control, capture period (automatic mode) */
-#define ADIS16220_CAPT_PRD 0x1E
-/* Control, Alarm A, acceleration peak threshold */
-#define ADIS16220_ALM_MAGA 0x20
-/* Control, Alarm 1, AIN1 peak threshold */
-#define ADIS16220_ALM_MAG1 0x22
-/* Control, Alarm 2, AIN2 peak threshold */
-#define ADIS16220_ALM_MAG2 0x24
-/* Control, Alarm S, peak threshold */
-#define ADIS16220_ALM_MAGS 0x26
-/* Control, alarm configuration register */
-#define ADIS16220_ALM_CTRL 0x28
-/* Control, general I/O configuration */
-#define ADIS16220_GPIO_CTRL 0x32
-/* Control, self-test control, AIN configuration */
-#define ADIS16220_MSC_CTRL 0x34
-/* Control, digital I/O configuration */
-#define ADIS16220_DIO_CTRL 0x36
-/* Control, filter configuration */
-#define ADIS16220_AVG_CNT 0x38
-/* Status, system status */
-#define ADIS16220_DIAG_STAT 0x3C
-/* Control, system commands */
-#define ADIS16220_GLOB_CMD 0x3E
-/* Status, self-test response */
-#define ADIS16220_ST_DELTA 0x40
-/* Lot Identification Code 1 */
-#define ADIS16220_LOT_ID1 0x52
-/* Lot Identification Code 2 */
-#define ADIS16220_LOT_ID2 0x54
-/* Product identifier; convert to decimal = 16220 */
-#define ADIS16220_PROD_ID 0x56
-/* Serial number */
-#define ADIS16220_SERIAL_NUM 0x58
-
-#define ADIS16220_CAPTURE_SIZE 2048
-
-/* MSC_CTRL */
-#define ADIS16220_MSC_CTRL_SELF_TEST_EN BIT(8)
-#define ADIS16220_MSC_CTRL_POWER_SUP_COM_AIN1 BIT(1)
-#define ADIS16220_MSC_CTRL_POWER_SUP_COM_AIN2 BIT(0)
-
-/* DIO_CTRL */
-#define ADIS16220_MSC_CTRL_DIO2_BUSY_IND (BIT(5) | BIT(4))
-#define ADIS16220_MSC_CTRL_DIO1_BUSY_IND (BIT(3) | BIT(2))
-#define ADIS16220_MSC_CTRL_DIO2_ACT_HIGH BIT(1)
-#define ADIS16220_MSC_CTRL_DIO1_ACT_HIGH BIT(0)
-
-/* DIAG_STAT */
-/* AIN2 sample > ALM_MAG2 */
-#define ADIS16220_DIAG_STAT_ALM_MAG2 BIT(14)
-/* AIN1 sample > ALM_MAG1 */
-#define ADIS16220_DIAG_STAT_ALM_MAG1 BIT(13)
-/* Acceleration sample > ALM_MAGA */
-#define ADIS16220_DIAG_STAT_ALM_MAGA BIT(12)
-/* Error condition programmed into ALM_MAGS[11:0] and ALM_CTRL[5:4] is true */
-#define ADIS16220_DIAG_STAT_ALM_MAGS BIT(11)
-/* |Peak value in AIN2 data capture| > ALM_MAG2 */
-#define ADIS16220_DIAG_STAT_PEAK_AIN2 BIT(10)
-/* |Peak value in AIN1 data capture| > ALM_MAG1 */
-#define ADIS16220_DIAG_STAT_PEAK_AIN1 BIT(9)
-/* |Peak value in acceleration data capture| > ALM_MAGA */
-#define ADIS16220_DIAG_STAT_PEAK_ACCEL BIT(8)
-/* Data ready, capture complete */
-#define ADIS16220_DIAG_STAT_DATA_RDY BIT(7)
-#define ADIS16220_DIAG_STAT_FLASH_CHK BIT(6)
-#define ADIS16220_DIAG_STAT_SELF_TEST BIT(5)
-/* Capture period violation/interruption */
-#define ADIS16220_DIAG_STAT_VIOLATION_BIT 4
-/* SPI communications failure */
-#define ADIS16220_DIAG_STAT_SPI_FAIL_BIT 3
-/* Flash update failure */
-#define ADIS16220_DIAG_STAT_FLASH_UPT_BIT 2
-/* Power supply above 3.625 V */
-#define ADIS16220_DIAG_STAT_POWER_HIGH_BIT 1
-/* Power supply below 3.15 V */
-#define ADIS16220_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-#define ADIS16220_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16220_GLOB_CMD_SELF_TEST BIT(2)
-#define ADIS16220_GLOB_CMD_PWR_DOWN BIT(1)
-
-#define ADIS16220_MAX_TX 2048
-#define ADIS16220_MAX_RX 2048
-
-#define ADIS16220_SPI_BURST (u32)(1000 * 1000)
-#define ADIS16220_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct adis16220_state - device instance specific data
- * @adis: adis device
- * @tx: transmit buffer
- * @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
- **/
-struct adis16220_state {
- struct adis adis;
-
- struct mutex buf_lock;
- u8 tx[ADIS16220_MAX_TX] ____cacheline_aligned;
- u8 rx[ADIS16220_MAX_RX];
-};
-
-#endif /* SPI_ADIS16220_H_ */
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
deleted file mode 100644
index d0165218b..000000000
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ /dev/null
@@ -1,494 +0,0 @@
-/*
- * ADIS16220 Programmable Digital Vibration Sensor driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-#include "adis16220.h"
-
-static ssize_t adis16220_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis16220_state *st = iio_priv(indio_dev);
- ssize_t ret;
- u16 val;
-
- /* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
- ret = adis_read_reg_16(&st->adis, this_attr->address, &val);
- mutex_unlock(&indio_dev->mlock);
- if (ret)
- return ret;
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t adis16220_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct adis16220_state *st = iio_priv(indio_dev);
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = adis_write_reg_16(&st->adis, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static int adis16220_capture(struct iio_dev *indio_dev)
-{
- struct adis16220_state *st = iio_priv(indio_dev);
- int ret;
-
- /* initiates a manual data capture */
- ret = adis_write_reg_16(&st->adis, ADIS16220_GLOB_CMD, 0xBF08);
- if (ret)
- dev_err(&indio_dev->dev, "problem beginning capture");
-
- usleep_range(10000, 11000); /* delay for capture to finish */
-
- return ret;
-}
-
-static ssize_t adis16220_write_capture(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- bool val;
- int ret;
-
- ret = strtobool(buf, &val);
- if (ret)
- return ret;
- if (!val)
- return -EINVAL;
- ret = adis16220_capture(indio_dev);
- if (ret)
- return ret;
-
- return len;
-}
-
-static ssize_t adis16220_capture_buffer_read(struct iio_dev *indio_dev,
- char *buf,
- loff_t off,
- size_t count,
- int addr)
-{
- struct adis16220_state *st = iio_priv(indio_dev);
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- .delay_usecs = 25,
- }, {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .cs_change = 1,
- .delay_usecs = 25,
- },
- };
- int ret;
- int i;
-
- if (unlikely(!count))
- return count;
-
- if ((off >= ADIS16220_CAPTURE_SIZE) || (count & 1) || (off & 1))
- return -EINVAL;
-
- if (off + count > ADIS16220_CAPTURE_SIZE)
- count = ADIS16220_CAPTURE_SIZE - off;
-
- /* write the begin position of capture buffer */
- ret = adis_write_reg_16(&st->adis,
- ADIS16220_CAPT_PNTR,
-				off >> 1);
- if (ret)
- return -EIO;
-
- /* read count/2 values from capture buffer */
- mutex_lock(&st->buf_lock);
-
- for (i = 0; i < count; i += 2) {
- st->tx[i] = ADIS_READ_REG(addr);
- st->tx[i + 1] = 0;
- }
- xfers[1].len = count;
-
- ret = spi_sync_transfer(st->adis.spi, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- mutex_unlock(&st->buf_lock);
- return -EIO;
- }
-
- memcpy(buf, st->rx, count);
-
- mutex_unlock(&st->buf_lock);
- return count;
-}
-
-static ssize_t adis16220_accel_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf,
- loff_t off,
- size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
-
- return adis16220_capture_buffer_read(indio_dev, buf,
- off, count,
- ADIS16220_CAPT_BUFA);
-}
-
-static struct bin_attribute accel_bin = {
- .attr = {
- .name = "accel_bin",
- .mode = S_IRUGO,
- },
- .read = adis16220_accel_bin_read,
- .size = ADIS16220_CAPTURE_SIZE,
-};
-
-static ssize_t adis16220_adc1_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
-
- return adis16220_capture_buffer_read(indio_dev, buf,
- off, count,
- ADIS16220_CAPT_BUF1);
-}
-
-static struct bin_attribute adc1_bin = {
- .attr = {
- .name = "in0_bin",
- .mode = S_IRUGO,
- },
- .read = adis16220_adc1_bin_read,
- .size = ADIS16220_CAPTURE_SIZE,
-};
-
-static ssize_t adis16220_adc2_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
-
- return adis16220_capture_buffer_read(indio_dev, buf,
- off, count,
- ADIS16220_CAPT_BUF2);
-}
-
-static struct bin_attribute adc2_bin = {
- .attr = {
- .name = "in1_bin",
- .mode = S_IRUGO,
- },
- .read = adis16220_adc2_bin_read,
- .size = ADIS16220_CAPTURE_SIZE,
-};
-
-#define IIO_DEV_ATTR_CAPTURE(_store) \
- IIO_DEVICE_ATTR(capture, S_IWUSR, NULL, _store, 0)
-
-static IIO_DEV_ATTR_CAPTURE(adis16220_write_capture);
-
-#define IIO_DEV_ATTR_CAPTURE_COUNT(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(capture_count, _mode, _show, _store, _addr)
-
-static IIO_DEV_ATTR_CAPTURE_COUNT(S_IWUSR | S_IRUGO,
- adis16220_read_16bit,
- adis16220_write_16bit,
- ADIS16220_CAPT_PNTR);
-
-enum adis16220_channel {
- in_supply, in_1, in_2, accel, temp
-};
-
-struct adis16220_address_spec {
- u8 addr;
- u8 bits;
- bool sign;
-};
-
-/* Address / bits / signed */
-static const struct adis16220_address_spec adis16220_addresses[][3] = {
- [in_supply] = { { ADIS16220_CAPT_SUPPLY, 12, 0 }, },
- [in_1] = { { ADIS16220_CAPT_BUF1, 16, 1 },
- { ADIS16220_AIN1_NULL, 16, 1 },
- { ADIS16220_CAPT_PEAK1, 16, 1 }, },
- [in_2] = { { ADIS16220_CAPT_BUF2, 16, 1 },
- { ADIS16220_AIN2_NULL, 16, 1 },
- { ADIS16220_CAPT_PEAK2, 16, 1 }, },
- [accel] = { { ADIS16220_CAPT_BUFA, 16, 1 },
- { ADIS16220_ACCL_NULL, 16, 1 },
- { ADIS16220_CAPT_PEAKA, 16, 1 }, },
- [temp] = { { ADIS16220_CAPT_TEMP, 12, 0 }, }
-};
-
-static int adis16220_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis16220_state *st = iio_priv(indio_dev);
- const struct adis16220_address_spec *addr;
- int ret = -EINVAL;
- int addrind = 0;
- u16 uval;
- s16 sval;
- u8 bits;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- addrind = 0;
- break;
- case IIO_CHAN_INFO_OFFSET:
- if (chan->type == IIO_TEMP) {
- *val = 25000 / -470 - 1278; /* 25 C = 1278 */
- return IIO_VAL_INT;
- }
- addrind = 1;
- break;
- case IIO_CHAN_INFO_PEAK:
- addrind = 2;
- break;
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_TEMP:
- *val = -470; /* -0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val2 = IIO_G_TO_M_S_2(19073); /* 19.073 g */
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 1;
- *val2 = 220700; /* 1.2207 mV */
- } else {
- /* Should really be dependent on VDD */
- *val2 = 305180; /* 305.18 uV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- default:
- return -EINVAL;
- }
- addr = &adis16220_addresses[chan->address][addrind];
- if (addr->sign) {
- ret = adis_read_reg_16(&st->adis, addr->addr, &sval);
- if (ret)
- return ret;
- bits = addr->bits;
- sval &= (1 << bits) - 1;
- sval = (s16)(sval << (16 - bits)) >> (16 - bits);
- *val = sval;
- return IIO_VAL_INT;
- }
- ret = adis_read_reg_16(&st->adis, addr->addr, &uval);
- if (ret)
- return ret;
- bits = addr->bits;
- uval &= (1 << bits) - 1;
- *val = uval;
- return IIO_VAL_INT;
-}
-
-static const struct iio_chan_spec adis16220_channels[] = {
- {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .extend_name = "supply",
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = in_supply,
- }, {
- .type = IIO_ACCEL,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_PEAK),
- .address = accel,
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = temp,
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = in_1,
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 2,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .address = in_2,
- }
-};
-
-static struct attribute *adis16220_attributes[] = {
- &iio_dev_attr_capture.dev_attr.attr,
- &iio_dev_attr_capture_count.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group adis16220_attribute_group = {
- .attrs = adis16220_attributes,
-};
-
-static const struct iio_info adis16220_info = {
- .attrs = &adis16220_attribute_group,
- .driver_module = THIS_MODULE,
- .read_raw = &adis16220_read_raw,
-};
-
-static const char * const adis16220_status_error_msgs[] = {
- [ADIS16220_DIAG_STAT_VIOLATION_BIT] = "Capture period violation/interruption",
- [ADIS16220_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16220_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16220_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16220_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
-};
-
-static const struct adis_data adis16220_data = {
- .read_delay = 35,
- .write_delay = 35,
- .msc_ctrl_reg = ADIS16220_MSC_CTRL,
- .glob_cmd_reg = ADIS16220_GLOB_CMD,
- .diag_stat_reg = ADIS16220_DIAG_STAT,
-
- .self_test_mask = ADIS16220_MSC_CTRL_SELF_TEST_EN,
- .startup_delay = ADIS16220_STARTUP_DELAY,
-
- .status_error_msgs = adis16220_status_error_msgs,
- .status_error_mask = BIT(ADIS16220_DIAG_STAT_VIOLATION_BIT) |
- BIT(ADIS16220_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16220_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16220_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16220_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16220_probe(struct spi_device *spi)
-{
- int ret;
- struct adis16220_state *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16220_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = adis16220_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16220_channels);
-
- ret = devm_iio_device_register(&spi->dev, indio_dev);
- if (ret)
- return ret;
-
- ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &accel_bin);
- if (ret)
- return ret;
-
- ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc1_bin);
- if (ret)
- goto error_rm_accel_bin;
-
- ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc2_bin);
- if (ret)
- goto error_rm_adc1_bin;
-
- ret = adis_init(&st->adis, indio_dev, spi, &adis16220_data);
- if (ret)
- goto error_rm_adc2_bin;
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(&st->adis);
- if (ret)
- goto error_rm_adc2_bin;
- return 0;
-
-error_rm_adc2_bin:
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
-error_rm_adc1_bin:
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
-error_rm_accel_bin:
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
- return ret;
-}
-
-static int adis16220_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
-
- return 0;
-}
-
-static struct spi_driver adis16220_driver = {
- .driver = {
- .name = "adis16220",
- },
- .probe = adis16220_probe,
- .remove = adis16220_remove,
-};
-module_spi_driver(adis16220_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16220 Digital Vibration Sensor");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16220");
diff --git a/drivers/staging/iio/accel/adis16240.h b/drivers/staging/iio/accel/adis16240.h
index 66b5ad2f4..b2cb37b95 100644
--- a/drivers/staging/iio/accel/adis16240.h
+++ b/drivers/staging/iio/accel/adis16240.h
@@ -5,110 +5,160 @@
/* Flash memory write count */
#define ADIS16240_FLASH_CNT 0x00
+
/* Output, power supply */
#define ADIS16240_SUPPLY_OUT 0x02
+
/* Output, x-axis accelerometer */
#define ADIS16240_XACCL_OUT 0x04
+
/* Output, y-axis accelerometer */
#define ADIS16240_YACCL_OUT 0x06
+
/* Output, z-axis accelerometer */
#define ADIS16240_ZACCL_OUT 0x08
+
/* Output, auxiliary ADC input */
#define ADIS16240_AUX_ADC 0x0A
+
/* Output, temperature */
#define ADIS16240_TEMP_OUT 0x0C
+
/* Output, x-axis acceleration peak */
#define ADIS16240_XPEAK_OUT 0x0E
+
/* Output, y-axis acceleration peak */
#define ADIS16240_YPEAK_OUT 0x10
+
/* Output, z-axis acceleration peak */
#define ADIS16240_ZPEAK_OUT 0x12
+
/* Output, sum-of-squares acceleration peak */
#define ADIS16240_XYZPEAK_OUT 0x14
+
/* Output, Capture Buffer 1, X and Y acceleration */
#define ADIS16240_CAPT_BUF1 0x16
+
/* Output, Capture Buffer 2, Z acceleration */
#define ADIS16240_CAPT_BUF2 0x18
+
/* Diagnostic, error flags */
#define ADIS16240_DIAG_STAT 0x1A
+
/* Diagnostic, event counter */
#define ADIS16240_EVNT_CNTR 0x1C
+
/* Diagnostic, check sum value from firmware test */
#define ADIS16240_CHK_SUM 0x1E
+
/* Calibration, x-axis acceleration offset adjustment */
#define ADIS16240_XACCL_OFF 0x20
+
/* Calibration, y-axis acceleration offset adjustment */
#define ADIS16240_YACCL_OFF 0x22
+
/* Calibration, z-axis acceleration offset adjustment */
#define ADIS16240_ZACCL_OFF 0x24
+
/* Clock, hour and minute */
#define ADIS16240_CLK_TIME 0x2E
+
/* Clock, month and day */
#define ADIS16240_CLK_DATE 0x30
+
/* Clock, year */
#define ADIS16240_CLK_YEAR 0x32
+
/* Wake-up setting, hour and minute */
#define ADIS16240_WAKE_TIME 0x34
+
/* Wake-up setting, month and day */
#define ADIS16240_WAKE_DATE 0x36
+
/* Alarm 1 amplitude threshold */
#define ADIS16240_ALM_MAG1 0x38
+
/* Alarm 2 amplitude threshold */
#define ADIS16240_ALM_MAG2 0x3A
+
/* Alarm control */
#define ADIS16240_ALM_CTRL 0x3C
+
/* Capture, external trigger control */
#define ADIS16240_XTRIG_CTRL 0x3E
+
/* Capture, address pointer */
#define ADIS16240_CAPT_PNTR 0x40
+
/* Capture, configuration and control */
#define ADIS16240_CAPT_CTRL 0x42
+
/* General-purpose digital input/output control */
#define ADIS16240_GPIO_CTRL 0x44
+
/* Miscellaneous control */
#define ADIS16240_MSC_CTRL 0x46
+
/* Internal sample period (rate) control */
#define ADIS16240_SMPL_PRD 0x48
+
/* System command */
#define ADIS16240_GLOB_CMD 0x4A
/* MSC_CTRL */
+
/* Enables sum-of-squares output (XYZPEAK_OUT) */
#define ADIS16240_MSC_CTRL_XYZPEAK_OUT_EN BIT(15)
+
/* Enables peak tracking output (XPEAK_OUT, YPEAK_OUT, and ZPEAK_OUT) */
#define ADIS16240_MSC_CTRL_X_Y_ZPEAK_OUT_EN BIT(14)
+
/* Self-test enable: 1 = apply electrostatic force, 0 = disabled */
#define ADIS16240_MSC_CTRL_SELF_TEST_EN BIT(8)
+
/* Data-ready enable: 1 = enabled, 0 = disabled */
#define ADIS16240_MSC_CTRL_DATA_RDY_EN BIT(2)
+
/* Data-ready polarity: 1 = active high, 0 = active low */
#define ADIS16240_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
#define ADIS16240_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
/* DIAG_STAT */
+
/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
#define ADIS16240_DIAG_STAT_ALARM2 BIT(9)
+
/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
#define ADIS16240_DIAG_STAT_ALARM1 BIT(8)
+
/* Capture buffer full: 1 = capture buffer is full */
#define ADIS16240_DIAG_STAT_CPT_BUF_FUL BIT(7)
+
/* Flash test, checksum flag: 1 = mismatch, 0 = match */
#define ADIS16240_DIAG_STAT_CHKSUM BIT(6)
+
/* Power-on, self-test flag: 1 = failure, 0 = pass */
#define ADIS16240_DIAG_STAT_PWRON_FAIL_BIT 5
+
/* Power-on self-test: 1 = in-progress, 0 = complete */
#define ADIS16240_DIAG_STAT_PWRON_BUSY BIT(4)
+
/* SPI communications failure */
#define ADIS16240_DIAG_STAT_SPI_FAIL_BIT 3
+
/* Flash update failure */
#define ADIS16240_DIAG_STAT_FLASH_UPT_BIT 2
+
/* Power supply above 3.625 V */
#define ADIS16240_DIAG_STAT_POWER_HIGH_BIT 1
+
/* Power supply below 3.15 V */
#define ADIS16240_DIAG_STAT_POWER_LOW_BIT 0
/* GLOB_CMD */
+
#define ADIS16240_GLOB_CMD_RESUME BIT(8)
#define ADIS16240_GLOB_CMD_SW_RESET BIT(7)
#define ADIS16240_GLOB_CMD_STANDBY BIT(2)
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index 1b5b685a8..d5b99e610 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -29,13 +29,13 @@
static ssize_t adis16240_spi_read_signed(struct device *dev,
struct device_attribute *attr,
char *buf,
- unsigned bits)
+ unsigned int bits)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct adis *st = iio_priv(indio_dev);
int ret;
s16 val = 0;
- unsigned shift = 16 - bits;
+ unsigned int shift = 16 - bits;
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
ret = adis_read_reg_16(st,
@@ -222,6 +222,7 @@ static const struct adis_data adis16240_data = {
.diag_stat_reg = ADIS16240_DIAG_STAT,
.self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
.startup_delay = ADIS16240_STARTUP_DELAY,
.status_error_msgs = adis16240_status_error_msgs,
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index f843f19cf..1cf6b7980 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -35,10 +35,10 @@
#define AD7192_REG_DATA 3 /* Data Register (RO, 24/32-bit) */
#define AD7192_REG_ID 4 /* ID Register (RO, 8-bit) */
#define AD7192_REG_GPOCON 5 /* GPOCON Register (RO, 8-bit) */
-#define AD7192_REG_OFFSET 6 /* Offset Register (RW, 16-bit
- * (AD7792)/24-bit (AD7192)) */
-#define AD7192_REG_FULLSALE 7 /* Full-Scale Register
- * (RW, 16-bit (AD7792)/24-bit (AD7192)) */
+#define AD7192_REG_OFFSET 6 /* Offset Register (RW, 16-bit */
+ /* (AD7792)/24-bit (AD7192)) */
+#define AD7192_REG_FULLSALE 7 /* Full-Scale Register */
+ /* (RW, 16-bit (AD7792)/24-bit (AD7192)) */
/* Communications Register Bit Designations (AD7192_REG_COMM) */
#define AD7192_COMM_WEN BIT(7) /* Write Enable */
@@ -80,13 +80,13 @@
#define AD7192_MODE_CAL_SYS_FULL 7 /* System Full-Scale Calibration */
/* Mode Register: AD7192_MODE_CLKSRC options */
-#define AD7192_CLK_EXT_MCLK1_2 0 /* External 4.92 MHz Clock connected
- * from MCLK1 to MCLK2 */
+#define AD7192_CLK_EXT_MCLK1_2	0 /* External 4.92 MHz Clock connected */
+ /* from MCLK1 to MCLK2 */
#define AD7192_CLK_EXT_MCLK2 1 /* External Clock applied to MCLK2 */
-#define AD7192_CLK_INT 2 /* Internal 4.92 MHz Clock not
- * available at the MCLK2 pin */
-#define AD7192_CLK_INT_CO 3 /* Internal 4.92 MHz Clock available
- * at the MCLK2 pin */
+#define AD7192_CLK_INT 2 /* Internal 4.92 MHz Clock not */
+ /* available at the MCLK2 pin */
+#define AD7192_CLK_INT_CO	3 /* Internal 4.92 MHz Clock available */
+ /* at the MCLK2 pin */
/* Configuration Register Bit Designations (AD7192_REG_CONF) */
@@ -349,11 +349,9 @@ static ssize_t ad7192_write_frequency(struct device *dev,
if (lval == 0)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
div = st->mclk / (lval * st->f_order * 1024);
if (div < 1 || div > 1023) {
@@ -366,7 +364,7 @@ static ssize_t ad7192_write_frequency(struct device *dev,
ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
out:
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
return ret ? ret : len;
}
@@ -434,11 +432,9 @@ static ssize_t ad7192_set(struct device *dev,
if (ret < 0)
return ret;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch ((u32)this_attr->address) {
case AD7192_REG_GPOCON:
@@ -461,7 +457,7 @@ static ssize_t ad7192_set(struct device *dev,
ret = -EINVAL;
}
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
return ret ? ret : len;
}
@@ -555,11 +551,9 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
int ret, i;
unsigned int tmp;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -582,7 +576,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
ret = -EINVAL;
}
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
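
All three ad7192 hunks make the same substitution: the open-coded mutex_lock()/iio_buffer_enabled() check becomes iio_device_claim_direct_mode(), a helper pair recently added to the IIO core. The claim fails with -EBUSY while a buffer is enabled and otherwise holds the device in direct mode until released, so the early-exit paths no longer need their own unlock. The resulting pattern, sketched:

/* Sketch of the claim/release pattern the ad7192 hunks adopt. */
static int do_direct_access(struct iio_dev *indio_dev)
{
	int ret;

	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)	/* -EBUSY while buffered capture is running */
		return ret;

	/* ... raw register access, safe from buffered capture ... */

	iio_device_release_direct_mode(indio_dev);
	return 0;
}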
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 62e5ecacf..a06b46cb8 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -155,7 +155,7 @@ static void ad7280_crc8_build_table(unsigned char *crc_tab)
}
}
-static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned val)
+static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned int val)
{
unsigned char crc;
@@ -165,7 +165,7 @@ static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned val)
return crc ^ (val & 0xFF);
}
-static int ad7280_check_crc(struct ad7280_state *st, unsigned val)
+static int ad7280_check_crc(struct ad7280_state *st, unsigned int val)
{
unsigned char crc = ad7280_calc_crc8(st->crc_tab, val >> 10);
@@ -191,7 +191,7 @@ static void ad7280_delay(struct ad7280_state *st)
usleep_range(250, 500);
}
-static int __ad7280_read32(struct ad7280_state *st, unsigned *val)
+static int __ad7280_read32(struct ad7280_state *st, unsigned int *val)
{
int ret;
struct spi_transfer t = {
@@ -211,10 +211,10 @@ static int __ad7280_read32(struct ad7280_state *st, unsigned *val)
return 0;
}
-static int ad7280_write(struct ad7280_state *st, unsigned devaddr,
- unsigned addr, bool all, unsigned val)
+static int ad7280_write(struct ad7280_state *st, unsigned int devaddr,
+ unsigned int addr, bool all, unsigned int val)
{
- unsigned reg = devaddr << 27 | addr << 21 |
+ unsigned int reg = devaddr << 27 | addr << 21 |
(val & 0xFF) << 13 | all << 12;
reg |= ad7280_calc_crc8(st->crc_tab, reg >> 11) << 3 | 0x2;
@@ -223,11 +223,11 @@ static int ad7280_write(struct ad7280_state *st, unsigned devaddr,
return spi_write(st->spi, &st->buf[0], 4);
}
-static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
- unsigned addr)
+static int ad7280_read(struct ad7280_state *st, unsigned int devaddr,
+ unsigned int addr)
{
int ret;
- unsigned tmp;
+ unsigned int tmp;
/* turns off the read operation on all parts */
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
@@ -261,11 +261,11 @@ static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
return (tmp >> 13) & 0xFF;
}
-static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr,
- unsigned addr)
+static int ad7280_read_channel(struct ad7280_state *st, unsigned int devaddr,
+ unsigned int addr)
{
int ret;
- unsigned tmp;
+ unsigned int tmp;
ret = ad7280_write(st, devaddr, AD7280A_READ, 0, addr << 2);
if (ret)
@@ -299,11 +299,11 @@ static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr,
return (tmp >> 11) & 0xFFF;
}
-static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt,
- unsigned *array)
+static int ad7280_read_all_channels(struct ad7280_state *st, unsigned int cnt,
+ unsigned int *array)
{
int i, ret;
- unsigned tmp, sum = 0;
+ unsigned int tmp, sum = 0;
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_READ, 1,
AD7280A_CELL_VOLTAGE_1 << 2);
@@ -338,7 +338,7 @@ static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt,
static int ad7280_chain_setup(struct ad7280_state *st)
{
- unsigned val, n;
+ unsigned int val, n;
int ret;
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_LB, 1,
@@ -401,7 +401,7 @@ static ssize_t ad7280_store_balance_sw(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
bool readin;
int ret;
- unsigned devaddr, ch;
+ unsigned int devaddr, ch;
ret = strtobool(buf, &readin);
if (ret)
@@ -431,7 +431,7 @@ static ssize_t ad7280_show_balance_timer(struct device *dev,
struct ad7280_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- unsigned msecs;
+ unsigned int msecs;
mutex_lock(&indio_dev->mlock);
ret = ad7280_read(st, this_attr->address >> 8,
@@ -602,7 +602,7 @@ static ssize_t ad7280_read_channel_config(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7280_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- unsigned val;
+ unsigned int val;
switch ((u32)this_attr->address) {
case AD7280A_CELL_OVERVOLTAGE:
@@ -683,7 +683,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct ad7280_state *st = iio_priv(indio_dev);
- unsigned *channels;
+ unsigned int *channels;
int i, ret;
channels = kcalloc(st->scan_cnt, sizeof(*channels), GFP_KERNEL);
diff --git a/drivers/staging/iio/adc/ad7280a.h b/drivers/staging/iio/adc/ad7280a.h
index 732347a9b..ccfb90d20 100644
--- a/drivers/staging/iio/adc/ad7280a.h
+++ b/drivers/staging/iio/adc/ad7280a.h
@@ -29,10 +29,10 @@
#define AD7280A_ALERT_REMOVE_AUX4_AUX5 BIT(1)
struct ad7280_platform_data {
- unsigned acquisition_time;
- unsigned conversion_averaging;
- unsigned chain_last_alert_ignore;
- bool thermistor_term_en;
+ unsigned int acquisition_time;
+ unsigned int conversion_averaging;
+ unsigned int chain_last_alert_ignore;
+ bool thermistor_term_en;
};
#endif /* IIO_ADC_AD7280_H_ */
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index cca946924..39f50440d 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -28,16 +28,16 @@
*/
struct ad7606_platform_data {
- unsigned default_os;
- unsigned default_range;
- unsigned gpio_convst;
- unsigned gpio_reset;
- unsigned gpio_range;
- unsigned gpio_os0;
- unsigned gpio_os1;
- unsigned gpio_os2;
- unsigned gpio_frstdata;
- unsigned gpio_stby;
+ unsigned int default_os;
+ unsigned int default_range;
+ unsigned int gpio_convst;
+ unsigned int gpio_reset;
+ unsigned int gpio_range;
+ unsigned int gpio_os0;
+ unsigned int gpio_os1;
+ unsigned int gpio_os2;
+ unsigned int gpio_frstdata;
+ unsigned int gpio_stby;
};
/**
@@ -52,7 +52,7 @@ struct ad7606_chip_info {
const char *name;
u16 int_vref_mv;
const struct iio_chan_spec *channels;
- unsigned num_channels;
+ unsigned int num_channels;
};
/**
@@ -67,8 +67,8 @@ struct ad7606_state {
struct work_struct poll_work;
wait_queue_head_t wq_data_avail;
const struct ad7606_bus_ops *bops;
- unsigned range;
- unsigned oversampling;
+ unsigned int range;
+ unsigned int oversampling;
bool done;
void __iomem *base_address;
@@ -86,7 +86,7 @@ struct ad7606_bus_ops {
};
struct iio_dev *ad7606_probe(struct device *dev, int irq,
- void __iomem *base_address, unsigned id,
+ void __iomem *base_address, unsigned int id,
const struct ad7606_bus_ops *bops);
int ad7606_remove(struct iio_dev *indio_dev, int irq);
int ad7606_reset(struct ad7606_state *st);
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index fe6caeee0..f79ee6185 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -36,7 +36,7 @@ int ad7606_reset(struct ad7606_state *st)
return -ENODEV;
}
-static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned ch)
+static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
{
struct ad7606_state *st = iio_priv(indio_dev);
int ret;
@@ -88,12 +88,12 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev))
- ret = -EBUSY;
- else
- ret = ad7606_scan_direct(indio_dev, chan->address);
- mutex_unlock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ad7606_scan_direct(indio_dev, chan->address);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -155,7 +155,7 @@ static ssize_t ad7606_show_oversampling_ratio(struct device *dev,
return sprintf(buf, "%u\n", st->oversampling);
}
-static int ad7606_oversampling_get_index(unsigned val)
+static int ad7606_oversampling_get_index(unsigned int val)
{
unsigned char supported[] = {0, 2, 4, 8, 16, 32, 64};
int i;
@@ -446,7 +446,7 @@ static const struct iio_info ad7606_info_range = {
struct iio_dev *ad7606_probe(struct device *dev, int irq,
void __iomem *base_address,
- unsigned id,
+ unsigned int id,
const struct ad7606_bus_ops *bops)
{
struct ad7606_platform_data *pdata = dev->platform_data;
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index d873a5164..9587fa86d 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -22,6 +22,7 @@ static int ad7606_spi_read_block(struct device *dev,
struct spi_device *spi = to_spi_device(dev);
int i, ret;
unsigned short *data = buf;
+ __be16 *bdata = buf;
ret = spi_read(spi, buf, count * 2);
if (ret < 0) {
@@ -30,7 +31,7 @@ static int ad7606_spi_read_block(struct device *dev,
}
for (i = 0; i < count; i++)
- data[i] = be16_to_cpu(data[i]);
+ data[i] = be16_to_cpu(bdata[i]);
return 0;
}
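
The ad7606_spi change is a type fix rather than a behavioural one: samples arrive over SPI big-endian, so the buffer is re-read through a __be16 pointer before byte-swapping. That documents the wire format and satisfies sparse's endianness checking; be16_to_cpu() still compiles to a no-op on big-endian CPUs. The conversion in isolation:

/* Sketch: convert 'count' big-endian 16-bit samples in place. */
static void samples_to_cpu(void *buf, int count)
{
	unsigned short *data = buf;
	__be16 *bdata = buf;	/* same bytes, endian-annotated type */
	int i;

	for (i = 0; i < count; i++)
		data[i] = be16_to_cpu(bdata[i]);
}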
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index 1439cfdbb..c9a0c2aa6 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -63,7 +63,7 @@ static int ad7780_set_mode(struct ad_sigma_delta *sigma_delta,
enum ad_sigma_delta_mode mode)
{
struct ad7780_state *st = ad_sigma_delta_to_ad7780(sigma_delta);
- unsigned val;
+ unsigned int val;
switch (mode) {
case AD_SD_MODE_SINGLE:
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 18b27a198..358400b22 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -31,7 +31,7 @@ static unsigned long ad9832_calc_freqreg(unsigned long mclk, unsigned long fout)
}
static int ad9832_write_frequency(struct ad9832_state *st,
- unsigned addr, unsigned long fout)
+ unsigned int addr, unsigned long fout)
{
unsigned long regval;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index d1218d896..170ac980a 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -12,20 +12,16 @@
#include <linux/sysfs.h>
#include <linux/i2c.h>
#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <asm/div64.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>
-#include "ad5933.h"
-
/* AD5933/AD5934 Registers */
#define AD5933_REG_CONTROL_HB 0x80 /* R/W, 2 bytes */
#define AD5933_REG_CONTROL_LB 0x81 /* R/W, 2 bytes */
@@ -86,6 +82,18 @@
#define AD5933_POLL_TIME_ms 10
#define AD5933_INIT_EXCITATION_TIME_ms 100
+/**
+ * struct ad5933_platform_data - platform specific data
+ * @ext_clk_Hz: the external clock frequency in Hz, if not set
+ * the driver uses the internal clock (16.776 MHz)
+ * @vref_mv: the external reference voltage in millivolt
+ */
+
+struct ad5933_platform_data {
+ unsigned long ext_clk_Hz;
+ unsigned short vref_mv;
+};
+
struct ad5933_state {
struct i2c_client *client;
struct regulator *reg;
@@ -93,14 +101,14 @@ struct ad5933_state {
unsigned long mclk_hz;
unsigned char ctrl_hb;
unsigned char ctrl_lb;
- unsigned range_avail[4];
+ unsigned int range_avail[4];
unsigned short vref_mv;
unsigned short settling_cycles;
unsigned short freq_points;
- unsigned freq_start;
- unsigned freq_inc;
- unsigned state;
- unsigned poll_time_jiffies;
+ unsigned int freq_start;
+ unsigned int freq_inc;
+ unsigned int state;
+ unsigned int poll_time_jiffies;
};
static struct ad5933_platform_data ad5933_default_pdata = {
@@ -214,7 +222,7 @@ static int ad5933_wait_busy(struct ad5933_state *st, unsigned char event)
}
static int ad5933_set_freq(struct ad5933_state *st,
- unsigned reg, unsigned long freq)
+ unsigned int reg, unsigned long freq)
{
unsigned long long freqreg;
union {
@@ -274,7 +282,7 @@ static int ad5933_setup(struct ad5933_state *st)
static void ad5933_calc_out_ranges(struct ad5933_state *st)
{
int i;
- unsigned normalized_3v3[4] = {1980, 198, 383, 970};
+ unsigned int normalized_3v3[4] = {1980, 198, 383, 970};
for (i = 0; i < 4; i++)
st->range_avail[i] = normalized_3v3[i] * st->vref_mv / 3300;
@@ -307,10 +315,10 @@ static ssize_t ad5933_show_frequency(struct device *dev,
freqreg = be32_to_cpu(dat.d32) & 0xFFFFFF;
- freqreg = (u64) freqreg * (u64) (st->mclk_hz / 4);
+ freqreg = (u64)freqreg * (u64)(st->mclk_hz / 4);
do_div(freqreg, 1 << 27);
- return sprintf(buf, "%d\n", (int) freqreg);
+ return sprintf(buf, "%d\n", (int)freqreg);
}
static ssize_t ad5933_store_frequency(struct device *dev,
@@ -358,7 +366,7 @@ static ssize_t ad5933_show(struct device *dev,
int ret = 0, len = 0;
mutex_lock(&indio_dev->mlock);
- switch ((u32) this_attr->address) {
+ switch ((u32)this_attr->address) {
case AD5933_OUT_RANGE:
len = sprintf(buf, "%u\n",
st->range_avail[(st->ctrl_hb >> 1) & 0x3]);
@@ -409,7 +417,7 @@ static ssize_t ad5933_store(struct device *dev,
}
mutex_lock(&indio_dev->mlock);
- switch ((u32) this_attr->address) {
+ switch ((u32)this_attr->address) {
case AD5933_OUT_RANGE:
for (i = 0; i < 4; i++)
if (val == st->range_avail[i]) {
@@ -436,10 +444,10 @@ static ssize_t ad5933_store(struct device *dev,
st->settling_cycles = val;
/* 2x, 4x handling, see datasheet */
- if (val > 511)
- val = (val >> 1) | (1 << 9);
- else if (val > 1022)
+ if (val > 1022)
val = (val >> 2) | (3 << 9);
+ else if (val > 511)
+ val = (val >> 1) | (1 << 9);
dat = cpu_to_be16(val);
ret = ad5933_i2c_write(st->client,
@@ -683,8 +691,9 @@ static void ad5933_work(struct work_struct *work)
}
if (status & AD5933_STAT_SWEEP_DONE) {
- /* last sample received - power down do nothing until
- * the ring enable is toggled */
+			/* last sample received - power down, do
+ * nothing until the ring enable is toggled
+ */
ad5933_cmd(st, AD5933_CTRL_POWER_DOWN);
} else {
/* we just received a valid datum, move on to the next */
@@ -699,7 +708,7 @@ static int ad5933_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret, voltage_uv = 0;
- struct ad5933_platform_data *pdata = client->dev.platform_data;
+ struct ad5933_platform_data *pdata = dev_get_platdata(&client->dev);
struct ad5933_state *st;
struct iio_dev *indio_dev;
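
Buried in the ad5933 cleanups is a genuine logic fix in the settling-cycles store path: with the old ordering, any value above 511 matched the first branch, so the 4x-multiplier encoding for values above 1022 was unreachable. Overlapping range checks must test the widest bound first. The corrected encoding as a standalone sketch (the driver packs the 2x/4x cycle multiplier into bits 10:9 of the register):

/* Sketch of the corrected settling-cycles encoding. */
static unsigned short encode_settling_cycles(unsigned short val)
{
	if (val > 1022)				/* widest range first */
		return (val >> 2) | (3 << 9);	/* 4x multiplier */
	if (val > 511)
		return (val >> 1) | (1 << 9);	/* 2x multiplier */
	return val;				/* 1x */
}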
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.h b/drivers/staging/iio/impedance-analyzer/ad5933.h
deleted file mode 100644
index b140e42d6..000000000
--- a/drivers/staging/iio/impedance-analyzer/ad5933.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * AD5933 AD5934 Impedance Converter, Network Analyzer
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef IIO_ADC_AD5933_H_
-#define IIO_ADC_AD5933_H_
-
-/*
- * TODO: struct ad5933_platform_data needs to go into include/linux/iio
- */
-
-/**
- * struct ad5933_platform_data - platform specific data
- * @ext_clk_Hz: the external clock frequency in Hz, if not set
- * the driver uses the internal clock (16.776 MHz)
- * @vref_mv: the external reference voltage in millivolt
- */
-
-struct ad5933_platform_data {
- unsigned long ext_clk_Hz;
- unsigned short vref_mv;
-};
-
-#endif /* IIO_ADC_AD5933_H_ */
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index 6e2ba458c..2e3b1d64e 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -69,7 +69,6 @@ enum als_ir_mode {
};
struct isl29028_chip {
- struct device *dev;
struct mutex lock;
struct regmap *regmap;
@@ -166,20 +165,21 @@ static int isl29028_set_als_ir_mode(struct isl29028_chip *chip,
static int isl29028_read_als_ir(struct isl29028_chip *chip, int *als_ir)
{
+ struct device *dev = regmap_get_device(chip->regmap);
unsigned int lsb;
unsigned int msb;
int ret;
ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_L, &lsb);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in reading register ALSIR_L err %d\n", ret);
return ret;
}
ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_U, &msb);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in reading register ALSIR_U err %d\n", ret);
return ret;
}
@@ -190,12 +190,13 @@ static int isl29028_read_als_ir(struct isl29028_chip *chip, int *als_ir)
static int isl29028_read_proxim(struct isl29028_chip *chip, int *prox)
{
+ struct device *dev = regmap_get_device(chip->regmap);
unsigned int data;
int ret;
ret = regmap_read(chip->regmap, ISL29028_REG_PROX_DATA, &data);
if (ret < 0) {
- dev_err(chip->dev, "Error in reading register %d, error %d\n",
+ dev_err(dev, "Error in reading register %d, error %d\n",
ISL29028_REG_PROX_DATA, ret);
return ret;
}
@@ -218,13 +219,14 @@ static int isl29028_proxim_get(struct isl29028_chip *chip, int *prox_data)
static int isl29028_als_get(struct isl29028_chip *chip, int *als_data)
{
+ struct device *dev = regmap_get_device(chip->regmap);
int ret;
int als_ir_data;
if (chip->als_ir_mode != MODE_ALS) {
ret = isl29028_set_als_ir_mode(chip, MODE_ALS);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in enabling ALS mode err %d\n", ret);
return ret;
}
@@ -251,12 +253,13 @@ static int isl29028_als_get(struct isl29028_chip *chip, int *als_data)
static int isl29028_ir_get(struct isl29028_chip *chip, int *ir_data)
{
+ struct device *dev = regmap_get_device(chip->regmap);
int ret;
if (chip->als_ir_mode != MODE_IR) {
ret = isl29028_set_als_ir_mode(chip, MODE_IR);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in enabling IR mode err %d\n", ret);
return ret;
}
@@ -271,25 +274,26 @@ static int isl29028_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct isl29028_chip *chip = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(chip->regmap);
int ret = -EINVAL;
mutex_lock(&chip->lock);
switch (chan->type) {
case IIO_PROXIMITY:
if (mask != IIO_CHAN_INFO_SAMP_FREQ) {
- dev_err(chip->dev,
+ dev_err(dev,
"proximity: mask value 0x%08lx not supported\n",
mask);
break;
}
if (val < 1 || val > 100) {
- dev_err(chip->dev,
+ dev_err(dev,
"Samp_freq %d is not in range[1:100]\n", val);
break;
}
ret = isl29028_set_proxim_sampling(chip, val);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Setting proximity samp_freq fail, err %d\n",
ret);
break;
@@ -299,19 +303,19 @@ static int isl29028_write_raw(struct iio_dev *indio_dev,
case IIO_LIGHT:
if (mask != IIO_CHAN_INFO_SCALE) {
- dev_err(chip->dev,
+ dev_err(dev,
"light: mask value 0x%08lx not supported\n",
mask);
break;
}
if ((val != 125) && (val != 2000)) {
- dev_err(chip->dev,
+ dev_err(dev,
"lux scale %d is invalid [125, 2000]\n", val);
break;
}
ret = isl29028_set_als_scale(chip, val);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Setting lux scale fail with error %d\n", ret);
break;
}
@@ -319,7 +323,7 @@ static int isl29028_write_raw(struct iio_dev *indio_dev,
break;
default:
- dev_err(chip->dev, "Unsupported channel type\n");
+ dev_err(dev, "Unsupported channel type\n");
break;
}
mutex_unlock(&chip->lock);
@@ -331,6 +335,7 @@ static int isl29028_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct isl29028_chip *chip = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(chip->regmap);
int ret = -EINVAL;
mutex_lock(&chip->lock);
@@ -370,7 +375,7 @@ static int isl29028_read_raw(struct iio_dev *indio_dev,
break;
default:
- dev_err(chip->dev, "mask value 0x%08lx not supported\n", mask);
+ dev_err(dev, "mask value 0x%08lx not supported\n", mask);
break;
}
mutex_unlock(&chip->lock);
@@ -417,6 +422,7 @@ static const struct iio_info isl29028_info = {
static int isl29028_chip_init(struct isl29028_chip *chip)
{
+ struct device *dev = regmap_get_device(chip->regmap);
int ret;
chip->enable_prox = false;
@@ -426,35 +432,33 @@ static int isl29028_chip_init(struct isl29028_chip *chip)
ret = regmap_write(chip->regmap, ISL29028_REG_TEST1_MODE, 0x0);
if (ret < 0) {
- dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
+ dev_err(dev, "%s(): write to reg %d failed, err = %d\n",
__func__, ISL29028_REG_TEST1_MODE, ret);
return ret;
}
ret = regmap_write(chip->regmap, ISL29028_REG_TEST2_MODE, 0x0);
if (ret < 0) {
- dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
+ dev_err(dev, "%s(): write to reg %d failed, err = %d\n",
__func__, ISL29028_REG_TEST2_MODE, ret);
return ret;
}
ret = regmap_write(chip->regmap, ISL29028_REG_CONFIGURE, 0x0);
if (ret < 0) {
- dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
+ dev_err(dev, "%s(): write to reg %d failed, err = %d\n",
__func__, ISL29028_REG_CONFIGURE, ret);
return ret;
}
ret = isl29028_set_proxim_sampling(chip, chip->prox_sampling);
if (ret < 0) {
- dev_err(chip->dev, "setting the proximity, err = %d\n",
- ret);
+ dev_err(dev, "setting the proximity, err = %d\n", ret);
return ret;
}
ret = isl29028_set_als_scale(chip, chip->lux_scale);
if (ret < 0)
- dev_err(chip->dev,
- "setting als scale failed, err = %d\n", ret);
+ dev_err(dev, "setting als scale failed, err = %d\n", ret);
return ret;
}
@@ -496,19 +500,19 @@ static int isl29028_probe(struct i2c_client *client,
chip = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
- chip->dev = &client->dev;
mutex_init(&chip->lock);
chip->regmap = devm_regmap_init_i2c(client, &isl29028_regmap_config);
if (IS_ERR(chip->regmap)) {
ret = PTR_ERR(chip->regmap);
- dev_err(chip->dev, "regmap initialization failed: %d\n", ret);
+ dev_err(&client->dev, "regmap initialization failed: %d\n",
+ ret);
return ret;
}
ret = isl29028_chip_init(chip);
if (ret < 0) {
- dev_err(chip->dev, "chip initialization failed: %d\n", ret);
+ dev_err(&client->dev, "chip initialization failed: %d\n", ret);
return ret;
}
@@ -520,7 +524,8 @@ static int isl29028_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
if (ret < 0) {
- dev_err(chip->dev, "iio registration fails with error %d\n",
+ dev_err(&client->dev,
+ "iio registration fails with error %d\n",
ret);
return ret;
}
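
The isl29028 rework drops the cached struct device pointer from the chip state; wherever a device is needed for logging, it is now recovered from the regmap, which already owns one. That shrinks the state structure and removes any chance of the two pointers disagreeing. The pattern, sketched with a hypothetical helper:

/* Sketch: derive the device from the regmap instead of caching it. */
static void isl29028_report_error(struct isl29028_chip *chip, int err)
{
	struct device *dev = regmap_get_device(chip->regmap);

	dev_err(dev, "register access failed: %d\n", err);
}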
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index 5f308bae4..d553c8e18 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -187,9 +187,11 @@ struct tsl2X7X_chip {
const struct tsl2x7x_chip_info *chip_info;
const struct iio_info *info;
s64 event_timestamp;
- /* This structure is intentionally large to accommodate
- * updates via sysfs. */
- /* Sized to 9 = max 8 segments + 1 termination segment */
+ /*
+ * This structure is intentionally large to accommodate
+ * updates via sysfs.
+ * Sized to 9 = max 8 segments + 1 termination segment
+ */
struct tsl2x7x_lux tsl2x7x_device_lux[TSL2X7X_MAX_LUX_TABLE_SIZE];
};
@@ -349,13 +351,13 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
if (chip->tsl2x7x_chip_status != TSL2X7X_CHIP_WORKING) {
/* device is not enabled */
dev_err(&chip->client->dev, "%s: device is not enabled\n",
- __func__);
+ __func__);
ret = -EBUSY;
goto out_unlock;
}
ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &buf[0]);
+ (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &buf[0]);
if (ret < 0) {
dev_err(&chip->client->dev,
"%s: Failed to read STATUS Reg\n", __func__);
@@ -371,8 +373,8 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
for (i = 0; i < 4; i++) {
ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG | (TSL2X7X_ALS_CHAN0LO + i)),
- &buf[i]);
+ (TSL2X7X_CMD_REG |
+ (TSL2X7X_ALS_CHAN0LO + i)), &buf[i]);
if (ret < 0) {
dev_err(&chip->client->dev,
"failed to read. err=%x\n", ret);
@@ -382,9 +384,9 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
/* clear any existing interrupt status */
ret = i2c_smbus_write_byte(chip->client,
- (TSL2X7X_CMD_REG |
- TSL2X7X_CMD_SPL_FN |
- TSL2X7X_CMD_ALS_INT_CLR));
+ (TSL2X7X_CMD_REG |
+ TSL2X7X_CMD_SPL_FN |
+ TSL2X7X_CMD_ALS_INT_CLR));
if (ret < 0) {
dev_err(&chip->client->dev,
"i2c_write_command failed - err = %d\n", ret);
@@ -411,7 +413,7 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
/* calculate ratio */
ratio = (ch1 << 15) / ch0;
/* convert to unscaled lux using the pointer to the table */
- p = (struct tsl2x7x_lux *) chip->tsl2x7x_device_lux;
+ p = (struct tsl2x7x_lux *)chip->tsl2x7x_device_lux;
while (p->ratio != 0 && p->ratio < ratio)
p++;
@@ -488,7 +490,7 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
}
ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &status);
+ (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &status);
if (ret < 0) {
dev_err(&chip->client->dev, "i2c err=%d\n", ret);
goto prox_poll_err;
@@ -515,8 +517,8 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
for (i = 0; i < 2; i++) {
ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG |
- (TSL2X7X_PRX_LO + i)), &chdata[i]);
+ (TSL2X7X_CMD_REG |
+ (TSL2X7X_PRX_LO + i)), &chdata[i]);
if (ret < 0)
goto prox_poll_err;
}
@@ -542,19 +544,19 @@ static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
{
/* If Operational settings defined elsewhere.. */
if (chip->pdata && chip->pdata->platform_default_settings)
- memcpy(&(chip->tsl2x7x_settings),
- chip->pdata->platform_default_settings,
- sizeof(tsl2x7x_default_settings));
+ memcpy(&chip->tsl2x7x_settings,
+ chip->pdata->platform_default_settings,
+ sizeof(tsl2x7x_default_settings));
else
- memcpy(&(chip->tsl2x7x_settings),
- &tsl2x7x_default_settings,
- sizeof(tsl2x7x_default_settings));
+ memcpy(&chip->tsl2x7x_settings,
+ &tsl2x7x_default_settings,
+ sizeof(tsl2x7x_default_settings));
/* Load up the proper lux table. */
if (chip->pdata && chip->pdata->platform_lux_table[0].ratio != 0)
memcpy(chip->tsl2x7x_device_lux,
- chip->pdata->platform_lux_table,
- sizeof(chip->pdata->platform_lux_table));
+ chip->pdata->platform_lux_table,
+ sizeof(chip->pdata->platform_lux_table));
else
memcpy(chip->tsl2x7x_device_lux,
(struct tsl2x7x_lux *)tsl2x7x_default_lux_table_group[chip->id],
@@ -576,7 +578,7 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
int lux_val;
ret = i2c_smbus_write_byte(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
+ (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
if (ret < 0) {
dev_err(&chip->client->dev,
"failed to write CNTRL register, ret=%d\n", ret);
@@ -592,7 +594,7 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
}
ret = i2c_smbus_write_byte(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
+ (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
if (ret < 0) {
dev_err(&chip->client->dev,
"failed to write ctrl reg: ret=%d\n", ret);
@@ -609,7 +611,7 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
lux_val = tsl2x7x_get_lux(indio_dev);
if (lux_val < 0) {
dev_err(&chip->client->dev,
- "%s: failed to get lux\n", __func__);
+ "%s: failed to get lux\n", __func__);
return lux_val;
}
@@ -620,9 +622,9 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
chip->tsl2x7x_settings.als_gain_trim = gain_trim_val;
dev_info(&chip->client->dev,
- "%s als_calibrate completed\n", chip->client->name);
+ "%s als_calibrate completed\n", chip->client->name);
- return (int) gain_trim_val;
+ return (int)gain_trim_val;
}
static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
@@ -695,23 +697,28 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
chip->als_saturation = als_count * 922; /* 90% of full scale */
chip->als_time_scale = (als_time + 25) / 50;
- /* TSL2X7X Specific power-on / adc enable sequence
- * Power on the device 1st. */
+ /*
+ * TSL2X7X Specific power-on / adc enable sequence
+ * Power on the device 1st.
+ */
utmp = TSL2X7X_CNTL_PWR_ON;
ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
+ TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
if (ret < 0) {
dev_err(&chip->client->dev,
"%s: failed on CNTRL reg.\n", __func__);
return ret;
}
- /* Use the following shadow copy for our delay before enabling ADC.
- * Write all the registers. */
+ /*
+ * Use the following shadow copy for our delay before enabling ADC.
+ * Write all the registers.
+ */
for (i = 0, dev_reg = chip->tsl2x7x_config;
i < TSL2X7X_MAX_CONFIG_REG; i++) {
ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG + i, *dev_reg++);
+ TSL2X7X_CMD_REG + i,
+ *dev_reg++);
if (ret < 0) {
dev_err(&chip->client->dev,
"failed on write to reg %d.\n", i);
@@ -721,13 +728,15 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
mdelay(3); /* Power-on settling time */
- /* NOW enable the ADC
- * initialize the desired mode of operation */
+ /*
+ * NOW enable the ADC
+ * initialize the desired mode of operation
+ */
utmp = TSL2X7X_CNTL_PWR_ON |
TSL2X7X_CNTL_ADC_ENBL |
TSL2X7X_CNTL_PROX_DET_ENBL;
ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
+ TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
if (ret < 0) {
dev_err(&chip->client->dev,
"%s: failed on 2nd CTRL reg.\n", __func__);
@@ -741,12 +750,13 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
reg_val = TSL2X7X_CNTL_PWR_ON | TSL2X7X_CNTL_ADC_ENBL;
if ((chip->tsl2x7x_settings.interrupts_en == 0x20) ||
- (chip->tsl2x7x_settings.interrupts_en == 0x30))
+ (chip->tsl2x7x_settings.interrupts_en == 0x30))
reg_val |= TSL2X7X_CNTL_PROX_DET_ENBL;
reg_val |= chip->tsl2x7x_settings.interrupts_en;
ret = i2c_smbus_write_byte_data(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_CNTRL), reg_val);
+ (TSL2X7X_CMD_REG |
+ TSL2X7X_CNTRL), reg_val);
if (ret < 0)
dev_err(&chip->client->dev,
"%s: failed in tsl2x7x_IOCTL_INT_SET.\n",
@@ -754,8 +764,9 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
/* Clear out any initial interrupts */
ret = i2c_smbus_write_byte(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
- TSL2X7X_CMD_PROXALS_INT_CLR);
+ TSL2X7X_CMD_REG |
+ TSL2X7X_CMD_SPL_FN |
+ TSL2X7X_CMD_PROXALS_INT_CLR);
if (ret < 0) {
dev_err(&chip->client->dev,
"%s: Failed to clear Int status\n",
@@ -776,7 +787,7 @@ static int tsl2x7x_chip_off(struct iio_dev *indio_dev)
chip->tsl2x7x_chip_status = TSL2X7X_CHIP_SUSPENDED;
ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CNTRL, 0x00);
+ TSL2X7X_CMD_REG | TSL2X7X_CNTRL, 0x00);
if (chip->pdata && chip->pdata->power_off)
chip->pdata->power_off(chip->client);
@@ -819,7 +830,7 @@ int tsl2x7x_invoke_change(struct iio_dev *indio_dev)
static
void tsl2x7x_prox_calculate(int *data, int length,
- struct tsl2x7x_prox_stat *statP)
+ struct tsl2x7x_prox_stat *statP)
{
int i;
int sample_sum;
@@ -843,7 +854,7 @@ void tsl2x7x_prox_calculate(int *data, int length,
tmp = data[i] - statP->mean;
sample_sum += tmp * tmp;
}
- statP->stddev = int_sqrt((long)sample_sum)/length;
+ statP->stddev = int_sqrt((long)sample_sum) / length;
}
/**
@@ -886,20 +897,21 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
tsl2x7x_get_prox(indio_dev);
prox_history[i] = chip->prox_data;
dev_info(&chip->client->dev, "2 i=%d prox data= %d\n",
- i, chip->prox_data);
+ i, chip->prox_data);
}
tsl2x7x_chip_off(indio_dev);
calP = &prox_stat_data[PROX_STAT_CAL];
tsl2x7x_prox_calculate(prox_history,
- chip->tsl2x7x_settings.prox_max_samples_cal, calP);
+ chip->tsl2x7x_settings.prox_max_samples_cal,
+ calP);
chip->tsl2x7x_settings.prox_thres_high = (calP->max << 1) - calP->mean;
dev_info(&chip->client->dev, " cal min=%d mean=%d max=%d\n",
- calP->min, calP->mean, calP->max);
+ calP->min, calP->mean, calP->max);
dev_info(&chip->client->dev,
- "%s proximity threshold set to %d\n",
- chip->client->name, chip->tsl2x7x_settings.prox_thres_high);
+ "%s proximity threshold set to %d\n",
+ chip->client->name, chip->tsl2x7x_settings.prox_thres_high);
/* back to the way they were */
chip->tsl2x7x_settings.interrupts_en = tmp_irq_settings;
@@ -908,7 +920,8 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
}
static ssize_t tsl2x7x_power_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
@@ -916,7 +929,8 @@ static ssize_t tsl2x7x_power_state_show(struct device *dev,
}
static ssize_t tsl2x7x_power_state_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool value;
@@ -933,7 +947,8 @@ static ssize_t tsl2x7x_power_state_store(struct device *dev,
}
static ssize_t tsl2x7x_gain_available_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
@@ -950,13 +965,15 @@ static ssize_t tsl2x7x_gain_available_show(struct device *dev,
}
static ssize_t tsl2x7x_prox_gain_available_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", "1 2 4 8");
}
static ssize_t tsl2x7x_als_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
int y, z;
@@ -970,7 +987,8 @@ static ssize_t tsl2x7x_als_time_show(struct device *dev,
}
static ssize_t tsl2x7x_als_time_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -986,7 +1004,7 @@ static ssize_t tsl2x7x_als_time_store(struct device *dev,
TSL2X7X_MAX_TIMER_CNT - (u8)result.fract;
dev_info(&chip->client->dev, "%s: als time = %d",
- __func__, chip->tsl2x7x_settings.als_time);
+ __func__, chip->tsl2x7x_settings.als_time);
tsl2x7x_invoke_change(indio_dev);
@@ -997,7 +1015,8 @@ static IIO_CONST_ATTR(in_illuminance0_integration_time_available,
".00272 - .696");
static ssize_t tsl2x7x_als_cal_target_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
@@ -1006,7 +1025,8 @@ static ssize_t tsl2x7x_als_cal_target_show(struct device *dev,
}
static ssize_t tsl2x7x_als_cal_target_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1025,7 +1045,8 @@ static ssize_t tsl2x7x_als_cal_target_store(struct device *dev,
/* persistence settings */
static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
int y, z, filter_delay;
@@ -1041,7 +1062,8 @@ static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
}
static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1063,7 +1085,7 @@ static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
chip->tsl2x7x_settings.persistence |= (filter_delay & 0x0F);
dev_info(&chip->client->dev, "%s: als persistence = %d",
- __func__, filter_delay);
+ __func__, filter_delay);
tsl2x7x_invoke_change(indio_dev);
@@ -1071,7 +1093,8 @@ static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
}
static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
int y, z, filter_delay;
@@ -1087,7 +1110,8 @@ static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
}
static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1109,7 +1133,7 @@ static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
chip->tsl2x7x_settings.persistence |= ((filter_delay << 4) & 0xF0);
dev_info(&chip->client->dev, "%s: prox persistence = %d",
- __func__, filter_delay);
+ __func__, filter_delay);
tsl2x7x_invoke_change(indio_dev);
@@ -1117,7 +1141,8 @@ static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
}
static ssize_t tsl2x7x_do_calibrate(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool value;
@@ -1134,7 +1159,8 @@ static ssize_t tsl2x7x_do_calibrate(struct device *dev,
}
static ssize_t tsl2x7x_luxtable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
int i = 0;
@@ -1146,8 +1172,10 @@ static ssize_t tsl2x7x_luxtable_show(struct device *dev,
chip->tsl2x7x_device_lux[i].ch0,
chip->tsl2x7x_device_lux[i].ch1);
if (chip->tsl2x7x_device_lux[i].ratio == 0) {
- /* We just printed the first "0" entry.
- * Now get rid of the extra "," and break. */
+ /*
+ * We just printed the first "0" entry.
+ * Now get rid of the extra "," and break.
+ */
offset--;
break;
}
@@ -1159,11 +1187,12 @@ static ssize_t tsl2x7x_luxtable_show(struct device *dev,
}
static ssize_t tsl2x7x_luxtable_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int value[ARRAY_SIZE(chip->tsl2x7x_device_lux)*3 + 1];
+ int value[ARRAY_SIZE(chip->tsl2x7x_device_lux) * 3 + 1];
int n;
get_options(buf, ARRAY_SIZE(value), value);
@@ -1175,7 +1204,7 @@ static ssize_t tsl2x7x_luxtable_store(struct device *dev,
*/
n = value[0];
if ((n % 3) || n < 6 ||
- n > ((ARRAY_SIZE(chip->tsl2x7x_device_lux) - 1) * 3)) {
+ n > ((ARRAY_SIZE(chip->tsl2x7x_device_lux) - 1) * 3)) {
dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n);
return -EINVAL;
}
@@ -1198,7 +1227,8 @@ static ssize_t tsl2x7x_luxtable_store(struct device *dev,
}
static ssize_t tsl2x7x_do_prox_calibrate(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool value;
@@ -1391,10 +1421,10 @@ static int tsl2x7x_read_raw(struct iio_dev *indio_dev,
}
static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1529,7 +1559,7 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
u8 value;
value = i2c_smbus_read_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_STATUS);
+ TSL2X7X_CMD_REG | TSL2X7X_STATUS);
/* What type of interrupt do we need to process */
if (value & TSL2X7X_STA_PRX_INTR) {
@@ -1545,16 +1575,16 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
if (value & TSL2X7X_STA_ALS_INTR) {
tsl2x7x_get_lux(indio_dev); /* freshen data for ABI */
iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_EITHER),
- timestamp);
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ timestamp);
}
/* Clear interrupt now that we have handled it. */
ret = i2c_smbus_write_byte(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
- TSL2X7X_CMD_PROXALS_INT_CLR);
+ TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
+ TSL2X7X_CMD_PROXALS_INT_CLR);
if (ret < 0)
dev_err(&chip->client->dev,
"Failed to clear irq from event handler. err = %d\n",
@@ -1616,6 +1646,7 @@ static struct attribute *tsl2X7X_ALS_event_attrs[] = {
&dev_attr_in_intensity0_thresh_period.attr,
NULL,
};
+
static struct attribute *tsl2X7X_PRX_event_attrs[] = {
&dev_attr_in_proximity0_thresh_period.attr,
NULL,
@@ -1857,7 +1888,7 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
};
static int tsl2x7x_probe(struct i2c_client *clientp,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
int ret;
unsigned char device_id;
@@ -1873,14 +1904,14 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
i2c_set_clientdata(clientp, indio_dev);
ret = tsl2x7x_i2c_read(chip->client,
- TSL2X7X_CHIPID, &device_id);
+ TSL2X7X_CHIPID, &device_id);
if (ret < 0)
return ret;
if ((!tsl2x7x_device_id(&device_id, id->driver_data)) ||
- (tsl2x7x_device_id(&device_id, id->driver_data) == -EINVAL)) {
+ (tsl2x7x_device_id(&device_id, id->driver_data) == -EINVAL)) {
dev_info(&chip->client->dev,
- "%s: i2c device found does not match expected id\n",
+ "%s: i2c device found does not match expected id\n",
__func__);
return -EINVAL;
}
@@ -1892,8 +1923,10 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
return ret;
}
- /* ALS and PROX functions can be invoked via user space poll
- * or H/W interrupt. If busy return last sample. */
+ /*
+ * ALS and PROX functions can be invoked via user space poll
+ * or H/W interrupt. If busy return last sample.
+ */
mutex_init(&chip->als_mutex);
mutex_init(&chip->prox_mutex);
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 69287108f..4b5f05fda 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -333,7 +333,8 @@ static int ade7753_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(3); /* Enables an interrupt when a data is
- present in the waveform register */
+ * present in the waveform register
+ */
else
irqen &= ~BIT(3);
@@ -528,7 +529,6 @@ static int ade7753_probe(struct spi_device *spi)
return iio_device_register(indio_dev);
}
-/* fixme, confirm ordering in this function */
static int ade7753_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index f4188e17d..c46bef641 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -351,7 +351,8 @@ static int ade7754_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(14); /* Enables an interrupt when a data is
- present in the waveform register */
+ * present in the waveform register
+ */
else
irqen &= ~BIT(14);
@@ -558,7 +559,6 @@ powerdown_on_error:
return ret;
}
-/* fixme, confirm ordering in this function */
static int ade7754_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
index f6739e2c2..1d04ec952 100644
--- a/drivers/staging/iio/meter/ade7758.h
+++ b/drivers/staging/iio/meter/ade7758.h
@@ -129,6 +129,7 @@ struct ade7758_state {
unsigned char tx_buf[8];
};
+
#ifdef CONFIG_IIO_BUFFER
/* At the moment triggers are only used for ring buffer
* filling. This may change!
@@ -138,25 +139,22 @@ void ade7758_remove_trigger(struct iio_dev *indio_dev);
int ade7758_probe_trigger(struct iio_dev *indio_dev);
ssize_t ade7758_read_data_from_ring(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-
+ struct device_attribute *attr, char *buf);
int ade7758_configure_ring(struct iio_dev *indio_dev);
void ade7758_unconfigure_ring(struct iio_dev *indio_dev);
int ade7758_set_irq(struct device *dev, bool enable);
-int ade7758_spi_write_reg_8(struct device *dev,
- u8 reg_address, u8 val);
-int ade7758_spi_read_reg_8(struct device *dev,
- u8 reg_address, u8 *val);
+int ade7758_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val);
+int ade7758_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val);
#else /* CONFIG_IIO_BUFFER */
static inline void ade7758_remove_trigger(struct iio_dev *indio_dev)
{
}
+
static inline int ade7758_probe_trigger(struct iio_dev *indio_dev)
{
return 0;
@@ -166,16 +164,20 @@ static int ade7758_configure_ring(struct iio_dev *indio_dev)
{
return 0;
}
+
static inline void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
{
}
+
static inline int ade7758_initialize_ring(struct iio_ring_buffer *ring)
{
return 0;
}
+
static inline void ade7758_uninitialize_ring(struct iio_dev *indio_dev)
{
}
+
#endif /* CONFIG_IIO_BUFFER */
#endif
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 40f5afaa9..ebb8a1993 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -24,9 +24,7 @@
#include "meter.h"
#include "ade7758.h"
-int ade7758_spi_write_reg_8(struct device *dev,
- u8 reg_address,
- u8 val)
+int ade7758_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -42,9 +40,8 @@ int ade7758_spi_write_reg_8(struct device *dev,
return ret;
}
-static int ade7758_spi_write_reg_16(struct device *dev,
- u8 reg_address,
- u16 value)
+static int ade7758_spi_write_reg_16(struct device *dev, u8 reg_address,
+ u16 value)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -68,9 +65,8 @@ static int ade7758_spi_write_reg_16(struct device *dev,
return ret;
}
-static int ade7758_spi_write_reg_24(struct device *dev,
- u8 reg_address,
- u32 value)
+static int ade7758_spi_write_reg_24(struct device *dev, u8 reg_address,
+ u32 value)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -95,9 +91,7 @@ static int ade7758_spi_write_reg_24(struct device *dev,
return ret;
}
-int ade7758_spi_read_reg_8(struct device *dev,
- u8 reg_address,
- u8 *val)
+int ade7758_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7758_state *st = iio_priv(indio_dev);
@@ -124,7 +118,7 @@ int ade7758_spi_read_reg_8(struct device *dev,
ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
- reg_address);
+ reg_address);
goto error_ret;
}
*val = st->rx[0];
@@ -134,9 +128,8 @@ error_ret:
return ret;
}
-static int ade7758_spi_read_reg_16(struct device *dev,
- u8 reg_address,
- u16 *val)
+static int ade7758_spi_read_reg_16(struct device *dev, u8 reg_address,
+ u16 *val)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7758_state *st = iio_priv(indio_dev);
@@ -156,7 +149,6 @@ static int ade7758_spi_read_reg_16(struct device *dev,
},
};
-
mutex_lock(&st->buf_lock);
st->tx[0] = ADE7758_READ_REG(reg_address);
st->tx[1] = 0;
@@ -165,7 +157,7 @@ static int ade7758_spi_read_reg_16(struct device *dev,
ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
- reg_address);
+ reg_address);
goto error_ret;
}
@@ -176,9 +168,8 @@ error_ret:
return ret;
}
-static int ade7758_spi_read_reg_24(struct device *dev,
- u8 reg_address,
- u32 *val)
+static int ade7758_spi_read_reg_24(struct device *dev, u8 reg_address,
+ u32 *val)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7758_state *st = iio_priv(indio_dev);
@@ -207,7 +198,7 @@ static int ade7758_spi_read_reg_24(struct device *dev,
ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
- reg_address);
+ reg_address);
goto error_ret;
}
*val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
@@ -218,8 +209,7 @@ error_ret:
}
static ssize_t ade7758_read_8bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
int ret;
u8 val = 0;
@@ -233,8 +223,7 @@ static ssize_t ade7758_read_8bit(struct device *dev,
}
static ssize_t ade7758_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
int ret;
u16 val = 0;
@@ -248,8 +237,7 @@ static ssize_t ade7758_read_16bit(struct device *dev,
}
static ssize_t ade7758_read_24bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
int ret;
u32 val = 0;
@@ -263,9 +251,8 @@ static ssize_t ade7758_read_24bit(struct device *dev,
}
static ssize_t ade7758_write_8bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
@@ -281,9 +268,8 @@ error_ret:
}
static ssize_t ade7758_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
@@ -427,7 +413,8 @@ int ade7758_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(16); /* Enables an interrupt when a data is
- present in the waveform register */
+ * present in the waveform register
+ */
else
irqen &= ~BIT(16);
@@ -479,16 +466,13 @@ err_ret:
}
static ssize_t ade7758_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
int ret;
u8 t;
int sps;
- ret = ade7758_spi_read_reg_8(dev,
- ADE7758_WAVMODE,
- &t);
+ ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &t);
if (ret)
return ret;
@@ -499,9 +483,8 @@ static ssize_t ade7758_read_frequency(struct device *dev,
}
static ssize_t ade7758_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
u16 val;
@@ -532,18 +515,14 @@ static ssize_t ade7758_write_frequency(struct device *dev,
goto out;
}
- ret = ade7758_spi_read_reg_8(dev,
- ADE7758_WAVMODE,
- &reg);
+ ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &reg);
if (ret)
goto out;
reg &= ~(5 << 3);
reg |= t << 5;
- ret = ade7758_spi_write_reg_8(dev,
- ADE7758_WAVMODE,
- reg);
+ ret = ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, reg);
out:
mutex_unlock(&indio_dev->mlock);
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index 9a24e0226..a6b76d4b1 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -33,7 +33,7 @@ static int ade7758_spi_read_burst(struct iio_dev *indio_dev)
return ret;
}
-static int ade7758_write_waveform_type(struct device *dev, unsigned type)
+static int ade7758_write_waveform_type(struct device *dev, unsigned int type)
{
int ret;
u8 reg;
@@ -85,7 +85,7 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
**/
static int ade7758_ring_preenable(struct iio_dev *indio_dev)
{
- unsigned channel;
+ unsigned int channel;
if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 684e612a8..80144d40d 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -289,7 +289,8 @@ static int ade7759_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(3); /* Enables an interrupt when a data is
- present in the waveform register */
+ * present in the waveform register
+ */
else
irqen &= ~BIT(3);
@@ -476,7 +477,6 @@ static int ade7759_probe(struct spi_device *spi)
return iio_device_register(indio_dev);
}
-/* fixme, confirm ordering in this function */
static int ade7759_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index 9e439af71..75e8685e6 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -421,7 +421,8 @@ static int ade7854_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(17); /* 1: interrupt enabled when all periodical
- (at 8 kHz rate) DSP computations finish. */
+ * (at 8 kHz rate) DSP computations finish.
+ */
else
irqen &= ~BIT(17);
diff --git a/drivers/staging/iio/resolver/ad2s1210.h b/drivers/staging/iio/resolver/ad2s1210.h
index c7158f6e6..e9b214770 100644
--- a/drivers/staging/iio/resolver/ad2s1210.h
+++ b/drivers/staging/iio/resolver/ad2s1210.h
@@ -12,9 +12,9 @@
#define _AD2S1210_H
struct ad2s1210_platform_data {
- unsigned sample;
- unsigned a[2];
- unsigned res[2];
- bool gpioin;
+ unsigned int sample;
+ unsigned int a[2];
+ unsigned int res[2];
+ bool gpioin;
};
#endif /* _AD2S1210_H */
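A hypothetical board-file initialization of this platform data; the GPIO numbers below are made up for illustration:

	static struct ad2s1210_platform_data ad2s1210_pdata = {
	        .sample = 18,           /* SAMPLE line GPIO (board-specific) */
	        .a      = { 16, 17 },   /* A0/A1 mode-select GPIOs */
	        .res    = { 19, 20 },   /* RES0/RES1 resolution GPIOs */
	        .gpioin = true,         /* read resolution pins as inputs */
	};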
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 035dd456d..38dca69a0 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -55,12 +55,12 @@ static struct bfin_timer iio_bfin_timer_code[MAX_BLACKFIN_GPTIMERS] = {
};
struct bfin_tmr_state {
- struct iio_trigger *trig;
- struct bfin_timer *t;
- unsigned timer_num;
- bool output_enable;
- unsigned int duty;
- int irq;
+ struct iio_trigger *trig;
+ struct bfin_timer *t;
+ unsigned int timer_num;
+ bool output_enable;
+ unsigned int duty;
+ int irq;
};
static int iio_bfin_tmr_set_state(struct iio_trigger *trig, bool state)
@@ -178,7 +178,7 @@ static const struct iio_trigger_ops iio_bfin_tmr_trigger_ops = {
static int iio_bfin_tmr_trigger_probe(struct platform_device *pdev)
{
- struct iio_bfin_timer_trigger_pdata *pdata = pdev->dev.platform_data;
+ struct iio_bfin_timer_trigger_pdata *pdata;
struct bfin_tmr_state *st;
unsigned int config;
int ret;
@@ -221,6 +221,7 @@ static int iio_bfin_tmr_trigger_probe(struct platform_device *pdev)
config = PWM_OUT | PERIOD_CNT | IRQ_ENA;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->output_enable) {
unsigned long long val;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 40af75c42..4141afb10 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -60,41 +60,12 @@
#define LNET_ACCEPTOR_MAX_RESERVED_PORT 1023
/*
- * libcfs pseudo device operations
- *
- * It's just draft now.
- */
-
-struct cfs_psdev_file {
- unsigned long off;
- void *private_data;
- unsigned long reserved1;
- unsigned long reserved2;
-};
-
-struct cfs_psdev_ops {
- int (*p_open)(unsigned long, void *);
- int (*p_close)(unsigned long, void *);
- int (*p_read)(struct cfs_psdev_file *, char *, unsigned long);
- int (*p_write)(struct cfs_psdev_file *, char *, unsigned long);
- int (*p_ioctl)(struct cfs_psdev_file *, unsigned long, void __user *);
-};
-
-/*
- * Drop into debugger, if possible. Implementation is provided by platform.
- */
-
-void cfs_enter_debugger(void);
-
-/*
* Defined by platform
*/
-int unshare_fs_struct(void);
sigset_t cfs_block_allsigs(void);
sigset_t cfs_block_sigs(unsigned long sigs);
sigset_t cfs_block_sigsinv(unsigned long sigs);
void cfs_restore_sigs(sigset_t);
-int cfs_signal_pending(void);
void cfs_clear_sigpending(void);
/*
@@ -117,7 +88,25 @@ void cfs_get_random_bytes(void *buf, int size);
#include "libcfs_workitem.h"
#include "libcfs_hash.h"
#include "libcfs_fail.h"
-#include "libcfs_crypto.h"
+
+struct libcfs_ioctl_handler {
+ struct list_head item;
+ int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
+};
+
+#define DECLARE_IOCTL_HANDLER(ident, func) \
+ struct libcfs_ioctl_handler ident = { \
+ .item = LIST_HEAD_INIT(ident.item), \
+ .handle_ioctl = func \
+ }
+
+int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
+int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
+
+int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
+ const struct libcfs_ioctl_hdr __user *uparam);
+int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data);
+int libcfs_ioctl(unsigned long cmd, void __user *arg);
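As a sketch of how these declarations fit together (the handler name and body are hypothetical):

	static int my_handle_ioctl(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
	{
	        return -EINVAL;         /* decode cmd and hdr here */
	}

	static DECLARE_IOCTL_HANDLER(my_ioctl_handler, my_handle_ioctl);

	static int __init my_init(void)
	{
	        return libcfs_register_ioctl(&my_ioctl_handler);
	}

	static void __exit my_exit(void)
	{
	        libcfs_deregister_ioctl(&my_ioctl_handler);
	}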
/* container_of depends on "likely" which is defined in libcfs_private.h */
static inline void *__container_of(void *ptr, unsigned long shift)
@@ -143,8 +132,6 @@ extern struct miscdevice libcfs_dev;
extern char lnet_upcall[1024];
extern char lnet_debug_log_upcall[1024];
-extern struct cfs_psdev_ops libcfs_psdev_ops;
-
extern struct cfs_wi_sched *cfs_sched_rehash;
struct lnet_debugfs_symlink_def {
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 9e62c5971..81d8079e3 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -203,6 +203,85 @@ int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);
*/
int cfs_cpu_ht_nsiblings(int cpu);
+/*
+ * allocate per-cpu-partition data; the returned value is an array of
+ * pointers that can be indexed by CPU ID.
+ * cptab != NULL: size of array is number of CPU partitions
+ * cptab == NULL: size of array is number of HW cores
+ */
+void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
+/*
+ * destroy per-cpu-partition variable
+ */
+void cfs_percpt_free(void *vars);
+int cfs_percpt_number(void *vars);
+
+#define cfs_percpt_for_each(var, i, vars) \
+ for (i = 0; i < cfs_percpt_number(vars) && \
+ ((var) = (vars)[i]) != NULL; i++)
+
+/*
+ * percpu partition lock
+ *
+ * There are some use-cases like this in Lustre:
+ * . each CPU partition has its own private data which is frequently changed,
+ *   and mostly by the local CPU partition.
+ * . all CPU partitions share some global data, which is rarely changed.
+ *
+ * LNet is a typical example.
+ * CPU partition lock is designed for this kind of use-case:
+ * . each CPU partition has its own private lock
+ * . change on private data just needs to take the private lock
+ * . read on shared data just needs to take _any_ of private locks
+ * . change on shared data needs to take _all_ private locks,
+ * which is slow and should be really rare.
+ */
+enum {
+ CFS_PERCPT_LOCK_EX = -1, /* negative */
+};
+
+struct cfs_percpt_lock {
+ /* cpu-partition-table for this lock */
+ struct cfs_cpt_table *pcl_cptab;
+ /* exclusively locked */
+ unsigned int pcl_locked;
+ /* private lock table */
+ spinlock_t **pcl_locks;
+};
+
+/* return number of private locks */
+#define cfs_percpt_lock_num(pcl) cfs_cpt_number(pcl->pcl_cptab)
+
+/*
+ * create a cpu-partition lock based on CPU partition table \a cptab;
+ * \a keys supplies lockdep class keys for the private locks (may be NULL)
+ */
+struct cfs_percpt_lock *cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
+ struct lock_class_key *keys);
+/* destroy a cpu-partition lock */
+void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
+
+/* lock private lock \a index of \a pcl */
+void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
+
+/* unlock private lock \a index of \a pcl */
+void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
+
+#define CFS_PERCPT_LOCK_KEYS 256
+
+/* NB: don't allocate keys dynamically, lockdep needs them to be in ".data" */
+#define cfs_percpt_lock_alloc(cptab) \
+({ \
+ static struct lock_class_key ___keys[CFS_PERCPT_LOCK_KEYS]; \
+ struct cfs_percpt_lock *___lk; \
+ \
+ if (cfs_cpt_number(cptab) > CFS_PERCPT_LOCK_KEYS) \
+ ___lk = cfs_percpt_lock_create(cptab, NULL); \
+ else \
+ ___lk = cfs_percpt_lock_create(cptab, ___keys); \
+ ___lk; \
+})
+
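Putting the helpers above together, a minimal usage sketch (the counter type is hypothetical; error handling is elided):

	struct my_counter { long val; };

	static void percpt_demo(struct cfs_cpt_table *cptab)
	{
	        struct cfs_percpt_lock *plock = cfs_percpt_lock_alloc(cptab);
	        struct my_counter **cnts;
	        struct my_counter *c;
	        int i;

	        cnts = cfs_percpt_alloc(cptab, sizeof(**cnts));

	        cfs_percpt_lock(plock, 0);              /* cheap: one private lock */
	        cnts[0]->val++;
	        cfs_percpt_unlock(plock, 0);

	        cfs_percpt_lock(plock, CFS_PERCPT_LOCK_EX);     /* rare: all locks */
	        cfs_percpt_for_each(c, i, cnts)
	                c->val = 0;
	        cfs_percpt_unlock(plock, CFS_PERCPT_LOCK_EX);

	        cfs_percpt_free(cnts);
	        cfs_percpt_lock_free(plock);
	}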
/**
* iterate over all CPU partitions in \a cptab
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
index e8663697e..02be7d760 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
@@ -46,7 +46,8 @@ enum cfs_crypto_hash_alg {
CFS_HASH_ALG_SHA384,
CFS_HASH_ALG_SHA512,
CFS_HASH_ALG_CRC32C,
- CFS_HASH_ALG_MAX
+ CFS_HASH_ALG_MAX,
+ CFS_HASH_ALG_UNKNOWN = 0xff
};
static struct cfs_crypto_hash_type hash_types[] = {
@@ -59,11 +60,22 @@ static struct cfs_crypto_hash_type hash_types[] = {
[CFS_HASH_ALG_SHA256] = { "sha256", 0, 32 },
[CFS_HASH_ALG_SHA384] = { "sha384", 0, 48 },
[CFS_HASH_ALG_SHA512] = { "sha512", 0, 64 },
+ [CFS_HASH_ALG_MAX] = { NULL, 0, 64 },
};
-/** Return pointer to type of hash for valid hash algorithm identifier */
+/* Maximum size of hash_types[].cht_size */
+#define CFS_CRYPTO_HASH_DIGESTSIZE_MAX 64
+
+/**
+ * Return hash algorithm information for the specified algorithm identifier
+ *
+ * Hash information includes algorithm name, initial seed, hash size.
+ *
+ * \retval cfs_crypto_hash_type for valid ID (CFS_HASH_ALG_*)
+ * \retval NULL for unknown algorithm identifier
+ */
static inline const struct cfs_crypto_hash_type *
- cfs_crypto_hash_type(unsigned char hash_alg)
+cfs_crypto_hash_type(enum cfs_crypto_hash_alg hash_alg)
{
struct cfs_crypto_hash_type *ht;
@@ -75,8 +87,16 @@ static inline const struct cfs_crypto_hash_type *
return NULL;
}
-/** Return hash name for valid hash algorithm identifier or "unknown" */
-static inline const char *cfs_crypto_hash_name(unsigned char hash_alg)
+/**
+ * Return hash name for hash algorithm identifier
+ *
+ * \param[in] hash_alg	hash algorithm id (CFS_HASH_ALG_*)
+ *
+ * \retval string name of known hash algorithm
+ * \retval "unknown" if hash algorithm is unknown
+ */
+static inline const char *
+cfs_crypto_hash_name(enum cfs_crypto_hash_alg hash_alg)
{
const struct cfs_crypto_hash_type *ht;
@@ -86,8 +106,15 @@ static inline const char *cfs_crypto_hash_name(unsigned char hash_alg)
return "unknown";
}
-/** Return digest size for valid algorithm identifier or 0 */
-static inline int cfs_crypto_hash_digestsize(unsigned char hash_alg)
+/**
+ * Return digest size for hash algorithm type
+ *
+ * \param[in] hash_alg	hash algorithm id (CFS_HASH_ALG_*)
+ *
+ * \retval hash algorithm digest size in bytes
+ * \retval 0 if hash algorithm type is unknown
+ */
+static inline int cfs_crypto_hash_digestsize(enum cfs_crypto_hash_alg hash_alg)
{
const struct cfs_crypto_hash_type *ht;
@@ -97,36 +124,24 @@ static inline int cfs_crypto_hash_digestsize(unsigned char hash_alg)
return 0;
}
-/** Return hash identifier for valid hash algorithm name or 0xFF */
+/**
+ * Find hash algorithm ID for the specified algorithm name
+ *
+ * \retval hash algorithm ID for valid ID (CFS_HASH_ALG_*)
+ * \retval CFS_HASH_ALG_UNKNOWN for unknown algorithm name
+ */
static inline unsigned char cfs_crypto_hash_alg(const char *algname)
{
- unsigned char i;
+ enum cfs_crypto_hash_alg hash_alg;
- for (i = 0; i < CFS_HASH_ALG_MAX; i++)
- if (!strcmp(hash_types[i].cht_name, algname))
- break;
- return (i == CFS_HASH_ALG_MAX ? 0xFF : i);
+ for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++)
+ if (strcmp(hash_types[hash_alg].cht_name, algname) == 0)
+ return hash_alg;
+
+ return CFS_HASH_ALG_UNKNOWN;
}
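An illustrative round trip through these lookup helpers:

	static void hash_lookup_demo(void)
	{
	        unsigned char alg = cfs_crypto_hash_alg("sha256");

	        if (alg != CFS_HASH_ALG_UNKNOWN)
	                pr_info("%s digest: %d bytes\n",
	                        cfs_crypto_hash_name(alg),        /* "sha256" */
	                        cfs_crypto_hash_digestsize(alg)); /* 32 */
	}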
-/** Calculate hash digest for buffer.
- * @param alg id of hash algorithm
- * @param buf buffer of data
- * @param buf_len buffer len
- * @param key initial value for algorithm, if it is NULL,
- * default initial value should be used.
- * @param key_len len of initial value
- * @param hash [out] pointer to hash, if it is NULL, hash_len is
- * set to valid digest size in bytes, retval -ENOSPC.
- * @param hash_len [in,out] size of hash buffer
- * @returns status of operation
- * @retval -EINVAL if buf, buf_len, hash_len or alg_id is invalid
- * @retval -ENODEV if this algorithm is unsupported
- * @retval -ENOSPC if pointer to hash is NULL, or hash_len less than
- * digest size
- * @retval 0 for success
- * @retval < 0 other errors from lower layers.
- */
-int cfs_crypto_hash_digest(unsigned char alg,
+int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
const void *buf, unsigned int buf_len,
unsigned char *key, unsigned int key_len,
unsigned char *hash, unsigned int *hash_len);
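A hedged one-shot usage sketch of the prototype above (caller-supplied buffer; error handling elided):

	static int digest_demo(const void *buf, unsigned int buf_len)
	{
	        unsigned char digest[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
	        unsigned int digest_len = sizeof(digest);

	        /* NULL key / 0 key_len selects the algorithm's default seed */
	        return cfs_crypto_hash_digest(CFS_HASH_ALG_SHA256, buf, buf_len,
	                                      NULL, 0, digest, &digest_len);
	}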
@@ -134,66 +149,17 @@ int cfs_crypto_hash_digest(unsigned char alg,
/* cfs crypto hash descriptor */
struct cfs_crypto_hash_desc;
-/** Allocate and initialize descriptor for hash algorithm.
- * @param alg algorithm id
- * @param key initial value for algorithm, if it is NULL,
- * default initial value should be used.
- * @param key_len len of initial value
- * @returns pointer to descriptor of hash instance
- * @retval ERR_PTR(error) when errors occurred.
- */
-struct cfs_crypto_hash_desc*
- cfs_crypto_hash_init(unsigned char alg,
- unsigned char *key, unsigned int key_len);
-
-/** Update digest by part of data.
- * @param desc hash descriptor
- * @param page data page
- * @param offset data offset
- * @param len data len
- * @returns status of operation
- * @retval 0 for success.
- */
+struct cfs_crypto_hash_desc *
+cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
+ unsigned char *key, unsigned int key_len);
int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc,
struct page *page, unsigned int offset,
unsigned int len);
-
-/** Update digest by part of data.
- * @param desc hash descriptor
- * @param buf pointer to data buffer
- * @param buf_len size of data at buffer
- * @returns status of operation
- * @retval 0 for success.
- */
int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *desc, const void *buf,
unsigned int buf_len);
-
-/** Finalize hash calculation, copy hash digest to buffer, destroy hash
- * descriptor.
- * @param desc hash descriptor
- * @param hash buffer pointer to store hash digest
- * @param hash_len pointer to hash buffer size, if NULL
- * destroy hash descriptor
- * @returns status of operation
- * @retval -ENOSPC if hash is NULL, or *hash_len less than
- * digest size
- * @retval 0 for success
- * @retval < 0 other errors from lower layers.
- */
int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *desc,
unsigned char *hash, unsigned int *hash_len);
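And the equivalent incremental form, as a sketch; per the descriptor API above, the final call copies the digest out and releases the descriptor:

	static int digest_demo_incremental(const void *b1, unsigned int l1,
	                                   const void *b2, unsigned int l2,
	                                   unsigned char *digest,
	                                   unsigned int *digest_len)
	{
	        struct cfs_crypto_hash_desc *desc;

	        desc = cfs_crypto_hash_init(CFS_HASH_ALG_CRC32C, NULL, 0);
	        if (IS_ERR(desc))
	                return PTR_ERR(desc);

	        cfs_crypto_hash_update(desc, b1, l1);
	        cfs_crypto_hash_update(desc, b2, l2);
	        return cfs_crypto_hash_final(desc, digest, digest_len);
	}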
-/**
- * Register crypto hash algorithms
- */
int cfs_crypto_register(void);
-
-/**
- * Unregister
- */
void cfs_crypto_unregister(void);
-
-/** Return hash speed in Mbytes per second for valid hash algorithm
- * identifier. If test was unsuccessful -1 would be returned.
- */
-int cfs_crypto_hash_speed(unsigned char hash_alg);
+int cfs_crypto_hash_speed(enum cfs_crypto_hash_alg hash_alg);
#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index 98430e710..455c54d0d 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -85,7 +85,6 @@ struct ptldebug_header {
#define PH_FLAG_FIRST_RECORD 1
/* Debugging subsystems (32 bits, non-overlapping) */
-/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
#define S_UNDEFINED 0x00000001
#define S_MDC 0x00000002
#define S_MDS 0x00000004
@@ -118,10 +117,14 @@ struct ptldebug_header {
#define S_MGS 0x20000000
#define S_FID 0x40000000 /* b_new_cmd */
#define S_FLD 0x80000000 /* b_new_cmd */
-/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
+
+#define LIBCFS_DEBUG_SUBSYS_NAMES { \
+ "undefined", "mdc", "mds", "osc", "ost", "class", "log", \
+ "llite", "rpc", "mgmt", "lnet", "lnd", "pinger", "filter", "", \
+ "echo", "ldlm", "lov", "lquota", "osd", "lfsck", "", "", "lmv", \
+ "", "sec", "gss", "", "mgc", "mgs", "fid", "fld", NULL }
/* Debugging masks (32 bits, non-overlapping) */
-/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */
#define D_INODE 0x00000002
#define D_SUPER 0x00000004
@@ -151,9 +154,14 @@ struct ptldebug_header {
#define D_QUOTA 0x04000000
#define D_SEC 0x08000000
#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */
-/* keep these in sync with lnet/{utils,libcfs}/debug.c */
+#define D_HSM 0x20000000
-#define D_HSM D_TRACE
+#define LIBCFS_DEBUG_MASKS_NAMES { \
+ "trace", "inode", "super", "ext2", "malloc", "cache", "info", \
+ "ioctl", "neterror", "net", "warning", "buffs", "other", \
+ "dentry", "nettrace", "page", "dlmtrace", "error", "emerg", \
+ "ha", "rpctrace", "vfstrace", "reada", "mmap", "config", \
+ "console", "quota", "sec", "lfsck", "hsm", NULL }
#define D_CANTMASK (D_ERROR | D_EMERG | D_WARNING | D_CONSOLE)
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index aa69c6a33..2e008bffc 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -38,6 +38,7 @@
extern unsigned long cfs_fail_loc;
extern unsigned int cfs_fail_val;
+extern int cfs_fail_err;
extern wait_queue_head_t cfs_race_waitq;
extern int cfs_race_state;
@@ -70,9 +71,14 @@ enum {
#define CFS_FAIL_RAND 0x08000000 /* fail 1/N of the times */
#define CFS_FAIL_USR1 0x04000000 /* user flag */
-#define CFS_FAIL_PRECHECK(id) (cfs_fail_loc && \
- (cfs_fail_loc & CFS_FAIL_MASK_LOC) == \
- ((id) & CFS_FAIL_MASK_LOC))
+#define CFS_FAULT 0x02000000 /* match any CFS_FAULT_CHECK */
+
+static inline bool CFS_FAIL_PRECHECK(__u32 id)
+{
+ return cfs_fail_loc != 0 &&
+ ((cfs_fail_loc & CFS_FAIL_MASK_LOC) == (id & CFS_FAIL_MASK_LOC) ||
+ (cfs_fail_loc & id & CFS_FAULT));
+}
static inline int cfs_fail_check_set(__u32 id, __u32 value,
int set, int quiet)
@@ -144,6 +150,9 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
#define CFS_FAIL_TIMEOUT_MS_ORSET(id, value, ms) \
cfs_fail_timeout_set(id, value, ms, CFS_FAIL_LOC_ORSET)
+#define CFS_FAULT_CHECK(id) \
+ CFS_FAIL_CHECK(CFS_FAULT | (id))
+
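In other words, a fault-injection site built on these macros fires either when cfs_fail_loc names that exact site or when the CFS_FAULT wildcard bit is set in cfs_fail_loc. A hypothetical site:

	static int fault_demo(void)
	{
	        /* site id 0x1234 is made up for illustration */
	        if (CFS_FAULT_CHECK(0x1234))
	                return -EIO;    /* fires for cfs_fail_loc == 0x1234, or
	                                 * for any cfs_fail_loc with CFS_FAULT set */
	        return 0;
	}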
/* The idea here is to synchronise two threads to force a race. The
* first thread that calls this with a matching fail_loc is put to
* sleep. The next thread that calls with the same fail_loc wakes up
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index c3f2332fa..119986bc7 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -245,7 +245,7 @@ struct cfs_hash {
/** # of iterators (caller of cfs_hash_for_each_*) */
__u32 hs_iterators;
/** rehash workitem */
- cfs_workitem_t hs_rehash_wi;
+ struct cfs_workitem hs_rehash_wi;
/** refcount on this hash table */
atomic_t hs_refcount;
/** rehash buckets-table */
@@ -262,7 +262,7 @@ struct cfs_hash {
/** bits when we found the max depth */
unsigned int hs_dep_bits;
/** workitem to output max depth */
- cfs_workitem_t hs_dep_wi;
+ struct cfs_workitem hs_dep_wi;
#endif
/** name of htable */
char hs_name[0];
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index 5ca99bd6f..4b9102bd9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -34,13 +34,16 @@
* libcfs/include/libcfs/libcfs_ioctl.h
*
* Low-level ioctl data structures. Kernel ioctl functions declared here,
- * and user space functions are in libcfsutil_ioctl.h.
+ * and user space functions are in libcfs/util/ioctl.h.
*
*/
#ifndef __LIBCFS_IOCTL_H__
#define __LIBCFS_IOCTL_H__
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
#define LIBCFS_IOCTL_VERSION 0x0001000a
#define LIBCFS_IOCTL_VERSION2 0x0001000b
@@ -49,6 +52,9 @@ struct libcfs_ioctl_hdr {
__u32 ioc_version;
};
+/** max size to copy from userspace */
+#define LIBCFS_IOC_DATA_MAX (128 * 1024)
+
struct libcfs_ioctl_data {
struct libcfs_ioctl_hdr ioc_hdr;
@@ -73,67 +79,48 @@ struct libcfs_ioctl_data {
char ioc_bulk[0];
};
-#define ioc_priority ioc_u32[0]
-
struct libcfs_debug_ioctl_data {
struct libcfs_ioctl_hdr hdr;
unsigned int subs;
unsigned int debug;
};
-#define LIBCFS_IOC_INIT(data) \
-do { \
- memset(&data, 0, sizeof(data)); \
- data.ioc_version = LIBCFS_IOCTL_VERSION; \
- data.ioc_len = sizeof(data); \
-} while (0)
-
-struct libcfs_ioctl_handler {
- struct list_head item;
- int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
-};
-
-#define DECLARE_IOCTL_HANDLER(ident, func) \
- struct libcfs_ioctl_handler ident = { \
- /* .item = */ LIST_HEAD_INIT(ident.item), \
- /* .handle_ioctl = */ func \
- }
+/* 'f' ioctls are defined in lustre_ioctl.h and lustre_user.h except for: */
+#define LIBCFS_IOC_DEBUG_MASK _IOWR('f', 250, long)
+#define IOCTL_LIBCFS_TYPE long
-/* FIXME check conflict with lustre_lib.h */
-#define LIBCFS_IOC_DEBUG_MASK _IOWR('f', 250, long)
-
-#define IOC_LIBCFS_TYPE 'e'
-#define IOC_LIBCFS_MIN_NR 30
+#define IOC_LIBCFS_TYPE ('e')
+#define IOC_LIBCFS_MIN_NR 30
/* libcfs ioctls */
-#define IOC_LIBCFS_PANIC _IOWR('e', 30, long)
-#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long)
-#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long)
-#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long)
+/* IOC_LIBCFS_PANIC obsolete in 2.8.0, was _IOWR('e', 30, IOCTL_LIBCFS_TYPE) */
+#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, IOCTL_LIBCFS_TYPE)
+/* IOC_LIBCFS_MEMHOG obsolete in 2.8.0, was _IOWR('e', 36, IOCTL_LIBCFS_TYPE) */
/* lnet ioctls */
-#define IOC_LIBCFS_GET_NI _IOWR('e', 50, long)
-#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long)
-#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, long)
-#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, long)
-/* #define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, long) */
-#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, long)
-#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, long)
-#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, long)
-#define IOC_LIBCFS_PING _IOWR('e', 61, long)
-/* #define IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, long) */
-#define IOC_LIBCFS_LNETST _IOWR('e', 63, long)
-#define IOC_LIBCFS_LNET_FAULT _IOWR('e', 64, long)
+#define IOC_LIBCFS_GET_NI _IOWR('e', 50, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, IOCTL_LIBCFS_TYPE)
+/* IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, IOCTL_LIBCFS_TYPE) */
+#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_PING _IOWR('e', 61, IOCTL_LIBCFS_TYPE)
+/* IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, IOCTL_LIBCFS_TYPE) */
+#define IOC_LIBCFS_LNETST _IOWR('e', 63, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_LNET_FAULT _IOWR('e', 64, IOCTL_LIBCFS_TYPE)
/* lnd ioctls */
-#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, long)
-#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, long)
-#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, long)
-#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, long)
-#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, long)
-#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, long)
-#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, long)
+#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, IOCTL_LIBCFS_TYPE)
/* ioctl 77 is free for use */
-#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, long)
-#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, long)
-#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, long)
+#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, IOCTL_LIBCFS_TYPE)
/*
* DLC Specific IOCTL numbers.
@@ -155,76 +142,4 @@ struct libcfs_ioctl_handler {
#define IOC_LIBCFS_GET_LNET_STATS _IOWR(IOC_LIBCFS_TYPE, 91, IOCTL_CONFIG_SIZE)
#define IOC_LIBCFS_MAX_NR 91
-static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
-{
- int len = sizeof(*data);
-
- len += cfs_size_round(data->ioc_inllen1);
- len += cfs_size_round(data->ioc_inllen2);
- return len;
-}
-
-static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
-{
- if (data->ioc_hdr.ioc_len > (1 << 30)) {
- CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n");
- return 1;
- }
- if (data->ioc_inllen1 > (1<<30)) {
- CERROR("LIBCFS ioctl: ioc_inllen1 larger than 1<<30\n");
- return 1;
- }
- if (data->ioc_inllen2 > (1<<30)) {
- CERROR("LIBCFS ioctl: ioc_inllen2 larger than 1<<30\n");
- return 1;
- }
- if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
- CERROR("LIBCFS ioctl: inlbuf1 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
- CERROR("LIBCFS ioctl: inlbuf2 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_pbuf1 && !data->ioc_plen1) {
- CERROR("LIBCFS ioctl: pbuf1 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_pbuf2 && !data->ioc_plen2) {
- CERROR("LIBCFS ioctl: pbuf2 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_plen1 && !data->ioc_pbuf1) {
- CERROR("LIBCFS ioctl: plen1 nonzero but no pbuf1 pointer\n");
- return 1;
- }
- if (data->ioc_plen2 && !data->ioc_pbuf2) {
- CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
- return 1;
- }
- if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
- CERROR("LIBCFS ioctl: packlen != ioc_len\n");
- return 1;
- }
- if (data->ioc_inllen1 &&
- data->ioc_bulk[data->ioc_inllen1 - 1] != '\0') {
- CERROR("LIBCFS ioctl: inlbuf1 not 0 terminated\n");
- return 1;
- }
- if (data->ioc_inllen2 &&
- data->ioc_bulk[cfs_size_round(data->ioc_inllen1) +
- data->ioc_inllen2 - 1] != '\0') {
- CERROR("LIBCFS ioctl: inlbuf2 not 0 terminated\n");
- return 1;
- }
- return 0;
-}
-
-int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
-int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
-int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg,
- __u32 *buf_len);
-int libcfs_ioctl_popdata(void __user *arg, void *buf, int size);
-int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data);
-
#endif /* __LIBCFS_IOCTL_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
index 082fe6de9..ac4e8cfe6 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
@@ -40,21 +40,32 @@
#ifndef __LIBCFS_PRIM_H__
#define __LIBCFS_PRIM_H__
-void add_wait_queue_exclusive_head(wait_queue_head_t *, wait_queue_t *);
-
/*
* Memory
*/
-#ifndef memory_pressure_get
-#define memory_pressure_get() (0)
-#endif
-#ifndef memory_pressure_set
-#define memory_pressure_set() do {} while (0)
-#endif
-#ifndef memory_pressure_clr
-#define memory_pressure_clr() do {} while (0)
+#if BITS_PER_LONG == 32
+/* limit to lowmem on 32-bit systems */
+#define NUM_CACHEPAGES \
+ min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
+#else
+#define NUM_CACHEPAGES totalram_pages
#endif
+static inline unsigned int memory_pressure_get(void)
+{
+ return current->flags & PF_MEMALLOC;
+}
+
+static inline void memory_pressure_set(void)
+{
+ current->flags |= PF_MEMALLOC;
+}
+
+static inline void memory_pressure_clr(void)
+{
+ current->flags &= ~PF_MEMALLOC;
+}
+
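A minimal sketch of the save/set/restore pattern these helpers support:

	static void pressure_demo(void)
	{
	        unsigned int old = memory_pressure_get();

	        memory_pressure_set();          /* mark current task PF_MEMALLOC */
	        /* ... allocation that must dip into reserves ... */
	        if (!old)
	                memory_pressure_clr();  /* restore only if we set it */
	}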
static inline int cfs_memory_pressure_get_and_set(void)
{
int old = memory_pressure_get();
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index 13335437c..2fd2a9690 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -182,25 +182,6 @@ int libcfs_debug_clear_buffer(void);
int libcfs_debug_mark_buffer(const char *text);
/*
- * allocate per-cpu-partition data, returned value is an array of pointers,
- * variable can be indexed by CPU ID.
- * cptable != NULL: size of array is number of CPU partitions
- * cptable == NULL: size of array is number of HW cores
- */
-void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
-/*
- * destroy per-cpu-partition variable
- */
-void cfs_percpt_free(void *vars);
-int cfs_percpt_number(void *vars);
-void *cfs_percpt_current(void *vars);
-void *cfs_percpt_index(void *vars, int idx);
-
-#define cfs_percpt_for_each(var, i, vars) \
- for (i = 0; i < cfs_percpt_number(vars) && \
- ((var) = (vars)[i]) != NULL; i++)
-
-/*
* allocate a variable array, returned value is an array of pointers.
* Caller can specify length of array by count.
*/
@@ -302,62 +283,6 @@ do { \
#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr)))
-/*
- * percpu partition lock
- *
- * There are some use-cases like this in Lustre:
- * . each CPU partition has it's own private data which is frequently changed,
- * and mostly by the local CPU partition.
- * . all CPU partitions share some global data, these data are rarely changed.
- *
- * LNet is typical example.
- * CPU partition lock is designed for this kind of use-cases:
- * . each CPU partition has it's own private lock
- * . change on private data just needs to take the private lock
- * . read on shared data just needs to take _any_ of private locks
- * . change on shared data needs to take _all_ private locks,
- * which is slow and should be really rare.
- */
-
-enum {
- CFS_PERCPT_LOCK_EX = -1, /* negative */
-};
-
-struct cfs_percpt_lock {
- /* cpu-partition-table for this lock */
- struct cfs_cpt_table *pcl_cptab;
- /* exclusively locked */
- unsigned int pcl_locked;
- /* private lock table */
- spinlock_t **pcl_locks;
-};
-
-/* return number of private locks */
-static inline int
-cfs_percpt_lock_num(struct cfs_percpt_lock *pcl)
-{
- return cfs_cpt_number(pcl->pcl_cptab);
-}
-
-/*
- * create a cpu-partition lock based on CPU partition table \a cptab,
- * each private lock has extra \a psize bytes padding data
- */
-struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
-/* destroy a cpu-partition lock */
-void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
-
-/* lock private lock \a index of \a pcl */
-void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
-/* unlock private lock \a index of \a pcl */
-void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
-/* create percpt (atomic) refcount based on @cptab */
-atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
-/* destroy percpt refcount */
-void cfs_percpt_atomic_free(atomic_t **refs);
-/* return sum of all percpu refs */
-int cfs_percpt_atomic_summary(atomic_t **refs);
-
/** Compile-time assertion.
* Check an invariant described by a constant expression at compile time by
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
index 5cc64f327..f9b20c5ac 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
@@ -73,7 +73,7 @@ int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt,
struct cfs_workitem;
typedef int (*cfs_wi_action_t) (struct cfs_workitem *);
-typedef struct cfs_workitem {
+struct cfs_workitem {
/** chain on runq or rerunq */
struct list_head wi_list;
/** working function */
@@ -84,10 +84,10 @@ typedef struct cfs_workitem {
unsigned short wi_running:1;
/** scheduled */
unsigned short wi_scheduled:1;
-} cfs_workitem_t;
+};
static inline void
-cfs_wi_init(cfs_workitem_t *wi, void *data, cfs_wi_action_t action)
+cfs_wi_init(struct cfs_workitem *wi, void *data, cfs_wi_action_t action)
{
INIT_LIST_HEAD(&wi->wi_list);
@@ -97,9 +97,9 @@ cfs_wi_init(cfs_workitem_t *wi, void *data, cfs_wi_action_t action)
wi->wi_action = action;
}
-void cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
-int cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
-void cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
+void cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
+int cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
+void cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
int cfs_wi_startup(void);
void cfs_wi_shutdown(void);
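
A minimal usage sketch of the de-typedef'ed workitem API (names are hypothetical; the return-value contract of the action callback is assumed from the declarations above):

static int example_wi_action(struct cfs_workitem *wi)
{
	/* perform one unit of work; returning 0 is assumed to mean
	 * "done, do not reschedule" */
	return 0;
}

static void example_submit(struct cfs_wi_sched *sched,
			   struct cfs_workitem *wi, void *data)
{
	cfs_wi_init(wi, data, example_wi_action);
	cfs_wi_schedule(sched, wi);
}
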
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
index d94b26616..a268ef7aa 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -60,6 +60,7 @@
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
@@ -83,7 +84,6 @@
#include <stdarg.h>
#include "linux-cpu.h"
#include "linux-time.h"
-#include "linux-mem.h"
#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index c04979ae0..f63cb47bc 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -23,7 +23,7 @@
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
- * libcfs/include/libcfs/linux/linux-mem.h
+ * libcfs/include/libcfs/linux/linux-cpu.h
*
* Basic library routines.
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
deleted file mode 100644
index 837eb2274..000000000
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/linux/linux-mem.h
- *
- * Basic library routines.
- */
-
-#ifndef __LIBCFS_LINUX_CFS_MEM_H__
-#define __LIBCFS_LINUX_CFS_MEM_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
-
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/memcontrol.h>
-#include <linux/mm_inline.h>
-
-#ifndef HAVE_LIBCFS_CPT
-/* Need this for cfs_cpt_table */
-#include "../libcfs_cpu.h"
-#endif
-
-#define CFS_PAGE_MASK (~((__u64)PAGE_SIZE-1))
-#define page_index(p) ((p)->index)
-
-#define memory_pressure_get() (current->flags & PF_MEMALLOC)
-#define memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0)
-#define memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0)
-
-#if BITS_PER_LONG == 32
-/* limit to lowmem on 32-bit systems */
-#define NUM_CACHEPAGES \
- min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
-#else
-#define NUM_CACHEPAGES totalram_pages
-#endif
-
-#define DECL_MMSPACE mm_segment_t __oldfs
-#define MMSPACE_OPEN \
- do { __oldfs = get_fs(); set_fs(get_ds()); } while (0)
-#define MMSPACE_CLOSE set_fs(__oldfs)
-
-#endif /* __LINUX_CFS_MEM_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
index ed8764b11..7656b09b8 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
@@ -70,12 +70,12 @@ static inline unsigned long cfs_time_current(void)
static inline long cfs_time_seconds(int seconds)
{
- return ((long)seconds) * HZ;
+ return ((long)seconds) * msecs_to_jiffies(MSEC_PER_SEC);
}
static inline long cfs_duration_sec(long d)
{
- return d / HZ;
+ return d / msecs_to_jiffies(MSEC_PER_SEC);
}
#define cfs_time_current_64 get_jiffies_64
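
msecs_to_jiffies(MSEC_PER_SEC) evaluates to the same tick count as HZ, so the change above is behaviour-preserving; it just spells out the unit conversion. Illustrative use:

	/* a five-second deadline expressed in jiffies */
	unsigned long deadline = cfs_time_current() + cfs_time_seconds(5);
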
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
index 84a19e96e..6ce9accb9 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
@@ -37,10 +37,37 @@
#define LNET_MAX_SHOW_NUM_CPT 128
#define LNET_UNDEFINED_HOPS ((__u32) -1)
+struct lnet_ioctl_config_lnd_cmn_tunables {
+ __u32 lct_version;
+ __u32 lct_peer_timeout;
+ __u32 lct_peer_tx_credits;
+ __u32 lct_peer_rtr_credits;
+ __u32 lct_max_tx_credits;
+};
+
+struct lnet_ioctl_config_o2iblnd_tunables {
+ __u32 lnd_version;
+ __u32 lnd_peercredits_hiw;
+ __u32 lnd_map_on_demand;
+ __u32 lnd_concurrent_sends;
+ __u32 lnd_fmr_pool_size;
+ __u32 lnd_fmr_flush_trigger;
+ __u32 lnd_fmr_cache;
+ __u32 pad;
+};
+
+struct lnet_ioctl_config_lnd_tunables {
+ struct lnet_ioctl_config_lnd_cmn_tunables lt_cmn;
+ union {
+ struct lnet_ioctl_config_o2iblnd_tunables lt_o2ib;
+ } lt_tun_u;
+};
+
struct lnet_ioctl_net_config {
char ni_interfaces[LNET_MAX_INTERFACES][LNET_MAX_STR_LEN];
__u32 ni_status;
__u32 ni_cpts[LNET_MAX_SHOW_NUM_CPT];
+ char cfg_bulk[0];
};
#define LNET_TINY_BUF_IDX 0
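
An illustrative (hypothetical) initialization of the new tunables structures; all field values here are examples, not defaults taken from the code:

struct lnet_ioctl_config_lnd_tunables tun = {
	.lt_cmn = {
		.lct_version		= 0,
		.lct_peer_timeout	= 180,
		.lct_peer_tx_credits	= 8,
		.lct_max_tx_credits	= 256,
	},
	.lt_tun_u.lt_o2ib = {
		.lnd_map_on_demand	= 0,
		.lnd_peercredits_hiw	= 4,
		.lnd_concurrent_sends	= 8,
		.lnd_fmr_pool_size	= 512,
		.lnd_fmr_flush_trigger	= 384,
		.lnd_fmr_cache		= 1,
	},
};
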
@@ -81,7 +108,7 @@ struct lnet_ioctl_config_data {
__s32 net_peer_rtr_credits;
__s32 net_max_tx_credits;
__u32 net_cksum_algo;
- __u32 net_pad;
+ __u32 net_interface_count;
} cfg_net;
struct {
__u32 buf_enable;
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index dfc0208dc..513a8225f 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -463,10 +463,6 @@ int lnet_del_route(__u32 net, lnet_nid_t gw_nid);
void lnet_destroy_routes(void);
int lnet_get_route(int idx, __u32 *net, __u32 *hops,
lnet_nid_t *gateway, __u32 *alive, __u32 *priority);
-int lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid,
- int *peer_timeout, int *peer_tx_credits,
- int *peer_rtr_cr, int *max_tx_credits,
- struct lnet_ioctl_net_config *net_config);
int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg);
void lnet_router_debugfs_init(void);
@@ -478,9 +474,8 @@ int lnet_rtrpools_enable(void);
void lnet_rtrpools_disable(void);
void lnet_rtrpools_free(int keep_pools);
lnet_remotenet_t *lnet_find_net_locked(__u32 net);
-int lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
- __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
- __s32 credits);
+int lnet_dyn_add_ni(lnet_pid_t requested_pid,
+ struct lnet_ioctl_config_data *conf);
int lnet_dyn_del_ni(__u32 net);
int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason);
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index 29c72f8c2..24c4a08e6 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -273,6 +273,8 @@ typedef struct lnet_ni {
int **ni_refs; /* percpt reference count */
time64_t ni_last_alive;/* when I was last alive */
lnet_ni_status_t *ni_status; /* my health status */
+ /* per NI LND tunables */
+ struct lnet_ioctl_config_lnd_tunables *ni_lnd_tunables;
/* equivalent interfaces to use */
char *ni_interfaces[LNET_MAX_INTERFACES];
} lnet_ni_t;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 0d32e6541..6c59f2ff2 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -335,8 +335,8 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
peer->ibp_nid = nid;
peer->ibp_error = 0;
peer->ibp_last_alive = 0;
- peer->ibp_max_frags = IBLND_CFG_RDMA_FRAGS;
- peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits;
+ peer->ibp_max_frags = kiblnd_cfg_rdma_frags(peer->ibp_ni);
+ peer->ibp_queue_depth = ni->ni_peertxcredits;
atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
@@ -1283,65 +1283,86 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
}
}
-struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd,
+struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
int negotiated_nfrags)
{
- __u16 nfrags = (negotiated_nfrags != -1) ?
- negotiated_nfrags : *kiblnd_tunables.kib_map_on_demand;
+ kib_net_t *net = ni->ni_data;
+ kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ __u16 nfrags;
+ int mod;
+
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+ mod = tunables->lnd_map_on_demand;
+ nfrags = (negotiated_nfrags != -1) ? negotiated_nfrags : mod;
LASSERT(hdev->ibh_mrs);
- if (*kiblnd_tunables.kib_map_on_demand > 0 &&
- nfrags <= rd->rd_nfrags)
+ if (mod > 0 && nfrags <= rd->rd_nfrags)
return NULL;
return hdev->ibh_mrs;
}
-static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
+static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
{
- LASSERT(!pool->fpo_map_count);
+ LASSERT(!fpo->fpo_map_count);
- if (pool->fpo_fmr_pool)
- ib_destroy_fmr_pool(pool->fpo_fmr_pool);
+ if (fpo->fpo_is_fmr) {
+ if (fpo->fmr.fpo_fmr_pool)
+ ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
+ } else {
+ struct kib_fast_reg_descriptor *frd, *tmp;
+ int i = 0;
+
+ list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
+ frd_list) {
+ list_del(&frd->frd_list);
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+ i++;
+ }
+ if (i < fpo->fast_reg.fpo_pool_size)
+ CERROR("FastReg pool still has %d regions registered\n",
+ fpo->fast_reg.fpo_pool_size - i);
+ }
- if (pool->fpo_hdev)
- kiblnd_hdev_decref(pool->fpo_hdev);
+ if (fpo->fpo_hdev)
+ kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(pool, sizeof(*pool));
+ LIBCFS_FREE(fpo, sizeof(*fpo));
}
static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
- kib_fmr_pool_t *pool;
+ kib_fmr_pool_t *fpo, *tmp;
- while (!list_empty(head)) {
- pool = list_entry(head->next, kib_fmr_pool_t, fpo_list);
- list_del(&pool->fpo_list);
- kiblnd_destroy_fmr_pool(pool);
+ list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
+ list_del(&fpo->fpo_list);
+ kiblnd_destroy_fmr_pool(fpo);
}
}
-static int kiblnd_fmr_pool_size(int ncpts)
+static int
+kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
+ int ncpts)
{
- int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts;
+ int size = tunables->lnd_fmr_pool_size / ncpts;
return max(IBLND_FMR_POOL, size);
}
-static int kiblnd_fmr_flush_trigger(int ncpts)
+static int
+kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
+ int ncpts)
{
- int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts;
+ int size = tunables->lnd_fmr_flush_trigger / ncpts;
return max(IBLND_FMR_POOL_FLUSH, size);
}
-static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
- kib_fmr_pool_t **pp_fpo)
+static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
{
- /* FMR pool for RDMA */
- kib_dev_t *dev = fps->fps_net->ibn_dev;
- kib_fmr_pool_t *fpo;
struct ib_fmr_pool_param param = {
.max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE,
.page_shift = PAGE_SHIFT,
@@ -1351,7 +1372,78 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
.dirty_watermark = fps->fps_flush_trigger,
.flush_function = NULL,
.flush_arg = NULL,
- .cache = !!*kiblnd_tunables.kib_fmr_cache};
+ .cache = !!fps->fps_cache };
+ int rc = 0;
+
+ fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd,
+ &param);
+ if (IS_ERR(fpo->fmr.fpo_fmr_pool)) {
+ rc = PTR_ERR(fpo->fmr.fpo_fmr_pool);
+ if (rc != -ENOSYS)
+ CERROR("Failed to create FMR pool: %d\n", rc);
+ else
+ CERROR("FMRs are not supported\n");
+ }
+
+ return rc;
+}
+
+static int kiblnd_alloc_freg_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
+{
+ struct kib_fast_reg_descriptor *frd, *tmp;
+ int i, rc;
+
+ INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
+ fpo->fast_reg.fpo_pool_size = 0;
+ for (i = 0; i < fps->fps_pool_size; i++) {
+ LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
+ sizeof(*frd));
+ if (!frd) {
+ CERROR("Failed to allocate a new fast_reg descriptor\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
+ IB_MR_TYPE_MEM_REG,
+ LNET_MAX_PAYLOAD / PAGE_SIZE);
+ if (IS_ERR(frd->frd_mr)) {
+ rc = PTR_ERR(frd->frd_mr);
+ CERROR("Failed to allocate ib_alloc_mr: %d\n", rc);
+ frd->frd_mr = NULL;
+ goto out_middle;
+ }
+
+ frd->frd_valid = true;
+
+ list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
+ fpo->fast_reg.fpo_pool_size++;
+ }
+
+ return 0;
+
+out_middle:
+ if (frd->frd_mr)
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+
+out:
+ list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
+ frd_list) {
+ list_del(&frd->frd_list);
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+ }
+
+ return rc;
+}
+
+static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
+ kib_fmr_pool_t **pp_fpo)
+{
+ kib_dev_t *dev = fps->fps_net->ibn_dev;
+ struct ib_device_attr *dev_attr;
+ kib_fmr_pool_t *fpo;
int rc;
LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
@@ -1359,22 +1451,41 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
return -ENOMEM;
fpo->fpo_hdev = kiblnd_current_hdev(dev);
-
- fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, &param);
- if (IS_ERR(fpo->fpo_fmr_pool)) {
- rc = PTR_ERR(fpo->fpo_fmr_pool);
- CERROR("Failed to create FMR pool: %d\n", rc);
-
- kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(fpo, sizeof(*fpo));
- return rc;
+ dev_attr = &fpo->fpo_hdev->ibh_ibdev->attrs;
+
+ /* Check for FMR or FastReg support */
+ fpo->fpo_is_fmr = 0;
+ if (fpo->fpo_hdev->ibh_ibdev->alloc_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->dealloc_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->map_phys_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->unmap_fmr) {
+ LCONSOLE_INFO("Using FMR for registration\n");
+ fpo->fpo_is_fmr = 1;
+ } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+ LCONSOLE_INFO("Using FastReg for registration\n");
+ } else {
+ rc = -ENOSYS;
+ LCONSOLE_ERROR_MSG(rc, "IB device does not support FMRs nor FastRegs, can't register memory\n");
+ goto out_fpo;
}
+ if (fpo->fpo_is_fmr)
+ rc = kiblnd_alloc_fmr_pool(fps, fpo);
+ else
+ rc = kiblnd_alloc_freg_pool(fps, fpo);
+ if (rc)
+ goto out_fpo;
+
fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- fpo->fpo_owner = fps;
+ fpo->fpo_owner = fps;
*pp_fpo = fpo;
return 0;
+
+out_fpo:
+ kiblnd_hdev_decref(fpo->fpo_hdev);
+ LIBCFS_FREE(fpo, sizeof(*fpo));
+ return rc;
}
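
The registration-method probe added above boils down to two capability checks; a hypothetical restatement (the ib_device members are exactly the ones tested in the hunk):

static bool example_supports_fmr(struct ib_device *dev)
{
	return dev->alloc_fmr && dev->dealloc_fmr &&
	       dev->map_phys_fmr && dev->unmap_fmr;
}

static bool example_supports_fastreg(struct ib_device *dev)
{
	return dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS;
}

FMR is preferred when the verbs provider implements the FMR entry points; otherwise FastReg is used when memory-management extensions are advertised, and -ENOSYS is returned when neither is available.
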
static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
@@ -1407,9 +1518,10 @@ static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
}
}
-static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
- kib_net_t *net, int pool_size,
- int flush_trigger)
+static int
+kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
+ kib_net_t *net,
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables)
{
kib_fmr_pool_t *fpo;
int rc;
@@ -1418,8 +1530,11 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
fps->fps_net = net;
fps->fps_cpt = cpt;
- fps->fps_pool_size = pool_size;
- fps->fps_flush_trigger = flush_trigger;
+
+ fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts);
+ fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts);
+ fps->fps_cache = tunables->lnd_fmr_cache;
+
spin_lock_init(&fps->fps_lock);
INIT_LIST_HEAD(&fps->fps_pool_list);
INIT_LIST_HEAD(&fps->fps_failed_pool_list);
@@ -1440,25 +1555,64 @@ static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
return cfs_time_aftereq(now, fpo->fpo_deadline);
}
+static int
+kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
+{
+ __u64 *pages = tx->tx_pages;
+ kib_hca_dev_t *hdev;
+ int npages;
+ int size;
+ int i;
+
+ hdev = tx->tx_pool->tpo_hdev;
+
+ for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
+ for (size = 0; size < rd->rd_frags[i].rf_nob;
+ size += hdev->ibh_page_size) {
+ pages[npages++] = (rd->rd_frags[i].rf_addr &
+ hdev->ibh_page_mask) + size;
+ }
+ }
+
+ return npages;
+}
+
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
{
LIST_HEAD(zombies);
kib_fmr_pool_t *fpo = fmr->fmr_pool;
- kib_fmr_poolset_t *fps = fpo->fpo_owner;
+ kib_fmr_poolset_t *fps;
unsigned long now = cfs_time_current();
kib_fmr_pool_t *tmp;
int rc;
- rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
- LASSERT(!rc);
+ if (!fpo)
+ return;
- if (status) {
- rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
- LASSERT(!rc);
- }
+ fps = fpo->fpo_owner;
+ if (fpo->fpo_is_fmr) {
+ if (fmr->fmr_pfmr) {
+ rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
+ LASSERT(!rc);
+ fmr->fmr_pfmr = NULL;
+ }
+
+ if (status) {
+ rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool);
+ LASSERT(!rc);
+ }
+ } else {
+ struct kib_fast_reg_descriptor *frd = fmr->fmr_frd;
+ if (frd) {
+ frd->frd_valid = false;
+ spin_lock(&fps->fps_lock);
+ list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
+ spin_unlock(&fps->fps_lock);
+ fmr->fmr_frd = NULL;
+ }
+ }
fmr->fmr_pool = NULL;
- fmr->fmr_pfmr = NULL;
spin_lock(&fps->fps_lock);
fpo->fpo_map_count--; /* decref the pool */
@@ -1479,11 +1633,15 @@ void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
kiblnd_destroy_fmr_pool_list(&zombies);
}
-int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
- __u64 iov, kib_fmr_t *fmr)
+int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
+ kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
+ kib_fmr_t *fmr)
{
- struct ib_pool_fmr *pfmr;
+ __u64 *pages = tx->tx_pages;
+ bool is_rx = (rd != tx->tx_rd);
+ bool tx_pages_mapped = 0;
kib_fmr_pool_t *fpo;
+ int npages = 0;
__u64 version;
int rc;
@@ -1493,21 +1651,95 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
fpo->fpo_map_count++;
- spin_unlock(&fps->fps_lock);
- pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
- pages, npages, iov);
- if (likely(!IS_ERR(pfmr))) {
- fmr->fmr_pool = fpo;
- fmr->fmr_pfmr = pfmr;
- return 0;
+ if (fpo->fpo_is_fmr) {
+ struct ib_pool_fmr *pfmr;
+
+ spin_unlock(&fps->fps_lock);
+
+ if (!tx_pages_mapped) {
+ npages = kiblnd_map_tx_pages(tx, rd);
+ tx_pages_mapped = 1;
+ }
+
+ pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
+ pages, npages, iov);
+ if (likely(!IS_ERR(pfmr))) {
+ fmr->fmr_key = is_rx ? pfmr->fmr->rkey :
+ pfmr->fmr->lkey;
+ fmr->fmr_frd = NULL;
+ fmr->fmr_pfmr = pfmr;
+ fmr->fmr_pool = fpo;
+ return 0;
+ }
+ rc = PTR_ERR(pfmr);
+ } else {
+ if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
+ struct kib_fast_reg_descriptor *frd;
+ struct ib_reg_wr *wr;
+ struct ib_mr *mr;
+ int n;
+
+ frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
+ struct kib_fast_reg_descriptor,
+ frd_list);
+ list_del(&frd->frd_list);
+ spin_unlock(&fps->fps_lock);
+
+ mr = frd->frd_mr;
+
+ if (!frd->frd_valid) {
+ __u32 key = is_rx ? mr->rkey : mr->lkey;
+ struct ib_send_wr *inv_wr;
+
+ inv_wr = &frd->frd_inv_wr;
+ memset(inv_wr, 0, sizeof(*inv_wr));
+ inv_wr->opcode = IB_WR_LOCAL_INV;
+ inv_wr->wr_id = IBLND_WID_MR;
+ inv_wr->ex.invalidate_rkey = key;
+
+ /* Bump the key */
+ key = ib_inc_rkey(key);
+ ib_update_fast_reg_key(mr, key);
+ }
+
+ n = ib_map_mr_sg(mr, tx->tx_frags,
+ tx->tx_nfrags, NULL, PAGE_SIZE);
+ if (unlikely(n != tx->tx_nfrags)) {
+ CERROR("Failed to map mr %d/%d elements\n",
+ n, tx->tx_nfrags);
+ return n < 0 ? n : -EINVAL;
+ }
+
+ mr->iova = iov;
+
+ /* Prepare FastReg WR */
+ wr = &frd->frd_fastreg_wr;
+ memset(wr, 0, sizeof(*wr));
+ wr->wr.opcode = IB_WR_REG_MR;
+ wr->wr.wr_id = IBLND_WID_MR;
+ wr->wr.num_sge = 0;
+ wr->wr.send_flags = 0;
+ wr->mr = mr;
+ wr->key = is_rx ? mr->rkey : mr->lkey;
+ wr->access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+
+ fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
+ fmr->fmr_frd = frd;
+ fmr->fmr_pfmr = NULL;
+ fmr->fmr_pool = fpo;
+ return 0;
+ }
+ spin_unlock(&fps->fps_lock);
+ rc = -EBUSY;
}
spin_lock(&fps->fps_lock);
fpo->fpo_map_count--;
- if (PTR_ERR(pfmr) != -EAGAIN) {
+ if (rc != -EAGAIN) {
spin_unlock(&fps->fps_lock);
- return PTR_ERR(pfmr);
+ return rc;
}
/* EAGAIN and ... */
@@ -1932,25 +2164,28 @@ static void kiblnd_net_fini_pools(kib_net_t *net)
}
}
-static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
+static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
+ int ncpts)
{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
unsigned long flags;
int cpt;
- int rc = 0;
+ int rc;
int i;
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (!*kiblnd_tunables.kib_map_on_demand) {
+ if (!tunables->lnd_map_on_demand) {
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
goto create_tx_pool;
}
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (*kiblnd_tunables.kib_fmr_pool_size <
- *kiblnd_tunables.kib_ntx / 4) {
+ if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) {
CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
- *kiblnd_tunables.kib_fmr_pool_size,
+ tunables->lnd_fmr_pool_size,
*kiblnd_tunables.kib_ntx / 4);
rc = -EINVAL;
goto failed;
@@ -1965,8 +2200,11 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
/*
* premapping can fail if ibd_nmr > 1, so we always create
* FMR pool and map-on-demand if premapping failed
+ *
+ * cfs_percpt_alloc creates an array of struct kib_fmr_poolset.
+ * The number of struct kib_fmr_poolsets created is equal to the
+ * number of CPTs that exist, i.e. net->ibn_fmr_ps[cpt].
*/
-
net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(kib_fmr_poolset_t));
if (!net->ibn_fmr_ps) {
@@ -1977,9 +2215,8 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
for (i = 0; i < ncpts; i++) {
cpt = !cpts ? i : cpts[i];
- rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
- kiblnd_fmr_pool_size(ncpts),
- kiblnd_fmr_flush_trigger(ncpts));
+ rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts,
+ net, tunables);
if (rc) {
CERROR("Can't initialize FMR pool for CPT %d: %d\n",
cpt, rc);
@@ -1991,6 +2228,11 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
LASSERT(i == ncpts);
create_tx_pool:
+ /*
+ * cfs_percpt_alloc creates an array of struct kib_tx_poolset.
+ * The number of struct kib_tx_poolsets created is equal to the
+ * number of CPTs that exist, i.e. net->ibn_tx_ps[cpt].
+ */
net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(kib_tx_poolset_t));
if (!net->ibn_tx_ps) {
@@ -2694,10 +2936,9 @@ static int kiblnd_startup(lnet_ni_t *ni)
net->ibn_incarnation = tv.tv_sec * USEC_PER_SEC +
tv.tv_nsec / NSEC_PER_USEC;
- ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout;
- ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
- ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits;
- ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
+ rc = kiblnd_tunables_setup(ni);
+ if (rc)
+ goto net_failed;
if (ni->ni_interfaces[0]) {
/* Use the IPoIB interface specified in 'networks=' */
@@ -2736,7 +2977,7 @@ static int kiblnd_startup(lnet_ni_t *ni)
if (rc)
goto failed;
- rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
+ rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts);
if (rc) {
CERROR("Failed to initialize NI pools: %d\n", rc);
goto failed;
@@ -2779,8 +3020,6 @@ static void __exit ko2iblnd_exit(void)
static int __init ko2iblnd_init(void)
{
- int rc;
-
CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
CLASSERT(offsetof(kib_msg_t,
ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
@@ -2789,9 +3028,7 @@ static int __init ko2iblnd_init(void)
ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
<= IBLND_MSG_SIZE);
- rc = kiblnd_tunables_init();
- if (rc)
- return rc;
+ kiblnd_tunables_init();
lnet_register_lnd(&the_o2iblnd);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index bfcbdd167..b22984fd9 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -87,22 +87,10 @@ typedef struct {
int *kib_timeout; /* comms timeout (seconds) */
int *kib_keepalive; /* keepalive timeout (seconds) */
int *kib_ntx; /* # tx descs */
- int *kib_credits; /* # concurrent sends */
- int *kib_peertxcredits; /* # concurrent sends to 1 peer */
- int *kib_peerrtrcredits; /* # per-peer router buffer credits */
- int *kib_peercredits_hiw; /* # when eagerly to return credits */
- int *kib_peertimeout; /* seconds to consider peer dead */
char **kib_default_ipif; /* default IPoIB interface */
int *kib_retry_count;
int *kib_rnr_retry_count;
- int *kib_concurrent_sends; /* send work queue sizing */
int *kib_ib_mtu; /* IB MTU */
- int *kib_map_on_demand; /* map-on-demand if RD has more */
- /* fragments than this value, 0 */
- /* disable map-on-demand */
- int *kib_fmr_pool_size; /* # FMRs in pool */
- int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
- int *kib_fmr_cache; /* enable FMR pool cache? */
int *kib_require_priv_port; /* accept only privileged ports */
int *kib_use_priv_port; /* use privileged port for active connect */
int *kib_nscheds; /* # threads on each CPT */
@@ -116,43 +104,21 @@ extern kib_tunables_t kiblnd_tunables;
#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1) /* Max # of peer credits */
-#define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_MSG_QUEUE_SIZE_V1 : \
- *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
-#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_CREDIT_HIGHWATER_V1 : \
- *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
+/* when eagerly to return credits */
+#define IBLND_CREDITS_HIGHWATER(t, v) ((v) == IBLND_MSG_VERSION_1 ? \
+ IBLND_CREDIT_HIGHWATER_V1 : \
+ t->lnd_peercredits_hiw)
#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(&init_net, \
cb, dev, \
ps, qpt)
-static inline int
-kiblnd_concurrent_sends_v1(void)
-{
- if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
- return IBLND_MSG_QUEUE_SIZE_V1 * 2;
-
- if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
- return IBLND_MSG_QUEUE_SIZE_V1 / 2;
-
- return *kiblnd_tunables.kib_concurrent_sends;
-}
-
-#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ? \
- kiblnd_concurrent_sends_v1() : \
- *kiblnd_tunables.kib_concurrent_sends)
/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)
#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
-#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand ? \
- *kiblnd_tunables.kib_map_on_demand : \
- IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
-#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
/************************/
/* derived constants... */
@@ -171,7 +137,8 @@ kiblnd_concurrent_sends_v1(void)
/* WRs and CQEs (per connection) */
#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c)
#define IBLND_SEND_WRS(c) \
- ((c->ibc_max_frags + 1) * IBLND_CONCURRENT_SENDS(c->ibc_version))
+ ((c->ibc_max_frags + 1) * kiblnd_concurrent_sends(c->ibc_version, \
+ c->ibc_peer->ibp_ni))
#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c))
struct kib_hca_dev;
@@ -286,24 +253,44 @@ typedef struct {
int fps_cpt; /* CPT id */
int fps_pool_size;
int fps_flush_trigger;
+ int fps_cache;
int fps_increasing; /* is allocating new pool */
unsigned long fps_next_retry; /* time stamp for retry if*/
/* failed to allocate */
} kib_fmr_poolset_t;
+struct kib_fast_reg_descriptor { /* For fast registration */
+ struct list_head frd_list;
+ struct ib_send_wr frd_inv_wr;
+ struct ib_reg_wr frd_fastreg_wr;
+ struct ib_mr *frd_mr;
+ bool frd_valid;
+};
+
typedef struct {
struct list_head fpo_list; /* chain on pool list */
struct kib_hca_dev *fpo_hdev; /* device for this pool */
kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
- struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
+ union {
+ struct {
+ struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
+ } fmr;
+ struct { /* For fast registration */
+ struct list_head fpo_pool_list;
+ int fpo_pool_size;
+ } fast_reg;
+ };
unsigned long fpo_deadline; /* deadline of this pool */
int fpo_failed; /* fmr pool is failed */
int fpo_map_count; /* # of mapped FMR */
+ int fpo_is_fmr;
} kib_fmr_pool_t;
typedef struct {
- struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
- kib_fmr_pool_t *fmr_pool; /* pool of FMR */
+ kib_fmr_pool_t *fmr_pool; /* pool of FMR */
+ struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
+ struct kib_fast_reg_descriptor *fmr_frd;
+ u32 fmr_key;
} kib_fmr_t;
typedef struct kib_net {
@@ -615,6 +602,48 @@ extern kib_data_t kiblnd_data;
void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
+int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
+
+/* max # of fragments configured by user */
+static inline int
+kiblnd_cfg_rdma_frags(struct lnet_ni *ni)
+{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ int mod;
+
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+ mod = tunables->lnd_map_on_demand;
+ return mod ? mod : IBLND_MAX_RDMA_FRAGS;
+}
+
+static inline int
+kiblnd_rdma_frags(int version, struct lnet_ni *ni)
+{
+ return version == IBLND_MSG_VERSION_1 ?
+ IBLND_MAX_RDMA_FRAGS :
+ kiblnd_cfg_rdma_frags(ni);
+}
+
+static inline int
+kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
+{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ int concurrent_sends;
+
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+ concurrent_sends = tunables->lnd_concurrent_sends;
+
+ if (version == IBLND_MSG_VERSION_1) {
+ if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
+ return IBLND_MSG_QUEUE_SIZE_V1 * 2;
+
+ if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
+ return IBLND_MSG_QUEUE_SIZE_V1 / 2;
+ }
+
+ return concurrent_sends;
+}
+
static inline void
kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
{
@@ -737,10 +766,14 @@ kiblnd_send_keepalive(kib_conn_t *conn)
static inline int
kiblnd_need_noop(kib_conn_t *conn)
{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
if (conn->ibc_outstanding_credits <
- IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
+ IBLND_CREDITS_HIGHWATER(tunables, conn->ibc_version) &&
!kiblnd_send_keepalive(conn))
return 0; /* No need to send NOOP */
@@ -799,7 +832,8 @@ kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
#define IBLND_WID_TX 1
#define IBLND_WID_RX 2
#define IBLND_WID_RDMA 3
-#define IBLND_WID_MASK 3UL
+#define IBLND_WID_MR 4
+#define IBLND_WID_MASK 7UL
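
Work-request ids pack a pointer and a type tag into one __u64; adding IBLND_WID_MR (4) means the tag no longer fits in two bits, so the mask grows from 3UL to 7UL and tagged pointers must be at least 8-byte aligned. Illustration (tx is a kib_tx_t pointer as elsewhere in this file):

	__u64 wreqid = kiblnd_ptr2wreqid(tx, IBLND_WID_MR);
	int type = (int)(wreqid & IBLND_WID_MASK);	/* == IBLND_WID_MR */
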
static inline __u64
kiblnd_ptr2wreqid(void *ptr, int type)
@@ -947,20 +981,20 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
-struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
- kib_rdma_desc_t *rd,
+struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
int negotiated_nfrags);
void kiblnd_map_rx_descs(kib_conn_t *conn);
void kiblnd_unmap_rx_descs(kib_conn_t *conn);
void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
-int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
- int npages, __u64 iov, kib_fmr_t *fmr);
+int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
+ kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
+ kib_fmr_t *fmr);
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
-int kiblnd_tunables_init(void);
-void kiblnd_tunables_fini(void);
+int kiblnd_tunables_setup(struct lnet_ni *ni);
+void kiblnd_tunables_init(void);
int kiblnd_connd(void *arg);
int kiblnd_scheduler(void *arg);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 2323e8d3a..845e49a52 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -561,36 +561,23 @@ kiblnd_kvaddr_to_page(unsigned long vaddr)
}
static int
-kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
+kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
{
kib_hca_dev_t *hdev;
- __u64 *pages = tx->tx_pages;
kib_fmr_poolset_t *fps;
- int npages;
- int size;
int cpt;
int rc;
- int i;
LASSERT(tx->tx_pool);
LASSERT(tx->tx_pool->tpo_pool.po_owner);
hdev = tx->tx_pool->tpo_hdev;
-
- for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
- for (size = 0; size < rd->rd_frags[i].rf_nob;
- size += hdev->ibh_page_size) {
- pages[npages++] = (rd->rd_frags[i].rf_addr &
- hdev->ibh_page_mask) + size;
- }
- }
-
cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
fps = net->ibn_fmr_ps[cpt];
- rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr);
+ rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->fmr);
if (rc) {
- CERROR("Can't map %d pages: %d\n", npages, rc);
+ CERROR("Can't map %u bytes: %d\n", nob, rc);
return rc;
}
@@ -598,8 +585,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
* If rd is not tx_rd, it's going to get sent to a peer, who will need
* the rkey
*/
- rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey :
- tx->fmr.fmr_pfmr->fmr->lkey;
+ rd->rd_key = tx->fmr.fmr_key;
rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
rd->rd_frags[0].rf_nob = nob;
rd->rd_nfrags = 1;
@@ -613,10 +599,8 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
LASSERT(net);
- if (net->ibn_fmr_ps && tx->fmr.fmr_pfmr) {
+ if (net->ibn_fmr_ps)
kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
- tx->fmr.fmr_pfmr = NULL;
- }
if (tx->tx_nfrags) {
kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
@@ -628,8 +612,8 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
int nfrags)
{
- kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
kib_net_t *net = ni->ni_data;
+ kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
struct ib_mr *mr = NULL;
__u32 nob;
int i;
@@ -652,7 +636,7 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
nob += rd->rd_frags[i].rf_nob;
}
- mr = kiblnd_find_rd_dma_mr(hdev, rd, tx->tx_conn ?
+ mr = kiblnd_find_rd_dma_mr(ni, rd, tx->tx_conn ?
tx->tx_conn->ibc_max_frags : -1);
if (mr) {
/* found pre-mapping MR */
@@ -704,7 +688,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
sg_set_page(sg, page, fragnob, page_offset);
- sg++;
+ sg = sg_next(sg);
if (offset + fragnob < iov->iov_len) {
offset += fragnob;
@@ -748,7 +732,7 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
sg_set_page(sg, kiov->kiov_page, fragnob,
kiov->kiov_offset + offset);
- sg++;
+ sg = sg_next(sg);
offset = 0;
kiov++;
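
Both sg++ to sg_next() conversions above matter because scatterlists can be chained: plain pointer arithmetic walks off the end of a chunk, while sg_next() follows the chain link. A sketch of a safe walk (tx is assumed to be a kib_tx_t as elsewhere in this file):

	struct scatterlist *sg;
	int i;

	for_each_sg(tx->tx_frags, sg, tx->tx_nfrags, i) {
		/* 'sg' is a valid entry on every iteration, even when
		 * the list is chained across chunks */
	}
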
@@ -765,6 +749,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
{
kib_msg_t *msg = tx->tx_msg;
kib_peer_t *peer = conn->ibc_peer;
+ struct lnet_ni *ni = peer->ibp_ni;
int ver = conn->ibc_version;
int rc;
int done;
@@ -780,7 +765,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
LASSERT(conn->ibc_credits >= 0);
LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);
- if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
+ if (conn->ibc_nsends_posted == kiblnd_concurrent_sends(ver, ni)) {
/* tx completions outstanding... */
CDEBUG(D_NET, "%s: posted enough\n",
libcfs_nid2str(peer->ibp_nid));
@@ -851,14 +836,26 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
/* close_conn will launch failover */
rc = -ENETDOWN;
} else {
- struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
+ struct kib_fast_reg_descriptor *frd = tx->fmr.fmr_frd;
+ struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
+ struct ib_send_wr *wrq = &tx->tx_wrq[0].wr;
+
+ if (frd) {
+ if (!frd->frd_valid) {
+ wrq = &frd->frd_inv_wr;
+ wrq->next = &frd->frd_fastreg_wr.wr;
+ } else {
+ wrq = &frd->frd_fastreg_wr.wr;
+ }
+ frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
+ }
- LASSERTF(wrq->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
+ LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
"bad wr_id %llx, opc %d, flags %d, peer: %s\n",
- wrq->wr_id, wrq->opcode, wrq->send_flags,
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- wrq = NULL;
- rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &wrq);
+ bad->wr_id, bad->opcode, bad->send_flags,
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ bad = NULL;
+ rc = ib_post_send(conn->ibc_cmid->qp, wrq, &bad);
}
conn->ibc_last_send = jiffies;
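
The posting path above now hands ib_post_send() the head of a small chain of work requests rather than tx->tx_wrq directly. A hypothetical helper distilling the chaining order (LOCAL_INV only when the cached key is stale, then REG_MR, then the tx's own WRs):

static struct ib_send_wr *
example_chain_head(struct kib_fast_reg_descriptor *frd,
		   struct ib_send_wr *tx_wrs)
{
	struct ib_send_wr *head = tx_wrs;

	if (frd) {
		frd->frd_fastreg_wr.wr.next = tx_wrs;
		head = &frd->frd_fastreg_wr.wr;
		if (!frd->frd_valid) {
			frd->frd_inv_wr.next = head;
			head = &frd->frd_inv_wr;
		}
	}
	return head;	/* what gets passed to ib_post_send() */
}
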
@@ -919,7 +916,7 @@ kiblnd_check_sends(kib_conn_t *conn)
spin_lock(&conn->ibc_lock);
- LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
+ LASSERT(conn->ibc_nsends_posted <= kiblnd_concurrent_sends(ver, ni));
LASSERT(!IBLND_OOB_CAPABLE(ver) ||
conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
LASSERT(conn->ibc_reserved_credits >= 0);
@@ -1066,7 +1063,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
kib_msg_t *ibmsg = tx->tx_msg;
kib_rdma_desc_t *srcrd = tx->tx_rd;
struct ib_sge *sge = &tx->tx_sge[0];
- struct ib_rdma_wr *wrq = &tx->tx_wrq[0], *next;
+ struct ib_rdma_wr *wrq, *next;
int rc = resid;
int srcidx = 0;
int dstidx = 0;
@@ -2333,11 +2330,11 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
- IBLND_MSG_QUEUE_SIZE(version)) {
+ kiblnd_msg_queue_size(version, ni)) {
CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n",
libcfs_nid2str(nid),
reqmsg->ibm_u.connparams.ibcp_queue_depth,
- IBLND_MSG_QUEUE_SIZE(version));
+ kiblnd_msg_queue_size(version, ni));
if (version == IBLND_MSG_VERSION)
rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
@@ -2346,24 +2343,24 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
if (reqmsg->ibm_u.connparams.ibcp_max_frags >
- IBLND_RDMA_FRAGS(version)) {
+ kiblnd_rdma_frags(version, ni)) {
CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n",
libcfs_nid2str(nid), version,
reqmsg->ibm_u.connparams.ibcp_max_frags,
- IBLND_RDMA_FRAGS(version));
+ kiblnd_rdma_frags(version, ni));
if (version >= IBLND_MSG_VERSION)
rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
goto failed;
} else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
- IBLND_RDMA_FRAGS(version) && !net->ibn_fmr_ps) {
+ kiblnd_rdma_frags(version, ni) && !net->ibn_fmr_ps) {
CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n",
libcfs_nid2str(nid), version,
reqmsg->ibm_u.connparams.ibcp_max_frags,
- IBLND_RDMA_FRAGS(version));
+ kiblnd_rdma_frags(version, ni));
- if (version >= IBLND_MSG_VERSION)
+ if (version == IBLND_MSG_VERSION)
rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
goto failed;
@@ -2524,12 +2521,13 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
return 0;
failed:
- if (ni)
+ if (ni) {
+ rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
+ rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
lnet_ni_decref(ni);
+ }
rej.ibr_version = version;
- rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
- rej.ibr_cp.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
kiblnd_reject(cmid, &rej);
return -ECONNREFUSED;
@@ -2580,12 +2578,15 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version,
reason = "Unknown";
break;
- case IBLND_REJECT_RDMA_FRAGS:
+ case IBLND_REJECT_RDMA_FRAGS: {
+ struct lnet_ioctl_config_lnd_tunables *tunables;
+
if (!cp) {
reason = "can't negotiate max frags";
goto out;
}
- if (!*kiblnd_tunables.kib_map_on_demand) {
+ tunables = peer->ibp_ni->ni_lnd_tunables;
+ if (!tunables->lt_tun_u.lt_o2ib.lnd_map_on_demand) {
reason = "map_on_demand must be enabled";
goto out;
}
@@ -2597,7 +2598,7 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version,
peer->ibp_max_frags = frag_num;
reason = "rdma fragments";
break;
-
+ }
case IBLND_REJECT_MSG_QUEUE_SIZE:
if (!cp) {
reason = "can't negotiate queue depth";
@@ -3430,6 +3431,12 @@ kiblnd_complete(struct ib_wc *wc)
default:
LBUG();
+ case IBLND_WID_MR:
+ if (wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR)
+ CNETERR("FastReg failed: %d\n", wc->status);
+ break;
+
case IBLND_WID_RDMA:
/*
* We only get RDMA completion notification if it fails. All
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index b4607dad3..f8fdd4ae3 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -152,74 +152,135 @@ kib_tunables_t kiblnd_tunables = {
.kib_timeout = &timeout,
.kib_keepalive = &keepalive,
.kib_ntx = &ntx,
- .kib_credits = &credits,
- .kib_peertxcredits = &peer_credits,
- .kib_peercredits_hiw = &peer_credits_hiw,
- .kib_peerrtrcredits = &peer_buffer_credits,
- .kib_peertimeout = &peer_timeout,
.kib_default_ipif = &ipif_name,
.kib_retry_count = &retry_count,
.kib_rnr_retry_count = &rnr_retry_count,
- .kib_concurrent_sends = &concurrent_sends,
.kib_ib_mtu = &ib_mtu,
- .kib_map_on_demand = &map_on_demand,
- .kib_fmr_pool_size = &fmr_pool_size,
- .kib_fmr_flush_trigger = &fmr_flush_trigger,
- .kib_fmr_cache = &fmr_cache,
.kib_require_priv_port = &require_privileged_port,
.kib_use_priv_port = &use_privileged_port,
.kib_nscheds = &nscheds
};
-int
-kiblnd_tunables_init(void)
+static struct lnet_ioctl_config_o2iblnd_tunables default_tunables;
+
+/* # messages/RDMAs in-flight */
+int kiblnd_msg_queue_size(int version, lnet_ni_t *ni)
{
+ if (version == IBLND_MSG_VERSION_1)
+ return IBLND_MSG_QUEUE_SIZE_V1;
+ else if (ni)
+ return ni->ni_peertxcredits;
+ else
+ return peer_credits;
+}
+
+int kiblnd_tunables_setup(struct lnet_ni *ni)
+{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+
+ /*
+ * if no tunables were specified, set them up with default values
+ */
+ if (!ni->ni_lnd_tunables) {
+ LIBCFS_ALLOC(ni->ni_lnd_tunables,
+ sizeof(*ni->ni_lnd_tunables));
+ if (!ni->ni_lnd_tunables)
+ return -ENOMEM;
+
+ memcpy(&ni->ni_lnd_tunables->lt_tun_u.lt_o2ib,
+ &default_tunables, sizeof(*tunables));
+ }
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+
+ /* Current API version */
+ tunables->lnd_version = 0;
+
if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
*kiblnd_tunables.kib_ib_mtu);
return -EINVAL;
}
- if (*kiblnd_tunables.kib_peertxcredits < IBLND_CREDITS_DEFAULT)
- *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_DEFAULT;
+ if (!ni->ni_peertimeout)
+ ni->ni_peertimeout = peer_timeout;
+
+ if (!ni->ni_maxtxcredits)
+ ni->ni_maxtxcredits = credits;
+
+ if (!ni->ni_peertxcredits)
+ ni->ni_peertxcredits = peer_credits;
- if (*kiblnd_tunables.kib_peertxcredits > IBLND_CREDITS_MAX)
- *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX;
+ if (!ni->ni_peerrtrcredits)
+ ni->ni_peerrtrcredits = peer_buffer_credits;
- if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits)
- *kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits;
+ if (ni->ni_peertxcredits < IBLND_CREDITS_DEFAULT)
+ ni->ni_peertxcredits = IBLND_CREDITS_DEFAULT;
- if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2)
- *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits / 2;
+ if (ni->ni_peertxcredits > IBLND_CREDITS_MAX)
+ ni->ni_peertxcredits = IBLND_CREDITS_MAX;
- if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peertxcredits)
- *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits - 1;
+ if (ni->ni_peertxcredits > credits)
+ ni->ni_peertxcredits = credits;
- if (*kiblnd_tunables.kib_map_on_demand < 0 ||
- *kiblnd_tunables.kib_map_on_demand > IBLND_MAX_RDMA_FRAGS)
- *kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */
+ if (!tunables->lnd_peercredits_hiw)
+ tunables->lnd_peercredits_hiw = peer_credits_hiw;
- if (*kiblnd_tunables.kib_map_on_demand == 1)
- *kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */
+ if (tunables->lnd_peercredits_hiw < ni->ni_peertxcredits / 2)
+ tunables->lnd_peercredits_hiw = ni->ni_peertxcredits / 2;
- if (!*kiblnd_tunables.kib_concurrent_sends) {
- if (*kiblnd_tunables.kib_map_on_demand > 0 &&
- *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
- *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
- else
- *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits);
+ if (tunables->lnd_peercredits_hiw >= ni->ni_peertxcredits)
+ tunables->lnd_peercredits_hiw = ni->ni_peertxcredits - 1;
+
+ if (tunables->lnd_map_on_demand < 0 ||
+ tunables->lnd_map_on_demand > IBLND_MAX_RDMA_FRAGS) {
+ /* disable map-on-demand */
+ tunables->lnd_map_on_demand = 0;
+ }
+
+ if (tunables->lnd_map_on_demand == 1) {
+ /* it doesn't make sense to create a map for only one fragment */
+ tunables->lnd_map_on_demand = 2;
+ }
+
+ if (!tunables->lnd_concurrent_sends) {
+ if (tunables->lnd_map_on_demand > 0 &&
+ tunables->lnd_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) {
+ tunables->lnd_concurrent_sends =
+ ni->ni_peertxcredits * 2;
+ } else {
+ tunables->lnd_concurrent_sends = ni->ni_peertxcredits;
+ }
}
- if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peertxcredits * 2)
- *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits * 2;
+ if (tunables->lnd_concurrent_sends > ni->ni_peertxcredits * 2)
+ tunables->lnd_concurrent_sends = ni->ni_peertxcredits * 2;
- if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits / 2)
- *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2;
+ if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits / 2)
+ tunables->lnd_concurrent_sends = ni->ni_peertxcredits / 2;
- if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) {
+ if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits) {
CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n",
- *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
+ tunables->lnd_concurrent_sends, ni->ni_peertxcredits);
}
+ if (!tunables->lnd_fmr_pool_size)
+ tunables->lnd_fmr_pool_size = fmr_pool_size;
+ if (!tunables->lnd_fmr_flush_trigger)
+ tunables->lnd_fmr_flush_trigger = fmr_flush_trigger;
+ if (!tunables->lnd_fmr_cache)
+ tunables->lnd_fmr_cache = fmr_cache;
+
return 0;
}
+
+void kiblnd_tunables_init(void)
+{
+ default_tunables.lnd_version = 0;
+ default_tunables.lnd_peercredits_hiw = peer_credits_hiw;
+ default_tunables.lnd_map_on_demand = map_on_demand;
+ default_tunables.lnd_concurrent_sends = concurrent_sends;
+ default_tunables.lnd_fmr_pool_size = fmr_pool_size;
+ default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
+ default_tunables.lnd_fmr_cache = fmr_cache;
+}
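
The resulting flow: kiblnd_tunables_init() snapshots the module parameters into default_tunables at module load, and kiblnd_tunables_setup() then seeds each NI with those defaults and clamps the values at NI startup. The high-water clamp, distilled into a hypothetical helper with the same arithmetic as above:

static int example_clamp_hiw(int hiw, int peertxcredits)
{
	if (!hiw)
		hiw = peer_credits_hiw;		/* module-parameter default */
	if (hiw < peertxcredits / 2)
		hiw = peertxcredits / 2;
	if (hiw >= peertxcredits)
		hiw = peertxcredits - 1;
	return hiw;	/* e.g. 8 peer credits force hiw into [4, 7] */
}
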
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index cca7b2f7f..406c0e7a5 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -2582,7 +2582,6 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
}
read_unlock(&ksocknal_data.ksnd_global_lock);
- return;
}
void
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index d4ce06d0a..964b4e338 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -675,7 +675,6 @@ ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
sock->sk->sk_user_data = conn;
sock->sk->sk_data_ready = ksocknal_data_ready;
sock->sk->sk_write_space = ksocknal_write_space;
- return;
}
void
@@ -695,8 +694,6 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
* sk_user_data is NULL.
*/
sock->sk->sk_user_data = NULL;
-
- return ;
}
int
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index c3d628bac..8c260c3d5 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -232,130 +232,24 @@ int libcfs_panic_in_progress;
static const char *
libcfs_debug_subsys2str(int subsys)
{
- switch (1 << subsys) {
- default:
+ static const char *libcfs_debug_subsystems[] = LIBCFS_DEBUG_SUBSYS_NAMES;
+
+ if (subsys >= ARRAY_SIZE(libcfs_debug_subsystems))
return NULL;
- case S_UNDEFINED:
- return "undefined";
- case S_MDC:
- return "mdc";
- case S_MDS:
- return "mds";
- case S_OSC:
- return "osc";
- case S_OST:
- return "ost";
- case S_CLASS:
- return "class";
- case S_LOG:
- return "log";
- case S_LLITE:
- return "llite";
- case S_RPC:
- return "rpc";
- case S_LNET:
- return "lnet";
- case S_LND:
- return "lnd";
- case S_PINGER:
- return "pinger";
- case S_FILTER:
- return "filter";
- case S_ECHO:
- return "echo";
- case S_LDLM:
- return "ldlm";
- case S_LOV:
- return "lov";
- case S_LQUOTA:
- return "lquota";
- case S_OSD:
- return "osd";
- case S_LFSCK:
- return "lfsck";
- case S_LMV:
- return "lmv";
- case S_SEC:
- return "sec";
- case S_GSS:
- return "gss";
- case S_MGC:
- return "mgc";
- case S_MGS:
- return "mgs";
- case S_FID:
- return "fid";
- case S_FLD:
- return "fld";
- }
+
+ return libcfs_debug_subsystems[subsys];
}
/* libcfs_debug_token2mask() expects the returned string in lower-case */
static const char *
libcfs_debug_dbg2str(int debug)
{
- switch (1 << debug) {
- default:
+ static const char *libcfs_debug_masks[] = LIBCFS_DEBUG_MASKS_NAMES;
+
+ if (debug >= ARRAY_SIZE(libcfs_debug_masks))
return NULL;
- case D_TRACE:
- return "trace";
- case D_INODE:
- return "inode";
- case D_SUPER:
- return "super";
- case D_EXT2:
- return "ext2";
- case D_MALLOC:
- return "malloc";
- case D_CACHE:
- return "cache";
- case D_INFO:
- return "info";
- case D_IOCTL:
- return "ioctl";
- case D_NETERROR:
- return "neterror";
- case D_NET:
- return "net";
- case D_WARNING:
- return "warning";
- case D_BUFFS:
- return "buffs";
- case D_OTHER:
- return "other";
- case D_DENTRY:
- return "dentry";
- case D_NETTRACE:
- return "nettrace";
- case D_PAGE:
- return "page";
- case D_DLMTRACE:
- return "dlmtrace";
- case D_ERROR:
- return "error";
- case D_EMERG:
- return "emerg";
- case D_HA:
- return "ha";
- case D_RPCTRACE:
- return "rpctrace";
- case D_VFSTRACE:
- return "vfstrace";
- case D_READA:
- return "reada";
- case D_MMAP:
- return "mmap";
- case D_CONFIG:
- return "config";
- case D_CONSOLE:
- return "console";
- case D_QUOTA:
- return "quota";
- case D_SEC:
- return "sec";
- case D_LFSCK:
- return "lfsck";
- }
+
+ return libcfs_debug_masks[debug];
}
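
The table lookups above rely on the LIBCFS_DEBUG_SUBSYS_NAMES and LIBCFS_DEBUG_MASKS_NAMES initializer lists being ordered by bit number, so the bit index doubles as the array index. Roughly (the index-0 value is assumed from the removed switch statement):

	static const char *subsys_names[] = LIBCFS_DEBUG_SUBSYS_NAMES;

	/* subsys_names[0] == "undefined" (S_UNDEFINED == 1 << 0), etc. */
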
int
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index dadaf7685..086e690bd 100644
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -41,6 +41,9 @@ EXPORT_SYMBOL(cfs_fail_loc);
unsigned int cfs_fail_val;
EXPORT_SYMBOL(cfs_fail_val);
+int cfs_fail_err;
+EXPORT_SYMBOL(cfs_fail_err);
+
DECLARE_WAIT_QUEUE_HEAD(cfs_race_waitq);
EXPORT_SYMBOL(cfs_race_waitq);
diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
index f60feb3a3..cc45ed82b 100644
--- a/drivers/staging/lustre/lnet/libcfs/hash.c
+++ b/drivers/staging/lustre/lnet/libcfs/hash.c
@@ -942,10 +942,10 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
* @flags - CFS_HASH_REHASH enable synamic hash resizing
* - CFS_HASH_SORT enable chained hash sort
*/
-static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
+static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static int cfs_hash_dep_print(cfs_workitem_t *wi)
+static int cfs_hash_dep_print(struct cfs_workitem *wi)
{
struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
int dep;
@@ -1847,7 +1847,7 @@ cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
}
static int
-cfs_hash_rehash_worker(cfs_workitem_t *wi)
+cfs_hash_rehash_worker(struct cfs_workitem *wi)
{
struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
struct cfs_hash_bucket **bkts;
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
index 2de9eeae0..83543f928 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
@@ -49,7 +49,8 @@ EXPORT_SYMBOL(cfs_percpt_lock_free);
* reason we always allocate cacheline-aligned memory block.
*/
struct cfs_percpt_lock *
-cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
+cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
+ struct lock_class_key *keys)
{
struct cfs_percpt_lock *pcl;
spinlock_t *lock;
@@ -67,12 +68,18 @@ cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
return NULL;
}
- cfs_percpt_for_each(lock, i, pcl->pcl_locks)
+ if (!keys)
+ CWARN("Cannot setup class key for percpt lock, you may see recursive locking warnings which are actually fake.\n");
+
+ cfs_percpt_for_each(lock, i, pcl->pcl_locks) {
spin_lock_init(lock);
+ if (keys != NULL)
+ lockdep_set_class(lock, &keys[i]);
+ }
return pcl;
}
-EXPORT_SYMBOL(cfs_percpt_lock_alloc);
+EXPORT_SYMBOL(cfs_percpt_lock_create);
/**
* lock a CPU partition
@@ -142,44 +149,3 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
}
}
EXPORT_SYMBOL(cfs_percpt_unlock);
-
-/** free cpu-partition refcount */
-void
-cfs_percpt_atomic_free(atomic_t **refs)
-{
- cfs_percpt_free(refs);
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_free);
-
-/** allocate cpu-partition refcount with initial value @init_val */
-atomic_t **
-cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
-{
- atomic_t **refs;
- atomic_t *ref;
- int i;
-
- refs = cfs_percpt_alloc(cptab, sizeof(*ref));
- if (!refs)
- return NULL;
-
- cfs_percpt_for_each(ref, i, refs)
- atomic_set(ref, init_val);
- return refs;
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_alloc);
-
-/** return sum of cpu-partition refs */
-int
-cfs_percpt_atomic_summary(atomic_t **refs)
-{
- atomic_t *ref;
- int i;
- int val = 0;
-
- cfs_percpt_for_each(ref, i, refs)
- val += atomic_read(ref);
-
- return val;
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_summary);
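
Callers of the renamed constructor pass one lockdep class key per partition so that each private spinlock gets its own class, which silences the fake recursive-locking reports mentioned in the warning above. A hypothetical call site:

static struct cfs_percpt_lock *
example_make_lock(struct cfs_cpt_table *cptab)
{
	/* one key per partition; NR_CPUS is a safe upper bound here */
	static struct lock_class_key keys[NR_CPUS];

	return cfs_percpt_lock_create(cptab, keys);
}
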
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
index c5a695151..d0e81bb41 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
@@ -115,34 +115,6 @@ cfs_percpt_number(void *vars)
EXPORT_SYMBOL(cfs_percpt_number);
/*
- * return memory block shadowed from current CPU
- */
-void *
-cfs_percpt_current(void *vars)
-{
- struct cfs_var_array *arr;
- int cpt;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
- cpt = cfs_cpt_current(arr->va_cptab, 0);
- if (cpt < 0)
- return NULL;
-
- return arr->va_ptrs[cpt];
-}
-
-void *
-cfs_percpt_index(void *vars, int idx)
-{
- struct cfs_var_array *arr;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
-
- LASSERT(idx >= 0 && idx < arr->va_count);
- return arr->va_ptrs[idx];
-}
-
-/*
* free variable array, see more detail in cfs_array_alloc
*/
void
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index 389fb9eee..b52518c54 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -755,8 +755,13 @@ cfs_cpt_table_create(int ncpt)
struct cfs_cpu_partition *part;
int n;
- if (cpt >= ncpt)
- goto failed;
+ /*
+ * Each emulated NUMA node has all allowed CPUs in
+ * the mask.
+ * End loop when all partitions have assigned CPUs.
+ */
+ if (cpt == ncpt)
+ break;
part = &cptab->ctb_parts[cpt];
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
index 8c9377ed8..84f9b7b47 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -30,13 +30,34 @@
#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include "../../../include/linux/libcfs/libcfs.h"
+#include "../../../include/linux/libcfs/libcfs_crypto.h"
#include "linux-crypto.h"
+
/**
- * Array of hash algorithm speed in MByte per second
+ * Array of hash algorithm speeds in MB/s
*/
static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX];
-static int cfs_crypto_hash_alloc(unsigned char alg_id,
+/**
+ * Initialize the state descriptor for the specified hash algorithm.
+ *
+ * An internal routine to allocate the hash-specific state in \a hdesc for
+ * use with cfs_crypto_hash_digest() to compute the hash of a single message,
+ * though possibly in multiple chunks. The descriptor internal state should
+ * be freed with cfs_crypto_hash_final().
+ *
+ * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
+ * \param[out] type pointer to the hash description in hash_types[]
+ * array
+ * \param[in,out] hdesc hash state descriptor to be initialized
+ * \param[in] key initial hash value/state, NULL to use default
+ * value
+ * \param[in] key_len length of \a key
+ *
+ * \retval 0 on success
+ * \retval negative errno on failure
+ */
+static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
const struct cfs_crypto_hash_type **type,
struct ahash_request **req,
unsigned char *key,
@@ -45,11 +66,11 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
struct crypto_ahash *tfm;
int err = 0;
- *type = cfs_crypto_hash_type(alg_id);
+ *type = cfs_crypto_hash_type(hash_alg);
if (!*type) {
CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
- alg_id, CFS_HASH_ALG_MAX);
+ hash_alg, CFS_HASH_ALG_MAX);
return -EINVAL;
}
tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC);
@@ -70,12 +91,6 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
ahash_request_set_callback(*req, 0, NULL, NULL);
- /** Shash have different logic for initialization then digest
- * shash: crypto_hash_setkey, crypto_hash_init
- * digest: crypto_digest_init, crypto_digest_setkey
- * Skip this function for digest, because we use shash logic at
- * cfs_crypto_hash_alloc.
- */
if (key)
err = crypto_ahash_setkey(tfm, key, key_len);
else if ((*type)->cht_key != 0)
@@ -90,7 +105,7 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
- cfs_crypto_hash_speeds[alg_id]);
+ cfs_crypto_hash_speeds[hash_alg]);
err = crypto_ahash_init(*req);
if (err) {
@@ -100,7 +115,33 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
return err;
}
-int cfs_crypto_hash_digest(unsigned char alg_id,
+/**
+ * Calculate hash digest for the passed buffer.
+ *
+ * This should be used when computing the hash on a single contiguous buffer.
+ * It combines the hash initialization, computation, and cleanup.
+ *
+ * \param[in] hash_alg id of hash algorithm (CFS_HASH_ALG_*)
+ * \param[in] buf data buffer on which to compute hash
+ * \param[in] buf_len length of \a buf in bytes
+ * \param[in] key initial value/state for algorithm,
+ * if \a key = NULL use default initial value
+ * \param[in] key_len length of \a key in bytes
+ * \param[out] hash pointer to computed hash value,
+ * if \a hash = NULL then \a hash_len is set to the
+ * digest size in bytes and -ENOSPC is returned
+ * \param[in,out] hash_len size of \a hash buffer
+ *
+ * \retval -EINVAL \a buf, \a buf_len, \a hash_len,
+ * \a hash_alg invalid
+ * \retval -ENOENT \a hash_alg is unsupported
+ * \retval -ENOSPC \a hash is NULL, or \a hash_len less than
+ * digest size
+ * \retval 0 for success
+ * \retval negative errno for other errors from lower
+ * layers.
+ */
+int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
const void *buf, unsigned int buf_len,
unsigned char *key, unsigned int key_len,
unsigned char *hash, unsigned int *hash_len)
@@ -113,7 +154,7 @@ int cfs_crypto_hash_digest(unsigned char alg_id,
if (!buf || buf_len == 0 || !hash_len)
return -EINVAL;
- err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
+ err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
if (err != 0)
return err;
@@ -134,15 +175,32 @@ int cfs_crypto_hash_digest(unsigned char alg_id,
}
EXPORT_SYMBOL(cfs_crypto_hash_digest);
+/**
+ * Allocate and initialize descriptor for hash algorithm.
+ *
+ * This should be used to initialize a hash descriptor for multiple calls
+ * to a single hash function when computing the hash across multiple
+ * separate buffers or pages using cfs_crypto_hash_update{,_page}().
+ *
+ * The hash descriptor should be freed with cfs_crypto_hash_final().
+ *
+ * \param[in] hash_alg algorithm id (CFS_HASH_ALG_*)
+ * \param[in] key initial value/state for algorithm, if \a key = NULL
+ * use default initial value
+ * \param[in] key_len length of \a key in bytes
+ *
+ * \retval pointer to descriptor of hash instance
+ * \retval ERR_PTR(errno) in case of error
+ */
struct cfs_crypto_hash_desc *
- cfs_crypto_hash_init(unsigned char alg_id,
- unsigned char *key, unsigned int key_len)
+cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
+ unsigned char *key, unsigned int key_len)
{
struct ahash_request *req;
int err;
const struct cfs_crypto_hash_type *type;
- err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
+ err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
if (err)
return ERR_PTR(err);
@@ -150,6 +208,17 @@ struct cfs_crypto_hash_desc *
}
EXPORT_SYMBOL(cfs_crypto_hash_init);
+/**
+ * Update hash digest computed on data within the given \a page
+ *
+ * \param[in] hdesc hash state descriptor
+ * \param[in] page data page on which to compute the hash
+ * \param[in] offset offset within \a page at which to start hash
+ * \param[in] len length of data on which to compute hash
+ *
+ * \retval 0 for success
+ * \retval negative errno on failure
+ */
int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
struct page *page, unsigned int offset,
unsigned int len)
@@ -158,13 +227,23 @@ int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
struct scatterlist sl;
sg_init_table(&sl, 1);
- sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK);
+ sg_set_page(&sl, page, len, offset & ~PAGE_MASK);
ahash_request_set_crypt(req, &sl, NULL, sl.length);
return crypto_ahash_update(req);
}
EXPORT_SYMBOL(cfs_crypto_hash_update_page);
+/**
+ * Update hash digest computed on the specified data
+ *
+ * \param[in] hdesc hash state descriptor
+ * \param[in] buf data buffer on which to compute the hash
+ * \param[in] buf_len length of \a buf on which to compute hash
+ *
+ * \retval 0 for success
+ * \retval negative errno on failure
+ */
int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
const void *buf, unsigned int buf_len)
{
@@ -178,7 +257,18 @@ int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
}
EXPORT_SYMBOL(cfs_crypto_hash_update);
-/* If hash_len pointer is NULL - destroy descriptor. */
+/**
+ * Finish hash calculation, copy hash digest to buffer, clean up hash descriptor
+ *
+ * \param[in] hdesc hash descriptor
+ * \param[out] hash pointer to hash buffer to store hash digest
+ * \param[in,out] hash_len pointer to hash buffer size; if \a hash or
+ * \a hash_len is NULL, only free \a hdesc instead of
+ * computing the hash
+ *
+ * \retval 0 for success
+ * \retval -EOVERFLOW if hash_len is too small for the hash digest
+ * \retval negative errno for other errors from lower layers
+ */
int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
unsigned char *hash, unsigned int *hash_len)
{
@@ -186,99 +276,153 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
struct ahash_request *req = (void *)hdesc;
int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
- if (!hash_len) {
- crypto_free_ahash(crypto_ahash_reqtfm(req));
- ahash_request_free(req);
- return 0;
+ if (!hash || !hash_len) {
+ err = 0;
+ goto free_ahash;
}
- if (!hash || *hash_len < size) {
- *hash_len = size;
- return -ENOSPC;
+ if (*hash_len < size) {
+ err = -EOVERFLOW;
+ goto free_ahash;
}
+
ahash_request_set_crypt(req, NULL, hash, 0);
err = crypto_ahash_final(req);
-
- if (err < 0) {
- /* May be caller can fix error */
- return err;
- }
+ if (!err)
+ *hash_len = size;
+free_ahash:
crypto_free_ahash(crypto_ahash_reqtfm(req));
ahash_request_free(req);
return err;
}
EXPORT_SYMBOL(cfs_crypto_hash_final);
-static void cfs_crypto_performance_test(unsigned char alg_id,
- const unsigned char *buf,
- unsigned int buf_len)
+/**
+ * Compute the speed of the specified hash function
+ *
+ * Run a speed test of the given hash algorithm on a one-megabyte internal
+ * buffer (or PAGE_SIZE, if larger). The speed is stored internally in the
+ * cfs_crypto_hash_speeds[] array, and is available through the
+ * cfs_crypto_hash_speed() function.
+ *
+ * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
+ */
+static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg)
{
+ int buf_len = max(PAGE_SIZE, 1048576UL);
+ void *buf;
unsigned long start, end;
int bcount, err = 0;
- int sec = 1; /* do test only 1 sec */
- unsigned char hash[64];
- unsigned int hash_len = 64;
-
- for (start = jiffies, end = start + sec * HZ, bcount = 0;
- time_before(jiffies, end); bcount++) {
- err = cfs_crypto_hash_digest(alg_id, buf, buf_len, NULL, 0,
- hash, &hash_len);
+ struct page *page;
+ unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
+ unsigned int hash_len = sizeof(hash);
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ buf = kmap(page);
+ memset(buf, 0xAD, PAGE_SIZE);
+ kunmap(page);
+
+ for (start = jiffies, end = start + msecs_to_jiffies(MSEC_PER_SEC),
+ bcount = 0; time_before(jiffies, end); bcount++) {
+ struct cfs_crypto_hash_desc *hdesc;
+ int i;
+
+ hdesc = cfs_crypto_hash_init(hash_alg, NULL, 0);
+ if (IS_ERR(hdesc)) {
+ err = PTR_ERR(hdesc);
+ break;
+ }
+
+ for (i = 0; i < buf_len / PAGE_SIZE; i++) {
+ err = cfs_crypto_hash_update_page(hdesc, page, 0,
+ PAGE_SIZE);
+ if (err)
+ break;
+ }
+
+ err = cfs_crypto_hash_final(hdesc, hash, &hash_len);
if (err)
break;
}
end = jiffies;
-
+ __free_page(page);
+out_err:
if (err) {
- cfs_crypto_hash_speeds[alg_id] = -1;
- CDEBUG(D_INFO, "Crypto hash algorithm %s, err = %d\n",
- cfs_crypto_hash_name(alg_id), err);
+ cfs_crypto_hash_speeds[hash_alg] = err;
+ CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n",
+ cfs_crypto_hash_name(hash_alg), err);
} else {
unsigned long tmp;
tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) *
1000) / (1024 * 1024);
- cfs_crypto_hash_speeds[alg_id] = (int)tmp;
+ cfs_crypto_hash_speeds[hash_alg] = (int)tmp;
+ CDEBUG(D_CONFIG, "Crypto hash algorithm %s speed = %d MB/s\n",
+ cfs_crypto_hash_name(hash_alg),
+ cfs_crypto_hash_speeds[hash_alg]);
}
- CDEBUG(D_INFO, "Crypto hash algorithm %s speed = %d MB/s\n",
- cfs_crypto_hash_name(alg_id), cfs_crypto_hash_speeds[alg_id]);
}
-int cfs_crypto_hash_speed(unsigned char hash_alg)
+/**
+ * Return the hash speed in MB/s for a valid hash algorithm
+ *
+ * Return the performance of the specified \a hash_alg that was previously
+ * computed using cfs_crypto_performance_test().
+ *
+ * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
+ *
+ * \retval positive speed of the hash function in MB/s
+ * \retval -ENOENT if \a hash_alg is unsupported
+ * \retval negative errno if \a hash_alg speed is unavailable
+ */
+int cfs_crypto_hash_speed(enum cfs_crypto_hash_alg hash_alg)
{
if (hash_alg < CFS_HASH_ALG_MAX)
return cfs_crypto_hash_speeds[hash_alg];
- return -1;
+ return -ENOENT;
}
EXPORT_SYMBOL(cfs_crypto_hash_speed);
/**
- * Do performance test for all hash algorithms.
+ * Run the performance test for all hash algorithms.
+ *
+ * Run the cfs_crypto_performance_test() benchmark for all of the available
+ * hash functions using a 1MB buffer size. This is a reasonable buffer size
+ * for Lustre RPCs, even if the actual RPC size is larger or smaller.
+ *
+ * Since the setup cost and computation speed of the various hash algorithms
+ * are a function of the buffer size (and possibly internal contention of
+ * offload engines), this speed is only an estimate of what would be seen
+ * under actual usage, but is reasonable for comparing the available
+ * algorithms.
+ *
+ * The actual speeds are available via cfs_crypto_hash_speed() for later
+ * comparison.
+ *
+ * \retval 0 always; allocation failures are recorded per algorithm in
+ * cfs_crypto_hash_speeds[]
*/
static int cfs_crypto_test_hashes(void)
{
- unsigned char i;
- unsigned char *data;
- unsigned int j;
- /* Data block size for testing hash. Maximum
- * kmalloc size for 2.6.18 kernel is 128K
- */
- unsigned int data_len = 1 * 128 * 1024;
-
- data = kmalloc(data_len, 0);
- if (!data)
- return -ENOMEM;
+ enum cfs_crypto_hash_alg hash_alg;
- for (j = 0; j < data_len; j++)
- data[j] = j & 0xff;
+ for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++)
+ cfs_crypto_performance_test(hash_alg);
- for (i = 0; i < CFS_HASH_ALG_MAX; i++)
- cfs_crypto_performance_test(i, data, data_len);
-
- kfree(data);
return 0;
}
static int adler32;
+/**
+ * Register available hash functions
+ *
+ * \retval 0
+ */
int cfs_crypto_register(void)
{
request_module("crc32c");
@@ -290,6 +434,9 @@ int cfs_crypto_register(void)
return 0;
}
+/**
+ * Unregister previously registered hash functions
+ */
void cfs_crypto_unregister(void)
{
if (adler32 == 0)
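Putting the documented calls together, a minimal sketch of the multi-buffer flow: initialize the descriptor, feed it buffers and pages, then finalize, which frees the descriptor in every case. CFS_HASH_ALG_CRC32C and the buf/page variables are assumptions for illustration:

	unsigned char digest[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
	unsigned int digest_len = sizeof(digest);
	struct cfs_crypto_hash_desc *hdesc;
	int rc;

	hdesc = cfs_crypto_hash_init(CFS_HASH_ALG_CRC32C, NULL, 0);
	if (IS_ERR(hdesc))
		return PTR_ERR(hdesc);

	rc = cfs_crypto_hash_update(hdesc, buf, buf_len);
	if (!rc)
		rc = cfs_crypto_hash_update_page(hdesc, page, 0, PAGE_SIZE);
	if (rc) {
		/* a NULL hash asks final() to only free the descriptor */
		cfs_crypto_hash_final(hdesc, NULL, NULL);
		return rc;
	}

	return cfs_crypto_hash_final(hdesc, digest, &digest_len);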
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
index ebc60ac9b..d89f71ee4 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -40,10 +40,75 @@
#define LNET_MINOR 240
+static inline size_t libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
+{
+ size_t len = sizeof(*data);
+
+ len += cfs_size_round(data->ioc_inllen1);
+ len += cfs_size_round(data->ioc_inllen2);
+ return len;
+}
+
+static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
+{
+ if (data->ioc_hdr.ioc_len > BIT(30)) {
+ CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n");
+ return true;
+ }
+ if (data->ioc_inllen1 > BIT(30)) {
+ CERROR("LIBCFS ioctl: ioc_inllen1 larger than 1<<30\n");
+ return true;
+ }
+ if (data->ioc_inllen2 > BIT(30)) {
+ CERROR("LIBCFS ioctl: ioc_inllen2 larger than 1<<30\n");
+ return true;
+ }
+ if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
+ CERROR("LIBCFS ioctl: inlbuf1 pointer but 0 length\n");
+ return true;
+ }
+ if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
+ CERROR("LIBCFS ioctl: inlbuf2 pointer but 0 length\n");
+ return true;
+ }
+ if (data->ioc_pbuf1 && !data->ioc_plen1) {
+ CERROR("LIBCFS ioctl: pbuf1 pointer but 0 length\n");
+ return true;
+ }
+ if (data->ioc_pbuf2 && !data->ioc_plen2) {
+ CERROR("LIBCFS ioctl: pbuf2 pointer but 0 length\n");
+ return true;
+ }
+ if (data->ioc_plen1 && !data->ioc_pbuf1) {
+ CERROR("LIBCFS ioctl: plen1 nonzero but no pbuf1 pointer\n");
+ return true;
+ }
+ if (data->ioc_plen2 && !data->ioc_pbuf2) {
+ CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
+ return true;
+ }
+ if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
+ CERROR("LIBCFS ioctl: packlen != ioc_len\n");
+ return true;
+ }
+ if (data->ioc_inllen1 &&
+ data->ioc_bulk[data->ioc_inllen1 - 1] != '\0') {
+ CERROR("LIBCFS ioctl: inlbuf1 not 0 terminated\n");
+ return true;
+ }
+ if (data->ioc_inllen2 &&
+ data->ioc_bulk[cfs_size_round(data->ioc_inllen1) +
+ data->ioc_inllen2 - 1] != '\0') {
+ CERROR("LIBCFS ioctl: inlbuf2 not 0 terminated\n");
+ return true;
+ }
+ return false;
+}
+
int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data)
{
if (libcfs_ioctl_is_invalid(data)) {
- CERROR("LNET: ioctl not correctly formatted\n");
+ CERROR("libcfs ioctl: parameter not correctly formatted\n");
return -EINVAL;
}
@@ -57,68 +122,47 @@ int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data)
return 0;
}
-int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg,
- __u32 *len)
+int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
+ const struct libcfs_ioctl_hdr __user *uhdr)
{
struct libcfs_ioctl_hdr hdr;
+ int err = 0;
- if (copy_from_user(&hdr, arg, sizeof(hdr)))
+ if (copy_from_user(&hdr, uhdr, sizeof(hdr)))
return -EFAULT;
if (hdr.ioc_version != LIBCFS_IOCTL_VERSION &&
hdr.ioc_version != LIBCFS_IOCTL_VERSION2) {
- CERROR("LNET: version mismatch expected %#x, got %#x\n",
+ CERROR("libcfs ioctl: version mismatch expected %#x, got %#x\n",
LIBCFS_IOCTL_VERSION, hdr.ioc_version);
return -EINVAL;
}
- *len = hdr.ioc_len;
-
- return 0;
-}
-
-int libcfs_ioctl_popdata(void __user *arg, void *data, int size)
-{
- if (copy_to_user(arg, data, size))
- return -EFAULT;
- return 0;
-}
-
-static int
-libcfs_psdev_open(struct inode *inode, struct file *file)
-{
- int rc = 0;
+ if (hdr.ioc_len < sizeof(struct libcfs_ioctl_data)) {
+ CERROR("libcfs ioctl: user buffer too small for ioctl\n");
+ return -EINVAL;
+ }
- if (!inode)
+ if (hdr.ioc_len > LIBCFS_IOC_DATA_MAX) {
+ CERROR("libcfs ioctl: user buffer is too large %d/%d\n",
+ hdr.ioc_len, LIBCFS_IOC_DATA_MAX);
return -EINVAL;
- if (libcfs_psdev_ops.p_open)
- rc = libcfs_psdev_ops.p_open(0, NULL);
- else
- return -EPERM;
- return rc;
-}
+ }
-/* called when closing /dev/device */
-static int
-libcfs_psdev_release(struct inode *inode, struct file *file)
-{
- int rc = 0;
+ LIBCFS_ALLOC(*hdr_pp, hdr.ioc_len);
+ if (!*hdr_pp)
+ return -ENOMEM;
- if (!inode)
- return -EINVAL;
- if (libcfs_psdev_ops.p_close)
- rc = libcfs_psdev_ops.p_close(0, NULL);
- else
- rc = -EPERM;
- return rc;
+ if (copy_from_user(*hdr_pp, uhdr, hdr.ioc_len)) {
+ LIBCFS_FREE(*hdr_pp, hdr.ioc_len);
+ err = -EFAULT;
+ }
+ return err;
}
-static long libcfs_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+static long
+libcfs_psdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct cfs_psdev_file pfile;
- int rc = 0;
-
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -130,26 +174,12 @@ static long libcfs_ioctl(struct file *file,
return -EINVAL;
}
- /* Handle platform-dependent IOC requests */
- switch (cmd) {
- case IOC_LIBCFS_PANIC:
- if (!capable(CFS_CAP_SYS_BOOT))
- return -EPERM;
- panic("debugctl-invoked panic");
- return 0;
- }
-
- if (libcfs_psdev_ops.p_ioctl)
- rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void __user *)arg);
- else
- rc = -EPERM;
- return rc;
+ return libcfs_ioctl(cmd, (void __user *)arg);
}
static const struct file_operations libcfs_fops = {
- .unlocked_ioctl = libcfs_ioctl,
- .open = libcfs_psdev_open,
- .release = libcfs_psdev_release,
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = libcfs_psdev_ioctl,
};
struct miscdevice libcfs_dev = {
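The validator above accepts a buffer only when ioc_len equals sizeof(struct libcfs_ioctl_data) plus both inline lengths rounded up, and when each inline buffer is NUL-terminated at its last byte. A hedged userland-side sizing sketch, assuming cfs_size_round() rounds up to the next 8-byte boundary as in libcfs:

	static size_t round8(size_t len)
	{
		return (len + 7) & ~(size_t)7;
	}

	size_t buf_len = sizeof(struct libcfs_ioctl_data) +
			 round8(inllen1) + round8(inllen2);

	/* data->ioc_hdr.ioc_len must equal buf_len, and the last byte of
	 * each inline buffer packed into ioc_bulk[] must be '\0', or the
	 * kernel rejects the ioctl with -EINVAL. */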
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
index 890844602..bbe19a684 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
@@ -46,30 +46,6 @@
#include <linux/kgdb.h>
#endif
-/**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
- * waiting threads, which is not always desirable because all threads will
- * be waken up again and again, even user only needs a few of them to be
- * active most time. This is not good for performance because cache can
- * be polluted by different threads.
- *
- * LIFO list can resolve this problem because we always wakeup the most
- * recent active thread by default.
- *
- * NB: please don't call non-exclusive & exclusive wait on the same
- * waitq if add_wait_queue_exclusive_head is used.
- */
-void
-add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&waitq->lock, flags);
- __add_wait_queue_exclusive(waitq, link);
- spin_unlock_irqrestore(&waitq->lock, flags);
-}
-EXPORT_SYMBOL(add_wait_queue_exclusive_head);
-
sigset_t
cfs_block_allsigs(void)
{
@@ -128,13 +104,6 @@ cfs_restore_sigs(sigset_t old)
}
EXPORT_SYMBOL(cfs_restore_sigs);
-int
-cfs_signal_pending(void)
-{
- return signal_pending(current);
-}
-EXPORT_SYMBOL(cfs_signal_pending);
-
void
cfs_clear_sigpending(void)
{
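The deleted cfs_signal_pending() wrapper added nothing over the kernel primitive; a sketch of the idiom callers are expected to use directly after this change:

	if (signal_pending(current))
		return -EINTR;	/* a signal interrupted the wait */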
diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
index cdc640bfd..f2d041118 100644
--- a/drivers/staging/lustre/lnet/libcfs/module.c
+++ b/drivers/staging/lustre/lnet/libcfs/module.c
@@ -54,9 +54,6 @@
# define DEBUG_SUBSYSTEM S_LNET
-#define LNET_MAX_IOCTL_BUF_LEN (sizeof(struct lnet_ioctl_net_config) + \
- sizeof(struct lnet_ioctl_config_data))
-
#include "../../include/linux/libcfs/libcfs.h"
#include <asm/div64.h>
@@ -68,20 +65,6 @@
static struct dentry *lnet_debugfs_root;
-/* called when opening /dev/device */
-static int libcfs_psdev_open(unsigned long flags, void *args)
-{
- try_module_get(THIS_MODULE);
- return 0;
-}
-
-/* called when closing /dev/device */
-static int libcfs_psdev_release(unsigned long flags, void *args)
-{
- module_put(THIS_MODULE);
- return 0;
-}
-
static DECLARE_RWSEM(ioctl_list_sem);
static LIST_HEAD(ioctl_list);
@@ -115,39 +98,47 @@ int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand)
}
EXPORT_SYMBOL(libcfs_deregister_ioctl);
-static int libcfs_ioctl_handle(struct cfs_psdev_file *pfile, unsigned long cmd,
- void __user *arg, struct libcfs_ioctl_hdr *hdr)
+int libcfs_ioctl(unsigned long cmd, void __user *uparam)
{
struct libcfs_ioctl_data *data = NULL;
- int err = -EINVAL;
+ struct libcfs_ioctl_hdr *hdr;
+ int err;
+
+ /* 'cmd' and permissions get checked in our arch-specific caller */
+ err = libcfs_ioctl_getdata(&hdr, uparam);
+ if (err) {
+ CDEBUG_LIMIT(D_ERROR,
+ "libcfs ioctl: data header error %d\n", err);
+ return err;
+ }
- /*
- * The libcfs_ioctl_data_adjust() function performs adjustment
- * operations on the libcfs_ioctl_data structure to make
- * it usable by the code. This doesn't need to be called
- * for new data structures added.
- */
if (hdr->ioc_version == LIBCFS_IOCTL_VERSION) {
+ /*
+ * The libcfs_ioctl_data_adjust() function performs adjustment
+ * operations on the libcfs_ioctl_data structure to make
+ * it usable by the code. This doesn't need to be called
+ * for new data structures added.
+ */
data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr);
err = libcfs_ioctl_data_adjust(data);
if (err)
- return err;
+ goto out;
}
+ CDEBUG(D_IOCTL, "libcfs ioctl cmd %lu\n", cmd);
switch (cmd) {
case IOC_LIBCFS_CLEAR_DEBUG:
libcfs_debug_clear_buffer();
- return 0;
- /*
- * case IOC_LIBCFS_PANIC:
- * Handled in arch/cfs_module.c
- */
+ break;
+
case IOC_LIBCFS_MARK_DEBUG:
- if (!data->ioc_inlbuf1 ||
- data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0')
- return -EINVAL;
+ if (!data || !data->ioc_inlbuf1 ||
+ data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0') {
+ err = -EINVAL;
+ goto out;
+ }
libcfs_debug_mark_buffer(data->ioc_inlbuf1);
- return 0;
+ break;
default: {
struct libcfs_ioctl_handler *hand;
@@ -156,67 +147,23 @@ static int libcfs_ioctl_handle(struct cfs_psdev_file *pfile, unsigned long cmd,
down_read(&ioctl_list_sem);
list_for_each_entry(hand, &ioctl_list, item) {
err = hand->handle_ioctl(cmd, hdr);
- if (err != -EINVAL) {
- if (err == 0)
- err = libcfs_ioctl_popdata(arg,
- hdr, hdr->ioc_len);
- break;
+ if (err == -EINVAL)
+ continue;
+
+ if (!err) {
+ if (copy_to_user(uparam, hdr, hdr->ioc_len))
+ err = -EFAULT;
}
+ break;
}
up_read(&ioctl_list_sem);
- break;
- }
- }
-
- return err;
-}
-
-static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd,
- void __user *arg)
-{
- struct libcfs_ioctl_hdr *hdr;
- int err = 0;
- __u32 buf_len;
-
- err = libcfs_ioctl_getdata_len(arg, &buf_len);
- if (err)
- return err;
-
- /*
- * do a check here to restrict the size of the memory
- * to allocate to guard against DoS attacks.
- */
- if (buf_len > LNET_MAX_IOCTL_BUF_LEN) {
- CERROR("LNET: user buffer exceeds kernel buffer\n");
- return -EINVAL;
- }
-
- LIBCFS_ALLOC_GFP(hdr, buf_len, GFP_KERNEL);
- if (!hdr)
- return -ENOMEM;
-
- /* 'cmd' and permissions get checked in our arch-specific caller */
- if (copy_from_user(hdr, arg, buf_len)) {
- CERROR("LNET ioctl: data error\n");
- err = -EFAULT;
- goto out;
+ break;
+ }
}
-
- err = libcfs_ioctl_handle(pfile, cmd, arg, hdr);
-
out:
- LIBCFS_FREE(hdr, buf_len);
+ LIBCFS_FREE(hdr, hdr->ioc_len);
return err;
}
-struct cfs_psdev_ops libcfs_psdev_ops = {
- libcfs_psdev_open,
- libcfs_psdev_release,
- NULL,
- NULL,
- libcfs_ioctl
-};
-
int lprocfs_call_handler(void *data, int write, loff_t *ppos,
void __user *buffer, size_t *lenp,
int (*handler)(void *data, int write, loff_t pos,
@@ -478,6 +425,13 @@ static struct ctl_table lnet_table[] = {
.proc_handler = &proc_dointvec
},
{
+ .procname = "fail_err",
+ .data = &cfs_fail_err,
+ .maxlen = sizeof(cfs_fail_err),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
}
};
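In the rewritten dispatcher, a handler signals "not my command" by returning -EINVAL so the loop tries the next entry in ioctl_list, and a 0 return makes the core copy hdr back to userspace. A hedged sketch of a handler that plugs into this list; the my_* names and command value are hypothetical, and the handle_ioctl signature is assumed from the struct definition:

	static int my_handle_ioctl(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
	{
		if (cmd != IOC_LIBCFS_MY_CMD)	/* hypothetical command */
			return -EINVAL;		/* let the next handler try */

		/* ... act on hdr, possibly updating it in place ... */
		return 0;			/* core copies hdr back to userland */
	}

	static struct libcfs_ioctl_handler my_hand = {
		.handle_ioctl = my_handle_ioctl,
	};

	/* libcfs_register_ioctl(&my_hand) at module init,
	 * libcfs_deregister_ioctl(&my_hand) at module exit */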
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index 244eb89ee..7739b9469 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -707,10 +707,9 @@ int cfs_tracefile_dump_all_pages(char *filename)
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
char *buf;
+ mm_segment_t __oldfs;
int rc;
- DECL_MMSPACE;
-
cfs_tracefile_write_lock();
filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE,
@@ -729,11 +728,12 @@ int cfs_tracefile_dump_all_pages(char *filename)
rc = 0;
goto close;
}
+ __oldfs = get_fs();
+ set_fs(get_ds());
/* ok, for now, just write the pages. in the future we'll be building
* iobufs with the pages and calling generic_direct_IO
*/
- MMSPACE_OPEN;
list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
__LASSERT_TAGE_INVARIANT(tage);
@@ -752,7 +752,7 @@ int cfs_tracefile_dump_all_pages(char *filename)
list_del(&tage->linkage);
cfs_tage_free(tage);
}
- MMSPACE_CLOSE;
+ set_fs(__oldfs);
rc = vfs_fsync(filp, 1);
if (rc)
pr_err("sync returns %d\n", rc);
@@ -986,13 +986,12 @@ static int tracefiled(void *arg)
struct tracefiled_ctl *tctl = arg;
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
+ mm_segment_t __oldfs;
struct file *filp;
char *buf;
int last_loop = 0;
int rc;
- DECL_MMSPACE;
-
/* we're started late enough that we pick up init's fs context */
/* this is so broken in uml? what on earth is going on? */
@@ -1025,8 +1024,8 @@ static int tracefiled(void *arg)
__LASSERT(list_empty(&pc.pc_pages));
goto end_loop;
}
-
- MMSPACE_OPEN;
+ __oldfs = get_fs();
+ set_fs(get_ds());
list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
static loff_t f_pos;
@@ -1051,7 +1050,7 @@ static int tracefiled(void *arg)
break;
}
}
- MMSPACE_CLOSE;
+ set_fs(__oldfs);
filp_close(filp, NULL);
put_pages_on_daemon_list(&pc);
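Both hunks open-code the old DECL_MMSPACE/MMSPACE_OPEN/MMSPACE_CLOSE macros as the underlying address-limit dance. The minimal shape of that idiom for pushing kernel buffers through the VFS, with filp/buf/len/pos supplied by the caller:

	mm_segment_t oldfs = get_fs();
	ssize_t rc;

	set_fs(get_ds());	/* let vfs_write() accept kernel pointers */
	rc = vfs_write(filp, buf, len, &pos);
	set_fs(oldfs);		/* always restore the previous limit */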
diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
index c72fe00dc..92236ae59 100644
--- a/drivers/staging/lustre/lnet/libcfs/workitem.c
+++ b/drivers/staging/lustre/lnet/libcfs/workitem.c
@@ -111,7 +111,7 @@ cfs_wi_sched_cansleep(struct cfs_wi_sched *sched)
* 1. when it returns no one shall try to schedule the workitem.
*/
void
-cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
{
LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
@@ -138,7 +138,7 @@ EXPORT_SYMBOL(cfs_wi_exit);
* cancel schedule request of workitem \a wi
*/
int
-cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
{
int rc;
@@ -179,7 +179,7 @@ EXPORT_SYMBOL(cfs_wi_deschedule);
* be added, and even dynamic creation of serialised queues might be supported.
*/
void
-cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
{
LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
@@ -229,12 +229,12 @@ static int cfs_wi_scheduler(void *arg)
while (!sched->ws_stopping) {
int nloops = 0;
int rc;
- cfs_workitem_t *wi;
+ struct cfs_workitem *wi;
while (!list_empty(&sched->ws_runq) &&
nloops < CFS_WI_RESCHED) {
- wi = list_entry(sched->ws_runq.next, cfs_workitem_t,
- wi_list);
+ wi = list_entry(sched->ws_runq.next,
+ struct cfs_workitem, wi_list);
LASSERT(wi->wi_scheduled && !wi->wi_running);
list_del_init(&wi->wi_list);
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 876475554..fe0dbe746 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -1215,9 +1215,9 @@ lnet_shutdown_lndni(struct lnet_ni *ni)
}
static int
-lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout,
- __s32 peer_cr, __s32 peer_buf_cr, __s32 credits)
+lnet_startup_lndni(struct lnet_ni *ni, struct lnet_ioctl_config_data *conf)
{
+ struct lnet_ioctl_config_lnd_tunables *lnd_tunables = NULL;
int rc = -EINVAL;
int lnd_type;
lnd_t *lnd;
@@ -1275,6 +1275,21 @@ lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout,
ni->ni_lnd = lnd;
+ if (conf && conf->cfg_hdr.ioc_len > sizeof(*conf))
+ lnd_tunables = (struct lnet_ioctl_config_lnd_tunables *)conf->cfg_bulk;
+
+ if (lnd_tunables) {
+ LIBCFS_ALLOC(ni->ni_lnd_tunables,
+ sizeof(*ni->ni_lnd_tunables));
+ if (!ni->ni_lnd_tunables) {
+ mutex_unlock(&the_lnet.ln_lnd_mutex);
+ rc = -ENOMEM;
+ goto failed0;
+ }
+ memcpy(ni->ni_lnd_tunables, lnd_tunables,
+ sizeof(*ni->ni_lnd_tunables));
+ }
+
rc = lnd->lnd_startup(ni);
mutex_unlock(&the_lnet.ln_lnd_mutex);
@@ -1292,20 +1307,28 @@ lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout,
* If given some LND tunable parameters, parse those now to
* override the values in the NI structure.
*/
- if (peer_buf_cr >= 0)
- ni->ni_peerrtrcredits = peer_buf_cr;
- if (peer_timeout >= 0)
- ni->ni_peertimeout = peer_timeout;
+ if (conf && conf->cfg_config_u.cfg_net.net_peer_rtr_credits >= 0) {
+ ni->ni_peerrtrcredits =
+ conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
+ }
+ if (conf && conf->cfg_config_u.cfg_net.net_peer_timeout >= 0) {
+ ni->ni_peertimeout =
+ conf->cfg_config_u.cfg_net.net_peer_timeout;
+ }
/*
* TODO
* Note: For now, don't allow the user to change
* peertxcredits as this number is used in the
* IB LND to control queue depth.
- * if (peer_cr != -1)
- * ni->ni_peertxcredits = peer_cr;
+ *
+ * if (conf && conf->cfg_config_u.cfg_net.net_peer_tx_credits != -1)
+ * ni->ni_peertxcredits =
+ * conf->cfg_config_u.cfg_net.net_peer_tx_credits;
*/
- if (credits >= 0)
- ni->ni_maxtxcredits = credits;
+ if (conf && conf->cfg_config_u.cfg_net.net_max_tx_credits >= 0) {
+ ni->ni_maxtxcredits =
+ conf->cfg_config_u.cfg_net.net_max_tx_credits;
+ }
LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query);
@@ -1367,7 +1390,7 @@ lnet_startup_lndnis(struct list_head *nilist)
while (!list_empty(nilist)) {
ni = list_entry(nilist->next, lnet_ni_t, ni_list);
list_del(&ni->ni_list);
- rc = lnet_startup_lndni(ni, -1, -1, -1, -1);
+ rc = lnet_startup_lndni(ni, NULL);
if (rc < 0)
goto failed;
@@ -1641,25 +1664,20 @@ EXPORT_SYMBOL(LNetNIFini);
* parameters
*
* \param[in] ni network interface structure
- * \param[out] cpt_count the number of cpts the ni is on
- * \param[out] nid Network Interface ID
- * \param[out] peer_timeout NI peer timeout
- * \param[out] peer_tx_crdits NI peer transmit credits
- * \param[out] peer_rtr_credits NI peer router credits
- * \param[out] max_tx_credits NI max transmit credit
- * \param[out] net_config Network configuration
+ * \param[out] config NI configuration
*/
static void
-lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid,
- int *peer_timeout, int *peer_tx_credits,
- int *peer_rtr_credits, int *max_tx_credits,
- struct lnet_ioctl_net_config *net_config)
+lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_data *config)
{
+ struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
+ struct lnet_ioctl_net_config *net_config;
+ size_t min_size, tunable_size = 0;
int i;
- if (!ni)
+ if (!ni || !config)
return;
+ net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
if (!net_config)
return;
@@ -1675,11 +1693,11 @@ lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid,
sizeof(net_config->ni_interfaces[i]));
}
- *nid = ni->ni_nid;
- *peer_timeout = ni->ni_peertimeout;
- *peer_tx_credits = ni->ni_peertxcredits;
- *peer_rtr_credits = ni->ni_peerrtrcredits;
- *max_tx_credits = ni->ni_maxtxcredits;
+ config->cfg_nid = ni->ni_nid;
+ config->cfg_config_u.cfg_net.net_peer_timeout = ni->ni_peertimeout;
+ config->cfg_config_u.cfg_net.net_max_tx_credits = ni->ni_maxtxcredits;
+ config->cfg_config_u.cfg_net.net_peer_tx_credits = ni->ni_peertxcredits;
+ config->cfg_config_u.cfg_net.net_peer_rtr_credits = ni->ni_peerrtrcredits;
net_config->ni_status = ni->ni_status->ns_status;
@@ -1689,18 +1707,40 @@ lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid,
for (i = 0; i < num_cpts; i++)
net_config->ni_cpts[i] = ni->ni_cpts[i];
- *cpt_count = num_cpts;
+ config->cfg_ncpts = num_cpts;
+ }
+
+ /*
+ * See if user land tools sent in a newer and larger version
+ * of struct lnet_tunables than what the kernel uses.
+ */
+ min_size = sizeof(*config) + sizeof(*net_config);
+
+ if (config->cfg_hdr.ioc_len > min_size)
+ tunable_size = config->cfg_hdr.ioc_len - min_size;
+
+ /* Don't copy too much data to user space */
+ min_size = min(tunable_size, sizeof(*ni->ni_lnd_tunables));
+ lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
+
+ if (ni->ni_lnd_tunables && lnd_cfg && min_size) {
+ memcpy(lnd_cfg, ni->ni_lnd_tunables, min_size);
+ config->cfg_config_u.cfg_net.net_interface_count = 1;
+
+ /* Tell userland that the kernel side has less data */
+ if (tunable_size > sizeof(*ni->ni_lnd_tunables)) {
+ min_size = tunable_size - sizeof(*ni->ni_lnd_tunables);
+ config->cfg_hdr.ioc_len -= min_size;
+ }
}
}
-int
-lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout,
- int *peer_tx_credits, int *peer_rtr_credits,
- int *max_tx_credits,
- struct lnet_ioctl_net_config *net_config)
+static int
+lnet_get_net_config(struct lnet_ioctl_config_data *config)
{
struct lnet_ni *ni;
struct list_head *tmp;
+ int idx = config->cfg_count;
int cpt, i = 0;
int rc = -ENOENT;
@@ -1712,9 +1752,7 @@ lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout,
ni = list_entry(tmp, lnet_ni_t, ni_list);
lnet_ni_lock(ni);
- lnet_fill_ni_info(ni, cpt_count, nid, peer_timeout,
- peer_tx_credits, peer_rtr_credits,
- max_tx_credits, net_config);
+ lnet_fill_ni_info(ni, config);
lnet_ni_unlock(ni);
rc = 0;
break;
@@ -1725,10 +1763,9 @@ lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout,
}
int
-lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
- __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
- __s32 credits)
+lnet_dyn_add_ni(lnet_pid_t requested_pid, struct lnet_ioctl_config_data *conf)
{
+ char *nets = conf->cfg_config_u.cfg_net.net_intf;
lnet_ping_info_t *pinfo;
lnet_handle_md_t md_handle;
struct lnet_ni *ni;
@@ -1773,8 +1810,7 @@ lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
list_del_init(&ni->ni_list);
- rc = lnet_startup_lndni(ni, peer_timeout, peer_cr,
- peer_buf_cr, credits);
+ rc = lnet_startup_lndni(ni, conf);
if (rc)
goto failed1;
@@ -1864,6 +1900,10 @@ LNetCtl(unsigned int cmd, void *arg)
int rc;
unsigned long secs_passed;
+ BUILD_BUG_ON(LIBCFS_IOC_DATA_MAX <
+ sizeof(struct lnet_ioctl_net_config) +
+ sizeof(struct lnet_ioctl_config_data));
+
switch (cmd) {
case IOC_LIBCFS_GET_NI:
rc = LNetGetId(data->ioc_count, &id);
@@ -1918,27 +1958,14 @@ LNetCtl(unsigned int cmd, void *arg)
&config->cfg_config_u.cfg_route.rtr_priority);
case IOC_LIBCFS_GET_NET: {
- struct lnet_ioctl_net_config *net_config;
- size_t total = sizeof(*config) + sizeof(*net_config);
-
+ size_t total = sizeof(*config) +
+ sizeof(struct lnet_ioctl_net_config);
config = arg;
if (config->cfg_hdr.ioc_len < total)
return -EINVAL;
- net_config = (struct lnet_ioctl_net_config *)
- config->cfg_bulk;
- if (!net_config)
- return -EINVAL;
-
- return lnet_get_net_config(config->cfg_count,
- &config->cfg_ncpts,
- &config->cfg_nid,
- &config->cfg_config_u.cfg_net.net_peer_timeout,
- &config->cfg_config_u.cfg_net.net_peer_tx_credits,
- &config->cfg_config_u.cfg_net.net_peer_rtr_credits,
- &config->cfg_config_u.cfg_net.net_max_tx_credits,
- net_config);
+ return lnet_get_net_config(config);
}
case IOC_LIBCFS_GET_LNET_STATS: {
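For the GET_NET path, the caller's buffer must now be presized to hold both the config header and the embedded net_config that lnet_fill_ni_info() writes into cfg_bulk. A hedged userland sizing sketch; the allocation style is illustrative only:

	size_t total = sizeof(struct lnet_ioctl_config_data) +
		       sizeof(struct lnet_ioctl_net_config);
	struct lnet_ioctl_config_data *config = calloc(1, total);

	if (!config)
		return -ENOMEM;
	config->cfg_hdr.ioc_len = total;	/* anything smaller earns -EINVAL */
	config->cfg_count = idx;		/* index of the NI to query */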
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 449069c9e..480cc9c6c 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -107,6 +107,9 @@ lnet_ni_free(struct lnet_ni *ni)
if (ni->ni_cpts)
cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts);
+ if (ni->ni_lnd_tunables)
+ LIBCFS_FREE(ni->ni_lnd_tunables, sizeof(*ni->ni_lnd_tunables));
+
for (i = 0; i < LNET_MAX_INTERFACES && ni->ni_interfaces[i]; i++) {
LIBCFS_FREE(ni->ni_interfaces[i],
strlen(ni->ni_interfaces[i]) + 1);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index f19aa9320..c5d5bedb3 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -407,7 +407,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
LASSERT(niov > 0);
LASSERT(nkiov > 0);
this_nob = min(iov->iov_len - iovoffset,
- (__kernel_size_t) kiov->kiov_len - kiovoffset);
+ (__kernel_size_t)kiov->kiov_len - kiovoffset);
this_nob = min(this_nob, nob);
if (!addr)
@@ -477,7 +477,7 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
do {
LASSERT(nkiov > 0);
LASSERT(niov > 0);
- this_nob = min((__kernel_size_t) kiov->kiov_len - kiovoffset,
+ this_nob = min((__kernel_size_t)kiov->kiov_len - kiovoffset,
iov->iov_len - iovoffset);
this_nob = min(this_nob, nob);
@@ -996,7 +996,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
LASSERT(msg2->msg_txpeer->lp_ni == ni);
LASSERT(msg2->msg_tx_delayed);
- (void) lnet_post_send_locked(msg2, 1);
+ (void)lnet_post_send_locked(msg2, 1);
}
}
@@ -1019,7 +1019,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
LASSERT(msg2->msg_txpeer == txpeer);
LASSERT(msg2->msg_tx_delayed);
- (void) lnet_post_send_locked(msg2, 1);
+ (void)lnet_post_send_locked(msg2, 1);
}
}
@@ -1142,7 +1142,7 @@ routing_off:
lnet_msg_t, msg_list);
list_del(&msg2->msg_list);
- (void) lnet_post_routed_recv_locked(msg2, 1);
+ (void)lnet_post_routed_recv_locked(msg2, 1);
}
}
if (rxpeer) {
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index 93037c116..246b5c141 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -108,12 +108,7 @@ lnet_dyn_configure(struct libcfs_ioctl_hdr *hdr)
rc = -EINVAL;
goto out_unlock;
}
- rc = lnet_dyn_add_ni(LNET_PID_LUSTRE,
- conf->cfg_config_u.cfg_net.net_intf,
- conf->cfg_config_u.cfg_net.net_peer_timeout,
- conf->cfg_config_u.cfg_net.net_peer_tx_credits,
- conf->cfg_config_u.cfg_net.net_peer_rtr_credits,
- conf->cfg_config_u.cfg_net.net_max_tx_credits);
+ rc = lnet_dyn_add_ni(LNET_PID_LUSTRE, conf);
out_unlock:
mutex_unlock(&lnet_config_mutex);
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index dcb6e506f..a63d86c4c 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -49,10 +49,10 @@ module_param(brw_inject_errors, int, 0644);
MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
static void
-brw_client_fini(sfw_test_instance_t *tsi)
+brw_client_fini(struct sfw_test_instance *tsi)
{
- srpc_bulk_t *bulk;
- sfw_test_unit_t *tsu;
+ struct srpc_bulk *bulk;
+ struct sfw_test_unit *tsu;
LASSERT(tsi->tsi_is_client);
@@ -67,21 +67,21 @@ brw_client_fini(sfw_test_instance_t *tsi)
}
static int
-brw_client_init(sfw_test_instance_t *tsi)
+brw_client_init(struct sfw_test_instance *tsi)
{
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
int flags;
int npg;
int len;
int opc;
- srpc_bulk_t *bulk;
- sfw_test_unit_t *tsu;
+ struct srpc_bulk *bulk;
+ struct sfw_test_unit *tsu;
LASSERT(sn);
LASSERT(tsi->tsi_is_client);
if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
- test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
+ struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0;
opc = breq->blk_opc;
flags = breq->blk_flags;
@@ -91,9 +91,8 @@ brw_client_init(sfw_test_instance_t *tsi)
* but we have to keep it for compatibility
*/
len = npg * PAGE_SIZE;
-
} else {
- test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
+ struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
/*
* We should never reach this step if the feature is unknown
@@ -225,7 +224,7 @@ bad_data:
}
static void
-brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
+brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
{
int i;
struct page *pg;
@@ -237,7 +236,7 @@ brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
}
static int
-brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
+brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
{
int i;
struct page *pg;
@@ -255,14 +254,14 @@ brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
}
static int
-brw_client_prep_rpc(sfw_test_unit_t *tsu,
- lnet_process_id_t dest, srpc_client_rpc_t **rpcpp)
+brw_client_prep_rpc(struct sfw_test_unit *tsu,
+ lnet_process_id_t dest, struct srpc_client_rpc **rpcpp)
{
- srpc_bulk_t *bulk = tsu->tsu_private;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- srpc_client_rpc_t *rpc;
- srpc_brw_reqst_t *req;
+ struct srpc_bulk *bulk = tsu->tsu_private;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
+ struct srpc_client_rpc *rpc;
+ struct srpc_brw_reqst *req;
int flags;
int npg;
int len;
@@ -273,15 +272,14 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
LASSERT(bulk);
if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
- test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
+ struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0;
opc = breq->blk_opc;
flags = breq->blk_flags;
npg = breq->blk_npg;
len = npg * PAGE_SIZE;
-
} else {
- test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
+ struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
/*
* We should never reach this step if the feature is unknown
@@ -299,7 +297,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
if (rc)
return rc;
- memcpy(&rpc->crpc_bulk, bulk, offsetof(srpc_bulk_t, bk_iovs[npg]));
+ memcpy(&rpc->crpc_bulk, bulk, offsetof(struct srpc_bulk, bk_iovs[npg]));
if (opc == LST_BRW_WRITE)
brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_MAGIC);
else
@@ -315,21 +313,21 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
}
static void
-brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
+brw_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
{
__u64 magic = BRW_MAGIC;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- srpc_msg_t *msg = &rpc->crpc_replymsg;
- srpc_brw_reply_t *reply = &msg->msg_body.brw_reply;
- srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
+ struct srpc_msg *msg = &rpc->crpc_replymsg;
+ struct srpc_brw_reply *reply = &msg->msg_body.brw_reply;
+ struct srpc_brw_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
LASSERT(sn);
if (rpc->crpc_status) {
CERROR("BRW RPC to %s failed with %d\n",
libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
- if (!tsi->tsi_stopping) /* rpc could have been aborted */
+ if (!tsi->tsi_stopping) /* rpc could have been aborted */
atomic_inc(&sn->sn_brw_errors);
return;
}
@@ -363,7 +361,7 @@ brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
static void
brw_server_rpc_done(struct srpc_server_rpc *rpc)
{
- srpc_bulk_t *blk = rpc->srpc_bulk;
+ struct srpc_bulk *blk = rpc->srpc_bulk;
if (!blk)
return;
@@ -384,9 +382,9 @@ static int
brw_bulk_ready(struct srpc_server_rpc *rpc, int status)
{
__u64 magic = BRW_MAGIC;
- srpc_brw_reply_t *reply = &rpc->srpc_replymsg.msg_body.brw_reply;
- srpc_brw_reqst_t *reqst;
- srpc_msg_t *reqstmsg;
+ struct srpc_brw_reply *reply = &rpc->srpc_replymsg.msg_body.brw_reply;
+ struct srpc_brw_reqst *reqst;
+ struct srpc_msg *reqstmsg;
LASSERT(rpc->srpc_bulk);
LASSERT(rpc->srpc_reqstbuf);
@@ -420,10 +418,10 @@ static int
brw_server_handle(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- srpc_msg_t *replymsg = &rpc->srpc_replymsg;
- srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
- srpc_brw_reply_t *reply = &replymsg->msg_body.brw_reply;
- srpc_brw_reqst_t *reqst = &reqstmsg->msg_body.brw_reqst;
+ struct srpc_msg *replymsg = &rpc->srpc_replymsg;
+ struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_brw_reply *reply = &replymsg->msg_body.brw_reply;
+ struct srpc_brw_reqst *reqst = &reqstmsg->msg_body.brw_reqst;
int npg;
int rc;
@@ -459,7 +457,7 @@ brw_server_handle(struct srpc_server_rpc *rpc)
if (!(reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
/* compat with old version */
- if (reqst->brw_len & ~CFS_PAGE_MASK) {
+ if (reqst->brw_len & ~PAGE_MASK) {
reply->brw_status = EINVAL;
return 0;
}
@@ -490,7 +488,8 @@ brw_server_handle(struct srpc_server_rpc *rpc)
return 0;
}
-sfw_test_client_ops_t brw_test_client;
+struct sfw_test_client_ops brw_test_client;
+
void brw_init_test_client(void)
{
brw_test_client.tso_init = brw_client_init;
@@ -499,7 +498,8 @@ void brw_init_test_client(void)
brw_test_client.tso_done_rpc = brw_client_done_rpc;
};
-srpc_service_t brw_test_service;
+struct srpc_service brw_test_service;
+
void brw_init_test_service(void)
{
brw_test_service.sv_id = SRPC_SERVICE_BRW;
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 79ee6c0bf..408c614b6 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -51,9 +51,9 @@ lst_session_new_ioctl(lstio_session_new_args_t *args)
char *name;
int rc;
- if (!args->lstio_ses_idp || /* address for output sid */
- !args->lstio_ses_key || /* no key is specified */
- !args->lstio_ses_namep || /* session name */
+ if (!args->lstio_ses_idp || /* address for output sid */
+ !args->lstio_ses_key || /* no key is specified */
+ !args->lstio_ses_namep || /* session name */
args->lstio_ses_nmlen <= 0 ||
args->lstio_ses_nmlen > LST_NAME_SIZE)
return -EINVAL;
@@ -95,11 +95,11 @@ lst_session_info_ioctl(lstio_session_info_args_t *args)
{
/* no checking of key */
- if (!args->lstio_ses_idp || /* address for output sid */
- !args->lstio_ses_keyp || /* address for output key */
- !args->lstio_ses_featp || /* address for output features */
- !args->lstio_ses_ndinfo || /* address for output ndinfo */
- !args->lstio_ses_namep || /* address for output name */
+ if (!args->lstio_ses_idp || /* address for output sid */
+ !args->lstio_ses_keyp || /* address for output key */
+ !args->lstio_ses_featp || /* address for output features */
+ !args->lstio_ses_ndinfo || /* address for output ndinfo */
+ !args->lstio_ses_namep || /* address for output name */
args->lstio_ses_nmlen <= 0 ||
args->lstio_ses_nmlen > LST_NAME_SIZE)
return -EINVAL;
@@ -125,7 +125,7 @@ lst_debug_ioctl(lstio_debug_args_t *args)
if (!args->lstio_dbg_resultp)
return -EINVAL;
- if (args->lstio_dbg_namep && /* name of batch/group */
+ if (args->lstio_dbg_namep && /* name of batch/group */
(args->lstio_dbg_nmlen <= 0 ||
args->lstio_dbg_nmlen > LST_NAME_SIZE))
return -EINVAL;
@@ -326,7 +326,7 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
if (args->lstio_grp_key != console_session.ses_key)
return -EACCES;
- if (!args->lstio_grp_idsp || /* array of ids */
+ if (!args->lstio_grp_idsp || /* array of ids */
args->lstio_grp_count <= 0 ||
!args->lstio_grp_resultp ||
!args->lstio_grp_featp ||
@@ -394,13 +394,13 @@ lst_group_info_ioctl(lstio_group_info_args_t *args)
args->lstio_grp_nmlen > LST_NAME_SIZE)
return -EINVAL;
- if (!args->lstio_grp_entp && /* output: group entry */
- !args->lstio_grp_dentsp) /* output: node entry */
+ if (!args->lstio_grp_entp && /* output: group entry */
+ !args->lstio_grp_dentsp) /* output: node entry */
return -EINVAL;
- if (args->lstio_grp_dentsp) { /* have node entry */
- if (!args->lstio_grp_idxp || /* node index */
- !args->lstio_grp_ndentp) /* # of node entry */
+ if (args->lstio_grp_dentsp) { /* have node entry */
+ if (!args->lstio_grp_idxp || /* node index */
+ !args->lstio_grp_ndentp) /* # of node entry */
return -EINVAL;
if (copy_from_user(&ndent, args->lstio_grp_ndentp,
@@ -612,18 +612,18 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
if (args->lstio_bat_key != console_session.ses_key)
return -EACCES;
- if (!args->lstio_bat_namep || /* batch name */
+ if (!args->lstio_bat_namep || /* batch name */
args->lstio_bat_nmlen <= 0 ||
args->lstio_bat_nmlen > LST_NAME_SIZE)
return -EINVAL;
- if (!args->lstio_bat_entp && /* output: batch entry */
- !args->lstio_bat_dentsp) /* output: node entry */
+ if (!args->lstio_bat_entp && /* output: batch entry */
+ !args->lstio_bat_dentsp) /* output: node entry */
return -EINVAL;
- if (args->lstio_bat_dentsp) { /* have node entry */
- if (!args->lstio_bat_idxp || /* node index */
- !args->lstio_bat_ndentp) /* # of node entry */
+ if (args->lstio_bat_dentsp) { /* have node entry */
+ if (!args->lstio_bat_idxp || /* node index */
+ !args->lstio_bat_ndentp) /* # of node entry */
return -EINVAL;
if (copy_from_user(&index, args->lstio_bat_idxp,
@@ -722,18 +722,18 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
if (!args->lstio_tes_resultp ||
!args->lstio_tes_retp ||
- !args->lstio_tes_bat_name || /* no specified batch */
+ !args->lstio_tes_bat_name || /* no specified batch */
args->lstio_tes_bat_nmlen <= 0 ||
args->lstio_tes_bat_nmlen > LST_NAME_SIZE ||
- !args->lstio_tes_sgrp_name || /* no source group */
+ !args->lstio_tes_sgrp_name || /* no source group */
args->lstio_tes_sgrp_nmlen <= 0 ||
args->lstio_tes_sgrp_nmlen > LST_NAME_SIZE ||
- !args->lstio_tes_dgrp_name || /* no target group */
+ !args->lstio_tes_dgrp_name || /* no target group */
args->lstio_tes_dgrp_nmlen <= 0 ||
args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE)
return -EINVAL;
- if (!args->lstio_tes_loop || /* negative is infinite */
+ if (!args->lstio_tes_loop || /* negative is infinite */
args->lstio_tes_concur <= 0 ||
args->lstio_tes_dist <= 0 ||
args->lstio_tes_span <= 0)
@@ -743,7 +743,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
if (args->lstio_tes_param &&
(args->lstio_tes_param_len <= 0 ||
args->lstio_tes_param_len >
- PAGE_SIZE - sizeof(lstcon_test_t)))
+ PAGE_SIZE - sizeof(struct lstcon_test)))
return -EINVAL;
LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 35a227d0c..6f6875811 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -46,13 +46,13 @@
#include "conrpc.h"
#include "console.h"
-void lstcon_rpc_stat_reply(lstcon_rpc_trans_t *, srpc_msg_t *,
- lstcon_node_t *, lstcon_trans_stat_t *);
+void lstcon_rpc_stat_reply(struct lstcon_rpc_trans *, struct srpc_msg *,
+ struct lstcon_node *, lstcon_trans_stat_t *);
static void
-lstcon_rpc_done(srpc_client_rpc_t *rpc)
+lstcon_rpc_done(struct srpc_client_rpc *rpc)
{
- lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv;
+ struct lstcon_rpc *crpc = (struct lstcon_rpc *)rpc->crpc_priv;
LASSERT(crpc && rpc == crpc->crp_rpc);
LASSERT(crpc->crp_posted && !crpc->crp_finished);
@@ -90,8 +90,8 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
}
static int
-lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
- int bulk_npg, int bulk_len, int embedded, lstcon_rpc_t *crpc)
+lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats,
+ int bulk_npg, int bulk_len, int embedded, struct lstcon_rpc *crpc)
{
crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
feats, bulk_npg, bulk_len,
@@ -115,16 +115,16 @@ lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
}
static int
-lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
- int bulk_npg, int bulk_len, lstcon_rpc_t **crpcpp)
+lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned feats,
+ int bulk_npg, int bulk_len, struct lstcon_rpc **crpcpp)
{
- lstcon_rpc_t *crpc = NULL;
+ struct lstcon_rpc *crpc = NULL;
int rc;
spin_lock(&console_session.ses_rpc_lock);
crpc = list_first_entry_or_null(&console_session.ses_rpc_freelist,
- lstcon_rpc_t, crp_link);
+ struct lstcon_rpc, crp_link);
if (crpc)
list_del_init(&crpc->crp_link);
@@ -148,9 +148,9 @@ lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
}
void
-lstcon_rpc_put(lstcon_rpc_t *crpc)
+lstcon_rpc_put(struct lstcon_rpc *crpc)
{
- srpc_bulk_t *bulk = &crpc->crp_rpc->crpc_bulk;
+ struct srpc_bulk *bulk = &crpc->crp_rpc->crpc_bulk;
int i;
LASSERT(list_empty(&crpc->crp_link));
@@ -183,9 +183,9 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
}
static void
-lstcon_rpc_post(lstcon_rpc_t *crpc)
+lstcon_rpc_post(struct lstcon_rpc *crpc)
{
- lstcon_rpc_trans_t *trans = crpc->crp_trans;
+ struct lstcon_rpc_trans *trans = crpc->crp_trans;
LASSERT(trans);
@@ -236,9 +236,9 @@ lstcon_rpc_trans_name(int transop)
int
lstcon_rpc_trans_prep(struct list_head *translist, int transop,
- lstcon_rpc_trans_t **transpp)
+ struct lstcon_rpc_trans **transpp)
{
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
if (translist) {
list_for_each_entry(trans, translist, tas_link) {
@@ -278,26 +278,26 @@ lstcon_rpc_trans_prep(struct list_head *translist, int transop,
}
void
-lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *crpc)
+lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *crpc)
{
list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list);
crpc->crp_trans = trans;
}
void
-lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
+lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error)
{
- srpc_client_rpc_t *rpc;
- lstcon_rpc_t *crpc;
- lstcon_node_t *nd;
+ struct srpc_client_rpc *rpc;
+ struct lstcon_rpc *crpc;
+ struct lstcon_node *nd;
list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
rpc = crpc->crp_rpc;
spin_lock(&rpc->crpc_lock);
- if (!crpc->crp_posted || /* not posted */
- crpc->crp_stamp) { /* rpc done or aborted already */
+ if (!crpc->crp_posted || /* not posted */
+ crpc->crp_stamp) { /* rpc done or aborted already */
if (!crpc->crp_stamp) {
crpc->crp_stamp = cfs_time_current();
crpc->crp_status = -EINTR;
@@ -326,7 +326,7 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
}
static int
-lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
+lstcon_rpc_trans_check(struct lstcon_rpc_trans *trans)
{
if (console_session.ses_shutdown &&
!list_empty(&trans->tas_olink)) /* Not an end session RPC */
@@ -336,9 +336,9 @@ lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
}
int
-lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
+lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout)
{
- lstcon_rpc_t *crpc;
+ struct lstcon_rpc *crpc;
int rc;
if (list_empty(&trans->tas_rpcs_list))
@@ -386,11 +386,11 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
}
static int
-lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
+lstcon_rpc_get_reply(struct lstcon_rpc *crpc, struct srpc_msg **msgpp)
{
- lstcon_node_t *nd = crpc->crp_node;
- srpc_client_rpc_t *rpc = crpc->crp_rpc;
- srpc_generic_reply_t *rep;
+ struct lstcon_node *nd = crpc->crp_node;
+ struct srpc_client_rpc *rpc = crpc->crp_rpc;
+ struct srpc_generic_reply *rep;
LASSERT(nd && rpc);
LASSERT(crpc->crp_stamp);
@@ -423,10 +423,10 @@ lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
}
void
-lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
+lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans, lstcon_trans_stat_t *stat)
{
- lstcon_rpc_t *crpc;
- srpc_msg_t *rep;
+ struct lstcon_rpc *crpc;
+ struct srpc_msg *rep;
int error;
LASSERT(stat);
@@ -466,17 +466,17 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
}
int
-lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
+lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
struct list_head __user *head_up,
lstcon_rpc_readent_func_t readent)
{
struct list_head tmp;
struct list_head __user *next;
lstcon_rpc_ent_t *ent;
- srpc_generic_reply_t *rep;
- lstcon_rpc_t *crpc;
- srpc_msg_t *msg;
- lstcon_node_t *nd;
+ struct srpc_generic_reply *rep;
+ struct lstcon_rpc *crpc;
+ struct srpc_msg *msg;
+ struct lstcon_node *nd;
long dur;
struct timeval tv;
int error;
@@ -520,7 +520,7 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
continue;
/* RPC is done */
- rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
+ rep = (struct srpc_generic_reply *)&msg->msg_body.reply;
if (copy_to_user(&ent->rpe_sid, &rep->sid, sizeof(lst_sid_t)) ||
copy_to_user(&ent->rpe_fwk_errno, &rep->status,
@@ -531,7 +531,6 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
continue;
error = readent(trans->tas_opc, msg, ent);
-
if (error)
return error;
}
@@ -540,11 +539,11 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
}
void
-lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
+lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans)
{
- srpc_client_rpc_t *rpc;
- lstcon_rpc_t *crpc;
- lstcon_rpc_t *tmp;
+ struct srpc_client_rpc *rpc;
+ struct lstcon_rpc *crpc;
+ struct lstcon_rpc *tmp;
int count = 0;
list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) {
@@ -563,10 +562,10 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
}
/*
- * rpcs can be still not callbacked (even LNetMDUnlink is called)
- * because huge timeout for inaccessible network, don't make
- * user wait for them, just abandon them, they will be recycled
- * in callback
+	 * RPCs may still not have been called back (even after
+	 * LNetMDUnlink) because of the huge timeout for an
+	 * inaccessible network; don't make the user wait for
+	 * them, just abandon them, they will be recycled in the callback
*/
LASSERT(crpc->crp_status);
@@ -593,11 +592,11 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
}
int
-lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
- unsigned feats, lstcon_rpc_t **crpc)
+lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
+ unsigned feats, struct lstcon_rpc **crpc)
{
- srpc_mksn_reqst_t *msrq;
- srpc_rmsn_reqst_t *rsrq;
+ struct srpc_mksn_reqst *msrq;
+ struct srpc_rmsn_reqst *rsrq;
int rc;
switch (transop) {
@@ -632,9 +631,9 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
}
int
-lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
+lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats,
+		   struct lstcon_rpc **crpc)
{
- srpc_debug_reqst_t *drq;
+ struct srpc_debug_reqst *drq;
int rc;
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc);
@@ -650,11 +649,11 @@ lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
}
int
-lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
- lstcon_tsb_hdr_t *tsb, lstcon_rpc_t **crpc)
+lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
+ struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc)
{
- lstcon_batch_t *batch;
- srpc_batch_reqst_t *brq;
+ struct lstcon_batch *batch;
+ struct srpc_batch_reqst *brq;
int rc;
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc);
@@ -676,16 +675,16 @@ lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
LASSERT(!tsb->tsb_index);
- batch = (lstcon_batch_t *)tsb;
+ batch = (struct lstcon_batch *)tsb;
brq->bar_arg = batch->bat_arg;
return 0;
}
int
-lstcon_statrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
+lstcon_statrpc_prep(struct lstcon_node *nd, unsigned feats,
+		    struct lstcon_rpc **crpc)
{
- srpc_stat_reqst_t *srq;
+ struct srpc_stat_reqst *srq;
int rc;
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc);
@@ -716,12 +715,12 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
}
static int
-lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
+lstcon_dstnodes_prep(struct lstcon_group *grp, int idx,
int dist, int span, int nkiov, lnet_kiov_t *kiov)
{
lnet_process_id_packed_t *pid;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
int start;
int end;
int i = 0;
@@ -770,9 +769,9 @@ lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
}
static int
-lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
+lstcon_pingrpc_prep(lst_test_ping_param_t *param, struct srpc_test_reqst *req)
{
- test_ping_req_t *prq = &req->tsr_u.ping;
+ struct test_ping_req *prq = &req->tsr_u.ping;
prq->png_size = param->png_size;
prq->png_flags = param->png_flags;
@@ -781,9 +780,9 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
}
static int
-lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
+lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
{
- test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
+ struct test_bulk_req *brq = &req->tsr_u.bulk_v0;
brq->blk_opc = param->blk_opc;
brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
@@ -794,9 +793,9 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
}
static int
-lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
+lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
{
- test_bulk_req_v1_t *brq = &req->tsr_u.bulk_v1;
+ struct test_bulk_req_v1 *brq = &req->tsr_u.bulk_v1;
brq->blk_opc = param->blk_opc;
brq->blk_flags = param->blk_flags;
@@ -807,13 +806,13 @@ lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
}
int
-lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
- lstcon_test_t *test, lstcon_rpc_t **crpc)
+lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
+ struct lstcon_test *test, struct lstcon_rpc **crpc)
{
- lstcon_group_t *sgrp = test->tes_src_grp;
- lstcon_group_t *dgrp = test->tes_dst_grp;
- srpc_test_reqst_t *trq;
- srpc_bulk_t *bulk;
+ struct lstcon_group *sgrp = test->tes_src_grp;
+ struct lstcon_group *dgrp = test->tes_dst_grp;
+ struct srpc_test_reqst *trq;
+ struct srpc_bulk *bulk;
int i;
int npg = 0;
int nob = 0;
@@ -841,7 +840,6 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
trq->tsr_ndest = 0;
trq->tsr_loop = nmax * test->tes_dist * test->tes_concur;
-
} else {
bulk = &(*crpc)->crp_rpc->crpc_bulk;
@@ -917,10 +915,10 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
}
static int
-lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
- lstcon_node_t *nd, srpc_msg_t *reply)
+lstcon_sesnew_stat_reply(struct lstcon_rpc_trans *trans,
+ struct lstcon_node *nd, struct srpc_msg *reply)
{
- srpc_mksn_reply_t *mksn_rep = &reply->msg_body.mksn_reply;
+ struct srpc_mksn_reply *mksn_rep = &reply->msg_body.mksn_reply;
int status = mksn_rep->mksn_status;
if (!status &&
@@ -940,7 +938,7 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
if (!trans->tas_feats_updated) {
spin_lock(&console_session.ses_rpc_lock);
- if (!trans->tas_feats_updated) { /* recheck with lock */
+ if (!trans->tas_feats_updated) { /* recheck with lock */
trans->tas_feats_updated = 1;
trans->tas_features = reply->msg_ses_feats;
}
@@ -964,14 +962,14 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
}
void
-lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
- lstcon_node_t *nd, lstcon_trans_stat_t *stat)
+lstcon_rpc_stat_reply(struct lstcon_rpc_trans *trans, struct srpc_msg *msg,
+ struct lstcon_node *nd, lstcon_trans_stat_t *stat)
{
- srpc_rmsn_reply_t *rmsn_rep;
- srpc_debug_reply_t *dbg_rep;
- srpc_batch_reply_t *bat_rep;
- srpc_test_reply_t *test_rep;
- srpc_stat_reply_t *stat_rep;
+ struct srpc_rmsn_reply *rmsn_rep;
+ struct srpc_debug_reply *dbg_rep;
+ struct srpc_batch_reply *bat_rep;
+ struct srpc_test_reply *test_rep;
+ struct srpc_stat_reply *stat_rep;
int rc = 0;
switch (trans->tas_opc) {
@@ -1085,12 +1083,12 @@ int
lstcon_rpc_trans_ndlist(struct list_head *ndlist,
struct list_head *translist, int transop,
void *arg, lstcon_rpc_cond_func_t condition,
- lstcon_rpc_trans_t **transpp)
+ struct lstcon_rpc_trans **transpp)
{
- lstcon_rpc_trans_t *trans;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
- lstcon_rpc_t *rpc;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
+ struct lstcon_rpc *rpc;
unsigned feats;
int rc;
@@ -1130,14 +1128,16 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
case LST_TRANS_TSBCLIADD:
case LST_TRANS_TSBSRVADD:
rc = lstcon_testrpc_prep(nd, transop, feats,
- (lstcon_test_t *)arg, &rpc);
+ (struct lstcon_test *)arg,
+ &rpc);
break;
case LST_TRANS_TSBRUN:
case LST_TRANS_TSBSTOP:
case LST_TRANS_TSBCLIQRY:
case LST_TRANS_TSBSRVQRY:
rc = lstcon_batrpc_prep(nd, transop, feats,
- (lstcon_tsb_hdr_t *)arg, &rpc);
+ (struct lstcon_tsb_hdr *)arg,
+ &rpc);
break;
case LST_TRANS_STATQRY:
rc = lstcon_statrpc_prep(nd, feats, &rpc);
@@ -1170,17 +1170,18 @@ static void
lstcon_rpc_pinger(void *arg)
{
struct stt_timer *ptimer = (struct stt_timer *)arg;
- lstcon_rpc_trans_t *trans;
- lstcon_rpc_t *crpc;
- srpc_msg_t *rep;
- srpc_debug_reqst_t *drq;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_rpc *crpc;
+ struct srpc_msg *rep;
+ struct srpc_debug_reqst *drq;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
int intv;
int count = 0;
int rc;
- /* RPC pinger is a special case of transaction,
+ /*
+	 * The RPC pinger is a special case of transaction;
* it's called by timer at 8 seconds interval.
*/
mutex_lock(&console_session.ses_mutex);
@@ -1326,9 +1327,9 @@ lstcon_rpc_pinger_stop(void)
void
lstcon_rpc_cleanup_wait(void)
{
- lstcon_rpc_trans_t *trans;
- lstcon_rpc_t *crpc;
- lstcon_rpc_t *temp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_rpc *crpc;
+ struct lstcon_rpc *temp;
struct list_head *pacer;
struct list_head zlist;
@@ -1338,7 +1339,7 @@ lstcon_rpc_cleanup_wait(void)
while (!list_empty(&console_session.ses_trans_list)) {
list_for_each(pacer, &console_session.ses_trans_list) {
- trans = list_entry(pacer, lstcon_rpc_trans_t,
+ trans = list_entry(pacer, struct lstcon_rpc_trans,
tas_link);
CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
@@ -1370,7 +1371,7 @@ lstcon_rpc_cleanup_wait(void)
list_for_each_entry_safe(crpc, temp, &zlist, crp_link) {
list_del(&crpc->crp_link);
- LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t));
+ LIBCFS_FREE(crpc, sizeof(struct lstcon_rpc));
}
}
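
Every hunk in conrpc.c above applies one mechanical rule: kernel coding style discourages typedefs for plain structs, so lstcon_rpc_t becomes struct lstcon_rpc, srpc_msg_t becomes struct srpc_msg, and so on with no behavioural change. A hedged before/after sketch with a hypothetical struct foo, showing that only the spelling of the declarations moves:

#include <stdio.h>

/* before: typedef struct foo { int x; } foo_t;  and  foo_t *p  */
struct foo {
        int x;
};

static int foo_get_x(struct foo *p)     /* was: foo_t *p */
{
        return p->x;
}

int main(void)
{
        struct foo f = { .x = 42 };

        printf("%d\n", foo_get_x(&f));
        return 0;
}

The generated code is identical either way; the struct keyword simply keeps the fact that foo is an aggregate visible at every use site.
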
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 3e7839dad..90c3385a3 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -63,9 +63,9 @@ struct lstcon_tsb_hdr;
struct lstcon_test;
struct lstcon_node;
-typedef struct lstcon_rpc {
+struct lstcon_rpc {
struct list_head crp_link; /* chain on rpc transaction */
- srpc_client_rpc_t *crp_rpc; /* client rpc */
+ struct srpc_client_rpc *crp_rpc; /* client rpc */
struct lstcon_node *crp_node; /* destination node */
struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */
@@ -76,9 +76,9 @@ typedef struct lstcon_rpc {
unsigned int crp_embedded:1;
int crp_status; /* console rpc errors */
unsigned long crp_stamp; /* replied time stamp */
-} lstcon_rpc_t;
+};
-typedef struct lstcon_rpc_trans {
+struct lstcon_rpc_trans {
struct list_head tas_olink; /* link chain on owner list */
struct list_head tas_link; /* link chain on global list */
int tas_opc; /* operation code of transaction */
@@ -87,7 +87,7 @@ typedef struct lstcon_rpc_trans {
wait_queue_head_t tas_waitq; /* wait queue head */
atomic_t tas_remaining; /* # of un-scheduled rpcs */
struct list_head tas_rpcs_list; /* queued requests */
-} lstcon_rpc_trans_t;
+};
#define LST_TRANS_PRIVATE 0x1000
@@ -106,35 +106,35 @@ typedef struct lstcon_rpc_trans {
#define LST_TRANS_STATQRY 0x21
typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
-typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *,
+typedef int (*lstcon_rpc_readent_func_t)(int, struct srpc_msg *,
lstcon_rpc_ent_t __user *);
int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
- unsigned version, lstcon_rpc_t **crpc);
+ unsigned version, struct lstcon_rpc **crpc);
int lstcon_dbgrpc_prep(struct lstcon_node *nd,
- unsigned version, lstcon_rpc_t **crpc);
+ unsigned version, struct lstcon_rpc **crpc);
int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
- struct lstcon_tsb_hdr *tsb, lstcon_rpc_t **crpc);
+ struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc);
int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
- struct lstcon_test *test, lstcon_rpc_t **crpc);
+ struct lstcon_test *test, struct lstcon_rpc **crpc);
int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version,
- lstcon_rpc_t **crpc);
-void lstcon_rpc_put(lstcon_rpc_t *crpc);
+ struct lstcon_rpc **crpc);
+void lstcon_rpc_put(struct lstcon_rpc *crpc);
int lstcon_rpc_trans_prep(struct list_head *translist,
- int transop, lstcon_rpc_trans_t **transpp);
+ int transop, struct lstcon_rpc_trans **transpp);
int lstcon_rpc_trans_ndlist(struct list_head *ndlist,
struct list_head *translist, int transop,
void *arg, lstcon_rpc_cond_func_t condition,
- lstcon_rpc_trans_t **transpp);
-void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans,
+ struct lstcon_rpc_trans **transpp);
+void lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans,
lstcon_trans_stat_t *stat);
-int lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
+int lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
struct list_head __user *head_up,
lstcon_rpc_readent_func_t readent);
-void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error);
-void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans);
-void lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *req);
-int lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout);
+void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error);
+void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans);
+void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans,
+			     struct lstcon_rpc *req);
+int lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout);
int lstcon_rpc_pinger_start(void);
void lstcon_rpc_pinger_stop(void);
void lstcon_rpc_cleanup_wait(void);
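
Note how the header above declares prototypes over struct lstcon_rpc_trans and struct lstcon_node pointers without ever defining them: the forward declarations near the top of conrpc.h are enough for pointer parameters, since the compiler needs the full layout only where a member is dereferenced. A self-contained sketch of that pattern, with struct node and node_ping as hypothetical stand-ins (both halves live in one file here purely so it compiles):

/* header-like part: declaration only */
struct node;                            /* forward declaration */
int node_ping(struct node *nd);         /* pointer param is fine */

/* source-like part: the layout is known here */
#include <stdio.h>

struct node {
        int up;
};

int node_ping(struct node *nd)
{
        return nd->up ? 0 : -1;         /* dereference needs the definition */
}

int main(void)
{
        struct node n = { .up = 1 };

        printf("ping: %d\n", node_ping(&n));
        return 0;
}
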
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 1a923ea3a..a03e52d29 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -61,7 +61,7 @@ do { \
struct lstcon_session console_session;
static void
-lstcon_node_get(lstcon_node_t *nd)
+lstcon_node_get(struct lstcon_node *nd)
{
LASSERT(nd->nd_ref >= 1);
@@ -69,9 +69,9 @@ lstcon_node_get(lstcon_node_t *nd)
}
static int
-lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
+lstcon_node_find(lnet_process_id_t id, struct lstcon_node **ndpp, int create)
{
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE;
LASSERT(id.nid != LNET_NID_ANY);
@@ -90,11 +90,11 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
if (!create)
return -ENOENT;
- LIBCFS_ALLOC(*ndpp, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
+	LIBCFS_ALLOC(*ndpp, sizeof(struct lstcon_node) +
+			    sizeof(struct lstcon_ndlink));
if (!*ndpp)
return -ENOMEM;
- ndl = (lstcon_ndlink_t *)(*ndpp + 1);
+ ndl = (struct lstcon_ndlink *)(*ndpp + 1);
ndl->ndl_node = *ndpp;
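
The allocation above packs a node and its link record into one block, recovering the link with (*ndpp + 1); the matching LIBCFS_FREE in lstcon_node_put() then releases both with a single call. A userspace sketch of the same co-allocation trick, under hypothetical types; the node member is pointer-sized so (nd + 1) stays suitably aligned:

#include <stdio.h>
#include <stdlib.h>

struct node { long id; };               /* pointer-aligned on purpose */
struct ndlink { struct node *node; };

int main(void)
{
        struct node *nd = malloc(sizeof(struct node) +
                                 sizeof(struct ndlink));
        struct ndlink *ndl;

        if (!nd)
                return 1;
        ndl = (struct ndlink *)(nd + 1);  /* link lives right after node */
        ndl->node = nd;
        nd->id = 7;
        printf("link -> node %ld\n", ndl->node->id);
        free(nd);                         /* one free releases both */
        return 0;
}

One allocation instead of two saves an error path and guarantees the pair can never be half-constructed.
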
@@ -103,7 +103,7 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
ndl->ndl_node->nd_stamp = cfs_time_current();
ndl->ndl_node->nd_state = LST_NODE_UNKNOWN;
ndl->ndl_node->nd_timeout = 0;
- memset(&ndl->ndl_node->nd_ping, 0, sizeof(lstcon_rpc_t));
+ memset(&ndl->ndl_node->nd_ping, 0, sizeof(struct lstcon_rpc));
/*
* queued in global hash & list, no refcount is taken by
@@ -117,16 +117,16 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
}
static void
-lstcon_node_put(lstcon_node_t *nd)
+lstcon_node_put(struct lstcon_node *nd)
{
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
LASSERT(nd->nd_ref > 0);
if (--nd->nd_ref > 0)
return;
- ndl = (lstcon_ndlink_t *)(nd + 1);
+ ndl = (struct lstcon_ndlink *)(nd + 1);
LASSERT(!list_empty(&ndl->ndl_link));
LASSERT(!list_empty(&ndl->ndl_hlink));
@@ -135,16 +135,16 @@ lstcon_node_put(lstcon_node_t *nd)
list_del(&ndl->ndl_link);
list_del(&ndl->ndl_hlink);
- LIBCFS_FREE(nd, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
+	LIBCFS_FREE(nd, sizeof(struct lstcon_node) +
+			sizeof(struct lstcon_ndlink));
}
static int
lstcon_ndlink_find(struct list_head *hash,
- lnet_process_id_t id, lstcon_ndlink_t **ndlpp, int create)
+ lnet_process_id_t id, struct lstcon_ndlink **ndlpp, int create)
{
unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
int rc;
if (id.nid == LNET_NID_ANY)
@@ -168,7 +168,7 @@ lstcon_ndlink_find(struct list_head *hash,
if (rc)
return rc;
- LIBCFS_ALLOC(ndl, sizeof(lstcon_ndlink_t));
+ LIBCFS_ALLOC(ndl, sizeof(struct lstcon_ndlink));
if (!ndl) {
lstcon_node_put(nd);
return -ENOMEM;
@@ -184,7 +184,7 @@ lstcon_ndlink_find(struct list_head *hash,
}
static void
-lstcon_ndlink_release(lstcon_ndlink_t *ndl)
+lstcon_ndlink_release(struct lstcon_ndlink *ndl)
{
LASSERT(list_empty(&ndl->ndl_link));
LASSERT(!list_empty(&ndl->ndl_hlink));
@@ -196,12 +196,12 @@ lstcon_ndlink_release(lstcon_ndlink_t *ndl)
}
static int
-lstcon_group_alloc(char *name, lstcon_group_t **grpp)
+lstcon_group_alloc(char *name, struct lstcon_group **grpp)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int i;
- LIBCFS_ALLOC(grp, offsetof(lstcon_group_t,
+ LIBCFS_ALLOC(grp, offsetof(struct lstcon_group,
grp_ndl_hash[LST_NODE_HASHSIZE]));
if (!grp)
return -ENOMEM;
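
struct lstcon_group ends in a zero-length grp_ndl_hash[] array, so the allocation above sizes the header plus LST_NODE_HASHSIZE buckets in one offsetof()-based step. A small sketch of that flexible-array idiom under hypothetical names (C99 spells the member bucket[] where older kernel code wrote bucket[0]):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define NBUCKET 8

struct group {
        int ref;
        struct bucket {
                int count;
        } bucket[];                     /* flexible array member */
};

int main(void)
{
        struct group *grp = calloc(1, offsetof(struct group,
                                               bucket[NBUCKET]));
        int i;

        if (!grp)
                return 1;
        grp->ref = 1;
        for (i = 0; i < NBUCKET; i++)
                grp->bucket[i].count = i;
        printf("last bucket: %d\n", grp->bucket[NBUCKET - 1].count);
        free(grp);
        return 0;
}
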
@@ -209,7 +209,7 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp)
grp->grp_ref = 1;
if (name) {
if (strlen(name) > sizeof(grp->grp_name) - 1) {
- LIBCFS_FREE(grp, offsetof(lstcon_group_t,
+ LIBCFS_FREE(grp, offsetof(struct lstcon_group,
grp_ndl_hash[LST_NODE_HASHSIZE]));
return -E2BIG;
}
@@ -229,18 +229,18 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp)
}
static void
-lstcon_group_addref(lstcon_group_t *grp)
+lstcon_group_addref(struct lstcon_group *grp)
{
grp->grp_ref++;
}
-static void lstcon_group_ndlink_release(lstcon_group_t *, lstcon_ndlink_t *);
+static void lstcon_group_ndlink_release(struct lstcon_group *,
+					struct lstcon_ndlink *);
static void
-lstcon_group_drain(lstcon_group_t *grp, int keep)
+lstcon_group_drain(struct lstcon_group *grp, int keep)
{
- lstcon_ndlink_t *ndl;
- lstcon_ndlink_t *tmp;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_ndlink *tmp;
list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) {
if (!(ndl->ndl_node->nd_state & keep))
@@ -249,7 +249,7 @@ lstcon_group_drain(lstcon_group_t *grp, int keep)
}
static void
-lstcon_group_decref(lstcon_group_t *grp)
+lstcon_group_decref(struct lstcon_group *grp)
{
int i;
@@ -264,20 +264,20 @@ lstcon_group_decref(lstcon_group_t *grp)
for (i = 0; i < LST_NODE_HASHSIZE; i++)
LASSERT(list_empty(&grp->grp_ndl_hash[i]));
- LIBCFS_FREE(grp, offsetof(lstcon_group_t,
+ LIBCFS_FREE(grp, offsetof(struct lstcon_group,
grp_ndl_hash[LST_NODE_HASHSIZE]));
}
static int
-lstcon_group_find(const char *name, lstcon_group_t **grpp)
+lstcon_group_find(const char *name, struct lstcon_group **grpp)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
if (strncmp(grp->grp_name, name, LST_NAME_SIZE))
continue;
- lstcon_group_addref(grp); /* +1 ref for caller */
+ lstcon_group_addref(grp); /* +1 ref for caller */
*grpp = grp;
return 0;
}
@@ -286,8 +286,8 @@ lstcon_group_find(const char *name, lstcon_group_t **grpp)
}
static int
-lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
- lstcon_ndlink_t **ndlpp, int create)
+lstcon_group_ndlink_find(struct lstcon_group *grp, lnet_process_id_t id,
+ struct lstcon_ndlink **ndlpp, int create)
{
int rc;
@@ -305,7 +305,7 @@ lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
}
static void
-lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
+lstcon_group_ndlink_release(struct lstcon_group *grp, struct lstcon_ndlink *ndl)
{
list_del_init(&ndl->ndl_link);
lstcon_ndlink_release(ndl);
@@ -313,8 +313,8 @@ lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
}
static void
-lstcon_group_ndlink_move(lstcon_group_t *old,
- lstcon_group_t *new, lstcon_ndlink_t *ndl)
+lstcon_group_ndlink_move(struct lstcon_group *old,
+ struct lstcon_group *new, struct lstcon_ndlink *ndl)
{
unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
LST_NODE_HASHSIZE;
@@ -329,21 +329,21 @@ lstcon_group_ndlink_move(lstcon_group_t *old,
}
static void
-lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new)
+lstcon_group_move(struct lstcon_group *old, struct lstcon_group *new)
{
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
while (!list_empty(&old->grp_ndl_list)) {
ndl = list_entry(old->grp_ndl_list.next,
- lstcon_ndlink_t, ndl_link);
+ struct lstcon_ndlink, ndl_link);
lstcon_group_ndlink_move(old, new, ndl);
}
}
static int
-lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
+lstcon_sesrpc_condition(int transop, struct lstcon_node *nd, void *arg)
{
- lstcon_group_t *grp = (lstcon_group_t *)arg;
+ struct lstcon_group *grp = (struct lstcon_group *)arg;
switch (transop) {
case LST_TRANS_SESNEW:
@@ -370,10 +370,10 @@ lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
}
static int
-lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
+lstcon_sesrpc_readent(int transop, struct srpc_msg *msg,
lstcon_rpc_ent_t __user *ent_up)
{
- srpc_debug_reply_t *rep;
+ struct srpc_debug_reply *rep;
switch (transop) {
case LST_TRANS_SESNEW:
@@ -399,13 +399,13 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
}
static int
-lstcon_group_nodes_add(lstcon_group_t *grp,
+lstcon_group_nodes_add(struct lstcon_group *grp,
int count, lnet_process_id_t __user *ids_up,
unsigned *featp, struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
- lstcon_ndlink_t *ndl;
- lstcon_group_t *tmp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_group *tmp;
lnet_process_id_t id;
int i;
int rc;
@@ -466,13 +466,13 @@ lstcon_group_nodes_add(lstcon_group_t *grp,
}
static int
-lstcon_group_nodes_remove(lstcon_group_t *grp,
+lstcon_group_nodes_remove(struct lstcon_group *grp,
int count, lnet_process_id_t __user *ids_up,
struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
- lstcon_ndlink_t *ndl;
- lstcon_group_t *tmp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_group *tmp;
lnet_process_id_t id;
int rc;
int i;
@@ -523,7 +523,7 @@ error:
int
lstcon_group_add(char *name)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(name, &grp) ? 0 : -EEXIST;
@@ -548,7 +548,7 @@ int
lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
unsigned *featp, struct list_head __user *result_up)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int rc;
LASSERT(count > 0);
@@ -578,8 +578,8 @@ lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
int
lstcon_group_del(char *name)
{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -621,7 +621,7 @@ lstcon_group_del(char *name)
int
lstcon_group_clean(char *name, int args)
{
- lstcon_group_t *grp = NULL;
+ struct lstcon_group *grp = NULL;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -654,7 +654,7 @@ int
lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up,
struct list_head __user *result_up)
{
- lstcon_group_t *grp = NULL;
+ struct lstcon_group *grp = NULL;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -683,8 +683,8 @@ lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up,
int
lstcon_group_refresh(char *name, struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -725,7 +725,7 @@ lstcon_group_refresh(char *name, struct list_head __user *result_up)
int
lstcon_group_list(int index, int len, char __user *name_up)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
LASSERT(index >= 0);
LASSERT(name_up);
@@ -733,7 +733,7 @@ lstcon_group_list(int index, int len, char __user *name_up)
list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
if (!index--) {
return copy_to_user(name_up, grp->grp_name, len) ?
- -EFAULT : 0;
+ -EFAULT : 0;
}
}
@@ -744,8 +744,8 @@ static int
lstcon_nodes_getent(struct list_head *head, int *index_p,
int *count_p, lstcon_node_ent_t __user *dents_up)
{
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
int count = 0;
int index = 0;
@@ -786,8 +786,8 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
lstcon_node_ent_t __user *dents_up)
{
lstcon_ndlist_ent_t *gentp;
- lstcon_group_t *grp;
- lstcon_ndlink_t *ndl;
+ struct lstcon_group *grp;
+ struct lstcon_ndlink *ndl;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -828,9 +828,9 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
}
static int
-lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
+lstcon_batch_find(const char *name, struct lstcon_batch **batpp)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) {
@@ -845,7 +845,7 @@ lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
int
lstcon_batch_add(char *name)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
int i;
int rc;
@@ -855,7 +855,7 @@ lstcon_batch_add(char *name)
return rc;
}
- LIBCFS_ALLOC(bat, sizeof(lstcon_batch_t));
+ LIBCFS_ALLOC(bat, sizeof(struct lstcon_batch));
if (!bat) {
CERROR("Can't allocate descriptor for batch %s\n", name);
return -ENOMEM;
@@ -865,7 +865,7 @@ lstcon_batch_add(char *name)
sizeof(struct list_head) * LST_NODE_HASHSIZE);
if (!bat->bat_cli_hash) {
CERROR("Can't allocate hash for batch %s\n", name);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+ LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
return -ENOMEM;
}
@@ -875,7 +875,7 @@ lstcon_batch_add(char *name)
if (!bat->bat_srv_hash) {
CERROR("Can't allocate hash for batch %s\n", name);
LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+ LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
return -ENOMEM;
}
@@ -883,7 +883,7 @@ lstcon_batch_add(char *name)
if (strlen(name) > sizeof(bat->bat_name) - 1) {
LIBCFS_FREE(bat->bat_srv_hash, LST_NODE_HASHSIZE);
LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+ LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
return -E2BIG;
}
strncpy(bat->bat_name, name, sizeof(bat->bat_name));
@@ -911,7 +911,7 @@ lstcon_batch_add(char *name)
int
lstcon_batch_list(int index, int len, char __user *name_up)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
LASSERT(name_up);
LASSERT(index >= 0);
@@ -934,9 +934,9 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
lstcon_test_batch_ent_t *entp;
struct list_head *clilst;
struct list_head *srvlst;
- lstcon_test_t *test = NULL;
- lstcon_batch_t *bat;
- lstcon_ndlink_t *ndl;
+ struct lstcon_test *test = NULL;
+ struct lstcon_batch *bat;
+ struct lstcon_ndlink *ndl;
int rc;
rc = lstcon_batch_find(name, &bat);
@@ -977,7 +977,6 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
if (!test) {
entp->u.tbe_batch.bae_ntest = bat->bat_ntest;
entp->u.tbe_batch.bae_state = bat->bat_state;
-
} else {
entp->u.tbe_test.tse_type = test->tes_type;
entp->u.tbe_test.tse_loop = test->tes_loop;
@@ -999,7 +998,7 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
}
static int
-lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)
+lstcon_batrpc_condition(int transop, struct lstcon_node *nd, void *arg)
{
switch (transop) {
case LST_TRANS_TSBRUN:
@@ -1021,10 +1020,10 @@ lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)
}
static int
-lstcon_batch_op(lstcon_batch_t *bat, int transop,
+lstcon_batch_op(struct lstcon_batch *bat, int transop,
struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
int rc;
rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list,
@@ -1047,7 +1046,7 @@ lstcon_batch_op(lstcon_batch_t *bat, int transop,
int
lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
int rc;
if (lstcon_batch_find(name, &bat)) {
@@ -1069,7 +1068,7 @@ lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up)
int
lstcon_batch_stop(char *name, int force, struct list_head __user *result_up)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
int rc;
if (lstcon_batch_find(name, &bat)) {
@@ -1089,17 +1088,17 @@ lstcon_batch_stop(char *name, int force, struct list_head __user *result_up)
}
static void
-lstcon_batch_destroy(lstcon_batch_t *bat)
+lstcon_batch_destroy(struct lstcon_batch *bat)
{
- lstcon_ndlink_t *ndl;
- lstcon_test_t *test;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_test *test;
int i;
list_del(&bat->bat_link);
while (!list_empty(&bat->bat_test_list)) {
test = list_entry(bat->bat_test_list.next,
- lstcon_test_t, tes_link);
+ struct lstcon_test, tes_link);
LASSERT(list_empty(&test->tes_trans_list));
list_del(&test->tes_link);
@@ -1107,7 +1106,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
lstcon_group_decref(test->tes_src_grp);
lstcon_group_decref(test->tes_dst_grp);
- LIBCFS_FREE(test, offsetof(lstcon_test_t,
+ LIBCFS_FREE(test, offsetof(struct lstcon_test,
tes_param[test->tes_paramlen]));
}
@@ -1115,7 +1114,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
while (!list_empty(&bat->bat_cli_list)) {
ndl = list_entry(bat->bat_cli_list.next,
- lstcon_ndlink_t, ndl_link);
+ struct lstcon_ndlink, ndl_link);
list_del_init(&ndl->ndl_link);
lstcon_ndlink_release(ndl);
@@ -1123,7 +1122,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
while (!list_empty(&bat->bat_srv_list)) {
ndl = list_entry(bat->bat_srv_list.next,
- lstcon_ndlink_t, ndl_link);
+ struct lstcon_ndlink, ndl_link);
list_del_init(&ndl->ndl_link);
lstcon_ndlink_release(ndl);
@@ -1138,19 +1137,19 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
sizeof(struct list_head) * LST_NODE_HASHSIZE);
LIBCFS_FREE(bat->bat_srv_hash,
sizeof(struct list_head) * LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+ LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
}
static int
-lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
+lstcon_testrpc_condition(int transop, struct lstcon_node *nd, void *arg)
{
- lstcon_test_t *test;
- lstcon_batch_t *batch;
- lstcon_ndlink_t *ndl;
+ struct lstcon_test *test;
+ struct lstcon_batch *batch;
+ struct lstcon_ndlink *ndl;
struct list_head *hash;
struct list_head *head;
- test = (lstcon_test_t *)arg;
+ test = (struct lstcon_test *)arg;
LASSERT(test);
batch = test->tes_batch;
@@ -1186,10 +1185,10 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
}
static int
-lstcon_test_nodes_add(lstcon_test_t *test, struct list_head __user *result_up)
+lstcon_test_nodes_add(struct lstcon_test *test,
+		      struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_group *grp;
int transop;
int rc;
@@ -1237,7 +1236,7 @@ again:
}
static int
-lstcon_verify_batch(const char *name, lstcon_batch_t **batch)
+lstcon_verify_batch(const char *name, struct lstcon_batch **batch)
{
int rc;
@@ -1256,10 +1255,10 @@ lstcon_verify_batch(const char *name, lstcon_batch_t **batch)
}
static int
-lstcon_verify_group(const char *name, lstcon_group_t **grp)
+lstcon_verify_group(const char *name, struct lstcon_group **grp)
{
int rc;
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
rc = lstcon_group_find(name, grp);
if (rc) {
@@ -1284,11 +1283,11 @@ lstcon_test_add(char *batch_name, int type, int loop,
void *param, int paramlen, int *retp,
struct list_head __user *result_up)
{
- lstcon_test_t *test = NULL;
+ struct lstcon_test *test = NULL;
int rc;
- lstcon_group_t *src_grp = NULL;
- lstcon_group_t *dst_grp = NULL;
- lstcon_batch_t *batch = NULL;
+ struct lstcon_group *src_grp = NULL;
+ struct lstcon_group *dst_grp = NULL;
+ struct lstcon_batch *batch = NULL;
/*
* verify that a batch of the given name exists, and the groups
@@ -1310,7 +1309,7 @@ lstcon_test_add(char *batch_name, int type, int loop,
if (dst_grp->grp_userland)
*retp = 1;
- LIBCFS_ALLOC(test, offsetof(lstcon_test_t, tes_param[paramlen]));
+ LIBCFS_ALLOC(test, offsetof(struct lstcon_test, tes_param[paramlen]));
if (!test) {
CERROR("Can't allocate test descriptor\n");
rc = -ENOMEM;
@@ -1357,7 +1356,7 @@ lstcon_test_add(char *batch_name, int type, int loop,
return rc;
out:
if (test)
- LIBCFS_FREE(test, offsetof(lstcon_test_t, tes_param[paramlen]));
+ LIBCFS_FREE(test, offsetof(struct lstcon_test, tes_param[paramlen]));
if (dst_grp)
lstcon_group_decref(dst_grp);
@@ -1369,9 +1368,9 @@ out:
}
static int
-lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
+lstcon_test_find(struct lstcon_batch *batch, int idx, struct lstcon_test **testpp)
{
- lstcon_test_t *test;
+ struct lstcon_test *test;
list_for_each_entry(test, &batch->bat_test_list, tes_link) {
if (idx == test->tes_hdr.tsb_index) {
@@ -1384,10 +1383,10 @@ lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
}
static int
-lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg,
+lstcon_tsbrpc_readent(int transop, struct srpc_msg *msg,
lstcon_rpc_ent_t __user *ent_up)
{
- srpc_batch_reply_t *rep = &msg->msg_body.bat_reply;
+ struct srpc_batch_reply *rep = &msg->msg_body.bat_reply;
LASSERT(transop == LST_TRANS_TSBCLIQRY ||
transop == LST_TRANS_TSBSRVQRY);
@@ -1404,12 +1403,12 @@ int
lstcon_test_batch_query(char *name, int testidx, int client,
int timeout, struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
struct list_head *translist;
struct list_head *ndlist;
- lstcon_tsb_hdr_t *hdr;
- lstcon_batch_t *batch;
- lstcon_test_t *test = NULL;
+ struct lstcon_tsb_hdr *hdr;
+ struct lstcon_batch *batch;
+ struct lstcon_test *test = NULL;
int transop;
int rc;
@@ -1423,7 +1422,6 @@ lstcon_test_batch_query(char *name, int testidx, int client,
translist = &batch->bat_trans_list;
ndlist = &batch->bat_cli_list;
hdr = &batch->bat_hdr;
-
} else {
/* query specified test only */
rc = lstcon_test_find(batch, testidx, &test);
@@ -1448,7 +1446,8 @@ lstcon_test_batch_query(char *name, int testidx, int client,
lstcon_rpc_trans_postwait(trans, timeout);
- if (!testidx && /* query a batch, not a test */
+ /* query a batch, not a test */
+ if (!testidx &&
!lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) &&
!lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0)) {
/* all RPCs finished, and no active test */
@@ -1463,10 +1462,10 @@ lstcon_test_batch_query(char *name, int testidx, int client,
}
static int
-lstcon_statrpc_readent(int transop, srpc_msg_t *msg,
+lstcon_statrpc_readent(int transop, struct srpc_msg *msg,
lstcon_rpc_ent_t __user *ent_up)
{
- srpc_stat_reply_t *rep = &msg->msg_body.stat_reply;
+ struct srpc_stat_reply *rep = &msg->msg_body.stat_reply;
sfw_counters_t __user *sfwk_stat;
srpc_counters_t __user *srpc_stat;
lnet_counters_t __user *lnet_stat;
@@ -1491,7 +1490,7 @@ lstcon_ndlist_stat(struct list_head *ndlist,
int timeout, struct list_head __user *result_up)
{
struct list_head head;
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
int rc;
INIT_LIST_HEAD(&head);
@@ -1516,7 +1515,7 @@ int
lstcon_group_stat(char *grp_name, int timeout,
struct list_head __user *result_up)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(grp_name, &grp);
@@ -1536,8 +1535,8 @@ int
lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up,
int timeout, struct list_head __user *result_up)
{
- lstcon_ndlink_t *ndl;
- lstcon_group_t *tmp;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_group *tmp;
lnet_process_id_t id;
int i;
int rc;
@@ -1581,7 +1580,7 @@ lstcon_debug_ndlist(struct list_head *ndlist,
struct list_head *translist,
int timeout, struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
int rc;
rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY,
@@ -1611,7 +1610,7 @@ int
lstcon_batch_debug(int timeout, char *name,
int client, struct list_head __user *result_up)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
int rc;
rc = lstcon_batch_find(name, &bat);
@@ -1629,7 +1628,7 @@ int
lstcon_group_debug(int timeout, char *name,
struct list_head __user *result_up)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -1649,8 +1648,8 @@ lstcon_nodes_debug(int timeout,
struct list_head __user *result_up)
{
lnet_process_id_t id;
- lstcon_ndlink_t *ndl;
- lstcon_group_t *grp;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_group *grp;
int i;
int rc;
@@ -1749,7 +1748,7 @@ lstcon_session_new(char *name, int key, unsigned feats,
if (strlen(name) > sizeof(console_session.ses_name) - 1)
return -E2BIG;
- strncpy(console_session.ses_name, name,
+ strlcpy(console_session.ses_name, name,
sizeof(console_session.ses_name));
rc = lstcon_batch_add(LST_DEFAULT_BATCH);
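
The strncpy -> strlcpy switch above closes a subtle hole: strncpy() does not NUL-terminate when the source fills the buffer, while strlcpy() always terminates and returns the length it tried to copy, so truncation is detectable. A userspace sketch with a local my_strlcpy() fallback, since strlcpy is a BSD/kernel extension that many libcs lack:

#include <stdio.h>
#include <string.h>

/* always NUL-terminates; ret >= size means the copy was truncated */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = len >= size ? size - 1 : len;

                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;
}

int main(void)
{
        char a[4], b[4];
        size_t ret;

        strncpy(a, "abcdef", sizeof(a)); /* a holds "abcd", no NUL! */
        (void)a;                         /* printing it would be undefined */

        ret = my_strlcpy(b, "abcdef", sizeof(b));
        printf("copied \"%s\", truncated: %s\n", b,
               ret >= sizeof(b) ? "yes" : "no");
        return 0;
}
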
@@ -1758,7 +1757,7 @@ lstcon_session_new(char *name, int key, unsigned feats,
rc = lstcon_rpc_pinger_start();
if (rc) {
- lstcon_batch_t *bat = NULL;
+ struct lstcon_batch *bat = NULL;
lstcon_batch_find(LST_DEFAULT_BATCH, &bat);
lstcon_batch_destroy(bat);
@@ -1782,7 +1781,7 @@ lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up,
char __user *name_up, int len)
{
lstcon_ndlist_ent_t *entp;
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
int rc = 0;
if (console_session.ses_state != LST_SESSION_ACTIVE)
@@ -1813,9 +1812,9 @@ lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up,
int
lstcon_session_end(void)
{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
- lstcon_batch_t *bat;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_group *grp;
+ struct lstcon_batch *bat;
int rc = 0;
LASSERT(console_session.ses_state == LST_SESSION_ACTIVE);
@@ -1849,7 +1848,7 @@ lstcon_session_end(void)
/* destroy all batches */
while (!list_empty(&console_session.ses_bat_list)) {
bat = list_entry(console_session.ses_bat_list.next,
- lstcon_batch_t, bat_link);
+ struct lstcon_batch, bat_link);
lstcon_batch_destroy(bat);
}
@@ -1857,7 +1856,7 @@ lstcon_session_end(void)
/* destroy all groups */
while (!list_empty(&console_session.ses_grp_list)) {
grp = list_entry(console_session.ses_grp_list.next,
- lstcon_group_t, grp_link);
+ struct lstcon_group, grp_link);
LASSERT(grp->grp_ref == 1);
lstcon_group_decref(grp);
@@ -1906,12 +1905,12 @@ lstcon_session_feats_check(unsigned feats)
static int
lstcon_acceptor_handle(struct srpc_server_rpc *rpc)
{
- srpc_msg_t *rep = &rpc->srpc_replymsg;
- srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg;
- srpc_join_reqst_t *jreq = &req->msg_body.join_reqst;
- srpc_join_reply_t *jrep = &rep->msg_body.join_reply;
- lstcon_group_t *grp = NULL;
- lstcon_ndlink_t *ndl;
+ struct srpc_msg *rep = &rpc->srpc_replymsg;
+ struct srpc_msg *req = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_join_reqst *jreq = &req->msg_body.join_reqst;
+ struct srpc_join_reply *jrep = &rep->msg_body.join_reply;
+ struct lstcon_group *grp = NULL;
+ struct lstcon_ndlink *ndl;
int rc = 0;
sfw_unpack_message(req);
@@ -1987,7 +1986,8 @@ out:
return rc;
}
-static srpc_service_t lstcon_acceptor_service;
+static struct srpc_service lstcon_acceptor_service;
+
static void lstcon_init_acceptor_service(void)
{
/* initialize selftest console acceptor service table */
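
The acceptor handler above reads its payload out of msg_body.join_reqst and answers through msg_body.join_reply: the selftest wire format is a single message struct whose body is a union of per-opcode payloads selected by a type field. A hedged sketch of that layout with hypothetical names:

#include <stdio.h>

enum msg_type { MSG_JOIN_REQST, MSG_JOIN_REPLY };

struct join_reqst { int session_key; };
struct join_reply { int status; };

struct msg {
        enum msg_type type;
        union {
                struct join_reqst join_reqst;
                struct join_reply join_reply;
        } body;                         /* one member valid per opcode */
};

static void handle_join(const struct msg *req, struct msg *rep)
{
        rep->type = MSG_JOIN_REPLY;
        rep->body.join_reply.status =
                req->body.join_reqst.session_key == 42 ? 0 : -1;
}

int main(void)
{
        struct msg req = { .type = MSG_JOIN_REQST,
                           .body.join_reqst.session_key = 42 };
        struct msg rep;

        handle_join(&req, &rep);
        printf("status: %d\n", rep.body.join_reply.status);
        return 0;
}
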
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 554f58244..becd22e41 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -50,22 +50,25 @@
#include "selftest.h"
#include "conrpc.h"
-typedef struct lstcon_node {
+/* node descriptor */
+struct lstcon_node {
lnet_process_id_t nd_id; /* id of the node */
int nd_ref; /* reference count */
int nd_state; /* state of the node */
int nd_timeout; /* session timeout */
unsigned long nd_stamp; /* timestamp of last replied RPC */
struct lstcon_rpc nd_ping; /* ping rpc */
-} lstcon_node_t; /* node descriptor */
+};
-typedef struct {
+/* node link descriptor */
+struct lstcon_ndlink {
struct list_head ndl_link; /* chain on list */
struct list_head ndl_hlink; /* chain on hash */
- lstcon_node_t *ndl_node; /* pointer to node */
-} lstcon_ndlink_t; /* node link descriptor */
+ struct lstcon_node *ndl_node; /* pointer to node */
+};
-typedef struct {
+/* (alias of nodes) group descriptor */
+struct lstcon_group {
struct list_head grp_link; /* chain on global group list
*/
int grp_ref; /* reference count */
@@ -76,18 +79,19 @@ typedef struct {
struct list_head grp_trans_list; /* transaction list */
struct list_head grp_ndl_list; /* nodes list */
struct list_head grp_ndl_hash[0]; /* hash table for nodes */
-} lstcon_group_t; /* (alias of nodes) group descriptor */
+};
#define LST_BATCH_IDLE 0xB0 /* idle batch */
#define LST_BATCH_RUNNING 0xB1 /* running batch */
-typedef struct lstcon_tsb_hdr {
+struct lstcon_tsb_hdr {
lst_bid_t tsb_id; /* batch ID */
int tsb_index; /* test index */
-} lstcon_tsb_hdr_t;
+};
-typedef struct {
- lstcon_tsb_hdr_t bat_hdr; /* test_batch header */
+/* (tests) batch descriptor */
+struct lstcon_batch {
+ struct lstcon_tsb_hdr bat_hdr; /* test_batch header */
struct list_head bat_link; /* chain on session's batches list */
int bat_ntest; /* # of test */
int bat_state; /* state of the batch */
@@ -95,20 +99,21 @@ typedef struct {
* for run, force for stop */
char bat_name[LST_NAME_SIZE];/* name of batch */
- struct list_head bat_test_list; /* list head of tests (lstcon_test_t)
+ struct list_head bat_test_list; /* list head of tests (struct lstcon_test)
*/
struct list_head bat_trans_list; /* list head of transaction */
struct list_head bat_cli_list; /* list head of client nodes
- * (lstcon_node_t) */
+ * (struct lstcon_node) */
struct list_head *bat_cli_hash; /* hash table of client nodes */
struct list_head bat_srv_list; /* list head of server nodes */
struct list_head *bat_srv_hash; /* hash table of server nodes */
-} lstcon_batch_t; /* (tests ) batch descriptor */
+};
-typedef struct lstcon_test {
- lstcon_tsb_hdr_t tes_hdr; /* test batch header */
+/* a single test descriptor */
+struct lstcon_test {
+ struct lstcon_tsb_hdr tes_hdr; /* test batch header */
struct list_head tes_link; /* chain on batch's tests list */
- lstcon_batch_t *tes_batch; /* pointer to batch */
+ struct lstcon_batch *tes_batch; /* pointer to batch */
int tes_type; /* type of the test, i.e: bulk, ping */
int tes_stop_onerr; /* stop on error */
@@ -120,12 +125,12 @@ typedef struct lstcon_test {
int tes_cliidx; /* client index, used for RPC creating */
struct list_head tes_trans_list; /* transaction list */
- lstcon_group_t *tes_src_grp; /* group run the test */
- lstcon_group_t *tes_dst_grp; /* target group */
+ struct lstcon_group *tes_src_grp; /* group run the test */
+ struct lstcon_group *tes_dst_grp; /* target group */
int tes_paramlen; /* test parameter length */
char tes_param[0]; /* test parameter */
-} lstcon_test_t; /* a single test descriptor */
+};
#define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */
#define LST_NODE_HASHSIZE 239 /* node hash table (for batch or group) */
@@ -152,7 +157,7 @@ struct lstcon_session {
unsigned ses_expired:1; /* console is timedout */
__u64 ses_id_cookie; /* batch id cookie */
char ses_name[LST_NAME_SIZE];/* session name */
- lstcon_rpc_trans_t *ses_ping; /* session pinger */
+ struct lstcon_rpc_trans *ses_ping; /* session pinger */
struct stt_timer ses_ping_timer; /* timer for pinger */
lstcon_trans_stat_t ses_trans_stat; /* transaction stats */
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index e2c532399..30e4f71f1 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -109,19 +109,19 @@ static struct smoketest_framework {
struct list_head fw_tests; /* registered test cases */
atomic_t fw_nzombies; /* # zombie sessions */
spinlock_t fw_lock; /* serialise */
- sfw_session_t *fw_session; /* _the_ session */
+ struct sfw_session *fw_session; /* _the_ session */
int fw_shuttingdown; /* shutdown in progress */
struct srpc_server_rpc *fw_active_srpc;/* running RPC */
} sfw_data;
/* forward ref's */
-int sfw_stop_batch(sfw_batch_t *tsb, int force);
-void sfw_destroy_session(sfw_session_t *sn);
+int sfw_stop_batch(struct sfw_batch *tsb, int force);
+void sfw_destroy_session(struct sfw_session *sn);
-static inline sfw_test_case_t *
+static inline struct sfw_test_case *
sfw_find_test_case(int id)
{
- sfw_test_case_t *tsc;
+ struct sfw_test_case *tsc;
LASSERT(id <= SRPC_SERVICE_MAX_ID);
LASSERT(id > SRPC_FRAMEWORK_SERVICE_MAX_ID);
@@ -135,9 +135,9 @@ sfw_find_test_case(int id)
}
static int
-sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
+sfw_register_test(struct srpc_service *service,
+		  struct sfw_test_client_ops *cliops)
{
- sfw_test_case_t *tsc;
+ struct sfw_test_case *tsc;
if (sfw_find_test_case(service->sv_id)) {
CERROR("Failed to register test %s (%d)\n",
@@ -145,7 +145,7 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
return -EEXIST;
}
- LIBCFS_ALLOC(tsc, sizeof(sfw_test_case_t));
+ LIBCFS_ALLOC(tsc, sizeof(struct sfw_test_case));
if (!tsc)
return -ENOMEM;
@@ -159,7 +159,7 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
static void
sfw_add_session_timer(void)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
struct stt_timer *timer = &sn->sn_timer;
LASSERT(!sfw_data.fw_shuttingdown);
@@ -177,7 +177,7 @@ sfw_add_session_timer(void)
static int
sfw_del_session_timer(void)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
if (!sn || !sn->sn_timer_active)
return 0;
@@ -196,10 +196,10 @@ static void
sfw_deactivate_session(void)
__must_hold(&sfw_data.fw_lock)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
int nactive = 0;
- sfw_batch_t *tsb;
- sfw_test_case_t *tsc;
+ struct sfw_batch *tsb;
+ struct sfw_test_case *tsc;
if (!sn)
return;
@@ -226,7 +226,7 @@ __must_hold(&sfw_data.fw_lock)
}
if (nactive)
- return; /* wait for active batches to stop */
+ return; /* wait for active batches to stop */
list_del_init(&sn->sn_list);
spin_unlock(&sfw_data.fw_lock);
@@ -239,7 +239,7 @@ __must_hold(&sfw_data.fw_lock)
static void
sfw_session_expired(void *data)
{
- sfw_session_t *sn = data;
+ struct sfw_session *sn = data;
spin_lock(&sfw_data.fw_lock);
@@ -257,12 +257,12 @@ sfw_session_expired(void *data)
}
static inline void
-sfw_init_session(sfw_session_t *sn, lst_sid_t sid,
+sfw_init_session(struct sfw_session *sn, lst_sid_t sid,
unsigned features, const char *name)
{
struct stt_timer *timer = &sn->sn_timer;
- memset(sn, 0, sizeof(sfw_session_t));
+ memset(sn, 0, sizeof(struct sfw_session));
INIT_LIST_HEAD(&sn->sn_list);
INIT_LIST_HEAD(&sn->sn_batches);
atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
@@ -298,7 +298,7 @@ sfw_server_rpc_done(struct srpc_server_rpc *rpc)
}
static void
-sfw_client_rpc_fini(srpc_client_rpc_t *rpc)
+sfw_client_rpc_fini(struct srpc_client_rpc *rpc)
{
LASSERT(!rpc->crpc_bulk.bk_niov);
LASSERT(list_empty(&rpc->crpc_list));
@@ -318,11 +318,11 @@ sfw_client_rpc_fini(srpc_client_rpc_t *rpc)
spin_unlock(&sfw_data.fw_lock);
}
-static sfw_batch_t *
+static struct sfw_batch *
sfw_find_batch(lst_bid_t bid)
{
- sfw_session_t *sn = sfw_data.fw_session;
- sfw_batch_t *bat;
+ struct sfw_session *sn = sfw_data.fw_session;
+ struct sfw_batch *bat;
LASSERT(sn);
@@ -334,11 +334,11 @@ sfw_find_batch(lst_bid_t bid)
return NULL;
}
-static sfw_batch_t *
+static struct sfw_batch *
sfw_bid2batch(lst_bid_t bid)
{
- sfw_session_t *sn = sfw_data.fw_session;
- sfw_batch_t *bat;
+ struct sfw_session *sn = sfw_data.fw_session;
+ struct sfw_batch *bat;
LASSERT(sn);
@@ -346,7 +346,7 @@ sfw_bid2batch(lst_bid_t bid)
if (bat)
return bat;
- LIBCFS_ALLOC(bat, sizeof(sfw_batch_t));
+ LIBCFS_ALLOC(bat, sizeof(struct sfw_batch));
if (!bat)
return NULL;
@@ -361,11 +361,11 @@ sfw_bid2batch(lst_bid_t bid)
}
static int
-sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
+sfw_get_stats(struct srpc_stat_reqst *request, struct srpc_stat_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
sfw_counters_t *cnt = &reply->str_fw;
- sfw_batch_t *bat;
+ struct sfw_batch *bat;
reply->str_sid = !sn ? LST_INVALID_SID : sn->sn_id;
@@ -402,10 +402,10 @@ sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
}
int
-sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
+sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
- srpc_msg_t *msg = container_of(request, srpc_msg_t,
+ struct sfw_session *sn = sfw_data.fw_session;
+ struct srpc_msg *msg = container_of(request, struct srpc_msg,
msg_body.mksn_reqst);
int cplen = 0;
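
The container_of() call above walks from the embedded mksn_reqst body back to the enclosing srpc_msg. A self-contained sketch of the macro (as in the kernel, minus its type-checking helper), using hypothetical types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct mksn_reqst { int key; };

struct msg {
        int magic;
        struct mksn_reqst mksn_reqst;
};

static int msg_magic_of(struct mksn_reqst *req)
{
        /* subtract the member's offset to recover the outer struct */
        struct msg *m = container_of(req, struct msg, mksn_reqst);

        return m->magic;
}

int main(void)
{
        struct msg m = { .magic = 0xeeb, .mksn_reqst = { .key = 1 } };

        printf("magic: %#x\n", msg_magic_of(&m.mksn_reqst));
        return 0;
}
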
@@ -438,7 +438,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
/*
* reject the request if it requires unknown features
* NB: old version will always accept all features because it's not
- * aware of srpc_msg_t::msg_ses_feats, it's a defect but it's also
+ * aware of srpc_msg::msg_ses_feats, it's a defect but it's also
* harmless because it will return zero feature to console, and it's
* console's responsibility to make sure all nodes in a session have
* same feature mask.
@@ -449,7 +449,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
}
/* brand new or create by force */
- LIBCFS_ALLOC(sn, sizeof(sfw_session_t));
+ LIBCFS_ALLOC(sn, sizeof(struct sfw_session));
if (!sn) {
CERROR("dropping RPC mksn under memory pressure\n");
return -ENOMEM;
@@ -473,9 +473,9 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
}
static int
-sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
+sfw_remove_session(struct srpc_rmsn_reqst *request,
+		   struct srpc_rmsn_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
reply->rmsn_sid = !sn ? LST_INVALID_SID : sn->sn_id;
@@ -505,9 +505,9 @@ sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
}
static int
-sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
+sfw_debug_session(struct srpc_debug_reqst *request,
+		  struct srpc_debug_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
if (!sn) {
reply->dbg_status = ESRCH;
@@ -526,10 +526,10 @@ sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
}
static void
-sfw_test_rpc_fini(srpc_client_rpc_t *rpc)
+sfw_test_rpc_fini(struct srpc_client_rpc *rpc)
{
- sfw_test_unit_t *tsu = rpc->crpc_priv;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
+ struct sfw_test_unit *tsu = rpc->crpc_priv;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
/* Called with hold of tsi->tsi_lock */
LASSERT(list_empty(&rpc->crpc_list));
@@ -537,7 +537,7 @@ sfw_test_rpc_fini(srpc_client_rpc_t *rpc)
}
static inline int
-sfw_test_buffers(sfw_test_instance_t *tsi)
+sfw_test_buffers(struct sfw_test_instance *tsi)
{
struct sfw_test_case *tsc;
struct srpc_service *svc;
@@ -614,10 +614,10 @@ sfw_unload_test(struct sfw_test_instance *tsi)
}
static void
-sfw_destroy_test_instance(sfw_test_instance_t *tsi)
+sfw_destroy_test_instance(struct sfw_test_instance *tsi)
{
- srpc_client_rpc_t *rpc;
- sfw_test_unit_t *tsu;
+ struct srpc_client_rpc *rpc;
+ struct sfw_test_unit *tsu;
if (!tsi->tsi_is_client)
goto clean;
@@ -630,14 +630,14 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi)
while (!list_empty(&tsi->tsi_units)) {
tsu = list_entry(tsi->tsi_units.next,
- sfw_test_unit_t, tsu_list);
+ struct sfw_test_unit, tsu_list);
list_del(&tsu->tsu_list);
LIBCFS_FREE(tsu, sizeof(*tsu));
}
while (!list_empty(&tsi->tsi_free_rpcs)) {
rpc = list_entry(tsi->tsi_free_rpcs.next,
- srpc_client_rpc_t, crpc_list);
+ struct srpc_client_rpc, crpc_list);
list_del(&rpc->crpc_list);
LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
}
@@ -648,34 +648,34 @@ clean:
}
static void
-sfw_destroy_batch(sfw_batch_t *tsb)
+sfw_destroy_batch(struct sfw_batch *tsb)
{
- sfw_test_instance_t *tsi;
+ struct sfw_test_instance *tsi;
LASSERT(!sfw_batch_active(tsb));
LASSERT(list_empty(&tsb->bat_list));
while (!list_empty(&tsb->bat_tests)) {
tsi = list_entry(tsb->bat_tests.next,
- sfw_test_instance_t, tsi_list);
+ struct sfw_test_instance, tsi_list);
list_del_init(&tsi->tsi_list);
sfw_destroy_test_instance(tsi);
}
- LIBCFS_FREE(tsb, sizeof(sfw_batch_t));
+ LIBCFS_FREE(tsb, sizeof(struct sfw_batch));
}
void
-sfw_destroy_session(sfw_session_t *sn)
+sfw_destroy_session(struct sfw_session *sn)
{
- sfw_batch_t *batch;
+ struct sfw_batch *batch;
LASSERT(list_empty(&sn->sn_list));
LASSERT(sn != sfw_data.fw_session);
while (!list_empty(&sn->sn_batches)) {
batch = list_entry(sn->sn_batches.next,
- sfw_batch_t, bat_list);
+ struct sfw_batch, bat_list);
list_del_init(&batch->bat_list);
sfw_destroy_batch(batch);
}
@@ -685,28 +685,28 @@ sfw_destroy_session(sfw_session_t *sn)
}
static void
-sfw_unpack_addtest_req(srpc_msg_t *msg)
+sfw_unpack_addtest_req(struct srpc_msg *msg)
{
- srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
+ struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
LASSERT(msg->msg_type == SRPC_MSG_TEST_REQST);
LASSERT(req->tsr_is_client);
if (msg->msg_magic == SRPC_MSG_MAGIC)
- return; /* no flipping needed */
+ return; /* no flipping needed */
LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
if (req->tsr_service == SRPC_SERVICE_BRW) {
if (!(msg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
- test_bulk_req_t *bulk = &req->tsr_u.bulk_v0;
+ struct test_bulk_req *bulk = &req->tsr_u.bulk_v0;
__swab32s(&bulk->blk_opc);
__swab32s(&bulk->blk_npg);
__swab32s(&bulk->blk_flags);
} else {
- test_bulk_req_v1_t *bulk = &req->tsr_u.bulk_v1;
+ struct test_bulk_req_v1 *bulk = &req->tsr_u.bulk_v1;
__swab16s(&bulk->blk_opc);
__swab16s(&bulk->blk_flags);
@@ -718,7 +718,7 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
}
if (req->tsr_service == SRPC_SERVICE_PING) {
- test_ping_req_t *ping = &req->tsr_u.ping;
+ struct test_ping_req *ping = &req->tsr_u.ping;
__swab32s(&ping->png_size);
__swab32s(&ping->png_flags);
@@ -729,14 +729,14 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
}
static int
-sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc)
+sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc)
{
- srpc_msg_t *msg = &rpc->srpc_reqstbuf->buf_msg;
- srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
- srpc_bulk_t *bk = rpc->srpc_bulk;
+ struct srpc_msg *msg = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
+ struct srpc_bulk *bk = rpc->srpc_bulk;
int ndest = req->tsr_ndest;
- sfw_test_unit_t *tsu;
- sfw_test_instance_t *tsi;
+ struct sfw_test_unit *tsu;
+ struct sfw_test_instance *tsi;
int i;
int rc;
@@ -789,13 +789,13 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc)
int j;
dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
- LASSERT(dests); /* my pages are within KVM always */
+ LASSERT(dests); /* my pages are within KVM always */
id = dests[i % SFW_ID_PER_PAGE];
if (msg->msg_magic != SRPC_MSG_MAGIC)
sfw_unpack_id(id);
for (j = 0; j < tsi->tsi_concur; j++) {
- LIBCFS_ALLOC(tsu, sizeof(sfw_test_unit_t));
+ LIBCFS_ALLOC(tsu, sizeof(struct sfw_test_unit));
if (!tsu) {
rc = -ENOMEM;
CERROR("Can't allocate tsu for %d\n",
@@ -824,11 +824,11 @@ error:
}
static void
-sfw_test_unit_done(sfw_test_unit_t *tsu)
+sfw_test_unit_done(struct sfw_test_unit *tsu)
{
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_batch_t *tsb = tsi->tsi_batch;
- sfw_session_t *sn = tsb->bat_session;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_batch *tsb = tsi->tsi_batch;
+ struct sfw_session *sn = tsb->bat_session;
LASSERT(sfw_test_active(tsi));
@@ -844,8 +844,8 @@ sfw_test_unit_done(sfw_test_unit_t *tsu)
spin_lock(&sfw_data.fw_lock);
- if (!atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
- sn == sfw_data.fw_session) { /* sn also active */
+ if (!atomic_dec_and_test(&tsb->bat_nactive) || /* tsb still active */
+ sn == sfw_data.fw_session) { /* sn also active */
spin_unlock(&sfw_data.fw_lock);
return;
}
@@ -866,10 +866,10 @@ sfw_test_unit_done(sfw_test_unit_t *tsu)
}
static void
-sfw_test_rpc_done(srpc_client_rpc_t *rpc)
+sfw_test_rpc_done(struct srpc_client_rpc *rpc)
{
- sfw_test_unit_t *tsu = rpc->crpc_priv;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
+ struct sfw_test_unit *tsu = rpc->crpc_priv;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
int done = 0;
tsi->tsi_ops->tso_done_rpc(tsu, rpc);
@@ -900,19 +900,19 @@ sfw_test_rpc_done(srpc_client_rpc_t *rpc)
}
int
-sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
+sfw_create_test_rpc(struct sfw_test_unit *tsu, lnet_process_id_t peer,
unsigned features, int nblk, int blklen,
- srpc_client_rpc_t **rpcpp)
+ struct srpc_client_rpc **rpcpp)
{
- srpc_client_rpc_t *rpc = NULL;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
+ struct srpc_client_rpc *rpc = NULL;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
spin_lock(&tsi->tsi_lock);
LASSERT(sfw_test_active(tsi));
/* pick request from buffer */
rpc = list_first_entry_or_null(&tsi->tsi_free_rpcs,
- srpc_client_rpc_t, crpc_list);
+ struct srpc_client_rpc, crpc_list);
if (rpc) {
LASSERT(nblk == rpc->crpc_bulk.bk_niov);
list_del_init(&rpc->crpc_list);
@@ -942,11 +942,11 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
}
static int
-sfw_run_test(swi_workitem_t *wi)
+sfw_run_test(struct swi_workitem *wi)
{
- sfw_test_unit_t *tsu = wi->swi_workitem.wi_data;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- srpc_client_rpc_t *rpc = NULL;
+ struct sfw_test_unit *tsu = wi->swi_workitem.wi_data;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct srpc_client_rpc *rpc = NULL;
LASSERT(wi == &tsu->tsu_worker);
@@ -991,11 +991,11 @@ test_done:
}
static int
-sfw_run_batch(sfw_batch_t *tsb)
+sfw_run_batch(struct sfw_batch *tsb)
{
- swi_workitem_t *wi;
- sfw_test_unit_t *tsu;
- sfw_test_instance_t *tsi;
+ struct swi_workitem *wi;
+ struct sfw_test_unit *tsu;
+ struct sfw_test_instance *tsi;
if (sfw_batch_active(tsb)) {
CDEBUG(D_NET, "Batch already active: %llu (%d)\n",
@@ -1026,10 +1026,10 @@ sfw_run_batch(sfw_batch_t *tsb)
}
int
-sfw_stop_batch(sfw_batch_t *tsb, int force)
+sfw_stop_batch(struct sfw_batch *tsb, int force)
{
- sfw_test_instance_t *tsi;
- srpc_client_rpc_t *rpc;
+ struct sfw_test_instance *tsi;
+ struct srpc_client_rpc *rpc;
if (!sfw_batch_active(tsb)) {
CDEBUG(D_NET, "Batch %llu inactive\n", tsb->bat_id.bat_id);
@@ -1068,9 +1068,9 @@ sfw_stop_batch(sfw_batch_t *tsb, int force)
}
static int
-sfw_query_batch(sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply)
+sfw_query_batch(struct sfw_batch *tsb, int testidx, struct srpc_batch_reply *reply)
{
- sfw_test_instance_t *tsi;
+ struct sfw_test_instance *tsi;
if (testidx < 0)
return -EINVAL;
@@ -1115,11 +1115,11 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
static int
sfw_add_test(struct srpc_server_rpc *rpc)
{
- sfw_session_t *sn = sfw_data.fw_session;
- srpc_test_reply_t *reply = &rpc->srpc_replymsg.msg_body.tes_reply;
- srpc_test_reqst_t *request;
+ struct sfw_session *sn = sfw_data.fw_session;
+ struct srpc_test_reply *reply = &rpc->srpc_replymsg.msg_body.tes_reply;
+ struct srpc_test_reqst *request;
int rc;
- sfw_batch_t *bat;
+ struct sfw_batch *bat;
request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst;
reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id;
@@ -1183,11 +1183,11 @@ sfw_add_test(struct srpc_server_rpc *rpc)
}
static int
-sfw_control_batch(srpc_batch_reqst_t *request, srpc_batch_reply_t *reply)
+sfw_control_batch(struct srpc_batch_reqst *request, struct srpc_batch_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
int rc = 0;
- sfw_batch_t *bat;
+ struct sfw_batch *bat;
reply->bar_sid = !sn ? LST_INVALID_SID : sn->sn_id;
@@ -1227,8 +1227,8 @@ static int
sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- srpc_msg_t *reply = &rpc->srpc_replymsg;
- srpc_msg_t *request = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_msg *reply = &rpc->srpc_replymsg;
+ struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg;
unsigned features = LST_FEATS_MASK;
int rc = 0;
@@ -1244,7 +1244,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
/* Remove timer to avoid racing with it or expiring active session */
if (sfw_del_session_timer()) {
- CERROR("Dropping RPC (%s) from %s: racing with expiry timer.",
+ CERROR("dropping RPC %s from %s: racing with expiry timer\n",
sv->sv_name, libcfs_id2str(rpc->srpc_peer));
spin_unlock(&sfw_data.fw_lock);
return -EAGAIN;
@@ -1261,7 +1261,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION &&
sv->sv_id != SRPC_SERVICE_DEBUG) {
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
if (sn &&
sn->sn_features != request->msg_ses_feats) {
@@ -1273,7 +1273,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
}
} else if (request->msg_ses_feats & ~LST_FEATS_MASK) {
- /**
+ /*
* NB: at this point, old version will ignore features and
* create new session anyway, so console should be able
* to handle this
@@ -1377,12 +1377,12 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
return rc;
}
-srpc_client_rpc_t *
+struct srpc_client_rpc *
sfw_create_rpc(lnet_process_id_t peer, int service,
unsigned features, int nbulkiov, int bulklen,
- void (*done)(srpc_client_rpc_t *), void *priv)
+ void (*done)(struct srpc_client_rpc *), void *priv)
{
- srpc_client_rpc_t *rpc = NULL;
+ struct srpc_client_rpc *rpc = NULL;
spin_lock(&sfw_data.fw_lock);
@@ -1391,7 +1391,7 @@ sfw_create_rpc(lnet_process_id_t peer, int service,
if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) {
rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
- srpc_client_rpc_t, crpc_list);
+ struct srpc_client_rpc, crpc_list);
list_del(&rpc->crpc_list);
srpc_init_client_rpc(rpc, peer, service, 0, 0,
@@ -1415,7 +1415,7 @@ sfw_create_rpc(lnet_process_id_t peer, int service,
}
void
-sfw_unpack_message(srpc_msg_t *msg)
+sfw_unpack_message(struct srpc_msg *msg)
{
if (msg->msg_magic == SRPC_MSG_MAGIC)
return; /* no flipping needed */
@@ -1424,7 +1424,7 @@ sfw_unpack_message(srpc_msg_t *msg)
LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
if (msg->msg_type == SRPC_MSG_STAT_REQST) {
- srpc_stat_reqst_t *req = &msg->msg_body.stat_reqst;
+ struct srpc_stat_reqst *req = &msg->msg_body.stat_reqst;
__swab32s(&req->str_type);
__swab64s(&req->str_rpyid);
@@ -1433,7 +1433,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_STAT_REPLY) {
- srpc_stat_reply_t *rep = &msg->msg_body.stat_reply;
+ struct srpc_stat_reply *rep = &msg->msg_body.stat_reply;
__swab32s(&rep->str_status);
sfw_unpack_sid(rep->str_sid);
@@ -1444,7 +1444,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_MKSN_REQST) {
- srpc_mksn_reqst_t *req = &msg->msg_body.mksn_reqst;
+ struct srpc_mksn_reqst *req = &msg->msg_body.mksn_reqst;
__swab64s(&req->mksn_rpyid);
__swab32s(&req->mksn_force);
@@ -1453,7 +1453,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_MKSN_REPLY) {
- srpc_mksn_reply_t *rep = &msg->msg_body.mksn_reply;
+ struct srpc_mksn_reply *rep = &msg->msg_body.mksn_reply;
__swab32s(&rep->mksn_status);
__swab32s(&rep->mksn_timeout);
@@ -1462,7 +1462,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_RMSN_REQST) {
- srpc_rmsn_reqst_t *req = &msg->msg_body.rmsn_reqst;
+ struct srpc_rmsn_reqst *req = &msg->msg_body.rmsn_reqst;
__swab64s(&req->rmsn_rpyid);
sfw_unpack_sid(req->rmsn_sid);
@@ -1470,7 +1470,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_RMSN_REPLY) {
- srpc_rmsn_reply_t *rep = &msg->msg_body.rmsn_reply;
+ struct srpc_rmsn_reply *rep = &msg->msg_body.rmsn_reply;
__swab32s(&rep->rmsn_status);
sfw_unpack_sid(rep->rmsn_sid);
@@ -1478,7 +1478,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_DEBUG_REQST) {
- srpc_debug_reqst_t *req = &msg->msg_body.dbg_reqst;
+ struct srpc_debug_reqst *req = &msg->msg_body.dbg_reqst;
__swab64s(&req->dbg_rpyid);
__swab32s(&req->dbg_flags);
@@ -1487,7 +1487,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_DEBUG_REPLY) {
- srpc_debug_reply_t *rep = &msg->msg_body.dbg_reply;
+ struct srpc_debug_reply *rep = &msg->msg_body.dbg_reply;
__swab32s(&rep->dbg_nbatch);
__swab32s(&rep->dbg_timeout);
@@ -1496,7 +1496,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_BATCH_REQST) {
- srpc_batch_reqst_t *req = &msg->msg_body.bat_reqst;
+ struct srpc_batch_reqst *req = &msg->msg_body.bat_reqst;
__swab32s(&req->bar_opc);
__swab64s(&req->bar_rpyid);
@@ -1508,7 +1508,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_BATCH_REPLY) {
- srpc_batch_reply_t *rep = &msg->msg_body.bat_reply;
+ struct srpc_batch_reply *rep = &msg->msg_body.bat_reply;
__swab32s(&rep->bar_status);
sfw_unpack_sid(rep->bar_sid);
@@ -1516,7 +1516,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_TEST_REQST) {
- srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
+ struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
__swab64s(&req->tsr_rpyid);
__swab64s(&req->tsr_bulkid);
@@ -1530,7 +1530,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_TEST_REPLY) {
- srpc_test_reply_t *rep = &msg->msg_body.tes_reply;
+ struct srpc_test_reply *rep = &msg->msg_body.tes_reply;
__swab32s(&rep->tsr_status);
sfw_unpack_sid(rep->tsr_sid);
@@ -1538,7 +1538,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_JOIN_REQST) {
- srpc_join_reqst_t *req = &msg->msg_body.join_reqst;
+ struct srpc_join_reqst *req = &msg->msg_body.join_reqst;
__swab64s(&req->join_rpyid);
sfw_unpack_sid(req->join_sid);
@@ -1546,7 +1546,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_JOIN_REPLY) {
- srpc_join_reply_t *rep = &msg->msg_body.join_reply;
+ struct srpc_join_reply *rep = &msg->msg_body.join_reply;
__swab32s(&rep->join_status);
__swab32s(&rep->join_timeout);
@@ -1558,7 +1558,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
void
-sfw_abort_rpc(srpc_client_rpc_t *rpc)
+sfw_abort_rpc(struct srpc_client_rpc *rpc)
{
LASSERT(atomic_read(&rpc->crpc_refcount) > 0);
LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
@@ -1569,7 +1569,7 @@ sfw_abort_rpc(srpc_client_rpc_t *rpc)
}
void
-sfw_post_rpc(srpc_client_rpc_t *rpc)
+sfw_post_rpc(struct srpc_client_rpc *rpc)
{
spin_lock(&rpc->crpc_lock);
@@ -1584,7 +1584,7 @@ sfw_post_rpc(srpc_client_rpc_t *rpc)
spin_unlock(&rpc->crpc_lock);
}
-static srpc_service_t sfw_services[] = {
+static struct srpc_service sfw_services[] = {
{
/* sv_id */ SRPC_SERVICE_DEBUG,
/* sv_name */ "debug",
@@ -1628,8 +1628,8 @@ sfw_startup(void)
int i;
int rc;
int error;
- srpc_service_t *sv;
- sfw_test_case_t *tsc;
+ struct srpc_service *sv;
+ struct sfw_test_case *tsc;
if (session_timeout < 0) {
CERROR("Session timeout must be non-negative: %d\n",
@@ -1721,8 +1721,8 @@ sfw_startup(void)
void
sfw_shutdown(void)
{
- srpc_service_t *sv;
- sfw_test_case_t *tsc;
+ struct srpc_service *sv;
+ struct sfw_test_case *tsc;
int i;
spin_lock(&sfw_data.fw_lock);
@@ -1759,10 +1759,10 @@ sfw_shutdown(void)
}
while (!list_empty(&sfw_data.fw_zombie_rpcs)) {
- srpc_client_rpc_t *rpc;
+ struct srpc_client_rpc *rpc;
rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
- srpc_client_rpc_t, crpc_list);
+ struct srpc_client_rpc, crpc_list);
list_del(&rpc->crpc_list);
LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
@@ -1778,7 +1778,7 @@ sfw_shutdown(void)
while (!list_empty(&sfw_data.fw_tests)) {
tsc = list_entry(sfw_data.fw_tests.next,
- sfw_test_case_t, tsc_list);
+ struct sfw_test_case, tsc_list);
srpc_wait_service_shutdown(tsc->tsc_srv_service);
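The conversion running through this file is mechanical: every foo_t typedef for a struct is replaced by its struct foo tag, following the kernel style rule that typedefs should not hide struct types. A minimal sketch of the before/after pattern, using a hypothetical struct sfw_widget rather than any type from this patch:

	/* before: the typedef hides that callers hold a struct */
	typedef struct sfw_widget {
		struct list_head wg_list;
	} sfw_widget_t;

	/* after: the struct tag is spelled out at every use site */
	struct sfw_widget {
		struct list_head wg_list;	/* chain on owner's list */
	};

	static void sfw_free_widget(struct sfw_widget *wg)
	{
		list_del(&wg->wg_list);
		LIBCFS_FREE(wg, sizeof(*wg));	/* sizeof(*ptr) is rename-proof */
	}

Sites already spelled sizeof(*tsu) or list_entry(..., struct sfw_test_unit, tsu_list) need only the type name updated; the generated code is identical.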
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index 81a45045e..ad26fe9dd 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -56,9 +56,9 @@ struct lst_ping_data {
static struct lst_ping_data lst_ping_data;
static int
-ping_client_init(sfw_test_instance_t *tsi)
+ping_client_init(struct sfw_test_instance *tsi)
{
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
LASSERT(tsi->tsi_is_client);
LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK));
@@ -70,9 +70,9 @@ ping_client_init(sfw_test_instance_t *tsi)
}
static void
-ping_client_fini(sfw_test_instance_t *tsi)
+ping_client_fini(struct sfw_test_instance *tsi)
{
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
int errors;
LASSERT(sn);
@@ -86,12 +86,12 @@ ping_client_fini(sfw_test_instance_t *tsi)
}
static int
-ping_client_prep_rpc(sfw_test_unit_t *tsu,
- lnet_process_id_t dest, srpc_client_rpc_t **rpc)
+ping_client_prep_rpc(struct sfw_test_unit *tsu, lnet_process_id_t dest,
+ struct srpc_client_rpc **rpc)
{
- srpc_ping_reqst_t *req;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ struct srpc_ping_reqst *req;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
struct timespec64 ts;
int rc;
@@ -118,18 +118,18 @@ ping_client_prep_rpc(sfw_test_unit_t *tsu,
}
static void
-ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
+ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
{
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- srpc_ping_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst;
- srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
+ struct srpc_ping_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst;
+ struct srpc_ping_reply *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
struct timespec64 ts;
LASSERT(sn);
if (rpc->crpc_status) {
- if (!tsi->tsi_stopping) /* rpc could have been aborted */
+ if (!tsi->tsi_stopping) /* rpc could have been aborted */
atomic_inc(&sn->sn_ping_errors);
CERROR("Unable to ping %s (%d): %d\n",
libcfs_id2str(rpc->crpc_dest),
@@ -171,10 +171,10 @@ static int
ping_server_handle(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
- srpc_msg_t *replymsg = &rpc->srpc_replymsg;
- srpc_ping_reqst_t *req = &reqstmsg->msg_body.ping_reqst;
- srpc_ping_reply_t *rep = &rpc->srpc_replymsg.msg_body.ping_reply;
+ struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_msg *replymsg = &rpc->srpc_replymsg;
+ struct srpc_ping_reqst *req = &reqstmsg->msg_body.ping_reqst;
+ struct srpc_ping_reply *rep = &rpc->srpc_replymsg.msg_body.ping_reply;
LASSERT(sv->sv_id == SRPC_SERVICE_PING);
@@ -210,7 +210,8 @@ ping_server_handle(struct srpc_server_rpc *rpc)
return 0;
}
-sfw_test_client_ops_t ping_test_client;
+struct sfw_test_client_ops ping_test_client;
+
void ping_init_test_client(void)
{
ping_test_client.tso_init = ping_client_init;
@@ -219,7 +220,8 @@ void ping_init_test_client(void)
ping_test_client.tso_done_rpc = ping_client_done_rpc;
}
-srpc_service_t ping_test_service;
+struct srpc_service ping_test_service;
+
void ping_init_test_service(void)
{
ping_test_service.sv_id = SRPC_SERVICE_PING;
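ping_test_client is now a bare struct populated at module init rather than by a static initializer, and the framework dispatches through its function pointers. A sketch of that dispatch, assuming only the tso_* fields declared in selftest.h below; the caller shown here is hypothetical, not part of the patch:

	static int sfw_unit_start(struct sfw_test_unit *tsu, lnet_process_id_t dest)
	{
		struct sfw_test_instance *tsi = tsu->tsu_instance;
		struct srpc_client_rpc *rpc = NULL;
		int rc;

		/* test-type-specific hook builds the RPC (ping, brw, ...) */
		rc = tsi->tsi_ops->tso_prep_rpc(tsu, dest, &rpc);
		if (rc)
			return rc;

		sfw_post_rpc(rpc);	/* generic framework sends it */
		return 0;
	}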
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 7d7748d96..3c45a7cfa 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -46,19 +46,19 @@
#include "selftest.h"
-typedef enum {
+enum srpc_state {
SRPC_STATE_NONE,
SRPC_STATE_NI_INIT,
SRPC_STATE_EQ_INIT,
SRPC_STATE_RUNNING,
SRPC_STATE_STOPPING,
-} srpc_state_t;
+};
static struct smoketest_rpc {
spinlock_t rpc_glock; /* global lock */
- srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1];
+ struct srpc_service *rpc_services[SRPC_SERVICE_MAX_ID + 1];
lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
- srpc_state_t rpc_state;
+ enum srpc_state rpc_state;
srpc_counters_t rpc_counters;
__u64 rpc_matchbits; /* matchbits counter */
} srpc_data;
@@ -71,7 +71,7 @@ srpc_serv_portal(int svc_id)
}
/* forward ref's */
-int srpc_handle_rpc(swi_workitem_t *wi);
+int srpc_handle_rpc(struct swi_workitem *wi);
void srpc_get_counters(srpc_counters_t *cnt)
{
@@ -88,7 +88,7 @@ void srpc_set_counters(const srpc_counters_t *cnt)
}
static int
-srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
+srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int nob)
{
nob = min_t(int, nob, PAGE_SIZE);
@@ -102,7 +102,7 @@ srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
}
void
-srpc_free_bulk(srpc_bulk_t *bk)
+srpc_free_bulk(struct srpc_bulk *bk)
{
int i;
struct page *pg;
@@ -117,25 +117,25 @@ srpc_free_bulk(srpc_bulk_t *bk)
__free_page(pg);
}
- LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
+ LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov]));
}
-srpc_bulk_t *
+struct srpc_bulk *
srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
{
- srpc_bulk_t *bk;
+ struct srpc_bulk *bk;
int i;
LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
- offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
+ offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
if (!bk) {
CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
return NULL;
}
- memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
+ memset(bk, 0, offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
bk->bk_sink = sink;
bk->bk_len = bulk_len;
bk->bk_niov = bulk_npg;
@@ -256,7 +256,7 @@ srpc_service_init(struct srpc_service *svc)
svc->sv_shuttingdown = 0;
svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(struct srpc_service_cd));
+ sizeof(*svc->sv_cpt_data));
if (!svc->sv_cpt_data)
return -ENOMEM;
@@ -338,7 +338,7 @@ srpc_add_service(struct srpc_service *sv)
}
int
-srpc_remove_service(srpc_service_t *sv)
+srpc_remove_service(struct srpc_service *sv)
{
int id = sv->sv_id;
@@ -357,7 +357,7 @@ srpc_remove_service(srpc_service_t *sv)
static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
int len, int options, lnet_process_id_t peer,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
+ lnet_handle_md_t *mdh, struct srpc_event *ev)
{
int rc;
lnet_md_t md;
@@ -396,7 +396,7 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
int options, lnet_process_id_t peer, lnet_nid_t self,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
+ lnet_handle_md_t *mdh, struct srpc_event *ev)
{
int rc;
lnet_md_t md;
@@ -449,7 +449,7 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
+ lnet_handle_md_t *mdh, struct srpc_event *ev)
{
lnet_process_id_t any = { 0 };
@@ -697,7 +697,7 @@ srpc_finish_service(struct srpc_service *sv)
/* called with sv->sv_lock held */
static void
-srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
+srpc_service_recycle_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
@@ -755,11 +755,11 @@ srpc_abort_service(struct srpc_service *sv)
}
void
-srpc_shutdown_service(srpc_service_t *sv)
+srpc_shutdown_service(struct srpc_service *sv)
{
struct srpc_service_cd *scd;
struct srpc_server_rpc *rpc;
- srpc_buffer_t *buf;
+ struct srpc_buffer *buf;
int i;
CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
@@ -792,9 +792,9 @@ srpc_shutdown_service(srpc_service_t *sv)
}
static int
-srpc_send_request(srpc_client_rpc_t *rpc)
+srpc_send_request(struct srpc_client_rpc *rpc)
{
- srpc_event_t *ev = &rpc->crpc_reqstev;
+ struct srpc_event *ev = &rpc->crpc_reqstev;
int rc;
ev->ev_fired = 0;
@@ -803,7 +803,7 @@ srpc_send_request(srpc_client_rpc_t *rpc)
rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
rpc->crpc_service, &rpc->crpc_reqstmsg,
- sizeof(srpc_msg_t), LNET_MD_OP_PUT,
+ sizeof(struct srpc_msg), LNET_MD_OP_PUT,
rpc->crpc_dest, LNET_NID_ANY,
&rpc->crpc_reqstmdh, ev);
if (rc) {
@@ -814,9 +814,9 @@ srpc_send_request(srpc_client_rpc_t *rpc)
}
static int
-srpc_prepare_reply(srpc_client_rpc_t *rpc)
+srpc_prepare_reply(struct srpc_client_rpc *rpc)
{
- srpc_event_t *ev = &rpc->crpc_replyev;
+ struct srpc_event *ev = &rpc->crpc_replyev;
__u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
int rc;
@@ -827,7 +827,8 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc)
*id = srpc_next_id();
rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
- &rpc->crpc_replymsg, sizeof(srpc_msg_t),
+ &rpc->crpc_replymsg,
+ sizeof(struct srpc_msg),
LNET_MD_OP_PUT, rpc->crpc_dest,
&rpc->crpc_replymdh, ev);
if (rc) {
@@ -838,10 +839,10 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc)
}
static int
-srpc_prepare_bulk(srpc_client_rpc_t *rpc)
+srpc_prepare_bulk(struct srpc_client_rpc *rpc)
{
- srpc_bulk_t *bk = &rpc->crpc_bulk;
- srpc_event_t *ev = &rpc->crpc_bulkev;
+ struct srpc_bulk *bk = &rpc->crpc_bulk;
+ struct srpc_event *ev = &rpc->crpc_bulkev;
__u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
int rc;
int opt;
@@ -873,8 +874,8 @@ srpc_prepare_bulk(srpc_client_rpc_t *rpc)
static int
srpc_do_bulk(struct srpc_server_rpc *rpc)
{
- srpc_event_t *ev = &rpc->srpc_ev;
- srpc_bulk_t *bk = rpc->srpc_bulk;
+ struct srpc_event *ev = &rpc->srpc_ev;
+ struct srpc_bulk *bk = rpc->srpc_bulk;
__u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
int rc;
int opt;
@@ -903,7 +904,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
{
struct srpc_service_cd *scd = rpc->srpc_scd;
struct srpc_service *sv = scd->scd_svc;
- srpc_buffer_t *buffer;
+ struct srpc_buffer *buffer;
LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
@@ -948,7 +949,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
buffer = list_entry(scd->scd_buf_blocked.next,
- srpc_buffer_t, buf_list);
+ struct srpc_buffer, buf_list);
list_del(&buffer->buf_list);
srpc_init_server_rpc(rpc, scd, buffer);
@@ -963,12 +964,12 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
/* handles an incoming RPC */
int
-srpc_handle_rpc(swi_workitem_t *wi)
+srpc_handle_rpc(struct swi_workitem *wi)
{
struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data;
struct srpc_service_cd *scd = rpc->srpc_scd;
struct srpc_service *sv = scd->scd_svc;
- srpc_event_t *ev = &rpc->srpc_ev;
+ struct srpc_event *ev = &rpc->srpc_ev;
int rc = 0;
LASSERT(wi == &rpc->srpc_wi);
@@ -995,8 +996,8 @@ srpc_handle_rpc(swi_workitem_t *wi)
default:
LBUG();
case SWI_STATE_NEWBORN: {
- srpc_msg_t *msg;
- srpc_generic_reply_t *reply;
+ struct srpc_msg *msg;
+ struct srpc_generic_reply *reply;
msg = &rpc->srpc_reqstbuf->buf_msg;
reply = &rpc->srpc_replymsg.msg_body.reply;
@@ -1077,7 +1078,7 @@ srpc_handle_rpc(swi_workitem_t *wi)
static void
srpc_client_rpc_expired(void *data)
{
- srpc_client_rpc_t *rpc = data;
+ struct srpc_client_rpc *rpc = data;
CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
@@ -1096,7 +1097,7 @@ srpc_client_rpc_expired(void *data)
}
static void
-srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
+srpc_add_client_rpc_timer(struct srpc_client_rpc *rpc)
{
struct stt_timer *timer = &rpc->crpc_timer;
@@ -1117,7 +1118,7 @@ srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
* running on any CPU.
*/
static void
-srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
+srpc_del_client_rpc_timer(struct srpc_client_rpc *rpc)
{
/* timer not planted or already exploded */
if (!rpc->crpc_timeout)
@@ -1138,9 +1139,9 @@ srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
}
static void
-srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
+srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
{
- swi_workitem_t *wi = &rpc->crpc_wi;
+ struct swi_workitem *wi = &rpc->crpc_wi;
LASSERT(status || wi->swi_state == SWI_STATE_DONE);
@@ -1175,11 +1176,11 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
/* sends an outgoing RPC */
int
-srpc_send_rpc(swi_workitem_t *wi)
+srpc_send_rpc(struct swi_workitem *wi)
{
int rc = 0;
- srpc_client_rpc_t *rpc;
- srpc_msg_t *reply;
+ struct srpc_client_rpc *rpc;
+ struct srpc_msg *reply;
int do_bulk;
LASSERT(wi);
@@ -1237,7 +1238,7 @@ srpc_send_rpc(swi_workitem_t *wi)
wi->swi_state = SWI_STATE_REQUEST_SENT;
/* perhaps more events, fall thru */
case SWI_STATE_REQUEST_SENT: {
- srpc_msg_type_t type = srpc_service2reply(rpc->crpc_service);
+ enum srpc_msg_type type = srpc_service2reply(rpc->crpc_service);
if (!rpc->crpc_replyev.ev_fired)
break;
@@ -1308,15 +1309,15 @@ abort:
return 0;
}
-srpc_client_rpc_t *
+struct srpc_client_rpc *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+ void (*rpc_done)(struct srpc_client_rpc *),
+ void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
{
- srpc_client_rpc_t *rpc;
+ struct srpc_client_rpc *rpc;
- LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
+ LIBCFS_ALLOC(rpc, offsetof(struct srpc_client_rpc,
crpc_bulk.bk_iovs[nbulkiov]));
if (!rpc)
return NULL;
@@ -1328,12 +1329,12 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service,
/* called with rpc->crpc_lock held */
void
-srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
+srpc_abort_rpc(struct srpc_client_rpc *rpc, int why)
{
LASSERT(why);
- if (rpc->crpc_aborted || /* already aborted */
- rpc->crpc_closed) /* callback imminent */
+ if (rpc->crpc_aborted || /* already aborted */
+ rpc->crpc_closed) /* callback imminent */
return;
CDEBUG(D_NET, "Aborting RPC: service %d, peer %s, state %s, why %d\n",
@@ -1347,7 +1348,7 @@ srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
/* called with rpc->crpc_lock held */
void
-srpc_post_rpc(srpc_client_rpc_t *rpc)
+srpc_post_rpc(struct srpc_client_rpc *rpc)
{
LASSERT(!rpc->crpc_aborted);
LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
@@ -1363,7 +1364,7 @@ srpc_post_rpc(srpc_client_rpc_t *rpc)
int
srpc_send_reply(struct srpc_server_rpc *rpc)
{
- srpc_event_t *ev = &rpc->srpc_ev;
+ struct srpc_event *ev = &rpc->srpc_ev;
struct srpc_msg *msg = &rpc->srpc_replymsg;
struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
struct srpc_service_cd *scd = rpc->srpc_scd;
@@ -1401,7 +1402,7 @@ srpc_send_reply(struct srpc_server_rpc *rpc)
rpc->srpc_peer, rpc->srpc_self,
&rpc->srpc_replymdh, ev);
if (rc)
- ev->ev_fired = 1; /* no more event expected */
+ ev->ev_fired = 1; /* no more event expected */
return rc;
}
@@ -1410,13 +1411,13 @@ static void
srpc_lnet_ev_handler(lnet_event_t *ev)
{
struct srpc_service_cd *scd;
- srpc_event_t *rpcev = ev->md.user_ptr;
- srpc_client_rpc_t *crpc;
+ struct srpc_event *rpcev = ev->md.user_ptr;
+ struct srpc_client_rpc *crpc;
struct srpc_server_rpc *srpc;
- srpc_buffer_t *buffer;
- srpc_service_t *sv;
- srpc_msg_t *msg;
- srpc_msg_type_t type;
+ struct srpc_buffer *buffer;
+ struct srpc_service *sv;
+ struct srpc_msg *msg;
+ enum srpc_msg_type type;
LASSERT(!in_interrupt());
@@ -1486,7 +1487,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
LASSERT(ev->type != LNET_EVENT_UNLINK ||
sv->sv_shuttingdown);
- buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg);
+ buffer = container_of(ev->md.start, struct srpc_buffer, buf_msg);
buffer->buf_peer = ev->initiator;
buffer->buf_self = ev->target.nid;
@@ -1509,7 +1510,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
scd->scd_buf_err = 0;
}
- if (!scd->scd_buf_err && /* adding buffer is enabled */
+ if (!scd->scd_buf_err && /* adding buffer is enabled */
!scd->scd_buf_adjust &&
scd->scd_buf_nposted < scd->scd_buf_low) {
scd->scd_buf_adjust = max(scd->scd_buf_total / 2,
@@ -1663,7 +1664,7 @@ srpc_shutdown(void)
spin_lock(&srpc_data.rpc_glock);
for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
- srpc_service_t *sv = srpc_data.rpc_services[i];
+ struct srpc_service *sv = srpc_data.rpc_services[i];
LASSERTF(!sv, "service not empty: id %d, name %s\n",
i, sv->sv_name);
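srpc_bulk and srpc_client_rpc both end in a flexible bk_iovs[] array, so the allocations and frees above size the object with offsetof() rather than sizeof(). The idiom, condensed from srpc_alloc_bulk()/srpc_free_bulk():

	struct srpc_bulk *bk;

	/* one allocation covers the header plus niov trailing iovecs */
	LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
			 offsetof(struct srpc_bulk, bk_iovs[niov]));
	if (!bk)
		return NULL;
	bk->bk_niov = niov;

	/* ... use bk ... */

	/* free with the same size expression, derived from the object */
	LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov]));

srpc_client_rpc_size() in selftest.h is the same computation wrapped in a macro for the client-RPC case.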
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index a79c315f2..c9b904cad 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -44,7 +44,7 @@
*
* XXX: *REPLY == *REQST + 1
*/
-typedef enum {
+enum srpc_msg_type {
SRPC_MSG_MKSN_REQST = 0,
SRPC_MSG_MKSN_REPLY = 1,
SRPC_MSG_RMSN_REQST = 2,
@@ -63,7 +63,7 @@ typedef enum {
SRPC_MSG_PING_REPLY = 15,
SRPC_MSG_JOIN_REQST = 16,
SRPC_MSG_JOIN_REPLY = 17,
-} srpc_msg_type_t;
+};
/* CAVEAT EMPTOR:
* All srpc_*_reqst_t's 1st field must be matchbits of reply buffer,
@@ -72,122 +72,122 @@ typedef enum {
* All srpc_*_reply_t's 1st field must be a __u32 status, and 2nd field
* session id if needed.
*/
-typedef struct {
+struct srpc_generic_reqst {
__u64 rpyid; /* reply buffer matchbits */
__u64 bulkid; /* bulk buffer matchbits */
-} WIRE_ATTR srpc_generic_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_generic_reply {
__u32 status;
lst_sid_t sid;
-} WIRE_ATTR srpc_generic_reply_t;
+} WIRE_ATTR;
/* FRAMEWORK RPCs */
-typedef struct {
+struct srpc_mksn_reqst {
__u64 mksn_rpyid; /* reply buffer matchbits */
lst_sid_t mksn_sid; /* session id */
__u32 mksn_force; /* use brute force */
char mksn_name[LST_NAME_SIZE];
-} WIRE_ATTR srpc_mksn_reqst_t; /* make session request */
+} WIRE_ATTR; /* make session request */
-typedef struct {
+struct srpc_mksn_reply {
__u32 mksn_status; /* session status */
lst_sid_t mksn_sid; /* session id */
__u32 mksn_timeout; /* session timeout */
char mksn_name[LST_NAME_SIZE];
-} WIRE_ATTR srpc_mksn_reply_t; /* make session reply */
+} WIRE_ATTR; /* make session reply */
-typedef struct {
+struct srpc_rmsn_reqst {
__u64 rmsn_rpyid; /* reply buffer matchbits */
lst_sid_t rmsn_sid; /* session id */
-} WIRE_ATTR srpc_rmsn_reqst_t; /* remove session request */
+} WIRE_ATTR; /* remove session request */
-typedef struct {
+struct srpc_rmsn_reply {
__u32 rmsn_status;
lst_sid_t rmsn_sid; /* session id */
-} WIRE_ATTR srpc_rmsn_reply_t; /* remove session reply */
+} WIRE_ATTR; /* remove session reply */
-typedef struct {
+struct srpc_join_reqst {
__u64 join_rpyid; /* reply buffer matchbits */
lst_sid_t join_sid; /* session id to join */
char join_group[LST_NAME_SIZE]; /* group name */
-} WIRE_ATTR srpc_join_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_join_reply {
__u32 join_status; /* returned status */
lst_sid_t join_sid; /* session id */
__u32 join_timeout; /* # seconds' inactivity to
* expire */
char join_session[LST_NAME_SIZE]; /* session name */
-} WIRE_ATTR srpc_join_reply_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_debug_reqst {
__u64 dbg_rpyid; /* reply buffer matchbits */
lst_sid_t dbg_sid; /* session id */
__u32 dbg_flags; /* bitmap of debug */
-} WIRE_ATTR srpc_debug_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_debug_reply {
__u32 dbg_status; /* returned code */
lst_sid_t dbg_sid; /* session id */
__u32 dbg_timeout; /* session timeout */
__u32 dbg_nbatch; /* # of batches in the node */
char dbg_name[LST_NAME_SIZE]; /* session name */
-} WIRE_ATTR srpc_debug_reply_t;
+} WIRE_ATTR;
#define SRPC_BATCH_OPC_RUN 1
#define SRPC_BATCH_OPC_STOP 2
#define SRPC_BATCH_OPC_QUERY 3
-typedef struct {
+struct srpc_batch_reqst {
__u64 bar_rpyid; /* reply buffer matchbits */
lst_sid_t bar_sid; /* session id */
lst_bid_t bar_bid; /* batch id */
__u32 bar_opc; /* create/start/stop batch */
__u32 bar_testidx; /* index of test */
__u32 bar_arg; /* parameters */
-} WIRE_ATTR srpc_batch_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_batch_reply {
__u32 bar_status; /* status of request */
lst_sid_t bar_sid; /* session id */
__u32 bar_active; /* # of active tests in batch/test */
__u32 bar_time; /* remained time */
-} WIRE_ATTR srpc_batch_reply_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_stat_reqst {
__u64 str_rpyid; /* reply buffer matchbits */
lst_sid_t str_sid; /* session id */
__u32 str_type; /* type of stat */
-} WIRE_ATTR srpc_stat_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_stat_reply {
__u32 str_status;
lst_sid_t str_sid;
sfw_counters_t str_fw;
srpc_counters_t str_rpc;
lnet_counters_t str_lnet;
-} WIRE_ATTR srpc_stat_reply_t;
+} WIRE_ATTR;
-typedef struct {
+struct test_bulk_req {
__u32 blk_opc; /* bulk operation code */
__u32 blk_npg; /* # of pages */
__u32 blk_flags; /* reserved flags */
-} WIRE_ATTR test_bulk_req_t;
+} WIRE_ATTR;
-typedef struct {
+struct test_bulk_req_v1 {
__u16 blk_opc; /* bulk operation code */
__u16 blk_flags; /* data check flags */
__u32 blk_len; /* data length */
__u32 blk_offset; /* reserved: offset */
-} WIRE_ATTR test_bulk_req_v1_t;
+} WIRE_ATTR;
-typedef struct {
+struct test_ping_req {
__u32 png_size; /* size of ping message */
__u32 png_flags; /* reserved flags */
-} WIRE_ATTR test_ping_req_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_test_reqst {
__u64 tsr_rpyid; /* reply buffer matchbits */
__u64 tsr_bulkid; /* bulk buffer matchbits */
lst_sid_t tsr_sid; /* session id */
@@ -201,82 +201,82 @@ typedef struct {
__u32 tsr_ndest; /* # of dest nodes */
union {
- test_ping_req_t ping;
- test_bulk_req_t bulk_v0;
- test_bulk_req_v1_t bulk_v1;
- } tsr_u;
-} WIRE_ATTR srpc_test_reqst_t;
+ struct test_ping_req ping;
+ struct test_bulk_req bulk_v0;
+ struct test_bulk_req_v1 bulk_v1;
+ } tsr_u;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_test_reply {
__u32 tsr_status; /* returned code */
lst_sid_t tsr_sid;
-} WIRE_ATTR srpc_test_reply_t;
+} WIRE_ATTR;
/* TEST RPCs */
-typedef struct {
+struct srpc_ping_reqst {
__u64 pnr_rpyid;
__u32 pnr_magic;
__u32 pnr_seq;
__u64 pnr_time_sec;
__u64 pnr_time_usec;
-} WIRE_ATTR srpc_ping_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_ping_reply {
__u32 pnr_status;
__u32 pnr_magic;
__u32 pnr_seq;
-} WIRE_ATTR srpc_ping_reply_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_brw_reqst {
__u64 brw_rpyid; /* reply buffer matchbits */
__u64 brw_bulkid; /* bulk buffer matchbits */
__u32 brw_rw; /* read or write */
__u32 brw_len; /* bulk data len */
__u32 brw_flags; /* bulk data patterns */
-} WIRE_ATTR srpc_brw_reqst_t; /* bulk r/w request */
+} WIRE_ATTR; /* bulk r/w request */
-typedef struct {
+struct srpc_brw_reply {
__u32 brw_status;
-} WIRE_ATTR srpc_brw_reply_t; /* bulk r/w reply */
+} WIRE_ATTR; /* bulk r/w reply */
#define SRPC_MSG_MAGIC 0xeeb0f00d
#define SRPC_MSG_VERSION 1
-typedef struct srpc_msg {
+struct srpc_msg {
__u32 msg_magic; /* magic number */
__u32 msg_version; /* message version number */
- __u32 msg_type; /* type of message body: srpc_msg_type_t */
+ __u32 msg_type; /* type of message body: srpc_msg_type */
__u32 msg_reserved0;
__u32 msg_reserved1;
__u32 msg_ses_feats; /* test session features */
union {
- srpc_generic_reqst_t reqst;
- srpc_generic_reply_t reply;
-
- srpc_mksn_reqst_t mksn_reqst;
- srpc_mksn_reply_t mksn_reply;
- srpc_rmsn_reqst_t rmsn_reqst;
- srpc_rmsn_reply_t rmsn_reply;
- srpc_debug_reqst_t dbg_reqst;
- srpc_debug_reply_t dbg_reply;
- srpc_batch_reqst_t bat_reqst;
- srpc_batch_reply_t bat_reply;
- srpc_stat_reqst_t stat_reqst;
- srpc_stat_reply_t stat_reply;
- srpc_test_reqst_t tes_reqst;
- srpc_test_reply_t tes_reply;
- srpc_join_reqst_t join_reqst;
- srpc_join_reply_t join_reply;
-
- srpc_ping_reqst_t ping_reqst;
- srpc_ping_reply_t ping_reply;
- srpc_brw_reqst_t brw_reqst;
- srpc_brw_reply_t brw_reply;
+ struct srpc_generic_reqst reqst;
+ struct srpc_generic_reply reply;
+
+ struct srpc_mksn_reqst mksn_reqst;
+ struct srpc_mksn_reply mksn_reply;
+ struct srpc_rmsn_reqst rmsn_reqst;
+ struct srpc_rmsn_reply rmsn_reply;
+ struct srpc_debug_reqst dbg_reqst;
+ struct srpc_debug_reply dbg_reply;
+ struct srpc_batch_reqst bat_reqst;
+ struct srpc_batch_reply bat_reply;
+ struct srpc_stat_reqst stat_reqst;
+ struct srpc_stat_reply stat_reply;
+ struct srpc_test_reqst tes_reqst;
+ struct srpc_test_reply tes_reply;
+ struct srpc_join_reqst join_reqst;
+ struct srpc_join_reply join_reply;
+
+ struct srpc_ping_reqst ping_reqst;
+ struct srpc_ping_reply ping_reply;
+ struct srpc_brw_reqst brw_reqst;
+ struct srpc_brw_reply brw_reply;
} msg_body;
-} WIRE_ATTR srpc_msg_t;
+} WIRE_ATTR;
static inline void
-srpc_unpack_msg_hdr(srpc_msg_t *msg)
+srpc_unpack_msg_hdr(struct srpc_msg *msg)
{
if (msg->msg_magic == SRPC_MSG_MAGIC)
return; /* no flipping needed */
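All of these WIRE_ATTR structs cross the wire between nodes of either endianness. The receiver detects a byte-swapped peer by testing msg_magic, then flips each fixed-width field in place, exactly as sfw_unpack_message() does in framework.c above. The convention, sketched for the ping request; this helper name is illustrative, not part of the patch:

	static void unpack_ping_reqst(struct srpc_msg *msg)
	{
		struct srpc_ping_reqst *req = &msg->msg_body.ping_reqst;

		if (msg->msg_magic == SRPC_MSG_MAGIC)
			return;		/* sender shares our byte order */

		LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
		__swab64s(&req->pnr_rpyid);
		__swab32s(&req->pnr_magic);
		__swab32s(&req->pnr_seq);
		__swab64s(&req->pnr_time_sec);
		__swab64s(&req->pnr_time_usec);
	}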
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index e689ca184..4eac1c9e6 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -93,7 +93,7 @@ struct sfw_test_instance;
/* all reply/bulk RDMAs go to this portal */
#define SRPC_RDMA_PORTAL 52
-static inline srpc_msg_type_t
+static inline enum srpc_msg_type
srpc_service2request(int service)
{
switch (service) {
@@ -128,13 +128,13 @@ srpc_service2request(int service)
}
}
-static inline srpc_msg_type_t
+static inline enum srpc_msg_type
srpc_service2reply(int service)
{
return srpc_service2request(service) + 1;
}
-typedef enum {
+enum srpc_event_type {
SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source)
* received */
SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */
@@ -143,57 +143,58 @@ typedef enum {
SRPC_REPLY_SENT = 5, /* outgoing reply sent */
SRPC_REQUEST_RCVD = 6, /* incoming request received */
SRPC_REQUEST_SENT = 7, /* outgoing request sent */
-} srpc_event_type_t;
+};
/* RPC event */
-typedef struct {
- srpc_event_type_t ev_type; /* what's up */
+struct srpc_event {
+ enum srpc_event_type ev_type; /* what's up */
lnet_event_kind_t ev_lnet; /* LNet event type */
int ev_fired; /* LNet event fired? */
int ev_status; /* LNet event status */
void *ev_data; /* owning server/client RPC */
-} srpc_event_t;
+};
-typedef struct {
+/* bulk descriptor */
+struct srpc_bulk {
int bk_len; /* len of bulk data */
lnet_handle_md_t bk_mdh;
int bk_sink; /* sink/source */
int bk_niov; /* # iov in bk_iovs */
lnet_kiov_t bk_iovs[0];
-} srpc_bulk_t; /* bulk descriptor */
+};
/* message buffer descriptor */
-typedef struct srpc_buffer {
+struct srpc_buffer {
struct list_head buf_list; /* chain on srpc_service::*_msgq */
- srpc_msg_t buf_msg;
+ struct srpc_msg buf_msg;
lnet_handle_md_t buf_mdh;
lnet_nid_t buf_self;
lnet_process_id_t buf_peer;
-} srpc_buffer_t;
+};
struct swi_workitem;
typedef int (*swi_action_t) (struct swi_workitem *);
-typedef struct swi_workitem {
+struct swi_workitem {
struct cfs_wi_sched *swi_sched;
- cfs_workitem_t swi_workitem;
+ struct cfs_workitem swi_workitem;
swi_action_t swi_action;
int swi_state;
-} swi_workitem_t;
+};
/* server-side state of a RPC */
struct srpc_server_rpc {
/* chain on srpc_service::*_rpcq */
struct list_head srpc_list;
struct srpc_service_cd *srpc_scd;
- swi_workitem_t srpc_wi;
- srpc_event_t srpc_ev; /* bulk/reply event */
+ struct swi_workitem srpc_wi;
+ struct srpc_event srpc_ev; /* bulk/reply event */
lnet_nid_t srpc_self;
lnet_process_id_t srpc_peer;
- srpc_msg_t srpc_replymsg;
+ struct srpc_msg srpc_replymsg;
lnet_handle_md_t srpc_replymdh;
- srpc_buffer_t *srpc_reqstbuf;
- srpc_bulk_t *srpc_bulk;
+ struct srpc_buffer *srpc_reqstbuf;
+ struct srpc_bulk *srpc_bulk;
unsigned int srpc_aborted; /* being given up */
int srpc_status;
@@ -201,14 +202,14 @@ struct srpc_server_rpc {
};
/* client-side state of a RPC */
-typedef struct srpc_client_rpc {
+struct srpc_client_rpc {
struct list_head crpc_list; /* chain on user's lists */
spinlock_t crpc_lock; /* serialize */
int crpc_service;
atomic_t crpc_refcount;
int crpc_timeout; /* # seconds to wait for reply */
struct stt_timer crpc_timer;
- swi_workitem_t crpc_wi;
+ struct swi_workitem crpc_wi;
lnet_process_id_t crpc_dest;
void (*crpc_done)(struct srpc_client_rpc *);
@@ -221,20 +222,20 @@ typedef struct srpc_client_rpc {
unsigned int crpc_closed:1; /* completed */
/* RPC events */
- srpc_event_t crpc_bulkev; /* bulk event */
- srpc_event_t crpc_reqstev; /* request event */
- srpc_event_t crpc_replyev; /* reply event */
+ struct srpc_event crpc_bulkev; /* bulk event */
+ struct srpc_event crpc_reqstev; /* request event */
+ struct srpc_event crpc_replyev; /* reply event */
/* bulk, request(reqst), and reply exchanged on wire */
- srpc_msg_t crpc_reqstmsg;
- srpc_msg_t crpc_replymsg;
+ struct srpc_msg crpc_reqstmsg;
+ struct srpc_msg crpc_replymsg;
lnet_handle_md_t crpc_reqstmdh;
lnet_handle_md_t crpc_replymdh;
- srpc_bulk_t crpc_bulk;
-} srpc_client_rpc_t;
+ struct srpc_bulk crpc_bulk;
+};
#define srpc_client_rpc_size(rpc) \
-offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
+offsetof(struct srpc_client_rpc, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
#define srpc_client_rpc_addref(rpc) \
do { \
@@ -266,13 +267,13 @@ struct srpc_service_cd {
/** backref to service */
struct srpc_service *scd_svc;
/** event buffer */
- srpc_event_t scd_ev;
+ struct srpc_event scd_ev;
/** free RPC descriptors */
struct list_head scd_rpc_free;
/** in-flight RPCs */
struct list_head scd_rpc_active;
/** workitem for posting buffer */
- swi_workitem_t scd_buf_wi;
+ struct swi_workitem scd_buf_wi;
/** CPT id */
int scd_cpt;
/** error code for scd_buf_wi */
@@ -306,7 +307,7 @@ struct srpc_service_cd {
#define SFW_FRWK_WI_MIN 16
#define SFW_FRWK_WI_MAX 256
-typedef struct srpc_service {
+struct srpc_service {
int sv_id; /* service id */
const char *sv_name; /* human readable name */
int sv_wi_total; /* total server workitems */
@@ -320,9 +321,9 @@ typedef struct srpc_service {
*/
int (*sv_handler)(struct srpc_server_rpc *);
int (*sv_bulk_ready)(struct srpc_server_rpc *, int);
-} srpc_service_t;
+};
-typedef struct {
+struct sfw_session {
struct list_head sn_list; /* chain on fw_zombie_sessions */
lst_sid_t sn_id; /* unique identifier */
unsigned int sn_timeout; /* # seconds' inactivity to expire */
@@ -335,37 +336,37 @@ typedef struct {
atomic_t sn_brw_errors;
atomic_t sn_ping_errors;
unsigned long sn_started;
-} sfw_session_t;
+};
#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
(sid0).ses_stamp == (sid1).ses_stamp)
-typedef struct {
+struct sfw_batch {
struct list_head bat_list; /* chain on sn_batches */
lst_bid_t bat_id; /* batch id */
int bat_error; /* error code of batch */
- sfw_session_t *bat_session; /* batch's session */
+ struct sfw_session *bat_session; /* batch's session */
atomic_t bat_nactive; /* # of active tests */
struct list_head bat_tests; /* test instances */
-} sfw_batch_t;
+};
-typedef struct {
+struct sfw_test_client_ops {
int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test
* client */
void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test
* client */
int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
lnet_process_id_t dest,
- srpc_client_rpc_t **rpc); /* prep a tests rpc */
+ struct srpc_client_rpc **rpc); /* prep a tests rpc */
void (*tso_done_rpc)(struct sfw_test_unit *tsu,
- srpc_client_rpc_t *rpc); /* done a test rpc */
-} sfw_test_client_ops_t;
+ struct srpc_client_rpc *rpc); /* done a test rpc */
+};
-typedef struct sfw_test_instance {
+struct sfw_test_instance {
struct list_head tsi_list; /* chain on batch */
int tsi_service; /* test type */
- sfw_batch_t *tsi_batch; /* batch */
- sfw_test_client_ops_t *tsi_ops; /* test client operation
+ struct sfw_batch *tsi_batch; /* batch */
+ struct sfw_test_client_ops *tsi_ops; /* test client operation
*/
/* public parameter for all test units */
@@ -384,11 +385,11 @@ typedef struct sfw_test_instance {
struct list_head tsi_active_rpcs; /* active rpcs */
union {
- test_ping_req_t ping; /* ping parameter */
- test_bulk_req_t bulk_v0; /* bulk parameter */
- test_bulk_req_v1_t bulk_v1; /* bulk v1 parameter */
+ struct test_ping_req ping; /* ping parameter */
+ struct test_bulk_req bulk_v0; /* bulk parameter */
+ struct test_bulk_req_v1 bulk_v1; /* bulk v1 parameter */
} tsi_u;
-} sfw_test_instance_t;
+};
/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
* pages are not used */
@@ -397,57 +398,58 @@ typedef struct sfw_test_instance {
#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
-typedef struct sfw_test_unit {
+struct sfw_test_unit {
struct list_head tsu_list; /* chain on lst_test_instance */
lnet_process_id_t tsu_dest; /* id of dest node */
int tsu_loop; /* loop count of the test */
- sfw_test_instance_t *tsu_instance; /* pointer to test instance */
+ struct sfw_test_instance *tsu_instance; /* pointer to test instance */
void *tsu_private; /* private data */
- swi_workitem_t tsu_worker; /* workitem of the test unit */
-} sfw_test_unit_t;
+ struct swi_workitem tsu_worker; /* workitem of the test unit */
+};
-typedef struct sfw_test_case {
+struct sfw_test_case {
struct list_head tsc_list; /* chain on fw_tests */
- srpc_service_t *tsc_srv_service; /* test service */
- sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */
-} sfw_test_case_t;
+ struct srpc_service *tsc_srv_service; /* test service */
+ struct sfw_test_client_ops *tsc_cli_ops; /* ops of test client */
+};
-srpc_client_rpc_t *
+struct srpc_client_rpc *
sfw_create_rpc(lnet_process_id_t peer, int service,
unsigned features, int nbulkiov, int bulklen,
- void (*done)(srpc_client_rpc_t *), void *priv);
-int sfw_create_test_rpc(sfw_test_unit_t *tsu,
+ void (*done)(struct srpc_client_rpc *), void *priv);
+int sfw_create_test_rpc(struct sfw_test_unit *tsu,
lnet_process_id_t peer, unsigned features,
- int nblk, int blklen, srpc_client_rpc_t **rpc);
-void sfw_abort_rpc(srpc_client_rpc_t *rpc);
-void sfw_post_rpc(srpc_client_rpc_t *rpc);
-void sfw_client_rpc_done(srpc_client_rpc_t *rpc);
-void sfw_unpack_message(srpc_msg_t *msg);
+ int nblk, int blklen, struct srpc_client_rpc **rpc);
+void sfw_abort_rpc(struct srpc_client_rpc *rpc);
+void sfw_post_rpc(struct srpc_client_rpc *rpc);
+void sfw_client_rpc_done(struct srpc_client_rpc *rpc);
+void sfw_unpack_message(struct srpc_msg *msg);
void sfw_free_pages(struct srpc_server_rpc *rpc);
-void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i);
+void sfw_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i);
int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
int sink);
-int sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
+int sfw_make_session(struct srpc_mksn_reqst *request,
+ struct srpc_mksn_reply *reply);
-srpc_client_rpc_t *
+struct srpc_client_rpc *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv);
-void srpc_post_rpc(srpc_client_rpc_t *rpc);
-void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why);
-void srpc_free_bulk(srpc_bulk_t *bk);
-srpc_bulk_t *srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len,
- int sink);
-int srpc_send_rpc(swi_workitem_t *wi);
+ void (*rpc_done)(struct srpc_client_rpc *),
+ void (*rpc_fini)(struct srpc_client_rpc *), void *priv);
+void srpc_post_rpc(struct srpc_client_rpc *rpc);
+void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why);
+void srpc_free_bulk(struct srpc_bulk *bk);
+struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned bulk_npg,
+ unsigned bulk_len, int sink);
+int srpc_send_rpc(struct swi_workitem *wi);
int srpc_send_reply(struct srpc_server_rpc *rpc);
-int srpc_add_service(srpc_service_t *sv);
-int srpc_remove_service(srpc_service_t *sv);
-void srpc_shutdown_service(srpc_service_t *sv);
-void srpc_abort_service(srpc_service_t *sv);
-int srpc_finish_service(srpc_service_t *sv);
-int srpc_service_add_buffers(srpc_service_t *sv, int nbuffer);
-void srpc_service_remove_buffers(srpc_service_t *sv, int nbuffer);
+int srpc_add_service(struct srpc_service *sv);
+int srpc_remove_service(struct srpc_service *sv);
+void srpc_shutdown_service(struct srpc_service *sv);
+void srpc_abort_service(struct srpc_service *sv);
+int srpc_finish_service(struct srpc_service *sv);
+int srpc_service_add_buffers(struct srpc_service *sv, int nbuffer);
+void srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer);
void srpc_get_counters(srpc_counters_t *cnt);
void srpc_set_counters(const srpc_counters_t *cnt);
@@ -461,15 +463,17 @@ srpc_serv_is_framework(struct srpc_service *svc)
}
static inline int
-swi_wi_action(cfs_workitem_t *wi)
+swi_wi_action(struct cfs_workitem *wi)
{
- swi_workitem_t *swi = container_of(wi, swi_workitem_t, swi_workitem);
+ struct swi_workitem *swi;
+
+ swi = container_of(wi, struct swi_workitem, swi_workitem);
return swi->swi_action(swi);
}
static inline void
-swi_init_workitem(swi_workitem_t *swi, void *data,
+swi_init_workitem(struct swi_workitem *swi, void *data,
swi_action_t action, struct cfs_wi_sched *sched)
{
swi->swi_sched = sched;
@@ -479,19 +483,19 @@ swi_init_workitem(swi_workitem_t *swi, void *data,
}
static inline void
-swi_schedule_workitem(swi_workitem_t *wi)
+swi_schedule_workitem(struct swi_workitem *wi)
{
cfs_wi_schedule(wi->swi_sched, &wi->swi_workitem);
}
static inline void
-swi_exit_workitem(swi_workitem_t *swi)
+swi_exit_workitem(struct swi_workitem *swi)
{
cfs_wi_exit(swi->swi_sched, &swi->swi_workitem);
}
static inline int
-swi_deschedule_workitem(swi_workitem_t *swi)
+swi_deschedule_workitem(struct swi_workitem *swi)
{
return cfs_wi_deschedule(swi->swi_sched, &swi->swi_workitem);
}
@@ -502,7 +506,7 @@ void sfw_shutdown(void);
void srpc_shutdown(void);
static inline void
-srpc_destroy_client_rpc(srpc_client_rpc_t *rpc)
+srpc_destroy_client_rpc(struct srpc_client_rpc *rpc)
{
LASSERT(rpc);
LASSERT(!srpc_event_pending(rpc));
@@ -515,14 +519,14 @@ srpc_destroy_client_rpc(srpc_client_rpc_t *rpc)
}
static inline void
-srpc_init_client_rpc(srpc_client_rpc_t *rpc, lnet_process_id_t peer,
+srpc_init_client_rpc(struct srpc_client_rpc *rpc, lnet_process_id_t peer,
int service, int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+ void (*rpc_done)(struct srpc_client_rpc *),
+ void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
{
LASSERT(nbulkiov <= LNET_MAX_IOV);
- memset(rpc, 0, offsetof(srpc_client_rpc_t,
+ memset(rpc, 0, offsetof(struct srpc_client_rpc,
crpc_bulk.bk_iovs[nbulkiov]));
INIT_LIST_HEAD(&rpc->crpc_list);
@@ -592,7 +596,7 @@ do { \
} while (0)
static inline void
-srpc_wait_service_shutdown(srpc_service_t *sv)
+srpc_wait_service_shutdown(struct srpc_service *sv)
{
int i = 2;
@@ -607,16 +611,16 @@ srpc_wait_service_shutdown(srpc_service_t *sv)
}
}
-extern sfw_test_client_ops_t brw_test_client;
+extern struct sfw_test_client_ops brw_test_client;
void brw_init_test_client(void);
-extern srpc_service_t brw_test_service;
+extern struct srpc_service brw_test_service;
void brw_init_test_service(void);
-extern sfw_test_client_ops_t ping_test_client;
+extern struct sfw_test_client_ops ping_test_client;
void ping_init_test_client(void);
-extern srpc_service_t ping_test_service;
+extern struct srpc_service ping_test_service;
void ping_init_test_service(void);
#endif /* __SELFTEST_SELFTEST_H__ */
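The swi_* inlines above wrap the libcfs workitem API: a test unit embeds a swi_workitem, and its state machine runs on a scheduler thread that later recovers the container via container_of() in swi_wi_action(). A usage sketch under those assumptions; tsu_worker, sfw_run_test and the scheduler type come from the patch, while the setup function itself is hypothetical:

	static void setup_unit_worker(struct sfw_test_unit *tsu,
				      struct cfs_wi_sched *sched)
	{
		/* tsu is both the workitem container and its private data */
		swi_init_workitem(&tsu->tsu_worker, tsu, sfw_run_test, sched);

		/* queue it; sfw_run_test() then runs with wi == &tsu->tsu_worker
		 * and pulls tsu back out of wi->swi_workitem.wi_data
		 */
		swi_schedule_workitem(&tsu->tsu_worker);
	}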
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index 8be52526a..b6c4aae00 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -49,7 +49,7 @@
* sorted by increasing expiry time. The number of slots is 2**7 (128),
* to cover a time period of 1024 seconds into the future before wrapping.
*/
-#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
+#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
#define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL)
#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1))
#define STTIMER_NSLOTS (1 << 7)
@@ -170,20 +170,22 @@ stt_check_timers(unsigned long *last)
static int
stt_timer_main(void *arg)
{
+ int rc = 0;
+
cfs_block_allsigs();
while (!stt_data.stt_shuttingdown) {
stt_check_timers(&stt_data.stt_prev_slot);
- wait_event_timeout(stt_data.stt_waitq,
- stt_data.stt_shuttingdown,
- cfs_time_seconds(STTIMER_SLOTTIME));
+ rc = wait_event_timeout(stt_data.stt_waitq,
+ stt_data.stt_shuttingdown,
+ cfs_time_seconds(STTIMER_SLOTTIME));
}
spin_lock(&stt_data.stt_lock);
stt_data.stt_nthreads--;
spin_unlock(&stt_data.stt_lock);
- return 0;
+ return rc;
}
static int
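The timer-thread hunk above captures wait_event_timeout()'s result instead of discarding it, presumably to quiet unused-result warnings; the return value distinguishes a timeout from an early wakeup. The contract, sketched with hypothetical wq/cond names:

	long rem;

	rem = wait_event_timeout(wq, cond, timeout_jiffies);
	if (!rem) {
		/* timed out: cond was still false at expiry */
	} else {
		/* cond became true with 'rem' jiffies remaining */
	}

Note the loop in stt_timer_main() still terminates only on stt_shuttingdown; rc merely records the outcome of the last wait.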
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 39269c3c5..3a4df6264 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -66,6 +66,7 @@ static int seq_client_rpc(struct lu_client_seq *seq,
unsigned int debug_mask;
int rc;
+ LASSERT(exp && !IS_ERR(exp));
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
LUSTRE_MDS_VERSION, SEQ_QUERY);
if (!req)
@@ -101,19 +102,22 @@ static int seq_client_rpc(struct lu_client_seq *seq,
req->rq_no_delay = req->rq_no_resend = 1;
debug_mask = D_CONSOLE;
} else {
- if (seq->lcs_type == LUSTRE_SEQ_METADATA)
+ if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
+ req->rq_reply_portal = MDC_REPLY_PORTAL;
req->rq_request_portal = SEQ_METADATA_PORTAL;
- else
+ } else {
+ req->rq_reply_portal = OSC_REPLY_PORTAL;
req->rq_request_portal = SEQ_DATA_PORTAL;
+ }
debug_mask = D_INFO;
}
ptlrpc_at_set_req_timeout(req);
- if (seq->lcs_type == LUSTRE_SEQ_METADATA)
+ if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
rc = ptlrpc_queue_wait(req);
- if (seq->lcs_type == LUSTRE_SEQ_METADATA)
+ if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
if (rc)
goto out_req;
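The seq_client_rpc() hunks make two behavioral changes: replies are now steered to the portal matching the sequence type, and super-sequence allocation no longer takes the MDC RPC lock around ptlrpc_queue_wait(). The repeated opc/type test could be factored out, sketched here as a hypothetical cleanup rather than what the patch does:

	bool need_lock = opc != SEQ_ALLOC_SUPER &&
			 seq->lcs_type == LUSTRE_SEQ_METADATA;

	if (need_lock)
		mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
	rc = ptlrpc_queue_wait(req);
	if (need_lock)
		mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);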
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 062f388cf..5a04e99d9 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -178,8 +178,9 @@ restart_fixup:
if (n_range->lsr_end <= c_range->lsr_end) {
*n_range = *c_range;
fld_cache_entry_delete(cache, f_curr);
- } else
+ } else {
n_range->lsr_start = c_range->lsr_end;
+ }
}
/* we could have overlap over next
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index e8a3caf20..75d6a4863 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -101,12 +101,6 @@ struct fld_cache {
unsigned int fci_no_shrink:1;
};
-enum fld_op {
- FLD_CREATE = 0,
- FLD_DELETE = 1,
- FLD_LOOKUP = 2
-};
-
enum {
/* 4M of FLD cache will not hurt client a lot. */
FLD_SERVER_CACHE_SIZE = (4 * 0x100000),
@@ -126,7 +120,8 @@ enum {
extern struct lu_fld_hash fld_hash[];
int fld_client_rpc(struct obd_export *exp,
- struct lu_seq_range *range, __u32 fld_op);
+ struct lu_seq_range *range, __u32 fld_op,
+ struct ptlrpc_request **reqp);
extern struct lprocfs_vars fld_client_debugfs_list[];
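fld_client_rpc() gains a ptlrpc_request out-parameter, apparently so that FLD_READ callers (the opcode handled below) can parse the returned data page before the request is released. A hedged sketch of how such a caller might look, with the reply-handling step elided since it is not shown in this hunk:

	struct ptlrpc_request *req = NULL;
	int rc;

	rc = fld_client_rpc(exp, range, FLD_READ, &req);
	if (rc)
		return rc;

	/* ... consume RMF_GENERIC_DATA from the reply buffer ... */

	ptlrpc_req_finished(req);	/* release the request when done */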
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index a3d122d85..304c0ec26 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -64,9 +64,9 @@ static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
{
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = list_empty(&mcw->mcw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
};
@@ -75,15 +75,15 @@ static void fld_enter_request(struct client_obd *cli)
struct mdc_cache_waiter mcw;
struct l_wait_info lwi = { 0 };
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
init_waitqueue_head(&mcw.mcw_waitq);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
} else {
cli->cl_r_in_flight++;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
}
@@ -92,10 +92,9 @@ static void fld_exit_request(struct client_obd *cli)
struct list_head *l, *tmp;
struct mdc_cache_waiter *mcw;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_r_in_flight--;
list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
-
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
/* No free request slots anymore */
break;
@@ -106,7 +105,7 @@ static void fld_exit_request(struct client_obd *cli)
cli->cl_r_in_flight++;
wake_up(&mcw->mcw_waitq);
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
static int fld_rrb_hash(struct lu_client_fld *fld, u64 seq)
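fld_enter_request()/fld_exit_request() above implement a max-in-flight throttle: callers over the cl_max_rpcs_in_flight limit park on a waiter list, and each completed request hands its slot to the next queued waiter. A self-contained userspace sketch of the same idea; names are illustrative, and note that the kernel version wakes a specific FIFO waiter while this condition-variable version lets sleepers re-compete for the slot:

#include <pthread.h>

struct throttle {
	pthread_mutex_t lock;
	pthread_cond_t  waitq;
	int in_flight;	/* like cl_r_in_flight */
	int max;	/* like cl_max_rpcs_in_flight */
};

static struct throttle demo = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.waitq = PTHREAD_COND_INITIALIZER,
	.max = 8,
};

static void throttle_enter(struct throttle *t)	/* cf. fld_enter_request() */
{
	pthread_mutex_lock(&t->lock);
	while (t->in_flight >= t->max)
		pthread_cond_wait(&t->waitq, &t->lock);
	t->in_flight++;
	pthread_mutex_unlock(&t->lock);
}

static void throttle_exit(struct throttle *t)	/* cf. fld_exit_request() */
{
	pthread_mutex_lock(&t->lock);
	t->in_flight--;
	pthread_cond_signal(&t->waitq);	/* pass the freed slot on */
	pthread_mutex_unlock(&t->lock);
}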
@@ -392,55 +391,82 @@ void fld_client_fini(struct lu_client_fld *fld)
EXPORT_SYMBOL(fld_client_fini);
int fld_client_rpc(struct obd_export *exp,
- struct lu_seq_range *range, __u32 fld_op)
+ struct lu_seq_range *range, __u32 fld_op,
+ struct ptlrpc_request **reqp)
{
- struct ptlrpc_request *req;
+ struct ptlrpc_request *req = NULL;
struct lu_seq_range *prange;
__u32 *op;
- int rc;
+ int rc = 0;
struct obd_import *imp;
LASSERT(exp);
imp = class_exp2cliimp(exp);
- req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, LUSTRE_MDS_VERSION,
- FLD_QUERY);
- if (!req)
- return -ENOMEM;
-
- op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
- *op = fld_op;
+ switch (fld_op) {
+ case FLD_QUERY:
+ req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY,
+ LUSTRE_MDS_VERSION, FLD_QUERY);
+ if (!req)
+ return -ENOMEM;
+
+	/*
+	 * XXX: only needed when talking to old servers (< 2.6); this can
+	 * be removed once pre-2.6 servers are no longer supported
+	 */
+ op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
+ *op = FLD_LOOKUP;
+
+ if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS)
+ req->rq_allow_replay = 1;
+ break;
+ case FLD_READ:
+ req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_READ,
+ LUSTRE_MDS_VERSION, FLD_READ);
+ if (!req)
+ return -ENOMEM;
+
+ req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA,
+ RCL_SERVER, PAGE_SIZE);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ if (rc)
+ return rc;
prange = req_capsule_client_get(&req->rq_pill, &RMF_FLD_MDFLD);
*prange = *range;
-
ptlrpc_request_set_replen(req);
req->rq_request_portal = FLD_REQUEST_PORTAL;
req->rq_reply_portal = MDC_REPLY_PORTAL;
ptlrpc_at_set_req_timeout(req);
- if (fld_op == FLD_LOOKUP &&
- imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS)
- req->rq_allow_replay = 1;
-
- if (fld_op != FLD_LOOKUP)
- mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
fld_enter_request(&exp->exp_obd->u.cli);
rc = ptlrpc_queue_wait(req);
fld_exit_request(&exp->exp_obd->u.cli);
- if (fld_op != FLD_LOOKUP)
- mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
if (rc)
goto out_req;
- prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD);
- if (!prange) {
- rc = -EFAULT;
- goto out_req;
+ if (fld_op == FLD_QUERY) {
+ prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD);
+ if (!prange) {
+ rc = -EFAULT;
+ goto out_req;
+ }
+ *range = *prange;
}
- *range = *prange;
+
out_req:
- ptlrpc_req_finished(req);
+ if (rc || !reqp) {
+ ptlrpc_req_finished(req);
+ req = NULL;
+ }
+
+ if (reqp)
+ *reqp = req;
+
return rc;
}
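The rewritten fld_client_rpc() above introduces an out-parameter ownership rule: the request is always freed on failure or when the caller passed no reqp, and otherwise handed over to the caller. A minimal, hedged sketch of that rule in isolation; the demo_* names are illustrative stand-ins:

#include <errno.h>
#include <stdlib.h>

struct demo_req { int payload; };

static int demo_rpc(int op, struct demo_req **reqp)
{
	struct demo_req *req;
	int rc;

	req = malloc(sizeof(*req));
	if (!req)
		return -ENOMEM;

	rc = (op == 0) ? 0 : -EINVAL;	/* stand-in for ptlrpc_queue_wait() */

	if (rc || !reqp) {
		free(req);		/* cf. ptlrpc_req_finished() */
		req = NULL;
	}
	if (reqp)
		*reqp = req;		/* NULL on error, live req on success */
	return rc;
}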
@@ -468,7 +494,7 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
res.lsr_start = seq;
fld_range_set_type(&res, flags);
- rc = fld_client_rpc(target->ft_exp, &res, FLD_LOOKUP);
+ rc = fld_client_rpc(target->ft_exp, &res, FLD_QUERY, NULL);
if (rc == 0) {
*mds = res.lsr_index;
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index fb971ded5..d4c33dd11 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -82,7 +82,6 @@
* - i_mutex
* - PG_locked
* - cl_object_header::coh_page_guard
- * - cl_object_header::coh_lock_guard
* - lu_site::ls_guard
*
* See the top comment in cl_object.c for the description of overall locking and
@@ -98,9 +97,12 @@
* super-class definitions.
*/
#include "lu_object.h"
+#include <linux/atomic.h>
#include "linux/lustre_compat25.h"
#include <linux/mutex.h>
#include <linux/radix-tree.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
struct inode;
@@ -138,7 +140,7 @@ struct cl_device_operations {
* cl_req_slice_add().
*
* \see osc_req_init(), lov_req_init(), lovsub_req_init()
- * \see ccc_req_init()
+ * \see vvp_req_init()
*/
int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev,
struct cl_req *req);
@@ -147,7 +149,7 @@ struct cl_device_operations {
/**
* Device in the client stack.
*
- * \see ccc_device, lov_device, lovsub_device, osc_device
+ * \see vvp_device, lov_device, lovsub_device, osc_device
*/
struct cl_device {
/** Super-class. */
@@ -243,7 +245,7 @@ enum cl_attr_valid {
* be discarded from the memory, all its sub-objects are torn-down and
* destroyed too.
*
- * \see ccc_object, lov_object, lovsub_object, osc_object
+ * \see vvp_object, lov_object, lovsub_object, osc_object
*/
struct cl_object {
/** super class */
@@ -322,7 +324,7 @@ struct cl_object_operations {
* to be used instead of newly created.
*/
int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
@@ -383,11 +385,17 @@ struct cl_object_operations {
* object. Layers are supposed to fill parts of \a lvb that will be
* shipped to the glimpse originator as a glimpse result.
*
- * \see ccc_object_glimpse(), lovsub_object_glimpse(),
+ * \see vvp_object_glimpse(), lovsub_object_glimpse(),
* \see osc_object_glimpse()
*/
int (*coo_glimpse)(const struct lu_env *env,
const struct cl_object *obj, struct ost_lvb *lvb);
+ /**
+ * Object prune method. Called when the layout is going to change on
+	 * this object, so each layer has to clean up its cache,
+ * mainly pages and locks.
+ */
+ int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
};
/**
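The new coo_prune hook is invoked when a layout change forces each layer to drop its cached pages and locks. A hedged sketch of how such a per-layer hook is typically fanned out across the layer stack; the list layout and names are illustrative, not the cl_object internals:

struct demo_layer {
	struct demo_layer *next;
	int (*prune)(struct demo_layer *l);
};

static int demo_object_prune(struct demo_layer *top)
{
	struct demo_layer *l;
	int rc = 0;

	/* Walk the layers top to bottom, invoking the optional method. */
	for (l = top; l; l = l->next) {
		if (!l->prune)
			continue;
		rc = l->prune(l);
		if (rc)
			break;		/* propagate the first failure */
	}
	return rc;
}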
@@ -398,22 +406,6 @@ struct cl_object_header {
* here.
*/
struct lu_object_header coh_lu;
- /** \name locks
- * \todo XXX move locks below to the separate cache-lines, they are
- * mostly useless otherwise.
- */
- /** @{ */
- /** Lock protecting page tree. */
- spinlock_t coh_page_guard;
- /** Lock protecting lock list. */
- spinlock_t coh_lock_guard;
- /** @} locks */
- /** Radix tree of cl_page's, cached for this object. */
- struct radix_tree_root coh_tree;
- /** # of pages in radix tree. */
- unsigned long coh_pages;
- /** List of cl_lock's granted for this object. */
- struct list_head coh_locks;
/**
* Parent object. It is assumed that an object has a well-defined
@@ -460,10 +452,6 @@ struct cl_object_header {
co_lu.lo_linkage)
/** @} cl_object */
-#ifndef pgoff_t
-#define pgoff_t unsigned long
-#endif
-
#define CL_PAGE_EOF ((pgoff_t)~0ull)
/** \addtogroup cl_page cl_page
@@ -727,16 +715,10 @@ struct cl_page {
atomic_t cp_ref;
/** An object this page is a part of. Immutable after creation. */
struct cl_object *cp_obj;
- /** Logical page index within the object. Immutable after creation. */
- pgoff_t cp_index;
/** List of slices. Immutable after creation. */
struct list_head cp_layers;
- /** Parent page, NULL for top-level page. Immutable after creation. */
- struct cl_page *cp_parent;
- /** Lower-layer page. NULL for bottommost page. Immutable after
- * creation.
- */
- struct cl_page *cp_child;
+	/** backing VM page for this cl_page */
+ struct page *cp_vmpage;
/**
* Page state. This field is const to avoid accidental update, it is
* modified only internally within cl_page.c. Protected by a VM lock.
@@ -787,10 +769,11 @@ struct cl_page {
/**
* Per-layer part of cl_page.
*
- * \see ccc_page, lov_page, osc_page
+ * \see vvp_page, lov_page, osc_page
*/
struct cl_page_slice {
struct cl_page *cpl_page;
+ pgoff_t cpl_index;
/**
* Object slice corresponding to this page slice. Immutable after
* creation.
@@ -804,16 +787,9 @@ struct cl_page_slice {
/**
* Lock mode. For the client extent locks.
*
- * \warning: cl_lock_mode_match() assumes particular ordering here.
* \ingroup cl_lock
*/
enum cl_lock_mode {
- /**
- * Mode of a lock that protects no data, and exists only as a
- * placeholder. This is used for `glimpse' requests. A phantom lock
- * might get promoted to real lock at some point.
- */
- CLM_PHANTOM,
CLM_READ,
CLM_WRITE,
CLM_GROUP
@@ -846,11 +822,6 @@ struct cl_page_operations {
*/
/**
- * \return the underlying VM page. Optional.
- */
- struct page *(*cpo_vmpage)(const struct lu_env *env,
- const struct cl_page_slice *slice);
- /**
* Called when \a io acquires this page into the exclusive
* ownership. When this method returns, it is guaranteed that the page is
* not owned by another io, and no transfer is going on against
@@ -897,14 +868,6 @@ struct cl_page_operations {
void (*cpo_export)(const struct lu_env *env,
const struct cl_page_slice *slice, int uptodate);
/**
- * Unmaps page from the user space (if it is mapped).
- *
- * \see cl_page_unmap()
- * \see vvp_page_unmap()
- */
- int (*cpo_unmap)(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
- /**
* Checks whether underlying VM page is locked (in the suitable
* sense). Used for assertions.
*
@@ -957,7 +920,7 @@ struct cl_page_operations {
*/
int (*cpo_is_under_lock)(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *io);
+ struct cl_io *io, pgoff_t *max);
/**
* Optional debugging helper. Prints given page slice.
@@ -1027,26 +990,6 @@ struct cl_page_operations {
*/
int (*cpo_make_ready)(const struct lu_env *env,
const struct cl_page_slice *slice);
- /**
- * Announce that this page is to be written out
- * opportunistically, that is, page is dirty, it is not
- * necessary to start write-out transfer right now, but
- * eventually page has to be written out.
- *
- * Main caller of this is the write path (see
- * vvp_io_commit_write()), using this method to build a
- * "transfer cache" from which large transfers are then
- * constructed by the req-formation engine.
- *
- * \todo XXX it would make sense to add page-age tracking
- * semantics here, and to oblige the req-formation engine to
- * send the page out not later than it is too old.
- *
- * \see cl_page_cache_add()
- */
- int (*cpo_cache_add)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
} io[CRT_NR];
/**
* Tell transfer engine that only [to, from] part of a page should be
@@ -1098,9 +1041,8 @@ struct cl_page_operations {
*/
#define CL_PAGE_DEBUG(mask, env, page, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
cl_page_print(env, &msgdata, lu_cdebug_printer, page); \
CDEBUG(mask, format, ## __VA_ARGS__); \
} \
@@ -1111,9 +1053,8 @@ do { \
*/
#define CL_PAGE_HEADER(mask, env, page, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
CDEBUG(mask, format, ## __VA_ARGS__); \
} \
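Both debug macros above move the msgdata declaration inside the if, so the helper object is only constructed when the debug mask actually fires. The same scoping pattern in a self-contained form; the demo names are illustrative:

#include <stdio.h>

struct demo_msgdata { const char *file; int line; };

static void demo_print(const struct demo_msgdata *m, const char *msg)
{
	fprintf(stderr, "%s:%d: %s\n", m->file, m->line, msg);
}

/* Declared in the inner scope: nothing is set up on the fast path. */
#define DEMO_DEBUG(cond, msg)						\
do {									\
	if (cond) {							\
		struct demo_msgdata msgdata = { __FILE__, __LINE__ };	\
		demo_print(&msgdata, (msg));				\
	}								\
} while (0)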
@@ -1130,6 +1071,12 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
#define cl_page_in_use(pg) __page_in_use(pg, 1)
#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
+static inline struct page *cl_page_vmpage(struct cl_page *page)
+{
+ LASSERT(page->cp_vmpage);
+ return page->cp_vmpage;
+}
+
/** @} cl_page */
/** \addtogroup cl_lock cl_lock
@@ -1150,12 +1097,6 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
* (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
* cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
*
- * All locks for a given object are linked into cl_object_header::coh_locks
- * list (protected by cl_object_header::coh_lock_guard spin-lock) through
- * cl_lock::cll_linkage. Currently this list is not sorted in any way. We can
- * sort it in starting lock offset, or use altogether different data structure
- * like a tree.
- *
* Typical cl_lock consists of the two layers:
*
* - vvp_lock (vvp specific data), and
@@ -1177,111 +1118,29 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
*
* LIFE CYCLE
*
- * cl_lock is reference counted. When reference counter drops to 0, lock is
- * placed in the cache, except when lock is in CLS_FREEING state. CLS_FREEING
- * lock is destroyed when last reference is released. Referencing between
- * top-lock and its sub-locks is described in the lov documentation module.
- *
- * STATE MACHINE
- *
- * Also, cl_lock is a state machine. This requires some clarification. One of
- * the goals of client IO re-write was to make IO path non-blocking, or at
- * least to make it easier to make it non-blocking in the future. Here
- * `non-blocking' means that when a system call (read, write, truncate)
- * reaches a situation where it has to wait for a communication with the
- * server, it should --instead of waiting-- remember its current state and
- * switch to some other work. E.g,. instead of waiting for a lock enqueue,
- * client should proceed doing IO on the next stripe, etc. Obviously this is
- * rather radical redesign, and it is not planned to be fully implemented at
- * this time, instead we are putting some infrastructure in place, that would
- * make it easier to do asynchronous non-blocking IO easier in the
- * future. Specifically, where old locking code goes to sleep (waiting for
- * enqueue, for example), new code returns cl_lock_transition::CLO_WAIT. When
- * enqueue reply comes, its completion handler signals that lock state-machine
- * is ready to transit to the next state. There is some generic code in
- * cl_lock.c that sleeps, waiting for these signals. As a result, for users of
- * this cl_lock.c code, it looks like locking is done in normal blocking
- * fashion, and it the same time it is possible to switch to the non-blocking
- * locking (simply by returning cl_lock_transition::CLO_WAIT from cl_lock.c
- * functions).
- *
- * For a description of state machine states and transitions see enum
- * cl_lock_state.
- *
- * There are two ways to restrict a set of states which lock might move to:
- *
- * - placing a "hold" on a lock guarantees that lock will not be moved
- * into cl_lock_state::CLS_FREEING state until hold is released. Hold
- * can be only acquired on a lock that is not in
- * cl_lock_state::CLS_FREEING. All holds on a lock are counted in
- * cl_lock::cll_holds. Hold protects lock from cancellation and
- * destruction. Requests to cancel and destroy a lock on hold will be
- * recorded, but only honored when last hold on a lock is released;
- *
- * - placing a "user" on a lock guarantees that lock will not leave
- * cl_lock_state::CLS_NEW, cl_lock_state::CLS_QUEUING,
- * cl_lock_state::CLS_ENQUEUED and cl_lock_state::CLS_HELD set of
- * states, once it enters this set. That is, if a user is added onto a
- * lock in a state not from this set, it doesn't immediately enforce
- * lock to move to this set, but once lock enters this set it will
- * remain there until all users are removed. Lock users are counted in
- * cl_lock::cll_users.
- *
- * User is used to assure that lock is not canceled or destroyed while
- * it is being enqueued, or actively used by some IO.
- *
- * Currently, a user always comes with a hold (cl_lock_invariant()
- * checks that a number of holds is not less than a number of users).
- *
- * CONCURRENCY
- *
- * This is how lock state-machine operates. struct cl_lock contains a mutex
- * cl_lock::cll_guard that protects struct fields.
- *
- * - mutex is taken, and cl_lock::cll_state is examined.
- *
- * - for every state there are possible target states where lock can move
- * into. They are tried in order. Attempts to move into next state are
- * done by _try() functions in cl_lock.c:cl_{enqueue,unlock,wait}_try().
- *
- * - if the transition can be performed immediately, state is changed,
- * and mutex is released.
- *
- * - if the transition requires blocking, _try() function returns
- * cl_lock_transition::CLO_WAIT. Caller unlocks mutex and goes to
- * sleep, waiting for possibility of lock state change. It is woken
- * up when some event occurs, that makes lock state change possible
- * (e.g., the reception of the reply from the server), and repeats
- * the loop.
- *
- * Top-lock and sub-lock has separate mutexes and the latter has to be taken
- * first to avoid dead-lock.
- *
- * To see an example of interaction of all these issues, take a look at the
- * lov_cl.c:lov_lock_enqueue() function. It is called as a part of
- * cl_enqueue_try(), and tries to advance top-lock to ENQUEUED state, by
- * advancing state-machines of its sub-locks (lov_lock_enqueue_one()). Note
- * also, that it uses trylock to grab sub-lock mutex to avoid dead-lock. It
- * also has to handle CEF_ASYNC enqueue, when sub-locks enqueues have to be
- * done in parallel, rather than one after another (this is used for glimpse
- * locks, that cannot dead-lock).
+ * cl_lock is a cacheless data container for the requirements of locks to
+ * complete the IO. cl_lock is created before I/O starts and destroyed when the
+ * I/O is complete.
+ *
+ * cl_lock depends on an LDLM lock to fulfill lock semantics; the LDLM lock is
+ * attached to the cl_lock at the OSC layer and remains cacheable.
*
* INTERFACE AND USAGE
*
- * struct cl_lock_operations provide a number of call-backs that are invoked
- * when events of interest occurs. Layers can intercept and handle glimpse,
- * blocking, cancel ASTs and a reception of the reply from the server.
+ * Two major methods are supported for cl_lock: clo_enqueue and clo_cancel. A
+ * cl_lock is enqueued by cl_lock_request(), which will call clo_enqueue()
+ * methods for each layer to enqueue the lock. At the LOV layer, if a cl_lock
+ * consists of multiple sub cl_locks, each sub-lock is enqueued
+ * correspondingly. At the OSC layer, the enqueue request tries to reuse a
+ * cached LDLM lock; otherwise a new LDLM lock has to be requested from the
+ * OST side.
*
- * One important difference with the old client locking model is that new
- * client has a representation for the top-lock, whereas in the old code only
- * sub-locks existed as real data structures and file-level locks are
- * represented by "request sets" that are created and destroyed on each and
- * every lock creation.
+ * cl_lock_cancel() must be called to release a cl_lock after use. The
+ * clo_cancel() method is called for each layer to release the resources held
+ * by this lock. At the OSC layer, the LDLM lock reference taken at
+ * clo_enqueue time is released.
*
- * Top-locks are cached, and can be found in the cache by the system calls. It
- * is possible that top-lock is in cache, but some of its sub-locks were
- * canceled and destroyed. In that case top-lock has to be enqueued again
- * before it can be used.
+ * An LDLM lock can only be canceled once no cl_lock is using it.
*
* Overall process of the locking during IO operation is as following:
*
@@ -1294,7 +1153,7 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
*
* - when all locks are acquired, IO is performed;
*
- * - locks are released into cache.
+ * - locks are released after IO is complete.
*
* Striping introduces major additional complexity into locking. The
* fundamental problem is that it is generally unsafe to actively use (hold)
@@ -1316,16 +1175,6 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
* buf is a part of memory mapped Lustre file, a lock or locks protecting buf
* has to be held together with the usual lock on [offset, offset + count].
*
- * As multi-stripe locks have to be allowed, it makes sense to cache them, so
- * that, for example, a sequence of O_APPEND writes can proceed quickly
- * without going down to the individual stripes to do lock matching. On the
- * other hand, multi-stripe locks shouldn't be used by normal read/write
- * calls. To achieve this, every layer can implement ->clo_fits_into() method,
- * that is called by lock matching code (cl_lock_lookup()), and that can be
- * used to selectively disable matching of certain locks for certain IOs. For
- * example, lov layer implements lov_lock_fits_into() that allow multi-stripe
- * locks to be matched only for truncates and O_APPEND writes.
- *
* Interaction with DLM
*
* In the expected setup, cl_lock is ultimately backed up by a collection of
@@ -1356,295 +1205,27 @@ struct cl_lock_descr {
__u32 cld_enq_flags;
};
-#define DDESCR "%s(%d):[%lu, %lu]"
+#define DDESCR "%s(%d):[%lu, %lu]:%x"
#define PDESCR(descr) \
cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode, \
- (descr)->cld_start, (descr)->cld_end
+ (descr)->cld_start, (descr)->cld_end, (descr)->cld_enq_flags
const char *cl_lock_mode_name(const enum cl_lock_mode mode);
/**
- * Lock state-machine states.
- *
- * \htmlonly
- * <pre>
- *
- * Possible state transitions:
- *
- * +------------------>NEW
- * | |
- * | | cl_enqueue_try()
- * | |
- * | cl_unuse_try() V
- * | +--------------QUEUING (*)
- * | | |
- * | | | cl_enqueue_try()
- * | | |
- * | | cl_unuse_try() V
- * sub-lock | +-------------ENQUEUED (*)
- * canceled | | |
- * | | | cl_wait_try()
- * | | |
- * | | (R)
- * | | |
- * | | V
- * | | HELD<---------+
- * | | | |
- * | | | | cl_use_try()
- * | | cl_unuse_try() | |
- * | | | |
- * | | V ---+
- * | +------------>INTRANSIT (D) <--+
- * | | |
- * | cl_unuse_try() | | cached lock found
- * | | | cl_use_try()
- * | | |
- * | V |
- * +------------------CACHED---------+
- * |
- * (C)
- * |
- * V
- * FREEING
- *
- * Legend:
- *
- * In states marked with (*) transition to the same state (i.e., a loop
- * in the diagram) is possible.
- *
- * (R) is the point where Receive call-back is invoked: it allows layers
- * to handle arrival of lock reply.
- *
- * (C) is the point where Cancellation call-back is invoked.
- *
- * (D) is the transit state which means the lock is changing.
- *
- * Transition to FREEING state is possible from any other state in the
- * diagram in case of unrecoverable error.
- * </pre>
- * \endhtmlonly
- *
- * These states are for individual cl_lock object. Top-lock and its sub-locks
- * can be in the different states. Another way to say this is that we have
- * nested state-machines.
- *
- * Separate QUEUING and ENQUEUED states are needed to support non-blocking
- * operation for locks with multiple sub-locks. Imagine lock on a file F, that
- * intersects 3 stripes S0, S1, and S2. To enqueue F client has to send
- * enqueue to S0, wait for its completion, then send enqueue for S1, wait for
- * its completion and at last enqueue lock for S2, and wait for its
- * completion. In that case, top-lock is in QUEUING state while S0, S1 are
- * handled, and is in ENQUEUED state after enqueue to S2 has been sent (note
- * that in this case, sub-locks move from state to state, and top-lock remains
- * in the same state).
- */
-enum cl_lock_state {
- /**
- * Lock that wasn't yet enqueued
- */
- CLS_NEW,
- /**
- * Enqueue is in progress, blocking for some intermediate interaction
- * with the other side.
- */
- CLS_QUEUING,
- /**
- * Lock is fully enqueued, waiting for server to reply when it is
- * granted.
- */
- CLS_ENQUEUED,
- /**
- * Lock granted, actively used by some IO.
- */
- CLS_HELD,
- /**
- * This state is used to mark the lock is being used, or unused.
- * We need this state because the lock may have several sublocks,
- * so it's impossible to have an atomic way to bring all sublocks
- * into CLS_HELD state at use case, or all sublocks to CLS_CACHED
- * at unuse case.
- * If a thread is referring to a lock, and it sees the lock is in this
- * state, it must wait for the lock.
- * See state diagram for details.
- */
- CLS_INTRANSIT,
- /**
- * Lock granted, not used.
- */
- CLS_CACHED,
- /**
- * Lock is being destroyed.
- */
- CLS_FREEING,
- CLS_NR
-};
-
-enum cl_lock_flags {
- /**
- * lock has been cancelled. This flag is never cleared once set (by
- * cl_lock_cancel0()).
- */
- CLF_CANCELLED = 1 << 0,
- /** cancellation is pending for this lock. */
- CLF_CANCELPEND = 1 << 1,
- /** destruction is pending for this lock. */
- CLF_DOOMED = 1 << 2,
- /** from enqueue RPC reply upcall. */
- CLF_FROM_UPCALL = 1 << 3,
-};
-
-/**
- * Lock closure.
- *
- * Lock closure is a collection of locks (both top-locks and sub-locks) that
- * might be updated in a result of an operation on a certain lock (which lock
- * this is a closure of).
- *
- * Closures are needed to guarantee dead-lock freedom in the presence of
- *
- * - nested state-machines (top-lock state-machine composed of sub-lock
- * state-machines), and
- *
- * - shared sub-locks.
- *
- * Specifically, many operations, such as lock enqueue, wait, unlock,
- * etc. start from a top-lock, and then operate on a sub-locks of this
- * top-lock, holding a top-lock mutex. When sub-lock state changes as a result
- * of such operation, this change has to be propagated to all top-locks that
- * share this sub-lock. Obviously, no natural lock ordering (e.g.,
- * top-to-bottom or bottom-to-top) captures this scenario, so try-locking has
- * to be used. Lock closure systematizes this try-and-repeat logic.
- */
-struct cl_lock_closure {
- /**
- * Lock that is mutexed when closure construction is started. When
- * closure in is `wait' mode (cl_lock_closure::clc_wait), mutex on
- * origin is released before waiting.
- */
- struct cl_lock *clc_origin;
- /**
- * List of enclosed locks, so far. Locks are linked here through
- * cl_lock::cll_inclosure.
- */
- struct list_head clc_list;
- /**
- * True iff closure is in a `wait' mode. This determines what
- * cl_lock_enclosure() does when a lock L to be added to the closure
- * is currently mutexed by some other thread.
- *
- * If cl_lock_closure::clc_wait is not set, then closure construction
- * fails with CLO_REPEAT immediately.
- *
- * In wait mode, cl_lock_enclosure() waits until next attempt to build
- * a closure might succeed. To this end it releases an origin mutex
- * (cl_lock_closure::clc_origin), that has to be the only lock mutex
- * owned by the current thread, and then waits on L mutex (by grabbing
- * it and immediately releasing), before returning CLO_REPEAT to the
- * caller.
- */
- int clc_wait;
- /** Number of locks in the closure. */
- int clc_nr;
-};
-
-/**
* Layered client lock.
*/
struct cl_lock {
- /** Reference counter. */
- atomic_t cll_ref;
/** List of slices. Immutable after creation. */
struct list_head cll_layers;
- /**
- * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
- * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
- */
- struct list_head cll_linkage;
- /**
- * Parameters of this lock. Protected by
- * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
- * cl_lock::cll_guard. Modified only on lock creation and in
- * cl_lock_modify().
- */
+ /** lock attribute, extent, cl_object, etc. */
struct cl_lock_descr cll_descr;
- /** Protected by cl_lock::cll_guard. */
- enum cl_lock_state cll_state;
- /** signals state changes. */
- wait_queue_head_t cll_wq;
- /**
- * Recursive lock, most fields in cl_lock{} are protected by this.
- *
- * Locking rules: this mutex is never held across network
- * communication, except when lock is being canceled.
- *
- * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
- * on a top-lock. Other direction is implemented through a
- * try-lock-repeat loop. Mutices of unrelated locks can be taken only
- * by try-locking.
- *
- * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
- */
- struct mutex cll_guard;
- struct task_struct *cll_guarder;
- int cll_depth;
-
- /**
- * the owner for INTRANSIT state
- */
- struct task_struct *cll_intransit_owner;
- int cll_error;
- /**
- * Number of holds on a lock. A hold prevents a lock from being
- * canceled and destroyed. Protected by cl_lock::cll_guard.
- *
- * \see cl_lock_hold(), cl_lock_unhold(), cl_lock_release()
- */
- int cll_holds;
- /**
- * Number of lock users. Valid in cl_lock_state::CLS_HELD state
- * only. Lock user pins lock in CLS_HELD state. Protected by
- * cl_lock::cll_guard.
- *
- * \see cl_wait(), cl_unuse().
- */
- int cll_users;
- /**
- * Flag bit-mask. Values from enum cl_lock_flags. Updates are
- * protected by cl_lock::cll_guard.
- */
- unsigned long cll_flags;
- /**
- * A linkage into a list of locks in a closure.
- *
- * \see cl_lock_closure
- */
- struct list_head cll_inclosure;
- /**
- * Confict lock at queuing time.
- */
- struct cl_lock *cll_conflict;
- /**
- * A list of references to this lock, for debugging.
- */
- struct lu_ref cll_reference;
- /**
- * A list of holds on this lock, for debugging.
- */
- struct lu_ref cll_holders;
- /**
- * A reference for cl_lock::cll_descr::cld_obj. For debugging.
- */
- struct lu_ref_link cll_obj_ref;
-#ifdef CONFIG_LOCKDEP
- /* "dep_map" name is assumed by lockdep.h macros. */
- struct lockdep_map dep_map;
-#endif
};
/**
* Per-layer part of cl_lock
*
- * \see ccc_lock, lov_lock, lovsub_lock, osc_lock
+ * \see vvp_lock, lov_lock, lovsub_lock, osc_lock
*/
struct cl_lock_slice {
struct cl_lock *cls_lock;
@@ -1658,174 +1239,36 @@ struct cl_lock_slice {
};
/**
- * Possible (non-error) return values of ->clo_{enqueue,wait,unlock}().
- *
- * NOTE: lov_subresult() depends on ordering here.
- */
-enum cl_lock_transition {
- /** operation cannot be completed immediately. Wait for state change. */
- CLO_WAIT = 1,
- /** operation had to release lock mutex, restart. */
- CLO_REPEAT = 2,
- /** lower layer re-enqueued. */
- CLO_REENQUEUED = 3,
-};
-
-/**
*
* \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops
*/
struct cl_lock_operations {
- /**
- * \name statemachine
- *
- * State machine transitions. These 3 methods are called to transfer
- * lock from one state to another, as described in the commentary
- * above enum #cl_lock_state.
- *
- * \retval 0 this layer has nothing more to do to before
- * transition to the target state happens;
- *
- * \retval CLO_REPEAT method had to release and re-acquire cl_lock
- * mutex, repeat invocation of transition method
- * across all layers;
- *
- * \retval CLO_WAIT this layer cannot move to the target state
- * immediately, as it has to wait for certain event
- * (e.g., the communication with the server). It
- * is guaranteed, that when the state transfer
- * becomes possible, cl_lock::cll_wq wait-queue
- * is signaled. Caller can wait for this event by
- * calling cl_lock_state_wait();
- *
- * \retval -ve failure, abort state transition, move the lock
- * into cl_lock_state::CLS_FREEING state, and set
- * cl_lock::cll_error.
- *
- * Once all layers voted to agree to transition (by returning 0), lock
- * is moved into corresponding target state. All state transition
- * methods are optional.
- */
/** @{ */
/**
* Attempts to enqueue the lock. Called top-to-bottom.
*
- * \see ccc_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
+ * \retval 0 this layer has enqueued the lock successfully
+	 * \retval >0 this layer has enqueued the lock, but needs to wait on
+ * @anchor for resources
+ * \retval -ve failure
+ *
+ * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
* \see osc_lock_enqueue()
*/
int (*clo_enqueue)(const struct lu_env *env,
const struct cl_lock_slice *slice,
- struct cl_io *io, __u32 enqflags);
+ struct cl_io *io, struct cl_sync_io *anchor);
/**
- * Attempts to wait for enqueue result. Called top-to-bottom.
- *
- * \see ccc_lock_wait(), lov_lock_wait(), osc_lock_wait()
- */
- int (*clo_wait)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /**
- * Attempts to unlock the lock. Called bottom-to-top. In addition to
- * usual return values of lock state-machine methods, this can return
- * -ESTALE to indicate that lock cannot be returned to the cache, and
- * has to be re-initialized.
- * unuse is a one-shot operation, so it must NOT return CLO_WAIT.
- *
- * \see ccc_lock_unuse(), lov_lock_unuse(), osc_lock_unuse()
- */
- int (*clo_unuse)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /**
- * Notifies layer that cached lock is started being used.
- *
- * \pre lock->cll_state == CLS_CACHED
- *
- * \see lov_lock_use(), osc_lock_use()
- */
- int (*clo_use)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /** @} statemachine */
- /**
- * A method invoked when lock state is changed (as a result of state
- * transition). This is used, for example, to track when the state of
- * a sub-lock changes, to propagate this change to the corresponding
- * top-lock. Optional
- *
- * \see lovsub_lock_state()
- */
- void (*clo_state)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state st);
- /**
- * Returns true, iff given lock is suitable for the given io, idea
- * being, that there are certain "unsafe" locks, e.g., ones acquired
- * for O_APPEND writes, that we don't want to re-use for a normal
- * write, to avoid the danger of cascading evictions. Optional. Runs
- * under cl_object_header::coh_lock_guard.
- *
- * XXX this should take more information about lock needed by
- * io. Probably lock description or something similar.
- *
- * \see lov_fits_into()
- */
- int (*clo_fits_into)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io);
- /**
- * \name ast
- * Asynchronous System Traps. All of then are optional, all are
- * executed bottom-to-top.
- */
- /** @{ */
-
- /**
- * Cancellation callback. Cancel a lock voluntarily, or under
- * the request of server.
+	 * Cancel a lock and release its DLM lock reference, without
+	 * cancelling the DLM lock itself
*/
void (*clo_cancel)(const struct lu_env *env,
const struct cl_lock_slice *slice);
- /**
- * Lock weighting ast. Executed to estimate how precious this lock
- * is. The sum of results across all layers is used to determine
- * whether lock worth keeping in cache given present memory usage.
- *
- * \see osc_lock_weigh(), vvp_lock_weigh(), lovsub_lock_weigh().
- */
- unsigned long (*clo_weigh)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /** @} ast */
-
- /**
- * \see lovsub_lock_closure()
- */
- int (*clo_closure)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_lock_closure *closure);
- /**
- * Executed bottom-to-top when lock description changes (e.g., as a
- * result of server granting more generous lock than was requested).
- *
- * \see lovsub_lock_modify()
- */
- int (*clo_modify)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *updated);
- /**
- * Notifies layers (bottom-to-top) that lock is going to be
- * destroyed. Responsibility of layers is to prevent new references on
- * this lock from being acquired once this method returns.
- *
- * This can be called multiple times due to the races.
- *
- * \see cl_lock_delete()
- * \see osc_lock_delete(), lovsub_lock_delete()
- */
- void (*clo_delete)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
+ /** @} */
/**
* Destructor. Frees resources and the slice.
*
- * \see ccc_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
+ * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
* \see osc_lock_fini()
*/
void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
@@ -2016,7 +1459,7 @@ enum cl_io_state {
* This is usually embedded into layer session data, rather than allocated
* dynamically.
*
- * \see vvp_io, lov_io, osc_io, ccc_io
+ * \see vvp_io, lov_io, osc_io
*/
struct cl_io_slice {
struct cl_io *cis_io;
@@ -2031,6 +1474,8 @@ struct cl_io_slice {
struct list_head cis_linkage;
};
+typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
+ struct cl_page *);
/**
* Per-layer io operations.
* \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
@@ -2114,7 +1559,7 @@ struct cl_io_operations {
void (*cio_fini)(const struct lu_env *env,
const struct cl_io_slice *slice);
} op[CIT_OP_NR];
- struct {
+
/**
* Submit pages from \a queue->c2_qin for IO, and move
* successfully submitted pages into \a queue->c2_qout. Return
@@ -2127,7 +1572,15 @@ struct cl_io_operations {
const struct cl_io_slice *slice,
enum cl_req_type crt,
struct cl_2queue *queue);
- } req_op[CRT_NR];
+ /**
+	 * Queue an async page for write.
+	 * The difference between cio_submit and cio_commit_async is that
+	 * cio_submit is for urgent requests.
+ */
+ int (*cio_commit_async)(const struct lu_env *env,
+ const struct cl_io_slice *slice,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb);
/**
* Read missing page.
*
@@ -2140,31 +1593,6 @@ struct cl_io_operations {
const struct cl_io_slice *slice,
const struct cl_page_slice *page);
/**
- * Prepare write of a \a page. Called bottom-to-top by a top-level
- * cl_io_operations::op[CIT_WRITE]::cio_start() to prepare page for
- * get data from user-level buffer.
- *
- * \pre io->ci_type == CIT_WRITE
- *
- * \see vvp_io_prepare_write(), lov_io_prepare_write(),
- * osc_io_prepare_write().
- */
- int (*cio_prepare_write)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page,
- unsigned from, unsigned to);
- /**
- *
- * \pre io->ci_type == CIT_WRITE
- *
- * \see vvp_io_commit_write(), lov_io_commit_write(),
- * osc_io_commit_write().
- */
- int (*cio_commit_write)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page,
- unsigned from, unsigned to);
- /**
* Optional debugging helper. Print given io slice.
*/
int (*cio_print)(const struct lu_env *env, void *cookie,
@@ -2216,9 +1644,13 @@ enum cl_enq_flags {
*/
CEF_AGL = 0x00000020,
/**
+ * enqueue a lock to test DLM lock existence.
+ */
+ CEF_PEEK = 0x00000040,
+ /**
* mask of enq_flags.
*/
- CEF_MASK = 0x0000003f,
+ CEF_MASK = 0x0000007f,
};
/**
@@ -2228,12 +1660,12 @@ enum cl_enq_flags {
struct cl_io_lock_link {
/** linkage into one of cl_lockset lists. */
struct list_head cill_linkage;
- struct cl_lock_descr cill_descr;
- struct cl_lock *cill_lock;
+ struct cl_lock cill_lock;
/** optional destructor */
void (*cill_fini)(const struct lu_env *env,
struct cl_io_lock_link *link);
};
+#define cill_descr cill_lock.cll_descr
/**
* Lock-set represents a collection of locks, that io needs at a
@@ -2267,8 +1699,6 @@ struct cl_io_lock_link {
struct cl_lockset {
/** locks to be acquired. */
struct list_head cls_todo;
- /** locks currently being processed. */
- struct list_head cls_curr;
/** locks acquired. */
struct list_head cls_done;
};
@@ -2632,9 +2062,7 @@ struct cl_site {
* and top-locks (and top-pages) are accounted here.
*/
struct cache_stats cs_pages;
- struct cache_stats cs_locks;
atomic_t cs_pages_state[CPS_NR];
- atomic_t cs_locks_state[CLS_NR];
};
int cl_site_init(struct cl_site *s, struct cl_device *top);
@@ -2725,7 +2153,7 @@ static inline void cl_device_fini(struct cl_device *d)
}
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj,
+ struct cl_object *obj, pgoff_t index,
const struct cl_page_operations *ops);
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
struct cl_object *obj,
@@ -2758,7 +2186,7 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
struct ost_lvb *lvb);
int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
const struct cl_object_conf *conf);
-void cl_object_prune(const struct lu_env *env, struct cl_object *obj);
+int cl_object_prune(const struct lu_env *env, struct cl_object *obj);
void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
/**
@@ -2772,7 +2200,7 @@ static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
static inline void cl_object_page_init(struct cl_object *clob, int size)
{
clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
- cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8);
+ cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size);
}
static inline void *cl_object_page_slice(struct cl_object *clob,
@@ -2781,6 +2209,16 @@ static inline void *cl_object_page_slice(struct cl_object *clob,
return (void *)((char *)page + clob->co_slice_off);
}
+/**
+ * Return refcount of cl_object.
+ */
+static inline int cl_object_refc(struct cl_object *clob)
+{
+ struct lu_object_header *header = clob->co_lu.lo_header;
+
+ return atomic_read(&header->loh_ref);
+}
+
/** @} cl_object */
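cl_object_page_init() and cl_object_page_slice() above implement a bump-allocator layout for per-layer page slices: at setup, each layer reserves a rounded-up chunk of the shared per-page buffer and records its offset, and at runtime it finds its slice at a fixed offset from the page base. A hedged, self-contained sketch of the arithmetic; names are illustrative, and the 8-byte rounding is an assumption matching cfs_size_round():

#include <stddef.h>

struct demo_object {
	size_t slice_off;	/* like cl_object::co_slice_off */
};

static size_t bufsize;		/* like coh_page_bufsize, grows per layer */

static void demo_page_init(struct demo_object *obj, size_t size)
{
	obj->slice_off = bufsize;
	bufsize += (size + 7) & ~(size_t)7;	/* cf. cfs_size_round() */
}

static void *demo_page_slice(const struct demo_object *obj, void *page)
{
	return (char *)page + obj->slice_off;	/* fixed-offset lookup */
}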
/** \defgroup cl_page cl_page
@@ -2794,28 +2232,20 @@ enum {
};
/* callback of cl_page_gang_lookup() */
-typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *,
- struct cl_page *, void *);
-int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, pgoff_t start, pgoff_t end,
- cl_page_gang_cb_t cb, void *cbdata);
-struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index);
struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *obj,
pgoff_t idx, struct page *vmpage,
enum cl_page_type type);
-struct cl_page *cl_page_find_sub(const struct lu_env *env,
- struct cl_object *obj,
- pgoff_t idx, struct page *vmpage,
- struct cl_page *parent);
+struct cl_page *cl_page_alloc(const struct lu_env *env,
+ struct cl_object *o, pgoff_t ind,
+ struct page *vmpage,
+ enum cl_page_type type);
void cl_page_get(struct cl_page *page);
void cl_page_put(const struct lu_env *env, struct cl_page *page);
void cl_page_print(const struct lu_env *env, void *cookie, lu_printer_t printer,
const struct cl_page *pg);
void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_page *pg);
-struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page);
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj);
-struct cl_page *cl_page_top(struct cl_page *page);
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
const struct lu_device_type *dtype);
@@ -2872,12 +2302,10 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
void cl_page_discard(const struct lu_env *env, struct cl_io *io,
struct cl_page *pg);
void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
-int cl_page_unmap(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg);
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate);
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page);
+ struct cl_page *page, pgoff_t *max_index);
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
int cl_page_size(const struct cl_object *obj);
@@ -2890,138 +2318,66 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie,
const struct cl_lock_descr *descr);
/* @} helper */
+/**
+ * Data structure managing a client's cached pages. A count of
+ * "unstable" pages is maintained, and an LRU of clean pages is
+ * maintained. "unstable" pages are pages pinned by the ptlrpc
+ * layer for recovery purposes.
+ */
+struct cl_client_cache {
+ /**
+ * # of users (OSCs)
+ */
+ atomic_t ccc_users;
+ /**
+	 * # of threads currently doing shrinking
+ */
+ unsigned int ccc_lru_shrinkers;
+ /**
+ * # of LRU entries available
+ */
+ atomic_t ccc_lru_left;
+ /**
+	 * List of entities (OSCs) for this LRU cache
+ */
+ struct list_head ccc_lru;
+ /**
+ * Max # of LRU entries
+ */
+ unsigned long ccc_lru_max;
+ /**
+ * Lock to protect ccc_lru list
+ */
+ spinlock_t ccc_lru_lock;
+ /**
+ * # of unstable pages for this mount point
+ */
+ atomic_t ccc_unstable_nr;
+ /**
+ * Waitq for awaiting unstable pages to reach zero.
+	 * Used at umount time and signaled on BRW commit
+ */
+ wait_queue_head_t ccc_unstable_waitq;
+
+};
+
/** @} cl_page */
/** \defgroup cl_lock cl_lock
* @{
*/
-struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source);
-struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source);
-struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source);
-struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
- struct cl_object *obj, pgoff_t index,
- struct cl_lock *except, int pending,
- int canceld);
-static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page,
- struct cl_lock *except,
- int pending, int canceld)
-{
- LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj));
- return cl_lock_at_pgoff(env, obj, page->cp_index, except,
- pending, canceld);
-}
-
+int cl_lock_request(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock *lock);
+int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
+ const struct cl_io *io);
+void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock);
const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
const struct lu_device_type *dtype);
-
-void cl_lock_get(struct cl_lock *lock);
-void cl_lock_get_trust(struct cl_lock *lock);
-void cl_lock_put(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock);
-
-int cl_lock_is_intransit(struct cl_lock *lock);
-
-int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock,
- int keep_mutex);
-
-/** \name statemachine statemachine
- * Interface to lock state machine consists of 3 parts:
- *
- * - "try" functions that attempt to effect a state transition. If state
- * transition is not possible right now (e.g., if it has to wait for some
- * asynchronous event to occur), these functions return
- * cl_lock_transition::CLO_WAIT.
- *
- * - "non-try" functions that implement synchronous blocking interface on
- * top of non-blocking "try" functions. These functions repeatedly call
- * corresponding "try" versions, and if state transition is not possible
- * immediately, wait for lock state change.
- *
- * - methods from cl_lock_operations, called by "try" functions. Lock can
- * be advanced to the target state only when all layers voted that they
- * are ready for this transition. "Try" functions call methods under lock
- * mutex. If a layer had to release a mutex, it re-acquires it and returns
- * cl_lock_transition::CLO_REPEAT, causing "try" function to call all
- * layers again.
- *
- * TRY NON-TRY METHOD FINAL STATE
- *
- * cl_enqueue_try() cl_enqueue() cl_lock_operations::clo_enqueue() CLS_ENQUEUED
- *
- * cl_wait_try() cl_wait() cl_lock_operations::clo_wait() CLS_HELD
- *
- * cl_unuse_try() cl_unuse() cl_lock_operations::clo_unuse() CLS_CACHED
- *
- * cl_use_try() NONE cl_lock_operations::clo_use() CLS_HELD
- *
- * @{
- */
-
-int cl_wait(const struct lu_env *env, struct cl_lock *lock);
-void cl_unuse(const struct lu_env *env, struct cl_lock *lock);
-int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
- struct cl_io *io, __u32 flags);
-int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock);
-int cl_wait_try(const struct lu_env *env, struct cl_lock *lock);
-int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic);
-
-/** @} statemachine */
-
-void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state);
-int cl_queue_match(const struct list_head *queue,
- const struct cl_lock_descr *need);
-
-void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_is_mutexed(struct cl_lock *lock);
-int cl_lock_nr_mutexed(const struct lu_env *env);
-int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_ext_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need);
-int cl_lock_descr_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need);
-int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need);
-int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
- const struct cl_lock_descr *desc);
-
-void cl_lock_closure_init(const struct lu_env *env,
- struct cl_lock_closure *closure,
- struct cl_lock *origin, int wait);
-void cl_lock_closure_fini(struct cl_lock_closure *closure);
-int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure);
-void cl_lock_disclosure(const struct lu_env *env,
- struct cl_lock_closure *closure);
-int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure);
-
+void cl_lock_release(const struct lu_env *env, struct cl_lock *lock);
+int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock *lock, struct cl_sync_io *anchor);
void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error);
-void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait);
-
-unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
/** @} cl_lock */
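With the state machine gone, the lock API above collapses to a request/release pair around a single IO. A hedged usage sketch built only from these declarations; descriptor setup, error paths, and the exact division of labor between cl_lock_init()/cl_lock_fini() and cl_lock_request() are left to the implementation, so this is illustrative, not a drop-in fragment:

static int demo_locked_io(const struct lu_env *env, struct cl_io *io,
			  struct cl_lock *lock)
{
	int rc;

	rc = cl_lock_request(env, io, lock);	/* enqueue through all layers */
	if (rc < 0)
		return rc;

	/* ... perform the IO under the granted, uncached lock ... */

	cl_lock_release(env, lock);		/* cancel and finalize */
	return 0;
}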
@@ -3050,15 +2406,14 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
struct cl_lock_descr *descr);
int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
struct cl_page *page);
-int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
-int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
enum cl_req_type iot, struct cl_2queue *queue);
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
enum cl_req_type iot, struct cl_2queue *queue,
long timeout);
+int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb);
int cl_io_is_going(const struct lu_env *env);
/**
@@ -3114,6 +2469,12 @@ static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
}
+static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
+{
+ LASSERT(plist->pl_nr > 0);
+ return list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
+}
+
/**
* Iterate over pages in a page list.
*/
@@ -3130,9 +2491,14 @@ void cl_page_list_init(struct cl_page_list *plist);
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page);
void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
struct cl_page *page);
+void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
+ struct cl_page *page);
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head);
+void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist,
+ struct cl_page *page);
void cl_page_list_disown(const struct lu_env *env,
struct cl_io *io, struct cl_page_list *plist);
+void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist);
void cl_2queue_init(struct cl_2queue *queue);
void cl_2queue_disown(const struct lu_env *env,
@@ -3177,13 +2543,18 @@ struct cl_sync_io {
atomic_t csi_barrier;
/** completion to be signaled when transfer is complete. */
wait_queue_head_t csi_waitq;
+ /** callback to invoke when this IO is finished */
+ void (*csi_end_io)(const struct lu_env *,
+ struct cl_sync_io *);
};
-void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
-int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_sync_io *anchor,
+void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
+ void (*end)(const struct lu_env *, struct cl_sync_io *));
+int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
long timeout);
-void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
+void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
+ int ioret);
+void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
/** @} cl_sync_io */
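The cl_sync_io rework above threads an end-io callback through the anchor: cl_sync_io_init() arms it for a number of transfers, every completion calls cl_sync_io_note(), and the final note fires the callback (cl_sync_io_end() is the default declared here). A hedged sketch of the submitter side, with the transfer submission itself elided:

static int demo_sync_pages(const struct lu_env *env,
			   struct cl_sync_io *anchor, int nr, long timeout)
{
	cl_sync_io_init(anchor, nr, cl_sync_io_end);

	/*
	 * ... submit 'nr' page transfers; each completion path must end
	 * with cl_sync_io_note(env, anchor, ioret) ...
	 */

	return cl_sync_io_wait(env, anchor, timeout);
}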
@@ -3241,6 +2612,9 @@ void *cl_env_reenter(void);
void cl_env_reexit(void *cookie);
void cl_env_implant(struct lu_env *env, int *refcheck);
void cl_env_unplant(struct lu_env *env, int *refcheck);
+unsigned int cl_env_cache_purge(unsigned int nr);
+struct lu_env *cl_env_percpu_get(void);
+void cl_env_percpu_put(struct lu_env *env);
/** @} cl_env */
diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h
deleted file mode 100644
index 5d839a9f7..000000000
--- a/drivers/staging/lustre/lustre/include/lclient.h
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Definitions shared between vvp and liblustre, and other clients in the
- * future.
- *
- * Author: Oleg Drokin <oleg.drokin@sun.com>
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#ifndef LCLIENT_H
-#define LCLIENT_H
-
-blkcnt_t dirty_cnt(struct inode *inode);
-
-int cl_glimpse_size0(struct inode *inode, int agl);
-int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
- struct inode *inode, struct cl_object *clob, int agl);
-
-static inline int cl_glimpse_size(struct inode *inode)
-{
- return cl_glimpse_size0(inode, 0);
-}
-
-static inline int cl_agl(struct inode *inode)
-{
- return cl_glimpse_size0(inode, 1);
-}
-
-/**
- * Locking policy for setattr.
- */
-enum ccc_setattr_lock_type {
- /** Locking is done by server */
- SETATTR_NOLOCK,
- /** Extent lock is enqueued */
- SETATTR_EXTENT_LOCK,
- /** Existing local extent lock is used */
- SETATTR_MATCH_LOCK
-};
-
-/**
- * IO state private to vvp or slp layers.
- */
-struct ccc_io {
- /** super class */
- struct cl_io_slice cui_cl;
- struct cl_io_lock_link cui_link;
- /**
- * I/O vector information to or from which read/write is going.
- */
- struct iov_iter *cui_iter;
- /**
- * Total size for the left IO.
- */
- size_t cui_tot_count;
-
- union {
- struct {
- enum ccc_setattr_lock_type cui_local_lock;
- } setattr;
- } u;
- /**
- * True iff io is processing glimpse right now.
- */
- int cui_glimpse;
- /**
- * Layout version when this IO is initialized
- */
- __u32 cui_layout_gen;
- /**
- * File descriptor against which IO is done.
- */
- struct ll_file_data *cui_fd;
- struct kiocb *cui_iocb;
-};
-
-/**
- * True, if \a io is a normal io, False for splice_{read,write}.
- * must be implemented in arch specific code.
- */
-int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
-
-extern struct lu_context_key ccc_key;
-extern struct lu_context_key ccc_session_key;
-
-struct ccc_thread_info {
- struct cl_lock_descr cti_descr;
- struct cl_io cti_io;
- struct cl_attr cti_attr;
-};
-
-static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
-{
- struct ccc_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &ccc_key);
- LASSERT(info);
- return info;
-}
-
-static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
-{
- struct cl_attr *attr = &ccc_env_info(env)->cti_attr;
-
- memset(attr, 0, sizeof(*attr));
- return attr;
-}
-
-static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
-{
- struct cl_io *io = &ccc_env_info(env)->cti_io;
-
- memset(io, 0, sizeof(*io));
- return io;
-}
-
-struct ccc_session {
- struct ccc_io cs_ios;
-};
-
-static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
-{
- struct ccc_session *ses;
-
- ses = lu_context_key_get(env->le_ses, &ccc_session_key);
- LASSERT(ses);
- return ses;
-}
-
-static inline struct ccc_io *ccc_env_io(const struct lu_env *env)
-{
- return &ccc_env_session(env)->cs_ios;
-}
-
-/**
- * ccc-private object state.
- */
-struct ccc_object {
- struct cl_object_header cob_header;
- struct cl_object cob_cl;
- struct inode *cob_inode;
-
- /**
- * A list of dirty pages pending IO in the cache. Used by
- * SOM. Protected by ll_inode_info::lli_lock.
- *
- * \see ccc_page::cpg_pending_linkage
- */
- struct list_head cob_pending_list;
-
- /**
- * Access this counter is protected by inode->i_sem. Now that
- * the lifetime of transient pages must be covered by inode sem,
- * we don't need to hold any lock..
- */
- int cob_transient_pages;
- /**
- * Number of outstanding mmaps on this file.
- *
- * \see ll_vm_open(), ll_vm_close().
- */
- atomic_t cob_mmap_cnt;
-
- /**
- * various flags
- * cob_discard_page_warned
- * if pages belonging to this object are discarded when a client
- * is evicted, some debug info will be printed, this flag will be set
- * during processing the first discarded page, then avoid flooding
- * debug message for lots of discarded pages.
- *
- * \see ll_dirty_page_discard_warn.
- */
- unsigned int cob_discard_page_warned:1;
-};
-
-/**
- * ccc-private page state.
- */
-struct ccc_page {
- struct cl_page_slice cpg_cl;
- int cpg_defer_uptodate;
- int cpg_ra_used;
- int cpg_write_queued;
- /**
- * Non-empty iff this page is already counted in
- * ccc_object::cob_pending_list. Protected by
- * ccc_object::cob_pending_guard. This list is only used as a flag,
- * that is, never iterated through, only checked for list_empty(), but
- * having a list is useful for debugging.
- */
- struct list_head cpg_pending_linkage;
- /** VM page */
- struct page *cpg_page;
-};
-
-static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
-{
- return container_of(slice, struct ccc_page, cpg_cl);
-}
-
-struct ccc_device {
- struct cl_device cdv_cl;
- struct super_block *cdv_sb;
- struct cl_device *cdv_next;
-};
-
-struct ccc_lock {
- struct cl_lock_slice clk_cl;
-};
-
-struct ccc_req {
- struct cl_req_slice crq_cl;
-};
-
-void *ccc_key_init (const struct lu_context *ctx,
- struct lu_context_key *key);
-void ccc_key_fini (const struct lu_context *ctx,
- struct lu_context_key *key, void *data);
-void *ccc_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key);
-void ccc_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data);
-
-int ccc_device_init (const struct lu_env *env,
- struct lu_device *d,
- const char *name, struct lu_device *next);
-struct lu_device *ccc_device_fini (const struct lu_env *env,
- struct lu_device *d);
-struct lu_device *ccc_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg,
- const struct lu_device_operations *luops,
- const struct cl_device_operations *clops);
-struct lu_device *ccc_device_free (const struct lu_env *env,
- struct lu_device *d);
-struct lu_object *ccc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev,
- const struct cl_object_operations *clops,
- const struct lu_object_operations *luops);
-
-int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
-void ccc_umount(const struct lu_env *env, struct cl_device *dev);
-int ccc_global_init(struct lu_device_type *device_type);
-void ccc_global_fini(struct lu_device_type *device_type);
-int ccc_object_init0(const struct lu_env *env, struct ccc_object *vob,
- const struct cl_object_conf *conf);
-int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf);
-void ccc_object_free(const struct lu_env *env, struct lu_object *obj);
-int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io,
- const struct cl_lock_operations *lkops);
-int ccc_object_glimpse(const struct lu_env *env,
- const struct cl_object *obj, struct ost_lvb *lvb);
-struct page *ccc_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice);
-int ccc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
-int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
-int ccc_transient_page_prep(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
-void ccc_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice);
-void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
-int ccc_lock_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *io, __u32 enqflags);
-int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
-int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
-int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
-int ccc_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io);
-void ccc_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state);
-
-int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- pgoff_t start, pgoff_t end);
-int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- loff_t start, loff_t end);
-void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
-void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
- size_t nob);
-void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio,
- struct cl_io *io);
-int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, loff_t start, size_t count, int *exceed);
-void ccc_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret);
-void ccc_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *oa, u64 flags);
-
-struct lu_device *ccc2lu_dev (struct ccc_device *vdv);
-struct lu_object *ccc2lu (struct ccc_object *vob);
-struct ccc_device *lu2ccc_dev (const struct lu_device *d);
-struct ccc_device *cl2ccc_dev (const struct cl_device *d);
-struct ccc_object *lu2ccc (const struct lu_object *obj);
-struct ccc_object *cl2ccc (const struct cl_object *obj);
-struct ccc_lock *cl2ccc_lock (const struct cl_lock_slice *slice);
-struct ccc_io *cl2ccc_io (const struct lu_env *env,
- const struct cl_io_slice *slice);
-struct ccc_req *cl2ccc_req (const struct cl_req_slice *slice);
-struct page *cl2vm_page (const struct cl_page_slice *slice);
-struct inode *ccc_object_inode(const struct cl_object *obj);
-struct ccc_object *cl_inode2ccc (struct inode *inode);
-
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr);
-
-int ccc_object_invariant(const struct cl_object *obj);
-int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
-void cl_inode_fini(struct inode *inode);
-int cl_local_size(struct inode *inode);
-
-__u16 ll_dirent_type_get(struct lu_dirent *ent);
-__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
-__u32 cl_fid_build_gen(const struct lu_fid *fid);
-
-# define CLOBINVRNT(env, clob, expr) \
- ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
-
-int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
-int cl_ocd_update(struct obd_device *host,
- struct obd_device *watched,
- enum obd_notify_event ev, void *owner, void *data);
-
-struct ccc_grouplock {
- struct lu_env *cg_env;
- struct cl_io *cg_io;
- struct cl_lock *cg_lock;
- unsigned long cg_gid;
-};
-
-int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
- struct ccc_grouplock *cg);
-void cl_put_grouplock(struct ccc_grouplock *cg);
-
-/**
- * New interfaces to get and put lov_stripe_md from lov layer. This violates
- * layering because lov_stripe_md is supposed to be a private data in lov.
- *
- * NB: If you find you have to use these interfaces for your new code, please
- * think about it again. These interfaces may be removed in the future for
- * better layering.
- */
-struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
-void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
-int lov_read_and_clear_async_rc(struct cl_object *clob);
-
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
-void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
-
-/**
- * Data structure managing a client's cached clean pages. An LRU of
- * pages is maintained, along with other statistics.
- */
-struct cl_client_cache {
- atomic_t ccc_users; /* # of users (OSCs) of this data */
- struct list_head ccc_lru; /* LRU list of cached clean pages */
- spinlock_t ccc_lru_lock; /* lock for list */
- atomic_t ccc_lru_left; /* # of LRU entries available */
- unsigned long ccc_lru_max; /* Max # of LRU entries possible */
- unsigned int ccc_lru_shrinkers; /* # of threads reclaiming */
-};
-
-#endif /*LCLIENT_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h
deleted file mode 100644
index 3907bf4ce..000000000
--- a/drivers/staging/lustre/lustre/include/linux/obd.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LINUX_OBD_H
-#define __LINUX_OBD_H
-
-#ifndef __OBD_H
-#error Do not #include this file directly. #include <obd.h> instead
-#endif
-
-#include "../obd_support.h"
-
-#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/sched.h> /* for struct task_struct, for current.h */
-#include <linux/mount.h>
-
-#include "../lustre_intent.h"
-
-struct ll_iattr {
- struct iattr iattr;
- unsigned int ia_attr_flags;
-};
-
-#define CLIENT_OBD_LIST_LOCK_DEBUG 1
-
-struct client_obd_lock {
- spinlock_t lock;
-
- unsigned long time;
- struct task_struct *task;
- const char *func;
- int line;
-};
-
-static inline void __client_obd_list_lock(struct client_obd_lock *lock,
- const char *func, int line)
-{
- unsigned long cur = jiffies;
-
- while (1) {
- if (spin_trylock(&lock->lock)) {
- LASSERT(!lock->task);
- lock->task = current;
- lock->func = func;
- lock->line = line;
- lock->time = jiffies;
- break;
- }
-
- if (time_before(cur + 5 * HZ, jiffies) &&
- time_before(lock->time + 5 * HZ, jiffies)) {
- struct task_struct *task = lock->task;
-
- if (!task)
- continue;
-
- LCONSOLE_WARN("%s:%d: lock %p was acquired by <%s:%d:%s:%d> for %lu seconds.\n",
- current->comm, current->pid,
- lock, task->comm, task->pid,
- lock->func, lock->line,
- (jiffies - lock->time) / HZ);
- LCONSOLE_WARN("====== for current process =====\n");
- dump_stack();
- LCONSOLE_WARN("====== end =======\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1000 * HZ);
- }
- cpu_relax();
- }
-}
-
-#define client_obd_list_lock(lock) \
- __client_obd_list_lock(lock, __func__, __LINE__)
-
-static inline void client_obd_list_unlock(struct client_obd_lock *lock)
-{
- LASSERT(lock->task);
- lock->task = NULL;
- lock->time = jiffies;
- spin_unlock(&lock->lock);
-}
-
-static inline void client_obd_list_lock_init(struct client_obd_lock *lock)
-{
- spin_lock_init(&lock->lock);
-}
-
-static inline void client_obd_list_lock_done(struct client_obd_lock *lock)
-{}
-
-#endif /* __LINUX_OBD_H */
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 242bb1ef6..281651218 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -198,7 +198,6 @@ typedef int (*lu_printer_t)(const struct lu_env *env,
* Operations specific for particular lu_object.
*/
struct lu_object_operations {
-
/**
* Allocate lower-layer parts of the object by calling
* lu_device_operations::ldo_object_alloc() of the corresponding
@@ -656,21 +655,21 @@ static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
* @{
*/
-int lu_site_init (struct lu_site *s, struct lu_device *d);
-void lu_site_fini (struct lu_site *s);
-int lu_site_init_finish (struct lu_site *s);
-void lu_stack_fini (const struct lu_env *env, struct lu_device *top);
-void lu_device_get (struct lu_device *d);
-void lu_device_put (struct lu_device *d);
-int lu_device_init (struct lu_device *d, struct lu_device_type *t);
-void lu_device_fini (struct lu_device *d);
-int lu_object_header_init(struct lu_object_header *h);
+int lu_site_init(struct lu_site *s, struct lu_device *d);
+void lu_site_fini(struct lu_site *s);
+int lu_site_init_finish(struct lu_site *s);
+void lu_stack_fini(const struct lu_env *env, struct lu_device *top);
+void lu_device_get(struct lu_device *d);
+void lu_device_put(struct lu_device *d);
+int lu_device_init(struct lu_device *d, struct lu_device_type *t);
+void lu_device_fini(struct lu_device *d);
+int lu_object_header_init(struct lu_object_header *h);
void lu_object_header_fini(struct lu_object_header *h);
-int lu_object_init (struct lu_object *o,
- struct lu_object_header *h, struct lu_device *d);
-void lu_object_fini (struct lu_object *o);
-void lu_object_add_top (struct lu_object_header *h, struct lu_object *o);
-void lu_object_add (struct lu_object *before, struct lu_object *o);
+int lu_object_init(struct lu_object *o,
+ struct lu_object_header *h, struct lu_device *d);
+void lu_object_fini(struct lu_object *o);
+void lu_object_add_top(struct lu_object_header *h, struct lu_object *o);
+void lu_object_add(struct lu_object *before, struct lu_object *o);
/**
* Helpers to initialize and finalize device types.
@@ -781,9 +780,8 @@ int lu_cdebug_printer(const struct lu_env *env,
*/
#define LU_OBJECT_DEBUG(mask, env, object, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
CDEBUG(mask, format, ## __VA_ARGS__); \
} \
@@ -794,9 +792,8 @@ do { \
*/
#define LU_OBJECT_HEADER(mask, env, object, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
lu_object_header_print(env, &msgdata, lu_cdebug_printer,\
(object)->lo_header); \
lu_cdebug_printer(env, &msgdata, "\n"); \
@@ -1007,6 +1004,10 @@ enum lu_context_tag {
*/
LCT_LOCAL = 1 << 7,
/**
+ * session for server thread
+ */
+ LCT_SERVER_SESSION = BIT(8),
+ /**
* Set when at least one of keys, having values in this context has
* non-NULL lu_context_key::lct_exit() method. This is used to
* optimize lu_context_exit() call.
@@ -1118,7 +1119,7 @@ struct lu_context_key {
{ \
type *value; \
\
- CLASSERT(PAGE_SIZE >= sizeof (*value)); \
+ CLASSERT(PAGE_SIZE >= sizeof(*value)); \
\
value = kzalloc(sizeof(*value), GFP_NOFS); \
if (!value) \
@@ -1154,12 +1155,12 @@ do { \
(key)->lct_owner = THIS_MODULE; \
} while (0)
-int lu_context_key_register(struct lu_context_key *key);
-void lu_context_key_degister(struct lu_context_key *key);
-void *lu_context_key_get (const struct lu_context *ctx,
- const struct lu_context_key *key);
-void lu_context_key_quiesce (struct lu_context_key *key);
-void lu_context_key_revive (struct lu_context_key *key);
+int lu_context_key_register(struct lu_context_key *key);
+void lu_context_key_degister(struct lu_context_key *key);
+void *lu_context_key_get(const struct lu_context *ctx,
+ const struct lu_context_key *key);
+void lu_context_key_quiesce(struct lu_context_key *key);
+void lu_context_key_revive(struct lu_context_key *key);
/*
* LU_KEY_INIT_GENERIC() has to be a macro to correctly determine an
@@ -1216,21 +1217,21 @@ void lu_context_key_revive (struct lu_context_key *key);
LU_TYPE_START(mod, __VA_ARGS__); \
LU_TYPE_STOP(mod, __VA_ARGS__)
-int lu_context_init (struct lu_context *ctx, __u32 tags);
-void lu_context_fini (struct lu_context *ctx);
-void lu_context_enter (struct lu_context *ctx);
-void lu_context_exit (struct lu_context *ctx);
-int lu_context_refill(struct lu_context *ctx);
+int lu_context_init(struct lu_context *ctx, __u32 tags);
+void lu_context_fini(struct lu_context *ctx);
+void lu_context_enter(struct lu_context *ctx);
+void lu_context_exit(struct lu_context *ctx);
+int lu_context_refill(struct lu_context *ctx);
/*
* Helper functions to operate on multiple keys. These are used by the default
* device type operations, defined by LU_TYPE_INIT_FINI().
*/
-int lu_context_key_register_many(struct lu_context_key *k, ...);
+int lu_context_key_register_many(struct lu_context_key *k, ...);
void lu_context_key_degister_many(struct lu_context_key *k, ...);
-void lu_context_key_revive_many (struct lu_context_key *k, ...);
-void lu_context_key_quiesce_many (struct lu_context_key *k, ...);
+void lu_context_key_revive_many(struct lu_context_key *k, ...);
+void lu_context_key_quiesce_many(struct lu_context_key *k, ...);
/**
* Environment.
@@ -1246,9 +1247,9 @@ struct lu_env {
struct lu_context *le_ses;
};
-int lu_env_init (struct lu_env *env, __u32 tags);
-void lu_env_fini (struct lu_env *env);
-int lu_env_refill(struct lu_env *env);
+int lu_env_init(struct lu_env *env, __u32 tags);
+void lu_env_fini(struct lu_env *env);
+int lu_env_refill(struct lu_env *env);
/** @} lu_context */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 5aae1d06a..9c53c1792 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -183,6 +183,12 @@ struct lu_seq_range {
__u32 lsr_flags;
};
+struct lu_seq_range_array {
+ __u32 lsra_count;
+ __u32 lsra_padding;
+ struct lu_seq_range lsra_lsr[0];
+};
+
#define LU_SEQ_RANGE_MDT 0x0
#define LU_SEQ_RANGE_OST 0x1
#define LU_SEQ_RANGE_ANY 0x3
@@ -578,7 +584,7 @@ static inline __u64 ostid_seq(const struct ost_id *ostid)
if (fid_seq_is_mdt0(ostid->oi.oi_seq))
return FID_SEQ_OST_MDT0;
- if (fid_seq_is_default(ostid->oi.oi_seq))
+ if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
return FID_SEQ_LOV_DEFAULT;
if (fid_is_idif(&ostid->oi_fid))
@@ -590,9 +596,12 @@ static inline __u64 ostid_seq(const struct ost_id *ostid)
/* extract OST objid from a wire ost_id (id/seq) pair */
static inline __u64 ostid_id(const struct ost_id *ostid)
{
- if (fid_seq_is_mdt0(ostid_seq(ostid)))
+ if (fid_seq_is_mdt0(ostid->oi.oi_seq))
return ostid->oi.oi_id & IDIF_OID_MASK;
+ if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
+ return ostid->oi.oi_id;
+
if (fid_is_idif(&ostid->oi_fid))
return fid_idif_id(fid_seq(&ostid->oi_fid),
fid_oid(&ostid->oi_fid), 0);
@@ -636,12 +645,22 @@ static inline void ostid_set_seq_llog(struct ost_id *oi)
*/
static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
{
- if (fid_seq_is_mdt0(ostid_seq(oi))) {
+ if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
if (oid >= IDIF_MAX_OID) {
CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
return;
}
oi->oi.oi_id = oid;
+ } else if (fid_is_idif(&oi->oi_fid)) {
+ if (oid >= IDIF_MAX_OID) {
+ CERROR("Bad %llu to set "DOSTID"\n",
+ oid, POSTID(oi));
+ return;
+ }
+ oi->oi_fid.f_seq = fid_idif_seq(oid,
+ fid_idif_ost_idx(&oi->oi_fid));
+ oi->oi_fid.f_oid = oid;
+ oi->oi_fid.f_ver = oid >> 48;
} else {
if (oid > OBIF_MAX_OID) {
CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
@@ -651,25 +670,31 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
}
}
-static inline void ostid_inc_id(struct ost_id *oi)
+static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
{
- if (fid_seq_is_mdt0(ostid_seq(oi))) {
- if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) {
- CERROR("Bad inc "DOSTID"\n", POSTID(oi));
- return;
+ if (unlikely(fid_seq_is_igif(fid->f_seq))) {
+ CERROR("bad IGIF, "DFID"\n", PFID(fid));
+ return -EBADF;
+ }
+
+ if (fid_is_idif(fid)) {
+ if (oid >= IDIF_MAX_OID) {
+ CERROR("Too large OID %#llx to set IDIF "DFID"\n",
+ (unsigned long long)oid, PFID(fid));
+ return -EBADF;
}
- oi->oi.oi_id++;
+ fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
+ fid->f_oid = oid;
+ fid->f_ver = oid >> 48;
} else {
- oi->oi_fid.f_oid++;
+ if (oid > OBIF_MAX_OID) {
+ CERROR("Too large OID %#llx to set REG "DFID"\n",
+ (unsigned long long)oid, PFID(fid));
+ return -EBADF;
+ }
+ fid->f_oid = oid;
}
-}
-
-static inline void ostid_dec_id(struct ost_id *oi)
-{
- if (fid_seq_is_mdt0(ostid_seq(oi)))
- oi->oi.oi_id--;
- else
- oi->oi_fid.f_oid--;
+ return 0;
}
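/*
 * Illustrative sketch, not part of the patch: unlike the removed
 * ostid_inc_id()/ostid_dec_id() helpers, fid_set_id() reports failure,
 * so callers should check its return value. my_assign_oid() is
 * hypothetical.
 */
static int my_assign_oid(struct lu_fid *fid, __u64 oid)
{
	int rc = fid_set_id(fid, oid);

	if (rc)		/* -EBADF: IGIF fid, or oid out of range */
		return rc;
	/* for an IDIF fid, oid is now split across f_seq/f_oid/f_ver */
	return 0;
}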
/**
@@ -684,30 +709,34 @@ static inline void ostid_dec_id(struct ost_id *oi)
static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
__u32 ost_idx)
{
+ __u64 seq = ostid_seq(ostid);
+
if (ost_idx > 0xffff) {
CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
ost_idx);
return -EBADF;
}
- if (fid_seq_is_mdt0(ostid_seq(ostid))) {
+ if (fid_seq_is_mdt0(seq)) {
+ __u64 oid = ostid_id(ostid);
+
/* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
* that we map into the IDIF namespace. It allows up to 2^48
* objects per OST, as this is the object namespace that has
* been in production for years. This can handle create rates
* of 1M objects/s/OST for 9 years, or combinations thereof.
*/
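	/*
	 * Editorial sanity check on the figures above: 2^48 ~= 2.8e14
	 * objects, and 1M creates/s is ~3.15e13 objects/year, so this
	 * namespace lasts 2.8e14 / 3.15e13 ~= 8.9 years at that rate,
	 * matching the "9 years" claim.
	 */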
- if (ostid_id(ostid) >= IDIF_MAX_OID) {
+ if (oid >= IDIF_MAX_OID) {
CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
POSTID(ostid), ost_idx);
return -EBADF;
}
- fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx);
+ fid->f_seq = fid_idif_seq(oid, ost_idx);
/* truncate to 32 bits by assignment */
- fid->f_oid = ostid_id(ostid);
+ fid->f_oid = oid;
/* in theory, not currently used */
- fid->f_ver = ostid_id(ostid) >> 48;
- } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
+ fid->f_ver = oid >> 48;
+ } else if (likely(!fid_seq_is_default(seq))) {
/* This is either an IDIF object, which identifies objects across
* all OSTs, or a regular FID. The IDIF namespace maps legacy
* OST objects into the FID namespace. In both cases, we just
@@ -1001,8 +1030,9 @@ static inline int lu_dirent_calc_size(int namelen, __u16 attr)
size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
size += sizeof(struct luda_type);
- } else
+ } else {
size = sizeof(struct lu_dirent) + namelen;
+ }
return (size + 7) & ~7;
}
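/*
 * Worked example, not part of the patch: the final '(size + 7) & ~7'
 * rounds up to an 8-byte boundary, e.g. size = 21 -> (21 + 7) & ~7 = 24.
 */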
@@ -1256,6 +1286,9 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
#define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */
#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* flock deadlock detection */
#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/
+#define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by fid won't pack
+ * name in request
+ */
/* XXX README XXX:
* Please DO NOT add flag values here before first ensuring that this same
@@ -1428,6 +1461,8 @@ enum obdo_flags {
*/
OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
+ OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */
+ OBD_FL_SHORT_IO = 0x00400000, /* short io request */
/* Note that while these checksum values are currently separate bits,
* in 2.x we can actually allow all values from 1-31 if we wanted.
@@ -1525,6 +1560,11 @@ static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
oi->oi.oi_seq = seq;
}
+static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid)
+{
+ oi->oi.oi_id = oid;
+}
+
static inline __u64 lmm_oi_id(struct ost_id *oi)
{
return oi->oi.oi_id;
@@ -1732,6 +1772,11 @@ void lustre_swab_obd_statfs(struct obd_statfs *os);
#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
+#define OBD_BRW_SOFT_SYNC 0x4000 /* This flag notifies the server
+ * that the client is running low on
+ * space for unstable pages and asks
+ * it to sync them out quickly
+ */
#define OBD_OBJECT_EOF 0xffffffffffffffffULL
@@ -2436,6 +2481,7 @@ struct mdt_rec_reint {
void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
+/* lmv structures */
struct lmv_desc {
__u32 ld_tgt_count; /* how many MDS's */
__u32 ld_active_tgt_count; /* how many active */
@@ -2460,7 +2506,6 @@ struct lmv_stripe_md {
struct lu_fid mea_ids[0];
};
-/* lmv structures */
#define MEA_MAGIC_LAST_CHAR 0xb2221ca1
#define MEA_MAGIC_ALL_CHARS 0xb222a11c
#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
@@ -2470,9 +2515,10 @@ struct lmv_stripe_md {
#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
enum fld_rpc_opc {
- FLD_QUERY = 900,
+ FLD_QUERY = 900,
+ FLD_READ = 901,
FLD_LAST_OPC,
- FLD_FIRST_OPC = FLD_QUERY
+ FLD_FIRST_OPC = FLD_QUERY
};
enum seq_rpc_opc {
@@ -2486,6 +2532,12 @@ enum seq_op {
SEQ_ALLOC_META = 1
};
+enum fld_op {
+ FLD_CREATE = 0,
+ FLD_DELETE = 1,
+ FLD_LOOKUP = 2,
+};
+
/*
* LOV data structures
*/
@@ -2582,6 +2634,8 @@ struct ldlm_extent {
__u64 gid;
};
+#define LDLM_GID_ANY ((__u64)-1)
+
static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
struct ldlm_extent *ex2)
{
@@ -3304,7 +3358,7 @@ struct getinfo_fid2path {
char gf_path[0];
} __packed;
-void lustre_swab_fid2path (struct getinfo_fid2path *gf);
+void lustre_swab_fid2path(struct getinfo_fid2path *gf);
enum {
LAYOUT_INTENT_ACCESS = 0,
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 276906e64..59ba48ac3 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -193,37 +193,37 @@ struct ost_id {
* *INFO - set/get lov_user_mds_data
*/
/* see <lustre_lib.h> for ioctl numberss 101-150 */
-#define LL_IOC_GETFLAGS _IOR ('f', 151, long)
-#define LL_IOC_SETFLAGS _IOW ('f', 152, long)
-#define LL_IOC_CLRFLAGS _IOW ('f', 153, long)
+#define LL_IOC_GETFLAGS _IOR('f', 151, long)
+#define LL_IOC_SETFLAGS _IOW('f', 152, long)
+#define LL_IOC_CLRFLAGS _IOW('f', 153, long)
/* LL_IOC_LOV_SETSTRIPE: See also OBD_IOC_LOV_SETSTRIPE */
-#define LL_IOC_LOV_SETSTRIPE _IOW ('f', 154, long)
+#define LL_IOC_LOV_SETSTRIPE _IOW('f', 154, long)
/* LL_IOC_LOV_GETSTRIPE: See also OBD_IOC_LOV_GETSTRIPE */
-#define LL_IOC_LOV_GETSTRIPE _IOW ('f', 155, long)
+#define LL_IOC_LOV_GETSTRIPE _IOW('f', 155, long)
/* LL_IOC_LOV_SETEA: See also OBD_IOC_LOV_SETEA */
-#define LL_IOC_LOV_SETEA _IOW ('f', 156, long)
-#define LL_IOC_RECREATE_OBJ _IOW ('f', 157, long)
-#define LL_IOC_RECREATE_FID _IOW ('f', 157, struct lu_fid)
-#define LL_IOC_GROUP_LOCK _IOW ('f', 158, long)
-#define LL_IOC_GROUP_UNLOCK _IOW ('f', 159, long)
+#define LL_IOC_LOV_SETEA _IOW('f', 156, long)
+#define LL_IOC_RECREATE_OBJ _IOW('f', 157, long)
+#define LL_IOC_RECREATE_FID _IOW('f', 157, struct lu_fid)
+#define LL_IOC_GROUP_LOCK _IOW('f', 158, long)
+#define LL_IOC_GROUP_UNLOCK _IOW('f', 159, long)
/* LL_IOC_QUOTACHECK: See also OBD_IOC_QUOTACHECK */
-#define LL_IOC_QUOTACHECK _IOW ('f', 160, int)
+#define LL_IOC_QUOTACHECK _IOW('f', 160, int)
/* LL_IOC_POLL_QUOTACHECK: See also OBD_IOC_POLL_QUOTACHECK */
-#define LL_IOC_POLL_QUOTACHECK _IOR ('f', 161, struct if_quotacheck *)
+#define LL_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *)
/* LL_IOC_QUOTACTL: See also OBD_IOC_QUOTACTL */
#define LL_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *)
#define IOC_LOV_GETINFO _IOWR('f', 165, struct lov_user_mds_data *)
-#define LL_IOC_FLUSHCTX _IOW ('f', 166, long)
-#define LL_IOC_RMTACL _IOW ('f', 167, long)
-#define LL_IOC_GETOBDCOUNT _IOR ('f', 168, long)
+#define LL_IOC_FLUSHCTX _IOW('f', 166, long)
+#define LL_IOC_RMTACL _IOW('f', 167, long)
+#define LL_IOC_GETOBDCOUNT _IOR('f', 168, long)
#define LL_IOC_LLOOP_ATTACH _IOWR('f', 169, long)
#define LL_IOC_LLOOP_DETACH _IOWR('f', 170, long)
#define LL_IOC_LLOOP_INFO _IOWR('f', 171, struct lu_fid)
#define LL_IOC_LLOOP_DETACH_BYDEV _IOWR('f', 172, long)
-#define LL_IOC_PATH2FID _IOR ('f', 173, long)
+#define LL_IOC_PATH2FID _IOR('f', 173, long)
#define LL_IOC_GET_CONNECT_FLAGS _IOWR('f', 174, __u64 *)
-#define LL_IOC_GET_MDTIDX _IOR ('f', 175, int)
+#define LL_IOC_GET_MDTIDX _IOR('f', 175, int)
/* see <lustre_lib.h> for ioctl numbers 177-210 */
@@ -676,7 +676,12 @@ static inline const char *changelog_type2str(int type)
#define CLF_UNLINK_HSM_EXISTS 0x0002 /* File has something in HSM */
/* HSM cleaning needed */
/* Flags for rename */
-#define CLF_RENAME_LAST 0x0001 /* rename unlink last hardlink of target */
+#define CLF_RENAME_LAST 0x0001 /* rename unlinked the last hardlink
+ * of the target
+ */
+#define CLF_RENAME_LAST_EXISTS 0x0002 /* rename unlinked the last hardlink of
+ * a target that has an archive in the
+ * backend
+ */
/* Flags for HSM */
/* 12b used (from high weight to low weight):
@@ -833,9 +838,8 @@ struct ioc_data_version {
__u64 idv_flags; /* See LL_DV_xxx */
};
-#define LL_DV_NOFLUSH 0x01 /* Do not take READ EXTENT LOCK before sampling
- * version. Dirty caches are left unchanged.
- */
+#define LL_DV_RD_FLUSH BIT(0) /* Flush dirty pages from clients */
+#define LL_DV_WR_FLUSH BIT(1) /* Flush all caching pages from clients */
#ifndef offsetof
# define offsetof(typ, memb) ((unsigned long)((char *)&(((typ *)0)->memb)))
@@ -1095,12 +1099,12 @@ struct hsm_action_list {
__u32 padding1;
char hal_fsname[0]; /* null-terminated */
/* struct hsm_action_item[hal_count] follows, aligned on 8-byte
- * boundaries. See hai_zero
+ * boundaries. See hai_first
*/
} __packed;
#ifndef HAVE_CFS_SIZE_ROUND
-static inline int cfs_size_round (int val)
+static inline int cfs_size_round(int val)
{
return (val + 7) & (~0x7);
}
@@ -1109,7 +1113,7 @@ static inline int cfs_size_round (int val)
#endif
/* Return pointer to first hai in action list */
-static inline struct hsm_action_item *hai_zero(struct hsm_action_list *hal)
+static inline struct hsm_action_item *hai_first(struct hsm_action_list *hal)
{
return (struct hsm_action_item *)(hal->hal_fsname +
cfs_size_round(strlen(hal-> \
@@ -1131,7 +1135,7 @@ static inline int hal_size(struct hsm_action_list *hal)
struct hsm_action_item *hai;
sz = sizeof(*hal) + cfs_size_round(strlen(hal->hal_fsname) + 1);
- hai = hai_zero(hal);
+ hai = hai_first(hal);
for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai))
sz += cfs_size_round(hai->hai_len);
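/*
 * Illustrative sketch, not part of the patch: walking an action list
 * with the renamed hai_first(), mirroring hal_size() above;
 * my_walk_hal() is hypothetical.
 */
static void my_walk_hal(struct hsm_action_list *hal)
{
	struct hsm_action_item *hai = hai_first(hal);
	__u32 i;

	for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai)) {
		/* each hai is 8-byte aligned; hai->hai_len gives its size */
	}
}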
diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h
index bb16ae980..e229e91f7 100644
--- a/drivers/staging/lustre/lustre/include/lustre_cfg.h
+++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h
@@ -161,7 +161,7 @@ static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index)
int offset;
int bufcount;
- LASSERT (index >= 0);
+ LASSERT(index >= 0);
bufcount = lcfg->lcfg_bufcount;
if (index >= bufcount)
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 95fd36063..b36821ffb 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -130,7 +130,6 @@ struct lustre_sb_info {
struct lustre_mount_data *lsi_lmd; /* mount command info */
struct ll_sb_info *lsi_llsbi; /* add'l client sbi info */
struct dt_device *lsi_dt_dev; /* dt device to access disk fs*/
- struct vfsmount *lsi_srv_mnt; /* the one server mount */
atomic_t lsi_mounts; /* references to the srv_mnt */
char lsi_svname[MTI_NAME_MAXLEN];
char lsi_osd_obdname[64];
@@ -158,7 +157,6 @@ struct lustre_sb_info {
struct lustre_mount_info {
char *lmi_name;
struct super_block *lmi_sb;
- struct vfsmount *lmi_mnt;
struct list_head lmi_list_chain;
};
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 8b0364f71..9cade144f 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -71,6 +71,7 @@ struct obd_device;
*/
enum ldlm_error {
ELDLM_OK = 0,
+ ELDLM_LOCK_MATCHED = 1,
ELDLM_LOCK_CHANGED = 300,
ELDLM_LOCK_ABORTED = 301,
@@ -269,7 +270,7 @@ struct ldlm_pool {
struct completion pl_kobj_unregister;
};
-typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock);
+typedef int (*ldlm_cancel_cbt)(struct ldlm_lock *lock);
/**
* LVB operations.
@@ -446,8 +447,11 @@ struct ldlm_namespace {
/** Limit of parallel AST RPC count. */
unsigned ns_max_parallel_ast;
- /** Callback to cancel locks before replaying it during recovery. */
- ldlm_cancel_for_recovery ns_cancel_for_recovery;
+ /**
+ * Callback to check whether a lock may be canceled by ELC or
+ * during recovery.
+ */
+ ldlm_cancel_cbt ns_cancel;
/** LDLM lock stats */
struct lprocfs_stats *ns_stats;
@@ -479,9 +483,9 @@ static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
}
static inline void ns_register_cancel(struct ldlm_namespace *ns,
- ldlm_cancel_for_recovery arg)
+ ldlm_cancel_cbt arg)
{
- ns->ns_cancel_for_recovery = arg;
+ ns->ns_cancel = arg;
}
struct ldlm_lock;
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
index 7f2ba2ffe..e7e0c21a9 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -37,17 +37,11 @@
/** l_flags bits marked as "gone" bits */
#define LDLM_FL_GONE_MASK 0x0006004000000000ULL
-/** l_flags bits marked as "hide_lock" bits */
-#define LDLM_FL_HIDE_LOCK_MASK 0x0000206400000000ULL
-
/** l_flags bits marked as "inherit" bits */
#define LDLM_FL_INHERIT_MASK 0x0000000000800000ULL
-/** l_flags bits marked as "local_only" bits */
-#define LDLM_FL_LOCAL_ONLY_MASK 0x00FFFFFF00000000ULL
-
-/** l_flags bits marked as "on_wire" bits */
-#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F932FULL
+/** l_flags bits marked as "off_wire" bits */
+#define LDLM_FL_OFF_WIRE_MASK 0x00FFFFFF00000000ULL
/** extent, mode, or resource changed */
#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL /* bit 0 */
@@ -204,7 +198,7 @@
#define ldlm_set_cancel(_l) LDLM_SET_FLAG((_l), 1ULL << 36)
#define ldlm_clear_cancel(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 36)
-/** whatever it might mean */
+/** whatever it might mean -- never transmitted? */
#define LDLM_FL_LOCAL_ONLY 0x0000002000000000ULL /* bit 37 */
#define ldlm_is_local_only(_l) LDLM_TEST_FLAG((_l), 1ULL << 37)
#define ldlm_set_local_only(_l) LDLM_SET_FLAG((_l), 1ULL << 37)
@@ -287,18 +281,18 @@
* has canceled this lock and is waiting for rpc_lock which is taken by
* the first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() in
* the lock to prevent the Early Lock Cancel (ELC) code from cancelling it.
- *
- * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is
- * dropped to let ldlm_callback_handler() return EINVAL to the server. It
- * is used when ELC RPC is already prepared and is waiting for rpc_lock,
- * too late to send a separate CANCEL RPC.
*/
#define LDLM_FL_BL_AST 0x0000400000000000ULL /* bit 46 */
#define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG((_l), 1ULL << 46)
#define ldlm_set_bl_ast(_l) LDLM_SET_FLAG((_l), 1ULL << 46)
#define ldlm_clear_bl_ast(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 46)
-/** whatever it might mean */
+/**
+ * Set by ldlm_cancel_callback() when lock cache is dropped to let
+ * ldlm_callback_handler() return EINVAL to the server. It is used when
+ * ELC RPC is already prepared and is waiting for rpc_lock, too late to
+ * send a separate CANCEL RPC.
+ */
#define LDLM_FL_BL_DONE 0x0000800000000000ULL /* bit 47 */
#define ldlm_is_bl_done(_l) LDLM_TEST_FLAG((_l), 1ULL << 47)
#define ldlm_set_bl_done(_l) LDLM_SET_FLAG((_l), 1ULL << 47)
@@ -381,104 +375,16 @@
/** test for ldlm_lock flag bit set */
#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
+/** multi-bit test: are any of mask bits set? */
+#define LDLM_HAVE_MASK(_l, _m) ((_l)->l_flags & LDLM_FL_##_m##_MASK)
+
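/* Editorial note: LDLM_HAVE_MASK(lock, GONE) expands to
 * lock->l_flags & LDLM_FL_GONE_MASK -- true if any "gone" bit is set.
 */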
/** set a ldlm_lock flag bit */
#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))
/** clear a ldlm_lock flag bit */
#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
-/** Mask of flags inherited from parent lock when doing intents. */
-#define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK
-
-/** Mask of Flags sent in AST lock_flags to map into the receiving lock. */
-#define LDLM_AST_FLAGS LDLM_FL_AST_MASK
-
/** @} subgroup */
/** @} group */
-#ifdef WIRESHARK_COMPILE
-static int hf_lustre_ldlm_fl_lock_changed = -1;
-static int hf_lustre_ldlm_fl_block_granted = -1;
-static int hf_lustre_ldlm_fl_block_conv = -1;
-static int hf_lustre_ldlm_fl_block_wait = -1;
-static int hf_lustre_ldlm_fl_ast_sent = -1;
-static int hf_lustre_ldlm_fl_replay = -1;
-static int hf_lustre_ldlm_fl_intent_only = -1;
-static int hf_lustre_ldlm_fl_has_intent = -1;
-static int hf_lustre_ldlm_fl_flock_deadlock = -1;
-static int hf_lustre_ldlm_fl_discard_data = -1;
-static int hf_lustre_ldlm_fl_no_timeout = -1;
-static int hf_lustre_ldlm_fl_block_nowait = -1;
-static int hf_lustre_ldlm_fl_test_lock = -1;
-static int hf_lustre_ldlm_fl_cancel_on_block = -1;
-static int hf_lustre_ldlm_fl_deny_on_contention = -1;
-static int hf_lustre_ldlm_fl_ast_discard_data = -1;
-static int hf_lustre_ldlm_fl_fail_loc = -1;
-static int hf_lustre_ldlm_fl_skipped = -1;
-static int hf_lustre_ldlm_fl_cbpending = -1;
-static int hf_lustre_ldlm_fl_wait_noreproc = -1;
-static int hf_lustre_ldlm_fl_cancel = -1;
-static int hf_lustre_ldlm_fl_local_only = -1;
-static int hf_lustre_ldlm_fl_failed = -1;
-static int hf_lustre_ldlm_fl_canceling = -1;
-static int hf_lustre_ldlm_fl_local = -1;
-static int hf_lustre_ldlm_fl_lvb_ready = -1;
-static int hf_lustre_ldlm_fl_kms_ignore = -1;
-static int hf_lustre_ldlm_fl_cp_reqd = -1;
-static int hf_lustre_ldlm_fl_cleaned = -1;
-static int hf_lustre_ldlm_fl_atomic_cb = -1;
-static int hf_lustre_ldlm_fl_bl_ast = -1;
-static int hf_lustre_ldlm_fl_bl_done = -1;
-static int hf_lustre_ldlm_fl_no_lru = -1;
-static int hf_lustre_ldlm_fl_fail_notified = -1;
-static int hf_lustre_ldlm_fl_destroyed = -1;
-static int hf_lustre_ldlm_fl_server_lock = -1;
-static int hf_lustre_ldlm_fl_res_locked = -1;
-static int hf_lustre_ldlm_fl_waited = -1;
-static int hf_lustre_ldlm_fl_ns_srv = -1;
-static int hf_lustre_ldlm_fl_excl = -1;
-
-const value_string lustre_ldlm_flags_vals[] = {
- {LDLM_FL_LOCK_CHANGED, "LDLM_FL_LOCK_CHANGED"},
- {LDLM_FL_BLOCK_GRANTED, "LDLM_FL_BLOCK_GRANTED"},
- {LDLM_FL_BLOCK_CONV, "LDLM_FL_BLOCK_CONV"},
- {LDLM_FL_BLOCK_WAIT, "LDLM_FL_BLOCK_WAIT"},
- {LDLM_FL_AST_SENT, "LDLM_FL_AST_SENT"},
- {LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
- {LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
- {LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
- {LDLM_FL_FLOCK_DEADLOCK, "LDLM_FL_FLOCK_DEADLOCK"},
- {LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
- {LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
- {LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
- {LDLM_FL_TEST_LOCK, "LDLM_FL_TEST_LOCK"},
- {LDLM_FL_CANCEL_ON_BLOCK, "LDLM_FL_CANCEL_ON_BLOCK"},
- {LDLM_FL_DENY_ON_CONTENTION, "LDLM_FL_DENY_ON_CONTENTION"},
- {LDLM_FL_AST_DISCARD_DATA, "LDLM_FL_AST_DISCARD_DATA"},
- {LDLM_FL_FAIL_LOC, "LDLM_FL_FAIL_LOC"},
- {LDLM_FL_SKIPPED, "LDLM_FL_SKIPPED"},
- {LDLM_FL_CBPENDING, "LDLM_FL_CBPENDING"},
- {LDLM_FL_WAIT_NOREPROC, "LDLM_FL_WAIT_NOREPROC"},
- {LDLM_FL_CANCEL, "LDLM_FL_CANCEL"},
- {LDLM_FL_LOCAL_ONLY, "LDLM_FL_LOCAL_ONLY"},
- {LDLM_FL_FAILED, "LDLM_FL_FAILED"},
- {LDLM_FL_CANCELING, "LDLM_FL_CANCELING"},
- {LDLM_FL_LOCAL, "LDLM_FL_LOCAL"},
- {LDLM_FL_LVB_READY, "LDLM_FL_LVB_READY"},
- {LDLM_FL_KMS_IGNORE, "LDLM_FL_KMS_IGNORE"},
- {LDLM_FL_CP_REQD, "LDLM_FL_CP_REQD"},
- {LDLM_FL_CLEANED, "LDLM_FL_CLEANED"},
- {LDLM_FL_ATOMIC_CB, "LDLM_FL_ATOMIC_CB"},
- {LDLM_FL_BL_AST, "LDLM_FL_BL_AST"},
- {LDLM_FL_BL_DONE, "LDLM_FL_BL_DONE"},
- {LDLM_FL_NO_LRU, "LDLM_FL_NO_LRU"},
- {LDLM_FL_FAIL_NOTIFIED, "LDLM_FL_FAIL_NOTIFIED"},
- {LDLM_FL_DESTROYED, "LDLM_FL_DESTROYED"},
- {LDLM_FL_SERVER_LOCK, "LDLM_FL_SERVER_LOCK"},
- {LDLM_FL_RES_LOCKED, "LDLM_FL_RES_LOCKED"},
- {LDLM_FL_WAITED, "LDLM_FL_WAITED"},
- {LDLM_FL_NS_SRV, "LDLM_FL_NS_SRV"},
- {LDLM_FL_EXCL, "LDLM_FL_EXCL"},
- { 0, NULL }
-};
-#endif /* WIRESHARK_COMPILE */
+
#endif /* LDLM_ALL_FLAGS_MASK */
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index ab4a92390..12e8b585c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -308,10 +308,10 @@ static inline int fid_seq_in_fldb(__u64 seq)
fid_seq_is_root(seq) || fid_seq_is_dot(seq);
}
-static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq)
+static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq, __u32 ost_idx)
{
if (fid_seq_is_mdt0(seq)) {
- fid->f_seq = fid_idif_seq(0, 0);
+ fid->f_seq = fid_idif_seq(0, ost_idx);
} else {
LASSERTF(fid_seq_is_norm(seq) || fid_seq_is_echo(seq) ||
fid_seq_is_idif(seq), "%#llx\n", seq);
@@ -498,19 +498,6 @@ static inline void ostid_build_res_name(struct ost_id *oi,
}
}
-static inline void ostid_res_name_to_id(struct ost_id *oi,
- struct ldlm_res_id *name)
-{
- if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_SEQ_OFF])) {
- /* old resid */
- ostid_set_seq(oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
- ostid_set_id(oi, name->name[LUSTRE_RES_ID_SEQ_OFF]);
- } else {
- /* new resid */
- fid_extract_from_res_name(&oi->oi_fid, name);
- }
-}
-
/**
* Return true if the resource is for the object identified by this id & group.
*/
@@ -546,7 +533,8 @@ static inline void ost_fid_build_resid(const struct lu_fid *fid,
}
static inline void ost_fid_from_resid(struct lu_fid *fid,
- const struct ldlm_res_id *name)
+ const struct ldlm_res_id *name,
+ int ost_idx)
{
if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_VER_OID_OFF])) {
/* old resid */
@@ -554,7 +542,7 @@ static inline void ost_fid_from_resid(struct lu_fid *fid,
ostid_set_seq(&oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
ostid_set_id(&oi, name->name[LUSTRE_RES_ID_SEQ_OFF]);
- ostid_to_fid(fid, &oi, 0);
+ ostid_to_fid(fid, &oi, ost_idx);
} else {
/* new resid */
fid_extract_from_res_name(fid, name);
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index dac2d84d8..8325c82b3 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -109,7 +109,7 @@ static inline char *ptlrpc_import_state_name(enum lustre_imp_state state)
"RECOVER", "FULL", "EVICTED",
};
- LASSERT (state <= LUSTRE_IMP_EVICTED);
+ LASSERT(state <= LUSTRE_IMP_EVICTED);
return import_state_names[state];
}
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index f2223d558..00b976766 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -280,16 +280,16 @@ static inline void obd_ioctl_freedata(char *buf, int len)
#define OBD_IOC_DATA_TYPE long
#define OBD_IOC_CREATE _IOWR('f', 101, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_DESTROY _IOW ('f', 104, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_DESTROY _IOW('f', 104, OBD_IOC_DATA_TYPE)
#define OBD_IOC_PREALLOCATE _IOWR('f', 105, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SETATTR _IOW ('f', 107, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_SETATTR _IOW('f', 107, OBD_IOC_DATA_TYPE)
#define OBD_IOC_GETATTR _IOWR ('f', 108, OBD_IOC_DATA_TYPE)
#define OBD_IOC_READ _IOWR('f', 109, OBD_IOC_DATA_TYPE)
#define OBD_IOC_WRITE _IOWR('f', 110, OBD_IOC_DATA_TYPE)
#define OBD_IOC_STATFS _IOWR('f', 113, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SYNC _IOW ('f', 114, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_SYNC _IOW('f', 114, OBD_IOC_DATA_TYPE)
#define OBD_IOC_READ2 _IOWR('f', 115, OBD_IOC_DATA_TYPE)
#define OBD_IOC_FORMAT _IOWR('f', 116, OBD_IOC_DATA_TYPE)
#define OBD_IOC_PARTITION _IOWR('f', 117, OBD_IOC_DATA_TYPE)
@@ -308,13 +308,13 @@ static inline void obd_ioctl_freedata(char *buf, int len)
#define OBD_IOC_GETDTNAME OBD_IOC_GETNAME
#define OBD_IOC_LOV_GET_CONFIG _IOWR('f', 132, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_CLIENT_RECOVER _IOW ('f', 133, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PING_TARGET _IOW ('f', 136, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_CLIENT_RECOVER _IOW('f', 133, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PING_TARGET _IOW('f', 136, OBD_IOC_DATA_TYPE)
#define OBD_IOC_DEC_FS_USE_COUNT _IO ('f', 139)
-#define OBD_IOC_NO_TRANSNO _IOW ('f', 140, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SET_READONLY _IOW ('f', 141, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_ABORT_RECOVERY _IOR ('f', 142, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_NO_TRANSNO _IOW('f', 140, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_SET_READONLY _IOW('f', 141, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_ABORT_RECOVERY _IOR('f', 142, OBD_IOC_DATA_TYPE)
#define OBD_IOC_ROOT_SQUASH _IOWR('f', 143, OBD_IOC_DATA_TYPE)
@@ -324,27 +324,27 @@ static inline void obd_ioctl_freedata(char *buf, int len)
#define OBD_IOC_CLOSE_UUID _IOWR ('f', 147, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_CHANGELOG_SEND _IOW ('f', 148, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_CHANGELOG_SEND _IOW('f', 148, OBD_IOC_DATA_TYPE)
#define OBD_IOC_GETDEVICE _IOWR ('f', 149, OBD_IOC_DATA_TYPE)
#define OBD_IOC_FID2PATH _IOWR ('f', 150, OBD_IOC_DATA_TYPE)
/* see also <lustre/lustre_user.h> for ioctls 151-153 */
/* OBD_IOC_LOV_SETSTRIPE: See also LL_IOC_LOV_SETSTRIPE */
-#define OBD_IOC_LOV_SETSTRIPE _IOW ('f', 154, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LOV_SETSTRIPE _IOW('f', 154, OBD_IOC_DATA_TYPE)
/* OBD_IOC_LOV_GETSTRIPE: See also LL_IOC_LOV_GETSTRIPE */
-#define OBD_IOC_LOV_GETSTRIPE _IOW ('f', 155, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LOV_GETSTRIPE _IOW('f', 155, OBD_IOC_DATA_TYPE)
/* OBD_IOC_LOV_SETEA: See also LL_IOC_LOV_SETEA */
-#define OBD_IOC_LOV_SETEA _IOW ('f', 156, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LOV_SETEA _IOW('f', 156, OBD_IOC_DATA_TYPE)
/* see <lustre/lustre_user.h> for ioctls 157-159 */
/* OBD_IOC_QUOTACHECK: See also LL_IOC_QUOTACHECK */
-#define OBD_IOC_QUOTACHECK _IOW ('f', 160, int)
+#define OBD_IOC_QUOTACHECK _IOW('f', 160, int)
/* OBD_IOC_POLL_QUOTACHECK: See also LL_IOC_POLL_QUOTACHECK */
-#define OBD_IOC_POLL_QUOTACHECK _IOR ('f', 161, struct if_quotacheck *)
+#define OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *)
/* OBD_IOC_QUOTACTL: See also LL_IOC_QUOTACTL */
#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
/* see also <lustre/lustre_user.h> for ioctls 163-176 */
-#define OBD_IOC_CHANGELOG_REG _IOW ('f', 177, struct obd_ioctl_data)
-#define OBD_IOC_CHANGELOG_DEREG _IOW ('f', 178, struct obd_ioctl_data)
-#define OBD_IOC_CHANGELOG_CLEAR _IOW ('f', 179, struct obd_ioctl_data)
+#define OBD_IOC_CHANGELOG_REG _IOW('f', 177, struct obd_ioctl_data)
+#define OBD_IOC_CHANGELOG_DEREG _IOW('f', 178, struct obd_ioctl_data)
+#define OBD_IOC_CHANGELOG_CLEAR _IOW('f', 179, struct obd_ioctl_data)
#define OBD_IOC_RECORD _IOWR('f', 180, OBD_IOC_DATA_TYPE)
#define OBD_IOC_ENDRECORD _IOWR('f', 181, OBD_IOC_DATA_TYPE)
#define OBD_IOC_PARSE _IOWR('f', 182, OBD_IOC_DATA_TYPE)
@@ -352,7 +352,7 @@ static inline void obd_ioctl_freedata(char *buf, int len)
#define OBD_IOC_PROCESS_CFG _IOWR('f', 184, OBD_IOC_DATA_TYPE)
#define OBD_IOC_DUMP_LOG _IOWR('f', 185, OBD_IOC_DATA_TYPE)
#define OBD_IOC_CLEAR_LOG _IOWR('f', 186, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PARAM _IOW ('f', 187, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PARAM _IOW('f', 187, OBD_IOC_DATA_TYPE)
#define OBD_IOC_POOL _IOWR('f', 188, OBD_IOC_DATA_TYPE)
#define OBD_IOC_REPLACE_NIDS _IOWR('f', 189, OBD_IOC_DATA_TYPE)
@@ -522,6 +522,28 @@ struct l_wait_info {
sigmask(SIGTERM) | sigmask(SIGQUIT) | \
sigmask(SIGALRM))
+/**
+ * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
+ * waiting threads, which is not always desirable because all threads are
+ * woken up again and again, even if the user only needs a few of them to
+ * be active most of the time. This hurts performance because caches can
+ * be polluted by different threads.
+ *
+ * A LIFO list resolves this problem because by default we always wake up
+ * the most recently active thread.
+ *
+ * NB: please don't mix non-exclusive and exclusive waits on the same
+ * waitq if add_wait_queue_exclusive_head is used.
+ */
+#define add_wait_queue_exclusive_head(waitq, link) \
+{ \
+ unsigned long flags; \
+ \
+ spin_lock_irqsave(&((waitq)->lock), flags); \
+ __add_wait_queue_exclusive(waitq, link); \
+ spin_unlock_irqrestore(&((waitq)->lock), flags); \
+}
+
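/*
 * Illustrative sketch, not part of the patch: a waiter using the LIFO
 * exclusive helper above; my_lifo_wait(), my_waitq and my_cond are
 * hypothetical.
 */
static void my_lifo_wait(wait_queue_head_t *my_waitq, int *my_cond)
{
	wait_queue_t link;

	init_waitqueue_entry(&link, current);
	add_wait_queue_exclusive_head(my_waitq, &link);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (*my_cond)
			break;
		schedule();	/* LIFO: most recent waiter wakes first */
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(my_waitq, &link);
}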
/*
* wait for @condition to become true, but no longer than timeout, specified
* by @info.
@@ -578,7 +600,7 @@ do { \
\
if (condition) \
break; \
- if (cfs_signal_pending()) { \
+ if (signal_pending(current)) { \
if (info->lwi_on_signal && \
(__timeout == 0 || __allow_intr)) { \
if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index af77eb359..f267ff8a6 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -64,9 +64,27 @@ struct obd_export;
struct ptlrpc_request;
struct obd_device;
+/**
+ * Serializes in-flight MDT-modifying RPC requests to preserve idempotency.
+ *
+ * This mutex is used to implement execute-once semantics on the MDT.
+ * The MDT stores the last transaction ID and result for every client in
+ * its last_rcvd file. If the client doesn't get a reply, it can safely
+ * resend the request and the MDT will reconstruct the reply being aware
+ * that the request has already been executed. Without this lock,
+ * execution status of concurrent in-flight requests would be
+ * overwritten.
+ *
+ * This design limits the extent to which we can keep a full pipeline of
+ * in-flight requests from a single client. This limitation could be
+ * overcome by allowing multiple slots per client in the last_rcvd file.
+ */
struct mdc_rpc_lock {
+ /** Lock protecting in-flight RPC concurrency. */
struct mutex rpcl_mutex;
+ /** Intent associated with currently executing request. */
struct lookup_intent *rpcl_it;
+ /** Used for MDS/RPC load testing purposes. */
int rpcl_fakes;
};
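/*
 * Illustrative sketch, not part of the patch: the serialization the
 * comment above describes; my_send_modifying_rpc() is hypothetical.
 */
static void my_send_modifying_rpc(struct mdc_rpc_lock *rpcl)
{
	mutex_lock(&rpcl->rpcl_mutex);	/* one modifying RPC in flight */
	/* ... send the MDT-modifying request and wait for its reply ... */
	mutex_unlock(&rpcl->rpcl_mutex);	/* next request may proceed */
}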
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 69586a522..a7973d5de 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -1327,7 +1327,9 @@ struct ptlrpc_request {
/* allow the req to be sent if the import is in recovery
* status
*/
- rq_allow_replay:1;
+ rq_allow_replay:1,
+ /* bulk request, sent to server, but uncommitted */
+ rq_unstable:1;
unsigned int rq_nr_resend;
diff --git a/drivers/staging/lustre/lustre/include/lustre_param.h b/drivers/staging/lustre/lustre/include/lustre_param.h
index 383fe6feb..a42cf90c1 100644
--- a/drivers/staging/lustre/lustre/include/lustre_param.h
+++ b/drivers/staging/lustre/lustre/include/lustre_param.h
@@ -89,6 +89,7 @@ int class_parse_nid_quiet(char *buf, lnet_nid_t *nid, char **endh);
/* Prefixes for parameters handled by obd's proc methods (XXX_process_config) */
#define PARAM_OST "ost."
+#define PARAM_OSD "osd."
#define PARAM_OSC "osc."
#define PARAM_MDT "mdt."
#define PARAM_MDD "mdd."
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index b2e67fcf9..0aac4391e 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -137,6 +137,7 @@ extern struct req_format RQF_MGS_CONFIG_READ;
/* fid/fld req_format */
extern struct req_format RQF_SEQ_QUERY;
extern struct req_format RQF_FLD_QUERY;
+extern struct req_format RQF_FLD_READ;
/* MDS req_format */
extern struct req_format RQF_MDS_CONNECT;
extern struct req_format RQF_MDS_DISCONNECT;
@@ -199,7 +200,7 @@ extern struct req_format RQF_OST_BRW_READ;
extern struct req_format RQF_OST_BRW_WRITE;
extern struct req_format RQF_OST_STATFS;
extern struct req_format RQF_OST_SET_GRANT_INFO;
-extern struct req_format RQF_OST_GET_INFO_GENERIC;
+extern struct req_format RQF_OST_GET_INFO;
extern struct req_format RQF_OST_GET_INFO_LAST_ID;
extern struct req_format RQF_OST_GET_INFO_LAST_FID;
extern struct req_format RQF_OST_SET_INFO_LAST_FID;
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index 4264d9765..2d926e0ee 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -37,7 +37,7 @@
#ifndef __OBD_H
#define __OBD_H
-#include "linux/obd.h"
+#include <linux/spinlock.h>
#define IOC_OSC_TYPE 'h'
#define IOC_OSC_MIN_NR 20
@@ -54,6 +54,7 @@
#include "lustre_export.h"
#include "lustre_fid.h"
#include "lustre_fld.h"
+#include "lustre_intent.h"
#define MAX_OBD_DEVICES 8192
@@ -165,9 +166,6 @@ struct obd_info {
obd_enqueue_update_f oi_cb_up;
};
-void lov_stripe_lock(struct lov_stripe_md *md);
-void lov_stripe_unlock(struct lov_stripe_md *md);
-
struct obd_type {
struct list_head typ_chain;
struct obd_ops *typ_dt_ops;
@@ -293,14 +291,10 @@ struct client_obd {
* blocking everywhere, but we don't want to slow down fast-path of
* our main platform.)
*
- * Exact type of ->cl_loi_list_lock is defined in arch/obd.h together
- * with client_obd_list_{un,}lock() and
- * client_obd_list_lock_{init,done}() functions.
- *
* NB by Jinshan: though field names are still _loi_, but actually
* osc_object{}s are in the list.
*/
- struct client_obd_lock cl_loi_list_lock;
+ spinlock_t cl_loi_list_lock;
struct list_head cl_loi_ready_list;
struct list_head cl_loi_hp_ready_list;
struct list_head cl_loi_write_list;
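With the arch-specific client_obd_list_lock wrapper gone, cl_loi_list_lock becomes a plain spinlock_t initialized with spin_lock_init() and taken with spin_lock()/spin_unlock(). A userspace analogue of the resulting pattern, using POSIX spinlocks in place of the kernel API (names illustrative):

#include <pthread.h>
#include <stdio.h>

struct demo_client {
        pthread_spinlock_t loi_list_lock;   /* stands in for spinlock_t */
        int loi_ready;                      /* stands in for the ready list */
};

int main(void)
{
        struct demo_client cli;

        pthread_spin_init(&cli.loi_list_lock, PTHREAD_PROCESS_PRIVATE);
        pthread_spin_lock(&cli.loi_list_lock);
        cli.loi_ready = 1;                  /* mutate state under the lock */
        pthread_spin_unlock(&cli.loi_list_lock);
        printf("ready=%d\n", cli.loi_ready);
        pthread_spin_destroy(&cli.loi_list_lock);
        return 0;
}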
@@ -327,7 +321,8 @@ struct client_obd {
atomic_t cl_lru_shrinkers;
atomic_t cl_lru_in_list;
struct list_head cl_lru_list; /* lru page list */
- struct client_obd_lock cl_lru_list_lock; /* page list protector */
+ spinlock_t cl_lru_list_lock; /* page list protector */
+ atomic_t cl_unstable_count;
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
atomic_t cl_destroy_in_flight;
@@ -364,6 +359,7 @@ struct client_obd {
/* ptlrpc work for writeback in ptlrpcd context */
void *cl_writeback_work;
+ void *cl_lru_work;
/* hash tables for osc_quota_info */
struct cfs_hash *cl_quota_hash[MAXQUOTAS];
};
@@ -391,45 +387,9 @@ struct ost_pool {
struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
};
-/* Round-robin allocator data */
-struct lov_qos_rr {
- __u32 lqr_start_idx; /* start index of new inode */
- __u32 lqr_offset_idx; /* aliasing for start_idx */
- int lqr_start_count; /* reseed counter */
- struct ost_pool lqr_pool; /* round-robin optimized list */
- unsigned long lqr_dirty:1; /* recalc round-robin list */
-};
-
/* allow statfs data caching for 1 second */
#define OBD_STATFS_CACHE_SECONDS 1
-struct lov_statfs_data {
- struct obd_info lsd_oi;
- struct obd_statfs lsd_statfs;
-};
-
-/* Stripe placement optimization */
-struct lov_qos {
- struct list_head lq_oss_list; /* list of OSSs that targets use */
- struct rw_semaphore lq_rw_sem;
- __u32 lq_active_oss_count;
- unsigned int lq_prio_free; /* priority for free space */
- unsigned int lq_threshold_rr;/* priority for rr */
- struct lov_qos_rr lq_rr; /* round robin qos data */
- unsigned long lq_dirty:1, /* recalc qos data */
- lq_same_space:1,/* the ost's all have approx.
- * the same space avail
- */
- lq_reset:1, /* zero current penalties */
- lq_statfs_in_progress:1; /* statfs op in
- progress */
- /* qos statfs data */
- struct lov_statfs_data *lq_statfs_data;
- wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs
- * requests completion
- */
-};
-
struct lov_tgt_desc {
struct list_head ltd_kill;
struct obd_uuid ltd_uuid;
@@ -442,25 +402,6 @@ struct lov_tgt_desc {
ltd_reap:1; /* should this target be deleted */
};
-/* Pool metadata */
-#define pool_tgt_size(_p) _p->pool_obds.op_size
-#define pool_tgt_count(_p) _p->pool_obds.op_count
-#define pool_tgt_array(_p) _p->pool_obds.op_array
-#define pool_tgt_rw_sem(_p) _p->pool_obds.op_rw_sem
-
-struct pool_desc {
- char pool_name[LOV_MAXPOOLNAME + 1]; /* name of pool */
- struct ost_pool pool_obds; /* pool members */
- atomic_t pool_refcount; /* pool ref. counter */
- struct lov_qos_rr pool_rr; /* round robin qos */
- struct hlist_node pool_hash; /* access by poolname */
- struct list_head pool_list; /* serial access */
- struct dentry *pool_debugfs_entry; /* file in debugfs */
- struct obd_device *pool_lobd; /* obd of the lov/lod to which
- * this pool belongs
- */
-};
-
struct lov_obd {
struct lov_desc desc;
struct lov_tgt_desc **lov_tgts; /* sparse array */
@@ -468,8 +409,6 @@ struct lov_obd {
struct mutex lov_lock;
struct obd_connect_data lov_ocd;
atomic_t lov_refcount;
- __u32 lov_tgt_count; /* how many OBD's */
- __u32 lov_active_tgt_count; /* how many active */
__u32 lov_death_row;/* tgts scheduled to be deleted */
__u32 lov_tgt_size; /* size of tgts array */
int lov_connects;
@@ -479,7 +418,7 @@ struct lov_obd {
struct dentry *lov_pool_debugfs_entry;
enum lustre_sec_part lov_sp_me;
- /* Cached LRU pages from upper layer */
+ /* Cached LRU and unstable data from upper layer */
void *lov_cache;
struct rw_semaphore lov_notify_lock;
@@ -511,7 +450,7 @@ struct lmv_obd {
struct obd_uuid cluuid;
struct obd_export *exp;
- struct mutex init_mutex;
+ struct mutex lmv_init_mutex;
int connected;
int max_easize;
int max_def_easize;
diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h
index 637fa2211..f6c18df90 100644
--- a/drivers/staging/lustre/lustre/include/obd_cksum.h
+++ b/drivers/staging/lustre/lustre/include/obd_cksum.h
@@ -35,6 +35,7 @@
#ifndef __OBD_CKSUM
#define __OBD_CKSUM
#include "../../include/linux/libcfs/libcfs.h"
+#include "../../include/linux/libcfs/libcfs_crypto.h"
#include "lustre/lustre_idl.h"
static inline unsigned char cksum_obd2cfs(enum cksum_type cksum_type)
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 706869f8c..32863bcb3 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -477,7 +477,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
struct lu_context session_ctx;
struct lu_env env;
- lu_context_init(&session_ctx, LCT_SESSION);
+ lu_context_init(&session_ctx, LCT_SESSION | LCT_SERVER_SESSION);
session_ctx.lc_thread = NULL;
lu_context_enter(&session_ctx);
@@ -490,8 +490,9 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
obd->obd_lu_dev = d;
d->ld_obd = obd;
rc = 0;
- } else
+ } else {
rc = PTR_ERR(d);
+ }
}
lu_context_exit(&session_ctx);
lu_context_fini(&session_ctx);
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index f8ee3a325..60034d39b 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -58,6 +58,7 @@ extern int at_early_margin;
extern int at_extra;
extern unsigned int obd_sync_filter;
extern unsigned int obd_max_dirty_pages;
+extern atomic_t obd_unstable_pages;
extern atomic_t obd_dirty_pages;
extern atomic_t obd_dirty_transit_pages;
extern char obd_jobid_var[];
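obd_unstable_pages adds a global counter for pages whose bulk writes have gone out but have not yet been committed by the server (matching the rq_unstable bit above). A sketch of that accounting discipline with C11 atomics; the function names are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int unstable_pages;       /* pages sent but uncommitted */

static void bulk_sent(int npages)
{
        atomic_fetch_add(&unstable_pages, npages);  /* BRW write RPC out */
}

static void server_committed(int npages)
{
        atomic_fetch_sub(&unstable_pages, npages);  /* commit callback */
}

int main(void)
{
        bulk_sent(16);
        server_committed(16);
        printf("unstable=%d\n", atomic_load(&unstable_pages));
        return 0;
}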
@@ -289,6 +290,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_OST_ENOINO 0x229
#define OBD_FAIL_OST_DQACQ_NET 0x230
#define OBD_FAIL_OST_STATFS_EINPROGRESS 0x231
+#define OBD_FAIL_OST_SET_INFO_NET 0x232
#define OBD_FAIL_LDLM 0x300
#define OBD_FAIL_LDLM_NAMESPACE_NEW 0x301
@@ -319,6 +321,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_LDLM_AGL_DELAY 0x31a
#define OBD_FAIL_LDLM_AGL_NOLOCK 0x31b
#define OBD_FAIL_LDLM_OST_LVB 0x31c
+#define OBD_FAIL_LDLM_ENQUEUE_HANG 0x31d
/* LOCKLESS IO */
#define OBD_FAIL_LDLM_SET_CONTENTION 0x385
@@ -426,6 +429,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_FLD 0x1100
#define OBD_FAIL_FLD_QUERY_NET 0x1101
+#define OBD_FAIL_FLD_READ_NET 0x1102
#define OBD_FAIL_SEC_CTX 0x1200
#define OBD_FAIL_SEC_CTX_INIT_NET 0x1201
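The OBD_FAIL_* codes feed the fail-loc fault-injection mechanism: a single knob (set from user space via lctl in the kernel) names one injection site, and checks in the code fire only when execution reaches that site. A simplified userspace model of the idea; the real checks also support masks and repeat counts:

#include <stdio.h>

static unsigned long fail_loc;          /* set from user space via lctl */

#define DEMO_FAIL_CHECK(id)     ((fail_loc) == (id))
#define DEMO_FAIL_FLD_READ_NET  0x1102  /* mirrors OBD_FAIL_FLD_READ_NET */

int main(void)
{
        fail_loc = DEMO_FAIL_FLD_READ_NET;
        if (DEMO_FAIL_CHECK(DEMO_FAIL_FLD_READ_NET))
                printf("dropping FLD read reply for testing\n");
        return 0;
}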
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
deleted file mode 100644
index 96141d17d..000000000
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ /dev/null
@@ -1,1203 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * cl code shared between vvp and liblustre (and other Lustre clients in the
- * future).
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../../include/linux/libcfs/libcfs.h"
-# include <linux/fs.h>
-# include <linux/sched.h>
-# include <linux/mm.h>
-# include <linux/quotaops.h>
-# include <linux/highmem.h>
-# include <linux/pagemap.h>
-# include <linux/rbtree.h>
-
-#include "../include/obd.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_fid.h"
-#include "../include/lustre_lite.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_ver.h"
-#include "../include/lustre_mdc.h"
-#include "../include/cl_object.h"
-
-#include "../include/lclient.h"
-
-#include "../llite/llite_internal.h"
-
-static const struct cl_req_operations ccc_req_ops;
-
-/*
- * ccc_ prefix stands for "Common Client Code".
- */
-
-static struct kmem_cache *ccc_lock_kmem;
-static struct kmem_cache *ccc_object_kmem;
-static struct kmem_cache *ccc_thread_kmem;
-static struct kmem_cache *ccc_session_kmem;
-static struct kmem_cache *ccc_req_kmem;
-
-static struct lu_kmem_descr ccc_caches[] = {
- {
- .ckd_cache = &ccc_lock_kmem,
- .ckd_name = "ccc_lock_kmem",
- .ckd_size = sizeof(struct ccc_lock)
- },
- {
- .ckd_cache = &ccc_object_kmem,
- .ckd_name = "ccc_object_kmem",
- .ckd_size = sizeof(struct ccc_object)
- },
- {
- .ckd_cache = &ccc_thread_kmem,
- .ckd_name = "ccc_thread_kmem",
- .ckd_size = sizeof(struct ccc_thread_info),
- },
- {
- .ckd_cache = &ccc_session_kmem,
- .ckd_name = "ccc_session_kmem",
- .ckd_size = sizeof(struct ccc_session)
- },
- {
- .ckd_cache = &ccc_req_kmem,
- .ckd_name = "ccc_req_kmem",
- .ckd_size = sizeof(struct ccc_req)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-/*****************************************************************************
- *
- * Vvp device and device type functions.
- *
- */
-
-void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key)
-{
- struct ccc_thread_info *info;
-
- info = kmem_cache_zalloc(ccc_thread_kmem, GFP_NOFS);
- if (!info)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-void ccc_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct ccc_thread_info *info = data;
-
- kmem_cache_free(ccc_thread_kmem, info);
-}
-
-void *ccc_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct ccc_session *session;
-
- session = kmem_cache_zalloc(ccc_session_kmem, GFP_NOFS);
- if (!session)
- session = ERR_PTR(-ENOMEM);
- return session;
-}
-
-void ccc_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct ccc_session *session = data;
-
- kmem_cache_free(ccc_session_kmem, session);
-}
-
-struct lu_context_key ccc_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = ccc_key_init,
- .lct_fini = ccc_key_fini
-};
-
-struct lu_context_key ccc_session_key = {
- .lct_tags = LCT_SESSION,
- .lct_init = ccc_session_key_init,
- .lct_fini = ccc_session_key_fini
-};
-
-/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
-/* LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key); */
-
-int ccc_device_init(const struct lu_env *env, struct lu_device *d,
- const char *name, struct lu_device *next)
-{
- struct ccc_device *vdv;
- int rc;
-
- vdv = lu2ccc_dev(d);
- vdv->cdv_next = lu2cl_dev(next);
-
- LASSERT(d->ld_site && next->ld_type);
- next->ld_site = d->ld_site;
- rc = next->ld_type->ldt_ops->ldto_device_init(
- env, next, next->ld_type->ldt_name, NULL);
- if (rc == 0) {
- lu_device_get(next);
- lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
- }
- return rc;
-}
-
-struct lu_device *ccc_device_fini(const struct lu_env *env,
- struct lu_device *d)
-{
- return cl2lu_dev(lu2ccc_dev(d)->cdv_next);
-}
-
-struct lu_device *ccc_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg,
- const struct lu_device_operations *luops,
- const struct cl_device_operations *clops)
-{
- struct ccc_device *vdv;
- struct lu_device *lud;
- struct cl_site *site;
- int rc;
-
- vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
- if (!vdv)
- return ERR_PTR(-ENOMEM);
-
- lud = &vdv->cdv_cl.cd_lu_dev;
- cl_device_init(&vdv->cdv_cl, t);
- ccc2lu_dev(vdv)->ld_ops = luops;
- vdv->cdv_cl.cd_ops = clops;
-
- site = kzalloc(sizeof(*site), GFP_NOFS);
- if (site) {
- rc = cl_site_init(site, &vdv->cdv_cl);
- if (rc == 0)
- rc = lu_site_init_finish(&site->cs_lu);
- else {
- LASSERT(!lud->ld_site);
- CERROR("Cannot init lu_site, rc %d.\n", rc);
- kfree(site);
- }
- } else
- rc = -ENOMEM;
- if (rc != 0) {
- ccc_device_free(env, lud);
- lud = ERR_PTR(rc);
- }
- return lud;
-}
-
-struct lu_device *ccc_device_free(const struct lu_env *env,
- struct lu_device *d)
-{
- struct ccc_device *vdv = lu2ccc_dev(d);
- struct cl_site *site = lu2cl_site(d->ld_site);
- struct lu_device *next = cl2lu_dev(vdv->cdv_next);
-
- if (d->ld_site) {
- cl_site_fini(site);
- kfree(site);
- }
- cl_device_fini(lu2cl_dev(d));
- kfree(vdv);
- return next;
-}
-
-int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct ccc_req *vrq;
- int result;
-
- vrq = kmem_cache_zalloc(ccc_req_kmem, GFP_NOFS);
- if (vrq) {
- cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
-/**
- * An `emergency' environment used by ccc_inode_fini() when cl_env_get()
- * fails. Access to this environment is serialized by ccc_inode_fini_guard
- * mutex.
- */
-static struct lu_env *ccc_inode_fini_env;
-
-/**
- * A mutex serializing calls to slp_inode_fini() under extreme memory
- * pressure, when environments cannot be allocated.
- */
-static DEFINE_MUTEX(ccc_inode_fini_guard);
-static int dummy_refcheck;
-
-int ccc_global_init(struct lu_device_type *device_type)
-{
- int result;
-
- result = lu_kmem_init(ccc_caches);
- if (result)
- return result;
-
- result = lu_device_type_init(device_type);
- if (result)
- goto out_kmem;
-
- ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
- LCT_REMEMBER|LCT_NOREF);
- if (IS_ERR(ccc_inode_fini_env)) {
- result = PTR_ERR(ccc_inode_fini_env);
- goto out_device;
- }
-
- ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
- return 0;
-out_device:
- lu_device_type_fini(device_type);
-out_kmem:
- lu_kmem_fini(ccc_caches);
- return result;
-}
-
-void ccc_global_fini(struct lu_device_type *device_type)
-{
- if (ccc_inode_fini_env) {
- cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
- ccc_inode_fini_env = NULL;
- }
- lu_device_type_fini(device_type);
- lu_kmem_fini(ccc_caches);
-}
-
-/*****************************************************************************
- *
- * Object operations.
- *
- */
-
-struct lu_object *ccc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev,
- const struct cl_object_operations *clops,
- const struct lu_object_operations *luops)
-{
- struct ccc_object *vob;
- struct lu_object *obj;
-
- vob = kmem_cache_zalloc(ccc_object_kmem, GFP_NOFS);
- if (vob) {
- struct cl_object_header *hdr;
-
- obj = ccc2lu(vob);
- hdr = &vob->cob_header;
- cl_object_header_init(hdr);
- lu_object_init(obj, &hdr->coh_lu, dev);
- lu_object_add_top(&hdr->coh_lu, obj);
-
- vob->cob_cl.co_ops = clops;
- obj->lo_ops = luops;
- } else
- obj = NULL;
- return obj;
-}
-
-int ccc_object_init0(const struct lu_env *env,
- struct ccc_object *vob,
- const struct cl_object_conf *conf)
-{
- vob->cob_inode = conf->coc_inode;
- vob->cob_transient_pages = 0;
- cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page));
- return 0;
-}
-
-int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf)
-{
- struct ccc_device *dev = lu2ccc_dev(obj->lo_dev);
- struct ccc_object *vob = lu2ccc(obj);
- struct lu_object *below;
- struct lu_device *under;
- int result;
-
- under = &dev->cdv_next->cd_lu_dev;
- below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
- if (below) {
- const struct cl_object_conf *cconf;
-
- cconf = lu2cl_conf(conf);
- INIT_LIST_HEAD(&vob->cob_pending_list);
- lu_object_add(obj, below);
- result = ccc_object_init0(env, vob, cconf);
- } else
- result = -ENOMEM;
- return result;
-}
-
-void ccc_object_free(const struct lu_env *env, struct lu_object *obj)
-{
- struct ccc_object *vob = lu2ccc(obj);
-
- lu_object_fini(obj);
- lu_object_header_fini(obj->lo_header);
- kmem_cache_free(ccc_object_kmem, vob);
-}
-
-int ccc_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *unused,
- const struct cl_lock_operations *lkops)
-{
- struct ccc_lock *clk;
- int result;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- clk = kmem_cache_zalloc(ccc_lock_kmem, GFP_NOFS);
- if (clk) {
- cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
-int ccc_object_glimpse(const struct lu_env *env,
- const struct cl_object *obj, struct ost_lvb *lvb)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- lvb->lvb_mtime = cl_inode_mtime(inode);
- lvb->lvb_atime = cl_inode_atime(inode);
- lvb->lvb_ctime = cl_inode_ctime(inode);
- /*
-	 * LU-417: Add the dirty pages block count lest i_blocks report 0;
-	 * a "cp" or "tar" on a remote node may otherwise think it's a
-	 * completely sparse file and skip it.
- */
- if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
- lvb->lvb_blocks = dirty_cnt(inode);
- return 0;
-}
-
-static void ccc_object_size_lock(struct cl_object *obj)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- ll_inode_size_lock(inode);
- cl_object_attr_lock(obj);
-}
-
-static void ccc_object_size_unlock(struct cl_object *obj)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- cl_object_attr_unlock(obj);
- ll_inode_size_unlock(inode);
-}
-
-/*****************************************************************************
- *
- * Page operations.
- *
- */
-
-struct page *ccc_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- return cl2vm_page(slice);
-}
-
-int ccc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr;
- struct cl_page *page = slice->cpl_page;
-
- int result;
-
- if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
- io->ci_type == CIT_FAULT) {
- if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
- result = -EBUSY;
- else {
- desc->cld_start = page->cp_index;
- desc->cld_end = page->cp_index;
- desc->cld_obj = page->cp_obj;
- desc->cld_mode = CLM_READ;
- result = cl_queue_match(&io->ci_lockset.cls_done,
- desc) ? -EBUSY : 0;
- }
- } else
- result = 0;
- return result;
-}
-
-int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
-{
- /*
- * Cached read?
- */
- LBUG();
- return 0;
-}
-
-int ccc_transient_page_prep(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- /* transient page should always be sent. */
- return 0;
-}
-
-/*****************************************************************************
- *
- * Lock operations.
- *
- */
-
-void ccc_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
-}
-
-void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
-{
- struct ccc_lock *clk = cl2ccc_lock(slice);
-
- kmem_cache_free(ccc_lock_kmem, clk);
-}
-
-int ccc_lock_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *unused, __u32 enqflags)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-/**
- * Implementation of the cl_lock_operations::clo_fits_into() method for the
- * ccc layer. This function is executed every time io finds an existing lock
- * in the lock cache while creating a new lock, and has to decide whether the
- * cached lock "fits" into the io.
- *
- * \param slice lock to be checked
- * \param io IO that wants a lock.
- *
- * \see lov_lock_fits_into().
- */
-int ccc_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- const struct cl_lock *lock = slice->cls_lock;
- const struct cl_lock_descr *descr = &lock->cll_descr;
- const struct ccc_io *cio = ccc_env_io(env);
- int result;
-
- /*
- * Work around DLM peculiarity: it assumes that glimpse
-	 * (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns a read lock
- * when asked for LCK_PW lock with LDLM_FL_HAS_INTENT flag set. Make
- * sure that glimpse doesn't get CLM_WRITE top-lock, so that it
- * doesn't enqueue CLM_WRITE sub-locks.
- */
- if (cio->cui_glimpse)
- result = descr->cld_mode != CLM_WRITE;
-
- /*
- * Also, don't match incomplete write locks for read, otherwise read
- * would enqueue missing sub-locks in the write mode.
- */
- else if (need->cld_mode != descr->cld_mode)
- result = lock->cll_state >= CLS_ENQUEUED;
- else
- result = 1;
- return result;
-}
-
-/**
- * Implements the cl_lock_operations::clo_state() method for the ccc layer,
- * invoked whenever the lock state changes. Transfers object attributes that
- * might have been updated as a result of lock acquisition into the inode.
- */
-void ccc_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct cl_lock *lock = slice->cls_lock;
-
- /*
- * Refresh inode attributes when the lock is moving into CLS_HELD
- * state, and only when this is a result of real enqueue, rather than
- * of finding lock in the cache.
- */
- if (state == CLS_HELD && lock->cll_state < CLS_HELD) {
- struct cl_object *obj;
- struct inode *inode;
-
- obj = slice->cls_obj;
- inode = ccc_object_inode(obj);
-
- /* vmtruncate() sets the i_size
- * under both a DLM lock and the
- * ll_inode_size_lock(). If we don't get the
- * ll_inode_size_lock() here we can match the DLM lock and
- * reset i_size. generic_file_write can then trust the
- * stale i_size when doing appending writes and effectively
- * cancel the result of the truncate. Getting the
- * ll_inode_size_lock() after the enqueue maintains the DLM
- * -> ll_inode_size_lock() acquiring order.
- */
- if (lock->cll_descr.cld_start == 0 &&
- lock->cll_descr.cld_end == CL_PAGE_EOF)
- cl_merge_lvb(env, inode);
- }
-}
-
-/*****************************************************************************
- *
- * io operations.
- *
- */
-
-int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- pgoff_t start, pgoff_t end)
-{
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
- struct cl_object *obj = io->ci_obj;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
-
- memset(&cio->cui_link, 0, sizeof(cio->cui_link));
-
- if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- descr->cld_mode = CLM_GROUP;
- descr->cld_gid = cio->cui_fd->fd_grouplock.cg_gid;
- } else {
- descr->cld_mode = mode;
- }
- descr->cld_obj = obj;
- descr->cld_start = start;
- descr->cld_end = end;
- descr->cld_enq_flags = enqflags;
-
- cl_io_lock_add(env, io, &cio->cui_link);
- return 0;
-}
-
-void ccc_io_update_iov(const struct lu_env *env,
- struct ccc_io *cio, struct cl_io *io)
-{
- size_t size = io->u.ci_rw.crw_count;
-
- if (!cl_is_normalio(env, io) || !cio->cui_iter)
- return;
-
- iov_iter_truncate(cio->cui_iter, size);
-}
-
-int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- loff_t start, loff_t end)
-{
- struct cl_object *obj = io->ci_obj;
-
- return ccc_io_one_lock_index(env, io, enqflags, mode,
- cl_index(obj, start), cl_index(obj, end));
-}
-
-void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- CLOBINVRNT(env, ios->cis_io->ci_obj,
- ccc_object_invariant(ios->cis_io->ci_obj));
-}
-
-void ccc_io_advance(const struct lu_env *env,
- const struct cl_io_slice *ios,
- size_t nob)
-{
- struct ccc_io *cio = cl2ccc_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = ios->cis_io->ci_obj;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- if (!cl_is_normalio(env, io))
- return;
-
- iov_iter_reexpand(cio->cui_iter, cio->cui_tot_count -= nob);
-}
-
-/**
- * Helper function that, if necessary, adjusts the file size (inode->i_size)
- * when the position at offset \a pos is accessed. The file size can be
- * arbitrarily stale on a Lustre client, but the client at least knows the
- * KMS. If the accessed area is inside [0, KMS], set the file size to KMS;
- * otherwise glimpse the file size.
- *
- * Locking: cl_isize_lock is used to serialize changes to inode size and to
- * protect consistency between inode size and cl_object
- * attributes. cl_object_size_lock() protects consistency between cl_attr's of
- * top-object and sub-objects.
- */
-int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, loff_t start, size_t count, int *exceed)
-{
- struct cl_attr *attr = ccc_env_thread_attr(env);
- struct inode *inode = ccc_object_inode(obj);
- loff_t pos = start + count - 1;
- loff_t kms;
- int result;
-
- /*
- * Consistency guarantees: following possibilities exist for the
- * relation between region being accessed and real file size at this
- * moment:
- *
- * (A): the region is completely inside of the file;
- *
- * (B-x): x bytes of region are inside of the file, the rest is
- * outside;
- *
- * (C): the region is completely outside of the file.
- *
-	 * This classification is stable under the DLM lock already acquired
-	 * by the caller, because to change the class, another client has to
-	 * take a DLM lock conflicting with ours. Also, any updates to ->i_size
- * by other threads on this client are serialized by
- * ll_inode_size_lock(). This guarantees that short reads are handled
- * correctly in the face of concurrent writes and truncates.
- */
- ccc_object_size_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- if (result == 0) {
- kms = attr->cat_kms;
- if (pos > kms) {
- /*
- * A glimpse is necessary to determine whether we
- * return a short read (B) or some zeroes at the end
- * of the buffer (C)
- */
- ccc_object_size_unlock(obj);
- result = cl_glimpse_lock(env, io, inode, obj, 0);
- if (result == 0 && exceed) {
-			/* If the target page index exceeds the
-			 * end-of-file page index, return directly.
-			 * Do not expect the kernel to check such a
-			 * case correctly; linux-2.6.18-128.1.1
-			 * missed doing that.
-			 * --bug 17336
-			 */
- loff_t size = cl_isize_read(inode);
- loff_t cur_index = start >> PAGE_SHIFT;
- loff_t size_index = (size - 1) >>
- PAGE_SHIFT;
-
- if ((size == 0 && cur_index != 0) ||
- size_index < cur_index)
- *exceed = 1;
- }
- return result;
- }
- /*
- * region is within kms and, hence, within real file
- * size (A). We need to increase i_size to cover the
- * read region so that generic_file_read() will do its
- * job, but that doesn't mean the kms size is
- * _correct_, it is only the _minimum_ size. If
- * someone does a stat they will get the correct size
- * which will always be >= the kms value here.
- * b=11081
- */
- if (cl_isize_read(inode) < kms) {
- cl_isize_write_nolock(inode, kms);
- CDEBUG(D_VFSTRACE,
- DFID" updating i_size %llu\n",
- PFID(lu_object_fid(&obj->co_lu)),
- (__u64)cl_isize_read(inode));
-
- }
- }
- ccc_object_size_unlock(obj);
- return result;
-}
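The (A)/(B-x)/(C) classification above reduces to comparing the accessed region against the known minimum size (KMS). A small worked sketch of that comparison in isolation, outside any locking:

#include <stdio.h>

/* How many bytes of [start, start+count) lie inside a file whose
 * known minimum size is kms. */
static long long inside_kms(long long start, long long count, long long kms)
{
        long long end = start + count;

        if (end <= kms)
                return count;           /* (A): fully inside */
        if (start >= kms)
                return 0;               /* (C): fully outside */
        return kms - start;             /* (B-x): x bytes inside */
}

int main(void)
{
        printf("%lld\n", inside_kms(0, 4096, 4096));    /* A: 4096 */
        printf("%lld\n", inside_kms(1024, 4096, 4096)); /* B: 3072 */
        printf("%lld\n", inside_kms(8192, 4096, 4096)); /* C: 0 */
        return 0;
}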
-
-/*****************************************************************************
- *
- * Transfer operations.
- *
- */
-
-void ccc_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct ccc_req *vrq;
-
- if (ioret > 0)
- cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
-
- vrq = cl2ccc_req(slice);
- kmem_cache_free(ccc_req_kmem, vrq);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for ccc
- * layer. ccc is responsible for
- *
- * - o_[mac]time
- *
- * - o_mode
- *
- * - o_parent_seq
- *
- * - o_[ug]id
- *
- * - o_parent_oid
- *
- * - o_parent_ver
- *
- * - o_ioepoch,
- *
- */
-void ccc_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
-{
- struct inode *inode;
- struct obdo *oa;
- u32 valid_flags;
-
- oa = attr->cra_oa;
- inode = ccc_object_inode(obj);
- valid_flags = OBD_MD_FLTYPE;
-
- if (slice->crs_req->crq_type == CRT_WRITE) {
- if (flags & OBD_MD_FLEPOCH) {
- oa->o_valid |= OBD_MD_FLEPOCH;
- oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
- valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLUID | OBD_MD_FLGID;
- }
- }
- obdo_from_inode(oa, inode, valid_flags & flags);
- obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid);
- memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid,
- JOBSTATS_JOBID_SIZE);
-}
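The valid_flags & flags step above copies only the attributes that are both requested by the caller and owned by this layer. A sketch of that bitmask filtering with illustrative flag names:

#include <stdio.h>

#define A_MTIME 0x1u
#define A_MODE  0x2u
#define A_UID   0x4u

struct demo_attr { unsigned valid; int mode; int uid; };

static void copy_attrs(struct demo_attr *dst, unsigned requested,
                       unsigned owned, int mode, int uid)
{
        unsigned pick = requested & owned;

        if (pick & A_MODE) { dst->mode = mode; dst->valid |= A_MODE; }
        if (pick & A_UID)  { dst->uid = uid;   dst->valid |= A_UID; }
}

int main(void)
{
        struct demo_attr oa = { 0 };

        /* mtime is requested but not owned here, so it is skipped */
        copy_attrs(&oa, A_MODE | A_MTIME, A_MODE | A_UID, 0644, 1000);
        printf("valid=%#x mode=%o\n", oa.valid, oa.mode);
        return 0;
}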
-
-static const struct cl_req_operations ccc_req_ops = {
- .cro_attr_set = ccc_req_attr_set,
- .cro_completion = ccc_req_completion
-};
-
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
-{
- struct lu_env *env;
- struct cl_io *io;
- int result;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- io = ccc_env_thread_io(env);
- io->ci_obj = cl_i2info(inode)->lli_clob;
-
- io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
- io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
- io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
- io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
- io->u.ci_setattr.sa_valid = attr->ia_valid;
-
-again:
- if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
- struct ccc_io *cio = ccc_env_io(env);
-
- if (attr->ia_valid & ATTR_FILE)
- /* populate the file descriptor for ftruncate to honor
- * group lock - see LU-787
- */
- cio->cui_fd = cl_iattr2fd(inode, attr);
-
- result = cl_io_loop(env, io);
- } else {
- result = io->ci_result;
- }
- cl_io_fini(env, io);
- if (unlikely(io->ci_need_restart))
- goto again;
- /* HSM import case: file is released, cannot be restored
- * no need to fail except if restore registration failed
- * with -ENODATA
- */
- if (result == -ENODATA && io->ci_restore_needed &&
- io->ci_result != -ENODATA)
- result = 0;
- cl_env_put(env, &refcheck);
- return result;
-}
-
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-struct lu_device *ccc2lu_dev(struct ccc_device *vdv)
-{
- return &vdv->cdv_cl.cd_lu_dev;
-}
-
-struct ccc_device *lu2ccc_dev(const struct lu_device *d)
-{
- return container_of0(d, struct ccc_device, cdv_cl.cd_lu_dev);
-}
-
-struct ccc_device *cl2ccc_dev(const struct cl_device *d)
-{
- return container_of0(d, struct ccc_device, cdv_cl);
-}
-
-struct lu_object *ccc2lu(struct ccc_object *vob)
-{
- return &vob->cob_cl.co_lu;
-}
-
-struct ccc_object *lu2ccc(const struct lu_object *obj)
-{
- return container_of0(obj, struct ccc_object, cob_cl.co_lu);
-}
-
-struct ccc_object *cl2ccc(const struct cl_object *obj)
-{
- return container_of0(obj, struct ccc_object, cob_cl);
-}
-
-struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice)
-{
- return container_of(slice, struct ccc_lock, clk_cl);
-}
-
-struct ccc_io *cl2ccc_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct ccc_io *cio;
-
- cio = container_of(slice, struct ccc_io, cui_cl);
- LASSERT(cio == ccc_env_io(env));
- return cio;
-}
-
-struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct ccc_req, crq_cl);
-}
-
-struct page *cl2vm_page(const struct cl_page_slice *slice)
-{
- return cl2ccc_page(slice)->cpg_page;
-}
-
-/*****************************************************************************
- *
- * Accessors.
- *
- */
-int ccc_object_invariant(const struct cl_object *obj)
-{
- struct inode *inode = ccc_object_inode(obj);
- struct cl_inode_info *lli = cl_i2info(inode);
-
- return (S_ISREG(cl_inode_mode(inode)) ||
- /* i_mode of unlinked inode is zeroed. */
- cl_inode_mode(inode) == 0) && lli->lli_clob == obj;
-}
-
-struct inode *ccc_object_inode(const struct cl_object *obj)
-{
- return cl2ccc(obj)->cob_inode;
-}
-
-/**
- * Initialize or update CLIO structures for regular files when new
- * meta-data arrives from the server.
- *
- * \param inode regular file inode
- * \param md new file metadata from MDS
- * - allocates the cl_object if necessary,
- * - updates the layout if the object was already there.
- */
-int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
-{
- struct lu_env *env;
- struct cl_inode_info *lli;
- struct cl_object *clob;
- struct lu_site *site;
- struct lu_fid *fid;
- struct cl_object_conf conf = {
- .coc_inode = inode,
- .u = {
- .coc_md = md
- }
- };
- int result = 0;
- int refcheck;
-
- LASSERT(md->body->valid & OBD_MD_FLID);
- LASSERT(S_ISREG(cl_inode_mode(inode)));
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- site = cl_i2sbi(inode)->ll_site;
- lli = cl_i2info(inode);
- fid = &lli->lli_fid;
- LASSERT(fid_is_sane(fid));
-
- if (!lli->lli_clob) {
-		/* clob is a slave of the inode; an empty lli_clob means that
-		 * for a new inode there is no clob in cache with the given
-		 * fid, so it is unnecessary to perform
-		 * lookup-alloc-lookup-insert; just alloc and insert directly.
-		 */
- LASSERT(inode->i_state & I_NEW);
- conf.coc_lu.loc_flags = LOC_F_NEW;
- clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
- fid, &conf);
- if (!IS_ERR(clob)) {
- /*
- * No locking is necessary, as new inode is
- * locked by I_NEW bit.
- */
- lli->lli_clob = clob;
- lli->lli_has_smd = lsm_has_objects(md->lsm);
- lu_object_ref_add(&clob->co_lu, "inode", inode);
- } else
- result = PTR_ERR(clob);
- } else {
- result = cl_conf_set(env, lli->lli_clob, &conf);
- }
-
- cl_env_put(env, &refcheck);
-
- if (result != 0)
- CERROR("Failure to initialize cl object "DFID": %d\n",
- PFID(fid), result);
- return result;
-}
-
-/**
- * Wait for others to drop their references to the object first, then drop
- * the last one, which leads to the object being destroyed immediately.
- * Must be called after cl_object_kill() against this object.
- *
- * The reason we want to do this is: destroying the top object waits for its
- * sub-objects to be destroyed first, so we can't let the bottom layer (e.g.
- * from ASTs) initiate top-object destruction, which may deadlock. See bz22520.
- */
-static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
-{
- struct lu_object_header *header = obj->co_lu.lo_header;
- wait_queue_t waiter;
-
- if (unlikely(atomic_read(&header->loh_ref) != 1)) {
- struct lu_site *site = obj->co_lu.lo_dev->ld_site;
- struct lu_site_bkt_data *bkt;
-
- bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
-
- init_waitqueue_entry(&waiter, current);
- add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
-
- while (1) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (atomic_read(&header->loh_ref) == 1)
- break;
- schedule();
- }
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
- }
-
- cl_object_put(env, obj);
-}
-
-void cl_inode_fini(struct inode *inode)
-{
- struct lu_env *env;
- struct cl_inode_info *lli = cl_i2info(inode);
- struct cl_object *clob = lli->lli_clob;
- int refcheck;
- int emergency;
-
- if (clob) {
- void *cookie;
-
- cookie = cl_env_reenter();
- env = cl_env_get(&refcheck);
- emergency = IS_ERR(env);
- if (emergency) {
- mutex_lock(&ccc_inode_fini_guard);
- LASSERT(ccc_inode_fini_env);
- cl_env_implant(ccc_inode_fini_env, &refcheck);
- env = ccc_inode_fini_env;
- }
- /*
-		 * The cl_object cache is a slave to the inode cache (which,
-		 * in turn, is a slave to the dentry cache); don't keep a
-		 * cl_object in memory when its master is evicted.
- */
- cl_object_kill(env, clob);
- lu_object_ref_del(&clob->co_lu, "inode", inode);
- cl_object_put_last(env, clob);
- lli->lli_clob = NULL;
- if (emergency) {
- cl_env_unplant(ccc_inode_fini_env, &refcheck);
- mutex_unlock(&ccc_inode_fini_guard);
- } else
- cl_env_put(env, &refcheck);
- cl_env_reexit(cookie);
- }
-}
-
-/**
- * Return the IF_* type for a given lu_dirent entry.
- * The IF_* flag should be converted to the particular OS file type in the
- * platform llite module.
- */
-__u16 ll_dirent_type_get(struct lu_dirent *ent)
-{
- __u16 type = 0;
- struct luda_type *lt;
- int len = 0;
-
- if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
- const unsigned align = sizeof(struct luda_type) - 1;
-
- len = le16_to_cpu(ent->lde_namelen);
- len = (len + align) & ~align;
- lt = (void *)ent->lde_name + len;
- type = IFTODT(le16_to_cpu(lt->lt_type));
- }
- return type;
-}
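The (len + align) & ~align step above rounds the name length up to the alignment of struct luda_type so the type record can be read at an aligned offset. The trick in isolation, for a power-of-two size:

#include <stdio.h>

/* Round len up to a multiple of size, where size is a power of two;
 * mirrors the luda_type alignment computation. */
static unsigned round_up_len(unsigned len, unsigned size)
{
        unsigned align = size - 1;

        return (len + align) & ~align;
}

int main(void)
{
        printf("%u\n", round_up_len(5, 2));   /* 6 */
        printf("%u\n", round_up_len(6, 2));   /* 6 */
        printf("%u\n", round_up_len(7, 8));   /* 8 */
        return 0;
}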
-
-/**
- * build inode number from passed @fid
- */
-__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
-{
- if (BITS_PER_LONG == 32 || api32)
- return fid_flatten32(fid);
- else
- return fid_flatten(fid);
-}
-
-/**
- * build inode generation from passed @fid. If our FID overflows the 32-bit
- * inode number then return a non-zero generation to distinguish them.
- */
-__u32 cl_fid_build_gen(const struct lu_fid *fid)
-{
- __u32 gen;
-
- if (fid_is_igif(fid)) {
- gen = lu_igif_gen(fid);
- return gen;
- }
-
- gen = fid_flatten(fid) >> 32;
- return gen;
-}
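cl_fid_build_ino()/cl_fid_build_gen() fold a 128-bit FID into a 64-bit inode number and spill the overflow into the 32-bit generation. The arithmetic below is a hypothetical illustration of that idea only; the real fid_flatten()/fid_flatten32() helpers use different mixing:

#include <stdint.h>
#include <stdio.h>

struct demo_fid { uint64_t seq; uint32_t oid; };  /* simplified lu_fid */

static uint64_t demo_flatten(const struct demo_fid *fid)
{
        return (fid->seq << 24) ^ fid->oid;       /* illustrative mix */
}

int main(void)
{
        struct demo_fid fid = { .seq = 0x200000401ULL, .oid = 7 };
        uint64_t ino = demo_flatten(&fid);

        /* high bits that don't fit a 32-bit ino become the generation */
        printf("ino=%llu gen=%u\n",
               (unsigned long long)ino, (unsigned)(ino >> 32));
        return 0;
}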
-
-/* The lsm is unreliable after the HSM implementation, as the layout can be
- * changed at any time. This is only to support old, non-clio-ized interfaces.
- * It will cause a deadlock if clio operations are called with this extra
- * layout refcount, because if the layout changed during the IO,
- * ll_layout_refresh() will have to wait for the refcount to become zero to
- * destroy the older layout.
- *
- * Notice that the lsm returned by this function may not be valid unless called
- * inside layout lock - MDS_INODELOCK_LAYOUT.
- */
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
-{
- return lov_lsm_get(cl_i2info(inode)->lli_clob);
-}
-
-inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
-{
- lov_lsm_put(cl_i2info(inode)->lli_clob, lsm);
-}
diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c
index e5d1344e8..621323f6e 100644
--- a/drivers/staging/lustre/lustre/ldlm/l_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/l_lock.c
@@ -54,7 +54,7 @@ struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
lock_res(lock->l_resource);
- lock->l_flags |= LDLM_FL_RES_LOCKED;
+ ldlm_set_res_locked(lock);
return lock->l_resource;
}
EXPORT_SYMBOL(lock_res_and_lock);
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(lock_res_and_lock);
void unlock_res_and_lock(struct ldlm_lock *lock)
{
/* on server-side resource of lock doesn't change */
- lock->l_flags &= ~LDLM_FL_RES_LOCKED;
+ ldlm_clear_res_locked(lock);
unlock_res(lock->l_resource);
spin_unlock(&lock->l_lock);
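The open-coded l_flags manipulation is replaced throughout this series by per-flag accessors (ldlm_is_*/ldlm_set_*/ldlm_clear_*). A minimal sketch of that wrapper pattern with a hypothetical flag; the real definitions live alongside the LDLM_FL_* bits:

#include <stdio.h>

#define DEMO_FL_RES_LOCKED 0x1ULL       /* illustrative bit value */

struct demo_lock { unsigned long long l_flags; };

static inline int demo_is_res_locked(const struct demo_lock *l)
{ return !!(l->l_flags & DEMO_FL_RES_LOCKED); }
static inline void demo_set_res_locked(struct demo_lock *l)
{ l->l_flags |= DEMO_FL_RES_LOCKED; }
static inline void demo_clear_res_locked(struct demo_lock *l)
{ l->l_flags &= ~DEMO_FL_RES_LOCKED; }

int main(void)
{
        struct demo_lock lock = { 0 };

        demo_set_res_locked(&lock);
        printf("locked=%d\n", demo_is_res_locked(&lock));
        demo_clear_res_locked(&lock);
        return 0;
}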
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index a803e200f..cf1f17836 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -75,12 +75,12 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
* just after we finish and take our lock into account in its
* calculation of the kms
*/
- lock->l_flags |= LDLM_FL_KMS_IGNORE;
+ ldlm_set_kms_ignore(lock);
list_for_each(tmp, &res->lr_granted) {
lck = list_entry(tmp, struct ldlm_lock, l_res_link);
- if (lck->l_flags & LDLM_FL_KMS_IGNORE)
+ if (ldlm_is_kms_ignore(lck))
continue;
if (lck->l_policy_data.l_extent.end >= old_kms)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index b88b78606..349bfcc9b 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -101,8 +101,7 @@ ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
list_del_init(&lock->l_res_link);
- if (flags == LDLM_FL_WAIT_NOREPROC &&
- !(lock->l_flags & LDLM_FL_FAILED)) {
+ if (flags == LDLM_FL_WAIT_NOREPROC && !ldlm_is_failed(lock)) {
/* client side - set a flag to prevent sending a CANCEL */
lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
@@ -436,7 +435,7 @@ ldlm_flock_interrupted_wait(void *data)
lock_res_and_lock(lock);
/* client side - set flag to prevent lock from being put on LRU list */
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
unlock_res_and_lock(lock);
}
@@ -520,30 +519,29 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
granted:
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
- if (lock->l_flags & LDLM_FL_DESTROYED) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
- return 0;
- }
-
- if (lock->l_flags & LDLM_FL_FAILED) {
+ if (ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
return -EIO;
}
- if (rc) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
- rc);
- return rc;
- }
-
LDLM_DEBUG(lock, "client-side enqueue granted");
lock_res_and_lock(lock);
+ /*
+ * Protect against race where lock could have been just destroyed
+ * due to overlap in ldlm_process_flock_lock().
+ */
+ if (ldlm_is_destroyed(lock)) {
+ unlock_res_and_lock(lock);
+ LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
+ return 0;
+ }
+
/* ldlm_lock_enqueue() has already placed lock on the granted list. */
list_del_init(&lock->l_res_link);
- if (lock->l_flags & LDLM_FL_FLOCK_DEADLOCK) {
+ if (ldlm_is_flock_deadlock(lock)) {
LDLM_DEBUG(lock, "client-side enqueue deadlock received");
rc = -EDEADLK;
} else if (flags & LDLM_FL_TEST_LOCK) {
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index e21373e73..32f227f37 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -95,9 +95,10 @@ enum {
LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. */
LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */
- LDLM_CANCEL_NO_WAIT = 1 << 4 /* Cancel locks w/o blocking (neither
- * sending nor waiting for any rpcs)
- */
+ LDLM_CANCEL_NO_WAIT = 1 << 4, /* Cancel locks w/o blocking (neither
+ * sending nor waiting for any rpcs)
+ */
+ LDLM_CANCEL_LRUR_NO_WAIT = 1 << 5, /* LRUR + NO_WAIT */
};
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
@@ -145,7 +146,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
enum ldlm_desc_ast_t ast_type);
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock);
+int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use);
+#define ldlm_lock_remove_from_lru(lock) ldlm_lock_remove_from_lru_check(lock, 0)
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
void ldlm_lock_destroy_nolock(struct ldlm_lock *lock);
@@ -216,8 +218,6 @@ enum ldlm_policy_res {
LDLM_POLICY_SKIP_LOCK
};
-typedef enum ldlm_policy_res ldlm_policy_res_t;
-
#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v)
#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; }
#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v)
@@ -305,9 +305,10 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
int ret = 0;
lock_res_and_lock(lock);
- if (((lock->l_req_mode == lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_CP_REQD)) ||
- (lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCEL)))
+ if ((lock->l_req_mode == lock->l_granted_mode) &&
+ !ldlm_is_cp_reqd(lock))
+ ret = 1;
+ else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
ret = 1;
unlock_res_and_lock(lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 7dd7df59a..b4ffbe2fc 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -314,7 +314,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
INIT_LIST_HEAD(&cli->cl_loi_write_list);
INIT_LIST_HEAD(&cli->cl_loi_read_list);
- client_obd_list_lock_init(&cli->cl_loi_list_lock);
+ spin_lock_init(&cli->cl_loi_list_lock);
atomic_set(&cli->cl_pending_w_pages, 0);
atomic_set(&cli->cl_pending_r_pages, 0);
cli->cl_r_in_flight = 0;
@@ -333,7 +333,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
atomic_set(&cli->cl_lru_busy, 0);
atomic_set(&cli->cl_lru_in_list, 0);
INIT_LIST_HEAD(&cli->cl_lru_list);
- client_obd_list_lock_init(&cli->cl_lru_list_lock);
+ spin_lock_init(&cli->cl_lru_list_lock);
+ atomic_set(&cli->cl_unstable_count, 0);
init_waitqueue_head(&cli->cl_destroy_waitq);
atomic_set(&cli->cl_destroy_in_flight, 0);
@@ -355,6 +356,12 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
LNET_MTU >> PAGE_SHIFT);
+ /*
+	 * set cl_chunkbits default value to PAGE_SHIFT;
+ * it will be updated at OSC connection time.
+ */
+ cli->cl_chunkbits = PAGE_SHIFT;
+
if (!strcmp(name, LUSTRE_MDC_NAME)) {
cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
@@ -429,7 +436,6 @@ err_ldlm:
ldlm_put_ref();
err:
return rc;
-
}
EXPORT_SYMBOL(client_obd_setup);
@@ -438,6 +444,7 @@ int client_obd_cleanup(struct obd_device *obddev)
ldlm_namespace_free_post(obddev->obd_namespace);
obddev->obd_namespace = NULL;
+ obd_cleanup_client_import(obddev);
LASSERT(!obddev->u.cli.cl_import);
ldlm_put_ref();
@@ -748,6 +755,7 @@ int ldlm_error2errno(enum ldlm_error error)
switch (error) {
case ELDLM_OK:
+ case ELDLM_LOCK_MATCHED:
result = 0;
break;
case ELDLM_LOCK_CHANGED:
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index ecd65a7a3..bff94ea12 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -185,7 +185,7 @@ void ldlm_lock_put(struct ldlm_lock *lock)
"final lock_put on destroyed lock, freeing it.");
res = lock->l_resource;
- LASSERT(lock->l_flags & LDLM_FL_DESTROYED);
+ LASSERT(ldlm_is_destroyed(lock));
LASSERT(list_empty(&lock->l_res_link));
LASSERT(list_empty(&lock->l_pending_chain));
@@ -229,15 +229,25 @@ int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
/**
* Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
+ *
+ * If \a last_use is non-zero, the lock is removed from the LRU only if
+ * \a last_use matches the lock's l_last_used.
+ *
+ * \retval 0 the lock was not removed: either it was not in the LRU list,
+ *           or \a last_use is non-zero and doesn't match the lock's
+ *           l_last_used;
+ * \retval 1 the lock was in the LRU list and has been removed.
*/
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
+int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use)
{
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- int rc;
+ int rc = 0;
spin_lock(&ns->ns_lock);
- rc = ldlm_lock_remove_from_lru_nolock(lock);
+ if (last_use == 0 || last_use == lock->l_last_used)
+ rc = ldlm_lock_remove_from_lru_nolock(lock);
spin_unlock(&ns->ns_lock);
+
return rc;
}
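The last_use argument lets a caller snapshot l_last_used, drop the namespace lock, and later remove the lock from the LRU only if nobody touched it in between. A sketch of that handshake with illustrative types:

#include <stdio.h>
#include <time.h>

struct demo_lock { time_t last_used; int in_lru; };

static int remove_from_lru_check(struct demo_lock *l, time_t last_use)
{
        if (last_use != 0 && last_use != l->last_used)
                return 0;               /* lock was re-used meanwhile */
        if (!l->in_lru)
                return 0;
        l->in_lru = 0;
        return 1;
}

int main(void)
{
        struct demo_lock l = { .last_used = 100, .in_lru = 1 };
        time_t snap = l.last_used;

        l.last_used = 200;              /* concurrent user touches the lock */
        printf("removed=%d\n", remove_from_lru_check(&l, snap)); /* 0 */
        return 0;
}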
@@ -252,8 +262,7 @@ static void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
LASSERT(list_empty(&lock->l_lru));
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
list_add_tail(&lock->l_lru, &ns->ns_unused_list);
- if (lock->l_flags & LDLM_FL_SKIPPED)
- lock->l_flags &= ~LDLM_FL_SKIPPED;
+ ldlm_clear_skipped(lock);
LASSERT(ns->ns_nr_unused >= 0);
ns->ns_nr_unused++;
}
@@ -318,11 +327,11 @@ static int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
LBUG();
}
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
LASSERT(list_empty(&lock->l_lru));
return 0;
}
- lock->l_flags |= LDLM_FL_DESTROYED;
+ ldlm_set_destroyed(lock);
if (lock->l_export && lock->l_export->exp_lock_hash) {
/* NB: it's safe to call cfs_hash_del() even lock isn't
@@ -544,7 +553,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
/* It's unlikely but possible that someone marked the lock as
* destroyed after we did handle2object on it
*/
- if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
+ if (flags == 0 && !ldlm_is_destroyed(lock)) {
lu_ref_add(&lock->l_reference, "handle", current);
return lock;
}
@@ -554,21 +563,22 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
LASSERT(lock->l_resource);
lu_ref_add_atomic(&lock->l_reference, "handle", current);
- if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
+ if (unlikely(ldlm_is_destroyed(lock))) {
unlock_res_and_lock(lock);
CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
LDLM_LOCK_PUT(lock);
return NULL;
}
- if (flags && (lock->l_flags & flags)) {
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- return NULL;
- }
+ if (flags) {
+ if (lock->l_flags & flags) {
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_PUT(lock);
+ return NULL;
+ }
- if (flags)
lock->l_flags |= flags;
+ }
unlock_res_and_lock(lock);
return lock;
@@ -599,14 +609,14 @@ EXPORT_SYMBOL(ldlm_lock2desc);
static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
struct list_head *work_list)
{
- if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
+ if (!ldlm_is_ast_sent(lock)) {
LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
- lock->l_flags |= LDLM_FL_AST_SENT;
+ ldlm_set_ast_sent(lock);
/* If the enqueuing client said so, tell the AST recipient to
* discard dirty data, rather than writing back.
*/
- if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
- lock->l_flags |= LDLM_FL_DISCARD_DATA;
+ if (ldlm_is_ast_discard_data(new))
+ ldlm_set_discard_data(lock);
LASSERT(list_empty(&lock->l_bl_ast));
list_add(&lock->l_bl_ast, work_list);
LDLM_LOCK_GET(lock);
@@ -621,8 +631,8 @@ static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
static void ldlm_add_cp_work_item(struct ldlm_lock *lock,
struct list_head *work_list)
{
- if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
- lock->l_flags |= LDLM_FL_CP_REQD;
+ if (!ldlm_is_cp_reqd(lock)) {
+ ldlm_set_cp_reqd(lock);
LDLM_DEBUG(lock, "lock granted; sending completion AST.");
LASSERT(list_empty(&lock->l_cp_ast));
list_add(&lock->l_cp_ast, work_list);
@@ -657,7 +667,7 @@ void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
struct ldlm_lock *lock;
lock = ldlm_handle2lock(lockh);
- LASSERT(lock);
+ LASSERTF(lock, "Non-existing lock: %llx\n", lockh->cookie);
ldlm_lock_addref_internal(lock, mode);
LDLM_LOCK_PUT(lock);
}
@@ -704,7 +714,7 @@ int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
if (lock) {
lock_res_and_lock(lock);
if (lock->l_readers != 0 || lock->l_writers != 0 ||
- !(lock->l_flags & LDLM_FL_CBPENDING)) {
+ !ldlm_is_cbpending(lock)) {
ldlm_lock_addref_internal_nolock(lock, mode);
result = 0;
}
@@ -770,17 +780,17 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
ldlm_lock_decref_internal_nolock(lock, mode);
- if (lock->l_flags & LDLM_FL_LOCAL &&
+ if (ldlm_is_local(lock) &&
!lock->l_readers && !lock->l_writers) {
/* If this is a local lock on a server namespace and this was
* the last reference, cancel the lock.
*/
CDEBUG(D_INFO, "forcing cancel of local lock\n");
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
}
if (!lock->l_readers && !lock->l_writers &&
- (lock->l_flags & LDLM_FL_CBPENDING)) {
+ ldlm_is_cbpending(lock)) {
/* If we received a blocked AST and this was the last reference,
* run the callback.
*/
@@ -791,16 +801,14 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
ldlm_lock_remove_from_lru(lock);
unlock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_FAIL_LOC)
+ if (ldlm_is_fail_loc(lock))
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
- if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
+ if (ldlm_is_atomic_cb(lock) ||
ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
ldlm_handle_bl_callback(ns, NULL, lock);
} else if (!lock->l_readers && !lock->l_writers &&
- !(lock->l_flags & LDLM_FL_NO_LRU) &&
- !(lock->l_flags & LDLM_FL_BL_AST)) {
-
+ !ldlm_is_no_lru(lock) && !ldlm_is_bl_ast(lock)) {
LDLM_DEBUG(lock, "add lock into lru list");
/* If this is a client-side namespace and this was the last
@@ -809,7 +817,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
ldlm_lock_add_to_lru(lock);
unlock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_FAIL_LOC)
+ if (ldlm_is_fail_loc(lock))
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
/* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
@@ -853,7 +861,7 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
unlock_res_and_lock(lock);
ldlm_lock_decref_internal(lock, mode);
LDLM_LOCK_PUT(lock);
@@ -971,7 +979,7 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
ldlm_resource_dump(D_INFO, res);
LDLM_DEBUG(lock, "About to add lock:");
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
return;
}
@@ -1073,10 +1081,9 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
* whose parents already hold a lock so forward progress
* can still happen.
*/
- if (lock->l_flags & LDLM_FL_CBPENDING &&
- !(flags & LDLM_FL_CBPENDING))
+ if (ldlm_is_cbpending(lock) && !(flags & LDLM_FL_CBPENDING))
continue;
- if (!unref && lock->l_flags & LDLM_FL_CBPENDING &&
+ if (!unref && ldlm_is_cbpending(lock) &&
lock->l_readers == 0 && lock->l_writers == 0)
continue;
@@ -1092,6 +1099,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
if (unlikely(match == LCK_GROUP) &&
lock->l_resource->lr_type == LDLM_EXTENT &&
+ policy->l_extent.gid != LDLM_GID_ANY &&
lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
continue;
@@ -1104,11 +1112,10 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
policy->l_inodebits.bits))
continue;
- if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK))
+ if (!unref && LDLM_HAVE_MASK(lock, GONE))
continue;
- if ((flags & LDLM_FL_LOCAL_ONLY) &&
- !(lock->l_flags & LDLM_FL_LOCAL))
+ if ((flags & LDLM_FL_LOCAL_ONLY) && !ldlm_is_local(lock))
continue;
if (flags & LDLM_FL_TEST_LOCK) {
@@ -1142,7 +1149,7 @@ EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
*/
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
- lock->l_flags |= LDLM_FL_LVB_READY;
+ ldlm_set_lvb_ready(lock);
wake_up_all(&lock->l_waitq);
}
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
@@ -1243,8 +1250,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
if (lock) {
ldlm_lock2handle(lock, lockh);
- if ((flags & LDLM_FL_LVB_READY) &&
- (!(lock->l_flags & LDLM_FL_LVB_READY))) {
+ if ((flags & LDLM_FL_LVB_READY) && !ldlm_is_lvb_ready(lock)) {
__u64 wait_flags = LDLM_FL_LVB_READY |
LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
struct l_wait_info lwi;
@@ -1271,7 +1277,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
l_wait_event(lock->l_waitq,
lock->l_flags & wait_flags,
&lwi);
- if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
+ if (!ldlm_is_lvb_ready(lock)) {
if (flags & LDLM_FL_TEST_LOCK)
LDLM_LOCK_RELEASE(lock);
else
@@ -1325,10 +1331,10 @@ enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
lock = ldlm_handle2lock(lockh);
if (lock) {
lock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_GONE_MASK)
+ if (LDLM_HAVE_MASK(lock, GONE))
goto out;
- if (lock->l_flags & LDLM_FL_CBPENDING &&
+ if (ldlm_is_cbpending(lock) &&
lock->l_readers == 0 && lock->l_writers == 0)
goto out;
@@ -1542,7 +1548,8 @@ enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
/* Some flags from the enqueue want to make it into the AST, via the
* lock's l_flags.
*/
- lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
+ if (*flags & LDLM_FL_AST_DISCARD_DATA)
+ ldlm_set_ast_discard_data(lock);
/*
* This distinction between local lock trees is very important; a client
@@ -1581,7 +1588,7 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
lock_res_and_lock(lock);
list_del_init(&lock->l_bl_ast);
- LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
+ LASSERT(ldlm_is_ast_sent(lock));
LASSERT(lock->l_bl_ast_run == 0);
LASSERT(lock->l_blocking_lock);
lock->l_bl_ast_run++;
@@ -1628,12 +1635,12 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
/* nobody should touch l_cp_ast */
lock_res_and_lock(lock);
list_del_init(&lock->l_cp_ast);
- LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
+ LASSERT(ldlm_is_cp_reqd(lock));
/* save l_completion_ast since it can be changed by
* mds_intent_policy(), see bug 14225
*/
completion_callback = lock->l_completion_ast;
- lock->l_flags &= ~LDLM_FL_CP_REQD;
+ ldlm_clear_cp_reqd(lock);
unlock_res_and_lock(lock);
if (completion_callback)
@@ -1778,8 +1785,8 @@ out:
void ldlm_cancel_callback(struct ldlm_lock *lock)
{
check_res_locked(lock->l_resource);
- if (!(lock->l_flags & LDLM_FL_CANCEL)) {
- lock->l_flags |= LDLM_FL_CANCEL;
+ if (!ldlm_is_cancel(lock)) {
+ ldlm_set_cancel(lock);
if (lock->l_blocking_ast) {
unlock_res_and_lock(lock);
lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
@@ -1789,7 +1796,7 @@ void ldlm_cancel_callback(struct ldlm_lock *lock)
LDLM_DEBUG(lock, "no blocking ast");
}
}
- lock->l_flags |= LDLM_FL_BL_DONE;
+ ldlm_set_bl_done(lock);
}
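Taken together, the ldlm_lock.c hunks above are a mechanical conversion from open-coded lock->l_flags bit arithmetic to per-flag helpers. A sketch of the accessor triple the call sites assume, following the usual test/set/clear shape (helper names match the diff; the underlying macros are paraphrased):

	/* Per-flag accessor triple assumed by the converted call
	 * sites; one such triple exists per LDLM_FL_* bit. */
	#define LDLM_TEST_FLAG(_l, _b)	(((_l)->l_flags & (_b)) != 0)
	#define LDLM_SET_FLAG(_l, _b)	((_l)->l_flags |= (_b))
	#define LDLM_CLEAR_FLAG(_l, _b)	((_l)->l_flags &= ~(_b))

	#define ldlm_is_cbpending(_l)		LDLM_TEST_FLAG((_l), LDLM_FL_CBPENDING)
	#define ldlm_set_cbpending(_l)		LDLM_SET_FLAG((_l), LDLM_FL_CBPENDING)
	#define ldlm_clear_cbpending(_l)	LDLM_CLEAR_FLAG((_l), LDLM_FL_CBPENDING)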
/**
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index ebe9042ad..ab739f079 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -124,10 +124,10 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
LDLM_DEBUG(lock, "client blocking AST callback handler");
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
- if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
- lock->l_flags |= LDLM_FL_CANCEL;
+ if (ldlm_is_cancel_on_block(lock))
+ ldlm_set_cancel(lock);
do_ast = !lock->l_readers && !lock->l_writers;
unlock_res_and_lock(lock);
@@ -172,7 +172,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(to);
if (lock->l_granted_mode == lock->l_req_mode ||
- lock->l_flags & LDLM_FL_DESTROYED)
+ ldlm_is_destroyed(lock))
break;
}
}
@@ -215,7 +215,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
}
lock_res_and_lock(lock);
- if ((lock->l_flags & LDLM_FL_DESTROYED) ||
+ if (ldlm_is_destroyed(lock) ||
lock->l_granted_mode == lock->l_req_mode) {
/* bug 11300: the lock has already been granted */
unlock_res_and_lock(lock);
@@ -291,7 +291,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
out:
if (rc < 0) {
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_FAILED;
+ ldlm_set_failed(lock);
unlock_res_and_lock(lock);
wake_up(&lock->l_waitq);
}
@@ -360,8 +360,7 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
spin_lock(&blp->blp_lock);
- if (blwi->blwi_lock &&
- blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
+ if (blwi->blwi_lock && ldlm_is_discard_data(blwi->blwi_lock)) {
/* add LDLM_FL_DISCARD_DATA requests to the priority list */
list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
} else {
@@ -626,23 +625,22 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
return 0;
}
- if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
+ if (ldlm_is_fail_loc(lock) &&
lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
/* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
lock_res_and_lock(lock);
lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
- LDLM_AST_FLAGS);
+ LDLM_FL_AST_MASK);
if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
/* If somebody cancels the lock and the cache is already dropped,
* or the lock failed before the cp_ast was received on the client,
* we can tell the server we have no lock. Otherwise, we
* should send the cancel after dropping the cache.
*/
- if (((lock->l_flags & LDLM_FL_CANCELING) &&
- (lock->l_flags & LDLM_FL_BL_DONE)) ||
- (lock->l_flags & LDLM_FL_FAILED)) {
+ if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
+ ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n",
dlm_req->lock_handle[0].cookie);
unlock_res_and_lock(lock);
@@ -656,7 +654,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
* Let ldlm_cancel_lru() be fast.
*/
ldlm_lock_remove_from_lru(lock);
- lock->l_flags |= LDLM_FL_BL_AST;
+ ldlm_set_bl_ast(lock);
}
unlock_res_and_lock(lock);
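The LDLM_AST_FLAGS to LDLM_FL_AST_MASK rename above only changes which mask filters wire flags before they are merged into l_flags. A hedged sketch of that step; the member flags of the mask are an assumption for illustration:

	/* Only AST-related hints may cross from the wire into
	 * l_flags; the members below are assumed, not authoritative. */
	#define LDLM_FL_AST_MASK	(LDLM_FL_AST_SENT | LDLM_FL_DISCARD_DATA)

	/* hypothetical wrapper showing the filtering at the call site */
	static inline __u64 ldlm_wire_ast_flags(__u64 wire_flags)
	{
		return ldlm_flags_from_wire(wire_flags & LDLM_FL_AST_MASK);
	}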
@@ -674,7 +672,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
case LDLM_BL_CALLBACK:
CDEBUG(D_INODE, "blocking ast\n");
req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
- if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
+ if (!ldlm_is_cancel_on_block(lock)) {
rc = ldlm_callback_reply(req, 0);
if (req->rq_no_reply || rc)
ldlm_callback_errmsg(req, "Normal process", rc,
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 74e193e52..107314e28 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -153,7 +153,7 @@ static int ldlm_completion_tail(struct ldlm_lock *lock)
long delay;
int result;
- if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) {
+ if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "client-side enqueue: destroyed");
result = -EIO;
} else {
@@ -252,7 +252,7 @@ noreproc:
lwd.lwd_lock = lock;
- if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
+ if (ldlm_is_no_timeout(lock)) {
LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
lwi = LWI_INTR(interrupted_completion_wait, &lwd);
} else {
@@ -269,7 +269,7 @@ noreproc:
if (OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
- lock->l_flags |= LDLM_FL_FAIL_LOC;
+ ldlm_set_fail_loc(lock);
rc = -EINTR;
} else {
/* Go to sleep until the lock is granted or cancelled. */
@@ -296,7 +296,7 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
lock_res_and_lock(lock);
/* Check that lock is not granted or failed, we might race. */
if ((lock->l_req_mode != lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_FAILED)) {
+ !ldlm_is_failed(lock)) {
/* Make sure that this lock will not be found by raced
* bl_ast and -EINVAL reply is sent to server anyways.
* bug 17645
@@ -347,7 +347,6 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
struct ldlm_lock *lock;
struct ldlm_reply *reply;
int cleanup_phase = 1;
- int size = 0;
lock = ldlm_handle2lock(lockh);
/* ldlm_cli_enqueue is holding a reference on this lock. */
@@ -375,8 +374,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
goto cleanup;
}
- if (lvb_len != 0) {
- LASSERT(lvb);
+ if (lvb_len > 0) {
+ int size = 0;
size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
RCL_SERVER);
@@ -390,12 +389,13 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
rc = -EINVAL;
goto cleanup;
}
+ lvb_len = size;
}
if (rc == ELDLM_LOCK_ABORTED) {
- if (lvb_len != 0)
+ if (lvb_len > 0 && lvb)
rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
- lvb, size);
+ lvb, lvb_len);
if (rc == 0)
rc = ELDLM_LOCK_ABORTED;
goto cleanup;
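The lvb_len changes in this hunk make the client adopt the server-reported LVB size instead of trusting its own estimate, and only copy when a buffer was actually supplied. A minimal sketch of the resulting rule; the helper name is made up for illustration:

	/* Hypothetical condensation of the new LVB sizing rule. */
	static int lvb_copy_checked(struct ldlm_lock *lock,
				    struct req_capsule *pill,
				    void *lvb, int lvb_len, int server_size)
	{
		if (lvb_len <= 0 || !lvb)	/* caller wants no LVB */
			return 0;
		if (server_size > lvb_len)	/* reply larger than buffer */
			return -EINVAL;
		/* copy exactly what the server sent, not the old estimate */
		return ldlm_fill_lvb(lock, pill, RCL_SERVER, lvb, server_size);
	}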
@@ -421,7 +421,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
*flags = ldlm_flags_from_wire(reply->lock_flags);
lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
- LDLM_INHERIT_FLAGS);
+ LDLM_FL_INHERIT_MASK);
/* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
* to wait with no timeout as well
*/
@@ -489,7 +489,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
/* If the lock has already been granted by a completion AST, don't
* clobber the LVB with an older one.
*/
- if (lvb_len != 0) {
+ if (lvb_len > 0) {
/* We must lock or a racing completion might update lvb without
* letting us know and we'll clobber the correct value.
* Cannot unlock after the check either, as that still leaves
@@ -498,7 +498,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
lock_res_and_lock(lock);
if (lock->l_req_mode != lock->l_granted_mode)
rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
- lock->l_lvb_data, size);
+ lock->l_lvb_data, lvb_len);
unlock_res_and_lock(lock);
if (rc < 0) {
cleanup_phase = 1;
@@ -518,7 +518,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
}
}
- if (lvb_len && lvb) {
+ if (lvb_len > 0 && lvb) {
/* Copy the LVB here, and not earlier, because the completion
* AST (if any) can override what we got in the reply
*/
@@ -601,7 +601,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
flags = ns_connect_lru_resize(ns) ?
- LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
+ LDLM_CANCEL_LRUR_NO_WAIT : LDLM_CANCEL_AGED;
to_free = !ns_connect_lru_resize(ns) &&
opc == LDLM_ENQUEUE ? 1 : 0;
@@ -821,12 +821,11 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
LDLM_DEBUG(lock, "client-side cancel");
/* Set this flag to prevent others from getting new references */
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
local_only = !!(lock->l_flags &
(LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
ldlm_cancel_callback(lock);
- rc = (lock->l_flags & LDLM_FL_BL_AST) ?
- LDLM_FL_BL_AST : LDLM_FL_CANCELING;
+ rc = ldlm_is_bl_ast(lock) ? LDLM_FL_BL_AST : LDLM_FL_CANCELING;
unlock_res_and_lock(lock);
if (local_only) {
@@ -1131,31 +1130,30 @@ EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
* dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g.
* readahead requests, ...)
*/
-static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res
+ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
+ int unused, int added, int count)
{
- ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
- ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
-
- lock_res_and_lock(lock);
+ enum ldlm_policy_res result = LDLM_POLICY_CANCEL_LOCK;
/* don't check added & count since we want to process all locks
- * from unused list
+ * from unused list.
+ * It is fine not to take the lock when accessing
+ * lock->l_resource, since the lock has already been granted
+ * and so its resource cannot change.
*/
switch (lock->l_resource->lr_type) {
case LDLM_EXTENT:
case LDLM_IBITS:
- if (cb && cb(lock))
+ if (ns->ns_cancel && ns->ns_cancel(lock) != 0)
break;
default:
result = LDLM_POLICY_SKIP_LOCK;
- lock->l_flags |= LDLM_FL_SKIPPED;
+ lock_res_and_lock(lock);
+ ldlm_set_skipped(lock);
+ unlock_res_and_lock(lock);
break;
}
- unlock_res_and_lock(lock);
return result;
}
@@ -1168,10 +1166,10 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
unsigned long cur = cfs_time_current();
struct ldlm_pool *pl = &ns->ns_pool;
@@ -1196,8 +1194,13 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
/* Stop when the SLV has not yet come from the server, or lv is
* smaller than the SLV.
*/
- return (slv == 0 || lv < slv) ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+ if (slv == 0 || lv < slv)
+ return LDLM_POLICY_KEEP_LOCK;
+
+ if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
+ return LDLM_POLICY_KEEP_LOCK;
+
+ return LDLM_POLICY_CANCEL_LOCK;
}
/**
@@ -1209,10 +1212,10 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
/* Stop LRU processing when we reach past @count or have checked all
* locks in LRU.
@@ -1230,16 +1233,35 @@ static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
- /* Stop LRU processing if young lock is found and we reach past count */
- return ((added >= count) &&
- time_before(cfs_time_current(),
- cfs_time_add(lock->l_last_used, ns->ns_max_age))) ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+ if ((added >= count) &&
+ time_before(cfs_time_current(),
+ cfs_time_add(lock->l_last_used, ns->ns_max_age)))
+ return LDLM_POLICY_KEEP_LOCK;
+
+ if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
+ return LDLM_POLICY_KEEP_LOCK;
+
+ return LDLM_POLICY_CANCEL_LOCK;
+}
+
+static enum ldlm_policy_res
+ldlm_cancel_lrur_no_wait_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
+{
+ enum ldlm_policy_res result;
+
+ result = ldlm_cancel_lrur_policy(ns, lock, unused, added, count);
+ if (result == LDLM_POLICY_KEEP_LOCK)
+ return result;
+
+ return ldlm_cancel_no_wait_policy(ns, lock, unused, added, count);
}
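The new ldlm_cancel_lrur_no_wait_policy() above is plain composition: the LRU-resize heuristic runs first, and only locks it would cancel are passed through the no-wait rule. The same shape generalizes to any pair of policies; a sketch with a made-up combinator name:

	/* Hypothetical generic combinator over two cancel policies:
	 * "keep" from the first policy short-circuits, otherwise
	 * the second policy decides. */
	static enum ldlm_policy_res
	ldlm_policy_chain(ldlm_cancel_lru_policy_t first,
			  ldlm_cancel_lru_policy_t second,
			  struct ldlm_namespace *ns, struct ldlm_lock *lock,
			  int unused, int added, int count)
	{
		enum ldlm_policy_res res = first(ns, lock, unused, added, count);

		if (res == LDLM_POLICY_KEEP_LOCK)
			return res;
		return second(ns, lock, unused, added, count);
	}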
/**
@@ -1251,10 +1273,9 @@ static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res
+ldlm_cancel_default_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
+ int unused, int added, int count)
{
/* Stop LRU processing when we reach past count or have checked all
* locks in LRU.
@@ -1263,7 +1284,8 @@ static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
-typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
+typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)(
+ struct ldlm_namespace *,
struct ldlm_lock *, int,
int, int);
@@ -1281,6 +1303,8 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
return ldlm_cancel_lrur_policy;
else if (flags & LDLM_CANCEL_PASSED)
return ldlm_cancel_passed_policy;
+ else if (flags & LDLM_CANCEL_LRUR_NO_WAIT)
+ return ldlm_cancel_lrur_no_wait_policy;
} else {
if (flags & LDLM_CANCEL_AGED)
return ldlm_cancel_aged_policy;
@@ -1329,6 +1353,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
int added = 0, unused, remained;
+ int no_wait = flags & (LDLM_CANCEL_NO_WAIT | LDLM_CANCEL_LRUR_NO_WAIT);
spin_lock(&ns->ns_lock);
unused = ns->ns_nr_unused;
@@ -1341,7 +1366,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
LASSERT(pf);
while (!list_empty(&ns->ns_unused_list)) {
- ldlm_policy_res_t result;
+ enum ldlm_policy_res result;
+ time_t last_use = 0;
/* all unused locks */
if (remained-- <= 0)
@@ -1354,17 +1380,20 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
l_lru) {
/* No locks which got blocking requests. */
- LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+ LASSERT(!ldlm_is_bl_ast(lock));
- if (flags & LDLM_CANCEL_NO_WAIT &&
- lock->l_flags & LDLM_FL_SKIPPED)
+ if (no_wait && ldlm_is_skipped(lock))
/* already processed */
continue;
+ last_use = lock->l_last_used;
+ if (last_use == cfs_time_current())
+ continue;
+
/* Somebody is already doing CANCEL. No need for this
* lock in LRU, do not traverse it again.
*/
- if (!(lock->l_flags & LDLM_FL_CANCELING))
+ if (!ldlm_is_canceling(lock))
break;
ldlm_lock_remove_from_lru_nolock(lock);
@@ -1407,12 +1436,14 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
lock_res_and_lock(lock);
/* Check flags again under the lock. */
- if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (ldlm_lock_remove_from_lru(lock) == 0)) {
+ if (ldlm_is_canceling(lock) ||
+ (ldlm_lock_remove_from_lru_check(lock, last_use) == 0)) {
/* Another thread is removing lock from LRU, or
* somebody is already doing CANCEL, or there
* is a blocking request which will send cancel
- * by itself, or the lock is no longer unused.
+ * by itself, or the lock is no longer unused, or
+ * the lock has been used since the pf() call and
+ * pages could have been put under it.
*/
unlock_res_and_lock(lock);
lu_ref_del(&lock->l_reference,
@@ -1429,7 +1460,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
* where while we are doing cancel here, server is also
* silently cancelling this lock.
*/
- lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
+ ldlm_clear_cancel_on_block(lock);
/* Setting the CBPENDING flag is a little misleading,
* but prevents an important race; namely, once
@@ -1526,8 +1557,7 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
/* If somebody is already doing CANCEL, or a blocking AST came,
* skip this lock.
*/
- if (lock->l_flags & LDLM_FL_BL_AST ||
- lock->l_flags & LDLM_FL_CANCELING)
+ if (ldlm_is_bl_ast(lock) || ldlm_is_canceling(lock))
continue;
if (lockmode_compat(lock->l_granted_mode, mode))
@@ -1771,7 +1801,6 @@ static void ldlm_namespace_foreach(struct ldlm_namespace *ns,
cfs_hash_for_each_nolock(ns->ns_rs_hash,
ldlm_res_iter_helper, &helper);
-
}
/* non-blocking function to manipulate a lock whose cb_data is being put away.
@@ -1887,7 +1916,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
int flags;
/* Bug 11974: Do not replay a lock which is actively being canceled */
- if (lock->l_flags & LDLM_FL_CANCELING) {
+ if (ldlm_is_canceling(lock)) {
LDLM_DEBUG(lock, "Not replaying canceled lock:");
return 0;
}
@@ -1896,7 +1925,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
* server might have long dropped it, but notification of that event was
* lost by network. (and server granted conflicting lock already)
*/
- if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
+ if (ldlm_is_cancel_on_block(lock)) {
LDLM_DEBUG(lock, "Not replaying reply-less lock:");
ldlm_lock_cancel(lock);
return 0;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 9dede87ad..e99c89c34 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -124,9 +124,15 @@ int ldlm_debugfs_setup(void)
}
rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
+ if (rc) {
+ CERROR("LProcFS failed in ldlm-init\n");
+ goto err_svc;
+ }
return 0;
+err_svc:
+ ldebugfs_remove(&ldlm_svc_debugfs_dir);
err_ns:
ldebugfs_remove(&ldlm_ns_debugfs_dir);
err_type:
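The added err_svc label follows the kernel's standard goto-ladder for setup failure: jump to the label that tears down the most recently created resource, then fall through the earlier ones in reverse order. Condensed from the hunk above (earlier teardown steps elided):

	rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
	if (rc) {
		CERROR("LProcFS failed in ldlm-init\n");
		goto err_svc;		/* newest resource first */
	}
	return 0;

	err_svc:
		ldebugfs_remove(&ldlm_svc_debugfs_dir);
	err_ns:
		ldebugfs_remove(&ldlm_ns_debugfs_dir);	/* falls through */
	err_type:
		/* ... earlier teardown continues in reverse order ... */
		return rc;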
@@ -758,12 +764,12 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
list_for_each(tmp, q) {
lock = list_entry(tmp, struct ldlm_lock,
l_res_link);
- if (lock->l_flags & LDLM_FL_CLEANED) {
+ if (ldlm_is_cleaned(lock)) {
lock = NULL;
continue;
}
LDLM_LOCK_GET(lock);
- lock->l_flags |= LDLM_FL_CLEANED;
+ ldlm_set_cleaned(lock);
break;
}
@@ -775,13 +781,13 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
/* Set CBPENDING so nothing in the cancellation path
* can match this lock.
*/
- lock->l_flags |= LDLM_FL_CBPENDING;
- lock->l_flags |= LDLM_FL_FAILED;
+ ldlm_set_cbpending(lock);
+ ldlm_set_failed(lock);
lock->l_flags |= flags;
/* ... without sending a CANCEL message for local_only. */
if (local_only)
- lock->l_flags |= LDLM_FL_LOCAL_ONLY;
+ ldlm_set_local_only(lock);
if (local_only && (lock->l_readers || lock->l_writers)) {
/* This is a little bit gross, but much better than the
@@ -1275,7 +1281,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
LDLM_DEBUG(lock, "About to add this lock:\n");
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
return;
}
@@ -1400,3 +1406,4 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
LDLM_DEBUG_LIMIT(level, lock, "###");
}
}
+EXPORT_SYMBOL(ldlm_resource_dump);
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index 9ac29e718..2ce10ff01 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -4,7 +4,8 @@ lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
rw.o namei.o symlink.o llite_mmap.o \
xattr.o xattr_cache.o remote_perm.o llite_rmtacl.o \
rw26.o super25.o statahead.o \
- ../lclient/glimpse.o ../lclient/lcommon_cl.o ../lclient/lcommon_misc.o \
- vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o lproc_llite.o
+ glimpse.o lcommon_cl.o lcommon_misc.o \
+ vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o vvp_req.o \
+ lproc_llite.o
llite_lloop-y := lloop.o
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index dd1c82701..1b6f82a1a 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -108,11 +108,8 @@ static int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
static inline int return_if_equal(struct ldlm_lock *lock, void *data)
{
- if ((lock->l_flags &
- (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA)) ==
- (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA))
- return LDLM_ITER_CONTINUE;
- return LDLM_ITER_STOP;
+ return (ldlm_is_canceling(lock) && ldlm_is_discard_data(lock)) ?
+ LDLM_ITER_CONTINUE : LDLM_ITER_STOP;
}
/* find any ldlm lock of the inode in mdc and lov
@@ -253,8 +250,8 @@ void ll_invalidate_aliases(struct inode *inode)
{
struct dentry *dentry;
- CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
- inode->i_ino, inode->i_generation, inode);
+ CDEBUG(D_INODE, "marking dentries for ino "DFID"(%p) invalid\n",
+ PFID(ll_inode2fid(inode)), inode);
ll_lock_dcache(inode);
hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
@@ -289,8 +286,8 @@ void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
if (it->d.lustre.it_lock_mode && inode) {
struct ll_sb_info *sbi = ll_i2sbi(inode);
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
- inode, inode->i_ino, inode->i_generation);
+ CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
}
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index e4c82883e..4b00d1ac8 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -158,11 +158,16 @@ static int ll_dir_filler(void *_hash, struct page *page0)
int i;
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) hash %llu\n",
- inode->i_ino, inode->i_generation, inode, hash);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) hash %llu\n",
+ PFID(ll_inode2fid(inode)), inode, hash);
LASSERT(max_pages > 0 && max_pages <= MD_MAX_BRW_PAGES);
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
if (page_pool) {
page_pool[0] = page0;
@@ -177,8 +182,6 @@ static int ll_dir_filler(void *_hash, struct page *page0)
page_pool[npages] = page;
}
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
op_data->op_npages = npages;
op_data->op_offset = hash;
rc = md_readpage(exp, op_data, page_pool, &request);
@@ -190,7 +193,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
/* Checked by mdc_readpage() */
if (body->valid & OBD_MD_FLSIZE)
- cl_isize_write(inode, body->size);
+ i_size_write(inode, body->size);
nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
>> PAGE_SHIFT;
@@ -372,8 +375,8 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
return ERR_PTR(rc);
}
- CDEBUG(D_INODE, "setting lr_lvb_inode to inode %p (%lu/%u)\n",
- dir, dir->i_ino, dir->i_generation);
+ CDEBUG(D_INODE, "setting lr_lvb_inode to inode "DFID"(%p)\n",
+ PFID(ll_inode2fid(dir)), dir);
md_set_lock_data(ll_i2sbi(dir)->ll_md_exp,
&it.d.lustre.it_lock_handle, dir, NULL);
} else {
@@ -468,6 +471,28 @@ fail:
goto out_unlock;
}
+/**
+ * Return the IF_* type for a given lu_dirent entry.
+ * The IF_* flag should be converted to the particular OS
+ * file type in the platform llite module.
+ */
+static __u16 ll_dirent_type_get(struct lu_dirent *ent)
+{
+ __u16 type = 0;
+ struct luda_type *lt;
+ int len = 0;
+
+ if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
+ const unsigned int align = sizeof(struct luda_type) - 1;
+
+ len = le16_to_cpu(ent->lde_namelen);
+ len = (len + align) & ~align;
+ lt = (void *)ent->lde_name + len;
+ type = IFTODT(le16_to_cpu(lt->lt_type));
+ }
+ return type;
+}
+
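ll_dirent_type_get() above locates the struct luda_type record that the server appends right after the (unaligned) entry name, so the name length must first be rounded up to the record's alignment. A worked example of the rounding, assuming sizeof(struct luda_type) == 2:

	/* align == sizeof(struct luda_type) - 1 == 1, so:
	 *   namelen = 5  ->  len = (5 + 1) & ~1 = 6
	 *   namelen = 6  ->  len = (6 + 1) & ~1 = 6
	 * lt therefore always lands on a 2-byte boundary
	 * after the name. */
	const unsigned int align = sizeof(struct luda_type) - 1;
	int len = le16_to_cpu(ent->lde_namelen);
	struct luda_type *lt;

	len = (len + align) & ~align;
	lt = (void *)ent->lde_name + len;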
int ll_dir_read(struct inode *inode, struct dir_context *ctx)
{
struct ll_inode_info *info = ll_i2info(inode);
@@ -589,15 +614,16 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
struct inode *inode = file_inode(filp);
struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp);
struct ll_sb_info *sbi = ll_i2sbi(inode);
+ __u64 pos = lfd ? lfd->lfd_pos : 0;
int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
int api32 = ll_need_32bit_api(sbi);
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu 32bit_api %d\n",
- inode->i_ino, inode->i_generation,
- inode, (unsigned long)lfd->lfd_pos, i_size_read(inode), api32);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) pos %lu/%llu 32bit_api %d\n",
+ PFID(ll_inode2fid(inode)), inode, (unsigned long)pos,
+ i_size_read(inode), api32);
- if (lfd->lfd_pos == MDS_DIR_END_OFF) {
+ if (pos == MDS_DIR_END_OFF) {
/*
* end-of-file.
*/
@@ -605,9 +631,10 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
goto out;
}
- ctx->pos = lfd->lfd_pos;
+ ctx->pos = pos;
rc = ll_dir_read(inode, ctx);
- lfd->lfd_pos = ctx->pos;
+ if (lfd)
+ lfd->lfd_pos = ctx->pos;
if (ctx->pos == MDS_DIR_END_OFF) {
if (api32)
ctx->pos = LL_DIR_END_OFF_32BIT;
@@ -804,9 +831,8 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
rc = md_getattr(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr failed on inode %lu/%u: rc %d\n",
- inode->i_ino,
- inode->i_generation, rc);
+ CDEBUG(D_INFO, "md_getattr failed on inode "DFID": rc %d\n",
+ PFID(ll_inode2fid(inode)), rc);
goto out;
}
@@ -916,7 +942,7 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
}
/* Read current file data version */
- rc = ll_data_version(inode, &data_version, 1);
+ rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
iput(inode);
if (rc != 0) {
CDEBUG(D_HSM, "Could not read file data version of "
@@ -936,6 +962,9 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
}
progress:
+ /* On error, the request should be considered completed */
+ if (hpk.hpk_errval > 0)
+ hpk.hpk_flags |= HP_FLAG_COMPLETED;
rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
&hpk, NULL);
@@ -997,8 +1026,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
goto progress;
}
- rc = ll_data_version(inode, &data_version,
- copy->hc_hai.hai_action == HSMA_ARCHIVE);
+ rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
iput(inode);
if (rc) {
CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n");
@@ -1033,7 +1061,6 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
/* hpk_errval must be >= 0 */
hpk.hpk_errval = EBUSY;
}
-
}
progress:
@@ -1242,8 +1269,8 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct obd_ioctl_data *data;
int rc = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), cmd=%#x\n",
- inode->i_ino, inode->i_generation, inode, cmd);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), cmd=%#x\n",
+ PFID(ll_inode2fid(inode)), inode, cmd);
/* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
@@ -1362,7 +1389,6 @@ out_free:
lmv_out_free:
obd_ioctl_freedata(buf, len);
return rc;
-
}
case LL_IOC_LOV_SETSTRIPE: {
struct lov_user_md_v3 lumv3;
@@ -1474,8 +1500,9 @@ free_lmv:
cmd == LL_IOC_MDC_GETINFO)) {
rc = 0;
goto skip_lmm;
- } else
+ } else {
goto out_req;
+ }
}
if (cmd == IOC_MDC_GETFILESTRIPE ||
@@ -1688,15 +1715,16 @@ out_quotactl:
return ll_flush_ctx(inode);
#ifdef CONFIG_FS_POSIX_ACL
case LL_IOC_RMTACL: {
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- rc = rct_add(&sbi->ll_rct, current_pid(), arg);
- if (!rc)
- fd->fd_flags |= LL_FILE_RMTACL;
- return rc;
- } else
- return 0;
+ rc = rct_add(&sbi->ll_rct, current_pid(), arg);
+ if (!rc)
+ fd->fd_flags |= LL_FILE_RMTACL;
+ return rc;
+ } else {
+ return 0;
+ }
}
#endif
case LL_IOC_GETOBDCOUNT: {
@@ -1817,6 +1845,9 @@ out_quotactl:
return rc;
}
case LL_IOC_HSM_CT_START:
+ if (!capable(CFS_CAP_SYS_ADMIN))
+ return -EPERM;
+
rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg,
sizeof(struct lustre_kernelcomm));
return rc;
@@ -1865,7 +1896,6 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
int api32 = ll_need_32bit_api(sbi);
loff_t ret = -EINVAL;
- inode_lock(inode);
switch (origin) {
case SEEK_SET:
break;
@@ -1903,7 +1933,6 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
goto out;
out:
- inode_unlock(inode);
return ret;
}
@@ -1922,7 +1951,7 @@ const struct file_operations ll_dir_operations = {
.open = ll_dir_open,
.release = ll_dir_release,
.read = generic_read_dir,
- .iterate = ll_readdir,
+ .iterate_shared = ll_readdir,
.unlocked_ioctl = ll_dir_ioctl,
.fsync = ll_fsync,
};
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index cf619af3c..f47f2acaf 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -45,6 +45,7 @@
#include "../include/lustre_lite.h"
#include <linux/pagemap.h>
#include <linux/file.h>
+#include <linux/mount.h>
#include "llite_internal.h"
#include "../include/lustre/ll_fiemap.h"
@@ -87,8 +88,7 @@ void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
op_data->op_attr.ia_ctime = inode->i_ctime;
op_data->op_attr.ia_size = i_size_read(inode);
op_data->op_attr_blocks = inode->i_blocks;
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags =
- ll_inode_to_ext_flags(inode->i_flags);
+ op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
op_data->op_ioepoch = ll_i2info(inode)->lli_ioepoch;
if (fh)
op_data->op_handle = *fh;
@@ -170,13 +170,15 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
*/
rc = ll_som_update(inode, op_data);
if (rc) {
- CERROR("inode %lu mdc Size-on-MDS update failed: rc = %d\n",
- inode->i_ino, rc);
+ CERROR("%s: inode "DFID" mdc Size-on-MDS update failed: rc = %d\n",
+ ll_i2mdexp(inode)->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)), rc);
rc = 0;
}
} else if (rc) {
- CERROR("inode %lu mdc close failed: rc = %d\n",
- inode->i_ino, rc);
+ CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
+ ll_i2mdexp(inode)->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)), rc);
}
/* DATA_MODIFIED flag was successfully sent on close, cancel data
@@ -278,7 +280,7 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
/* clear group lock, if present */
if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
- ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid);
+ ll_put_grouplock(inode, file, fd->fd_grouplock.lg_gid);
if (fd->fd_lease_och) {
bool lease_broken;
@@ -343,8 +345,8 @@ int ll_file_release(struct inode *inode, struct file *file)
struct ll_inode_info *lli = ll_i2info(inode);
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
- inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
#ifdef CONFIG_FS_POSIX_ACL
if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
@@ -543,8 +545,8 @@ int ll_file_open(struct inode *inode, struct file *file)
struct ll_file_data *fd;
int rc = 0, opendir_set = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), flags %o\n", inode->i_ino,
- inode->i_generation, inode, file->f_flags);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), flags %o\n",
+ PFID(ll_inode2fid(inode)), inode, file->f_flags);
it = file->private_data; /* XXX: compat macro */
file->private_data = NULL; /* prevent ll_local_open assertion */
@@ -677,7 +679,9 @@ restart:
if (rc)
goto out_och_free;
- LASSERT(it_disposition(it, DISP_ENQ_OPEN_REF));
+ LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF),
+ "inode %p: disposition %x, status %d\n", inode,
+ it_disposition(it, ~0), it->d.lustre.it_status);
rc = ll_local_open(file, it, fd, *och_p);
if (rc)
@@ -875,16 +879,19 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
return och;
out_close:
- rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
- if (rc2)
- CERROR("Close openhandle returned %d\n", rc2);
-
- /* cancel open lock */
+ /* Cancel open lock */
if (it.d.lustre.it_lock_mode != 0) {
ldlm_lock_decref_and_cancel(&och->och_lease_handle,
it.d.lustre.it_lock_mode);
it.d.lustre.it_lock_mode = 0;
+ och->och_lease_handle.cookie = 0ULL;
}
+ rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
+ if (rc2 < 0)
+ CERROR("%s: error closing file "DFID": %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(&ll_i2info(inode)->lli_fid), rc2);
+ och = NULL; /* och has been freed in ll_close_inode_openhandle() */
out_release_it:
ll_intent_release(&it);
out:
@@ -908,7 +915,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
lock_res_and_lock(lock);
cancelled = ldlm_is_cancel(lock);
unlock_res_and_lock(lock);
- ldlm_lock_put(lock);
+ LDLM_LOCK_PUT(lock);
}
CDEBUG(D_INODE, "lease for " DFID " broken? %d\n",
@@ -926,7 +933,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
/* Fills the obdo with the attributes for the lsm */
static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
- struct obdo *obdo, __u64 ioepoch, int sync)
+ struct obdo *obdo, __u64 ioepoch, int dv_flags)
{
struct ptlrpc_request_set *set;
struct obd_info oinfo = { };
@@ -945,9 +952,11 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
OBD_MD_FLMTIME | OBD_MD_FLCTIME |
OBD_MD_FLGROUP | OBD_MD_FLEPOCH |
OBD_MD_FLDATAVERSION;
- if (sync) {
+ if (dv_flags & (LL_DV_WR_FLUSH | LL_DV_RD_FLUSH)) {
oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS;
oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK;
+ if (dv_flags & LL_DV_WR_FLUSH)
+ oinfo.oi_oa->o_flags |= OBD_FL_FLUSH;
}
set = ptlrpc_prep_set();
@@ -960,11 +969,16 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
rc = ptlrpc_set_wait(set);
ptlrpc_set_destroy(set);
}
- if (rc == 0)
+ if (rc == 0) {
oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
OBD_MD_FLATIME | OBD_MD_FLMTIME |
OBD_MD_FLCTIME | OBD_MD_FLSIZE |
- OBD_MD_FLDATAVERSION);
+ OBD_MD_FLDATAVERSION | OBD_MD_FLFLAGS);
+ if (dv_flags & LL_DV_WR_FLUSH &&
+ !(oinfo.oi_oa->o_valid & OBD_MD_FLFLAGS &&
+ oinfo.oi_oa->o_flags & OBD_FL_FLUSH))
+ return -ENOTSUPP;
+ }
return rc;
}
@@ -980,7 +994,7 @@ int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
lsm = ccc_inode_lsm_get(inode);
rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode),
- obdo, ioepoch, sync);
+ obdo, ioepoch, sync ? LL_DV_RD_FLUSH : 0);
if (rc == 0) {
struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi;
@@ -994,50 +1008,57 @@ int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
return rc;
}
-int ll_merge_lvb(const struct lu_env *env, struct inode *inode)
+int ll_merge_attr(const struct lu_env *env, struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *obj = lli->lli_clob;
- struct cl_attr *attr = ccc_env_thread_attr(env);
- struct ost_lvb lvb;
+ struct cl_attr *attr = vvp_env_thread_attr(env);
+ s64 atime;
+ s64 mtime;
+ s64 ctime;
int rc = 0;
ll_inode_size_lock(inode);
+
/* merge the timestamps most recently obtained from the MDS
* with the timestamps obtained from the OSTs
*/
- LTIME_S(inode->i_atime) = lli->lli_lvb.lvb_atime;
- LTIME_S(inode->i_mtime) = lli->lli_lvb.lvb_mtime;
- LTIME_S(inode->i_ctime) = lli->lli_lvb.lvb_ctime;
+ LTIME_S(inode->i_atime) = lli->lli_atime;
+ LTIME_S(inode->i_mtime) = lli->lli_mtime;
+ LTIME_S(inode->i_ctime) = lli->lli_ctime;
- lvb.lvb_size = i_size_read(inode);
- lvb.lvb_blocks = inode->i_blocks;
- lvb.lvb_mtime = LTIME_S(inode->i_mtime);
- lvb.lvb_atime = LTIME_S(inode->i_atime);
- lvb.lvb_ctime = LTIME_S(inode->i_ctime);
+ mtime = LTIME_S(inode->i_mtime);
+ atime = LTIME_S(inode->i_atime);
+ ctime = LTIME_S(inode->i_ctime);
cl_object_attr_lock(obj);
rc = cl_object_attr_get(env, obj, attr);
cl_object_attr_unlock(obj);
- if (rc == 0) {
- if (lvb.lvb_atime < attr->cat_atime)
- lvb.lvb_atime = attr->cat_atime;
- if (lvb.lvb_ctime < attr->cat_ctime)
- lvb.lvb_ctime = attr->cat_ctime;
- if (lvb.lvb_mtime < attr->cat_mtime)
- lvb.lvb_mtime = attr->cat_mtime;
+ if (rc != 0)
+ goto out_size_unlock;
- CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
- PFID(&lli->lli_fid), attr->cat_size);
- cl_isize_write_nolock(inode, attr->cat_size);
+ if (atime < attr->cat_atime)
+ atime = attr->cat_atime;
- inode->i_blocks = attr->cat_blocks;
+ if (ctime < attr->cat_ctime)
+ ctime = attr->cat_ctime;
- LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
- LTIME_S(inode->i_atime) = lvb.lvb_atime;
- LTIME_S(inode->i_ctime) = lvb.lvb_ctime;
- }
+ if (mtime < attr->cat_mtime)
+ mtime = attr->cat_mtime;
+
+ CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
+ PFID(&lli->lli_fid), attr->cat_size);
+
+ i_size_write(inode, attr->cat_size);
+
+ inode->i_blocks = attr->cat_blocks;
+
+ LTIME_S(inode->i_mtime) = mtime;
+ LTIME_S(inode->i_atime) = atime;
+ LTIME_S(inode->i_ctime) = ctime;
+
+out_size_unlock:
ll_inode_size_unlock(inode);
return rc;
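The rewritten ll_merge_attr() keeps, per timestamp, the newer of the MDS value cached in ll_inode_info and the OST value returned by cl_object_attr_get(). A one-line sketch of the merge rule; the helper name is hypothetical:

	/* Hypothetical helper naming the rule applied three times
	 * above: client-visible time = max(MDS time, OST time). */
	static inline s64 ll_merge_time(s64 mds_time, s64 ost_time)
	{
		return ost_time > mds_time ? ost_time : mds_time;
	}
	/* e.g. atime = ll_merge_time(lli->lli_atime, attr->cat_atime); */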
@@ -1120,47 +1141,48 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
struct cl_io *io;
ssize_t result;
+ CDEBUG(D_VFSTRACE, "file: %s, type: %d ppos: %llu, count: %zd\n",
+ file->f_path.dentry->d_name.name, iot, *ppos, count);
+
restart:
- io = ccc_env_thread_io(env);
+ io = vvp_env_thread_io(env);
ll_io_init(io, file, iot == CIT_WRITE);
if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
struct vvp_io *vio = vvp_env_io(env);
- struct ccc_io *cio = ccc_env_io(env);
int write_mutex_locked = 0;
- cio->cui_fd = LUSTRE_FPRIVATE(file);
- vio->cui_io_subtype = args->via_io_subtype;
+ vio->vui_fd = LUSTRE_FPRIVATE(file);
+ vio->vui_io_subtype = args->via_io_subtype;
- switch (vio->cui_io_subtype) {
+ switch (vio->vui_io_subtype) {
case IO_NORMAL:
- cio->cui_iter = args->u.normal.via_iter;
- cio->cui_iocb = args->u.normal.via_iocb;
+ vio->vui_iter = args->u.normal.via_iter;
+ vio->vui_iocb = args->u.normal.via_iocb;
if ((iot == CIT_WRITE) &&
- !(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
if (mutex_lock_interruptible(&lli->
lli_write_mutex)) {
result = -ERESTARTSYS;
goto out;
}
write_mutex_locked = 1;
- } else if (iot == CIT_READ) {
- down_read(&lli->lli_trunc_sem);
}
+ down_read(&lli->lli_trunc_sem);
break;
case IO_SPLICE:
- vio->u.splice.cui_pipe = args->u.splice.via_pipe;
- vio->u.splice.cui_flags = args->u.splice.via_flags;
+ vio->u.splice.vui_pipe = args->u.splice.via_pipe;
+ vio->u.splice.vui_flags = args->u.splice.via_flags;
break;
default:
- CERROR("Unknown IO type - %u\n", vio->cui_io_subtype);
+ CERROR("Unknown IO type - %u\n", vio->vui_io_subtype);
LBUG();
}
result = cl_io_loop(env, io);
+ if (args->via_io_subtype == IO_NORMAL)
+ up_read(&lli->lli_trunc_sem);
if (write_mutex_locked)
mutex_unlock(&lli->lli_write_mutex);
- else if (args->via_io_subtype == IO_NORMAL && iot == CIT_READ)
- up_read(&lli->lli_trunc_sem);
} else {
/* cl_io_rw_init() handled IO */
result = io->ci_result;
@@ -1197,6 +1219,7 @@ out:
fd->fd_write_failed = true;
}
}
+ CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
return result;
}
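With this hunk, normal reads and writes both hold lli_trunc_sem for the duration of cl_io_loop(), not just reads. A sketch of the exclusion this buys, assuming truncate takes the semaphore exclusively elsewhere:

	/* IO path: shared hold across the whole IO loop */
	down_read(&lli->lli_trunc_sem);
	/* ... cl_io_loop(env, io) ... */
	up_read(&lli->lli_trunc_sem);

	/* truncate path (elsewhere, assumed): exclusive hold */
	down_write(&lli->lli_trunc_sem);
	/* ... shrink i_size and discard pages safely ... */
	up_write(&lli->lli_trunc_sem);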
@@ -1212,7 +1235,7 @@ static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
if (IS_ERR(env))
return PTR_ERR(env);
- args = vvp_env_args(env, IO_NORMAL);
+ args = ll_env_args(env, IO_NORMAL);
args->u.normal.via_iter = to;
args->u.normal.via_iocb = iocb;
@@ -1236,7 +1259,7 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (IS_ERR(env))
return PTR_ERR(env);
- args = vvp_env_args(env, IO_NORMAL);
+ args = ll_env_args(env, IO_NORMAL);
args->u.normal.via_iter = from;
args->u.normal.via_iocb = iocb;
@@ -1262,7 +1285,7 @@ static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
if (IS_ERR(env))
return PTR_ERR(env);
- args = vvp_env_args(env, IO_SPLICE);
+ args = ll_env_args(env, IO_SPLICE);
args->u.splice.via_pipe = pipe;
args->u.splice.via_flags = flags;
@@ -1354,7 +1377,8 @@ static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
}
int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
- int flags, struct lov_user_md *lum, int lum_size)
+ __u64 flags, struct lov_user_md *lum,
+ int lum_size)
{
struct lov_stripe_md *lsm = NULL;
struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
@@ -1363,8 +1387,8 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
lsm = ccc_inode_lsm_get(inode);
if (lsm) {
ccc_inode_lsm_put(inode, lsm);
- CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
- inode->i_ino);
+ CDEBUG(D_IOCTL, "stripe already exists for inode "DFID"\n",
+ PFID(ll_inode2fid(inode)));
rc = -EEXIST;
goto out;
}
@@ -1478,7 +1502,7 @@ out:
static int ll_lov_setea(struct inode *inode, struct file *file,
unsigned long arg)
{
- int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
+ __u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
struct lov_user_md *lump;
int lum_size = sizeof(struct lov_user_md) +
sizeof(struct lov_user_ost_data);
@@ -1512,7 +1536,7 @@ static int ll_lov_setstripe(struct inode *inode, struct file *file,
struct lov_user_md_v1 __user *lumv1p = (void __user *)arg;
struct lov_user_md_v3 __user *lumv3p = (void __user *)arg;
int lum_size, rc;
- int flags = FMODE_WRITE;
+ __u64 flags = FMODE_WRITE;
/* first try with v1 which is smaller than v3 */
lum_size = sizeof(struct lov_user_md_v1);
@@ -1561,7 +1585,7 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ccc_grouplock grouplock;
+ struct ll_grouplock grouplock;
int rc;
if (arg == 0) {
@@ -1575,14 +1599,14 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
spin_lock(&lli->lli_lock);
if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
CWARN("group lock already existed with gid %lu\n",
- fd->fd_grouplock.cg_gid);
+ fd->fd_grouplock.lg_gid);
spin_unlock(&lli->lli_lock);
return -EINVAL;
}
- LASSERT(!fd->fd_grouplock.cg_lock);
+ LASSERT(!fd->fd_grouplock.lg_lock);
spin_unlock(&lli->lli_lock);
- rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
+ rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
arg, (file->f_flags & O_NONBLOCK), &grouplock);
if (rc)
return rc;
@@ -1608,7 +1632,7 @@ static int ll_put_grouplock(struct inode *inode, struct file *file,
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ccc_grouplock grouplock;
+ struct ll_grouplock grouplock;
spin_lock(&lli->lli_lock);
if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
@@ -1616,11 +1640,11 @@ static int ll_put_grouplock(struct inode *inode, struct file *file,
CWARN("no group lock held\n");
return -EINVAL;
}
- LASSERT(fd->fd_grouplock.cg_lock);
+ LASSERT(fd->fd_grouplock.lg_lock);
- if (fd->fd_grouplock.cg_gid != arg) {
+ if (fd->fd_grouplock.lg_gid != arg) {
CWARN("group lock %lu doesn't match current id %lu\n",
- arg, fd->fd_grouplock.cg_gid);
+ arg, fd->fd_grouplock.lg_gid);
spin_unlock(&lli->lli_lock);
return -EINVAL;
}
@@ -1861,11 +1885,12 @@ error:
* This value is computed using stripe object version on OST.
* Version is computed using server side locking.
*
- * @param extent_lock Take extent lock. Not needed if a process is already
- * holding the OST object group locks.
+ * @param sync whether and how to sync on the OST side:
+ * 0: no sync
+ * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
+ * LL_DV_WR_FLUSH: drop all cached pages, LCK_PW on OSTs
*/
-int ll_data_version(struct inode *inode, __u64 *data_version,
- int extent_lock)
+int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
{
struct lov_stripe_md *lsm = NULL;
struct ll_sb_info *sbi = ll_i2sbi(inode);
@@ -1887,7 +1912,7 @@ int ll_data_version(struct inode *inode, __u64 *data_version,
goto out;
}
- rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, extent_lock);
+ rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, flags);
if (rc == 0) {
if (!(obdo->o_valid & OBD_MD_FLDATAVERSION))
rc = -EOPNOTSUPP;
@@ -1923,7 +1948,7 @@ int ll_hsm_release(struct inode *inode)
}
/* Grab latest data_version and [am]time values */
- rc = ll_data_version(inode, &data_version, 1);
+ rc = ll_data_version(inode, &data_version, LL_DV_WR_FLUSH);
if (rc != 0)
goto out;
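ll_data_version() now takes LL_DV_* flags instead of a boolean, so each caller states how much OST-side flushing it needs. A sketch of the mapping used by the call sites in this patch; the numeric flag values are assumed:

	/* Assumed values for illustration; the real definitions
	 * live in the userspace API headers. */
	#define LL_DV_RD_FLUSH	(1 << 0)	/* flush dirty pages, LCK_PR on OSTs */
	#define LL_DV_WR_FLUSH	(1 << 1)	/* drop cached pages, LCK_PW on OSTs */

	rc = ll_data_version(inode, &dv, 0);			/* no sync */
	rc = ll_data_version(inode, &dv, LL_DV_RD_FLUSH);	/* HSM copy start/end */
	rc = ll_data_version(inode, &dv, LL_DV_WR_FLUSH);	/* HSM release */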
@@ -1933,7 +1958,7 @@ int ll_hsm_release(struct inode *inode)
goto out;
}
- ll_merge_lvb(env, inode);
+ ll_merge_attr(env, inode);
cl_env_nested_put(&nest, env);
/* Release the file.
@@ -2227,8 +2252,8 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
int flags, rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%x\n", inode->i_ino,
- inode->i_generation, inode, cmd);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),cmd=%x\n",
+ PFID(ll_inode2fid(inode)), inode, cmd);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
/* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
@@ -2331,9 +2356,8 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(&idv, (char __user *)arg, sizeof(idv)))
return -EFAULT;
- rc = ll_data_version(inode, &idv.idv_version,
- !(idv.idv_flags & LL_DV_NOFLUSH));
-
+ idv.idv_flags &= LL_DV_RD_FLUSH | LL_DV_WR_FLUSH;
+ rc = ll_data_version(inode, &idv.idv_version, idv.idv_flags);
if (rc == 0 && copy_to_user((char __user *)arg, &idv,
sizeof(idv)))
return -EFAULT;
@@ -2499,7 +2523,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
rc = och->och_flags &
(FMODE_READ | FMODE_WRITE);
unlock_res_and_lock(lock);
- ldlm_lock_put(lock);
+ LDLM_LOCK_PUT(lock);
}
}
mutex_unlock(&lli->lli_och_mutex);
@@ -2537,9 +2561,8 @@ static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
(origin == SEEK_CUR) ? file->f_pos : 0);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), to=%llu=%#llx(%d)\n",
- inode->i_ino, inode->i_generation, inode, retval, retval,
- origin);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), to=%llu=%#llx(%d)\n",
+ PFID(ll_inode2fid(inode)), inode, retval, retval, origin);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1);
if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) {
@@ -2603,8 +2626,8 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
if (IS_ERR(env))
return PTR_ERR(env);
- io = ccc_env_thread_io(env);
- io->ci_obj = cl_i2info(inode)->lli_clob;
+ io = vvp_env_thread_io(env);
+ io->ci_obj = ll_i2info(inode)->lli_clob;
io->ci_ignore_layout = ignore_layout;
/* initialize parameters for sync */
@@ -2634,8 +2657,8 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
struct ptlrpc_request *req;
int rc, err;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
- inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1);
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
@@ -2693,8 +2716,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
int rc;
int rc2 = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu file_lock=%p\n",
- inode->i_ino, file_lock);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n",
+ PFID(ll_inode2fid(inode)), file_lock);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1);
@@ -2777,9 +2800,9 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- CDEBUG(D_DLMTRACE, "inode=%lu, pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n",
- inode->i_ino, flock.l_flock.pid, flags, einfo.ei_mode,
- flock.l_flock.start, flock.l_flock.end);
+ CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n",
+ PFID(ll_inode2fid(inode)), flock.l_flock.pid, flags,
+ einfo.ei_mode, flock.l_flock.start, flock.l_flock.end);
rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL,
op_data, &lockh, &flock, 0, NULL /* req */, flags);
@@ -2901,8 +2924,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
struct obd_export *exp;
int rc = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%pd\n",
- inode->i_ino, inode->i_generation, inode, dentry);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%pd\n",
+ PFID(ll_inode2fid(inode)), inode, dentry);
exp = ll_i2mdexp(inode);
@@ -2998,9 +3021,9 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
/* if object isn't regular file, don't validate size */
if (!S_ISREG(inode->i_mode)) {
- LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_lvb.lvb_atime;
- LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_lvb.lvb_mtime;
- LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_lvb.lvb_ctime;
+ LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_atime;
+ LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_mtime;
+ LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_ctime;
} else {
/* In case of restore, the MDT has the right size and has
* already sent it back without granting the layout lock,
@@ -3124,8 +3147,8 @@ int ll_inode_permission(struct inode *inode, int mask)
return rc;
}
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), inode mode %x mask %o\n",
- inode->i_ino, inode->i_generation, inode, inode->i_mode, mask);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
+ PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT)
return lustre_check_remote_perm(inode, mask);
@@ -3335,10 +3358,10 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
int rc;
CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
- PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY),
+ PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
lock->l_lvb_data, lock->l_lvb_len);
- if (lock->l_lvb_data && (lock->l_flags & LDLM_FL_LVB_READY))
+ if (lock->l_lvb_data && ldlm_is_lvb_ready(lock))
return 0;
/* if layout lock was granted right away, the layout is returned
@@ -3415,14 +3438,14 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
LASSERT(lock);
LASSERT(ldlm_has_layout(lock));
- LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d",
- inode, PFID(&lli->lli_fid), reconf);
+ LDLM_DEBUG(lock, "File "DFID"(%p) being reconfigured: %d",
+ PFID(&lli->lli_fid), inode, reconf);
/* in case this is a caching lock and reinstate with new inode */
md_set_lock_data(sbi->ll_md_exp, &lockh->cookie, inode, NULL);
lock_res_and_lock(lock);
- lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY);
+ lvb_ready = ldlm_is_lvb_ready(lock);
unlock_res_and_lock(lock);
/* checking lvb_ready is racy but this is okay. The worst case is
* that multiple processes may configure the file at the same time.
@@ -3487,9 +3510,9 @@ out:
/* wait for IO to complete if it's still being used. */
if (wait_layout) {
- CDEBUG(D_INODE, "%s: %p/" DFID " wait for layout reconf.\n",
+ CDEBUG(D_INODE, "%s: "DFID"(%p) wait for layout reconf\n",
ll_get_fsname(inode->i_sb, NULL, 0),
- inode, PFID(&lli->lli_fid));
+ PFID(&lli->lli_fid), inode);
memset(&conf, 0, sizeof(conf));
conf.coc_opc = OBJECT_CONF_WAIT;
@@ -3498,7 +3521,8 @@ out:
if (rc == 0)
rc = -EAGAIN;
- CDEBUG(D_INODE, "file: " DFID " waiting layout return: %d.\n",
+ CDEBUG(D_INODE, "%s: file="DFID" waiting layout return: %d.\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
PFID(&lli->lli_fid), rc);
}
return rc;
@@ -3571,9 +3595,9 @@ again:
it.it_op = IT_LAYOUT;
lockh.cookie = 0ULL;
- LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/" DFID "",
- ll_get_fsname(inode->i_sb, NULL, 0), inode,
- PFID(&lli->lli_fid));
+ LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file "DFID"(%p)",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(&lli->lli_fid), inode);
rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh,
NULL, 0, NULL, 0);
@@ -3601,7 +3625,7 @@ again:
/**
* This function sends a restore request to the MDT
*/
-int ll_layout_restore(struct inode *inode)
+int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length)
{
struct hsm_user_request *hur;
int len, rc;
@@ -3617,9 +3641,10 @@ int ll_layout_restore(struct inode *inode)
hur->hur_request.hr_flags = 0;
memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid,
sizeof(hur->hur_user_item[0].hui_fid));
- hur->hur_user_item[0].hui_extent.length = -1;
+ hur->hur_user_item[0].hui_extent.offset = offset;
+ hur->hur_user_item[0].hui_extent.length = length;
hur->hur_request.hr_itemcount = 1;
- rc = obd_iocontrol(LL_IOC_HSM_REQUEST, cl_i2sbi(inode)->ll_md_exp,
+ rc = obd_iocontrol(LL_IOC_HSM_REQUEST, ll_i2sbi(inode)->ll_md_exp,
len, hur, NULL);
kfree(hur);
return rc;
diff --git a/drivers/staging/lustre/lustre/lclient/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index c4e8a0878..d8ea75424 100644
--- a/drivers/staging/lustre/lustre/lclient/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -52,7 +52,6 @@
#include <linux/file.h>
#include "../include/cl_object.h"
-#include "../include/lclient.h"
#include "../llite/llite_internal.h"
static const struct cl_lock_descr whole_file = {
@@ -70,14 +69,14 @@ static const struct cl_lock_descr whole_file = {
blkcnt_t dirty_cnt(struct inode *inode)
{
blkcnt_t cnt = 0;
- struct ccc_object *vob = cl_inode2ccc(inode);
+ struct vvp_object *vob = cl_inode2vvp(inode);
void *results[1];
if (inode->i_mapping)
cnt += radix_tree_gang_lookup_tag(&inode->i_mapping->page_tree,
results, 0, 1,
PAGECACHE_TAG_DIRTY);
- if (cnt == 0 && atomic_read(&vob->cob_mmap_cnt) > 0)
+ if (cnt == 0 && atomic_read(&vob->vob_mmap_cnt) > 0)
cnt = 1;
return (cnt > 0) ? 1 : 0;
@@ -86,17 +85,17 @@ blkcnt_t dirty_cnt(struct inode *inode)
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
struct inode *inode, struct cl_object *clob, int agl)
{
- struct cl_lock_descr *descr = &ccc_env_info(env)->cti_descr;
- struct cl_inode_info *lli = cl_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
const struct lu_fid *fid = lu_object_fid(&clob->co_lu);
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_lock *lock;
int result;
result = 0;
if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) {
- CDEBUG(D_DLMTRACE, "Glimpsing inode "DFID"\n", PFID(fid));
+ CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid));
if (lli->lli_has_smd) {
+ struct cl_lock *lock = vvp_env_lock(env);
+ struct cl_lock_descr *descr = &lock->cll_descr;
+
/* NOTE: this looks like a DLM lock request, but it may
* not be one. Due to CEF_ASYNC flag (translated
* to LDLM_FL_HAS_INTENT by osc), this is
@@ -113,11 +112,10 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
*/
*descr = whole_file;
descr->cld_obj = clob;
- descr->cld_mode = CLM_PHANTOM;
+ descr->cld_mode = CLM_READ;
descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
if (agl)
descr->cld_enq_flags |= CEF_AGL;
- cio->cui_glimpse = 1;
/*
* CEF_ASYNC is used because glimpse sub-locks cannot
* deadlock (because they never conflict with other
@@ -126,21 +124,13 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
* CEF_MUST protects glimpse lock from conversion into
* a lockless mode.
*/
- lock = cl_lock_request(env, io, descr, "glimpse",
- current);
- cio->cui_glimpse = 0;
-
- if (!lock)
- return 0;
-
- if (IS_ERR(lock))
- return PTR_ERR(lock);
+ result = cl_lock_request(env, io, lock);
+ if (result < 0)
+ return result;
- LASSERT(agl == 0);
- result = cl_wait(env, lock);
- if (result == 0) {
- cl_merge_lvb(env, inode);
- if (cl_isize_read(inode) > 0 &&
+ if (!agl) {
+ ll_merge_attr(env, inode);
+ if (i_size_read(inode) > 0 &&
inode->i_blocks == 0) {
/*
* LU-417: Add dirty pages block count
@@ -150,12 +140,11 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
*/
inode->i_blocks = dirty_cnt(inode);
}
- cl_unuse(env, lock);
}
- cl_lock_release(env, lock, "glimpse", current);
+ cl_lock_release(env, lock);
} else {
CDEBUG(D_DLMTRACE, "No objects for inode\n");
- cl_merge_lvb(env, inode);
+ ll_merge_attr(env, inode);
}
}
@@ -167,22 +156,24 @@ static int cl_io_get(struct inode *inode, struct lu_env **envout,
{
struct lu_env *env;
struct cl_io *io;
- struct cl_inode_info *lli = cl_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *clob = lli->lli_clob;
int result;
- if (S_ISREG(cl_inode_mode(inode))) {
+ if (S_ISREG(inode->i_mode)) {
env = cl_env_get(refcheck);
if (!IS_ERR(env)) {
- io = ccc_env_thread_io(env);
+ io = vvp_env_thread_io(env);
io->ci_obj = clob;
*envout = env;
*ioout = io;
result = 1;
- } else
+ } else {
result = PTR_ERR(env);
- } else
+ }
+ } else {
result = 0;
+ }
return result;
}
@@ -231,14 +222,11 @@ int cl_local_size(struct inode *inode)
{
struct lu_env *env = NULL;
struct cl_io *io = NULL;
- struct ccc_thread_info *cti;
struct cl_object *clob;
- struct cl_lock_descr *descr;
- struct cl_lock *lock;
int result;
int refcheck;
- if (!cl_i2info(inode)->lli_has_smd)
+ if (!ll_i2info(inode)->lli_has_smd)
return 0;
result = cl_io_get(inode, &env, &io, &refcheck);
@@ -247,22 +235,19 @@ int cl_local_size(struct inode *inode)
clob = io->ci_obj;
result = cl_io_init(env, io, CIT_MISC, clob);
- if (result > 0)
+ if (result > 0) {
result = io->ci_result;
- else if (result == 0) {
- cti = ccc_env_info(env);
- descr = &cti->cti_descr;
-
- *descr = whole_file;
- descr->cld_obj = clob;
- lock = cl_lock_peek(env, io, descr, "localsize", current);
- if (lock) {
- cl_merge_lvb(env, inode);
- cl_unuse(env, lock);
- cl_lock_release(env, lock, "localsize", current);
- result = 0;
- } else
- result = -ENODATA;
+ } else if (result == 0) {
+ struct cl_lock *lock = vvp_env_lock(env);
+
+ lock->cll_descr = whole_file;
+ lock->cll_descr.cld_enq_flags = CEF_PEEK;
+ lock->cll_descr.cld_obj = clob;
+ result = cl_lock_request(env, io, lock);
+ if (result == 0) {
+ ll_merge_attr(env, inode);
+ cl_lock_release(env, lock);
+ }
}
cl_io_fini(env, io);
cl_env_put(env, &refcheck);
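
The glimpse changes above all follow one conversion: the old
cl_lock_peek()/cl_lock_request() plus cl_wait()/cl_unuse()/cl_lock_release()
dance collapses into a request/release pair operating on the per-env lock
from vvp_env_lock(). A condensed, illustrative sketch of the new calling
convention (names taken from the hunks above; not a function in the patch):

	static int glimpse_whole_file(const struct lu_env *env, struct cl_io *io,
				      struct cl_object *clob)
	{
		struct cl_lock *lock = vvp_env_lock(env);
		int rc;

		lock->cll_descr = whole_file;		/* [0, CL_PAGE_EOF] */
		lock->cll_descr.cld_obj = clob;
		lock->cll_descr.cld_enq_flags = CEF_ASYNC | CEF_MUST;

		rc = cl_lock_request(env, io, lock);	/* enqueue */
		if (rc < 0)
			return rc;	/* nothing to release on failure */

		/* ... read the attributes the lock protects, e.g.
		 * ll_merge_attr(env, inode) ...
		 */
		cl_lock_release(env, lock);	/* paired with every success */
		return 0;
	}
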
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
new file mode 100644
index 000000000..6c00715b4
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -0,0 +1,327 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2015, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * cl code shared between vvp and liblustre (and other Lustre clients in the
+ * future).
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include "../../include/linux/libcfs/libcfs.h"
+# include <linux/fs.h>
+# include <linux/sched.h>
+# include <linux/mm.h>
+# include <linux/quotaops.h>
+# include <linux/highmem.h>
+# include <linux/pagemap.h>
+# include <linux/rbtree.h>
+
+#include "../include/obd.h"
+#include "../include/obd_support.h"
+#include "../include/lustre_fid.h"
+#include "../include/lustre_lite.h"
+#include "../include/lustre_dlm.h"
+#include "../include/lustre_ver.h"
+#include "../include/lustre_mdc.h"
+#include "../include/cl_object.h"
+
+#include "../llite/llite_internal.h"
+
+/*
+ * ccc_ prefix stands for "Common Client Code".
+ */
+
+/*****************************************************************************
+ *
+ * Vvp device and device type functions.
+ *
+ */
+
+/**
+ * An `emergency' environment used by cl_inode_fini() when cl_env_get()
+ * fails. Access to this environment is serialized by cl_inode_fini_guard
+ * mutex.
+ */
+struct lu_env *cl_inode_fini_env;
+int cl_inode_fini_refcheck;
+
+/**
+ * A mutex serializing calls to cl_inode_fini() under extreme memory
+ * pressure, when environments cannot be allocated.
+ */
+static DEFINE_MUTEX(cl_inode_fini_guard);
+
+int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
+{
+ struct lu_env *env;
+ struct cl_io *io;
+ int result;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ io = vvp_env_thread_io(env);
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+
+ io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
+ io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
+ io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
+ io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
+ io->u.ci_setattr.sa_valid = attr->ia_valid;
+
+again:
+ if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
+ struct vvp_io *vio = vvp_env_io(env);
+
+ if (attr->ia_valid & ATTR_FILE)
+ /* populate the file descriptor for ftruncate to honor
+ * group lock - see LU-787
+ */
+ vio->vui_fd = LUSTRE_FPRIVATE(attr->ia_file);
+
+ result = cl_io_loop(env, io);
+ } else {
+ result = io->ci_result;
+ }
+ cl_io_fini(env, io);
+ if (unlikely(io->ci_need_restart))
+ goto again;
+	/* HSM import case: the file is released and cannot be restored;
+	 * no need to fail unless restore registration itself failed
+	 * with -ENODATA
+	 */
+ if (result == -ENODATA && io->ci_restore_needed &&
+ io->ci_result != -ENODATA)
+ result = 0;
+ cl_env_put(env, &refcheck);
+ return result;
+}
+
+/**
+ * Initialize or update CLIO structures for regular files when new
+ * meta-data arrives from the server.
+ *
+ * \param inode regular file inode
+ * \param md new file metadata from MDS
+ * - allocates cl_object if necessary,
+ * - updates the layout, if the object was already here.
+ */
+int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
+{
+ struct lu_env *env;
+ struct ll_inode_info *lli;
+ struct cl_object *clob;
+ struct lu_site *site;
+ struct lu_fid *fid;
+ struct cl_object_conf conf = {
+ .coc_inode = inode,
+ .u = {
+ .coc_md = md
+ }
+ };
+ int result = 0;
+ int refcheck;
+
+ LASSERT(md->body->valid & OBD_MD_FLID);
+ LASSERT(S_ISREG(inode->i_mode));
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ site = ll_i2sbi(inode)->ll_site;
+ lli = ll_i2info(inode);
+ fid = &lli->lli_fid;
+ LASSERT(fid_is_sane(fid));
+
+ if (!lli->lli_clob) {
+		/* clob is a slave of the inode; an empty lli_clob means this
+		 * is a new inode with no clob in cache for the given fid, so
+		 * it is unnecessary to perform lookup-alloc-lookup-insert;
+		 * just alloc and insert directly.
+		 */
+ LASSERT(inode->i_state & I_NEW);
+ conf.coc_lu.loc_flags = LOC_F_NEW;
+ clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
+ fid, &conf);
+ if (!IS_ERR(clob)) {
+			/*
+			 * No locking is necessary, as the new inode is
+			 * locked by the I_NEW bit.
+			 */
+ lli->lli_clob = clob;
+ lli->lli_has_smd = lsm_has_objects(md->lsm);
+ lu_object_ref_add(&clob->co_lu, "inode", inode);
+ } else {
+ result = PTR_ERR(clob);
+ }
+ } else {
+ result = cl_conf_set(env, lli->lli_clob, &conf);
+ }
+
+ cl_env_put(env, &refcheck);
+
+ if (result != 0)
+ CERROR("Failure to initialize cl object " DFID ": %d\n",
+ PFID(fid), result);
+ return result;
+}
+
+/**
+ * Wait for others to drop their references to the object first, then drop
+ * the last one ourselves, which destroys the object immediately.
+ * Must be called after cl_object_kill() against this object.
+ *
+ * The reason for this is that destroying the top object waits for its sub
+ * objects to be destroyed first, so we can't let the bottom layer (e.g. ASTs)
+ * initiate destruction of the top object, which may deadlock. See bz22520.
+ */
+static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
+{
+ struct lu_object_header *header = obj->co_lu.lo_header;
+ wait_queue_t waiter;
+
+ if (unlikely(atomic_read(&header->loh_ref) != 1)) {
+ struct lu_site *site = obj->co_lu.lo_dev->ld_site;
+ struct lu_site_bkt_data *bkt;
+
+ bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
+
+ init_waitqueue_entry(&waiter, current);
+ add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&header->loh_ref) == 1)
+ break;
+ schedule();
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
+ }
+
+ cl_object_put(env, obj);
+}
+
+void cl_inode_fini(struct inode *inode)
+{
+ struct lu_env *env;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct cl_object *clob = lli->lli_clob;
+ int refcheck;
+ int emergency;
+
+ if (clob) {
+ void *cookie;
+
+ cookie = cl_env_reenter();
+ env = cl_env_get(&refcheck);
+ emergency = IS_ERR(env);
+ if (emergency) {
+ mutex_lock(&cl_inode_fini_guard);
+ LASSERT(cl_inode_fini_env);
+ cl_env_implant(cl_inode_fini_env, &refcheck);
+ env = cl_inode_fini_env;
+ }
+ /*
+ * cl_object cache is a slave to inode cache (which, in turn
+ * is a slave to dentry cache), don't keep cl_object in memory
+ * when its master is evicted.
+ */
+ cl_object_kill(env, clob);
+ lu_object_ref_del(&clob->co_lu, "inode", inode);
+ cl_object_put_last(env, clob);
+ lli->lli_clob = NULL;
+ if (emergency) {
+ cl_env_unplant(cl_inode_fini_env, &refcheck);
+ mutex_unlock(&cl_inode_fini_guard);
+ } else {
+ cl_env_put(env, &refcheck);
+ }
+ cl_env_reexit(cookie);
+ }
+}
+
+/**
+ * Build an inode number from the passed @fid.
+ */
+__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
+{
+ if (BITS_PER_LONG == 32 || api32)
+ return fid_flatten32(fid);
+ else
+ return fid_flatten(fid);
+}
+
+/**
+ * Build an inode generation from the passed @fid. If the FID overflows the
+ * 32-bit inode number, return a non-zero generation to distinguish them.
+ */
+__u32 cl_fid_build_gen(const struct lu_fid *fid)
+{
+ __u32 gen;
+
+ if (fid_is_igif(fid)) {
+ gen = lu_igif_gen(fid);
+ return gen;
+ }
+
+ gen = fid_flatten(fid) >> 32;
+ return gen;
+}
+
+/* The lsm is unreliable since the HSM implementation, as the layout can be
+ * changed at any time. This is only to support old, non-clio-ized
+ * interfaces. It will cause a deadlock if clio operations are called with
+ * this extra layout refcount, because if the layout changed during the IO,
+ * ll_layout_refresh() will have to wait for the refcount to become zero to
+ * destroy the older layout.
+ *
+ * Note that the lsm returned by this function may not be valid unless
+ * called inside the layout lock - MDS_INODELOCK_LAYOUT.
+ */
+struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
+{
+ return lov_lsm_get(ll_i2info(inode)->lli_clob);
+}
+
+inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
+{
+ lov_lsm_put(ll_i2info(inode)->lli_clob, lsm);
+}
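
cl_fid_build_ino() and cl_fid_build_gen() above split a flattened FID into
an inode number plus a generation that disambiguates 32-bit overflow. A
standalone userspace model of that split; fid_flatten_stub() is a
hypothetical stand-in for Lustre's fid_flatten(), and the 32-bit path is
shown as plain truncation where the kernel uses a separate fid_flatten32():

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-in: "some 64-bit value derived from the FID". */
	static uint64_t fid_flatten_stub(uint64_t seq, uint32_t oid)
	{
		return (seq << 24) + oid;
	}

	int main(void)
	{
		uint64_t flat = fid_flatten_stub(0x200000401ULL, 7);
		uint64_t ino64 = flat;			/* 64-bit API */
		uint32_t ino32 = (uint32_t)flat;	/* 32-bit API (truncated here) */
		uint32_t gen = (uint32_t)(flat >> 32);	/* high bits -> generation */

		printf("ino64=%llu ino32=%u gen=%u\n",
		       (unsigned long long)ino64, ino32, gen);
		return 0;
	}
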
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
index d80bcedd7..12f3e71f4 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
@@ -41,9 +41,9 @@
#include "../include/obd_support.h"
#include "../include/obd.h"
#include "../include/cl_object.h"
-#include "../include/lclient.h"
#include "../include/lustre_lite.h"
+#include "llite_internal.h"
/* Initialize the default and maximum LOV EA and cookie sizes. This allows
* us to make MDS RPCs with large enough reply buffers to hold the
@@ -126,7 +126,7 @@ int cl_ocd_update(struct obd_device *host,
#define GROUPLOCK_SCOPE "grouplock"
int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
- struct ccc_grouplock *cg)
+ struct ll_grouplock *cg)
{
struct lu_env *env;
struct cl_io *io;
@@ -140,20 +140,22 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
if (IS_ERR(env))
return PTR_ERR(env);
- io = ccc_env_thread_io(env);
+ io = vvp_env_thread_io(env);
io->ci_obj = obj;
io->ci_ignore_layout = 1;
rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (rc) {
+ if (rc != 0) {
+ cl_io_fini(env, io);
+ cl_env_put(env, &refcheck);
/* Does not make sense to take GL for released layout */
if (rc > 0)
rc = -ENOTSUPP;
- cl_env_put(env, &refcheck);
return rc;
}
- descr = &ccc_env_info(env)->cti_descr;
+ lock = vvp_env_lock(env);
+ descr = &lock->cll_descr;
descr->cld_obj = obj;
descr->cld_start = 0;
descr->cld_end = CL_PAGE_EOF;
@@ -163,38 +165,37 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
descr->cld_enq_flags = enqflags;
- lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, current);
- if (IS_ERR(lock)) {
+ rc = cl_lock_request(env, io, lock);
+ if (rc < 0) {
cl_io_fini(env, io);
cl_env_put(env, &refcheck);
- return PTR_ERR(lock);
+ return rc;
}
- cg->cg_env = cl_env_get(&refcheck);
- cg->cg_io = io;
- cg->cg_lock = lock;
- cg->cg_gid = gid;
- LASSERT(cg->cg_env == env);
+ cg->lg_env = cl_env_get(&refcheck);
+ cg->lg_io = io;
+ cg->lg_lock = lock;
+ cg->lg_gid = gid;
+ LASSERT(cg->lg_env == env);
cl_env_unplant(env, &refcheck);
return 0;
}
-void cl_put_grouplock(struct ccc_grouplock *cg)
+void cl_put_grouplock(struct ll_grouplock *cg)
{
- struct lu_env *env = cg->cg_env;
- struct cl_io *io = cg->cg_io;
- struct cl_lock *lock = cg->cg_lock;
+ struct lu_env *env = cg->lg_env;
+ struct cl_io *io = cg->lg_io;
+ struct cl_lock *lock = cg->lg_lock;
int refcheck;
- LASSERT(cg->cg_env);
- LASSERT(cg->cg_gid);
+ LASSERT(cg->lg_env);
+ LASSERT(cg->lg_gid);
cl_env_implant(env, &refcheck);
cl_env_put(env, &refcheck);
- cl_unuse(env, lock);
- cl_lock_release(env, lock, GROUPLOCK_SCOPE, current);
+ cl_lock_release(env, lock);
cl_io_fini(env, io);
cl_env_put(env, NULL);
}
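
The renamed struct ll_grouplock carries the env/io/lock triple from
cl_get_grouplock() to the matching cl_put_grouplock(). An illustrative
caller pairing (not from the patch); note that on failure the getter has
already torn down the io and env itself, per the hunks above:

	static int with_grouplock(struct cl_object *obj, unsigned long gid)
	{
		struct ll_grouplock cg;
		int rc;

		rc = cl_get_grouplock(obj, gid, 0 /* blocking */, &cg);
		if (rc)
			return rc;

		/* ... caller holds the group lock here ... */

		cl_put_grouplock(&cg);	/* releases lock, io and env */
		return 0;
	}
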
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index a55ac4dcc..2df551d3a 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -46,31 +46,31 @@
#include "llite_internal.h"
/** records that a write is in flight */
-void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
+void vvp_write_pending(struct vvp_object *club, struct vvp_page *page)
{
- struct ll_inode_info *lli = ll_i2info(club->cob_inode);
+ struct ll_inode_info *lli = ll_i2info(club->vob_inode);
spin_lock(&lli->lli_lock);
lli->lli_flags |= LLIF_SOM_DIRTY;
- if (page && list_empty(&page->cpg_pending_linkage))
- list_add(&page->cpg_pending_linkage, &club->cob_pending_list);
+ if (page && list_empty(&page->vpg_pending_linkage))
+ list_add(&page->vpg_pending_linkage, &club->vob_pending_list);
spin_unlock(&lli->lli_lock);
}
/** records that a write has completed */
-void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
+void vvp_write_complete(struct vvp_object *club, struct vvp_page *page)
{
- struct ll_inode_info *lli = ll_i2info(club->cob_inode);
+ struct ll_inode_info *lli = ll_i2info(club->vob_inode);
int rc = 0;
spin_lock(&lli->lli_lock);
- if (page && !list_empty(&page->cpg_pending_linkage)) {
- list_del_init(&page->cpg_pending_linkage);
+ if (page && !list_empty(&page->vpg_pending_linkage)) {
+ list_del_init(&page->vpg_pending_linkage);
rc = 1;
}
spin_unlock(&lli->lli_lock);
if (rc)
- ll_queue_done_writing(club->cob_inode, 0);
+ ll_queue_done_writing(club->vob_inode, 0);
}
/** Queues DONE_WRITING if
@@ -80,25 +80,25 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
void ll_queue_done_writing(struct inode *inode, unsigned long flags)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
+ struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
spin_lock(&lli->lli_lock);
lli->lli_flags |= flags;
if ((lli->lli_flags & LLIF_DONE_WRITING) &&
- list_empty(&club->cob_pending_list)) {
+ list_empty(&club->vob_pending_list)) {
struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CWARN("ino %lu/%u(flags %u) som valid it just after recovery\n",
- inode->i_ino, inode->i_generation,
- lli->lli_flags);
+ CWARN("%s: file "DFID"(flags %u) Size-on-MDS valid, done writing allowed and no diry pages\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), lli->lli_flags);
/* DONE_WRITING is allowed and inode has no dirty page. */
spin_lock(&lcq->lcq_lock);
LASSERT(list_empty(&lli->lli_close_list));
- CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
- inode->i_ino, inode->i_generation);
+ CDEBUG(D_INODE, "adding inode "DFID" to close list\n",
+ PFID(ll_inode2fid(inode)));
list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
/* Avoid a concurrent insertion into the close thread queue:
@@ -124,9 +124,9 @@ void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
op_data->op_flags |= MF_SOM_CHANGE;
/* Check if Size-on-MDS attributes are valid. */
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
- inode->i_ino, inode->i_generation,
- lli->lli_flags);
+ CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), lli->lli_flags);
if (!cl_local_size(inode)) {
/* Send Size-on-MDS Attributes if valid. */
@@ -140,10 +140,10 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
struct obd_client_handle **och, unsigned long flags)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
+ struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
spin_lock(&lli->lli_lock);
- if (!(list_empty(&club->cob_pending_list))) {
+ if (!(list_empty(&club->vob_pending_list))) {
if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
LASSERT(*och);
LASSERT(!lli->lli_pending_och);
@@ -198,7 +198,7 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
}
}
- LASSERT(list_empty(&club->cob_pending_list));
+ LASSERT(list_empty(&club->vob_pending_list));
lli->lli_flags &= ~LLIF_SOM_DIRTY;
spin_unlock(&lli->lli_lock);
ll_done_writing_attr(inode, op_data);
@@ -221,9 +221,9 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
LASSERT(op_data);
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
- inode->i_ino, inode->i_generation,
- lli->lli_flags);
+ CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), lli->lli_flags);
oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
if (!oa) {
@@ -241,9 +241,9 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
if (rc) {
oa->o_valid = 0;
if (rc != -ENOENT)
- CERROR("inode_getattr failed (%d): unable to send a Size-on-MDS attribute update for inode %lu/%u\n",
- rc, inode->i_ino,
- inode->i_generation);
+ CERROR("%s: inode_getattr failed - unable to send a Size-on-MDS attribute update for inode "DFID": rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), rc);
} else {
CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
PFID(&lli->lli_fid));
@@ -302,9 +302,11 @@ static void ll_done_writing(struct inode *inode)
* OSTs and send setattr to back to MDS.
*/
rc = ll_som_update(inode, op_data);
- else if (rc)
- CERROR("inode %lu mdc done_writing failed: rc = %d\n",
- inode->i_ino, rc);
+ else if (rc) {
+ CERROR("%s: inode "DFID" mdc done_writing failed: rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), rc);
+ }
out:
ll_finish_md_op_data(op_data);
if (och) {
@@ -323,8 +325,9 @@ static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
lli_close_list);
list_del_init(&lli->lli_close_list);
- } else if (atomic_read(&lcq->lcq_stop))
+ } else if (atomic_read(&lcq->lcq_stop)) {
lli = ERR_PTR(-EALREADY);
+ }
spin_unlock(&lcq->lcq_lock);
return lli;
@@ -348,8 +351,8 @@ static int ll_close_thread(void *arg)
break;
inode = ll_info2i(lli);
- CDEBUG(D_INFO, "done_writing for inode %lu/%u\n",
- inode->i_ino, inode->i_generation);
+ CDEBUG(D_INFO, "done_writing for inode "DFID"\n",
+ PFID(ll_inode2fid(inode)));
ll_done_writing(inode);
iput(inode);
}
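
Nearly every message change in this file follows the same convention: drop
the reuse-prone (i_ino, i_generation) pair and print the filesystem name
plus the immutable FID. The recurring before/after pattern, abstracted from
the hunks above:

	/* old style - ambiguous once the inode number is reused */
	CDEBUG(D_INODE, "inode %lu/%u ...\n",
	       inode->i_ino, inode->i_generation);

	/* new style - stable across reuse and greppable per filesystem */
	CDEBUG(D_INODE, "%s: inode "DFID" ...\n",
	       ll_get_fsname(inode->i_sb, NULL, 0),
	       PFID(ll_inode2fid(inode)));
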
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index e3c0f1dd4..3f2f30b65 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -43,11 +43,11 @@
/* for struct cl_lock_descr and struct cl_io */
#include "../include/cl_object.h"
-#include "../include/lclient.h"
#include "../include/lustre_mdc.h"
#include "../include/lustre_intent.h"
#include <linux/compat.h>
#include <linux/posix_acl_xattr.h>
+#include "vvp_internal.h"
#ifndef FMODE_EXEC
#define FMODE_EXEC 0
@@ -99,6 +99,13 @@ struct ll_remote_perm {
*/
};
+struct ll_grouplock {
+ struct lu_env *lg_env;
+ struct cl_io *lg_io;
+ struct cl_lock *lg_lock;
+ unsigned long lg_gid;
+};
+
enum lli_flags {
/* MDS has an authority for the Size-on-MDS attributes. */
LLIF_MDS_SIZE_LOCK = (1 << 0),
@@ -161,7 +168,9 @@ struct ll_inode_info {
struct inode lli_vfs_inode;
/* the most recent timestamps obtained from mds */
- struct ost_lvb lli_lvb;
+ s64 lli_atime;
+ s64 lli_mtime;
+ s64 lli_ctime;
spinlock_t lli_agl_lock;
 	/* Try to make the d::member and f::member aligned. Before using
@@ -328,6 +337,7 @@ enum ra_stat {
RA_STAT_EOF,
RA_STAT_MAX_IN_FLIGHT,
RA_STAT_WRONG_GRAB_PAGE,
+ RA_STAT_FAILED_REACH_END,
_NR_RA_STAT,
};
@@ -481,6 +491,12 @@ struct ll_sb_info {
struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
+ /*
+ * Used to track "unstable" pages on a client, and maintain a
+ * LRU list of clean pages. An "unstable" page is defined as
+ * any page which is sent to a server as part of a bulk request,
+ * but is uncommitted to stable storage.
+ */
struct cl_client_cache ll_cache;
struct lprocfs_stats *ll_ra_stats;
@@ -525,13 +541,6 @@ struct ll_sb_info {
struct completion ll_kobj_unregister;
};
-struct ll_ra_read {
- pgoff_t lrr_start;
- pgoff_t lrr_count;
- struct task_struct *lrr_reader;
- struct list_head lrr_linkage;
-};
-
/*
* per file-descriptor read-ahead data.
*/
@@ -590,12 +599,6 @@ struct ll_readahead_state {
*/
unsigned long ras_request_index;
/*
- * list of struct ll_ra_read's one per read(2) call current in
- * progress against this file descriptor. Used by read-ahead code,
- * protected by ->ras_lock.
- */
- struct list_head ras_read_beads;
- /*
* The following 3 items are used for detecting the stride I/O
* mode.
* In stride I/O mode,
@@ -622,7 +625,7 @@ extern struct kmem_cache *ll_file_data_slab;
struct lustre_handle;
struct ll_file_data {
struct ll_readahead_state fd_ras;
- struct ccc_grouplock fd_grouplock;
+ struct ll_grouplock fd_grouplock;
__u64 lfd_pos;
__u32 fd_flags;
fmode_t fd_omode;
@@ -663,8 +666,16 @@ static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
#endif
}
-void ll_ra_read_in(struct file *f, struct ll_ra_read *rar);
-void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar);
+void ll_ras_enter(struct file *f);
+
+/* llite/lcommon_misc.c */
+int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
+int cl_ocd_update(struct obd_device *host,
+ struct obd_device *watched,
+ enum obd_notify_event ev, void *owner, void *data);
+int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
+ struct ll_grouplock *cg);
+void cl_put_grouplock(struct ll_grouplock *cg);
/* llite/lproc_llite.c */
int ldebugfs_register_mountpoint(struct dentry *parent,
@@ -697,15 +708,15 @@ int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
/* llite/rw.c */
-int ll_prepare_write(struct file *, struct page *, unsigned from, unsigned to);
-int ll_commit_write(struct file *, struct page *, unsigned from, unsigned to);
int ll_writepage(struct page *page, struct writeback_control *wbc);
int ll_writepages(struct address_space *, struct writeback_control *wbc);
int ll_readpage(struct file *file, struct page *page);
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
int ll_readahead(const struct lu_env *env, struct cl_io *io,
- struct ll_readahead_state *ras, struct address_space *mapping,
- struct cl_page_list *queue, int flags);
+ struct cl_page_list *queue, struct ll_readahead_state *ras,
+ bool hit);
+struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage);
+void ll_cl_fini(struct ll_cl_context *lcc);
extern const struct address_space_operations ll_aops;
@@ -740,7 +751,7 @@ struct posix_acl *ll_get_acl(struct inode *inode, int type);
int ll_inode_permission(struct inode *inode, int mask);
int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
- int flags, struct lov_user_md *lum,
+ __u64 flags, struct lov_user_md *lum,
int lum_size);
int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
struct lov_mds_md **lmm, int *lmm_size,
@@ -750,9 +761,9 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
int *lmm_size, struct ptlrpc_request **request);
int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
-int ll_merge_lvb(const struct lu_env *env, struct inode *inode);
+int ll_merge_attr(const struct lu_env *env, struct inode *inode);
int ll_fid2path(struct inode *inode, void __user *arg);
-int ll_data_version(struct inode *inode, __u64 *data_version, int extent_lock);
+int ll_data_version(struct inode *inode, __u64 *data_version, int flags);
int ll_hsm_release(struct inode *inode);
/* llite/dcache.c */
@@ -824,65 +835,8 @@ struct ll_close_queue {
atomic_t lcq_stop;
};
-struct ccc_object *cl_inode2ccc(struct inode *inode);
-
-void vvp_write_pending (struct ccc_object *club, struct ccc_page *page);
-void vvp_write_complete(struct ccc_object *club, struct ccc_page *page);
-
-/* specific architecture can implement only part of this list */
-enum vvp_io_subtype {
- /** normal IO */
- IO_NORMAL,
- /** io started from splice_{read|write} */
- IO_SPLICE
-};
-
-/* IO subtypes */
-struct vvp_io {
- /** io subtype */
- enum vvp_io_subtype cui_io_subtype;
-
- union {
- struct {
- struct pipe_inode_info *cui_pipe;
- unsigned int cui_flags;
- } splice;
- struct vvp_fault_io {
- /**
- * Inode modification time that is checked across DLM
- * lock request.
- */
- time64_t ft_mtime;
- struct vm_area_struct *ft_vma;
- /**
- * locked page returned from vvp_io
- */
- struct page *ft_vmpage;
- struct vm_fault_api {
- /**
- * kernel fault info
- */
- struct vm_fault *ft_vmf;
- /**
- * fault API used bitflags for return code.
- */
- unsigned int ft_flags;
- /**
- * check that flags are from filemap_fault
- */
- bool ft_flags_valid;
- } fault;
- } fault;
- } u;
- /**
- * Read-ahead state used by read and page-fault IO contexts.
- */
- struct ll_ra_read cui_bead;
- /**
- * Set when cui_bead has been initialized.
- */
- int cui_ra_window_set;
-};
+void vvp_write_pending(struct vvp_object *club, struct vvp_page *page);
+void vvp_write_complete(struct vvp_object *club, struct vvp_page *page);
/**
* IO arguments for various VFS I/O interfaces.
@@ -911,54 +865,32 @@ struct ll_cl_context {
int lcc_refcheck;
};
-struct vvp_thread_info {
- struct vvp_io_args vti_args;
- struct ra_io_arg vti_ria;
- struct ll_cl_context vti_io_ctx;
+struct ll_thread_info {
+ struct vvp_io_args lti_args;
+ struct ra_io_arg lti_ria;
+ struct ll_cl_context lti_io_ctx;
};
-static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
-{
- extern struct lu_context_key vvp_key;
- struct vvp_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &vvp_key);
- LASSERT(info);
- return info;
-}
-
-static inline struct vvp_io_args *vvp_env_args(const struct lu_env *env,
- enum vvp_io_subtype type)
+extern struct lu_context_key ll_thread_key;
+static inline struct ll_thread_info *ll_env_info(const struct lu_env *env)
{
- struct vvp_io_args *ret = &vvp_env_info(env)->vti_args;
-
- ret->via_io_subtype = type;
+ struct ll_thread_info *lti;
- return ret;
+ lti = lu_context_key_get(&env->le_ctx, &ll_thread_key);
+ LASSERT(lti);
+ return lti;
}
-struct vvp_session {
- struct vvp_io vs_ios;
-};
-
-static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
+static inline struct vvp_io_args *ll_env_args(const struct lu_env *env,
+ enum vvp_io_subtype type)
{
- extern struct lu_context_key vvp_session_key;
- struct vvp_session *ses;
+ struct vvp_io_args *via = &ll_env_info(env)->lti_args;
- ses = lu_context_key_get(env->le_ses, &vvp_session_key);
- LASSERT(ses);
- return ses;
-}
+ via->via_io_subtype = type;
-static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
-{
- return &vvp_env_session(env)->vs_ios;
+ return via;
}
-int vvp_global_init(void);
-void vvp_global_fini(void);
-
void ll_queue_done_writing(struct inode *inode, unsigned long flags);
void ll_close_thread_shutdown(struct ll_close_queue *lcq);
int ll_close_thread_start(struct ll_close_queue **lcq_ret);
@@ -981,6 +913,10 @@ static inline void ll_invalidate_page(struct page *vmpage)
if (!mapping)
return;
+ /*
+ * truncate_complete_page() calls
+ * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
+ */
ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
truncate_complete_page(mapping, vmpage);
}
@@ -1040,10 +976,10 @@ static inline __u64 ll_file_maxbytes(struct inode *inode)
}
/* llite/xattr.c */
-int ll_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
-ssize_t ll_getxattr(struct dentry *dentry, const char *name,
- void *buffer, size_t size);
+int ll_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags);
+ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size);
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
int ll_removexattr(struct dentry *dentry, const char *name);
@@ -1055,9 +991,6 @@ void free_rmtperm_hash(struct hlist_head *hash);
int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
int lustre_check_remote_perm(struct inode *inode, int mask);
-/* llite/llite_cl.c */
-extern struct lu_device_type vvp_device_type;
-
/**
* Common IO arguments for various VFS I/O interfaces.
*/
@@ -1069,7 +1002,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
struct ll_readahead_state *ras, unsigned long index,
unsigned hit);
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
-void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which);
+void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
/* llite/llite_rmtacl.c */
#ifdef CONFIG_FS_POSIX_ACL
@@ -1163,6 +1096,22 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentry,
int only_unplug);
void ll_stop_statahead(struct inode *dir, void *key);
+blkcnt_t dirty_cnt(struct inode *inode);
+
+int cl_glimpse_size0(struct inode *inode, int agl);
+int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
+ struct inode *inode, struct cl_object *clob, int agl);
+
+static inline int cl_glimpse_size(struct inode *inode)
+{
+ return cl_glimpse_size0(inode, 0);
+}
+
+static inline int cl_agl(struct inode *inode)
+{
+ return cl_glimpse_size0(inode, 1);
+}
+
static inline int ll_glimpse_size(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
@@ -1285,43 +1234,6 @@ typedef enum llioc_iter (*llioc_callback_t)(struct inode *inode,
void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd);
void ll_iocontrol_unregister(void *magic);
-/* lclient compat stuff */
-#define cl_inode_info ll_inode_info
-#define cl_i2info(info) ll_i2info(info)
-#define cl_inode_mode(inode) ((inode)->i_mode)
-#define cl_i2sbi ll_i2sbi
-
-static inline struct ll_file_data *cl_iattr2fd(struct inode *inode,
- const struct iattr *attr)
-{
- LASSERT(attr->ia_valid & ATTR_FILE);
- return LUSTRE_FPRIVATE(attr->ia_file);
-}
-
-static inline void cl_isize_write_nolock(struct inode *inode, loff_t kms)
-{
- LASSERT(mutex_is_locked(&ll_i2info(inode)->lli_size_mutex));
- i_size_write(inode, kms);
-}
-
-static inline void cl_isize_write(struct inode *inode, loff_t kms)
-{
- ll_inode_size_lock(inode);
- i_size_write(inode, kms);
- ll_inode_size_unlock(inode);
-}
-
-#define cl_isize_read(inode) i_size_read(inode)
-
-static inline int cl_merge_lvb(const struct lu_env *env, struct inode *inode)
-{
- return ll_merge_lvb(env, inode);
-}
-
-#define cl_inode_atime(inode) LTIME_S((inode)->i_atime)
-#define cl_inode_ctime(inode) LTIME_S((inode)->i_ctime)
-#define cl_inode_mtime(inode) LTIME_S((inode)->i_mtime)
-
int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
enum cl_fsync_mode mode, int ignore_layout);
@@ -1350,7 +1262,7 @@ static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt,
int opc = (crt == CRT_READ) ? LPROC_LL_OSC_READ :
LPROC_LL_OSC_WRITE;
- ll_stats_ops_tally(ll_s2sbi(cl2ccc_dev(dev)->cdv_sb), opc, rc);
+ ll_stats_ops_tally(ll_s2sbi(cl2vvp_dev(dev)->vdv_sb), opc, rc);
}
ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
@@ -1382,18 +1294,16 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
*/
if (it->d.lustre.it_remote_lock_mode) {
handle.cookie = it->d.lustre.it_remote_lock_handle;
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p(%lu/%u) for remote lock %#llx\n",
- inode,
- inode->i_ino, inode->i_generation,
+ CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for remote lock %#llx\n",
+ PFID(ll_inode2fid(inode)), inode,
handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode, NULL);
}
handle.cookie = it->d.lustre.it_lock_handle;
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u) for lock %#llx\n",
- inode, inode->i_ino,
- inode->i_generation, handle.cookie);
+ CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for lock %#llx\n",
+ PFID(ll_inode2fid(inode)), inode, handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode,
&it->d.lustre.it_lock_bits);
@@ -1471,9 +1381,25 @@ enum {
int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
int ll_layout_refresh(struct inode *inode, __u32 *gen);
-int ll_layout_restore(struct inode *inode);
+int ll_layout_restore(struct inode *inode, loff_t start, __u64 length);
int ll_xattr_init(void);
void ll_xattr_fini(void);
+int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, enum cl_req_type crt);
+
+/* lcommon_cl.c */
+int cl_setattr_ost(struct inode *inode, const struct iattr *attr);
+
+extern struct lu_env *cl_inode_fini_env;
+extern int cl_inode_fini_refcheck;
+
+int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
+void cl_inode_fini(struct inode *inode);
+int cl_local_size(struct inode *inode);
+
+__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
+__u32 cl_fid_build_gen(const struct lu_fid *fid);
+
#endif /* LLITE_INTERNAL_H */
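
The new ll_env_info() accessor follows the usual lu_context_key idiom: a
per-thread scratch structure is registered under a key at module init and
fetched back from the environment on each call. A generic sketch of that
idiom with hypothetical my_* names (the real key here is ll_thread_key):

	struct my_thread_info {
		int mti_scratch;
	};

	extern struct lu_context_key my_thread_key;	/* registered at init */

	static inline struct my_thread_info *my_env_info(const struct lu_env *env)
	{
		struct my_thread_info *mti;

		mti = lu_context_key_get(&env->le_ctx, &my_thread_key);
		LASSERT(mti);	/* key registration is a module-load invariant */
		return mti;
	}
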
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index b57a99268..96c7e9fc6 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -85,18 +85,18 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
si_meminfo(&si);
pages = si.totalram - si.totalhigh;
- if (pages >> (20 - PAGE_SHIFT) < 512)
- lru_page_max = pages / 2;
- else
- lru_page_max = (pages / 4) * 3;
+ lru_page_max = pages / 2;
- /* initialize lru data */
+ /* initialize ll_cache data */
atomic_set(&sbi->ll_cache.ccc_users, 0);
sbi->ll_cache.ccc_lru_max = lru_page_max;
atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
+ atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
+ init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
+
sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
SBI_DEFAULT_READAHEAD_MAX);
sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
@@ -169,12 +169,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
return -ENOMEM;
}
- if (llite_root) {
- err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
- if (err < 0)
- CERROR("could not register mount in <debugfs>/lustre/llite\n");
- }
-
/* indicate the features supported by this client */
data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
OBD_CONNECT_ATTRFID |
@@ -337,10 +331,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
else
sbi->ll_md_brw_size = PAGE_SIZE;
- if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
- LCONSOLE_INFO("Layout lock feature supported.\n");
+ if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
- }
if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
@@ -453,7 +445,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
/* make root inode
* XXX: move this to after cbd setup?
*/
- valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS;
+ valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
valid |= OBD_MD_FLRMTPERM;
else if (sbi->ll_flags & LL_SBI_ACL)
@@ -555,6 +547,15 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
kfree(data);
kfree(osfs);
+ if (llite_root) {
+ err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
+ if (err < 0) {
+ CERROR("%s: could not register mount in debugfs: "
+ "rc = %d\n", ll_get_fsname(sb, NULL, 0), err);
+ err = 0;
+ }
+ }
+
return err;
out_root:
iput(root);
@@ -573,7 +574,6 @@ out_md:
out:
kfree(data);
kfree(osfs);
- ldebugfs_unregister_mountpoint(sbi);
return err;
}
@@ -897,10 +897,8 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
cfg->cfg_callback = class_config_llog_handler;
/* set up client obds */
err = lustre_process_log(sb, profilenm, cfg);
- if (err < 0) {
- CERROR("Unable to process log: %d\n", err);
+ if (err < 0)
goto out_free;
- }
/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
lprof = class_get_profile(profilenm);
@@ -947,7 +945,7 @@ void ll_put_super(struct super_block *sb)
struct lustre_sb_info *lsi = s2lsi(sb);
struct ll_sb_info *sbi = ll_s2sbi(sb);
char *profilenm = get_profile_name(sb);
- int next, force = 1;
+ int ccc_count, next, force = 1, rc = 0;
CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
@@ -963,6 +961,19 @@ void ll_put_super(struct super_block *sb)
force = obd->obd_force;
}
+ /* Wait for unstable pages to be committed to stable storage */
+ if (!force) {
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+
+ rc = l_wait_event(sbi->ll_cache.ccc_unstable_waitq,
+ !atomic_read(&sbi->ll_cache.ccc_unstable_nr),
+ &lwi);
+ }
+
+ ccc_count = atomic_read(&sbi->ll_cache.ccc_unstable_nr);
+ if (!force && rc != -EINTR)
+ LASSERTF(!ccc_count, "count: %i\n", ccc_count);
+
/* We need to set force before the lov_disconnect in
* lustre_common_put_super, since l_d cleans up osc's as well.
*/
@@ -999,6 +1010,8 @@ void ll_put_super(struct super_block *sb)
lustre_common_put_super(sb);
+ cl_env_cache_purge(~0);
+
module_put(THIS_MODULE);
} /* client_put_super */
@@ -1032,8 +1045,8 @@ void ll_clear_inode(struct inode *inode)
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
- inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
if (S_ISDIR(inode->i_mode)) {
/* these should have been cleared in ll_file_release */
@@ -1180,9 +1193,11 @@ static int ll_setattr_done_writing(struct inode *inode,
* from OSTs and send setattr to back to MDS.
*/
rc = ll_som_update(inode, op_data);
- else if (rc)
- CERROR("inode %lu mdc truncate failed: rc = %d\n",
- inode->i_ino, rc);
+ else if (rc) {
+ CERROR("%s: inode "DFID" mdc truncate failed: rc = %d\n",
+ ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)), rc);
+ }
return rc;
}
@@ -1210,12 +1225,9 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
bool file_is_released = false;
int rc = 0, rc1 = 0;
- CDEBUG(D_VFSTRACE,
- "%s: setattr inode %p/fid:" DFID
- " from %llu to %llu, valid %x, hsm_import %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0), inode,
- PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
- attr->ia_valid, hsm_import);
+ CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
+ i_size_read(inode), attr->ia_size, attr->ia_valid, hsm_import);
if (attr->ia_valid & ATTR_SIZE) {
/* Check new size against VFS/VM file size limit and rlimit */
@@ -1265,14 +1277,6 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
(s64)ktime_get_real_seconds());
- /* If we are changing file size, file content is modified, flag it. */
- if (attr->ia_valid & ATTR_SIZE) {
- attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
-
/* We always do an MDS RPC, even if we're only changing the size;
* only the MDS knows whether truncate() should fail with -ETXTBUSY
*/
@@ -1284,13 +1288,6 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
if (!S_ISDIR(inode->i_mode))
inode_unlock(inode);
- memcpy(&op_data->op_attr, attr, sizeof(*attr));
-
- /* Open epoch for truncate. */
- if (exp_connect_som(ll_i2mdexp(inode)) &&
- (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
- op_data->op_flags = MF_EPOCH_OPEN;
-
 	/* truncate on a released file must fail with -ENODATA,
* so size must not be set on MDS for released file
* but other attributes must be set
@@ -1304,29 +1301,40 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
file_is_released = true;
ccc_inode_lsm_put(inode, lsm);
+
+ if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
+ if (file_is_released) {
+ rc = ll_layout_restore(inode, 0, attr->ia_size);
+ if (rc < 0)
+ goto out;
+
+ file_is_released = false;
+ ll_layout_refresh(inode, &gen);
+ }
+
+ /*
+ * If we are changing file size, file content is
+ * modified, flag it.
+ */
+ attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+ op_data->op_bias |= MDS_DATA_MODIFIED;
+ }
}
- /* if not in HSM import mode, clear size attr for released file
- * we clear the attribute send to MDT in op_data, not the original
- * received from caller in attr which is used later to
- * decide return code
- */
- if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import)
- op_data->op_attr.ia_valid &= ~ATTR_SIZE;
+ memcpy(&op_data->op_attr, attr, sizeof(*attr));
+
+ /* Open epoch for truncate. */
+ if (exp_connect_som(ll_i2mdexp(inode)) && !hsm_import &&
+ (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
+ op_data->op_flags = MF_EPOCH_OPEN;
rc = ll_md_setattr(dentry, op_data, &mod);
if (rc)
goto out;
- /* truncate failed (only when non HSM import), others succeed */
- if (file_is_released) {
- if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
- rc = -ENODATA;
- else
- rc = 0;
- goto out;
- }
-
/* RPC to MDT is sent, cancel data modification flag */
if (op_data->op_bias & MDS_DATA_MODIFIED) {
spin_lock(&lli->lli_lock);
@@ -1335,7 +1343,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
}
ll_ioepoch_open(lli, op_data->op_ioepoch);
- if (!S_ISREG(inode->i_mode)) {
+ if (!S_ISREG(inode->i_mode) || file_is_released) {
rc = 0;
goto out;
}
@@ -1552,7 +1560,7 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
if (body->valid & OBD_MD_FLATIME) {
if (body->atime > LTIME_S(inode->i_atime))
LTIME_S(inode->i_atime) = body->atime;
- lli->lli_lvb.lvb_atime = body->atime;
+ lli->lli_atime = body->atime;
}
if (body->valid & OBD_MD_FLMTIME) {
if (body->mtime > LTIME_S(inode->i_mtime)) {
@@ -1561,12 +1569,12 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
body->mtime);
LTIME_S(inode->i_mtime) = body->mtime;
}
- lli->lli_lvb.lvb_mtime = body->mtime;
+ lli->lli_mtime = body->mtime;
}
if (body->valid & OBD_MD_FLCTIME) {
if (body->ctime > LTIME_S(inode->i_ctime))
LTIME_S(inode->i_ctime) = body->ctime;
- lli->lli_lvb.lvb_ctime = body->ctime;
+ lli->lli_ctime = body->ctime;
}
if (body->valid & OBD_MD_FLMODE)
inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
@@ -1593,12 +1601,12 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
/* FID shouldn't be changed! */
if (fid_is_sane(&lli->lli_fid)) {
LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
- "Trying to change FID "DFID
- " to the "DFID", inode %lu/%u(%p)\n",
+ "Trying to change FID "DFID" to the "DFID", inode "DFID"(%p)\n",
PFID(&lli->lli_fid), PFID(&body->fid1),
- inode->i_ino, inode->i_generation, inode);
- } else
+ PFID(ll_inode2fid(inode)), inode);
+ } else {
lli->lli_fid = body->fid1;
+ }
}
LASSERT(fid_seq(&lli->lli_fid) != 0);
@@ -1622,8 +1630,10 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
if (lli->lli_flags & (LLIF_DONE_WRITING |
LLIF_EPOCH_PENDING |
LLIF_SOM_DIRTY)) {
- CERROR("ino %lu flags %u still has size authority! do not trust the size got from MDS\n",
- inode->i_ino, lli->lli_flags);
+ CERROR("%s: inode "DFID" flags %u still has size authority! do not trust the size got from MDS\n",
+ sbi->ll_md_exp->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)),
+ lli->lli_flags);
} else {
/* Use old size assignment to avoid
* deadlock bz14138 & bz14326
@@ -1699,7 +1709,7 @@ void ll_read_inode2(struct inode *inode, void *opaque)
void ll_delete_inode(struct inode *inode)
{
- struct cl_inode_info *lli = cl_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
if (S_ISREG(inode->i_mode) && lli->lli_clob)
/* discard all dirty pages before truncating them, required by
@@ -1715,8 +1725,8 @@ void ll_delete_inode(struct inode *inode)
spin_lock_irq(&inode->i_data.tree_lock);
spin_unlock_irq(&inode->i_data.tree_lock);
LASSERTF(inode->i_data.nrpages == 0,
- "inode=%lu/%u(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
- inode->i_ino, inode->i_generation, inode,
+ "inode="DFID"(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
+ PFID(ll_inode2fid(inode)), inode,
inode->i_data.nrpages);
}
/* Workaround end */
@@ -1747,7 +1757,9 @@ int ll_iocontrol(struct inode *inode, struct file *file,
rc = md_getattr(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc) {
- CERROR("failure %d inode %lu\n", rc, inode->i_ino);
+ CERROR("%s: failure inode "DFID": rc = %d\n",
+ sbi->ll_md_exp->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)), rc);
return -abs(rc);
}
@@ -1772,7 +1784,7 @@ int ll_iocontrol(struct inode *inode, struct file *file,
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
+ op_data->op_attr_flags = flags;
op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
rc = md_setattr(sbi->ll_md_exp, op_data,
NULL, 0, NULL, 0, &req, NULL);
@@ -2066,11 +2078,11 @@ int ll_obd_statfs(struct inode *inode, void __user *arg)
}
memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
- if (type & LL_STATFS_LMV)
+ if (type & LL_STATFS_LMV) {
exp = sbi->ll_md_exp;
- else if (type & LL_STATFS_LOV)
+ } else if (type & LL_STATFS_LOV) {
exp = sbi->ll_dt_exp;
- else {
+ } else {
rc = -ENODEV;
goto out_statfs;
}
@@ -2271,7 +2283,7 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
{
char *buf, *path = NULL;
struct dentry *dentry = NULL;
- struct ccc_object *obj = cl_inode2ccc(page->mapping->host);
+ struct vvp_object *obj = cl_inode2vvp(page->mapping->host);
/* this can be called inside spin lock so use GFP_ATOMIC. */
buf = (char *)__get_free_page(GFP_ATOMIC);
@@ -2285,7 +2297,7 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
"%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
- PFID(&obj->cob_header.coh_lu.loh_fid),
+ PFID(&obj->vob_header.coh_lu.loh_fid),
(path && !IS_ERR(path)) ? path : "", ioret);
if (dentry)
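
The ll_put_super() hunk above makes a clean unmount block until the
unstable-page counter drains to zero. Factored out, the wait is the
standard l_wait_event() pattern; an illustrative helper, not code from the
patch:

	/* Wait for all unstable pages to commit before tearing down the sb;
	 * interruptible so a signal can abort a stuck unmount.
	 */
	static int wait_unstable_drained(struct ll_sb_info *sbi)
	{
		struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);

		return l_wait_event(sbi->ll_cache.ccc_unstable_waitq,
				    !atomic_read(&sbi->ll_cache.ccc_unstable_nr),
				    &lwi);
	}
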
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 5b484e62f..88ef1cac9 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -57,10 +57,10 @@ void policy_from_vma(ldlm_policy_data_t *policy,
struct vm_area_struct *vma, unsigned long addr,
size_t count)
{
- policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
+ policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
(vma->vm_pgoff << PAGE_SHIFT);
policy->l_extent.end = (policy->l_extent.start + count - 1) |
- ~CFS_PAGE_MASK;
+ ~PAGE_MASK;
}
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
@@ -123,7 +123,8 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
*env_ret = env;
- io = ccc_env_thread_io(env);
+restart:
+ io = vvp_env_thread_io(env);
io->ci_obj = ll_i2info(inode)->lli_clob;
LASSERT(io->ci_obj);
@@ -146,17 +147,20 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
if (rc == 0) {
- struct ccc_io *cio = ccc_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- LASSERT(cio->cui_cl.cis_io == io);
+ LASSERT(vio->vui_cl.cis_io == io);
/* mmap lock must be MANDATORY it has to cache pages. */
io->ci_lockreq = CILR_MANDATORY;
- cio->cui_fd = fd;
+ vio->vui_fd = fd;
} else {
LASSERT(rc < 0);
cl_io_fini(env, io);
+ if (io->ci_need_restart)
+ goto restart;
+
cl_env_nested_put(nest, env);
io = ERR_PTR(rc);
}
@@ -200,7 +204,7 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
* Otherwise, we could add dirty pages into osc cache
* while truncate is on-going.
*/
- inode = ccc_object_inode(io->ci_obj);
+ inode = vvp_object_inode(io->ci_obj);
lli = ll_i2info(inode);
down_read(&lli->lli_trunc_sem);
@@ -307,17 +311,17 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
vio = vvp_env_io(env);
vio->u.fault.ft_vma = vma;
vio->u.fault.ft_vmpage = NULL;
- vio->u.fault.fault.ft_vmf = vmf;
- vio->u.fault.fault.ft_flags = 0;
- vio->u.fault.fault.ft_flags_valid = false;
+ vio->u.fault.ft_vmf = vmf;
+ vio->u.fault.ft_flags = 0;
+ vio->u.fault.ft_flags_valid = false;
result = cl_io_loop(env, io);
/* ft_flags are only valid if we reached
* the call to filemap_fault
*/
- if (vio->u.fault.fault.ft_flags_valid)
- fault_ret = vio->u.fault.fault.ft_flags;
+ if (vio->u.fault.ft_flags_valid)
+ fault_ret = vio->u.fault.ft_flags;
vmpage = vio->u.fault.ft_vmpage;
if (result != 0 && vmpage) {
@@ -390,9 +394,11 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
result = ll_page_mkwrite0(vma, vmf->page, &retry);
if (!printed && ++count > 16) {
- CWARN("app(%s): the page %lu of file %lu is under heavy contention.\n",
+ const struct dentry *de = vma->vm_file->f_path.dentry;
+
+ CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
current->comm, vmf->pgoff,
- file_inode(vma->vm_file)->i_ino);
+ PFID(ll_inode2fid(de->d_inode)));
printed = true;
}
} while (retry);
@@ -422,16 +428,16 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
/**
 * To avoid cancelling the locks covering the mmapped region under lock cache pressure,
- * we track the mapped vma count in ccc_object::cob_mmap_cnt.
+ * we track the mapped vma count in vvp_object::vob_mmap_cnt.
*/
static void ll_vm_open(struct vm_area_struct *vma)
{
struct inode *inode = file_inode(vma->vm_file);
- struct ccc_object *vob = cl_inode2ccc(inode);
+ struct vvp_object *vob = cl_inode2vvp(inode);
LASSERT(vma->vm_file);
- LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
- atomic_inc(&vob->cob_mmap_cnt);
+ LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
+ atomic_inc(&vob->vob_mmap_cnt);
}
/**
@@ -440,11 +446,11 @@ static void ll_vm_open(struct vm_area_struct *vma)
static void ll_vm_close(struct vm_area_struct *vma)
{
struct inode *inode = file_inode(vma->vm_file);
- struct ccc_object *vob = cl_inode2ccc(inode);
+ struct vvp_object *vob = cl_inode2vvp(inode);
LASSERT(vma->vm_file);
- atomic_dec(&vob->cob_mmap_cnt);
- LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
+ atomic_dec(&vob->vob_mmap_cnt);
+ LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
}
/* XXX put nice comment here. talk about __free_pte -> dirty pages and
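
policy_from_vma() above swaps the Lustre-private CFS_PAGE_MASK for the
kernel's PAGE_MASK; the arithmetic itself is plain page rounding of a
[start, end] byte extent. A runnable userspace model of that rounding,
assuming 4 KiB pages and made-up vma values:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)
	#define PAGE_MASK  (~(PAGE_SIZE - 1))

	int main(void)
	{
		unsigned long vm_start = 0x7f0000001000UL, vm_pgoff = 3;
		unsigned long addr = 0x7f0000002345UL, count = 100;
		unsigned long start, end;

		/* Page-align the fault address relative to the vma, shift by
		 * the file offset of the mapping, then widen the end out to a
		 * page boundary - same math as policy_from_vma().
		 */
		start = ((addr - vm_start) & PAGE_MASK) + (vm_pgoff << PAGE_SHIFT);
		end = (start + count - 1) | ~PAGE_MASK;

		printf("extent [%#lx, %#lx]\n", start, end);	/* [0x4000, 0x4fff] */
		return 0;
	}
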
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 193aab879..c1eef6198 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -119,7 +119,7 @@ struct inode *search_inode_for_lustre(struct super_block *sb,
rc = md_getattr(sbi->ll_md_exp, op_data, &req);
kfree(op_data);
if (rc) {
- CERROR("can't get object attrs, fid "DFID", rc %d\n",
+ CDEBUG(D_INFO, "can't get object attrs, fid "DFID", rc %d\n",
PFID(fid), rc);
return ERR_PTR(rc);
}
@@ -191,8 +191,9 @@ static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
int fileid_len = sizeof(struct lustre_nfs_fid) / 4;
struct lustre_nfs_fid *nfs_fid = (void *)fh;
- CDEBUG(D_INFO, "encoding for (%lu," DFID ") maxlen=%d minlen=%d\n",
- inode->i_ino, PFID(ll_inode2fid(inode)), *plen, fileid_len);
+ CDEBUG(D_INFO, "%s: encoding for ("DFID") maxlen=%d minlen=%d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), *plen, fileid_len);
if (*plen < fileid_len) {
*plen = fileid_len;
@@ -298,8 +299,9 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
sbi = ll_s2sbi(dir->i_sb);
- CDEBUG(D_INFO, "getting parent for (%lu," DFID ")\n",
- dir->i_ino, PFID(ll_inode2fid(dir)));
+ CDEBUG(D_INFO, "%s: getting parent for ("DFID")\n",
+ ll_get_fsname(dir->i_sb, NULL, 0),
+ PFID(ll_inode2fid(dir)));
rc = ll_get_default_mdsize(sbi, &lmmsize);
if (rc != 0)
@@ -314,15 +316,20 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc) {
- CERROR("failure %d inode %lu get parent\n", rc, dir->i_ino);
+ CERROR("%s: failure inode "DFID" get parent: rc = %d\n",
+ ll_get_fsname(dir->i_sb, NULL, 0),
+ PFID(ll_inode2fid(dir)), rc);
return ERR_PTR(rc);
}
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body->valid & OBD_MD_FLID);
-
- CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n",
- PFID(ll_inode2fid(dir)), PFID(&body->fid1));
-
+ /*
+ * LU-3952: MDT may lost the FID of its parent, we should not crash
+ * the NFS server, ll_iget_for_nfs() will handle the error.
+ */
+ if (body->valid & OBD_MD_FLID) {
+ CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n",
+ PFID(ll_inode2fid(dir)), PFID(&body->fid1));
+ }
result = ll_iget_for_nfs(dir->i_sb, &body->fid1, NULL);
ptlrpc_req_finished(req);
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index f169c0db6..813a9a354 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -274,8 +274,9 @@ static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
if (lo->lo_biotail) {
lo->lo_biotail->bi_next = bio;
lo->lo_biotail = bio;
- } else
+ } else {
lo->lo_bio = lo->lo_biotail = bio;
+ }
spin_unlock_irqrestore(&lo->lo_lock, flags);
atomic_inc(&lo->lo_pending);
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 27ab12614..55d62eb11 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -254,7 +254,6 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
if (pages_number > totalram_pages / 2) {
-
CERROR("can't set file readahead more than %lu MB\n",
totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
return -ERANGE;
@@ -393,6 +392,8 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = &sbi->ll_cache;
+ struct lu_env *env;
+ int refcheck;
int mult, rc, pages_number;
int diff = 0;
int nrpages = 0;
@@ -430,6 +431,10 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
goto out;
}
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return 0;
+
diff = -diff;
while (diff > 0) {
int tmp;
@@ -455,19 +460,20 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
break;
if (!sbi->ll_dt_exp) { /* being initialized */
- rc = -ENODEV;
- break;
+ rc = 0;
+ goto out;
}
/* difficult - have to ask OSCs to drop LRU slots. */
tmp = diff << 1;
- rc = obd_set_info_async(NULL, sbi->ll_dt_exp,
+ rc = obd_set_info_async(env, sbi->ll_dt_exp,
sizeof(KEY_CACHE_LRU_SHRINK),
KEY_CACHE_LRU_SHRINK,
sizeof(tmp), &tmp, NULL);
if (rc < 0)
break;
}
+ cl_env_put(env, &refcheck);
out:
if (rc >= 0) {
@@ -818,6 +824,23 @@ static ssize_t xattr_cache_store(struct kobject *kobj,
}
LUSTRE_RW_ATTR(xattr_cache);
+static ssize_t unstable_stats_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
+ ll_kobj);
+ struct cl_client_cache *cache = &sbi->ll_cache;
+ int pages, mb;
+
+ pages = atomic_read(&cache->ccc_unstable_nr);
+ mb = (pages * PAGE_SIZE) >> 20;
+
+ return sprintf(buf, "unstable_pages: %8d\n"
+ "unstable_mb: %8d\n", pages, mb);
+}
+LUSTRE_RO_ATTR(unstable_stats);
+
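For reference, the MB value above is just a byte shift; a minimal equivalent helper (illustrative, not part of the patch):

static inline unsigned long pages_to_mb(unsigned long pages)
{
	/* bytes = pages << PAGE_SHIFT; MiB = bytes >> 20 */
	return (pages << PAGE_SHIFT) >> 20;
}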
static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
/* { "mntpt_path", ll_rd_path, 0, 0 }, */
{ "site", &ll_site_stats_fops, NULL, 0 },
@@ -853,6 +876,7 @@ static struct attribute *llite_attrs[] = {
&lustre_attr_max_easize.attr,
&lustre_attr_default_easize.attr,
&lustre_attr_xattr_cache.attr,
+ &lustre_attr_unstable_stats.attr,
NULL,
};
@@ -953,6 +977,7 @@ static const char *ra_stat_string[] = {
[RA_STAT_EOF] = "read-ahead to EOF",
[RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
[RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
+ [RA_STAT_FAILED_REACH_END] = "failed to reach end",
};
int ldebugfs_register_mountpoint(struct dentry *parent,
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index f8f98e4e8..5eba0ebae 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -128,12 +128,14 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
if (rc != 0) {
iget_failed(inode);
inode = NULL;
- } else
+ } else {
unlock_new_inode(inode);
- } else if (!(inode->i_state & (I_FREEING | I_CLEAR)))
+ }
+ } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
ll_update_inode(inode, md);
- CDEBUG(D_VFSTRACE, "got inode: %p for "DFID"\n",
- inode, PFID(&md->body->fid1));
+ CDEBUG(D_VFSTRACE, "got inode: "DFID"(%p)\n",
+ PFID(&md->body->fid1), inode);
+ }
}
return inode;
}
@@ -188,7 +190,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
break;
/* Invalidate all dentries associated with this inode */
- LASSERT(lock->l_flags & LDLM_FL_CANCELING);
+ LASSERT(ldlm_is_canceling(lock));
if (!fid_res_name_eq(ll_inode2fid(inode),
&lock->l_resource->lr_name)) {
@@ -255,8 +257,8 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
}
if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) {
- CDEBUG(D_INODE, "invalidating inode %lu\n",
- inode->i_ino);
+ CDEBUG(D_INODE, "invalidating inode "DFID"\n",
+ PFID(ll_inode2fid(inode)));
truncate_inode_pages(inode->i_mapping, 0);
ll_invalidate_negative_children(inode);
}
@@ -476,9 +478,8 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen)
return ERR_PTR(-ENAMETOOLONG);
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),intent=%s\n",
- dentry, parent->i_ino,
- parent->i_generation, parent, LL_IT2STR(it));
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),intent=%s\n",
+ dentry, PFID(ll_inode2fid(parent)), parent, LL_IT2STR(it));
if (d_mountpoint(dentry))
CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it));
@@ -553,9 +554,8 @@ static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
struct lookup_intent *itp, it = { .it_op = IT_GETATTR };
struct dentry *de;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),flags=%u\n",
- dentry, parent->i_ino,
- parent->i_generation, parent, flags);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),flags=%u\n",
+ dentry, PFID(ll_inode2fid(parent)), parent, flags);
/* Optimize away (CREATE && !OPEN). Let .create handle the race. */
if ((flags & LOOKUP_CREATE) && !(flags & LOOKUP_OPEN))
@@ -586,10 +586,9 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
long long lookup_flags = LOOKUP_OPEN;
int rc = 0;
- CDEBUG(D_VFSTRACE,
- "VFS Op:name=%pd,dir=%lu/%u(%p),file %p,open_flags %x,mode %x opened %d\n",
- dentry, dir->i_ino,
- dir->i_generation, dir, file, open_flags, mode, *opened);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),file %p,open_flags %x,mode %x opened %d\n",
+ dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode,
+ *opened);
it = kzalloc(sizeof(*it), GFP_NOFS);
if (!it)
@@ -680,8 +679,8 @@ static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
* lock on the inode. Since we finally have an inode pointer,
* stuff it in the lock.
*/
- CDEBUG(D_DLMTRACE, "setting l_ast_data to inode %p (%lu/%u)\n",
- inode, inode->i_ino, inode->i_generation);
+ CDEBUG(D_DLMTRACE, "setting l_ast_data to inode "DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
out:
ptlrpc_req_finished(request);
@@ -708,9 +707,8 @@ static int ll_create_it(struct inode *dir, struct dentry *dentry, int mode,
struct inode *inode;
int rc = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),intent=%s\n",
- dentry, dir->i_ino,
- dir->i_generation, dir, LL_IT2STR(it));
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p), intent=%s\n",
+ dentry, PFID(ll_inode2fid(dir)), dir, LL_IT2STR(it));
rc = it_open_error(DISP_OPEN_CREATE, it);
if (rc)
@@ -733,8 +731,9 @@ static void ll_update_times(struct ptlrpc_request *request,
LASSERT(body);
if (body->valid & OBD_MD_FLMTIME &&
body->mtime > LTIME_S(inode->i_mtime)) {
- CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
- inode->i_ino, LTIME_S(inode->i_mtime), body->mtime);
+ CDEBUG(D_INODE, "setting fid "DFID" mtime from %lu to %llu\n",
+ PFID(ll_inode2fid(inode)), LTIME_S(inode->i_mtime),
+ body->mtime);
LTIME_S(inode->i_mtime) = body->mtime;
}
if (body->valid & OBD_MD_FLCTIME &&
@@ -791,9 +790,9 @@ static int ll_mknod(struct inode *dir, struct dentry *dchild,
{
int err;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p) mode %o dev %x\n",
- dchild, dir->i_ino, dir->i_generation, dir,
- mode, old_encode_dev(rdev));
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p) mode %o dev %x\n",
+ dchild, PFID(ll_inode2fid(dir)), dir, mode,
+ old_encode_dev(rdev));
if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
mode &= ~current_umask();
@@ -831,9 +830,8 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
{
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),flags=%u, excl=%d\n",
- dentry, dir->i_ino,
- dir->i_generation, dir, mode, want_excl);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p), flags=%u, excl=%d\n",
+ dentry, PFID(ll_inode2fid(dir)), dir, mode, want_excl);
rc = ll_mknod(dir, dentry, mode, 0);
@@ -845,12 +843,6 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
return rc;
}
-static inline void ll_get_child_fid(struct dentry *child, struct lu_fid *fid)
-{
- if (d_really_is_positive(child))
- *fid = *ll_inode2fid(d_inode(child));
-}
-
int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir)
{
struct mdt_body *body;
@@ -927,23 +919,25 @@ out:
* is any lock existing. They will recycle dentries and inodes based upon locks
* too. b=20433
*/
-static int ll_unlink(struct inode *dir, struct dentry *dentry)
+static int ll_unlink(struct inode *dir, struct dentry *dchild)
{
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
int rc;
CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
- dentry, dir->i_ino, dir->i_generation, dir);
+ dchild, dir->i_ino, dir->i_generation, dir);
op_data = ll_prep_md_op_data(NULL, dir, NULL,
- dentry->d_name.name,
- dentry->d_name.len,
+ dchild->d_name.name,
+ dchild->d_name.len,
0, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- ll_get_child_fid(dentry, &op_data->op_fid3);
+ if (dchild && dchild->d_inode)
+ op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
+
op_data->op_fid2 = op_data->op_fid3;
rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
ll_finish_md_op_data(op_data);
@@ -963,8 +957,8 @@ static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int err;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
- dentry, dir->i_ino, dir->i_generation, dir);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir"DFID"(%p)\n",
+ dentry, PFID(ll_inode2fid(dir)), dir);
if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
mode &= ~current_umask();
@@ -977,23 +971,25 @@ static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
return err;
}
-static int ll_rmdir(struct inode *dir, struct dentry *dentry)
+static int ll_rmdir(struct inode *dir, struct dentry *dchild)
{
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
- dentry, dir->i_ino, dir->i_generation, dir);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p)\n",
+ dchild, PFID(ll_inode2fid(dir)), dir);
op_data = ll_prep_md_op_data(NULL, dir, NULL,
- dentry->d_name.name,
- dentry->d_name.len,
+ dchild->d_name.name,
+ dchild->d_name.len,
S_IFDIR, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- ll_get_child_fid(dentry, &op_data->op_fid3);
+ if (dchild && dchild->d_inode)
+ op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
+
op_data->op_fid2 = op_data->op_fid3;
rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
ll_finish_md_op_data(op_data);
@@ -1011,9 +1007,8 @@ static int ll_symlink(struct inode *dir, struct dentry *dentry,
{
int err;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),target=%.*s\n",
- dentry, dir->i_ino, dir->i_generation,
- dir, 3000, oldname);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),target=%.*s\n",
+ dentry, PFID(ll_inode2fid(dir)), dir, 3000, oldname);
err = ll_new_node(dir, dentry, oldname, S_IFLNK | S_IRWXUGO,
0, LUSTRE_OPC_SYMLINK);
@@ -1033,10 +1028,9 @@ static int ll_link(struct dentry *old_dentry, struct inode *dir,
struct md_op_data *op_data;
int err;
- CDEBUG(D_VFSTRACE,
- "VFS Op: inode=%lu/%u(%p), dir=%lu/%u(%p), target=%pd\n",
- src->i_ino, src->i_generation, src, dir->i_ino,
- dir->i_generation, dir, new_dentry);
+ CDEBUG(D_VFSTRACE, "VFS Op: inode="DFID"(%p), dir="DFID"(%p), target=%pd\n",
+ PFID(ll_inode2fid(src)), src, PFID(ll_inode2fid(dir)), dir,
+ new_dentry);
op_data = ll_prep_md_op_data(NULL, src, dir, new_dentry->d_name.name,
new_dentry->d_name.len,
@@ -1056,42 +1050,45 @@ out:
return err;
}
-static int ll_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry)
+static int ll_rename(struct inode *src, struct dentry *src_dchild,
+ struct inode *tgt, struct dentry *tgt_dchild)
{
struct ptlrpc_request *request = NULL;
- struct ll_sb_info *sbi = ll_i2sbi(old_dir);
+ struct ll_sb_info *sbi = ll_i2sbi(src);
struct md_op_data *op_data;
int err;
CDEBUG(D_VFSTRACE,
- "VFS Op:oldname=%pd,src_dir=%lu/%u(%p),newname=%pd,tgt_dir=%lu/%u(%p)\n",
- old_dentry, old_dir->i_ino, old_dir->i_generation, old_dir,
- new_dentry, new_dir->i_ino, new_dir->i_generation, new_dir);
+ "VFS Op:oldname=%pd, src_dir="DFID"(%p), newname=%pd, tgt_dir="DFID"(%p)\n",
+ src_dchild, PFID(ll_inode2fid(src)), src,
+ tgt_dchild, PFID(ll_inode2fid(tgt)), tgt);
- op_data = ll_prep_md_op_data(NULL, old_dir, new_dir, NULL, 0, 0,
+ op_data = ll_prep_md_op_data(NULL, src, tgt, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- ll_get_child_fid(old_dentry, &op_data->op_fid3);
- ll_get_child_fid(new_dentry, &op_data->op_fid4);
+ if (src_dchild && src_dchild->d_inode)
+ op_data->op_fid3 = *ll_inode2fid(src_dchild->d_inode);
+ if (tgt_dchild && tgt_dchild->d_inode)
+ op_data->op_fid4 = *ll_inode2fid(tgt_dchild->d_inode);
+
err = md_rename(sbi->ll_md_exp, op_data,
- old_dentry->d_name.name,
- old_dentry->d_name.len,
- new_dentry->d_name.name,
- new_dentry->d_name.len, &request);
+ src_dchild->d_name.name,
+ src_dchild->d_name.len,
+ tgt_dchild->d_name.name,
+ tgt_dchild->d_name.len, &request);
ll_finish_md_op_data(op_data);
if (!err) {
- ll_update_times(request, old_dir);
- ll_update_times(request, new_dir);
+ ll_update_times(request, src);
+ ll_update_times(request, tgt);
ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1);
- err = ll_objects_destroy(request, old_dir);
+ err = ll_objects_destroy(request, src);
}
ptlrpc_req_finished(request);
if (!err)
- d_move(old_dentry, new_dentry);
+ d_move(src_dchild, tgt_dchild);
return err;
}
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index edab6c5b7..336397773 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -63,7 +63,7 @@
* Finalizes cl-data before exiting typical address_space operation. Dual to
* ll_cl_init().
*/
-static void ll_cl_fini(struct ll_cl_context *lcc)
+void ll_cl_fini(struct ll_cl_context *lcc)
{
struct lu_env *env = lcc->lcc_env;
struct cl_io *io = lcc->lcc_io;
@@ -84,200 +84,59 @@ static void ll_cl_fini(struct ll_cl_context *lcc)
* Initializes common cl-data at the typical address_space operation entry
* point.
*/
-static struct ll_cl_context *ll_cl_init(struct file *file,
- struct page *vmpage, int create)
+struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
{
struct ll_cl_context *lcc;
struct lu_env *env;
struct cl_io *io;
struct cl_object *clob;
- struct ccc_io *cio;
+ struct vvp_io *vio;
int refcheck;
int result = 0;
- clob = ll_i2info(vmpage->mapping->host)->lli_clob;
+ clob = ll_i2info(file_inode(file))->lli_clob;
LASSERT(clob);
env = cl_env_get(&refcheck);
if (IS_ERR(env))
return ERR_CAST(env);
- lcc = &vvp_env_info(env)->vti_io_ctx;
+ lcc = &ll_env_info(env)->lti_io_ctx;
memset(lcc, 0, sizeof(*lcc));
lcc->lcc_env = env;
lcc->lcc_refcheck = refcheck;
lcc->lcc_cookie = current;
- cio = ccc_env_io(env);
- io = cio->cui_cl.cis_io;
- if (!io && create) {
- struct inode *inode = vmpage->mapping->host;
- loff_t pos;
-
- if (inode_trylock(inode)) {
- inode_unlock((inode));
-
- /* this is too bad. Someone is trying to write the
- * page w/o holding inode mutex. This means we can
- * add dirty pages into cache during truncate
- */
- CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n",
- current->comm);
- dump_stack();
- LBUG();
- return ERR_PTR(-EIO);
- }
-
- /*
- * Loop-back driver calls ->prepare_write().
- * methods directly, bypassing file system ->write() operation,
- * so cl_io has to be created here.
- */
- io = ccc_env_thread_io(env);
- ll_io_init(io, file, 1);
-
- /* No lock at all for this kind of IO - we can't do it because
- * we have held page lock, it would cause deadlock.
- * XXX: This causes poor performance to loop device - One page
- * per RPC.
- * In order to get better performance, users should use
- * lloop driver instead.
- */
- io->ci_lockreq = CILR_NEVER;
-
- pos = vmpage->index << PAGE_SHIFT;
-
- /* Create a temp IO to serve write. */
- result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
- if (result == 0) {
- cio->cui_fd = LUSTRE_FPRIVATE(file);
- cio->cui_iter = NULL;
- result = cl_io_iter_init(env, io);
- if (result == 0) {
- result = cl_io_lock(env, io);
- if (result == 0)
- result = cl_io_start(env, io);
- }
- } else
- result = io->ci_result;
- }
-
+ vio = vvp_env_io(env);
+ io = vio->vui_cl.cis_io;
lcc->lcc_io = io;
if (!io)
result = -EIO;
- if (result == 0) {
+
+ if (result == 0 && vmpage) {
struct cl_page *page;
LASSERT(io->ci_state == CIS_IO_GOING);
- LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file));
+ LASSERT(vio->vui_fd == LUSTRE_FPRIVATE(file));
page = cl_page_find(env, clob, vmpage->index, vmpage,
CPT_CACHEABLE);
if (!IS_ERR(page)) {
lcc->lcc_page = page;
lu_ref_add(&page->cp_reference, "cl_io", io);
result = 0;
- } else
+ } else {
result = PTR_ERR(page);
+ }
}
if (result) {
ll_cl_fini(lcc);
lcc = ERR_PTR(result);
}
- CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %d %p %p\n",
- vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,
- env, io);
- return lcc;
-}
-
-static struct ll_cl_context *ll_cl_get(void)
-{
- struct ll_cl_context *lcc;
- struct lu_env *env;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- LASSERT(!IS_ERR(env));
- lcc = &vvp_env_info(env)->vti_io_ctx;
- LASSERT(env == lcc->lcc_env);
- LASSERT(current == lcc->lcc_cookie);
- cl_env_put(env, &refcheck);
-
- /* env has got in ll_cl_init, so it is still usable. */
return lcc;
}
-/**
- * ->prepare_write() address space operation called by generic_file_write()
- * for every page during write.
- */
-int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
- unsigned to)
-{
- struct ll_cl_context *lcc;
- int result;
-
- lcc = ll_cl_init(file, vmpage, 1);
- if (!IS_ERR(lcc)) {
- struct lu_env *env = lcc->lcc_env;
- struct cl_io *io = lcc->lcc_io;
- struct cl_page *page = lcc->lcc_page;
-
- cl_page_assume(env, io, page);
-
- result = cl_io_prepare_write(env, io, page, from, to);
- if (result == 0) {
- /*
- * Add a reference, so that page is not evicted from
- * the cache until ->commit_write() is called.
- */
- cl_page_get(page);
- lu_ref_add(&page->cp_reference, "prepare_write",
- current);
- } else {
- cl_page_unassume(env, io, page);
- ll_cl_fini(lcc);
- }
- /* returning 0 in prepare assumes commit must be called
- * afterwards
- */
- } else {
- result = PTR_ERR(lcc);
- }
- return result;
-}
-
-int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
- unsigned to)
-{
- struct ll_cl_context *lcc;
- struct lu_env *env;
- struct cl_io *io;
- struct cl_page *page;
- int result = 0;
-
- lcc = ll_cl_get();
- env = lcc->lcc_env;
- page = lcc->lcc_page;
- io = lcc->lcc_io;
-
- LASSERT(cl_page_is_owned(page, io));
- LASSERT(from <= to);
- if (from != to) /* handle short write case. */
- result = cl_io_commit_write(env, io, page, from, to);
- if (cl_page_is_owned(page, io))
- cl_page_unassume(env, io, page);
-
- /*
- * Release reference acquired by ll_prepare_write().
- */
- lu_ref_del(&page->cp_reference, "prepare_write", current);
- cl_page_put(env, page);
- ll_cl_fini(lcc);
- return result;
-}
-
static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
/**
@@ -301,7 +160,7 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
*/
static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
struct ra_io_arg *ria,
- unsigned long pages)
+ unsigned long pages, unsigned long min)
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
long ret;
@@ -341,6 +200,11 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
}
out:
+ if (ret < min) {
+ /* override ra limit for maximum performance */
+ atomic_add(min - ret, &ra->ra_cur_pages);
+ ret = min;
+ }
return ret;
}
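Net effect of the floor added above, assuming the matching ll_ra_count_put() later releases everything granted:

/*
 * grant = max(grant, min); ra_cur_pages may temporarily overshoot
 * ra_max_pages by up to (min - grant), which ll_ra_count_put()
 * gives back when the readahead completes.
 */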
@@ -357,9 +221,9 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
lprocfs_counter_incr(sbi->ll_ra_stats, which);
}
-void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
+void ll_ra_stats_inc(struct inode *inode, enum ra_stat which)
{
- struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
ll_ra_stats_inc_sbi(sbi, which);
}
@@ -388,61 +252,42 @@ static int index_in_window(unsigned long index, unsigned long point,
return start <= index && index <= end;
}
-static struct ll_readahead_state *ll_ras_get(struct file *f)
+void ll_ras_enter(struct file *f)
{
- struct ll_file_data *fd;
-
- fd = LUSTRE_FPRIVATE(f);
- return &fd->fd_ras;
-}
-
-void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
-{
- struct ll_readahead_state *ras;
-
- ras = ll_ras_get(f);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(f);
+ struct ll_readahead_state *ras = &fd->fd_ras;
spin_lock(&ras->ras_lock);
ras->ras_requests++;
ras->ras_request_index = 0;
ras->ras_consecutive_requests++;
- rar->lrr_reader = current;
-
- list_add(&rar->lrr_linkage, &ras->ras_read_beads);
- spin_unlock(&ras->ras_lock);
-}
-
-void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
-{
- struct ll_readahead_state *ras;
-
- ras = ll_ras_get(f);
-
- spin_lock(&ras->ras_lock);
- list_del_init(&rar->lrr_linkage);
spin_unlock(&ras->ras_lock);
}
static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
struct cl_page_list *queue, struct cl_page *page,
- struct page *vmpage)
+ struct cl_object *clob, pgoff_t *max_index)
{
- struct ccc_page *cp;
+ struct page *vmpage = page->cp_vmpage;
+ struct vvp_page *vpg;
int rc;
rc = 0;
cl_page_assume(env, io, page);
lu_ref_add(&page->cp_reference, "ra", current);
- cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
- if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
- rc = cl_page_is_under_lock(env, io, page);
- if (rc == -EBUSY) {
- cp->cpg_defer_uptodate = 1;
- cp->cpg_ra_used = 0;
+ vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+ if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
+ CDEBUG(D_READA, "page index %lu, max_index: %lu\n",
+ vvp_index(vpg), *max_index);
+ if (*max_index == 0 || vvp_index(vpg) > *max_index)
+ rc = cl_page_is_under_lock(env, io, page, max_index);
+ if (rc == 0) {
+ vpg->vpg_defer_uptodate = 1;
+ vpg->vpg_ra_used = 0;
cl_page_list_add(queue, page);
rc = 1;
} else {
- cl_page_delete(env, page);
+ cl_page_discard(env, io, page);
rc = -ENOLCK;
}
} else {
@@ -466,24 +311,25 @@ static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
*/
static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
struct cl_page_list *queue,
- pgoff_t index, struct address_space *mapping)
+ pgoff_t index, pgoff_t *max_index)
{
+ struct cl_object *clob = io->ci_obj;
+ struct inode *inode = vvp_object_inode(clob);
struct page *vmpage;
- struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
struct cl_page *page;
enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
int rc = 0;
const char *msg = NULL;
- vmpage = grab_cache_page_nowait(mapping, index);
+ vmpage = grab_cache_page_nowait(inode->i_mapping, index);
if (vmpage) {
/* Check if vmpage was truncated or reclaimed */
- if (vmpage->mapping == mapping) {
+ if (vmpage->mapping == inode->i_mapping) {
page = cl_page_find(env, clob, vmpage->index,
vmpage, CPT_CACHEABLE);
if (!IS_ERR(page)) {
rc = cl_read_ahead_page(env, io, queue,
- page, vmpage);
+ page, clob, max_index);
if (rc == -ENOLCK) {
which = RA_STAT_FAILED_MATCH;
msg = "lock match failed";
@@ -504,7 +350,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
msg = "g_c_p_n failed";
}
if (msg) {
- ll_ra_stats_inc(mapping, which);
+ ll_ra_stats_inc(inode, which);
CDEBUG(D_READA, "%s\n", msg);
}
return rc;
@@ -616,11 +462,12 @@ static int ll_read_ahead_pages(const struct lu_env *env,
struct cl_io *io, struct cl_page_list *queue,
struct ra_io_arg *ria,
unsigned long *reserved_pages,
- struct address_space *mapping,
unsigned long *ra_end)
{
- int rc, count = 0, stride_ria;
- unsigned long page_idx;
+ int rc, count = 0;
+ bool stride_ria;
+ pgoff_t page_idx;
+ pgoff_t max_index = 0;
LASSERT(ria);
RIA_DEBUG(ria);
@@ -631,12 +478,13 @@ static int ll_read_ahead_pages(const struct lu_env *env,
if (ras_inside_ra_window(page_idx, ria)) {
/* If the page is inside the read-ahead window*/
rc = ll_read_ahead_page(env, io, queue,
- page_idx, mapping);
+ page_idx, &max_index);
if (rc == 1) {
(*reserved_pages)--;
count++;
- } else if (rc == -ENOLCK)
+ } else if (rc == -ENOLCK) {
break;
+ }
} else if (stride_ria) {
/* If it is not in the read-ahead window, and it is
* read-ahead mode, then check whether it should skip
@@ -666,25 +514,22 @@ static int ll_read_ahead_pages(const struct lu_env *env,
}
int ll_readahead(const struct lu_env *env, struct cl_io *io,
- struct ll_readahead_state *ras, struct address_space *mapping,
- struct cl_page_list *queue, int flags)
+ struct cl_page_list *queue, struct ll_readahead_state *ras,
+ bool hit)
{
struct vvp_io *vio = vvp_env_io(env);
- struct vvp_thread_info *vti = vvp_env_info(env);
- struct cl_attr *attr = ccc_env_thread_attr(env);
+ struct ll_thread_info *lti = ll_env_info(env);
+ struct cl_attr *attr = vvp_env_thread_attr(env);
unsigned long start = 0, end = 0, reserved;
- unsigned long ra_end, len;
+ unsigned long ra_end, len, mlen = 0;
struct inode *inode;
- struct ll_ra_read *bead;
- struct ra_io_arg *ria = &vti->vti_ria;
- struct ll_inode_info *lli;
+ struct ra_io_arg *ria = &lti->lti_ria;
struct cl_object *clob;
int ret = 0;
__u64 kms;
- inode = mapping->host;
- lli = ll_i2info(inode);
- clob = lli->lli_clob;
+ clob = io->ci_obj;
+ inode = vvp_object_inode(clob);
memset(ria, 0, sizeof(*ria));
@@ -696,22 +541,20 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
return ret;
kms = attr->cat_kms;
if (kms == 0) {
- ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
+ ll_ra_stats_inc(inode, RA_STAT_ZERO_LEN);
return 0;
}
spin_lock(&ras->ras_lock);
- if (vio->cui_ra_window_set)
- bead = &vio->cui_bead;
- else
- bead = NULL;
/* Enlarge the RA window to encompass the full read */
- if (bead && ras->ras_window_start + ras->ras_window_len <
- bead->lrr_start + bead->lrr_count) {
- ras->ras_window_len = bead->lrr_start + bead->lrr_count -
+ if (vio->vui_ra_valid &&
+ ras->ras_window_start + ras->ras_window_len <
+ vio->vui_ra_start + vio->vui_ra_count) {
+ ras->ras_window_len = vio->vui_ra_start + vio->vui_ra_count -
ras->ras_window_start;
}
+
/* Reserve a part of the read-ahead window that we'll be issuing */
if (ras->ras_window_len) {
start = ras->ras_next_readahead;
@@ -755,29 +598,48 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
spin_unlock(&ras->ras_lock);
if (end == 0) {
- ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
+ ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
return 0;
}
len = ria_page_count(ria);
- if (len == 0)
+ if (len == 0) {
+ ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
return 0;
+ }
+
+ CDEBUG(D_READA, DFID ": ria: %lu/%lu, bead: %lu/%lu, hit: %d\n",
+ PFID(lu_object_fid(&clob->co_lu)),
+ ria->ria_start, ria->ria_end,
+ vio->vui_ra_valid ? vio->vui_ra_start : 0,
+ vio->vui_ra_valid ? vio->vui_ra_count : 0,
+ hit);
+
+ /* at the least, extend the readahead window to cover the current read */
+ if (!hit && vio->vui_ra_valid &&
+ vio->vui_ra_start + vio->vui_ra_count > ria->ria_start) {
+ /* to the end of current read window. */
+ mlen = vio->vui_ra_start + vio->vui_ra_count - ria->ria_start;
+ /* trim to RPC boundary */
+ start = ria->ria_start & (PTLRPC_MAX_BRW_PAGES - 1);
+ mlen = min(mlen, PTLRPC_MAX_BRW_PAGES - start);
+ }
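The mask trick above assumes PTLRPC_MAX_BRW_PAGES is a power of two; a worked example with an assumed value:

/*
 * Assume PTLRPC_MAX_BRW_PAGES == 256 and ria_start == 1000:
 *   start = 1000 & (256 - 1) = 232     (offset within the RPC window)
 *   mlen  = min(mlen, 256 - 232) = 24  (pages left before the boundary)
 * so the forced-minimum readahead never spans two RPCs.
 */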
- reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
+ reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len, mlen);
if (reserved < len)
- ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
+ ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT);
- CDEBUG(D_READA, "reserved page %lu ra_cur %d ra_max %lu\n", reserved,
+ CDEBUG(D_READA, "reserved pages %lu/%lu/%lu, ra_cur %d, ra_max %lu\n",
+ reserved, len, mlen,
atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
- ret = ll_read_ahead_pages(env, io, queue,
- ria, &reserved, mapping, &ra_end);
+ ret = ll_read_ahead_pages(env, io, queue, ria, &reserved, &ra_end);
if (reserved != 0)
ll_ra_count_put(ll_i2sbi(inode), reserved);
if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
- ll_ra_stats_inc(mapping, RA_STAT_EOF);
+ ll_ra_stats_inc(inode, RA_STAT_EOF);
/* if we didn't get to the end of the region we reserved from
* the ras we need to go back and update the ras so that the
@@ -789,6 +651,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
ra_end, end, ria->ria_end);
if (ra_end != end + 1) {
+ ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
spin_lock(&ras->ras_lock);
if (ra_end < ras->ras_next_readahead &&
index_in_window(ra_end, ras->ras_window_start, 0,
@@ -836,7 +699,6 @@ void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
spin_lock_init(&ras->ras_lock);
ras_reset(inode, ras, 0);
ras->ras_requests = 0;
- INIT_LIST_HEAD(&ras->ras_read_beads);
}
/*
@@ -1059,15 +921,18 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
ras->ras_last_readpage = index;
ras_set_start(inode, ras, index);
- if (stride_io_mode(ras))
+ if (stride_io_mode(ras)) {
/* Stride readahead is sensitive to the read-ahead
 * offset, so we use the original offset here instead
 * of ras_window_start, which is RPC aligned.
 */
ras->ras_next_readahead = max(index, ras->ras_next_readahead);
- else
- ras->ras_next_readahead = max(ras->ras_window_start,
- ras->ras_next_readahead);
+ } else {
+ if (ras->ras_next_readahead < ras->ras_window_start)
+ ras->ras_next_readahead = ras->ras_window_start;
+ if (!hit)
+ ras->ras_next_readahead = index + 1;
+ }
RAS_CDEBUG(ras);
/* Trigger RA in the mmap case where ras_consecutive_requests
@@ -1129,7 +994,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
clob = ll_i2info(inode)->lli_clob;
LASSERT(clob);
- io = ccc_env_thread_io(env);
+ io = vvp_env_thread_io(env);
io->ci_obj = clob;
io->ci_ignore_layout = 1;
result = cl_io_init(env, io, CIT_MISC, clob);
@@ -1240,8 +1105,9 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
if (end == OBD_OBJECT_EOF)
- end = i_size_read(inode);
- mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
+ mapping->writeback_index = 0;
+ else
+ mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
}
return result;
}
@@ -1251,7 +1117,7 @@ int ll_readpage(struct file *file, struct page *vmpage)
struct ll_cl_context *lcc;
int result;
- lcc = ll_cl_init(file, vmpage, 0);
+ lcc = ll_cl_init(file, vmpage);
if (!IS_ERR(lcc)) {
struct lu_env *env = lcc->lcc_env;
struct cl_io *io = lcc->lcc_io;
@@ -1273,3 +1139,28 @@ int ll_readpage(struct file *file, struct page *vmpage)
}
return result;
}
+
+int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, enum cl_req_type crt)
+{
+ struct cl_2queue *queue;
+ int result;
+
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+
+ queue = &io->ci_queue;
+ cl_2queue_init_page(queue, page);
+
+ result = cl_io_submit_sync(env, io, crt, queue, 0);
+ LASSERT(cl_page_is_owned(page, io));
+
+ if (crt == CRT_READ)
+ /*
+ * in the CRT_WRITE case the page is left locked even on
+ * error.
+ */
+ cl_page_list_disown(env, io, &queue->c2_qin);
+ cl_2queue_fini(env, queue);
+
+ return result;
+}
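A hypothetical caller of the new helper, for orientation (assumes an active CIT_READ io that already owns the page):

static int read_page_sync(const struct lu_env *env, struct cl_io *io,
			  struct cl_page *page)
{
	/* on CRT_READ the helper disowns the queued page afterwards */
	return ll_page_sync_io(env, io, page, CRT_READ);
}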
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 69aa15e8e..c12a048fc 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -95,15 +95,12 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
if (obj) {
page = cl_vmpage_page(vmpage, obj);
if (page) {
- lu_ref_add(&page->cp_reference,
- "delete", vmpage);
cl_page_delete(env, page);
- lu_ref_del(&page->cp_reference,
- "delete", vmpage);
cl_page_put(env, page);
}
- } else
+ } else {
LASSERT(vmpage->private == 0);
+ }
cl_env_put(env, &refcheck);
}
}
@@ -111,12 +108,12 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
{
- struct cl_env_nest nest;
struct lu_env *env;
+ void *cookie;
struct cl_object *obj;
struct cl_page *page;
struct address_space *mapping;
- int result;
+ int result = 0;
LASSERT(PageLocked(vmpage));
if (PageWriteback(vmpage) || PageDirty(vmpage))
@@ -130,53 +127,42 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
if (!obj)
return 1;
- /* 1 for page allocator, 1 for cl_page and 1 for page cache */
+ /* 1 for caller, 1 for cl_page and 1 for page cache */
if (page_count(vmpage) > 3)
return 0;
- /* TODO: determine what gfp should be used by @gfp_mask. */
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env))
- /* If we can't allocate an env we won't call cl_page_put()
- * later on which further means it's impossible to drop
- * page refcount by cl_page, so ask kernel to not free
- * this page.
- */
- return 0;
-
page = cl_vmpage_page(vmpage, obj);
- result = !page;
- if (page) {
- if (!cl_page_in_use(page)) {
- result = 1;
- cl_page_delete(env, page);
- }
- cl_page_put(env, page);
- }
- cl_env_nested_put(&nest, env);
- return result;
-}
+ if (!page)
+ return 1;
-static int ll_set_page_dirty(struct page *vmpage)
-{
-#if 0
- struct cl_page *page = vvp_vmpage_page_transient(vmpage);
- struct vvp_object *obj = cl_inode2vvp(vmpage->mapping->host);
- struct vvp_page *cpg;
+ cookie = cl_env_reenter();
+ env = cl_env_percpu_get();
+ LASSERT(!IS_ERR(env));
- /*
- * XXX should page method be called here?
- */
- LASSERT(&obj->co_cl == page->cp_obj);
- cpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
- /*
- * XXX cannot do much here, because page is possibly not locked:
- * sys_munmap()->...
- * ->unmap_page_range()->zap_pte_range()->set_page_dirty().
+ if (!cl_page_in_use(page)) {
+ result = 1;
+ cl_page_delete(env, page);
+ }
+
+ /* To use the percpu env array, the call path must not be rescheduled;
+ * otherwise the percpu array will be corrupted if ll_releasepage() is
+ * called again on the same CPU.
+ *
+ * If this page holds the last refc of cl_object, the following
+ * call path may cause reschedule:
+ * cl_page_put -> cl_page_free -> cl_object_put ->
+ * lu_object_put -> lu_object_free -> lov_delete_raid0.
+ *
+ * However, the kernel can't get rid of this inode until all pages have
+ * been cleaned up. Since we hold the page lock here, it is safe to
+ * assume we will not enter the object delete path.
*/
- vvp_write_pending(obj, cpg);
-#endif
- return __set_page_dirty_nobuffers(vmpage);
+ LASSERT(cl_object_refc(obj) > 1);
+ cl_page_put(env, page);
+
+ cl_env_percpu_put(env);
+ cl_env_reexit(cookie);
+ return result;
}
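The comment above states the general per-CPU rule; a generic (non-Lustre) sketch of why rescheduling is forbidden while a per-CPU slot is in use:

#include <linux/percpu.h>

struct scratch { char buf[64]; };
static DEFINE_PER_CPU(struct scratch, relpage_scratch);

static void use_scratch(void)
{
	/* get_cpu_ptr() disables preemption: no migration to another CPU,
	 * and no other task on this CPU can re-enter and reuse the slot
	 */
	struct scratch *s = get_cpu_ptr(&relpage_scratch);

	s->buf[0] = 0;		/* ... must not sleep between get and put ... */
	put_cpu_ptr(&relpage_scratch);
}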
#define MAX_DIRECTIO_SIZE (2*1024*1024*1024UL)
@@ -266,7 +252,7 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
* write directly
*/
if (clp->cp_type == CPT_CACHEABLE) {
- struct page *vmpage = cl_page_vmpage(env, clp);
+ struct page *vmpage = cl_page_vmpage(clp);
struct page *src_page;
struct page *dst_page;
void *src;
@@ -358,14 +344,14 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
*/
#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \
PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
-static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
- loff_t file_offset)
+static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
{
struct lu_env *env;
struct cl_io *io;
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- struct ccc_object *obj = cl_inode2ccc(inode);
+ struct vvp_object *obj = cl_inode2vvp(inode);
+ loff_t file_offset = iocb->ki_pos;
ssize_t count = iov_iter_count(iter);
ssize_t tot_bytes = 0, result = 0;
struct ll_inode_info *lli = ll_i2info(inode);
@@ -376,22 +362,21 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
return -EBADF;
/* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
- if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK))
+ if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
return -EINVAL;
- CDEBUG(D_VFSTRACE,
- "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
- inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
+ PFID(ll_inode2fid(inode)), inode, count, MAX_DIO_SIZE,
file_offset, file_offset, count >> PAGE_SHIFT,
MAX_DIO_SIZE >> PAGE_SHIFT);
/* Check that all user buffers are aligned as well */
- if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)
+ if (iov_iter_alignment(iter) & ~PAGE_MASK)
return -EINVAL;
env = cl_env_get(&refcheck);
LASSERT(!IS_ERR(env));
- io = ccc_env_io(env)->cui_cl.cis_io;
+ io = vvp_env_io(env)->vui_cl.cis_io;
LASSERT(io);
/* 0. Need locking between buffered and direct access. and race with
@@ -401,7 +386,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
if (iov_iter_rw(iter) == READ)
inode_lock(inode);
- LASSERT(obj->cob_transient_pages == 0);
+ LASSERT(obj->vob_transient_pages == 0);
while (iov_iter_count(iter)) {
struct page **pages;
size_t offs;
@@ -435,8 +420,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
size > (PAGE_SIZE / sizeof(*pages)) *
PAGE_SIZE) {
size = ((((size / 2) - 1) |
- ~CFS_PAGE_MASK) + 1) &
- CFS_PAGE_MASK;
+ ~PAGE_MASK) + 1) &
+ PAGE_MASK;
CDEBUG(D_VFSTRACE, "DIO size now %lu\n",
size);
continue;
@@ -449,62 +434,213 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
file_offset += result;
}
out:
- LASSERT(obj->cob_transient_pages == 0);
+ LASSERT(obj->vob_transient_pages == 0);
if (iov_iter_rw(iter) == READ)
inode_unlock(inode);
if (tot_bytes > 0) {
- if (iov_iter_rw(iter) == WRITE) {
- struct lov_stripe_md *lsm;
-
- lsm = ccc_inode_lsm_get(inode);
- LASSERT(lsm);
- lov_stripe_lock(lsm);
- obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0);
- lov_stripe_unlock(lsm);
- ccc_inode_lsm_put(inode, lsm);
- }
+ struct vvp_io *vio = vvp_env_io(env);
+
+ /* no commit async for direct IO */
+ vio->u.write.vui_written += tot_bytes;
}
cl_env_put(env, &refcheck);
- return tot_bytes ? : result;
+ return tot_bytes ? tot_bytes : result;
+}
+
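The two -EINVAL checks above amount to one combined alignment predicate; a sketch (helper name made up):

static bool dio_aligned(loff_t offset, size_t count, struct iov_iter *iter)
{
	/* file offset, byte count and every user buffer must all be
	 * page-aligned for this direct IO path
	 */
	return ((offset | count | iov_iter_alignment(iter)) & ~PAGE_MASK) == 0;
}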
+/**
+ * Prepare partially written-to page for a write.
+ */
+static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg)
+{
+ struct cl_attr *attr = vvp_env_thread_attr(env);
+ struct cl_object *obj = io->ci_obj;
+ struct vvp_page *vpg = cl_object_page_slice(obj, pg);
+ loff_t offset = cl_offset(obj, vvp_index(vpg));
+ int result;
+
+ cl_object_attr_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ cl_object_attr_unlock(obj);
+ if (result == 0) {
+ /*
+ * If we are writing to a new page, there is no need to read old data.
+ * The extent locking will have updated the KMS, and for our
+ * purposes here we can treat it like i_size.
+ */
+ if (attr->cat_kms <= offset) {
+ char *kaddr = kmap_atomic(vpg->vpg_page);
+
+ memset(kaddr, 0, cl_page_size(obj));
+ kunmap_atomic(kaddr);
+ } else if (vpg->vpg_defer_uptodate) {
+ vpg->vpg_ra_used = 1;
+ } else {
+ result = ll_page_sync_io(env, io, pg, CRT_READ);
+ }
+ }
+ return result;
}
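The branches above reduce to a small decision table (KMS is the server-known minimum size, usable like i_size here):

/*
 *   attr->cat_kms <= offset     -> page lies beyond known data: zero-fill
 *   vpg->vpg_defer_uptodate set -> read-ahead already brought the data in
 *   otherwise                   -> ll_page_sync_io(..., CRT_READ) old data
 */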
static int ll_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
+ struct ll_cl_context *lcc;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_page *page;
+ struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
pgoff_t index = pos >> PAGE_SHIFT;
- struct page *page;
- int rc;
- unsigned from = pos & (PAGE_SIZE - 1);
+ struct page *vmpage = NULL;
+ unsigned int from = pos & (PAGE_SIZE - 1);
+ unsigned int to = from + len;
+ int result = 0;
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
- return -ENOMEM;
+ CDEBUG(D_VFSTRACE, "Writing %lu of %d to %d bytes\n", index, from, len);
- *pagep = page;
+ lcc = ll_cl_init(file, NULL);
+ if (IS_ERR(lcc)) {
+ result = PTR_ERR(lcc);
+ goto out;
+ }
- rc = ll_prepare_write(file, page, from, from + len);
- if (rc) {
- unlock_page(page);
- put_page(page);
+ env = lcc->lcc_env;
+ io = lcc->lcc_io;
+
+ /* To avoid deadlock, try to lock page first. */
+ vmpage = grab_cache_page_nowait(mapping, index);
+ if (unlikely(!vmpage || PageDirty(vmpage) || PageWriteback(vmpage))) {
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_page_list *plist = &vio->u.write.vui_queue;
+
+ /* If the page is already in the dirty cache, we have to commit
+ * the queued pages right now; otherwise we may deadlock, since
+ * we would hold the page lock of a dirty page while requesting
+ * more grants. It's okay for the dirty page to be the first
+ * one in the commit page list, though.
+ */
+ if (vmpage && plist->pl_nr > 0) {
+ unlock_page(vmpage);
+ put_page(vmpage);
+ vmpage = NULL;
+ }
+
+ /* commit pages and then wait for page lock */
+ result = vvp_io_write_commit(env, io);
+ if (result < 0)
+ goto out;
+
+ if (!vmpage) {
+ vmpage = grab_cache_page_write_begin(mapping, index,
+ flags);
+ if (!vmpage) {
+ result = -ENOMEM;
+ goto out;
+ }
+ }
}
- return rc;
+
+ page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
+ if (IS_ERR(page)) {
+ result = PTR_ERR(page);
+ goto out;
+ }
+
+ lcc->lcc_page = page;
+ lu_ref_add(&page->cp_reference, "cl_io", io);
+
+ cl_page_assume(env, io, page);
+ if (!PageUptodate(vmpage)) {
+ /*
+ * We're completely overwriting an existing page,
+ * so _don't_ set it up to date until commit_write
+ */
+ if (from == 0 && to == PAGE_SIZE) {
+ CL_PAGE_HEADER(D_PAGE, env, page, "full page write\n");
+ POISON_PAGE(vmpage, 0x11);
+ } else {
+ /* TODO: can be optimized at OSC layer to check if it
+ * is a lockless IO. In that case, it's not necessary
+ * to read the data.
+ */
+ result = ll_prepare_partial_page(env, io, page);
+ if (result == 0)
+ SetPageUptodate(vmpage);
+ }
+ }
+ if (result < 0)
+ cl_page_unassume(env, io, page);
+out:
+ if (result < 0) {
+ if (vmpage) {
+ unlock_page(vmpage);
+ put_page(vmpage);
+ }
+ if (!IS_ERR(lcc))
+ ll_cl_fini(lcc);
+ } else {
+ *pagep = vmpage;
+ *fsdata = lcc;
+ }
+ return result;
}
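Distilled, the locking policy of ll_write_begin() is try-lock first, flush queued pages on contention, then block; a schematic sketch where flush_queued_pages() is a hypothetical stand-in for vvp_io_write_commit():

static int flush_queued_pages(void);	/* hypothetical: commits the queue */

static struct page *grab_page_safely(struct address_space *mapping,
				     pgoff_t index, unsigned int flags)
{
	struct page *vmpage = grab_cache_page_nowait(mapping, index);

	if (vmpage)
		return vmpage;		/* fast path: no lock inversion risk */
	if (flush_queued_pages() < 0)	/* drop our lock dependencies first */
		return NULL;
	return grab_cache_page_write_begin(mapping, index, flags); /* may block */
}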
static int ll_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct page *vmpage, void *fsdata)
{
+ struct ll_cl_context *lcc = fsdata;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct vvp_io *vio;
+ struct cl_page *page;
unsigned from = pos & (PAGE_SIZE - 1);
- int rc;
+ bool unplug = false;
+ int result = 0;
+
+ put_page(vmpage);
+
+ env = lcc->lcc_env;
+ page = lcc->lcc_page;
+ io = lcc->lcc_io;
+ vio = vvp_env_io(env);
+
+ LASSERT(cl_page_is_owned(page, io));
+ if (copied > 0) {
+ struct cl_page_list *plist = &vio->u.write.vui_queue;
+
+ lcc->lcc_page = NULL; /* page will be queued */
+
+ /* Add it into write queue */
+ cl_page_list_add(plist, page);
+ if (plist->pl_nr == 1) /* first page */
+ vio->u.write.vui_from = from;
+ else
+ LASSERT(from == 0);
+ vio->u.write.vui_to = from + copied;
+
+ /* We may have a full RPC's worth of pages; commit it soon */
+ if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
+ unplug = true;
+
+ CL_PAGE_DEBUG(D_VFSTRACE, env, page,
+ "queued page: %d.\n", plist->pl_nr);
+ } else {
+ cl_page_disown(env, io, page);
+
+ /* the page list is no longer contiguous; commit it now */
+ unplug = true;
+ }
- rc = ll_commit_write(file, page, from, from + copied);
- unlock_page(page);
- put_page(page);
+ if (unplug ||
+ file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
+ result = vvp_io_write_commit(env, io);
- return rc ?: copied;
+ ll_cl_fini(lcc);
+ return result >= 0 ? copied : result;
}
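The commit policy in ll_write_end() condenses to a single predicate; 'sync' stands for the O_SYNC/IS_SYNC test in the hunk:

static bool should_commit(const struct cl_page_list *plist, bool sync)
{
	/* flush once a full RPC's worth of pages is queued, or for sync IO */
	return plist->pl_nr >= PTLRPC_MAX_BRW_PAGES || sync;
}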
#ifdef CONFIG_MIGRATION
@@ -523,7 +659,7 @@ const struct address_space_operations ll_aops = {
.direct_IO = ll_direct_IO_26,
.writepage = ll_writepage,
.writepages = ll_writepages,
- .set_page_dirty = ll_set_page_dirty,
+ .set_page_dirty = __set_page_dirty_nobuffers,
.write_begin = ll_write_begin,
.write_end = ll_write_end,
.invalidatepage = ll_invalidatepage,
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 99ffd1589..6322f8866 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -661,8 +661,9 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
if (rc)
goto out;
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
- child, child->i_ino, child->i_generation);
+ CDEBUG(D_DLMTRACE, "%s: setting l_data to inode "DFID"%p\n",
+ ll_get_fsname(child->i_sb, NULL, 0),
+ PFID(ll_inode2fid(child)), child);
ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
entry->se_inode = child;
@@ -1591,13 +1592,11 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
*dentryp = alias;
} else if (d_inode(*dentryp) != inode) {
/* revalidate, but inode is recreated */
- CDEBUG(D_READA,
- "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n",
- *dentryp,
- d_inode(*dentryp)->i_ino,
- d_inode(*dentryp)->i_generation,
- inode->i_ino,
- inode->i_generation);
+ CDEBUG(D_READA, "%s: stale dentry %pd inode "DFID", statahead inode "DFID"\n",
+ ll_get_fsname(d_inode(*dentryp)->i_sb, NULL, 0),
+ *dentryp,
+ PFID(ll_inode2fid(d_inode(*dentryp))),
+ PFID(ll_inode2fid(inode)));
ll_sai_unplug(sai, entry);
return -ESTALE;
} else {
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 61856d37a..415750b0b 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -164,9 +164,18 @@ static int __init lustre_init(void)
if (rc != 0)
goto out_sysfs;
+ cl_inode_fini_env = cl_env_alloc(&cl_inode_fini_refcheck,
+ LCT_REMEMBER | LCT_NOREF);
+ if (IS_ERR(cl_inode_fini_env)) {
+ rc = PTR_ERR(cl_inode_fini_env);
+ goto out_vvp;
+ }
+
+ cl_inode_fini_env->le_ctx.lc_cookie = 0x4;
+
rc = ll_xattr_init();
if (rc != 0)
- goto out_vvp;
+ goto out_inode_fini_env;
lustre_register_client_fill_super(ll_fill_super);
lustre_register_kill_super_cb(ll_kill_super);
@@ -174,6 +183,8 @@ static int __init lustre_init(void)
return 0;
+out_inode_fini_env:
+ cl_env_put(cl_inode_fini_env, &cl_inode_fini_refcheck);
out_vvp:
vvp_global_fini();
out_sysfs:
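The new label keeps the usual kernel unwind discipline: release in reverse order of acquisition. A generic sketch with placeholder names:

static int init_a(void), init_b(void);	/* placeholders */
static void fini_a(void);		/* placeholder */

static int init_two(void)
{
	int rc = init_a();

	if (rc)
		return rc;		/* nothing acquired yet */
	rc = init_b();
	if (rc)
		goto out_a;		/* undo only what succeeded */
	return 0;

out_a:
	fini_a();
	return rc;
}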
@@ -198,6 +209,7 @@ static void __exit lustre_exit(void)
kset_unregister(llite_kset);
ll_xattr_fini();
+ cl_env_put(cl_inode_fini_env, &cl_inode_fini_refcheck);
vvp_global_fini();
kmem_cache_destroy(ll_inode_cachep);
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 46d03ea48..3fc736ccf 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -77,7 +77,9 @@ static int ll_readlink_internal(struct inode *inode,
ll_finish_md_op_data(op_data);
if (rc) {
if (rc != -ENOENT)
- CERROR("inode %lu: rc = %d\n", inode->i_ino, rc);
+ CERROR("%s: inode "DFID": rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), rc);
goto failed;
}
@@ -90,8 +92,10 @@ static int ll_readlink_internal(struct inode *inode,
LASSERT(symlen != 0);
if (body->eadatasize != symlen) {
- CERROR("inode %lu: symlink length %d not expected %d\n",
- inode->i_ino, body->eadatasize - 1, symlen - 1);
+ CERROR("%s: inode "DFID": symlink length %d not expected %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), body->eadatasize - 1,
+ symlen - 1);
rc = -EPROTO;
goto failed;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 282b70b77..47101de1c 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -36,6 +36,7 @@
* cl_device and cl_device_type implementation for VVP layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_LLITE
@@ -56,13 +57,33 @@
* "llite_" (var. "ll_") prefix.
*/
-static struct kmem_cache *vvp_thread_kmem;
+static struct kmem_cache *ll_thread_kmem;
+struct kmem_cache *vvp_lock_kmem;
+struct kmem_cache *vvp_object_kmem;
+struct kmem_cache *vvp_req_kmem;
static struct kmem_cache *vvp_session_kmem;
+static struct kmem_cache *vvp_thread_kmem;
+
static struct lu_kmem_descr vvp_caches[] = {
{
- .ckd_cache = &vvp_thread_kmem,
- .ckd_name = "vvp_thread_kmem",
- .ckd_size = sizeof(struct vvp_thread_info),
+ .ckd_cache = &ll_thread_kmem,
+ .ckd_name = "ll_thread_kmem",
+ .ckd_size = sizeof(struct ll_thread_info),
+ },
+ {
+ .ckd_cache = &vvp_lock_kmem,
+ .ckd_name = "vvp_lock_kmem",
+ .ckd_size = sizeof(struct vvp_lock),
+ },
+ {
+ .ckd_cache = &vvp_object_kmem,
+ .ckd_name = "vvp_object_kmem",
+ .ckd_size = sizeof(struct vvp_object),
+ },
+ {
+ .ckd_cache = &vvp_req_kmem,
+ .ckd_name = "vvp_req_kmem",
+ .ckd_size = sizeof(struct vvp_req),
},
{
.ckd_cache = &vvp_session_kmem,
@@ -70,29 +91,40 @@ static struct lu_kmem_descr vvp_caches[] = {
.ckd_size = sizeof(struct vvp_session)
},
{
+ .ckd_cache = &vvp_thread_kmem,
+ .ckd_name = "vvp_thread_kmem",
+ .ckd_size = sizeof(struct vvp_thread_info),
+ },
+ {
.ckd_cache = NULL
}
};
-static void *vvp_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+static void *ll_thread_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
{
struct vvp_thread_info *info;
- info = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS);
+ info = kmem_cache_zalloc(ll_thread_kmem, GFP_NOFS);
if (!info)
info = ERR_PTR(-ENOMEM);
return info;
}
-static void vvp_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
+static void ll_thread_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
{
struct vvp_thread_info *info = data;
- kmem_cache_free(vvp_thread_kmem, info);
+ kmem_cache_free(ll_thread_kmem, info);
}
+struct lu_context_key ll_thread_key = {
+ .lct_tags = LCT_CL_THREAD,
+ .lct_init = ll_thread_key_init,
+ .lct_fini = ll_thread_key_fini
+};
+
static void *vvp_session_key_init(const struct lu_context *ctx,
struct lu_context_key *key)
{
@@ -112,34 +144,127 @@ static void vvp_session_key_fini(const struct lu_context *ctx,
kmem_cache_free(vvp_session_kmem, session);
}
-struct lu_context_key vvp_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = vvp_key_init,
- .lct_fini = vvp_key_fini
-};
-
struct lu_context_key vvp_session_key = {
.lct_tags = LCT_SESSION,
.lct_init = vvp_session_key_init,
.lct_fini = vvp_session_key_fini
};
+void *vvp_thread_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
+{
+ struct vvp_thread_info *vti;
+
+ vti = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS);
+ if (!vti)
+ vti = ERR_PTR(-ENOMEM);
+ return vti;
+}
+
+void vvp_thread_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct vvp_thread_info *vti = data;
+
+ kmem_cache_free(vvp_thread_kmem, vti);
+}
+
+struct lu_context_key vvp_thread_key = {
+ .lct_tags = LCT_CL_THREAD,
+ .lct_init = vvp_thread_key_init,
+ .lct_fini = vvp_thread_key_fini
+};
+
/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
-LU_TYPE_INIT_FINI(vvp, &ccc_key, &ccc_session_key, &vvp_key, &vvp_session_key);
+LU_TYPE_INIT_FINI(vvp, &vvp_thread_key, &ll_thread_key, &vvp_session_key);
static const struct lu_device_operations vvp_lu_ops = {
.ldo_object_alloc = vvp_object_alloc
};
static const struct cl_device_operations vvp_cl_ops = {
- .cdo_req_init = ccc_req_init
+ .cdo_req_init = vvp_req_init
};
+static struct lu_device *vvp_device_free(const struct lu_env *env,
+ struct lu_device *d)
+{
+ struct vvp_device *vdv = lu2vvp_dev(d);
+ struct cl_site *site = lu2cl_site(d->ld_site);
+ struct lu_device *next = cl2lu_dev(vdv->vdv_next);
+
+ if (d->ld_site) {
+ cl_site_fini(site);
+ kfree(site);
+ }
+ cl_device_fini(lu2cl_dev(d));
+ kfree(vdv);
+ return next;
+}
+
static struct lu_device *vvp_device_alloc(const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *cfg)
{
- return ccc_device_alloc(env, t, cfg, &vvp_lu_ops, &vvp_cl_ops);
+ struct vvp_device *vdv;
+ struct lu_device *lud;
+ struct cl_site *site;
+ int rc;
+
+ vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
+ if (!vdv)
+ return ERR_PTR(-ENOMEM);
+
+ lud = &vdv->vdv_cl.cd_lu_dev;
+ cl_device_init(&vdv->vdv_cl, t);
+ vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops;
+ vdv->vdv_cl.cd_ops = &vvp_cl_ops;
+
+ site = kzalloc(sizeof(*site), GFP_NOFS);
+ if (site) {
+ rc = cl_site_init(site, &vdv->vdv_cl);
+ if (rc == 0) {
+ rc = lu_site_init_finish(&site->cs_lu);
+ } else {
+ LASSERT(!lud->ld_site);
+ CERROR("Cannot init lu_site, rc %d.\n", rc);
+ kfree(site);
+ }
+ } else {
+ rc = -ENOMEM;
+ }
+ if (rc != 0) {
+ vvp_device_free(env, lud);
+ lud = ERR_PTR(rc);
+ }
+ return lud;
+}
+
+static int vvp_device_init(const struct lu_env *env, struct lu_device *d,
+ const char *name, struct lu_device *next)
+{
+ struct vvp_device *vdv;
+ int rc;
+
+ vdv = lu2vvp_dev(d);
+ vdv->vdv_next = lu2cl_dev(next);
+
+ LASSERT(d->ld_site && next->ld_type);
+ next->ld_site = d->ld_site;
+ rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
+ next->ld_type->ldt_name,
+ NULL);
+ if (rc == 0) {
+ lu_device_get(next);
+ lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
+ }
+ return rc;
+}
+
+static struct lu_device *vvp_device_fini(const struct lu_env *env,
+ struct lu_device *d)
+{
+ return cl2lu_dev(lu2vvp_dev(d)->vdv_next);
}
static const struct lu_device_type_operations vvp_device_type_ops = {
@@ -150,9 +275,9 @@ static const struct lu_device_type_operations vvp_device_type_ops = {
.ldto_stop = vvp_type_stop,
.ldto_device_alloc = vvp_device_alloc,
- .ldto_device_free = ccc_device_free,
- .ldto_device_init = ccc_device_init,
- .ldto_device_fini = ccc_device_fini
+ .ldto_device_free = vvp_device_free,
+ .ldto_device_init = vvp_device_init,
+ .ldto_device_fini = vvp_device_fini,
};
struct lu_device_type vvp_device_type = {
@@ -168,20 +293,27 @@ struct lu_device_type vvp_device_type = {
*/
int vvp_global_init(void)
{
- int result;
+ int rc;
- result = lu_kmem_init(vvp_caches);
- if (result == 0) {
- result = ccc_global_init(&vvp_device_type);
- if (result != 0)
- lu_kmem_fini(vvp_caches);
- }
- return result;
+ rc = lu_kmem_init(vvp_caches);
+ if (rc != 0)
+ return rc;
+
+ rc = lu_device_type_init(&vvp_device_type);
+ if (rc != 0)
+ goto out_kmem;
+
+ return 0;
+
+out_kmem:
+ lu_kmem_fini(vvp_caches);
+
+ return rc;
}
void vvp_global_fini(void)
{
- ccc_global_fini(&vvp_device_type);
+ lu_device_type_fini(&vvp_device_type);
lu_kmem_fini(vvp_caches);
}
@@ -205,13 +337,14 @@ int cl_sb_init(struct super_block *sb)
cl = cl_type_setup(env, NULL, &vvp_device_type,
sbi->ll_dt_exp->exp_obd->obd_lu_dev);
if (!IS_ERR(cl)) {
- cl2ccc_dev(cl)->cdv_sb = sb;
+ cl2vvp_dev(cl)->vdv_sb = sb;
sbi->ll_cl = cl;
sbi->ll_site = cl2lu_dev(cl)->ld_site;
}
cl_env_put(env, &refcheck);
- } else
+ } else {
rc = PTR_ERR(env);
+ }
return rc;
}
@@ -356,23 +489,18 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
return ~0ULL;
clob = vvp_pgcache_obj(env, dev, &id);
if (clob) {
- struct cl_object_header *hdr;
- int nr;
- struct cl_page *pg;
-
- /* got an object. Find next page. */
- hdr = cl_object_header(clob);
+ struct inode *inode = vvp_object_inode(clob);
+ struct page *vmpage;
+ int nr;
- spin_lock(&hdr->coh_page_guard);
- nr = radix_tree_gang_lookup(&hdr->coh_tree,
- (void **)&pg,
- id.vpi_index, 1);
+ nr = find_get_pages_contig(inode->i_mapping,
+ id.vpi_index, 1, &vmpage);
if (nr > 0) {
- id.vpi_index = pg->cp_index;
+ id.vpi_index = vmpage->index;
/* Can't support files over 16TB */
- nr = !(pg->cp_index > 0xffffffff);
+ nr = !(vmpage->index > 0xffffffff);
+ put_page(vmpage);
}
- spin_unlock(&hdr->coh_page_guard);
lu_object_ref_del(&clob->co_lu, "dump", current);
cl_object_put(env, clob);
@@ -398,21 +526,20 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
static void vvp_pgcache_page_show(const struct lu_env *env,
struct seq_file *seq, struct cl_page *page)
{
- struct ccc_page *cpg;
+ struct vvp_page *vpg;
struct page *vmpage;
int has_flags;
- cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type));
- vmpage = cpg->cpg_page;
- seq_printf(seq, " %5i | %p %p %s %s %s %s | %p %lu/%u(%p) %lu %u [",
+ vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
+ vmpage = vpg->vpg_page;
+ seq_printf(seq, " %5i | %p %p %s %s %s %s | %p "DFID"(%p) %lu %u [",
0 /* gen */,
- cpg, page,
+ vpg, page,
"none",
- cpg->cpg_write_queued ? "wq" : "- ",
- cpg->cpg_defer_uptodate ? "du" : "- ",
+ vpg->vpg_write_queued ? "wq" : "- ",
+ vpg->vpg_defer_uptodate ? "du" : "- ",
PageWriteback(vmpage) ? "wb" : "-",
- vmpage, vmpage->mapping->host->i_ino,
- vmpage->mapping->host->i_generation,
+ vmpage, PFID(ll_inode2fid(vmpage->mapping->host)),
vmpage->mapping->host, vmpage->index,
page_count(vmpage));
has_flags = 0;
@@ -431,8 +558,6 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
struct ll_sb_info *sbi;
struct cl_object *clob;
struct lu_env *env;
- struct cl_page *page;
- struct cl_object_header *hdr;
struct vvp_pgcache_id id;
int refcheck;
int result;
@@ -444,27 +569,38 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
sbi = f->private;
clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
if (clob) {
- hdr = cl_object_header(clob);
-
- spin_lock(&hdr->coh_page_guard);
- page = cl_page_lookup(hdr, id.vpi_index);
- spin_unlock(&hdr->coh_page_guard);
+ struct inode *inode = vvp_object_inode(clob);
+ struct cl_page *page = NULL;
+ struct page *vmpage;
+
+ result = find_get_pages_contig(inode->i_mapping,
+ id.vpi_index, 1,
+ &vmpage);
+ if (result > 0) {
+ lock_page(vmpage);
+ page = cl_vmpage_page(vmpage, clob);
+ unlock_page(vmpage);
+ put_page(vmpage);
+ }
- seq_printf(f, "%8x@"DFID": ",
- id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
+ seq_printf(f, "%8x@" DFID ": ", id.vpi_index,
+ PFID(lu_object_fid(&clob->co_lu)));
if (page) {
vvp_pgcache_page_show(env, f, page);
cl_page_put(env, page);
- } else
+ } else {
seq_puts(f, "missing\n");
+ }
lu_object_ref_del(&clob->co_lu, "dump", current);
cl_object_put(env, clob);
- } else
+ } else {
seq_printf(f, "%llx missing\n", pos);
+ }
cl_env_put(env, &refcheck);
result = 0;
- } else
+ } else {
result = PTR_ERR(env);
+ }
return result;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index bb393378c..27b9b0a01 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -41,21 +41,337 @@
#ifndef VVP_INTERNAL_H
#define VVP_INTERNAL_H
+#include "../include/lustre/lustre_idl.h"
#include "../include/cl_object.h"
-#include "llite_internal.h"
-int vvp_io_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io);
-int vvp_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *io);
+enum obd_notify_event;
+struct inode;
+struct lov_stripe_md;
+struct lustre_md;
+struct obd_capa;
+struct obd_device;
+struct obd_export;
+struct page;
+
+/* specific architecture can implement only part of this list */
+enum vvp_io_subtype {
+ /** normal IO */
+ IO_NORMAL,
+ /** io started from splice_{read|write} */
+ IO_SPLICE
+};
+
+/**
+ * IO state private to the VVP layer.
+ */
+struct vvp_io {
+ /** super class */
+ struct cl_io_slice vui_cl;
+ struct cl_io_lock_link vui_link;
+ /**
+ * I/O vector information to or from which read/write is going.
+ */
+ struct iov_iter *vui_iter;
+ /**
+	 * Total size of the remaining IO.
+ */
+ size_t vui_tot_count;
+
+ union {
+ struct vvp_fault_io {
+ /**
+ * Inode modification time that is checked across DLM
+ * lock request.
+ */
+ time64_t ft_mtime;
+ struct vm_area_struct *ft_vma;
+ /**
+ * locked page returned from vvp_io
+ */
+ struct page *ft_vmpage;
+ /**
+ * kernel fault info
+ */
+ struct vm_fault *ft_vmf;
+ /**
+ * fault API used bitflags for return code.
+ */
+ unsigned int ft_flags;
+ /**
+ * check that flags are from filemap_fault
+ */
+ bool ft_flags_valid;
+ } fault;
+ struct {
+ struct pipe_inode_info *vui_pipe;
+ unsigned int vui_flags;
+ } splice;
+ struct {
+ struct cl_page_list vui_queue;
+ unsigned long vui_written;
+ int vui_from;
+ int vui_to;
+ } write;
+ } u;
+
+ enum vvp_io_subtype vui_io_subtype;
+
+ /**
+ * Layout version when this IO is initialized
+ */
+ __u32 vui_layout_gen;
+ /**
+ * File descriptor against which IO is done.
+ */
+ struct ll_file_data *vui_fd;
+ struct kiocb *vui_iocb;
+
+ /* Readahead state. */
+ pgoff_t vui_ra_start;
+ pgoff_t vui_ra_count;
+ /* Set when vui_ra_{start,count} have been initialized. */
+ bool vui_ra_valid;
+};
+
+extern struct lu_device_type vvp_device_type;
+
+extern struct lu_context_key vvp_session_key;
+extern struct lu_context_key vvp_thread_key;
+
+extern struct kmem_cache *vvp_lock_kmem;
+extern struct kmem_cache *vvp_object_kmem;
+extern struct kmem_cache *vvp_req_kmem;
+
+struct vvp_thread_info {
+ struct cl_lock vti_lock;
+ struct cl_lock_descr vti_descr;
+ struct cl_io vti_io;
+ struct cl_attr vti_attr;
+};
+
+static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
+{
+ struct vvp_thread_info *vti;
+
+ vti = lu_context_key_get(&env->le_ctx, &vvp_thread_key);
+ LASSERT(vti);
+
+ return vti;
+}
+
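+/*
+ * Per-thread scratch buffers kept in vvp_thread_info; each accessor
+ * below zeroes its buffer before handing it out, so the contents never
+ * survive across calls.
+ */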
+static inline struct cl_lock *vvp_env_lock(const struct lu_env *env)
+{
+ struct cl_lock *lock = &vvp_env_info(env)->vti_lock;
+
+ memset(lock, 0, sizeof(*lock));
+ return lock;
+}
+
+static inline struct cl_attr *vvp_env_thread_attr(const struct lu_env *env)
+{
+ struct cl_attr *attr = &vvp_env_info(env)->vti_attr;
+
+ memset(attr, 0, sizeof(*attr));
+
+ return attr;
+}
+
+static inline struct cl_io *vvp_env_thread_io(const struct lu_env *env)
+{
+ struct cl_io *io = &vvp_env_info(env)->vti_io;
+
+ memset(io, 0, sizeof(*io));
+
+ return io;
+}
+
+struct vvp_session {
+ struct vvp_io cs_ios;
+};
+
+static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
+{
+ struct vvp_session *ses;
+
+ ses = lu_context_key_get(env->le_ses, &vvp_session_key);
+ LASSERT(ses);
+
+ return ses;
+}
+
+static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
+{
+ return &vvp_env_session(env)->cs_ios;
+}
+
+/**
+ * VVP-private object state.
+ */
+struct vvp_object {
+ struct cl_object_header vob_header;
+ struct cl_object vob_cl;
+ struct inode *vob_inode;
+
+ /**
+ * A list of dirty pages pending IO in the cache. Used by
+ * SOM. Protected by ll_inode_info::lli_lock.
+ *
+ * \see vvp_page::vpg_pending_linkage
+ */
+ struct list_head vob_pending_list;
+
+ /**
+	 * Access to this counter is protected by inode->i_sem. Now that
+	 * the lifetime of transient pages must be covered by the inode
+	 * semaphore, we don't need to hold any lock.
+ */
+ int vob_transient_pages;
+ /**
+ * Number of outstanding mmaps on this file.
+ *
+ * \see ll_vm_open(), ll_vm_close().
+ */
+ atomic_t vob_mmap_cnt;
+
+ /**
+ * various flags
+ * vob_discard_page_warned
+	 * if pages belonging to this object are discarded when a client
+	 * is evicted, some debug info will be printed; this flag is set
+	 * while processing the first discarded page, so that the debug
+	 * message is not flooded for lots of discarded pages.
+ *
+ * \see ll_dirty_page_discard_warn.
+ */
+ unsigned int vob_discard_page_warned:1;
+};
+
+/**
+ * VVP-private page state.
+ */
+struct vvp_page {
+ struct cl_page_slice vpg_cl;
+ int vpg_defer_uptodate;
+ int vpg_ra_used;
+ int vpg_write_queued;
+ /**
+ * Non-empty iff this page is already counted in
+ * vvp_object::vob_pending_list. This list is only used as a flag,
+ * that is, never iterated through, only checked for list_empty(), but
+ * having a list is useful for debugging.
+ */
+ struct list_head vpg_pending_linkage;
+ /** VM page */
+ struct page *vpg_page;
+};
+
+static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
+{
+ return container_of(slice, struct vvp_page, vpg_cl);
+}
+
+static inline pgoff_t vvp_index(struct vvp_page *vvp)
+{
+ return vvp->vpg_cl.cpl_index;
+}
+
+struct vvp_device {
+ struct cl_device vdv_cl;
+ struct super_block *vdv_sb;
+ struct cl_device *vdv_next;
+};
+
+struct vvp_lock {
+ struct cl_lock_slice vlk_cl;
+};
+
+struct vvp_req {
+ struct cl_req_slice vrq_cl;
+};
+
+void *ccc_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key);
+void ccc_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data);
+
+void ccc_umount(const struct lu_env *env, struct cl_device *dev);
+
+static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv)
+{
+ return &vdv->vdv_cl.cd_lu_dev;
+}
+
+static inline struct vvp_device *lu2vvp_dev(const struct lu_device *d)
+{
+ return container_of0(d, struct vvp_device, vdv_cl.cd_lu_dev);
+}
+
+static inline struct vvp_device *cl2vvp_dev(const struct cl_device *d)
+{
+ return container_of0(d, struct vvp_device, vdv_cl);
+}
+
+static inline struct vvp_object *cl2vvp(const struct cl_object *obj)
+{
+ return container_of0(obj, struct vvp_object, vob_cl);
+}
+
+static inline struct vvp_object *lu2vvp(const struct lu_object *obj)
+{
+ return container_of0(obj, struct vvp_object, vob_cl.co_lu);
+}
+
+static inline struct inode *vvp_object_inode(const struct cl_object *obj)
+{
+ return cl2vvp(obj)->vob_inode;
+}
+
+int vvp_object_invariant(const struct cl_object *obj);
+struct vvp_object *cl_inode2vvp(struct inode *inode);
+
+static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
+{
+ return cl2vvp_page(slice)->vpg_page;
+}
+
+static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
+{
+ return container_of(slice, struct vvp_lock, vlk_cl);
+}
+
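+/*
+ * Invariant checking is compiled out here; the sizeof()s only keep the
+ * arguments type-checked without ever evaluating the expression.
+ */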
+# define CLOBINVRNT(env, clob, expr) \
+ ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
+
+/**
+ * New interfaces to get and put lov_stripe_md from lov layer. This violates
+ * layering because lov_stripe_md is supposed to be private data of lov.
+ *
+ * NB: If you find you have to use these interfaces for your new code, please
+ * think about it again. These interfaces may be removed in the future for
+ * better layering.
+ */
+struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
+void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
+int lov_read_and_clear_async_rc(struct cl_object *clob);
+
+struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
+void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
+
+int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io);
+int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
+int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io);
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
+int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req);
struct lu_object *vvp_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
-struct ccc_object *cl_inode2ccc(struct inode *inode);
+int vvp_global_init(void);
+void vvp_global_fini(void);
extern const struct file_operations vvp_dump_pgcache_file_ops;
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 85a835976..5bf9592ae 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -44,21 +44,30 @@
#include "../include/obd.h"
#include "../include/lustre_lite.h"
+#include "llite_internal.h"
#include "vvp_internal.h"
-static struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice);
+struct vvp_io *cl2vvp_io(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct vvp_io *vio;
+
+ vio = container_of(slice, struct vvp_io, vui_cl);
+ LASSERT(vio == vvp_env_io(env));
+
+ return vio;
+}
/**
 * True if \a io is a normal io, false for splice_{read,write}.
*/
-int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
+static int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
struct vvp_io *vio = vvp_env_io(env);
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- return vio->cui_io_subtype == IO_NORMAL;
+ return vio->vui_io_subtype == IO_NORMAL;
}
/**
@@ -71,7 +80,7 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_io *cio = ccc_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
bool rc = true;
switch (io->ci_type) {
@@ -80,7 +89,7 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
	/* don't need a lock here to check lli_layout_gen as we have held
	 * the extent lock, and the GROUP lock has to be held to swap layout
*/
- if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
+ if (ll_layout_version_get(lli) != vio->vui_layout_gen) {
io->ci_need_restart = 1;
/* this will return application a short read/write */
io->ci_continue = 0;
@@ -95,20 +104,187 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
return rc;
}
+static void vvp_object_size_lock(struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+
+ ll_inode_size_lock(inode);
+ cl_object_attr_lock(obj);
+}
+
+static void vvp_object_size_unlock(struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+
+ cl_object_attr_unlock(obj);
+ ll_inode_size_unlock(inode);
+}
+
+/**
+ * Helper function that adjusts the file size (inode->i_size), if necessary,
+ * when the position at offset \a pos is accessed. The file size can be
+ * arbitrarily stale on a Lustre client, but the client at least knows the
+ * KMS. If the accessed area is inside [0, KMS], set the file size to KMS;
+ * otherwise glimpse the file size.
+ *
+ * Locking: cl_isize_lock is used to serialize changes to inode size and to
+ * protect consistency between inode size and cl_object
+ * attributes. cl_object_size_lock() protects consistency between cl_attr's of
+ * top-object and sub-objects.
+ */
+static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io, loff_t start, size_t count,
+ int *exceed)
+{
+ struct cl_attr *attr = vvp_env_thread_attr(env);
+ struct inode *inode = vvp_object_inode(obj);
+ loff_t pos = start + count - 1;
+ loff_t kms;
+ int result;
+
+ /*
+ * Consistency guarantees: following possibilities exist for the
+ * relation between region being accessed and real file size at this
+ * moment:
+ *
+ * (A): the region is completely inside of the file;
+ *
+ * (B-x): x bytes of region are inside of the file, the rest is
+ * outside;
+ *
+ * (C): the region is completely outside of the file.
+ *
+ * This classification is stable under DLM lock already acquired by
+ * the caller, because to change the class, other client has to take
+ * DLM lock conflicting with our lock. Also, any updates to ->i_size
+ * by other threads on this client are serialized by
+ * ll_inode_size_lock(). This guarantees that short reads are handled
+ * correctly in the face of concurrent writes and truncates.
+ */
+ vvp_object_size_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ if (result == 0) {
+ kms = attr->cat_kms;
+ if (pos > kms) {
+ /*
+ * A glimpse is necessary to determine whether we
+ * return a short read (B) or some zeroes at the end
+ * of the buffer (C)
+ */
+ vvp_object_size_unlock(obj);
+ result = cl_glimpse_lock(env, io, inode, obj, 0);
+ if (result == 0 && exceed) {
+				/* If the object's page index exceeds the
+				 * end-of-file page index, return directly.
+				 * Do not expect the kernel to check such a
+				 * case correctly; linux-2.6.18-128.1.1
+				 * missed doing that.
+ * --bug 17336
+ */
+ loff_t size = i_size_read(inode);
+ loff_t cur_index = start >> PAGE_SHIFT;
+ loff_t size_index = (size - 1) >> PAGE_SHIFT;
+
+ if ((size == 0 && cur_index != 0) ||
+ size_index < cur_index)
+ *exceed = 1;
+ }
+ return result;
+ }
+ /*
+ * region is within kms and, hence, within real file
+ * size (A). We need to increase i_size to cover the
+ * read region so that generic_file_read() will do its
+ * job, but that doesn't mean the kms size is
+ * _correct_, it is only the _minimum_ size. If
+ * someone does a stat they will get the correct size
+ * which will always be >= the kms value here.
+ * b=11081
+ */
+ if (i_size_read(inode) < kms) {
+ i_size_write(inode, kms);
+ CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ (__u64)i_size_read(inode));
+ }
+ }
+
+ vvp_object_size_unlock(obj);
+
+ return result;
+}
+
/*****************************************************************************
*
* io operations.
*
*/
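+/*
+ * Fill in a cl_lock_descr for the page range [start, end] and queue it
+ * on the io; group-locked files always enqueue a CLM_GROUP lock with
+ * the group's gid regardless of the requested mode.
+ */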
+static int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
+ __u32 enqflags, enum cl_lock_mode mode,
+ pgoff_t start, pgoff_t end)
+{
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_lock_descr *descr = &vio->vui_link.cill_descr;
+ struct cl_object *obj = io->ci_obj;
+
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+ CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
+
+ memset(&vio->vui_link, 0, sizeof(vio->vui_link));
+
+ if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ descr->cld_mode = CLM_GROUP;
+ descr->cld_gid = vio->vui_fd->fd_grouplock.lg_gid;
+ } else {
+ descr->cld_mode = mode;
+ }
+ descr->cld_obj = obj;
+ descr->cld_start = start;
+ descr->cld_end = end;
+ descr->cld_enq_flags = enqflags;
+
+ cl_io_lock_add(env, io, &vio->vui_link);
+ return 0;
+}
+
+static int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
+ __u32 enqflags, enum cl_lock_mode mode,
+ loff_t start, loff_t end)
+{
+ struct cl_object *obj = io->ci_obj;
+
+ return vvp_io_one_lock_index(env, io, enqflags, mode,
+ cl_index(obj, start), cl_index(obj, end));
+}
+
+static int vvp_io_write_iter_init(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+
+ cl_page_list_init(&vio->u.write.vui_queue);
+ vio->u.write.vui_written = 0;
+ vio->u.write.vui_from = 0;
+ vio->u.write.vui_to = PAGE_SIZE;
+
+ return 0;
+}
+
+static void vvp_io_write_iter_fini(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+
+ LASSERT(vio->u.write.vui_queue.pl_nr == 0);
+}
+
static int vvp_io_fault_iter_init(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct vvp_io *vio = cl2vvp_io(env, ios);
- struct inode *inode = ccc_object_inode(ios->cis_obj);
+ struct inode *inode = vvp_object_inode(ios->cis_obj);
- LASSERT(inode ==
- file_inode(cl2ccc_io(env, ios)->cui_fd->fd_file));
+ LASSERT(inode == file_inode(vio->vui_fd->fd_file));
vio->u.fault.ft_mtime = inode->i_mtime.tv_sec;
return 0;
}
@@ -117,15 +293,16 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct inode *inode = vvp_object_inode(obj);
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
CDEBUG(D_VFSTRACE, DFID
" ignore/verify layout %d/%d, layout version %d restore needed %d\n",
PFID(lu_object_fid(&obj->co_lu)),
io->ci_ignore_layout, io->ci_verify_layout,
- cio->cui_layout_gen, io->ci_restore_needed);
+ vio->vui_layout_gen, io->ci_restore_needed);
if (io->ci_restore_needed == 1) {
int rc;
@@ -133,7 +310,7 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
		/* the file was detected released; we need to restore it
* before finishing the io
*/
- rc = ll_layout_restore(ccc_object_inode(obj));
+ rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF);
/* if restore registration failed, no restart,
* we will return -ENODATA
*/
@@ -159,16 +336,16 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
__u32 gen = 0;
/* check layout version */
- ll_layout_refresh(ccc_object_inode(obj), &gen);
- io->ci_need_restart = cio->cui_layout_gen != gen;
+ ll_layout_refresh(inode, &gen);
+ io->ci_need_restart = vio->vui_layout_gen != gen;
if (io->ci_need_restart) {
CDEBUG(D_VFSTRACE,
DFID" layout changed from %d to %d.\n",
PFID(lu_object_fid(&obj->co_lu)),
- cio->cui_layout_gen, gen);
+ vio->vui_layout_gen, gen);
/* today successful restore is the only possible case */
/* restore was done, clear restoring state */
- ll_i2info(ccc_object_inode(obj))->lli_flags &=
+ ll_i2info(vvp_object_inode(obj))->lli_flags &=
~LLIF_FILE_RESTORING;
}
}
@@ -180,7 +357,7 @@ static void vvp_io_fault_fini(const struct lu_env *env,
struct cl_io *io = ios->cis_io;
struct cl_page *page = io->u.ci_fault.ft_page;
- CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
+ CLOBINVRNT(env, io->ci_obj, vvp_object_invariant(io->ci_obj));
if (page) {
lu_ref_del(&page->cp_reference, "fault", io);
@@ -203,16 +380,16 @@ static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
}
static int vvp_mmap_locks(const struct lu_env *env,
- struct ccc_io *vio, struct cl_io *io)
+ struct vvp_io *vio, struct cl_io *io)
{
- struct ccc_thread_info *cti = ccc_env_info(env);
+ struct vvp_thread_info *cti = vvp_env_info(env);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- struct cl_lock_descr *descr = &cti->cti_descr;
+ struct cl_lock_descr *descr = &cti->vti_descr;
ldlm_policy_data_t policy;
unsigned long addr;
ssize_t count;
- int result;
+ int result = 0;
struct iov_iter i;
struct iovec iov;
@@ -221,21 +398,21 @@ static int vvp_mmap_locks(const struct lu_env *env,
if (!cl_is_normalio(env, io))
return 0;
- if (!vio->cui_iter) /* nfs or loop back device write */
+ if (!vio->vui_iter) /* nfs or loop back device write */
return 0;
/* No MM (e.g. NFS)? No vmas too. */
if (!mm)
return 0;
- iov_for_each(iov, i, *(vio->cui_iter)) {
+ iov_for_each(iov, i, *vio->vui_iter) {
addr = (unsigned long)iov.iov_base;
count = iov.iov_len;
if (count == 0)
continue;
- count += addr & (~CFS_PAGE_MASK);
- addr &= CFS_PAGE_MASK;
+ count += addr & (~PAGE_MASK);
+ addr &= PAGE_MASK;
down_read(&mm->mmap_sem);
while ((vma = our_vma(mm, addr, count)) != NULL) {
@@ -244,10 +421,10 @@ static int vvp_mmap_locks(const struct lu_env *env,
if (ll_file_nolock(vma->vm_file)) {
/*
- * For no lock case, a lockless lock will be
- * generated.
+				 * The "no lock" case is not allowed for mmap.
*/
- flags = CEF_NEVER;
+ result = -EINVAL;
+ break;
}
/*
@@ -269,10 +446,8 @@ static int vvp_mmap_locks(const struct lu_env *env,
descr->cld_mode, descr->cld_start,
descr->cld_end);
- if (result < 0) {
- up_read(&mm->mmap_sem);
- return result;
- }
+ if (result < 0)
+ break;
if (vma->vm_end - addr >= count)
break;
@@ -281,26 +456,55 @@ static int vvp_mmap_locks(const struct lu_env *env,
addr = vma->vm_end;
}
up_read(&mm->mmap_sem);
+ if (result < 0)
+ break;
}
- return 0;
+ return result;
+}
+
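+/*
+ * Advance the io by \a nob bytes: reduce the remaining total and
+ * re-expand the iov_iter to the new remainder (normal io only).
+ */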
+static void vvp_io_advance(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ size_t nob)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = ios->cis_io->ci_obj;
+
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+ if (!cl_is_normalio(env, io))
+ return;
+
+ iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count -= nob);
+}
+
+static void vvp_io_update_iov(const struct lu_env *env,
+ struct vvp_io *vio, struct cl_io *io)
+{
+ size_t size = io->u.ci_rw.crw_count;
+
+ if (!cl_is_normalio(env, io) || !vio->vui_iter)
+ return;
+
+ iov_iter_truncate(vio->vui_iter, size);
}
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
enum cl_lock_mode mode, loff_t start, loff_t end)
{
- struct ccc_io *cio = ccc_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
int result;
int ast_flags = 0;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- ccc_io_update_iov(env, cio, io);
+ vvp_io_update_iov(env, vio, io);
if (io->u.ci_rw.crw_nonblock)
ast_flags |= CEF_NONBLOCK;
- result = vvp_mmap_locks(env, cio, io);
+ result = vvp_mmap_locks(env, vio, io);
if (result == 0)
- result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
+ result = vvp_io_one_lock(env, io, ast_flags, mode, start, end);
return result;
}
@@ -325,9 +529,11 @@ static int vvp_io_fault_lock(const struct lu_env *env,
/*
* XXX LDLM_FL_CBPENDING
*/
- return ccc_io_one_lock_index
- (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
- io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
+ return vvp_io_one_lock_index(env,
+ io, 0,
+ vvp_mode_from_vma(vio->u.fault.ft_vma),
+ io->u.ci_fault.ft_index,
+ io->u.ci_fault.ft_index);
}
static int vvp_io_write_lock(const struct lu_env *env,
@@ -354,14 +560,13 @@ static int vvp_io_setattr_iter_init(const struct lu_env *env,
}
/**
- * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
+ * Implementation of cl_io_operations::vio_lock() method for CIT_SETATTR io.
*
* Handles "lockless io" mode when extent locking is done by server.
*/
static int vvp_io_setattr_lock(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = ccc_env_io(env);
struct cl_io *io = ios->cis_io;
__u64 new_size;
__u32 enqflags = 0;
@@ -378,8 +583,8 @@ static int vvp_io_setattr_lock(const struct lu_env *env,
return 0;
new_size = 0;
}
- cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
- return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
+
+ return vvp_io_one_lock(env, io, enqflags, CLM_WRITE,
new_size, OBD_OBJECT_EOF);
}
@@ -413,7 +618,7 @@ static int vvp_io_setattr_time(const struct lu_env *env,
{
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct cl_attr *attr = ccc_env_thread_attr(env);
+ struct cl_attr *attr = vvp_env_thread_attr(env);
int result;
unsigned valid = CAT_CTIME;
@@ -437,7 +642,7 @@ static int vvp_io_setattr_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
+ struct inode *inode = vvp_object_inode(io->ci_obj);
int result = 0;
inode_lock(inode);
@@ -453,7 +658,7 @@ static void vvp_io_setattr_end(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
+ struct inode *inode = vvp_object_inode(io->ci_obj);
if (cl_io_is_trunc(io))
/* Truncate in memory pages - they must be clean pages
@@ -474,27 +679,25 @@ static int vvp_io_read_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct vvp_io *vio = cl2vvp_io(env, ios);
- struct ccc_io *cio = cl2ccc_io(env, ios);
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
- struct ll_ra_read *bead = &vio->cui_bead;
- struct file *file = cio->cui_fd->fd_file;
+ struct inode *inode = vvp_object_inode(obj);
+ struct file *file = vio->vui_fd->fd_file;
int result;
loff_t pos = io->u.ci_rd.rd.crw_pos;
long cnt = io->u.ci_rd.rd.crw_count;
- long tot = cio->cui_tot_count;
+ long tot = vio->vui_tot_count;
int exceed = 0;
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
if (!can_populate_pages(env, io, inode))
return 0;
- result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
+ result = vvp_prep_size(env, obj, io, pos, tot, &exceed);
if (result != 0)
return result;
else if (exceed != 0)
@@ -505,30 +708,27 @@ static int vvp_io_read_start(const struct lu_env *env,
inode->i_ino, cnt, pos, i_size_read(inode));
/* turn off the kernel's read-ahead */
- cio->cui_fd->fd_file->f_ra.ra_pages = 0;
+ vio->vui_fd->fd_file->f_ra.ra_pages = 0;
/* initialize read-ahead window once per syscall */
- if (!vio->cui_ra_window_set) {
- vio->cui_ra_window_set = 1;
- bead->lrr_start = cl_index(obj, pos);
- /*
- * XXX: explicit PAGE_SIZE
- */
- bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
- ll_ra_read_in(file, bead);
+ if (!vio->vui_ra_valid) {
+ vio->vui_ra_valid = true;
+ vio->vui_ra_start = cl_index(obj, pos);
+ vio->vui_ra_count = cl_index(obj, tot + PAGE_SIZE - 1);
+ ll_ras_enter(file);
}
/* BUG: 5972 */
file_accessed(file);
- switch (vio->cui_io_subtype) {
+ switch (vio->vui_io_subtype) {
case IO_NORMAL:
- LASSERT(cio->cui_iocb->ki_pos == pos);
- result = generic_file_read_iter(cio->cui_iocb, cio->cui_iter);
+ LASSERT(vio->vui_iocb->ki_pos == pos);
+ result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter);
break;
case IO_SPLICE:
result = generic_file_splice_read(file, &pos,
- vio->u.splice.cui_pipe, cnt,
- vio->u.splice.cui_flags);
+ vio->u.splice.vui_pipe, cnt,
+ vio->u.splice.vui_flags);
		/* LU-1109: do splice read stripe by stripe, otherwise it
		 * may make nfsd stuck if this read occupies all internal pipe
* buffers.
@@ -536,7 +736,7 @@ static int vvp_io_read_start(const struct lu_env *env,
io->ci_continue = 0;
break;
default:
- CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
+ CERROR("Wrong IO type %u\n", vio->vui_io_subtype);
LBUG();
}
@@ -546,30 +746,201 @@ out:
io->ci_continue = 0;
io->ci_nob += result;
ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- cio->cui_fd, pos, result, READ);
+ vio->vui_fd, pos, result, READ);
result = 0;
}
return result;
}
-static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios)
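+/*
+ * Synchronously submit the pages in \a plist for write, clipping the
+ * first and last page to [from, to); returns the number of bytes
+ * committed, or a negative errno on failure.
+ */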
+static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *plist, int from, int to)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct cl_2queue *queue = &io->ci_queue;
+ struct cl_page *page;
+ unsigned int bytes = 0;
+ int rc = 0;
- if (vio->cui_ra_window_set)
- ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);
+ if (plist->pl_nr == 0)
+ return 0;
- vvp_io_fini(env, ios);
+ if (from > 0 || to != PAGE_SIZE) {
+ page = cl_page_list_first(plist);
+ if (plist->pl_nr == 1) {
+ cl_page_clip(env, page, from, to);
+ } else {
+ if (from > 0)
+ cl_page_clip(env, page, from, PAGE_SIZE);
+ if (to != PAGE_SIZE) {
+ page = cl_page_list_last(plist);
+ cl_page_clip(env, page, 0, to);
+ }
+ }
+ }
+
+ cl_2queue_init(queue);
+ cl_page_list_splice(plist, &queue->c2_qin);
+ rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0);
+
+ /* plist is not sorted any more */
+ cl_page_list_splice(&queue->c2_qin, plist);
+ cl_page_list_splice(&queue->c2_qout, plist);
+ cl_2queue_fini(env, queue);
+
+ if (rc == 0) {
+ /* calculate bytes */
+ bytes = plist->pl_nr << PAGE_SHIFT;
+ bytes -= from + PAGE_SIZE - to;
+
+ while (plist->pl_nr > 0) {
+ page = cl_page_list_first(plist);
+ cl_page_list_del(env, plist, page);
+
+ cl_page_clip(env, page, 0, PAGE_SIZE);
+
+ SetPageUptodate(cl_page_vmpage(page));
+ cl_page_disown(env, io, page);
+
+ /* held in ll_cl_init() */
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+ }
+ }
+
+ return bytes > 0 ? bytes : rc;
+}
+
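+/*
+ * Per-page callback for asynchronous commit: mark the vmpage uptodate
+ * and dirty, account it as write-pending on the object, then drop the
+ * io's ownership of and reference on the page.
+ */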
+static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page)
+{
+ struct vvp_page *vpg;
+ struct page *vmpage = page->cp_vmpage;
+ struct cl_object *clob = cl_io_top(io)->ci_obj;
+
+ SetPageUptodate(vmpage);
+ set_page_dirty(vmpage);
+
+ vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+ vvp_write_pending(cl2vvp(clob), vpg);
+
+ cl_page_disown(env, io, page);
+
+ /* held in ll_cl_init() */
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+}
+
+/* make sure the page list is contiguous */
+static bool page_list_sanity_check(struct cl_object *obj,
+ struct cl_page_list *plist)
+{
+ struct cl_page *page;
+ pgoff_t index = CL_PAGE_EOF;
+
+ cl_page_list_for_each(page, plist) {
+		struct vvp_page *vpg =
+			cl2vvp_page(cl_object_page_slice(obj, page));
+
+ if (index == CL_PAGE_EOF) {
+ index = vvp_index(vpg);
+ continue;
+ }
+
+ ++index;
+ if (index == vvp_index(vpg))
+ continue;
+
+ return false;
+ }
+ return true;
+}
+
+/* Return how many bytes were queued or written */
+int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
+{
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = vvp_object_inode(obj);
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_page_list *queue = &vio->u.write.vui_queue;
+ struct cl_page *page;
+ int rc = 0;
+ int bytes = 0;
+ unsigned int npages = vio->u.write.vui_queue.pl_nr;
+
+ if (npages == 0)
+ return 0;
+
+ CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
+ npages, vio->u.write.vui_from, vio->u.write.vui_to);
+
+ LASSERT(page_list_sanity_check(obj, queue));
+
+ /* submit IO with async write */
+ rc = cl_io_commit_async(env, io, queue,
+ vio->u.write.vui_from, vio->u.write.vui_to,
+ write_commit_callback);
+ npages -= queue->pl_nr; /* already committed pages */
+ if (npages > 0) {
+ /* calculate how many bytes were written */
+ bytes = npages << PAGE_SHIFT;
+
+ /* first page */
+ bytes -= vio->u.write.vui_from;
+ if (queue->pl_nr == 0) /* last page */
+ bytes -= PAGE_SIZE - vio->u.write.vui_to;
+ LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
+
+ vio->u.write.vui_written += bytes;
+
+ CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
+ npages, bytes, vio->u.write.vui_written);
+
+ /* the first page must have been written. */
+ vio->u.write.vui_from = 0;
+ }
+ LASSERT(page_list_sanity_check(obj, queue));
+ LASSERT(ergo(rc == 0, queue->pl_nr == 0));
+
+ /* out of quota, try sync write */
+ if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
+ rc = vvp_io_commit_sync(env, io, queue,
+ vio->u.write.vui_from,
+ vio->u.write.vui_to);
+ if (rc > 0) {
+ vio->u.write.vui_written += rc;
+ rc = 0;
+ }
+ }
+
+ /* update inode size */
+ ll_merge_attr(env, inode);
+
+	/* The pages left in queue failed to commit; discard them
+	 * unless they were dirtied before.
+ */
+ while (queue->pl_nr > 0) {
+ page = cl_page_list_first(queue);
+ cl_page_list_del(env, queue, page);
+
+ if (!PageDirty(cl_page_vmpage(page)))
+ cl_page_discard(env, io, page);
+
+ cl_page_disown(env, io, page);
+
+ /* held in ll_cl_init() */
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+ }
+ cl_page_list_fini(env, queue);
+
+ return rc;
}
static int vvp_io_write_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct vvp_io *vio = cl2vvp_io(env, ios);
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
ssize_t result = 0;
loff_t pos = io->u.ci_wr.wr.crw_pos;
size_t cnt = io->u.ci_wr.wr.crw_count;
@@ -582,25 +953,41 @@ static int vvp_io_write_start(const struct lu_env *env,
* PARALLEL IO This has to be changed for parallel IO doing
* out-of-order writes.
*/
+ ll_merge_attr(env, inode);
pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
- cio->cui_iocb->ki_pos = pos;
+ vio->vui_iocb->ki_pos = pos;
} else {
- LASSERT(cio->cui_iocb->ki_pos == pos);
+ LASSERT(vio->vui_iocb->ki_pos == pos);
}
CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
- if (!cio->cui_iter) /* from a temp io in ll_cl_init(). */
+ if (!vio->vui_iter) /* from a temp io in ll_cl_init(). */
result = 0;
else
- result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter);
+ result = generic_file_write_iter(vio->vui_iocb, vio->vui_iter);
+
+ if (result > 0) {
+ result = vvp_io_write_commit(env, io);
+ if (vio->u.write.vui_written > 0) {
+ result = vio->u.write.vui_written;
+ io->ci_nob += result;
+ CDEBUG(D_VFSTRACE, "write: nob %zd, result: %zd\n",
+ io->ci_nob, result);
+ }
+ }
if (result > 0) {
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+
if (result < cnt)
io->ci_continue = 0;
- io->ci_nob += result;
ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- cio->cui_fd, pos, result, WRITE);
+ vio->vui_fd, pos, result, WRITE);
result = 0;
}
return result;
@@ -608,10 +995,10 @@ static int vvp_io_write_start(const struct lu_env *env,
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
- struct vm_fault *vmf = cfio->fault.ft_vmf;
+ struct vm_fault *vmf = cfio->ft_vmf;
- cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
- cfio->fault.ft_flags_valid = 1;
+ cfio->ft_flags = filemap_fault(cfio->ft_vma, vmf);
+ cfio->ft_flags_valid = 1;
if (vmf->page) {
CDEBUG(D_PAGE,
@@ -619,39 +1006,51 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
vmf->page, vmf->page->mapping, vmf->page->index,
(long)vmf->page->flags, page_count(vmf->page),
page_private(vmf->page), vmf->virtual_address);
- if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
+ if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
lock_page(vmf->page);
- cfio->fault.ft_flags |= VM_FAULT_LOCKED;
+ cfio->ft_flags |= VM_FAULT_LOCKED;
}
cfio->ft_vmpage = vmf->page;
return 0;
}
- if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
+ if (cfio->ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
return -EFAULT;
}
- if (cfio->fault.ft_flags & VM_FAULT_OOM) {
+ if (cfio->ft_flags & VM_FAULT_OOM) {
CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
return -ENOMEM;
}
- if (cfio->fault.ft_flags & VM_FAULT_RETRY)
+ if (cfio->ft_flags & VM_FAULT_RETRY)
return -EAGAIN;
- CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags);
+ CERROR("Unknown error in page fault %d!\n", cfio->ft_flags);
return -EINVAL;
}
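+
+/*
+ * Per-page callback for the mkwrite case: just dirty the vmpage and
+ * account it as write-pending on the object.
+ */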
+static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page)
+{
+ struct vvp_page *vpg;
+ struct cl_object *clob = cl_io_top(io)->ci_obj;
+
+ set_page_dirty(page->cp_vmpage);
+
+ vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+ vvp_write_pending(cl2vvp(clob), vpg);
+}
+
static int vvp_io_fault_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct vvp_io *vio = cl2vvp_io(env, ios);
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
struct cl_fault_io *fio = &io->u.ci_fault;
struct vvp_fault_io *cfio = &vio->u.fault;
loff_t offset;
@@ -659,7 +1058,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
struct page *vmpage = NULL;
struct cl_page *page;
loff_t size;
- pgoff_t last; /* last page in a file data region */
+ pgoff_t last_index;
if (fio->ft_executable &&
inode->i_mtime.tv_sec != vio->u.fault.ft_mtime)
@@ -670,7 +1069,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
/* offset of the last byte on the page */
offset = cl_offset(obj, fio->ft_index + 1) - 1;
LASSERT(cl_index(obj, offset) == fio->ft_index);
- result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
+ result = vvp_prep_size(env, obj, io, 0, offset + 1, NULL);
if (result != 0)
return result;
@@ -705,15 +1104,15 @@ static int vvp_io_fault_start(const struct lu_env *env,
goto out;
}
+ last_index = cl_index(obj, size - 1);
+
if (fio->ft_mkwrite) {
- pgoff_t last_index;
/*
		 * Capture the size while holding the lli_trunc_sem from above;
		 * we want to make sure that we complete the mkwrite action
* while holding this lock. We need to make sure that we are
* not past the end of the file.
*/
- last_index = cl_index(obj, size - 1);
if (last_index < fio->ft_index) {
CDEBUG(D_PAGE,
"llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
@@ -745,25 +1144,32 @@ static int vvp_io_fault_start(const struct lu_env *env,
*/
if (fio->ft_mkwrite) {
wait_on_page_writeback(vmpage);
- if (set_page_dirty(vmpage)) {
- struct ccc_page *cp;
+ if (!PageDirty(vmpage)) {
+ struct cl_page_list *plist = &io->ci_queue.c2_qin;
+			struct vvp_page *vpg =
+				cl2vvp_page(cl_object_page_slice(obj, page));
+ int to = PAGE_SIZE;
/* vvp_page_assume() calls wait_on_page_writeback(). */
cl_page_assume(env, io, page);
- cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
- vvp_write_pending(cl2ccc(obj), cp);
+ cl_page_list_init(plist);
+ cl_page_list_add(plist, page);
+
+ /* size fixup */
+ if (last_index == vvp_index(vpg))
+ to = size & ~PAGE_MASK;
/* Do not set Dirty bit here so that in case IO is
* started before the page is really made dirty, we
* still have chance to detect it.
*/
- result = cl_page_cache_add(env, io, page, CRT_WRITE);
+ result = cl_io_commit_async(env, io, plist, 0, to,
+ mkwrite_commit_callback);
LASSERT(cl_page_is_owned(page, io));
+ cl_page_list_fini(env, plist);
vmpage = NULL;
if (result < 0) {
- cl_page_unmap(env, io, page);
cl_page_discard(env, io, page);
cl_page_disown(env, io, page);
@@ -773,20 +1179,20 @@ static int vvp_io_fault_start(const struct lu_env *env,
if (result == -EDQUOT)
result = -ENOSPC;
goto out;
- } else
+ } else {
cl_page_disown(env, io, page);
+ }
}
}
- last = cl_index(obj, size - 1);
/*
* The ft_index is only used in the case of
* a mkwrite action. We need to check
* our assertions are correct, since
* we should have caught this above
*/
- LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
- if (fio->ft_index == last)
+ LASSERT(!fio->ft_mkwrite || fio->ft_index <= last_index);
+ if (fio->ft_index == last_index)
/*
* Last page is mapped partially.
*/
@@ -801,7 +1207,9 @@ out:
/* return unlocked vmpage to avoid deadlocking */
if (vmpage)
unlock_page(vmpage);
- cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
+
+ cfio->ft_flags &= ~VM_FAULT_LOCKED;
+
return result;
}
@@ -820,293 +1228,58 @@ static int vvp_io_read_page(const struct lu_env *env,
const struct cl_page_slice *slice)
{
struct cl_io *io = ios->cis_io;
- struct cl_object *obj = slice->cpl_obj;
- struct ccc_page *cp = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
struct cl_page *page = slice->cpl_page;
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(slice->cpl_obj);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd;
+ struct ll_file_data *fd = cl2vvp_io(env, ios)->vui_fd;
struct ll_readahead_state *ras = &fd->fd_ras;
- struct page *vmpage = cp->cpg_page;
struct cl_2queue *queue = &io->ci_queue;
- int rc;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- LASSERT(slice->cpl_obj == obj);
if (sbi->ll_ra_info.ra_max_pages_per_file &&
sbi->ll_ra_info.ra_max_pages)
- ras_update(sbi, inode, ras, page->cp_index,
- cp->cpg_defer_uptodate);
-
- /* Sanity check whether the page is protected by a lock. */
- rc = cl_page_is_under_lock(env, io, page);
- if (rc != -EBUSY) {
- CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
- rc == -ENODATA ? "without a lock" :
- "match failed", rc);
- if (rc != -ENODATA)
- return rc;
- }
+ ras_update(sbi, inode, ras, vvp_index(vpg),
+ vpg->vpg_defer_uptodate);
- if (cp->cpg_defer_uptodate) {
- cp->cpg_ra_used = 1;
+ if (vpg->vpg_defer_uptodate) {
+ vpg->vpg_ra_used = 1;
cl_page_export(env, page, 1);
}
/*
	 * Add the page into the queue even when it is marked uptodate above;
	 * this will unlock it automatically as part of cl_page_list_disown().
*/
+
cl_page_list_add(&queue->c2_qin, page);
if (sbi->ll_ra_info.ra_max_pages_per_file &&
sbi->ll_ra_info.ra_max_pages)
- ll_readahead(env, io, ras,
- vmpage->mapping, &queue->c2_qin, fd->fd_flags);
+ ll_readahead(env, io, &queue->c2_qin, ras,
+ vpg->vpg_defer_uptodate);
return 0;
}
-static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, struct ccc_page *cp,
- enum cl_req_type crt)
+void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
- struct cl_2queue *queue;
- int result;
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- queue = &io->ci_queue;
- cl_2queue_init_page(queue, page);
-
- result = cl_io_submit_sync(env, io, crt, queue, 0);
- LASSERT(cl_page_is_owned(page, io));
-
- if (crt == CRT_READ)
- /*
- * in CRT_WRITE case page is left locked even in case of
- * error.
- */
- cl_page_list_disown(env, io, &queue->c2_qin);
- cl_2queue_fini(env, queue);
-
- return result;
-}
-
-/**
- * Prepare partially written-to page for a write.
- */
-static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
- struct cl_object *obj, struct cl_page *pg,
- struct ccc_page *cp,
- unsigned from, unsigned to)
-{
- struct cl_attr *attr = ccc_env_thread_attr(env);
- loff_t offset = cl_offset(obj, pg->cp_index);
- int result;
-
- cl_object_attr_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- cl_object_attr_unlock(obj);
- if (result == 0) {
- /*
- * If are writing to a new page, no need to read old data.
- * The extent locking will have updated the KMS, and for our
- * purposes here we can treat it like i_size.
- */
- if (attr->cat_kms <= offset) {
- char *kaddr = kmap_atomic(cp->cpg_page);
-
- memset(kaddr, 0, cl_page_size(obj));
- kunmap_atomic(kaddr);
- } else if (cp->cpg_defer_uptodate)
- cp->cpg_ra_used = 1;
- else
- result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
- /*
- * In older implementations, obdo_refresh_inode is called here
- * to update the inode because the write might modify the
- * object info at OST. However, this has been proven useless,
- * since LVB functions will be called when user space program
- * tries to retrieve inode attribute. Also, see bug 15909 for
- * details. -jay
- */
- if (result == 0)
- cl_page_export(env, pg, 1);
- }
- return result;
-}
-
-static int vvp_io_prepare_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct cl_object *obj = slice->cpl_obj;
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *pg = slice->cpl_page;
- struct page *vmpage = cp->cpg_page;
-
- int result;
-
- LINVRNT(cl_page_is_vmlocked(env, pg));
- LASSERT(vmpage->mapping->host == ccc_object_inode(obj));
-
- result = 0;
-
- CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
- if (!PageUptodate(vmpage)) {
- /*
- * We're completely overwriting an existing page, so _don't_
- * set it up to date until commit_write
- */
- if (from == 0 && to == PAGE_SIZE) {
- CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
- POISON_PAGE(page, 0x11);
- } else
- result = vvp_io_prepare_partial(env, ios->cis_io, obj,
- pg, cp, from, to);
- } else
- CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
- return result;
-}
-
-static int vvp_io_commit_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct cl_object *obj = slice->cpl_obj;
- struct cl_io *io = ios->cis_io;
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *pg = slice->cpl_page;
- struct inode *inode = ccc_object_inode(obj);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct page *vmpage = cp->cpg_page;
-
- int result;
- int tallyop;
- loff_t size;
-
- LINVRNT(cl_page_is_vmlocked(env, pg));
- LASSERT(vmpage->mapping->host == inode);
-
- LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
- CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);
-
- /*
- * queue a write for some time in the future the first time we
- * dirty the page.
- *
- * This is different from what other file systems do: they usually
- * just mark page (and some of its buffers) dirty and rely on
- * balance_dirty_pages() to start a write-back. Lustre wants write-back
- * to be started earlier for the following reasons:
- *
- * (1) with a large number of clients we need to limit the amount
- * of cached data on the clients a lot;
- *
- * (2) large compute jobs generally want compute-only then io-only
- * and the IO should complete as quickly as possible;
- *
- * (3) IO is batched up to the RPC size and is async until the
- * client max cache is hit
- * (/sys/fs/lustre/osc/OSC.../max_dirty_mb)
- *
- */
- if (!PageDirty(vmpage)) {
- tallyop = LPROC_LL_DIRTY_MISSES;
- result = cl_page_cache_add(env, io, pg, CRT_WRITE);
- if (result == 0) {
- /* page was added into cache successfully. */
- set_page_dirty(vmpage);
- vvp_write_pending(cl2ccc(obj), cp);
- } else if (result == -EDQUOT) {
- pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT;
- bool need_clip = true;
-
- /*
- * Client ran out of disk space grant. Possible
- * strategies are:
- *
- * (a) do a sync write, renewing grant;
- *
- * (b) stop writing on this stripe, switch to the
- * next one.
- *
- * (b) is a part of "parallel io" design that is the
- * ultimate goal. (a) is what "old" client did, and
- * what the new code continues to do for the time
- * being.
- */
- if (last_index > pg->cp_index) {
- to = PAGE_SIZE;
- need_clip = false;
- } else if (last_index == pg->cp_index) {
- int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
-
- if (to < size_to)
- to = size_to;
- }
- if (need_clip)
- cl_page_clip(env, pg, 0, to);
- result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
- if (result)
- CERROR("Write page %lu of inode %p failed %d\n",
- pg->cp_index, inode, result);
- }
- } else {
- tallyop = LPROC_LL_DIRTY_HITS;
- result = 0;
- }
- ll_stats_ops_tally(sbi, tallyop, 1);
-
- /* Inode should be marked DIRTY even if no new page was marked DIRTY
- * because page could have been not flushed between 2 modifications.
- * It is important the file is marked DIRTY as soon as the I/O is done
- * Indeed, when cache is flushed, file could be already closed and it
- * is too late to warn the MDT.
- * It is acceptable that file is marked DIRTY even if I/O is dropped
- * for some reasons before being flushed to OST.
- */
- if (result == 0) {
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
-
- size = cl_offset(obj, pg->cp_index) + to;
-
- ll_inode_size_lock(inode);
- if (result == 0) {
- if (size > i_size_read(inode)) {
- cl_isize_write_nolock(inode, size);
- CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
- PFID(lu_object_fid(&obj->co_lu)),
- (unsigned long)size);
- }
- cl_page_export(env, pg, 1);
- } else {
- if (size > i_size_read(inode))
- cl_page_discard(env, io, pg);
- }
- ll_inode_size_unlock(inode);
- return result;
+ CLOBINVRNT(env, ios->cis_io->ci_obj,
+ vvp_object_invariant(ios->cis_io->ci_obj));
}
static const struct cl_io_operations vvp_io_ops = {
.op = {
[CIT_READ] = {
- .cio_fini = vvp_io_read_fini,
+ .cio_fini = vvp_io_fini,
.cio_lock = vvp_io_read_lock,
.cio_start = vvp_io_read_start,
- .cio_advance = ccc_io_advance
+ .cio_advance = vvp_io_advance,
},
[CIT_WRITE] = {
.cio_fini = vvp_io_fini,
+ .cio_iter_init = vvp_io_write_iter_init,
+ .cio_iter_fini = vvp_io_write_iter_fini,
.cio_lock = vvp_io_write_lock,
.cio_start = vvp_io_write_start,
- .cio_advance = ccc_io_advance
+ .cio_advance = vvp_io_advance,
},
[CIT_SETATTR] = {
.cio_fini = vvp_io_setattr_fini,
@@ -1120,7 +1293,7 @@ static const struct cl_io_operations vvp_io_ops = {
.cio_iter_init = vvp_io_fault_iter_init,
.cio_lock = vvp_io_fault_lock,
.cio_start = vvp_io_fault_start,
- .cio_end = ccc_io_end
+ .cio_end = vvp_io_end,
},
[CIT_FSYNC] = {
.cio_start = vvp_io_fsync_start,
@@ -1131,29 +1304,26 @@ static const struct cl_io_operations vvp_io_ops = {
}
},
.cio_read_page = vvp_io_read_page,
- .cio_prepare_write = vvp_io_prepare_write,
- .cio_commit_write = vvp_io_commit_write
};
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io)
{
struct vvp_io *vio = vvp_env_io(env);
- struct ccc_io *cio = ccc_env_io(env);
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
int result;
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
CDEBUG(D_VFSTRACE, DFID
" ignore/verify layout %d/%d, layout version %d restore needed %d\n",
PFID(lu_object_fid(&obj->co_lu)),
io->ci_ignore_layout, io->ci_verify_layout,
- cio->cui_layout_gen, io->ci_restore_needed);
+ vio->vui_layout_gen, io->ci_restore_needed);
- CL_IO_SLICE_CLEAN(cio, cui_cl);
- cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
- vio->cui_ra_window_set = 0;
+ CL_IO_SLICE_CLEAN(vio, vui_cl);
+ cl_io_slice_add(io, &vio->vui_cl, obj, &vvp_io_ops);
+ vio->vui_ra_valid = false;
result = 0;
if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
size_t count;
@@ -1166,7 +1336,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
if (count == 0)
result = 1;
else
- cio->cui_tot_count = count;
+ vio->vui_tot_count = count;
/* for read/write, we store the jobid in the inode, and
* it'll be fetched by osc when building RPC.
@@ -1192,7 +1362,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
* because it might not grant layout lock in IT_OPEN.
*/
if (result == 0 && !io->ci_ignore_layout) {
- result = ll_layout_refresh(inode, &cio->cui_layout_gen);
+ result = ll_layout_refresh(inode, &vio->vui_layout_gen);
if (result == -ENOENT)
/* If the inode on MDS has been removed, but the objects
* on OSTs haven't been destroyed (async unlink), layout
@@ -1208,11 +1378,3 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
return result;
}
-
-static struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- /* Calling just for assertion */
- cl2ccc_io(env, slice);
- return vvp_env_io(env);
-}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c
index ff0948043..f5bd6c22e 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_lock.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_lock.c
@@ -40,7 +40,7 @@
#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/obd.h"
+#include "../include/obd_support.h"
#include "../include/lustre_lite.h"
#include "vvp_internal.h"
@@ -51,36 +51,41 @@
*
*/
-/**
- * Estimates lock value for the purpose of managing the lock cache during
- * memory shortages.
- *
- * Locks for memory mapped files are almost infinitely precious, others are
- * junk. "Mapped locks" are heavy, but not infinitely heavy, so that they are
- * ordered within themselves by weights assigned from other layers.
- */
-static unsigned long vvp_lock_weigh(const struct lu_env *env,
- const struct cl_lock_slice *slice)
+static void vvp_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
+{
+ struct vvp_lock *vlk = cl2vvp_lock(slice);
+
+ kmem_cache_free(vvp_lock_kmem, vlk);
+}
+
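+/*
+ * There is nothing to enqueue at the vvp level; only check the object
+ * invariant. The actual enqueueing is handled by the layers below.
+ */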
+static int vvp_lock_enqueue(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ struct cl_io *unused, struct cl_sync_io *anchor)
{
- struct ccc_object *cob = cl2ccc(slice->cls_obj);
+ CLOBINVRNT(env, slice->cls_obj, vvp_object_invariant(slice->cls_obj));
- return atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0;
+ return 0;
}
static const struct cl_lock_operations vvp_lock_ops = {
- .clo_delete = ccc_lock_delete,
- .clo_fini = ccc_lock_fini,
- .clo_enqueue = ccc_lock_enqueue,
- .clo_wait = ccc_lock_wait,
- .clo_use = ccc_lock_use,
- .clo_unuse = ccc_lock_unuse,
- .clo_fits_into = ccc_lock_fits_into,
- .clo_state = ccc_lock_state,
- .clo_weigh = vvp_lock_weigh
+ .clo_fini = vvp_lock_fini,
+ .clo_enqueue = vvp_lock_enqueue,
};
int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
+ struct cl_lock *lock, const struct cl_io *unused)
{
- return ccc_lock_init(env, obj, lock, io, &vvp_lock_ops);
+ struct vvp_lock *vlk;
+ int result;
+
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+ vlk = kmem_cache_zalloc(vvp_lock_kmem, GFP_NOFS);
+ if (vlk) {
+ cl_lock_slice_add(lock, &vlk->vlk_cl, obj, &vvp_lock_ops);
+ result = 0;
+ } else {
+ result = -ENOMEM;
+ }
+ return result;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index 03c887d8e..18c9df7eb 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -45,6 +45,7 @@
#include "../include/obd.h"
#include "../include/lustre_lite.h"
+#include "llite_internal.h"
#include "vvp_internal.h"
/*****************************************************************************
@@ -53,16 +54,25 @@
*
*/
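+/*
+ * An object is consistent iff its inode is a regular file (or not yet
+ * initialized, i_mode == 0) and the inode's cl_object points back at it.
+ */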
+int vvp_object_invariant(const struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ return (S_ISREG(inode->i_mode) || inode->i_mode == 0) &&
+ lli->lli_clob == obj;
+}
+
static int vvp_object_print(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o)
{
- struct ccc_object *obj = lu2ccc(o);
- struct inode *inode = obj->cob_inode;
+ struct vvp_object *obj = lu2vvp(o);
+ struct inode *inode = obj->vob_inode;
struct ll_inode_info *lli;
(*p)(env, cookie, "(%s %d %d) inode: %p ",
- list_empty(&obj->cob_pending_list) ? "-" : "+",
- obj->cob_transient_pages, atomic_read(&obj->cob_mmap_cnt),
+ list_empty(&obj->vob_pending_list) ? "-" : "+",
+ obj->vob_transient_pages, atomic_read(&obj->vob_mmap_cnt),
inode);
if (inode) {
lli = ll_i2info(inode);
@@ -77,7 +87,7 @@ static int vvp_object_print(const struct lu_env *env, void *cookie,
static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
struct cl_attr *attr)
{
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
/*
* lov overwrites most of these fields in
@@ -99,7 +109,7 @@ static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
const struct cl_attr *attr, unsigned valid)
{
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
if (valid & CAT_UID)
inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
@@ -112,7 +122,7 @@ static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
if (valid & CAT_CTIME)
inode->i_ctime.tv_sec = attr->cat_ctime;
if (0 && valid & CAT_SIZE)
- cl_isize_write_nolock(inode, attr->cat_size);
+ i_size_write(inode, attr->cat_size);
/* not currently necessary */
if (0 && valid & (CAT_UID|CAT_GID|CAT_SIZE))
mark_inode_dirty(inode);
@@ -165,6 +175,40 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
return 0;
}
+static int vvp_prune(const struct lu_env *env, struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+ int rc;
+
+ rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_LOCAL, 1);
+ if (rc < 0) {
+ CDEBUG(D_VFSTRACE, DFID ": writeback failed: %d\n",
+ PFID(lu_object_fid(&obj->co_lu)), rc);
+ return rc;
+ }
+
+ truncate_inode_pages(inode->i_mapping, 0);
+ return 0;
+}
+
+static int vvp_object_glimpse(const struct lu_env *env,
+ const struct cl_object *obj, struct ost_lvb *lvb)
+{
+ struct inode *inode = vvp_object_inode(obj);
+
+ lvb->lvb_mtime = LTIME_S(inode->i_mtime);
+ lvb->lvb_atime = LTIME_S(inode->i_atime);
+ lvb->lvb_ctime = LTIME_S(inode->i_ctime);
+ /*
+ * LU-417: add the dirty-page block count lest i_blocks report 0;
+ * otherwise "cp" or "tar" on a remote node may treat the file as
+ * completely sparse and skip it.
+ */
+ if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
+ lvb->lvb_blocks = dirty_cnt(inode);
+ return 0;
+}
+
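
The LU-417 comment in vvp_object_glimpse() above is worth unpacking: dirty pages may exist only on the client, so the server-side block count can be zero for a file that plainly has data, and archivers treat that combination as a fully sparse file. A sketch of the heuristic being defended against, assuming ordinary POSIX stat fields:

#include <stdbool.h>
#include <sys/stat.h>

/* The test tools like cp/tar are said to apply: nonzero size but no
 * allocated blocks looks like a hole-only file that can be skipped.
 */
static bool looks_fully_sparse(const struct stat *st)
{
        return st->st_size > 0 && st->st_blocks == 0;
}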
static const struct cl_object_operations vvp_ops = {
.coo_page_init = vvp_page_init,
.coo_lock_init = vvp_lock_init,
@@ -172,29 +216,94 @@ static const struct cl_object_operations vvp_ops = {
.coo_attr_get = vvp_attr_get,
.coo_attr_set = vvp_attr_set,
.coo_conf_set = vvp_conf_set,
- .coo_glimpse = ccc_object_glimpse
+ .coo_prune = vvp_prune,
+ .coo_glimpse = vvp_object_glimpse
};
+static int vvp_object_init0(const struct lu_env *env,
+ struct vvp_object *vob,
+ const struct cl_object_conf *conf)
+{
+ vob->vob_inode = conf->coc_inode;
+ vob->vob_transient_pages = 0;
+ cl_object_page_init(&vob->vob_cl, sizeof(struct vvp_page));
+ return 0;
+}
+
+static int vvp_object_init(const struct lu_env *env, struct lu_object *obj,
+ const struct lu_object_conf *conf)
+{
+ struct vvp_device *dev = lu2vvp_dev(obj->lo_dev);
+ struct vvp_object *vob = lu2vvp(obj);
+ struct lu_object *below;
+ struct lu_device *under;
+ int result;
+
+ under = &dev->vdv_next->cd_lu_dev;
+ below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
+ if (below) {
+ const struct cl_object_conf *cconf;
+
+ cconf = lu2cl_conf(conf);
+ INIT_LIST_HEAD(&vob->vob_pending_list);
+ lu_object_add(obj, below);
+ result = vvp_object_init0(env, vob, cconf);
+ } else {
+ result = -ENOMEM;
+ }
+
+ return result;
+}
+
+static void vvp_object_free(const struct lu_env *env, struct lu_object *obj)
+{
+ struct vvp_object *vob = lu2vvp(obj);
+
+ lu_object_fini(obj);
+ lu_object_header_fini(obj->lo_header);
+ kmem_cache_free(vvp_object_kmem, vob);
+}
+
static const struct lu_object_operations vvp_lu_obj_ops = {
- .loo_object_init = ccc_object_init,
- .loo_object_free = ccc_object_free,
- .loo_object_print = vvp_object_print
+ .loo_object_init = vvp_object_init,
+ .loo_object_free = vvp_object_free,
+ .loo_object_print = vvp_object_print,
};
-struct ccc_object *cl_inode2ccc(struct inode *inode)
+struct vvp_object *cl_inode2vvp(struct inode *inode)
{
- struct cl_inode_info *lli = cl_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *obj = lli->lli_clob;
struct lu_object *lu;
lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
LASSERT(lu);
- return lu2ccc(lu);
+ return lu2vvp(lu);
}
struct lu_object *vvp_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
+ const struct lu_object_header *unused,
struct lu_device *dev)
{
- return ccc_object_alloc(env, hdr, dev, &vvp_ops, &vvp_lu_obj_ops);
+ struct vvp_object *vob;
+ struct lu_object *obj;
+
+ vob = kmem_cache_zalloc(vvp_object_kmem, GFP_NOFS);
+ if (vob) {
+ struct cl_object_header *hdr;
+
+ obj = &vob->vob_cl.co_lu;
+ hdr = &vob->vob_header;
+ cl_object_header_init(hdr);
+ hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
+
+ lu_object_init(obj, &hdr->coh_lu, dev);
+ lu_object_add_top(&hdr->coh_lu, obj);
+
+ vob->vob_cl.co_ops = &vvp_ops;
+ obj->lo_ops = &vvp_lu_obj_ops;
+ } else {
+ obj = NULL;
+ }
+ return obj;
}
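
vvp_object_alloc() and vvp_object_init() above split object setup in two: allocation builds only the top slice and its header, and the framework later calls loo_object_init(), which asks the next device down (dev->vdv_next) to allocate the layer below and links it in with lu_object_add(). A compressed user-space sketch of that handshake — the two phases are collapsed into one call chain for brevity, and every name is illustrative:

#include <errno.h>
#include <stdlib.h>

struct obj {
        struct obj *below;              /* next-lower layer, if any */
};

static struct obj *lower_alloc(void)    /* the ldo_object_alloc() role */
{
        return calloc(1, sizeof(struct obj));
}

static int obj_init(struct obj *top)    /* the vvp_object_init() role */
{
        top->below = lower_alloc();
        return top->below ? 0 : -ENOMEM;
}

static struct obj *obj_alloc(void)      /* the vvp_object_alloc() role */
{
        struct obj *top = calloc(1, sizeof(*top));

        if (top && obj_init(top) != 0) { /* in the driver, init runs later */
                free(top);
                top = NULL;
        }
        return top;
}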
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 33ca3eb34..6cd2af7a9 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -41,9 +41,16 @@
#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/obd.h"
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+
#include "../include/lustre_lite.h"
+#include "llite_internal.h"
#include "vvp_internal.h"
/*****************************************************************************
@@ -52,9 +59,9 @@
*
*/
-static void vvp_page_fini_common(struct ccc_page *cp)
+static void vvp_page_fini_common(struct vvp_page *vpg)
{
- struct page *vmpage = cp->cpg_page;
+ struct page *vmpage = vpg->vpg_page;
LASSERT(vmpage);
put_page(vmpage);
@@ -63,23 +70,23 @@ static void vvp_page_fini_common(struct ccc_page *cp)
static void vvp_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct page *vmpage = cp->cpg_page;
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
/*
* vmpage->private was already cleared when page was moved into
* VPG_FREEING state.
*/
LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
- vvp_page_fini_common(cp);
+ vvp_page_fini_common(vpg);
}
static int vvp_page_own(const struct lu_env *env,
const struct cl_page_slice *slice, struct cl_io *io,
int nonblock)
{
- struct ccc_page *vpg = cl2ccc_page(slice);
- struct page *vmpage = vpg->cpg_page;
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
LASSERT(vmpage);
if (nonblock) {
@@ -96,6 +103,7 @@ static int vvp_page_own(const struct lu_env *env,
lock_page(vmpage);
wait_on_page_writeback(vmpage);
+
return 0;
}
@@ -136,41 +144,15 @@ static void vvp_page_discard(const struct lu_env *env,
struct cl_io *unused)
{
struct page *vmpage = cl2vm_page(slice);
- struct address_space *mapping;
- struct ccc_page *cpg = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
LASSERT(vmpage);
LASSERT(PageLocked(vmpage));
- mapping = vmpage->mapping;
-
- if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
- ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
+ if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
+ ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);
- /*
- * truncate_complete_page() calls
- * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
- */
- truncate_complete_page(mapping, vmpage);
-}
-
-static int vvp_page_unmap(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
- __u64 offset;
-
- LASSERT(vmpage);
- LASSERT(PageLocked(vmpage));
-
- offset = vmpage->index << PAGE_SHIFT;
-
- /*
- * XXX is it safe to call this with the page lock held?
- */
- ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);
- return 0;
+ ll_invalidate_page(vmpage);
}
static void vvp_page_delete(const struct lu_env *env,
@@ -179,12 +161,20 @@ static void vvp_page_delete(const struct lu_env *env,
struct page *vmpage = cl2vm_page(slice);
struct inode *inode = vmpage->mapping->host;
struct cl_object *obj = slice->cpl_obj;
+ struct cl_page *page = slice->cpl_page;
+ int refc;
LASSERT(PageLocked(vmpage));
- LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
- LASSERT(inode == ccc_object_inode(obj));
+ LASSERT((struct cl_page *)vmpage->private == page);
+ LASSERT(inode == vvp_object_inode(obj));
- vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
+ vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice));
+
+ /* Drop the reference count held in vvp_page_init */
+ refc = atomic_dec_return(&page->cp_ref);
+ LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);
+
+ ClearPageUptodate(vmpage);
ClearPagePrivate(vmpage);
vmpage->private = 0;
/*
@@ -237,7 +227,7 @@ static int vvp_page_prep_write(const struct lu_env *env,
if (!pg->cp_sync_io)
set_page_writeback(vmpage);
- vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
+ vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
return 0;
}
@@ -250,11 +240,11 @@ static int vvp_page_prep_write(const struct lu_env *env,
*/
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
{
- struct ccc_object *obj = cl_inode2ccc(inode);
+ struct vvp_object *obj = cl_inode2vvp(inode);
if (ioret == 0) {
ClearPageError(vmpage);
- obj->cob_discard_page_warned = 0;
+ obj->vob_discard_page_warned = 0;
} else {
SetPageError(vmpage);
if (ioret == -ENOSPC)
@@ -263,8 +253,8 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret
set_bit(AS_EIO, &inode->i_mapping->flags);
if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
- obj->cob_discard_page_warned == 0) {
- obj->cob_discard_page_warned = 1;
+ obj->vob_discard_page_warned == 0) {
+ obj->vob_discard_page_warned = 1;
ll_dirty_page_discard_warn(vmpage, ioret);
}
}
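
vvp_vmpage_error() above rate-limits the dirty-page-discard warning with a per-object latch: the first -ESHUTDOWN/-EINTR failure warns and sets vob_discard_page_warned, later failures stay quiet, and any successful I/O re-arms the latch. The same warn-once shape in isolation (illustrative names):

#include <stdbool.h>
#include <stdio.h>

struct obj {
        bool discard_warned;            /* vob_discard_page_warned role */
};

static void io_completed(struct obj *o, int ioret)
{
        if (ioret == 0) {
                o->discard_warned = false;  /* success re-arms the warning */
        } else if (!o->discard_warned) {
                o->discard_warned = true;   /* warn once per failure burst */
                fprintf(stderr, "dirty page discarded: %d\n", ioret);
        }
}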
@@ -274,22 +264,23 @@ static void vvp_page_completion_read(const struct lu_env *env,
const struct cl_page_slice *slice,
int ioret)
{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct page *vmpage = cp->cpg_page;
- struct cl_page *page = cl_page_top(slice->cpl_page);
- struct inode *inode = ccc_object_inode(page->cp_obj);
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
+ struct cl_page *page = slice->cpl_page;
+ struct inode *inode = vvp_object_inode(page->cp_obj);
LASSERT(PageLocked(vmpage));
CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
- if (cp->cpg_defer_uptodate)
+ if (vpg->vpg_defer_uptodate)
ll_ra_count_put(ll_i2sbi(inode), 1);
if (ioret == 0) {
- if (!cp->cpg_defer_uptodate)
+ if (!vpg->vpg_defer_uptodate)
cl_page_export(env, page, 1);
- } else
- cp->cpg_defer_uptodate = 0;
+ } else {
+ vpg->vpg_defer_uptodate = 0;
+ }
if (!page->cp_sync_io)
unlock_page(vmpage);
@@ -299,9 +290,9 @@ static void vvp_page_completion_write(const struct lu_env *env,
const struct cl_page_slice *slice,
int ioret)
{
- struct ccc_page *cp = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
struct cl_page *pg = slice->cpl_page;
- struct page *vmpage = cp->cpg_page;
+ struct page *vmpage = vpg->vpg_page;
CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
@@ -315,8 +306,8 @@ static void vvp_page_completion_write(const struct lu_env *env,
* and then re-add the page into the pending transfer queue. -jay
*/
- cp->cpg_write_queued = 0;
- vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
+ vpg->vpg_write_queued = 0;
+ vvp_write_complete(cl2vvp(slice->cpl_obj), vpg);
if (pg->cp_sync_io) {
LASSERT(PageLocked(vmpage));
@@ -327,7 +318,7 @@ static void vvp_page_completion_write(const struct lu_env *env,
* Only mark the page error when it's an async write
* because applications won't wait for IO to finish.
*/
- vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);
+ vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);
end_page_writeback(vmpage);
}
@@ -359,7 +350,7 @@ static int vvp_page_make_ready(const struct lu_env *env,
LASSERT(pg->cp_state == CPS_CACHED);
/* This actually clears the dirty bit in the radix tree. */
set_page_writeback(vmpage);
- vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
+ vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
} else if (pg->cp_state == CPS_PAGEOUT) {
/* is it possible for osc_flush_async_page() to already
@@ -375,24 +366,51 @@ static int vvp_page_make_ready(const struct lu_env *env,
return result;
}
+static int vvp_page_is_under_lock(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io, pgoff_t *max_index)
+{
+ if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
+ io->ci_type == CIT_FAULT) {
+ struct vvp_io *vio = vvp_env_io(env);
+
+ if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
+ *max_index = CL_PAGE_EOF;
+ }
+ return 0;
+}
+
static int vvp_page_print(const struct lu_env *env,
const struct cl_page_slice *slice,
void *cookie, lu_printer_t printer)
{
- struct ccc_page *vp = cl2ccc_page(slice);
- struct page *vmpage = vp->cpg_page;
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
(*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
- vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
- vp->cpg_write_queued, vmpage);
+ vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used,
+ vpg->vpg_write_queued, vmpage);
if (vmpage) {
(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
(long)vmpage->flags, page_count(vmpage),
page_mapcount(vmpage), vmpage->private,
- page_index(vmpage),
+ vmpage->index,
list_empty(&vmpage->lru) ? "not-" : "");
}
+
(*printer)(env, cookie, "\n");
+
+ return 0;
+}
+
+static int vvp_page_fail(const struct lu_env *env,
+ const struct cl_page_slice *slice)
+{
+ /*
+ * Cached read? make_ready is never expected on the read path,
+ * so reaching this handler is a bug.
+ */
+ LBUG();
+
return 0;
}
@@ -401,32 +419,38 @@ static const struct cl_page_operations vvp_page_ops = {
.cpo_assume = vvp_page_assume,
.cpo_unassume = vvp_page_unassume,
.cpo_disown = vvp_page_disown,
- .cpo_vmpage = ccc_page_vmpage,
.cpo_discard = vvp_page_discard,
.cpo_delete = vvp_page_delete,
- .cpo_unmap = vvp_page_unmap,
.cpo_export = vvp_page_export,
.cpo_is_vmlocked = vvp_page_is_vmlocked,
.cpo_fini = vvp_page_fini,
.cpo_print = vvp_page_print,
- .cpo_is_under_lock = ccc_page_is_under_lock,
+ .cpo_is_under_lock = vvp_page_is_under_lock,
.io = {
[CRT_READ] = {
.cpo_prep = vvp_page_prep_read,
.cpo_completion = vvp_page_completion_read,
- .cpo_make_ready = ccc_fail,
+ .cpo_make_ready = vvp_page_fail,
},
[CRT_WRITE] = {
.cpo_prep = vvp_page_prep_write,
.cpo_completion = vvp_page_completion_write,
.cpo_make_ready = vvp_page_make_ready,
- }
- }
+ },
+ },
};
+static int vvp_transient_page_prep(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ /* A transient page should always be sent. */
+ return 0;
+}
+
static void vvp_transient_page_verify(const struct cl_page *page)
{
- struct inode *inode = ccc_object_inode(page->cp_obj);
+ struct inode *inode = vvp_object_inode(page->cp_obj);
LASSERT(!inode_trylock(inode));
}
@@ -477,7 +501,7 @@ static void vvp_transient_page_discard(const struct lu_env *env,
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
const struct cl_page_slice *slice)
{
- struct inode *inode = ccc_object_inode(slice->cpl_obj);
+ struct inode *inode = vvp_object_inode(slice->cpl_obj);
int locked;
locked = !inode_trylock(inode);
@@ -497,13 +521,13 @@ vvp_transient_page_completion(const struct lu_env *env,
static void vvp_transient_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- struct ccc_page *cp = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
struct cl_page *clp = slice->cpl_page;
- struct ccc_object *clobj = cl2ccc(clp->cp_obj);
+ struct vvp_object *clobj = cl2vvp(clp->cp_obj);
- vvp_page_fini_common(cp);
- LASSERT(!inode_trylock(clobj->cob_inode));
- clobj->cob_transient_pages--;
+ vvp_page_fini_common(vpg);
+ LASSERT(!inode_trylock(clobj->vob_inode));
+ clobj->vob_transient_pages--;
}
static const struct cl_page_operations vvp_transient_page_ops = {
@@ -512,45 +536,48 @@ static const struct cl_page_operations vvp_transient_page_ops = {
.cpo_unassume = vvp_transient_page_unassume,
.cpo_disown = vvp_transient_page_disown,
.cpo_discard = vvp_transient_page_discard,
- .cpo_vmpage = ccc_page_vmpage,
.cpo_fini = vvp_transient_page_fini,
.cpo_is_vmlocked = vvp_transient_page_is_vmlocked,
.cpo_print = vvp_page_print,
- .cpo_is_under_lock = ccc_page_is_under_lock,
+ .cpo_is_under_lock = vvp_page_is_under_lock,
.io = {
[CRT_READ] = {
- .cpo_prep = ccc_transient_page_prep,
+ .cpo_prep = vvp_transient_page_prep,
.cpo_completion = vvp_transient_page_completion,
},
[CRT_WRITE] = {
- .cpo_prep = ccc_transient_page_prep,
+ .cpo_prep = vvp_transient_page_prep,
.cpo_completion = vvp_transient_page_completion,
}
}
};
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
- struct ccc_page *cpg = cl_object_page_slice(obj, page);
+ struct vvp_page *vpg = cl_object_page_slice(obj, page);
+ struct page *vmpage = page->cp_vmpage;
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
- cpg->cpg_page = vmpage;
+ vpg->vpg_page = vmpage;
get_page(vmpage);
- INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
+ INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
if (page->cp_type == CPT_CACHEABLE) {
+ /* in cache, decref in vvp_page_delete */
+ atomic_inc(&page->cp_ref);
SetPagePrivate(vmpage);
vmpage->private = (unsigned long)page;
- cl_page_slice_add(page, &cpg->cpg_cl, obj, &vvp_page_ops);
+ cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
+ &vvp_page_ops);
} else {
- struct ccc_object *clobj = cl2ccc(obj);
+ struct vvp_object *clobj = cl2vvp(obj);
- LASSERT(!inode_trylock(clobj->cob_inode));
- cl_page_slice_add(page, &cpg->cpg_cl, obj,
+ LASSERT(!inode_trylock(clobj->vob_inode));
+ cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
&vvp_transient_page_ops);
- clobj->cob_transient_pages++;
+ clobj->vob_transient_pages++;
}
return 0;
}
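
vvp_page_init() above now pins the cl_page for cacheable pages: the atomic_inc(&page->cp_ref) at init time is paired with the atomic_dec_return() in vvp_page_delete(), so the cl_page stashed in vmpage->private cannot be freed while the VM page still points at it. The pairing in miniature, with a plain counter standing in for the kernel atomics:

#include <assert.h>

struct page_ref {
        int cp_ref;                     /* stand-in for the atomic refcount */
};

static void page_init(struct page_ref *p)
{
        p->cp_ref++;                    /* taken here, dropped in page_delete() */
}

static void page_delete(struct page_ref *p)
{
        int refc = --p->cp_ref;

        assert(refc >= 1);              /* mirrors the LASSERTF: others still hold it */
}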
diff --git a/drivers/staging/lustre/lustre/llite/vvp_req.c b/drivers/staging/lustre/lustre/llite/vvp_req.c
new file mode 100644
index 000000000..fb886291a
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/vvp_req.c
@@ -0,0 +1,121 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include "../include/lustre/lustre_idl.h"
+#include "../include/cl_object.h"
+#include "../include/obd.h"
+#include "../include/obd_support.h"
+#include "../include/lustre_lite.h"
+#include "llite_internal.h"
+#include "vvp_internal.h"
+
+static inline struct vvp_req *cl2vvp_req(const struct cl_req_slice *slice)
+{
+ return container_of0(slice, struct vvp_req, vrq_cl);
+}
+
+/**
+ * Implementation of struct cl_req_operations::cro_attr_set() for the
+ * VVP layer. VVP is responsible for:
+ *
+ * - o_[mac]time
+ *
+ * - o_mode
+ *
+ * - o_parent_seq
+ *
+ * - o_[ug]id
+ *
+ * - o_parent_oid
+ *
+ * - o_parent_ver
+ *
+ * - o_ioepoch
+ */
+void vvp_req_attr_set(const struct lu_env *env,
+ const struct cl_req_slice *slice,
+ const struct cl_object *obj,
+ struct cl_req_attr *attr, u64 flags)
+{
+ struct inode *inode;
+ struct obdo *oa;
+ u32 valid_flags;
+
+ oa = attr->cra_oa;
+ inode = vvp_object_inode(obj);
+ valid_flags = OBD_MD_FLTYPE;
+
+ if (slice->crs_req->crq_type == CRT_WRITE) {
+ if (flags & OBD_MD_FLEPOCH) {
+ oa->o_valid |= OBD_MD_FLEPOCH;
+ oa->o_ioepoch = ll_i2info(inode)->lli_ioepoch;
+ valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLUID | OBD_MD_FLGID;
+ }
+ }
+ obdo_from_inode(oa, inode, valid_flags & flags);
+ obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
+ memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid,
+ JOBSTATS_JOBID_SIZE);
+}
+
+void vvp_req_completion(const struct lu_env *env,
+ const struct cl_req_slice *slice, int ioret)
+{
+ struct vvp_req *vrq;
+
+ if (ioret > 0)
+ cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
+
+ vrq = cl2vvp_req(slice);
+ kmem_cache_free(vvp_req_kmem, vrq);
+}
+
+static const struct cl_req_operations vvp_req_ops = {
+ .cro_attr_set = vvp_req_attr_set,
+ .cro_completion = vvp_req_completion
+};
+
+int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req)
+{
+ struct vvp_req *vrq;
+ int result;
+
+ vrq = kmem_cache_zalloc(vvp_req_kmem, GFP_NOFS);
+ if (vrq) {
+ cl_req_slice_add(req, &vrq->vrq_cl, dev, &vvp_req_ops);
+ result = 0;
+ } else {
+ result = -ENOMEM;
+ }
+ return result;
+}
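
vvp_req_attr_set() above fills the wire attribute block behind a validity bitmask: each layer copies only the fields it owns and records what it set. The bitmask idiom on its own, with made-up flag values and a stripped-down attribute struct (not the real OBD_MD_* flags):

#include <stdint.h>

#define MD_FLTYPE       0x1u            /* illustrative flag values */
#define MD_FLMTIME      0x2u

struct wire_attr {
        uint32_t valid;                 /* which fields below are meaningful */
        uint32_t mode;
        uint64_t mtime;
};

static void attr_set(struct wire_attr *oa, uint32_t mode, uint64_t mtime,
                     uint32_t requested)
{
        if (requested & MD_FLTYPE) {
                oa->mode = mode;
                oa->valid |= MD_FLTYPE; /* record that the field was filled */
        }
        if (requested & MD_FLMTIME) {
                oa->mtime = mtime;
                oa->valid |= MD_FLMTIME;
        }
}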
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index b68dcc921..608014b0d 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -181,8 +181,9 @@ int ll_setxattr_common(struct inode *inode, const char *name,
size = rc;
pv = (const char *)new_value;
- } else
+ } else {
return -EOPNOTSUPP;
+ }
valid |= rce_ops2valid(rce->rce_ops);
}
@@ -210,16 +211,14 @@ int ll_setxattr_common(struct inode *inode, const char *name,
return 0;
}
-int ll_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+int ll_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags)
{
- struct inode *inode = d_inode(dentry);
-
LASSERT(inode);
LASSERT(name);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
- inode->i_ino, inode->i_generation, inode, name);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
+ PFID(ll_inode2fid(inode)), inode, name);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1);
@@ -243,12 +242,12 @@ int ll_setxattr(struct dentry *dentry, const char *name,
lump->lmm_stripe_offset = -1;
if (lump && S_ISREG(inode->i_mode)) {
- int flags = FMODE_WRITE;
+ __u64 it_flags = FMODE_WRITE;
int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ?
sizeof(*lump) : sizeof(struct lov_user_md_v3);
- rc = ll_lov_setstripe_ea_info(inode, dentry, flags, lump,
- lum_size);
+ rc = ll_lov_setstripe_ea_info(inode, dentry, it_flags,
+ lump, lum_size);
/* b10667: rc always be 0 here for now */
rc = 0;
} else if (S_ISDIR(inode->i_mode)) {
@@ -272,8 +271,8 @@ int ll_removexattr(struct dentry *dentry, const char *name)
LASSERT(inode);
LASSERT(name);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
- inode->i_ino, inode->i_generation, inode, name);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
+ PFID(ll_inode2fid(inode)), inode, name);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1);
return ll_setxattr_common(inode, name, NULL, 0, 0,
@@ -292,8 +291,8 @@ int ll_getxattr_common(struct inode *inode, const char *name,
struct rmtacl_ctl_entry *rce = NULL;
struct ll_inode_info *lli = ll_i2info(inode);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
- inode->i_ino, inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
/* listxattr has slightly different behavior from that of ext3:
* without 'user_xattr' ext3 will list all xattr names but
@@ -338,7 +337,6 @@ int ll_getxattr_common(struct inode *inode, const char *name,
*/
if (xattr_type == XATTR_ACL_ACCESS_T &&
!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
-
struct posix_acl *acl;
spin_lock(&lli->lli_lock);
@@ -423,8 +421,7 @@ getxattr_nocache:
if (rce && rce->rce_ops == RMT_LSETFACL) {
ext_acl_xattr_header *acl;
- acl = lustre_posix_acl_xattr_2ext(
- (posix_acl_xattr_header *)buffer, rc);
+ acl = lustre_posix_acl_xattr_2ext(buffer, rc);
if (IS_ERR(acl)) {
rc = PTR_ERR(acl);
goto out;
@@ -451,16 +448,14 @@ out:
return rc;
}
-ssize_t ll_getxattr(struct dentry *dentry, const char *name,
- void *buffer, size_t size)
+ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, void *buffer, size_t size)
{
- struct inode *inode = d_inode(dentry);
-
LASSERT(inode);
LASSERT(name);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
- inode->i_ino, inode->i_generation, inode, name);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
+ PFID(ll_inode2fid(inode)), inode, name);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
@@ -554,8 +549,8 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
LASSERT(inode);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
- inode->i_ino, inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1);
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 3480ce2bb..d7e17abbe 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -229,7 +229,6 @@ static int ll_xattr_cache_valid(struct ll_inode_info *lli)
*/
static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
{
-
if (!ll_xattr_cache_valid(lli))
return 0;
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index 8a0087190..7007e4c48 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -42,9 +42,6 @@
#define LMV_MAX_TGT_COUNT 128
-#define lmv_init_lock(lmv) mutex_lock(&lmv->init_mutex)
-#define lmv_init_unlock(lmv) mutex_unlock(&lmv->init_mutex)
-
#define LL_IT2STR(it) \
((it) ? ldlm_it2str((it)->it_op) : "0")
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 9abb7c2b9..9e31f6b03 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -132,8 +132,9 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
static struct obd_uuid *lmv_get_uuid(struct obd_export *exp)
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+ struct lmv_tgt_desc *tgt = lmv->tgts[0];
- return obd_get_uuid(lmv->tgts[0]->ltd_exp);
+ return tgt ? obd_get_uuid(tgt->ltd_exp) : NULL;
}
static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
@@ -249,7 +250,6 @@ static int lmv_connect(const struct lu_env *env,
static void lmv_set_timeouts(struct obd_device *obd)
{
- struct lmv_tgt_desc *tgt;
struct lmv_obd *lmv;
int i;
@@ -261,8 +261,10 @@ static void lmv_set_timeouts(struct obd_device *obd)
return;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
- tgt = lmv->tgts[i];
- if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0)
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
continue;
obd_set_info_async(NULL, tgt->ltd_exp, sizeof(KEY_INTERMDS),
@@ -302,13 +304,14 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize,
return 0;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
- lmv->tgts[i]->ltd_active == 0) {
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) {
CWARN("%s: NULL export for %d\n", obd->obd_name, i);
continue;
}
- rc = md_init_ea_size(lmv->tgts[i]->ltd_exp, easize, def_easize,
+ rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize,
cookiesize, def_cookiesize);
if (rc) {
CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n",
@@ -425,7 +428,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
- lmv_init_lock(lmv);
+ mutex_lock(&lmv->lmv_init_mutex);
if (lmv->desc.ld_tgt_count == 0) {
struct obd_device *mdc_obd;
@@ -433,7 +436,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
&obd->obd_uuid);
if (!mdc_obd) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
CERROR("%s: Target %s not attached: rc = %d\n",
obd->obd_name, uuidp->uuid, -EINVAL);
return -EINVAL;
@@ -445,7 +448,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n",
obd->obd_name,
obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST);
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return -EEXIST;
}
@@ -459,7 +462,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
newsize <<= 1;
newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS);
if (!newtgts) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return -ENOMEM;
}
@@ -481,7 +484,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
tgt = kzalloc(sizeof(*tgt), GFP_NOFS);
if (!tgt) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return -ENOMEM;
}
@@ -507,7 +510,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
}
}
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return rc;
}
@@ -522,18 +525,27 @@ int lmv_check_connect(struct obd_device *obd)
if (lmv->connected)
return 0;
- lmv_init_lock(lmv);
+ mutex_lock(&lmv->lmv_init_mutex);
if (lmv->connected) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return 0;
}
if (lmv->desc.ld_tgt_count == 0) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
CERROR("%s: no targets configured.\n", obd->obd_name);
return -EINVAL;
}
+ LASSERT(lmv->tgts);
+
+ if (!lmv->tgts[0]) {
+ mutex_unlock(&lmv->lmv_init_mutex);
+ CERROR("%s: no target configured for index 0.\n",
+ obd->obd_name);
+ return -EINVAL;
+ }
+
CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
lmv->cluuid.uuid, obd->obd_name);
@@ -551,7 +563,7 @@ int lmv_check_connect(struct obd_device *obd)
lmv->connected = 1;
easize = lmv_get_easize(lmv);
lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0);
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return 0;
out_disc:
@@ -572,7 +584,7 @@ int lmv_check_connect(struct obd_device *obd)
}
}
class_disconnect(lmv->exp);
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return rc;
}
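
lmv_check_connect() above is a textbook check/lock/re-check: the unlocked read of lmv->connected is the fast path, and the second check under lmv_init_mutex closes the race against a concurrent connect. A pthreads sketch of the same structure (as in the driver, the slow path relies on the mutex for visibility; the unlocked read is only an optimization):

#include <pthread.h>

static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
static int connected;

static int check_connect(void)
{
        if (connected)                  /* fast path, no lock taken */
                return 0;

        pthread_mutex_lock(&init_mutex);
        if (connected) {                /* re-check under the lock */
                pthread_mutex_unlock(&init_mutex);
                return 0;
        }
        /* ... the real connect work goes here ... */
        connected = 1;
        pthread_mutex_unlock(&init_mutex);
        return 0;
}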
@@ -796,6 +808,11 @@ static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
/* unregister request (call from llapi_hsm_copytool_fini) */
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp)
+ continue;
+
/* best effort: try to clean as much as possible
* (continue on error)
*/
@@ -825,20 +842,28 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
* except if it is because of an inactive target.
*/
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg);
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp)
+ continue;
+
+ err = obd_iocontrol(cmd, tgt->ltd_exp, len, lk, uarg);
if (err) {
- if (lmv->tgts[i]->ltd_active) {
+ if (tgt->ltd_active) {
/* permanent error */
CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
- lmv->tgts[i]->ltd_uuid.uuid,
- i, cmd, err);
+ tgt->ltd_uuid.uuid, i, cmd, err);
rc = err;
lk->lk_flags |= LK_FLG_STOP;
/* unregister from previous MDS */
- for (j = 0; j < i; j++)
- obd_iocontrol(cmd,
- lmv->tgts[j]->ltd_exp,
- len, lk, uarg);
+ for (j = 0; j < i; j++) {
+ tgt = lmv->tgts[j];
+
+ if (!tgt || !tgt->ltd_exp)
+ continue;
+ obd_iocontrol(cmd, tgt->ltd_exp, len,
+ lk, uarg);
+ }
return rc;
}
/* else: transient error.
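
The registration loop in lmv_hsm_ct_register() above unwinds on the first permanent error: every target already registered gets an unregister call before the error is returned, so a partial registration never leaks. The first-failure-rolls-back shape in isolation (stub functions, illustrative only):

static int register_one(int i)          /* obd_iocontrol() register role */
{
        (void)i;
        return 0;
}

static void unregister_one(int i)       /* obd_iocontrol() with LK_FLG_STOP role */
{
        (void)i;
}

static int register_all(int n)
{
        int i, j;

        for (i = 0; i < n; i++) {
                int err = register_one(i);

                if (err) {
                        for (j = 0; j < i; j++) /* undo the partial work */
                                unregister_one(j);
                        return err;
                }
        }
        return 0;
}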
@@ -877,6 +902,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
{
struct obd_device *obddev = class_exp2obd(exp);
struct lmv_obd *lmv = &obddev->u.lmv;
+ struct lmv_tgt_desc *tgt = NULL;
int i = 0;
int rc = 0;
int set = 0;
@@ -896,10 +922,11 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (index >= count)
return -ENODEV;
- if (!lmv->tgts[index] || lmv->tgts[index]->ltd_active == 0)
+ tgt = lmv->tgts[index];
+ if (!tgt || !tgt->ltd_active)
return -ENODATA;
- mdc_obd = class_exp2obd(lmv->tgts[index]->ltd_exp);
+ mdc_obd = class_exp2obd(tgt->ltd_exp);
if (!mdc_obd)
return -EINVAL;
@@ -909,7 +936,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
(int)sizeof(struct obd_uuid))))
return -EFAULT;
- rc = obd_statfs(NULL, lmv->tgts[index]->ltd_exp, &stat_buf,
+ rc = obd_statfs(NULL, tgt->ltd_exp, &stat_buf,
cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
0);
if (rc)
@@ -922,11 +949,10 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
}
case OBD_IOC_QUOTACTL: {
struct if_quotactl *qctl = karg;
- struct lmv_tgt_desc *tgt = NULL;
struct obd_quotactl *oqctl;
if (qctl->qc_valid == QC_MDTIDX) {
- if (qctl->qc_idx < 0 || count <= qctl->qc_idx)
+ if (count <= qctl->qc_idx)
return -EINVAL;
tgt = lmv->tgts[qctl->qc_idx];
@@ -975,18 +1001,18 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (icc->icc_mdtindex >= count)
return -ENODEV;
- if (!lmv->tgts[icc->icc_mdtindex] ||
- !lmv->tgts[icc->icc_mdtindex]->ltd_exp ||
- lmv->tgts[icc->icc_mdtindex]->ltd_active == 0)
+ tgt = lmv->tgts[icc->icc_mdtindex];
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
return -ENODEV;
- rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex]->ltd_exp,
- sizeof(*icc), icc, NULL);
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, sizeof(*icc), icc, NULL);
break;
}
case LL_IOC_GET_CONNECT_FLAGS: {
- if (!lmv->tgts[0])
+ tgt = lmv->tgts[0];
+
+ if (!tgt || !tgt->ltd_exp)
return -ENODATA;
- rc = obd_iocontrol(cmd, lmv->tgts[0]->ltd_exp, len, karg, uarg);
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
break;
}
case OBD_IOC_FID2PATH: {
@@ -997,7 +1023,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
case LL_IOC_HSM_STATE_SET:
case LL_IOC_HSM_ACTION: {
struct md_op_data *op_data = karg;
- struct lmv_tgt_desc *tgt;
tgt = lmv_find_target(lmv, &op_data->op_fid1);
if (IS_ERR(tgt))
@@ -1011,7 +1036,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
}
case LL_IOC_HSM_PROGRESS: {
const struct hsm_progress_kernel *hpk = karg;
- struct lmv_tgt_desc *tgt;
tgt = lmv_find_target(lmv, &hpk->hpk_fid);
if (IS_ERR(tgt))
@@ -1021,7 +1045,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
}
case LL_IOC_HSM_REQUEST: {
struct hsm_user_request *hur = karg;
- struct lmv_tgt_desc *tgt;
unsigned int reqcount = hur->hur_request.hr_itemcount;
if (reqcount == 0)
@@ -1044,7 +1067,11 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
int rc1;
struct hsm_user_request *req;
- nr = lmv_hsm_req_count(lmv, hur, lmv->tgts[i]);
+ tgt = lmv->tgts[i];
+ if (!tgt || !tgt->ltd_exp)
+ continue;
+
+ nr = lmv_hsm_req_count(lmv, hur, tgt);
if (nr == 0) /* nothing for this MDS */
continue;
@@ -1056,10 +1083,10 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (!req)
return -ENOMEM;
- lmv_hsm_req_build(lmv, hur, lmv->tgts[i], req);
+ lmv_hsm_req_build(lmv, hur, tgt, req);
- rc1 = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp,
- reqlen, req, uarg);
+ rc1 = obd_iocontrol(cmd, tgt->ltd_exp, reqlen,
+ req, uarg);
if (rc1 != 0 && rc == 0)
rc = rc1;
kvfree(req);
@@ -1103,27 +1130,27 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
struct obd_device *mdc_obd;
int err;
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
+ tgt = lmv->tgts[i];
+ if (!tgt || !tgt->ltd_exp)
continue;
/* ll_umount_begin() sets the force flag on the lmv, not the
* mdc. Let's pass it through.
*/
- mdc_obd = class_exp2obd(lmv->tgts[i]->ltd_exp);
+ mdc_obd = class_exp2obd(tgt->ltd_exp);
mdc_obd->obd_force = obddev->obd_force;
- err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len,
- karg, uarg);
+ err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
return err;
} else if (err) {
- if (lmv->tgts[i]->ltd_active) {
+ if (tgt->ltd_active) {
CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
- lmv->tgts[i]->ltd_uuid.uuid,
- i, cmd, err);
+ tgt->ltd_uuid.uuid, i, cmd, err);
if (!rc)
rc = err;
}
- } else
+ } else {
set = 1;
+ }
}
if (!set && !rc)
rc = -EIO;
@@ -1269,7 +1296,7 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
spin_lock_init(&lmv->lmv_lock);
- mutex_init(&lmv->init_mutex);
+ mutex_init(&lmv->lmv_init_mutex);
lprocfs_lmv_init_vars(&lvars);
@@ -2071,7 +2098,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
/* Check if we've reached the end of the CFS_PAGE. */
- if (!((unsigned long)dp & ~CFS_PAGE_MASK))
+ if (!((unsigned long)dp & ~PAGE_MASK))
break;
/* Save the hash and flags of this lu_dirpage. */
@@ -2268,7 +2295,6 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
lmv = &obd->u.lmv;
if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) {
- struct lmv_tgt_desc *tgt;
int i;
rc = lmv_check_connect(obd);
@@ -2277,7 +2303,8 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
LASSERT(*vallen == sizeof(__u32));
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
/*
* All tgts should be connected when this gets called.
*/
@@ -2466,12 +2493,13 @@ static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
LASSERT(fid);
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
- lmv->tgts[i]->ltd_active == 0)
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
continue;
- err = md_cancel_unused(lmv->tgts[i]->ltd_exp, fid,
- policy, mode, flags, opaque);
+ err = md_cancel_unused(tgt->ltd_exp, fid, policy, mode, flags,
+ opaque);
if (!rc)
rc = err;
}
@@ -2482,9 +2510,13 @@ static int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
__u64 *bits)
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+ struct lmv_tgt_desc *tgt = lmv->tgts[0];
int rc;
- rc = md_set_lock_data(lmv->tgts[0]->ltd_exp, lockh, data, bits);
+ if (!tgt || !tgt->ltd_exp)
+ return -EINVAL;
+
+ rc = md_set_lock_data(tgt->ltd_exp, lockh, data, bits);
return rc;
}
@@ -2509,12 +2541,13 @@ static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
* one fid was created in.
*/
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
- lmv->tgts[i]->ltd_active == 0)
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
continue;
- rc = md_lock_match(lmv->tgts[i]->ltd_exp, flags, fid,
- type, policy, mode, lockh);
+ rc = md_lock_match(tgt->ltd_exp, flags, fid, type, policy, mode,
+ lockh);
if (rc)
return rc;
}
@@ -2529,18 +2562,24 @@ static int lmv_get_lustre_md(struct obd_export *exp,
struct lustre_md *md)
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+ struct lmv_tgt_desc *tgt = lmv->tgts[0];
- return md_get_lustre_md(lmv->tgts[0]->ltd_exp, req, dt_exp, md_exp, md);
+ if (!tgt || !tgt->ltd_exp)
+ return -EINVAL;
+ return md_get_lustre_md(tgt->ltd_exp, req, dt_exp, md_exp, md);
}
static int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt = lmv->tgts[0];
if (md->mea)
obd_free_memmd(exp, (void *)&md->mea);
- return md_free_lustre_md(lmv->tgts[0]->ltd_exp, md);
+ if (!tgt || !tgt->ltd_exp)
+ return -EINVAL;
+ return md_free_lustre_md(tgt->ltd_exp, md);
}
static int lmv_set_open_replay_data(struct obd_export *exp,
@@ -2649,7 +2688,8 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
int rc = 0, i;
__u64 curspace, curinodes;
- if (!lmv->desc.ld_tgt_count || !tgt->ltd_active) {
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active ||
+ !lmv->desc.ld_tgt_count) {
CERROR("master lmv inactive\n");
return -EIO;
}
@@ -2665,12 +2705,8 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
tgt = lmv->tgts[i];
- if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0)
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
continue;
- if (!tgt->ltd_active) {
- CDEBUG(D_HA, "mdt %d is inactive.\n", i);
- continue;
- }
err = obd_quotactl(tgt->ltd_exp, oqctl);
if (err) {
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 7dd3162b5..ac9744e88 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -73,19 +73,6 @@
* - top-page keeps a reference to its sub-page, and destroys it when it
* is destroyed.
*
- * - sub-lock keep a reference to its top-locks. Top-lock keeps a
- * reference (and a hold, see cl_lock_hold()) on its sub-locks when it
- * actively using them (that is, in cl_lock_state::CLS_QUEUING,
- * cl_lock_state::CLS_ENQUEUED, cl_lock_state::CLS_HELD states). When
- * moving into cl_lock_state::CLS_CACHED state, top-lock releases a
- * hold. From this moment top-lock has only a 'weak' reference to its
- * sub-locks. This reference is protected by top-lock
- * cl_lock::cll_guard, and will be automatically cleared by the sub-lock
- * when the latter is destroyed. When a sub-lock is canceled, a
- * reference to it is removed from the top-lock array, and top-lock is
- * moved into CLS_NEW state. It is guaranteed that all sub-locks exist
- * while their top-lock is in CLS_HELD or CLS_CACHED states.
- *
* - IO's are not reference counted.
*
* To implement a connection between top and sub entities, lov layer is split
@@ -281,24 +268,17 @@ struct lov_object {
};
/**
- * Flags that top-lock can set on each of its sub-locks.
- */
-enum lov_sub_flags {
- /** Top-lock acquired a hold (cl_lock_hold()) on a sub-lock. */
- LSF_HELD = 1 << 0
-};
-
-/**
* State lov_lock keeps for each sub-lock.
*/
struct lov_lock_sub {
/** sub-lock itself */
- struct lovsub_lock *sub_lock;
- /** An array of per-sub-lock flags, taken from enum lov_sub_flags */
- unsigned sub_flags;
+ struct cl_lock sub_lock;
+ /** Set if the sublock has ever been enqueued, meaning it may
+ * hold resources of the underlying layers
+ */
+ unsigned int sub_is_enqueued:1,
+ sub_initialized:1;
int sub_stripe;
- struct cl_lock_descr sub_descr;
- struct cl_lock_descr sub_got;
};
/**
@@ -308,59 +288,8 @@ struct lov_lock {
struct cl_lock_slice lls_cl;
/** Number of sub-locks in this lock */
int lls_nr;
- /**
- * Number of existing sub-locks.
- */
- unsigned lls_nr_filled;
- /**
- * Set when sub-lock was canceled, while top-lock was being
- * used, or unused.
- */
- unsigned int lls_cancel_race:1;
- /**
- * An array of sub-locks
- *
- * There are two issues with managing sub-locks:
- *
- * - sub-locks are concurrently canceled, and
- *
- * - sub-locks are shared with other top-locks.
- *
- * To manage cancellation, top-lock acquires a hold on a sublock
- * (lov_sublock_adopt()) when the latter is inserted into
- * lov_lock::lls_sub[]. This hold is released (lov_sublock_release())
- * when top-lock is going into CLS_CACHED state or destroyed. Hold
- * prevents sub-lock from cancellation.
- *
- * Sub-lock sharing means, among other things, that top-lock that is
- * in the process of creation (i.e., not yet inserted into lock list)
- * is already accessible to other threads once at least one of its
- * sub-locks is created, see lov_lock_sub_init().
- *
- * Sub-lock can be in one of the following states:
- *
- * - doesn't exist, lov_lock::lls_sub[]::sub_lock == NULL. Such
- * sub-lock was either never created (top-lock is in CLS_NEW
- * state), or it was created, then canceled, then destroyed
- * (lov_lock_unlink() cleared sub-lock pointer in the top-lock).
- *
- * - sub-lock exists and is on
- * hold. (lov_lock::lls_sub[]::sub_flags & LSF_HELD). This is a
- * normal state of a sub-lock in CLS_HELD and CLS_CACHED states
- * of a top-lock.
- *
- * - sub-lock exists, but is not held by the top-lock. This
- * happens after top-lock released a hold on sub-locks before
- * going into cache (lov_lock_unuse()).
- *
- * \todo To support wide-striping, array has to be replaced with a set
- * of queues to avoid scanning.
- */
- struct lov_lock_sub *lls_sub;
- /**
- * Original description with which lock was enqueued.
- */
- struct cl_lock_descr lls_orig;
+ /** sublock array */
+ struct lov_lock_sub lls_sub[0];
};
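
The new struct lov_lock above replaces the separately allocated lls_sub pointer with a trailing array, so a lock and all of its per-stripe state come from one allocation sized at creation time. The driver spells it lls_sub[0] (the GNU zero-length form); a sketch using the equivalent C99 flexible array member:

#include <stdlib.h>

struct sub {
        int sub_stripe;
};

struct lock {
        int nr;                         /* number of entries in subs[] */
        struct sub subs[];              /* flexible array member */
};

static struct lock *lock_alloc(int nr)
{
        struct lock *lck;

        /* one allocation covers the header and all nr sub-entries */
        lck = calloc(1, sizeof(*lck) + nr * sizeof(lck->subs[0]));
        if (lck)
                lck->nr = nr;
        return lck;
}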
struct lov_page {
@@ -444,8 +373,9 @@ struct lov_thread_info {
struct cl_lock_descr lti_ldescr;
struct ost_lvb lti_lvb;
struct cl_2queue lti_cl2q;
- struct cl_lock_closure lti_closure;
+ struct cl_page_list lti_plist;
wait_queue_t lti_waiter;
+ struct cl_attr lti_attr;
};
/**
@@ -611,14 +541,13 @@ int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
const struct cl_lock_descr *d, int idx);
int lov_page_init(const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
int lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, struct page *vmpage);
-
+ struct cl_page *page, pgoff_t index);
int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
struct lu_object *lov_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
@@ -631,6 +560,7 @@ struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
struct lovsub_lock *sub);
struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
const struct cl_page_slice *slice);
+int lov_page_stripe(const struct cl_page *page);
#define lov_foreach_target(lov, var) \
for (var = 0; var < lov_targets_nr(lov); ++var)
@@ -789,11 +719,6 @@ static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
return container_of0(slice, struct lovsub_req, lsrq_cl);
}
-static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice)
-{
- return slice->cpl_page->cp_child;
-}
-
static inline struct lov_io *cl2lov_io(const struct lu_env *env,
const struct cl_io_slice *ios)
{
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index 532ef87df..dae8e89bc 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -143,9 +143,7 @@ static void *lov_key_init(const struct lu_context *ctx,
struct lov_thread_info *info;
info = kmem_cache_zalloc(lov_thread_kmem, GFP_NOFS);
- if (info)
- INIT_LIST_HEAD(&info->lti_closure.clc_list);
- else
+ if (!info)
info = ERR_PTR(-ENOMEM);
return info;
}
@@ -155,7 +153,6 @@ static void lov_key_fini(const struct lu_context *ctx,
{
struct lov_thread_info *info = data;
- LINVRNT(list_empty(&info->lti_closure.clc_list));
kmem_cache_free(lov_thread_kmem, info);
}
@@ -265,8 +262,9 @@ static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
if (lr) {
cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
@@ -335,14 +333,15 @@ static struct lov_device_emerg **lov_emerg_alloc(int nr)
cl_page_list_init(&em->emrg_page_list);
em->emrg_env = cl_env_alloc(&em->emrg_refcheck,
LCT_REMEMBER | LCT_NOREF);
- if (!IS_ERR(em->emrg_env))
+ if (!IS_ERR(em->emrg_env)) {
em->emrg_env->le_ctx.lc_cookie = 0x2;
- else {
+ } else {
result = PTR_ERR(em->emrg_env);
em->emrg_env = NULL;
}
- } else
+ } else {
result = -ENOMEM;
+ }
}
if (result != 0) {
lov_emerg_free(emerg, nr);
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index b6529401c..460f0fa5e 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -48,11 +48,6 @@
#include "lov_internal.h"
-struct lovea_unpack_args {
- struct lov_stripe_md *lsm;
- int cursor;
-};
-
static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes,
__u16 stripe_count)
{
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index 590f9326a..eef9afac8 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -72,6 +72,21 @@
})
#endif
+#define pool_tgt_size(p) ((p)->pool_obds.op_size)
+#define pool_tgt_count(p) ((p)->pool_obds.op_count)
+#define pool_tgt_array(p) ((p)->pool_obds.op_array)
+#define pool_tgt_rw_sem(p) ((p)->pool_obds.op_rw_sem)
+
+struct pool_desc {
+ char pool_name[LOV_MAXPOOLNAME + 1];
+ struct ost_pool pool_obds;
+ atomic_t pool_refcount;
+ struct hlist_node pool_hash; /* access by poolname */
+ struct list_head pool_list; /* serial access */
+ struct dentry *pool_debugfs_entry; /* file in debugfs */
+ struct obd_device *pool_lobd; /* owner */
+};
+
struct lov_request {
struct obd_info rq_oi;
struct lov_request_set *rq_rqset;
@@ -88,7 +103,6 @@ struct lov_request {
};
struct lov_request_set {
- struct ldlm_enqueue_info *set_ei;
struct obd_info *set_oi;
atomic_t set_refcount;
struct obd_export *set_exp;
@@ -102,10 +116,8 @@ struct lov_request_set {
atomic_t set_finish_checked;
struct llog_cookie *set_cookies;
int set_cookie_sent;
- struct obd_trans_info *set_oti;
struct list_head set_list;
wait_queue_head_t set_waitq;
- spinlock_t set_lock;
};
extern struct kmem_cache *lov_oinfo_slab;
@@ -114,12 +126,6 @@ extern struct lu_kmem_descr lov_caches[];
void lov_finish_set(struct lov_request_set *set);
-static inline void lov_get_reqset(struct lov_request_set *set)
-{
- LASSERT(atomic_read(&set->set_refcount) > 0);
- atomic_inc(&set->set_refcount);
-}
-
static inline void lov_put_reqset(struct lov_request_set *set)
{
if (atomic_dec_and_test(&set->set_refcount))
@@ -146,10 +152,8 @@ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
u64 start, u64 end,
u64 *obd_start, u64 *obd_end);
int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off);
-
-/* lov_qos.c */
-#define LOV_USES_ASSIGNED_STRIPE 0
-#define LOV_USES_DEFAULT_STRIPE 1
+pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
+ int stripe);
/* lov_request.c */
int lov_update_common_set(struct lov_request_set *set,
@@ -176,6 +180,8 @@ int lov_fini_statfs_set(struct lov_request_set *set);
int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc);
/* lov_obd.c */
+void lov_stripe_lock(struct lov_stripe_md *md);
+void lov_stripe_unlock(struct lov_stripe_md *md);
void lov_fix_desc(struct lov_desc *desc);
void lov_fix_desc_stripe_size(__u64 *val);
void lov_fix_desc_stripe_count(__u32 *val);
@@ -231,8 +237,6 @@ int lov_pool_new(struct obd_device *obd, char *poolname);
int lov_pool_del(struct obd_device *obd, char *poolname);
int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname);
int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname);
-struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname);
-int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool);
void lov_pool_putref(struct pool_desc *pool);
static inline struct lov_stripe_md *lsm_addref(struct lov_stripe_md *lsm)
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 4296aacd8..86cb3f8f9 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -225,8 +225,9 @@ struct lov_io_sub *lov_sub_get(const struct lu_env *env,
if (!sub->sub_io_initialized) {
sub->sub_stripe = stripe;
rc = lov_io_sub_init(env, lio, sub);
- } else
+ } else {
rc = 0;
+ }
if (rc == 0)
lov_sub_enter(sub);
else
@@ -245,13 +246,15 @@ void lov_sub_put(struct lov_io_sub *sub)
*
*/
-static int lov_page_stripe(const struct cl_page *page)
+int lov_page_stripe(const struct cl_page *page)
{
struct lovsub_object *subobj;
+ const struct cl_page_slice *slice;
+
+ slice = cl_page_at(page, &lovsub_device_type);
+ LASSERT(slice->cpl_obj);
- subobj = lu2lovsub(
- lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header,
- &lovsub_device_type));
+ subobj = cl2lovsub(slice->cpl_obj);
return subobj->lso_index;
}
@@ -274,10 +277,11 @@ struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
struct cl_io *io)
{
- struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
+ struct lov_stripe_md *lsm;
int result;
LASSERT(lio->lis_object);
+ lsm = lio->lis_object->lo_lsm;
/*
* Need to be optimized, we can't afford to allocate a piece of memory
@@ -292,8 +296,9 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
lio->lis_single_subio_index = -1;
lio->lis_active_subios = 0;
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
@@ -411,8 +416,9 @@ static int lov_io_iter_init(const struct lu_env *env,
lov_sub_put(sub);
CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n",
stripe, start, end);
- } else
+ } else {
rc = PTR_ERR(sub);
+ }
if (!rc)
list_add_tail(&sub->sub_linkage, &lio->lis_active);
@@ -436,7 +442,6 @@ static int lov_io_rw_iter_init(const struct lu_env *env,
/* fast path for common case. */
if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) {
-
lov_do_div64(start, ssize);
next = (start + 1) * ssize;
if (next <= start * ssize)
@@ -543,13 +548,6 @@ static void lov_io_unlock(const struct lu_env *env,
LASSERT(rc == 0);
}
-static struct cl_page_list *lov_io_submit_qin(struct lov_device *ld,
- struct cl_page_list *qin,
- int idx, int alloc)
-{
- return alloc ? &qin[idx] : &ld->ld_emrg[idx]->emrg_page_list;
-}
-
/**
* lov implementation of cl_operations::cio_submit() method. It takes a list
* of pages in \a queue, splits it into per-stripe sub-lists, invokes
@@ -569,25 +567,17 @@ static int lov_io_submit(const struct lu_env *env,
const struct cl_io_slice *ios,
enum cl_req_type crt, struct cl_2queue *queue)
{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct lov_object *obj = lio->lis_object;
- struct lov_device *ld = lu2lov_dev(lov2cl(obj)->co_lu.lo_dev);
- struct cl_page_list *qin = &queue->c2_qin;
- struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;
- struct cl_page_list *stripes_qin = NULL;
+ struct cl_page_list *qin = &queue->c2_qin;
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct lov_io_sub *sub;
+ struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
struct cl_page *page;
- struct cl_page *tmp;
int stripe;
-#define QIN(stripe) lov_io_submit_qin(ld, stripes_qin, stripe, alloc)
-
int rc = 0;
- int alloc =
- !(current->flags & PF_MEMALLOC);
if (lio->lis_active_subios == 1) {
int idx = lio->lis_single_subio_index;
- struct lov_io_sub *sub;
LASSERT(idx < lio->lis_nr_subios);
sub = lov_sub_get(env, lio, idx);
@@ -600,119 +590,120 @@ static int lov_io_submit(const struct lu_env *env,
}
LASSERT(lio->lis_subs);
- if (alloc) {
- stripes_qin =
- libcfs_kvzalloc(sizeof(*stripes_qin) *
- lio->lis_nr_subios,
- GFP_NOFS);
- if (!stripes_qin)
- return -ENOMEM;
-
- for (stripe = 0; stripe < lio->lis_nr_subios; stripe++)
- cl_page_list_init(&stripes_qin[stripe]);
- } else {
- /*
- * If we get here, it means pageout & swap doesn't help.
- * In order to not make things worse, even don't try to
- * allocate the memory with __GFP_NOWARN. -jay
- */
- mutex_lock(&ld->ld_mutex);
- lio->lis_mem_frozen = 1;
- }
- cl_2queue_init(cl2q);
- cl_page_list_for_each_safe(page, tmp, qin) {
- stripe = lov_page_stripe(page);
- cl_page_list_move(QIN(stripe), qin, page);
- }
+ cl_page_list_init(plist);
+ while (qin->pl_nr > 0) {
+ struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;
- for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) {
- struct lov_io_sub *sub;
- struct cl_page_list *sub_qin = QIN(stripe);
+ cl_2queue_init(cl2q);
- if (list_empty(&sub_qin->pl_pages))
- continue;
+ page = cl_page_list_first(qin);
+ cl_page_list_move(&cl2q->c2_qin, qin, page);
+
+ stripe = lov_page_stripe(page);
+ while (qin->pl_nr > 0) {
+ page = cl_page_list_first(qin);
+ if (stripe != lov_page_stripe(page))
+ break;
+
+ cl_page_list_move(&cl2q->c2_qin, qin, page);
+ }
- cl_page_list_splice(sub_qin, &cl2q->c2_qin);
sub = lov_sub_get(env, lio, stripe);
if (!IS_ERR(sub)) {
rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
crt, cl2q);
lov_sub_put(sub);
- } else
+ } else {
rc = PTR_ERR(sub);
- cl_page_list_splice(&cl2q->c2_qin, &queue->c2_qin);
+ }
+
+ cl_page_list_splice(&cl2q->c2_qin, plist);
cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout);
+ cl_2queue_fini(env, cl2q);
+
if (rc != 0)
break;
}
- for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) {
- struct cl_page_list *sub_qin = QIN(stripe);
+ cl_page_list_splice(plist, qin);
+ cl_page_list_fini(env, plist);
- if (list_empty(&sub_qin->pl_pages))
- continue;
+ return rc;
+}
+
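
Note: the rewritten submit path above drops the preallocated per-stripe queues (and the PF_MEMALLOC emergency-list fallback) in favour of batching on the fly: pages are peeled off the head of the incoming queue for as long as they belong to the same stripe, and each run is submitted as one batch. A minimal standalone sketch of that pattern, with invented types in place of the Lustre cl_page lists:

    #include <stdio.h>

    /* stand-in for lov_page_stripe(); pretend 4 pages per stripe */
    static int page_stripe(int page)
    {
        return page / 4;
    }

    int main(void)
    {
        int queue[] = { 0, 1, 2, 3, 4, 5, 8, 9 };
        int n = sizeof(queue) / sizeof(queue[0]);
        int i = 0;

        while (i < n) {                 /* while (qin->pl_nr > 0) */
            int stripe = page_stripe(queue[i]);
            int start = i;

            /* move consecutive same-stripe pages into the batch */
            while (i < n && page_stripe(queue[i]) == stripe)
                i++;

            printf("submit stripe %d: pages [%d..%d)\n", stripe, start, i);
        }
        return 0;
    }
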
+static int lov_io_commit_async(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb)
+{
+ struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct lov_io_sub *sub;
+ struct cl_page *page;
+ int rc = 0;
+
+ if (lio->lis_active_subios == 1) {
+ int idx = lio->lis_single_subio_index;
- cl_page_list_splice(sub_qin, qin);
+ LASSERT(idx < lio->lis_nr_subios);
+ sub = lov_sub_get(env, lio, idx);
+ LASSERT(!IS_ERR(sub));
+ LASSERT(sub->sub_io == &lio->lis_single_subio);
+ rc = cl_io_commit_async(sub->sub_env, sub->sub_io, queue,
+ from, to, cb);
+ lov_sub_put(sub);
+ return rc;
}
- if (alloc) {
- kvfree(stripes_qin);
- } else {
- int i;
+ LASSERT(lio->lis_subs);
- for (i = 0; i < lio->lis_nr_subios; i++) {
- struct cl_io *cio = lio->lis_subs[i].sub_io;
+ cl_page_list_init(plist);
+ while (queue->pl_nr > 0) {
+ int stripe_to = to;
+ int stripe;
- if (cio && cio == &ld->ld_emrg[i]->emrg_subio)
- lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
+ LASSERT(plist->pl_nr == 0);
+ page = cl_page_list_first(queue);
+ cl_page_list_move(plist, queue, page);
+
+ stripe = lov_page_stripe(page);
+ while (queue->pl_nr > 0) {
+ page = cl_page_list_first(queue);
+ if (stripe != lov_page_stripe(page))
+ break;
+
+ cl_page_list_move(plist, queue, page);
}
- lio->lis_mem_frozen = 0;
- mutex_unlock(&ld->ld_mutex);
- }
- return rc;
-#undef QIN
-}
+ if (queue->pl_nr > 0) /* still has more pages */
+ stripe_to = PAGE_SIZE;
-static int lov_io_prepare_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct cl_page *sub_page = lov_sub_page(slice);
- struct lov_io_sub *sub;
- int result;
+ sub = lov_sub_get(env, lio, stripe);
+ if (!IS_ERR(sub)) {
+ rc = cl_io_commit_async(sub->sub_env, sub->sub_io,
+ plist, from, stripe_to, cb);
+ lov_sub_put(sub);
+ } else {
+ rc = PTR_ERR(sub);
+ break;
+ }
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- result = cl_io_prepare_write(sub->sub_env, sub->sub_io,
- sub_page, from, to);
- lov_sub_put(sub);
- } else
- result = PTR_ERR(sub);
- return result;
-}
+ if (plist->pl_nr > 0) /* short write */
+ break;
-static int lov_io_commit_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct cl_page *sub_page = lov_sub_page(slice);
- struct lov_io_sub *sub;
- int result;
+ from = 0;
+ }
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- result = cl_io_commit_write(sub->sub_env, sub->sub_io,
- sub_page, from, to);
- lov_sub_put(sub);
- } else
- result = PTR_ERR(sub);
- return result;
+ /* for error case, add the page back into the qin list */
+ LASSERT(ergo(rc == 0, plist->pl_nr == 0));
+ while (plist->pl_nr > 0) {
+ /* error occurred, add the uncommitted pages back into queue */
+ page = cl_page_list_last(plist);
+ cl_page_list_move_head(queue, plist, page);
+ }
+
+ return rc;
}
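
Note: one subtlety in lov_io_commit_async() above is the byte-range bookkeeping. The caller's [from, to) applies only to the first and last page of the whole queue, so every batch except the last commits through PAGE_SIZE, and `from` is reset to 0 after the first batch. A small sketch of just that arithmetic, with invented values:

    #include <stdio.h>

    #define SKETCH_PAGE_SIZE 4096

    int main(void)
    {
        int nbatches = 3;
        int from = 512;                 /* offset within the first page */
        int to = 1024;                  /* offset within the last page */

        for (int b = 0; b < nbatches; b++) {
            /* all but the last batch end on a full page */
            int batch_to = (b == nbatches - 1) ? to : SKETCH_PAGE_SIZE;

            printf("batch %d: from=%d to=%d\n", b, from, batch_to);
            from = 0;                   /* only the first batch honours `from` */
        }
        return 0;
    }
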
static int lov_io_fault_start(const struct lu_env *env,
@@ -803,16 +794,8 @@ static const struct cl_io_operations lov_io_ops = {
.cio_fini = lov_io_fini
}
},
- .req_op = {
- [CRT_READ] = {
- .cio_submit = lov_io_submit
- },
- [CRT_WRITE] = {
- .cio_submit = lov_io_submit
- }
- },
- .cio_prepare_write = lov_io_prepare_write,
- .cio_commit_write = lov_io_commit_write
+ .cio_submit = lov_io_submit,
+ .cio_commit_async = lov_io_commit_async,
};
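
Note: the ops-table change above collapses the per-request-type req_op array (one cio_submit slot each for CRT_READ and CRT_WRITE) and the prepare/commit write pair into a single submit hook plus an asynchronous commit hook. A simplified sketch of the shape of that table, not the real cl_io_operations:

    #include <stdio.h>

    typedef int (*io_fn)(const char *what);

    struct io_ops {
        io_fn submit;           /* one hook shared by reads and writes */
        io_fn commit_async;     /* replaces prepare_write/commit_write */
    };

    static int do_submit(const char *what)
    {
        printf("submit: %s\n", what);
        return 0;
    }

    static int do_commit(const char *what)
    {
        printf("commit_async: %s\n", what);
        return 0;
    }

    int main(void)
    {
        struct io_ops ops = {
            .submit       = do_submit,
            .commit_async = do_commit,
        };

        ops.submit("CRT_READ and CRT_WRITE share one path");
        return ops.commit_async("write-back pages");
    }
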
/*****************************************************************************
@@ -880,15 +863,8 @@ static const struct cl_io_operations lov_empty_io_ops = {
.cio_fini = lov_empty_io_fini
}
},
- .req_op = {
- [CRT_READ] = {
- .cio_submit = LOV_EMPTY_IMPOSSIBLE
- },
- [CRT_WRITE] = {
- .cio_submit = LOV_EMPTY_IMPOSSIBLE
- }
- },
- .cio_commit_write = LOV_EMPTY_IMPOSSIBLE
+ .cio_submit = LOV_EMPTY_IMPOSSIBLE,
+ .cio_commit_async = LOV_EMPTY_IMPOSSIBLE
};
int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
@@ -943,7 +919,7 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
}
io->ci_result = result < 0 ? result : 0;
- return result != 0;
+ return result;
}
int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
@@ -986,7 +962,7 @@ int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
}
io->ci_result = result < 0 ? result : 0;
- return result != 0;
+ return result;
}
/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
index ae854bc25..1b203d18c 100644
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -46,11 +46,6 @@
* @{
*/
-static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
- struct cl_lock *parent);
-
-static int lov_lock_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice);
/*****************************************************************************
*
* Lov lock operations.
@@ -58,7 +53,7 @@ static int lov_lock_unuse(const struct lu_env *env,
*/
static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
- struct cl_lock *parent,
+ const struct cl_lock *parent,
struct lov_lock_sub *lls)
{
struct lov_sublock_env *subenv;
@@ -100,185 +95,26 @@ static void lov_sublock_env_put(struct lov_sublock_env *subenv)
lov_sub_put(subenv->lse_sub);
}
-static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
- struct cl_lock *sublock, int idx,
- struct lov_lock_link *link)
+static int lov_sublock_init(const struct lu_env *env,
+ const struct cl_lock *parent,
+ struct lov_lock_sub *lls)
{
- struct lovsub_lock *lsl;
- struct cl_lock *parent = lck->lls_cl.cls_lock;
- int rc;
-
- LASSERT(cl_lock_is_mutexed(parent));
- LASSERT(cl_lock_is_mutexed(sublock));
-
- lsl = cl2sub_lock(sublock);
- /*
- * check that sub-lock doesn't have lock link to this top-lock.
- */
- LASSERT(!lov_lock_link_find(env, lck, lsl));
- LASSERT(idx < lck->lls_nr);
-
- lck->lls_sub[idx].sub_lock = lsl;
- lck->lls_nr_filled++;
- LASSERT(lck->lls_nr_filled <= lck->lls_nr);
- list_add_tail(&link->lll_list, &lsl->lss_parents);
- link->lll_idx = idx;
- link->lll_super = lck;
- cl_lock_get(parent);
- lu_ref_add(&parent->cll_reference, "lov-child", sublock);
- lck->lls_sub[idx].sub_flags |= LSF_HELD;
- cl_lock_user_add(env, sublock);
-
- rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
- LASSERT(rc == 0); /* there is no way this can fail, currently */
-}
-
-static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
- const struct cl_io *io,
- struct lov_lock *lck,
- int idx, struct lov_lock_link **out)
-{
- struct cl_lock *sublock;
- struct cl_lock *parent;
- struct lov_lock_link *link;
-
- LASSERT(idx < lck->lls_nr);
-
- link = kmem_cache_zalloc(lov_lock_link_kmem, GFP_NOFS);
- if (link) {
- struct lov_sublock_env *subenv;
- struct lov_lock_sub *lls;
- struct cl_lock_descr *descr;
-
- parent = lck->lls_cl.cls_lock;
- lls = &lck->lls_sub[idx];
- descr = &lls->sub_got;
-
- subenv = lov_sublock_env_get(env, parent, lls);
- if (!IS_ERR(subenv)) {
- /* CAVEAT: Don't try to add a field in lov_lock_sub
- * to remember the subio. This is because lock is able
- * to be cached, but this is not true for IO. This
- * further means a sublock might be referenced in
- * different io context. -jay
- */
-
- sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
- descr, "lov-parent", parent);
- lov_sublock_env_put(subenv);
- } else {
- /* error occurs. */
- sublock = (void *)subenv;
- }
-
- if (!IS_ERR(sublock))
- *out = link;
- else
- kmem_cache_free(lov_lock_link_kmem, link);
- } else
- sublock = ERR_PTR(-ENOMEM);
- return sublock;
-}
-
-static void lov_sublock_unlock(const struct lu_env *env,
- struct lovsub_lock *lsl,
- struct cl_lock_closure *closure,
- struct lov_sublock_env *subenv)
-{
- lov_sublock_env_put(subenv);
- lsl->lss_active = NULL;
- cl_lock_disclosure(env, closure);
-}
-
-static int lov_sublock_lock(const struct lu_env *env,
- struct lov_lock *lck,
- struct lov_lock_sub *lls,
- struct cl_lock_closure *closure,
- struct lov_sublock_env **lsep)
-{
- struct lovsub_lock *sublock;
- struct cl_lock *child;
- int result = 0;
-
- LASSERT(list_empty(&closure->clc_list));
-
- sublock = lls->sub_lock;
- child = sublock->lss_cl.cls_lock;
- result = cl_lock_closure_build(env, child, closure);
- if (result == 0) {
- struct cl_lock *parent = closure->clc_origin;
-
- LASSERT(cl_lock_is_mutexed(child));
- sublock->lss_active = parent;
-
- if (unlikely((child->cll_state == CLS_FREEING) ||
- (child->cll_flags & CLF_CANCELLED))) {
- struct lov_lock_link *link;
- /*
- * we could race with lock deletion which temporarily
- * put the lock in freeing state, bug 19080.
- */
- LASSERT(!(lls->sub_flags & LSF_HELD));
-
- link = lov_lock_link_find(env, lck, sublock);
- LASSERT(link);
- lov_lock_unlink(env, link, sublock);
- lov_sublock_unlock(env, sublock, closure, NULL);
- lck->lls_cancel_race = 1;
- result = CLO_REPEAT;
- } else if (lsep) {
- struct lov_sublock_env *subenv;
+ struct lov_sublock_env *subenv;
+ int result;
- subenv = lov_sublock_env_get(env, parent, lls);
- if (IS_ERR(subenv)) {
- lov_sublock_unlock(env, sublock,
- closure, NULL);
- result = PTR_ERR(subenv);
- } else {
- *lsep = subenv;
- }
- }
+ subenv = lov_sublock_env_get(env, parent, lls);
+ if (!IS_ERR(subenv)) {
+ result = cl_lock_init(subenv->lse_env, &lls->sub_lock,
+ subenv->lse_io);
+ lov_sublock_env_put(subenv);
+ } else {
+ /* error occurs. */
+ result = PTR_ERR(subenv);
}
return result;
}
/**
- * Updates the result of a top-lock operation from a result of sub-lock
- * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
- * over sub-locks and lov_subresult() is used to calculate return value of a
- * top-operation. To this end, possible return values of sub-operations are
- * ordered as
- *
- * - 0 success
- * - CLO_WAIT wait for event
- * - CLO_REPEAT repeat top-operation
- * - -ne fundamental error
- *
- * Top-level return code can only go down through this list. CLO_REPEAT
- * overwrites CLO_WAIT, because lock mutex was released and sleeping condition
- * has to be rechecked by the upper layer.
- */
-static int lov_subresult(int result, int rc)
-{
- int result_rank;
- int rc_rank;
-
- LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT,
- "result = %d\n", result);
- LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT,
- "rc = %d\n", rc);
- CLASSERT(CLO_WAIT < CLO_REPEAT);
-
- /* calculate ranks in the ordering above */
- result_rank = result < 0 ? 1 + CLO_REPEAT : result;
- rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;
-
- if (result_rank < rc_rank)
- result = rc;
- return result;
-}
-
-/**
* Creates sub-locks for a given lov_lock for the first time.
*
* Goes through all sub-objects of top-object, and creates sub-locks on every
@@ -286,8 +122,9 @@ static int lov_subresult(int result, int rc)
* fact that top-lock (that is being created) can be accessed concurrently
* through already created sub-locks (possibly shared with other top-locks).
*/
-static int lov_lock_sub_init(const struct lu_env *env,
- struct lov_lock *lck, const struct cl_io *io)
+static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
+ const struct cl_object *obj,
+ struct cl_lock *lock)
{
int result = 0;
int i;
@@ -297,241 +134,86 @@ static int lov_lock_sub_init(const struct lu_env *env,
u64 file_start;
u64 file_end;
- struct lov_object *loo = cl2lov(lck->lls_cl.cls_obj);
+ struct lov_object *loo = cl2lov(obj);
struct lov_layout_raid0 *r0 = lov_r0(loo);
- struct cl_lock *parent = lck->lls_cl.cls_lock;
+ struct lov_lock *lovlck;
- lck->lls_orig = parent->cll_descr;
- file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
- file_end = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
+ file_start = cl_offset(lov2cl(loo), lock->cll_descr.cld_start);
+ file_end = cl_offset(lov2cl(loo), lock->cll_descr.cld_end + 1) - 1;
for (i = 0, nr = 0; i < r0->lo_nr; i++) {
/*
* XXX for wide striping smarter algorithm is desirable,
* breaking out of the loop, early.
*/
- if (likely(r0->lo_sub[i]) &&
+ if (likely(r0->lo_sub[i]) && /* sparse layout */
lov_stripe_intersects(loo->lo_lsm, i,
file_start, file_end, &start, &end))
nr++;
}
LASSERT(nr > 0);
- lck->lls_sub = libcfs_kvzalloc(nr * sizeof(lck->lls_sub[0]), GFP_NOFS);
- if (!lck->lls_sub)
- return -ENOMEM;
+ lovlck = libcfs_kvzalloc(offsetof(struct lov_lock, lls_sub[nr]),
+ GFP_NOFS);
+ if (!lovlck)
+ return ERR_PTR(-ENOMEM);
- lck->lls_nr = nr;
- /*
- * First, fill in sub-lock descriptions in
- * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
- * (called below in this function, and by lov_lock_enqueue()) to
- * create sub-locks. At this moment, no other thread can access
- * top-lock.
- */
+ lovlck->lls_nr = nr;
for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
if (likely(r0->lo_sub[i]) &&
lov_stripe_intersects(loo->lo_lsm, i,
file_start, file_end, &start, &end)) {
+ struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
struct cl_lock_descr *descr;
- descr = &lck->lls_sub[nr].sub_descr;
+ descr = &lls->sub_lock.cll_descr;
LASSERT(!descr->cld_obj);
descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
descr->cld_start = cl_index(descr->cld_obj, start);
descr->cld_end = cl_index(descr->cld_obj, end);
- descr->cld_mode = parent->cll_descr.cld_mode;
- descr->cld_gid = parent->cll_descr.cld_gid;
- descr->cld_enq_flags = parent->cll_descr.cld_enq_flags;
- /* XXX has no effect */
- lck->lls_sub[nr].sub_got = *descr;
- lck->lls_sub[nr].sub_stripe = i;
+ descr->cld_mode = lock->cll_descr.cld_mode;
+ descr->cld_gid = lock->cll_descr.cld_gid;
+ descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
+ lls->sub_stripe = i;
+
+ /* initialize sub lock */
+ result = lov_sublock_init(env, lock, lls);
+ if (result < 0)
+ break;
+
+ lls->sub_initialized = 1;
nr++;
}
}
- LASSERT(nr == lck->lls_nr);
-
- /*
- * Some sub-locks can be missing at this point. This is not a problem,
- * because enqueue will create them anyway. Main duty of this function
- * is to fill in sub-lock descriptions in a race free manner.
- */
- return result;
-}
+ LASSERT(ergo(result == 0, nr == lovlck->lls_nr));
-static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
- int i, int deluser, int rc)
-{
- struct cl_lock *parent = lck->lls_cl.cls_lock;
-
- LASSERT(cl_lock_is_mutexed(parent));
-
- if (lck->lls_sub[i].sub_flags & LSF_HELD) {
- struct cl_lock *sublock;
- int dying;
-
- sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
- LASSERT(cl_lock_is_mutexed(sublock));
+ if (result != 0) {
+ for (i = 0; i < nr; ++i) {
+ if (!lovlck->lls_sub[i].sub_initialized)
+ break;
- lck->lls_sub[i].sub_flags &= ~LSF_HELD;
- if (deluser)
- cl_lock_user_del(env, sublock);
- /*
- * If the last hold is released, and cancellation is pending
- * for a sub-lock, release parent mutex, to avoid keeping it
- * while sub-lock is being paged out.
- */
- dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
- sublock->cll_descr.cld_mode == CLM_GROUP ||
- (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
- sublock->cll_holds == 1;
- if (dying)
- cl_lock_mutex_put(env, parent);
- cl_lock_unhold(env, sublock, "lov-parent", parent);
- if (dying) {
- cl_lock_mutex_get(env, parent);
- rc = lov_subresult(rc, CLO_REPEAT);
+ cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
}
- /*
- * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
- * not backed by a reference on a
- * sub-lock. lovsub_lock_delete() will clear
- * lck->lls_sub[i].sub_lock under semaphores, just before
- * sub-lock is destroyed.
- */
+ kvfree(lovlck);
+ lovlck = ERR_PTR(result);
}
- return rc;
-}
-
-static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
- int i)
-{
- struct cl_lock *parent = lck->lls_cl.cls_lock;
-
- LASSERT(cl_lock_is_mutexed(parent));
-
- if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
- struct cl_lock *sublock;
-
- sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
- LASSERT(cl_lock_is_mutexed(sublock));
- LASSERT(sublock->cll_state != CLS_FREEING);
- lck->lls_sub[i].sub_flags |= LSF_HELD;
-
- cl_lock_get_trust(sublock);
- cl_lock_hold_add(env, sublock, "lov-parent", parent);
- cl_lock_user_add(env, sublock);
- cl_lock_put(env, sublock);
- }
+ return lovlck;
}
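
Note: the allocation above is the key structural change. lov_lock now ends in a flexible array of lov_lock_sub slots sized with offsetof(struct lov_lock, lls_sub[nr]), so the header and all sub-locks come from a single allocation instead of a separately allocated lls_sub buffer. A standalone sketch of that idiom with an invented struct:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    struct sub_slot {
        int stripe;
        int enqueued;
    };

    struct top_lock {
        int nr;
        struct sub_slot sub[];          /* flexible array member */
    };

    int main(void)
    {
        int nr = 4;
        /* offsetof with a runtime index mirrors the kernel idiom above */
        struct top_lock *tl = calloc(1, offsetof(struct top_lock, sub[nr]));

        if (!tl)
            return 1;
        tl->nr = nr;
        for (int i = 0; i < nr; i++)
            tl->sub[i].stripe = i;
        printf("one allocation, %d sub-lock slots\n", tl->nr);
        free(tl);
        return 0;
    }
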
static void lov_lock_fini(const struct lu_env *env,
struct cl_lock_slice *slice)
{
- struct lov_lock *lck;
+ struct lov_lock *lovlck;
int i;
- lck = cl2lov_lock(slice);
- LASSERT(lck->lls_nr_filled == 0);
- if (lck->lls_sub) {
- for (i = 0; i < lck->lls_nr; ++i)
- /*
- * No sub-locks exists at this point, as sub-lock has
- * a reference on its parent.
- */
- LASSERT(!lck->lls_sub[i].sub_lock);
- kvfree(lck->lls_sub);
+ lovlck = cl2lov_lock(slice);
+ for (i = 0; i < lovlck->lls_nr; ++i) {
+ LASSERT(!lovlck->lls_sub[i].sub_is_enqueued);
+ if (lovlck->lls_sub[i].sub_initialized)
+ cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
}
- kmem_cache_free(lov_lock_kmem, lck);
-}
-
-static int lov_lock_enqueue_wait(const struct lu_env *env,
- struct lov_lock *lck,
- struct cl_lock *sublock)
-{
- struct cl_lock *lock = lck->lls_cl.cls_lock;
- int result;
-
- LASSERT(cl_lock_is_mutexed(lock));
-
- cl_lock_mutex_put(env, lock);
- result = cl_lock_enqueue_wait(env, sublock, 0);
- cl_lock_mutex_get(env, lock);
- return result ?: CLO_REPEAT;
-}
-
-/**
- * Tries to advance a state machine of a given sub-lock toward enqueuing of
- * the top-lock.
- *
- * \retval 0 if state-transition can proceed
- * \retval -ve otherwise.
- */
-static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
- struct cl_lock *sublock,
- struct cl_io *io, __u32 enqflags, int last)
-{
- int result;
-
- /* first, try to enqueue a sub-lock ... */
- result = cl_enqueue_try(env, sublock, io, enqflags);
- if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) {
- /* if it is enqueued, try to `wait' on it---maybe it's already
- * granted
- */
- result = cl_wait_try(env, sublock);
- if (result == CLO_REENQUEUED)
- result = CLO_WAIT;
- }
- /*
- * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
- * parallel, otherwise---enqueue has to wait until sub-lock is granted
- * before proceeding to the next one.
- */
- if ((result == CLO_WAIT) && (sublock->cll_state <= CLS_HELD) &&
- (enqflags & CEF_ASYNC) && (!last || (enqflags & CEF_AGL)))
- result = 0;
- return result;
-}
-
-/**
- * Helper function for lov_lock_enqueue() that creates missing sub-lock.
- */
-static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
- struct cl_io *io, struct lov_lock *lck, int idx)
-{
- struct lov_lock_link *link = NULL;
- struct cl_lock *sublock;
- int result;
-
- LASSERT(parent->cll_depth == 1);
- cl_lock_mutex_put(env, parent);
- sublock = lov_sublock_alloc(env, io, lck, idx, &link);
- if (!IS_ERR(sublock))
- cl_lock_mutex_get(env, sublock);
- cl_lock_mutex_get(env, parent);
-
- if (!IS_ERR(sublock)) {
- cl_lock_get_trust(sublock);
- if (parent->cll_state == CLS_QUEUING &&
- !lck->lls_sub[idx].sub_lock) {
- lov_sublock_adopt(env, lck, sublock, idx, link);
- } else {
- kmem_cache_free(lov_lock_link_kmem, link);
- /* other thread allocated sub-lock, or enqueue is no
- * longer going on
- */
- cl_lock_mutex_put(env, parent);
- cl_lock_unhold(env, sublock, "lov-parent", parent);
- cl_lock_mutex_get(env, parent);
- }
- cl_lock_mutex_put(env, sublock);
- cl_lock_put(env, sublock);
- result = CLO_REPEAT;
- } else
- result = PTR_ERR(sublock);
- return result;
+ kvfree(lovlck);
}
/**
@@ -543,529 +225,59 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
*/
static int lov_lock_enqueue(const struct lu_env *env,
const struct cl_lock_slice *slice,
- struct cl_io *io, __u32 enqflags)
+ struct cl_io *io, struct cl_sync_io *anchor)
{
- struct cl_lock *lock = slice->cls_lock;
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, lock);
+ struct cl_lock *lock = slice->cls_lock;
+ struct lov_lock *lovlck = cl2lov_lock(slice);
int i;
- int result;
- enum cl_lock_state minstate;
+ int rc = 0;
- for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct lov_lock_sub *lls;
- struct cl_lock *sublock;
+ for (i = 0; i < lovlck->lls_nr; ++i) {
+ struct lov_lock_sub *lls = &lovlck->lls_sub[i];
struct lov_sublock_env *subenv;
- if (lock->cll_state != CLS_QUEUING) {
- /*
- * Lock might have left QUEUING state if previous
- * iteration released its mutex. Stop enqueing in this
- * case and let the upper layer to decide what to do.
- */
- LASSERT(i > 0 && result != 0);
- break;
- }
-
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- /*
- * Sub-lock might have been canceled, while top-lock was
- * cached.
- */
- if (!sub) {
- result = lov_sublock_fill(env, lock, io, lck, i);
- /* lov_sublock_fill() released @lock mutex,
- * restart.
- */
+ subenv = lov_sublock_env_get(env, lock, lls);
+ if (IS_ERR(subenv)) {
+ rc = PTR_ERR(subenv);
break;
}
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- lov_sublock_hold(env, lck, i);
- rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
- subenv->lse_io, enqflags,
- i == lck->lls_nr - 1);
- minstate = min(minstate, sublock->cll_state);
- if (rc == CLO_WAIT) {
- switch (sublock->cll_state) {
- case CLS_QUEUING:
- /* take recursive mutex, the lock is
- * released in lov_lock_enqueue_wait.
- */
- cl_lock_mutex_get(env, sublock);
- lov_sublock_unlock(env, sub, closure,
- subenv);
- rc = lov_lock_enqueue_wait(env, lck,
- sublock);
- break;
- case CLS_CACHED:
- cl_lock_get(sublock);
- /* take recursive mutex of sublock */
- cl_lock_mutex_get(env, sublock);
- /* need to release all locks in closure
- * otherwise it may deadlock. LU-2683.
- */
- lov_sublock_unlock(env, sub, closure,
- subenv);
- /* sublock and parent are held. */
- rc = lov_sublock_release(env, lck, i,
- 1, rc);
- cl_lock_mutex_put(env, sublock);
- cl_lock_put(env, sublock);
- break;
- default:
- lov_sublock_unlock(env, sub, closure,
- subenv);
- break;
- }
- } else {
- LASSERT(!sublock->cll_conflict);
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- }
- result = lov_subresult(result, rc);
- if (result != 0)
+ rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io,
+ &lls->sub_lock, anchor);
+ lov_sublock_env_put(subenv);
+ if (rc != 0)
break;
- }
- cl_lock_closure_fini(closure);
- return result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT;
-}
-
-static int lov_lock_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- int i;
- int result;
-
- for (result = 0, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
- struct lov_sublock_env *subenv;
- /* top-lock state cannot change concurrently, because single
- * thread (one that released the last hold) carries unlocking
- * to the completion.
- */
- LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- if (!sub)
- continue;
-
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- if (lls->sub_flags & LSF_HELD) {
- LASSERT(sublock->cll_state == CLS_HELD ||
- sublock->cll_state == CLS_ENQUEUED);
- rc = cl_unuse_try(subenv->lse_env, sublock);
- rc = lov_sublock_release(env, lck, i, 0, rc);
- }
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- result = lov_subresult(result, rc);
+ lls->sub_is_enqueued = 1;
}
-
- if (result == 0 && lck->lls_cancel_race) {
- lck->lls_cancel_race = 0;
- result = -ESTALE;
- }
- cl_lock_closure_fini(closure);
- return result;
+ return rc;
}
static void lov_lock_cancel(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
+ struct cl_lock *lock = slice->cls_lock;
+ struct lov_lock *lovlck = cl2lov_lock(slice);
int i;
- int result;
- for (result = 0, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
+ for (i = 0; i < lovlck->lls_nr; ++i) {
+ struct lov_lock_sub *lls = &lovlck->lls_sub[i];
+ struct cl_lock *sublock = &lls->sub_lock;
struct lov_sublock_env *subenv;
- /* top-lock state cannot change concurrently, because single
- * thread (one that released the last hold) carries unlocking
- * to the completion.
- */
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- if (!sub)
- continue;
-
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- if (!(lls->sub_flags & LSF_HELD)) {
- lov_sublock_unlock(env, sub, closure, subenv);
- continue;
- }
-
- switch (sublock->cll_state) {
- case CLS_HELD:
- rc = cl_unuse_try(subenv->lse_env, sublock);
- lov_sublock_release(env, lck, i, 0, 0);
- break;
- default:
- lov_sublock_release(env, lck, i, 1, 0);
- break;
- }
- lov_sublock_unlock(env, sub, closure, subenv);
- }
-
- if (rc == CLO_REPEAT) {
- --i;
- continue;
- }
-
- result = lov_subresult(result, rc);
- }
-
- if (result)
- CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
- "lov_lock_cancel fails with %d.\n", result);
-
- cl_lock_closure_fini(closure);
-}
-
-static int lov_lock_wait(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- enum cl_lock_state minstate;
- int reenqueued;
- int result;
- int i;
-
-again:
- for (result = 0, minstate = CLS_FREEING, i = 0, reenqueued = 0;
- i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
- struct lov_sublock_env *subenv;
-
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- LASSERT(sublock->cll_state >= CLS_ENQUEUED);
- if (sublock->cll_state < CLS_HELD)
- rc = cl_wait_try(env, sublock);
-
- minstate = min(minstate, sublock->cll_state);
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- if (rc == CLO_REENQUEUED) {
- reenqueued++;
- rc = 0;
- }
- result = lov_subresult(result, rc);
- if (result != 0)
- break;
- }
- /* Each sublock only can be reenqueued once, so will not loop
- * forever.
- */
- if (result == 0 && reenqueued != 0)
- goto again;
- cl_lock_closure_fini(closure);
- return result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT;
-}
-
-static int lov_lock_use(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- int result;
- int i;
-
- LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
-
- for (result = 0, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
- struct lov_sublock_env *subenv;
-
- LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
-
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- if (!sub) {
- /*
- * Sub-lock might have been canceled, while top-lock was
- * cached.
- */
- result = -ESTALE;
- break;
- }
-
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- LASSERT(sublock->cll_state != CLS_FREEING);
- lov_sublock_hold(env, lck, i);
- if (sublock->cll_state == CLS_CACHED) {
- rc = cl_use_try(subenv->lse_env, sublock, 0);
- if (rc != 0)
- rc = lov_sublock_release(env, lck,
- i, 1, rc);
- } else if (sublock->cll_state == CLS_NEW) {
- /* Sub-lock might have been canceled, while
- * top-lock was cached.
- */
- result = -ESTALE;
- lov_sublock_release(env, lck, i, 1, result);
- }
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- result = lov_subresult(result, rc);
- if (result != 0)
- break;
- }
-
- if (lck->lls_cancel_race) {
- /*
- * If there is unlocking happened at the same time, then
- * sublock_lock state should be FREEING, and lov_sublock_lock
- * should return CLO_REPEAT. In this case, it should return
- * ESTALE, and up layer should reset the lock state to be NEW.
- */
- lck->lls_cancel_race = 0;
- LASSERT(result != 0);
- result = -ESTALE;
- }
- cl_lock_closure_fini(closure);
- return result;
-}
-
-/**
- * Check if the extent region \a descr is covered by \a child against the
- * specific \a stripe.
- */
-static int lov_lock_stripe_is_matching(const struct lu_env *env,
- struct lov_object *lov, int stripe,
- const struct cl_lock_descr *child,
- const struct cl_lock_descr *descr)
-{
- struct lov_stripe_md *lsm = lov->lo_lsm;
- u64 start;
- u64 end;
- int result;
-
- if (lov_r0(lov)->lo_nr == 1)
- return cl_lock_ext_match(child, descr);
-
- /*
- * For a multi-stripes object:
- * - make sure the descr only covers child's stripe, and
- * - check if extent is matching.
- */
- start = cl_offset(&lov->lo_cl, descr->cld_start);
- end = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
- result = 0;
- /* glimpse should work on the object with LOV EA hole. */
- if (end - start <= lsm->lsm_stripe_size) {
- int idx;
-
- idx = lov_stripe_number(lsm, start);
- if (idx == stripe ||
- unlikely(!lov_r0(lov)->lo_sub[idx])) {
- idx = lov_stripe_number(lsm, end);
- if (idx == stripe ||
- unlikely(!lov_r0(lov)->lo_sub[idx]))
- result = 1;
- }
- }
-
- if (result != 0) {
- struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
- u64 sub_start;
- u64 sub_end;
-
- subd->cld_obj = NULL; /* don't need sub object at all */
- subd->cld_mode = descr->cld_mode;
- subd->cld_gid = descr->cld_gid;
- result = lov_stripe_intersects(lsm, stripe, start, end,
- &sub_start, &sub_end);
- LASSERT(result);
- subd->cld_start = cl_index(child->cld_obj, sub_start);
- subd->cld_end = cl_index(child->cld_obj, sub_end);
- result = cl_lock_ext_match(child, subd);
- }
- return result;
-}
-
-/**
- * An implementation of cl_lock_operations::clo_fits_into() method.
- *
- * Checks whether a lock (given by \a slice) is suitable for \a
- * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
- * O_APPEND write.
- *
- * \see ccc_lock_fits_into().
- */
-static int lov_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- struct lov_lock *lov = cl2lov_lock(slice);
- struct lov_object *obj = cl2lov(slice->cls_obj);
- int result;
-
- LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
- LASSERT(lov->lls_nr > 0);
-
- /* for top lock, it's necessary to match enq flags otherwise it will
- * run into problem if a sublock is missing and reenqueue.
- */
- if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
- return 0;
-
- if (need->cld_mode == CLM_GROUP)
- /*
- * always allow to match group lock.
- */
- result = cl_lock_ext_match(&lov->lls_orig, need);
- else if (lov->lls_nr == 1) {
- struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
-
- result = lov_lock_stripe_is_matching(env,
- cl2lov(slice->cls_obj),
- lov->lls_sub[0].sub_stripe,
- got, need);
- } else if (io->ci_type != CIT_SETATTR && io->ci_type != CIT_MISC &&
- !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
- /*
- * Multi-stripe locks are only suitable for `quick' IO and for
- * glimpse.
- */
- result = 0;
- else
- /*
- * Most general case: multi-stripe existing lock, and
- * (potentially) multi-stripe @need lock. Check that @need is
- * covered by @lov's sub-locks.
- *
- * For now, ignore lock expansions made by the server, and
- * match against original lock extent.
- */
- result = cl_lock_ext_match(&lov->lls_orig, need);
- CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %d %d/%d: %d\n",
- PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
- lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
- result);
- return result;
-}
-
-void lov_lock_unlink(const struct lu_env *env,
- struct lov_lock_link *link, struct lovsub_lock *sub)
-{
- struct lov_lock *lck = link->lll_super;
- struct cl_lock *parent = lck->lls_cl.cls_lock;
-
- LASSERT(cl_lock_is_mutexed(parent));
- LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
-
- list_del_init(&link->lll_list);
- LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
- /* yank this sub-lock from parent's array */
- lck->lls_sub[link->lll_idx].sub_lock = NULL;
- LASSERT(lck->lls_nr_filled > 0);
- lck->lls_nr_filled--;
- lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
- cl_lock_put(env, parent);
- kmem_cache_free(lov_lock_link_kmem, link);
-}
-
-struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
- struct lov_lock *lck,
- struct lovsub_lock *sub)
-{
- struct lov_lock_link *scan;
-
- LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- if (scan->lll_super == lck)
- return scan;
- }
- return NULL;
-}
-
-/**
- * An implementation of cl_lock_operations::clo_delete() method. This is
- * invoked for "top-to-bottom" delete, when lock destruction starts from the
- * top-lock, e.g., as a result of inode destruction.
- *
- * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted there:
- * this is done separately elsewhere:
- *
- * - for inode destruction, lov_object_delete() calls cl_object_kill() for
- * each sub-object, purging its locks;
- *
- * - in other cases (e.g., a fatal error with a top-lock) sub-locks are
- * left in the cache.
- */
-static void lov_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- struct lov_lock_link *link;
- int rc;
- int i;
-
- LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
-
- for (i = 0; i < lck->lls_nr; ++i) {
- struct lov_lock_sub *lls = &lck->lls_sub[i];
- struct lovsub_lock *lsl = lls->sub_lock;
-
- if (!lsl) /* already removed */
+ if (!lls->sub_is_enqueued)
continue;
- rc = lov_sublock_lock(env, lck, lls, closure, NULL);
- if (rc == CLO_REPEAT) {
- --i;
- continue;
+ lls->sub_is_enqueued = 0;
+ subenv = lov_sublock_env_get(env, lock, lls);
+ if (!IS_ERR(subenv)) {
+ cl_lock_cancel(subenv->lse_env, sublock);
+ lov_sublock_env_put(subenv);
+ } else {
+ CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
+ "lov_lock_cancel fails with %ld.\n",
+ PTR_ERR(subenv));
}
-
- LASSERT(rc == 0);
- LASSERT(lsl->lss_cl.cls_lock->cll_state < CLS_FREEING);
-
- if (lls->sub_flags & LSF_HELD)
- lov_sublock_release(env, lck, i, 1, 0);
-
- link = lov_lock_link_find(env, lck, lsl);
- LASSERT(link);
- lov_lock_unlink(env, link, lsl);
- LASSERT(!lck->lls_sub[i].sub_lock);
-
- lov_sublock_unlock(env, lsl, closure, NULL);
}
-
- cl_lock_closure_fini(closure);
}
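
Note: together, the new enqueue and cancel paths above form a simple pairing: enqueue flags each sub-lock it successfully enqueues (sub_is_enqueued) and stops at the first failure; cancel undoes only the flagged ones. A minimal sketch of that bookkeeping, with invented helpers rather than the cl_lock API:

    #include <stdio.h>

    #define NR_SUB 4

    /* pretend the third sub-lock fails to enqueue */
    static int sub_enqueue(int i)
    {
        return (i == 2) ? -1 : 0;
    }

    int main(void)
    {
        int enqueued[NR_SUB] = { 0 };
        int rc = 0;
        int i;

        for (i = 0; i < NR_SUB; i++) {
            rc = sub_enqueue(i);
            if (rc != 0)
                break;
            enqueued[i] = 1;            /* lls->sub_is_enqueued = 1 */
        }

        for (i = 0; i < NR_SUB; i++) {
            if (!enqueued[i])
                continue;               /* cancel only what was enqueued */
            enqueued[i] = 0;
            printf("cancel sub-lock %d\n", i);
        }
        return rc;
    }
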
static int lov_lock_print(const struct lu_env *env, void *cookie,
@@ -1079,12 +291,8 @@ static int lov_lock_print(const struct lu_env *env, void *cookie,
struct lov_lock_sub *sub;
sub = &lck->lls_sub[i];
- (*p)(env, cookie, " %d %x: ", i, sub->sub_flags);
- if (sub->sub_lock)
- cl_lock_print(env, cookie, p,
- sub->sub_lock->lss_cl.cls_lock);
- else
- (*p)(env, cookie, "---\n");
+ (*p)(env, cookie, " %d %x: ", i, sub->sub_is_enqueued);
+ cl_lock_print(env, cookie, p, &sub->sub_lock);
}
return 0;
}
@@ -1092,12 +300,7 @@ static int lov_lock_print(const struct lu_env *env, void *cookie,
static const struct cl_lock_operations lov_lock_ops = {
.clo_fini = lov_lock_fini,
.clo_enqueue = lov_lock_enqueue,
- .clo_wait = lov_lock_wait,
- .clo_use = lov_lock_use,
- .clo_unuse = lov_lock_unuse,
.clo_cancel = lov_lock_cancel,
- .clo_fits_into = lov_lock_fits_into,
- .clo_delete = lov_lock_delete,
.clo_print = lov_lock_print
};
@@ -1105,14 +308,13 @@ int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
struct cl_lock *lock, const struct cl_io *io)
{
struct lov_lock *lck;
- int result;
+ int result = 0;
- lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS);
- if (lck) {
+ lck = lov_lock_sub_init(env, obj, lock);
+ if (!IS_ERR(lck))
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
- result = lov_lock_sub_init(env, lck, io);
- } else
- result = -ENOMEM;
+ else
+ result = PTR_ERR(lck);
return result;
}
@@ -1147,21 +349,9 @@ int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS);
if (lck) {
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
- lck->lls_orig = lock->cll_descr;
result = 0;
}
return result;
}
-static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
- struct cl_lock *parent)
-{
- struct cl_lock_closure *closure;
-
- closure = &lov_env_info(env)->lti_closure;
- LASSERT(list_empty(&closure->clc_list));
- cl_lock_closure_init(env, closure, parent, 1);
- return closure;
-}
-
/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 029cd4d62..56ef41d17 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -154,6 +154,7 @@ void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
valid &= src->o_valid;
if (*set) {
+ tgt->o_valid &= valid;
if (valid & OBD_MD_FLSIZE) {
/* this handles sparse files properly */
u64 lov_size;
@@ -172,12 +173,22 @@ void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
tgt->o_mtime = src->o_mtime;
if (valid & OBD_MD_FLDATAVERSION)
tgt->o_data_version += src->o_data_version;
+
+ /* handle flags */
+ if (valid & OBD_MD_FLFLAGS)
+ tgt->o_flags &= src->o_flags;
+ else
+ tgt->o_flags = 0;
} else {
memcpy(tgt, src, sizeof(*tgt));
tgt->o_oi = lsm->lsm_oi;
+ tgt->o_valid = valid;
if (valid & OBD_MD_FLSIZE)
tgt->o_size = lov_stripe_size(lsm, src->o_size,
stripeno);
+ tgt->o_flags = 0;
+ if (valid & OBD_MD_FLFLAGS)
+ tgt->o_flags = src->o_flags;
}
/* data_version needs to be valid on all stripes to be correct! */
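
Note: the o_flags handling added above merges flags as an intersection across stripes: the first stripe seeds the value, every later stripe ANDs into it, and a stripe that reports no flags at all zeroes the result, so a flag survives only if every stripe agrees on it. A sketch of that rule, with plain integers standing in for the obdo fields:

    #include <stdio.h>

    int main(void)
    {
        unsigned int stripe_flags[] = { 0x5, 0x7, 0x5 };
        int have_flags[] = { 1, 1, 1 };     /* OBD_MD_FLFLAGS valid? */
        unsigned int merged = 0;
        int set = 0;

        for (int i = 0; i < 3; i++) {
            if (!set) {
                merged = have_flags[i] ? stripe_flags[i] : 0;
                set = 1;
            } else if (have_flags[i]) {
                merged &= stripe_flags[i];  /* tgt->o_flags &= src->o_flags */
            } else {
                merged = 0;
            }
        }
        printf("merged flags: 0x%x\n", merged);  /* 0x5 */
        return 0;
    }
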
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 5daa7faf4..e15ef2ece 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -54,7 +54,6 @@
#include "../include/lprocfs_status.h"
#include "../include/lustre_param.h"
#include "../include/cl_object.h"
-#include "../include/lclient.h" /* for cl_client_lru */
#include "../include/lustre/ll_fiemap.h"
#include "../include/lustre_fid.h"
@@ -124,7 +123,6 @@ static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid,
static int lov_notify(struct obd_device *obd, struct obd_device *watched,
enum obd_notify_event ev, void *data);
-#define MAX_STRING_SIZE 128
int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
struct obd_connect_data *data)
{
@@ -965,7 +963,6 @@ int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
CERROR("Unknown command: %d\n", lcfg->lcfg_command);
rc = -EINVAL;
goto out;
-
}
}
out:
@@ -1734,6 +1731,27 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
if (!lsm_has_objects(lsm)) {
+ if (lsm && lsm_is_released(lsm) && (fm_key->fiemap.fm_start <
+ fm_key->oa.o_size)) {
+ /*
+ * released file: return a minimal FIEMAP if the
+ * request fits within the file size.
+ */
+ fiemap->fm_mapped_extents = 1;
+ fiemap->fm_extents[0].fe_logical =
+ fm_key->fiemap.fm_start;
+ if (fm_key->fiemap.fm_start + fm_key->fiemap.fm_length <
+ fm_key->oa.o_size) {
+ fiemap->fm_extents[0].fe_length =
+ fm_key->fiemap.fm_length;
+ } else {
+ fiemap->fm_extents[0].fe_length =
+ fm_key->oa.o_size - fm_key->fiemap.fm_start;
+ fiemap->fm_extents[0].fe_flags |=
+ (FIEMAP_EXTENT_UNKNOWN |
+ FIEMAP_EXTENT_LAST);
+ }
+ }
rc = 0;
goto out;
}
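
Note: the released-file branch added above synthesizes a single FIEMAP extent: it starts at fm_start, is clamped to the file size, and is flagged UNKNOWN|LAST when the requested range runs past EOF. A sketch of just the clamping, with invented constants and a simplified extent struct:

    #include <stdio.h>

    #define FE_LAST    0x1
    #define FE_UNKNOWN 0x2

    struct extent {
        unsigned long long logical;
        unsigned long long length;
        unsigned int flags;
    };

    int main(void)
    {
        unsigned long long fm_start = 4096;
        unsigned long long fm_length = 8192;
        unsigned long long file_size = 10000;
        struct extent ext = { .logical = fm_start };

        if (fm_start + fm_length < file_size) {
            ext.length = fm_length;
        } else {
            /* request runs past EOF: clamp and mark it */
            ext.length = file_size - fm_start;
            ext.flags |= FE_UNKNOWN | FE_LAST;
        }
        printf("extent: logical=%llu length=%llu flags=0x%x\n",
               ext.logical, ext.length, ext.flags);
        return 0;
    }
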
@@ -2173,7 +2191,6 @@ void lov_stripe_lock(struct lov_stripe_md *md)
LASSERT(md->lsm_lock_owner == 0);
md->lsm_lock_owner = current_pid();
}
-EXPORT_SYMBOL(lov_stripe_lock);
void lov_stripe_unlock(struct lov_stripe_md *md)
__releases(&md->lsm_lock)
@@ -2182,7 +2199,6 @@ void lov_stripe_unlock(struct lov_stripe_md *md)
md->lsm_lock_owner = 0;
spin_unlock(&md->lsm_lock);
}
-EXPORT_SYMBOL(lov_stripe_unlock);
static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
struct obd_quotactl *oqctl)
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 1f8ed95a6..561d493b2 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -67,7 +67,7 @@ struct lov_layout_operations {
int (*llo_print)(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o);
int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
int (*llo_lock_init)(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
const struct cl_io *io);
@@ -193,6 +193,18 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
return result;
}
+static int lov_page_slice_fixup(struct lov_object *lov,
+ struct cl_object *stripe)
+{
+ struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
+ struct cl_object *o;
+
+ cl_object_for_each(o, stripe)
+ o->co_slice_off += hdr->coh_page_bufsize;
+
+ return cl_object_header(stripe)->coh_page_bufsize;
+}
+
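
Note: lov_page_slice_fixup() above lets a single cl_page buffer host both the top object's slices and one stripe's slices: each sub-layer's slice offset is shifted by the space the top layers already occupy, and the stripe's buffer size is returned so the top object can grow its own. A toy illustration of the offset shift, with made-up sizes:

    #include <stdio.h>

    int main(void)
    {
        int top_bufsize = 64;                 /* hdr->coh_page_bufsize */
        int sub_slice_off[] = { 0, 16, 40 };  /* offsets inside stripe buffer */

        for (int i = 0; i < 3; i++) {
            sub_slice_off[i] += top_bufsize;  /* o->co_slice_off += ... */
            printf("sub-layer %d now at offset %d\n", i, sub_slice_off[i]);
        }
        return 0;
    }
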
static int lov_init_raid0(const struct lu_env *env,
struct lov_device *dev, struct lov_object *lov,
const struct cl_object_conf *conf,
@@ -222,6 +234,8 @@ static int lov_init_raid0(const struct lu_env *env,
r0->lo_sub = libcfs_kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]),
GFP_NOFS);
if (r0->lo_sub) {
+ int psz = 0;
+
result = 0;
subconf->coc_inode = conf->coc_inode;
spin_lock_init(&r0->lo_sub_lock);
@@ -254,13 +268,24 @@ static int lov_init_raid0(const struct lu_env *env,
if (result == -EAGAIN) { /* try again */
--i;
result = 0;
+ continue;
}
} else {
result = PTR_ERR(stripe);
}
+
+ if (result == 0) {
+ int sz = lov_page_slice_fixup(lov, stripe);
+
+ LASSERT(ergo(psz > 0, psz == sz));
+ psz = sz;
+ }
}
- } else
+ if (result == 0)
+ cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
+ } else {
result = -ENOMEM;
+ }
out:
return result;
}
@@ -286,8 +311,6 @@ static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
lov_layout_wait(env, lov);
-
- cl_object_prune(env, &lov->lo_cl);
return 0;
}
@@ -355,7 +378,7 @@ static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
struct lovsub_object *los = r0->lo_sub[i];
if (los) {
- cl_locks_prune(env, &los->lso_cl, 1);
+ cl_object_prune(env, &los->lso_cl);
/*
* If top-level object is to be evicted from
* the cache, so are its sub-objects.
@@ -364,7 +387,6 @@ static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
}
}
}
- cl_object_prune(env, &lov->lo_cl);
return 0;
}
@@ -666,7 +688,6 @@ static int lov_layout_change(const struct lu_env *unused,
const struct lov_layout_operations *old_ops;
const struct lov_layout_operations *new_ops;
- struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
void *cookie;
struct lu_env *env;
int refcheck;
@@ -691,13 +712,15 @@ static int lov_layout_change(const struct lu_env *unused,
old_ops = &lov_dispatch[lov->lo_type];
new_ops = &lov_dispatch[llt];
+ result = cl_object_prune(env, &lov->lo_cl);
+ if (result != 0)
+ goto out;
+
result = old_ops->llo_delete(env, lov, &lov->u);
if (result == 0) {
old_ops->llo_fini(env, lov, &lov->u);
LASSERT(atomic_read(&lov->lo_active_ios) == 0);
- LASSERT(!hdr->coh_tree.rnode);
- LASSERT(hdr->coh_pages == 0);
lov->lo_type = LLT_EMPTY;
result = new_ops->llo_init(env,
@@ -713,6 +736,7 @@ static int lov_layout_change(const struct lu_env *unused,
}
}
+out:
cl_env_put(env, &refcheck);
cl_env_reexit(cookie);
return result;
@@ -793,7 +817,8 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
goto out;
}
- lov->lo_layout_invalid = lov_layout_change(env, lov, conf);
+ result = lov_layout_change(env, lov, conf);
+ lov->lo_layout_invalid = result != 0;
out:
lov_conf_unlock(lov);
@@ -825,10 +850,10 @@ static int lov_object_print(const struct lu_env *env, void *cookie,
}
int lov_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
- return LOV_2DISPATCH_NOLOCK(cl2lov(obj),
- llo_page_init, env, obj, page, vmpage);
+ return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
+ index);
}
/**
@@ -911,8 +936,9 @@ struct lu_object *lov_object_alloc(const struct lu_env *env,
* for object with different layouts.
*/
obj->lo_ops = &lov_lu_obj_ops;
- } else
+ } else {
obj = NULL;
+ }
return obj;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
index ae83eb0f6..9302f06c3 100644
--- a/drivers/staging/lustre/lustre/lov/lov_offset.c
+++ b/drivers/staging/lustre/lustre/lov/lov_offset.c
@@ -66,6 +66,18 @@ u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno)
return lov_size;
}
+/**
+ * Compute the file-level page index from a stripe-level page offset
+ */
+pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
+ int stripe)
+{
+ loff_t offset;
+
+ offset = lov_stripe_size(lsm, stripe_index << PAGE_SHIFT, stripe);
+ return offset >> PAGE_SHIFT;
+}
+
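
Note: lov_stripe_pgoff() above converts a stripe-level page index into a file-level one by going through byte offsets. For a plain round-robin RAID0 layout the mapping can be sketched directly; the layout parameters below are assumed, not taken from the code:

    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define STRIPE_SIZE  (1ULL << 20)        /* 1 MiB = 256 pages */
    #define STRIPE_COUNT 2

    /* file offset of byte `off` within stripe `stripe` (round robin) */
    static unsigned long long stripe_to_file(unsigned long long off, int stripe)
    {
        unsigned long long chunk = off / STRIPE_SIZE;
        unsigned long long rem = off % STRIPE_SIZE;

        return (chunk * STRIPE_COUNT + stripe) * STRIPE_SIZE + rem;
    }

    int main(void)
    {
        unsigned long long stripe_index = 10;    /* page 10 within stripe 1 */
        unsigned long long file_off =
            stripe_to_file(stripe_index << PAGE_SHIFT, 1);

        /* prints 266: stripe 1's pages start at file page 256 */
        printf("file page index: %llu\n", file_off >> PAGE_SHIFT);
        return 0;
    }
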
/* we have an offset in file backed by an lov and want to find out where
* that offset lands in our given stripe of the file. for the easy
* case where the offset is within the stripe, we just have to scale the
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 3925633a9..0215ea54d 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -136,7 +136,6 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
return -EINVAL;
-
}
if (lsm) {
@@ -444,8 +443,7 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
if (lum.lmm_magic == LOV_USER_MAGIC) {
/* User request for v1, we need skip lmm_pool_name */
if (lmmk->lmm_magic == LOV_MAGIC_V3) {
- memmove((char *)(&lmmk->lmm_stripe_count) +
- sizeof(lmmk->lmm_stripe_count),
+ memmove(((struct lov_mds_md_v1 *)lmmk)->lmm_objects,
((struct lov_mds_md_v3 *)lmmk)->lmm_objects,
lmmk->lmm_stripe_count *
sizeof(struct lov_ost_data_v1));
@@ -457,9 +455,9 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
}
/* User wasn't expecting this many OST entries */
- if (lum.lmm_stripe_count == 0)
+ if (lum.lmm_stripe_count == 0) {
lmm_size = lum_size;
- else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
+ } else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
rc = -EOVERFLOW;
goto out_set;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index fdcaf8047..0306f00c3 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -36,6 +36,7 @@
* Implementation of cl_page for LOV layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_LOV
@@ -52,116 +53,66 @@
*
*/
-static int lov_page_invariant(const struct cl_page_slice *slice)
+/**
+ * Adjust the stripe index by layout of raid0. @max_index is the maximum
+ * page index covered by an underlying DLM lock.
+ * This function converts max_index from stripe level to file level, and make
+ * sure it's not beyond one stripe.
+ */
+static int lov_raid0_page_is_under_lock(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused,
+ pgoff_t *max_index)
{
- const struct cl_page *page = slice->cpl_page;
- const struct cl_page *sub = lov_sub_page(slice);
-
- return ergo(sub,
- page->cp_child == sub &&
- sub->cp_parent == page &&
- page->cp_state == sub->cp_state);
-}
+ struct lov_object *loo = cl2lov(slice->cpl_obj);
+ struct lov_layout_raid0 *r0 = lov_r0(loo);
+ pgoff_t index = *max_index;
+ unsigned int pps; /* pages per stripe */
-static void lov_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct cl_page *sub = lov_sub_page(slice);
+ CDEBUG(D_READA, "*max_index = %lu, nr = %d\n", index, r0->lo_nr);
+ if (index == 0) /* the page is not covered by any lock */
+ return 0;
- LINVRNT(lov_page_invariant(slice));
+ if (r0->lo_nr == 1) /* single stripe file */
+ return 0;
- if (sub) {
- LASSERT(sub->cp_state == CPS_FREEING);
- lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent);
- sub->cp_parent = NULL;
- slice->cpl_page->cp_child = NULL;
- cl_page_put(env, sub);
+ /* max_index is stripe level, convert it into file level */
+ if (index != CL_PAGE_EOF) {
+ int stripeno = lov_page_stripe(slice->cpl_page);
+ *max_index = lov_stripe_pgoff(loo->lo_lsm, index, stripeno);
}
-}
-
-static int lov_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io,
- int nonblock)
-{
- struct lov_io *lio = lov_env_io(env);
- struct lov_io_sub *sub;
- LINVRNT(lov_page_invariant(slice));
- LINVRNT(!cl2lov_page(slice)->lps_invalid);
+ /* calculate the end of current stripe */
+ pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
+ index = ((slice->cpl_index + pps) & ~(pps - 1)) - 1;
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- lov_sub_page(slice)->cp_owner = sub->sub_io;
- lov_sub_put(sub);
- } else
- LBUG(); /* Arrgh */
+ /* never exceed the end of the stripe */
+ *max_index = min_t(pgoff_t, *max_index, index);
return 0;
}
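
Note: the rounding at the end of lov_raid0_page_is_under_lock() finds the last page of the stripe containing cpl_index: with pps pages per stripe (a power of two), ((index + pps) & ~(pps - 1)) - 1 rounds up to the next stripe boundary and steps back one page. A worked example with assumed sizes:

    #include <stdio.h>

    int main(void)
    {
        unsigned long pps = 256;     /* 1 MiB stripe / 4 KiB pages */
        unsigned long index = 300;   /* slice->cpl_index */
        unsigned long end = ((index + pps) & ~(pps - 1)) - 1;

        /* prints 511: pages 256..511 form this stripe */
        printf("page %lu lies in a stripe ending at page %lu\n", index, end);
        return 0;
    }
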
-static void lov_page_assume(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io)
-{
- lov_page_own(env, slice, io, 0);
-}
-
-static int lov_page_cache_add(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct lov_io *lio = lov_env_io(env);
- struct lov_io_sub *sub;
- int rc = 0;
-
- LINVRNT(lov_page_invariant(slice));
- LINVRNT(!cl2lov_page(slice)->lps_invalid);
-
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- rc = cl_page_cache_add(sub->sub_env, sub->sub_io,
- slice->cpl_page->cp_child, CRT_WRITE);
- lov_sub_put(sub);
- } else {
- rc = PTR_ERR(sub);
- CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page, "rc = %d\n", rc);
- }
- return rc;
-}
-
-static int lov_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
+static int lov_raid0_page_print(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ void *cookie, lu_printer_t printer)
{
struct lov_page *lp = cl2lov_page(slice);
- return (*printer)(env, cookie, LUSTRE_LOV_NAME"-page@%p\n", lp);
+ return (*printer)(env, cookie, LUSTRE_LOV_NAME "-page@%p, raid0\n", lp);
}
-static const struct cl_page_operations lov_page_ops = {
- .cpo_fini = lov_page_fini,
- .cpo_own = lov_page_own,
- .cpo_assume = lov_page_assume,
- .io = {
- [CRT_WRITE] = {
- .cpo_cache_add = lov_page_cache_add
- }
- },
- .cpo_print = lov_page_print
+static const struct cl_page_operations lov_raid0_page_ops = {
+ .cpo_is_under_lock = lov_raid0_page_is_under_lock,
+ .cpo_print = lov_raid0_page_print
};
-static void lov_empty_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- LASSERT(!slice->cpl_page->cp_child);
-}
-
int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
struct lov_object *loo = cl2lov(obj);
struct lov_layout_raid0 *r0 = lov_r0(loo);
struct lov_io *lio = lov_env_io(env);
- struct cl_page *subpage;
struct cl_object *subobj;
+ struct cl_object *o;
struct lov_io_sub *sub;
struct lov_page *lpg = cl_object_page_slice(obj, page);
loff_t offset;
@@ -169,59 +120,57 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
int stripe;
int rc;
- offset = cl_offset(obj, page->cp_index);
+ offset = cl_offset(obj, index);
stripe = lov_stripe_number(loo->lo_lsm, offset);
LASSERT(stripe < r0->lo_nr);
rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff);
LASSERT(rc == 0);
- lpg->lps_invalid = 1;
- cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);
+ cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_raid0_page_ops);
sub = lov_sub_get(env, lio, stripe);
- if (IS_ERR(sub)) {
- rc = PTR_ERR(sub);
- goto out;
- }
+ if (IS_ERR(sub))
+ return PTR_ERR(sub);
subobj = lovsub2cl(r0->lo_sub[stripe]);
- subpage = cl_page_find_sub(sub->sub_env, subobj,
- cl_index(subobj, suboff), vmpage, page);
- lov_sub_put(sub);
- if (IS_ERR(subpage)) {
- rc = PTR_ERR(subpage);
- goto out;
- }
-
- if (likely(subpage->cp_parent == page)) {
- lu_ref_add(&subpage->cp_reference, "lov", page);
- lpg->lps_invalid = 0;
- rc = 0;
- } else {
- CL_PAGE_DEBUG(D_ERROR, env, page, "parent page\n");
- CL_PAGE_DEBUG(D_ERROR, env, subpage, "child page\n");
- LASSERT(0);
+ list_for_each_entry(o, &subobj->co_lu.lo_header->loh_layers,
+ co_lu.lo_linkage) {
+ if (o->co_ops->coo_page_init) {
+ rc = o->co_ops->coo_page_init(sub->sub_env, o, page,
+ cl_index(subobj, suboff));
+ if (rc != 0)
+ break;
+ }
}
+ lov_sub_put(sub);
-out:
return rc;
}
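
Note: the rewritten lov_page_init_raid0() above no longer allocates a separate sub-page; it walks the stripe object's layer list and lets every layer that provides a page-init hook initialize its slice of the same page. A sketch of that optional-hook walk with invented types:

    #include <stdio.h>

    struct layer {
        const char *name;
        int (*page_init)(const struct layer *l, int page_index);
    };

    static int osc_page_init(const struct layer *l, int idx)
    {
        printf("%s initializes page %d\n", l->name, idx);
        return 0;
    }

    int main(void)
    {
        struct layer layers[] = {
            { "lovsub", NULL },          /* no hook: skipped */
            { "osc", osc_page_init },
        };
        int rc = 0;

        for (int i = 0; i < 2 && rc == 0; i++) {
            if (layers[i].page_init)
                rc = layers[i].page_init(&layers[i], 42);
        }
        return rc;
    }
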
+static int lov_empty_page_print(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ void *cookie, lu_printer_t printer)
+{
+ struct lov_page *lp = cl2lov_page(slice);
+
+ return (*printer)(env, cookie, LUSTRE_LOV_NAME "-page@%p, empty.\n",
+ lp);
+}
+
static const struct cl_page_operations lov_empty_page_ops = {
- .cpo_fini = lov_empty_page_fini,
- .cpo_print = lov_page_print
+ .cpo_print = lov_empty_page_print
};
int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
struct lov_page *lpg = cl_object_page_slice(obj, page);
void *addr;
- cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
- addr = kmap(vmpage);
+ cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_empty_page_ops);
+ addr = kmap(page->cp_vmpage);
memset(addr, 0, cl_page_size(obj));
- kunmap(vmpage);
+ kunmap(page->cp_vmpage);
cl_page_export(env, page, 1);
return 0;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index 9ae1d6f42..690292ece 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -65,7 +65,6 @@ void lov_pool_putref(struct pool_desc *pool)
LASSERT(hlist_unhashed(&pool->pool_hash));
LASSERT(list_empty(&pool->pool_list));
LASSERT(!pool->pool_debugfs_entry);
- lov_ost_pool_free(&(pool->pool_rr.lqr_pool));
lov_ost_pool_free(&(pool->pool_obds));
kfree(pool);
}
@@ -424,11 +423,6 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
if (rc)
goto out_err;
- memset(&(new_pool->pool_rr), 0, sizeof(struct lov_qos_rr));
- rc = lov_ost_pool_init(&new_pool->pool_rr.lqr_pool, 0);
- if (rc)
- goto out_free_pool_obds;
-
INIT_HLIST_NODE(&new_pool->pool_hash);
/* get ref for debugfs file */
@@ -469,13 +463,10 @@ out_err:
list_del_init(&new_pool->pool_list);
lov->lov_pool_count--;
spin_unlock(&obd->obd_dev_lock);
-
ldebugfs_remove(&new_pool->pool_debugfs_entry);
-
- lov_ost_pool_free(&new_pool->pool_rr.lqr_pool);
-out_free_pool_obds:
lov_ost_pool_free(&new_pool->pool_obds);
kfree(new_pool);
+
return rc;
}
@@ -543,8 +534,6 @@ int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
if (rc)
goto out;
- pool->pool_rr.lqr_dirty = 1;
-
CDEBUG(D_CONFIG, "Added %s to "LOV_POOLNAMEF" as member %d\n",
ostname, poolname, pool_tgt_count(pool));
@@ -589,8 +578,6 @@ int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
lov_ost_pool_remove(&pool->pool_obds, lov_idx);
- pool->pool_rr.lqr_dirty = 1;
-
CDEBUG(D_CONFIG, "%s removed from "LOV_POOLNAMEF"\n", ostname,
poolname);
@@ -599,50 +586,3 @@ out:
lov_pool_putref(pool);
return rc;
}
-
-int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool)
-{
- int i, rc;
-
- /* caller may no have a ref on pool if it got the pool
- * without calling lov_find_pool() (e.g. go through the lov pool
- * list)
- */
- lov_pool_getref(pool);
-
- down_read(&pool_tgt_rw_sem(pool));
-
- for (i = 0; i < pool_tgt_count(pool); i++) {
- if (pool_tgt_array(pool)[i] == idx) {
- rc = 0;
- goto out;
- }
- }
- rc = -ENOENT;
-out:
- up_read(&pool_tgt_rw_sem(pool));
-
- lov_pool_putref(pool);
- return rc;
-}
-
-struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname)
-{
- struct pool_desc *pool;
-
- pool = NULL;
- if (poolname[0] != '\0') {
- pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
- if (!pool)
- CWARN("Request for an unknown pool ("LOV_POOLNAMEF")\n",
- poolname);
- if (pool && (pool_tgt_count(pool) == 0)) {
- CWARN("Request for an empty pool ("LOV_POOLNAMEF")\n",
- poolname);
- /* pool is ignored, so we remove ref on it */
- lov_pool_putref(pool);
- pool = NULL;
- }
- }
- return pool;
-}
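
The two deleted helpers also document the reference discipline that still governs struct pool_desc: every path that obtains a pool pointer must pair lov_pool_getref() with lov_pool_putref(), since the final put frees the pool. A hedged sketch of that discipline (use_pool() is illustrative; the pool_tgt_* accessors are those visible in the removed code):

	static void use_pool(struct pool_desc *pool)
	{
		lov_pool_getref(pool);		/* pin the pool across the access */

		down_read(&pool_tgt_rw_sem(pool));
		/* ... inspect pool_tgt_array(pool) / pool_tgt_count(pool) ... */
		up_read(&pool_tgt_rw_sem(pool));

		lov_pool_putref(pool);		/* the last put frees the pool */
	}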
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 7178a02d6..1be4b921c 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -52,7 +52,6 @@ static void lov_init_set(struct lov_request_set *set)
INIT_LIST_HEAD(&set->set_list);
atomic_set(&set->set_refcount, 1);
init_waitqueue_head(&set->set_waitq);
- spin_lock_init(&set->set_lock);
}
void lov_finish_set(struct lov_request_set *set)
@@ -235,7 +234,6 @@ out:
if (tmp_oa)
kmem_cache_free(obdo_cachep, tmp_oa);
return rc;
-
}
int lov_fini_getattr_set(struct lov_request_set *set)
@@ -363,7 +361,6 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
set->set_oi = oinfo;
set->set_oi->oi_md = lsm;
set->set_oi->oi_oa = src_oa;
- set->set_oti = oti;
if (oti && src_oa->o_valid & OBD_MD_FLCOOKIE)
set->set_cookies = oti->oti_logcookies;
@@ -480,7 +477,6 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
lov_init_set(set);
set->set_exp = exp;
- set->set_oti = oti;
set->set_oi = oinfo;
if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
set->set_cookies = oti->oti_logcookies;
@@ -716,12 +712,15 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
struct lov_request *req;
if (!lov->lov_tgts[i] ||
- (!lov_check_and_wait_active(lov, i) &&
- (oinfo->oi_flags & OBD_STATFS_NODELAY))) {
+ (oinfo->oi_flags & OBD_STATFS_NODELAY &&
+ !lov->lov_tgts[i]->ltd_active)) {
CDEBUG(D_HA, "lov idx %d inactive\n", i);
continue;
}
+ if (!lov->lov_tgts[i]->ltd_active)
+ lov_check_and_wait_active(lov, i);
+
/* skip targets that have been explicitly disabled by the
* administrator
*/
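
The statfs hunk above reorders the checks: a missing target is skipped outright, an inactive target is skipped only when the caller passed OBD_STATFS_NODELAY, and otherwise lov_check_and_wait_active() blocks until the target recovers. A sketch of that per-target decision as a predicate (statfs_should_skip() is an illustrative name; the flags type is assumed to match oi_flags):

	static bool statfs_should_skip(const struct lov_tgt_desc *tgt, __u64 flags)
	{
		if (!tgt)
			return true;	/* slot not configured at all */
		if ((flags & OBD_STATFS_NODELAY) && !tgt->ltd_active)
			return true;	/* caller refuses to wait for recovery */
		return false;		/* active, or caller is willing to wait */
	}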
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index c335c020f..35f6b1d66 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -151,8 +151,9 @@ static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev,
if (lsr) {
cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
@@ -182,10 +183,12 @@ static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
d = lovsub2lu_dev(lsd);
d->ld_ops = &lovsub_lu_ops;
lsd->acid_cl.cd_ops = &lovsub_cl_ops;
- } else
+ } else {
d = ERR_PTR(result);
- } else
+ }
+ } else {
d = ERR_PTR(-ENOMEM);
+ }
return d;
}
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
index 3bb0c9068..e92edfb61 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
@@ -62,391 +62,8 @@ static void lovsub_lock_fini(const struct lu_env *env,
kmem_cache_free(lovsub_lock_kmem, lsl);
}
-static void lovsub_parent_lock(const struct lu_env *env, struct lov_lock *lov)
-{
- struct cl_lock *parent;
-
- parent = lov->lls_cl.cls_lock;
- cl_lock_get(parent);
- lu_ref_add(&parent->cll_reference, "lovsub-parent", current);
- cl_lock_mutex_get(env, parent);
-}
-
-static void lovsub_parent_unlock(const struct lu_env *env, struct lov_lock *lov)
-{
- struct cl_lock *parent;
-
- parent = lov->lls_cl.cls_lock;
- cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
- lu_ref_del(&parent->cll_reference, "lovsub-parent", current);
- cl_lock_put(env, parent);
-}
-
-/**
- * Implements cl_lock_operations::clo_state() method for lovsub layer, which
- * method is called whenever sub-lock state changes. Propagates state change
- * to the top-locks.
- */
-static void lovsub_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct lovsub_lock *sub = cl2lovsub_lock(slice);
- struct lov_lock_link *scan;
-
- LASSERT(cl_lock_is_mutexed(slice->cls_lock));
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- struct lov_lock *lov = scan->lll_super;
- struct cl_lock *parent = lov->lls_cl.cls_lock;
-
- if (sub->lss_active != parent) {
- lovsub_parent_lock(env, lov);
- cl_lock_signal(env, parent);
- lovsub_parent_unlock(env, lov);
- }
- }
-}
-
-/**
- * Implementation of cl_lock_operation::clo_weigh() estimating lock weight by
- * asking parent lock.
- */
-static unsigned long lovsub_lock_weigh(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lovsub_lock *lock = cl2lovsub_lock(slice);
- struct lov_lock *lov;
- unsigned long dumbbell;
-
- LASSERT(cl_lock_is_mutexed(slice->cls_lock));
-
- if (!list_empty(&lock->lss_parents)) {
- /*
- * It is not clear whether all parents have to be asked and
- * their estimations summed, or it is enough to ask one. For
- * the current usages, one is always enough.
- */
- lov = container_of(lock->lss_parents.next,
- struct lov_lock_link, lll_list)->lll_super;
-
- lovsub_parent_lock(env, lov);
- dumbbell = cl_lock_weigh(env, lov->lls_cl.cls_lock);
- lovsub_parent_unlock(env, lov);
- } else
- dumbbell = 0;
-
- return dumbbell;
-}
-
-/**
- * Maps start/end offsets within a stripe, to offsets within a file.
- */
-static void lovsub_lock_descr_map(const struct cl_lock_descr *in,
- struct lov_object *lov,
- int stripe, struct cl_lock_descr *out)
-{
- pgoff_t size; /* stripe size in pages */
- pgoff_t skip; /* how many pages in every stripe are occupied by
- * "other" stripes
- */
- pgoff_t start;
- pgoff_t end;
-
- start = in->cld_start;
- end = in->cld_end;
-
- if (lov->lo_lsm->lsm_stripe_count > 1) {
- size = cl_index(lov2cl(lov), lov->lo_lsm->lsm_stripe_size);
- skip = (lov->lo_lsm->lsm_stripe_count - 1) * size;
-
- /* XXX overflow check here? */
- start += start/size * skip + stripe * size;
-
- if (end != CL_PAGE_EOF) {
- end += end/size * skip + stripe * size;
- /*
- * And check for overflow...
- */
- if (end < in->cld_end)
- end = CL_PAGE_EOF;
- }
- }
- out->cld_start = start;
- out->cld_end = end;
-}
-
-/**
- * Adjusts parent lock extent when a sub-lock is attached to a parent. This is
- * called in two ways:
- *
- * - as part of receive call-back, when server returns granted extent to
- * the client, and
- *
- * - when top-lock finds existing sub-lock in the cache.
- *
- * Note, that lock mode is not propagated to the parent: i.e., if CLM_READ
- * top-lock matches CLM_WRITE sub-lock, top-lock is still CLM_READ.
- */
-int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
- struct lovsub_lock *sublock,
- const struct cl_lock_descr *d, int idx)
-{
- struct cl_lock *parent;
- struct lovsub_object *subobj;
- struct cl_lock_descr *pd;
- struct cl_lock_descr *parent_descr;
- int result;
-
- parent = lov->lls_cl.cls_lock;
- parent_descr = &parent->cll_descr;
- LASSERT(cl_lock_mode_match(d->cld_mode, parent_descr->cld_mode));
-
- subobj = cl2lovsub(sublock->lss_cl.cls_obj);
- pd = &lov_env_info(env)->lti_ldescr;
-
- pd->cld_obj = parent_descr->cld_obj;
- pd->cld_mode = parent_descr->cld_mode;
- pd->cld_gid = parent_descr->cld_gid;
- lovsub_lock_descr_map(d, subobj->lso_super, subobj->lso_index, pd);
- lov->lls_sub[idx].sub_got = *d;
- /*
- * Notify top-lock about modification, if lock description changes
- * materially.
- */
- if (!cl_lock_ext_match(parent_descr, pd))
- result = cl_lock_modify(env, parent, pd);
- else
- result = 0;
- return result;
-}
-
-static int lovsub_lock_modify(const struct lu_env *env,
- const struct cl_lock_slice *s,
- const struct cl_lock_descr *d)
-{
- struct lovsub_lock *lock = cl2lovsub_lock(s);
- struct lov_lock_link *scan;
- struct lov_lock *lov;
- int result = 0;
-
- LASSERT(cl_lock_mode_match(d->cld_mode,
- s->cls_lock->cll_descr.cld_mode));
- list_for_each_entry(scan, &lock->lss_parents, lll_list) {
- int rc;
-
- lov = scan->lll_super;
- lovsub_parent_lock(env, lov);
- rc = lov_sublock_modify(env, lov, lock, d, scan->lll_idx);
- lovsub_parent_unlock(env, lov);
- result = result ?: rc;
- }
- return result;
-}
-
-static int lovsub_lock_closure(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_lock_closure *closure)
-{
- struct lovsub_lock *sub;
- struct cl_lock *parent;
- struct lov_lock_link *scan;
- int result;
-
- LASSERT(cl_lock_is_mutexed(slice->cls_lock));
-
- sub = cl2lovsub_lock(slice);
- result = 0;
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- parent = scan->lll_super->lls_cl.cls_lock;
- result = cl_lock_closure_build(env, parent, closure);
- if (result != 0)
- break;
- }
- return result;
-}
-
-/**
- * A helper function for lovsub_lock_delete() that deals with a given parent
- * top-lock.
- */
-static int lovsub_lock_delete_one(const struct lu_env *env,
- struct cl_lock *child, struct lov_lock *lov)
-{
- struct cl_lock *parent;
- int result;
-
- parent = lov->lls_cl.cls_lock;
- if (parent->cll_error)
- return 0;
-
- result = 0;
- switch (parent->cll_state) {
- case CLS_ENQUEUED:
- /* See LU-1355 for the case that a glimpse lock is
- * interrupted by signal
- */
- LASSERT(parent->cll_flags & CLF_CANCELLED);
- break;
- case CLS_QUEUING:
- case CLS_FREEING:
- cl_lock_signal(env, parent);
- break;
- case CLS_INTRANSIT:
- /*
- * Here lies a problem: a sub-lock is canceled while top-lock
- * is being unlocked. Top-lock cannot be moved into CLS_NEW
- * state, because unlocking has to succeed eventually by
- * placing lock into CLS_CACHED (or failing it), see
- * cl_unuse_try(). Nor can top-lock be left in CLS_CACHED
- * state, because lov maintains an invariant that all
- * sub-locks exist in CLS_CACHED (this allows cached top-lock
- * to be reused immediately). Nor can we wait for top-lock
- * state to change, because this can be synchronous to the
- * current thread.
- *
- * We know for sure that lov_lock_unuse() will be called at
- * least one more time to finish un-using, so leave a mark on
- * the top-lock, that will be seen by the next call to
- * lov_lock_unuse().
- */
- if (cl_lock_is_intransit(parent))
- lov->lls_cancel_race = 1;
- break;
- case CLS_CACHED:
- /*
- * if a sub-lock is canceled move its top-lock into CLS_NEW
- * state to preserve an invariant that a top-lock in
- * CLS_CACHED is immediately ready for re-use (i.e., has all
- * sub-locks), and so that next attempt to re-use the top-lock
- * enqueues missing sub-lock.
- */
- cl_lock_state_set(env, parent, CLS_NEW);
- /* fall through */
- case CLS_NEW:
- /*
- * if last sub-lock is canceled, destroy the top-lock (which
- * is now `empty') proactively.
- */
- if (lov->lls_nr_filled == 0) {
- /* ... but unfortunately, this cannot be done easily,
- * as cancellation of a top-lock might acquire mutices
- * of its other sub-locks, violating lock ordering,
- * see cl_lock_{cancel,delete}() preconditions.
- *
- * To work around this, the mutex of this sub-lock is
- * released, top-lock is destroyed, and sub-lock mutex
- * acquired again. The list of parents has to be
- * re-scanned from the beginning after this.
- *
- * Only do this if no mutices other than on @child and
- * @parent are held by the current thread.
- *
- * TODO: The lock modal here is too complex, because
- * the lock may be canceled and deleted by voluntarily:
- * cl_lock_request
- * -> osc_lock_enqueue_wait
- * -> osc_lock_cancel_wait
- * -> cl_lock_delete
- * -> lovsub_lock_delete
- * -> cl_lock_cancel/delete
- * -> ...
- *
- * The better choice is to spawn a kernel thread for
- * this purpose. -jay
- */
- if (cl_lock_nr_mutexed(env) == 2) {
- cl_lock_mutex_put(env, child);
- cl_lock_cancel(env, parent);
- cl_lock_delete(env, parent);
- result = 1;
- }
- }
- break;
- case CLS_HELD:
- CL_LOCK_DEBUG(D_ERROR, env, parent, "Delete CLS_HELD lock\n");
- default:
- CERROR("Impossible state: %d\n", parent->cll_state);
- LBUG();
- break;
- }
-
- return result;
-}
-
-/**
- * An implementation of cl_lock_operations::clo_delete() method. This is
- * invoked in "bottom-to-top" delete, when lock destruction starts from the
- * sub-lock (e.g, as a result of ldlm lock LRU policy).
- */
-static void lovsub_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct cl_lock *child = slice->cls_lock;
- struct lovsub_lock *sub = cl2lovsub_lock(slice);
- int restart;
-
- LASSERT(cl_lock_is_mutexed(child));
-
- /*
- * Destruction of a sub-lock might take multiple iterations, because
- * when the last sub-lock of a given top-lock is deleted, top-lock is
- * canceled proactively, and this requires to release sub-lock
- * mutex. Once sub-lock mutex has been released, list of its parents
- * has to be re-scanned from the beginning.
- */
- do {
- struct lov_lock *lov;
- struct lov_lock_link *scan;
- struct lov_lock_link *temp;
- struct lov_lock_sub *subdata;
-
- restart = 0;
- list_for_each_entry_safe(scan, temp,
- &sub->lss_parents, lll_list) {
- lov = scan->lll_super;
- subdata = &lov->lls_sub[scan->lll_idx];
- lovsub_parent_lock(env, lov);
- subdata->sub_got = subdata->sub_descr;
- lov_lock_unlink(env, scan, sub);
- restart = lovsub_lock_delete_one(env, child, lov);
- lovsub_parent_unlock(env, lov);
-
- if (restart) {
- cl_lock_mutex_get(env, child);
- break;
- }
- }
- } while (restart);
-}
-
-static int lovsub_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct cl_lock_slice *slice)
-{
- struct lovsub_lock *sub = cl2lovsub_lock(slice);
- struct lov_lock *lov;
- struct lov_lock_link *scan;
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- lov = scan->lll_super;
- (*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
- if (lov)
- cl_lock_descr_print(env, cookie, p,
- &lov->lls_cl.cls_lock->cll_descr);
- (*p)(env, cookie, "] ");
- }
- return 0;
-}
-
static const struct cl_lock_operations lovsub_lock_ops = {
.clo_fini = lovsub_lock_fini,
- .clo_state = lovsub_lock_state,
- .clo_delete = lovsub_lock_delete,
- .clo_modify = lovsub_lock_modify,
- .clo_closure = lovsub_lock_closure,
- .clo_weigh = lovsub_lock_weigh,
- .clo_print = lovsub_lock_print
};
int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
@@ -460,8 +77,9 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
INIT_LIST_HEAD(&lsk->lss_parents);
cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
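
With the state machine gone, lovsub_lock_ops shrinks to a single clo_fini hook, and lovsub_lock_init() reduces to allocate-and-register. A hypothetical minimal slice of the same shape (the my_* names are illustrative, not part of the patch):

	static struct kmem_cache *my_lock_kmem;	/* assumed set up elsewhere */

	struct my_lock_slice {
		struct cl_lock_slice mls_cl;	/* embeds the generic slice */
	};

	static void my_lock_fini(const struct lu_env *env,
				 struct cl_lock_slice *slice)
	{
		struct my_lock_slice *mls =
			container_of(slice, struct my_lock_slice, mls_cl);

		kmem_cache_free(my_lock_kmem, mls);	/* nothing else to tear down */
	}

	static const struct cl_lock_operations my_lock_ops = {
		.clo_fini	= my_lock_fini,	/* the only hook still required */
	};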
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index 6c5430d93..bcaae1e5b 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -67,10 +67,10 @@ int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
lu_object_add(obj, below);
cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page));
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
-
}
static void lovsub_object_free(const struct lu_env *env, struct lu_object *obj)
@@ -154,8 +154,9 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env,
lu_object_add_top(&hdr->coh_lu, obj);
los->lso_cl.co_ops = &lovsub_ops;
obj->lo_ops = &lovsub_lu_obj_ops;
- } else
+ } else {
obj = NULL;
+ }
return obj;
}
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c
index 2d945532b..9badedcce 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_page.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_page.c
@@ -60,11 +60,11 @@ static const struct cl_page_operations lovsub_page_ops = {
};
int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *unused)
+ struct cl_page *page, pgoff_t index)
{
struct lovsub_page *lsb = cl_object_page_slice(obj, page);
- cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
+ cl_page_slice_add(page, &lsb->lsb_cl, obj, index, &lovsub_page_ops);
return 0;
}
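
All the page-init conversions above follow the same pattern: the hook now receives the page index instead of the VM page, and the VM page, when still needed, is reached through page->cp_vmpage (as in lov_page_init_empty() earlier). A compact sketch of the new hook shape (the my_* names are illustrative):

	struct my_page {
		struct cl_page_slice mp_cl;
	};

	static const struct cl_page_operations my_page_ops = {
	};

	static int my_page_init(const struct lu_env *env, struct cl_object *obj,
				struct cl_page *page, pgoff_t index)
	{
		struct my_page *mp = cl_object_page_slice(obj, page);

		/* the index, not a struct page *, now identifies the slot */
		cl_page_slice_add(page, &mp->mp_cl, obj, index, &my_page_ops);
		return 0;
	}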
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index 38f267a60..5c7a15dd7 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -49,9 +49,9 @@ static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
obd_kobj);
struct client_obd *cli = &dev->u.cli;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
len = sprintf(buf, "%u\n", cli->cl_max_rpcs_in_flight);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return len;
}
@@ -74,9 +74,9 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
if (val < 1 || val > MDC_MAX_RIF_MAX)
return -ERANGE;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_max_rpcs_in_flight = val;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return count;
}
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index b3bfdcb73..856c54e03 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -279,8 +279,7 @@ static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
rec->sa_atime = LTIME_S(op_data->op_attr.ia_atime);
rec->sa_mtime = LTIME_S(op_data->op_attr.ia_mtime);
rec->sa_ctime = LTIME_S(op_data->op_attr.ia_ctime);
- rec->sa_attr_flags =
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags;
+ rec->sa_attr_flags = op_data->op_attr_flags;
if ((op_data->op_attr.ia_valid & ATTR_GID) &&
in_group_p(op_data->op_attr.ia_gid))
rec->sa_suppgid =
@@ -439,7 +438,6 @@ void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
char *tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
LOGL0(op_data->op_name, op_data->op_namelen, tmp);
-
}
}
@@ -455,7 +453,7 @@ static void mdc_hsm_release_pack(struct ptlrpc_request *req,
lock = ldlm_handle2lock(&op_data->op_lease_handle);
if (lock) {
data->cd_handle = lock->l_remote_handle;
- ldlm_lock_put(lock);
+ LDLM_LOCK_PUT(lock);
}
ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
@@ -481,9 +479,9 @@ static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
{
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = list_empty(&mcw->mcw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
};
@@ -497,23 +495,23 @@ int mdc_enter_request(struct client_obd *cli)
struct mdc_cache_waiter mcw;
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
init_waitqueue_head(&mcw.mcw_waitq);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
rc = l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw),
&lwi);
if (rc) {
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (list_empty(&mcw.mcw_entry))
cli->cl_r_in_flight--;
list_del_init(&mcw.mcw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
} else {
cli->cl_r_in_flight++;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
return rc;
}
@@ -523,7 +521,7 @@ void mdc_exit_request(struct client_obd *cli)
struct list_head *l, *tmp;
struct mdc_cache_waiter *mcw;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_r_in_flight--;
list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
@@ -538,5 +536,5 @@ void mdc_exit_request(struct client_obd *cli)
}
/* Empty waiting list? Decrease reqs in-flight number */
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
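
Both mdc files above replace the client_obd_list_lock()/unlock() wrappers with direct spin_lock()/spin_unlock() on cl_loi_list_lock, which is now a plain spinlock_t. The resulting pattern, sketched on the in-flight counter (my_rpc_done() is illustrative):

	static void my_rpc_done(struct client_obd *cli)
	{
		spin_lock(&cli->cl_loi_list_lock);
		cli->cl_r_in_flight--;	/* counter changes only under the lock */
		spin_unlock(&cli->cl_loi_list_lock);
	}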
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index 958a164f6..3b1bc9111 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -869,7 +869,9 @@ resend:
* (explicit ones, or those automatically generated by the kernel to
* clean up current FLocks upon exit) that can't be trashed
*/
- if ((rc == -EINTR) || (rc == -ETIMEDOUT))
+ if (((rc == -EINTR) || (rc == -ETIMEDOUT)) &&
+ (einfo->ei_type == LDLM_FLOCK) &&
+ (einfo->ei_mode == LCK_NL))
goto resend;
return rc;
}
@@ -963,7 +965,6 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
if (fid_is_sane(&op_data->op_fid2) &&
it->it_create_mode & M_CHECK_STALE &&
it->it_op != IT_GETATTR) {
-
/* Also: did we find the same inode? */
/* server can return one of two fids:
* op_fid2 - new allocated fid - if file is created.
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index b91d3ff18..86b744536 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -142,9 +142,8 @@ static int mdc_getattr_common(struct obd_export *exp,
CDEBUG(D_NET, "mode: %o\n", body->mode);
+ mdc_update_max_ea_from_body(exp, body);
if (body->eadatasize != 0) {
- mdc_update_max_ea_from_body(exp, body);
-
eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
body->eadatasize);
if (!eadata)
@@ -1169,7 +1168,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+ mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
/* Copy hsm_progress struct */
req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
@@ -1203,7 +1202,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+ mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
/* Copy hsm_progress struct */
archive_mask = req_capsule_client_get(&req->rq_pill,
@@ -1278,7 +1277,7 @@ static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+ mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
ptlrpc_request_set_replen(req);
@@ -1395,7 +1394,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
return rc;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+ mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
/* Copy hsm_request struct */
req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
@@ -1952,7 +1951,7 @@ static void lustre_swab_hal(struct hsm_action_list *h)
__swab32s(&h->hal_count);
__swab32s(&h->hal_archive_id);
__swab64s(&h->hal_flags);
- hai = hai_zero(h);
+ hai = hai_first(h);
for (i = 0; i < h->hal_count; i++, hai = hai_next(hai))
lustre_swab_hai(hai);
}
@@ -2249,7 +2248,7 @@ static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
* recovery; a non-zero value is returned if the lock can be canceled,
* or zero if it cannot
*/
-static int mdc_cancel_for_recovery(struct ldlm_lock *lock)
+static int mdc_cancel_weight(struct ldlm_lock *lock)
{
if (lock->l_resource->lr_type != LDLM_IBITS)
return 0;
@@ -2314,12 +2313,14 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
return -ENOMEM;
mdc_init_rpc_lock(cli->cl_rpc_lock);
- ptlrpcd_addref();
+ rc = ptlrpcd_addref();
+ if (rc < 0)
+ goto err_rpc_lock;
cli->cl_close_lock = kzalloc(sizeof(*cli->cl_close_lock), GFP_NOFS);
if (!cli->cl_close_lock) {
rc = -ENOMEM;
- goto err_rpc_lock;
+ goto err_ptlrpcd_decref;
}
mdc_init_rpc_lock(cli->cl_close_lock);
@@ -2331,7 +2332,7 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
sptlrpc_lprocfs_cliobd_attach(obd);
ptlrpc_lprocfs_register_obd(obd);
- ns_register_cancel(obd->obd_namespace, mdc_cancel_for_recovery);
+ ns_register_cancel(obd->obd_namespace, mdc_cancel_weight);
obd->obd_namespace->ns_lvbo = &inode_lvbo;
@@ -2345,9 +2346,10 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
err_close_lock:
kfree(cli->cl_close_lock);
+err_ptlrpcd_decref:
+ ptlrpcd_decref();
err_rpc_lock:
kfree(cli->cl_rpc_lock);
- ptlrpcd_decref();
return rc;
}
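
The mdc_setup() hunks make ptlrpcd_addref() failures fatal and reorder the error labels so each one undoes exactly the steps that completed before the failure. A sketch of that unwind ladder (alloc_a()/alloc_b()/free_a() stand in for the rpc-lock and close-lock setup; only ptlrpcd_addref()/ptlrpcd_decref() are real calls):

	static int my_setup(struct client_obd *cli)
	{
		int rc;

		rc = alloc_a(cli);		/* step 1: cl_rpc_lock */
		if (rc)
			return rc;

		rc = ptlrpcd_addref();		/* step 2: now checked for failure */
		if (rc < 0)
			goto err_a;

		rc = alloc_b(cli);		/* step 3: cl_close_lock */
		if (rc)
			goto err_ptlrpcd;
		return 0;

	err_ptlrpcd:
		ptlrpcd_decref();		/* undo step 2 */
	err_a:
		free_a(cli);			/* undo step 1 */
		return rc;
	}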
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 3924b095b..2311a437c 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -502,8 +502,12 @@ static void do_requeue(struct config_llog_data *cld)
*/
down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
+ int rc;
+
CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
- mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
+ rc = mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
+ if (rc && rc != -ENOENT)
+ CERROR("failed processing log: %d\n", rc);
} else {
CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
cld->cld_logname);
@@ -734,7 +738,9 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
struct task_struct *task;
int rc;
- ptlrpcd_addref();
+ rc = ptlrpcd_addref();
+ if (rc < 0)
+ goto err_noref;
rc = client_obd_setup(obd, lcfg);
if (rc)
@@ -773,6 +779,7 @@ err_cleanup:
client_obd_cleanup(obd);
err_decref:
ptlrpcd_decref();
+err_noref:
return rc;
}
@@ -1720,7 +1727,6 @@ static int mgc_process_config(struct obd_device *obd, u32 len, void *buf)
CERROR("Unknown command: %d\n", lcfg->lcfg_command);
rc = -EINVAL;
goto out;
-
}
}
out:
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index f5128b4f1..583fb5f33 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -36,6 +36,7 @@
* Client IO.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
@@ -132,6 +133,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
case CIT_WRITE:
break;
case CIT_FAULT:
+ break;
case CIT_FSYNC:
LASSERT(!io->ci_need_restart);
break;
@@ -159,7 +161,6 @@ static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
io->ci_type = iot;
INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
- INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
INIT_LIST_HEAD(&io->ci_lockset.cls_done);
INIT_LIST_HEAD(&io->ci_layers);
@@ -241,37 +242,7 @@ static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
const struct cl_lock_descr *d1)
{
return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
- lu_object_fid(&d1->cld_obj->co_lu)) ?:
- __diff_normalize(d0->cld_start, d1->cld_start);
-}
-
-static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
- const struct cl_lock_descr *d1)
-{
- int ret;
-
- ret = lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
- lu_object_fid(&d1->cld_obj->co_lu));
- if (ret)
- return ret;
- if (d0->cld_end < d1->cld_start)
- return -1;
- if (d0->cld_start > d0->cld_end)
- return 1;
- return 0;
-}
-
-static void cl_lock_descr_merge(struct cl_lock_descr *d0,
- const struct cl_lock_descr *d1)
-{
- d0->cld_start = min(d0->cld_start, d1->cld_start);
- d0->cld_end = max(d0->cld_end, d1->cld_end);
-
- if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
- d0->cld_mode = CLM_WRITE;
-
- if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
- d0->cld_mode = CLM_GROUP;
+ lu_object_fid(&d1->cld_obj->co_lu));
}
/*
@@ -320,33 +291,35 @@ static void cl_io_locks_sort(struct cl_io *io)
} while (!done);
}
-/**
- * Check whether \a queue contains locks matching \a need.
- *
- * \retval +ve there is a matching lock in the \a queue
- * \retval 0 there are no matching locks in the \a queue
- */
-int cl_queue_match(const struct list_head *queue,
- const struct cl_lock_descr *need)
+static void cl_lock_descr_merge(struct cl_lock_descr *d0,
+ const struct cl_lock_descr *d1)
{
- struct cl_io_lock_link *scan;
+ d0->cld_start = min(d0->cld_start, d1->cld_start);
+ d0->cld_end = max(d0->cld_end, d1->cld_end);
- list_for_each_entry(scan, queue, cill_linkage) {
- if (cl_lock_descr_match(&scan->cill_descr, need))
- return 1;
- }
- return 0;
+ if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
+ d0->cld_mode = CLM_WRITE;
+
+ if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
+ d0->cld_mode = CLM_GROUP;
}
-EXPORT_SYMBOL(cl_queue_match);
-static int cl_queue_merge(const struct list_head *queue,
- const struct cl_lock_descr *need)
+static int cl_lockset_merge(const struct cl_lockset *set,
+ const struct cl_lock_descr *need)
{
struct cl_io_lock_link *scan;
- list_for_each_entry(scan, queue, cill_linkage) {
- if (cl_lock_descr_cmp(&scan->cill_descr, need))
+ list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
+ if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
continue;
+
+ /* Merge locks for the same object because the ldlm lock server
+ * may expand the lock extent; otherwise there is a deadlock
+ * case if two conflicting locks are queued for the same object
+ * and the lock server expands one lock to overlap the other.
+ * The side effect is that it can generate a multi-stripe lock
+ * that may cause a cascading problem.
+ */
cl_lock_descr_merge(&scan->cill_descr, need);
CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
@@ -356,87 +329,20 @@ static int cl_queue_merge(const struct list_head *queue,
return 0;
}
-static int cl_lockset_match(const struct cl_lockset *set,
- const struct cl_lock_descr *need)
-{
- return cl_queue_match(&set->cls_curr, need) ||
- cl_queue_match(&set->cls_done, need);
-}
-
-static int cl_lockset_merge(const struct cl_lockset *set,
- const struct cl_lock_descr *need)
-{
- return cl_queue_merge(&set->cls_todo, need) ||
- cl_lockset_match(set, need);
-}
-
-static int cl_lockset_lock_one(const struct lu_env *env,
- struct cl_io *io, struct cl_lockset *set,
- struct cl_io_lock_link *link)
-{
- struct cl_lock *lock;
- int result;
-
- lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
-
- if (!IS_ERR(lock)) {
- link->cill_lock = lock;
- list_move(&link->cill_linkage, &set->cls_curr);
- if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
- result = cl_wait(env, lock);
- if (result == 0)
- list_move(&link->cill_linkage, &set->cls_done);
- } else
- result = 0;
- } else
- result = PTR_ERR(lock);
- return result;
-}
-
-static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
- struct cl_io_lock_link *link)
-{
- struct cl_lock *lock = link->cill_lock;
-
- list_del_init(&link->cill_linkage);
- if (lock) {
- cl_lock_release(env, lock, "io", io);
- link->cill_lock = NULL;
- }
- if (link->cill_fini)
- link->cill_fini(env, link);
-}
-
static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
struct cl_lockset *set)
{
struct cl_io_lock_link *link;
struct cl_io_lock_link *temp;
- struct cl_lock *lock;
int result;
result = 0;
list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
- if (!cl_lockset_match(set, &link->cill_descr)) {
- /* XXX some locking to guarantee that locks aren't
- * expanded in between.
- */
- result = cl_lockset_lock_one(env, io, set, link);
- if (result != 0)
- break;
- } else
- cl_lock_link_fini(env, io, link);
- }
- if (result == 0) {
- list_for_each_entry_safe(link, temp,
- &set->cls_curr, cill_linkage) {
- lock = link->cill_lock;
- result = cl_wait(env, lock);
- if (result == 0)
- list_move(&link->cill_linkage, &set->cls_done);
- else
- break;
- }
+ result = cl_lock_request(env, io, &link->cill_lock);
+ if (result < 0)
+ break;
+
+ list_move(&link->cill_linkage, &set->cls_done);
}
return result;
}
@@ -492,16 +398,19 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
set = &io->ci_lockset;
- list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
- cl_lock_link_fini(env, io, link);
-
- list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
- cl_lock_link_fini(env, io, link);
+ list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
+ list_del_init(&link->cill_linkage);
+ if (link->cill_fini)
+ link->cill_fini(env, link);
+ }
list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
- cl_unuse(env, link->cill_lock);
- cl_lock_link_fini(env, io, link);
+ list_del_init(&link->cill_linkage);
+ cl_lock_release(env, &link->cill_lock);
+ if (link->cill_fini)
+ link->cill_fini(env, link);
}
+
cl_io_for_each_reverse(scan, io) {
if (scan->cis_iop->op[io->ci_type].cio_unlock)
scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
@@ -595,9 +504,9 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
{
int result;
- if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
+ if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr)) {
result = 1;
- else {
+ } else {
list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
result = 0;
}
@@ -627,8 +536,9 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
result = cl_io_lock_add(env, io, link);
if (result) /* lock match */
link->cill_fini(env, link);
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
@@ -692,42 +602,6 @@ cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
}
/**
- * True iff \a page is within \a io range.
- */
-static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
-{
- int result = 1;
- loff_t start;
- loff_t end;
- pgoff_t idx;
-
- idx = page->cp_index;
- switch (io->ci_type) {
- case CIT_READ:
- case CIT_WRITE:
- /*
- * check that [start, end) and [pos, pos + count) extents
- * overlap.
- */
- if (!cl_io_is_append(io)) {
- const struct cl_io_rw_common *crw = &(io->u.ci_rw);
-
- start = cl_offset(page->cp_obj, idx);
- end = cl_offset(page->cp_obj, idx + 1);
- result = crw->crw_pos < end &&
- start < crw->crw_pos + crw->crw_count;
- }
- break;
- case CIT_FAULT:
- result = io->u.ci_fault.ft_index == idx;
- break;
- default:
- LBUG();
- }
- return result;
-}
-
-/**
* Called by read io, when page has to be read from the server.
*
* \see cl_io_operations::cio_read_page()
@@ -742,7 +616,6 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
LINVRNT(cl_page_is_owned(page, io));
LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_page_in_io(page, io));
LINVRNT(cl_io_invariant(io));
queue = &io->ci_queue;
@@ -769,7 +642,7 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
break;
}
}
- if (result == 0)
+ if (result == 0 && queue->c2_qin.pl_nr > 0)
result = cl_io_submit_rw(env, io, CRT_READ, queue);
/*
* Unlock unsent pages in case of error.
@@ -781,77 +654,29 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
EXPORT_SYMBOL(cl_io_read_page);
/**
- * Called by write io to prepare page to receive data from user buffer.
+ * Commit a list of contiguous pages into writeback cache.
*
- * \see cl_io_operations::cio_prepare_write()
+ * \returns 0 if all pages committed, or errcode if error occurred.
+ * \see cl_io_operations::cio_commit_async()
*/
-int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to)
+int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb)
{
const struct cl_io_slice *scan;
int result = 0;
- LINVRNT(io->ci_type == CIT_WRITE);
- LINVRNT(cl_page_is_owned(page, io));
- LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_io_invariant(io));
- LASSERT(cl_page_in_io(page, io));
-
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->cio_prepare_write) {
- const struct cl_page_slice *slice;
-
- slice = cl_io_slice_page(scan, page);
- result = scan->cis_iop->cio_prepare_write(env, scan,
- slice,
- from, to);
- if (result != 0)
- break;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_io_prepare_write);
-
-/**
- * Called by write io after user data were copied into a page.
- *
- * \see cl_io_operations::cio_commit_write()
- */
-int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to)
-{
- const struct cl_io_slice *scan;
- int result = 0;
-
- LINVRNT(io->ci_type == CIT_WRITE);
- LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_io_invariant(io));
- /*
- * XXX Uh... not nice. Top level cl_io_commit_write() call (vvp->lov)
- * already called cl_page_cache_add(), moving page into CPS_CACHED
- * state. Better (and more general) way of dealing with such situation
- * is needed.
- */
- LASSERT(cl_page_is_owned(page, io) || page->cp_parent);
- LASSERT(cl_page_in_io(page, io));
-
cl_io_for_each(scan, io) {
- if (scan->cis_iop->cio_commit_write) {
- const struct cl_page_slice *slice;
-
- slice = cl_io_slice_page(scan, page);
- result = scan->cis_iop->cio_commit_write(env, scan,
- slice,
- from, to);
- if (result != 0)
- break;
- }
+ if (!scan->cis_iop->cio_commit_async)
+ continue;
+ result = scan->cis_iop->cio_commit_async(env, scan, queue,
+ from, to, cb);
+ if (result != 0)
+ break;
}
- LINVRNT(result <= 0);
return result;
}
-EXPORT_SYMBOL(cl_io_commit_write);
+EXPORT_SYMBOL(cl_io_commit_async);
/**
* Submits a list of pages for immediate io.
@@ -869,13 +694,10 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
const struct cl_io_slice *scan;
int result = 0;
- LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
-
cl_io_for_each(scan, io) {
- if (!scan->cis_iop->req_op[crt].cio_submit)
+ if (!scan->cis_iop->cio_submit)
continue;
- result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
- queue);
+ result = scan->cis_iop->cio_submit(env, scan, crt, queue);
if (result != 0)
break;
}
@@ -887,6 +709,9 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
}
EXPORT_SYMBOL(cl_io_submit_rw);
+static void cl_page_list_assume(const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist);
+
/**
* Submit a sync_io and wait for the IO to be finished, or error happens.
* If \a timeout is zero, it means to wait for the IO unconditionally.
@@ -904,7 +729,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
pg->cp_sync_io = anchor;
}
- cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
+ cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end);
rc = cl_io_submit_rw(env, io, iot, queue);
if (rc == 0) {
/*
@@ -915,12 +740,12 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
*/
cl_page_list_for_each(pg, &queue->c2_qin) {
pg->cp_sync_io = NULL;
- cl_sync_io_note(anchor, 1);
+ cl_sync_io_note(env, anchor, 1);
}
/* wait for the IO to be finished. */
- rc = cl_sync_io_wait(env, io, &queue->c2_qout,
- anchor, timeout);
+ rc = cl_sync_io_wait(env, anchor, timeout);
+ cl_page_list_assume(env, io, &queue->c2_qout);
} else {
LASSERT(list_empty(&queue->c2_qout.pl_pages));
cl_page_list_for_each(pg, &queue->c2_qin)
@@ -931,26 +756,6 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
EXPORT_SYMBOL(cl_io_submit_sync);
/**
- * Cancel an IO which has been submitted by cl_io_submit_rw.
- */
-static int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue)
-{
- struct cl_page *page;
- int result = 0;
-
- CERROR("Canceling ongoing page transmission\n");
- cl_page_list_for_each(page, queue) {
- int rc;
-
- LINVRNT(cl_page_in_io(page, io));
- rc = cl_page_cancel(env, page);
- result = result ?: rc;
- }
- return result;
-}
-
-/**
* Main io loop.
*
* Pumps io through iterations calling
@@ -1072,8 +877,8 @@ EXPORT_SYMBOL(cl_page_list_add);
/**
* Removes a page from a page list.
*/
-static void cl_page_list_del(const struct lu_env *env,
- struct cl_page_list *plist, struct cl_page *page)
+void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist,
+ struct cl_page *page)
{
LASSERT(plist->pl_nr > 0);
LINVRNT(plist->pl_owner == current);
@@ -1086,6 +891,7 @@ static void cl_page_list_del(const struct lu_env *env,
lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
cl_page_put(env, page);
}
+EXPORT_SYMBOL(cl_page_list_del);
/**
* Moves a page from one page list to another.
@@ -1106,6 +912,24 @@ void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
EXPORT_SYMBOL(cl_page_list_move);
/**
+ * Moves a page from one page list to the head of another list.
+ */
+void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
+ struct cl_page *page)
+{
+ LASSERT(src->pl_nr > 0);
+ LINVRNT(dst->pl_owner == current);
+ LINVRNT(src->pl_owner == current);
+
+ list_move(&page->cp_batch, &dst->pl_pages);
+ --src->pl_nr;
+ ++dst->pl_nr;
+ lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
+ src, dst);
+}
+EXPORT_SYMBOL(cl_page_list_move_head);
+
+/**
* splice the cl_page_list, just as list head does
*/
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
@@ -1162,8 +986,7 @@ EXPORT_SYMBOL(cl_page_list_disown);
/**
* Releases pages from queue.
*/
-static void cl_page_list_fini(const struct lu_env *env,
- struct cl_page_list *plist)
+void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
{
struct cl_page *page;
struct cl_page *temp;
@@ -1174,6 +997,7 @@ static void cl_page_list_fini(const struct lu_env *env,
cl_page_list_del(env, plist, page);
LASSERT(plist->pl_nr == 0);
}
+EXPORT_SYMBOL(cl_page_list_fini);
/**
* Assumes all pages in a queue.
@@ -1260,7 +1084,7 @@ EXPORT_SYMBOL(cl_2queue_init_page);
/**
* Returns top-level io.
*
- * \see cl_object_top(), cl_page_top().
+ * \see cl_object_top()
*/
struct cl_io *cl_io_top(struct cl_io *io)
{
@@ -1323,19 +1147,14 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req,
int result;
result = 0;
- page = cl_page_top(page);
- do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
- if (dev->cd_ops->cdo_req_init) {
- result = dev->cd_ops->cdo_req_init(env,
- dev, req);
- if (result != 0)
- break;
- }
+ list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
+ if (dev->cd_ops->cdo_req_init) {
+ result = dev->cd_ops->cdo_req_init(env, dev, req);
+ if (result != 0)
+ break;
}
- page = page->cp_child;
- } while (page && result == 0);
+ }
return result;
}
@@ -1384,14 +1203,16 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
if (req->crq_o) {
req->crq_nrobjs = nr_objects;
result = cl_req_init(env, req, page);
- } else
+ } else {
result = -ENOMEM;
+ }
if (result != 0) {
cl_req_completion(env, req, result);
req = ERR_PTR(result);
}
- } else
+ } else {
req = ERR_PTR(-ENOMEM);
+ }
return req;
}
EXPORT_SYMBOL(cl_req_alloc);
@@ -1406,8 +1227,6 @@ void cl_req_page_add(const struct lu_env *env,
struct cl_req_obj *rqo;
int i;
- page = cl_page_top(page);
-
LASSERT(list_empty(&page->cp_flight));
LASSERT(!page->cp_req);
@@ -1438,8 +1257,6 @@ void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
{
struct cl_req *req = page->cp_req;
- page = cl_page_top(page);
-
LASSERT(!list_empty(&page->cp_flight));
LASSERT(req->crq_nrpages > 0);
@@ -1511,25 +1328,39 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
}
EXPORT_SYMBOL(cl_req_attr_set);
+/* Default cl_sync_io end callback; the caller is expected to call
+ * cl_sync_io_wait() to wait for the IO to finish.
+ */
+void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
+{
+ wake_up_all(&anchor->csi_waitq);
+
+ /* it's safe to nuke or reuse anchor now */
+ atomic_set(&anchor->csi_barrier, 0);
+}
+EXPORT_SYMBOL(cl_sync_io_end);
/**
- * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
+ * Initialize synchronous io wait anchor
*/
-void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
+void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
+ void (*end)(const struct lu_env *, struct cl_sync_io *))
{
+ memset(anchor, 0, sizeof(*anchor));
init_waitqueue_head(&anchor->csi_waitq);
- atomic_set(&anchor->csi_sync_nr, nrpages);
- atomic_set(&anchor->csi_barrier, nrpages > 0);
+ atomic_set(&anchor->csi_sync_nr, nr);
+ atomic_set(&anchor->csi_barrier, nr > 0);
anchor->csi_sync_rc = 0;
+ anchor->csi_end_io = end;
+ LASSERT(end);
}
EXPORT_SYMBOL(cl_sync_io_init);
/**
- * Wait until all transfer completes. Transfer completion routine has to call
- * cl_sync_io_note() for every page.
+ * Wait until all IO completes. Transfer completion routine has to call
+ * cl_sync_io_note() for every entity.
*/
-int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_sync_io *anchor,
+int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
long timeout)
{
struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
@@ -1542,11 +1373,9 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
atomic_read(&anchor->csi_sync_nr) == 0,
&lwi);
if (rc < 0) {
- CERROR("SYNC IO failed with error: %d, try to cancel %d remaining pages\n",
+ CERROR("IO failed: %d, still wait for %d remaining entries\n",
rc, atomic_read(&anchor->csi_sync_nr));
- (void)cl_io_cancel(env, io, queue);
-
lwi = (struct l_wait_info) { 0 };
(void)l_wait_event(anchor->csi_waitq,
atomic_read(&anchor->csi_sync_nr) == 0,
@@ -1555,14 +1384,12 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
rc = anchor->csi_sync_rc;
}
LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
- cl_page_list_assume(env, io, queue);
/* wait until cl_sync_io_note() has done wakeup */
while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
cpu_relax();
}
- POISON(anchor, 0x5a, sizeof(*anchor));
return rc;
}
EXPORT_SYMBOL(cl_sync_io_wait);
@@ -1570,7 +1397,8 @@ EXPORT_SYMBOL(cl_sync_io_wait);
/**
* Indicate that transfer of a single page completed.
*/
-void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
+void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
+ int ioret)
{
if (anchor->csi_sync_rc == 0 && ioret < 0)
anchor->csi_sync_rc = ioret;
@@ -1581,9 +1409,9 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
*/
LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
- wake_up_all(&anchor->csi_waitq);
- /* it's safe to nuke or reuse anchor now */
- atomic_set(&anchor->csi_barrier, 0);
+ LASSERT(anchor->csi_end_io);
+ anchor->csi_end_io(env, anchor);
+ /* Can't access anchor any more */
}
}
EXPORT_SYMBOL(cl_sync_io_note);
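
The cl_sync_io interface changes in three ways above: cl_sync_io_init() now takes an end-of-IO callback (cl_sync_io_end() is the stock one), cl_sync_io_note() takes the environment so the callback can use it, and cl_sync_io_wait() no longer needs the io/queue pair. A hedged usage sketch (my_wait_for_ios() is illustrative; the elided submission step must arrange one cl_sync_io_note() call per transfer):

	static int my_wait_for_ios(const struct lu_env *env, int nr, long timeout)
	{
		struct cl_sync_io anchor;

		cl_sync_io_init(&anchor, nr, cl_sync_io_end);
		/*
		 * ... start nr asynchronous transfers; each completion handler
		 * calls cl_sync_io_note(env, &anchor, ioret).  The final note
		 * invokes csi_end_io (here cl_sync_io_end) and wakes the waiter.
		 */
		return cl_sync_io_wait(env, &anchor, timeout);
	}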
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index aec644eb4..26a576b63 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -36,6 +36,7 @@
* Client Extent Lock.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
@@ -47,138 +48,18 @@
#include "../include/cl_object.h"
#include "cl_internal.h"
-/** Lock class of cl_lock::cll_guard */
-static struct lock_class_key cl_lock_guard_class;
-static struct kmem_cache *cl_lock_kmem;
-
-static struct lu_kmem_descr cl_lock_caches[] = {
- {
- .ckd_cache = &cl_lock_kmem,
- .ckd_name = "cl_lock_kmem",
- .ckd_size = sizeof (struct cl_lock)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-#define CS_LOCK_INC(o, item)
-#define CS_LOCK_DEC(o, item)
-#define CS_LOCKSTATE_INC(o, state)
-#define CS_LOCKSTATE_DEC(o, state)
-
-/**
- * Basic lock invariant that is maintained at all times. Caller either has a
- * reference to \a lock, or somehow assures that \a lock cannot be freed.
- *
- * \see cl_lock_invariant()
- */
-static int cl_lock_invariant_trusted(const struct lu_env *env,
- const struct cl_lock *lock)
-{
- return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
- atomic_read(&lock->cll_ref) >= lock->cll_holds &&
- lock->cll_holds >= lock->cll_users &&
- lock->cll_holds >= 0 &&
- lock->cll_users >= 0 &&
- lock->cll_depth >= 0;
-}
-
-/**
- * Stronger lock invariant, checking that caller has a reference on a lock.
- *
- * \see cl_lock_invariant_trusted()
- */
-static int cl_lock_invariant(const struct lu_env *env,
- const struct cl_lock *lock)
-{
- int result;
-
- result = atomic_read(&lock->cll_ref) > 0 &&
- cl_lock_invariant_trusted(env, lock);
- if (!result && env)
- CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken\n");
- return result;
-}
-
-/**
- * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
- */
-static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
-{
- return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
-}
-
-/**
- * Returns a set of counters for this lock, depending on a lock nesting.
- */
-static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
- const struct cl_lock *lock)
-{
- struct cl_thread_info *info;
- enum clt_nesting_level nesting;
-
- info = cl_env_info(env);
- nesting = cl_lock_nesting(lock);
- LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
- return &info->clt_counters[nesting];
-}
-
static void cl_lock_trace0(int level, const struct lu_env *env,
const char *prefix, const struct cl_lock *lock,
const char *func, const int line)
{
struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
- CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)(%p/%d/%d) at %s():%d\n",
- prefix, lock, atomic_read(&lock->cll_ref),
- lock->cll_guarder, lock->cll_depth,
- lock->cll_state, lock->cll_error, lock->cll_holds,
- lock->cll_users, lock->cll_flags,
- env, h->coh_nesting, cl_lock_nr_mutexed(env),
- func, line);
+ CDEBUG(level, "%s: %p (%p/%d) at %s():%d\n",
+ prefix, lock, env, h->coh_nesting, func, line);
}
-
-#define cl_lock_trace(level, env, prefix, lock) \
+#define cl_lock_trace(level, env, prefix, lock) \
cl_lock_trace0(level, env, prefix, lock, __func__, __LINE__)
-#define RETIP ((unsigned long)__builtin_return_address(0))
-
-#ifdef CONFIG_LOCKDEP
-static struct lock_class_key cl_lock_key;
-
-static void cl_lock_lockdep_init(struct cl_lock *lock)
-{
- lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
-}
-
-static void cl_lock_lockdep_acquire(const struct lu_env *env,
- struct cl_lock *lock, __u32 enqflags)
-{
- cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
- lock_map_acquire(&lock->dep_map);
-}
-
-static void cl_lock_lockdep_release(const struct lu_env *env,
- struct cl_lock *lock)
-{
- cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
- lock_release(&lock->dep_map, 0, RETIP);
-}
-
-#else /* !CONFIG_LOCKDEP */
-
-static void cl_lock_lockdep_init(struct cl_lock *lock)
-{}
-static void cl_lock_lockdep_acquire(const struct lu_env *env,
- struct cl_lock *lock, __u32 enqflags)
-{}
-static void cl_lock_lockdep_release(const struct lu_env *env,
- struct cl_lock *lock)
-{}
-
-#endif /* !CONFIG_LOCKDEP */
-
/**
* Adds lock slice to the compound lock.
*
@@ -199,62 +80,10 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
}
EXPORT_SYMBOL(cl_lock_slice_add);
-/**
- * Returns true iff a lock with the mode \a has provides at least the same
- * guarantees as a lock with the mode \a need.
- */
-int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
-{
- LINVRNT(need == CLM_READ || need == CLM_WRITE ||
- need == CLM_PHANTOM || need == CLM_GROUP);
- LINVRNT(has == CLM_READ || has == CLM_WRITE ||
- has == CLM_PHANTOM || has == CLM_GROUP);
- CLASSERT(CLM_PHANTOM < CLM_READ);
- CLASSERT(CLM_READ < CLM_WRITE);
- CLASSERT(CLM_WRITE < CLM_GROUP);
-
- if (has != CLM_GROUP)
- return need <= has;
- else
- return need == has;
-}
-EXPORT_SYMBOL(cl_lock_mode_match);
-
-/**
- * Returns true iff extent portions of lock descriptions match.
- */
-int cl_lock_ext_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need)
-{
- return
- has->cld_start <= need->cld_start &&
- has->cld_end >= need->cld_end &&
- cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
- (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
-}
-EXPORT_SYMBOL(cl_lock_ext_match);
-
-/**
- * Returns true iff a lock with the description \a has provides at least the
- * same guarantees as a lock with the description \a need.
- */
-int cl_lock_descr_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need)
+void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock)
{
- return
- cl_object_same(has->cld_obj, need->cld_obj) &&
- cl_lock_ext_match(has, need);
-}
-EXPORT_SYMBOL(cl_lock_descr_match);
+ cl_lock_trace(D_DLMTRACE, env, "destroy lock", lock);
-static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_object *obj = lock->cll_descr.cld_obj;
-
- LINVRNT(!cl_lock_is_mutexed(lock));
-
- cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
- might_sleep();
while (!list_empty(&lock->cll_layers)) {
struct cl_lock_slice *slice;
@@ -263,350 +92,36 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
list_del_init(lock->cll_layers.next);
slice->cls_ops->clo_fini(env, slice);
}
- CS_LOCK_DEC(obj, total);
- CS_LOCKSTATE_DEC(obj, lock->cll_state);
- lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
- cl_object_put(env, obj);
- lu_ref_fini(&lock->cll_reference);
- lu_ref_fini(&lock->cll_holders);
- mutex_destroy(&lock->cll_guard);
- kmem_cache_free(cl_lock_kmem, lock);
-}
-
-/**
- * Releases a reference on a lock.
- *
- * When last reference is released, lock is returned to the cache, unless it
- * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
- * immediately.
- *
- * \see cl_object_put(), cl_page_put()
- */
-void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_object *obj;
-
- LINVRNT(cl_lock_invariant(env, lock));
- obj = lock->cll_descr.cld_obj;
- LINVRNT(obj);
-
- CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
-
- if (atomic_dec_and_test(&lock->cll_ref)) {
- if (lock->cll_state == CLS_FREEING) {
- LASSERT(list_empty(&lock->cll_linkage));
- cl_lock_free(env, lock);
- }
- CS_LOCK_DEC(obj, busy);
- }
-}
-EXPORT_SYMBOL(cl_lock_put);
-
-/**
- * Acquires an additional reference to a lock.
- *
- * This can be called only by caller already possessing a reference to \a
- * lock.
- *
- * \see cl_object_get(), cl_page_get()
- */
-void cl_lock_get(struct cl_lock *lock)
-{
- LINVRNT(cl_lock_invariant(NULL, lock));
- CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
- atomic_inc(&lock->cll_ref);
-}
-EXPORT_SYMBOL(cl_lock_get);
-
-/**
- * Acquires a reference to a lock.
- *
- * This is much like cl_lock_get(), except that this function can be used to
- * acquire initial reference to the cached lock. Caller has to deal with all
- * possible races. Use with care!
- *
- * \see cl_page_get_trust()
- */
-void cl_lock_get_trust(struct cl_lock *lock)
-{
- CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
- if (atomic_inc_return(&lock->cll_ref) == 1)
- CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
-}
-EXPORT_SYMBOL(cl_lock_get_trust);
-
-/**
- * Helper function destroying the lock that wasn't completely initialized.
- *
- * Other threads can acquire references to the top-lock through its
- * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
- */
-static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
-{
- cl_lock_mutex_get(env, lock);
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
-}
-
-static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
- struct cl_object *obj,
- const struct cl_io *io,
- const struct cl_lock_descr *descr)
-{
- struct cl_lock *lock;
- struct lu_object_header *head;
-
- lock = kmem_cache_zalloc(cl_lock_kmem, GFP_NOFS);
- if (lock) {
- atomic_set(&lock->cll_ref, 1);
- lock->cll_descr = *descr;
- lock->cll_state = CLS_NEW;
- cl_object_get(obj);
- lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
- lock);
- INIT_LIST_HEAD(&lock->cll_layers);
- INIT_LIST_HEAD(&lock->cll_linkage);
- INIT_LIST_HEAD(&lock->cll_inclosure);
- lu_ref_init(&lock->cll_reference);
- lu_ref_init(&lock->cll_holders);
- mutex_init(&lock->cll_guard);
- lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
- init_waitqueue_head(&lock->cll_wq);
- head = obj->co_lu.lo_header;
- CS_LOCKSTATE_INC(obj, CLS_NEW);
- CS_LOCK_INC(obj, total);
- CS_LOCK_INC(obj, create);
- cl_lock_lockdep_init(lock);
- list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
- int err;
-
- err = obj->co_ops->coo_lock_init(env, obj, lock, io);
- if (err != 0) {
- cl_lock_finish(env, lock);
- lock = ERR_PTR(err);
- break;
- }
- }
- } else
- lock = ERR_PTR(-ENOMEM);
- return lock;
-}
-
-/**
- * Transfer the lock into INTRANSIT state and return the original state.
- *
- * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
- * \post state: CLS_INTRANSIT
- * \see CLS_INTRANSIT
- */
-static enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
- struct cl_lock *lock)
-{
- enum cl_lock_state state = lock->cll_state;
-
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(state != CLS_INTRANSIT);
- LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
- "Malformed lock state %d.\n", state);
-
- cl_lock_state_set(env, lock, CLS_INTRANSIT);
- lock->cll_intransit_owner = current;
- cl_lock_hold_add(env, lock, "intransit", current);
- return state;
-}
-
-/**
- * Exit the intransit state and restore the lock state to the original state
- */
-static void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
-{
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(lock->cll_state == CLS_INTRANSIT);
- LASSERT(state != CLS_INTRANSIT);
- LASSERT(lock->cll_intransit_owner == current);
-
- lock->cll_intransit_owner = NULL;
- cl_lock_state_set(env, lock, state);
- cl_lock_unhold(env, lock, "intransit", current);
-}
-
-/**
- * Checking whether the lock is intransit state
- */
-int cl_lock_is_intransit(struct cl_lock *lock)
-{
- LASSERT(cl_lock_is_mutexed(lock));
- return lock->cll_state == CLS_INTRANSIT &&
- lock->cll_intransit_owner != current;
-}
-EXPORT_SYMBOL(cl_lock_is_intransit);
-/**
- * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
- * truncate and O_APPEND cannot be reused for read/non-append-write, as they
- * cover multiple stripes and can trigger cascading timeouts.
- */
-static int cl_lock_fits_into(const struct lu_env *env,
- const struct cl_lock *lock,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- const struct cl_lock_slice *slice;
-
- LINVRNT(cl_lock_invariant_trusted(env, lock));
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_fits_into &&
- !slice->cls_ops->clo_fits_into(env, slice, need, io))
- return 0;
- }
- return 1;
+ POISON(lock, 0x5a, sizeof(*lock));
}
+EXPORT_SYMBOL(cl_lock_fini);
-static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
- struct cl_object *obj,
- const struct cl_io *io,
- const struct cl_lock_descr *need)
+int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
+ const struct cl_io *io)
{
- struct cl_lock *lock;
- struct cl_object_header *head;
-
- head = cl_object_header(obj);
- assert_spin_locked(&head->coh_lock_guard);
- CS_LOCK_INC(obj, lookup);
- list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
- int matched;
-
- matched = cl_lock_ext_match(&lock->cll_descr, need) &&
- lock->cll_state < CLS_FREEING &&
- lock->cll_error == 0 &&
- !(lock->cll_flags & CLF_CANCELLED) &&
- cl_lock_fits_into(env, lock, need, io);
- CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
- PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
- matched);
- if (matched) {
- cl_lock_get_trust(lock);
- CS_LOCK_INC(obj, hit);
- return lock;
- }
- }
- return NULL;
-}
-
-/**
- * Returns a lock matching description \a need.
- *
- * This is the main entry point into the cl_lock caching interface. First, a
- * cache (implemented as a per-object linked list) is consulted. If lock is
- * found there, it is returned immediately. Otherwise new lock is allocated
- * and returned. In any case, additional reference to lock is acquired.
- *
- * \see cl_object_find(), cl_page_find()
- */
-static struct cl_lock *cl_lock_find(const struct lu_env *env,
- const struct cl_io *io,
- const struct cl_lock_descr *need)
-{
- struct cl_object_header *head;
- struct cl_object *obj;
- struct cl_lock *lock;
-
- obj = need->cld_obj;
- head = cl_object_header(obj);
-
- spin_lock(&head->coh_lock_guard);
- lock = cl_lock_lookup(env, obj, io, need);
- spin_unlock(&head->coh_lock_guard);
+ struct cl_object *obj = lock->cll_descr.cld_obj;
+ struct cl_object *scan;
+ int result = 0;
- if (!lock) {
- lock = cl_lock_alloc(env, obj, io, need);
- if (!IS_ERR(lock)) {
- struct cl_lock *ghost;
+ /* Make sure cl_lock::cll_descr is initialized. */
+ LASSERT(obj);
- spin_lock(&head->coh_lock_guard);
- ghost = cl_lock_lookup(env, obj, io, need);
- if (!ghost) {
- cl_lock_get_trust(lock);
- list_add_tail(&lock->cll_linkage,
- &head->coh_locks);
- spin_unlock(&head->coh_lock_guard);
- CS_LOCK_INC(obj, busy);
- } else {
- spin_unlock(&head->coh_lock_guard);
- /*
- * Other threads can acquire references to the
- * top-lock through its sub-locks. Hence, it
- * cannot be cl_lock_free()-ed immediately.
- */
- cl_lock_finish(env, lock);
- lock = ghost;
- }
+ INIT_LIST_HEAD(&lock->cll_layers);
+ list_for_each_entry(scan, &obj->co_lu.lo_header->loh_layers,
+ co_lu.lo_linkage) {
+ result = scan->co_ops->coo_lock_init(env, scan, lock, io);
+ if (result != 0) {
+ cl_lock_fini(env, lock);
+ break;
}
}
- return lock;
-}
-/**
- * Returns existing lock matching given description. This is similar to
- * cl_lock_find() except that no new lock is created, and returned lock is
- * guaranteed to be in enum cl_lock_state::CLS_HELD state.
- */
-struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_object_header *head;
- struct cl_object *obj;
- struct cl_lock *lock;
-
- obj = need->cld_obj;
- head = cl_object_header(obj);
-
- do {
- spin_lock(&head->coh_lock_guard);
- lock = cl_lock_lookup(env, obj, io, need);
- spin_unlock(&head->coh_lock_guard);
- if (!lock)
- return NULL;
-
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state == CLS_INTRANSIT)
- /* Don't care return value. */
- cl_lock_state_wait(env, lock);
- if (lock->cll_state == CLS_FREEING) {
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
- lock = NULL;
- }
- } while (!lock);
-
- cl_lock_hold_add(env, lock, scope, source);
- cl_lock_user_add(env, lock);
- if (lock->cll_state == CLS_CACHED)
- cl_use_try(env, lock, 1);
- if (lock->cll_state == CLS_HELD) {
- cl_lock_mutex_put(env, lock);
- cl_lock_lockdep_acquire(env, lock, 0);
- cl_lock_put(env, lock);
- } else {
- cl_unuse_try(env, lock);
- cl_lock_unhold(env, lock, scope, source);
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
- lock = NULL;
- }
-
- return lock;
+ return result;
}
-EXPORT_SYMBOL(cl_lock_peek);
+EXPORT_SYMBOL(cl_lock_init);
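With this rewrite a cl_lock is no longer a cached, reference-counted object: the caller owns the memory (typically embedded in a larger IO structure) and drives a plain init/fini cycle. A minimal sketch of that pairing, assuming the cl_* declarations from cl_object.h; demo_lock_span() and its locally allocated lock are hypothetical:

static int demo_lock_span(const struct lu_env *env, struct cl_io *io,
			  struct cl_object *obj, pgoff_t start, pgoff_t end)
{
	struct cl_lock lock;
	int rc;

	memset(&lock, 0, sizeof(lock));
	lock.cll_descr.cld_obj   = obj;
	lock.cll_descr.cld_mode  = CLM_READ;
	lock.cll_descr.cld_start = start;
	lock.cll_descr.cld_end   = end;

	rc = cl_lock_init(env, &lock, io);	/* builds cll_layers */
	if (rc < 0)
		return rc;	/* cl_lock_init() already ran cl_lock_fini() */
	/* ... use the lock ... */
	cl_lock_fini(env, &lock);	/* pops and finalizes each slice */
	return 0;
}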
/**
 * Returns a slice within a lock, corresponding to the given layer in the
* device stack.
*
* \see cl_page_at()
@@ -616,8 +131,6 @@ const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
{
const struct cl_lock_slice *slice;
- LINVRNT(cl_lock_invariant_trusted(NULL, lock));
-
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
return slice;
@@ -626,1537 +139,96 @@ const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
}
EXPORT_SYMBOL(cl_lock_at);
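For illustration, a trivial wrapper around the lookup; the dtype parameter type is inferred from the removed pgoff_at_lock() below, which passes co_lu.lo_dev->ld_type:

static const struct cl_lock_slice *
demo_lock_slice(const struct cl_lock *lock, const struct lu_device_type *dtype)
{
	/* NULL when no layer of type @dtype contributed a slice */
	return cl_lock_at(lock, dtype);
}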
-static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_thread_counters *counters;
-
- counters = cl_lock_counters(env, lock);
- lock->cll_depth++;
- counters->ctc_nr_locks_locked++;
- lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
- cl_lock_trace(D_TRACE, env, "got mutex", lock);
-}
-
-/**
- * Locks cl_lock object.
- *
- * This is used to manipulate cl_lock fields, and to serialize state
- * transitions in the lock state machine.
- *
- * \post cl_lock_is_mutexed(lock)
- *
- * \see cl_lock_mutex_put()
- */
-void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_invariant(env, lock));
-
- if (lock->cll_guarder == current) {
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(lock->cll_depth > 0);
- } else {
- struct cl_object_header *hdr;
- struct cl_thread_info *info;
- int i;
-
- LINVRNT(lock->cll_guarder != current);
- hdr = cl_object_header(lock->cll_descr.cld_obj);
- /*
- * Check that mutices are taken in the bottom-to-top order.
- */
- info = cl_env_info(env);
- for (i = 0; i < hdr->coh_nesting; ++i)
- LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
- mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
- lock->cll_guarder = current;
- LINVRNT(lock->cll_depth == 0);
- }
- cl_lock_mutex_tail(env, lock);
-}
-EXPORT_SYMBOL(cl_lock_mutex_get);
-
-/**
- * Try-locks cl_lock object.
- *
- * \retval 0 \a lock was successfully locked
- *
- * \retval -EBUSY \a lock cannot be locked right now
- *
- * \post ergo(result == 0, cl_lock_is_mutexed(lock))
- *
- * \see cl_lock_mutex_get()
- */
-static int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
-
- LINVRNT(cl_lock_invariant_trusted(env, lock));
-
- result = 0;
- if (lock->cll_guarder == current) {
- LINVRNT(lock->cll_depth > 0);
- cl_lock_mutex_tail(env, lock);
- } else if (mutex_trylock(&lock->cll_guard)) {
- LINVRNT(lock->cll_depth == 0);
- lock->cll_guarder = current;
- cl_lock_mutex_tail(env, lock);
- } else
- result = -EBUSY;
- return result;
-}
-
-/**
- * Unlocks cl_lock object.
- *
- * \pre cl_lock_is_mutexed(lock)
- *
- * \see cl_lock_mutex_get()
- */
-void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_thread_counters *counters;
-
- LINVRNT(cl_lock_invariant(env, lock));
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(lock->cll_guarder == current);
- LINVRNT(lock->cll_depth > 0);
-
- counters = cl_lock_counters(env, lock);
- LINVRNT(counters->ctc_nr_locks_locked > 0);
-
- cl_lock_trace(D_TRACE, env, "put mutex", lock);
- lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
- counters->ctc_nr_locks_locked--;
- if (--lock->cll_depth == 0) {
- lock->cll_guarder = NULL;
- mutex_unlock(&lock->cll_guard);
- }
-}
-EXPORT_SYMBOL(cl_lock_mutex_put);
-
-/**
- * Returns true iff lock's mutex is owned by the current thread.
- */
-int cl_lock_is_mutexed(struct cl_lock *lock)
-{
- return lock->cll_guarder == current;
-}
-EXPORT_SYMBOL(cl_lock_is_mutexed);
-
-/**
- * Returns number of cl_lock mutices held by the current thread (environment).
- */
-int cl_lock_nr_mutexed(const struct lu_env *env)
-{
- struct cl_thread_info *info;
- int i;
- int locked;
-
- /*
- * NOTE: if summation across all nesting levels (currently 2) proves
- * too expensive, a summary counter can be added to
- * struct cl_thread_info.
- */
- info = cl_env_info(env);
- for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
- locked += info->clt_counters[i].ctc_nr_locks_locked;
- return locked;
-}
-EXPORT_SYMBOL(cl_lock_nr_mutexed);
-
-static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- if (!(lock->cll_flags & CLF_CANCELLED)) {
- const struct cl_lock_slice *slice;
-
- lock->cll_flags |= CLF_CANCELLED;
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_cancel)
- slice->cls_ops->clo_cancel(env, slice);
- }
- }
-}
-
-static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_object_header *head;
- const struct cl_lock_slice *slice;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- if (lock->cll_state < CLS_FREEING) {
- bool in_cache;
-
- LASSERT(lock->cll_state != CLS_INTRANSIT);
- cl_lock_state_set(env, lock, CLS_FREEING);
-
- head = cl_object_header(lock->cll_descr.cld_obj);
-
- spin_lock(&head->coh_lock_guard);
- in_cache = !list_empty(&lock->cll_linkage);
- if (in_cache)
- list_del_init(&lock->cll_linkage);
- spin_unlock(&head->coh_lock_guard);
-
- if (in_cache) /* coh_locks cache holds a refcount. */
- cl_lock_put(env, lock);
-
- /*
- * From now on, no new references to this lock can be acquired
- * by cl_lock_lookup().
- */
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_delete)
- slice->cls_ops->clo_delete(env, slice);
- }
- /*
- * From now on, no new references to this lock can be acquired
- * by layer-specific means (like a pointer from struct
- * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
- * lov).
- *
- * Lock will be finally freed in cl_lock_put() when last of
- * existing references goes away.
- */
- }
-}
-
-/**
- * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
- * top-lock (nesting == 0) accounts for this modification in the per-thread
- * debugging counters. Sub-lock holds can be released by a thread different
- * from one that acquired it.
- */
-static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
- int delta)
-{
- struct cl_thread_counters *counters;
- enum clt_nesting_level nesting;
-
- lock->cll_holds += delta;
- nesting = cl_lock_nesting(lock);
- if (nesting == CNL_TOP) {
- counters = &cl_env_info(env)->clt_counters[CNL_TOP];
- counters->ctc_nr_held += delta;
- LASSERT(counters->ctc_nr_held >= 0);
- }
-}
-
-/**
- * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
- * cl_lock_hold_mod() for the explanation of the debugging code.
- */
-static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
- int delta)
-{
- struct cl_thread_counters *counters;
- enum clt_nesting_level nesting;
-
- lock->cll_users += delta;
- nesting = cl_lock_nesting(lock);
- if (nesting == CNL_TOP) {
- counters = &cl_env_info(env)->clt_counters[CNL_TOP];
- counters->ctc_nr_used += delta;
- LASSERT(counters->ctc_nr_used >= 0);
- }
-}
-
-void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_holds > 0);
-
- cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
- lu_ref_del(&lock->cll_holders, scope, source);
- cl_lock_hold_mod(env, lock, -1);
- if (lock->cll_holds == 0) {
- CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
- if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
- lock->cll_descr.cld_mode == CLM_GROUP ||
- lock->cll_state != CLS_CACHED)
- /*
- * If lock is still phantom or grouplock when user is
- * done with it---destroy the lock.
- */
- lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
- if (lock->cll_flags & CLF_CANCELPEND) {
- lock->cll_flags &= ~CLF_CANCELPEND;
- cl_lock_cancel0(env, lock);
- }
- if (lock->cll_flags & CLF_DOOMED) {
- /* no longer doomed: it's dead... Jim. */
- lock->cll_flags &= ~CLF_DOOMED;
- cl_lock_delete0(env, lock);
- }
- }
-}
-EXPORT_SYMBOL(cl_lock_hold_release);
-
-/**
- * Waits until lock state is changed.
- *
- * This function is called with cl_lock mutex locked, atomically releases
- * mutex and goes to sleep, waiting for a lock state change (signaled by
- * cl_lock_signal()), and re-acquires the mutex before return.
- *
- * This function is used to wait until lock state machine makes some progress
- * and to emulate synchronous operations on top of asynchronous lock
- * interface.
- *
- * \retval -EINTR wait was interrupted
- *
- * \retval 0 wait wasn't interrupted
- *
- * \pre cl_lock_is_mutexed(lock)
- *
- * \see cl_lock_signal()
- */
-int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
-{
- wait_queue_t waiter;
- sigset_t blocked;
- int result;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_depth == 1);
- LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
-
- cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
- result = lock->cll_error;
- if (result == 0) {
- /* To avoid being interrupted by the 'non-fatal' signals
- * (SIGCHLD, for instance), we'd block them temporarily.
- * LU-305
- */
- blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
-
- init_waitqueue_entry(&waiter, current);
- add_wait_queue(&lock->cll_wq, &waiter);
- set_current_state(TASK_INTERRUPTIBLE);
- cl_lock_mutex_put(env, lock);
-
- LASSERT(cl_lock_nr_mutexed(env) == 0);
-
- /* Returning ERESTARTSYS instead of EINTR so syscalls
- * can be restarted if signals are pending here
- */
- result = -ERESTARTSYS;
- if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
- schedule();
- if (!cfs_signal_pending())
- result = 0;
- }
-
- cl_lock_mutex_get(env, lock);
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&lock->cll_wq, &waiter);
-
- /* Restore old blocked signals */
- cfs_restore_sigs(blocked);
- }
- return result;
-}
-EXPORT_SYMBOL(cl_lock_state_wait);
-
-static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
-{
- const struct cl_lock_slice *slice;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
- if (slice->cls_ops->clo_state)
- slice->cls_ops->clo_state(env, slice, state);
- wake_up_all(&lock->cll_wq);
-}
-
-/**
- * Notifies waiters that lock state changed.
- *
- * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
- * layers about state change by calling cl_lock_operations::clo_state()
- * top-to-bottom.
- */
-void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
-{
- cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
- cl_lock_state_signal(env, lock, lock->cll_state);
-}
-EXPORT_SYMBOL(cl_lock_signal);
-
-/**
- * Changes lock state.
- *
- * This function is invoked to notify layers that lock state changed, possible
- * as a result of an asynchronous event such as call-back reception.
- *
- * \post lock->cll_state == state
- *
- * \see cl_lock_operations::clo_state()
- */
-void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
-{
- LASSERT(lock->cll_state <= state ||
- (lock->cll_state == CLS_CACHED &&
- (state == CLS_HELD || /* lock found in cache */
- state == CLS_NEW || /* sub-lock canceled */
- state == CLS_INTRANSIT)) ||
- /* lock is in transit state */
- lock->cll_state == CLS_INTRANSIT);
-
- if (lock->cll_state != state) {
- CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
- CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
-
- cl_lock_state_signal(env, lock, state);
- lock->cll_state = state;
- }
-}
-EXPORT_SYMBOL(cl_lock_state_set);
-
-static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
- int result;
-
- do {
- result = 0;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_state == CLS_INTRANSIT);
-
- result = -ENOSYS;
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_unuse) {
- result = slice->cls_ops->clo_unuse(env, slice);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
- } while (result == CLO_REPEAT);
-
- return result;
-}
-
-/**
- * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
- * cl_lock_operations::clo_use() top-to-bottom to notify layers.
- * @atomic = 1, it must unuse the lock to recovery the lock to keep the
- * use process atomic
- */
-int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
+void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
{
const struct cl_lock_slice *slice;
- int result;
- enum cl_lock_state state;
-
- cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
-
- LASSERT(lock->cll_state == CLS_CACHED);
- if (lock->cll_error)
- return lock->cll_error;
-
- result = -ENOSYS;
- state = cl_lock_intransit(env, lock);
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_use) {
- result = slice->cls_ops->clo_use(env, slice);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
-
- LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
- lock->cll_state);
-
- if (result == 0) {
- state = CLS_HELD;
- } else {
- if (result == -ESTALE) {
- /*
- * ESTALE means sublock being cancelled
- * at this time, and set lock state to
- * be NEW here and ask the caller to repeat.
- */
- state = CLS_NEW;
- result = CLO_REPEAT;
- }
-
- /* @atomic means back-off-on-failure. */
- if (atomic) {
- int rc;
-
- rc = cl_unuse_try_internal(env, lock);
- /* Vet the results. */
- if (rc < 0 && result > 0)
- result = rc;
- }
+ cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
+ list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+ if (slice->cls_ops->clo_cancel)
+ slice->cls_ops->clo_cancel(env, slice);
}
- cl_lock_extransit(env, lock, state);
- return result;
}
-EXPORT_SYMBOL(cl_use_try);
+EXPORT_SYMBOL(cl_lock_cancel);
/**
- * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
- * top-to-bottom.
+ * Enqueue a lock.
+ * \param anchor: if we need to wait for resources before getting the lock,
+ * use @anchor for the purpose.
+ * \retval 0 enqueued successfully
+ * \retval <0 error code
*/
-static int cl_enqueue_kick(const struct lu_env *env,
- struct cl_lock *lock,
- struct cl_io *io, __u32 flags)
+int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock *lock, struct cl_sync_io *anchor)
{
- int result;
const struct cl_lock_slice *slice;
+ int rc = -ENOSYS;
- result = -ENOSYS;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_enqueue) {
- result = slice->cls_ops->clo_enqueue(env,
- slice, io, flags);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
- return result;
-}
-
-/**
- * Tries to enqueue a lock.
- *
- * This function is called repeatedly by cl_enqueue() until either lock is
- * enqueued, or error occurs. This function does not block waiting for
- * networking communication to complete.
- *
- * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
- * lock->cll_state == CLS_HELD)
- *
- * \see cl_enqueue() cl_lock_operations::clo_enqueue()
- * \see cl_lock_state::CLS_ENQUEUED
- */
-int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
- struct cl_io *io, __u32 flags)
-{
- int result;
-
- cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
- do {
- LINVRNT(cl_lock_is_mutexed(lock));
-
- result = lock->cll_error;
- if (result != 0)
- break;
-
- switch (lock->cll_state) {
- case CLS_NEW:
- cl_lock_state_set(env, lock, CLS_QUEUING);
- /* fall-through */
- case CLS_QUEUING:
- /* kick layers. */
- result = cl_enqueue_kick(env, lock, io, flags);
- /* For AGL case, the cl_lock::cll_state may
- * become CLS_HELD already.
- */
- if (result == 0 && lock->cll_state == CLS_QUEUING)
- cl_lock_state_set(env, lock, CLS_ENQUEUED);
- break;
- case CLS_INTRANSIT:
- LASSERT(cl_lock_is_intransit(lock));
- result = CLO_WAIT;
- break;
- case CLS_CACHED:
- /* yank lock from the cache. */
- result = cl_use_try(env, lock, 0);
- break;
- case CLS_ENQUEUED:
- case CLS_HELD:
- result = 0;
- break;
- default:
- case CLS_FREEING:
- /*
- * impossible, only held locks with increased
- * ->cll_holds can be enqueued, and they cannot be
- * freed.
- */
- LBUG();
- }
- } while (result == CLO_REPEAT);
- return result;
-}
-EXPORT_SYMBOL(cl_enqueue_try);
-
-/**
- * Cancel the conflicting lock found during previous enqueue.
- *
- * \retval 0 conflicting lock has been canceled.
- * \retval -ve error code.
- */
-int cl_lock_enqueue_wait(const struct lu_env *env,
- struct cl_lock *lock,
- int keep_mutex)
-{
- struct cl_lock *conflict;
- int rc = 0;
-
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(lock->cll_state == CLS_QUEUING);
- LASSERT(lock->cll_conflict);
-
- conflict = lock->cll_conflict;
- lock->cll_conflict = NULL;
+ if (!slice->cls_ops->clo_enqueue)
+ continue;
- cl_lock_mutex_put(env, lock);
- LASSERT(cl_lock_nr_mutexed(env) == 0);
-
- cl_lock_mutex_get(env, conflict);
- cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
- cl_lock_cancel(env, conflict);
- cl_lock_delete(env, conflict);
-
- while (conflict->cll_state != CLS_FREEING) {
- rc = cl_lock_state_wait(env, conflict);
+ rc = slice->cls_ops->clo_enqueue(env, slice, io, anchor);
if (rc != 0)
break;
- }
- cl_lock_mutex_put(env, conflict);
- lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
- cl_lock_put(env, conflict);
-
- if (keep_mutex)
- cl_lock_mutex_get(env, lock);
-
- LASSERT(rc <= 0);
- return rc;
-}
-EXPORT_SYMBOL(cl_lock_enqueue_wait);
-
-static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
- struct cl_io *io, __u32 enqflags)
-{
- int result;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_holds > 0);
-
- cl_lock_user_add(env, lock);
- do {
- result = cl_enqueue_try(env, lock, io, enqflags);
- if (result == CLO_WAIT) {
- if (lock->cll_conflict)
- result = cl_lock_enqueue_wait(env, lock, 1);
- else
- result = cl_lock_state_wait(env, lock);
- if (result == 0)
- continue;
- }
- break;
- } while (1);
- if (result != 0)
- cl_unuse_try(env, lock);
- LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
- lock->cll_state == CLS_ENQUEUED ||
- lock->cll_state == CLS_HELD));
- return result;
-}
-
-/**
- * Tries to unlock a lock.
- *
- * This function is called to release underlying resource:
- * 1. for top lock, the resource is sublocks it held;
- * 2. for sublock, the resource is the reference to dlmlock.
- *
- * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
- *
- * \see cl_unuse() cl_lock_operations::clo_unuse()
- * \see cl_lock_state::CLS_CACHED
- */
-int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
- enum cl_lock_state state = CLS_NEW;
-
- cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
-
- if (lock->cll_users > 1) {
- cl_lock_user_del(env, lock);
- return 0;
- }
-
- /* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
- * underlying resources.
- */
- if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
- cl_lock_user_del(env, lock);
- return 0;
- }
-
- /*
- * New lock users (->cll_users) are not protecting unlocking
- * from proceeding. From this point, lock eventually reaches
- * CLS_CACHED, is reinitialized to CLS_NEW or fails into
- * CLS_FREEING.
- */
- state = cl_lock_intransit(env, lock);
-
- result = cl_unuse_try_internal(env, lock);
- LASSERT(lock->cll_state == CLS_INTRANSIT);
- LASSERT(result != CLO_WAIT);
- cl_lock_user_del(env, lock);
- if (result == 0 || result == -ESTALE) {
- /*
- * Return lock back to the cache. This is the only
- * place where lock is moved into CLS_CACHED state.
- *
- * If one of ->clo_unuse() methods returned -ESTALE, lock
- * cannot be placed into cache and has to be
- * re-initialized. This happens e.g., when a sub-lock was
- * canceled while unlocking was in progress.
- */
- if (state == CLS_HELD && result == 0)
- state = CLS_CACHED;
- else
- state = CLS_NEW;
- cl_lock_extransit(env, lock, state);
-
- /*
- * Hide -ESTALE error.
- * If the lock is a glimpse lock, and it has multiple
- * stripes. Assuming that one of its sublock returned -ENAVAIL,
- * and other sublocks are matched write locks. In this case,
- * we can't set this lock to error because otherwise some of
- * its sublocks may not be canceled. This causes some dirty
- * pages won't be written to OSTs. -jay
- */
- result = 0;
- } else {
- CERROR("result = %d, this is unlikely!\n", result);
- state = CLS_NEW;
- cl_lock_extransit(env, lock, state);
- }
- return result ?: lock->cll_error;
-}
-EXPORT_SYMBOL(cl_unuse_try);
-
-static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
-
- result = cl_unuse_try(env, lock);
- if (result)
- CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
-}
-
-/**
- * Unlocks a lock.
- */
-void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
-{
- cl_lock_mutex_get(env, lock);
- cl_unuse_locked(env, lock);
- cl_lock_mutex_put(env, lock);
- cl_lock_lockdep_release(env, lock);
-}
-EXPORT_SYMBOL(cl_unuse);
-
-/**
- * Tries to wait for a lock.
- *
- * This function is called repeatedly by cl_wait() until either lock is
- * granted, or error occurs. This function does not block waiting for network
- * communication to complete.
- *
- * \see cl_wait() cl_lock_operations::clo_wait()
- * \see cl_lock_state::CLS_HELD
- */
-int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
- int result;
-
- cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
- do {
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERTF(lock->cll_state == CLS_QUEUING ||
- lock->cll_state == CLS_ENQUEUED ||
- lock->cll_state == CLS_HELD ||
- lock->cll_state == CLS_INTRANSIT,
- "lock state: %d\n", lock->cll_state);
- LASSERT(lock->cll_users > 0);
- LASSERT(lock->cll_holds > 0);
-
- result = lock->cll_error;
- if (result != 0)
- break;
-
- if (cl_lock_is_intransit(lock)) {
- result = CLO_WAIT;
- break;
- }
-
- if (lock->cll_state == CLS_HELD)
- /* nothing to do */
- break;
-
- result = -ENOSYS;
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_wait) {
- result = slice->cls_ops->clo_wait(env, slice);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
- if (result == 0) {
- LASSERT(lock->cll_state != CLS_INTRANSIT);
- cl_lock_state_set(env, lock, CLS_HELD);
- }
- } while (result == CLO_REPEAT);
- return result;
-}
-EXPORT_SYMBOL(cl_wait_try);
-
-/**
- * Waits until enqueued lock is granted.
- *
- * \pre current thread or io owns a hold on the lock
- * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
- * lock->cll_state == CLS_HELD)
- *
- * \post ergo(result == 0, lock->cll_state == CLS_HELD)
- */
-int cl_wait(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
-
- cl_lock_mutex_get(env, lock);
-
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
- "Wrong state %d\n", lock->cll_state);
- LASSERT(lock->cll_holds > 0);
-
- do {
- result = cl_wait_try(env, lock);
- if (result == CLO_WAIT) {
- result = cl_lock_state_wait(env, lock);
- if (result == 0)
- continue;
- }
- break;
- } while (1);
- if (result < 0) {
- cl_unuse_try(env, lock);
- cl_lock_lockdep_release(env, lock);
- }
- cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
- cl_lock_mutex_put(env, lock);
- LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
- return result;
-}
-EXPORT_SYMBOL(cl_wait);
-
-/**
- * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
- * value.
- */
-unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
- unsigned long pound;
- unsigned long ounce;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- pound = 0;
- list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_weigh) {
- ounce = slice->cls_ops->clo_weigh(env, slice);
- pound += ounce;
- if (pound < ounce) /* over-weight^Wflow */
- pound = ~0UL;
- }
- }
- return pound;
-}
-EXPORT_SYMBOL(cl_lock_weigh);
-
-/**
- * Notifies layers that lock description changed.
- *
- * The server can grant client a lock different from one that was requested
- * (e.g., larger in extent). This method is called when actually granted lock
- * description becomes known to let layers to accommodate for changed lock
- * description.
- *
- * \see cl_lock_operations::clo_modify()
- */
-int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
- const struct cl_lock_descr *desc)
-{
- const struct cl_lock_slice *slice;
- struct cl_object *obj = lock->cll_descr.cld_obj;
- struct cl_object_header *hdr = cl_object_header(obj);
- int result;
-
- cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
- /* don't allow object to change */
- LASSERT(obj == desc->cld_obj);
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_modify) {
- result = slice->cls_ops->clo_modify(env, slice, desc);
- if (result != 0)
- return result;
- }
- }
- CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
- PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
- /*
- * Just replace description in place. Nothing more is needed for
- * now. If locks were indexed according to their extent and/or mode,
- * that index would have to be updated here.
- */
- spin_lock(&hdr->coh_lock_guard);
- lock->cll_descr = *desc;
- spin_unlock(&hdr->coh_lock_guard);
- return 0;
-}
-EXPORT_SYMBOL(cl_lock_modify);
-
-/**
- * Initializes lock closure with a given origin.
- *
- * \see cl_lock_closure
- */
-void cl_lock_closure_init(const struct lu_env *env,
- struct cl_lock_closure *closure,
- struct cl_lock *origin, int wait)
-{
- LINVRNT(cl_lock_is_mutexed(origin));
- LINVRNT(cl_lock_invariant(env, origin));
-
- INIT_LIST_HEAD(&closure->clc_list);
- closure->clc_origin = origin;
- closure->clc_wait = wait;
- closure->clc_nr = 0;
-}
-EXPORT_SYMBOL(cl_lock_closure_init);
-
-/**
- * Builds a closure of \a lock.
- *
- * Building of a closure consists of adding initial lock (\a lock) into it,
- * and calling cl_lock_operations::clo_closure() methods of \a lock. These
- * methods might call cl_lock_closure_build() recursively again, adding more
- * locks to the closure, etc.
- *
- * \see cl_lock_closure
- */
-int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure)
-{
- const struct cl_lock_slice *slice;
- int result;
-
- LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
- LINVRNT(cl_lock_invariant(env, closure->clc_origin));
-
- result = cl_lock_enclosure(env, lock, closure);
- if (result == 0) {
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_closure) {
- result = slice->cls_ops->clo_closure(env, slice,
- closure);
- if (result != 0)
- break;
- }
- }
- }
- if (result != 0)
- cl_lock_disclosure(env, closure);
- return result;
-}
-EXPORT_SYMBOL(cl_lock_closure_build);
-
-/**
- * Adds new lock to a closure.
- *
- * Try-locks \a lock and if succeeded, adds it to the closure (never more than
- * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
- * until next try-lock is likely to succeed.
- */
-int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure)
-{
- int result = 0;
-
- cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
- if (!cl_lock_mutex_try(env, lock)) {
- /*
- * If lock->cll_inclosure is not empty, lock is already in
- * this closure.
- */
- if (list_empty(&lock->cll_inclosure)) {
- cl_lock_get_trust(lock);
- lu_ref_add(&lock->cll_reference, "closure", closure);
- list_add(&lock->cll_inclosure, &closure->clc_list);
- closure->clc_nr++;
- } else
- cl_lock_mutex_put(env, lock);
- result = 0;
- } else {
- cl_lock_disclosure(env, closure);
- if (closure->clc_wait) {
- cl_lock_get_trust(lock);
- lu_ref_add(&lock->cll_reference, "closure-w", closure);
- cl_lock_mutex_put(env, closure->clc_origin);
-
- LASSERT(cl_lock_nr_mutexed(env) == 0);
- cl_lock_mutex_get(env, lock);
- cl_lock_mutex_put(env, lock);
-
- cl_lock_mutex_get(env, closure->clc_origin);
- lu_ref_del(&lock->cll_reference, "closure-w", closure);
- cl_lock_put(env, lock);
- }
- result = CLO_REPEAT;
- }
- return result;
-}
-EXPORT_SYMBOL(cl_lock_enclosure);
-
-/** Releases mutices of enclosed locks. */
-void cl_lock_disclosure(const struct lu_env *env,
- struct cl_lock_closure *closure)
-{
- struct cl_lock *scan;
- struct cl_lock *temp;
-
- cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
- list_for_each_entry_safe(scan, temp, &closure->clc_list,
- cll_inclosure) {
- list_del_init(&scan->cll_inclosure);
- cl_lock_mutex_put(env, scan);
- lu_ref_del(&scan->cll_reference, "closure", closure);
- cl_lock_put(env, scan);
- closure->clc_nr--;
- }
- LASSERT(closure->clc_nr == 0);
-}
-EXPORT_SYMBOL(cl_lock_disclosure);
-
-/** Finalizes a closure. */
-void cl_lock_closure_fini(struct cl_lock_closure *closure)
-{
- LASSERT(closure->clc_nr == 0);
- LASSERT(list_empty(&closure->clc_list));
-}
-EXPORT_SYMBOL(cl_lock_closure_fini);
-
-/**
- * Destroys this lock. Notifies layers (bottom-to-top) that lock is being
- * destroyed, then destroy the lock. If there are holds on the lock, postpone
- * destruction until all holds are released. This is called when a decision is
- * made to destroy the lock in the future. E.g., when a blocking AST is
- * received on it, or fatal communication error happens.
- *
- * Caller must have a reference on this lock to prevent a situation, when
- * deleted lock lingers in memory for indefinite time, because nobody calls
- * cl_lock_put() to finish it.
- *
- * \pre atomic_read(&lock->cll_ref) > 0
- * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
- * cl_lock_nr_mutexed(env) == 1)
- * [i.e., if a top-lock is deleted, mutices of no other locks can be
- * held, as deletion of sub-locks might require releasing a top-lock
- * mutex]
- *
- * \see cl_lock_operations::clo_delete()
- * \see cl_lock::cll_holds
- */
-void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
- cl_lock_nr_mutexed(env) == 1));
-
- cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
- if (lock->cll_holds == 0)
- cl_lock_delete0(env, lock);
- else
- lock->cll_flags |= CLF_DOOMED;
-}
-EXPORT_SYMBOL(cl_lock_delete);
-
-/**
- * Mark lock as irrecoverably failed, and mark it for destruction. This
- * happens when, e.g., server fails to grant a lock to us, or networking
- * time-out happens.
- *
- * \pre atomic_read(&lock->cll_ref) > 0
- *
- * \see clo_lock_delete()
- * \see cl_lock::cll_holds
- */
-void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- if (lock->cll_error == 0 && error != 0) {
- cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
- lock->cll_error = error;
- cl_lock_signal(env, lock);
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- }
-}
-EXPORT_SYMBOL(cl_lock_error);
-
-/**
- * Cancels this lock. Notifies layers
- * (bottom-to-top) that lock is being cancelled, then destroy the lock. If
- * there are holds on the lock, postpone cancellation until
- * all holds are released.
- *
- * Cancellation notification is delivered to layers at most once.
- *
- * \see cl_lock_operations::clo_cancel()
- * \see cl_lock::cll_holds
- */
-void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
- if (lock->cll_holds == 0)
- cl_lock_cancel0(env, lock);
- else
- lock->cll_flags |= CLF_CANCELPEND;
-}
-EXPORT_SYMBOL(cl_lock_cancel);
-
-/**
- * Finds an existing lock covering given index and optionally different from a
- * given \a except lock.
- */
-struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
- struct cl_object *obj, pgoff_t index,
- struct cl_lock *except,
- int pending, int canceld)
-{
- struct cl_object_header *head;
- struct cl_lock *scan;
- struct cl_lock *lock;
- struct cl_lock_descr *need;
-
- head = cl_object_header(obj);
- need = &cl_env_info(env)->clt_descr;
- lock = NULL;
-
- need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
- * not PHANTOM
- */
- need->cld_start = need->cld_end = index;
- need->cld_enq_flags = 0;
-
- spin_lock(&head->coh_lock_guard);
- /* It is fine to match any group lock since there could be only one
- * with a uniq gid and it conflicts with all other lock modes too
- */
- list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
- if (scan != except &&
- (scan->cll_descr.cld_mode == CLM_GROUP ||
- cl_lock_ext_match(&scan->cll_descr, need)) &&
- scan->cll_state >= CLS_HELD &&
- scan->cll_state < CLS_FREEING &&
- /*
- * This check is racy as the lock can be canceled right
- * after it is done, but this is fine, because page exists
- * already.
- */
- (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
- (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
- /* Don't increase cs_hit here since this
- * is just a helper function.
- */
- cl_lock_get_trust(scan);
- lock = scan;
- break;
}
- }
- spin_unlock(&head->coh_lock_guard);
- return lock;
-}
-EXPORT_SYMBOL(cl_lock_at_pgoff);
-
-/**
- * Calculate the page offset at the layer of @lock.
- * At the time of this writing, @page is top page and @lock is sub lock.
- */
-static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
-{
- struct lu_device_type *dtype;
- const struct cl_page_slice *slice;
-
- dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
- slice = cl_page_at(page, dtype);
- return slice->cpl_page->cp_index;
+ return rc;
}
+EXPORT_SYMBOL(cl_lock_enqueue);
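The loop above implies the shape of the per-layer method: at least one slice must supply clo_enqueue, or the -ENOSYS default is returned. A hedged sketch of such a method; only the signature is taken from the call site above, and the demo_* name is hypothetical:

static int demo_lock_slice_enqueue(const struct lu_env *env,
				   const struct cl_lock_slice *slice,
				   struct cl_io *io,
				   struct cl_sync_io *anchor)
{
	/*
	 * Layer-specific work goes here.  When @anchor is non-NULL and the
	 * grant is deferred, the layer signals completion later through
	 * cl_sync_io_note(); returning non-zero stops the top-to-bottom
	 * walk in cl_lock_enqueue().
	 */
	return 0;
}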
/**
- * Check if page @page is covered by an extra lock or discard it.
+ * Main high-level entry point of the cl_lock interface: initializes and
+ * enqueues a lock matching the given description.
*/
-static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
+int cl_lock_request(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock *lock)
{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_lock *lock = cbdata;
- pgoff_t index = pgoff_at_lock(page, lock);
+ struct cl_sync_io *anchor = NULL;
+ __u32 enq_flags = lock->cll_descr.cld_enq_flags;
+ int rc;
- if (index >= info->clt_fn_index) {
- struct cl_lock *tmp;
+ rc = cl_lock_init(env, lock, io);
+ if (rc < 0)
+ return rc;
- /* refresh non-overlapped index */
- tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
- lock, 1, 0);
- if (tmp) {
- /* Cache the first-non-overlapped index so as to skip
- * all pages within [index, clt_fn_index). This
- * is safe because if tmp lock is canceled, it will
- * discard these pages.
- */
- info->clt_fn_index = tmp->cll_descr.cld_end + 1;
- if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
- info->clt_fn_index = CL_PAGE_EOF;
- cl_lock_put(env, tmp);
- } else if (cl_page_own(env, io, page) == 0) {
- /* discard the page */
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- } else {
- LASSERT(page->cp_state == CPS_FREEING);
- }
+ if ((enq_flags & CEF_ASYNC) && !(enq_flags & CEF_AGL)) {
+ anchor = &cl_env_info(env)->clt_anchor;
+ cl_sync_io_init(anchor, 1, cl_sync_io_end);
}
- info->clt_next_index = index + 1;
- return CLP_GANG_OKAY;
-}
+ rc = cl_lock_enqueue(env, io, lock, anchor);
-static int discard_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
-{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_lock *lock = cbdata;
+ if (anchor) {
+ int rc2;
- LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
- KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
- !PageWriteback(cl_page_vmpage(env, page))));
- KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
- !PageDirty(cl_page_vmpage(env, page))));
-
- info->clt_next_index = pgoff_at_lock(page, lock) + 1;
- if (cl_page_own(env, io, page) == 0) {
- /* discard the page */
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- } else {
- LASSERT(page->cp_state == CPS_FREEING);
+ /* drop the reference count held at initialization time */
+ cl_sync_io_note(env, anchor, 0);
+ rc2 = cl_sync_io_wait(env, anchor, 0);
+ if (rc2 < 0 && rc == 0)
+ rc = rc2;
}
- return CLP_GANG_OKAY;
-}
+ if (rc < 0)
+ cl_lock_release(env, lock);
-/**
- * Discard pages protected by the given lock. This function traverses radix
- * tree to find all covering pages and discard them. If a page is being covered
- * by other locks, it should remain in cache.
- *
- * If error happens on any step, the process continues anyway (the reasoning
- * behind this being that lock cancellation cannot be delayed indefinitely).
- */
-int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_io *io = &info->clt_io;
- struct cl_lock_descr *descr = &lock->cll_descr;
- cl_page_gang_cb_t cb;
- int res;
- int result;
-
- LINVRNT(cl_lock_invariant(env, lock));
-
- io->ci_obj = cl_object_top(descr->cld_obj);
- io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (result != 0)
- goto out;
-
- cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
- info->clt_fn_index = info->clt_next_index = descr->cld_start;
- do {
- res = cl_page_gang_lookup(env, descr->cld_obj, io,
- info->clt_next_index, descr->cld_end,
- cb, (void *)lock);
- if (info->clt_next_index > descr->cld_end)
- break;
-
- if (res == CLP_GANG_RESCHED)
- cond_resched();
- } while (res != CLP_GANG_OKAY);
-out:
- cl_io_fini(env, io);
- return result;
-}
-EXPORT_SYMBOL(cl_lock_discard_pages);
-
-/**
- * Eliminate all locks for a given object.
- *
- * Caller has to guarantee that no lock is in active use.
- *
- * \param cancel when this is set, cl_locks_prune() cancels locks before
- * destroying.
- */
-void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
-{
- struct cl_object_header *head;
- struct cl_lock *lock;
-
- head = cl_object_header(obj);
- /*
- * If locks are destroyed without cancellation, all pages must be
- * already destroyed (as otherwise they will be left unprotected).
- */
- LASSERT(ergo(!cancel,
- !head->coh_tree.rnode && head->coh_pages == 0));
-
- spin_lock(&head->coh_lock_guard);
- while (!list_empty(&head->coh_locks)) {
- lock = container_of(head->coh_locks.next,
- struct cl_lock, cll_linkage);
- cl_lock_get_trust(lock);
- spin_unlock(&head->coh_lock_guard);
- lu_ref_add(&lock->cll_reference, "prune", current);
-
-again:
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state < CLS_FREEING) {
- LASSERT(lock->cll_users <= 1);
- if (unlikely(lock->cll_users == 1)) {
- struct l_wait_info lwi = { 0 };
-
- cl_lock_mutex_put(env, lock);
- l_wait_event(lock->cll_wq,
- lock->cll_users == 0,
- &lwi);
- goto again;
- }
-
- if (cancel)
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- }
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, "prune", current);
- cl_lock_put(env, lock);
- spin_lock(&head->coh_lock_guard);
- }
- spin_unlock(&head->coh_lock_guard);
-}
-EXPORT_SYMBOL(cl_locks_prune);
-
-static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
- const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_lock *lock;
-
- while (1) {
- lock = cl_lock_find(env, io, need);
- if (IS_ERR(lock))
- break;
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state < CLS_FREEING &&
- !(lock->cll_flags & CLF_CANCELLED)) {
- cl_lock_hold_mod(env, lock, 1);
- lu_ref_add(&lock->cll_holders, scope, source);
- lu_ref_add(&lock->cll_reference, scope, source);
- break;
- }
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
- }
- return lock;
-}
-
-/**
- * Returns a lock matching \a need description with a reference and a hold on
- * it.
- *
- * This is much like cl_lock_find(), except that cl_lock_hold() additionally
- * guarantees that lock is not in the CLS_FREEING state on return.
- */
-struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_lock *lock;
-
- lock = cl_lock_hold_mutex(env, io, need, scope, source);
- if (!IS_ERR(lock))
- cl_lock_mutex_put(env, lock);
- return lock;
-}
-EXPORT_SYMBOL(cl_lock_hold);
-
-/**
- * Main high-level entry point of cl_lock interface that finds existing or
- * enqueues new lock matching given description.
- */
-struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_lock *lock;
- int rc;
- __u32 enqflags = need->cld_enq_flags;
-
- do {
- lock = cl_lock_hold_mutex(env, io, need, scope, source);
- if (IS_ERR(lock))
- break;
-
- rc = cl_enqueue_locked(env, lock, io, enqflags);
- if (rc == 0) {
- if (cl_lock_fits_into(env, lock, need, io)) {
- if (!(enqflags & CEF_AGL)) {
- cl_lock_mutex_put(env, lock);
- cl_lock_lockdep_acquire(env, lock,
- enqflags);
- break;
- }
- rc = 1;
- }
- cl_unuse_locked(env, lock);
- }
- cl_lock_trace(D_DLMTRACE, env,
- rc <= 0 ? "enqueue failed" : "agl succeed", lock);
- cl_lock_hold_release(env, lock, scope, source);
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, scope, source);
- cl_lock_put(env, lock);
- if (rc > 0) {
- LASSERT(enqflags & CEF_AGL);
- lock = NULL;
- } else if (rc != 0) {
- lock = ERR_PTR(rc);
- }
- } while (rc == 0);
- return lock;
+ return rc;
}
EXPORT_SYMBOL(cl_lock_request);
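A sketch of the typical pairing, assuming the descriptor was filled in as in the cl_lock_init() example earlier; note that on failure cl_lock_request() has already released the lock, so the caller must not release it again:

static int demo_locked_region(const struct lu_env *env, struct cl_io *io,
			      struct cl_lock *lock)
{
	int rc;

	rc = cl_lock_request(env, io, lock);	/* init + enqueue (+ wait) */
	if (rc < 0)
		return rc;	/* already released by cl_lock_request() */
	/* ... access the covered extent ... */
	cl_lock_release(env, lock);	/* cancel, then fini */
	return 0;
}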
/**
- * Adds a hold to a known lock.
- */
-void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_state != CLS_FREEING);
-
- cl_lock_hold_mod(env, lock, 1);
- cl_lock_get(lock);
- lu_ref_add(&lock->cll_holders, scope, source);
- lu_ref_add(&lock->cll_reference, scope, source);
-}
-EXPORT_SYMBOL(cl_lock_hold_add);
-
-/**
- * Releases a hold and a reference on a lock, on which caller acquired a
- * mutex.
- */
-void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
-{
- LINVRNT(cl_lock_invariant(env, lock));
- cl_lock_hold_release(env, lock, scope, source);
- lu_ref_del(&lock->cll_reference, scope, source);
- cl_lock_put(env, lock);
-}
-EXPORT_SYMBOL(cl_lock_unhold);
-
-/**
* Releases a hold and a reference on a lock, obtained by cl_lock_hold().
*/
-void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
+void cl_lock_release(const struct lu_env *env, struct cl_lock *lock)
{
- LINVRNT(cl_lock_invariant(env, lock));
cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
- cl_lock_mutex_get(env, lock);
- cl_lock_hold_release(env, lock, scope, source);
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, scope, source);
- cl_lock_put(env, lock);
+ cl_lock_cancel(env, lock);
+ cl_lock_fini(env, lock);
}
EXPORT_SYMBOL(cl_lock_release);
-void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- cl_lock_used_mod(env, lock, 1);
-}
-EXPORT_SYMBOL(cl_lock_user_add);
-
-void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_users > 0);
-
- cl_lock_used_mod(env, lock, -1);
- if (lock->cll_users == 0)
- wake_up_all(&lock->cll_wq);
-}
-EXPORT_SYMBOL(cl_lock_user_del);
-
const char *cl_lock_mode_name(const enum cl_lock_mode mode)
{
static const char *names[] = {
- [CLM_PHANTOM] = "P",
[CLM_READ] = "R",
[CLM_WRITE] = "W",
[CLM_GROUP] = "G"
@@ -2189,10 +261,8 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_lock *lock)
{
const struct cl_lock_slice *slice;
- (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
- lock, atomic_read(&lock->cll_ref),
- lock->cll_state, lock->cll_error, lock->cll_holds,
- lock->cll_users, lock->cll_flags);
+
+ (*printer)(env, cookie, "lock@%p", lock);
cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
(*printer)(env, cookie, " {\n");
@@ -2207,13 +277,3 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
(*printer)(env, cookie, "} lock@%p\n", lock);
}
EXPORT_SYMBOL(cl_lock_print);
-
-int cl_lock_init(void)
-{
- return lu_kmem_init(cl_lock_caches);
-}
-
-void cl_lock_fini(void)
-{
- lu_kmem_fini(cl_lock_caches);
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 43e299d4d..5940f3031 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -36,6 +36,7 @@
* Client Lustre Object.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
/*
@@ -43,8 +44,6 @@
*
* i_mutex
* PG_locked
- * ->coh_page_guard
- * ->coh_lock_guard
* ->coh_attr_guard
* ->ls_guard
*/
@@ -63,10 +62,6 @@
static struct kmem_cache *cl_env_kmem;
-/** Lock class of cl_object_header::coh_page_guard */
-static struct lock_class_key cl_page_guard_class;
-/** Lock class of cl_object_header::coh_lock_guard */
-static struct lock_class_key cl_lock_guard_class;
/** Lock class of cl_object_header::coh_attr_guard */
static struct lock_class_key cl_attr_guard_class;
@@ -81,17 +76,9 @@ int cl_object_header_init(struct cl_object_header *h)
result = lu_object_header_init(&h->coh_lu);
if (result == 0) {
- spin_lock_init(&h->coh_page_guard);
- spin_lock_init(&h->coh_lock_guard);
spin_lock_init(&h->coh_attr_guard);
- lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
- lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
- h->coh_pages = 0;
- /* XXX hard coded GFP_* mask. */
- INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
- INIT_LIST_HEAD(&h->coh_locks);
- h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
+ h->coh_page_bufsize = 0;
}
return result;
}
@@ -145,7 +132,7 @@ EXPORT_SYMBOL(cl_object_get);
/**
* Returns the top-object for a given \a o.
*
- * \see cl_page_top(), cl_io_top()
+ * \see cl_io_top()
*/
struct cl_object *cl_object_top(struct cl_object *o)
{
@@ -315,6 +302,29 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
EXPORT_SYMBOL(cl_conf_set);
/**
+ * Prunes caches of pages and locks for this object.
+ */
+int cl_object_prune(const struct lu_env *env, struct cl_object *obj)
+{
+ struct lu_object_header *top;
+ struct cl_object *o;
+ int result;
+
+ top = obj->co_lu.lo_header;
+ result = 0;
+ list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
+ if (o->co_ops->coo_prune) {
+ result = o->co_ops->coo_prune(env, o);
+ if (result != 0)
+ break;
+ }
+ }
+
+ return result;
+}
+EXPORT_SYMBOL(cl_object_prune);
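cl_object_prune() now delegates to a per-layer coo_prune method in place of the removed cl_pages_prune()/cl_locks_prune() pair. A sketch of the method shape a layer would plug into its cl_object_operations; the demo_* name is hypothetical:

static int demo_object_prune(const struct lu_env *env, struct cl_object *obj)
{
	/*
	 * Invalidate this layer's cached pages and locks for @obj; a
	 * non-zero return aborts the top-to-bottom walk in
	 * cl_object_prune().
	 */
	return 0;
}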
+
+/**
* Helper function removing all object locks, and marking object for
* deletion. All object pages must have been deleted at this point.
*
@@ -323,34 +333,12 @@ EXPORT_SYMBOL(cl_conf_set);
*/
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
{
- struct cl_object_header *hdr;
-
- hdr = cl_object_header(obj);
- LASSERT(!hdr->coh_tree.rnode);
- LASSERT(hdr->coh_pages == 0);
+ struct cl_object_header *hdr = cl_object_header(obj);
set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
- /*
- * Destroy all locks. Object destruction (including cl_inode_fini())
- * cannot cancel the locks, because in the case of a local client,
- * where client and server share the same thread running
- * prune_icache(), this can dead-lock with ldlm_cancel_handler()
- * waiting on __wait_on_freeing_inode().
- */
- cl_locks_prune(env, obj, 0);
}
EXPORT_SYMBOL(cl_object_kill);
-/**
- * Prunes caches of pages and locks for this object.
- */
-void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
-{
- cl_pages_prune(env, obj);
- cl_locks_prune(env, obj, 1);
-}
-EXPORT_SYMBOL(cl_object_prune);
-
void cache_stats_init(struct cache_stats *cs, const char *name)
{
int i;
@@ -383,6 +371,8 @@ static int cache_stats_print(const struct cache_stats *cs,
return 0;
}
+static void cl_env_percpu_refill(void);
+
/**
* Initialize client site.
*
@@ -397,11 +387,9 @@ int cl_site_init(struct cl_site *s, struct cl_device *d)
result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
if (result == 0) {
cache_stats_init(&s->cs_pages, "pages");
- cache_stats_init(&s->cs_locks, "locks");
for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
atomic_set(&s->cs_pages_state[0], 0);
- for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
- atomic_set(&s->cs_locks_state[i], 0);
+ cl_env_percpu_refill();
}
return result;
}
@@ -435,15 +423,6 @@ int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
[CPS_PAGEIN] = "r",
[CPS_FREEING] = "f"
};
- static const char *lstate[] = {
- [CLS_NEW] = "n",
- [CLS_QUEUING] = "q",
- [CLS_ENQUEUED] = "e",
- [CLS_HELD] = "h",
- [CLS_INTRANSIT] = "t",
- [CLS_CACHED] = "c",
- [CLS_FREEING] = "f"
- };
/*
lookup hit total busy create
pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
@@ -457,12 +436,6 @@ locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
seq_printf(m, "%s: %u ", pstate[i],
atomic_read(&site->cs_pages_state[i]));
seq_printf(m, "]\n");
- cache_stats_print(&site->cs_locks, m, 0);
- seq_printf(m, " [");
- for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
- seq_printf(m, "%s: %u ", lstate[i],
- atomic_read(&site->cs_locks_state[i]));
- seq_printf(m, "]\n");
cache_stats_print(&cl_env_stats, m, 0);
seq_printf(m, "\n");
return 0;
@@ -492,6 +465,13 @@ EXPORT_SYMBOL(cl_site_stats_print);
* bz20044, bz22683.
*/
+static LIST_HEAD(cl_envs);
+static unsigned int cl_envs_cached_nr;
+static unsigned int cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
+ * for now.
+ */
+static DEFINE_SPINLOCK(cl_envs_guard);
+
struct cl_env {
void *ce_magic;
struct lu_env ce_lu;
@@ -674,8 +654,9 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
lu_context_enter(&cle->ce_ses);
env->le_ses = &cle->ce_ses;
cl_env_init0(cle, debug);
- } else
+ } else {
lu_env_fini(env);
+ }
}
if (rc != 0) {
kmem_cache_free(cl_env_kmem, cle);
@@ -684,8 +665,9 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
CL_ENV_INC(create);
CL_ENV_INC(total);
}
- } else
+ } else {
env = ERR_PTR(-ENOMEM);
+ }
return env;
}
@@ -697,6 +679,39 @@ static void cl_env_fini(struct cl_env *cle)
kmem_cache_free(cl_env_kmem, cle);
}
+static struct lu_env *cl_env_obtain(void *debug)
+{
+ struct cl_env *cle;
+ struct lu_env *env;
+
+ spin_lock(&cl_envs_guard);
+ LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
+ if (cl_envs_cached_nr > 0) {
+ int rc;
+
+ cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+ list_del_init(&cle->ce_linkage);
+ cl_envs_cached_nr--;
+ spin_unlock(&cl_envs_guard);
+
+ env = &cle->ce_lu;
+ rc = lu_env_refill(env);
+ if (rc == 0) {
+ cl_env_init0(cle, debug);
+ lu_context_enter(&env->le_ctx);
+ lu_context_enter(&cle->ce_ses);
+ } else {
+ cl_env_fini(cle);
+ env = ERR_PTR(rc);
+ }
+ } else {
+ spin_unlock(&cl_envs_guard);
+ env = cl_env_new(lu_context_tags_default,
+ lu_session_tags_default, debug);
+ }
+ return env;
+}
+
static inline struct cl_env *cl_env_container(struct lu_env *env)
{
return container_of(env, struct cl_env, ce_lu);
@@ -727,6 +742,8 @@ static struct lu_env *cl_env_peek(int *refcheck)
* Returns lu_env: if there already is an environment associated with the
* current thread, it is returned, otherwise, new environment is allocated.
*
+ * Allocations are amortized through the global cache of environments.
+ *
* \param refcheck pointer to a counter used to detect environment leaks. In
* the usual case cl_env_get() and cl_env_put() are called in the same lexical
* scope and pointer to the same integer is passed as \a refcheck. This is
@@ -740,10 +757,7 @@ struct lu_env *cl_env_get(int *refcheck)
env = cl_env_peek(refcheck);
if (!env) {
- env = cl_env_new(lu_context_tags_default,
- lu_session_tags_default,
- __builtin_return_address(0));
-
+ env = cl_env_obtain(__builtin_return_address(0));
if (!IS_ERR(env)) {
struct cl_env *cle;
@@ -787,6 +801,32 @@ static void cl_env_exit(struct cl_env *cle)
}
/**
+ * Finalizes and frees a given number of cached environments. This is done to
+ * (1) free some memory (not currently hooked into VM), or (2) release
+ * references to modules.
+ */
+unsigned int cl_env_cache_purge(unsigned int nr)
+{
+ struct cl_env *cle;
+
+ spin_lock(&cl_envs_guard);
+ for (; !list_empty(&cl_envs) && nr > 0; --nr) {
+ cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+ list_del_init(&cle->ce_linkage);
+ LASSERT(cl_envs_cached_nr > 0);
+ cl_envs_cached_nr--;
+ spin_unlock(&cl_envs_guard);
+
+ cl_env_fini(cle);
+ spin_lock(&cl_envs_guard);
+ }
+ LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
+ spin_unlock(&cl_envs_guard);
+ return nr;
+}
+EXPORT_SYMBOL(cl_env_cache_purge);
+
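/*
 * Editor's sketch: cl_env_cache_purge(~0) drains the entire cache, and
 * this patch wires exactly that into lu_context_key_quiesce() (see the
 * lu_object.c hunk below). The return value is the number of requested
 * purges that could not be satisfied because the cache ran empty.
 * foo_drop_cached_envs() is a hypothetical caller.
 */
static void foo_drop_cached_envs(void)
{
	cl_env_cache_purge(~0);
}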
+/**
* Release an environment.
*
* Decrement \a env reference counter. When counter drops to 0, nothing in
@@ -808,7 +848,22 @@ void cl_env_put(struct lu_env *env, int *refcheck)
cl_env_detach(cle);
cle->ce_debug = NULL;
cl_env_exit(cle);
- cl_env_fini(cle);
+ /*
+			 * cl_envs_cached_nr is checked without the lock on
+			 * purpose: the limit is only advisory, so a race at
+			 * worst lets the cache grow slightly past
+			 * cl_envs_cached_max.
+			 *
+			 * Return the environment to the cache only when it
+			 * was allocated with the standard tags.
+ */
+ if (cl_envs_cached_nr < cl_envs_cached_max &&
+ (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
+ (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
+ spin_lock(&cl_envs_guard);
+ list_add(&cle->ce_linkage, &cl_envs);
+ cl_envs_cached_nr++;
+ spin_unlock(&cl_envs_guard);
+ } else {
+ cl_env_fini(cle);
+ }
}
}
EXPORT_SYMBOL(cl_env_put);
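/*
 * Editor's sketch of the usual pairing: cl_env_get() and cl_env_put()
 * share one refcheck cookie in the same lexical scope. With this patch
 * the put path parks standard-tagged environments on cl_envs instead
 * of freeing them, so back-to-back get/put cycles become largely
 * allocation-free. foo_with_env() is a hypothetical caller.
 */
static int foo_with_env(void)
{
	struct lu_env *env;
	int refcheck;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);
	/* ... use env ... */
	cl_env_put(env, &refcheck);
	return 0;
}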
@@ -914,6 +969,104 @@ void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
}
EXPORT_SYMBOL(cl_lvb2attr);
+static struct cl_env cl_env_percpu[NR_CPUS];
+
+static int cl_env_percpu_init(void)
+{
+ struct cl_env *cle;
+ int tags = LCT_REMEMBER | LCT_NOREF;
+ int i, j;
+ int rc = 0;
+
+ for_each_possible_cpu(i) {
+ struct lu_env *env;
+
+ cle = &cl_env_percpu[i];
+ env = &cle->ce_lu;
+
+ INIT_LIST_HEAD(&cle->ce_linkage);
+ cle->ce_magic = &cl_env_init0;
+ rc = lu_env_init(env, LCT_CL_THREAD | tags);
+ if (rc == 0) {
+ rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags);
+ if (rc == 0) {
+ lu_context_enter(&cle->ce_ses);
+ env->le_ses = &cle->ce_ses;
+ } else {
+ lu_env_fini(env);
+ }
+ }
+ if (rc != 0)
+ break;
+ }
+ if (rc != 0) {
+		/* Entries 0 .. i-1 were initialized successfully; undo
+		 * exactly those, the remaining entries were never touched.
+		 */
+		for (j = 0; j < i; j++) {
+			cle = &cl_env_percpu[j];
+ lu_context_exit(&cle->ce_ses);
+ lu_context_fini(&cle->ce_ses);
+ lu_env_fini(&cle->ce_lu);
+ }
+ }
+
+ return rc;
+}
+
+static void cl_env_percpu_fini(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct cl_env *cle = &cl_env_percpu[i];
+
+ lu_context_exit(&cle->ce_ses);
+ lu_context_fini(&cle->ce_ses);
+ lu_env_fini(&cle->ce_lu);
+ }
+}
+
+static void cl_env_percpu_refill(void)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ lu_env_refill(&cl_env_percpu[i].ce_lu);
+}
+
+void cl_env_percpu_put(struct lu_env *env)
+{
+ struct cl_env *cle;
+ int cpu;
+
+ cpu = smp_processor_id();
+ cle = cl_env_container(env);
+ LASSERT(cle == &cl_env_percpu[cpu]);
+
+ cle->ce_ref--;
+ LASSERT(cle->ce_ref == 0);
+
+ CL_ENV_DEC(busy);
+ cl_env_detach(cle);
+ cle->ce_debug = NULL;
+
+ put_cpu();
+}
+EXPORT_SYMBOL(cl_env_percpu_put);
+
+struct lu_env *cl_env_percpu_get(void)
+{
+ struct cl_env *cle;
+
+ cle = &cl_env_percpu[get_cpu()];
+ cl_env_init0(cle, __builtin_return_address(0));
+
+ cl_env_attach(cle);
+ return &cle->ce_lu;
+}
+EXPORT_SYMBOL(cl_env_percpu_get);
+
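/*
 * Editor's sketch: cl_env_percpu_get() calls get_cpu(), so the caller
 * runs with preemption disabled until cl_env_percpu_put() drops the
 * CPU again; keep the section short and non-sleeping. foo_fast_path()
 * is a hypothetical caller.
 */
static void foo_fast_path(void)
{
	struct lu_env *env = cl_env_percpu_get();

	/* short, non-sleeping work with env */
	cl_env_percpu_put(env);
}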
/*****************************************************************************
*
* Temporary prototype thing: mirror obd-devices into cl devices.
@@ -944,8 +1097,9 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
CERROR("can't init device '%s', %d\n", typename, rc);
d = ERR_PTR(rc);
}
- } else
+ } else {
CERROR("Cannot allocate device: '%s'\n", typename);
+ }
return lu2cl_dev(d);
}
EXPORT_SYMBOL(cl_type_setup);
@@ -959,12 +1113,6 @@ void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
}
EXPORT_SYMBOL(cl_stack_fini);
-int cl_lock_init(void);
-void cl_lock_fini(void);
-
-int cl_page_init(void);
-void cl_page_fini(void);
-
static struct lu_context_key cl_key;
struct cl_thread_info *cl_env_info(const struct lu_env *env)
@@ -1059,17 +1207,13 @@ int cl_global_init(void)
if (result)
goto out_kmem;
- result = cl_lock_init();
+ result = cl_env_percpu_init();
if (result)
+		/* cl_env_percpu_init() unwinds itself on failure,
+		 * so no cl_env_percpu_fini() is needed here.
+		 */
goto out_context;
- result = cl_page_init();
- if (result)
- goto out_lock;
-
return 0;
-out_lock:
- cl_lock_fini();
+
out_context:
lu_context_key_degister(&cl_key);
out_kmem:
@@ -1084,8 +1228,7 @@ out_store:
*/
void cl_global_fini(void)
{
- cl_lock_fini();
- cl_page_fini();
+ cl_env_percpu_fini();
lu_context_key_degister(&cl_key);
lu_kmem_fini(cl_object_caches);
cl_env_store_fini();
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 394580016..b754f516e 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -36,6 +36,7 @@
* Client Lustre Page.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
@@ -48,8 +49,7 @@
#include "../include/cl_object.h"
#include "cl_internal.h"
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
- int radix);
+static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
# define PASSERT(env, page, expr) \
do { \
@@ -63,24 +63,11 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
/**
- * Internal version of cl_page_top, it should be called if the page is
- * known to be not freed, says with page referenced, or radix tree lock held,
- * or page owned.
- */
-static struct cl_page *cl_page_top_trusted(struct cl_page *page)
-{
- while (page->cp_parent)
- page = page->cp_parent;
- return page;
-}
-
-/**
* Internal version of cl_page_get().
*
* This function can be used to obtain initial reference to previously
* unreferenced cached object. It can be called only if concurrent page
- * reclamation is somehow prevented, e.g., by locking page radix-tree
- * (cl_object_header::hdr->coh_page_guard), or by keeping a lock on a VM page,
+ * reclamation is somehow prevented, e.g., by keeping a lock on a VM page,
* associated with \a page.
*
* Use with care! Not exported.
@@ -103,142 +90,12 @@ cl_page_at_trusted(const struct cl_page *page,
{
const struct cl_page_slice *slice;
- page = cl_page_top_trusted((struct cl_page *)page);
- do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
- return slice;
- }
- page = page->cp_child;
- } while (page);
- return NULL;
-}
-
-/**
- * Returns a page with given index in the given object, or NULL if no page is
- * found. Acquires a reference on \a page.
- *
- * Locking: called under cl_object_header::coh_page_guard spin-lock.
- */
-struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
-{
- struct cl_page *page;
-
- assert_spin_locked(&hdr->coh_page_guard);
-
- page = radix_tree_lookup(&hdr->coh_tree, index);
- if (page)
- cl_page_get_trust(page);
- return page;
-}
-EXPORT_SYMBOL(cl_page_lookup);
-
-/**
- * Returns a list of pages by a given [start, end] of \a obj.
- *
- * \param resched If not NULL, then we give up before hogging CPU for too
- * long and set *resched = 1, in that case caller should implement a retry
- * logic.
- *
- * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
- * crucial in the face of [offset, EOF] locks.
- *
- * Return at least one page in @queue unless there is no covered page.
- */
-int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, pgoff_t start, pgoff_t end,
- cl_page_gang_cb_t cb, void *cbdata)
-{
- struct cl_object_header *hdr;
- struct cl_page *page;
- struct cl_page **pvec;
- const struct cl_page_slice *slice;
- const struct lu_device_type *dtype;
- pgoff_t idx;
- unsigned int nr;
- unsigned int i;
- unsigned int j;
- int res = CLP_GANG_OKAY;
- int tree_lock = 1;
-
- idx = start;
- hdr = cl_object_header(obj);
- pvec = cl_env_info(env)->clt_pvec;
- dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
- spin_lock(&hdr->coh_page_guard);
- while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
- idx, CLT_PVEC_SIZE)) > 0) {
- int end_of_region = 0;
-
- idx = pvec[nr - 1]->cp_index + 1;
- for (i = 0, j = 0; i < nr; ++i) {
- page = pvec[i];
- pvec[i] = NULL;
-
- LASSERT(page->cp_type == CPT_CACHEABLE);
- if (page->cp_index > end) {
- end_of_region = 1;
- break;
- }
- if (page->cp_state == CPS_FREEING)
- continue;
-
- slice = cl_page_at_trusted(page, dtype);
- /*
- * Pages for lsm-less file has no underneath sub-page
- * for osc, in case of ...
- */
- PASSERT(env, page, slice);
-
- page = slice->cpl_page;
- /*
- * Can safely call cl_page_get_trust() under
- * radix-tree spin-lock.
- *
- * XXX not true, because @page is from object another
- * than @hdr and protected by different tree lock.
- */
- cl_page_get_trust(page);
- lu_ref_add_atomic(&page->cp_reference,
- "gang_lookup", current);
- pvec[j++] = page;
- }
-
- /*
- * Here a delicate locking dance is performed. Current thread
- * holds a reference to a page, but has to own it before it
- * can be placed into queue. Owning implies waiting, so
- * radix-tree lock is to be released. After a wait one has to
- * check that pages weren't truncated (cl_page_own() returns
- * error in the latter case).
- */
- spin_unlock(&hdr->coh_page_guard);
- tree_lock = 0;
-
- for (i = 0; i < j; ++i) {
- page = pvec[i];
- if (res == CLP_GANG_OKAY)
- res = (*cb)(env, io, page, cbdata);
- lu_ref_del(&page->cp_reference,
- "gang_lookup", current);
- cl_page_put(env, page);
- }
- if (nr < CLT_PVEC_SIZE || end_of_region)
- break;
-
- if (res == CLP_GANG_OKAY && need_resched())
- res = CLP_GANG_RESCHED;
- if (res != CLP_GANG_OKAY)
- break;
-
- spin_lock(&hdr->coh_page_guard);
- tree_lock = 1;
+ list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
+ return slice;
}
- if (tree_lock)
- spin_unlock(&hdr->coh_page_guard);
- return res;
+ return NULL;
}
-EXPORT_SYMBOL(cl_page_gang_lookup);
static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
@@ -247,17 +104,16 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
PASSERT(env, page, list_empty(&page->cp_batch));
PASSERT(env, page, !page->cp_owner);
PASSERT(env, page, !page->cp_req);
- PASSERT(env, page, !page->cp_parent);
PASSERT(env, page, page->cp_state == CPS_FREEING);
- might_sleep();
while (!list_empty(&page->cp_layers)) {
struct cl_page_slice *slice;
slice = list_entry(page->cp_layers.next,
struct cl_page_slice, cpl_linkage);
list_del_init(page->cp_layers.next);
- slice->cpl_ops->cpo_fini(env, slice);
+ if (unlikely(slice->cpl_ops->cpo_fini))
+ slice->cpl_ops->cpo_fini(env, slice);
}
lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
cl_object_put(env, obj);
@@ -276,10 +132,10 @@ static inline void cl_page_state_set_trust(struct cl_page *page,
*(enum cl_page_state *)&page->cp_state = state;
}
-static struct cl_page *cl_page_alloc(const struct lu_env *env,
- struct cl_object *o, pgoff_t ind,
- struct page *vmpage,
- enum cl_page_type type)
+struct cl_page *cl_page_alloc(const struct lu_env *env,
+ struct cl_object *o, pgoff_t ind,
+ struct page *vmpage,
+ enum cl_page_type type)
{
struct cl_page *page;
struct lu_object_header *head;
@@ -289,13 +145,11 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
int result = 0;
atomic_set(&page->cp_ref, 1);
- if (type == CPT_CACHEABLE) /* for radix tree */
- atomic_inc(&page->cp_ref);
page->cp_obj = o;
cl_object_get(o);
lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
page);
- page->cp_index = ind;
+ page->cp_vmpage = vmpage;
cl_page_state_set_trust(page, CPS_CACHED);
page->cp_type = type;
INIT_LIST_HEAD(&page->cp_layers);
@@ -306,10 +160,10 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
head = o->co_lu.lo_header;
list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
if (o->co_ops->coo_page_init) {
- result = o->co_ops->coo_page_init(env, o,
- page, vmpage);
+ result = o->co_ops->coo_page_init(env, o, page,
+ ind);
if (result != 0) {
- cl_page_delete0(env, page, 0);
+ cl_page_delete0(env, page);
cl_page_free(env, page);
page = ERR_PTR(result);
break;
@@ -321,6 +175,7 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
}
return page;
}
+EXPORT_SYMBOL(cl_page_alloc);
/**
* Returns a cl_page with index \a idx at the object \a o, and associated with
@@ -333,16 +188,13 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
*
* \see cl_object_find(), cl_lock_find()
*/
-static struct cl_page *cl_page_find0(const struct lu_env *env,
- struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type,
- struct cl_page *parent)
+struct cl_page *cl_page_find(const struct lu_env *env,
+ struct cl_object *o,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type)
{
struct cl_page *page = NULL;
- struct cl_page *ghost = NULL;
struct cl_object_header *hdr;
- int err;
LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
might_sleep();
@@ -368,120 +220,25 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
* reference on it.
*/
page = cl_vmpage_page(vmpage, o);
- PINVRNT(env, page,
- ergo(page,
- cl_page_vmpage(env, page) == vmpage &&
- (void *)radix_tree_lookup(&hdr->coh_tree,
- idx) == page));
- }
- if (page)
- return page;
+ if (page)
+ return page;
+ }
/* allocate and initialize cl_page */
page = cl_page_alloc(env, o, idx, vmpage, type);
- if (IS_ERR(page))
- return page;
-
- if (type == CPT_TRANSIENT) {
- if (parent) {
- LASSERT(!page->cp_parent);
- page->cp_parent = parent;
- parent->cp_child = page;
- }
- return page;
- }
-
- /*
- * XXX optimization: use radix_tree_preload() here, and change tree
- * gfp mask to GFP_KERNEL in cl_object_header_init().
- */
- spin_lock(&hdr->coh_page_guard);
- err = radix_tree_insert(&hdr->coh_tree, idx, page);
- if (err != 0) {
- ghost = page;
- /*
- * Noted by Jay: a lock on \a vmpage protects cl_page_find()
- * from this race, but
- *
- * 0. it's better to have cl_page interface "locally
- * consistent" so that its correctness can be reasoned
- * about without appealing to the (obscure world of) VM
- * locking.
- *
- * 1. handling this race allows ->coh_tree to remain
- * consistent even when VM locking is somehow busted,
- * which is very useful during diagnosing and debugging.
- */
- page = ERR_PTR(err);
- CL_PAGE_DEBUG(D_ERROR, env, ghost,
- "fail to insert into radix tree: %d\n", err);
- } else {
- if (parent) {
- LASSERT(!page->cp_parent);
- page->cp_parent = parent;
- parent->cp_child = page;
- }
- hdr->coh_pages++;
- }
- spin_unlock(&hdr->coh_page_guard);
-
- if (unlikely(ghost)) {
- cl_page_delete0(env, ghost, 0);
- cl_page_free(env, ghost);
- }
return page;
}
-
-struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type)
-{
- return cl_page_find0(env, o, idx, vmpage, type, NULL);
-}
EXPORT_SYMBOL(cl_page_find);
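/*
 * Editor's sketch: with the radix tree gone, cl_page_find() reduces to
 * "check vmpage->private, else allocate". The caller must hold the VM
 * page lock so the lookup cannot race with reclaim. foo_page_for() is
 * a hypothetical caller.
 */
static struct cl_page *foo_page_for(const struct lu_env *env,
				    struct cl_object *obj,
				    struct page *vmpage, pgoff_t idx)
{
	LASSERT(PageLocked(vmpage));
	return cl_page_find(env, obj, idx, vmpage, CPT_CACHEABLE);
}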
-struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- struct cl_page *parent)
-{
- return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
-}
-EXPORT_SYMBOL(cl_page_find_sub);
-
static inline int cl_page_invariant(const struct cl_page *pg)
{
- struct cl_object_header *header;
- struct cl_page *parent;
- struct cl_page *child;
- struct cl_io *owner;
-
/*
* Page invariant is protected by a VM lock.
*/
LINVRNT(cl_page_is_vmlocked(NULL, pg));
- header = cl_object_header(pg->cp_obj);
- parent = pg->cp_parent;
- child = pg->cp_child;
- owner = pg->cp_owner;
-
- return cl_page_in_use(pg) &&
- ergo(parent, parent->cp_child == pg) &&
- ergo(child, child->cp_parent == pg) &&
- ergo(child, pg->cp_obj != child->cp_obj) &&
- ergo(parent, pg->cp_obj != parent->cp_obj) &&
- ergo(owner && parent,
- parent->cp_owner == pg->cp_owner->ci_parent) &&
- ergo(owner && child, child->cp_owner->ci_parent == owner) &&
- /*
- * Either page is early in initialization (has neither child
- * nor parent yet), or it is in the object radix tree.
- */
- ergo(pg->cp_state < CPS_FREEING && pg->cp_type == CPT_CACHEABLE,
- (void *)radix_tree_lookup(&header->coh_tree,
- pg->cp_index) == pg ||
- (!child && !parent));
+ return cl_page_in_use_noref(pg);
}
static void cl_page_state_set0(const struct lu_env *env,
@@ -534,13 +291,9 @@ static void cl_page_state_set0(const struct lu_env *env,
old = page->cp_state;
PASSERT(env, page, allowed_transitions[old][state]);
CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
- for (; page; page = page->cp_child) {
- PASSERT(env, page, page->cp_state == old);
- PASSERT(env, page,
- equi(state == CPS_OWNED, page->cp_owner));
-
- cl_page_state_set_trust(page, state);
- }
+ PASSERT(env, page, page->cp_state == old);
+ PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner));
+ cl_page_state_set_trust(page, state);
}
static void cl_page_state_set(const struct lu_env *env,
@@ -574,8 +327,6 @@ EXPORT_SYMBOL(cl_page_get);
*/
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
- PASSERT(env, page, atomic_read(&page->cp_ref) > !!page->cp_parent);
-
CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
atomic_read(&page->cp_ref));
@@ -595,34 +346,10 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
EXPORT_SYMBOL(cl_page_put);
/**
- * Returns a VM page associated with a given cl_page.
- */
-struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
-{
- const struct cl_page_slice *slice;
-
- /*
- * Find uppermost layer with ->cpo_vmpage() method, and return its
- * result.
- */
- page = cl_page_top(page);
- do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- if (slice->cpl_ops->cpo_vmpage)
- return slice->cpl_ops->cpo_vmpage(env, slice);
- }
- page = page->cp_child;
- } while (page);
- LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
-}
-EXPORT_SYMBOL(cl_page_vmpage);
-
-/**
* Returns a cl_page associated with a VM page, and given cl_object.
*/
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
- struct cl_page *top;
struct cl_page *page;
KLASSERT(PageLocked(vmpage));
@@ -633,36 +360,15 @@ struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
* bottom-to-top pass.
*/
- /*
- * This loop assumes that ->private points to the top-most page. This
- * can be rectified easily.
- */
- top = (struct cl_page *)vmpage->private;
- if (!top)
- return NULL;
-
- for (page = top; page; page = page->cp_child) {
- if (cl_object_same(page->cp_obj, obj)) {
- cl_page_get_trust(page);
- break;
- }
+ page = (struct cl_page *)vmpage->private;
+ if (page) {
+ cl_page_get_trust(page);
+ LASSERT(page->cp_type == CPT_CACHEABLE);
}
- LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
return page;
}
EXPORT_SYMBOL(cl_vmpage_page);
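/*
 * Editor's sketch of the new invariant: for cacheable pages,
 * vmpage->private now points at the one and only cl_page, so the
 * lookup is a dereference plus a reference that the caller releases
 * with cl_page_put(). foo_lookup() is a hypothetical caller.
 */
static struct cl_page *foo_lookup(struct page *vmpage, struct cl_object *obj)
{
	struct cl_page *page;

	lock_page(vmpage);
	page = cl_vmpage_page(vmpage, obj);
	unlock_page(vmpage);
	return page;	/* NULL, or a referenced cl_page */
}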
-/**
- * Returns the top-page for a given page.
- *
- * \see cl_object_top(), cl_io_top()
- */
-struct cl_page *cl_page_top(struct cl_page *page)
-{
- return cl_page_top_trusted(page);
-}
-EXPORT_SYMBOL(cl_page_top);
-
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
const struct lu_device_type *dtype)
{
@@ -682,26 +388,43 @@ EXPORT_SYMBOL(cl_page_at);
int (*__method)_proto; \
\
__result = 0; \
- __page = cl_page_top(__page); \
- do { \
- list_for_each_entry(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method) { \
- __result = (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- if (__result != 0) \
- break; \
- } \
- } \
- __page = __page->cp_child; \
- } while (__page && __result == 0); \
+ list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + __op); \
+ if (__method) { \
+ __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
+ if (__result != 0) \
+ break; \
+ } \
+ } \
if (__result > 0) \
__result = 0; \
__result; \
})
+#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...) \
+({ \
+ const struct lu_env *__env = (_env); \
+ struct cl_page *__page = (_page); \
+ const struct cl_page_slice *__scan; \
+ int __result; \
+ ptrdiff_t __op = (_op); \
+ int (*__method)_proto; \
+ \
+ __result = 0; \
+ list_for_each_entry_reverse(__scan, &__page->cp_layers, \
+ cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + __op); \
+ if (__method) { \
+ __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
+ if (__result != 0) \
+ break; \
+ } \
+ } \
+ if (__result > 0) \
+ __result = 0; \
+ __result; \
+})
+
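/*
 * Editor's note: the reverse variant walks cp_layers bottom-to-top.
 * This patch uses it for cpo_is_under_lock() below and, via the
 * CL_PAGE_INVOID_REVERSE form, for cpo_delete(), so lower layers tear
 * down before upper ones. A call site looks like:
 *
 *	rc = CL_PAGE_INVOKE_REVERSE(env, page,
 *				    CL_PAGE_OP(cpo_is_under_lock),
 *				    (const struct lu_env *,
 *				     const struct cl_page_slice *,
 *				     struct cl_io *, pgoff_t *),
 *				    io, max_index);
 */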
#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
do { \
const struct lu_env *__env = (_env); \
@@ -710,18 +433,11 @@ do { \
ptrdiff_t __op = (_op); \
void (*__method)_proto; \
\
- __page = cl_page_top(__page); \
- do { \
- list_for_each_entry(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method) \
- (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- } \
- __page = __page->cp_child; \
- } while (__page); \
+ list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + __op); \
+ if (__method) \
+ (*__method)(__env, __scan, ## __VA_ARGS__); \
+ } \
} while (0)
#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
@@ -732,20 +448,11 @@ do { \
ptrdiff_t __op = (_op); \
void (*__method)_proto; \
\
- /* get to the bottom page. */ \
- while (__page->cp_child) \
- __page = __page->cp_child; \
- do { \
- list_for_each_entry_reverse(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method) \
- (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- } \
- __page = __page->cp_parent; \
- } while (__page); \
+ list_for_each_entry_reverse(__scan, &__page->cp_layers, cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + __op); \
+ if (__method) \
+ (*__method)(__env, __scan, ## __VA_ARGS__); \
+ } \
} while (0)
static int cl_page_invoke(const struct lu_env *env,
@@ -771,20 +478,17 @@ static void cl_page_invoid(const struct lu_env *env,
static void cl_page_owner_clear(struct cl_page *page)
{
- for (page = cl_page_top(page); page; page = page->cp_child) {
- if (page->cp_owner) {
- LASSERT(page->cp_owner->ci_owned_nr > 0);
- page->cp_owner->ci_owned_nr--;
- page->cp_owner = NULL;
- page->cp_task = NULL;
- }
+ if (page->cp_owner) {
+ LASSERT(page->cp_owner->ci_owned_nr > 0);
+ page->cp_owner->ci_owned_nr--;
+ page->cp_owner = NULL;
+ page->cp_task = NULL;
}
}
static void cl_page_owner_set(struct cl_page *page)
{
- for (page = cl_page_top(page); page; page = page->cp_child)
- page->cp_owner->ci_owned_nr++;
+ page->cp_owner->ci_owned_nr++;
}
void cl_page_disown0(const struct lu_env *env,
@@ -794,7 +498,7 @@ void cl_page_disown0(const struct lu_env *env,
state = pg->cp_state;
PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
- PINVRNT(env, pg, cl_page_invariant(pg));
+ PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
cl_page_owner_clear(pg);
if (state == CPS_OWNED)
@@ -815,8 +519,9 @@ void cl_page_disown0(const struct lu_env *env,
*/
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
+	struct cl_io *top = cl_io_top((struct cl_io *)io);
+
LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
- return pg->cp_state == CPS_OWNED && pg->cp_owner == io;
+ return pg->cp_state == CPS_OWNED && pg->cp_owner == top;
}
EXPORT_SYMBOL(cl_page_is_owned);
@@ -847,7 +552,6 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
PINVRNT(env, pg, !cl_page_is_owned(pg, io));
- pg = cl_page_top(pg);
io = cl_io_top(io);
if (pg->cp_state == CPS_FREEING) {
@@ -861,7 +565,7 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
if (result == 0) {
PASSERT(env, pg, !pg->cp_owner);
PASSERT(env, pg, !pg->cp_req);
- pg->cp_owner = io;
+ pg->cp_owner = cl_io_top(io);
pg->cp_task = current;
cl_page_owner_set(pg);
if (pg->cp_state != CPS_FREEING) {
@@ -914,12 +618,11 @@ void cl_page_assume(const struct lu_env *env,
{
PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
- pg = cl_page_top(pg);
io = cl_io_top(io);
cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
PASSERT(env, pg, !pg->cp_owner);
- pg->cp_owner = io;
+ pg->cp_owner = cl_io_top(io);
pg->cp_task = current;
cl_page_owner_set(pg);
cl_page_state_set(env, pg, CPS_OWNED);
@@ -943,7 +646,6 @@ void cl_page_unassume(const struct lu_env *env,
PINVRNT(env, pg, cl_page_is_owned(pg, io));
PINVRNT(env, pg, cl_page_invariant(pg));
- pg = cl_page_top(pg);
io = cl_io_top(io);
cl_page_owner_clear(pg);
cl_page_state_set(env, pg, CPS_CACHED);
@@ -968,9 +670,9 @@ EXPORT_SYMBOL(cl_page_unassume);
void cl_page_disown(const struct lu_env *env,
struct cl_io *io, struct cl_page *pg)
{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
+ PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
+ pg->cp_state == CPS_FREEING);
- pg = cl_page_top(pg);
io = cl_io_top(io);
cl_page_disown0(env, io, pg);
}
@@ -1001,12 +703,8 @@ EXPORT_SYMBOL(cl_page_discard);
 * pages, e.g., in an error handling cl_page_find()->cl_page_delete0()
* path. Doesn't check page invariant.
*/
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
- int radix)
+static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
{
- struct cl_page *tmp = pg;
-
- PASSERT(env, pg, pg == cl_page_top(pg));
PASSERT(env, pg, pg->cp_state != CPS_FREEING);
/*
@@ -1014,41 +712,11 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
*/
cl_page_owner_clear(pg);
- /*
- * unexport the page firstly before freeing it so that
- * the page content is considered to be invalid.
- * We have to do this because a CPS_FREEING cl_page may
- * be NOT under the protection of a cl_lock.
- * Afterwards, if this page is found by other threads, then this
- * page will be forced to reread.
- */
- cl_page_export(env, pg, 0);
cl_page_state_set0(env, pg, CPS_FREEING);
- CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
- (const struct lu_env *, const struct cl_page_slice *));
-
- if (tmp->cp_type == CPT_CACHEABLE) {
- if (!radix)
- /* !radix means that @pg is not yet in the radix tree,
- * skip removing it.
- */
- tmp = pg->cp_child;
- for (; tmp; tmp = tmp->cp_child) {
- void *value;
- struct cl_object_header *hdr;
-
- hdr = cl_object_header(tmp->cp_obj);
- spin_lock(&hdr->coh_page_guard);
- value = radix_tree_delete(&hdr->coh_tree,
- tmp->cp_index);
- PASSERT(env, tmp, value == tmp);
- PASSERT(env, tmp, hdr->coh_pages > 0);
- hdr->coh_pages--;
- spin_unlock(&hdr->coh_page_guard);
- cl_page_put(env, tmp);
- }
- }
+ CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
+ (const struct lu_env *,
+ const struct cl_page_slice *));
}
/**
@@ -1070,7 +738,6 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
* Once page reaches cl_page_state::CPS_FREEING, all remaining references will
* drain after some time, at which point page will be recycled.
*
- * \pre pg == cl_page_top(pg)
* \pre VM page is locked
* \post pg->cp_state == CPS_FREEING
*
@@ -1079,30 +746,11 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
PINVRNT(env, pg, cl_page_invariant(pg));
- cl_page_delete0(env, pg, 1);
+ cl_page_delete0(env, pg);
}
EXPORT_SYMBOL(cl_page_delete);
/**
- * Unmaps page from user virtual memory.
- *
- * Calls cl_page_operations::cpo_unmap() through all layers top-to-bottom. The
- * layer responsible for VM interaction has to unmap page from user space
- * virtual memory.
- *
- * \see cl_page_operations::cpo_unmap()
- */
-int cl_page_unmap(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
-{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- return cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_unmap));
-}
-EXPORT_SYMBOL(cl_page_unmap);
-
-/**
* Marks page up-to-date.
*
* Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
@@ -1129,7 +777,6 @@ int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
int result;
const struct cl_page_slice *slice;
- pg = cl_page_top_trusted((struct cl_page *)pg);
slice = container_of(pg->cp_layers.next,
const struct cl_page_slice, cpl_linkage);
PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked);
@@ -1241,7 +888,7 @@ void cl_page_completion(const struct lu_env *env,
cl_page_put(env, pg);
if (anchor)
- cl_sync_io_note(anchor, ioret);
+ cl_sync_io_note(env, anchor, ioret);
}
EXPORT_SYMBOL(cl_page_completion);
@@ -1276,44 +923,6 @@ int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
EXPORT_SYMBOL(cl_page_make_ready);
/**
- * Notify layers that high level io decided to place this page into a cache
- * for future transfer.
- *
- * The layer implementing transfer engine (osc) has to register this page in
- * its queues.
- *
- * \pre cl_page_is_owned(pg, io)
- * \post cl_page_is_owned(pg, io)
- *
- * \see cl_page_operations::cpo_cache_add()
- */
-int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt)
-{
- const struct cl_page_slice *scan;
- int result = 0;
-
- PINVRNT(env, pg, crt < CRT_NR);
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- if (crt >= CRT_NR)
- return -EINVAL;
-
- list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
- if (!scan->cpl_ops->io[crt].cpo_cache_add)
- continue;
-
- result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io);
- if (result != 0)
- break;
- }
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
- return result;
-}
-EXPORT_SYMBOL(cl_page_cache_add);
-
-/**
 * Called when a page is being written back at the kernel's initiative.
*
* \pre cl_page_is_owned(pg, io)
@@ -1344,68 +953,21 @@ EXPORT_SYMBOL(cl_page_flush);
* \see cl_page_operations::cpo_is_under_lock()
*/
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
+ struct cl_page *page, pgoff_t *max_index)
{
int rc;
PINVRNT(env, page, cl_page_invariant(page));
- rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
- PASSERT(env, page, rc != 0);
+ rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
+ (const struct lu_env *,
+ const struct cl_page_slice *,
+ struct cl_io *, pgoff_t *),
+ io, max_index);
return rc;
}
EXPORT_SYMBOL(cl_page_is_under_lock);
-static int page_prune_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
-{
- cl_page_own(env, io, page);
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- return CLP_GANG_OKAY;
-}
-
-/**
- * Purges all cached pages belonging to the object \a obj.
- */
-int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
-{
- struct cl_thread_info *info;
- struct cl_object *obj = cl_object_top(clobj);
- struct cl_io *io;
- int result;
-
- info = cl_env_info(env);
- io = &info->clt_io;
-
- /*
- * initialize the io. This is ugly since we never do IO in this
- * function, we just make cl_page_list functions happy. -jay
- */
- io->ci_obj = obj;
- io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, obj);
- if (result != 0) {
- cl_io_fini(env, io);
- return io->ci_result;
- }
-
- do {
- result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
- page_prune_cb, NULL);
- if (result == CLP_GANG_RESCHED)
- cond_resched();
- } while (result != CLP_GANG_OKAY);
-
- cl_io_fini(env, io);
- return result;
-}
-EXPORT_SYMBOL(cl_pages_prune);
-
/**
* Tells transfer engine that only part of a page is to be transmitted.
*
@@ -1431,9 +993,8 @@ void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_page *pg)
{
(*printer)(env, cookie,
- "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
+ "page@%p[%d %p %d %d %d %p %p %#x]\n",
pg, atomic_read(&pg->cp_ref), pg->cp_obj,
- pg->cp_index, pg->cp_parent, pg->cp_child,
pg->cp_state, pg->cp_error, pg->cp_type,
pg->cp_owner, pg->cp_req, pg->cp_flags);
}
@@ -1445,11 +1006,7 @@ EXPORT_SYMBOL(cl_page_header_print);
void cl_page_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_page *pg)
{
- struct cl_page *scan;
-
- for (scan = cl_page_top((struct cl_page *)pg); scan;
- scan = scan->cp_child)
- cl_page_header_print(env, cookie, printer, scan);
+ cl_page_header_print(env, cookie, printer, pg);
CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
(const struct lu_env *env,
const struct cl_page_slice *slice,
@@ -1509,21 +1066,13 @@ EXPORT_SYMBOL(cl_page_size);
* \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
*/
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj,
+ struct cl_object *obj, pgoff_t index,
const struct cl_page_operations *ops)
{
list_add_tail(&slice->cpl_linkage, &page->cp_layers);
slice->cpl_obj = obj;
+ slice->cpl_index = index;
slice->cpl_ops = ops;
slice->cpl_page = page;
}
EXPORT_SYMBOL(cl_page_slice_add);
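/*
 * Editor's sketch, mirroring echo_page_init() later in this patch:
 * coo_page_init() now receives the page index instead of the vmpage
 * and forwards it to cl_page_slice_add(), which records it in
 * cpl_index. struct foo_page and foo_page_ops are hypothetical.
 */
struct foo_page {
	struct cl_page_slice fp_cl;
};

static const struct cl_page_operations foo_page_ops = {
	/* every method, including cpo_fini, is now optional */
};

static int foo_page_init(const struct lu_env *env, struct cl_object *obj,
			 struct cl_page *page, pgoff_t index)
{
	struct foo_page *fp = cl_object_page_slice(obj, page);

	cl_page_slice_add(page, &fp->fp_cl, obj, index, &foo_page_ops);
	return 0;
}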
-
-int cl_page_init(void)
-{
- return 0;
-}
-
-void cl_page_fini(void)
-{
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index c2cf01596..f48816af8 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -60,6 +60,8 @@ unsigned int obd_dump_on_eviction;
EXPORT_SYMBOL(obd_dump_on_eviction);
unsigned int obd_max_dirty_pages = 256;
EXPORT_SYMBOL(obd_max_dirty_pages);
+atomic_t obd_unstable_pages;
+EXPORT_SYMBOL(obd_unstable_pages);
atomic_t obd_dirty_pages;
EXPORT_SYMBOL(obd_dirty_pages);
unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */
@@ -335,7 +337,6 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
err = 0;
goto out;
}
-
}
if (data->ioc_dev == OBD_DEV_BY_DEVNAME) {
@@ -461,7 +462,7 @@ static int obd_init_checks(void)
CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
ret = -EINVAL;
}
- if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) {
+ if ((u64val & ~PAGE_MASK) >= PAGE_SIZE) {
CWARN("mask failed: u64val %llu >= %llu\n", u64val,
(__u64)PAGE_SIZE);
ret = -EINVAL;
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 43a7f7a79..e4edfb2c0 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -68,8 +68,8 @@ int block_debug_check(char *who, void *addr, int end, __u64 off, __u64 id)
LASSERT(addr);
- ne_off = le64_to_cpu (off);
- id = le64_to_cpu (id);
+ ne_off = le64_to_cpu(off);
+ id = le64_to_cpu(id);
if (memcmp(addr, (char *)&ne_off, LPDS)) {
CDEBUG(D_ERROR, "%s: id %#llx offset %llu off: %#llx != %#llx\n",
who, id, off, *(__u64 *)addr, ne_off);
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index cf97b8f06..d95f11d62 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -604,7 +604,6 @@ int obd_init_caches(void)
out:
obd_cleanup_caches();
return -ENOMEM;
-
}
/* map connection to client */
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 8eddf206f..2cd452246 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -158,9 +158,7 @@ int obd_ioctl_popdata(void __user *arg, void *data, int len)
{
int err;
- err = copy_to_user(arg, data, len);
- if (err)
- err = -EFAULT;
+ err = copy_to_user(arg, data, len) ? -EFAULT : 0;
return err;
}
EXPORT_SYMBOL(obd_ioctl_popdata);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 992573eae..79194d8cb 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -265,7 +265,6 @@ repeat:
for (rec = (struct llog_rec_hdr *)buf;
(char *)rec < buf + LLOG_CHUNK_SIZE;
rec = (struct llog_rec_hdr *)((char *)rec + rec->lrh_len)) {
-
CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n",
rec, rec->lrh_type);
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index d93f42fee..5a1eae1de 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -49,7 +49,7 @@
static const char * const obd_connect_names[] = {
"read_only",
"lov_index",
- "unused",
+ "connect_from_mds",
"write_grant",
"server_lock",
"version",
@@ -122,6 +122,56 @@ int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep)
}
EXPORT_SYMBOL(obd_connect_flags2str);
+static void obd_connect_data_seqprint(struct seq_file *m,
+ struct obd_connect_data *ocd)
+{
+	u64 flags;
+
+ LASSERT(ocd);
+ flags = ocd->ocd_connect_flags;
+
+ seq_printf(m, " connect_data:\n"
+ " flags: %llx\n"
+ " instance: %u\n",
+ ocd->ocd_connect_flags,
+ ocd->ocd_instance);
+ if (flags & OBD_CONNECT_VERSION)
+ seq_printf(m, " target_version: %u.%u.%u.%u\n",
+ OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
+ OBD_OCD_VERSION_MINOR(ocd->ocd_version),
+ OBD_OCD_VERSION_PATCH(ocd->ocd_version),
+ OBD_OCD_VERSION_FIX(ocd->ocd_version));
+ if (flags & OBD_CONNECT_MDS)
+ seq_printf(m, " mdt_index: %d\n", ocd->ocd_group);
+ if (flags & OBD_CONNECT_GRANT)
+ seq_printf(m, " initial_grant: %d\n", ocd->ocd_grant);
+ if (flags & OBD_CONNECT_INDEX)
+ seq_printf(m, " target_index: %u\n", ocd->ocd_index);
+ if (flags & OBD_CONNECT_BRW_SIZE)
+ seq_printf(m, " max_brw_size: %d\n", ocd->ocd_brw_size);
+ if (flags & OBD_CONNECT_IBITS)
+ seq_printf(m, " ibits_known: %llx\n",
+ ocd->ocd_ibits_known);
+ if (flags & OBD_CONNECT_GRANT_PARAM)
+ seq_printf(m, " grant_block_size: %d\n"
+ " grant_inode_size: %d\n"
+ " grant_extent_overhead: %d\n",
+ ocd->ocd_blocksize,
+ ocd->ocd_inodespace,
+ ocd->ocd_grant_extent);
+ if (flags & OBD_CONNECT_TRANSNO)
+ seq_printf(m, " first_transno: %llx\n",
+ ocd->ocd_transno);
+ if (flags & OBD_CONNECT_CKSUM)
+ seq_printf(m, " cksum_types: %#x\n",
+ ocd->ocd_cksum_types);
+ if (flags & OBD_CONNECT_MAX_EASIZE)
+ seq_printf(m, " max_easize: %d\n", ocd->ocd_max_easize);
+ if (flags & OBD_CONNECT_MAXBYTES)
+ seq_printf(m, " max_object_bytes: %llx\n",
+ ocd->ocd_maxbytes);
+}
+
int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
int mult)
{
@@ -624,6 +674,7 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
struct obd_device *obd = data;
struct obd_import *imp;
struct obd_import_conn *conn;
+ struct obd_connect_data *ocd;
int j;
int k;
int rw = 0;
@@ -635,9 +686,9 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
return rc;
imp = obd->u.cli.cl_import;
+ ocd = &imp->imp_connect_data;
- seq_printf(m,
- "import:\n"
+ seq_printf(m, "import:\n"
" name: %s\n"
" target: %s\n"
" state: %s\n"
@@ -649,9 +700,9 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
imp->imp_connect_data.ocd_instance);
obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags,
", ");
- seq_printf(m,
- " ]\n"
- " import_flags: [ ");
+ seq_printf(m, " ]\n");
+ obd_connect_data_seqprint(m, ocd);
+ seq_printf(m, " import_flags: [ ");
obd_import_flags2str(imp, m);
seq_printf(m,
@@ -694,8 +745,9 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
do_div(sum, ret.lc_count);
ret.lc_sum = sum;
- } else
+ } else {
ret.lc_sum = 0;
+ }
seq_printf(m,
" rpcs:\n"
" inflight: %u\n"
@@ -1471,10 +1523,10 @@ EXPORT_SYMBOL(lprocfs_oh_tally);
void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value)
{
- unsigned int val;
+ unsigned int val = 0;
- for (val = 0; ((1 << val) < value) && (val <= OBD_HIST_MAX); val++)
- ;
+ if (likely(value != 0))
+ val = min(fls(value - 1), OBD_HIST_MAX);
lprocfs_oh_tally(oh, val);
}
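/*
 * Editor's check of the fls() rewrite, same buckets as the old loop:
 * value 1 -> fls(0) = 0; value 2 -> fls(1) = 1; value 5 -> fls(4) = 3
 * (the old loop likewise stopped at 1 << 3 = 8 >= 5); value 0 skips
 * the computation via the guard and stays in bucket 0.
 */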
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 978568ada..e04385760 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -55,6 +55,7 @@
#include "../include/lustre_disk.h"
#include "../include/lustre_fid.h"
#include "../include/lu_object.h"
+#include "../include/cl_object.h"
#include "../include/lu_ref.h"
#include <linux/list.h>
@@ -103,7 +104,6 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
if (lu_object_is_dying(top)) {
-
/*
* somebody may be waiting for this, currently only
* used for cl_object, see cl_object_put_last().
@@ -357,7 +357,6 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
if (count > 0 && --count == 0)
break;
-
}
cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
cond_resched();
@@ -715,8 +714,9 @@ struct lu_object *lu_object_find_slice(const struct lu_env *env,
obj = lu_object_locate(top->lo_header, dev->ld_type);
if (!obj)
lu_object_put(env, top);
- } else
+ } else {
obj = top;
+ }
return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
@@ -935,7 +935,7 @@ static void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
* Initialize site \a s, with \a d as the top level device.
*/
#define LU_SITE_BITS_MIN 12
-#define LU_SITE_BITS_MAX 24
+#define LU_SITE_BITS_MAX 19
/**
* total 256 buckets, we don't want too many buckets because:
* - consume too much memory
@@ -1468,6 +1468,7 @@ void lu_context_key_quiesce(struct lu_context_key *key)
/*
* XXX layering violation.
*/
+ cl_env_cache_purge(~0);
key->lct_tags |= LCT_QUIESCENT;
/*
* XXX memory barrier has to go here.
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index 5f812460b..b1abe023b 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -163,8 +163,9 @@ int class_del_uuid(const char *uuid)
break;
}
}
- } else
+ } else {
list_splice_init(&g_uuid_list, &deathrow);
+ }
spin_unlock(&g_uuid_lock);
if (uuid && list_empty(&deathrow)) {
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 5395e994d..cb1d65c3d 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -606,7 +606,7 @@ static int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
return rc;
}
-LIST_HEAD(lustre_profile_list);
+static LIST_HEAD(lustre_profile_list);
struct lustre_profile *class_get_profile(const char *prof)
{
@@ -961,7 +961,6 @@ int class_process_config(struct lustre_cfg *lcfg)
default: {
err = obd_process_config(obd, sizeof(*lcfg), lcfg);
goto out;
-
}
}
out:
@@ -1001,7 +1000,13 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
for (i = 1; i < lcfg->lcfg_bufcount; i++) {
key = lustre_cfg_buf(lcfg, i);
/* Strip off prefix */
- class_match_param(key, prefix, &key);
+ if (class_match_param(key, prefix, &key)) {
+ /*
+ * If the prefix doesn't match, return error so we
+ * can pass it down the stack
+ */
+ return -ENOSYS;
+ }
sval = strchr(key, '=');
if (!sval || (*(sval + 1) == 0)) {
CERROR("Can't parse param %s (missing '=')\n", key);
@@ -1034,18 +1039,14 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
j++;
}
if (!matched) {
- /* If the prefix doesn't match, return error so we
- * can pass it down the stack
- */
- if (strnchr(key, keylen, '.'))
- return -ENOSYS;
- CERROR("%s: unknown param %s\n",
+ CERROR("%.*s: %s unknown param %s\n",
+ (int)strlen(prefix) - 1, prefix,
(char *)lustre_cfg_string(lcfg, 0), key);
/* rc = -EINVAL; continue parsing other params */
skip++;
} else if (rc < 0) {
- CERROR("writing proc entry %s err %d\n",
- var->name, rc);
+ CERROR("%s: error writing proc entry '%s': rc = %d\n",
+ prefix, var->name, rc);
rc = 0;
} else {
CDEBUG(D_CONFIG, "%s.%.*s: Set parameter %.*s=%s\n",
@@ -1350,6 +1351,7 @@ static int class_config_parse_rec(struct llog_rec_hdr *rec, char *buf,
lustre_cfg_string(lcfg, i));
}
}
+ ptr += snprintf(ptr, end - ptr, "\n");
/* return consumed bytes */
rc = ptr - buf;
return rc;
@@ -1368,7 +1370,7 @@ int class_config_dump_handler(const struct lu_env *env,
if (rec->lrh_type == OBD_CFG_REC) {
class_config_parse_rec(rec, outstr, 256);
- LCONSOLE(D_WARNING, " %s\n", outstr);
+ LCONSOLE(D_WARNING, " %s", outstr);
} else {
LCONSOLE(D_WARNING, "unhandled lrh_type: %#x\n", rec->lrh_type);
rc = -EINVAL;
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index d3e28a389..e0c90adc7 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -102,7 +102,7 @@ int lustre_process_log(struct super_block *sb, char *logname,
LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s' failed from the MGS (%d). Make sure this client and the MGS are running compatible versions of Lustre.\n",
mgc->obd_name, logname, rc);
- if (rc)
+ else if (rc)
LCONSOLE_ERROR_MSG(0x15c, "%s: The configuration from log '%s' failed (%d). This may be the result of communication errors between this node and the MGS, a bad configuration, or other errors. See the syslog for more information.\n",
mgc->obd_name, logname,
rc);
@@ -307,7 +307,8 @@ int lustre_start_mgc(struct super_block *sb)
while (class_parse_nid(ptr, &nid, &ptr) == 0) {
rc = do_lcfg(mgcname, nid,
LCFG_ADD_UUID, niduuid, NULL, NULL, NULL);
- i++;
+ if (!rc)
+ i++;
/* Stop at the first failover nid */
if (*ptr == ':')
break;
@@ -345,16 +346,18 @@ int lustre_start_mgc(struct super_block *sb)
sprintf(niduuid, "%s_%x", mgcname, i);
j = 0;
while (class_parse_nid_quiet(ptr, &nid, &ptr) == 0) {
- j++;
- rc = do_lcfg(mgcname, nid,
- LCFG_ADD_UUID, niduuid, NULL, NULL, NULL);
+ rc = do_lcfg(mgcname, nid, LCFG_ADD_UUID, niduuid,
+ NULL, NULL, NULL);
+ if (!rc)
+ ++j;
if (*ptr == ':')
break;
}
if (j > 0) {
rc = do_lcfg(mgcname, 0, LCFG_ADD_CONN,
niduuid, NULL, NULL, NULL);
- i++;
+ if (!rc)
+ i++;
} else {
/* at ":/fsname" */
break;
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index e6436cb4a..748e33f01 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -185,8 +185,7 @@ void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, u32 valid)
op_data->op_attr.ia_valid |= ATTR_BLOCKS;
}
if (valid & OBD_MD_FLFLAGS) {
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags =
- oa->o_flags;
+ op_data->op_attr_flags = oa->o_flags;
op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
}
}
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 1e83669c2..91ef06f17 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -81,7 +81,6 @@ struct echo_object_conf {
struct echo_page {
struct cl_page_slice ep_cl;
struct mutex ep_lock;
- struct page *ep_vmpage;
};
struct echo_lock {
@@ -164,15 +163,13 @@ static int cl_echo_object_put(struct echo_object *eco);
static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
struct page **pages, int npages, int async);
-static struct echo_thread_info *echo_env_info(const struct lu_env *env);
-
struct echo_thread_info {
struct echo_object_conf eti_conf;
struct lustre_md eti_md;
struct cl_2queue eti_queue;
struct cl_io eti_io;
- struct cl_lock_descr eti_descr;
+ struct cl_lock eti_lock;
struct lu_fid eti_fid;
struct lu_fid eti_fid2;
};
@@ -219,12 +216,6 @@ static struct lu_kmem_descr echo_caches[] = {
*
* @{
*/
-static struct page *echo_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- return cl2echo_page(slice)->ep_vmpage;
-}
-
static int echo_page_own(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *io, int nonblock)
@@ -273,12 +264,10 @@ static void echo_page_completion(const struct lu_env *env,
static void echo_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- struct echo_page *ep = cl2echo_page(slice);
struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
- struct page *vmpage = ep->ep_vmpage;
atomic_dec(&eco->eo_npages);
- put_page(vmpage);
+ put_page(slice->cpl_page->cp_vmpage);
}
static int echo_page_prep(const struct lu_env *env,
@@ -295,7 +284,8 @@ static int echo_page_print(const struct lu_env *env,
struct echo_page *ep = cl2echo_page(slice);
(*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
- ep, mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
+ ep, mutex_is_locked(&ep->ep_lock),
+ slice->cpl_page->cp_vmpage);
return 0;
}
@@ -303,7 +293,6 @@ static const struct cl_page_operations echo_page_ops = {
.cpo_own = echo_page_own,
.cpo_disown = echo_page_disown,
.cpo_discard = echo_page_discard,
- .cpo_vmpage = echo_page_vmpage,
.cpo_fini = echo_page_fini,
.cpo_print = echo_page_print,
.cpo_is_vmlocked = echo_page_is_vmlocked,
@@ -336,26 +325,8 @@ static void echo_lock_fini(const struct lu_env *env,
kmem_cache_free(echo_lock_kmem, ecl);
}
-static void echo_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct echo_lock *ecl = cl2echo_lock(slice);
-
- LASSERT(list_empty(&ecl->el_chain));
-}
-
-static int echo_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *unused)
-{
- return 1;
-}
-
static struct cl_lock_operations echo_lock_ops = {
.clo_fini = echo_lock_fini,
- .clo_delete = echo_lock_delete,
- .clo_fits_into = echo_lock_fits_into
};
/** @} echo_lock */
@@ -367,15 +338,14 @@ static struct cl_lock_operations echo_lock_ops = {
* @{
*/
static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
struct echo_page *ep = cl_object_page_slice(obj, page);
struct echo_object *eco = cl2echo_obj(obj);
- ep->ep_vmpage = vmpage;
- get_page(vmpage);
+ get_page(page->cp_vmpage);
mutex_init(&ep->ep_lock);
- cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
+ cl_page_slice_add(page, &ep->ep_cl, obj, index, &echo_page_ops);
atomic_inc(&eco->eo_npages);
return 0;
}
@@ -568,6 +538,8 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env,
obj = &echo_obj2cl(eco)->co_lu;
cl_object_header_init(hdr);
+ hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
+
lu_object_init(obj, &hdr->coh_lu, dev);
lu_object_add_top(&hdr->coh_lu, obj);
@@ -694,8 +666,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
struct obd_device *obd = NULL; /* to keep compiler happy */
struct obd_device *tgt;
const char *tgt_type_name;
- int rc;
- int cleanup = 0;
+ int rc, err;
ed = kzalloc(sizeof(*ed), GFP_NOFS);
if (!ed) {
@@ -703,16 +674,14 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
goto out;
}
- cleanup = 1;
cd = &ed->ed_cl;
rc = cl_device_init(cd, t);
if (rc)
- goto out;
+ goto out_free;
cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
cd->cd_ops = &echo_device_cl_ops;
- cleanup = 2;
obd = class_name2obd(lustre_cfg_string(cfg, 0));
LASSERT(obd);
LASSERT(env);
@@ -722,28 +691,25 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
CERROR("Can not find tgt device %s\n",
lustre_cfg_string(cfg, 1));
rc = -ENODEV;
- goto out;
+ goto out_device_fini;
}
next = tgt->obd_lu_dev;
if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
CERROR("echo MDT client must be run on server\n");
rc = -EOPNOTSUPP;
- goto out;
+ goto out_device_fini;
}
rc = echo_site_init(env, ed);
if (rc)
- goto out;
-
- cleanup = 3;
+ goto out_device_fini;
rc = echo_client_setup(env, obd, cfg);
if (rc)
- goto out;
+ goto out_site_fini;
ed->ed_ec = &obd->u.echo_client;
- cleanup = 4;
/* if echo client is to be stacked upon ost device, the next is
* NULL since ost is not a clio device so far
@@ -755,7 +721,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
if (next) {
if (next->ld_site) {
rc = -EBUSY;
- goto out;
+ goto out_cleanup;
}
next->ld_site = &ed->ed_site->cs_lu;
@@ -763,7 +729,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
next->ld_type->ldt_name,
NULL);
if (rc)
- goto out;
+ goto out_cleanup;
} else {
LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
@@ -771,27 +737,19 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
ed->ed_next = next;
return &cd->cd_lu_dev;
-out:
- switch (cleanup) {
- case 4: {
- int rc2;
-
- rc2 = echo_client_cleanup(obd);
- if (rc2)
- CERROR("Cleanup obd device %s error(%d)\n",
- obd->obd_name, rc2);
- }
- case 3:
- echo_site_fini(env, ed);
- case 2:
- cl_device_fini(&ed->ed_cl);
- case 1:
- kfree(ed);
- case 0:
- default:
- break;
- }
+out_cleanup:
+ err = echo_client_cleanup(obd);
+ if (err)
+ CERROR("Cleanup obd device %s error(%d)\n",
+ obd->obd_name, err);
+out_site_fini:
+ echo_site_fini(env, ed);
+out_device_fini:
+ cl_device_fini(&ed->ed_cl);
+out_free:
+ kfree(ed);
+out:
return ERR_PTR(rc);
}
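
The rewritten error path above follows the standard kernel goto-unwind idiom: each failure jumps to a label that tears down only what was already set up, with labels ordered in reverse of initialization, replacing the error-prone cleanup-counter switch. A minimal standalone sketch of the idiom (all names illustrative):

	/* goto-unwind sketch; alloc_a/init_b/init_c are stand-ins. */
	static int alloc_a(void) { return 0; }
	static void free_a(void) { }
	static int init_b(void)  { return 0; }
	static void fini_b(void) { }
	static int init_c(void)  { return 0; }

	static int setup_example(void)
	{
		int rc;

		rc = alloc_a();
		if (rc)
			goto out;		/* nothing to undo yet */
		rc = init_b();
		if (rc)
			goto out_free_a;	/* undo alloc_a() only */
		rc = init_c();
		if (rc)
			goto out_fini_b;	/* undo init_b(), then alloc_a() */
		return 0;

	out_fini_b:
		fini_b();
	out_free_a:
		free_a();
	out:
		return rc;
	}
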
@@ -819,16 +777,7 @@ static void echo_lock_release(const struct lu_env *env,
{
struct cl_lock *clk = echo_lock2cl(ecl);
- cl_lock_get(clk);
- cl_unuse(env, clk);
- cl_lock_release(env, clk, "ec enqueue", ecl->el_object);
- if (!still_used) {
- cl_lock_mutex_get(env, clk);
- cl_lock_cancel(env, clk);
- cl_lock_delete(env, clk);
- cl_lock_mutex_put(env, clk);
- }
- cl_lock_put(env, clk);
+ cl_lock_release(env, clk);
}
static struct lu_device *echo_device_free(const struct lu_env *env,
@@ -1022,9 +971,11 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
info = echo_env_info(env);
io = &info->eti_io;
- descr = &info->eti_descr;
+ lck = &info->eti_lock;
obj = echo_obj2cl(eco);
+ memset(lck, 0, sizeof(*lck));
+ descr = &lck->cll_descr;
descr->cld_obj = obj;
descr->cld_start = cl_index(obj, start);
descr->cld_end = cl_index(obj, end);
@@ -1032,25 +983,20 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
descr->cld_enq_flags = enqflags;
io->ci_obj = obj;
- lck = cl_lock_request(env, io, descr, "ec enqueue", eco);
- if (lck) {
+ rc = cl_lock_request(env, io, lck);
+ if (rc == 0) {
struct echo_client_obd *ec = eco->eo_dev->ed_ec;
struct echo_lock *el;
- rc = cl_wait(env, lck);
- if (rc == 0) {
- el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
- spin_lock(&ec->ec_lock);
- if (list_empty(&el->el_chain)) {
- list_add(&el->el_chain, &ec->ec_locks);
- el->el_cookie = ++ec->ec_unique;
- }
- atomic_inc(&el->el_refcount);
- *cookie = el->el_cookie;
- spin_unlock(&ec->ec_lock);
- } else {
- cl_lock_release(env, lck, "ec enqueue", current);
+ el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
+ spin_lock(&ec->ec_lock);
+ if (list_empty(&el->el_chain)) {
+ list_add(&el->el_chain, &ec->ec_locks);
+ el->el_cookie = ++ec->ec_unique;
}
+ atomic_inc(&el->el_refcount);
+ *cookie = el->el_cookie;
+ spin_unlock(&ec->ec_lock);
}
return rc;
}
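
Note the inverted ownership in the new CLIO locking API: the caller embeds struct cl_lock (here in echo_thread_info), and cl_lock_request() both enqueues and waits, so the separate cl_wait()/cl_unuse() steps disappear. A hedged sketch of the new calling convention, assuming the CLIO definitions from cl_object.h (the demo function name is illustrative):

	/* Sketch: caller-owned cl_lock, single request/release pair. */
	static int demo_lock_range(const struct lu_env *env, struct cl_io *io,
				   struct cl_object *obj, struct cl_lock *lck,
				   pgoff_t start, pgoff_t end)
	{
		struct cl_lock_descr *descr = &lck->cll_descr;
		int rc;

		memset(lck, 0, sizeof(*lck));
		descr->cld_obj   = obj;
		descr->cld_start = start;
		descr->cld_end   = end;
		descr->cld_mode  = CLM_WRITE;

		rc = cl_lock_request(env, io, lck);	/* enqueue + wait */
		if (rc == 0)
			cl_lock_release(env, lck);	/* single teardown call */
		return rc;
	}
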
@@ -1085,22 +1031,17 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
return 0;
}
-static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io,
- enum cl_req_type unused, struct cl_2queue *queue)
+static void echo_commit_callback(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page)
{
- struct cl_page *clp;
- struct cl_page *temp;
- int result = 0;
+ struct echo_thread_info *info;
+ struct cl_2queue *queue;
- cl_page_list_for_each_safe(clp, temp, &queue->c2_qin) {
- int rc;
+ info = echo_env_info(env);
+ LASSERT(io == &info->eti_io);
- rc = cl_page_cache_add(env, io, clp, CRT_WRITE);
- if (rc == 0)
- continue;
- result = result ?: rc;
- }
- return result;
+ queue = &info->eti_queue;
+ cl_page_list_add(&queue->c2_qout, page);
}
static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
@@ -1119,7 +1060,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
int rc;
int i;
- LASSERT((offset & ~CFS_PAGE_MASK) == 0);
+ LASSERT((offset & ~PAGE_MASK) == 0);
LASSERT(ed->ed_next);
env = cl_env_get(&refcheck);
if (IS_ERR(env))
@@ -1179,7 +1120,9 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
async = async && (typ == CRT_WRITE);
if (async)
- rc = cl_echo_async_brw(env, io, typ, queue);
+ rc = cl_io_commit_async(env, io, &queue->c2_qin,
+ 0, PAGE_SIZE,
+ echo_commit_callback);
else
rc = cl_io_submit_sync(env, io, typ, queue, 0);
CDEBUG(D_INFO, "echo_client %s write returns %d\n",
@@ -1387,7 +1330,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
if (count <= 0 ||
- (count & (~CFS_PAGE_MASK)) != 0)
+ (count & (~PAGE_MASK)) != 0)
return -EINVAL;
/* XXX think again with misaligned I/O */
@@ -1409,7 +1352,6 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
for (i = 0, pgp = pga, off = offset;
i < npages;
i++, pgp++, off += PAGE_SIZE) {
-
LASSERT(!pgp->pg); /* for cleanup */
rc = -ENOMEM;
@@ -1470,7 +1412,7 @@ static int echo_client_prep_commit(const struct lu_env *env,
u64 npages, tot_pages;
int i, ret = 0, brw_flags = 0;
- if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0)
+ if (count <= 0 || (count & (~PAGE_MASK)) != 0)
return -EINVAL;
npages = batch >> PAGE_SHIFT;
@@ -1886,7 +1828,6 @@ static int __init obdecho_init(void)
static void /*__exit*/ obdecho_exit(void)
{
echo_client_exit();
-
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index a3358c39b..33a113213 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -121,9 +121,9 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
atomic_add(added, &osc_pool_req_count);
}
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_max_rpcs_in_flight = val;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return count;
}
@@ -139,9 +139,9 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj,
long val;
int mult;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
val = cli->cl_dirty_max;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
mult = 1 << 20;
return lprocfs_read_frac_helper(buf, PAGE_SIZE, val, mult);
@@ -169,10 +169,10 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
pages_number > totalram_pages / 4) /* 1/4 of RAM */
return -ERANGE;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return count;
}
@@ -222,8 +222,16 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
return -ERANGE;
rc = atomic_read(&cli->cl_lru_in_list) - pages_number;
- if (rc > 0)
- (void)osc_lru_shrink(cli, rc);
+ if (rc > 0) {
+ struct lu_env *env;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (!IS_ERR(env)) {
+ (void)osc_lru_shrink(env, cli, rc, true);
+ cl_env_put(env, &refcheck);
+ }
+ }
return count;
}
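
osc_lru_shrink() now takes a lu_env, so the sysfs write handler above borrows one for the duration of the call. A hedged sketch of the cl_env borrow pattern (the helper name is illustrative):

	/* Sketch: borrow a per-thread lu_env around an env-taking call. */
	static void demo_shrink_with_env(struct client_obd *cli, int nr)
	{
		struct lu_env *env;
		int refcheck;

		env = cl_env_get(&refcheck);	/* borrow a thread env */
		if (IS_ERR(env))
			return;			/* no env available */
		(void)osc_lru_shrink(env, cli, nr, true);
		cl_env_put(env, &refcheck);	/* return it */
	}
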
@@ -239,9 +247,9 @@ static ssize_t cur_dirty_bytes_show(struct kobject *kobj,
struct client_obd *cli = &dev->u.cli;
int len;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
len = sprintf(buf, "%lu\n", cli->cl_dirty);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return len;
}
@@ -256,9 +264,9 @@ static ssize_t cur_grant_bytes_show(struct kobject *kobj,
struct client_obd *cli = &dev->u.cli;
int len;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
len = sprintf(buf, "%lu\n", cli->cl_avail_grant);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return len;
}
@@ -279,12 +287,12 @@ static ssize_t cur_grant_bytes_store(struct kobject *kobj,
return rc;
/* this is only for shrinking grant */
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (val >= cli->cl_avail_grant) {
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return -EINVAL;
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
if (cli->cl_import->imp_state == LUSTRE_IMP_FULL)
rc = osc_shrink_grant_to_target(cli, val);
@@ -303,9 +311,9 @@ static ssize_t cur_lost_grant_bytes_show(struct kobject *kobj,
struct client_obd *cli = &dev->u.cli;
int len;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
len = sprintf(buf, "%lu\n", cli->cl_lost_grant);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return len;
}
@@ -577,14 +585,31 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
return -ERANGE;
}
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_max_pages_per_rpc = val;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return count;
}
LUSTRE_RW_ATTR(max_pages_per_rpc);
+static ssize_t unstable_stats_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct obd_device *dev = container_of(kobj, struct obd_device,
+ obd_kobj);
+ struct client_obd *cli = &dev->u.cli;
+ int pages, mb;
+
+ pages = atomic_read(&cli->cl_unstable_count);
+ mb = (pages * PAGE_SIZE) >> 20;
+
+ return sprintf(buf, "unstable_pages: %8d\n"
+ "unstable_mb: %8d\n", pages, mb);
+}
+LUSTRE_RO_ATTR(unstable_stats);
+
LPROC_SEQ_FOPS_RO_TYPE(osc, connect_flags);
LPROC_SEQ_FOPS_RO_TYPE(osc, server_uuid);
LPROC_SEQ_FOPS_RO_TYPE(osc, conn_uuid);
@@ -623,7 +648,7 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
ktime_get_real_ts64(&now);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
seq_printf(seq, "snapshot_time: %llu.%9lu (secs.usecs)\n",
(s64)now.tv_sec, (unsigned long)now.tv_nsec);
@@ -707,7 +732,7 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
break;
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return 0;
}
@@ -794,6 +819,7 @@ static struct attribute *osc_attrs[] = {
&lustre_attr_max_pages_per_rpc.attr,
&lustre_attr_max_rpcs_in_flight.attr,
&lustre_attr_resend_count.attr,
+ &lustre_attr_unstable_stats.attr,
NULL,
};
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 5f25bf83d..5a14bea96 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -76,6 +76,8 @@ static inline char *ext_flags(struct osc_extent *ext, char *flags)
*buf++ = ext->oe_rw ? 'r' : 'w';
if (ext->oe_intree)
*buf++ = 'i';
+ if (ext->oe_sync)
+ *buf++ = 'S';
if (ext->oe_srvlock)
*buf++ = 's';
if (ext->oe_hp)
@@ -121,9 +123,13 @@ static const char *oes_strings[] = {
__ext->oe_grants, __ext->oe_nr_pages, \
list_empty_marker(&__ext->oe_pages), \
waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \
- __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner, \
+ __ext->oe_dlmlock, __ext->oe_mppr, __ext->oe_owner, \
/* ----- part 4 ----- */ \
## __VA_ARGS__); \
+ if (lvl == D_ERROR && __ext->oe_dlmlock) \
+ LDLM_ERROR(__ext->oe_dlmlock, "extent: %p\n", __ext); \
+ else \
+ LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p\n", __ext); \
} while (0)
#undef EASSERTF
@@ -240,20 +246,25 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
goto out;
}
- if (!ext->oe_osclock && ext->oe_grants > 0) {
+ if (ext->oe_sync && ext->oe_grants > 0) {
rc = 90;
goto out;
}
- if (ext->oe_osclock) {
- struct cl_lock_descr *descr;
+ if (ext->oe_dlmlock) {
+ struct ldlm_extent *extent;
- descr = &ext->oe_osclock->cll_descr;
- if (!(descr->cld_start <= ext->oe_start &&
- descr->cld_end >= ext->oe_max_end)) {
+ extent = &ext->oe_dlmlock->l_policy_data.l_extent;
+ if (!(extent->start <= cl_offset(osc2cl(obj), ext->oe_start) &&
+ extent->end >= cl_offset(osc2cl(obj), ext->oe_max_end))) {
rc = 100;
goto out;
}
+
+ if (!(ext->oe_dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))) {
+ rc = 102;
+ goto out;
+ }
}
if (ext->oe_nr_pages > ext->oe_mppr) {
@@ -276,7 +287,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
page_count = 0;
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- pgoff_t index = oap2cl_page(oap)->cp_index;
+ pgoff_t index = osc_index(oap2osc(oap));
++page_count;
if (index > ext->oe_end || index < ext->oe_start) {
rc = 110;
@@ -359,7 +370,7 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
ext->oe_state = OES_INV;
INIT_LIST_HEAD(&ext->oe_pages);
init_waitqueue_head(&ext->oe_waitq);
- ext->oe_osclock = NULL;
+ ext->oe_dlmlock = NULL;
return ext;
}
@@ -385,9 +396,11 @@ static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
LASSERT(ext->oe_state == OES_INV);
LASSERT(!ext->oe_intree);
- if (ext->oe_osclock) {
- cl_lock_put(env, ext->oe_osclock);
- ext->oe_osclock = NULL;
+ if (ext->oe_dlmlock) {
+ lu_ref_add(&ext->oe_dlmlock->l_reference,
+ "osc_extent", ext);
+ LDLM_LOCK_PUT(ext->oe_dlmlock);
+ ext->oe_dlmlock = NULL;
}
osc_extent_free(ext);
}
@@ -543,7 +556,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
if (cur->oe_max_end != victim->oe_max_end)
return -ERANGE;
- LASSERT(cur->oe_osclock == victim->oe_osclock);
+ LASSERT(cur->oe_dlmlock == victim->oe_dlmlock);
ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
chunk_start = cur->oe_start >> ppc_bits;
chunk_end = cur->oe_end >> ppc_bits;
@@ -624,10 +637,10 @@ static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
static struct osc_extent *osc_extent_find(const struct lu_env *env,
struct osc_object *obj, pgoff_t index,
int *grants)
-
{
struct client_obd *cli = osc_cli(obj);
- struct cl_lock *lock;
+ struct osc_lock *olck;
+ struct cl_lock_descr *descr;
struct osc_extent *cur;
struct osc_extent *ext;
struct osc_extent *conflict = NULL;
@@ -644,8 +657,12 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
if (!cur)
return ERR_PTR(-ENOMEM);
- lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
- LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
+ olck = osc_env_io(env)->oi_write_osclock;
+ LASSERTF(olck, "page %lu is not covered by lock\n", index);
+ LASSERT(olck->ols_state == OLS_GRANTED);
+
+ descr = &olck->ols_cl.cls_lock->cll_descr;
+ LASSERT(descr->cld_mode >= CLM_WRITE);
LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
@@ -657,19 +674,23 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
max_pages = cli->cl_max_pages_per_rpc;
LASSERT((max_pages & ~chunk_mask) == 0);
max_end = index - (index % max_pages) + max_pages - 1;
- max_end = min_t(pgoff_t, max_end, lock->cll_descr.cld_end);
+ max_end = min_t(pgoff_t, max_end, descr->cld_end);
/* initialize new extent by parameters so far */
cur->oe_max_end = max_end;
cur->oe_start = index & chunk_mask;
cur->oe_end = ((index + ~chunk_mask + 1) & chunk_mask) - 1;
- if (cur->oe_start < lock->cll_descr.cld_start)
- cur->oe_start = lock->cll_descr.cld_start;
+ if (cur->oe_start < descr->cld_start)
+ cur->oe_start = descr->cld_start;
if (cur->oe_end > max_end)
cur->oe_end = max_end;
- cur->oe_osclock = lock;
cur->oe_grants = 0;
cur->oe_mppr = max_pages;
+ if (olck->ols_dlmlock) {
+ LASSERT(olck->ols_hold);
+ cur->oe_dlmlock = LDLM_LOCK_GET(olck->ols_dlmlock);
+ lu_ref_add(&olck->ols_dlmlock->l_reference, "osc_extent", cur);
+ }
/* grants has been allocated by caller */
LASSERTF(*grants >= chunksize + cli->cl_extent_tax,
@@ -691,7 +712,7 @@ restart:
break;
/* if covering by different locks, no chance to match */
- if (lock != ext->oe_osclock) {
+ if (olck->ols_dlmlock != ext->oe_dlmlock) {
EASSERTF(!overlapped(ext, cur), ext,
EXTSTR"\n", EXTPARA(cur));
@@ -795,7 +816,7 @@ restart:
if (found) {
LASSERT(!conflict);
if (!IS_ERR(found)) {
- LASSERT(found->oe_osclock == cur->oe_osclock);
+ LASSERT(found->oe_dlmlock == cur->oe_dlmlock);
OSC_EXTENT_DUMP(D_CACHE, found,
"found caching ext for %lu.\n", index);
}
@@ -810,7 +831,7 @@ restart:
found = osc_extent_hold(cur);
osc_extent_insert(obj, cur);
OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n",
- index, lock->cll_descr.cld_end);
+ index, descr->cld_end);
}
osc_object_unlock(obj);
@@ -856,6 +877,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
ext->oe_rc = rc ?: ext->oe_nr_pages;
EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);
+
+ osc_lru_add_batch(cli, &ext->oe_pages);
list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) {
list_del_init(&oap->oap_rpc_item);
list_del_init(&oap->oap_pending_item);
@@ -877,10 +900,9 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
* span a whole chunk on the OST side, or our accounting goes
* wrong. Should match the code in filter_grant_check.
*/
- int offset = oap->oap_page_off & ~CFS_PAGE_MASK;
- int count = oap->oap_count + (offset & (blocksize - 1));
- int end = (offset + oap->oap_count) & (blocksize - 1);
-
+ int offset = last_off & ~PAGE_MASK;
+ int count = last_count + (offset & (blocksize - 1));
+ int end = (offset + last_count) & (blocksize - 1);
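+ /*
+ * Example (illustrative values): with blocksize = 4096,
+ * offset = 2048 and last_count = 1024, count becomes
+ * 1024 + 2048 = 3072 and end = 3072, so count is rounded
+ * up by 4096 - 3072 = 1024: a 1KB tail write undirties a
+ * full 4KB server-side block.
+ */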
if (end)
count += blocksize - end;
@@ -943,7 +965,7 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
"%s: wait ext to %d timedout, recovery in progress?\n",
osc_export(obj)->exp_obd->obd_name, state);
- lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ lwi = LWI_INTR(NULL, NULL);
rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
&lwi);
}
@@ -990,19 +1012,19 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
/* discard all pages with index greater then trunc_index */
list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) {
- struct cl_page *sub = oap2cl_page(oap);
- struct cl_page *page = cl_page_top(sub);
+ pgoff_t index = osc_index(oap2osc(oap));
+ struct cl_page *page = oap2cl_page(oap);
LASSERT(list_empty(&oap->oap_rpc_item));
/* only discard the pages with their index greater than
* trunc_index, and ...
*/
- if (sub->cp_index < trunc_index ||
- (sub->cp_index == trunc_index && partial)) {
+ if (index < trunc_index ||
+ (index == trunc_index && partial)) {
/* account for how many pages remain in the chunk
* so that we can calculate grants correctly. */
- if (sub->cp_index >> ppc_bits == trunc_chunk)
+ if (index >> ppc_bits == trunc_chunk)
++pages_in_chunk;
continue;
}
@@ -1013,7 +1035,6 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
lu_ref_add(&page->cp_reference, "truncate", current);
if (cl_page_own(env, io, page) == 0) {
- cl_page_unmap(env, io, page);
cl_page_discard(env, io, page);
cl_page_disown(env, io, page);
} else {
@@ -1126,7 +1147,9 @@ static int osc_extent_make_ready(const struct lu_env *env,
last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
LASSERT(last->oap_count > 0);
LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
+ spin_lock(&last->oap_lock);
last->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&last->oap_lock);
}
/* for the rest of pages, we don't need to call osf_refresh_count()
@@ -1135,7 +1158,9 @@ static int osc_extent_make_ready(const struct lu_env *env,
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
oap->oap_count = PAGE_SIZE - oap->oap_page_off;
+ spin_lock(&last->oap_lock);
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&last->oap_lock);
}
}
@@ -1256,7 +1281,7 @@ static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
int cmd)
{
struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = cl_page_top(oap2cl_page(oap));
+ struct cl_page *page = oap2cl_page(oap);
int result;
LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
@@ -1271,7 +1296,7 @@ static int osc_refresh_count(const struct lu_env *env,
struct osc_async_page *oap, int cmd)
{
struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = oap2cl_page(oap);
+ pgoff_t index = osc_index(oap2osc(oap));
struct cl_object *obj;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
@@ -1288,10 +1313,10 @@ static int osc_refresh_count(const struct lu_env *env,
if (result < 0)
return result;
kms = attr->cat_kms;
- if (cl_offset(obj, page->cp_index) >= kms)
+ if (cl_offset(obj, index) >= kms)
/* catch race with truncate */
return 0;
- else if (cl_offset(obj, page->cp_index + 1) > kms)
+ else if (cl_offset(obj, index + 1) > kms)
/* catch sub-page write at end of file */
return kms % PAGE_SIZE;
else
@@ -1302,14 +1327,16 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
int cmd, int rc)
{
struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = cl_page_top(oap2cl_page(oap));
+ struct cl_page *page = oap2cl_page(oap);
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
enum cl_req_type crt;
int srvlock;
cmd &= ~OBD_BRW_NOQUOTA;
- LASSERT(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ));
- LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
+ LASSERTF(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ),
+ "cp_state:%u, cmd:%d\n", page->cp_state, cmd);
+ LASSERTF(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE),
+ "cp_state:%u, cmd:%d\n", page->cp_state, cmd);
LASSERT(opg->ops_transfer_pinned);
/*
@@ -1358,22 +1385,28 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
return 0;
}
-#define OSC_DUMP_GRANT(cli, fmt, args...) do { \
+#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
struct client_obd *__tmp = (cli); \
- CDEBUG(D_CACHE, "%s: { dirty: %ld/%ld dirty_pages: %d/%d " \
- "dropped: %ld avail: %ld, reserved: %ld, flight: %d } " fmt, \
+ CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %d/%d " \
+ "unstable_pages: %d/%d dropped: %ld avail: %ld, " \
+ "reserved: %ld, flight: %d } lru {in list: %d, " \
+ "left: %d, waiters: %d }" fmt, \
__tmp->cl_import->imp_obd->obd_name, \
__tmp->cl_dirty, __tmp->cl_dirty_max, \
atomic_read(&obd_dirty_pages), obd_max_dirty_pages, \
+ atomic_read(&obd_unstable_pages), obd_max_dirty_pages, \
__tmp->cl_lost_grant, __tmp->cl_avail_grant, \
- __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, ##args); \
+ __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, \
+ atomic_read(&__tmp->cl_lru_in_list), \
+ atomic_read(&__tmp->cl_lru_busy), \
+ atomic_read(&__tmp->cl_lru_shrinkers), ##args); \
} while (0)
/* caller must hold loi_list_lock */
static void osc_consume_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
- assert_spin_locked(&cli->cl_loi_list_lock.lock);
+ assert_spin_locked(&cli->cl_loi_list_lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
atomic_inc(&obd_dirty_pages);
cli->cl_dirty += PAGE_SIZE;
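
The assert change above reflects the removal of the client_obd_list_lock_t wrapper: cl_loi_list_lock is now a bare spinlock_t rather than a struct embedding one, which is why the trailing ".lock" disappears. A hedged sketch of the type change (the old wrapper layout is reconstructed from the removed ".lock" accessor, not quoted from the tree):

	/* Before (reconstructed): the lock was wrapped in a helper type. */
	typedef struct {
		spinlock_t lock;		/* via client_obd_list_lock() */
	} client_obd_lock_t;

	/* After: struct client_obd embeds the spinlock directly. */
	struct client_obd_after {		/* name illustrative */
		spinlock_t cl_loi_list_lock;	/* via spin_lock() */
	};
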
@@ -1389,7 +1422,7 @@ static void osc_consume_write_grant(struct client_obd *cli,
static void osc_release_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
- assert_spin_locked(&cli->cl_loi_list_lock.lock);
+ assert_spin_locked(&cli->cl_loi_list_lock);
if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
return;
}
@@ -1408,7 +1441,7 @@ static void osc_release_write_grant(struct client_obd *cli,
* To avoid sleeping with object lock held, it's good for us allocate enough
* grants before entering into critical section.
*
- * client_obd_list_lock held by caller
+ * spin_lock held by caller
*/
static int osc_reserve_grant(struct client_obd *cli, unsigned int bytes)
{
@@ -1442,11 +1475,11 @@ static void __osc_unreserve_grant(struct client_obd *cli,
static void osc_unreserve_grant(struct client_obd *cli,
unsigned int reserved, unsigned int unused)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
__osc_unreserve_grant(cli, reserved, unused);
if (unused > 0)
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
/**
@@ -1467,7 +1500,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
{
int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
atomic_sub(nr_pages, &obd_dirty_pages);
cli->cl_dirty -= nr_pages << PAGE_SHIFT;
cli->cl_lost_grant += lost_grant;
@@ -1479,7 +1512,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
cli->cl_avail_grant += grant;
}
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
lost_grant, cli->cl_lost_grant,
cli->cl_avail_grant, cli->cl_dirty);
@@ -1491,9 +1524,9 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
*/
static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
osc_release_write_grant(cli, &oap->oap_brw_page);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
/**
@@ -1506,14 +1539,15 @@ static int osc_enter_cache_try(struct client_obd *cli,
{
int rc;
- OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);
+ OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
rc = osc_reserve_grant(cli, bytes);
if (rc < 0)
return 0;
if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
- atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
+ atomic_read(&obd_unstable_pages) + 1 +
+ atomic_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
osc_consume_write_grant(cli, &oap->oap_brw_page);
if (transient) {
cli->cl_dirty_transit += PAGE_SIZE;
@@ -1532,9 +1566,9 @@ static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
{
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = list_empty(&ocw->ocw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
}
@@ -1551,12 +1585,13 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc = oap->oap_obj;
struct lov_oinfo *loi = osc->oo_oinfo;
struct osc_cache_waiter ocw;
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
+ LWI_ON_SIGNAL_NOOP, NULL);
int rc = -EDQUOT;
- OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);
+ OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
/* force the caller to try sync io. this can jump the list
* of queued writes and create a discontiguous rpc stream
@@ -1587,7 +1622,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) {
list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
ocw.ocw_rc = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
osc_io_unplug_async(env, cli, NULL);
@@ -1596,10 +1631,17 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
- /* l_wait_event is interrupted by signal */
+ /* l_wait_event is interrupted by signal, or timed out */
if (rc < 0) {
+ if (rc == -ETIMEDOUT) {
+ OSC_DUMP_GRANT(D_ERROR, cli,
+ "try to reserve %d.\n", bytes);
+ osc_extent_tree_dump(D_ERROR, osc);
+ rc = -EDQUOT;
+ }
+
list_del_init(&ocw.ocw_entry);
goto out;
}
@@ -1615,8 +1657,8 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
}
}
out:
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- OSC_DUMP_GRANT(cli, "returned %d.\n", rc);
+ spin_unlock(&cli->cl_loi_list_lock);
+ OSC_DUMP_GRANT(D_CACHE, cli, "returned %d.\n", rc);
return rc;
}
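
The wait above is now bounded: LWI_TIMEOUT_INTR() arms both a 600-second timeout and signal handling, and a timeout is downgraded to -EDQUOT after dumping diagnostics so the caller falls back to sync I/O. A hedged sketch of the bounded-wait idiom (demo names illustrative):

	/* Sketch: interruptible wait with a hard timeout, libcfs style. */
	static int demo_wait_for_grant(wait_queue_head_t *waitq,
				       int (*granted)(void))
	{
		struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600),
							  NULL,
							  LWI_ON_SIGNAL_NOOP,
							  NULL);
		int rc;

		rc = l_wait_event(*waitq, granted(), &lwi); /* signal/timeout */
		if (rc == -ETIMEDOUT)
			rc = -EDQUOT;	/* treat a stuck grant as exhaustion */
		return rc;
	}
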
@@ -1633,8 +1675,8 @@ void osc_wake_cache_waiters(struct client_obd *cli)
ocw->ocw_rc = -EDQUOT;
/* we can't dirty more */
if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
- (atomic_read(&obd_dirty_pages) + 1 >
- obd_max_dirty_pages)) {
+ (atomic_read(&obd_unstable_pages) + 1 +
+ atomic_read(&obd_dirty_pages) > obd_max_dirty_pages)) {
CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
cli->cl_dirty,
cli->cl_dirty_max, obd_max_dirty_pages);
@@ -1776,9 +1818,9 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
{
int is_ready;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
is_ready = __osc_list_maint(cli, osc);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return is_ready;
}
@@ -1799,13 +1841,101 @@ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
ar->ar_force_sync = 1;
ar->ar_min_xid = ptlrpc_sample_next_xid();
return;
-
}
if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
ar->ar_force_sync = 0;
}
+/**
+ * Performs "unstable" page accounting. This function balances the
+ * increment operations performed in osc_inc_unstable_pages. It is
+ * registered as the RPC request callback, and is executed when the
+ * bulk RPC is committed on the server. Thus at this point, the pages
+ * involved in the bulk transfer are no longer considered unstable.
+ */
+void osc_dec_unstable_pages(struct ptlrpc_request *req)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ int page_count = desc->bd_iov_count;
+ int i;
+
+ /* No unstable page tracking */
+ if (!cli->cl_cache)
+ return;
+
+ LASSERT(page_count >= 0);
+
+ for (i = 0; i < page_count; i++)
+ dec_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
+
+ atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
+ LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
+
+ atomic_sub(page_count, &cli->cl_unstable_count);
+ LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
+
+ atomic_sub(page_count, &obd_unstable_pages);
+ LASSERT(atomic_read(&obd_unstable_pages) >= 0);
+
+ spin_lock(&req->rq_lock);
+ req->rq_committed = 1;
+ req->rq_unstable = 0;
+ spin_unlock(&req->rq_lock);
+
+ wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
+}
+
+/* "unstable" page accounting. See: osc_dec_unstable_pages. */
+void osc_inc_unstable_pages(struct ptlrpc_request *req)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ long page_count = desc->bd_iov_count;
+ int i;
+
+ /* No unstable page tracking */
+ if (!cli->cl_cache)
+ return;
+
+ LASSERT(page_count >= 0);
+
+ for (i = 0; i < page_count; i++)
+ inc_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
+
+ LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
+ atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
+
+ LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
+ atomic_add(page_count, &cli->cl_unstable_count);
+
+ LASSERT(atomic_read(&obd_unstable_pages) >= 0);
+ atomic_add(page_count, &obd_unstable_pages);
+
+ spin_lock(&req->rq_lock);
+
+ /*
+ * If the request has already been committed (i.e. brw_commit
+ * called via rq_commit_cb), we need to undo the unstable page
+ * increments we just performed because rq_commit_cb won't be
+ * called again. Otherwise, just set the commit callback so the
+ * unstable page accounting is properly updated when the request
+ * is committed.
+ */
+ if (req->rq_committed) {
+ /* Drop lock before calling osc_dec_unstable_pages */
+ spin_unlock(&req->rq_lock);
+ osc_dec_unstable_pages(req);
+ spin_lock(&req->rq_lock);
+ } else {
+ req->rq_unstable = 1;
+ req->rq_commit_cb = osc_dec_unstable_pages;
+ }
+
+ spin_unlock(&req->rq_lock);
+}
+
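+
The inc/dec pair must stay balanced even if the transaction commits between the increment and the callback registration; checking rq_committed under rq_lock closes that window. A condensed, hedged sketch of the register-or-run-now pattern the two functions implement (all names illustrative):

	/* Sketch: register a commit callback, or run it now if too late. */
	struct demo_req {
		spinlock_t lock;
		bool committed;
		void (*commit_cb)(struct demo_req *);
	};

	static void demo_register_commit_cb(struct demo_req *req,
					    void (*cb)(struct demo_req *))
	{
		spin_lock(&req->lock);
		if (req->committed) {
			/* Commit already happened: run the callback here. */
			spin_unlock(&req->lock);
			cb(req);
		} else {
			/* Commit pending: the committer invokes cb later. */
			req->commit_cb = cb;
			spin_unlock(&req->lock);
		}
	}
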
/* this must be called holding the loi list lock to give coverage to exit_cache,
* async_flag maintenance, and oap_request
*/
@@ -1817,6 +1947,9 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
__u64 xid = 0;
if (oap->oap_request) {
+ if (!rc)
+ osc_inc_unstable_pages(oap->oap_request);
+
xid = ptlrpc_req_xid(oap->oap_request);
ptlrpc_req_finished(oap->oap_request);
oap->oap_request = NULL;
@@ -1829,10 +1962,10 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
oap->oap_interrupted = 0;
if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
osc_process_ar(&cli->cl_ar, xid, rc);
osc_process_ar(&loi->loi_ar, xid, rc);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
rc = osc_completion(env, oap, oap->oap_cmd, rc);
@@ -2133,9 +2266,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
}
cl_object_get(obj);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- lu_object_ref_add_at(&obj->co_lu, &link, "check",
- current);
+ spin_unlock(&cli->cl_loi_list_lock);
+ lu_object_ref_add_at(&obj->co_lu, &link, "check", current);
/* attempt some read/write balancing by alternating between
* reads and writes in an object. The makes_rpc checks here
@@ -2178,11 +2310,10 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
osc_object_unlock(osc);
osc_list_maint(cli, osc);
- lu_object_ref_del_at(&obj->co_lu, &link, "check",
- current);
+ lu_object_ref_del_at(&obj->co_lu, &link, "check", current);
cl_object_put(env, obj);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
}
}
@@ -2199,9 +2330,9 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
* potential stack overrun problem. LU-2859
*/
atomic_inc(&cli->cl_lru_shrinkers);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
osc_check_rpcs(env, cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
atomic_dec(&cli->cl_lru_shrinkers);
} else {
CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
@@ -2238,7 +2369,7 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
oap->oap_page = page;
oap->oap_obj_off = offset;
- LASSERT(!(offset & ~CFS_PAGE_MASK));
+ LASSERT(!(offset & ~PAGE_MASK));
if (!client_is_remote(exp) && capable(CFS_CAP_SYS_RESOURCE))
oap->oap_brw_flags = OBD_BRW_NOQUOTA;
@@ -2306,16 +2437,23 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
return rc;
}
+ if (osc_over_unstable_soft_limit(cli))
+ brw_flags |= OBD_BRW_SOFT_SYNC;
+
oap->oap_cmd = cmd;
oap->oap_page_off = ops->ops_from;
oap->oap_count = ops->ops_to - ops->ops_from;
+ /*
+ * No need to hold a lock here,
+ * since this page is not in any list yet.
+ */
oap->oap_async_flags = 0;
oap->oap_brw_flags = brw_flags;
OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n",
oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK);
- index = oap2cl_page(oap)->cp_index;
+ index = osc_index(oap2osc(oap));
/* Add this page into extent by the following steps:
* 1. if there exists an active extent for this IO, mostly this page
@@ -2334,9 +2472,9 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
grants = 0;
/* it doesn't need any grant to dirty this page */
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = osc_enter_cache_try(cli, oap, grants, 0);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
if (rc == 0) { /* try failed */
grants = 0;
need_release = 1;
@@ -2427,21 +2565,21 @@ int osc_teardown_async_page(const struct lu_env *env,
LASSERT(oap->oap_magic == OAP_MAGIC);
CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
- oap, ops, oap2cl_page(oap)->cp_index);
+ oap, ops, osc_index(oap2osc(oap)));
osc_object_lock(obj);
if (!list_empty(&oap->oap_rpc_item)) {
CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
rc = -EBUSY;
} else if (!list_empty(&oap->oap_pending_item)) {
- ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index);
+ ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
/* only truncated pages are allowed to be taken out.
* See osc_extent_truncate() and osc_cache_truncate_start()
* for details.
*/
if (ext && ext->oe_state != OES_TRUNC) {
OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
- oap2cl_page(oap)->cp_index);
+ osc_index(oap2osc(oap)));
rc = -EBUSY;
}
}
@@ -2464,7 +2602,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
struct osc_extent *ext = NULL;
struct osc_object *obj = cl2osc(ops->ops_cl.cpl_obj);
struct cl_page *cp = ops->ops_cl.cpl_page;
- pgoff_t index = cp->cp_index;
+ pgoff_t index = osc_index(ops);
struct osc_async_page *oap = &ops->ops_oap;
bool unplug = false;
int rc = 0;
@@ -2479,8 +2617,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
switch (ext->oe_state) {
case OES_RPC:
case OES_LOCK_DONE:
- CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(cp),
- "flush an in-rpc page?\n");
+ CL_PAGE_DEBUG(D_ERROR, env, cp, "flush an in-rpc page?\n");
LASSERT(0);
break;
case OES_LOCKING:
@@ -2506,7 +2643,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
break;
}
- rc = cl_page_prep(env, io, cl_page_top(cp), CRT_WRITE);
+ rc = cl_page_prep(env, io, cp, CRT_WRITE);
if (rc)
goto out;
@@ -2550,7 +2687,7 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
struct osc_extent *ext;
struct osc_extent *found = NULL;
struct list_head *plist;
- pgoff_t index = oap2cl_page(oap)->cp_index;
+ pgoff_t index = osc_index(ops);
int rc = -EBUSY;
int cmd;
@@ -2613,12 +2750,12 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
pgoff_t end = 0;
list_for_each_entry(oap, list, oap_pending_item) {
- struct cl_page *cp = oap2cl_page(oap);
+ pgoff_t index = osc_index(oap2osc(oap));
- if (cp->cp_index > end)
- end = cp->cp_index;
- if (cp->cp_index < start)
- start = cp->cp_index;
+ if (index > end)
+ end = index;
+ if (index < start)
+ start = index;
++page_count;
mppr <<= (page_count > mppr);
}
@@ -2633,6 +2770,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
}
ext->oe_rw = !!(cmd & OBD_BRW_READ);
+ ext->oe_sync = 1;
ext->oe_urgent = 1;
ext->oe_start = start;
ext->oe_end = ext->oe_max_end = end;
@@ -2988,7 +3126,200 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
result = rc;
}
- OSC_IO_DEBUG(obj, "cache page out.\n");
+ OSC_IO_DEBUG(obj, "pageout [%lu, %lu], %d.\n", start, end, result);
+ return result;
+}
+
+/**
+ * Returns a list of pages by a given [start, end] of \a obj.
+ *
+ * If the lookup would hog the CPU for too long, it stops early and
+ * returns CLP_GANG_RESCHED; in that case the caller should call
+ * cond_resched() and retry from the next index the callback recorded.
+ *
+ * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
+ * crucial in the face of [offset, EOF] locks.
+ *
+ * At least one covered page is passed to the callback unless the range
+ * contains none.
+ */
+int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
+ struct osc_object *osc, pgoff_t start, pgoff_t end,
+ osc_page_gang_cbt cb, void *cbdata)
+{
+ struct osc_page *ops;
+ void **pvec;
+ pgoff_t idx;
+ unsigned int nr;
+ unsigned int i;
+ unsigned int j;
+ int res = CLP_GANG_OKAY;
+ bool tree_lock = true;
+
+ idx = start;
+ pvec = osc_env_info(env)->oti_pvec;
+ spin_lock(&osc->oo_tree_lock);
+ while ((nr = radix_tree_gang_lookup(&osc->oo_tree, pvec,
+ idx, OTI_PVEC_SIZE)) > 0) {
+ struct cl_page *page;
+ bool end_of_region = false;
+
+ for (i = 0, j = 0; i < nr; ++i) {
+ ops = pvec[i];
+ pvec[i] = NULL;
+
+ idx = osc_index(ops);
+ if (idx > end) {
+ end_of_region = true;
+ break;
+ }
+
+ page = ops->ops_cl.cpl_page;
+ LASSERT(page->cp_type == CPT_CACHEABLE);
+ if (page->cp_state == CPS_FREEING)
+ continue;
+
+ cl_page_get(page);
+ lu_ref_add_atomic(&page->cp_reference,
+ "gang_lookup", current);
+ pvec[j++] = ops;
+ }
+ ++idx;
+
+ /*
+ * Here a delicate locking dance is performed. The current
+ * thread holds a reference to a page, but has to own it
+ * before it can be placed into the queue. Owning implies
+ * waiting, so the radix-tree lock must be released. After
+ * the wait, one has to check that the pages weren't
+ * truncated (cl_page_own() returns an error in that case).
+ */
+ spin_unlock(&osc->oo_tree_lock);
+ tree_lock = false;
+
+ for (i = 0; i < j; ++i) {
+ ops = pvec[i];
+ if (res == CLP_GANG_OKAY)
+ res = (*cb)(env, io, ops, cbdata);
+
+ page = ops->ops_cl.cpl_page;
+ lu_ref_del(&page->cp_reference, "gang_lookup", current);
+ cl_page_put(env, page);
+ }
+ if (nr < OTI_PVEC_SIZE || end_of_region)
+ break;
+
+ if (res == CLP_GANG_OKAY && need_resched())
+ res = CLP_GANG_RESCHED;
+ if (res != CLP_GANG_OKAY)
+ break;
+
+ spin_lock(&osc->oo_tree_lock);
+ tree_lock = true;
+ }
+ if (tree_lock)
+ spin_unlock(&osc->oo_tree_lock);
+ return res;
+}
+
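+
Callers are expected to treat CLP_GANG_RESCHED as "yield and resume": the callback records the next index to visit, the caller calls cond_resched() and re-enters the lookup from there, exactly as osc_lock_discard_pages() does below. A hedged sketch of that driver loop (the visit_* names are illustrative):

	/* Sketch: resume-on-reschedule driver for osc_page_gang_lookup(). */
	static int visit_cb(const struct lu_env *env, struct cl_io *io,
			    struct osc_page *ops, void *cbdata)
	{
		pgoff_t *next = cbdata;

		*next = osc_index(ops) + 1;	/* remember resume point */
		return CLP_GANG_OKAY;
	}

	static void visit_range(const struct lu_env *env, struct cl_io *io,
				struct osc_object *osc,
				pgoff_t start, pgoff_t end)
	{
		pgoff_t next = start;
		int res;

		do {
			res = osc_page_gang_lookup(env, io, osc, next, end,
						   visit_cb, &next);
			if (next > end)
				break;
			if (res == CLP_GANG_RESCHED)
				cond_resched();	/* yield, then resume */
		} while (res != CLP_GANG_OKAY);
	}
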
+/**
+ * Check if page @page is covered by an extra lock or discard it.
+ */
+static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops, void *cbdata)
+{
+ struct osc_thread_info *info = osc_env_info(env);
+ struct osc_object *osc = cbdata;
+ pgoff_t index;
+
+ index = osc_index(ops);
+ if (index >= info->oti_fn_index) {
+ struct ldlm_lock *tmp;
+ struct cl_page *page = ops->ops_cl.cpl_page;
+
+ /* refresh non-overlapped index */
+ tmp = osc_dlmlock_at_pgoff(env, osc, index, 0, 0);
+ if (tmp) {
+ __u64 end = tmp->l_policy_data.l_extent.end;
+ /* Cache the first non-overlapped index so as to skip
+ * all pages within [index, oti_fn_index). This is safe
+ * because if the tmp lock is canceled, it will discard
+ * these pages.
+ */
+ info->oti_fn_index = cl_index(osc2cl(osc), end + 1);
+ if (end == OBD_OBJECT_EOF)
+ info->oti_fn_index = CL_PAGE_EOF;
+ LDLM_LOCK_PUT(tmp);
+ } else if (cl_page_own(env, io, page) == 0) {
+ /* discard the page */
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+ } else {
+ LASSERT(page->cp_state == CPS_FREEING);
+ }
+ }
+
+ info->oti_next_index = index + 1;
+ return CLP_GANG_OKAY;
+}
+
+static int discard_cb(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops, void *cbdata)
+{
+ struct osc_thread_info *info = osc_env_info(env);
+ struct cl_page *page = ops->ops_cl.cpl_page;
+
+ /* page is top page. */
+ info->oti_next_index = osc_index(ops) + 1;
+ if (cl_page_own(env, io, page) == 0) {
+ KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
+ !PageDirty(cl_page_vmpage(page))));
+
+ /* discard the page */
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+ } else {
+ LASSERT(page->cp_state == CPS_FREEING);
+ }
+
+ return CLP_GANG_OKAY;
+}
+
+/**
+ * Discard pages protected by the given lock. This function traverses the
+ * radix tree to find all covered pages and discards them. If a page is
+ * covered by another lock, it remains in cache.
+ *
+ * If an error happens at any step, the process continues anyway (the
+ * reasoning being that lock cancellation cannot be delayed indefinitely).
+ */
+int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
+ pgoff_t start, pgoff_t end, enum cl_lock_mode mode)
+{
+ struct osc_thread_info *info = osc_env_info(env);
+ struct cl_io *io = &info->oti_io;
+ osc_page_gang_cbt cb;
+ int res;
+ int result;
+
+ io->ci_obj = cl_object_top(osc2cl(osc));
+ io->ci_ignore_layout = 1;
+ result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
+ if (result != 0)
+ goto out;
+
+ cb = mode == CLM_READ ? check_and_discard_cb : discard_cb;
+ info->oti_fn_index = info->oti_next_index = start;
+ do {
+ res = osc_page_gang_lookup(env, io, osc,
+ info->oti_next_index, end, cb, osc);
+ if (info->oti_next_index > end)
+ break;
+
+ if (res == CLP_GANG_RESCHED)
+ cond_resched();
+ } while (res != CLP_GANG_OKAY);
+out:
+ cl_io_fini(env, io);
return result;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index d55d04d04..ae19d396b 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -51,7 +51,6 @@
#include "../include/obd.h"
/* osc_build_res_name() */
#include "../include/cl_object.h"
-#include "../include/lclient.h"
#include "osc_internal.h"
/** \defgroup osc osc
@@ -68,6 +67,9 @@ struct osc_io {
struct cl_io_slice oi_cl;
/** true if this io is lockless. */
int oi_lockless;
+ /** how many LRU pages are reserved for this IO */
+ int oi_lru_reserved;
+
/** active extents, we know how many bytes is going to be written,
* so having an active extent will prevent it from being fragmented
*/
@@ -77,6 +79,8 @@ struct osc_io {
*/
struct osc_extent *oi_trunc;
+ /** write osc_lock for this IO, used by osc_extent_find(). */
+ struct osc_lock *oi_write_osclock;
struct obd_info oi_info;
struct obdo oi_oa;
struct osc_async_cbargs {
@@ -100,7 +104,7 @@ struct osc_session {
struct osc_io os_io;
};
-#define OTI_PVEC_SIZE 64
+#define OTI_PVEC_SIZE 256
struct osc_thread_info {
struct ldlm_res_id oti_resname;
ldlm_policy_data_t oti_policy;
@@ -109,7 +113,13 @@ struct osc_thread_info {
struct lustre_handle oti_handle;
struct cl_page_list oti_plist;
struct cl_io oti_io;
- struct cl_page *oti_pvec[OTI_PVEC_SIZE];
+ void *oti_pvec[OTI_PVEC_SIZE];
+ /**
+ * Fields used by cl_lock_discard_pages().
+ */
+ pgoff_t oti_next_index;
+ pgoff_t oti_fn_index; /* first non-overlapped index */
+ struct cl_sync_io oti_anchor;
};
struct osc_object {
@@ -125,7 +135,7 @@ struct osc_object {
*/
struct list_head oo_inflight[CRT_NR];
/**
- * Lock, protecting ccc_object::cob_inflight, because a seat-belt is
+ * Lock, protecting osc_page::ops_inflight, because a seat-belt is
* locked during take-off and landing.
*/
spinlock_t oo_seatbelt;
@@ -159,6 +169,17 @@ struct osc_object {
* oo_{read|write}_pages soon.
*/
spinlock_t oo_lock;
+
+ /**
+ * Radix tree for caching pages
+ */
+ struct radix_tree_root oo_tree;
+ spinlock_t oo_tree_lock;
+ unsigned long oo_npages;
+
+ /* Protect osc_lock this osc_object has */
+ spinlock_t oo_ol_spin;
+ struct list_head oo_ol_list;
};
static inline void osc_object_lock(struct osc_object *obj)
@@ -198,8 +219,6 @@ enum osc_lock_state {
OLS_ENQUEUED,
OLS_UPCALL_RECEIVED,
OLS_GRANTED,
- OLS_RELEASED,
- OLS_BLOCKED,
OLS_CANCELLED
};
@@ -208,10 +227,8 @@ enum osc_lock_state {
*
* Interaction with DLM.
*
- * CLIO enqueues all DLM locks through ptlrpcd (that is, in "async" mode).
- *
* Once receive upcall is invoked, osc_lock remembers a handle of DLM lock in
- * osc_lock::ols_handle and a pointer to that lock in osc_lock::ols_lock.
+ * osc_lock::ols_handle and a pointer to that lock in osc_lock::ols_dlmlock.
*
* This pointer is protected through a reference, acquired by
* osc_lock_upcall0(). Also, an additional reference is acquired by
@@ -249,26 +266,27 @@ enum osc_lock_state {
*/
struct osc_lock {
struct cl_lock_slice ols_cl;
+ /** Internal lock to protect states, etc. */
+ spinlock_t ols_lock;
+ /** Owner sleeps on this channel for state change */
+ struct cl_sync_io *ols_owner;
+ /** waiting list for this lock to be cancelled */
+ struct list_head ols_waiting_list;
+ /** wait entry of ols_waiting_list */
+ struct list_head ols_wait_entry;
+ /** list entry for osc_object::oo_ol_list */
+ struct list_head ols_nextlock_oscobj;
+
/** underlying DLM lock */
- struct ldlm_lock *ols_lock;
- /** lock value block */
- struct ost_lvb ols_lvb;
+ struct ldlm_lock *ols_dlmlock;
/** DLM flags with which osc_lock::ols_lock was enqueued */
__u64 ols_flags;
/** osc_lock::ols_lock handle */
struct lustre_handle ols_handle;
struct ldlm_enqueue_info ols_einfo;
enum osc_lock_state ols_state;
-
- /**
- * How many pages are using this lock for io, currently only used by
- * read-ahead. If non-zero, the underlying dlm lock won't be cancelled
- * during recovery to avoid deadlock. see bz16774.
- *
- * \see osc_page::ops_lock
- * \see osc_page_addref_lock(), osc_page_putref_lock()
- */
- atomic_t ols_pageref;
+ /** lock value block */
+ struct ost_lvb ols_lvb;
/**
* true, if ldlm_lock_addref() was called against
@@ -299,16 +317,6 @@ struct osc_lock {
*/
ols_locklessable:1,
/**
- * set by osc_lock_use() to wait until blocking AST enters into
- * osc_ldlm_blocking_ast0(), so that cl_lock mutex can be used for
- * further synchronization.
- */
- ols_ast_wait:1,
- /**
- * If the data of this lock has been flushed to server side.
- */
- ols_flush:1,
- /**
* if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
* the EVAVAIL error as tolerable, this will make upper logic happy
* to wait all glimpse locks to each OSTs to be completed.
@@ -321,15 +329,6 @@ struct osc_lock {
* For async glimpse lock.
*/
ols_agl:1;
- /**
- * IO that owns this lock. This field is used for a dead-lock
- * avoidance by osc_lock_enqueue_wait().
- *
- * XXX: unfortunately, the owner of a osc_lock is not unique,
- * the lock may have multiple users, if the lock is granted and
- * then matched.
- */
- struct osc_io *ols_owner;
};
/**
@@ -369,18 +368,15 @@ struct osc_page {
* Set if the page must be transferred with OBD_BRW_SRVLOCK.
*/
ops_srvlock:1;
- union {
- /**
- * lru page list. ops_inflight and ops_lru are exclusive so
- * that they can share the same data.
- */
- struct list_head ops_lru;
- /**
- * Linkage into a per-osc_object list of pages in flight. For
- * debugging.
- */
- struct list_head ops_inflight;
- };
+ /**
+ * lru page list. See osc_lru_{del|use}() in osc_page.c for usage.
+ */
+ struct list_head ops_lru;
+ /**
+ * Linkage into a per-osc_object list of pages in flight. For
+ * debugging.
+ */
+ struct list_head ops_inflight;
/**
* Thread that submitted this page for transfer. For debugging.
*/
@@ -389,16 +385,6 @@ struct osc_page {
* Submit time - the time when the page is starting RPC. For debugging.
*/
unsigned long ops_submit_time;
-
- /**
- * A lock of which we hold a reference covers this page. Only used by
- * read-ahead: for a readahead page, we hold it's covering lock to
- * prevent it from being canceled during recovery.
- *
- * \see osc_lock::ols_pageref
- * \see osc_page_addref_lock(), osc_page_putref_lock().
- */
- struct cl_lock *ops_lock;
};
extern struct kmem_cache *osc_lock_kmem;
@@ -417,21 +403,22 @@ extern struct lu_context_key osc_session_key;
int osc_lock_init(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
const struct cl_io *io);
-int osc_io_init (const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io);
-int osc_req_init (const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
+int osc_io_init(const struct lu_env *env,
+ struct cl_object *obj, struct cl_io *io);
+int osc_req_init(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req);
struct lu_object *osc_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t ind);
-void osc_index2policy (ldlm_policy_data_t *policy, const struct cl_object *obj,
- pgoff_t start, pgoff_t end);
-int osc_lvb_print (const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct ost_lvb *lvb);
+void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
+ pgoff_t start, pgoff_t end);
+int osc_lvb_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct ost_lvb *lvb);
+void osc_lru_add_batch(struct client_obd *cli, struct list_head *list);
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
enum cl_req_type crt, int brw_flags);
int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops);
@@ -441,6 +428,8 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
struct page *page, loff_t offset);
int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
struct osc_page *ops);
+int osc_page_cache_add(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io);
int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
struct osc_page *ops);
int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
@@ -457,12 +446,13 @@ int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
pgoff_t start, pgoff_t end);
void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc);
+int lru_queue_work(const struct lu_env *env, void *data);
-void osc_object_set_contended (struct osc_object *obj);
+void osc_object_set_contended(struct osc_object *obj);
void osc_object_clear_contended(struct osc_object *obj);
-int osc_object_is_contended (struct osc_object *obj);
+int osc_object_is_contended(struct osc_object *obj);
-int osc_lock_is_lockless (const struct osc_lock *olck);
+int osc_lock_is_lockless(const struct osc_lock *olck);
/*****************************************************************************
*
@@ -558,6 +548,11 @@ static inline struct osc_page *oap2osc(struct osc_async_page *oap)
return container_of0(oap, struct osc_page, ops_oap);
}
+static inline pgoff_t osc_index(struct osc_page *opg)
+{
+ return opg->ops_cl.cpl_index;
+}
+
static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
{
return oap2osc(oap)->ops_cl.cpl_page;
@@ -608,7 +603,7 @@ enum osc_extent_state {
*
* LOCKING ORDER
* =============
- * page lock -> client_obd_list_lock -> object lock(osc_object::oo_lock)
+ * page lock -> cl_loi_list_lock -> object lock(osc_object::oo_lock)
*/
struct osc_extent {
/** red-black tree node */
@@ -627,6 +622,8 @@ struct osc_extent {
unsigned int oe_intree:1,
/** 0 is write, 1 is read */
oe_rw:1,
+ /** sync extent, queued by osc_queue_sync_pages() */
+ oe_sync:1,
oe_srvlock:1,
oe_memalloc:1,
/** an ACTIVE extent is going to be truncated, so when this extent
@@ -675,7 +672,7 @@ struct osc_extent {
*/
wait_queue_head_t oe_waitq;
/** lock covering this extent */
- struct cl_lock *oe_osclock;
+ struct ldlm_lock *oe_dlmlock;
/** terminator of this extent. Must be true if this extent is in IO. */
struct task_struct *oe_owner;
/** return value of writeback. If somebody is waiting for this extent,
@@ -690,6 +687,14 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
int sent, int rc);
void osc_extent_release(const struct lu_env *env, struct osc_extent *ext);
+int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
+ pgoff_t start, pgoff_t end, enum cl_lock_mode mode);
+
+typedef int (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *,
+ struct osc_page *, void *);
+int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
+ struct osc_object *osc, pgoff_t start, pgoff_t end,
+ osc_page_gang_cbt cb, void *cbdata);
/** @} osc */
#endif /* OSC_CL_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index ea695c209..7fad82781 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -83,6 +83,12 @@ struct osc_async_page {
#define oap_count oap_brw_page.count
#define oap_brw_flags oap_brw_page.flag
+static inline struct osc_async_page *brw_page2oap(struct brw_page *pga)
+{
+ return (struct osc_async_page *)container_of(pga, struct osc_async_page,
+ oap_brw_page);
+}
+
struct osc_cache_waiter {
struct list_head ocw_entry;
wait_queue_head_t ocw_waitq;
@@ -102,12 +108,14 @@ void osc_update_next_shrink(struct client_obd *cli);
extern struct ptlrpc_request_set *PTLRPCD_SET;
+typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh,
+ int rc);
+
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
__u64 *flags, ldlm_policy_data_t *policy,
struct ost_lvb *lvb, int kms_valid,
- obd_enqueue_update_f upcall,
+ osc_enqueue_upcall_f upcall,
void *cookie, struct ldlm_enqueue_info *einfo,
- struct lustre_handle *lockh,
struct ptlrpc_request_set *rqset, int async, int agl);
int osc_cancel_base(struct lustre_handle *lockh, __u32 mode);
@@ -130,9 +138,11 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *cfg);
int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
struct list_head *ext_list, int cmd);
-int osc_lru_shrink(struct client_obd *cli, int target);
+int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+ int target, bool force);
+int osc_lru_reclaim(struct client_obd *cli);
-extern spinlock_t osc_ast_guard;
+unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
@@ -173,8 +183,6 @@ static inline struct osc_device *obd2osc_dev(const struct obd_device *d)
return container_of0(d->obd_lu_dev, struct osc_device, od_cl.cd_lu_dev);
}
-int osc_dlm_lock_pageref(struct ldlm_lock *dlm);
-
extern struct kmem_cache *osc_quota_kmem;
struct osc_quota_info {
/** linkage for quota hash table */
@@ -192,5 +200,12 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
struct obd_quotactl *oqctl);
int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk);
+void osc_inc_unstable_pages(struct ptlrpc_request *req);
+void osc_dec_unstable_pages(struct ptlrpc_request *req);
+int osc_over_unstable_soft_limit(struct client_obd *cli);
+
+struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
+ struct osc_object *obj, pgoff_t index,
+ int pending, int canceling);
#endif /* OSC_INTERNAL_H */
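The osc_enqueue_upcall_f typedef above changes the enqueue contract: the upcall now receives the granted lock handle together with the result, so callers no longer pass a lustre_handle into osc_enqueue_base(). A hedged sketch of a conforming upcall (demo_enqueue_upcall is an illustrative name):

static int demo_enqueue_upcall(void *cookie, struct lustre_handle *lockh,
			       int rc)
{
	if (rc == ELDLM_OK || rc == ELDLM_LOCK_MATCHED)
		CDEBUG(D_DLMTRACE, "cookie %p granted, handle %#llx\n",
		       cookie, (unsigned long long)lockh->cookie);
	return ldlm_error2errno(rc);
}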
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index 6bd0a45d8..d534b0e0e 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -68,11 +68,15 @@ static struct osc_io *cl2osc_io(const struct lu_env *env,
return oio;
}
-static struct osc_page *osc_cl_page_osc(struct cl_page *page)
+static struct osc_page *osc_cl_page_osc(struct cl_page *page,
+ struct osc_object *osc)
{
const struct cl_page_slice *slice;
- slice = cl_page_at(page, &osc_device_type);
+ if (osc)
+ slice = cl_object_page_slice(&osc->oo_cl, page);
+ else
+ slice = cl_page_at(page, &osc_device_type);
LASSERT(slice);
return cl2osc_page(slice);
@@ -137,7 +141,7 @@ static int osc_io_submit(const struct lu_env *env,
io = page->cp_owner;
LASSERT(io);
- opg = osc_cl_page_osc(page);
+ opg = osc_cl_page_osc(page, osc);
oap = &opg->ops_oap;
LASSERT(osc == oap->oap_obj);
@@ -164,8 +168,10 @@ static int osc_io_submit(const struct lu_env *env,
}
cl_page_list_move(qout, qin, page);
+ spin_lock(&oap->oap_lock);
oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&oap->oap_lock);
osc_page_submit(env, opg, crt, brw_flags);
list_add_tail(&oap->oap_pending_item, &list);
@@ -185,6 +191,13 @@ static int osc_io_submit(const struct lu_env *env,
return qout->pl_nr > 0 ? 0 : result;
}
+/**
+ * This is called when a page is accessed within a file in a way that creates
+ * a new page, if one was missing (i.e., if there was a hole at that place in
+ * the file, or the accessed page is beyond the current file size).
+ *
+ * Expand stripe KMS if necessary.
+ */
static void osc_page_touch_at(const struct lu_env *env,
struct cl_object *obj, pgoff_t idx, unsigned to)
{
@@ -208,7 +221,8 @@ static void osc_page_touch_at(const struct lu_env *env,
kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
loi->loi_lvb.lvb_size);
- valid = 0;
+ attr->cat_mtime = attr->cat_ctime = LTIME_S(CURRENT_TIME);
+ valid = CAT_MTIME | CAT_CTIME;
if (kms > loi->loi_kms) {
attr->cat_kms = kms;
valid |= CAT_KMS;
@@ -221,91 +235,128 @@ static void osc_page_touch_at(const struct lu_env *env,
cl_object_attr_unlock(obj);
}
-/**
- * This is called when a page is accessed within file in a way that creates
- * new page, if one were missing (i.e., if there were a hole at that place in
- * the file, or accessed page is beyond the current file size). Examples:
- * ->commit_write() and ->nopage() methods.
- *
- * Expand stripe KMS if necessary.
- */
-static void osc_page_touch(const struct lu_env *env,
- struct osc_page *opage, unsigned to)
-{
- struct cl_page *page = opage->ops_cl.cpl_page;
- struct cl_object *obj = opage->ops_cl.cpl_obj;
-
- osc_page_touch_at(env, obj, page->cp_index, to);
-}
-
-/**
- * Implements cl_io_operations::cio_prepare_write() method for osc layer.
- *
- * \retval -EIO transfer initiated against this osc will most likely fail
- * \retval 0 transfer initiated against this osc will most likely succeed.
- *
- * The reason for this check is to immediately return an error to the caller
- * in the case of a deactivated import. Note, that import can be deactivated
- * later, while pages, dirtied by this IO, are still in the cache, but this is
- * irrelevant, because that would still return an error to the application (if
- * it does fsync), but many applications don't do fsync because of performance
- * issues, and we wanted to return an -EIO at write time to notify the
- * application.
- */
-static int osc_io_prepare_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
+static int osc_io_commit_async(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ struct cl_page_list *qin, int from, int to,
+ cl_commit_cbt cb)
{
- struct osc_device *dev = lu2osc_dev(slice->cpl_obj->co_lu.lo_dev);
- struct obd_import *imp = class_exp2cliimp(dev->od_exp);
+ struct cl_io *io = ios->cis_io;
struct osc_io *oio = cl2osc_io(env, ios);
+ struct osc_object *osc = cl2osc(ios->cis_obj);
+ struct cl_page *page;
+ struct cl_page *last_page;
+ struct osc_page *opg;
int result = 0;
- /*
- * This implements OBD_BRW_CHECK logic from old client.
- */
+ LASSERT(qin->pl_nr > 0);
+
+ /* Handle partial page cases */
+ last_page = cl_page_list_last(qin);
+ if (oio->oi_lockless) {
+ page = cl_page_list_first(qin);
+ if (page == last_page) {
+ cl_page_clip(env, page, from, to);
+ } else {
+ if (from != 0)
+ cl_page_clip(env, page, from, PAGE_SIZE);
+ if (to != PAGE_SIZE)
+ cl_page_clip(env, last_page, 0, to);
+ }
+ }
+
+ while (qin->pl_nr > 0) {
+ struct osc_async_page *oap;
+
+ page = cl_page_list_first(qin);
+ opg = osc_cl_page_osc(page, osc);
+ oap = &opg->ops_oap;
+
+ if (!list_empty(&oap->oap_rpc_item)) {
+ CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
+ oap, opg);
+ result = -EBUSY;
+ break;
+ }
+
+ /* The page may already be in the dirty cache. */
+ if (list_empty(&oap->oap_pending_item)) {
+ result = osc_page_cache_add(env, &opg->ops_cl, io);
+ if (result != 0)
+ break;
+ }
+
+ osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
+ page == last_page ? to : PAGE_SIZE);
+
+ cl_page_list_del(env, qin, page);
- if (!imp || imp->imp_invalid)
- result = -EIO;
- if (result == 0 && oio->oi_lockless)
- /* this page contains `invalid' data, but who cares?
- * nobody can access the invalid data.
- * in osc_io_commit_write(), we're going to write exact
- * [from, to) bytes of this page to OST. -jay
+ (*cb)(env, io, page);
+ /* Can't access page any more. Page can be in transfer and
+ * complete at any time.
*/
- cl_page_export(env, slice->cpl_page, 1);
+ }
+ /* For a sync write, the kernel will wait for this page to be flushed
+ * before osc_io_end() is called, so release the active extent earlier.
+ * For mkwrite(), it is known that there are no further pages.
+ */
+ if (cl_io_is_sync_write(io) && oio->oi_active) {
+ osc_extent_release(env, oio->oi_active);
+ oio->oi_active = NULL;
+ }
+
+ CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
return result;
}
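osc_io_commit_async() hands each queued page back through the cl_commit_cbt callback, which owns the page afterwards. A hedged sketch of such a callback, loosely modeled on what the VVP layer does for writes (demo_commit_cb is illustrative, not the actual llite callback):

static void demo_commit_cb(const struct lu_env *env, struct cl_io *io,
			   struct cl_page *page)
{
	struct page *vmpage = page->cp_vmpage;

	SetPageUptodate(vmpage);	/* data now lives in the page cache */
	cl_page_disown(env, io, page);	/* release ownership taken for IO */
}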
-static int osc_io_commit_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
+static int osc_io_rw_iter_init(const struct lu_env *env,
+ const struct cl_io_slice *ios)
{
- struct osc_io *oio = cl2osc_io(env, ios);
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- struct osc_async_page *oap = &opg->ops_oap;
+ struct cl_io *io = ios->cis_io;
+ struct osc_io *oio = osc_env_io(env);
+ struct osc_object *osc = cl2osc(ios->cis_obj);
+ struct client_obd *cli = osc_cli(osc);
+ unsigned long c;
+ unsigned int npages;
+ unsigned int max_pages;
+
+ if (cl_io_is_append(io))
+ return 0;
+
+ npages = io->u.ci_rw.crw_count >> PAGE_SHIFT;
+ if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
+ ++npages;
+
+ max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
+ if (npages > max_pages)
+ npages = max_pages;
+
+ c = atomic_read(cli->cl_lru_left);
+ if (c < npages && osc_lru_reclaim(cli) > 0)
+ c = atomic_read(cli->cl_lru_left);
+ while (c >= npages) {
+ if (c == atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
+ oio->oi_lru_reserved = npages;
+ break;
+ }
+ c = atomic_read(cli->cl_lru_left);
+ }
- LASSERT(to > 0);
- /*
- * XXX instead of calling osc_page_touch() here and in
- * osc_io_fault_start() it might be more logical to introduce
- * cl_page_touch() method, that generic cl_io_commit_write() and page
- * fault code calls.
- */
- osc_page_touch(env, cl2osc_page(slice), to);
- if (!client_is_remote(osc_export(obj)) &&
- capable(CFS_CAP_SYS_RESOURCE))
- oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
+ return 0;
+}
- if (oio->oi_lockless)
- /* see osc_io_prepare_write() for lockless io handling. */
- cl_page_clip(env, slice->cpl_page, from, to);
+static void osc_io_rw_iter_fini(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct osc_io *oio = osc_env_io(env);
+ struct osc_object *osc = cl2osc(ios->cis_obj);
+ struct client_obd *cli = osc_cli(osc);
- return 0;
+ if (oio->oi_lru_reserved > 0) {
+ atomic_add(oio->oi_lru_reserved, cli->cl_lru_left);
+ oio->oi_lru_reserved = 0;
+ }
+ oio->oi_write_osclock = NULL;
}
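The iter_init/iter_fini pair above reserves LRU slots with a lock-free compare-and-swap loop and returns them on exit. The reservation pattern in isolation, as a hedged sketch (reserve_slots is an illustrative name):

static long reserve_slots(atomic_t *left, long want)
{
	long c = atomic_read(left);

	while (c >= want) {
		/* claim 'want' slots only if the counter did not move */
		if (c == atomic_cmpxchg(left, c, c - want))
			return want;
		c = atomic_read(left);	/* lost the race, re-sample */
	}
	return 0;	/* not enough slots, reserve nothing */
}

Releasing is the plain atomic_add() seen in osc_io_rw_iter_fini() above.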
static int osc_io_fault_start(const struct lu_env *env,
@@ -342,31 +393,21 @@ static int osc_async_upcall(void *a, int rc)
* Checks that there are no pages being written in the extent being truncated.
*/
static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
+ struct osc_page *ops, void *cbdata)
{
- const struct cl_page_slice *slice;
- struct osc_page *ops;
+ struct cl_page *page = ops->ops_cl.cpl_page;
struct osc_async_page *oap;
__u64 start = *(__u64 *)cbdata;
- slice = cl_page_at(page, &osc_device_type);
- LASSERT(slice);
- ops = cl2osc_page(slice);
oap = &ops->ops_oap;
-
if (oap->oap_cmd & OBD_BRW_WRITE &&
!list_empty(&oap->oap_pending_item))
CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
start, current->comm);
- {
- struct page *vmpage = cl_page_vmpage(env, page);
-
- if (PageLocked(vmpage))
- CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
- ops, page->cp_index,
- (oap->oap_cmd & OBD_BRW_RWMASK));
- }
+ if (PageLocked(page->cp_vmpage))
+ CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
+ ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK);
return CLP_GANG_OKAY;
}
@@ -385,8 +426,9 @@ static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
/*
* Complain if there are pages in the truncated region.
*/
- cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF,
- trunc_check_cb, (void *)&size);
+ osc_page_gang_lookup(env, io, cl2osc(clob),
+ start + partial, CL_PAGE_EOF,
+ trunc_check_cb, (void *)&size);
}
static int osc_io_setattr_start(const struct lu_env *env,
@@ -650,6 +692,8 @@ static const struct cl_io_operations osc_io_ops = {
.cio_fini = osc_io_fini
},
[CIT_WRITE] = {
+ .cio_iter_init = osc_io_rw_iter_init,
+ .cio_iter_fini = osc_io_rw_iter_fini,
.cio_start = osc_io_write_start,
.cio_end = osc_io_end,
.cio_fini = osc_io_fini
@@ -672,16 +716,8 @@ static const struct cl_io_operations osc_io_ops = {
.cio_fini = osc_io_fini
}
},
- .req_op = {
- [CRT_READ] = {
- .cio_submit = osc_io_submit
- },
- [CRT_WRITE] = {
- .cio_submit = osc_io_submit
- }
- },
- .cio_prepare_write = osc_io_prepare_write,
- .cio_commit_write = osc_io_commit_write
+ .cio_submit = osc_io_submit,
+ .cio_commit_async = osc_io_commit_async
};
/*****************************************************************************
@@ -718,8 +754,7 @@ static void osc_req_attr_set(const struct lu_env *env,
struct lov_oinfo *oinfo;
struct cl_req *clerq;
struct cl_page *apage; /* _some_ page in @clerq */
- struct cl_lock *lock; /* _some_ lock protecting @apage */
- struct osc_lock *olck;
+ struct ldlm_lock *lock; /* _some_ lock protecting @apage */
struct osc_page *opg;
struct obdo *oa;
struct ost_lvb *lvb;
@@ -753,31 +788,32 @@ static void osc_req_attr_set(const struct lu_env *env,
LASSERT(!list_empty(&clerq->crq_pages));
apage = container_of(clerq->crq_pages.next,
struct cl_page, cp_flight);
- opg = osc_cl_page_osc(apage);
- apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */
- lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1);
- if (!lock) {
- struct cl_object_header *head;
- struct cl_lock *scan;
-
- head = cl_object_header(apage->cp_obj);
- list_for_each_entry(scan, &head->coh_locks, cll_linkage)
- CL_LOCK_DEBUG(D_ERROR, env, scan,
- "no cover page!\n");
- CL_PAGE_DEBUG(D_ERROR, env, apage,
- "dump uncover page!\n");
+ opg = osc_cl_page_osc(apage, NULL);
+ lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
+ 1, 1);
+ if (!lock && !opg->ops_srvlock) {
+ struct ldlm_resource *res;
+ struct ldlm_res_id *resname;
+
+ CL_PAGE_DEBUG(D_ERROR, env, apage, "uncovered page!\n");
+
+ resname = &osc_env_info(env)->oti_resname;
+ ostid_build_res_name(&oinfo->loi_oi, resname);
+ res = ldlm_resource_get(
+ osc_export(cl2osc(obj))->exp_obd->obd_namespace,
+ NULL, resname, LDLM_EXTENT, 0);
+ ldlm_resource_dump(D_ERROR, res);
+
dump_stack();
LBUG();
}
- olck = osc_lock_at(lock);
- LASSERT(ergo(opg->ops_srvlock, !olck->ols_lock));
/* check for lockless io. */
- if (olck->ols_lock) {
- oa->o_handle = olck->ols_lock->l_remote_handle;
+ if (lock) {
+ oa->o_handle = lock->l_remote_handle;
oa->o_valid |= OBD_MD_FLHANDLE;
+ LDLM_LOCK_PUT(lock);
}
- cl_lock_put(env, lock);
}
}
@@ -807,8 +843,9 @@ int osc_req_init(const struct lu_env *env, struct cl_device *dev,
if (or) {
cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 013df9787..16f9cd9d3 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -36,6 +36,7 @@
* Implementation of cl_lock for OSC layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_OSC
@@ -50,8 +51,6 @@
* @{
*/
-#define _PAGEREF_MAGIC (-10000000)
-
/*****************************************************************************
*
* Type conversions.
@@ -62,7 +61,6 @@ static const struct cl_lock_operations osc_lock_ops;
static const struct cl_lock_operations osc_lock_lockless_ops;
static void osc_lock_to_lockless(const struct lu_env *env,
struct osc_lock *ols, int force);
-static int osc_lock_has_pages(struct osc_lock *olck);
int osc_lock_is_lockless(const struct osc_lock *olck)
{
@@ -90,11 +88,11 @@ static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
static int osc_lock_invariant(struct osc_lock *ols)
{
struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
- struct ldlm_lock *olock = ols->ols_lock;
+ struct ldlm_lock *olock = ols->ols_dlmlock;
int handle_used = lustre_handle_is_used(&ols->ols_handle);
if (ergo(osc_lock_is_lockless(ols),
- ols->ols_locklessable && !ols->ols_lock))
+ ols->ols_locklessable && !ols->ols_dlmlock))
return 1;
/*
@@ -111,7 +109,7 @@ static int osc_lock_invariant(struct osc_lock *ols)
ergo(!lock, !olock)))
return 0;
/*
- * Check that ->ols_handle and ->ols_lock are consistent, but
+ * Check that ->ols_handle and ->ols_dlmlock are consistent, but
* take into account that they are set at the different time.
*/
if (!ergo(ols->ols_state == OLS_CANCELLED,
@@ -122,7 +120,7 @@ static int osc_lock_invariant(struct osc_lock *ols)
* ast.
*/
if (!ergo(olock && ols->ols_state < OLS_CANCELLED,
- ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
+ !ldlm_is_destroyed(olock)))
return 0;
if (!ergo(ols->ols_state == OLS_GRANTED,
@@ -138,117 +136,13 @@ static int osc_lock_invariant(struct osc_lock *ols)
*
*/
-/**
- * Breaks a link between osc_lock and dlm_lock.
- */
-static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
-{
- struct ldlm_lock *dlmlock;
-
- spin_lock(&osc_ast_guard);
- dlmlock = olck->ols_lock;
- if (!dlmlock) {
- spin_unlock(&osc_ast_guard);
- return;
- }
-
- olck->ols_lock = NULL;
- /* wb(); --- for all who checks (ols->ols_lock != NULL) before
- * call to osc_lock_detach()
- */
- dlmlock->l_ast_data = NULL;
- olck->ols_handle.cookie = 0ULL;
- spin_unlock(&osc_ast_guard);
-
- lock_res_and_lock(dlmlock);
- if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
- struct cl_object *obj = olck->ols_cl.cls_obj;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- __u64 old_kms;
-
- cl_object_attr_lock(obj);
- /* Must get the value under the lock to avoid possible races. */
- old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
- /* Update the kms. Need to loop all granted locks.
- * Not a problem for the client
- */
- attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
-
- cl_object_attr_set(env, obj, attr, CAT_KMS);
- cl_object_attr_unlock(obj);
- }
- unlock_res_and_lock(dlmlock);
-
- /* release a reference taken in osc_lock_upcall0(). */
- LASSERT(olck->ols_has_ref);
- lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
- LDLM_LOCK_RELEASE(dlmlock);
- olck->ols_has_ref = 0;
-}
-
-static int osc_lock_unhold(struct osc_lock *ols)
-{
- int result = 0;
-
- if (ols->ols_hold) {
- ols->ols_hold = 0;
- result = osc_cancel_base(&ols->ols_handle,
- ols->ols_einfo.ei_mode);
- }
- return result;
-}
-
-static int osc_lock_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
-
- LINVRNT(osc_lock_invariant(ols));
-
- switch (ols->ols_state) {
- case OLS_NEW:
- LASSERT(!ols->ols_hold);
- LASSERT(ols->ols_agl);
- return 0;
- case OLS_UPCALL_RECEIVED:
- osc_lock_unhold(ols);
- case OLS_ENQUEUED:
- LASSERT(!ols->ols_hold);
- osc_lock_detach(env, ols);
- ols->ols_state = OLS_NEW;
- return 0;
- case OLS_GRANTED:
- LASSERT(!ols->ols_glimpse);
- LASSERT(ols->ols_hold);
- /*
- * Move lock into OLS_RELEASED state before calling
- * osc_cancel_base() so that possible synchronous cancellation
- * sees that lock is released.
- */
- ols->ols_state = OLS_RELEASED;
- return osc_lock_unhold(ols);
- default:
- CERROR("Impossible state: %d\n", ols->ols_state);
- LBUG();
- }
-}
-
static void osc_lock_fini(const struct lu_env *env,
struct cl_lock_slice *slice)
{
struct osc_lock *ols = cl2osc_lock(slice);
LINVRNT(osc_lock_invariant(ols));
- /*
- * ->ols_hold can still be true at this point if, for example, a
- * thread that requested a lock was killed (and released a reference
- * to the lock), before reply from a server was received. In this case
- * lock is destroyed immediately after upcall.
- */
- osc_lock_unhold(ols);
- LASSERT(!ols->ols_lock);
- LASSERT(atomic_read(&ols->ols_pageref) == 0 ||
- atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
+ LASSERT(!ols->ols_dlmlock);
kmem_cache_free(osc_lock_kmem, ols);
}
@@ -275,55 +169,12 @@ static __u64 osc_enq2ldlm_flags(__u32 enqflags)
result |= LDLM_FL_HAS_INTENT;
if (enqflags & CEF_DISCARD_DATA)
result |= LDLM_FL_AST_DISCARD_DATA;
+ if (enqflags & CEF_PEEK)
+ result |= LDLM_FL_TEST_LOCK;
return result;
}
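The new CEF_PEEK mapping added above turns a peek enqueue into an LDLM test lock, i.e. the request only probes for a compatible granted lock instead of queuing a new one. A trivial, hypothetical check of the mapping:

static bool peek_maps_to_test_lock(void)
{
	/* per the flag translation above */
	return (osc_enq2ldlm_flags(CEF_PEEK) & LDLM_FL_TEST_LOCK) != 0;
}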
/**
- * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
- * pointers. Initialized in osc_init().
- */
-spinlock_t osc_ast_guard;
-
-static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
-{
- struct osc_lock *olck;
-
- lock_res_and_lock(dlm_lock);
- spin_lock(&osc_ast_guard);
- olck = dlm_lock->l_ast_data;
- if (olck) {
- struct cl_lock *lock = olck->ols_cl.cls_lock;
- /*
- * If osc_lock holds a reference on ldlm lock, return it even
- * when cl_lock is in CLS_FREEING state. This way
- *
- * osc_ast_data_get(dlmlock) == NULL
- *
- * guarantees that all osc references on dlmlock were
- * released. osc_dlm_blocking_ast0() relies on that.
- */
- if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
- cl_lock_get_trust(lock);
- lu_ref_add_atomic(&lock->cll_reference,
- "ast", current);
- } else
- olck = NULL;
- }
- spin_unlock(&osc_ast_guard);
- unlock_res_and_lock(dlm_lock);
- return olck;
-}
-
-static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
-{
- struct cl_lock *lock;
-
- lock = olck->ols_cl.cls_lock;
- lu_ref_del(&lock->cll_reference, "ast", current);
- cl_lock_put(env, lock);
-}
-
-/**
* Updates object attributes from a lock value block (lvb) received together
* with the DLM lock reply from the server. Copy of osc_update_enqueue()
* logic.
@@ -333,35 +184,30 @@ static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
*
* Called under lock and resource spin-locks.
*/
-static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
- int rc)
+static void osc_lock_lvb_update(const struct lu_env *env,
+ struct osc_object *osc,
+ struct ldlm_lock *dlmlock,
+ struct ost_lvb *lvb)
{
- struct ost_lvb *lvb;
- struct cl_object *obj;
- struct lov_oinfo *oinfo;
- struct cl_attr *attr;
+ struct cl_object *obj = osc2cl(osc);
+ struct lov_oinfo *oinfo = osc->oo_oinfo;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
unsigned valid;
- if (!(olck->ols_flags & LDLM_FL_LVB_READY))
- return;
-
- lvb = &olck->ols_lvb;
- obj = olck->ols_cl.cls_obj;
- oinfo = cl2osc(obj)->oo_oinfo;
- attr = &osc_env_info(env)->oti_attr;
valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
+ if (!lvb)
+ lvb = dlmlock->l_lvb_data;
+
cl_lvb2attr(attr, lvb);
cl_object_attr_lock(obj);
- if (rc == 0) {
- struct ldlm_lock *dlmlock;
+ if (dlmlock) {
__u64 size;
- dlmlock = olck->ols_lock;
-
- /* re-grab LVB from a dlm lock under DLM spin-locks. */
- *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
+ check_res_locked(dlmlock->l_resource);
+ LASSERT(lvb == dlmlock->l_lvb_data);
size = lvb->lvb_size;
+
/* Extend KMS up to the end of this lock and no further
* A lock on [x,y] means a KMS of up to y + 1 bytes!
*/
@@ -378,102 +224,67 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
dlmlock->l_policy_data.l_extent.end);
}
ldlm_lock_allow_match_locked(dlmlock);
- } else if (rc == -ENAVAIL && olck->ols_glimpse) {
- CDEBUG(D_INODE, "glimpsed, setting rss=%llu; leaving kms=%llu\n",
- lvb->lvb_size, oinfo->loi_kms);
- } else
- valid = 0;
-
- if (valid != 0)
- cl_object_attr_set(env, obj, attr, valid);
+ }
+ cl_object_attr_set(env, obj, attr, valid);
cl_object_attr_unlock(obj);
}
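The KMS rule quoted in the comment above ("a lock on [x, y] means a KMS of up to y + 1 bytes") reduces to a min over the LVB size and the lock end. As a hedged standalone sketch (kms_under_lock is an illustrative helper, not in the patch):

static inline __u64 kms_under_lock(__u64 lvb_size, __u64 lock_end)
{
	/* KMS may extend to the end of the lock, inclusive, but never
	 * past the size the server reported in the LVB */
	return min_t(__u64, lvb_size, lock_end + 1);
}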
-/**
- * Called when a lock is granted, from an upcall (when server returned a
- * granted lock), or from completion AST, when server returned a blocked lock.
- *
- * Called under lock and resource spin-locks, that are released temporarily
- * here.
- */
-static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
- struct ldlm_lock *dlmlock, int rc)
+static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
+ struct lustre_handle *lockh, bool lvb_update)
{
- struct ldlm_extent *ext;
- struct cl_lock *lock;
- struct cl_lock_descr *descr;
+ struct ldlm_lock *dlmlock;
- LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
+ dlmlock = ldlm_handle2lock_long(lockh, 0);
+ LASSERT(dlmlock);
- if (olck->ols_state < OLS_GRANTED) {
- lock = olck->ols_cl.cls_lock;
- ext = &dlmlock->l_policy_data.l_extent;
- descr = &osc_env_info(env)->oti_descr;
- descr->cld_obj = lock->cll_descr.cld_obj;
+ /* lock reference taken by ldlm_handle2lock_long() is
+ * owned by osc_lock and released in osc_lock_detach()
+ */
+ lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
+ oscl->ols_has_ref = 1;
- /* XXX check that ->l_granted_mode is valid. */
- descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
- descr->cld_start = cl_index(descr->cld_obj, ext->start);
- descr->cld_end = cl_index(descr->cld_obj, ext->end);
- descr->cld_gid = ext->gid;
- /*
- * tell upper layers the extent of the lock that was actually
- * granted
- */
- olck->ols_state = OLS_GRANTED;
- osc_lock_lvb_update(env, olck, rc);
-
- /* release DLM spin-locks to allow cl_lock_{modify,signal}()
- * to take a semaphore on a parent lock. This is safe, because
- * spin-locks are needed to protect consistency of
- * dlmlock->l_*_mode and LVB, and we have finished processing
- * them.
+ LASSERT(!oscl->ols_dlmlock);
+ oscl->ols_dlmlock = dlmlock;
+
+ /* This may be a matched lock for a glimpse request; do not hold
+ * a lock reference in that case.
+ */
+ if (!oscl->ols_glimpse) {
+ /* hold a reference for a non-glimpse lock; it will
+ * be released in osc_lock_cancel()
+ */
- unlock_res_and_lock(dlmlock);
- cl_lock_modify(env, lock, descr);
- cl_lock_signal(env, lock);
- LINVRNT(osc_lock_invariant(olck));
- lock_res_and_lock(dlmlock);
+ lustre_handle_copy(&oscl->ols_handle, lockh);
+ ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
+ oscl->ols_hold = 1;
}
-}
-
-static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
-
-{
- struct ldlm_lock *dlmlock;
-
- dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
- LASSERT(dlmlock);
+ /* Lock must have been granted. */
lock_res_and_lock(dlmlock);
- spin_lock(&osc_ast_guard);
- LASSERT(dlmlock->l_ast_data == olck);
- LASSERT(!olck->ols_lock);
- olck->ols_lock = dlmlock;
- spin_unlock(&osc_ast_guard);
+ if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
+ struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
+ struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
- /*
- * Lock might be not yet granted. In this case, completion ast
- * (osc_ldlm_completion_ast()) comes later and finishes lock
- * granting.
- */
- if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
- osc_lock_granted(env, olck, dlmlock, 0);
- unlock_res_and_lock(dlmlock);
+ /* extend the lock extent, otherwise it will cause problems when
+ * we decide whether to grant a lockless lock.
+ */
+ descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
+ descr->cld_start = cl_index(descr->cld_obj, ext->start);
+ descr->cld_end = cl_index(descr->cld_obj, ext->end);
+ descr->cld_gid = ext->gid;
- /*
- * osc_enqueue_interpret() decrefs asynchronous locks, counter
- * this.
- */
- ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
- olck->ols_hold = 1;
+ /* no lvb update for matched lock */
+ if (lvb_update) {
+ LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
+ osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
+ dlmlock, NULL);
+ }
+ LINVRNT(osc_lock_invariant(oscl));
+ }
+ unlock_res_and_lock(dlmlock);
- /* lock reference taken by ldlm_handle2lock_long() is owned by
- * osc_lock and released in osc_lock_detach()
- */
- lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
- olck->ols_has_ref = 1;
+ LASSERT(oscl->ols_state != OLS_GRANTED);
+ oscl->ols_state = OLS_GRANTED;
}
/**
@@ -481,143 +292,124 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
* received from a server, or after osc_enqueue_base() matched a local DLM
* lock.
*/
-static int osc_lock_upcall(void *cookie, int errcode)
+static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
+ int errcode)
{
- struct osc_lock *olck = cookie;
- struct cl_lock_slice *slice = &olck->ols_cl;
- struct cl_lock *lock = slice->cls_lock;
+ struct osc_lock *oscl = cookie;
+ struct cl_lock_slice *slice = &oscl->ols_cl;
struct lu_env *env;
struct cl_env_nest nest;
+ int rc;
env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- int rc;
+ /* should never happen, similar to osc_ldlm_blocking_ast(). */
+ LASSERT(!IS_ERR(env));
+
+ rc = ldlm_error2errno(errcode);
+ if (oscl->ols_state == OLS_ENQUEUED) {
+ oscl->ols_state = OLS_UPCALL_RECEIVED;
+ } else if (oscl->ols_state == OLS_CANCELLED) {
+ rc = -EIO;
+ } else {
+ CERROR("Impossible state: %d\n", oscl->ols_state);
+ LBUG();
+ }
- cl_lock_mutex_get(env, lock);
+ if (rc == 0)
+ osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);
- LASSERT(lock->cll_state >= CLS_QUEUING);
- if (olck->ols_state == OLS_ENQUEUED) {
- olck->ols_state = OLS_UPCALL_RECEIVED;
- rc = ldlm_error2errno(errcode);
- } else if (olck->ols_state == OLS_CANCELLED) {
- rc = -EIO;
- } else {
- CERROR("Impossible state: %d\n", olck->ols_state);
- LBUG();
- }
- if (rc) {
- struct ldlm_lock *dlmlock;
-
- dlmlock = ldlm_handle2lock(&olck->ols_handle);
- if (dlmlock) {
- lock_res_and_lock(dlmlock);
- spin_lock(&osc_ast_guard);
- LASSERT(!olck->ols_lock);
- dlmlock->l_ast_data = NULL;
- olck->ols_handle.cookie = 0ULL;
- spin_unlock(&osc_ast_guard);
- ldlm_lock_fail_match_locked(dlmlock);
- unlock_res_and_lock(dlmlock);
- LDLM_LOCK_PUT(dlmlock);
- }
- } else {
- if (olck->ols_glimpse)
- olck->ols_glimpse = 0;
- osc_lock_upcall0(env, olck);
- }
+ /* Error handling, some errors are tolerable. */
+ if (oscl->ols_locklessable && rc == -EUSERS) {
+ /* This is a tolerable error; turn this lock into
+ * a lockless lock.
+ */
+ osc_object_set_contended(cl2osc(slice->cls_obj));
+ LASSERT(slice->cls_ops == &osc_lock_ops);
+
+ /* Change this lock to ldlmlock-less lock. */
+ osc_lock_to_lockless(env, oscl, 1);
+ oscl->ols_state = OLS_GRANTED;
+ rc = 0;
+ } else if (oscl->ols_glimpse && rc == -ENAVAIL) {
+ LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
+ osc_lock_lvb_update(env, cl2osc(slice->cls_obj),
+ NULL, &oscl->ols_lvb);
+ /* Hide the error. */
+ rc = 0;
+ }
- /* Error handling, some errors are tolerable. */
- if (olck->ols_locklessable && rc == -EUSERS) {
- /* This is a tolerable error, turn this lock into
- * lockless lock.
- */
- osc_object_set_contended(cl2osc(slice->cls_obj));
- LASSERT(slice->cls_ops == &osc_lock_ops);
+ if (oscl->ols_owner)
+ cl_sync_io_note(env, oscl->ols_owner, rc);
+ cl_env_nested_put(&nest, env);
- /* Change this lock to ldlmlock-less lock. */
- osc_lock_to_lockless(env, olck, 1);
- olck->ols_state = OLS_GRANTED;
- rc = 0;
- } else if (olck->ols_glimpse && rc == -ENAVAIL) {
- osc_lock_lvb_update(env, olck, rc);
- cl_lock_delete(env, lock);
- /* Hide the error. */
- rc = 0;
- }
-
- if (rc == 0) {
- /* For AGL case, the RPC sponsor may exits the cl_lock
- * processing without wait() called before related OSC
- * lock upcall(). So update the lock status according
- * to the enqueue result inside AGL upcall().
- */
- if (olck->ols_agl) {
- lock->cll_flags |= CLF_FROM_UPCALL;
- cl_wait_try(env, lock);
- lock->cll_flags &= ~CLF_FROM_UPCALL;
- if (!olck->ols_glimpse)
- olck->ols_agl = 0;
- }
- cl_lock_signal(env, lock);
- /* del user for lock upcall cookie */
- cl_unuse_try(env, lock);
- } else {
- /* del user for lock upcall cookie */
- cl_lock_user_del(env, lock);
- cl_lock_error(env, lock, rc);
- }
+ return rc;
+}
- /* release cookie reference, acquired by osc_lock_enqueue() */
- cl_lock_hold_release(env, lock, "upcall", lock);
- cl_lock_mutex_put(env, lock);
+static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
+ int errcode)
+{
+ struct osc_object *osc = cookie;
+ struct ldlm_lock *dlmlock;
+ struct lu_env *env;
+ struct cl_env_nest nest;
- lu_ref_del(&lock->cll_reference, "upcall", lock);
- /* This maybe the last reference, so must be called after
- * cl_lock_mutex_put().
- */
- cl_lock_put(env, lock);
+ env = cl_env_nested_get(&nest);
+ LASSERT(!IS_ERR(env));
- cl_env_nested_put(&nest, env);
- } else {
- /* should never happen, similar to osc_ldlm_blocking_ast(). */
- LBUG();
+ if (errcode == ELDLM_LOCK_MATCHED) {
+ errcode = ELDLM_OK;
+ goto out;
}
- return errcode;
+
+ if (errcode != ELDLM_OK)
+ goto out;
+
+ dlmlock = ldlm_handle2lock(lockh);
+ LASSERT(dlmlock);
+
+ lock_res_and_lock(dlmlock);
+ LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
+
+ /* there is no osc_lock associated with AGL lock */
+ osc_lock_lvb_update(env, osc, dlmlock, NULL);
+
+ unlock_res_and_lock(dlmlock);
+ LDLM_LOCK_PUT(dlmlock);
+
+out:
+ cl_object_put(env, osc2cl(osc));
+ cl_env_nested_put(&nest, env);
+ return ldlm_error2errno(errcode);
}
-/**
- * Core of osc_dlm_blocking_ast() logic.
- */
-static void osc_lock_blocking(const struct lu_env *env,
- struct ldlm_lock *dlmlock,
- struct osc_lock *olck, int blocking)
+static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
+ enum cl_lock_mode mode, int discard)
{
- struct cl_lock *lock = olck->ols_cl.cls_lock;
+ struct lu_env *env;
+ struct cl_env_nest nest;
+ int rc = 0;
+ int rc2 = 0;
- LASSERT(olck->ols_lock == dlmlock);
- CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
- LASSERT(!osc_lock_is_lockless(olck));
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ if (mode == CLM_WRITE) {
+ rc = osc_cache_writeback_range(env, obj, start, end, 1,
+ discard);
+ CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n",
+ obj, start, end, rc,
+ discard ? "discarded" : "written back");
+ if (rc > 0)
+ rc = 0;
+ }
- /*
- * Lock might be still addref-ed here, if e.g., blocking ast
- * is sent for a failed lock.
- */
- osc_lock_unhold(olck);
+ rc2 = osc_lock_discard_pages(env, obj, start, end, mode);
+ if (rc == 0 && rc2 < 0)
+ rc = rc2;
- if (blocking && olck->ols_state < OLS_BLOCKED)
- /*
- * Move osc_lock into OLS_BLOCKED before canceling the lock,
- * because it recursively re-enters osc_lock_blocking(), with
- * the state set to OLS_CANCELLED.
- */
- olck->ols_state = OLS_BLOCKED;
- /*
- * cancel and destroy lock at least once no matter how blocking ast is
- * entered (see comment above osc_ldlm_blocking_ast() for use
- * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
- */
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
+ cl_env_nested_put(&nest, env);
+ return rc;
}
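A hypothetical caller of the reworked osc_lock_flush(): write back, then discard, the byte range a PW lock covered before the server cancels it (flush_before_cancel is an illustrative name, not in the patch):

static int flush_before_cancel(struct osc_object *obj,
			       struct ldlm_extent *ext, int discard)
{
	struct cl_object *clob = osc2cl(obj);

	return osc_lock_flush(obj, cl_index(clob, ext->start),
			      cl_index(clob, ext->end), CLM_WRITE, discard);
}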
/**
@@ -628,65 +420,63 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env,
struct ldlm_lock *dlmlock,
void *data, int flag)
{
- struct osc_lock *olck;
- struct cl_lock *lock;
- int result;
- int cancel;
-
- LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
-
- cancel = 0;
- olck = osc_ast_data_get(dlmlock);
- if (olck) {
- lock = olck->ols_cl.cls_lock;
- cl_lock_mutex_get(env, lock);
- LINVRNT(osc_lock_invariant(olck));
- if (olck->ols_ast_wait) {
- /* wake up osc_lock_use() */
- cl_lock_signal(env, lock);
- olck->ols_ast_wait = 0;
- }
- /*
- * Lock might have been canceled while this thread was
- * sleeping for lock mutex, but olck is pinned in memory.
- */
- if (olck == dlmlock->l_ast_data) {
- /*
- * NOTE: DLM sends blocking AST's for failed locks
- * (that are still in pre-OLS_GRANTED state)
- * too, and they have to be canceled otherwise
- * DLM lock is never destroyed and stuck in
- * the memory.
- *
- * Alternatively, ldlm_cli_cancel() can be
- * called here directly for osc_locks with
- * ols_state < OLS_GRANTED to maintain an
- * invariant that ->clo_cancel() is only called
- * for locks that were granted.
- */
- LASSERT(data == olck);
- osc_lock_blocking(env, dlmlock,
- olck, flag == LDLM_CB_BLOCKING);
- } else
- cancel = 1;
- cl_lock_mutex_put(env, lock);
- osc_ast_data_put(env, olck);
- } else
- /*
- * DLM lock exists, but there is no cl_lock attached to it.
- * This is a `normal' race. cl_object and its cl_lock's can be
- * removed by memory pressure, together with all pages.
+ struct cl_object *obj = NULL;
+ int result = 0;
+ int discard;
+ enum cl_lock_mode mode = CLM_READ;
+
+ LASSERT(flag == LDLM_CB_CANCELING);
+
+ lock_res_and_lock(dlmlock);
+ if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
+ dlmlock->l_ast_data = NULL;
+ unlock_res_and_lock(dlmlock);
+ return 0;
+ }
+
+ discard = ldlm_is_discard_data(dlmlock);
+ if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
+ mode = CLM_WRITE;
+
+ if (dlmlock->l_ast_data) {
+ obj = osc2cl(dlmlock->l_ast_data);
+ dlmlock->l_ast_data = NULL;
+
+ cl_object_get(obj);
+ }
+
+ unlock_res_and_lock(dlmlock);
+
+ /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
+ * the object has been destroyed.
+ */
+ if (obj) {
+ struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
+ __u64 old_kms;
+
+ /* Destroy pages covered by the extent of the DLM lock */
+ result = osc_lock_flush(cl2osc(obj),
+ cl_index(obj, extent->start),
+ cl_index(obj, extent->end),
+ mode, discard);
+
+ /* losing a lock, update kms */
+ lock_res_and_lock(dlmlock);
+ cl_object_attr_lock(obj);
+ /* Must get the value under the lock to avoid race. */
+ old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
+ /* Update the kms. Need to loop all granted locks.
+ * Not a problem for the client
*/
- cancel = (flag == LDLM_CB_BLOCKING);
+ attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
- if (cancel) {
- struct lustre_handle *lockh;
+ cl_object_attr_set(env, obj, attr, CAT_KMS);
+ cl_object_attr_unlock(obj);
+ unlock_res_and_lock(dlmlock);
- lockh = &osc_env_info(env)->oti_handle;
- ldlm_lock2handle(dlmlock, lockh);
- result = ldlm_cli_cancel(lockh, LCF_ASYNC);
- } else
- result = 0;
+ cl_object_put(env, obj);
+ }
return result;
}
@@ -736,107 +526,52 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
struct ldlm_lock_desc *new, void *data,
int flag)
{
- struct lu_env *env;
- struct cl_env_nest nest;
- int result;
+ int result = 0;
- /*
- * This can be called in the context of outer IO, e.g.,
- *
- * cl_enqueue()->...
- * ->osc_enqueue_base()->...
- * ->ldlm_prep_elc_req()->...
- * ->ldlm_cancel_callback()->...
- * ->osc_ldlm_blocking_ast()
- *
- * new environment has to be created to not corrupt outer context.
- */
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
- cl_env_nested_put(&nest, env);
- } else {
- result = PTR_ERR(env);
- /*
- * XXX This should never happen, as cl_lock is
- * stuck. Pre-allocated environment a la vvp_inode_fini_env
- * should be used.
- */
- LBUG();
- }
- if (result != 0) {
+ switch (flag) {
+ case LDLM_CB_BLOCKING: {
+ struct lustre_handle lockh;
+
+ ldlm_lock2handle(dlmlock, &lockh);
+ result = ldlm_cli_cancel(&lockh, LCF_ASYNC);
if (result == -ENODATA)
result = 0;
- else
- CERROR("BAST failed: %d\n", result);
+ break;
}
- return result;
-}
+ case LDLM_CB_CANCELING: {
+ struct lu_env *env;
+ struct cl_env_nest nest;
-static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
- __u64 flags, void *data)
-{
- struct cl_env_nest nest;
- struct lu_env *env;
- struct osc_lock *olck;
- struct cl_lock *lock;
- int result;
- int dlmrc;
-
- /* first, do dlm part of the work */
- dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
- /* then, notify cl_lock */
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- olck = osc_ast_data_get(dlmlock);
- if (olck) {
- lock = olck->ols_cl.cls_lock;
- cl_lock_mutex_get(env, lock);
- /*
- * ldlm_handle_cp_callback() copied LVB from request
- * to lock->l_lvb_data, store it in osc_lock.
- */
- LASSERT(dlmlock->l_lvb_data);
- lock_res_and_lock(dlmlock);
- olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
- if (!olck->ols_lock) {
- /*
- * upcall (osc_lock_upcall()) hasn't yet been
- * called. Do nothing now, upcall will bind
- * olck to dlmlock and signal the waiters.
- *
- * This maintains an invariant that osc_lock
- * and ldlm_lock are always bound when
- * osc_lock is in OLS_GRANTED state.
- */
- } else if (dlmlock->l_granted_mode ==
- dlmlock->l_req_mode) {
- osc_lock_granted(env, olck, dlmlock, dlmrc);
- }
- unlock_res_and_lock(dlmlock);
+ /*
+ * This can be called in the context of outer IO, e.g.,
+ *
+ * osc_enqueue_base()->...
+ * ->ldlm_prep_elc_req()->...
+ * ->ldlm_cancel_callback()->...
+ * ->osc_ldlm_blocking_ast()
+ *
+ * new environment has to be created to not corrupt outer
+ * context.
+ */
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env)) {
+ result = PTR_ERR(env);
+ break;
+ }
- if (dlmrc != 0) {
- CL_LOCK_DEBUG(D_ERROR, env, lock,
- "dlmlock returned %d\n", dlmrc);
- cl_lock_error(env, lock, dlmrc);
- }
- cl_lock_mutex_put(env, lock);
- osc_ast_data_put(env, olck);
- result = 0;
- } else
- result = -ELDLM_NO_LOCK_DATA;
+ result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
cl_env_nested_put(&nest, env);
- } else
- result = PTR_ERR(env);
- return dlmrc ?: result;
+ break;
+ }
+ default:
+ LBUG();
+ }
+ return result;
}
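The CANCELING branch above uses the nested-environment idiom because the AST can run inside an outer IO context. The idiom in isolation, as a hedged sketch (with_nested_env is an illustrative wrapper, not in the patch):

static int with_nested_env(int (*body)(const struct lu_env *env, void *arg),
			   void *arg)
{
	struct cl_env_nest nest;
	struct lu_env *env;
	int rc;

	env = cl_env_nested_get(&nest);	/* fresh env, outer one untouched */
	if (IS_ERR(env))
		return PTR_ERR(env);
	rc = body(env, arg);
	cl_env_nested_put(&nest, env);
	return rc;
}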
static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
{
struct ptlrpc_request *req = data;
- struct osc_lock *olck;
- struct cl_lock *lock;
- struct cl_object *obj;
struct cl_env_nest nest;
struct lu_env *env;
struct ost_lvb *lvb;
@@ -847,14 +582,16 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
env = cl_env_nested_get(&nest);
if (!IS_ERR(env)) {
- /* osc_ast_data_get() has to go after environment is
- * allocated, because osc_ast_data() acquires a
- * reference to a lock, and it can only be released in
- * environment.
- */
- olck = osc_ast_data_get(dlmlock);
- if (olck) {
- lock = olck->ols_cl.cls_lock;
+ struct cl_object *obj = NULL;
+
+ lock_res_and_lock(dlmlock);
+ if (dlmlock->l_ast_data) {
+ obj = osc2cl(dlmlock->l_ast_data);
+ cl_object_get(obj);
+ }
+ unlock_res_and_lock(dlmlock);
+
+ if (obj) {
/* Do not grab the mutex of cl_lock for glimpse.
* See LU-1274 for details.
* BTW, it's okay for cl_lock to be cancelled during
@@ -869,7 +606,6 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
result = req_capsule_server_pack(cap);
if (result == 0) {
lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
- obj = lock->cll_descr.cld_obj;
result = cl_object_glimpse(env, obj, lvb);
}
if (!exp_connect_lvb_type(req->rq_export))
@@ -877,7 +613,7 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
&RMF_DLM_LVB,
sizeof(struct ost_lvb_v1),
RCL_SERVER);
- osc_ast_data_put(env, olck);
+ cl_object_put(env, obj);
} else {
/*
* These errors are normal races, so we don't want to
@@ -888,44 +624,123 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
result = -ELDLM_NO_LOCK_DATA;
}
cl_env_nested_put(&nest, env);
- } else
+ } else {
result = PTR_ERR(env);
+ }
req->rq_status = result;
return result;
}
-static unsigned long osc_lock_weigh(const struct lu_env *env,
- const struct cl_lock_slice *slice)
+static int weigh_cb(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops, void *cbdata)
{
- /*
- * don't need to grab coh_page_guard since we don't care the exact #
- * of pages..
- */
- return cl_object_header(slice->cls_obj)->coh_pages;
+ struct cl_page *page = ops->ops_cl.cpl_page;
+
+ if (cl_page_is_vmlocked(env, page) ||
+ PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage)
+ ) {
+ (*(unsigned long *)cbdata)++;
+ return CLP_GANG_ABORT;
+ }
+
+ return CLP_GANG_OKAY;
}
-static void osc_lock_build_einfo(const struct lu_env *env,
- const struct cl_lock *clock,
- struct osc_lock *lock,
- struct ldlm_enqueue_info *einfo)
+static unsigned long osc_lock_weight(const struct lu_env *env,
+ struct osc_object *oscobj,
+ struct ldlm_extent *extent)
+{
+ struct cl_io *io = &osc_env_info(env)->oti_io;
+ struct cl_object *obj = cl_object_top(&oscobj->oo_cl);
+ unsigned long npages = 0;
+ int result;
+
+ io->ci_obj = obj;
+ io->ci_ignore_layout = 1;
+ result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
+ if (result != 0)
+ return result;
+
+ do {
+ result = osc_page_gang_lookup(env, io, oscobj,
+ cl_index(obj, extent->start),
+ cl_index(obj, extent->end),
+ weigh_cb, (void *)&npages);
+ if (result == CLP_GANG_ABORT)
+ break;
+ if (result == CLP_GANG_RESCHED)
+ cond_resched();
+ } while (result != CLP_GANG_OKAY);
+ cl_io_fini(env, io);
+
+ return npages;
+}
+
+/**
+ * Get the weight of dlm lock for early cancellation.
+ */
+unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
{
- enum cl_lock_mode mode;
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ struct osc_object *obj;
+ struct osc_lock *oscl;
+ unsigned long weight;
+ bool found = false;
+
+ might_sleep();
+ /*
+ * osc_ldlm_weigh_ast() has a complex context since it might be called
+ * because of lock canceling, or from user input. We have to make
+ * a new environment for it. It is probably safe to use the upper
+ * context because cl_lock_put() does not modify environment
+ * variables. But just in case ...
+ */
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ /* Mostly because lack of memory, do not eliminate this lock */
+ return 1;
- mode = clock->cll_descr.cld_mode;
- if (mode == CLM_PHANTOM)
+ LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
+ obj = dlmlock->l_ast_data;
+ if (!obj) {
+ weight = 1;
+ goto out;
+ }
+
+ spin_lock(&obj->oo_ol_spin);
+ list_for_each_entry(oscl, &obj->oo_ol_list, ols_nextlock_oscobj) {
+ if (oscl->ols_dlmlock && oscl->ols_dlmlock != dlmlock)
+ continue;
+ found = true;
+ break;
+ }
+ spin_unlock(&obj->oo_ol_spin);
+ if (found) {
/*
- * For now, enqueue all glimpse locks in read mode. In the
- * future, client might choose to enqueue LCK_PW lock for
- * glimpse on a file opened for write.
+ * If the lock is being used by an IO, definitely do not cancel it.
*/
- mode = CLM_READ;
+ weight = 1;
+ goto out;
+ }
+
+ weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent);
+
+out:
+ cl_env_nested_put(&nest, env);
+ return weight;
+}
+static void osc_lock_build_einfo(const struct lu_env *env,
+ const struct cl_lock *lock,
+ struct osc_object *osc,
+ struct ldlm_enqueue_info *einfo)
+{
einfo->ei_type = LDLM_EXTENT;
- einfo->ei_mode = osc_cl_lock2ldlm(mode);
+ einfo->ei_mode = osc_cl_lock2ldlm(lock->cll_descr.cld_mode);
einfo->ei_cb_bl = osc_ldlm_blocking_ast;
- einfo->ei_cb_cp = osc_ldlm_completion_ast;
+ einfo->ei_cb_cp = ldlm_completion_ast;
einfo->ei_cb_gl = osc_ldlm_glimpse_ast;
- einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
+ einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */
}
/**
@@ -981,113 +796,100 @@ static void osc_lock_to_lockless(const struct lu_env *env,
LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
}
-static int osc_lock_compatible(const struct osc_lock *qing,
- const struct osc_lock *qed)
+static bool osc_lock_compatible(const struct osc_lock *qing,
+ const struct osc_lock *qed)
{
- enum cl_lock_mode qing_mode;
- enum cl_lock_mode qed_mode;
+ struct cl_lock_descr *qed_descr = &qed->ols_cl.cls_lock->cll_descr;
+ struct cl_lock_descr *qing_descr = &qing->ols_cl.cls_lock->cll_descr;
- qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
- if (qed->ols_glimpse &&
- (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
- return 1;
+ if (qed->ols_glimpse)
+ return true;
+
+ if (qing_descr->cld_mode == CLM_READ && qed_descr->cld_mode == CLM_READ)
+ return true;
+
+ if (qed->ols_state < OLS_GRANTED)
+ return true;
+
+ if (qed_descr->cld_mode >= qing_descr->cld_mode &&
+ qed_descr->cld_start <= qing_descr->cld_start &&
+ qed_descr->cld_end >= qing_descr->cld_end)
+ return true;
- qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
- return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
+ return false;
}
-/**
- * Cancel all conflicting locks and wait for them to be destroyed.
- *
- * This function is used for two purposes:
- *
- * - early cancel all conflicting locks before starting IO, and
- *
- * - guarantee that pages added to the page cache by lockless IO are never
- * covered by locks other than lockless IO lock, and, hence, are not
- * visible to other threads.
- */
-static int osc_lock_enqueue_wait(const struct lu_env *env,
- const struct osc_lock *olck)
+static void osc_lock_wake_waiters(const struct lu_env *env,
+ struct osc_object *osc,
+ struct osc_lock *oscl)
{
- struct cl_lock *lock = olck->ols_cl.cls_lock;
- struct cl_lock_descr *descr = &lock->cll_descr;
- struct cl_object_header *hdr = cl_object_header(descr->cld_obj);
- struct cl_lock *scan;
- struct cl_lock *conflict = NULL;
- int lockless = osc_lock_is_lockless(olck);
- int rc = 0;
+ spin_lock(&osc->oo_ol_spin);
+ list_del_init(&oscl->ols_nextlock_oscobj);
+ spin_unlock(&osc->oo_ol_spin);
- LASSERT(cl_lock_is_mutexed(lock));
+ spin_lock(&oscl->ols_lock);
+ while (!list_empty(&oscl->ols_waiting_list)) {
+ struct osc_lock *scan;
- /* make it enqueue anyway for glimpse lock, because we actually
- * don't need to cancel any conflicting locks.
- */
- if (olck->ols_glimpse)
- return 0;
+ scan = list_entry(oscl->ols_waiting_list.next, struct osc_lock,
+ ols_wait_entry);
+ list_del_init(&scan->ols_wait_entry);
- spin_lock(&hdr->coh_lock_guard);
- list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
- struct cl_lock_descr *cld = &scan->cll_descr;
- const struct osc_lock *scan_ols;
+ cl_sync_io_note(env, scan->ols_owner, 0);
+ }
+ spin_unlock(&oscl->ols_lock);
+}
+
+static void osc_lock_enqueue_wait(const struct lu_env *env,
+ struct osc_object *obj,
+ struct osc_lock *oscl)
+{
+ struct osc_lock *tmp_oscl;
+ struct cl_lock_descr *need = &oscl->ols_cl.cls_lock->cll_descr;
+ struct cl_sync_io *waiter = &osc_env_info(env)->oti_anchor;
- if (scan == lock)
+ spin_lock(&obj->oo_ol_spin);
+ list_add_tail(&oscl->ols_nextlock_oscobj, &obj->oo_ol_list);
+
+restart:
+ list_for_each_entry(tmp_oscl, &obj->oo_ol_list,
+ ols_nextlock_oscobj) {
+ struct cl_lock_descr *descr;
+
+ if (tmp_oscl == oscl)
break;
- if (scan->cll_state < CLS_QUEUING ||
- scan->cll_state == CLS_FREEING ||
- cld->cld_start > descr->cld_end ||
- cld->cld_end < descr->cld_start)
+ descr = &tmp_oscl->ols_cl.cls_lock->cll_descr;
+ if (descr->cld_start > need->cld_end ||
+ descr->cld_end < need->cld_start)
continue;
- /* overlapped and living locks. */
+ /* We're not supposed to give up group lock */
+ if (descr->cld_mode == CLM_GROUP)
+ break;
- /* We're not supposed to give up group lock. */
- if (scan->cll_descr.cld_mode == CLM_GROUP) {
- LASSERT(descr->cld_mode != CLM_GROUP ||
- descr->cld_gid != scan->cll_descr.cld_gid);
+ if (!osc_lock_is_lockless(oscl) &&
+ osc_lock_compatible(oscl, tmp_oscl))
continue;
- }
- scan_ols = osc_lock_at(scan);
+ /* wait for conflicting lock to be canceled */
+ cl_sync_io_init(waiter, 1, cl_sync_io_end);
+ oscl->ols_owner = waiter;
- /* We need to cancel the compatible locks if we're enqueuing
- * a lockless lock, for example:
- * imagine that client has PR lock on [0, 1000], and thread T0
- * is doing lockless IO in [500, 1500] region. Concurrent
- * thread T1 can see lockless data in [500, 1000], which is
- * wrong, because these data are possibly stale.
- */
- if (!lockless && osc_lock_compatible(olck, scan_ols))
- continue;
+ spin_lock(&tmp_oscl->ols_lock);
+ /* add oscl into tmp's ols_waiting list */
+ list_add_tail(&oscl->ols_wait_entry,
+ &tmp_oscl->ols_waiting_list);
+ spin_unlock(&tmp_oscl->ols_lock);
- cl_lock_get_trust(scan);
- conflict = scan;
- break;
- }
- spin_unlock(&hdr->coh_lock_guard);
+ spin_unlock(&obj->oo_ol_spin);
+ (void)cl_sync_io_wait(env, waiter, 0);
- if (conflict) {
- if (lock->cll_descr.cld_mode == CLM_GROUP) {
- /* we want a group lock but a previous lock request
- * conflicts, we do not wait but return 0 so the
- * request is send to the server
- */
- CDEBUG(D_DLMTRACE, "group lock %p is conflicted with %p, no wait, send to server\n",
- lock, conflict);
- cl_lock_put(env, conflict);
- rc = 0;
- } else {
- CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n",
- lock, conflict);
- LASSERT(!lock->cll_conflict);
- lu_ref_add(&conflict->cll_reference, "cancel-wait",
- lock);
- lock->cll_conflict = conflict;
- rc = CLO_WAIT;
- }
+ spin_lock(&obj->oo_ol_spin);
+ oscl->ols_owner = NULL;
+ goto restart;
}
- return rc;
+ spin_unlock(&obj->oo_ol_spin);
}
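The handshake above is the generic cl_sync_io pattern: the enqueuing thread initializes a one-shot anchor, publishes it as ols_owner, and sleeps; whoever cancels the conflicting lock calls cl_sync_io_note() on it. In isolation, as a hedged sketch (wait_for_peer/wake_peer are illustrative names):

static void wait_for_peer(const struct lu_env *env, struct cl_sync_io *anchor)
{
	cl_sync_io_init(anchor, 1, cl_sync_io_end);	/* expect one note() */
	/* ... publish 'anchor' where the peer thread can find it ... */
	cl_sync_io_wait(env, anchor, 0);		/* 0: no timeout */
}

static void wake_peer(const struct lu_env *env, struct cl_sync_io *anchor)
{
	cl_sync_io_note(env, anchor, 0);	/* 0: success */
}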
/**
@@ -1106,188 +908,122 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
*/
static int osc_lock_enqueue(const struct lu_env *env,
const struct cl_lock_slice *slice,
- struct cl_io *unused, __u32 enqflags)
+ struct cl_io *unused, struct cl_sync_io *anchor)
{
- struct osc_lock *ols = cl2osc_lock(slice);
- struct cl_lock *lock = ols->ols_cl.cls_lock;
+ struct osc_thread_info *info = osc_env_info(env);
+ struct osc_io *oio = osc_env_io(env);
+ struct osc_object *osc = cl2osc(slice->cls_obj);
+ struct osc_lock *oscl = cl2osc_lock(slice);
+ struct cl_lock *lock = slice->cls_lock;
+ struct ldlm_res_id *resname = &info->oti_resname;
+ ldlm_policy_data_t *policy = &info->oti_policy;
+ osc_enqueue_upcall_f upcall = osc_lock_upcall;
+ void *cookie = oscl;
+ bool async = false;
int result;
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERTF(ols->ols_state == OLS_NEW,
- "Impossible state: %d\n", ols->ols_state);
-
- LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
- "lock = %p, ols = %p\n", lock, ols);
+ LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
+ "lock = %p, ols = %p\n", lock, oscl);
- result = osc_lock_enqueue_wait(env, ols);
- if (result == 0) {
- if (!osc_lock_is_lockless(ols)) {
- struct osc_object *obj = cl2osc(slice->cls_obj);
- struct osc_thread_info *info = osc_env_info(env);
- struct ldlm_res_id *resname = &info->oti_resname;
- ldlm_policy_data_t *policy = &info->oti_policy;
- struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
+ if (oscl->ols_state == OLS_GRANTED)
+ return 0;
- /* lock will be passed as upcall cookie,
- * hold ref to prevent to be released.
- */
- cl_lock_hold_add(env, lock, "upcall", lock);
- /* a user for lock also */
- cl_lock_user_add(env, lock);
- ols->ols_state = OLS_ENQUEUED;
+ if (oscl->ols_flags & LDLM_FL_TEST_LOCK)
+ goto enqueue_base;
- /*
- * XXX: this is possible blocking point as
- * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
- * LDLM_CP_CALLBACK.
- */
- ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
- osc_lock_build_policy(env, lock, policy);
- result = osc_enqueue_base(osc_export(obj), resname,
- &ols->ols_flags, policy,
- &ols->ols_lvb,
- obj->oo_oinfo->loi_kms_valid,
- osc_lock_upcall,
- ols, einfo, &ols->ols_handle,
- PTLRPCD_SET, 1, ols->ols_agl);
- if (result != 0) {
- cl_lock_user_del(env, lock);
- cl_lock_unhold(env, lock, "upcall", lock);
- if (unlikely(result == -ECANCELED)) {
- ols->ols_state = OLS_NEW;
- result = 0;
- }
- }
- } else {
- ols->ols_state = OLS_GRANTED;
- ols->ols_owner = osc_env_io(env);
- }
+ if (oscl->ols_glimpse) {
+ LASSERT(equi(oscl->ols_agl, !anchor));
+ async = true;
+ goto enqueue_base;
}
- LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
- return result;
-}
-static int osc_lock_wait(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *olck = cl2osc_lock(slice);
- struct cl_lock *lock = olck->ols_cl.cls_lock;
-
- LINVRNT(osc_lock_invariant(olck));
-
- if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
- if (olck->ols_flags & LDLM_FL_LVB_READY) {
- return 0;
- } else if (olck->ols_agl) {
- if (lock->cll_flags & CLF_FROM_UPCALL)
- /* It is from enqueue RPC reply upcall for
- * updating state. Do not re-enqueue.
- */
- return -ENAVAIL;
- olck->ols_state = OLS_NEW;
- } else {
- LASSERT(lock->cll_error);
- return lock->cll_error;
- }
+ osc_lock_enqueue_wait(env, osc, oscl);
+
+ /* we can grant a lockless lock right after all conflicting locks
+ * are canceled.
+ */
+ if (osc_lock_is_lockless(oscl)) {
+ oscl->ols_state = OLS_GRANTED;
+ oio->oi_lockless = 1;
+ return 0;
}
- if (olck->ols_state == OLS_NEW) {
- int rc;
-
- LASSERT(olck->ols_agl);
- olck->ols_agl = 0;
- olck->ols_flags &= ~LDLM_FL_BLOCK_NOWAIT;
- rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST);
- if (rc != 0)
- return rc;
- else
- return CLO_REENQUEUED;
+enqueue_base:
+ oscl->ols_state = OLS_ENQUEUED;
+ if (anchor) {
+ atomic_inc(&anchor->csi_sync_nr);
+ oscl->ols_owner = anchor;
}
- LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
- lock->cll_error == 0, olck->ols_lock));
+ /**
+ * The DLM lock's ast data must be the osc_object;
+ * for a glimpse or AGL lock, the async flag of osc_enqueue_base() must
+ * be true, and the DLM enqueue callback is set to osc_lock_upcall()
+ * with the osc_lock as its cookie.
+ */
+ ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
+ osc_lock_build_einfo(env, lock, osc, &oscl->ols_einfo);
+ osc_lock_build_policy(env, lock, policy);
+ if (oscl->ols_agl) {
+ oscl->ols_einfo.ei_cbdata = NULL;
+ /* hold a reference for callback */
+ cl_object_get(osc2cl(osc));
+ upcall = osc_lock_upcall_agl;
+ cookie = osc;
+ }
+ result = osc_enqueue_base(osc_export(osc), resname, &oscl->ols_flags,
+ policy, &oscl->ols_lvb,
+ osc->oo_oinfo->loi_kms_valid,
+ upcall, cookie,
+ &oscl->ols_einfo, PTLRPCD_SET, async,
+ oscl->ols_agl);
+ if (result != 0) {
+ oscl->ols_state = OLS_CANCELLED;
+ osc_lock_wake_waiters(env, osc, oscl);
- return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
+ /* hide error for AGL lock. */
+ if (oscl->ols_agl) {
+ cl_object_put(env, osc2cl(osc));
+ result = 0;
+ }
+ if (anchor)
+ cl_sync_io_note(env, anchor, result);
+ } else {
+ if (osc_lock_is_lockless(oscl)) {
+ oio->oi_lockless = 1;
+ } else if (!async) {
+ LASSERT(oscl->ols_state == OLS_GRANTED);
+ LASSERT(oscl->ols_hold);
+ LASSERT(oscl->ols_dlmlock);
+ }
+ }
+ return result;
}
/**
- * An implementation of cl_lock_operations::clo_use() method that pins cached
- * lock.
+ * Breaks a link between osc_lock and dlm_lock.
*/
-static int osc_lock_use(const struct lu_env *env,
- const struct cl_lock_slice *slice)
+static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
- struct osc_lock *olck = cl2osc_lock(slice);
- int rc;
-
- LASSERT(!olck->ols_hold);
+ struct ldlm_lock *dlmlock;
- /*
- * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
- * flag is not set. This protects us from a concurrent blocking ast.
- */
- rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
- if (rc == 0) {
- olck->ols_hold = 1;
- olck->ols_state = OLS_GRANTED;
- } else {
- struct cl_lock *lock;
+ dlmlock = olck->ols_dlmlock;
+ if (!dlmlock)
+ return;
- /*
- * Lock is being cancelled somewhere within
- * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
- * set, but osc_ldlm_blocking_ast() hasn't yet acquired
- * cl_lock mutex.
- */
- lock = slice->cls_lock;
- LASSERT(lock->cll_state == CLS_INTRANSIT);
- LASSERT(lock->cll_users > 0);
- /* set a flag for osc_dlm_blocking_ast0() to signal the
- * lock.
- */
- olck->ols_ast_wait = 1;
- rc = CLO_WAIT;
+ if (olck->ols_hold) {
+ olck->ols_hold = 0;
+ osc_cancel_base(&olck->ols_handle, olck->ols_einfo.ei_mode);
+ olck->ols_handle.cookie = 0ULL;
}
- return rc;
-}
-static int osc_lock_flush(struct osc_lock *ols, int discard)
-{
- struct cl_lock *lock = ols->ols_cl.cls_lock;
- struct cl_env_nest nest;
- struct lu_env *env;
- int result = 0;
-
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- struct osc_object *obj = cl2osc(ols->ols_cl.cls_obj);
- struct cl_lock_descr *descr = &lock->cll_descr;
- int rc = 0;
-
- if (descr->cld_mode >= CLM_WRITE) {
- result = osc_cache_writeback_range(env, obj,
- descr->cld_start,
- descr->cld_end,
- 1, discard);
- LDLM_DEBUG(ols->ols_lock,
- "lock %p: %d pages were %s.\n", lock, result,
- discard ? "discarded" : "written");
- if (result > 0)
- result = 0;
- }
+ olck->ols_dlmlock = NULL;
- rc = cl_lock_discard_pages(env, lock);
- if (result == 0 && rc < 0)
- result = rc;
-
- cl_env_nested_put(&nest, env);
- } else
- result = PTR_ERR(env);
- if (result == 0) {
- ols->ols_flush = 1;
- LINVRNT(!osc_lock_has_pages(ols));
- }
- return result;
+ /* release a reference taken in osc_lock_upcall(). */
+ LASSERT(olck->ols_has_ref);
+ lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
+ LDLM_LOCK_RELEASE(dlmlock);
+ olck->ols_has_ref = 0;
}
/**
@@ -1307,96 +1043,16 @@ static int osc_lock_flush(struct osc_lock *ols, int discard)
static void osc_lock_cancel(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
- struct cl_lock *lock = slice->cls_lock;
- struct osc_lock *olck = cl2osc_lock(slice);
- struct ldlm_lock *dlmlock = olck->ols_lock;
- int result = 0;
- int discard;
-
- LASSERT(cl_lock_is_mutexed(lock));
- LINVRNT(osc_lock_invariant(olck));
-
- if (dlmlock) {
- int do_cancel;
-
- discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
- if (olck->ols_state >= OLS_GRANTED)
- result = osc_lock_flush(olck, discard);
- osc_lock_unhold(olck);
-
- lock_res_and_lock(dlmlock);
- /* Now that we're the only user of dlm read/write reference,
- * mostly the ->l_readers + ->l_writers should be zero.
- * However, there is a corner case.
- * See bug 18829 for details.
- */
- do_cancel = (dlmlock->l_readers == 0 &&
- dlmlock->l_writers == 0);
- dlmlock->l_flags |= LDLM_FL_CBPENDING;
- unlock_res_and_lock(dlmlock);
- if (do_cancel)
- result = ldlm_cli_cancel(&olck->ols_handle, LCF_ASYNC);
- if (result < 0)
- CL_LOCK_DEBUG(D_ERROR, env, lock,
- "lock %p cancel failure with error(%d)\n",
- lock, result);
- }
- olck->ols_state = OLS_CANCELLED;
- olck->ols_flags &= ~LDLM_FL_LVB_READY;
- osc_lock_detach(env, olck);
-}
-
-static int osc_lock_has_pages(struct osc_lock *olck)
-{
- return 0;
-}
-
-static void osc_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *olck;
+ struct osc_object *obj = cl2osc(slice->cls_obj);
+ struct osc_lock *oscl = cl2osc_lock(slice);
- olck = cl2osc_lock(slice);
- if (olck->ols_glimpse) {
- LASSERT(!olck->ols_hold);
- LASSERT(!olck->ols_lock);
- return;
- }
+ LINVRNT(osc_lock_invariant(oscl));
- LINVRNT(osc_lock_invariant(olck));
- LINVRNT(!osc_lock_has_pages(olck));
+ osc_lock_detach(env, oscl);
+ oscl->ols_state = OLS_CANCELLED;
+ oscl->ols_flags &= ~LDLM_FL_LVB_READY;
- osc_lock_unhold(olck);
- osc_lock_detach(env, olck);
-}
-
-/**
- * Implements cl_lock_operations::clo_state() method for osc layer.
- *
- * Maintains osc_lock::ols_owner field.
- *
- * This assumes that lock always enters CLS_HELD (from some other state) in
- * the same IO context as one that requested the lock. This should not be a
- * problem, because context is by definition shared by all activity pertaining
- * to the same high-level IO.
- */
-static void osc_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct osc_lock *lock = cl2osc_lock(slice);
-
- /*
- * XXX multiple io contexts can use the lock at the same time.
- */
- LINVRNT(osc_lock_invariant(lock));
- if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
- struct osc_io *oio = osc_env_io(env);
-
- LASSERT(!lock->ols_owner);
- lock->ols_owner = oio;
- } else if (state != CLS_HELD)
- lock->ols_owner = NULL;
+ osc_lock_wake_waiters(env, obj, oscl);
}
static int osc_lock_print(const struct lu_env *env, void *cookie,
@@ -1404,221 +1060,161 @@ static int osc_lock_print(const struct lu_env *env, void *cookie,
{
struct osc_lock *lock = cl2osc_lock(slice);
- /*
- * XXX print ldlm lock and einfo properly.
- */
(*p)(env, cookie, "%p %#16llx %#llx %d %p ",
- lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
+ lock->ols_dlmlock, lock->ols_flags, lock->ols_handle.cookie,
lock->ols_state, lock->ols_owner);
osc_lvb_print(env, cookie, p, &lock->ols_lvb);
return 0;
}
-static int osc_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
-
- if (need->cld_enq_flags & CEF_NEVER)
- return 0;
-
- if (ols->ols_state >= OLS_CANCELLED)
- return 0;
-
- if (need->cld_mode == CLM_PHANTOM) {
- if (ols->ols_agl)
- return !(ols->ols_state > OLS_RELEASED);
-
- /*
- * Note: the QUEUED lock can't be matched here, otherwise
- * it might cause the deadlocks.
- * In read_process,
- * P1: enqueued read lock, create sublock1
- * P2: enqueued write lock, create sublock2(conflicted
- * with sublock1).
- * P1: Grant read lock.
- * P1: enqueued glimpse lock(with holding sublock1_read),
- * matched with sublock2, waiting sublock2 to be granted.
- * But sublock2 can not be granted, because P1
- * will not release sublock1. Bang!
- */
- if (ols->ols_state < OLS_GRANTED ||
- ols->ols_state > OLS_RELEASED)
- return 0;
- } else if (need->cld_enq_flags & CEF_MUST) {
- /*
- * If the lock hasn't ever enqueued, it can't be matched
- * because enqueue process brings in many information
- * which can be used to determine things such as lockless,
- * CEF_MUST, etc.
- */
- if (ols->ols_state < OLS_UPCALL_RECEIVED &&
- ols->ols_locklessable)
- return 0;
- }
- return 1;
-}
-
static const struct cl_lock_operations osc_lock_ops = {
.clo_fini = osc_lock_fini,
.clo_enqueue = osc_lock_enqueue,
- .clo_wait = osc_lock_wait,
- .clo_unuse = osc_lock_unuse,
- .clo_use = osc_lock_use,
- .clo_delete = osc_lock_delete,
- .clo_state = osc_lock_state,
.clo_cancel = osc_lock_cancel,
- .clo_weigh = osc_lock_weigh,
.clo_print = osc_lock_print,
- .clo_fits_into = osc_lock_fits_into,
};
-static int osc_lock_lockless_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
- struct cl_lock *lock = slice->cls_lock;
-
- LASSERT(ols->ols_state == OLS_GRANTED);
- LINVRNT(osc_lock_invariant(ols));
-
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- return 0;
-}
-
static void osc_lock_lockless_cancel(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
struct osc_lock *ols = cl2osc_lock(slice);
+ struct osc_object *osc = cl2osc(slice->cls_obj);
+ struct cl_lock_descr *descr = &slice->cls_lock->cll_descr;
int result;
- result = osc_lock_flush(ols, 0);
+ LASSERT(!ols->ols_dlmlock);
+ result = osc_lock_flush(osc, descr->cld_start, descr->cld_end,
+ descr->cld_mode, 0);
if (result)
CERROR("Pages for lockless lock %p were not purged(%d)\n",
ols, result);
- ols->ols_state = OLS_CANCELLED;
-}
-
-static int osc_lock_lockless_wait(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *olck = cl2osc_lock(slice);
- struct cl_lock *lock = olck->ols_cl.cls_lock;
- LINVRNT(osc_lock_invariant(olck));
- LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
-
- return lock->cll_error;
+ osc_lock_wake_waiters(env, osc, ols);
}
-static void osc_lock_lockless_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct osc_lock *lock = cl2osc_lock(slice);
+static const struct cl_lock_operations osc_lock_lockless_ops = {
+ .clo_fini = osc_lock_fini,
+ .clo_enqueue = osc_lock_enqueue,
+ .clo_cancel = osc_lock_lockless_cancel,
+ .clo_print = osc_lock_print
+};
- LINVRNT(osc_lock_invariant(lock));
- if (state == CLS_HELD) {
- struct osc_io *oio = osc_env_io(env);
+static void osc_lock_set_writer(const struct lu_env *env,
+ const struct cl_io *io,
+ struct cl_object *obj, struct osc_lock *oscl)
+{
+ struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
+ pgoff_t io_start;
+ pgoff_t io_end;
- LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
- lock->ols_owner = oio;
+ if (!cl_object_same(io->ci_obj, obj))
+ return;
- /* set the io to be lockless if this lock is for io's
- * host object
- */
- if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
- oio->oi_lockless = 1;
+ if (likely(io->ci_type == CIT_WRITE)) {
+ io_start = cl_index(obj, io->u.ci_rw.crw_pos);
+ io_end = cl_index(obj, io->u.ci_rw.crw_pos +
+ io->u.ci_rw.crw_count - 1);
+ if (cl_io_is_append(io)) {
+ io_start = 0;
+ io_end = CL_PAGE_EOF;
+ }
+ } else {
+ LASSERT(cl_io_is_mkwrite(io));
+ io_start = io_end = io->u.ci_fault.ft_index;
}
-}
-static int osc_lock_lockless_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- struct osc_lock *lock = cl2osc_lock(slice);
-
- if (!(need->cld_enq_flags & CEF_NEVER))
- return 0;
+ if (descr->cld_mode >= CLM_WRITE &&
+ descr->cld_start <= io_start && descr->cld_end >= io_end) {
+ struct osc_io *oio = osc_env_io(env);
- /* lockless lock should only be used by its owning io. b22147 */
- return (lock->ols_owner == osc_env_io(env));
+ /* There must be only one lock to match the write region */
+ LASSERT(!oio->oi_write_osclock);
+ oio->oi_write_osclock = oscl;
+ }
}
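
An aside on the region arithmetic above: the IO's byte range [pos, pos + count - 1] is converted to page indices, and the osc_lock is recorded only when its extent covers that whole range. A minimal standalone sketch, assuming 4 KiB pages and approximating cl_index() with a shift (not part of the patch):

	#include <stdio.h>

	#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

	int main(void)
	{
		unsigned long long pos = 6000, count = 10000;
		unsigned long io_start = pos >> PAGE_SHIFT;
		unsigned long io_end = (pos + count - 1) >> PAGE_SHIFT;

		/* bytes [6000, 15999] fall on pages [1, 3] */
		printf("pages [%lu, %lu]\n", io_start, io_end);
		return 0;
	}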
-static const struct cl_lock_operations osc_lock_lockless_ops = {
- .clo_fini = osc_lock_fini,
- .clo_enqueue = osc_lock_enqueue,
- .clo_wait = osc_lock_lockless_wait,
- .clo_unuse = osc_lock_lockless_unuse,
- .clo_state = osc_lock_lockless_state,
- .clo_fits_into = osc_lock_lockless_fits_into,
- .clo_cancel = osc_lock_lockless_cancel,
- .clo_print = osc_lock_print
-};
-
int osc_lock_init(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *unused)
+ const struct cl_io *io)
{
- struct osc_lock *clk;
- int result;
-
- clk = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS);
- if (clk) {
- __u32 enqflags = lock->cll_descr.cld_enq_flags;
+ struct osc_lock *oscl;
+ __u32 enqflags = lock->cll_descr.cld_enq_flags;
+
+ oscl = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS);
+ if (!oscl)
+ return -ENOMEM;
+
+ oscl->ols_state = OLS_NEW;
+ spin_lock_init(&oscl->ols_lock);
+ INIT_LIST_HEAD(&oscl->ols_waiting_list);
+ INIT_LIST_HEAD(&oscl->ols_wait_entry);
+ INIT_LIST_HEAD(&oscl->ols_nextlock_oscobj);
+
+ oscl->ols_flags = osc_enq2ldlm_flags(enqflags);
+ oscl->ols_agl = !!(enqflags & CEF_AGL);
+ if (oscl->ols_agl)
+ oscl->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
+ if (oscl->ols_flags & LDLM_FL_HAS_INTENT) {
+ oscl->ols_flags |= LDLM_FL_BLOCK_GRANTED;
+ oscl->ols_glimpse = 1;
+ }
- osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
- atomic_set(&clk->ols_pageref, 0);
- clk->ols_state = OLS_NEW;
+ cl_lock_slice_add(lock, &oscl->ols_cl, obj, &osc_lock_ops);
- clk->ols_flags = osc_enq2ldlm_flags(enqflags);
- clk->ols_agl = !!(enqflags & CEF_AGL);
- if (clk->ols_agl)
- clk->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
- if (clk->ols_flags & LDLM_FL_HAS_INTENT)
- clk->ols_glimpse = 1;
+ if (!(enqflags & CEF_MUST))
+ /* try to convert this lock to a lockless lock */
+ osc_lock_to_lockless(env, oscl, (enqflags & CEF_NEVER));
+ if (oscl->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
+ oscl->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
- cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
+ if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io))
+ osc_lock_set_writer(env, io, obj, oscl);
- if (!(enqflags & CEF_MUST))
- /* try to convert this lock to a lockless lock */
- osc_lock_to_lockless(env, clk, (enqflags & CEF_NEVER));
- if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
- clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
- LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx",
- lock, clk, clk->ols_flags);
+ LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
+ lock, oscl, oscl->ols_flags);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
+ return 0;
}
-int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
+/**
+ * Finds an existing DLM lock covering the page at the given \a index;
+ * \a pending controls whether locks with LDLM_FL_CBPENDING set may match.
+ */
+struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
+ struct osc_object *obj, pgoff_t index,
+ int pending, int canceling)
{
- struct osc_lock *olock;
- int rc = 0;
-
- spin_lock(&osc_ast_guard);
- olock = dlm->l_ast_data;
+ struct osc_thread_info *info = osc_env_info(env);
+ struct ldlm_res_id *resname = &info->oti_resname;
+ ldlm_policy_data_t *policy = &info->oti_policy;
+ struct lustre_handle lockh;
+ struct ldlm_lock *lock = NULL;
+ enum ldlm_mode mode;
+ __u64 flags;
+
+ ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
+ osc_index2policy(policy, osc2cl(obj), index, index);
+ policy->l_extent.gid = LDLM_GID_ANY;
+
+ flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
+ if (pending)
+ flags |= LDLM_FL_CBPENDING;
/*
- * there's a very rare race with osc_page_addref_lock(), but that
- * doesn't matter because in the worst case we don't cancel a lock
- * which we actually can, that's no harm.
+	 * It is fine to match any group lock since there can be only one
+	 * with a unique gid, and it conflicts with all other lock modes too.
*/
- if (olock &&
- atomic_add_return(_PAGEREF_MAGIC,
- &olock->ols_pageref) != _PAGEREF_MAGIC) {
- atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
- rc = 1;
+again:
+ mode = ldlm_lock_match(osc_export(obj)->exp_obd->obd_namespace,
+ flags, resname, LDLM_EXTENT, policy,
+ LCK_PR | LCK_PW | LCK_GROUP, &lockh, canceling);
+ if (mode != 0) {
+ lock = ldlm_handle2lock(&lockh);
+ /* RACE: the lock is cancelled so let's try again */
+ if (unlikely(!lock))
+ goto again;
}
- spin_unlock(&osc_ast_guard);
- return rc;
+ return lock;
}
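
An aside on the match/retry loop above: ldlm_lock_match() can report a hit whose lock is cancelled before ldlm_handle2lock() manages to take a reference, so the lookup simply restarts. A hypothetical, self-contained sketch of that lookup-then-ref idiom (the stub functions are invented for illustration):

	#include <stdio.h>

	static int weak_hits = 1;	/* pretend one lock matches... */
	static int cancelled = 1;	/* ...but it is cancelled once first */

	static int weak_lookup(void) { return weak_hits; }

	static int take_ref(void)
	{
		if (cancelled) {	/* lost the race this time */
			cancelled = 0;
			weak_hits = 0;	/* nothing matches anymore */
			return 0;
		}
		return 1;
	}

	int main(void)
	{
	again:
		if (!weak_lookup()) {
			puts("no lock");	/* clean miss */
			return 0;
		}
		if (!take_ref())
			goto again;	/* hit died under us: retry */
		puts("got ref");
		return 0;
	}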
/** @} osc */
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index 9d474fcdd..738ab10ab 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -36,6 +36,7 @@
* Implementation of cl_object for OSC layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_OSC
@@ -94,6 +95,9 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
atomic_set(&osc->oo_nr_reads, 0);
atomic_set(&osc->oo_nr_writes, 0);
spin_lock_init(&osc->oo_lock);
+ spin_lock_init(&osc->oo_tree_lock);
+ spin_lock_init(&osc->oo_ol_spin);
+ INIT_LIST_HEAD(&osc->oo_ol_list);
cl_object_page_init(lu2cl(obj), sizeof(struct osc_page));
@@ -120,6 +124,7 @@ static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
LASSERT(list_empty(&osc->oo_reading_exts));
LASSERT(atomic_read(&osc->oo_nr_reads) == 0);
LASSERT(atomic_read(&osc->oo_nr_writes) == 0);
+ LASSERT(list_empty(&osc->oo_ol_list));
lu_object_fini(obj);
kmem_cache_free(osc_object_kmem, osc);
@@ -192,6 +197,32 @@ static int osc_object_glimpse(const struct lu_env *env,
return 0;
}
+static int osc_object_ast_clear(struct ldlm_lock *lock, void *data)
+{
+ LASSERT(lock->l_granted_mode == lock->l_req_mode);
+ if (lock->l_ast_data == data)
+ lock->l_ast_data = NULL;
+ return LDLM_ITER_CONTINUE;
+}
+
+static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
+{
+ struct osc_object *osc = cl2osc(obj);
+ struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname;
+
+ LASSERTF(osc->oo_npages == 0,
+ DFID "still have %lu pages, obj: %p, osc: %p\n",
+ PFID(lu_object_fid(&obj->co_lu)), osc->oo_npages, obj, osc);
+
+	/* DLM locks don't hold a reference on the osc_object, so we have to
+	 * clear it before the object is destroyed.
+ */
+ ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
+ ldlm_resource_iterate(osc_export(osc)->exp_obd->obd_namespace, resname,
+ osc_object_ast_clear, osc);
+ return 0;
+}
+
void osc_object_set_contended(struct osc_object *obj)
{
obj->oo_contention_time = cfs_time_current();
@@ -236,12 +267,12 @@ static const struct cl_object_operations osc_ops = {
.coo_io_init = osc_io_init,
.coo_attr_get = osc_attr_get,
.coo_attr_set = osc_attr_set,
- .coo_glimpse = osc_object_glimpse
+ .coo_glimpse = osc_object_glimpse,
+ .coo_prune = osc_object_prune
};
static const struct lu_object_operations osc_lu_obj_ops = {
.loo_object_init = osc_object_init,
- .loo_object_delete = NULL,
.loo_object_release = NULL,
.loo_object_free = osc_object_free,
.loo_object_print = osc_object_print,
@@ -261,8 +292,9 @@ struct lu_object *osc_object_alloc(const struct lu_env *env,
lu_object_init(obj, NULL, dev);
osc->oo_cl.co_ops = &osc_ops;
obj->lo_ops = &osc_lu_obj_ops;
- } else
+ } else {
obj = NULL;
+ }
return obj;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index ce9ddd515..c29c2eabe 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -36,14 +36,15 @@
* Implementation of cl_page for OSC layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_OSC
#include "osc_cl_internal.h"
-static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del);
-static void osc_lru_add(struct client_obd *cli, struct osc_page *opg);
+static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
+static void osc_lru_use(struct client_obd *cli, struct osc_page *opg);
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
struct osc_page *opg);
@@ -63,18 +64,9 @@ static int osc_page_protected(const struct lu_env *env,
* Page operations.
*
*/
-static void osc_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct osc_page *opg = cl2osc_page(slice);
-
- CDEBUG(D_TRACE, "%p\n", opg);
- LASSERT(!opg->ops_lock);
-}
-
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
- struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
+ struct cl_page *page = opg->ops_cl.cpl_page;
LASSERT(!opg->ops_transfer_pinned);
cl_page_get(page);
@@ -85,11 +77,11 @@ static void osc_page_transfer_get(struct osc_page *opg, const char *label)
static void osc_page_transfer_put(const struct lu_env *env,
struct osc_page *opg)
{
- struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
+ struct cl_page *page = opg->ops_cl.cpl_page;
if (opg->ops_transfer_pinned) {
- lu_ref_del(&page->cp_reference, "transfer", page);
opg->ops_transfer_pinned = 0;
+ lu_ref_del(&page->cp_reference, "transfer", page);
cl_page_put(env, page);
}
}
@@ -104,10 +96,7 @@ static void osc_page_transfer_add(const struct lu_env *env,
{
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- /* ops_lru and ops_inflight share the same field, so take it from LRU
- * first and then use it as inflight.
- */
- osc_lru_del(osc_cli(obj), opg, false);
+ osc_lru_use(osc_cli(obj), opg);
spin_lock(&obj->oo_seatbelt);
list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
@@ -115,11 +104,9 @@ static void osc_page_transfer_add(const struct lu_env *env,
spin_unlock(&obj->oo_seatbelt);
}
-static int osc_page_cache_add(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
+int osc_page_cache_add(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io)
{
- struct osc_io *oio = osc_env_io(env);
struct osc_page *opg = cl2osc_page(slice);
int result;
@@ -132,17 +119,6 @@ static int osc_page_cache_add(const struct lu_env *env,
else
osc_page_transfer_add(env, opg, CRT_WRITE);
- /* for sync write, kernel will wait for this page to be flushed before
- * osc_io_end() is called, so release it earlier.
- * for mkwrite(), it's known there is no further pages.
- */
- if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
- if (oio->oi_active) {
- osc_extent_release(env, oio->oi_active);
- oio->oi_active = NULL;
- }
- }
-
return result;
}
@@ -154,102 +130,25 @@ void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
policy->l_extent.end = cl_offset(obj, end + 1) - 1;
}
-static int osc_page_addref_lock(const struct lu_env *env,
- struct osc_page *opg,
- struct cl_lock *lock)
-{
- struct osc_lock *olock;
- int rc;
-
- LASSERT(!opg->ops_lock);
-
- olock = osc_lock_at(lock);
- if (atomic_inc_return(&olock->ols_pageref) <= 0) {
- atomic_dec(&olock->ols_pageref);
- rc = -ENODATA;
- } else {
- cl_lock_get(lock);
- opg->ops_lock = lock;
- rc = 0;
- }
- return rc;
-}
-
-static void osc_page_putref_lock(const struct lu_env *env,
- struct osc_page *opg)
-{
- struct cl_lock *lock = opg->ops_lock;
- struct osc_lock *olock;
-
- LASSERT(lock);
- olock = osc_lock_at(lock);
-
- atomic_dec(&olock->ols_pageref);
- opg->ops_lock = NULL;
-
- cl_lock_put(env, lock);
-}
-
static int osc_page_is_under_lock(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *unused)
+ struct cl_io *unused, pgoff_t *max_index)
{
- struct cl_lock *lock;
+ struct osc_page *opg = cl2osc_page(slice);
+ struct ldlm_lock *dlmlock;
int result = -ENODATA;
- lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
- NULL, 1, 0);
- if (lock) {
- if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
- result = -EBUSY;
- cl_lock_put(env, lock);
+ dlmlock = osc_dlmlock_at_pgoff(env, cl2osc(slice->cpl_obj),
+ osc_index(opg), 1, 0);
+ if (dlmlock) {
+ *max_index = cl_index(slice->cpl_obj,
+ dlmlock->l_policy_data.l_extent.end);
+ LDLM_LOCK_PUT(dlmlock);
+ result = 0;
}
return result;
}
-static void osc_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct osc_page *opg = cl2osc_page(slice);
-
- if (unlikely(opg->ops_lock))
- osc_page_putref_lock(env, opg);
-}
-
-static void osc_page_completion_read(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
-
- if (likely(opg->ops_lock))
- osc_page_putref_lock(env, opg);
- osc_lru_add(osc_cli(obj), opg);
-}
-
-static void osc_page_completion_write(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(slice->cpl_obj);
-
- osc_lru_add(osc_cli(obj), opg);
-}
-
-static int osc_page_fail(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- /*
- * Cached read?
- */
- LBUG();
- return 0;
-}
-
static const char *osc_list(struct list_head *head)
{
return list_empty(head) ? "-" : "+";
@@ -272,8 +171,8 @@ static int osc_page_print(const struct lu_env *env,
struct osc_object *obj = cl2osc(slice->cpl_obj);
struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
- return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
- opg,
+ return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
+ opg, osc_index(opg),
/* 1 */
oap->oap_magic, oap->oap_cmd,
oap->oap_interrupted,
@@ -321,7 +220,7 @@ static void osc_page_delete(const struct lu_env *env,
osc_page_transfer_put(env, opg);
rc = osc_teardown_async_page(env, obj, opg);
if (rc) {
- CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
+ CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page,
"Trying to teardown failed: %d\n", rc);
LASSERT(0);
}
@@ -334,7 +233,19 @@ static void osc_page_delete(const struct lu_env *env,
}
spin_unlock(&obj->oo_seatbelt);
- osc_lru_del(osc_cli(obj), opg, true);
+ osc_lru_del(osc_cli(obj), opg);
+
+ if (slice->cpl_page->cp_type == CPT_CACHEABLE) {
+ void *value;
+
+ spin_lock(&obj->oo_tree_lock);
+ value = radix_tree_delete(&obj->oo_tree, osc_index(opg));
+ if (value)
+ --obj->oo_npages;
+ spin_unlock(&obj->oo_tree_lock);
+
+ LASSERT(ergo(value, value == opg));
+ }
}
static void osc_page_clip(const struct lu_env *env,
@@ -382,28 +293,16 @@ static int osc_page_flush(const struct lu_env *env,
}
static const struct cl_page_operations osc_page_ops = {
- .cpo_fini = osc_page_fini,
.cpo_print = osc_page_print,
.cpo_delete = osc_page_delete,
.cpo_is_under_lock = osc_page_is_under_lock,
- .cpo_disown = osc_page_disown,
- .io = {
- [CRT_READ] = {
- .cpo_cache_add = osc_page_fail,
- .cpo_completion = osc_page_completion_read
- },
- [CRT_WRITE] = {
- .cpo_cache_add = osc_page_cache_add,
- .cpo_completion = osc_page_completion_write
- }
- },
.cpo_clip = osc_page_clip,
.cpo_cancel = osc_page_cancel,
.cpo_flush = osc_page_flush
};
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
struct osc_object *osc = cl2osc(obj);
struct osc_page *opg = cl_object_page_slice(obj, page);
@@ -412,13 +311,14 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
opg->ops_from = 0;
opg->ops_to = PAGE_SIZE;
- result = osc_prep_async_page(osc, opg, vmpage,
- cl_offset(obj, page->cp_index));
+ result = osc_prep_async_page(osc, opg, page->cp_vmpage,
+ cl_offset(obj, index));
if (result == 0) {
struct osc_io *oio = osc_env_io(env);
opg->ops_srvlock = osc_io_srvlock(oio);
- cl_page_slice_add(page, &opg->ops_cl, obj, &osc_page_ops);
+ cl_page_slice_add(page, &opg->ops_cl, obj, index,
+ &osc_page_ops);
}
/*
* Cannot assert osc_page_protected() here as read-ahead
@@ -431,12 +331,47 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
INIT_LIST_HEAD(&opg->ops_lru);
	/* reserve an LRU slot for this page */
- if (page->cp_type == CPT_CACHEABLE && result == 0)
+ if (page->cp_type == CPT_CACHEABLE && result == 0) {
result = osc_lru_reserve(env, osc, opg);
+ if (result == 0) {
+ spin_lock(&osc->oo_tree_lock);
+ result = radix_tree_insert(&osc->oo_tree, index, opg);
+ if (result == 0)
+ ++osc->oo_npages;
+ spin_unlock(&osc->oo_tree_lock);
+ LASSERT(result == 0);
+ }
+ }
return result;
}
+int osc_over_unstable_soft_limit(struct client_obd *cli)
+{
+ long obd_upages, obd_dpages, osc_upages;
+
+ /* Can't check cli->cl_unstable_count, therefore, no soft limit */
+ if (!cli)
+ return 0;
+
+ obd_upages = atomic_read(&obd_unstable_pages);
+ obd_dpages = atomic_read(&obd_dirty_pages);
+
+ osc_upages = atomic_read(&cli->cl_unstable_count);
+
+ /*
+ * obd_max_dirty_pages is the max number of (dirty + unstable)
+	 * pages allowed at any given time. To simulate an unstable-page-only
+	 * limit, we subtract the current number of dirty pages from this
+	 * max. This difference is roughly the number of pages
+ * currently available for unstable pages. Thus, the soft limit
+ * is half of that difference. Check osc_upages to ensure we don't
+ * set SOFT_SYNC for OSCs without any outstanding unstable pages.
+ */
+ return osc_upages &&
+ obd_upages >= (obd_max_dirty_pages - obd_dpages) / 2;
+}
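
To make the soft-limit arithmetic concrete, here is a hypothetical standalone model of the check above, using plain longs instead of the kernel's atomic counters (the numbers in main() are invented): with a dirty+unstable ceiling of 1024 pages and 256 dirty pages, the headroom for unstable pages is 768 and the soft limit is 384.

	#include <stdio.h>

	static int over_unstable_soft_limit(long obd_upages, long obd_dpages,
					    long max_dirty, long osc_upages)
	{
		/* soft limit = half of (ceiling - current dirty pages) */
		return osc_upages &&
		       obd_upages >= (max_dirty - obd_dpages) / 2;
	}

	int main(void)
	{
		/* ceiling 1024, dirty 256 -> headroom 768 -> limit 384 */
		printf("%d\n", over_unstable_soft_limit(400, 256, 1024, 8));
		printf("%d\n", over_unstable_soft_limit(300, 256, 1024, 8));
		return 0;	/* prints 1 then 0 */
	}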
+
/**
* Helper function called by osc_io_submit() for every page in an immediate
* transfer (i.e., transferred synchronously).
@@ -460,6 +395,9 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
oap->oap_count = opg->ops_to - opg->ops_from;
oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC;
+ if (osc_over_unstable_soft_limit(oap->oap_cli))
+ oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
+
if (!client_is_remote(osc_export(obj)) &&
capable(CFS_CAP_SYS_RESOURCE)) {
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
@@ -483,13 +421,12 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
*/
static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
-static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
* number of pages to avoid running out of LRU budget, and..
*/
static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */
/* free at most this number, otherwise it will take too long to finish. */
-static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
+static const int lru_shrink_max = 8 << (20 - PAGE_SHIFT); /* 8M */
/* Check if we can free LRU slots from this OSC. If there exist LRU waiters,
* we should free slots aggressively. In this way, slots are freed in a steady
@@ -500,65 +437,142 @@ static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
static int osc_cache_too_much(struct client_obd *cli)
{
struct cl_client_cache *cache = cli->cl_cache;
- int pages = atomic_read(&cli->cl_lru_in_list) >> 1;
+ int pages = atomic_read(&cli->cl_lru_in_list);
+ unsigned long budget;
- if (atomic_read(&osc_lru_waiters) > 0 &&
- atomic_read(cli->cl_lru_left) < lru_shrink_max)
- /* drop lru pages aggressively */
- return min(pages, lru_shrink_max);
+ budget = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
/* if it's going to run out LRU slots, we should free some, but not
* too much to maintain fairness among OSCs.
*/
if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
- unsigned long tmp;
+ if (pages >= budget)
+ return lru_shrink_max;
+ else if (pages >= budget / 2)
+ return lru_shrink_min;
+ } else if (pages >= budget * 2) {
+ return lru_shrink_min;
+ }
+ return 0;
+}
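
Read the shrink policy above as fair-share budgeting: each OSC's budget is ccc_lru_max / ccc_users LRU slots. When free slots fall below 1/16 of the maximum, an OSC at or above budget frees lru_shrink_max pages and one at half budget frees lru_shrink_min; even with slots to spare, an OSC at twice its budget trims lru_shrink_min. A hypothetical restatement with plain integers (not part of the patch):

	/* hypothetical model of osc_cache_too_much(); plain longs stand in
	 * for the kernel's atomic counters */
	static long cache_too_much(long pages, long lru_left, long lru_max,
				   long users, long shrink_min, long shrink_max)
	{
		long budget = lru_max / users;		/* fair share per OSC */

		if (lru_left < lru_max / 16) {		/* slots nearly gone */
			if (pages >= budget)
				return shrink_max;
			else if (pages >= budget / 2)
				return shrink_min;
		} else if (pages >= budget * 2) {	/* far over fair share */
			return shrink_min;
		}
		return 0;				/* nothing to shrink */
	}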
- tmp = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
- if (pages > tmp)
- return min(pages, lru_shrink_max);
+int lru_queue_work(const struct lu_env *env, void *data)
+{
+ struct client_obd *cli = data;
- return pages > lru_shrink_min ? lru_shrink_min : 0;
- }
+ CDEBUG(D_CACHE, "Run LRU work for client obd %p.\n", cli);
+
+ if (osc_cache_too_much(cli))
+ osc_lru_shrink(env, cli, lru_shrink_max, true);
return 0;
}
-/* Return how many pages are not discarded in @pvec. */
-static int discard_pagevec(const struct lu_env *env, struct cl_io *io,
- struct cl_page **pvec, int max_index)
+void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
+{
+ LIST_HEAD(lru);
+ struct osc_async_page *oap;
+ int npages = 0;
+
+ list_for_each_entry(oap, plist, oap_pending_item) {
+ struct osc_page *opg = oap2osc_page(oap);
+
+ if (!opg->ops_in_lru)
+ continue;
+
+ ++npages;
+ LASSERT(list_empty(&opg->ops_lru));
+ list_add(&opg->ops_lru, &lru);
+ }
+
+ if (npages > 0) {
+ spin_lock(&cli->cl_lru_list_lock);
+ list_splice_tail(&lru, &cli->cl_lru_list);
+ atomic_sub(npages, &cli->cl_lru_busy);
+ atomic_add(npages, &cli->cl_lru_in_list);
+ spin_unlock(&cli->cl_lru_list_lock);
+
+ /* XXX: May set force to be true for better performance */
+ if (osc_cache_too_much(cli))
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+ }
+}
+
+static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
+{
+ LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
+ list_del_init(&opg->ops_lru);
+ atomic_dec(&cli->cl_lru_in_list);
+}
+
+/**
+ * The page is being destroyed. It may not be in the LRU list if the
+ * transfer never finished (an error occurred).
+ */
+static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
+{
+ if (opg->ops_in_lru) {
+ spin_lock(&cli->cl_lru_list_lock);
+ if (!list_empty(&opg->ops_lru)) {
+ __osc_lru_del(cli, opg);
+ } else {
+ LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
+ atomic_dec(&cli->cl_lru_busy);
+ }
+ spin_unlock(&cli->cl_lru_list_lock);
+
+ atomic_inc(cli->cl_lru_left);
+ /* this is a great place to release more LRU pages if
+		 * this osc occupies too many LRU pages and the kernel is
+		 * stealing one of them.
+ */
+ if (!memory_pressure_get())
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+ wake_up(&osc_lru_waitq);
+ } else {
+ LASSERT(list_empty(&opg->ops_lru));
+ }
+}
+
+/**
+ * Delete the page from the LRU list because it is being redirtied.
+ */
+static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
+{
+	/* If the page is being transferred for the first time,
+	 * ops_lru should be empty.
+ */
+ if (opg->ops_in_lru && !list_empty(&opg->ops_lru)) {
+ spin_lock(&cli->cl_lru_list_lock);
+ __osc_lru_del(cli, opg);
+ spin_unlock(&cli->cl_lru_list_lock);
+ atomic_inc(&cli->cl_lru_busy);
+ }
+}
+
+static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
+ struct cl_page **pvec, int max_index)
{
- int count;
int i;
- for (count = 0, i = 0; i < max_index; i++) {
+ for (i = 0; i < max_index; i++) {
struct cl_page *page = pvec[i];
- if (cl_page_own_try(env, io, page) == 0) {
- /* free LRU page only if nobody is using it.
- * This check is necessary to avoid freeing the pages
- * having already been removed from LRU and pinned
- * for IO.
- */
- if (!cl_page_in_use(page)) {
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- ++count;
- }
- cl_page_disown(env, io, page);
- }
+ LASSERT(cl_page_is_owned(page, io));
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
cl_page_put(env, page);
+
pvec[i] = NULL;
}
- return max_index - count;
}
/**
* Drop at most @target pages from the LRU list.
*/
-int osc_lru_shrink(struct client_obd *cli, int target)
+int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+ int target, bool force)
{
- struct cl_env_nest nest;
- struct lu_env *env;
struct cl_io *io;
struct cl_object *clobj = NULL;
struct cl_page **pvec;
@@ -573,23 +587,31 @@ int osc_lru_shrink(struct client_obd *cli, int target)
if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
return 0;
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env))
- return PTR_ERR(env);
+ if (!force) {
+ if (atomic_read(&cli->cl_lru_shrinkers) > 0)
+ return -EBUSY;
- pvec = osc_env_info(env)->oti_pvec;
+ if (atomic_inc_return(&cli->cl_lru_shrinkers) > 1) {
+ atomic_dec(&cli->cl_lru_shrinkers);
+ return -EBUSY;
+ }
+ } else {
+ atomic_inc(&cli->cl_lru_shrinkers);
+ }
+
+ pvec = (struct cl_page **)osc_env_info(env)->oti_pvec;
io = &osc_env_info(env)->oti_io;
- client_obd_list_lock(&cli->cl_lru_list_lock);
- atomic_inc(&cli->cl_lru_shrinkers);
+ spin_lock(&cli->cl_lru_list_lock);
maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) {
struct cl_page *page;
+ bool will_free = false;
if (--maxscan < 0)
break;
- page = cl_page_top(opg->ops_cl.cpl_page);
+ page = opg->ops_cl.cpl_page;
if (cl_page_in_use_noref(page)) {
list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
continue;
@@ -600,10 +622,10 @@ int osc_lru_shrink(struct client_obd *cli, int target)
struct cl_object *tmp = page->cp_obj;
cl_object_get(tmp);
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
if (clobj) {
- count -= discard_pagevec(env, io, pvec, index);
+ discard_pagevec(env, io, pvec, index);
index = 0;
cl_io_fini(env, io);
@@ -616,7 +638,7 @@ int osc_lru_shrink(struct client_obd *cli, int target)
io->ci_ignore_layout = 1;
rc = cl_io_init(env, io, CIT_MISC, clobj);
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
if (rc != 0)
break;
@@ -625,98 +647,54 @@ int osc_lru_shrink(struct client_obd *cli, int target)
continue;
}
- /* move this page to the end of list as it will be discarded
- * soon. The page will be finally removed from LRU list in
- * osc_page_delete().
- */
- list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+ if (cl_page_own_try(env, io, page) == 0) {
+ if (!cl_page_in_use_noref(page)) {
+ /* remove it from lru list earlier to avoid
+ * lock contention
+ */
+ __osc_lru_del(cli, opg);
+ opg->ops_in_lru = 0; /* will be discarded */
+
+ cl_page_get(page);
+ will_free = true;
+ } else {
+ cl_page_disown(env, io, page);
+ }
+ }
- /* it's okay to grab a refcount here w/o holding lock because
- * it has to grab cl_lru_list_lock to delete the page.
- */
- cl_page_get(page);
- pvec[index++] = page;
- if (++count >= target)
- break;
+ if (!will_free) {
+ list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+ continue;
+ }
+		/* Don't discard or free the page while cl_lru_list_lock is held */
+ pvec[index++] = page;
if (unlikely(index == OTI_PVEC_SIZE)) {
- client_obd_list_unlock(&cli->cl_lru_list_lock);
- count -= discard_pagevec(env, io, pvec, index);
+ spin_unlock(&cli->cl_lru_list_lock);
+ discard_pagevec(env, io, pvec, index);
index = 0;
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
}
+
+ if (++count >= target)
+ break;
}
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
if (clobj) {
- count -= discard_pagevec(env, io, pvec, index);
+ discard_pagevec(env, io, pvec, index);
cl_io_fini(env, io);
cl_object_put(env, clobj);
}
- cl_env_nested_put(&nest, env);
atomic_dec(&cli->cl_lru_shrinkers);
- return count > 0 ? count : rc;
-}
-
-static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
-{
- bool wakeup = false;
-
- if (!opg->ops_in_lru)
- return;
-
- atomic_dec(&cli->cl_lru_busy);
- client_obd_list_lock(&cli->cl_lru_list_lock);
- if (list_empty(&opg->ops_lru)) {
- list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
- atomic_inc_return(&cli->cl_lru_in_list);
- wakeup = atomic_read(&osc_lru_waiters) > 0;
- }
- client_obd_list_unlock(&cli->cl_lru_list_lock);
-
- if (wakeup) {
- osc_lru_shrink(cli, osc_cache_too_much(cli));
+ if (count > 0) {
+ atomic_add(count, cli->cl_lru_left);
wake_up_all(&osc_lru_waitq);
}
-}
-
-/* delete page from LRUlist. The page can be deleted from LRUlist for two
- * reasons: redirtied or deleted from page cache.
- */
-static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
-{
- if (opg->ops_in_lru) {
- client_obd_list_lock(&cli->cl_lru_list_lock);
- if (!list_empty(&opg->ops_lru)) {
- LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
- list_del_init(&opg->ops_lru);
- atomic_dec(&cli->cl_lru_in_list);
- if (!del)
- atomic_inc(&cli->cl_lru_busy);
- } else if (del) {
- LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
- atomic_dec(&cli->cl_lru_busy);
- }
- client_obd_list_unlock(&cli->cl_lru_list_lock);
- if (del) {
- atomic_inc(cli->cl_lru_left);
- /* this is a great place to release more LRU pages if
- * this osc occupies too many LRU pages and kernel is
- * stealing one of them.
- * cl_lru_shrinkers is to avoid recursive call in case
- * we're already in the context of osc_lru_shrink().
- */
- if (atomic_read(&cli->cl_lru_shrinkers) == 0 &&
- !memory_pressure_get())
- osc_lru_shrink(cli, osc_cache_too_much(cli));
- wake_up(&osc_lru_waitq);
- }
- } else {
- LASSERT(list_empty(&opg->ops_lru));
- }
+ return count > 0 ? count : rc;
}
static inline int max_to_shrink(struct client_obd *cli)
@@ -724,19 +702,28 @@ static inline int max_to_shrink(struct client_obd *cli)
return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
}
-static int osc_lru_reclaim(struct client_obd *cli)
+int osc_lru_reclaim(struct client_obd *cli)
{
+ struct cl_env_nest nest;
+ struct lu_env *env;
struct cl_client_cache *cache = cli->cl_cache;
int max_scans;
- int rc;
+ int rc = 0;
LASSERT(cache);
- rc = osc_lru_shrink(cli, lru_shrink_min);
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ return 0;
+
+ rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli), false);
if (rc != 0) {
+ if (rc == -EBUSY)
+ rc = 0;
+
CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
cli->cl_import->imp_obd->obd_name, rc, cli);
- return rc;
+ goto out;
}
CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
@@ -764,10 +751,11 @@ static int osc_lru_reclaim(struct client_obd *cli)
atomic_read(&cli->cl_lru_busy));
list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
- if (atomic_read(&cli->cl_lru_in_list) > 0) {
+ if (osc_cache_too_much(cli) > 0) {
spin_unlock(&cache->ccc_lru_lock);
- rc = osc_lru_shrink(cli, max_to_shrink(cli));
+ rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli),
+ true);
spin_lock(&cache->ccc_lru_lock);
if (rc != 0)
break;
@@ -775,6 +763,8 @@ static int osc_lru_reclaim(struct client_obd *cli)
}
spin_unlock(&cache->ccc_lru_lock);
+out:
+ cl_env_nested_put(&nest, env);
CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
cli->cl_import->imp_obd->obd_name, cli, rc);
return rc;
@@ -784,16 +774,20 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
struct osc_page *opg)
{
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ struct osc_io *oio = osc_env_io(env);
struct client_obd *cli = osc_cli(obj);
int rc = 0;
if (!cli->cl_cache) /* shall not be in LRU */
return 0;
+ if (oio->oi_lru_reserved > 0) {
+ --oio->oi_lru_reserved;
+ goto out;
+ }
+
LASSERT(atomic_read(cli->cl_lru_left) >= 0);
while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
- int gen;
-
		/* ran out of LRU slots, try to drop some ourselves */
rc = osc_lru_reclaim(cli);
if (rc < 0)
@@ -803,23 +797,15 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
cond_resched();
- /* slowest case, all of caching pages are busy, notifying
- * other OSCs that we're lack of LRU slots.
- */
- atomic_inc(&osc_lru_waiters);
-
- gen = atomic_read(&cli->cl_lru_in_list);
rc = l_wait_event(osc_lru_waitq,
- atomic_read(cli->cl_lru_left) > 0 ||
- (atomic_read(&cli->cl_lru_in_list) > 0 &&
- gen != atomic_read(&cli->cl_lru_in_list)),
+ atomic_read(cli->cl_lru_left) > 0,
&lwi);
- atomic_dec(&osc_lru_waiters);
if (rc < 0)
break;
}
+out:
if (rc >= 0) {
atomic_inc(&cli->cl_lru_busy);
opg->ops_in_lru = 1;
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 30526ebca..47417f88f 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -92,12 +92,13 @@ struct osc_fsync_args {
struct osc_enqueue_args {
struct obd_export *oa_exp;
+ enum ldlm_type oa_type;
+ enum ldlm_mode oa_mode;
__u64 *oa_flags;
- obd_enqueue_update_f oa_upcall;
+ osc_enqueue_upcall_f oa_upcall;
void *oa_cookie;
struct ost_lvb *oa_lvb;
- struct lustre_handle *oa_lockh;
- struct ldlm_enqueue_info *oa_ei;
+ struct lustre_handle oa_lockh;
unsigned int oa_agl:1;
};
@@ -801,21 +802,24 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
LASSERT(!(oa->o_valid & bits));
oa->o_valid |= bits;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
oa->o_dirty = cli->cl_dirty;
if (unlikely(cli->cl_dirty - cli->cl_dirty_transit >
cli->cl_dirty_max)) {
CERROR("dirty %lu - %lu > dirty_max %lu\n",
cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
oa->o_undirty = 0;
- } else if (unlikely(atomic_read(&obd_dirty_pages) -
+ } else if (unlikely(atomic_read(&obd_unstable_pages) +
+ atomic_read(&obd_dirty_pages) -
atomic_read(&obd_dirty_transit_pages) >
(long)(obd_max_dirty_pages + 1))) {
		/* The atomic_read() and atomic_inc() calls are
		 * not covered by a lock, thus they may safely race and trip
		 * this CERROR() unless we add in a small fudge factor (+1).
*/
- CERROR("dirty %d - %d > system dirty_max %d\n",
+ CERROR("%s: dirty %d + %d - %d > system dirty_max %d\n",
+ cli->cl_import->imp_obd->obd_name,
+ atomic_read(&obd_unstable_pages),
atomic_read(&obd_dirty_pages),
atomic_read(&obd_dirty_transit_pages),
obd_max_dirty_pages);
@@ -833,10 +837,9 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
oa->o_dropped = cli->cl_lost_grant;
cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n",
oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
-
}
void osc_update_next_shrink(struct client_obd *cli)
@@ -849,9 +852,9 @@ void osc_update_next_shrink(struct client_obd *cli)
static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_avail_grant += grant;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
@@ -889,10 +892,10 @@ out:
static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
oa->o_grant = cli->cl_avail_grant / 4;
cli->cl_avail_grant -= oa->o_grant;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
oa->o_valid |= OBD_MD_FLFLAGS;
oa->o_flags = 0;
@@ -911,10 +914,10 @@ static int osc_shrink_grant(struct client_obd *cli)
__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
(cli->cl_max_pages_per_rpc << PAGE_SHIFT);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_avail_grant <= target_bytes)
target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return osc_shrink_grant_to_target(cli, target_bytes);
}
@@ -924,7 +927,7 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
int rc = 0;
struct ost_body *body;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
/* Don't shrink if we are already above or below the desired limit
* We don't want to shrink below a single RPC, as that will negatively
* impact block allocation and long-term performance.
@@ -933,10 +936,10 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
if (target_bytes >= cli->cl_avail_grant) {
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return 0;
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
body = kzalloc(sizeof(*body), GFP_NOFS);
if (!body)
@@ -944,10 +947,10 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
osc_announce_cached(cli, &body->oa, 0);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
body->oa.o_grant = cli->cl_avail_grant - target_bytes;
cli->cl_avail_grant = target_bytes;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
body->oa.o_valid |= OBD_MD_FLFLAGS;
body->oa.o_flags = 0;
@@ -1035,7 +1038,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
* race is tolerable here: if we're evicted, but imp_state already
* left EVICTED state, then cl_dirty must be 0 already.
*/
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
cli->cl_avail_grant = ocd->ocd_grant;
else
@@ -1053,7 +1056,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
/* determine the appropriate chunk size used by osc_extent. */
cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
cli->cl_import->imp_obd->obd_name,
@@ -1082,7 +1085,7 @@ static void handle_short_read(int nob_read, u32 page_count,
if (pga[i]->count > nob_read) {
/* EOF inside this page */
ptr = kmap(pga[i]->pg) +
- (pga[i]->off & ~CFS_PAGE_MASK);
+ (pga[i]->off & ~PAGE_MASK);
memset(ptr + nob_read, 0, pga[i]->count - nob_read);
kunmap(pga[i]->pg);
page_count--;
@@ -1097,7 +1100,7 @@ static void handle_short_read(int nob_read, u32 page_count,
/* zero remaining pages */
while (page_count-- > 0) {
- ptr = kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
+ ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
memset(ptr, 0, pga[i]->count);
kunmap(pga[i]->pg);
i++;
@@ -1144,7 +1147,8 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
if (p1->flag != p2->flag) {
unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
- OBD_BRW_SYNC | OBD_BRW_ASYNC|OBD_BRW_NOQUOTA);
+ OBD_BRW_SYNC | OBD_BRW_ASYNC |
+ OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);
/* warn if we try to combine flags that we don't know to be
* safe to combine
@@ -1188,32 +1192,29 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
if (i == 0 && opc == OST_READ &&
OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
unsigned char *ptr = kmap(pga[i]->pg);
- int off = pga[i]->off & ~CFS_PAGE_MASK;
+ int off = pga[i]->off & ~PAGE_MASK;
memcpy(ptr + off, "bad1", min(4, nob));
kunmap(pga[i]->pg);
}
cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
- pga[i]->off & ~CFS_PAGE_MASK,
+ pga[i]->off & ~PAGE_MASK,
count);
CDEBUG(D_PAGE,
"page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n",
pga[i]->pg, pga[i]->pg->mapping, pga[i]->pg->index,
(long)pga[i]->pg->flags, page_count(pga[i]->pg),
page_private(pga[i]->pg),
- (int)(pga[i]->off & ~CFS_PAGE_MASK));
+ (int)(pga[i]->off & ~PAGE_MASK));
nob -= pga[i]->count;
pg_count--;
i++;
}
- bufsize = 4;
+ bufsize = sizeof(cksum);
err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
- if (err)
- cfs_crypto_hash_final(hdesc, NULL, NULL);
-
/* For sending we only compute the wrong checksum instead
* of corrupting the data so it is still correct on a redo
*/
@@ -1312,7 +1313,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
pg_prev = pga[0];
for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
struct brw_page *pg = pga[i];
- int poff = pg->off & ~CFS_PAGE_MASK;
+ int poff = pg->off & ~PAGE_MASK;
LASSERT(pg->count > 0);
/* make sure there is no gap in the middle of page array */
@@ -1658,6 +1659,7 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
aa->aa_resends++;
new_req->rq_interpret_reply = request->rq_interpret_reply;
new_req->rq_async_args = request->rq_async_args;
+ new_req->rq_commit_cb = request->rq_commit_cb;
/* cap resend delay to the current request timeout, this is similar to
* what ptlrpc does (see after_reply())
*/
@@ -1737,7 +1739,6 @@ static int brw_interpret(const struct lu_env *env,
struct osc_brw_async_args *aa = data;
struct osc_extent *ext;
struct osc_extent *tmp;
- struct cl_object *obj = NULL;
struct client_obd *cli = aa->aa_cli;
rc = osc_brw_fini_request(req, rc);
@@ -1766,24 +1767,17 @@ static int brw_interpret(const struct lu_env *env,
rc = -EIO;
}
- list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
- if (!obj && rc == 0) {
- obj = osc2cl(ext->oe_obj);
- cl_object_get(obj);
- }
-
- list_del_init(&ext->oe_link);
- osc_extent_finish(env, ext, 1, rc);
- }
- LASSERT(list_empty(&aa->aa_exts));
- LASSERT(list_empty(&aa->aa_oaps));
-
- if (obj) {
+ if (rc == 0) {
struct obdo *oa = aa->aa_oa;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
unsigned long valid = 0;
+ struct cl_object *obj;
+ struct osc_async_page *last;
+
+ last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
+ obj = osc2cl(last->oap_obj);
- LASSERT(rc == 0);
+ cl_object_attr_lock(obj);
if (oa->o_valid & OBD_MD_FLBLOCKS) {
attr->cat_blocks = oa->o_blocks;
valid |= CAT_BLOCKS;
@@ -1800,21 +1794,45 @@ static int brw_interpret(const struct lu_env *env,
attr->cat_ctime = oa->o_ctime;
valid |= CAT_CTIME;
}
- if (valid != 0) {
- cl_object_attr_lock(obj);
- cl_object_attr_set(env, obj, attr, valid);
- cl_object_attr_unlock(obj);
+
+ if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
+ struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
+ loff_t last_off = last->oap_count + last->oap_obj_off;
+
+			/* Change the file size if this is an out-of-quota or
+			 * direct I/O write and it extends the file size
+ */
+ if (loi->loi_lvb.lvb_size < last_off) {
+ attr->cat_size = last_off;
+ valid |= CAT_SIZE;
+ }
+ /* Extend KMS if it's not a lockless write */
+ if (loi->loi_kms < last_off &&
+ oap2osc_page(last)->ops_srvlock == 0) {
+ attr->cat_kms = last_off;
+ valid |= CAT_KMS;
+ }
}
- cl_object_put(env, obj);
+
+ if (valid != 0)
+ cl_object_attr_set(env, obj, attr, valid);
+ cl_object_attr_unlock(obj);
}
kmem_cache_free(obdo_cachep, aa->aa_oa);
+ list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
+ list_del_init(&ext->oe_link);
+ osc_extent_finish(env, ext, 1, rc);
+ }
+ LASSERT(list_empty(&aa->aa_exts));
+ LASSERT(list_empty(&aa->aa_oaps));
+
cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
req->rq_bulk->bd_nob_transferred);
osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
/* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
* is called so we know whether to go to sync BRWs or wait for more
* RPCs to complete
@@ -1824,12 +1842,31 @@ static int brw_interpret(const struct lu_env *env,
else
cli->cl_r_in_flight--;
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
osc_io_unplug(env, cli, NULL);
return rc;
}
+static void brw_commit(struct ptlrpc_request *req)
+{
+ spin_lock(&req->rq_lock);
+ /*
+ * If osc_inc_unstable_pages (via osc_extent_finish) races with
+	 * this callback (invoked via rq_commit_cb), we need to ensure
+	 * osc_dec_unstable_pages is still called; otherwise unstable
+ * pages may be leaked.
+ */
+ if (req->rq_unstable) {
+ spin_unlock(&req->rq_lock);
+ osc_dec_unstable_pages(req);
+ spin_lock(&req->rq_lock);
+ } else {
+ req->rq_committed = 1;
+ }
+ spin_unlock(&req->rq_lock);
+}
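
The callback above only shows the commit side of the handshake; assuming the increment side (osc_inc_unstable_pages, not in this hunk) checks rq_committed symmetrically, the unstable-page count is decremented exactly once no matter which side runs first. A single-threaded sketch of that assumption (not part of the patch):

	#include <assert.h>
	#include <stdio.h>

	struct req { int committed; int unstable; };
	static long unstable_pages;

	/* models the assumed increment side */
	static void inc_unstable(struct req *r)
	{
		unstable_pages++;
		if (r->committed)
			unstable_pages--;	/* commit already ran: undo now */
		else
			r->unstable = 1;	/* let the commit side undo it */
	}

	/* models brw_commit() above */
	static void commit_cb(struct req *r)
	{
		if (r->unstable)
			unstable_pages--;	/* inc already ran: undo now */
		else
			r->committed = 1;	/* tell the inc side */
	}

	int main(void)
	{
		struct req a = { 0, 0 }, b = { 0, 0 };

		inc_unstable(&a); commit_cb(&a);	/* inc first */
		commit_cb(&b);    inc_unstable(&b);	/* commit first */
		assert(unstable_pages == 0);		/* balanced either way */
		printf("unstable_pages = %ld\n", unstable_pages);
		return 0;
	}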
+
/**
* Build an RPC by the list of extent @ext_list. The caller must ensure
* that the total pages in this list are NOT over max pages per RPC.
@@ -1920,7 +1957,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
pga[i] = &oap->oap_brw_page;
pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
- pga[i]->pg, page_index(oap->oap_page), oap,
+ pga[i]->pg, oap->oap_page->index, oap,
pga[i]->flag);
i++;
cl_req_page_add(env, clerq, page);
@@ -1949,6 +1986,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
goto out;
}
+ req->rq_commit_cb = brw_commit;
req->rq_interpret_reply = brw_interpret;
if (mem_tight != 0)
@@ -1992,7 +2030,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
if (tmp)
tmp->oap_request = ptlrpc_request_addref(req);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
starting_offset >>= PAGE_SHIFT;
if (cmd == OBD_BRW_READ) {
cli->cl_r_in_flight++;
@@ -2007,7 +2045,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
starting_offset + 1);
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
page_count, aa, cli->cl_r_in_flight,
@@ -2055,14 +2093,12 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
lock_res_and_lock(lock);
- spin_lock(&osc_ast_guard);
if (!lock->l_ast_data)
lock->l_ast_data = data;
if (lock->l_ast_data == data)
set = 1;
- spin_unlock(&osc_ast_guard);
unlock_res_and_lock(lock);
return set;
@@ -2104,36 +2140,38 @@ static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
return rc;
}
-static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
- obd_enqueue_update_f upcall, void *cookie,
- __u64 *flags, int agl, int rc)
+static int osc_enqueue_fini(struct ptlrpc_request *req,
+ osc_enqueue_upcall_f upcall, void *cookie,
+ struct lustre_handle *lockh, enum ldlm_mode mode,
+ __u64 *flags, int agl, int errcode)
{
- int intent = *flags & LDLM_FL_HAS_INTENT;
-
- if (intent) {
- /* The request was created before ldlm_cli_enqueue call. */
- if (rc == ELDLM_LOCK_ABORTED) {
- struct ldlm_reply *rep;
+ bool intent = *flags & LDLM_FL_HAS_INTENT;
+ int rc;
- rep = req_capsule_server_get(&req->rq_pill,
- &RMF_DLM_REP);
+ /* The request was created before ldlm_cli_enqueue call. */
+ if (intent && errcode == ELDLM_LOCK_ABORTED) {
+ struct ldlm_reply *rep;
- rep->lock_policy_res1 =
- ptlrpc_status_ntoh(rep->lock_policy_res1);
- if (rep->lock_policy_res1)
- rc = rep->lock_policy_res1;
- }
- }
+ rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- if ((intent != 0 && rc == ELDLM_LOCK_ABORTED && agl == 0) ||
- (rc == 0)) {
+ rep->lock_policy_res1 =
+ ptlrpc_status_ntoh(rep->lock_policy_res1);
+ if (rep->lock_policy_res1)
+ errcode = rep->lock_policy_res1;
+ if (!agl)
+ *flags |= LDLM_FL_LVB_READY;
+ } else if (errcode == ELDLM_OK) {
*flags |= LDLM_FL_LVB_READY;
- CDEBUG(D_INODE, "got kms %llu blocks %llu mtime %llu\n",
- lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
}
/* Call the update callback. */
- rc = (*upcall)(cookie, rc);
+ rc = (*upcall)(cookie, lockh, errcode);
+ /* release the reference taken in ldlm_cli_enqueue() */
+ if (errcode == ELDLM_LOCK_MATCHED)
+ errcode = ELDLM_OK;
+ if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
+ ldlm_lock_decref(lockh, mode);
+
return rc;
}
@@ -2142,62 +2180,50 @@ static int osc_enqueue_interpret(const struct lu_env *env,
struct osc_enqueue_args *aa, int rc)
{
struct ldlm_lock *lock;
- struct lustre_handle handle;
- __u32 mode;
- struct ost_lvb *lvb;
- __u32 lvb_len;
- __u64 *flags = aa->oa_flags;
-
- /* Make a local copy of a lock handle and a mode, because aa->oa_*
- * might be freed anytime after lock upcall has been called.
- */
- lustre_handle_copy(&handle, aa->oa_lockh);
- mode = aa->oa_ei->ei_mode;
+ struct lustre_handle *lockh = &aa->oa_lockh;
+ enum ldlm_mode mode = aa->oa_mode;
+ struct ost_lvb *lvb = aa->oa_lvb;
+ __u32 lvb_len = sizeof(*lvb);
+ __u64 flags = 0;
+
/* ldlm_cli_enqueue is holding a reference on the lock, so it must
* be valid.
*/
- lock = ldlm_handle2lock(&handle);
+ lock = ldlm_handle2lock(lockh);
+ LASSERTF(lock, "lockh %llx, req %p, aa %p - client evicted?\n",
+ lockh->cookie, req, aa);
/* Take an additional reference so that a blocking AST that
* ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
* to arrive after an upcall has been executed by
* osc_enqueue_fini().
*/
- ldlm_lock_addref(&handle, mode);
+ ldlm_lock_addref(lockh, mode);
+
+ /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
/* Let CP AST to grant the lock first. */
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
- if (aa->oa_agl && rc == ELDLM_LOCK_ABORTED) {
- lvb = NULL;
- lvb_len = 0;
- } else {
- lvb = aa->oa_lvb;
- lvb_len = sizeof(*aa->oa_lvb);
+ if (aa->oa_agl) {
+ LASSERT(!aa->oa_lvb);
+ LASSERT(!aa->oa_flags);
+ aa->oa_flags = &flags;
}
/* Complete obtaining the lock procedure. */
- rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
- mode, flags, lvb, lvb_len, &handle, rc);
+ rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_type, 1,
+ aa->oa_mode, aa->oa_flags, lvb, lvb_len,
+ lockh, rc);
/* Complete osc stuff. */
- rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie,
- flags, aa->oa_agl, rc);
+ rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
+ aa->oa_flags, aa->oa_agl, rc);
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
- /* Release the lock for async request. */
- if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
- /*
- * Releases a reference taken by ldlm_cli_enqueue(), if it is
- * not already released by
- * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
- */
- ldlm_lock_decref(&handle, mode);
-
- LASSERTF(lock, "lockh %p, req %p, aa %p - client evicted?\n",
- aa->oa_lockh, req, aa);
- ldlm_lock_decref(&handle, mode);
+ ldlm_lock_decref(lockh, mode);
LDLM_LOCK_PUT(lock);
return rc;
}
@@ -2209,29 +2235,29 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
* other synchronous requests, however keeping some locks and trying to obtain
* others may take a considerable amount of time in a case of ost failure; and
* when other sync requests do not get released lock from a client, the client
- * is excluded from the cluster -- such scenarious make the life difficult, so
+ * is evicted from the cluster -- such scenarios make life difficult, so
* release locks just after they are obtained.
*/
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
__u64 *flags, ldlm_policy_data_t *policy,
struct ost_lvb *lvb, int kms_valid,
- obd_enqueue_update_f upcall, void *cookie,
+ osc_enqueue_upcall_f upcall, void *cookie,
struct ldlm_enqueue_info *einfo,
- struct lustre_handle *lockh,
struct ptlrpc_request_set *rqset, int async, int agl)
{
struct obd_device *obd = exp->exp_obd;
+ struct lustre_handle lockh = { 0 };
struct ptlrpc_request *req = NULL;
int intent = *flags & LDLM_FL_HAS_INTENT;
- __u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
+ __u64 match_lvb = agl ? 0 : LDLM_FL_LVB_READY;
enum ldlm_mode mode;
int rc;
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother.
*/
- policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
- policy->l_extent.end |= ~CFS_PAGE_MASK;
+ policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
+ policy->l_extent.end |= ~PAGE_MASK;
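/* Editor's note -- a worked example of the rounding above, assuming
 * 4 KiB pages (~PAGE_MASK == 0xfff); the values are illustrative:
 *
 *     start = 0x1234:  start -= start & ~PAGE_MASK  ->  0x1000
 *     end   = 0x5678:  end   |= ~PAGE_MASK          ->  0x5fff
 *
 * i.e. the lock extent is widened outward to whole-page boundaries.
 */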
/*
* kms is not valid when either object is completely fresh (so that no
@@ -2259,64 +2285,46 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
if (einfo->ei_mode == LCK_PR)
mode |= LCK_PW;
mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
- einfo->ei_type, policy, mode, lockh, 0);
+ einfo->ei_type, policy, mode, &lockh, 0);
if (mode) {
- struct ldlm_lock *matched = ldlm_handle2lock(lockh);
+ struct ldlm_lock *matched;
- if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
- /* For AGL, if enqueue RPC is sent but the lock is not
- * granted, then skip to process this strpe.
- * Return -ECANCELED to tell the caller.
+ if (*flags & LDLM_FL_TEST_LOCK)
+ return ELDLM_OK;
+
+ matched = ldlm_handle2lock(&lockh);
+ if (agl) {
+ /* AGL enqueues DLM locks speculatively. Therefore, if
+ * a DLM lock already exists, it will just inform the
+ * caller to cancel the AGL process for this stripe.
*/
- ldlm_lock_decref(lockh, mode);
+ ldlm_lock_decref(&lockh, mode);
LDLM_LOCK_PUT(matched);
return -ECANCELED;
- }
-
- if (osc_set_lock_data_with_check(matched, einfo)) {
+ } else if (osc_set_lock_data_with_check(matched, einfo)) {
*flags |= LDLM_FL_LVB_READY;
- /* addref the lock only if not async requests and PW
- * lock is matched whereas we asked for PR.
- */
- if (!rqset && einfo->ei_mode != mode)
- ldlm_lock_addref(lockh, LCK_PR);
- if (intent) {
- /* I would like to be able to ASSERT here that
- * rss <= kms, but I can't, for reasons which
- * are explained in lov_enqueue()
- */
- }
-
- /* We already have a lock, and it's referenced.
- *
- * At this point, the cl_lock::cll_state is CLS_QUEUING,
- * AGL upcall may change it to CLS_HELD directly.
- */
- (*upcall)(cookie, ELDLM_OK);
+ /* We already have a lock, and it's referenced. */
+ (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
- if (einfo->ei_mode != mode)
- ldlm_lock_decref(lockh, LCK_PW);
- else if (rqset)
- /* For async requests, decref the lock. */
- ldlm_lock_decref(lockh, einfo->ei_mode);
+ ldlm_lock_decref(&lockh, mode);
LDLM_LOCK_PUT(matched);
return ELDLM_OK;
+ } else {
+ ldlm_lock_decref(&lockh, mode);
+ LDLM_LOCK_PUT(matched);
}
-
- ldlm_lock_decref(lockh, mode);
- LDLM_LOCK_PUT(matched);
}
- no_match:
+no_match:
+ if (*flags & LDLM_FL_TEST_LOCK)
+ return -ENOLCK;
if (intent) {
- LIST_HEAD(cancels);
-
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_ENQUEUE_LVB);
if (!req)
return -ENOMEM;
- rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
+ rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
if (rc) {
ptlrpc_request_free(req);
return rc;
@@ -2331,21 +2339,31 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
*flags &= ~LDLM_FL_BLOCK_GRANTED;
rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
- sizeof(*lvb), LVB_T_OST, lockh, async);
- if (rqset) {
+ sizeof(*lvb), LVB_T_OST, &lockh, async);
+ if (async) {
if (!rc) {
struct osc_enqueue_args *aa;
- CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
- aa->oa_ei = einfo;
aa->oa_exp = exp;
- aa->oa_flags = flags;
+ aa->oa_mode = einfo->ei_mode;
+ aa->oa_type = einfo->ei_type;
+ lustre_handle_copy(&aa->oa_lockh, &lockh);
aa->oa_upcall = upcall;
aa->oa_cookie = cookie;
- aa->oa_lvb = lvb;
- aa->oa_lockh = lockh;
aa->oa_agl = !!agl;
+ if (!agl) {
+ aa->oa_flags = flags;
+ aa->oa_lvb = lvb;
+ } else {
+ /* AGL essentially enqueues a DLM lock in
+ * advance, so we don't care about the
+ * result of the AGL enqueue.
+ */
+ aa->oa_lvb = NULL;
+ aa->oa_flags = NULL;
+ }
req->rq_interpret_reply =
(ptlrpc_interpterer_t)osc_enqueue_interpret;
@@ -2359,7 +2377,8 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
return rc;
}
- rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc);
+ rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
+ flags, agl, rc);
if (intent)
ptlrpc_req_finished(req);
@@ -2381,8 +2400,8 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother
*/
- policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
- policy->l_extent.end |= ~CFS_PAGE_MASK;
+ policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
+ policy->l_extent.end |= ~PAGE_MASK;
/* Next, search for already existing extent locks that will cover us */
/* If we're trying to read, we also search for an existing PW lock. The
@@ -2493,7 +2512,7 @@ static int osc_statfs_async(struct obd_export *exp,
}
req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
- CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
aa->aa_oi = oinfo;
@@ -2787,7 +2806,7 @@ out:
goto skip_locking;
policy.l_extent.start = fm_key->fiemap.fm_start &
- CFS_PAGE_MASK;
+ PAGE_MASK;
if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
fm_key->fiemap.fm_start + PAGE_SIZE - 1)
@@ -2795,7 +2814,7 @@ out:
else
policy.l_extent.end = (fm_key->fiemap.fm_start +
fm_key->fiemap.fm_length +
- PAGE_SIZE - 1) & CFS_PAGE_MASK;
+ PAGE_SIZE - 1) & PAGE_MASK;
ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
@@ -2913,7 +2932,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
int nr = atomic_read(&cli->cl_lru_in_list) >> 1;
int target = *(int *)val;
- nr = osc_lru_shrink(cli, min(nr, target));
+ nr = osc_lru_shrink(env, cli, min(nr, target), true);
*(int *)val -= nr;
return 0;
}
@@ -2992,12 +3011,12 @@ static int osc_reconnect(const struct lu_env *env,
if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
long lost_grant;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
2 * cli_brw_size(obd);
lost_grant = cli->cl_lost_grant;
cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld.\n",
data->ocd_connect_flags,
@@ -3047,10 +3066,10 @@ static int osc_import_event(struct obd_device *obd,
switch (event) {
case IMP_EVENT_DISCON: {
cli = &obd->u.cli;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_avail_grant = 0;
cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
break;
}
case IMP_EVENT_INACTIVE: {
@@ -3073,8 +3092,9 @@ static int osc_import_event(struct obd_device *obd,
ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
cl_env_put(env, &refcheck);
- } else
+ } else {
rc = PTR_ERR(env);
+ }
break;
}
case IMP_EVENT_ACTIVE: {
@@ -3116,20 +3136,14 @@ static int osc_import_event(struct obd_device *obd,
* \retval zero the lock can't be canceled
* \retval other ok to cancel
*/
-static int osc_cancel_for_recovery(struct ldlm_lock *lock)
+static int osc_cancel_weight(struct ldlm_lock *lock)
{
- check_res_locked(lock->l_resource);
-
/*
- * Cancel all unused extent lock in granted mode LCK_PR or LCK_CR.
- *
- * XXX as a future improvement, we can also cancel unused write lock
- * if it doesn't have dirty data and active mmaps.
+ * Cancel all unused and granted extent locks.
*/
if (lock->l_resource->lr_type == LDLM_EXTENT &&
- (lock->l_granted_mode == LCK_PR ||
- lock->l_granted_mode == LCK_CR) &&
- (osc_dlm_lock_pageref(lock) == 0))
+ lock->l_granted_mode == lock->l_req_mode &&
+ osc_ldlm_weigh_ast(lock) == 0)
return 1;
return 0;
@@ -3170,6 +3184,14 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
}
cli->cl_writeback_work = handler;
+ handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
+ if (IS_ERR(handler)) {
+ rc = PTR_ERR(handler);
+ goto out_ptlrpcd_work;
+ }
+
+ cli->cl_lru_work = handler;
+
rc = osc_quota_setup(obd);
if (rc)
goto out_ptlrpcd_work;
@@ -3198,11 +3220,18 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
}
INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
- ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
+ ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
return rc;
out_ptlrpcd_work:
- ptlrpcd_destroy_work(handler);
+ if (cli->cl_writeback_work) {
+ ptlrpcd_destroy_work(cli->cl_writeback_work);
+ cli->cl_writeback_work = NULL;
+ }
+ if (cli->cl_lru_work) {
+ ptlrpcd_destroy_work(cli->cl_lru_work);
+ cli->cl_lru_work = NULL;
+ }
out_client_setup:
client_obd_cleanup(obd);
out_ptlrpcd:
@@ -3241,6 +3270,10 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
ptlrpcd_destroy_work(cli->cl_writeback_work);
cli->cl_writeback_work = NULL;
}
+ if (cli->cl_lru_work) {
+ ptlrpcd_destroy_work(cli->cl_lru_work);
+ cli->cl_lru_work = NULL;
+ }
obd_cleanup_client_import(obd);
ptlrpc_lprocfs_unregister_obd(obd);
lprocfs_obd_cleanup(obd);
@@ -3330,7 +3363,6 @@ static struct obd_ops osc_obd_ops = {
};
extern struct lu_kmem_descr osc_caches[];
-extern spinlock_t osc_ast_guard;
extern struct lock_class_key osc_ast_guard_class;
static int __init osc_init(void)
@@ -3357,9 +3389,6 @@ static int __init osc_init(void)
if (rc)
goto out_kmem;
- spin_lock_init(&osc_ast_guard);
- lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
-
/* This is obviously too much memory, only prevent overflow here */
if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0) {
rc = -EINVAL;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index cf3ac8eee..4b7912a2c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -595,9 +595,9 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
struct obd_import *imp = request->rq_import;
int rc;
- if (unlikely(ctx))
+ if (unlikely(ctx)) {
request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
- else {
+ } else {
rc = sptlrpc_req_get_ctx(request);
if (rc)
goto out_free;
@@ -1082,7 +1082,6 @@ static int ptlrpc_console_allow(struct ptlrpc_request *req)
*/
if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
(opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) {
-
/* Suppress timed out reconnect requests */
if (req->rq_timedout)
return 0;
@@ -2087,7 +2086,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
set, timeout);
- if (timeout == 0 && !cfs_signal_pending())
+ if (timeout == 0 && !signal_pending(current))
/*
* No requests are in-flight (ether timed out
* or delayed), so we can allow interrupts.
@@ -2114,7 +2113,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
* it being ignored forever
*/
if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
- cfs_signal_pending()) {
+ signal_pending(current)) {
sigset_t blocked_sigs =
cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
@@ -2124,7 +2123,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
* important signals since ptlrpc set is not easily
* reentrant from userspace again
*/
- if (cfs_signal_pending())
+ if (signal_pending(current))
ptlrpc_interrupted_set(set);
cfs_restore_sigs(blocked_sigs);
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 47be21ac9..fdcde9bbd 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -69,7 +69,6 @@ void request_out_callback(lnet_event_t *ev)
req->rq_req_unlink = 0;
if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
-
/* Failed send: make it seem like the reply timed out, just
* like failing sends in client.c does currently...
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index cd94fed0f..a4f7544f4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -1001,6 +1001,7 @@ finish:
return 0;
}
} else {
+ static bool warned;
spin_lock(&imp->imp_lock);
list_del(&imp->imp_conn_current->oic_item);
@@ -1021,7 +1022,7 @@ finish:
goto out;
}
- if ((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
+ if (!warned && (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
(ocd->ocd_version > LUSTRE_VERSION_CODE +
LUSTRE_VERSION_OFFSET_WARN ||
ocd->ocd_version < LUSTRE_VERSION_CODE -
@@ -1029,10 +1030,8 @@ finish:
/* Sigh, some compilers do not like #ifdef in the middle
* of macro arguments
*/
- const char *older = "older. Consider upgrading server or downgrading client"
- ;
- const char *newer = "newer than client version. Consider upgrading client"
- ;
+ const char *older = "older than client. Consider upgrading server";
+ const char *newer = "newer than client. Consider recompiling application";
LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) is much %s (%s)\n",
obd2cli_tgt(imp->imp_obd),
@@ -1042,6 +1041,7 @@ finish:
OBD_OCD_VERSION_FIX(ocd->ocd_version),
ocd->ocd_version > LUSTRE_VERSION_CODE ?
newer : older, LUSTRE_VERSION_STRING);
+ warned = true;
}
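/* Editor's note -- the new `warned` flag (a block-scoped static) turns
 * this into a print-once warning: it persists across reconnect
 * attempts, so the LCONSOLE_WARN above fires only the first time a
 * version mismatch is observed.
 */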
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
@@ -1370,7 +1370,6 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
if (rc)
goto out;
}
-
}
if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
@@ -1453,7 +1452,6 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
back_to_sleep, LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(imp->imp_recovery_waitq,
!ptlrpc_import_in_recovery(imp), &lwi);
-
}
spin_lock(&imp->imp_lock);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 5b06901e5..c0ecd1625 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -160,6 +160,16 @@ static const struct req_msg_field *fld_query_server[] = {
&RMF_FLD_MDFLD
};
+static const struct req_msg_field *fld_read_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_FLD_MDFLD
+};
+
+static const struct req_msg_field *fld_read_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_GENERIC_DATA
+};
+
static const struct req_msg_field *mds_getattr_name_client[] = {
&RMF_PTLRPC_BODY,
&RMF_MDT_BODY,
@@ -566,7 +576,7 @@ static const struct req_msg_field *ost_get_info_generic_server[] = {
static const struct req_msg_field *ost_get_info_generic_client[] = {
&RMF_PTLRPC_BODY,
- &RMF_SETINFO_KEY
+ &RMF_GETINFO_KEY
};
static const struct req_msg_field *ost_get_last_id_server[] = {
@@ -574,6 +584,12 @@ static const struct req_msg_field *ost_get_last_id_server[] = {
&RMF_OBD_ID
};
+static const struct req_msg_field *ost_get_last_fid_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_GETINFO_KEY,
+ &RMF_FID,
+};
+
static const struct req_msg_field *ost_get_last_fid_server[] = {
&RMF_PTLRPC_BODY,
&RMF_FID,
@@ -643,6 +659,7 @@ static struct req_format *req_formats[] = {
&RQF_MGS_CONFIG_READ,
&RQF_SEQ_QUERY,
&RQF_FLD_QUERY,
+ &RQF_FLD_READ,
&RQF_MDS_CONNECT,
&RQF_MDS_DISCONNECT,
&RQF_MDS_GET_INFO,
@@ -696,7 +713,7 @@ static struct req_format *req_formats[] = {
&RQF_OST_BRW_WRITE,
&RQF_OST_STATFS,
&RQF_OST_SET_GRANT_INFO,
- &RQF_OST_GET_INFO_GENERIC,
+ &RQF_OST_GET_INFO,
&RQF_OST_GET_INFO_LAST_ID,
&RQF_OST_GET_INFO_LAST_FID,
&RQF_OST_SET_INFO_LAST_FID,
@@ -1162,6 +1179,10 @@ struct req_format RQF_FLD_QUERY =
DEFINE_REQ_FMT0("FLD_QUERY", fld_query_client, fld_query_server);
EXPORT_SYMBOL(RQF_FLD_QUERY);
+struct req_format RQF_FLD_READ =
+ DEFINE_REQ_FMT0("FLD_READ", fld_read_client, fld_read_server);
+EXPORT_SYMBOL(RQF_FLD_READ);
+
struct req_format RQF_LOG_CANCEL =
DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty);
EXPORT_SYMBOL(RQF_LOG_CANCEL);
@@ -1519,10 +1540,10 @@ struct req_format RQF_OST_SET_GRANT_INFO =
ost_body_only);
EXPORT_SYMBOL(RQF_OST_SET_GRANT_INFO);
-struct req_format RQF_OST_GET_INFO_GENERIC =
+struct req_format RQF_OST_GET_INFO =
DEFINE_REQ_FMT0("OST_GET_INFO", ost_get_info_generic_client,
ost_get_info_generic_server);
-EXPORT_SYMBOL(RQF_OST_GET_INFO_GENERIC);
+EXPORT_SYMBOL(RQF_OST_GET_INFO);
struct req_format RQF_OST_GET_INFO_LAST_ID =
DEFINE_REQ_FMT0("OST_GET_INFO_LAST_ID", ost_get_info_generic_client,
@@ -1530,7 +1551,7 @@ struct req_format RQF_OST_GET_INFO_LAST_ID =
EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_ID);
struct req_format RQF_OST_GET_INFO_LAST_FID =
- DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", obd_set_info_client,
+ DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", ost_get_last_fid_client,
ost_get_last_fid_server);
EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_FID);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index c95a91ce2..64c0f1e17 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -131,6 +131,7 @@ static struct ll_rpc_opcode {
{ SEC_CTX_INIT_CONT, "sec_ctx_init_cont" },
{ SEC_CTX_FINI, "sec_ctx_fini" },
{ FLD_QUERY, "fld_query" },
+ { FLD_READ, "fld_read" },
};
static struct ll_eopcode {
@@ -679,11 +680,11 @@ static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file,
/**
* The second token is either NULL, or an optional [reg|hp] string
*/
- if (strcmp(cmd, "reg") == 0)
+ if (strcmp(cmd, "reg") == 0) {
queue = PTLRPC_NRS_QUEUE_REG;
- else if (strcmp(cmd, "hp") == 0)
+ } else if (strcmp(cmd, "hp") == 0) {
queue = PTLRPC_NRS_QUEUE_HP;
- else {
+ } else {
rc = -EINVAL;
goto out;
}
@@ -693,8 +694,9 @@ default_queue:
if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc)) {
rc = -ENODEV;
goto out;
- } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc))
+ } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc)) {
queue = PTLRPC_NRS_QUEUE_REG;
+ }
/**
* Serialize NRS core lprocfs operations with policy registration/
@@ -1320,6 +1322,5 @@ int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
up_read(&obd->u.cli.cl_sem);
return count;
-
}
EXPORT_SYMBOL(lprocfs_wr_pinger_recov);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 710fb806f..c444f5168 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -975,7 +975,11 @@ static void nrs_svcpt_cleanup_locked(struct ptlrpc_service_part *svcpt)
LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
again:
- nrs = nrs_svcpt2nrs(svcpt, hp);
+ /* scp_nrs_hp could be NULL due to memory shortage. */
+ nrs = hp ? svcpt->scp_nrs_hp : &svcpt->scp_nrs_reg;
+ /* check the nrs_svcpt to see if nrs is initialized. */
+ if (!nrs || !nrs->nrs_svcpt)
+ return;
nrs->nrs_stopping = 1;
list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, pol_list) {
@@ -1038,7 +1042,6 @@ static int nrs_policy_unregister_locked(struct ptlrpc_nrs_pol_desc *desc)
LASSERT(mutex_is_locked(&ptlrpc_all_services_mutex));
list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
-
if (!nrs_policy_compatible(svc, desc) ||
unlikely(svc->srv_is_stopping))
continue;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 492d63fad..811acf6fc 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -1160,7 +1160,6 @@ __u32 lustre_msg_get_timeout(struct lustre_msg *msg)
if (!pb) {
CERROR("invalid msg %p: no ptlrpc body!\n", msg);
return 0;
-
}
return pb->pb_timeout;
}
@@ -1179,7 +1178,6 @@ __u32 lustre_msg_get_service_time(struct lustre_msg *msg)
if (!pb) {
CERROR("invalid msg %p: no ptlrpc body!\n", msg);
return 0;
-
}
return pb->pb_service_time;
}
@@ -1572,7 +1570,6 @@ static void lustre_swab_obdo(struct obdo *o)
CLASSERT(offsetof(typeof(*o), o_padding_4) != 0);
CLASSERT(offsetof(typeof(*o), o_padding_5) != 0);
CLASSERT(offsetof(typeof(*o), o_padding_6) != 0);
-
}
void lustre_swab_obd_statfs(struct obd_statfs *os)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index db003f5da..76a355a9d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -387,7 +387,8 @@ static int ptlrpcd(void *arg)
{
struct ptlrpcd_ctl *pc = arg;
struct ptlrpc_request_set *set;
- struct lu_env env = { .le_ses = NULL };
+ struct lu_context ses = { 0 };
+ struct lu_env env = { .le_ses = &ses };
int rc = 0;
int exit = 0;
@@ -416,6 +417,13 @@ static int ptlrpcd(void *arg)
*/
rc = lu_context_init(&env.le_ctx,
LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
+ if (rc == 0) {
+ rc = lu_context_init(env.le_ses,
+ LCT_SESSION | LCT_REMEMBER | LCT_NOREF);
+ if (rc != 0)
+ lu_context_fini(&env.le_ctx);
+ }
+
if (rc != 0)
goto failed;
@@ -436,9 +444,10 @@ static int ptlrpcd(void *arg)
ptlrpc_expired_set, set);
lu_context_enter(&env.le_ctx);
- l_wait_event(set->set_waitq,
- ptlrpcd_check(&env, pc), &lwi);
+ lu_context_enter(env.le_ses);
+ l_wait_event(set->set_waitq, ptlrpcd_check(&env, pc), &lwi);
lu_context_exit(&env.le_ctx);
+ lu_context_exit(env.le_ses);
/*
* Abort inflight rpcs for forced stop case.
@@ -461,6 +470,7 @@ static int ptlrpcd(void *arg)
if (!list_empty(&set->set_requests))
ptlrpc_set_wait(set);
lu_context_fini(&env.le_ctx);
+ lu_context_fini(env.le_ses);
complete(&pc->pc_finishing);
@@ -899,8 +909,11 @@ int ptlrpcd_addref(void)
int rc = 0;
mutex_lock(&ptlrpcd_mutex);
- if (++ptlrpcd_users == 1)
+ if (++ptlrpcd_users == 1) {
rc = ptlrpcd_init();
+ if (rc < 0)
+ ptlrpcd_users--;
+ }
mutex_unlock(&ptlrpcd_mutex);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index d3872b8c9..02e6cda4c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -41,7 +41,6 @@
#define DEBUG_SUBSYSTEM S_SEC
#include "../../include/linux/libcfs/libcfs.h"
-#include <linux/crypto.h>
#include "../include/obd.h"
#include "../include/obd_cksum.h"
@@ -511,7 +510,6 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
{
struct cfs_crypto_hash_desc *hdesc;
int hashsize;
- char hashbuf[64];
unsigned int bufsize;
int i, err;
@@ -529,21 +527,23 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
for (i = 0; i < desc->bd_iov_count; i++) {
cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
- desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
+ desc->bd_iov[i].kiov_offset & ~PAGE_MASK,
desc->bd_iov[i].kiov_len);
}
+
if (hashsize > buflen) {
+ unsigned char hashbuf[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
+
bufsize = sizeof(hashbuf);
- err = cfs_crypto_hash_final(hdesc, (unsigned char *)hashbuf,
- &bufsize);
+ LASSERTF(bufsize >= hashsize, "bufsize = %u < hashsize %u\n",
+ bufsize, hashsize);
+ err = cfs_crypto_hash_final(hdesc, hashbuf, &bufsize);
memcpy(buf, hashbuf, buflen);
} else {
bufsize = buflen;
err = cfs_crypto_hash_final(hdesc, buf, &bufsize);
}
- if (err)
- cfs_crypto_hash_final(hdesc, NULL, NULL);
return err;
}
EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
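/* Editor's note -- the rewritten tail above follows the usual
 * truncated-digest pattern: when the caller's buffer is smaller than
 * the algorithm's digest size, finalize into a stack buffer sized for
 * the largest supported digest (CFS_CRYPTO_HASH_DIGESTSIZE_MAX) and
 * memcpy() only buflen bytes out; otherwise hash straight into the
 * caller's buffer.
 */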
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 6276bf59c..37c9f4c45 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -162,7 +162,7 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
continue;
ptr = kmap(desc->bd_iov[i].kiov_page);
- off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+ off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
ptr[off] ^= 0x1;
kunmap(desc->bd_iov[i].kiov_page);
return;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 1bbd1d39c..17c7b9749 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -838,6 +838,11 @@ static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
{
ptlrpc_server_hpreq_fini(req);
+ if (req->rq_session.lc_thread) {
+ lu_context_exit(&req->rq_session);
+ lu_context_fini(&req->rq_session);
+ }
+
ptlrpc_server_drop_request(req);
}
@@ -1579,6 +1584,21 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
}
req->rq_svc_thread = thread;
+ if (thread) {
+ /* initialize the request session; it is needed for
+ * request processing by the target
+ */
+ rc = lu_context_init(&req->rq_session,
+ LCT_SERVER_SESSION | LCT_NOREF);
+ if (rc) {
+ CERROR("%s: failure to initialize session: rc = %d\n",
+ thread->t_name, rc);
+ goto err_req;
+ }
+ req->rq_session.lc_thread = thread;
+ lu_context_enter(&req->rq_session);
+ req->rq_svc_thread->t_env->le_ses = &req->rq_session;
+ }
ptlrpc_at_add_timed(req);
@@ -1612,7 +1632,6 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
struct timespec64 arrived;
unsigned long timediff_usecs;
unsigned long arrived_usecs;
- int rc;
int fail_opc = 0;
request = ptlrpc_server_request_get(svcpt, false);
@@ -1649,21 +1668,6 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
at_get(&svcpt->scp_at_estimate));
}
- rc = lu_context_init(&request->rq_session, LCT_SESSION | LCT_NOREF);
- if (rc) {
- CERROR("Failure to initialize session: %d\n", rc);
- goto out_req;
- }
- request->rq_session.lc_thread = thread;
- request->rq_session.lc_cookie = 0x5;
- lu_context_enter(&request->rq_session);
-
- CDEBUG(D_NET, "got req %llu\n", request->rq_xid);
-
- request->rq_svc_thread = thread;
- if (thread)
- request->rq_svc_thread->t_env->le_ses = &request->rq_session;
-
if (likely(request->rq_export)) {
if (unlikely(ptlrpc_check_req(request)))
goto put_conn;
@@ -1695,14 +1699,21 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
- rc = svc->srv_ops.so_req_handler(request);
+ CDEBUG(D_NET, "got req %llu\n", request->rq_xid);
+
+ /* re-assign the request and session thread to the current one */
+ request->rq_svc_thread = thread;
+ if (thread) {
+ LASSERT(request->rq_session.lc_thread);
+ request->rq_session.lc_thread = thread;
+ request->rq_session.lc_cookie = 0x55;
+ thread->t_env->le_ses = &request->rq_session;
+ }
+ svc->srv_ops.so_req_handler(request);
ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
put_conn:
- lu_context_exit(&request->rq_session);
- lu_context_fini(&request->rq_session);
-
if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) {
DEBUG_REQ(D_WARNING, request,
"Request took longer than estimated (%lld:%llds); "
@@ -1756,7 +1767,6 @@ put_conn:
request->rq_arrival_time.tv_sec);
}
-out_req:
ptlrpc_server_finish_active_request(svcpt, request);
return 1;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index 3ffd2d91f..aacc81083 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -276,7 +276,9 @@ void lustre_assert_wire_constants(void)
(long long)FLD_QUERY);
LASSERTF(FLD_FIRST_OPC == 900, "found %lld\n",
(long long)FLD_FIRST_OPC);
- LASSERTF(FLD_LAST_OPC == 901, "found %lld\n",
+ LASSERTF(FLD_READ == 901, "found %lld\n",
+ (long long)FLD_READ);
+ LASSERTF(FLD_LAST_OPC == 902, "found %lld\n",
(long long)FLD_LAST_OPC);
LASSERTF(SEQ_QUERY == 700, "found %lld\n",
(long long)SEQ_QUERY);
@@ -1069,6 +1071,8 @@ void lustre_assert_wire_constants(void)
OBD_CONNECT_PINGLESS);
LASSERTF(OBD_CONNECT_FLOCK_DEAD == 0x8000000000000ULL,
"found 0x%.16llxULL\n", OBD_CONNECT_FLOCK_DEAD);
+ LASSERTF(OBD_CONNECT_OPEN_BY_FID == 0x20000000000000ULL,
+ "found 0x%.16llxULL\n", OBD_CONNECT_OPEN_BY_FID);
LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n",
(unsigned)OBD_CKSUM_CRC32);
LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n",
@@ -1639,6 +1643,12 @@ void lustre_assert_wire_constants(void)
OBD_BRW_ASYNC);
LASSERTF(OBD_BRW_MEMALLOC == 0x800, "found 0x%.8x\n",
OBD_BRW_MEMALLOC);
+ LASSERTF(OBD_BRW_OVER_USRQUOTA == 0x1000, "found 0x%.8x\n",
+ OBD_BRW_OVER_USRQUOTA);
+ LASSERTF(OBD_BRW_OVER_GRPQUOTA == 0x2000, "found 0x%.8x\n",
+ OBD_BRW_OVER_GRPQUOTA);
+ LASSERTF(OBD_BRW_SOFT_SYNC == 0x4000, "found 0x%.8x\n",
+ OBD_BRW_SOFT_SYNC);
/* Checks for struct ost_body */
LASSERTF((int)sizeof(struct ost_body) == 208, "found %lld\n",
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index 0078b6a92..de7e9f52e 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -37,6 +37,8 @@ source "drivers/staging/media/omap4iss/Kconfig"
source "drivers/staging/media/timb/Kconfig"
+source "drivers/staging/media/tw686x-kh/Kconfig"
+
# Keep LIRC at the end, as it has sub-menus
source "drivers/staging/media/lirc/Kconfig"
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 91495882a..60a35b3a4 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_VIDEO_OMAP1) += omap1/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_DVB_MN88472) += mn88472/
obj-$(CONFIG_VIDEO_TIMBERDALE) += timb/
+obj-$(CONFIG_VIDEO_TW686X_KH) += tw686x-kh/
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index abf330f92..8dade197f 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -308,7 +308,7 @@ module_param(radio_nr, int, 0);
MODULE_PARM_DESC(radio_nr,
"Minor number for radio device (-1 ==> auto assign)");
-static struct region_info region_configs[] = {
+static const struct region_info region_configs[] = {
/* USA */
{
.channel_spacing = 20,
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index be72a8e5f..ea3ddec75 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -154,7 +154,7 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
while ((entity = media_entity_graph_walk_next(&graph))) {
if (entity == &video->video_dev.entity)
continue;
- if (!is_media_entity_v4l2_io(entity))
+ if (!is_media_entity_v4l2_video_device(entity))
continue;
far_end = to_vpfe_video(media_entity_to_video_device(entity));
if (far_end->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
diff --git a/drivers/staging/media/omap1/omap1_camera.c b/drivers/staging/media/omap1/omap1_camera.c
index bd721e354..54b8dd2d2 100644
--- a/drivers/staging/media/omap1/omap1_camera.c
+++ b/drivers/staging/media/omap1/omap1_camera.c
@@ -1569,27 +1569,21 @@ static int omap1_cam_probe(struct platform_device *pdev)
unsigned int irq;
int err = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!res || (int)irq <= 0) {
+ if ((int)irq <= 0) {
err = -ENODEV;
goto exit;
}
- clk = clk_get(&pdev->dev, "armper_ck");
- if (IS_ERR(clk)) {
- err = PTR_ERR(clk);
- goto exit;
- }
+ clk = devm_clk_get(&pdev->dev, "armper_ck");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
- pcdev = kzalloc(sizeof(*pcdev) + resource_size(res), GFP_KERNEL);
- if (!pcdev) {
- dev_err(&pdev->dev, "Could not allocate pcdev\n");
- err = -ENOMEM;
- goto exit_put_clk;
- }
+ pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev) + resource_size(res),
+ GFP_KERNEL);
+ if (!pcdev)
+ return -ENOMEM;
- pcdev->res = res;
pcdev->clk = clk;
pcdev->pdata = pdev->dev.platform_data;
@@ -1620,19 +1614,11 @@ static int omap1_cam_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&pcdev->capture);
spin_lock_init(&pcdev->lock);
- /*
- * Request the region.
- */
- if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) {
- err = -EBUSY;
- goto exit_kfree;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- base = ioremap(res->start, resource_size(res));
- if (!base) {
- err = -ENOMEM;
- goto exit_release;
- }
pcdev->irq = irq;
pcdev->base = base;
@@ -1642,8 +1628,7 @@ static int omap1_cam_probe(struct platform_device *pdev)
dma_isr, (void *)pcdev, &pcdev->dma_ch);
if (err < 0) {
dev_err(&pdev->dev, "Can't request DMA for OMAP1 Camera\n");
- err = -EBUSY;
- goto exit_iounmap;
+ return -EBUSY;
}
dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_ch);
@@ -1655,7 +1640,8 @@ static int omap1_cam_probe(struct platform_device *pdev)
/* setup DMA autoinitialization */
omap_dma_link_lch(pcdev->dma_ch, pcdev->dma_ch);
- err = request_irq(pcdev->irq, cam_isr, 0, DRIVER_NAME, pcdev);
+ err = devm_request_irq(&pdev->dev, pcdev->irq, cam_isr, 0, DRIVER_NAME,
+ pcdev);
if (err) {
dev_err(&pdev->dev, "Camera interrupt register failed\n");
goto exit_free_dma;
@@ -1669,24 +1655,14 @@ static int omap1_cam_probe(struct platform_device *pdev)
err = soc_camera_host_register(&pcdev->soc_host);
if (err)
- goto exit_free_irq;
+ return err;
dev_info(&pdev->dev, "OMAP1 Camera Interface driver loaded\n");
return 0;
-exit_free_irq:
- free_irq(pcdev->irq, pcdev);
exit_free_dma:
omap_free_dma(pcdev->dma_ch);
-exit_iounmap:
- iounmap(base);
-exit_release:
- release_mem_region(res->start, resource_size(res));
-exit_kfree:
- kfree(pcdev);
-exit_put_clk:
- clk_put(clk);
exit:
return err;
}
@@ -1696,23 +1672,11 @@ static int omap1_cam_remove(struct platform_device *pdev)
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct omap1_cam_dev *pcdev = container_of(soc_host,
struct omap1_cam_dev, soc_host);
- struct resource *res;
-
- free_irq(pcdev->irq, pcdev);
omap_free_dma(pcdev->dma_ch);
soc_camera_host_unregister(soc_host);
- iounmap(pcdev->base);
-
- res = pcdev->res;
- release_mem_region(res->start, resource_size(res));
-
- clk_put(pcdev->clk);
-
- kfree(pcdev);
-
dev_info(&pdev->dev, "OMAP1 Camera Interface driver unloaded\n");
return 0;
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index c5a5138b3..6ceb4eb00 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -1065,7 +1065,7 @@ static int iss_register_entities(struct iss_device *iss)
}
ret = media_create_pad_link(&sensor->entity, 0, input, pad,
- flags);
+ flags);
if (ret < 0)
goto done;
}
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index f54349bce..cf8da2355 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -223,7 +223,7 @@ iss_video_far_end(struct iss_video *video)
if (entity == &video->video.entity)
continue;
- if (!is_media_entity_v4l2_io(entity))
+ if (!is_media_entity_v4l2_video_device(entity))
continue;
far_end = to_iss_video(media_entity_to_video_device(entity));
diff --git a/drivers/staging/media/tw686x-kh/Kconfig b/drivers/staging/media/tw686x-kh/Kconfig
new file mode 100644
index 000000000..6264d30ed
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/Kconfig
@@ -0,0 +1,17 @@
+config VIDEO_TW686X_KH
+ tristate "Intersil/Techwell TW686x Video For Linux"
+ depends on VIDEO_DEV && PCI && VIDEO_V4L2
+ depends on !(VIDEO_TW686X=y || VIDEO_TW686X=m) || COMPILE_TEST
+ select VIDEOBUF2_DMA_SG
+ help
+ Support for Intersil/Techwell TW686x-based frame grabber cards.
+
+ Currently supported chips:
+ - TW6864 (4 video channels),
+ - TW6865 (4 video channels, not tested, second generation chip),
+ - TW6868 (8 video channels, of which only the first 4, using the
+ built-in video decoder, are supported; not tested),
+ - TW6869 (8 video channels, second generation chip).
+
+ To compile this driver as a module, choose M here: the module
+ will be named tw686x-kh.
diff --git a/drivers/staging/media/tw686x-kh/Makefile b/drivers/staging/media/tw686x-kh/Makefile
new file mode 100644
index 000000000..2a36a38cf
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/Makefile
@@ -0,0 +1,3 @@
+tw686x-kh-objs := tw686x-kh-core.o tw686x-kh-video.o
+
+obj-$(CONFIG_VIDEO_TW686X_KH) += tw686x-kh.o
diff --git a/drivers/staging/media/tw686x-kh/TODO b/drivers/staging/media/tw686x-kh/TODO
new file mode 100644
index 000000000..480a495b1
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/TODO
@@ -0,0 +1,6 @@
+TODO:
+
+- implement V4L2_FIELD_INTERLACED* mode(s).
+- add audio support
+
+Please Cc: patches to Krzysztof Halasa <khalasa@piap.pl>.
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-core.c b/drivers/staging/media/tw686x-kh/tw686x-kh-core.c
new file mode 100644
index 000000000..03b3b62c5
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/tw686x-kh-core.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ *
+ * Written by Krzysztof Hałasa.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "tw686x-kh.h"
+#include "tw686x-kh-regs.h"
+
+static irqreturn_t tw686x_irq(int irq, void *dev_id)
+{
+ struct tw686x_dev *dev = (struct tw686x_dev *)dev_id;
+ u32 int_status = reg_read(dev, INT_STATUS); /* cleared on read */
+ unsigned long flags;
+ unsigned int handled = 0;
+
+ if (int_status) {
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ dev->dma_requests |= int_status;
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ if (int_status & 0xFF0000FF)
+ handled = tw686x_kh_video_irq(dev);
+ }
+
+ return IRQ_RETVAL(handled);
+}
+
+static int tw686x_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *pci_id)
+{
+ struct tw686x_dev *dev;
+ int err;
+
+ dev = devm_kzalloc(&pci_dev->dev, sizeof(*dev) +
+ (pci_id->driver_data & TYPE_MAX_CHANNELS) *
+ sizeof(dev->video_channels[0]), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ sprintf(dev->name, "TW%04X", pci_dev->device);
+ dev->type = pci_id->driver_data;
+
+ pr_info("%s: PCI %s, IRQ %d, MMIO 0x%lx\n", dev->name,
+ pci_name(pci_dev), pci_dev->irq,
+ (unsigned long)pci_resource_start(pci_dev, 0));
+
+ dev->pci_dev = pci_dev;
+ if (pcim_enable_device(pci_dev))
+ return -EIO;
+
+ pci_set_master(pci_dev);
+
+ if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+ pr_err("%s: 32-bit PCI DMA not supported\n", dev->name);
+ return -EIO;
+ }
+
+ err = pci_request_regions(pci_dev, dev->name);
+ if (err < 0) {
+ pr_err("%s: Unable to get MMIO region\n", dev->name);
+ return err;
+ }
+
+ dev->mmio = pci_ioremap_bar(pci_dev, 0);
+ if (!dev->mmio) {
+ pr_err("%s: Unable to remap MMIO region\n", dev->name);
+ return -EIO;
+ }
+
+ reg_write(dev, SYS_SOFT_RST, 0x0F); /* Reset all subsystems */
+ mdelay(1);
+
+ reg_write(dev, SRST[0], 0x3F);
+ if (max_channels(dev) > 4)
+ reg_write(dev, SRST[1], 0x3F);
+ reg_write(dev, DMA_CMD, 0);
+ reg_write(dev, DMA_CHANNEL_ENABLE, 0);
+ reg_write(dev, DMA_CHANNEL_TIMEOUT, 0x3EFF0FF0);
+ reg_write(dev, DMA_TIMER_INTERVAL, 0x38000);
+ reg_write(dev, DMA_CONFIG, 0xFFFFFF04);
+
+ spin_lock_init(&dev->irq_lock);
+
+ err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw686x_irq,
+ IRQF_SHARED, dev->name, dev);
+ if (err < 0) {
+ pr_err("%s: Unable to get IRQ\n", dev->name);
+ return err;
+ }
+
+ err = tw686x_kh_video_init(dev);
+ if (err)
+ return err;
+
+ pci_set_drvdata(pci_dev, dev);
+ return 0;
+}
+
+static void tw686x_remove(struct pci_dev *pci_dev)
+{
+ struct tw686x_dev *dev = pci_get_drvdata(pci_dev);
+
+ tw686x_kh_video_free(dev);
+}
+
+/* driver_data is number of A/V channels */
+static const struct pci_device_id tw686x_pci_tbl[] = {
+ {PCI_DEVICE(0x1797, 0x6864), .driver_data = 4},
+ /* not tested */
+ {PCI_DEVICE(0x1797, 0x6865), .driver_data = 4 | TYPE_SECOND_GEN},
+ /* TW6868 supports 8 A/V channels with an external TW2865 chip -
+ not supported by the driver */
+ {PCI_DEVICE(0x1797, 0x6868), .driver_data = 4}, /* not tested */
+ {PCI_DEVICE(0x1797, 0x6869), .driver_data = 8 | TYPE_SECOND_GEN},
+ {}
+};
+
+static struct pci_driver tw686x_pci_driver = {
+ .name = "tw686x-kh",
+ .id_table = tw686x_pci_tbl,
+ .probe = tw686x_probe,
+ .remove = tw686x_remove,
+};
+
+MODULE_DESCRIPTION("Driver for video frame grabber cards based on Intersil/Techwell TW686[4589]");
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, tw686x_pci_tbl);
+module_pci_driver(tw686x_pci_driver);
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h b/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h
new file mode 100644
index 000000000..53e1889ba
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/tw686x-kh-regs.h
@@ -0,0 +1,103 @@
+/* DMA controller registers */
+#define REG8_1(a0) ((const u16[8]) {a0, a0 + 1, a0 + 2, a0 + 3, \
+ a0 + 4, a0 + 5, a0 + 6, a0 + 7})
+#define REG8_2(a0) ((const u16[8]) {a0, a0 + 2, a0 + 4, a0 + 6, \
+ a0 + 8, a0 + 0xA, a0 + 0xC, a0 + 0xE})
+#define REG8_8(a0) ((const u16[8]) {a0, a0 + 8, a0 + 0x10, a0 + 0x18, \
+ a0 + 0x20, a0 + 0x28, a0 + 0x30, a0 + 0x38})
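/* Editor's note -- these macros expand to compound-literal arrays of
 * per-channel register addresses, so a register can be indexed by
 * channel number; e.g. REG8_1(0x10)[3] == 0x13, which is how
 * VDMA_CHANNEL_CONFIG[vc->ch] below selects channel vc->ch's register.
 */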
+#define INT_STATUS 0x00
+#define PB_STATUS 0x01
+#define DMA_CMD 0x02
+#define VIDEO_FIFO_STATUS 0x03
+#define VIDEO_CHANNEL_ID 0x04
+#define VIDEO_PARSER_STATUS 0x05
+#define SYS_SOFT_RST 0x06
+#define DMA_PAGE_TABLE0_ADDR ((const u16[8]) {0x08, 0xD0, 0xD2, 0xD4, \
+ 0xD6, 0xD8, 0xDA, 0xDC})
+#define DMA_PAGE_TABLE1_ADDR ((const u16[8]) {0x09, 0xD1, 0xD3, 0xD5, \
+ 0xD7, 0xD9, 0xDB, 0xDD})
+#define DMA_CHANNEL_ENABLE 0x0A
+#define DMA_CONFIG 0x0B
+#define DMA_TIMER_INTERVAL 0x0C
+#define DMA_CHANNEL_TIMEOUT 0x0D
+#define VDMA_CHANNEL_CONFIG REG8_1(0x10)
+#define ADMA_P_ADDR REG8_2(0x18)
+#define ADMA_B_ADDR REG8_2(0x19)
+#define DMA10_P_ADDR 0x28 /* ??? */
+#define DMA10_B_ADDR 0x29
+#define VIDEO_CONTROL1 0x2A
+#define VIDEO_CONTROL2 0x2B
+#define AUDIO_CONTROL1 0x2C
+#define AUDIO_CONTROL2 0x2D
+#define PHASE_REF 0x2E
+#define GPIO_REG 0x2F
+#define INTL_HBAR_CTRL REG8_1(0x30)
+#define AUDIO_CONTROL3 0x38
+#define VIDEO_FIELD_CTRL REG8_1(0x39)
+#define HSCALER_CTRL REG8_1(0x42)
+#define VIDEO_SIZE REG8_1(0x4A)
+#define VIDEO_SIZE_F2 REG8_1(0x52)
+#define MD_CONF REG8_1(0x60)
+#define MD_INIT REG8_1(0x68)
+#define MD_MAP0 REG8_1(0x70)
+#define VDMA_P_ADDR REG8_8(0x80) /* not used in DMA SG mode */
+#define VDMA_WHP REG8_8(0x81)
+#define VDMA_B_ADDR REG8_8(0x82)
+#define VDMA_F2_P_ADDR REG8_8(0x84)
+#define VDMA_F2_WHP REG8_8(0x85)
+#define VDMA_F2_B_ADDR REG8_8(0x86)
+#define EP_REG_ADDR 0xFE
+#define EP_REG_DATA 0xFF
+
+/* Video decoder registers */
+#define VDREG8(a0) ((const u16[8]) { \
+ a0 + 0x000, a0 + 0x010, a0 + 0x020, a0 + 0x030, \
+ a0 + 0x100, a0 + 0x110, a0 + 0x120, a0 + 0x130})
+#define VIDSTAT VDREG8(0x100)
+#define BRIGHT VDREG8(0x101)
+#define CONTRAST VDREG8(0x102)
+#define SHARPNESS VDREG8(0x103)
+#define SAT_U VDREG8(0x104)
+#define SAT_V VDREG8(0x105)
+#define HUE VDREG8(0x106)
+#define CROP_HI VDREG8(0x107)
+#define VDELAY_LO VDREG8(0x108)
+#define VACTIVE_LO VDREG8(0x109)
+#define HDELAY_LO VDREG8(0x10A)
+#define HACTIVE_LO VDREG8(0x10B)
+#define MVSN VDREG8(0x10C)
+#define STATUS2 VDREG8(0x10C)
+#define SDT VDREG8(0x10E)
+#define SDT_EN VDREG8(0x10F)
+
+#define VSCALE_LO VDREG8(0x144)
+#define SCALE_HI VDREG8(0x145)
+#define HSCALE_LO VDREG8(0x146)
+#define F2CROP_HI VDREG8(0x147)
+#define F2VDELAY_LO VDREG8(0x148)
+#define F2VACTIVE_LO VDREG8(0x149)
+#define F2HDELAY_LO VDREG8(0x14A)
+#define F2HACTIVE_LO VDREG8(0x14B)
+#define F2VSCALE_LO VDREG8(0x14C)
+#define F2SCALE_HI VDREG8(0x14D)
+#define F2HSCALE_LO VDREG8(0x14E)
+#define F2CNT VDREG8(0x14F)
+
+#define VDREG2(a0) ((const u16[2]) {a0, a0 + 0x100})
+#define SRST VDREG2(0x180)
+#define ACNTL VDREG2(0x181)
+#define ACNTL2 VDREG2(0x182)
+#define CNTRL1 VDREG2(0x183)
+#define CKHY VDREG2(0x184)
+#define SHCOR VDREG2(0x185)
+#define CORING VDREG2(0x186)
+#define CLMPG VDREG2(0x187)
+#define IAGC VDREG2(0x188)
+#define VCTRL1 VDREG2(0x18F)
+#define MISC1 VDREG2(0x194)
+#define LOOP VDREG2(0x195)
+#define MISC2 VDREG2(0x196)
+
+#define CLMD VDREG2(0x197)
+#define AIGAIN ((const u16[8]) {0x1D0, 0x1D1, 0x1D2, 0x1D3, \
+ 0x2D0, 0x2D1, 0x2D2, 0x2D3})
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh-video.c b/drivers/staging/media/tw686x-kh/tw686x-kh-video.c
new file mode 100644
index 000000000..6ecb504a7
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/tw686x-kh-video.c
@@ -0,0 +1,821 @@
+/*
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ *
+ * Written by Krzysztof Hałasa.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-event.h>
+#include "tw686x-kh.h"
+#include "tw686x-kh-regs.h"
+
+#define MAX_SG_ENTRY_SIZE (/* 8192 - 128 */ 4096)
+#define MAX_SG_DESC_COUNT 256 /* PAL 704x576 needs up to 198 4-KB pages */
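/* Editor's note -- the 198-page figure checks out for the worst case
 * of 16 bpp: 704 * 576 * 2 = 811008 bytes, and 811008 / 4096 = 198
 * pages exactly, comfortably below MAX_SG_DESC_COUNT.
 */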
+
+static const struct tw686x_format formats[] = {
+ {
+ .name = "4:2:2 packed, UYVY", /* aka Y422 */
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .mode = 0,
+ .depth = 16,
+ }, {
+#if 0
+ .name = "4:2:0 packed, YUV",
+ .mode = 1, /* non-standard */
+ .depth = 12,
+ }, {
+ .name = "4:1:1 packed, YUV",
+ .mode = 2, /* non-standard */
+ .depth = 12,
+ }, {
+#endif
+ .name = "4:1:1 packed, YUV",
+ .fourcc = V4L2_PIX_FMT_Y41P,
+ .mode = 3,
+ .depth = 12,
+ }, {
+ .name = "15 bpp RGB",
+ .fourcc = V4L2_PIX_FMT_RGB555,
+ .mode = 4,
+ .depth = 16,
+ }, {
+ .name = "16 bpp RGB",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mode = 5,
+ .depth = 16,
+ }, {
+ .name = "4:2:2 packed, YUYV",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mode = 6,
+ .depth = 16,
+ }
+ /* mode 7 is "reserved" */
+};
+
+static const v4l2_std_id video_standards[7] = {
+ V4L2_STD_NTSC,
+ V4L2_STD_PAL,
+ V4L2_STD_SECAM,
+ V4L2_STD_NTSC_443,
+ V4L2_STD_PAL_M,
+ V4L2_STD_PAL_N,
+ V4L2_STD_PAL_60,
+};
+
+static const struct tw686x_format *format_by_fourcc(unsigned int fourcc)
+{
+ unsigned int cnt;
+
+ for (cnt = 0; cnt < ARRAY_SIZE(formats); cnt++)
+ if (formats[cnt].fourcc == fourcc)
+ return &formats[cnt];
+ return NULL;
+}
+
+static void tw686x_get_format(struct tw686x_video_channel *vc,
+ struct v4l2_format *f)
+{
+ const struct tw686x_format *format;
+ unsigned int width, height, height_div = 1;
+
+ format = format_by_fourcc(f->fmt.pix.pixelformat);
+ if (!format) {
+ format = &formats[0];
+ f->fmt.pix.pixelformat = format->fourcc;
+ }
+
+ width = 704;
+ if (f->fmt.pix.width < width * 3 / 4 /* halfway */)
+ width /= 2;
+
+ height = (vc->video_standard & V4L2_STD_625_50) ? 576 : 480;
+ if (f->fmt.pix.height < height * 3 / 4 /* halfway */)
+ height_div = 2;
+
+ switch (f->fmt.pix.field) {
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ height_div = 2;
+ break;
+ case V4L2_FIELD_SEQ_BT:
+ if (height_div > 1)
+ f->fmt.pix.field = V4L2_FIELD_BOTTOM;
+ break;
+ default:
+ if (height_div > 1)
+ f->fmt.pix.field = V4L2_FIELD_TOP;
+ else
+ f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
+ }
+ height /= height_div;
+
+ f->fmt.pix.width = width;
+ f->fmt.pix.height = height;
+ f->fmt.pix.bytesperline = f->fmt.pix.width * format->depth / 8;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+}
+
+/* video queue operations */
+
+static int tw686x_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ void *alloc_ctxs[])
+{
+ struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+ unsigned int size = vc->width * vc->height * vc->format->depth / 8;
+
+ alloc_ctxs[0] = vc->alloc_ctx;
+ if (*nbuffers < 2)
+ *nbuffers = 2;
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ sizes[0] = size;
+ *nplanes = 1; /* packed formats only */
+ return 0;
+}
+
+static void tw686x_buf_queue(struct vb2_buffer *vb)
+{
+ struct tw686x_video_channel *vc = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct tw686x_vb2_buf *buf;
+
+ buf = container_of(vbuf, struct tw686x_vb2_buf, vb);
+
+ spin_lock(&vc->qlock);
+ list_add_tail(&buf->list, &vc->vidq_queued);
+ spin_unlock(&vc->qlock);
+}
+
+static void setup_descs(struct tw686x_video_channel *vc, unsigned int n)
+{
+loop:
+ while (!list_empty(&vc->vidq_queued)) {
+ struct vdma_desc *descs = vc->sg_descs[n];
+ struct tw686x_vb2_buf *buf;
+ struct sg_table *vbuf;
+ struct scatterlist *sg;
+ unsigned int buf_len, count = 0;
+ int i;
+
+ buf = list_first_entry(&vc->vidq_queued, struct tw686x_vb2_buf,
+ list);
+ list_del(&buf->list);
+
+ buf_len = vc->width * vc->height * vc->format->depth / 8;
+ if (vb2_plane_size(&buf->vb.vb2_buf, 0) < buf_len) {
+ pr_err("Video buffer size too small\n");
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ goto loop; /* try another */
+ }
+
+ vbuf = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
+ for_each_sg(vbuf->sgl, sg, vbuf->nents, i) {
+ dma_addr_t phys = sg_dma_address(sg);
+ unsigned int len = sg_dma_len(sg);
+
+ while (len && buf_len) {
+ unsigned int entry_len = min_t(unsigned int, len,
+ MAX_SG_ENTRY_SIZE);
+ entry_len = min(entry_len, buf_len);
+ if (count == MAX_SG_DESC_COUNT) {
+ pr_err("Video buffer size too fragmented\n");
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ goto loop;
+ }
+ descs[count].phys = cpu_to_le32(phys);
+ descs[count++].flags_length =
+ cpu_to_le32(0x40000000 /* available */ |
+ entry_len);
+ phys += entry_len;
+ len -= entry_len;
+ buf_len -= entry_len;
+ }
+ if (!buf_len)
+ break;
+ }
+
+ /* clear the remaining entries */
+ while (count < MAX_SG_DESC_COUNT) {
+ descs[count].phys = 0;
+ descs[count++].flags_length = 0; /* unavailable */
+ }
+
+ buf->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+ vc->curr_bufs[n] = buf;
+ return;
+ }
+ vc->curr_bufs[n] = NULL;
+}
+
+/* On TW6864 and TW6868, all channels share the pair of video DMA SG tables,
+ with 10-bit start_idx and end_idx determining the start and end of the
+ frame buffer for a particular channel.
+ TW6868 with all its 8 channels would be problematic (only 127 SG entries
+ per channel), but we support only 4 channels on this chip anyway (the first
+ 4 channels are driven by the internal video decoder, the other 4 would
+ require an external TW286x part).
+
+ On TW6865 and TW6869, each channel has its own DMA SG table, with indexes
+ starting at 0. Both chips have complete sets of internal video decoders
+ (4- or 8-channel, respectively).
+
+ All chips have separate SG tables for the two video frames. */
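/* Editor's note -- a worked example of the shared-table layout, with
 * MAX_SG_DESC_COUNT == 256: on a first-generation chip, channel 2 gets
 * start_idx = 2 * 256 = 512 and end_idx = 767; on a second-generation
 * chip every channel uses start_idx = 0 and end_idx = 255 within its
 * own table. setup_dma_cfg() below computes exactly these indexes.
 */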
+
+static void setup_dma_cfg(struct tw686x_video_channel *vc)
+{
+ unsigned int field_width = 704;
+ unsigned int field_height = (vc->video_standard & V4L2_STD_625_50) ?
+ 288 : 240;
+ unsigned int start_idx = is_second_gen(vc->dev) ? 0 :
+ vc->ch * MAX_SG_DESC_COUNT;
+ unsigned int end_idx = start_idx + MAX_SG_DESC_COUNT - 1;
+ u32 dma_cfg = (0 << 30) /* input selection */ |
+ (1 << 29) /* field2 dropped (if any) */ |
+ ((vc->height < 300) << 28) /* field dropping */ |
+ (1 << 27) /* master */ |
+ (0 << 25) /* master channel (for slave only) */ |
+ (0 << 24) /* (no) vertical (line) decimation */ |
+ ((vc->width < 400) << 23) /* horizontal decimation */ |
+ (vc->format->mode << 20) /* output video format */ |
+ (end_idx << 10) /* DMA end index */ |
+ start_idx /* DMA start index */;
+ u32 reg;
+
+ reg_write(vc->dev, VDMA_CHANNEL_CONFIG[vc->ch], dma_cfg);
+ reg_write(vc->dev, VIDEO_SIZE[vc->ch], (1 << 31) | (field_height << 16)
+ | field_width);
+ reg = reg_read(vc->dev, VIDEO_CONTROL1);
+ if (vc->video_standard & V4L2_STD_625_50)
+ reg |= 1 << (vc->ch + 13);
+ else
+ reg &= ~(1 << (vc->ch + 13));
+ reg_write(vc->dev, VIDEO_CONTROL1, reg);
+}
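/* Editor's note -- a sample dma_cfg value under stated assumptions
 * (first-generation chip, channel 0, 704x576 UYVY, i.e. mode 0):
 * field2-dropped (1 << 29) | master (1 << 27) | end_idx (255 << 10) |
 * start_idx (0) == 0x2803fc00; both decimation bits stay clear because
 * width >= 400 and height >= 300.
 */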
+
+static int tw686x_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+ struct tw686x_dev *dev = vc->dev;
+ u32 dma_ch_mask;
+ unsigned int n;
+
+ setup_dma_cfg(vc);
+
+ /* queue video buffers if available */
+ spin_lock(&vc->qlock);
+ for (n = 0; n < 2; n++)
+ setup_descs(vc, n);
+ spin_unlock(&vc->qlock);
+
+ dev->video_active |= 1 << vc->ch;
+ vc->seq = 0;
+ dma_ch_mask = reg_read(dev, DMA_CHANNEL_ENABLE) | (1 << vc->ch);
+ reg_write(dev, DMA_CHANNEL_ENABLE, dma_ch_mask);
+ reg_write(dev, DMA_CMD, (1 << 31) | dma_ch_mask);
+ return 0;
+}
+
+static void tw686x_stop_streaming(struct vb2_queue *vq)
+{
+ struct tw686x_video_channel *vc = vb2_get_drv_priv(vq);
+ struct tw686x_dev *dev = vc->dev;
+ u32 dma_ch_mask = reg_read(dev, DMA_CHANNEL_ENABLE);
+ u32 dma_cmd = reg_read(dev, DMA_CMD);
+ unsigned int n;
+
+ dma_ch_mask &= ~(1 << vc->ch);
+ reg_write(dev, DMA_CHANNEL_ENABLE, dma_ch_mask);
+
+ dev->video_active &= ~(1 << vc->ch);
+
+ dma_cmd &= ~(1 << vc->ch);
+ reg_write(dev, DMA_CMD, dma_cmd);
+
+ if (!dev->video_active) {
+ reg_write(dev, DMA_CMD, 0);
+ reg_write(dev, DMA_CHANNEL_ENABLE, 0);
+ }
+
+ spin_lock(&vc->qlock);
+ while (!list_empty(&vc->vidq_queued)) {
+ struct tw686x_vb2_buf *buf;
+
+ buf = list_entry(vc->vidq_queued.next, struct tw686x_vb2_buf,
+ list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ for (n = 0; n < 2; n++)
+ if (vc->curr_bufs[n])
+ vb2_buffer_done(&vc->curr_bufs[n]->vb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+
+ spin_unlock(&vc->qlock);
+}
+
+static struct vb2_ops tw686x_video_qops = {
+ .queue_setup = tw686x_queue_setup,
+ .buf_queue = tw686x_buf_queue,
+ .start_streaming = tw686x_start_streaming,
+ .stop_streaming = tw686x_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int tw686x_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct tw686x_video_channel *vc;
+ struct tw686x_dev *dev;
+ unsigned int ch;
+
+ vc = container_of(ctrl->handler, struct tw686x_video_channel,
+ ctrl_handler);
+ dev = vc->dev;
+ ch = vc->ch;
+
+ switch (ctrl->id) {
+ case V4L2_CID_BRIGHTNESS:
+ reg_write(dev, BRIGHT[ch], ctrl->val & 0xFF);
+ return 0;
+
+ case V4L2_CID_CONTRAST:
+ reg_write(dev, CONTRAST[ch], ctrl->val);
+ return 0;
+
+ case V4L2_CID_SATURATION:
+ reg_write(dev, SAT_U[ch], ctrl->val);
+ reg_write(dev, SAT_V[ch], ctrl->val);
+ return 0;
+
+ case V4L2_CID_HUE:
+ reg_write(dev, HUE[ch], ctrl->val & 0xFF);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops ctrl_ops = {
+ .s_ctrl = tw686x_s_ctrl,
+};
+
+static int tw686x_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+
+ f->fmt.pix.width = vc->width;
+ f->fmt.pix.height = vc->height;
+ f->fmt.pix.field = vc->field;
+ f->fmt.pix.pixelformat = vc->format->fourcc;
+ f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+ f->fmt.pix.bytesperline = f->fmt.pix.width * vc->format->depth / 8;
+ f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
+ return 0;
+}
+
+static int tw686x_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ tw686x_get_format(video_drvdata(file), f);
+ return 0;
+}
+
+static int tw686x_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+
+ tw686x_get_format(vc, f);
+ vc->format = format_by_fourcc(f->fmt.pix.pixelformat);
+ vc->field = f->fmt.pix.field;
+ vc->width = f->fmt.pix.width;
+ vc->height = f->fmt.pix.height;
+ return 0;
+}
+
+static int tw686x_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ struct tw686x_dev *dev = vc->dev;
+
+ strcpy(cap->driver, "tw686x-kh");
+ strcpy(cap->card, dev->name);
+ sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci_dev));
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+static int tw686x_s_std(struct file *file, void *priv, v4l2_std_id id)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+ unsigned int cnt;
+ u32 sdt = 0; /* default */
+
+ for (cnt = 0; cnt < ARRAY_SIZE(video_standards); cnt++)
+ if (id & video_standards[cnt]) {
+ sdt = cnt;
+ break;
+ }
+
+ reg_write(vc->dev, SDT[vc->ch], sdt);
+ vc->video_standard = video_standards[sdt];
+ return 0;
+}
+
+static int tw686x_g_std(struct file *file, void *priv, v4l2_std_id *id)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+
+ *id = vc->video_standard;
+ return 0;
+}
+
+static int tw686x_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ strlcpy(f->description, formats[f->index].name, sizeof(f->description));
+ f->pixelformat = formats[f->index].fourcc;
+ return 0;
+}
+
+static int tw686x_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *sp)
+{
+ struct tw686x_video_channel *vc = video_drvdata(file);
+
+ if (sp->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ memset(&sp->parm.capture, 0, sizeof(sp->parm.capture));
+ sp->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ v4l2_video_std_frame_period(vc->video_standard,
+ &sp->parm.capture.timeperframe);
+
+ return 0;
+}
+
+static int tw686x_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ /* The chip has an internal input multiplexer; support can be added
+ if the actual hardware uses it. */
+ if (inp->index)
+ return -EINVAL;
+
+ snprintf(inp->name, sizeof(inp->name), "Composite");
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->std = V4L2_STD_ALL;
+ inp->capabilities = V4L2_IN_CAP_STD;
+ return 0;
+}
+
+static int tw686x_g_input(struct file *file, void *priv, unsigned int *v)
+{
+ *v = 0;
+ return 0;
+}
+
+static int tw686x_s_input(struct file *file, void *priv, unsigned int v)
+{
+ if (v)
+ return -EINVAL;
+ return 0;
+}
+
+static const struct v4l2_file_operations tw686x_video_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .unlocked_ioctl = video_ioctl2,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops tw686x_video_ioctl_ops = {
+ .vidioc_querycap = tw686x_querycap,
+ .vidioc_enum_fmt_vid_cap = tw686x_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = tw686x_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = tw686x_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = tw686x_try_fmt_vid_cap,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_g_std = tw686x_g_std,
+ .vidioc_s_std = tw686x_s_std,
+ .vidioc_g_parm = tw686x_g_parm,
+ .vidioc_enum_input = tw686x_enum_input,
+ .vidioc_g_input = tw686x_g_input,
+ .vidioc_s_input = tw686x_s_input,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int video_thread(void *arg)
+{
+ struct tw686x_dev *dev = arg;
+ DECLARE_WAITQUEUE(wait, current);
+
+ set_freezable();
+ add_wait_queue(&dev->video_thread_wait, &wait);
+
+ while (1) {
+ long timeout = schedule_timeout_interruptible(HZ);
+ unsigned int ch;
+
+ if (timeout == -ERESTARTSYS || kthread_should_stop())
+ break;
+
+ for (ch = 0; ch < max_channels(dev); ch++) {
+ struct tw686x_video_channel *vc;
+ unsigned long flags;
+ u32 request, n, stat = VB2_BUF_STATE_DONE;
+
+ vc = &dev->video_channels[ch];
+ if (!(dev->video_active & (1 << ch)))
+ continue;
+
+ spin_lock_irq(&dev->irq_lock);
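+ /* bit ch appears to flag a completed frame, bit 24 + ch a channel
+ error (assumption, based on the error handling below) */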
+ request = dev->dma_requests & (0x01000001 << ch);
+ if (request)
+ dev->dma_requests &= ~request;
+ spin_unlock_irq(&dev->irq_lock);
+
+ if (!request)
+ continue;
+
+ request >>= ch;
+
+ /* handle channel events */
+ if ((request & 0x01000000) |
+ (reg_read(dev, VIDEO_FIFO_STATUS) & (0x01010001 << ch)) |
+ (reg_read(dev, VIDEO_PARSER_STATUS) & (0x00000101 << ch))) {
+ /* DMA Errors - reset channel */
+ u32 reg;
+
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ reg = reg_read(dev, DMA_CMD);
+ /* Reset DMA channel */
+ reg_write(dev, DMA_CMD, reg & ~(1 << ch));
+ reg_write(dev, DMA_CMD, reg);
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+ stat = VB2_BUF_STATE_ERROR;
+ }
+
+ /* handle video stream */
+ mutex_lock(&vc->vb_mutex);
+ spin_lock(&vc->qlock);
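+ /* PB_STATUS tells us which of the two alternating frame buffers
+ (and SG tables) the channel has just completed */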
+ n = !!(reg_read(dev, PB_STATUS) & (1 << ch));
+ if (vc->curr_bufs[n]) {
+ struct vb2_v4l2_buffer *vb;
+
+ vb = &vc->curr_bufs[n]->vb;
+ vb->vb2_buf.timestamp = ktime_get_ns();
+ vb->field = vc->field;
+ if (V4L2_FIELD_HAS_BOTH(vc->field))
+ vb->sequence = vc->seq++;
+ else
+ vb->sequence = (vc->seq++) / 2;
+ vb2_set_plane_payload(&vb->vb2_buf, 0,
+ vc->width * vc->height * vc->format->depth / 8);
+ vb2_buffer_done(&vb->vb2_buf, stat);
+ }
+ setup_descs(vc, n);
+ spin_unlock(&vc->qlock);
+ mutex_unlock(&vc->vb_mutex);
+ }
+ try_to_freeze();
+ }
+
+ remove_wait_queue(&dev->video_thread_wait, &wait);
+ return 0;
+}
+
+int tw686x_kh_video_irq(struct tw686x_dev *dev)
+{
+ unsigned long flags, handled = 0;
+ u32 requests;
+
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ requests = dev->dma_requests;
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ if (requests & dev->video_active) {
+ wake_up_interruptible_all(&dev->video_thread_wait);
+ handled = 1;
+ }
+ return handled;
+}
+
+void tw686x_kh_video_free(struct tw686x_dev *dev)
+{
+ unsigned int ch, n;
+
+ if (dev->video_thread)
+ kthread_stop(dev->video_thread);
+
+ for (ch = 0; ch < max_channels(dev); ch++) {
+ struct tw686x_video_channel *vc = &dev->video_channels[ch];
+
+ v4l2_ctrl_handler_free(&vc->ctrl_handler);
+ if (vc->device)
+ video_unregister_device(vc->device);
+ vb2_dma_sg_cleanup_ctx(vc->alloc_ctx);
+ for (n = 0; n < 2; n++) {
+ struct dma_desc *descs = &vc->sg_tables[n];
+
+ if (descs->virt)
+ pci_free_consistent(dev->pci_dev, descs->size,
+ descs->virt, descs->phys);
+ }
+ }
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+}
+
+#define SG_TABLE_SIZE (MAX_SG_DESC_COUNT * sizeof(struct vdma_desc))
+
+int tw686x_kh_video_init(struct tw686x_dev *dev)
+{
+ unsigned int ch, n;
+ int err;
+
+ init_waitqueue_head(&dev->video_thread_wait);
+
+ err = v4l2_device_register(&dev->pci_dev->dev, &dev->v4l2_dev);
+ if (err)
+ return err;
+
+ reg_write(dev, VIDEO_CONTROL1, 0); /* NTSC, disable scaler */
+ reg_write(dev, PHASE_REF, 0x00001518); /* Scatter-gather DMA mode */
+
+ /* setup required SG table sizes */
+ for (n = 0; n < 2; n++)
+ if (is_second_gen(dev)) {
+ /* TW6865, TW6869 - each channel needs a pair of
+ descriptor tables */
+ for (ch = 0; ch < max_channels(dev); ch++)
+ dev->video_channels[ch].sg_tables[n].size =
+ SG_TABLE_SIZE;
+
+ } else
+ /* TW6864, TW6868 - we need to allocate a pair of
+ descriptor tables, common to all channels.
+ Each table will be bigger than 4 KB. */
+ dev->video_channels[0].sg_tables[n].size =
+ max_channels(dev) * SG_TABLE_SIZE;
+
+ /* allocate SG tables and initialize video channels */
+ for (ch = 0; ch < max_channels(dev); ch++) {
+ struct tw686x_video_channel *vc = &dev->video_channels[ch];
+ struct video_device *vdev;
+
+ mutex_init(&vc->vb_mutex);
+ spin_lock_init(&vc->qlock);
+ INIT_LIST_HEAD(&vc->vidq_queued);
+
+ vc->dev = dev;
+ vc->ch = ch;
+
+ /* default settings: NTSC */
+ vc->format = &formats[0];
+ vc->video_standard = V4L2_STD_NTSC;
+ reg_write(vc->dev, SDT[vc->ch], 0);
+ vc->field = V4L2_FIELD_SEQ_BT;
+ vc->width = 704;
+ vc->height = 480;
+
+ for (n = 0; n < 2; n++) {
+ void *cpu;
+
+ if (vc->sg_tables[n].size) {
+ unsigned int reg = n ? DMA_PAGE_TABLE1_ADDR[ch] :
+ DMA_PAGE_TABLE0_ADDR[ch];
+
+ cpu = pci_alloc_consistent(dev->pci_dev,
+ vc->sg_tables[n].size,
+ &vc->sg_tables[n].phys);
+ if (!cpu) {
+ pr_err("Error allocating video DMA scatter-gather tables\n");
+ err = -ENOMEM;
+ goto error;
+ }
+ vc->sg_tables[n].virt = cpu;
+ reg_write(dev, reg, vc->sg_tables[n].phys);
+ } else
+ cpu = dev->video_channels[0].sg_tables[n].virt +
+ ch * SG_TABLE_SIZE;
+
+ vc->sg_descs[n] = cpu;
+ }
+
+ reg_write(dev, VCTRL1[0], 0x24);
+ reg_write(dev, LOOP[0], 0xA5);
+ if (max_channels(dev) > 4) {
+ reg_write(dev, VCTRL1[1], 0x24);
+ reg_write(dev, LOOP[1], 0xA5);
+ }
+ reg_write(dev, VIDEO_FIELD_CTRL[ch], 0);
+ reg_write(dev, VDELAY_LO[ch], 0x14);
+
+ vdev = video_device_alloc();
+ if (!vdev) {
+ pr_warn("Unable to allocate video device\n");
+ err = -ENOMEM;
+ goto error;
+ }
+
+ vc->alloc_ctx = vb2_dma_sg_init_ctx(&dev->pci_dev->dev);
+ if (IS_ERR(vc->alloc_ctx)) {
+ pr_warn("Unable to initialize DMA scatter-gather context\n");
+ err = PTR_ERR(vc->alloc_ctx);
+ goto error;
+ }
+
+ vc->vidq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vc->vidq.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ vc->vidq.drv_priv = vc;
+ vc->vidq.buf_struct_size = sizeof(struct tw686x_vb2_buf);
+ vc->vidq.ops = &tw686x_video_qops;
+ vc->vidq.mem_ops = &vb2_dma_sg_memops;
+ vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vc->vidq.min_buffers_needed = 2;
+ vc->vidq.lock = &vc->vb_mutex;
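+ /* vdma_desc.phys is a __le32, so buffer pages must come from
+ 32-bit DMA-addressable memory */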
+ vc->vidq.gfp_flags = GFP_DMA32;
+
+ err = vb2_queue_init(&vc->vidq);
+ if (err)
+ goto error;
+
+ snprintf(vdev->name, sizeof(vdev->name), "%s video", dev->name);
+ vdev->fops = &tw686x_video_fops;
+ vdev->ioctl_ops = &tw686x_video_ioctl_ops;
+ vdev->release = video_device_release;
+ vdev->v4l2_dev = &dev->v4l2_dev;
+ vdev->queue = &vc->vidq;
+ vdev->tvnorms = V4L2_STD_ALL;
+ vdev->minor = -1;
+ vdev->lock = &vc->vb_mutex;
+
+ dev->video_channels[ch].device = vdev;
+ video_set_drvdata(vdev, vc);
+ err = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (err < 0)
+ goto error;
+
+ v4l2_ctrl_handler_init(&vc->ctrl_handler,
+ 4 /* number of controls */);
+ vdev->ctrl_handler = &vc->ctrl_handler;
+ v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+ V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 64);
+ v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(&vc->ctrl_handler, &ctrl_ops, V4L2_CID_HUE,
+ -124, 127, 1, 0);
+ err = vc->ctrl_handler.error;
+ if (err)
+ goto error;
+
+ v4l2_ctrl_handler_setup(&vc->ctrl_handler);
+ }
+
+ dev->video_thread = kthread_run(video_thread, dev, "tw686x_video");
+ if (IS_ERR(dev->video_thread)) {
+ err = PTR_ERR(dev->video_thread);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ tw686x_kh_video_free(dev);
+ return err;
+}
diff --git a/drivers/staging/media/tw686x-kh/tw686x-kh.h b/drivers/staging/media/tw686x-kh/tw686x-kh.h
new file mode 100644
index 000000000..dc257967d
--- /dev/null
+++ b/drivers/staging/media/tw686x-kh/tw686x-kh.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2015 Industrial Research Institute for Automation
+ * and Measurements PIAP
+ *
+ * Written by Krzysztof Hałasa.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <media/videobuf2-dma-sg.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+
+#define TYPE_MAX_CHANNELS 0x0F
+#define TYPE_SECOND_GEN 0x10
+
+struct tw686x_format {
+ char *name;
+ unsigned int fourcc;
+ unsigned int depth;
+ unsigned int mode;
+};
+
+struct dma_desc {
+ dma_addr_t phys;
+ void *virt;
+ unsigned int size;
+};
+
+struct vdma_desc {
+ __le32 flags_length; /* 3 MSBits for flags, 13 LSBits for length */
+ __le32 phys;
+};
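+/* Descriptor encoding used by setup_descs():
+ flags_length = cpu_to_le32(0x40000000 | length) marks an entry
+ available; an all-zero descriptor marks it unavailable. */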
+
+struct tw686x_vb2_buf {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+};
+
+struct tw686x_video_channel {
+ struct tw686x_dev *dev;
+
+ struct vb2_queue vidq;
+ struct list_head vidq_queued;
+ struct video_device *device;
+ struct dma_desc sg_tables[2];
+ struct tw686x_vb2_buf *curr_bufs[2];
+ void *alloc_ctx;
+ struct vdma_desc *sg_descs[2];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ const struct tw686x_format *format;
+ struct mutex vb_mutex;
+ spinlock_t qlock;
+ v4l2_std_id video_standard;
+ unsigned int width, height;
+ enum v4l2_field field; /* supported TOP, BOTTOM, SEQ_TB and SEQ_BT */
+ unsigned int seq; /* video field or frame counter */
+ unsigned int ch;
+};
+
+/* global device status */
+struct tw686x_dev {
+ spinlock_t irq_lock;
+
+ struct v4l2_device v4l2_dev;
+ struct snd_card *card; /* sound card */
+
+ unsigned int video_active; /* active video channel mask */
+
+ char name[32];
+ unsigned int type;
+ struct pci_dev *pci_dev;
+ __u32 __iomem *mmio;
+
+ struct task_struct *video_thread;
+ wait_queue_head_t video_thread_wait;
+ u32 dma_requests;
+
+ struct tw686x_video_channel video_channels[0];
+};
+
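+/* mmio is a u32 __iomem pointer, so the register offsets used below are
+ in 32-bit words rather than bytes (pointer arithmetic scales by 4). */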
+static inline uint32_t reg_read(struct tw686x_dev *dev, unsigned int reg)
+{
+ return readl(dev->mmio + reg);
+}
+
+static inline void reg_write(struct tw686x_dev *dev, unsigned int reg,
+ uint32_t value)
+{
+ writel(value, dev->mmio + reg);
+}
+
+static inline unsigned int max_channels(struct tw686x_dev *dev)
+{
+ return dev->type & TYPE_MAX_CHANNELS; /* 4 or 8 channels */
+}
+
+static inline unsigned int is_second_gen(struct tw686x_dev *dev)
+{
+ /* each channel has its own DMA SG table */
+ return dev->type & TYPE_SECOND_GEN;
+}
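+/* Example of the type encoding: dev->type 0x04 describes a 4-channel
+ first-generation chip, while 0x18 (TYPE_SECOND_GEN | 8) would be an
+ 8-channel TW6869-class device. */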
+
+int tw686x_kh_video_irq(struct tw686x_dev *dev);
+int tw686x_kh_video_init(struct tw686x_dev *dev);
+void tw686x_kh_video_free(struct tw686x_dev *dev);
diff --git a/drivers/staging/most/hdm-dim2/dim2_errors.h b/drivers/staging/most/hdm-dim2/dim2_errors.h
index 5a713df1d..66343ba42 100644
--- a/drivers/staging/most/hdm-dim2/dim2_errors.h
+++ b/drivers/staging/most/hdm-dim2/dim2_errors.h
@@ -15,10 +15,6 @@
#ifndef _MOST_DIM_ERRORS_H
#define _MOST_DIM_ERRORS_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/**
* MOST DIM errors.
*/
@@ -58,8 +54,4 @@ enum dim_errors_t {
DIM_ERR_OVERFLOW,
};
-#ifdef __cplusplus
-}
-#endif
-
#endif /* _MOST_DIM_ERRORS_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.h b/drivers/staging/most/hdm-dim2/dim2_hal.h
index fc73d4f97..1c924e869 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.h
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.h
@@ -18,10 +18,6 @@
#include <linux/types.h>
#include "dim2_reg.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
* The values below are specified in the hardware specification.
* So, they should not be changed until the hardware specification changes.
@@ -42,14 +38,12 @@ struct dim_ch_state_t {
u16 done_buffers; /* Number of completed buffers */
};
-typedef int atomic_counter_t;
-
struct int_ch_state {
/* changed only in interrupt context */
- volatile atomic_counter_t request_counter;
+ volatile int request_counter;
/* changed only in task context */
- volatile atomic_counter_t service_counter;
+ volatile int service_counter;
u8 idx1;
u8 idx2;
@@ -110,8 +104,4 @@ void dimcb_io_write(u32 __iomem *ptr32, u32 value);
void dimcb_on_error(u8 error_id, const char *error_message);
-#ifdef __cplusplus
-}
-#endif
-
#endif /* _DIM2_HAL_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_reg.h b/drivers/staging/most/hdm-dim2/dim2_reg.h
index bcf6a79f6..e0837b6b9 100644
--- a/drivers/staging/most/hdm-dim2/dim2_reg.h
+++ b/drivers/staging/most/hdm-dim2/dim2_reg.h
@@ -17,10 +17,6 @@
#include <linux/types.h>
-#ifdef __cplusplus
-extern "C" {
-#endif
-
struct dim2_regs {
/* 0x00 */ u32 MLBC0;
/* 0x01 */ u32 rsvd0[1];
@@ -166,8 +162,4 @@ enum {
CAT_CL_MASK = DIM2_MASK(6)
};
-#ifdef __cplusplus
-}
-#endif
-
#endif /* DIM2_OS62420_H */
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 163f21a12..e389009fc 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -42,23 +42,33 @@ static inline struct spinand_state *mtd_to_state(struct mtd_info *mtd)
static int enable_hw_ecc;
static int enable_read_hw_ecc;
-static struct nand_ecclayout spinand_oob_64 = {
- .eccbytes = 24,
- .eccpos = {
- 1, 2, 3, 4, 5, 6,
- 17, 18, 19, 20, 21, 22,
- 33, 34, 35, 36, 37, 38,
- 49, 50, 51, 52, 53, 54, },
- .oobfree = {
- {.offset = 8,
- .length = 8},
- {.offset = 24,
- .length = 8},
- {.offset = 40,
- .length = 8},
- {.offset = 56,
- .length = 8},
- }
+static int spinand_ooblayout_64_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 1;
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static int spinand_ooblayout_64_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 8;
+ oobregion->length = 8;
+
+ return 0;
+}
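+/* These callbacks express the same layout as the removed static
+ spinand_oob_64 table: in every 16-byte OOB section, bytes 1..6 hold
+ ECC data and bytes 8..15 are free. */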
+
+static const struct mtd_ooblayout_ops spinand_oob_64_ops = {
+ .ecc = spinand_ooblayout_64_ecc,
+ .free = spinand_ooblayout_64_free,
};
#endif
@@ -886,11 +896,11 @@ static int spinand_probe(struct spi_device *spi_nand)
chip->ecc.strength = 1;
chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
- chip->ecc.layout = &spinand_oob_64;
chip->ecc.read_page = spinand_read_page_hwecc;
chip->ecc.write_page = spinand_write_page_hwecc;
#else
chip->ecc.mode = NAND_ECC_SOFT;
+ chip->ecc.algo = NAND_ECC_HAMMING;
if (spinand_disable_ecc(spi_nand) < 0)
dev_info(&spi_nand->dev, "%s: disable ecc failed!\n",
__func__);
@@ -912,6 +922,9 @@ static int spinand_probe(struct spi_device *spi_nand)
mtd->dev.parent = &spi_nand->dev;
mtd->oobsize = 64;
+#ifdef CONFIG_MTD_SPINAND_ONDIEECC
+ mtd_set_ooblayout(mtd, &spinand_oob_64_ops);
+#endif
if (nand_scan(mtd, 1))
return -ENXIO;
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index aa1cdf602..99445d0fc 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -850,7 +850,7 @@ static int xlr_mii_probe(struct xlr_net_priv *priv)
/* Attach MAC to PHY */
phydev = phy_connect(priv->ndev, phydev_name(phydev),
- &xlr_gmac_link_adjust, priv->nd->phy_interface);
+ xlr_gmac_link_adjust, priv->nd->phy_interface);
if (IS_ERR(phydev)) {
pr_err("could not attach PHY\n");
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 9fda136b8..c1feccf8d 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -264,7 +264,7 @@ int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);
- if (msg == NULL)
+ if (!msg)
return -ENOMEM;
msg->data[0] = size;
@@ -620,7 +620,7 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
} else {
nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
/* Should not happen in a normal world */
- if (unlikely(nvec->rx == NULL)) {
+ if (unlikely(!nvec->rx)) {
nvec->state = 0;
break;
}
@@ -659,10 +659,11 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
to_send = nvec->tx->data[nvec->tx->pos++];
} else {
- dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
+ dev_err(nvec->dev,
+ "tx buffer underflow on %p (%u > %u)\n",
nvec->tx,
- (uint) (nvec->tx ? nvec->tx->pos : 0),
- (uint) (nvec->tx ? nvec->tx->size : 0));
+ (uint)(nvec->tx ? nvec->tx->pos : 0),
+ (uint)(nvec->tx ? nvec->tx->size : 0));
nvec->state = 0;
}
break;
diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
index b4a0545e8..fcbb0fa03 100644
--- a/drivers/staging/nvec/nvec_power.c
+++ b/drivers/staging/nvec/nvec_power.c
@@ -90,7 +90,7 @@ static int nvec_power_notifier(struct notifier_block *nb,
{
struct nvec_power *power =
container_of(nb, struct nvec_power, notifier);
- struct bat_response *res = (struct bat_response *)data;
+ struct bat_response *res = data;
if (event_type != NVEC_SYS)
return NOTIFY_DONE;
@@ -126,7 +126,7 @@ static int nvec_power_bat_notifier(struct notifier_block *nb,
{
struct nvec_power *power =
container_of(nb, struct nvec_power, notifier);
- struct bat_response *res = (struct bat_response *)data;
+ struct bat_response *res = data;
int status_changed = 0;
if (event_type != NVEC_BAT)
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index b6993b0b8..a10fe3af9 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -172,12 +172,13 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
- 1ull << pow_receive_group);
+ 1ull << pow_receive_group);
cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
} else {
old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
- (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
+ (old_group_mask & ~0xFFFFull) |
+ 1 << pow_receive_group);
}
if (USE_ASYNC_IOBDMA) {
@@ -374,7 +375,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
* doesn't exist.
*/
printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
- port);
+ port);
dev_kfree_skb_irq(skb);
}
/*
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
index a5973fd01..315a63d70 100644
--- a/drivers/staging/octeon/ethernet-rx.h
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -30,7 +30,7 @@ static inline void cvm_oct_rx_refill_pool(int fill_threshold)
number_to_free);
if (num_freed != number_to_free) {
cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
- number_to_free - num_freed);
+ number_to_free - num_freed);
}
}
}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index ffe9bd77a..6b4c20872 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -58,9 +58,9 @@ static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
-static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
+static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau)
{
- int32_t undo;
+ int undo;
undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
MAX_SKB_TO_FREE;
@@ -83,7 +83,7 @@ static void cvm_oct_kick_tx_poll_watchdog(void)
static void cvm_oct_free_tx_skbs(struct net_device *dev)
{
- int32_t skb_to_free;
+ int skb_to_free;
int qos, queues_per_port;
int total_freed = 0;
int total_remaining = 0;
@@ -148,8 +148,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
struct octeon_ethernet *priv = netdev_priv(dev);
struct sk_buff *to_free_list;
- int32_t skb_to_free;
- int32_t buffers_to_free;
+ int skb_to_free;
+ int buffers_to_free;
u32 total_to_clean;
unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
@@ -220,7 +220,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
priv->fau + qos * 4, MAX_SKB_TO_FREE);
}
skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau + qos * 4);
+ priv->fau +
+ qos * 4);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
goto skip_xmit;
}
@@ -402,7 +403,7 @@ dont_put_skbuff_in_hw:
}
skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau + qos * 4);
+ priv->fau + qos * 4);
/*
* If we're sending faster than the receive can free them then
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 271e1b8d8..e9cd5f242 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -635,7 +635,7 @@ static struct device_node *cvm_oct_of_get_child(
}
static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
- int interface, int port)
+ int interface, int port)
{
struct device_node *ni, *np;
@@ -815,7 +815,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
free_netdev(dev);
} else if (register_netdev(dev) < 0) {
pr_err("Failed to register ethernet device for interface %d, port %d\n",
- interface, priv->port);
+ interface, priv->port);
free_netdev(dev);
} else {
cvm_oct_device[priv->port] = dev;
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig
deleted file mode 100644
index f1f3ecadf..000000000
--- a/drivers/staging/rdma/Kconfig
+++ /dev/null
@@ -1,27 +0,0 @@
-menuconfig STAGING_RDMA
- tristate "RDMA staging drivers"
- depends on INFINIBAND
- depends on PCI || BROKEN
- depends on HAS_IOMEM
- depends on NET
- depends on INET
- default n
- ---help---
- This option allows you to select a number of RDMA drivers that
- fall into one of two categories: deprecated drivers being held
- here before finally being removed or new drivers that still need
- some work before being moved to the normal RDMA driver area.
-
- If you wish to work on these drivers, to help improve them, or
- to report problems you have with them, please use the
- linux-rdma@vger.kernel.org mailing list.
-
- If in doubt, say N here.
-
-
-# Please keep entries in alphabetic order
-if STAGING_RDMA
-
-source "drivers/staging/rdma/hfi1/Kconfig"
-
-endif
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile
deleted file mode 100644
index 8c7fc1de4..000000000
--- a/drivers/staging/rdma/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# Entries for RDMA_STAGING tree
-obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
deleted file mode 100644
index 4c6f1d7d2..000000000
--- a/drivers/staging/rdma/hfi1/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
-July, 2015
-
-- Remove unneeded file entries in sysfs
-- Remove software processing of IB protocol and place in library for use
- by qib, ipath (if still present), hfi1, and eventually soft-roce
-- Replace incorrect uAPI
diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c
deleted file mode 100644
index c5b520bf6..000000000
--- a/drivers/staging/rdma/hfi1/diag.c
+++ /dev/null
@@ -1,1924 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/*
- * This file contains support for diagnostic functions. It is accessed by
- * opening the hfi1_diag device, normally minor number 129. Diagnostic use
- * of the chip may render the chip or board unusable until the driver
- * is unloaded, or in some cases, until the system is rebooted.
- *
- * Accesses to the chip through this interface are not similar to going
- * through the /sys/bus/pci resource mmap interface.
- */
-
-#include <linux/io.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <rdma/ib_smi.h>
-#include "hfi.h"
-#include "device.h"
-#include "common.h"
-#include "verbs_txreq.h"
-#include "trace.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) DRIVER_NAME ": " fmt
-#define snoop_dbg(fmt, ...) \
- hfi1_cdbg(SNOOP, fmt, ##__VA_ARGS__)
-
-/* Snoop option mask */
-#define SNOOP_DROP_SEND BIT(0)
-#define SNOOP_USE_METADATA BIT(1)
-#define SNOOP_SET_VL0TOVL15 BIT(2)
-
-static u8 snoop_flags;
-
-/*
- * Extract packet length from LRH header.
- * This is in Dwords so multiply by 4 to get size in bytes
- */
-#define HFI1_GET_PKT_LEN(x) (((be16_to_cpu((x)->lrh[2]) & 0xFFF)) << 2)
-
-enum hfi1_filter_status {
- HFI1_FILTER_HIT,
- HFI1_FILTER_ERR,
- HFI1_FILTER_MISS
-};
-
-/* snoop processing functions */
-rhf_rcv_function_ptr snoop_rhf_rcv_functions[8] = {
- [RHF_RCV_TYPE_EXPECTED] = snoop_recv_handler,
- [RHF_RCV_TYPE_EAGER] = snoop_recv_handler,
- [RHF_RCV_TYPE_IB] = snoop_recv_handler,
- [RHF_RCV_TYPE_ERROR] = snoop_recv_handler,
- [RHF_RCV_TYPE_BYPASS] = snoop_recv_handler,
- [RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
- [RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
- [RHF_RCV_TYPE_INVALID7] = process_receive_invalid
-};
-
-/* Snoop packet structure */
-struct snoop_packet {
- struct list_head list;
- u32 total_len;
- u8 data[];
-};
-
-/* Do not make these an enum or it will blow up the capture_md */
-#define PKT_DIR_EGRESS 0x0
-#define PKT_DIR_INGRESS 0x1
-
-/* Packet capture metadata returned to the user with the packet. */
-struct capture_md {
- u8 port;
- u8 dir;
- u8 reserved[6];
- union {
- u64 pbc;
- u64 rhf;
- } u;
-};
-
-static atomic_t diagpkt_count = ATOMIC_INIT(0);
-static struct cdev diagpkt_cdev;
-static struct device *diagpkt_device;
-
-static ssize_t diagpkt_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
-
-static const struct file_operations diagpkt_file_ops = {
- .owner = THIS_MODULE,
- .write = diagpkt_write,
- .llseek = noop_llseek,
-};
-
-/*
- * This is used for communication with user space for snoop extended IOCTLs
- */
-struct hfi1_link_info {
- __be64 node_guid;
- u8 port_mode;
- u8 port_state;
- u16 link_speed_active;
- u16 link_width_active;
- u16 vl15_init;
- u8 port_number;
- /*
- * Add padding to make this a full IB SMP payload. Note: changing the
- * size of this structure will make the IOCTLs created with _IOWR
- * change.
- * Be sure to run tests on all IOCTLs when making changes to this
- * structure.
- */
- u8 res[47];
-};
-
-/*
- * This starts our ioctl sequence numbers *way* off from the ones
- * defined in ib_core.
- */
-#define SNOOP_CAPTURE_VERSION 0x1
-
-#define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl-number.txt */
-#define HFI1_SNOOP_IOC_MAGIC IB_IOCTL_MAGIC
-#define HFI1_SNOOP_IOC_BASE_SEQ 0x80
-
-#define HFI1_SNOOP_IOCGETLINKSTATE \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ)
-#define HFI1_SNOOP_IOCSETLINKSTATE \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 1)
-#define HFI1_SNOOP_IOCCLEARQUEUE \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 2)
-#define HFI1_SNOOP_IOCCLEARFILTER \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 3)
-#define HFI1_SNOOP_IOCSETFILTER \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 4)
-#define HFI1_SNOOP_IOCGETVERSION \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 5)
-#define HFI1_SNOOP_IOCSET_OPTS \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6)
-
-/*
- * These offsets +6/+7 could change, but these are already known and used
- * IOCTL numbers so don't change them without a good reason.
- */
-#define HFI1_SNOOP_IOCGETLINKSTATE_EXTRA \
- _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6, \
- struct hfi1_link_info)
-#define HFI1_SNOOP_IOCSETLINKSTATE_EXTRA \
- _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 7, \
- struct hfi1_link_info)
-
-static int hfi1_snoop_open(struct inode *in, struct file *fp);
-static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
- size_t pkt_len, loff_t *off);
-static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
-static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
-static unsigned int hfi1_snoop_poll(struct file *fp,
- struct poll_table_struct *wait);
-static int hfi1_snoop_release(struct inode *in, struct file *fp);
-
-struct hfi1_packet_filter_command {
- int opcode;
- int length;
- void *value_ptr;
-};
-
-/* Can't re-use PKT_DIR_*GRESS here because 0 means no packets for this */
-#define HFI1_SNOOP_INGRESS 0x1
-#define HFI1_SNOOP_EGRESS 0x2
-
-enum hfi1_packet_filter_opcodes {
- FILTER_BY_LID,
- FILTER_BY_DLID,
- FILTER_BY_MAD_MGMT_CLASS,
- FILTER_BY_QP_NUMBER,
- FILTER_BY_PKT_TYPE,
- FILTER_BY_SERVICE_LEVEL,
- FILTER_BY_PKEY,
- FILTER_BY_DIRECTION,
-};
-
-static const struct file_operations snoop_file_ops = {
- .owner = THIS_MODULE,
- .open = hfi1_snoop_open,
- .read = hfi1_snoop_read,
- .unlocked_ioctl = hfi1_ioctl,
- .poll = hfi1_snoop_poll,
- .write = hfi1_snoop_write,
- .release = hfi1_snoop_release
-};
-
-struct hfi1_filter_array {
- int (*filter)(void *, void *, void *);
-};
-
-static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
- void *value);
-static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data,
- void *value);
-static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
- void *value);
-static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value);
-
-static const struct hfi1_filter_array hfi1_filters[] = {
- { hfi1_filter_lid },
- { hfi1_filter_dlid },
- { hfi1_filter_mad_mgmt_class },
- { hfi1_filter_qp_number },
- { hfi1_filter_ibpacket_type },
- { hfi1_filter_ib_service_level },
- { hfi1_filter_ib_pkey },
- { hfi1_filter_direction },
-};
-
-#define HFI1_MAX_FILTERS ARRAY_SIZE(hfi1_filters)
-#define HFI1_DIAG_MINOR_BASE 129
-
-static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name);
-
-int hfi1_diag_add(struct hfi1_devdata *dd)
-{
- char name[16];
- int ret = 0;
-
- snprintf(name, sizeof(name), "%s_diagpkt%d", class_name(),
- dd->unit);
- /*
- * Do this for each device as opposed to the normal diagpkt
- * interface which is one per host
- */
- ret = hfi1_snoop_add(dd, name);
- if (ret)
- dd_dev_err(dd, "Unable to init snoop/capture device");
-
- snprintf(name, sizeof(name), "%s_diagpkt", class_name());
- if (atomic_inc_return(&diagpkt_count) == 1) {
- ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name,
- &diagpkt_file_ops, &diagpkt_cdev,
- &diagpkt_device, false);
- }
-
- return ret;
-}
-
-/* this must be called w/ dd->snoop_in_lock held */
-static void drain_snoop_list(struct list_head *queue)
-{
- struct list_head *pos, *q;
- struct snoop_packet *packet;
-
- list_for_each_safe(pos, q, queue) {
- packet = list_entry(pos, struct snoop_packet, list);
- list_del(pos);
- kfree(packet);
- }
-}
-
-static void hfi1_snoop_remove(struct hfi1_devdata *dd)
-{
- unsigned long flags = 0;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- drain_snoop_list(&dd->hfi1_snoop.queue);
- hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev);
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-}
-
-void hfi1_diag_remove(struct hfi1_devdata *dd)
-{
- hfi1_snoop_remove(dd);
- if (atomic_dec_and_test(&diagpkt_count))
- hfi1_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
- hfi1_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
-}
-
-/*
- * Allocated structure shared between the credit return mechanism and
- * diagpkt_send().
- */
-struct diagpkt_wait {
- struct completion credits_returned;
- int code;
- atomic_t count;
-};
-
-/*
- * When each side is finished with the structure, they call this.
- * The last user frees the structure.
- */
-static void put_diagpkt_wait(struct diagpkt_wait *wait)
-{
- if (atomic_dec_and_test(&wait->count))
- kfree(wait);
-}
-
-/*
- * Callback from the credit return code. Set the complete, which
- * will let diapkt_send() continue.
- */
-static void diagpkt_complete(void *arg, int code)
-{
- struct diagpkt_wait *wait = (struct diagpkt_wait *)arg;
-
- wait->code = code;
- complete(&wait->credits_returned);
- put_diagpkt_wait(wait); /* finished with the structure */
-}
-
-/**
- * diagpkt_send - send a packet
- * @dp: diag packet descriptor
- */
-static ssize_t diagpkt_send(struct diag_pkt *dp)
-{
- struct hfi1_devdata *dd;
- struct send_context *sc;
- struct pio_buf *pbuf;
- u32 *tmpbuf = NULL;
- ssize_t ret = 0;
- u32 pkt_len, total_len;
- pio_release_cb credit_cb = NULL;
- void *credit_arg = NULL;
- struct diagpkt_wait *wait = NULL;
- int trycount = 0;
-
- dd = hfi1_lookup(dp->unit);
- if (!dd || !(dd->flags & HFI1_PRESENT) || !dd->kregbase) {
- ret = -ENODEV;
- goto bail;
- }
- if (!(dd->flags & HFI1_INITTED)) {
- /* no hardware, freeze, etc. */
- ret = -ENODEV;
- goto bail;
- }
-
- if (dp->version != _DIAG_PKT_VERS) {
- dd_dev_err(dd, "Invalid version %u for diagpkt_write\n",
- dp->version);
- ret = -EINVAL;
- goto bail;
- }
-
- /* send count must be an exact number of dwords */
- if (dp->len & 3) {
- ret = -EINVAL;
- goto bail;
- }
-
- /* there is only port 1 */
- if (dp->port != 1) {
- ret = -EINVAL;
- goto bail;
- }
-
- /* need a valid context */
- if (dp->sw_index >= dd->num_send_contexts) {
- ret = -EINVAL;
- goto bail;
- }
- /* can only use kernel contexts */
- if (dd->send_contexts[dp->sw_index].type != SC_KERNEL) {
- ret = -EINVAL;
- goto bail;
- }
- /* must be allocated */
- sc = dd->send_contexts[dp->sw_index].sc;
- if (!sc) {
- ret = -EINVAL;
- goto bail;
- }
- /* must be enabled */
- if (!(sc->flags & SCF_ENABLED)) {
- ret = -EINVAL;
- goto bail;
- }
-
- /* allocate a buffer and copy the data in */
- tmpbuf = vmalloc(dp->len);
- if (!tmpbuf) {
- ret = -ENOMEM;
- goto bail;
- }
-
- if (copy_from_user(tmpbuf,
- (const void __user *)(unsigned long)dp->data,
- dp->len)) {
- ret = -EFAULT;
- goto bail;
- }
-
- /*
- * pkt_len is how much data we have to write, includes header and data.
- * total_len is length of the packet in Dwords plus the PBC should not
- * include the CRC.
- */
- pkt_len = dp->len >> 2;
- total_len = pkt_len + 2; /* PBC + packet */
-
- /* if 0, fill in a default */
- if (dp->pbc == 0) {
- struct hfi1_pportdata *ppd = dd->pport;
-
- hfi1_cdbg(PKT, "Generating PBC");
- dp->pbc = create_pbc(ppd, 0, 0, 0, total_len);
- } else {
- hfi1_cdbg(PKT, "Using passed in PBC");
- }
-
- hfi1_cdbg(PKT, "Egress PBC content is 0x%llx", dp->pbc);
-
- /*
- * The caller wants to wait until the packet is sent and to
- * check for errors. The best we can do is wait until
- * the buffer credits are returned and check if any packet
- * error has occurred. If there are any late errors, this
- * could miss it. If there are other senders who generate
- * an error, this may find it. However, in general, it
- * should catch most.
- */
- if (dp->flags & F_DIAGPKT_WAIT) {
- /* always force a credit return */
- dp->pbc |= PBC_CREDIT_RETURN;
- /* turn on credit return interrupts */
- sc_add_credit_return_intr(sc);
- wait = kmalloc(sizeof(*wait), GFP_KERNEL);
- if (!wait) {
- ret = -ENOMEM;
- goto bail;
- }
- init_completion(&wait->credits_returned);
- atomic_set(&wait->count, 2);
- wait->code = PRC_OK;
-
- credit_cb = diagpkt_complete;
- credit_arg = wait;
- }
-
-retry:
- pbuf = sc_buffer_alloc(sc, total_len, credit_cb, credit_arg);
- if (!pbuf) {
- if (trycount == 0) {
- /* force a credit return and try again */
- sc_return_credits(sc);
- trycount = 1;
- goto retry;
- }
- /*
- * No send buffer means no credit callback. Undo
- * the wait set-up that was done above. We free wait
- * because the callback will never be called.
- */
- if (dp->flags & F_DIAGPKT_WAIT) {
- sc_del_credit_return_intr(sc);
- kfree(wait);
- wait = NULL;
- }
- ret = -ENOSPC;
- goto bail;
- }
-
- pio_copy(dd, pbuf, dp->pbc, tmpbuf, pkt_len);
- /* no flush needed as the HW knows the packet size */
-
- ret = sizeof(*dp);
-
- if (dp->flags & F_DIAGPKT_WAIT) {
- /* wait for credit return */
- ret = wait_for_completion_interruptible(
- &wait->credits_returned);
- /*
- * If the wait returns an error, the wait was interrupted,
- * e.g. with a ^C in the user program. The callback is
- * still pending. This is OK as the wait structure is
- * kmalloc'ed and the structure will free itself when
- * all users are done with it.
- *
- * A context disable occurs on a send context restart, so
- * include that in the list of errors below to check for.
- * NOTE: PRC_FILL_ERR is at best informational and cannot
- * be depended on.
- */
- if (!ret && (((wait->code & PRC_STATUS_ERR) ||
- (wait->code & PRC_FILL_ERR) ||
- (wait->code & PRC_SC_DISABLE))))
- ret = -EIO;
-
- put_diagpkt_wait(wait); /* finished with the structure */
- sc_del_credit_return_intr(sc);
- }
-
-bail:
- vfree(tmpbuf);
- return ret;
-}
-
-static ssize_t diagpkt_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
-{
- struct hfi1_devdata *dd;
- struct send_context *sc;
- u8 vl;
-
- struct diag_pkt dp;
-
- if (count != sizeof(dp))
- return -EINVAL;
-
- if (copy_from_user(&dp, data, sizeof(dp)))
- return -EFAULT;
-
- /*
- * The Send Context is derived from the PbcVL value
- * if PBC is populated
- */
- if (dp.pbc) {
- dd = hfi1_lookup(dp.unit);
- if (!dd)
- return -ENODEV;
- vl = (dp.pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
- sc = dd->vld[vl].sc;
- if (sc) {
- dp.sw_index = sc->sw_index;
- hfi1_cdbg(
- PKT,
- "Packet sent over VL %d via Send Context %u(%u)",
- vl, sc->sw_index, sc->hw_context);
- }
- }
-
- return diagpkt_send(&dp);
-}
-
-static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name)
-{
- int ret = 0;
-
- dd->hfi1_snoop.mode_flag = 0;
- spin_lock_init(&dd->hfi1_snoop.snoop_lock);
- INIT_LIST_HEAD(&dd->hfi1_snoop.queue);
- init_waitqueue_head(&dd->hfi1_snoop.waitq);
-
- ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name,
- &snoop_file_ops,
- &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev,
- false);
-
- if (ret) {
- dd_dev_err(dd, "Couldn't create %s device: %d", name, ret);
- hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev,
- &dd->hfi1_snoop.class_dev);
- }
-
- return ret;
-}
-
-static struct hfi1_devdata *hfi1_dd_from_sc_inode(struct inode *in)
-{
- int unit = iminor(in) - HFI1_SNOOP_CAPTURE_BASE;
- struct hfi1_devdata *dd;
-
- dd = hfi1_lookup(unit);
- return dd;
-}
-
-/* clear or restore send context integrity checks */
-static void adjust_integrity_checks(struct hfi1_devdata *dd)
-{
- struct send_context *sc;
- unsigned long sc_flags;
- int i;
-
- spin_lock_irqsave(&dd->sc_lock, sc_flags);
- for (i = 0; i < dd->num_send_contexts; i++) {
- int enable;
-
- sc = dd->send_contexts[i].sc;
-
- if (!sc)
- continue; /* not allocated */
-
- enable = likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
- dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE;
-
- set_pio_integrity(sc);
-
- if (enable) /* take HFI_CAP_* flags into account */
- hfi1_init_ctxt(sc);
- }
- spin_unlock_irqrestore(&dd->sc_lock, sc_flags);
-}
-
-static int hfi1_snoop_open(struct inode *in, struct file *fp)
-{
- int ret;
- int mode_flag = 0;
- unsigned long flags = 0;
- struct hfi1_devdata *dd;
- struct list_head *queue;
-
- mutex_lock(&hfi1_mutex);
-
- dd = hfi1_dd_from_sc_inode(in);
- if (!dd) {
- ret = -ENODEV;
- goto bail;
- }
-
- /*
- * File mode determines snoop or capture. Some existing user
- * applications expect the capture device to be able to be opened RDWR
- * because they expect a dedicated capture device. For this reason we
- * support a module param to force capture mode even if the file open
- * mode matches snoop.
- */
- if ((fp->f_flags & O_ACCMODE) == O_RDONLY) {
- snoop_dbg("Capture Enabled");
- mode_flag = HFI1_PORT_CAPTURE_MODE;
- } else if ((fp->f_flags & O_ACCMODE) == O_RDWR) {
- snoop_dbg("Snoop Enabled");
- mode_flag = HFI1_PORT_SNOOP_MODE;
- } else {
- snoop_dbg("Invalid");
- ret = -EINVAL;
- goto bail;
- }
- queue = &dd->hfi1_snoop.queue;
-
- /*
- * We are not supporting snoop and capture at the same time.
- */
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- if (dd->hfi1_snoop.mode_flag) {
- ret = -EBUSY;
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- goto bail;
- }
-
- dd->hfi1_snoop.mode_flag = mode_flag;
- drain_snoop_list(queue);
-
- dd->hfi1_snoop.filter_callback = NULL;
- dd->hfi1_snoop.filter_value = NULL;
-
- /*
- * Send side packet integrity checks are not helpful when snooping so
- * disable and re-enable when we stop snooping.
- */
- if (mode_flag == HFI1_PORT_SNOOP_MODE) {
- /* clear after snoop mode is on */
- adjust_integrity_checks(dd); /* clear */
-
- /*
- * We also do not want to be doing the DLID LMC check for
- * ingressed packets.
- */
- dd->hfi1_snoop.dcc_cfg = read_csr(dd, DCC_CFG_PORT_CONFIG1);
- write_csr(dd, DCC_CFG_PORT_CONFIG1,
- (dd->hfi1_snoop.dcc_cfg >> 32) << 32);
- }
-
- /*
- * As soon as we set these function pointers the recv and send handlers
- * are active. This is a race condition so we must make sure to drain
- * the queue and init filter values above. Technically we should add
- * locking here but all that will happen is on recv a packet will get
- * allocated and get stuck on the snoop_lock before getting added to the
- * queue. Same goes for send.
- */
- dd->rhf_rcv_function_map = snoop_rhf_rcv_functions;
- dd->process_pio_send = snoop_send_pio_handler;
- dd->process_dma_send = snoop_send_pio_handler;
- dd->pio_inline_send = snoop_inline_pio_send;
-
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- ret = 0;
-
-bail:
- mutex_unlock(&hfi1_mutex);
-
- return ret;
-}
-
-static int hfi1_snoop_release(struct inode *in, struct file *fp)
-{
- unsigned long flags = 0;
- struct hfi1_devdata *dd;
- int mode_flag;
-
- dd = hfi1_dd_from_sc_inode(in);
- if (!dd)
- return -ENODEV;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-
- /* clear the snoop mode before re-adjusting send context CSRs */
- mode_flag = dd->hfi1_snoop.mode_flag;
- dd->hfi1_snoop.mode_flag = 0;
-
- /*
- * Drain the queue and clear the filters we are done with it. Don't
- * forget to restore the packet integrity checks
- */
- drain_snoop_list(&dd->hfi1_snoop.queue);
- if (mode_flag == HFI1_PORT_SNOOP_MODE) {
- /* restore after snoop mode is clear */
- adjust_integrity_checks(dd); /* restore */
-
- /*
- * Also should probably reset the DCC_CONFIG1 register for DLID
- * checking on incoming packets again. Use the value saved when
- * opening the snoop device.
- */
- write_csr(dd, DCC_CFG_PORT_CONFIG1, dd->hfi1_snoop.dcc_cfg);
- }
-
- dd->hfi1_snoop.filter_callback = NULL;
- kfree(dd->hfi1_snoop.filter_value);
- dd->hfi1_snoop.filter_value = NULL;
-
- /*
- * User is done snooping and capturing, return control to the normal
- * handler. Re-enable SDMA handling.
- */
- dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;
- dd->process_pio_send = hfi1_verbs_send_pio;
- dd->process_dma_send = hfi1_verbs_send_dma;
- dd->pio_inline_send = pio_copy;
-
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-
- snoop_dbg("snoop/capture device released");
-
- return 0;
-}
-
-static unsigned int hfi1_snoop_poll(struct file *fp,
- struct poll_table_struct *wait)
-{
- int ret = 0;
- unsigned long flags = 0;
-
- struct hfi1_devdata *dd;
-
- dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (!dd)
- return -ENODEV;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-
- poll_wait(fp, &dd->hfi1_snoop.waitq, wait);
- if (!list_empty(&dd->hfi1_snoop.queue))
- ret |= POLLIN | POLLRDNORM;
-
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- return ret;
-}
-
-static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
-{
- struct diag_pkt dpkt;
- struct hfi1_devdata *dd;
- size_t ret;
- u8 byte_two, sl, sc5, sc4, vl, byte_one;
- struct send_context *sc;
- u32 len;
- u64 pbc;
- struct hfi1_ibport *ibp;
- struct hfi1_pportdata *ppd;
-
- dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (!dd)
- return -ENODEV;
-
- ppd = dd->pport;
- snoop_dbg("received %lu bytes from user", count);
-
- memset(&dpkt, 0, sizeof(struct diag_pkt));
- dpkt.version = _DIAG_PKT_VERS;
- dpkt.unit = dd->unit;
- dpkt.port = 1;
-
- if (likely(!(snoop_flags & SNOOP_USE_METADATA))) {
- /*
- * We need to generate the PBC and not let diagpkt_send do it,
- * to do this we need the VL and the length in dwords.
- * The VL can be determined by using the SL and looking up the
- * SC. Then the SC can be converted into VL. The exception to
- * this is those packets which are from an SMI queue pair.
- * Since we can't detect anything about the QP here we have to
- * rely on the SC. If its 0xF then we assume its SMI and
- * do not look at the SL.
- */
- if (copy_from_user(&byte_one, data, 1))
- return -EINVAL;
-
- if (copy_from_user(&byte_two, data + 1, 1))
- return -EINVAL;
-
- sc4 = (byte_one >> 4) & 0xf;
- if (sc4 == 0xF) {
- snoop_dbg("Detected VL15 packet ignoring SL in packet");
- vl = sc4;
- } else {
- sl = (byte_two >> 4) & 0xf;
- ibp = to_iport(&dd->verbs_dev.rdi.ibdev, 1);
- sc5 = ibp->sl_to_sc[sl];
- vl = sc_to_vlt(dd, sc5);
- if (vl != sc4) {
- snoop_dbg("VL %d does not match SC %d of packet",
- vl, sc4);
- return -EINVAL;
- }
- }
-
- sc = dd->vld[vl].sc; /* Look up the context based on VL */
- if (sc) {
- dpkt.sw_index = sc->sw_index;
- snoop_dbg("Sending on context %u(%u)", sc->sw_index,
- sc->hw_context);
- } else {
- snoop_dbg("Could not find context for vl %d", vl);
- return -EINVAL;
- }
-
- len = (count >> 2) + 2; /* Add in PBC */
- pbc = create_pbc(ppd, 0, 0, vl, len);
- } else {
- if (copy_from_user(&pbc, data, sizeof(pbc)))
- return -EINVAL;
- vl = (pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
- sc = dd->vld[vl].sc; /* Look up the context based on VL */
- if (sc) {
- dpkt.sw_index = sc->sw_index;
- } else {
- snoop_dbg("Could not find context for vl %d", vl);
- return -EINVAL;
- }
- data += sizeof(pbc);
- count -= sizeof(pbc);
- }
- dpkt.len = count;
- dpkt.data = (unsigned long)data;
-
- snoop_dbg("PBC: vl=0x%llx Length=0x%llx",
- (pbc >> 12) & 0xf,
- (pbc & 0xfff));
-
- dpkt.pbc = pbc;
- ret = diagpkt_send(&dpkt);
- /*
- * diagpkt_send only returns number of bytes in the diagpkt so patch
- * that up here before returning.
- */
- if (ret == sizeof(dpkt))
- return count;
-
- return ret;
-}
-
-static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
- size_t pkt_len, loff_t *off)
-{
- ssize_t ret = 0;
- unsigned long flags = 0;
- struct snoop_packet *packet = NULL;
- struct hfi1_devdata *dd;
-
- dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (!dd)
- return -ENODEV;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-
- while (list_empty(&dd->hfi1_snoop.queue)) {
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-
- if (fp->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
- if (wait_event_interruptible(
- dd->hfi1_snoop.waitq,
- !list_empty(&dd->hfi1_snoop.queue)))
- return -EINTR;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- }
-
- if (!list_empty(&dd->hfi1_snoop.queue)) {
- packet = list_entry(dd->hfi1_snoop.queue.next,
- struct snoop_packet, list);
- list_del(&packet->list);
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- if (pkt_len >= packet->total_len) {
- if (copy_to_user(data, packet->data,
- packet->total_len))
- ret = -EFAULT;
- else
- ret = packet->total_len;
- } else {
- ret = -EINVAL;
- }
-
- kfree(packet);
- } else {
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- }
-
- return ret;
-}
-
-/**
- * hfi1_assign_snoop_link_credits -- Set up credits for VL15 and others
- * @ppd : ptr to hfi1 port data
- * @value : options from user space
- *
- * Assumes the rest of the CM credit registers are zero from a
- * previous global or credit reset.
- * Leave shared count at zero for both global and all vls.
- * In snoop mode ideally we don't use shared credits
- * Reserve 8.5k for VL15
- * If total credits less than 8.5kbytes return error.
- * Divide the rest of the credits across VL0 to VL7 and if
- * each of these levels has less than 34 credits (at least 2048 + 128 bytes)
- * return with an error.
- * The credit registers will be reset to zero on link negotiation or link up
- * so this function should be activated from user space only if the port has
- * gone past link negotiation and link up.
- *
- * Return -- 0 if successful else error condition
- *
- */
-static long hfi1_assign_snoop_link_credits(struct hfi1_pportdata *ppd,
- int value)
-{
-#define OPA_MIN_PER_VL_CREDITS 34 /* 2048 + 128 bytes */
- struct buffer_control t;
- int i;
- struct hfi1_devdata *dd = ppd->dd;
- u16 total_credits = (value >> 16) & 0xffff;
- u16 vl15_credits = dd->vl15_init / 2;
- u16 per_vl_credits;
- __be16 be_per_vl_credits;
-
- if (!(ppd->host_link_state & HLS_UP))
- goto err_exit;
- if (total_credits < vl15_credits)
- goto err_exit;
-
- per_vl_credits = (total_credits - vl15_credits) / TXE_NUM_DATA_VL;
-
- if (per_vl_credits < OPA_MIN_PER_VL_CREDITS)
- goto err_exit;
-
- memset(&t, 0, sizeof(t));
- be_per_vl_credits = cpu_to_be16(per_vl_credits);
-
- for (i = 0; i < TXE_NUM_DATA_VL; i++)
- t.vl[i].dedicated = be_per_vl_credits;
-
- t.vl[15].dedicated = cpu_to_be16(vl15_credits);
- return set_buffer_control(ppd, &t);
-
-err_exit:
- snoop_dbg("port_state = 0x%x, total_credits = %d, vl15_credits = %d",
- ppd->host_link_state, total_credits, vl15_credits);
-
- return -EINVAL;
-}
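-
-/*
- * Worked example with illustrative numbers: credits are 64-byte units
- * here (34 credits = 2048 + 128 bytes), so the 8.5k VL15 reserve is 136
- * credits. With total_credits = 500, each of the TXE_NUM_DATA_VL = 8
- * data VLs gets (500 - 136) / 8 = 45 dedicated credits and the check
- * passes; total_credits = 300 would leave only 20 per VL and fail with
- * -EINVAL.
- */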
-
-static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
-{
- struct hfi1_devdata *dd;
- void *filter_value = NULL;
- long ret = 0;
- int value = 0;
- u8 phys_state = 0;
- u8 link_state = 0;
- u16 dev_state = 0;
- unsigned long flags = 0;
- unsigned long *argp = NULL;
- struct hfi1_packet_filter_command filter_cmd = {0};
- int mode_flag = 0;
- struct hfi1_pportdata *ppd = NULL;
- unsigned int index;
- struct hfi1_link_info link_info;
- int read_cmd, write_cmd, read_ok, write_ok;
-
- dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (!dd)
- return -ENODEV;
-
- mode_flag = dd->hfi1_snoop.mode_flag;
- read_cmd = _IOC_DIR(cmd) & _IOC_READ;
- write_cmd = _IOC_DIR(cmd) & _IOC_WRITE;
- write_ok = access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
- read_ok = access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
-
- if ((read_cmd && !write_ok) || (write_cmd && !read_ok))
- return -EFAULT;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if ((mode_flag & HFI1_PORT_CAPTURE_MODE) &&
- (cmd != HFI1_SNOOP_IOCCLEARQUEUE) &&
- (cmd != HFI1_SNOOP_IOCCLEARFILTER) &&
- (cmd != HFI1_SNOOP_IOCSETFILTER))
-		/* Capture devices are allowed only 3 operations:
-		 * 1. Clear capture queue
-		 * 2. Clear capture filter
-		 * 3. Set capture filter
-		 * Others are invalid.
-		 */
- return -EINVAL;
-
- switch (cmd) {
- case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA:
- memset(&link_info, 0, sizeof(link_info));
-
- if (copy_from_user(&link_info,
- (struct hfi1_link_info __user *)arg,
- sizeof(link_info)))
- return -EFAULT;
-
- value = link_info.port_state;
- index = link_info.port_number;
- if (index > dd->num_pports - 1)
- return -EINVAL;
-
- ppd = &dd->pport[index];
- if (!ppd)
- return -EINVAL;
-
- /* What we want to transition to */
- phys_state = (value >> 4) & 0xF;
- link_state = value & 0xF;
- snoop_dbg("Setting link state 0x%x", value);
-
- switch (link_state) {
- case IB_PORT_NOP:
- if (phys_state == 0)
- break;
- /* fall through */
- case IB_PORT_DOWN:
- switch (phys_state) {
- case 0:
- dev_state = HLS_DN_DOWNDEF;
- break;
- case 2:
- dev_state = HLS_DN_POLL;
- break;
- case 3:
- dev_state = HLS_DN_DISABLE;
- break;
- default:
- return -EINVAL;
- }
- ret = set_link_state(ppd, dev_state);
- break;
- case IB_PORT_ARMED:
- ret = set_link_state(ppd, HLS_UP_ARMED);
- if (!ret)
- send_idle_sma(dd, SMA_IDLE_ARM);
- break;
- case IB_PORT_ACTIVE:
- ret = set_link_state(ppd, HLS_UP_ACTIVE);
- if (!ret)
- send_idle_sma(dd, SMA_IDLE_ACTIVE);
- break;
- default:
- return -EINVAL;
- }
-
- if (ret)
- break;
- /* fall through */
- case HFI1_SNOOP_IOCGETLINKSTATE:
- case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA:
- if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) {
- memset(&link_info, 0, sizeof(link_info));
- if (copy_from_user(&link_info,
- (struct hfi1_link_info __user *)arg,
- sizeof(link_info)))
- return -EFAULT;
- index = link_info.port_number;
- } else {
- ret = __get_user(index, (int __user *)arg);
- if (ret != 0)
- break;
- }
-
- if (index > dd->num_pports - 1)
- return -EINVAL;
-
- ppd = &dd->pport[index];
- if (!ppd)
- return -EINVAL;
-
- value = hfi1_ibphys_portstate(ppd);
- value <<= 4;
- value |= driver_lstate(ppd);
-
- snoop_dbg("Link port | Link State: %d", value);
-
- if ((cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) ||
- (cmd == HFI1_SNOOP_IOCSETLINKSTATE_EXTRA)) {
- link_info.port_state = value;
- link_info.node_guid = cpu_to_be64(ppd->guid);
- link_info.link_speed_active =
- ppd->link_speed_active;
- link_info.link_width_active =
- ppd->link_width_active;
- if (copy_to_user((struct hfi1_link_info __user *)arg,
- &link_info, sizeof(link_info)))
- return -EFAULT;
- } else {
- ret = __put_user(value, (int __user *)arg);
- }
- break;
-
- case HFI1_SNOOP_IOCCLEARQUEUE:
- snoop_dbg("Clearing snoop queue");
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- drain_snoop_list(&dd->hfi1_snoop.queue);
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- break;
-
- case HFI1_SNOOP_IOCCLEARFILTER:
- snoop_dbg("Clearing filter");
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- if (dd->hfi1_snoop.filter_callback) {
- /* Drain packets first */
- drain_snoop_list(&dd->hfi1_snoop.queue);
- dd->hfi1_snoop.filter_callback = NULL;
- }
- kfree(dd->hfi1_snoop.filter_value);
- dd->hfi1_snoop.filter_value = NULL;
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- break;
-
- case HFI1_SNOOP_IOCSETFILTER:
- snoop_dbg("Setting filter");
- /* just copy command structure */
- argp = (unsigned long *)arg;
- if (copy_from_user(&filter_cmd, (void __user *)argp,
- sizeof(filter_cmd)))
- return -EFAULT;
-
- if (filter_cmd.opcode >= HFI1_MAX_FILTERS) {
- pr_alert("Invalid opcode in request\n");
- return -EINVAL;
- }
-
- snoop_dbg("Opcode %d Len %d Ptr %p",
- filter_cmd.opcode, filter_cmd.length,
- filter_cmd.value_ptr);
-
- filter_value = kcalloc(filter_cmd.length, sizeof(u8),
- GFP_KERNEL);
- if (!filter_value)
- return -ENOMEM;
-
- /* copy remaining data from userspace */
- if (copy_from_user((u8 *)filter_value,
- (void __user *)filter_cmd.value_ptr,
- filter_cmd.length)) {
- kfree(filter_value);
- return -EFAULT;
- }
- /* Drain packets first */
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- drain_snoop_list(&dd->hfi1_snoop.queue);
- dd->hfi1_snoop.filter_callback =
- hfi1_filters[filter_cmd.opcode].filter;
- /* just in case we see back to back sets */
- kfree(dd->hfi1_snoop.filter_value);
- dd->hfi1_snoop.filter_value = filter_value;
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- break;
- case HFI1_SNOOP_IOCGETVERSION:
- value = SNOOP_CAPTURE_VERSION;
- snoop_dbg("Getting version: %d", value);
- ret = __put_user(value, (int __user *)arg);
- break;
- case HFI1_SNOOP_IOCSET_OPTS:
- snoop_flags = 0;
- ret = __get_user(value, (int __user *)arg);
- if (ret != 0)
- break;
-
- snoop_dbg("Setting snoop option %d", value);
- if (value & SNOOP_DROP_SEND)
- snoop_flags |= SNOOP_DROP_SEND;
- if (value & SNOOP_USE_METADATA)
- snoop_flags |= SNOOP_USE_METADATA;
- if (value & (SNOOP_SET_VL0TOVL15)) {
- ppd = &dd->pport[0]; /* first port will do */
- ret = hfi1_assign_snoop_link_credits(ppd, value);
- }
- break;
- default:
- return -ENOTTY;
- }
-
- return ret;
-}
-
-static void snoop_list_add_tail(struct snoop_packet *packet,
- struct hfi1_devdata *dd)
-{
- unsigned long flags = 0;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- if (likely((dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) ||
- (dd->hfi1_snoop.mode_flag & HFI1_PORT_CAPTURE_MODE))) {
- list_add_tail(&packet->list, &dd->hfi1_snoop.queue);
- snoop_dbg("Added packet to list");
- }
-
-	/*
-	 * Technically we could have closed the snoop device while waiting
-	 * on the above lock and it is gone now. The snoop mode_flag will
-	 * prevent us from adding the packet to the queue though.
-	 */
-
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- wake_up_interruptible(&dd->hfi1_snoop.waitq);
-}
-
-static inline int hfi1_filter_check(void *val, const char *msg)
-{
- if (!val) {
- snoop_dbg("Error invalid %s value for filter", msg);
- return HFI1_FILTER_ERR;
- }
- return 0;
-}
-
-static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value)
-{
- struct hfi1_ib_header *hdr;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- if (*((u16 *)value) == be16_to_cpu(hdr->lrh[3])) /* matches slid */
- return HFI1_FILTER_HIT; /* matched */
-
- return HFI1_FILTER_MISS; /* Not matched */
-}
-
-static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value)
-{
- struct hfi1_ib_header *hdr;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- if (*((u16 *)value) == be16_to_cpu(hdr->lrh[1]))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
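-
-/*
- * For reference: the LRH is four big-endian 16-bit words, where lrh[0]
- * holds VL/LVer/SL/LNH, lrh[1] is the DLID, lrh[2] is the packet length,
- * and lrh[3] is the SLID, which is why the LID filters above compare
- * lrh[3] and lrh[1] respectively.
- */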
-
-/* Not valid for outgoing packets; the send handler passes NULL for data */
-static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
- void *value)
-{
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr = NULL;
- struct ib_smp *smp = NULL;
- u32 qpn = 0;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(packet_data, "packet_data");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- /* Check for GRH */
- if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth; /* LRH + BTH + DETH */
- else
- ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */
-
- qpn = be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF;
- if (qpn <= 1) {
- smp = (struct ib_smp *)packet_data;
- if (*((u8 *)value) == smp->mgmt_class)
- return HFI1_FILTER_HIT;
- else
- return HFI1_FILTER_MISS;
- }
- return HFI1_FILTER_ERR;
-}
-
-static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value)
-{
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr = NULL;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- /* Check for GRH */
- if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth; /* LRH + BTH + DETH */
- else
- ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */
- if (*((u32 *)value) == (be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
-
-static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data,
- void *value)
-{
- u32 lnh = 0;
- u8 opcode = 0;
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr = NULL;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
-
- if (lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == HFI1_LRH_GRH)
- ohdr = &hdr->u.l.oth;
- else
- return HFI1_FILTER_ERR;
-
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-
- if (*((u8 *)value) == ((opcode >> 5) & 0x7))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
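-
-/*
- * The top three opcode bits select the transport: (opcode >> 5) & 0x7 is
- * 0 for RC, 1 for UC, 2 for RD and 3 for UD, so a user value of 3 makes
- * this filter match any UD packet.
- */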
-
-static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
- void *value)
-{
- struct hfi1_ib_header *hdr;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- if ((*((u8 *)value)) == ((be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
-
-static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value)
-{
- u32 lnh = 0;
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr = NULL;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
- if (lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == HFI1_LRH_GRH)
- ohdr = &hdr->u.l.oth;
- else
- return HFI1_FILTER_ERR;
-
-	/* The P_Key is a 16-bit entity, but its topmost bit indicates the
-	 * type of membership: 0 for limited and 1 for full.
-	 * Limited members cannot accept information from other
-	 * limited members, but communication is allowed between
-	 * every other combination of membership.
-	 * Hence we omit the topmost bit while comparing, so limited and
-	 * full members of the same partition match.
-	 */
-
- if ((*(u16 *)value & 0x7FFF) ==
- ((be32_to_cpu(ohdr->bth[0])) & 0x7FFF))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
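-
-/*
- * For example, a user value of 0x8001 (full member of P_Key 1) and an
- * on-the-wire P_Key of 0x0001 (limited member of the same partition)
- * both reduce to 0x0001 under the 0x7FFF mask, so the filter hits.
- */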
-
-/*
- * If packet_data is NULL then this call came from one of the send
- * functions, so we know whether the packet is ingressing or egressing.
- */
-static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value)
-{
-	u8 user_dir;
-	int ret;
-
-	ret = hfi1_filter_check(value, "user");
-	if (ret)
-		return ret;
-
-	/* safe to dereference only after the NULL check above */
-	user_dir = *(u8 *)value;
-
- if (packet_data) {
- /* Incoming packet */
- if (user_dir & HFI1_SNOOP_INGRESS)
- return HFI1_FILTER_HIT;
- } else {
- /* Outgoing packet */
- if (user_dir & HFI1_SNOOP_EGRESS)
- return HFI1_FILTER_HIT;
- }
-
- return HFI1_FILTER_MISS;
-}
-
-/*
- * Allocate a snoop packet. This is the structure stored in the ring buffer,
- * not to be confused with an HFI packet type.
- */
-static struct snoop_packet *allocate_snoop_packet(u32 hdr_len,
- u32 data_len,
- u32 md_len)
-{
- struct snoop_packet *packet;
-
- packet = kzalloc(sizeof(*packet) + hdr_len + data_len
- + md_len,
- GFP_ATOMIC | __GFP_NOWARN);
- if (likely(packet))
- INIT_LIST_HEAD(&packet->list);
-
- return packet;
-}
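-
-/*
- * The callers below lay the trailing data buffer out as
- * [capture_md (md_len)][packet header (hdr_len)][payload (data_len)],
- * immediately after the struct snoop_packet bookkeeping fields.
- */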
-
-/*
- * Instead of having snoop and capture code intermixed with the recv
- * functions, both the interrupt handler and hfi1_ib_rcv(), we hijack the
- * call and land in here for snoop/capture; if not enabled, the call goes
- * through as before. This gives us a single point to constrain all of the
- * snoop recv logic. There is nothing special that needs to happen for
- * bypass packets. This routine should not try to look into the packet; it
- * just copies it. There is no guarantee for filters when it comes to
- * bypass packets as there is no specific support. Bottom line: this
- * routine does not even know what a bypass packet is.
- */
- */
-int snoop_recv_handler(struct hfi1_packet *packet)
-{
- struct hfi1_pportdata *ppd = packet->rcd->ppd;
- struct hfi1_ib_header *hdr = packet->hdr;
- int header_size = packet->hlen;
- void *data = packet->ebuf;
- u32 tlen = packet->tlen;
- struct snoop_packet *s_packet = NULL;
- int ret;
- int snoop_mode = 0;
- u32 md_len = 0;
- struct capture_md md;
-
- snoop_dbg("PACKET IN: hdr size %d tlen %d data %p", header_size, tlen,
- data);
-
- trace_snoop_capture(ppd->dd, header_size, hdr, tlen - header_size,
- data);
-
- if (!ppd->dd->hfi1_snoop.filter_callback) {
- snoop_dbg("filter not set");
- ret = HFI1_FILTER_HIT;
- } else {
- ret = ppd->dd->hfi1_snoop.filter_callback(hdr, data,
- ppd->dd->hfi1_snoop.filter_value);
- }
-
- switch (ret) {
- case HFI1_FILTER_ERR:
- snoop_dbg("Error in filter call");
- break;
- case HFI1_FILTER_MISS:
- snoop_dbg("Filter Miss");
- break;
- case HFI1_FILTER_HIT:
-
- if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
- snoop_mode = 1;
- if ((snoop_mode == 0) ||
- unlikely(snoop_flags & SNOOP_USE_METADATA))
- md_len = sizeof(struct capture_md);
-
- s_packet = allocate_snoop_packet(header_size,
- tlen - header_size,
- md_len);
-
- if (unlikely(!s_packet)) {
- dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
- break;
- }
-
- if (md_len > 0) {
- memset(&md, 0, sizeof(struct capture_md));
- md.port = 1;
- md.dir = PKT_DIR_INGRESS;
- md.u.rhf = packet->rhf;
- memcpy(s_packet->data, &md, md_len);
- }
-
- /* We should always have a header */
- if (hdr) {
- memcpy(s_packet->data + md_len, hdr, header_size);
- } else {
- dd_dev_err(ppd->dd, "Unable to copy header to snoop/capture packet\n");
- kfree(s_packet);
- break;
- }
-
-		/*
-		 * Packets with no data are possible. If there is no data, we
-		 * need not take care of the last 4 bytes, which are normally
-		 * included with data buffers and counted in tlen. Since we
-		 * kzalloc the buffer we do not need to set any values, but if
-		 * we decide not to use kzalloc we should zero them.
-		 */
- if (data)
- memcpy(s_packet->data + header_size + md_len, data,
- tlen - header_size);
-
- s_packet->total_len = tlen + md_len;
- snoop_list_add_tail(s_packet, ppd->dd);
-
-		/*
-		 * If we are snooping the packet, not capturing it, then throw
-		 * it away after adding it to the list.
-		 */
- snoop_dbg("Capturing packet");
- if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) {
- snoop_dbg("Throwing packet away");
- /*
- * If we are dropping the packet we still may need to
- * handle the case where error flags are set, this is
- * normally done by the type specific handler but that
- * won't be called in this case.
- */
- if (unlikely(rhf_err_flags(packet->rhf)))
- handle_eflags(packet);
-
- /* throw the packet on the floor */
- return RHF_RCV_CONTINUE;
- }
- break;
- default:
- break;
- }
-
- /*
- * We do not care what type of packet came in here - just pass it off
- * to the normal handler.
- */
- return ppd->dd->normal_rhf_rcv_functions[rhf_rcv_type(packet->rhf)]
- (packet);
-}
-
-/*
- * Handle snooping and capturing packets when sdma is being used.
- */
-int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc)
-{
- pr_alert("Snooping/Capture of Send DMA Packets Is Not Supported!\n");
- snoop_dbg("Unsupported Operation");
- return hfi1_verbs_send_dma(qp, ps, 0);
-}
-
-/*
- * Handle snooping and capturing packets when pio is being used. Does not
- * handle bypass packets. The only way to send a bypass packet currently is
- * to use the diagpkt interface; when that interface is enabled,
- * snoop/capture is not.
- */
-int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc)
-{
- u32 hdrwords = qp->s_hdrwords;
- struct rvt_sge_state *ss = qp->s_cur_sge;
- u32 len = qp->s_cur_size;
- u32 dwords = (len + 3) >> 2;
- u32 plen = hdrwords + dwords + 2; /* includes pbc */
- struct hfi1_pportdata *ppd = ps->ppd;
- struct snoop_packet *s_packet = NULL;
- u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
- u32 length = 0;
- struct rvt_sge_state temp_ss;
- void *data = NULL;
- void *data_start = NULL;
- int ret;
- int snoop_mode = 0;
- int md_len = 0;
- struct capture_md md;
- u32 vl;
- u32 hdr_len = hdrwords << 2;
- u32 tlen = HFI1_GET_PKT_LEN(&ps->s_txreq->phdr.hdr);
-
- md.u.pbc = 0;
-
- snoop_dbg("PACKET OUT: hdrword %u len %u plen %u dwords %u tlen %u",
- hdrwords, len, plen, dwords, tlen);
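-	/*
-	 * Worked example with illustrative numbers: a 7-dword header
-	 * (hdrwords = 7) and len = 100 bytes give dwords = (100 + 3) >> 2 =
-	 * 25 and plen = 7 + 25 + 2 = 34 dwords, including the 2-dword PBC.
-	 */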
- if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
- snoop_mode = 1;
- if ((snoop_mode == 0) ||
- unlikely(snoop_flags & SNOOP_USE_METADATA))
- md_len = sizeof(struct capture_md);
-
- /* not using ss->total_len as arg 2 b/c that does not count CRC */
- s_packet = allocate_snoop_packet(hdr_len, tlen - hdr_len, md_len);
-
- if (unlikely(!s_packet)) {
- dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
- goto out;
- }
-
- s_packet->total_len = tlen + md_len;
-
- if (md_len > 0) {
- memset(&md, 0, sizeof(struct capture_md));
- md.port = 1;
- md.dir = PKT_DIR_EGRESS;
- if (likely(pbc == 0)) {
- vl = be16_to_cpu(ps->s_txreq->phdr.hdr.lrh[0]) >> 12;
- md.u.pbc = create_pbc(ppd, 0, qp->s_srate, vl, plen);
- } else {
- md.u.pbc = 0;
- }
- memcpy(s_packet->data, &md, md_len);
- } else {
- md.u.pbc = pbc;
- }
-
- /* Copy header */
- if (likely(hdr)) {
- memcpy(s_packet->data + md_len, hdr, hdr_len);
- } else {
- dd_dev_err(ppd->dd,
- "Unable to copy header to snoop/capture packet\n");
- kfree(s_packet);
- goto out;
- }
-
- if (ss) {
- data = s_packet->data + hdr_len + md_len;
- data_start = data;
-
- /*
- * Copy SGE State
- * The update_sge() function below will not modify the
- * individual SGEs in the array. It will make a copy each time
- * and operate on that. So we only need to copy this instance
- * and it won't impact PIO.
- */
- temp_ss = *ss;
- length = len;
-
- snoop_dbg("Need to copy %d bytes", length);
- while (length) {
- void *addr = temp_ss.sge.vaddr;
- u32 slen = temp_ss.sge.length;
-
- if (slen > length) {
- slen = length;
- snoop_dbg("slen %d > len %d", slen, length);
- }
- snoop_dbg("copy %d to %p", slen, addr);
- memcpy(data, addr, slen);
- update_sge(&temp_ss, slen);
- length -= slen;
- data += slen;
- snoop_dbg("data is now %p bytes left %d", data, length);
- }
- snoop_dbg("Completed SGE copy");
- }
-
-	/*
-	 * Why do the filter check down here? Because the event tracing has its
-	 * own filtering and we need to have walked the SGE list.
-	 */
- if (!ppd->dd->hfi1_snoop.filter_callback) {
- snoop_dbg("filter not set\n");
- ret = HFI1_FILTER_HIT;
- } else {
- ret = ppd->dd->hfi1_snoop.filter_callback(
- &ps->s_txreq->phdr.hdr,
- NULL,
- ppd->dd->hfi1_snoop.filter_value);
- }
-
- switch (ret) {
- case HFI1_FILTER_ERR:
- snoop_dbg("Error in filter call");
- /* fall through */
- case HFI1_FILTER_MISS:
- snoop_dbg("Filter Miss");
- kfree(s_packet);
- break;
- case HFI1_FILTER_HIT:
- snoop_dbg("Capturing packet");
- snoop_list_add_tail(s_packet, ppd->dd);
-
- if (unlikely((snoop_flags & SNOOP_DROP_SEND) &&
- (ppd->dd->hfi1_snoop.mode_flag &
- HFI1_PORT_SNOOP_MODE))) {
- unsigned long flags;
-
- snoop_dbg("Dropping packet");
- if (qp->s_wqe) {
- spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_send_complete(
- qp,
- qp->s_wqe,
- IB_WC_SUCCESS);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- } else if (qp->ibqp.qp_type == IB_QPT_RC) {
- spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_rc_send_complete(qp,
- &ps->s_txreq->phdr.hdr);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- }
-
- /*
- * If snoop is dropping the packet we need to put the
- * txreq back because no one else will.
- */
- hfi1_put_txreq(ps->s_txreq);
- return 0;
- }
- break;
- default:
- kfree(s_packet);
- break;
- }
-out:
- return hfi1_verbs_send_pio(qp, ps, md.u.pbc);
-}
-
-/*
- * Callers of this must pass an hfi1_ib_header type for the from ptr.
- * Currently this can be used anywhere, but the intention is for inline ACKs
- * for RC and CCA packets. We don't restrict this usage though.
- */
-void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
- u64 pbc, const void *from, size_t count)
-{
- int snoop_mode = 0;
- int md_len = 0;
- struct capture_md md;
- struct snoop_packet *s_packet = NULL;
-
- /*
- * count is in dwords so we need to convert to bytes.
- * We also need to account for CRC which would be tacked on by hardware.
- */
- int packet_len = (count << 2) + 4;
- int ret;
-
- snoop_dbg("ACK OUT: len %d", packet_len);
-
- if (!dd->hfi1_snoop.filter_callback) {
- snoop_dbg("filter not set");
- ret = HFI1_FILTER_HIT;
- } else {
- ret = dd->hfi1_snoop.filter_callback(
- (struct hfi1_ib_header *)from,
- NULL,
- dd->hfi1_snoop.filter_value);
- }
-
- switch (ret) {
- case HFI1_FILTER_ERR:
- snoop_dbg("Error in filter call");
- /* fall through */
- case HFI1_FILTER_MISS:
- snoop_dbg("Filter Miss");
- break;
- case HFI1_FILTER_HIT:
- snoop_dbg("Capturing packet");
- if (dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
- snoop_mode = 1;
- if ((snoop_mode == 0) ||
- unlikely(snoop_flags & SNOOP_USE_METADATA))
- md_len = sizeof(struct capture_md);
-
- s_packet = allocate_snoop_packet(packet_len, 0, md_len);
-
- if (unlikely(!s_packet)) {
- dd_dev_warn_ratelimited(dd, "Unable to allocate snoop/capture packet\n");
- goto inline_pio_out;
- }
-
- s_packet->total_len = packet_len + md_len;
-
- /* Fill in the metadata for the packet */
- if (md_len > 0) {
- memset(&md, 0, sizeof(struct capture_md));
- md.port = 1;
- md.dir = PKT_DIR_EGRESS;
- md.u.pbc = pbc;
- memcpy(s_packet->data, &md, md_len);
- }
-
- /* Add the packet data which is a single buffer */
- memcpy(s_packet->data + md_len, from, packet_len);
-
- snoop_list_add_tail(s_packet, dd);
-
- if (unlikely((snoop_flags & SNOOP_DROP_SEND) && snoop_mode)) {
- snoop_dbg("Dropping packet");
- return;
- }
- break;
- default:
- break;
- }
-
-inline_pio_out:
- pio_copy(dd, pbuf, pbc, from, count);
-}
diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c
deleted file mode 100644
index bd8771570..000000000
--- a/drivers/staging/rdma/hfi1/eprom.c
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#include <linux/delay.h>
-#include "hfi.h"
-#include "common.h"
-#include "eprom.h"
-
-/*
- * The EPROM is logically divided into three partitions:
- * partition 0: the first 128K, visible from PCI ROM BAR
- * partition 1: 4K config file (sector size)
- * partition 2: the rest
- */
-#define P0_SIZE (128 * 1024)
-#define P1_SIZE (4 * 1024)
-#define P1_START P0_SIZE
-#define P2_START (P0_SIZE + P1_SIZE)
-
-/* erase sizes supported by the controller */
-#define SIZE_4KB (4 * 1024)
-#define MASK_4KB (SIZE_4KB - 1)
-
-#define SIZE_32KB (32 * 1024)
-#define MASK_32KB (SIZE_32KB - 1)
-
-#define SIZE_64KB (64 * 1024)
-#define MASK_64KB (SIZE_64KB - 1)
-
-/* controller page size, in bytes */
-#define EP_PAGE_SIZE 256
-#define EEP_PAGE_MASK (EP_PAGE_SIZE - 1)
-
-/* controller commands */
-#define CMD_SHIFT 24
-#define CMD_NOP (0)
-#define CMD_PAGE_PROGRAM(addr) ((0x02 << CMD_SHIFT) | addr)
-#define CMD_READ_DATA(addr) ((0x03 << CMD_SHIFT) | addr)
-#define CMD_READ_SR1 ((0x05 << CMD_SHIFT))
-#define CMD_WRITE_ENABLE ((0x06 << CMD_SHIFT))
-#define CMD_SECTOR_ERASE_4KB(addr) ((0x20 << CMD_SHIFT) | addr)
-#define CMD_SECTOR_ERASE_32KB(addr) ((0x52 << CMD_SHIFT) | addr)
-#define CMD_CHIP_ERASE ((0x60 << CMD_SHIFT))
-#define CMD_READ_MANUF_DEV_ID ((0x90 << CMD_SHIFT))
-#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT))
-#define CMD_SECTOR_ERASE_64KB(addr) ((0xd8 << CMD_SHIFT) | addr)
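-
-/*
- * Each command places the 8-bit SPI opcode in bits 31:24 with an
- * optional 24-bit address below it; for example, CMD_READ_DATA(0x10000)
- * encodes as (0x03 << 24) | 0x10000 = 0x03010000.
- */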
-
-/* controller interface speeds */
-#define EP_SPEED_FULL 0x2 /* full speed */
-
-/* controller status register 1 bits */
-#define SR1_BUSY 0x1ull /* the BUSY bit in SR1 */
-
-/* sleep length while waiting for controller */
-#define WAIT_SLEEP_US 100 /* must be larger than 5 (see usage) */
-#define COUNT_DELAY_SEC(n) ((n) * (1000000 / WAIT_SLEEP_US))
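-/* e.g. COUNT_DELAY_SEC(200) = 200 * (1000000 / 100) = 2,000,000 polls */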
-
-/* GPIO pins */
-#define EPROM_WP_N BIT_ULL(14) /* EPROM write line */
-
-/*
- * How long to wait for the EPROM to become available, in ms.
- * The spec'd 32 Mb EPROM takes around 40s to erase and then write.
- * Double it for safety.
- */
-#define EPROM_TIMEOUT 80000 /* ms */
-
-/*
- * Turn on external enable line that allows writing on the flash.
- */
-static void write_enable(struct hfi1_devdata *dd)
-{
- /* raise signal */
- write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N);
- /* raise enable */
- write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N);
-}
-
-/*
- * Turn off external enable line that allows writing on the flash.
- */
-static void write_disable(struct hfi1_devdata *dd)
-{
- /* lower signal */
- write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N);
- /* lower enable */
- write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N);
-}
-
-/*
- * Wait for the device to become not busy. Must be called after all
- * write or erase operations.
- */
-static int wait_for_not_busy(struct hfi1_devdata *dd)
-{
- unsigned long count = 0;
- u64 reg;
- int ret = 0;
-
- /* starts page mode */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_SR1);
- while (1) {
-		usleep_range(WAIT_SLEEP_US - 5, WAIT_SLEEP_US + 5);
- count++;
- reg = read_csr(dd, ASIC_EEP_DATA);
- if ((reg & SR1_BUSY) == 0)
- break;
- /* 200s is the largest time for a 128Mb device */
- if (count > COUNT_DELAY_SEC(200)) {
- dd_dev_err(dd, "waited too long for SPI FLASH busy to clear - failing\n");
- ret = -ETIMEDOUT;
- break; /* break, not goto - must stop page mode */
- }
- }
-
- /* stop page mode with a NOP */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP);
-
- return ret;
-}
-
-/*
- * Read the device ID from the SPI controller.
- */
-static u32 read_device_id(struct hfi1_devdata *dd)
-{
- /* read the Manufacture Device ID */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_MANUF_DEV_ID);
- return (u32)read_csr(dd, ASIC_EEP_DATA);
-}
-
-/*
- * Erase the whole flash.
- */
-static int erase_chip(struct hfi1_devdata *dd)
-{
- int ret;
-
- write_enable(dd);
-
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_CHIP_ERASE);
- ret = wait_for_not_busy(dd);
-
- write_disable(dd);
-
- return ret;
-}
-
-/*
- * Erase a range.
- */
-static int erase_range(struct hfi1_devdata *dd, u32 start, u32 len)
-{
- u32 end = start + len;
- int ret = 0;
-
- if (end < start)
- return -EINVAL;
-
- /* check the end points for the minimum erase */
- if ((start & MASK_4KB) || (end & MASK_4KB)) {
- dd_dev_err(dd,
- "%s: non-aligned range (0x%x,0x%x) for a 4KB erase\n",
- __func__, start, end);
- return -EINVAL;
- }
-
- write_enable(dd);
-
- while (start < end) {
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
- /* check in order of largest to smallest */
- if (((start & MASK_64KB) == 0) && (start + SIZE_64KB <= end)) {
- write_csr(dd, ASIC_EEP_ADDR_CMD,
- CMD_SECTOR_ERASE_64KB(start));
- start += SIZE_64KB;
- } else if (((start & MASK_32KB) == 0) &&
- (start + SIZE_32KB <= end)) {
- write_csr(dd, ASIC_EEP_ADDR_CMD,
- CMD_SECTOR_ERASE_32KB(start));
- start += SIZE_32KB;
- } else { /* 4KB will work */
- write_csr(dd, ASIC_EEP_ADDR_CMD,
- CMD_SECTOR_ERASE_4KB(start));
- start += SIZE_4KB;
- }
- ret = wait_for_not_busy(dd);
- if (ret)
- goto done;
- }
-
-done:
- write_disable(dd);
-
- return ret;
-}
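-
-/*
- * The loop above greedily picks the largest aligned erase that fits; for
- * example, erasing [0x1000, 0x21000) issues seven 4KB erases up to
- * 0x8000, one 32KB erase up to 0x10000, one 64KB erase up to 0x20000,
- * and one final 4KB erase: ten commands instead of thirty-two 4KB
- * erases.
- */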
-
-/*
- * Read a 256 byte (64 dword) EPROM page.
- * All callers have verified the offset is at a page boundary.
- */
-static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result)
-{
- int i;
-
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset));
- for (i = 0; i < EP_PAGE_SIZE / sizeof(u32); i++)
- result[i] = (u32)read_csr(dd, ASIC_EEP_DATA);
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */
-}
-
-/*
- * Read length bytes starting at offset. Copy to user address addr.
- */
-static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
-{
- u32 offset;
- u32 buffer[EP_PAGE_SIZE / sizeof(u32)];
- int ret = 0;
-
- /* reject anything not on an EPROM page boundary */
- if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
- return -EINVAL;
-
- for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
- read_page(dd, start + offset, buffer);
- if (copy_to_user((void __user *)(addr + offset),
- buffer, EP_PAGE_SIZE)) {
- ret = -EFAULT;
- goto done;
- }
- }
-
-done:
- return ret;
-}
-
-/*
- * Write a 256 byte (64 dword) EPROM page.
- * All callers have verified the offset is at a page boundary.
- */
-static int write_page(struct hfi1_devdata *dd, u32 offset, u32 *data)
-{
- int i;
-
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
- write_csr(dd, ASIC_EEP_DATA, data[0]);
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_PAGE_PROGRAM(offset));
- for (i = 1; i < EP_PAGE_SIZE / sizeof(u32); i++)
- write_csr(dd, ASIC_EEP_DATA, data[i]);
- /* will close the open page */
- return wait_for_not_busy(dd);
-}
-
-/*
- * Write length bytes starting at offset. Read from user address addr.
- */
-static int write_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
-{
- u32 offset;
- u32 buffer[EP_PAGE_SIZE / sizeof(u32)];
- int ret = 0;
-
- /* reject anything not on an EPROM page boundary */
- if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
- return -EINVAL;
-
- write_enable(dd);
-
- for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
- if (copy_from_user(buffer, (void __user *)(addr + offset),
- EP_PAGE_SIZE)) {
- ret = -EFAULT;
- goto done;
- }
- ret = write_page(dd, start + offset, buffer);
- if (ret)
- goto done;
- }
-
-done:
- write_disable(dd);
- return ret;
-}
-
-/* convert a range composite to a length, in bytes */
-static inline u32 extract_rlen(u32 composite)
-{
- return (composite & 0xffff) * EP_PAGE_SIZE;
-}
-
-/* convert a range composite to a start, in bytes */
-static inline u32 extract_rstart(u32 composite)
-{
- return (composite >> 16) * EP_PAGE_SIZE;
-}
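-
-/*
- * Both halves of the composite are in 256-byte EPROM pages; for example,
- * a cmd->len of 0x00100004 decodes to rstart = 16 * 256 = 4096 and
- * rlen = 4 * 256 = 1024 bytes.
- */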
-
-/*
- * Perform the given operation on the EPROM. Called from user space. The
- * user credentials have already been checked.
- *
- * Return 0 on success, -ERRNO on error
- */
-int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd)
-{
- struct hfi1_devdata *dd;
- u32 dev_id;
- u32 rlen; /* range length */
- u32 rstart; /* range start */
- int i_minor;
- int ret = 0;
-
- /*
- * Map the device file to device data using the relative minor.
- * The device file minor number is the unit number + 1. 0 is
- * the generic device file - reject it.
- */
- i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
- if (i_minor <= 0)
- return -EINVAL;
- dd = hfi1_lookup(i_minor - 1);
- if (!dd) {
- pr_err("%s: cannot find unit %d!\n", __func__, i_minor);
- return -EINVAL;
- }
-
- /* some devices do not have an EPROM */
- if (!dd->eprom_available)
- return -EOPNOTSUPP;
-
- ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
- if (ret) {
- dd_dev_err(dd, "%s: unable to acquire EPROM resource\n",
- __func__);
- goto done_asic;
- }
-
- dd_dev_info(dd, "%s: cmd: type %d, len 0x%x, addr 0x%016llx\n",
- __func__, cmd->type, cmd->len, cmd->addr);
-
- switch (cmd->type) {
- case HFI1_CMD_EP_INFO:
- if (cmd->len != sizeof(u32)) {
- ret = -ERANGE;
- break;
- }
- dev_id = read_device_id(dd);
- /* addr points to a u32 user buffer */
- if (copy_to_user((void __user *)cmd->addr, &dev_id,
- sizeof(u32)))
- ret = -EFAULT;
- break;
-
- case HFI1_CMD_EP_ERASE_CHIP:
- ret = erase_chip(dd);
- break;
-
- case HFI1_CMD_EP_ERASE_RANGE:
- rlen = extract_rlen(cmd->len);
- rstart = extract_rstart(cmd->len);
- ret = erase_range(dd, rstart, rlen);
- break;
-
- case HFI1_CMD_EP_READ_RANGE:
- rlen = extract_rlen(cmd->len);
- rstart = extract_rstart(cmd->len);
- ret = read_length(dd, rstart, rlen, cmd->addr);
- break;
-
- case HFI1_CMD_EP_WRITE_RANGE:
- rlen = extract_rlen(cmd->len);
- rstart = extract_rstart(cmd->len);
- ret = write_length(dd, rstart, rlen, cmd->addr);
- break;
-
- default:
- dd_dev_err(dd, "%s: unexpected command %d\n",
- __func__, cmd->type);
- ret = -EINVAL;
- break;
- }
-
- release_chip_resource(dd, CR_EPROM);
-done_asic:
- return ret;
-}
-
-/*
- * Initialize the EPROM handler.
- */
-int eprom_init(struct hfi1_devdata *dd)
-{
- int ret = 0;
-
- /* only the discrete chip has an EPROM */
- if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
- return 0;
-
- /*
- * It is OK if both HFIs reset the EPROM as long as they don't
- * do it at the same time.
- */
- ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
- if (ret) {
- dd_dev_err(dd,
- "%s: unable to acquire EPROM resource, no EPROM support\n",
- __func__);
- goto done_asic;
- }
-
- /* reset EPROM to be sure it is in a good state */
-
- /* set reset */
- write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
- /* clear reset, set speed */
- write_csr(dd, ASIC_EEP_CTL_STAT,
- EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
-
- /* wake the device with command "release powerdown NoID" */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
-
- dd->eprom_available = true;
- release_chip_resource(dd, CR_EPROM);
-done_asic:
- return ret;
-}
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 012860b34..a5755358c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_AP_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index e5a6b7a70..77485235c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_CMD_C_
@@ -263,11 +258,11 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
- if (psurveyPara == NULL) {
+ if (!psurveyPara) {
kfree(ph2c);
return _FAIL;
}
@@ -350,7 +345,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL;
goto exit;
}
@@ -521,7 +516,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
/* prepare cmd parameter */
param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (param == NULL) {
+ if (!param) {
res = _FAIL;
goto exit;
}
@@ -530,7 +525,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
if (enqueue) {
/* need enqueue, prepare cmd_obj and enqueue */
cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
- if (cmdobj == NULL) {
+ if (!cmdobj) {
res = _FAIL;
kfree(param);
goto exit;
@@ -629,20 +624,20 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
clear_cam_entry(padapter, entry);
} else {
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_ATOMIC);
- if (psetstakey_para == NULL) {
+ if (!psetstakey_para) {
kfree(ph2c);
res = _FAIL;
goto exit;
}
psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp), GFP_ATOMIC);
- if (psetstakey_rsp == NULL) {
+ if (!psetstakey_rsp) {
kfree(ph2c);
kfree(psetstakey_para);
res = _FAIL;
@@ -676,13 +671,13 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_KERNEL);
- if (paddbareq_parm == NULL) {
+ if (!paddbareq_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -713,13 +708,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -757,7 +752,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
/* prepare cmd parameter */
setChannelPlan_param = kzalloc(sizeof(struct SetChannelPlan_param), GFP_KERNEL);
- if (setChannelPlan_param == NULL) {
+ if (!setChannelPlan_param) {
res = _FAIL;
goto exit;
}
@@ -766,7 +761,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
if (enqueue) {
/* need enqueue, prepare cmd_obj and enqueue */
pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmdobj == NULL) {
+ if (!pcmdobj) {
kfree(setChannelPlan_param);
res = _FAIL;
goto exit;
@@ -925,13 +920,13 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
if (enqueue) {
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -968,13 +963,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
u8 res = _SUCCESS;
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1010,13 +1005,13 @@ u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
if (enqueue) {
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1108,13 +1103,13 @@ u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index 93e898d59..db5c952ac 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_DEBUG_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 19f11d04d..fbce1f7e6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_EFUSE_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index f4e4baf60..0b0d78fe8 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _IEEE80211_C
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index cf60717a6..f85a6abec 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_IOCTL_SET_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index a645a620e..1456499b8 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_MLME_C_
@@ -1584,13 +1579,13 @@ int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
int res = _SUCCESS;
pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL; /* try again */
goto exit;
}
psetauthparm = kzalloc(sizeof(struct setauth_parm), GFP_KERNEL);
- if (psetauthparm == NULL) {
+ if (!psetauthparm) {
kfree(pcmd);
res = _FAIL;
goto exit;
@@ -1621,11 +1616,11 @@ int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, in
int res = _SUCCESS;
pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd == NULL)
+ if (!pcmd)
return _FAIL; /* try again */
psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL);
- if (psetkeyparm == NULL) {
+ if (!psetkeyparm) {
res = _FAIL;
goto err_free_cmd;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 591a9127b..7f32b39e5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_MLME_EXT_C_
@@ -606,8 +601,6 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
pattrib->last_txcmdsz = pattrib->pktlen;
dump_mgntframe(padapter, pmgntframe);
-
- return;
}
static int issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, bool wait_ack)
@@ -888,8 +881,6 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
rtw_wep_encrypt(padapter, (u8 *)pmgntframe);
DBG_88E("%s\n", __func__);
dump_mgntframe(padapter, pmgntframe);
-
- return;
}
@@ -1212,8 +1203,6 @@ exit:
rtw_buf_update(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len, (u8 *)pwlanhdr, pattrib->pktlen);
else
rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
-
- return;
}
/* when wait_ack is true, this function should be called at process context */
@@ -2105,7 +2094,6 @@ static void site_survey(struct adapter *padapter)
issue_action_BSSCoexistPacket(padapter);
issue_action_BSSCoexistPacket(padapter);
}
- return;
}
/* collect bss info from Beacon and Probe request/response frames. */
@@ -4295,12 +4283,12 @@ void report_survey_event(struct adapter *padapter,
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct survey_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4332,8 +4320,6 @@ void report_survey_event(struct adapter *padapter,
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
pmlmeext->sitesurvey_res.bss_cnt++;
-
- return;
}
void report_surveydone_event(struct adapter *padapter)
@@ -4347,12 +4333,12 @@ void report_surveydone_event(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4377,8 +4363,6 @@ void report_surveydone_event(struct adapter *padapter)
DBG_88E("survey done event(%x)\n", psurveydone_evt->bss_cnt);
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
void report_join_res(struct adapter *padapter, int res)
@@ -4393,12 +4377,12 @@ void report_join_res(struct adapter *padapter, int res)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4429,8 +4413,6 @@ void report_join_res(struct adapter *padapter, int res)
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
@@ -4446,12 +4428,12 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4486,8 +4468,6 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
DBG_88E("report_del_sta_event: delete STA, mac_id =%d\n", mac_id);
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx)
@@ -4501,12 +4481,12 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4532,8 +4512,6 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
DBG_88E("report_add_sta_event: add STA\n");
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
@@ -4917,11 +4895,11 @@ void survey_timer_hdl(unsigned long data)
}
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
goto exit_survey_timer_hdl;
psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
- if (psurveyPara == NULL) {
+ if (!psurveyPara) {
kfree(ph2c);
goto exit_survey_timer_hdl;
}
@@ -4969,7 +4947,6 @@ void link_timer_hdl(unsigned long data)
issue_assocreq(padapter);
set_link_timer(pmlmeext, REASSOC_TO);
}
- return;
}
void addba_timer_hdl(unsigned long data)
@@ -5485,7 +5462,7 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 5e1ef9fdc..59c6d8ab6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_PWRCTRL_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 5f53aa1cf..977bb2532 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_RECV_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_rf.c b/drivers/staging/rtl8188eu/core/rtw_rf.c
index 4ad2d8f63..3fc1a8fd3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_rf.c
+++ b/drivers/staging/rtl8188eu/core/rtw_rf.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_RF_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index b781ccf45..442a614a3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_SECURITY_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_sreset.c b/drivers/staging/rtl8188eu/core/rtw_sreset.c
index e725a4708..13a5bf473 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sreset.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sreset.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <rtw_sreset.h>
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 78a9b9bf3..a71e25294 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_STA_MGT_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 83096696c..4410fe8d7 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_WLAN_UTIL_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index f2dd7a60f..e0a5567f5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_XMIT_C_
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
index a108e8032..201c15b07 100644
--- a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
@@ -557,7 +557,7 @@ int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 macid)
u8 WirelessMode = 0xFF; /* invalid value */
u8 max_rate_idx = 0x13; /* MCS7 */
- if (dm_odm->pWirelessMode != NULL)
+ if (dm_odm->pWirelessMode)
WirelessMode = *(dm_odm->pWirelessMode);
if (WirelessMode != 0xFF) {
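Note on the hunk above: it drops the explicit NULL comparison in favor of the pointer-truth test that kernel coding style (and checkpatch) prefers; behavior is identical. A minimal standalone sketch of the idiom, with a hypothetical helper name, not part of this patch:

#include <linux/types.h>	/* u8; assumes a kernel build environment */

/* Return the wireless mode, or the 0xFF invalid-value sentinel used in the
 * hunk above. Testing pointer truth directly compiles to the same code as
 * the "!= NULL" form; the short spelling is simply the checkpatch-clean one.
 */
static u8 get_wireless_mode(const u8 *mode_ptr)
{
	if (!mode_ptr)		/* preferred over: if (mode_ptr == NULL) */
		return 0xFF;
	return *mode_ptr;
}
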
diff --git a/drivers/staging/rtl8188eu/hal/bb_cfg.c b/drivers/staging/rtl8188eu/hal/bb_cfg.c
index c2ad6a3b9..cce1ea259 100644
--- a/drivers/staging/rtl8188eu/hal/bb_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/bb_cfg.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
-* You should have received a copy of the GNU General Public License along with
-* this program; if not, write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
-*
-*
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/fw.c b/drivers/staging/rtl8188eu/hal/fw.c
index 8e904bd8e..e1964d65b 100644
--- a/drivers/staging/rtl8188eu/hal/fw.c
+++ b/drivers/staging/rtl8188eu/hal/fw.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/staging/rtl8188eu/hal/hal_com.c b/drivers/staging/rtl8188eu/hal/hal_com.c
index 3871cda2e..960cc406d 100644
--- a/drivers/staging/rtl8188eu/hal/hal_com.c
+++ b/drivers/staging/rtl8188eu/hal/hal_com.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <osdep_service.h>
#include <drv_types.h>
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
index 85c17ef94..085f0fbd0 100644
--- a/drivers/staging/rtl8188eu/hal/hal_intf.c
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _HAL_INTF_C_
@@ -186,7 +181,7 @@ s32 rtw_hal_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
{
- if (adapt->HalFunc.init_xmit_priv != NULL)
+ if (adapt->HalFunc.init_xmit_priv)
return adapt->HalFunc.init_xmit_priv(adapt);
return _FAIL;
}
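The same spelling change lands in the HAL dispatch path above: the guard on the optional init_xmit_priv hook keeps its meaning, only the "!= NULL" is dropped. Sketched in isolation below, with hypothetical struct and helper names, assuming the driver's convention of returning _FAIL on error:

#define _FAIL 0	/* assumption: mirrors the driver's failure return code */

struct hal_ops {
	int (*init_xmit_priv)(void *adapt);	/* optional hook; may be NULL */
};

/* Dispatch an optional HAL hook: call it when the vendor HAL provides one,
 * otherwise report failure, mirroring rtw_hal_init_xmit_priv() above. */
static int dispatch_init_xmit_priv(struct hal_ops *ops, void *adapt)
{
	if (ops->init_xmit_priv)	/* pointer-truth test, post-cleanup */
		return ops->init_xmit_priv(adapt);
	return _FAIL;
}
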
diff --git a/drivers/staging/rtl8188eu/hal/mac_cfg.c b/drivers/staging/rtl8188eu/hal/mac_cfg.c
index 0bc1b2152..6ed5e15ce 100644
--- a/drivers/staging/rtl8188eu/hal/mac_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/mac_cfg.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
-* You should have received a copy of the GNU General Public License along with
-* this program; if not, write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
-*
-*
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 8d2316b9e..57a127501 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/* include files */
diff --git a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
index 28b9f7f59..0555e42a3 100644
--- a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/* include files */
diff --git a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
index c0242a095..dd9b902c8 100644
--- a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
+++ b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index ae42b4492..a83bbea9b 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_PHYCFG_C_
diff --git a/drivers/staging/rtl8188eu/hal/pwrseq.c b/drivers/staging/rtl8188eu/hal/pwrseq.c
index 20dce42ce..d92a34ea8 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseq.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseq.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include "pwrseq.h"
diff --git a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
index b76b0f5d6..2867864bb 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
******************************************************************************/
#include <pwrseqcmd.h>
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 38845d17d..1596274ee 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
******************************************************************************/
#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index 44945427c..453f9e729 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
-* You should have received a copy of the GNU General Public License along with
-* this program; if not, write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
-*
-*
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 580876313..2422c0297 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_CMD_C_
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index f9919a94a..81f293187 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/* */
/* Description: */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 2592bc298..0b444fd3e 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _HAL_INIT_C_
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
index 53cf3baf4..f110c961d 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_REDESC_C_
@@ -45,7 +40,7 @@ static void process_link_qual(struct adapter *padapter,
struct rx_pkt_attrib *pattrib;
struct signal_stat *signal_stat;
- if (prframe == NULL || padapter == NULL)
+ if (!prframe || !padapter)
return;
pattrib = &prframe->attrib;
@@ -64,7 +59,7 @@ static void process_link_qual(struct adapter *padapter,
void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe)
{
- struct recv_frame *precvframe = (struct recv_frame *)prframe;
+ struct recv_frame *precvframe = prframe;
/* Check RSSI */
process_rssi(padapter, precvframe);
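The second rtl8188e_rxdesc.c hunk above removes a cast that C never needed: void * converts implicitly to any object pointer type, so the explicit (struct recv_frame *) was dead weight. A standalone illustration, with a trimmed-down hypothetical struct:

struct recv_frame {
	int rssi;	/* stand-in for the real rx_pkt_attrib fields */
};

static void process_phy_info(void *prframe)
{
	/* No cast needed: in C (unlike C++) a void * assigns directly to
	 * struct recv_frame *, which is why the hunk above drops it. */
	struct recv_frame *precvframe = prframe;

	precvframe->rssi = 0;	/* placeholder for the real RSSI processing */
}
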
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
index a6ba53b48..460a20558 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_XMIT_C_
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
index 564cf53bf..d9e677ef8 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index d6d009aaf..255d6f215 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188EU_RECV_C_
#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index c96d80487..ec21d8c82 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_XMIT_C_
#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 07a61b827..363f3a34d 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _HCI_HAL_INIT_C_
@@ -62,8 +57,8 @@ static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumInPip
_ConfigNormalChipOutEP_8188E(adapt, NumOutPipe);
/* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */
- if (1 == haldata->OutEpNumber) {
- if (1 != NumInPipe)
+ if (haldata->OutEpNumber == 1) {
+ if (NumInPipe != 1)
return result;
}
@@ -179,7 +174,7 @@ static void _InitQueueReservedPage(struct adapter *Adapter)
if (haldata->OutEpQueueSel & TX_SELE_LQ)
numLQ = 0x1C;
- /* NOTE: This step shall be proceed before writting REG_RQPN. */
+ /* NOTE: This step shall be proceed before writing REG_RQPN. */
if (haldata->OutEpQueueSel & TX_SELE_NQ)
numNQ = 0x1C;
value8 = (u8)_NPQ(numNQ);
@@ -457,7 +452,8 @@ static void _InitRetryFunction(struct adapter *Adapter)
* When Who Remark
* 12/10/2010 MHC Separate to smaller function.
*
- *---------------------------------------------------------------------------*/
+ *---------------------------------------------------------------------------
+ */
static void usb_AggSettingTxUpdate(struct adapter *Adapter)
{
struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
@@ -489,7 +485,8 @@ static void usb_AggSettingTxUpdate(struct adapter *Adapter)
* When Who Remark
* 12/10/2010 MHC Separate to smaller function.
*
- *---------------------------------------------------------------------------*/
+ *---------------------------------------------------------------------------
+ */
static void
usb_AggSettingRxUpdate(
struct adapter *Adapter
@@ -655,7 +652,8 @@ static void _InitAntenna_Selection(struct adapter *Adapter)
* Revised History:
* When Who Remark
* 08/23/2010 MHC HW suspend mode switch test..
- *---------------------------------------------------------------------------*/
+ *---------------------------------------------------------------------------
+ */
enum rt_rf_power_state RfOnOffDetect(struct adapter *adapt)
{
u8 val8;
@@ -687,11 +685,9 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
#define HAL_INIT_PROFILE_TAG(stage) do {} while (0)
-
HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BEGIN);
if (Adapter->pwrctrlpriv.bkeepfwalive) {
-
if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
rtl88eu_phy_iq_calibrate(Adapter, true);
} else {
@@ -715,9 +711,8 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
/* Save target channel */
haldata->CurrentChannel = 6;/* default set to 6 */
- if (pwrctrlpriv->reg_rfoff) {
+ if (pwrctrlpriv->reg_rfoff)
pwrctrlpriv->rf_pwrstate = rf_off;
- }
/* 2010/08/09 MH We need to check if we need to turnon or off RF after detecting */
/* HW GPIO pin. Before PHY_RFConfig8192C. */
@@ -749,10 +744,9 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
DBG_88E("%s: Download Firmware failed!!\n", __func__);
Adapter->bFWReady = false;
return status;
- } else {
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializeadapt8192CSdio(): Download Firmware Success!!\n"));
- Adapter->bFWReady = true;
}
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializeadapt8192CSdio(): Download Firmware Success!!\n"));
+ Adapter->bFWReady = true;
}
rtl8188e_InitializeFirmwareVars(Adapter);
@@ -878,7 +872,7 @@ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_IQK);
/* 2010/08/26 MH Merge from 8192CE. */
if (pwrctrlpriv->rf_pwrstate == rf_on) {
if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
- rtl88eu_phy_iq_calibrate(Adapter, true);
+ rtl88eu_phy_iq_calibrate(Adapter, true);
} else {
rtl88eu_phy_iq_calibrate(Adapter, false);
haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
@@ -905,7 +899,6 @@ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_END);
DBG_88E("%s in %dms\n", __func__,
jiffies_to_msecs(jiffies - init_start_time));
-
return status;
}
@@ -968,6 +961,7 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
haldata->bMacPwrCtrlOn = false;
Adapter->bFWReady = false;
}
+
static void rtl8192cu_hw_power_down(struct adapter *adapt)
{
/* 2010/-8/09 MH For power down module, we need to enable register block contrl reg at 0x1c. */
@@ -980,7 +974,6 @@ static void rtl8192cu_hw_power_down(struct adapter *adapt)
static u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
{
-
DBG_88E("==> %s\n", __func__);
usb_write32(Adapter, REG_HIMR_88E, IMR_DISABLED_88E);
@@ -999,14 +992,14 @@ static u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
}
}
return _SUCCESS;
- }
+}
static unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
{
u8 i;
struct recv_buf *precvbuf;
uint status;
- struct recv_priv *precvpriv = &(Adapter->recvpriv);
+ struct recv_priv *precvpriv = &Adapter->recvpriv;
status = _SUCCESS;
@@ -1116,7 +1109,6 @@ readAdapterInfo_8188EU(
Hal_ReadAntennaDiversity88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
Hal_EfuseParseBoardType88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
Hal_ReadThermalMeter_88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
-
}
static void _ReadPROMContent(
@@ -1212,7 +1204,7 @@ static void hw_var_set_opmode(struct adapter *Adapter, u8 variable, u8 *val)
StopTxBeacon(Adapter);
usb_write8(Adapter, REG_BCN_CTRL, 0x19);/* disable atim wnd */
- } else if ((mode == _HW_STATE_ADHOC_)) {
+ } else if (mode == _HW_STATE_ADHOC_) {
ResumeTxBeacon(Adapter);
usb_write8(Adapter, REG_BCN_CTRL, 0x1a);
} else if (mode == _HW_STATE_AP_) {
@@ -1363,7 +1355,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
u64 tsf;
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
tsf = pmlmeext->TSFValue - rtw_modular64(pmlmeext->TSFValue, (pmlmeinfo->bcn_interval*1024)) - 1024; /* us */
@@ -1420,7 +1412,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL) | BIT(4));
} else { /* sitesurvey done */
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if ((is_client_associated_to_ap(Adapter)) ||
((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE)) {
@@ -1490,7 +1482,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
u8 u1bAIFS, aSifsTime;
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
usb_write8(Adapter, REG_SLOT, val[0]);
@@ -1790,7 +1782,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
}
break;
case HW_VAR_H2C_MEDIA_STATUS_RPT:
- rtl8188e_set_FwMediaStatus_cmd(Adapter , (*(__le16 *)val));
+ rtl8188e_set_FwMediaStatus_cmd(Adapter, (*(__le16 *)val));
break;
case HW_VAR_BCN_VALID:
/* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2, write 1 to clear, Clear by sw */
@@ -1855,7 +1847,6 @@ static void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
default:
break;
}
-
}
/* */
@@ -1904,19 +1895,19 @@ GetHalDefVar8188EUsb(
case HAL_DEF_RA_DECISION_RATE:
{
u8 MacID = *((u8 *)pValue);
- *((u8 *)pValue) = ODM_RA_GetDecisionRate_8188E(&(haldata->odmpriv), MacID);
+ *((u8 *)pValue) = ODM_RA_GetDecisionRate_8188E(&haldata->odmpriv, MacID);
}
break;
case HAL_DEF_RA_SGI:
{
u8 MacID = *((u8 *)pValue);
- *((u8 *)pValue) = ODM_RA_GetShortGI_8188E(&(haldata->odmpriv), MacID);
+ *((u8 *)pValue) = ODM_RA_GetShortGI_8188E(&haldata->odmpriv, MacID);
}
break;
case HAL_DEF_PT_PWR_STATUS:
{
u8 MacID = *((u8 *)pValue);
- *((u8 *)pValue) = ODM_RA_GetHwPwrStatus_8188E(&(haldata->odmpriv), MacID);
+ *((u8 *)pValue) = ODM_RA_GetHwPwrStatus_8188E(&haldata->odmpriv, MacID);
}
break;
case HW_VAR_MAX_RX_AMPDU_FACTOR:
@@ -1939,7 +1930,7 @@ GetHalDefVar8188EUsb(
break;
case HW_DEF_ODM_DBG_FLAG:
{
- struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
+ struct odm_dm_struct *dm_ocm = &haldata->odmpriv;
pr_info("dm_ocm->DebugComponents = 0x%llx\n", dm_ocm->DebugComponents);
}
break;
@@ -1967,8 +1958,8 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
struct sta_info *psta;
struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
if (mac_id >= NUM_STA) /* CAM_SIZE */
return;
@@ -1981,8 +1972,8 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
networkType = judge_network_type(adapt, cur_network->SupportedRates, supportRateNum) & 0xf;
raid = networktype_to_raid(networkType);
mask = update_supported_rate(cur_network->SupportedRates, supportRateNum);
- mask |= (pmlmeinfo->HT_enable) ? update_MSC_rate(&(pmlmeinfo->HT_caps)) : 0;
- if (support_short_GI(adapt, &(pmlmeinfo->HT_caps)))
+ mask |= (pmlmeinfo->HT_enable) ? update_MSC_rate(&pmlmeinfo->HT_caps) : 0;
+ if (support_short_GI(adapt, &pmlmeinfo->HT_caps))
shortGIrate = true;
break;
case 1:/* for broadcast/multicast */
@@ -2023,8 +2014,8 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
static void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
{
u32 value32;
- struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u32 bcn_ctrl_reg = REG_BCN_CTRL;
/* reset TSF, enable update TSF, correcting TSF On Beacon */
@@ -2083,7 +2074,7 @@ void rtl8188eu_set_hal_ops(struct adapter *adapt)
adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
- if (adapt->HalData == NULL)
+ if (!adapt->HalData)
DBG_88E("cant not alloc memory for HAL DATA\n");
halfunc->hal_power_on = rtl8188eu_InitPowerOn;
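The usb_halinit.c diff above batches several checkpatch-style fixes in one pass: constant-on-the-left ("Yoda") comparisons flipped, braces dropped from single-statement branches, blank-line noise removed, a stray closing-brace indent fixed, and an else branch eliminated where the if branch already returns. Two small sketches of the before/after control-flow shape, under hypothetical names, not taken from the patch:

/* Flipped comparisons: was "if (1 == out_ep_num)" / "if (1 != num_in_pipe)". */
static int check_pipes(int out_ep_num, int num_in_pipe)
{
	if (out_ep_num == 1) {
		if (num_in_pipe != 1)
			return 0;	/* unsupported pipe layout */
	}
	return 1;
}

/* Redundant else after return: the failure path returns early, so the
 * success path no longer needs an else block or its extra indentation. */
static int record_fw_status(int download_ok, int *fw_ready)
{
	if (!download_ok) {
		*fw_ready = 0;
		return -1;	/* early return; no else needed below */
	}
	*fw_ready = 1;		/* success path, un-indented by the cleanup */
	return 0;
}
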
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
index 2670d6b6a..8990748a1 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __INC_HAL8188EPHYCFG_H__
#define __INC_HAL8188EPHYCFG_H__
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
index 9f2969bf8..344c73d10 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __INC_HAL8188EPHYREG_H__
#define __INC_HAL8188EPHYREG_H__
diff --git a/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h b/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
index 1bf9bc70a..dbb55247b 100644
--- a/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
+++ b/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
-* You should have received a copy of the GNU General Public License along with
-* this program; if not, write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
-*
-*
******************************************************************************/
#ifndef __INC_FW_8188E_HW_IMG_H
diff --git a/drivers/staging/rtl8188eu/include/HalVerDef.h b/drivers/staging/rtl8188eu/include/HalVerDef.h
index 6f2b2a436..d244efff3 100644
--- a/drivers/staging/rtl8188eu/include/HalVerDef.h
+++ b/drivers/staging/rtl8188eu/include/HalVerDef.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HAL_VERSION_DEF_H__
#define __HAL_VERSION_DEF_H__
diff --git a/drivers/staging/rtl8188eu/include/basic_types.h b/drivers/staging/rtl8188eu/include/basic_types.h
index 3fb691daa..2c1676d2a 100644
--- a/drivers/staging/rtl8188eu/include/basic_types.h
+++ b/drivers/staging/rtl8188eu/include/basic_types.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __BASIC_TYPES_H__
#define __BASIC_TYPES_H__
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index dcb032b6c..55506a7da 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/*-----------------------------------------------------------------------------
diff --git a/drivers/staging/rtl8188eu/include/fw.h b/drivers/staging/rtl8188eu/include/fw.h
index 7884d8f65..b016f32a8 100644
--- a/drivers/staging/rtl8188eu/include/fw.h
+++ b/drivers/staging/rtl8188eu/include/fw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/staging/rtl8188eu/include/hal_com.h b/drivers/staging/rtl8188eu/include/hal_com.h
index 47715d949..aaf444733 100644
--- a/drivers/staging/rtl8188eu/include/hal_com.h
+++ b/drivers/staging/rtl8188eu/include/hal_com.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HAL_COMMON_H__
#define __HAL_COMMON_H__
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 1b1c10292..eaf939bd4 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HAL_INTF_H__
#define __HAL_INTF_H__
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index f8f5eb6b7..d8284c84f 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __IEEE80211_H
#define __IEEE80211_H
diff --git a/drivers/staging/rtl8188eu/include/mlme_osdep.h b/drivers/staging/rtl8188eu/include/mlme_osdep.h
index ae1722c67..5a35b0866 100644
--- a/drivers/staging/rtl8188eu/include/mlme_osdep.h
+++ b/drivers/staging/rtl8188eu/include/mlme_osdep.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __MLME_OSDEP_H_
#define __MLME_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/include/mp_custom_oid.h b/drivers/staging/rtl8188eu/include/mp_custom_oid.h
index 6fa52cf99..1a06ee6ad 100644
--- a/drivers/staging/rtl8188eu/include/mp_custom_oid.h
+++ b/drivers/staging/rtl8188eu/include/mp_custom_oid.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __CUSTOM_OID_H
#define __CUSTOM_OID_H
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index af781c7cd..dbebf17f3 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/include/odm_HWConfig.h b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
index ef792bfd5..da7325d59 100644
--- a/drivers/staging/rtl8188eu/include/odm_HWConfig.h
+++ b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
*
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/include/odm_RTL8188E.h b/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
index 14dce6c4b..72b4db67a 100644
--- a/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
+++ b/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __ODM_RTL8188E_H__
#define __ODM_RTL8188E_H__
diff --git a/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
index 5a61f902b..c82c09013 100644
--- a/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
+++ b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __ODM_REGDEFINE11N_H__
diff --git a/drivers/staging/rtl8188eu/include/odm_debug.h b/drivers/staging/rtl8188eu/include/odm_debug.h
index e9390963d..52e51f19f 100644
--- a/drivers/staging/rtl8188eu/include/odm_debug.h
+++ b/drivers/staging/rtl8188eu/include/odm_debug.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/include/odm_precomp.h b/drivers/staging/rtl8188eu/include/odm_precomp.h
index 0f236da09..9e5fe1777 100644
--- a/drivers/staging/rtl8188eu/include/odm_precomp.h
+++ b/drivers/staging/rtl8188eu/include/odm_precomp.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __ODM_PRECOMP_H__
diff --git a/drivers/staging/rtl8188eu/include/odm_reg.h b/drivers/staging/rtl8188eu/include/odm_reg.h
index 7f10b695c..3405a44a1 100644
--- a/drivers/staging/rtl8188eu/include/odm_reg.h
+++ b/drivers/staging/rtl8188eu/include/odm_reg.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/* */
/* File Name: odm_reg.h */
diff --git a/drivers/staging/rtl8188eu/include/odm_types.h b/drivers/staging/rtl8188eu/include/odm_types.h
index c1355b959..3474a9c72 100644
--- a/drivers/staging/rtl8188eu/include/odm_types.h
+++ b/drivers/staging/rtl8188eu/include/odm_types.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __ODM_TYPES_H__
#define __ODM_TYPES_H__
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
index 1521744d6..54fca7982 100644
--- a/drivers/staging/rtl8188eu/include/osdep_intf.h
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __OSDEP_INTF_H_
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 22de53d65..5475956c5 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __OSDEP_SERVICE_H_
#define __OSDEP_SERVICE_H_
diff --git a/drivers/staging/rtl8188eu/include/pwrseq.h b/drivers/staging/rtl8188eu/include/pwrseq.h
index 9dbf8435f..afd61cf4c 100644
--- a/drivers/staging/rtl8188eu/include/pwrseq.h
+++ b/drivers/staging/rtl8188eu/include/pwrseq.h
@@ -12,11 +12,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HAL8188EPWRSEQ_H__
diff --git a/drivers/staging/rtl8188eu/include/pwrseqcmd.h b/drivers/staging/rtl8188eu/include/pwrseqcmd.h
index 468a3fb28..c4a919ea1 100644
--- a/drivers/staging/rtl8188eu/include/pwrseqcmd.h
+++ b/drivers/staging/rtl8188eu/include/pwrseqcmd.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HALPWRSEQCMD_H__
#define __HALPWRSEQCMD_H__
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
index fdeb603b6..cad31587c 100644
--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RECV_OSDEP_H_
#define __RECV_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
index f813ce056..4d7d80465 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_CMD_H__
#define __RTL8188E_CMD_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
index 5e0ac31ef..4190112a5 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_DM_H__
#define __RTL8188E_DM_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index e96584a3e..ed3d56538 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_HAL_H__
#define __RTL8188E_HAL_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_led.h b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
index c0147e73c..fca6d8c81 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_led.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_LED_H__
#define __RTL8188E_LED_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index 5fed30d38..54048bc82 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_RECV_H__
#define __RTL8188E_RECV_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
index beeee4a6b..fb82f663b 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
*******************************************************************************/
#ifndef __RTL8188E_SPEC_H__
#define __RTL8188E_SPEC_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
index 0b96d42e2..65a63df20 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_XMIT_H__
#define __RTL8188E_XMIT_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_android.h b/drivers/staging/rtl8188eu/include/rtw_android.h
index e85bf1ff0..e81ee92b0 100644
--- a/drivers/staging/rtl8188eu/include/rtw_android.h
+++ b/drivers/staging/rtl8188eu/include/rtw_android.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_ANDROID_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_ap.h b/drivers/staging/rtl8188eu/include/rtw_ap.h
index 6128ccce9..b820684bc 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ap.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ap.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_AP_H_
#define __RTW_AP_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
index 9e9f5f4af..08ca59217 100644
--- a/drivers/staging/rtl8188eu/include/rtw_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_CMD_H_
#define __RTW_CMD_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
index 971bf457f..7ed4cada7 100644
--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_DEBUG_H__
#define __RTW_DEBUG_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_eeprom.h b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
index 904fea1fa..5dd73841d 100644
--- a/drivers/staging/rtl8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_EEPROM_H__
#define __RTW_EEPROM_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_efuse.h b/drivers/staging/rtl8188eu/include/rtw_efuse.h
index 5660eed71..9bfb10c30 100644
--- a/drivers/staging/rtl8188eu/include/rtw_efuse.h
+++ b/drivers/staging/rtl8188eu/include/rtw_efuse.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_EFUSE_H__
#define __RTW_EFUSE_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_event.h b/drivers/staging/rtl8188eu/include/rtw_event.h
index 52151dc44..5c34e567d 100644
--- a/drivers/staging/rtl8188eu/include/rtw_event.h
+++ b/drivers/staging/rtl8188eu/include/rtw_event.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_EVENT_H_
#define _RTW_EVENT_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ht.h b/drivers/staging/rtl8188eu/include/rtw_ht.h
index beb210b37..b45483fd0 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ht.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ht.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_HT_H_
#define _RTW_HT_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
index ee2cb54a7..3a652df4b 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_IOCTL_H_
#define _RTW_IOCTL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
index 8fa3858cb..da4949f94 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_IOCTL_RTL_H_
#define _RTW_IOCTL_RTL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
index fa9d655ea..b6e14a8b7 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_IOCTL_SET_H_
#define __RTW_IOCTL_SET_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_iol.h b/drivers/staging/rtl8188eu/include/rtw_iol.h
index 68aae7f0b..1f324e68d 100644
--- a/drivers/staging/rtl8188eu/include/rtw_iol.h
+++ b/drivers/staging/rtl8188eu/include/rtw_iol.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_IOL_H_
#define __RTW_IOL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 4c992573e..5d8bce0f5 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_MLME_H_
#define __RTW_MLME_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 44711332b..27382ff24 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_MLME_EXT_H_
#define __RTW_MLME_EXT_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
index 30fd17f23..02b300217 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/*****************************************************************************
*
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
index a493d4c37..9680e2eab 100644
--- a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_PWRCTRL_H_
#define __RTW_PWRCTRL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_qos.h b/drivers/staging/rtl8188eu/include/rtw_qos.h
index bbee1ddc0..45a77f6f8 100644
--- a/drivers/staging/rtl8188eu/include/rtw_qos.h
+++ b/drivers/staging/rtl8188eu/include/rtw_qos.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_QOS_H_
#define _RTW_QOS_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index eb1ac3d03..b0373b621 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_RECV_H_
#define _RTW_RECV_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h
index 35f61be12..66896af02 100644
--- a/drivers/staging/rtl8188eu/include/rtw_rf.h
+++ b/drivers/staging/rtl8188eu/include/rtw_rf.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_RF_H_
#define __RTW_RF_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_security.h b/drivers/staging/rtl8188eu/include/rtw_security.h
index a1aebe6c8..ca1247bce 100644
--- a/drivers/staging/rtl8188eu/include/rtw_security.h
+++ b/drivers/staging/rtl8188eu/include/rtw_security.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_SECURITY_H_
#define __RTW_SECURITY_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_sreset.h b/drivers/staging/rtl8188eu/include/rtw_sreset.h
index 3a62ed010..ce027dfde 100644
--- a/drivers/staging/rtl8188eu/include/rtw_sreset.h
+++ b/drivers/staging/rtl8188eu/include/rtw_sreset.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_SRESET_C_
#define _RTW_SRESET_C_
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index b7c20883d..a0853bab3 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_XMIT_H_
#define _RTW_XMIT_H_
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
index d4e78326f..42a035123 100644
--- a/drivers/staging/rtl8188eu/include/sta_info.h
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __STA_INFO_H_
#define __STA_INFO_H_
diff --git a/drivers/staging/rtl8188eu/include/usb_hal.h b/drivers/staging/rtl8188eu/include/usb_hal.h
index 8a65995d5..b1bf07a90 100644
--- a/drivers/staging/rtl8188eu/include/usb_hal.h
+++ b/drivers/staging/rtl8188eu/include/usb_hal.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __USB_HAL_H__
#define __USB_HAL_H__
diff --git a/drivers/staging/rtl8188eu/include/usb_ops_linux.h b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
index 4fdc536cb..220733314 100644
--- a/drivers/staging/rtl8188eu/include/usb_ops_linux.h
+++ b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __USB_OPS_LINUX_H__
#define __USB_OPS_LINUX_H__
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 6cb5beca1..e7c512183 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _WIFI_H_
#define _WIFI_H_
diff --git a/drivers/staging/rtl8188eu/include/wlan_bssdef.h b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
index 85b99da49..560966cd7 100644
--- a/drivers/staging/rtl8188eu/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __WLAN_BSSDEF_H__
#define __WLAN_BSSDEF_H__
diff --git a/drivers/staging/rtl8188eu/include/xmit_osdep.h b/drivers/staging/rtl8188eu/include/xmit_osdep.h
index 13965f248..f96ca6af9 100644
--- a/drivers/staging/rtl8188eu/include/xmit_osdep.h
+++ b/drivers/staging/rtl8188eu/include/xmit_osdep.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __XMIT_OSDEP_H_
#define __XMIT_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 911980495..5672f014c 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _IOCTL_LINUX_C_
@@ -2120,13 +2115,13 @@ static u8 set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
u8 res = _SUCCESS;
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_KERNEL);
- if (psetstakey_para == NULL) {
+ if (!psetstakey_para) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -2158,12 +2153,12 @@ static int set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid)
DBG_88E("%s\n", __func__);
pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL;
goto exit;
}
psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL);
- if (psetkeyparm == NULL) {
+ if (!psetkeyparm) {
kfree(pcmd);
res = _FAIL;
goto exit;
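The `== NULL` conversions in these two hunks follow the kernel's preferred allocation-check idiom, `if (!ptr)`, and both sites keep the same unwind order: if the second allocation fails, the first is freed before bailing out. A minimal sketch of the pattern, assuming only <linux/slab.h> and the driver's struct cmd_obj/struct setkey_parm definitions are in scope (the helper itself is illustrative; the driver open-codes this at each call site):

#include <linux/slab.h>

/* Paired allocation with unwind, as in set_pairwise_key()/set_group_key(). */
static int alloc_cmd_and_parm(struct cmd_obj **cmd, struct setkey_parm **parm)
{
        *cmd = kzalloc(sizeof(**cmd), GFP_KERNEL);
        if (!*cmd)                      /* preferred over: *cmd == NULL */
                return -ENOMEM;

        *parm = kzalloc(sizeof(**parm), GFP_KERNEL);
        if (!*parm) {
                kfree(*cmd);            /* unwind the first allocation */
                *cmd = NULL;
                return -ENOMEM;
        }
        return 0;
}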
diff --git a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
index 08bfa76f4..bc756267c 100644
--- a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/os_dep/mon.c b/drivers/staging/rtl8188eu/os_dep/mon.c
index 63bb87593..d976e5e18 100644
--- a/drivers/staging/rtl8188eu/os_dep/mon.c
+++ b/drivers/staging/rtl8188eu/os_dep/mon.c
@@ -155,7 +155,7 @@ static void mon_setup(struct net_device *dev)
dev->netdev_ops = &mon_netdev_ops;
dev->destructor = free_netdev;
ether_setup(dev);
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->type = ARPHRD_IEEE80211;
/*
* Use a locally administered address (IEEE 802)
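The mon.c hunk swaps the old `tx_queue_len = 0` convention for the explicit IFF_NO_QUEUE private flag (available since 4.3): the core then attaches the noqueue qdisc directly instead of inferring "no queue" from a zero queue length. A minimal setup callback showing the flag in context (the function name is illustrative):

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>

/* Queueless virtual interface setup, mirroring mon_setup() above. */
static void mon_like_setup(struct net_device *dev)
{
        ether_setup(dev);
        dev->priv_flags |= IFF_NO_QUEUE;   /* was: dev->tx_queue_len = 0; */
        dev->type = ARPHRD_IEEE80211;
}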
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 7986e6785..ae2caff03 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _OS_INTFS_C_
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index f090bef59..764250b4b 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index d4734baff..0c44914ea 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <osdep_service.h>
#include <drv_types.h>
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index 5f3337c28..41e1b1d15 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <linux/module.h>
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 794cc1143..11d51a301 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define pr_fmt(fmt) "R8188EU: " fmt
@@ -65,7 +60,7 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
struct usb_device *pusbd;
pdvobjpriv = kzalloc(sizeof(*pdvobjpriv), GFP_KERNEL);
- if (pdvobjpriv == NULL)
+ if (!pdvobjpriv)
return NULL;
pdvobjpriv->pusbintf = usb_intf;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index 0fea338d7..ce1e1a135 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
******************************************************************************/
#define _USB_OPS_LINUX_C_
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 1593e280e..221e27506 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _XMIT_OSDEP_C_
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index ccdcebeeb..32fe7352d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -1792,7 +1792,7 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
__skb_queue_tail(&ring->queue, skb);
pdesc->OWN = 1;
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
rtl92e_writew(dev, TPPoll, 0x01 << tcb_desc->queue_index);
return 0;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index cfab71549..62154e3f4 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1991,7 +1991,7 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
return 2;
if (!time_after(jiffies,
- ieee->dev->trans_start + msecs_to_jiffies(timeout)))
+ dev_trans_start(ieee->dev) + msecs_to_jiffies(timeout)))
return 0;
if (!time_after(jiffies,
ieee->last_rx_ps_time + msecs_to_jiffies(timeout)))
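These two hunks are the write and read halves of the 4.7 trans_start rework: `dev->trans_start` became per-queue state, so transmit paths stamp it with netif_trans_update() and timeout checks read it back with dev_trans_start(). A sketch of both sides under that API (both function names are illustrative):

#include <linux/jiffies.h>
#include <linux/netdevice.h>

/* Write side: stamp the last-transmit time after queueing a frame. */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        /* ... hand skb to the hardware ring ... */
        netif_trans_update(dev);        /* was: dev->trans_start = jiffies; */
        return NETDEV_TX_OK;
}

/* Read side: has the device been TX-idle for more than @ms milliseconds? */
static bool tx_idle_longer_than(struct net_device *dev, unsigned int ms)
{
        return time_after(jiffies,
                          dev_trans_start(dev) + msecs_to_jiffies(ms));
}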
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index f18fc0b67..051c2be84 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -746,7 +746,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
// Indicate packets
if(index>REORDER_WIN_SIZE){
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! \n");
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n");
kfree(prxbIndicateArray);
return;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index ae1274cfb..d70559576 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -249,7 +249,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
ieee->seq_ctrl[0]++;
/* avoid watchdog triggers */
- ieee->dev->trans_start = jiffies;
+ netif_trans_update(ieee->dev);
ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
//dev_kfree_skb_any(skb);//edit by thomas
}
@@ -302,7 +302,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
ieee->seq_ctrl[0]++;
/* avoid watchdog triggers */
- ieee->dev->trans_start = jiffies;
+ netif_trans_update(ieee->dev);
ieee->softmac_data_hard_start_xmit(skb,ieee->dev,ieee->basic_rate);
}else{
@@ -1737,7 +1737,7 @@ static short ieee80211_sta_ps_sleep(struct ieee80211_device *ieee, u32 *time_h,
return 2;
if(!time_after(jiffies,
- ieee->dev->trans_start + msecs_to_jiffies(timeout)))
+ dev_trans_start(ieee->dev) + msecs_to_jiffies(timeout)))
return 0;
if(!time_after(jiffies,
@@ -2205,7 +2205,7 @@ static void ieee80211_resume_tx(struct ieee80211_device *ieee)
ieee->dev, ieee->rate);
//(i+1)<ieee->tx_pending.txb->nr_frags);
ieee->stats.tx_packets++;
- ieee->dev->trans_start = jiffies;
+ netif_trans_update(ieee->dev);
}
}
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index 148d0d455..6033502ef 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -75,7 +75,7 @@ static void RxPktPendingTimeout(unsigned long data)
// Indicate packets
if(index > REORDER_WIN_SIZE){
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! \n");
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n");
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
return;
}
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index 5c3bb3be2..d733fb2ad 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -194,7 +194,7 @@ void phy_RF8256_Config_ParaFile(struct net_device *dev)
break;
}
- /*----Restore RFENV control type----*/;
+ /*----Restore RFENV control type----*/
switch (eRFPath) {
case RF90_PATH_A:
case RF90_PATH_C:
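The r8190_rtl8256.c hunk above removes a stray semicolon: a `;` placed directly after a block comment is an empty statement, harmless at runtime but noise that static checkers flag. Before and after:

        /*----Restore RFENV control type----*/;  /* ';' forms a null statement */
        /*----Restore RFENV control type----*/   /* comment only, as intended */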
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 849a95ef7..8c1d73719 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -235,7 +235,6 @@ static void CamResetAllEntry(struct net_device *dev)
*/
ulcommand |= BIT(31) | BIT(30);
write_nic_dword(dev, RWCAM, ulcommand);
-
}
@@ -298,6 +297,7 @@ int read_nic_byte_E(struct net_device *dev, int indx, u8 *data)
return 0;
}
+
/* as 92U has extend page from 4 to 16, so modify functions below. */
void write_nic_byte(struct net_device *dev, int indx, u8 data)
{
@@ -319,14 +319,11 @@ void write_nic_byte(struct net_device *dev, int indx, u8 data)
if (status < 0)
netdev_err(dev, "write_nic_byte TimeOut! status: %d\n", status);
-
-
}
void write_nic_word(struct net_device *dev, int indx, u16 data)
{
-
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -345,13 +342,11 @@ void write_nic_word(struct net_device *dev, int indx, u16 data)
if (status < 0)
netdev_err(dev, "write_nic_word TimeOut! status: %d\n", status);
-
}
void write_nic_dword(struct net_device *dev, int indx, u32 data)
{
-
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -372,7 +367,6 @@ void write_nic_dword(struct net_device *dev, int indx, u32 data)
if (status < 0)
netdev_err(dev, "write_nic_dword TimeOut! status: %d\n",
status);
-
}
@@ -738,7 +732,6 @@ void rtl8192_update_msr(struct net_device *dev)
* master (see the create BSS/IBSS func)
*/
if (priv->ieee80211->state == IEEE80211_LINKED) {
-
if (priv->ieee80211->iw_mode == IW_MODE_INFRA)
msr |= (MSR_LINK_MANAGED << MSR_LINK_SHIFT);
else if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
@@ -773,11 +766,10 @@ static void rtl8192_rx_isr(struct urb *urb);
static u32 get_rxpacket_shiftbytes_819xusb(struct ieee80211_rx_stats *pstats)
{
-
return (sizeof(rx_desc_819x_usb) + pstats->RxDrvInfoSize
+ pstats->RxBufShift);
-
}
+
static int rtl8192_rx_initiate(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -874,6 +866,7 @@ void rtl8192_set_rxconf(struct net_device *dev)
write_nic_dword(dev, RCR, rxconf);
}
+
/* wait to be removed */
void rtl8192_rx_enable(struct net_device *dev)
{
@@ -943,9 +936,9 @@ inline u16 ieeerate2rtlrate(int rate)
return 11;
default:
return 3;
-
}
}
+
static u16 rtl_rate[] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
inline u16 rtl8192_rate2rate(short rate)
{
@@ -1050,7 +1043,7 @@ static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->tx_lock, flags);
- memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
+ *(struct net_device **)(skb->cb) = dev;
tcb_desc->bTxEnableFwCalcDur = 1;
skb_push(skb, priv->ieee80211->tx_headroom);
ret = rtl8192_tx(dev, skb);
@@ -1100,7 +1093,7 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
if (!skb)
return;
- dev = (struct net_device *)(skb->cb);
+ dev = *(struct net_device **)(skb->cb);
tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
queue_index = tcb_desc->queue_index;
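These skb->cb hunks replace a memcpy() of a pointer with a direct typed store and load. skb->cb is a 48-byte scratch area owned by whichever layer currently holds the skb, and it is declared 8-byte aligned, so storing a `struct net_device *` through a cast is safe and reads more clearly than copying sizeof(dev) raw bytes. A sketch of the pair (the helper names are illustrative; the driver open-codes the casts):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Stash and recover the owning net_device in the skb control block,
 * as rtl8192_hard_data_xmit()/rtl8192_tx_isr() now do above.
 */
static inline void cb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
        *(struct net_device **)skb->cb = dev;
}

static inline struct net_device *cb_get_dev(struct sk_buff *skb)
{
        return *(struct net_device **)skb->cb;
}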
@@ -1108,7 +1101,7 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
if (tcb_desc->queue_index != TXCMD_QUEUE) {
if (tx_urb->status == 0) {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
priv->stats.txoktotal++;
priv->ieee80211->LinkDetectInfo.NumTxOkInPeriod++;
priv->stats.txbytesunicast +=
@@ -1149,7 +1142,6 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
return; /* avoid further processing AMSDU */
}
}
-
}
static void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
@@ -1272,11 +1264,10 @@ static void rtl8192_update_cap(struct net_device *dev, u16 cap)
priv->slot_time = slot_time;
write_nic_byte(dev, SLOT_TIME, slot_time);
}
-
}
+
static void rtl8192_net_update(struct net_device *dev)
{
-
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_network *net;
u16 BcnTimeCfg = 0, BcnCW = 6, BcnIFS = 0xf;
@@ -1303,9 +1294,6 @@ static void rtl8192_net_update(struct net_device *dev)
write_nic_word(dev, BCN_TCFG, BcnTimeCfg);
}
-
-
-
}
/* temporary hw beacon is not used any more.
@@ -1315,6 +1303,7 @@ void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate)
{
}
+
inline u8 rtl8192_IsWirelessBMode(u16 rate)
{
if (((rate <= 110) && (rate != 60) && (rate != 90)) || (rate == 220))
@@ -1715,7 +1704,7 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
return -1;
}
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
atomic_inc(&priv->tx_pending[tcb_desc->queue_index]);
return 0;
}
@@ -1737,7 +1726,6 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
#ifndef JACKSON_NEW_RX
for (i = 0; i < (MAX_RX_URB + 1); i++) {
-
priv->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
priv->rx_urb[i]->transfer_buffer =
@@ -1782,8 +1770,8 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
netdev_dbg(dev, "End of initendpoints\n");
return 0;
-
}
+
#ifdef THOMAS_BEACON
static void rtl8192_usb_deleteendpoints(struct net_device *dev)
{
@@ -1820,7 +1808,6 @@ void rtl8192_usb_deleteendpoints(struct net_device *dev)
}
kfree(priv->rx_urb);
priv->rx_urb = NULL;
-
}
#else
kfree(priv->rx_urb);
@@ -1888,6 +1875,7 @@ static void rtl8192_update_beacon(struct work_struct *work)
net->bssht.bdRT2RTLongSlotTime;
rtl8192_update_cap(dev, net->capability);
}
+
/*
* background support to run QoS activate functionality
*/
@@ -1992,7 +1980,6 @@ static int rtl8192_handle_beacon(struct net_device *dev,
rtl8192_qos_handle_probe_response(priv, 1, network);
schedule_delayed_work(&priv->update_beacon_wq, 0);
return 0;
-
}
/*
@@ -2007,7 +1994,7 @@ static int rtl8192_qos_association_resp(struct r8192_priv *priv,
u32 size = sizeof(struct ieee80211_qos_parameters);
int set_qos_param = 0;
- if ((priv == NULL) || (network == NULL))
+ if (!priv || !network)
return 0;
if (priv->ieee80211->state != IEEE80211_LINKED)
@@ -2182,6 +2169,7 @@ static u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
}
return ret;
}
+
static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2223,8 +2211,8 @@ static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
priv->ieee80211->pHTInfo->bEnableHT = 0;
RT_TRACE(COMP_INIT, "Current Wireless Mode is %x\n", wireless_mode);
rtl8192_refresh_supportrate(priv);
-
}
+
/* init priv variables here. only non_zero value should be initialized here. */
static void rtl8192_init_priv_variable(struct net_device *dev)
{
@@ -2432,6 +2420,7 @@ static inline u16 endian_swap(u16 *data)
*data = (tmp >> 8) | (tmp << 8);
return *data;
}
+
static void rtl8192_read_eeprom_info(struct net_device *dev)
{
u16 wEPROM_ID = 0;
@@ -2627,7 +2616,6 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
default:
priv->CustomerID = RT_CID_DEFAULT;
break;
-
}
switch (priv->CustomerID) {
@@ -2642,7 +2630,6 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
default:
priv->LedStrategy = SW_LED_MODE0;
break;
-
}
@@ -2676,7 +2663,6 @@ static short rtl8192_get_channel_map(struct net_device *dev)
static short rtl8192_init(struct net_device *dev)
{
-
struct r8192_priv *priv = ieee80211_priv(dev);
memset(&(priv->stats), 0, sizeof(struct Stats));
@@ -2797,8 +2783,6 @@ static void rtl8192_hwconfig(struct net_device *dev)
/* Set Tx Antenna including Feedback control */
/* Set Auto Rate fallback control */
-
-
}
@@ -3027,7 +3011,6 @@ static bool rtl8192_adapter_start(struct net_device *dev)
bMaskByte2);
for (i = 0; i < CCKTxBBGainTableLength; i++) {
-
if (TempCCk == priv->cck_txbbgain_table[i].ccktxbb_valuearray[0]) {
priv->cck_present_attentuation_20Mdefault = (u8)i;
break;
@@ -3037,7 +3020,6 @@ static bool rtl8192_adapter_start(struct net_device *dev)
priv->cck_present_attentuation_difference = 0;
priv->cck_present_attentuation =
priv->cck_present_attentuation_20Mdefault;
-
}
}
write_nic_byte(dev, 0x87, 0x0);
@@ -3222,7 +3204,6 @@ static RESET_TYPE rtl819x_ifcheck_resetornot(struct net_device *dev)
} else {
return RESET_TYPE_NORESET;
}
-
}
static void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
@@ -3250,7 +3231,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
if ((priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP40) ||
(priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP104)) {
-
for (EntryId = 0; EntryId < 4; EntryId++) {
MacAddr = CAM_CONST_ADDR[EntryId];
setKey(dev, EntryId, EntryId,
@@ -3259,7 +3239,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
}
} else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_TKIP) {
-
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
(u8 *)dev->dev_addr, 0, NULL);
@@ -3267,7 +3246,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
MacAddr, 0, NULL);
} else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP) {
-
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
(u8 *)dev->dev_addr, 0, NULL);
@@ -3301,6 +3279,7 @@ static void CamRestoreAllEntry(struct net_device *dev)
CAM_CONST_ADDR[0], 0, NULL);
}
}
+
/* This function is used to fix Tx/Rx stop bug temporarily.
* This function will do "system reset" to NIC when Tx or Rx is stuck.
* The method checking Tx/Rx stuck of this function is supported by FW,
@@ -3468,7 +3447,6 @@ static void rtl819x_watchdog_wqcallback(struct work_struct *work)
/* for AP roaming */
if (priv->ieee80211->state == IEEE80211_LINKED &&
priv->ieee80211->iw_mode == IW_MODE_INFRA) {
-
rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);
if ((TotalRxBcnNum + TotalRxDataNum) == 0) {
#ifdef TODO
@@ -3485,7 +3463,6 @@ static void rtl819x_watchdog_wqcallback(struct work_struct *work)
priv->ieee80211->link_change(dev);
queue_work(priv->ieee80211->wq,
&priv->ieee80211->associate_procedure_wq);
-
}
}
priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod = 0;
@@ -3510,7 +3487,6 @@ static void rtl819x_watchdog_wqcallback(struct work_struct *work)
priv->bForcedSilentReset = false;
priv->bResetInProgress = false;
RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
-
}
static void watch_dog_timer_callback(unsigned long data)
@@ -3521,6 +3497,7 @@ static void watch_dog_timer_callback(unsigned long data)
mod_timer(&priv->watch_dog_timer,
jiffies + msecs_to_jiffies(IEEE80211_WATCH_DOG_TIME));
}
+
static int _rtl8192_up(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3560,7 +3537,6 @@ static int rtl8192_open(struct net_device *dev)
ret = rtl8192_up(dev);
up(&priv->wx_sem);
return ret;
-
}
@@ -3587,7 +3563,6 @@ static int rtl8192_close(struct net_device *dev)
up(&priv->wx_sem);
return ret;
-
}
int rtl8192_down(struct net_device *dev)
@@ -3649,7 +3624,6 @@ void rtl8192_commit(struct net_device *dev)
rtl8192_rtx_disable(dev);
reset_status = _rtl8192_up(dev);
-
}
static void rtl8192_restart(struct work_struct *work)
@@ -4111,7 +4085,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
(((priv->undecorated_smoothed_pwdb) * (Rx_Smooth_Factor - 1)) +
(pprevious_stats->RxPWDBAll)) / (Rx_Smooth_Factor);
}
-
}
/* Check EVM */
@@ -4159,8 +4132,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
}
}
}
-
-
}
/*-----------------------------------------------------------------------------
@@ -4201,6 +4172,7 @@ static u8 rtl819x_evm_dbtopercentage(char value)
ret_val = 100;
return ret_val;
}
+
/* We want good-looking for signal strength/quality */
static long rtl819x_signal_scale_mapping(long currsig)
{
@@ -4542,7 +4514,6 @@ static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
bpacket_match_bssid, bpacket_toself,
bPacketBeacon, bToSelfBA);
rtl8192_record_rxdesc_forlateruse(pstats, &previous_stats);
-
}
/**
@@ -4758,7 +4729,6 @@ static void query_rxdesc_status(struct sk_buff *skb,
RT_TRACE(COMP_RXDESC,
"driver_info->FirstAGGR = %d, driver_info->PartAggr = %d\n",
driver_info->FirstAGGR, driver_info->PartAggr);
-
}
skb_pull(skb, sizeof(rx_desc_819x_usb));
@@ -4822,7 +4792,6 @@ static void rtl8192_rx_nomal(struct sk_buff *skb)
netdev_dbg(dev, "actual_length: %d\n", skb->len);
dev_kfree_skb_any(skb);
}
-
}
static void rtl819xusb_process_received_packet(
@@ -4898,7 +4867,6 @@ static void rtl8192_rx_cmd(struct sk_buff *skb)
};
if ((skb->len >= (20 + sizeof(rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
-
query_rx_cmdpkt_desc_status(skb, &stats);
/* prfd->queue_id = 1; */
@@ -4937,7 +4905,6 @@ static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
info->out_pipe);
dev_kfree_skb(skb);
break;
-
}
}
}
@@ -4971,7 +4938,7 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
RT_TRACE(COMP_INIT, "Oops: i'm coming\n");
dev = alloc_ieee80211(sizeof(struct r8192_priv));
- if (dev == NULL)
+ if (!dev)
return -ENOMEM;
usb_set_intfdata(intf, dev);
@@ -5034,7 +5001,6 @@ fail:
*/
static void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
{
-
cancel_work_sync(&priv->reset_wq);
cancel_delayed_work(&priv->watch_dog_wq);
cancel_delayed_work(&priv->update_beacon_wq);
@@ -5191,13 +5157,12 @@ void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
write_nic_dword(dev, RWCAM, TargetCommand);
} else {
/* Key Material */
- if (KeyContent != NULL) {
+ if (KeyContent) {
write_nic_dword(dev, WCAMI, (u32)(*(KeyContent + i - 2)));
write_nic_dword(dev, RWCAM, TargetCommand);
}
}
}
-
}
/***************************************************************************
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index f828e6441..837704de3 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -30,7 +30,6 @@
static const u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};
-
#ifndef ENETDOWN
#define ENETDOWN 1
#endif
@@ -44,7 +43,6 @@ static int r8192_wx_get_freq(struct net_device *dev,
return ieee80211_wx_get_freq(priv->ieee80211, a, wrqu, b);
}
-
static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
@@ -53,8 +51,6 @@ static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
return ieee80211_wx_get_mode(priv->ieee80211, a, wrqu, b);
}
-
-
static int r8192_wx_get_rate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -64,8 +60,6 @@ static int r8192_wx_get_rate(struct net_device *dev,
return ieee80211_wx_get_rate(priv->ieee80211, info, wrqu, extra);
}
-
-
static int r8192_wx_set_rate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -82,7 +76,6 @@ static int r8192_wx_set_rate(struct net_device *dev,
return ret;
}
-
static int r8192_wx_set_rts(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -148,7 +141,6 @@ static int r8192_wx_force_reset(struct net_device *dev,
}
-
static int r8192_wx_set_rawtx(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -301,7 +293,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
/* range->min_r_time; */ /* Minimal retry lifetime */
/* range->max_r_time; */ /* Maximal retry lifetime */
-
for (i = 0, val = 0; i < 14; i++) {
/* Include only legal frequencies for some countries */
@@ -326,7 +317,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
return 0;
}
-
static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
@@ -396,9 +386,6 @@ static int r8192_wx_set_essid(struct net_device *dev,
return ret;
}
-
-
-
static int r8192_wx_get_essid(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -415,7 +402,6 @@ static int r8192_wx_get_essid(struct net_device *dev,
return ret;
}
-
static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
@@ -439,7 +425,6 @@ static int r8192_wx_get_name(struct net_device *dev,
return ieee80211_wx_get_name(priv->ieee80211, info, wrqu, extra);
}
-
static int r8192_wx_set_frag(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -493,7 +478,6 @@ static int r8192_wx_set_wap(struct net_device *dev,
}
-
static int r8192_wx_get_wap(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -503,7 +487,6 @@ static int r8192_wx_get_wap(struct net_device *dev,
return ieee80211_wx_get_wap(priv->ieee80211, info, wrqu, extra);
}
-
static int r8192_wx_get_enc(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *key)
@@ -695,7 +678,6 @@ static int r8192_wx_get_retry(struct net_device *dev,
wrqu->retry.value = priv->retry_data;
}
-
return 0;
}
@@ -711,7 +693,6 @@ static int r8192_wx_get_sens(struct net_device *dev,
return 0;
}
-
static int r8192_wx_set_sens(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -862,7 +843,6 @@ static int dummy(struct net_device *dev, struct iw_request_info *a,
return -1;
}
-
static iw_handler r8192_wx_handlers[] = {
NULL, /* SIOCSIWCOMMIT */
r8192_wx_get_name, /* SIOCGIWNAME */
@@ -949,7 +929,6 @@ static const struct iw_priv_args r8192_private_args[] = {
};
-
static iw_handler r8192_private_handler[] = {
r8192_wx_set_crcmon,
r8192_wx_set_scan_type,
@@ -985,7 +964,6 @@ struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev)
return wstats;
}
-
struct iw_handler_def r8192_wx_handlers_def = {
.standard = r8192_wx_handlers,
.num_standard = ARRAY_SIZE(r8192_wx_handlers),
diff --git a/drivers/staging/rtl8712/basic_types.h b/drivers/staging/rtl8712/basic_types.h
index 7561bed5d..f5c023189 100644
--- a/drivers/staging/rtl8712/basic_types.h
+++ b/drivers/staging/rtl8712/basic_types.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index 29e47e150..ae79047ac 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
diff --git a/drivers/staging/rtl8712/ethernet.h b/drivers/staging/rtl8712/ethernet.h
index fad173f40..039da36fa 100644
--- a/drivers/staging/rtl8712/ethernet.h
+++ b/drivers/staging/rtl8712/ethernet.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index 7914bdab7..b6f93af70 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -13,10 +13,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
@@ -201,8 +197,8 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
0x0000ffff);
memcpy(ppayload, ptr, dump_imem_sz);
r8712_write_mem(padapter, RTL8712_DMA_VOQ,
- dump_imem_sz + TXDESC_SIZE,
- (u8 *)ptx_desc);
+ dump_imem_sz + TXDESC_SIZE,
+ (u8 *)ptx_desc);
ptr += dump_imem_sz;
imem_sz -= dump_imem_sz;
} while (imem_sz > 0);
@@ -230,7 +226,8 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
0x0000ffff);
memcpy(ppayload, ptr, dump_emem_sz);
r8712_write_mem(padapter, RTL8712_DMA_VOQ,
- dump_emem_sz + TXDESC_SIZE, (u8 *)ptx_desc);
+ dump_emem_sz + TXDESC_SIZE,
+ (u8 *)ptx_desc);
ptr += dump_emem_sz;
emem_sz -= dump_emem_sz;
} while (emem_sz > 0);
@@ -282,7 +279,7 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
ptx_desc->txdw0 |= cpu_to_le32(BIT(28));
memcpy(ppayload, &fwhdr.fwpriv, fwhdr.fw_priv_sz);
r8712_write_mem(padapter, RTL8712_DMA_VOQ,
- fwhdr.fw_priv_sz + TXDESC_SIZE, (u8 *)ptx_desc);
+ fwhdr.fw_priv_sz + TXDESC_SIZE, (u8 *)ptx_desc);
/* polling dmem code done */
i = 100;
@@ -297,7 +294,8 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
tmp8 = r8712_read8(padapter, 0x1025000A);
if (tmp8 & BIT(4)) /* When boot from EEPROM,
- & FW need more time to read EEPROM */
+ * & FW need more time to read EEPROM
+ */
i = 60;
else /* boot from EFUSE */
i = 30;
@@ -332,7 +330,8 @@ uint rtl8712_hal_init(struct _adapter *padapter)
r8712_read32(padapter, RCR));
val32 = r8712_read32(padapter, RCR);
r8712_write32(padapter, RCR, (val32 | BIT(26))); /* Enable RX TCP
- Checksum offload */
+ * Checksum offload
+ */
netdev_info(padapter->pnetdev, "2 RCR=0x%x\n",
r8712_read32(padapter, RCR));
val32 = r8712_read32(padapter, RCR);
@@ -346,7 +345,8 @@ uint rtl8712_hal_init(struct _adapter *padapter)
r8712_write8(padapter, 0x102500BD, r8712_read8(padapter, 0x102500BD) |
BIT(7)); /* enable usb rx aggregation */
r8712_write8(padapter, 0x102500D9, 1); /* TH=1 => means that invalidate
- * usb rx aggregation */
+ * usb rx aggregation
+ */
r8712_write8(padapter, 0x1025FE5B, 0x04); /* 1.7ms/4 */
/* Fix the RX FIFO issue(USB error) */
r8712_write8(padapter, 0x1025fe5C, r8712_read8(padapter, 0x1025fe5C)
@@ -367,7 +367,8 @@ uint rtl8712_hal_deinit(struct _adapter *padapter)
r8712_write8(padapter, SYS_FUNC_EN + 1, 0x70);
r8712_write8(padapter, PMC_FSM, 0x06); /* Enable Loader Data Keep */
r8712_write8(padapter, SYS_ISO_CTRL, 0xF9); /* Isolation signals from
- * CORE, PLL */
+ * CORE, PLL
+ */
r8712_write8(padapter, SYS_ISO_CTRL + 1, 0xe8); /* Enable EFUSE 1.2V */
r8712_write8(padapter, AFE_PLL_CTRL, 0x00); /* Disable AFE PLL. */
r8712_write8(padapter, LDOA15_CTRL, 0x54); /* Disable A15V */
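Two checkpatch-style rules drive the hal_init.c hunks above: continuation lines of a function call align under the first argument of the opening line, and a multi-line comment puts the closing `*/` on its own line instead of tacking it onto the last text line. In the driver's own code, the two forms now read:

        r8712_write_mem(padapter, RTL8712_DMA_VOQ,
                        dump_imem_sz + TXDESC_SIZE,
                        (u8 *)ptx_desc);

        r8712_write8(padapter, 0x102500D9, 1); /* TH=1 => means that invalidate
                                                * usb rx aggregation
                                                */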
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index d13b4d53c..8918654b4 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -13,10 +13,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c
index e4e4bdee7..af7c4a477 100644
--- a/drivers/staging/rtl8712/mlme_linux.c
+++ b/drivers/staging/rtl8712/mlme_linux.c
@@ -153,7 +153,7 @@ void r8712_report_sec_ie(struct _adapter *adapter, u8 authmode, u8 *sec_ie)
buff = NULL;
if (authmode == _WPA_IE_ID_) {
buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC);
- if (buff == NULL)
+ if (!buff)
return;
p = buff;
p += sprintf(p, "ASSOCINFO(ReqIEs=");
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index ab19112ea..57211f7e6 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -389,7 +389,7 @@ static int netdev_open(struct net_device *pnetdev)
padapter->bup = true;
if (rtl871x_hal_init(padapter) != _SUCCESS)
goto netdev_open_error;
- if (r8712_initmac == NULL)
+ if (!r8712_initmac)
/* Use the mac address stored in the Efuse */
memcpy(pnetdev->dev_addr,
padapter->eeprompriv.mac_addr, ETH_ALEN);
@@ -413,7 +413,7 @@ static int netdev_open(struct net_device *pnetdev)
}
if (start_drv_threads(padapter) != _SUCCESS)
goto netdev_open_error;
- if (padapter->dvobjpriv.inirp_init == NULL)
+ if (!padapter->dvobjpriv.inirp_init)
goto netdev_open_error;
else
padapter->dvobjpriv.inirp_init(padapter);
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index 076d5083c..ad041c96f 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -57,9 +57,6 @@ struct __queue {
spin_lock_init(&((pqueue)->lock)); \
} while (0)
-#define LIST_CONTAINOR(ptr, type, member) \
- ((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
-
static inline u32 _down_sema(struct semaphore *sema)
{
if (down_interruptible(sema))
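With the local LIST_CONTAINOR macro gone, the rtl8712_recv.c and rtl8712_xmit.c hunks below switch every call site to the kernel's container_of(). Both compute the same offset arithmetic, but container_of() type-checks the member; note the member argument also changes from `u` to `u.list`, since the conversion must name the actual `struct list_head` embedded in the union. A self-contained sketch of the recovery pattern (the struct here is illustrative, not the driver's union recv_frame):

#include <linux/kernel.h>       /* container_of() */
#include <linux/list.h>

struct example_frame {
        struct list_head list;
        int payload;
};

/* Recover the containing frame from its embedded list_head, the way
 * recvframe_defrag() and friends now do with container_of().
 */
static struct example_frame *first_frame(struct list_head *head)
{
        return container_of(head->next, struct example_frame, list);
}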
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 50f400234..13c018340 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -135,7 +135,7 @@ static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
/* invoke cmd->callback function */
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -149,7 +149,7 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
/* invoke cmd->callback function */
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -165,7 +165,7 @@ static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
if (pcmd->rsp && pcmd->rspsz > 0)
memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -178,7 +178,7 @@ static u8 write_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -194,7 +194,7 @@ static u8 read_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
if (pcmd->rsp && pcmd->rspsz > 0)
memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -207,7 +207,7 @@ static u8 write_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -227,7 +227,7 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
{
struct cmd_obj *pcmd_r;
- if (pcmd == NULL)
+ if (!pcmd)
return pcmd;
pcmd_r = NULL;
@@ -416,7 +416,7 @@ _next:
/* free all cmd_obj resources */
do {
pcmd = r8712_dequeue_cmd(&(pcmdpriv->cmd_queue));
- if (pcmd == NULL)
+ if (!pcmd)
break;
r8712_free_cmd_obj(pcmd);
} while (1);
@@ -431,7 +431,7 @@ void r8712_event_handle(struct _adapter *padapter, uint *peventbuf)
void (*event_callback)(struct _adapter *dev, u8 *pbuf);
struct evt_priv *pevt_priv = &(padapter->evtpriv);
- if (peventbuf == NULL)
+ if (!peventbuf)
goto _abort_event_;
evt_sz = (u16)(le32_to_cpu(*peventbuf) & 0xffff);
evt_seq = (u8)((le32_to_cpu(*peventbuf) >> 24) & 0x7f);
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index d187508dd..f25b34c7d 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -204,7 +204,7 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
phead = &defrag_q->queue;
plist = phead->next;
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = container_of(plist, union recv_frame, u.list);
list_del_init(&prframe->u.list);
pfhdr = &prframe->u.hdr;
curfragnum = 0;
@@ -219,7 +219,7 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
plist = &defrag_q->queue;
plist = plist->next;
while (!end_of_queue_search(phead, plist)) {
- pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pnextrframe = container_of(plist, union recv_frame, u.list);
pnfhdr = &pnextrframe->u.hdr;
/*check the fragment sequence (2nd ~n fragment frame) */
if (curfragnum != pnfhdr->attrib.frag_num) {
@@ -492,7 +492,7 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
phead = &ppending_recvframe_queue->queue;
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pnextrframe = container_of(plist, union recv_frame, u.list);
pnextattrib = &pnextrframe->u.hdr.attrib;
if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
plist = plist->next;
@@ -525,14 +525,14 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
if (list_empty(phead))
return true;
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = container_of(plist, union recv_frame, u.list);
pattrib = &prframe->u.hdr.attrib;
preorder_ctrl->indicate_seq = pattrib->seq_num;
}
/* Prepare indication list and indication.
* Check if there is any packet need indicate. */
while (!list_empty(phead)) {
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = container_of(plist, union recv_frame, u.list);
pattrib = &prframe->u.hdr.attrib;
if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
plist = plist->next;
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c
index b21a60e9f..7e0b94503 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.c
+++ b/drivers/staging/rtl8712/rtl8712_xmit.c
@@ -169,8 +169,8 @@ static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv,
xmitframe_phead = &pframe_queue->queue;
xmitframe_plist = xmitframe_phead->next;
if (!end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist,
- struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist,
+ struct xmit_frame, list);
list_del_init(&pxmitframe->list);
ptxservq->qcnt--;
phwxmit->txcmdcnt++;
@@ -209,8 +209,8 @@ static struct xmit_frame *dequeue_xframe_ex(struct xmit_priv *pxmitpriv,
sta_phead = &phwxmit->sta_queue->queue;
sta_plist = sta_phead->next;
while (!end_of_queue_search(sta_phead, sta_plist)) {
- ptxservq = LIST_CONTAINOR(sta_plist, struct tx_servq,
- tx_pending);
+ ptxservq = container_of(sta_plist, struct tx_servq,
+ tx_pending);
pframe_queue = &ptxservq->sta_pending;
pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit,
ptxservq, pframe_queue);
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 86136cc73..aed03cfbb 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -225,10 +225,10 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psurveyPara = kmalloc(sizeof(*psurveyPara), GFP_ATOMIC);
- if (psurveyPara == NULL) {
+ if (!psurveyPara) {
kfree(ph2c);
return _FAIL;
}
@@ -258,10 +258,10 @@ u8 r8712_setdatarate_cmd(struct _adapter *padapter, u8 *rateset)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pbsetdataratepara = kmalloc(sizeof(*pbsetdataratepara), GFP_ATOMIC);
- if (pbsetdataratepara == NULL) {
+ if (!pbsetdataratepara) {
kfree(ph2c);
return _FAIL;
}
@@ -280,10 +280,10 @@ u8 r8712_set_chplan_cmd(struct _adapter *padapter, int chplan)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetchplanpara = kmalloc(sizeof(*psetchplanpara), GFP_ATOMIC);
- if (psetchplanpara == NULL) {
+ if (!psetchplanpara) {
kfree(ph2c);
return _FAIL;
}
@@ -301,10 +301,10 @@ u8 r8712_setbasicrate_cmd(struct _adapter *padapter, u8 *rateset)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pssetbasicratepara = kmalloc(sizeof(*pssetbasicratepara), GFP_ATOMIC);
- if (pssetbasicratepara == NULL) {
+ if (!pssetbasicratepara) {
kfree(ph2c);
return _FAIL;
}
@@ -322,10 +322,10 @@ u8 r8712_setfwdig_cmd(struct _adapter *padapter, u8 type)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
- if (pwriteptmparm == NULL) {
+ if (!pwriteptmparm) {
kfree(ph2c);
return _FAIL;
}
@@ -342,10 +342,10 @@ u8 r8712_setfwra_cmd(struct _adapter *padapter, u8 type)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
- if (pwriteptmparm == NULL) {
+ if (!pwriteptmparm) {
kfree(ph2c);
return _FAIL;
}
@@ -362,10 +362,10 @@ u8 r8712_setrfreg_cmd(struct _adapter *padapter, u8 offset, u32 val)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pwriterfparm = kmalloc(sizeof(*pwriterfparm), GFP_ATOMIC);
- if (pwriterfparm == NULL) {
+ if (!pwriterfparm) {
kfree(ph2c);
return _FAIL;
}
@@ -383,10 +383,10 @@ u8 r8712_getrfreg_cmd(struct _adapter *padapter, u8 offset, u8 *pval)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
prdrfparm = kmalloc(sizeof(*prdrfparm), GFP_ATOMIC);
- if (prdrfparm == NULL) {
+ if (!prdrfparm) {
kfree(ph2c);
return _FAIL;
}
@@ -427,7 +427,7 @@ u8 r8712_createbss_cmd(struct _adapter *padapter)
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_START_TO_LINK);
pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
- if (pcmd == NULL)
+ if (!pcmd)
return _FAIL;
INIT_LIST_HEAD(&pcmd->list);
pcmd->cmdcode = _CreateBss_CMD_;
@@ -457,7 +457,7 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_START_TO_LINK);
pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
- if (pcmd == NULL)
+ if (!pcmd)
return _FAIL;
/* for hidden ap to set fw_state here */
@@ -587,10 +587,10 @@ u8 r8712_disassoc_cmd(struct _adapter *padapter) /* for sta_mode */
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pdisconnect_cmd = kmalloc(sizeof(*pdisconnect_cmd), GFP_ATOMIC);
- if (pdisconnect_cmd == NULL)
+ if (!pdisconnect_cmd)
return _FAIL;
pdisconnect = kmalloc(sizeof(*pdisconnect), GFP_ATOMIC);
- if (pdisconnect == NULL) {
+ if (!pdisconnect) {
kfree(pdisconnect_cmd);
return _FAIL;
}
@@ -609,10 +609,10 @@ u8 r8712_setopmode_cmd(struct _adapter *padapter,
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetop = kmalloc(sizeof(*psetop), GFP_ATOMIC);
- if (psetop == NULL) {
+ if (!psetop) {
kfree(ph2c);
return _FAIL;
}
@@ -633,15 +633,15 @@ u8 r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key)
struct sta_info *sta = (struct sta_info *)psta;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetstakey_para = kmalloc(sizeof(*psetstakey_para), GFP_ATOMIC);
- if (psetstakey_para == NULL) {
+ if (!psetstakey_para) {
kfree(ph2c);
return _FAIL;
}
psetstakey_rsp = kmalloc(sizeof(*psetstakey_rsp), GFP_ATOMIC);
- if (psetstakey_rsp == NULL) {
+ if (!psetstakey_rsp) {
kfree(ph2c);
kfree(psetstakey_para);
return _FAIL;
@@ -673,10 +673,10 @@ u8 r8712_setrfintfs_cmd(struct _adapter *padapter, u8 mode)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetrfintfsparm = kmalloc(sizeof(*psetrfintfsparm), GFP_ATOMIC);
- if (psetrfintfsparm == NULL) {
+ if (!psetrfintfsparm) {
kfree(ph2c);
return _FAIL;
}
@@ -695,10 +695,10 @@ u8 r8712_setrttbl_cmd(struct _adapter *padapter,
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetrttblparm = kmalloc(sizeof(*psetrttblparm), GFP_ATOMIC);
- if (psetrttblparm == NULL) {
+ if (!psetrttblparm) {
kfree(ph2c);
return _FAIL;
}
@@ -716,10 +716,10 @@ u8 r8712_setMacAddr_cmd(struct _adapter *padapter, u8 *mac_addr)
struct SetMacAddr_param *psetMacAddr_para;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetMacAddr_para = kmalloc(sizeof(*psetMacAddr_para), GFP_ATOMIC);
- if (psetMacAddr_para == NULL) {
+ if (!psetMacAddr_para) {
kfree(ph2c);
return _FAIL;
}
@@ -738,15 +738,15 @@ u8 r8712_setassocsta_cmd(struct _adapter *padapter, u8 *mac_addr)
struct set_assocsta_rsp *psetassocsta_rsp = NULL;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetassocsta_para = kmalloc(sizeof(*psetassocsta_para), GFP_ATOMIC);
- if (psetassocsta_para == NULL) {
+ if (!psetassocsta_para) {
kfree(ph2c);
return _FAIL;
}
psetassocsta_rsp = kmalloc(sizeof(*psetassocsta_rsp), GFP_ATOMIC);
- if (psetassocsta_rsp == NULL) {
+ if (!psetassocsta_rsp) {
kfree(ph2c);
kfree(psetassocsta_para);
return _FAIL;
@@ -766,10 +766,10 @@ u8 r8712_addbareq_cmd(struct _adapter *padapter, u8 tid)
struct addBaReq_parm *paddbareq_parm;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
paddbareq_parm = kmalloc(sizeof(*paddbareq_parm), GFP_ATOMIC);
- if (paddbareq_parm == NULL) {
+ if (!paddbareq_parm) {
kfree(ph2c);
return _FAIL;
}
@@ -787,10 +787,10 @@ u8 r8712_wdg_wk_cmd(struct _adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pdrvintcmd_param = kmalloc(sizeof(*pdrvintcmd_param), GFP_ATOMIC);
- if (pdrvintcmd_param == NULL) {
+ if (!pdrvintcmd_param) {
kfree(ph2c);
return _FAIL;
}
@@ -961,10 +961,10 @@ u8 r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl,
struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
param = kzalloc(sizeof(*param), GFP_ATOMIC);
- if (param == NULL) {
+ if (!param) {
kfree(ph2c);
return _FAIL;
}
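
Every hunk in rtl871x_cmd.c is the same checkpatch style fix: "== NULL" becomes the idiomatic "!ptr" with no change in behavior. The surrounding pattern is worth noting, since each helper repeats it: two GFP_ATOMIC allocations, where failure of the second must free the first before bailing out. A condensed sketch of that unwind, with hypothetical names and -ENOMEM standing in for the driver's _FAIL:

#include <linux/slab.h>

/* Two-step atomic allocation with unwind, as used throughout
 * rtl871x_cmd.c: the command header and its parameter block. */
static int alloc_cmd_pair(void **cmd, void **parm, size_t csz, size_t psz)
{
	*cmd = kmalloc(csz, GFP_ATOMIC);
	if (!*cmd)
		return -ENOMEM;

	*parm = kmalloc(psz, GFP_ATOMIC);
	if (!*parm) {
		kfree(*cmd);	/* unwind the first allocation */
		return -ENOMEM;
	}
	return 0;
}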
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 1b9e24900..e205adf24 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -399,7 +399,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
pwep = kzalloc(sizeof(*pwep), GFP_ATOMIC);
- if (pwep == NULL)
+ if (!pwep)
return -ENOMEM;
pwep->KeyLength = wep_key_len;
pwep->Length = wep_key_len +
@@ -1060,8 +1060,8 @@ static int r8711_wx_set_wap(struct net_device *dev,
while (1) {
if (end_of_queue_search(phead, pmlmepriv->pscanned))
break;
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned,
- struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned,
+ struct wlan_network, list);
pmlmepriv->pscanned = pmlmepriv->pscanned->next;
dst_bssid = pnetwork->network.MacAddress;
if (!memcmp(dst_bssid, temp->sa_data, ETH_ALEN)) {
@@ -1216,7 +1216,7 @@ static int r8711_wx_get_scan(struct net_device *dev,
ret = -E2BIG;
break;
}
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
ev = translate_scan(padapter, a, pnetwork, ev, stop);
plist = plist->next;
}
@@ -1271,8 +1271,8 @@ static int r8711_wx_set_essid(struct net_device *dev,
while (1) {
if (end_of_queue_search(phead, pmlmepriv->pscanned))
break;
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned,
- struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned,
+ struct wlan_network, list);
pmlmepriv->pscanned = pmlmepriv->pscanned->next;
dst_ssid = pnetwork->network.Ssid.Ssid;
if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength))
@@ -1793,7 +1793,7 @@ static int r871x_wx_set_enc_ext(struct net_device *dev,
param_len = sizeof(struct ieee_param) + pext->key_len;
param = kzalloc(param_len, GFP_ATOMIC);
- if (param == NULL)
+ if (!param)
return -ENOMEM;
param->cmd = IEEE_CMD_SET_ENCRYPTION;
eth_broadcast_addr(param->sta_addr);
@@ -1986,7 +1986,7 @@ static int r871x_get_ap_info(struct net_device *dev,
while (1) {
if (end_of_queue_search(phead, plist))
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!mac_pton(data, bssid)) {
netdev_info(dev, "r8712u: Invalid BSSID '%s'.\n",
(u8 *)data);
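
The scan-list walks converted above still advance plist by hand around end_of_queue_search(). With container_of() in place, the same traversal maps directly onto the list_for_each_entry() iterator from list.h; this is not part of the patch, just the standard form the code now resembles. A sketch with hypothetical types:

#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/string.h>

struct demo_network {
	struct list_head list;
	u8 bssid[ETH_ALEN];
};

/* Equivalent of the hand-rolled phead/plist walk in r8711_wx_set_wap():
 * find the scanned network whose BSSID matches mac. */
static struct demo_network *find_bssid(struct list_head *head, const u8 *mac)
{
	struct demo_network *pnetwork;

	list_for_each_entry(pnetwork, head, list)
		if (!memcmp(pnetwork->bssid, mac, ETH_ALEN))
			return pnetwork;

	return NULL;
}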
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index f772675ae..56760cda8 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -34,12 +34,6 @@
#include "usb_osintf.h"
#include "usb_ops.h"
-#define IS_MAC_ADDRESS_BROADCAST(addr) \
-( \
- ((addr[0] == 0xff) && (addr[1] == 0xff) && \
- (addr[2] == 0xff) && (addr[3] == 0xff) && \
- (addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
-)
static u8 validate_ssid(struct ndis_802_11_ssid *ssid)
{
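
The deleted IS_MAC_ADDRESS_BROADCAST macro duplicated a helper the kernel already provides: is_broadcast_ether_addr() from <linux/etherdevice.h>, which performs the same all-0xff test in an optimizable form. A minimal equivalent:

#include <linux/etherdevice.h>

static bool demo_is_bcast(const u8 *addr)
{
	/* True iff all six octets are 0xff; replaces the open-coded
	 * macro removed above. */
	return is_broadcast_ether_addr(addr);
}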
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 62d4ae85a..772bf9fa9 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -155,7 +155,7 @@ static struct wlan_network *_r8712_find_network(struct __queue *scanned_queue,
phead = &scanned_queue->queue;
plist = phead->next;
while (plist != phead) {
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
plist = plist->next;
if (!memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN))
break;
@@ -176,7 +176,7 @@ static void _free_network_queue(struct _adapter *padapter)
phead = &scanned_queue->queue;
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
plist = plist->next;
_free_network(pmlmepriv, pnetwork);
}
@@ -304,7 +304,7 @@ struct wlan_network *r8712_get_oldest_wlan_network(
while (1) {
if (end_of_queue_search(phead, plist) == true)
break;
- pwlan = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pwlan = container_of(plist, struct wlan_network, list);
if (pwlan->fixed != true) {
if (oldest == NULL ||
time_after((unsigned long)oldest->last_scanned,
@@ -390,7 +390,7 @@ static void update_scanned_network(struct _adapter *adapter,
if (end_of_queue_search(phead, plist))
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (is_same_network(&pnetwork->network, target))
break;
if ((oldest == ((struct wlan_network *)0)) ||
@@ -1135,8 +1135,8 @@ int r8712_select_and_join_from_scan(struct mlme_priv *pmlmepriv)
}
return _FAIL;
}
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned,
- struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned,
+ struct wlan_network, list);
if (pnetwork == NULL)
return _FAIL;
pmlmepriv->pscanned = pmlmepriv->pscanned->next;
@@ -1205,7 +1205,7 @@ sint r8712_set_auth(struct _adapter *adapter,
return _FAIL;
psetauthparm = kzalloc(sizeof(*psetauthparm), GFP_ATOMIC);
- if (psetauthparm == NULL) {
+ if (!psetauthparm) {
kfree(pcmd);
return _FAIL;
}
@@ -1234,7 +1234,7 @@ sint r8712_set_key(struct _adapter *adapter,
if (!pcmd)
return _FAIL;
psetkeyparm = kzalloc(sizeof(*psetkeyparm), GFP_ATOMIC);
- if (psetkeyparm == NULL) {
+ if (!psetkeyparm) {
ret = _FAIL;
goto err_free_cmd;
}
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 616ca3965..23c143890 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -142,7 +142,7 @@ void r8712_free_recvframe_queue(struct __queue *pframequeue,
phead = &pframequeue->queue;
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ precvframe = container_of(plist, union recv_frame, u.list);
plist = plist->next;
r8712_free_recvframe(precvframe, pfree_recv_queue);
}
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index e90c00de7..e11ce2896 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -216,8 +216,8 @@ void r8712_free_all_stainfo(struct _adapter *padapter)
phead = &(pstapriv->sta_hash[index]);
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- psta = LIST_CONTAINOR(plist,
- struct sta_info, hash_list);
+ psta = container_of(plist,
+ struct sta_info, hash_list);
plist = plist->next;
if (pbcmc_stainfo != psta)
r8712_free_stainfo(padapter, psta);
@@ -241,7 +241,7 @@ struct sta_info *r8712_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
phead = &(pstapriv->sta_hash[index]);
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+ psta = container_of(plist, struct sta_info, hash_list);
if ((!memcmp(psta->hwaddr, hwaddr, ETH_ALEN))) {
/* if found the matched address */
break;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index c6d952f5d..99256baaf 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -848,7 +848,7 @@ void r8712_free_xmitframe_queue(struct xmit_priv *pxmitpriv,
phead = &pframequeue->queue;
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+ pxmitframe = container_of(plist, struct xmit_frame, list);
plist = plist->next;
r8712_free_xmitframe(pxmitpriv, pxmitframe);
}
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 454cdf6c7..6f1234570 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -504,7 +504,7 @@ int r8712_usbctrl_vendorreq(struct intf_priv *pintfpriv, u8 request, u16 value,
u8 *palloc_buf, *pIo_buf;
palloc_buf = kmalloc((u32)len + 16, GFP_ATOMIC);
- if (palloc_buf == NULL)
+ if (!palloc_buf)
return -ENOMEM;
pIo_buf = palloc_buf + 16 - ((addr_t)(palloc_buf) & 0x0f);
if (requesttype == 0x01) {
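
Beyond the !palloc_buf style fix, the context shows why 16 extra bytes are allocated here: pIo_buf is rounded up to the next 16-byte boundary inside the oversized buffer, while palloc_buf keeps the pointer that must eventually be kfree()d. The arithmetic, spelled out as a hedged sketch (the driver's addr_t cast is replaced by unsigned long):

#include <linux/slab.h>
#include <linux/types.h>

/* Return a 16-byte-aligned buffer of len bytes by over-allocating and
 * rounding the pointer up; *to_free receives the raw pointer for kfree(). */
static void *alloc_aligned16(size_t len, void **to_free)
{
	u8 *raw = kmalloc(len + 16, GFP_ATOMIC);

	if (!raw)
		return NULL;
	*to_free = raw;		/* kfree() this, never the aligned result */
	return raw + 16 - ((unsigned long)raw & 0x0f);
}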
diff --git a/drivers/staging/rtl8723au/Kconfig b/drivers/staging/rtl8723au/Kconfig
index 435f3594d..277c1ab69 100644
--- a/drivers/staging/rtl8723au/Kconfig
+++ b/drivers/staging/rtl8723au/Kconfig
@@ -1,5 +1,5 @@
config R8723AU
- tristate "Realtek RTL8723AU Wireless LAN NIC driver"
+ tristate "Realtek RTL8723AU Wireless LAN NIC driver (deprecated)"
depends on USB && WLAN && RFKILL
select WIRELESS_EXT
select WEXT_PRIV
@@ -7,7 +7,10 @@ config R8723AU
default n
---help---
This option adds the Realtek RTL8723AU USB device such as found in
- the Lenovo Yogi 13 tablet. If built as a module, it will be called r8723au.
+ the Lenovo Yoga 13 tablet. If built as a module, it will be called r8723au.
+
+ Note: This driver is deprecated and scheduled to be removed in a
+ future kernel release. Please use rtl8xxxu instead.
if R8723AU
diff --git a/drivers/staging/rtl8723au/core/rtw_ap.c b/drivers/staging/rtl8723au/core/rtw_ap.c
index f68e27702..aad686da3 100644
--- a/drivers/staging/rtl8723au/core/rtw_ap.c
+++ b/drivers/staging/rtl8723au/core/rtw_ap.c
@@ -1719,7 +1719,8 @@ void stop_ap_mode23a(struct rtw_adapter *padapter)
}
spin_unlock_bh(&pacl_node_q->lock);
- DBG_8723A("%s, free acl_node_queue, num =%d\n", __func__, pacl_list->num);
+ DBG_8723A("%s, free acl_node_queue, num =%d\n",
+ __func__, pacl_list->num);
rtw_sta_flush23a(padapter);
diff --git a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
index f4fff385a..7dd1540eb 100644
--- a/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723au/core/rtw_mlme_ext.c
@@ -2113,10 +2113,10 @@ static int on_action_public23a(struct rtw_adapter *padapter,
if (channel <= RTW_CH_MAX_2G_CHANNEL)
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
else
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_5GHZ);
if (cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pframe,
skb->len, 0))
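
This rename tracks the kernel-wide 4.7 change from IEEE80211_BAND_* to NL80211_BAND_* (enum ieee80211_band had become a pure alias for enum nl80211_band and was dropped). ieee80211_channel_to_frequency() takes the band explicitly because 2.4 GHz and 5 GHz channel numbers overlap; the driver picks the band by channel number, with RTW_CH_MAX_2G_CHANNEL as the 2.4 GHz cutoff. A small illustration, assuming the usual cutoff of 14:

#include <net/cfg80211.h>

/* Map a channel number to its center frequency in MHz, choosing the
 * band the way the driver does (channels 1..14 are 2.4 GHz). */
static int demo_chan_to_freq(int channel)
{
	if (channel <= 14)
		return ieee80211_channel_to_frequency(channel,
						      NL80211_BAND_2GHZ);
	return ieee80211_channel_to_frequency(channel, NL80211_BAND_5GHZ);
}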
diff --git a/drivers/staging/rtl8723au/core/rtw_recv.c b/drivers/staging/rtl8723au/core/rtw_recv.c
index 989ed0726..150dabc2a 100644
--- a/drivers/staging/rtl8723au/core/rtw_recv.c
+++ b/drivers/staging/rtl8723au/core/rtw_recv.c
@@ -211,31 +211,6 @@ u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter)
return cnt;
}
-int rtw_enqueue_recvbuf23a_to_head(struct recv_buf *precvbuf, struct rtw_queue *queue)
-{
- spin_lock_bh(&queue->lock);
-
- list_del_init(&precvbuf->list);
- list_add(&precvbuf->list, get_list_head(queue));
-
- spin_unlock_bh(&queue->lock);
-
- return _SUCCESS;
-}
-
-int rtw_enqueue_recvbuf23a(struct recv_buf *precvbuf, struct rtw_queue *queue)
-{
- unsigned long irqL;
-
- spin_lock_irqsave(&queue->lock, irqL);
-
- list_del_init(&precvbuf->list);
-
- list_add_tail(&precvbuf->list, get_list_head(queue));
- spin_unlock_irqrestore(&queue->lock, irqL);
- return _SUCCESS;
-}
-
struct recv_buf *rtw_dequeue_recvbuf23a (struct rtw_queue *queue)
{
unsigned long irqL;
diff --git a/drivers/staging/rtl8723au/core/rtw_wlan_util.c b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
index cc2b84be9..694cf17f8 100644
--- a/drivers/staging/rtl8723au/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
@@ -304,21 +304,11 @@ inline void rtw_set_oper_ch23a(struct rtw_adapter *adapter, u8 ch)
adapter_to_dvobj(adapter)->oper_channel = ch;
}
-inline u8 rtw_get_oper_bw23a(struct rtw_adapter *adapter)
-{
- return adapter_to_dvobj(adapter)->oper_bwmode;
-}
-
inline void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw)
{
adapter_to_dvobj(adapter)->oper_bwmode = bw;
}
-inline u8 rtw_get_oper_ch23aoffset(struct rtw_adapter *adapter)
-{
- return adapter_to_dvobj(adapter)->oper_ch_offset;
-}
-
inline void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset)
{
adapter_to_dvobj(adapter)->oper_ch_offset = offset;
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
index 8221320d4..ba32ade5e 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
@@ -1175,8 +1175,6 @@ int InitLLTTable23a(struct rtw_adapter *padapter, u32 boundary)
/* Let last entry point to the start entry of ring buffer */
status = _LLTWrite(padapter, Last_Entry_Of_TxPktBuf, txpktbuf_bndy);
- if (status != _SUCCESS)
- return status;
return status;
}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
index ce0d8d894..24c0ff3d8 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
@@ -465,7 +465,7 @@ static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter)
break;
}
- /*----Restore RFENV control type----*/;
+ /*----Restore RFENV control type----*/
switch (eRFPath) {
case RF_PATH_A:
PHY_SetBBReg(Adapter, pPhyReg->rfintfs,
diff --git a/drivers/staging/rtl8723au/include/ieee80211.h b/drivers/staging/rtl8723au/include/ieee80211.h
index 3aa40a325..634102e1b 100644
--- a/drivers/staging/rtl8723au/include/ieee80211.h
+++ b/drivers/staging/rtl8723au/include/ieee80211.h
@@ -266,7 +266,7 @@ join_res:
/* Represent channel details, subset of ieee80211_channel */
struct rtw_ieee80211_channel {
- /* enum ieee80211_band band; */
+ /* enum nl80211_band band; */
/* u16 center_freq; */
u16 hw_value;
u32 flags;
diff --git a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
index ea2a6c914..0e7d3da91 100644
--- a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
@@ -461,9 +461,7 @@ void Update23aTblForSoftAP(u8 *bssrateset, u32 bssratelen);
u8 rtw_get_oper_ch23a(struct rtw_adapter *adapter);
void rtw_set_oper_ch23a(struct rtw_adapter *adapter, u8 ch);
-u8 rtw_get_oper_bw23a(struct rtw_adapter *adapter);
void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw);
-u8 rtw_get_oper_ch23aoffset(struct rtw_adapter *adapter);
void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset);
void set_channel_bwmode23a(struct rtw_adapter *padapter, unsigned char channel,
diff --git a/drivers/staging/rtl8723au/include/rtw_recv.h b/drivers/staging/rtl8723au/include/rtw_recv.h
index dc784be3d..85a5edb45 100644
--- a/drivers/staging/rtl8723au/include/rtw_recv.h
+++ b/drivers/staging/rtl8723au/include/rtw_recv.h
@@ -279,8 +279,6 @@ int rtw_enqueue_recvframe23a(struct recv_frame *precvframe, struct rtw_queue *qu
u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter);
-int rtw_enqueue_recvbuf23a_to_head(struct recv_buf *precvbuf, struct rtw_queue *queue);
-int rtw_enqueue_recvbuf23a(struct recv_buf *precvbuf, struct rtw_queue *queue);
struct recv_buf *rtw_dequeue_recvbuf23a(struct rtw_queue *queue);
void rtw_reordering_ctrl_timeout_handler23a(unsigned long pcontext);
diff --git a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
index 12d18440e..0da559d92 100644
--- a/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723au/os_dep/ioctl_cfg80211.c
@@ -39,7 +39,7 @@ static const u32 rtw_cipher_suites[] = {
}
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -48,7 +48,7 @@ static const u32 rtw_cipher_suites[] = {
}
#define CHAN5G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -143,15 +143,15 @@ static void rtw_5g_rates_init(struct ieee80211_rate *rates)
}
static struct ieee80211_supported_band *
-rtw_spt_band_alloc(enum ieee80211_band band)
+rtw_spt_band_alloc(enum nl80211_band band)
{
struct ieee80211_supported_band *spt_band = NULL;
int n_channels, n_bitrates;
- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
n_channels = RTW_2G_CHANNELS_NUM;
n_bitrates = RTW_G_RATES_NUM;
- } else if (band == IEEE80211_BAND_5GHZ) {
+ } else if (band == NL80211_BAND_5GHZ) {
n_channels = RTW_5G_CHANNELS_NUM;
n_bitrates = RTW_A_RATES_NUM;
} else {
@@ -176,10 +176,10 @@ rtw_spt_band_alloc(enum ieee80211_band band)
spt_band->n_channels = n_channels;
spt_band->n_bitrates = n_bitrates;
- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
rtw_2g_channels_init(spt_band->channels);
rtw_2g_rates_init(spt_band->bitrates);
- } else if (band == IEEE80211_BAND_5GHZ) {
+ } else if (band == NL80211_BAND_5GHZ) {
rtw_5g_channels_init(spt_band->channels);
rtw_5g_rates_init(spt_band->bitrates);
}
@@ -257,10 +257,10 @@ static int rtw_cfg80211_inform_bss(struct rtw_adapter *padapter,
channel = pnetwork->network.DSConfig;
if (channel <= RTW_CH_MAX_2G_CHANNEL)
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
else
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_5GHZ);
notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -322,11 +322,11 @@ void rtw_cfg80211_indicate_connect(struct rtw_adapter *padapter)
if (channel <= RTW_CH_MAX_2G_CHANNEL)
freq =
ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
else
freq =
ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_5GHZ);
notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -2360,10 +2360,10 @@ void rtw_cfg80211_indicate_sta_assoc(struct rtw_adapter *padapter,
channel = pmlmeext->cur_channel;
if (channel <= RTW_CH_MAX_2G_CHANNEL)
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
else
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_5GHZ);
cfg80211_rx_mgmt(padapter->rtw_wdev, freq, 0, pmgmt_frame, frame_len,
0);
@@ -2392,10 +2392,10 @@ void rtw_cfg80211_indicate_sta_disassoc(struct rtw_adapter *padapter,
channel = pmlmeext->cur_channel;
if (channel <= RTW_CH_MAX_2G_CHANNEL)
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
else
freq = ieee80211_channel_to_frequency(channel,
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_5GHZ);
mgmt.frame_control =
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH);
@@ -3109,7 +3109,7 @@ static struct cfg80211_ops rtw_cfg80211_ops = {
};
static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap,
- enum ieee80211_band band, u8 rf_type)
+ enum nl80211_band band, u8 rf_type)
{
#define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */
@@ -3133,7 +3133,7 @@ static void rtw_cfg80211_init_ht_capab(struct ieee80211_sta_ht_cap *ht_cap,
ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
/*
- *hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ *hw->wiphy->bands[NL80211_BAND_2GHZ]
*base on ant_num
*rx_mask: RX mask
*if rx_ant = 1 rx_mask[0]= 0xff;==>MCS0-MCS7
@@ -3173,19 +3173,19 @@ void rtw_cfg80211_init_wiphy(struct rtw_adapter *padapter)
/* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */
{
- bands = wiphy->bands[IEEE80211_BAND_2GHZ];
+ bands = wiphy->bands[NL80211_BAND_2GHZ];
if (bands)
rtw_cfg80211_init_ht_capab(&bands->ht_cap,
- IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ,
rf_type);
}
/* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */
{
- bands = wiphy->bands[IEEE80211_BAND_5GHZ];
+ bands = wiphy->bands[NL80211_BAND_5GHZ];
if (bands)
rtw_cfg80211_init_ht_capab(&bands->ht_cap,
- IEEE80211_BAND_5GHZ,
+ NL80211_BAND_5GHZ,
rf_type);
}
}
@@ -3224,11 +3224,11 @@ static void rtw_cfg80211_preinit_wiphy(struct rtw_adapter *padapter,
wiphy->n_cipher_suites = ARRAY_SIZE(rtw_cipher_suites);
/* if (padapter->registrypriv.wireless_mode & WIRELESS_11G) */
- wiphy->bands[IEEE80211_BAND_2GHZ] =
- rtw_spt_band_alloc(IEEE80211_BAND_2GHZ);
+ wiphy->bands[NL80211_BAND_2GHZ] =
+ rtw_spt_band_alloc(NL80211_BAND_2GHZ);
/* if (padapter->registrypriv.wireless_mode & WIRELESS_11A) */
- wiphy->bands[IEEE80211_BAND_5GHZ] =
- rtw_spt_band_alloc(IEEE80211_BAND_5GHZ);
+ wiphy->bands[NL80211_BAND_5GHZ] =
+ rtw_spt_band_alloc(NL80211_BAND_5GHZ);
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
wiphy->flags |= WIPHY_FLAG_OFFCHAN_TX | WIPHY_FLAG_HAVE_AP_SME;
@@ -3313,8 +3313,8 @@ void rtw_wdev_free(struct wireless_dev *wdev)
if (!wdev)
return;
- kfree(wdev->wiphy->bands[IEEE80211_BAND_2GHZ]);
- kfree(wdev->wiphy->bands[IEEE80211_BAND_5GHZ]);
+ kfree(wdev->wiphy->bands[NL80211_BAND_2GHZ]);
+ kfree(wdev->wiphy->bands[NL80211_BAND_5GHZ]);
wiphy_free(wdev->wiphy);
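
The same enum rename threads through the cfg80211 registration path above: wiphy->bands[] is indexed by enum nl80211_band, and each entry installed in rtw_cfg80211_preinit_wiphy() is released again in rtw_wdev_free(). rtw_spt_band_alloc() allocates the struct ieee80211_supported_band together with its channel and rate arrays in one block, which is why a single kfree() per band suffices. A hedged sketch of that allocation shape; the channel and rate counts are illustrative stand-ins for the RTW_*_CHANNELS_NUM/RATES_NUM constants:

#include <linux/slab.h>
#include <net/cfg80211.h>

static struct ieee80211_supported_band *demo_band_alloc(enum nl80211_band band)
{
	int n_channels, n_bitrates;

	if (band == NL80211_BAND_2GHZ) {
		n_channels = 14;	/* illustrative counts */
		n_bitrates = 13;
	} else if (band == NL80211_BAND_5GHZ) {
		n_channels = 37;
		n_bitrates = 8;
	} else {
		return NULL;		/* no 60 GHz support */
	}

	/* One block: struct + channels[] + bitrates[]; the caller points
	 * ->channels and ->bitrates into the tail, so one kfree() frees all. */
	return kzalloc(sizeof(struct ieee80211_supported_band) +
		       n_channels * sizeof(struct ieee80211_channel) +
		       n_bitrates * sizeof(struct ieee80211_rate),
		       GFP_KERNEL);
}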
diff --git a/drivers/staging/rtl8723au/os_dep/usb_intf.c b/drivers/staging/rtl8723au/os_dep/usb_intf.c
index 27b3a5b7d..cf83efffb 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_intf.c
@@ -532,6 +532,7 @@ static int rtw_drv_init(struct usb_interface *pusb_intf,
{
struct rtw_adapter *if1 = NULL;
struct dvobj_priv *dvobj;
+ struct usb_device *udev;
int status = _FAIL;
RT_TRACE(_module_hci_intfs_c_, _drv_err_, "+rtw_drv_init\n");
@@ -544,6 +545,10 @@ static int rtw_drv_init(struct usb_interface *pusb_intf,
goto exit;
}
+ udev = dvobj->pusbdev;
+ dev_warn(&udev->dev, "WARNING: The rtl8723au driver is deprecated!");
+ dev_warn(&udev->dev, "Please use the rtl8xxxu driver for this device!");
+
if1 = rtw_usb_if1_init(dvobj, pusb_intf, pdid);
if (!if1) {
DBG_8723A("rtw_init_primary_adapter Failed!\n");
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index a780185a3..0f0cd4a03 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -49,7 +49,7 @@ static int ms_parse_err_code(struct rtsx_chip *chip)
}
static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode,
- u8 tpc, u8 cnt, u8 cfg)
+ u8 tpc, u8 cnt, u8 cfg)
{
struct ms_info *ms_card = &chip->ms_card;
int retval;
@@ -2691,7 +2691,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
}
if ((log_blk < ms_start_idx[seg_no]) ||
- (log_blk >= ms_start_idx[seg_no+1])) {
+ (log_blk >= ms_start_idx[seg_no + 1])) {
if (!(chip->card_wp & MS_CARD)) {
retval = ms_erase_block(chip, phy_blk);
if (retval != STATUS_SUCCESS)
@@ -3836,7 +3836,7 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
start_page = (u8)(start_sector & ms_card->page_off);
for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1; seg_no++) {
- if (log_blk < ms_start_idx[seg_no+1])
+ if (log_blk < ms_start_idx[seg_no + 1])
break;
}
@@ -4264,7 +4264,7 @@ int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
memset(buf1, 0, 32);
rtsx_stor_get_xfer_buf(buf2, min_t(int, 12, scsi_bufflen(srb)), srb);
for (i = 0; i < 8; i++)
- buf1[8+i] = buf2[4+i];
+ buf1[8 + i] = buf2[4 + i];
retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT,
buf1, 32);
@@ -4399,10 +4399,10 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_stor_get_xfer_buf(buf, bufflen, srb);
for (i = 0; i < 8; i++)
- buf[i] = buf[4+i];
+ buf[i] = buf[4 + i];
for (i = 0; i < 24; i++)
- buf[8+i] = 0;
+ buf[8 + i] = 0;
retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA,
32, WAIT_INT, buf, 32);
@@ -4511,10 +4511,10 @@ int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_stor_get_xfer_buf(buf, bufflen, srb);
for (i = 0; i < 8; i++)
- buf[i] = buf[4+i];
+ buf[i] = buf[4 + i];
for (i = 0; i < 24; i++)
- buf[8+i] = 0;
+ buf[8 + i] = 0;
retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT,
buf, 32);
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
index 437436f5d..231833a30 100644
--- a/drivers/staging/rts5208/rtsx_card.c
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -628,11 +628,6 @@ void rtsx_init_cards(struct rtsx_chip *chip)
}
}
-static inline u8 double_depth(u8 depth)
-{
- return (depth > 1) ? (depth - 1) : depth;
-}
-
int switch_ssc_clock(struct rtsx_chip *chip, int clk)
{
int retval;
@@ -1184,22 +1179,6 @@ int check_card_wp(struct rtsx_chip *chip, unsigned int lun)
return 0;
}
-int check_card_fail(struct rtsx_chip *chip, unsigned int lun)
-{
- if (chip->card_fail & chip->lun2card[lun])
- return 1;
-
- return 0;
-}
-
-int check_card_ejected(struct rtsx_chip *chip, unsigned int lun)
-{
- if (chip->card_ejected & chip->lun2card[lun])
- return 1;
-
- return 0;
-}
-
u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun)
{
if ((chip->card_ready & chip->lun2card[lun]) == XD_CARD)
diff --git a/drivers/staging/rts5208/rtsx_card.h b/drivers/staging/rts5208/rtsx_card.h
index 8f2cf9a4e..56df9a431 100644
--- a/drivers/staging/rts5208/rtsx_card.h
+++ b/drivers/staging/rts5208/rtsx_card.h
@@ -1024,8 +1024,6 @@ int detect_card_cd(struct rtsx_chip *chip, int card);
int check_card_exist(struct rtsx_chip *chip, unsigned int lun);
int check_card_ready(struct rtsx_chip *chip, unsigned int lun);
int check_card_wp(struct rtsx_chip *chip, unsigned int lun);
-int check_card_fail(struct rtsx_chip *chip, unsigned int lun);
-int check_card_ejected(struct rtsx_chip *chip, unsigned int lun);
void eject_card(struct rtsx_chip *chip, unsigned int lun);
u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun);
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index c0ce659a5..bcc4b666d 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -43,14 +43,6 @@ static void rtsx_calibration(struct rtsx_chip *chip)
rtsx_write_phy_register(chip, 0x00, 0x0288);
}
-void rtsx_disable_card_int(struct rtsx_chip *chip)
-{
- u32 reg = rtsx_readl(chip, RTSX_BIER);
-
- reg &= ~(XD_INT_EN | SD_INT_EN | MS_INT_EN);
- rtsx_writel(chip, RTSX_BIER, reg);
-}
-
void rtsx_enable_card_int(struct rtsx_chip *chip)
{
u32 reg = rtsx_readl(chip, RTSX_BIER);
@@ -1447,12 +1439,6 @@ delink_stage:
rtsx_delink_stage(chip);
}
-void rtsx_undo_delink(struct rtsx_chip *chip)
-{
- chip->auto_delink_allowed = 0;
- rtsx_write_register(chip, CHANGE_LINK_STATE, 0x0A, 0x00);
-}
-
/**
* rtsx_stop_cmd - stop command transfer and DMA transfer
* @chip: Realtek's card reader chip
@@ -2000,27 +1986,6 @@ int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
return STATUS_SUCCESS;
}
-int rtsx_check_link_ready(struct rtsx_chip *chip)
-{
- int retval;
- u8 val;
-
- retval = rtsx_read_register(chip, IRQSTAT0, &val);
- if (retval) {
- rtsx_trace(chip);
- return retval;
- }
-
- dev_dbg(rtsx_dev(chip), "IRQSTAT0: 0x%x\n", val);
- if (val & LINK_RDY_INT) {
- dev_dbg(rtsx_dev(chip), "Delinked!\n");
- rtsx_write_register(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);
- return STATUS_FAIL;
- }
-
- return STATUS_SUCCESS;
-}
-
static void rtsx_handle_pm_dstate(struct rtsx_chip *chip, u8 dstate)
{
u32 ultmp;
diff --git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h
index c295b1eed..c08164f32 100644
--- a/drivers/staging/rts5208/rtsx_chip.h
+++ b/drivers/staging/rts5208/rtsx_chip.h
@@ -950,7 +950,6 @@ do { \
int rtsx_force_power_on(struct rtsx_chip *chip, u8 ctl);
int rtsx_force_power_down(struct rtsx_chip *chip, u8 ctl);
-void rtsx_disable_card_int(struct rtsx_chip *chip);
void rtsx_enable_card_int(struct rtsx_chip *chip);
void rtsx_enable_bus_int(struct rtsx_chip *chip);
void rtsx_disable_bus_int(struct rtsx_chip *chip);
@@ -958,7 +957,6 @@ int rtsx_reset_chip(struct rtsx_chip *chip);
int rtsx_init_chip(struct rtsx_chip *chip);
void rtsx_release_chip(struct rtsx_chip *chip);
void rtsx_polling_func(struct rtsx_chip *chip);
-void rtsx_undo_delink(struct rtsx_chip *chip);
void rtsx_stop_cmd(struct rtsx_chip *chip, int card);
int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data);
int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data);
@@ -975,7 +973,6 @@ int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val);
int rtsx_write_efuse(struct rtsx_chip *chip, u8 addr, u8 val);
int rtsx_clr_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit);
int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit);
-int rtsx_check_link_ready(struct rtsx_chip *chip);
void rtsx_enter_ss(struct rtsx_chip *chip);
void rtsx_exit_ss(struct rtsx_chip *chip);
int rtsx_pre_handle_interrupt(struct rtsx_chip *chip);
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index 87d697623..6219e0475 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -1928,9 +1928,9 @@ static int sd_tuning_rx(struct rtsx_chip *chip)
tuning_cmd = sd_sdr_tuning_rx_cmd;
} else {
- if (CHK_MMC_DDR52(sd_card))
+ if (CHK_MMC_DDR52(sd_card)) {
tuning_cmd = mmc_ddr_tunning_rx_cmd;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2054,9 +2054,9 @@ static int sd_tuning_tx(struct rtsx_chip *chip)
tuning_cmd = sd_sdr_tuning_tx_cmd;
} else {
- if (CHK_MMC_DDR52(sd_card))
+ if (CHK_MMC_DDR52(sd_card)) {
tuning_cmd = sd_ddr_tuning_tx_cmd;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2678,9 +2678,9 @@ RTY_SD_RST:
}
j++;
- if (j < 3)
+ if (j < 3) {
goto RTY_SD_RST;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2690,9 +2690,9 @@ RTY_SD_RST:
SD_RSP_TYPE_R3, rsp, 5);
if (retval != STATUS_SUCCESS) {
k++;
- if (k < 3)
+ if (k < 3) {
goto RTY_SD_RST;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
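
The three sd.c hunks are pure style: per kernel coding style, once one branch of an if/else needs braces, every branch gets them, even a single-statement branch. In miniature:

/* Unbalanced braces (if bare, else braced) trip checkpatch; after the
 * fix both branches carry braces. */
static int demo_pick(int cond, int a, int *out)
{
	if (cond) {
		*out = a;
	} else {
		*out = 0;
		return -1;
	}
	return 0;
}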
diff --git a/drivers/staging/skein/skein_api.c b/drivers/staging/skein/skein_api.c
index 36f849fbb..cab26e736 100644
--- a/drivers/staging/skein/skein_api.c
+++ b/drivers/staging/skein/skein_api.c
@@ -165,7 +165,6 @@ int skein_update(struct skein_ctx *ctx, const u8 *msg,
break;
}
return ret;
-
}
int skein_update_bits(struct skein_ctx *ctx, const u8 *msg,
@@ -210,7 +209,7 @@ int skein_update_bits(struct skein_ctx *ctx, const u8 *msg,
/* internal sanity check: there IS a partial byte in the buffer! */
skein_assert(length != 0);
/* partial byte bit mask */
- mask = (u8) (1u << (7 - (msg_bit_cnt & 7)));
+ mask = (u8)(1u << (7 - (msg_bit_cnt & 7)));
/* apply bit padding on final byte (in the buffer) */
up[length - 1] = (u8)((up[length - 1] & (0 - mask)) | mask);
diff --git a/drivers/staging/skein/skein_base.c b/drivers/staging/skein/skein_base.c
index 25a01ca76..c24a57396 100644
--- a/drivers/staging/skein/skein_base.c
+++ b/drivers/staging/skein/skein_base.c
@@ -58,7 +58,7 @@ int skein_256_init(struct skein_256_ctx *ctx, size_t hash_bit_len)
cfg.w[1] = skein_swap64(hash_bit_len);
cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
- memset(&cfg.w[3], 0, sizeof(cfg) - 3*sizeof(cfg.w[0]));
+ memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
@@ -98,7 +98,7 @@ int skein_256_init_ext(struct skein_256_ctx *ctx, size_t hash_bit_len,
skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
/* do a mini-Init right here */
/* set output hash bit count = state size */
- ctx->h.hash_bit_len = 8*sizeof(ctx->x);
+ ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
/* set tweaks: T0 = 0; T1 = KEY type */
skein_start_new_type(ctx, KEY);
/* zero the initial chaining variables */
@@ -171,7 +171,7 @@ int skein_256_update(struct skein_256_ctx *ctx, const u8 *msg,
*/
if (msg_byte_cnt > SKEIN_256_BLOCK_BYTES) {
/* number of full blocks to process */
- n = (msg_byte_cnt-1) / SKEIN_256_BLOCK_BYTES;
+ n = (msg_byte_cnt - 1) / SKEIN_256_BLOCK_BYTES;
skein_256_process_block(ctx, msg, n,
SKEIN_256_BLOCK_BYTES);
msg_byte_cnt -= n * SKEIN_256_BLOCK_BYTES;
@@ -205,7 +205,7 @@ int skein_256_final(struct skein_256_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_256_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_256_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -219,19 +219,19 @@ int skein_256_final(struct skein_256_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_256_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_256_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_256_BLOCK_BYTES;
if (n >= SKEIN_256_BLOCK_BYTES)
n = SKEIN_256_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_256_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_256_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -282,7 +282,7 @@ int skein_512_init(struct skein_512_ctx *ctx, size_t hash_bit_len)
cfg.w[1] = skein_swap64(hash_bit_len);
cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
- memset(&cfg.w[3], 0, sizeof(cfg) - 3*sizeof(cfg.w[0]));
+ memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
@@ -326,7 +326,7 @@ int skein_512_init_ext(struct skein_512_ctx *ctx, size_t hash_bit_len,
skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
/* do a mini-Init right here */
/* set output hash bit count = state size */
- ctx->h.hash_bit_len = 8*sizeof(ctx->x);
+ ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
/* set tweaks: T0 = 0; T1 = KEY type */
skein_start_new_type(ctx, KEY);
/* zero the initial chaining variables */
@@ -398,7 +398,7 @@ int skein_512_update(struct skein_512_ctx *ctx, const u8 *msg,
*/
if (msg_byte_cnt > SKEIN_512_BLOCK_BYTES) {
/* number of full blocks to process */
- n = (msg_byte_cnt-1) / SKEIN_512_BLOCK_BYTES;
+ n = (msg_byte_cnt - 1) / SKEIN_512_BLOCK_BYTES;
skein_512_process_block(ctx, msg, n,
SKEIN_512_BLOCK_BYTES);
msg_byte_cnt -= n * SKEIN_512_BLOCK_BYTES;
@@ -432,7 +432,7 @@ int skein_512_final(struct skein_512_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_512_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_512_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -446,19 +446,19 @@ int skein_512_final(struct skein_512_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_512_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_512_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_512_BLOCK_BYTES;
if (n >= SKEIN_512_BLOCK_BYTES)
n = SKEIN_512_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_512_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_512_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -506,7 +506,7 @@ int skein_1024_init(struct skein_1024_ctx *ctx, size_t hash_bit_len)
cfg.w[1] = skein_swap64(hash_bit_len);
cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
- memset(&cfg.w[3], 0, sizeof(cfg) - 3*sizeof(cfg.w[0]));
+ memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
@@ -547,7 +547,7 @@ int skein_1024_init_ext(struct skein_1024_ctx *ctx, size_t hash_bit_len,
skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
/* do a mini-Init right here */
/* set output hash bit count = state size */
- ctx->h.hash_bit_len = 8*sizeof(ctx->x);
+ ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
/* set tweaks: T0 = 0; T1 = KEY type */
skein_start_new_type(ctx, KEY);
/* zero the initial chaining variables */
@@ -620,7 +620,7 @@ int skein_1024_update(struct skein_1024_ctx *ctx, const u8 *msg,
*/
if (msg_byte_cnt > SKEIN_1024_BLOCK_BYTES) {
/* number of full blocks to process */
- n = (msg_byte_cnt-1) / SKEIN_1024_BLOCK_BYTES;
+ n = (msg_byte_cnt - 1) / SKEIN_1024_BLOCK_BYTES;
skein_1024_process_block(ctx, msg, n,
SKEIN_1024_BLOCK_BYTES);
msg_byte_cnt -= n * SKEIN_1024_BLOCK_BYTES;
@@ -654,7 +654,7 @@ int skein_1024_final(struct skein_1024_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_1024_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_1024_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -668,19 +668,19 @@ int skein_1024_final(struct skein_1024_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_1024_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_1024_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_1024_BLOCK_BYTES;
if (n >= SKEIN_1024_BLOCK_BYTES)
n = SKEIN_1024_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_1024_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_1024_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -702,7 +702,7 @@ int skein_256_final_pad(struct skein_256_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_256_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_256_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -724,7 +724,7 @@ int skein_512_final_pad(struct skein_512_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_512_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_512_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -746,7 +746,7 @@ int skein_1024_final_pad(struct skein_1024_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_1024_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_1024_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -775,19 +775,19 @@ int skein_256_output(struct skein_256_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_256_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_256_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_256_BLOCK_BYTES;
if (n >= SKEIN_256_BLOCK_BYTES)
n = SKEIN_256_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_256_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_256_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -812,19 +812,19 @@ int skein_512_output(struct skein_512_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_512_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_512_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_512_BLOCK_BYTES;
if (n >= SKEIN_512_BLOCK_BYTES)
n = SKEIN_512_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_512_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_512_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -849,19 +849,19 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_1024_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_1024_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_1024_BLOCK_BYTES;
if (n >= SKEIN_1024_BLOCK_BYTES)
n = SKEIN_1024_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_1024_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_1024_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
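
The reflowed loops above are Skein's output transform: the final chaining value acts as a key while successive 64-bit counter blocks are run through the block function, emitting byte_cnt bytes one block at a time. Stripped of the context structure, the loop has the following shape; process_block() is a hypothetical stand-in for skein_*_process_block(), and the key-restore memcpy() the real code performs each pass is elided:

#include <linux/string.h>
#include <linux/types.h>

/* Counter-mode output: for each counter i, run one block and emit
 * min(blk_bytes, remaining) bytes of the resulting state. */
static void demo_output(u8 *out, size_t byte_cnt, size_t blk_bytes, u8 *state,
			void (*process_block)(u64 ctr, u8 *state))
{
	size_t i, n;

	for (i = 0; i * blk_bytes < byte_cnt; i++) {
		process_block(i, state);	/* run counter block i */
		n = byte_cnt - i * blk_bytes;	/* bytes left to emit */
		if (n >= blk_bytes)
			n = blk_bytes;
		memcpy(out + i * blk_bytes, state, n);
	}
}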
diff --git a/drivers/staging/skein/skein_base.h b/drivers/staging/skein/skein_base.h
index 3c7f8ad36..dc464f334 100644
--- a/drivers/staging/skein/skein_base.h
+++ b/drivers/staging/skein/skein_base.h
@@ -32,7 +32,7 @@
/* below two prototype assume we are handed aligned data */
#define skein_put64_lsb_first(dst08, src64, b_cnt) memcpy(dst08, src64, b_cnt)
#define skein_get64_lsb_first(dst64, src08, w_cnt) \
- memcpy(dst64, src08, 8*(w_cnt))
+ memcpy(dst64, src08, 8 * (w_cnt))
#define skein_swap64(w64) (w64)
enum {
@@ -48,17 +48,17 @@ enum {
#define SKEIN_1024_STATE_WORDS 16
#define SKEIN_MAX_STATE_WORDS 16
-#define SKEIN_256_STATE_BYTES (8*SKEIN_256_STATE_WORDS)
-#define SKEIN_512_STATE_BYTES (8*SKEIN_512_STATE_WORDS)
-#define SKEIN_1024_STATE_BYTES (8*SKEIN_1024_STATE_WORDS)
+#define SKEIN_256_STATE_BYTES (8 * SKEIN_256_STATE_WORDS)
+#define SKEIN_512_STATE_BYTES (8 * SKEIN_512_STATE_WORDS)
+#define SKEIN_1024_STATE_BYTES (8 * SKEIN_1024_STATE_WORDS)
-#define SKEIN_256_STATE_BITS (64*SKEIN_256_STATE_WORDS)
-#define SKEIN_512_STATE_BITS (64*SKEIN_512_STATE_WORDS)
-#define SKEIN_1024_STATE_BITS (64*SKEIN_1024_STATE_WORDS)
+#define SKEIN_256_STATE_BITS (64 * SKEIN_256_STATE_WORDS)
+#define SKEIN_512_STATE_BITS (64 * SKEIN_512_STATE_WORDS)
+#define SKEIN_1024_STATE_BITS (64 * SKEIN_1024_STATE_WORDS)
-#define SKEIN_256_BLOCK_BYTES (8*SKEIN_256_STATE_WORDS)
-#define SKEIN_512_BLOCK_BYTES (8*SKEIN_512_STATE_WORDS)
-#define SKEIN_1024_BLOCK_BYTES (8*SKEIN_1024_STATE_WORDS)
+#define SKEIN_256_BLOCK_BYTES (8 * SKEIN_256_STATE_WORDS)
+#define SKEIN_512_BLOCK_BYTES (8 * SKEIN_512_STATE_WORDS)
+#define SKEIN_1024_BLOCK_BYTES (8 * SKEIN_1024_STATE_WORDS)
struct skein_ctx_hdr {
size_t hash_bit_len; /* size of hash result, in bits */
@@ -84,11 +84,6 @@ struct skein_1024_ctx { /* 1024-bit Skein hash context structure */
u8 b[SKEIN_1024_BLOCK_BYTES]; /* partial block buf (8-byte aligned) */
};
-static inline u64 rotl_64(u64 x, u8 N)
-{
- return (x << N) | (x >> (64 - N));
-}
-
/* Skein APIs for (incremental) "straight hashing" */
int skein_256_init(struct skein_256_ctx *ctx, size_t hash_bit_len);
int skein_512_init(struct skein_512_ctx *ctx, size_t hash_bit_len);
@@ -162,13 +157,13 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
#define SKEIN_T1_POS_FINAL SKEIN_T1_BIT(127) /* 127 final blk flag */
/* tweak word tweak[1]: flag bit definition(s) */
-#define SKEIN_T1_FLAG_FIRST (((u64) 1) << SKEIN_T1_POS_FIRST)
-#define SKEIN_T1_FLAG_FINAL (((u64) 1) << SKEIN_T1_POS_FINAL)
-#define SKEIN_T1_FLAG_BIT_PAD (((u64) 1) << SKEIN_T1_POS_BIT_PAD)
+#define SKEIN_T1_FLAG_FIRST (((u64)1) << SKEIN_T1_POS_FIRST)
+#define SKEIN_T1_FLAG_FINAL (((u64)1) << SKEIN_T1_POS_FINAL)
+#define SKEIN_T1_FLAG_BIT_PAD (((u64)1) << SKEIN_T1_POS_BIT_PAD)
/* tweak word tweak[1]: tree level bit field mask */
#define SKEIN_T1_TREE_LVL_MASK (((u64)0x7F) << SKEIN_T1_POS_TREE_LVL)
-#define SKEIN_T1_TREE_LEVEL(n) (((u64) (n)) << SKEIN_T1_POS_TREE_LVL)
+#define SKEIN_T1_TREE_LEVEL(n) (((u64)(n)) << SKEIN_T1_POS_TREE_LVL)
/* tweak word tweak[1]: block type field */
#define SKEIN_BLK_TYPE_KEY (0) /* key, for MAC and KDF */
@@ -181,7 +176,7 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
#define SKEIN_BLK_TYPE_OUT (63) /* output stage */
#define SKEIN_BLK_TYPE_MASK (63) /* bit field mask */
-#define SKEIN_T1_BLK_TYPE(T) (((u64) (SKEIN_BLK_TYPE_##T)) << \
+#define SKEIN_T1_BLK_TYPE(T) (((u64)(SKEIN_BLK_TYPE_##T)) << \
SKEIN_T1_POS_BLK_TYPE)
#define SKEIN_T1_BLK_TYPE_KEY SKEIN_T1_BLK_TYPE(KEY) /* for MAC and KDF */
#define SKEIN_T1_BLK_TYPE_CFG SKEIN_T1_BLK_TYPE(CFG) /* config block */
@@ -204,11 +199,11 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
#define SKEIN_ID_STRING_LE (0x33414853) /* "SHA3" (little-endian)*/
#endif
-#define SKEIN_MK_64(hi32, lo32) ((lo32) + (((u64) (hi32)) << 32))
+#define SKEIN_MK_64(hi32, lo32) ((lo32) + (((u64)(hi32)) << 32))
#define SKEIN_SCHEMA_VER SKEIN_MK_64(SKEIN_VERSION, SKEIN_ID_STRING_LE)
#define SKEIN_KS_PARITY SKEIN_MK_64(0x1BD11BDA, 0xA9FC1A22)
-#define SKEIN_CFG_STR_LEN (4*8)
+#define SKEIN_CFG_STR_LEN (4 * 8)
/* bit field definitions in config block tree_info word */
#define SKEIN_CFG_TREE_LEAF_SIZE_POS (0)
@@ -327,9 +322,9 @@ enum {
#define SKEIN_512_ROUNDS_TOTAL (72)
#define SKEIN_1024_ROUNDS_TOTAL (80)
#else /* allow command-line define in range 8*(5..14) */
-#define SKEIN_256_ROUNDS_TOTAL (8*((((SKEIN_ROUNDS/100) + 5) % 10) + 5))
-#define SKEIN_512_ROUNDS_TOTAL (8*((((SKEIN_ROUNDS/10) + 5) % 10) + 5))
-#define SKEIN_1024_ROUNDS_TOTAL (8*((((SKEIN_ROUNDS) + 5) % 10) + 5))
+#define SKEIN_256_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS / 100) + 5) % 10) + 5))
+#define SKEIN_512_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS / 10) + 5) % 10) + 5))
+#define SKEIN_1024_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS) + 5) % 10) + 5))
#endif
#endif /* ifndef _SKEIN_H_ */
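
Alongside the operator-spacing cleanups, this header drops the private rotl_64() helper; the skein_block.c diff below switches the ROUND macros to rol64() from <linux/bitops.h>, the kernel's generic 64-bit left rotate, which architectures may back with an optimized form. The two are equivalent, as a quick self-check shows:

#include <linux/bitops.h>
#include <linux/types.h>

/* rol64(x, n) matches the removed rotl_64() for 0 < n < 64, which
 * covers every rotation constant the Skein round macros use. */
static bool demo_rol64_matches(u64 x, unsigned int n)
{
	return rol64(x, n) == ((x << n) | (x >> (64 - n)));
}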
diff --git a/drivers/staging/skein/skein_block.c b/drivers/staging/skein/skein_block.c
index 45b47327e..59a0a8a82 100644
--- a/drivers/staging/skein/skein_block.c
+++ b/drivers/staging/skein/skein_block.c
@@ -15,6 +15,7 @@
************************************************************************/
#include <linux/string.h>
+#include <linux/bitops.h>
#include "skein_base.h"
#include "skein_block.h"
@@ -59,10 +60,10 @@
#define ROUND256(p0, p1, p2, p3, ROT, r_num) \
do { \
X##p0 += X##p1; \
- X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 = rol64(X##p1, ROT##_0); \
X##p1 ^= X##p0; \
X##p2 += X##p3; \
- X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 = rol64(X##p3, ROT##_1); \
X##p3 ^= X##p2; \
} while (0)
@@ -120,10 +121,10 @@
#if !(SKEIN_USE_ASM & 512)
#undef RCNT
-#define RCNT (SKEIN_512_ROUNDS_TOTAL/8)
+#define RCNT (SKEIN_512_ROUNDS_TOTAL / 8)
#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
-#define SKEIN_UNROLL_512 (((SKEIN_LOOP)/10)%10)
+#define SKEIN_UNROLL_512 (((SKEIN_LOOP) / 10) % 10)
#else
#define SKEIN_UNROLL_512 (0)
#endif
@@ -136,15 +137,16 @@
#define ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
do { \
X##p0 += X##p1; \
- X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 = rol64(X##p1, ROT##_0); \
X##p1 ^= X##p0; \
X##p2 += X##p3; \
- X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 = rol64(X##p3, ROT##_1); \
X##p3 ^= X##p2; \
X##p4 += X##p5; \
- X##p5 = rotl_64(X##p5, ROT##_2); \
+ X##p5 = rol64(X##p5, ROT##_2); \
X##p5 ^= X##p4; \
- X##p6 += X##p7; X##p7 = rotl_64(X##p7, ROT##_3);\
+ X##p6 += X##p7; \
+ X##p7 = rol64(X##p7, ROT##_3); \
X##p7 ^= X##p6; \
} while (0)
@@ -200,7 +202,7 @@
} while (0)
#define R512_UNROLL_R(NN) \
((SKEIN_UNROLL_512 == 0 && \
- SKEIN_512_ROUNDS_TOTAL/8 > (NN)) || \
+ SKEIN_512_ROUNDS_TOTAL / 8 > (NN)) || \
(SKEIN_UNROLL_512 > (NN)))
#if (SKEIN_UNROLL_512 > 14)
@@ -210,7 +212,7 @@
#if !(SKEIN_USE_ASM & 1024)
#undef RCNT
-#define RCNT (SKEIN_1024_ROUNDS_TOTAL/8)
+#define RCNT (SKEIN_1024_ROUNDS_TOTAL / 8)
#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
#define SKEIN_UNROLL_1024 ((SKEIN_LOOP) % 10)
#else
@@ -226,28 +228,28 @@
pF, ROT, r_num) \
do { \
X##p0 += X##p1; \
- X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 = rol64(X##p1, ROT##_0); \
X##p1 ^= X##p0; \
X##p2 += X##p3; \
- X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 = rol64(X##p3, ROT##_1); \
X##p3 ^= X##p2; \
X##p4 += X##p5; \
- X##p5 = rotl_64(X##p5, ROT##_2); \
+ X##p5 = rol64(X##p5, ROT##_2); \
X##p5 ^= X##p4; \
X##p6 += X##p7; \
- X##p7 = rotl_64(X##p7, ROT##_3); \
+ X##p7 = rol64(X##p7, ROT##_3); \
X##p7 ^= X##p6; \
X##p8 += X##p9; \
- X##p9 = rotl_64(X##p9, ROT##_4); \
+ X##p9 = rol64(X##p9, ROT##_4); \
X##p9 ^= X##p8; \
X##pA += X##pB; \
- X##pB = rotl_64(X##pB, ROT##_5); \
+ X##pB = rol64(X##pB, ROT##_5); \
X##pB ^= X##pA; \
X##pC += X##pD; \
- X##pD = rotl_64(X##pD, ROT##_6); \
+ X##pD = rol64(X##pD, ROT##_6); \
X##pD ^= X##pC; \
X##pE += X##pF; \
- X##pF = rotl_64(X##pF, ROT##_7); \
+ X##pF = rol64(X##pF, ROT##_7); \
X##pF ^= X##pE; \
} while (0)
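Each ROUND* macro is a row of Threefish MIX operations, and the threefish_decrypt_* functions further down run the exact inverse with ror64(). One MIX and its inverse, extracted into a standalone sketch (hypothetical helper names, not functions from this driver):

#include <stdint.h>

static inline uint64_t rol64(uint64_t w, unsigned int s)
{
        return (w << s) | (w >> (64 - s));
}

static inline uint64_t ror64(uint64_t w, unsigned int s)
{
        return (w >> s) | (w << (64 - s));
}

/* forward MIX, as in the ROUND256/ROUND512/R1024 macros:
 * add, rotate left, xor */
static void mix(uint64_t *x0, uint64_t *x1, unsigned int r)
{
        *x0 += *x1;
        *x1 = rol64(*x1, r) ^ *x0;
}

/* inverse MIX, as in the threefish_decrypt_* bodies below:
 * xor, rotate right, subtract */
static void unmix(uint64_t *x0, uint64_t *x1, unsigned int r)
{
        *x1 = ror64(*x1 ^ *x0, r);
        *x0 -= *x1;
}

unmix() undoes mix() exactly, which is why the decrypt hunks below all follow the pattern tmp = b ^ a; b = ror64(tmp, r); a -= b;.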
@@ -311,28 +313,28 @@
#define R1024_8_ROUNDS(R) \
do { \
R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \
- 13, 14, 15, R1024_0, 8*(R) + 1); \
+ 13, 14, 15, R1024_0, 8 * (R) + 1); \
R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \
- 05, 08, 01, R1024_1, 8*(R) + 2); \
+ 05, 08, 01, R1024_1, 8 * (R) + 2); \
R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \
- 11, 10, 09, R1024_2, 8*(R) + 3); \
+ 11, 10, 09, R1024_2, 8 * (R) + 3); \
R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \
- 03, 12, 07, R1024_3, 8*(R) + 4); \
- I1024(2*(R)); \
+ 03, 12, 07, R1024_3, 8 * (R) + 4); \
+ I1024(2 * (R)); \
R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \
- 13, 14, 15, R1024_4, 8*(R) + 5); \
+ 13, 14, 15, R1024_4, 8 * (R) + 5); \
R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \
- 05, 08, 01, R1024_5, 8*(R) + 6); \
+ 05, 08, 01, R1024_5, 8 * (R) + 6); \
R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \
- 11, 10, 09, R1024_6, 8*(R) + 7); \
+ 11, 10, 09, R1024_6, 8 * (R) + 7); \
R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \
- 03, 12, 07, R1024_7, 8*(R) + 8); \
- I1024(2*(R)+1); \
+ 03, 12, 07, R1024_7, 8 * (R) + 8); \
+ I1024(2 * (R) + 1); \
} while (0)
#define R1024_UNROLL_R(NN) \
((SKEIN_UNROLL_1024 == 0 && \
- SKEIN_1024_ROUNDS_TOTAL/8 > (NN)) || \
+ SKEIN_1024_ROUNDS_TOTAL / 8 > (NN)) || \
(SKEIN_UNROLL_1024 > (NN)))
#if (SKEIN_UNROLL_1024 > 14)
@@ -351,10 +353,10 @@ void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
size_t r;
#if SKEIN_UNROLL_256
/* key schedule: chaining vars + tweak + "rot" */
- u64 kw[WCNT+4+RCNT*2];
+ u64 kw[WCNT + 4 + (RCNT * 2)];
#else
/* key schedule words : chaining vars + tweak */
- u64 kw[WCNT+4];
+ u64 kw[WCNT + 4];
#endif
u64 X0, X1, X2, X3; /* local copy of context vars, for speed */
u64 w[WCNT]; /* local copy of input block */
@@ -460,9 +462,10 @@ void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
size_t skein_256_process_block_code_size(void)
{
- return ((u8 *) skein_256_process_block_code_size) -
- ((u8 *) skein_256_process_block);
+ return ((u8 *)skein_256_process_block_code_size) -
+ ((u8 *)skein_256_process_block);
}
+
unsigned int skein_256_unroll_cnt(void)
{
return SKEIN_UNROLL_256;
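The *_code_size() helpers estimate the size of the preceding function by subtracting the addresses of two adjacent symbols; that only works if the compiler emits the functions in source order without reordering, merging, or inlining them. A userspace sketch of the same trick (illustrative names; this pointer arithmetic is implementation-defined, not portable C):

#include <stdio.h>
#include <stdint.h>

static void work(void)
{
        /* function whose code size is being estimated */
}

static void marker(void)
{
        /* assumed to be placed in memory immediately after work() */
}

int main(void)
{
        uintptr_t lo = (uintptr_t)work;
        uintptr_t hi = (uintptr_t)marker;

        printf("~%lu bytes\n", (unsigned long)(hi - lo));
        return 0;
}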
@@ -480,9 +483,11 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
};
size_t r;
#if SKEIN_UNROLL_512
- u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot"*/
+ /* key sched: chaining vars + tweak + "rot" */
+ u64 kw[WCNT + 4 + RCNT * 2];
#else
- u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
+ /* key schedule words : chaining vars + tweak */
+ u64 kw[WCNT + 4];
#endif
u64 X0, X1, X2, X3, X4, X5, X6, X7; /* local copies, for speed */
u64 w[WCNT]; /* local copy of input block */
@@ -543,7 +548,6 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
for (r = 1;
r < (SKEIN_UNROLL_512 ? 2 * RCNT : 2);
r += (SKEIN_UNROLL_512 ? 2 * SKEIN_UNROLL_512 : 1)) {
-
R512_8_ROUNDS(0);
#if R512_UNROLL_R(1)
@@ -609,9 +613,10 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
size_t skein_512_process_block_code_size(void)
{
- return ((u8 *) skein_512_process_block_code_size) -
- ((u8 *) skein_512_process_block);
+ return ((u8 *)skein_512_process_block_code_size) -
+ ((u8 *)skein_512_process_block);
}
+
unsigned int skein_512_unroll_cnt(void)
{
return SKEIN_UNROLL_512;
@@ -629,9 +634,11 @@ void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
};
size_t r;
#if (SKEIN_UNROLL_1024 != 0)
- u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot" */
+ /* key sched: chaining vars + tweak + "rot" */
+ u64 kw[WCNT + 4 + (RCNT * 2)];
#else
- u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
+ /* key schedule words : chaining vars + tweak */
+ u64 kw[WCNT + 4];
#endif
/* local copy of vars, for speed */
@@ -771,9 +778,10 @@ void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
size_t skein_1024_process_block_code_size(void)
{
- return ((u8 *) skein_1024_process_block_code_size) -
- ((u8 *) skein_1024_process_block);
+ return ((u8 *)skein_1024_process_block_code_size) -
+ ((u8 *)skein_1024_process_block);
}
+
unsigned int skein_1024_unroll_cnt(void)
{
return SKEIN_UNROLL_1024;
diff --git a/drivers/staging/skein/skein_generic.c b/drivers/staging/skein/skein_generic.c
index e29b9abaa..11f5e530a 100644
--- a/drivers/staging/skein/skein_generic.c
+++ b/drivers/staging/skein/skein_generic.c
@@ -27,7 +27,7 @@ static int skein256_init(struct shash_desc *desc)
}
static int skein256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+ unsigned int len)
{
return skein_256_update((struct skein_256_ctx *)shash_desc_ctx(desc),
data, len);
@@ -62,7 +62,7 @@ static int skein512_init(struct shash_desc *desc)
}
static int skein512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+ unsigned int len)
{
return skein_512_update((struct skein_512_ctx *)shash_desc_ctx(desc),
data, len);
@@ -97,7 +97,7 @@ static int skein1024_init(struct shash_desc *desc)
}
static int skein1024_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+ unsigned int len)
{
return skein_1024_update((struct skein_1024_ctx *)shash_desc_ctx(desc),
data, len);
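These skein*_update() hunks only re-indent the continuation lines; functionally each wrapper recovers the request-private context with shash_desc_ctx() and forwards to the matching skein_*_update(). For orientation, a condensed sketch of how such a wrapper set is exposed through the kernel crypto API (the shash_alg field values below are illustrative, not copied from skein_generic.c, and assume a skein256_final wrapper alongside the init/update wrappers shown above):

#include <crypto/internal/hash.h>
#include "skein_base.h"

/* illustrative shash descriptor wiring the wrappers together;
 * registered with crypto_register_shash() from module init and
 * torn down with crypto_unregister_shash() */
static struct shash_alg skein256_sketch = {
        .digestsize	= 32,	/* 256-bit digest */
        .init		= skein256_init,
        .update		= skein256_update,
        .final		= skein256_final,
        .descsize	= sizeof(struct skein_256_ctx),
        .base		= {
                .cra_name	= "skein256",
                .cra_blocksize	= SKEIN_256_BLOCK_BYTES,
                .cra_module	= THIS_MODULE,
        },
};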
diff --git a/drivers/staging/skein/threefish_api.h b/drivers/staging/skein/threefish_api.h
index 8e0a0b77e..615e46757 100644
--- a/drivers/staging/skein/threefish_api.h
+++ b/drivers/staging/skein/threefish_api.h
@@ -52,7 +52,7 @@ enum threefish_size {
*/
struct threefish_key {
u64 state_size;
- u64 key[SKEIN_MAX_STATE_WORDS+1]; /* max number of key words*/
+ u64 key[SKEIN_MAX_STATE_WORDS + 1]; /* max number of key words */
u64 tweak[3];
};
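The key array is one word longer than the maximum state size because the Threefish key schedule appends a parity word: the XOR of every key word with the constant the Skein header calls SKEIN_KS_PARITY (0x1BD11BDAA9FC1A22). A sketch of that computation (hypothetical helper, not a function from this driver):

#include <stdint.h>
#include <stddef.h>

#define KS_PARITY 0x1BD11BDAA9FC1A22ULL	/* SKEIN_KS_PARITY */

/* key[] has room for words + 1 entries; the extra slot receives
 * the key-schedule parity word */
static void set_key_parity(uint64_t *key, size_t words)
{
        uint64_t parity = KS_PARITY;
        size_t i;

        for (i = 0; i < words; i++)
                parity ^= key[i];
        key[words] = parity;
}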
diff --git a/drivers/staging/skein/threefish_block.c b/drivers/staging/skein/threefish_block.c
index e19ac4368..a95563fad 100644
--- a/drivers/staging/skein/threefish_block.c
+++ b/drivers/staging/skein/threefish_block.c
@@ -512,622 +512,622 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= k0 + t1;
b3 -= k1 + 18;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k2;
b1 -= k3 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k4 + t0;
b3 -= k0 + 17;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k1;
b1 -= k2 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k3 + t2;
b3 -= k4 + 16;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k0;
b1 -= k1 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k2 + t1;
b3 -= k3 + 15;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k4;
b1 -= k0 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k1 + t0;
b3 -= k2 + 14;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k3;
b1 -= k4 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k0 + t2;
b3 -= k1 + 13;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k2;
b1 -= k3 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k4 + t1;
b3 -= k0 + 12;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k1;
b1 -= k2 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k3 + t0;
b3 -= k4 + 11;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k0;
b1 -= k1 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k2 + t2;
b3 -= k3 + 10;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k4;
b1 -= k0 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k1 + t1;
b3 -= k2 + 9;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k3;
b1 -= k4 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k0 + t0;
b3 -= k1 + 8;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k2;
b1 -= k3 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k4 + t2;
b3 -= k0 + 7;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k1;
b1 -= k2 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k3 + t1;
b3 -= k4 + 6;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k0;
b1 -= k1 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k2 + t0;
b3 -= k3 + 5;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k4;
b1 -= k0 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k1 + t2;
b3 -= k2 + 4;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k3;
b1 -= k4 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k0 + t1;
b3 -= k1 + 3;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k2;
b1 -= k3 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k4 + t0;
b3 -= k0 + 2;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k1;
b1 -= k2 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k3 + t2;
b3 -= k4 + 1;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k0;
b1 -= k1 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k2 + t1;
b3 -= k3;
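The k*/t* terms subtracted at intervals in the body above undo the subkey injection that encryption performs after every four rounds. For Threefish-256, subkey number s is built from rotating key and tweak schedules plus the subkey counter itself, which is why the decrypt code counts the round constants down from 18 to the final bare b3 -= k3 (subkey 0). A sketch of the forward injection (hypothetical helper; k[] includes the parity word as its fifth entry):

#include <stdint.h>

/* forward subkey injection for Threefish-256, subkey number s:
 * key words rotate through k[0..4], tweak words through t[0..2],
 * and s itself lands in the last word (cf. the k/t subtractions
 * being undone above) */
static void inject_256(uint64_t b[4], const uint64_t k[5],
                       const uint64_t t[3], unsigned int s)
{
        b[0] += k[s % 5];
        b[1] += k[(s + 1) % 5] + t[s % 3];
        b[2] += k[(s + 2) % 5] + t[(s + 1) % 3];
        b[3] += k[(s + 3) % 5] + s;
}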
@@ -2125,1226 +2125,1226 @@ void threefish_decrypt_512(struct threefish_key *key_ctx, u64 *input,
b7 -= k7 + 18;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k5 + t0;
b7 -= k6 + 17;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k3;
b5 -= k4 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k1;
b3 -= k2;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k8;
b1 -= k0;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k4 + t2;
b7 -= k5 + 16;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k2;
b5 -= k3 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k0;
b3 -= k1;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k7;
b1 -= k8;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k3 + t1;
b7 -= k4 + 15;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k1;
b5 -= k2 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k8;
b3 -= k0;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k6;
b1 -= k7;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k2 + t0;
b7 -= k3 + 14;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k0;
b5 -= k1 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k7;
b3 -= k8;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k5;
b1 -= k6;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k1 + t2;
b7 -= k2 + 13;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k8;
b5 -= k0 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k6;
b3 -= k7;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k4;
b1 -= k5;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k0 + t1;
b7 -= k1 + 12;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k7;
b5 -= k8 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k5;
b3 -= k6;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k3;
b1 -= k4;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k8 + t0;
b7 -= k0 + 11;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k6;
b5 -= k7 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k4;
b3 -= k5;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k2;
b1 -= k3;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k7 + t2;
b7 -= k8 + 10;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k5;
b5 -= k6 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k3;
b3 -= k4;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k1;
b1 -= k2;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k6 + t1;
b7 -= k7 + 9;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k4;
b5 -= k5 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k2;
b3 -= k3;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k0;
b1 -= k1;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k5 + t0;
b7 -= k6 + 8;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k3;
b5 -= k4 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k1;
b3 -= k2;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k8;
b1 -= k0;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k4 + t2;
b7 -= k5 + 7;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k2;
b5 -= k3 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k0;
b3 -= k1;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k7;
b1 -= k8;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k3 + t1;
b7 -= k4 + 6;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k1;
b5 -= k2 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k8;
b3 -= k0;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k6;
b1 -= k7;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k2 + t0;
b7 -= k3 + 5;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k0;
b5 -= k1 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k7;
b3 -= k8;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k5;
b1 -= k6;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k1 + t2;
b7 -= k2 + 4;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k8;
b5 -= k0 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k6;
b3 -= k7;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k4;
b1 -= k5;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k0 + t1;
b7 -= k1 + 3;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k7;
b5 -= k8 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k5;
b3 -= k6;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k3;
b1 -= k4;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k8 + t0;
b7 -= k0 + 2;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k6;
b5 -= k7 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k4;
b3 -= k5;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k2;
b1 -= k3;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k7 + t2;
b7 -= k8 + 1;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k5;
b5 -= k6 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k3;
b3 -= k4;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k1;
b1 -= k2;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k6 + t1;
b7 -= k7;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k4;
b5 -= k5 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k2;
b3 -= k3;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k0;
b1 -= k1;
@@ -5521,2722 +5521,2722 @@ void threefish_decrypt_1024(struct threefish_key *key_ctx, u64 *input,
b14 -= k0 + t0;
b15 -= k1 + 20;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k16 + t2;
b15 -= k0 + 19;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k14;
b13 -= k15 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k12;
b11 -= k13;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k10;
b9 -= k11;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k8;
b7 -= k9;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k6;
b5 -= k7;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k4;
b3 -= k5;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k2;
b1 -= k3;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k15 + t1;
b15 -= k16 + 18;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k13;
b13 -= k14 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k11;
b11 -= k12;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k9;
b9 -= k10;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k7;
b7 -= k8;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k5;
b5 -= k6;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k3;
b3 -= k4;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k1;
b1 -= k2;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k14 + t0;
b15 -= k15 + 17;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k12;
b13 -= k13 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k10;
b11 -= k11;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k8;
b9 -= k9;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k6;
b7 -= k7;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k4;
b5 -= k5;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k2;
b3 -= k3;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k0;
b1 -= k1;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k13 + t2;
b15 -= k14 + 16;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k11;
b13 -= k12 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k9;
b11 -= k10;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k7;
b9 -= k8;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k5;
b7 -= k6;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k3;
b5 -= k4;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k1;
b3 -= k2;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k16;
b1 -= k0;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k12 + t1;
b15 -= k13 + 15;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k10;
b13 -= k11 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k8;
b11 -= k9;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k6;
b9 -= k7;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k4;
b7 -= k5;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k2;
b5 -= k3;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k0;
b3 -= k1;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k15;
b1 -= k16;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k11 + t0;
b15 -= k12 + 14;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k9;
b13 -= k10 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k7;
b11 -= k8;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k5;
b9 -= k6;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k3;
b7 -= k4;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k1;
b5 -= k2;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k16;
b3 -= k0;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k14;
b1 -= k15;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k10 + t2;
b15 -= k11 + 13;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k8;
b13 -= k9 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k6;
b11 -= k7;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k4;
b9 -= k5;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k2;
b7 -= k3;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k0;
b5 -= k1;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k15;
b3 -= k16;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k13;
b1 -= k14;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k9 + t1;
b15 -= k10 + 12;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k7;
b13 -= k8 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k5;
b11 -= k6;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k3;
b9 -= k4;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k1;
b7 -= k2;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k16;
b5 -= k0;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k14;
b3 -= k15;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k12;
b1 -= k13;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k8 + t0;
b15 -= k9 + 11;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k6;
b13 -= k7 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k4;
b11 -= k5;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k2;
b9 -= k3;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k0;
b7 -= k1;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k15;
b5 -= k16;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k13;
b3 -= k14;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k11;
b1 -= k12;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k7 + t2;
b15 -= k8 + 10;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k5;
b13 -= k6 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k3;
b11 -= k4;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k1;
b9 -= k2;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k16;
b7 -= k0;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k14;
b5 -= k15;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k12;
b3 -= k13;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k10;
b1 -= k11;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k6 + t1;
b15 -= k7 + 9;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k4;
b13 -= k5 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k2;
b11 -= k3;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k0;
b9 -= k1;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k15;
b7 -= k16;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k13;
b5 -= k14;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k11;
b3 -= k12;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k9;
b1 -= k10;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k5 + t0;
b15 -= k6 + 8;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k3;
b13 -= k4 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k1;
b11 -= k2;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k16;
b9 -= k0;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k14;
b7 -= k15;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k12;
b5 -= k13;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k10;
b3 -= k11;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k8;
b1 -= k9;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k4 + t2;
b15 -= k5 + 7;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k2;
b13 -= k3 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k0;
b11 -= k1;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k15;
b9 -= k16;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k13;
b7 -= k14;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k11;
b5 -= k12;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k9;
b3 -= k10;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k7;
b1 -= k8;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k3 + t1;
b15 -= k4 + 6;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k1;
b13 -= k2 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k16;
b11 -= k0;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k14;
b9 -= k15;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k12;
b7 -= k13;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k10;
b5 -= k11;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k8;
b3 -= k9;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k6;
b1 -= k7;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k2 + t0;
b15 -= k3 + 5;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k0;
b13 -= k1 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k15;
b11 -= k16;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k13;
b9 -= k14;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k11;
b7 -= k12;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k9;
b5 -= k10;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k7;
b3 -= k8;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k5;
b1 -= k6;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k1 + t2;
b15 -= k2 + 4;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k16;
b13 -= k0 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k14;
b11 -= k15;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k12;
b9 -= k13;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k10;
b7 -= k11;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k8;
b5 -= k9;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k6;
b3 -= k7;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k4;
b1 -= k5;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k0 + t1;
b15 -= k1 + 3;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k15;
b13 -= k16 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k13;
b11 -= k14;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k11;
b9 -= k12;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k9;
b7 -= k10;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k7;
b5 -= k8;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k5;
b3 -= k6;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k3;
b1 -= k4;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k16 + t0;
b15 -= k0 + 2;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k14;
b13 -= k15 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k12;
b11 -= k13;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k10;
b9 -= k11;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k8;
b7 -= k9;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k6;
b5 -= k7;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k4;
b3 -= k5;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k2;
b1 -= k3;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k15 + t2;
b15 -= k16 + 1;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k13;
b13 -= k14 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k11;
b11 -= k12;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k9;
b9 -= k10;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k7;
b7 -= k8;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k5;
b5 -= k6;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k3;
b3 -= k4;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k1;
b1 -= k2;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k14 + t1;
b15 -= k15;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k12;
b13 -= k13 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k10;
b11 -= k11;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k8;
b9 -= k9;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k6;
b7 -= k7;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k4;
b5 -= k5;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k2;
b3 -= k3;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k0;
b1 -= k1;
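
The long run of hunks above (the unrolled block-cipher decryption rounds — apparently Threefish-1024, judging by the 16 data words b0..b15 and the 17-word key schedule plus tweak words t0..t2) is one mechanical substitution: every open-coded 64-bit rotate of the form (tmp >> N) | (tmp << (64 - N)) becomes a call to the kernel's ror64() helper. For reference, a minimal sketch of the helper as defined in include/linux/bitops.h around this kernel version (the body is quoted from memory and should be treated as an assumption):

static inline __u64 ror64(__u64 word, unsigned int shift)
{
	/* Rotate right: bits shifted out on the right re-enter on the left. */
	return (word >> shift) | (word << (64 - shift));
}

Every rotation count in this file lies in 1..63, so both shifts are well defined, and each converted line, e.g. b15 = ror64(tmp, 30), is bit-for-bit identical to the expression it replaces; the change is purely cosmetic and also lets the compiler recognize the rotate idiom and emit a single rotate instruction.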
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 3a2094f72..c831ba3ed 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -896,6 +896,7 @@ static void slic_upr_start(struct adapter *adapter)
{
struct slic_upr *upr;
__iomem struct slic_regs *slic_regs = adapter->slic_regs;
+
upr = adapter->upr_list;
if (!upr)
return;
@@ -1142,7 +1143,7 @@ static int slic_config_get(struct adapter *adapter, u32 config, u32 config_h)
/*
* Compute a checksum of the EEPROM according to RFC 1071.
*/
-static u16 slic_eeprom_cksum(void *eeprom, unsigned len)
+static u16 slic_eeprom_cksum(void *eeprom, unsigned int len)
{
u16 *wp = eeprom;
u32 checksum = 0;
@@ -1853,6 +1854,11 @@ static void slic_xmit_build_request(struct adapter *adapter,
ihcmd->u.slic_buffers.totlen = skb->len;
phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(adapter->pcidev, phys_addr)) {
+ kfree_skb(skb);
+ dev_err(&adapter->pcidev->dev, "DMA mapping error\n");
+ return;
+ }
ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr);
ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr);
ihcmd->u.slic_buffers.bufs[0].length = skb->len;
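
The slic_xmit_build_request() hunk above adds the error check that the streaming-DMA API requires: pci_map_single() can fail, and its return value must be validated with pci_dma_mapping_error() before the address is handed to hardware; on failure the skb is freed so the packet is not leaked. A minimal sketch of the general pattern for a legacy PCI driver of this era (pdev, buf and len are illustrative names):

dma_addr_t addr;

addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, addr)) {
	/* Mapping failed: addr is not a usable bus address. */
	return -ENOMEM;
}
/* ... point the device at addr and run the transfer, then ... */
pci_unmap_single(pdev, addr, len, PCI_DMA_TODEVICE);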
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 95f7cae3c..f80ee7766 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -306,7 +306,7 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
unsigned int input, request;
unsigned int tmpClock, ret;
const int max_OD = 3;
- int max_d;
+ int max_d = 6;
if (getChipType() == SM750LE) {
/* SM750LE doesn't have a programmable PLL or M/N values to work on.
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index a22fb0751..97ca4ecca 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -263,7 +263,7 @@ static struct notifier_block vt_notifier_block = {
static unsigned char get_attributes(struct vc_data *vc, u16 *pos)
{
pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
- return (u_char) (scr_readw(pos) >> 8);
+ return (scr_readw(pos) & ~vc->vc_hi_font_mask) >> 8;
}
static void speakup_date(struct vc_data *vc)
@@ -473,8 +473,10 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
w = scr_readw(pos);
c = w & 0xff;
- if (w & vc->vc_hi_font_mask)
+ if (w & vc->vc_hi_font_mask) {
+ w &= ~vc->vc_hi_font_mask;
c |= 0x100;
+ }
ch = inverse_translate(vc, c, 0);
*attribs = (w & 0xff00) >> 8;
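
Both speakup hunks above concern 512-glyph console fonts: when such a font is loaded, vc->vc_hi_font_mask (typically 0x0100) flags the upper half of the font inside the 16-bit screen word, so that bit is part of the character index rather than the attribute byte and must be masked off before the attributes are read. A minimal sketch of the decode under that assumption:

u16 w = scr_readw(pos);            /* attribute byte + character cell */
u16 c = w & 0xff;                  /* low 8 bits of the glyph index   */
u8 attr;

if (w & vc->vc_hi_font_mask) {
	w &= ~vc->vc_hi_font_mask;     /* strip the font bit from the word    */
	c |= 0x100;                    /* ...and fold it into the glyph index */
}
attr = (w & 0xff00) >> 8;          /* attribute byte, font bit removed */

Without the masking, get_attributes() would report a spurious attribute bit for every character drawn from the upper half of a 512-glyph font.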
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index 41ef099b7..0149edc1e 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -150,7 +150,7 @@ static void __speakup_paste_selection(struct work_struct *work)
add_wait_queue(&vc->paste_wait, &wait);
while (sel_buffer && sel_buffer_lth > pasted) {
set_current_state(TASK_INTERRUPTIBLE);
- if (test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (tty_throttled(tty)) {
schedule();
continue;
}
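
tty_throttled() is the accessor added for precisely this open-coded flag test. A sketch of its likely definition in include/linux/tty.h (quoted from memory, so treat it as an assumption):

static inline bool tty_throttled(struct tty_struct *tty)
{
	return test_bit(TTY_THROTTLED, &tty->flags);
}

The behaviour is unchanged; the helper simply keeps callers from poking at tty->flags directly.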
diff --git a/drivers/staging/speakup/serialio.h b/drivers/staging/speakup/serialio.h
index 1b399214e..3ad7ff0bc 100644
--- a/drivers/staging/speakup/serialio.h
+++ b/drivers/staging/speakup/serialio.h
@@ -6,6 +6,7 @@
#ifndef __sparc__
#include <linux/serial.h>
#endif
+#include <linux/serial_core.h>
/*
* this is cut&paste from 8250.h. Get rid of the structure, the definitions
@@ -16,7 +17,7 @@ struct old_serial_port {
unsigned int baud_base;
unsigned int port;
unsigned int irq;
- unsigned int flags; /* unused */
+ upf_t flags; /* unused */
};
/* countdown values for serial timeouts in us */
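
upf_t is the sparse-annotated type that serial_core.h uses for UART port flags, which is why the include is added above. Assuming the usual definition:

typedef unsigned int __bitwise upf_t;

Storing the flags as upf_t rather than a bare unsigned int lets sparse warn when these flag values are mixed with ordinary integers.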
diff --git a/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset b/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
index b0498ff32..c2359de17 100644
--- a/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
+++ b/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
@@ -50,20 +50,6 @@ Description: This field is used to tell s-Par which type of recovery tool
commission the guest.
Users: sparmaintainer@unisys.com
-What: guest/chipsetready
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: This entry is used by Unisys application software on the guest
- to acknowledge completion of specific events for integration
- purposes, but these acknowledgements are not required for the
- guest to operate correctly. The interface accepts one of two
- strings: MODULES_LOADED to indicate that the s-Par driver
- modules have been loaded successfully, or CALLHOMEDISK_MOUNTED,
- which indicates that the disk used to support call home services
- has been successfully mounted.
-Users: sparmaintainer@unisys.com
-
What: parahotplug/deviceenabled
Date: 7/18/2014
KernelVersion: TBD
diff --git a/drivers/staging/unisys/Documentation/overview.txt b/drivers/staging/unisys/Documentation/overview.txt
index c2d8dd4a2..1146c1cf5 100644
--- a/drivers/staging/unisys/Documentation/overview.txt
+++ b/drivers/staging/unisys/Documentation/overview.txt
@@ -137,12 +137,6 @@ called automatically by the visorbus driver at appropriate times:
* The resume() function is the "book-end" to pause(), and is described above.
-If/when a function driver creates a Linux device (that needs to be accessed
-from usermode), it calls visorbus_registerdevnode(), passing the major and
-minor number of the device. (Of course not all function drivers will need
-to do this.) This simply creates the appropriate "devmajorminor" sysfs entry
-described below, so that a hotplug script can use it to create a device node.
-
2.1.3. sysfs Advertised Information
-----------------------------------
@@ -197,19 +191,6 @@ The following files exist under /sys/devices/visorbus<x>/vbus<x>:dev<y>:
if the appropriate function driver has not
been loaded yet.
- devmajorminor
-
- <devname> if applicable, each file here identifies (via
- ... its file contents) the
- "<major>:<minor>" needed for a device node to
- enable access from usermode. There is exactly
- one file here for each different device node
- that can be accessed (from usermode). Note
- that this info is provided by a particular
- function driver, so these will not exist
- until AFTER the appropriate function driver
- controlling this device class is loaded.
-
channel properties of the device channel (all in
ascii text format)
diff --git a/drivers/staging/unisys/Documentation/proc-entries.txt b/drivers/staging/unisys/Documentation/proc-entries.txt
deleted file mode 100644
index 426f92b1c..000000000
--- a/drivers/staging/unisys/Documentation/proc-entries.txt
+++ /dev/null
@@ -1,93 +0,0 @@
- s-Par Proc Entries
-This document describes the proc entries created by the Unisys s-Par modules.
-
-Support Module Entries
-These entries are provided primarily for debugging.
-
-/proc/uislib/info: This entry contains debugging information for the
-uislib module, including bus information and memory usage.
-
-/proc/visorchipset/controlvm: This directory contains debugging
-entries for the controlvm channel used by visorchipset.
-
-/proc/uislib/platform: This entry is used to display the platform
-number this node is in the system. For some guests, this may be
-invalid.
-
-/proc/visorchipset/chipsetready: This entry is written to by scripts
-to signify that any user level activity has been completed before the
-guest can be considered running and is shown as running in the s-Par
-UI.
-
-Device Entries
-These entries provide status of the devices shared by a service partition.
-
-/proc/uislib/vbus: this is a directory containing entries for each
-virtual bus. Each numbered sub-directory contains an info entry, which
-describes the devices that appear on that bus.
-
-/proc/uislib/cycles_before_wait: This entry is used to tune
-performance, by setting the number of cycles we wait before going idle
-when in polling mode. A longer time will reduce message latency but
-spend more processing time polling.
-
-/proc/uislib/smart_wakeup: This entry is used to tune performance, by
-enabling or disabling smart wakeup.
-
-/proc/virthba/info: This entry contains debugging information for the
-virthba module, including interrupt information and memory usage.
-
-/proc/virthba/enable_ints: This entry controls interrupt use by the
-virthba module. Writing a 0 to this entry will disable interrupts.
-
-/proc/virtnic/info: This entry contains debugging information for the
-virtnic module, including interrupt information, send and receive
-counts, and other device information.
-
-/proc/virtnic/ethX: This is a directory containing entries for each
-virtual NIC. Each named subdirectory contains two entries,
-clientstring and zone.
-
-/proc/virtpci/info: This entry contains debugging information for the
-virtpci module, including virtual PCI bus information and device
-locations.
-
-/proc/virtnic/enable_ints: This entry controls interrupt use by the
-virtnic module. Writing a 0 to this entry will disable interrupts.
-
-Visorconinclient, visordiag, visornoop, visorserialclient, and
-visorvideoclient Entries
-
-The entries in proc for these modules all follow the same
-pattern. Each module has its own proc directory with the same name,
-e.g. visordiag presents a /proc/visordiag directory. Inside of the
-module's directory are a device directory, which contains one numbered
-directory for each device provided by that module. Each device has a
-diag entry that presents the device number and visorbus name for that
-device. The module directory also has a driver/diag entry, which
-reports the corresponding s-Par version number of the driver.
-
-Automated Installation Entries
-
-These entries are used to pass information between the s-Par platform
-and the Linux-based installation and recovery tool. These values are
-read/write, however, the guest can only reset them to 0, or report an
-error status through the installer entry. The values are only set via
-s-Par's firmware interface, to help prevent accidentally booting into
-the tool.
-
-/proc/visorchipset/boottotool: This entry instructs s-Par that the
-next reboot will launch the installation and recovery tool. If set to
-0, the next boot will happen according to the UEFI boot manager
-settings.
-
-/proc/visorchipset/toolaction: This entry indicates the installation
-and recovery tool mode requested for the next boot.
-
-/proc/visorchipset/installer: this entry is used by the installation
-and recovery tool to pass status and result information back to the
-s-Par firmware.
-
-/proc/visorchipset/partition: This directory contains the guest
-partition configuration data for each virtual bus, for use during
-installation and at runtime for s-Par service partitions.
diff --git a/drivers/staging/unisys/MAINTAINERS b/drivers/staging/unisys/MAINTAINERS
index cc46e37e6..1f0425bf3 100644
--- a/drivers/staging/unisys/MAINTAINERS
+++ b/drivers/staging/unisys/MAINTAINERS
@@ -2,5 +2,4 @@ Unisys s-Par drivers
M: David Kershner <sparmaintainer@unisys.com>
S: Maintained
F: Documentation/s-Par/overview.txt
-F: Documentation/s-Par/proc-entries.txt
F: drivers/staging/unisys/
diff --git a/drivers/staging/unisys/include/channel.h b/drivers/staging/unisys/include/channel.h
index 5af59a5fc..db4e6b287 100644
--- a/drivers/staging/unisys/include/channel.h
+++ b/drivers/staging/unisys/include/channel.h
@@ -76,9 +76,9 @@ enum channel_clientstate {
};
static inline const u8 *
-ULTRA_CHANNELCLI_STRING(u32 v)
+ULTRA_CHANNELCLI_STRING(u32 state)
{
- switch (v) {
+ switch (state) {
case CHANNELCLI_DETACHED:
return (const u8 *)("DETACHED");
case CHANNELCLI_DISABLED:
@@ -411,7 +411,7 @@ spar_channel_client_acquire_os(void __iomem *ch, u8 *id)
mb(); /* required for channel synch */
}
if (readl(&hdr->cli_state_os) == CHANNELCLI_OWNED) {
- if (readb(&hdr->cli_error_os) != 0) {
+ if (readb(&hdr->cli_error_os)) {
/* we are in an error msg throttling state;
* come out of it
*/
@@ -459,7 +459,7 @@ spar_channel_client_acquire_os(void __iomem *ch, u8 *id)
mb(); /* required for channel synch */
return 0;
}
- if (readb(&hdr->cli_error_os) != 0) {
+ if (readb(&hdr->cli_error_os)) {
/* we are in an error msg throttling state; come out of it */
pr_info("%s Channel OS client acquire now successful\n", id);
writeb(0, &hdr->cli_error_os);
@@ -472,7 +472,7 @@ spar_channel_client_release_os(void __iomem *ch, u8 *id)
{
struct channel_header __iomem *hdr = ch;
- if (readb(&hdr->cli_error_os) != 0) {
+ if (readb(&hdr->cli_error_os)) {
/* we are in an error msg throttling state; come out of it */
pr_info("%s Channel OS client error state cleared\n", id);
writeb(0, &hdr->cli_error_os);
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index 880d9f04c..5ccf81485 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -253,48 +253,6 @@ struct uiscmdrsp_scsi {
/* SCSI device version for no disk inquiry result */
#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */
-/* Windows and Linux want different things for a non-existent lun. So, we'll let
- * caller pass in the peripheral qualifier and type.
- * NOTE:[4] SCSI returns (n-4); so we return length-1-4 or length-5.
- */
-
-#define SET_NO_DISK_INQUIRY_RESULT(buf, len, lun, lun0notpresent, notpresent) \
- do { \
- memset(buf, 0, \
- MINNUM(len, \
- (unsigned int)NO_DISK_INQUIRY_RESULT_LEN)); \
- buf[2] = (u8)SCSI_SPC2_VER; \
- if (lun == 0) { \
- buf[0] = (u8)lun0notpresent; \
- buf[3] = (u8)DEV_HISUPPORT; \
- } else \
- buf[0] = (u8)notpresent; \
- buf[4] = (u8)( \
- MINNUM(len, \
- (unsigned int)NO_DISK_INQUIRY_RESULT_LEN) - 5);\
- if (len >= NO_DISK_INQUIRY_RESULT_LEN) { \
- buf[8] = 'D'; \
- buf[9] = 'E'; \
- buf[10] = 'L'; \
- buf[11] = 'L'; \
- buf[16] = 'P'; \
- buf[17] = 'S'; \
- buf[18] = 'E'; \
- buf[19] = 'U'; \
- buf[20] = 'D'; \
- buf[21] = 'O'; \
- buf[22] = ' '; \
- buf[23] = 'D'; \
- buf[24] = 'E'; \
- buf[25] = 'V'; \
- buf[26] = 'I'; \
- buf[27] = 'C'; \
- buf[28] = 'E'; \
- buf[30] = ' '; \
- buf[31] = '.'; \
- } \
- } while (0)
-
/* Struct & Defines to support sense information. */
/* The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index 2a64a9ce0..9baf1ec70 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -61,54 +61,55 @@ struct visor_channeltype_descriptor {
const char *name;
};
-/** Information provided by each visor driver when it registers with the
- * visorbus driver.
+/**
+ * struct visor_driver - Information provided by each visor driver when it
+ * registers with the visorbus driver.
+ * @name: Name of the visor driver.
+ * @version: The numbered version of the driver (x.x.xxx).
+ * @vertag: A human readable version string.
+ * @owner: The module owner.
+ * @channel_types: Types of channels handled by this driver, ending with
+ * a zero GUID. Our specialized BUS.match() method knows
+ * about this list, and uses it to determine whether this
+ * driver will in fact handle a new device that it has
+ * detected.
+ * @probe: Called when a new device comes online, by our probe()
+ * function specified by driver.probe() (triggered
+ * ultimately by some call to driver_register(),
+ * bus_add_driver(), or driver_attach()).
+ * @remove: Called when a new device is removed, by our remove()
+ * function specified by driver.remove() (triggered
+ * ultimately by some call to device_release_driver()).
+ * @channel_interrupt: Called periodically, whenever there is a possibility
+ * that "something interesting" may have happened to the
+ * channel.
+ * @pause: Called to initiate a change of the device's state. If
+ * the return value is < 0, there was an error and the
+ * state transition will NOT occur. If the return value
+ * is >= 0, then the state transition was INITIATED
+ * successfully, and complete_func() will be called (or
+ * was just called) with the final status when either the
+ * state transition fails or completes successfully.
+ * @resume: Behaves similarly to pause.
+ * @driver: Private reference to the device driver. For use by bus
+ * driver only.
+ * @version_attr: Private version field. For use by bus driver only.
*/
struct visor_driver {
const char *name;
const char *version;
const char *vertag;
- const char *build_date;
- const char *build_time;
struct module *owner;
-
- /** Types of channels handled by this driver, ending with 0 GUID.
- * Our specialized BUS.match() method knows about this list, and
- * uses it to determine whether this driver will in fact handle a
- * new device that it has detected.
- */
struct visor_channeltype_descriptor *channel_types;
-
- /** Called when a new device comes online, by our probe() function
- * specified by driver.probe() (triggered ultimately by some call
- * to driver_register() / bus_add_driver() / driver_attach()).
- */
int (*probe)(struct visor_device *dev);
-
- /** Called when a new device is removed, by our remove() function
- * specified by driver.remove() (triggered ultimately by some call
- * to device_release_driver()).
- */
void (*remove)(struct visor_device *dev);
-
- /** Called periodically, whenever there is a possibility that
- * "something interesting" may have happened to the channel state.
- */
void (*channel_interrupt)(struct visor_device *dev);
-
- /** Called to initiate a change of the device's state. If the return
- * value is < 0, there was an error and the state transition will NOT
- * occur. If the return value is >= 0, then the state transition was
- * INITIATED successfully, and complete_func() will be called (or was
- * just called) with the final status when either the state transition
- * fails or completes successfully.
- */
int (*pause)(struct visor_device *dev,
visorbus_state_complete_func complete_func);
int (*resume)(struct visor_device *dev,
visorbus_state_complete_func complete_func);
- /** These fields are for private use by the bus driver only. */
+ /* These fields are for private use by the bus driver only. */
struct device_driver driver;
struct driver_attribute version_attr;
};
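Note: the new kernel-doc spells out the whole registration contract, so a skeleton of a hypothetical function driver built on these fields may help. The GUID, names, and empty callbacks below are invented for illustration; registration goes through visorbus_register_visor_driver(), defined elsewhere in this tree:

	/* Hypothetical driver skeleton; placeholder GUID and callbacks. */
	static struct visor_channeltype_descriptor example_types[] = {
		{ UUID_LE(0x12345678, 0x1234, 0x1234, 0x12, 0x34,
			  0x12, 0x34, 0x12, 0x34, 0x12, 0x34),
		  "example" },
		{ NULL_UUID_LE, NULL }	/* zero-GUID terminator, as documented */
	};

	static int example_probe(struct visor_device *dev) { return 0; }
	static void example_remove(struct visor_device *dev) { }

	static struct visor_driver example_driver = {
		.name		= "example",
		.owner		= THIS_MODULE,
		.channel_types	= example_types,
		.probe		= example_probe,
		.remove		= example_remove,
	};
	/* registered with visorbus_register_visor_driver(&example_driver) */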
@@ -116,48 +117,58 @@ struct visor_driver {
#define to_visor_driver(x) ((x) ? \
(container_of(x, struct visor_driver, driver)) : (NULL))
-/** A device type for things "plugged" into the visorbus bus */
+/**
+ * struct visor_device - A device type for things "plugged" into the visorbus
+ * bus
+ * @visorchannel: Points to the channel that the device is
+ * associated with.
+ * @channel_type_guid: Identifies the channel type to the bus driver.
+ * @device: Device struct meant for use by the bus driver
+ * only.
+ * @list_all: Used by the bus driver to enumerate devices.
+ * @periodic_work: Device work queue. Private use by bus driver
+ * only.
+ * @being_removed: Indicates that the device is being removed from
+ * the bus. Private bus driver use only.
+ * @visordriver_callback_lock: Used by the bus driver to lock when handling
+ * channel events.
+ * @pausing: Indicates that a change towards a paused state
+ * is in progress. Only modified by the bus driver.
+ * @resuming: Indicates that a change towards a running state
+ * is in progress. Only modified by the bus driver.
+ * @chipset_bus_no: Private field used by the bus driver.
+ * @chipset_dev_no: Private field used by the bus driver.
+ * @state: Used to indicate the current state of the
+ * device.
+ * @inst: Unique GUID for this instance of the device.
+ * @name: Name of the device.
+ * @pending_msg_hdr: For private use by bus driver to respond to
+ * hypervisor requests.
+ * @vbus_hdr_info: A pointer to header info. Private use by bus
+ * driver.
+ * @partition_uuid: Indicates client partition id. This should be the
+ * same across all visor_devices in the current
+ * guest. Private use by bus driver only.
+ */
struct visor_device {
- /** visor driver can use the visorchannel member with the functions
- * defined in visorchannel.h to access the channel
- */
struct visorchannel *visorchannel;
uuid_le channel_type_guid;
- u64 channel_bytes;
-
- /** These fields are for private use by the bus driver only.
- * A notable exception is that the visor driver can use
- * visor_get_drvdata() and visor_set_drvdata() to retrieve or stash
- * private visor driver specific data within the device member.
- */
+ /* These fields are for private use by the bus driver only. */
struct device device;
struct list_head list_all;
struct periodic_work *periodic_work;
bool being_removed;
- bool responded_to_device_create;
- struct kobject kobjdevmajorminor; /* visorbus<x>/dev<y>/devmajorminor/*/
- struct {
- int major, minor;
- void *attr; /* private use by devmajorminor_attr.c you can
- * change this constant to whatever you want
- */
- } devnodes[5];
- /* the code will detect and behave appropriately) */
struct semaphore visordriver_callback_lock;
bool pausing;
bool resuming;
u32 chipset_bus_no;
u32 chipset_dev_no;
struct visorchipset_state state;
- uuid_le type;
uuid_le inst;
u8 *name;
- u8 *description;
struct controlvm_message_header *pending_msg_hdr;
void *vbus_hdr_info;
- u32 switch_no;
- u32 internal_port_no;
uuid_le partition_uuid;
};
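Note: both to_visor_device() and to_visor_driver() rely on the usual embedded-struct idiom: the generic struct device lives inside the visor-specific object, and container_of() recovers the outer type. A sketch of the pattern (the _sketch name is illustrative):

	/* Recover the containing visor_device from the generic pointer
	 * the driver core hands to bus callbacks. */
	static inline struct visor_device *to_visor_device_sketch(struct device *xdev)
	{
		return container_of(xdev, struct visor_device, device);
	}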
@@ -174,8 +185,6 @@ int visorbus_write_channel(struct visor_device *dev,
unsigned long nbytes);
int visorbus_clear_channel(struct visor_device *dev,
unsigned long offset, u8 ch, unsigned long nbytes);
-int visorbus_registerdevnode(struct visor_device *dev,
- const char *name, int major, int minor);
void visorbus_enable_channel_interrupts(struct visor_device *dev);
void visorbus_disable_channel_interrupts(struct visor_device *dev);
#endif
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 533bb5b3d..3a147dbbd 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -33,6 +33,9 @@ static int visorbus_forcenomatch;
static int visorbus_debugref;
#define SERIALLOOPBACKCHANADDR (100 * 1024 * 1024)
+/* Display string that is guaranteed to be no longer than 99 characters */
+#define LINESIZE 99
+
#define CURRENT_FILE_PC VISOR_BUS_PC_visorbus_main_c
#define POLLJIFFIES_TESTWORK 100
#define POLLJIFFIES_NORMALCHANNEL 10
@@ -182,7 +185,6 @@ static int
visorbus_match(struct device *xdev, struct device_driver *xdrv)
{
uuid_le channel_type;
- int rc = 0;
int i;
struct visor_device *dev;
struct visor_driver *drv;
@@ -190,26 +192,23 @@ visorbus_match(struct device *xdev, struct device_driver *xdrv)
dev = to_visor_device(xdev);
drv = to_visor_driver(xdrv);
channel_type = visorchannel_get_uuid(dev->visorchannel);
- if (visorbus_forcematch) {
- rc = 1;
- goto away;
- }
- if (visorbus_forcenomatch)
- goto away;
+ if (visorbus_forcematch)
+ return 1;
+ if (visorbus_forcenomatch)
+ return 0;
if (!drv->channel_types)
- goto away;
+ return 0;
+
for (i = 0;
(uuid_le_cmp(drv->channel_types[i].guid, NULL_UUID_LE) != 0) ||
(drv->channel_types[i].name);
i++)
if (uuid_le_cmp(drv->channel_types[i].guid,
- channel_type) == 0) {
- rc = i + 1;
- goto away;
- }
-away:
- return rc;
+ channel_type) == 0)
+ return i + 1;
+
+ return 0;
}
/** This is called when device_unregister() is called for the bus device
@@ -243,180 +242,6 @@ visorbus_release_device(struct device *xdev)
kfree(dev);
}
-/* Implement publishing of device node attributes under:
- *
- * /sys/bus/visorbus<x>/dev<y>/devmajorminor
- *
- */
-
-#define to_devmajorminor_attr(_attr) \
- container_of(_attr, struct devmajorminor_attribute, attr)
-#define to_visor_device_from_kobjdevmajorminor(obj) \
- container_of(obj, struct visor_device, kobjdevmajorminor)
-
-struct devmajorminor_attribute {
- struct attribute attr;
- int slot;
- ssize_t (*show)(struct visor_device *, int slot, char *buf);
- ssize_t (*store)(struct visor_device *, int slot, const char *buf,
- size_t count);
-};
-
-static ssize_t DEVMAJORMINOR_ATTR(struct visor_device *dev, int slot, char *buf)
-{
- int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
-
- if (slot < 0 || slot >= maxdevnodes)
- return 0;
- return snprintf(buf, PAGE_SIZE, "%d:%d\n",
- dev->devnodes[slot].major, dev->devnodes[slot].minor);
-}
-
-static ssize_t
-devmajorminor_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
- struct devmajorminor_attribute *devmajorminor_attr =
- to_devmajorminor_attr(attr);
- struct visor_device *dev = to_visor_device_from_kobjdevmajorminor(kobj);
- ssize_t ret = 0;
-
- if (devmajorminor_attr->show)
- ret = devmajorminor_attr->show(dev,
- devmajorminor_attr->slot, buf);
- return ret;
-}
-
-static ssize_t
-devmajorminor_attr_store(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t count)
-{
- struct devmajorminor_attribute *devmajorminor_attr =
- to_devmajorminor_attr(attr);
- struct visor_device *dev = to_visor_device_from_kobjdevmajorminor(kobj);
- ssize_t ret = 0;
-
- if (devmajorminor_attr->store)
- ret = devmajorminor_attr->store(dev,
- devmajorminor_attr->slot,
- buf, count);
- return ret;
-}
-
-static int register_devmajorminor_attributes(struct visor_device *dev);
-
-static int
-devmajorminor_create_file(struct visor_device *dev, const char *name,
- int major, int minor)
-{
- int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
- struct devmajorminor_attribute *myattr = NULL;
- int x = -1, rc = 0, slot = -1;
-
- register_devmajorminor_attributes(dev);
- for (slot = 0; slot < maxdevnodes; slot++)
- if (!dev->devnodes[slot].attr)
- break;
- if (slot == maxdevnodes) {
- rc = -ENOMEM;
- goto away;
- }
- myattr = kzalloc(sizeof(*myattr), GFP_KERNEL);
- if (!myattr) {
- rc = -ENOMEM;
- goto away;
- }
- myattr->show = DEVMAJORMINOR_ATTR;
- myattr->store = NULL;
- myattr->slot = slot;
- myattr->attr.name = name;
- myattr->attr.mode = S_IRUGO;
- dev->devnodes[slot].attr = myattr;
- dev->devnodes[slot].major = major;
- dev->devnodes[slot].minor = minor;
- x = sysfs_create_file(&dev->kobjdevmajorminor, &myattr->attr);
- if (x < 0) {
- rc = x;
- goto away;
- }
- kobject_uevent(&dev->device.kobj, KOBJ_ONLINE);
-away:
- if (rc < 0) {
- kfree(myattr);
- myattr = NULL;
- dev->devnodes[slot].attr = NULL;
- }
- return rc;
-}
-
-static void
-devmajorminor_remove_file(struct visor_device *dev, int slot)
-{
- int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
- struct devmajorminor_attribute *myattr = NULL;
-
- if (slot < 0 || slot >= maxdevnodes)
- return;
- myattr = (struct devmajorminor_attribute *)(dev->devnodes[slot].attr);
- if (!myattr)
- return;
- sysfs_remove_file(&dev->kobjdevmajorminor, &myattr->attr);
- kobject_uevent(&dev->device.kobj, KOBJ_OFFLINE);
- dev->devnodes[slot].attr = NULL;
- kfree(myattr);
-}
-
-static void
-devmajorminor_remove_all_files(struct visor_device *dev)
-{
- int i = 0;
- int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
-
- for (i = 0; i < maxdevnodes; i++)
- devmajorminor_remove_file(dev, i);
-}
-
-static const struct sysfs_ops devmajorminor_sysfs_ops = {
- .show = devmajorminor_attr_show,
- .store = devmajorminor_attr_store,
-};
-
-static struct kobj_type devmajorminor_kobj_type = {
- .sysfs_ops = &devmajorminor_sysfs_ops
-};
-
-static int
-register_devmajorminor_attributes(struct visor_device *dev)
-{
- int rc = 0, x = 0;
-
- if (dev->kobjdevmajorminor.parent)
- goto away; /* already registered */
- x = kobject_init_and_add(&dev->kobjdevmajorminor,
- &devmajorminor_kobj_type, &dev->device.kobj,
- "devmajorminor");
- if (x < 0) {
- rc = x;
- goto away;
- }
-
- kobject_uevent(&dev->kobjdevmajorminor, KOBJ_ADD);
-
-away:
- return rc;
-}
-
-static void
-unregister_devmajorminor_attributes(struct visor_device *dev)
-{
- if (!dev->kobjdevmajorminor.parent)
- return; /* already unregistered */
- devmajorminor_remove_all_files(dev);
-
- kobject_del(&dev->kobjdevmajorminor);
- kobject_put(&dev->kobjdevmajorminor);
- dev->kobjdevmajorminor.parent = NULL;
-}
-
/* begin implementation of specific channel attributes to appear under
* /sys/bus/visorbus<x>/dev<y>/channel
*/
@@ -427,7 +252,7 @@ static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
if (!vdev->visorchannel)
return 0;
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n",
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
visorchannel_get_physaddr(vdev->visorchannel));
}
@@ -449,7 +274,7 @@ static ssize_t clientpartition_show(struct device *dev,
if (!vdev->visorchannel)
return 0;
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n",
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
visorchannel_get_clientpartition(vdev->visorchannel));
}
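Note: the 0x%Lx to 0x%llx conversions throughout this file bring the formats in line with Documentation/printk-formats.txt, where %Lx is discouraged. A minimal sketch of the sanctioned way to print a u64 (the helper name is invented):

	/* u64 is unsigned long long on all Linux arches, so %llx is the
	 * portable specifier; the cast is belt-and-braces. */
	static int show_addr(char *buf, size_t len, u64 addr)
	{
		return snprintf(buf, len, "0x%llx\n", (unsigned long long)addr);
	}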
@@ -457,24 +282,24 @@ static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
- char s[99];
+ char typeid[LINESIZE];
if (!vdev->visorchannel)
return 0;
return snprintf(buf, PAGE_SIZE, "%s\n",
- visorchannel_id(vdev->visorchannel, s));
+ visorchannel_id(vdev->visorchannel, typeid));
}
static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
- char s[99];
+ char zoneid[LINESIZE];
if (!vdev->visorchannel)
return 0;
return snprintf(buf, PAGE_SIZE, "%s\n",
- visorchannel_zoneid(vdev->visorchannel, s));
+ visorchannel_zoneid(vdev->visorchannel, zoneid));
}
static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
@@ -541,7 +366,7 @@ static ssize_t partition_handle_show(struct device *dev,
struct visor_device *vdev = to_visor_device(dev);
u64 handle = visorchannel_get_clientpartition(vdev->visorchannel);
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n", handle);
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n", handle);
}
static ssize_t partition_guid_show(struct device *dev,
@@ -566,7 +391,7 @@ static ssize_t channel_addr_show(struct device *dev,
struct visor_device *vdev = to_visor_device(dev);
u64 addr = visorchannel_get_physaddr(vdev->visorchannel);
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n", addr);
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n", addr);
}
static ssize_t channel_bytes_show(struct device *dev,
@@ -575,7 +400,7 @@ static ssize_t channel_bytes_show(struct device *dev,
struct visor_device *vdev = to_visor_device(dev);
u64 nbytes = visorchannel_get_nbytes(vdev->visorchannel);
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n", nbytes);
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n", nbytes);
}
static ssize_t channel_id_show(struct device *dev,
@@ -598,9 +423,9 @@ static ssize_t client_bus_info_show(struct device *dev,
struct visor_device *vdev = to_visor_device(dev);
struct visorchannel *channel = vdev->visorchannel;
- int i, x, remain = PAGE_SIZE;
+ int i, shift, remain = PAGE_SIZE;
unsigned long off;
- char *p = buf;
+ char *pos = buf;
u8 *partition_name;
struct ultra_vbus_deviceinfo dev_info;
@@ -608,44 +433,45 @@ static ssize_t client_bus_info_show(struct device *dev,
if (channel) {
if (vdev->name)
partition_name = vdev->name;
- x = snprintf(p, remain,
- "Client device / client driver info for %s partition (vbus #%d):\n",
- partition_name, vdev->chipset_dev_no);
- p += x;
- remain -= x;
- x = visorchannel_read(channel,
- offsetof(struct
- spar_vbus_channel_protocol,
- chp_info),
- &dev_info, sizeof(dev_info));
- if (x >= 0) {
- x = vbuschannel_devinfo_to_string(&dev_info, p,
- remain, -1);
- p += x;
- remain -= x;
+ shift = snprintf(pos, remain,
+ "Client device / client driver info for %s eartition (vbus #%d):\n",
+ partition_name, vdev->chipset_dev_no);
+ pos += shift;
+ remain -= shift;
+ shift = visorchannel_read(channel,
+ offsetof(struct
+ spar_vbus_channel_protocol,
+ chp_info),
+ &dev_info, sizeof(dev_info));
+ if (shift >= 0) {
+ shift = vbuschannel_devinfo_to_string(&dev_info, pos,
+ remain, -1);
+ pos += shift;
+ remain -= shift;
}
- x = visorchannel_read(channel,
- offsetof(struct
- spar_vbus_channel_protocol,
- bus_info),
- &dev_info, sizeof(dev_info));
- if (x >= 0) {
- x = vbuschannel_devinfo_to_string(&dev_info, p,
- remain, -1);
- p += x;
- remain -= x;
+ shift = visorchannel_read(channel,
+ offsetof(struct
+ spar_vbus_channel_protocol,
+ bus_info),
+ &dev_info, sizeof(dev_info));
+ if (shift >= 0) {
+ shift = vbuschannel_devinfo_to_string(&dev_info, pos,
+ remain, -1);
+ pos += shift;
+ remain -= shift;
}
off = offsetof(struct spar_vbus_channel_protocol, dev_info);
i = 0;
while (off + sizeof(dev_info) <=
visorchannel_get_nbytes(channel)) {
- x = visorchannel_read(channel,
- off, &dev_info, sizeof(dev_info));
- if (x >= 0) {
- x = vbuschannel_devinfo_to_string
- (&dev_info, p, remain, i);
- p += x;
- remain -= x;
+ shift = visorchannel_read(channel,
+ off, &dev_info,
+ sizeof(dev_info));
+ if (shift >= 0) {
+ shift = vbuschannel_devinfo_to_string
+ (&dev_info, pos, remain, i);
+ pos += shift;
+ remain -= shift;
}
off += sizeof(dev_info);
i++;
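Note: the renamed pos/shift/remain triple is the standard pattern for filling one sysfs page in several steps: every formatted chunk advances the cursor and shrinks the remaining space. Schematically (function and strings are placeholders; scnprintf() is the safer variant, since snprintf() reports the would-be length on truncation):

	static ssize_t fill_page_sketch(char *buf)
	{
		char *pos = buf;
		int remain = PAGE_SIZE;
		int shift;

		shift = snprintf(pos, remain, "header line\n");
		pos += shift;
		remain -= shift;

		shift = snprintf(pos, remain, "next chunk\n");
		pos += shift;
		remain -= shift;

		return pos - buf;	/* bytes written so far */
	}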
@@ -752,36 +578,28 @@ dev_stop_periodic_work(struct visor_device *dev)
static int
visordriver_probe_device(struct device *xdev)
{
- int rc;
+ int res;
struct visor_driver *drv;
struct visor_device *dev;
drv = to_visor_driver(xdev->driver);
dev = to_visor_device(xdev);
+
+ if (!drv->probe)
+ return -ENODEV;
+
down(&dev->visordriver_callback_lock);
dev->being_removed = false;
- /*
- * ensure that the dev->being_removed flag is cleared before
- * we start the probe
- */
- wmb();
- get_device(&dev->device);
- if (!drv->probe) {
- up(&dev->visordriver_callback_lock);
- rc = -ENODEV;
- goto away;
+
+ res = drv->probe(dev);
+ if (res >= 0) {
+ /* success: reference kept via unmatched get_device() */
+ get_device(&dev->device);
+ fix_vbus_dev_info(dev);
}
- rc = drv->probe(dev);
- if (rc < 0)
- goto away;
- fix_vbus_dev_info(dev);
up(&dev->visordriver_callback_lock);
- rc = 0;
-away:
- if (rc != 0)
- put_device(&dev->device);
- return rc;
+ return res;
}
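Note: the reordered probe path makes reference ownership explicit: get_device() is taken only after drv->probe() succeeds, and the matching put_device() lives in the remove path, so a bound device holds exactly one long-lived reference. The pairing, reduced to a sketch (names are illustrative):

	static int probe_sketch(struct visor_device *dev, struct visor_driver *drv)
	{
		int res = drv->probe(dev);

		if (res >= 0)
			get_device(&dev->device);	/* dropped on remove */
		return res;
	}

	static void remove_sketch(struct visor_device *dev, struct visor_driver *drv)
	{
		if (drv->remove)
			drv->remove(dev);
		put_device(&dev->device);	/* pairs with the probe-time get */
	}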
/** This is called when device_unregister() is called for each child device
@@ -798,21 +616,12 @@ visordriver_remove_device(struct device *xdev)
drv = to_visor_driver(xdev->driver);
down(&dev->visordriver_callback_lock);
dev->being_removed = true;
- /*
- * ensure that the dev->being_removed flag is set before we start the
- * actual removal
- */
- wmb();
- if (drv) {
- if (drv->remove)
- drv->remove(dev);
- }
+ if (drv->remove)
+ drv->remove(dev);
up(&dev->visordriver_callback_lock);
dev_stop_periodic_work(dev);
- devmajorminor_remove_all_files(dev);
put_device(&dev->device);
-
return 0;
}
@@ -928,14 +737,6 @@ visorbus_clear_channel(struct visor_device *dev, unsigned long offset, u8 ch,
}
EXPORT_SYMBOL_GPL(visorbus_clear_channel);
-int
-visorbus_registerdevnode(struct visor_device *dev,
- const char *name, int major, int minor)
-{
- return devmajorminor_create_file(dev, name, major, minor);
-}
-EXPORT_SYMBOL_GPL(visorbus_registerdevnode);
-
/** We don't really have a real interrupt, so for now we just call the
* interrupt function periodically...
*/
@@ -970,7 +771,7 @@ EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
static int
create_visor_device(struct visor_device *dev)
{
- int rc;
+ int err;
u32 chipset_bus_no = dev->chipset_bus_no;
u32 chipset_dev_no = dev->chipset_dev_no;
@@ -992,8 +793,8 @@ create_visor_device(struct visor_device *dev)
if (!dev->periodic_work) {
POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, chipset_dev_no,
DIAG_SEVERITY_ERR);
- rc = -EINVAL;
- goto away;
+ err = -EINVAL;
+ goto err_put;
}
/* bus_id must be a unique name with respect to this bus TYPE
@@ -1019,36 +820,25 @@ create_visor_device(struct visor_device *dev)
* claim the device. The device will be linked onto
* bus_type.klist_devices regardless (use bus_for_each_dev).
*/
- rc = device_add(&dev->device);
- if (rc < 0) {
+ err = device_add(&dev->device);
+ if (err < 0) {
POSTCODE_LINUX_3(DEVICE_ADD_PC, chipset_bus_no,
DIAG_SEVERITY_ERR);
- goto away;
- }
-
- rc = register_devmajorminor_attributes(dev);
- if (rc < 0) {
- POSTCODE_LINUX_3(DEVICE_REGISTER_FAILURE_PC, chipset_dev_no,
- DIAG_SEVERITY_ERR);
- goto away_unregister;
+ goto err_put;
}
list_add_tail(&dev->list_all, &list_all_device_instances);
- return 0;
-
-away_unregister:
- device_unregister(&dev->device);
+ return 0; /* success: reference kept via unmatched get_device() */
-away:
+err_put:
put_device(&dev->device);
- return rc;
+ return err;
}
static void
remove_visor_device(struct visor_device *dev)
{
list_del(&dev->list_all);
- unregister_devmajorminor_attributes(dev);
put_device(&dev->device);
device_unregister(&dev->device);
}
@@ -1477,24 +1267,24 @@ struct channel_size_info {
int
visorbus_init(void)
{
- int rc = 0;
+ int err;
- POSTCODE_LINUX_3(DRIVER_ENTRY_PC, rc, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX_3(DRIVER_ENTRY_PC, 0, POSTCODE_SEVERITY_INFO);
bus_device_info_init(&clientbus_driverinfo,
"clientbus", "visorbus",
VERSION, NULL);
- rc = create_bus_type();
- if (rc < 0) {
+ err = create_bus_type();
+ if (err < 0) {
POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, DIAG_SEVERITY_ERR);
- goto away;
+ goto error;
}
periodic_dev_workqueue = create_singlethread_workqueue("visorbus_dev");
if (!periodic_dev_workqueue) {
POSTCODE_LINUX_2(CREATE_WORKQUEUE_PC, DIAG_SEVERITY_ERR);
- rc = -ENOMEM;
- goto away;
+ err = -ENOMEM;
+ goto error;
}
/* This enables us to receive notifications when devices appear for
@@ -1504,13 +1294,11 @@ visorbus_init(void)
&chipset_responders,
&chipset_driverinfo);
- rc = 0;
+ return 0;
-away:
- if (rc)
- POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
- POSTCODE_SEVERITY_ERR);
- return rc;
+error:
+ POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
+ return err;
}
void
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index b68a904ac..43373582c 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -40,7 +40,6 @@ struct visorchannel {
bool requested;
struct channel_header chan_hdr;
uuid_le guid;
- ulong size;
bool needs_lock; /* channel creator knows if more than one */
/* thread will be inserting or removing */
spinlock_t insert_lock; /* protect head writes in chan_hdr */
@@ -134,8 +133,6 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
}
channel->nbytes = channel_bytes;
-
- channel->size = channel_bytes;
channel->guid = guid;
return channel;
@@ -186,7 +183,7 @@ EXPORT_SYMBOL_GPL(visorchannel_get_physaddr);
ulong
visorchannel_get_nbytes(struct visorchannel *channel)
{
- return channel->size;
+ return channel->nbytes;
}
EXPORT_SYMBOL_GPL(visorchannel_get_nbytes);
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 5fbda7b21..5ba5936e2 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -59,14 +59,13 @@
*/
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1; /* default is on */
-static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;
static u32 dump_vhba_bus;
static int
visorchipset_open(struct inode *inode, struct file *file)
{
- unsigned minor_number = iminor(inode);
+ unsigned int minor_number = iminor(inode);
if (minor_number)
return -ENODEV;
@@ -90,9 +89,6 @@ static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static unsigned long most_recent_message_jiffies;
static int visorbusregistered;
-#define MAX_CHIPSET_EVENTS 2
-static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
-
struct parser_context {
unsigned long allocbytes;
unsigned long param_bytes;
@@ -107,7 +103,6 @@ static DEFINE_SEMAPHORE(notifier_lock);
static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
-static struct controlvm_message_header g_chipset_msg_hdr;
static struct controlvm_message_packet g_devicechangestate_packet;
static LIST_HEAD(bus_info_list);
@@ -156,8 +151,6 @@ struct putfile_active_buffer {
/* a payload from a controlvm message, containing a file data buffer */
struct parser_context *parser_ctx;
/* points within data area of parser_ctx to next byte of data */
- u8 *pnext;
- /* # bytes left from <pnext> to the end of this data buffer */
size_t bytes_remaining;
};
@@ -171,14 +164,10 @@ struct putfile_request {
/* header from original TransmitFile request */
struct controlvm_message_header controlvm_header;
- u64 file_request_number; /* from original TransmitFile request */
/* link to next struct putfile_request */
struct list_head next_putfile_request;
- /* most-recent sequence number supplied via a controlvm message */
- u64 data_sequence_number;
-
/* head of putfile_buffer_entry list, which describes the data to be
* supplied as putfile data;
* - this list is added to when controlvm messages come in that supply
@@ -274,11 +263,6 @@ static ssize_t remaining_steps_store(struct device *dev,
const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);
-static ssize_t chipsetready_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-static DEVICE_ATTR_WO(chipsetready);
-
static ssize_t devicedisabled_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count);
@@ -303,16 +287,6 @@ static struct attribute_group visorchipset_install_group = {
.attrs = visorchipset_install_attrs
};
-static struct attribute *visorchipset_guest_attrs[] = {
- &dev_attr_chipsetready.attr,
- NULL
-};
-
-static struct attribute_group visorchipset_guest_group = {
- .name = "guest",
- .attrs = visorchipset_guest_attrs
-};
-
static struct attribute *visorchipset_parahotplug_attrs[] = {
&dev_attr_devicedisabled.attr,
&dev_attr_deviceenabled.attr,
@@ -326,7 +300,6 @@ static struct attribute_group visorchipset_parahotplug_group = {
static const struct attribute_group *visorchipset_dev_groups[] = {
&visorchipset_install_group,
- &visorchipset_guest_group,
&visorchipset_parahotplug_group,
NULL
};
@@ -359,8 +332,7 @@ static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
int allocbytes = sizeof(struct parser_context) + bytes;
- struct parser_context *rc = NULL;
- struct parser_context *ctx = NULL;
+ struct parser_context *ctx;
if (retry)
*retry = false;
@@ -374,15 +346,13 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
> MAX_CONTROLVM_PAYLOAD_BYTES) {
if (retry)
*retry = true;
- rc = NULL;
- goto cleanup;
+ return NULL;
}
ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
if (!ctx) {
if (retry)
*retry = true;
- rc = NULL;
- goto cleanup;
+ return NULL;
}
ctx->allocbytes = allocbytes;
@@ -393,35 +363,27 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
if (local) {
void *p;
- if (addr > virt_to_phys(high_memory - 1)) {
- rc = NULL;
- goto cleanup;
- }
+ if (addr > virt_to_phys(high_memory - 1))
+ goto err_finish_ctx;
p = __va((unsigned long)(addr));
memcpy(ctx->data, p, bytes);
} else {
void *mapping = memremap(addr, bytes, MEMREMAP_WB);
- if (!mapping) {
- rc = NULL;
- goto cleanup;
- }
+ if (!mapping)
+ goto err_finish_ctx;
memcpy(ctx->data, mapping, bytes);
memunmap(mapping);
}
ctx->byte_stream = true;
- rc = ctx;
-cleanup:
- if (rc) {
- controlvm_payload_bytes_buffered += ctx->param_bytes;
- } else {
- if (ctx) {
- parser_done(ctx);
- ctx = NULL;
- }
- }
- return rc;
+ controlvm_payload_bytes_buffered += ctx->param_bytes;
+
+ return ctx;
+
+err_finish_ctx:
+ parser_done(ctx);
+ return NULL;
}
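Note: the rewrite lands on the idiomatic kernel error shape: plain returns while nothing is held, a single err_* label once ctx exists, one teardown call. The general form (the validation and copy helpers are placeholders for the checks and memcpy steps above):

	struct parser_context *ctx;

	if (invalid_args)		/* placeholder validation */
		return NULL;		/* nothing held yet: plain return */

	ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
	if (!ctx)
		return NULL;

	if (copy_payload(ctx) < 0)	/* stands in for the memcpy steps */
		goto err_finish_ctx;

	return ctx;

	err_finish_ctx:
		parser_done(ctx);	/* single teardown point */
		return NULL;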
static uuid_le
@@ -523,7 +485,7 @@ static ssize_t toolaction_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- u8 tool_action;
+ u8 tool_action = 0;
visorchannel_read(controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
@@ -541,10 +503,11 @@ static ssize_t toolaction_store(struct device *dev,
if (kstrtou8(buf, 10, &tool_action))
return -EINVAL;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- tool_action),
- &tool_action, sizeof(u8));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ tool_action),
+ &tool_action, sizeof(u8));
if (ret)
return ret;
@@ -576,10 +539,11 @@ static ssize_t boottotool_store(struct device *dev,
return -EINVAL;
efi_spar_indication.boot_to_tool = val;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- efi_spar_ind), &(efi_spar_indication),
- sizeof(struct efi_spar_indication));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ efi_spar_ind), &(efi_spar_indication),
+ sizeof(struct efi_spar_indication));
if (ret)
return ret;
@@ -589,7 +553,7 @@ static ssize_t boottotool_store(struct device *dev,
static ssize_t error_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- u32 error;
+ u32 error = 0;
visorchannel_read(controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
@@ -607,10 +571,11 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
if (kstrtou32(buf, 10, &error))
return -EINVAL;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_error),
- &error, sizeof(u32));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_error),
+ &error, sizeof(u32));
if (ret)
return ret;
return count;
@@ -619,12 +584,13 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- u32 text_id;
+ u32 text_id = 0;
- visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_text_id),
- &text_id, sizeof(u32));
+ visorchannel_read
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_text_id),
+ &text_id, sizeof(u32));
return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}
@@ -637,10 +603,11 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
if (kstrtou32(buf, 10, &text_id))
return -EINVAL;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_text_id),
- &text_id, sizeof(u32));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_text_id),
+ &text_id, sizeof(u32));
if (ret)
return ret;
return count;
@@ -649,7 +616,7 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
static ssize_t remaining_steps_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u16 remaining_steps;
+ u16 remaining_steps = 0;
visorchannel_read(controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
@@ -668,10 +635,11 @@ static ssize_t remaining_steps_store(struct device *dev,
if (kstrtou16(buf, 10, &remaining_steps))
return -EINVAL;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_remaining_steps),
- &remaining_steps, sizeof(u16));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_remaining_steps),
+ &remaining_steps, sizeof(u16));
if (ret)
return ret;
return count;
@@ -717,26 +685,6 @@ struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
}
EXPORT_SYMBOL(visorbus_get_device_by_id);
-static u8
-check_chipset_events(void)
-{
- int i;
- u8 send_msg = 1;
- /* Check events to determine if response should be sent */
- for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
- send_msg &= chipset_events[i];
- return send_msg;
-}
-
-static void
-clear_chipset_events(void)
-{
- int i;
- /* Clear chipset_events */
- for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
- chipset_events[i] = 0;
-}
-
void
visorchipset_register_busdev(
struct visorchipset_busdev_notifiers *notifiers,
@@ -772,7 +720,7 @@ chipset_init(struct controlvm_message *inmsg)
POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
if (chipset_inited) {
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
- goto cleanup;
+ goto out_respond;
}
chipset_inited = 1;
POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
@@ -789,7 +737,7 @@ chipset_init(struct controlvm_message *inmsg)
*/
features |= ULTRA_CHIPSET_FEATURE_REPLY;
-cleanup:
+out_respond:
if (inmsg->hdr.flags.response_expected)
controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}
@@ -970,28 +918,31 @@ bus_epilog(struct visor_device *bus_info,
u32 cmd, struct controlvm_message_header *msg_hdr,
int response, bool need_response)
{
- bool notified = false;
struct controlvm_message_header *pmsg_hdr = NULL;
+ down(&notifier_lock);
+
if (!bus_info) {
/* relying on a valid passed in response code */
/* be lazy and re-use msg_hdr for this failure, is this ok?? */
pmsg_hdr = msg_hdr;
- goto away;
+ goto out_respond_and_unlock;
}
if (bus_info->pending_msg_hdr) {
/* only non-NULL if dev is still waiting on a response */
response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
pmsg_hdr = bus_info->pending_msg_hdr;
- goto away;
+ goto out_respond_and_unlock;
}
if (need_response) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
- response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto away;
+ POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
+ bus_info->chipset_bus_no,
+ POSTCODE_SEVERITY_ERR);
+ goto out_unlock;
}
memcpy(pmsg_hdr, msg_hdr,
@@ -999,37 +950,27 @@ bus_epilog(struct visor_device *bus_info,
bus_info->pending_msg_hdr = pmsg_hdr;
}
- down(&notifier_lock);
if (response == CONTROLVM_RESP_SUCCESS) {
switch (cmd) {
case CONTROLVM_BUS_CREATE:
if (busdev_notifiers.bus_create) {
(*busdev_notifiers.bus_create) (bus_info);
- notified = true;
+ goto out_unlock;
}
break;
case CONTROLVM_BUS_DESTROY:
if (busdev_notifiers.bus_destroy) {
(*busdev_notifiers.bus_destroy) (bus_info);
- notified = true;
+ goto out_unlock;
}
break;
}
}
-away:
- if (notified)
- /* The callback function just called above is responsible
- * for calling the appropriate visorchipset_busdev_responders
- * function, which will call bus_responder()
- */
- ;
- else
- /*
- * Do not kfree(pmsg_hdr) as this is the failure path.
- * The success path ('notified') will call the responder
- * directly and kfree() there.
- */
- bus_responder(cmd, pmsg_hdr, response);
+
+out_respond_and_unlock:
+ bus_responder(cmd, pmsg_hdr, response);
+
+out_unlock:
up(&notifier_lock);
}
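Note: the two exit labels are ordered so that out_respond_and_unlock falls through into out_unlock; every path through bus_epilog() (and device_epilog() below) therefore releases notifier_lock exactly once, and only the paths where a notifier callback was invoked skip the responder. The shape, with placeholder conditions:

	down(&notifier_lock);
	if (bad_input)			/* placeholder */
		goto out_respond_and_unlock;
	if (callback_invoked)		/* callback responds later */
		goto out_unlock;

	out_respond_and_unlock:
		bus_responder(cmd, pmsg_hdr, response);
	out_unlock:
		up(&notifier_lock);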
@@ -1040,30 +981,30 @@ device_epilog(struct visor_device *dev_info,
bool need_response, bool for_visorbus)
{
struct visorchipset_busdev_notifiers *notifiers;
- bool notified = false;
struct controlvm_message_header *pmsg_hdr = NULL;
notifiers = &busdev_notifiers;
+ down(&notifier_lock);
if (!dev_info) {
/* relying on a valid passed in response code */
/* be lazy and re-use msg_hdr for this failure, is this ok?? */
pmsg_hdr = msg_hdr;
- goto away;
+ goto out_respond_and_unlock;
}
if (dev_info->pending_msg_hdr) {
/* only non-NULL if dev is still waiting on a response */
response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
pmsg_hdr = dev_info->pending_msg_hdr;
- goto away;
+ goto out_respond_and_unlock;
}
if (need_response) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto away;
+ goto out_respond_and_unlock;
}
memcpy(pmsg_hdr, msg_hdr,
@@ -1071,13 +1012,12 @@ device_epilog(struct visor_device *dev_info,
dev_info->pending_msg_hdr = pmsg_hdr;
}
- down(&notifier_lock);
if (response >= 0) {
switch (cmd) {
case CONTROLVM_DEVICE_CREATE:
if (notifiers->device_create) {
(*notifiers->device_create) (dev_info);
- notified = true;
+ goto out_unlock;
}
break;
case CONTROLVM_DEVICE_CHANGESTATE:
@@ -1087,7 +1027,7 @@ device_epilog(struct visor_device *dev_info,
segment_state_running.operating) {
if (notifiers->device_resume) {
(*notifiers->device_resume) (dev_info);
- notified = true;
+ goto out_unlock;
}
}
/* ServerNotReady / ServerLost / SegmentStateStandby */
@@ -1099,32 +1039,23 @@ device_epilog(struct visor_device *dev_info,
*/
if (notifiers->device_pause) {
(*notifiers->device_pause) (dev_info);
- notified = true;
+ goto out_unlock;
}
}
break;
case CONTROLVM_DEVICE_DESTROY:
if (notifiers->device_destroy) {
(*notifiers->device_destroy) (dev_info);
- notified = true;
+ goto out_unlock;
}
break;
}
}
-away:
- if (notified)
- /* The callback function just called above is responsible
- * for calling the appropriate visorchipset_busdev_responders
- * function, which will call device_responder()
- */
- ;
- else
- /*
- * Do not kfree(pmsg_hdr) as this is the failure path.
- * The success path ('notified') will call the responder
- * directly and kfree() there.
- */
- device_responder(cmd, pmsg_hdr, response);
+
+out_respond_and_unlock:
+ device_responder(cmd, pmsg_hdr, response);
+
+out_unlock:
up(&notifier_lock);
}
@@ -1142,14 +1073,14 @@ bus_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
- goto cleanup;
+ goto out_bus_epilog;
}
bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
if (!bus_info) {
POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto cleanup;
+ goto out_bus_epilog;
}
INIT_LIST_HEAD(&bus_info->list_all);
@@ -1169,7 +1100,7 @@ bus_create(struct controlvm_message *inmsg)
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
kfree(bus_info);
bus_info = NULL;
- goto cleanup;
+ goto out_bus_epilog;
}
bus_info->visorchannel = visorchannel;
if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
@@ -1179,7 +1110,7 @@ bus_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
-cleanup:
+out_bus_epilog:
bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
rc, inmsg->hdr.flags.response_expected == 1);
}
@@ -1231,8 +1162,9 @@ bus_configure(struct controlvm_message *inmsg,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
} else {
- visorchannel_set_clientpartition(bus_info->visorchannel,
- cmd->configure_bus.guest_handle);
+ visorchannel_set_clientpartition
+ (bus_info->visorchannel,
+ cmd->configure_bus.guest_handle);
bus_info->partition_uuid = parser_id_get(parser_ctx);
parser_param_start(parser_ctx, PARSERSTRING_NAME);
bus_info->name = parser_string_get(parser_ctx);
@@ -1260,14 +1192,14 @@ my_device_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
- goto cleanup;
+ goto out_respond;
}
if (bus_info->state.created == 0) {
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
- goto cleanup;
+ goto out_respond;
}
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
@@ -1275,7 +1207,7 @@ my_device_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
- goto cleanup;
+ goto out_respond;
}
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
@@ -1283,7 +1215,7 @@ my_device_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto cleanup;
+ goto out_respond;
}
dev_info->chipset_bus_no = bus_no;
@@ -1308,7 +1240,7 @@ my_device_create(struct controlvm_message *inmsg)
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
kfree(dev_info);
dev_info = NULL;
- goto cleanup;
+ goto out_respond;
}
dev_info->visorchannel = visorchannel;
dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
@@ -1318,7 +1250,7 @@ my_device_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
-cleanup:
+out_respond:
device_epilog(dev_info, segment_state_running,
CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
inmsg->hdr.flags.response_expected == 1, 1);
@@ -1382,35 +1314,23 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
struct visor_controlvm_payload_info *info)
{
u8 *payload = NULL;
- int rc = CONTROLVM_RESP_SUCCESS;
- if (!info) {
- rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
- goto cleanup;
- }
+ if (!info)
+ return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
+
memset(info, 0, sizeof(struct visor_controlvm_payload_info));
- if ((offset == 0) || (bytes == 0)) {
- rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
- goto cleanup;
- }
+ if ((offset == 0) || (bytes == 0))
+ return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
+
payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
- if (!payload) {
- rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
- goto cleanup;
- }
+ if (!payload)
+ return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
info->offset = offset;
info->bytes = bytes;
info->ptr = payload;
-cleanup:
- if (rc < 0) {
- if (payload) {
- memunmap(payload);
- payload = NULL;
- }
- }
- return rc;
+ return CONTROLVM_RESP_SUCCESS;
}
static void
@@ -1490,14 +1410,8 @@ chipset_ready(struct controlvm_message_header *msg_hdr)
if (rc != CONTROLVM_RESP_SUCCESS)
rc = -rc;
- if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
+ if (msg_hdr->flags.response_expected)
controlvm_respond(msg_hdr, rc);
- if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
- /* Send CHIPSET_READY response when all modules have been loaded
- * and disks mounted for the partition
- */
- g_chipset_msg_hdr = *msg_hdr;
- }
}
static void
@@ -1726,9 +1640,10 @@ parahotplug_process_message(struct controlvm_message *inmsg)
* initialization.
*/
parahotplug_request_kickoff(req);
- controlvm_respond_physdev_changestate(&inmsg->hdr,
- CONTROLVM_RESP_SUCCESS,
- inmsg->cmd.device_change_state.state);
+ controlvm_respond_physdev_changestate
+ (&inmsg->hdr,
+ CONTROLVM_RESP_SUCCESS,
+ inmsg->cmd.device_change_state.state);
parahotplug_request_destroy(req);
} else {
/* For disable messages, add the request to the
@@ -1840,8 +1755,9 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
break;
default:
if (inmsg.hdr.flags.response_expected)
- controlvm_respond(&inmsg.hdr,
- -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
+ controlvm_respond
+ (&inmsg.hdr,
+ -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
break;
}
@@ -1885,31 +1801,11 @@ controlvm_periodic_work(struct work_struct *work)
struct controlvm_message inmsg;
bool got_command = false;
bool handle_command_failed = false;
- static u64 poll_count;
/* make sure visorbus server is registered for controlvm callbacks */
if (visorchipset_visorbusregwait && !visorbusregistered)
goto cleanup;
- poll_count++;
- if (poll_count >= 250)
- ; /* keep going */
- else
- goto cleanup;
-
- /* Check events to determine if response to CHIPSET_READY
- * should be sent
- */
- if (visorchipset_holdchipsetready &&
- (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
- if (check_chipset_events() == 1) {
- controlvm_respond(&g_chipset_msg_hdr, 0);
- clear_chipset_events();
- memset(&g_chipset_msg_hdr, 0,
- sizeof(struct controlvm_message_header));
- }
- }
-
while (visorchannel_signalremove(controlvm_channel,
CONTROLVM_QUEUE_RESPONSE,
&inmsg))
@@ -1979,8 +1875,11 @@ setup_crash_devices_work_queue(struct work_struct *work)
u16 local_crash_msg_count;
/* make sure visorbus is registered for controlvm callbacks */
- if (visorchipset_visorbusregwait && !visorbusregistered)
- goto cleanup;
+ if (visorchipset_visorbusregwait && !visorbusregistered) {
+ poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+ schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
+ return;
+ }
POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
@@ -2057,13 +1956,6 @@ setup_crash_devices_work_queue(struct work_struct *work)
return;
}
POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
- return;
-
-cleanup:
-
- poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
-
- schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
}
static void
@@ -2135,25 +2027,6 @@ device_resume_response(struct visor_device *dev_info, int response)
dev_info->pending_msg_hdr = NULL;
}
-static ssize_t chipsetready_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- char msgtype[64];
-
- if (sscanf(buf, "%63s", msgtype) != 1)
- return -EINVAL;
-
- if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
- chipset_events[0] = 1;
- return count;
- } else if (!strcmp(msgtype, "MODULES_LOADED")) {
- chipset_events[1] = 1;
- return count;
- }
- return -EINVAL;
-}
-
/* The parahotplug/devicedisabled interface gets called by our support script
* when an SR-IOV device has been shut down. The ID is passed to the script
* and then passed back when the device has been removed.
@@ -2205,10 +2078,11 @@ visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
if (!*file_controlvm_channel)
return -ENXIO;
- visorchannel_read(*file_controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- gp_control_channel),
- &addr, sizeof(addr));
+ visorchannel_read
+ (*file_controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ gp_control_channel),
+ &addr, sizeof(addr));
if (!addr)
return -ENXIO;
@@ -2308,16 +2182,25 @@ visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
return 0;
}
+static void
+visorchipset_file_cleanup(dev_t major_dev)
+{
+ if (file_cdev.ops)
+ cdev_del(&file_cdev);
+ file_cdev.ops = NULL;
+ unregister_chrdev_region(major_dev, 1);
+}
+
static int
visorchipset_init(struct acpi_device *acpi_device)
{
- int rc = 0;
+ int err = -ENODEV;
u64 addr;
uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
addr = controlvm_get_channel_address();
if (!addr)
- return -ENODEV;
+ goto error;
memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
@@ -2325,24 +2208,19 @@ visorchipset_init(struct acpi_device *acpi_device)
controlvm_channel = visorchannel_create_with_lock(addr, 0,
GFP_KERNEL, uuid);
if (!controlvm_channel)
- return -ENODEV;
+ goto error;
+
if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
visorchannel_get_header(controlvm_channel))) {
initialize_controlvm_payload();
} else {
- visorchannel_destroy(controlvm_channel);
- controlvm_channel = NULL;
- return -ENODEV;
+ goto error_destroy_channel;
}
major_dev = MKDEV(visorchipset_major, 0);
- rc = visorchipset_file_init(major_dev, &controlvm_channel);
- if (rc < 0) {
- POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
- goto cleanup;
- }
-
- memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
+ err = visorchipset_file_init(major_dev, &controlvm_channel);
+ if (err < 0)
+ goto error_destroy_payload;
/* if booting in a crash kernel */
if (is_kdump_kernel())
@@ -2359,27 +2237,33 @@ visorchipset_init(struct acpi_device *acpi_device)
visorchipset_platform_device.dev.devt = major_dev;
if (platform_device_register(&visorchipset_platform_device) < 0) {
POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
- rc = -ENODEV;
- goto cleanup;
+ err = -ENODEV;
+ goto error_cancel_work;
}
POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
- rc = visorbus_init();
-cleanup:
- if (rc) {
- POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
- POSTCODE_SEVERITY_ERR);
- }
- return rc;
-}
+ err = visorbus_init();
+ if (err < 0)
+ goto error_unregister;
-static void
-visorchipset_file_cleanup(dev_t major_dev)
-{
- if (file_cdev.ops)
- cdev_del(&file_cdev);
- file_cdev.ops = NULL;
- unregister_chrdev_region(major_dev, 1);
+ return 0;
+
+error_unregister:
+ platform_device_unregister(&visorchipset_platform_device);
+
+error_cancel_work:
+ cancel_delayed_work_sync(&periodic_controlvm_work);
+ visorchipset_file_cleanup(major_dev);
+
+error_destroy_payload:
+ destroy_controlvm_payload_info(&controlvm_payload_info);
+
+error_destroy_channel:
+ visorchannel_destroy(controlvm_channel);
+
+error:
+ POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
+ return err;
}
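
The rewritten init now follows the kernel's conventional descending unwind: one goto label per acquired resource, released in reverse order, with the failure postcode reported once at the bottom. Stripped to its shape (acquire_a/release_a and friends are placeholders, not driver functions):

static int example_init(void)
{
	int err = -ENODEV;

	if (!acquire_a())		/* placeholder resource #1 */
		goto error;
	if (!acquire_b()) {		/* placeholder resource #2 */
		err = -ENOMEM;
		goto error_release_a;
	}
	err = start_c();		/* placeholder final step */
	if (err < 0)
		goto error_release_b;
	return 0;

error_release_b:
	release_b();
error_release_a:
	release_a();
error:
	pr_err("example init failed: %d\n", err);
	return err;
}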
static int
@@ -2392,8 +2276,6 @@ visorchipset_exit(struct acpi_device *acpi_device)
cancel_delayed_work_sync(&periodic_controlvm_work);
destroy_controlvm_payload_info(&controlvm_payload_info);
- memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
-
visorchannel_destroy(controlvm_channel);
visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
@@ -2425,7 +2307,7 @@ static __init uint32_t visorutil_spar_detect(void)
{
unsigned int eax, ebx, ecx, edx;
- if (cpu_has_hypervisor) {
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
/* check the ID */
cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
return (ebx == UNISYS_SPAR_ID_EBX) &&
@@ -2460,12 +2342,8 @@ module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_major,
"major device number to use for the device node");
module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_visorbusreqwait,
+MODULE_PARM_DESC(visorchipset_visorbusregwait,
"1 to have the module wait for the visor bus to register");
-module_param_named(holdchipsetready, visorchipset_holdchipsetready,
- int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_holdchipsetready,
- "1 to hold response to CHIPSET_READY");
module_init(init_unisys);
module_exit(exit_unisys);
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index e93bb1dbf..6a4570d10 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -52,6 +52,8 @@ static int visorhba_resume(struct visor_device *dev,
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
size_t len, loff_t *offset);
+static int set_no_disk_inquiry_result(unsigned char *buf,
+ size_t len, bool is_lun0);
static struct dentry *visorhba_debugfs_dir;
static const struct file_operations debugfs_info_fops = {
.read = info_debugfs_read,
@@ -83,12 +85,6 @@ static struct visor_driver visorhba_driver = {
MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR);
-struct visor_thread_info {
- struct task_struct *task;
- struct completion has_stopped;
- int id;
-};
-
struct visordisk_info {
u32 valid;
u32 channel, id, lun; /* Disk Path */
@@ -135,7 +131,7 @@ struct visorhba_devdata {
struct visordisk_info head;
unsigned int max_buff_len;
int devnum;
- struct visor_thread_info threadinfo;
+ struct task_struct *thread;
int thread_wait_ms;
};
@@ -152,28 +148,36 @@ static struct visorhba_devices_open visorhbas_open[VISORHBA_OPEN_MAX];
(iter->lun == match->lun))
/**
* visor_thread_start - starts a thread for the device
- * @thrinfo: The thread to start
 * @threadfn: Function the thread executes
* @thrcontext: Context to pass to the thread, i.e. devdata
* @name: string describing name of thread
*
* Starts a thread for the device.
*
- * Return 0 on success;
+ * Return the task_struct * denoting the thread on success,
+ * or NULL on failure
*/
-static int visor_thread_start(struct visor_thread_info *thrinfo,
- int (*threadfn)(void *),
- void *thrcontext, char *name)
+static struct task_struct *visor_thread_start
+(int (*threadfn)(void *), void *thrcontext, char *name)
{
- /* used to stop the thread */
- init_completion(&thrinfo->has_stopped);
- thrinfo->task = kthread_run(threadfn, thrcontext, "%s", name);
- if (IS_ERR(thrinfo->task)) {
- thrinfo->id = 0;
- return PTR_ERR(thrinfo->task);
+ struct task_struct *task;
+
+ task = kthread_run(threadfn, thrcontext, "%s", name);
+ if (IS_ERR(task)) {
+ pr_err("visorbus failed to start thread\n");
+ return NULL;
}
- thrinfo->id = thrinfo->task->pid;
- return 0;
+ return task;
+}
+
+/**
+ * visor_thread_stop - stops the thread if it is running
+ * @task: the task to stop, or NULL if the thread was never started
+ */
+static void visor_thread_stop(struct task_struct *task)
+{
+ if (!task)
+ return; /* no thread running */
+ kthread_stop(task);
}
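
visor_thread_start()/visor_thread_stop() above are thin wrappers over the stock kthread API, replacing the old completion-tracked visor_thread_info. The thread function must poll kthread_should_stop() so that kthread_stop() can wake and reap it. A minimal sketch (example_* names are illustrative):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/delay.h>

static int example_thread_fn(void *ctx)
{
	while (!kthread_should_stop()) {
		/* ... service the response queue ... */
		msleep_interruptible(2);
	}
	return 0;
}

static struct task_struct *example_start(void *ctx)
{
	struct task_struct *task = kthread_run(example_thread_fn, ctx, "example");

	return IS_ERR(task) ? NULL : task;	/* NULL on failure, as above */
}

static void example_stop(struct task_struct *task)
{
	if (task)		/* tolerate "never started" */
		kthread_stop(task);
}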
/**
@@ -231,16 +235,17 @@ static void *del_scsipending_ent(struct visorhba_devdata *devdata,
int del)
{
unsigned long flags;
- void *sent = NULL;
+ void *sent;
- if (del < MAX_PENDING_REQUESTS) {
- spin_lock_irqsave(&devdata->privlock, flags);
- sent = devdata->pending[del].sent;
+ if (del >= MAX_PENDING_REQUESTS)
+ return NULL;
- devdata->pending[del].cmdtype = 0;
- devdata->pending[del].sent = NULL;
- spin_unlock_irqrestore(&devdata->privlock, flags);
- }
+ spin_lock_irqsave(&devdata->privlock, flags);
+ sent = devdata->pending[del].sent;
+
+ devdata->pending[del].cmdtype = 0;
+ devdata->pending[del].sent = NULL;
+ spin_unlock_irqrestore(&devdata->privlock, flags);
return sent;
}
@@ -681,7 +686,7 @@ static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
/* Stop using the IOVM response queue (queue should be drained
* by the end)
*/
- kthread_stop(devdata->threadinfo.task);
+ visor_thread_stop(devdata->thread);
/* Fail commands that weren't completed */
spin_lock_irqsave(&devdata->privlock, flags);
@@ -772,6 +777,24 @@ do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
}
}
+static int set_no_disk_inquiry_result(unsigned char *buf,
+ size_t len, bool is_lun0)
+{
+ if (!buf || len < NO_DISK_INQUIRY_RESULT_LEN)
+ return -EINVAL;
+ memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
+ buf[2] = SCSI_SPC2_VER;
+ if (is_lun0) {
+ buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
+ buf[3] = DEV_HISUPPORT;
+ } else {
+ buf[0] = DEV_NOT_CAPABLE;
+ }
+ buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
+ strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
+ return 0;
+}
+
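For reference, the helper above fills the standard SCSI INQUIRY data layout: byte 0 carries the peripheral qualifier/device type, byte 2 the claimed SPC version, byte 3 the response data format (with the HiSup bit for LUN 0 here), byte 4 the additional length (total size minus the 5-byte header), and bytes 8 onward the vendor/product identification text. A hedged usage sketch against a caller-sized buffer (lun stands for whatever LUN the caller is answering for):

	unsigned char inq[NO_DISK_INQUIRY_RESULT_LEN];

	/* returns -EINVAL if buf is NULL or too short for the layout */
	if (set_no_disk_inquiry_result(inq, sizeof(inq), lun == 0))
		return;		/* leave the response untouched on error */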
/**
* do_scsi_nolinuxstat - scsi command didn't have linuxstat
* @cmdrsp: response from IOVM
@@ -804,10 +827,8 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
* a disk there so we'll present a processor
* there.
*/
- SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen,
- scsidev->lun,
- DEV_DISK_CAPABLE_NOT_PRESENT,
- DEV_NOT_CAPABLE);
+ set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
+ scsidev->lun == 0);
if (scsi_sg_count(scsicmd) == 0) {
memcpy(scsi_sglist(scsicmd), buf,
@@ -929,14 +950,15 @@ static void process_disk_notify(struct Scsi_Host *shost,
struct diskaddremove *dar;
dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
- if (dar) {
- dar->add = cmdrsp->disknotify.add;
- dar->shost = shost;
- dar->channel = cmdrsp->disknotify.channel;
- dar->id = cmdrsp->disknotify.id;
- dar->lun = cmdrsp->disknotify.lun;
- queue_disk_add_remove(dar);
- }
+ if (!dar)
+ return;
+
+ dar->add = cmdrsp->disknotify.add;
+ dar->shost = shost;
+ dar->channel = cmdrsp->disknotify.channel;
+ dar->id = cmdrsp->disknotify.id;
+ dar->lun = cmdrsp->disknotify.lun;
+ queue_disk_add_remove(dar);
}
/**
@@ -1064,8 +1086,8 @@ static int visorhba_resume(struct visor_device *dev,
if (devdata->serverdown && !devdata->serverchangingstate)
devdata->serverchangingstate = true;
- visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
- devdata, "vhba_incming");
+ devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
+					    "vhba_incoming");
devdata->serverdown = false;
devdata->serverchangingstate = false;
@@ -1141,8 +1163,8 @@ static int visorhba_probe(struct visor_device *dev)
goto err_scsi_remove_host;
devdata->thread_wait_ms = 2;
- visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
- devdata, "vhba_incoming");
+ devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
+ "vhba_incoming");
scsi_scan_host(scsihost);
@@ -1172,7 +1194,7 @@ static void visorhba_remove(struct visor_device *dev)
return;
scsihost = devdata->scsihost;
- kthread_stop(devdata->threadinfo.task);
+ visor_thread_stop(devdata->thread);
scsi_remove_host(scsihost);
scsi_host_put(scsihost);
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 13c031611..12a357078 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -123,9 +123,9 @@ static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = {
[38] = KEY_L,
[39] = KEY_SEMICOLON,
[40] = KEY_APOSTROPHE,
- [41] = KEY_GRAVE, /* FIXME, '#' */
+ [41] = KEY_GRAVE,
[42] = KEY_LEFTSHIFT,
- [43] = KEY_BACKSLASH, /* FIXME, '~' */
+ [43] = KEY_BACKSLASH,
[44] = KEY_Z,
[45] = KEY_X,
[46] = KEY_C,
@@ -173,7 +173,7 @@ static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = {
[88] = KEY_F12,
[90] = KEY_KPLEFTPAREN,
[91] = KEY_KPRIGHTPAREN,
- [92] = KEY_KPASTERISK, /* FIXME */
+ [92] = KEY_KPASTERISK,
[93] = KEY_KPASTERISK,
[94] = KEY_KPPLUS,
[95] = KEY_HELP,
@@ -467,18 +467,14 @@ handle_locking_key(struct input_dev *visorinput_dev,
break;
default:
led = -1;
- break;
+ return;
}
- if (led >= 0) {
- int old_state = (test_bit(led, visorinput_dev->led) != 0);
-
- if (old_state != desired_state) {
- input_report_key(visorinput_dev, keycode, 1);
- input_sync(visorinput_dev);
- input_report_key(visorinput_dev, keycode, 0);
- input_sync(visorinput_dev);
- __change_bit(led, visorinput_dev->led);
- }
+ if (test_bit(led, visorinput_dev->led) != desired_state) {
+ input_report_key(visorinput_dev, keycode, 1);
+ input_sync(visorinput_dev);
+ input_report_key(visorinput_dev, keycode, 0);
+ input_sync(visorinput_dev);
+ __change_bit(led, visorinput_dev->led);
}
}
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index be0d05734..fd7c9a6cb 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -109,51 +109,46 @@ struct chanstat {
};
struct visornic_devdata {
- unsigned short enabled; /* 0 disabled 1 enabled to receive */
- unsigned short enab_dis_acked; /* NET_RCV_ENABLE/DISABLE acked by
- * IOPART
- */
+ /* 0 disabled 1 enabled to receive */
+ unsigned short enabled;
+ /* NET_RCV_ENABLE/DISABLE acked by IOPART */
+ unsigned short enab_dis_acked;
+
struct visor_device *dev;
struct net_device *netdev;
struct net_device_stats net_stats;
atomic_t interrupt_rcvd;
wait_queue_head_t rsp_queue;
struct sk_buff **rcvbuf;
- u64 incarnation_id; /* lets IOPART know about re-birth */
- unsigned short old_flags; /* flags as they were prior to
- * set_multicast_list
- */
- atomic_t usage; /* count of users */
- int num_rcv_bufs; /* indicates how many rcv buffers
- * the vnic will post
- */
+ /* incarnation_id lets IOPART know about re-birth */
+ u64 incarnation_id;
+ /* flags as they were prior to set_multicast_list */
+ unsigned short old_flags;
+ atomic_t usage; /* count of users */
+
+ /* number of rcv buffers the vnic will post */
+ int num_rcv_bufs;
int num_rcv_bufs_could_not_alloc;
atomic_t num_rcvbuf_in_iovm;
unsigned long alloc_failed_in_if_needed_cnt;
unsigned long alloc_failed_in_repost_rtn_cnt;
- unsigned long max_outstanding_net_xmits; /* absolute max number of
- * outstanding xmits - should
- * never hit this
- */
- unsigned long upper_threshold_net_xmits; /* high water mark for
- * calling netif_stop_queue()
- */
- unsigned long lower_threshold_net_xmits; /* high water mark for calling
- * netif_wake_queue()
- */
- struct sk_buff_head xmitbufhead; /* xmitbufhead is the head of the
- * xmit buffer list that have been
- * sent to the IOPART end
- */
+
+ /* absolute max number of outstanding xmits - should never hit this */
+ unsigned long max_outstanding_net_xmits;
+ /* high water mark for calling netif_stop_queue() */
+ unsigned long upper_threshold_net_xmits;
+ /* high water mark for calling netif_wake_queue() */
+ unsigned long lower_threshold_net_xmits;
+ /* xmitbufhead - head of the xmit buffer list sent to the IOPART end */
+ struct sk_buff_head xmitbufhead;
+
visorbus_state_complete_func server_down_complete_func;
struct work_struct timeout_reset;
- struct uiscmdrsp *cmdrsp_rcv; /* cmdrsp_rcv is used for
- * posting/unposting rcv buffers
- */
- struct uiscmdrsp *xmit_cmdrsp; /* used to issue NET_XMIT - there is
- * never more that one xmit in
- * progress at a time
- */
+ /* cmdrsp_rcv is used for posting/unposting rcv buffers */
+ struct uiscmdrsp *cmdrsp_rcv;
+ /* xmit_cmdrsp - issues NET_XMIT - only one active xmit at a time */
+ struct uiscmdrsp *xmit_cmdrsp;
+
bool server_down; /* IOPART is down */
bool server_change_state; /* Processing SERVER_CHANGESTATE msg */
bool going_away; /* device is being torn down */
@@ -173,18 +168,10 @@ struct visornic_devdata {
unsigned long n_rcv1; /* # rcvs of 1 buffers */
unsigned long n_rcv2; /* # rcvs of 2 buffers */
unsigned long n_rcvx; /* # rcvs of >2 buffers */
- unsigned long found_repost_rcvbuf_cnt; /* # times we called
- * repost_rcvbuf_cnt
- */
- unsigned long repost_found_skb_cnt; /* # times found the skb */
- unsigned long n_repost_deficit; /* # times we couldn't find
- * all of the rcv buffers
- */
- unsigned long bad_rcv_buf; /* # times we negleted to
- * free the rcv skb because
- * we didn't know where it
- * came from
- */
+	unsigned long found_repost_rcvbuf_cnt;	/* # times repost_rcvbuf called */
+	unsigned long repost_found_skb_cnt;	/* # times the skb was found */
+ unsigned long n_repost_deficit; /* # of lost rcv buffers */
+ unsigned long bad_rcv_buf; /* # of unknown rcv skb not freed */
	unsigned long n_rcv_packets_not_accepted;/* # bogus rcv packets */
int queuefullmsg_logged;
@@ -209,18 +196,17 @@ static void poll_for_irq(unsigned long v);
* Return value indicates number of entries filled in frags
* Negative values indicate an error.
*/
-static unsigned int
+static int
visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
unsigned int frags_max,
struct phys_info frags[])
{
- unsigned int count = 0, ii, size, offset = 0, numfrags;
+ unsigned int count = 0, frag, size, offset = 0, numfrags;
unsigned int total_count;
numfrags = skb_shinfo(skb)->nr_frags;
- /*
- * Compute the number of fragments this skb has, and if its more than
+	/* Compute the number of fragments this skb has, and if it's more than
	 * the frag array can hold, linearize the skb
*/
total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
@@ -257,23 +243,20 @@ visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
if ((count + numfrags) > frags_max)
return -EINVAL;
- for (ii = 0; ii < numfrags; ii++) {
+ for (frag = 0; frag < numfrags; frag++) {
count = add_physinfo_entries(page_to_pfn(
- skb_frag_page(&skb_shinfo(skb)->frags[ii])),
- skb_shinfo(skb)->frags[ii].
+ skb_frag_page(&skb_shinfo(skb)->frags[frag])),
+ skb_shinfo(skb)->frags[frag].
page_offset,
- skb_shinfo(skb)->frags[ii].
+ skb_shinfo(skb)->frags[frag].
size, count, frags_max, frags);
- /*
- * add_physinfo_entries only returns
+ /* add_physinfo_entries only returns
* zero if the frags array is out of room
* That should never happen because we
* fail above, if count+numfrags > frags_max.
- * Given that theres no recovery mechanism from putting
- * half a packet in the I/O channel, panic here as this
- * should never happen
*/
- BUG_ON(!count);
+ if (!count)
+ return -EINVAL;
}
}
if (skb_shinfo(skb)->frag_list) {
@@ -299,8 +282,7 @@ static ssize_t enable_ints_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *ppos)
{
- /*
- * Don't want to break ABI here by having a debugfs
+ /* Don't want to break ABI here by having a debugfs
* file that no longer exists or is writable, so
	 * let's just make this a vestigial function
*/
@@ -308,8 +290,7 @@ static ssize_t enable_ints_write(struct file *file,
}
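
Keeping the file but ignoring its input is the usual way to retire a debugfs knob without breaking scripts that still open and write it. The whole vestigial handler reduces to:

static ssize_t example_vestigial_write(struct file *file,
				       const char __user *buffer,
				       size_t count, loff_t *ppos)
{
	return count;	/* accept and discard; the knob no longer does anything */
}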
/**
- * visornic_serverdown_complete - IOPART went down, need to pause
- * device
+ * visornic_serverdown_complete - IOPART went down, pause device
* @work: Work queue it was scheduled on
*
* The IO partition has gone down and we need to do some cleanup
@@ -344,7 +325,7 @@ visornic_serverdown_complete(struct visornic_devdata *devdata)
}
/**
- * visornic_serverdown - Command has notified us that IOPARt is down
+ * visornic_serverdown - Command has notified us that IOPART is down
* @devdata: device that is being managed by IOPART
*
* Schedule the work needed to handle the server down request. Make
@@ -356,28 +337,38 @@ visornic_serverdown(struct visornic_devdata *devdata,
visorbus_state_complete_func complete_func)
{
unsigned long flags;
+ int err;
spin_lock_irqsave(&devdata->priv_lock, flags);
- if (!devdata->server_down && !devdata->server_change_state) {
- if (devdata->going_away) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- dev_dbg(&devdata->dev->device,
- "%s aborting because device removal pending\n",
- __func__);
- return -ENODEV;
- }
- devdata->server_change_state = true;
- devdata->server_down_complete_func = complete_func;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- visornic_serverdown_complete(devdata);
- } else if (devdata->server_change_state) {
+ if (devdata->server_change_state) {
dev_dbg(&devdata->dev->device, "%s changing state\n",
__func__);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_unlock;
+ }
+ if (devdata->server_down) {
+ dev_dbg(&devdata->dev->device, "%s already down\n",
+ __func__);
+ err = -EINVAL;
+ goto err_unlock;
+ }
+ if (devdata->going_away) {
+ dev_dbg(&devdata->dev->device,
+ "%s aborting because device removal pending\n",
+ __func__);
+ err = -ENODEV;
+ goto err_unlock;
}
+ devdata->server_change_state = true;
+ devdata->server_down_complete_func = complete_func;
spin_unlock_irqrestore(&devdata->priv_lock, flags);
+
+ visornic_serverdown_complete(devdata);
return 0;
+
+err_unlock:
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+ return err;
}
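
visornic_serverdown() now checks each disallowed state under the lock and bails through a single unlock label, and the success path drops the spinlock before calling visornic_serverdown_complete() so the completion work never runs with the lock held. Condensed to its shape (struct example_dev and do_completion_work are placeholders):

struct example_dev {
	spinlock_t lock;
	bool busy, going_away;
};

static int example_transition(struct example_dev *d)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&d->lock, flags);
	if (d->busy) {			/* one check per disallowed state */
		err = -EINVAL;
		goto err_unlock;
	}
	if (d->going_away) {
		err = -ENODEV;
		goto err_unlock;
	}
	d->busy = true;			/* commit the state change */
	spin_unlock_irqrestore(&d->lock, flags);

	do_completion_work(d);		/* placeholder: runs lock-free */
	return 0;

err_unlock:
	spin_unlock_irqrestore(&d->lock, flags);
	return err;
}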
/**
@@ -395,20 +386,19 @@ alloc_rcv_buf(struct net_device *netdev)
/* NOTE: the first fragment in each rcv buffer is pointed to by
* rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
- * in length, so the firstfrag is large enough to hold 1514.
+ * in length, so the first frag is large enough to hold 1514.
*/
skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
if (!skb)
return NULL;
skb->dev = netdev;
- skb->len = RCVPOST_BUF_SIZE;
/* current value of mtu doesn't come into play here; large
* packets will just end up using multiple rcv buffers all of
- * same size
+ * same size.
*/
- skb->data_len = 0; /* dev_alloc_skb already zeroes it out
- * for clarification.
- */
+ skb->len = RCVPOST_BUF_SIZE;
+ /* alloc_skb already zeroes it out for clarification. */
+ skb->data_len = 0;
return skb;
}
@@ -436,8 +426,8 @@ post_skb(struct uiscmdrsp *cmdrsp,
cmdrsp->net.type = NET_RCV_POST;
cmdrsp->cmdtype = CMD_NET_TYPE;
if (visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp)) {
+ IOCHAN_TO_IOPART,
+ cmdrsp)) {
atomic_inc(&devdata->num_rcvbuf_in_iovm);
devdata->chstat.sent_post++;
} else {
@@ -465,8 +455,8 @@ send_enbdis(struct net_device *netdev, int state,
devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
if (visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- devdata->cmdrsp_rcv))
+ IOCHAN_TO_IOPART,
+ devdata->cmdrsp_rcv))
devdata->chstat.sent_enbdis++;
}
@@ -872,8 +862,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (vnic_hit_high_watermark(devdata,
devdata->max_outstanding_net_xmits)) {
- /* too many NET_XMITs queued over to IOVM - need to wait
- */
+ /* extra NET_XMITs queued over to IOVM - need to wait */
devdata->chstat.reject_count++;
if (!devdata->queuefullmsg_logged &&
((devdata->chstat.reject_count & 0x3ff) == 1))
@@ -950,16 +939,12 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
devdata->net_stats.tx_bytes += skb->len;
devdata->chstat.sent_xmit++;
- /* check to see if we have hit the high watermark for
- * netif_stop_queue()
- */
+ /* check if we have hit the high watermark for netif_stop_queue() */
if (vnic_hit_high_watermark(devdata,
devdata->upper_threshold_net_xmits)) {
- /* too many NET_XMITs queued over to IOVM - need to wait */
- netif_stop_queue(netdev); /* calling stop queue - call
- * netif_wake_queue() after lower
- * threshold
- */
+ /* extra NET_XMITs queued over to IOVM - need to wait */
+ /* stop queue - call netif_wake_queue() after lower threshold */
+ netif_stop_queue(netdev);
dev_dbg(&netdev->dev,
"%s busy - invoking iovm flow control\n",
__func__);
@@ -1312,16 +1297,13 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
break;
}
}
+ /* accept pkt, dest matches a multicast addr */
if (found_mc)
- break; /* accept packet, dest
- * matches a multicast
- * address
- */
+ break;
}
+ /* accept packet, h_dest must match vnic mac address */
} else if (skb->pkt_type == PACKET_HOST) {
- break; /* accept packet, h_dest must match vnic
- * mac address
- */
+ break;
} else if (skb->pkt_type == PACKET_OTHERHOST) {
/* something is not right */
dev_err(&devdata->netdev->dev,
@@ -1409,14 +1391,10 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
if (!vbuf)
return -ENOMEM;
- /* for each vnic channel
- * dump out channel specific data
- */
+ /* for each vnic channel dump out channel specific data */
rcu_read_lock();
for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
- /*
- * Only consider netdevs that are visornic, and are open
- */
+ /* Only consider netdevs that are visornic, and are open */
if ((dev->netdev_ops != &visornic_dev_ops) ||
(!netif_queue_stopped(dev)))
continue;
@@ -1643,12 +1621,12 @@ service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
/* ASSERT netdev == vnicinfo->netdev; */
if ((netdev == devdata->netdev) &&
netif_queue_stopped(netdev)) {
- /* check to see if we have crossed
- * the lower watermark for
- * netif_wake_queue()
+ /* check if we have crossed the lower watermark
+ * for netif_wake_queue()
*/
- if (vnic_hit_low_watermark(devdata,
- devdata->lower_threshold_net_xmits)) {
+ if (vnic_hit_low_watermark
+ (devdata,
+ devdata->lower_threshold_net_xmits)) {
/* enough NET_XMITs completed
* so can restart netif queue
*/
@@ -1712,10 +1690,7 @@ static int visornic_poll(struct napi_struct *napi, int budget)
send_rcv_posts_if_needed(devdata);
service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
- /*
- * If there aren't any more packets to receive
- * stop the poll
- */
+ /* If there aren't any more packets to receive stop the poll */
if (rx_count < budget)
napi_complete(napi);
@@ -1867,8 +1842,7 @@ static int visornic_probe(struct visor_device *dev)
setup_timer(&devdata->irq_poll_timer, poll_for_irq,
(unsigned long)devdata);
- /*
- * Note: This time has to start running before the while
+	/* Note: This timer has to start running before the while
* loop below because the napi routine is responsible for
* setting enab_dis_acked
*/
@@ -1897,8 +1871,7 @@ static int visornic_probe(struct visor_device *dev)
/* Let's start our threads to get responses */
netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
- /*
- * Note: Interupts have to be enable before the while
+	/* Note: Interrupts have to be enabled before the while
* loop below because the napi routine is responsible for
* setting enab_dis_acked
*/
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
index df992c3cb..ba9fe3bc2 100644
--- a/drivers/staging/vme/devices/vme_pio2_gpio.c
+++ b/drivers/staging/vme/devices/vme_pio2_gpio.c
@@ -17,7 +17,7 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ctype.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/vme.h>
@@ -25,16 +25,11 @@
static const char driver_name[] = "pio2_gpio";
-static struct pio2_card *gpio_to_pio2_card(struct gpio_chip *chip)
-{
- return container_of(chip, struct pio2_card, gc);
-}
-
static int pio2_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
u8 reg;
int retval;
- struct pio2_card *card = gpio_to_pio2_card(chip);
+ struct pio2_card *card = gpiochip_get_data(chip);
if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) |
(card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
@@ -71,7 +66,7 @@ static void pio2_gpio_set(struct gpio_chip *chip,
{
u8 reg;
int retval;
- struct pio2_card *card = gpio_to_pio2_card(chip);
+ struct pio2_card *card = gpiochip_get_data(chip);
if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) |
(card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
@@ -97,10 +92,10 @@ static void pio2_gpio_set(struct gpio_chip *chip,
}
/* Directionality configured at board build - send appropriate response */
-static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
+static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
{
int data;
- struct pio2_card *card = gpio_to_pio2_card(chip);
+ struct pio2_card *card = gpiochip_get_data(chip);
if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) |
(card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
@@ -116,10 +111,11 @@ static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
}
/* Directionality configured at board build - send appropriate response */
-static int pio2_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
+static int pio2_gpio_dir_out(struct gpio_chip *chip,
+ unsigned int offset, int value)
{
int data;
- struct pio2_card *card = gpio_to_pio2_card(chip);
+ struct pio2_card *card = gpiochip_get_data(chip);
if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) |
(card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
@@ -205,7 +201,7 @@ int pio2_gpio_init(struct pio2_card *card)
card->gc.set = pio2_gpio_set;
/* This function adds a memory mapped GPIO chip */
- retval = gpiochip_add(&card->gc);
+ retval = gpiochip_add_data(&card->gc, card);
if (retval) {
dev_err(&card->vdev->dev, "Unable to register GPIO\n");
kfree(card->gc.label);
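
The pio2 conversion above swaps a hand-rolled container_of() lookup for gpiolib's data pointer: gpiochip_add_data() stores a per-chip cookie at registration time and gpiochip_get_data() hands it back inside every callback. A minimal sketch (struct example_card is illustrative):

#include <linux/gpio/driver.h>

struct example_card {
	struct gpio_chip gc;
	/* device registers, locks, ... */
};

static int example_get(struct gpio_chip *chip, unsigned int offset)
{
	struct example_card *card = gpiochip_get_data(chip);

	/* ... read the line state from card's hardware ... */
	return 0;
}

static int example_register(struct example_card *card)
{
	card->gc.get = example_get;
	/* hand gpiolib the back-pointer instead of deriving it later */
	return gpiochip_add_data(&card->gc, card);
}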
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 1e6c0c4a0..654d072bd 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -36,8 +36,10 @@
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
* 08-07-2003 Bryan YC Fan: Add MAXIM2827/2825 and RFMD2959 support.
- * 08-26-2003 Kyle Hsu : Modify BBuGetFrameTime() and BBvCalculateParameter().
- * cancel the setting of MAC_REG_SOFTPWRCTL on BBbVT3253Init().
+ * 08-26-2003 Kyle Hsu : Modify BBuGetFrameTime() and
+ * BBvCalculateParameter().
+ * cancel the setting of MAC_REG_SOFTPWRCTL on
+ * BBbVT3253Init().
* Add the comments.
* 09-01-2003 Bryan YC Fan: RF & BB tables updated.
* Modified BBvLoopbackOn & BBvLoopbackOff().
@@ -66,7 +68,7 @@
/*--------------------- Static Variables --------------------------*/
#define CB_VT3253_INIT_FOR_RFMD 446
-static unsigned char byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = {
+static const unsigned char byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = {
{0x00, 0x30},
{0x01, 0x00},
{0x02, 0x00},
@@ -516,7 +518,7 @@ static unsigned char byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = {
};
#define CB_VT3253B0_INIT_FOR_RFMD 256
-static unsigned char byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
+static const unsigned char byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
{0x00, 0x31},
{0x01, 0x00},
{0x02, 0x00},
@@ -777,7 +779,8 @@ static unsigned char byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
#define CB_VT3253B0_AGC_FOR_RFMD2959 195
/* For RFMD2959 */
-static unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = {
+static
+unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = {
{0xF0, 0x00},
{0xF1, 0x3E},
{0xF0, 0x80},
@@ -977,7 +980,8 @@ static unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] =
#define CB_VT3253B0_INIT_FOR_AIROHA2230 256
/* For AIROHA */
-static unsigned char byVT3253B0_AIROHA2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = {
+static
+unsigned char byVT3253B0_AIROHA2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = {
{0x00, 0x31},
{0x01, 0x00},
{0x02, 0x00},
@@ -2160,9 +2164,13 @@ bool BBbVT3253Init(struct vnt_private *priv)
/* {{ RobertYu:20050223, request by JerryChung */
- /* Init ANT B select,TX Config CR09 = 0x61->0x45, 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
+ /* Init ANT B select,TX Config CR09 = 0x61->0x45,
+ * 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
+ */
/*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/
- /* Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
+ /* Init ANT B select,RX Config CR10 = 0x28->0x2A,
+ * 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted)
+ */
/*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
/* Select VC1/VC2, CR215 = 0x02->0x06 */
bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index 43a4fb1f3..b4e8c4318 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -77,8 +77,10 @@ BBuGetFrameTime(
void vnt_get_phy_field(struct vnt_private *, u32 frame_length,
u16 tx_rate, u8 pkt_type, struct vnt_phy_field *);
-bool BBbReadEmbedded(struct vnt_private *, unsigned char byBBAddr, unsigned char *pbyData);
-bool BBbWriteEmbedded(struct vnt_private *, unsigned char byBBAddr, unsigned char byData);
+bool BBbReadEmbedded(struct vnt_private *, unsigned char byBBAddr,
+ unsigned char *pbyData);
+bool BBbWriteEmbedded(struct vnt_private *, unsigned char byBBAddr,
+ unsigned char byData);
void BBvSetShortSlotTime(struct vnt_private *);
void BBvSetVGAGainOffset(struct vnt_private *, unsigned char byData);
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 3d338122b..afb1e8bde 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -336,7 +336,8 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
}
if (priv->byCWMaxMin != byCWMaxMin) {
priv->byCWMaxMin = byCWMaxMin;
- VNSvOutPortB(priv->PortOffset + MAC_REG_CWMAXMIN0, priv->byCWMaxMin);
+ VNSvOutPortB(priv->PortOffset + MAC_REG_CWMAXMIN0,
+ priv->byCWMaxMin);
}
priv->byPacketType = CARDbyGetPktType(priv);
@@ -373,9 +374,12 @@ bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp,
local_tsf);
/* adjust TSF, HW's TSF add TSF Offset reg */
- VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST, (u32)qwTSFOffset);
- VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST + 4, (u32)(qwTSFOffset >> 32));
- MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_TSFSYNCEN);
+ VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST,
+ (u32)qwTSFOffset);
+ VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST + 4,
+ (u32)(qwTSFOffset >> 32));
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL,
+ TFTCTL_TSFSYNCEN);
}
return true;
}
@@ -407,7 +411,8 @@ bool CARDbSetBeaconPeriod(struct vnt_private *priv,
priv->wBeaconInterval = wBeaconInterval;
/* Set NextTBTT */
VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
- VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
+ VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT + 4,
+ (u32)(qwNextTBTT >> 32));
MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
return true;
@@ -433,15 +438,19 @@ bool CARDbRadioPowerOff(struct vnt_private *priv)
switch (priv->byRFType) {
case RF_RFMD2959:
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_TXPEINV);
- MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE1);
+ MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_TXPEINV);
+ MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE1);
break;
case RF_AIROHA:
case RF_AL2230S:
case RF_AIROHA7230:
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE2);
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE2);
+ MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE3);
break;
}
@@ -451,7 +460,8 @@ bool CARDbRadioPowerOff(struct vnt_private *priv)
priv->bRadioOff = true;
pr_debug("chester power off\n");
- MACvRegBitsOn(priv->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_GPIOCTL0,
+ LED_ACTSET); /* LED issue */
return bResult;
}
@@ -488,21 +498,24 @@ bool CARDbRadioPowerOn(struct vnt_private *priv)
switch (priv->byRFType) {
case RF_RFMD2959:
- MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_TXPEINV);
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE1);
+ MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_TXPEINV);
+ MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE1);
break;
case RF_AIROHA:
case RF_AL2230S:
case RF_AIROHA7230:
- MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE2 |
- SOFTPWRCTL_SWPE3));
+ MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3));
break;
}
priv->bRadioOff = false;
pr_debug("chester power on\n");
- MACvRegBitsOff(priv->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_GPIOCTL0,
+ LED_ACTSET); /* LED issue */
return bResult;
}
@@ -717,55 +730,72 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_6, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_6,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_9 */
s_vCalculateOFDMRParameter(RATE_9M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_9, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_9,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_12 */
s_vCalculateOFDMRParameter(RATE_12M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_12, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_12,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_18 */
s_vCalculateOFDMRParameter(RATE_18M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_18, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_18,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_24 */
s_vCalculateOFDMRParameter(RATE_24M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_24, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_24,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_36 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_36M),
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
+ (void *)priv,
+ RATE_36M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_36, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_36,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_48 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_48M),
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
+ (void *)priv,
+ RATE_48M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_48, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_48,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_54 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_54M),
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
+ (void *)priv,
+ RATE_54M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_54, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_54,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_72 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_54M),
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
+ (void *)priv,
+ RATE_54M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_72, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_72,
+ MAKEWORD(byTxRate, byRsvTime));
/* Set to Page0 */
MACvSelectPage0(priv->PortOffset);
@@ -830,7 +860,8 @@ unsigned char CARDbyGetPktType(struct vnt_private *priv)
*
* Return Value: none
*/
-void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode)
+void CARDvSetLoopbackMode(struct vnt_private *priv,
+ unsigned short wLoopbackMode)
{
switch (wLoopbackMode) {
case CARD_LB_NONE:
@@ -965,7 +996,8 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
*
* Return Value: none
*/
-void CARDvSetFirstNextTBTT(struct vnt_private *priv, unsigned short wBeaconInterval)
+void CARDvSetFirstNextTBTT(struct vnt_private *priv,
+ unsigned short wBeaconInterval)
{
void __iomem *dwIoBase = priv->PortOffset;
u64 qwNextTBTT = 0;
@@ -993,7 +1025,8 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv, unsigned short wBeaconInter
*
* Return Value: none
*/
-void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF, unsigned short wBeaconInterval)
+void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
+ unsigned short wBeaconInterval)
{
void __iomem *dwIoBase = priv->PortOffset;
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 16cca49e6..0203c7fd9 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -38,7 +38,8 @@
* LOBYTE is MAC LB mode, HIBYTE is MII LB mode
*/
#define CARD_LB_NONE MAKEWORD(MAC_LB_NONE, 0)
-#define CARD_LB_MAC MAKEWORD(MAC_LB_INTERNAL, 0) /* PHY must ISO, avoid MAC loopback packet go out */
+/* PHY must ISO, avoid MAC loopback packet go out */
+#define CARD_LB_MAC MAKEWORD(MAC_LB_INTERNAL, 0)
#define CARD_LB_PHY MAKEWORD(MAC_LB_EXT, 0)
#define DEFAULT_MSDU_LIFETIME 512 /* ms */
@@ -71,8 +72,10 @@ void CARDvUpdateBasicTopRate(struct vnt_private *);
bool CARDbIsOFDMinBasicRate(struct vnt_private *);
void CARDvSetLoopbackMode(struct vnt_private *, unsigned short wLoopbackMode);
bool CARDbSoftwareReset(struct vnt_private *);
-void CARDvSetFirstNextTBTT(struct vnt_private *, unsigned short wBeaconInterval);
-void CARDvUpdateNextTBTT(struct vnt_private *, u64 qwTSF, unsigned short wBeaconInterval);
+void CARDvSetFirstNextTBTT(struct vnt_private *,
+ unsigned short wBeaconInterval);
+void CARDvUpdateNextTBTT(struct vnt_private *, u64 qwTSF,
+ unsigned short wBeaconInterval);
bool CARDbGetCurrentTSF(struct vnt_private *, u64 *pqwCurrTSF);
u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval);
u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2);
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 9ac1ef9d0..b7d43a562 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -144,7 +144,7 @@ void vnt_init_bands(struct vnt_private *priv)
ch[i].flags = IEEE80211_CHAN_NO_HT40;
}
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
&vnt_supported_5ghz_band;
/* fallthrough */
case RF_RFMD2959:
@@ -159,7 +159,7 @@ void vnt_init_bands(struct vnt_private *priv)
ch[i].flags = IEEE80211_CHAN_NO_HT40;
}
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ priv->hw->wiphy->bands[NL80211_BAND_2GHZ] =
&vnt_supported_2ghz_band;
break;
}
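
The band constants changed because the IEEE80211_BAND_* values were only aliases of enum nl80211_band and were dropped upstream; wiphy->bands is indexed directly with the nl80211 values now. Registration-side sketch (example_2ghz_band is a pre-filled illustrative table):

#include <net/cfg80211.h>

static struct ieee80211_supported_band example_2ghz_band;	/* filled elsewhere */

static void example_init_bands(struct wiphy *wiphy)
{
	wiphy->bands[NL80211_BAND_2GHZ] = &example_2ghz_band;
	wiphy->bands[NL80211_BAND_5GHZ] = NULL;	/* 2.4 GHz-only radio */
}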
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 9fbc71724..2d7f6ae89 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -157,7 +157,8 @@
/* TD_INFO flags control bit */
#define TD_FLAGS_NETIF_SKB 0x01 /* check if need release skb */
-#define TD_FLAGS_PRIV_SKB 0x02 /* check if called from private skb (hostap) */
+/* check if called from private skb (hostap) */
+#define TD_FLAGS_PRIV_SKB 0x02
#define TD_FLAGS_PS_RETRY 0x04 /* check if PS STA frame re-transmit */
/*
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index c3eea07ca..494164045 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -812,7 +812,7 @@ static int vnt_int_report_rate(struct vnt_private *priv,
else if (fb_option & FIFOCTL_AUTO_FB_1)
tx_rate = fallback_rate1[tx_rate][retry];
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
idx = tx_rate - RATE_6M;
else
idx = tx_rate;
@@ -1290,7 +1290,7 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
(conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
set_channel(priv, conf->chandef.chan);
- if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+ if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
bb_type = BB_TYPE_11A;
else
bb_type = BB_TYPE_11G;
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 45196c6e9..8e13f7f41 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -47,7 +47,8 @@
*
* Revision History:
* 08-22-2003 Kyle Hsu : Porting MAC functions from sim53
- * 09-03-2003 Bryan YC Fan : Add MACvClearBusSusInd()& MACvEnableBusSusEn()
+ * 09-03-2003 Bryan YC Fan : Add MACvClearBusSusInd()&
+ * MACvEnableBusSusEn()
* 09-18-2003 Jerry Chen : Add MACvSetKeyEntry & MACvDisableKeyEntry
*
*/
@@ -138,7 +139,8 @@ bool MACbIsIntDisable(struct vnt_private *priv)
* Return Value: none
*
*/
-void MACvSetShortRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit)
+void MACvSetShortRetryLimit(struct vnt_private *priv,
+ unsigned char byRetryLimit)
{
void __iomem *io_base = priv->PortOffset;
/* set SRT */
@@ -160,7 +162,8 @@ void MACvSetShortRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit
* Return Value: none
*
*/
-void MACvSetLongRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit)
+void MACvSetLongRetryLimit(struct vnt_private *priv,
+ unsigned char byRetryLimit)
{
void __iomem *io_base = priv->PortOffset;
/* set LRT */
@@ -304,7 +307,8 @@ bool MACbSoftwareReset(struct vnt_private *priv)
/*
* Description:
- * save some important register's value, then do reset, then restore register's value
+ * save some important registers' values, then do the reset, then
+ * restore the registers' values
*
* Parameters:
* In:
@@ -738,7 +742,8 @@ void MACvTimer0MicroSDelay(struct vnt_private *priv, unsigned int uDelay)
* Return Value: none
*
*/
-void MACvOneShotTimer1MicroSec(struct vnt_private *priv, unsigned int uDelayTime)
+void MACvOneShotTimer1MicroSec(struct vnt_private *priv,
+ unsigned int uDelayTime)
{
void __iomem *io_base = priv->PortOffset;
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 1a2dda09b..e4c3165ae 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1307,7 +1307,7 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
}
if (current_rate > RATE_11M) {
- if (info->band == IEEE80211_BAND_5GHZ) {
+ if (info->band == NL80211_BAND_5GHZ) {
pkt_type = PK_TYPE_11A;
} else {
if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index 9ec49e653..ee9927720 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -72,7 +72,8 @@
* Return Value: data read
*
*/
-unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, unsigned char byContntOffset)
+unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
+ unsigned char byContntOffset)
{
unsigned short wDelay, wNoACK;
unsigned char byWait;
@@ -124,7 +125,8 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
/* ii = Rom Address */
for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
- *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase, (unsigned char)ii);
+ *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase,
+ (unsigned char)ii);
pbyEepromRegs++;
}
}
@@ -141,7 +143,8 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
* Return Value: none
*
*/
-void SROMvReadEtherAddress(void __iomem *dwIoBase, unsigned char *pbyEtherAddress)
+void SROMvReadEtherAddress(void __iomem *dwIoBase,
+ unsigned char *pbyEtherAddress)
{
unsigned char ii;
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 9417c935f..882fe54ce 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -138,7 +138,7 @@ static const u16 vnt_frame_time[MAX_RATE] = {
*
*/
unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
- unsigned int frame_length, u16 tx_rate)
+ unsigned int frame_length, u16 tx_rate)
{
unsigned int frame_time;
unsigned int preamble;
@@ -195,7 +195,7 @@ unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
*
*/
void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
- u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy)
+ u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy)
{
u32 bit_count;
u32 count = 0;
@@ -355,7 +355,7 @@ void vnt_set_antenna_mode(struct vnt_private *priv, u8 antenna_mode)
}
vnt_control_out(priv, MESSAGE_TYPE_SET_ANTMD,
- (u16)antenna_mode, 0, 0, NULL);
+ (u16)antenna_mode, 0, 0, NULL);
}
/*
@@ -383,7 +383,7 @@ int vnt_vt3184_init(struct vnt_private *priv)
u8 data;
status = vnt_control_in(priv, MESSAGE_TYPE_READ, 0,
- MESSAGE_REQUEST_EEPROM, EEP_MAX_CONTEXT_SIZE,
+ MESSAGE_REQUEST_EEPROM, EEP_MAX_CONTEXT_SIZE,
priv->eeprom);
if (status != STATUS_SUCCESS)
return false;
@@ -393,7 +393,7 @@ int vnt_vt3184_init(struct vnt_private *priv)
dev_dbg(&priv->usb->dev, "RF Type %d\n", priv->rf_type);
if ((priv->rf_type == RF_AL2230) ||
- (priv->rf_type == RF_AL2230S)) {
+ (priv->rf_type == RF_AL2230S)) {
priv->bb_rx_conf = vnt_vt3184_al2230[10];
length = sizeof(vnt_vt3184_al2230);
addr = vnt_vt3184_al2230;
@@ -457,21 +457,21 @@ int vnt_vt3184_init(struct vnt_private *priv)
memcpy(array, addr, length);
vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_BBREG, length, array);
+ MESSAGE_REQUEST_BBREG, length, array);
memcpy(array, agc, length_agc);
vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_BBAGC, length_agc, array);
+ MESSAGE_REQUEST_BBAGC, length_agc, array);
if ((priv->rf_type == RF_VT3226) ||
- (priv->rf_type == RF_VT3342A0)) {
+ (priv->rf_type == RF_VT3342A0)) {
vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
- MAC_REG_ITRTMSET, 0x23);
+ MAC_REG_ITRTMSET, 0x23);
vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, 0x01);
} else if (priv->rf_type == RF_VT3226D0) {
vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
- MAC_REG_ITRTMSET, 0x11);
+ MAC_REG_ITRTMSET, 0x11);
vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, 0x01);
}
@@ -482,12 +482,12 @@ int vnt_vt3184_init(struct vnt_private *priv)
/* Fix for TX USB resets from vendors driver */
vnt_control_in(priv, MESSAGE_TYPE_READ, USB_REG4,
- MESSAGE_REQUEST_MEM, sizeof(data), &data);
+ MESSAGE_REQUEST_MEM, sizeof(data), &data);
data |= 0x2;
vnt_control_out(priv, MESSAGE_TYPE_WRITE, USB_REG4,
- MESSAGE_REQUEST_MEM, sizeof(data), &data);
+ MESSAGE_REQUEST_MEM, sizeof(data), &data);
return true;
}
@@ -814,7 +814,7 @@ void vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning)
priv->bb_pre_ed_index = ed_inx;
dev_dbg(&priv->usb->dev, "%s bb_pre_ed_rssi %d\n",
- __func__, priv->bb_pre_ed_rssi);
+ __func__, priv->bb_pre_ed_rssi);
if (!cr_201 && !cr_206)
return;
diff --git a/drivers/staging/vt6656/channel.c b/drivers/staging/vt6656/channel.c
index a0fe288c1..a4299f405 100644
--- a/drivers/staging/vt6656/channel.c
+++ b/drivers/staging/vt6656/channel.c
@@ -153,7 +153,7 @@ void vnt_init_bands(struct vnt_private *priv)
ch[i].flags = IEEE80211_CHAN_NO_HT40;
}
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
&vnt_supported_5ghz_band;
/* fallthrough */
case RF_AL2230:
@@ -167,7 +167,7 @@ void vnt_init_bands(struct vnt_private *priv)
ch[i].flags = IEEE80211_CHAN_NO_HT40;
}
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ priv->hw->wiphy->bands[NL80211_BAND_2GHZ] =
&vnt_supported_2ghz_band;
break;
}
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index 8d05acbc0..73538fb4e 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -97,7 +97,7 @@ static int vnt_int_report_rate(struct vnt_private *priv, u8 pkt_no, u8 tsr)
else if (context->fb_option == AUTO_FB_1)
tx_rate = fallback_rate1[tx_rate][retry];
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
idx = tx_rate - RATE_6M;
else
idx = tx_rate;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index f9afab77b..ac4fecb30 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -238,7 +238,7 @@ static int vnt_init_registers(struct vnt_private *priv)
priv->tx_antenna_mode = ANT_B;
priv->rx_antenna_sel = 1;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
priv->rx_antenna_mode = ANT_A;
else
priv->rx_antenna_mode = ANT_B;
@@ -248,14 +248,14 @@ static int vnt_init_registers(struct vnt_private *priv)
if (antenna & EEP_ANTENNA_AUX) {
priv->tx_antenna_mode = ANT_A;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
priv->rx_antenna_mode = ANT_B;
else
priv->rx_antenna_mode = ANT_A;
} else {
priv->tx_antenna_mode = ANT_B;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
priv->rx_antenna_mode = ANT_A;
else
priv->rx_antenna_mode = ANT_B;
@@ -662,7 +662,7 @@ static int vnt_config(struct ieee80211_hw *hw, u32 changed)
(conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
vnt_set_channel(priv, conf->chandef.chan->hw_value);
- if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+ if (conf->chandef.chan->band == NL80211_BAND_5GHZ)
bb_type = BB_TYPE_11A;
else
bb_type = BB_TYPE_11G;
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index b74e32001..aa59e7f14 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -813,7 +813,7 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
}
if (current_rate > RATE_11M) {
- if (info->band == IEEE80211_BAND_5GHZ) {
+ if (info->band == NL80211_BAND_5GHZ) {
pkt_type = PK_TYPE_11A;
} else {
if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 4846a898d..95faaeb74 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -97,7 +97,7 @@ void vnt_run_command(struct work_struct *work)
if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
return;
- if (priv->cmd_running != true)
+ if (!priv->cmd_running)
return;
switch (priv->command_state) {
@@ -143,13 +143,13 @@ void vnt_run_command(struct work_struct *work)
if (priv->rx_antenna_sel == 0) {
priv->rx_antenna_sel = 1;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
vnt_set_antenna_mode(priv, ANT_RXA);
else
vnt_set_antenna_mode(priv, ANT_RXB);
} else {
priv->rx_antenna_sel = 0;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
vnt_set_antenna_mode(priv, ANT_RXB);
else
vnt_set_antenna_mode(priv, ANT_RXA);
@@ -174,7 +174,7 @@ int vnt_schedule_command(struct vnt_private *priv, enum vnt_cmd command)
ADD_ONE_WITH_WRAP_AROUND(priv->cmd_enqueue_idx, CMD_Q_SIZE);
priv->free_cmd_queue--;
- if (priv->cmd_running == false)
+ if (!priv->cmd_running)
vnt_cmd_complete(priv);
return true;
diff --git a/drivers/staging/wilc1000/Kconfig b/drivers/staging/wilc1000/Kconfig
index dce9cee91..73f7fefd3 100644
--- a/drivers/staging/wilc1000/Kconfig
+++ b/drivers/staging/wilc1000/Kconfig
@@ -1,6 +1,5 @@
config WILC1000
tristate
- select WIRELESS_EXT
---help---
	  This module only supports IEEE 802.11n WiFi.
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 0a922c7c7..953584248 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -2,6 +2,7 @@
#include <linux/time.h>
#include <linux/kthread.h>
#include <linux/delay.h>
+#include <linux/completion.h>
#include "host_interface.h"
#include "coreconfigurator.h"
#include "wilc_wlan.h"
@@ -230,10 +231,10 @@ bool wilc_optaining_ip;
static u8 P2P_LISTEN_STATE;
static struct task_struct *hif_thread_handler;
static struct message_queue hif_msg_q;
-static struct semaphore hif_sema_thread;
-static struct semaphore hif_sema_driver;
-static struct semaphore hif_sema_wait_response;
-static struct semaphore hif_sema_deinit;
+static struct completion hif_thread_comp;
+static struct completion hif_driver_comp;
+static struct completion hif_wait_response;
+static struct mutex hif_deinit_lock;
static struct timer_list periodic_rssi;
u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN];
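
The declarations above capture the whole wilc1000 conversion: semaphores used for one-shot signalling become struct completion (up() turns into complete(), down() into wait_for_completion()), while hif_sema_deinit, which really guarded a critical section, becomes a mutex. Sketch of the completion half (example_done is illustrative):

#include <linux/completion.h>

static DECLARE_COMPLETION(example_done);	/* or init_completion() at runtime */

static void example_producer(void)
{
	/* ... finish the response ... */
	complete(&example_done);		/* was: up(&sema) */
}

static void example_consumer(void)
{
	wait_for_completion(&example_done);	/* was: down(&sema) */
}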
@@ -262,6 +263,7 @@ static struct wilc_vif *join_req_vif;
static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo);
static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx);
+static s32 Handle_ScanDone(struct wilc_vif *vif, enum scan_event enuEvent);
/* The u8IfIdx ranges from 0 to NUM_CONCURRENT_IFC - 1, but index 0 is used
 * for a special purpose in the wilc device, so we add 1 so the index starts from 1.
@@ -305,10 +307,10 @@ static void handle_set_channel(struct wilc_vif *vif,
netdev_err(vif->ndev, "Failed to set channel\n");
}
-static s32 handle_set_wfi_drv_handler(struct wilc_vif *vif,
- struct drv_handler *hif_drv_handler)
+static void handle_set_wfi_drv_handler(struct wilc_vif *vif,
+ struct drv_handler *hif_drv_handler)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_SET_DRV_HANDLER;
@@ -316,24 +318,20 @@ static s32 handle_set_wfi_drv_handler(struct wilc_vif *vif,
wid.val = (s8 *)hif_drv_handler;
wid.size = sizeof(*hif_drv_handler);
- result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
- hif_drv_handler->handler);
+ ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ hif_drv_handler->handler);
if (!hif_drv_handler->handler)
- up(&hif_sema_driver);
+ complete(&hif_driver_comp);
- if (result) {
+ if (ret)
netdev_err(vif->ndev, "Failed to set driver handler\n");
- return -EINVAL;
- }
-
- return result;
}
-static s32 handle_set_operation_mode(struct wilc_vif *vif,
- struct op_mode *hif_op_mode)
+static void handle_set_operation_mode(struct wilc_vif *vif,
+ struct op_mode *hif_op_mode)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_SET_OPERATION_MODE;
@@ -341,23 +339,19 @@ static s32 handle_set_operation_mode(struct wilc_vif *vif,
wid.val = (s8 *)&hif_op_mode->mode;
wid.size = sizeof(u32);
- result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if ((hif_op_mode->mode) == IDLE_MODE)
- up(&hif_sema_driver);
+ complete(&hif_driver_comp);
- if (result) {
+ if (ret)
netdev_err(vif->ndev, "Failed to set driver handler\n");
- return -EINVAL;
- }
-
- return result;
}
-static s32 handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
+static void handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
char firmware_ip_addr[4] = {0};
@@ -371,22 +365,18 @@ static s32 handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
wid.val = (u8 *)ip_addr;
wid.size = IP_ALEN;
- result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
host_int_get_ipaddress(vif, firmware_ip_addr, idx);
- if (result) {
+ if (ret)
netdev_err(vif->ndev, "Failed to set IP address\n");
- return -EINVAL;
- }
-
- return result;
}
-static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx)
+static void handle_get_ip_address(struct wilc_vif *vif, u8 idx)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_IP_ADDRESS;
@@ -394,8 +384,8 @@ static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx)
wid.val = kmalloc(IP_ALEN, GFP_KERNEL);
wid.size = IP_ALEN;
- result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ ret = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
memcpy(get_ip[idx], wid.val, IP_ALEN);
@@ -404,18 +394,14 @@ static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx)
if (memcmp(get_ip[idx], set_ip[idx], IP_ALEN) != 0)
wilc_setup_ipaddress(vif, set_ip[idx], idx);
- if (result != 0) {
+ if (ret)
netdev_err(vif->ndev, "Failed to get IP address\n");
- return -EINVAL;
- }
-
- return result;
}
-static s32 handle_get_mac_address(struct wilc_vif *vif,
- struct get_mac_addr *get_mac_addr)
+static void handle_get_mac_address(struct wilc_vif *vif,
+ struct get_mac_addr *get_mac_addr)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_MAC_ADDR;
@@ -423,16 +409,12 @@ static s32 handle_get_mac_address(struct wilc_vif *vif,
wid.val = get_mac_addr->mac_addr;
wid.size = ETH_ALEN;
- result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ ret = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
- if (result) {
+ if (ret)
netdev_err(vif->ndev, "Failed to get mac address\n");
- result = -EFAULT;
- }
- up(&hif_sema_wait_response);
-
- return result;
+ complete(&hif_wait_response);
}
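Each complete(&hif_wait_response) above pairs with a wait_for_completion()
on the requesting side: the caller queues a message to the hif thread and
blocks until the handler has filled in the result. A simplified sketch of
the round trip for the MAC address case (field names assumed from this
file's message union; error handling trimmed):

    int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
    {
            struct host_if_msg msg;
            int result;

            memset(&msg, 0, sizeof(msg));
            msg.id = HOST_IF_MSG_GET_MAC_ADDRESS;
            msg.body.get_mac_info.mac_addr = mac_addr;
            msg.vif = vif;

            result = wilc_mq_send(&hif_msg_q, &msg, sizeof(msg));
            if (result)
                    return result;

            /* handle_get_mac_address() signals this when done */
            wait_for_completion(&hif_wait_response);
            return 0;
    }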
static s32 handle_cfg_param(struct wilc_vif *vif,
@@ -455,7 +437,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "check value 6 over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -471,7 +453,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Impossible value\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -486,7 +468,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Range(1 ~ 65535) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -500,7 +482,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Invalid power mode\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -515,7 +497,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Range(1~256) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -530,7 +512,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Range(1~256) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -545,7 +527,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Threshold Range fail\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -560,7 +542,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Threshold Range fail\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -574,7 +556,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Preamle Range(0~2) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -588,7 +570,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Short slot(2) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -602,7 +584,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "TXOP prot disable\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -617,7 +599,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Beacon interval(1~65535)fail\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -632,7 +614,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "DTIM range(1~255) fail\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -646,7 +628,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Site survey disable\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -661,7 +643,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Site scan time(1~65535) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -676,7 +658,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Active time(1~65535) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -691,7 +673,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Passive time(1~65535) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -713,7 +695,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "out of TX rate\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -724,28 +706,24 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
if (result)
netdev_err(vif->ndev, "Error in setting CFG params\n");
-ERRORHANDLER:
+unlock:
mutex_unlock(&hif_drv->cfg_values_lock);
return result;
}
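The unlock label name now says what every error path in handle_cfg_param()
must do: drop cfg_values_lock before returning. Reduced to its locking
skeleton (the single check stands in for the per-field validations above):

    static s32 handle_cfg_param(struct wilc_vif *vif,
                                struct cfg_param_attr *param)
    {
            s32 result = 0;
            bool in_range = true;   /* stands in for the range checks */

            mutex_lock(&vif->hif_drv->cfg_values_lock);

            if (!in_range) {
                    result = -EINVAL;
                    goto unlock;    /* one exit path drops the lock */
            }

            /* ... accumulate and send the WID list ... */

    unlock:
            mutex_unlock(&vif->hif_drv->cfg_values_lock);
            return result;
    }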
-static s32 Handle_ScanDone(struct wilc_vif *vif,
- enum scan_event enuEvent);
-
-static s32 Handle_Scan(struct wilc_vif *vif,
- struct scan_attr *pstrHostIFscanAttr)
+static s32 handle_scan(struct wilc_vif *vif, struct scan_attr *scan_info)
{
s32 result = 0;
- struct wid strWIDList[5];
- u32 u32WidsCount = 0;
+ struct wid wid_list[5];
+ u32 index = 0;
u32 i;
- u8 *pu8Buffer;
+ u8 *buffer;
u8 valuesize = 0;
u8 *pu8HdnNtwrksWidVal = NULL;
struct host_if_drv *hif_drv = vif->hif_drv;
- hif_drv->usr_scan_req.scan_result = pstrHostIFscanAttr->result;
- hif_drv->usr_scan_req.arg = pstrHostIFscanAttr->arg;
+ hif_drv->usr_scan_req.scan_result = scan_info->result;
+ hif_drv->usr_scan_req.arg = scan_info->arg;
if ((hif_drv->hif_state >= HOST_IF_SCANNING) &&
(hif_drv->hif_state < HOST_IF_CONNECTED)) {
@@ -762,72 +740,70 @@ static s32 Handle_Scan(struct wilc_vif *vif,
hif_drv->usr_scan_req.rcvd_ch_cnt = 0;
- strWIDList[u32WidsCount].id = (u16)WID_SSID_PROBE_REQ;
- strWIDList[u32WidsCount].type = WID_STR;
+ wid_list[index].id = (u16)WID_SSID_PROBE_REQ;
+ wid_list[index].type = WID_STR;
- for (i = 0; i < pstrHostIFscanAttr->hidden_network.n_ssids; i++)
- valuesize += ((pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len) + 1);
+ for (i = 0; i < scan_info->hidden_network.n_ssids; i++)
+ valuesize += ((scan_info->hidden_network.net_info[i].ssid_len) + 1);
pu8HdnNtwrksWidVal = kmalloc(valuesize + 1, GFP_KERNEL);
- strWIDList[u32WidsCount].val = pu8HdnNtwrksWidVal;
- if (strWIDList[u32WidsCount].val) {
- pu8Buffer = strWIDList[u32WidsCount].val;
+ wid_list[index].val = pu8HdnNtwrksWidVal;
+ if (wid_list[index].val) {
+ buffer = wid_list[index].val;
- *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.n_ssids;
+ *buffer++ = scan_info->hidden_network.n_ssids;
- for (i = 0; i < pstrHostIFscanAttr->hidden_network.n_ssids; i++) {
- *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len;
- memcpy(pu8Buffer, pstrHostIFscanAttr->hidden_network.net_info[i].ssid, pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len);
- pu8Buffer += pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len;
+ for (i = 0; i < scan_info->hidden_network.n_ssids; i++) {
+ *buffer++ = scan_info->hidden_network.net_info[i].ssid_len;
+ memcpy(buffer, scan_info->hidden_network.net_info[i].ssid, scan_info->hidden_network.net_info[i].ssid_len);
+ buffer += scan_info->hidden_network.net_info[i].ssid_len;
}
- strWIDList[u32WidsCount].size = (s32)(valuesize + 1);
- u32WidsCount++;
+ wid_list[index].size = (s32)(valuesize + 1);
+ index++;
}
- {
- strWIDList[u32WidsCount].id = WID_INFO_ELEMENT_PROBE;
- strWIDList[u32WidsCount].type = WID_BIN_DATA;
- strWIDList[u32WidsCount].val = pstrHostIFscanAttr->ies;
- strWIDList[u32WidsCount].size = pstrHostIFscanAttr->ies_len;
- u32WidsCount++;
- }
+ wid_list[index].id = WID_INFO_ELEMENT_PROBE;
+ wid_list[index].type = WID_BIN_DATA;
+ wid_list[index].val = scan_info->ies;
+ wid_list[index].size = scan_info->ies_len;
+ index++;
- strWIDList[u32WidsCount].id = WID_SCAN_TYPE;
- strWIDList[u32WidsCount].type = WID_CHAR;
- strWIDList[u32WidsCount].size = sizeof(char);
- strWIDList[u32WidsCount].val = (s8 *)&pstrHostIFscanAttr->type;
- u32WidsCount++;
+ wid_list[index].id = WID_SCAN_TYPE;
+ wid_list[index].type = WID_CHAR;
+ wid_list[index].size = sizeof(char);
+ wid_list[index].val = (s8 *)&scan_info->type;
+ index++;
- strWIDList[u32WidsCount].id = WID_SCAN_CHANNEL_LIST;
- strWIDList[u32WidsCount].type = WID_BIN_DATA;
+ wid_list[index].id = WID_SCAN_CHANNEL_LIST;
+ wid_list[index].type = WID_BIN_DATA;
- if (pstrHostIFscanAttr->ch_freq_list &&
- pstrHostIFscanAttr->ch_list_len > 0) {
+ if (scan_info->ch_freq_list &&
+ scan_info->ch_list_len > 0) {
int i;
- for (i = 0; i < pstrHostIFscanAttr->ch_list_len; i++) {
- if (pstrHostIFscanAttr->ch_freq_list[i] > 0)
- pstrHostIFscanAttr->ch_freq_list[i] = pstrHostIFscanAttr->ch_freq_list[i] - 1;
+ for (i = 0; i < scan_info->ch_list_len; i++) {
+ if (scan_info->ch_freq_list[i] > 0)
+ scan_info->ch_freq_list[i] = scan_info->ch_freq_list[i] - 1;
}
}
- strWIDList[u32WidsCount].val = pstrHostIFscanAttr->ch_freq_list;
- strWIDList[u32WidsCount].size = pstrHostIFscanAttr->ch_list_len;
- u32WidsCount++;
+ wid_list[index].val = scan_info->ch_freq_list;
+ wid_list[index].size = scan_info->ch_list_len;
+ index++;
- strWIDList[u32WidsCount].id = WID_START_SCAN_REQ;
- strWIDList[u32WidsCount].type = WID_CHAR;
- strWIDList[u32WidsCount].size = sizeof(char);
- strWIDList[u32WidsCount].val = (s8 *)&pstrHostIFscanAttr->src;
- u32WidsCount++;
+ wid_list[index].id = WID_START_SCAN_REQ;
+ wid_list[index].type = WID_CHAR;
+ wid_list[index].size = sizeof(char);
+ wid_list[index].val = (s8 *)&scan_info->src;
+ index++;
if (hif_drv->hif_state == HOST_IF_CONNECTED)
scan_while_connected = true;
else if (hif_drv->hif_state == HOST_IF_IDLE)
scan_while_connected = false;
- result = wilc_send_config_pkt(vif, SET_CFG, strWIDList,
- u32WidsCount,
+ result = wilc_send_config_pkt(vif, SET_CFG, wid_list,
+ index,
wilc_get_vif_idx(vif));
if (result)
@@ -839,13 +815,13 @@ ERRORHANDLER:
Handle_ScanDone(vif, SCAN_EVENT_ABORTED);
}
- kfree(pstrHostIFscanAttr->ch_freq_list);
- pstrHostIFscanAttr->ch_freq_list = NULL;
+ kfree(scan_info->ch_freq_list);
+ scan_info->ch_freq_list = NULL;
- kfree(pstrHostIFscanAttr->ies);
- pstrHostIFscanAttr->ies = NULL;
- kfree(pstrHostIFscanAttr->hidden_network.net_info);
- pstrHostIFscanAttr->hidden_network.net_info = NULL;
+ kfree(scan_info->ies);
+ scan_info->ies = NULL;
+ kfree(scan_info->hidden_network.net_info);
+ scan_info->hidden_network.net_info = NULL;
kfree(pu8HdnNtwrksWidVal);
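The hidden-network WID built above is a length-prefixed list: one byte for
the SSID count, then a (length, bytes) pair per SSID, which is why the
buffer is sized as the sum of (ssid_len + 1) plus one for the count byte.
The packing logic, pulled out into a hypothetical helper:

    /* layout: [n_ssids][len0][ssid0...][len1][ssid1...] */
    static u8 *pack_hidden_ssids(struct hidden_network *net, u8 *buf)
    {
            u32 i;

            *buf++ = net->n_ssids;
            for (i = 0; i < net->n_ssids; i++) {
                    *buf++ = net->net_info[i].ssid_len;
                    memcpy(buf, net->net_info[i].ssid,
                           net->net_info[i].ssid_len);
                    buf += net->net_info[i].ssid_len;
            }
            return buf;     /* one past the packed data */
    }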
@@ -1610,7 +1586,7 @@ static int Handle_Key(struct wilc_vif *vif,
&wid, 1,
wilc_get_vif_idx(vif));
}
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
break;
case WPA_RX_GTK:
@@ -1644,10 +1620,10 @@ static int Handle_Key(struct wilc_vif *vif,
wilc_get_vif_idx(vif));
kfree(pu8keybuf);
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
} else if (pstrHostIFkeyAttr->action & ADDKEY) {
pu8keybuf = kzalloc(RX_MIC_KEY_MSG_LEN, GFP_KERNEL);
- if (pu8keybuf == NULL) {
+ if (!pu8keybuf) {
ret = -ENOMEM;
goto _WPARxGtk_end_case_;
}
@@ -1673,7 +1649,7 @@ static int Handle_Key(struct wilc_vif *vif,
wilc_get_vif_idx(vif));
kfree(pu8keybuf);
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
}
_WPARxGtk_end_case_:
kfree(pstrHostIFkeyAttr->attr.wpa.key);
@@ -1711,7 +1687,7 @@ _WPARxGtk_end_case_:
strWIDList, 2,
wilc_get_vif_idx(vif));
kfree(pu8keybuf);
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
} else if (pstrHostIFkeyAttr->action & ADDKEY) {
pu8keybuf = kmalloc(PTK_KEY_MSG_LEN, GFP_KERNEL);
if (!pu8keybuf) {
@@ -1734,7 +1710,7 @@ _WPARxGtk_end_case_:
&wid, 1,
wilc_get_vif_idx(vif));
kfree(pu8keybuf);
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
}
_WPAPtk_end_case_:
@@ -1856,7 +1832,7 @@ static void Handle_Disconnect(struct wilc_vif *vif)
}
}
- up(&hif_drv->sem_test_disconn_block);
+ complete(&hif_drv->comp_test_disconn_block);
}
void wilc_resolve_disconnect_aberration(struct wilc_vif *vif)
@@ -1885,7 +1861,7 @@ static void Handle_GetRssi(struct wilc_vif *vif)
result = -EFAULT;
}
- up(&vif->hif_drv->sem_get_rssi);
+ complete(&vif->hif_drv->comp_get_rssi);
}
static s32 Handle_GetStatistics(struct wilc_vif *vif,
@@ -1938,7 +1914,7 @@ static s32 Handle_GetStatistics(struct wilc_vif *vif,
wilc_enable_tcp_ack_filter(false);
if (pstrStatistics != &vif->wilc->dummy_statistics)
- up(&hif_sema_wait_response);
+ complete(&hif_wait_response);
return 0;
}
@@ -1979,7 +1955,7 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif,
return -EFAULT;
}
- up(&hif_drv->sem_inactive_time);
+ complete(&hif_drv->comp_inactive_time);
return result;
}
@@ -2172,7 +2148,7 @@ static void Handle_DelAllSta(struct wilc_vif *vif,
ERRORHANDLER:
kfree(wid.val);
- up(&hif_sema_wait_response);
+ complete(&hif_wait_response);
}
static void Handle_DelStation(struct wilc_vif *vif,
@@ -2472,7 +2448,7 @@ static void handle_set_tx_pwr(struct wilc_vif *vif, u8 tx_pwr)
static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
{
- s32 ret = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_TX_POWER;
@@ -2485,7 +2461,7 @@ static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
if (ret)
netdev_err(vif->ndev, "Failed to get TX PWR\n");
- up(&hif_sema_wait_response);
+ complete(&hif_wait_response);
}
static int hostIFthread(void *pvArg)
@@ -2518,7 +2494,7 @@ static int hostIFthread(void *pvArg)
switch (msg.id) {
case HOST_IF_MSG_SCAN:
- Handle_Scan(msg.vif, &msg.body.scan_info);
+ handle_scan(msg.vif, &msg.body.scan_info);
break;
case HOST_IF_MSG_CONNECT:
@@ -2667,7 +2643,7 @@ static int hostIFthread(void *pvArg)
}
}
- up(&hif_sema_thread);
+ complete(&hif_thread_comp);
return 0;
}
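hostIFthread() is the single consumer of hif_msg_q: it drains messages,
dispatches on msg.id, and on HOST_IF_MSG_EXIT signals hif_thread_comp so
wilc_deinit() knows the thread has finished. The loop in outline (queue
receive signature assumed from wilc_msgqueue.h):

    while (1) {
            wilc_mq_recv(&hif_msg_q, &msg, sizeof(msg), &size);
            if (msg.id == HOST_IF_MSG_EXIT)
                    break;
            switch (msg.id) {
            case HOST_IF_MSG_SCAN:
                    handle_scan(msg.vif, &msg.body.scan_info);
                    break;
            /* ... remaining message ids ... */
            }
    }
    complete(&hif_thread_comp);
    return 0;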
@@ -2730,7 +2706,8 @@ int wilc_remove_wep_key(struct wilc_vif *vif, u8 index)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "Request to remove WEP key\n");
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
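The added else is the functional change here: if wilc_mq_send() fails, no
handler will ever run and nothing will complete comp_test_key_block, so an
unconditional wait would block the caller forever. The pattern:

    result = wilc_mq_send(&hif_msg_q, &msg, sizeof(msg));
    if (result)
            netdev_err(vif->ndev, "Request to remove WEP key\n");
    else
            /* only wait when the message was actually queued */
            wait_for_completion(&hif_drv->comp_test_key_block);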
@@ -2758,7 +2735,8 @@ int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "Default key index\n");
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2791,7 +2769,7 @@ int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "STA - WEP Key\n");
- down(&hif_drv->sem_test_key_block);
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2827,7 +2805,8 @@ int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
if (result)
netdev_err(vif->ndev, "AP - WEP Key\n");
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2882,8 +2861,8 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
if (result)
netdev_err(vif->ndev, "PTK Key\n");
-
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2950,8 +2929,8 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "RX GTK\n");
-
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2961,14 +2940,8 @@ int wilc_set_pmkid_info(struct wilc_vif *vif,
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
int i;
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
-
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_KEY;
@@ -3007,7 +2980,7 @@ int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
return -EFAULT;
}
- down(&hif_sema_wait_response);
+ wait_for_completion(&hif_wait_response);
return result;
}
@@ -3097,8 +3070,8 @@ int wilc_disconnect(struct wilc_vif *vif, u16 reason_code)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "Failed to send message: disconnect\n");
-
- down(&hif_drv->sem_test_disconn_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_disconn_block);
return result;
}
@@ -3110,12 +3083,6 @@ static s32 host_int_get_assoc_res_info(struct wilc_vif *vif,
{
s32 result = 0;
struct wid wid;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "Driver is null\n");
- return -EFAULT;
- }
wid.id = (u16)WID_ASSOC_RES_INFO;
wid.type = WID_STR;
@@ -3138,12 +3105,6 @@ int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel)
{
int result;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_SET_CHANNEL;
@@ -3219,8 +3180,8 @@ s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "Failed to send get host ch param\n");
-
- down(&hif_drv->sem_inactive_time);
+ else
+ wait_for_completion(&hif_drv->comp_inactive_time);
*pu32InactiveTime = inactive_time;
@@ -3243,7 +3204,7 @@ int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level)
return -EFAULT;
}
- down(&hif_drv->sem_get_rssi);
+ wait_for_completion(&hif_drv->comp_get_rssi);
if (!rssi_level) {
netdev_err(vif->ndev, "RSS pointer value is null\n");
@@ -3272,7 +3233,7 @@ int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats)
}
if (stats != &vif->wilc->dummy_statistics)
- down(&hif_sema_wait_response);
+ wait_for_completion(&hif_wait_response);
return result;
}
@@ -3382,7 +3343,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
scan_while_connected = false;
- sema_init(&hif_sema_wait_response, 0);
+ init_completion(&hif_wait_response);
hif_drv = kzalloc(sizeof(struct host_if_drv), GFP_KERNEL);
if (!hif_drv) {
@@ -3399,15 +3360,15 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
wilc_optaining_ip = false;
if (clients_count == 0) {
- sema_init(&hif_sema_thread, 0);
- sema_init(&hif_sema_driver, 0);
- sema_init(&hif_sema_deinit, 1);
+ init_completion(&hif_thread_comp);
+ init_completion(&hif_driver_comp);
+ mutex_init(&hif_deinit_lock);
}
- sema_init(&hif_drv->sem_test_key_block, 0);
- sema_init(&hif_drv->sem_test_disconn_block, 0);
- sema_init(&hif_drv->sem_get_rssi, 0);
- sema_init(&hif_drv->sem_inactive_time, 0);
+ init_completion(&hif_drv->comp_test_key_block);
+ init_completion(&hif_drv->comp_test_disconn_block);
+ init_completion(&hif_drv->comp_get_rssi);
+ init_completion(&hif_drv->comp_inactive_time);
if (clients_count == 0) {
result = wilc_mq_create(&hif_msg_q);
@@ -3469,7 +3430,7 @@ int wilc_deinit(struct wilc_vif *vif)
return -EFAULT;
}
- down(&hif_sema_deinit);
+ mutex_lock(&hif_deinit_lock);
terminated_handle = hif_drv;
@@ -3479,7 +3440,7 @@ int wilc_deinit(struct wilc_vif *vif)
del_timer_sync(&hif_drv->remain_on_ch_timer);
wilc_set_wfi_drv_handler(vif, 0, 0);
- down(&hif_sema_driver);
+ wait_for_completion(&hif_driver_comp);
if (hif_drv->usr_scan_req.scan_result) {
hif_drv->usr_scan_req.scan_result(SCAN_EVENT_ABORTED, NULL,
@@ -3494,15 +3455,14 @@ int wilc_deinit(struct wilc_vif *vif)
memset(&msg, 0, sizeof(struct host_if_msg));
if (clients_count == 1) {
- del_timer_sync(&periodic_rssi);
msg.id = HOST_IF_MSG_EXIT;
msg.vif = vif;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result != 0)
netdev_err(vif->ndev, "deinit : Error(%d)\n", result);
-
- down(&hif_sema_thread);
+ else
+ wait_for_completion(&hif_thread_comp);
wilc_mq_destroy(&hif_msg_q);
}
@@ -3511,7 +3471,7 @@ int wilc_deinit(struct wilc_vif *vif)
clients_count--;
terminated_handle = NULL;
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
return result;
}
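hif_sema_deinit was a semaphore initialized to one, i.e. a mutex in
disguise; struct mutex states the intent and gets lockdep checking. It
serializes wilc_deinit() against the asynchronous receive paths below, so
a firmware event cannot observe a half-torn-down hif_drv. In outline:

    mutex_lock(&hif_deinit_lock);           /* was down(&hif_sema_deinit) */
    terminated_handle = hif_drv;
    /* ... stop timers, abort scan, wait out the hif thread ... */
    terminated_handle = NULL;
    mutex_unlock(&hif_deinit_lock);         /* was up(&hif_sema_deinit) */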
@@ -3558,25 +3518,25 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
struct host_if_drv *hif_drv = NULL;
struct wilc_vif *vif;
- down(&hif_sema_deinit);
+ mutex_lock(&hif_deinit_lock);
id = ((pu8Buffer[u32Length - 4]) | (pu8Buffer[u32Length - 3] << 8) | (pu8Buffer[u32Length - 2] << 16) | (pu8Buffer[u32Length - 1] << 24));
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif) {
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
return;
}
hif_drv = vif->hif_drv;
if (!hif_drv || hif_drv == terminated_handle) {
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
return;
}
if (!hif_drv->usr_conn_req.conn_result) {
netdev_err(vif->ndev, "there is no current Connect Request\n");
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
return;
}
@@ -3593,7 +3553,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
if (result)
netdev_err(vif->ndev, "synchronous info (%d)\n", result);
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
}
void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
@@ -3634,12 +3594,6 @@ int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3688,12 +3642,6 @@ int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3727,12 +3675,6 @@ int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period,
int result = 0;
struct host_if_msg msg;
struct beacon_attr *beacon_info = &msg.body.beacon_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3776,12 +3718,6 @@ int wilc_del_beacon(struct wilc_vif *vif)
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
msg.id = HOST_IF_MSG_DEL_BEACON;
msg.vif = vif;
@@ -3798,12 +3734,6 @@ int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param)
int result = 0;
struct host_if_msg msg;
struct add_sta_param *add_sta_info = &msg.body.add_sta_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3830,12 +3760,6 @@ int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr)
int result = 0;
struct host_if_msg msg;
struct del_sta *del_sta_info = &msg.body.del_sta_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3858,16 +3782,10 @@ int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
int result = 0;
struct host_if_msg msg;
struct del_all_sta *del_all_sta_info = &msg.body.del_all_sta_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
u8 zero_addr[ETH_ALEN] = {0};
int i;
u8 assoc_sta = 0;
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
-
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_DEL_ALL_STA;
@@ -3887,8 +3805,8 @@ int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
-
- down(&hif_sema_wait_response);
+ else
+ wait_for_completion(&hif_wait_response);
return result;
}
@@ -3899,12 +3817,6 @@ int wilc_edit_station(struct wilc_vif *vif,
int result = 0;
struct host_if_msg msg;
struct add_sta_param *add_sta_info = &msg.body.add_sta_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3932,12 +3844,6 @@ int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout)
int result = 0;
struct host_if_msg msg;
struct power_mgmt_param *pwr_mgmt_info = &msg.body.pwr_mgmt_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
if (wilc_wlan_get_num_conn_ifcs(vif->wilc) == 2 && enabled)
return 0;
@@ -3962,12 +3868,6 @@ int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled,
int result = 0;
struct host_if_msg msg;
struct set_multicast *multicast_filter_param = &msg.body.multicast_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -4141,12 +4041,6 @@ int wilc_setup_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -4167,12 +4061,6 @@ static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -4221,7 +4109,7 @@ int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power)
if (ret)
netdev_err(vif->ndev, "Failed to get TX PWR\n");
- down(&hif_sema_wait_response);
+ wait_for_completion(&hif_wait_response);
*tx_power = msg.body.tx_power.tx_pwr;
return ret;
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 01f3222a4..8d2dd0db0 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -275,10 +275,10 @@ struct host_if_drv {
struct cfg_param_attr cfg_values;
struct mutex cfg_values_lock;
- struct semaphore sem_test_key_block;
- struct semaphore sem_test_disconn_block;
- struct semaphore sem_get_rssi;
- struct semaphore sem_inactive_time;
+ struct completion comp_test_key_block;
+ struct completion comp_test_disconn_block;
+ struct completion comp_get_rssi;
+ struct completion comp_inactive_time;
struct timer_list scan_timer;
struct timer_list connect_timer;
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index 7d9e5ded8..242f82f4d 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -24,7 +24,7 @@ struct wilc_wfi_radiotap_cb_hdr {
static struct net_device *wilc_wfi_mon; /* global monitor netdev */
-static u8 srcAdd[6];
+static u8 srcadd[6];
static u8 bssid[6];
static u8 broadcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/**
@@ -59,9 +59,10 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
/* Get WILC header */
memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
-
- /* The packet offset field conain info about what type of managment frame */
- /* we are dealing with and ack status */
+ /*
+ * The packet offset field contains info about the type of management
+ * frame we are dealing with and the ack status
+ */
pkt_offset = GET_PKT_OFFSET(header);
if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
@@ -105,7 +106,7 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_hdr));
hdr->hdr.it_present = cpu_to_le32
- (1 << IEEE80211_RADIOTAP_RATE); /* | */
+ (1 << IEEE80211_RADIOTAP_RATE); /* | */
hdr->rate = 5; /* txrate->bitrate / 5; */
}
@@ -127,8 +128,10 @@ struct tx_complete_mon_data {
static void mgmt_tx_complete(void *priv, int status)
{
struct tx_complete_mon_data *pv_data = priv;
-
- /* incase of fully hosting mode, the freeing will be done in response to the cfg packet */
+ /*
+ * in the fully hosting mode case, the freeing is done
+ * in response to the cfg packet
+ */
kfree(pv_data->buff);
kfree(pv_data);
@@ -225,11 +228,11 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
skb->dev = mon_priv->real_ndev;
/* Identify if Ethernet or MAC header (data or mgmt) */
- memcpy(srcAdd, &skb->data[10], 6);
+ memcpy(srcadd, &skb->data[10], 6);
memcpy(bssid, &skb->data[16], 6);
/* if source address and bssid fields are equal>>Mac header */
/*send it to mgmt frames handler */
- if (!(memcmp(srcAdd, bssid, 6))) {
+ if (!(memcmp(srcadd, bssid, 6))) {
ret = mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len);
if (ret)
netdev_err(dev, "fail to mgmt tx\n");
@@ -255,7 +258,8 @@ static const struct net_device_ops wilc_wfi_netdev_ops = {
* @date 12 JUL 2012
* @version 1.0
*/
-struct net_device *WILC_WFI_init_mon_interface(const char *name, struct net_device *real_dev)
+struct net_device *WILC_WFI_init_mon_interface(const char *name,
+ struct net_device *real_dev)
{
u32 ret = 0;
struct WILC_WFI_mon_priv *priv;
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index ce095b020..9a7fa90dc 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -22,6 +22,7 @@
#include <linux/skbuff.h>
#include <linux/semaphore.h>
+#include <linux/completion.h>
static int dev_state_ev_handler(struct notifier_block *this,
unsigned long event, void *ptr);
@@ -30,8 +31,6 @@ static struct notifier_block g_dev_notifier = {
.notifier_call = dev_state_ev_handler
};
-#define IRQ_WAIT 1
-#define IRQ_NO_WAIT 0
static struct semaphore close_exit_sync;
static int wlan_deinit_locks(struct net_device *dev);
@@ -259,10 +258,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
for (i = 0; i < wilc->vif_num; i++) {
if (wilc->vif[i]->mode == STATION_MODE)
- if (!memcmp(bssid, wilc->vif[i]->bssid, ETH_ALEN))
+ if (ether_addr_equal_unaligned(bssid,
+ wilc->vif[i]->bssid))
return wilc->vif[i]->ndev;
if (wilc->vif[i]->mode == AP_MODE)
- if (!memcmp(bssid1, wilc->vif[i]->bssid, ETH_ALEN))
+ if (ether_addr_equal_unaligned(bssid1,
+ wilc->vif[i]->bssid))
return wilc->vif[i]->ndev;
}
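ether_addr_equal_unaligned() from <linux/etherdevice.h> is the idiomatic
test for 6-byte MAC equality when the buffers may be unaligned; note it
returns true on a match, inverting the memcmp() convention:

    #include <linux/etherdevice.h>

    /* before: if (!memcmp(bssid, vif->bssid, ETH_ALEN)) */
    if (ether_addr_equal_unaligned(bssid, vif->bssid))
            return vif->ndev;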
@@ -303,40 +304,27 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
return ret_val;
}
-#define USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS
-
static int linux_wlan_txq_task(void *vp)
{
int ret, txq_count;
struct wilc_vif *vif;
struct wilc *wl;
struct net_device *dev = vp;
-#if defined USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS
-#define TX_BACKOFF_WEIGHT_INCR_STEP (1)
-#define TX_BACKOFF_WEIGHT_DECR_STEP (1)
-#define TX_BACKOFF_WEIGHT_MAX (7)
-#define TX_BACKOFF_WEIGHT_MIN (0)
-#define TX_BACKOFF_WEIGHT_UNIT_MS (10)
- int backoff_weight = TX_BACKOFF_WEIGHT_MIN;
-#endif
vif = netdev_priv(dev);
wl = vif->wilc;
- up(&wl->txq_thread_started);
+ complete(&wl->txq_thread_started);
while (1) {
down(&wl->txq_event);
if (wl->close) {
- up(&wl->txq_thread_started);
+ complete(&wl->txq_thread_started);
while (!kthread_should_stop())
schedule();
break;
}
-#if !defined USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS
- ret = wilc_wlan_handle_txq(dev, &txq_count);
-#else
do {
ret = wilc_wlan_handle_txq(dev, &txq_count);
if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) {
@@ -345,20 +333,7 @@ static int linux_wlan_txq_task(void *vp)
if (netif_queue_stopped(wl->vif[1]->ndev))
netif_wake_queue(wl->vif[1]->ndev);
}
-
- if (ret == WILC_TX_ERR_NO_BUF) {
- backoff_weight += TX_BACKOFF_WEIGHT_INCR_STEP;
- if (backoff_weight > TX_BACKOFF_WEIGHT_MAX)
- backoff_weight = TX_BACKOFF_WEIGHT_MAX;
- } else {
- if (backoff_weight > TX_BACKOFF_WEIGHT_MIN) {
- backoff_weight -= TX_BACKOFF_WEIGHT_DECR_STEP;
- if (backoff_weight < TX_BACKOFF_WEIGHT_MIN)
- backoff_weight = TX_BACKOFF_WEIGHT_MIN;
- }
- }
} while (ret == WILC_TX_ERR_NO_BUF && !wl->close);
-#endif
}
return 0;
}
@@ -449,7 +424,6 @@ static int linux_wlan_init_test_config(struct net_device *dev,
struct wilc_vif *vif)
{
unsigned char c_val[64];
- unsigned char mac_add[] = {0x00, 0x80, 0xC2, 0x5E, 0xa2, 0xff};
struct wilc *wilc = vif->wilc;
struct wilc_priv *priv;
struct host_if_drv *hif_drv;
@@ -458,9 +432,6 @@ static int linux_wlan_init_test_config(struct net_device *dev,
priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
hif_drv = (struct host_if_drv *)priv->hif_drv;
netdev_dbg(dev, "Host = %p\n", hif_drv);
- wilc_get_mac_address(vif, mac_add);
-
- netdev_dbg(dev, "MAC address is : %pM\n", mac_add);
wilc_get_chipid(wilc, false);
*(int *)c_val = 1;
@@ -622,11 +593,6 @@ static int linux_wlan_init_test_config(struct net_device *dev,
0))
goto _fail_;
- memcpy(c_val, mac_add, 6);
-
- if (!wilc_wlan_cfg_set(vif, 0, WID_MAC_ADDR, c_val, 6, 0, 0))
- goto _fail_;
-
c_val[0] = DETECT_PROTECT_REPORT;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OBSS_NONHT_DETECTION, c_val, 1,
0, 0))
@@ -691,14 +657,6 @@ void wilc1000_wlan_deinit(struct net_device *dev)
wilc_wlan_stop(wl);
wilc_wlan_cleanup(dev);
-#if defined(PLAT_ALLWINNER_A20) || defined(PLAT_ALLWINNER_A23) || defined(PLAT_ALLWINNER_A31)
- if (!wl->dev_irq_num &&
- wl->hif_func->disable_interrupt) {
- mutex_lock(&wl->hif_cs);
- wl->hif_func->disable_interrupt(wl);
- mutex_unlock(&wl->hif_cs);
- }
-#endif
wlan_deinit_locks(dev);
wl->initialized = false;
@@ -727,8 +685,7 @@ static int wlan_init_locks(struct net_device *dev)
sema_init(&wl->cfg_event, 0);
sema_init(&wl->sync_event, 0);
-
- sema_init(&wl->txq_thread_started, 0);
+ init_completion(&wl->txq_thread_started);
return 0;
}
@@ -765,7 +722,7 @@ static int wlan_initialize_threads(struct net_device *dev)
wilc->close = 0;
return -ENOBUFS;
}
- down(&wilc->txq_thread_started);
+ wait_for_completion(&wilc->txq_thread_started);
return 0;
}
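txq_thread_started is a startup handshake: the creator blocks until the
new kthread reports in, so everything after wlan_initialize_threads() can
assume the TX queue thread is live. The pattern, with hypothetical names:

    #include <linux/kthread.h>
    #include <linux/completion.h>
    #include <linux/sched.h>

    static struct completion thread_started;

    static int demo_thread(void *arg)
    {
            complete(&thread_started);      /* tell the creator we run */
            while (!kthread_should_stop())
                    schedule();
            return 0;
    }

    static int demo_start(void)
    {
            struct task_struct *t;

            init_completion(&thread_started);
            t = kthread_run(demo_thread, NULL, "demo");
            if (IS_ERR(t))
                    return PTR_ERR(t);
            wait_for_completion(&thread_started);
            return 0;
    }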
@@ -896,25 +853,20 @@ static int mac_init_fn(struct net_device *ndev)
int wilc_mac_open(struct net_device *ndev)
{
struct wilc_vif *vif;
- struct wilc *wilc;
unsigned char mac_add[ETH_ALEN] = {0};
int ret = 0;
int i = 0;
- struct wilc_priv *priv;
struct wilc *wl;
vif = netdev_priv(ndev);
wl = vif->wilc;
if (!wl || !wl->dev) {
- netdev_err(ndev, "wilc1000: SPI device not ready\n");
+ netdev_err(ndev, "device not ready\n");
return -ENODEV;
}
- vif = netdev_priv(ndev);
- wilc = vif->wilc;
- priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy);
netdev_dbg(ndev, "MAC OPEN[%p]\n", ndev);
ret = wilc_init_host_int(ndev);
@@ -933,13 +885,13 @@ int wilc_mac_open(struct net_device *ndev)
wilc_set_wfi_drv_handler(vif,
wilc_get_vif_idx(vif),
0);
- } else if (!wilc_wlan_get_num_conn_ifcs(wilc)) {
+ } else if (!wilc_wlan_get_num_conn_ifcs(wl)) {
wilc_set_wfi_drv_handler(vif,
wilc_get_vif_idx(vif),
- wilc->open_ifcs);
+ wl->open_ifcs);
} else {
- if (memcmp(wilc->vif[i ^ 1]->bssid,
- wilc->vif[i ^ 1]->src_addr, 6))
+ if (memcmp(wl->vif[i ^ 1]->bssid,
+ wl->vif[i ^ 1]->src_addr, 6))
wilc_set_wfi_drv_handler(vif,
wilc_get_vif_idx(vif),
0);
@@ -969,12 +921,12 @@ int wilc_mac_open(struct net_device *ndev)
wilc_mgmt_frame_register(vif->ndev->ieee80211_ptr->wiphy,
vif->ndev->ieee80211_ptr,
- vif->g_struct_frame_reg[0].frame_type,
- vif->g_struct_frame_reg[0].reg);
+ vif->frame_reg[0].type,
+ vif->frame_reg[0].reg);
wilc_mgmt_frame_register(vif->ndev->ieee80211_ptr->wiphy,
vif->ndev->ieee80211_ptr,
- vif->g_struct_frame_reg[1].frame_type,
- vif->g_struct_frame_reg[1].reg);
+ vif->frame_reg[1].type,
+ vif->frame_reg[1].reg);
netif_wake_queue(ndev);
wl->open_ifcs++;
vif->mac_opened = 1;
@@ -991,14 +943,10 @@ static struct net_device_stats *mac_stats(struct net_device *dev)
static void wilc_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
- struct wilc_priv *priv;
- struct host_if_drv *hif_drv;
struct wilc_vif *vif;
int i = 0;
- priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
vif = netdev_priv(dev);
- hif_drv = (struct host_if_drv *)priv->hif_drv;
if (dev->flags & IFF_PROMISC)
return;
@@ -1152,7 +1100,6 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
s8 rssi;
u32 size = 0, length = 0;
struct wilc_vif *vif;
- struct wilc_priv *priv;
s32 ret = 0;
struct wilc *wilc;
@@ -1176,7 +1123,6 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
return PTR_ERR(buff);
if (strncasecmp(buff, "RSSI", length) == 0) {
- priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy);
ret = wilc_get_rssi(vif, &rssi);
netdev_info(ndev, "RSSI :%d\n", rssi);
@@ -1263,8 +1209,8 @@ void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
}
vif = netdev_priv(wilc->vif[1]->ndev);
- if ((buff[0] == vif->g_struct_frame_reg[0].frame_type && vif->g_struct_frame_reg[0].reg) ||
- (buff[0] == vif->g_struct_frame_reg[1].frame_type && vif->g_struct_frame_reg[1].reg))
+ if ((buff[0] == vif->frame_reg[0].type && vif->frame_reg[0].reg) ||
+ (buff[0] == vif->frame_reg[1].type && vif->frame_reg[1].reg))
WILC_WFI_p2p_rx(wilc->vif[1]->ndev, buff, size);
}
@@ -1280,8 +1226,10 @@ void wilc_netdev_cleanup(struct wilc *wilc)
vif[i] = netdev_priv(wilc->vif[i]->ndev);
}
- if (wilc && wilc->firmware)
+ if (wilc && wilc->firmware) {
release_firmware(wilc->firmware);
+ wilc->firmware = NULL;
+ }
if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
wilc_lock_timeout(wilc, &close_exit_sync, 5 * 1000);
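Clearing wilc->firmware after release_firmware() makes the cleanup
idempotent: any later test of the pointer sees NULL rather than a stale
value, and a second release cannot happen. The release-and-clear shape:

    if (wilc->firmware) {
            release_firmware(wilc->firmware);
            wilc->firmware = NULL;  /* poison against double release */
    }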
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index d41b8b679..4268e2f29 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -196,9 +196,6 @@ static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
dev_err(&spi->dev,
"can't write data with the following length: %d\n",
len);
- dev_err(&spi->dev,
- "FAILED due to NULL buffer or ZERO length check the following length: %d\n",
- len);
ret = -EINVAL;
}
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 448a5c8c4..51aff4ff7 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -102,7 +102,7 @@ static u8 op_ifcs;
u8 wilc_initialized = 1;
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -241,7 +241,7 @@ static void refresh_scan(void *user_void, u8 all, bool direct_scan)
struct ieee80211_channel *channel;
if (network_info) {
- freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ);
+ freq = ieee80211_channel_to_frequency((s32)network_info->ch, NL80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, freq);
rssi = get_rssi_avg(network_info);
@@ -409,7 +409,7 @@ static void CfgScanResult(enum scan_event scan_event,
return;
if (network_info) {
- s32Freq = ieee80211_channel_to_frequency((s32)network_info->ch, IEEE80211_BAND_2GHZ);
+ s32Freq = ieee80211_channel_to_frequency((s32)network_info->ch, NL80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, s32Freq);
if (!channel)
@@ -451,7 +451,7 @@ static void CfgScanResult(enum scan_event scan_event,
} else if (scan_event == SCAN_EVENT_DONE) {
refresh_scan(priv, 1, false);
- down(&(priv->hSemScanReq));
+ mutex_lock(&priv->scan_req_lock);
if (priv->pstrScanReq) {
cfg80211_scan_done(priv->pstrScanReq, false);
@@ -459,9 +459,9 @@ static void CfgScanResult(enum scan_event scan_event,
priv->bCfgScanning = false;
priv->pstrScanReq = NULL;
}
- up(&(priv->hSemScanReq));
+ mutex_unlock(&priv->scan_req_lock);
} else if (scan_event == SCAN_EVENT_ABORTED) {
- down(&(priv->hSemScanReq));
+ mutex_lock(&priv->scan_req_lock);
if (priv->pstrScanReq) {
update_scan_time();
@@ -471,7 +471,7 @@ static void CfgScanResult(enum scan_event scan_event,
priv->bCfgScanning = false;
priv->pstrScanReq = NULL;
}
- up(&(priv->hSemScanReq));
+ mutex_unlock(&priv->scan_req_lock);
}
}
}
@@ -558,11 +558,11 @@ static void CfgConnectResult(enum conn_event enuConnDisconnEvent,
if (!pstrWFIDrv->p2p_connect)
wlan_channel = INVALID_CHANNEL;
- if ((pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev)) {
+ if ((pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev))
pstrDisconnectNotifInfo->reason = 3;
- } else if ((!pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev)) {
+ else if ((!pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev))
pstrDisconnectNotifInfo->reason = 1;
- }
+
cfg80211_disconnected(dev, pstrDisconnectNotifInfo->reason, pstrDisconnectNotifInfo->ie,
pstrDisconnectNotifInfo->ie_len, false,
GFP_KERNEL);
@@ -739,18 +739,15 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
wilc_add_wep_key_bss_sta(vif, sme->key, sme->key_len,
sme->key_idx);
} else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) {
- if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP) {
+ if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP)
u8security = ENCRYPT_ENABLED | WPA2 | TKIP;
- } else {
+ else
u8security = ENCRYPT_ENABLED | WPA2 | AES;
- }
} else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) {
- if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP) {
+ if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP)
u8security = ENCRYPT_ENABLED | WPA | TKIP;
- } else {
+ else
u8security = ENCRYPT_ENABLED | WPA | AES;
- }
-
} else {
s32Error = -ENOTSUPP;
netdev_err(dev, "Not supported cipher\n");
@@ -762,11 +759,10 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
if ((sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
|| (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) {
for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++) {
- if (sme->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP) {
+ if (sme->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP)
u8security = u8security | TKIP;
- } else {
+ else
u8security = u8security | AES;
- }
}
}
@@ -823,11 +819,22 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_co
struct wilc_priv *priv;
struct host_if_drv *pstrWFIDrv;
struct wilc_vif *vif;
+ struct wilc *wilc;
u8 NullBssid[ETH_ALEN] = {0};
wilc_connecting = 0;
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
+ wilc = vif->wilc;
+
+ if (!wilc)
+ return -EIO;
+
+ if (wilc->close) {
+ /* already disconnected; nothing more to do */
+ cfg80211_disconnected(dev, 0, NULL, 0, true, GFP_KERNEL);
+ return 0;
+ }
pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
if (!pstrWFIDrv->p2p_connect)
@@ -1115,9 +1122,12 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
}
if (key_index >= 0 && key_index <= 3) {
- memset(priv->WILC_WFI_wep_key[key_index], 0, priv->WILC_WFI_wep_key_len[key_index]);
- priv->WILC_WFI_wep_key_len[key_index] = 0;
- wilc_remove_wep_key(vif, key_index);
+ if (priv->WILC_WFI_wep_key_len[key_index]) {
+ memset(priv->WILC_WFI_wep_key[key_index], 0,
+ priv->WILC_WFI_wep_key_len[key_index]);
+ priv->WILC_WFI_wep_key_len[key_index] = 0;
+ wilc_remove_wep_key(vif, key_index);
+ }
} else {
wilc_remove_key(priv->hif_drv, mac_addr);
}
@@ -1355,9 +1365,8 @@ static void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len)
u8 channel_list_attr_index = 0;
while (index < len) {
- if (buf[index] == GO_INTENT_ATTR_ID) {
+ if (buf[index] == GO_INTENT_ATTR_ID)
buf[index + 3] = (buf[index + 3] & 0x01) | (0x00 << 1);
- }
if (buf[index] == CHANLIST_ATTR_ID)
channel_list_attr_index = index;
@@ -1369,9 +1378,8 @@ static void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len)
if (channel_list_attr_index) {
for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) {
if (buf[i] == 0x51) {
- for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) {
+ for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++)
buf[j] = wlan_channel;
- }
break;
}
}
@@ -1409,9 +1417,8 @@ static void WILC_WFI_CfgParseTxAction(u8 *buf, u32 len, bool bOperChan, u8 iftyp
if (channel_list_attr_index) {
for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) {
if (buf[i] == 0x51) {
- for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) {
+ for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++)
buf[j] = wlan_channel;
- }
break;
}
}
@@ -1451,7 +1458,7 @@ void WILC_WFI_p2p_rx(struct net_device *dev, u8 *buff, u32 size)
return;
}
} else {
- s32Freq = ieee80211_channel_to_frequency(curr_channel, IEEE80211_BAND_2GHZ);
+ s32Freq = ieee80211_channel_to_frequency(curr_channel, NL80211_BAND_2GHZ);
if (ieee80211_is_action(buff[FRAME_TYPE_ID])) {
if (priv->bCfgScanning && time_after_eq(jiffies, (unsigned long)pstrWFIDrv->p2p_timeout)) {
@@ -1752,15 +1759,15 @@ void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
switch (frame_type) {
case PROBE_REQ:
{
- vif->g_struct_frame_reg[0].frame_type = frame_type;
- vif->g_struct_frame_reg[0].reg = reg;
+ vif->frame_reg[0].type = frame_type;
+ vif->frame_reg[0].reg = reg;
}
break;
case ACTION:
{
- vif->g_struct_frame_reg[1].frame_type = frame_type;
- vif->g_struct_frame_reg[1].reg = reg;
+ vif->frame_reg[1].type = frame_type;
+ vif->frame_reg[1].reg = reg;
}
break;
@@ -1797,6 +1804,7 @@ static int dump_station(struct wiphy *wiphy, struct net_device *dev,
wilc_get_rssi(vif, &sinfo->signal);
+ memcpy(mac, priv->au8AssociatedBss, ETH_ALEN);
return 0;
}
@@ -2246,7 +2254,7 @@ static struct wireless_dev *WILC_WFI_CfgAlloc(void)
WILC_WFI_band_2ghz.ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K;
WILC_WFI_band_2ghz.ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &WILC_WFI_band_2ghz;
+ wdev->wiphy->bands[NL80211_BAND_2GHZ] = &WILC_WFI_band_2ghz;
return wdev;
@@ -2269,7 +2277,6 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *de
}
priv = wdev_priv(wdev);
- sema_init(&(priv->SemHandleUpdateStats), 1);
priv->wdev = wdev;
wdev->wiphy->max_scan_ssids = MAX_NUM_PROBED_SSID;
#ifdef CONFIG_PM
@@ -2315,7 +2322,7 @@ int wilc_init_host_int(struct net_device *net)
priv->bInP2PlistenState = false;
- sema_init(&(priv->hSemScanReq), 1);
+ mutex_init(&priv->scan_req_lock);
s32Error = wilc_init(net, &priv->hif_drv);
if (s32Error)
netdev_err(net, "Error while initializing hostinterface\n");
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index 4123cffe3..3a561df6d 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -130,8 +130,7 @@ struct wilc_priv {
struct wilc_wfi_key *wilc_ptk[MAX_NUM_STA];
u8 wilc_groupkey;
/* semaphores */
- struct semaphore SemHandleUpdateStats;
- struct semaphore hSemScanReq;
+ struct mutex scan_req_lock;
/* */
bool gbAutoRateAdjusted;
@@ -139,18 +138,17 @@ struct wilc_priv {
};
-typedef struct {
- u16 frame_type;
+struct frame_reg {
+ u16 type;
bool reg;
-
-} struct_frame_reg;
+};
struct wilc_vif {
u8 idx;
u8 iftype;
int monitor_flag;
int mac_opened;
- struct_frame_reg g_struct_frame_reg[num_reg_frame];
+ struct frame_reg frame_reg[num_reg_frame];
struct net_device_stats netstats;
struct wilc *wilc;
u8 src_addr[ETH_ALEN];
@@ -181,8 +179,7 @@ struct wilc {
struct semaphore cfg_event;
struct semaphore sync_event;
struct semaphore txq_event;
-
- struct semaphore txq_thread_started;
+ struct completion txq_thread_started;
struct task_struct *txq_thread;
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index fd938fb43..11e16d56a 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -150,11 +150,6 @@ static u32 pending_base;
static u32 tcp_session;
static u32 pending_acks;
-static inline int init_tcp_tracking(void)
-{
- return 0;
-}
-
static inline int add_tcp_session(u32 src_prt, u32 dst_prt, u32 seq)
{
if (tcp_session < 2 * MAX_TCP_SESSION) {
@@ -330,8 +325,11 @@ static int wilc_wlan_txq_add_cfg_pkt(struct wilc_vif *vif, u8 *buffer,
tqe->priv = NULL;
tqe->tcp_pending_ack_idx = NOT_TCP_ACK;
- if (wilc_wlan_txq_add_to_head(vif, tqe))
+ if (wilc_wlan_txq_add_to_head(vif, tqe)) {
+ kfree(tqe);
return 0;
+ }
+
return 1;
}
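The kfree(tqe) is a leak fix: a nonzero return from
wilc_wlan_txq_add_to_head() means the entry was never linked into the
queue, so ownership stays with this function and the allocation must be
freed before bailing out. In outline (allocation flags assumed to match
the driver's context):

    tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
    if (!tqe)
            return 0;
    /* ... fill in type, buffer, size, callbacks ... */
    if (wilc_wlan_txq_add_to_head(vif, tqe)) {
            kfree(tqe);     /* not queued; still ours to free */
            return 0;
    }
    return 1;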
@@ -626,13 +624,12 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if ((reg & 0x1) == 0) {
break;
- } else {
- counter++;
- if (counter > 200) {
- counter = 0;
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, 0);
- break;
- }
+ }
+ counter++;
+ if (counter > 200) {
+ counter = 0;
+ ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, 0);
+ break;
}
} while (!wilc->quit);
@@ -658,9 +655,8 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if ((reg >> 2) & 0x1) {
entries = ((reg >> 3) & 0x3f);
break;
- } else {
- release_bus(wilc, RELEASE_ALLOW_SLEEP);
}
+ release_bus(wilc, RELEASE_ALLOW_SLEEP);
} while (--timeout);
if (timeout <= 0) {
ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x0);
@@ -679,9 +675,8 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if (!ret)
break;
break;
- } else {
- break;
}
+ break;
} while (1);
if (!ret)
@@ -900,8 +895,6 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
DATA_INT_CLR | ENABLE_RX_VMM);
ret = wilc->hif_func->hif_block_rx_ext(wilc, 0, buffer, size);
- if (!ret)
- goto _end_;
_end_:
if (ret) {
offset += size;
@@ -951,10 +944,8 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
blksz = BIT(12);
dma_buffer = kmalloc(blksz, GFP_KERNEL);
- if (!dma_buffer) {
- ret = -EIO;
- goto _fail_1;
- }
+ if (!dma_buffer)
+ return -EIO;
offset = 0;
do {
@@ -992,8 +983,6 @@ _fail_:
kfree(dma_buffer);
-_fail_1:
-
return (ret < 0) ? ret : 0;
}
@@ -1211,7 +1200,7 @@ static int wilc_wlan_cfg_commit(struct wilc_vif *vif, int type,
return 0;
}
-int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
+int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
u32 buffer_size, int commit, u32 drv_handler)
{
u32 offset;
@@ -1226,7 +1215,7 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
offset = wilc->cfg_frame_offset;
ret_size = wilc_wlan_cfg_set_wid(wilc->cfg_frame.frame, offset,
- (u16)wid, buffer, buffer_size);
+ wid, buffer, buffer_size);
offset += ret_size;
wilc->cfg_frame_offset = offset;
@@ -1253,7 +1242,7 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
return ret_size;
}
-int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
+int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
u32 drv_handler)
{
u32 offset;
@@ -1267,8 +1256,7 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
wilc->cfg_frame_offset = 0;
offset = wilc->cfg_frame_offset;
- ret_size = wilc_wlan_cfg_get_wid(wilc->cfg_frame.frame, offset,
- (u16)wid);
+ ret_size = wilc_wlan_cfg_get_wid(wilc->cfg_frame.frame, offset, wid);
offset += ret_size;
wilc->cfg_frame_offset = offset;
@@ -1291,9 +1279,9 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
return ret_size;
}
-int wilc_wlan_cfg_get_val(u32 wid, u8 *buffer, u32 buffer_size)
+int wilc_wlan_cfg_get_val(u16 wid, u8 *buffer, u32 buffer_size)
{
- return wilc_wlan_cfg_get_wid_value((u16)wid, buffer, buffer_size);
+ return wilc_wlan_cfg_get_wid_value(wid, buffer, buffer_size);
}
int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
@@ -1440,7 +1428,6 @@ int wilc_wlan_init(struct net_device *dev)
ret = -EIO;
goto _fail_;
}
- init_tcp_tracking();
return 1;
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index bcd4bfa5a..30e5312ee 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -284,11 +284,11 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count);
void wilc_handle_isr(struct wilc *wilc);
void wilc_wlan_cleanup(struct net_device *dev);
-int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
+int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
u32 buffer_size, int commit, u32 drv_handler);
-int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
+int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
u32 drv_handler);
-int wilc_wlan_cfg_get_val(u32 wid, u8 *buffer, u32 buffer_size);
+int wilc_wlan_cfg_get_val(u16 wid, u8 *buffer, u32 buffer_size);
int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
u32 buffer_size, wilc_tx_complete_func_t func);
void wilc_chip_sleep_manually(struct wilc *wilc);
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index b3425b9ce..926fc1631 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -230,7 +230,7 @@ static int wilc_wlan_cfg_set_str(u8 *frame, u32 offset, u16 id, u8 *str, u32 siz
buf[1] = (u8)(id >> 8);
buf[2] = (u8)size;
- if ((str != NULL) && (size != 0))
+ if ((str) && (size != 0))
memcpy(&buf[3], str, size);
return (size + 3);
@@ -251,11 +251,10 @@ static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size)
buf[2] = (u8)size;
buf[3] = (u8)(size >> 8);
- if ((b != NULL) && (size != 0)) {
+ if ((b) && (size != 0)) {
memcpy(&buf[4], b, size);
- for (i = 0; i < size; i++) {
+ for (i = 0; i < size; i++)
checksum += buf[i + 4];
- }
}
buf[size + 4] = checksum;
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index 83cf84dd6..410bfc034 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -15,18 +15,6 @@
/********************************************
*
- * Debug Flags
- *
- ********************************************/
-
-#define N_INIT 0x00000001
-#define N_ERR 0x00000002
-#define N_TXQ 0x00000004
-#define N_INTR 0x00000008
-#define N_RXQ 0x00000010
-
-/********************************************
- *
* Host Interface Defines
*
********************************************/
@@ -37,15 +25,6 @@
/********************************************
*
- * Tx/Rx Buffer Size Defines
- *
- ********************************************/
-
-#define CE_TX_BUFFER_SIZE (64 * 1024)
-#define CE_RX_BUFFER_SIZE (384 * 1024)
-
-/********************************************
- *
* Wlan Interface Defines
*
********************************************/
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 8bad018ed..a6e6fb9f4 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -415,7 +415,7 @@ static int prism2_scan(struct wiphy *wiphy,
ie_len = ie_buf[1] + 2;
memcpy(&ie_buf[2], &(msg2.ssid.data.data), msg2.ssid.data.len);
freq = ieee80211_channel_to_frequency(msg2.dschannel.data,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
bss = cfg80211_inform_bss(wiphy,
ieee80211_get_channel(wiphy, freq),
CFG80211_BSS_FTYPE_UNKNOWN,
@@ -758,9 +758,9 @@ static struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev
priv->band.n_channels = ARRAY_SIZE(prism2_channels);
priv->band.bitrates = priv->rates;
priv->band.n_bitrates = ARRAY_SIZE(prism2_rates);
- priv->band.band = IEEE80211_BAND_2GHZ;
+ priv->band.band = NL80211_BAND_2GHZ;
priv->band.ht_cap.ht_supported = false;
- wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
set_wiphy_dev(wiphy, dev);
wiphy->privid = prism2_wiphy_privid;
@@ -771,8 +771,10 @@ static struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev
wiphy->n_cipher_suites = PRISM2_NUM_CIPHER_SUITES;
wiphy->cipher_suites = prism2_cipher_suites;
- if (wiphy_register(wiphy) < 0)
+ if (wiphy_register(wiphy) < 0) {
+ wiphy_free(wiphy);
return NULL;
+ }
return wiphy;
}
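The added wiphy_free() plugs a leak: wiphy_new() allocates the wiphy, and
once wiphy_register() fails nothing else will ever free it. The usual
alloc/register error shape (ops and private-struct names assumed from
this file):

    wiphy = wiphy_new(&prism2_usb_cfg_ops,
                      sizeof(struct prism2_wiphy_private));
    if (!wiphy)
            return NULL;
    /* ... fill in bands, cipher suites, flags ... */
    if (wiphy_register(wiphy) < 0) {
            wiphy_free(wiphy);      /* undo wiphy_new() on failure */
            return NULL;
    }
    return wiphy;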
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 21a92df85..337810750 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -614,7 +614,7 @@ static hfa384x_usbctlx_t *usbctlx_alloc(void)
ctlx = kzalloc(sizeof(*ctlx),
in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
- if (ctlx != NULL)
+ if (ctlx)
init_completion(&ctlx->done);
return ctlx;
@@ -797,7 +797,7 @@ static inline struct usbctlx_completor *init_rmem_completor(
----------------------------------------------------------------*/
static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx)
{
- if (ctlx->usercb != NULL) {
+ if (ctlx->usercb) {
hfa384x_cmdresult_t cmdresult;
if (ctlx->state != CTLX_COMPLETE) {
@@ -2738,7 +2738,7 @@ static void hfa384x_usbctlx_completion_task(unsigned long data)
/* Call the completion function that this
* command was assigned, assuming it has one.
*/
- if (ctlx->cmdcb != NULL) {
+ if (ctlx->cmdcb) {
spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
ctlx->cmdcb(hw, ctlx);
spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -3629,7 +3629,7 @@ static void hfa384x_ctlxout_callback(struct urb *urb)
dbprint_urb(urb);
#endif
if ((urb->status == -ESHUTDOWN) ||
- (urb->status == -ENODEV) || (hw == NULL))
+ (urb->status == -ENODEV) || !hw)
return;
retry:
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 0a8f3960d..6354036ff 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -75,8 +75,8 @@
#include "p80211ioctl.h"
#include "p80211req.h"
-static u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 };
-static u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
+static const u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 };
+static const u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
/*----------------------------------------------------------------
* p80211pb_ether_to_80211
@@ -243,7 +243,6 @@ static void orinoco_spy_gather(wlandevice_t *wlandev, char *mac,
for (i = 0; i < wlandev->spy_number; i++) {
if (!memcmp(wlandev->spy_address[i], mac, ETH_ALEN)) {
- memcpy(wlandev->spy_address[i], mac, ETH_ALEN);
wlandev->spy_stat[i].level = rxmeta->signal;
wlandev->spy_stat[i].noise = rxmeta->noise;
wlandev->spy_stat[i].qual =
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 88255ce28..90cc8cdcf 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -156,7 +156,7 @@ static int p80211knetdev_open(netdevice_t *netdev)
return -ENODEV;
/* Tell the MSD to open */
- if (wlandev->open != NULL) {
+ if (wlandev->open) {
result = wlandev->open(wlandev);
if (result == 0) {
netif_start_queue(wlandev->netdev);
@@ -186,7 +186,7 @@ static int p80211knetdev_stop(netdevice_t *netdev)
int result = 0;
wlandevice_t *wlandev = netdev->ml_priv;
- if (wlandev->close != NULL)
+ if (wlandev->close)
result = wlandev->close(wlandev);
netif_stop_queue(wlandev->netdev);
@@ -393,7 +393,7 @@ static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
goto failed;
}
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
netdev->stats.tx_packets++;
/* count only the packet payload */
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 810ee68aa..820a0e20a 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -158,7 +158,6 @@ extern int wlan_wext_write;
/* WLAN device type */
typedef struct wlandevice {
- struct wlandevice *next; /* link for list of devices */
void *priv; /* private data for MSD */
/* Subsystem State */
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 301457102..8233bf7af 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -278,7 +278,8 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
/* Build the PDA we're going to use. */
if (read_cardpda(&pda, wlandev)) {
netdev_err(wlandev->netdev, "load_cardpda failed, exiting.\n");
- return 1;
+ result = 1;
+ goto out;
}
/* read the card's PRI-SUP */
@@ -315,55 +316,58 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
if (result) {
netdev_err(wlandev->netdev,
"Failed to read the data exiting.\n");
- return 1;
+ goto out;
}
result = validate_identity();
-
if (result) {
netdev_err(wlandev->netdev, "Incompatible firmware image.\n");
- return 1;
+ goto out;
}
if (startaddr == 0x00000000) {
netdev_err(wlandev->netdev,
"Can't RAM download a Flash image!\n");
- return 1;
+ result = 1;
+ goto out;
}
/* Make the image chunks */
result = mkimage(fchunk, &nfchunks);
if (result) {
netdev_err(wlandev->netdev, "Failed to make image chunk.\n");
- return 1;
+ goto free_chunks;
}
/* Do any plugging */
result = plugimage(fchunk, nfchunks, s3plug, ns3plug, &pda);
if (result) {
netdev_err(wlandev->netdev, "Failed to plug data.\n");
- return 1;
+ goto free_chunks;
}
/* Insert any CRCs */
- if (crcimage(fchunk, nfchunks, s3crc, ns3crc)) {
+ result = crcimage(fchunk, nfchunks, s3crc, ns3crc);
+ if (result) {
netdev_err(wlandev->netdev, "Failed to insert all CRCs\n");
- return 1;
+ goto free_chunks;
}
/* Write the image */
result = writeimage(wlandev, fchunk, nfchunks);
if (result) {
netdev_err(wlandev->netdev, "Failed to ramwrite image data.\n");
- return 1;
+ goto free_chunks;
}
+ netdev_info(wlandev->netdev, "prism2_usb: firmware loading finished.\n");
+
+free_chunks:
/* clear any allocated memory */
free_chunks(fchunk, &nfchunks);
free_srecs();
- netdev_info(wlandev->netdev, "prism2_usb: firmware loading finished.\n");
-
+out:
return result;
}
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 41358bbc6..b26d09ff8 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -8,7 +8,7 @@
{ USB_DEVICE(vid, pid), \
.driver_info = (unsigned long)name }
-static struct usb_device_id usb_prism_tbl[] = {
+static const struct usb_device_id usb_prism_tbl[] = {
PRISM_DEV(0x04bb, 0x0922, "IOData AirPort WN-B11/USBS"),
PRISM_DEV(0x07aa, 0x0012, "Corega Wireless LAN USB Stick-11"),
PRISM_DEV(0x09aa, 0x3642, "Prism2.x 11Mbps WLAN USB Adapter"),
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 7eadf922b..d56ef1425 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -1130,8 +1130,9 @@ static int XGIfb_get_cmap_len(const struct fb_var_screeninfo *var)
return (var->bits_per_pixel == 8) ? 256 : 16;
}
-static int XGIfb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp, struct fb_info *info)
+static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
+ unsigned int green, unsigned int blue,
+ unsigned int transp, struct fb_info *info)
{
struct xgifb_video_info *xgifb_info = info->par;
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 26b539bc6..062ece22e 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -355,7 +355,8 @@ static void XGINew_DDR2_DefaultRegister(
unsigned long P3d4 = Port, P3c4 = Port - 0x10;
/* keep following setting sequence, each setting in
- * the same reg insert idle */
+ * the same reg insert idle
+ */
xgifb_reg_set(P3d4, 0x82, 0x77);
xgifb_reg_set(P3d4, 0x86, 0x00);
xgifb_reg_get(P3d4, 0x86); /* Insert read command for delay */
@@ -551,7 +552,8 @@ static int XGINew_ReadWriteRest(unsigned short StopAddr,
writel(Position, fbaddr + Position);
}
- usleep_range(500, 1500); /* Fix #1759 Memory Size error in Multi-Adapter. */
+ /* Fix #1759 Memory Size error in Multi-Adapter. */
+ usleep_range(500, 1500);
Position = 0;
@@ -699,11 +701,11 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
break;
case XG42:
/*
- XG42 SR14 D[3] Reserve
- D[2] = 1, Dual Channel
- = 0, Single Channel
-
- It's Different from Other XG40 Series.
+ * XG42 SR14 D[3] Reserve
+ * D[2] = 1, Dual Channel
+ * = 0, Single Channel
+ *
+ * It's Different from Other XG40 Series.
*/
if (XGINew_CheckFrequence(pVBInfo) == 1) { /* DDRII, DDR2x */
pVBInfo->ram_bus = 32; /* 32 bits */
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index f97c77d88..50c8ea4f5 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -108,9 +108,9 @@ static void XGI_SetATTRegs(unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
ARdata = 0;
} else if ((pVBInfo->VBInfo &
- (SetCRT2ToTV | SetCRT2ToLCD)) &&
- (pVBInfo->VBInfo & SetInSlaveMode)) {
- ARdata = 0;
+ (SetCRT2ToTV | SetCRT2ToLCD)) &&
+ (pVBInfo->VBInfo & SetInSlaveMode)) {
+ ARdata = 0;
}
}
@@ -1992,7 +1992,8 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
}
/* LCD+TV can't support in slave mode
- * (Force LCDA+TV->LCDB) */
+ * (Force LCDA+TV->LCDB)
+ */
if ((tempbx & SetInSlaveMode) && (tempbx & XGI_SetCRT2ToLCDA)) {
tempbx ^= (SetCRT2ToLCD | XGI_SetCRT2ToLCDA |
SetCRT2ToDualEdge);
@@ -2983,7 +2984,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
if ((pVBInfo->VBInfo & SetCRT2ToHiVision) &&
!(pVBInfo->VBType & VB_SIS301LV) && (resinfo == 7))
- temp -= 2;
+ temp -= 2;
}
/* 0x05 Horizontal Display Start */
@@ -3450,8 +3451,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
if (!(pVBInfo->TVInfo &
(TVSetYPbPr525p | TVSetYPbPr750p)))
tempbx >>= 1;
- } else
+ } else {
tempbx >>= 1;
+ }
}
tempbx -= 2;
@@ -3839,9 +3841,9 @@ static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
if (pVBInfo->VGAVDE == 525) {
if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
| VB_SIS301LV | VB_SIS302LV
- | VB_XGI301C)) {
+ | VB_XGI301C))
temp = 0xC6;
- } else
+ else
temp = 0xC4;
xgifb_reg_set(pVBInfo->Part2Port, 0x2f, temp);
@@ -3851,9 +3853,9 @@ static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
if (pVBInfo->VGAVDE == 420) {
if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
| VB_SIS301LV | VB_SIS302LV
- | VB_XGI301C)) {
+ | VB_XGI301C))
temp = 0x4F;
- } else
+ else
temp = 0x4E;
xgifb_reg_set(pVBInfo->Part2Port, 0x2f, temp);
}
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index 45f2c992c..c801deb14 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -58,8 +58,9 @@ static const unsigned char XGI27_cr41[24][3] = {
{0xC4, 0x40, 0x84}, /* 1 CR8A */
{0xC4, 0x40, 0x84}, /* 2 CR8B */
{0xB3, 0x13, 0xa4}, /* 3 CR40[7],
- CR99[2:0],
- CR45[3:0]*/
+ * CR99[2:0],
+ * CR45[3:0]
+ */
{0xf0, 0xf5, 0xf0}, /* 4 CR59 */
{0x90, 0x90, 0x24}, /* 5 CR68 */
{0x77, 0x67, 0x44}, /* 6 CR69 */
@@ -101,9 +102,11 @@ const struct XGI_ExtStruct XGI330_EModeIDTable[] = {
{0x38, 0x0a1b, 0x0508, 0x08, 0x00, 0x16},
{0x3a, 0x0e3b, 0x0609, 0x09, 0x00, 0x1e},
{0x3c, 0x0e3b, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
- add CRT2MODE [2003/10/07] */
+ * add CRT2MODE [2003/10/07]
+ */
{0x3d, 0x0e7d, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
- add CRT2MODE */
+ * add CRT2MODE
+ */
{0x40, 0x9a1c, 0x0000, 0x00, 0x04, 0x00},
{0x41, 0x9a1d, 0x0000, 0x00, 0x04, 0x00},
{0x43, 0x0a1c, 0x0306, 0x06, 0x05, 0x06},
@@ -129,7 +132,8 @@ const struct XGI_ExtStruct XGI330_EModeIDTable[] = {
{0x64, 0x0a7f, 0x0508, 0x08, 0x00, 0x16},
{0x65, 0x0eff, 0x0609, 0x09, 0x00, 0x1e},
{0x66, 0x0eff, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
- add CRT2MODE */
+ * add CRT2MODE
+ */
{0x68, 0x067b, 0x080b, 0x0b, 0x00, 0x29},
{0x69, 0x06fd, 0x080b, 0x0b, 0x00, 0x29},
{0x6b, 0x07ff, 0x080b, 0x0b, 0x00, 0x29},
@@ -223,38 +227,38 @@ const struct XGI_CRT1TableStruct XGI_CRT1Table[] = {
0x0D, 0x3E, 0xE0, 0x83, 0xDF, 0x0E, 0x90} }, /* 0xb */
{ {0x65, 0x4F, 0x89, 0x57, 0x9F, 0x00, 0x01, 0x00,
0xFB, 0x1F, 0xE6, 0x8A, 0xDF, 0xFC, 0x10} }, /* 0xc */
- { {0x7B, 0x63, 0x9F, 0x6A, 0x93, 0x00, 0x05, 0x00, /* ;
- 0D (800x600,56Hz) */
- 0x6F, 0xF0, 0x58, 0x8A, 0x57, 0x70, 0xA0} }, /* ;
- (VCLK 36.0MHz) */
- { {0x7F, 0x63, 0x83, 0x6C, 0x1C, 0x00, 0x06, 0x00, /* ;
- 0E (800x600,60Hz) */
- 0x72, 0xF0, 0x58, 0x8C, 0x57, 0x73, 0xA0} }, /* ;
- (VCLK 40.0MHz) */
- { {0x7D, 0x63, 0x81, 0x6E, 0x1D, 0x00, 0x06, 0x00, /* ;
- 0F (800x600,72Hz) */
- 0x98, 0xF0, 0x7C, 0x82, 0x57, 0x99, 0x80} }, /* ;
- (VCLK 50.0MHz) */
- { {0x7F, 0x63, 0x83, 0x69, 0x13, 0x00, 0x06, 0x00, /* ;
- 10 (800x600,75Hz) */
- 0x6F, 0xF0, 0x58, 0x8B, 0x57, 0x70, 0xA0} }, /* ;
- (VCLK 49.5MHz) */
- { {0x7E, 0x63, 0x82, 0x6B, 0x13, 0x00, 0x06, 0x00, /* ;
- 11 (800x600,85Hz) */
- 0x75, 0xF0, 0x58, 0x8B, 0x57, 0x76, 0xA0} }, /* ;
- (VCLK 56.25MHz) */
- { {0x81, 0x63, 0x85, 0x6D, 0x18, 0x00, 0x06, 0x60, /* ;
- 12 (800x600,100Hz) */
- 0x7A, 0xF0, 0x58, 0x8B, 0x57, 0x7B, 0xA0} }, /* ;
- (VCLK 75.8MHz) */
- { {0x83, 0x63, 0x87, 0x6E, 0x19, 0x00, 0x06, 0x60, /* ;
- 13 (800x600,120Hz) */
- 0x81, 0xF0, 0x58, 0x8B, 0x57, 0x82, 0xA0} }, /* ;
- (VCLK 79.411MHz) */
- { {0x85, 0x63, 0x89, 0x6F, 0x1A, 0x00, 0x06, 0x60, /* ;
- 14 (800x600,160Hz) */
- 0x91, 0xF0, 0x58, 0x8B, 0x57, 0x92, 0xA0} }, /* ;
- (VCLK 105.822MHz) */
+ /* 0D (800x600,56Hz) */
+ { {0x7B, 0x63, 0x9F, 0x6A, 0x93, 0x00, 0x05, 0x00,
+ /* (VCLK 36.0MHz) */
+ 0x6F, 0xF0, 0x58, 0x8A, 0x57, 0x70, 0xA0} },
+ /* 0E (800x600,60Hz) */
+ { {0x7F, 0x63, 0x83, 0x6C, 0x1C, 0x00, 0x06, 0x00,
+ /* (VCLK 40.0MHz) */
+ 0x72, 0xF0, 0x58, 0x8C, 0x57, 0x73, 0xA0} },
+ /* 0F (800x600,72Hz) */
+ { {0x7D, 0x63, 0x81, 0x6E, 0x1D, 0x00, 0x06, 0x00,
+ /* (VCLK 50.0MHz) */
+ 0x98, 0xF0, 0x7C, 0x82, 0x57, 0x99, 0x80} },
+ /* 10 (800x600,75Hz) */
+ { {0x7F, 0x63, 0x83, 0x69, 0x13, 0x00, 0x06, 0x00,
+ /* (VCLK 49.5MHz) */
+ 0x6F, 0xF0, 0x58, 0x8B, 0x57, 0x70, 0xA0} },
+ /* 11 (800x600,85Hz) */
+ { {0x7E, 0x63, 0x82, 0x6B, 0x13, 0x00, 0x06, 0x00,
+ /* (VCLK 56.25MHz) */
+ 0x75, 0xF0, 0x58, 0x8B, 0x57, 0x76, 0xA0} },
+ /* 12 (800x600,100Hz) */
+ { {0x81, 0x63, 0x85, 0x6D, 0x18, 0x00, 0x06, 0x60,
+ /* (VCLK 75.8MHz) */
+ 0x7A, 0xF0, 0x58, 0x8B, 0x57, 0x7B, 0xA0} },
+ /* 13 (800x600,120Hz) */
+ { {0x83, 0x63, 0x87, 0x6E, 0x19, 0x00, 0x06, 0x60,
+ /* (VCLK 79.411MHz) */
+ 0x81, 0xF0, 0x58, 0x8B, 0x57, 0x82, 0xA0} },
+ /* 14 (800x600,160Hz) */
+ { {0x85, 0x63, 0x89, 0x6F, 0x1A, 0x00, 0x06, 0x60,
+ /* (VCLK 105.822MHz) */
+ 0x91, 0xF0, 0x58, 0x8B, 0x57, 0x92, 0xA0} },
{ {0x99, 0x7F, 0x9D, 0x84, 0x1A, 0x00, 0x02, 0x00,
0x96, 0x1F, 0x7F, 0x83, 0x7F, 0x97, 0x10} }, /* 0x15 */
{ {0xA3, 0x7F, 0x87, 0x86, 0x97, 0x00, 0x02, 0x00,
@@ -388,7 +392,8 @@ static const struct SiS_LCDData XGI_ExtLCD1024x768Data[] = {
static const struct SiS_LCDData XGI_CetLCD1024x768Data[] = {
{1, 1, 1344, 806, 1344, 806}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 1344, 806, 1344, 806}, /* 01 (320x350,640x350) */
{1, 1, 1344, 806, 1344, 806}, /* 02 (360x400,720x400) */
{1, 1, 1344, 806, 1344, 806}, /* 03 (720x350) */
@@ -421,7 +426,8 @@ static const struct SiS_LCDData XGI_ExtLCD1280x1024Data[] = {
static const struct SiS_LCDData XGI_CetLCD1280x1024Data[] = {
{1, 1, 1688, 1066, 1688, 1066}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 1688, 1066, 1688, 1066}, /* 01 (320x350,640x350) */
{1, 1, 1688, 1066, 1688, 1066}, /* 02 (360x400,720x400) */
{1, 1, 1688, 1066, 1688, 1066}, /* 03 (720x350) */
@@ -434,7 +440,8 @@ static const struct SiS_LCDData XGI_CetLCD1280x1024Data[] = {
static const struct SiS_LCDData xgifb_lcd_1400x1050[] = {
{211, 100, 2100, 408, 1688, 1066}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{211, 64, 1536, 358, 1688, 1066}, /* 01 (320x350,640x350) */
{211, 100, 2100, 408, 1688, 1066}, /* 02 (360x400,720x400) */
{211, 64, 1536, 358, 1688, 1066}, /* 03 (720x350) */
@@ -442,13 +449,15 @@ static const struct SiS_LCDData xgifb_lcd_1400x1050[] = {
{211, 72, 1008, 609, 1688, 1066}, /* 05 (800x600x60Hz) */
{211, 128, 1400, 776, 1688, 1066}, /* 06 (1024x768x60Hz) */
{1, 1, 1688, 1066, 1688, 1066}, /* 07 (1280x1024x60Hz
- w/o Scaling) */
+ * w/o Scaling)
+ */
{1, 1, 1688, 1066, 1688, 1066} /* 08 (1400x1050x60Hz) */
};
static const struct SiS_LCDData XGI_ExtLCD1600x1200Data[] = {
{4, 1, 1620, 420, 2160, 1250}, /* 00 (320x200,320x400,
- 640x200,640x400)*/
+ * 640x200,640x400)
+ */
{27, 7, 1920, 375, 2160, 1250}, /* 01 (320x350,640x350) */
{4, 1, 1620, 420, 2160, 1250}, /* 02 (360x400,720x400)*/
{27, 7, 1920, 375, 2160, 1250}, /* 03 (720x350) */
@@ -462,7 +471,8 @@ static const struct SiS_LCDData XGI_ExtLCD1600x1200Data[] = {
static const struct SiS_LCDData XGI_StLCD1600x1200Data[] = {
{27, 4, 800, 500, 2160, 1250}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{27, 4, 800, 500, 2160, 1250}, /* 01 (320x350,640x350) */
{27, 4, 800, 500, 2160, 1250}, /* 02 (360x400,720x400) */
{27, 4, 800, 500, 2160, 1250}, /* 03 (720x350) */
@@ -489,7 +499,8 @@ static const struct SiS_LCDData XGI_NoScalingData[] = {
static const struct SiS_LCDData XGI_ExtLCD1024x768x75Data[] = {
{42, 25, 1536, 419, 1344, 806}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{48, 25, 1536, 369, 1344, 806}, /* ; 01 (320x350,640x350) */
{42, 25, 1536, 419, 1344, 806}, /* ; 02 (360x400,720x400) */
{48, 25, 1536, 369, 1344, 806}, /* ; 03 (720x350) */
@@ -500,7 +511,8 @@ static const struct SiS_LCDData XGI_ExtLCD1024x768x75Data[] = {
static const struct SiS_LCDData XGI_CetLCD1024x768x75Data[] = {
{1, 1, 1312, 800, 1312, 800}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 1312, 800, 1312, 800}, /* ; 01 (320x350,640x350) */
{1, 1, 1312, 800, 1312, 800}, /* ; 02 (360x400,720x400) */
{1, 1, 1312, 800, 1312, 800}, /* ; 03 (720x350) */
@@ -511,7 +523,8 @@ static const struct SiS_LCDData XGI_CetLCD1024x768x75Data[] = {
static const struct SiS_LCDData xgifb_lcd_1280x1024x75[] = {
{211, 60, 1024, 501, 1688, 1066}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{211, 60, 1024, 508, 1688, 1066}, /* ; 01 (320x350,640x350) */
{211, 60, 1024, 501, 1688, 1066}, /* ; 02 (360x400,720x400) */
{211, 60, 1024, 508, 1688, 1066}, /* ; 03 (720x350) */
@@ -525,7 +538,8 @@ static const struct SiS_LCDData xgifb_lcd_1280x1024x75[] = {
static const struct SiS_LCDData XGI_NoScalingDatax75[] = {
{1, 1, 800, 449, 800, 449}, /* ; 00 (320x200, 320x400,
- 640x200, 640x400) */
+ * 640x200, 640x400)
+ */
{1, 1, 800, 449, 800, 449}, /* ; 01 (320x350, 640x350) */
{1, 1, 900, 449, 900, 449}, /* ; 02 (360x400, 720x400) */
{1, 1, 900, 449, 900, 449}, /* ; 03 (720x350) */
@@ -732,7 +746,8 @@ static const struct XGI_LCDDesStruct XGI_StLCDDes1600x1200Data[] = {
static const struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesData[] = {
{9, 657, 448, 405, 96, 2}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{9, 657, 448, 355, 96, 2}, /* 01 (320x350,640x350) */
{9, 657, 448, 405, 96, 2}, /* 02 (360x400,720x400) */
{9, 657, 448, 355, 96, 2}, /* 03 (720x350) */
@@ -818,7 +833,8 @@ static const struct XGI_LCDDesStruct XGI_CetLCDDes1280x1024x75Data[] = {
/* Scaling LCD 75Hz */
static const struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesDatax75[] = {
{9, 657, 448, 405, 96, 2}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{9, 657, 448, 355, 96, 2}, /* ; 01 (320x350,640x350) */
{9, 738, 448, 405, 108, 2}, /* ; 02 (360x400,720x400) */
{9, 738, 448, 355, 108, 2}, /* ; 03 (720x350) */
@@ -873,7 +889,8 @@ static const struct SiS_TVData XGI_ExtNTSCData[] = {
static const struct SiS_TVData XGI_St1HiTVData[] = {
{1, 1, 892, 563, 690, 800, 0, 0, 0}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 892, 563, 690, 700, 0, 0, 0}, /* 01 (320x350,640x350) */
{1, 1, 1000, 563, 785, 800, 0, 0, 0}, /* 02 (360x400,720x400) */
{1, 1, 1000, 563, 785, 700, 0, 0, 0}, /* 03 (720x350) */
@@ -883,7 +900,8 @@ static const struct SiS_TVData XGI_St1HiTVData[] = {
static const struct SiS_TVData XGI_St2HiTVData[] = {
{3, 1, 840, 483, 1648, 960, 0x032, 0, 0}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 892, 563, 690, 700, 0, 0, 0}, /* 01 (320x350,640x350) */
{3, 1, 840, 483, 1648, 960, 0x032, 0, 0}, /* 02 (360x400,720x400) */
{1, 1, 1000, 563, 785, 700, 0, 0, 0}, /* 03 (720x350) */
@@ -893,7 +911,8 @@ static const struct SiS_TVData XGI_St2HiTVData[] = {
static const struct SiS_TVData XGI_ExtHiTVData[] = {
{6, 1, 840, 563, 1632, 960, 0, 0, 0}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{3, 1, 960, 563, 1632, 960, 0, 0, 0}, /* 01 (320x350,640x350) */
{3, 1, 840, 483, 1632, 960, 0, 0, 0}, /* 02 (360x400,720x400) */
{3, 1, 960, 563, 1632, 960, 0, 0, 0}, /* 03 (720x350) */
@@ -948,7 +967,8 @@ static const struct SiS_TVData XGI_StYPbPr525pData[] = {
static const struct SiS_TVData XGI_ExtYPbPr750pData[] = {
{ 3, 1, 935, 470, 1130, 680, 50, 0, 0}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{24, 7, 935, 420, 1130, 680, 50, 0, 0}, /* 01 (320x350,640x350) */
{ 3, 1, 935, 470, 1130, 680, 50, 0, 0}, /* 02 (360x400,720x400) */
{24, 7, 935, 420, 1130, 680, 50, 0, 0}, /* 03 (720x350) */
@@ -1269,7 +1289,8 @@ static const struct SiS_LVDSData XGI_LVDSNoScalingDatax75[] = {
{1312, 800, 1312, 800}, /* ; 06 (1024x768x75Hz) */
{1688, 1066, 1688, 1066}, /* ; 07 (1280x1024x75Hz) */
{1688, 1066, 1688, 1066}, /* ; 08 (1400x1050x75Hz)
- ;;[ycchen] 12/19/02 */
+ * ;;[ycchen] 12/19/02
+ */
{2160, 1250, 2160, 1250}, /* ; 09 (1600x1200x75Hz) */
{1688, 806, 1688, 806}, /* ; 0A (1280x768x75Hz) */
};
@@ -1364,7 +1385,8 @@ static const struct SiS_LVDSData XGI_LVDS1600x1200Des_1[] = {
static const struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesData[] = {
{0, 648, 448, 405, 96, 2}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{0, 648, 448, 355, 96, 2}, /* 01 (320x350,640x350) */
{0, 648, 448, 405, 96, 2}, /* 02 (360x400,720x400) */
{0, 648, 448, 355, 96, 2}, /* 03 (720x350) */
@@ -1435,7 +1457,8 @@ static const struct SiS_LVDSData XGI_LVDS1280x1024Des_2x75[] = {
/* Scaling LCD 75Hz */
static const struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesDatax75[] = {
{0, 648, 448, 405, 96, 2}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{0, 648, 448, 355, 96, 2}, /* ; 01 (320x350,640x350) */
{0, 729, 448, 405, 108, 2}, /* ; 02 (360x400,720x400) */
{0, 729, 448, 355, 108, 2}, /* ; 03 (720x350) */
diff --git a/drivers/staging/xgifb/vb_util.h b/drivers/staging/xgifb/vb_util.h
index f613f54d5..08db58b39 100644
--- a/drivers/staging/xgifb/vb_util.h
+++ b/drivers/staging/xgifb/vb_util.h
@@ -13,7 +13,7 @@ static inline u8 xgifb_reg_get(unsigned long port, u8 index)
}
static inline void xgifb_reg_and_or(unsigned long port, u8 index,
- unsigned data_and, unsigned data_or)
+ unsigned int data_and, unsigned int data_or)
{
u8 temp;
@@ -22,7 +22,8 @@ static inline void xgifb_reg_and_or(unsigned long port, u8 index,
xgifb_reg_set(port, index, temp);
}
-static inline void xgifb_reg_and(unsigned long port, u8 index, unsigned data_and)
+static inline void xgifb_reg_and(unsigned long port, u8 index,
+ unsigned int data_and)
{
u8 temp;
@@ -31,7 +32,8 @@ static inline void xgifb_reg_and(unsigned long port, u8 index, unsigned data_and
xgifb_reg_set(port, index, temp);
}
-static inline void xgifb_reg_or(unsigned long port, u8 index, unsigned data_or)
+static inline void xgifb_reg_or(unsigned long port, u8 index,
+ unsigned int data_or)
{
u8 temp;
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
index 8345fb457..bbdbf9c4e 100644
--- a/drivers/target/iscsi/Kconfig
+++ b/drivers/target/iscsi/Kconfig
@@ -7,3 +7,5 @@ config ISCSI_TARGET
help
Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
Target Mode Stack.
+
+source "drivers/target/iscsi/cxgbit/Kconfig"
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
index 0f43be9c3..0f18295e0 100644
--- a/drivers/target/iscsi/Makefile
+++ b/drivers/target/iscsi/Makefile
@@ -18,3 +18,4 @@ iscsi_target_mod-y += iscsi_target_parameters.o \
iscsi_target_transport.o
obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o
+obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit/
diff --git a/drivers/target/iscsi/cxgbit/Kconfig b/drivers/target/iscsi/cxgbit/Kconfig
new file mode 100644
index 000000000..c9b6a3c75
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/Kconfig
@@ -0,0 +1,7 @@
+config ISCSI_TARGET_CXGB4
+ tristate "Chelsio iSCSI target offload driver"
+ depends on ISCSI_TARGET && CHELSIO_T4 && INET
+ select CHELSIO_T4_UWIRE
+ ---help---
+	  To compile this driver as a module, choose M here: the module
+ will be called cxgbit.
diff --git a/drivers/target/iscsi/cxgbit/Makefile b/drivers/target/iscsi/cxgbit/Makefile
new file mode 100644
index 000000000..bd56c073d
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/Makefile
@@ -0,0 +1,6 @@
+ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+ccflags-y += -Idrivers/target/iscsi
+
+obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit.o
+
+cxgbit-y := cxgbit_main.o cxgbit_cm.o cxgbit_target.o cxgbit_ddp.o
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h
new file mode 100644
index 000000000..625c7f6de
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CXGBIT_H__
+#define __CXGBIT_H__
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/completion.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/inet.h>
+#include <linux/wait.h>
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+
+#include <asm/byteorder.h>
+
+#include <net/net_namespace.h>
+
+#include <target/iscsi/iscsi_transport.h>
+#include <iscsi_target_parameters.h>
+#include <iscsi_target_login.h>
+
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+#include "l2t.h"
+#include "cxgb4_ppm.h"
+#include "cxgbit_lro.h"
+
+extern struct mutex cdev_list_lock;
+extern struct list_head cdev_list_head;
+struct cxgbit_np;
+
+struct cxgbit_sock;
+
+struct cxgbit_cmd {
+ struct scatterlist sg;
+ struct cxgbi_task_tag_info ttinfo;
+ bool setup_ddp;
+ bool release;
+};
+
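+/*
+ * Upper bound on an ISO (iSCSI segmentation offload) payload: limited
+ * both by what MAX_SKB_FRAGS pages can carry and by a 16-bit length.
+ */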
+#define CXGBIT_MAX_ISO_PAYLOAD \
+ min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
+
+struct cxgbit_iso_info {
+ u8 flags;
+ u32 mpdu;
+ u32 len;
+ u32 burst_len;
+};
+
+enum cxgbit_skcb_flags {
+ SKCBF_TX_NEED_HDR = (1 << 0), /* packet needs a header */
+ SKCBF_TX_FLAG_COMPL = (1 << 1), /* wr completion flag */
+ SKCBF_TX_ISO = (1 << 2), /* iso cpl in tx skb */
+ SKCBF_RX_LRO = (1 << 3), /* lro skb */
+};
+
+struct cxgbit_skb_rx_cb {
+ u8 opcode;
+ void *pdu_cb;
+ void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *);
+};
+
+struct cxgbit_skb_tx_cb {
+ u8 submode;
+ u32 extra_len;
+};
+
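+/*
+ * Per-skb private state kept in skb->cb. The flags/tx/rx view overlays
+ * the view used by the cxgb4 L2T code, whose l2t_skb_cb must stay the
+ * first member.
+ */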
+union cxgbit_skb_cb {
+ struct {
+ u8 flags;
+ union {
+ struct cxgbit_skb_tx_cb tx;
+ struct cxgbit_skb_rx_cb rx;
+ };
+ };
+
+ struct {
+ /* This member must be first. */
+ struct l2t_skb_cb l2t;
+ struct sk_buff *wr_next;
+ };
+};
+
+#define CXGBIT_SKB_CB(skb) ((union cxgbit_skb_cb *)&((skb)->cb[0]))
+#define cxgbit_skcb_flags(skb) (CXGBIT_SKB_CB(skb)->flags)
+#define cxgbit_skcb_submode(skb) (CXGBIT_SKB_CB(skb)->tx.submode)
+#define cxgbit_skcb_tx_wr_next(skb) (CXGBIT_SKB_CB(skb)->wr_next)
+#define cxgbit_skcb_tx_extralen(skb) (CXGBIT_SKB_CB(skb)->tx.extra_len)
+#define cxgbit_skcb_rx_opcode(skb) (CXGBIT_SKB_CB(skb)->rx.opcode)
+#define cxgbit_skcb_rx_backlog_fn(skb) (CXGBIT_SKB_CB(skb)->rx.backlog_fn)
+#define cxgbit_rx_pdu_cb(skb) (CXGBIT_SKB_CB(skb)->rx.pdu_cb)
+
+static inline void *cplhdr(struct sk_buff *skb)
+{
+ return skb->data;
+}
+
+enum cxgbit_cdev_flags {
+ CDEV_STATE_UP = 0,
+ CDEV_ISO_ENABLE,
+ CDEV_DDP_ENABLE,
+};
+
+#define NP_INFO_HASH_SIZE 32
+
+struct np_info {
+ struct np_info *next;
+ struct cxgbit_np *cnp;
+ unsigned int stid;
+};
+
+struct cxgbit_list_head {
+ struct list_head list;
+ /* device lock */
+ spinlock_t lock;
+};
+
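+/* One instance per cxgb4 adapter; linked on cdev_list_head, kref counted. */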
+struct cxgbit_device {
+ struct list_head list;
+ struct cxgb4_lld_info lldi;
+ struct np_info *np_hash_tab[NP_INFO_HASH_SIZE];
+ /* np lock */
+ spinlock_t np_lock;
+ u8 selectq[MAX_NPORTS][2];
+ struct cxgbit_list_head cskq;
+ u32 mdsl;
+ struct kref kref;
+ unsigned long flags;
+};
+
+struct cxgbit_wr_wait {
+ struct completion completion;
+ int ret;
+};
+
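+/*
+ * Connection life cycle, roughly mirroring a TCP close: MORIBUND
+ * means both half-closes are in flight and only the final CPL is
+ * awaited before the state becomes DEAD.
+ */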
+enum cxgbit_csk_state {
+ CSK_STATE_IDLE = 0,
+ CSK_STATE_LISTEN,
+ CSK_STATE_CONNECTING,
+ CSK_STATE_ESTABLISHED,
+ CSK_STATE_ABORTING,
+ CSK_STATE_CLOSING,
+ CSK_STATE_MORIBUND,
+ CSK_STATE_DEAD,
+};
+
+enum cxgbit_csk_flags {
+ CSK_TX_DATA_SENT = 0,
+ CSK_LOGIN_PDU_DONE,
+ CSK_LOGIN_DONE,
+ CSK_DDP_ENABLE,
+};
+
+struct cxgbit_sock_common {
+ struct cxgbit_device *cdev;
+ struct sockaddr_storage local_addr;
+ struct sockaddr_storage remote_addr;
+ struct cxgbit_wr_wait wr_wait;
+ enum cxgbit_csk_state state;
+ unsigned long flags;
+};
+
+struct cxgbit_np {
+ struct cxgbit_sock_common com;
+ wait_queue_head_t accept_wait;
+ struct iscsi_np *np;
+ struct completion accept_comp;
+ struct list_head np_accept_list;
+ /* np accept lock */
+ spinlock_t np_accept_lock;
+ struct kref kref;
+ unsigned int stid;
+};
+
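+/*
+ * One offloaded iSCSI connection. rxq/txq carry PDU traffic, ppodq
+ * carries DDP page-pod writes, backlogq defers CPLs that arrive while
+ * the lock is owned, and skbq holds preallocated skbs for control
+ * messages such as the abort request.
+ */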
+struct cxgbit_sock {
+ struct cxgbit_sock_common com;
+ struct cxgbit_np *cnp;
+ struct iscsi_conn *conn;
+ struct l2t_entry *l2t;
+ struct dst_entry *dst;
+ struct list_head list;
+ struct sk_buff_head rxq;
+ struct sk_buff_head txq;
+ struct sk_buff_head ppodq;
+ struct sk_buff_head backlogq;
+ struct sk_buff_head skbq;
+ struct sk_buff *wr_pending_head;
+ struct sk_buff *wr_pending_tail;
+ struct sk_buff *skb;
+ struct sk_buff *lro_skb;
+ struct sk_buff *lro_hskb;
+ struct list_head accept_node;
+ /* socket lock */
+ spinlock_t lock;
+ wait_queue_head_t waitq;
+ wait_queue_head_t ack_waitq;
+ bool lock_owner;
+ struct kref kref;
+ u32 max_iso_npdu;
+ u32 wr_cred;
+ u32 wr_una_cred;
+ u32 wr_max_cred;
+ u32 snd_una;
+ u32 tid;
+ u32 snd_nxt;
+ u32 rcv_nxt;
+ u32 smac_idx;
+ u32 tx_chan;
+ u32 mtu;
+ u32 write_seq;
+ u32 rx_credits;
+ u32 snd_win;
+ u32 rcv_win;
+ u16 mss;
+ u16 emss;
+ u16 plen;
+ u16 rss_qid;
+ u16 txq_idx;
+ u16 ctrlq_idx;
+ u8 tos;
+ u8 port_id;
+#define CXGBIT_SUBMODE_HCRC 0x1
+#define CXGBIT_SUBMODE_DCRC 0x2
+ u8 submode;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u8 dcb_priority;
+#endif
+ u8 snd_wscale;
+};
+
+void _cxgbit_free_cdev(struct kref *kref);
+void _cxgbit_free_csk(struct kref *kref);
+void _cxgbit_free_cnp(struct kref *kref);
+
+static inline void cxgbit_get_cdev(struct cxgbit_device *cdev)
+{
+ kref_get(&cdev->kref);
+}
+
+static inline void cxgbit_put_cdev(struct cxgbit_device *cdev)
+{
+ kref_put(&cdev->kref, _cxgbit_free_cdev);
+}
+
+static inline void cxgbit_get_csk(struct cxgbit_sock *csk)
+{
+ kref_get(&csk->kref);
+}
+
+static inline void cxgbit_put_csk(struct cxgbit_sock *csk)
+{
+ kref_put(&csk->kref, _cxgbit_free_csk);
+}
+
+static inline void cxgbit_get_cnp(struct cxgbit_np *cnp)
+{
+ kref_get(&cnp->kref);
+}
+
+static inline void cxgbit_put_cnp(struct cxgbit_np *cnp)
+{
+ kref_put(&cnp->kref, _cxgbit_free_cnp);
+}
+
+static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk)
+{
+ csk->wr_pending_tail = NULL;
+ csk->wr_pending_head = NULL;
+}
+
+static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk)
+{
+ return csk->wr_pending_head;
+}
+
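+/*
+ * Track an unacknowledged work request: take an extra reference on
+ * the skb and chain it on the singly-linked pending list kept in the
+ * skb control block.
+ */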
+static inline void
+cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ cxgbit_skcb_tx_wr_next(skb) = NULL;
+
+ skb_get(skb);
+
+ if (!csk->wr_pending_head)
+ csk->wr_pending_head = skb;
+ else
+ cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
+ csk->wr_pending_tail = skb;
+}
+
+static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb = csk->wr_pending_head;
+
+ if (likely(skb)) {
+ csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb);
+ cxgbit_skcb_tx_wr_next(skb) = NULL;
+ }
+ return skb;
+}
+
+typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *,
+ struct sk_buff *);
+
+int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
+int cxgbit_setup_conn_digest(struct cxgbit_sock *);
+int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
+void cxgbit_free_np(struct iscsi_np *);
+void cxgbit_free_conn(struct iscsi_conn *);
+extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
+int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
+int cxgbit_rx_data_ack(struct cxgbit_sock *);
+int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *,
+ struct l2t_entry *);
+void cxgbit_push_tx_frames(struct cxgbit_sock *);
+int cxgbit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
+int cxgbit_xmit_pdu(struct iscsi_conn *, struct iscsi_cmd *,
+ struct iscsi_datain_req *, const void *, u32);
+void cxgbit_get_r2t_ttt(struct iscsi_conn *, struct iscsi_cmd *,
+ struct iscsi_r2t *);
+u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *);
+int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *);
+void cxgbit_get_rx_pdu(struct iscsi_conn *);
+int cxgbit_validate_params(struct iscsi_conn *);
+struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);
+
+/* DDP */
+int cxgbit_ddp_init(struct cxgbit_device *);
+int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
+int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *);
+void cxgbit_release_cmd(struct iscsi_conn *, struct iscsi_cmd *);
+
+static inline
+struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
+{
+ return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm);
+}
+#endif /* __CXGBIT_H__ */
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
new file mode 100644
index 000000000..0ae0b131a
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -0,0 +1,2086 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <net/route.h>
+#include <net/tcp.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+
+#include "cxgbit.h"
+#include "clip_tbl.h"
+
+static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
+{
+ wr_waitp->ret = 0;
+ reinit_completion(&wr_waitp->completion);
+}
+
+static void
+cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
+{
+ if (ret == CPL_ERR_NONE)
+ wr_waitp->ret = 0;
+ else
+ wr_waitp->ret = -EIO;
+
+ if (wr_waitp->ret)
+		pr_err("%s: err:%u\n", func, ret);
+
+ complete(&wr_waitp->completion);
+}
+
+static int
+cxgbit_wait_for_reply(struct cxgbit_device *cdev,
+ struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
+ const char *func)
+{
+ int ret;
+
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
+ wr_waitp->ret = -EIO;
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
+ if (!ret) {
+ pr_info("%s - Device %s not responding tid %u\n",
+ func, pci_name(cdev->lldi.pdev), tid);
+ wr_waitp->ret = -ETIMEDOUT;
+ }
+out:
+ if (wr_waitp->ret)
+ pr_info("%s: FW reply %d tid %u\n",
+ pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
+ return wr_waitp->ret;
+}
+
+/* Returns whether a CPL status conveys negative advice. */
+static int cxgbit_is_neg_adv(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE ||
+ status == CPL_ERR_KEEPALV_NEG_ADVICE;
+}
+
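+/* Simple pointer hash selecting a bucket in the per-device np table. */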
+static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
+{
+ return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
+}
+
+static struct np_info *
+cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
+ unsigned int stid)
+{
+ struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);
+
+ if (p) {
+ int bucket = cxgbit_np_hashfn(cnp);
+
+ p->cnp = cnp;
+ p->stid = stid;
+ spin_lock(&cdev->np_lock);
+ p->next = cdev->np_hash_tab[bucket];
+ cdev->np_hash_tab[bucket] = p;
+ spin_unlock(&cdev->np_lock);
+ }
+
+ return p;
+}
+
+static int
+cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+ int stid = -1, bucket = cxgbit_np_hashfn(cnp);
+ struct np_info *p;
+
+ spin_lock(&cdev->np_lock);
+ for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
+ if (p->cnp == cnp) {
+ stid = p->stid;
+ break;
+ }
+ }
+ spin_unlock(&cdev->np_lock);
+
+ return stid;
+}
+
+static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+ int stid = -1, bucket = cxgbit_np_hashfn(cnp);
+ struct np_info *p, **prev = &cdev->np_hash_tab[bucket];
+
+ spin_lock(&cdev->np_lock);
+ for (p = *prev; p; prev = &p->next, p = p->next) {
+ if (p->cnp == cnp) {
+ stid = p->stid;
+ *prev = p->next;
+ kfree(p);
+ break;
+ }
+ }
+ spin_unlock(&cdev->np_lock);
+
+ return stid;
+}
+
+void _cxgbit_free_cnp(struct kref *kref)
+{
+ struct cxgbit_np *cnp;
+
+ cnp = container_of(kref, struct cxgbit_np, kref);
+ kfree(cnp);
+}
+
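+/*
+ * Start an IPv6 hardware listener: claim a CLIP (compressed local IP)
+ * table entry for non-wildcard addresses, then ask the firmware to
+ * create the server and wait up to 10s for its reply.
+ */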
+static int
+cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
+ struct cxgbit_np *cnp)
+{
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+ &cnp->com.local_addr;
+ int addr_type;
+ int ret;
+
+ pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
+ __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);
+
+ addr_type = ipv6_addr_type((const struct in6_addr *)
+ &sin6->sin6_addr);
+ if (addr_type != IPV6_ADDR_ANY) {
+ ret = cxgb4_clip_get(cdev->lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+ if (ret) {
+ pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
+ sin6->sin6_addr.s6_addr, ret);
+ return -ENOMEM;
+ }
+ }
+
+ cxgbit_get_cnp(cnp);
+ cxgbit_init_wr_wait(&cnp->com.wr_wait);
+
+ ret = cxgb4_create_server6(cdev->lldi.ports[0],
+ stid, &sin6->sin6_addr,
+ sin6->sin6_port,
+ cdev->lldi.rxq_ids[0]);
+ if (!ret)
+ ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
+ 0, 10, __func__);
+ else if (ret > 0)
+ ret = net_xmit_errno(ret);
+ else
+ cxgbit_put_cnp(cnp);
+
+ if (ret) {
+ if (ret != -ETIMEDOUT)
+ cxgb4_clip_release(cdev->lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+
+ pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
+ ret, stid, sin6->sin6_addr.s6_addr,
+ ntohs(sin6->sin6_port));
+ }
+
+ return ret;
+}
+
+static int
+cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
+ struct cxgbit_np *cnp)
+{
+ struct sockaddr_in *sin = (struct sockaddr_in *)
+ &cnp->com.local_addr;
+ int ret;
+
+ pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
+ __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);
+
+ cxgbit_get_cnp(cnp);
+ cxgbit_init_wr_wait(&cnp->com.wr_wait);
+
+ ret = cxgb4_create_server(cdev->lldi.ports[0],
+ stid, sin->sin_addr.s_addr,
+ sin->sin_port, 0,
+ cdev->lldi.rxq_ids[0]);
+ if (!ret)
+ ret = cxgbit_wait_for_reply(cdev,
+ &cnp->com.wr_wait,
+ 0, 10, __func__);
+ else if (ret > 0)
+ ret = net_xmit_errno(ret);
+ else
+ cxgbit_put_cnp(cnp);
+
+ if (ret)
+ pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
+ ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
+ return ret;
+}
+
+struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
+{
+ struct cxgbit_device *cdev;
+ u8 i;
+
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+
+ for (i = 0; i < lldi->nports; i++) {
+ if (lldi->ports[i] == ndev) {
+ if (port_id)
+ *port_id = i;
+ return cdev;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
+{
+ if (ndev->priv_flags & IFF_BONDING) {
+ pr_err("Bond devices are not supported. Interface:%s\n",
+ ndev->name);
+ return NULL;
+ }
+
+ if (is_vlan_dev(ndev))
+ return vlan_dev_real_dev(ndev);
+
+ return ndev;
+}
+
+static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
+{
+ struct net_device *ndev;
+
+ ndev = __ip_dev_find(&init_net, saddr, false);
+ if (!ndev)
+ return NULL;
+
+ return cxgbit_get_real_dev(ndev);
+}
+
+static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
+{
+ struct net_device *ndev = NULL;
+ bool found = false;
+
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ for_each_netdev_rcu(&init_net, ndev)
+ if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return NULL;
+ return cxgbit_get_real_dev(ndev);
+}
+
+static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
+{
+ struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
+ int ss_family = sockaddr->ss_family;
+ struct net_device *ndev = NULL;
+ struct cxgbit_device *cdev = NULL;
+
+ rcu_read_lock();
+ if (ss_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)sockaddr;
+ ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
+ } else if (ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sockaddr;
+ ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
+ }
+ if (!ndev)
+ goto out;
+
+ cdev = cxgbit_find_device(ndev, NULL);
+out:
+ rcu_read_unlock();
+ return cdev;
+}
+
+static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
+{
+ struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
+ int ss_family = sockaddr->ss_family;
+ int addr_type;
+
+ if (ss_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)sockaddr;
+ if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
+ return true;
+ } else if (ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sockaddr;
+ addr_type = ipv6_addr_type((const struct in6_addr *)
+ &sin6->sin6_addr);
+ if (addr_type == IPV6_ADDR_ANY)
+ return true;
+ }
+ return false;
+}
+
+static int
+__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+ int stid, ret;
+ int ss_family = cnp->com.local_addr.ss_family;
+
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags))
+ return -EINVAL;
+
+ stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
+ if (stid < 0)
+ return -EINVAL;
+
+ if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
+ cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
+ return -EINVAL;
+ }
+
+ if (ss_family == AF_INET)
+ ret = cxgbit_create_server4(cdev, stid, cnp);
+ else
+ ret = cxgbit_create_server6(cdev, stid, cnp);
+
+ if (ret) {
+ if (ret != -ETIMEDOUT)
+ cxgb4_free_stid(cdev->lldi.tids, stid,
+ ss_family);
+ cxgbit_np_hash_del(cdev, cnp);
+ return ret;
+ }
+ return ret;
+}
+
+static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
+{
+ struct cxgbit_device *cdev;
+ int ret = -1;
+
+ mutex_lock(&cdev_list_lock);
+ cdev = cxgbit_find_np_cdev(cnp);
+ if (!cdev)
+ goto out;
+
+ if (cxgbit_np_hash_find(cdev, cnp) >= 0)
+ goto out;
+
+ if (__cxgbit_setup_cdev_np(cdev, cnp))
+ goto out;
+
+ cnp->com.cdev = cdev;
+ ret = 0;
+out:
+ mutex_unlock(&cdev_list_lock);
+ return ret;
+}
+
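+/*
+ * Wildcard listen: instantiate the listener on every registered
+ * adapter; succeeds if at least one adapter accepted it.
+ */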
+static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
+{
+ struct cxgbit_device *cdev;
+ int ret;
+ u32 count = 0;
+
+ mutex_lock(&cdev_list_lock);
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
+ mutex_unlock(&cdev_list_lock);
+ return -1;
+ }
+ }
+
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ ret = __cxgbit_setup_cdev_np(cdev, cnp);
+ if (ret == -ETIMEDOUT)
+ break;
+ if (ret != 0)
+ continue;
+ count++;
+ }
+ mutex_unlock(&cdev_list_lock);
+
+ return count ? 0 : -1;
+}
+
+int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
+{
+ struct cxgbit_np *cnp;
+ int ret;
+
+ if ((ksockaddr->ss_family != AF_INET) &&
+ (ksockaddr->ss_family != AF_INET6))
+ return -EINVAL;
+
+ cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
+ if (!cnp)
+ return -ENOMEM;
+
+ init_waitqueue_head(&cnp->accept_wait);
+ init_completion(&cnp->com.wr_wait.completion);
+ init_completion(&cnp->accept_comp);
+ INIT_LIST_HEAD(&cnp->np_accept_list);
+ spin_lock_init(&cnp->np_accept_lock);
+ kref_init(&cnp->kref);
+ memcpy(&np->np_sockaddr, ksockaddr,
+ sizeof(struct sockaddr_storage));
+ memcpy(&cnp->com.local_addr, &np->np_sockaddr,
+ sizeof(cnp->com.local_addr));
+
+ cnp->np = np;
+ cnp->com.cdev = NULL;
+
+ if (cxgbit_inaddr_any(cnp))
+ ret = cxgbit_setup_all_np(cnp);
+ else
+ ret = cxgbit_setup_cdev_np(cnp);
+
+ if (ret) {
+ cxgbit_put_cnp(cnp);
+ return -EINVAL;
+ }
+
+ np->np_context = cnp;
+ cnp->com.state = CSK_STATE_LISTEN;
+ return 0;
+}
+
+static void
+cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
+ struct cxgbit_sock *csk)
+{
+ conn->login_family = np->np_sockaddr.ss_family;
+ conn->login_sockaddr = csk->com.remote_addr;
+ conn->local_sockaddr = csk->com.local_addr;
+}
+
+int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+{
+ struct cxgbit_np *cnp = np->np_context;
+ struct cxgbit_sock *csk;
+ int ret = 0;
+
+accept_wait:
+ ret = wait_for_completion_interruptible(&cnp->accept_comp);
+ if (ret)
+ return -ENODEV;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
+ spin_unlock_bh(&np->np_thread_lock);
+		/*
+		 * No point in stalling here when np_thread is in
+		 * RESET/SHUTDOWN/EXIT state - bail.
+		 */
+ return -ENODEV;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ spin_lock_bh(&cnp->np_accept_lock);
+ if (list_empty(&cnp->np_accept_list)) {
+ spin_unlock_bh(&cnp->np_accept_lock);
+ goto accept_wait;
+ }
+
+ csk = list_first_entry(&cnp->np_accept_list,
+ struct cxgbit_sock,
+ accept_node);
+
+ list_del_init(&csk->accept_node);
+ spin_unlock_bh(&cnp->np_accept_lock);
+ conn->context = csk;
+ csk->conn = conn;
+
+ cxgbit_set_conn_info(np, conn, csk);
+ return 0;
+}
+
+static int
+__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+ int stid, ret;
+ bool ipv6 = false;
+
+ stid = cxgbit_np_hash_del(cdev, cnp);
+ if (stid < 0)
+ return -EINVAL;
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags))
+ return -EINVAL;
+
+ if (cnp->np->np_sockaddr.ss_family == AF_INET6)
+ ipv6 = true;
+
+ cxgbit_get_cnp(cnp);
+ cxgbit_init_wr_wait(&cnp->com.wr_wait);
+ ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
+ cdev->lldi.rxq_ids[0], ipv6);
+
+ if (ret > 0)
+ ret = net_xmit_errno(ret);
+
+ if (ret) {
+ cxgbit_put_cnp(cnp);
+ return ret;
+ }
+
+ ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
+ 0, 10, __func__);
+ if (ret == -ETIMEDOUT)
+ return ret;
+
+ if (ipv6 && cnp->com.cdev) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
+ cxgb4_clip_release(cdev->lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr,
+ 1);
+ }
+
+ cxgb4_free_stid(cdev->lldi.tids, stid,
+ cnp->com.local_addr.ss_family);
+ return 0;
+}
+
+static void cxgbit_free_all_np(struct cxgbit_np *cnp)
+{
+ struct cxgbit_device *cdev;
+ int ret;
+
+ mutex_lock(&cdev_list_lock);
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ ret = __cxgbit_free_cdev_np(cdev, cnp);
+ if (ret == -ETIMEDOUT)
+ break;
+ }
+ mutex_unlock(&cdev_list_lock);
+}
+
+static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
+{
+ struct cxgbit_device *cdev;
+ bool found = false;
+
+ mutex_lock(&cdev_list_lock);
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ if (cdev == cnp->com.cdev) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ goto out;
+
+ __cxgbit_free_cdev_np(cdev, cnp);
+out:
+ mutex_unlock(&cdev_list_lock);
+}
+
+void cxgbit_free_np(struct iscsi_np *np)
+{
+ struct cxgbit_np *cnp = np->np_context;
+
+ cnp->com.state = CSK_STATE_DEAD;
+ if (cnp->com.cdev)
+ cxgbit_free_cdev_np(cnp);
+ else
+ cxgbit_free_all_np(cnp);
+
+ np->np_context = NULL;
+ cxgbit_put_cnp(cnp);
+}
+
+static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ struct cpl_close_con_req *req;
+ unsigned int len = roundup(sizeof(struct cpl_close_con_req), 16);
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ req = (struct cpl_close_con_req *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
+ csk->tid));
+ req->rsvd = 0;
+
+ cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
+ __skb_queue_tail(&csk->txq, skb);
+ cxgbit_push_tx_frames(csk);
+}
+
+static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
+{
+ pr_debug("%s cxgbit_device %p\n", __func__, handle);
+ kfree_skb(skb);
+}
+
+static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
+{
+ struct cxgbit_device *cdev = handle;
+ struct cpl_abort_req *req = cplhdr(skb);
+
+ pr_debug("%s cdev %p\n", __func__, cdev);
+ req->cmd = CPL_ABORT_NO_RST;
+ cxgbit_ofld_send(cdev, skb);
+}
+
+static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
+{
+ struct cpl_abort_req *req;
+ unsigned int len = roundup(sizeof(*req), 16);
+ struct sk_buff *skb;
+
+ pr_debug("%s: csk %p tid %u; state %d\n",
+ __func__, csk, csk->tid, csk->com.state);
+
+ __skb_queue_purge(&csk->txq);
+
+ if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
+ cxgbit_send_tx_flowc_wr(csk);
+
+ skb = __skb_dequeue(&csk->skbq);
+ req = (struct cpl_abort_req *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
+ t4_set_arp_err_handler(skb, csk->com.cdev, cxgbit_abort_arp_failure);
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ,
+ csk->tid));
+ req->cmd = CPL_ABORT_SEND_RST;
+ return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
+}
+
+void cxgbit_free_conn(struct iscsi_conn *conn)
+{
+ struct cxgbit_sock *csk = conn->context;
+ bool release = false;
+
+ pr_debug("%s: state %d\n",
+ __func__, csk->com.state);
+
+ spin_lock_bh(&csk->lock);
+ switch (csk->com.state) {
+ case CSK_STATE_ESTABLISHED:
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+ csk->com.state = CSK_STATE_CLOSING;
+ cxgbit_send_halfclose(csk);
+ } else {
+ csk->com.state = CSK_STATE_ABORTING;
+ cxgbit_send_abort_req(csk);
+ }
+ break;
+ case CSK_STATE_CLOSING:
+ csk->com.state = CSK_STATE_MORIBUND;
+ cxgbit_send_halfclose(csk);
+ break;
+ case CSK_STATE_DEAD:
+ release = true;
+ break;
+ default:
+ pr_err("%s: csk %p; state %d\n",
+ __func__, csk, csk->com.state);
+ }
+ spin_unlock_bh(&csk->lock);
+
+ if (release)
+ cxgbit_put_csk(csk);
+}
+
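+/*
+ * Derive the effective MSS from the negotiated hardware MTU index,
+ * net of IP/TCP header (and TCP timestamp option) overhead.
+ */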
+static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
+{
+ csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
+ ((csk->com.remote_addr.ss_family == AF_INET) ?
+ sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
+ sizeof(struct tcphdr);
+ csk->mss = csk->emss;
+ if (TCPOPT_TSTAMP_G(opt))
+ csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
+ if (csk->emss < 128)
+ csk->emss = 128;
+ if (csk->emss & 7)
+ pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+ TCPOPT_MSS_G(opt), csk->mss, csk->emss);
+ pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
+ csk->mss, csk->emss);
+}
+
+static void cxgbit_free_skb(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+
+ __skb_queue_purge(&csk->txq);
+ __skb_queue_purge(&csk->rxq);
+ __skb_queue_purge(&csk->backlogq);
+ __skb_queue_purge(&csk->ppodq);
+ __skb_queue_purge(&csk->skbq);
+
+ while ((skb = cxgbit_sock_dequeue_wr(csk)))
+ kfree_skb(skb);
+
+ __kfree_skb(csk->lro_hskb);
+}
+
+void _cxgbit_free_csk(struct kref *kref)
+{
+ struct cxgbit_sock *csk;
+ struct cxgbit_device *cdev;
+
+ csk = container_of(kref, struct cxgbit_sock, kref);
+
+ pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);
+
+ if (csk->com.local_addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+ &csk->com.local_addr;
+ cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
+ (const u32 *)
+ &sin6->sin6_addr.s6_addr, 1);
+ }
+
+ cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid);
+ dst_release(csk->dst);
+ cxgb4_l2t_release(csk->l2t);
+
+ cdev = csk->com.cdev;
+ spin_lock_bh(&cdev->cskq.lock);
+ list_del(&csk->list);
+ spin_unlock_bh(&cdev->cskq.lock);
+
+ cxgbit_free_skb(csk);
+ cxgbit_put_cdev(cdev);
+
+ kfree(csk);
+}
+
+static void
+cxgbit_get_tuple_info(struct cpl_pass_accept_req *req, int *iptype,
+ __u8 *local_ip, __u8 *peer_ip, __be16 *local_port,
+ __be16 *peer_port)
+{
+ u32 eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+ u32 ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+ struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
+ struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
+ struct tcphdr *tcp = (struct tcphdr *)
+ ((u8 *)(req + 1) + eth_len + ip_len);
+
+ if (ip->version == 4) {
+ pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n",
+ __func__,
+ ntohl(ip->saddr), ntohl(ip->daddr),
+ ntohs(tcp->source),
+ ntohs(tcp->dest));
+ *iptype = 4;
+ memcpy(peer_ip, &ip->saddr, 4);
+ memcpy(local_ip, &ip->daddr, 4);
+ } else {
+ pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n",
+ __func__,
+ ip6->saddr.s6_addr, ip6->daddr.s6_addr,
+ ntohs(tcp->source),
+ ntohs(tcp->dest));
+ *iptype = 6;
+ memcpy(peer_ip, ip6->saddr.s6_addr, 16);
+ memcpy(local_ip, ip6->daddr.s6_addr, 16);
+ }
+
+ *peer_port = tcp->source;
+ *local_port = tcp->dest;
+}
+
+static int
+cxgbit_our_interface(struct cxgbit_device *cdev, struct net_device *egress_dev)
+{
+ u8 i;
+
+ egress_dev = cxgbit_get_real_dev(egress_dev);
+ for (i = 0; i < cdev->lldi.nports; i++)
+ if (cdev->lldi.ports[i] == egress_dev)
+ return 1;
+ return 0;
+}
+
+static struct dst_entry *
+cxgbit_find_route6(struct cxgbit_device *cdev, __u8 *local_ip, __u8 *peer_ip,
+ __be16 local_port, __be16 peer_port, u8 tos,
+ __u32 sin6_scope_id)
+{
+ struct dst_entry *dst = NULL;
+
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ memcpy(&fl6.daddr, peer_ip, 16);
+ memcpy(&fl6.saddr, local_ip, 16);
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = sin6_scope_id;
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+ if (!dst)
+ goto out;
+ if (!cxgbit_our_interface(cdev, ip6_dst_idev(dst)->dev) &&
+ !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+ dst_release(dst);
+ dst = NULL;
+ }
+ }
+out:
+ return dst;
+}
+
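+/*
+ * Resolve an IPv4 route for the 4-tuple and reject it unless the
+ * egress interface belongs to this adapter or is loopback.
+ */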
+static struct dst_entry *
+cxgbit_find_route(struct cxgbit_device *cdev, __be32 local_ip, __be32 peer_ip,
+ __be16 local_port, __be16 peer_port, u8 tos)
+{
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct neighbour *n;
+
+ rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip,
+ local_ip,
+ peer_port, local_port, IPPROTO_TCP,
+ tos, 0);
+ if (IS_ERR(rt))
+ return NULL;
+ n = dst_neigh_lookup(&rt->dst, &peer_ip);
+ if (!n)
+ return NULL;
+ if (!cxgbit_our_interface(cdev, n->dev) &&
+ !(n->dev->flags & IFF_LOOPBACK)) {
+ neigh_release(n);
+ dst_release(&rt->dst);
+ return NULL;
+ }
+ neigh_release(n);
+ return &rt->dst;
+}
+
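+/* Scale the default 256KB send/receive windows with link speed (per 10Gb/s). */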
+static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
+{
+ unsigned int linkspeed;
+ u8 scale;
+
+ linkspeed = pi->link_cfg.speed;
+ scale = linkspeed / SPEED_10000;
+
+#define CXGBIT_10G_RCV_WIN (256 * 1024)
+ csk->rcv_win = CXGBIT_10G_RCV_WIN;
+ if (scale)
+ csk->rcv_win *= scale;
+
+#define CXGBIT_10G_SND_WIN (256 * 1024)
+ csk->snd_win = CXGBIT_10G_SND_WIN;
+ if (scale)
+ csk->snd_win *= scale;
+
+ pr_debug("%s snd_win %d rcv_win %d\n",
+ __func__, csk->snd_win, csk->rcv_win);
+}
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
+{
+ return ndev->dcbnl_ops->getstate(ndev);
+}
+
+static int cxgbit_select_priority(int pri_mask)
+{
+ if (!pri_mask)
+ return 0;
+
+ return (ffs(pri_mask) - 1);
+}
+
+static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
+{
+ int ret;
+ u8 caps;
+
+ struct dcb_app iscsi_dcb_app = {
+ .protocol = local_port
+ };
+
+ ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
+
+ if (ret)
+ return 0;
+
+ if (caps & DCB_CAP_DCBX_VER_IEEE) {
+ iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
+
+ ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+
+ } else if (caps & DCB_CAP_DCBX_VER_CEE) {
+ iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
+
+ ret = dcb_getapp(ndev, &iscsi_dcb_app);
+ }
+
+ pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));
+
+ return cxgbit_select_priority(ret);
+}
+#endif
+
+static int
+cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
+ u16 local_port, struct dst_entry *dst,
+ struct cxgbit_device *cdev)
+{
+ struct neighbour *n;
+ int ret, step;
+ struct net_device *ndev;
+ u16 rxq_idx, port_id;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u8 priority = 0;
+#endif
+
+ n = dst_neigh_lookup(dst, peer_ip);
+ if (!n)
+ return -ENODEV;
+
+ rcu_read_lock();
+ ret = -ENOMEM;
+ if (n->dev->flags & IFF_LOOPBACK) {
+ if (iptype == 4)
+ ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
+ else if (IS_ENABLED(CONFIG_IPV6))
+ ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
+ else
+ ndev = NULL;
+
+ if (!ndev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
+ if (!csk->l2t)
+ goto out;
+ csk->mtu = ndev->mtu;
+ csk->tx_chan = cxgb4_port_chan(ndev);
+ csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1;
+ step = cdev->lldi.ntxq / cdev->lldi.nchan;
+ csk->txq_idx = cxgb4_port_idx(ndev) * step;
+ step = cdev->lldi.nrxq / cdev->lldi.nchan;
+ csk->ctrlq_idx = cxgb4_port_idx(ndev);
+ csk->rss_qid = cdev->lldi.rxq_ids[cxgb4_port_idx(ndev) * step];
+ csk->port_id = cxgb4_port_idx(ndev);
+ cxgbit_set_tcp_window(csk, (struct port_info *)netdev_priv(ndev));
+ } else {
+ ndev = cxgbit_get_real_dev(n->dev);
+ if (!ndev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ if (cxgbit_get_iscsi_dcb_state(ndev))
+ priority = cxgbit_get_iscsi_dcb_priority(ndev,
+ local_port);
+
+ csk->dcb_priority = priority;
+
+ csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
+#else
+ csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
+#endif
+ if (!csk->l2t)
+ goto out;
+ port_id = cxgb4_port_idx(ndev);
+ csk->mtu = dst_mtu(dst);
+ csk->tx_chan = cxgb4_port_chan(ndev);
+ csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1;
+ step = cdev->lldi.ntxq / cdev->lldi.nports;
+ csk->txq_idx = (port_id * step) + (cdev->selectq[port_id][0]++ % step);
+ csk->ctrlq_idx = cxgb4_port_idx(ndev);
+ step = cdev->lldi.nrxq / cdev->lldi.nports;
+ rxq_idx = (port_id * step) + (cdev->selectq[port_id][1]++ % step);
+ csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
+ csk->port_id = port_id;
+ cxgbit_set_tcp_window(csk, (struct port_info *)netdev_priv(ndev));
+ }
+ ret = 0;
+out:
+ rcu_read_unlock();
+ neigh_release(n);
+ return ret;
+}
+
+int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ int ret = 0;
+
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
+ kfree_skb(skb);
+ pr_err("%s - device not up - dropping\n", __func__);
+ return -EIO;
+ }
+
+ ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
+ if (ret < 0)
+ kfree_skb(skb);
+ return ret < 0 ? ret : 0;
+}
+
+static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
+{
+ struct cpl_tid_release *req;
+ unsigned int len = roundup(sizeof(*req), 16);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ req = (struct cpl_tid_release *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+ cxgbit_ofld_send(cdev, skb);
+}
+
+int
+cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
+ struct l2t_entry *l2e)
+{
+ int ret = 0;
+
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
+ kfree_skb(skb);
+ pr_err("%s - device not up - dropping\n", __func__);
+ return -EIO;
+ }
+
+ ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
+ if (ret < 0)
+ kfree_skb(skb);
+ return ret < 0 ? ret : 0;
+}
+
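+/*
+ * Pick the best entry from the adapter MTU table: account for IP/TCP
+ * (and timestamp option) header overhead so that the TCP payload is
+ * kept 8-byte aligned.
+ */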
+static void
+cxgbit_best_mtu(const unsigned short *mtus, unsigned short mtu,
+ unsigned int *idx, int use_ts, int ipv6)
+{
+ unsigned short hdr_size = (ipv6 ? sizeof(struct ipv6hdr) :
+ sizeof(struct iphdr)) +
+ sizeof(struct tcphdr) +
+ (use_ts ? round_up(TCPOLEN_TIMESTAMP,
+ 4) : 0);
+ unsigned short data_size = mtu - hdr_size;
+
+ cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+}
+
+static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ if (csk->com.state != CSK_STATE_ESTABLISHED) {
+ __kfree_skb(skb);
+ return;
+ }
+
+ cxgbit_ofld_send(csk->com.cdev, skb);
+}
+
+/*
+ * CPL connection rx data ack: host -> adapter.
+ * Send RX credits through an RX_DATA_ACK CPL message.
+ * Returns 0 on success, -1 if skb allocation fails.
+ */
+int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ struct cpl_rx_data_ack *req;
+ unsigned int len = roundup(sizeof(*req), 16);
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -1;
+
+ req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ set_wr_txq(skb, CPL_PRIORITY_ACK, csk->ctrlq_idx);
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
+ csk->tid));
+ req->credit_dack = cpu_to_be32(RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
+ RX_CREDITS_V(csk->rx_credits));
+
+ csk->rx_credits = 0;
+
+ spin_lock_bh(&csk->lock);
+ if (csk->lock_owner) {
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
+ __skb_queue_tail(&csk->backlogq, skb);
+ spin_unlock_bh(&csk->lock);
+ return 0;
+ }
+
+ cxgbit_send_rx_credits(csk, skb);
+ spin_unlock_bh(&csk->lock);
+
+ return 0;
+}
+
+#define FLOWC_WR_NPARAMS_MIN 9
+#define FLOWC_WR_NPARAMS_MAX 11
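+/*
+ * Pre-allocate the skbs this connection will need at times when
+ * allocation must not fail: three sized for ABORT_REQ/ABORT_RPL/flowc
+ * work requests, plus one zeroed skb used as the LRO header skb.
+ */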
+static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ u32 len, flowclen;
+ u8 i;
+
+ flowclen = offsetof(struct fw_flowc_wr,
+ mnemval[FLOWC_WR_NPARAMS_MAX]);
+
+ len = max_t(u32, sizeof(struct cpl_abort_req),
+ sizeof(struct cpl_abort_rpl));
+
+ len = max(len, flowclen);
+ len = roundup(len, 16);
+
+ for (i = 0; i < 3; i++) {
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+ __skb_queue_tail(&csk->skbq, skb);
+ }
+
+ skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
+ csk->lro_hskb = skb;
+
+ return 0;
+out:
+ __skb_queue_purge(&csk->skbq);
+ return -ENOMEM;
+}
+
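+/* Smallest TCP window-scale factor (0..14) that covers the given window. */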
+static u32 cxgbit_compute_wscale(u32 win)
+{
+ u32 wscale = 0;
+
+ while (wscale < 14 && (65535 << wscale) < win)
+ wscale++;
+ return wscale;
+}
+
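+/*
+ * Build and send the CPL_PASS_ACCEPT_RPL completing the passive open:
+ * opt0/opt2 encode the MSS index, window scale, L2T index, receive
+ * window and RSS queue for the new connection.
+ */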
+static void
+cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
+{
+ struct sk_buff *skb;
+ const struct tcphdr *tcph;
+ struct cpl_t5_pass_accept_rpl *rpl5;
+ unsigned int len = roundup(sizeof(*rpl5), 16);
+ unsigned int mtu_idx;
+ u64 opt0;
+ u32 opt2, hlen;
+ u32 wscale;
+ u32 win;
+
+ pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ cxgbit_put_csk(csk);
+ return;
+ }
+
+ rpl5 = (struct cpl_t5_pass_accept_rpl *)__skb_put(skb, len);
+ memset(rpl5, 0, len);
+
+ INIT_TP_WR(rpl5, csk->tid);
+ OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+ csk->tid));
+ cxgbit_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
+ req->tcpopt.tstamp,
+ (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgbit_compute_wscale(csk->rcv_win);
+ /*
+ * Specify the largest window that will fit in opt0. The
+ * remainder will be specified in the rx_data_ack.
+ */
+ win = csk->rcv_win >> 10;
+ if (win > RCV_BUFSIZ_M)
+ win = RCV_BUFSIZ_M;
+ opt0 = TCAM_BYPASS_F |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(mtu_idx) |
+ L2T_IDX_V(csk->l2t->idx) |
+ TX_CHAN_V(csk->tx_chan) |
+ SMAC_SEL_V(csk->smac_idx) |
+ DSCP_V(csk->tos >> 2) |
+ ULP_MODE_V(ULP_MODE_ISCSI) |
+ RCV_BUFSIZ_V(win);
+
+ opt2 = RX_CHANNEL_V(0) |
+ RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);
+
+ if (req->tcpopt.tstamp)
+ opt2 |= TSTAMPS_EN_F;
+ if (req->tcpopt.sack)
+ opt2 |= SACK_EN_F;
+ if (wscale)
+ opt2 |= WND_SCALE_EN_F;
+
+ hlen = ntohl(req->hdr_len);
+ tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
+ IP_HDR_LEN_G(hlen);
+
+ if (tcph->ece && tcph->cwr)
+ opt2 |= CCTRL_ECN_V(1);
+
+ opt2 |= RX_COALESCE_V(3);
+ opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
+
+ opt2 |= T5_ISS_F;
+ rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
+
+ opt2 |= T5_OPT_2_VALID_F;
+
+ rpl5->opt0 = cpu_to_be64(opt0);
+ rpl5->opt2 = cpu_to_be32(opt2);
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
+ t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
+ cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
+}
+
+static void
+cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbit_sock *csk = NULL;
+ struct cxgbit_np *cnp;
+ struct cpl_pass_accept_req *req = cplhdr(skb);
+ unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
+ struct tid_info *t = cdev->lldi.tids;
+ unsigned int tid = GET_TID(req);
+ u16 peer_mss = ntohs(req->tcpopt.mss);
+ unsigned short hdrs;
+
+ struct dst_entry *dst;
+ __u8 local_ip[16], peer_ip[16];
+ __be16 local_port, peer_port;
+ int ret;
+ int iptype;
+
+ pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
+ __func__, cdev, stid, tid);
+
+ cnp = lookup_stid(t, stid);
+ if (!cnp) {
+ pr_err("%s connect request on invalid stid %d\n",
+ __func__, stid);
+ goto rel_skb;
+ }
+
+ if (cnp->com.state != CSK_STATE_LISTEN) {
+ pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
+ __func__);
+ goto reject;
+ }
+
+ csk = lookup_tid(t, tid);
+ if (csk) {
+ pr_err("%s csk not null tid %u\n",
+ __func__, tid);
+ goto rel_skb;
+ }
+
+ cxgbit_get_tuple_info(req, &iptype, local_ip, peer_ip,
+ &local_port, &peer_port);
+
+ /* Find output route */
+ if (iptype == 4) {
+ pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
+ "lport %d rport %d peer_mss %d\n"
+ , __func__, cnp, tid,
+ local_ip, peer_ip, ntohs(local_port),
+ ntohs(peer_port), peer_mss);
+ dst = cxgbit_find_route(cdev, *(__be32 *)local_ip,
+ *(__be32 *)peer_ip,
+ local_port, peer_port,
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
+ } else {
+ pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
+ "lport %d rport %d peer_mss %d\n"
+ , __func__, cnp, tid,
+ local_ip, peer_ip, ntohs(local_port),
+ ntohs(peer_port), peer_mss);
+ dst = cxgbit_find_route6(cdev, local_ip, peer_ip,
+ local_port, peer_port,
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+ ((struct sockaddr_in6 *)
+ &cnp->com.local_addr)->sin6_scope_id);
+ }
+ if (!dst) {
+ pr_err("%s - failed to find dst entry!\n",
+ __func__);
+ goto reject;
+ }
+
+ csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
+ if (!csk) {
+ dst_release(dst);
+ goto rel_skb;
+ }
+
+ ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
+ dst, cdev);
+ if (ret) {
+ pr_err("%s - failed to allocate l2t entry!\n",
+ __func__);
+ dst_release(dst);
+ kfree(csk);
+ goto reject;
+ }
+
+ kref_init(&csk->kref);
+ init_completion(&csk->com.wr_wait.completion);
+
+ INIT_LIST_HEAD(&csk->accept_node);
+
+ hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
+ sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
+ if (peer_mss && csk->mtu > (peer_mss + hdrs))
+ csk->mtu = peer_mss + hdrs;
+
+ csk->com.state = CSK_STATE_CONNECTING;
+ csk->com.cdev = cdev;
+ csk->cnp = cnp;
+ csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+ csk->dst = dst;
+ csk->tid = tid;
+ csk->wr_cred = cdev->lldi.wr_cred -
+ DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
+ csk->wr_max_cred = csk->wr_cred;
+ csk->wr_una_cred = 0;
+
+ if (iptype == 4) {
+ struct sockaddr_in *sin = (struct sockaddr_in *)
+ &csk->com.local_addr;
+ sin->sin_family = AF_INET;
+ sin->sin_port = local_port;
+ sin->sin_addr.s_addr = *(__be32 *)local_ip;
+
+ sin = (struct sockaddr_in *)&csk->com.remote_addr;
+ sin->sin_family = AF_INET;
+ sin->sin_port = peer_port;
+ sin->sin_addr.s_addr = *(__be32 *)peer_ip;
+ } else {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+ &csk->com.local_addr;
+
+ sin6->sin6_family = PF_INET6;
+ sin6->sin6_port = local_port;
+ memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
+ cxgb4_clip_get(cdev->lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+
+ sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
+ sin6->sin6_family = PF_INET6;
+ sin6->sin6_port = peer_port;
+ memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
+ }
+
+ skb_queue_head_init(&csk->rxq);
+ skb_queue_head_init(&csk->txq);
+ skb_queue_head_init(&csk->ppodq);
+ skb_queue_head_init(&csk->backlogq);
+ skb_queue_head_init(&csk->skbq);
+ cxgbit_sock_reset_wr_list(csk);
+ spin_lock_init(&csk->lock);
+ init_waitqueue_head(&csk->waitq);
+ init_waitqueue_head(&csk->ack_waitq);
+ csk->lock_owner = false;
+
+ if (cxgbit_alloc_csk_skb(csk)) {
+ dst_release(dst);
+ kfree(csk);
+ goto rel_skb;
+ }
+
+ cxgbit_get_cdev(cdev);
+
+ spin_lock(&cdev->cskq.lock);
+ list_add_tail(&csk->list, &cdev->cskq.list);
+ spin_unlock(&cdev->cskq.lock);
+
+ cxgb4_insert_tid(t, csk, tid);
+ cxgbit_pass_accept_rpl(csk, req);
+ goto rel_skb;
+
+reject:
+ cxgbit_release_tid(cdev, tid);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static u32
+cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
+ u32 *flowclenp)
+{
+ u32 nparams, flowclen16, flowclen;
+
+ nparams = FLOWC_WR_NPARAMS_MIN;
+
+ if (csk->snd_wscale)
+ nparams++;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ nparams++;
+#endif
+ flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+ flowclen16 = DIV_ROUND_UP(flowclen, 16);
+ flowclen = flowclen16 * 16;
+ /*
+ * Return the number of 16-byte credits used by the flowc request.
+ * Pass back the nparams and actual flowc length if requested.
+ */
+ if (nparamsp)
+ *nparamsp = nparams;
+ if (flowclenp)
+ *flowclenp = flowclen;
+ return flowclen16;
+}
+
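+/*
+ * Send the FW_FLOWC_WR that introduces this connection to the firmware
+ * (channel, queues, sequence numbers, window sizes, MSS) before any TX
+ * data is sent; returns the number of 16-byte credits consumed.
+ */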
+u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
+{
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct fw_flowc_wr *flowc;
+ u32 nparams, flowclen16, flowclen;
+ struct sk_buff *skb;
+ u8 index;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
+#endif
+
+ flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);
+
+ skb = __skb_dequeue(&csk->skbq);
+ flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+ memset(flowc, 0, flowclen);
+
+ flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+ FW_FLOWC_WR_NPARAMS_V(nparams));
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
+ FW_WR_FLOWID_V(csk->tid));
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
+ flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
+ (csk->com.cdev->lldi.pf));
+ flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
+ flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
+ flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
+ flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
+ flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
+ flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
+ flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
+ flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
+ flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
+ flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
+ flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
+ flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
+ flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
+ flowc->mnemval[7].val = cpu_to_be32(csk->emss);
+
+ flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
+ if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
+ flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
+ else
+ flowc->mnemval[8].val = cpu_to_be32(16384);
+
+ index = 9;
+
+ if (csk->snd_wscale) {
+ flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
+ flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
+ index++;
+ }
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
+ if (vlan == VLAN_NONE) {
+ pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
+ flowc->mnemval[index].val = cpu_to_be32(0);
+ } else {
+ flowc->mnemval[index].val = cpu_to_be32(
+ (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
+ }
+#endif
+
+ pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
+ " rcv_seq = %u; snd_win = %u; emss = %u\n",
+ __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
+ csk->rcv_nxt, csk->snd_win, csk->emss);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
+ cxgbit_ofld_send(csk->com.cdev, skb);
+ return flowclen16;
+}
+
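+/*
+ * Program the iSCSI header/data digest (ULP submode) bits of the
+ * connection's TCB via CPL_SET_TCB_FIELD and wait for the reply.
+ */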
+int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ struct cpl_set_tcb_field *req;
+ u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
+ u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
+ unsigned int len = roundup(sizeof(*req), 16);
+ int ret;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* set up ulp submode */
+ req = (struct cpl_set_tcb_field *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
+ req->word_cookie = htons(0);
+ req->mask = cpu_to_be64(0x3 << 4);
+ req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
+ (dcrc ? ULP_CRC_DATA : 0)) << 4);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
+
+ cxgbit_get_csk(csk);
+ cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+ cxgbit_ofld_send(csk->com.cdev, skb);
+
+ ret = cxgbit_wait_for_reply(csk->com.cdev,
+ &csk->com.wr_wait,
+ csk->tid, 5, __func__);
+ if (ret)
+ return -1;
+
+ return 0;
+}
+
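+/* Program the DDP page-size index in the TCB, same flow as the digest setup. */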
+int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
+{
+ struct sk_buff *skb;
+ struct cpl_set_tcb_field *req;
+ unsigned int len = roundup(sizeof(*req), 16);
+ int ret;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct cpl_set_tcb_field *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
+ req->word_cookie = htons(0);
+ req->mask = cpu_to_be64(0x3 << 8);
+ req->val = cpu_to_be64(pg_idx << 8);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
+
+ cxgbit_get_csk(csk);
+ cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+ cxgbit_ofld_send(csk->com.cdev, skb);
+
+ ret = cxgbit_wait_for_reply(csk->com.cdev,
+ &csk->com.wr_wait,
+ csk->tid, 5, __func__);
+ if (ret)
+ return -1;
+
+ return 0;
+}
+
+static void
+cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cpl_pass_open_rpl *rpl = cplhdr(skb);
+ struct tid_info *t = cdev->lldi.tids;
+ unsigned int stid = GET_TID(rpl);
+ struct cxgbit_np *cnp = lookup_stid(t, stid);
+
+ pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
+ __func__, cnp, stid, rpl->status);
+
+ if (!cnp) {
+ pr_info("%s stid %d lookup failure\n", __func__, stid);
+ goto rel_skb;
+ }
+
+ cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
+ cxgbit_put_cnp(cnp);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void
+cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
+ struct tid_info *t = cdev->lldi.tids;
+ unsigned int stid = GET_TID(rpl);
+ struct cxgbit_np *cnp = lookup_stid(t, stid);
+
+ pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
+ __func__, cnp, stid, rpl->status);
+
+ if (!cnp) {
+ pr_info("%s stid %d lookup failure\n", __func__, stid);
+ goto rel_skb;
+ }
+
+ cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
+ cxgbit_put_cnp(cnp);
+rel_skb:
+ __kfree_skb(skb);
+}
+
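+/*
+ * CPL_PASS_ESTABLISH: the three-way handshake completed. Latch the
+ * initial send/receive sequence numbers, credit back any part of the
+ * receive window that did not fit in opt0, and queue the csk on the
+ * listening endpoint's accept list.
+ */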
+static void
+cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cpl_pass_establish *req = cplhdr(skb);
+ struct tid_info *t = cdev->lldi.tids;
+ unsigned int tid = GET_TID(req);
+ struct cxgbit_sock *csk;
+ struct cxgbit_np *cnp;
+ u16 tcp_opt = be16_to_cpu(req->tcp_opt);
+ u32 snd_isn = be32_to_cpu(req->snd_isn);
+ u32 rcv_isn = be32_to_cpu(req->rcv_isn);
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ goto rel_skb;
+ }
+ cnp = csk->cnp;
+
+ pr_debug("%s: csk %p; tid %u; cnp %p\n",
+ __func__, csk, tid, cnp);
+
+ csk->write_seq = snd_isn;
+ csk->snd_una = snd_isn;
+ csk->snd_nxt = snd_isn;
+
+ csk->rcv_nxt = rcv_isn;
+
+ if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
+ csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));
+
+ csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
+ cxgbit_set_emss(csk, tcp_opt);
+ dst_confirm(csk->dst);
+ csk->com.state = CSK_STATE_ESTABLISHED;
+ spin_lock_bh(&cnp->np_accept_lock);
+ list_add_tail(&csk->accept_node, &cnp->np_accept_list);
+ spin_unlock_bh(&cnp->np_accept_lock);
+ complete(&cnp->accept_comp);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ cxgbit_skcb_flags(skb) = 0;
+ spin_lock_bh(&csk->rxq.lock);
+ __skb_queue_tail(&csk->rxq, skb);
+ spin_unlock_bh(&csk->rxq.lock);
+ wake_up(&csk->waitq);
+}
+
+static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ pr_debug("%s: csk %p; tid %u; state %d\n",
+ __func__, csk, csk->tid, csk->com.state);
+
+ switch (csk->com.state) {
+ case CSK_STATE_ESTABLISHED:
+ csk->com.state = CSK_STATE_CLOSING;
+ cxgbit_queue_rx_skb(csk, skb);
+ return;
+ case CSK_STATE_CLOSING:
+ /* simultaneous close */
+ csk->com.state = CSK_STATE_MORIBUND;
+ break;
+ case CSK_STATE_MORIBUND:
+ csk->com.state = CSK_STATE_DEAD;
+ cxgbit_put_csk(csk);
+ break;
+ case CSK_STATE_ABORTING:
+ break;
+ default:
+ pr_info("%s: cpl_peer_close in bad state %d\n",
+ __func__, csk->com.state);
+ }
+
+ __kfree_skb(skb);
+}
+
+static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ pr_debug("%s: csk %p; tid %u; state %d\n",
+ __func__, csk, csk->tid, csk->com.state);
+
+ switch (csk->com.state) {
+ case CSK_STATE_CLOSING:
+ csk->com.state = CSK_STATE_MORIBUND;
+ break;
+ case CSK_STATE_MORIBUND:
+ csk->com.state = CSK_STATE_DEAD;
+ cxgbit_put_csk(csk);
+ break;
+ case CSK_STATE_ABORTING:
+ case CSK_STATE_DEAD:
+ break;
+ default:
+ pr_info("%s: cpl_close_con_rpl in bad state %d\n",
+ __func__, csk->com.state);
+ }
+
+ __kfree_skb(skb);
+}
+
+static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cpl_abort_req_rss *hdr = cplhdr(skb);
+ unsigned int tid = GET_TID(hdr);
+ struct cpl_abort_rpl *rpl;
+ struct sk_buff *rpl_skb;
+ bool release = false;
+ bool wakeup_thread = false;
+ unsigned int len = roundup(sizeof(*rpl), 16);
+
+ pr_debug("%s: csk %p; tid %u; state %d\n",
+ __func__, csk, tid, csk->com.state);
+
+ if (cxgbit_is_neg_adv(hdr->status)) {
+ pr_err("%s: got neg advise %d on tid %u\n",
+ __func__, hdr->status, tid);
+ goto rel_skb;
+ }
+
+ switch (csk->com.state) {
+ case CSK_STATE_CONNECTING:
+ case CSK_STATE_MORIBUND:
+ csk->com.state = CSK_STATE_DEAD;
+ release = true;
+ break;
+ case CSK_STATE_ESTABLISHED:
+ csk->com.state = CSK_STATE_DEAD;
+ wakeup_thread = true;
+ break;
+ case CSK_STATE_CLOSING:
+ csk->com.state = CSK_STATE_DEAD;
+ if (!csk->conn)
+ release = true;
+ break;
+ case CSK_STATE_ABORTING:
+ break;
+ default:
+ pr_info("%s: cpl_abort_req_rss in bad state %d\n",
+ __func__, csk->com.state);
+ csk->com.state = CSK_STATE_DEAD;
+ }
+
+ __skb_queue_purge(&csk->txq);
+
+ if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
+ cxgbit_send_tx_flowc_wr(csk);
+
+ rpl_skb = __skb_dequeue(&csk->skbq);
+ set_wr_txq(rpl_skb, CPL_PRIORITY_DATA, csk->txq_idx);
+
+ rpl = (struct cpl_abort_rpl *)__skb_put(rpl_skb, len);
+ memset(rpl, 0, len);
+
+ INIT_TP_WR(rpl, csk->tid);
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ rpl->cmd = CPL_ABORT_NO_RST;
+ cxgbit_ofld_send(csk->com.cdev, rpl_skb);
+
+ if (wakeup_thread) {
+ cxgbit_queue_rx_skb(csk, skb);
+ return;
+ }
+
+ if (release)
+ cxgbit_put_csk(csk);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ pr_debug("%s: csk %p; tid %u; state %d\n",
+ __func__, csk, csk->tid, csk->com.state);
+
+ switch (csk->com.state) {
+ case CSK_STATE_ABORTING:
+ csk->com.state = CSK_STATE_DEAD;
+ cxgbit_put_csk(csk);
+ break;
+ default:
+ pr_info("%s: cpl_abort_rpl_rss in state %d\n",
+ __func__, csk->com.state);
+ }
+
+ __kfree_skb(skb);
+}
+
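+/*
+ * Sanity check TX credit accounting: the credits still pending on the
+ * wr queue plus the available credits must add up to wr_max_cred.
+ */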
+static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
+{
+ const struct sk_buff *skb = csk->wr_pending_head;
+ u32 credit = 0;
+
+ if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
+ pr_err("csk 0x%p, tid %u, credit %u > %u\n",
+ csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
+ return true;
+ }
+
+ while (skb) {
+ credit += skb->csum;
+ skb = cxgbit_skcb_tx_wr_next(skb);
+ }
+
+ if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
+ pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
+ csk, csk->tid, csk->wr_cred,
+ credit, csk->wr_max_cred);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
+ u32 credits = rpl->credits;
+ u32 snd_una = ntohl(rpl->snd_una);
+
+ csk->wr_cred += credits;
+ if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
+ csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
+
+ while (credits) {
+ struct sk_buff *p = cxgbit_sock_peek_wr(csk);
+
+ if (unlikely(!p)) {
+ pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
+ csk, csk->tid, credits,
+ csk->wr_cred, csk->wr_una_cred);
+ break;
+ }
+
+ if (unlikely(credits < p->csum)) {
+ pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
+ csk, csk->tid,
+ credits, csk->wr_cred, csk->wr_una_cred,
+ p->csum);
+ p->csum -= credits;
+ break;
+ }
+
+ cxgbit_sock_dequeue_wr(csk);
+ credits -= p->csum;
+ kfree_skb(p);
+ }
+
+ if (unlikely(cxgbit_credit_err(csk))) {
+ cxgbit_queue_rx_skb(csk, skb);
+ return;
+ }
+
+ if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
+ if (unlikely(before(snd_una, csk->snd_una))) {
+ pr_warn("csk 0x%p,%u, snd_una %u/%u.",
+ csk, csk->tid, snd_una,
+ csk->snd_una);
+ goto rel_skb;
+ }
+
+ if (csk->snd_una != snd_una) {
+ csk->snd_una = snd_una;
+ dst_confirm(csk->dst);
+ wake_up(&csk->ack_waitq);
+ }
+ }
+
+ if (skb_queue_len(&csk->txq))
+ cxgbit_push_tx_frames(csk);
+
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbit_sock *csk;
+ struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
+ unsigned int tid = GET_TID(rpl);
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
+ cxgbit_put_csk(csk);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbit_sock *csk;
+ struct cpl_rx_data *cpl = cplhdr(skb);
+ unsigned int tid = GET_TID(cpl);
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find conn. for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ cxgbit_queue_rx_skb(csk, skb);
+ return;
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void
+__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ spin_lock(&csk->lock);
+ if (csk->lock_owner) {
+ __skb_queue_tail(&csk->backlogq, skb);
+ spin_unlock(&csk->lock);
+ return;
+ }
+
+ cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
+ spin_unlock(&csk->lock);
+}
+
+static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ cxgbit_get_csk(csk);
+ __cxgbit_process_rx_cpl(csk, skb);
+ cxgbit_put_csk(csk);
+}
+
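+/*
+ * Dispatch connection-scoped CPLs through the per-connection backlog so
+ * they run under the csk lock; CPL_FW4_ACK is handled without taking an
+ * extra csk reference.
+ */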
+static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbit_sock *csk;
+ struct cpl_tx_data *cpl = cplhdr(skb);
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct tid_info *t = lldi->tids;
+ unsigned int tid = GET_TID(cpl);
+ u8 opcode = cxgbit_skcb_rx_opcode(skb);
+ bool ref = true;
+
+ switch (opcode) {
+ case CPL_FW4_ACK:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
+ ref = false;
+ break;
+ case CPL_PEER_CLOSE:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
+ break;
+ case CPL_CLOSE_CON_RPL:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
+ break;
+ case CPL_ABORT_REQ_RSS:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
+ break;
+ case CPL_ABORT_RPL_RSS:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
+ break;
+ default:
+ goto rel_skb;
+ }
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find conn. for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ if (ref)
+ cxgbit_process_rx_cpl(csk, skb);
+ else
+ __cxgbit_process_rx_cpl(csk, skb);
+
+ return;
+rel_skb:
+ __kfree_skb(skb);
+}
+
+cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
+ [CPL_PASS_OPEN_RPL] = cxgbit_pass_open_rpl,
+ [CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
+ [CPL_PASS_ACCEPT_REQ] = cxgbit_pass_accept_req,
+ [CPL_PASS_ESTABLISH] = cxgbit_pass_establish,
+ [CPL_SET_TCB_RPL] = cxgbit_set_tcb_rpl,
+ [CPL_RX_DATA] = cxgbit_rx_data,
+ [CPL_FW4_ACK] = cxgbit_rx_cpl,
+ [CPL_PEER_CLOSE] = cxgbit_rx_cpl,
+ [CPL_CLOSE_CON_RPL] = cxgbit_rx_cpl,
+ [CPL_ABORT_REQ_RSS] = cxgbit_rx_cpl,
+ [CPL_ABORT_RPL_RSS] = cxgbit_rx_cpl,
+};
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
new file mode 100644
index 000000000..5d78bdb7f
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cxgbit.h"
+
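+/*
+ * Fill one DDP page pod: copy the pod header and up to PPOD_PAGES_MAX
+ * page addresses from the scatterlist, advancing *sg_pp/*sg_off so the
+ * caller can continue with the next pod.
+ */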
+static void
+cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
+ struct cxgbi_task_tag_info *ttinfo,
+ struct scatterlist **sg_pp, unsigned int *sg_off)
+{
+ struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
+ unsigned int offset = sg_off ? *sg_off : 0;
+ dma_addr_t addr = 0UL;
+ unsigned int len = 0;
+ int i;
+
+ memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));
+
+ if (sg) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ }
+
+ for (i = 0; i < PPOD_PAGES_MAX; i++) {
+ if (sg) {
+ ppod->addr[i] = cpu_to_be64(addr + offset);
+ offset += PAGE_SIZE;
+ if (offset == (len + sg->offset)) {
+ offset = 0;
+ sg = sg_next(sg);
+ if (sg) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ }
+ }
+ } else {
+ ppod->addr[i] = 0ULL;
+ }
+ }
+
+ /*
+ * the fifth address needs to be repeated in the next ppod, so do
+ * not move sg
+ */
+ if (sg_pp) {
+ *sg_pp = sg;
+ *sg_off = offset;
+ }
+
+ if (offset == len) {
+ offset = 0;
+ if (sg) {
+ sg = sg_next(sg);
+ if (sg)
+ addr = sg_dma_address(sg);
+ }
+ }
+ ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
+}
+
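+/*
+ * Allocate a ULP_TX memory-write work request that writes npods page
+ * pods, carried as immediate data, into adapter pod memory at idx.
+ */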
+static struct sk_buff *
+cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
+ unsigned int idx, unsigned int npods, unsigned int tid)
+{
+ struct ulp_mem_io *req;
+ struct ulptx_idata *idata;
+ unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
+ unsigned int dlen = npods << PPOD_SIZE_SHIFT;
+ unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
+ sizeof(struct ulptx_idata) + dlen, 16);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(wr_len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
+ INIT_ULPTX_WR(req, wr_len, 0, tid);
+ req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
+ FW_WR_ATOMIC_V(0));
+ req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+ ULP_MEMIO_ORDER_V(0) |
+ T5_ULP_MEMIO_IMM_V(1));
+ req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
+ req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
+ req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
+
+ idata = (struct ulptx_idata *)(req + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
+ idata->len = htonl(dlen);
+
+ return skb;
+}
+
+static int
+cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
+ struct cxgbi_task_tag_info *ttinfo, unsigned int idx,
+ unsigned int npods, struct scatterlist **sg_pp,
+ unsigned int *sg_off)
+{
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct sk_buff *skb;
+ struct ulp_mem_io *req;
+ struct ulptx_idata *idata;
+ struct cxgbi_pagepod *ppod;
+ unsigned int i;
+
+ skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct ulp_mem_io *)skb->data;
+ idata = (struct ulptx_idata *)(req + 1);
+ ppod = (struct cxgbi_pagepod *)(idata + 1);
+
+ for (i = 0; i < npods; i++, ppod++)
+ cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);
+
+ __skb_queue_tail(&csk->ppodq, skb);
+
+ return 0;
+}
+
+static int
+cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
+ struct cxgbi_task_tag_info *ttinfo)
+{
+ unsigned int pidx = ttinfo->idx;
+ unsigned int npods = ttinfo->npods;
+ unsigned int i, cnt;
+ struct scatterlist *sg = ttinfo->sgl;
+ unsigned int offset = 0;
+ int ret = 0;
+
+ for (i = 0; i < npods; i += cnt, pidx += cnt) {
+ cnt = npods - i;
+
+ if (cnt > ULPMEM_IDATA_MAX_NPPODS)
+ cnt = ULPMEM_IDATA_MAX_NPPODS;
+
+ ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
+ &sg, &offset);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
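+/*
+ * DDP can only map a scatterlist whose first entry is 4-byte aligned,
+ * whose later entries start at offset 0, and where every entry but the
+ * last covers a full page.
+ */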
+static int cxgbit_ddp_sgl_check(struct scatterlist *sg,
+ unsigned int nents)
+{
+ unsigned int last_sgidx = nents - 1;
+ unsigned int i;
+
+ for (i = 0; i < nents; i++, sg = sg_next(sg)) {
+ unsigned int len = sg->length + sg->offset;
+
+ if ((sg->offset & 0x3) || (i && sg->offset) ||
+ ((i != last_sgidx) && (len != PAGE_SIZE))) {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
+ unsigned int xferlen)
+{
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+ struct scatterlist *sgl = ttinfo->sgl;
+ unsigned int sgcnt = ttinfo->nents;
+ unsigned int sg_offset = sgl->offset;
+ int ret;
+
+ if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
+ pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
+ ppm, ppm->tformat.pgsz_idx_dflt,
+ xferlen, ttinfo->nents);
+ return -EINVAL;
+ }
+
+ if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
+ return -EINVAL;
+
+ ttinfo->nr_pages = (xferlen + sgl->offset +
+ (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
+
+ /*
+ * the ddp tag will be used for the ttt in the outgoing r2t pdu
+ */
+ ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
+ &ttinfo->tag, 0);
+ if (ret < 0)
+ return ret;
+ ttinfo->npods = ret;
+
+ sgl->offset = 0;
+ ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
+ sgl->offset = sg_offset;
+ if (!ret) {
+ pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
+ __func__, 0, xferlen, sgcnt);
+ goto rel_ppods;
+ }
+
+ cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
+ xferlen, &ttinfo->hdr);
+
+ ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
+ if (ret < 0) {
+ __skb_queue_purge(&csk->ppodq);
+ dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
+ goto rel_ppods;
+ }
+
+ return 0;
+
+rel_ppods:
+ cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
+ return -EINVAL;
+}
+
+void
+cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct iscsi_r2t *r2t)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+ struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
+ int ret = -EINVAL;
+
+ if ((!ccmd->setup_ddp) ||
+ (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
+ goto out;
+
+ ccmd->setup_ddp = false;
+
+ ttinfo->sgl = cmd->se_cmd.t_data_sg;
+ ttinfo->nents = cmd->se_cmd.t_data_nents;
+
+ ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
+ if (ret < 0) {
+ pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
+ csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
+
+ ttinfo->sgl = NULL;
+ ttinfo->nents = 0;
+ } else {
+ ccmd->release = true;
+ }
+out:
+ pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
+ r2t->targ_xfer_tag = ttinfo->tag;
+}
+
+void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+ struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+
+ if (ccmd->release) {
+ struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
+
+ if (ttinfo->sgl) {
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+
+ cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
+
+ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
+ ttinfo->nents, DMA_FROM_DEVICE);
+ } else {
+ put_page(sg_page(&ccmd->sg));
+ }
+
+ ccmd->release = false;
+ }
+}
+
+int cxgbit_ddp_init(struct cxgbit_device *cdev)
+{
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct net_device *ndev = cdev->lldi.ports[0];
+ struct cxgbi_tag_format tformat;
+ unsigned int ppmax;
+ int ret, i;
+
+ if (!lldi->vr->iscsi.size) {
+ pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
+ return -EACCES;
+ }
+
+ ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
+
+ memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
+ for (i = 0; i < 4; i++)
+ tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
+ & 0xF;
+ cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);
+
+ ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
+ cdev->lldi.pdev, &cdev->lldi, &tformat,
+ ppmax, lldi->iscsi_llimit,
+ lldi->vr->iscsi.start, 2);
+ if (ret >= 0) {
+ struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);
+
+ if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) &&
+ (ppm->ppmax >= 1024))
+ set_bit(CDEV_DDP_ENABLE, &cdev->flags);
+ ret = 0;
+ }
+
+ return ret;
+}
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_lro.h b/drivers/target/iscsi/cxgbit/cxgbit_lro.h
new file mode 100644
index 000000000..28c11bd1b
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_lro.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef __CXGBIT_LRO_H__
+#define __CXGBIT_LRO_H__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#define LRO_FLUSH_LEN_MAX 65535
+
+struct cxgbit_lro_cb {
+ struct cxgbit_sock *csk;
+ u32 pdu_totallen;
+ u32 offset;
+ u8 pdu_idx;
+ bool complete;
+};
+
+enum cxgbit_pducb_flags {
+ PDUCBF_RX_HDR = (1 << 0), /* received pdu header */
+ PDUCBF_RX_DATA = (1 << 1), /* received pdu payload */
+ PDUCBF_RX_STATUS = (1 << 2), /* received ddp status */
+ PDUCBF_RX_DATA_DDPD = (1 << 3), /* pdu payload ddp'd */
+ PDUCBF_RX_HCRC_ERR = (1 << 4), /* header digest error */
+ PDUCBF_RX_DCRC_ERR = (1 << 5), /* data digest error */
+};
+
+struct cxgbit_lro_pdu_cb {
+ u8 flags;
+ u8 frags;
+ u8 hfrag_idx;
+ u8 nr_dfrags;
+ u8 dfrag_idx;
+ bool complete;
+ u32 seq;
+ u32 pdulen;
+ u32 hlen;
+ u32 dlen;
+ u32 doffset;
+ u32 ddigest;
+ void *hdr;
+};
+
+#define LRO_SKB_MAX_HEADROOM \
+ (sizeof(struct cxgbit_lro_cb) + \
+ (MAX_SKB_FRAGS * sizeof(struct cxgbit_lro_pdu_cb)))
+
+#define LRO_SKB_MIN_HEADROOM \
+ (sizeof(struct cxgbit_lro_cb) + \
+ sizeof(struct cxgbit_lro_pdu_cb))
+
+#define cxgbit_skb_lro_cb(skb) ((struct cxgbit_lro_cb *)skb->data)
+#define cxgbit_skb_lro_pdu_cb(skb, i) \
+ ((struct cxgbit_lro_pdu_cb *)(skb->data + sizeof(struct cxgbit_lro_cb) \
+ + (i * sizeof(struct cxgbit_lro_pdu_cb))))
+
+#define CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */
+#define CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT 19 /* pad error */
+#define CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */
+#define CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */
+
+#endif /* __CXGBIT_LRO_H__ */
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
new file mode 100644
index 000000000..60dccd02b
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define DRV_NAME "cxgbit"
+#define DRV_VERSION "1.0.0-ko"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include "cxgbit.h"
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+#include <net/dcbevent.h>
+#include "cxgb4_dcb.h"
+#endif
+
+LIST_HEAD(cdev_list_head);
+/* cdev list lock */
+DEFINE_MUTEX(cdev_list_lock);
+
+void _cxgbit_free_cdev(struct kref *kref)
+{
+ struct cxgbit_device *cdev;
+
+ cdev = container_of(kref, struct cxgbit_device, kref);
+ kfree(cdev);
+}
+
+static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
+{
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ u32 mdsl;
+
+#define ULP2_MAX_PKT_LEN 16224
+#define ISCSI_PDU_NONPAYLOAD_LEN 312
+ mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,
+ ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
+ mdsl = min_t(u32, mdsl, 8192);
+ mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);
+
+ cdev->mdsl = mdsl;
+}
+
+static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
+{
+ struct cxgbit_device *cdev;
+
+ if (is_t4(lldi->adapter_type))
+ return ERR_PTR(-ENODEV);
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&cdev->kref);
+
+ cdev->lldi = *lldi;
+
+ cxgbit_set_mdsl(cdev);
+
+ if (cxgbit_ddp_init(cdev) < 0) {
+ kfree(cdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
+ pr_info("cdev %s ddp init failed\n",
+ pci_name(lldi->pdev));
+
+ if (lldi->fw_vers >= 0x10d2b00)
+ set_bit(CDEV_ISO_ENABLE, &cdev->flags);
+
+ spin_lock_init(&cdev->cskq.lock);
+ INIT_LIST_HEAD(&cdev->cskq.list);
+
+ mutex_lock(&cdev_list_lock);
+ list_add_tail(&cdev->list, &cdev_list_head);
+ mutex_unlock(&cdev_list_lock);
+
+ pr_info("cdev %s added for iSCSI target transport\n",
+ pci_name(lldi->pdev));
+
+ return cdev;
+}
+
+static void cxgbit_close_conn(struct cxgbit_device *cdev)
+{
+ struct cxgbit_sock *csk;
+ struct sk_buff *skb;
+ bool wakeup_thread = false;
+
+ spin_lock_bh(&cdev->cskq.lock);
+ list_for_each_entry(csk, &cdev->cskq.list, list) {
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (!skb)
+ continue;
+
+ spin_lock_bh(&csk->rxq.lock);
+ __skb_queue_tail(&csk->rxq, skb);
+ if (skb_queue_len(&csk->rxq) == 1)
+ wakeup_thread = true;
+ spin_unlock_bh(&csk->rxq.lock);
+
+ if (wakeup_thread) {
+ wake_up(&csk->waitq);
+ wakeup_thread = false;
+ }
+ }
+ spin_unlock_bh(&cdev->cskq.lock);
+}
+
+static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
+{
+ bool free_cdev = false;
+
+ spin_lock_bh(&cdev->cskq.lock);
+ if (list_empty(&cdev->cskq.list))
+ free_cdev = true;
+ spin_unlock_bh(&cdev->cskq.lock);
+
+ if (free_cdev) {
+ mutex_lock(&cdev_list_lock);
+ list_del(&cdev->list);
+ mutex_unlock(&cdev_list_lock);
+
+ cxgbit_put_cdev(cdev);
+ } else {
+ cxgbit_close_conn(cdev);
+ }
+}
+
+static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
+{
+ struct cxgbit_device *cdev = handle;
+
+ switch (state) {
+ case CXGB4_STATE_UP:
+ set_bit(CDEV_STATE_UP, &cdev->flags);
+ pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
+ break;
+ case CXGB4_STATE_START_RECOVERY:
+ clear_bit(CDEV_STATE_UP, &cdev->flags);
+ cxgbit_close_conn(cdev);
+ pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
+ break;
+ case CXGB4_STATE_DOWN:
+ pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
+ break;
+ case CXGB4_STATE_DETACH:
+ clear_bit(CDEV_STATE_UP, &cdev->flags);
+ pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
+ cxgbit_detach_cdev(cdev);
+ break;
+ default:
+ pr_info("cdev %s unknown state %d.\n",
+ pci_name(cdev->lldi.pdev), state);
+ break;
+ }
+ return 0;
+}
+
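+/*
+ * Decode a CPL_RX_ISCSI_DDP completion: record the data digest and PDU
+ * length and flag any header CRC, data CRC or pad errors.
+ */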
+static void
+cxgbit_proc_ddp_status(unsigned int tid, struct cpl_rx_data_ddp *cpl,
+ struct cxgbit_lro_pdu_cb *pdu_cb)
+{
+ unsigned int status = ntohl(cpl->ddpvld);
+
+ pdu_cb->flags |= PDUCBF_RX_STATUS;
+ pdu_cb->ddigest = ntohl(cpl->ulp_crc);
+ pdu_cb->pdulen = ntohs(cpl->len);
+
+ if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
+ pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", tid, status);
+ pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
+ }
+
+ if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
+ pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", tid, status);
+ pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
+ }
+
+ if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
+ pr_info("tid 0x%x, status 0x%x, pad bad.\n", tid, status);
+
+ if ((status & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
+ (!(pdu_cb->flags & PDUCBF_RX_DATA))) {
+ pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
+ }
+}
+
+static void
+cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
+ lro_cb->pdu_idx);
+ struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);
+
+ cxgbit_proc_ddp_status(lro_cb->csk->tid, cpl, pdu_cb);
+
+ if (pdu_cb->flags & PDUCBF_RX_HDR)
+ pdu_cb->complete = true;
+
+ lro_cb->complete = true;
+ lro_cb->pdu_totallen += pdu_cb->pdulen;
+ lro_cb->pdu_idx++;
+}
+
+static void
+cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
+ unsigned int offset)
+{
+ u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
+ u8 i;
+
+ /* usually there's just one frag */
+ __skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
+ gl->frags[0].offset + offset,
+ gl->frags[0].size - offset);
+ for (i = 1; i < gl->nfrags; i++)
+ __skb_fill_page_desc(skb, skb_frag_idx + i,
+ gl->frags[i].page,
+ gl->frags[i].offset,
+ gl->frags[i].size);
+
+ skb_shinfo(skb)->nr_frags += gl->nfrags;
+
+ /* get a reference to the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+}
+
+static void
+cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
+ lro_cb->pdu_idx);
+ u32 len, offset;
+
+ if (op == CPL_ISCSI_HDR) {
+ struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;
+
+ offset = sizeof(struct cpl_iscsi_hdr);
+ pdu_cb->flags |= PDUCBF_RX_HDR;
+ pdu_cb->seq = ntohl(cpl->seq);
+ len = ntohs(cpl->len);
+ pdu_cb->hdr = gl->va + offset;
+ pdu_cb->hlen = len;
+ pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(gl->nfrags > 1))
+ cxgbit_skcb_flags(skb) = 0;
+
+ lro_cb->complete = false;
+ } else {
+ struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;
+
+ offset = sizeof(struct cpl_iscsi_data);
+ pdu_cb->flags |= PDUCBF_RX_DATA;
+ len = ntohs(cpl->len);
+ pdu_cb->dlen = len;
+ pdu_cb->doffset = lro_cb->offset;
+ pdu_cb->nr_dfrags = gl->nfrags;
+ pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
+ }
+
+ cxgbit_copy_frags(skb, gl, offset);
+
+ pdu_cb->frags += gl->nfrags;
+ lro_cb->offset += len;
+ skb->len += len;
+ skb->data_len += len;
+ skb->truesize += len;
+}
+
+static struct sk_buff *
+cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
+ const __be64 *rsp, struct napi_struct *napi)
+{
+ struct sk_buff *skb;
+ struct cxgbit_lro_cb *lro_cb;
+
+ skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);
+
+ if (unlikely(!skb))
+ return NULL;
+
+ memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);
+
+ cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;
+
+ lro_cb = cxgbit_skb_lro_cb(skb);
+
+ cxgbit_get_csk(csk);
+
+ lro_cb->csk = csk;
+
+ return skb;
+}
+
+static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ bool wakeup_thread = false;
+
+ spin_lock(&csk->rxq.lock);
+ __skb_queue_tail(&csk->rxq, skb);
+ if (skb_queue_len(&csk->rxq) == 1)
+ wakeup_thread = true;
+ spin_unlock(&csk->rxq.lock);
+
+ if (wakeup_thread)
+ wake_up(&csk->waitq);
+}
+
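+/*
+ * End the LRO session for this connection: unlink the aggregation skb
+ * from the LRO manager and hand it to the connection's rx queue.
+ */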
+static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_sock *csk = lro_cb->csk;
+
+ csk->lro_skb = NULL;
+
+ __skb_unlink(skb, &lro_mgr->lroq);
+ cxgbit_queue_lro_skb(csk, skb);
+
+ cxgbit_put_csk(csk);
+
+ lro_mgr->lro_pkts++;
+ lro_mgr->lro_session_cnt--;
+}
+
+static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_peek(&lro_mgr->lroq)))
+ cxgbit_lro_flush(lro_mgr, skb);
+}
+
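+/*
+ * Aggregate an iSCSI CPL into the connection's LRO skb, starting a new
+ * LRO session if needed and flushing once the frag count or total PDU
+ * length limits would be exceeded.
+ */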
+static int
+cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp,
+ const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
+ struct napi_struct *napi)
+{
+ struct sk_buff *skb;
+ struct cxgbit_lro_cb *lro_cb;
+
+ if (!csk) {
+ pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);
+ goto out;
+ }
+
+ if (csk->lro_skb)
+ goto add_packet;
+
+start_lro:
+ if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) {
+ cxgbit_uld_lro_flush(lro_mgr);
+ goto start_lro;
+ }
+
+ skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
+ if (unlikely(!skb))
+ goto out;
+
+ csk->lro_skb = skb;
+
+ __skb_queue_tail(&lro_mgr->lroq, skb);
+ lro_mgr->lro_session_cnt++;
+
+add_packet:
+ skb = csk->lro_skb;
+ lro_cb = cxgbit_skb_lro_cb(skb);
+
+ if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
+ MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||
+ (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {
+ cxgbit_lro_flush(lro_mgr, skb);
+ goto start_lro;
+ }
+
+ if (gl)
+ cxgbit_lro_add_packet_gl(skb, op, gl);
+ else
+ cxgbit_lro_add_packet_rsp(skb, op, rsp);
+
+ lro_mgr->lro_merged++;
+
+ return 0;
+
+out:
+ return -1;
+}
+
+static int
+cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
+ const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
+ struct napi_struct *napi)
+{
+ struct cxgbit_device *cdev = hndl;
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct cpl_tx_data *rpl = NULL;
+ struct cxgbit_sock *csk = NULL;
+ unsigned int tid = 0;
+ struct sk_buff *skb;
+ unsigned int op = *(u8 *)rsp;
+ bool lro_flush = true;
+
+ switch (op) {
+ case CPL_ISCSI_HDR:
+ case CPL_ISCSI_DATA:
+ case CPL_RX_ISCSI_DDP:
+ case CPL_FW4_ACK:
+ lro_flush = false;
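+ /* fall through */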
+ case CPL_ABORT_RPL_RSS:
+ case CPL_PASS_ESTABLISH:
+ case CPL_PEER_CLOSE:
+ case CPL_CLOSE_CON_RPL:
+ case CPL_ABORT_REQ_RSS:
+ case CPL_SET_TCB_RPL:
+ case CPL_RX_DATA:
+ rpl = gl ? (struct cpl_tx_data *)gl->va :
+ (struct cpl_tx_data *)(rsp + 1);
+ tid = GET_TID(rpl);
+ csk = lookup_tid(lldi->tids, tid);
+ break;
+ default:
+ break;
+ }
+
+ if (csk && csk->lro_skb && lro_flush)
+ cxgbit_lro_flush(lro_mgr, csk->lro_skb);
+
+ if (!gl) {
+ unsigned int len;
+
+ if (op == CPL_RX_ISCSI_DDP) {
+ if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr,
+ napi))
+ return 0;
+ }
+
+ len = 64 - sizeof(struct rsp_ctrl) - 8;
+ skb = napi_alloc_skb(napi, len);
+ if (!skb)
+ goto nomem;
+ __skb_put(skb, len);
+ skb_copy_to_linear_data(skb, &rsp[1], len);
+ } else {
+ if (unlikely(op != *(u8 *)gl->va)) {
+ pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
+ gl->va, be64_to_cpu(*rsp),
+ be64_to_cpu(*(u64 *)gl->va),
+ gl->tot_len);
+ return 0;
+ }
+
+ if (op == CPL_ISCSI_HDR || op == CPL_ISCSI_DATA) {
+ if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
+ napi))
+ return 0;
+ }
+
+#define RX_PULL_LEN 128
+ skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
+ if (unlikely(!skb))
+ goto nomem;
+ }
+
+ rpl = (struct cpl_tx_data *)skb->data;
+ op = rpl->ot.opcode;
+ cxgbit_skcb_rx_opcode(skb) = op;
+
+ pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
+ cdev, op, rpl->ot.opcode_tid,
+ ntohl(rpl->ot.opcode_tid), skb);
+
+ if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) {
+ cxgbit_cplhandlers[op](cdev, skb);
+ } else {
+ pr_err("No handler for opcode 0x%x.\n", op);
+ __kfree_skb(skb);
+ }
+ return 0;
+nomem:
+ pr_err("%s OOM bailing out.\n", __func__);
+ return 1;
+}
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+struct cxgbit_dcb_work {
+ struct dcb_app_type dcb_app;
+ struct work_struct work;
+};
+
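+/*
+ * Wake every connection on the given port and listening port number
+ * whose priority no longer matches: queueing an empty skb to the rx
+ * queue is the same mechanism cxgbit_close_conn() uses to ask the rx
+ * thread to tear the connection down.
+ */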
+static void
+cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
+ u8 dcb_priority, u16 port_num)
+{
+ struct cxgbit_sock *csk;
+ struct sk_buff *skb;
+ u16 local_port;
+ bool wakeup_thread = false;
+
+ spin_lock_bh(&cdev->cskq.lock);
+ list_for_each_entry(csk, &cdev->cskq.list, list) {
+ if (csk->port_id != port_id)
+ continue;
+
+ if (csk->com.local_addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sock_in6;
+
+ sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr;
+ local_port = ntohs(sock_in6->sin6_port);
+ } else {
+ struct sockaddr_in *sock_in;
+
+ sock_in = (struct sockaddr_in *)&csk->com.local_addr;
+ local_port = ntohs(sock_in->sin_port);
+ }
+
+ if (local_port != port_num)
+ continue;
+
+ if (csk->dcb_priority == dcb_priority)
+ continue;
+
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (!skb)
+ continue;
+
+ spin_lock(&csk->rxq.lock);
+ __skb_queue_tail(&csk->rxq, skb);
+ if (skb_queue_len(&csk->rxq) == 1)
+ wakeup_thread = true;
+ spin_unlock(&csk->rxq.lock);
+
+ if (wakeup_thread) {
+ wake_up(&csk->waitq);
+ wakeup_thread = false;
+ }
+ }
+ spin_unlock_bh(&cdev->cskq.lock);
+}
+
+static void cxgbit_dcb_workfn(struct work_struct *work)
+{
+ struct cxgbit_dcb_work *dcb_work;
+ struct net_device *ndev;
+ struct cxgbit_device *cdev = NULL;
+ struct dcb_app_type *iscsi_app;
+ u8 priority, port_id = 0xff;
+
+ dcb_work = container_of(work, struct cxgbit_dcb_work, work);
+ iscsi_app = &dcb_work->dcb_app;
+
+ if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
+ if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
+ goto out;
+
+ priority = iscsi_app->app.priority;
+
+ } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
+ if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
+ goto out;
+
+ if (!iscsi_app->app.priority)
+ goto out;
+
+ priority = ffs(iscsi_app->app.priority) - 1;
+ } else {
+ goto out;
+ }
+
+ pr_debug("priority for ifid %d is %u\n",
+ iscsi_app->ifindex, priority);
+
+ ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
+
+ if (!ndev)
+ goto out;
+
+ mutex_lock(&cdev_list_lock);
+ cdev = cxgbit_find_device(ndev, &port_id);
+
+ dev_put(ndev);
+
+ if (!cdev) {
+ mutex_unlock(&cdev_list_lock);
+ goto out;
+ }
+
+ cxgbit_update_dcb_priority(cdev, port_id, priority,
+ iscsi_app->app.protocol);
+ mutex_unlock(&cdev_list_lock);
+out:
+ kfree(dcb_work);
+}
+
+static int
+cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct cxgbit_dcb_work *dcb_work;
+ struct dcb_app_type *dcb_app = data;
+
+ dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
+ if (!dcb_work)
+ return NOTIFY_DONE;
+
+ dcb_work->dcb_app = *dcb_app;
+ INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
+ schedule_work(&dcb_work->work);
+ return NOTIFY_OK;
+}
+#endif
+
+static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+ return TARGET_PROT_NORMAL;
+}
+
+static struct iscsit_transport cxgbit_transport = {
+ .name = DRV_NAME,
+ .transport_type = ISCSI_CXGBIT,
+ .rdma_shutdown = false,
+ .priv_size = sizeof(struct cxgbit_cmd),
+ .owner = THIS_MODULE,
+ .iscsit_setup_np = cxgbit_setup_np,
+ .iscsit_accept_np = cxgbit_accept_np,
+ .iscsit_free_np = cxgbit_free_np,
+ .iscsit_free_conn = cxgbit_free_conn,
+ .iscsit_get_login_rx = cxgbit_get_login_rx,
+ .iscsit_put_login_tx = cxgbit_put_login_tx,
+ .iscsit_immediate_queue = iscsit_immediate_queue,
+ .iscsit_response_queue = iscsit_response_queue,
+ .iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
+ .iscsit_queue_data_in = iscsit_queue_rsp,
+ .iscsit_queue_status = iscsit_queue_rsp,
+ .iscsit_xmit_pdu = cxgbit_xmit_pdu,
+ .iscsit_get_r2t_ttt = cxgbit_get_r2t_ttt,
+ .iscsit_get_rx_pdu = cxgbit_get_rx_pdu,
+ .iscsit_validate_params = cxgbit_validate_params,
+ .iscsit_release_cmd = cxgbit_release_cmd,
+ .iscsit_aborted_task = iscsit_aborted_task,
+ .iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
+};
+
+static struct cxgb4_uld_info cxgbit_uld_info = {
+ .name = DRV_NAME,
+ .add = cxgbit_uld_add,
+ .state_change = cxgbit_uld_state_change,
+ .lro_rx_handler = cxgbit_uld_lro_rx_handler,
+ .lro_flush = cxgbit_uld_lro_flush,
+};
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+static struct notifier_block cxgbit_dcbevent_nb = {
+ .notifier_call = cxgbit_dcbevent_notify,
+};
+#endif
+
+static int __init cxgbit_init(void)
+{
+ cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info);
+ iscsit_register_transport(&cxgbit_transport);
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ pr_info("%s dcb enabled.\n", DRV_NAME);
+ register_dcbevent_notifier(&cxgbit_dcbevent_nb);
+#endif
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
+ sizeof(union cxgbit_skb_cb));
+ return 0;
+}
+
+static void __exit cxgbit_exit(void)
+{
+ struct cxgbit_device *cdev, *tmp;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ unregister_dcbevent_notifier(&cxgbit_dcbevent_nb);
+#endif
+ mutex_lock(&cdev_list_lock);
+ list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
+ list_del(&cdev->list);
+ cxgbit_put_cdev(cdev);
+ }
+ mutex_unlock(&cdev_list_lock);
+ iscsit_unregister_transport(&cxgbit_transport);
+ cxgb4_unregister_uld(CXGB4_ULD_ISCSIT);
+}
+
+module_init(cxgbit_init);
+module_exit(cxgbit_exit);
+
+MODULE_DESCRIPTION("Chelsio iSCSI target offload driver");
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
new file mode 100644
index 000000000..d02bf58ae
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -0,0 +1,1561 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <asm/unaligned.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include "cxgbit.h"
+
+struct sge_opaque_hdr {
+ void *dev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+static const u8 cxgbit_digest_len[] = {0, 4, 4, 8};
+
+#define TX_HDR_LEN (sizeof(struct sge_opaque_hdr) + \
+ sizeof(struct fw_ofld_tx_data_wr))
+
+static struct sk_buff *
+__cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)
+{
+ struct sk_buff *skb = NULL;
+ u8 submode = 0;
+ int errcode;
+ static const u32 hdr_len = TX_HDR_LEN + ISCSI_HDR_LEN;
+
+ if (len) {
+ skb = alloc_skb_with_frags(hdr_len, len,
+ 0, &errcode,
+ GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, TX_HDR_LEN);
+ skb_reset_transport_header(skb);
+ __skb_put(skb, ISCSI_HDR_LEN);
+ skb->data_len = len;
+ skb->len += len;
+ submode |= (csk->submode & CXGBIT_SUBMODE_DCRC);
+
+ } else {
+ u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0;
+
+ skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, TX_HDR_LEN + iso_len);
+ skb_reset_transport_header(skb);
+ __skb_put(skb, ISCSI_HDR_LEN);
+ }
+
+ submode |= (csk->submode & CXGBIT_SUBMODE_HCRC);
+ cxgbit_skcb_submode(skb) = submode;
+ cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode];
+ cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR;
+ return skb;
+}
+
+static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len)
+{
+ return __cxgbit_alloc_skb(csk, len, false);
+}
+
+/*
+ * cxgbit_is_ofld_imm - check whether a packet can be sent as immediate data
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as an offload WR with immediate
+ * data. We currently use the same limit as for Ethernet packets.
+ */
+static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
+{
+ int length = skb->len;
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
+ length += sizeof(struct fw_ofld_tx_data_wr);
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
+ length += sizeof(struct cpl_tx_data_iso);
+
+#define MAX_IMM_TX_PKT_LEN 256
+ return length <= MAX_IMM_TX_PKT_LEN;
+}
+
+/*
+ * cxgbit_sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
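+ *
+ * This mirrors the cxgb4 sgl_len() helper: the ULPTX SGL header takes
+ * two flits and holds the first length/address pair; each further pair
+ * of entries packs into three flits, plus one extra flit for an odd
+ * leftover entry.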
+ */
+static inline unsigned int cxgbit_sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
+
+/*
+ * cxgbit_calc_tx_flits_ofld - calculate # of flits for an offload packet
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for the given offload packet.
+ * These packets are already fully constructed and no additional headers
+ * will be added.
+ */
+static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb)
+{
+ unsigned int flits, cnt;
+
+ if (cxgbit_is_ofld_imm(skb))
+ return DIV_ROUND_UP(skb->len, 8);
+ flits = skb_transport_offset(skb) / 8;
+ cnt = skb_shinfo(skb)->nr_frags;
+ if (skb_tail_pointer(skb) != skb_transport_header(skb))
+ cnt++;
+ return flits + cxgbit_sgl_len(cnt);
+}
+
+#define CXGBIT_ISO_FSLICE 0x1
+#define CXGBIT_ISO_LSLICE 0x2
+static void
+cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info)
+{
+ struct cpl_tx_data_iso *cpl;
+ unsigned int submode = cxgbit_skcb_submode(skb);
+ unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE);
+ unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE);
+
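+ /*
+ * Build the ISO CPL directly in front of skb->data, then pull it
+ * back off below so cxgbit_tx_data_wr() can still prepend the FW
+ * WR; __cxgbit_alloc_skb() reserved headroom for both.
+ */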
+ cpl = (struct cpl_tx_data_iso *)__skb_push(skb, sizeof(*cpl));
+
+ cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
+ CPL_TX_DATA_ISO_FIRST_V(fslice) |
+ CPL_TX_DATA_ISO_LAST_V(lslice) |
+ CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
+ CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
+ CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
+ CPL_TX_DATA_ISO_IMMEDIATE_V(0) |
+ CPL_TX_DATA_ISO_SCSI_V(2));
+
+ cpl->ahs_len = 0;
+ cpl->mpdu = htons(DIV_ROUND_UP(iso_info->mpdu, 4));
+ cpl->burst_size = htonl(DIV_ROUND_UP(iso_info->burst_len, 4));
+ cpl->len = htonl(iso_info->len);
+ cpl->reserved2_seglen_offset = htonl(0);
+ cpl->datasn_offset = htonl(0);
+ cpl->buffer_offset = htonl(0);
+ cpl->reserved3 = 0;
+
+ __skb_pull(skb, sizeof(*cpl));
+}
+
+static void
+cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
+ u32 len, u32 credits, u32 compl)
+{
+ struct fw_ofld_tx_data_wr *req;
+ u32 submode = cxgbit_skcb_submode(skb);
+ u32 wr_ulp_mode = 0;
+ u32 hdr_size = sizeof(*req);
+ u32 opcode = FW_OFLD_TX_DATA_WR;
+ u32 immlen = 0;
+ u32 force = TX_FORCE_V(!submode);
+
+ if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) {
+ opcode = FW_ISCSI_TX_DATA_WR;
+ immlen += sizeof(struct cpl_tx_data_iso);
+ hdr_size += sizeof(struct cpl_tx_data_iso);
+ submode |= 8;
+ }
+
+ if (cxgbit_is_ofld_imm(skb))
+ immlen += dlen;
+
+ req = (struct fw_ofld_tx_data_wr *)__skb_push(skb,
+ hdr_size);
+ req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
+ FW_WR_COMPL_V(compl) |
+ FW_WR_IMMDLEN_V(immlen));
+ req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
+ FW_WR_LEN16_V(credits));
+ req->plen = htonl(len);
+ wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) |
+ FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
+
+ req->tunnel_to_proxy = htonl((wr_ulp_mode) | force |
+ FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ? 0 : 1));
+}
+
+static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+
+ while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) {
+ u32 dlen = skb->len;
+ u32 len = skb->len;
+ u32 credits_needed;
+ u32 compl = 0;
+ u32 flowclen16 = 0;
+ u32 iso_cpl_len = 0;
+
+ if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)
+ iso_cpl_len = sizeof(struct cpl_tx_data_iso);
+
+ if (cxgbit_is_ofld_imm(skb))
+ credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
+ else
+ credits_needed = DIV_ROUND_UP((8 *
+ cxgbit_calc_tx_flits_ofld(skb)) +
+ iso_cpl_len, 16);
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
+ credits_needed += DIV_ROUND_UP(
+ sizeof(struct fw_ofld_tx_data_wr), 16);
+ /*
+ * Assumes that the initial credit allocation is large enough to
+ * cover a fw_flowc_wr plus the largest possible first payload.
+ */
+
+ if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) {
+ flowclen16 = cxgbit_send_tx_flowc_wr(csk);
+ csk->wr_cred -= flowclen16;
+ csk->wr_una_cred += flowclen16;
+ }
+
+ if (csk->wr_cred < credits_needed) {
+ pr_debug("csk 0x%p, skb %u/%u, wr %u < %u.\n",
+ csk, skb->len, skb->data_len,
+ csk->wr_cred, credits_needed);
+ break;
+ }
+ __skb_unlink(skb, &csk->txq);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
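+ /*
+ * skb->csum is unused on offload queues, so stash the credits
+ * consumed by this WR there; the completion path can then return
+ * them to wr_cred (the usual cxgb4 ULD convention).
+ */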
+ skb->csum = credits_needed + flowclen16;
+ csk->wr_cred -= credits_needed;
+ csk->wr_una_cred += credits_needed;
+
+ pr_debug("csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
+ csk, skb->len, skb->data_len, credits_needed,
+ csk->wr_cred, csk->wr_una_cred);
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) {
+ len += cxgbit_skcb_tx_extralen(skb);
+
+ if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
+ (!before(csk->write_seq,
+ csk->snd_una + csk->snd_win))) {
+ compl = 1;
+ csk->wr_una_cred = 0;
+ }
+
+ cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed,
+ compl);
+ csk->snd_nxt += len;
+
+ } else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) ||
+ (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
+ struct cpl_close_con_req *req =
+ (struct cpl_close_con_req *)skb->data;
+ req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
+ csk->wr_una_cred = 0;
+ }
+
+ cxgbit_sock_enqueue_wr(csk, skb);
+ t4_set_arp_err_handler(skb, csk,
+ cxgbit_arp_failure_skb_discard);
+
+ pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n",
+ csk, csk->tid, skb, len);
+
+ cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
+ }
+}
+
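+/*
+ * Socket-lock-style ownership: while lock_owner is set, CPL handlers
+ * queue incoming work on csk->backlogq instead of processing it, and
+ * cxgbit_unlock_sock() drains that backlog before releasing ownership,
+ * mirroring the core TCP lock_sock()/release_sock() pattern.
+ */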
+static bool cxgbit_lock_sock(struct cxgbit_sock *csk)
+{
+ spin_lock_bh(&csk->lock);
+
+ if (before(csk->write_seq, csk->snd_una + csk->snd_win))
+ csk->lock_owner = true;
+
+ spin_unlock_bh(&csk->lock);
+
+ return csk->lock_owner;
+}
+
+static void cxgbit_unlock_sock(struct cxgbit_sock *csk)
+{
+ struct sk_buff_head backlogq;
+ struct sk_buff *skb;
+ void (*fn)(struct cxgbit_sock *, struct sk_buff *);
+
+ skb_queue_head_init(&backlogq);
+
+ spin_lock_bh(&csk->lock);
+ while (skb_queue_len(&csk->backlogq)) {
+ skb_queue_splice_init(&csk->backlogq, &backlogq);
+ spin_unlock_bh(&csk->lock);
+
+ while ((skb = __skb_dequeue(&backlogq))) {
+ fn = cxgbit_skcb_rx_backlog_fn(skb);
+ fn(csk, skb);
+ }
+
+ spin_lock_bh(&csk->lock);
+ }
+
+ csk->lock_owner = false;
+ spin_unlock_bh(&csk->lock);
+}
+
+static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ int ret = 0;
+
+ wait_event_interruptible(csk->ack_waitq, cxgbit_lock_sock(csk));
+
+ if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
+ signal_pending(current))) {
+ __kfree_skb(skb);
+ __skb_queue_purge(&csk->ppodq);
+ ret = -1;
+ spin_lock_bh(&csk->lock);
+ if (csk->lock_owner) {
+ spin_unlock_bh(&csk->lock);
+ goto unlock;
+ }
+ spin_unlock_bh(&csk->lock);
+ return ret;
+ }
+
+ csk->write_seq += skb->len +
+ cxgbit_skcb_tx_extralen(skb);
+
+ skb_queue_splice_tail_init(&csk->ppodq, &csk->txq);
+ __skb_queue_tail(&csk->txq, skb);
+ cxgbit_push_tx_frames(csk);
+
+unlock:
+ cxgbit_unlock_sock(csk);
+ return ret;
+}
+
+static int
+cxgbit_map_skb(struct iscsi_cmd *cmd, struct sk_buff *skb, u32 data_offset,
+ u32 data_length)
+{
+ u32 i = 0, nr_frags = MAX_SKB_FRAGS;
+ u32 padding = ((-data_length) & 3);
+ struct scatterlist *sg;
+ struct page *page;
+ unsigned int page_off;
+
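+ /* Keep one frag slot free for the zero pad page appended below. */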
+ if (padding)
+ nr_frags--;
+
+ /*
+ * We know each entry in t_data_sg contains a page.
+ */
+ sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
+ page_off = (data_offset % PAGE_SIZE);
+
+ while (data_length && (i < nr_frags)) {
+ u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+
+ page = sg_page(sg);
+
+ get_page(page);
+ skb_fill_page_desc(skb, i, page, sg->offset + page_off,
+ cur_len);
+ skb->data_len += cur_len;
+ skb->len += cur_len;
+ skb->truesize += cur_len;
+
+ data_length -= cur_len;
+ page_off = 0;
+ sg = sg_next(sg);
+ i++;
+ }
+
+ if (data_length)
+ return -1;
+
+ if (padding) {
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return -1;
+ skb_fill_page_desc(skb, i, page, 0, padding);
+ skb->data_len += padding;
+ skb->len += padding;
+ skb->truesize += padding;
+ }
+
+ return 0;
+}
+
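+/*
+ * iSCSI segmentation offload (ISO): queue one large payload with a
+ * single template Data-In header plus a CPL_TX_DATA_ISO describing the
+ * slice; the adapter then emits up to max_iso_npdu back-to-back PDUs,
+ * generating the per-PDU headers and digests itself, which is why
+ * data_sn advances by num_pdu per loop iteration below.
+ */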
+static int
+cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
+ struct iscsi_datain_req *dr)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct sk_buff *skb;
+ struct iscsi_datain datain;
+ struct cxgbit_iso_info iso_info;
+ u32 data_length = cmd->se_cmd.data_length;
+ u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
+ u32 num_pdu, plen, tx_data = 0;
+ bool task_sense = !!(cmd->se_cmd.se_cmd_flags &
+ SCF_TRANSPORT_TASK_SENSE);
+ bool set_statsn = false;
+ int ret = -1;
+
+ while (data_length) {
+ num_pdu = (data_length + mrdsl - 1) / mrdsl;
+ if (num_pdu > csk->max_iso_npdu)
+ num_pdu = csk->max_iso_npdu;
+
+ plen = num_pdu * mrdsl;
+ if (plen > data_length)
+ plen = data_length;
+
+ skb = __cxgbit_alloc_skb(csk, 0, true);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ memset(skb->data, 0, ISCSI_HDR_LEN);
+ cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO;
+ cxgbit_skcb_submode(skb) |= (csk->submode &
+ CXGBIT_SUBMODE_DCRC);
+ cxgbit_skcb_tx_extralen(skb) = (num_pdu *
+ cxgbit_digest_len[cxgbit_skcb_submode(skb)]) +
+ ((num_pdu - 1) * ISCSI_HDR_LEN);
+
+ memset(&datain, 0, sizeof(struct iscsi_datain));
+ memset(&iso_info, 0, sizeof(iso_info));
+
+ if (!tx_data)
+ iso_info.flags |= CXGBIT_ISO_FSLICE;
+
+ if (!(data_length - plen)) {
+ iso_info.flags |= CXGBIT_ISO_LSLICE;
+ if (!task_sense) {
+ datain.flags = ISCSI_FLAG_DATA_STATUS;
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ cmd->stat_sn = conn->stat_sn++;
+ set_statsn = true;
+ }
+ }
+
+ iso_info.burst_len = num_pdu * mrdsl;
+ iso_info.mpdu = mrdsl;
+ iso_info.len = ISCSI_HDR_LEN + plen;
+
+ cxgbit_cpl_tx_data_iso(skb, &iso_info);
+
+ datain.offset = tx_data;
+ datain.data_sn = cmd->data_sn - 1;
+
+ iscsit_build_datain_pdu(cmd, conn, &datain,
+ (struct iscsi_data_rsp *)skb->data,
+ set_statsn);
+
+ ret = cxgbit_map_skb(cmd, skb, tx_data, plen);
+ if (unlikely(ret)) {
+ __kfree_skb(skb);
+ goto out;
+ }
+
+ ret = cxgbit_queue_skb(csk, skb);
+ if (unlikely(ret))
+ goto out;
+
+ tx_data += plen;
+ data_length -= plen;
+
+ cmd->read_data_done += plen;
+ cmd->data_sn += num_pdu;
+ }
+
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static int
+cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
+ const struct iscsi_datain *datain)
+{
+ struct sk_buff *skb;
+ int ret = 0;
+
+ skb = cxgbit_alloc_skb(csk, 0);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
+
+ if (datain->length) {
+ cxgbit_skcb_submode(skb) |= (csk->submode &
+ CXGBIT_SUBMODE_DCRC);
+ cxgbit_skcb_tx_extralen(skb) =
+ cxgbit_digest_len[cxgbit_skcb_submode(skb)];
+ }
+
+ ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length);
+ if (ret < 0) {
+ __kfree_skb(skb);
+ return ret;
+ }
+
+ return cxgbit_queue_skb(csk, skb);
+}
+
+static int
+cxgbit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct iscsi_datain_req *dr,
+ const struct iscsi_datain *datain)
+{
+ struct cxgbit_sock *csk = conn->context;
+ u32 data_length = cmd->se_cmd.data_length;
+ u32 padding = ((-data_length) & 3);
+ u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
+
+ if ((data_length > mrdsl) && (!dr->recovery) &&
+ (!padding) && (!datain->offset) && csk->max_iso_npdu) {
+ atomic_long_add(data_length - datain->length,
+ &conn->sess->tx_data_octets);
+ return cxgbit_tx_datain_iso(csk, cmd, dr);
+ }
+
+ return cxgbit_tx_datain(csk, cmd, datain);
+}
+
+static int
+cxgbit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ const void *data_buf, u32 data_buf_len)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct sk_buff *skb;
+ u32 padding = ((-data_buf_len) & 3);
+
+ skb = cxgbit_alloc_skb(csk, data_buf_len + padding);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
+
+ if (data_buf_len) {
+ u32 pad_bytes = 0;
+
+ skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len);
+
+ if (padding)
+ skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len,
+ &pad_bytes, padding);
+ }
+
+ cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[
+ cxgbit_skcb_submode(skb)];
+
+ return cxgbit_queue_skb(csk, skb);
+}
+
+int
+cxgbit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct iscsi_datain_req *dr, const void *buf, u32 buf_len)
+{
+ if (dr)
+ return cxgbit_xmit_datain_pdu(conn, cmd, dr, buf);
+ else
+ return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
+}
+
+int cxgbit_validate_params(struct iscsi_conn *conn)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct iscsi_param *param;
+ u32 max_xmitdsl;
+
+ param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH,
+ conn->param_list);
+ if (!param)
+ return -1;
+
+ if (kstrtou32(param->value, 0, &max_xmitdsl) < 0)
+ return -1;
+
+ if (max_xmitdsl > cdev->mdsl) {
+ if (iscsi_change_param_sprintf(
+ conn, "MaxXmitDataSegmentLength=%u", cdev->mdsl))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int cxgbit_set_digest(struct cxgbit_sock *csk)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_param *param;
+
+ param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", HEADERDIGEST);
+ return -1;
+ }
+
+ if (!strcmp(param->value, CRC32C))
+ csk->submode |= CXGBIT_SUBMODE_HCRC;
+
+ param = iscsi_find_param_from_key(DATADIGEST, conn->param_list);
+ if (!param) {
+ csk->submode = 0;
+ pr_err("param not found key %s\n", DATADIGEST);
+ return -1;
+ }
+
+ if (!strcmp(param->value, CRC32C))
+ csk->submode |= CXGBIT_SUBMODE_DCRC;
+
+ if (cxgbit_setup_conn_digest(csk)) {
+ csk->submode = 0;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_conn_ops *conn_ops = conn->conn_ops;
+ struct iscsi_param *param;
+ u32 mrdsl, mbl;
+ u32 max_npdu, max_iso_npdu;
+
+ if (conn->login->leading_connection) {
+ param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
+ conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", DATASEQUENCEINORDER);
+ return -1;
+ }
+
+ if (strcmp(param->value, YES))
+ return 0;
+
+ param = iscsi_find_param_from_key(DATAPDUINORDER,
+ conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", DATAPDUINORDER);
+ return -1;
+ }
+
+ if (strcmp(param->value, YES))
+ return 0;
+
+ param = iscsi_find_param_from_key(MAXBURSTLENGTH,
+ conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", MAXBURSTLENGTH);
+ return -1;
+ }
+
+ if (kstrtou32(param->value, 0, &mbl) < 0)
+ return -1;
+ } else {
+ if (!conn->sess->sess_ops->DataSequenceInOrder)
+ return 0;
+ if (!conn->sess->sess_ops->DataPDUInOrder)
+ return 0;
+
+ mbl = conn->sess->sess_ops->MaxBurstLength;
+ }
+
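+ /*
+ * Cap the PDUs per ISO burst twice: by how many MRDSL-sized PDUs
+ * fit in the negotiated MaxBurstLength, and by how many fit in the
+ * adapter's maximum ISO payload once each PDU's BHS and digest
+ * overhead is counted.
+ */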
+ mrdsl = conn_ops->MaxRecvDataSegmentLength;
+ max_npdu = mbl / mrdsl;
+
+ max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD /
+ (ISCSI_HDR_LEN + mrdsl +
+ cxgbit_digest_len[csk->submode]);
+
+ csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
+
+ if (csk->max_iso_npdu <= 1)
+ csk->max_iso_npdu = 0;
+
+ return 0;
+}
+
+static int cxgbit_set_params(struct iscsi_conn *conn)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm;
+ struct iscsi_conn_ops *conn_ops = conn->conn_ops;
+ struct iscsi_param *param;
+ u8 erl;
+
+ if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
+ conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;
+
+ if (conn->login->leading_connection) {
+ param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
+ conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", ERRORRECOVERYLEVEL);
+ return -1;
+ }
+ if (kstrtou8(param->value, 0, &erl) < 0)
+ return -1;
+ } else {
+ erl = conn->sess->sess_ops->ErrorRecoveryLevel;
+ }
+
+ if (!erl) {
+ if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
+ if (cxgbit_set_iso_npdu(csk))
+ return -1;
+ }
+
+ if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) {
+ if (cxgbit_setup_conn_pgidx(csk,
+ ppm->tformat.pgsz_idx_dflt))
+ return -1;
+ set_bit(CSK_DDP_ENABLE, &csk->com.flags);
+ }
+ }
+
+ if (cxgbit_set_digest(csk))
+ return -1;
+
+ return 0;
+}
+
+int
+cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+ u32 length)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct sk_buff *skb;
+ u32 padding_buf = 0;
+ u8 padding = ((-length) & 3);
+
+ skb = cxgbit_alloc_skb(csk, length + padding);
+ if (!skb)
+ return -ENOMEM;
+ skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN);
+ skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length);
+
+ if (padding)
+ skb_store_bits(skb, ISCSI_HDR_LEN + length,
+ &padding_buf, padding);
+
+ if (login->login_complete) {
+ if (cxgbit_set_params(conn)) {
+ kfree_skb(skb);
+ return -1;
+ }
+
+ set_bit(CSK_LOGIN_DONE, &csk->com.flags);
+ }
+
+ if (cxgbit_queue_skb(csk, skb))
+ return -1;
+
+ if ((!login->login_complete) && (!login->login_failed))
+ schedule_delayed_work(&conn->login_work, 0);
+
+ return 0;
+}
+
+static void
+cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
+ unsigned int nents)
+{
+ struct skb_seq_state st;
+ const u8 *buf;
+ unsigned int consumed = 0, buf_len;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb);
+
+ skb_prepare_seq_read(skb, pdu_cb->doffset,
+ pdu_cb->doffset + pdu_cb->dlen,
+ &st);
+
+ while (true) {
+ buf_len = skb_seq_read(consumed, &buf, &st);
+ if (!buf_len) {
+ skb_abort_seq_read(&st);
+ break;
+ }
+
+ consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
+ buf_len, consumed);
+ }
+}
+
+static struct iscsi_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev);
+ struct cxgbit_cmd *ccmd;
+ struct iscsi_cmd *cmd;
+
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+ if (!cmd) {
+ pr_err("Unable to allocate iscsi_cmd + cxgbit_cmd\n");
+ return NULL;
+ }
+
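+ /*
+ * Start with the no-DDP tag; when DDP is enabled the tag is
+ * expected to be replaced with a real ppod tag later (setup_ddp
+ * marks the command for that).
+ */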
+ ccmd = iscsit_priv_cmd(cmd);
+ ccmd->ttinfo.tag = ppm->tformat.no_ddp_mask;
+ ccmd->setup_ddp = true;
+
+ return cmd;
+}
+
+static int
+cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+ u32 length)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+
+ if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+ pr_err("ImmediateData CRC32C DataDigest error\n");
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " Immediate Data digest failure while"
+ " in ERL=0.\n");
+ iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
+ (unsigned char *)hdr);
+ return IMMEDIATE_DATA_CANNOT_RECOVER;
+ }
+
+ iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
+ (unsigned char *)hdr);
+ return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
+ }
+
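+ /*
+ * Fast path: when the whole payload sits in one skb frag (flagged
+ * by cxgbit_handle_scsi_cmd()), point the se_cmd SGL straight at
+ * that page instead of copying; otherwise fall back to copying the
+ * skb data into the pre-allocated SGL.
+ */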
+ if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
+ struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+ struct skb_shared_info *ssi = skb_shinfo(csk->skb);
+ skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];
+
+ sg_init_table(&ccmd->sg, 1);
+ sg_set_page(&ccmd->sg, dfrag->page.p, skb_frag_size(dfrag),
+ dfrag->page_offset);
+ get_page(dfrag->page.p);
+
+ cmd->se_cmd.t_data_sg = &ccmd->sg;
+ cmd->se_cmd.t_data_nents = 1;
+
+ ccmd->release = true;
+ } else {
+ struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
+ u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
+
+ cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents);
+ }
+
+ cmd->write_data_done += pdu_cb->dlen;
+
+ if (cmd->write_data_done == cmd->se_cmd.data_length) {
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+ cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+ spin_unlock_bh(&cmd->istate_lock);
+ }
+
+ return IMMEDIATE_DATA_NORMAL_OPERATION;
+}
+
+static int
+cxgbit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+ bool dump_payload)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+ /*
+ * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
+ */
+ if (dump_payload)
+ goto after_immediate_data;
+
+ immed_ret = cxgbit_handle_immediate_data(cmd, hdr,
+ cmd->first_burst_len);
+after_immediate_data:
+ if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
+ /*
+ * A PDU/CmdSN carrying Immediate Data passed
+ * DataCRC, check against ExpCmdSN/MaxCmdSN if
+ * Immediate Bit is not set.
+ */
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+ (unsigned char *)hdr,
+ hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return -1;
+
+ if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ target_put_sess_cmd(&cmd->se_cmd);
+ return 0;
+ } else if (cmd->unsolicited_data) {
+ iscsit_set_unsoliticed_dataout(cmd);
+ }
+
+ } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
+ /*
+ * Immediate Data failed DataCRC and ERL>=1,
+ * silently drop this PDU and let the initiator
+ * plug the CmdSN gap.
+ *
+ * FIXME: Send Unsolicited NOPIN with reserved
+ * TTT here to help the initiator figure out
+ * the missing CmdSN, although they should be
+ * intelligent enough to determine the missing
+ * CmdSN and issue a retry to plug the sequence.
+ */
+ cmd->i_state = ISTATE_REMOVE;
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+ } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
+ return -1;
+
+ return 0;
+}
+
+static int
+cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr;
+ int rc;
+ bool dump_payload = false;
+
+ rc = iscsit_setup_scsi_cmd(conn, cmd, (unsigned char *)hdr);
+ if (rc < 0)
+ return rc;
+
+ if (pdu_cb->dlen && (pdu_cb->dlen == cmd->se_cmd.data_length) &&
+ (pdu_cb->nr_dfrags == 1))
+ cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+
+ rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
+ if (rc < 0)
+ return 0;
+ else if (rc > 0)
+ dump_payload = true;
+
+ if (!pdu_cb->dlen)
+ return 0;
+
+ return cxgbit_get_immediate_data(cmd, hdr, dump_payload);
+}
+
+static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
+{
+ struct scatterlist *sg_start;
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_cmd *cmd = NULL;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
+ u32 data_offset = be32_to_cpu(hdr->offset);
+ u32 data_len = pdu_cb->dlen;
+ int rc, sg_nents, sg_off;
+ bool dcrc_err = false;
+
+ rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
+ if (rc < 0)
+ return rc;
+ else if (!cmd)
+ return 0;
+
+ if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+ pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
+ " DataSN: 0x%08x\n",
+ hdr->itt, hdr->offset, data_len,
+ hdr->datasn);
+
+ dcrc_err = true;
+ goto check_payload;
+ }
+
+ pr_debug("DataOut data_len: %u, "
+ "write_data_done: %u, data_length: %u\n",
+ data_len, cmd->write_data_done,
+ cmd->se_cmd.data_length);
+
+ if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
+ sg_off = data_offset / PAGE_SIZE;
+ sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+ sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE));
+
+ cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents);
+ }
+
+check_payload:
+
+ rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr;
+ unsigned char *ping_data = NULL;
+ u32 payload_length = pdu_cb->dlen;
+ int ret;
+
+ ret = iscsit_setup_nop_out(conn, cmd, hdr);
+ if (ret < 0)
+ return 0;
+
+ if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " NOPOUT Ping DataCRC failure while in"
+ " ERL=0.\n");
+ ret = -1;
+ goto out;
+ } else {
+ /*
+ * drop this PDU and let the
+ * initiator plug the CmdSN gap.
+ */
+ pr_info("Dropping NOPOUT"
+ " Command CmdSN: 0x%08x due to"
+ " DataCRC error.\n", hdr->cmdsn);
+ ret = 0;
+ goto out;
+ }
+ }
+
+ /*
+ * Handle NOP-OUT payload for traditional iSCSI sockets
+ */
+ if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+ ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
+ if (!ping_data) {
+ pr_err("Unable to allocate memory for"
+ " NOPOUT ping data.\n");
+ ret = -1;
+ goto out;
+ }
+
+ skb_copy_bits(csk->skb, pdu_cb->doffset,
+ ping_data, payload_length);
+
+ ping_data[payload_length] = '\0';
+ /*
+ * Attach ping data to struct iscsi_cmd->buf_ptr.
+ */
+ cmd->buf_ptr = ping_data;
+ cmd->buf_ptr_size = payload_length;
+
+ pr_debug("Got %u bytes of NOPOUT ping"
+ " data.\n", payload_length);
+ pr_debug("Ping Data: \"%s\"\n", ping_data);
+ }
+
+ return iscsit_process_nop_out(conn, cmd, hdr);
+out:
+ if (cmd)
+ iscsit_free_cmd(cmd, false);
+ return ret;
+}
+
+static int
+cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr;
+ u32 payload_length = pdu_cb->dlen;
+ int rc;
+ unsigned char *text_in = NULL;
+
+ rc = iscsit_setup_text_cmd(conn, cmd, hdr);
+ if (rc < 0)
+ return rc;
+
+ if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " Text Data digest failure while in"
+ " ERL=0.\n");
+ goto reject;
+ } else {
+ /*
+ * drop this PDU and let the
+ * initiator plug the CmdSN gap.
+ */
+ pr_info("Dropping Text"
+ " Command CmdSN: 0x%08x due to"
+ " DataCRC error.\n", hdr->cmdsn);
+ return 0;
+ }
+ }
+
+ if (payload_length) {
+ text_in = kzalloc(payload_length, GFP_KERNEL);
+ if (!text_in) {
+ pr_err("Unable to allocate text_in of payload_length: %u\n",
+ payload_length);
+ return -ENOMEM;
+ }
+ skb_copy_bits(csk->skb, pdu_cb->doffset,
+ text_in, payload_length);
+
+ text_in[payload_length - 1] = '\0';
+
+ cmd->text_in_ptr = text_in;
+ }
+
+ return iscsit_process_text_cmd(conn, cmd, hdr);
+
+reject:
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+ pdu_cb->hdr);
+}
+
+static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk)
+{
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr;
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_cmd *cmd = NULL;
+ u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
+ int ret = -EINVAL;
+
+ switch (opcode) {
+ case ISCSI_OP_SCSI_CMD:
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+
+ ret = cxgbit_handle_scsi_cmd(csk, cmd);
+ break;
+ case ISCSI_OP_SCSI_DATA_OUT:
+ ret = cxgbit_handle_iscsi_dataout(csk);
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+ }
+
+ ret = cxgbit_handle_nop_out(csk, cmd);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+
+ ret = iscsit_handle_task_mgt_cmd(conn, cmd,
+ (unsigned char *)hdr);
+ break;
+ case ISCSI_OP_TEXT:
+ if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+ cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+ if (!cmd)
+ goto reject;
+ } else {
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+ }
+
+ ret = cxgbit_handle_text_cmd(csk, cmd);
+ break;
+ case ISCSI_OP_LOGOUT:
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+
+ ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
+ if (ret > 0)
+ wait_for_completion_timeout(&conn->conn_logout_comp,
+ SECONDS_FOR_LOGOUT_COMP
+ * HZ);
+ break;
+ case ISCSI_OP_SNACK:
+ ret = iscsit_handle_snack(conn, (unsigned char *)hdr);
+ break;
+ default:
+ pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
+ dump_stack();
+ break;
+ }
+
+ return ret;
+
+reject:
+ return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ (unsigned char *)hdr);
+}
+
+static int cxgbit_rx_opcode(struct cxgbit_sock *csk)
+{
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_hdr *hdr = pdu_cb->hdr;
+ u8 opcode;
+
+ if (pdu_cb->flags & PDUCBF_RX_HCRC_ERR) {
+ atomic_long_inc(&conn->sess->conn_digest_errors);
+ goto transport_err;
+ }
+
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
+ goto transport_err;
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ if (conn->sess->sess_ops->SessionType &&
+ ((!(opcode & ISCSI_OP_TEXT)) ||
+ (!(opcode & ISCSI_OP_LOGOUT)))) {
+ pr_err("Received illegal iSCSI Opcode: 0x%02x"
+ " while in Discovery Session, rejecting.\n", opcode);
+ iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+ goto transport_err;
+ }
+
+ if (cxgbit_target_rx_opcode(csk) < 0)
+ goto transport_err;
+
+ return 0;
+
+transport_err:
+ return -1;
+}
+
+static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_login *login = conn->login;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_login_req *login_req;
+
+ login_req = (struct iscsi_login_req *)login->req;
+ memcpy(login_req, pdu_cb->hdr, sizeof(*login_req));
+
+ pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+ " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
+ login_req->flags, login_req->itt, login_req->cmdsn,
+ login_req->exp_statsn, login_req->cid, pdu_cb->dlen);
+ /*
+ * Set up the initial iscsi_login values from the leading
+ * login request PDU.
+ */
+ if (login->first_request) {
+ login_req = (struct iscsi_login_req *)login->req;
+ login->leading_connection = (!login_req->tsih) ? 1 : 0;
+ login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(
+ login_req->flags);
+ login->version_min = login_req->min_version;
+ login->version_max = login_req->max_version;
+ memcpy(login->isid, login_req->isid, 6);
+ login->cmd_sn = be32_to_cpu(login_req->cmdsn);
+ login->init_task_tag = login_req->itt;
+ login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
+ login->cid = be16_to_cpu(login_req->cid);
+ login->tsih = be16_to_cpu(login_req->tsih);
+ }
+
+ if (iscsi_target_check_login_request(conn, login) < 0)
+ return -1;
+
+ memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
+ skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen);
+
+ return 0;
+}
+
+static int
+cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx)
+{
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx);
+ int ret;
+
+ cxgbit_rx_pdu_cb(skb) = pdu_cb;
+
+ csk->skb = skb;
+
+ if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) {
+ ret = cxgbit_rx_login_pdu(csk);
+ set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
+ } else {
+ ret = cxgbit_rx_opcode(csk);
+ }
+
+ return ret;
+}
+
+static void cxgbit_lro_skb_dump(struct sk_buff *skb)
+{
+ struct skb_shared_info *ssi = skb_shinfo(skb);
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
+ u8 i;
+
+ pr_info("skb 0x%p, head 0x%p, 0x%p, len %u,%u, frags %u.\n",
+ skb, skb->head, skb->data, skb->len, skb->data_len,
+ ssi->nr_frags);
+ pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n",
+ skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen);
+
+ for (i = 0; i < lro_cb->pdu_idx; i++, pdu_cb++)
+ pr_info("skb 0x%p, pdu %d, %u, f 0x%x, seq 0x%x, dcrc 0x%x, "
+ "frags %u.\n",
+ skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq,
+ pdu_cb->ddigest, pdu_cb->frags);
+ for (i = 0; i < ssi->nr_frags; i++)
+ pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
+ skb, i, ssi->frags[i].page_offset, ssi->frags[i].size);
+}
+
+static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb = csk->lro_hskb;
+ struct skb_shared_info *ssi = skb_shinfo(skb);
+ u8 i;
+
+ memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
+ for (i = 0; i < ssi->nr_frags; i++)
+ put_page(skb_frag_page(&ssi->frags[i]));
+ ssi->nr_frags = 0;
+}
+
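+/*
+ * An iSCSI PDU may straddle LRO skbs: the header, data, and status
+ * CPLs for one PDU can arrive split across flushes. Incomplete leading
+ * slices are accumulated into csk->lro_hskb by copying frag
+ * descriptors (taking page refs) until the status flag completes the
+ * PDU, at which point it is processed and the hold skb reset.
+ */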
+static void
+cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
+{
+ struct sk_buff *hskb = csk->lro_hskb;
+ struct cxgbit_lro_pdu_cb *hpdu_cb = cxgbit_skb_lro_pdu_cb(hskb, 0);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx);
+ struct skb_shared_info *hssi = skb_shinfo(hskb);
+ struct skb_shared_info *ssi = skb_shinfo(skb);
+ unsigned int len = 0;
+
+ if (pdu_cb->flags & PDUCBF_RX_HDR) {
+ hpdu_cb->flags = pdu_cb->flags;
+ hpdu_cb->seq = pdu_cb->seq;
+ hpdu_cb->hdr = pdu_cb->hdr;
+ hpdu_cb->hlen = pdu_cb->hlen;
+
+ memcpy(&hssi->frags[0], &ssi->frags[pdu_cb->hfrag_idx],
+ sizeof(skb_frag_t));
+
+ get_page(skb_frag_page(&hssi->frags[0]));
+ hssi->nr_frags = 1;
+ hpdu_cb->frags = 1;
+ hpdu_cb->hfrag_idx = 0;
+
+ len = hssi->frags[0].size;
+ hskb->len = len;
+ hskb->data_len = len;
+ hskb->truesize = len;
+ }
+
+ if (pdu_cb->flags & PDUCBF_RX_DATA) {
+ u8 hfrag_idx = 1, i;
+
+ hpdu_cb->flags |= pdu_cb->flags;
+
+ len = 0;
+ for (i = 0; i < pdu_cb->nr_dfrags; hfrag_idx++, i++) {
+ memcpy(&hssi->frags[hfrag_idx],
+ &ssi->frags[pdu_cb->dfrag_idx + i],
+ sizeof(skb_frag_t));
+
+ get_page(skb_frag_page(&hssi->frags[hfrag_idx]));
+
+ len += hssi->frags[hfrag_idx].size;
+
+ hssi->nr_frags++;
+ hpdu_cb->frags++;
+ }
+
+ hpdu_cb->dlen = pdu_cb->dlen;
+ hpdu_cb->doffset = hpdu_cb->hlen;
+ hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags;
+ hpdu_cb->dfrag_idx = 1;
+ hskb->len += len;
+ hskb->data_len += len;
+ hskb->truesize += len;
+ }
+
+ if (pdu_cb->flags & PDUCBF_RX_STATUS) {
+ hpdu_cb->flags |= pdu_cb->flags;
+
+ if (hpdu_cb->flags & PDUCBF_RX_DATA)
+ hpdu_cb->flags &= ~PDUCBF_RX_DATA_DDPD;
+
+ hpdu_cb->ddigest = pdu_cb->ddigest;
+ hpdu_cb->pdulen = pdu_cb->pdulen;
+ }
+}
+
+static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
+ u8 pdu_idx = 0, last_idx = 0;
+ int ret = 0;
+
+ if (!pdu_cb->complete) {
+ cxgbit_lro_skb_merge(csk, skb, 0);
+
+ if (pdu_cb->flags & PDUCBF_RX_STATUS) {
+ struct sk_buff *hskb = csk->lro_hskb;
+
+ ret = cxgbit_process_iscsi_pdu(csk, hskb, 0);
+
+ cxgbit_lro_hskb_reset(csk);
+
+ if (ret < 0)
+ goto out;
+ }
+
+ pdu_idx = 1;
+ }
+
+ if (lro_cb->pdu_idx)
+ last_idx = lro_cb->pdu_idx - 1;
+
+ for (; pdu_idx <= last_idx; pdu_idx++) {
+ ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx);
+ if (ret < 0)
+ goto out;
+ }
+
+ if ((!lro_cb->complete) && lro_cb->pdu_idx)
+ cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx);
+
+out:
+ return ret;
+}
+
+static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
+ int ret = -1;
+
+ if ((pdu_cb->flags & PDUCBF_RX_HDR) &&
+ (pdu_cb->seq != csk->rcv_nxt)) {
+ pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n",
+ csk, csk->tid, pdu_cb->seq, csk->rcv_nxt);
+ cxgbit_lro_skb_dump(skb);
+ return ret;
+ }
+
+ csk->rcv_nxt += lro_cb->pdu_totallen;
+
+ ret = cxgbit_process_lro_skb(csk, skb);
+
+ csk->rx_credits += lro_cb->pdu_totallen;
+
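+ /*
+ * Returning RX credits reopens the adapter's receive window;
+ * batch the update until a quarter of the window has been
+ * consumed to limit CPL_RX_DATA_ACK traffic.
+ */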
+ if (csk->rx_credits >= (csk->rcv_win / 4))
+ cxgbit_rx_data_ack(csk);
+
+ return ret;
+}
+
+static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ int ret = -1;
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO))
+ ret = cxgbit_rx_lro_skb(csk, skb);
+
+ __kfree_skb(skb);
+ return ret;
+}
+
+static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq)
+{
+ spin_lock_bh(&csk->rxq.lock);
+ if (skb_queue_len(&csk->rxq)) {
+ skb_queue_splice_init(&csk->rxq, rxq);
+ spin_unlock_bh(&csk->rxq.lock);
+ return true;
+ }
+ spin_unlock_bh(&csk->rxq.lock);
+ return false;
+}
+
+static int cxgbit_wait_rxq(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ struct sk_buff_head rxq;
+
+ skb_queue_head_init(&rxq);
+
+ wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));
+
+ if (signal_pending(current))
+ goto out;
+
+ while ((skb = __skb_dequeue(&rxq))) {
+ if (cxgbit_rx_skb(csk, skb))
+ goto out;
+ }
+
+ return 0;
+out:
+ __skb_queue_purge(&rxq);
+ return -1;
+}
+
+int cxgbit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ struct cxgbit_sock *csk = conn->context;
+ int ret = -1;
+
+ while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) {
+ ret = cxgbit_wait_rxq(csk);
+ if (ret) {
+ clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+void cxgbit_get_rx_pdu(struct iscsi_conn *conn)
+{
+ struct cxgbit_sock *csk = conn->context;
+
+ while (!kthread_should_stop()) {
+ iscsit_thread_check_cpumask(conn, current, 0);
+ if (cxgbit_wait_rxq(csk))
+ return;
+ }
+}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 961202f4e..50f3d3a0d 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -478,16 +478,16 @@ int iscsit_del_np(struct iscsi_np *np)
return 0;
}
-static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
-static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+static void iscsit_get_rx_pdu(struct iscsi_conn *);
-static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
return 0;
}
+EXPORT_SYMBOL(iscsit_queue_rsp);
-static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
@@ -498,6 +498,169 @@ static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
__iscsit_free_cmd(cmd, scsi_cmd, true);
}
+EXPORT_SYMBOL(iscsit_aborted_task);
+
+static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
+ u32, u32, u8 *, u8 *);
+static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);
+
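+/*
+ * Consolidated TCP transmit path: the per-PDU iovec and digest
+ * assembly that previously lived in each iscsit_send_*() routine now
+ * sits behind the iscsit_xmit_pdu() transport op, letting offload
+ * transports such as cxgbit supply their own implementation.
+ */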
+static int
+iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ const void *data_buf, u32 data_buf_len)
+{
+ struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
+ struct kvec *iov;
+ u32 niov = 0, tx_size = ISCSI_HDR_LEN;
+ int ret;
+
+ iov = &cmd->iov_misc[0];
+ iov[niov].iov_base = cmd->pdu;
+ iov[niov++].iov_len = ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL,
+ (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest"
+ " to opcode 0x%x 0x%08x\n",
+ hdr->opcode, *header_digest);
+ }
+
+ if (data_buf_len) {
+ u32 padding = ((-data_buf_len) & 3);
+
+ iov[niov].iov_base = (void *)data_buf;
+ iov[niov++].iov_len = data_buf_len;
+ tx_size += data_buf_len;
+
+ if (padding != 0) {
+ iov[niov].iov_base = &cmd->pad_bytes;
+ iov[niov++].iov_len = padding;
+ tx_size += padding;
+ pr_debug("Attaching %u additional"
+ " padding bytes.\n", padding);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
+ data_buf, data_buf_len,
+ padding,
+ (u8 *)&cmd->pad_bytes,
+ (u8 *)&cmd->data_crc);
+
+ iov[niov].iov_base = &cmd->data_crc;
+ iov[niov++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attached DataDigest for %u"
+ " bytes opcode 0x%x, CRC 0x%08x\n",
+ data_buf_len, hdr->opcode, cmd->data_crc);
+ }
+ }
+
+ cmd->iov_misc_count = niov;
+ cmd->tx_size = tx_size;
+
+ ret = iscsit_send_tx_data(cmd, conn, 1);
+ if (ret < 0) {
+ iscsit_tx_thread_wait_for_tcp(conn);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iscsit_map_iovec(struct iscsi_cmd *, struct kvec *, u32, u32);
+static void iscsit_unmap_iovec(struct iscsi_cmd *);
+static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsi_cmd *,
+ u32, u32, u32, u8 *);
+static int
+iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ const struct iscsi_datain *datain)
+{
+ struct kvec *iov;
+ u32 iov_count = 0, tx_size = 0;
+ int ret, iov_ret;
+
+ iov = &cmd->iov_data[0];
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
+ ISCSI_HDR_LEN, 0, NULL,
+ (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
+ *header_digest);
+ }
+
+ iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
+ datain->offset, datain->length);
+ if (iov_ret < 0)
+ return -1;
+
+ iov_count += iov_ret;
+ tx_size += datain->length;
+
+ cmd->padding = ((-datain->length) & 3);
+ if (cmd->padding) {
+ iov[iov_count].iov_base = cmd->pad_bytes;
+ iov[iov_count++].iov_len = cmd->padding;
+ tx_size += cmd->padding;
+
+ pr_debug("Attaching %u padding bytes\n", cmd->padding);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
+ cmd, datain->offset,
+ datain->length,
+ cmd->padding,
+ cmd->pad_bytes);
+
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
+ datain->length + cmd->padding, cmd->data_crc);
+ }
+
+ cmd->iov_data_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ ret = iscsit_fe_sendpage_sg(cmd, conn);
+
+ iscsit_unmap_iovec(cmd);
+
+ if (ret < 0) {
+ iscsit_tx_thread_wait_for_tcp(conn);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iscsit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct iscsi_datain_req *dr, const void *buf,
+ u32 buf_len)
+{
+ if (dr)
+ return iscsit_xmit_datain_pdu(conn, cmd, buf);
+ else
+ return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
+}
static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
{
@@ -507,6 +670,7 @@ static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
static struct iscsit_transport iscsi_target_transport = {
.name = "iSCSI/TCP",
.transport_type = ISCSI_TCP,
+ .rdma_shutdown = false,
.owner = NULL,
.iscsit_setup_np = iscsit_setup_np,
.iscsit_accept_np = iscsit_accept_np,
@@ -519,6 +683,8 @@ static struct iscsit_transport iscsi_target_transport = {
.iscsit_queue_data_in = iscsit_queue_rsp,
.iscsit_queue_status = iscsit_queue_rsp,
.iscsit_aborted_task = iscsit_aborted_task,
+ .iscsit_xmit_pdu = iscsit_xmit_pdu,
+ .iscsit_get_rx_pdu = iscsit_get_rx_pdu,
.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
};
@@ -634,7 +800,7 @@ static void __exit iscsi_target_cleanup_module(void)
kfree(iscsit_global);
}
-static int iscsit_add_reject(
+int iscsit_add_reject(
struct iscsi_conn *conn,
u8 reason,
unsigned char *buf)
@@ -664,6 +830,7 @@ static int iscsit_add_reject(
return -1;
}
+EXPORT_SYMBOL(iscsit_add_reject);
static int iscsit_add_reject_from_cmd(
struct iscsi_cmd *cmd,
@@ -719,6 +886,7 @@ int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
{
return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
}
+EXPORT_SYMBOL(iscsit_reject_cmd);
/*
* Map some portion of the allocated scatterlist to an iovec, suitable for
@@ -737,7 +905,14 @@ static int iscsit_map_iovec(
/*
* We know each entry in t_data_sg contains a page.
*/
- sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
+ u32 ent = data_offset / PAGE_SIZE;
+
+ if (ent >= cmd->se_cmd.t_data_nents) {
+ pr_err("Initial page entry out-of-bounds\n");
+ return -1;
+ }
+
+ sg = &cmd->se_cmd.t_data_sg[ent];
page_off = (data_offset % PAGE_SIZE);
cmd->first_data_sg = sg;
@@ -2335,7 +2510,7 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
EXPORT_SYMBOL(iscsit_handle_logout_cmd);
-static int iscsit_handle_snack(
+int iscsit_handle_snack(
struct iscsi_conn *conn,
unsigned char *buf)
{
@@ -2388,6 +2563,7 @@ static int iscsit_handle_snack(
return 0;
}
+EXPORT_SYMBOL(iscsit_handle_snack);
static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
{
@@ -2534,7 +2710,6 @@ static int iscsit_send_conn_drop_async_message(
{
struct iscsi_async *hdr;
- cmd->tx_size = ISCSI_HDR_LEN;
cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
hdr = (struct iscsi_async *) cmd->pdu;
@@ -2552,25 +2727,11 @@ static int iscsit_send_conn_drop_async_message(
hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- cmd->tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32C HeaderDigest to"
- " Async Message 0x%08x\n", *header_digest);
- }
-
- cmd->iov_misc[0].iov_base = cmd->pdu;
- cmd->iov_misc[0].iov_len = cmd->tx_size;
- cmd->iov_misc_count = 1;
-
pr_debug("Sending Connection Dropped Async Message StatSN:"
" 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
cmd->logout_cid, conn->cid);
- return 0;
+
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
}
static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
@@ -2583,7 +2744,7 @@ static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
}
}
-static void
+void
iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
bool set_statsn)
@@ -2627,15 +2788,14 @@ iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
ntohl(hdr->offset), datain->length, conn->cid);
}
+EXPORT_SYMBOL(iscsit_build_datain_pdu);
static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
struct iscsi_datain datain;
struct iscsi_datain_req *dr;
- struct kvec *iov;
- u32 iov_count = 0, tx_size = 0;
- int eodr = 0, ret, iov_ret;
+ int eodr = 0, ret;
bool set_statsn = false;
memset(&datain, 0, sizeof(struct iscsi_datain));
@@ -2677,64 +2837,9 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
- iov = &cmd->iov_data[0];
- iov[iov_count].iov_base = cmd->pdu;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
- tx_size += ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
-
- pr_debug("Attaching CRC32 HeaderDigest"
- " for DataIN PDU 0x%08x\n", *header_digest);
- }
-
- iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
- datain.offset, datain.length);
- if (iov_ret < 0)
- return -1;
-
- iov_count += iov_ret;
- tx_size += datain.length;
-
- cmd->padding = ((-datain.length) & 3);
- if (cmd->padding) {
- iov[iov_count].iov_base = cmd->pad_bytes;
- iov[iov_count++].iov_len = cmd->padding;
- tx_size += cmd->padding;
-
- pr_debug("Attaching %u padding bytes\n",
- cmd->padding);
- }
- if (conn->conn_ops->DataDigest) {
- cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash, cmd,
- datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
-
- iov[iov_count].iov_base = &cmd->data_crc;
- iov[iov_count++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
-
- pr_debug("Attached CRC32C DataDigest %d bytes, crc"
- " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
- }
-
- cmd->iov_data_count = iov_count;
- cmd->tx_size = tx_size;
-
- ret = iscsit_fe_sendpage_sg(cmd, conn);
-
- iscsit_unmap_iovec(cmd);
-
- if (ret < 0) {
- iscsit_tx_thread_wait_for_tcp(conn);
+ ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0);
+ if (ret < 0)
return ret;
- }
if (dr->dr_complete) {
eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
@@ -2843,34 +2948,14 @@ EXPORT_SYMBOL(iscsit_build_logout_rsp);
static int
iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
- struct kvec *iov;
- int niov = 0, tx_size, rc;
+ int rc;
rc = iscsit_build_logout_rsp(cmd, conn,
(struct iscsi_logout_rsp *)&cmd->pdu[0]);
if (rc < 0)
return rc;
- tx_size = ISCSI_HDR_LEN;
- iov = &cmd->iov_misc[0];
- iov[niov].iov_base = cmd->pdu;
- iov[niov++].iov_len = ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, &cmd->pdu[0],
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32C HeaderDigest to"
- " Logout Response 0x%08x\n", *header_digest);
- }
- cmd->iov_misc_count = niov;
- cmd->tx_size = tx_size;
-
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
}
void
@@ -2910,34 +2995,16 @@ static int iscsit_send_unsolicited_nopin(
int want_response)
{
struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
- int tx_size = ISCSI_HDR_LEN, ret;
+ int ret;
iscsit_build_nopin_rsp(cmd, conn, hdr, false);
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32C HeaderDigest to"
- " NopIN 0x%08x\n", *header_digest);
- }
-
- cmd->iov_misc[0].iov_base = cmd->pdu;
- cmd->iov_misc[0].iov_len = tx_size;
- cmd->iov_misc_count = 1;
- cmd->tx_size = tx_size;
-
pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
" 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
- ret = iscsit_send_tx_data(cmd, conn, 1);
- if (ret < 0) {
- iscsit_tx_thread_wait_for_tcp(conn);
+ ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
+ if (ret < 0)
return ret;
- }
spin_lock_bh(&cmd->istate_lock);
cmd->i_state = want_response ?
@@ -2951,75 +3018,24 @@ static int
iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
- struct kvec *iov;
- u32 padding = 0;
- int niov = 0, tx_size;
iscsit_build_nopin_rsp(cmd, conn, hdr, true);
- tx_size = ISCSI_HDR_LEN;
- iov = &cmd->iov_misc[0];
- iov[niov].iov_base = cmd->pdu;
- iov[niov++].iov_len = ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32C HeaderDigest"
- " to NopIn 0x%08x\n", *header_digest);
- }
-
/*
* NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
* NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
*/
- if (cmd->buf_ptr_size) {
- iov[niov].iov_base = cmd->buf_ptr;
- iov[niov++].iov_len = cmd->buf_ptr_size;
- tx_size += cmd->buf_ptr_size;
-
- pr_debug("Echoing back %u bytes of ping"
- " data.\n", cmd->buf_ptr_size);
-
- padding = ((-cmd->buf_ptr_size) & 3);
- if (padding != 0) {
- iov[niov].iov_base = &cmd->pad_bytes;
- iov[niov++].iov_len = padding;
- tx_size += padding;
- pr_debug("Attaching %u additional"
- " padding bytes.\n", padding);
- }
- if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
- cmd->buf_ptr, cmd->buf_ptr_size,
- padding, (u8 *)&cmd->pad_bytes,
- (u8 *)&cmd->data_crc);
-
- iov[niov].iov_base = &cmd->data_crc;
- iov[niov++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attached DataDigest for %u"
- " bytes of ping data, CRC 0x%08x\n",
- cmd->buf_ptr_size, cmd->data_crc);
- }
- }
+ pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size);
- cmd->iov_misc_count = niov;
- cmd->tx_size = tx_size;
-
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
+ cmd->buf_ptr,
+ cmd->buf_ptr_size);
}
static int iscsit_send_r2t(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn)
{
- int tx_size = 0;
struct iscsi_r2t *r2t;
struct iscsi_r2t_rsp *hdr;
int ret;
@@ -3035,7 +3051,10 @@ static int iscsit_send_r2t(
int_to_scsilun(cmd->se_cmd.orig_fe_lun,
(struct scsi_lun *)&hdr->lun);
hdr->itt = cmd->init_task_tag;
- r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
+ if (conn->conn_transport->iscsit_get_r2t_ttt)
+ conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t);
+ else
+ r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
hdr->statsn = cpu_to_be32(conn->stat_sn);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
@@ -3044,38 +3063,18 @@ static int iscsit_send_r2t(
hdr->data_offset = cpu_to_be32(r2t->offset);
hdr->data_length = cpu_to_be32(r2t->xfer_len);
- cmd->iov_misc[0].iov_base = cmd->pdu;
- cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
- tx_size += ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for R2T"
- " PDU 0x%08x\n", *header_digest);
- }
-
pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
" 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
(!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
r2t->offset, r2t->xfer_len, conn->cid);
- cmd->iov_misc_count = 1;
- cmd->tx_size = tx_size;
-
spin_lock_bh(&cmd->r2t_lock);
r2t->sent_r2t = 1;
spin_unlock_bh(&cmd->r2t_lock);
- ret = iscsit_send_tx_data(cmd, conn, 1);
+ ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
if (ret < 0) {
- iscsit_tx_thread_wait_for_tcp(conn);
return ret;
}
@@ -3166,6 +3165,7 @@ int iscsit_build_r2ts_for_cmd(
return 0;
}
+EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd);
void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
@@ -3204,18 +3204,12 @@ EXPORT_SYMBOL(iscsit_build_rsp_pdu);
static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
- struct kvec *iov;
- u32 padding = 0, tx_size = 0;
- int iov_count = 0;
bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
+ void *data_buf = NULL;
+ u32 padding = 0, data_buf_len = 0;
iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
- iov = &cmd->iov_misc[0];
- iov[iov_count].iov_base = cmd->pdu;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
- tx_size += ISCSI_HDR_LEN;
-
/*
* Attach SENSE DATA payload to iSCSI Response PDU
*/
@@ -3227,56 +3221,23 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
padding = -(cmd->se_cmd.scsi_sense_length) & 3;
hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
- iov[iov_count].iov_base = cmd->sense_buffer;
- iov[iov_count++].iov_len =
- (cmd->se_cmd.scsi_sense_length + padding);
- tx_size += cmd->se_cmd.scsi_sense_length;
+ data_buf = cmd->sense_buffer;
+ data_buf_len = cmd->se_cmd.scsi_sense_length + padding;
if (padding) {
memset(cmd->sense_buffer +
cmd->se_cmd.scsi_sense_length, 0, padding);
- tx_size += padding;
pr_debug("Adding %u bytes of padding to"
" SENSE.\n", padding);
}
- if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
- cmd->sense_buffer,
- (cmd->se_cmd.scsi_sense_length + padding),
- 0, NULL, (u8 *)&cmd->data_crc);
-
- iov[iov_count].iov_base = &cmd->data_crc;
- iov[iov_count++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
-
- pr_debug("Attaching CRC32 DataDigest for"
- " SENSE, %u bytes CRC 0x%08x\n",
- (cmd->se_cmd.scsi_sense_length + padding),
- cmd->data_crc);
- }
-
pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
" Response PDU\n",
cmd->se_cmd.scsi_sense_length);
}
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for Response"
- " PDU 0x%08x\n", *header_digest);
- }
-
- cmd->iov_misc_count = iov_count;
- cmd->tx_size = tx_size;
-
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf,
+ data_buf_len);
}
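
The padding expression used above, -(len) & 3, yields the byte count needed
to reach the next 4-byte boundary that iSCSI requires for PDU segments. A
worked illustration (the helper name is chosen here only for the example):

	/* -(18) & 3 == 2, -(13) & 3 == 3, -(20) & 3 == 0 */
	static inline u32 iscsi_pad_len(u32 len)
	{
		return (-len) & 3;
	}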
static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
@@ -3323,30 +3284,10 @@ static int
iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
- u32 tx_size = 0;
iscsit_build_task_mgt_rsp(cmd, conn, hdr);
- cmd->iov_misc[0].iov_base = cmd->pdu;
- cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
- tx_size += ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for Task"
- " Mgmt Response PDU 0x%08x\n", *header_digest);
- }
-
- cmd->iov_misc_count = 1;
- cmd->tx_size = tx_size;
-
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
}
static bool iscsit_check_inaddr_any(struct iscsi_np *np)
@@ -3583,53 +3524,16 @@ static int iscsit_send_text_rsp(
struct iscsi_conn *conn)
{
struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
- struct kvec *iov;
- u32 tx_size = 0;
- int text_length, iov_count = 0, rc;
-
- rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);
- if (rc < 0)
- return rc;
-
- text_length = rc;
- iov = &cmd->iov_misc[0];
- iov[iov_count].iov_base = cmd->pdu;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
- iov[iov_count].iov_base = cmd->buf_ptr;
- iov[iov_count++].iov_len = text_length;
-
- tx_size += (ISCSI_HDR_LEN + text_length);
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for"
- " Text Response PDU 0x%08x\n", *header_digest);
- }
-
- if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
- cmd->buf_ptr, text_length,
- 0, NULL, (u8 *)&cmd->data_crc);
-
- iov[iov_count].iov_base = &cmd->data_crc;
- iov[iov_count++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
-
- pr_debug("Attaching DataDigest for %u bytes of text"
- " data, CRC 0x%08x\n", text_length,
- cmd->data_crc);
- }
+ int text_length;
- cmd->iov_misc_count = iov_count;
- cmd->tx_size = tx_size;
+ text_length = iscsit_build_text_rsp(cmd, conn, hdr,
+ conn->conn_transport->transport_type);
+ if (text_length < 0)
+ return text_length;
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
+ cmd->buf_ptr,
+ text_length);
}
void
@@ -3654,49 +3558,15 @@ static int iscsit_send_reject(
struct iscsi_conn *conn)
{
struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
- struct kvec *iov;
- u32 iov_count = 0, tx_size;
iscsit_build_reject(cmd, conn, hdr);
- iov = &cmd->iov_misc[0];
- iov[iov_count].iov_base = cmd->pdu;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
- iov[iov_count].iov_base = cmd->buf_ptr;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
-
- tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for"
- " REJECT PDU 0x%08x\n", *header_digest);
- }
-
- if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->buf_ptr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
-
- iov[iov_count].iov_base = &cmd->data_crc;
- iov[iov_count++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 DataDigest for REJECT"
- " PDU 0x%08x\n", cmd->data_crc);
- }
-
- cmd->iov_misc_count = iov_count;
- cmd->tx_size = tx_size;
-
pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
" CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
+ cmd->buf_ptr,
+ ISCSI_HDR_LEN);
}
void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
@@ -3724,33 +3594,7 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
cpumask_setall(conn->conn_cpumask);
}
-static inline void iscsit_thread_check_cpumask(
- struct iscsi_conn *conn,
- struct task_struct *p,
- int mode)
-{
- /*
- * mode == 1 signals iscsi_target_tx_thread() usage.
- * mode == 0 signals iscsi_target_rx_thread() usage.
- */
- if (mode == 1) {
- if (!conn->conn_tx_reset_cpumask)
- return;
- conn->conn_tx_reset_cpumask = 0;
- } else {
- if (!conn->conn_rx_reset_cpumask)
- return;
- conn->conn_rx_reset_cpumask = 0;
- }
- /*
- * Update the CPU mask for this single kthread so that
- * both TX and RX kthreads are scheduled to run on the
- * same CPU.
- */
- set_cpus_allowed_ptr(p, conn->conn_cpumask);
-}
-
-static int
+int
iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
int ret;
@@ -3792,6 +3636,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
err:
return -1;
}
+EXPORT_SYMBOL(iscsit_immediate_queue);
static int
iscsit_handle_immediate_queue(struct iscsi_conn *conn)
@@ -3816,7 +3661,7 @@ iscsit_handle_immediate_queue(struct iscsi_conn *conn)
return 0;
}
-static int
+int
iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
int ret;
@@ -3889,13 +3734,6 @@ check_rsp_state:
if (ret < 0)
goto err;
- if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
- iscsit_tx_thread_wait_for_tcp(conn);
- iscsit_unmap_iovec(cmd);
- goto err;
- }
- iscsit_unmap_iovec(cmd);
-
switch (state) {
case ISTATE_SEND_LOGOUTRSP:
if (!iscsit_logout_post_handler(cmd, conn))
@@ -3928,6 +3766,7 @@ check_rsp_state:
err:
return -1;
}
+EXPORT_SYMBOL(iscsit_response_queue);
static int iscsit_handle_response_queue(struct iscsi_conn *conn)
{
@@ -4087,36 +3926,12 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
return ret;
}
-int iscsi_target_rx_thread(void *arg)
+static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
{
- int ret, rc;
+ int ret;
u8 buffer[ISCSI_HDR_LEN], opcode;
u32 checksum = 0, digest = 0;
- struct iscsi_conn *conn = arg;
struct kvec iov;
- /*
- * Allow ourselves to be interrupted by SIGINT so that a
- * connection recovery / failure event can be triggered externally.
- */
- allow_signal(SIGINT);
- /*
- * Wait for iscsi_post_login_handler() to complete before allowing
- * incoming iscsi/tcp socket I/O, and/or failing the connection.
- */
- rc = wait_for_completion_interruptible(&conn->rx_login_comp);
- if (rc < 0 || iscsi_target_check_conn_state(conn))
- return 0;
-
- if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
- struct completion comp;
-
- init_completion(&comp);
- rc = wait_for_completion_interruptible(&comp);
- if (rc < 0)
- goto transport_err;
-
- goto transport_err;
- }
while (!kthread_should_stop()) {
/*
@@ -4134,7 +3949,7 @@ int iscsi_target_rx_thread(void *arg)
ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
if (ret != ISCSI_HDR_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
- goto transport_err;
+ return;
}
if (conn->conn_ops->HeaderDigest) {
@@ -4144,7 +3959,7 @@ int iscsi_target_rx_thread(void *arg)
ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
if (ret != ISCSI_CRC_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
- goto transport_err;
+ return;
}
iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
@@ -4168,7 +3983,7 @@ int iscsi_target_rx_thread(void *arg)
}
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
- goto transport_err;
+ return;
opcode = buffer[0] & ISCSI_OPCODE_MASK;
@@ -4179,15 +3994,38 @@ int iscsi_target_rx_thread(void *arg)
" while in Discovery Session, rejecting.\n", opcode);
iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
buffer);
- goto transport_err;
+ return;
}
ret = iscsi_target_rx_opcode(conn, buffer);
if (ret < 0)
- goto transport_err;
+ return;
}
+}
+
+int iscsi_target_rx_thread(void *arg)
+{
+ int rc;
+ struct iscsi_conn *conn = arg;
+
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+ * connection recovery / failure event can be triggered externally.
+ */
+ allow_signal(SIGINT);
+ /*
+ * Wait for iscsi_post_login_handler() to complete before allowing
+ * incoming iscsi/tcp socket I/O, and/or failing the connection.
+ */
+ rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+ if (rc < 0 || iscsi_target_check_conn_state(conn))
+ return 0;
+
+ if (!conn->conn_transport->iscsit_get_rx_pdu)
+ return 0;
+
+ conn->conn_transport->iscsit_get_rx_pdu(conn);
-transport_err:
if (!signal_pending(current))
atomic_set(&conn->transport_failed, 1);
iscsit_take_action_for_connection_exit(conn);
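
With the RX loop factored out into iscsit_get_rx_pdu(), the thread is reduced
to waiting for login completion and dispatching to the transport's
iscsit_get_rx_pdu hook. Pulling the hooks introduced by this patch together,
a transport registration might look like the hedged sketch below; the field
names are taken from the call sites in this patch, everything else (names,
module wiring) is assumed:

	static struct iscsit_transport example_transport = {
		.name			= "example",
		.transport_type		= ISCSI_TCP,	/* or a new enum value */
		.owner			= THIS_MODULE,
		.rdma_shutdown		= false,
		.iscsit_xmit_pdu	= example_xmit_pdu,
		.iscsit_get_rx_pdu	= example_get_rx_pdu,
		.iscsit_get_r2t_ttt	= example_get_r2t_ttt,		/* optional */
		.iscsit_validate_params	= example_validate_params,	/* optional */
	};

	static int __init example_init(void)
	{
		return iscsit_register_transport(&example_transport);
	}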
@@ -4240,16 +4078,17 @@ int iscsit_close_connection(
pr_debug("Closing iSCSI connection CID %hu on SID:"
" %u\n", conn->cid, sess->sid);
/*
- * Always up conn_logout_comp for the traditional TCP case just in case
- * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout
- * response never got sent because the connection failed.
+ * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD
+ * case just in case the RX Thread in iscsi_target_rx_opcode() is
+ * sleeping and the logout response never got sent because the
+ * connection failed.
*
* However for iser-target, isert_wait4logout() is using conn_logout_comp
* to signal logout response TX interrupt completion. Go ahead and skip
* this for iser since isert_rx_opcode() does not wait on logout failure,
* and to avoid iscsi_conn pointer dereference in iser-target code.
*/
- if (conn->conn_transport->transport_type == ISCSI_TCP)
+ if (!conn->conn_transport->rdma_shutdown)
complete(&conn->conn_logout_comp);
if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
@@ -4438,7 +4277,7 @@ int iscsit_close_connection(
if (!atomic_read(&sess->session_reinstatement) &&
atomic_read(&sess->session_fall_back_to_erl0)) {
spin_unlock_bh(&sess->conn_lock);
- target_put_session(sess->se_sess);
+ iscsit_close_session(sess);
return 0;
} else if (atomic_read(&sess->session_logout)) {
@@ -4467,6 +4306,10 @@ int iscsit_close_connection(
}
}
+/*
+ * If the iSCSI Session for the iSCSI Initiator Node exists,
+ * forcefully shutdown the iSCSI NEXUS.
+ */
int iscsit_close_session(struct iscsi_session *sess)
{
struct iscsi_portal_group *tpg = sess->tpg;
@@ -4556,7 +4399,7 @@ static void iscsit_logout_post_handler_closesession(
* always sleep waiting for RX/TX thread shutdown to complete
* within iscsit_close_connection().
*/
- if (conn->conn_transport->transport_type == ISCSI_TCP)
+ if (!conn->conn_transport->rdma_shutdown)
sleep = cmpxchg(&conn->tx_thread_active, true, false);
atomic_set(&conn->conn_logout_remove, 0);
@@ -4565,7 +4408,7 @@ static void iscsit_logout_post_handler_closesession(
iscsit_dec_conn_usage_count(conn);
iscsit_stop_session(sess, sleep, sleep);
iscsit_dec_session_usage_count(sess);
- target_put_session(sess->se_sess);
+ iscsit_close_session(sess);
}
static void iscsit_logout_post_handler_samecid(
@@ -4573,7 +4416,7 @@ static void iscsit_logout_post_handler_samecid(
{
int sleep = 1;
- if (conn->conn_transport->transport_type == ISCSI_TCP)
+ if (!conn->conn_transport->rdma_shutdown)
sleep = cmpxchg(&conn->tx_thread_active, true, false);
atomic_set(&conn->conn_logout_remove, 0);
@@ -4736,7 +4579,7 @@ int iscsit_free_session(struct iscsi_session *sess)
} else
spin_unlock_bh(&sess->conn_lock);
- target_put_session(sess->se_sess);
+ iscsit_close_session(sess);
return 0;
}
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 667406fcf..e116f0e84 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -293,7 +293,7 @@ static int chap_server_compute_md5(
pr_debug("[server] MD5 Digests do not match!\n\n");
goto out;
} else
- pr_debug("[server] MD5 Digests match, CHAP connetication"
+ pr_debug("[server] MD5 Digests match, CHAP connection"
" successful.\n\n");
/*
* One way authentication has succeeded, return now if mutual
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 97e5b69e0..923c032f0 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -43,14 +43,15 @@ static inline struct iscsi_tpg_np *to_iscsi_tpg_np(struct config_item *item)
return container_of(to_tpg_np(item), struct iscsi_tpg_np, se_tpg_np);
}
-static ssize_t lio_target_np_sctp_show(struct config_item *item, char *page)
+static ssize_t lio_target_np_driver_show(struct config_item *item, char *page,
+ enum iscsit_transport_type type)
{
struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
- struct iscsi_tpg_np *tpg_np_sctp;
+ struct iscsi_tpg_np *tpg_np_new;
ssize_t rb;
- tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
- if (tpg_np_sctp)
+ tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
+ if (tpg_np_new)
rb = sprintf(page, "1\n");
else
rb = sprintf(page, "0\n");
@@ -58,19 +59,20 @@ static ssize_t lio_target_np_sctp_show(struct config_item *item, char *page)
return rb;
}
-static ssize_t lio_target_np_sctp_store(struct config_item *item,
- const char *page, size_t count)
+static ssize_t lio_target_np_driver_store(struct config_item *item,
+ const char *page, size_t count, enum iscsit_transport_type type,
+ const char *mod_name)
{
struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
struct iscsi_np *np;
struct iscsi_portal_group *tpg;
- struct iscsi_tpg_np *tpg_np_sctp = NULL;
+ struct iscsi_tpg_np *tpg_np_new = NULL;
u32 op;
- int ret;
+ int rc;
- ret = kstrtou32(page, 0, &op);
- if (ret)
- return ret;
+ rc = kstrtou32(page, 0, &op);
+ if (rc)
+ return rc;
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for tpg_enable: %u\n", op);
return -EINVAL;
@@ -87,107 +89,64 @@ static ssize_t lio_target_np_sctp_store(struct config_item *item,
return -EINVAL;
if (op) {
- /*
- * Use existing np->np_sockaddr for SCTP network portal reference
- */
- tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
- tpg_np, ISCSI_SCTP_TCP);
- if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
- goto out;
- } else {
- tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
- if (!tpg_np_sctp)
- goto out;
+ if (strlen(mod_name)) {
+ rc = request_module(mod_name);
+ if (rc != 0) {
+ pr_warn("Unable to request_module for %s\n",
+ mod_name);
+ rc = 0;
+ }
+ }
- ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp);
- if (ret < 0)
+ tpg_np_new = iscsit_tpg_add_network_portal(tpg,
+ &np->np_sockaddr, tpg_np, type);
+ if (IS_ERR(tpg_np_new))
goto out;
+ } else {
+ tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
+ if (tpg_np_new) {
+ rc = iscsit_tpg_del_network_portal(tpg, tpg_np_new);
+ if (rc < 0)
+ goto out;
+ }
}
iscsit_put_tpg(tpg);
return count;
out:
iscsit_put_tpg(tpg);
- return -EINVAL;
+ return rc;
}
static ssize_t lio_target_np_iser_show(struct config_item *item, char *page)
{
- struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
- struct iscsi_tpg_np *tpg_np_iser;
- ssize_t rb;
-
- tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
- if (tpg_np_iser)
- rb = sprintf(page, "1\n");
- else
- rb = sprintf(page, "0\n");
-
- return rb;
+ return lio_target_np_driver_show(item, page, ISCSI_INFINIBAND);
}
static ssize_t lio_target_np_iser_store(struct config_item *item,
- const char *page, size_t count)
+ const char *page, size_t count)
{
- struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
- struct iscsi_np *np;
- struct iscsi_portal_group *tpg;
- struct iscsi_tpg_np *tpg_np_iser = NULL;
- char *endptr;
- u32 op;
- int rc = 0;
-
- op = simple_strtoul(page, &endptr, 0);
- if ((op != 1) && (op != 0)) {
- pr_err("Illegal value for tpg_enable: %u\n", op);
- return -EINVAL;
- }
- np = tpg_np->tpg_np;
- if (!np) {
- pr_err("Unable to locate struct iscsi_np from"
- " struct iscsi_tpg_np\n");
- return -EINVAL;
- }
-
- tpg = tpg_np->tpg;
- if (iscsit_get_tpg(tpg) < 0)
- return -EINVAL;
-
- if (op) {
- rc = request_module("ib_isert");
- if (rc != 0) {
- pr_warn("Unable to request_module for ib_isert\n");
- rc = 0;
- }
-
- tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
- tpg_np, ISCSI_INFINIBAND);
- if (IS_ERR(tpg_np_iser)) {
- rc = PTR_ERR(tpg_np_iser);
- goto out;
- }
- } else {
- tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
- if (tpg_np_iser) {
- rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
- if (rc < 0)
- goto out;
- }
- }
+ return lio_target_np_driver_store(item, page, count,
+ ISCSI_INFINIBAND, "ib_isert");
+}
+CONFIGFS_ATTR(lio_target_np_, iser);
- iscsit_put_tpg(tpg);
- return count;
-out:
- iscsit_put_tpg(tpg);
- return rc;
+static ssize_t lio_target_np_cxgbit_show(struct config_item *item, char *page)
+{
+ return lio_target_np_driver_show(item, page, ISCSI_CXGBIT);
}
-CONFIGFS_ATTR(lio_target_np_, sctp);
-CONFIGFS_ATTR(lio_target_np_, iser);
+static ssize_t lio_target_np_cxgbit_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ return lio_target_np_driver_store(item, page, count,
+ ISCSI_CXGBIT, "cxgbit");
+}
+CONFIGFS_ATTR(lio_target_np_, cxgbit);
static struct configfs_attribute *lio_target_portal_attrs[] = {
- &lio_target_np_attr_sctp,
&lio_target_np_attr_iser,
+ &lio_target_np_attr_cxgbit,
NULL,
};
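
With the SCTP-specific pair replaced by these generic driver_show/store
helpers, adding a portal attribute for a new transport becomes mechanical.
A hypothetical sketch (ISCSI_EXAMPLE and "example_mod" are assumed
placeholders, not real enum values or modules):

	static ssize_t lio_target_np_example_show(struct config_item *item,
			char *page)
	{
		return lio_target_np_driver_show(item, page, ISCSI_EXAMPLE);
	}

	static ssize_t lio_target_np_example_store(struct config_item *item,
			const char *page, size_t count)
	{
		return lio_target_np_driver_store(item, page, count,
						  ISCSI_EXAMPLE, "example_mod");
	}
	CONFIGFS_ATTR(lio_target_np_, example);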
@@ -1554,7 +1513,7 @@ static int lio_tpg_check_prot_fabric_only(
* This function calls iscsit_inc_session_usage_count() on the
* struct iscsi_session in question.
*/
-static int lio_tpg_shutdown_session(struct se_session *se_sess)
+static void lio_tpg_close_session(struct se_session *se_sess)
{
struct iscsi_session *sess = se_sess->fabric_sess_ptr;
struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg;
@@ -1566,7 +1525,7 @@ static int lio_tpg_shutdown_session(struct se_session *se_sess)
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
spin_unlock_bh(&se_tpg->session_lock);
- return 0;
+ return;
}
atomic_set(&sess->session_reinstatement, 1);
spin_unlock(&sess->conn_lock);
@@ -1575,20 +1534,6 @@ static int lio_tpg_shutdown_session(struct se_session *se_sess)
spin_unlock_bh(&se_tpg->session_lock);
iscsit_stop_session(sess, 1, 1);
- return 1;
-}
-
-/*
- * Calls iscsit_dec_session_usage_count() as inverse of
- * lio_tpg_shutdown_session()
- */
-static void lio_tpg_close_session(struct se_session *se_sess)
-{
- struct iscsi_session *sess = se_sess->fabric_sess_ptr;
- /*
- * If the iSCSI Session for the iSCSI Initiator Node exists,
- * forcefully shutdown the iSCSI NEXUS.
- */
iscsit_close_session(sess);
}
@@ -1640,7 +1585,6 @@ const struct target_core_fabric_ops iscsi_ops = {
.tpg_get_inst_index = lio_tpg_get_inst_index,
.check_stop_free = lio_check_stop_free,
.release_cmd = lio_release_cmd,
- .shutdown_session = lio_tpg_shutdown_session,
.close_session = lio_tpg_close_session,
.sess_get_index = lio_sess_get_index,
.sess_get_initiator_sid = lio_sess_get_initiator_sid,
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index fb3b52b12..647d4a5dc 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -524,3 +524,4 @@ struct iscsi_datain_req *iscsit_get_datain_values(
return NULL;
}
+EXPORT_SYMBOL(iscsit_get_datain_values);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 210f6e483..b54e72c7a 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -786,7 +786,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data)
}
spin_unlock_bh(&se_tpg->session_lock);
- target_put_session(sess->se_sess);
+ iscsit_close_session(sess);
}
void iscsit_start_time2retain_handler(struct iscsi_session *sess)
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 8436d56c5..b5212f0f9 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -228,7 +228,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
if (sess->session_state == TARG_SESS_STATE_FAILED) {
spin_unlock_bh(&sess->conn_lock);
iscsit_dec_session_usage_count(sess);
- target_put_session(sess->se_sess);
+ iscsit_close_session(sess);
return 0;
}
spin_unlock_bh(&sess->conn_lock);
@@ -236,7 +236,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
iscsit_stop_session(sess, 1, 1);
iscsit_dec_session_usage_count(sess);
- target_put_session(sess->se_sess);
+ iscsit_close_session(sess);
return 0;
}
@@ -258,7 +258,7 @@ static void iscsi_login_set_conn_values(
mutex_unlock(&auth_id_lock);
}
-static __printf(2, 3) int iscsi_change_param_sprintf(
+__printf(2, 3) int iscsi_change_param_sprintf(
struct iscsi_conn *conn,
const char *fmt, ...)
{
@@ -279,6 +279,7 @@ static __printf(2, 3) int iscsi_change_param_sprintf(
return 0;
}
+EXPORT_SYMBOL(iscsi_change_param_sprintf);
/*
* This is the leading connection of a new session,
@@ -1387,6 +1388,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
goto old_sess_out;
}
+ if (conn->conn_transport->iscsit_validate_params) {
+ ret = conn->conn_transport->iscsit_validate_params(conn);
+ if (ret < 0) {
+ if (zero_tsih)
+ goto new_sess_out;
+ else
+ goto old_sess_out;
+ }
+ }
+
ret = iscsi_target_start_negotiation(login, conn);
if (ret < 0)
goto new_sess_out;
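
The iscsit_validate_params hook gives a transport a chance to veto or adjust
negotiated keys before login negotiation starts. As a hedged sketch, a
transport could clamp a key its hardware cannot honour using the two helpers
exported elsewhere in this patch (EXAMPLE_MAX_MXDSL is an assumed constant):

	static int example_validate_params(struct iscsi_conn *conn)
	{
		struct iscsi_param *param;

		param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH,
						  conn->param_list);
		if (!param)
			return -EINVAL;

		if (simple_strtoul(param->value, NULL, 0) > EXAMPLE_MAX_MXDSL)
			return iscsi_change_param_sprintf(conn,
					"MaxXmitDataSegmentLength=%u",
					EXAMPLE_MAX_MXDSL);
		return 0;
	}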
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 9fc9117d0..89d34bd6d 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -269,6 +269,7 @@ int iscsi_target_check_login_request(
return 0;
}
+EXPORT_SYMBOL(iscsi_target_check_login_request);
static int iscsi_target_check_first_request(
struct iscsi_conn *conn,
@@ -1246,16 +1247,16 @@ int iscsi_target_start_negotiation(
{
int ret;
- ret = iscsi_target_do_login(conn, login);
- if (!ret) {
- if (conn->sock) {
- struct sock *sk = conn->sock->sk;
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
- write_lock_bh(&sk->sk_callback_lock);
- set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
- write_unlock_bh(&sk->sk_callback_lock);
- }
- } else if (ret < 0) {
+ write_lock_bh(&sk->sk_callback_lock);
+ set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+
+ ret = iscsi_target_do_login(conn, login);
+ if (ret < 0) {
cancel_delayed_work_sync(&conn->login_work);
cancel_delayed_work_sync(&conn->login_cleanup_work);
iscsi_target_restore_sock_callbacks(conn);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 3a1f9a7e6..0efa80bb8 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -680,6 +680,7 @@ struct iscsi_param *iscsi_find_param_from_key(
pr_err("Unable to locate key \"%s\".\n", key);
return NULL;
}
+EXPORT_SYMBOL(iscsi_find_param_from_key);
int iscsi_extract_key_value(char *textbuf, char **key, char **value)
{
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 428b0d9e3..1f3817720 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -514,6 +514,7 @@ void iscsit_add_cmd_to_immediate_queue(
wake_up(&conn->queues_wq);
}
+EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);
struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
{
@@ -725,6 +726,9 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
iscsit_remove_cmd_from_immediate_queue(cmd, conn);
iscsit_remove_cmd_from_response_queue(cmd, conn);
}
+
+ if (conn && conn->conn_transport->iscsit_release_cmd)
+ conn->conn_transport->iscsit_release_cmd(conn, cmd);
}
void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
@@ -773,6 +777,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
break;
}
}
+EXPORT_SYMBOL(iscsit_free_cmd);
int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
@@ -1283,9 +1288,8 @@ static int iscsit_do_rx_data(
iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
count->iov, count->iov_count, data);
- while (total_rx < data) {
- rx_loop = sock_recvmsg(conn->sock, &msg,
- (data - total_rx), MSG_WAITALL);
+ while (msg_data_left(&msg)) {
+ rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
if (rx_loop <= 0) {
pr_debug("rx_loop: %d total_rx: %d\n",
rx_loop, total_rx);
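
After this change the iov_iter inside msg tracks receive progress itself:
msg_data_left() reports the outstanding byte count and sock_recvmsg()
advances the iterator, so the loop no longer recomputes (data - total_rx) on
every call. The generic shape of the idiom, as a sketch:

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, nr_iov, total);
	while (msg_data_left(&msg)) {
		int n = sock_recvmsg(sock, &msg, MSG_WAITALL);

		if (n <= 0)
			break;		/* error or peer closed */
		rx += n;		/* msg.msg_iter already advanced */
	}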
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 0ad5ac541..5091b31b3 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -601,16 +601,6 @@ static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
return tl_cmd->sc_cmd_state;
}
-static int tcm_loop_shutdown_session(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void tcm_loop_close_session(struct se_session *se_sess)
-{
- return;
-};
-
static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
/*
@@ -1243,8 +1233,6 @@ static const struct target_core_fabric_ops loop_ops = {
.tpg_get_inst_index = tcm_loop_get_inst_index,
.check_stop_free = tcm_loop_check_stop_free,
.release_cmd = tcm_loop_release_cmd,
- .shutdown_session = tcm_loop_shutdown_session,
- .close_session = tcm_loop_close_session,
.sess_get_index = tcm_loop_sess_get_index,
.write_pending = tcm_loop_write_pending,
.write_pending_status = tcm_loop_write_pending_status,
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index c57e78849..58bb6ed18 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1726,16 +1726,6 @@ static void sbp_release_cmd(struct se_cmd *se_cmd)
sbp_free_request(req);
}
-static int sbp_shutdown_session(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void sbp_close_session(struct se_session *se_sess)
-{
- return;
-}
-
static u32 sbp_sess_get_index(struct se_session *se_sess)
{
return 0;
@@ -2349,8 +2339,6 @@ static const struct target_core_fabric_ops sbp_ops = {
.tpg_check_prod_mode_write_protect = sbp_check_false,
.tpg_get_inst_index = sbp_tpg_get_inst_index,
.release_cmd = sbp_release_cmd,
- .shutdown_session = sbp_shutdown_session,
- .close_session = sbp_close_session,
.sess_get_index = sbp_sess_get_index,
.write_pending = sbp_write_pending,
.write_pending_status = sbp_write_pending_status,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 49aba4a31..4c82bbe19 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -932,7 +932,7 @@ static int core_alua_update_tpg_primary_metadata(
tg_pt_gp->tg_pt_gp_alua_access_status);
snprintf(path, ALUA_METADATA_PATH_LEN,
- "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
+ "%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0],
config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
rc = core_alua_write_tpg_metadata(path, md_buf, len);
@@ -1275,8 +1275,8 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
atomic_read(&lun->lun_tg_pt_secondary_offline),
lun->lun_tg_pt_secondary_stat);
- snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%llu",
- se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
+ snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
+ db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
lun->unpacked_lun);
rc = core_alua_write_tpg_metadata(path, md_buf, len);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index d498533f0..2001005be 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -99,6 +99,67 @@ static ssize_t target_core_item_version_show(struct config_item *item,
CONFIGFS_ATTR_RO(target_core_item_, version);
+char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
+static char db_root_stage[DB_ROOT_LEN];
+
+static ssize_t target_core_item_dbroot_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%s\n", db_root);
+}
+
+static ssize_t target_core_item_dbroot_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ ssize_t read_bytes;
+ struct file *fp;
+
+ mutex_lock(&g_tf_lock);
+ if (!list_empty(&g_tf_list)) {
+ mutex_unlock(&g_tf_lock);
+ pr_err("db_root: cannot be changed: target drivers registered");
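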
+ return -EINVAL;
+ }
+
+ if (count > (DB_ROOT_LEN - 1)) {
+ mutex_unlock(&g_tf_lock);
+ pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
+ (int)count, DB_ROOT_LEN - 1);
+ return -EINVAL;
+ }
+
+ read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
+ if (!read_bytes) {
+ mutex_unlock(&g_tf_lock);
+ return -EINVAL;
+ }
+ if (db_root_stage[read_bytes - 1] == '\n')
+ db_root_stage[read_bytes - 1] = '\0';
+
+ /* validate new db root before accepting it */
+ fp = filp_open(db_root_stage, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ mutex_unlock(&g_tf_lock);
+ pr_err("db_root: cannot open: %s\n", db_root_stage);
+ return -EINVAL;
+ }
+ if (!S_ISDIR(fp->f_inode->i_mode)) {
+ filp_close(fp, 0);
+ mutex_unlock(&g_tf_lock);
+ pr_err("db_root: not a directory: %s\n", db_root_stage);
+ return -EINVAL;
+ }
+ filp_close(fp, 0);
+
+ strncpy(db_root, db_root_stage, read_bytes);
+
+ mutex_unlock(&g_tf_lock);
+
+ return read_bytes;
+}
+
+CONFIGFS_ATTR(target_core_item_, dbroot);
+
static struct target_fabric_configfs *target_core_get_fabric(
const char *name)
{
@@ -239,6 +300,7 @@ static struct configfs_group_operations target_core_fabric_group_ops = {
*/
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
&target_core_item_attr_version,
+ &target_core_item_attr_dbroot,
NULL,
};
@@ -323,14 +385,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
pr_err("Missing tfo->release_cmd()\n");
return -EINVAL;
}
- if (!tfo->shutdown_session) {
- pr_err("Missing tfo->shutdown_session()\n");
- return -EINVAL;
- }
- if (!tfo->close_session) {
- pr_err("Missing tfo->close_session()\n");
- return -EINVAL;
- }
if (!tfo->sess_get_index) {
pr_err("Missing tfo->sess_get_index()\n");
return -EINVAL;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 026a758e5..7c4efb441 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -687,10 +687,10 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
* Force writethrough using WRITE_FUA if a volatile write cache
* is not enabled, or if initiator set the Force Unit Access bit.
*/
- if (q->flush_flags & REQ_FUA) {
+ if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
if (cmd->se_cmd_flags & SCF_FUA)
rw = WRITE_FUA;
- else if (!(q->flush_flags & REQ_FLUSH))
+ else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
rw = WRITE_FUA;
else
rw = WRITE;
@@ -836,7 +836,7 @@ static bool iblock_get_write_cache(struct se_device *dev)
struct block_device *bd = ib_dev->ibd_bd;
struct request_queue *q = bdev_get_queue(bd);
- return q->flush_flags & REQ_FLUSH;
+ return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
}
static const struct target_backend_ops iblock_ops = {
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 86b4a8375..fc91e85f5 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -155,4 +155,10 @@ void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
/* target_core_xcopy.c */
extern struct se_portal_group xcopy_pt_tpg;
+/* target_core_configfs.c */
+#define DB_ROOT_LEN 4096
+#define DB_ROOT_DEFAULT "/var/target"
+
+extern char db_root[];
+
#endif /* TARGET_CORE_INTERNAL_H */
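
The new db_root knob replaces the hard-coded /var/target prefix and may only
be changed while no fabric driver is registered; userspace would update it by
writing a directory path to the dbroot attribute exposed under
/sys/kernel/config/target/ (location inferred from the attribute wiring
above). Consumers then build their metadata paths from it, as the ALUA hunks
above and the PR hunk below do, e.g.:

	snprintf(path, sizeof(path), "%s/pr/aptpl_%s", db_root, unit_serial);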
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index b1795735e..47463c99c 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1985,7 +1985,7 @@ static int __core_scsi3_write_aptpl_to_file(
return -EMSGSIZE;
}
- snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
+ snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]);
file = filp_open(path, flags, 0600);
if (IS_ERR(file)) {
pr_err("filp_open(%s) for APTPL metadata"
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 47a833f3a..24b36fd78 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -403,7 +403,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = RD_DEV(se_dev);
struct rd_dev_sg_table *prot_table;
- bool need_to_release = false;
struct scatterlist *prot_sg;
u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
u32 prot_offset, prot_page;
@@ -432,9 +431,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
if (!rc)
sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);
- if (need_to_release)
- kfree(prot_sg);
-
return rc;
}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index ddf046080..d99752c6c 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -336,44 +336,39 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
return acl;
}
-void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
+static void target_shutdown_sessions(struct se_node_acl *acl)
{
- struct se_portal_group *tpg = acl->se_tpg;
- LIST_HEAD(sess_list);
- struct se_session *sess, *sess_tmp;
+ struct se_session *sess;
unsigned long flags;
- int rc;
-
- mutex_lock(&tpg->acl_node_mutex);
- if (acl->dynamic_node_acl) {
- acl->dynamic_node_acl = 0;
- }
- list_del(&acl->acl_list);
- mutex_unlock(&tpg->acl_node_mutex);
+restart:
spin_lock_irqsave(&acl->nacl_sess_lock, flags);
- acl->acl_stop = 1;
-
- list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
- sess_acl_list) {
- if (sess->sess_tearing_down != 0)
+ list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
+ if (sess->sess_tearing_down)
continue;
- if (!target_get_session(sess))
- continue;
- list_move(&sess->sess_acl_list, &sess_list);
+ list_del_init(&sess->sess_acl_list);
+ spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+
+ if (acl->se_tpg->se_tpg_tfo->close_session)
+ acl->se_tpg->se_tpg_tfo->close_session(sess);
+ goto restart;
}
spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+}
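
target_shutdown_sessions() uses the classic drop-lock-and-restart idiom:
close_session() may sleep, so each entry is unhooked under the spinlock, the
lock is released for the callback, and the scan restarts from the head. A
generic sketch of the pattern (needs_work/do_blocking_work are placeholders):

	restart:
		spin_lock_irqsave(&lock, flags);
		list_for_each_entry(entry, &head, node) {
			if (!needs_work(entry))
				continue;
			list_del_init(&entry->node);
			spin_unlock_irqrestore(&lock, flags);

			do_blocking_work(entry);	/* may sleep */
			goto restart;
		}
		spin_unlock_irqrestore(&lock, flags);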
- list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
- list_del(&sess->sess_acl_list);
+void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
+{
+ struct se_portal_group *tpg = acl->se_tpg;
+
+ mutex_lock(&tpg->acl_node_mutex);
+ if (acl->dynamic_node_acl)
+ acl->dynamic_node_acl = 0;
+ list_del(&acl->acl_list);
+ mutex_unlock(&tpg->acl_node_mutex);
+
+ target_shutdown_sessions(acl);
- rc = tpg->se_tpg_tfo->shutdown_session(sess);
- target_put_session(sess);
- if (!rc)
- continue;
- target_put_session(sess);
- }
target_put_nacl(acl);
/*
* Wait for last target_put_nacl() to complete in target_complete_nacl()
@@ -400,11 +395,7 @@ int core_tpg_set_initiator_node_queue_depth(
struct se_node_acl *acl,
u32 queue_depth)
{
- LIST_HEAD(sess_list);
struct se_portal_group *tpg = acl->se_tpg;
- struct se_session *sess, *sess_tmp;
- unsigned long flags;
- int rc;
/*
* User has requested to change the queue depth for a Initiator Node.
@@ -413,30 +404,10 @@ int core_tpg_set_initiator_node_queue_depth(
*/
target_set_nacl_queue_depth(tpg, acl, queue_depth);
- spin_lock_irqsave(&acl->nacl_sess_lock, flags);
- list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
- sess_acl_list) {
- if (sess->sess_tearing_down != 0)
- continue;
- if (!target_get_session(sess))
- continue;
- spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
-
- /*
- * Finally call tpg->se_tpg_tfo->close_session() to force session
- * reinstatement to occur if there is an active session for the
- * $FABRIC_MOD Initiator Node in question.
- */
- rc = tpg->se_tpg_tfo->shutdown_session(sess);
- target_put_session(sess);
- if (!rc) {
- spin_lock_irqsave(&acl->nacl_sess_lock, flags);
- continue;
- }
- target_put_session(sess);
- spin_lock_irqsave(&acl->nacl_sess_lock, flags);
- }
- spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+ /*
+ * Shutdown all pending sessions to force session reinstatement.
+ */
+ target_shutdown_sessions(acl);
pr_debug("Successfully changed queue depth to: %d for Initiator"
" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ab2bf1297..5ab3967dd 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -239,7 +239,6 @@ struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
INIT_LIST_HEAD(&se_sess->sess_cmd_list);
INIT_LIST_HEAD(&se_sess->sess_wait_list);
spin_lock_init(&se_sess->sess_cmd_lock);
- kref_init(&se_sess->sess_kref);
se_sess->sup_prot_ops = sup_prot_ops;
return se_sess;
@@ -430,27 +429,6 @@ target_alloc_session(struct se_portal_group *tpg,
}
EXPORT_SYMBOL(target_alloc_session);
-static void target_release_session(struct kref *kref)
-{
- struct se_session *se_sess = container_of(kref,
- struct se_session, sess_kref);
- struct se_portal_group *se_tpg = se_sess->se_tpg;
-
- se_tpg->se_tpg_tfo->close_session(se_sess);
-}
-
-int target_get_session(struct se_session *se_sess)
-{
- return kref_get_unless_zero(&se_sess->sess_kref);
-}
-EXPORT_SYMBOL(target_get_session);
-
-void target_put_session(struct se_session *se_sess)
-{
- kref_put(&se_sess->sess_kref, target_release_session);
-}
-EXPORT_SYMBOL(target_put_session);
-
ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
struct se_session *se_sess;
@@ -499,8 +477,8 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
se_nacl = se_sess->se_node_acl;
if (se_nacl) {
spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
- if (se_nacl->acl_stop == 0)
- list_del(&se_sess->sess_acl_list);
+ if (!list_empty(&se_sess->sess_acl_list))
+ list_del_init(&se_sess->sess_acl_list);
/*
* If the session list is empty, then clear the pointer.
* Otherwise, set the struct se_session pointer from the tail
@@ -2195,7 +2173,7 @@ queue_full:
transport_handle_queue_full(cmd, cmd->se_dev);
}
-static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
+void target_free_sgl(struct scatterlist *sgl, int nents)
{
struct scatterlist *sg;
int count;
@@ -2205,6 +2183,7 @@ static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
kfree(sgl);
}
+EXPORT_SYMBOL(target_free_sgl);
static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
{
@@ -2225,7 +2204,7 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
static inline void transport_free_pages(struct se_cmd *cmd)
{
if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
- transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+ target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
cmd->t_prot_sg = NULL;
cmd->t_prot_nents = 0;
}
@@ -2236,7 +2215,7 @@ static inline void transport_free_pages(struct se_cmd *cmd)
* SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
*/
if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
- transport_free_sgl(cmd->t_bidi_data_sg,
+ target_free_sgl(cmd->t_bidi_data_sg,
cmd->t_bidi_data_nents);
cmd->t_bidi_data_sg = NULL;
cmd->t_bidi_data_nents = 0;
@@ -2246,11 +2225,11 @@ static inline void transport_free_pages(struct se_cmd *cmd)
}
transport_reset_sgl_orig(cmd);
- transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
+ target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
cmd->t_data_sg = NULL;
cmd->t_data_nents = 0;
- transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
+ target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
cmd->t_bidi_data_sg = NULL;
cmd->t_bidi_data_nents = 0;
}
@@ -2324,20 +2303,22 @@ EXPORT_SYMBOL(transport_kunmap_data_sg);
int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
- bool zero_page)
+ bool zero_page, bool chainable)
{
struct scatterlist *sg;
struct page *page;
gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
- unsigned int nent;
+ unsigned int nalloc, nent;
int i = 0;
- nent = DIV_ROUND_UP(length, PAGE_SIZE);
- sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
+ nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
+ if (chainable)
+ nalloc++;
+ sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
if (!sg)
return -ENOMEM;
- sg_init_table(sg, nent);
+ sg_init_table(sg, nalloc);
while (length) {
u32 page_len = min_t(u32, length, PAGE_SIZE);
@@ -2361,6 +2342,7 @@ out:
kfree(sg);
return -ENOMEM;
}
+EXPORT_SYMBOL(target_alloc_sgl);
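
The new chainable flag makes target_alloc_sgl() reserve one slot past the
data entries so a fabric driver can splice its own scatterlist onto the
table with sg_chain(). A hedged usage sketch:

	struct scatterlist *sgl;
	unsigned int nents;

	if (target_alloc_sgl(&sgl, &nents, length, false, true) < 0)
		return -ENOMEM;

	/* driver_sgl is a separately built, driver-owned scatterlist;
	 * the reserved (nents + 1)-th slot becomes the chain entry. */
	sg_chain(sgl, nents + 1, driver_sgl);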
/*
* Allocate any required resources to execute the command. For writes we
@@ -2376,7 +2358,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
if (cmd->prot_op != TARGET_PROT_NORMAL &&
!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
- cmd->prot_length, true);
+ cmd->prot_length, true, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
@@ -2401,13 +2383,13 @@ transport_generic_new_cmd(struct se_cmd *cmd)
ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
&cmd->t_bidi_data_nents,
- bidi_length, zero_flag);
+ bidi_length, zero_flag, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
- cmd->data_length, zero_flag);
+ cmd->data_length, zero_flag, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
@@ -2421,7 +2403,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
&cmd->t_bidi_data_nents,
- caw_length, zero_flag);
+ caw_length, zero_flag, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 47fe94ee1..75cd85426 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -563,7 +563,7 @@ static int target_xcopy_setup_pt_cmd(
if (alloc_mem) {
rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
- cmd->data_length, false);
+ cmd->data_length, false, false);
if (rc < 0) {
ret = rc;
goto out;
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index c30003bd4..e28209b99 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -139,7 +139,6 @@ extern unsigned int ft_debug_logging;
* Session ops.
*/
void ft_sess_put(struct ft_sess *);
-int ft_sess_shutdown(struct se_session *);
void ft_sess_close(struct se_session *);
u32 ft_sess_get_index(struct se_session *);
u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 4d375e958..42ee91123 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -442,7 +442,6 @@ static const struct target_core_fabric_ops ft_fabric_ops = {
.tpg_get_inst_index = ft_tpg_get_inst_index,
.check_stop_free = ft_check_stop_free,
.release_cmd = ft_release_cmd,
- .shutdown_session = ft_sess_shutdown,
.close_session = ft_sess_close,
.sess_get_index = ft_sess_get_index,
.sess_get_initiator_sid = NULL,
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index d0c3e1894..f5186a744 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -303,18 +303,6 @@ static void ft_sess_delete_all(struct ft_tport *tport)
*/
/*
- * Determine whether session is allowed to be shutdown in the current context.
- * Returns non-zero if the session should be shutdown.
- */
-int ft_sess_shutdown(struct se_session *se_sess)
-{
- struct ft_sess *sess = se_sess->fabric_sess_ptr;
-
- pr_debug("port_id %x\n", sess->port_id);
- return 1;
-}
-
-/*
* Remove session and send PRLO.
* This is called when the ACL is being deleted or queue depth is changing.
*/
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 3c3dc4a3d..2d702ca65 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -260,16 +260,6 @@ config ARMADA_THERMAL
Enable this option if you want to have support for thermal management
controller present in Armada 370 and Armada XP SoC.
-config TEGRA_SOCTHERM
- tristate "Tegra SOCTHERM thermal management"
- depends on ARCH_TEGRA
- help
- Enable this option for integrated thermal management support on NVIDIA
- Tegra124 systems-on-chip. The driver supports four thermal zones
- (CPU, GPU, MEM, PLLX). Cooling devices can be bound to the thermal
- zones to manage temperatures. This option is also required for the
- emergency thermal reset (thermtrip) feature to function.
-
config DB8500_CPUFREQ_COOLING
tristate "DB8500 cpufreq cooling"
depends on ARCH_U8500 || COMPILE_TEST
@@ -338,31 +328,9 @@ config INTEL_QUARK_DTS_THERMAL
hot & critical. The critical trip point default value is set by
underlying BIOS/Firmware.
-config INT340X_THERMAL
- tristate "ACPI INT340X thermal drivers"
- depends on X86 && ACPI
- select THERMAL_GOV_USER_SPACE
- select ACPI_THERMAL_REL
- select ACPI_FAN
- select INTEL_SOC_DTS_IOSF_CORE
- select THERMAL_WRITABLE_TRIPS
- help
- Newer laptops and tablets that use ACPI may have thermal sensors and
- other devices with thermal control capabilities outside the core
- CPU/SOC, for thermal safety reasons.
- They are exposed for the OS to use via the INT3400 ACPI device object
- as the master, and INT3401~INT340B ACPI device objects as the slaves.
- Enable this to expose the temperature information and cooling ability
- from these objects to userspace via the normal thermal framework.
- This means that a wide range of applications and GUI widgets can show
- the information to the user or use this information for making
- decisions. For example, the Intel Thermal Daemon can use this
- information to allow the user to select his laptop to run without
- turning on the fans.
-
-config ACPI_THERMAL_REL
- tristate
- depends on ACPI
+menu "ACPI INT340X thermal drivers"
+source drivers/thermal/int340x_thermal/Kconfig
+endmenu
config INTEL_PCH_THERMAL
tristate "Intel PCH Thermal Reporting Driver"
@@ -399,6 +367,17 @@ depends on ARCH_STI && OF
source "drivers/thermal/st/Kconfig"
endmenu
+config TANGO_THERMAL
+ tristate "Tango thermal management"
+ depends on ARCH_TANGO || COMPILE_TEST
+ help
+ Enable the Tango thermal driver, which supports the primitive
+ temperature sensor embedded in Tango chips since the SMP8758.
+ This sensor only generates a 1-bit signal to indicate whether
+ the die temperature exceeds a programmable threshold.
+
+source "drivers/thermal/tegra/Kconfig"
+
config QCOM_SPMI_TEMP_ALARM
tristate "Qualcomm SPMI PMIC Temperature Alarm"
depends on OF && SPMI && IIO
@@ -410,4 +389,14 @@ config QCOM_SPMI_TEMP_ALARM
real time die temperature if an ADC is present or an estimate of the
temperature based upon the over temperature stage value.
+config GENERIC_ADC_THERMAL
+ tristate "Generic ADC based thermal sensor"
+ depends on IIO
+ help
+ This enables a thermal sysfs driver for a temperature sensor
+ connected to a General Purpose ADC. The ADC channel is read via
+ the IIO framework and the channel information is provided to this
+ driver, which reports the temperature by reading the ADC channel
+ and converting the value to a temperature using a lookup table.
+
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 8e9cbc3b5..10b07c14f 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -35,6 +35,7 @@ obj-y += samsung/
obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o
+obj-$(CONFIG_TANGO_THERMAL) += tango_thermal.o
obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_ST_THERMAL) += st/
-obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o
+obj-$(CONFIG_TEGRA_SOCTHERM) += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
+obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index 70836c5b8..fc52016d4 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -29,7 +29,13 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
struct thermal_instance *instance;
tz->ops->get_trip_temp(tz, trip, &trip_temp);
- tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
+
+ if (!tz->ops->get_trip_hyst) {
+ pr_warn_once("Undefined get_trip_hyst for thermal zone %s - "
+ "running with default hysteresis zero\n", tz->type);
+ trip_hyst = 0;
+ } else
+ tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
dev_dbg(&tz->device, "Trip%d[temp=%d]:temp=%d:hyst=%d\n",
trip, trip_temp, tz->temperature,
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 5e820b541..97fad8f51 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -160,7 +160,7 @@ static int hisi_thermal_get_temp(void *_sensor, int *temp)
struct hisi_thermal_sensor *sensor = _sensor;
struct hisi_thermal_data *data = sensor->thermal;
- int sensor_id = 0, i;
+ int sensor_id = -1, i;
long max_temp = 0;
*temp = hisi_thermal_get_sensor_temp(data, sensor);
@@ -168,12 +168,19 @@ static int hisi_thermal_get_temp(void *_sensor, int *temp)
sensor->sensor_temp = *temp;
for (i = 0; i < HISI_MAX_SENSORS; i++) {
+ if (!data->sensors[i].tzd)
+ continue;
+
if (data->sensors[i].sensor_temp >= max_temp) {
max_temp = data->sensors[i].sensor_temp;
sensor_id = i;
}
}
+ /* If no sensor has been enabled, then skip to enable irq */
+ if (sensor_id == -1)
+ return 0;
+
mutex_lock(&data->thermal_lock);
data->irq_bind_sensor = sensor_id;
mutex_unlock(&data->thermal_lock);
@@ -226,8 +233,12 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
sensor->thres_temp / 1000);
mutex_unlock(&data->thermal_lock);
- for (i = 0; i < HISI_MAX_SENSORS; i++)
+ for (i = 0; i < HISI_MAX_SENSORS; i++) {
+ if (!data->sensors[i].tzd)
+ continue;
+
thermal_zone_device_update(data->sensors[i].tzd);
+ }
return IRQ_HANDLED;
}
@@ -243,10 +254,11 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
sensor->id = index;
sensor->thermal = data;
- sensor->tzd = thermal_zone_of_sensor_register(&pdev->dev, sensor->id,
- sensor, &hisi_of_thermal_ops);
+ sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ sensor->id, sensor, &hisi_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
ret = PTR_ERR(sensor->tzd);
+ sensor->tzd = NULL;
dev_err(&pdev->dev, "failed to register sensor id %d: %d\n",
sensor->id, ret);
return ret;
@@ -331,28 +343,21 @@ static int hisi_thermal_probe(struct platform_device *pdev)
return ret;
}
+ hisi_thermal_enable_bind_irq_sensor(data);
+ irq_get_irqchip_state(data->irq, IRQCHIP_STATE_MASKED,
+ &data->irq_enabled);
+
for (i = 0; i < HISI_MAX_SENSORS; ++i) {
ret = hisi_thermal_register_sensor(pdev, data,
&data->sensors[i], i);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev,
"failed to register thermal sensor: %d\n", ret);
- goto err_get_sensor_data;
- }
+ else
+ hisi_thermal_toggle_sensor(&data->sensors[i], true);
}
- hisi_thermal_enable_bind_irq_sensor(data);
- data->irq_enabled = true;
-
- for (i = 0; i < HISI_MAX_SENSORS; i++)
- hisi_thermal_toggle_sensor(&data->sensors[i], true);
-
return 0;
-
-err_get_sensor_data:
- clk_disable_unprepare(data->clk);
-
- return ret;
}
static int hisi_thermal_remove(struct platform_device *pdev)
@@ -363,8 +368,10 @@ static int hisi_thermal_remove(struct platform_device *pdev)
for (i = 0; i < HISI_MAX_SENSORS; i++) {
struct hisi_thermal_sensor *sensor = &data->sensors[i];
+ if (!sensor->tzd)
+ continue;
+
hisi_thermal_toggle_sensor(sensor, false);
- thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd);
}
hisi_thermal_disable_sensor(data);
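
Two related changes land in this driver: sensors are registered with the
devm_ variant, so remove() no longer unregisters them explicitly, and a
failed registration now leaves sensor->tzd NULL instead of aborting the
probe. Every loop over the sensor array therefore follows the same guard,
sketched generically (operate_on is a placeholder):

	for (i = 0; i < HISI_MAX_SENSORS; i++) {
		if (!data->sensors[i].tzd)
			continue;	/* registration failed; skip */

		operate_on(&data->sensors[i]);
	}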
diff --git a/drivers/thermal/int340x_thermal/Kconfig b/drivers/thermal/int340x_thermal/Kconfig
new file mode 100644
index 000000000..0582bd12a
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/Kconfig
@@ -0,0 +1,42 @@
+#
+# ACPI INT340x thermal drivers configuration
+#
+
+config INT340X_THERMAL
+ tristate "ACPI INT340X thermal drivers"
+ depends on X86 && ACPI
+ select THERMAL_GOV_USER_SPACE
+ select ACPI_THERMAL_REL
+ select ACPI_FAN
+ select INTEL_SOC_DTS_IOSF_CORE
+ help
+ Newer laptops and tablets that use ACPI may have thermal sensors and
+ other devices with thermal control capabilities outside the core
+ CPU/SOC, for thermal safety reasons.
+ They are exposed for the OS to use via the INT3400 ACPI device object
+ as the master, and INT3401~INT340B ACPI device objects as the slaves.
+ Enable this to expose the temperature information and cooling ability
+ from these objects to userspace via the normal thermal framework.
+ This means that a wide range of applications and GUI widgets can show
+ the information to the user or use this information for making
+ decisions. For example, the Intel Thermal Daemon can use this
+ information to allow the user to run the laptop without
+ turning on the fans.
+
+config ACPI_THERMAL_REL
+ tristate
+ depends on ACPI
+
+if INT340X_THERMAL
+
+config INT3406_THERMAL
+ tristate "ACPI INT3406 display thermal driver"
+ depends on ACPI_VIDEO
+ help
+ The display thermal device represents the LED/LCD display panel
+ that may or may not include touch support. The main function of
+ the display thermal device is to allow control of the display
+ brightness in order to address a thermal condition or to reduce
+ power consumed by the display device.
+
+endif
diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile
index ba77a34f6..df0df055e 100644
--- a/drivers/thermal/int340x_thermal/Makefile
+++ b/drivers/thermal/int340x_thermal/Makefile
@@ -3,4 +3,5 @@ obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal_zone.o
obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device.o
+obj-$(CONFIG_INT3406_THERMAL) += int3406_thermal.o
obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c
new file mode 100644
index 000000000..a578cd257
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/int3406_thermal.c
@@ -0,0 +1,236 @@
+/*
+ * INT3406 thermal driver for display participant device
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Authors: Aaron Lu <aaron.lu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/thermal.h>
+#include <acpi/video.h>
+
+#define INT3406_BRIGHTNESS_LIMITS_CHANGED 0x80
+
+struct int3406_thermal_data {
+ int upper_limit;
+ int upper_limit_index;
+ int lower_limit;
+ int lower_limit_index;
+ acpi_handle handle;
+ struct acpi_video_device_brightness *br;
+ struct backlight_device *raw_bd;
+ struct thermal_cooling_device *cooling_dev;
+};
+
+static int int3406_thermal_to_raw(int level, struct int3406_thermal_data *d)
+{
+ int max_level = d->br->levels[d->br->count - 1];
+ int raw_max = d->raw_bd->props.max_brightness;
+
+ return level * raw_max / max_level;
+}
+
+static int int3406_thermal_to_acpi(int level, struct int3406_thermal_data *d)
+{
+ int raw_max = d->raw_bd->props.max_brightness;
+ int max_level = d->br->levels[d->br->count - 1];
+
+ return level * max_level / raw_max;
+}
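+
+/*
+ * Example of the linear scaling above, assuming a hypothetical raw
+ * range of 0..255 and a top ACPI level of 100: ACPI level 80 maps to
+ * raw 80 * 255 / 100 = 204, and raw 204 maps back to
+ * 204 * 100 / 255 = 80.
+ */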
+
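+/*
+ * Note on indexing: per the ACPI _BCL package convention, levels[0] and
+ * levels[1] hold the AC and battery default levels, so usable brightness
+ * values start at index 2 in ascending order; hence index 2 is treated
+ * as the floor in the cooling-state computations below.
+ */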
+static int
+int3406_thermal_get_max_state(struct thermal_cooling_device *cooling_dev,
+ unsigned long *state)
+{
+ struct int3406_thermal_data *d = cooling_dev->devdata;
+ int index = d->lower_limit_index ? d->lower_limit_index : 2;
+
+ *state = d->br->count - 1 - index;
+ return 0;
+}
+
+static int
+int3406_thermal_set_cur_state(struct thermal_cooling_device *cooling_dev,
+ unsigned long state)
+{
+ struct int3406_thermal_data *d = cooling_dev->devdata;
+ int level, raw_level;
+
+ if (state > d->br->count - 3)
+ return -EINVAL;
+
+ state = d->br->count - 1 - state;
+ level = d->br->levels[state];
+
+ if ((d->upper_limit && level > d->upper_limit) ||
+ (d->lower_limit && level < d->lower_limit))
+ return -EINVAL;
+
+ raw_level = int3406_thermal_to_raw(level, d);
+ return backlight_device_set_brightness(d->raw_bd, raw_level);
+}
+
+static int
+int3406_thermal_get_cur_state(struct thermal_cooling_device *cooling_dev,
+ unsigned long *state)
+{
+ struct int3406_thermal_data *d = cooling_dev->devdata;
+ int raw_level, level, i;
+ int *levels = d->br->levels;
+
+ raw_level = d->raw_bd->props.brightness;
+ level = int3406_thermal_to_acpi(raw_level, d);
+
+ /*
+	 * There is no 1:1 mapping between the firmware interface levels and the
+	 * raw interface levels, so we have to find one that is close enough.
+ */
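+	/*
+	 * E.g. with hypothetical levels {.., .., 10, 40, 70, 100} (count = 6)
+	 * and a converted level of 50, the loop below stops at i = 4
+	 * (70 > 50) and steps back to i = 3 because 50 is closer to 40;
+	 * the reported state is then 6 - 1 - 3 = 2.
+	 */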
+ for (i = 2; i < d->br->count; i++) {
+ if (level < levels[i]) {
+ if (i == 2)
+ break;
+ if ((level - levels[i - 1]) < (levels[i] - level))
+ i--;
+ break;
+ }
+ }
+
+ *state = d->br->count - 1 - i;
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops video_cooling_ops = {
+ .get_max_state = int3406_thermal_get_max_state,
+ .get_cur_state = int3406_thermal_get_cur_state,
+ .set_cur_state = int3406_thermal_set_cur_state,
+};
+
+static int int3406_thermal_get_index(int *array, int nr, int value)
+{
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ if (array[i] == value)
+ break;
+ }
+ return i == nr ? -ENOENT : i;
+}
+
+static void int3406_thermal_get_limit(struct int3406_thermal_data *d)
+{
+ acpi_status status;
+ unsigned long long lower_limit, upper_limit;
+ int index;
+
+ status = acpi_evaluate_integer(d->handle, "DDDL", NULL, &lower_limit);
+ if (ACPI_SUCCESS(status)) {
+ index = int3406_thermal_get_index(d->br->levels, d->br->count,
+ lower_limit);
+ if (index > 0) {
+ d->lower_limit = (int)lower_limit;
+ d->lower_limit_index = index;
+ }
+ }
+
+ status = acpi_evaluate_integer(d->handle, "DDPC", NULL, &upper_limit);
+ if (ACPI_SUCCESS(status)) {
+ index = int3406_thermal_get_index(d->br->levels, d->br->count,
+ upper_limit);
+ if (index > 0) {
+ d->upper_limit = (int)upper_limit;
+ d->upper_limit_index = index;
+ }
+ }
+}
+
+static void int3406_notify(acpi_handle handle, u32 event, void *data)
+{
+ if (event == INT3406_BRIGHTNESS_LIMITS_CHANGED)
+ int3406_thermal_get_limit(data);
+}
+
+static int int3406_thermal_probe(struct platform_device *pdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
+ struct int3406_thermal_data *d;
+ struct backlight_device *bd;
+ int ret;
+
+ if (!ACPI_HANDLE(&pdev->dev))
+ return -ENODEV;
+
+ d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+ d->handle = ACPI_HANDLE(&pdev->dev);
+
+ bd = backlight_device_get_by_type(BACKLIGHT_RAW);
+ if (!bd)
+ return -ENODEV;
+ d->raw_bd = bd;
+
+ ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL);
+ if (ret)
+ return ret;
+
+ int3406_thermal_get_limit(d);
+
+ d->cooling_dev = thermal_cooling_device_register(acpi_device_bid(adev),
+ d, &video_cooling_ops);
+ if (IS_ERR(d->cooling_dev))
+ goto err;
+
+ ret = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+ int3406_notify, d);
+ if (ret)
+ goto err_cdev;
+
+ platform_set_drvdata(pdev, d);
+
+ return 0;
+
+err_cdev:
+ thermal_cooling_device_unregister(d->cooling_dev);
+err:
+ kfree(d->br);
+ return -ENODEV;
+}
+
+static int int3406_thermal_remove(struct platform_device *pdev)
+{
+ struct int3406_thermal_data *d = platform_get_drvdata(pdev);
+
+ thermal_cooling_device_unregister(d->cooling_dev);
+ kfree(d->br);
+ return 0;
+}
+
+static const struct acpi_device_id int3406_thermal_match[] = {
+ {"INT3406", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, int3406_thermal_match);
+
+static struct platform_driver int3406_thermal_driver = {
+ .probe = int3406_thermal_probe,
+ .remove = int3406_thermal_remove,
+ .driver = {
+ .name = "int3406 thermal",
+ .owner = THIS_MODULE,
+ .acpi_match_table = int3406_thermal_match,
+ },
+};
+
+module_platform_driver(int3406_thermal_driver);
+
+MODULE_DESCRIPTION("INT3406 Thermal driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 36fa724a3..42c1ac057 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -198,49 +198,33 @@ static struct thermal_zone_device_ops proc_thermal_local_ops = {
.get_temp = proc_thermal_get_zone_temp,
};
-static int proc_thermal_add(struct device *dev,
- struct proc_thermal_device **priv)
+static int proc_thermal_read_ppcc(struct proc_thermal_device *proc_priv)
{
- struct proc_thermal_device *proc_priv;
- struct acpi_device *adev;
+ int i;
acpi_status status;
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *elements, *ppcc;
union acpi_object *p;
- unsigned long long tmp;
- struct thermal_zone_device_ops *ops = NULL;
- int i;
- int ret;
-
- adev = ACPI_COMPANION(dev);
- if (!adev)
- return -ENODEV;
+ int ret = 0;
- status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
+ status = acpi_evaluate_object(proc_priv->adev->handle, "PPCC",
+ NULL, &buf);
if (ACPI_FAILURE(status))
return -ENODEV;
p = buf.pointer;
if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
- dev_err(dev, "Invalid PPCC data\n");
+ dev_err(proc_priv->dev, "Invalid PPCC data\n");
ret = -EFAULT;
goto free_buffer;
}
+
if (!p->package.count) {
- dev_err(dev, "Invalid PPCC package size\n");
+ dev_err(proc_priv->dev, "Invalid PPCC package size\n");
ret = -EFAULT;
goto free_buffer;
}
- proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
- if (!proc_priv) {
- ret = -ENOMEM;
- goto free_buffer;
- }
-
- proc_priv->dev = dev;
- proc_priv->adev = adev;
-
for (i = 0; i < min((int)p->package.count - 1, 2); ++i) {
elements = &(p->package.elements[i+1]);
if (elements->type != ACPI_TYPE_PACKAGE ||
@@ -257,12 +241,62 @@ static int proc_thermal_add(struct device *dev,
proc_priv->power_limits[i].step_uw = ppcc[5].integer.value;
}
+free_buffer:
+ kfree(buf.pointer);
+
+ return ret;
+}
+
+#define PROC_POWER_CAPABILITY_CHANGED 0x83
+static void proc_thermal_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct proc_thermal_device *proc_priv = data;
+
+ if (!proc_priv)
+ return;
+
+ switch (event) {
+ case PROC_POWER_CAPABILITY_CHANGED:
+ proc_thermal_read_ppcc(proc_priv);
+ int340x_thermal_zone_device_update(proc_priv->int340x_zone);
+ break;
+ default:
+ dev_err(proc_priv->dev, "Unsupported event [0x%x]\n", event);
+ break;
+ }
+}
+
+
+static int proc_thermal_add(struct device *dev,
+ struct proc_thermal_device **priv)
+{
+ struct proc_thermal_device *proc_priv;
+ struct acpi_device *adev;
+ acpi_status status;
+ unsigned long long tmp;
+ struct thermal_zone_device_ops *ops = NULL;
+ int ret;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENODEV;
+
+ proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
+ if (!proc_priv)
+ return -ENOMEM;
+
+ proc_priv->dev = dev;
+ proc_priv->adev = adev;
*priv = proc_priv;
- ret = sysfs_create_group(&dev->kobj,
- &power_limit_attribute_group);
+ ret = proc_thermal_read_ppcc(proc_priv);
+ if (!ret) {
+ ret = sysfs_create_group(&dev->kobj,
+ &power_limit_attribute_group);
+
+ }
if (ret)
- goto free_buffer;
+ return ret;
status = acpi_evaluate_integer(adev->handle, "_TMP", NULL, &tmp);
if (ACPI_FAILURE(status)) {
@@ -274,20 +308,32 @@ static int proc_thermal_add(struct device *dev,
proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
if (IS_ERR(proc_priv->int340x_zone)) {
- sysfs_remove_group(&proc_priv->dev->kobj,
- &power_limit_attribute_group);
ret = PTR_ERR(proc_priv->int340x_zone);
+ goto remove_group;
} else
ret = 0;
-free_buffer:
- kfree(buf.pointer);
+ ret = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+ proc_thermal_notify,
+ (void *)proc_priv);
+ if (ret)
+ goto remove_zone;
+
+ return 0;
+
+remove_zone:
+ int340x_thermal_zone_remove(proc_priv->int340x_zone);
+remove_group:
+ sysfs_remove_group(&proc_priv->dev->kobj,
+ &power_limit_attribute_group);
return ret;
}
static void proc_thermal_remove(struct proc_thermal_device *proc_priv)
{
+ acpi_remove_notify_handler(proc_priv->adev->handle,
+ ACPI_DEVICE_NOTIFY, proc_thermal_notify);
int340x_thermal_zone_remove(proc_priv->int340x_zone);
sysfs_remove_group(&proc_priv->dev->kobj,
&power_limit_attribute_group);
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 6c7958825..015ce2eb6 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -510,12 +510,6 @@ static int start_power_clamp(void)
unsigned long cpu;
struct task_struct *thread;
- /* check if pkg cstate counter is completely 0, abort in this case */
- if (!has_pkg_state_counter()) {
- pr_err("pkg cstate counter not functional, abort\n");
- return -EINVAL;
- }
-
set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
/* prevent cpu hotplug */
get_online_cpus();
@@ -672,35 +666,11 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
.set_cur_state = powerclamp_set_cur_state,
};
-/* runs on Nehalem and later */
static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
- { X86_VENDOR_INTEL, 6, 0x1a},
- { X86_VENDOR_INTEL, 6, 0x1c},
- { X86_VENDOR_INTEL, 6, 0x1e},
- { X86_VENDOR_INTEL, 6, 0x1f},
- { X86_VENDOR_INTEL, 6, 0x25},
- { X86_VENDOR_INTEL, 6, 0x26},
- { X86_VENDOR_INTEL, 6, 0x2a},
- { X86_VENDOR_INTEL, 6, 0x2c},
- { X86_VENDOR_INTEL, 6, 0x2d},
- { X86_VENDOR_INTEL, 6, 0x2e},
- { X86_VENDOR_INTEL, 6, 0x2f},
- { X86_VENDOR_INTEL, 6, 0x37},
- { X86_VENDOR_INTEL, 6, 0x3a},
- { X86_VENDOR_INTEL, 6, 0x3c},
- { X86_VENDOR_INTEL, 6, 0x3d},
- { X86_VENDOR_INTEL, 6, 0x3e},
- { X86_VENDOR_INTEL, 6, 0x3f},
- { X86_VENDOR_INTEL, 6, 0x45},
- { X86_VENDOR_INTEL, 6, 0x46},
- { X86_VENDOR_INTEL, 6, 0x47},
- { X86_VENDOR_INTEL, 6, 0x4c},
- { X86_VENDOR_INTEL, 6, 0x4d},
- { X86_VENDOR_INTEL, 6, 0x4e},
- { X86_VENDOR_INTEL, 6, 0x4f},
- { X86_VENDOR_INTEL, 6, 0x56},
- { X86_VENDOR_INTEL, 6, 0x57},
- { X86_VENDOR_INTEL, 6, 0x5e},
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT },
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC },
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC},
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
@@ -712,11 +682,12 @@ static int __init powerclamp_probe(void)
boot_cpu_data.x86, boot_cpu_data.x86_model);
return -ENODEV;
}
- if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
- !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) ||
- !boot_cpu_has(X86_FEATURE_MWAIT) ||
- !boot_cpu_has(X86_FEATURE_ARAT))
+
+	/* The goal of idle time alignment is to reach a package C-state. */
+	if (!has_pkg_state_counter()) {
+		pr_info("No package C-state available\n");
return -ENODEV;
+ }
/* find the deepest mwait value */
find_target_mwait();
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 507632b96..262ab0a22 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -144,7 +144,6 @@ struct mtk_thermal {
s32 o_slope;
s32 vts[MT8173_NUM_SENSORS];
- struct thermal_zone_device *tzd;
};
struct mtk_thermal_bank_cfg {
@@ -572,16 +571,11 @@ static int mtk_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mt);
- mt->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, mt,
- &mtk_thermal_ops);
- if (IS_ERR(mt->tzd))
- goto err_register;
+ devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mt,
+ &mtk_thermal_ops);
return 0;
-err_register:
- clk_disable_unprepare(mt->clk_peri_therm);
-
err_disable_clk_auxadc:
clk_disable_unprepare(mt->clk_auxadc);
@@ -592,8 +586,6 @@ static int mtk_thermal_remove(struct platform_device *pdev)
{
struct mtk_thermal *mt = platform_get_drvdata(pdev);
- thermal_zone_of_sensor_unregister(&pdev->dev, mt->tzd);
-
clk_disable_unprepare(mt->clk_peri_therm);
clk_disable_unprepare(mt->clk_auxadc);
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index d8ec44b19..b8e509c60 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -331,6 +331,14 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
if (trip >= data->ntrips || trip < 0)
return -EDOM;
+ if (data->ops->set_trip_temp) {
+ int ret;
+
+ ret = data->ops->set_trip_temp(data->sensor_data, trip, temp);
+ if (ret)
+ return ret;
+ }
+
/* thermal framework should take care of data->mask & (1 << trip) */
data->trips[trip].temperature = temp;
@@ -906,7 +914,7 @@ finish:
return tz;
free_tbps:
- for (i = 0; i < tz->num_tbps; i++)
+ for (i = i - 1; i >= 0; i--)
of_node_put(tz->tbps[i].cooling_device);
kfree(tz->tbps);
free_trips:
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index b677aada5..f8a3c60be 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -260,7 +260,7 @@ static int qpnp_tm_probe(struct platform_device *pdev)
if (ret < 0)
goto fail;
- chip->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
+ chip->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
&qpnp_tm_sensor_ops);
if (IS_ERR(chip->tz_dev)) {
dev_err(&pdev->dev, "failed to register sensor\n");
@@ -281,7 +281,6 @@ static int qpnp_tm_remove(struct platform_device *pdev)
{
struct qpnp_tm_chip *chip = dev_get_drvdata(&pdev->dev);
- thermal_zone_of_sensor_unregister(&pdev->dev, chip->tz_dev);
if (!IS_ERR(chip->adc))
iio_channel_release(chip->adc);
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 82daba09e..71a339271 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -492,7 +492,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
goto error_unregister;
if (of_data == USE_OF_THERMAL)
- priv->zone = thermal_zone_of_sensor_register(
+ priv->zone = devm_thermal_zone_of_sensor_register(
dev, i, priv,
&rcar_thermal_zone_of_ops);
else
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 233a56444..5d491f16a 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -1,7 +1,5 @@
/*
- * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
- *
- * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ * Copyright (c) 2014-2016, Fuzhou Rockchip Electronics Co., Ltd
* Caesar Wang <wxt@rock-chips.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -23,8 +21,10 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/thermal.h>
+#include <linux/mfd/syscon.h>
#include <linux/pinctrl/consumer.h>
/**
@@ -73,7 +73,7 @@ enum adc_sort_mode {
#define SOC_MAX_SENSORS 2
/**
- * struct chip_tsadc_table: hold information about chip-specific differences
+ * struct chip_tsadc_table - hold information about chip-specific differences
* @id: conversion table
* @length: size of conversion table
* @data_mask: mask to apply on data inputs
@@ -86,6 +86,20 @@ struct chip_tsadc_table {
enum adc_sort_mode mode;
};
+/**
+ * struct rockchip_tsadc_chip - hold the private data of tsadc chip
+ * @chn_id[SOC_MAX_SENSORS]: the sensor ids of the chip corresponding to each channel
+ * @chn_num: the number of channels of the tsadc chip
+ * @tshut_temp: the hardware-controlled shutdown temperature value
+ * @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO)
+ * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
+ * @initialize: SoC-specific method to initialize the tsadc controller
+ * @irq_ack: clear the interrupt
+ * @get_temp: get the temperature
+ * @set_tshut_temp: set the hardware-controlled shutdown temperature
+ * @set_tshut_mode: set the hardware-controlled shutdown mode
+ * @table: the chip-specific conversion table
+ */
struct rockchip_tsadc_chip {
	/* The sensor ids of the chip corresponding to the ADC channels */
int chn_id[SOC_MAX_SENSORS];
@@ -97,7 +111,8 @@ struct rockchip_tsadc_chip {
enum tshut_polarity tshut_polarity;
/* Chip-wide methods */
- void (*initialize)(void __iomem *reg, enum tshut_polarity p);
+ void (*initialize)(struct regmap *grf,
+ void __iomem *reg, enum tshut_polarity p);
void (*irq_ack)(void __iomem *reg);
void (*control)(void __iomem *reg, bool on);
@@ -112,12 +127,32 @@ struct rockchip_tsadc_chip {
struct chip_tsadc_table table;
};
+/**
+ * struct rockchip_thermal_sensor - hold the information of thermal sensor
+ * @thermal: pointer to the driver's private thermal data
+ * @tzd: pointer to a thermal zone
+ * @id: identifier of the thermal sensor
+ */
struct rockchip_thermal_sensor {
struct rockchip_thermal_data *thermal;
struct thermal_zone_device *tzd;
int id;
};
+/**
+ * struct rockchip_thermal_data - hold the private data of thermal driver
+ * @chip: pointer to the platform/configuration data
+ * @pdev: platform device of thermal
+ * @reset: the reset controller of tsadc
+ * @sensors[SOC_MAX_SENSORS]: the array of thermal sensors
+ * @clk: the controller clock, divided from the external 24MHz clock
+ * @pclk: the advanced peripherals bus clock
+ * @grf: the general register file, used for static configuration by software
+ * @regs: the base address of tsadc controller
+ * @tshut_temp: the hardware-controlled shutdown temperature value
+ * @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO)
+ * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
+ */
struct rockchip_thermal_data {
const struct rockchip_tsadc_chip *chip;
struct platform_device *pdev;
@@ -128,6 +163,7 @@ struct rockchip_thermal_data {
struct clk *clk;
struct clk *pclk;
+ struct regmap *grf;
void __iomem *regs;
int tshut_temp;
@@ -142,6 +178,7 @@ struct rockchip_thermal_data {
 * TSADCV3_* are used for SoCs newer than RK3288 (e.g. RK3228, RK3399).
*
*/
+#define TSADCV2_USER_CON 0x00
#define TSADCV2_AUTO_CON 0x04
#define TSADCV2_INT_EN 0x08
#define TSADCV2_INT_PD 0x0c
@@ -155,12 +192,7 @@ struct rockchip_thermal_data {
#define TSADCV2_AUTO_EN BIT(0)
#define TSADCV2_AUTO_SRC_EN(chn) BIT(4 + (chn))
#define TSADCV2_AUTO_TSHUT_POLARITY_HIGH BIT(8)
-/**
- * TSADCV1_AUTO_Q_SEL_EN:
- * whether select (1024 - tsadc_q) as output
- * 1'b0:use tsadc_q as output(temperature-code is rising sequence)
- * 1'b1:use(1024 - tsadc_q) as output (temperature-code is falling sequence)
- */
+
#define TSADCV3_AUTO_Q_SEL_EN BIT(1)
#define TSADCV2_INT_SRC_EN(chn) BIT(chn)
@@ -177,19 +209,32 @@ struct rockchip_thermal_data {
#define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT 4
#define TSADCV2_AUTO_PERIOD_TIME 250 /* msec */
#define TSADCV2_AUTO_PERIOD_HT_TIME 50 /* msec */
+#define TSADCV2_USER_INTER_PD_SOC 0x340 /* 13 clocks */
-struct tsadc_table {
- u32 code;
- int temp;
-};
+#define GRF_SARADC_TESTBIT 0x0e644
+#define GRF_TSADC_TESTBIT_L 0x0e648
+#define GRF_TSADC_TESTBIT_H 0x0e64c
+
+#define GRF_TSADC_TSEN_PD_ON (0x30003 << 0)
+#define GRF_TSADC_TSEN_PD_OFF (0x30000 << 0)
+#define GRF_SARADC_TESTBIT_ON (0x10001 << 2)
+#define GRF_TSADC_TESTBIT_H_ON (0x10001 << 2)
/**
+ * struct tsadc_table - code to temperature conversion table
+ * @code: the value of adc channel
+ * @temp: the temperature
* Note:
- * Code to Temperature mapping of the Temperature sensor is a piece wise linear
+ * code to temperature mapping of the temperature sensor is a piecewise
 * linear curve. Any code falling between two given temperatures can be
 * linearly interpolated.
- * Code to Temperature mapping should be updated based on sillcon results.
+ * Code to Temperature mapping should be updated based on manufacturer results.
*/
+struct tsadc_table {
+ u32 code;
+ int temp;
+};
+
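+/*
+ * For example, with the rk3399 table below, code 474 falls between
+ * {470, 0} and {478, 5000}, so in increment mode the interpolated
+ * temperature is 0 + (474 - 470) * (5000 - 0) / (478 - 470) = 2500
+ * millicelsius.
+ */
+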
static const struct tsadc_table rk3228_code_table[] = {
{0, -40000},
{588, -40000},
@@ -308,40 +353,40 @@ static const struct tsadc_table rk3368_code_table[] = {
static const struct tsadc_table rk3399_code_table[] = {
{0, -40000},
- {593, -40000},
- {598, -35000},
- {603, -30000},
- {609, -25000},
- {614, -20000},
- {619, -15000},
- {625, -10000},
- {630, -5000},
- {635, 0},
- {641, 5000},
- {646, 10000},
- {651, 15000},
- {657, 20000},
- {662, 25000},
- {667, 30000},
- {673, 35000},
- {678, 40000},
- {684, 45000},
- {689, 50000},
- {694, 55000},
- {700, 60000},
- {705, 65000},
- {711, 70000},
- {716, 75000},
- {722, 80000},
- {727, 85000},
- {733, 90000},
- {738, 95000},
- {743, 100000},
- {749, 105000},
- {754, 110000},
- {760, 115000},
- {765, 120000},
- {771, 125000},
+ {402, -40000},
+ {410, -35000},
+ {419, -30000},
+ {427, -25000},
+ {436, -20000},
+ {444, -15000},
+ {453, -10000},
+ {461, -5000},
+ {470, 0},
+ {478, 5000},
+ {487, 10000},
+ {496, 15000},
+ {504, 20000},
+ {513, 25000},
+ {521, 30000},
+ {530, 35000},
+ {538, 40000},
+ {547, 45000},
+ {555, 50000},
+ {564, 55000},
+ {573, 60000},
+ {581, 65000},
+ {590, 70000},
+ {599, 75000},
+ {607, 80000},
+ {616, 85000},
+ {624, 90000},
+ {633, 95000},
+ {642, 100000},
+ {650, 105000},
+ {659, 110000},
+ {668, 115000},
+ {677, 120000},
+ {685, 125000},
{TSADCV3_DATA_MASK, 125000},
};
@@ -405,8 +450,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
return -EAGAIN; /* Incorrect reading */
while (low <= high) {
- if (code >= table.id[mid - 1].code &&
- code < table.id[mid].code)
+ if (code <= table.id[mid].code &&
+ code > table.id[mid - 1].code)
break;
else if (code > table.id[mid].code)
low = mid + 1;
@@ -449,7 +494,7 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
* If the temperature is higher than COMP_INT or COMP_SHUT for
* "debounce" times, TSADC controller will generate interrupt or TSHUT.
*/
-static void rk_tsadcv2_initialize(void __iomem *regs,
+static void rk_tsadcv2_initialize(struct regmap *grf, void __iomem *regs,
enum tshut_polarity tshut_polarity)
{
if (tshut_polarity == TSHUT_HIGH_ACTIVE)
@@ -466,6 +511,62 @@ static void rk_tsadcv2_initialize(void __iomem *regs,
regs + TSADCV2_AUTO_PERIOD_HT);
writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
+
+ if (IS_ERR(grf)) {
+ pr_warn("%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+}
+
+/**
+ * rk_tsadcv3_initialize - initialize the TSADC controller.
+ *
+ * (1) Run the tsadc control power sequence.
+ *
+ * (2) Set TSADC_V2_AUTO_PERIOD:
+ *     Configure the interval between two consecutive accesses of
+ *     TSADC in normal operation.
+ *
+ * (3) Set TSADCV2_AUTO_PERIOD_HT:
+ *     Configure the interval between two consecutive accesses of
+ *     TSADC after the temperature is higher than COM_SHUT or COM_INT.
+ *
+ * (4) Set TSADCV2_HIGH_INT_DEBOUNCE and TSADC_HIGHT_TSHUT_DEBOUNCE:
+ *     If the temperature is higher than COMP_INT or COMP_SHUT for
+ *     "debounce" times, the TSADC controller will generate an interrupt or TSHUT.
+ */
+static void rk_tsadcv3_initialize(struct regmap *grf, void __iomem *regs,
+ enum tshut_polarity tshut_polarity)
+{
+ /* The tsadc control power sequence */
+ if (IS_ERR(grf)) {
+		/* Set the interleave value to work around the IC time sync issue */
+ writel_relaxed(TSADCV2_USER_INTER_PD_SOC, regs +
+ TSADCV2_USER_CON);
+ } else {
+ regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_TSEN_PD_ON);
+ mdelay(10);
+ regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_TSEN_PD_OFF);
+ usleep_range(15, 100); /* The spec note says at least 15 us */
+ regmap_write(grf, GRF_SARADC_TESTBIT, GRF_SARADC_TESTBIT_ON);
+ regmap_write(grf, GRF_TSADC_TESTBIT_H, GRF_TSADC_TESTBIT_H_ON);
+ usleep_range(90, 200); /* The spec note says at least 90 us */
+ }
+
+ if (tshut_polarity == TSHUT_HIGH_ACTIVE)
+ writel_relaxed(0U | TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
+ regs + TSADCV2_AUTO_CON);
+ else
+ writel_relaxed(0U & ~TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
+ regs + TSADCV2_AUTO_CON);
+
+ writel_relaxed(TSADCV2_AUTO_PERIOD_TIME, regs + TSADCV2_AUTO_PERIOD);
+ writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
+ regs + TSADCV2_HIGHT_INT_DEBOUNCE);
+ writel_relaxed(TSADCV2_AUTO_PERIOD_HT_TIME,
+ regs + TSADCV2_AUTO_PERIOD_HT);
+ writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
+ regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
}
static void rk_tsadcv2_irq_ack(void __iomem *regs)
@@ -498,10 +599,11 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable)
}
/**
- * @rk_tsadcv3_control:
- * TSADC controller works at auto mode, and some SoCs need set the tsadc_q_sel
- * bit on TSADCV2_AUTO_CON[1]. The (1024 - tsadc_q) as output adc value if
- * setting this bit to enable.
+ * rk_tsadcv3_control - enable or disable the tsadc controller.
+ *
+ * NOTE: The TSADC controller works in auto mode, and some SoCs need the
+ * tsadc_q_sel bit set in TSADCV2_AUTO_CON[1]. When this bit is enabled,
+ * (1024 - tsadc_q) is used as the output adc value.
*/
static void rk_tsadcv3_control(void __iomem *regs, bool enable)
{
@@ -603,6 +705,30 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
},
};
+static const struct rockchip_tsadc_chip rk3366_tsadc_data = {
+ .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
+ .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
+ .chn_num = 2, /* two channels for tsadc */
+
+	.tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO to the PMIC */
+ .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
+ .tshut_temp = 95000,
+
+ .initialize = rk_tsadcv3_initialize,
+ .irq_ack = rk_tsadcv3_irq_ack,
+ .control = rk_tsadcv3_control,
+ .get_temp = rk_tsadcv2_get_temp,
+ .set_tshut_temp = rk_tsadcv2_tshut_temp,
+ .set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+ .table = {
+ .id = rk3228_code_table,
+ .length = ARRAY_SIZE(rk3228_code_table),
+ .data_mask = TSADCV3_DATA_MASK,
+ .mode = ADC_INCREMENT,
+ },
+};
+
static const struct rockchip_tsadc_chip rk3368_tsadc_data = {
.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
.chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
@@ -636,7 +762,7 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
.tshut_temp = 95000,
- .initialize = rk_tsadcv2_initialize,
+ .initialize = rk_tsadcv3_initialize,
.irq_ack = rk_tsadcv3_irq_ack,
.control = rk_tsadcv3_control,
.get_temp = rk_tsadcv2_get_temp,
@@ -661,6 +787,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = {
.data = (void *)&rk3288_tsadc_data,
},
{
+ .compatible = "rockchip,rk3366-tsadc",
+ .data = (void *)&rk3366_tsadc_data,
+ },
+ {
.compatible = "rockchip,rk3368-tsadc",
.data = (void *)&rk3368_tsadc_data,
},
@@ -768,6 +898,11 @@ static int rockchip_configure_from_dt(struct device *dev,
return -EINVAL;
}
+	/*
+	 * Don't treat a missing grf handle as an error here, since some
+	 * SoCs don't need the "rockchip,grf" property.
+	 */
+ thermal->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+
return 0;
}
@@ -786,8 +921,8 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
sensor->thermal = thermal;
sensor->id = id;
- sensor->tzd = thermal_zone_of_sensor_register(&pdev->dev, id, sensor,
- &rockchip_of_thermal_ops);
+ sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, id,
+ sensor, &rockchip_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
error = PTR_ERR(sensor->tzd);
dev_err(&pdev->dev, "failed to register sensor %d: %d\n",
@@ -815,7 +950,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct resource *res;
int irq;
- int i, j;
+ int i;
int error;
match = of_match_node(of_rockchip_thermal_match, np);
@@ -888,7 +1023,8 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
goto err_disable_pclk;
}
- thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
+ thermal->chip->initialize(thermal->grf, thermal->regs,
+ thermal->tshut_polarity);
for (i = 0; i < thermal->chip->chn_num; i++) {
error = rockchip_thermal_register_sensor(pdev, thermal,
@@ -898,9 +1034,6 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"failed to register sensor[%d] : error = %d\n",
i, error);
- for (j = 0; j < i; j++)
- thermal_zone_of_sensor_unregister(&pdev->dev,
- thermal->sensors[j].tzd);
goto err_disable_pclk;
}
}
@@ -912,7 +1045,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
if (error) {
dev_err(&pdev->dev,
"failed to request tsadc irq: %d\n", error);
- goto err_unregister_sensor;
+ goto err_disable_pclk;
}
thermal->chip->control(thermal->regs, true);
@@ -924,11 +1057,6 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
return 0;
-err_unregister_sensor:
- while (i--)
- thermal_zone_of_sensor_unregister(&pdev->dev,
- thermal->sensors[i].tzd);
-
err_disable_pclk:
clk_disable_unprepare(thermal->pclk);
err_disable_clk:
@@ -946,7 +1074,6 @@ static int rockchip_thermal_remove(struct platform_device *pdev)
struct rockchip_thermal_sensor *sensor = &thermal->sensors[i];
rockchip_thermal_toggle_sensor(sensor, false);
- thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd);
}
thermal->chip->control(thermal->regs, false);
@@ -988,12 +1115,15 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
return error;
error = clk_enable(thermal->pclk);
- if (error)
+ if (error) {
+ clk_disable(thermal->clk);
return error;
+ }
rockchip_thermal_reset_controller(thermal->reset);
- thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
+ thermal->chip->initialize(thermal->grf, thermal->regs,
+ thermal->tshut_polarity);
for (i = 0; i < thermal->chip->chn_num; i++) {
int id = thermal->sensors[i].id;
diff --git a/drivers/thermal/tango_thermal.c b/drivers/thermal/tango_thermal.c
new file mode 100644
index 000000000..70e0d9f40
--- /dev/null
+++ b/drivers/thermal/tango_thermal.c
@@ -0,0 +1,109 @@
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/thermal.h>
+#include <linux/platform_device.h>
+
+/*
+ * According to a data sheet draft, "this temperature sensor uses a bandgap
+ * type of circuit to compare a voltage which has a negative temperature
+ * coefficient with a voltage that is proportional to absolute temperature.
+ * A resistor bank allows 41 different temperature thresholds to be selected
+ * and the logic output will then indicate whether the actual die temperature
+ * lies above or below the selected threshold."
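+ *
+ * The driver below converts the selected threshold index to
+ * millicelsius as (idx * 9 / 2 - 38) * 1000; e.g. index 20 reads as
+ * 52000 mC and the minimum index 15 as 29000 mC.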
+ */
+
+#define TEMPSI_CMD 0
+#define TEMPSI_RES 4
+#define TEMPSI_CFG 8
+
+#define CMD_OFF 0
+#define CMD_ON 1
+#define CMD_READ 2
+
+#define IDX_MIN 15
+#define IDX_MAX 40
+
+struct tango_thermal_priv {
+ void __iomem *base;
+ int thresh_idx;
+};
+
+static bool temp_above_thresh(void __iomem *base, int thresh_idx)
+{
+ writel(CMD_READ | thresh_idx << 8, base + TEMPSI_CMD);
+ usleep_range(10, 20);
+ writel(CMD_READ | thresh_idx << 8, base + TEMPSI_CMD);
+
+ return readl(base + TEMPSI_RES);
+}
+
+static int tango_get_temp(void *arg, int *res)
+{
+ struct tango_thermal_priv *priv = arg;
+ int idx = priv->thresh_idx;
+
+ if (temp_above_thresh(priv->base, idx)) {
+ /* Search upward by incrementing thresh_idx */
+ while (idx < IDX_MAX && temp_above_thresh(priv->base, ++idx))
+ cpu_relax();
+ idx = idx - 1; /* always return lower bound */
+ } else {
+ /* Search downward by decrementing thresh_idx */
+ while (idx > IDX_MIN && !temp_above_thresh(priv->base, --idx))
+ cpu_relax();
+ }
+
+ *res = (idx * 9 / 2 - 38) * 1000; /* millidegrees Celsius */
+ priv->thresh_idx = idx;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops ops = {
+ .get_temp = tango_get_temp,
+};
+
+static int tango_thermal_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct tango_thermal_priv *priv;
+ struct thermal_zone_device *tzdev;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->thresh_idx = IDX_MIN;
+ writel(0, priv->base + TEMPSI_CFG);
+ writel(CMD_ON, priv->base + TEMPSI_CMD);
+
+ tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv, &ops);
+ return PTR_ERR_OR_ZERO(tzdev);
+}
+
+static const struct of_device_id tango_sensor_ids[] = {
+ {
+ .compatible = "sigma,smp8758-thermal",
+ },
+ { /* sentinel */ }
+};
+
+static struct platform_driver tango_thermal_driver = {
+ .probe = tango_thermal_probe,
+ .driver = {
+ .name = "tango-thermal",
+ .of_match_table = tango_sensor_ids,
+ },
+};
+
+module_platform_driver(tango_thermal_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sigma Designs");
+MODULE_DESCRIPTION("Tango temperature sensor");
diff --git a/drivers/thermal/tegra/Kconfig b/drivers/thermal/tegra/Kconfig
new file mode 100644
index 000000000..cec586ec7
--- /dev/null
+++ b/drivers/thermal/tegra/Kconfig
@@ -0,0 +1,13 @@
+menu "NVIDIA Tegra thermal drivers"
+depends on ARCH_TEGRA
+
+config TEGRA_SOCTHERM
+ tristate "Tegra SOCTHERM thermal management"
+ help
+ Enable this option for integrated thermal management support on NVIDIA
+ Tegra systems-on-chip. The driver supports four thermal zones
+ (CPU, GPU, MEM, PLLX). Cooling devices can be bound to the thermal
+ zones to manage temperatures. This option is also required for the
+ emergency thermal reset (thermtrip) feature to function.
+
+endmenu
diff --git a/drivers/thermal/tegra/Makefile b/drivers/thermal/tegra/Makefile
new file mode 100644
index 000000000..1ce1af2cf
--- /dev/null
+++ b/drivers/thermal/tegra/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_TEGRA_SOCTHERM) += tegra-soctherm.o
+
+tegra-soctherm-y := soctherm.o soctherm-fuse.o
+tegra-soctherm-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124-soctherm.o
+tegra-soctherm-$(CONFIG_ARCH_TEGRA_132_SOC) += tegra132-soctherm.o
+tegra-soctherm-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-soctherm.o
diff --git a/drivers/thermal/tegra/soctherm-fuse.c b/drivers/thermal/tegra/soctherm-fuse.c
new file mode 100644
index 000000000..29963180c
--- /dev/null
+++ b/drivers/thermal/tegra/soctherm-fuse.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <soc/tegra/fuse.h>
+
+#include "soctherm.h"
+
+#define NOMINAL_CALIB_FT 105
+#define NOMINAL_CALIB_CP 25
+
+#define FUSE_TSENSOR_CALIB_CP_TS_BASE_MASK 0x1fff
+#define FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK (0x1fff << 13)
+#define FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT 13
+
+#define FUSE_TSENSOR_COMMON 0x180
+
+/*
+ * Tegra210: Layout of bits in FUSE_TSENSOR_COMMON:
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | BASE_FT | BASE_CP | SHFT_FT | SHIFT_CP |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Tegra12x, etc:
+ * In chips prior to Tegra210, this fuse was incorrectly sized as 26 bits,
+ * and didn't hold SHIFT_CP in [31:26]. Therefore these missing six bits
+ * were obtained via the FUSE_SPARE_REALIGNMENT_REG register [5:0].
+ *
+ * FUSE_TSENSOR_COMMON:
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |-----------| SHFT_FT | BASE_FT | BASE_CP |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * FUSE_SPARE_REALIGNMENT_REG:
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |---------------------------------------------------| SHIFT_CP |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
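+
+/*
+ * E.g. on the pre-Tegra210 layout, SHIFT_CP is read from
+ * FUSE_SPARE_REALIGNMENT_REG[5:0] and sign-extended from 6 bits,
+ * which is what the sign_extend32(val, 5) below implements.
+ */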
+
+#define CALIB_COEFFICIENT 1000000LL
+
+/**
+ * div64_s64_precise() - wrapper for div64_s64()
+ * @a: the dividend
+ * @b: the divisor
+ *
+ * Implements division with fairly accurate rounding instead of truncation by
+ * shifting the dividend to the left by 16 so that the quotient has a
+ * much higher precision.
+ *
+ * Return: the quotient of a / b.
+ */
+static s64 div64_s64_precise(s64 a, s32 b)
+{
+ s64 r, al;
+
+ /* Scale up for increased precision division */
+ al = a << 16;
+
+ r = div64_s64(al * 2 + 1, 2 * b);
+ return r >> 16;
+}
+
+int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
+ struct tsensor_shared_calib *shared)
+{
+ u32 val;
+ s32 shifted_cp, shifted_ft;
+ int err;
+
+ err = tegra_fuse_readl(FUSE_TSENSOR_COMMON, &val);
+ if (err)
+ return err;
+
+ shared->base_cp = (val & tfuse->fuse_base_cp_mask) >>
+ tfuse->fuse_base_cp_shift;
+ shared->base_ft = (val & tfuse->fuse_base_ft_mask) >>
+ tfuse->fuse_base_ft_shift;
+
+ shifted_ft = (val & tfuse->fuse_shift_ft_mask) >>
+ tfuse->fuse_shift_ft_shift;
+ shifted_ft = sign_extend32(shifted_ft, 4);
+
+ if (tfuse->fuse_spare_realignment) {
+ err = tegra_fuse_readl(tfuse->fuse_spare_realignment, &val);
+ if (err)
+ return err;
+ }
+
+ shifted_cp = sign_extend32(val, 5);
+
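+	/*
+	 * The actual_temp_* values are twice the real calibration
+	 * temperatures, i.e. they are kept in half-degree-Celsius units.
+	 */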
+ shared->actual_temp_cp = 2 * NOMINAL_CALIB_CP + shifted_cp;
+ shared->actual_temp_ft = 2 * NOMINAL_CALIB_FT + shifted_ft;
+
+ return 0;
+}
+
+int tegra_calc_tsensor_calib(const struct tegra_tsensor *sensor,
+ const struct tsensor_shared_calib *shared,
+ u32 *calibration)
+{
+ const struct tegra_tsensor_group *sensor_group;
+ u32 val, calib;
+ s32 actual_tsensor_ft, actual_tsensor_cp;
+ s32 delta_sens, delta_temp;
+ s32 mult, div;
+ s16 therma, thermb;
+ s64 temp;
+ int err;
+
+ sensor_group = sensor->group;
+
+ err = tegra_fuse_readl(sensor->calib_fuse_offset, &val);
+ if (err)
+ return err;
+
+ actual_tsensor_cp = (shared->base_cp * 64) + sign_extend32(val, 12);
+ val = (val & FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK) >>
+ FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT;
+ actual_tsensor_ft = (shared->base_ft * 32) + sign_extend32(val, 12);
+
+ delta_sens = actual_tsensor_ft - actual_tsensor_cp;
+ delta_temp = shared->actual_temp_ft - shared->actual_temp_cp;
+
+ mult = sensor_group->pdiv * sensor->config->tsample_ate;
+ div = sensor->config->tsample * sensor_group->pdiv_ate;
+
+ temp = (s64)delta_temp * (1LL << 13) * mult;
+ therma = div64_s64_precise(temp, (s64)delta_sens * div);
+
+ temp = ((s64)actual_tsensor_ft * shared->actual_temp_cp) -
+ ((s64)actual_tsensor_cp * shared->actual_temp_ft);
+ thermb = div64_s64_precise(temp, delta_sens);
+
+ temp = (s64)therma * sensor->fuse_corr_alpha;
+ therma = div64_s64_precise(temp, CALIB_COEFFICIENT);
+
+ temp = (s64)thermb * sensor->fuse_corr_alpha + sensor->fuse_corr_beta;
+ thermb = div64_s64_precise(temp, CALIB_COEFFICIENT);
+
+ calib = ((u16)therma << SENSOR_CONFIG2_THERMA_SHIFT) |
+ ((u16)thermb << SENSOR_CONFIG2_THERMB_SHIFT);
+
+ *calibration = calib;
+
+ return 0;
+}
+
+MODULE_AUTHOR("Wei Ni <wni@nvidia.com>");
+MODULE_DESCRIPTION("Tegra SOCTHERM fuse management");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
new file mode 100644
index 000000000..b86517262
--- /dev/null
+++ b/drivers/thermal/tegra/soctherm.c
@@ -0,0 +1,685 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author:
+ * Mikko Perttunen <mperttunen@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/thermal.h>
+
+#include <dt-bindings/thermal/tegra124-soctherm.h>
+
+#include "soctherm.h"
+
+#define SENSOR_CONFIG0 0
+#define SENSOR_CONFIG0_STOP BIT(0)
+#define SENSOR_CONFIG0_CPTR_OVER BIT(2)
+#define SENSOR_CONFIG0_OVER BIT(3)
+#define SENSOR_CONFIG0_TCALC_OVER BIT(4)
+#define SENSOR_CONFIG0_TALL_MASK (0xfffff << 8)
+#define SENSOR_CONFIG0_TALL_SHIFT 8
+
+#define SENSOR_CONFIG1 4
+#define SENSOR_CONFIG1_TSAMPLE_MASK 0x3ff
+#define SENSOR_CONFIG1_TSAMPLE_SHIFT 0
+#define SENSOR_CONFIG1_TIDDQ_EN_MASK (0x3f << 15)
+#define SENSOR_CONFIG1_TIDDQ_EN_SHIFT 15
+#define SENSOR_CONFIG1_TEN_COUNT_MASK (0x3f << 24)
+#define SENSOR_CONFIG1_TEN_COUNT_SHIFT 24
+#define SENSOR_CONFIG1_TEMP_ENABLE BIT(31)
+
+/*
+ * SENSOR_CONFIG2 is defined in soctherm.h
+ * because, it will be used by tegra_soctherm_fuse.c
+ */
+
+#define SENSOR_STATUS0 0xc
+#define SENSOR_STATUS0_VALID_MASK BIT(31)
+#define SENSOR_STATUS0_CAPTURE_MASK 0xffff
+
+#define SENSOR_STATUS1 0x10
+#define SENSOR_STATUS1_TEMP_VALID_MASK BIT(31)
+#define SENSOR_STATUS1_TEMP_MASK 0xffff
+
+#define READBACK_VALUE_MASK 0xff00
+#define READBACK_VALUE_SHIFT 8
+#define READBACK_ADD_HALF BIT(7)
+#define READBACK_NEGATE BIT(0)
+
+/* get val from register(r) mask bits(m) */
+#define REG_GET_MASK(r, m) (((r) & (m)) >> (ffs(m) - 1))
+/* set val(v) to mask bits(m) of register(r) */
+#define REG_SET_MASK(r, m, v) (((r) & ~(m)) | \
+ (((v) & (m >> (ffs(m) - 1))) << (ffs(m) - 1)))
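+/*
+ * E.g. REG_GET_MASK(0xabcd, 0x0ff0) == 0xbc, and
+ * REG_SET_MASK(0xabcd, 0x0ff0, 0x12) == 0xa12d.
+ */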
+
+static const int min_low_temp = -127000;
+static const int max_high_temp = 127000;
+
+struct tegra_thermctl_zone {
+ void __iomem *reg;
+ struct device *dev;
+ struct thermal_zone_device *tz;
+ const struct tegra_tsensor_group *sg;
+};
+
+struct tegra_soctherm {
+ struct reset_control *reset;
+ struct clk *clock_tsensor;
+ struct clk *clock_soctherm;
+ void __iomem *regs;
+ struct thermal_zone_device **thermctl_tzs;
+
+ u32 *calib;
+ struct tegra_soctherm_soc *soc;
+
+ struct dentry *debugfs_dir;
+};
+
+static void enable_tsensor(struct tegra_soctherm *tegra, unsigned int i)
+{
+ const struct tegra_tsensor *sensor = &tegra->soc->tsensors[i];
+ void __iomem *base = tegra->regs + sensor->base;
+ unsigned int val;
+
+ val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT;
+ writel(val, base + SENSOR_CONFIG0);
+
+ val = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT;
+ val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT;
+ val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT;
+ val |= SENSOR_CONFIG1_TEMP_ENABLE;
+ writel(val, base + SENSOR_CONFIG1);
+
+ writel(tegra->calib[i], base + SENSOR_CONFIG2);
+}
+
+/*
+ * Translate from soctherm readback format to millicelsius.
+ * The soctherm readback format in bits is as follows:
+ * TTTTTTTT H______N
+ * where T's contain the temperature in Celsius,
+ * H denotes an addition of 0.5 Celsius and N denotes negation
+ * of the final value.
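+ * E.g. a readback of 0x2a80 (T = 42, H = 1, N = 0) translates to
+ * 42500 millicelsius.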
+ */
+static int translate_temp(u16 val)
+{
+ int t;
+
+ t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000;
+ if (val & READBACK_ADD_HALF)
+ t += 500;
+ if (val & READBACK_NEGATE)
+ t *= -1;
+
+ return t;
+}
+
+static int tegra_thermctl_get_temp(void *data, int *out_temp)
+{
+ struct tegra_thermctl_zone *zone = data;
+ u32 val;
+
+ val = readl(zone->reg);
+ val = REG_GET_MASK(val, zone->sg->sensor_temp_mask);
+ *out_temp = translate_temp(val);
+
+ return 0;
+}
+
+static int
+thermtrip_program(struct device *dev, const struct tegra_tsensor_group *sg,
+ int trip_temp);
+
+static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
+{
+ struct tegra_thermctl_zone *zone = data;
+ struct thermal_zone_device *tz = zone->tz;
+ const struct tegra_tsensor_group *sg = zone->sg;
+ struct device *dev = zone->dev;
+ enum thermal_trip_type type;
+ int ret;
+
+ if (!tz)
+ return -EINVAL;
+
+ ret = tz->ops->get_trip_type(tz, trip, &type);
+ if (ret)
+ return ret;
+
+ if (type != THERMAL_TRIP_CRITICAL)
+ return 0;
+
+ return thermtrip_program(dev, sg, temp);
+}
+
+static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
+ .get_temp = tegra_thermctl_get_temp,
+ .set_trip_temp = tegra_thermctl_set_trip_temp,
+};
+
+/**
+ * enforce_temp_range() - check and enforce temperature range [min, max]
+ * @dev: pointer to the struct device for the SOC_THERM IP block
+ * @trip_temp: the trip temperature to check
+ *
+ * Checks and enforces the permitted temperature range that the SOC_THERM
+ * HW can support. This is done while taking care of precision.
+ *
+ * Return: The precision adjusted capped temperature in millicelsius.
+ */
+static int enforce_temp_range(struct device *dev, int trip_temp)
+{
+ int temp;
+
+ temp = clamp_val(trip_temp, min_low_temp, max_high_temp);
+ if (temp != trip_temp)
+ dev_info(dev, "soctherm: trip temperature %d forced to %d\n",
+ trip_temp, temp);
+ return temp;
+}
+
+/**
+ * thermtrip_program() - Configures the hardware to shut down the
+ * system if a given sensor group reaches a given temperature
+ * @dev: ptr to the struct device for the SOC_THERM IP block
+ * @sg: pointer to the sensor group to set the thermtrip temperature for
+ * @trip_temp: the temperature in millicelsius to trigger the thermal trip at
+ *
+ * Sets the thermal trip threshold of the given sensor group to be the
+ * @trip_temp. If this threshold is crossed, the hardware will shut
+ * down.
+ *
+ * Note that, although @trip_temp is specified in millicelsius, the
+ * hardware is programmed in degrees Celsius.
+ *
+ * Return: 0 upon success, or %-EINVAL upon failure.
+ */
+static int thermtrip_program(struct device *dev,
+ const struct tegra_tsensor_group *sg,
+ int trip_temp)
+{
+ struct tegra_soctherm *ts = dev_get_drvdata(dev);
+ int temp;
+ u32 r;
+
+ if (!sg || !sg->thermtrip_threshold_mask)
+ return -EINVAL;
+
+ temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;
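+	/* temp is now expressed in units of soc->thresh_grain millicelsius */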
+
+ r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
+ r = REG_SET_MASK(r, sg->thermtrip_threshold_mask, temp);
+ r = REG_SET_MASK(r, sg->thermtrip_enable_mask, 1);
+ r = REG_SET_MASK(r, sg->thermtrip_any_en_mask, 0);
+ writel(r, ts->regs + THERMCTL_THERMTRIP_CTL);
+
+ return 0;
+}
+
+/**
+ * tegra_soctherm_set_hwtrips() - set HW trip point from DT data
+ * @dev: struct device * of the SOC_THERM instance
+ *
+ * Configure the SOC_THERM HW trip points, setting "THERMTRIP"
+ * trip points , using "critical" type trip_temp from thermal
+ * zone.
+ * After they have been configured, THERMTRIP will take action
+ * when the configured SoC thermal sensor group reaches a
+ * certain temperature.
+ *
+ * Return: 0 upon success, or a negative error code on failure.
+ * "Success" does not mean that trips was enabled; it could also
+ * mean that no node was found in DT.
+ * THERMTRIP has been enabled successfully when a message similar to
+ * this one appears on the serial console:
+ * "thermtrip: will shut down when sensor group XXX reaches YYYYYY mC"
+ */
+static int tegra_soctherm_set_hwtrips(struct device *dev,
+ const struct tegra_tsensor_group *sg,
+ struct thermal_zone_device *tz)
+{
+ int temperature;
+ int ret;
+
+ ret = tz->ops->get_crit_temp(tz, &temperature);
+ if (ret) {
+ dev_warn(dev, "thermtrip: %s: missing critical temperature\n",
+ sg->name);
+ return ret;
+ }
+
+ ret = thermtrip_program(dev, sg, temperature);
+ if (ret) {
+ dev_err(dev, "thermtrip: %s: error during enable\n",
+ sg->name);
+ return ret;
+ }
+
+ dev_info(dev,
+ "thermtrip: will shut down when %s reaches %d mC\n",
+ sg->name, temperature);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int regs_show(struct seq_file *s, void *data)
+{
+ struct platform_device *pdev = s->private;
+ struct tegra_soctherm *ts = platform_get_drvdata(pdev);
+ const struct tegra_tsensor *tsensors = ts->soc->tsensors;
+ const struct tegra_tsensor_group **ttgs = ts->soc->ttgs;
+ u32 r, state;
+ int i;
+
+ seq_puts(s, "-----TSENSE (convert HW)-----\n");
+
+ for (i = 0; i < ts->soc->num_tsensors; i++) {
+ r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG1);
+ state = REG_GET_MASK(r, SENSOR_CONFIG1_TEMP_ENABLE);
+
+ seq_printf(s, "%s: ", tsensors[i].name);
+ seq_printf(s, "En(%d) ", state);
+
+ if (!state) {
+ seq_puts(s, "\n");
+ continue;
+ }
+
+ state = REG_GET_MASK(r, SENSOR_CONFIG1_TIDDQ_EN_MASK);
+ seq_printf(s, "tiddq(%d) ", state);
+ state = REG_GET_MASK(r, SENSOR_CONFIG1_TEN_COUNT_MASK);
+ seq_printf(s, "ten_count(%d) ", state);
+ state = REG_GET_MASK(r, SENSOR_CONFIG1_TSAMPLE_MASK);
+ seq_printf(s, "tsample(%d) ", state + 1);
+
+ r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS1);
+ state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_VALID_MASK);
+ seq_printf(s, "Temp(%d/", state);
+ state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_MASK);
+ seq_printf(s, "%d) ", translate_temp(state));
+
+ r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS0);
+ state = REG_GET_MASK(r, SENSOR_STATUS0_VALID_MASK);
+ seq_printf(s, "Capture(%d/", state);
+ state = REG_GET_MASK(r, SENSOR_STATUS0_CAPTURE_MASK);
+ seq_printf(s, "%d) ", state);
+
+ r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG0);
+ state = REG_GET_MASK(r, SENSOR_CONFIG0_STOP);
+ seq_printf(s, "Stop(%d) ", state);
+ state = REG_GET_MASK(r, SENSOR_CONFIG0_TALL_MASK);
+ seq_printf(s, "Tall(%d) ", state);
+ state = REG_GET_MASK(r, SENSOR_CONFIG0_TCALC_OVER);
+ seq_printf(s, "Over(%d/", state);
+ state = REG_GET_MASK(r, SENSOR_CONFIG0_OVER);
+ seq_printf(s, "%d/", state);
+ state = REG_GET_MASK(r, SENSOR_CONFIG0_CPTR_OVER);
+ seq_printf(s, "%d) ", state);
+
+ r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG2);
+ state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMA_MASK);
+ seq_printf(s, "Therm_A/B(%d/", state);
+ state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMB_MASK);
+ seq_printf(s, "%d)\n", (s16)state);
+ }
+
+ r = readl(ts->regs + SENSOR_PDIV);
+ seq_printf(s, "PDIV: 0x%x\n", r);
+
+ r = readl(ts->regs + SENSOR_HOTSPOT_OFF);
+ seq_printf(s, "HOTSPOT: 0x%x\n", r);
+
+ seq_puts(s, "\n");
+ seq_puts(s, "-----SOC_THERM-----\n");
+
+ r = readl(ts->regs + SENSOR_TEMP1);
+ state = REG_GET_MASK(r, SENSOR_TEMP1_CPU_TEMP_MASK);
+ seq_printf(s, "Temperatures: CPU(%d) ", translate_temp(state));
+ state = REG_GET_MASK(r, SENSOR_TEMP1_GPU_TEMP_MASK);
+ seq_printf(s, " GPU(%d) ", translate_temp(state));
+ r = readl(ts->regs + SENSOR_TEMP2);
+ state = REG_GET_MASK(r, SENSOR_TEMP2_PLLX_TEMP_MASK);
+ seq_printf(s, " PLLX(%d) ", translate_temp(state));
+ state = REG_GET_MASK(r, SENSOR_TEMP2_MEM_TEMP_MASK);
+ seq_printf(s, " MEM(%d)\n", translate_temp(state));
+
+ r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
+ state = REG_GET_MASK(r, ttgs[0]->thermtrip_any_en_mask);
+ seq_printf(s, "Thermtrip Any En(%d)\n", state);
+ for (i = 0; i < ts->soc->num_ttgs; i++) {
+ state = REG_GET_MASK(r, ttgs[i]->thermtrip_enable_mask);
+ seq_printf(s, " %s En(%d) ", ttgs[i]->name, state);
+ state = REG_GET_MASK(r, ttgs[i]->thermtrip_threshold_mask);
+ state *= ts->soc->thresh_grain;
+ seq_printf(s, "Thresh(%d)\n", state);
+ }
+
+ return 0;
+}
+
+static int regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, regs_show, inode->i_private);
+}
+
+static const struct file_operations regs_fops = {
+ .open = regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void soctherm_debug_init(struct platform_device *pdev)
+{
+ struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+ struct dentry *root, *file;
+
+ root = debugfs_create_dir("soctherm", NULL);
+ if (!root) {
+ dev_err(&pdev->dev, "failed to create debugfs directory\n");
+ return;
+ }
+
+ tegra->debugfs_dir = root;
+
+ file = debugfs_create_file("reg_contents", 0644, root,
+ pdev, &regs_fops);
+ if (!file) {
+ dev_err(&pdev->dev, "failed to create debugfs file\n");
+ debugfs_remove_recursive(tegra->debugfs_dir);
+ tegra->debugfs_dir = NULL;
+ }
+}
+#else
+static inline void soctherm_debug_init(struct platform_device *pdev) {}
+#endif
+
+static int soctherm_clk_enable(struct platform_device *pdev, bool enable)
+{
+ struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+ int err;
+
+ if (!tegra->clock_soctherm || !tegra->clock_tsensor)
+ return -EINVAL;
+
+ reset_control_assert(tegra->reset);
+
+ if (enable) {
+ err = clk_prepare_enable(tegra->clock_soctherm);
+ if (err) {
+ reset_control_deassert(tegra->reset);
+ return err;
+ }
+
+ err = clk_prepare_enable(tegra->clock_tsensor);
+ if (err) {
+ clk_disable_unprepare(tegra->clock_soctherm);
+ reset_control_deassert(tegra->reset);
+ return err;
+ }
+ } else {
+ clk_disable_unprepare(tegra->clock_tsensor);
+ clk_disable_unprepare(tegra->clock_soctherm);
+ }
+
+ reset_control_deassert(tegra->reset);
+
+ return 0;
+}
+
+static void soctherm_init(struct platform_device *pdev)
+{
+ struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+ const struct tegra_tsensor_group **ttgs = tegra->soc->ttgs;
+ int i;
+ u32 pdiv, hotspot;
+
+ /* Initialize raw sensors */
+ for (i = 0; i < tegra->soc->num_tsensors; ++i)
+ enable_tsensor(tegra, i);
+
+ /* program pdiv and hotspot offsets per THERM */
+ pdiv = readl(tegra->regs + SENSOR_PDIV);
+ hotspot = readl(tegra->regs + SENSOR_HOTSPOT_OFF);
+ for (i = 0; i < tegra->soc->num_ttgs; ++i) {
+ pdiv = REG_SET_MASK(pdiv, ttgs[i]->pdiv_mask,
+ ttgs[i]->pdiv);
+		/* hotspot offsets are relative to PLLX, so skip PLLX itself */
+ if (ttgs[i]->id == TEGRA124_SOCTHERM_SENSOR_PLLX)
+ continue;
+ hotspot = REG_SET_MASK(hotspot,
+ ttgs[i]->pllx_hotspot_mask,
+ ttgs[i]->pllx_hotspot_diff);
+ }
+ writel(pdiv, tegra->regs + SENSOR_PDIV);
+ writel(hotspot, tegra->regs + SENSOR_HOTSPOT_OFF);
+}
+
+static const struct of_device_id tegra_soctherm_of_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+ {
+ .compatible = "nvidia,tegra124-soctherm",
+ .data = &tegra124_soctherm,
+ },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+ {
+ .compatible = "nvidia,tegra132-soctherm",
+ .data = &tegra132_soctherm,
+ },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+ {
+ .compatible = "nvidia,tegra210-soctherm",
+ .data = &tegra210_soctherm,
+ },
+#endif
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match);
+
+static int tegra_soctherm_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct tegra_soctherm *tegra;
+ struct thermal_zone_device *z;
+ struct tsensor_shared_calib shared_calib;
+ struct resource *res;
+ struct tegra_soctherm_soc *soc;
+ unsigned int i;
+ int err;
+
+ match = of_match_node(tegra_soctherm_of_match, pdev->dev.of_node);
+ if (!match)
+ return -ENODEV;
+
+ soc = (struct tegra_soctherm_soc *)match->data;
+ if (soc->num_ttgs > TEGRA124_SOCTHERM_SENSOR_NUM)
+ return -EINVAL;
+
+ tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, tegra);
+
+ tegra->soc = soc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tegra->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tegra->regs))
+ return PTR_ERR(tegra->regs);
+
+ tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm");
+ if (IS_ERR(tegra->reset)) {
+ dev_err(&pdev->dev, "can't get soctherm reset\n");
+ return PTR_ERR(tegra->reset);
+ }
+
+ tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor");
+ if (IS_ERR(tegra->clock_tsensor)) {
+ dev_err(&pdev->dev, "can't get tsensor clock\n");
+ return PTR_ERR(tegra->clock_tsensor);
+ }
+
+ tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm");
+ if (IS_ERR(tegra->clock_soctherm)) {
+ dev_err(&pdev->dev, "can't get soctherm clock\n");
+ return PTR_ERR(tegra->clock_soctherm);
+ }
+
+ tegra->calib = devm_kzalloc(&pdev->dev,
+ sizeof(u32) * soc->num_tsensors,
+ GFP_KERNEL);
+ if (!tegra->calib)
+ return -ENOMEM;
+
+ /* calculate shared calibration data */
+ err = tegra_calc_shared_calib(soc->tfuse, &shared_calib);
+ if (err)
+ return err;
+
+ /* calculate tsensor calibaration data */
+ for (i = 0; i < soc->num_tsensors; ++i) {
+ err = tegra_calc_tsensor_calib(&soc->tsensors[i],
+ &shared_calib,
+ &tegra->calib[i]);
+ if (err)
+ return err;
+ }
+
+ tegra->thermctl_tzs = devm_kzalloc(&pdev->dev,
+ sizeof(*z) * soc->num_ttgs,
+ GFP_KERNEL);
+ if (!tegra->thermctl_tzs)
+ return -ENOMEM;
+
+ err = soctherm_clk_enable(pdev, true);
+ if (err)
+ return err;
+
+ soctherm_init(pdev);
+
+ for (i = 0; i < soc->num_ttgs; ++i) {
+ struct tegra_thermctl_zone *zone =
+ devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
+ if (!zone) {
+ err = -ENOMEM;
+ goto disable_clocks;
+ }
+
+ zone->reg = tegra->regs + soc->ttgs[i]->sensor_temp_offset;
+ zone->dev = &pdev->dev;
+ zone->sg = soc->ttgs[i];
+
+ z = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ soc->ttgs[i]->id, zone,
+ &tegra_of_thermal_ops);
+ if (IS_ERR(z)) {
+ err = PTR_ERR(z);
+ dev_err(&pdev->dev, "failed to register sensor: %d\n",
+ err);
+ goto disable_clocks;
+ }
+
+ zone->tz = z;
+ tegra->thermctl_tzs[soc->ttgs[i]->id] = z;
+
+ /* Configure hw trip points */
+ tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z);
+ }
+
+ soctherm_debug_init(pdev);
+
+ return 0;
+
+disable_clocks:
+ soctherm_clk_enable(pdev, false);
+
+ return err;
+}
+
+static int tegra_soctherm_remove(struct platform_device *pdev)
+{
+ struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(tegra->debugfs_dir);
+
+ soctherm_clk_enable(pdev, false);
+
+ return 0;
+}
+
+static int __maybe_unused soctherm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ soctherm_clk_enable(pdev, false);
+
+ return 0;
+}
+
+static int __maybe_unused soctherm_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+ struct tegra_soctherm_soc *soc = tegra->soc;
+ int err, i;
+
+ err = soctherm_clk_enable(pdev, true);
+ if (err) {
+ dev_err(&pdev->dev,
+			"Resume failed: could not enable clocks\n");
+ return err;
+ }
+
+ soctherm_init(pdev);
+
+ for (i = 0; i < soc->num_ttgs; ++i) {
+ struct thermal_zone_device *tz;
+
+ tz = tegra->thermctl_tzs[soc->ttgs[i]->id];
+ tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tegra_soctherm_pm, soctherm_suspend, soctherm_resume);
+
+static struct platform_driver tegra_soctherm_driver = {
+ .probe = tegra_soctherm_probe,
+ .remove = tegra_soctherm_remove,
+ .driver = {
+ .name = "tegra_soctherm",
+ .pm = &tegra_soctherm_pm,
+ .of_match_table = tegra_soctherm_of_match,
+ },
+};
+module_platform_driver(tegra_soctherm_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/tegra/soctherm.h b/drivers/thermal/tegra/soctherm.h
new file mode 100644
index 000000000..28e18ec4b
--- /dev/null
+++ b/drivers/thermal/tegra/soctherm.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_THERMAL_TEGRA_SOCTHERM_H
+#define __DRIVERS_THERMAL_TEGRA_SOCTHERM_H
+
+#define SENSOR_CONFIG2 8
+#define SENSOR_CONFIG2_THERMA_MASK (0xffff << 16)
+#define SENSOR_CONFIG2_THERMA_SHIFT 16
+#define SENSOR_CONFIG2_THERMB_MASK 0xffff
+#define SENSOR_CONFIG2_THERMB_SHIFT 0
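+/* THERMA/THERMB hold each sensor's linear calibration coefficients */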
+
+#define THERMCTL_THERMTRIP_CTL 0x80
+/* THERMTRIP bits are defined in the per-SoC tegra*-soctherm.c files */
+
+#define SENSOR_PDIV 0x1c0
+#define SENSOR_PDIV_CPU_MASK (0xf << 12)
+#define SENSOR_PDIV_GPU_MASK (0xf << 8)
+#define SENSOR_PDIV_MEM_MASK (0xf << 4)
+#define SENSOR_PDIV_PLLX_MASK (0xf << 0)
+
+#define SENSOR_HOTSPOT_OFF 0x1c4
+#define SENSOR_HOTSPOT_CPU_MASK (0xff << 16)
+#define SENSOR_HOTSPOT_GPU_MASK (0xff << 8)
+#define SENSOR_HOTSPOT_MEM_MASK (0xff << 0)
+
+#define SENSOR_TEMP1 0x1c8
+#define SENSOR_TEMP1_CPU_TEMP_MASK (0xffff << 16)
+#define SENSOR_TEMP1_GPU_TEMP_MASK 0xffff
+#define SENSOR_TEMP2 0x1cc
+#define SENSOR_TEMP2_MEM_TEMP_MASK (0xffff << 16)
+#define SENSOR_TEMP2_PLLX_TEMP_MASK 0xffff
+
+/**
+ * struct tegra_tsensor_group - SOC_THERM sensor group data
+ * @name: short name of the temperature sensor group
+ * @id: numeric ID of the temperature sensor group
+ * @sensor_temp_offset: offset of the SENSOR_TEMP* register
+ * @sensor_temp_mask: bit mask for this sensor group in SENSOR_TEMP* register
+ * @pdiv: the sensor count post-divider to use during runtime
+ * @pdiv_ate: the sensor count post-divider used during automated test
+ * @pdiv_mask: register bitfield mask for the PDIV field for this sensor
+ * @pllx_hotspot_diff: hotspot offset from the PLLX sensor, must be 0 for
+ *                     the PLLX sensor group
+ * @pllx_hotspot_mask: register bitfield mask for the HOTSPOT field
+ * @thermtrip_enable_mask: register bitfield mask to enable thermtrip for
+ *                         this sensor group
+ * @thermtrip_any_en_mask: register bitfield mask of the "any sensor"
+ *                         thermtrip enable
+ * @thermtrip_threshold_mask: register bitfield mask of the thermtrip
+ *                            threshold for this sensor group
+ */
+struct tegra_tsensor_group {
+ const char *name;
+ u8 id;
+ u16 sensor_temp_offset;
+ u32 sensor_temp_mask;
+ u32 pdiv, pdiv_ate, pdiv_mask;
+ u32 pllx_hotspot_diff, pllx_hotspot_mask;
+ u32 thermtrip_enable_mask;
+ u32 thermtrip_any_en_mask;
+ u32 thermtrip_threshold_mask;
+};
+
+struct tegra_tsensor_configuration {
+ u32 tall, tiddq_en, ten_count, pdiv, pdiv_ate, tsample, tsample_ate;
+};
+
+struct tegra_tsensor {
+ const char *name;
+ const u32 base;
+ const struct tegra_tsensor_configuration *config;
+ const u32 calib_fuse_offset;
+ /*
+ * Correction values used to modify values read from
+ * calibration fuses
+ */
+ const s32 fuse_corr_alpha, fuse_corr_beta;
+ const struct tegra_tsensor_group *group;
+};
+
+struct tegra_soctherm_fuse {
+ u32 fuse_base_cp_mask, fuse_base_cp_shift;
+ u32 fuse_base_ft_mask, fuse_base_ft_shift;
+ u32 fuse_shift_ft_mask, fuse_shift_ft_shift;
+ u32 fuse_spare_realignment;
+};
+
+struct tsensor_shared_calib {
+ u32 base_cp, base_ft;
+ u32 actual_temp_cp, actual_temp_ft;
+};
+
+struct tegra_soctherm_soc {
+ const struct tegra_tsensor *tsensors;
+ const unsigned int num_tsensors;
+ const struct tegra_tsensor_group **ttgs;
+ const unsigned int num_ttgs;
+ const struct tegra_soctherm_fuse *tfuse;
+ const int thresh_grain;
+};
+
+int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
+ struct tsensor_shared_calib *shared);
+int tegra_calc_tsensor_calib(const struct tegra_tsensor *sensor,
+ const struct tsensor_shared_calib *shared,
+ u32 *calib);
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+extern const struct tegra_soctherm_soc tegra124_soctherm;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+extern const struct tegra_soctherm_soc tegra132_soctherm;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+extern const struct tegra_soctherm_soc tegra210_soctherm;
+#endif
+
+#endif
+
diff --git a/drivers/thermal/tegra/tegra124-soctherm.c b/drivers/thermal/tegra/tegra124-soctherm.c
new file mode 100644
index 000000000..beb9d36b9
--- /dev/null
+++ b/drivers/thermal/tegra/tegra124-soctherm.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/thermal/tegra124-soctherm.h>
+
+#include "soctherm.h"
+
+#define TEGRA124_THERMTRIP_ANY_EN_MASK (0x1 << 28)
+#define TEGRA124_THERMTRIP_MEM_EN_MASK (0x1 << 27)
+#define TEGRA124_THERMTRIP_GPU_EN_MASK (0x1 << 26)
+#define TEGRA124_THERMTRIP_CPU_EN_MASK (0x1 << 25)
+#define TEGRA124_THERMTRIP_TSENSE_EN_MASK (0x1 << 24)
+#define TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK (0xff << 16)
+#define TEGRA124_THERMTRIP_CPU_THRESH_MASK (0xff << 8)
+#define TEGRA124_THERMTRIP_TSENSE_THRESH_MASK 0xff
+
+#define TEGRA124_THRESH_GRAIN 1000
+
+static const struct tegra_tsensor_configuration tegra124_tsensor_config = {
+ .tall = 16300,
+ .tiddq_en = 1,
+ .ten_count = 1,
+ .tsample = 120,
+ .tsample_ate = 480,
+};
+
+static const struct tegra_tsensor_group tegra124_tsensor_group_cpu = {
+ .id = TEGRA124_SOCTHERM_SENSOR_CPU,
+ .name = "cpu",
+ .sensor_temp_offset = SENSOR_TEMP1,
+ .sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_CPU_MASK,
+ .pllx_hotspot_diff = 10,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK,
+ .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA124_THERMTRIP_CPU_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA124_THERMTRIP_CPU_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra124_tsensor_group_gpu = {
+ .id = TEGRA124_SOCTHERM_SENSOR_GPU,
+ .name = "gpu",
+ .sensor_temp_offset = SENSOR_TEMP1,
+ .sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_GPU_MASK,
+ .pllx_hotspot_diff = 5,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK,
+ .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA124_THERMTRIP_GPU_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra124_tsensor_group_pll = {
+ .id = TEGRA124_SOCTHERM_SENSOR_PLLX,
+ .name = "pll",
+ .sensor_temp_offset = SENSOR_TEMP2,
+ .sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_PLLX_MASK,
+ .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA124_THERMTRIP_TSENSE_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA124_THERMTRIP_TSENSE_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra124_tsensor_group_mem = {
+ .id = TEGRA124_SOCTHERM_SENSOR_MEM,
+ .name = "mem",
+ .sensor_temp_offset = SENSOR_TEMP2,
+ .sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_MEM_MASK,
+ .pllx_hotspot_diff = 0,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK,
+ .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA124_THERMTRIP_MEM_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group *tegra124_tsensor_groups[] = {
+ &tegra124_tsensor_group_cpu,
+ &tegra124_tsensor_group_gpu,
+ &tegra124_tsensor_group_pll,
+ &tegra124_tsensor_group_mem,
+};
+
+static const struct tegra_tsensor tegra124_tsensors[] = {
+ {
+ .name = "cpu0",
+ .base = 0xc0,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x098,
+ .fuse_corr_alpha = 1135400,
+ .fuse_corr_beta = -6266900,
+ .group = &tegra124_tsensor_group_cpu,
+ }, {
+ .name = "cpu1",
+ .base = 0xe0,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x084,
+ .fuse_corr_alpha = 1122220,
+ .fuse_corr_beta = -5700700,
+ .group = &tegra124_tsensor_group_cpu,
+ }, {
+ .name = "cpu2",
+ .base = 0x100,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x088,
+ .fuse_corr_alpha = 1127000,
+ .fuse_corr_beta = -6768200,
+ .group = &tegra124_tsensor_group_cpu,
+ }, {
+ .name = "cpu3",
+ .base = 0x120,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x12c,
+ .fuse_corr_alpha = 1110900,
+ .fuse_corr_beta = -6232000,
+ .group = &tegra124_tsensor_group_cpu,
+ }, {
+ .name = "mem0",
+ .base = 0x140,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x158,
+ .fuse_corr_alpha = 1122300,
+ .fuse_corr_beta = -5936400,
+ .group = &tegra124_tsensor_group_mem,
+ }, {
+ .name = "mem1",
+ .base = 0x160,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x15c,
+ .fuse_corr_alpha = 1145700,
+ .fuse_corr_beta = -7124600,
+ .group = &tegra124_tsensor_group_mem,
+ }, {
+ .name = "gpu",
+ .base = 0x180,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x154,
+ .fuse_corr_alpha = 1120100,
+ .fuse_corr_beta = -6000500,
+ .group = &tegra124_tsensor_group_gpu,
+ }, {
+ .name = "pllx",
+ .base = 0x1a0,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x160,
+ .fuse_corr_alpha = 1106500,
+ .fuse_corr_beta = -6729300,
+ .group = &tegra124_tsensor_group_pll,
+ },
+};
+
+/*
+ * Mask/shift bits in FUSE_TSENSOR_COMMON and
+ * FUSE_SPARE_REALIGNMENT_REG, which are described in
+ * tegra_soctherm_fuse.c
+ */
+static const struct tegra_soctherm_fuse tegra124_soctherm_fuse = {
+ .fuse_base_cp_mask = 0x3ff,
+ .fuse_base_cp_shift = 0,
+ .fuse_base_ft_mask = 0x7ff << 10,
+ .fuse_base_ft_shift = 10,
+ .fuse_shift_ft_mask = 0x1f << 21,
+ .fuse_shift_ft_shift = 21,
+ .fuse_spare_realignment = 0x1fc,
+};
+
+const struct tegra_soctherm_soc tegra124_soctherm = {
+ .tsensors = tegra124_tsensors,
+ .num_tsensors = ARRAY_SIZE(tegra124_tsensors),
+ .ttgs = tegra124_tsensor_groups,
+ .num_ttgs = ARRAY_SIZE(tegra124_tsensor_groups),
+ .tfuse = &tegra124_soctherm_fuse,
+ .thresh_grain = TEGRA124_THRESH_GRAIN,
+};
diff --git a/drivers/thermal/tegra/tegra132-soctherm.c b/drivers/thermal/tegra/tegra132-soctherm.c
new file mode 100644
index 000000000..e2aa84e1b
--- /dev/null
+++ b/drivers/thermal/tegra/tegra132-soctherm.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/thermal/tegra124-soctherm.h>
+
+#include "soctherm.h"
+
+#define TEGRA132_THERMTRIP_ANY_EN_MASK (0x1 << 28)
+#define TEGRA132_THERMTRIP_MEM_EN_MASK (0x1 << 27)
+#define TEGRA132_THERMTRIP_GPU_EN_MASK (0x1 << 26)
+#define TEGRA132_THERMTRIP_CPU_EN_MASK (0x1 << 25)
+#define TEGRA132_THERMTRIP_TSENSE_EN_MASK (0x1 << 24)
+#define TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK (0xff << 16)
+#define TEGRA132_THERMTRIP_CPU_THRESH_MASK (0xff << 8)
+#define TEGRA132_THERMTRIP_TSENSE_THRESH_MASK 0xff
+
+#define TEGRA132_THRESH_GRAIN 1000
+
+static const struct tegra_tsensor_configuration tegra132_tsensor_config = {
+ .tall = 16300,
+ .tiddq_en = 1,
+ .ten_count = 1,
+ .tsample = 120,
+ .tsample_ate = 480,
+};
+
+static const struct tegra_tsensor_group tegra132_tsensor_group_cpu = {
+ .id = TEGRA124_SOCTHERM_SENSOR_CPU,
+ .name = "cpu",
+ .sensor_temp_offset = SENSOR_TEMP1,
+ .sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_CPU_MASK,
+ .pllx_hotspot_diff = 10,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK,
+ .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA132_THERMTRIP_CPU_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA132_THERMTRIP_CPU_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra132_tsensor_group_gpu = {
+ .id = TEGRA124_SOCTHERM_SENSOR_GPU,
+ .name = "gpu",
+ .sensor_temp_offset = SENSOR_TEMP1,
+ .sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_GPU_MASK,
+ .pllx_hotspot_diff = 5,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK,
+ .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA132_THERMTRIP_GPU_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra132_tsensor_group_pll = {
+ .id = TEGRA124_SOCTHERM_SENSOR_PLLX,
+ .name = "pll",
+ .sensor_temp_offset = SENSOR_TEMP2,
+ .sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_PLLX_MASK,
+ .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA132_THERMTRIP_TSENSE_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA132_THERMTRIP_TSENSE_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra132_tsensor_group_mem = {
+ .id = TEGRA124_SOCTHERM_SENSOR_MEM,
+ .name = "mem",
+ .sensor_temp_offset = SENSOR_TEMP2,
+ .sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_MEM_MASK,
+ .pllx_hotspot_diff = 0,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK,
+ .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA132_THERMTRIP_MEM_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group *tegra132_tsensor_groups[] = {
+ &tegra132_tsensor_group_cpu,
+ &tegra132_tsensor_group_gpu,
+ &tegra132_tsensor_group_pll,
+ &tegra132_tsensor_group_mem,
+};
+
+static const struct tegra_tsensor tegra132_tsensors[] = {
+ {
+ .name = "cpu0",
+ .base = 0xc0,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x098,
+ .fuse_corr_alpha = 1126600,
+ .fuse_corr_beta = -9433500,
+ .group = &tegra132_tsensor_group_cpu,
+ }, {
+ .name = "cpu1",
+ .base = 0xe0,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x084,
+ .fuse_corr_alpha = 1110800,
+ .fuse_corr_beta = -7383000,
+ .group = &tegra132_tsensor_group_cpu,
+ }, {
+ .name = "cpu2",
+ .base = 0x100,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x088,
+ .fuse_corr_alpha = 1113800,
+ .fuse_corr_beta = -6215200,
+ .group = &tegra132_tsensor_group_cpu,
+ }, {
+ .name = "cpu3",
+ .base = 0x120,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x12c,
+ .fuse_corr_alpha = 1129600,
+ .fuse_corr_beta = -8196100,
+ .group = &tegra132_tsensor_group_cpu,
+ }, {
+ .name = "mem0",
+ .base = 0x140,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x158,
+ .fuse_corr_alpha = 1132900,
+ .fuse_corr_beta = -6755300,
+ .group = &tegra132_tsensor_group_mem,
+ }, {
+ .name = "mem1",
+ .base = 0x160,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x15c,
+ .fuse_corr_alpha = 1142300,
+ .fuse_corr_beta = -7374200,
+ .group = &tegra132_tsensor_group_mem,
+ }, {
+ .name = "gpu",
+ .base = 0x180,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x154,
+ .fuse_corr_alpha = 1125100,
+ .fuse_corr_beta = -6350400,
+ .group = &tegra132_tsensor_group_gpu,
+ }, {
+ .name = "pllx",
+ .base = 0x1a0,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x160,
+ .fuse_corr_alpha = 1118100,
+ .fuse_corr_beta = -8208800,
+ .group = &tegra132_tsensor_group_pll,
+ },
+};
+
+/*
+ * Mask/shift bits in FUSE_TSENSOR_COMMON and
+ * FUSE_SPARE_REALIGNMENT_REG, which are described in
+ * tegra_soctherm_fuse.c
+ */
+static const struct tegra_soctherm_fuse tegra132_soctherm_fuse = {
+ .fuse_base_cp_mask = 0x3ff,
+ .fuse_base_cp_shift = 0,
+ .fuse_base_ft_mask = 0x7ff << 10,
+ .fuse_base_ft_shift = 10,
+ .fuse_shift_ft_mask = 0x1f << 21,
+ .fuse_shift_ft_shift = 21,
+ .fuse_spare_realignment = 0x1fc,
+};
+
+const struct tegra_soctherm_soc tegra132_soctherm = {
+ .tsensors = tegra132_tsensors,
+ .num_tsensors = ARRAY_SIZE(tegra132_tsensors),
+ .ttgs = tegra132_tsensor_groups,
+ .num_ttgs = ARRAY_SIZE(tegra132_tsensor_groups),
+ .tfuse = &tegra132_soctherm_fuse,
+ .thresh_grain = TEGRA132_THRESH_GRAIN,
+};
diff --git a/drivers/thermal/tegra/tegra210-soctherm.c b/drivers/thermal/tegra/tegra210-soctherm.c
new file mode 100644
index 000000000..19cc0ab66
--- /dev/null
+++ b/drivers/thermal/tegra/tegra210-soctherm.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <soc/tegra/fuse.h>
+
+#include <dt-bindings/thermal/tegra124-soctherm.h>
+
+#include "soctherm.h"
+
+#define TEGRA210_THERMTRIP_ANY_EN_MASK (0x1 << 31)
+#define TEGRA210_THERMTRIP_MEM_EN_MASK (0x1 << 30)
+#define TEGRA210_THERMTRIP_GPU_EN_MASK (0x1 << 29)
+#define TEGRA210_THERMTRIP_CPU_EN_MASK (0x1 << 28)
+#define TEGRA210_THERMTRIP_TSENSE_EN_MASK (0x1 << 27)
+#define TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK (0x1ff << 18)
+#define TEGRA210_THERMTRIP_CPU_THRESH_MASK (0x1ff << 9)
+#define TEGRA210_THERMTRIP_TSENSE_THRESH_MASK 0x1ff
+
+#define TEGRA210_THRESH_GRAIN 500
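+/*
+ * Unlike Tegra124/132 (8-bit thermtrip threshold fields, 1000 mC grain),
+ * Tegra210 uses 9-bit threshold fields at 500 millicelsius granularity.
+ */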
+
+static const struct tegra_tsensor_configuration tegra210_tsensor_config = {
+ .tall = 16300,
+ .tiddq_en = 1,
+ .ten_count = 1,
+ .tsample = 120,
+ .tsample_ate = 480,
+};
+
+static const struct tegra_tsensor_group tegra210_tsensor_group_cpu = {
+ .id = TEGRA124_SOCTHERM_SENSOR_CPU,
+ .name = "cpu",
+ .sensor_temp_offset = SENSOR_TEMP1,
+ .sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_CPU_MASK,
+ .pllx_hotspot_diff = 10,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK,
+ .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA210_THERMTRIP_CPU_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA210_THERMTRIP_CPU_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra210_tsensor_group_gpu = {
+ .id = TEGRA124_SOCTHERM_SENSOR_GPU,
+ .name = "gpu",
+ .sensor_temp_offset = SENSOR_TEMP1,
+ .sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_GPU_MASK,
+ .pllx_hotspot_diff = 5,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK,
+ .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA210_THERMTRIP_GPU_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra210_tsensor_group_pll = {
+ .id = TEGRA124_SOCTHERM_SENSOR_PLLX,
+ .name = "pll",
+ .sensor_temp_offset = SENSOR_TEMP2,
+ .sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_PLLX_MASK,
+ .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA210_THERMTRIP_TSENSE_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA210_THERMTRIP_TSENSE_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra210_tsensor_group_mem = {
+ .id = TEGRA124_SOCTHERM_SENSOR_MEM,
+ .name = "mem",
+ .sensor_temp_offset = SENSOR_TEMP2,
+ .sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_MEM_MASK,
+ .pllx_hotspot_diff = 0,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK,
+ .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA210_THERMTRIP_MEM_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group *tegra210_tsensor_groups[] = {
+ &tegra210_tsensor_group_cpu,
+ &tegra210_tsensor_group_gpu,
+ &tegra210_tsensor_group_pll,
+ &tegra210_tsensor_group_mem,
+};
+
+static const struct tegra_tsensor tegra210_tsensors[] = {
+ {
+ .name = "cpu0",
+ .base = 0xc0,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x098,
+ .fuse_corr_alpha = 1085000,
+ .fuse_corr_beta = 3244200,
+ .group = &tegra210_tsensor_group_cpu,
+ }, {
+ .name = "cpu1",
+ .base = 0xe0,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x084,
+ .fuse_corr_alpha = 1126200,
+ .fuse_corr_beta = -67500,
+ .group = &tegra210_tsensor_group_cpu,
+ }, {
+ .name = "cpu2",
+ .base = 0x100,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x088,
+ .fuse_corr_alpha = 1098400,
+ .fuse_corr_beta = 2251100,
+ .group = &tegra210_tsensor_group_cpu,
+ }, {
+ .name = "cpu3",
+ .base = 0x120,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x12c,
+ .fuse_corr_alpha = 1108000,
+ .fuse_corr_beta = 602700,
+ .group = &tegra210_tsensor_group_cpu,
+ }, {
+ .name = "mem0",
+ .base = 0x140,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x158,
+ .fuse_corr_alpha = 1069200,
+ .fuse_corr_beta = 3549900,
+ .group = &tegra210_tsensor_group_mem,
+ }, {
+ .name = "mem1",
+ .base = 0x160,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x15c,
+ .fuse_corr_alpha = 1173700,
+ .fuse_corr_beta = -6263600,
+ .group = &tegra210_tsensor_group_mem,
+ }, {
+ .name = "gpu",
+ .base = 0x180,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x154,
+ .fuse_corr_alpha = 1074300,
+ .fuse_corr_beta = 2734900,
+ .group = &tegra210_tsensor_group_gpu,
+ }, {
+ .name = "pllx",
+ .base = 0x1a0,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x160,
+ .fuse_corr_alpha = 1039700,
+ .fuse_corr_beta = 6829100,
+ .group = &tegra210_tsensor_group_pll,
+ },
+};
+
+/*
+ * Mask/shift bits in FUSE_TSENSOR_COMMON and
+ * FUSE_SPARE_REALIGNMENT_REG, which are described in
+ * tegra_soctherm_fuse.c
+ */
+static const struct tegra_soctherm_fuse tegra210_soctherm_fuse = {
+ .fuse_base_cp_mask = 0x3ff << 11,
+ .fuse_base_cp_shift = 11,
+ .fuse_base_ft_mask = 0x7ff << 21,
+ .fuse_base_ft_shift = 21,
+ .fuse_shift_ft_mask = 0x1f << 6,
+ .fuse_shift_ft_shift = 6,
+ .fuse_spare_realignment = 0,
+};
+
+const struct tegra_soctherm_soc tegra210_soctherm = {
+ .tsensors = tegra210_tsensors,
+ .num_tsensors = ARRAY_SIZE(tegra210_tsensors),
+ .ttgs = tegra210_tsensor_groups,
+ .num_ttgs = ARRAY_SIZE(tegra210_tsensor_groups),
+ .tfuse = &tegra210_soctherm_fuse,
+ .thresh_grain = TEGRA210_THRESH_GRAIN,
+};
diff --git a/drivers/thermal/tegra_soctherm.c b/drivers/thermal/tegra_soctherm.c
deleted file mode 100644
index 136975220..000000000
--- a/drivers/thermal/tegra_soctherm.c
+++ /dev/null
@@ -1,476 +0,0 @@
-/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
- *
- * Author:
- * Mikko Perttunen <mperttunen@nvidia.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/reset.h>
-#include <linux/thermal.h>
-
-#include <soc/tegra/fuse.h>
-
-#define SENSOR_CONFIG0 0
-#define SENSOR_CONFIG0_STOP BIT(0)
-#define SENSOR_CONFIG0_TALL_SHIFT 8
-#define SENSOR_CONFIG0_TCALC_OVER BIT(4)
-#define SENSOR_CONFIG0_OVER BIT(3)
-#define SENSOR_CONFIG0_CPTR_OVER BIT(2)
-
-#define SENSOR_CONFIG1 4
-#define SENSOR_CONFIG1_TSAMPLE_SHIFT 0
-#define SENSOR_CONFIG1_TIDDQ_EN_SHIFT 15
-#define SENSOR_CONFIG1_TEN_COUNT_SHIFT 24
-#define SENSOR_CONFIG1_TEMP_ENABLE BIT(31)
-
-#define SENSOR_CONFIG2 8
-#define SENSOR_CONFIG2_THERMA_SHIFT 16
-#define SENSOR_CONFIG2_THERMB_SHIFT 0
-
-#define SENSOR_PDIV 0x1c0
-#define SENSOR_PDIV_T124 0x8888
-#define SENSOR_HOTSPOT_OFF 0x1c4
-#define SENSOR_HOTSPOT_OFF_T124 0x00060600
-#define SENSOR_TEMP1 0x1c8
-#define SENSOR_TEMP2 0x1cc
-
-#define SENSOR_TEMP_MASK 0xffff
-#define READBACK_VALUE_MASK 0xff00
-#define READBACK_VALUE_SHIFT 8
-#define READBACK_ADD_HALF BIT(7)
-#define READBACK_NEGATE BIT(0)
-
-#define FUSE_TSENSOR8_CALIB 0x180
-#define FUSE_SPARE_REALIGNMENT_REG_0 0x1fc
-
-#define FUSE_TSENSOR_CALIB_CP_TS_BASE_MASK 0x1fff
-#define FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK (0x1fff << 13)
-#define FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT 13
-
-#define FUSE_TSENSOR8_CALIB_CP_TS_BASE_MASK 0x3ff
-#define FUSE_TSENSOR8_CALIB_FT_TS_BASE_MASK (0x7ff << 10)
-#define FUSE_TSENSOR8_CALIB_FT_TS_BASE_SHIFT 10
-
-#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_CP_MASK 0x3f
-#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_MASK (0x1f << 21)
-#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_SHIFT 21
-
-#define NOMINAL_CALIB_FT_T124 105
-#define NOMINAL_CALIB_CP_T124 25
-
-struct tegra_tsensor_configuration {
- u32 tall, tsample, tiddq_en, ten_count, pdiv, tsample_ate, pdiv_ate;
-};
-
-struct tegra_tsensor {
- const struct tegra_tsensor_configuration *config;
- u32 base, calib_fuse_offset;
- /* Correction values used to modify values read from calibration fuses */
- s32 fuse_corr_alpha, fuse_corr_beta;
-};
-
-struct tegra_thermctl_zone {
- void __iomem *reg;
- unsigned int shift;
-};
-
-static const struct tegra_tsensor_configuration t124_tsensor_config = {
- .tall = 16300,
- .tsample = 120,
- .tiddq_en = 1,
- .ten_count = 1,
- .pdiv = 8,
- .tsample_ate = 480,
- .pdiv_ate = 8
-};
-
-static const struct tegra_tsensor t124_tsensors[] = {
- {
- .config = &t124_tsensor_config,
- .base = 0xc0,
- .calib_fuse_offset = 0x098,
- .fuse_corr_alpha = 1135400,
- .fuse_corr_beta = -6266900,
- },
- {
- .config = &t124_tsensor_config,
- .base = 0xe0,
- .calib_fuse_offset = 0x084,
- .fuse_corr_alpha = 1122220,
- .fuse_corr_beta = -5700700,
- },
- {
- .config = &t124_tsensor_config,
- .base = 0x100,
- .calib_fuse_offset = 0x088,
- .fuse_corr_alpha = 1127000,
- .fuse_corr_beta = -6768200,
- },
- {
- .config = &t124_tsensor_config,
- .base = 0x120,
- .calib_fuse_offset = 0x12c,
- .fuse_corr_alpha = 1110900,
- .fuse_corr_beta = -6232000,
- },
- {
- .config = &t124_tsensor_config,
- .base = 0x140,
- .calib_fuse_offset = 0x158,
- .fuse_corr_alpha = 1122300,
- .fuse_corr_beta = -5936400,
- },
- {
- .config = &t124_tsensor_config,
- .base = 0x160,
- .calib_fuse_offset = 0x15c,
- .fuse_corr_alpha = 1145700,
- .fuse_corr_beta = -7124600,
- },
- {
- .config = &t124_tsensor_config,
- .base = 0x180,
- .calib_fuse_offset = 0x154,
- .fuse_corr_alpha = 1120100,
- .fuse_corr_beta = -6000500,
- },
- {
- .config = &t124_tsensor_config,
- .base = 0x1a0,
- .calib_fuse_offset = 0x160,
- .fuse_corr_alpha = 1106500,
- .fuse_corr_beta = -6729300,
- },
-};
-
-struct tegra_soctherm {
- struct reset_control *reset;
- struct clk *clock_tsensor;
- struct clk *clock_soctherm;
- void __iomem *regs;
-
- struct thermal_zone_device *thermctl_tzs[4];
-};
-
-struct tsensor_shared_calibration {
- u32 base_cp, base_ft;
- u32 actual_temp_cp, actual_temp_ft;
-};
-
-static int calculate_shared_calibration(struct tsensor_shared_calibration *r)
-{
- u32 val, shifted_cp, shifted_ft;
- int err;
-
- err = tegra_fuse_readl(FUSE_TSENSOR8_CALIB, &val);
- if (err)
- return err;
- r->base_cp = val & FUSE_TSENSOR8_CALIB_CP_TS_BASE_MASK;
- r->base_ft = (val & FUSE_TSENSOR8_CALIB_FT_TS_BASE_MASK)
- >> FUSE_TSENSOR8_CALIB_FT_TS_BASE_SHIFT;
- val = ((val & FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_MASK)
- >> FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_SHIFT);
- shifted_ft = sign_extend32(val, 4);
-
- err = tegra_fuse_readl(FUSE_SPARE_REALIGNMENT_REG_0, &val);
- if (err)
- return err;
- shifted_cp = sign_extend32(val, 5);
-
- r->actual_temp_cp = 2 * NOMINAL_CALIB_CP_T124 + shifted_cp;
- r->actual_temp_ft = 2 * NOMINAL_CALIB_FT_T124 + shifted_ft;
-
- return 0;
-}
-
-static s64 div64_s64_precise(s64 a, s64 b)
-{
- s64 r, al;
-
- /* Scale up for increased precision division */
- al = a << 16;
-
- r = div64_s64(al * 2 + 1, 2 * b);
- return r >> 16;
-}
-
-static int
-calculate_tsensor_calibration(const struct tegra_tsensor *sensor,
- const struct tsensor_shared_calibration *shared,
- u32 *calib)
-{
- u32 val;
- s32 actual_tsensor_ft, actual_tsensor_cp, delta_sens, delta_temp,
- mult, div;
- s16 therma, thermb;
- s64 tmp;
- int err;
-
- err = tegra_fuse_readl(sensor->calib_fuse_offset, &val);
- if (err)
- return err;
-
- actual_tsensor_cp = (shared->base_cp * 64) + sign_extend32(val, 12);
- val = (val & FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK)
- >> FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT;
- actual_tsensor_ft = (shared->base_ft * 32) + sign_extend32(val, 12);
-
- delta_sens = actual_tsensor_ft - actual_tsensor_cp;
- delta_temp = shared->actual_temp_ft - shared->actual_temp_cp;
-
- mult = sensor->config->pdiv * sensor->config->tsample_ate;
- div = sensor->config->tsample * sensor->config->pdiv_ate;
-
- therma = div64_s64_precise((s64) delta_temp * (1LL << 13) * mult,
- (s64) delta_sens * div);
-
- tmp = (s64)actual_tsensor_ft * shared->actual_temp_cp -
- (s64)actual_tsensor_cp * shared->actual_temp_ft;
- thermb = div64_s64_precise(tmp, (s64)delta_sens);
-
- therma = div64_s64_precise((s64)therma * sensor->fuse_corr_alpha,
- (s64)1000000LL);
- thermb = div64_s64_precise((s64)thermb * sensor->fuse_corr_alpha +
- sensor->fuse_corr_beta, (s64)1000000LL);
-
- *calib = ((u16)therma << SENSOR_CONFIG2_THERMA_SHIFT) |
- ((u16)thermb << SENSOR_CONFIG2_THERMB_SHIFT);
-
- return 0;
-}
-
-static int enable_tsensor(struct tegra_soctherm *tegra,
- const struct tegra_tsensor *sensor,
- const struct tsensor_shared_calibration *shared)
-{
- void __iomem *base = tegra->regs + sensor->base;
- unsigned int val;
- u32 calib;
- int err;
-
- err = calculate_tsensor_calibration(sensor, shared, &calib);
- if (err)
- return err;
-
- val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT;
- writel(val, base + SENSOR_CONFIG0);
-
- val = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT;
- val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT;
- val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT;
- val |= SENSOR_CONFIG1_TEMP_ENABLE;
- writel(val, base + SENSOR_CONFIG1);
-
- writel(calib, base + SENSOR_CONFIG2);
-
- return 0;
-}
-
-/*
- * Translate from soctherm readback format to millicelsius.
- * The soctherm readback format in bits is as follows:
- * TTTTTTTT H______N
- * where T's contain the temperature in Celsius,
- * H denotes an addition of 0.5 Celsius and N denotes negation
- * of the final value.
- */
-static int translate_temp(u16 val)
-{
- long t;
-
- t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000;
- if (val & READBACK_ADD_HALF)
- t += 500;
- if (val & READBACK_NEGATE)
- t *= -1;
-
- return t;
-}
-
-static int tegra_thermctl_get_temp(void *data, int *out_temp)
-{
- struct tegra_thermctl_zone *zone = data;
- u32 val;
-
- val = (readl(zone->reg) >> zone->shift) & SENSOR_TEMP_MASK;
- *out_temp = translate_temp(val);
-
- return 0;
-}
-
-static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
- .get_temp = tegra_thermctl_get_temp,
-};
-
-static const struct of_device_id tegra_soctherm_of_match[] = {
- { .compatible = "nvidia,tegra124-soctherm" },
- { },
-};
-MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match);
-
-struct thermctl_zone_desc {
- unsigned int offset;
- unsigned int shift;
-};
-
-static const struct thermctl_zone_desc t124_thermctl_temp_zones[] = {
- { SENSOR_TEMP1, 16 },
- { SENSOR_TEMP2, 16 },
- { SENSOR_TEMP1, 0 },
- { SENSOR_TEMP2, 0 }
-};
-
-static int tegra_soctherm_probe(struct platform_device *pdev)
-{
- struct tegra_soctherm *tegra;
- struct thermal_zone_device *tz;
- struct tsensor_shared_calibration shared_calib;
- struct resource *res;
- unsigned int i;
- int err;
-
- const struct tegra_tsensor *tsensors = t124_tsensors;
-
- tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
- if (!tegra)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tegra->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(tegra->regs))
- return PTR_ERR(tegra->regs);
-
- tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm");
- if (IS_ERR(tegra->reset)) {
- dev_err(&pdev->dev, "can't get soctherm reset\n");
- return PTR_ERR(tegra->reset);
- }
-
- tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor");
- if (IS_ERR(tegra->clock_tsensor)) {
- dev_err(&pdev->dev, "can't get tsensor clock\n");
- return PTR_ERR(tegra->clock_tsensor);
- }
-
- tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm");
- if (IS_ERR(tegra->clock_soctherm)) {
- dev_err(&pdev->dev, "can't get soctherm clock\n");
- return PTR_ERR(tegra->clock_soctherm);
- }
-
- reset_control_assert(tegra->reset);
-
- err = clk_prepare_enable(tegra->clock_soctherm);
- if (err)
- return err;
-
- err = clk_prepare_enable(tegra->clock_tsensor);
- if (err) {
- clk_disable_unprepare(tegra->clock_soctherm);
- return err;
- }
-
- reset_control_deassert(tegra->reset);
-
- /* Initialize raw sensors */
-
- err = calculate_shared_calibration(&shared_calib);
- if (err)
- goto disable_clocks;
-
- for (i = 0; i < ARRAY_SIZE(t124_tsensors); ++i) {
- err = enable_tsensor(tegra, tsensors + i, &shared_calib);
- if (err)
- goto disable_clocks;
- }
-
- writel(SENSOR_PDIV_T124, tegra->regs + SENSOR_PDIV);
- writel(SENSOR_HOTSPOT_OFF_T124, tegra->regs + SENSOR_HOTSPOT_OFF);
-
- /* Initialize thermctl sensors */
-
- for (i = 0; i < ARRAY_SIZE(tegra->thermctl_tzs); ++i) {
- struct tegra_thermctl_zone *zone =
- devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
- if (!zone) {
- err = -ENOMEM;
- goto unregister_tzs;
- }
-
- zone->reg = tegra->regs + t124_thermctl_temp_zones[i].offset;
- zone->shift = t124_thermctl_temp_zones[i].shift;
-
- tz = thermal_zone_of_sensor_register(&pdev->dev, i, zone,
- &tegra_of_thermal_ops);
- if (IS_ERR(tz)) {
- err = PTR_ERR(tz);
- dev_err(&pdev->dev, "failed to register sensor: %d\n",
- err);
- goto unregister_tzs;
- }
-
- tegra->thermctl_tzs[i] = tz;
- }
-
- return 0;
-
-unregister_tzs:
- while (i--)
- thermal_zone_of_sensor_unregister(&pdev->dev,
- tegra->thermctl_tzs[i]);
-
-disable_clocks:
- clk_disable_unprepare(tegra->clock_tsensor);
- clk_disable_unprepare(tegra->clock_soctherm);
-
- return err;
-}
-
-static int tegra_soctherm_remove(struct platform_device *pdev)
-{
- struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(tegra->thermctl_tzs); ++i) {
- thermal_zone_of_sensor_unregister(&pdev->dev,
- tegra->thermctl_tzs[i]);
- }
-
- clk_disable_unprepare(tegra->clock_tsensor);
- clk_disable_unprepare(tegra->clock_soctherm);
-
- return 0;
-}
-
-static struct platform_driver tegra_soctherm_driver = {
- .probe = tegra_soctherm_probe,
- .remove = tegra_soctherm_remove,
- .driver = {
- .name = "tegra-soctherm",
- .of_match_table = tegra_soctherm_of_match,
- },
-};
-module_platform_driver(tegra_soctherm_driver);
-
-MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
-MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
new file mode 100644
index 000000000..73f55d6a1
--- /dev/null
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -0,0 +1,182 @@
+/*
+ * Generic ADC thermal driver
+ *
+ * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/iio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+struct gadc_thermal_info {
+ struct device *dev;
+ struct thermal_zone_device *tz_dev;
+ struct iio_channel *channel;
+ s32 *lookup_table;
+ int nlookup_table;
+};
+
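+/*
+ * Convert an ADC reading to a temperature by scanning the flat
+ * (temperature, ADC value) pair table, which is sorted by falling ADC
+ * value, and linearly interpolating between the two neighbouring
+ * entries. The hard-coded 1000 assumes adjacent entries are spaced
+ * 1000 millicelsius apart.
+ */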
+static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
+{
+ int temp, adc_hi, adc_lo;
+ int i;
+
+ for (i = 0; i < gti->nlookup_table; i++) {
+ if (val >= gti->lookup_table[2 * i + 1])
+ break;
+ }
+
+ if (i == 0) {
+ temp = gti->lookup_table[0];
+ } else if (i >= (gti->nlookup_table - 1)) {
+ temp = gti->lookup_table[2 * (gti->nlookup_table - 1)];
+ } else {
+ adc_hi = gti->lookup_table[2 * i - 1];
+ adc_lo = gti->lookup_table[2 * i + 1];
+ temp = gti->lookup_table[2 * i];
+ temp -= ((val - adc_lo) * 1000) / (adc_hi - adc_lo);
+ }
+
+ return temp;
+}
+
+static int gadc_thermal_get_temp(void *data, int *temp)
+{
+ struct gadc_thermal_info *gti = data;
+ int val;
+ int ret;
+
+ ret = iio_read_channel_processed(gti->channel, &val);
+ if (ret < 0) {
+ dev_err(gti->dev, "IIO channel read failed %d\n", ret);
+ return ret;
+ }
+ *temp = gadc_thermal_adc_to_temp(gti, val);
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops gadc_thermal_ops = {
+ .get_temp = gadc_thermal_get_temp,
+};
+
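+/*
+ * Parse the "temperature-lookup-table" DT property, a flat list of
+ * (millicelsius, ADC reading) pairs. A hypothetical fragment:
+ *
+ *	temperature-lookup-table = <(-40000) 2578
+ *				     125000   940>;
+ */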
+static int gadc_thermal_read_linear_lookup_table(struct device *dev,
+ struct gadc_thermal_info *gti)
+{
+ struct device_node *np = dev->of_node;
+ int ntable;
+ int ret;
+
+ ntable = of_property_count_elems_of_size(np, "temperature-lookup-table",
+ sizeof(u32));
+ if (ntable < 0) {
+ dev_err(dev, "Lookup table is not provided\n");
+ return ntable;
+ }
+
+ if (ntable % 2) {
+		dev_err(dev, "Lookup table must hold complete temperature/ADC value pairs\n");
+ return -EINVAL;
+ }
+
+ gti->lookup_table = devm_kzalloc(dev, sizeof(*gti->lookup_table) *
+ ntable, GFP_KERNEL);
+ if (!gti->lookup_table)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(np, "temperature-lookup-table",
+ (u32 *)gti->lookup_table, ntable);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read temperature lookup table: %d\n",
+ ret);
+ return ret;
+ }
+
+ gti->nlookup_table = ntable / 2;
+
+ return 0;
+}
+
+static int gadc_thermal_probe(struct platform_device *pdev)
+{
+ struct gadc_thermal_info *gti;
+ int ret;
+
+ if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev, "Only device tree based probing is supported\n");
+ return -ENODEV;
+ }
+
+ gti = devm_kzalloc(&pdev->dev, sizeof(*gti), GFP_KERNEL);
+ if (!gti)
+ return -ENOMEM;
+
+ ret = gadc_thermal_read_linear_lookup_table(&pdev->dev, gti);
+ if (ret < 0)
+ return ret;
+
+ gti->dev = &pdev->dev;
+ platform_set_drvdata(pdev, gti);
+
+ gti->channel = iio_channel_get(&pdev->dev, "sensor-channel");
+ if (IS_ERR(gti->channel)) {
+ ret = PTR_ERR(gti->channel);
+ dev_err(&pdev->dev, "IIO channel not found: %d\n", ret);
+ return ret;
+ }
+
+ gti->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0,
+ gti, &gadc_thermal_ops);
+ if (IS_ERR(gti->tz_dev)) {
+ ret = PTR_ERR(gti->tz_dev);
+ dev_err(&pdev->dev, "Thermal zone sensor register failed: %d\n",
+ ret);
+ goto sensor_fail;
+ }
+
+ return 0;
+
+sensor_fail:
+ iio_channel_release(gti->channel);
+
+ return ret;
+}
+
+static int gadc_thermal_remove(struct platform_device *pdev)
+{
+ struct gadc_thermal_info *gti = platform_get_drvdata(pdev);
+
+ thermal_zone_of_sensor_unregister(&pdev->dev, gti->tz_dev);
+ iio_channel_release(gti->channel);
+
+ return 0;
+}
+
+static const struct of_device_id of_adc_thermal_match[] = {
+ { .compatible = "generic-adc-thermal", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_adc_thermal_match);
+
+static struct platform_driver gadc_thermal_driver = {
+ .driver = {
+ .name = "generic-adc-thermal",
+ .of_match_table = of_adc_thermal_match,
+ },
+ .probe = gadc_thermal_probe,
+ .remove = gadc_thermal_remove,
+};
+
+module_platform_driver(gadc_thermal_driver);
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("Generic ADC thermal driver using IIO framework with DT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index b213a1222..15c0a9ac2 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -337,7 +337,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
return -EINVAL;
/* in case this is specified by DT */
- data->ti_thermal = thermal_zone_of_sensor_register(bgp->dev, id,
+ data->ti_thermal = devm_thermal_zone_of_sensor_register(bgp->dev, id,
data, &ti_of_thermal_ops);
if (IS_ERR(data->ti_thermal)) {
/* Create thermal zone */
@@ -368,9 +368,6 @@ int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id)
if (data && data->ti_thermal) {
if (data->our_zone)
thermal_zone_device_unregister(data->ti_thermal);
- else
- thermal_zone_of_sensor_unregister(bgp->dev,
- data->ti_thermal);
}
return 0;
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 7fc919f7d..97f0a2bd9 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -555,7 +555,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
{
unsigned int cpu = (unsigned long) hcpu;
- switch (action) {
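+	/* strip CPU_TASKS_FROZEN so frozen (suspend/resume) hotplug events match */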
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
get_core_online(cpu);
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 799634b38..1146ff421 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -249,7 +249,7 @@ static void tb_cfg_print_error(struct tb_ctl *ctl,
* cfg_read/cfg_write.
*/
tb_ctl_WARN(ctl,
- "CFG_ERROR(%llx:%x): Invalid config space of offset\n",
+ "CFG_ERROR(%llx:%x): Invalid config space or offset\n",
res->response_route, res->response_port);
return;
case TB_CFG_ERROR_NO_SUCH_PORT:
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 545c60c82..2b9602c2c 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -221,7 +221,7 @@ struct tb_drom_entry_port {
u8 micro1:4;
u8 micro3;
- /* BYTES 5-6, TODO: verify (find hardware that has these set) */
+ /* BYTES 6-7, TODO: verify (find hardware that has these set) */
u8 peer_port_rid:4;
u8 unknown3:3;
bool has_peer_port:1;
@@ -388,6 +388,11 @@ int tb_drom_read(struct tb_switch *sw)
sw->ports[4].link_nr = 1;
sw->ports[3].dual_link_port = &sw->ports[4];
sw->ports[4].dual_link_port = &sw->ports[3];
+
+ /* Port 5 is inaccessible on this gen 1 controller */
+ if (sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE)
+ sw->ports[5].disabled = true;
+
return 0;
}
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 20a41f7de..9c15344b6 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -37,7 +37,8 @@ static int ring_interrupt_index(struct tb_ring *ring)
*/
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
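+	/* the interrupt registers are 32-bit, so turn the word index into a byte offset */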
- int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32;
+ int reg = REG_RING_INTERRUPT_BASE +
+ ring_interrupt_index(ring) / 32 * 4;
int bit = ring_interrupt_index(ring) & 31;
int mask = 1 << bit;
u32 old, new;
@@ -564,7 +565,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	/* cannot fail - table is allocated in pcim_iomap_regions */
nhi->iobase = pcim_iomap_table(pdev)[0];
nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
- if (nhi->hop_count != 12)
+ if (nhi->hop_count != 12 && nhi->hop_count != 32)
dev_warn(&pdev->dev, "unexpected hop count: %d\n",
nhi->hop_count);
INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
@@ -633,16 +634,24 @@ static const struct dev_pm_ops nhi_pm_ops = {
static struct pci_device_id nhi_ids[] = {
/*
	 * We have to specify the class since the TB bridges use the same device and
- * vendor (sub)id.
+ * vendor (sub)id on gen 1 and gen 2 controllers.
*/
{
.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
- .vendor = PCI_VENDOR_ID_INTEL, .device = 0x1547,
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
.subvendor = 0x2222, .subdevice = 0x1111,
},
{
.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
- .vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
+ .subvendor = 0x2222, .subdevice = 0x1111,
+ },
+ {
+ .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
},
{ 0,}
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index aeb982969..1e116f53d 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -293,9 +293,9 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
if (active) {
data = data & 0xFFFFFF83;
switch (sw->config.device_id) {
- case 0x1513:
- case 0x151a:
- case 0x1549:
+ case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+ case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
+ case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
break;
default:
data |= 4;
@@ -350,7 +350,7 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
return NULL;
sw->tb = tb;
- if (tb_cfg_read(tb->ctl, &sw->config, route, 0, 2, 0, 5))
+ if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
goto err;
tb_info(tb,
"initializing Switch at %#llx (depth: %d, up port: %d)\n",
@@ -370,7 +370,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route)
tb_sw_warn(sw, "unknown switch vendor id %#x\n",
sw->config.vendor_id);
- if (sw->config.device_id != 0x1547 && sw->config.device_id != 0x1549)
+ if (sw->config.device_id != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
+ sw->config.device_id != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
+ sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE)
tb_sw_warn(sw, "unsupported switch device id %#x\n",
sw->config.device_id);
@@ -425,9 +427,9 @@ err:
}
/**
- * tb_sw_set_unpplugged() - set is_unplugged on switch and downstream switches
+ * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
*/
-void tb_sw_set_unpplugged(struct tb_switch *sw)
+void tb_sw_set_unplugged(struct tb_switch *sw)
{
int i;
if (sw == sw->tb->root_switch) {
@@ -441,7 +443,7 @@ void tb_sw_set_unpplugged(struct tb_switch *sw)
sw->is_unplugged = true;
for (i = 0; i <= sw->config.max_port_number; i++) {
if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote)
- tb_sw_set_unpplugged(sw->ports[i].remote->sw);
+ tb_sw_set_unplugged(sw->ports[i].remote->sw);
}
}
@@ -483,7 +485,7 @@ int tb_switch_resume(struct tb_switch *sw)
|| tb_switch_resume(port->remote->sw)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
- tb_sw_set_unpplugged(port->remote->sw);
+ tb_sw_set_unplugged(port->remote->sw);
}
}
return 0;
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index d2c3fe346..24b6d30c3 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -246,7 +246,7 @@ static void tb_handle_hotplug(struct work_struct *work)
if (ev->unplug) {
if (port->remote) {
tb_port_info(port, "unplugged\n");
- tb_sw_set_unpplugged(port->remote->sw);
+ tb_sw_set_unplugged(port->remote->sw);
tb_free_invalid_tunnels(tb);
tb_switch_free(port->remote->sw);
port->remote = NULL;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 8b0d7cf2b..61d57ba64 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -226,7 +226,7 @@ void tb_switch_free(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb *tb, u64 route);
-void tb_sw_set_unpplugged(struct tb_switch *sw);
+void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 6577af75d..1e2a4a804 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -30,7 +30,7 @@ enum tb_cap {
TB_CAP_I2C = 0x0005,
TB_CAP_PLUG_EVENTS = 0x0105, /* also EEPROM */
TB_CAP_TIME2 = 0x0305,
- TB_CAL_IECS = 0x0405,
+ TB_CAP_IECS = 0x0405,
TB_CAP_LINK_CONTROLLER = 0x0605, /* also IECS */
};
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 13e17cd39..c5af5f064 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -133,17 +133,6 @@ config UNIX98_PTYS
All modern Linux systems use the Unix98 ptys. Say Y unless
you're on an embedded system and want to conserve memory.
-config DEVPTS_MULTIPLE_INSTANCES
- bool "Support multiple instances of devpts"
- depends on UNIX98_PTYS
- default n
- ---help---
- Enable support for multiple instances of devpts filesystem.
- If you want to have isolated PTY namespaces (eg: in containers),
- say Y here. Otherwise, say N. If enabled, each mount of devpts
- filesystem with the '-o newinstance' option will create an
- independent PTY namespace.
-
config LEGACY_PTYS
bool "Legacy (BSD) PTY support"
default y
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index eacf4c9f3..208f57349 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -398,7 +398,7 @@ static void check_modem_status(struct serial_state *info)
wake_up_interruptible(&port->delta_msr_wait);
}
- if ((port->flags & ASYNC_CHECK_CD) && (dstatus & SER_DCD)) {
+ if (tty_port_check_carrier(port) && (dstatus & SER_DCD)) {
#if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR))
printk("ttyS%d CD now %s...", info->line,
(!(status & SER_DCD)) ? "on" : "off");
@@ -525,7 +525,7 @@ static int startup(struct tty_struct *tty, struct serial_state *info)
local_irq_save(flags);
- if (port->flags & ASYNC_INITIALIZED) {
+ if (tty_port_initialized(port)) {
free_page(page);
goto errout;
}
@@ -586,7 +586,7 @@ static int startup(struct tty_struct *tty, struct serial_state *info)
*/
change_speed(tty, info, NULL);
- port->flags |= ASYNC_INITIALIZED;
+ tty_port_set_initialized(port, 1);
local_irq_restore(flags);
return 0;
@@ -604,7 +604,7 @@ static void shutdown(struct tty_struct *tty, struct serial_state *info)
unsigned long flags;
struct serial_state *state;
- if (!(info->tport.flags & ASYNC_INITIALIZED))
+ if (!tty_port_initialized(&info->tport))
return;
state = info;
@@ -645,7 +645,7 @@ static void shutdown(struct tty_struct *tty, struct serial_state *info)
set_bit(TTY_IO_ERROR, &tty->flags);
- info->tport.flags &= ~ASYNC_INITIALIZED;
+ tty_port_set_initialized(&info->tport, 0);
local_irq_restore(flags);
}
@@ -727,17 +727,12 @@ static void change_speed(struct tty_struct *tty, struct serial_state *info,
info->IER &= ~UART_IER_MSI;
if (port->flags & ASYNC_HARDPPS_CD)
info->IER |= UART_IER_MSI;
- if (cflag & CRTSCTS) {
- port->flags |= ASYNC_CTS_FLOW;
+ tty_port_set_cts_flow(port, cflag & CRTSCTS);
+ if (cflag & CRTSCTS)
info->IER |= UART_IER_MSI;
- } else
- port->flags &= ~ASYNC_CTS_FLOW;
- if (cflag & CLOCAL)
- port->flags &= ~ASYNC_CHECK_CD;
- else {
- port->flags |= ASYNC_CHECK_CD;
+ tty_port_set_check_carrier(port, ~cflag & CLOCAL);
+ if (~cflag & CLOCAL)
info->IER |= UART_IER_MSI;
- }
/* TBD:
* Does clearing IER_MSI imply that we should disable the VBL interrupt ?
*/
@@ -1089,7 +1084,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
check_and_exit:
- if (port->flags & ASYNC_INITIALIZED) {
+ if (tty_port_initialized(port)) {
if (change_spd) {
if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
tty->alt_speed = 57600;
@@ -1143,7 +1138,7 @@ static int rs_tiocmget(struct tty_struct *tty)
if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
return -ENODEV;
- if (tty->flags & (1 << TTY_IO_ERROR))
+ if (tty_io_error(tty))
return -EIO;
control = info->MCR;
@@ -1165,7 +1160,7 @@ static int rs_tiocmset(struct tty_struct *tty, unsigned int set,
if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
return -ENODEV;
- if (tty->flags & (1 << TTY_IO_ERROR))
+ if (tty_io_error(tty))
return -EIO;
local_irq_save(flags);
@@ -1250,7 +1245,7 @@ static int rs_ioctl(struct tty_struct *tty,
if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
(cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) &&
(cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
- if (tty->flags & (1 << TTY_IO_ERROR))
+ if (tty_io_error(tty))
return -EIO;
}
@@ -1342,7 +1337,7 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
info->MCR |= SER_DTR;
- if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
+ if (!C_CRTSCTS(tty) || !tty_throttled(tty))
info->MCR |= SER_RTS;
local_irq_save(flags);
rtsdtr_ctrl(info->MCR);
@@ -1395,7 +1390,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
* line status register.
*/
state->read_status_mask &= ~UART_LSR_DR;
- if (port->flags & ASYNC_INITIALIZED) {
+ if (tty_port_initialized(port)) {
/* disable receive interrupts */
custom.intena = IF_RBF;
mb();
@@ -1495,7 +1490,7 @@ static void rs_hangup(struct tty_struct *tty)
rs_flush_buffer(tty);
shutdown(tty, info);
info->tport.count = 0;
- info->tport.flags &= ~ASYNC_NORMAL_ACTIVE;
+ tty_port_set_active(&info->tport, 0);
info->tport.tty = NULL;
wake_up_interruptible(&info->tport.open_wait);
}
@@ -1543,7 +1538,7 @@ static inline void line_info(struct seq_file *m, int line,
local_irq_save(flags);
status = ciab.pra;
- control = (state->tport.flags & ASYNC_INITIALIZED) ? state->MCR : status;
+ control = tty_port_initialized(&state->tport) ? state->MCR : status;
local_irq_restore(flags);
stat_buf[0] = 0;
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 6ab8ccfac..c67a0baab 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -714,7 +714,7 @@ static void cyy_chip_modem(struct cyclades_card *cinfo, int chip,
wake_up_interruptible(&info->port.delta_msr_wait);
}
- if ((mdm_change & CyDCD) && (info->port.flags & ASYNC_CHECK_CD)) {
+ if ((mdm_change & CyDCD) && tty_port_check_carrier(&info->port)) {
if (mdm_status & CyDCD)
wake_up_interruptible(&info->port.open_wait);
else
@@ -1119,7 +1119,7 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
case C_CM_MDCD:
info->icount.dcd++;
delta_count++;
- if (info->port.flags & ASYNC_CHECK_CD) {
+ if (tty_port_check_carrier(&info->port)) {
u32 dcd = fw_ver > 241 ? param :
readl(&info->u.cyz.ch_ctrl->rs_status);
if (dcd & C_RS_DCD)
@@ -1279,7 +1279,7 @@ static int cy_startup(struct cyclades_port *info, struct tty_struct *tty)
spin_lock_irqsave(&card->card_lock, flags);
- if (info->port.flags & ASYNC_INITIALIZED)
+ if (tty_port_initialized(&info->port))
goto errout;
if (!info->type) {
@@ -1364,7 +1364,7 @@ static int cy_startup(struct cyclades_port *info, struct tty_struct *tty)
/* enable send, recv, modem !!! */
}
- info->port.flags |= ASYNC_INITIALIZED;
+ tty_port_set_initialized(&info->port, 1);
clear_bit(TTY_IO_ERROR, &tty->flags);
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
@@ -1424,7 +1424,7 @@ static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty)
struct cyclades_card *card;
unsigned long flags;
- if (!(info->port.flags & ASYNC_INITIALIZED))
+ if (!tty_port_initialized(&info->port))
return;
card = info->card;
@@ -1448,7 +1448,7 @@ static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty)
some later date (after testing)!!! */
set_bit(TTY_IO_ERROR, &tty->flags);
- info->port.flags &= ~ASYNC_INITIALIZED;
+ tty_port_set_initialized(&info->port, 0);
spin_unlock_irqrestore(&card->card_lock, flags);
} else {
#ifdef CY_DEBUG_OPEN
@@ -1473,7 +1473,7 @@ static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty)
tty_port_lower_dtr_rts(&info->port);
set_bit(TTY_IO_ERROR, &tty->flags);
- info->port.flags &= ~ASYNC_INITIALIZED;
+ tty_port_set_initialized(&info->port, 0);
spin_unlock_irqrestore(&card->card_lock, flags);
}
@@ -1711,7 +1711,7 @@ static void cy_do_close(struct tty_port *port)
/* Stop accepting input */
cyy_writeb(info, CyCAR, channel & 0x03);
cyy_writeb(info, CySRER, cyy_readb(info, CySRER) & ~CyRxData);
- if (info->port.flags & ASYNC_INITIALIZED) {
+ if (tty_port_initialized(&info->port)) {
/* Waiting for on-board buffers to be empty before
closing the port */
spin_unlock_irqrestore(&card->card_lock, flags);
@@ -2083,17 +2083,12 @@ static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty)
info->cor1 |= CyPARITY_NONE;
/* CTS flow control flag */
- if (cflag & CRTSCTS) {
- info->port.flags |= ASYNC_CTS_FLOW;
+ tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
+ if (cflag & CRTSCTS)
info->cor2 |= CyCtsAE;
- } else {
- info->port.flags &= ~ASYNC_CTS_FLOW;
- info->cor2 &= ~CyCtsAE;
- }
- if (cflag & CLOCAL)
- info->port.flags &= ~ASYNC_CHECK_CD;
else
- info->port.flags |= ASYNC_CHECK_CD;
+ info->cor2 &= ~CyCtsAE;
+ tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
/***********************************************
The hardware option, CyRtsAO, presents RTS when
@@ -2234,7 +2229,7 @@ static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty)
}
/* As the HW flow control is done in firmware, the driver
doesn't need to care about it */
- info->port.flags &= ~ASYNC_CTS_FLOW;
+ tty_port_set_cts_flow(&info->port, 0);
/* XON/XOFF/XANY flow control flags */
sw_flow = 0;
@@ -2252,10 +2247,7 @@ static void cy_set_line_char(struct cyclades_port *info, struct tty_struct *tty)
}
/* CD sensitivity */
- if (cflag & CLOCAL)
- info->port.flags &= ~ASYNC_CHECK_CD;
- else
- info->port.flags |= ASYNC_CHECK_CD;
+ tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
if (baud == 0) { /* baud rate is zero, turn off line */
cy_writel(&ch_ctrl->rs_control,
@@ -2342,7 +2334,7 @@ cy_set_serial_info(struct cyclades_port *info, struct tty_struct *tty,
info->port.closing_wait = new_serial.closing_wait * HZ / 100;
check_and_exit:
- if (info->port.flags & ASYNC_INITIALIZED) {
+ if (tty_port_initialized(&info->port)) {
cy_set_line_char(info, tty);
ret = 0;
} else {
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index e46d62899..ce8648753 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -632,7 +632,7 @@ int hvc_poll(struct hvc_struct *hp)
goto bail;
/* Now check if we can get data (are we throttled ?) */
- if (test_bit(TTY_THROTTLED, &tty->flags))
+ if (tty_throttled(tty))
goto throttled;
/* If we aren't notifier driven and aren't throttled, we always
@@ -814,7 +814,7 @@ static int hvc_poll_get_char(struct tty_driver *driver, int line)
n = hp->ops->get_chars(hp->vtermno, &ch, 1);
- if (n == 0)
+ if (n <= 0)
return NO_POLL_CHAR;
return ch;
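
The hvc_poll_get_char() fix treats any non-positive count from get_chars() as "no character available"; previously a negative error code fell through and was handed back as if it were a received byte. A user-space model of the corrected contract (the backend is a stub; NO_POLL_CHAR value as in serial_core.h):

    #include <stdio.h>

    #define NO_POLL_CHAR 0x00ff0000

    /* Stand-in for hp->ops->get_chars(): may legitimately return a
     * negative error as well as zero for "nothing pending". */
    static int fake_get_chars(char *buf, int count)
    {
        (void)buf; (void)count;
        return -1;
    }

    static int poll_get_char(void)
    {
        char ch;
        int n = fake_get_chars(&ch, 1);

        if (n <= 0)        /* was: n == 0, letting -1 slip through */
            return NO_POLL_CHAR;
        return ch;
    }

    int main(void)
    {
        printf("%#x\n", poll_get_char());   /* prints the sentinel */
        return 0;
    }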
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 5997b1731..3c4d7c2b4 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -600,7 +600,7 @@ static int hvcs_io(struct hvcs_struct *hvcsd)
hvcs_try_write(hvcsd);
- if (!tty || test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (!tty || tty_throttled(tty)) {
hvcsd->todo_mask &= ~(HVCS_READ_MASK);
goto bail;
} else if (!(hvcsd->todo_mask & (HVCS_READ_MASK)))
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index a75146f60..96ce6bd1c 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -509,7 +509,7 @@ static irqreturn_t hvsi_interrupt(int irq, void *arg)
}
spin_lock_irqsave(&hp->lock, flags);
- if (tty && hp->n_throttle && !test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (tty && hp->n_throttle && !tty_throttled(tty)) {
/* we weren't hung up and we weren't throttled, so we can
* deliver the rest now */
hvsi_send_overflow(hp);
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
index ad7031a4f..df0204b61 100644
--- a/drivers/tty/ipwireless/hardware.c
+++ b/drivers/tty/ipwireless/hardware.c
@@ -1572,6 +1572,11 @@ static void handle_received_SETUP_packet(struct ipw_hardware *hw,
sizeof(struct ipw_setup_reboot_msg_ack),
ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP,
TL_SETUP_SIGNO_REBOOT_MSG_ACK);
+ if (!packet) {
+ pr_err(IPWIRELESS_PCCARD_NAME
+ ": Not enough memory to send reboot packet");
+ break;
+ }
packet->header.length =
sizeof(struct TlSetupRebootMsgAck);
send_packet(hw, PRIO_SETUP, &packet->header);
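
The ipwireless hunk adds the allocation check that was missing before packet->header is touched: on failure the reboot acknowledgement is logged and dropped instead of dereferencing NULL. A stand-alone model of the pattern (names are placeholders):

    #include <stdio.h>
    #include <stdlib.h>

    struct packet { int length; };

    static struct packet *alloc_ctrl_packet(void)
    {
        return NULL;    /* simulate the allocation failing */
    }

    /* Check the allocation before touching any field; on failure log
     * and abandon this command only, as the added 'break' does. */
    static void handle_reboot_ack(void)
    {
        struct packet *packet = alloc_ctrl_packet();

        if (!packet) {
            fprintf(stderr, "not enough memory to send reboot ack\n");
            return;
        }
        packet->length = 4;
        /* send_packet(...) would go here */
        free(packet);
    }

    int main(void)
    {
        handle_reboot_ack();
        return 0;
    }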
diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c
index 374dd5134..5959ffdef 100644
--- a/drivers/tty/isicom.c
+++ b/drivers/tty/isicom.c
@@ -438,8 +438,8 @@ static void isicom_tx(unsigned long _data)
for (; count > 0; count--, port++) {
/* port not active or tx disabled to force flow control */
- if (!(port->port.flags & ASYNC_INITIALIZED) ||
- !(port->status & ISI_TXOK))
+ if (!tty_port_initialized(&port->port) ||
+ !(port->status & ISI_TXOK))
continue;
txcount = min_t(short, TX_SIZE, port->xmit_cnt);
@@ -553,7 +553,7 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
port = card->ports + channel;
- if (!(port->port.flags & ASYNC_INITIALIZED)) {
+ if (!tty_port_initialized(&port->port)) {
outw(0x0000, base+0x04); /* enable interrupts */
spin_unlock(&card->card_lock);
return IRQ_HANDLED;
@@ -577,7 +577,7 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
header = inw(base);
switch (header & 0xff) {
case 0: /* Change in EIA signals */
- if (port->port.flags & ASYNC_CHECK_CD) {
+ if (tty_port_check_carrier(&port->port)) {
if (port->status & ISI_DCD) {
if (!(header & ISI_DCD)) {
/* Carrier has been lost */
@@ -758,18 +758,13 @@ static void isicom_config_port(struct tty_struct *tty)
outw(channel_setup, base);
InterruptTheCard(base);
}
- if (C_CLOCAL(tty))
- port->port.flags &= ~ASYNC_CHECK_CD;
- else
- port->port.flags |= ASYNC_CHECK_CD;
+ tty_port_set_check_carrier(&port->port, !C_CLOCAL(tty));
/* flow control settings ...*/
flow_ctrl = 0;
- port->port.flags &= ~ASYNC_CTS_FLOW;
- if (C_CRTSCTS(tty)) {
- port->port.flags |= ASYNC_CTS_FLOW;
+ tty_port_set_cts_flow(&port->port, C_CRTSCTS(tty));
+ if (C_CRTSCTS(tty))
flow_ctrl |= ISICOM_CTSRTS;
- }
if (I_IXON(tty))
flow_ctrl |= ISICOM_RESPOND_XONXOFF;
if (I_IXOFF(tty))
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index e1952f615..feaa6edda 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -910,7 +910,7 @@ static void moxa_board_deinit(struct moxa_board_conf *brd)
/* pci hot-un-plug support */
for (a = 0; a < brd->numPorts; a++)
- if (brd->ports[a].port.flags & ASYNC_INITIALIZED)
+ if (tty_port_initialized(&brd->ports[a].port))
tty_port_tty_hangup(&brd->ports[a].port, false);
for (a = 0; a < MAX_PORTS_PER_BOARD; a++)
@@ -919,7 +919,7 @@ static void moxa_board_deinit(struct moxa_board_conf *brd)
while (1) {
opened = 0;
for (a = 0; a < brd->numPorts; a++)
- if (brd->ports[a].port.flags & ASYNC_INITIALIZED)
+ if (tty_port_initialized(&brd->ports[a].port))
opened++;
mutex_unlock(&moxa_openlock);
if (!opened)
@@ -1190,13 +1190,13 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
tty->driver_data = ch;
tty_port_tty_set(&ch->port, tty);
mutex_lock(&ch->port.mutex);
- if (!(ch->port.flags & ASYNC_INITIALIZED)) {
+ if (!tty_port_initialized(&ch->port)) {
ch->statusflags = 0;
moxa_set_tty_param(tty, &tty->termios);
MoxaPortLineCtrl(ch, 1, 1);
MoxaPortEnable(ch);
MoxaSetFifo(ch, ch->type == PORT_16550A);
- ch->port.flags |= ASYNC_INITIALIZED;
+ tty_port_set_initialized(&ch->port, 1);
}
mutex_unlock(&ch->port.mutex);
mutex_unlock(&moxa_openlock);
@@ -1377,7 +1377,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
{
struct tty_struct *tty = tty_port_tty_get(&p->port);
void __iomem *ofsAddr;
- unsigned int inited = p->port.flags & ASYNC_INITIALIZED;
+ unsigned int inited = tty_port_initialized(&p->port);
u16 intr;
if (tty) {
@@ -1392,7 +1392,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
tty_wakeup(tty);
}
- if (inited && !test_bit(TTY_THROTTLED, &tty->flags) &&
+ if (inited && !tty_throttled(tty) &&
MoxaPortRxQueue(p) > 0) { /* RX */
MoxaPortReadData(p);
tty_schedule_flip(&p->port);
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 2f12bb9f4..98d2bd167 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -711,8 +711,8 @@ static int mxser_change_speed(struct tty_struct *tty,
/* CTS flow control flag and modem status interrupts */
info->IER &= ~UART_IER_MSI;
info->MCR &= ~UART_MCR_AFE;
+ tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
if (cflag & CRTSCTS) {
- info->port.flags |= ASYNC_CTS_FLOW;
info->IER |= UART_IER_MSI;
if ((info->type == PORT_16550A) || (info->board->chip_flag)) {
info->MCR |= UART_MCR_AFE;
@@ -744,16 +744,11 @@ static int mxser_change_speed(struct tty_struct *tty,
}
}
}
- } else {
- info->port.flags &= ~ASYNC_CTS_FLOW;
}
outb(info->MCR, info->ioaddr + UART_MCR);
- if (cflag & CLOCAL) {
- info->port.flags &= ~ASYNC_CHECK_CD;
- } else {
- info->port.flags |= ASYNC_CHECK_CD;
+ tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
+ if (~cflag & CLOCAL)
info->IER |= UART_IER_MSI;
- }
outb(info->IER, info->ioaddr + UART_IER);
/*
@@ -826,7 +821,7 @@ static void mxser_check_modem_status(struct tty_struct *tty,
port->mon_data.modem_status = status;
wake_up_interruptible(&port->port.delta_msr_wait);
- if ((port->port.flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) {
+ if (tty_port_check_carrier(&port->port) && (status & UART_MSR_DDCD)) {
if (status & UART_MSR_DCD)
wake_up_interruptible(&port->port.open_wait);
}
@@ -1086,12 +1081,10 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
mutex_lock(&port->mutex);
mxser_close_port(port);
mxser_flush_buffer(tty);
- if (test_bit(ASYNCB_INITIALIZED, &port->flags)) {
- if (C_HUPCL(tty))
- tty_port_lower_dtr_rts(port);
- }
+ if (tty_port_initialized(port) && C_HUPCL(tty))
+ tty_port_lower_dtr_rts(port);
mxser_shutdown_port(port);
- clear_bit(ASYNCB_INITIALIZED, &port->flags);
+ tty_port_set_initialized(port, 0);
mutex_unlock(&port->mutex);
info->closing = 0;
/* Right now the tty_port set is done outside of the close_end helper
@@ -1287,7 +1280,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
process_txrx_fifo(info);
- if (test_bit(ASYNCB_INITIALIZED, &port->flags)) {
+ if (tty_port_initialized(port)) {
if (flags != (port->flags & ASYNC_SPD_MASK)) {
spin_lock_irqsave(&info->slock, sl_flags);
mxser_change_speed(tty, NULL);
@@ -1296,7 +1289,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
} else {
retval = mxser_activate(port, tty);
if (retval == 0)
- set_bit(ASYNCB_INITIALIZED, &port->flags);
+ tty_port_set_initialized(port, 1);
}
return retval;
}
@@ -1334,7 +1327,7 @@ static int mxser_tiocmget(struct tty_struct *tty)
if (tty->index == MXSER_PORTS)
return -ENOIOCTLCMD;
- if (test_bit(TTY_IO_ERROR, &tty->flags))
+ if (tty_io_error(tty))
return -EIO;
control = info->MCR;
@@ -1361,7 +1354,7 @@ static int mxser_tiocmset(struct tty_struct *tty,
if (tty->index == MXSER_PORTS)
return -ENOIOCTLCMD;
- if (test_bit(TTY_IO_ERROR, &tty->flags))
+ if (tty_io_error(tty))
return -EIO;
spin_lock_irqsave(&info->slock, flags);
@@ -1715,8 +1708,7 @@ static int mxser_ioctl(struct tty_struct *tty,
return 0;
}
- if (cmd != TIOCGSERIAL && cmd != TIOCMIWAIT &&
- test_bit(TTY_IO_ERROR, &tty->flags))
+ if (cmd != TIOCGSERIAL && cmd != TIOCMIWAIT && tty_io_error(tty))
return -EIO;
switch (cmd) {
@@ -2257,7 +2249,7 @@ static irqreturn_t mxser_interrupt(int irq, void *dev_id)
iir &= MOXA_MUST_IIR_MASK;
tty = tty_port_tty_get(&port->port);
if (!tty || port->closing ||
- !(port->port.flags & ASYNC_INITIALIZED)) {
+ !tty_port_initialized(&port->port)) {
status = inb(port->ioaddr + UART_LSR);
outb(0x27, port->ioaddr + UART_FCR);
inb(port->ioaddr + UART_MSR);
@@ -2400,7 +2392,6 @@ static int mxser_initbrd(struct mxser_board *brd,
if (brd->chip_flag != MOXA_OTHER_UART)
mxser_enable_must_enchance_mode(info->ioaddr);
- info->port.flags = ASYNC_SHARE_IRQ;
info->type = brd->uart_type;
process_txrx_fifo(info);
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 365dfd8bc..54cab59e2 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2664,7 +2664,7 @@ static int gsm_mux_net_start_xmit(struct sk_buff *skb,
STATS(net).tx_bytes += skb->len;
gsm_dlci_data_kick(dlci);
/* And tell the kernel when the last transmit started. */
- net->trans_start = jiffies;
+ netif_trans_update(net);
muxnet_put(mux_net);
return NETDEV_TX_OK;
}
@@ -2949,7 +2949,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
dlci->modem_rx = 0;
/* We could in theory open and close before we wait - eg if we get
a DM straight back. This is ok as that will have caused a hangup */
- set_bit(ASYNCB_INITIALIZED, &port->flags);
+ tty_port_set_initialized(port, 1);
/* Start sending off SABM messages */
gsm_dlci_begin_open(dlci);
/* And wait for virtual carrier */
@@ -2972,10 +2972,8 @@ static void gsmtty_close(struct tty_struct *tty, struct file *filp)
if (tty_port_close_start(&dlci->port, tty, filp) == 0)
return;
gsm_dlci_begin_close(dlci);
- if (test_bit(ASYNCB_INITIALIZED, &dlci->port.flags)) {
- if (C_HUPCL(tty))
- tty_port_lower_dtr_rts(&dlci->port);
- }
+ if (tty_port_initialized(&dlci->port) && C_HUPCL(tty))
+ tty_port_lower_dtr_rts(&dlci->port);
tty_port_close_end(&dlci->port, tty);
tty_port_tty_set(&dlci->port, NULL);
return;
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index 5cc80b80c..d6fd0e802 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -826,7 +826,7 @@ static int receive_data(enum port_type index, struct nozomi *dc)
size = __le32_to_cpu(readl(addr));
/* DBG1( "%d bytes port: %d", size, index); */
- if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (tty && tty_throttled(tty)) {
DBG1("No room in tty, don't read data, don't ack interrupt, "
"disable interrupt");
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index e198996c5..51e0d3288 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -44,7 +44,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
if (tty->driver->subtype == PTY_TYPE_MASTER)
WARN_ON(tty->count > 1);
else {
- if (test_bit(TTY_IO_ERROR, &tty->flags))
+ if (tty_io_error(tty))
return;
if (tty->count > 2)
return;
@@ -667,8 +667,11 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
fsi = tty->driver_data;
else
fsi = tty->link->driver_data;
- devpts_kill_index(fsi, tty->index);
- devpts_put_ref(fsi);
+
+ if (fsi) {
+ devpts_kill_index(fsi, tty->index);
+ devpts_release(fsi);
+ }
}
static const struct tty_operations ptm_unix98_ops = {
@@ -733,10 +736,11 @@ static int ptmx_open(struct inode *inode, struct file *filp)
if (retval)
return retval;
- fsi = devpts_get_ref(inode, filp);
- retval = -ENODEV;
- if (!fsi)
+ fsi = devpts_acquire(filp);
+ if (IS_ERR(fsi)) {
+ retval = PTR_ERR(fsi);
goto out_free_file;
+ }
/* find a device that is not in use. */
mutex_lock(&devpts_mutex);
@@ -745,7 +749,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
retval = index;
if (index < 0)
- goto out_put_ref;
+ goto out_put_fsi;
mutex_lock(&tty_mutex);
@@ -789,8 +793,8 @@ err_release:
return retval;
out:
devpts_kill_index(fsi, index);
-out_put_ref:
- devpts_put_ref(fsi);
+out_put_fsi:
+ devpts_release(fsi);
out_free_file:
tty_free_file(filp);
return retval;
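
The pty hunks swap devpts_get_ref(), which reported failure as NULL and left the caller to hard-code -ENODEV, for devpts_acquire(), which carries the precise errno inside the returned pointer. A simplified user-space model of the ERR_PTR()/IS_ERR()/PTR_ERR() convention this relies on (the kernel's versions live in <linux/err.h>):

    #include <stdio.h>
    #include <errno.h>

    /* Errno values are encoded at the very top of the pointer range,
     * so one return value can be either a valid pointer or an error. */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    struct fs_info { int dummy; };

    /* Model of devpts_acquire(): the errno travels in the pointer. */
    static struct fs_info *acquire(int fail)
    {
        static struct fs_info fsi;

        return fail ? ERR_PTR(-ENODEV) : &fsi;
    }

    int main(void)
    {
        struct fs_info *fsi = acquire(1);

        if (IS_ERR(fsi)) {            /* ptmx_open() now does this */
            printf("acquire failed: %ld\n", PTR_ERR(fsi));
            return 1;
        }
        return 0;
    }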
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 0b802cdd7..b0cc47c77 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -495,7 +495,7 @@ static void rp_handle_port(struct r_port *info)
if (!info)
return;
- if ((info->port.flags & ASYNC_INITIALIZED) == 0) {
+ if (!tty_port_initialized(&info->port)) {
printk(KERN_WARNING "rp: WARNING: rp_handle_port called with "
"info->flags & NOT_INIT\n");
return;
@@ -615,7 +615,8 @@ static void rp_do_poll(unsigned long dummy)
* the board.
* Inputs: board, aiop, chan numbers
*/
-static void init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
+static void __init
+init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
{
unsigned rocketMode;
struct r_port *info;
@@ -920,7 +921,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
/*
* Info->count is now 1; so it's safe to sleep now.
*/
- if (!test_bit(ASYNCB_INITIALIZED, &port->flags)) {
+ if (!tty_port_initialized(port)) {
cp = &info->channel;
sSetRxTrigger(cp, TRIG_1);
if (sGetChanStatus(cp) & CD_ACT)
@@ -944,7 +945,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
sEnRxFIFO(cp);
sEnTransmit(cp);
- set_bit(ASYNCB_INITIALIZED, &info->port.flags);
+ tty_port_set_initialized(&info->port, 1);
/*
* Set up the tty->alt_speed kludge
@@ -1042,9 +1043,10 @@ static void rp_close(struct tty_struct *tty, struct file *filp)
}
}
spin_lock_irq(&port->lock);
- info->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_NORMAL_ACTIVE);
tty->closing = 0;
spin_unlock_irq(&port->lock);
+ tty_port_set_initialized(port, 0);
+ tty_port_set_active(port, 0);
mutex_unlock(&port->mutex);
tty_port_tty_set(port, NULL);
@@ -1512,7 +1514,7 @@ static void rp_hangup(struct tty_struct *tty)
sDisCTSFlowCtl(cp);
sDisTxSoftFlowCtl(cp);
sClrTxXOFF(cp);
- clear_bit(ASYNCB_INITIALIZED, &info->port.flags);
+ tty_port_set_initialized(&info->port, 0);
wake_up_interruptible(&info->port.open_wait);
}
@@ -1624,7 +1626,7 @@ static int rp_write(struct tty_struct *tty,
/* Write remaining data into the port's xmit_buf */
while (1) {
/* Hung up ? */
- if (!test_bit(ASYNCB_NORMAL_ACTIVE, &info->port.flags))
+ if (!tty_port_active(&info->port))
goto end;
c = min(count, XMIT_BUF_SIZE - info->xmit_cnt - 1);
c = min(c, XMIT_BUF_SIZE - info->xmit_head);
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 047a7ba67..215a99237 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -17,7 +17,7 @@
struct uart_8250_dma {
int (*tx_dma)(struct uart_8250_port *p);
- int (*rx_dma)(struct uart_8250_port *p, unsigned int iir);
+ int (*rx_dma)(struct uart_8250_port *p);
/* Filter function */
dma_filter_fn fn;
@@ -84,7 +84,6 @@ struct serial8250_config {
#define UART_BUG_THRE (1 << 3) /* UART has buggy THRE reassertion */
#define UART_BUG_PARITY (1 << 4) /* UART mishandles parity if FIFO enabled */
-#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
#ifdef CONFIG_SERIAL_8250_SHARE_IRQ
#define SERIAL8250_SHARE_IRQS 1
@@ -151,6 +150,12 @@ static inline int serial8250_pnp_init(void) { return 0; }
static inline void serial8250_pnp_exit(void) { }
#endif
+#ifdef CONFIG_SERIAL_8250_FINTEK
+int fintek_8250_probe(struct uart_8250_port *uart);
+#else
+static inline int fintek_8250_probe(struct uart_8250_port *uart) { return 0; }
+#endif
+
#ifdef CONFIG_ARCH_OMAP1
static inline int is_omap1_8250(struct uart_8250_port *pt)
{
@@ -190,7 +195,8 @@ static inline int is_omap1510_8250(struct uart_8250_port *pt)
#ifdef CONFIG_SERIAL_8250_DMA
extern int serial8250_tx_dma(struct uart_8250_port *);
-extern int serial8250_rx_dma(struct uart_8250_port *, unsigned int iir);
+extern int serial8250_rx_dma(struct uart_8250_port *);
+extern void serial8250_rx_dma_flush(struct uart_8250_port *);
extern int serial8250_request_dma(struct uart_8250_port *);
extern void serial8250_release_dma(struct uart_8250_port *);
#else
@@ -198,10 +204,11 @@ static inline int serial8250_tx_dma(struct uart_8250_port *p)
{
return -1;
}
-static inline int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
+static inline int serial8250_rx_dma(struct uart_8250_port *p)
{
return -1;
}
+static inline void serial8250_rx_dma_flush(struct uart_8250_port *p) { }
static inline int serial8250_request_dma(struct uart_8250_port *p)
{
return -1;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 2f4f5ee65..0fbd7c033 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -830,6 +830,7 @@ static int serial8250_probe(struct platform_device *dev)
uart.port.handle_irq = p->handle_irq;
uart.port.handle_break = p->handle_break;
uart.port.set_termios = p->set_termios;
+ uart.port.get_mctrl = p->get_mctrl;
uart.port.pm = p->pm;
uart.port.dev = &dev->dev;
uart.port.irqflags |= irqflag;
@@ -1022,6 +1023,8 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
/* Possibly override set_termios call */
if (up->port.set_termios)
uart->port.set_termios = up->port.set_termios;
+ if (up->port.get_mctrl)
+ uart->port.get_mctrl = up->port.get_mctrl;
if (up->port.set_mctrl)
uart->port.set_mctrl = up->port.set_mctrl;
if (up->port.startup)
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 78259d3c6..7f33d1c8d 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -110,30 +110,11 @@ err:
return ret;
}
-int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
+int serial8250_rx_dma(struct uart_8250_port *p)
{
struct uart_8250_dma *dma = p->dma;
struct dma_async_tx_descriptor *desc;
- switch (iir & 0x3f) {
- case UART_IIR_RLSI:
- /* 8250_core handles errors and break interrupts */
- return -EIO;
- case UART_IIR_RX_TIMEOUT:
- /*
- * If RCVR FIFO trigger level was not reached, complete the
- * transfer and let 8250_core copy the remaining data.
- */
- if (dma->rx_running) {
- dmaengine_pause(dma->rxchan);
- __dma_rx_complete(p);
- dmaengine_terminate_all(dma->rxchan);
- }
- return -ETIMEDOUT;
- default:
- break;
- }
-
if (dma->rx_running)
return 0;
@@ -154,10 +135,23 @@ int serial8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
return 0;
}
+void serial8250_rx_dma_flush(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+
+ if (dma->rx_running) {
+ dmaengine_pause(dma->rxchan);
+ __dma_rx_complete(p);
+ dmaengine_terminate_all(dma->rxchan);
+ }
+}
+
int serial8250_request_dma(struct uart_8250_port *p)
{
struct uart_8250_dma *dma = p->dma;
dma_cap_mask_t mask;
+ struct dma_slave_caps caps;
+ int ret;
/* Default slave configuration parameters */
dma->rxconf.direction = DMA_DEV_TO_MEM;
@@ -178,6 +172,16 @@ int serial8250_request_dma(struct uart_8250_port *p)
if (!dma->rxchan)
return -ENODEV;
+ /* 8250 rx dma requires dmaengine driver to support pause/terminate */
+ ret = dma_get_slave_caps(dma->rxchan, &caps);
+ if (ret)
+ goto release_rx;
+ if (!caps.cmd_pause || !caps.cmd_terminate ||
+ caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
+ ret = -EINVAL;
+ goto release_rx;
+ }
+
dmaengine_slave_config(dma->rxchan, &dma->rxconf);
/* Get a channel for TX */
@@ -185,8 +189,17 @@ int serial8250_request_dma(struct uart_8250_port *p)
dma->fn, dma->tx_param,
p->port.dev, "tx");
if (!dma->txchan) {
- dma_release_channel(dma->rxchan);
- return -ENODEV;
+ ret = -ENODEV;
+ goto release_rx;
+ }
+
+ /* 8250 tx dma requires dmaengine driver to support terminate */
+ ret = dma_get_slave_caps(dma->txchan, &caps);
+ if (ret)
+ goto err;
+ if (!caps.cmd_terminate) {
+ ret = -EINVAL;
+ goto err;
}
dmaengine_slave_config(dma->txchan, &dma->txconf);
@@ -197,8 +210,10 @@ int serial8250_request_dma(struct uart_8250_port *p)
dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
&dma->rx_addr, GFP_KERNEL);
- if (!dma->rx_buf)
+ if (!dma->rx_buf) {
+ ret = -ENOMEM;
goto err;
+ }
/* TX buffer */
dma->tx_addr = dma_map_single(dma->txchan->device->dev,
@@ -208,6 +223,7 @@ int serial8250_request_dma(struct uart_8250_port *p)
if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
dma->rx_buf, dma->rx_addr);
+ ret = -ENOMEM;
goto err;
}
@@ -215,10 +231,10 @@ int serial8250_request_dma(struct uart_8250_port *p)
return 0;
err:
- dma_release_channel(dma->rxchan);
dma_release_channel(dma->txchan);
-
- return -ENOMEM;
+release_rx:
+ dma_release_channel(dma->rxchan);
+ return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);
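
serial8250_request_dma() now interrogates the engine with dma_get_slave_caps() and refuses channels that cannot pause/terminate, and its error path is rebuilt around labeled unwind steps: err releases the TX channel and falls through to release_rx, so everything is freed in reverse order of acquisition and the real errno is propagated instead of a blanket -ENOMEM. A stand-alone sketch of that unwind shape (the stubs simulate a capability failure):

    #include <stdio.h>
    #include <errno.h>

    static int rx_get(void)   { return 0; }
    static int tx_get(void)   { return 0; }
    static int caps_ok(void)  { return -EINVAL; } /* unsupported caps */
    static void rx_put(void)  { puts("release rx"); }
    static void tx_put(void)  { puts("release tx"); }

    /* Each failure jumps to the label that frees exactly what has
     * been acquired so far, and ret carries the original errno. */
    static int request_dma(void)
    {
        int ret;

        ret = rx_get();
        if (ret)
            return ret;

        ret = tx_get();
        if (ret)
            goto release_rx;       /* only RX held at this point */

        ret = caps_ok();
        if (ret)
            goto err;              /* both channels held */

        return 0;

    err:
        tx_put();
    release_rx:                    /* err: falls through to here */
        rx_put();
        return ret;
    }

    int main(void)
    {
        printf("request_dma() = %d\n", request_dma());
        return 0;
    }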
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index a3fb95d85..e19969614 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -104,15 +104,16 @@ static void dw8250_check_lcr(struct uart_port *p, int value)
dw8250_force_idle(p);
#ifdef CONFIG_64BIT
- __raw_writeq(value & 0xff, offset);
-#else
+ if (p->type == PORT_OCTEON)
+ __raw_writeq(value & 0xff, offset);
+ else
+#endif
if (p->iotype == UPIO_MEM32)
writel(value, offset);
else if (p->iotype == UPIO_MEM32BE)
iowrite32be(value, offset);
else
writeb(value, offset);
-#endif
}
/*
* FIXME: this deadlocks if port->lock is already held
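
The dw8250_check_lcr() hunk narrows the unconditional CONFIG_64BIT __raw_writeq() to PORT_OCTEON; any other 64-bit build now falls through to the access width selected by iotype. A user-space model of that dispatch (UPIO_* names kept for readability, the register is a plain variable):

    #include <stdint.h>
    #include <stdio.h>

    enum iotype { UPIO_MEM, UPIO_MEM32, UPIO_MEM32BE };

    struct dw_port {
        enum iotype iotype;
        int is_octeon;
        uint64_t reg;      /* stands in for the mapped LCR register */
    };

    static uint32_t bswap32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0xff00) |
               ((v << 8) & 0xff0000) | (v << 24);
    }

    /* The 64-bit path is now an SoC quirk, not a build-time default. */
    static void write_lcr(struct dw_port *p, uint32_t value)
    {
        if (p->is_octeon) {            /* was: any CONFIG_64BIT build */
            p->reg = value & 0xff;     /* models __raw_writeq() */
            return;
        }
        switch (p->iotype) {
        case UPIO_MEM32:   p->reg = value;          break;
        case UPIO_MEM32BE: p->reg = bswap32(value); break;
        default:           p->reg = value & 0xff;   break; /* writeb() */
        }
    }

    int main(void)
    {
        struct dw_port p = { UPIO_MEM32BE, 0, 0 };

        write_lcr(&p, 0x03);
        printf("reg = %#llx\n", (unsigned long long)p.reg);
        return 0;
    }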
@@ -617,6 +618,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
{ "8086228A", 0 },
{ "APMC0D08", 0},
{ "AMD0020", 0 },
+ { "AMDI0020", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 89474399a..870981dd9 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -1,9 +1,7 @@
/*
* Probe for F81216A LPC to 4 UART
*
- * Based on drivers/tty/serial/8250_pnp.c, by Russell King, et al
- *
- * Copyright (C) 2014 Ricardo Ribalda, Qtechnology A/S
+ * Copyright (C) 2014-2016 Ricardo Ribalda, Qtechnology A/S
*
*
* This program is free software; you can redistribute it and/or modify
@@ -38,19 +36,15 @@
#define RXW4C_IRA BIT(3)
#define TXW4C_IRA BIT(2)
-#define DRIVER_NAME "8250_fintek"
-
struct fintek_8250 {
u16 base_port;
u8 index;
u8 key;
- long line;
};
static int fintek_8250_enter_key(u16 base_port, u8 key)
{
-
- if (!request_muxed_region(base_port, 2, DRIVER_NAME))
+ if (!request_muxed_region(base_port, 2, "8250_fintek"))
return -EBUSY;
outb(key, base_port + ADDR_PORT);
@@ -138,7 +132,7 @@ static int fintek_8250_rs485_config(struct uart_port *port,
return 0;
}
-static int fintek_8250_base_port(u16 io_address, u8 *key, u8 *index)
+static int find_base_port(struct fintek_8250 *pdata, u16 io_address)
{
static const u16 addr[] = {0x4e, 0x2e};
static const u8 keys[] = {0x77, 0xa0, 0x87, 0x67};
@@ -168,10 +162,13 @@ static int fintek_8250_base_port(u16 io_address, u8 *key, u8 *index)
continue;
fintek_8250_exit_key(addr[i]);
- *key = keys[j];
- *index = k;
- return addr[i];
+ pdata->key = keys[j];
+ pdata->base_port = addr[i];
+ pdata->index = k;
+
+ return 0;
}
+
fintek_8250_exit_key(addr[i]);
}
}
@@ -179,104 +176,21 @@ static int fintek_8250_base_port(u16 io_address, u8 *key, u8 *index)
return -ENODEV;
}
-static int
-fintek_8250_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
+int fintek_8250_probe(struct uart_8250_port *uart)
{
- struct uart_8250_port uart;
struct fintek_8250 *pdata;
- int base_port;
- u8 key;
- u8 index;
-
- if (!pnp_port_valid(dev, 0))
- return -ENODEV;
+ struct fintek_8250 probe_data;
- base_port = fintek_8250_base_port(pnp_port_start(dev, 0), &key, &index);
- if (base_port < 0)
+ if (find_base_port(&probe_data, uart->port.iobase))
return -ENODEV;
- memset(&uart, 0, sizeof(uart));
-
- pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(uart->port.dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- uart.port.private_data = pdata;
- if (!pnp_irq_valid(dev, 0))
- return -ENODEV;
- uart.port.irq = pnp_irq(dev, 0);
- uart.port.iobase = pnp_port_start(dev, 0);
- uart.port.iotype = UPIO_PORT;
- uart.port.rs485_config = fintek_8250_rs485_config;
-
- uart.port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
- if (pnp_irq_flags(dev, 0) & IORESOURCE_IRQ_SHAREABLE)
- uart.port.flags |= UPF_SHARE_IRQ;
- uart.port.uartclk = 1843200;
- uart.port.dev = &dev->dev;
-
- pdata->key = key;
- pdata->base_port = base_port;
- pdata->index = index;
- pdata->line = serial8250_register_8250_port(&uart);
- if (pdata->line < 0)
- return -ENODEV;
+ memcpy(pdata, &probe_data, sizeof(probe_data));
+ uart->port.rs485_config = fintek_8250_rs485_config;
+ uart->port.private_data = pdata;
- pnp_set_drvdata(dev, pdata);
return 0;
}
-
-static void fintek_8250_remove(struct pnp_dev *dev)
-{
- struct fintek_8250 *pdata = pnp_get_drvdata(dev);
-
- if (pdata)
- serial8250_unregister_port(pdata->line);
-}
-
-#ifdef CONFIG_PM
-static int fintek_8250_suspend(struct pnp_dev *dev, pm_message_t state)
-{
- struct fintek_8250 *pdata = pnp_get_drvdata(dev);
-
- if (!pdata)
- return -ENODEV;
- serial8250_suspend_port(pdata->line);
- return 0;
-}
-
-static int fintek_8250_resume(struct pnp_dev *dev)
-{
- struct fintek_8250 *pdata = pnp_get_drvdata(dev);
-
- if (!pdata)
- return -ENODEV;
- serial8250_resume_port(pdata->line);
- return 0;
-}
-#else
-#define fintek_8250_suspend NULL
-#define fintek_8250_resume NULL
-#endif /* CONFIG_PM */
-
-static const struct pnp_device_id fintek_dev_table[] = {
- /* Qtechnology Panel PC / IO1000 */
- { "PNP0501"},
- {}
-};
-
-MODULE_DEVICE_TABLE(pnp, fintek_dev_table);
-
-static struct pnp_driver fintek_8250_driver = {
- .name = DRIVER_NAME,
- .probe = fintek_8250_probe,
- .remove = fintek_8250_remove,
- .suspend = fintek_8250_suspend,
- .resume = fintek_8250_resume,
- .id_table = fintek_dev_table,
-};
-
-module_pnp_driver(fintek_8250_driver);
-MODULE_DESCRIPTION("Fintek F812164 module");
-MODULE_AUTHOR("Ricardo Ribalda <ricardo.ribalda@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250/8250_mid.c b/drivers/tty/serial/8250/8250_mid.c
index ed489880e..86379a79a 100644
--- a/drivers/tty/serial/8250/8250_mid.c
+++ b/drivers/tty/serial/8250/8250_mid.c
@@ -9,9 +9,10 @@
* published by the Free Software Foundation.
*/
-#include <linux/rational.h>
+#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/rational.h>
#include <linux/dma/hsu.h>
#include <linux/8250_pci.h>
@@ -79,7 +80,11 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p)
struct pci_dev *pdev = to_pci_dev(p->dev);
int index = PCI_FUNC(pdev->devfn);
- /* Currently no support for HSU port0 */
+ /*
+ * Device 0000:00:04.0 is not a real HSU port. It provides a global
+ * register set for all HSU ports, although it has the same PCI ID.
+ * Skip it here.
+ */
if (index-- == 0)
return -ENODEV;
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index c7ed3d2bc..38963d7bc 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -29,7 +29,7 @@ struct of_serial_info {
};
#ifdef CONFIG_ARCH_TEGRA
-void tegra_serial_handle_break(struct uart_port *p)
+static void tegra_serial_handle_break(struct uart_port *p)
{
unsigned int status, tmout = 10000;
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 6f760510e..2c44c792d 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -115,6 +115,12 @@ struct omap8250_priv {
bool rx_dma_broken;
};
+#ifdef CONFIG_SERIAL_8250_DMA
+static void omap_8250_rx_dma_flush(struct uart_8250_port *p);
+#else
+static inline void omap_8250_rx_dma_flush(struct uart_8250_port *p) { }
+#endif
+
static u32 uart_read(struct uart_8250_port *up, u32 reg)
{
return readl(up->port.membase + (reg << up->port.regshift));
@@ -635,7 +641,7 @@ static int omap_8250_startup(struct uart_port *port)
serial_out(up, UART_OMAP_WER, priv->wer);
if (up->dma)
- up->dma->rx_dma(up, 0);
+ up->dma->rx_dma(up);
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
@@ -654,7 +660,7 @@ static void omap_8250_shutdown(struct uart_port *port)
flush_work(&priv->qos_work);
if (up->dma)
- up->dma->rx_dma(up, UART_IIR_RX_TIMEOUT);
+ omap_8250_rx_dma_flush(up);
pm_runtime_get_sync(port->dev);
@@ -742,9 +748,9 @@ static void omap_8250_unthrottle(struct uart_port *port)
}
#ifdef CONFIG_SERIAL_8250_DMA
-static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir);
+static int omap_8250_rx_dma(struct uart_8250_port *p);
-static void __dma_rx_do_complete(struct uart_8250_port *p, bool error)
+static void __dma_rx_do_complete(struct uart_8250_port *p)
{
struct omap8250_priv *priv = p->port.private_data;
struct uart_8250_dma *dma = p->dma;
@@ -754,9 +760,6 @@ static void __dma_rx_do_complete(struct uart_8250_port *p, bool error)
unsigned long flags;
int ret;
- dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
- dma->rx_size, DMA_FROM_DEVICE);
-
spin_lock_irqsave(&priv->rx_dma_lock, flags);
if (!dma->rx_running)
@@ -764,7 +767,6 @@ static void __dma_rx_do_complete(struct uart_8250_port *p, bool error)
dma->rx_running = 0;
dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
- dmaengine_terminate_all(dma->rxchan);
count = dma->rx_size - state.residue;
@@ -775,15 +777,13 @@ static void __dma_rx_do_complete(struct uart_8250_port *p, bool error)
unlock:
spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
- if (!error)
- omap_8250_rx_dma(p, 0);
-
tty_flip_buffer_push(tty_port);
}
static void __dma_rx_complete(void *param)
{
- __dma_rx_do_complete(param, false);
+ __dma_rx_do_complete(param);
+ omap_8250_rx_dma(param);
}
static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
@@ -806,10 +806,11 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
- __dma_rx_do_complete(p, true);
+ __dma_rx_do_complete(p);
+ dmaengine_terminate_all(dma->rxchan);
}
-static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
+static int omap_8250_rx_dma(struct uart_8250_port *p)
{
struct omap8250_priv *priv = p->port.private_data;
struct uart_8250_dma *dma = p->dma;
@@ -817,35 +818,6 @@ static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
struct dma_async_tx_descriptor *desc;
unsigned long flags;
- switch (iir & 0x3f) {
- case UART_IIR_RLSI:
- /* 8250_core handles errors and break interrupts */
- omap_8250_rx_dma_flush(p);
- return -EIO;
- case UART_IIR_RX_TIMEOUT:
- /*
- * If RCVR FIFO trigger level was not reached, complete the
- * transfer and let 8250_core copy the remaining data.
- */
- omap_8250_rx_dma_flush(p);
- return -ETIMEDOUT;
- case UART_IIR_RDI:
- /*
- * The OMAP UART is a special BEAST. If we receive RDI we _have_
- * a DMA transfer programmed but it didn't work. One reason is
- * that we were too slow and there were too many bytes in the
- * FIFO, the UART counted wrong and never kicked the DMA engine
- * to do anything. That means once we receive RDI on OMAP then
- * the DMA won't do anything soon so we have to cancel the DMA
- * transfer and purge the FIFO manually.
- */
- omap_8250_rx_dma_flush(p);
- return -ETIMEDOUT;
-
- default:
- break;
- }
-
if (priv->rx_dma_broken)
return -EINVAL;
@@ -868,9 +840,6 @@ static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
dma->rx_cookie = dmaengine_submit(desc);
- dma_sync_single_for_device(dma->rxchan->device->dev, dma->rx_addr,
- dma->rx_size, DMA_FROM_DEVICE);
-
dma_async_issue_pending(dma->rxchan);
out:
spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
@@ -1022,6 +991,18 @@ err:
return ret;
}
+static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
+{
+ switch (iir & 0x3f) {
+ case UART_IIR_RLSI:
+ case UART_IIR_RX_TIMEOUT:
+ case UART_IIR_RDI:
+ omap_8250_rx_dma_flush(up);
+ return true;
+ }
+ return omap_8250_rx_dma(up);
+}
+
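
With the IIR decode lifted out of omap_8250_rx_dma(), the new handle_rx_dma() above flushes the transfer for all three "data is sitting in the FIFO" sources and returns true so the ISR drains via serial8250_rx_chars(); any other source (re)arms RX DMA. A compact model of the decision (IIR values as in serial_reg.h, the rest are stubs):

    #include <stdbool.h>
    #include <stdio.h>

    #define UART_IIR_RLSI        0x06   /* line status: error/break */
    #define UART_IIR_RDI         0x04   /* data ready; DMA never started */
    #define UART_IIR_RX_TIMEOUT  0x0c   /* FIFO below trigger level */

    static void rx_dma_flush(void) { puts("flush DMA, PIO drains"); }
    static bool rx_dma_arm(void)   { puts("arm RX DMA"); return false; }

    /* true => caller must read the FIFO by hand. */
    static bool handle_rx_dma(unsigned int iir)
    {
        switch (iir & 0x3f) {
        case UART_IIR_RLSI:
        case UART_IIR_RX_TIMEOUT:
        case UART_IIR_RDI:
            rx_dma_flush();
            return true;
        }
        return rx_dma_arm();
    }

    int main(void)
    {
        handle_rx_dma(UART_IIR_RX_TIMEOUT);  /* flush + PIO */
        handle_rx_dma(0);                    /* re-arm DMA */
        return 0;
    }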
/*
* This is mostly serial8250_handle_irq(). We have a slightly different DMA
 * hook for RX/TX and need different logic for them in the ISR. Therefore we
@@ -1033,7 +1014,6 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
unsigned char status;
unsigned long flags;
u8 iir;
- int dma_err = 0;
serial8250_rpm_get(up);
@@ -1048,11 +1028,9 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
status = serial_port_in(port, UART_LSR);
if (status & (UART_LSR_DR | UART_LSR_BI)) {
-
- dma_err = omap_8250_rx_dma(up, iir);
- if (dma_err) {
+ if (handle_rx_dma(up, iir)) {
status = serial8250_rx_chars(up, status);
- omap_8250_rx_dma(up, 0);
+ omap_8250_rx_dma(up);
}
}
serial8250_modem_status(up);
@@ -1066,8 +1044,7 @@ static int omap_8250_dma_handle_irq(struct uart_port *port)
 * try again due to an earlier failure which
* might have been resolved by now.
*/
- dma_err = omap_8250_tx_dma(up);
- if (dma_err)
+ if (omap_8250_tx_dma(up))
serial8250_tx_chars(up);
}
}
@@ -1084,7 +1061,7 @@ static bool the_no_dma_filter_fn(struct dma_chan *chan, void *param)
#else
-static inline int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
+static inline int omap_8250_rx_dma(struct uart_8250_port *p)
{
return -EINVAL;
}
@@ -1395,7 +1372,7 @@ static int omap8250_runtime_suspend(struct device *dev)
}
if (up->dma && up->dma->rxchan)
- omap_8250_rx_dma(up, UART_IIR_RX_TIMEOUT);
+ omap_8250_rx_dma_flush(up);
priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
schedule_work(&priv->qos_work);
@@ -1407,20 +1384,18 @@ static int omap8250_runtime_resume(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up;
- int loss_cntx;
/* In case runtime-pm tries this before we are setup */
if (!priv)
return 0;
up = serial8250_get_port(priv->line);
- loss_cntx = omap8250_lost_context(up);
- if (loss_cntx)
+ if (omap8250_lost_context(up))
omap8250_restore_regs(up);
if (up->dma && up->dma->rxchan)
- omap_8250_rx_dma(up, 0);
+ omap_8250_rx_dma(up);
priv->latency = priv->calc_latency;
schedule_work(&priv->qos_work);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 4eedd1da3..8dd250fbd 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1416,6 +1416,17 @@ static bool byt_dma_filter(struct dma_chan *chan, void *param)
return true;
}
+static unsigned int
+byt_get_mctrl(struct uart_port *port)
+{
+ unsigned int ret = serial8250_do_get_mctrl(port);
+
+ /* Force DCD and DSR signals to permanently be reported as active. */
+ ret |= TIOCM_CAR | TIOCM_DSR;
+
+ return ret;
+}
+
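
byt_get_mctrl() is the first user of a per-port get_mctrl override (wired into the generic code in the 8250_port.c hunks further down): on Baytrail the DCD and DSR inputs are not connected, so the quirk takes the generic read and forces both bits on, sparing the core a phantom carrier loss. A tiny stand-alone model (TIOCM_* values as in the termios UAPI):

    #include <stdio.h>

    #define TIOCM_CAR 0x040   /* DCD */
    #define TIOCM_DSR 0x100

    static unsigned int do_get_mctrl(void)
    {
        return 0;             /* the unwired inputs read back idle */
    }

    /* Report DCD/DSR as permanently asserted on top of the real read. */
    static unsigned int byt_style_get_mctrl(void)
    {
        return do_get_mctrl() | TIOCM_CAR | TIOCM_DSR;
    }

    int main(void)
    {
        printf("mctrl = %#x\n", byt_style_get_mctrl());
        return 0;
    }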
static int
byt_serial_setup(struct serial_private *priv,
const struct pciserial_board *board,
@@ -1457,13 +1468,13 @@ byt_serial_setup(struct serial_private *priv,
return -EINVAL;
}
- rx_param->src_master = 1;
- rx_param->dst_master = 0;
+ rx_param->m_master = 0;
+ rx_param->p_master = 1;
dma->rxconf.src_maxburst = 16;
- tx_param->src_master = 1;
- tx_param->dst_master = 0;
+ tx_param->m_master = 0;
+ tx_param->p_master = 1;
dma->txconf.dst_maxburst = 16;
@@ -1480,6 +1491,7 @@ byt_serial_setup(struct serial_private *priv,
port->port.type = PORT_16550A;
port->port.flags = (port->port.flags | UPF_FIXED_PORT | UPF_FIXED_TYPE);
port->port.set_termios = byt_set_termios;
+ port->port.get_mctrl = byt_get_mctrl;
port->port.fifosize = 64;
port->tx_loadsz = 64;
port->dma = dma;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 00ad2637b..d4036038a 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1315,6 +1315,13 @@ static void autoconfig(struct uart_8250_port *up)
out_lock:
spin_unlock_irqrestore(&port->lock, flags);
+
+ /*
+ * Check if the device is a Fintek F81216A
+ */
+ if (port->type == PORT_16550A)
+ fintek_8250_probe(up);
+
if (up->capabilities != old_capabilities) {
pr_warn("ttyS%d: detected caps %08x should be %08x\n",
serial_index(port), old_capabilities,
@@ -1788,6 +1795,18 @@ unsigned int serial8250_modem_status(struct uart_8250_port *up)
}
EXPORT_SYMBOL_GPL(serial8250_modem_status);
+static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
+{
+ switch (iir & 0x3f) {
+ case UART_IIR_RX_TIMEOUT:
+ serial8250_rx_dma_flush(up);
+ /* fall-through */
+ case UART_IIR_RLSI:
+ return true;
+ }
+ return up->dma->rx_dma(up);
+}
+
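
The core-8250 variant above differs from the OMAP one in a single detail: a receive timeout flushes the DMA buffer first and then deliberately falls through to the RLSI case, so both end in PIO, while every other source re-arms through up->dma->rx_dma(). A sketch of the fall-through together with the simplified ISR condition (stubs are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define UART_IIR_RLSI       0x06
    #define UART_IIR_RX_TIMEOUT 0x0c

    static void dma_flush(void) { puts("flush stale DMA buffer"); }
    static bool dma_rearm(void) { puts("re-arm RX DMA"); return false; }

    static bool handle_rx_dma(unsigned int iir)
    {
        switch (iir & 0x3f) {
        case UART_IIR_RX_TIMEOUT:
            dma_flush();
            /* fall through */
        case UART_IIR_RLSI:
            return true;         /* drain with PIO */
        }
        return dma_rearm();
    }

    int main(void)
    {
        bool have_dma = true;
        unsigned int iir = UART_IIR_RX_TIMEOUT;

        /* ISR shape: PIO only when no DMA, or DMA bowed out. */
        if (!have_dma || handle_rx_dma(iir))
            puts("serial8250_rx_chars()");
        return 0;
    }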
/*
* This handles the interrupt from one port.
*/
@@ -1796,7 +1815,6 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
unsigned char status;
unsigned long flags;
struct uart_8250_port *up = up_to_u8250p(port);
- int dma_err = 0;
if (iir & UART_IIR_NO_INT)
return 0;
@@ -1808,15 +1826,11 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
DEBUG_INTR("status = %x...", status);
if (status & (UART_LSR_DR | UART_LSR_BI)) {
- if (up->dma)
- dma_err = up->dma->rx_dma(up, iir);
-
- if (!up->dma || dma_err)
+ if (!up->dma || handle_rx_dma(up, iir))
status = serial8250_rx_chars(up, status);
}
serial8250_modem_status(up);
- if ((!up->dma || (up->dma && up->dma->tx_err)) &&
- (status & UART_LSR_THRE))
+ if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE))
serial8250_tx_chars(up);
spin_unlock_irqrestore(&port->lock, flags);
@@ -1882,7 +1896,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
}
-static unsigned int serial8250_get_mctrl(struct uart_port *port)
+unsigned int serial8250_do_get_mctrl(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
unsigned int status;
@@ -1903,6 +1917,14 @@ static unsigned int serial8250_get_mctrl(struct uart_port *port)
ret |= TIOCM_CTS;
return ret;
}
+EXPORT_SYMBOL_GPL(serial8250_do_get_mctrl);
+
+static unsigned int serial8250_get_mctrl(struct uart_port *port)
+{
+ if (port->get_mctrl)
+ return port->get_mctrl(port);
+ return serial8250_do_get_mctrl(port);
+}
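
serial8250_get_mctrl() is now a dispatcher: a board-installed port->get_mctrl hook (such as byt_get_mctrl() above) wins, otherwise the newly exported serial8250_do_get_mctrl() default runs, the same override-with-default pattern the driver already uses for set_termios and set_mctrl. A minimal model of the pattern (types pared down):

    #include <stdio.h>

    struct uart_port;
    typedef unsigned int (*get_mctrl_fn)(struct uart_port *);

    struct uart_port {
        get_mctrl_fn get_mctrl;   /* NULL => use the default */
    };

    static unsigned int do_get_mctrl(struct uart_port *port)
    {
        (void)port;
        return 0x020;             /* pretend only CTS reads back */
    }

    static unsigned int get_mctrl(struct uart_port *port)
    {
        if (port->get_mctrl)
            return port->get_mctrl(port);
        return do_get_mctrl(port);
    }

    /* A quirk typically starts from the default and patches it up. */
    static unsigned int quirk_get_mctrl(struct uart_port *port)
    {
        return do_get_mctrl(port) | 0x040;
    }

    int main(void)
    {
        struct uart_port plain  = { 0 };
        struct uart_port quirky = { quirk_get_mctrl };

        printf("%#x %#x\n", get_mctrl(&plain), get_mctrl(&quirky));
        return 0;
    }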
void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index 1b7bd2655..efd1f9c04 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -209,7 +209,7 @@ static int uniphier_uart_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "failed to get IRQ number");
+ dev_err(dev, "failed to get IRQ number\n");
return irq;
}
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 4d7cb9c04..e46761d20 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -57,6 +57,18 @@ config SERIAL_8250_PNP
This builds standard PNP serial support. You may be able to
disable this feature if you only need legacy serial support.
+config SERIAL_8250_FINTEK
+ bool "Support for Fintek F81216A LPC to 4 UART RS485 API"
+ depends on SERIAL_8250
+ ---help---
+ Selecting this option will add support for the RS485 capabilities
+ of the Fintek F81216A LPC to 4 UART.
+
+ If this option is not selected the device will be configured as a
+ standard 16550A serial port.
+
+ If unsure, say N.
+
config SERIAL_8250_CONSOLE
bool "Console on 8250/16550 and compatible serial port"
depends on SERIAL_8250=y
@@ -358,14 +370,6 @@ config SERIAL_8250_OMAP_TTYO_FIXUP
not booting kernel because the serial console remains silent in case
they forgot to update the command line.
-config SERIAL_8250_FINTEK
- tristate "Support for Fintek F81216A LPC to 4 UART"
- depends on SERIAL_8250 && PNP
- help
- Selecting this option will add support for the Fintek F81216A
- LPC to 4 UART. This device has some RS485 functionality not available
- through the PNP driver. If unsure, say N.
-
config SERIAL_8250_LPC18XX
tristate "NXP LPC18xx/43xx serial port support"
depends on SERIAL_8250 && OF && (ARCH_LPC18XX || COMPILE_TEST)
@@ -398,8 +402,10 @@ config SERIAL_8250_INGENIC
its UARTs, say Y to this option. If unsure, say N.
config SERIAL_8250_MID
- tristate "Support for serial ports on Intel MID platforms"
+ tristate "Support for serial ports on Intel MID platforms" if EXPERT
+ default SERIAL_8250
depends on SERIAL_8250 && PCI
+ depends on X86 || COMPILE_TEST
select HSU_DMA if SERIAL_8250_DMA
select HSU_DMA_PCI if (HSU_DMA && X86_INTEL_MID)
select RATIONAL
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index c9a2d6ed8..367d403d2 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_SERIAL_8250) += 8250.o 8250_base.o
8250-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
8250_base-y := 8250_port.o
8250_base-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o
+8250_base-$(CONFIG_SERIAL_8250_FINTEK) += 8250_fintek.o
obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
@@ -23,7 +24,6 @@ obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o
obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
obj-$(CONFIG_SERIAL_8250_EM) += 8250_em.o
obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o
-obj-$(CONFIG_SERIAL_8250_FINTEK) += 8250_fintek.o
obj-$(CONFIG_SERIAL_8250_LPC18XX) += 8250_lpc18xx.o
obj-$(CONFIG_SERIAL_8250_MT6577) += 8250_mtk.o
obj-$(CONFIG_SERIAL_8250_UNIPHIER) += 8250_uniphier.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 13d4ed6ca..7e3a58c8b 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -213,6 +213,7 @@ config SERIAL_MESON_CONSOLE
bool "Support for console on meson"
depends on SERIAL_MESON=y
select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
help
Say Y here if you wish to use an Amlogic MesonX UART as the
system console (the system console is the device which
@@ -900,6 +901,27 @@ config SERIAL_SGI_L1_CONSOLE
controller serial port as your console (you want this!),
say Y. Otherwise, say N.
+config SERIAL_PIC32
+ tristate "Microchip PIC32 serial support"
+ depends on MACH_PIC32
+ select SERIAL_CORE
+ help
+ If you have a PIC32, this driver supports the serial ports.
+
+ Say Y or M to use PIC32 serial ports, otherwise say N. Note that
+	  to use a serial port as a console, this must be included in the kernel
+ not as a module.
+
+config SERIAL_PIC32_CONSOLE
+ bool "PIC32 serial console support"
+ depends on SERIAL_PIC32
+ select SERIAL_CORE_CONSOLE
+ help
+	  If you have a PIC32, this driver supports putting a console on one
+ of the serial ports.
+
+ Say Y to use the PIC32 console, otherwise say N.
+
config SERIAL_MPC52xx
tristate "Freescale MPC52xx/MPC512x family PSC serial support"
depends on PPC_MPC52xx || PPC_MPC512x
@@ -1383,11 +1405,12 @@ config SERIAL_PCH_UART_CONSOLE
config SERIAL_MXS_AUART
tristate "MXS AUART support"
depends on HAS_DMA
- depends on ARCH_MXS || COMPILE_TEST
+ depends on ARCH_MXS || MACH_ASM9260 || COMPILE_TEST
select SERIAL_CORE
select SERIAL_MCTRL_GPIO if GPIOLIB
help
- This driver supports the MXS Application UART (AUART) port.
+ This driver supports the MXS and Alphascale ASM9260 Application
+ UART (AUART) port.
config SERIAL_MXS_AUART_CONSOLE
bool "MXS AUART console support"
@@ -1446,6 +1469,19 @@ config SERIAL_EFM32_UART
This driver supports the USART and UART ports on
Energy Micro's efm32 SoCs.
+config SERIAL_MPS2_UART_CONSOLE
+ bool "MPS2 UART console support"
+ depends on SERIAL_MPS2_UART
+ select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
+
+config SERIAL_MPS2_UART
+ bool "MPS2 UART port"
+ depends on ARM || COMPILE_TEST
+ select SERIAL_CORE
+ help
+	  This driver supports the UART ports on ARM MPS2.
+
config SERIAL_EFM32_UART_CONSOLE
bool "EFM32 UART/USART console support"
depends on SERIAL_EFM32_UART=y
@@ -1604,6 +1640,7 @@ config SERIAL_STM32_CONSOLE
config SERIAL_MVEBU_UART
bool "Marvell EBU serial port support"
+ depends on ARCH_MVEBU || COMPILE_TEST
select SERIAL_CORE
help
This driver is for Marvell EBU SoC's UART. If you have a machine
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 8c261adac..1278d376d 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -91,6 +91,8 @@ obj-$(CONFIG_SERIAL_MEN_Z135) += men_z135_uart.o
obj-$(CONFIG_SERIAL_SPRD) += sprd_serial.o
obj-$(CONFIG_SERIAL_STM32) += stm32-usart.o
obj-$(CONFIG_SERIAL_MVEBU_UART) += mvebu-uart.o
+obj-$(CONFIG_SERIAL_PIC32) += pic32_uart.o
+obj-$(CONFIG_SERIAL_MPS2_UART) += mps2-uart.o
# GPIOLIB helpers for modem control lines
obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 7c198e0a3..1b7331e40 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -121,6 +121,7 @@ static struct vendor_data vendor_arm = {
static struct vendor_data vendor_sbsa = {
.reg_offset = pl011_std_offsets,
+ .access_32b = true,
.oversampling = false,
.dma_threshold = false,
.cts_event_workaround = false,
@@ -2359,7 +2360,7 @@ static int pl011_probe_dt_alias(int index, struct device *dev)
return ret;
ret = of_alias_get_id(np, "serial");
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
seen_dev_without_alias = true;
ret = index;
} else {
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index c0172bf54..315c84979 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -2599,7 +2599,7 @@ startup(struct e100_serial * info)
/* if it was already initialized, skip this */
- if (info->port.flags & ASYNC_INITIALIZED) {
+ if (tty_port_initialized(&info->port)) {
local_irq_restore(flags);
free_page(xmit_page);
return 0;
@@ -2703,7 +2703,7 @@ startup(struct e100_serial * info)
e100_rts(info, 1);
e100_dtr(info, 1);
- info->port.flags |= ASYNC_INITIALIZED;
+ tty_port_set_initialized(&info->port, 1);
local_irq_restore(flags);
return 0;
@@ -2745,7 +2745,7 @@ shutdown(struct e100_serial * info)
info->tr_running = 0;
}
- if (!(info->port.flags & ASYNC_INITIALIZED))
+ if (!tty_port_initialized(&info->port))
return;
#ifdef SERIAL_DEBUG_OPEN
@@ -2776,7 +2776,7 @@ shutdown(struct e100_serial * info)
if (info->port.tty)
set_bit(TTY_IO_ERROR, &info->port.tty->flags);
- info->port.flags &= ~ASYNC_INITIALIZED;
+ tty_port_set_initialized(&info->port, 0);
local_irq_restore(flags);
}
@@ -3273,9 +3273,9 @@ set_serial_info(struct e100_serial *info,
info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
check_and_exit:
- if (info->port.flags & ASYNC_INITIALIZED) {
+ if (tty_port_initialized(&info->port))
change_speed(info);
- } else
+ else
retval = startup(info);
return retval;
}
@@ -3445,7 +3445,7 @@ rs_ioctl(struct tty_struct *tty,
if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
(cmd != TIOCSERCONFIG) && (cmd != TIOCSERGWILD) &&
(cmd != TIOCSERSWILD) && (cmd != TIOCSERGSTRUCT)) {
- if (tty->flags & (1 << TTY_IO_ERROR))
+ if (tty_io_error(tty))
return -EIO;
}
@@ -3628,7 +3628,7 @@ rs_close(struct tty_struct *tty, struct file * filp)
e100_disable_rx(info);
e100_disable_rx_irq(info);
- if (info->port.flags & ASYNC_INITIALIZED) {
+ if (tty_port_initialized(&info->port)) {
/*
* Before we drop DTR, make sure the UART transmitter
* has completely drained; this is especially
@@ -3648,8 +3648,8 @@ rs_close(struct tty_struct *tty, struct file * filp)
schedule_timeout_interruptible(info->port.close_delay);
wake_up_interruptible(&info->port.open_wait);
}
- info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
local_irq_restore(flags);
+ tty_port_set_active(&info->port, 0);
/* port closed */
@@ -3732,7 +3732,7 @@ rs_hangup(struct tty_struct *tty)
shutdown(info);
info->event = 0;
info->port.count = 0;
- info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
+ tty_port_set_active(&info->port, 0);
info->port.tty = NULL;
wake_up_interruptible(&info->port.open_wait);
}
@@ -3755,9 +3755,8 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
* If non-blocking mode is set, or the port is not enabled,
* then make the check up front and then exit.
*/
- if ((filp->f_flags & O_NONBLOCK) ||
- (tty->flags & (1 << TTY_IO_ERROR))) {
- info->port.flags |= ASYNC_NORMAL_ACTIVE;
+ if ((filp->f_flags & O_NONBLOCK) || tty_io_error(tty)) {
+ tty_port_set_active(&info->port, 1);
return 0;
}
@@ -3788,8 +3787,7 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
e100_dtr(info, 1);
local_irq_restore(flags);
set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) ||
- !(info->port.flags & ASYNC_INITIALIZED)) {
+ if (tty_hung_up_p(filp) || !tty_port_initialized(&info->port)) {
#ifdef SERIAL_DO_RESTART
if (info->port.flags & ASYNC_HUP_NOTIFY)
retval = -EAGAIN;
@@ -3826,7 +3824,7 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
#endif
if (retval)
return retval;
- info->port.flags |= ASYNC_NORMAL_ACTIVE;
+ tty_port_set_active(&info->port, 1);
return 0;
}
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 2085a6cfa..d38634624 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -651,7 +651,7 @@ static void ifx_spi_complete(void *ctx)
struct ifx_spi_device *ifx_dev = ctx;
int length;
int actual_length;
- unsigned char more;
+ unsigned char more = 0;
unsigned char cts;
int local_write_pending = 0;
int queue_length;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 231e7d5ca..0df2b1c09 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -114,6 +114,7 @@
#define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */
#define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */
#define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */
+#define UCR3_DTRDEN (1<<3) /* Data Terminal Ready Delta Enable. */
#define IMX21_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select */
#define UCR3_INVT (1<<1) /* Inverted Infrared transmission */
#define UCR3_BPEN (1<<0) /* Preset registers enable */
@@ -142,7 +143,7 @@
#define USR1_FRAMERR (1<<10) /* Frame error interrupt flag */
#define USR1_RRDY (1<<9) /* Receiver ready interrupt/dma flag */
#define USR1_AGTIM (1<<8) /* Ageing timer interrupt flag */
-#define USR1_TIMEOUT (1<<7) /* Receive timeout interrupt status */
+#define USR1_DTRD (1<<7) /* DTR Delta */
#define USR1_RXDS (1<<6) /* Receiver idle interrupt flag */
#define USR1_AIRINT (1<<5) /* Async IR wake interrupt flag */
#define USR1_AWAKE	(1<<4)	 /* Async wake interrupt flag */
@@ -361,6 +362,7 @@ static void imx_stop_tx(struct uart_port *port)
imx_port_rts_inactive(sport, &temp);
else
imx_port_rts_active(sport, &temp);
+ temp |= UCR2_RXEN;
writel(temp, port->membase + UCR2);
temp = readl(port->membase + UCR4);
@@ -568,6 +570,8 @@ static void imx_start_tx(struct uart_port *port)
imx_port_rts_inactive(sport, &temp);
else
imx_port_rts_active(sport, &temp);
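+	/*
+	 * On a half-duplex RS485 bus the receiver would pick up our own
+	 * transmission, so mute it while sending unless RX-during-TX was
+	 * explicitly requested.
+	 */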
+ if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
+ temp &= ~UCR2_RXEN;
writel(temp, port->membase + UCR2);
/* enable transmitter and shifter empty irq */
@@ -729,11 +733,61 @@ static void imx_dma_rxint(struct imx_port *sport)
spin_unlock_irqrestore(&sport->port.lock, flags);
}
+/*
+ * We have a modem side uart, so the meanings of RTS and CTS are inverted.
+ */
+static unsigned int imx_get_hwmctrl(struct imx_port *sport)
+{
+	unsigned int tmp = TIOCM_DSR;
+	unsigned usr1 = readl(sport->port.membase + USR1);
+	unsigned usr2 = readl(sport->port.membase + USR2);
+
+	if (usr1 & USR1_RTSS)
+		tmp |= TIOCM_CTS;
+
+	/* in DCE mode DCDIN is always 0 */
+	if (!(usr2 & USR2_DCDIN))
+		tmp |= TIOCM_CAR;
+
+	if (sport->dte_mode)
+		if (!(usr2 & USR2_RIIN))
+			tmp |= TIOCM_RI;
+
+ return tmp;
+}
+
+/*
+ * Handle any change of modem status signal since we were last called.
+ */
+static void imx_mctrl_check(struct imx_port *sport)
+{
+ unsigned int status, changed;
+
+ status = imx_get_hwmctrl(sport);
+ changed = status ^ sport->old_status;
+
+ if (changed == 0)
+ return;
+
+ sport->old_status = status;
+
+ if (changed & TIOCM_RI && status & TIOCM_RI)
+ sport->port.icount.rng++;
+ if (changed & TIOCM_DSR)
+ sport->port.icount.dsr++;
+ if (changed & TIOCM_CAR)
+ uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
+ if (changed & TIOCM_CTS)
+ uart_handle_cts_change(&sport->port, status & TIOCM_CTS);
+
+ wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
+}
+
static irqreturn_t imx_int(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
unsigned int sts;
unsigned int sts2;
+ irqreturn_t ret = IRQ_NONE;
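+	/*
+	 * Default to IRQ_NONE so the genirq core can spot a spurious or
+	 * stuck interrupt on this (possibly shared) line.
+	 */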
sts = readl(sport->port.membase + USR1);
sts2 = readl(sport->port.membase + USR2);
@@ -743,26 +797,47 @@ static irqreturn_t imx_int(int irq, void *dev_id)
imx_dma_rxint(sport);
else
imx_rxint(irq, dev_id);
+ ret = IRQ_HANDLED;
}
if ((sts & USR1_TRDY &&
readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN) ||
(sts2 & USR2_TXDC &&
- readl(sport->port.membase + UCR4) & UCR4_TCEN))
+ readl(sport->port.membase + UCR4) & UCR4_TCEN)) {
imx_txint(irq, dev_id);
+ ret = IRQ_HANDLED;
+ }
+
+	if (sts & USR1_DTRD) {
+		unsigned long flags;
+
+		writel(USR1_DTRD, sport->port.membase + USR1);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ imx_mctrl_check(sport);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
- if (sts & USR1_RTSD)
+ ret = IRQ_HANDLED;
+ }
+
+ if (sts & USR1_RTSD) {
imx_rtsint(irq, dev_id);
+ ret = IRQ_HANDLED;
+ }
- if (sts & USR1_AWAKE)
+ if (sts & USR1_AWAKE) {
writel(USR1_AWAKE, sport->port.membase + USR1);
+ ret = IRQ_HANDLED;
+ }
if (sts2 & USR2_ORE) {
sport->port.icount.overrun++;
writel(USR2_ORE, sport->port.membase + USR2);
+ ret = IRQ_HANDLED;
}
- return IRQ_HANDLED;
+ return ret;
}
/*
@@ -782,28 +857,6 @@ static unsigned int imx_tx_empty(struct uart_port *port)
return ret;
}
-/*
- * We have a modem side uart, so the meanings of RTS and CTS are inverted.
- */
-static unsigned int imx_get_hwmctrl(struct imx_port *sport)
-{
- unsigned int tmp = TIOCM_DSR;
- unsigned usr1 = readl(sport->port.membase + USR1);
-
- if (usr1 & USR1_RTSS)
- tmp |= TIOCM_CTS;
-
- /* in DCE mode DCDIN is always 0 */
- if (!(usr1 & USR2_DCDIN))
- tmp |= TIOCM_CAR;
-
- /* in DCE mode RIIN is always 0 */
- if (readl(sport->port.membase + USR2) & USR2_RIIN)
- tmp |= TIOCM_RI;
-
- return tmp;
-}
-
static unsigned int imx_get_mctrl(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
@@ -861,33 +914,6 @@ static void imx_break_ctl(struct uart_port *port, int break_state)
}
/*
- * Handle any change of modem status signal since we were last called.
- */
-static void imx_mctrl_check(struct imx_port *sport)
-{
- unsigned int status, changed;
-
- status = imx_get_hwmctrl(sport);
- changed = status ^ sport->old_status;
-
- if (changed == 0)
- return;
-
- sport->old_status = status;
-
- if (changed & TIOCM_RI)
- sport->port.icount.rng++;
- if (changed & TIOCM_DSR)
- sport->port.icount.dsr++;
- if (changed & TIOCM_CAR)
- uart_handle_dcd_change(&sport->port, status & TIOCM_CAR);
- if (changed & TIOCM_CTS)
- uart_handle_cts_change(&sport->port, status & TIOCM_CTS);
-
- wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
-}
-
-/*
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
@@ -1193,7 +1219,7 @@ static int imx_startup(struct uart_port *port)
/*
* Finally, clear and enable interrupts
*/
- writel(USR1_RTSD, sport->port.membase + USR1);
+ writel(USR1_RTSD | USR1_DTRD, sport->port.membase + USR1);
writel(USR2_ORE, sport->port.membase + USR2);
if (sport->dma_is_inited && !sport->dma_is_enabled)
@@ -1212,11 +1238,32 @@ static int imx_startup(struct uart_port *port)
temp |= (UCR2_RXEN | UCR2_TXEN);
if (!sport->have_rtscts)
temp |= UCR2_IRTS;
+ /*
+	 * Make sure the edge-sensitive RTS irq is disabled;
+	 * we're using RTSD instead.
+ */
+ if (!is_imx1_uart(sport))
+ temp &= ~UCR2_RTSEN;
writel(temp, sport->port.membase + UCR2);
if (!is_imx1_uart(sport)) {
temp = readl(sport->port.membase + UCR3);
- temp |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP;
+
+ /*
+ * The effect of RI and DCD differs depending on the UFCR_DCEDTE
+ * bit. In DCE mode they control the outputs, in DTE mode they
+	 * enable the respective irqs. The DCD irq cannot be cleared on
+	 * i.MX25 at least, so it's not usable and must be disabled. I
+	 * don't have test hardware to check whether RI has the same
+	 * problem, but I consider this likely, so it's disabled for
+	 * now, too.
+ */
+ temp |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP |
+ UCR3_DTRDEN | UCR3_RI | UCR3_DCD;
+
+ if (sport->dte_mode)
+ temp &= ~(UCR3_RI | UCR3_DCD);
+
writel(temp, sport->port.membase + UCR3);
}
@@ -1610,19 +1657,17 @@ static int imx_rs485_config(struct uart_port *port,
struct serial_rs485 *rs485conf)
{
struct imx_port *sport = (struct imx_port *)port;
+ unsigned long temp;
/* unimplemented */
rs485conf->delay_rts_before_send = 0;
rs485conf->delay_rts_after_send = 0;
- rs485conf->flags |= SER_RS485_RX_DURING_TX;
/* RTS is required to control the transmitter */
if (!sport->have_rtscts)
rs485conf->flags &= ~SER_RS485_ENABLED;
if (rs485conf->flags & SER_RS485_ENABLED) {
- unsigned long temp;
-
/* disable transmitter */
temp = readl(sport->port.membase + UCR2);
if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
@@ -1632,6 +1677,14 @@ static int imx_rs485_config(struct uart_port *port,
writel(temp, sport->port.membase + UCR2);
}
+ /* Make sure Rx is enabled in case Tx is active with Rx disabled */
+ if (!(rs485conf->flags & SER_RS485_ENABLED) ||
+ rs485conf->flags & SER_RS485_RX_DURING_TX) {
+ temp = readl(sport->port.membase + UCR2);
+ temp |= UCR2_RXEN;
+ writel(temp, sport->port.membase + UCR2);
+ }
+
port->rs485 = *rs485conf;
return 0;
@@ -1927,7 +1980,8 @@ static int serial_imx_probe_dt(struct imx_port *sport,
}
sport->port.line = ret;
- if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
+ if (of_get_property(np, "uart-has-rtscts", NULL) ||
+ of_get_property(np, "fsl,uart-has-rtscts", NULL) /* deprecated */)
sport->have_rtscts = 1;
if (of_get_property(np, "fsl,dte-mode", NULL))
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 3f98165b4..3f6e0ab72 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -17,7 +17,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -1036,7 +1036,7 @@ static SIMPLE_DEV_PM_OPS(max310x_pm_ops, max310x_suspend, max310x_resume);
static int max310x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
unsigned int val;
- struct max310x_port *s = container_of(chip, struct max310x_port, gpio);
+ struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
val = max310x_port_read(port, MAX310X_GPIODATA_REG);
@@ -1046,7 +1046,7 @@ static int max310x_gpio_get(struct gpio_chip *chip, unsigned offset)
static void max310x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- struct max310x_port *s = container_of(chip, struct max310x_port, gpio);
+ struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
max310x_port_update(port, MAX310X_GPIODATA_REG, 1 << (offset % 4),
@@ -1055,7 +1055,7 @@ static void max310x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
static int max310x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
- struct max310x_port *s = container_of(chip, struct max310x_port, gpio);
+ struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
max310x_port_update(port, MAX310X_GPIOCFG_REG, 1 << (offset % 4), 0);
@@ -1066,7 +1066,7 @@ static int max310x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
static int max310x_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
- struct max310x_port *s = container_of(chip, struct max310x_port, gpio);
+ struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
max310x_port_update(port, MAX310X_GPIODATA_REG, 1 << (offset % 4),
@@ -1183,7 +1183,7 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
s->gpio.base = -1;
s->gpio.ngpio = devtype->nr * 4;
s->gpio.can_sleep = 1;
- ret = gpiochip_add(&s->gpio);
+ ret = gpiochip_add_data(&s->gpio, s);
if (ret)
goto out_uart;
#endif
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 024445aa0..6aea0f4a9 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -481,18 +481,13 @@ static void meson_console_putchar(struct uart_port *port, int ch)
writel(ch, port->membase + AML_UART_WFIFO);
}
-static void meson_serial_console_write(struct console *co, const char *s,
- u_int count)
+static void meson_serial_port_write(struct uart_port *port, const char *s,
+ u_int count)
{
- struct uart_port *port;
unsigned long flags;
int locked;
u32 val, tmp;
- port = meson_ports[co->index];
- if (!port)
- return;
-
local_irq_save(flags);
if (port->sysrq) {
locked = 0;
@@ -516,6 +511,18 @@ static void meson_serial_console_write(struct console *co, const char *s,
local_irq_restore(flags);
}
+static void meson_serial_console_write(struct console *co, const char *s,
+ u_int count)
+{
+ struct uart_port *port;
+
+ port = meson_ports[co->index];
+ if (!port)
+ return;
+
+ meson_serial_port_write(port, s, count);
+}
+
static int meson_serial_console_setup(struct console *co, char *options)
{
struct uart_port *port;
@@ -554,6 +561,27 @@ static int __init meson_serial_console_init(void)
}
console_initcall(meson_serial_console_init);
+static void meson_serial_early_console_write(struct console *co,
+ const char *s,
+ u_int count)
+{
+ struct earlycon_device *dev = co->data;
+
+ meson_serial_port_write(&dev->port, s, count);
+}
+
+static int __init
+meson_serial_early_console_setup(struct earlycon_device *device, const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = meson_serial_early_console_write;
+ return 0;
+}
+OF_EARLYCON_DECLARE(meson, "amlogic,meson-uart",
+ meson_serial_early_console_setup);
+
#define MESON_SERIAL_CONSOLE (&meson_serial_console)
#else
#define MESON_SERIAL_CONSOLE NULL
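
With the OF_EARLYCON_DECLARE() above, the Meson UART also works as an
early boot console: passing plain "earlycon" on the kernel command line
makes the early console code resolve the device from the device tree's
/chosen/stdout-path and map the registers before calling
meson_serial_early_console_setup(), which is why the setup hook only has
to check port.membase and install the write callback.
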
diff --git a/drivers/tty/serial/mps2-uart.c b/drivers/tty/serial/mps2-uart.c
new file mode 100644
index 000000000..da9e27d3c
--- /dev/null
+++ b/drivers/tty/serial/mps2-uart.c
@@ -0,0 +1,625 @@
+/*
+ * Copyright (C) 2015 ARM Limited
+ *
+ * Author: Vladimir Murzin <vladimir.murzin@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * TODO: support for SysRq
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/console.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/tty_flip.h>
+#include <linux/types.h>
+
+#define SERIAL_NAME "ttyMPS"
+#define DRIVER_NAME "mps2-uart"
+#define MAKE_NAME(x) (DRIVER_NAME # x)
+
+#define UARTn_DATA 0x00
+
+#define UARTn_STATE 0x04
+#define UARTn_STATE_TX_FULL BIT(0)
+#define UARTn_STATE_RX_FULL BIT(1)
+#define UARTn_STATE_TX_OVERRUN BIT(2)
+#define UARTn_STATE_RX_OVERRUN BIT(3)
+
+#define UARTn_CTRL 0x08
+#define UARTn_CTRL_TX_ENABLE BIT(0)
+#define UARTn_CTRL_RX_ENABLE BIT(1)
+#define UARTn_CTRL_TX_INT_ENABLE BIT(2)
+#define UARTn_CTRL_RX_INT_ENABLE BIT(3)
+#define UARTn_CTRL_TX_OVERRUN_INT_ENABLE BIT(4)
+#define UARTn_CTRL_RX_OVERRUN_INT_ENABLE BIT(5)
+
+#define UARTn_INT 0x0c
+#define UARTn_INT_TX BIT(0)
+#define UARTn_INT_RX BIT(1)
+#define UARTn_INT_TX_OVERRUN BIT(2)
+#define UARTn_INT_RX_OVERRUN BIT(3)
+
+#define UARTn_BAUDDIV 0x10
+#define UARTn_BAUDDIV_MASK GENMASK(20, 0)
+
+/*
+ * Helpers to make typical enable/disable operations more readable.
+ */
+#define UARTn_CTRL_TX_GRP (UARTn_CTRL_TX_ENABLE |\
+ UARTn_CTRL_TX_INT_ENABLE |\
+ UARTn_CTRL_TX_OVERRUN_INT_ENABLE)
+
+#define UARTn_CTRL_RX_GRP (UARTn_CTRL_RX_ENABLE |\
+ UARTn_CTRL_RX_INT_ENABLE |\
+ UARTn_CTRL_RX_OVERRUN_INT_ENABLE)
+
+#define MPS2_MAX_PORTS 3
+
+struct mps2_uart_port {
+ struct uart_port port;
+ struct clk *clk;
+ unsigned int tx_irq;
+ unsigned int rx_irq;
+};
+
+static inline struct mps2_uart_port *to_mps2_port(struct uart_port *port)
+{
+ return container_of(port, struct mps2_uart_port, port);
+}
+
+static void mps2_uart_write8(struct uart_port *port, u8 val, unsigned int off)
+{
+ struct mps2_uart_port *mps_port = to_mps2_port(port);
+
+ writeb(val, mps_port->port.membase + off);
+}
+
+static u8 mps2_uart_read8(struct uart_port *port, unsigned int off)
+{
+ struct mps2_uart_port *mps_port = to_mps2_port(port);
+
+ return readb(mps_port->port.membase + off);
+}
+
+static void mps2_uart_write32(struct uart_port *port, u32 val, unsigned int off)
+{
+ struct mps2_uart_port *mps_port = to_mps2_port(port);
+
+ writel_relaxed(val, mps_port->port.membase + off);
+}
+
+static unsigned int mps2_uart_tx_empty(struct uart_port *port)
+{
+ u8 status = mps2_uart_read8(port, UARTn_STATE);
+
+ return (status & UARTn_STATE_TX_FULL) ? 0 : TIOCSER_TEMT;
+}
+
+static void mps2_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static unsigned int mps2_uart_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR;
+}
+
+static void mps2_uart_stop_tx(struct uart_port *port)
+{
+ u8 control = mps2_uart_read8(port, UARTn_CTRL);
+
+ control &= ~UARTn_CTRL_TX_INT_ENABLE;
+
+ mps2_uart_write8(port, control, UARTn_CTRL);
+}
+
+static void mps2_uart_tx_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+
+ while (!(mps2_uart_read8(port, UARTn_STATE) & UARTn_STATE_TX_FULL)) {
+ if (port->x_char) {
+ mps2_uart_write8(port, port->x_char, UARTn_DATA);
+ port->x_char = 0;
+ port->icount.tx++;
+ continue;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+ break;
+
+ mps2_uart_write8(port, xmit->buf[xmit->tail], UARTn_DATA);
+ xmit->tail = (xmit->tail + 1) % UART_XMIT_SIZE;
+ port->icount.tx++;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ mps2_uart_stop_tx(port);
+}
+
+static void mps2_uart_start_tx(struct uart_port *port)
+{
+ u8 control = mps2_uart_read8(port, UARTn_CTRL);
+
+ control |= UARTn_CTRL_TX_INT_ENABLE;
+
+ mps2_uart_write8(port, control, UARTn_CTRL);
+
+ /*
+	 * We've just unmasked the TX IRQ and are now slow-starting via
+ * polling; if there is enough data to fill up the internal
+ * write buffer in one go, the TX IRQ should assert, at which
+ * point we switch to fully interrupt-driven TX.
+ */
+
+ mps2_uart_tx_chars(port);
+}
+
+static void mps2_uart_stop_rx(struct uart_port *port)
+{
+ u8 control = mps2_uart_read8(port, UARTn_CTRL);
+
+ control &= ~UARTn_CTRL_RX_GRP;
+
+ mps2_uart_write8(port, control, UARTn_CTRL);
+}
+
+static void mps2_uart_break_ctl(struct uart_port *port, int ctl)
+{
+}
+
+static void mps2_uart_rx_chars(struct uart_port *port)
+{
+ struct tty_port *tport = &port->state->port;
+
+ while (mps2_uart_read8(port, UARTn_STATE) & UARTn_STATE_RX_FULL) {
+ u8 rxdata = mps2_uart_read8(port, UARTn_DATA);
+
+ port->icount.rx++;
+		tty_insert_flip_char(tport, rxdata, TTY_NORMAL);
+ }
+
+ tty_flip_buffer_push(tport);
+}
+
+static irqreturn_t mps2_uart_rxirq(int irq, void *data)
+{
+ struct uart_port *port = data;
+ u8 irqflag = mps2_uart_read8(port, UARTn_INT);
+
+ if (unlikely(!(irqflag & UARTn_INT_RX)))
+ return IRQ_NONE;
+
+ spin_lock(&port->lock);
+
+ mps2_uart_write8(port, UARTn_INT_RX, UARTn_INT);
+ mps2_uart_rx_chars(port);
+
+ spin_unlock(&port->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mps2_uart_txirq(int irq, void *data)
+{
+ struct uart_port *port = data;
+ u8 irqflag = mps2_uart_read8(port, UARTn_INT);
+
+ if (unlikely(!(irqflag & UARTn_INT_TX)))
+ return IRQ_NONE;
+
+ spin_lock(&port->lock);
+
+ mps2_uart_write8(port, UARTn_INT_TX, UARTn_INT);
+ mps2_uart_tx_chars(port);
+
+ spin_unlock(&port->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mps2_uart_oerrirq(int irq, void *data)
+{
+ irqreturn_t handled = IRQ_NONE;
+ struct uart_port *port = data;
+ u8 irqflag = mps2_uart_read8(port, UARTn_INT);
+
+ spin_lock(&port->lock);
+
+ if (irqflag & UARTn_INT_RX_OVERRUN) {
+ struct tty_port *tport = &port->state->port;
+
+ mps2_uart_write8(port, UARTn_INT_RX_OVERRUN, UARTn_INT);
+ port->icount.overrun++;
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
+ tty_flip_buffer_push(tport);
+ handled = IRQ_HANDLED;
+ }
+
+ /*
+	 * A TX overrun has never been seen in practice and should never
+	 * happen, since we check for room in the TX buffer before sending
+	 * data. The check is kept in case something suspicious happens.
+ */
+ if (irqflag & UARTn_INT_TX_OVERRUN) {
+ mps2_uart_write8(port, UARTn_INT_TX_OVERRUN, UARTn_INT);
+ handled = IRQ_HANDLED;
+ }
+
+ spin_unlock(&port->lock);
+
+ return handled;
+}
+
+static int mps2_uart_startup(struct uart_port *port)
+{
+ struct mps2_uart_port *mps_port = to_mps2_port(port);
+ u8 control = mps2_uart_read8(port, UARTn_CTRL);
+ int ret;
+
+ control &= ~(UARTn_CTRL_RX_GRP | UARTn_CTRL_TX_GRP);
+
+ mps2_uart_write8(port, control, UARTn_CTRL);
+
+ ret = request_irq(mps_port->rx_irq, mps2_uart_rxirq, 0,
+ MAKE_NAME(-rx), mps_port);
+ if (ret) {
+ dev_err(port->dev, "failed to register rxirq (%d)\n", ret);
+ return ret;
+ }
+
+ ret = request_irq(mps_port->tx_irq, mps2_uart_txirq, 0,
+ MAKE_NAME(-tx), mps_port);
+ if (ret) {
+ dev_err(port->dev, "failed to register txirq (%d)\n", ret);
+ goto err_free_rxirq;
+ }
+
+ ret = request_irq(port->irq, mps2_uart_oerrirq, IRQF_SHARED,
+ MAKE_NAME(-overrun), mps_port);
+
+ if (ret) {
+ dev_err(port->dev, "failed to register oerrirq (%d)\n", ret);
+ goto err_free_txirq;
+ }
+
+ control |= UARTn_CTRL_RX_GRP | UARTn_CTRL_TX_GRP;
+
+ mps2_uart_write8(port, control, UARTn_CTRL);
+
+ return 0;
+
+err_free_txirq:
+ free_irq(mps_port->tx_irq, mps_port);
+err_free_rxirq:
+ free_irq(mps_port->rx_irq, mps_port);
+
+ return ret;
+}
+
+static void mps2_uart_shutdown(struct uart_port *port)
+{
+ struct mps2_uart_port *mps_port = to_mps2_port(port);
+ u8 control = mps2_uart_read8(port, UARTn_CTRL);
+
+ control &= ~(UARTn_CTRL_RX_GRP | UARTn_CTRL_TX_GRP);
+
+ mps2_uart_write8(port, control, UARTn_CTRL);
+
+ free_irq(mps_port->rx_irq, mps_port);
+ free_irq(mps_port->tx_irq, mps_port);
+ free_irq(port->irq, mps_port);
+}
+
+static void
+mps2_uart_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned int baud, bauddiv;
+
+ termios->c_cflag &= ~(CRTSCTS | CMSPAR);
+ termios->c_cflag &= ~CSIZE;
+ termios->c_cflag |= CS8;
+ termios->c_cflag &= ~PARENB;
+ termios->c_cflag &= ~CSTOPB;
+
+ baud = uart_get_baud_rate(port, termios, old,
+ DIV_ROUND_CLOSEST(port->uartclk, UARTn_BAUDDIV_MASK),
+ DIV_ROUND_CLOSEST(port->uartclk, 16));
+
+ bauddiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+ mps2_uart_write32(port, bauddiv, UARTn_BAUDDIV);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+}
+
+static const char *mps2_uart_type(struct uart_port *port)
+{
+ return (port->type == PORT_MPS2UART) ? DRIVER_NAME : NULL;
+}
+
+static void mps2_uart_release_port(struct uart_port *port)
+{
+}
+
+static int mps2_uart_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void mps2_uart_config_port(struct uart_port *port, int type)
+{
+ if (type & UART_CONFIG_TYPE && !mps2_uart_request_port(port))
+ port->type = PORT_MPS2UART;
+}
+
+static int mps2_uart_verify_port(struct uart_port *port, struct serial_struct *serinfo)
+{
+ return -EINVAL;
+}
+
+static const struct uart_ops mps2_uart_pops = {
+ .tx_empty = mps2_uart_tx_empty,
+ .set_mctrl = mps2_uart_set_mctrl,
+ .get_mctrl = mps2_uart_get_mctrl,
+ .stop_tx = mps2_uart_stop_tx,
+ .start_tx = mps2_uart_start_tx,
+ .stop_rx = mps2_uart_stop_rx,
+ .break_ctl = mps2_uart_break_ctl,
+ .startup = mps2_uart_startup,
+ .shutdown = mps2_uart_shutdown,
+ .set_termios = mps2_uart_set_termios,
+ .type = mps2_uart_type,
+ .release_port = mps2_uart_release_port,
+ .request_port = mps2_uart_request_port,
+ .config_port = mps2_uart_config_port,
+ .verify_port = mps2_uart_verify_port,
+};
+
+static struct mps2_uart_port mps2_uart_ports[MPS2_MAX_PORTS];
+
+#ifdef CONFIG_SERIAL_MPS2_UART_CONSOLE
+static void mps2_uart_console_putchar(struct uart_port *port, int ch)
+{
+ while (mps2_uart_read8(port, UARTn_STATE) & UARTn_STATE_TX_FULL)
+ cpu_relax();
+
+ mps2_uart_write8(port, ch, UARTn_DATA);
+}
+
+static void mps2_uart_console_write(struct console *co, const char *s, unsigned int cnt)
+{
+ struct uart_port *port = &mps2_uart_ports[co->index].port;
+
+ uart_console_write(port, s, cnt, mps2_uart_console_putchar);
+}
+
+static int mps2_uart_console_setup(struct console *co, char *options)
+{
+ struct mps2_uart_port *mps_port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index < 0 || co->index >= MPS2_MAX_PORTS)
+ return -ENODEV;
+
+ mps_port = &mps2_uart_ports[co->index];
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&mps_port->port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver mps2_uart_driver;
+
+static struct console mps2_uart_console = {
+ .name = SERIAL_NAME,
+ .device = uart_console_device,
+ .write = mps2_uart_console_write,
+ .setup = mps2_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &mps2_uart_driver,
+};
+
+#define MPS2_SERIAL_CONSOLE (&mps2_uart_console)
+
+static void mps2_early_putchar(struct uart_port *port, int ch)
+{
+ while (readb(port->membase + UARTn_STATE) & UARTn_STATE_TX_FULL)
+ cpu_relax();
+
+ writeb((unsigned char)ch, port->membase + UARTn_DATA);
+}
+
+static void mps2_early_write(struct console *con, const char *s, unsigned int n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, mps2_early_putchar);
+}
+
+static int __init mps2_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = mps2_early_write;
+
+ return 0;
+}
+
+OF_EARLYCON_DECLARE(mps2, "arm,mps2-uart", mps2_early_console_setup);
+
+#else
+#define MPS2_SERIAL_CONSOLE NULL
+#endif
+
+static struct uart_driver mps2_uart_driver = {
+ .driver_name = DRIVER_NAME,
+ .dev_name = SERIAL_NAME,
+ .nr = MPS2_MAX_PORTS,
+ .cons = MPS2_SERIAL_CONSOLE,
+};
+
+static struct mps2_uart_port *mps2_of_get_port(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int id;
+
+ if (!np)
+ return NULL;
+
+ id = of_alias_get_id(np, "serial");
+ if (id < 0)
+ id = 0;
+
+ if (WARN_ON(id >= MPS2_MAX_PORTS))
+ return NULL;
+
+ mps2_uart_ports[id].port.line = id;
+ return &mps2_uart_ports[id];
+}
+
+static int mps2_init_port(struct mps2_uart_port *mps_port,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mps_port->port.membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mps_port->port.membase))
+ return PTR_ERR(mps_port->port.membase);
+
+ mps_port->port.mapbase = res->start;
+ mps_port->port.mapsize = resource_size(res);
+
+ mps_port->rx_irq = platform_get_irq(pdev, 0);
+ mps_port->tx_irq = platform_get_irq(pdev, 1);
+ mps_port->port.irq = platform_get_irq(pdev, 2);
+
+ mps_port->port.iotype = UPIO_MEM;
+ mps_port->port.flags = UPF_BOOT_AUTOCONF;
+ mps_port->port.fifosize = 1;
+ mps_port->port.ops = &mps2_uart_pops;
+ mps_port->port.dev = &pdev->dev;
+
+ mps_port->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mps_port->clk))
+ return PTR_ERR(mps_port->clk);
+
+ ret = clk_prepare_enable(mps_port->clk);
+ if (ret)
+ return ret;
+
+ mps_port->port.uartclk = clk_get_rate(mps_port->clk);
+
+ clk_disable_unprepare(mps_port->clk);
+
+ return ret;
+}
+
+static int mps2_serial_probe(struct platform_device *pdev)
+{
+ struct mps2_uart_port *mps_port;
+ int ret;
+
+ mps_port = mps2_of_get_port(pdev);
+ if (!mps_port)
+ return -ENODEV;
+
+ ret = mps2_init_port(mps_port, pdev);
+ if (ret)
+ return ret;
+
+ ret = uart_add_one_port(&mps2_uart_driver, &mps_port->port);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mps_port);
+
+ return 0;
+}
+
+static int mps2_serial_remove(struct platform_device *pdev)
+{
+ struct mps2_uart_port *mps_port = platform_get_drvdata(pdev);
+
+ uart_remove_one_port(&mps2_uart_driver, &mps_port->port);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mps2_match[] = {
+ { .compatible = "arm,mps2-uart", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mps2_match);
+#endif
+
+static struct platform_driver mps2_serial_driver = {
+ .probe = mps2_serial_probe,
+ .remove = mps2_serial_remove,
+
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(mps2_match),
+ },
+};
+
+static int __init mps2_uart_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&mps2_uart_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&mps2_serial_driver);
+ if (ret)
+ uart_unregister_driver(&mps2_uart_driver);
+
+ return ret;
+}
+module_init(mps2_uart_init);
+
+static void __exit mps2_uart_exit(void)
+{
+ platform_driver_unregister(&mps2_serial_driver);
+ uart_unregister_driver(&mps2_uart_driver);
+}
+module_exit(mps2_uart_exit);
+
+MODULE_AUTHOR("Vladimir Murzin <vladimir.murzin@arm.com>");
+MODULE_DESCRIPTION("MPS2 UART driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
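
The MPS2 UART has no fractional divider: mps2_uart_set_termios() above
simply programs UARTn_BAUDDIV with uartclk/baud, with the requested rate
clamped so the divisor stays between 16 and the 21-bit register mask. A
minimal user-space sketch of that arithmetic, assuming a hypothetical
25 MHz uartclk (the real rate comes from the clock framework):

	#include <stdio.h>

	/* same rounding DIV_ROUND_CLOSEST() does for unsigned operands */
	#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

	int main(void)
	{
		unsigned int uartclk = 25000000;	/* hypothetical clock */
		unsigned int baud = 115200;
		unsigned int bauddiv = DIV_ROUND_CLOSEST(uartclk, baud);

		/* prints bauddiv=217 actual=115207 (~0.007% error) */
		printf("bauddiv=%u actual=%u\n", bauddiv, uartclk / bauddiv);
		return 0;
	}
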
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 96d3ce8dc..b7d80bd57 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -861,37 +861,72 @@ struct msm_baud_map {
};
static const struct msm_baud_map *
-msm_find_best_baud(struct uart_port *port, unsigned int baud)
+msm_find_best_baud(struct uart_port *port, unsigned int baud,
+ unsigned long *rate)
{
- unsigned int i, divisor;
- const struct msm_baud_map *entry;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int divisor, result;
+ unsigned long target, old, best_rate = 0, diff, best_diff = ULONG_MAX;
+ const struct msm_baud_map *entry, *end, *best;
static const struct msm_baud_map table[] = {
- { 1536, 0x00, 1 },
- { 768, 0x11, 1 },
- { 384, 0x22, 1 },
- { 192, 0x33, 1 },
- { 96, 0x44, 1 },
- { 48, 0x55, 1 },
- { 32, 0x66, 1 },
- { 24, 0x77, 1 },
- { 16, 0x88, 1 },
- { 12, 0x99, 6 },
- { 8, 0xaa, 6 },
- { 6, 0xbb, 6 },
- { 4, 0xcc, 6 },
- { 3, 0xdd, 8 },
- { 2, 0xee, 16 },
{ 1, 0xff, 31 },
- { 0, 0xff, 31 },
+ { 2, 0xee, 16 },
+ { 3, 0xdd, 8 },
+ { 4, 0xcc, 6 },
+ { 6, 0xbb, 6 },
+ { 8, 0xaa, 6 },
+ { 12, 0x99, 6 },
+ { 16, 0x88, 1 },
+ { 24, 0x77, 1 },
+ { 32, 0x66, 1 },
+ { 48, 0x55, 1 },
+ { 96, 0x44, 1 },
+ { 192, 0x33, 1 },
+ { 384, 0x22, 1 },
+ { 768, 0x11, 1 },
+ { 1536, 0x00, 1 },
};
- divisor = uart_get_divisor(port, baud);
+ best = table; /* Default to smallest divider */
+ target = clk_round_rate(msm_port->clk, 16 * baud);
+ divisor = DIV_ROUND_CLOSEST(target, 16 * baud);
+
+ end = table + ARRAY_SIZE(table);
+ entry = table;
+ while (entry < end) {
+ if (entry->divisor <= divisor) {
+ result = target / entry->divisor / 16;
+ diff = abs(result - baud);
+
+ /* Keep track of best entry */
+ if (diff < best_diff) {
+ best_diff = diff;
+ best = entry;
+ best_rate = target;
+ }
- for (i = 0, entry = table; i < ARRAY_SIZE(table); i++, entry++)
- if (entry->divisor <= divisor)
- break;
+ if (result == baud)
+ break;
+ } else if (entry->divisor > divisor) {
+ old = target;
+ target = clk_round_rate(msm_port->clk, old + 1);
+ /*
+ * The rate didn't get any faster so we can't do
+ * better at dividing it down
+ */
+ if (target == old)
+ break;
- return entry; /* Default to smallest divider */
+ /* Start the divisor search over at this new rate */
+ entry = table;
+ divisor = DIV_ROUND_CLOSEST(target, 16 * baud);
+ continue;
+ }
+ entry++;
+ }
+
+ *rate = best_rate;
+ return best;
}
static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
@@ -900,22 +935,20 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
unsigned int rxstale, watermark, mask;
struct msm_port *msm_port = UART_TO_MSM(port);
const struct msm_baud_map *entry;
- unsigned long flags;
-
- entry = msm_find_best_baud(port, baud);
-
- msm_write(port, entry->code, UART_CSR);
-
- if (baud > 460800)
- port->uartclk = baud * 16;
+ unsigned long flags, rate;
flags = *saved_flags;
spin_unlock_irqrestore(&port->lock, flags);
- clk_set_rate(msm_port->clk, port->uartclk);
+ entry = msm_find_best_baud(port, baud, &rate);
+ clk_set_rate(msm_port->clk, rate);
+ baud = rate / 16 / entry->divisor;
spin_lock_irqsave(&port->lock, flags);
*saved_flags = flags;
+ port->uartclk = rate;
+
+ msm_write(port, entry->code, UART_CSR);
/* RX stale watermark */
rxstale = entry->rxstale;
@@ -1577,8 +1610,6 @@ static int msm_serial_probe(struct platform_device *pdev)
msm_port->pclk = devm_clk_get(&pdev->dev, "iface");
if (IS_ERR(msm_port->pclk))
return PTR_ERR(msm_port->pclk);
-
- clk_set_rate(msm_port->clk, 1843200);
}
port->uartclk = clk_get_rate(msm_port->clk);
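
A worked example of the new baud search, assuming the controller clock
can deliver the requested rate exactly: for 115200 baud the code asks
clk_round_rate() for 16 * 115200 = 1843200 Hz; the divisor then rounds
to 1, the first table entry {1, 0xff, 31} yields 1843200 / 1 / 16 =
115200 with zero error, and the loop terminates early. If the clock
could only round up to, say, 1920000 Hz (a hypothetical value), that
entry would give 120000 baud, so the search keeps raising the target
rate until it finds a rate/divisor pair with a smaller error or the
clock stops getting faster.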
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 0ff27818b..45b57c294 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -1,5 +1,7 @@
/*
* ***************************************************************************
+* Marvell Armada-3700 Serial Driver
+* Author: Wilson Ding <dingwei@marvell.com>
* Copyright (C) 2015 Marvell International Ltd.
* ***************************************************************************
* This program is free software: you can redistribute it and/or modify it
@@ -23,7 +25,6 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -299,6 +300,8 @@ static int mvebu_uart_startup(struct uart_port *port)
static void mvebu_uart_shutdown(struct uart_port *port)
{
writel(0, port->membase + UART_CTRL);
+
+ free_irq(port->irq, port);
}
static void mvebu_uart_set_termios(struct uart_port *port,
@@ -594,30 +597,18 @@ static int mvebu_uart_probe(struct platform_device *pdev)
return 0;
}
-static int mvebu_uart_remove(struct platform_device *pdev)
-{
- struct mvebu_uart_data *data = platform_get_drvdata(pdev);
-
- uart_remove_one_port(&mvebu_uart_driver, data->port);
- data->port->private_data = NULL;
- data->port->mapbase = 0;
- return 0;
-}
-
/* Match table for of_platform binding */
static const struct of_device_id mvebu_uart_of_match[] = {
{ .compatible = "marvell,armada-3700-uart", },
{}
};
-MODULE_DEVICE_TABLE(of, mvebu_uart_of_match);
static struct platform_driver mvebu_uart_platform_driver = {
.probe = mvebu_uart_probe,
- .remove = mvebu_uart_remove,
.driver = {
- .owner = THIS_MODULE,
.name = "mvebu-uart",
.of_match_table = of_match_ptr(mvebu_uart_of_match),
+ .suppress_bind_attrs = true,
},
};
@@ -635,16 +626,4 @@ static int __init mvebu_uart_init(void)
return ret;
}
-
-static void __exit mvebu_uart_exit(void)
-{
- platform_driver_unregister(&mvebu_uart_platform_driver);
- uart_unregister_driver(&mvebu_uart_driver);
-}
-
arch_initcall(mvebu_uart_init);
-module_exit(mvebu_uart_exit);
-
-MODULE_AUTHOR("Wilson Ding <dingwei@marvell.com>");
-MODULE_DESCRIPTION("Marvell Armada-3700 Serial Driver");
-MODULE_LICENSE("GPL");
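
Note the direction of the mvebu changes: the driver now registers at
arch_initcall time, can no longer be built as a module, and sets
suppress_bind_attrs so it cannot be unbound through sysfs. With no
unbind path left, the .remove callback, the module exit hook and the
MODULE_* boilerplate are dead code and are dropped, while the added
free_irq() in .shutdown plugs an interrupt leak across open/close
cycles.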
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index cd0414bbe..eb54e5c77 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1,17 +1,18 @@
/*
- * Freescale STMP37XX/STMP378X Application UART driver
+ * Application UART driver for:
+ * Freescale STMP37XX/STMP378X
+ * Alphascale ASM9260
*
* Author: dmitry pervushin <dimka@embeddedalley.com>
*
+ * Copyright 2014 Oleksij Rempel <linux@rempel-privat.de>
+ * Provide Alphascale ASM9260 support.
* Copyright 2008-2010 Freescale Semiconductor, Inc.
* Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
*/
#if defined(CONFIG_SERIAL_MXS_AUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -51,30 +52,16 @@
#define MXS_AUART_PORTS 5
#define MXS_AUART_FIFO_SIZE 16
+#define SET_REG 0x4
+#define CLR_REG 0x8
+#define TOG_REG 0xc
+
#define AUART_CTRL0 0x00000000
-#define AUART_CTRL0_SET 0x00000004
-#define AUART_CTRL0_CLR 0x00000008
-#define AUART_CTRL0_TOG 0x0000000c
#define AUART_CTRL1 0x00000010
-#define AUART_CTRL1_SET 0x00000014
-#define AUART_CTRL1_CLR 0x00000018
-#define AUART_CTRL1_TOG 0x0000001c
#define AUART_CTRL2 0x00000020
-#define AUART_CTRL2_SET 0x00000024
-#define AUART_CTRL2_CLR 0x00000028
-#define AUART_CTRL2_TOG 0x0000002c
#define AUART_LINECTRL 0x00000030
-#define AUART_LINECTRL_SET 0x00000034
-#define AUART_LINECTRL_CLR 0x00000038
-#define AUART_LINECTRL_TOG 0x0000003c
#define AUART_LINECTRL2 0x00000040
-#define AUART_LINECTRL2_SET 0x00000044
-#define AUART_LINECTRL2_CLR 0x00000048
-#define AUART_LINECTRL2_TOG 0x0000004c
#define AUART_INTR 0x00000050
-#define AUART_INTR_SET 0x00000054
-#define AUART_INTR_CLR 0x00000058
-#define AUART_INTR_TOG 0x0000005c
#define AUART_DATA 0x00000060
#define AUART_STAT 0x00000070
#define AUART_DEBUG 0x00000080
@@ -136,11 +123,301 @@
#define AUART_STAT_FERR (1 << 16)
#define AUART_STAT_RXCOUNT_MASK 0xffff
+/*
+ * Start of Alphascale asm9260 defines
+ * This list contains only differences of existing bits
+ * between imx2x and asm9260
+ */
+#define ASM9260_HW_CTRL0 0x0000
+/*
+ * RW. Tell the UART to execute the RX DMA Command. The
+ * UART will clear this bit at the end of receive execution.
+ */
+#define ASM9260_BM_CTRL0_RXDMA_RUN BIT(28)
+/* RW. 0 use FIFO for status register; 1 use DMA */
+#define ASM9260_BM_CTRL0_RXTO_SOURCE_STATUS BIT(25)
+/*
+ * RW. RX TIMEOUT Enable. Valid for FIFO and DMA.
+ * Warning: If this bit is set to 0, the RX timeout will not affect receive DMA
+ * operation. If this bit is set to 1, a receive timeout will cause the receive
+ * DMA logic to terminate by filling the remaining DMA bytes with garbage data.
+ */
+#define ASM9260_BM_CTRL0_RXTO_ENABLE BIT(24)
+/*
+ * RW. Receive Timeout Counter Value: number of 8-bit-time to wait before
+ * asserting timeout on the RX input. If the RXFIFO is not empty and the RX
+ * input is idle, then the watchdog counter will decrement each bit-time. Note
+ * 7-bit-time is added to the programmed value, so a value of zero will set
+ * the counter to 7-bit-time, a value of 0x1 gives 15-bit-time and so on. Also
+ * note that the counter is reloaded at the end of each frame, so if the frame
+ * is 10 bits long and the timeout counter value is zero, then timeout will
+ * occur (when FIFO is not empty) even if the RX input is not idle. The default
+ * value is 0x3 (31 bit-time).
+ */
+#define ASM9260_BM_CTRL0_RXTO_MASK (0xff << 16)
+/* TIMEOUT = (100*7+1)*(1/BAUD) */
+#define ASM9260_BM_CTRL0_DEFAULT_RXTIMEOUT (20 << 16)
+
+/* TX ctrl register */
+#define ASM9260_HW_CTRL1 0x0010
+/*
+ * RW. Tell the UART to execute the TX DMA Command. The
+ * UART will clear this bit at the end of transmit execution.
+ */
+#define ASM9260_BM_CTRL1_TXDMA_RUN BIT(28)
+
+#define ASM9260_HW_CTRL2 0x0020
+/*
+ * RW. Receive Interrupt FIFO Level Select.
+ * The trigger points for the receive interrupt are as follows:
+ * ONE_EIGHTHS = 0x0 Trigger on FIFO full to at least 2 of 16 entries.
+ * ONE_QUARTER = 0x1 Trigger on FIFO full to at least 4 of 16 entries.
+ * ONE_HALF = 0x2 Trigger on FIFO full to at least 8 of 16 entries.
+ * THREE_QUARTERS = 0x3 Trigger on FIFO full to at least 12 of 16 entries.
+ * SEVEN_EIGHTHS = 0x4 Trigger on FIFO full to at least 14 of 16 entries.
+ */
+#define ASM9260_BM_CTRL2_RXIFLSEL (7 << 20)
+#define ASM9260_BM_CTRL2_DEFAULT_RXIFLSEL (3 << 20)
+/* RW. Same as RXIFLSEL */
+#define ASM9260_BM_CTRL2_TXIFLSEL (7 << 16)
+#define ASM9260_BM_CTRL2_DEFAULT_TXIFLSEL (2 << 16)
+/* RW. Set DTR. When this bit is 1, the output is 0. */
+#define ASM9260_BM_CTRL2_DTR BIT(10)
+/* RW. Loop Back Enable */
+#define ASM9260_BM_CTRL2_LBE BIT(7)
+#define ASM9260_BM_CTRL2_PORT_ENABLE BIT(0)
+
+#define ASM9260_HW_LINECTRL 0x0030
+/*
+ * RW. Stick Parity Select. When bits 1, 2, and 7 of this register are set, the
+ * parity bit is transmitted and checked as a 0. When bits 1 and 7 are set,
+ * and bit 2 is 0, the parity bit is transmitted and checked as a 1. When this
+ * bit is cleared stick parity is disabled.
+ */
+#define ASM9260_BM_LCTRL_SPS BIT(7)
+/* RW. Word length */
+#define ASM9260_BM_LCTRL_WLEN (3 << 5)
+#define ASM9260_BM_LCTRL_CHRL_5 (0 << 5)
+#define ASM9260_BM_LCTRL_CHRL_6 (1 << 5)
+#define ASM9260_BM_LCTRL_CHRL_7 (2 << 5)
+#define ASM9260_BM_LCTRL_CHRL_8 (3 << 5)
+
+/*
+ * Interrupt register.
+ * Contains the interrupt enables and the interrupt status bits.
+ */
+#define ASM9260_HW_INTR 0x0040
+/* Tx FIFO EMPTY Raw Interrupt enable */
+#define ASM9260_BM_INTR_TFEIEN BIT(27)
+/* Overrun Error Interrupt Enable. */
+#define ASM9260_BM_INTR_OEIEN BIT(26)
+/* Break Error Interrupt Enable. */
+#define ASM9260_BM_INTR_BEIEN BIT(25)
+/* Parity Error Interrupt Enable. */
+#define ASM9260_BM_INTR_PEIEN BIT(24)
+/* Framing Error Interrupt Enable. */
+#define ASM9260_BM_INTR_FEIEN BIT(23)
+
+/* nUARTDSR Modem Interrupt Enable. */
+#define ASM9260_BM_INTR_DSRMIEN BIT(19)
+/* nUARTDCD Modem Interrupt Enable. */
+#define ASM9260_BM_INTR_DCDMIEN BIT(18)
+/* nUARTRI Modem Interrupt Enable. */
+#define ASM9260_BM_INTR_RIMIEN BIT(16)
+/* Auto-Baud Timeout */
+#define ASM9260_BM_INTR_ABTO BIT(13)
+#define ASM9260_BM_INTR_ABEO BIT(12)
+/* Tx FIFO EMPTY Raw Interrupt state */
+#define ASM9260_BM_INTR_TFEIS BIT(11)
+/* Overrun Error */
+#define ASM9260_BM_INTR_OEIS BIT(10)
+/* Break Error */
+#define ASM9260_BM_INTR_BEIS BIT(9)
+/* Parity Error */
+#define ASM9260_BM_INTR_PEIS BIT(8)
+/* Framing Error */
+#define ASM9260_BM_INTR_FEIS BIT(7)
+#define ASM9260_BM_INTR_DSRMIS BIT(3)
+#define ASM9260_BM_INTR_DCDMIS BIT(2)
+#define ASM9260_BM_INTR_RIMIS BIT(0)
+
+/*
+ * RW. In DMA mode, up to 4 Received/Transmit characters can be accessed at a
+ * time. In PIO mode, only one character can be accessed at a time. The status
+ * register contains the receive data flags and valid bits.
+ */
+#define ASM9260_HW_DATA 0x0050
+
+#define ASM9260_HW_STAT 0x0060
+/* RO. If 1, UARTAPP is present in this product. */
+#define ASM9260_BM_STAT_PRESENT BIT(31)
+/* RO. If 1, HISPEED is present in this product. */
+#define ASM9260_BM_STAT_HISPEED BIT(30)
+/* RO. Receive FIFO Full. */
+#define ASM9260_BM_STAT_RXFULL BIT(26)
+
+/* RO. The UART Debug Register contains the state of the DMA signals. */
+#define ASM9260_HW_DEBUG 0x0070
+/* DMA Command Run Status */
+#define ASM9260_BM_DEBUG_TXDMARUN BIT(5)
+#define ASM9260_BM_DEBUG_RXDMARUN BIT(4)
+/* DMA Command End Status */
+#define ASM9260_BM_DEBUG_TXCMDEND BIT(3)
+#define ASM9260_BM_DEBUG_RXCMDEND BIT(2)
+/* DMA Request Status */
+#define ASM9260_BM_DEBUG_TXDMARQ BIT(1)
+#define ASM9260_BM_DEBUG_RXDMARQ BIT(0)
+
+#define ASM9260_HW_ILPR 0x0080
+
+#define ASM9260_HW_RS485CTRL 0x0090
+/*
+ * RW. This bit reverses the polarity of the direction control signal on the RTS
+ * (or DTR) pin.
+ * If 0, the direction control pin will be driven to logic ‘0’ when the
+ * transmitter has data to be sent. It will be driven to logic ‘1’ after the
+ * last bit of data has been transmitted.
+ */
+#define ASM9260_BM_RS485CTRL_ONIV BIT(5)
+/* RW. Enable Auto Direction Control. */
+#define ASM9260_BM_RS485CTRL_DIR_CTRL BIT(4)
+/*
+ * RW. If 0 and DIR_CTRL = 1, pin RTS is used for direction control.
+ * If 1 and DIR_CTRL = 1, pin DTR is used for direction control.
+ */
+#define ASM9260_BM_RS485CTRL_PINSEL BIT(3)
+/* RW. Enable Auto Address Detect (AAD). */
+#define ASM9260_BM_RS485CTRL_AADEN BIT(2)
+/* RW. Disable receiver. */
+#define ASM9260_BM_RS485CTRL_RXDIS BIT(1)
+/* RW. Enable RS-485/EIA-485 Normal Multidrop Mode (NMM) */
+#define ASM9260_BM_RS485CTRL_RS485EN BIT(0)
+
+#define ASM9260_HW_RS485ADRMATCH 0x00a0
+/* Contains the address match value. */
+#define ASM9260_BM_RS485ADRMATCH_MASK (0xff << 0)
+
+#define ASM9260_HW_RS485DLY 0x00b0
+/*
+ * RW. Contains the direction control (RTS or DTR) delay value. This delay time
+ * is in periods of the baud clock.
+ */
+#define ASM9260_BM_RS485DLY_MASK (0xff << 0)
+
+#define ASM9260_HW_AUTOBAUD 0x00c0
+/* WO. Auto-baud time-out interrupt clear bit. */
+#define ASM9260_BM_AUTOBAUD_TO_INT_CLR BIT(9)
+/* WO. End of auto-baud interrupt clear bit. */
+#define ASM9260_BM_AUTOBAUD_EO_INT_CLR BIT(8)
+/* Restart in case of timeout (counter restarts at next UART Rx falling edge) */
+#define ASM9260_BM_AUTOBAUD_AUTORESTART BIT(2)
+/* Auto-baud mode select bit. 0 - Mode 0, 1 - Mode 1. */
+#define ASM9260_BM_AUTOBAUD_MODE BIT(1)
+/*
+ * Auto-baud run bit (auto-baud is running). This bit is automatically
+ * cleared after auto-baud completion.
+ */
+#define ASM9260_BM_AUTOBAUD_START BIT(0)
+
+#define ASM9260_HW_CTRL3 0x00d0
+#define ASM9260_BM_CTRL3_OUTCLK_DIV_MASK (0xffff << 16)
+/*
+ * RW. Provide clk over OUTCLK pin. In case of asm9260 it can be configured on
+ * pins 137 and 144.
+ */
+#define ASM9260_BM_CTRL3_MASTERMODE BIT(6)
+/* RW. Baud Rate Mode: 1 - Enable sync mode. 0 - async mode. */
+#define ASM9260_BM_CTRL3_SYNCMODE BIT(4)
+/* RW. 1 - MSB bit sent first; 0 - LSB bit first. */
+#define ASM9260_BM_CTRL3_MSBF BIT(2)
+/* RW. 1 - sample rate = 8 x Baudrate; 0 - sample rate = 16 x Baudrate. */
+#define ASM9260_BM_CTRL3_BAUD8 BIT(1)
+/* RW. 1 - Set word length to 9bit. 0 - use ASM9260_BM_LCTRL_WLEN */
+#define ASM9260_BM_CTRL3_9BIT BIT(0)
+
+#define ASM9260_HW_ISO7816_CTRL 0x00e0
+/* RW. Enable High Speed mode. */
+#define ASM9260_BM_ISO7816CTRL_HS BIT(12)
+/* Disable Successive Receive NACK */
+#define ASM9260_BM_ISO7816CTRL_DS_NACK BIT(8)
+#define ASM9260_BM_ISO7816CTRL_MAX_ITER_MASK (0xff << 4)
+/* Receive NACK Inhibit */
+#define ASM9260_BM_ISO7816CTRL_INACK BIT(3)
+#define ASM9260_BM_ISO7816CTRL_NEG_DATA BIT(2)
+/* RW. 1 - ISO7816 mode; 0 - USART mode */
+#define ASM9260_BM_ISO7816CTRL_ENABLE BIT(0)
+
+#define ASM9260_HW_ISO7816_ERRCNT 0x00f0
+/* Parity error counter. Will be cleared after reading */
+#define ASM9260_BM_ISO7816_NB_ERRORS_MASK (0xff << 0)
+
+#define ASM9260_HW_ISO7816_STATUS 0x0100
+/* Max number of Repetitions Reached */
+#define ASM9260_BM_ISO7816_STAT_ITERATION BIT(0)
+
+/* End of Alphascale asm9260 defines */
+
static struct uart_driver auart_driver;
enum mxs_auart_type {
IMX23_AUART,
IMX28_AUART,
+ ASM9260_AUART,
+};
+
+struct vendor_data {
+ const u16 *reg_offset;
+};
+
+enum {
+ REG_CTRL0,
+ REG_CTRL1,
+ REG_CTRL2,
+ REG_LINECTRL,
+ REG_LINECTRL2,
+ REG_INTR,
+ REG_DATA,
+ REG_STAT,
+ REG_DEBUG,
+ REG_VERSION,
+ REG_AUTOBAUD,
+
+ /* The size of the array - must be last */
+ REG_ARRAY_SIZE,
+};
+
+static const u16 mxs_asm9260_offsets[REG_ARRAY_SIZE] = {
+ [REG_CTRL0] = ASM9260_HW_CTRL0,
+ [REG_CTRL1] = ASM9260_HW_CTRL1,
+ [REG_CTRL2] = ASM9260_HW_CTRL2,
+ [REG_LINECTRL] = ASM9260_HW_LINECTRL,
+ [REG_INTR] = ASM9260_HW_INTR,
+ [REG_DATA] = ASM9260_HW_DATA,
+ [REG_STAT] = ASM9260_HW_STAT,
+ [REG_DEBUG] = ASM9260_HW_DEBUG,
+ [REG_AUTOBAUD] = ASM9260_HW_AUTOBAUD,
+};
+
+static const u16 mxs_stmp37xx_offsets[REG_ARRAY_SIZE] = {
+ [REG_CTRL0] = AUART_CTRL0,
+ [REG_CTRL1] = AUART_CTRL1,
+ [REG_CTRL2] = AUART_CTRL2,
+ [REG_LINECTRL] = AUART_LINECTRL,
+ [REG_LINECTRL2] = AUART_LINECTRL2,
+ [REG_INTR] = AUART_INTR,
+ [REG_DATA] = AUART_DATA,
+ [REG_STAT] = AUART_STAT,
+ [REG_DEBUG] = AUART_DEBUG,
+ [REG_VERSION] = AUART_VERSION,
+ [REG_AUTOBAUD] = AUART_AUTOBAUD,
+};
+
+static const struct vendor_data vendor_alphascale_asm9260 = {
+ .reg_offset = mxs_asm9260_offsets,
+};
+
+static const struct vendor_data vendor_freescale_stmp37xx = {
+ .reg_offset = mxs_stmp37xx_offsets,
};
struct mxs_auart_port {
@@ -153,8 +430,10 @@ struct mxs_auart_port {
unsigned long flags;
unsigned int mctrl_prev;
enum mxs_auart_type devtype;
+ const struct vendor_data *vendor;
struct clk *clk;
+ struct clk *clk_ahb;
struct device *dev;
/* for DMA */
@@ -174,6 +453,7 @@ struct mxs_auart_port {
static const struct platform_device_id mxs_auart_devtype[] = {
{ .name = "mxs-auart-imx23", .driver_data = IMX23_AUART },
{ .name = "mxs-auart-imx28", .driver_data = IMX28_AUART },
+ { .name = "as-auart-asm9260", .driver_data = ASM9260_AUART },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, mxs_auart_devtype);
@@ -185,6 +465,9 @@ static const struct of_device_id mxs_auart_dt_ids[] = {
}, {
.compatible = "fsl,imx23-auart",
.data = &mxs_auart_devtype[IMX23_AUART]
+ }, {
+ .compatible = "alphascale,asm9260-auart",
+ .data = &mxs_auart_devtype[ASM9260_AUART]
}, { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_auart_dt_ids);
@@ -194,11 +477,54 @@ static inline int is_imx28_auart(struct mxs_auart_port *s)
return s->devtype == IMX28_AUART;
}
+static inline int is_asm9260_auart(struct mxs_auart_port *s)
+{
+ return s->devtype == ASM9260_AUART;
+}
+
static inline bool auart_dma_enabled(struct mxs_auart_port *s)
{
return s->flags & MXS_AUART_DMA_ENABLED;
}
+static unsigned int mxs_reg_to_offset(const struct mxs_auart_port *uap,
+ unsigned int reg)
+{
+ return uap->vendor->reg_offset[reg];
+}
+
+static unsigned int mxs_read(const struct mxs_auart_port *uap,
+ unsigned int reg)
+{
+ void __iomem *addr = uap->port.membase + mxs_reg_to_offset(uap, reg);
+
+ return readl_relaxed(addr);
+}
+
+static void mxs_write(unsigned int val, struct mxs_auart_port *uap,
+ unsigned int reg)
+{
+ void __iomem *addr = uap->port.membase + mxs_reg_to_offset(uap, reg);
+
+ writel_relaxed(val, addr);
+}
+
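+/*
+ * The i.MX23/28 (and compatible ASM9260) register blocks provide
+ * set/clear/toggle shadows of each register: writing to reg + 0x4 sets
+ * the written bits, reg + 0x8 clears them and reg + 0xc toggles them,
+ * so simple bit updates need no read-modify-write sequence.
+ */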
+static void mxs_set(unsigned int val, struct mxs_auart_port *uap,
+ unsigned int reg)
+{
+ void __iomem *addr = uap->port.membase + mxs_reg_to_offset(uap, reg);
+
+ writel_relaxed(val, addr + SET_REG);
+}
+
+static void mxs_clr(unsigned int val, struct mxs_auart_port *uap,
+ unsigned int reg)
+{
+ void __iomem *addr = uap->port.membase + mxs_reg_to_offset(uap, reg);
+
+ writel_relaxed(val, addr + CLR_REG);
+}
+
static void mxs_auart_stop_tx(struct uart_port *u);
#define to_auart_port(u) container_of(u, struct mxs_auart_port, port)
@@ -295,19 +621,16 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)
}
- while (!(readl(s->port.membase + AUART_STAT) &
- AUART_STAT_TXFF)) {
+ while (!(mxs_read(s, REG_STAT) & AUART_STAT_TXFF)) {
if (s->port.x_char) {
s->port.icount.tx++;
- writel(s->port.x_char,
- s->port.membase + AUART_DATA);
+ mxs_write(s->port.x_char, s, REG_DATA);
s->port.x_char = 0;
continue;
}
if (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)) {
s->port.icount.tx++;
- writel(xmit->buf[xmit->tail],
- s->port.membase + AUART_DATA);
+ mxs_write(xmit->buf[xmit->tail], s, REG_DATA);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
} else
break;
@@ -316,11 +639,9 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)
uart_write_wakeup(&s->port);
if (uart_circ_empty(&(s->port.state->xmit)))
- writel(AUART_INTR_TXIEN,
- s->port.membase + AUART_INTR_CLR);
+ mxs_clr(AUART_INTR_TXIEN, s, REG_INTR);
else
- writel(AUART_INTR_TXIEN,
- s->port.membase + AUART_INTR_SET);
+ mxs_set(AUART_INTR_TXIEN, s, REG_INTR);
if (uart_tx_stopped(&s->port))
mxs_auart_stop_tx(&s->port);
@@ -332,8 +653,8 @@ static void mxs_auart_rx_char(struct mxs_auart_port *s)
u32 stat;
u8 c;
- c = readl(s->port.membase + AUART_DATA);
- stat = readl(s->port.membase + AUART_STAT);
+ c = mxs_read(s, REG_DATA);
+ stat = mxs_read(s, REG_STAT);
flag = TTY_NORMAL;
s->port.icount.rx++;
@@ -368,7 +689,7 @@ static void mxs_auart_rx_char(struct mxs_auart_port *s)
uart_insert_char(&s->port, stat, AUART_STAT_OERR, c, flag);
out:
- writel(stat, s->port.membase + AUART_STAT);
+ mxs_write(stat, s, REG_STAT);
}
static void mxs_auart_rx_chars(struct mxs_auart_port *s)
@@ -376,13 +697,13 @@ static void mxs_auart_rx_chars(struct mxs_auart_port *s)
u32 stat = 0;
for (;;) {
- stat = readl(s->port.membase + AUART_STAT);
+ stat = mxs_read(s, REG_STAT);
if (stat & AUART_STAT_RXFE)
break;
mxs_auart_rx_char(s);
}
- writel(stat, s->port.membase + AUART_STAT);
+ mxs_write(stat, s, REG_STAT);
tty_flip_buffer_push(&s->port.state->port);
}
@@ -418,7 +739,7 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)
{
struct mxs_auart_port *s = to_auart_port(u);
- u32 ctrl = readl(u->membase + AUART_CTRL2);
+ u32 ctrl = mxs_read(s, REG_CTRL2);
ctrl &= ~(AUART_CTRL2_RTSEN | AUART_CTRL2_RTS);
if (mctrl & TIOCM_RTS) {
@@ -428,7 +749,7 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl)
ctrl |= AUART_CTRL2_RTS;
}
- writel(ctrl, u->membase + AUART_CTRL2);
+ mxs_write(ctrl, s, REG_CTRL2);
mctrl_gpio_set(s->gpios, mctrl);
}
@@ -459,7 +780,7 @@ static u32 mxs_auart_modem_status(struct mxs_auart_port *s, u32 mctrl)
static u32 mxs_auart_get_mctrl(struct uart_port *u)
{
struct mxs_auart_port *s = to_auart_port(u);
- u32 stat = readl(u->membase + AUART_STAT);
+ u32 stat = mxs_read(s, REG_STAT);
u32 mctrl = 0;
if (stat & AUART_STAT_CTS)
@@ -536,14 +857,14 @@ static void dma_rx_callback(void *arg)
dma_unmap_sg(s->dev, &s->rx_sgl, 1, DMA_FROM_DEVICE);
- stat = readl(s->port.membase + AUART_STAT);
+ stat = mxs_read(s, REG_STAT);
stat &= ~(AUART_STAT_OERR | AUART_STAT_BERR |
AUART_STAT_PERR | AUART_STAT_FERR);
count = stat & AUART_STAT_RXCOUNT_MASK;
tty_insert_flip_string(port, s->rx_dma_buf, count);
- writel(stat, s->port.membase + AUART_STAT);
+ mxs_write(stat, s, REG_STAT);
tty_flip_buffer_push(port);
/* start the next DMA for RX. */
@@ -606,8 +927,8 @@ static void mxs_auart_dma_exit_channel(struct mxs_auart_port *s)
static void mxs_auart_dma_exit(struct mxs_auart_port *s)
{
- writel(AUART_CTRL2_TXDMAE | AUART_CTRL2_RXDMAE | AUART_CTRL2_DMAONERR,
- s->port.membase + AUART_CTRL2_CLR);
+ mxs_clr(AUART_CTRL2_TXDMAE | AUART_CTRL2_RXDMAE | AUART_CTRL2_DMAONERR,
+ s, REG_CTRL2);
mxs_auart_dma_exit_channel(s);
s->flags &= ~MXS_AUART_DMA_ENABLED;
@@ -666,7 +987,7 @@ static void mxs_auart_settermios(struct uart_port *u,
cflag = termios->c_cflag;
ctrl = AUART_LINECTRL_FEN;
- ctrl2 = readl(u->membase + AUART_CTRL2);
+ ctrl2 = mxs_read(s, REG_CTRL2);
/* byte size */
switch (cflag & CSIZE) {
@@ -754,15 +1075,24 @@ static void mxs_auart_settermios(struct uart_port *u,
}
/* set baud rate */
- baud_min = DIV_ROUND_UP(u->uartclk * 32, AUART_LINECTRL_BAUD_DIV_MAX);
- baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN;
- baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max);
- div = u->uartclk * 32 / baud;
+ if (is_asm9260_auart(s)) {
+ baud = uart_get_baud_rate(u, termios, old,
+ u->uartclk * 4 / 0x3FFFFF,
+ u->uartclk / 16);
+ div = u->uartclk * 4 / baud;
+ } else {
+ baud_min = DIV_ROUND_UP(u->uartclk * 32,
+ AUART_LINECTRL_BAUD_DIV_MAX);
+ baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN;
+ baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max);
+ div = u->uartclk * 32 / baud;
+ }
+
ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
ctrl |= AUART_LINECTRL_BAUD_DIVINT(div >> 6);
+ mxs_write(ctrl, s, REG_LINECTRL);
- writel(ctrl, u->membase + AUART_LINECTRL);
- writel(ctrl2, u->membase + AUART_CTRL2);
+ mxs_write(ctrl2, s, REG_CTRL2);
uart_update_timeout(u, termios->c_cflag, baud);
@@ -771,8 +1101,8 @@ static void mxs_auart_settermios(struct uart_port *u,
!test_and_set_bit(MXS_AUART_DMA_RX_READY, &s->flags)) {
if (!mxs_auart_dma_prep_rx(s)) {
/* Disable the normal RX interrupt. */
- writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN,
- u->membase + AUART_INTR_CLR);
+ mxs_clr(AUART_INTR_RXIEN | AUART_INTR_RTIEN,
+ s, REG_INTR);
} else {
mxs_auart_dma_exit(s);
dev_err(s->dev, "We can not start up the DMA.\n");
@@ -802,16 +1132,13 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
u32 istat;
struct mxs_auart_port *s = context;
u32 mctrl_temp = s->mctrl_prev;
- u32 stat = readl(s->port.membase + AUART_STAT);
+ u32 stat = mxs_read(s, REG_STAT);
- istat = readl(s->port.membase + AUART_INTR);
+ istat = mxs_read(s, REG_INTR);
/* ack irq */
- writel(istat & (AUART_INTR_RTIS
- | AUART_INTR_TXIS
- | AUART_INTR_RXIS
- | AUART_INTR_CTSMIS),
- s->port.membase + AUART_INTR_CLR);
+ mxs_clr(istat & (AUART_INTR_RTIS | AUART_INTR_TXIS | AUART_INTR_RXIS
+ | AUART_INTR_CTSMIS), s, REG_INTR);
/*
* Dealing with GPIO interrupt
@@ -827,8 +1154,7 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
if (CTS_AT_AUART() && s->ms_irq_enabled)
uart_handle_cts_change(&s->port,
stat & AUART_STAT_CTS);
- writel(AUART_INTR_CTSMIS,
- s->port.membase + AUART_INTR_CLR);
+ mxs_clr(AUART_INTR_CTSMIS, s, REG_INTR);
istat &= ~AUART_INTR_CTSMIS;
}
@@ -846,44 +1172,44 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
return IRQ_HANDLED;
}
-static void mxs_auart_reset_deassert(struct uart_port *u)
+static void mxs_auart_reset_deassert(struct mxs_auart_port *s)
{
int i;
unsigned int reg;
- writel(AUART_CTRL0_SFTRST, u->membase + AUART_CTRL0_CLR);
+ mxs_clr(AUART_CTRL0_SFTRST, s, REG_CTRL0);
for (i = 0; i < 10000; i++) {
- reg = readl(u->membase + AUART_CTRL0);
+ reg = mxs_read(s, REG_CTRL0);
if (!(reg & AUART_CTRL0_SFTRST))
break;
udelay(3);
}
- writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+ mxs_clr(AUART_CTRL0_CLKGATE, s, REG_CTRL0);
}
-static void mxs_auart_reset_assert(struct uart_port *u)
+static void mxs_auart_reset_assert(struct mxs_auart_port *s)
{
int i;
u32 reg;
- reg = readl(u->membase + AUART_CTRL0);
+ reg = mxs_read(s, REG_CTRL0);
/* if already in reset state, keep it untouched */
if (reg & AUART_CTRL0_SFTRST)
return;
- writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
- writel(AUART_CTRL0_SFTRST, u->membase + AUART_CTRL0_SET);
+ mxs_clr(AUART_CTRL0_CLKGATE, s, REG_CTRL0);
+ mxs_set(AUART_CTRL0_SFTRST, s, REG_CTRL0);
for (i = 0; i < 1000; i++) {
- reg = readl(u->membase + AUART_CTRL0);
+ reg = mxs_read(s, REG_CTRL0);
/* reset is finished when the clock is gated */
if (reg & AUART_CTRL0_CLKGATE)
return;
udelay(10);
}
- dev_err(u->dev, "Failed to reset the unit.");
+ dev_err(s->dev, "Failed to reset the unit.");
}
static int mxs_auart_startup(struct uart_port *u)
@@ -896,17 +1222,17 @@ static int mxs_auart_startup(struct uart_port *u)
return ret;
if (uart_console(u)) {
- writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+ mxs_clr(AUART_CTRL0_CLKGATE, s, REG_CTRL0);
} else {
/* reset the unit to a well known state */
- mxs_auart_reset_assert(u);
- mxs_auart_reset_deassert(u);
+ mxs_auart_reset_assert(s);
+ mxs_auart_reset_deassert(s);
}
- writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_SET);
+ mxs_set(AUART_CTRL2_UARTEN, s, REG_CTRL2);
- writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
- u->membase + AUART_INTR);
+ mxs_write(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
+ s, REG_INTR);
/* Reset FIFO size (it could have changed if DMA was enabled) */
u->fifosize = MXS_AUART_FIFO_SIZE;
@@ -915,7 +1241,7 @@ static int mxs_auart_startup(struct uart_port *u)
* Enable fifo so all four bytes of a DMA word are written to
* output (otherwise, only the LSB is written, i.e. 1 in 4 bytes)
*/
- writel(AUART_LINECTRL_FEN, u->membase + AUART_LINECTRL_SET);
+ mxs_set(AUART_LINECTRL_FEN, s, REG_LINECTRL);
/* get initial status of modem lines */
mctrl_gpio_get(s->gpios, &s->mctrl_prev);
@@ -934,12 +1260,13 @@ static void mxs_auart_shutdown(struct uart_port *u)
mxs_auart_dma_exit(s);
if (uart_console(u)) {
- writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_CLR);
- writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
- u->membase + AUART_INTR_CLR);
- writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_SET);
+ mxs_clr(AUART_CTRL2_UARTEN, s, REG_CTRL2);
+
+ mxs_clr(AUART_INTR_RXIEN | AUART_INTR_RTIEN |
+ AUART_INTR_CTSMIEN, s, REG_INTR);
+ mxs_set(AUART_CTRL0_CLKGATE, s, REG_CTRL0);
} else {
- mxs_auart_reset_assert(u);
+ mxs_auart_reset_assert(s);
}
clk_disable_unprepare(s->clk);
@@ -947,7 +1274,9 @@ static void mxs_auart_shutdown(struct uart_port *u)
static unsigned int mxs_auart_tx_empty(struct uart_port *u)
{
- if ((readl(u->membase + AUART_STAT) &
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ if ((mxs_read(s, REG_STAT) &
(AUART_STAT_TXFE | AUART_STAT_BUSY)) == AUART_STAT_TXFE)
return TIOCSER_TEMT;
@@ -959,29 +1288,33 @@ static void mxs_auart_start_tx(struct uart_port *u)
struct mxs_auart_port *s = to_auart_port(u);
/* enable transmitter */
- writel(AUART_CTRL2_TXE, u->membase + AUART_CTRL2_SET);
+ mxs_set(AUART_CTRL2_TXE, s, REG_CTRL2);
mxs_auart_tx_chars(s);
}
static void mxs_auart_stop_tx(struct uart_port *u)
{
- writel(AUART_CTRL2_TXE, u->membase + AUART_CTRL2_CLR);
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ mxs_clr(AUART_CTRL2_TXE, s, REG_CTRL2);
}
static void mxs_auart_stop_rx(struct uart_port *u)
{
- writel(AUART_CTRL2_RXE, u->membase + AUART_CTRL2_CLR);
+ struct mxs_auart_port *s = to_auart_port(u);
+
+ mxs_clr(AUART_CTRL2_RXE, s, REG_CTRL2);
}
static void mxs_auart_break_ctl(struct uart_port *u, int ctl)
{
+ struct mxs_auart_port *s = to_auart_port(u);
+
if (ctl)
- writel(AUART_LINECTRL_BRK,
- u->membase + AUART_LINECTRL_SET);
+ mxs_set(AUART_LINECTRL_BRK, s, REG_LINECTRL);
else
- writel(AUART_LINECTRL_BRK,
- u->membase + AUART_LINECTRL_CLR);
+ mxs_clr(AUART_LINECTRL_BRK, s, REG_LINECTRL);
}
static struct uart_ops mxs_auart_ops = {
@@ -1009,15 +1342,16 @@ static struct mxs_auart_port *auart_port[MXS_AUART_PORTS];
#ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE
static void mxs_auart_console_putchar(struct uart_port *port, int ch)
{
+ struct mxs_auart_port *s = to_auart_port(port);
unsigned int to = 1000;
- while (readl(port->membase + AUART_STAT) & AUART_STAT_TXFF) {
+ while (mxs_read(s, REG_STAT) & AUART_STAT_TXFF) {
if (!to--)
break;
udelay(1);
}
- writel(ch, port->membase + AUART_DATA);
+ mxs_write(ch, s, REG_DATA);
}
static void
@@ -1037,18 +1371,16 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
clk_enable(s->clk);
/* First save the CR then disable the interrupts */
- old_ctrl2 = readl(port->membase + AUART_CTRL2);
- old_ctrl0 = readl(port->membase + AUART_CTRL0);
+ old_ctrl2 = mxs_read(s, REG_CTRL2);
+ old_ctrl0 = mxs_read(s, REG_CTRL0);
- writel(AUART_CTRL0_CLKGATE,
- port->membase + AUART_CTRL0_CLR);
- writel(AUART_CTRL2_UARTEN | AUART_CTRL2_TXE,
- port->membase + AUART_CTRL2_SET);
+ mxs_clr(AUART_CTRL0_CLKGATE, s, REG_CTRL0);
+ mxs_set(AUART_CTRL2_UARTEN | AUART_CTRL2_TXE, s, REG_CTRL2);
uart_console_write(port, str, count, mxs_auart_console_putchar);
/* Finally, wait for transmitter to become empty ... */
- while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) {
+ while (mxs_read(s, REG_STAT) & AUART_STAT_BUSY) {
udelay(1);
if (!to--)
break;
@@ -1060,24 +1392,25 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
* unused, but that is better than to disable it while it is still
* transmitting.
*/
- if (!(readl(port->membase + AUART_STAT) & AUART_STAT_BUSY)) {
- writel(old_ctrl0, port->membase + AUART_CTRL0);
- writel(old_ctrl2, port->membase + AUART_CTRL2);
+ if (!(mxs_read(s, REG_STAT) & AUART_STAT_BUSY)) {
+ mxs_write(old_ctrl0, s, REG_CTRL0);
+ mxs_write(old_ctrl2, s, REG_CTRL2);
}
clk_disable(s->clk);
}
static void __init
-auart_console_get_options(struct uart_port *port, int *baud,
+auart_console_get_options(struct mxs_auart_port *s, int *baud,
int *parity, int *bits)
{
+ struct uart_port *port = &s->port;
unsigned int lcr_h, quot;
- if (!(readl(port->membase + AUART_CTRL2) & AUART_CTRL2_UARTEN))
+ if (!(mxs_read(s, REG_CTRL2) & AUART_CTRL2_UARTEN))
return;
- lcr_h = readl(port->membase + AUART_LINECTRL);
+ lcr_h = mxs_read(s, REG_LINECTRL);
*parity = 'n';
if (lcr_h & AUART_LINECTRL_PEN) {
@@ -1092,12 +1425,10 @@ auart_console_get_options(struct uart_port *port, int *baud,
else
*bits = 8;
- quot = ((readl(port->membase + AUART_LINECTRL)
- & AUART_LINECTRL_BAUD_DIVINT_MASK))
- >> (AUART_LINECTRL_BAUD_DIVINT_SHIFT - 6);
- quot |= ((readl(port->membase + AUART_LINECTRL)
- & AUART_LINECTRL_BAUD_DIVFRAC_MASK))
- >> AUART_LINECTRL_BAUD_DIVFRAC_SHIFT;
+ quot = ((mxs_read(s, REG_LINECTRL) & AUART_LINECTRL_BAUD_DIVINT_MASK))
+ >> (AUART_LINECTRL_BAUD_DIVINT_SHIFT - 6);
+ quot |= ((mxs_read(s, REG_LINECTRL) & AUART_LINECTRL_BAUD_DIVFRAC_MASK))
+ >> AUART_LINECTRL_BAUD_DIVFRAC_SHIFT;
if (quot == 0)
quot = 1;
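Written out, the two reads reassemble one fixed-point divisor: the DIVINT field is extracted already shifted left by 6 (hence the "- 6" in the shift) so it can be OR-ed with the DIVFRAC field, which is presumably 6 bits wide. An equivalent, more explicit form:
	u32 lc = mxs_read(s, REG_LINECTRL);
	u32 divint = (lc & AUART_LINECTRL_BAUD_DIVINT_MASK)
			>> AUART_LINECTRL_BAUD_DIVINT_SHIFT;
	u32 divfrac = (lc & AUART_LINECTRL_BAUD_DIVFRAC_MASK)
			>> AUART_LINECTRL_BAUD_DIVFRAC_SHIFT;
	u32 quot = (divint << 6) | divfrac;	/* 6 fractional bits */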
@@ -1132,7 +1463,7 @@ auart_console_setup(struct console *co, char *options)
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
- auart_console_get_options(&s->port, &baud, &parity, &bits);
+ auart_console_get_options(s, &baud, &parity, &bits);
ret = uart_set_options(&s->port, co, baud, parity, bits, flow);
@@ -1164,6 +1495,60 @@ static struct uart_driver auart_driver = {
#endif
};
+static void mxs_init_regs(struct mxs_auart_port *s)
+{
+ if (is_asm9260_auart(s))
+ s->vendor = &vendor_alphascale_asm9260;
+ else
+ s->vendor = &vendor_freescale_stmp37xx;
+}
+
+static int mxs_get_clks(struct mxs_auart_port *s,
+ struct platform_device *pdev)
+{
+ int err;
+
+ if (!is_asm9260_auart(s)) {
+ s->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(s->clk))
+ return PTR_ERR(s->clk);
+
+ return 0;
+ }
+
+ s->clk = devm_clk_get(s->dev, "mod");
+ if (IS_ERR(s->clk)) {
+ dev_err(s->dev, "Failed to get \"mod\" clk\n");
+ return PTR_ERR(s->clk);
+ }
+
+ s->clk_ahb = devm_clk_get(s->dev, "ahb");
+ if (IS_ERR(s->clk_ahb)) {
+ dev_err(s->dev, "Failed to get \"ahb\" clk\n");
+ return PTR_ERR(s->clk_ahb);
+ }
+
+ err = clk_prepare_enable(s->clk_ahb);
+ if (err) {
+ dev_err(s->dev, "Failed to enable ahb_clk!\n");
+ return err;
+ }
+
+ err = clk_set_rate(s->clk, clk_get_rate(s->clk_ahb));
+ if (err) {
+ dev_err(s->dev, "Failed to set rate!\n");
+ return err;
+ }
+
+ err = clk_prepare_enable(s->clk);
+ if (err) {
+ dev_err(s->dev, "Failed to enable clk!\n");
+ return err;
+ }
+
+ return 0;
+}
+
/*
* This function returns 1 if pdev isn't a device instantiated by dt, 0 if it
* could successfully get all information from dt, or a negative errno.
@@ -1185,7 +1570,8 @@ static int serial_mxs_probe_dt(struct mxs_auart_port *s,
}
s->port.line = ret;
- if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
+ if (of_get_property(np, "uart-has-rtscts", NULL) ||
+ of_get_property(np, "fsl,uart-has-rtscts", NULL) /* deprecated */)
set_bit(MXS_AUART_RTSCTS, &s->flags);
return 0;
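The same check written with the boolean helper reads a touch more directly; a sketch (the patch itself keeps of_get_property(), with the generic name tried before the deprecated vendor-prefixed one):
	if (of_property_read_bool(np, "uart-has-rtscts") ||
	    of_property_read_bool(np, "fsl,uart-has-rtscts") /* deprecated */)
		set_bit(MXS_AUART_RTSCTS, &s->flags);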
@@ -1269,6 +1655,9 @@ static int mxs_auart_probe(struct platform_device *pdev)
if (!s)
return -ENOMEM;
+ s->port.dev = &pdev->dev;
+ s->dev = &pdev->dev;
+
ret = serial_mxs_probe_dt(s, pdev);
if (ret > 0)
s->port.line = pdev->id < 0 ? 0 : pdev->id;
@@ -1280,15 +1669,14 @@ static int mxs_auart_probe(struct platform_device *pdev)
s->devtype = pdev->id_entry->driver_data;
}
- s->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(s->clk))
- return PTR_ERR(s->clk);
+ ret = mxs_get_clks(s, pdev);
+ if (ret)
+ return ret;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r)
return -ENXIO;
-
s->port.mapbase = r->start;
s->port.membase = ioremap(r->start, resource_size(r));
s->port.ops = &mxs_auart_ops;
@@ -1296,7 +1684,8 @@ static int mxs_auart_probe(struct platform_device *pdev)
s->port.fifosize = MXS_AUART_FIFO_SIZE;
s->port.uartclk = clk_get_rate(s->clk);
s->port.type = PORT_IMX;
- s->port.dev = s->dev = &pdev->dev;
+
+ mxs_init_regs(s);
s->mctrl_prev = 0;
@@ -1327,16 +1716,21 @@ static int mxs_auart_probe(struct platform_device *pdev)
auart_port[s->port.line] = s;
- mxs_auart_reset_deassert(&s->port);
+ mxs_auart_reset_deassert(s);
ret = uart_add_one_port(&auart_driver, &s->port);
if (ret)
goto out_free_gpio_irq;
- version = readl(s->port.membase + AUART_VERSION);
- dev_info(&pdev->dev, "Found APPUART %d.%d.%d\n",
- (version >> 24) & 0xff,
- (version >> 16) & 0xff, version & 0xffff);
+ /* ASM9260 doesn't have a version reg */
+ if (is_asm9260_auart(s)) {
+ dev_info(&pdev->dev, "Found APPUART ASM9260\n");
+ } else {
+ version = mxs_read(s, REG_VERSION);
+ dev_info(&pdev->dev, "Found APPUART %d.%d.%d\n",
+ (version >> 24) & 0xff,
+ (version >> 16) & 0xff, version & 0xffff);
+ }
return 0;
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
new file mode 100644
index 000000000..62a43bf56
--- /dev/null
+++ b/drivers/tty/serial/pic32_uart.c
@@ -0,0 +1,960 @@
+/*
+ * PIC32 Integrated Serial Driver.
+ *
+ * Copyright (C) 2015 Microchip Technology, Inc.
+ *
+ * Authors:
+ * Sorin-Andrei Pistirica <andrei.pistirica@microchip.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/console.h>
+#include <linux/clk.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/delay.h>
+
+#include <asm/mach-pic32/pic32.h>
+#include "pic32_uart.h"
+
+/* UART name and device definitions */
+#define PIC32_DEV_NAME "pic32-uart"
+#define PIC32_MAX_UARTS 6
+#define PIC32_SDEV_NAME "ttyPIC"
+
+/* pic32_sport pointer for console use */
+static struct pic32_sport *pic32_sports[PIC32_MAX_UARTS];
+
+static inline void pic32_wait_deplete_txbuf(struct pic32_sport *sport)
+{
+ /* wait for tx empty, otherwise chars will be lost or corrupted */
+ while (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_TRMT))
+ udelay(1);
+}
+
+static inline int pic32_enable_clock(struct pic32_sport *sport)
+{
+ int ret = clk_prepare_enable(sport->clk);
+
+ if (ret)
+ return ret;
+
+ sport->ref_clk++;
+ return 0;
+}
+
+static inline void pic32_disable_clock(struct pic32_sport *sport)
+{
+ sport->ref_clk--;
+ clk_disable_unprepare(sport->clk);
+}
+
+/* serial core request to check if uart tx buffer is empty */
+static unsigned int pic32_uart_tx_empty(struct uart_port *port)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+ u32 val = pic32_uart_readl(sport, PIC32_UART_STA);
+
+ return (val & PIC32_UART_STA_TRMT) ? 1 : 0;
+}
+
+/* serial core request to set UART outputs */
+static void pic32_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+
+ /* set loopback mode */
+ if (mctrl & TIOCM_LOOP)
+ pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE),
+ PIC32_UART_MODE_LPBK);
+ else
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+ PIC32_UART_MODE_LPBK);
+}
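PIC32_SET() and PIC32_CLR() come from <asm/mach-pic32/pic32.h>, not from this diff: PIC32 peripherals expose write-only SET/CLR/INV companion registers at fixed offsets from each base register, so single bits can be changed without a read-modify-write cycle. A sketch of the convention (the exact offsets are an assumption based on the PIC32 family layout):
#define PIC32_CLR(reg)	((reg) + 0x04)	/* assumed: write 1s to clear bits  */
#define PIC32_SET(reg)	((reg) + 0x08)	/* assumed: write 1s to set bits    */
#define PIC32_INV(reg)	((reg) + 0x0c)	/* assumed: write 1s to toggle bits */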
+
+/* get the state of CTS input pin for this port */
+static unsigned int get_cts_state(struct pic32_sport *sport)
+{
+ /* read and invert UxCTS */
+ if (gpio_is_valid(sport->cts_gpio))
+ return !gpio_get_value(sport->cts_gpio);
+
+ return 1;
+}
+
+/* serial core request to return the state of misc UART input pins */
+static unsigned int pic32_uart_get_mctrl(struct uart_port *port)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+ unsigned int mctrl = 0;
+
+ if (!sport->hw_flow_ctrl)
+ mctrl |= TIOCM_CTS;
+ else if (get_cts_state(sport))
+ mctrl |= TIOCM_CTS;
+
+ /* DSR and CD are not supported in PIC32, so return 1
+ * RI is not supported in PIC32, so return 0
+ */
+ mctrl |= TIOCM_CD;
+ mctrl |= TIOCM_DSR;
+
+ return mctrl;
+}
+
+/* stop tx and start tx are not called in pairs, therefore a flag tracks
+ * whether the tx irq is currently enabled, keeping the irq-depth balanced.
+ */
+static inline void pic32_uart_irqtxen(struct pic32_sport *sport, u8 en)
+{
+ if (en && !tx_irq_enabled(sport)) {
+ enable_irq(sport->irq_tx);
+ tx_irq_enabled(sport) = 1;
+ } else if (!en && tx_irq_enabled(sport)) {
+ /* use disable_irq_nosync() rather than disable_irq() to avoid a
+ * self-imposed deadlock: we must not wait for the irq handler to
+ * finish, since this callback is called from interrupt context.
+ */
+ disable_irq_nosync(sport->irq_tx);
+ tx_irq_enabled(sport) = 0;
+ }
+}
+
+/* serial core request to disable tx ASAP (used for flow control) */
+static void pic32_uart_stop_tx(struct uart_port *port)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+
+ if (!(pic32_uart_readl(sport, PIC32_UART_MODE) & PIC32_UART_MODE_ON))
+ return;
+
+ if (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_UTXEN))
+ return;
+
+ /* wait for tx empty */
+ pic32_wait_deplete_txbuf(sport);
+
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+ PIC32_UART_STA_UTXEN);
+ pic32_uart_irqtxen(sport, 0);
+}
+
+/* serial core request to (re)enable tx */
+static void pic32_uart_start_tx(struct uart_port *port)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+
+ pic32_uart_irqtxen(sport, 1);
+ pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA),
+ PIC32_UART_STA_UTXEN);
+}
+
+/* serial core request to stop rx, called before port shutdown */
+static void pic32_uart_stop_rx(struct uart_port *port)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+
+ /* disable rx interrupts */
+ disable_irq(sport->irq_rx);
+
+ /* receiver Enable bit OFF */
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+ PIC32_UART_STA_URXEN);
+}
+
+/* serial core request to start/stop emitting break char */
+static void pic32_uart_break_ctl(struct uart_port *port, int ctl)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (ctl)
+ pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA),
+ PIC32_UART_STA_UTXBRK);
+ else
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+ PIC32_UART_STA_UTXBRK);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* get port type in string format */
+static const char *pic32_uart_type(struct uart_port *port)
+{
+ return (port->type == PORT_PIC32) ? PIC32_DEV_NAME : NULL;
+}
+
+/* read all chars in rx fifo and send them to core */
+static void pic32_uart_do_rx(struct uart_port *port)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+ struct tty_port *tty;
+ unsigned int max_count;
+
+ /* limit the number of chars read per interrupt; it need not be
+ * higher than the fifo size anyway since we're much faster than
+ * the serial port
+ */
+ max_count = PIC32_UART_RX_FIFO_DEPTH;
+
+ spin_lock(&port->lock);
+
+ tty = &port->state->port;
+
+ do {
+ u32 sta_reg, c;
+ char flag;
+
+ /* get overrun/fifo empty information from status register */
+ sta_reg = pic32_uart_readl(sport, PIC32_UART_STA);
+ if (unlikely(sta_reg & PIC32_UART_STA_OERR)) {
+
+ /* fifo reset is required to clear interrupt */
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+ PIC32_UART_STA_OERR);
+
+ port->icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ }
+
+ /* Can at least one more character be read? */
+ if (!(sta_reg & PIC32_UART_STA_URXDA))
+ break;
+
+ /* read the character and increment the rx counter */
+ c = pic32_uart_readl(sport, PIC32_UART_RX);
+
+ port->icount.rx++;
+ flag = TTY_NORMAL;
+ c &= 0xff;
+
+ if (unlikely((sta_reg & PIC32_UART_STA_PERR) ||
+ (sta_reg & PIC32_UART_STA_FERR))) {
+
+ /* do stats first */
+ if (sta_reg & PIC32_UART_STA_PERR)
+ port->icount.parity++;
+ if (sta_reg & PIC32_UART_STA_FERR)
+ port->icount.frame++;
+
+ /* update flag wrt read_status_mask */
+ sta_reg &= port->read_status_mask;
+
+ if (sta_reg & PIC32_UART_STA_FERR)
+ flag = TTY_FRAME;
+ if (sta_reg & PIC32_UART_STA_PERR)
+ flag = TTY_PARITY;
+ }
+
+ if (uart_handle_sysrq_char(port, c))
+ continue;
+
+ if ((sta_reg & port->ignore_status_mask) == 0)
+ tty_insert_flip_char(tty, c, flag);
+
+ } while (--max_count);
+
+ spin_unlock(&port->lock);
+
+ tty_flip_buffer_push(tty);
+}
+
+/* fill tx fifo with chars to send, stop when fifo is about to be full
+ * or when all chars have been sent.
+ */
+static void pic32_uart_do_tx(struct uart_port *port)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned int max_count = PIC32_UART_TX_FIFO_DEPTH;
+
+ if (port->x_char) {
+ pic32_uart_writel(sport, PIC32_UART_TX, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if (uart_tx_stopped(port)) {
+ pic32_uart_stop_tx(port);
+ return;
+ }
+
+ if (uart_circ_empty(xmit))
+ goto txq_empty;
+
+ /* keep stuffing chars into uart tx buffer
+ * 1) until uart fifo is full
+ * or
+ * 2) until the circ buffer is empty
+ * (all chars have been sent)
+ * or
+ * 3) until the max count is reached
+ * (prevents lingering here for too long in certain cases)
+ */
+ while (!(PIC32_UART_STA_UTXBF &
+ pic32_uart_readl(sport, PIC32_UART_STA))) {
+ unsigned int c = xmit->buf[xmit->tail];
+
+ pic32_uart_writel(sport, PIC32_UART_TX, c);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ if (--max_count == 0)
+ break;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ goto txq_empty;
+
+ return;
+
+txq_empty:
+ pic32_uart_irqtxen(sport, 0);
+}
+
+/* RX interrupt handler */
+static irqreturn_t pic32_uart_rx_interrupt(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+
+ pic32_uart_do_rx(port);
+
+ return IRQ_HANDLED;
+}
+
+/* TX interrupt handler */
+static irqreturn_t pic32_uart_tx_interrupt(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ pic32_uart_do_tx(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/* FAULT interrupt handler */
+static irqreturn_t pic32_uart_fault_interrupt(int irq, void *dev_id)
+{
+ /* do nothing: pic32_uart_do_rx() handles faults. */
+ return IRQ_HANDLED;
+}
+
+/* enable rx & tx operation on uart */
+static void pic32_uart_en_and_unmask(struct uart_port *port)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+
+ pic32_uart_writel(sport, PIC32_SET(PIC32_UART_STA),
+ PIC32_UART_STA_UTXEN | PIC32_UART_STA_URXEN);
+ pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE),
+ PIC32_UART_MODE_ON);
+}
+
+/* disable rx & tx operation on uart */
+static void pic32_uart_dsbl_and_mask(struct uart_port *port)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+
+ /* wait for tx empty, otherwise chars will be lost or corrupted */
+ pic32_wait_deplete_txbuf(sport);
+
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+ PIC32_UART_STA_UTXEN | PIC32_UART_STA_URXEN);
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+ PIC32_UART_MODE_ON);
+}
+
+/* serial core request to initialize uart and start rx operation */
+static int pic32_uart_startup(struct uart_port *port)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+ u32 dflt_baud = (port->uartclk / PIC32_UART_DFLT_BRATE / 16) - 1;
+ unsigned long flags;
+ int ret;
+
+ local_irq_save(flags);
+
+ ret = pic32_enable_clock(sport);
+ if (ret) {
+ local_irq_restore(flags);
+ goto out_done;
+ }
+
+ /* clear status and mode registers */
+ pic32_uart_writel(sport, PIC32_UART_MODE, 0);
+ pic32_uart_writel(sport, PIC32_UART_STA, 0);
+
+ /* disable uart and mask all interrupts */
+ pic32_uart_dsbl_and_mask(port);
+
+ /* set default baud */
+ pic32_uart_writel(sport, PIC32_UART_BRG, dflt_baud);
+
+ local_irq_restore(flags);
+
+ /* Each UART of a PIC32 has three interrupts, therefore we set up
+ * the driver to register all three irqs for the device.
+ *
+ * For each irq, request_irq() is called with the interrupt disabled;
+ * each irq is then enabled as soon as we are ready to handle it.
+ */
+ tx_irq_enabled(sport) = 0;
+
+ sport->irq_fault_name = kasprintf(GFP_KERNEL, "%s%d-fault",
+ pic32_uart_type(port),
+ sport->idx);
+ if (!sport->irq_fault_name) {
+ dev_err(port->dev, "%s: kasprintf err!", __func__);
+ ret = -ENOMEM;
+ goto out_done;
+ }
+ irq_set_status_flags(sport->irq_fault, IRQ_NOAUTOEN);
+ ret = request_irq(sport->irq_fault, pic32_uart_fault_interrupt,
+ sport->irqflags_fault, sport->irq_fault_name, port);
+ if (ret) {
+ dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
+ __func__, sport->irq_fault, ret,
+ pic32_uart_type(port));
+ kfree(sport->irq_fault_name);
+ goto out_done;
+ }
+
+ sport->irq_rx_name = kasprintf(GFP_KERNEL, "%s%d-rx",
+ pic32_uart_type(port),
+ sport->idx);
+ if (!sport->irq_rx_name) {
+ dev_err(port->dev, "%s: kasprintf err!", __func__);
+ ret = -ENOMEM;
+ goto out_f;
+ }
+ irq_set_status_flags(sport->irq_rx, IRQ_NOAUTOEN);
+ ret = request_irq(sport->irq_rx, pic32_uart_rx_interrupt,
+ sport->irqflags_rx, sport->irq_rx_name, port);
+ if (ret) {
+ dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
+ __func__, sport->irq_rx, ret,
+ pic32_uart_type(port));
+ kfree(sport->irq_rx_name);
+ goto out_f;
+ }
+
+ sport->irq_tx_name = kasprintf(GFP_KERNEL, "%s%d-tx",
+ pic32_uart_type(port),
+ sport->idx);
+ if (!sport->irq_tx_name) {
+ dev_err(port->dev, "%s: kasprintf err!", __func__);
+ ret = -ENOMEM;
+ goto out_r;
+ }
+ irq_set_status_flags(sport->irq_tx, IRQ_NOAUTOEN);
+ ret = request_irq(sport->irq_tx, pic32_uart_tx_interrupt,
+ sport->irqflags_tx, sport->irq_tx_name, port);
+ if (ret) {
+ dev_err(port->dev, "%s: request irq(%d) err! ret:%d name:%s\n",
+ __func__, sport->irq_tx, ret,
+ pic32_uart_type(port));
+ goto out_t;
+ }
+
+ local_irq_save(flags);
+
+ /* set rx interrupt on first receive */
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+ PIC32_UART_STA_URXISEL1 | PIC32_UART_STA_URXISEL0);
+
+ /* set interrupt on empty */
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_STA),
+ PIC32_UART_STA_UTXISEL1);
+
+ /* enable all interrupts and enable the uart */
+ pic32_uart_en_and_unmask(port);
+
+ enable_irq(sport->irq_rx);
+
+ return 0;
+
+out_t:
+ kfree(sport->irq_tx_name);
+out_r:
+ free_irq(sport->irq_rx, port);
+ kfree(sport->irq_rx_name);
+out_f:
+ free_irq(sport->irq_fault, port);
+ kfree(sport->irq_fault_name);
+out_done:
+ return ret;
+}
+
+/* serial core request to flush & disable uart */
+static void pic32_uart_shutdown(struct uart_port *port)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+ unsigned long flags;
+
+ /* disable uart */
+ spin_lock_irqsave(&port->lock, flags);
+ pic32_uart_dsbl_and_mask(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+ pic32_disable_clock(sport);
+
+ /* free all 3 interrupts for this UART and their kasprintf'd names */
+ free_irq(sport->irq_fault, port);
+ kfree(sport->irq_fault_name);
+ free_irq(sport->irq_tx, port);
+ kfree(sport->irq_tx_name);
+ free_irq(sport->irq_rx, port);
+ kfree(sport->irq_rx_name);
+}
+
+/* serial core request to change current uart setting */
+static void pic32_uart_set_termios(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+ unsigned int baud;
+ unsigned int quot;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* disable uart and mask all interrupts while changing speed */
+ pic32_uart_dsbl_and_mask(port);
+
+ /* stop bit options */
+ if (new->c_cflag & CSTOPB)
+ pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE),
+ PIC32_UART_MODE_STSEL);
+ else
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+ PIC32_UART_MODE_STSEL);
+
+ /* parity options */
+ if (new->c_cflag & PARENB) {
+ if (new->c_cflag & PARODD) {
+ pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE),
+ PIC32_UART_MODE_PDSEL1);
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+ PIC32_UART_MODE_PDSEL0);
+ } else {
+ pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE),
+ PIC32_UART_MODE_PDSEL0);
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+ PIC32_UART_MODE_PDSEL1);
+ }
+ } else {
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+ PIC32_UART_MODE_PDSEL1 |
+ PIC32_UART_MODE_PDSEL0);
+ }
+ /* if hw flow ctrl, then the pins must be specified in device tree */
+ if ((new->c_cflag & CRTSCTS) && sport->hw_flow_ctrl) {
+ /* enable hardware flow control */
+ pic32_uart_writel(sport, PIC32_SET(PIC32_UART_MODE),
+ PIC32_UART_MODE_UEN1);
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+ PIC32_UART_MODE_UEN0);
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+ PIC32_UART_MODE_RTSMD);
+ } else {
+ /* disable hardware flow control */
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+ PIC32_UART_MODE_UEN1);
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+ PIC32_UART_MODE_UEN0);
+ pic32_uart_writel(sport, PIC32_CLR(PIC32_UART_MODE),
+ PIC32_UART_MODE_RTSMD);
+ }
+
+ /* Always 8-bit */
+ new->c_cflag |= CS8;
+
+ /* Mark/Space parity is not supported */
+ new->c_cflag &= ~CMSPAR;
+
+ /* update baud */
+ baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
+ quot = uart_get_divisor(port, baud) - 1;
+ pic32_uart_writel(sport, PIC32_UART_BRG, quot);
+ uart_update_timeout(port, new->c_cflag, baud);
+
+ if (tty_termios_baud_rate(new))
+ tty_termios_encode_baud_rate(new, baud, baud);
+
+ /* enable uart */
+ pic32_uart_en_and_unmask(port);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/* serial core request to claim uart iomem */
+static int pic32_uart_request_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *res_mem;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!res_mem))
+ return -EINVAL;
+
+ if (!request_mem_region(port->mapbase, resource_size(res_mem),
+ "pic32_uart_mem"))
+ return -EBUSY;
+
+ port->membase = devm_ioremap_nocache(port->dev, port->mapbase,
+ resource_size(res_mem));
+ if (!port->membase) {
+ dev_err(port->dev, "Unable to map registers\n");
+ release_mem_region(port->mapbase, resource_size(res_mem));
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* serial core request to release uart iomem */
+static void pic32_uart_release_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *res_mem;
+ unsigned int res_size;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!res_mem))
+ return;
+ res_size = resource_size(res_mem);
+
+ release_mem_region(port->mapbase, res_size);
+}
+
+/* serial core request to do any port required auto-configuration */
+static void pic32_uart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ if (pic32_uart_request_port(port))
+ return;
+ port->type = PORT_PIC32;
+ }
+}
+
+/* serial core request to check that the port information in serinfo is suitable */
+static int pic32_uart_verify_port(struct uart_port *port,
+ struct serial_struct *serinfo)
+{
+ if (port->type != PORT_PIC32)
+ return -EINVAL;
+ if (port->irq != serinfo->irq)
+ return -EINVAL;
+ if (port->iotype != serinfo->io_type)
+ return -EINVAL;
+ if (port->mapbase != (unsigned long)serinfo->iomem_base)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* serial core callbacks */
+static const struct uart_ops pic32_uart_ops = {
+ .tx_empty = pic32_uart_tx_empty,
+ .get_mctrl = pic32_uart_get_mctrl,
+ .set_mctrl = pic32_uart_set_mctrl,
+ .start_tx = pic32_uart_start_tx,
+ .stop_tx = pic32_uart_stop_tx,
+ .stop_rx = pic32_uart_stop_rx,
+ .break_ctl = pic32_uart_break_ctl,
+ .startup = pic32_uart_startup,
+ .shutdown = pic32_uart_shutdown,
+ .set_termios = pic32_uart_set_termios,
+ .type = pic32_uart_type,
+ .release_port = pic32_uart_release_port,
+ .request_port = pic32_uart_request_port,
+ .config_port = pic32_uart_config_port,
+ .verify_port = pic32_uart_verify_port,
+};
+
+#ifdef CONFIG_SERIAL_PIC32_CONSOLE
+/* output given char */
+static void pic32_console_putchar(struct uart_port *port, int ch)
+{
+ struct pic32_sport *sport = to_pic32_sport(port);
+
+ if (!(pic32_uart_readl(sport, PIC32_UART_MODE) & PIC32_UART_MODE_ON))
+ return;
+
+ if (!(pic32_uart_readl(sport, PIC32_UART_STA) & PIC32_UART_STA_UTXEN))
+ return;
+
+ /* wait for tx empty */
+ pic32_wait_deplete_txbuf(sport);
+
+ pic32_uart_writel(sport, PIC32_UART_TX, ch & 0xff);
+}
+
+/* console core request to output given string */
+static void pic32_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct pic32_sport *sport = pic32_sports[co->index];
+ struct uart_port *port = pic32_get_port(sport);
+
+ /* call uart helper to deal with \r\n */
+ uart_console_write(port, s, count, pic32_console_putchar);
+}
+
+/* console core request to set up the given console: find the matching
+ * uart port and set it up.
+ */
+static int pic32_console_setup(struct console *co, char *options)
+{
+ struct pic32_sport *sport;
+ struct uart_port *port = NULL;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+ int ret = 0;
+
+ if (unlikely(co->index < 0 || co->index >= PIC32_MAX_UARTS))
+ return -ENODEV;
+
+ sport = pic32_sports[co->index];
+ if (!sport)
+ return -ENODEV;
+ port = pic32_get_port(sport);
+
+ ret = pic32_enable_clock(sport);
+ if (ret)
+ return ret;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver pic32_uart_driver;
+static struct console pic32_console = {
+ .name = PIC32_SDEV_NAME,
+ .write = pic32_console_write,
+ .device = uart_console_device,
+ .setup = pic32_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &pic32_uart_driver,
+};
+#define PIC32_SCONSOLE (&pic32_console)
+
+static int __init pic32_console_init(void)
+{
+ register_console(&pic32_console);
+ return 0;
+}
+console_initcall(pic32_console_init);
+
+static inline bool is_pic32_console_port(struct uart_port *port)
+{
+ return port->cons && port->cons->index == port->line;
+}
+
+/*
+ * Late console initialization.
+ */
+static int __init pic32_late_console_init(void)
+{
+ if (!(pic32_console.flags & CON_ENABLED))
+ register_console(&pic32_console);
+
+ return 0;
+}
+
+core_initcall(pic32_late_console_init);
+
+#else
+#define PIC32_SCONSOLE NULL
+#endif
+
+static struct uart_driver pic32_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = PIC32_DEV_NAME,
+ .dev_name = PIC32_SDEV_NAME,
+ .nr = PIC32_MAX_UARTS,
+ .cons = PIC32_SCONSOLE,
+};
+
+static int pic32_uart_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct pic32_sport *sport;
+ int uart_idx = 0;
+ struct resource *res_mem;
+ struct uart_port *port;
+ int ret;
+
+ uart_idx = of_alias_get_id(np, "serial");
+ if (uart_idx < 0 || uart_idx >= PIC32_MAX_UARTS)
+ return -EINVAL;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_mem)
+ return -EINVAL;
+
+ sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
+ if (!sport)
+ return -ENOMEM;
+
+ sport->idx = uart_idx;
+ sport->irq_fault = irq_of_parse_and_map(np, 0);
+ sport->irqflags_fault = IRQF_NO_THREAD;
+ sport->irq_rx = irq_of_parse_and_map(np, 1);
+ sport->irqflags_rx = IRQF_NO_THREAD;
+ sport->irq_tx = irq_of_parse_and_map(np, 2);
+ sport->irqflags_tx = IRQF_NO_THREAD;
+ sport->clk = devm_clk_get(&pdev->dev, NULL);
+ sport->cts_gpio = -EINVAL;
+ sport->dev = &pdev->dev;
+
+ /* Hardware flow control: gpios.
+ * Note: the CTS gpio is required for reading the modem line status.
+ */
+ sport->hw_flow_ctrl = false;
+ sport->cts_gpio = of_get_named_gpio(np, "cts-gpios", 0);
+ if (gpio_is_valid(sport->cts_gpio)) {
+ sport->hw_flow_ctrl = true;
+
+ ret = devm_gpio_request(sport->dev,
+ sport->cts_gpio, "CTS");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "error requesting CTS GPIO\n");
+ goto err;
+ }
+
+ ret = gpio_direction_input(sport->cts_gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "error setting CTS GPIO\n");
+ goto err;
+ }
+ }
+
+ pic32_sports[uart_idx] = sport;
+ port = &sport->port;
+ memset(port, 0, sizeof(*port));
+ port->iotype = UPIO_MEM;
+ port->mapbase = res_mem->start;
+ port->ops = &pic32_uart_ops;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->dev = &pdev->dev;
+ port->fifosize = PIC32_UART_TX_FIFO_DEPTH;
+ port->uartclk = clk_get_rate(sport->clk);
+ port->line = uart_idx;
+
+ ret = uart_add_one_port(&pic32_uart_driver, port);
+ if (ret) {
+ port->membase = NULL;
+ dev_err(port->dev, "%s: uart add port error!\n", __func__);
+ goto err;
+ }
+
+#ifdef CONFIG_SERIAL_PIC32_CONSOLE
+ if (is_pic32_console_port(port) &&
+ (pic32_console.flags & CON_ENABLED)) {
+ /* The peripheral clock has been enabled by console_setup,
+ * so disable it till the port is used.
+ */
+ pic32_disable_clock(sport);
+ }
+#endif
+
+ platform_set_drvdata(pdev, port);
+
+ dev_info(&pdev->dev, "%s: uart(%d) driver initialized.\n",
+ __func__, uart_idx);
+
+ return 0;
+err:
+ /* automatic unroll of sport and gpios */
+ return ret;
+}
+
+static int pic32_uart_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+ struct pic32_sport *sport = to_pic32_sport(port);
+
+ uart_remove_one_port(&pic32_uart_driver, port);
+ pic32_disable_clock(sport);
+ platform_set_drvdata(pdev, NULL);
+ pic32_sports[sport->idx] = NULL;
+
+ /* automatic unroll of sport and gpios */
+ return 0;
+}
+
+static const struct of_device_id pic32_serial_dt_ids[] = {
+ { .compatible = "microchip,pic32mzda-uart" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pic32_serial_dt_ids);
+
+static struct platform_driver pic32_uart_platform_driver = {
+ .probe = pic32_uart_probe,
+ .remove = pic32_uart_remove,
+ .driver = {
+ .name = PIC32_DEV_NAME,
+ .of_match_table = of_match_ptr(pic32_serial_dt_ids),
+ },
+};
+
+static int __init pic32_uart_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&pic32_uart_driver);
+ if (ret) {
+ pr_err("failed to register %s:%d\n",
+ pic32_uart_driver.driver_name, ret);
+ return ret;
+ }
+
+ ret = platform_driver_register(&pic32_uart_platform_driver);
+ if (ret) {
+ pr_err("fail to register pic32 uart\n");
+ uart_unregister_driver(&pic32_uart_driver);
+ }
+
+ return ret;
+}
+arch_initcall(pic32_uart_init);
+
+static void __exit pic32_uart_exit(void)
+{
+#ifdef CONFIG_SERIAL_PIC32_CONSOLE
+ unregister_console(&pic32_console);
+#endif
+ platform_driver_unregister(&pic32_uart_platform_driver);
+ uart_unregister_driver(&pic32_uart_driver);
+}
+module_exit(pic32_uart_exit);
+
+MODULE_AUTHOR("Sorin-Andrei Pistirica <andrei.pistirica@microchip.com>");
+MODULE_DESCRIPTION("Microchip PIC32 integrated serial port driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/pic32_uart.h b/drivers/tty/serial/pic32_uart.h
new file mode 100644
index 000000000..ec379da55
--- /dev/null
+++ b/drivers/tty/serial/pic32_uart.h
@@ -0,0 +1,126 @@
+/*
+ * PIC32 Integrated Serial Driver.
+ *
+ * Copyright (C) 2015 Microchip Technology, Inc.
+ *
+ * Authors:
+ * Sorin-Andrei Pistirica <andrei.pistirica@microchip.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+#ifndef __DT_PIC32_UART_H__
+#define __DT_PIC32_UART_H__
+
+#define PIC32_UART_DFLT_BRATE (9600)
+#define PIC32_UART_TX_FIFO_DEPTH (8)
+#define PIC32_UART_RX_FIFO_DEPTH (8)
+
+#define PIC32_UART_MODE 0x00
+#define PIC32_UART_STA 0x10
+#define PIC32_UART_TX 0x20
+#define PIC32_UART_RX 0x30
+#define PIC32_UART_BRG 0x40
+
+struct pic32_console_opt {
+ int baud;
+ int parity;
+ int bits;
+ int flow;
+};
+
+/* struct pic32_sport - pic32 serial port descriptor
+ * @port: uart port descriptor
+ * @idx: port index
+ * @irq_fault: virtual fault interrupt number
+ * @irqflags_fault: flags related to fault irq
+ * @irq_fault_name: irq fault name
+ * @irq_rx: virtual rx interrupt number
+ * @irqflags_rx: flags related to rx irq
+ * @irq_rx_name: irq rx name
+ * @irq_tx: virtual tx interrupt number
+ * @irqflags_tx: flags related to tx irq
+ * @irq_tx_name: irq tx name
+ * @cts_gpio: clear to send gpio
+ * @dev: device descriptor
+ */
+struct pic32_sport {
+ struct uart_port port;
+ struct pic32_console_opt opt;
+ int idx;
+
+ int irq_fault;
+ int irqflags_fault;
+ const char *irq_fault_name;
+ int irq_rx;
+ int irqflags_rx;
+ const char *irq_rx_name;
+ int irq_tx;
+ int irqflags_tx;
+ const char *irq_tx_name;
+ u8 enable_tx_irq;
+
+ bool hw_flow_ctrl;
+ int cts_gpio;
+
+ int ref_clk;
+ struct clk *clk;
+
+ struct device *dev;
+};
+#define to_pic32_sport(c) container_of(c, struct pic32_sport, port)
+#define pic32_get_port(sport) (&sport->port)
+#define pic32_get_opt(sport) (&sport->opt)
+#define tx_irq_enabled(sport) (sport->enable_tx_irq)
+
+static inline void pic32_uart_writel(struct pic32_sport *sport,
+ u32 reg, u32 val)
+{
+ struct uart_port *port = pic32_get_port(sport);
+
+ __raw_writel(val, port->membase + reg);
+}
+
+static inline u32 pic32_uart_readl(struct pic32_sport *sport, u32 reg)
+{
+ struct uart_port *port = pic32_get_port(sport);
+
+ return __raw_readl(port->membase + reg);
+}
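Note that these accessors use __raw_readl()/__raw_writel(), which do no byte-swapping and insert no memory barriers; that is presumably fine here since PIC32 peripherals are native-endian. If ordering mattered, the ordered variant would look like this (a sketch, not part of the patch):
static inline void pic32_uart_writel(struct pic32_sport *sport,
				     u32 reg, u32 val)
{
	/* writel() adds the write barrier that __raw_writel() omits */
	writel(val, pic32_get_port(sport)->membase + reg);
}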
+
+/* pic32 uart mode register bits */
+#define PIC32_UART_MODE_ON BIT(15)
+#define PIC32_UART_MODE_FRZ BIT(14)
+#define PIC32_UART_MODE_SIDL BIT(13)
+#define PIC32_UART_MODE_IREN BIT(12)
+#define PIC32_UART_MODE_RTSMD BIT(11)
+#define PIC32_UART_MODE_RESV1 BIT(10)
+#define PIC32_UART_MODE_UEN1 BIT(9)
+#define PIC32_UART_MODE_UEN0 BIT(8)
+#define PIC32_UART_MODE_WAKE BIT(7)
+#define PIC32_UART_MODE_LPBK BIT(6)
+#define PIC32_UART_MODE_ABAUD BIT(5)
+#define PIC32_UART_MODE_RXINV BIT(4)
+#define PIC32_UART_MODE_BRGH BIT(3)
+#define PIC32_UART_MODE_PDSEL1 BIT(2)
+#define PIC32_UART_MODE_PDSEL0 BIT(1)
+#define PIC32_UART_MODE_STSEL BIT(0)
+
+/* pic32 uart status register bits */
+#define PIC32_UART_STA_UTXISEL1 BIT(15)
+#define PIC32_UART_STA_UTXISEL0 BIT(14)
+#define PIC32_UART_STA_UTXINV BIT(13)
+#define PIC32_UART_STA_URXEN BIT(12)
+#define PIC32_UART_STA_UTXBRK BIT(11)
+#define PIC32_UART_STA_UTXEN BIT(10)
+#define PIC32_UART_STA_UTXBF BIT(9)
+#define PIC32_UART_STA_TRMT BIT(8)
+#define PIC32_UART_STA_URXISEL1 BIT(7)
+#define PIC32_UART_STA_URXISEL0 BIT(6)
+#define PIC32_UART_STA_ADDEN BIT(5)
+#define PIC32_UART_STA_RIDLE BIT(4)
+#define PIC32_UART_STA_PERR BIT(3)
+#define PIC32_UART_STA_FERR BIT(2)
+#define PIC32_UART_STA_OERR BIT(1)
+#define PIC32_UART_STA_URXDA BIT(0)
+
+#endif /* __DT_PIC32_UART_H__ */
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 025a42644..f36e6df2f 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -17,7 +17,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -666,7 +666,7 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
struct uart_port *port = &s->p[portno].port;
do {
- unsigned int iir, msr, rxlen;
+ unsigned int iir, rxlen;
iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
if (iir & SC16IS7XX_IIR_NO_INT_BIT)
@@ -683,12 +683,6 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
if (rxlen)
sc16is7xx_handle_rx(port, rxlen, iir);
break;
-
- case SC16IS7XX_IIR_CTSRTS_SRC:
- msr = sc16is7xx_port_read(port, SC16IS7XX_MSR_REG);
- uart_handle_cts_change(port,
- !!(msr & SC16IS7XX_MSR_DCTS_BIT));
- break;
case SC16IS7XX_IIR_THRI_SRC:
sc16is7xx_handle_tx(port);
break;
@@ -1014,9 +1008,8 @@ static int sc16is7xx_startup(struct uart_port *port)
SC16IS7XX_EFCR_TXDISABLE_BIT,
0);
- /* Enable RX, TX, CTS change interrupts */
- val = SC16IS7XX_IER_RDI_BIT | SC16IS7XX_IER_THRI_BIT |
- SC16IS7XX_IER_CTSI_BIT;
+ /* Enable RX, TX interrupts */
+ val = SC16IS7XX_IER_RDI_BIT | SC16IS7XX_IER_THRI_BIT;
sc16is7xx_port_write(port, SC16IS7XX_IER_REG, val);
return 0;
@@ -1104,8 +1097,7 @@ static const struct uart_ops sc16is7xx_ops = {
static int sc16is7xx_gpio_get(struct gpio_chip *chip, unsigned offset)
{
unsigned int val;
- struct sc16is7xx_port *s = container_of(chip, struct sc16is7xx_port,
- gpio);
+ struct sc16is7xx_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[0].port;
val = sc16is7xx_port_read(port, SC16IS7XX_IOSTATE_REG);
@@ -1115,8 +1107,7 @@ static int sc16is7xx_gpio_get(struct gpio_chip *chip, unsigned offset)
static void sc16is7xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
{
- struct sc16is7xx_port *s = container_of(chip, struct sc16is7xx_port,
- gpio);
+ struct sc16is7xx_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[0].port;
sc16is7xx_port_update(port, SC16IS7XX_IOSTATE_REG, BIT(offset),
@@ -1126,8 +1117,7 @@ static void sc16is7xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
static int sc16is7xx_gpio_direction_input(struct gpio_chip *chip,
unsigned offset)
{
- struct sc16is7xx_port *s = container_of(chip, struct sc16is7xx_port,
- gpio);
+ struct sc16is7xx_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[0].port;
sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset), 0);
@@ -1138,8 +1128,7 @@ static int sc16is7xx_gpio_direction_input(struct gpio_chip *chip,
static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int val)
{
- struct sc16is7xx_port *s = container_of(chip, struct sc16is7xx_port,
- gpio);
+ struct sc16is7xx_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[0].port;
sc16is7xx_port_update(port, SC16IS7XX_IOSTATE_REG, BIT(offset),
@@ -1210,7 +1199,7 @@ static int sc16is7xx_probe(struct device *dev,
s->gpio.base = -1;
s->gpio.ngpio = devtype->nr_gpio;
s->gpio.can_sleep = 1;
- ret = gpiochip_add(&s->gpio);
+ ret = gpiochip_add_data(&s->gpio, s);
if (ret)
goto out_thread;
}
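This file's hunks are the standard conversion from container_of() to the gpiolib data API: gpiochip_add_data() associates a driver pointer with the chip at registration time, and every callback recovers it with gpiochip_get_data(). The pattern in miniature:
	/* registration: hand the driver struct to gpiolib */
	ret = gpiochip_add_data(&s->gpio, s);
	/* in any gpio_chip callback: recover it without container_of() */
	struct sc16is7xx_port *s = gpiochip_get_data(chip);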
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 1d6fc60ed..1dba6719d 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -206,10 +206,8 @@ static void set_dtr(struct tegra_uart_port *tup, bool active)
static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
{
struct tegra_uart_port *tup = to_tegra_uport(u);
- unsigned long mcr;
int dtr_enable;
- mcr = tup->mcr_shadow;
tup->rts_active = !!(mctrl & TIOCM_RTS);
set_rts(tup, tup->rts_active);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index a126a603b..a333c59cb 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -64,6 +64,41 @@ static int uart_dcd_enabled(struct uart_port *uport)
return !!(uport->status & UPSTAT_DCD_ENABLE);
}
+static inline struct uart_port *uart_port_ref(struct uart_state *state)
+{
+ if (atomic_add_unless(&state->refcount, 1, 0))
+ return state->uart_port;
+ return NULL;
+}
+
+static inline void uart_port_deref(struct uart_port *uport)
+{
+ if (uport && atomic_dec_and_test(&uport->state->refcount))
+ wake_up(&uport->state->remove_wait);
+}
+
+#define uart_port_lock(state, flags) \
+ ({ \
+ struct uart_port *__uport = uart_port_ref(state); \
+ if (__uport) \
+ spin_lock_irqsave(&__uport->lock, flags); \
+ __uport; \
+ })
+
+#define uart_port_unlock(uport, flags) \
+ ({ \
+ struct uart_port *__uport = uport; \
+ if (__uport) \
+ spin_unlock_irqrestore(&__uport->lock, flags); \
+ uart_port_deref(__uport); \
+ })
+
+static inline struct uart_port *uart_port_check(struct uart_state *state)
+{
+ lockdep_assert_held(&state->port.mutex);
+ return state->uart_port;
+}
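These helpers pair with a detach side that is outside this excerpt: uart_remove_one_port() is expected to clear state->uart_port under the port mutex, drop the initial reference, and sleep on remove_wait until every transient uart_port_ref() holder is gone. A sketch of that sequence (an assumption; the actual change lands elsewhere in this patch):
	/* sketch of the detach side */
	mutex_lock(&state->port.mutex);
	state->uart_port = NULL;	/* uart_port_check() users now see NULL */
	mutex_unlock(&state->port.mutex);
	atomic_dec(&state->refcount);	/* drop the initial reference */
	wait_event(state->remove_wait, !atomic_read(&state->refcount));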
+
/*
* This routine is used by the interrupt handler to schedule processing in
* the software interrupt portion of the driver.
@@ -82,12 +117,13 @@ void uart_write_wakeup(struct uart_port *port)
static void uart_stop(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->uart_port;
+ struct uart_port *port;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
- port->ops->stop_tx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ port = uart_port_lock(state, flags);
+ if (port)
+ port->ops->stop_tx(port);
+ uart_port_unlock(port, flags);
}
static void __uart_start(struct tty_struct *tty)
@@ -95,19 +131,19 @@ static void __uart_start(struct tty_struct *tty)
struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
- if (!uart_tx_stopped(port))
+ if (port && !uart_tx_stopped(port))
port->ops->start_tx(port);
}
static void uart_start(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->uart_port;
+ struct uart_port *port;
unsigned long flags;
- spin_lock_irqsave(&port->lock, flags);
+ port = uart_port_lock(state, flags);
__uart_start(tty);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock(port, flags);
}
static void
@@ -134,7 +170,7 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
int init_hw)
{
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport = uart_port_check(state);
unsigned long page;
int retval = 0;
@@ -196,7 +232,7 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state,
struct tty_port *port = &state->port;
int retval;
- if (port->flags & ASYNC_INITIALIZED)
+ if (tty_port_initialized(port))
return 0;
/*
@@ -207,7 +243,7 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state,
retval = uart_port_startup(tty, state, init_hw);
if (!retval) {
- set_bit(ASYNCB_INITIALIZED, &port->flags);
+ tty_port_set_initialized(port, 1);
clear_bit(TTY_IO_ERROR, &tty->flags);
} else if (retval > 0)
retval = 0;
@@ -219,10 +255,12 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state,
* This routine will shutdown a serial port; interrupts are disabled, and
* DTR is dropped if the hangup on close termio flag is on. Calls to
* uart_shutdown are serialised by the per-port semaphore.
+ *
+ * uport == NULL if uart_port has already been removed
*/
static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
{
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport = uart_port_check(state);
struct tty_port *port = &state->port;
/*
@@ -231,11 +269,13 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
if (tty)
set_bit(TTY_IO_ERROR, &tty->flags);
- if (test_and_clear_bit(ASYNCB_INITIALIZED, &port->flags)) {
+ if (tty_port_initialized(port)) {
+ tty_port_set_initialized(port, 0);
+
/*
* Turn off DTR and RTS early.
*/
- if (uart_console(uport) && tty)
+ if (uport && uart_console(uport) && tty)
uport->cons->cflag = tty->termios.c_cflag;
if (!tty || C_HUPCL(tty))
@@ -249,7 +289,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
* a DCD drop (hangup) at just the right time. Clear suspended bit so
* we don't try to resume a port that has been shutdown.
*/
- clear_bit(ASYNCB_SUSPENDED, &port->flags);
+ tty_port_set_suspended(port, 0);
/*
* Free the transmit buffer page.
@@ -441,7 +481,7 @@ EXPORT_SYMBOL(uart_get_divisor);
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
struct ktermios *old_termios)
{
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport = uart_port_check(state);
struct ktermios *termios;
int hw_stopped;
@@ -486,7 +526,7 @@ static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
static int uart_put_char(struct tty_struct *tty, unsigned char c)
{
struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->uart_port;
+ struct uart_port *port;
struct circ_buf *circ;
unsigned long flags;
int ret = 0;
@@ -495,13 +535,13 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c)
if (!circ->buf)
return 0;
- spin_lock_irqsave(&port->lock, flags);
- if (uart_circ_chars_free(circ) != 0) {
+ port = uart_port_lock(state, flags);
+ if (port && uart_circ_chars_free(circ) != 0) {
circ->buf[circ->head] = c;
circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
ret = 1;
}
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock(port, flags);
return ret;
}
@@ -528,14 +568,12 @@ static int uart_write(struct tty_struct *tty,
return -EL3HLT;
}
- port = state->uart_port;
circ = &state->xmit;
-
if (!circ->buf)
return 0;
- spin_lock_irqsave(&port->lock, flags);
- while (1) {
+ port = uart_port_lock(state, flags);
+ while (port) {
c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
if (count < c)
c = count;
@@ -549,32 +587,33 @@ static int uart_write(struct tty_struct *tty,
}
__uart_start(tty);
- spin_unlock_irqrestore(&port->lock, flags);
-
+ uart_port_unlock(port, flags);
return ret;
}
static int uart_write_room(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
+ struct uart_port *port;
unsigned long flags;
int ret;
- spin_lock_irqsave(&state->uart_port->lock, flags);
+ port = uart_port_lock(state, flags);
ret = uart_circ_chars_free(&state->xmit);
- spin_unlock_irqrestore(&state->uart_port->lock, flags);
+ uart_port_unlock(port, flags);
return ret;
}
static int uart_chars_in_buffer(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
+ struct uart_port *port;
unsigned long flags;
int ret;
- spin_lock_irqsave(&state->uart_port->lock, flags);
+ port = uart_port_lock(state, flags);
ret = uart_circ_chars_pending(&state->xmit);
- spin_unlock_irqrestore(&state->uart_port->lock, flags);
+ uart_port_unlock(port, flags);
return ret;
}
@@ -593,14 +632,15 @@ static void uart_flush_buffer(struct tty_struct *tty)
return;
}
- port = state->uart_port;
pr_debug("uart_flush_buffer(%d) called\n", tty->index);
- spin_lock_irqsave(&port->lock, flags);
+ port = uart_port_lock(state, flags);
+ if (!port)
+ return;
uart_circ_clear(&state->xmit);
if (port->ops->flush_buffer)
port->ops->flush_buffer(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock(port, flags);
tty_wakeup(tty);
}
@@ -611,9 +651,13 @@ static void uart_flush_buffer(struct tty_struct *tty)
static void uart_send_xchar(struct tty_struct *tty, char ch)
{
struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->uart_port;
+ struct uart_port *port;
unsigned long flags;
+ port = uart_port_ref(state);
+ if (!port)
+ return;
+
if (port->ops->send_xchar)
port->ops->send_xchar(port, ch);
else {
@@ -623,14 +667,19 @@ static void uart_send_xchar(struct tty_struct *tty, char ch)
port->ops->start_tx(port);
spin_unlock_irqrestore(&port->lock, flags);
}
+ uart_port_deref(port);
}
static void uart_throttle(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->uart_port;
+ struct uart_port *port;
upstat_t mask = 0;
+ port = uart_port_ref(state);
+ if (!port)
+ return;
+
if (I_IXOFF(tty))
mask |= UPSTAT_AUTOXOFF;
if (C_CRTSCTS(tty))
@@ -646,14 +695,20 @@ static void uart_throttle(struct tty_struct *tty)
if (mask & UPSTAT_AUTOXOFF)
uart_send_xchar(tty, STOP_CHAR(tty));
+
+ uart_port_deref(port);
}
static void uart_unthrottle(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->uart_port;
+ struct uart_port *port;
upstat_t mask = 0;
+ port = uart_port_ref(state);
+ if (!port)
+ return;
+
if (I_IXOFF(tty))
mask |= UPSTAT_AUTOXOFF;
if (C_CRTSCTS(tty))
@@ -669,12 +724,15 @@ static void uart_unthrottle(struct tty_struct *tty)
if (mask & UPSTAT_AUTOXOFF)
uart_send_xchar(tty, START_CHAR(tty));
+
+ uart_port_deref(port);
}
-static void uart_get_info(struct tty_port *port, struct serial_struct *retinfo)
+static int uart_get_info(struct tty_port *port, struct serial_struct *retinfo)
{
struct uart_state *state = container_of(port, struct uart_state, port);
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport;
+ int ret = -ENODEV;
memset(retinfo, 0, sizeof(*retinfo));
@@ -683,6 +741,10 @@ static void uart_get_info(struct tty_port *port, struct serial_struct *retinfo)
* occur as we go
*/
mutex_lock(&port->mutex);
+ uport = uart_port_check(state);
+ if (!uport)
+ goto out;
+
retinfo->type = uport->type;
retinfo->line = uport->line;
retinfo->port = uport->iobase;
@@ -701,7 +763,11 @@ static void uart_get_info(struct tty_port *port, struct serial_struct *retinfo)
retinfo->io_type = uport->iotype;
retinfo->iomem_reg_shift = uport->regshift;
retinfo->iomem_base = (void *)(unsigned long)uport->mapbase;
+
+ ret = 0;
+out:
mutex_unlock(&port->mutex);
+ return ret;
}
static int uart_get_info_user(struct tty_port *port,
@@ -709,7 +775,8 @@ static int uart_get_info_user(struct tty_port *port,
{
struct serial_struct tmp;
- uart_get_info(port, &tmp);
+ if (uart_get_info(port, &tmp) < 0)
+ return -EIO;
if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
return -EFAULT;
@@ -720,13 +787,16 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
struct uart_state *state,
struct serial_struct *new_info)
{
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport = uart_port_check(state);
unsigned long new_port;
unsigned int change_irq, change_port, closing_wait;
unsigned int old_custom_divisor, close_delay;
upf_t old_flags, new_flags;
int retval = 0;
+ if (!uport)
+ return -EIO;
+
new_port = new_info->port;
if (HIGH_BITS_OFFSET)
new_port += (unsigned long) new_info->port_high << HIGH_BITS_OFFSET;
@@ -886,7 +956,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
retval = 0;
if (uport->type == PORT_UNKNOWN)
goto exit;
- if (port->flags & ASYNC_INITIALIZED) {
+ if (tty_port_initialized(port)) {
if (((old_flags ^ uport->flags) & UPF_SPD_MASK) ||
old_custom_divisor != uport->custom_divisor) {
/*
@@ -936,13 +1006,11 @@ static int uart_set_info_user(struct tty_struct *tty, struct uart_state *state,
* @tty: tty associated with the UART
* @state: UART being queried
* @value: returned modem value
- *
- * Note: uart_ioctl protects us against hangups.
*/
static int uart_get_lsr_info(struct tty_struct *tty,
struct uart_state *state, unsigned int __user *value)
{
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport = uart_port_check(state);
unsigned int result;
result = uport->ops->tx_empty(uport);
@@ -965,18 +1033,22 @@ static int uart_tiocmget(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport;
int result = -EIO;
mutex_lock(&port->mutex);
- if (!(tty->flags & (1 << TTY_IO_ERROR))) {
+ uport = uart_port_check(state);
+ if (!uport)
+ goto out;
+
+ if (!tty_io_error(tty)) {
result = uport->mctrl;
spin_lock_irq(&uport->lock);
result |= uport->ops->get_mctrl(uport);
spin_unlock_irq(&uport->lock);
}
+out:
mutex_unlock(&port->mutex);
-
return result;
}
@@ -984,15 +1056,20 @@ static int
uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
{
struct uart_state *state = tty->driver_data;
- struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
+ struct uart_port *uport;
int ret = -EIO;
mutex_lock(&port->mutex);
- if (!(tty->flags & (1 << TTY_IO_ERROR))) {
+ uport = uart_port_check(state);
+ if (!uport)
+ goto out;
+
+ if (!tty_io_error(tty)) {
uart_update_mctrl(uport, set, clear);
ret = 0;
}
+out:
mutex_unlock(&port->mutex);
return ret;
}
@@ -1001,21 +1078,26 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport;
+ int ret = -EIO;
mutex_lock(&port->mutex);
+ uport = uart_port_check(state);
+ if (!uport)
+ goto out;
if (uport->type != PORT_UNKNOWN)
uport->ops->break_ctl(uport, break_state);
-
+ ret = 0;
+out:
mutex_unlock(&port->mutex);
- return 0;
+ return ret;
}
static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state)
{
- struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
+ struct uart_port *uport;
int flags, ret;
if (!capable(CAP_SYS_ADMIN))
@@ -1029,6 +1111,12 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state)
if (mutex_lock_interruptible(&port->mutex))
return -ERESTARTSYS;
+ uport = uart_port_check(state);
+ if (!uport) {
+ ret = -EIO;
+ goto out;
+ }
+
ret = -EBUSY;
if (tty_port_users(port) == 1) {
uart_shutdown(tty, state);
@@ -1052,6 +1140,7 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state)
ret = uart_startup(tty, state, 1);
}
+out:
mutex_unlock(&port->mutex);
return ret;
}
@@ -1074,10 +1163,9 @@ static void uart_enable_ms(struct uart_port *uport)
* FIXME: This wants extracting into a common all driver implementation
* of TIOCMWAIT using tty_port.
*/
-static int
-uart_wait_modem_status(struct uart_state *state, unsigned long arg)
+static int uart_wait_modem_status(struct uart_state *state, unsigned long arg)
{
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport;
struct tty_port *port = &state->port;
DECLARE_WAITQUEUE(wait, current);
struct uart_icount cprev, cnow;
@@ -1086,6 +1174,9 @@ uart_wait_modem_status(struct uart_state *state, unsigned long arg)
/*
* note the counters on entry
*/
+ uport = uart_port_ref(state);
+ if (!uport)
+ return -EIO;
spin_lock_irq(&uport->lock);
memcpy(&cprev, &uport->icount, sizeof(struct uart_icount));
uart_enable_ms(uport);
@@ -1119,6 +1210,7 @@ uart_wait_modem_status(struct uart_state *state, unsigned long arg)
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&port->delta_msr_wait, &wait);
+ uart_port_deref(uport);
return ret;
}
@@ -1134,11 +1226,15 @@ static int uart_get_icount(struct tty_struct *tty,
{
struct uart_state *state = tty->driver_data;
struct uart_icount cnow;
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport;
+ uport = uart_port_ref(state);
+ if (!uport)
+ return -EIO;
spin_lock_irq(&uport->lock);
memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
spin_unlock_irq(&uport->lock);
+ uart_port_deref(uport);
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
@@ -1200,11 +1296,11 @@ static int uart_set_rs485_config(struct uart_port *port,
* Called via sys_ioctl. We can use spin_lock_irq() here.
*/
static int
-uart_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg)
+uart_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
+ struct uart_port *uport;
void __user *uarg = (void __user *)arg;
int ret = -ENOIOCTLCMD;
@@ -1238,7 +1334,7 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd,
if (ret != -ENOIOCTLCMD)
goto out;
- if (tty->flags & (1 << TTY_IO_ERROR)) {
+ if (tty_io_error(tty)) {
ret = -EIO;
goto out;
}
@@ -1256,8 +1352,9 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd,
goto out;
mutex_lock(&port->mutex);
+ uport = uart_port_check(state);
- if (tty->flags & (1 << TTY_IO_ERROR)) {
+ if (!uport || tty_io_error(tty)) {
ret = -EIO;
goto out_up;
}
@@ -1273,19 +1370,17 @@ uart_ioctl(struct tty_struct *tty, unsigned int cmd,
break;
case TIOCGRS485:
- ret = uart_get_rs485_config(state->uart_port, uarg);
+ ret = uart_get_rs485_config(uport, uarg);
break;
case TIOCSRS485:
- ret = uart_set_rs485_config(state->uart_port, uarg);
+ ret = uart_set_rs485_config(uport, uarg);
break;
- default: {
- struct uart_port *uport = state->uart_port;
+ default:
if (uport->ops->ioctl)
ret = uport->ops->ioctl(uport, cmd, arg);
break;
}
- }
out_up:
mutex_unlock(&port->mutex);
out:
@@ -1295,24 +1390,29 @@ out:
static void uart_set_ldisc(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport;
- if (uport->ops->set_ldisc) {
- mutex_lock(&state->port.mutex);
+ mutex_lock(&state->port.mutex);
+ uport = uart_port_check(state);
+ if (uport && uport->ops->set_ldisc)
uport->ops->set_ldisc(uport, &tty->termios);
- mutex_unlock(&state->port.mutex);
- }
+ mutex_unlock(&state->port.mutex);
}
static void uart_set_termios(struct tty_struct *tty,
struct ktermios *old_termios)
{
struct uart_state *state = tty->driver_data;
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport;
unsigned int cflag = tty->termios.c_cflag;
unsigned int iflag_mask = IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK;
bool sw_changed = false;
+ mutex_lock(&state->port.mutex);
+ uport = uart_port_check(state);
+ if (!uport)
+ goto out;
+
/*
* Drivers doing software flow control also need to know
* about changes to these input settings.
@@ -1335,12 +1435,10 @@ static void uart_set_termios(struct tty_struct *tty,
tty->termios.c_ispeed == old_termios->c_ispeed &&
((tty->termios.c_iflag ^ old_termios->c_iflag) & iflag_mask) == 0 &&
!sw_changed) {
- return;
+ goto out;
}
- mutex_lock(&state->port.mutex);
uart_change_speed(tty, state, old_termios);
- mutex_unlock(&state->port.mutex);
/* reload cflag from termios; port driver may have overridden flags */
cflag = tty->termios.c_cflag;
@@ -1350,17 +1448,18 @@ static void uart_set_termios(struct tty_struct *tty,
/* Handle transition away from B0 status */
else if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
unsigned int mask = TIOCM_DTR;
- if (!(cflag & CRTSCTS) || !test_bit(TTY_THROTTLED, &tty->flags))
+ if (!(cflag & CRTSCTS) || !tty_throttled(tty))
mask |= TIOCM_RTS;
uart_set_mctrl(uport, mask);
}
+out:
+ mutex_unlock(&state->port.mutex);
}
/*
* Calls to uart_close() are serialised via the tty_lock in
* drivers/tty/tty_io.c:tty_release()
* drivers/tty/tty_io.c:do_tty_hangup()
- * This runs from a workqueue and can sleep for a _short_ time only.
*/
static void uart_close(struct tty_struct *tty, struct file *filp)
{
@@ -1379,18 +1478,21 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
return;
}
- uport = state->uart_port;
port = &state->port;
pr_debug("uart_close(%d) called\n", tty->index);
- if (!port->count || tty_port_close_start(port, tty, filp) == 0)
+ if (tty_port_close_start(port, tty, filp) == 0)
return;
+ mutex_lock(&port->mutex);
+ uport = uart_port_check(state);
+
/*
* At this point, we stop accepting input. To do this, we
* disable the receive line status interrupts.
*/
- if (port->flags & ASYNC_INITIALIZED) {
+ if (tty_port_initialized(port) &&
+ !WARN(!uport, "detached port still initialized!\n")) {
spin_lock_irq(&uport->lock);
uport->ops->stop_rx(uport);
spin_unlock_irq(&uport->lock);
@@ -1402,7 +1504,6 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
uart_wait_until_sent(tty, uport->timeout);
}
- mutex_lock(&port->mutex);
uart_shutdown(tty, state);
tty_port_tty_set(port, NULL);
@@ -1413,17 +1514,17 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
if (port->close_delay)
msleep_interruptible(jiffies_to_msecs(port->close_delay));
spin_lock_irq(&port->lock);
- } else if (!uart_console(uport)) {
+ } else if (uport && !uart_console(uport)) {
spin_unlock_irq(&port->lock);
uart_change_pm(state, UART_PM_STATE_OFF);
spin_lock_irq(&port->lock);
}
+ spin_unlock_irq(&port->lock);
+ tty_port_set_active(port, 0);
/*
* Wake up anyone trying to open this port.
*/
- clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
- spin_unlock_irq(&port->lock);
wake_up_interruptible(&port->open_wait);
mutex_unlock(&port->mutex);
@@ -1435,11 +1536,14 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct uart_state *state = tty->driver_data;
- struct uart_port *port = state->uart_port;
+ struct uart_port *port;
unsigned long char_time, expire;
- if (port->type == PORT_UNKNOWN || port->fifosize == 0)
+ port = uart_port_ref(state);
+ if (!port || port->type == PORT_UNKNOWN || port->fifosize == 0) {
+ uart_port_deref(port);
return;
+ }
/*
* Set the check interval to be 1/5 of the estimated time to
@@ -1485,6 +1589,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
if (time_after(jiffies, expire))
break;
}
+ uart_port_deref(port);
}
/*
@@ -1496,20 +1601,24 @@ static void uart_hangup(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port = &state->port;
+ struct uart_port *uport;
unsigned long flags;
pr_debug("uart_hangup(%d)\n", tty->index);
mutex_lock(&port->mutex);
- if (port->flags & ASYNC_NORMAL_ACTIVE) {
+ uport = uart_port_check(state);
+ WARN(!uport, "hangup of detached port!\n");
+
+ if (tty_port_active(port)) {
uart_flush_buffer(tty);
uart_shutdown(tty, state);
spin_lock_irqsave(&port->lock, flags);
port->count = 0;
- clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
spin_unlock_irqrestore(&port->lock, flags);
+ tty_port_set_active(port, 0);
tty_port_tty_set(port, NULL);
- if (!uart_console(state->uart_port))
+ if (uport && !uart_console(uport))
uart_change_pm(state, UART_PM_STATE_OFF);
wake_up_interruptible(&port->open_wait);
wake_up_interruptible(&port->delta_msr_wait);
@@ -1517,10 +1626,11 @@ static void uart_hangup(struct tty_struct *tty)
mutex_unlock(&port->mutex);
}
+/* uport == NULL if uart_port has already been removed */
static void uart_port_shutdown(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport = uart_port_check(state);
/*
* clear delta_msr_wait queue to avoid mem leaks: we may free
@@ -1534,23 +1644,36 @@ static void uart_port_shutdown(struct tty_port *port)
/*
* Free the IRQ and disable the port.
*/
- uport->ops->shutdown(uport);
+ if (uport)
+ uport->ops->shutdown(uport);
/*
* Ensure that the IRQ handler isn't running on another CPU.
*/
- synchronize_irq(uport->irq);
+ if (uport)
+ synchronize_irq(uport->irq);
}
static int uart_carrier_raised(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport;
int mctrl;
+
+ uport = uart_port_ref(state);
+ /*
+ * Should never observe uport == NULL since checks for hangup should
+ * abort the tty_port_block_til_ready() loop before checking for carrier
+ * raised -- but report carrier raised if it happens anyway so that
+ * open will continue and not sleep
+ */
+ if (WARN_ON(!uport))
+ return 1;
spin_lock_irq(&uport->lock);
uart_enable_ms(uport);
mctrl = uport->ops->get_mctrl(uport);
spin_unlock_irq(&uport->lock);
+ uart_port_deref(uport);
if (mctrl & TIOCM_CAR)
return 1;
return 0;
@@ -1559,12 +1682,18 @@ static int uart_carrier_raised(struct tty_port *port)
static void uart_dtr_rts(struct tty_port *port, int onoff)
{
struct uart_state *state = container_of(port, struct uart_state, port);
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport;
+
+ uport = uart_port_ref(state);
+ if (!uport)
+ return;
if (onoff)
uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
else
uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
+
+ uart_port_deref(uport);
}
/*
@@ -1583,6 +1712,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
int retval, line = tty->index;
struct uart_state *state = drv->state + line;
struct tty_port *port = &state->port;
+ struct uart_port *uport;
pr_debug("uart_open(%d) called\n", line);
@@ -1602,15 +1732,15 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
goto end;
}
- if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
+ uport = uart_port_check(state);
+ if (!uport || uport->flags & UPF_DEAD) {
retval = -ENXIO;
goto err_unlock;
}
tty->driver_data = state;
- state->uart_port->state = state;
- state->port.low_latency =
- (state->uart_port->flags & UPF_LOW_LATENCY) ? 1 : 0;
+ uport->state = state;
+ port->low_latency = (uport->flags & UPF_LOW_LATENCY) ? 1 : 0;
tty_port_tty_set(port, tty);
/*
@@ -1649,13 +1779,15 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
struct uart_state *state = drv->state + i;
struct tty_port *port = &state->port;
enum uart_pm_state pm_state;
- struct uart_port *uport = state->uart_port;
+ struct uart_port *uport;
char stat_buf[32];
unsigned int status;
int mmio;
+ mutex_lock(&port->mutex);
+ uport = uart_port_check(state);
if (!uport)
- return;
+ goto out;
mmio = uport->iotype >= UPIO_MEM;
seq_printf(m, "%d: uart:%s %s%08llX irq:%d",
@@ -1667,11 +1799,10 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
if (uport->type == PORT_UNKNOWN) {
seq_putc(m, '\n');
- return;
+ goto out;
}
if (capable(CAP_SYS_ADMIN)) {
- mutex_lock(&port->mutex);
pm_state = state->pm_state;
if (pm_state != UART_PM_STATE_ON)
uart_change_pm(state, UART_PM_STATE_ON);
@@ -1680,7 +1811,6 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
spin_unlock_irq(&uport->lock);
if (pm_state != UART_PM_STATE_ON)
uart_change_pm(state, pm_state);
- mutex_unlock(&port->mutex);
seq_printf(m, " tx:%d rx:%d",
uport->icount.tx, uport->icount.rx);
@@ -1718,6 +1848,8 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
seq_putc(m, '\n');
#undef STATBIT
#undef INFOBIT
+out:
+ mutex_unlock(&port->mutex);
}
static int uart_proc_show(struct seq_file *m, void *v)
@@ -1954,10 +2086,10 @@ EXPORT_SYMBOL_GPL(uart_set_options);
static void uart_change_pm(struct uart_state *state,
enum uart_pm_state pm_state)
{
- struct uart_port *port = state->uart_port;
+ struct uart_port *port = uart_port_check(state);
if (state->pm_state != pm_state) {
- if (port->ops->pm)
+ if (port && port->ops->pm)
port->ops->pm(port, pm_state, state->pm_state);
state->pm_state = pm_state;
}
@@ -2003,12 +2135,12 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
uport->suspended = 1;
- if (port->flags & ASYNC_INITIALIZED) {
+ if (tty_port_initialized(port)) {
const struct uart_ops *ops = uport->ops;
int tries;
- set_bit(ASYNCB_SUSPENDED, &port->flags);
- clear_bit(ASYNCB_INITIALIZED, &port->flags);
+ tty_port_set_suspended(port, 1);
+ tty_port_set_initialized(port, 0);
spin_lock_irq(&uport->lock);
ops->stop_tx(uport);
@@ -2088,7 +2220,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
console_start(uport->cons);
}
- if (port->flags & ASYNC_SUSPENDED) {
+ if (tty_port_suspended(port)) {
const struct uart_ops *ops = uport->ops;
int ret;
@@ -2107,7 +2239,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
ops->set_mctrl(uport, uport->mctrl);
ops->start_tx(uport);
spin_unlock_irq(&uport->lock);
- set_bit(ASYNCB_INITIALIZED, &port->flags);
+ tty_port_set_initialized(port, 1);
} else {
/*
* Failed to resume - maybe hardware went away?
@@ -2118,7 +2250,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
}
}
- clear_bit(ASYNCB_SUSPENDED, &port->flags);
+ tty_port_set_suspended(port, 0);
}
mutex_unlock(&port->mutex);
@@ -2228,42 +2360,42 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options)
{
struct uart_driver *drv = driver->driver_state;
struct uart_state *state = drv->state + line;
+ struct tty_port *tport;
struct uart_port *port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
- int ret;
+ int ret = 0;
- if (!state || !state->uart_port)
+ if (!state)
return -1;
- port = state->uart_port;
- if (!(port->ops->poll_get_char && port->ops->poll_put_char))
- return -1;
+ tport = &state->port;
+ mutex_lock(&tport->mutex);
- if (port->ops->poll_init) {
- struct tty_port *tport = &state->port;
+ port = uart_port_check(state);
+ if (!port || !(port->ops->poll_get_char && port->ops->poll_put_char)) {
+ ret = -1;
+ goto out;
+ }
- ret = 0;
- mutex_lock(&tport->mutex);
+ if (port->ops->poll_init) {
/*
- * We don't set ASYNCB_INITIALIZED as we only initialized the
- * hw, e.g. state->xmit is still uninitialized.
+ * We don't set initialized as we only initialized the hw,
+ * e.g. state->xmit is still uninitialized.
*/
- if (!test_bit(ASYNCB_INITIALIZED, &tport->flags))
+ if (!tty_port_initialized(tport))
ret = port->ops->poll_init(port);
- mutex_unlock(&tport->mutex);
- if (ret)
- return ret;
}
- if (options) {
+ if (!ret && options) {
uart_parse_options(options, &baud, &parity, &bits, &flow);
- return uart_set_options(port, NULL, baud, parity, bits, flow);
+ ret = uart_set_options(port, NULL, baud, parity, bits, flow);
}
-
- return 0;
+out:
+ mutex_unlock(&tport->mutex);
+ return ret;
}
static int uart_poll_get_char(struct tty_driver *driver, int line)
@@ -2271,12 +2403,15 @@ static int uart_poll_get_char(struct tty_driver *driver, int line)
struct uart_driver *drv = driver->driver_state;
struct uart_state *state = drv->state + line;
struct uart_port *port;
+ int ret = -1;
- if (!state || !state->uart_port)
- return -1;
-
- port = state->uart_port;
- return port->ops->poll_get_char(port);
+ if (state) {
+ port = uart_port_ref(state);
+ if (port)
+ ret = port->ops->poll_get_char(port);
+ uart_port_deref(port);
+ }
+ return ret;
}
static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
@@ -2285,14 +2420,17 @@ static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
struct uart_state *state = drv->state + line;
struct uart_port *port;
- if (!state || !state->uart_port)
+ if (!state)
return;
- port = state->uart_port;
+ port = uart_port_ref(state);
+ if (!port)
+ return;
if (ch == '\n')
port->ops->poll_put_char(port, '\r');
port->ops->poll_put_char(port, ch);
+ uart_port_deref(port);
}
#endif
@@ -2639,6 +2777,8 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
}
/* Link the port to the driver state table and vice versa */
+ atomic_set(&state->refcount, 1);
+ init_waitqueue_head(&state->remove_wait);
state->uart_port = uport;
uport->state = state;
@@ -2711,15 +2851,12 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state = drv->state + uport->line;
struct tty_port *port = &state->port;
+ struct uart_port *uart_port;
struct tty_struct *tty;
int ret = 0;
BUG_ON(in_interrupt());
- if (state->uart_port != uport)
- dev_alert(uport->dev, "Removing wrong port: %p != %p\n",
- state->uart_port, uport);
-
mutex_lock(&port_mutex);
/*
@@ -2727,7 +2864,12 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
* succeeding while we shut down the port.
*/
mutex_lock(&port->mutex);
- if (!state->uart_port) {
+ uart_port = uart_port_check(state);
+ if (uart_port != uport)
+ dev_alert(uport->dev, "Removing wrong port: %p != %p\n",
+ uart_port, uport);
+
+ if (!uart_port) {
mutex_unlock(&port->mutex);
ret = -EINVAL;
goto out;
@@ -2764,7 +2906,11 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
*/
uport->type = PORT_UNKNOWN;
+ mutex_lock(&port->mutex);
+ WARN_ON(atomic_dec_return(&state->refcount) < 0);
+ wait_event(state->remove_wait, !atomic_read(&state->refcount));
state->uart_port = NULL;
+ mutex_unlock(&port->mutex);
out:
mutex_unlock(&port_mutex);
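
The serial_core.c changes above all follow one discipline: a naked state->uart_port dereference is replaced either by uart_port_check(), which is only valid while port->mutex is held, or by the uart_port_ref()/uart_port_deref() pair, which pins the port in paths that cannot take the mutex (poll hooks, wait loops). The refcount and remove_wait fields initialized in uart_add_one_port() and drained in uart_remove_one_port() carry the scheme. A minimal sketch of the helpers, simplified from the 4.7 tree rather than quoted verbatim:

	static inline struct uart_port *uart_port_check(struct uart_state *state)
	{
		/* Caller must hold state->port.mutex. */
		return state->uart_port;
	}

	static inline struct uart_port *uart_port_ref(struct uart_state *state)
	{
		/* Pin the port; fails once removal has begun. */
		if (atomic_add_unless(&state->refcount, 1, 0))
			return state->uart_port;
		return NULL;
	}

	static inline void uart_port_deref(struct uart_port *uport)
	{
		/* Dropping the last reference wakes uart_remove_one_port(). */
		if (uport && atomic_dec_and_test(&uport->state->refcount))
			wake_up(&uport->state->remove_wait);
	}

Every ref must be paired with a deref on all exit paths, which is why uart_wait_until_sent() above derefs even on its early return.
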
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 02147361e..e8dd5097d 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -43,8 +43,6 @@ static const struct {
{ "rng", TIOCM_RNG, false, },
{ "rts", TIOCM_RTS, true, },
{ "dtr", TIOCM_DTR, true, },
- { "out1", TIOCM_OUT1, true, },
- { "out2", TIOCM_OUT2, true, },
};
void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
@@ -125,9 +123,12 @@ static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context)
struct uart_port *port = gpios->port;
u32 mctrl = gpios->mctrl_prev;
u32 mctrl_diff;
+ unsigned long flags;
mctrl_gpio_get(gpios, &mctrl);
+ spin_lock_irqsave(&port->lock, flags);
+
mctrl_diff = mctrl ^ gpios->mctrl_prev;
gpios->mctrl_prev = mctrl;
@@ -147,6 +148,8 @@ static irqreturn_t mctrl_gpio_irq_handle(int irq, void *context)
wake_up_interruptible(&port->state->port.delta_msr_wait);
}
+ spin_unlock_irqrestore(&port->lock, flags);
+
return IRQ_HANDLED;
}
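
The locking added to mctrl_gpio_irq_handle() pairs with readers elsewhere in this series: uart_get_icount() and uart_tiocmget() snapshot counter and modem state under the same uport->lock, so the handler's read-modify-write of mctrl_prev and the icount fields must not interleave with them. A hypothetical reader showing the pairing (snapshot_msr_counts is illustrative and not part of this patch):

	static void snapshot_msr_counts(struct uart_port *port,
					struct uart_icount *snap)
	{
		unsigned long flags;

		/* Same lock the gpio irq handler now holds while it
		 * updates icount, so this copy is a consistent snapshot. */
		spin_lock_irqsave(&port->lock, flags);
		*snap = port->icount;
		spin_unlock_irqrestore(&port->lock, flags);
	}
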
diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h
index 9716db283..332a33ab0 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.h
+++ b/drivers/tty/serial/serial_mctrl_gpio.h
@@ -32,8 +32,6 @@ enum mctrl_gpio_idx {
UART_GPIO_RI = UART_GPIO_RNG,
UART_GPIO_RTS,
UART_GPIO_DTR,
- UART_GPIO_OUT1,
- UART_GPIO_OUT2,
UART_GPIO_MAX,
};
@@ -62,7 +60,7 @@ struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx);
/*
- * Request and set direction of modem control lines GPIOs and sets up irq
+ * Request and set direction of modem control line GPIOs and set up irq
* handling.
* devm_* functions are used, so there's no need to call mctrl_gpio_free().
* Returns a pointer to the allocated mctrl structure if ok, -ENOMEM on
@@ -71,7 +69,7 @@ struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx);
/*
- * Request and set direction of modem control lines GPIOs.
+ * Request and set direction of modem control line GPIOs.
* devm_* functions are used, so there's no need to call mctrl_gpio_free().
* Returns a pointer to the allocated mctrl structure if ok, -ENOMEM on
* allocation error.
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index c6657de78..b186c9c4f 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -1264,6 +1264,7 @@ MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
static int sirfsoc_uart_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct sirfsoc_uart_port *sirfport;
struct uart_port *port;
struct resource *res;
@@ -1276,13 +1277,13 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
};
const struct of_device_id *match;
- match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
+ match = of_match_node(sirfsoc_uart_ids, np);
sirfport = devm_kzalloc(&pdev->dev, sizeof(*sirfport), GFP_KERNEL);
if (!sirfport) {
ret = -ENOMEM;
goto err;
}
- sirfport->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
+ sirfport->port.line = of_alias_get_id(np, "serial");
sirf_ports[sirfport->port.line] = sirfport;
sirfport->port.iotype = UPIO_MEM;
sirfport->port.flags = UPF_BOOT_AUTOCONF;
@@ -1291,25 +1292,25 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
port->private_data = sirfport;
sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;
- sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
- "sirf,uart-has-rtscts");
- if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart") ||
- of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-uart"))
+ sirfport->hw_flow_ctrl =
+ of_property_read_bool(np, "uart-has-rtscts") ||
+ of_property_read_bool(np, "sirf,uart-has-rtscts") /* deprecated */;
+ if (of_device_is_compatible(np, "sirf,prima2-uart") ||
+ of_device_is_compatible(np, "sirf,atlas7-uart"))
sirfport->uart_reg->uart_type = SIRF_REAL_UART;
- if (of_device_is_compatible(pdev->dev.of_node,
- "sirf,prima2-usp-uart") || of_device_is_compatible(
- pdev->dev.of_node, "sirf,atlas7-usp-uart")) {
+ if (of_device_is_compatible(np, "sirf,prima2-usp-uart") ||
+ of_device_is_compatible(np, "sirf,atlas7-usp-uart")) {
sirfport->uart_reg->uart_type = SIRF_USP_UART;
if (!sirfport->hw_flow_ctrl)
goto usp_no_flow_control;
- if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
- sirfport->cts_gpio = of_get_named_gpio(
- pdev->dev.of_node, "cts-gpios", 0);
+ if (of_find_property(np, "cts-gpios", NULL))
+ sirfport->cts_gpio =
+ of_get_named_gpio(np, "cts-gpios", 0);
else
sirfport->cts_gpio = -1;
- if (of_find_property(pdev->dev.of_node, "rts-gpios", NULL))
- sirfport->rts_gpio = of_get_named_gpio(
- pdev->dev.of_node, "rts-gpios", 0);
+ if (of_find_property(np, "rts-gpios", NULL))
+ sirfport->rts_gpio =
+ of_get_named_gpio(np, "rts-gpios", 0);
else
sirfport->rts_gpio = -1;
@@ -1336,13 +1337,11 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
gpio_direction_output(sirfport->rts_gpio, 1);
}
usp_no_flow_control:
- if (of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-uart") ||
- of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-usp-uart"))
+ if (of_device_is_compatible(np, "sirf,atlas7-uart") ||
+ of_device_is_compatible(np, "sirf,atlas7-usp-uart"))
sirfport->is_atlas7 = true;
- if (of_property_read_u32(pdev->dev.of_node,
- "fifosize",
- &port->fifosize)) {
+ if (of_property_read_u32(np, "fifosize", &port->fifosize)) {
dev_err(&pdev->dev,
"Unable to find fifosize in uart node.\n");
ret = -EFAULT;
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 18971063f..699447aa8 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -654,7 +654,7 @@ static int sprd_probe_dt_alias(int index, struct device *dev)
return ret;
ret = of_alias_get_id(np, "serial");
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
ret = index;
else if (ret >= ARRAY_SIZE(sprd_port) || sprd_port[ret] != NULL) {
dev_warn(dev, "requested serial port %d not available.\n", ret);
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index d08baa668..05089b6c2 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -72,7 +72,7 @@ static void uartlite_outbe32(u32 val, void __iomem *addr)
iowrite32be(val, addr);
}
-static struct uartlite_reg_ops uartlite_be = {
+static const struct uartlite_reg_ops uartlite_be = {
.in = uartlite_inbe32,
.out = uartlite_outbe32,
};
@@ -87,21 +87,21 @@ static void uartlite_outle32(u32 val, void __iomem *addr)
iowrite32(val, addr);
}
-static struct uartlite_reg_ops uartlite_le = {
+static const struct uartlite_reg_ops uartlite_le = {
.in = uartlite_inle32,
.out = uartlite_outle32,
};
static inline u32 uart_in32(u32 offset, struct uart_port *port)
{
- struct uartlite_reg_ops *reg_ops = port->private_data;
+ const struct uartlite_reg_ops *reg_ops = port->private_data;
return reg_ops->in(port->membase + offset);
}
static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
{
- struct uartlite_reg_ops *reg_ops = port->private_data;
+ const struct uartlite_reg_ops *reg_ops = port->private_data;
reg_ops->out(val, port->membase + offset);
}
@@ -345,13 +345,13 @@ static int ulite_request_port(struct uart_port *port)
return -EBUSY;
}
- port->private_data = &uartlite_be;
+ port->private_data = (void *)&uartlite_be;
ret = uart_in32(ULITE_CONTROL, port);
uart_out32(ULITE_CONTROL_RST_TX, ULITE_CONTROL, port);
ret = uart_in32(ULITE_STATUS, port);
/* Endianness detection */
if ((ret & ULITE_STATUS_TXEMPTY) != ULITE_STATUS_TXEMPTY)
- port->private_data = &uartlite_le;
+ port->private_data = (void *)&uartlite_le;
return 0;
}
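
The uartlite hunks make the register-accessor tables const; the (void *) casts are needed only because uart_port.private_data is a plain void *. The surrounding probe logic in ulite_request_port() doubles as endianness detection: a freshly reset transmitter must report TX-empty, so if that bit does not read back through the big-endian accessors the registers are evidently little-endian. The idea, pulled out into a hypothetical helper (the real driver keeps it inline):

	static void ulite_detect_endianness(struct uart_port *port)
	{
		/* Assume big-endian accessors, then verify: after a TX
		 * reset, a working STATUS read must show TX-empty. */
		port->private_data = (void *)&uartlite_be;
		uart_out32(ULITE_CONTROL_RST_TX, ULITE_CONTROL, port);
		if (!(uart_in32(ULITE_STATUS, port) & ULITE_STATUS_TXEMPTY))
			port->private_data = (void *)&uartlite_le;
	}
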
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index f5476e270..c13e27ecb 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -1340,7 +1340,7 @@ static void mgsl_isr_io_pin( struct mgsl_struct *info )
wake_up_interruptible(&info->status_event_wait_q);
wake_up_interruptible(&info->event_wait_q);
- if ( (info->port.flags & ASYNC_CHECK_CD) &&
+ if (tty_port_check_carrier(&info->port) &&
(status & MISCSTATUS_DCD_LATCHED) ) {
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s CD now %s...", info->device_name,
@@ -1361,8 +1361,7 @@ static void mgsl_isr_io_pin( struct mgsl_struct *info )
if (status & MISCSTATUS_CTS) {
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("CTS tx start...");
- if (info->port.tty)
- info->port.tty->hw_stopped = 0;
+ info->port.tty->hw_stopped = 0;
usc_start_transmitter(info);
info->pending_bh |= BH_TRANSMIT;
return;
@@ -1749,13 +1748,13 @@ static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
static int startup(struct mgsl_struct * info)
{
int retval = 0;
-
+
if ( debug_level >= DEBUG_LEVEL_INFO )
printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
-
- if (info->port.flags & ASYNC_INITIALIZED)
+
+ if (tty_port_initialized(&info->port))
return 0;
-
+
if (!info->xmit_buf) {
/* allocate a page of memory for a transmit buffer */
info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
@@ -1788,14 +1787,13 @@ static int startup(struct mgsl_struct * info)
/* program hardware for current parameters */
mgsl_change_params(info);
-
+
if (info->port.tty)
clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
- info->port.flags |= ASYNC_INITIALIZED;
-
+ tty_port_set_initialized(&info->port, 1);
+
return 0;
-
} /* end of startup() */
/* shutdown()
@@ -1808,8 +1806,8 @@ static int startup(struct mgsl_struct * info)
static void shutdown(struct mgsl_struct * info)
{
unsigned long flags;
-
- if (!(info->port.flags & ASYNC_INITIALIZED))
+
+ if (!tty_port_initialized(&info->port))
return;
if (debug_level >= DEBUG_LEVEL_INFO)
@@ -1853,13 +1851,12 @@ static void shutdown(struct mgsl_struct * info)
spin_unlock_irqrestore(&info->irq_spinlock,flags);
- mgsl_release_resources(info);
-
+ mgsl_release_resources(info);
+
if (info->port.tty)
set_bit(TTY_IO_ERROR, &info->port.tty->flags);
- info->port.flags &= ~ASYNC_INITIALIZED;
-
+ tty_port_set_initialized(&info->port, 0);
} /* end of shutdown() */
static void mgsl_program_hw(struct mgsl_struct *info)
@@ -1966,15 +1963,8 @@ static void mgsl_change_params(struct mgsl_struct *info)
}
info->timeout += HZ/50; /* Add .02 seconds of slop */
- if (cflag & CRTSCTS)
- info->port.flags |= ASYNC_CTS_FLOW;
- else
- info->port.flags &= ~ASYNC_CTS_FLOW;
-
- if (cflag & CLOCAL)
- info->port.flags &= ~ASYNC_CHECK_CD;
- else
- info->port.flags |= ASYNC_CHECK_CD;
+ tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
+ tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
/* process tty input control flags */
@@ -2972,7 +2962,7 @@ static int mgsl_ioctl(struct tty_struct *tty,
if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
(cmd != TIOCMIWAIT)) {
- if (tty->flags & (1 << TTY_IO_ERROR))
+ if (tty_io_error(tty))
return -EIO;
}
@@ -3049,7 +3039,7 @@ static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termio
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
info->serial_signals |= SerialSignal_DTR;
- if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
+ if (!C_CRTSCTS(tty) || !tty_throttled(tty))
info->serial_signals |= SerialSignal_RTS;
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_set_serial_signals(info);
@@ -3091,7 +3081,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
goto cleanup;
mutex_lock(&info->port.mutex);
- if (info->port.flags & ASYNC_INITIALIZED)
+ if (tty_port_initialized(&info->port))
mgsl_wait_until_sent(tty, info->timeout);
mgsl_flush_buffer(tty);
tty_ldisc_flush(tty);
@@ -3129,15 +3119,15 @@ static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
__FILE__,__LINE__, info->device_name );
-
+
if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
return;
- if (!(info->port.flags & ASYNC_INITIALIZED))
+ if (!tty_port_initialized(&info->port))
goto exit;
-
+
orig_jiffies = jiffies;
-
+
/* Set check interval to 1/5 of estimated time to
* send a character, and make it at least 1. The check
* interval should also be less than the timeout.
@@ -3204,7 +3194,7 @@ static void mgsl_hangup(struct tty_struct *tty)
shutdown(info);
info->port.count = 0;
- info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
+ tty_port_set_active(&info->port, 0);
info->port.tty = NULL;
wake_up_interruptible(&info->port.open_wait);
@@ -3270,9 +3260,9 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
printk("%s(%d):block_til_ready on %s\n",
__FILE__,__LINE__, tty->driver->name );
- if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
+ if (filp->f_flags & O_NONBLOCK || tty_io_error(tty)) {
/* nonblock mode is set or port is not enabled */
- port->flags |= ASYNC_NORMAL_ACTIVE;
+ tty_port_set_active(port, 1);
return 0;
}
@@ -3297,14 +3287,14 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
port->count--;
spin_unlock_irqrestore(&info->irq_spinlock, flags);
port->blocked_open++;
-
+
while (1) {
- if (C_BAUD(tty) && test_bit(ASYNCB_INITIALIZED, &port->flags))
+ if (C_BAUD(tty) && tty_port_initialized(port))
tty_port_raise_dtr_rts(port);
-
+
set_current_state(TASK_INTERRUPTIBLE);
-
- if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
+
+ if (tty_hung_up_p(filp) || !tty_port_initialized(port)) {
retval = (port->flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS;
break;
@@ -3341,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
__FILE__,__LINE__, tty->driver->name, port->count );
if (!retval)
- port->flags |= ASYNC_NORMAL_ACTIVE;
+ tty_port_set_active(port, 1);
return retval;
@@ -7708,7 +7698,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
dev_kfree_skb(skb);
/* save start time for transmit timeout detection */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* start hardware transmitter if necessary */
spin_lock_irqsave(&info->irq_spinlock,flags);
@@ -7764,7 +7754,7 @@ static int hdlcdev_open(struct net_device *dev)
mgsl_program_hw(info);
/* enable network layer transmit */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_start_queue(dev);
/* inform generic HDLC layer of current DCD status */
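
The synclink.c conversions above (and those in synclink_gt.c and synclinkmp.c below) replace open-coded |=/&= updates of the ASYNC_* bits in port->flags with tty_port accessors. These are thin test_bit/set_bit wrappers over a separate port->iflags word, so each state change becomes a single atomic bit operation; a sketch, assuming the 4.7 tty.h shape:

	static inline bool tty_port_initialized(struct tty_port *port)
	{
		return test_bit(TTY_PORT_INITIALIZED, &port->iflags);
	}

	static inline void tty_port_set_initialized(struct tty_port *port,
						    bool val)
	{
		if (val)
			set_bit(TTY_PORT_INITIALIZED, &port->iflags);
		else
			clear_bit(TTY_PORT_INITIALIZED, &port->iflags);
	}

The implicit bool conversion is also what makes calls like tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL) read correctly: the expression is nonzero exactly when CLOCAL is clear, i.e. when carrier must be watched.
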
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index c0a2f5a1b..7aca2d467 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -726,7 +726,7 @@ static void close(struct tty_struct *tty, struct file *filp)
goto cleanup;
mutex_lock(&info->port.mutex);
- if (info->port.flags & ASYNC_INITIALIZED)
+ if (tty_port_initialized(&info->port))
wait_until_sent(tty, info->timeout);
flush_buffer(tty);
tty_ldisc_flush(tty);
@@ -756,9 +756,9 @@ static void hangup(struct tty_struct *tty)
spin_lock_irqsave(&info->port.lock, flags);
info->port.count = 0;
- info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
info->port.tty = NULL;
spin_unlock_irqrestore(&info->port.lock, flags);
+ tty_port_set_active(&info->port, 0);
mutex_unlock(&info->port.mutex);
wake_up_interruptible(&info->port.open_wait);
@@ -784,7 +784,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
info->signals |= SerialSignal_DTR;
- if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
+ if (!C_CRTSCTS(tty) || !tty_throttled(tty))
info->signals |= SerialSignal_RTS;
spin_lock_irqsave(&info->lock,flags);
set_signals(info);
@@ -893,7 +893,7 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
if (sanity_check(info, tty->name, "wait_until_sent"))
return;
DBGINFO(("%s wait_until_sent entry\n", info->device_name));
- if (!(info->port.flags & ASYNC_INITIALIZED))
+ if (!tty_port_initialized(&info->port))
goto exit;
orig_jiffies = jiffies;
@@ -1032,7 +1032,7 @@ static int ioctl(struct tty_struct *tty,
if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
(cmd != TIOCMIWAIT)) {
- if (tty->flags & (1 << TTY_IO_ERROR))
+ if (tty_io_error(tty))
return -EIO;
}
@@ -1493,7 +1493,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
dev->stats.tx_bytes += skb->len;
/* save start time for transmit timeout detection */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_lock_irqsave(&info->lock, flags);
tx_load(info, skb->data, skb->len);
@@ -1552,7 +1552,7 @@ static int hdlcdev_open(struct net_device *dev)
program_hw(info);
/* enable network layer transmit */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_start_queue(dev);
/* inform generic HDLC layer of current DCD status */
@@ -2080,7 +2080,7 @@ static void dcd_change(struct slgt_info *info, unsigned short status)
wake_up_interruptible(&info->event_wait_q);
info->pending_bh |= BH_STATUS;
- if (info->port.flags & ASYNC_CHECK_CD) {
+ if (tty_port_check_carrier(&info->port)) {
if (info->signals & SerialSignal_DCD)
wake_up_interruptible(&info->port.open_wait);
else {
@@ -2421,7 +2421,7 @@ static int startup(struct slgt_info *info)
{
DBGINFO(("%s startup\n", info->device_name));
- if (info->port.flags & ASYNC_INITIALIZED)
+ if (tty_port_initialized(&info->port))
return 0;
if (!info->tx_buf) {
@@ -2442,7 +2442,7 @@ static int startup(struct slgt_info *info)
if (info->port.tty)
clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
- info->port.flags |= ASYNC_INITIALIZED;
+ tty_port_set_initialized(&info->port, 1);
return 0;
}
@@ -2454,7 +2454,7 @@ static void shutdown(struct slgt_info *info)
{
unsigned long flags;
- if (!(info->port.flags & ASYNC_INITIALIZED))
+ if (!tty_port_initialized(&info->port))
return;
DBGINFO(("%s shutdown\n", info->device_name));
@@ -2489,7 +2489,7 @@ static void shutdown(struct slgt_info *info)
if (info->port.tty)
set_bit(TTY_IO_ERROR, &info->port.tty->flags);
- info->port.flags &= ~ASYNC_INITIALIZED;
+ tty_port_set_initialized(&info->port, 0);
}
static void program_hw(struct slgt_info *info)
@@ -2576,15 +2576,8 @@ static void change_params(struct slgt_info *info)
}
info->timeout += HZ/50; /* Add .02 seconds of slop */
- if (cflag & CRTSCTS)
- info->port.flags |= ASYNC_CTS_FLOW;
- else
- info->port.flags &= ~ASYNC_CTS_FLOW;
-
- if (cflag & CLOCAL)
- info->port.flags &= ~ASYNC_CHECK_CD;
- else
- info->port.flags |= ASYNC_CHECK_CD;
+ tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
+ tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
/* process tty input control flags */
@@ -3269,9 +3262,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
DBGINFO(("%s block_til_ready\n", tty->driver->name));
- if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
+ if (filp->f_flags & O_NONBLOCK || tty_io_error(tty)) {
/* nonblock mode is set or port is not enabled */
- port->flags |= ASYNC_NORMAL_ACTIVE;
+ tty_port_set_active(port, 1);
return 0;
}
@@ -3294,12 +3287,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
port->blocked_open++;
while (1) {
- if (C_BAUD(tty) && test_bit(ASYNCB_INITIALIZED, &port->flags))
+ if (C_BAUD(tty) && tty_port_initialized(port))
tty_port_raise_dtr_rts(port);
set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
+ if (tty_hung_up_p(filp) || !tty_port_initialized(port)) {
retval = (port->flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS;
break;
@@ -3328,7 +3321,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
port->blocked_open--;
if (!retval)
- port->flags |= ASYNC_NORMAL_ACTIVE;
+ tty_port_set_active(port, 1);
DBGINFO(("%s block_til_ready ready, rc=%d\n", tty->driver->name, retval));
return retval;
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 90da0c712..dec156586 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -812,7 +812,7 @@ static void close(struct tty_struct *tty, struct file *filp)
goto cleanup;
mutex_lock(&info->port.mutex);
- if (info->port.flags & ASYNC_INITIALIZED)
+ if (tty_port_initialized(&info->port))
wait_until_sent(tty, info->timeout);
flush_buffer(tty);
@@ -849,9 +849,9 @@ static void hangup(struct tty_struct *tty)
spin_lock_irqsave(&info->port.lock, flags);
info->port.count = 0;
- info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
info->port.tty = NULL;
spin_unlock_irqrestore(&info->port.lock, flags);
+ tty_port_set_active(&info->port, 0);
mutex_unlock(&info->port.mutex);
wake_up_interruptible(&info->port.open_wait);
@@ -881,7 +881,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
info->serial_signals |= SerialSignal_DTR;
- if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
+ if (!C_CRTSCTS(tty) || !tty_throttled(tty))
info->serial_signals |= SerialSignal_RTS;
spin_lock_irqsave(&info->lock,flags);
set_signals(info);
@@ -1061,7 +1061,7 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
if (sanity_check(info, tty->name, "wait_until_sent"))
return;
- if (!test_bit(ASYNCB_INITIALIZED, &info->port.flags))
+ if (!tty_port_initialized(&info->port))
goto exit;
orig_jiffies = jiffies;
@@ -1261,7 +1261,7 @@ static int ioctl(struct tty_struct *tty,
if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
(cmd != TIOCMIWAIT)) {
- if (tty->flags & (1 << TTY_IO_ERROR))
+ if (tty_io_error(tty))
return -EIO;
}
@@ -1612,7 +1612,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
dev_kfree_skb(skb);
/* save start time for transmit timeout detection */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* start hardware transmitter if necessary */
spin_lock_irqsave(&info->lock,flags);
@@ -1668,7 +1668,7 @@ static int hdlcdev_open(struct net_device *dev)
program_hw(info);
/* enable network layer transmit */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_start_queue(dev);
/* inform generic HDLC layer of current DCD status */
@@ -2463,7 +2463,7 @@ static void isr_io_pin( SLMP_INFO *info, u16 status )
wake_up_interruptible(&info->status_event_wait_q);
wake_up_interruptible(&info->event_wait_q);
- if ( (info->port.flags & ASYNC_CHECK_CD) &&
+ if (tty_port_check_carrier(&info->port) &&
(status & MISCSTATUS_DCD_LATCHED) ) {
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s CD now %s...", info->device_name,
@@ -2636,7 +2636,7 @@ static int startup(SLMP_INFO * info)
if ( debug_level >= DEBUG_LEVEL_INFO )
printk("%s(%d):%s tx_releaseup()\n",__FILE__,__LINE__,info->device_name);
- if (info->port.flags & ASYNC_INITIALIZED)
+ if (tty_port_initialized(&info->port))
return 0;
if (!info->tx_buf) {
@@ -2662,7 +2662,7 @@ static int startup(SLMP_INFO * info)
if (info->port.tty)
clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
- info->port.flags |= ASYNC_INITIALIZED;
+ tty_port_set_initialized(&info->port, 1);
return 0;
}
@@ -2673,7 +2673,7 @@ static void shutdown(SLMP_INFO * info)
{
unsigned long flags;
- if (!(info->port.flags & ASYNC_INITIALIZED))
+ if (!tty_port_initialized(&info->port))
return;
if (debug_level >= DEBUG_LEVEL_INFO)
@@ -2705,7 +2705,7 @@ static void shutdown(SLMP_INFO * info)
if (info->port.tty)
set_bit(TTY_IO_ERROR, &info->port.tty->flags);
- info->port.flags &= ~ASYNC_INITIALIZED;
+ tty_port_set_initialized(&info->port, 0);
}
static void program_hw(SLMP_INFO *info)
@@ -2813,15 +2813,8 @@ static void change_params(SLMP_INFO *info)
}
info->timeout += HZ/50; /* Add .02 seconds of slop */
- if (cflag & CRTSCTS)
- info->port.flags |= ASYNC_CTS_FLOW;
- else
- info->port.flags &= ~ASYNC_CTS_FLOW;
-
- if (cflag & CLOCAL)
- info->port.flags &= ~ASYNC_CHECK_CD;
- else
- info->port.flags |= ASYNC_CHECK_CD;
+ tty_port_set_cts_flow(&info->port, cflag & CRTSCTS);
+ tty_port_set_check_carrier(&info->port, ~cflag & CLOCAL);
/* process tty input control flags */
@@ -3285,10 +3278,10 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
printk("%s(%d):%s block_til_ready()\n",
__FILE__,__LINE__, tty->driver->name );
- if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
+ if (filp->f_flags & O_NONBLOCK || tty_io_error(tty)) {
/* nonblock mode is set or port is not enabled */
/* just verify that callout device is not active */
- port->flags |= ASYNC_NORMAL_ACTIVE;
+ tty_port_set_active(port, 1);
return 0;
}
@@ -3315,12 +3308,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
port->blocked_open++;
while (1) {
- if (C_BAUD(tty) && test_bit(ASYNCB_INITIALIZED, &port->flags))
+ if (C_BAUD(tty) && tty_port_initialized(port))
tty_port_raise_dtr_rts(port);
set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
+ if (tty_hung_up_p(filp) || !tty_port_initialized(port)) {
retval = (port->flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS;
break;
@@ -3355,7 +3348,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
__FILE__,__LINE__, tty->driver->name, port->count );
if (!retval)
- port->flags |= ASYNC_NORMAL_ACTIVE;
+ tty_port_set_active(port, 1);
return retval;
}
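
All three synclink drivers also swap direct dev->trans_start = jiffies writes for netif_trans_update(). The watchdog timestamp lives in per-queue state as of this kernel, so the helper stamps transmit queue 0, roughly:

	static inline void netif_trans_update(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		/* Skip the store when the stamp is already current to
		 * avoid dirtying the cacheline on every transmit. */
		if (txq->trans_start != jiffies)
			txq->trans_start = jiffies;
	}
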
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 24d5491ef..734a635e7 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -230,9 +230,6 @@ static void tty_del_file(struct file *file)
tty_free_file(file);
}
-
-#define TTY_NUMBER(tty) ((tty)->index + (tty)->driver->name_base)
-
/**
* tty_name - return tty naming
* @tty: tty structure
@@ -1070,7 +1067,7 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
if (tty_paranoia_check(tty, inode, "tty_read"))
return -EIO;
- if (!tty || (test_bit(TTY_IO_ERROR, &tty->flags)))
+ if (!tty || tty_io_error(tty))
return -EIO;
/* We want to wait for the line discipline to sort out in this
@@ -1245,8 +1242,7 @@ static ssize_t tty_write(struct file *file, const char __user *buf,
if (tty_paranoia_check(tty, file_inode(file), "tty_write"))
return -EIO;
- if (!tty || !tty->ops->write ||
- (test_bit(TTY_IO_ERROR, &tty->flags)))
+ if (!tty || !tty->ops->write || tty_io_error(tty))
return -EIO;
/* Short term debug to catch buggy drivers */
if (tty->ops->write_room == NULL)
@@ -1964,7 +1960,6 @@ static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
* tty_lookup_driver - lookup a tty driver for a given device file
* @device: device number
* @filp: file pointer to tty
- * @noctty: set if the device should not become a controlling tty
* @index: index for the device in the @return driver
* @return: driver for this inode (with increased refcount)
*
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 23bf5bb1d..bf36ac9ae 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -158,7 +158,7 @@ int tty_throttle_safe(struct tty_struct *tty)
int ret = 0;
mutex_lock(&tty->throttle_mutex);
- if (!test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (!tty_throttled(tty)) {
if (tty->flow_change != TTY_THROTTLE_SAFE)
ret = 1;
else {
@@ -189,7 +189,7 @@ int tty_unthrottle_safe(struct tty_struct *tty)
int ret = 0;
mutex_lock(&tty->throttle_mutex);
- if (test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (tty_throttled(tty)) {
if (tty->flow_change != TTY_UNTHROTTLE_SAFE)
ret = 1;
else {
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index dbcca30a5..c3f9d93ba 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -204,7 +204,8 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
if (port->console)
goto out;
- if (test_and_clear_bit(ASYNCB_INITIALIZED, &port->flags)) {
+ if (tty_port_initialized(port)) {
+ tty_port_set_initialized(port, 0);
/*
* Drop DTR/RTS if HUPCL is set. This causes any attached
* modem to hang up the line.
@@ -236,12 +237,12 @@ void tty_port_hangup(struct tty_port *port)
spin_lock_irqsave(&port->lock, flags);
port->count = 0;
- port->flags &= ~ASYNC_NORMAL_ACTIVE;
tty = port->tty;
if (tty)
set_bit(TTY_IO_ERROR, &tty->flags);
port->tty = NULL;
spin_unlock_irqrestore(&port->lock, flags);
+ tty_port_set_active(port, 0);
tty_port_shutdown(port, tty);
tty_kref_put(tty);
wake_up_interruptible(&port->open_wait);
@@ -364,15 +365,15 @@ int tty_port_block_til_ready(struct tty_port *port,
/* if non-blocking mode is set we can pass directly to open unless
the port has just hung up or is in another error state */
- if (tty->flags & (1 << TTY_IO_ERROR)) {
- port->flags |= ASYNC_NORMAL_ACTIVE;
+ if (tty_io_error(tty)) {
+ tty_port_set_active(port, 1);
return 0;
}
if (filp->f_flags & O_NONBLOCK) {
/* Indicate we are open */
if (C_BAUD(tty))
tty_port_raise_dtr_rts(port);
- port->flags |= ASYNC_NORMAL_ACTIVE;
+ tty_port_set_active(port, 1);
return 0;
}
@@ -393,13 +394,13 @@ int tty_port_block_til_ready(struct tty_port *port,
while (1) {
/* Indicate we are open */
- if (C_BAUD(tty) && test_bit(ASYNCB_INITIALIZED, &port->flags))
+ if (C_BAUD(tty) && tty_port_initialized(port))
tty_port_raise_dtr_rts(port);
prepare_to_wait(&port->open_wait, &wait, TASK_INTERRUPTIBLE);
/* Check for a hangup or uninitialised port.
Return accordingly */
- if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) {
+ if (tty_hung_up_p(filp) || !tty_port_initialized(port)) {
if (port->flags & ASYNC_HUP_NOTIFY)
retval = -EAGAIN;
else
@@ -430,9 +431,9 @@ int tty_port_block_til_ready(struct tty_port *port,
if (!tty_hung_up_p(filp))
port->count++;
port->blocked_open--;
- if (retval == 0)
- port->flags |= ASYNC_NORMAL_ACTIVE;
spin_unlock_irqrestore(&port->lock, flags);
+ if (retval == 0)
+ tty_port_set_active(port, 1);
return retval;
}
EXPORT_SYMBOL(tty_port_block_til_ready);
@@ -480,7 +481,7 @@ int tty_port_close_start(struct tty_port *port,
tty->closing = 1;
- if (test_bit(ASYNCB_INITIALIZED, &port->flags)) {
+ if (tty_port_initialized(port)) {
/* Don't block on a stalled port, just pull the chain */
if (tty->flow_stopped)
tty_driver_flush_buffer(tty);
@@ -514,8 +515,8 @@ void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
spin_lock_irqsave(&port->lock, flags);
wake_up_interruptible(&port->open_wait);
}
- port->flags &= ~ASYNC_NORMAL_ACTIVE;
spin_unlock_irqrestore(&port->lock, flags);
+ tty_port_set_active(port, 0);
}
EXPORT_SYMBOL(tty_port_close_end);
@@ -578,7 +579,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
mutex_lock(&port->mutex);
- if (!test_bit(ASYNCB_INITIALIZED, &port->flags)) {
+ if (!tty_port_initialized(port)) {
clear_bit(TTY_IO_ERROR, &tty->flags);
if (port->ops->activate) {
int retval = port->ops->activate(port, tty);
@@ -587,7 +588,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
return retval;
}
}
- set_bit(ASYNCB_INITIALIZED, &port->flags);
+ tty_port_set_initialized(port, 1);
}
mutex_unlock(&port->mutex);
return tty_port_block_til_ready(port, tty, filp);
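
One detail recurs through the tty_port.c hunks: tty_port_set_active() moves outside the port->lock critical sections it used to sit inside. Because the accessor is an atomic bit operation on port->iflags, it needs no spinlock, and hoisting it out shortens the locked region. The shape the hunks converge on, as a sketch (example_hangup_tail is hypothetical):

	static void example_hangup_tail(struct tty_port *port)
	{
		unsigned long flags;

		spin_lock_irqsave(&port->lock, flags);
		port->count = 0;
		port->tty = NULL;
		spin_unlock_irqrestore(&port->lock, flags);

		/* Atomic bit op on port->iflags; safe without the lock. */
		tty_port_set_active(port, 0);
	}
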
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 4dd9dd227..368ce1803 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -354,7 +354,7 @@ int paste_selection(struct tty_struct *tty)
add_wait_queue(&vc->paste_wait, &wait);
while (sel_buffer && sel_buffer_lth > pasted) {
set_current_state(TASK_INTERRUPTIBLE);
- if (test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (tty_throttled(tty)) {
schedule();
continue;
}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index e9e29ded3..5b0fe97c4 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -761,50 +761,54 @@ static void visual_init(struct vc_data *vc, int num, int init)
int vc_allocate(unsigned int currcons) /* return 0 on success */
{
+ struct vt_notifier_param param;
+ struct vc_data *vc;
+
WARN_CONSOLE_UNLOCKED();
if (currcons >= MAX_NR_CONSOLES)
return -ENXIO;
- if (!vc_cons[currcons].d) {
- struct vc_data *vc;
- struct vt_notifier_param param;
-
- /* prevent users from taking too much memory */
- if (currcons >= MAX_NR_USER_CONSOLES && !capable(CAP_SYS_RESOURCE))
- return -EPERM;
-
- /* due to the granularity of kmalloc, we waste some memory here */
- /* the alloc is done in two steps, to optimize the common situation
- of a 25x80 console (structsize=216, screenbuf_size=4000) */
- /* although the numbers above are not valid since long ago, the
- point is still up-to-date and the comment still has its value
- even if only as a historical artifact. --mj, July 1998 */
- param.vc = vc = kzalloc(sizeof(struct vc_data), GFP_KERNEL);
- if (!vc)
+
+ if (vc_cons[currcons].d)
+ return 0;
+
+ /* due to the granularity of kmalloc, we waste some memory here */
+ /* the alloc is done in two steps, to optimize the common situation
+ of a 25x80 console (structsize=216, screenbuf_size=4000) */
+ /* although the numbers above are not valid since long ago, the
+ point is still up-to-date and the comment still has its value
+ even if only as a historical artifact. --mj, July 1998 */
+ param.vc = vc = kzalloc(sizeof(struct vc_data), GFP_KERNEL);
+ if (!vc)
return -ENOMEM;
- vc_cons[currcons].d = vc;
- tty_port_init(&vc->port);
- INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
- visual_init(vc, currcons, 1);
- if (!*vc->vc_uni_pagedir_loc)
+
+ vc_cons[currcons].d = vc;
+ tty_port_init(&vc->port);
+ INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
+
+ visual_init(vc, currcons, 1);
+
+ if (!*vc->vc_uni_pagedir_loc)
con_set_default_unimap(vc);
- vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
- if (!vc->vc_screenbuf) {
- kfree(vc);
- vc_cons[currcons].d = NULL;
- return -ENOMEM;
- }
- /* If no drivers have overridden us and the user didn't pass a
- boot option, default to displaying the cursor */
- if (global_cursor_default == -1)
- global_cursor_default = 1;
+ vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
+ if (!vc->vc_screenbuf)
+ goto err_free;
+
+ /* If no drivers have overridden us and the user didn't pass a
+ boot option, default to displaying the cursor */
+ if (global_cursor_default == -1)
+ global_cursor_default = 1;
+
+ vc_init(vc, vc->vc_rows, vc->vc_cols, 1);
+ vcs_make_sysfs(currcons);
+ atomic_notifier_call_chain(&vt_notifier_list, VT_ALLOCATE, &param);
- vc_init(vc, vc->vc_rows, vc->vc_cols, 1);
- vcs_make_sysfs(currcons);
- atomic_notifier_call_chain(&vt_notifier_list, VT_ALLOCATE, &param);
- }
return 0;
+err_free:
+ kfree(vc);
+ vc_cons[currcons].d = NULL;
+ return -ENOMEM;
}
static inline int resize_screen(struct vc_data *vc, int width, int height,
@@ -1036,20 +1040,27 @@ struct vc_data *vc_deallocate(unsigned int currcons)
#define VT100ID "\033[?1;2c"
#define VT102ID "\033[?6c"
-unsigned char color_table[] = { 0, 4, 2, 6, 1, 5, 3, 7,
+const unsigned char color_table[] = { 0, 4, 2, 6, 1, 5, 3, 7,
8,12,10,14, 9,13,11,15 };
/* the default colour table, for VGA+ colour systems */
-int default_red[] = {0x00,0xaa,0x00,0xaa,0x00,0xaa,0x00,0xaa,
- 0x55,0xff,0x55,0xff,0x55,0xff,0x55,0xff};
-int default_grn[] = {0x00,0x00,0xaa,0x55,0x00,0x00,0xaa,0xaa,
- 0x55,0x55,0xff,0xff,0x55,0x55,0xff,0xff};
-int default_blu[] = {0x00,0x00,0x00,0x00,0xaa,0xaa,0xaa,0xaa,
- 0x55,0x55,0x55,0x55,0xff,0xff,0xff,0xff};
+unsigned char default_red[] = {
+ 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa,
+ 0x55, 0xff, 0x55, 0xff, 0x55, 0xff, 0x55, 0xff
+};
+module_param_array(default_red, byte, NULL, S_IRUGO | S_IWUSR);
+
+unsigned char default_grn[] = {
+ 0x00, 0x00, 0xaa, 0x55, 0x00, 0x00, 0xaa, 0xaa,
+ 0x55, 0x55, 0xff, 0xff, 0x55, 0x55, 0xff, 0xff
+};
+module_param_array(default_grn, byte, NULL, S_IRUGO | S_IWUSR);
-module_param_array(default_red, int, NULL, S_IRUGO | S_IWUSR);
-module_param_array(default_grn, int, NULL, S_IRUGO | S_IWUSR);
-module_param_array(default_blu, int, NULL, S_IRUGO | S_IWUSR);
+unsigned char default_blu[] = {
+ 0x00, 0x00, 0x00, 0x00, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0x55, 0x55, 0x55, 0x55, 0xff, 0xff, 0xff, 0xff
+};
+module_param_array(default_blu, byte, NULL, S_IRUGO | S_IWUSR);
/*
* gotoxy() must verify all boundaries, because the arguments
@@ -3565,7 +3576,7 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
struct module *owner = csw->owner;
struct con_driver *con_driver;
const char *desc;
- int i, retval = 0;
+ int i, retval;
WARN_CONSOLE_UNLOCKED();
@@ -3576,13 +3587,12 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
con_driver = &registered_con_driver[i];
/* already registered */
- if (con_driver->con == csw)
+ if (con_driver->con == csw) {
retval = -EBUSY;
+ goto err;
+ }
}
- if (retval)
- goto err;
-
desc = csw->con_startup();
if (!desc) {
retval = -ENODEV;
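
vt.c's palette defaults shrink from int[] to unsigned char[], with the module_param_array() type tag changed from int to byte to match; the per-channel values only ever span 0-255, and the arrays stay writable at runtime through sysfs. The declaration pattern, using a hypothetical parameter name:

	/* Values appear under /sys/module/<module>/parameters/. */
	static unsigned char example_chan[16] = {
		0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa,
		0x55, 0xff, 0x55, 0xff, 0x55, 0xff, 0x55, 0xff
	};
	module_param_array(example_chan, byte, NULL, S_IRUGO | S_IWUSR);
	MODULE_PARM_DESC(example_chan, "example 16-entry palette channel");
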
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index bcc1fc027..fba021f57 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -271,12 +271,16 @@ static int uio_dev_add_attributes(struct uio_device *idev)
map_found = 1;
idev->map_dir = kobject_create_and_add("maps",
&idev->dev->kobj);
- if (!idev->map_dir)
+ if (!idev->map_dir) {
+ ret = -ENOMEM;
goto err_map;
+ }
}
map = kzalloc(sizeof(*map), GFP_KERNEL);
- if (!map)
+ if (!map) {
+ ret = -ENOMEM;
goto err_map_kobj;
+ }
kobject_init(&map->kobj, &map_attr_type);
map->mem = mem;
mem->map = map;
@@ -296,12 +300,16 @@ static int uio_dev_add_attributes(struct uio_device *idev)
portio_found = 1;
idev->portio_dir = kobject_create_and_add("portio",
&idev->dev->kobj);
- if (!idev->portio_dir)
+ if (!idev->portio_dir) {
+ ret = -ENOMEM;
goto err_portio;
+ }
}
portio = kzalloc(sizeof(*portio), GFP_KERNEL);
- if (!portio)
+ if (!portio) {
+ ret = -ENOMEM;
goto err_portio_kobj;
+ }
kobject_init(&portio->kobj, &portio_attr_type);
portio->port = port;
port->portio = portio;
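
The uio.c hunks close silent failure paths: the allocation failure branches previously jumped to the unwind labels without loading an error into ret, so a failed kobject or kzalloc allocation could return whatever ret last held. The generic shape of the fix, as an illustrative sketch:

	static int example_add_attr(struct kobject *parent)
	{
		struct kobject *dir;
		int ret;

		dir = kobject_create_and_add("maps", parent);
		if (!dir) {
			ret = -ENOMEM;	/* previously left unset */
			goto err;
		}
		return 0;
	err:
		return ret;
	}
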
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 8ed451dd6..8689dcba5 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -31,8 +31,6 @@ if USB_SUPPORT
config USB_COMMON
tristate
- default y
- depends on USB || USB_GADGET
config USB_ARCH_HAS_HCD
def_bool y
@@ -41,6 +39,7 @@ config USB_ARCH_HAS_HCD
config USB
tristate "Support for Host-side USB"
depends on USB_ARCH_HAS_HCD
+ select USB_COMMON
select NLS # for UTF-8 strings
---help---
Universal Serial Bus (USB) is a specification for a serial bus
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 2a3d0dcaf..59efae2ff 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -173,10 +173,10 @@ struct uea_softc {
const struct firmware *dsp_firm;
struct urb *urb_int;
- void (*dispatch_cmv) (struct uea_softc *, struct intr_pkt *);
- void (*schedule_load_page) (struct uea_softc *, struct intr_pkt *);
- int (*stat) (struct uea_softc *);
- int (*send_cmvs) (struct uea_softc *);
+ void (*dispatch_cmv)(struct uea_softc *, struct intr_pkt *);
+ void (*schedule_load_page)(struct uea_softc *, struct intr_pkt *);
+ int (*stat)(struct uea_softc *);
+ int (*send_cmvs)(struct uea_softc *);
/* keep in sync with eaglectl */
struct uea_stats {
@@ -2454,7 +2454,7 @@ UEA_ATTR(firmid, 0);
/* Retrieve the device End System Identifier (MAC) */
-static int uea_getesi(struct uea_softc *sc, u_char * esi)
+static int uea_getesi(struct uea_softc *sc, u_char *esi)
{
unsigned char mac_str[2 * ETH_ALEN + 1];
int i;
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 9ce8c9f91..dedc33e58 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -292,10 +292,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
if (pdata.flags & CI_HDRC_SUPPORTS_RUNTIME_PM)
data->supports_runtime_pm = true;
- ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- goto err_clk;
-
ret = imx_usbmisc_init(data->usbmisc_data);
if (ret) {
dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n", ret);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index a6c4a1b89..94a14f5dc 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1680,7 +1680,7 @@ static int acm_resume(struct usb_interface *intf)
if (--acm->susp_count)
goto out;
- if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags)) {
+ if (tty_port_initialized(&acm->port)) {
rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
for (;;) {
@@ -1710,7 +1710,7 @@ static int acm_reset_resume(struct usb_interface *intf)
{
struct acm *acm = usb_get_intfdata(intf);
- if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags))
+ if (tty_port_initialized(&acm->port))
tty_port_tty_hangup(&acm->port, false);
return acm_resume(intf);
diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
index 6c6040c22..2f537bbdd 100644
--- a/drivers/usb/common/usb-otg-fsm.c
+++ b/drivers/usb/common/usb-otg-fsm.c
@@ -62,8 +62,6 @@ static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
return 0;
}
-static int state_changed;
-
/* Called when leaving a state. Do state clean up jobs here */
static void otg_leave_state(struct otg_fsm *fsm, enum usb_otg_state old_state)
{
@@ -209,7 +207,6 @@ static void otg_start_hnp_polling(struct otg_fsm *fsm)
/* Called when entering a state */
static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
{
- state_changed = 1;
if (fsm->otg->state == new_state)
return 0;
VDBG("Set state: %s\n", usb_otg_state_string(new_state));
@@ -325,6 +322,7 @@ static int otg_set_state(struct otg_fsm *fsm, enum usb_otg_state new_state)
}
fsm->otg->state = new_state;
+ fsm->state_changed = 1;
return 0;
}
@@ -336,7 +334,7 @@ int otg_statemachine(struct otg_fsm *fsm)
mutex_lock(&fsm->lock);
state = fsm->otg->state;
- state_changed = 0;
+ fsm->state_changed = 0;
/* State machine state change judgement */
switch (state) {
@@ -449,8 +447,8 @@ int otg_statemachine(struct otg_fsm *fsm)
}
mutex_unlock(&fsm->lock);
- VDBG("quit statemachine, changed = %d\n", state_changed);
- return state_changed;
+ VDBG("quit statemachine, changed = %d\n", fsm->state_changed);
+ return fsm->state_changed;
}
EXPORT_SYMBOL_GPL(otg_statemachine);
MODULE_LICENSE("GPL");
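The usb-otg-fsm.c change is an instancing fix rather than a behavioral one: state_changed was a file-scope static shared by every OTG controller in the system, so two instances stepping their state machines concurrently could clobber each other's flag; moving it into struct otg_fsm makes it per-instance. A standalone sketch of the difference (struct and field names are illustrative):

#include <stdio.h>

/* Before the patch, 'state_changed' lived in a file-scope static
 * shared by every FSM instance. Keeping it inside the per-instance
 * struct means one controller's transition cannot clobber another's. */
struct fsm {
	int state;
	int state_changed;	/* was: static int state_changed; */
};

static int set_state(struct fsm *f, int new_state)
{
	if (f->state == new_state)
		return 0;
	f->state = new_state;
	f->state_changed = 1;
	return 0;
}

int main(void)
{
	struct fsm a = {0}, b = {0};
	set_state(&a, 1);	/* a's flag is set... */
	printf("%d %d\n", a.state_changed, b.state_changed);	/* ...b's untouched: 1 0 */
	return 0;
}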
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 2741566ee..98e39f917 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -122,6 +122,9 @@ void *hcd_buffer_alloc(
struct usb_hcd *hcd = bus_to_hcd(bus);
int i;
+ if (size == 0)
+ return NULL;
+
/* some USB hosts just use PIO */
if (!IS_ENABLED(CONFIG_HAS_DMA) ||
(!bus->controller->dma_mask &&
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 52c4461df..e9f5043a2 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -216,7 +216,7 @@ static void usbdev_vm_close(struct vm_area_struct *vma)
dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
}
-struct vm_operations_struct usbdev_vm_ops = {
+static struct vm_operations_struct usbdev_vm_ops = {
.open = usbdev_vm_open,
.close = usbdev_vm_close
};
@@ -1316,10 +1316,11 @@ static int proc_getdriver(struct usb_dev_state *ps, void __user *arg)
static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg)
{
- struct usbdevfs_connectinfo ci = {
- .devnum = ps->dev->devnum,
- .slow = ps->dev->speed == USB_SPEED_LOW
- };
+ struct usbdevfs_connectinfo ci;
+
+ memset(&ci, 0, sizeof(ci));
+ ci.devnum = ps->dev->devnum;
+ ci.slow = ps->dev->speed == USB_SPEED_LOW;
if (copy_to_user(arg, &ci, sizeof(ci)))
return -EFAULT;
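The proc_connectinfo() change looks cosmetic but is an information-leak fix: a designated initializer zeroes the named members, yet the compiler is not required to zero padding bytes, so copying the whole struct to userspace could expose stale kernel stack contents. memset() clears the padding as well. A standalone illustration (the struct layout here is an assumption for the example):

#include <string.h>

struct connectinfo {
	unsigned int devnum;
	unsigned char slow;	/* tail padding typically follows here */
};

/* memset() clears the padding bytes too, so copying the whole struct
 * to an untrusted destination cannot leak stale stack contents. */
static void fill(struct connectinfo *ci, unsigned int devnum, int slow)
{
	memset(ci, 0, sizeof(*ci));
	ci->devnum = devnum;
	ci->slow = slow;
}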
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 2d107d0f6..d2e3f655c 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1118,6 +1118,7 @@ static int register_root_hub(struct usb_hcd *hcd)
/* Did the HC die before the root hub was registered? */
if (HCD_DEAD(hcd))
usb_hc_died (hcd); /* This time clean up */
+ usb_dev->dev.of_node = parent_dev->of_node;
}
mutex_unlock(&usb_bus_idr_lock);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 1ab42bfbe..bee135176 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -104,6 +104,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev);
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
+ if (hub_is_superspeedplus(hub->hdev))
+ return "10.0 Gb/s";
if (hub_is_superspeed(hub->hdev))
return "5.0 Gb/s";
if (portstatus & USB_PORT_STAT_HIGH_SPEED)
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 8e641b589..ea681f157 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -302,9 +302,10 @@ static void sg_complete(struct urb *urb)
*/
spin_unlock(&io->lock);
for (i = 0, found = 0; i < io->entries; i++) {
- if (!io->urbs[i] || !io->urbs[i]->dev)
+ if (!io->urbs[i])
continue;
if (found) {
+ usb_block_urb(io->urbs[i]);
retval = usb_unlink_urb(io->urbs[i]);
if (retval != -EINPROGRESS &&
retval != -ENODEV &&
@@ -515,12 +516,10 @@ void usb_sg_wait(struct usb_sg_request *io)
int retval;
io->urbs[i]->dev = io->dev;
- retval = usb_submit_urb(io->urbs[i], GFP_ATOMIC);
-
- /* after we submit, let completions or cancellations fire;
- * we handshake using io->status.
- */
spin_unlock_irq(&io->lock);
+
+ retval = usb_submit_urb(io->urbs[i], GFP_NOIO);
+
switch (retval) {
/* maybe retrying will recover */
case -ENXIO: /* hc didn't queue this one */
@@ -578,31 +577,28 @@ EXPORT_SYMBOL_GPL(usb_sg_wait);
void usb_sg_cancel(struct usb_sg_request *io)
{
unsigned long flags;
+ int i, retval;
spin_lock_irqsave(&io->lock, flags);
+ if (io->status) {
+ spin_unlock_irqrestore(&io->lock, flags);
+ return;
+ }
+ /* shut everything down */
+ io->status = -ECONNRESET;
+ spin_unlock_irqrestore(&io->lock, flags);
- /* shut everything down, if it didn't already */
- if (!io->status) {
- int i;
-
- io->status = -ECONNRESET;
- spin_unlock(&io->lock);
- for (i = 0; i < io->entries; i++) {
- int retval;
+ for (i = io->entries - 1; i >= 0; --i) {
+ usb_block_urb(io->urbs[i]);
- if (!io->urbs[i]->dev)
- continue;
- retval = usb_unlink_urb(io->urbs[i]);
- if (retval != -EINPROGRESS
- && retval != -ENODEV
- && retval != -EBUSY
- && retval != -EIDRM)
- dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
- __func__, retval);
- }
- spin_lock(&io->lock);
+ retval = usb_unlink_urb(io->urbs[i]);
+ if (retval != -EINPROGRESS
+ && retval != -ENODEV
+ && retval != -EBUSY
+ && retval != -EIDRM)
+ dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
+ __func__, retval);
}
- spin_unlock_irqrestore(&io->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_sg_cancel);
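The usb_sg_cancel() rework does three related things: it decides under the spinlock whether this caller owns the cancel (bailing out if io->status is already set), drops the lock before the potentially long unlink loop, and uses usb_block_urb() so URBs that usb_sg_wait() has not yet submitted cannot race in afterwards; usb_sg_wait() itself now submits with GFP_NOIO outside the lock. A minimal pthread-based analogue of the "claim under the lock, do the work outside it" shape (all names are illustrative):

#include <pthread.h>

struct sg_req {
	pthread_mutex_t lock;
	int status;
};

/* Analogue of the reworked cancel: decide under the lock whether we
 * are the one shutting things down, then do the (potentially slow)
 * per-URB work with the lock dropped. */
static void sg_cancel(struct sg_req *io)
{
	pthread_mutex_lock(&io->lock);
	if (io->status) {	/* already failing or cancelled */
		pthread_mutex_unlock(&io->lock);
		return;
	}
	io->status = -1;	/* claim the cancel */
	pthread_mutex_unlock(&io->lock);

	/* ... block and unlink each queued item here, lock not held ... */
}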
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 479187c32..5e80697ef 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -466,7 +466,6 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
dev->route = 0;
dev->dev.parent = bus->controller;
- dev->dev.of_node = bus->controller->of_node;
dev_set_name(&dev->dev, "usb%d", bus->busnum);
root_hub = 1;
} else {
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 818f15823..26cf09d0f 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1018,7 +1018,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
return 1;
}
-static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value);
+static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
/**
* get_ep_head - return the first request on the endpoint
@@ -1094,7 +1094,7 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
case USB_ENDPOINT_HALT:
halted = ep->halted;
- dwc2_hsotg_ep_sethalt(&ep->ep, set);
+ dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
if (ret) {
@@ -2425,6 +2425,9 @@ static irqreturn_t dwc2_hsotg_irq(int irq, void *pw)
u32 gintsts;
u32 gintmsk;
+ if (!dwc2_is_device_mode(hsotg))
+ return IRQ_NONE;
+
spin_lock(&hsotg->lock);
irq_retry:
gintsts = dwc2_readl(hsotg->regs + GINTSTS);
@@ -2631,7 +2634,10 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
desc->wMaxPacketSize, desc->bInterval);
/* not to be called for EP0 */
- WARN_ON(index == 0);
+ if (index == 0) {
+ dev_err(hsotg->dev, "%s: called for EP 0\n", __func__);
+ return -EINVAL;
+ }
dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0;
if (dir_in != hs_ep->dir_in) {
@@ -2942,8 +2948,13 @@ static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
* dwc2_hsotg_ep_sethalt - set halt on a given endpoint
* @ep: The endpoint to set halt.
* @value: Set or unset the halt.
+ * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
+ * the endpoint is busy processing requests.
+ *
+ * We need to stall the endpoint immediately if the request comes from
+ * the SetFeature protocol command handler.
*/
-static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value)
+static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
{
struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hs = hs_ep->parent;
@@ -2963,6 +2974,17 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value)
return 0;
}
+ if (hs_ep->isochronous) {
+ dev_err(hs->dev, "%s is an Isochronous Endpoint\n", ep->name);
+ return -EINVAL;
+ }
+
+ if (!now && value && !list_empty(&hs_ep->queue)) {
+ dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
+ ep->name);
+ return -EAGAIN;
+ }
+
if (hs_ep->dir_in) {
epreg = DIEPCTL(index);
epctl = dwc2_readl(hs->regs + epreg);
@@ -3014,7 +3036,7 @@ static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
int ret = 0;
spin_lock_irqsave(&hs->lock, flags);
- ret = dwc2_hsotg_ep_sethalt(ep, value);
+ ret = dwc2_hsotg_ep_sethalt(ep, value, false);
spin_unlock_irqrestore(&hs->lock, flags);
return ret;
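The new bool separates the two callers of dwc2_hsotg_ep_sethalt(): the SetFeature(ENDPOINT_HALT) control path must stall at once (now = true), while the usb_ep_set_halt() API path may refuse with -EAGAIN while requests are still queued (now = false). A standalone sketch of that contract (types simplified, names illustrative):

#include <errno.h>
#include <stdbool.h>

struct ep { int halted; int queue_busy; };

/* Analogue of the new 'now' parameter: protocol-initiated stalls take
 * effect immediately, API-initiated ones may defer with -EAGAIN while
 * requests are still queued. */
static int ep_sethalt(struct ep *ep, int value, bool now)
{
	if (!now && value && ep->queue_busy)
		return -EAGAIN;
	ep->halted = value;
	return 0;
}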
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 1f6255131..2df3d04d2 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4703,6 +4703,7 @@ fail2:
spin_unlock_irqrestore(&hsotg->lock, flags);
urb->hcpriv = NULL;
kfree(qtd);
+ qtd = NULL;
fail1:
if (qh_allocated) {
struct dwc2_qtd *qtd2, *qtd2_tmp;
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 89fa26cb2..7758bfb64 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -552,6 +552,7 @@ static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
{
list_del(&qtd->qtd_list_entry);
kfree(qtd);
+ qtd = NULL;
}
/* Descriptor DMA support functions */
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 7f634fd77..b5c7793a2 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -1709,7 +1709,8 @@ void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
dwc2_deschedule_periodic(hsotg, qh);
hsotg->periodic_qh_count--;
- if (!hsotg->periodic_qh_count) {
+ if (!hsotg->periodic_qh_count &&
+ hsotg->core_params->dma_desc_enable <= 0) {
intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
intr_mask &= ~GINTSTS_SOF;
dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 88629bed6..fc6f5251d 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -562,7 +562,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
retval = dwc2_get_dr_mode(hsotg);
if (retval)
- return retval;
+ goto error;
/*
* Reset before dwc2_get_hwparams() then it could get power-on real
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 34277ced2..a590cd225 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -60,6 +60,20 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}
+u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
+{
+ struct dwc3 *dwc = dep->dwc;
+ u32 reg;
+
+ dwc3_writel(dwc->regs, DWC3_GDBGFIFOSPACE,
+ DWC3_GDBGFIFOSPACE_NUM(dep->number) |
+ DWC3_GDBGFIFOSPACE_TYPE(type));
+
+ reg = dwc3_readl(dwc->regs, DWC3_GDBGFIFOSPACE);
+
+ return DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(reg);
+}
+
/**
* dwc3_core_soft_reset - Issues core soft reset and PHY reset
* @dwc: pointer to our context structure
@@ -203,13 +217,10 @@ static struct dwc3_event_buffer *dwc3_alloc_one_event_buffer(struct dwc3 *dwc,
static void dwc3_free_event_buffers(struct dwc3 *dwc)
{
struct dwc3_event_buffer *evt;
- int i;
- for (i = 0; i < dwc->num_event_buffers; i++) {
- evt = dwc->ev_buffs[i];
- if (evt)
- dwc3_free_one_event_buffer(dwc, evt);
- }
+ evt = dwc->ev_buf;
+ if (evt)
+ dwc3_free_one_event_buffer(dwc, evt);
}
/**
@@ -222,27 +233,14 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
*/
static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
{
- int num;
- int i;
-
- num = DWC3_NUM_INT(dwc->hwparams.hwparams1);
- dwc->num_event_buffers = num;
-
- dwc->ev_buffs = devm_kzalloc(dwc->dev, sizeof(*dwc->ev_buffs) * num,
- GFP_KERNEL);
- if (!dwc->ev_buffs)
- return -ENOMEM;
-
- for (i = 0; i < num; i++) {
- struct dwc3_event_buffer *evt;
+ struct dwc3_event_buffer *evt;
- evt = dwc3_alloc_one_event_buffer(dwc, length);
- if (IS_ERR(evt)) {
- dev_err(dwc->dev, "can't allocate event buffer\n");
- return PTR_ERR(evt);
- }
- dwc->ev_buffs[i] = evt;
+ evt = dwc3_alloc_one_event_buffer(dwc, length);
+ if (IS_ERR(evt)) {
+ dev_err(dwc->dev, "can't allocate event buffer\n");
+ return PTR_ERR(evt);
}
+ dwc->ev_buf = evt;
return 0;
}
@@ -256,25 +254,22 @@ static int dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
static int dwc3_event_buffers_setup(struct dwc3 *dwc)
{
struct dwc3_event_buffer *evt;
- int n;
- for (n = 0; n < dwc->num_event_buffers; n++) {
- evt = dwc->ev_buffs[n];
- dwc3_trace(trace_dwc3_core,
- "Event buf %p dma %08llx length %d\n",
- evt->buf, (unsigned long long) evt->dma,
- evt->length);
-
- evt->lpos = 0;
-
- dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n),
- lower_32_bits(evt->dma));
- dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n),
- upper_32_bits(evt->dma));
- dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
- DWC3_GEVNTSIZ_SIZE(evt->length));
- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
- }
+ evt = dwc->ev_buf;
+ dwc3_trace(trace_dwc3_core,
+ "Event buf %p dma %08llx length %d\n",
+ evt->buf, (unsigned long long) evt->dma,
+ evt->length);
+
+ evt->lpos = 0;
+
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0),
+ lower_32_bits(evt->dma));
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0),
+ upper_32_bits(evt->dma));
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
+ DWC3_GEVNTSIZ_SIZE(evt->length));
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
return 0;
}
@@ -282,19 +277,16 @@ static int dwc3_event_buffers_setup(struct dwc3 *dwc)
static void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
{
struct dwc3_event_buffer *evt;
- int n;
- for (n = 0; n < dwc->num_event_buffers; n++) {
- evt = dwc->ev_buffs[n];
+ evt = dwc->ev_buf;
- evt->lpos = 0;
+ evt->lpos = 0;
- dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n), 0);
- dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n), 0);
- dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n), DWC3_GEVNTSIZ_INTMASK
- | DWC3_GEVNTSIZ_SIZE(0));
- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
- }
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(0), 0);
+ dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(0), 0);
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_INTMASK
+ | DWC3_GEVNTSIZ_SIZE(0));
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 0);
}
static int dwc3_alloc_scratch_buffers(struct dwc3 *dwc)
@@ -434,6 +426,9 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
if (dwc->u2ss_inp3_quirk)
reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK;
+ if (dwc->dis_rxdet_inp3_quirk)
+ reg |= DWC3_GUSB3PIPECTL_DISRXDETINP3;
+
if (dwc->req_p1p2p3_quirk)
reg |= DWC3_GUSB3PIPECTL_REQP1P2P3;
@@ -882,9 +877,6 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->usb3_lpm_capable = device_property_read_bool(dev,
"snps,usb3_lpm_capable");
- dwc->needs_fifo_resize = device_property_read_bool(dev,
- "tx-fifo-resize");
-
dwc->disable_scramble_quirk = device_property_read_bool(dev,
"snps,disable_scramble_quirk");
dwc->u2exit_lfps_quirk = device_property_read_bool(dev,
@@ -907,6 +899,8 @@ static int dwc3_probe(struct platform_device *pdev)
"snps,dis_u2_susphy_quirk");
dwc->dis_enblslpm_quirk = device_property_read_bool(dev,
"snps,dis_enblslpm_quirk");
+ dwc->dis_rxdet_inp3_quirk = device_property_read_bool(dev,
+ "snps,dis_rxdet_inp3_quirk");
dwc->tx_de_emphasis_quirk = device_property_read_bool(dev,
"snps,tx_de_emphasis_quirk");
@@ -926,7 +920,6 @@ static int dwc3_probe(struct platform_device *pdev)
if (pdata->hird_threshold)
hird_threshold = pdata->hird_threshold;
- dwc->needs_fifo_resize = pdata->tx_fifo_resize;
dwc->usb3_lpm_capable = pdata->usb3_lpm_capable;
dwc->dr_mode = pdata->dr_mode;
@@ -941,6 +934,7 @@ static int dwc3_probe(struct platform_device *pdev)
dwc->dis_u3_susphy_quirk = pdata->dis_u3_susphy_quirk;
dwc->dis_u2_susphy_quirk = pdata->dis_u2_susphy_quirk;
dwc->dis_enblslpm_quirk = pdata->dis_enblslpm_quirk;
+ dwc->dis_rxdet_inp3_quirk = pdata->dis_rxdet_inp3_quirk;
dwc->tx_de_emphasis_quirk = pdata->tx_de_emphasis_quirk;
if (pdata->tx_de_emphasis)
@@ -1050,19 +1044,11 @@ static int dwc3_probe(struct platform_device *pdev)
if (ret)
goto err5;
- ret = dwc3_debugfs_init(dwc);
- if (ret) {
- dev_err(dev, "failed to initialize debugfs\n");
- goto err6;
- }
-
+ dwc3_debugfs_init(dwc);
pm_runtime_allow(dev);
return 0;
-err6:
- dwc3_core_exit_mode(dwc);
-
err5:
dwc3_event_buffers_cleanup(dwc);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 6254b2ff9..654050684 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -152,6 +152,24 @@
/* Bit fields */
+/* Global Debug Queue/FIFO Space Available Register */
+#define DWC3_GDBGFIFOSPACE_NUM(n) ((n) & 0x1f)
+#define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0)
+#define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff)
+
+#define DWC3_TXFIFOQ 1
+#define DWC3_RXFIFOQ 3
+#define DWC3_TXREQQ 5
+#define DWC3_RXREQQ 7
+#define DWC3_RXINFOQ 9
+#define DWC3_DESCFETCHQ 13
+#define DWC3_EVENTQ 15
+
+/* Global RX Threshold Configuration Register */
+#define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19)
+#define DWC3_GRXTHRCFG_RXPKTCNT(n) (((n) & 0xf) << 24)
+#define DWC3_GRXTHRCFG_PKTCNTSEL (1 << 29)
+
/* Global Configuration Register */
#define DWC3_GCTL_PWRDNSCALE(n) ((n) << 19)
#define DWC3_GCTL_U2RSTECN (1 << 16)
@@ -193,6 +211,7 @@
/* Global USB3 PIPE Control Register */
#define DWC3_GUSB3PIPECTL_PHYSOFTRST (1 << 31)
#define DWC3_GUSB3PIPECTL_U2SSINP3OK (1 << 29)
+#define DWC3_GUSB3PIPECTL_DISRXDETINP3 (1 << 28)
#define DWC3_GUSB3PIPECTL_REQP1P2P3 (1 << 24)
#define DWC3_GUSB3PIPECTL_DEP1P2P3(n) ((n) << 19)
#define DWC3_GUSB3PIPECTL_DEP1P2P3_MASK DWC3_GUSB3PIPECTL_DEP1P2P3(7)
@@ -257,6 +276,9 @@
#define DWC3_DCFG_LOWSPEED (2 << 0)
#define DWC3_DCFG_FULLSPEED1 (3 << 0)
+#define DWC3_DCFG_NUMP_SHIFT 17
+#define DWC3_DCFG_NUMP(n) (((n) >> DWC3_DCFG_NUMP_SHIFT) & 0x1f)
+#define DWC3_DCFG_NUMP_MASK (0x1f << DWC3_DCFG_NUMP_SHIFT)
#define DWC3_DCFG_LPM_CAP (1 << 22)
/* Device Control Register */
@@ -380,6 +402,7 @@
#define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F)
#define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
+#define DWC3_DEPCMD_CLEARPENDIN (1 << 11)
#define DWC3_DEPCMD_CMDACT (1 << 10)
#define DWC3_DEPCMD_CMDIOC (1 << 8)
@@ -438,18 +461,17 @@ struct dwc3_event_buffer {
#define DWC3_EP_DIRECTION_TX true
#define DWC3_EP_DIRECTION_RX false
-#define DWC3_TRB_NUM 32
-#define DWC3_TRB_MASK (DWC3_TRB_NUM - 1)
+#define DWC3_TRB_NUM 256
/**
* struct dwc3_ep - device side endpoint representation
* @endpoint: usb endpoint
- * @request_list: list of requests for this endpoint
- * @req_queued: list of requests on this ep which have TRBs setup
+ * @pending_list: list of pending requests for this endpoint
+ * @started_list: list of started requests on this endpoint
* @trb_pool: array of transaction buffers
* @trb_pool_dma: dma address of @trb_pool
- * @free_slot: next slot which is going to be used
- * @busy_slot: first slot which is owned by HW
+ * @trb_enqueue: enqueue 'pointer' into TRB array
+ * @trb_dequeue: dequeue 'pointer' into TRB array
* @desc: usb_endpoint_descriptor pointer
* @dwc: pointer to DWC controller
* @saved_state: ep state saved during hibernation
@@ -464,13 +486,11 @@ struct dwc3_event_buffer {
*/
struct dwc3_ep {
struct usb_ep endpoint;
- struct list_head request_list;
- struct list_head req_queued;
+ struct list_head pending_list;
+ struct list_head started_list;
struct dwc3_trb *trb_pool;
dma_addr_t trb_pool_dma;
- u32 free_slot;
- u32 busy_slot;
const struct usb_ss_ep_comp_descriptor *comp_desc;
struct dwc3 *dwc;
@@ -486,6 +506,18 @@ struct dwc3_ep {
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN (1 << 31)
+ /*
+ * IMPORTANT: we *know* we have 256 TRBs in our @trb_pool, so we will
+ * use a u8 type here. If anybody decides to increase number of TRBs to
+ * anything larger than 256 - I can't see why people would want to do
+ * this though - then this type needs to be changed.
+ *
+ * By using u8 types we ensure that our % operator when incrementing
+ * enqueue and dequeue get optimized away by the compiler.
+ */
+ u8 trb_enqueue;
+ u8 trb_dequeue;
+
u8 number;
u8 type;
u8 resource_index;
@@ -557,6 +589,7 @@ enum dwc3_link_state {
#define DWC3_TRB_CTRL_IOC (1 << 11)
#define DWC3_TRB_CTRL_SID_SOFN(n) (((n) & 0xffff) << 14)
+#define DWC3_TRBCTL_TYPE(n) ((n) & (0x3f << 4))
#define DWC3_TRBCTL_NORMAL DWC3_TRB_CTRL_TRBCTL(1)
#define DWC3_TRBCTL_CONTROL_SETUP DWC3_TRB_CTRL_TRBCTL(2)
#define DWC3_TRBCTL_CONTROL_STATUS2 DWC3_TRB_CTRL_TRBCTL(3)
@@ -623,19 +656,32 @@ struct dwc3_hwparams {
/* HWPARAMS7 */
#define DWC3_RAM1_DEPTH(n) ((n) & 0xffff)
+/**
+ * struct dwc3_request - representation of a transfer request
+ * @request: struct usb_request to be transferred
+ * @list: a list_head used for request queueing
+ * @dep: struct dwc3_ep owning this request
+ * @first_trb_index: index to first trb used by this request
+ * @epnum: endpoint number to which this request refers
+ * @trb: pointer to struct dwc3_trb
+ * @trb_dma: DMA address of @trb
+ * @direction: IN or OUT direction flag
+ * @mapped: true when request has been dma-mapped
+ * @queued: true when request has been queued to HW
+ */
struct dwc3_request {
struct usb_request request;
struct list_head list;
struct dwc3_ep *dep;
- u32 start_slot;
+ u8 first_trb_index;
u8 epnum;
struct dwc3_trb *trb;
dma_addr_t trb_dma;
unsigned direction:1;
unsigned mapped:1;
- unsigned queued:1;
+ unsigned started:1;
};
/*
@@ -667,7 +713,6 @@ struct dwc3_scratchpad_array {
* @regs: base address for our registers
* @regs_size: address space size
* @nr_scratch: number of scratch buffers
- * @num_event_buffers: calculated number of event buffers
* @u1u2: only used on revisions <1.83a for workaround
* @maximum_speed: maximum speed requested (mainly for testing purposes)
* @revision: revision register contents
@@ -709,9 +754,7 @@ struct dwc3_scratchpad_array {
* 0 - utmi_sleep_n
* 1 - utmi_l1_suspend_n
* @is_fpga: true when we are using the FPGA board
- * @needs_fifo_resize: not all users might want fifo resizing, flag it
* @pullups_connected: true when Run/Stop bit is set
- * @resize_fifos: tells us it's ok to reconfigure our TxFIFO sizes.
* @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
* @start_config_issued: true when StartConfig command has been issued
* @three_stage_setup: set if we perform a three phase setup
@@ -756,7 +799,7 @@ struct dwc3 {
struct platform_device *xhci;
struct resource xhci_resources[DWC3_XHCI_RESOURCES_NUM];
- struct dwc3_event_buffer **ev_buffs;
+ struct dwc3_event_buffer *ev_buf;
struct dwc3_ep *eps[DWC3_ENDPOINTS_NUM];
struct usb_gadget gadget;
@@ -780,7 +823,6 @@ struct dwc3 {
u32 gctl;
u32 nr_scratch;
- u32 num_event_buffers;
u32 u1u2;
u32 maximum_speed;
@@ -855,9 +897,7 @@ struct dwc3 {
unsigned has_lpm_erratum:1;
unsigned is_utmi_l1_suspend:1;
unsigned is_fpga:1;
- unsigned needs_fifo_resize:1;
unsigned pullups_connected:1;
- unsigned resize_fifos:1;
unsigned setup_packet_pending:1;
unsigned three_stage_setup:1;
unsigned usb3_lpm_capable:1;
@@ -873,6 +913,7 @@ struct dwc3 {
unsigned dis_u3_susphy_quirk:1;
unsigned dis_u2_susphy_quirk:1;
unsigned dis_enblslpm_quirk:1;
+ unsigned dis_rxdet_inp3_quirk:1;
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
@@ -938,6 +979,10 @@ struct dwc3_event_depevt {
#define DEPEVT_STATUS_CONTROL_DATA 1
#define DEPEVT_STATUS_CONTROL_STATUS 2
+/* In response to Start Transfer */
+#define DEPEVT_TRANSFER_NO_RESOURCE 1
+#define DEPEVT_TRANSFER_BUS_EXPIRY 2
+
u32 parameters:16;
} __packed;
@@ -1025,7 +1070,7 @@ struct dwc3_gadget_ep_cmd_params {
/* prototypes */
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
-int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc);
+u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
/* check whether we are on the DWC_usb31 core */
static inline bool dwc3_is_usb31(struct dwc3 *dwc)
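The core.h comment above is worth unpacking: with DWC3_TRB_NUM raised to 256 and trb_enqueue/trb_dequeue stored as u8, the % DWC3_TRB_NUM in the increment helpers costs nothing, because an 8-bit integer already wraps 255 -> 0. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

#define TRB_NUM 256

/* With a 256-entry ring and uint8_t indices, the modulo is free: the
 * type itself wraps 255 -> 0, so "idx %= TRB_NUM" is a no-op the
 * compiler can drop entirely. */
static uint8_t inc(uint8_t idx)
{
	idx++;
	idx %= TRB_NUM;
	return idx;
}

int main(void)
{
	printf("%u\n", inc(255));	/* prints 0 */
	return 0;
}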
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 07fbc2d94..71e318025 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -217,11 +217,11 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...);
#ifdef CONFIG_DEBUG_FS
-extern int dwc3_debugfs_init(struct dwc3 *);
+extern void dwc3_debugfs_init(struct dwc3 *);
extern void dwc3_debugfs_exit(struct dwc3 *);
#else
-static inline int dwc3_debugfs_init(struct dwc3 *d)
-{ return 0; }
+static inline void dwc3_debugfs_init(struct dwc3 *d)
+{ }
static inline void dwc3_debugfs_exit(struct dwc3 *d)
{ }
#endif
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index cebf9e38b..b1dd3c6d7 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -618,24 +618,323 @@ static const struct file_operations dwc3_link_state_fops = {
.release = single_release,
};
-int dwc3_debugfs_init(struct dwc3 *dwc)
+struct dwc3_ep_file_map {
+ char name[25];
+ int (*show)(struct seq_file *s, void *unused);
+};
+
+static int dwc3_tx_fifo_queue_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_TXFIFOQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+
+static int dwc3_rx_fifo_queue_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_RXFIFOQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+
+static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_TXREQQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+
+static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_RXREQQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+
+static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_RXINFOQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+
+static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+
+static int dwc3_event_queue_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ val = dwc3_core_fifo_space(dep, DWC3_EVENTQ);
+ seq_printf(s, "%u\n", val);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+
+static int dwc3_ep_transfer_type_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (!(dep->flags & DWC3_EP_ENABLED) ||
+ !dep->endpoint.desc) {
+ seq_printf(s, "--\n");
+ goto out;
+ }
+
+ switch (usb_endpoint_type(dep->endpoint.desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ seq_printf(s, "control\n");
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ seq_printf(s, "isochronous\n");
+ break;
+ case USB_ENDPOINT_XFER_BULK:
+ seq_printf(s, "bulk\n");
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ seq_printf(s, "interrupt\n");
+ break;
+ default:
+ seq_printf(s, "--\n");
+ }
+
+out:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+
+static inline const char *dwc3_trb_type_string(struct dwc3_trb *trb)
+{
+ switch (DWC3_TRBCTL_TYPE(trb->ctrl)) {
+ case DWC3_TRBCTL_NORMAL:
+ return "normal";
+ case DWC3_TRBCTL_CONTROL_SETUP:
+ return "control-setup";
+ case DWC3_TRBCTL_CONTROL_STATUS2:
+ return "control-status2";
+ case DWC3_TRBCTL_CONTROL_STATUS3:
+ return "control-status3";
+ case DWC3_TRBCTL_CONTROL_DATA:
+ return "control-data";
+ case DWC3_TRBCTL_ISOCHRONOUS_FIRST:
+ return "isoc-first";
+ case DWC3_TRBCTL_ISOCHRONOUS:
+ return "isoc";
+ case DWC3_TRBCTL_LINK_TRB:
+ return "link";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static int dwc3_ep_trb_ring_show(struct seq_file *s, void *unused)
+{
+ struct dwc3_ep *dep = s->private;
+ struct dwc3 *dwc = dep->dwc;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (dep->number <= 1) {
+ seq_printf(s, "--\n");
+ goto out;
+ }
+
+ seq_printf(s, "enqueue pointer %d\n", dep->trb_enqueue);
+ seq_printf(s, "dequeue pointer %d\n", dep->trb_dequeue);
+ seq_printf(s, "\n--------------------------------------------------\n\n");
+ seq_printf(s, "buffer_addr,size,type,ioc,isp_imi,csp,chn,lst,hwo\n");
+
+ for (i = 0; i < DWC3_TRB_NUM; i++) {
+ struct dwc3_trb *trb = &dep->trb_pool[i];
+
+ seq_printf(s, "%08x%08x,%d,%s,%d,%d,%d,%d,%d,%d\n",
+ trb->bph, trb->bpl, trb->size,
+ dwc3_trb_type_string(trb),
+ !!(trb->ctrl & DWC3_TRB_CTRL_IOC),
+ !!(trb->ctrl & DWC3_TRB_CTRL_ISP_IMI),
+ !!(trb->ctrl & DWC3_TRB_CTRL_CSP),
+ !!(trb->ctrl & DWC3_TRB_CTRL_CHN),
+ !!(trb->ctrl & DWC3_TRB_CTRL_LST),
+ !!(trb->ctrl & DWC3_TRB_CTRL_HWO));
+ }
+
+out:
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ return 0;
+}
+
+static struct dwc3_ep_file_map map[] = {
+ { "tx_fifo_queue", dwc3_tx_fifo_queue_show, },
+ { "rx_fifo_queue", dwc3_rx_fifo_queue_show, },
+ { "tx_request_queue", dwc3_tx_request_queue_show, },
+ { "rx_request_queue", dwc3_rx_request_queue_show, },
+ { "rx_info_queue", dwc3_rx_info_queue_show, },
+ { "descriptor_fetch_queue", dwc3_descriptor_fetch_queue_show, },
+ { "event_queue", dwc3_event_queue_show, },
+ { "transfer_type", dwc3_ep_transfer_type_show, },
+ { "trb_ring", dwc3_ep_trb_ring_show, },
+};
+
+static int dwc3_endpoint_open(struct inode *inode, struct file *file)
+{
+ const char *file_name = file_dentry(file)->d_iname;
+ struct dwc3_ep_file_map *f_map;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(map); i++) {
+ f_map = &map[i];
+
+ if (strcmp(f_map->name, file_name) == 0)
+ break;
+ }
+
+ return single_open(file, f_map->show, inode->i_private);
+}
+
+static const struct file_operations dwc3_endpoint_fops = {
+ .open = dwc3_endpoint_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void dwc3_debugfs_create_endpoint_file(struct dwc3_ep *dep,
+ struct dentry *parent, int type)
{
- struct dentry *root;
struct dentry *file;
- int ret;
+ struct dwc3_ep_file_map *ep_file = &map[type];
- root = debugfs_create_dir(dev_name(dwc->dev), NULL);
- if (!root) {
- ret = -ENOMEM;
- goto err0;
+ file = debugfs_create_file(ep_file->name, S_IRUGO, parent, dep,
+ &dwc3_endpoint_fops);
+}
+
+static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
+ struct dentry *parent)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(map); i++)
+ dwc3_debugfs_create_endpoint_file(dep, parent, i);
+}
+
+static void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep,
+ struct dentry *parent)
+{
+ struct dentry *dir;
+
+ dir = debugfs_create_dir(dep->name, parent);
+ if (IS_ERR_OR_NULL(dir))
+ return;
+
+ dwc3_debugfs_create_endpoint_files(dep, dir);
+}
+
+static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
+ struct dentry *parent)
+{
+ int i;
+
+ for (i = 0; i < dwc->num_in_eps; i++) {
+ u8 epnum = (i << 1) | 1;
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ dwc3_debugfs_create_endpoint_dir(dep, parent);
+ }
+
+ for (i = 0; i < dwc->num_out_eps; i++) {
+ u8 epnum = (i << 1);
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ if (!dep)
+ continue;
+
+ dwc3_debugfs_create_endpoint_dir(dep, parent);
}
+}
+
+void dwc3_debugfs_init(struct dwc3 *dwc)
+{
+ struct dentry *root;
+ struct dentry *file;
+ root = debugfs_create_dir(dev_name(dwc->dev), NULL);
+ if (IS_ERR_OR_NULL(root)) {
+ if (!root)
+ dev_err(dwc->dev, "Can't create debugfs root\n");
+ return;
+ }
dwc->root = root;
dwc->regset = kzalloc(sizeof(*dwc->regset), GFP_KERNEL);
if (!dwc->regset) {
- ret = -ENOMEM;
- goto err1;
+ debugfs_remove_recursive(root);
+ return;
}
dwc->regset->regs = dwc3_regs;
@@ -643,47 +942,30 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->base = dwc->regs;
file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
- if (!file) {
- ret = -ENOMEM;
- goto err2;
- }
+ if (!file)
+ dev_dbg(dwc->dev, "Can't create debugfs regdump\n");
if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
file = debugfs_create_file("mode", S_IRUGO | S_IWUSR, root,
dwc, &dwc3_mode_fops);
- if (!file) {
- ret = -ENOMEM;
- goto err2;
- }
+ if (!file)
+ dev_dbg(dwc->dev, "Can't create debugfs mode\n");
}
if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) ||
IS_ENABLED(CONFIG_USB_DWC3_GADGET)) {
file = debugfs_create_file("testmode", S_IRUGO | S_IWUSR, root,
dwc, &dwc3_testmode_fops);
- if (!file) {
- ret = -ENOMEM;
- goto err2;
- }
-
- file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root,
- dwc, &dwc3_link_state_fops);
- if (!file) {
- ret = -ENOMEM;
- goto err2;
- }
- }
+ if (!file)
+ dev_dbg(dwc->dev, "Can't create debugfs testmode\n");
- return 0;
+ file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR,
+ root, dwc, &dwc3_link_state_fops);
+ if (!file)
+ dev_dbg(dwc->dev, "Can't create debugfs link_state\n");
-err2:
- kfree(dwc->regset);
-
-err1:
- debugfs_remove_recursive(root);
-
-err0:
- return ret;
+ dwc3_debugfs_create_endpoint_dirs(dwc, root);
+ }
}
void dwc3_debugfs_exit(struct dwc3 *dwc)
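All of the new per-endpoint debugfs files share a single file_operations; dwc3_endpoint_open() recovers which file was opened by matching the dentry name against the static map[] table and hands the matching show() routine to single_open(). A standalone analogue of that name-to-callback dispatch (names are illustrative):

#include <stdio.h>
#include <string.h>

struct file_map {
	const char *name;
	int (*show)(void);
};

static int show_tx(void) { puts("tx"); return 0; }
static int show_rx(void) { puts("rx"); return 0; }

static const struct file_map map[] = {
	{ "tx_fifo_queue", show_tx },
	{ "rx_fifo_queue", show_rx },
};

/* Analogue of dwc3_endpoint_open(): pick the show() routine by the
 * name of the file that was opened. */
static int dispatch(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (strcmp(map[i].name, name) == 0)
			return map[i].show();
	return -1;	/* unlike the in-tree loop, fail on no match */
}

One subtlety: the in-tree loop falls through with f_map pointing at the last table entry when nothing matches, which can only happen if the table and the created files drift apart; the sketch above returns an error instead.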
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 55da2c7f7..af264493b 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -126,8 +126,6 @@ struct dwc3_omap {
u32 debug_offset;
u32 irq0_offset;
- u32 dma_status:1;
-
struct extcon_dev *edev;
struct notifier_block vbus_nb;
struct notifier_block id_nb;
@@ -277,9 +275,6 @@ static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
reg = dwc3_omap_read_irqmisc_status(omap);
- if (reg & USBOTGSS_IRQMISC_DMADISABLECLR)
- omap->dma_status = false;
-
dwc3_omap_write_irqmisc_status(omap, reg);
reg = dwc3_omap_read_irq0_status(omap);
@@ -331,8 +326,6 @@ static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
dwc3_omap_write_irqmisc_clr(omap, reg);
}
-static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32);
-
static int dwc3_omap_id_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -490,7 +483,6 @@ static int dwc3_omap_probe(struct platform_device *pdev)
omap->irq = irq;
omap->base = base;
omap->vbus_reg = vbus_reg;
- dev->dma_mask = &dwc3_omap_dma_mask;
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
@@ -504,7 +496,6 @@ static int dwc3_omap_probe(struct platform_device *pdev)
/* check the DMA Status */
reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
- omap->dma_status = !!(reg & USBOTGSS_SYSCONFIG_DMADISABLE);
ret = devm_request_irq(dev, omap->irq, dwc3_omap_interrupt, 0,
"dwc3-omap", omap);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index adc1e8a62..14196cd41 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -47,7 +47,7 @@ static const struct acpi_gpio_mapping acpi_dwc3_byt_gpios[] = {
{ },
};
-static int dwc3_pci_quirks(struct pci_dev *pdev)
+static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
{
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
pdev->device == PCI_DEVICE_ID_AMD_NL_USB) {
@@ -77,8 +77,7 @@ static int dwc3_pci_quirks(struct pci_dev *pdev)
pdata.dis_u3_susphy_quirk = true;
pdata.dis_u2_susphy_quirk = true;
- return platform_device_add_data(pci_get_drvdata(pdev), &pdata,
- sizeof(pdata));
+ return platform_device_add_data(dwc3, &pdata, sizeof(pdata));
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -123,8 +122,7 @@ static int dwc3_pci_quirks(struct pci_dev *pdev)
pdata.has_lpm_erratum = true;
pdata.dis_enblslpm_quirk = true;
- return platform_device_add_data(pci_get_drvdata(pdev), &pdata,
- sizeof(pdata));
+ return platform_device_add_data(dwc3, &pdata, sizeof(pdata));
}
return 0;
@@ -169,20 +167,20 @@ static int dwc3_pci_probe(struct pci_dev *pci,
return ret;
}
- pci_set_drvdata(pci, dwc3);
- ret = dwc3_pci_quirks(pci);
- if (ret)
- goto err;
-
dwc3->dev.parent = dev;
ACPI_COMPANION_SET(&dwc3->dev, ACPI_COMPANION(dev));
+ ret = dwc3_pci_quirks(pci, dwc3);
+ if (ret)
+ goto err;
+
ret = platform_device_add(dwc3);
if (ret) {
dev_err(dev, "failed to register dwc3 device\n");
goto err;
}
+ pci_set_drvdata(pci, dwc3);
return 0;
err:
platform_device_put(dwc3);
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 5c0adb9c6..89a2f712f 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -129,12 +129,18 @@ static int st_dwc3_drd_init(struct st_dwc3 *dwc3_data)
switch (dwc3_data->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
- val &= ~(USB3_FORCE_VBUSVALID | USB3_DELAY_VBUSVALID
+ val &= ~(USB3_DELAY_VBUSVALID
| USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3)
| USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2
| USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2);
- val |= USB3_DEVICE_NOT_HOST;
+ /*
+ * When USB3_PORT2_FORCE_VBUSVALID is '1' and
+ * USB3_PORT2_DEVICE_NOT_HOST is '1', the VBUSVLDEXT2 input
+ * of the pico PHY is forced to 1.
+ */
+
+ val |= USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID;
break;
case USB_DR_MODE_HOST:
@@ -227,7 +233,8 @@ static int st_dwc3_probe(struct platform_device *pdev)
dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
- dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown");
+ dwc3_data->rstc_pwrdn =
+ devm_reset_control_get_exclusive(dev, "powerdown");
if (IS_ERR(dwc3_data->rstc_pwrdn)) {
dev_err(&pdev->dev, "could not get power controller\n");
ret = PTR_ERR(dwc3_data->rstc_pwrdn);
@@ -237,7 +244,8 @@ static int st_dwc3_probe(struct platform_device *pdev)
/* Manage PowerDown */
reset_control_deassert(dwc3_data->rstc_pwrdn);
- dwc3_data->rstc_rst = devm_reset_control_get(dev, "softreset");
+ dwc3_data->rstc_rst =
+ devm_reset_control_get_shared(dev, "softreset");
if (IS_ERR(dwc3_data->rstc_rst)) {
dev_err(&pdev->dev, "could not get reset controller\n");
ret = PTR_ERR(dwc3_data->rstc_rst);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index eca2e6d8e..51b52a79d 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -70,10 +70,10 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
return 0;
}
- trb = &dwc->ep0_trb[dep->free_slot];
+ trb = &dwc->ep0_trb[dep->trb_enqueue];
if (chain)
- dep->free_slot++;
+ dep->trb_enqueue++;
trb->bpl = lower_32_bits(buf_dma);
trb->bph = upper_32_bits(buf_dma);
@@ -124,7 +124,7 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
req->request.status = -EINPROGRESS;
req->epnum = dep->number;
- list_add_tail(&req->list, &dep->request_list);
+ list_add_tail(&req->list, &dep->pending_list);
/*
* Gadget driver might not be quick enough to queue a request
@@ -240,7 +240,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
}
/* we share one TRB for ep0/1 */
- if (!list_empty(&dep->request_list)) {
+ if (!list_empty(&dep->pending_list)) {
ret = -EBUSY;
goto out;
}
@@ -272,10 +272,10 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
dep->flags = DWC3_EP_ENABLED;
dwc->delayed_status = false;
- if (!list_empty(&dep->request_list)) {
+ if (!list_empty(&dep->pending_list)) {
struct dwc3_request *req;
- req = next_request(&dep->request_list);
+ req = next_request(&dep->pending_list);
dwc3_gadget_giveback(dep, req, -ECONNRESET);
}
@@ -463,8 +463,18 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
if (!set)
return -EINVAL;
- dwc->test_mode_nr = wIndex >> 8;
- dwc->test_mode = true;
+ switch (wIndex >> 8) {
+ case TEST_J:
+ case TEST_K:
+ case TEST_SE0_NAK:
+ case TEST_PACKET:
+ case TEST_FORCE_EN:
+ dwc->test_mode_nr = wIndex >> 8;
+ dwc->test_mode = true;
+ break;
+ default:
+ return -EINVAL;
+ }
break;
default:
return -EINVAL;
@@ -586,9 +596,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
reg |= (DWC3_DCTL_ACCEPTU1ENA | DWC3_DCTL_ACCEPTU2ENA);
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
-
- dwc->resize_fifos = true;
- dwc3_trace(trace_dwc3_ep0, "resize FIFOs flag SET");
}
break;
@@ -809,7 +816,7 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
trace_dwc3_complete_trb(ep0, trb);
- r = next_request(&ep0->request_list);
+ r = next_request(&ep0->pending_list);
if (!r)
return;
@@ -848,7 +855,7 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
trb++;
length = trb->size & DWC3_TRB_SIZE_MASK;
- ep0->free_slot = 0;
+ ep0->trb_enqueue = 0;
}
transfer_size = roundup((ur->length - transfer_size),
@@ -897,8 +904,8 @@ static void dwc3_ep0_complete_status(struct dwc3 *dwc,
trace_dwc3_complete_trb(dep, trb);
- if (!list_empty(&dep->request_list)) {
- r = next_request(&dep->request_list);
+ if (!list_empty(&dep->pending_list)) {
+ r = next_request(&dep->pending_list);
dwc3_gadget_giveback(dep, r, 0);
}
@@ -1027,12 +1034,6 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
{
- if (dwc->resize_fifos) {
- dwc3_trace(trace_dwc3_ep0, "Resizing FIFOs");
- dwc3_gadget_resize_tx_fifos(dwc);
- dwc->resize_fifos = 0;
- }
-
WARN_ON(dwc3_ep0_start_control_status(dep));
}
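The ep0.c feature-request hunk above stops storing whatever the host put in the high byte of wIndex and accepts only the five test-mode selectors USB 2.0 defines. A standalone sketch of the whitelist (selector values per USB 2.0, section 9.4.9):

#include <errno.h>

/* USB 2.0 test mode selectors */
enum { TEST_J = 1, TEST_K, TEST_SE0_NAK, TEST_PACKET, TEST_FORCE_EN };

/* Analogue of the ep0 change: whitelist the selector instead of
 * trusting the host-supplied high byte of wIndex. */
static int set_test_mode(unsigned wIndex, int *mode)
{
	switch (wIndex >> 8) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		*mode = wIndex >> 8;
		return 0;
	default:
		return -EINVAL;
	}
}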
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 8e4a1b195..07248ff1b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -145,90 +145,21 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
return -ETIMEDOUT;
}
-/**
- * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
- * @dwc: pointer to our context structure
- *
- * This function will a best effort FIFO allocation in order
- * to improve FIFO usage and throughput, while still allowing
- * us to enable as many endpoints as possible.
- *
- * Keep in mind that this operation will be highly dependent
- * on the configured size for RAM1 - which contains TxFifo -,
- * the amount of endpoints enabled on coreConsultant tool, and
- * the width of the Master Bus.
- *
- * In the ideal world, we would always be able to satisfy the
- * following equation:
- *
- * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
- * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
- *
- * Unfortunately, due to many variables that's not always the case.
- */
-int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
+static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
{
- int last_fifo_depth = 0;
- int ram1_depth;
- int fifo_size;
- int mdwidth;
- int num;
-
- if (!dwc->needs_fifo_resize)
- return 0;
-
- ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
- mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
-
- /* MDWIDTH is represented in bits, we need it in bytes */
- mdwidth >>= 3;
-
- /*
- * FIXME For now we will only allocate 1 wMaxPacketSize space
- * for each enabled endpoint, later patches will come to
- * improve this algorithm so that we better use the internal
- * FIFO space
- */
- for (num = 0; num < dwc->num_in_eps; num++) {
- /* bit0 indicates direction; 1 means IN ep */
- struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
- int mult = 1;
- int tmp;
-
- if (!(dep->flags & DWC3_EP_ENABLED))
- continue;
-
- if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
- || usb_endpoint_xfer_isoc(dep->endpoint.desc))
- mult = 3;
-
- /*
- * REVISIT: the following assumes we will always have enough
- * space available on the FIFO RAM for all possible use cases.
- * Make sure that's true somehow and change FIFO allocation
- * accordingly.
- *
- * If we have Bulk or Isochronous endpoints, we want
- * them to be able to be very, very fast. So we're giving
- * those endpoints a fifo_size which is enough for 3 full
- * packets
- */
- tmp = mult * (dep->endpoint.maxpacket + mdwidth);
- tmp += mdwidth;
-
- fifo_size = DIV_ROUND_UP(tmp, mdwidth);
-
- fifo_size |= (last_fifo_depth << 16);
-
- dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d",
- dep->name, last_fifo_depth, fifo_size & 0xffff);
-
- dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
+ dep->trb_enqueue++;
+ dep->trb_enqueue %= DWC3_TRB_NUM;
+}
- last_fifo_depth += (fifo_size & 0xffff);
- }
+static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
+{
+ dep->trb_dequeue++;
+ dep->trb_dequeue %= DWC3_TRB_NUM;
+}
- return 0;
+static int dwc3_ep_is_last_trb(unsigned int index)
+{
+ return index == DWC3_TRB_NUM - 1;
}
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
@@ -237,21 +168,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
struct dwc3 *dwc = dep->dwc;
int i;
- if (req->queued) {
+ if (req->started) {
i = 0;
do {
- dep->busy_slot++;
+ dwc3_ep_inc_deq(dep);
/*
* Skip LINK TRB. We can't use req->trb and check for
* DWC3_TRBCTL_LINK_TRB because it points the TRB we
* just completed (not the LINK TRB).
*/
- if (((dep->busy_slot & DWC3_TRB_MASK) ==
- DWC3_TRB_NUM- 1) &&
- usb_endpoint_xfer_isoc(dep->endpoint.desc))
- dep->busy_slot++;
+ if (dwc3_ep_is_last_trb(dep->trb_dequeue))
+ dwc3_ep_inc_deq(dep);
} while(++i < req->request.num_mapped_sgs);
- req->queued = false;
+ req->started = false;
}
list_del(&req->list);
req->trb = NULL;
@@ -307,6 +236,8 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
} while (1);
}
+static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
+
int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
@@ -314,8 +245,40 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
u32 timeout = 500;
u32 reg;
+ int susphy = false;
+ int ret = -EINVAL;
+
trace_dwc3_gadget_ep_cmd(dep, cmd, params);
+ /*
+ * Synopsys Databook 2.60a states, in section 6.3.2.5.[1-8], that if
+ * we're issuing an endpoint command, we must check whether the
+ * GUSB2PHYCFG.SUSPHY bit is set; if it is, we need to clear it.
+ *
+ * We also restore the SUSPHY bit to its previous value before
+ * returning, as stated in the same section of the Synopsys databook.
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
+ susphy = true;
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+
+ if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
+ int needs_wakeup;
+
+ needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
+ dwc->link_state == DWC3_LINK_STATE_U2 ||
+ dwc->link_state == DWC3_LINK_STATE_U3);
+
+ if (unlikely(needs_wakeup)) {
+ ret = __dwc3_gadget_wakeup(dwc);
+ dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
+ ret);
+ }
+ }
+
dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
@@ -324,12 +287,40 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
do {
reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
if (!(reg & DWC3_DEPCMD_CMDACT)) {
+ int cmd_status = DWC3_DEPCMD_STATUS(reg);
+
dwc3_trace(trace_dwc3_gadget,
"Command Complete --> %d",
- DWC3_DEPCMD_STATUS(reg));
- if (DWC3_DEPCMD_STATUS(reg))
- return -EINVAL;
- return 0;
+ cmd_status);
+
+ switch (cmd_status) {
+ case 0:
+ ret = 0;
+ break;
+ case DEPEVT_TRANSFER_NO_RESOURCE:
+ dwc3_trace(trace_dwc3_gadget, "no resource available");
+ ret = -EINVAL;
+ break;
+ case DEPEVT_TRANSFER_BUS_EXPIRY:
+ /*
+ * SW issues START TRANSFER command to
+ * isochronous ep with future frame interval. If
+ * future interval time has already passed when
+ * core receives the command, it will respond
+ * with an error status of 'Bus Expiry'.
+ *
+ * Instead of always returning -EINVAL, let's
+ * give a hint to the gadget driver that this is
+ * the case by returning -EAGAIN.
+ */
+ dwc3_trace(trace_dwc3_gadget, "bus expiry");
+ ret = -EAGAIN;
+ break;
+ default:
+ dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
+ }
+
+ break;
}
/*
@@ -340,11 +331,42 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
if (!timeout) {
dwc3_trace(trace_dwc3_gadget,
"Command Timed Out");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ break;
}
udelay(1);
} while (1);
+
+ if (unlikely(susphy)) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+
+ return ret;
+}
+
+static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
+{
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_gadget_ep_cmd_params params;
+ u32 cmd = DWC3_DEPCMD_CLEARSTALL;
+
+ /*
+ * As of core revision 2.60a the recommended programming model
+ * is to set the ClearPendIN bit when issuing a Clear Stall EP
+ * command for IN endpoints. This is to prevent an issue where
+ * some (non-compliant) hosts may not send ACK TPs for pending
+ * IN transfers due to a mishandled error condition. Synopsys
+ * STAR 9000614252.
+ */
+ if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
+ cmd |= DWC3_DEPCMD_CLEARPENDIN;
+
+ memset(&params, 0, sizeof(params));
+
+ return dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
}
static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
@@ -464,9 +486,19 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
/* Burst size is only needed in SuperSpeed mode */
if (dwc->gadget.speed >= USB_SPEED_SUPER) {
- u32 burst = dep->endpoint.maxburst - 1;
+ u32 burst = dep->endpoint.maxburst;
+ u32 nump;
+ u32 reg;
+
+ /* update NumP */
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ nump = DWC3_DCFG_NUMP(reg);
+ nump = max(nump, burst);
+ reg &= ~DWC3_DCFG_NUMP_MASK;
+ reg |= nump << DWC3_DCFG_NUMP_SHIFT;
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
- params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
+ params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
}
if (ignore)
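DCFG.NUMP controls how many receive-buffer packets the device advertises; the hunk above raises it to the endpoint's maxburst with a read-modify-write using the new DWC3_DCFG_NUMP_* macros, and only then programs the burst size (burst - 1, since the field is zero-based). A standalone sketch of the field update:

#include <stdint.h>

#define NUMP_SHIFT	17
#define NUMP_MASK	(0x1fu << NUMP_SHIFT)
#define NUMP_GET(r)	(((r) >> NUMP_SHIFT) & 0x1f)

/* Read-modify-write of a 5-bit register field, mirroring the DCFG.NUMP
 * update: never lower the current value, only raise it to 'burst'. */
static uint32_t update_nump(uint32_t reg, uint32_t burst)
{
	uint32_t nump = NUMP_GET(reg);

	if (burst > nump)
		nump = burst;
	reg &= ~NUMP_MASK;
	reg |= nump << NUMP_SHIFT;
	return reg;
}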
@@ -567,10 +599,10 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
reg |= DWC3_DALEPENA_EP(dep->number);
dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
- if (!usb_endpoint_xfer_isoc(desc))
+ if (usb_endpoint_xfer_control(desc))
goto out;
- /* Link TRB for ISOC. The HWO bit is never reset */
+ /* Link TRB. The HWO bit is never reset */
trb_st_hw = &dep->trb_pool[0];
trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
@@ -608,19 +640,19 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_request *req;
- if (!list_empty(&dep->req_queued)) {
+ if (!list_empty(&dep->started_list)) {
dwc3_stop_active_transfer(dwc, dep->number, true);
/* - giveback all requests to gadget driver */
- while (!list_empty(&dep->req_queued)) {
- req = next_request(&dep->req_queued);
+ while (!list_empty(&dep->started_list)) {
+ req = next_request(&dep->started_list);
dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
}
}
- while (!list_empty(&dep->request_list)) {
- req = next_request(&dep->request_list);
+ while (!list_empty(&dep->pending_list)) {
+ req = next_request(&dep->pending_list);
dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
}
@@ -783,20 +815,19 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
chain ? " chain" : "");
- trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
+ trb = &dep->trb_pool[dep->trb_enqueue];
if (!req->trb) {
- dwc3_gadget_move_request_queued(req);
+ dwc3_gadget_move_started_request(req);
req->trb = trb;
req->trb_dma = dwc3_trb_dma_offset(dep, trb);
- req->start_slot = dep->free_slot & DWC3_TRB_MASK;
+ req->first_trb_index = dep->trb_enqueue;
}
- dep->free_slot++;
- /* Skip the LINK-TRB on ISOC */
- if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
- usb_endpoint_xfer_isoc(dep->endpoint.desc))
- dep->free_slot++;
+ dwc3_ep_inc_enq(dep);
+ /* Skip the LINK-TRB */
+ if (dwc3_ep_is_last_trb(dep->trb_enqueue))
+ dwc3_ep_inc_enq(dep);
trb->size = DWC3_TRB_SIZE_LENGTH(length);
trb->bpl = lower_32_bits(dma);
@@ -812,6 +843,9 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
else
trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
+
+ /* always enable Interrupt on Missed ISOC */
+ trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
break;
case USB_ENDPOINT_XFER_BULK:
@@ -826,15 +860,14 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
BUG();
}
+ /* always enable Continue on Short Packet */
+ trb->ctrl |= DWC3_TRB_CTRL_CSP;
+
if (!req->request.no_interrupt && !chain)
- trb->ctrl |= DWC3_TRB_CTRL_IOC;
+ trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI;
- if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
- trb->ctrl |= DWC3_TRB_CTRL_CSP;
- } else if (last) {
+ if (last)
trb->ctrl |= DWC3_TRB_CTRL_LST;
- }
if (chain)
trb->ctrl |= DWC3_TRB_CTRL_CHN;
@@ -860,55 +893,29 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
struct dwc3_request *req, *n;
u32 trbs_left;
- u32 max;
unsigned int last_one = 0;
BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
- /* the first request must not be queued */
- trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
-
- /* Can't wrap around on a non-isoc EP since there's no link TRB */
- if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
- if (trbs_left > max)
- trbs_left = max;
- }
+ trbs_left = dep->trb_dequeue - dep->trb_enqueue;
/*
- * If busy & slot are equal than it is either full or empty. If we are
- * starting to process requests then we are empty. Otherwise we are
+ * If enqueue & dequeue are equal then it is either full or empty. If we
+ * are starting to process requests then we are empty. Otherwise we are
* full and don't do anything
*/
if (!trbs_left) {
if (!starting)
return;
+
trbs_left = DWC3_TRB_NUM;
- /*
- * In case we start from scratch, we queue the ISOC requests
- * starting from slot 1. This is done because we use ring
- * buffer and have no LST bit to stop us. Instead, we place
- * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
- * after the first request so we start at slot 1 and have
- * 7 requests proceed before we hit the first IOC.
- * Other transfer types don't use the ring buffer and are
- * processed from the first TRB until the last one. Since we
- * don't wrap around we have to start at the beginning.
- */
- if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- dep->busy_slot = 1;
- dep->free_slot = 1;
- } else {
- dep->busy_slot = 0;
- dep->free_slot = 0;
- }
}
/* The last TRB is a link TRB, not used for xfer */
- if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ if (trbs_left <= 1)
return;
- list_for_each_entry_safe(req, n, &dep->request_list, list) {
+ list_for_each_entry_safe(req, n, &dep->pending_list, list) {
unsigned length;
dma_addr_t dma;
last_one = false;
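
With free-running indices, trb_enqueue == trb_dequeue is ambiguous between full and empty, which the comment above resolves through the "starting" flag. A self-contained sketch of that disambiguation (hypothetical names; a power-of-two ring is assumed so the unsigned subtraction wraps correctly):

enum { RING_SLOTS = 32 };	/* illustrative power-of-two ring size */

static unsigned int ring_space_sketch(unsigned int enq, unsigned int deq,
				      int starting)
{
	unsigned int left = (deq - enq) % RING_SLOTS;

	/* equal indices: empty when (re)starting, full otherwise */
	if (!left && starting)
		left = RING_SLOTS;

	return left;
}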
@@ -927,7 +934,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
if (i == (request->num_mapped_sgs - 1) ||
sg_is_last(s)) {
- if (list_empty(&dep->request_list))
+ if (list_empty(&dep->pending_list))
last_one = true;
chain = false;
}
@@ -957,7 +964,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
last_one = 1;
/* Is this the last request? */
- if (list_is_last(&req->list, &dep->request_list))
+ if (list_is_last(&req->list, &dep->pending_list))
last_one = 1;
dwc3_prepare_one_trb(dep, req, dma, length,
@@ -988,18 +995,18 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
* new requests as we try to set the IOC bit only on the last request.
*/
if (start_new) {
- if (list_empty(&dep->req_queued))
+ if (list_empty(&dep->started_list))
dwc3_prepare_trbs(dep, start_new);
/* req points to the first request which will be sent */
- req = next_request(&dep->req_queued);
+ req = next_request(&dep->started_list);
} else {
dwc3_prepare_trbs(dep, start_new);
/*
* req points to the first request where HWO changed from 0 to 1
*/
- req = next_request(&dep->req_queued);
+ req = next_request(&dep->started_list);
}
if (!req) {
dep->flags |= DWC3_EP_PENDING_REQUEST;
@@ -1046,7 +1053,7 @@ static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
{
u32 uf;
- if (list_empty(&dep->request_list)) {
+ if (list_empty(&dep->pending_list)) {
dwc3_trace(trace_dwc3_gadget,
"ISOC ep %s run out for requests",
dep->name);
@@ -1114,7 +1121,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
if (ret)
return ret;
- list_add_tail(&req->list, &dep->request_list);
+ list_add_tail(&req->list, &dep->pending_list);
/*
* If there are no pending requests and the endpoint isn't already
@@ -1149,7 +1156,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
* notion of current microframe.
*/
if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
- if (list_empty(&dep->req_queued)) {
+ if (list_empty(&dep->started_list)) {
dwc3_stop_active_transfer(dwc, dep->number, true);
dep->flags = DWC3_EP_ENABLED;
}
@@ -1267,13 +1274,13 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
spin_lock_irqsave(&dwc->lock, flags);
- list_for_each_entry(r, &dep->request_list, list) {
+ list_for_each_entry(r, &dep->pending_list, list) {
if (r == req)
break;
}
if (r != req) {
- list_for_each_entry(r, &dep->req_queued, list) {
+ list_for_each_entry(r, &dep->started_list, list) {
if (r == req)
break;
}
@@ -1313,10 +1320,10 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
if (value) {
if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
- (!list_empty(&dep->req_queued) ||
- !list_empty(&dep->request_list)))) {
+ (!list_empty(&dep->started_list) ||
+ !list_empty(&dep->pending_list)))) {
dwc3_trace(trace_dwc3_gadget,
- "%s: pending request, cannot halt\n",
+ "%s: pending request, cannot halt",
dep->name);
return -EAGAIN;
}
@@ -1329,8 +1336,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
else
dep->flags |= DWC3_EP_STALL;
} else {
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
- DWC3_DEPCMD_CLEARSTALL, &params);
+ ret = dwc3_send_clear_stall_ep_cmd(dep);
if (ret)
dev_err(dwc->dev, "failed to clear STALL on %s\n",
dep->name);
@@ -1417,22 +1423,16 @@ static int dwc3_gadget_get_frame(struct usb_gadget *g)
return DWC3_DSTS_SOFFN(reg);
}
-static int dwc3_gadget_wakeup(struct usb_gadget *g)
+static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
{
- struct dwc3 *dwc = gadget_to_dwc(g);
-
unsigned long timeout;
- unsigned long flags;
+ int ret;
u32 reg;
- int ret = 0;
-
u8 link_state;
u8 speed;
- spin_lock_irqsave(&dwc->lock, flags);
-
/*
* According to the Databook, Remote wakeup request should
* be issued only when the device is in early suspend state.
@@ -1445,8 +1445,7 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g)
if ((speed == DWC3_DSTS_SUPERSPEED) ||
(speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
link_state = DWC3_DSTS_USBLNKST(reg);
@@ -1459,14 +1458,13 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g)
dwc3_trace(trace_dwc3_gadget,
"can't wakeup from '%s'\n",
dwc3_gadget_link_string(link_state));
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
if (ret < 0) {
dev_err(dwc->dev, "failed to put link in Recovery\n");
- goto out;
+ return ret;
}
/* Recent versions do this automatically */
@@ -1490,10 +1488,20 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g)
if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
dev_err(dwc->dev, "failed to send remote wakeup\n");
- ret = -EINVAL;
+ return -EINVAL;
}
-out:
+ return 0;
+}
+
+static int dwc3_gadget_wakeup(struct usb_gadget *g)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ ret = __dwc3_gadget_wakeup(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -1620,7 +1628,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
irq = platform_get_irq(to_platform_device(dwc->dev), 0);
ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
- IRQF_SHARED, "dwc3", dwc);
+ IRQF_SHARED, "dwc3", dwc->ev_buf);
if (ret) {
dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
irq, ret);
@@ -1682,6 +1690,17 @@ static int dwc3_gadget_start(struct usb_gadget *g,
}
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+ /*
+ * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
+ * field instead of letting dwc3 itself calculate that automatically.
+ *
+ * This way, we maximize the chances that we'll be able to get several
+ * bursts of data without going through any sort of endpoint throttling.
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
+ reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
+ dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
@@ -1720,7 +1739,7 @@ err2:
err1:
spin_unlock_irqrestore(&dwc->lock, flags);
- free_irq(irq, dwc);
+ free_irq(irq, dwc->ev_buf);
err0:
return ret;
@@ -1743,7 +1762,7 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
spin_unlock_irqrestore(&dwc->lock, flags);
irq = platform_get_irq(to_platform_device(dwc->dev), 0);
- free_irq(irq, dwc);
+ free_irq(irq, dwc->ev_buf);
return 0;
}
@@ -1815,8 +1834,8 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
dep->endpoint.caps.dir_in = !!direction;
dep->endpoint.caps.dir_out = !direction;
- INIT_LIST_HEAD(&dep->request_list);
- INIT_LIST_HEAD(&dep->req_queued);
+ INIT_LIST_HEAD(&dep->pending_list);
+ INIT_LIST_HEAD(&dep->started_list);
}
return 0;
@@ -1913,11 +1932,11 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
* If there are still queued request
* then wait, do not issue either END
* or UPDATE TRANSFER, just attach next
- * request in request_list during
+ * request in pending_list during
* giveback. If any future queued request
* is successfully transferred then we
* will issue UPDATE TRANSFER for all
- * request in the request_list.
+ * request in the pending_list.
*/
dep->flags |= DWC3_EP_MISSED_ISOC;
} else {
@@ -1963,15 +1982,14 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
int ret;
do {
- req = next_request(&dep->req_queued);
+ req = next_request(&dep->started_list);
if (WARN_ON_ONCE(!req))
return 1;
i = 0;
do {
- slot = req->start_slot + i;
- if ((slot == DWC3_TRB_NUM - 1) &&
- usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ slot = req->first_trb_index + i;
+ if (slot == DWC3_TRB_NUM - 1)
slot++;
slot %= DWC3_TRB_NUM;
trb = &dep->trb_pool[slot];
@@ -1989,8 +2007,8 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
} while (1);
if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
- list_empty(&dep->req_queued)) {
- if (list_empty(&dep->request_list)) {
+ list_empty(&dep->started_list)) {
+ if (list_empty(&dep->pending_list)) {
/*
* If there is no entry in request list then do
* not issue END TRANSFER now. Just set PENDING
@@ -2039,7 +2057,7 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
if (!(dep->flags & DWC3_EP_ENABLED))
continue;
- if (!list_empty(&dep->req_queued))
+ if (!list_empty(&dep->started_list))
return;
}
@@ -2250,7 +2268,6 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
struct dwc3_ep *dep;
- struct dwc3_gadget_ep_cmd_params params;
int ret;
dep = dwc->eps[epnum];
@@ -2262,9 +2279,7 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
dep->flags &= ~DWC3_EP_STALL;
- memset(&params, 0, sizeof(params));
- ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
- DWC3_DEPCMD_CLEARSTALL, &params);
+ ret = dwc3_send_clear_stall_ep_cmd(dep);
WARN_ON_ONCE(ret);
}
}
@@ -2686,14 +2701,13 @@ static void dwc3_process_event_entry(struct dwc3 *dwc,
}
}
-static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
+static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
{
- struct dwc3_event_buffer *evt;
+ struct dwc3 *dwc = evt->dwc;
irqreturn_t ret = IRQ_NONE;
int left;
u32 reg;
- evt = dwc->ev_buffs[buf];
left = evt->count;
if (!(evt->flags & DWC3_EVENT_PENDING))
@@ -2718,7 +2732,7 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
left -= 4;
- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
}
evt->count = 0;
@@ -2726,39 +2740,34 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
ret = IRQ_HANDLED;
/* Unmask interrupt */
- reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
+ reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
reg &= ~DWC3_GEVNTSIZ_INTMASK;
- dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
return ret;
}
-static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
+static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
{
- struct dwc3 *dwc = _dwc;
+ struct dwc3_event_buffer *evt = _evt;
+ struct dwc3 *dwc = evt->dwc;
unsigned long flags;
irqreturn_t ret = IRQ_NONE;
- int i;
spin_lock_irqsave(&dwc->lock, flags);
-
- for (i = 0; i < dwc->num_event_buffers; i++)
- ret |= dwc3_process_event_buf(dwc, i);
-
+ ret = dwc3_process_event_buf(evt);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
}
-static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
+static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
{
- struct dwc3_event_buffer *evt;
+ struct dwc3 *dwc = evt->dwc;
u32 count;
u32 reg;
- evt = dwc->ev_buffs[buf];
-
- count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
+ count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
count &= DWC3_GEVNTCOUNT_MASK;
if (!count)
return IRQ_NONE;
@@ -2767,28 +2776,18 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
evt->flags |= DWC3_EVENT_PENDING;
/* Mask interrupt */
- reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
+ reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
reg |= DWC3_GEVNTSIZ_INTMASK;
- dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
return IRQ_WAKE_THREAD;
}
-static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
+static irqreturn_t dwc3_interrupt(int irq, void *_evt)
{
- struct dwc3 *dwc = _dwc;
- int i;
- irqreturn_t ret = IRQ_NONE;
+ struct dwc3_event_buffer *evt = _evt;
- for (i = 0; i < dwc->num_event_buffers; i++) {
- irqreturn_t status;
-
- status = dwc3_check_event_buf(dwc, i);
- if (status == IRQ_WAKE_THREAD)
- ret = status;
- }
-
- return ret;
+ return dwc3_check_event_buf(evt);
}
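
These hunks hand each handler the event buffer instead of the whole controller, but the overall shape stays the standard threaded-IRQ split: the hard handler only checks and masks, the thread drains events under the device lock and unmasks. A minimal sketch of that split (bodies elided; "evt" stands for whatever per-IRQ cookie gets registered, here the event buffer):

#include <linux/interrupt.h>

static irqreturn_t demo_hardirq(int irq, void *evt)
{
	/* ack/mask the interrupt source, defer the real work */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread(int irq, void *evt)
{
	/* drain the event buffer under the device lock, then unmask */
	return IRQ_HANDLED;
}

static int demo_request_irq(int irq, void *evt)
{
	return request_threaded_irq(irq, demo_hardirq, demo_thread,
				    IRQF_SHARED, "dwc3", evt);
}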
/**
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 18ae3eaa8..f21c0fccb 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -68,12 +68,12 @@ static inline struct dwc3_request *next_request(struct list_head *list)
return list_first_entry(list, struct dwc3_request, list);
}
-static inline void dwc3_gadget_move_request_queued(struct dwc3_request *req)
+static inline void dwc3_gadget_move_started_request(struct dwc3_request *req)
{
struct dwc3_ep *dep = req->dep;
- req->queued = true;
- list_move_tail(&req->list, &dep->req_queued);
+ req->started = true;
+ list_move_tail(&req->list, &dep->started_list);
}
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
diff --git a/drivers/usb/dwc3/platform_data.h b/drivers/usb/dwc3/platform_data.h
index 2bb4d3ad0..8826cca5f 100644
--- a/drivers/usb/dwc3/platform_data.h
+++ b/drivers/usb/dwc3/platform_data.h
@@ -23,7 +23,6 @@
struct dwc3_platform_data {
enum usb_device_speed maximum_speed;
enum usb_dr_mode dr_mode;
- bool tx_fifo_resize;
bool usb3_lpm_capable;
unsigned is_utmi_l1_suspend:1;
@@ -43,6 +42,7 @@ struct dwc3_platform_data {
unsigned dis_u3_susphy_quirk:1;
unsigned dis_u2_susphy_quirk:1;
unsigned dis_enblslpm_quirk:1;
+ unsigned dis_rxdet_inp3_quirk:1;
unsigned tx_de_emphasis_quirk:1;
unsigned tx_de_emphasis:2;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index af5d922a8..2057add43 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -15,6 +15,7 @@
menuconfig USB_GADGET
tristate "USB Gadget Support"
+ select USB_COMMON
select NLS
help
USB is a master/slave protocol, organized with one master
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 524e233d4..eb648485a 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -66,20 +66,36 @@ function_descriptors(struct usb_function *f,
{
struct usb_descriptor_header **descriptors;
+ /*
+ * NOTE: we try to help gadget drivers which might not be setting
+ * max_speed appropriately.
+ */
+
switch (speed) {
case USB_SPEED_SUPER_PLUS:
descriptors = f->ssp_descriptors;
- break;
+ if (descriptors)
+ break;
+ /* FALLTHROUGH */
case USB_SPEED_SUPER:
descriptors = f->ss_descriptors;
- break;
+ if (descriptors)
+ break;
+ /* FALLTHROUGH */
case USB_SPEED_HIGH:
descriptors = f->hs_descriptors;
- break;
+ if (descriptors)
+ break;
+ /* FALLTHROUGH */
default:
descriptors = f->fs_descriptors;
}
+ /*
+ * if we can't find any descriptors at all, then this gadget deserves to
+ * Oops with a NULL pointer dereference
+ */
+
return descriptors;
}
@@ -1852,14 +1868,19 @@ unknown:
}
break;
}
- req->length = value;
- req->context = cdev;
- req->zero = value < w_length;
- value = composite_ep0_queue(cdev, req, GFP_ATOMIC);
- if (value < 0) {
- DBG(cdev, "ep_queue --> %d\n", value);
- req->status = 0;
- composite_setup_complete(gadget->ep0, req);
+
+ if (value >= 0) {
+ req->length = value;
+ req->context = cdev;
+ req->zero = value < w_length;
+ value = composite_ep0_queue(cdev, req,
+ GFP_ATOMIC);
+ if (value < 0) {
+ DBG(cdev, "ep_queue --> %d\n", value);
+ req->status = 0;
+ composite_setup_complete(gadget->ep0,
+ req);
+ }
}
return value;
}
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index b6f60ca8a..70cf3477f 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1401,6 +1401,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
.owner = THIS_MODULE,
.name = "configfs-gadget",
},
+ .match_existing_only = 1,
};
static struct config_group *gadgets_make(
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 73515d54e..cc33d2667 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2051,7 +2051,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
if (len < sizeof(*d) ||
d->bFirstInterfaceNumber >= ffs->interfaces_count ||
- d->Reserved1)
+ !d->Reserved1)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
if (d->Reserved2[i])
@@ -2729,6 +2729,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
func->ffs->ss_descs_count;
int fs_len, hs_len, ss_len, ret, i;
+ struct ffs_ep *eps_ptr;
/* Make it a single chunk, less management later on */
vla_group(d);
@@ -2777,12 +2778,9 @@ static int _ffs_func_bind(struct usb_configuration *c,
ffs->raw_descs_length);
memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
- for (ret = ffs->eps_count; ret; --ret) {
- struct ffs_ep *ptr;
-
- ptr = vla_ptr(vlabuf, d, eps);
- ptr[ret].num = -1;
- }
+ eps_ptr = vla_ptr(vlabuf, d, eps);
+ for (i = 0; i < ffs->eps_count; i++)
+ eps_ptr[i].num = -1;
/* Save pointers
* d_eps == vlabuf, func->eps used to kfree vlabuf later
@@ -2851,7 +2849,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
goto error;
func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
- if (c->cdev->use_os_string)
+ if (c->cdev->use_os_string) {
for (i = 0; i < ffs->interfaces_count; ++i) {
struct usb_os_desc *desc;
@@ -2862,13 +2860,15 @@ static int _ffs_func_bind(struct usb_configuration *c,
vla_ptr(vlabuf, d, ext_compat) + i * 16;
INIT_LIST_HEAD(&desc->ext_prop);
}
- ret = ffs_do_os_descs(ffs->ms_os_descs_count,
- vla_ptr(vlabuf, d, raw_descs) +
- fs_len + hs_len + ss_len,
- d_raw_descs__sz - fs_len - hs_len - ss_len,
- __ffs_func_bind_do_os_desc, func);
- if (unlikely(ret < 0))
- goto error;
+ ret = ffs_do_os_descs(ffs->ms_os_descs_count,
+ vla_ptr(vlabuf, d, raw_descs) +
+ fs_len + hs_len + ss_len,
+ d_raw_descs__sz - fs_len - hs_len -
+ ss_len,
+ __ffs_func_bind_do_os_desc, func);
+ if (unlikely(ret < 0))
+ goto error;
+ }
func->function.os_desc_n =
c->cdev->use_os_string ? ffs->interfaces_count : 0;
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index c45104e3a..64706a789 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -161,14 +161,6 @@ static struct usb_endpoint_descriptor hs_ep_out_desc = {
.wMaxPacketSize = cpu_to_le16(512)
};
-static struct usb_qualifier_descriptor dev_qualifier = {
- .bLength = sizeof(dev_qualifier),
- .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
- .bcdUSB = cpu_to_le16(0x0200),
- .bDeviceClass = USB_CLASS_PRINTER,
- .bNumConfigurations = 1
-};
-
static struct usb_descriptor_header *hs_printer_function[] = {
(struct usb_descriptor_header *) &intf_desc,
(struct usb_descriptor_header *) &hs_ep_in_desc,
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 2ace02954..197f73386 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1290,15 +1290,6 @@ static void usbg_release_cmd(struct se_cmd *se_cmd)
percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
-static int usbg_shutdown_session(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void usbg_close_session(struct se_session *se_sess)
-{
-}
-
static u32 usbg_sess_get_index(struct se_session *se_sess)
{
return 0;
@@ -1454,16 +1445,18 @@ static void usbg_drop_tpg(struct se_portal_group *se_tpg)
for (i = 0; i < TPG_INSTANCES; ++i)
if (tpg_instances[i].tpg == tpg)
break;
- if (i < TPG_INSTANCES)
+ if (i < TPG_INSTANCES) {
tpg_instances[i].tpg = NULL;
- opts = container_of(tpg_instances[i].func_inst,
- struct f_tcm_opts, func_inst);
- mutex_lock(&opts->dep_lock);
- if (opts->has_dep)
- module_put(opts->dependent);
- else
- configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item);
- mutex_unlock(&opts->dep_lock);
+ opts = container_of(tpg_instances[i].func_inst,
+ struct f_tcm_opts, func_inst);
+ mutex_lock(&opts->dep_lock);
+ if (opts->has_dep)
+ module_put(opts->dependent);
+ else
+ configfs_undepend_item_unlocked(
+ &opts->func_inst.group.cg_item);
+ mutex_unlock(&opts->dep_lock);
+ }
mutex_unlock(&tpg_instances_lock);
kfree(tpg);
@@ -1735,8 +1728,6 @@ static const struct target_core_fabric_ops usbg_ops = {
.tpg_check_prod_mode_write_protect = usbg_check_false,
.tpg_get_inst_index = usbg_tpg_get_inst_index,
.release_cmd = usbg_release_cmd,
- .shutdown_session = usbg_shutdown_session,
- .close_session = usbg_close_session,
.sess_get_index = usbg_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = usbg_send_write_request,
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 186d4b162..cd214ec8a 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -598,18 +598,6 @@ static struct usb_gadget_strings *fn_strings[] = {
NULL,
};
-static struct usb_qualifier_descriptor devqual_desc = {
- .bLength = sizeof devqual_desc,
- .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
-
- .bcdUSB = cpu_to_le16(0x200),
- .bDeviceClass = USB_CLASS_MISC,
- .bDeviceSubClass = 0x02,
- .bDeviceProtocol = 0x01,
- .bNumConfigurations = 1,
- .bRESERVED = 0,
-};
-
static struct usb_interface_assoc_descriptor iad_desc = {
.bLength = sizeof iad_desc,
.bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
@@ -1292,6 +1280,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
struct cntrl_cur_lay3 c;
+ memset(&c, 0, sizeof(struct cntrl_cur_lay3));
if (entity_id == USB_IN_CLK_ID)
c.dCUR = p_srate;
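
The added memset is a classic information-leak fix: the structure is only partially filled in before being copied to the host, so untouched fields and padding would otherwise carry stack contents onto the wire. The pattern, reduced to a sketch with a hypothetical struct:

#include <linux/string.h>

struct cntrl_demo {
	unsigned int dCUR;
	unsigned int dRES[3];	/* would leak stack data if left untouched */
};

static void fill_cur_demo(struct cntrl_demo *c, unsigned int srate)
{
	memset(c, 0, sizeof(*c));	/* zero everything first */
	c->dCUR = srate;		/* then set only the known fields */
}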
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index d62683017..990df221c 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -83,9 +83,7 @@ EXPORT_SYMBOL_GPL(fsg_fs_function);
* USB 2.0 devices need to expose both high speed and full speed
* descriptors, unless they only run at full speed.
*
- * That means alternate endpoint descriptors (bigger packets)
- * and a "device qualifier" ... plus more construction options
- * for the configuration descriptor.
+ * That means alternate endpoint descriptors (bigger packets).
*/
struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 637809e3b..a3f7e7c55 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -597,7 +597,7 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
DBG(dev, "tx queue err %d\n", retval);
break;
case 0:
- net->trans_start = jiffies;
+ netif_trans_update(net);
atomic_inc(&dev->tx_qlen);
}
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 5a76e4aec..99c486264 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -512,7 +512,7 @@ static void gs_rx_push(unsigned long _port)
req = list_first_entry(queue, struct usb_request, list);
/* leave data queued if tty was rx throttled */
- if (tty && test_bit(TTY_THROTTLED, &tty->flags))
+ if (tty && tty_throttled(tty))
break;
switch (req->status) {
@@ -579,7 +579,7 @@ static void gs_rx_push(unsigned long _port)
* from starving ... but it's not clear that case ever happens.
*/
if (!list_empty(queue) && tty) {
- if (!test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (!tty_throttled(tty)) {
if (do_push)
tasklet_schedule(&port->push);
else
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index d0d18947f..8bc78418d 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -1726,10 +1726,7 @@ static int at91sam9261_udc_init(struct at91_udc *udc)
udc->matrix = syscon_regmap_lookup_by_phandle(udc->pdev->dev.of_node,
"atmel,matrix");
- if (IS_ERR(udc->matrix))
- return PTR_ERR(udc->matrix);
-
- return 0;
+ return PTR_ERR_OR_ZERO(udc->matrix);
}
static void at91sam9261_udc_pullup(struct at91_udc *udc, int is_on)
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 9571ef54b..ebc51ec57 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -325,11 +325,8 @@ struct pch_vbus_gpio_data {
* @pdev: reference to the PCI device
* @ep: array of endpoints
* @lock: protects all state
- * @active: enabled the PCI device
* @stall: stall requested
* @prot_stall: protocol stall requested
- * @irq_registered: irq registered with system
- * @mem_region: device memory mapped
* @registered: driver registered with system
* @suspended: driver in suspended state
* @connected: gadget driver associated
@@ -339,12 +336,8 @@ struct pch_vbus_gpio_data {
* @data_requests: DMA pool for data requests
* @stp_requests: DMA pool for setup requests
* @dma_addr: DMA pool for received
- * @ep0out_buf: Buffer for DMA
* @setup_data: Received setup data
- * @phys_addr: of device memory
* @base_addr: for mapped device memory
- * @bar: Indicates which PCI BAR for USB regs
- * @irq: IRQ line for the device
* @cfg_data: current cfg, intf, and alt in use
* @vbus_gpio: GPIO information for detecting VBUS
*/
@@ -354,11 +347,9 @@ struct pch_udc_dev {
struct pci_dev *pdev;
struct pch_udc_ep ep[PCH_UDC_EP_NUM];
spinlock_t lock; /* protects all state */
- unsigned active:1,
+ unsigned
stall:1,
prot_stall:1,
- irq_registered:1,
- mem_region:1,
suspended:1,
connected:1,
vbus_session:1,
@@ -367,12 +358,8 @@ struct pch_udc_dev {
struct pci_pool *data_requests;
struct pci_pool *stp_requests;
dma_addr_t dma_addr;
- void *ep0out_buf;
struct usb_ctrlrequest setup_data;
- unsigned long phys_addr;
void __iomem *base_addr;
- unsigned bar;
- unsigned irq;
struct pch_udc_cfg_data cfg_data;
struct pch_vbus_gpio_data vbus_gpio;
};
@@ -380,8 +367,10 @@ struct pch_udc_dev {
#define PCH_UDC_PCI_BAR_QUARK_X1000 0
#define PCH_UDC_PCI_BAR 1
-#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
+
#define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939
+#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
+
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
@@ -1732,14 +1721,12 @@ static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
{
struct pch_udc_ep *ep;
- struct pch_udc_dev *dev;
unsigned long iflags;
if (!usbep)
return -EINVAL;
ep = container_of(usbep, struct pch_udc_ep, ep);
- dev = ep->dev;
if ((usbep->name == ep0_string) || !ep->ep.desc)
return -EINVAL;
@@ -1770,12 +1757,10 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
struct pch_udc_request *req;
struct pch_udc_ep *ep;
struct pch_udc_data_dma_desc *dma_desc;
- struct pch_udc_dev *dev;
if (!usbep)
return NULL;
ep = container_of(usbep, struct pch_udc_ep, ep);
- dev = ep->dev;
req = kzalloc(sizeof *req, gfp);
if (!req)
return NULL;
@@ -1948,12 +1933,10 @@ static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
{
struct pch_udc_ep *ep;
struct pch_udc_request *req;
- struct pch_udc_dev *dev;
unsigned long flags;
int ret = -EINVAL;
ep = container_of(usbep, struct pch_udc_ep, ep);
- dev = ep->dev;
if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
return ret;
req = container_of(usbreq, struct pch_udc_request, req);
@@ -1985,14 +1968,12 @@ static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
{
struct pch_udc_ep *ep;
- struct pch_udc_dev *dev;
unsigned long iflags;
int ret;
if (!usbep)
return -EINVAL;
ep = container_of(usbep, struct pch_udc_ep, ep);
- dev = ep->dev;
if (!ep->ep.desc && !ep->num)
return -EINVAL;
if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
@@ -2030,14 +2011,12 @@ static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
{
struct pch_udc_ep *ep;
- struct pch_udc_dev *dev;
unsigned long iflags;
int ret;
if (!usbep)
return -EINVAL;
ep = container_of(usbep, struct pch_udc_ep, ep);
- dev = ep->dev;
if (!ep->ep.desc && !ep->num)
return -EINVAL;
if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
@@ -2647,7 +2626,7 @@ static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
{
u32 reg, dev_stat = 0;
- int i, ret;
+ int i;
dev_stat = pch_udc_read_device_status(dev);
dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
@@ -2676,7 +2655,7 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
}
dev->stall = 0;
spin_lock(&dev->lock);
- ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
+ dev->driver->setup(&dev->gadget, &dev->setup_data);
spin_unlock(&dev->lock);
}
@@ -2687,7 +2666,7 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
*/
static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
{
- int i, ret;
+ int i;
u32 reg, dev_stat = 0;
dev_stat = pch_udc_read_device_status(dev);
@@ -2713,7 +2692,7 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
/* call gadget zero with setup data received */
spin_lock(&dev->lock);
- ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
+ dev->driver->setup(&dev->gadget, &dev->setup_data);
spin_unlock(&dev->lock);
}
@@ -2856,17 +2835,6 @@ static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
}
/**
- * gadget_release() - Free the gadget driver private data
- * @pdev reference to struct pci_dev
- */
-static void gadget_release(struct device *pdev)
-{
- struct pch_udc_dev *dev = dev_get_drvdata(pdev);
-
- kfree(dev);
-}
-
-/**
* pch_udc_pcd_reinit() - This API initializes the endpoint structures
* @dev: Reference to the driver structure
*/
@@ -2949,6 +2917,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
{
struct pch_udc_stp_dma_desc *td_stp;
struct pch_udc_data_dma_desc *td_data;
+ void *ep0out_buf;
/* DMA setup */
dev->data_requests = pci_pool_create("data_requests", dev->pdev,
@@ -2991,10 +2960,11 @@ static int init_dma_pools(struct pch_udc_dev *dev)
dev->ep[UDC_EP0IN_IDX].td_data = NULL;
dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
- dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
- if (!dev->ep0out_buf)
+ ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
+ GFP_KERNEL);
+ if (!ep0out_buf)
return -ENOMEM;
- dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
+ dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
UDC_EP0OUT_BUFF_SIZE * 4,
DMA_FROM_DEVICE);
return 0;
@@ -3078,129 +3048,80 @@ static void pch_udc_remove(struct pci_dev *pdev)
if (dev->dma_addr)
dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
- kfree(dev->ep0out_buf);
pch_vbus_gpio_free(dev);
pch_udc_exit(dev);
-
- if (dev->irq_registered)
- free_irq(pdev->irq, dev);
- if (dev->base_addr)
- iounmap(dev->base_addr);
- if (dev->mem_region)
- release_mem_region(dev->phys_addr,
- pci_resource_len(pdev, dev->bar));
- if (dev->active)
- pci_disable_device(pdev);
- kfree(dev);
}
-#ifdef CONFIG_PM
-static int pch_udc_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int pch_udc_suspend(struct device *d)
{
+ struct pci_dev *pdev = to_pci_dev(d);
struct pch_udc_dev *dev = pci_get_drvdata(pdev);
pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
- pci_disable_device(pdev);
- pci_enable_wake(pdev, PCI_D3hot, 0);
-
- if (pci_save_state(pdev)) {
- dev_err(&pdev->dev,
- "%s: could not save PCI config state\n", __func__);
- return -ENOMEM;
- }
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
-static int pch_udc_resume(struct pci_dev *pdev)
+static int pch_udc_resume(struct device *d)
{
- int ret;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
- if (ret) {
- dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
- return ret;
- }
- pci_enable_wake(pdev, PCI_D3hot, 0);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
+#define PCH_UDC_PM_OPS (&pch_udc_pm)
#else
-#define pch_udc_suspend NULL
-#define pch_udc_resume NULL
-#endif /* CONFIG_PM */
+#define PCH_UDC_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
static int pch_udc_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- unsigned long resource;
- unsigned long len;
+ int bar;
int retval;
struct pch_udc_dev *dev;
/* init */
- dev = kzalloc(sizeof *dev, GFP_KERNEL);
- if (!dev) {
- pr_err("%s: no memory for device structure\n", __func__);
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
return -ENOMEM;
- }
+
/* pci setup */
- if (pci_enable_device(pdev) < 0) {
- kfree(dev);
- pr_err("%s: pci_enable_device failed\n", __func__);
- return -ENODEV;
- }
- dev->active = 1;
+ retval = pcim_enable_device(pdev);
+ if (retval)
+ return retval;
+
pci_set_drvdata(pdev, dev);
/* Determine BAR based on PCI ID */
if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
- dev->bar = PCH_UDC_PCI_BAR_QUARK_X1000;
+ bar = PCH_UDC_PCI_BAR_QUARK_X1000;
else
- dev->bar = PCH_UDC_PCI_BAR;
+ bar = PCH_UDC_PCI_BAR;
/* PCI resource allocation */
- resource = pci_resource_start(pdev, dev->bar);
- len = pci_resource_len(pdev, dev->bar);
+ retval = pcim_iomap_regions(pdev, 1 << bar, pci_name(pdev));
+ if (retval)
+ return retval;
- if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
- dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
- retval = -EBUSY;
- goto finished;
- }
- dev->phys_addr = resource;
- dev->mem_region = 1;
+ dev->base_addr = pcim_iomap_table(pdev)[bar];
- dev->base_addr = ioremap_nocache(resource, len);
- if (!dev->base_addr) {
- pr_err("%s: device memory cannot be mapped\n", __func__);
- retval = -ENOMEM;
- goto finished;
- }
- if (!pdev->irq) {
- dev_err(&pdev->dev, "%s: irq not set\n", __func__);
- retval = -ENODEV;
- goto finished;
- }
/* initialize the hardware */
- if (pch_udc_pcd_init(dev)) {
- retval = -ENODEV;
- goto finished;
- }
- if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME,
- dev)) {
+ if (pch_udc_pcd_init(dev))
+ return -ENODEV;
+
+ pci_enable_msi(pdev);
+
+ retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (retval) {
dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
pdev->irq);
- retval = -ENODEV;
goto finished;
}
- dev->irq = pdev->irq;
- dev->irq_registered = 1;
pci_set_master(pdev);
pci_try_set_mwi(pdev);
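
The probe rewrite leans on managed resources (pcim_*/devm_*), which is also why the active/irq_registered/mem_region flags and most of the explicit cleanup in pch_udc_remove() go away: the PCI and driver cores release everything automatically on detach. The core of the pattern as a sketch (BAR number and region name illustrative):

#include <linux/pci.h>

static int managed_probe_sketch(struct pci_dev *pdev)
{
	void __iomem *base;
	int ret;

	ret = pcim_enable_device(pdev);	/* auto-disabled on detach */
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, 1 << 0, "demo");	/* request + map BAR 0 */
	if (ret)
		return ret;

	base = pcim_iomap_table(pdev)[0];	/* no explicit iounmap needed */
	(void)base;
	return 0;
}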
@@ -3219,8 +3140,7 @@ static int pch_udc_probe(struct pci_dev *pdev,
/* Put the device in disconnected state till a driver is bound */
pch_udc_set_disconnect(dev);
- retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
- gadget_release);
+ retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
if (retval)
goto finished;
return 0;
@@ -3262,9 +3182,10 @@ static struct pci_driver pch_udc_driver = {
.id_table = pch_udc_pcidev_id,
.probe = pch_udc_probe,
.remove = pch_udc_remove,
- .suspend = pch_udc_suspend,
- .resume = pch_udc_resume,
.shutdown = pch_udc_shutdown,
+ .driver = {
+ .pm = PCH_UDC_PM_OPS,
+ },
};
module_pci_driver(pch_udc_driver);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index baa0609a4..8b300e6da 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -296,7 +296,7 @@ static void r8a66597_change_curpipe(struct r8a66597 *r8a66597, u16 pipenum,
} while ((tmp & mask) != loop);
}
-static inline void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
+static void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
{
struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index c6e764650..e1b2dcebd 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -61,11 +61,9 @@ static int udc_bind_to_driver(struct usb_udc *udc,
#ifdef CONFIG_HAS_DMA
-int usb_gadget_map_request(struct usb_gadget *gadget,
+int usb_gadget_map_request_by_dev(struct device *dev,
struct usb_request *req, int is_in)
{
- struct device *dev = gadget->dev.parent;
-
if (req->length == 0)
return 0;
@@ -92,24 +90,38 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
return 0;
}
+EXPORT_SYMBOL_GPL(usb_gadget_map_request_by_dev);
+
+int usb_gadget_map_request(struct usb_gadget *gadget,
+ struct usb_request *req, int is_in)
+{
+ return usb_gadget_map_request_by_dev(gadget->dev.parent, req, is_in);
+}
EXPORT_SYMBOL_GPL(usb_gadget_map_request);
-void usb_gadget_unmap_request(struct usb_gadget *gadget,
+void usb_gadget_unmap_request_by_dev(struct device *dev,
struct usb_request *req, int is_in)
{
if (req->length == 0)
return;
if (req->num_mapped_sgs) {
- dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs,
+ dma_unmap_sg(dev, req->sg, req->num_mapped_sgs,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
req->num_mapped_sgs = 0;
} else {
- dma_unmap_single(gadget->dev.parent, req->dma, req->length,
+ dma_unmap_single(dev, req->dma, req->length,
is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
}
+EXPORT_SYMBOL_GPL(usb_gadget_unmap_request_by_dev);
+
+void usb_gadget_unmap_request(struct usb_gadget *gadget,
+ struct usb_request *req, int is_in)
+{
+ usb_gadget_unmap_request_by_dev(gadget->dev.parent, req, is_in);
+}
EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
#endif /* CONFIG_HAS_DMA */
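
The new *_by_dev variants let a UDC map requests against whichever struct device actually performs DMA rather than assuming gadget->dev.parent. Hedged usage sketch (the "sysdev" parameter is hypothetical):

#include <linux/usb/gadget.h>

static int queue_map_sketch(struct device *sysdev, struct usb_request *req,
			    int is_in)
{
	/* map against the DMA-capable device, not necessarily the parent */
	return usb_gadget_map_request_by_dev(sysdev, req, is_in);
}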
@@ -591,11 +603,15 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
}
}
- list_add_tail(&driver->pending, &gadget_driver_pending_list);
- pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
- driver->function);
+ if (!driver->match_existing_only) {
+ list_add_tail(&driver->pending, &gadget_driver_pending_list);
+ pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
+ driver->function);
+ ret = 0;
+ }
+
mutex_unlock(&udc_lock);
- return 0;
+ return ret;
found:
ret = udc_bind_to_driver(udc, driver);
mutex_unlock(&udc_lock);
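
With match_existing_only set, a driver that finds no UDC now fails probe with the earlier error instead of parking on the pending list; the configfs template earlier in this patch opts in, presumably so that selecting an unavailable UDC reports an error immediately. Illustrative declaration (callbacks elided):

#include <linux/usb/gadget.h>

static struct usb_gadget_driver demo_gadget_driver = {
	.function		= "demo",
	.match_existing_only	= 1,	/* bind now or fail, never pend */
	/* .bind/.unbind/.setup and .driver omitted */
};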
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index e9d4dde3e..d8f567480 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -70,6 +70,15 @@ config USB_XHCI_RCAR
Say 'Y' to enable the support for the xHCI host controller
found in Renesas R-Car ARM SoCs.
+config USB_XHCI_TEGRA
+ tristate "xHCI support for NVIDIA Tegra SoCs"
+ depends on PHY_TEGRA_XUSB
+ depends on RESET_CONTROLLER
+ select FW_LOADER
+ ---help---
+ Say 'Y' to enable the support for the xHCI host controller
+ found in NVIDIA Tegra124 and later SoCs.
+
endif # USB_XHCI_HCD
config USB_EHCI_HCD
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index a9ddd3c9e..6ef785b0e 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_USB_XHCI_HCD) += xhci-hcd.o
obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
obj-$(CONFIG_USB_XHCI_MTK) += xhci-mtk.o
+obj-$(CONFIG_USB_XHCI_TEGRA) += xhci-tegra.o
obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
obj-$(CONFIG_USB_SL811_CS) += sl811_cs.o
obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 963e2d0e8..172ef1791 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -352,10 +352,8 @@ static int bcma_hcd_probe(struct bcma_device *core)
usb_dev->core = core;
if (core->dev.of_node)
- usb_dev->gpio_desc = devm_get_gpiod_from_child(&core->dev, "vcc",
- &core->dev.of_node->fwnode);
- if (!IS_ERR_OR_NULL(usb_dev->gpio_desc))
- gpiod_direction_output(usb_dev->gpio_desc, 1);
+ usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
+ GPIOD_OUT_HIGH);
switch (core->id.id) {
case BCMA_CORE_USB20_HOST:
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 79d12b2ba..1a2614aae 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -52,13 +52,6 @@ static void dbg_hcs_params(struct ehci_hcd *ehci, char *label)
ehci_dbg(ehci, "%s portroute %s\n", label, buf);
}
}
-#else
-
-static inline void dbg_hcs_params(struct ehci_hcd *ehci, char *label) {}
-
-#endif
-
-#ifdef CONFIG_DYNAMIC_DEBUG
/*
* check the values in the HCCPARAMS register
@@ -92,13 +85,6 @@ static void dbg_hcc_params(struct ehci_hcd *ehci, char *label)
" 32 periodic list" : "");
}
}
-#else
-
-static inline void dbg_hcc_params(struct ehci_hcd *ehci, char *label) {}
-
-#endif
-
-#ifdef CONFIG_DYNAMIC_DEBUG
static void __maybe_unused
dbg_qtd(const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
@@ -281,37 +267,6 @@ dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
(status & PORT_CONNECT) ? " CONNECT" : "");
}
-#else
-static inline void __maybe_unused
-dbg_qh(char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
-{}
-
-static inline int __maybe_unused
-dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
-{
- return 0;
-}
-
-static inline int __maybe_unused
-dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
-{
- return 0;
-}
-
-static inline int __maybe_unused
-dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
-{
- return 0;
-}
-
-static inline int __maybe_unused
-dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
-{
- return 0;
-}
-
-#endif /* CONFIG_DYNAMIC_DEBUG */
-
static inline void
dbg_status(struct ehci_hcd *ehci, const char *label, u32 status)
{
@@ -341,13 +296,6 @@ dbg_port(struct ehci_hcd *ehci, const char *label, int port, u32 status)
/*-------------------------------------------------------------------------*/
-#ifndef CONFIG_DYNAMIC_DEBUG
-
-static inline void create_debug_files(struct ehci_hcd *bus) { }
-static inline void remove_debug_files(struct ehci_hcd *bus) { }
-
-#else
-
/* troubleshooting help: expose state in debugfs */
static int debug_async_open(struct inode *, struct file *);
@@ -1120,4 +1068,38 @@ static inline void remove_debug_files(struct ehci_hcd *ehci)
debugfs_remove_recursive(ehci->debug_dir);
}
+#else /* CONFIG_DYNAMIC_DEBUG */
+
+static inline void dbg_hcs_params(struct ehci_hcd *ehci, char *label) { }
+static inline void dbg_hcc_params(struct ehci_hcd *ehci, char *label) { }
+
+static inline void __maybe_unused dbg_qh(const char *label,
+ struct ehci_hcd *ehci, struct ehci_qh *qh) { }
+
+static inline int __maybe_unused dbg_status_buf(const char *buf,
+ unsigned int len, const char *label, u32 status)
+{ return 0; }
+
+static inline int __maybe_unused dbg_command_buf(const char *buf,
+ unsigned int len, const char *label, u32 command)
+{ return 0; }
+
+static inline int __maybe_unused dbg_intr_buf(const char *buf,
+ unsigned int len, const char *label, u32 enable)
+{ return 0; }
+
+static inline int __maybe_unused dbg_port_buf(char *buf,
+ unsigned int len, const char *label, int port, u32 status)
+{ return 0; }
+
+static inline void dbg_status(struct ehci_hcd *ehci, const char *label,
+ u32 status) { }
+static inline void dbg_cmd(struct ehci_hcd *ehci, const char *label,
+ u32 command) { }
+static inline void dbg_port(struct ehci_hcd *ehci, const char *label,
+ int port, u32 status) { }
+
+static inline void create_debug_files(struct ehci_hcd *bus) { }
+static inline void remove_debug_files(struct ehci_hcd *bus) { }
+
#endif /* CONFIG_DYNAMIC_DEBUG */
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index df538fd10..42e5b6635 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -321,7 +321,7 @@ static struct platform_driver exynos_ehci_driver = {
.of_match_table = of_match_ptr(exynos_ehci_match),
}
};
-static const struct ehci_driver_overrides exynos_overrides __initdata = {
+static const struct ehci_driver_overrides exynos_overrides __initconst = {
.extra_priv_size = sizeof(struct exynos_ehci_hcd),
};
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index ae1b6e69e..a962b89b6 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -368,6 +368,15 @@ static void ehci_shutdown(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ /*
+ * Protect the system from crashing at system shutdown in cases where
+ * the USB host is not added yet from the OTG controller driver.
+ * Since ehci_setup() has not run yet, do not access registers or
+ * variables initialized in ehci_setup().
+ */
+ if (!ehci->sbrn)
+ return;
+
spin_lock_irq(&ehci->lock);
ehci->shutdown = true;
ehci->rh_state = EHCI_RH_STOPPING;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index ffc90295a..74f62d68f 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -872,15 +872,23 @@ int ehci_hub_control(
) {
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
int ports = HCS_N_PORTS (ehci->hcs_params);
- u32 __iomem *status_reg = &ehci->regs->port_status[
- (wIndex & 0xff) - 1];
- u32 __iomem *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1];
+ u32 __iomem *status_reg, *hostpc_reg;
u32 temp, temp1, status;
unsigned long flags;
int retval = 0;
unsigned selector;
/*
+ * Avoid underflow while calculating (wIndex & 0xff) - 1.
+ * The compiler might deduce that wIndex can never be 0 and then
+ * optimize away the tests for !wIndex below.
+ */
+ temp = wIndex & 0xff;
+ temp -= (temp > 0);
+ status_reg = &ehci->regs->port_status[temp];
+ hostpc_reg = &ehci->regs->hostpc[temp];
+
+ /*
* FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
* HCS_INDICATOR may say we can change LEDs to off/amber/green.
* (track current state ourselves) ... blink for diagnostics,
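
The branchless "temp -= (temp > 0)" decrements only when temp is nonzero, so an (invalid) wIndex of 0 indexes slot 0 instead of wrapping to 0xffffffff, while the later !wIndex checks still operate on the untouched wIndex. A worked example of the clamp in plain C:

#include <assert.h>

int main(void)
{
	unsigned int temp;

	temp = 0;		/* invalid wIndex from the host */
	temp -= (temp > 0);	/* (0 > 0) == 0, so no underflow */
	assert(temp == 0);

	temp = 3;		/* port 3 */
	temp -= (temp > 0);	/* (3 > 0) == 1 */
	assert(temp == 2);	/* zero-based register index */

	return 0;
}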
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index 3e226ef6c..2f8d3af81 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -179,22 +179,32 @@ static int ehci_msm_remove(struct platform_device *pdev)
static int ehci_msm_pm_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
bool do_wakeup = device_may_wakeup(dev);
dev_dbg(dev, "ehci-msm PM suspend\n");
- return ehci_suspend(hcd, do_wakeup);
+ /* Only call ehci_suspend if ehci_setup has been done */
+ if (ehci->sbrn)
+ return ehci_suspend(hcd, do_wakeup);
+
+ return 0;
}
static int ehci_msm_pm_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
dev_dbg(dev, "ehci-msm PM resume\n");
- ehci_resume(hcd, false);
+
+ /* Only call ehci_resume if ehci_setup has been done */
+ if (ehci->sbrn)
+ ehci_resume(hcd, false);
return 0;
}
+
#else
#define ehci_msm_pm_suspend NULL
#define ehci_msm_pm_resume NULL
@@ -229,7 +239,7 @@ static struct platform_driver ehci_msm_driver = {
},
};
-static const struct ehci_driver_overrides msm_overrides __initdata = {
+static const struct ehci_driver_overrides msm_overrides __initconst = {
.reset = ehci_msm_reset,
};
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index a24720beb..94ea9fff1 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -86,7 +86,7 @@ static inline u32 ehci_read(void __iomem *base, u32 reg)
static struct hc_driver __read_mostly ehci_omap_hc_driver;
-static const struct ehci_driver_overrides ehci_omap_overrides __initdata = {
+static const struct ehci_driver_overrides ehci_omap_overrides __initconst = {
.extra_priv_size = sizeof(struct omap_hcd),
};
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 3c4e52539..1f25c7985 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -163,7 +163,7 @@ static struct platform_driver spear_ehci_hcd_driver = {
}
};
-static const struct ehci_driver_overrides spear_overrides __initdata = {
+static const struct ehci_driver_overrides spear_overrides __initconst = {
.extra_priv_size = sizeof(struct spear_ehci),
};
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index a94ed677d..be4a2788f 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -206,7 +206,8 @@ static int st_ehci_platform_probe(struct platform_device *dev)
priv->clk48 = NULL;
}
- priv->pwr = devm_reset_control_get_optional(&dev->dev, "power");
+ priv->pwr =
+ devm_reset_control_get_optional_shared(&dev->dev, "power");
if (IS_ERR(priv->pwr)) {
err = PTR_ERR(priv->pwr);
if (err == -EPROBE_DEFER)
@@ -214,7 +215,8 @@ static int st_ehci_platform_probe(struct platform_device *dev)
priv->pwr = NULL;
}
- priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset");
+ priv->rst =
+ devm_reset_control_get_optional_shared(&dev->dev, "softreset");
if (IS_ERR(priv->rst)) {
err = PTR_ERR(priv->rst);
if (err == -EPROBE_DEFER)
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index c1c1024a0..9a3d7db5b 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -81,15 +81,23 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct tegra_ehci_hcd *tegra =
(struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
+ bool has_utmi_pad_registers = false;
phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
if (!phy_np)
return -ENOENT;
+ if (of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers"))
+ has_utmi_pad_registers = true;
+
if (!usb1_reset_attempted) {
struct reset_control *usb1_reset;
- usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
+ if (!has_utmi_pad_registers)
+ usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
+ else
+ usb1_reset = tegra->rst;
+
if (IS_ERR(usb1_reset)) {
dev_warn(&pdev->dev,
"can't get utmi-pads reset from the PHY\n");
@@ -99,13 +107,15 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
reset_control_assert(usb1_reset);
udelay(1);
reset_control_deassert(usb1_reset);
+
+ if (!has_utmi_pad_registers)
+ reset_control_put(usb1_reset);
}
- reset_control_put(usb1_reset);
usb1_reset_attempted = true;
}
- if (!of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers")) {
+ if (!has_utmi_pad_registers) {
reset_control_assert(tegra->rst);
udelay(1);
reset_control_deassert(tegra->rst);
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c
index a9609a336..2f162faab 100644
--- a/drivers/usb/host/fhci-sched.c
+++ b/drivers/usb/host/fhci-sched.c
@@ -288,7 +288,7 @@ static int scan_ed_list(struct fhci_usb *usb,
list_for_each_entry(ed, list, node) {
td = ed->td_head;
- if (!td || (td && td->status == USB_TD_INPROGRESS))
+ if (!td || td->status == USB_TD_INPROGRESS)
continue;
if (ed->state != FHCI_ED_OPER) {
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 360a5e95a..66efa9a67 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -4795,14 +4795,8 @@ static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max,
static inline int create_sysfs_files(struct fotg210_hcd *fotg210)
{
struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
- int i = 0;
- if (i)
- goto out;
-
- i = device_create_file(controller, &dev_attr_uframe_periodic_max);
-out:
- return i;
+ return device_create_file(controller, &dev_attr_uframe_periodic_max);
}
static inline void remove_sysfs_files(struct fotg210_hcd *fotg210)
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 04dcedfde..0449235d4 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1245,11 +1245,6 @@ MODULE_LICENSE ("GPL");
#define TMIO_OHCI_DRIVER ohci_hcd_tmio_driver
#endif
-#ifdef CONFIG_MACH_JZ4740
-#include "ohci-jz4740.c"
-#define PLATFORM_DRIVER ohci_hcd_jz4740_driver
-#endif
-
#ifdef CONFIG_TILE_USB
#include "ohci-tilegx.c"
#define PLATFORM_DRIVER ohci_hcd_tilegx_driver
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
deleted file mode 100644
index 4db78f169..000000000
--- a/drivers/usb/host/ohci-jz4740.c
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
-
-struct jz4740_ohci_hcd {
- struct ohci_hcd ohci_hcd;
-
- struct regulator *vbus;
- bool vbus_enabled;
- struct clk *clk;
-};
-
-static inline struct jz4740_ohci_hcd *hcd_to_jz4740_hcd(struct usb_hcd *hcd)
-{
- return (struct jz4740_ohci_hcd *)(hcd->hcd_priv);
-}
-
-static inline struct usb_hcd *jz4740_hcd_to_hcd(struct jz4740_ohci_hcd *jz4740_ohci)
-{
- return container_of((void *)jz4740_ohci, struct usb_hcd, hcd_priv);
-}
-
-static int ohci_jz4740_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- ret = ohci_init(ohci);
- if (ret < 0)
- return ret;
-
- ohci->num_ports = 1;
-
- ret = ohci_run(ohci);
- if (ret < 0) {
- dev_err(hcd->self.controller, "Can not start %s",
- hcd->self.bus_name);
- ohci_stop(hcd);
- return ret;
- }
- return 0;
-}
-
-static int ohci_jz4740_set_vbus_power(struct jz4740_ohci_hcd *jz4740_ohci,
- bool enabled)
-{
- int ret = 0;
-
- if (!jz4740_ohci->vbus)
- return 0;
-
- if (enabled && !jz4740_ohci->vbus_enabled) {
- ret = regulator_enable(jz4740_ohci->vbus);
- if (ret)
- dev_err(jz4740_hcd_to_hcd(jz4740_ohci)->self.controller,
- "Could not power vbus\n");
- } else if (!enabled && jz4740_ohci->vbus_enabled) {
- ret = regulator_disable(jz4740_ohci->vbus);
- }
-
- if (ret == 0)
- jz4740_ohci->vbus_enabled = enabled;
-
- return ret;
-}
-
-static int ohci_jz4740_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
- u16 wIndex, char *buf, u16 wLength)
-{
- struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd);
- int ret = 0;
-
- switch (typeReq) {
- case SetPortFeature:
- if (wValue == USB_PORT_FEAT_POWER)
- ret = ohci_jz4740_set_vbus_power(jz4740_ohci, true);
- break;
- case ClearPortFeature:
- if (wValue == USB_PORT_FEAT_POWER)
- ret = ohci_jz4740_set_vbus_power(jz4740_ohci, false);
- break;
- }
-
- if (ret)
- return ret;
-
- return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
-}
-
-
-static const struct hc_driver ohci_jz4740_hc_driver = {
- .description = hcd_name,
- .product_desc = "JZ4740 OHCI",
- .hcd_priv_size = sizeof(struct jz4740_ohci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /*
- * basic lifecycle operations
- */
- .start = ohci_jz4740_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /*
- * scheduling support
- */
- .get_frame_number = ohci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_jz4740_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-
-static int jz4740_ohci_probe(struct platform_device *pdev)
-{
- int ret;
- struct usb_hcd *hcd;
- struct jz4740_ohci_hcd *jz4740_ohci;
- struct resource *res;
- int irq;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get platform irq\n");
- return irq;
- }
-
- hcd = usb_create_hcd(&ohci_jz4740_hc_driver, &pdev->dev, "jz4740");
- if (!hcd) {
- dev_err(&pdev->dev, "Failed to create hcd.\n");
- return -ENOMEM;
- }
-
- jz4740_ohci = hcd_to_jz4740_hcd(hcd);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hcd->regs)) {
- ret = PTR_ERR(hcd->regs);
- goto err_free;
- }
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
- jz4740_ohci->clk = devm_clk_get(&pdev->dev, "uhc");
- if (IS_ERR(jz4740_ohci->clk)) {
- ret = PTR_ERR(jz4740_ohci->clk);
- dev_err(&pdev->dev, "Failed to get clock: %d\n", ret);
- goto err_free;
- }
-
- jz4740_ohci->vbus = devm_regulator_get(&pdev->dev, "vbus");
- if (IS_ERR(jz4740_ohci->vbus))
- jz4740_ohci->vbus = NULL;
-
-
- clk_set_rate(jz4740_ohci->clk, 48000000);
- clk_enable(jz4740_ohci->clk);
- if (jz4740_ohci->vbus)
- ohci_jz4740_set_vbus_power(jz4740_ohci, true);
-
- platform_set_drvdata(pdev, hcd);
-
- ohci_hcd_init(hcd_to_ohci(hcd));
-
- ret = usb_add_hcd(hcd, irq, 0);
- if (ret) {
- dev_err(&pdev->dev, "Failed to add hcd: %d\n", ret);
- goto err_disable;
- }
- device_wakeup_enable(hcd->self.controller);
-
- return 0;
-
-err_disable:
- if (jz4740_ohci->vbus)
- regulator_disable(jz4740_ohci->vbus);
- clk_disable(jz4740_ohci->clk);
-
-err_free:
- usb_put_hcd(hcd);
-
- return ret;
-}
-
-static int jz4740_ohci_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd);
-
- usb_remove_hcd(hcd);
-
- if (jz4740_ohci->vbus)
- regulator_disable(jz4740_ohci->vbus);
-
- clk_disable(jz4740_ohci->clk);
-
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-static struct platform_driver ohci_hcd_jz4740_driver = {
- .probe = jz4740_ohci_probe,
- .remove = jz4740_ohci_remove,
- .driver = {
- .name = "jz4740-ohci",
- },
-};
-
-MODULE_ALIAS("platform:jz4740-ohci");
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index d029bbe9e..641fed609 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
{
int branch;
- ed->state = ED_OPER;
ed->ed_prev = NULL;
ed->ed_next = NULL;
ed->hwNextED = 0;
@@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
/* the HC may not see the schedule updates yet, but if it does
* then they'll be properly ordered.
*/
+
+ ed->state = ED_OPER;
return 0;
}
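The ed_schedule() change above delays marking the ED operational until its list pointers and hwNextED are fully set up, so no other path can treat a half-linked ED as live. A minimal sketch of the publish-last idiom, with illustrative names:

struct node {
	struct node *next;
	int state;		/* 0 = idle, 1 = operational */
};

static void node_publish(struct node *n, struct node **head)
{
	n->next = *head;	/* finish all linkage first ... */
	*head = n;
	n->state = 1;		/* ... and flip the state flag last */
}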
diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c
index acf2eb2a5..02816a151 100644
--- a/drivers/usb/host/ohci-st.c
+++ b/drivers/usb/host/ohci-st.c
@@ -188,13 +188,15 @@ static int st_ohci_platform_probe(struct platform_device *dev)
priv->clk48 = NULL;
}
- priv->pwr = devm_reset_control_get_optional(&dev->dev, "power");
+ priv->pwr =
+ devm_reset_control_get_optional_shared(&dev->dev, "power");
if (IS_ERR(priv->pwr)) {
err = PTR_ERR(priv->pwr);
goto err_put_clks;
}
- priv->rst = devm_reset_control_get_optional(&dev->dev, "softreset");
+ priv->rst =
+ devm_reset_control_get_optional_shared(&dev->dev, "softreset");
if (IS_ERR(priv->rst)) {
err = PTR_ERR(priv->rst);
goto err_put_clks;
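The ohci-st change requests the "power" and "softreset" lines as shared reset controls: on a shared control, reset_control_deassert() is reference-counted across consumers and the line is only re-asserted once every user has asserted it again, while the self-clearing reset_control_reset() operation is reserved for exclusive controls. A minimal sketch of claiming such a line, assuming the same reset API used above:

static int claim_shared_power(struct device *dev, struct reset_control **out)
{
	struct reset_control *pwr;
	int err;

	/* Returns NULL (not an error) when the optional line is absent. */
	pwr = devm_reset_control_get_optional_shared(dev, "power");
	if (IS_ERR(pwr))
		return PTR_ERR(pwr);

	/* Refcounted on shared controls: the line stays deasserted until
	 * all sharing consumers have called reset_control_assert(). */
	err = reset_control_deassert(pwr);
	if (err)
		return err;

	*out = pwr;
	return 0;
}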
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index 43626c446..5b3603c36 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -257,14 +257,14 @@ static int whc_probe(struct umc_dev *umc)
ret = whc_init(whc);
if (ret)
- goto error;
+ goto error_whc_init;
wusbhc->dev = dev;
wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent);
if (!wusbhc->uwb_rc) {
ret = -ENODEV;
dev_err(dev, "cannot get radio controller\n");
- goto error;
+ goto error_uwb_rc;
}
if (whc->n_devices > USB_MAXCHILDREN) {
@@ -311,8 +311,9 @@ error_usb_add_hcd:
wusbhc_destroy(wusbhc);
error_wusbhc_create:
uwb_rc_put(wusbhc->uwb_rc);
-error:
+error_uwb_rc:
whc_clean_up(whc);
+error_whc_init:
usb_put_hcd(usb_hcd);
return ret;
}
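The relabeled error path in whc_probe() restores the unwind-ladder idiom: each failure jumps to a label that undoes only the steps that had already succeeded, so whc_clean_up() is no longer called for a failed whc_init(). The shape of the idiom, with illustrative helpers:

static int init_a(void);	/* illustrative steps, not whci's */
static int init_b(void);
static void cleanup_a(void);

static int probe_ladder_sketch(void)
{
	int ret;

	ret = init_a();
	if (ret)
		goto err_init_a;	/* nothing to undo yet */

	ret = init_b();
	if (ret)
		goto err_init_b;	/* undo only step a */

	return 0;

err_init_b:
	cleanup_a();
err_init_a:
	return ret;
}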
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index 1a8e960d0..c0e681242 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -314,7 +314,7 @@ void qset_free_std(struct whc *whc, struct whc_std *std)
kfree(std->bounce_buf);
}
if (std->pl_virt) {
- if (std->dma_addr)
+ if (!dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
std->num_pointers * sizeof(struct whc_page_list_entry),
DMA_TO_DEVICE);
@@ -535,9 +535,11 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u
list_for_each_entry(std, &qset->stds, list_node) {
if (std->ntds_remaining == -1) {
pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
- std->ntds_remaining = ntds--;
std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
pl_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
+ return -EFAULT;
+ std->ntds_remaining = ntds--;
}
}
return 0;
@@ -618,6 +620,8 @@ static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (dma_mapping_error(&whc->umc->dev, std->dma_addr))
+ return -EFAULT;
if (qset_fill_page_list(whc, std, mem_flags) < 0)
return -ENOMEM;
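The qset.c fixes above replace the `if (std->dma_addr)` test with dma_mapping_error(): a valid DMA address may legitimately be zero and a failed mapping need not be, so the API's own predicate is the only portable check, and it must run before the address is ever handed to hardware or unmapped. The canonical pattern, sketched with an illustrative helper:

static int map_for_device(struct device *dev, void *buf, size_t len,
			  dma_addr_t *out)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -EFAULT;		/* do not unmap or use this address */

	*out = addr;
	return 0;
}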
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
index 1eefc9881..85908a3ec 100644
--- a/drivers/usb/host/xhci-mvebu.c
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -12,6 +12,9 @@
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
#include "xhci-mvebu.h"
#define USB3_MAX_WINDOWS 4
@@ -41,8 +44,10 @@ static void xhci_mvebu_mbus_config(void __iomem *base,
}
}
-int xhci_mvebu_mbus_init_quirk(struct platform_device *pdev)
+int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
{
+ struct device *dev = hcd->self.controller;
+ struct platform_device *pdev = to_platform_device(dev);
struct resource *res;
void __iomem *base;
const struct mbus_dram_target_info *dram;
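With the quirk taking a usb_hcd instead of a platform_device, the latter is recovered inside the function: hcd->self.controller is the struct device the HCD was created against, and for xhci-plat that is always a platform device, so to_platform_device() converts it back. As a sketch:

static struct platform_device *xhci_hcd_to_pdev(struct usb_hcd *hcd)
{
	/* Valid only when the HCD really was created for a platform
	 * device, as it is in xhci-plat/xhci-mvebu. */
	return to_platform_device(hcd->self.controller);
}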
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
index 7ede92aa4..301fc984c 100644
--- a/drivers/usb/host/xhci-mvebu.h
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -10,10 +10,13 @@
#ifndef __LINUX_XHCI_MVEBU_H
#define __LINUX_XHCI_MVEBU_H
+
+struct usb_hcd;
+
#if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
-int xhci_mvebu_mbus_init_quirk(struct platform_device *pdev);
+int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
#else
-static inline int xhci_mvebu_mbus_init_quirk(struct platform_device *pdev)
+static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
{
return 0;
}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index d6e2b2751..1f3f981fe 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -37,27 +37,32 @@ static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
.start = xhci_plat_start,
};
-static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
+static void xhci_priv_plat_start(struct usb_hcd *hcd)
+{
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+ if (priv->plat_start)
+ priv->plat_start(hcd);
+}
+
+static int xhci_priv_init_quirk(struct usb_hcd *hcd)
{
- struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+ if (!priv->init_quirk)
+ return 0;
+ return priv->init_quirk(hcd);
+}
+
+static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
+{
/*
* As of now, platform drivers don't provide MSI support, so we ensure
* here that the generic code does not try to make a pci_dev from our
* dev struct in order to set up MSI.
*/
xhci->quirks |= XHCI_PLAT;
-
- /*
- * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
- * to 1. However, these SoCs don't support 64-bit address memory
- * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
- * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
- * xhci_gen_setup().
- */
- if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
- xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
- xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
}
/* called during probe() after chip reset completes */
@@ -65,38 +70,35 @@ static int xhci_plat_setup(struct usb_hcd *hcd)
{
int ret;
- if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
- xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3)) {
- ret = xhci_rcar_init_quirk(hcd);
- if (ret)
- return ret;
- }
+
+ ret = xhci_priv_init_quirk(hcd);
+ if (ret)
+ return ret;
return xhci_gen_setup(hcd, xhci_plat_quirks);
}
static int xhci_plat_start(struct usb_hcd *hcd)
{
- if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2) ||
- xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3))
- xhci_rcar_start(hcd);
-
+ xhci_priv_plat_start(hcd);
return xhci_run(hcd);
}
#ifdef CONFIG_OF
static const struct xhci_plat_priv xhci_plat_marvell_armada = {
- .type = XHCI_PLAT_TYPE_MARVELL_ARMADA,
+ .init_quirk = xhci_mvebu_mbus_init_quirk,
};
static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = {
- .type = XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
.firmware_name = XHCI_RCAR_FIRMWARE_NAME_V1,
+ .init_quirk = xhci_rcar_init_quirk,
+ .plat_start = xhci_rcar_start,
};
static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
- .type = XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
.firmware_name = XHCI_RCAR_FIRMWARE_NAME_V2,
+ .init_quirk = xhci_rcar_init_quirk,
+ .plat_start = xhci_rcar_start,
};
static const struct of_device_id usb_xhci_of_match[] = {
@@ -210,12 +212,6 @@ static int xhci_plat_probe(struct platform_device *pdev)
*priv = *priv_match;
}
- if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_MARVELL_ARMADA)) {
- ret = xhci_mvebu_mbus_init_quirk(pdev);
- if (ret)
- goto disable_clk;
- }
-
device_wakeup_enable(hcd->self.controller);
xhci->clk = clk;
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 529c3c40f..9af0cb480 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -13,27 +13,11 @@
#include "xhci.h" /* for hcd_to_xhci() */
-enum xhci_plat_type {
- XHCI_PLAT_TYPE_MARVELL_ARMADA = 1,
- XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2,
- XHCI_PLAT_TYPE_RENESAS_RCAR_GEN3,
-};
-
struct xhci_plat_priv {
- enum xhci_plat_type type;
const char *firmware_name;
+ void (*plat_start)(struct usb_hcd *);
+ int (*init_quirk)(struct usb_hcd *);
};
#define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv)
-
-static inline bool xhci_plat_type_is(struct usb_hcd *hcd,
- enum xhci_plat_type type)
-{
- struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
-
- if (priv && priv->type == type)
- return true;
- else
- return false;
-}
#endif /* _XHCI_PLAT_H */
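The header change above trades the xhci_plat_type enum and xhci_plat_type_is() helper for two optional callbacks in xhci_plat_priv, so the core code calls whatever the OF match data supplies instead of testing platform identity at every quirk site. A condensed sketch of the same ops-table pattern, with illustrative names:

struct plat_ops {
	void (*plat_start)(struct usb_hcd *hcd);
	int (*init_quirk)(struct usb_hcd *hcd);
};

static int plat_ops_init_quirk(struct usb_hcd *hcd,
			       const struct plat_ops *ops)
{
	/* Platforms without a quirk hook simply succeed. */
	if (!ops->init_quirk)
		return 0;
	return ops->init_quirk(hcd);
}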
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 6ea42ebe8..33534b401 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -11,6 +11,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/usb/phy.h>
#include "xhci.h"
@@ -75,6 +76,24 @@ static void xhci_rcar_start_gen2(struct usb_hcd *hcd)
writel(RCAR_USB3_TX_POL_VAL, hcd->regs + RCAR_USB3_TX_POL);
}
+static int xhci_rcar_is_gen2(struct device *dev)
+{
+ struct device_node *node = dev->of_node;
+
+ return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
+ of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
+ of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
+ of_device_is_compatible(node, "renensas,rcar-gen2-xhci");
+}
+
+static int xhci_rcar_is_gen3(struct device *dev)
+{
+ struct device_node *node = dev->of_node;
+
+ return of_device_is_compatible(node, "renesas,xhci-r8a7795") ||
+ of_device_is_compatible(node, "renesas,rcar-gen3-xhci");
+}
+
void xhci_rcar_start(struct usb_hcd *hcd)
{
u32 temp;
@@ -84,7 +103,7 @@ void xhci_rcar_start(struct usb_hcd *hcd)
temp = readl(hcd->regs + RCAR_USB3_INT_ENA);
temp |= RCAR_USB3_INT_ENA_VAL;
writel(temp, hcd->regs + RCAR_USB3_INT_ENA);
- if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_RENESAS_RCAR_GEN2))
+ if (xhci_rcar_is_gen2(hcd->self.controller))
xhci_rcar_start_gen2(hcd);
}
}
@@ -155,9 +174,22 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd)
/* The USB PHY must be initialized before this function is called */
int xhci_rcar_init_quirk(struct usb_hcd *hcd)
{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
/* If hcd->regs is NULL, skip the register setup and firmware download below */
if (!hcd->regs)
return 0;
+ /*
+ * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
+ * to 1. However, these SoCs don't support 64-bit address memory
+ * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+ * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+ * xhci_gen_setup().
+ */
+ if (xhci_rcar_is_gen2(hcd->self.controller) ||
+ xhci_rcar_is_gen3(hcd->self.controller))
+ xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
+
return xhci_rcar_download_firmware(hcd);
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 8b5b2aca2..d7d502578 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -382,7 +382,11 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
}
}
-static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+/* Get the right ring for the given slot_id, ep_index and stream_id.
+ * If the endpoint supports streams, boundary check the URB's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
unsigned int stream_id)
{
@@ -414,17 +418,6 @@ static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
return NULL;
}
-/* Get the right ring for the given URB.
- * If the endpoint supports streams, boundary check the URB's stream ID.
- * If the endpoint doesn't support streams, return the singular endpoint ring.
- */
-static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
- struct urb *urb)
-{
- return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
- xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
-}
-
/*
* Move the xHC's endpoint ring dequeue pointer past cur_td.
* Record the new state of the xHC's endpoint ring dequeue segment,
@@ -1785,7 +1778,7 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
if (trb_comp_code == COMP_TX_ERR ||
trb_comp_code == COMP_BABBLE ||
trb_comp_code == COMP_SPLIT_ERR)
- /* The 0.96 spec says a babbling control endpoint
+ /* The 0.95 spec says a babbling control endpoint
* is not halted. The 0.96 spec says it is. Some HW
* claims to be 0.95 compliant, but it halts the control
* endpoint anyway. Check if a babble halted the
@@ -2956,46 +2949,55 @@ static int prepare_transfer(struct xhci_hcd *xhci,
return 0;
}
-static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
+static unsigned int count_trbs(u64 addr, u64 len)
+{
+ unsigned int num_trbs;
+
+ num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+ TRB_MAX_BUFF_SIZE);
+ if (num_trbs == 0)
+ num_trbs++;
+
+ return num_trbs;
+}
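/*
 * Worked example for count_trbs() above, assuming TRB_MAX_BUFF_SIZE is
 * 64 KiB (1 << 16) as defined in xhci.h (values illustrative):
 *
 *   addr = 0x1000ff00, len = 0x300:
 *     offset into the 64 KiB window = addr & 0xffff = 0xff00
 *     DIV_ROUND_UP(0x300 + 0xff00, 0x10000) = DIV_ROUND_UP(0x10200,
 *     0x10000) = 2
 *   so a 0x300-byte transfer starting near the end of a window needs
 *   two TRBs because it crosses a 64 KiB boundary.
 *
 *   len = 0 gives num_trbs == 0, which is bumped to 1: even a
 *   zero-length TD occupies one TRB.
 */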
+
+static inline unsigned int count_trbs_needed(struct urb *urb)
+{
+ return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
+}
+
+static unsigned int count_sg_trbs_needed(struct urb *urb)
{
- int num_sgs, num_trbs, running_total, temp, i;
struct scatterlist *sg;
+ unsigned int i, len, full_len, num_trbs = 0;
- sg = NULL;
- num_sgs = urb->num_mapped_sgs;
- temp = urb->transfer_buffer_length;
+ full_len = urb->transfer_buffer_length;
- num_trbs = 0;
- for_each_sg(urb->sg, sg, num_sgs, i) {
- unsigned int len = sg_dma_len(sg);
-
- /* Scatter gather list entries may cross 64KB boundaries */
- running_total = TRB_MAX_BUFF_SIZE -
- (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
- running_total &= TRB_MAX_BUFF_SIZE - 1;
- if (running_total != 0)
- num_trbs++;
-
- /* How many more 64KB chunks to transfer, how many more TRBs? */
- while (running_total < sg_dma_len(sg) && running_total < temp) {
- num_trbs++;
- running_total += TRB_MAX_BUFF_SIZE;
- }
- len = min_t(int, len, temp);
- temp -= len;
- if (temp == 0)
+ for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
+ len = sg_dma_len(sg);
+ num_trbs += count_trbs(sg_dma_address(sg), len);
+ len = min_t(unsigned int, len, full_len);
+ full_len -= len;
+ if (full_len == 0)
break;
}
+
return num_trbs;
}
-static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
+static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
{
- if (num_trbs != 0)
- dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
- "TRBs, %d left\n", __func__,
- urb->ep->desc.bEndpointAddress, num_trbs);
- if (running_total != urb->transfer_buffer_length)
+ u64 addr, len;
+
+ addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
+ len = urb->iso_frame_desc[i].length;
+
+ return count_trbs(addr, len);
+}
+
+static void check_trb_math(struct urb *urb, int running_total)
+{
+ if (unlikely(running_total != urb->transfer_buffer_length))
dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
"queued %#x (%d), asked for %#x (%d)\n",
__func__,
@@ -3021,26 +3023,20 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
-/*
- * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
- * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
- * (comprised of sg list entries) can take several service intervals to
- * transmit.
- */
-int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
- struct urb *urb, int slot_id, unsigned int ep_index)
+static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
+ struct xhci_ep_ctx *ep_ctx)
{
- struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
- xhci->devs[slot_id]->out_ctx, ep_index);
int xhci_interval;
int ep_interval;
xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
ep_interval = urb->interval;
+
/* Convert to microframes */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
ep_interval *= 8;
+
/* FIXME change this to a warning and a suggestion to use the new API
* to set the polling interval (once the API is added).
*/
@@ -3055,6 +3051,22 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
urb->dev->speed == USB_SPEED_FULL)
urb->interval /= 8;
}
+}
+
+/*
+ * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
+ * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
+ * (comprised of sg list entries) can take several service intervals to
+ * transmit.
+ */
+int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ struct urb *urb, int slot_id, unsigned int ep_index)
+{
+ struct xhci_ep_ctx *ep_ctx;
+
+ ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
+ check_interval(xhci, urb, ep_ctx);
+
return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
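/*
 * Worked example for check_interval() above (values illustrative): the
 * endpoint context stores its interval in microframes (125 us), while
 * urb->interval for low-/full-speed devices is in frames (1 ms).  For a
 * full-speed interrupt endpoint with urb->interval = 4:
 *
 *   ep_interval = 4 frames * 8 = 32 microframes
 *
 * If EP_INTERVAL_TO_UFRAMES(ep_info) yields 16 instead, the mismatch is
 * logged, urb->interval becomes 16 microframes, and it is converted
 * back to 16 / 8 = 2 frames for the LS/FS device.
 */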
@@ -3104,44 +3116,47 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
return (total_packet_count - ((transferred + trb_buff_len) / maxp));
}
-
-static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+/* This is very similar to what ehci-q.c qtd_fill() does */
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_ring *ep_ring;
- unsigned int num_trbs;
struct urb_priv *urb_priv;
struct xhci_td *td;
- struct scatterlist *sg;
- int num_sgs;
- int trb_buff_len, this_sg_len, running_total, ret;
- unsigned int total_packet_count;
+ struct xhci_generic_trb *start_trb;
+ struct scatterlist *sg = NULL;
+ bool more_trbs_coming;
bool zero_length_needed;
- bool first_trb;
- int last_trb_num;
+ unsigned int num_trbs, last_trb_num, i;
+ unsigned int start_cycle, num_sgs = 0;
+ unsigned int running_total, block_len, trb_buff_len;
+ unsigned int full_len;
+ int ret;
+ u32 field, length_field, remainder;
u64 addr;
- bool more_trbs_coming;
-
- struct xhci_generic_trb *start_trb;
- int start_cycle;
ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
if (!ep_ring)
return -EINVAL;
- num_trbs = count_sg_trbs_needed(xhci, urb);
- num_sgs = urb->num_mapped_sgs;
- total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
- usb_endpoint_maxp(&urb->ep->desc));
+ /* If we have a scatter/gather list, use it. */
+ if (urb->num_sgs) {
+ num_sgs = urb->num_mapped_sgs;
+ sg = urb->sg;
+ num_trbs = count_sg_trbs_needed(urb);
+ } else
+ num_trbs = count_trbs_needed(urb);
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, mem_flags);
- if (ret < 0)
+ if (unlikely(ret < 0))
return ret;
urb_priv = urb->hcpriv;
+ last_trb_num = num_trbs - 1;
+
/* Deal with URB_ZERO_PACKET - need one more td/trb */
zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
urb_priv->length == 2;
@@ -3151,7 +3166,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
1, urb, 1, mem_flags);
- if (ret < 0)
+ if (unlikely(ret < 0))
return ret;
}
@@ -3165,228 +3180,58 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
+ full_len = urb->transfer_buffer_length;
running_total = 0;
- /*
- * How much data is in the first TRB?
- *
- * There are three forces at work for TRB buffer pointers and lengths:
- * 1. We don't want to walk off the end of this sg-list entry buffer.
- * 2. The transfer length that the driver requested may be smaller than
- * the amount of memory allocated for this scatter-gather list.
- * 3. TRBs buffers can't cross 64KB boundaries.
- */
- sg = urb->sg;
- addr = (u64) sg_dma_address(sg);
- this_sg_len = sg_dma_len(sg);
- trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
- trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
- if (trb_buff_len > urb->transfer_buffer_length)
- trb_buff_len = urb->transfer_buffer_length;
-
- first_trb = true;
- last_trb_num = zero_length_needed ? 2 : 1;
- /* Queue the first TRB, even if it's zero-length */
- do {
- u32 field = 0;
- u32 length_field = 0;
- u32 remainder = 0;
+ block_len = 0;
- /* Don't change the cycle bit of the first TRB until later */
- if (first_trb) {
- first_trb = false;
- if (start_cycle == 0)
- field |= 0x1;
- } else
- field |= ep_ring->cycle_state;
+ /* Queue the TRBs, even if they are zero-length */
+ for (i = 0; i < num_trbs; i++) {
+ field = TRB_TYPE(TRB_NORMAL);
- /* Chain all the TRBs together; clear the chain bit in the last
- * TRB to indicate it's the last TRB in the chain.
- */
- if (num_trbs > last_trb_num) {
- field |= TRB_CHAIN;
- } else if (num_trbs == last_trb_num) {
- td->last_trb = ep_ring->enqueue;
- field |= TRB_IOC;
- } else if (zero_length_needed && num_trbs == 1) {
- trb_buff_len = 0;
- urb_priv->td[1]->last_trb = ep_ring->enqueue;
- field |= TRB_IOC;
- }
-
- /* Only set interrupt on short packet for IN endpoints */
- if (usb_urb_dir_in(urb))
- field |= TRB_ISP;
-
- if (TRB_MAX_BUFF_SIZE -
- (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
- xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
- xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
- (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
- (unsigned int) addr + trb_buff_len);
- }
-
- /* Set the TRB length, TD size, and interrupter fields. */
- remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
- urb->transfer_buffer_length,
- urb, num_trbs - 1);
-
- length_field = TRB_LEN(trb_buff_len) |
- TRB_TD_SIZE(remainder) |
- TRB_INTR_TARGET(0);
-
- if (num_trbs > 1)
- more_trbs_coming = true;
- else
- more_trbs_coming = false;
- queue_trb(xhci, ep_ring, more_trbs_coming,
- lower_32_bits(addr),
- upper_32_bits(addr),
- length_field,
- field | TRB_TYPE(TRB_NORMAL));
- --num_trbs;
- running_total += trb_buff_len;
-
- /* Calculate length for next transfer --
- * Are we done queueing all the TRBs for this sg entry?
- */
- this_sg_len -= trb_buff_len;
- if (this_sg_len == 0) {
- --num_sgs;
- if (num_sgs == 0)
- break;
- sg = sg_next(sg);
- addr = (u64) sg_dma_address(sg);
- this_sg_len = sg_dma_len(sg);
+ if (block_len == 0) {
+ /* A new contiguous block. */
+ if (sg) {
+ addr = (u64) sg_dma_address(sg);
+ block_len = sg_dma_len(sg);
+ } else {
+ addr = (u64) urb->transfer_dma;
+ block_len = full_len;
+ }
+ /* TRB buffer should not cross 64KB boundaries */
+ trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+ trb_buff_len = min_t(unsigned int,
+ trb_buff_len,
+ block_len);
} else {
- addr += trb_buff_len;
+ /* Further through the contiguous block. */
+ trb_buff_len = block_len;
+ if (trb_buff_len > TRB_MAX_BUFF_SIZE)
+ trb_buff_len = TRB_MAX_BUFF_SIZE;
}
- trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & (TRB_MAX_BUFF_SIZE - 1));
- trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
- if (running_total + trb_buff_len > urb->transfer_buffer_length)
- trb_buff_len =
- urb->transfer_buffer_length - running_total;
- } while (num_trbs > 0);
-
- check_trb_math(urb, num_trbs, running_total);
- giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
- start_cycle, start_trb);
- return 0;
-}
-
-/* This is very similar to what ehci-q.c qtd_fill() does */
-int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
- struct urb *urb, int slot_id, unsigned int ep_index)
-{
- struct xhci_ring *ep_ring;
- struct urb_priv *urb_priv;
- struct xhci_td *td;
- int num_trbs;
- struct xhci_generic_trb *start_trb;
- bool first_trb;
- int last_trb_num;
- bool more_trbs_coming;
- bool zero_length_needed;
- int start_cycle;
- u32 field, length_field;
-
- int running_total, trb_buff_len, ret;
- unsigned int total_packet_count;
- u64 addr;
-
- if (urb->num_sgs)
- return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
-
- ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
- if (!ep_ring)
- return -EINVAL;
-
- num_trbs = 0;
- /* How much data is (potentially) left before the 64KB boundary? */
- running_total = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
- running_total &= TRB_MAX_BUFF_SIZE - 1;
-
- /* If there's some data on this 64KB chunk, or we have to send a
- * zero-length transfer, we need at least one TRB
- */
- if (running_total != 0 || urb->transfer_buffer_length == 0)
- num_trbs++;
- /* How many more 64KB chunks to transfer, how many more TRBs? */
- while (running_total < urb->transfer_buffer_length) {
- num_trbs++;
- running_total += TRB_MAX_BUFF_SIZE;
- }
-
- ret = prepare_transfer(xhci, xhci->devs[slot_id],
- ep_index, urb->stream_id,
- num_trbs, urb, 0, mem_flags);
- if (ret < 0)
- return ret;
-
- urb_priv = urb->hcpriv;
-
- /* Deal with URB_ZERO_PACKET - need one more td/trb */
- zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
- urb_priv->length == 2;
- if (zero_length_needed) {
- num_trbs++;
- xhci_dbg(xhci, "Creating zero length td.\n");
- ret = prepare_transfer(xhci, xhci->devs[slot_id],
- ep_index, urb->stream_id,
- 1, urb, 1, mem_flags);
- if (ret < 0)
- return ret;
- }
-
- td = urb_priv->td[0];
-
- /*
- * Don't give the first TRB to the hardware (by toggling the cycle bit)
- * until we've finished creating all the other TRBs. The ring's cycle
- * state may change as we enqueue the other TRBs, so save it too.
- */
- start_trb = &ep_ring->enqueue->generic;
- start_cycle = ep_ring->cycle_state;
-
- running_total = 0;
- total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
- usb_endpoint_maxp(&urb->ep->desc));
- /* How much data is in the first TRB? */
- addr = (u64) urb->transfer_dma;
- trb_buff_len = TRB_MAX_BUFF_SIZE -
- (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
- if (trb_buff_len > urb->transfer_buffer_length)
- trb_buff_len = urb->transfer_buffer_length;
-
- first_trb = true;
- last_trb_num = zero_length_needed ? 2 : 1;
- /* Queue the first TRB, even if it's zero-length */
- do {
- u32 remainder = 0;
- field = 0;
+ if (running_total + trb_buff_len > full_len)
+ trb_buff_len = full_len - running_total;
/* Don't change the cycle bit of the first TRB until later */
- if (first_trb) {
- first_trb = false;
+ if (i == 0) {
if (start_cycle == 0)
- field |= 0x1;
+ field |= TRB_CYCLE;
} else
field |= ep_ring->cycle_state;
/* Chain all the TRBs together; clear the chain bit in the last
* TRB to indicate it's the last TRB in the chain.
*/
- if (num_trbs > last_trb_num) {
+ if (i < last_trb_num) {
field |= TRB_CHAIN;
- } else if (num_trbs == last_trb_num) {
- td->last_trb = ep_ring->enqueue;
- field |= TRB_IOC;
- } else if (zero_length_needed && num_trbs == 1) {
- trb_buff_len = 0;
- urb_priv->td[1]->last_trb = ep_ring->enqueue;
+ } else {
field |= TRB_IOC;
+ if (i == last_trb_num)
+ td->last_trb = ep_ring->enqueue;
+ else if (zero_length_needed) {
+ trb_buff_len = 0;
+ urb_priv->td[1]->last_trb = ep_ring->enqueue;
+ }
}
/* Only set interrupt on short packet for IN endpoints */
@@ -3394,15 +3239,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field |= TRB_ISP;
/* Set the TRB length, TD size, and interrupter fields. */
- remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
- urb->transfer_buffer_length,
- urb, num_trbs - 1);
+ remainder = xhci_td_remainder(xhci, running_total,
+ trb_buff_len, full_len,
+ urb, num_trbs - i - 1);
length_field = TRB_LEN(trb_buff_len) |
TRB_TD_SIZE(remainder) |
TRB_INTR_TARGET(0);
- if (num_trbs > 1)
+ if (i < num_trbs - 1)
more_trbs_coming = true;
else
more_trbs_coming = false;
@@ -3410,18 +3255,24 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
- field | TRB_TYPE(TRB_NORMAL));
- --num_trbs;
- running_total += trb_buff_len;
+ field);
- /* Calculate length for next transfer */
+ running_total += trb_buff_len;
addr += trb_buff_len;
- trb_buff_len = urb->transfer_buffer_length - running_total;
- if (trb_buff_len > TRB_MAX_BUFF_SIZE)
- trb_buff_len = TRB_MAX_BUFF_SIZE;
- } while (num_trbs > 0);
+ block_len -= trb_buff_len;
+
+ if (sg) {
+ if (block_len == 0) {
+ /* New sg entry */
+ --num_sgs;
+ if (num_sgs == 0)
+ break;
+ sg = sg_next(sg);
+ }
+ }
+ }
- check_trb_math(urb, num_trbs, running_total);
+ check_trb_math(urb, running_total);
giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
start_cycle, start_trb);
return 0;
@@ -3550,23 +3401,6 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return 0;
}
-static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
- struct urb *urb, int i)
-{
- int num_trbs = 0;
- u64 addr, td_len;
-
- addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
- td_len = urb->iso_frame_desc[i].length;
-
- num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
- TRB_MAX_BUFF_SIZE);
- if (num_trbs == 0)
- num_trbs++;
-
- return num_trbs;
-}
-
/*
* The transfer burst count field of the isochronous TRB defines the number of
* bursts that are required to move all packets in this TD. Only SuperSpeed
@@ -3764,7 +3598,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
urb, total_pkt_count);
- trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
+ trbs_per_td = count_isoc_trbs_needed(urb, i);
ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
urb->stream_id, trbs_per_td, urb, i, mem_flags);
@@ -3825,8 +3659,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field |= TRB_BEI;
}
/* Calculate TRB length */
- trb_buff_len = TRB_MAX_BUFF_SIZE -
- (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
+ trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
if (trb_buff_len > td_remain_len)
trb_buff_len = td_remain_len;
@@ -3915,8 +3748,6 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
struct xhci_ring *ep_ring;
struct xhci_ep_ctx *ep_ctx;
int start_frame;
- int xhci_interval;
- int ep_interval;
int num_tds, num_trbs, i;
int ret;
struct xhci_virt_ep *xep;
@@ -3930,7 +3761,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs = 0;
num_tds = urb->number_of_packets;
for (i = 0; i < num_tds; i++)
- num_trbs += count_isoc_trbs_needed(xhci, urb, i);
+ num_trbs += count_isoc_trbs_needed(urb, i);
/* Check the ring to guarantee there is enough room for the whole urb.
* Do not insert any td of the urb to the ring if the check failed.
@@ -3944,26 +3775,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
* Check interval value. This should be done before we start to
* calculate the start frame value.
*/
- xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
- ep_interval = urb->interval;
- /* Convert to microframes */
- if (urb->dev->speed == USB_SPEED_LOW ||
- urb->dev->speed == USB_SPEED_FULL)
- ep_interval *= 8;
- /* FIXME change this to a warning and a suggestion to use the new API
- * to set the polling interval (once the API is added).
- */
- if (xhci_interval != ep_interval) {
- dev_dbg_ratelimited(&urb->dev->dev,
- "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
- ep_interval, ep_interval == 1 ? "" : "s",
- xhci_interval, xhci_interval == 1 ? "" : "s");
- urb->interval = xhci_interval;
- /* Convert back to frames for LS/FS devices */
- if (urb->dev->speed == USB_SPEED_LOW ||
- urb->dev->speed == USB_SPEED_FULL)
- urb->interval /= 8;
- }
+ check_interval(xhci, urb, ep_ctx);
/* Calculate the start frame and put it in urb->start_frame. */
if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
new file mode 100644
index 000000000..e97cb6b18
--- /dev/null
+++ b/drivers/usb/host/xhci-tegra.c
@@ -0,0 +1,1331 @@
+/*
+ * NVIDIA Tegra xHCI host controller driver
+ *
+ * Copyright (C) 2014 NVIDIA Corporation
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/tegra/xusb.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "xhci.h"
+
+#define TEGRA_XHCI_SS_HIGH_SPEED 120000000
+#define TEGRA_XHCI_SS_LOW_SPEED 12000000
+
+/* FPCI CFG registers */
+#define XUSB_CFG_1 0x004
+#define XUSB_IO_SPACE_EN BIT(0)
+#define XUSB_MEM_SPACE_EN BIT(1)
+#define XUSB_BUS_MASTER_EN BIT(2)
+#define XUSB_CFG_4 0x010
+#define XUSB_BASE_ADDR_SHIFT 15
+#define XUSB_BASE_ADDR_MASK 0x1ffff
+#define XUSB_CFG_ARU_C11_CSBRANGE 0x41c
+#define XUSB_CFG_CSB_BASE_ADDR 0x800
+
+/* FPCI mailbox registers */
+#define XUSB_CFG_ARU_MBOX_CMD 0x0e4
+#define MBOX_DEST_FALC BIT(27)
+#define MBOX_DEST_PME BIT(28)
+#define MBOX_DEST_SMI BIT(29)
+#define MBOX_DEST_XHCI BIT(30)
+#define MBOX_INT_EN BIT(31)
+#define XUSB_CFG_ARU_MBOX_DATA_IN 0x0e8
+#define CMD_DATA_SHIFT 0
+#define CMD_DATA_MASK 0xffffff
+#define CMD_TYPE_SHIFT 24
+#define CMD_TYPE_MASK 0xff
+#define XUSB_CFG_ARU_MBOX_DATA_OUT 0x0ec
+#define XUSB_CFG_ARU_MBOX_OWNER 0x0f0
+#define MBOX_OWNER_NONE 0
+#define MBOX_OWNER_FW 1
+#define MBOX_OWNER_SW 2
+#define XUSB_CFG_ARU_SMI_INTR 0x428
+#define MBOX_SMI_INTR_FW_HANG BIT(1)
+#define MBOX_SMI_INTR_EN BIT(3)
+
+/* IPFS registers */
+#define IPFS_XUSB_HOST_CONFIGURATION_0 0x180
+#define IPFS_EN_FPCI BIT(0)
+#define IPFS_XUSB_HOST_INTR_MASK_0 0x188
+#define IPFS_IP_INT_MASK BIT(16)
+#define IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0 0x1bc
+
+#define CSB_PAGE_SELECT_MASK 0x7fffff
+#define CSB_PAGE_SELECT_SHIFT 9
+#define CSB_PAGE_OFFSET_MASK 0x1ff
+#define CSB_PAGE_SELECT(addr) (((addr) >> CSB_PAGE_SELECT_SHIFT) & \
+ CSB_PAGE_SELECT_MASK)
+#define CSB_PAGE_OFFSET(addr) ((addr) & CSB_PAGE_OFFSET_MASK)
+
+/* Falcon CSB registers */
+#define XUSB_FALC_CPUCTL 0x100
+#define CPUCTL_STARTCPU BIT(1)
+#define CPUCTL_STATE_HALTED BIT(4)
+#define CPUCTL_STATE_STOPPED BIT(5)
+#define XUSB_FALC_BOOTVEC 0x104
+#define XUSB_FALC_DMACTL 0x10c
+#define XUSB_FALC_IMFILLRNG1 0x154
+#define IMFILLRNG1_TAG_MASK 0xffff
+#define IMFILLRNG1_TAG_LO_SHIFT 0
+#define IMFILLRNG1_TAG_HI_SHIFT 16
+#define XUSB_FALC_IMFILLCTL 0x158
+
+/* MP CSB registers */
+#define XUSB_CSB_MP_ILOAD_ATTR 0x101a00
+#define XUSB_CSB_MP_ILOAD_BASE_LO 0x101a04
+#define XUSB_CSB_MP_ILOAD_BASE_HI 0x101a08
+#define XUSB_CSB_MP_L2IMEMOP_SIZE 0x101a10
+#define L2IMEMOP_SIZE_SRC_OFFSET_SHIFT 8
+#define L2IMEMOP_SIZE_SRC_OFFSET_MASK 0x3ff
+#define L2IMEMOP_SIZE_SRC_COUNT_SHIFT 24
+#define L2IMEMOP_SIZE_SRC_COUNT_MASK 0xff
+#define XUSB_CSB_MP_L2IMEMOP_TRIG 0x101a14
+#define L2IMEMOP_ACTION_SHIFT 24
+#define L2IMEMOP_INVALIDATE_ALL (0x40 << L2IMEMOP_ACTION_SHIFT)
+#define L2IMEMOP_LOAD_LOCKED_RESULT (0x11 << L2IMEMOP_ACTION_SHIFT)
+#define XUSB_CSB_MP_APMAP 0x10181c
+#define APMAP_BOOTPATH BIT(31)
+
+#define IMEM_BLOCK_SIZE 256
+
+struct tegra_xusb_fw_header {
+ u32 boot_loadaddr_in_imem;
+ u32 boot_codedfi_offset;
+ u32 boot_codetag;
+ u32 boot_codesize;
+ u32 phys_memaddr;
+ u16 reqphys_memsize;
+ u16 alloc_phys_memsize;
+ u32 rodata_img_offset;
+ u32 rodata_section_start;
+ u32 rodata_section_end;
+ u32 main_fnaddr;
+ u32 fwimg_cksum;
+ u32 fwimg_created_time;
+ u32 imem_resident_start;
+ u32 imem_resident_end;
+ u32 idirect_start;
+ u32 idirect_end;
+ u32 l2_imem_start;
+ u32 l2_imem_end;
+ u32 version_id;
+ u8 init_ddirect;
+ u8 reserved[3];
+ u32 phys_addr_log_buffer;
+ u32 total_log_entries;
+ u32 dequeue_ptr;
+ u32 dummy_var[2];
+ u32 fwimg_len;
+ u8 magic[8];
+ u32 ss_low_power_entry_timeout;
+ u8 num_hsic_port;
+ u8 padding[139]; /* Pad to 256 bytes */
+};
+
+struct tegra_xusb_phy_type {
+ const char *name;
+ unsigned int num;
+};
+
+struct tegra_xusb_soc {
+ const char *firmware;
+ const char * const *supply_names;
+ unsigned int num_supplies;
+ const struct tegra_xusb_phy_type *phy_types;
+ unsigned int num_types;
+
+ struct {
+ struct {
+ unsigned int offset;
+ unsigned int count;
+ } usb2, ulpi, hsic, usb3;
+ } ports;
+
+ bool scale_ss_clock;
+};
+
+struct tegra_xusb {
+ struct device *dev;
+ void __iomem *regs;
+ struct usb_hcd *hcd;
+
+ struct mutex lock;
+
+ int xhci_irq;
+ int mbox_irq;
+
+ void __iomem *ipfs_base;
+ void __iomem *fpci_base;
+
+ const struct tegra_xusb_soc *soc;
+
+ struct regulator_bulk_data *supplies;
+
+ struct tegra_xusb_padctl *padctl;
+
+ struct clk *host_clk;
+ struct clk *falcon_clk;
+ struct clk *ss_clk;
+ struct clk *ss_src_clk;
+ struct clk *hs_src_clk;
+ struct clk *fs_src_clk;
+ struct clk *pll_u_480m;
+ struct clk *clk_m;
+ struct clk *pll_e;
+
+ struct reset_control *host_rst;
+ struct reset_control *ss_rst;
+
+ struct phy **phys;
+ unsigned int num_phys;
+
+ /* Firmware loading related */
+ struct {
+ size_t size;
+ void *virt;
+ dma_addr_t phys;
+ } fw;
+};
+
+static struct hc_driver __read_mostly tegra_xhci_hc_driver;
+
+static inline u32 fpci_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
+ return readl(tegra->fpci_base + offset);
+}
+
+static inline void fpci_writel(struct tegra_xusb *tegra, u32 value,
+ unsigned int offset)
+{
+ writel(value, tegra->fpci_base + offset);
+}
+
+static inline u32 ipfs_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
+ return readl(tegra->ipfs_base + offset);
+}
+
+static inline void ipfs_writel(struct tegra_xusb *tegra, u32 value,
+ unsigned int offset)
+{
+ writel(value, tegra->ipfs_base + offset);
+}
+
+static u32 csb_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
+ u32 page = CSB_PAGE_SELECT(offset);
+ u32 ofs = CSB_PAGE_OFFSET(offset);
+
+ fpci_writel(tegra, page, XUSB_CFG_ARU_C11_CSBRANGE);
+
+ return fpci_readl(tegra, XUSB_CFG_CSB_BASE_ADDR + ofs);
+}
+
+static void csb_writel(struct tegra_xusb *tegra, u32 value,
+ unsigned int offset)
+{
+ u32 page = CSB_PAGE_SELECT(offset);
+ u32 ofs = CSB_PAGE_OFFSET(offset);
+
+ fpci_writel(tegra, page, XUSB_CFG_ARU_C11_CSBRANGE);
+ fpci_writel(tegra, value, XUSB_CFG_CSB_BASE_ADDR + ofs);
+}
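/*
 * Worked example for the paged CSB access above, using a register
 * defined earlier in this file:
 *
 *   XUSB_CSB_MP_ILOAD_BASE_LO = 0x101a04
 *     CSB_PAGE_SELECT(0x101a04) = (0x101a04 >> 9) & 0x7fffff = 0x80d
 *     CSB_PAGE_OFFSET(0x101a04) = 0x101a04 & 0x1ff = 0x004
 *
 * csb_readl() therefore programs page 0x80d into CSBRANGE and then
 * reads XUSB_CFG_CSB_BASE_ADDR + 0x4 through the FPCI window.
 */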
+
+static int tegra_xusb_set_ss_clk(struct tegra_xusb *tegra,
+ unsigned long rate)
+{
+ unsigned long new_parent_rate, old_parent_rate;
+ struct clk *clk = tegra->ss_src_clk;
+ unsigned int div;
+ int err;
+
+ if (clk_get_rate(clk) == rate)
+ return 0;
+
+ switch (rate) {
+ case TEGRA_XHCI_SS_HIGH_SPEED:
+ /*
+ * Reparent to PLLU_480M. Set divider first to avoid
+ * overclocking.
+ */
+ old_parent_rate = clk_get_rate(clk_get_parent(clk));
+ new_parent_rate = clk_get_rate(tegra->pll_u_480m);
+ div = new_parent_rate / rate;
+
+ err = clk_set_rate(clk, old_parent_rate / div);
+ if (err)
+ return err;
+
+ err = clk_set_parent(clk, tegra->pll_u_480m);
+ if (err)
+ return err;
+
+ /*
+ * The rate should already be correct, but set it again just
+ * to be sure.
+ */
+ err = clk_set_rate(clk, rate);
+ if (err)
+ return err;
+
+ break;
+
+ case TEGRA_XHCI_SS_LOW_SPEED:
+ /* Reparent to CLK_M */
+ err = clk_set_parent(clk, tegra->clk_m);
+ if (err)
+ return err;
+
+ err = clk_set_rate(clk, rate);
+ if (err)
+ return err;
+
+ break;
+
+ default:
+ dev_err(tegra->dev, "Invalid SS rate: %lu Hz\n", rate);
+ return -EINVAL;
+ }
+
+ if (clk_get_rate(clk) != rate) {
+ dev_err(tegra->dev, "SS clock doesn't match requested rate\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
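/*
 * Worked example for the reparenting order above (rates illustrative):
 * target TEGRA_XHCI_SS_HIGH_SPEED = 120 MHz, pll_u_480m at 480 MHz,
 * old parent currently at 240 MHz:
 *
 *   div = 480 MHz / 120 MHz = 4
 *   clk_set_rate(clk, 240 MHz / 4)  -> divider programmed to 4
 *   clk_set_parent(clk, pll_u_480m) -> output = 480 MHz / 4 = 120 MHz
 *
 * Programming the divider before switching parents means the clock
 * never runs above its target rate during the transition.
 */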
+
+static unsigned long extract_field(u32 value, unsigned int start,
+ unsigned int count)
+{
+ return (value >> start) & ((1 << count) - 1);
+}
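/*
 * Example for extract_field() (values illustrative): with value = 0x1e,
 * start = 1 and count = 4,
 *
 *   (0x1e >> 1) & ((1 << 4) - 1) = 0xf
 *
 * i.e. bits 1..4 of the mailbox payload become a 4-bit port mask, as
 * used for the HSIC and USB3 port commands below.
 */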
+
+/* Command requests from the firmware */
+enum tegra_xusb_mbox_cmd {
+ MBOX_CMD_MSG_ENABLED = 1,
+ MBOX_CMD_INC_FALC_CLOCK,
+ MBOX_CMD_DEC_FALC_CLOCK,
+ MBOX_CMD_INC_SSPI_CLOCK,
+ MBOX_CMD_DEC_SSPI_CLOCK,
+ MBOX_CMD_SET_BW, /* no ACK/NAK required */
+ MBOX_CMD_SET_SS_PWR_GATING,
+ MBOX_CMD_SET_SS_PWR_UNGATING,
+ MBOX_CMD_SAVE_DFE_CTLE_CTX,
+ MBOX_CMD_AIRPLANE_MODE_ENABLED, /* unused */
+ MBOX_CMD_AIRPLANE_MODE_DISABLED, /* unused */
+ MBOX_CMD_START_HSIC_IDLE,
+ MBOX_CMD_STOP_HSIC_IDLE,
+ MBOX_CMD_DBC_WAKE_STACK, /* unused */
+ MBOX_CMD_HSIC_PRETEND_CONNECT,
+ MBOX_CMD_RESET_SSPI,
+ MBOX_CMD_DISABLE_SS_LFPS_DETECTION,
+ MBOX_CMD_ENABLE_SS_LFPS_DETECTION,
+
+ MBOX_CMD_MAX,
+
+ /* Response message to above commands */
+ MBOX_CMD_ACK = 128,
+ MBOX_CMD_NAK
+};
+
+static const char * const mbox_cmd_name[] = {
+ [ 1] = "MSG_ENABLE",
+ [ 2] = "INC_FALCON_CLOCK",
+ [ 3] = "DEC_FALCON_CLOCK",
+ [ 4] = "INC_SSPI_CLOCK",
+ [ 5] = "DEC_SSPI_CLOCK",
+ [ 6] = "SET_BW",
+ [ 7] = "SET_SS_PWR_GATING",
+ [ 8] = "SET_SS_PWR_UNGATING",
+ [ 9] = "SAVE_DFE_CTLE_CTX",
+ [ 10] = "AIRPLANE_MODE_ENABLED",
+ [ 11] = "AIRPLANE_MODE_DISABLED",
+ [ 12] = "START_HSIC_IDLE",
+ [ 13] = "STOP_HSIC_IDLE",
+ [ 14] = "DBC_WAKE_STACK",
+ [ 15] = "HSIC_PRETEND_CONNECT",
+ [ 16] = "RESET_SSPI",
+ [ 17] = "DISABLE_SS_LFPS_DETECTION",
+ [ 18] = "ENABLE_SS_LFPS_DETECTION",
+ [128] = "ACK",
+ [129] = "NAK",
+};
+
+struct tegra_xusb_mbox_msg {
+ u32 cmd;
+ u32 data;
+};
+
+static inline u32 tegra_xusb_mbox_pack(const struct tegra_xusb_mbox_msg *msg)
+{
+ return (msg->cmd & CMD_TYPE_MASK) << CMD_TYPE_SHIFT |
+ (msg->data & CMD_DATA_MASK) << CMD_DATA_SHIFT;
+}
+static inline void tegra_xusb_mbox_unpack(struct tegra_xusb_mbox_msg *msg,
+ u32 value)
+{
+ msg->cmd = (value >> CMD_TYPE_SHIFT) & CMD_TYPE_MASK;
+ msg->data = (value >> CMD_DATA_SHIFT) & CMD_DATA_MASK;
+}
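/*
 * Round-trip example for the pack/unpack pair above (rate illustrative):
 *
 *   msg = { .cmd = 3 (MBOX_CMD_DEC_FALC_CLOCK), .data = 204000 kHz }
 *   pack:   (3 & 0xff) << 24 | (204000 & 0xffffff) = 0x03031ce0
 *   unpack: cmd  = (0x03031ce0 >> 24) & 0xff = 3
 *           data = 0x03031ce0 & 0xffffff = 0x031ce0 = 204000
 *
 * The 8-bit command rides in bits 31:24, the 24-bit payload in 23:0.
 */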
+
+static bool tegra_xusb_mbox_cmd_requires_ack(enum tegra_xusb_mbox_cmd cmd)
+{
+ switch (cmd) {
+ case MBOX_CMD_SET_BW:
+ case MBOX_CMD_ACK:
+ case MBOX_CMD_NAK:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
+static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
+ const struct tegra_xusb_mbox_msg *msg)
+{
+ bool wait_for_idle = false;
+ u32 value;
+
+ /*
+ * Acquire the mailbox. The firmware still owns the mailbox for
+ * ACK/NAK messages.
+ */
+ if (!(msg->cmd == MBOX_CMD_ACK || msg->cmd == MBOX_CMD_NAK)) {
+ value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ if (value != MBOX_OWNER_NONE) {
+ dev_err(tegra->dev, "mailbox is busy\n");
+ return -EBUSY;
+ }
+
+ fpci_writel(tegra, MBOX_OWNER_SW, XUSB_CFG_ARU_MBOX_OWNER);
+
+ value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ if (value != MBOX_OWNER_SW) {
+ dev_err(tegra->dev, "failed to acquire mailbox\n");
+ return -EBUSY;
+ }
+
+ wait_for_idle = true;
+ }
+
+ value = tegra_xusb_mbox_pack(msg);
+ fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_DATA_IN);
+
+ value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_CMD);
+ value |= MBOX_INT_EN | MBOX_DEST_FALC;
+ fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_CMD);
+
+ if (wait_for_idle) {
+ unsigned long timeout = jiffies + msecs_to_jiffies(250);
+
+ while (time_before(jiffies, timeout)) {
+ value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+ if (value == MBOX_OWNER_NONE)
+ break;
+
+ usleep_range(10, 20);
+ }
+
+ if (time_after(jiffies, timeout))
+ value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_OWNER);
+
+ if (value != MBOX_OWNER_NONE)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static irqreturn_t tegra_xusb_mbox_irq(int irq, void *data)
+{
+ struct tegra_xusb *tegra = data;
+ u32 value;
+
+ /* clear mailbox interrupts */
+ value = fpci_readl(tegra, XUSB_CFG_ARU_SMI_INTR);
+ fpci_writel(tegra, value, XUSB_CFG_ARU_SMI_INTR);
+
+ if (value & MBOX_SMI_INTR_FW_HANG)
+ dev_err(tegra->dev, "controller firmware hang\n");
+
+ return IRQ_WAKE_THREAD;
+}
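/*
 * The hard handler above only acknowledges the SMI interrupt and defers
 * the real work to tegra_xusb_mbox_thread() by returning
 * IRQ_WAKE_THREAD, because the thread takes tegra->lock and may sleep
 * in tegra_xusb_mbox_send().  A sketch of how such a pair is typically
 * registered (the driver's actual registration happens later in probe,
 * outside the code shown here):
 *
 *	err = devm_request_threaded_irq(&pdev->dev, tegra->mbox_irq,
 *					tegra_xusb_mbox_irq,
 *					tegra_xusb_mbox_thread, 0,
 *					dev_name(&pdev->dev), tegra);
 *	if (err < 0)
 *		return err;
 */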
+
+static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
+ const struct tegra_xusb_mbox_msg *msg)
+{
+ struct tegra_xusb_padctl *padctl = tegra->padctl;
+ const struct tegra_xusb_soc *soc = tegra->soc;
+ struct device *dev = tegra->dev;
+ struct tegra_xusb_mbox_msg rsp;
+ unsigned long mask;
+ unsigned int port;
+ bool idle, enable;
+ int err = 0; /* a firmware-supplied port mask may be empty; default to ACK */
+
+ memset(&rsp, 0, sizeof(rsp));
+
+ switch (msg->cmd) {
+ case MBOX_CMD_INC_FALC_CLOCK:
+ case MBOX_CMD_DEC_FALC_CLOCK:
+ rsp.data = clk_get_rate(tegra->falcon_clk) / 1000;
+ if (rsp.data != msg->data)
+ rsp.cmd = MBOX_CMD_NAK;
+ else
+ rsp.cmd = MBOX_CMD_ACK;
+
+ break;
+
+ case MBOX_CMD_INC_SSPI_CLOCK:
+ case MBOX_CMD_DEC_SSPI_CLOCK:
+ if (tegra->soc->scale_ss_clock) {
+ err = tegra_xusb_set_ss_clk(tegra, msg->data * 1000);
+ if (err < 0)
+ rsp.cmd = MBOX_CMD_NAK;
+ else
+ rsp.cmd = MBOX_CMD_ACK;
+
+ rsp.data = clk_get_rate(tegra->ss_src_clk) / 1000;
+ } else {
+ rsp.cmd = MBOX_CMD_ACK;
+ rsp.data = msg->data;
+ }
+
+ break;
+
+ case MBOX_CMD_SET_BW:
+ /*
+ * TODO: Request bandwidth once EMC scaling is supported.
+ * Ignore for now since ACK/NAK is not required for SET_BW
+ * messages.
+ */
+ break;
+
+ case MBOX_CMD_SAVE_DFE_CTLE_CTX:
+ err = tegra_xusb_padctl_usb3_save_context(padctl, msg->data);
+ if (err < 0) {
+ dev_err(dev, "failed to save context for USB3#%u: %d\n",
+ msg->data, err);
+ rsp.cmd = MBOX_CMD_NAK;
+ } else {
+ rsp.cmd = MBOX_CMD_ACK;
+ }
+
+ rsp.data = msg->data;
+ break;
+
+ case MBOX_CMD_START_HSIC_IDLE:
+ case MBOX_CMD_STOP_HSIC_IDLE:
+ if (msg->cmd == MBOX_CMD_STOP_HSIC_IDLE)
+ idle = false;
+ else
+ idle = true;
+
+ mask = extract_field(msg->data, 1 + soc->ports.hsic.offset,
+ soc->ports.hsic.count);
+
+ for_each_set_bit(port, &mask, 32) {
+ err = tegra_xusb_padctl_hsic_set_idle(padctl, port,
+ idle);
+ if (err < 0)
+ break;
+ }
+
+ if (err < 0) {
+ dev_err(dev, "failed to set HSIC#%u %s: %d\n", port,
+ idle ? "idle" : "busy", err);
+ rsp.cmd = MBOX_CMD_NAK;
+ } else {
+ rsp.cmd = MBOX_CMD_ACK;
+ }
+
+ rsp.data = msg->data;
+ break;
+
+ case MBOX_CMD_DISABLE_SS_LFPS_DETECTION:
+ case MBOX_CMD_ENABLE_SS_LFPS_DETECTION:
+ if (msg->cmd == MBOX_CMD_DISABLE_SS_LFPS_DETECTION)
+ enable = false;
+ else
+ enable = true;
+
+ mask = extract_field(msg->data, 1 + soc->ports.usb3.offset,
+ soc->ports.usb3.count);
+
+ for_each_set_bit(port, &mask, soc->ports.usb3.count) {
+ err = tegra_xusb_padctl_usb3_set_lfps_detect(padctl,
+ port,
+ enable);
+ if (err < 0)
+ break;
+ }
+
+ if (err < 0) {
+ dev_err(dev,
+ "failed to %s LFPS detection on USB3#%u: %d\n",
+ enable ? "enable" : "disable", port, err);
+ rsp.cmd = MBOX_CMD_NAK;
+ } else {
+ rsp.cmd = MBOX_CMD_ACK;
+ }
+
+ rsp.data = msg->data;
+ break;
+
+ default:
+ dev_warn(dev, "unknown message: %#x\n", msg->cmd);
+ break;
+ }
+
+ if (rsp.cmd) {
+ const char *cmd = (rsp.cmd == MBOX_CMD_ACK) ? "ACK" : "NAK";
+
+ err = tegra_xusb_mbox_send(tegra, &rsp);
+ if (err < 0)
+ dev_err(dev, "failed to send %s: %d\n", cmd, err);
+ }
+}
+
+static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
+{
+ struct tegra_xusb *tegra = data;
+ struct tegra_xusb_mbox_msg msg;
+ u32 value;
+
+ mutex_lock(&tegra->lock);
+
+ value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_DATA_OUT);
+ tegra_xusb_mbox_unpack(&msg, value);
+
+ value = fpci_readl(tegra, XUSB_CFG_ARU_MBOX_CMD);
+ value &= ~MBOX_DEST_SMI;
+ fpci_writel(tegra, value, XUSB_CFG_ARU_MBOX_CMD);
+
+ /* clear mailbox owner if no ACK/NAK is required */
+ if (!tegra_xusb_mbox_cmd_requires_ack(msg.cmd))
+ fpci_writel(tegra, MBOX_OWNER_NONE, XUSB_CFG_ARU_MBOX_OWNER);
+
+ tegra_xusb_mbox_handle(tegra, &msg);
+
+ mutex_unlock(&tegra->lock);
+ return IRQ_HANDLED;
+}
+
+static void tegra_xusb_ipfs_config(struct tegra_xusb *tegra,
+ struct resource *regs)
+{
+ u32 value;
+
+ value = ipfs_readl(tegra, IPFS_XUSB_HOST_CONFIGURATION_0);
+ value |= IPFS_EN_FPCI;
+ ipfs_writel(tegra, value, IPFS_XUSB_HOST_CONFIGURATION_0);
+
+ usleep_range(10, 20);
+
+ /* Program BAR0 space */
+ value = fpci_readl(tegra, XUSB_CFG_4);
+ value &= ~(XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
+ value |= regs->start & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
+ fpci_writel(tegra, value, XUSB_CFG_4);
+
+ usleep_range(100, 200);
+
+ /* Enable bus master */
+ value = fpci_readl(tegra, XUSB_CFG_1);
+ value |= XUSB_IO_SPACE_EN | XUSB_MEM_SPACE_EN | XUSB_BUS_MASTER_EN;
+ fpci_writel(tegra, value, XUSB_CFG_1);
+
+ /* Enable interrupt assertion */
+ value = ipfs_readl(tegra, IPFS_XUSB_HOST_INTR_MASK_0);
+ value |= IPFS_IP_INT_MASK;
+ ipfs_writel(tegra, value, IPFS_XUSB_HOST_INTR_MASK_0);
+
+ /* Set hysteresis */
+ ipfs_writel(tegra, 0x80, IPFS_XUSB_HOST_CLKGATE_HYSTERESIS_0);
+}
+
+static int tegra_xusb_clk_enable(struct tegra_xusb *tegra)
+{
+ int err;
+
+ err = clk_prepare_enable(tegra->pll_e);
+ if (err < 0)
+ return err;
+
+ err = clk_prepare_enable(tegra->host_clk);
+ if (err < 0)
+ goto disable_plle;
+
+ err = clk_prepare_enable(tegra->ss_clk);
+ if (err < 0)
+ goto disable_host;
+
+ err = clk_prepare_enable(tegra->falcon_clk);
+ if (err < 0)
+ goto disable_ss;
+
+ err = clk_prepare_enable(tegra->fs_src_clk);
+ if (err < 0)
+ goto disable_falc;
+
+ err = clk_prepare_enable(tegra->hs_src_clk);
+ if (err < 0)
+ goto disable_fs_src;
+
+ if (tegra->soc->scale_ss_clock) {
+ err = tegra_xusb_set_ss_clk(tegra, TEGRA_XHCI_SS_HIGH_SPEED);
+ if (err < 0)
+ goto disable_hs_src;
+ }
+
+ return 0;
+
+disable_hs_src:
+ clk_disable_unprepare(tegra->hs_src_clk);
+disable_fs_src:
+ clk_disable_unprepare(tegra->fs_src_clk);
+disable_falc:
+ clk_disable_unprepare(tegra->falcon_clk);
+disable_ss:
+ clk_disable_unprepare(tegra->ss_clk);
+disable_host:
+ clk_disable_unprepare(tegra->host_clk);
+disable_plle:
+ clk_disable_unprepare(tegra->pll_e);
+ return err;
+}
+
+static void tegra_xusb_clk_disable(struct tegra_xusb *tegra)
+{
+ clk_disable_unprepare(tegra->pll_e);
+ clk_disable_unprepare(tegra->host_clk);
+ clk_disable_unprepare(tegra->ss_clk);
+ clk_disable_unprepare(tegra->falcon_clk);
+ clk_disable_unprepare(tegra->fs_src_clk);
+ clk_disable_unprepare(tegra->hs_src_clk);
+}
+
+static int tegra_xusb_phy_enable(struct tegra_xusb *tegra)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < tegra->num_phys; i++) {
+ err = phy_init(tegra->phys[i]);
+ if (err)
+ goto disable_phy;
+
+ err = phy_power_on(tegra->phys[i]);
+ if (err) {
+ phy_exit(tegra->phys[i]);
+ goto disable_phy;
+ }
+ }
+
+ return 0;
+
+disable_phy:
+ while (i--) {
+ phy_power_off(tegra->phys[i]);
+ phy_exit(tegra->phys[i]);
+ }
+
+ return err;
+}
+
+static void tegra_xusb_phy_disable(struct tegra_xusb *tegra)
+{
+ unsigned int i;
+
+ for (i = 0; i < tegra->num_phys; i++) {
+ phy_power_off(tegra->phys[i]);
+ phy_exit(tegra->phys[i]);
+ }
+}
+
+static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+{
+ unsigned int code_tag_blocks, code_size_blocks, code_blocks;
+ struct tegra_xusb_fw_header *header;
+ struct device *dev = tegra->dev;
+ const struct firmware *fw;
+ unsigned long timeout;
+ time_t timestamp;
+ struct tm time;
+ u64 address;
+ u32 value;
+ int err;
+
+ err = reject_firmware(&fw, tegra->soc->firmware, tegra->dev);
+ if (err < 0) {
+ dev_err(tegra->dev, "failed to request firmware: %d\n", err);
+ return err;
+ }
+
+ /* Load Falcon controller with its firmware. */
+ header = (struct tegra_xusb_fw_header *)fw->data;
+ tegra->fw.size = le32_to_cpu(header->fwimg_len);
+
+ tegra->fw.virt = dma_alloc_coherent(tegra->dev, tegra->fw.size,
+ &tegra->fw.phys, GFP_KERNEL);
+ if (!tegra->fw.virt) {
+ dev_err(tegra->dev, "failed to allocate memory for firmware\n");
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ header = (struct tegra_xusb_fw_header *)tegra->fw.virt;
+ memcpy(tegra->fw.virt, fw->data, tegra->fw.size);
+ release_firmware(fw);
+
+ if (csb_readl(tegra, XUSB_CSB_MP_ILOAD_BASE_LO) != 0) {
+ dev_info(dev, "Firmware already loaded, Falcon state %#x\n",
+ csb_readl(tegra, XUSB_FALC_CPUCTL));
+ return 0;
+ }
+
+ /* Program the size of DFI into ILOAD_ATTR. */
+ csb_writel(tegra, tegra->fw.size, XUSB_CSB_MP_ILOAD_ATTR);
+
+ /*
+ * Boot code of the firmware reads the ILOAD_BASE registers
+ * to get to the start of the DFI in system memory.
+ */
+ address = tegra->fw.phys + sizeof(*header);
+ csb_writel(tegra, address >> 32, XUSB_CSB_MP_ILOAD_BASE_HI);
+ csb_writel(tegra, address, XUSB_CSB_MP_ILOAD_BASE_LO);
+
+ /* Set BOOTPATH to 1 in APMAP. */
+ csb_writel(tegra, APMAP_BOOTPATH, XUSB_CSB_MP_APMAP);
+
+ /* Invalidate L2IMEM. */
+ csb_writel(tegra, L2IMEMOP_INVALIDATE_ALL, XUSB_CSB_MP_L2IMEMOP_TRIG);
+
+ /*
+ * Initiate fetch of bootcode from system memory into L2IMEM.
+ * Program bootcode location and size in system memory.
+ */
+ code_tag_blocks = DIV_ROUND_UP(le32_to_cpu(header->boot_codetag),
+ IMEM_BLOCK_SIZE);
+ code_size_blocks = DIV_ROUND_UP(le32_to_cpu(header->boot_codesize),
+ IMEM_BLOCK_SIZE);
+ code_blocks = code_tag_blocks + code_size_blocks;
+
+ value = ((code_tag_blocks & L2IMEMOP_SIZE_SRC_OFFSET_MASK) <<
+ L2IMEMOP_SIZE_SRC_OFFSET_SHIFT) |
+ ((code_size_blocks & L2IMEMOP_SIZE_SRC_COUNT_MASK) <<
+ L2IMEMOP_SIZE_SRC_COUNT_SHIFT);
+ csb_writel(tegra, value, XUSB_CSB_MP_L2IMEMOP_SIZE);
+
+ /* Trigger L2IMEM load operation. */
+ csb_writel(tegra, L2IMEMOP_LOAD_LOCKED_RESULT,
+ XUSB_CSB_MP_L2IMEMOP_TRIG);
+
+ /* Setup Falcon auto-fill. */
+ csb_writel(tegra, code_size_blocks, XUSB_FALC_IMFILLCTL);
+
+ value = ((code_tag_blocks & IMFILLRNG1_TAG_MASK) <<
+ IMFILLRNG1_TAG_LO_SHIFT) |
+ ((code_blocks & IMFILLRNG1_TAG_MASK) <<
+ IMFILLRNG1_TAG_HI_SHIFT);
+ csb_writel(tegra, value, XUSB_FALC_IMFILLRNG1);
+
+ csb_writel(tegra, 0, XUSB_FALC_DMACTL);
+
+ msleep(50);
+
+ csb_writel(tegra, le32_to_cpu(header->boot_codetag),
+ XUSB_FALC_BOOTVEC);
+
+ /* Boot Falcon CPU and wait for it to enter the STOPPED (idle) state. */
+ timeout = jiffies + msecs_to_jiffies(5);
+
+ csb_writel(tegra, CPUCTL_STARTCPU, XUSB_FALC_CPUCTL);
+
+ while (time_before(jiffies, timeout)) {
+ if (csb_readl(tegra, XUSB_FALC_CPUCTL) == CPUCTL_STATE_STOPPED)
+ break;
+
+ usleep_range(100, 200);
+ }
+
+ if (csb_readl(tegra, XUSB_FALC_CPUCTL) != CPUCTL_STATE_STOPPED) {
+ dev_err(dev, "Falcon failed to start, state: %#x\n",
+ csb_readl(tegra, XUSB_FALC_CPUCTL));
+ return -EIO;
+ }
+
+ timestamp = le32_to_cpu(header->fwimg_created_time);
+ time_to_tm(timestamp, 0, &time);
+
+ dev_info(dev, "Firmware timestamp: %ld-%02d-%02d %02d:%02d:%02d UTC\n",
+ time.tm_year + 1900, time.tm_mon + 1, time.tm_mday,
+ time.tm_hour, time.tm_min, time.tm_sec);
+
+ return 0;
+}
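/*
 * Worked example for the L2IMEM block math above (header values
 * illustrative): with boot_codetag = 0x2500 and boot_codesize = 0x1e000,
 *
 *   code_tag_blocks  = DIV_ROUND_UP(0x2500,  256) = 37
 *   code_size_blocks = DIV_ROUND_UP(0x1e000, 256) = 480
 *   code_blocks      = 37 + 480 = 517
 *
 * so L2IMEMOP_SIZE carries source offset 37 and count 480, and
 * IMFILLRNG1 spans tags 37 (low) through 517 (high), all in 256-byte
 * IMEM_BLOCK_SIZE units.
 */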
+
+static int tegra_xusb_probe(struct platform_device *pdev)
+{
+ struct tegra_xusb_mbox_msg msg;
+ struct resource *res, *regs;
+ struct tegra_xusb *tegra;
+ struct xhci_hcd *xhci;
+ unsigned int i, j, k;
+ struct phy *phy;
+ int err;
+
+ BUILD_BUG_ON(sizeof(struct tegra_xusb_fw_header) != 256);
+
+ tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ tegra->soc = of_device_get_match_data(&pdev->dev);
+ mutex_init(&tegra->lock);
+ tegra->dev = &pdev->dev;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tegra->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(tegra->regs))
+ return PTR_ERR(tegra->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ tegra->fpci_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tegra->fpci_base))
+ return PTR_ERR(tegra->fpci_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ tegra->ipfs_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tegra->ipfs_base))
+ return PTR_ERR(tegra->ipfs_base);
+
+ tegra->xhci_irq = platform_get_irq(pdev, 0);
+ if (tegra->xhci_irq < 0)
+ return tegra->xhci_irq;
+
+ tegra->mbox_irq = platform_get_irq(pdev, 1);
+ if (tegra->mbox_irq < 0)
+ return tegra->mbox_irq;
+
+ tegra->padctl = tegra_xusb_padctl_get(&pdev->dev);
+ if (IS_ERR(tegra->padctl))
+ return PTR_ERR(tegra->padctl);
+
+ tegra->host_rst = devm_reset_control_get(&pdev->dev, "xusb_host");
+ if (IS_ERR(tegra->host_rst)) {
+ err = PTR_ERR(tegra->host_rst);
+ dev_err(&pdev->dev, "failed to get xusb_host reset: %d\n", err);
+ goto put_padctl;
+ }
+
+ tegra->ss_rst = devm_reset_control_get(&pdev->dev, "xusb_ss");
+ if (IS_ERR(tegra->ss_rst)) {
+ err = PTR_ERR(tegra->ss_rst);
+ dev_err(&pdev->dev, "failed to get xusb_ss reset: %d\n", err);
+ goto put_padctl;
+ }
+
+ tegra->host_clk = devm_clk_get(&pdev->dev, "xusb_host");
+ if (IS_ERR(tegra->host_clk)) {
+ err = PTR_ERR(tegra->host_clk);
+ dev_err(&pdev->dev, "failed to get xusb_host: %d\n", err);
+ goto put_padctl;
+ }
+
+ tegra->falcon_clk = devm_clk_get(&pdev->dev, "xusb_falcon_src");
+ if (IS_ERR(tegra->falcon_clk)) {
+ err = PTR_ERR(tegra->falcon_clk);
+ dev_err(&pdev->dev, "failed to get xusb_falcon_src: %d\n", err);
+ goto put_padctl;
+ }
+
+ tegra->ss_clk = devm_clk_get(&pdev->dev, "xusb_ss");
+ if (IS_ERR(tegra->ss_clk)) {
+ err = PTR_ERR(tegra->ss_clk);
+ dev_err(&pdev->dev, "failed to get xusb_ss: %d\n", err);
+ goto put_padctl;
+ }
+
+ tegra->ss_src_clk = devm_clk_get(&pdev->dev, "xusb_ss_src");
+ if (IS_ERR(tegra->ss_src_clk)) {
+ err = PTR_ERR(tegra->ss_src_clk);
+ dev_err(&pdev->dev, "failed to get xusb_ss_src: %d\n", err);
+ goto put_padctl;
+ }
+
+ tegra->hs_src_clk = devm_clk_get(&pdev->dev, "xusb_hs_src");
+ if (IS_ERR(tegra->hs_src_clk)) {
+ err = PTR_ERR(tegra->hs_src_clk);
+ dev_err(&pdev->dev, "failed to get xusb_hs_src: %d\n", err);
+ goto put_padctl;
+ }
+
+ tegra->fs_src_clk = devm_clk_get(&pdev->dev, "xusb_fs_src");
+ if (IS_ERR(tegra->fs_src_clk)) {
+ err = PTR_ERR(tegra->fs_src_clk);
+ dev_err(&pdev->dev, "failed to get xusb_fs_src: %d\n", err);
+ goto put_padctl;
+ }
+
+ tegra->pll_u_480m = devm_clk_get(&pdev->dev, "pll_u_480m");
+ if (IS_ERR(tegra->pll_u_480m)) {
+ err = PTR_ERR(tegra->pll_u_480m);
+ dev_err(&pdev->dev, "failed to get pll_u_480m: %d\n", err);
+ goto put_padctl;
+ }
+
+ tegra->clk_m = devm_clk_get(&pdev->dev, "clk_m");
+ if (IS_ERR(tegra->clk_m)) {
+ err = PTR_ERR(tegra->clk_m);
+ dev_err(&pdev->dev, "failed to get clk_m: %d\n", err);
+ goto put_padctl;
+ }
+
+ tegra->pll_e = devm_clk_get(&pdev->dev, "pll_e");
+ if (IS_ERR(tegra->pll_e)) {
+ err = PTR_ERR(tegra->pll_e);
+ dev_err(&pdev->dev, "failed to get pll_e: %d\n", err);
+ goto put_padctl;
+ }
+
+ tegra->supplies = devm_kcalloc(&pdev->dev, tegra->soc->num_supplies,
+ sizeof(*tegra->supplies), GFP_KERNEL);
+ if (!tegra->supplies) {
+ err = -ENOMEM;
+ goto put_padctl;
+ }
+
+ for (i = 0; i < tegra->soc->num_supplies; i++)
+ tegra->supplies[i].supply = tegra->soc->supply_names[i];
+
+ err = devm_regulator_bulk_get(&pdev->dev, tegra->soc->num_supplies,
+ tegra->supplies);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get regulators: %d\n", err);
+ goto put_padctl;
+ }
+
+ for (i = 0; i < tegra->soc->num_types; i++)
+ tegra->num_phys += tegra->soc->phy_types[i].num;
+
+ tegra->phys = devm_kcalloc(&pdev->dev, tegra->num_phys,
+ sizeof(*tegra->phys), GFP_KERNEL);
+ if (!tegra->phys) {
+ dev_err(&pdev->dev, "failed to allocate PHY array\n");
+ err = -ENOMEM;
+ goto put_padctl;
+ }
+
+ for (i = 0, k = 0; i < tegra->soc->num_types; i++) {
+ char prop[8];
+
+ for (j = 0; j < tegra->soc->phy_types[i].num; j++) {
+ snprintf(prop, sizeof(prop), "%s-%d",
+ tegra->soc->phy_types[i].name, j);
+
+ phy = devm_phy_optional_get(&pdev->dev, prop);
+ if (IS_ERR(phy)) {
+ dev_err(&pdev->dev,
+ "failed to get PHY %s: %ld\n", prop,
+ PTR_ERR(phy));
+ err = PTR_ERR(phy);
+ goto put_padctl;
+ }
+
+ tegra->phys[k++] = phy;
+ }
+ }
+
+ err = tegra_xusb_clk_enable(tegra);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable clocks: %d\n", err);
+ goto put_padctl;
+ }
+
+ err = regulator_bulk_enable(tegra->soc->num_supplies, tegra->supplies);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable regulators: %d\n", err);
+ goto disable_clk;
+ }
+
+ err = tegra_xusb_phy_enable(tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable PHYs: %d\n", err);
+ goto disable_regulator;
+ }
+
+ tegra_xusb_ipfs_config(tegra, regs);
+
+ err = tegra_xusb_load_firmware(tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to load firmware: %d\n", err);
+ goto disable_phy;
+ }
+
+ tegra->hcd = usb_create_hcd(&tegra_xhci_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!tegra->hcd) {
+ err = -ENOMEM;
+ goto disable_phy;
+ }
+
+ /*
+ * This must happen after usb_create_hcd(), because usb_create_hcd()
+ * will overwrite the drvdata of the device with the hcd it creates.
+ */
+ platform_set_drvdata(pdev, tegra);
+
+ tegra->hcd->regs = tegra->regs;
+ tegra->hcd->rsrc_start = regs->start;
+ tegra->hcd->rsrc_len = resource_size(regs);
+
+ err = usb_add_hcd(tegra->hcd, tegra->xhci_irq, IRQF_SHARED);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to add USB HCD: %d\n", err);
+ goto put_usb2;
+ }
+
+ device_wakeup_enable(tegra->hcd->self.controller);
+
+ xhci = hcd_to_xhci(tegra->hcd);
+
+ xhci->shared_hcd = usb_create_shared_hcd(&tegra_xhci_hc_driver,
+ &pdev->dev,
+ dev_name(&pdev->dev),
+ tegra->hcd);
+ if (!xhci->shared_hcd) {
+ dev_err(&pdev->dev, "failed to create shared HCD\n");
+ goto remove_usb2;
+ }
+
+ err = usb_add_hcd(xhci->shared_hcd, tegra->xhci_irq, IRQF_SHARED);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to add shared HCD: %d\n", err);
+ goto put_usb3;
+ }
+
+ mutex_lock(&tegra->lock);
+
+ /* Enable firmware messages from controller. */
+ msg.cmd = MBOX_CMD_MSG_ENABLED;
+ msg.data = 0;
+
+ err = tegra_xusb_mbox_send(tegra, &msg);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable messages: %d\n", err);
+ mutex_unlock(&tegra->lock);
+ goto remove_usb3;
+ }
+
+ mutex_unlock(&tegra->lock);
+
+ err = devm_request_threaded_irq(&pdev->dev, tegra->mbox_irq,
+ tegra_xusb_mbox_irq,
+ tegra_xusb_mbox_thread, 0,
+ dev_name(&pdev->dev), tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto remove_usb3;
+ }
+
+ return 0;
+
+remove_usb3:
+ usb_remove_hcd(xhci->shared_hcd);
+put_usb3:
+ usb_put_hcd(xhci->shared_hcd);
+remove_usb2:
+ usb_remove_hcd(tegra->hcd);
+put_usb2:
+ usb_put_hcd(tegra->hcd);
+disable_phy:
+ tegra_xusb_phy_disable(tegra);
+disable_regulator:
+ regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
+disable_clk:
+ tegra_xusb_clk_disable(tegra);
+put_padctl:
+ tegra_xusb_padctl_put(tegra->padctl);
+ return err;
+}
+
+static int tegra_xusb_remove(struct platform_device *pdev)
+{
+ struct tegra_xusb *tegra = platform_get_drvdata(pdev);
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+
+ usb_remove_hcd(xhci->shared_hcd);
+ usb_put_hcd(xhci->shared_hcd);
+ usb_remove_hcd(tegra->hcd);
+ usb_put_hcd(tegra->hcd);
+
+ dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
+ tegra->fw.phys);
+
+ tegra_xusb_phy_disable(tegra);
+ regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
+ tegra_xusb_clk_disable(tegra);
+
+ tegra_xusb_padctl_put(tegra->padctl);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_xusb_suspend(struct device *dev)
+{
+ struct tegra_xusb *tegra = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+ bool wakeup = device_may_wakeup(dev);
+
+ /* TODO: Powergate controller across suspend/resume. */
+ return xhci_suspend(xhci, wakeup);
+}
+
+static int tegra_xusb_resume(struct device *dev)
+{
+ struct tegra_xusb *tegra = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
+
+ return xhci_resume(xhci, 0);
+}
+#endif
+
+static const struct dev_pm_ops tegra_xusb_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_xusb_suspend, tegra_xusb_resume)
+};
+
+static const char * const tegra124_supply_names[] = {
+ "avddio-pex",
+ "dvddio-pex",
+ "avdd-usb",
+ "avdd-pll-utmip",
+ "avdd-pll-erefe",
+ "avdd-usb-ss-pll",
+ "hvdd-usb-ss",
+ "hvdd-usb-ss-pll-e",
+};
+
+static const struct tegra_xusb_phy_type tegra124_phy_types[] = {
+ { .name = "usb3", .num = 2, },
+ { .name = "usb2", .num = 3, },
+ { .name = "hsic", .num = 2, },
+};
+
+static const struct tegra_xusb_soc tegra124_soc = {
+ .firmware = "/*(DEBLOBBED)*/",
+ .supply_names = tegra124_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra124_supply_names),
+ .phy_types = tegra124_phy_types,
+ .num_types = ARRAY_SIZE(tegra124_phy_types),
+ .ports = {
+ .usb2 = { .offset = 4, .count = 4, },
+ .hsic = { .offset = 6, .count = 2, },
+ .usb3 = { .offset = 0, .count = 2, },
+ },
+ .scale_ss_clock = true,
+};
+/*(DEBLOBBED)*/
+
+static const char * const tegra210_supply_names[] = {
+ "dvddio-pex",
+ "hvddio-pex",
+ "avdd-usb",
+ "avdd-pll-utmip",
+ "avdd-pll-uerefe",
+ "dvdd-pex-pll",
+ "hvdd-pex-pll-e",
+};
+
+static const struct tegra_xusb_phy_type tegra210_phy_types[] = {
+ { .name = "usb3", .num = 4, },
+ { .name = "usb2", .num = 4, },
+ { .name = "hsic", .num = 1, },
+};
+
+static const struct tegra_xusb_soc tegra210_soc = {
+ .firmware = "/*(DEBLOBBED)*/",
+ .supply_names = tegra210_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra210_supply_names),
+ .phy_types = tegra210_phy_types,
+ .num_types = ARRAY_SIZE(tegra210_phy_types),
+ .ports = {
+ .usb2 = { .offset = 4, .count = 4, },
+ .hsic = { .offset = 8, .count = 1, },
+ .usb3 = { .offset = 0, .count = 4, },
+ },
+ .scale_ss_clock = false,
+};
+/*(DEBLOBBED)*/
+
+static const struct of_device_id tegra_xusb_of_match[] = {
+ { .compatible = "nvidia,tegra124-xusb", .data = &tegra124_soc },
+ { .compatible = "nvidia,tegra210-xusb", .data = &tegra210_soc },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_xusb_of_match);
+
+static struct platform_driver tegra_xusb_driver = {
+ .probe = tegra_xusb_probe,
+ .remove = tegra_xusb_remove,
+ .driver = {
+ .name = "tegra-xusb",
+ .pm = &tegra_xusb_pm_ops,
+ .of_match_table = tegra_xusb_of_match,
+ },
+};
+
+static void tegra_xhci_quirks(struct device *dev, struct xhci_hcd *xhci)
+{
+ xhci->quirks |= XHCI_PLAT;
+}
+
+static int tegra_xhci_setup(struct usb_hcd *hcd)
+{
+ return xhci_gen_setup(hcd, tegra_xhci_quirks);
+}
+
+static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = {
+ .extra_priv_size = sizeof(struct xhci_hcd),
+ .reset = tegra_xhci_setup,
+};
+
+static int __init tegra_xusb_init(void)
+{
+ xhci_init_driver(&tegra_xhci_hc_driver, &tegra_xhci_overrides);
+
+ return platform_driver_register(&tegra_xusb_driver);
+}
+module_init(tegra_xusb_init);
+
+static void __exit tegra_xusb_exit(void)
+{
+ platform_driver_unregister(&tegra_xusb_driver);
+}
+module_exit(tegra_xusb_exit);
+
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_DESCRIPTION("NVIDIA Tegra XUSB xHCI host-controller driver");
+MODULE_LICENSE("GPL v2");
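
Note: tegra_xusb_probe() above acquires resources in a fixed order and unwinds them in reverse through a ladder of goto labels, so each failure path releases exactly what has already succeeded. A minimal standalone sketch of the idiom, with hypothetical acquire/release pairs standing in for the driver's clocks, regulators and PHYs:

#include <stdio.h>

/* Hypothetical acquire/release pairs standing in for the clocks,
 * regulators and PHYs taken by the probe above. */
static int acquire_a(void) { puts("acquire A"); return 0; }
static void release_a(void) { puts("release A"); }
static int acquire_b(void) { puts("acquire B"); return -1; /* simulated failure */ }

static int probe_sketch(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;		/* nothing to unwind yet */

	err = acquire_b();
	if (err)
		goto err_release_a;	/* unwind only what already succeeded */

	return 0;

err_release_a:
	release_a();
	return err;
}

int main(void)
{
	return probe_sketch() ? 1 : 0;
}
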
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 327280535..f2f9518c5 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1462,47 +1462,6 @@ free_priv:
return ret;
}
-/* Get the right ring for the given URB.
- * If the endpoint supports streams, boundary check the URB's stream ID.
- * If the endpoint doesn't support streams, return the singular endpoint ring.
- */
-static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
- struct urb *urb)
-{
- unsigned int slot_id;
- unsigned int ep_index;
- unsigned int stream_id;
- struct xhci_virt_ep *ep;
-
- slot_id = urb->dev->slot_id;
- ep_index = xhci_get_endpoint_index(&urb->ep->desc);
- stream_id = urb->stream_id;
- ep = &xhci->devs[slot_id]->eps[ep_index];
- /* Common case: no streams */
- if (!(ep->ep_state & EP_HAS_STREAMS))
- return ep->ring;
-
- if (stream_id == 0) {
- xhci_warn(xhci,
- "WARN: Slot ID %u, ep index %u has streams, "
- "but URB has no stream ID.\n",
- slot_id, ep_index);
- return NULL;
- }
-
- if (stream_id < ep->stream_info->num_streams)
- return ep->stream_info->stream_rings[stream_id];
-
- xhci_warn(xhci,
- "WARN: Slot ID %u, ep index %u has "
- "stream IDs 1 to %u allocated, "
- "but stream ID %u is requested.\n",
- slot_id, ep_index,
- ep->stream_info->num_streams - 1,
- stream_id);
- return NULL;
-}
-
/*
* Remove the URB's TD from the endpoint ring. This may cause the HC to stop
* USB transfers, potentially stopping in the middle of a TRB buffer. The HC
@@ -4930,7 +4889,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
xhci_print_registers(xhci);
- xhci->quirks = quirks;
+ xhci->quirks |= quirks;
get_quirks(dev, xhci);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6c629c97f..b0b8d0f87 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1338,6 +1338,9 @@ union xhci_trb {
/* TRB buffer pointers can't cross 64KB boundaries */
#define TRB_MAX_BUFF_SHIFT 16
#define TRB_MAX_BUFF_SIZE (1 << TRB_MAX_BUFF_SHIFT)
+/* How much data is left before the 64KB boundary? */
+#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_BUFF_SIZE - \
+ (addr & (TRB_MAX_BUFF_SIZE - 1)))
struct xhci_segment {
union xhci_trb *trbs;
@@ -1965,4 +1968,15 @@ struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_container_
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);
+struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id);
+static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
+ struct urb *urb)
+{
+ return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
+ xhci_get_endpoint_index(&urb->ep->desc),
+ urb->stream_id);
+}
+
#endif /* __LINUX_XHCI_HCD_H */
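
The new TRB_BUFF_LEN_UP_TO_BOUNDARY() macro masks the low 16 bits of the address to find how far it sits into the current 64KB region, then subtracts that from 64KB. A quick standalone check of the arithmetic, reusing the constants defined above:

#include <stdio.h>

#define TRB_MAX_BUFF_SHIFT	16
#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)
#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr)	(TRB_MAX_BUFF_SIZE - \
					((addr) & (TRB_MAX_BUFF_SIZE - 1)))

int main(void)
{
	/* On a boundary a full 64KB fits; one byte below it, only 1 byte does. */
	printf("%u\n", TRB_BUFF_LEN_UP_TO_BOUNDARY(0x20000u)); /* 65536 */
	printf("%u\n", TRB_BUFF_LEN_UP_TO_BOUNDARY(0x2ffffu)); /* 1 */
	printf("%u\n", TRB_BUFF_LEN_UP_TO_BOUNDARY(0x2f000u)); /* 4096 */
	return 0;
}
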
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
index 264be4d21..9535b2872 100644
--- a/drivers/usb/isp1760/isp1760-if.c
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -163,7 +163,7 @@ static void isp1761_pci_shutdown(struct pci_dev *dev)
printk(KERN_ERR "ips1761_pci_shutdown\n");
}
-static const struct pci_device_id isp1760_plx [] = {
+static const struct pci_device_id isp1760_plx[] = {
{
.class = PCI_CLASS_BRIDGE_OTHER << 8,
.class_mask = ~0,
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index f7a7fc21b..e9e5ae521 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -268,3 +268,29 @@ config USB_CHAOSKEY
To compile this driver as a module, choose M here: the
module will be called chaoskey.
+
+config UCSI
+ tristate "USB Type-C Connector System Software Interface driver"
+ depends on ACPI
+ help
+ The UCSI driver is meant to be used as a convenience tool for desktop
+ and server systems that are not equipped to handle USB in device mode.
+ It will always select the USB host role for the USB Type-C ports on
+ systems that provide a UCSI interface.
+
+ USB Type-C Connector System Software Interface (UCSI) is a
+ specification for an interface that allows the Operating System to
+ control the USB Type-C ports on a system. Things that need controlling
+ include the USB Data Role (host or device), and when USB Power
+ Delivery is supported, the Power Role (source or sink). With USB
+ Type-C connectors, when two dual role capable devices are attached
+ together, the data role is selected randomly. Therefore it is
+ important to give the OS a way to select the role. Otherwise the user
+ would have to unplug and replug in order to attempt to swap
+ the data and power roles.
+
+ The UCSI specification can be downloaded from:
+ http://www.intel.com/content/www/us/en/io/universal-serial-bus/usb-type-c-ucsi-spec.html
+
+ To compile the driver as a module, choose M here: the module will be
+ called ucsi.
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 45fd4ac39..2769cf635 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_USB_SEVSEG) += usbsevseg.o
obj-$(CONFIG_USB_YUREX) += yurex.o
obj-$(CONFIG_USB_HSIC_USB3503) += usb3503.o
obj-$(CONFIG_USB_CHAOSKEY) += chaoskey.o
+obj-$(CONFIG_UCSI) += ucsi.o
obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/
obj-$(CONFIG_USB_LINK_LAYER_TEST) += lvstest.o
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index a22de52cb..15666ad7c 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -2420,7 +2420,7 @@ static int sisusb_open(struct inode *inode, struct file *file)
if (!sisusb->devinit) {
if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH ||
- sisusb->sisusb_dev->speed == USB_SPEED_SUPER) {
+ sisusb->sisusb_dev->speed >= USB_SPEED_SUPER) {
if (sisusb_init_gfxdevice(sisusb, 0)) {
mutex_unlock(&sisusb->lock);
dev_err(&sisusb->sisusb_dev->dev,
@@ -3127,7 +3127,7 @@ static int sisusb_probe(struct usb_interface *intf,
sisusb->present = 1;
- if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) {
+ if (dev->speed == USB_SPEED_HIGH || dev->speed >= USB_SPEED_SUPER) {
int initscreen = 1;
#ifdef INCL_SISUSB_CON
if (sisusb_first_vc > 0 && sisusb_last_vc > 0 &&
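
The two sisusbvga hunks above relax `== USB_SPEED_SUPER` to `>= USB_SPEED_SUPER` so that SuperSpeedPlus (USB 3.1) devices are also accepted; this relies on enum usb_device_speed being ordered by increasing speed. A small sketch of the comparison, with the enum values mirrored from linux/usb/ch9.h:

#include <stdio.h>

/* Values mirrored from enum usb_device_speed in linux/usb/ch9.h;
 * the enum is ordered by increasing speed. */
enum usb_device_speed {
	USB_SPEED_UNKNOWN = 0,
	USB_SPEED_LOW, USB_SPEED_FULL, USB_SPEED_HIGH,
	USB_SPEED_WIRELESS,
	USB_SPEED_SUPER, USB_SPEED_SUPER_PLUS,
};

int main(void)
{
	enum usb_device_speed speed = USB_SPEED_SUPER_PLUS;

	printf("== SUPER: %d\n", speed == USB_SPEED_SUPER); /* 0: old test missed it */
	printf(">= SUPER: %d\n", speed >= USB_SPEED_SUPER); /* 1: new test catches it */
	return 0;
}
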
diff --git a/drivers/usb/misc/sisusbvga/sisusb_con.c b/drivers/usb/misc/sisusbvga/sisusb_con.c
index ace343088..afa853209 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_con.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_con.c
@@ -601,7 +601,7 @@ sisusbcon_save_screen(struct vc_data *c)
/* interface routine */
static int
-sisusbcon_set_palette(struct vc_data *c, unsigned char *table)
+sisusbcon_set_palette(struct vc_data *c, const unsigned char *table)
{
struct sisusb_usb_data *sisusb;
int i, j;
diff --git a/drivers/usb/misc/ucsi.c b/drivers/usb/misc/ucsi.c
new file mode 100644
index 000000000..07397bdde
--- /dev/null
+++ b/drivers/usb/misc/ucsi.c
@@ -0,0 +1,478 @@
+/*
+ * USB Type-C Connector System Software Interface driver
+ *
+ * Copyright (C) 2016, Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+
+#include "ucsi.h"
+
+/* Double the time defined by MIN_TIME_TO_RESPOND_WITH_BUSY */
+#define UCSI_TIMEOUT_MS 20
+
+enum ucsi_status {
+ UCSI_IDLE = 0,
+ UCSI_BUSY,
+ UCSI_ERROR,
+};
+
+struct ucsi_connector {
+ int num;
+ struct ucsi *ucsi;
+ struct work_struct work;
+ struct ucsi_connector_capability cap;
+};
+
+struct ucsi {
+ struct device *dev;
+ struct ucsi_data __iomem *data;
+
+ enum ucsi_status status;
+ struct completion complete;
+ struct ucsi_capability cap;
+ struct ucsi_connector *connector;
+
+ /* device lock */
+ spinlock_t dev_lock;
+
+ /* PPM Communication lock */
+ struct mutex ppm_lock;
+
+ /* PPM communication flags */
+ unsigned long flags;
+#define EVENT_PENDING 0
+#define COMMAND_PENDING 1
+};
+
+static int ucsi_acpi_cmd(struct ucsi *ucsi, struct ucsi_control *ctrl)
+{
+ uuid_le uuid = UUID_LE(0x6f8398c2, 0x7ca4, 0x11e4,
+ 0xad, 0x36, 0x63, 0x10, 0x42, 0xb5, 0x00, 0x8f);
+ union acpi_object *obj;
+
+ ucsi->data->ctrl.raw_cmd = ctrl->raw_cmd;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(ucsi->dev), uuid.b, 1, 1, NULL);
+ if (!obj) {
+ dev_err(ucsi->dev, "%s: failed to evaluate _DSM\n", __func__);
+ return -EIO;
+ }
+
+ ACPI_FREE(obj);
+ return 0;
+}
+
+static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct ucsi *ucsi = data;
+ struct ucsi_cci *cci;
+
+ spin_lock(&ucsi->dev_lock);
+
+ ucsi->status = UCSI_IDLE;
+ cci = &ucsi->data->cci;
+
+ /*
+ * REVISIT: This is not documented behavior, but all known PPMs ACK
+ * asynchronous events by sending a notification with the CCI cleared.
+ */
+ if (!ucsi->data->raw_cci) {
+ if (test_bit(EVENT_PENDING, &ucsi->flags))
+ complete(&ucsi->complete);
+ else
+ dev_WARN(ucsi->dev, "spurious notification\n");
+ goto out_unlock;
+ }
+
+ if (test_bit(COMMAND_PENDING, &ucsi->flags)) {
+ if (cci->busy) {
+ ucsi->status = UCSI_BUSY;
+ complete(&ucsi->complete);
+
+ goto out_unlock;
+ } else if (cci->ack_complete || cci->cmd_complete) {
+ /* Error Indication is only valid with commands */
+ if (cci->error && cci->cmd_complete)
+ ucsi->status = UCSI_ERROR;
+
+ ucsi->data->ctrl.raw_cmd = 0;
+ complete(&ucsi->complete);
+ }
+ }
+
+ if (cci->connector_change) {
+ struct ucsi_connector *con;
+
+ /*
+ * This is a workaround for buggy PPMs that create asynchronous
+ * event notifications before the OPM has enabled them.
+ */
+ if (!ucsi->connector)
+ goto out_unlock;
+
+ con = ucsi->connector + (cci->connector_change - 1);
+
+ /*
+ * The PPM will not clear the connector-specific bit in the Connector
+ * Change Indication field of the CCI until the driver has ACKed it,
+ * and the driver cannot ACK it before it has been processed. The PPM
+ * will not generate new events before the first has been acknowledged,
+ * even if they are for another connector, so only one event is handled
+ * at a time.
+ */
+ if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
+ schedule_work(&con->work);
+ }
+out_unlock:
+ spin_unlock(&ucsi->dev_lock);
+}
+
+static int ucsi_ack(struct ucsi *ucsi, u8 cmd)
+{
+ struct ucsi_control ctrl;
+ int ret;
+
+ ctrl.cmd.cmd = UCSI_ACK_CC_CI;
+ ctrl.cmd.length = 0;
+ ctrl.cmd.data = cmd;
+ ret = ucsi_acpi_cmd(ucsi, &ctrl);
+ if (ret)
+ return ret;
+
+ /* For now, also wait for an ACK when sending the ACK command itself */
+ ret = wait_for_completion_timeout(&ucsi->complete,
+ msecs_to_jiffies(UCSI_TIMEOUT_MS));
+ if (!ret)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+static int ucsi_run_cmd(struct ucsi *ucsi, struct ucsi_control *ctrl,
+ void *data, size_t size)
+{
+ u16 err_value = 0;
+ int ret;
+
+ set_bit(COMMAND_PENDING, &ucsi->flags);
+
+ ret = ucsi_acpi_cmd(ucsi, ctrl);
+ if (ret)
+ goto err_clear_flag;
+
+ ret = wait_for_completion_timeout(&ucsi->complete,
+ msecs_to_jiffies(UCSI_TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto err_clear_flag;
+ }
+
+ switch (ucsi->status) {
+ case UCSI_IDLE:
+ if (data)
+ memcpy(data, ucsi->data->message_in, size);
+
+ ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
+ break;
+ case UCSI_BUSY:
+ /* The caller decides whether to cancel or not */
+ ret = -EBUSY;
+ goto err_clear_flag;
+ case UCSI_ERROR:
+ ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
+ if (ret)
+ goto err_clear_flag;
+
+ ctrl->cmd.cmd = UCSI_GET_ERROR_STATUS;
+ ctrl->cmd.length = 0;
+ ctrl->cmd.data = 0;
+ ret = ucsi_acpi_cmd(ucsi, ctrl);
+ if (ret)
+ goto err_clear_flag;
+
+ ret = wait_for_completion_timeout(&ucsi->complete,
+ msecs_to_jiffies(UCSI_TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto err_clear_flag;
+ }
+
+ memcpy(&err_value, ucsi->data->message_in, sizeof(err_value));
+
+ /* Something has really gone wrong */
+ if (WARN_ON(ucsi->status == UCSI_ERROR)) {
+ ret = -ENODEV;
+ goto err_clear_flag;
+ }
+
+ ret = ucsi_ack(ucsi, UCSI_ACK_CMD);
+ if (ret)
+ goto err_clear_flag;
+
+ switch (err_value) {
+ case UCSI_ERROR_INCOMPATIBLE_PARTNER:
+ ret = -EOPNOTSUPP;
+ break;
+ case UCSI_ERROR_CC_COMMUNICATION_ERR:
+ ret = -ECOMM;
+ break;
+ case UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL:
+ ret = -EIO;
+ break;
+ case UCSI_ERROR_DEAD_BATTERY:
+ dev_warn(ucsi->dev, "Dead battery condition!\n");
+ ret = -EPERM;
+ break;
+ /* The following indicate a bug in this driver */
+ case UCSI_ERROR_INVALID_CON_NUM:
+ case UCSI_ERROR_UNREGONIZED_CMD:
+ case UCSI_ERROR_INVALID_CMD_ARGUMENT:
+ default:
+ dev_warn(ucsi->dev,
+ "%s: possible UCSI driver bug - error %hu\n",
+ __func__, err_value);
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ }
+ ctrl->raw_cmd = 0;
+err_clear_flag:
+ clear_bit(COMMAND_PENDING, &ucsi->flags);
+ return ret;
+}
+
+static void ucsi_connector_change(struct work_struct *work)
+{
+ struct ucsi_connector *con = container_of(work, struct ucsi_connector,
+ work);
+ struct ucsi_connector_status constat;
+ struct ucsi *ucsi = con->ucsi;
+ struct ucsi_control ctrl;
+ int ret;
+
+ mutex_lock(&ucsi->ppm_lock);
+
+ ctrl.cmd.cmd = UCSI_GET_CONNECTOR_STATUS;
+ ctrl.cmd.length = 0;
+ ctrl.cmd.data = con->num;
+ ret = ucsi_run_cmd(con->ucsi, &ctrl, &constat, sizeof(constat));
+ if (ret) {
+ dev_err(ucsi->dev, "%s: failed to read connector status (%d)\n",
+ __func__, ret);
+ goto out_ack_event;
+ }
+
+ /* Ignoring disconnections and Alternate Modes */
+ if (!constat.connected || !(constat.change &
+ (UCSI_CONSTAT_PARTNER_CHANGE | UCSI_CONSTAT_CONNECT_CHANGE)) ||
+ constat.partner_flags & UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE)
+ goto out_ack_event;
+
+ /* If the partner has the USB host role, attempt a role swap */
+ if (constat.partner_type & UCSI_CONSTAT_PARTNER_TYPE_DFP) {
+ ctrl.uor.cmd = UCSI_SET_UOR;
+ ctrl.uor.con_num = con->num;
+ ctrl.uor.role = UCSI_UOR_ROLE_DFP;
+
+ ret = ucsi_run_cmd(con->ucsi, &ctrl, NULL, 0);
+ if (ret)
+ dev_err(ucsi->dev, "%s: failed to swap role (%d)\n",
+ __func__, ret);
+ }
+out_ack_event:
+ ucsi_ack(ucsi, UCSI_ACK_EVENT);
+ clear_bit(EVENT_PENDING, &ucsi->flags);
+ mutex_unlock(&ucsi->ppm_lock);
+}
+
+static int ucsi_reset_ppm(struct ucsi *ucsi)
+{
+ int timeout = UCSI_TIMEOUT_MS;
+ struct ucsi_control ctrl;
+ int ret;
+
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.cmd.cmd = UCSI_PPM_RESET;
+ ret = ucsi_acpi_cmd(ucsi, &ctrl);
+ if (ret)
+ return ret;
+
+ /* There is no guarantee the PPM will ever set the RESET_COMPLETE bit */
+ while (!ucsi->data->cci.reset_complete && timeout--)
+ usleep_range(1000, 2000);
+ return 0;
+}
+
+static int ucsi_init(struct ucsi *ucsi)
+{
+ struct ucsi_connector *con;
+ struct ucsi_control ctrl;
+ int ret;
+ int i;
+
+ init_completion(&ucsi->complete);
+ spin_lock_init(&ucsi->dev_lock);
+ mutex_init(&ucsi->ppm_lock);
+
+ /* Reset the PPM */
+ ret = ucsi_reset_ppm(ucsi);
+ if (ret)
+ return ret;
+
+ /*
+ * REVISIT: Execute a second reset to work around an issue seen on
+ * some Broxton-based platforms, where the first reset puts the PPM
+ * into a state where it is unable to recognise some of the commands.
+ */
+ ret = ucsi_reset_ppm(ucsi);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ucsi->ppm_lock);
+
+ /* Enable basic notifications */
+ ctrl.cmd.cmd = UCSI_SET_NOTIFICATION_ENABLE;
+ ctrl.cmd.length = 0;
+ ctrl.cmd.data = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+ ret = ucsi_run_cmd(ucsi, &ctrl, NULL, 0);
+ if (ret)
+ goto err_reset;
+
+ /* Get PPM capabilities */
+ ctrl.cmd.cmd = UCSI_GET_CAPABILITY;
+ ret = ucsi_run_cmd(ucsi, &ctrl, &ucsi->cap, sizeof(ucsi->cap));
+ if (ret)
+ goto err_reset;
+
+ if (!ucsi->cap.num_connectors) {
+ ret = -ENODEV;
+ goto err_reset;
+ }
+
+ ucsi->connector = devm_kcalloc(ucsi->dev, ucsi->cap.num_connectors,
+ sizeof(*ucsi->connector), GFP_KERNEL);
+ if (!ucsi->connector) {
+ ret = -ENOMEM;
+ goto err_reset;
+ }
+
+ for (i = 1, con = ucsi->connector; i < ucsi->cap.num_connectors + 1;
+ i++, con++) {
+ /* Get connector capability */
+ ctrl.cmd.cmd = UCSI_GET_CONNECTOR_CAPABILITY;
+ ctrl.cmd.data = i;
+ ret = ucsi_run_cmd(ucsi, &ctrl, &con->cap, sizeof(con->cap));
+ if (ret)
+ goto err_reset;
+
+ con->num = i;
+ con->ucsi = ucsi;
+ INIT_WORK(&con->work, ucsi_connector_change);
+ }
+
+ /* Enable all notifications */
+ ctrl.cmd.cmd = UCSI_SET_NOTIFICATION_ENABLE;
+ ctrl.cmd.data = UCSI_ENABLE_NTFY_ALL;
+ ret = ucsi_run_cmd(ucsi, &ctrl, NULL, 0);
+ if (ret < 0)
+ goto err_reset;
+
+ mutex_unlock(&ucsi->ppm_lock);
+ return 0;
+err_reset:
+ ucsi_reset_ppm(ucsi);
+ mutex_unlock(&ucsi->ppm_lock);
+ return ret;
+}
+
+static int ucsi_acpi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ acpi_status status;
+ struct ucsi *ucsi;
+ int ret;
+
+ ucsi = devm_kzalloc(&pdev->dev, sizeof(*ucsi), GFP_KERNEL);
+ if (!ucsi)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "missing memory resource\n");
+ return -ENODEV;
+ }
+
+ /*
+ * NOTE: ACPI has claimed the memory region as it's also an Operation
+ * Region. It's not possible to request it in the driver.
+ */
+ ucsi->data = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!ucsi->data)
+ return -ENOMEM;
+
+ ucsi->dev = &pdev->dev;
+
+ status = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
+ ACPI_ALL_NOTIFY,
+ ucsi_acpi_notify, ucsi);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ ret = ucsi_init(ucsi);
+ if (ret) {
+ acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
+ ACPI_ALL_NOTIFY,
+ ucsi_acpi_notify);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ucsi);
+ return 0;
+}
+
+static int ucsi_acpi_remove(struct platform_device *pdev)
+{
+ struct ucsi *ucsi = platform_get_drvdata(pdev);
+
+ acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
+ ACPI_ALL_NOTIFY, ucsi_acpi_notify);
+
+ /* Make sure there are no events in the middle of being processed */
+ if (wait_on_bit_timeout(&ucsi->flags, EVENT_PENDING,
+ TASK_UNINTERRUPTIBLE,
+ msecs_to_jiffies(UCSI_TIMEOUT_MS)))
+ dev_WARN(ucsi->dev, "%s: Events still pending\n", __func__);
+
+ ucsi_reset_ppm(ucsi);
+ return 0;
+}
+
+static const struct acpi_device_id ucsi_acpi_match[] = {
+ { "PNP0CA0", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, ucsi_acpi_match);
+
+static struct platform_driver ucsi_acpi_platform_driver = {
+ .driver = {
+ .name = "ucsi_acpi",
+ .acpi_match_table = ACPI_PTR(ucsi_acpi_match),
+ },
+ .probe = ucsi_acpi_probe,
+ .remove = ucsi_acpi_remove,
+};
+
+module_platform_driver(ucsi_acpi_platform_driver);
+
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("USB Type-C System Software Interface (UCSI) driver");
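
ucsi_run_cmd() above pairs each command written to the shared OPM/PPM mailbox with a completion that ucsi_acpi_notify() signals when the PPM responds. A distilled sketch of that handshake using the same kernel primitives; the send() hook standing in for the CONTROL register write is hypothetical:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static DECLARE_COMPLETION(cmd_done);

/* Runs from the notification path once the PPM has responded. */
static void on_ppm_notify(void)
{
	complete(&cmd_done);
}

/* Process context: issue one command and wait for the notification. */
static int run_one_cmd(void (*send)(void))
{
	reinit_completion(&cmd_done);
	send();		/* hypothetical: writes the CONTROL data structure */

	if (!wait_for_completion_timeout(&cmd_done, msecs_to_jiffies(20)))
		return -ETIMEDOUT;

	return 0;
}
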
diff --git a/drivers/usb/misc/ucsi.h b/drivers/usb/misc/ucsi.h
new file mode 100644
index 000000000..6dd11d1fe
--- /dev/null
+++ b/drivers/usb/misc/ucsi.h
@@ -0,0 +1,215 @@
+
+#include <linux/types.h>
+
+/* -------------------------------------------------------------------------- */
+
+/* Command Status and Connector Change Indication (CCI) data structure */
+struct ucsi_cci {
+ unsigned int RESERVED1:1;
+ unsigned int connector_change:7;
+ u8 data_length;
+ unsigned int RESERVED9:9;
+ unsigned int not_supported:1;
+ unsigned int cancel_complete:1;
+ unsigned int reset_complete:1;
+ unsigned int busy:1;
+ unsigned int ack_complete:1;
+ unsigned int error:1;
+ unsigned int cmd_complete:1;
+} __packed;
+
+/* Default fields in CONTROL data structure */
+struct ucsi_command {
+ u8 cmd;
+ u8 length;
+ u64 data:48;
+} __packed;
+
+/* Set USB Operation Mode Command structure */
+struct ucsi_uor_cmd {
+ u8 cmd;
+ u8 length;
+ u64 con_num:7;
+ u64 role:3;
+#define UCSI_UOR_ROLE_DFP BIT(0)
+#define UCSI_UOR_ROLE_UFP BIT(1)
+#define UCSI_UOR_ROLE_DRP BIT(2)
+ u64 data:38;
+} __packed;
+
+struct ucsi_control {
+ union {
+ u64 raw_cmd;
+ struct ucsi_command cmd;
+ struct ucsi_uor_cmd uor;
+ };
+};
+
+struct ucsi_data {
+ u16 version;
+ u16 RESERVED;
+ union {
+ u32 raw_cci;
+ struct ucsi_cci cci;
+ };
+ struct ucsi_control ctrl;
+ u32 message_in[4];
+ u32 message_out[4];
+} __packed;
+
+/* Commands */
+#define UCSI_PPM_RESET 0x01
+#define UCSI_CANCEL 0x02
+#define UCSI_CONNECTOR_RESET 0x03
+#define UCSI_ACK_CC_CI 0x04
+#define UCSI_SET_NOTIFICATION_ENABLE 0x05
+#define UCSI_GET_CAPABILITY 0x06
+#define UCSI_GET_CONNECTOR_CAPABILITY 0x07
+#define UCSI_SET_UOM 0x08
+#define UCSI_SET_UOR 0x09
+#define UCSI_SET_PDM 0x0A
+#define UCSI_SET_PDR 0x0B
+#define UCSI_GET_ALTERNATE_MODES 0x0C
+#define UCSI_GET_CAM_SUPPORTED 0x0D
+#define UCSI_GET_CURRENT_CAM 0x0E
+#define UCSI_SET_NEW_CAM 0x0F
+#define UCSI_GET_PDOS 0x10
+#define UCSI_GET_CABLE_PROPERTY 0x11
+#define UCSI_GET_CONNECTOR_STATUS 0x12
+#define UCSI_GET_ERROR_STATUS 0x13
+
+/* ACK_CC_CI commands */
+#define UCSI_ACK_EVENT 1
+#define UCSI_ACK_CMD 2
+
+/* Bits for SET_NOTIFICATION_ENABLE command */
+#define UCSI_ENABLE_NTFY_CMD_COMPLETE BIT(0)
+#define UCSI_ENABLE_NTFY_EXT_PWR_SRC_CHANGE BIT(1)
+#define UCSI_ENABLE_NTFY_PWR_OPMODE_CHANGE BIT(2)
+#define UCSI_ENABLE_NTFY_CAP_CHANGE BIT(5)
+#define UCSI_ENABLE_NTFY_PWR_LEVEL_CHANGE BIT(6)
+#define UCSI_ENABLE_NTFY_PD_RESET_COMPLETE BIT(7)
+#define UCSI_ENABLE_NTFY_CAM_CHANGE BIT(8)
+#define UCSI_ENABLE_NTFY_BAT_STATUS_CHANGE BIT(9)
+#define UCSI_ENABLE_NTFY_PARTNER_CHANGE BIT(11)
+#define UCSI_ENABLE_NTFY_PWR_DIR_CHANGE BIT(12)
+#define UCSI_ENABLE_NTFY_CONNECTOR_CHANGE BIT(14)
+#define UCSI_ENABLE_NTFY_ERROR BIT(15)
+#define UCSI_ENABLE_NTFY_ALL 0xdbe7
+
+/* Error information returned by PPM in response to GET_ERROR_STATUS command. */
+#define UCSI_ERROR_UNREGONIZED_CMD BIT(0)
+#define UCSI_ERROR_INVALID_CON_NUM BIT(1)
+#define UCSI_ERROR_INVALID_CMD_ARGUMENT BIT(2)
+#define UCSI_ERROR_INCOMPATIBLE_PARTNER BIT(3)
+#define UCSI_ERROR_CC_COMMUNICATION_ERR BIT(4)
+#define UCSI_ERROR_DEAD_BATTERY BIT(5)
+#define UCSI_ERROR_CONTRACT_NEGOTIATION_FAIL BIT(6)
+
+/* Data structure filled by PPM in response to GET_CAPABILITY command. */
+struct ucsi_capability {
+ u32 attributes;
+#define UCSI_CAP_ATTR_DISABLE_STATE BIT(0)
+#define UCSI_CAP_ATTR_BATTERY_CHARGING BIT(1)
+#define UCSI_CAP_ATTR_USB_PD BIT(2)
+#define UCSI_CAP_ATTR_TYPEC_CURRENT BIT(6)
+#define UCSI_CAP_ATTR_POWER_AC_SUPPLY BIT(8)
+#define UCSI_CAP_ATTR_POWER_OTHER BIT(10)
+#define UCSI_CAP_ATTR_POWER_VBUS BIT(14)
+ u8 num_connectors;
+ u32 features:24;
+#define UCSI_CAP_SET_UOM BIT(0)
+#define UCSI_CAP_SET_PDM BIT(1)
+#define UCSI_CAP_ALT_MODE_DETAILS BIT(2)
+#define UCSI_CAP_ALT_MODE_OVERRIDE BIT(3)
+#define UCSI_CAP_PDO_DETAILS BIT(4)
+#define UCSI_CAP_CABLE_DETAILS BIT(5)
+#define UCSI_CAP_EXT_SUPPLY_NOTIFICATIONS BIT(6)
+#define UCSI_CAP_PD_RESET BIT(7)
+ u8 num_alt_modes;
+ u8 RESERVED;
+ u16 bc_version;
+ u16 pd_version;
+ u16 typec_version;
+} __packed;
+
+/* Data structure filled by PPM in response to GET_CONNECTOR_CAPABILITY cmd. */
+struct ucsi_connector_capability {
+ u8 op_mode;
+#define UCSI_CONCAP_OPMODE_DFP BIT(0)
+#define UCSI_CONCAP_OPMODE_UFP BIT(1)
+#define UCSI_CONCAP_OPMODE_DRP BIT(2)
+#define UCSI_CONCAP_OPMODE_AUDIO_ACCESSORY BIT(3)
+#define UCSI_CONCAP_OPMODE_DEBUG_ACCESSORY BIT(4)
+#define UCSI_CONCAP_OPMODE_USB2 BIT(5)
+#define UCSI_CONCAP_OPMODE_USB3 BIT(6)
+#define UCSI_CONCAP_OPMODE_ALT_MODE BIT(7)
+ u8 provider:1;
+ u8 consumer:1;
+} __packed;
+
+/* Data structure filled by PPM in response to GET_CABLE_PROPERTY command. */
+struct ucsi_cable_property {
+ u16 speed_supported;
+ u8 current_capability;
+ u8 vbus_in_cable:1;
+ u8 active_cable:1;
+ u8 directionality:1;
+ u8 plug_type:2;
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
+#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
+#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
+ u8 mode_support:1;
+ u8 RESERVED_2:2;
+ u8 latency:4;
+ u8 RESERVED_4:4;
+} __packed;
+
+/* Data structure filled by PPM in response to GET_CONNECTOR_STATUS command. */
+struct ucsi_connector_status {
+ u16 change;
+#define UCSI_CONSTAT_EXT_SUPPLY_CHANGE BIT(1)
+#define UCSI_CONSTAT_POWER_OPMODE_CHANGE BIT(2)
+#define UCSI_CONSTAT_PDOS_CHANGE BIT(5)
+#define UCSI_CONSTAT_POWER_LEVEL_CHANGE BIT(6)
+#define UCSI_CONSTAT_PD_RESET_COMPLETE BIT(7)
+#define UCSI_CONSTAT_CAM_CHANGE BIT(8)
+#define UCSI_CONSTAT_BC_CHANGE BIT(9)
+#define UCSI_CONSTAT_PARTNER_CHANGE BIT(11)
+#define UCSI_CONSTAT_POWER_DIR_CHANGE BIT(12)
+#define UCSI_CONSTAT_CONNECT_CHANGE BIT(14)
+#define UCSI_CONSTAT_ERROR BIT(15)
+ u16 pwr_op_mode:3;
+#define UCSI_CONSTAT_PWR_OPMODE_NONE 0
+#define UCSI_CONSTAT_PWR_OPMODE_DEFAULT 1
+#define UCSI_CONSTAT_PWR_OPMODE_BC 2
+#define UCSI_CONSTAT_PWR_OPMODE_PD 3
+#define UCSI_CONSTAT_PWR_OPMODE_TYPEC1_3 4
+#define UCSI_CONSTAT_PWR_OPMODE_TYPEC3_0 5
+ u16 connected:1;
+ u16 pwr_dir:1;
+ u16 partner_flags:8;
+#define UCSI_CONSTAT_PARTNER_FLAG_USB BIT(0)
+#define UCSI_CONSTAT_PARTNER_FLAG_ALT_MODE BIT(1)
+ u16 partner_type:3;
+#define UCSI_CONSTAT_PARTNER_TYPE_DFP 1
+#define UCSI_CONSTAT_PARTNER_TYPE_UFP 2
+#define UCSI_CONSTAT_PARTNER_TYPE_CABLE_NO_UFP 3 /* Powered Cable */
+#define UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP 4 /* Powered Cable */
+#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG 5
+#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6
+ u32 request_data_obj;
+ u8 bc_status:2;
+#define UCSI_CONSTAT_BC_NOT_CHARGING 0
+#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1
+#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
+#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
+ u8 provider_cap_limit_reason:4;
+#define UCSI_CONSTAT_CAP_PWR_LOWERED 0
+#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1
+ u8 RESERVED:2;
+} __packed;
+
+/* -------------------------------------------------------------------------- */
+
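
The ucsi_cci bitfields above describe the 32-bit CCI register: bit 0 reserved, bits 1-7 the connector number, bits 8-15 the data length, and the status flags in the top bits. For reference, the same fields extracted from a raw value with shifts and masks, assuming the little-endian layout the struct encodes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cci = (1u << 31) | (2u << 1); /* cmd_complete set, connector 2 */

	printf("connector_change: %u\n", (cci >> 1) & 0x7f);  /* 2 */
	printf("data_length:      %u\n", (cci >> 8) & 0xff);  /* 0 */
	printf("busy:             %u\n", (cci >> 28) & 0x1);  /* 0 */
	printf("cmd_complete:     %u\n", (cci >> 31) & 0x1);  /* 1 */
	return 0;
}
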
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index c78ff95a4..6b978f04b 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -287,6 +287,9 @@ static struct urb *usbtest_alloc_urb(
if (usb_pipein(pipe))
urb->transfer_flags |= URB_SHORT_NOT_OK;
+ if ((bytes + offset) == 0)
+ return urb;
+
if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
GFP_KERNEL, &urb->transfer_dma);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c84f4d081..f824336de 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1090,29 +1090,6 @@ void musb_stop(struct musb *musb)
musb_platform_try_idle(musb, 0);
}
-static void musb_shutdown(struct platform_device *pdev)
-{
- struct musb *musb = dev_to_musb(&pdev->dev);
- unsigned long flags;
-
- pm_runtime_get_sync(musb->controller);
-
- musb_host_cleanup(musb);
- musb_gadget_cleanup(musb);
-
- spin_lock_irqsave(&musb->lock, flags);
- musb_platform_disable(musb);
- musb_generic_disable(musb);
- spin_unlock_irqrestore(&musb->lock, flags);
-
- musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
- musb_platform_exit(musb);
-
- pm_runtime_put(musb->controller);
- /* FIXME power down */
-}
-
-
/*-------------------------------------------------------------------------*/
/*
@@ -1702,7 +1679,7 @@ EXPORT_SYMBOL_GPL(musb_dma_completion);
#define use_dma 0
#endif
-static void (*musb_phy_callback)(enum musb_vbus_id_status status);
+static int (*musb_phy_callback)(enum musb_vbus_id_status status);
/*
* musb_mailbox - optional phy notifier function
@@ -1711,11 +1688,12 @@ static void (*musb_phy_callback)(enum musb_vbus_id_status status);
* Optionally gets called from the USB PHY. Note that the USB PHY must be
* disabled at the point the phy_callback is registered or unregistered.
*/
-void musb_mailbox(enum musb_vbus_id_status status)
+int musb_mailbox(enum musb_vbus_id_status status)
{
if (musb_phy_callback)
- musb_phy_callback(status);
+ return musb_phy_callback(status);
+ return -ENODEV;
};
EXPORT_SYMBOL_GPL(musb_mailbox);
@@ -2028,11 +2006,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
musb_readl = musb_default_readl;
musb_writel = musb_default_writel;
- /* We need musb_read/write functions initialized for PM */
- pm_runtime_use_autosuspend(musb->controller);
- pm_runtime_set_autosuspend_delay(musb->controller, 200);
- pm_runtime_enable(musb->controller);
-
/* The musb_platform_init() call:
* - adjusts musb->mregs
* - sets the musb->isr
@@ -2134,6 +2107,16 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
if (musb->ops->phy_callback)
musb_phy_callback = musb->ops->phy_callback;
+ /*
+ * We need musb_read/write functions initialized for PM.
+ * Note that at least the 2430 glue needs an autosuspend delay
+ * somewhere above 300 ms for the hardware to idle properly
+ * after disconnecting the cable in host mode. Let's use
+ * 500 ms for some margin.
+ */
+ pm_runtime_use_autosuspend(musb->controller);
+ pm_runtime_set_autosuspend_delay(musb->controller, 500);
+ pm_runtime_enable(musb->controller);
pm_runtime_get_sync(musb->controller);
status = usb_phy_init(musb->xceiv);
@@ -2237,13 +2220,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
if (status)
goto fail5;
- pm_runtime_put(musb->controller);
-
- /*
- * For why this is currently needed, see commit 3e43a0725637
- * ("usb: musb: core: add pm_runtime_irq_safe()")
- */
- pm_runtime_irq_safe(musb->controller);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return 0;
@@ -2265,7 +2243,9 @@ fail2_5:
usb_phy_shutdown(musb->xceiv);
err_usb_phy_init:
+ pm_runtime_dont_use_autosuspend(musb->controller);
pm_runtime_put_sync(musb->controller);
+ pm_runtime_disable(musb->controller);
fail2:
if (musb->irq_wake)
@@ -2273,7 +2253,6 @@ fail2:
musb_platform_exit(musb);
fail1:
- pm_runtime_disable(musb->controller);
dev_err(musb->controller,
"musb_init_controller failed with status %d\n", status);
@@ -2312,6 +2291,7 @@ static int musb_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
/* this gets called on rmmod.
* - Host mode: host may still be active
@@ -2319,17 +2299,26 @@ static int musb_remove(struct platform_device *pdev)
* - OTG mode: both roles are deactivated (or never-activated)
*/
musb_exit_debugfs(musb);
- musb_shutdown(pdev);
- musb_phy_callback = NULL;
-
- if (musb->dma_controller)
- musb_dma_controller_destroy(musb->dma_controller);
-
- usb_phy_shutdown(musb->xceiv);
cancel_work_sync(&musb->irq_work);
cancel_delayed_work_sync(&musb->finish_resume_work);
cancel_delayed_work_sync(&musb->deassert_reset_work);
+ pm_runtime_get_sync(musb->controller);
+ musb_host_cleanup(musb);
+ musb_gadget_cleanup(musb);
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+ pm_runtime_dont_use_autosuspend(musb->controller);
+ pm_runtime_put_sync(musb->controller);
+ pm_runtime_disable(musb->controller);
+ musb_platform_exit(musb);
+ musb_phy_callback = NULL;
+ if (musb->dma_controller)
+ musb_dma_controller_destroy(musb->dma_controller);
+ usb_phy_shutdown(musb->xceiv);
musb_free(musb);
device_init_wakeup(dev, 0);
return 0;
@@ -2613,7 +2602,6 @@ static struct platform_driver musb_driver = {
},
.probe = musb_probe,
.remove = musb_remove,
- .shutdown = musb_shutdown,
};
module_platform_driver(musb_driver);
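
With musb_mailbox() now returning an error instead of silently dropping the event, a PHY driver can distinguish "no glue registered" (-ENODEV) from a glue that is registered but not yet ready, and re-deliver the event later. A hedged sketch of a caller reacting to the new contract; struct my_phy and requeue_vbus_event() are made up for illustration:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/usb/musb.h>	/* musb_mailbox(), enum musb_vbus_id_status */

struct my_phy {			/* hypothetical PHY driver state */
	struct device *dev;
};

static void requeue_vbus_event(struct my_phy *phy)
{
	/* hypothetical: schedule a work item that retries musb_mailbox() */
}

static void report_vbus(struct my_phy *phy)
{
	int err = musb_mailbox(MUSB_VBUS_VALID);

	if (err == -EPROBE_DEFER)
		requeue_vbus_event(phy);	/* glue registered but not ready */
	else if (err)
		dev_warn(phy->dev, "dropping VBUS event: %d\n", err);
}
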
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index b6afe9e43..b55a776b0 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -215,7 +215,7 @@ struct musb_platform_ops {
dma_addr_t *dma_addr, u32 *len);
void (*pre_root_reset_end)(struct musb *musb);
void (*post_root_reset_end)(struct musb *musb);
- void (*phy_callback)(enum musb_vbus_id_status status);
+ int (*phy_callback)(enum musb_vbus_id_status status);
};
/*
@@ -312,6 +312,7 @@ struct musb {
struct work_struct irq_work;
struct delayed_work deassert_reset_work;
struct delayed_work finish_resume_work;
+ struct delayed_work gadget_work;
u16 hwvers;
u16 intrrxe;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 152865b36..af2a3a7ad 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1656,6 +1656,20 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
return usb_phy_set_power(musb->xceiv, mA);
}
+static void musb_gadget_work(struct work_struct *work)
+{
+ struct musb *musb;
+ unsigned long flags;
+
+ musb = container_of(work, struct musb, gadget_work.work);
+ pm_runtime_get_sync(musb->controller);
+ spin_lock_irqsave(&musb->lock, flags);
+ musb_pullup(musb, musb->softconnect);
+ spin_unlock_irqrestore(&musb->lock, flags);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+}
+
static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
struct musb *musb = gadget_to_musb(gadget);
@@ -1663,20 +1677,16 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
is_on = !!is_on;
- pm_runtime_get_sync(musb->controller);
-
/* NOTE: this assumes we are sensing vbus; we'd rather
* not pullup unless the B-session is active.
*/
spin_lock_irqsave(&musb->lock, flags);
if (is_on != musb->softconnect) {
musb->softconnect = is_on;
- musb_pullup(musb, is_on);
+ schedule_delayed_work(&musb->gadget_work, 0);
}
spin_unlock_irqrestore(&musb->lock, flags);
- pm_runtime_put(musb->controller);
-
return 0;
}
@@ -1845,7 +1855,7 @@ int musb_gadget_setup(struct musb *musb)
#elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
musb->g.is_otg = 0;
#endif
-
+ INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
musb_g_init_endpoints(musb);
musb->is_active = 0;
@@ -1866,6 +1876,8 @@ void musb_gadget_cleanup(struct musb *musb)
{
if (musb->port_mode == MUSB_PORT_MODE_HOST)
return;
+
+ cancel_delayed_work_sync(&musb->gadget_work);
usb_del_gadget_udc(&musb->g);
}
@@ -1914,8 +1926,8 @@ static int musb_gadget_start(struct usb_gadget *g,
if (musb->xceiv->last_event == USB_EVENT_ID)
musb_platform_set_vbus(musb, 1);
- if (musb->xceiv->last_event == USB_EVENT_NONE)
- pm_runtime_put(musb->controller);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return 0;
@@ -1934,8 +1946,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
struct musb *musb = gadget_to_musb(g);
unsigned long flags;
- if (musb->xceiv->last_event == USB_EVENT_NONE)
- pm_runtime_get_sync(musb->controller);
+ pm_runtime_get_sync(musb->controller);
/*
* REVISIT always use otg_set_peripheral() here too;
@@ -1963,7 +1974,8 @@ static int musb_gadget_stop(struct usb_gadget *g)
* that currently misbehaves.
*/
- pm_runtime_put(musb->controller);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return 0;
}
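
musb_gadget_pullup() is called under the controller spinlock, but pm_runtime_get_sync() may sleep, so the new gadget_work defers the actual pullup to process context. A minimal sketch of this "record the request under the lock, do the sleeping work from a workqueue" pattern; all names are illustrative:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(state_lock);
static struct delayed_work pullup_work;	/* INIT_DELAYED_WORK() it at setup time */
static bool softconnect;

/* Workqueue context: may sleep, e.g. pm_runtime_get_sync(), the actual
 * register writes, then pm_runtime_put_autosuspend(). */
static void pullup_worker(struct work_struct *work)
{
}

/* Atomic context: only record the request and kick the worker. */
static void request_pullup(bool is_on)
{
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	if (softconnect != is_on) {
		softconnect = is_on;
		schedule_delayed_work(&pullup_work, 0);
	}
	spin_unlock_irqrestore(&state_lock, flags);
}
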
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 8ff032285..d227a71d8 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -434,7 +434,13 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
}
}
- if (qh != NULL && qh->is_ready) {
+ /*
+ * The pipe must be broken if the current urb->status is set, so don't
+ * start the next urb.
+ * TODO: to minimize the risk of regression, only check urb->status
+ * for RX until we have a test case to understand the behavior of TX.
+ */
+ if ((!status || !is_in) && qh && qh->is_ready) {
dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
@@ -626,7 +632,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
ep->rx_reinit = 0;
}
-static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
+static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
struct musb_hw_ep *hw_ep, struct musb_qh *qh,
struct urb *urb, u32 offset,
u32 *length, u8 *mode)
@@ -663,23 +669,18 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
}
channel->desired_mode = *mode;
musb_writew(epio, MUSB_TXCSR, csr);
-
- return 0;
}
-static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
- struct musb_hw_ep *hw_ep,
- struct musb_qh *qh,
- struct urb *urb,
- u32 offset,
- u32 *length,
- u8 *mode)
+static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
+ struct musb_hw_ep *hw_ep,
+ struct musb_qh *qh,
+ struct urb *urb,
+ u32 offset,
+ u32 *length,
+ u8 *mode)
{
struct dma_channel *channel = hw_ep->tx_channel;
- if (!is_cppi_enabled(hw_ep->musb) && !tusb_dma_omap(hw_ep->musb))
- return -ENODEV;
-
channel->actual_len = 0;
/*
@@ -687,8 +688,6 @@ static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
* to identify the zero-length-final-packet case.
*/
*mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
-
- return 0;
}
static bool musb_tx_dma_program(struct dma_controller *dma,
@@ -698,15 +697,14 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
struct dma_channel *channel = hw_ep->tx_channel;
u16 pkt_size = qh->maxpacket;
u8 mode;
- int res;
if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
- res = musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb,
- offset, &length, &mode);
+ musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
+ &length, &mode);
+ else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
+ musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
+ &length, &mode);
else
- res = musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb,
- offset, &length, &mode);
- if (res)
return false;
qh->segsize = length;
@@ -1875,6 +1873,9 @@ void musb_host_rx(struct musb *musb, u8 epnum)
status = -EPROTO;
musb_writeb(epio, MUSB_RXINTERVAL, 0);
+ rx_csr &= ~MUSB_RXCSR_H_ERROR;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
+
} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
if (USB_ENDPOINT_XFER_ISOC != qh->type) {
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index c84e0322c..0b4cec940 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -49,97 +49,14 @@ struct omap2430_glue {
enum musb_vbus_id_status status;
struct work_struct omap_musb_mailbox_work;
struct device *control_otghs;
+ bool cable_connected;
+ bool enabled;
+ bool powered;
};
#define glue_to_musb(g) platform_get_drvdata(g->musb)
static struct omap2430_glue *_glue;
-static struct timer_list musb_idle_timer;
-
-static void musb_do_idle(unsigned long _musb)
-{
- struct musb *musb = (void *)_musb;
- unsigned long flags;
- u8 power;
- u8 devctl;
-
- spin_lock_irqsave(&musb->lock, flags);
-
- switch (musb->xceiv->otg->state) {
- case OTG_STATE_A_WAIT_BCON:
-
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- if (devctl & MUSB_DEVCTL_BDEVICE) {
- musb->xceiv->otg->state = OTG_STATE_B_IDLE;
- MUSB_DEV_MODE(musb);
- } else {
- musb->xceiv->otg->state = OTG_STATE_A_IDLE;
- MUSB_HST_MODE(musb);
- }
- break;
- case OTG_STATE_A_SUSPEND:
- /* finish RESUME signaling? */
- if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
- power = musb_readb(musb->mregs, MUSB_POWER);
- power &= ~MUSB_POWER_RESUME;
- dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power);
- musb_writeb(musb->mregs, MUSB_POWER, power);
- musb->is_active = 1;
- musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
- | MUSB_PORT_STAT_RESUME);
- musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
- usb_hcd_poll_rh_status(musb->hcd);
- /* NOTE: it might really be A_WAIT_BCON ... */
- musb->xceiv->otg->state = OTG_STATE_A_HOST;
- }
- break;
- case OTG_STATE_A_HOST:
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- if (devctl & MUSB_DEVCTL_BDEVICE)
- musb->xceiv->otg->state = OTG_STATE_B_IDLE;
- else
- musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
- default:
- break;
- }
- spin_unlock_irqrestore(&musb->lock, flags);
-}
-
-
-static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
-{
- unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
- static unsigned long last_timer;
-
- if (timeout == 0)
- timeout = default_timeout;
-
- /* Never idle if active, or when VBUS timeout is not set as host */
- if (musb->is_active || ((musb->a_wait_bcon == 0)
- && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) {
- dev_dbg(musb->controller, "%s active, deleting timer\n",
- usb_otg_state_string(musb->xceiv->otg->state));
- del_timer(&musb_idle_timer);
- last_timer = jiffies;
- return;
- }
-
- if (time_after(last_timer, timeout)) {
- if (!timer_pending(&musb_idle_timer))
- last_timer = timeout;
- else {
- dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
- return;
- }
- }
- last_timer = timeout;
-
- dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
- usb_otg_state_string(musb->xceiv->otg->state),
- (unsigned long)jiffies_to_msecs(timeout - jiffies));
- mod_timer(&musb_idle_timer, timeout);
-}
-
static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
{
struct usb_otg *otg = musb->xceiv->otg;
@@ -205,16 +122,6 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
musb_readb(musb->mregs, MUSB_DEVCTL));
}
-static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode)
-{
- u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-
- devctl |= MUSB_DEVCTL_SESSION;
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
-
- return 0;
-}
-
static inline void omap2430_low_level_exit(struct musb *musb)
{
u32 l;
@@ -234,22 +141,63 @@ static inline void omap2430_low_level_init(struct musb *musb)
musb_writel(musb->mregs, OTG_FORCESTDBY, l);
}
-static void omap2430_musb_mailbox(enum musb_vbus_id_status status)
+/*
+ * We can get multiple cable events, so we need to keep track of the
+ * power state. Only keep power enabled if a USB cable is connected
+ * and a gadget has been started.
+ */
+static void omap2430_set_power(struct musb *musb, bool enabled, bool cable)
+{
+ struct device *dev = musb->controller;
+ struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+ bool power_up;
+ int res;
+
+ if (glue->enabled != enabled)
+ glue->enabled = enabled;
+
+ if (glue->cable_connected != cable)
+ glue->cable_connected = cable;
+
+ power_up = glue->enabled && glue->cable_connected;
+ if (power_up == glue->powered) {
+ dev_warn(musb->controller, "power state already %i\n",
+ power_up);
+ return;
+ }
+
+ glue->powered = power_up;
+
+ if (power_up) {
+ res = pm_runtime_get_sync(musb->controller);
+ if (res < 0) {
+ dev_err(musb->controller, "could not enable: %i", res);
+ glue->powered = false;
+ }
+ } else {
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
+ }
+}
+
+static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
{
struct omap2430_glue *glue = _glue;
if (!glue) {
pr_err("%s: musb core is not yet initialized\n", __func__);
- return;
+ return -EPROBE_DEFER;
}
glue->status = status;
if (!glue_to_musb(glue)) {
pr_err("%s: musb core is not yet ready\n", __func__);
- return;
+ return -EPROBE_DEFER;
}
schedule_work(&glue->omap_musb_mailbox_work);
+
+ return 0;
}
static void omap_musb_set_mailbox(struct omap2430_glue *glue)
@@ -259,6 +207,13 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
struct omap_musb_board_data *data = pdata->board_data;
struct usb_otg *otg = musb->xceiv->otg;
+ bool cable_connected;
+
+ cable_connected = ((glue->status == MUSB_ID_GROUND) ||
+ (glue->status == MUSB_VBUS_VALID));
+
+ if (cable_connected)
+ omap2430_set_power(musb, glue->enabled, cable_connected);
switch (glue->status) {
case MUSB_ID_GROUND:
@@ -268,7 +223,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
musb->xceiv->last_event = USB_EVENT_ID;
if (musb->gadget_driver) {
- pm_runtime_get_sync(dev);
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_HOST);
omap2430_musb_set_vbus(musb, 1);
@@ -281,8 +235,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
otg->default_a = false;
musb->xceiv->otg->state = OTG_STATE_B_IDLE;
musb->xceiv->last_event = USB_EVENT_VBUS;
- if (musb->gadget_driver)
- pm_runtime_get_sync(dev);
omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
break;
@@ -291,11 +243,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
dev_dbg(dev, "VBUS Disconnect\n");
musb->xceiv->last_event = USB_EVENT_NONE;
- if (musb->gadget_driver) {
+ if (musb->gadget_driver)
omap2430_musb_set_vbus(musb, 0);
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
- }
if (data->interface_type == MUSB_INTERFACE_UTMI)
otg_set_vbus(musb->xceiv->otg, 0);
@@ -307,6 +256,9 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
dev_dbg(dev, "ID float\n");
}
+ if (!cable_connected)
+ omap2430_set_power(musb, glue->enabled, cable_connected);
+
atomic_notifier_call_chain(&musb->xceiv->notifier,
musb->xceiv->last_event, NULL);
}
@@ -316,13 +268,8 @@ static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
{
struct omap2430_glue *glue = container_of(mailbox_work,
struct omap2430_glue, omap_musb_mailbox_work);
- struct musb *musb = glue_to_musb(glue);
- struct device *dev = musb->controller;
- pm_runtime_get_sync(dev);
omap_musb_set_mailbox(glue);
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
}
static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
@@ -389,23 +336,7 @@ static int omap2430_musb_init(struct musb *musb)
return PTR_ERR(musb->phy);
}
musb->isr = omap2430_musb_interrupt;
-
- /*
- * Enable runtime PM for musb parent (this driver). We can't
- * do it earlier as struct musb is not yet allocated and we
- * need to touch the musb registers for runtime PM.
- */
- pm_runtime_enable(glue->dev);
- status = pm_runtime_get_sync(glue->dev);
- if (status < 0)
- goto err1;
-
- status = pm_runtime_get_sync(dev);
- if (status < 0) {
- dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
- pm_runtime_put_sync(glue->dev);
- goto err1;
- }
+ phy_init(musb->phy);
l = musb_readl(musb->mregs, OTG_INTERFSEL);
@@ -427,20 +358,10 @@ static int omap2430_musb_init(struct musb *musb)
musb_readl(musb->mregs, OTG_INTERFSEL),
musb_readl(musb->mregs, OTG_SIMENABLE));
- setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
-
if (glue->status != MUSB_UNKNOWN)
omap_musb_set_mailbox(glue);
- phy_init(musb->phy);
- phy_power_on(musb->phy);
-
- pm_runtime_put_noidle(musb->controller);
- pm_runtime_put_noidle(glue->dev);
return 0;
-
-err1:
- return status;
}
static void omap2430_musb_enable(struct musb *musb)
@@ -452,6 +373,11 @@ static void omap2430_musb_enable(struct musb *musb)
struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
struct omap_musb_board_data *data = pdata->board_data;
+ if (!WARN_ON(!musb->phy))
+ phy_power_on(musb->phy);
+
+ omap2430_set_power(musb, true, glue->cable_connected);
+
switch (glue->status) {
case MUSB_ID_GROUND:
@@ -487,18 +413,25 @@ static void omap2430_musb_disable(struct musb *musb)
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+ if (!WARN_ON(!musb->phy))
+ phy_power_off(musb->phy);
+
if (glue->status != MUSB_UNKNOWN)
omap_control_usb_set_mode(glue->control_otghs,
USB_MODE_DISCONNECT);
+
+ omap2430_set_power(musb, false, glue->cable_connected);
}
static int omap2430_musb_exit(struct musb *musb)
{
- del_timer_sync(&musb_idle_timer);
+ struct device *dev = musb->controller;
+ struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
omap2430_low_level_exit(musb);
- phy_power_off(musb->phy);
phy_exit(musb->phy);
+ musb->phy = NULL;
+ cancel_work_sync(&glue->omap_musb_mailbox_work);
return 0;
}
@@ -512,9 +445,6 @@ static const struct musb_platform_ops omap2430_ops = {
.init = omap2430_musb_init,
.exit = omap2430_musb_exit,
- .set_mode = omap2430_musb_set_mode,
- .try_idle = omap2430_musb_try_idle,
-
.set_vbus = omap2430_musb_set_vbus,
.enable = omap2430_musb_enable,
@@ -639,11 +569,9 @@ static int omap2430_probe(struct platform_device *pdev)
goto err2;
}
- /*
- * Note that we cannot enable PM runtime yet for this
- * driver as we need struct musb initialized first.
- * See omap2430_musb_init above.
- */
+ pm_runtime_enable(glue->dev);
+ pm_runtime_use_autosuspend(glue->dev);
+ pm_runtime_set_autosuspend_delay(glue->dev, 500);
ret = platform_device_add(musb);
if (ret) {
@@ -662,12 +590,14 @@ err0:
static int omap2430_remove(struct platform_device *pdev)
{
- struct omap2430_glue *glue = platform_get_drvdata(pdev);
+ struct omap2430_glue *glue = platform_get_drvdata(pdev);
+ struct musb *musb = glue_to_musb(glue);
pm_runtime_get_sync(glue->dev);
- cancel_work_sync(&glue->omap_musb_mailbox_work);
platform_device_unregister(glue->musb);
+ omap2430_set_power(musb, false, false);
pm_runtime_put_sync(glue->dev);
+ pm_runtime_dont_use_autosuspend(glue->dev);
pm_runtime_disable(glue->dev);
return 0;
@@ -680,12 +610,13 @@ static int omap2430_runtime_suspend(struct device *dev)
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
- if (musb) {
- musb->context.otg_interfsel = musb_readl(musb->mregs,
- OTG_INTERFSEL);
+ if (!musb)
+ return 0;
- omap2430_low_level_exit(musb);
- }
+ musb->context.otg_interfsel = musb_readl(musb->mregs,
+ OTG_INTERFSEL);
+
+ omap2430_low_level_exit(musb);
return 0;
}
@@ -696,7 +627,7 @@ static int omap2430_runtime_resume(struct device *dev)
struct musb *musb = glue_to_musb(glue);
if (!musb)
- return -EPROBE_DEFER;
+ return 0;
omap2430_low_level_init(musb);
musb_writel(musb->mregs, OTG_INTERFSEL,
@@ -738,18 +669,8 @@ static struct platform_driver omap2430_driver = {
},
};
+module_platform_driver(omap2430_driver);
+
MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("GPL v2");
-
-static int __init omap2430_init(void)
-{
- return platform_driver_register(&omap2430_driver);
-}
-subsys_initcall(omap2430_init);
-
-static void __exit omap2430_exit(void)
-{
- platform_driver_unregister(&omap2430_driver);
-}
-module_exit(omap2430_exit);
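(Illustrative note, not part of the patch.) module_platform_driver() generates roughly the registration boilerplate removed above; the one behavioural nuance is that built-in registration moves from the subsys_initcall level to the later device_initcall level used by module_init():

	/* Hedged sketch of what module_platform_driver(omap2430_driver) expands to. */
	static int __init omap2430_driver_init(void)
	{
		return platform_driver_register(&omap2430_driver);
	}
	module_init(omap2430_driver_init);

	static void __exit omap2430_driver_exit(void)
	{
		platform_driver_unregister(&omap2430_driver);
	}
	module_exit(omap2430_driver_exit);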
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index fdab4232c..76500515d 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -80,7 +80,8 @@ static struct musb *sunxi_musb;
struct sunxi_glue {
struct device *dev;
- struct platform_device *musb;
+ struct musb *musb;
+ struct platform_device *musb_pdev;
struct clk *clk;
struct reset_control *rst;
struct phy *phy;
@@ -102,7 +103,7 @@ static void sunxi_musb_work(struct work_struct *work)
return;
if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) {
- struct musb *musb = platform_get_drvdata(glue->musb);
+ struct musb *musb = glue->musb;
unsigned long flags;
u8 devctl;
@@ -112,7 +113,7 @@ static void sunxi_musb_work(struct work_struct *work)
if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) {
set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
musb->xceiv->otg->default_a = 1;
- musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
MUSB_HST_MODE(musb);
devctl |= MUSB_DEVCTL_SESSION;
} else {
@@ -145,10 +146,12 @@ static void sunxi_musb_set_vbus(struct musb *musb, int is_on)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
- if (is_on)
+ if (is_on) {
set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
- else
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ } else {
clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ }
schedule_work(&glue->work);
}
@@ -264,15 +267,6 @@ static int sunxi_musb_init(struct musb *musb)
if (ret)
goto error_unregister_notifier;
- if (musb->port_mode == MUSB_PORT_MODE_HOST) {
- ret = phy_power_on(glue->phy);
- if (ret)
- goto error_phy_exit;
- set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
- /* Stop musb work from turning vbus off again */
- set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
- }
-
musb->isr = sunxi_musb_interrupt;
/* Stop the musb-core from doing runtime pm (not supported on sunxi) */
@@ -280,8 +274,6 @@ static int sunxi_musb_init(struct musb *musb)
return 0;
-error_phy_exit:
- phy_exit(glue->phy);
error_unregister_notifier:
if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
@@ -323,10 +315,31 @@ static int sunxi_musb_exit(struct musb *musb)
return 0;
}
+static int sunxi_set_mode(struct musb *musb, u8 mode)
+{
+ struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+ int ret;
+
+ if (mode == MUSB_HOST) {
+ ret = phy_power_on(glue->phy);
+ if (ret)
+ return ret;
+
+ set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+ /* Stop musb work from turning vbus off again */
+ set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ }
+
+ return 0;
+}
+
static void sunxi_musb_enable(struct musb *musb)
{
struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+ glue->musb = musb;
+
/* musb_core does not call us in a balanced manner */
if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
return;
@@ -569,6 +582,7 @@ static const struct musb_platform_ops sunxi_musb_ops = {
.exit = sunxi_musb_exit,
.enable = sunxi_musb_enable,
.disable = sunxi_musb_disable,
+ .set_mode = sunxi_set_mode,
.fifo_offset = sunxi_musb_fifo_offset,
.ep_offset = sunxi_musb_ep_offset,
.busctl_offset = sunxi_musb_busctl_offset,
@@ -721,9 +735,9 @@ static int sunxi_musb_probe(struct platform_device *pdev)
pinfo.data = &pdata;
pinfo.size_data = sizeof(pdata);
- glue->musb = platform_device_register_full(&pinfo);
- if (IS_ERR(glue->musb)) {
- ret = PTR_ERR(glue->musb);
+ glue->musb_pdev = platform_device_register_full(&pinfo);
+ if (IS_ERR(glue->musb_pdev)) {
+ ret = PTR_ERR(glue->musb_pdev);
dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret);
goto err_unregister_usb_phy;
}
@@ -740,7 +754,7 @@ static int sunxi_musb_remove(struct platform_device *pdev)
struct sunxi_glue *glue = platform_get_drvdata(pdev);
struct platform_device *usb_phy = glue->usb_phy;
- platform_device_unregister(glue->musb); /* Frees glue ! */
+ platform_device_unregister(glue->musb_pdev);
usb_phy_generic_unregister(usb_phy);
return 0;
diff --git a/drivers/usb/phy/phy-qcom-8x16-usb.c b/drivers/usb/phy/phy-qcom-8x16-usb.c
index 3d7af85ae..d8593adb3 100644
--- a/drivers/usb/phy/phy-qcom-8x16-usb.c
+++ b/drivers/usb/phy/phy-qcom-8x16-usb.c
@@ -240,10 +240,7 @@ static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
qphy->switch_gpio = devm_gpiod_get_optional(dev, "switch",
GPIOD_OUT_LOW);
- if (IS_ERR(qphy->switch_gpio))
- return PTR_ERR(qphy->switch_gpio);
-
- return 0;
+ return PTR_ERR_OR_ZERO(qphy->switch_gpio);
}
static int phy_8x16_reboot_notify(struct notifier_block *this,
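PTR_ERR_OR_ZERO() above folds the usual IS_ERR()/PTR_ERR() dance into a single return. Roughly, as a sketch of the include/linux/err.h helper for orientation:

	/* Hedged sketch; the real helper also carries __must_check and __force. */
	static inline int PTR_ERR_OR_ZERO(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		else
			return 0;
	}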
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 014dbbd72..a72e8d670 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -97,6 +97,9 @@ struct twl6030_usb {
struct regulator *usb3v3;
+ /* used to check initial cable status after probe */
+ struct delayed_work get_status_work;
+
/* used to set vbus, in atomic path */
struct work_struct set_vbus_work;
@@ -155,13 +158,13 @@ static int twl6030_start_srp(struct phy_companion *comparator)
static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
{
/* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */
- twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_BACKUP_REG);
+ twl6030_writeb(twl, TWL6030_MODULE_ID0, 0x1, TWL6030_BACKUP_REG);
/* Program CFG_LDO_PD2 register and set VUSB bit */
- twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_CFG_LDO_PD2);
+ twl6030_writeb(twl, TWL6030_MODULE_ID0, 0x1, TWL6030_CFG_LDO_PD2);
/* Program MISC2 register and set bit VUSB_IN_VBAT */
- twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x10, TWL6030_MISC2);
+ twl6030_writeb(twl, TWL6030_MODULE_ID0, 0x10, TWL6030_MISC2);
twl->usb3v3 = regulator_get(twl->dev, twl->regulator);
if (IS_ERR(twl->usb3v3))
@@ -227,12 +230,16 @@ static irqreturn_t twl6030_usb_irq(int irq, void *_twl)
twl->asleep = 1;
status = MUSB_VBUS_VALID;
twl->linkstat = status;
- musb_mailbox(status);
+ ret = musb_mailbox(status);
+ if (ret)
+ twl->linkstat = MUSB_UNKNOWN;
} else {
if (twl->linkstat != MUSB_UNKNOWN) {
status = MUSB_VBUS_OFF;
twl->linkstat = status;
- musb_mailbox(status);
+ ret = musb_mailbox(status);
+ if (ret)
+ twl->linkstat = MUSB_UNKNOWN;
if (twl->asleep) {
regulator_disable(twl->usb3v3);
twl->asleep = 0;
@@ -264,7 +271,9 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
status = MUSB_ID_GROUND;
twl->linkstat = status;
- musb_mailbox(status);
+ ret = musb_mailbox(status);
+ if (ret)
+ twl->linkstat = MUSB_UNKNOWN;
} else {
twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
@@ -274,6 +283,15 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
return IRQ_HANDLED;
}
+static void twl6030_status_work(struct work_struct *work)
+{
+ struct twl6030_usb *twl = container_of(work, struct twl6030_usb,
+ get_status_work.work);
+
+ twl6030_usb_irq(twl->irq2, twl);
+ twl6030_usbotg_irq(twl->irq1, twl);
+}
+
static int twl6030_enable_irq(struct twl6030_usb *twl)
{
twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
@@ -284,8 +302,6 @@ static int twl6030_enable_irq(struct twl6030_usb *twl)
REG_INT_MSK_LINE_C);
twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK,
REG_INT_MSK_STS_C);
- twl6030_usb_irq(twl->irq2, twl);
- twl6030_usbotg_irq(twl->irq1, twl);
return 0;
}
@@ -301,10 +317,10 @@ static void otg_set_vbus_work(struct work_struct *data)
*/
if (twl->vbus_enable)
- twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE , 0x40,
+ twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE, 0x40,
CHARGERUSB_CTRL1);
else
- twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE , 0x00,
+ twl6030_writeb(twl, TWL_MODULE_MAIN_CHARGE, 0x00,
CHARGERUSB_CTRL1);
}
@@ -371,6 +387,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "could not create sysfs file\n");
INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work);
+ INIT_DELAYED_WORK(&twl->get_status_work, twl6030_status_work);
status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -395,6 +412,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
twl->asleep = 0;
twl6030_enable_irq(twl);
+ schedule_delayed_work(&twl->get_status_work, HZ);
dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
return 0;
@@ -404,6 +422,7 @@ static int twl6030_usb_remove(struct platform_device *pdev)
{
struct twl6030_usb *twl = platform_get_drvdata(pdev);
+ cancel_delayed_work(&twl->get_status_work);
twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
REG_INT_MSK_LINE_C);
twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
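The twl6030 hunks above stop firing the IRQ handlers synchronously from twl6030_enable_irq() and instead poll the initial cable status from a delayed work item about one second after probe. A minimal, self-contained sketch of that pattern (all names here are illustrative, not from the driver):

	#include <linux/workqueue.h>

	struct example_dev {
		struct delayed_work get_status_work;
	};

	static void example_status_work(struct work_struct *work)
	{
		struct example_dev *d = container_of(work, struct example_dev,
						     get_status_work.work);

		/* Re-run the interrupt handlers here to pick up the
		 * boot-time cable state, as twl6030_status_work() does. */
		(void)d;
	}

	static int example_probe(struct example_dev *d)
	{
		INIT_DELAYED_WORK(&d->get_status_work, example_status_work);
		schedule_delayed_work(&d->get_status_work, HZ);	/* ~1 s later */
		return 0;
	}

	static void example_remove(struct example_dev *d)
	{
		cancel_delayed_work(&d->get_status_work);
	}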
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 000f97501..7be4e7d57 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -799,8 +799,10 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
+ struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
+ struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
- return info->dma_map_ctrl(pkt, map);
+ return info->dma_map_ctrl(chan->device->dev, pkt, map);
}
static void usbhsf_dma_complete(void *arg);
@@ -881,12 +883,12 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
if (!fifo)
goto usbhsf_pio_prepare_push;
- if (usbhsf_dma_map(pkt) < 0)
- goto usbhsf_pio_prepare_push;
-
ret = usbhsf_fifo_select(pipe, fifo, 0);
if (ret < 0)
- goto usbhsf_pio_prepare_push_unmap;
+ goto usbhsf_pio_prepare_push;
+
+ if (usbhsf_dma_map(pkt) < 0)
+ goto usbhsf_pio_prepare_push_unselect;
pkt->trans = len;
@@ -896,8 +898,8 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
return 0;
-usbhsf_pio_prepare_push_unmap:
- usbhsf_dma_unmap(pkt);
+usbhsf_pio_prepare_push_unselect:
+ usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_push:
/*
* change handler to PIO
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 53d104b56..30345c2d0 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -191,13 +191,12 @@ static void usbhsg_queue_push(struct usbhsg_uep *uep,
/*
* dma map/unmap
*/
-static int usbhsg_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
+static int usbhsg_dma_map_ctrl(struct device *dma_dev, struct usbhs_pkt *pkt,
+ int map)
{
struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
struct usb_request *req = &ureq->req;
struct usbhs_pipe *pipe = pkt->pipe;
- struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
- struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
enum dma_data_direction dir;
int ret = 0;
@@ -207,13 +206,13 @@ static int usbhsg_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
/* it can not use scatter/gather */
WARN_ON(req->num_sgs);
- ret = usb_gadget_map_request(&gpriv->gadget, req, dir);
+ ret = usb_gadget_map_request_by_dev(dma_dev, req, dir);
if (ret < 0)
return ret;
pkt->dma = req->dma;
} else {
- usb_gadget_unmap_request(&gpriv->gadget, req, dir);
+ usb_gadget_unmap_request_by_dev(dma_dev, req, dir);
}
return ret;
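The renesas_usbhs hunks above thread the DMA engine channel's device (chan->device->dev) through dma_map_ctrl(), so requests are mapped against the device that actually performs the transfers rather than the gadget controller. A hedged sketch of mapping against the channel's device (illustrative names, generic dma_map_single() instead of the driver's gadget helpers):

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	static dma_addr_t example_map_for_chan(struct dma_chan *chan, void *buf,
					       size_t len,
					       enum dma_data_direction dir)
	{
		/* The DMA controller's struct device owns the mapping. */
		struct device *dma_dev = chan->device->dev;

		return dma_map_single(dma_dev, buf, len, dir);
	}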
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 1a8e4c45c..3bf0b72eb 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -929,7 +929,8 @@ static int usbhsh_dcp_queue_push(struct usb_hcd *hcd,
/*
* dma map functions
*/
-static int usbhsh_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
+static int usbhsh_dma_map_ctrl(struct device *dma_dev, struct usbhs_pkt *pkt,
+ int map)
{
if (map) {
struct usbhsh_request *ureq = usbhsh_pkt_to_ureq(pkt);
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index 78e9dba70..c238772b9 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -391,9 +391,8 @@ void usbhs_pipe_set_trans_count_if_bulk(struct usbhs_pipe *pipe, int len)
/*
* pipe setup
*/
-static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe,
- int is_host,
- int dir_in)
+static int usbhsp_setup_pipecfg(struct usbhs_pipe *pipe, int is_host,
+ int dir_in, u16 *pipecfg)
{
u16 type = 0;
u16 bfre = 0;
@@ -451,14 +450,14 @@ static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe,
/* EPNUM */
epnum = 0; /* see usbhs_pipe_config_update() */
-
- return type |
- bfre |
- dblb |
- cntmd |
- dir |
- shtnak |
- epnum;
+ *pipecfg = type |
+ bfre |
+ dblb |
+ cntmd |
+ dir |
+ shtnak |
+ epnum;
+ return 0;
}
static u16 usbhsp_setup_pipebuff(struct usbhs_pipe *pipe)
@@ -655,7 +654,8 @@ static void usbhsp_put_pipe(struct usbhs_pipe *pipe)
}
void usbhs_pipe_init(struct usbhs_priv *priv,
- int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map))
+ int (*dma_map_ctrl)(struct device *dma_dev,
+ struct usbhs_pkt *pkt, int map))
{
struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
struct usbhs_pipe *pipe;
@@ -702,7 +702,11 @@ struct usbhs_pipe *usbhs_pipe_malloc(struct usbhs_priv *priv,
return NULL;
}
- pipecfg = usbhsp_setup_pipecfg(pipe, is_host, dir_in);
+ if (usbhsp_setup_pipecfg(pipe, is_host, dir_in, &pipecfg)) {
+ dev_err(dev, "can't setup pipe\n");
+ return NULL;
+ }
+
pipebuf = usbhsp_setup_pipebuff(pipe);
usbhsp_pipe_select(pipe);
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 7835747f9..95185fdb2 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -47,7 +47,8 @@ struct usbhs_pipe_info {
struct usbhs_pipe *pipe;
int size; /* array size of "pipe" */
- int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map);
+ int (*dma_map_ctrl)(struct device *dma_dev, struct usbhs_pkt *pkt,
+ int map);
};
/*
@@ -84,7 +85,8 @@ int usbhs_pipe_is_running(struct usbhs_pipe *pipe);
void usbhs_pipe_running(struct usbhs_pipe *pipe, int running);
void usbhs_pipe_init(struct usbhs_priv *priv,
- int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map));
+ int (*dma_map_ctrl)(struct device *dma_dev,
+ struct usbhs_pkt *pkt, int map));
int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe);
void usbhs_pipe_clear(struct usbhs_pipe *pipe);
int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index a66b01bb1..8967715fe 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -127,7 +127,7 @@ static int usb_console_setup(struct console *co, char *options)
info->port = port;
++port->port.count;
- if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
+ if (!tty_port_initialized(&port->port)) {
if (serial->type->set_termios) {
/*
* allocate a fake tty so the driver can initialize
@@ -168,7 +168,7 @@ static int usb_console_setup(struct console *co, char *options)
tty_port_tty_set(&port->port, NULL);
tty_kref_put(tty);
}
- set_bit(ASYNCB_INITIALIZED, &port->port.flags);
+ tty_port_set_initialized(&port->port, 1);
}
/* Now that any required fake tty operations are completed restore
* the tty port count */
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index a7c8d26a3..96a70789b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -331,6 +331,42 @@ struct cp210x_comm_status {
*/
#define PURGE_ALL 0x000f
+/* CP210X_GET_FLOW/CP210X_SET_FLOW read/write these 0x10 bytes */
+struct cp210x_flow_ctl {
+ __le32 ulControlHandshake;
+ __le32 ulFlowReplace;
+ __le32 ulXonLimit;
+ __le32 ulXoffLimit;
+} __packed;
+
+/* cp210x_flow_ctl::ulControlHandshake */
+#define CP210X_SERIAL_DTR_MASK GENMASK(1, 0)
+#define CP210X_SERIAL_DTR_SHIFT(_mode) (_mode)
+#define CP210X_SERIAL_CTS_HANDSHAKE BIT(3)
+#define CP210X_SERIAL_DSR_HANDSHAKE BIT(4)
+#define CP210X_SERIAL_DCD_HANDSHAKE BIT(5)
+#define CP210X_SERIAL_DSR_SENSITIVITY BIT(6)
+
+/* values for cp210x_flow_ctl::ulControlHandshake::CP210X_SERIAL_DTR_MASK */
+#define CP210X_SERIAL_DTR_INACTIVE 0
+#define CP210X_SERIAL_DTR_ACTIVE 1
+#define CP210X_SERIAL_DTR_FLOW_CTL 2
+
+/* cp210x_flow_ctl::ulFlowReplace */
+#define CP210X_SERIAL_AUTO_TRANSMIT BIT(0)
+#define CP210X_SERIAL_AUTO_RECEIVE BIT(1)
+#define CP210X_SERIAL_ERROR_CHAR BIT(2)
+#define CP210X_SERIAL_NULL_STRIPPING BIT(3)
+#define CP210X_SERIAL_BREAK_CHAR BIT(4)
+#define CP210X_SERIAL_RTS_MASK GENMASK(7, 6)
+#define CP210X_SERIAL_RTS_SHIFT(_mode) (_mode << 6)
+#define CP210X_SERIAL_XOFF_CONTINUE BIT(31)
+
+/* values for cp210x_flow_ctl::ulFlowReplace::CP210X_SERIAL_RTS_MASK */
+#define CP210X_SERIAL_RTS_INACTIVE 0
+#define CP210X_SERIAL_RTS_ACTIVE 1
+#define CP210X_SERIAL_RTS_FLOW_CTL 2
+
/*
* Reads a variable-sized block of CP210X_ registers, identified by req.
* Returns data into buf in native USB byte order.
@@ -698,9 +734,10 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
{
struct device *dev = &port->dev;
unsigned int cflag;
- u8 modem_ctl[16];
+ struct cp210x_flow_ctl flow_ctl;
u32 baud;
u16 bits;
+ u32 ctl_hs;
cp210x_read_u32_reg(port, CP210X_GET_BAUDRATE, &baud);
@@ -796,9 +833,10 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
break;
}
- cp210x_read_reg_block(port, CP210X_GET_FLOW, modem_ctl,
- sizeof(modem_ctl));
- if (modem_ctl[0] & 0x08) {
+ cp210x_read_reg_block(port, CP210X_GET_FLOW, &flow_ctl,
+ sizeof(flow_ctl));
+ ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake);
+ if (ctl_hs & CP210X_SERIAL_CTS_HANDSHAKE) {
dev_dbg(dev, "%s - flow control = CRTSCTS\n", __func__);
cflag |= CRTSCTS;
} else {
@@ -867,7 +905,6 @@ static void cp210x_set_termios(struct tty_struct *tty,
struct device *dev = &port->dev;
unsigned int cflag, old_cflag;
u16 bits;
- u8 modem_ctl[16];
cflag = tty->termios.c_cflag;
old_cflag = old_termios->c_cflag;
@@ -951,34 +988,44 @@ static void cp210x_set_termios(struct tty_struct *tty,
}
if ((cflag & CRTSCTS) != (old_cflag & CRTSCTS)) {
-
- /* Only bytes 0, 4 and 7 out of first 8 have functional bits */
-
- cp210x_read_reg_block(port, CP210X_GET_FLOW, modem_ctl,
- sizeof(modem_ctl));
- dev_dbg(dev, "%s - read modem controls = %02x .. .. .. %02x .. .. %02x\n",
- __func__, modem_ctl[0], modem_ctl[4], modem_ctl[7]);
-
+ struct cp210x_flow_ctl flow_ctl;
+ u32 ctl_hs;
+ u32 flow_repl;
+
+ cp210x_read_reg_block(port, CP210X_GET_FLOW, &flow_ctl,
+ sizeof(flow_ctl));
+ ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake);
+ flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace);
+ dev_dbg(dev, "%s - read ulControlHandshake=0x%08x, ulFlowReplace=0x%08x\n",
+ __func__, ctl_hs, flow_repl);
+
+ ctl_hs &= ~CP210X_SERIAL_DSR_HANDSHAKE;
+ ctl_hs &= ~CP210X_SERIAL_DCD_HANDSHAKE;
+ ctl_hs &= ~CP210X_SERIAL_DSR_SENSITIVITY;
+ ctl_hs &= ~CP210X_SERIAL_DTR_MASK;
+ ctl_hs |= CP210X_SERIAL_DTR_SHIFT(CP210X_SERIAL_DTR_ACTIVE);
if (cflag & CRTSCTS) {
- modem_ctl[0] &= ~0x7B;
- modem_ctl[0] |= 0x09;
- modem_ctl[4] = 0x80;
- /* FIXME - why clear reserved bits just read? */
- modem_ctl[5] = 0;
- modem_ctl[6] = 0;
- modem_ctl[7] = 0;
+ ctl_hs |= CP210X_SERIAL_CTS_HANDSHAKE;
+
+ flow_repl &= ~CP210X_SERIAL_RTS_MASK;
+ flow_repl |= CP210X_SERIAL_RTS_SHIFT(
+ CP210X_SERIAL_RTS_FLOW_CTL);
dev_dbg(dev, "%s - flow control = CRTSCTS\n", __func__);
} else {
- modem_ctl[0] &= ~0x7B;
- modem_ctl[0] |= 0x01;
- modem_ctl[4] = 0x40;
+ ctl_hs &= ~CP210X_SERIAL_CTS_HANDSHAKE;
+
+ flow_repl &= ~CP210X_SERIAL_RTS_MASK;
+ flow_repl |= CP210X_SERIAL_RTS_SHIFT(
+ CP210X_SERIAL_RTS_ACTIVE);
dev_dbg(dev, "%s - flow control = NONE\n", __func__);
}
- dev_dbg(dev, "%s - write modem controls = %02x .. .. .. %02x .. .. %02x\n",
- __func__, modem_ctl[0], modem_ctl[4], modem_ctl[7]);
- cp210x_write_reg_block(port, CP210X_SET_FLOW, modem_ctl,
- sizeof(modem_ctl));
+ dev_dbg(dev, "%s - write ulControlHandshake=0x%08x, ulFlowReplace=0x%08x\n",
+ __func__, ctl_hs, flow_repl);
+ flow_ctl.ulControlHandshake = cpu_to_le32(ctl_hs);
+ flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl);
+ cp210x_write_reg_block(port, CP210X_SET_FLOW, &flow_ctl,
+ sizeof(flow_ctl));
}
}
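With the cp210x_flow_ctl structure and bit definitions above, flow-control updates become an explicit read-modify-write of two little-endian words. A sketch condensed from the set_termios hunk above, enabling RTS/CTS only:

	/* Condensed, illustrative variant of the CRTSCTS branch above. */
	static void example_enable_crtscts(struct usb_serial_port *port)
	{
		struct cp210x_flow_ctl flow_ctl;
		u32 ctl_hs, flow_repl;

		cp210x_read_reg_block(port, CP210X_GET_FLOW, &flow_ctl,
				      sizeof(flow_ctl));
		ctl_hs = le32_to_cpu(flow_ctl.ulControlHandshake);
		flow_repl = le32_to_cpu(flow_ctl.ulFlowReplace);

		ctl_hs |= CP210X_SERIAL_CTS_HANDSHAKE;
		flow_repl &= ~CP210X_SERIAL_RTS_MASK;
		flow_repl |= CP210X_SERIAL_RTS_SHIFT(CP210X_SERIAL_RTS_FLOW_CTL);

		flow_ctl.ulControlHandshake = cpu_to_le32(ctl_hs);
		flow_ctl.ulFlowReplace = cpu_to_le32(flow_repl);
		cp210x_write_reg_block(port, CP210X_SET_FLOW, &flow_ctl,
				       sizeof(flow_ctl));
	}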
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 16e8e37b3..6a1df9e82 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -699,8 +699,7 @@ static void digi_set_termios(struct tty_struct *tty,
/* don't set RTS if using hardware flow control */
/* and throttling input */
modem_signals = TIOCM_DTR;
- if (!C_CRTSCTS(tty) ||
- !test_bit(TTY_THROTTLED, &tty->flags))
+ if (!C_CRTSCTS(tty) || !tty_throttled(tty))
modem_signals |= TIOCM_RTS;
digi_set_modem_signals(port, modem_signals, 1);
}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 3a814e802..008208091 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -93,27 +93,27 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial);
static void ftdi_USB_UIRT_setup(struct ftdi_private *priv);
static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
-static struct ftdi_sio_quirk ftdi_jtag_quirk = {
+static const struct ftdi_sio_quirk ftdi_jtag_quirk = {
.probe = ftdi_jtag_probe,
};
-static struct ftdi_sio_quirk ftdi_NDI_device_quirk = {
+static const struct ftdi_sio_quirk ftdi_NDI_device_quirk = {
.probe = ftdi_NDI_device_setup,
};
-static struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = {
+static const struct ftdi_sio_quirk ftdi_USB_UIRT_quirk = {
.port_probe = ftdi_USB_UIRT_setup,
};
-static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
+static const struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
.port_probe = ftdi_HE_TIRA1_setup,
};
-static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
+static const struct ftdi_sio_quirk ftdi_stmclite_quirk = {
.probe = ftdi_stmclite_probe,
};
-static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
+static const struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
.probe = ftdi_8u2232c_probe,
};
@@ -1775,7 +1775,7 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
static int ftdi_sio_probe(struct usb_serial *serial,
const struct usb_device_id *id)
{
- struct ftdi_sio_quirk *quirk =
+ const struct ftdi_sio_quirk *quirk =
(struct ftdi_sio_quirk *)id->driver_info;
if (quirk && quirk->probe) {
@@ -1792,7 +1792,7 @@ static int ftdi_sio_probe(struct usb_serial *serial,
static int ftdi_sio_port_probe(struct usb_serial_port *port)
{
struct ftdi_private *priv;
- struct ftdi_sio_quirk *quirk = usb_get_serial_data(port->serial);
+ const struct ftdi_sio_quirk *quirk = usb_get_serial_data(port->serial);
priv = kzalloc(sizeof(struct ftdi_private), GFP_KERNEL);
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 54e170dd3..ae8c0365a 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -473,7 +473,7 @@ static bool usb_serial_generic_msr_changed(struct tty_struct *tty,
* Use tty-port initialised flag to detect all hangups including the
* one generated at USB-device disconnect.
*/
- if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
+ if (!tty_port_initialized(&port->port))
return true;
spin_lock_irqsave(&port->lock, flags);
@@ -503,7 +503,7 @@ int usb_serial_generic_tiocmiwait(struct tty_struct *tty, unsigned long arg)
ret = wait_event_interruptible(port->port.delta_msr_wait,
usb_serial_generic_msr_changed(tty, arg, &cnow));
- if (!ret && !test_bit(ASYNCB_INITIALIZED, &port->port.flags))
+ if (!ret && !tty_port_initialized(&port->port))
ret = -EIO;
return ret;
@@ -606,7 +606,7 @@ int usb_serial_generic_resume(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
- if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
+ if (!tty_port_initialized(&port->port))
continue;
if (port->bulk_in_size) {
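The ASYNCB_INITIALIZED conversions here and in the following files rely on the tty-port accessors added for 4.7, which hide the flag word behind helpers. Roughly, as a sketch of the include/linux/tty.h definitions for orientation:

	static inline bool tty_port_initialized(struct tty_port *port)
	{
		return test_bit(TTY_PORT_INITIALIZED, &port->iflags);
	}

	static inline void tty_port_set_initialized(struct tty_port *port,
						    bool val)
	{
		if (val)
			set_bit(TTY_PORT_INITIALIZED, &port->iflags);
		else
			clear_bit(TTY_PORT_INITIALIZED, &port->iflags);
	}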
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index ffe7e4852..08dc5dd54 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -255,7 +255,7 @@ static int keyspan_write(struct tty_struct *tty,
return count;
}
- dev_dbg(&port->dev, "%s - endpoint %d flip %d\n",
+ dev_dbg(&port->dev, "%s - endpoint %x flip %d\n",
__func__, usb_pipeendpoint(this_urb->pipe), flip);
if (this_urb->status == -EINPROGRESS) {
@@ -300,7 +300,7 @@ static void usa26_indat_callback(struct urb *urb)
endpoint = usb_pipeendpoint(urb->pipe);
if (status) {
- dev_dbg(&urb->dev->dev, "%s - nonzero status: %x on endpoint %d.\n",
+ dev_dbg(&urb->dev->dev, "%s - nonzero status %d on endpoint %x\n",
__func__, status, endpoint);
return;
}
@@ -393,7 +393,8 @@ static void usa26_instat_callback(struct urb *urb)
serial = urb->context;
if (status) {
- dev_dbg(&urb->dev->dev, "%s - nonzero status: %x\n", __func__, status);
+ dev_dbg(&urb->dev->dev, "%s - nonzero status: %d\n",
+ __func__, status);
return;
}
if (urb->actual_length != 9) {
@@ -452,7 +453,7 @@ static void usa28_indat_callback(struct urb *urb)
do {
if (status) {
- dev_dbg(&urb->dev->dev, "%s - nonzero status: %x on endpoint %d.\n",
+ dev_dbg(&urb->dev->dev, "%s - nonzero status %d on endpoint %x\n",
__func__, status, usb_pipeendpoint(urb->pipe));
return;
}
@@ -511,7 +512,8 @@ static void usa28_instat_callback(struct urb *urb)
serial = urb->context;
if (status) {
- dev_dbg(&urb->dev->dev, "%s - nonzero status: %x\n", __func__, status);
+ dev_dbg(&urb->dev->dev, "%s - nonzero status: %d\n",
+ __func__, status);
return;
}
@@ -591,7 +593,8 @@ static void usa49_instat_callback(struct urb *urb)
serial = urb->context;
if (status) {
- dev_dbg(&urb->dev->dev, "%s - nonzero status: %x\n", __func__, status);
+ dev_dbg(&urb->dev->dev, "%s - nonzero status: %d\n",
+ __func__, status);
return;
}
@@ -646,7 +649,7 @@ static void usa49_indat_callback(struct urb *urb)
endpoint = usb_pipeendpoint(urb->pipe);
if (status) {
- dev_dbg(&urb->dev->dev, "%s - nonzero status: %x on endpoint %d.\n",
+ dev_dbg(&urb->dev->dev, "%s - nonzero status %d on endpoint %x\n",
__func__, status, endpoint);
return;
}
@@ -698,7 +701,8 @@ static void usa49wg_indat_callback(struct urb *urb)
serial = urb->context;
if (status) {
- dev_dbg(&urb->dev->dev, "%s - nonzero status: %x\n", __func__, status);
+ dev_dbg(&urb->dev->dev, "%s - nonzero status: %d\n",
+ __func__, status);
return;
}
@@ -774,8 +778,8 @@ static void usa90_indat_callback(struct urb *urb)
endpoint = usb_pipeendpoint(urb->pipe);
if (status) {
- dev_dbg(&urb->dev->dev, "%s - nonzero status: %x on endpoint %d.\n",
- __func__, status, endpoint);
+ dev_dbg(&urb->dev->dev, "%s - nonzero status %d on endpoint %x\n",
+ __func__, status, endpoint);
return;
}
@@ -847,7 +851,8 @@ static void usa90_instat_callback(struct urb *urb)
serial = urb->context;
if (status) {
- dev_dbg(&urb->dev->dev, "%s - nonzero status: %x\n", __func__, status);
+ dev_dbg(&urb->dev->dev, "%s - nonzero status: %d\n",
+ __func__, status);
return;
}
if (urb->actual_length < 14) {
@@ -912,7 +917,8 @@ static void usa67_instat_callback(struct urb *urb)
serial = urb->context;
if (status) {
- dev_dbg(&urb->dev->dev, "%s - nonzero status: %x\n", __func__, status);
+ dev_dbg(&urb->dev->dev, "%s - nonzero status: %d\n",
+ __func__, status);
return;
}
@@ -1082,12 +1088,6 @@ static int keyspan_open(struct tty_struct *tty, struct usb_serial_port *port)
return 0;
}
-static inline void stop_urb(struct urb *urb)
-{
- if (urb && urb->status == -EINPROGRESS)
- usb_kill_urb(urb);
-}
-
static void keyspan_dtr_rts(struct usb_serial_port *port, int on)
{
struct keyspan_port_private *p_priv = usb_get_serial_port_data(port);
@@ -1114,10 +1114,10 @@ static void keyspan_close(struct usb_serial_port *port)
p_priv->out_flip = 0;
p_priv->in_flip = 0;
- stop_urb(p_priv->inack_urb);
+ usb_kill_urb(p_priv->inack_urb);
for (i = 0; i < 2; i++) {
- stop_urb(p_priv->in_urbs[i]);
- stop_urb(p_priv->out_urbs[i]);
+ usb_kill_urb(p_priv->in_urbs[i]);
+ usb_kill_urb(p_priv->out_urbs[i]);
}
}
@@ -1221,8 +1221,8 @@ static struct usb_endpoint_descriptor const *find_ep(struct usb_serial const *se
if (ep->bEndpointAddress == endpoint)
return ep;
}
- dev_warn(&serial->interface->dev, "found no endpoint descriptor for "
- "endpoint %x\n", endpoint);
+ dev_warn(&serial->interface->dev, "found no endpoint descriptor for endpoint %x\n",
+ endpoint);
return NULL;
}
@@ -1237,7 +1237,8 @@ static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint,
if (endpoint == -1)
return NULL; /* endpoint not needed */
- dev_dbg(&serial->interface->dev, "%s - alloc for endpoint %d.\n", __func__, endpoint);
+ dev_dbg(&serial->interface->dev, "%s - alloc for endpoint %x\n",
+ __func__, endpoint);
urb = usb_alloc_urb(0, GFP_KERNEL); /* No ISO */
if (!urb)
return NULL;
@@ -1572,7 +1573,8 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
return -1;
}
- dev_dbg(&port->dev, "%s - endpoint %d\n", __func__, usb_pipeendpoint(this_urb->pipe));
+ dev_dbg(&port->dev, "%s - endpoint %x\n",
+ __func__, usb_pipeendpoint(this_urb->pipe));
/* Save reset port val for resend.
Don't overwrite resend for open/close condition. */
@@ -1838,7 +1840,7 @@ static int keyspan_usa49_send_setup(struct usb_serial *serial,
return -1;
}
- dev_dbg(&port->dev, "%s - endpoint %d (%d)\n",
+ dev_dbg(&port->dev, "%s - endpoint %x (%d)\n",
__func__, usb_pipeendpoint(this_urb->pipe), device_port);
/* Save reset port val for resend.
@@ -2365,9 +2367,9 @@ static void keyspan_disconnect(struct usb_serial *serial)
s_priv = usb_get_serial_data(serial);
- stop_urb(s_priv->instat_urb);
- stop_urb(s_priv->glocont_urb);
- stop_urb(s_priv->indat_urb);
+ usb_kill_urb(s_priv->instat_urb);
+ usb_kill_urb(s_priv->glocont_urb);
+ usb_kill_urb(s_priv->indat_urb);
}
static void keyspan_release(struct usb_serial *serial)
@@ -2495,11 +2497,11 @@ static int keyspan_port_remove(struct usb_serial_port *port)
p_priv = usb_get_serial_port_data(port);
- stop_urb(p_priv->inack_urb);
- stop_urb(p_priv->outcont_urb);
+ usb_kill_urb(p_priv->inack_urb);
+ usb_kill_urb(p_priv->outcont_urb);
for (i = 0; i < 2; i++) {
- stop_urb(p_priv->in_urbs[i]);
- stop_urb(p_priv->out_urbs[i]);
+ usb_kill_urb(p_priv->in_urbs[i]);
+ usb_kill_urb(p_priv->out_urbs[i]);
}
usb_free_urb(p_priv->inack_urb);
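The stop_urb() wrapper removed above guarded usb_kill_urb() behind a racy urb->status check; an URB can complete between the check and the kill, and usb_kill_urb() already tolerates idle (and NULL) URBs. The direct call is therefore both simpler and safer (illustrative sketch):

	static void example_stop(struct urb *urb)
	{
		/* No -EINPROGRESS check needed; usb_kill_urb() is a no-op
		 * for URBs that are not currently submitted. */
		usb_kill_urb(urb);
	}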
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index f1d5df873..7e23d000a 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -503,7 +503,7 @@ static void mxuport_process_read_urb_demux_data(struct urb *urb)
return;
}
- if (test_bit(ASYNCB_INITIALIZED, &demux_port->port.flags)) {
+ if (tty_port_initialized(&demux_port->port)) {
ch = data + HEADER_SIZE;
mxuport_process_read_urb_data(demux_port, ch, rcv_len);
} else {
@@ -544,7 +544,7 @@ static void mxuport_process_read_urb_demux_event(struct urb *urb)
}
demux_port = serial->port[rcv_port];
- if (test_bit(ASYNCB_INITIALIZED, &demux_port->port.flags)) {
+ if (tty_port_initialized(&demux_port->port)) {
ch = data + HEADER_SIZE;
rcv_event = get_unaligned_be16(data + 2);
mxuport_process_read_urb_event(demux_port, ch,
@@ -1348,7 +1348,7 @@ static int mxuport_resume(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
- if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
+ if (!tty_port_initialized(&port->port))
continue;
r = usb_serial_generic_write_start(port, GFP_NOIO);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 07d1ecd56..e1994e264 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -776,7 +776,7 @@ static void sierra_close(struct usb_serial_port *port)
/*
* Need to take susp_lock to make sure port is not already being
- * resumed, but no need to hold it due to ASYNC_INITIALIZED.
+ * resumed, but no need to hold it due to the initialized flag.
*/
spin_lock_irq(&intfdata->susp_lock);
if (--intfdata->open_ports == 0)
@@ -1039,7 +1039,7 @@ static int sierra_resume(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
- if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
+ if (!tty_port_initialized(&port->port))
continue;
err = sierra_submit_delayed_urbs(port);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 299b85b0a..510d14bd0 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -80,6 +80,7 @@ struct ti_device {
int td_open_port_count;
struct usb_serial *td_serial;
int td_is_3410;
+ bool td_rs485_only;
int td_urb_error;
};
@@ -160,6 +161,11 @@ static const struct usb_device_id ti_id_table_3410[] = {
{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
{ USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
+ { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1110_PRODUCT_ID) },
+ { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1130_PRODUCT_ID) },
+ { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
+ { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
+ { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
{ } /* terminator */
};
@@ -193,6 +199,11 @@ static const struct usb_device_id ti_id_table_combined[] = {
{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
{ USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
+ { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1110_PRODUCT_ID) },
+ { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1130_PRODUCT_ID) },
+ { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
+ { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
+ { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
{ } /* terminator */
};
@@ -286,6 +297,9 @@ static int ti_startup(struct usb_serial *serial)
{
struct ti_device *tdev;
struct usb_device *dev = serial->dev;
+ struct usb_host_interface *cur_altsetting;
+ int num_endpoints;
+ u16 vid, pid;
int status;
dev_dbg(&dev->dev,
@@ -309,8 +323,22 @@ static int ti_startup(struct usb_serial *serial)
dev_dbg(&dev->dev, "%s - device type is %s\n", __func__,
tdev->td_is_3410 ? "3410" : "5052");
- /* if we have only 1 configuration, download firmware */
- if (dev->descriptor.bNumConfigurations == 1) {
+ vid = le16_to_cpu(dev->descriptor.idVendor);
+ pid = le16_to_cpu(dev->descriptor.idProduct);
+ if (vid == MXU1_VENDOR_ID) {
+ switch (pid) {
+ case MXU1_1130_PRODUCT_ID:
+ case MXU1_1131_PRODUCT_ID:
+ tdev->td_rs485_only = true;
+ break;
+ }
+ }
+
+ cur_altsetting = serial->interface->cur_altsetting;
+ num_endpoints = cur_altsetting->desc.bNumEndpoints;
+
+ /* if we have only 1 configuration and 1 endpoint, download firmware */
+ if (dev->descriptor.bNumConfigurations == 1 && num_endpoints == 1) {
status = ti_download_firmware(tdev);
if (status != 0)
@@ -365,7 +393,11 @@ static int ti_port_probe(struct usb_serial_port *port)
port->port.closing_wait = msecs_to_jiffies(10 * closing_wait);
tport->tp_port = port;
tport->tp_tdev = usb_get_serial_data(port->serial);
- tport->tp_uart_mode = 0; /* default is RS232 */
+
+ if (tport->tp_tdev->td_rs485_only)
+ tport->tp_uart_mode = TI_UART_485_RECEIVER_DISABLED;
+ else
+ tport->tp_uart_mode = TI_UART_232;
usb_set_serial_port_data(port, tport);
@@ -1444,6 +1476,16 @@ static int ti_download_firmware(struct ti_device *tdev)
const struct firmware *fw_p;
char buf[32];
+ if (le16_to_cpu(dev->descriptor.idVendor) == MXU1_VENDOR_ID) {
+ snprintf(buf,
+ sizeof(buf),
+ "/*(DEBLOBBED)*/",
+ le16_to_cpu(dev->descriptor.idProduct));
+
+ status = reject_firmware(&fw_p, buf, &dev->dev);
+ goto check_firmware;
+ }
+
/* try ID specific firmware first, then try generic firmware */
sprintf(buf, "/*(DEBLOBBED)*/",
le16_to_cpu(dev->descriptor.idVendor),
@@ -1481,6 +1523,8 @@ static int ti_download_firmware(struct ti_device *tdev)
}
status = reject_firmware(&fw_p, buf, &dev->dev);
}
+
+check_firmware:
if (status) {
dev_err(&dev->dev, "%s - firmware not found\n", __func__);
return -ENOENT;
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index 98f35c656..bbfd3a184 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -60,6 +60,14 @@
#define HONEYWELL_VENDOR_ID 0x10ac
#define HONEYWELL_HGI80_PRODUCT_ID 0x0102 /* Honeywell HGI80 */
+/* Moxa UPORT 11x0 vendor and product IDs */
+#define MXU1_VENDOR_ID 0x110a
+#define MXU1_1110_PRODUCT_ID 0x1110
+#define MXU1_1130_PRODUCT_ID 0x1130
+#define MXU1_1131_PRODUCT_ID 0x1131
+#define MXU1_1150_PRODUCT_ID 0x1150
+#define MXU1_1151_PRODUCT_ID 0x1151
+
/* Commands */
#define TI_GET_VERSION 0x01
#define TI_GET_PORT_STATUS 0x02
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 46f1f13b4..b1b9bac44 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -96,7 +96,8 @@ static int allocate_minors(struct usb_serial *serial, int num_ports)
mutex_lock(&table_lock);
for (i = 0; i < num_ports; ++i) {
port = serial->port[i];
- minor = idr_alloc(&serial_minors, port, 0, 0, GFP_KERNEL);
+ minor = idr_alloc(&serial_minors, port, 0,
+ USB_SERIAL_TTY_MINORS, GFP_KERNEL);
if (minor < 0)
goto error;
port->minor = minor;
@@ -254,7 +255,7 @@ static int serial_open(struct tty_struct *tty, struct file *filp)
*
* Shut down a USB serial port. Serialized against activate by the
* tport mutex and kept to matching open/close pairs
- * of calls by the ASYNCB_INITIALIZED flag.
+ * of calls by the initialized flag.
*
* Not called if tty is console.
*/
@@ -815,7 +816,7 @@ static int usb_serial_probe(struct usb_interface *interface,
}
}
-#if defined(CONFIG_USB_SERIAL_PL2303) || defined(CONFIG_USB_SERIAL_PL2303_MODULE)
+#if IS_ENABLED(CONFIG_USB_SERIAL_PL2303)
/* BEGIN HORRIBLE HACK FOR PL2303 */
/* this is needed due to the looney way its endpoints are set up */
if (((le16_to_cpu(dev->descriptor.idVendor) == PL2303_VENDOR_ID) &&
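IS_ENABLED() above covers both built-in and modular configurations in one test. An illustrative equivalence, not part of the patch:

	/*
	 *	#if IS_ENABLED(CONFIG_USB_SERIAL_PL2303)
	 *
	 * is true for CONFIG_USB_SERIAL_PL2303=y or =m, matching the old
	 *
	 *	#if defined(CONFIG_USB_SERIAL_PL2303) || \
	 *	    defined(CONFIG_USB_SERIAL_PL2303_MODULE)
	 */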
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index be9cb61b4..3dfdfc812 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -464,7 +464,7 @@ void usb_wwan_close(struct usb_serial_port *port)
/*
* Need to take susp_lock to make sure port is not already being
- * resumed, but no need to hold it due to ASYNC_INITIALIZED.
+ * resumed, but no need to hold it due to the initialized flag.
*/
spin_lock_irq(&intfdata->susp_lock);
if (--intfdata->open_ports == 0)
@@ -682,7 +682,7 @@ int usb_wwan_resume(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
- if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
+ if (!tty_port_initialized(&port->port))
continue;
portdata = usb_get_serial_port_data(port);
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 171fa7d79..1d8b03c81 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -829,8 +829,10 @@ static int alauda_write_lba(struct us_data *us, u16 lba,
pba = MEDIA_INFO(us).lba_to_pba[zone][lba_offset];
if (pba == 1) {
- /* Maybe it is impossible to write to PBA 1.
- Fake success, but don't do anything. */
+ /*
+ * Maybe it is impossible to write to PBA 1.
+ * Fake success, but don't do anything.
+ */
printk(KERN_WARNING
"alauda_write_lba: avoid writing to pba 1\n");
return USB_STOR_TRANSPORT_GOOD;
@@ -977,10 +979,12 @@ static int alauda_read_data(struct us_data *us, unsigned long address,
usb_stor_dbg(us, "Read %d zero pages (LBA %d) page %d\n",
pages, lba, page);
- /* This is not really an error. It just means
- that the block has never been written.
- Instead of returning USB_STOR_TRANSPORT_ERROR
- it is better to return all zero data. */
+ /*
+ * This is not really an error. It just means
+ * that the block has never been written.
+ * Instead of returning USB_STOR_TRANSPORT_ERROR
+ * it is better to return all zero data.
+ */
memset(buffer, 0, len);
} else {
@@ -1222,8 +1226,10 @@ static int alauda_transport(struct scsi_cmnd *srb, struct us_data *us)
}
if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
- /* sure. whatever. not like we can stop the user from popping
- the media out of the device (no locking doors, etc) */
+ /*
+ * sure. whatever. not like we can stop the user from popping
+ * the media out of the device (no locking doors, etc)
+ */
return USB_STOR_TRANSPORT_GOOD;
}
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index c80d3dec9..5e4af44d7 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -110,13 +110,17 @@ static void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
/* first build the ATACB command */
srb->cmd_len = 16;
- srb->cmnd[0] = 0x24; /* bVSCBSignature : vendor-specific command
- this value can change, but most(all ?) manufacturers
- keep the cypress default : 0x24 */
+ srb->cmnd[0] = 0x24; /*
+ * bVSCBSignature : vendor-specific command
+ * this value can change, but most (all?) manufacturers
+ * keep the cypress default: 0x24
+ */
srb->cmnd[1] = 0x24; /* bVSCBSubCommand : 0x24 for ATACB */
- srb->cmnd[3] = 0xff - 1; /* features, sector count, lba low, lba med
- lba high, device, command are valid */
+ srb->cmnd[3] = 0xff - 1; /*
+ * features, sector count, lba low, lba med
+ * lba high, device, command are valid
+ */
srb->cmnd[4] = 1; /* TransferBlockCount : 512 */
if (save_cmnd[0] == ATA_16) {
@@ -155,8 +159,7 @@ static void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
usb_stor_transparent_scsi_command(srb, us);
- /* if the device doesn't support ATACB
- */
+ /* if the device doesn't support ATACB */
if (srb->result == SAM_STAT_CHECK_CONDITION &&
memcmp(srb->sense_buffer, usb_stor_sense_invalidCDB,
sizeof(usb_stor_sense_invalidCDB)) == 0) {
@@ -164,7 +167,8 @@ static void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
goto end;
}
- /* if ck_cond flags is set, and there wasn't critical error,
+ /*
+ * if ck_cond flags is set, and there wasn't critical error,
* build the special sense
*/
if ((srb->result != (DID_ERROR << 16) &&
@@ -176,11 +180,11 @@ static void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
unsigned char *desc = sb + 8;
int tmp_result;
- /* build the command for
- * reading the ATA registers */
+ /* build the command for reading the ATA registers */
scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sizeof(regs));
- /* we use the same command as before, but we set
+ /*
+ * we use the same command as before, but we set
* the read taskfile bit, for not executing atacb command,
* but reading register selected in srb->cmnd[4]
*/
@@ -204,10 +208,11 @@ static void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)
sb[2] = 0; /* ATA PASS THROUGH INFORMATION AVAILABLE */
sb[3] = 0x1D;
- /* XXX we should generate sk, asc, ascq from status and error
+ /*
+ * XXX we should generate sk, asc, ascq from status and error
* regs
* (see 11.1 Error translation ATA device error to SCSI error
- * map, and ata_to_sense_error from libata.)
+ * map, and ata_to_sense_error from libata.)
*/
/* Sense data is current and format is descriptor. */
@@ -258,7 +263,8 @@ static int cypress_probe(struct usb_interface *intf,
if (result)
return result;
- /* Among CY7C68300 chips, the A revision does not support Cypress ATACB
+ /*
+ * Among CY7C68300 chips, the A revision does not support Cypress ATACB
* Filter out this revision from EEPROM default descriptor values
*/
device = interface_to_usbdev(intf);
diff --git a/drivers/usb/storage/datafab.c b/drivers/usb/storage/datafab.c
index aa4f51944..723197af6 100644
--- a/drivers/usb/storage/datafab.c
+++ b/drivers/usb/storage/datafab.c
@@ -1,4 +1,5 @@
-/* Driver for Datafab USB Compact Flash reader
+/*
+ * Driver for Datafab USB Compact Flash reader
*
* datafab driver v0.1:
*
@@ -693,18 +694,23 @@ static int datafab_transport(struct scsi_cmnd *srb, struct us_data *us)
}
if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
- // sure. whatever. not like we can stop the user from
- // popping the media out of the device (no locking doors, etc)
- //
+ /*
+ * sure. whatever. not like we can stop the user from
+ * popping the media out of the device (no locking doors, etc)
+ */
return USB_STOR_TRANSPORT_GOOD;
}
if (srb->cmnd[0] == START_STOP) {
- /* this is used by sd.c'check_scsidisk_media_change to detect
- media change */
+ /*
+ * this is used by sd.c's check_scsidisk_media_change to detect
+ * media change
+ */
usb_stor_dbg(us, "START_STOP\n");
- /* the first datafab_id_device after a media change returns
- an error (determined experimentally) */
+ /*
+ * the first datafab_id_device after a media change returns
+ * an error (determined experimentally)
+ */
rc = datafab_id_device(us, info);
if (rc == USB_STOR_TRANSPORT_GOOD) {
info->sense_key = NO_SENSE;
diff --git a/drivers/usb/storage/debug.c b/drivers/usb/storage/debug.c
index 5a12c0313..8d20804a5 100644
--- a/drivers/usb/storage/debug.c
+++ b/drivers/usb/storage/debug.c
@@ -1,4 +1,5 @@
-/* Driver for USB Mass Storage compliant devices
+/*
+ * Driver for USB Mass Storage compliant devices
* Debugging Functions Source Code File
*
* Current development and maintenance by:
diff --git a/drivers/usb/storage/debug.h b/drivers/usb/storage/debug.h
index 6b365ce4e..8ab73299b 100644
--- a/drivers/usb/storage/debug.h
+++ b/drivers/usb/storage/debug.h
@@ -1,4 +1,5 @@
-/* Driver for USB Mass Storage compliant devices
+/*
+ * Driver for USB Mass Storage compliant devices
* Debugging Functions Header File
*
* Current development and maintenance by:
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 4646ef09e..f39c9b6be 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -555,8 +555,10 @@ static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
/* check bulk status */
residue = le32_to_cpu(bcs->Residue);
- /* try to compute the actual residue, based on how much data
- * was really transferred and what the device tells us */
+ /*
+ * try to compute the actual residue, based on how much data
+ * was really transferred and what the device tells us
+ */
if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
residue = min(residue, transfer_length);
if (us->srb != NULL)
@@ -857,9 +859,6 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
u8 ExtBuf[4];
u32 bn = PhyBlockAddr * 0x20 + PageNum;
- /* printk(KERN_INFO "MS --- MS_ReaderReadPage,
- PhyBlockAddr = %x, PageNum = %x\n", PhyBlockAddr, PageNum); */
-
result = ene_load_bincode(us, MS_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
@@ -1136,8 +1135,6 @@ static int ms_read_copyblock(struct us_data *us, u16 oldphy, u16 newphy,
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- /* printk(KERN_INFO "MS_ReaderCopyBlock --- PhyBlockAddr = %x,
- PageNum = %x\n", PhyBlockAddr, PageNum); */
result = ene_load_bincode(us, MS_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
@@ -1171,8 +1168,6 @@ static int ms_read_eraseblock(struct us_data *us, u32 PhyBlockAddr)
int result;
u32 bn = PhyBlockAddr;
- /* printk(KERN_INFO "MS --- ms_read_eraseblock,
- PhyBlockAddr = %x\n", PhyBlockAddr); */
result = ene_load_bincode(us, MS_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
@@ -1250,8 +1245,6 @@ static int ms_lib_overwrite_extra(struct us_data *us, u32 PhyBlockAddr,
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- /* printk("MS --- MS_LibOverwriteExtra,
- PhyBlockAddr = %x, PageNum = %x\n", PhyBlockAddr, PageNum); */
result = ene_load_bincode(us, MS_RW_PATTERN);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
@@ -1337,7 +1330,6 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
int result;
u8 ExtBuf[4];
- /* printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum); */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->DataTransferLength = 0x4;
@@ -1536,9 +1528,6 @@ static int ms_lib_read_extrablock(struct us_data *us, u32 PhyBlock,
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- /* printk("MS_LibReadExtraBlock --- PhyBlock = %x,
- PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen); */
-
/* Read Extra Data */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
@@ -2385,8 +2374,10 @@ static int ene_ub6250_reset_resume(struct usb_interface *iface)
/* Report the reset to the SCSI core */
usb_stor_reset_resume(iface);
- /* FIXME: Notify the subdrivers that they need to reinitialize
- * the device */
+ /*
+ * FIXME: Notify the subdrivers that they need to reinitialize
+ * the device
+ */
info->Power_IsResum = true;
/*info->SD_Status.Ready = 0; */
info->SD_Status = *(struct SD_STATUS *)&tmp;
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index 3f2b08966..c0a5d9544 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -1,4 +1,5 @@
-/* Driver for Freecom USB/IDE adaptor
+/*
+ * Driver for Freecom USB/IDE adaptor
*
* Freecom v0.1:
*
@@ -84,25 +85,33 @@ struct freecom_status {
u8 Pad[60];
};
-/* Freecom stuffs the interrupt status in the INDEX_STAT bit of the ide
- * register. */
+/*
+ * Freecom stuffs the interrupt status in the INDEX_STAT bit of the ide
+ * register.
+ */
#define FCM_INT_STATUS 0x02 /* INDEX_STAT */
#define FCM_STATUS_BUSY 0x80
-/* These are the packet types. The low bit indicates that this command
- * should wait for an interrupt. */
+/*
+ * These are the packet types. The low bit indicates that this command
+ * should wait for an interrupt.
+ */
#define FCM_PACKET_ATAPI 0x21
#define FCM_PACKET_STATUS 0x20
-/* Receive data from the IDE interface. The ATAPI packet has already
- * waited, so the data should be immediately available. */
+/*
+ * Receive data from the IDE interface. The ATAPI packet has already
+ * waited, so the data should be immediately available.
+ */
#define FCM_PACKET_INPUT 0x81
/* Send data to the IDE interface. */
#define FCM_PACKET_OUTPUT 0x01
-/* Write a value to an ide register. Or the ide register to write after
- * munging the address a bit. */
+/*
+ * Write a value to an ide register. Or the ide register to write after
+ * munging the address a bit.
+ */
#define FCM_PACKET_IDE_WRITE 0x40
#define FCM_PACKET_IDE_READ 0xC0
@@ -251,16 +260,20 @@ static int freecom_transport(struct scsi_cmnd *srb, struct us_data *us)
result = usb_stor_bulk_transfer_buf (us, opipe, fcb,
FCM_PACKET_LENGTH, NULL);
- /* The Freecom device will only fail if there is something wrong in
+ /*
+ * The Freecom device will only fail if there is something wrong in
* USB land. It returns the status in its own registers, which
- * come back in the bulk pipe. */
+ * come back in the bulk pipe.
+ */
if (result != USB_STOR_XFER_GOOD) {
usb_stor_dbg(us, "freecom transport error\n");
return USB_STOR_TRANSPORT_ERROR;
}
- /* There are times we can optimize out this status read, but it
- * doesn't hurt us to always do it now. */
+ /*
+ * There are times we can optimize out this status read, but it
+ * doesn't hurt us to always do it now.
+ */
result = usb_stor_bulk_transfer_buf (us, ipipe, fst,
FCM_STATUS_PACKET_LENGTH, &partial);
usb_stor_dbg(us, "foo Status result %d %u\n", result, partial);
@@ -269,7 +282,8 @@ static int freecom_transport(struct scsi_cmnd *srb, struct us_data *us)
US_DEBUG(pdump(us, (void *)fst, partial));
- /* The firmware will time-out commands after 20 seconds. Some commands
+ /*
+ * The firmware will time-out commands after 20 seconds. Some commands
* can legitimately take longer than this, so we use a different
* command that only waits for the interrupt and then sends status,
* without having to send a new ATAPI command to the device.
@@ -291,7 +305,8 @@ static int freecom_transport(struct scsi_cmnd *srb, struct us_data *us)
result = usb_stor_bulk_transfer_buf (us, opipe, fcb,
FCM_PACKET_LENGTH, NULL);
- /* The Freecom device will only fail if there is something
+ /*
+ * The Freecom device will only fail if there is something
* wrong in USB land. It returns the status in its own
* registers, which come back in the bulk pipe.
*/
@@ -318,9 +333,11 @@ static int freecom_transport(struct scsi_cmnd *srb, struct us_data *us)
return USB_STOR_TRANSPORT_FAILED;
}
- /* The device might not have as much data available as we
+ /*
+ * The device might not have as much data available as we
* requested. If you ask for more than the device has, this reads
- * and such will hang. */
+ * and such will hang.
+ */
usb_stor_dbg(us, "Device indicates that it has %d bytes available\n",
le16_to_cpu(fst->Count));
usb_stor_dbg(us, "SCSI requested %d\n", scsi_bufflen(srb));
@@ -344,16 +361,20 @@ static int freecom_transport(struct scsi_cmnd *srb, struct us_data *us)
length);
}
- /* What we do now depends on what direction the data is supposed to
- * move in. */
+ /*
+ * What we do now depends on what direction the data is supposed to
+ * move in.
+ */
switch (us->srb->sc_data_direction) {
case DMA_FROM_DEVICE:
/* catch bogus "read 0 length" case */
if (!length)
break;
- /* Make sure that the status indicates that the device
- * wants data as well. */
+ /*
+ * Make sure that the status indicates that the device
+ * wants data as well.
+ */
if ((fst->Status & DRQ_STAT) == 0 || (fst->Reason & 3) != 2) {
usb_stor_dbg(us, "SCSI wants data, drive doesn't have any\n");
return USB_STOR_TRANSPORT_FAILED;
@@ -384,8 +405,10 @@ static int freecom_transport(struct scsi_cmnd *srb, struct us_data *us)
/* catch bogus "write 0 length" case */
if (!length)
break;
- /* Make sure the status indicates that the device wants to
- * send us data. */
+ /*
+ * Make sure the status indicates that the device wants to
+ * send us data.
+ */
/* !!IMPLEMENT!! */
result = freecom_writedata (srb, us, ipipe, opipe, length);
if (result != USB_STOR_TRANSPORT_GOOD)
@@ -431,7 +454,8 @@ static int init_freecom(struct us_data *us)
int result;
char *buffer = us->iobuf;
- /* The DMA-mapped I/O buffer is 64 bytes long, just right for
+ /*
+ * The DMA-mapped I/O buffer is 64 bytes long, just right for
* all our packets. No need to allocate any extra buffer space.
*/
@@ -440,7 +464,8 @@ static int init_freecom(struct us_data *us)
buffer[32] = '\0';
usb_stor_dbg(us, "String returned from FC init is: %s\n", buffer);
- /* Special thanks to the people at Freecom for providing me with
+ /*
+ * Special thanks to the people at Freecom for providing me with
* this "magic sequence", which they use in their Windows and MacOS
 * drivers to make sure that all the attached peripherals are
* properly reset.
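
The length check in the freecom transport above is worth seeing in isolation: the driver must never ask the bulk pipe for more bytes than fst->Count says the drive has ready, or the read will hang. A minimal user-space sketch of that clamp (the helper name and direct parameters are illustrative, not part of the driver):

#include <stdio.h>

/* Clamp a requested transfer to what the drive reports available. */
static unsigned int freecom_clamp_len(unsigned int scsi_requested,
                                      unsigned int fst_count)
{
	return scsi_requested < fst_count ? scsi_requested : fst_count;
}

int main(void)
{
	/* SCSI asks for 4096 bytes but the drive has only 512 ready */
	printf("transfer %u bytes\n", freecom_clamp_len(4096, 512));
	return 0;
}
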
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 31fa2e920..d9d8c17e0 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -1,4 +1,5 @@
-/* Special Initializers for certain USB Mass Storage devices
+/*
+ * Special Initializers for certain USB Mass Storage devices
*
* Current development and maintenance by:
* (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
@@ -42,8 +43,10 @@
#include "debug.h"
#include "transport.h"
-/* This places the Shuttle/SCM USB<->SCSI bridge devices in multi-target
- * mode */
+/*
+ * This places the Shuttle/SCM USB<->SCSI bridge devices in multi-target
+ * mode
+ */
int usb_stor_euscsi_init(struct us_data *us)
{
int result;
@@ -57,8 +60,10 @@ int usb_stor_euscsi_init(struct us_data *us)
return 0;
}
-/* This function is required to activate all four slots on the UCR-61S2B
- * flash reader */
+/*
+ * This function is required to activate all four slots on the UCR-61S2B
+ * flash reader
+ */
int usb_stor_ucr61s2b_init(struct us_data *us)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap*) us->iobuf;
diff --git a/drivers/usb/storage/initializers.h b/drivers/usb/storage/initializers.h
index 529327fbb..039abf4d1 100644
--- a/drivers/usb/storage/initializers.h
+++ b/drivers/usb/storage/initializers.h
@@ -1,4 +1,5 @@
-/* Header file for Special Initializers for certain USB Mass Storage devices
+/*
+ * Header file for Special Initializers for certain USB Mass Storage devices
*
* Current development and maintenance by:
* (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
@@ -38,12 +39,16 @@
#include "usb.h"
#include "transport.h"
-/* This places the Shuttle/SCM USB<->SCSI bridge devices in multi-target
- * mode */
+/*
+ * This places the Shuttle/SCM USB<->SCSI bridge devices in multi-target
+ * mode
+ */
int usb_stor_euscsi_init(struct us_data *us);
-/* This function is required to activate all four slots on the UCR-61S2B
- * flash reader */
+/*
+ * This function is required to activate all four slots on the UCR-61S2B
+ * flash reader
+ */
int usb_stor_ucr61s2b_init(struct us_data *us);
/* This places the HUAWEI E220 devices in multi-port mode */
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 39afd7045..fba4005dd 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1,4 +1,5 @@
-/* Transport & Protocol Driver for In-System Design, Inc. ISD200 ASIC
+/*
+ * Transport & Protocol Driver for In-System Design, Inc. ISD200 ASIC
*
* Current development and maintenance:
* (C) 2001-2002 Björn Stenberg (bjorn@haxx.se)
@@ -628,7 +629,8 @@ static void isd200_invoke_transport( struct us_data *us,
srb->cmd_len = sizeof(ataCdb->generic);
transferStatus = usb_stor_Bulk_transport(srb, us);
- /* if the command gets aborted by the higher layers, we need to
+ /*
+ * if the command gets aborted by the higher layers, we need to
* short-circuit all other processing
*/
if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
@@ -695,15 +697,18 @@ static void isd200_invoke_transport( struct us_data *us,
}
}
- /* Regardless of auto-sense, if we _know_ we have an error
+ /*
+ * Regardless of auto-sense, if we _know_ we have an error
* condition, show that in the result code
*/
if (transferStatus == USB_STOR_TRANSPORT_FAILED)
srb->result = SAM_STAT_CHECK_CONDITION;
return;
- /* abort processing: the bulk-only transport requires a reset
- * following an abort */
+ /*
+ * abort processing: the bulk-only transport requires a reset
+ * following an abort
+ */
Handle_Abort:
srb->result = DID_ABORT << 16;
@@ -965,20 +970,22 @@ static int isd200_try_enum(struct us_data *us, unsigned char master_slave,
info->DeviceHead = master_slave;
break;
}
- /* check Cylinder High/Low to
- determine if it is an ATAPI device
- */
+ /*
+ * check Cylinder High/Low to
+ * determine if it is an ATAPI device
+ */
else if (regs[ATA_REG_HCYL_OFFSET] == 0xEB &&
regs[ATA_REG_LCYL_OFFSET] == 0x14) {
- /* It seems that the RICOH
- MP6200A CD/RW drive will
- report itself okay as a
- slave when it is really a
- master. So this check again
- as a master device just to
- make sure it doesn't report
- itself okay as a master also
- */
+ /*
+ * It seems that the RICOH
+ * MP6200A CD/RW drive will
+ * report itself okay as a
+ * slave when it is really a
+ * master. So we check again
+ * as a master device just to
+ * make sure it doesn't report
+ * itself okay as a master also
+ */
if ((master_slave & ATA_ADDRESS_DEVHEAD_SLAVE) &&
!recheckAsMaster) {
usb_stor_dbg(us, " Identified ATAPI device as slave. Rechecking again as master\n");
@@ -1176,9 +1183,11 @@ static int isd200_get_inquiry_data( struct us_data *us )
if (id[ATA_ID_COMMAND_SET_2] & COMMANDSET_MEDIA_STATUS) {
usb_stor_dbg(us, " Device supports Media Status Notification\n");
- /* Indicate that it is enabled, even though it is not
- * This allows the lock/unlock of the media to work
- * correctly.
+ /*
+ * Indicate that it is enabled, even
+ * though it is not.
+ * This allows the lock/unlock of the
+ * media to work correctly.
*/
info->DeviceFlags |= DF_MEDIA_STATUS_ENABLED;
}
@@ -1197,7 +1206,7 @@ static int isd200_get_inquiry_data( struct us_data *us )
usb_stor_dbg(us, "Protocol changed to: %s\n",
us->protocol_name);
- /* Free driver structure */
+ /* Free driver structure */
us->extra_destructor(info);
kfree(info);
us->extra = NULL;
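
The signature test in isd200_try_enum above is standard ATA/ATAPI behaviour: after a reset, an ATAPI device loads 0x14 into the Cylinder Low register and 0xEB into Cylinder High. A standalone sketch of the check (register values are passed in directly here, unlike the driver's register block):

#include <stdbool.h>
#include <stdio.h>

/* ATAPI signature after reset: LCYL = 0x14, HCYL = 0xEB. */
static bool is_atapi(unsigned char lcyl, unsigned char hcyl)
{
	return lcyl == 0x14 && hcyl == 0xEB;
}

int main(void)
{
	printf("%s\n", is_atapi(0x14, 0xEB) ? "ATAPI" : "ATA");
	return 0;
}
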
diff --git a/drivers/usb/storage/jumpshot.c b/drivers/usb/storage/jumpshot.c
index ee613e258..011e52706 100644
--- a/drivers/usb/storage/jumpshot.c
+++ b/drivers/usb/storage/jumpshot.c
@@ -1,4 +1,5 @@
-/* Driver for Lexar "Jumpshot" Compact Flash reader
+/*
+ * Driver for Lexar "Jumpshot" Compact Flash reader
*
* jumpshot driver v0.1:
*
@@ -618,18 +619,23 @@ static int jumpshot_transport(struct scsi_cmnd *srb, struct us_data *us)
}
if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
- // sure. whatever. not like we can stop the user from popping
- // the media out of the device (no locking doors, etc)
- //
+ /*
+ * sure. whatever. not like we can stop the user from popping
+ * the media out of the device (no locking doors, etc)
+ */
return USB_STOR_TRANSPORT_GOOD;
}
if (srb->cmnd[0] == START_STOP) {
- /* this is used by sd.c'check_scsidisk_media_change to detect
- media change */
+ /*
+ * this is used by sd.c's check_scsidisk_media_change() to detect
+ * media change
+ */
usb_stor_dbg(us, "START_STOP\n");
- /* the first jumpshot_id_device after a media change returns
- an error (determined experimentally) */
+ /*
+ * the first jumpshot_id_device after a media change returns
+ * an error (determined experimentally)
+ */
rc = jumpshot_id_device(us, info);
if (rc == USB_STOR_TRANSPORT_GOOD) {
info->sense_key = NO_SENSE;
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index ae201e694..f9d407f0b 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -1,4 +1,5 @@
-/* Driver for Rio Karma
+/*
+ * Driver for Rio Karma
*
* (c) 2006 Bob Copeland <me@bobcopeland.com>
* (c) 2006 Keith Bennett <keith@mcs.st-and.ac.uk>
diff --git a/drivers/usb/storage/option_ms.c b/drivers/usb/storage/option_ms.c
index b2b35b1d7..57282f123 100644
--- a/drivers/usb/storage/option_ms.c
+++ b/drivers/usb/storage/option_ms.c
@@ -65,7 +65,8 @@ static int option_rezero(struct us_data *us)
goto out;
}
- /* Some of the devices need to be asked for a response, but we don't
+ /*
+ * Some of the devices need to be asked for a response, but we don't
* care what that response is.
*/
usb_stor_bulk_transfer_buf(us,
@@ -140,7 +141,8 @@ int option_ms_init(struct us_data *us)
usb_stor_dbg(us, "Option MS: %s\n", "option_ms_init called");
- /* Additional test for vendor information via INQUIRY,
+ /*
+ * Additional test for vendor information via INQUIRY,
* because some vendor/product IDs are ambiguous
*/
result = option_inquiry(us);
diff --git a/drivers/usb/storage/protocol.c b/drivers/usb/storage/protocol.c
index 12e3c2fac..74c38870a 100644
--- a/drivers/usb/storage/protocol.c
+++ b/drivers/usb/storage/protocol.c
@@ -1,4 +1,5 @@
-/* Driver for USB Mass Storage compliant devices
+/*
+ * Driver for USB Mass Storage compliant devices
*
* Current development and maintenance by:
* (c) 1999-2002 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
@@ -75,7 +76,8 @@ void usb_stor_pad12_command(struct scsi_cmnd *srb, struct us_data *us)
void usb_stor_ufi_command(struct scsi_cmnd *srb, struct us_data *us)
{
- /* fix some commands -- this is a form of mode translation
+ /*
+ * fix some commands -- this is a form of mode translation:
 * UFI devices only accept 12-byte commands
*
* NOTE: This only works because a scsi_cmnd struct field contains
@@ -127,7 +129,8 @@ EXPORT_SYMBOL_GPL(usb_stor_transparent_scsi_command);
* Scatter-gather transfer buffer access routines
***********************************************************************/
-/* Copy a buffer of length buflen to/from the srb's transfer buffer.
+/*
+ * Copy a buffer of length buflen to/from the srb's transfer buffer.
* Update the **sgptr and *offset variables so that the next copy will
* pick up from where this one left off.
*/
@@ -175,7 +178,8 @@ unsigned int usb_stor_access_xfer_buf(unsigned char *buffer,
}
EXPORT_SYMBOL_GPL(usb_stor_access_xfer_buf);
-/* Store the contents of buffer into srb's transfer buffer and set the
+/*
+ * Store the contents of buffer into srb's transfer buffer and set the
* SCSI residue.
*/
void usb_stor_set_xfer_buf(unsigned char *buffer,
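
The UFI translation described above amounts to zero-padding every CDB to a fixed 12 bytes before it goes on the wire. A minimal sketch of that padding (buffer handling is simplified relative to the driver's scsi_cmnd plumbing):

#include <stdio.h>
#include <string.h>

#define UFI_CDB_LEN 12

/* Copy a shorter SCSI CDB into a zeroed 12-byte UFI command block. */
static void ufi_pad_cdb(unsigned char out[UFI_CDB_LEN],
                        const unsigned char *cdb, size_t cmd_len)
{
	memset(out, 0, UFI_CDB_LEN);
	memcpy(out, cdb, cmd_len < UFI_CDB_LEN ? cmd_len : UFI_CDB_LEN);
}

int main(void)
{
	const unsigned char tur[6] = { 0 };	/* TEST UNIT READY */
	unsigned char out[UFI_CDB_LEN];

	ufi_pad_cdb(out, tur, sizeof(tur));
	printf("6-byte CDB sent as %d bytes\n", UFI_CDB_LEN);
	return 0;
}
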
diff --git a/drivers/usb/storage/protocol.h b/drivers/usb/storage/protocol.h
index ffc3e2af0..a55666880 100644
--- a/drivers/usb/storage/protocol.h
+++ b/drivers/usb/storage/protocol.h
@@ -1,4 +1,5 @@
-/* Driver for USB Mass Storage compliant devices
+/*
+ * Driver for USB Mass Storage compliant devices
* Protocol Functions Header File
*
* Current development and maintenance by:
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 20433563a..4176d1af9 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -1,4 +1,5 @@
-/* Driver for Realtek RTS51xx USB card reader
+/*
+ * Driver for Realtek RTS51xx USB card reader
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
@@ -267,8 +268,10 @@ static int rts51x_bulk_transport(struct us_data *us, u8 lun,
if (bcs->Tag != us->tag)
return USB_STOR_TRANSPORT_ERROR;
- /* try to compute the actual residue, based on how much data
- * was really transferred and what the device tells us */
+ /*
+ * try to compute the actual residue, based on how much data
+ * was really transferred and what the device tells us
+ */
if (residue)
residue = residue < buf_len ? residue : buf_len;
@@ -286,7 +289,8 @@ static int rts51x_bulk_transport(struct us_data *us, u8 lun,
return USB_STOR_TRANSPORT_FAILED;
case US_BULK_STAT_PHASE:
- /* phase error -- note that a transport reset will be
+ /*
+ * phase error -- note that a transport reset will be
* invoked by the invoke_transport() function
*/
return USB_STOR_TRANSPORT_ERROR;
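
The residue handling in rts51x_bulk_transport above follows a simple rule that the Bulk-only code later in this patch applies as well: a residue reported by the device is only believable up to the number of bytes actually requested. As a sketch:

#include <stdio.h>

/* A residue larger than the request is bogus; cap it at buf_len. */
static unsigned int clamp_residue(unsigned int residue, unsigned int buf_len)
{
	return residue < buf_len ? residue : buf_len;
}

int main(void)
{
	printf("%u\n", clamp_residue(8192, 4096));	/* prints 4096 */
	return 0;
}
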
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 90901861b..33eb923df 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -1,4 +1,5 @@
-/* Driver for USB Mass Storage compliant devices
+/*
+ * Driver for USB Mass Storage compliant devices
* SCSI layer glue code
*
* Current development and maintenance by:
@@ -58,7 +59,8 @@
#include "transport.h"
#include "protocol.h"
-/* Vendor IDs for companies that seem to include the READ CAPACITY bug
+/*
+ * Vendor IDs for companies that seem to include the READ CAPACITY bug
* in all their devices
*/
#define VENDOR_ID_NOKIA 0x0421
@@ -87,7 +89,8 @@ static int slave_alloc (struct scsi_device *sdev)
*/
sdev->inquiry_len = 36;
- /* USB has unusual DMA-alignment requirements: Although the
+ /*
+ * USB has unusual DMA-alignment requirements: Although the
* starting address of each scatter-gather element doesn't matter,
* the length of each element except the last must be divisible
* by the Bulk maxpacket value. There's currently no way to
@@ -115,7 +118,8 @@ static int slave_configure(struct scsi_device *sdev)
{
struct us_data *us = host_to_us(sdev->host);
- /* Many devices have trouble transferring more than 32KB at a time,
+ /*
+ * Many devices have trouble transferring more than 32KB at a time,
* while others have trouble with more than 64K. At this time we
 * are limiting both to 32K (64 sectors).
*/
@@ -128,14 +132,22 @@ static int slave_configure(struct scsi_device *sdev)
blk_queue_max_hw_sectors(sdev->request_queue,
max_sectors);
} else if (sdev->type == TYPE_TAPE) {
- /* Tapes need much higher max_sector limits, so just
+ /*
+ * Tapes need much higher max_sector limits, so just
* raise it to the maximum possible (4 GB / 512) and
* let the queue segment size sort out the real limit.
*/
blk_queue_max_hw_sectors(sdev->request_queue, 0x7FFFFF);
+ } else if (us->pusb_dev->speed >= USB_SPEED_SUPER) {
+ /*
+ * USB3 devices will be limited to 2048 sectors. This gives us
+ * better throughput on most devices.
+ */
+ blk_queue_max_hw_sectors(sdev->request_queue, 2048);
}
- /* Some USB host controllers can't do DMA; they have to use PIO.
+ /*
+ * Some USB host controllers can't do DMA; they have to use PIO.
* They indicate this by setting their dma_mask to NULL. For
* such controllers we need to make sure the block layer sets
* up bounce buffers in addressable memory.
@@ -143,17 +155,21 @@ static int slave_configure(struct scsi_device *sdev)
if (!us->pusb_dev->bus->controller->dma_mask)
blk_queue_bounce_limit(sdev->request_queue, BLK_BOUNCE_HIGH);
- /* We can't put these settings in slave_alloc() because that gets
+ /*
+ * We can't put these settings in slave_alloc() because that gets
* called before the device type is known. Consequently these
- * settings can't be overridden via the scsi devinfo mechanism. */
+ * settings can't be overridden via the scsi devinfo mechanism.
+ */
if (sdev->type == TYPE_DISK) {
- /* Some vendors seem to put the READ CAPACITY bug into
+ /*
+ * Some vendors seem to put the READ CAPACITY bug into
* all their devices -- primarily makers of cell phones
* and digital cameras. Since these devices always use
* flash media and can be expected to have an even number
* of sectors, we will always enable the CAPACITY_HEURISTICS
- * flag unless told otherwise. */
+ * flag unless told otherwise.
+ */
switch (le16_to_cpu(us->pusb_dev->descriptor.idVendor)) {
case VENDOR_ID_NOKIA:
case VENDOR_ID_NIKON:
@@ -165,28 +181,36 @@ static int slave_configure(struct scsi_device *sdev)
break;
}
- /* Disk-type devices use MODE SENSE(6) if the protocol
+ /*
+ * Disk-type devices use MODE SENSE(6) if the protocol
* (SubClass) is Transparent SCSI, otherwise they use
- * MODE SENSE(10). */
+ * MODE SENSE(10).
+ */
if (us->subclass != USB_SC_SCSI && us->subclass != USB_SC_CYP_ATACB)
sdev->use_10_for_ms = 1;
- /* Many disks only accept MODE SENSE transfer lengths of
- * 192 bytes (that's what Windows uses). */
+ /*
+ * Many disks only accept MODE SENSE transfer lengths of
+ * 192 bytes (that's what Windows uses).
+ */
sdev->use_192_bytes_for_3f = 1;
- /* Some devices don't like MODE SENSE with page=0x3f,
+ /*
+ * Some devices don't like MODE SENSE with page=0x3f,
* which is the command used for checking if a device
* is write-protected. Now that we tell the sd driver
* to do a 192-byte transfer with this command the
* majority of devices work fine, but a few still can't
* handle it. The sd driver will simply assume those
- * devices are write-enabled. */
+ * devices are write-enabled.
+ */
if (us->fflags & US_FL_NO_WP_DETECT)
sdev->skip_ms_page_3f = 1;
- /* A number of devices have problems with MODE SENSE for
- * page x08, so we will skip it. */
+ /*
+ * A number of devices have problems with MODE SENSE for
+ * page x08, so we will skip it.
+ */
sdev->skip_ms_page_8 = 1;
/* Some devices don't handle VPD pages correctly */
@@ -198,15 +222,19 @@ static int slave_configure(struct scsi_device *sdev)
/* Do not attempt to use WRITE SAME */
sdev->no_write_same = 1;
- /* Some disks return the total number of blocks in response
+ /*
+ * Some disks return the total number of blocks in response
* to READ CAPACITY rather than the highest block number.
- * If this device makes that mistake, tell the sd driver. */
+ * If this device makes that mistake, tell the sd driver.
+ */
if (us->fflags & US_FL_FIX_CAPACITY)
sdev->fix_capacity = 1;
- /* A few disks have two indistinguishable version, one of
+ /*
+ * A few disks have two indistinguishable versions, one of
* which reports the correct capacity and the other does not.
- * The sd driver has to guess which is the case. */
+ * The sd driver has to guess which is the case.
+ */
if (us->fflags & US_FL_CAPACITY_HEURISTICS)
sdev->guess_capacity = 1;
@@ -227,26 +255,34 @@ static int slave_configure(struct scsi_device *sdev)
if (sdev->scsi_level > SCSI_SPC_2)
us->fflags |= US_FL_SANE_SENSE;
- /* USB-IDE bridges tend to report SK = 0x04 (Non-recoverable
+ /*
+ * USB-IDE bridges tend to report SK = 0x04 (Non-recoverable
* Hardware Error) when any low-level error occurs,
* recoverable or not. Setting this flag tells the SCSI
* midlayer to retry such commands, which frequently will
* succeed and fix the error. The worst this can lead to
- * is an occasional series of retries that will all fail. */
+ * is an occasional series of retries that will all fail.
+ */
sdev->retry_hwerror = 1;
- /* USB disks should allow restart. Some drives spin down
- * automatically, requiring a START-STOP UNIT command. */
+ /*
+ * USB disks should allow restart. Some drives spin down
+ * automatically, requiring a START-STOP UNIT command.
+ */
sdev->allow_restart = 1;
- /* Some USB cardreaders have trouble reading an sdcard's last
+ /*
+ * Some USB cardreaders have trouble reading an sdcard's last
 * sector in a larger than 1 sector read; since the performance
- * impact is negligible we set this flag for all USB disks
+ * impact is negligible, we set this flag for all USB disks
+ */
sdev->last_sector_bug = 1;
- /* Enable last-sector hacks for single-target devices using
+ /*
+ * Enable last-sector hacks for single-target devices using
* the Bulk-only transport, unless we already know the
- * capacity will be decremented or is correct. */
+ * capacity will be decremented or is correct.
+ */
if (!(us->fflags & (US_FL_FIX_CAPACITY | US_FL_CAPACITY_OK |
US_FL_SCM_MULT_TARG)) &&
us->protocol == USB_PR_BULK)
@@ -262,9 +298,11 @@ static int slave_configure(struct scsi_device *sdev)
} else {
- /* Non-disk-type devices don't need to blacklist any pages
+ /*
+ * Non-disk-type devices don't need to blacklist any pages
* or to force 192-byte transfer lengths for MODE SENSE.
- * But they do need to use MODE SENSE(10). */
+ * But they do need to use MODE SENSE(10).
+ */
sdev->use_10_for_ms = 1;
/* Some (fake) usb cdrom devices don't like READ_DISC_INFO */
@@ -272,7 +310,8 @@ static int slave_configure(struct scsi_device *sdev)
sdev->no_read_disc_info = 1;
}
- /* The CB and CBI transports have no way to pass LUN values
+ /*
+ * The CB and CBI transports have no way to pass LUN values
* other than the bits in the second byte of a CDB. But those
* bits don't get set to the LUN value if the device reports
* scsi_level == 0 (UNKNOWN). Hence such devices must necessarily
@@ -282,13 +321,17 @@ static int slave_configure(struct scsi_device *sdev)
sdev->scsi_level == SCSI_UNKNOWN)
us->max_lun = 0;
- /* Some devices choke when they receive a PREVENT-ALLOW MEDIUM
- * REMOVAL command, so suppress those commands. */
+ /*
+ * Some devices choke when they receive a PREVENT-ALLOW MEDIUM
+ * REMOVAL command, so suppress those commands.
+ */
if (us->fflags & US_FL_NOT_LOCKABLE)
sdev->lockable = 0;
- /* this is to satisfy the compiler, tho I don't think the
- * return code is ever checked anywhere. */
+ /*
+ * this is to satisfy the compiler, though I don't think the
+ * return code is ever checked anywhere.
+ */
return 0;
}
@@ -362,8 +405,10 @@ static int command_abort(struct scsi_cmnd *srb)
usb_stor_dbg(us, "%s called\n", __func__);
- /* us->srb together with the TIMED_OUT, RESETTING, and ABORTING
- * bits are protected by the host lock. */
+ /*
+ * us->srb together with the TIMED_OUT, RESETTING, and ABORTING
+ * bits are protected by the host lock.
+ */
scsi_lock(us_to_host(us));
/* Is this command still active? */
@@ -373,11 +418,13 @@ static int command_abort(struct scsi_cmnd *srb)
return FAILED;
}
- /* Set the TIMED_OUT bit. Also set the ABORTING bit, but only if
+ /*
+ * Set the TIMED_OUT bit. Also set the ABORTING bit, but only if
* a device reset isn't already in progress (to avoid interfering
* with the reset). Note that we must retain the host lock while
* calling usb_stor_stop_transport(); otherwise it might interfere
- * with an auto-reset that begins as soon as we release the lock. */
+ * with an auto-reset that begins as soon as we release the lock.
+ */
set_bit(US_FLIDX_TIMED_OUT, &us->dflags);
if (!test_bit(US_FLIDX_RESETTING, &us->dflags)) {
set_bit(US_FLIDX_ABORTING, &us->dflags);
@@ -390,8 +437,10 @@ static int command_abort(struct scsi_cmnd *srb)
return SUCCESS;
}
-/* This invokes the transport reset mechanism to reset the state of the
- * device */
+/*
+ * This invokes the transport reset mechanism to reset the state of the
+ * device
+ */
static int device_reset(struct scsi_cmnd *srb)
{
struct us_data *us = host_to_us(srb->device->host);
@@ -419,9 +468,11 @@ static int bus_reset(struct scsi_cmnd *srb)
return result < 0 ? FAILED : SUCCESS;
}
-/* Report a driver-initiated device reset to the SCSI layer.
+/*
+ * Report a driver-initiated device reset to the SCSI layer.
* Calling this for a SCSI-initiated reset is unnecessary but harmless.
- * The caller must own the SCSI host lock. */
+ * The caller must own the SCSI host lock.
+ */
void usb_stor_report_device_reset(struct us_data *us)
{
int i;
@@ -434,9 +485,11 @@ void usb_stor_report_device_reset(struct us_data *us)
}
}
-/* Report a driver-initiated bus reset to the SCSI layer.
+/*
+ * Report a driver-initiated bus reset to the SCSI layer.
* Calling this for a SCSI-initiated reset is unnecessary but harmless.
- * The caller must not own the SCSI host lock. */
+ * The caller must not own the SCSI host lock.
+ */
void usb_stor_report_bus_reset(struct us_data *us)
{
struct Scsi_Host *host = us_to_host(us);
@@ -563,12 +616,30 @@ static const struct scsi_host_template usb_stor_host_template = {
.target_alloc = target_alloc,
/* lots of sg segments can be handled */
- .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+ .sg_tablesize = SG_MAX_SEGMENTS,
- /* limit the total size of a transfer to 120 KB */
+
+ /*
+ * Limit the total size of a transfer to 120 KB.
+ *
+ * Some devices are known to choke with anything larger. It seems like
+ * the problem stems from the fact that original IDE controllers had
+ * only an 8-bit register to hold the number of sectors in one transfer
+ * and even those couldn't handle a full 256 sectors.
+ *
+ * Because we want to make sure we interoperate with as many devices as
+ * possible, we will maintain a 240 sector transfer size limit for USB
+ * Mass Storage devices.
+ *
+ * Tests show that other operating systems have similar limits, with Microsoft
+ * Windows 7 limiting transfers to 128 sectors for both USB2 and USB3
+ * and Apple Mac OS X 10.11 limiting transfers to 256 sectors for USB2
+ * and 2048 for USB3 devices.
+ */
.max_sectors = 240,
- /* merge commands... this seems to help performance, but
+ /*
+ * merge commands... this seems to help performance, but
* periodically someone should test to see which setting is more
* optimal.
*/
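
The sector limits chosen in the scsiglue changes above convert into byte counts as follows, assuming the usual 512-byte logical sector: 240 sectors is the 120 KB default, 2048 sectors is the 1 MB cap for USB3 devices, and 0x7FFFFF sectors is the tape ceiling of just under 4 GB. A quick check of the arithmetic:

#include <stdio.h>

#define SECTOR_SIZE 512ULL	/* assumed logical sector size */

int main(void)
{
	printf("240 sectors      = %llu KB\n", 240 * SECTOR_SIZE / 1024);
	printf("2048 sectors     = %llu KB\n", 2048 * SECTOR_SIZE / 1024);
	printf("0x7FFFFF sectors = %llu MB\n", 0x7FFFFF * SECTOR_SIZE >> 20);
	return 0;
}

This prints 120 KB, 1024 KB, and 4095 MB respectively, matching the comments in the patch.
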
diff --git a/drivers/usb/storage/scsiglue.h b/drivers/usb/storage/scsiglue.h
index 5494d8760..d0a331dd9 100644
--- a/drivers/usb/storage/scsiglue.h
+++ b/drivers/usb/storage/scsiglue.h
@@ -1,4 +1,5 @@
-/* Driver for USB Mass Storage compliant devices
+/*
+ * Driver for USB Mass Storage compliant devices
* SCSI Connecting Glue Header File
*
* Current development and maintenance by:
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index 79224fcf9..c5797fa21 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -1,4 +1,5 @@
-/* Driver for SanDisk SDDR-09 SmartMedia reader
+/*
+ * Driver for SanDisk SDDR-09 SmartMedia reader
*
* (c) 2000, 2001 Robert Baruch (autophile@starband.net)
* (c) 2002 Andries Brouwer (aeb@cwi.nl)
@@ -799,10 +800,12 @@ sddr09_read_data(struct us_data *us,
usb_stor_dbg(us, "Read %d zero pages (LBA %d) page %d\n",
pages, lba, page);
- /* This is not really an error. It just means
- that the block has never been written.
- Instead of returning an error
- it is better to return all zero data. */
+ /*
+ * This is not really an error. It just means
+ * that the block has never been written.
+ * Instead of returning an error
+ * it is better to return all zero data.
+ */
memset(buffer, 0, len);
@@ -890,8 +893,10 @@ sddr09_write_lba(struct us_data *us, unsigned int lba,
}
if (pba == 1) {
- /* Maybe it is impossible to write to PBA 1.
- Fake success, but don't do anything. */
+ /*
+ * Maybe it is impossible to write to PBA 1.
+ * Fake success, but don't do anything.
+ */
printk(KERN_WARNING "sddr09: avoid writing to pba 1\n");
return 0;
}
@@ -979,18 +984,22 @@ sddr09_write_data(struct us_data *us,
struct scatterlist *sg;
int result;
- // Figure out the initial LBA and page
+ /* Figure out the initial LBA and page */
lba = address >> info->blockshift;
page = (address & info->blockmask);
maxlba = info->capacity >> (info->pageshift + info->blockshift);
if (lba >= maxlba)
return -EIO;
- // blockbuffer is used for reading in the old data, overwriting
- // with the new data, and performing ECC calculations
+ /*
+ * blockbuffer is used for reading in the old data, overwriting
+ * with the new data, and performing ECC calculations
+ */
- /* TODO: instead of doing kmalloc/kfree for each write,
- add a bufferpointer to the info structure */
+ /*
+ * TODO: instead of doing kmalloc/kfree for each write,
+ * add a bufferpointer to the info structure
+ */
pagelen = (1 << info->pageshift) + (1 << CONTROL_SHIFT);
blocklen = (pagelen << info->blockshift);
@@ -1000,9 +1009,11 @@ sddr09_write_data(struct us_data *us,
return -ENOMEM;
}
- // Since we don't write the user data directly to the device,
- // we have to create a bounce buffer and move the data a piece
- // at a time between the bounce buffer and the actual transfer buffer.
+ /*
+ * Since we don't write the user data directly to the device,
+ * we have to create a bounce buffer and move the data a piece
+ * at a time between the bounce buffer and the actual transfer buffer.
+ */
len = min(sectors, (unsigned int) info->blocksize) * info->pagesize;
buffer = kmalloc(len, GFP_NOIO);
@@ -1018,7 +1029,7 @@ sddr09_write_data(struct us_data *us,
while (sectors > 0) {
- // Write as many sectors as possible in this block
+ /* Write as many sectors as possible in this block */
pages = min(sectors, info->blocksize - page);
len = (pages << info->pageshift);
@@ -1031,7 +1042,7 @@ sddr09_write_data(struct us_data *us,
break;
}
- // Get the data from the transfer buffer
+ /* Get the data from the transfer buffer */
usb_stor_access_xfer_buf(buffer, len, us->srb,
&sg, &offset, FROM_XFER_BUF);
@@ -1168,9 +1179,11 @@ sddr09_get_cardinfo(struct us_data *us, unsigned char flags) {
/* Byte 1 is the device type */
cardinfo = nand_find_id(deviceID[1]);
if (cardinfo) {
- /* MB or MiB? It is neither. A 16 MB card has
- 17301504 raw bytes, of which 16384000 are
- usable for user data. */
+ /*
+ * MB or MiB? It is neither. A 16 MB card has
+ * 17301504 raw bytes, of which 16384000 are
+ * usable for user data.
+ */
sprintf(blurbtxt + strlen(blurbtxt),
", %d MB", 1<<(cardinfo->chipshift - 20));
} else {
@@ -1211,14 +1224,18 @@ sddr09_read_map(struct us_data *us) {
if (!info->capacity)
return -1;
- // size of a block is 1 << (blockshift + pageshift) bytes
- // divide into the total capacity to get the number of blocks
+ /*
+ * size of a block is 1 << (blockshift + pageshift) bytes;
+ * divide into the total capacity to get the number of blocks
+ */
numblocks = info->capacity >> (info->blockshift + info->pageshift);
- // read 64 bytes for every block (actually 1 << CONTROL_SHIFT)
- // but only use a 64 KB buffer
- // buffer size used must be a multiple of (1 << CONTROL_SHIFT)
+ /*
+ * read 64 bytes for every block (actually 1 << CONTROL_SHIFT)
+ * but only use a 64 KB buffer; the
+ * buffer size used must be a multiple of (1 << CONTROL_SHIFT)
+ */
#define SDDR09_READ_MAP_BUFSZ 65536
alloc_blocks = min(numblocks, SDDR09_READ_MAP_BUFSZ >> CONTROL_SHIFT);
@@ -1575,8 +1592,10 @@ static int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us)
havefakesense = 1;
- /* Dummy up a response for INQUIRY since SDDR09 doesn't
- respond to INQUIRY commands */
+ /*
+ * Dummy up a response for INQUIRY since SDDR09 doesn't
+ * respond to INQUIRY commands
+ */
if (srb->cmnd[0] == INQUIRY) {
memcpy(ptr, inquiry_response, 8);
@@ -1628,8 +1647,10 @@ static int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us)
if (srb->cmnd[0] == MODE_SENSE_10) {
int modepage = (srb->cmnd[2] & 0x3F);
- /* They ask for the Read/Write error recovery page,
- or for all pages. */
+ /*
+ * They ask for the Read/Write error recovery page,
+ * or for all pages.
+ */
/* %% We should check DBD %% */
if (modepage == 0x01 || modepage == 0x3F) {
usb_stor_dbg(us, "Dummy up request for mode page 0x%x\n",
@@ -1682,7 +1703,8 @@ static int sddr09_transport(struct scsi_cmnd *srb, struct us_data *us)
USB_STOR_TRANSPORT_ERROR);
}
- /* catch-all for all other commands, except
+ /*
+ * catch-all for all other commands, except
* pass TEST_UNIT_READY and REQUEST_SENSE through
*/
if (srb->cmnd[0] != TEST_UNIT_READY &&
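
The map-reading arithmetic in sddr09_read_map above can be sanity-checked in isolation. Assuming CONTROL_SHIFT is 6 (matching the 64 control bytes per block the comment mentions) and an illustrative 16 MB card with 512-byte pages and 32 pages per block:

#include <stdio.h>

#define CONTROL_SHIFT		6	/* 64 control bytes per block (assumed) */
#define SDDR09_READ_MAP_BUFSZ	65536	/* from the patch */

int main(void)
{
	unsigned int capacity = 16u << 20;	/* 16 MB card (illustrative) */
	unsigned int pageshift = 9, blockshift = 5;
	unsigned int numblocks = capacity >> (blockshift + pageshift);
	unsigned int bufblocks = SDDR09_READ_MAP_BUFSZ >> CONTROL_SHIFT;
	unsigned int alloc = numblocks < bufblocks ? numblocks : bufblocks;

	printf("%u blocks, %u per buffer, allocating for %u\n",
	       numblocks, bufblocks, alloc);
	return 0;
}
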
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index e5e0a25ec..147c50b3e 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -1,4 +1,5 @@
-/* Driver for SanDisk SDDR-55 SmartMedia reader
+/*
+ * Driver for SanDisk SDDR-55 SmartMedia reader
*
* SDDR55 driver v0.1:
*
@@ -130,7 +131,8 @@ sddr55_bulk_transport(struct us_data *us, int direction,
return usb_stor_bulk_transfer_buf(us, pipe, data, len, NULL);
}
-/* check if card inserted, if there is, update read_only status
+/*
+ * check if a card is inserted; if so, update the read_only status.
 * Return non-zero if no card is present.
*/
@@ -714,15 +716,18 @@ static int sddr55_read_map(struct us_data *us) {
if (max_lba > 1000)
max_lba = 1000;
- // Each block is 64 bytes of control data, so block i is located in
- // scatterlist block i*64/128k = i*(2^6)*(2^-17) = i*(2^-11)
+ /*
+ * Each block is 64 bytes of control data, so block i is located in
+ * scatterlist block i*64/128k = i*(2^6)*(2^-17) = i*(2^-11)
+ */
for (i=0; i<numblocks; i++) {
int zone = i / 1024;
lba = short_pack(buffer[i * 2], buffer[i * 2 + 1]);
- /* Every 1024 physical blocks ("zone"), the LBA numbers
+ /*
+ * Every 1024 physical blocks ("zone"), the LBA numbers
* go back to zero, but are within a higher
 * block of LBAs. Also, there is a maximum of
 * 1000 LBAs per zone. In other words, in PBA
@@ -733,7 +738,8 @@ static int sddr55_read_map(struct us_data *us) {
* are 24 spare blocks to use when blocks do go bad.
*/
- /* SDDR55 returns 0xffff for a bad block, and 0x400 for the
+ /*
+ * SDDR55 returns 0xffff for a bad block, and 0x400 for the
* CIS block. (Is this true for cards 8MB or less??)
* Record these in the physical to logical map
*/
@@ -824,8 +830,10 @@ static int sddr55_transport(struct scsi_cmnd *srb, struct us_data *us)
memset (info->sense_data, 0, sizeof info->sense_data);
- /* Dummy up a response for INQUIRY since SDDR55 doesn't
- respond to INQUIRY commands */
+ /*
+ * Dummy up a response for INQUIRY since SDDR55 doesn't
+ * respond to INQUIRY commands
+ */
if (srb->cmnd[0] == INQUIRY) {
memcpy(ptr, inquiry_response, 8);
@@ -833,7 +841,8 @@ static int sddr55_transport(struct scsi_cmnd *srb, struct us_data *us)
return USB_STOR_TRANSPORT_GOOD;
}
- /* only check card status if the map isn't allocated, ie no card seen yet
+ /*
+ * only check card status if the map isn't allocated, i.e. no card seen yet
* or if it's been over half a second since we last accessed it
*/
if (info->lba_to_pba == NULL || time_after(jiffies, info->last_access + HZ/2)) {
@@ -849,8 +858,10 @@ static int sddr55_transport(struct scsi_cmnd *srb, struct us_data *us)
}
}
- /* if we detected a problem with the map when writing,
- don't allow any more access */
+ /*
+ * if we detected a problem with the map when writing,
+ * don't allow any more access
+ */
if (info->fatal_error) {
set_sense_info (3, 0x31, 0);
@@ -868,12 +879,16 @@ static int sddr55_transport(struct scsi_cmnd *srb, struct us_data *us)
info->capacity = capacity;
- /* figure out the maximum logical block number, allowing for
- * the fact that only 250 out of every 256 are used */
+ /*
+ * figure out the maximum logical block number, allowing for
+ * the fact that only 250 out of every 256 are used
+ */
info->max_log_blks = ((info->capacity >> (info->pageshift + info->blockshift)) / 256) * 250;
- /* Last page in the card, adjust as we only use 250 out of
- * every 256 pages */
+ /*
+ * Last page in the card, adjust as we only use 250 out of
+ * every 256 pages
+ */
capacity = (capacity / 256) * 250;
capacity /= PAGESIZE;
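
The zone and capacity arithmetic in the sddr55 changes above is likewise checkable on paper: physical blocks group into zones of 1024, each zone carries at most 1000 LBAs, and only 250 of every 256 blocks hold user data. A sketch with an assumed 16 MB card geometry:

#include <stdio.h>

int main(void)
{
	unsigned int pba = 2500;		/* sample physical block */
	unsigned int capacity = 16u << 20;	/* 16 MB card (assumed) */
	unsigned int pageshift = 9, blockshift = 5;
	unsigned int blocks = capacity >> (pageshift + blockshift);
	unsigned int max_log_blks = blocks / 256 * 250;

	printf("pba %u lies in zone %u\n", pba, pba / 1024);
	printf("%u physical blocks map to %u logical blocks\n",
	       blocks, max_log_blks);
	return 0;
}
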
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index a3ec86b91..3b0294e4d 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1,4 +1,5 @@
-/* Driver for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable
+/*
+ * Driver for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable
*
* Current development and maintenance by:
* (c) 2000, 2001 Robert Baruch (autophile@starband.net)
@@ -408,7 +409,8 @@ static int usbat_wait_not_busy(struct us_data *us, int minutes)
int result;
unsigned char *status = us->iobuf;
- /* Synchronizing cache on a CDR could take a heck of a long time,
+ /*
+ * Synchronizing cache on a CDR could take a heck of a long time,
* but probably not more than 10 minutes or so. On the other hand,
* doing a full blank on a CDRW at speed 1 will take about 75
* minutes!
@@ -1570,9 +1572,10 @@ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us)
len = scsi_bufflen(srb);
- /* Send A0 (ATA PACKET COMMAND).
- Note: I guess we're never going to get any of the ATA
- commands... just ATA Packet Commands.
+ /*
+ * Send A0 (ATA PACKET COMMAND).
+ * Note: I guess we're never going to get any of the ATA
+ * commands... just ATA Packet Commands.
*/
registers[0] = USBAT_ATA_FEATURES;
@@ -1851,7 +1854,8 @@ static int usbat_probe(struct usb_interface *intf,
if (result)
return result;
- /* The actual transport will be determined later by the
+ /*
+ * The actual transport will be determined later by the
* initialization routine; this is just a placeholder.
*/
us->transport_name = "Shuttle USBAT";
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index 2ea657be1..9a51019ac 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -177,7 +177,8 @@ int sierra_ms_init(struct us_data *us)
debug_swoc(&us->pusb_dev->dev, swocInfo);
- /* If there is not Linux software on the TRU-Install device
+ /*
+ * If there is no Linux software on the TRU-Install device
* then switch to modem mode
*/
if (!containsFullLinuxPackage(swocInfo)) {
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 5e67f63b2..ffd086733 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1,4 +1,5 @@
-/* Driver for USB Mass Storage compliant devices
+/*
+ * Driver for USB Mass Storage compliant devices
*
* Current development and maintenance by:
* (c) 1999-2002 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
@@ -109,7 +110,8 @@
* called more than once or from being called during usb_submit_urb().
*/
-/* This is the completion handler which will wake us up when an URB
+/*
+ * This is the completion handler which will wake us up when an URB
* completes.
*/
static void usb_stor_blocking_completion(struct urb *urb)
@@ -119,7 +121,8 @@ static void usb_stor_blocking_completion(struct urb *urb)
complete(urb_done_ptr);
}
-/* This is the common part of the URB message submission code
+/*
+ * This is the common part of the URB message submission code
*
* All URBs from the usb-storage driver involved in handling a queued scsi
* command _must_ pass through this function (or something like it) for the
@@ -142,10 +145,12 @@ static int usb_stor_msg_common(struct us_data *us, int timeout)
us->current_urb->context = &urb_done;
us->current_urb->transfer_flags = 0;
- /* we assume that if transfer_buffer isn't us->iobuf then it
+ /*
+ * we assume that if transfer_buffer isn't us->iobuf then it
* hasn't been mapped for DMA. Yes, this is clunky, but it's
* easier than always having the caller tell us whether the
- * transfer buffer has already been mapped. */
+ * transfer buffer has already been mapped.
+ */
if (us->current_urb->transfer_buffer == us->iobuf)
us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
us->current_urb->transfer_dma = us->iobuf_dma;
@@ -157,8 +162,10 @@ static int usb_stor_msg_common(struct us_data *us, int timeout)
return status;
}
- /* since the URB has been submitted successfully, it's now okay
- * to cancel it */
+ /*
+ * since the URB has been submitted successfully, it's now okay
+ * to cancel it
+ */
set_bit(US_FLIDX_URB_ACTIVE, &us->dflags);
/* did an abort occur during the submission? */
@@ -220,7 +227,8 @@ int usb_stor_control_msg(struct us_data *us, unsigned int pipe,
}
EXPORT_SYMBOL_GPL(usb_stor_control_msg);
-/* This is a version of usb_clear_halt() that allows early termination and
+/*
+ * This is a version of usb_clear_halt() that allows early termination and
* doesn't read the status from the device -- this is because some devices
* crash their internal firmware when the status is requested after a halt.
*
@@ -280,8 +288,10 @@ static int interpret_urb_result(struct us_data *us, unsigned int pipe,
/* stalled */
case -EPIPE:
- /* for control endpoints, (used by CB[I]) a stall indicates
- * a failed command */
+ /*
+ * for control endpoints, (used by CB[I]) a stall indicates
+ * a failed command
+ */
if (usb_pipecontrol(pipe)) {
usb_stor_dbg(us, "-- stall on control pipe\n");
return USB_STOR_XFER_STALLED;
@@ -433,8 +443,10 @@ static int usb_stor_bulk_transfer_sglist(struct us_data *us, unsigned int pipe,
return USB_STOR_XFER_ERROR;
}
- /* since the block has been initialized successfully, it's now
- * okay to cancel it */
+ /*
+ * since the block has been initialized successfully, it's now
+ * okay to cancel it
+ */
set_bit(US_FLIDX_SG_ACTIVE, &us->dflags);
/* did an abort occur during the submission? */
@@ -515,7 +527,8 @@ EXPORT_SYMBOL_GPL(usb_stor_bulk_transfer_sg);
* Transport routines
***********************************************************************/
-/* There are so many devices that report the capacity incorrectly,
+/*
+ * There are so many devices that report the capacity incorrectly,
* this routine was written to counteract some of the resulting
* problems.
*/
@@ -533,7 +546,8 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
[12] = 0x14 /* Record Not Found */
};
- /* If last-sector problems can't occur, whether because the
+ /*
+ * If last-sector problems can't occur, whether because the
* capacity was already decremented or because the device is
* known to report the correct capacity, then we don't need
* to do anything.
@@ -559,13 +573,15 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
if (srb->result == SAM_STAT_GOOD && scsi_get_resid(srb) == 0) {
- /* The command succeeded. We know this device doesn't
+ /*
+ * The command succeeded. We know this device doesn't
* have the last-sector bug, so stop checking it.
*/
us->use_last_sector_hacks = 0;
} else {
- /* The command failed. Allow up to 3 retries in case this
+ /*
+ * The command failed. Allow up to 3 retries in case this
* is some normal sort of failure. After that, assume the
* capacity is wrong and we're trying to access the sector
* beyond the end. Replace the result code and sense data
@@ -581,7 +597,8 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
}
done:
- /* Don't reset the retry counter for TEST UNIT READY commands,
+ /*
+ * Don't reset the retry counter for TEST UNIT READY commands,
* because they get issued after device resets which might be
* caused by a failed last-sector access.
*/
@@ -589,7 +606,8 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
us->last_sector_retries = 0;
}
-/* Invoke the transport and basic error-handling/recovery methods
+/*
+ * Invoke the transport and basic error-handling/recovery methods
*
* This is used by the protocol layers to actually send the message to
* the device and receive the response.
@@ -603,7 +621,8 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
scsi_set_resid(srb, 0);
result = us->transport(srb, us);
- /* if the command gets aborted by the higher layers, we need to
+ /*
+ * if the command gets aborted by the higher layers, we need to
* short-circuit all other processing
*/
if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
@@ -628,7 +647,8 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
srb->result = SAM_STAT_GOOD;
- /* Determine if we need to auto-sense
+ /*
+ * Determine if we need to auto-sense
*
* I normally don't use a flag like this, but it's almost impossible
* to understand what's going on here if I don't.
@@ -728,7 +748,8 @@ Retry_Sense:
goto Handle_Errors;
}
- /* Some devices claim to support larger sense but fail when
+ /*
+ * Some devices claim to support larger sense but fail when
* trying to request it. When a transport failure happens
 * using US_FL_SANE_SENSE, we always retry with a standard
* (small) sense request. This fixes some USB GSM modems
@@ -746,7 +767,8 @@ Retry_Sense:
if (temp_result != USB_STOR_TRANSPORT_GOOD) {
usb_stor_dbg(us, "-- auto-sense failure\n");
- /* we skip the reset if this happens to be a
+ /*
+ * we skip the reset if this happens to be a
* multi-target device, since failure of an
* auto-sense is perfectly valid
*/
@@ -756,7 +778,8 @@ Retry_Sense:
return;
}
- /* If the sense data returned is larger than 18-bytes then we
+ /*
+ * If the sense data returned is larger than 18 bytes, then we
* assume this device supports requesting more in the future.
* The response code must be 70h through 73h inclusive.
*/
@@ -767,7 +790,8 @@ Retry_Sense:
usb_stor_dbg(us, "-- SANE_SENSE support enabled\n");
us->fflags |= US_FL_SANE_SENSE;
- /* Indicate to the user that we truncated their sense
+ /*
+ * Indicate to the user that we truncated their sense
* because we didn't know it supported larger sense.
*/
usb_stor_dbg(us, "-- Sense data truncated to %i from %i\n",
@@ -795,13 +819,15 @@ Retry_Sense:
SCSI_SENSE_BUFFERSIZE, 4);
fm_ili = (scdd ? scdd[3] : srb->sense_buffer[2]) & 0xA0;
- /* We often get empty sense data. This could indicate that
+ /*
+ * We often get empty sense data. This could indicate that
* everything worked or that there was an unspecified
* problem. We have to decide which.
*/
if (sshdr.sense_key == 0 && sshdr.asc == 0 && sshdr.ascq == 0 &&
fm_ili == 0) {
- /* If things are really okay, then let's show that.
+ /*
+ * If things are really okay, then let's show that.
* Zero out the sense buffer so the higher layers
* won't realize we did an unsolicited auto-sense.
*/
@@ -809,7 +835,8 @@ Retry_Sense:
srb->result = SAM_STAT_GOOD;
srb->sense_buffer[0] = 0x0;
- /* If there was a problem, report an unspecified
+ /*
+ * If there was a problem, report an unspecified
* hardware error to prevent the higher layers from
* entering an infinite retry loop.
*/
@@ -860,20 +887,26 @@ Retry_Sense:
last_sector_hacks(us, srb);
return;
- /* Error and abort processing: try to resynchronize with the device
+ /*
+ * Error and abort processing: try to resynchronize with the device
* by issuing a port reset. If that fails, try a class-specific
- * device reset. */
+ * device reset.
+ */
Handle_Errors:
- /* Set the RESETTING bit, and clear the ABORTING bit so that
- * the reset may proceed. */
+ /*
+ * Set the RESETTING bit, and clear the ABORTING bit so that
+ * the reset may proceed.
+ */
scsi_lock(us_to_host(us));
set_bit(US_FLIDX_RESETTING, &us->dflags);
clear_bit(US_FLIDX_ABORTING, &us->dflags);
scsi_unlock(us_to_host(us));
- /* We must release the device lock because the pre_reset routine
- * will want to acquire it. */
+ /*
+ * We must release the device lock because the pre_reset routine
+ * will want to acquire it.
+ */
mutex_unlock(&us->dev_mutex);
result = usb_stor_port_reset(us);
mutex_lock(&us->dev_mutex);
@@ -891,10 +924,12 @@ Retry_Sense:
/* Stop the current URB transfer */
void usb_stor_stop_transport(struct us_data *us)
{
- /* If the state machine is blocked waiting for an URB,
+ /*
+ * If the state machine is blocked waiting for an URB,
* let's wake it up. The test_and_clear_bit() call
* guarantees that if a URB has just been submitted,
- * it won't be cancelled more than once. */
+ * it won't be cancelled more than once.
+ */
if (test_and_clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags)) {
usb_stor_dbg(us, "-- cancelling URB\n");
usb_unlink_urb(us->current_urb);
@@ -955,7 +990,8 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
/* STATUS STAGE */
- /* NOTE: CB does not have a status stage. Silly, I know. So
+ /*
+ * NOTE: CB does not have a status stage. Silly, I know. So
* we have to catch this at a higher level.
*/
if (us->protocol != USB_PR_CBI)
@@ -967,7 +1003,8 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
- /* UFI gives us ASC and ASCQ, like a request sense
+ /*
+ * UFI gives us ASC and ASCQ, like a request sense
*
* REQUEST_SENSE and INQUIRY don't affect the sense data on UFI
* devices, so we ignore the information for those commands. Note
@@ -983,7 +1020,8 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
return USB_STOR_TRANSPORT_GOOD;
}
- /* If not UFI, we interpret the data as a result code
+ /*
+ * If not UFI, we interpret the data as a result code
* The first byte should always be a 0x0.
*
* Some bogus devices don't follow that rule. They stuff the ASC
@@ -1005,7 +1043,8 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
}
return USB_STOR_TRANSPORT_ERROR;
- /* the CBI spec requires that the bulk pipe must be cleared
+ /*
+ * the CBI spec requires that the bulk pipe be cleared
* following any data-in/out command failure (section 2.4.3.1.3)
*/
Failed:
@@ -1107,9 +1146,11 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
/* DATA STAGE */
/* send/receive data payload, if there is any */
- /* Some USB-IDE converter chips need a 100us delay between the
+ /*
+ * Some USB-IDE converter chips need a 100us delay between the
* command phase and the data phase. Some devices need a little
- * more than that, probably because of clock rate inaccuracies. */
+ * more than that, probably because of clock rate inaccuracies.
+ */
if (unlikely(us->fflags & US_FL_GO_SLOW))
usleep_range(125, 150);
@@ -1121,7 +1162,8 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
if (result == USB_STOR_XFER_ERROR)
return USB_STOR_TRANSPORT_ERROR;
- /* If the device tried to send back more data than the
+ /*
+ * If the device tried to send back more data than the
* amount requested, the spec requires us to transfer
* the CSW anyway. Since there's no point retrying the
 * command, we'll return fake sense data indicating
@@ -1156,7 +1198,8 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
}
}
- /* See flow chart on pg 15 of the Bulk Only Transport spec for
+ /*
+ * See flow chart on pg 15 of the Bulk Only Transport spec for
* an explanation of how this code works.
*/
@@ -1165,7 +1208,8 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
bcs, US_BULK_CS_WRAP_LEN, &cswlen);
- /* Some broken devices add unnecessary zero-length packets to the
+ /*
+ * Some broken devices add unnecessary zero-length packets to the
* end of their data transfers. Such packets show up as 0-length
* CSWs. If we encounter such a thing, try to read the CSW again.
*/
@@ -1201,7 +1245,8 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
return USB_STOR_TRANSPORT_ERROR;
}
- /* Some broken devices report odd signatures, so we do not check them
+ /*
+ * Some broken devices report odd signatures, so we do not check them
* for validity against the spec. We store the first one we see,
* and check subsequent transfers for validity against this signature.
*/
@@ -1217,11 +1262,14 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
return USB_STOR_TRANSPORT_ERROR;
}
- /* try to compute the actual residue, based on how much data
- * was really transferred and what the device tells us */
+ /*
+ * try to compute the actual residue, based on how much data
+ * was really transferred and what the device tells us
+ */
if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
- /* Heuristically detect devices that generate bogus residues
+ /*
+ * Heuristically detect devices that generate bogus residues
* by seeing what happens with INQUIRY and READ CAPACITY
* commands.
*/
@@ -1259,7 +1307,8 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
return USB_STOR_TRANSPORT_FAILED;
case US_BULK_STAT_PHASE:
- /* phase error -- note that a transport reset will be
+ /*
+ * phase error -- note that a transport reset will be
* invoked by the invoke_transport() function
*/
return USB_STOR_TRANSPORT_ERROR;
@@ -1274,7 +1323,8 @@ EXPORT_SYMBOL_GPL(usb_stor_Bulk_transport);
* Reset routines
***********************************************************************/
-/* This is the common part of the device reset code.
+/*
+ * This is the common part of the device reset code.
*
* It's handy that every transport mechanism uses the control endpoint for
* resets.
@@ -1302,8 +1352,10 @@ static int usb_stor_reset_common(struct us_data *us,
return result;
}
- /* Give the device some time to recover from the reset,
- * but don't delay disconnect processing. */
+ /*
+ * Give the device some time to recover from the reset,
+ * but don't delay disconnect processing.
+ */
wait_event_interruptible_timeout(us->delay_wait,
test_bit(US_FLIDX_DISCONNECTING, &us->dflags),
HZ*6);
@@ -1328,8 +1380,7 @@ static int usb_stor_reset_common(struct us_data *us,
return result;
}
-/* This issues a CB[I] Reset to the device in question
- */
+/* This issues a CB[I] Reset to the device in question */
#define CB_RESET_CMD_SIZE 12
int usb_stor_CB_reset(struct us_data *us)
@@ -1343,7 +1394,8 @@ int usb_stor_CB_reset(struct us_data *us)
}
EXPORT_SYMBOL_GPL(usb_stor_CB_reset);
-/* This issues a Bulk-only Reset to the device in question, including
+/*
+ * This issues a Bulk-only Reset to the device in question, including
* clearing the subsequent endpoint halts that may occur.
*/
int usb_stor_Bulk_reset(struct us_data *us)
@@ -1354,7 +1406,8 @@ int usb_stor_Bulk_reset(struct us_data *us)
}
EXPORT_SYMBOL_GPL(usb_stor_Bulk_reset);
-/* Issue a USB port reset to the device. The caller must not hold
+/*
+ * Issue a USB port reset to the device. The caller must not hold
* us->dev_mutex.
*/
int usb_stor_port_reset(struct us_data *us)
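
The auto-sense heuristic in transport.c above reduces to two tests on the returned sense data: more than the standard 18 bytes came back, and the response code (the low 7 bits of byte 0) is 70h through 73h. A sketch of that predicate, separate from the driver's own bookkeeping:

#include <stdbool.h>
#include <stdio.h>

/* Larger-sense support: >18 bytes returned, response code 0x70..0x73. */
static bool sane_sense_capable(const unsigned char *sense, int len)
{
	unsigned char rc = sense[0] & 0x7f;

	return len > 18 && rc >= 0x70 && rc <= 0x73;
}

int main(void)
{
	unsigned char sense[32] = { 0x70 };

	printf("%d\n", sane_sense_capable(sense, sizeof(sense)));	/* 1 */
	return 0;
}
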
diff --git a/drivers/usb/storage/transport.h b/drivers/usb/storage/transport.h
index 9369d752d..dae3ecd2e 100644
--- a/drivers/usb/storage/transport.h
+++ b/drivers/usb/storage/transport.h
@@ -1,4 +1,5 @@
-/* Driver for USB Mass Storage compliant devices
+/*
+ * Driver for USB Mass Storage compliant devices
* Transport Functions Header File
*
* Current development and maintenance by:
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index ecc7d4b1d..5ef014ba6 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -799,7 +799,8 @@ static int uas_slave_alloc(struct scsi_device *sdev)
sdev->hostdata = devinfo;
- /* USB has unusual DMA-alignment requirements: Although the
+ /*
+ * USB has unusual DMA-alignment requirements: Although the
* starting address of each scatter-gather element doesn't matter,
* the length of each element except the last must be divisible
* by the Bulk maxpacket value. There's currently no way to
@@ -848,7 +849,6 @@ static struct scsi_host_template uas_host_template = {
.slave_configure = uas_slave_configure,
.eh_abort_handler = uas_eh_abort_handler,
.eh_bus_reset_handler = uas_eh_bus_reset_handler,
- .can_queue = MAX_CMNDS,
.this_id = -1,
.sg_tablesize = SG_NONE,
.skip_settle_delay = 1,
diff --git a/drivers/usb/storage/unusual_alauda.h b/drivers/usb/storage/unusual_alauda.h
index fa3e9edaa..763bc0303 100644
--- a/drivers/usb/storage/unusual_alauda.h
+++ b/drivers/usb/storage/unusual_alauda.h
@@ -1,4 +1,5 @@
-/* Unusual Devices File for the Alauda-based card readers
+/*
+ * Unusual Devices File for the Alauda-based card readers
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index 82e8ed032..e9a2eb888 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -1,4 +1,5 @@
-/* Unusual Devices File for devices based on the Cypress USB/ATA bridge
+/*
+ * Unusual Devices File for devices based on the Cypress USB/ATA bridge
* with support for ATACB
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/usb/storage/unusual_datafab.h b/drivers/usb/storage/unusual_datafab.h
index 582a603c7..5049b6bbe 100644
--- a/drivers/usb/storage/unusual_datafab.h
+++ b/drivers/usb/storage/unusual_datafab.h
@@ -1,4 +1,5 @@
-/* Unusual Devices File for the Datafab USB Compact Flash reader
+/*
+ * Unusual Devices File for the Datafab USB Compact Flash reader
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -79,7 +80,8 @@ UNUSUAL_DEV( 0x07c4, 0xa109, 0x0000, 0xffff,
USB_SC_SCSI, USB_PR_DATAFAB, NULL,
0),
-/* Reported by Felix Moeller <felix@derklecks.de>
+/*
+ * Reported by Felix Moeller <felix@derklecks.de>
 * in Germany this is sold by Hama with the product number 46952
* as "DualSlot CompactFlash(TM) & MStick Drive USB"
*/
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 7ffe42090..aa3539238 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1,4 +1,5 @@
-/* Driver for USB Mass Storage compliant devices
+/*
+ * Driver for USB Mass Storage compliant devices
* Unusual Devices File
*
* Current development and maintenance by:
@@ -25,13 +26,15 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/* IMPORTANT NOTE: This file must be included in another file which does
+/*
+ * IMPORTANT NOTE: This file must be included in another file which does
* the following thing for it to work:
* The UNUSUAL_DEV, COMPLIANT_DEV, and USUAL_DEV macros must be defined
* before this file is included.
*/
-/* If you edit this file, please try to keep it sorted first by VendorID,
+/*
+ * If you edit this file, please try to keep it sorted first by VendorID,
* then by ProductID.
*
* If you want to add an entry for this file, be sure to include the
@@ -47,13 +50,15 @@
* <usb-storage@lists.one-eyed-alien.net>
*/
-/* Note: If you add an entry only in order to set the CAPACITY_OK flag,
+/*
+ * Note: If you add an entry only in order to set the CAPACITY_OK flag,
* use the COMPLIANT_DEV macro instead of UNUSUAL_DEV. This is
* because such entries mark devices which actually work correctly,
* as opposed to devices that do something strangely or wrongly.
*/
-/* In-kernel mode switching is deprecated. Do not add new devices to
+/*
+ * In-kernel mode switching is deprecated. Do not add new devices to
* this list for the sole purpose of switching them to a different
* mode. Existing userspace solutions are superior.
*
@@ -66,8 +71,7 @@
#define NO_SDDR09
#endif
-/* patch submitted by Vivian Bregier <Vivian.Bregier@imag.fr>
- */
+/* patch submitted by Vivian Bregier <Vivian.Bregier@imag.fr> */
UNUSUAL_DEV( 0x03eb, 0x2002, 0x0100, 0x0100,
"ATMEL",
"SND1 Storage",
@@ -93,7 +97,8 @@ UNUSUAL_DEV( 0x03f0, 0x070c, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_SANE_SENSE ),
-/* Reported by Grant Grundler <grundler@parisc-linux.org>
+/*
+ * Reported by Grant Grundler <grundler@parisc-linux.org>
* HP r707 camera in "Disk" mode with 2.00.23 or 2.00.24 firmware.
*/
UNUSUAL_DEV( 0x03f0, 0x4002, 0x0001, 0x0001,
@@ -107,7 +112,8 @@ UNUSUAL_DEV( 0x03f3, 0x0001, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
-/* Reported by Sebastian Kapfer <sebastian_kapfer@gmx.net>
+/*
+ * Reported by Sebastian Kapfer <sebastian_kapfer@gmx.net>
* and Olaf Hering <olh@suse.de> (different bcd's, same vendor/product)
* for USB floppies that need the SINGLE_LUN enforcement.
*/
@@ -124,7 +130,8 @@ UNUSUAL_DEV( 0x040d, 0x6205, 0x0003, 0x0003,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
-/* Deduced by Jonathan Woithe <jwoithe@just42.net>
+/*
+ * Deduced by Jonathan Woithe <jwoithe@just42.net>
* Entry needed for flags: US_FL_FIX_INQUIRY because initial inquiry message
* always fails and confuses drive.
*/
@@ -167,8 +174,10 @@ UNUSUAL_DEV( 0x0420, 0x0001, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
-/* Reported by Andrew Nayenko <relan@bk.ru>
- * Updated for new firmware by Phillip Potter <phillipinda@hotmail.com> */
+/*
+ * Reported by Andrew Nayenko <relan@bk.ru>
+ * Updated for new firmware by Phillip Potter <phillipinda@hotmail.com>
+ */
UNUSUAL_DEV( 0x0421, 0x0019, 0x0592, 0x0610,
"Nokia",
"Nokia 6288",
@@ -196,16 +205,20 @@ UNUSUAL_DEV( 0x0421, 0x0434, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ),
-/* Reported by Sumedha Swamy <sumedhaswamy@gmail.com> and
- * Einar Th. Einarsson <einarthered@gmail.com> */
+/*
+ * Reported by Sumedha Swamy <sumedhaswamy@gmail.com> and
+ * Einar Th. Einarsson <einarthered@gmail.com>
+ */
UNUSUAL_DEV( 0x0421, 0x0444, 0x0100, 0x0100,
"Nokia",
"N91",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE | US_FL_FIX_CAPACITY ),
-/* Reported by Jiri Slaby <jirislaby@gmail.com> and
- * Rene C. Castberg <Rene@Castberg.org> */
+/*
+ * Reported by Jiri Slaby <jirislaby@gmail.com> and
+ * Rene C. Castberg <Rene@Castberg.org>
+ */
UNUSUAL_DEV( 0x0421, 0x0446, 0x0100, 0x0100,
"Nokia",
"N80",
@@ -269,8 +282,10 @@ UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100,
US_FL_SINGLE_LUN ),
#endif
-/* Patch submitted by Daniel Drake <dsd@gentoo.org>
- * Device reports nonsense bInterfaceProtocol 6 when connected over USB2 */
+/*
+ * Patch submitted by Daniel Drake <dsd@gentoo.org>
+ * Device reports nonsense bInterfaceProtocol 6 when connected over USB2
+ */
UNUSUAL_DEV( 0x0451, 0x5416, 0x0100, 0x0100,
"Neuros Audio",
"USB 2.0 HD 2.5",
@@ -288,17 +303,18 @@ UNUSUAL_DEV( 0x0457, 0x0150, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
/*
-* Bohdan Linda <bohdan.linda@gmail.com>
-* 1GB USB sticks MyFlash High Speed. I have restricted
-* the revision to my model only
-*/
+ * Bohdan Linda <bohdan.linda@gmail.com>
+ * 1GB USB sticks MyFlash High Speed. I have restricted
+ * the revision to my model only
+ */
UNUSUAL_DEV( 0x0457, 0x0151, 0x0100, 0x0100,
"USB 2.0",
"Flash Disk",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NOT_LOCKABLE ),
-/* Reported by Tamas Kerecsen <kerecsen@bigfoot.com>
+/*
+ * Reported by Tamas Kerecsen <kerecsen@bigfoot.com>
* Obviously the PROM has not been customized by the VAR;
* the Vendor and Product string descriptors are:
* Generic Mass Storage (PROTOTYPE--Remember to change idVendor)
@@ -347,24 +363,30 @@ UNUSUAL_DEV( 0x0482, 0x0107, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY | US_FL_NOT_LOCKABLE),
-/* Reported by Paul Stewart <stewart@wetlogic.net>
- * This entry is needed because the device reports Sub=ff */
+/*
+ * Reported by Paul Stewart <stewart@wetlogic.net>
+ * This entry is needed because the device reports Sub=ff
+ */
UNUSUAL_DEV( 0x04a4, 0x0004, 0x0001, 0x0001,
"Hitachi",
"DVD-CAM DZ-MV100A Camcorder",
USB_SC_SCSI, USB_PR_CB, NULL, US_FL_SINGLE_LUN),
-/* BENQ DC5330
+/*
+ * BENQ DC5330
* Reported by Manuel Fombuena <mfombuena@ya.com> and
- * Frank Copeland <fjc@thingy.apana.org.au> */
+ * Frank Copeland <fjc@thingy.apana.org.au>
+ */
UNUSUAL_DEV( 0x04a5, 0x3010, 0x0100, 0x0100,
"Tekom Technologies, Inc",
"300_CAMERA",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
-/* Patch for Nikon coolpix 2000
- * Submitted by Fabien Cosse <fabien.cosse@wanadoo.fr>*/
+/*
+ * Patch for Nikon coolpix 2000
+ * Submitted by Fabien Cosse <fabien.cosse@wanadoo.fr>
+ */
UNUSUAL_DEV( 0x04b0, 0x0301, 0x0010, 0x0010,
"NIKON",
"NIKON DSC E2000",
@@ -378,21 +400,26 @@ UNUSUAL_DEV( 0x04b3, 0x4001, 0x0110, 0x0110,
USB_SC_DEVICE, USB_PR_CB, NULL,
US_FL_MAX_SECTORS_MIN),
-/* Reported by Simon Levitt <simon@whattf.com>
- * This entry needs Sub and Proto fields */
+/*
+ * Reported by Simon Levitt <simon@whattf.com>
+ * This entry needs Sub and Proto fields
+ */
UNUSUAL_DEV( 0x04b8, 0x0601, 0x0100, 0x0100,
"Epson",
"875DC Storage",
USB_SC_SCSI, USB_PR_CB, NULL, US_FL_FIX_INQUIRY),
-/* Reported by Khalid Aziz <khalid@gonehiking.org>
- * This entry is needed because the device reports Sub=ff */
+/*
+ * Reported by Khalid Aziz <khalid@gonehiking.org>
+ * This entry is needed because the device reports Sub=ff
+ */
UNUSUAL_DEV( 0x04b8, 0x0602, 0x0110, 0x0110,
"Epson",
"785EPX Storage",
USB_SC_SCSI, USB_PR_BULK, NULL, US_FL_SINGLE_LUN),
-/* Not sure who reported this originally but
+/*
+ * Not sure who reported this originally but
* Pavel Machek <pavel@ucw.cz> reported that the extra US_FL_SINGLE_LUN
* flag be added */
UNUSUAL_DEV( 0x04cb, 0x0100, 0x0000, 0x2210,
@@ -400,7 +427,8 @@ UNUSUAL_DEV( 0x04cb, 0x0100, 0x0000, 0x2210,
"FinePix 1400Zoom",
USB_SC_UFI, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN),
-/* Reported by Ondrej Zary <linux@rainbow-software.org>
+/*
+ * Reported by Ondrej Zary <linux@rainbow-software.org>
* The device reports one sector more and breaks when that sector is accessed
*/
UNUSUAL_DEV( 0x04ce, 0x0002, 0x026c, 0x026c,
@@ -409,7 +437,8 @@ UNUSUAL_DEV( 0x04ce, 0x0002, 0x026c, 0x026c,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY),
-/* Reported by Kriston Fincher <kriston@airmail.net>
+/*
+ * Reported by Kriston Fincher <kriston@airmail.net>
* Patch submitted by Sean Millichamp <sean@bruenor.org>
* This is to support the Panasonic PalmCam PV-SD4090
* This entry is needed because the device reports Sub=ff
@@ -419,8 +448,10 @@ UNUSUAL_DEV( 0x04da, 0x0901, 0x0100, 0x0200,
"LS-120 Camera",
USB_SC_UFI, USB_PR_DEVICE, NULL, 0),
-/* From Yukihiro Nakai, via zaitcev@yahoo.com.
- * This is needed for CB instead of CBI */
+/*
+ * From Yukihiro Nakai, via zaitcev@yahoo.com.
+ * This is needed for CB instead of CBI
+ */
UNUSUAL_DEV( 0x04da, 0x0d05, 0x0000, 0x0000,
"Sharp CE-CW05",
"CD-R/RW Drive",
@@ -440,7 +471,8 @@ UNUSUAL_DEV( 0x04da, 0x2373, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY | US_FL_NOT_LOCKABLE ),
-/* Most of the following entries were developed with the help of
+/*
+ * Most of the following entries were developed with the help of
* Shuttle/SCM directly.
*/
UNUSUAL_DEV( 0x04e6, 0x0001, 0x0200, 0x0200,
@@ -536,7 +568,8 @@ UNUSUAL_DEV( 0x04e8, 0x5136, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64),
-/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
+/*
+ * Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
* Device uses standards-violating 32-byte Bulk Command Block Wrappers and
* reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
*/
@@ -553,7 +586,8 @@ UNUSUAL_DEV( 0x050d, 0x0115, 0x0133, 0x0133,
USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
-/* Iomega Clik! Drive
+/*
+ * Iomega Clik! Drive
* Reported by David Chatenay <dchatenay@hotmail.com>
* The reason this is needed is not fully known.
*/
@@ -570,7 +604,8 @@ COMPLIANT_DEV(0x0525, 0xa4a5, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_CAPACITY_OK ),
-/* Yakumo Mega Image 37
+/*
+ * Yakumo Mega Image 37
* Submitted by Stephan Fuhrmann <atomenergie@t-online.de> */
UNUSUAL_DEV( 0x052b, 0x1801, 0x0100, 0x0100,
"Tekom Technologies, Inc",
@@ -578,8 +613,10 @@ UNUSUAL_DEV( 0x052b, 0x1801, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
-/* Another Yakumo camera.
- * Reported by Michele Alzetta <michele.alzetta@aliceposta.it> */
+/*
+ * Another Yakumo camera.
+ * Reported by Michele Alzetta <michele.alzetta@aliceposta.it>
+ */
UNUSUAL_DEV( 0x052b, 0x1804, 0x0100, 0x0100,
"Tekom Technologies, Inc",
"300_CAMERA",
@@ -593,16 +630,20 @@ UNUSUAL_DEV( 0x052b, 0x1807, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
-/* Yakumo Mega Image 47
- * Reported by Bjoern Paetzel <kolrabi@kolrabi.de> */
+/*
+ * Yakumo Mega Image 47
+ * Reported by Bjoern Paetzel <kolrabi@kolrabi.de>
+ */
UNUSUAL_DEV( 0x052b, 0x1905, 0x0100, 0x0100,
"Tekom Technologies, Inc",
"400_CAMERA",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
-/* Reported by Paul Ortyl <ortylp@3miasto.net>
- * Note that it's similar to the device above, only different prodID */
+/*
+ * Reported by Paul Ortyl <ortylp@3miasto.net>
+ * Note that it's similar to the device above, only different prodID
+ */
UNUSUAL_DEV( 0x052b, 0x1911, 0x0100, 0x0100,
"Tekom Technologies, Inc",
"400_CAMERA",
@@ -615,8 +656,10 @@ UNUSUAL_DEV( 0x054c, 0x0010, 0x0106, 0x0450,
USB_SC_SCSI, USB_PR_DEVICE, NULL,
US_FL_SINGLE_LUN | US_FL_NOT_LOCKABLE | US_FL_NO_WP_DETECT ),
-/* Submitted by Lars Jacob <jacob.lars@googlemail.com>
- * This entry is needed because the device reports Sub=ff */
+/*
+ * Submitted by Lars Jacob <jacob.lars@googlemail.com>
+ * This entry is needed because the device reports Sub=ff
+ */
UNUSUAL_DEV( 0x054c, 0x0010, 0x0500, 0x0610,
"Sony",
"DSC-T1/T5/H5",
@@ -719,7 +762,8 @@ UNUSUAL_DEV( 0x057b, 0x0000, 0x0000, 0x0299,
USB_SC_DEVICE, USB_PR_CB, NULL,
US_FL_SINGLE_LUN),
-/* Reported by Johann Cardon <johann.cardon@free.fr>
+/*
+ * Reported by Johann Cardon <johann.cardon@free.fr>
* This entry is needed only because the device reports
* bInterfaceClass = 0xff (vendor-specific)
*/
@@ -741,7 +785,8 @@ UNUSUAL_DEV( 0x0595, 0x4343, 0x0000, 0x2210,
"Digital Camera EX-20 DSC",
USB_SC_8070, USB_PR_DEVICE, NULL, 0 ),
-/* Reported by Andre Welter <a.r.welter@gmx.de>
+/*
+ * Reported by Andre Welter <a.r.welter@gmx.de>
* This antique device predates the release of the Bulk-only Transport
* spec, and if it gets a Get-Max-LUN then it requires the host to do a
* Clear-Halt on the bulk endpoints. The SINGLE_LUN flag will prevent
@@ -773,7 +818,8 @@ UNUSUAL_DEV( 0x059f, 0x0651, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_WP_DETECT ),
-/* Submitted by Joel Bourquard <numlock@freesurf.ch>
+/*
+ * Submitted by Joel Bourquard <numlock@freesurf.ch>
* Some versions of this device need the SubClass and Protocol overrides
* while others don't.
*/
@@ -783,7 +829,8 @@ UNUSUAL_DEV( 0x05ab, 0x0060, 0x1104, 0x1110,
USB_SC_SCSI, USB_PR_BULK, NULL,
US_FL_NEED_OVERRIDE ),
-/* Submitted by Sven Anderson <sven-linux@anderson.de>
+/*
+ * Submitted by Sven Anderson <sven-linux@anderson.de>
* There are at least four ProductIDs used for iPods, so I added 0x1202 and
* 0x1204. They just need the US_FL_FIX_CAPACITY. As the bcdDevice appears
* to change with firmware updates, I changed the range to maximum for all
@@ -824,7 +871,8 @@ UNUSUAL_DEV( 0x05ac, 0x120a, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
-/* Reported by Dan Williams <dcbw@redhat.com>
+/*
+ * Reported by Dan Williams <dcbw@redhat.com>
* Option N.V. mobile broadband modems
* Ignore driver CD mode and force into modem mode by default.
*/
@@ -843,7 +891,8 @@ UNUSUAL_DEV( 0x05dc, 0xb002, 0x0000, 0x0113,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
-/* The following two entries are for a Genesys USB to IDE
+/*
+ * The following two entries are for a Genesys USB to IDE
* converter chip, but it changes its ProductId depending
* on whether or not a disk or an optical device is enclosed
* They were originally reported by Alexander Oltu
@@ -873,8 +922,10 @@ UNUSUAL_DEV( 0x05e3, 0x0723, 0x9451, 0x9451,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_SANE_SENSE ),
-/* Reported by Hanno Boeck <hanno@gmx.de>
- * Taken from the Lycoris Kernel */
+/*
+ * Reported by Hanno Boeck <hanno@gmx.de>
+ * Taken from the Lycoris Kernel
+ */
UNUSUAL_DEV( 0x0636, 0x0003, 0x0000, 0x9999,
"Vivitar",
"Vivicam 35Xx",
@@ -908,8 +959,10 @@ UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001,
US_FL_NOT_LOCKABLE ),
/* Reported by Richard -=[]=- <micro_flyer@hotmail.com> */
-/* Change to bcdDeviceMin (0x0100 to 0x0001) reported by
- * Thomas Bartosik <tbartdev@gmx-topmail.de> */
+/*
+ * Change to bcdDeviceMin (0x0100 to 0x0001) reported by
+ * Thomas Bartosik <tbartdev@gmx-topmail.de>
+ */
UNUSUAL_DEV( 0x067b, 0x2507, 0x0001, 0x0100,
"Prolific Technology Inc.",
"Mass Storage Device",
@@ -961,7 +1014,8 @@ UNUSUAL_DEV( 0x071b, 0x3203, 0x0000, 0x0000,
US_FL_NO_WP_DETECT | US_FL_MAX_SECTORS_64 |
US_FL_NO_READ_CAPACITY_16),
-/* Reported by Jean-Baptiste Onofre <jb@nanthrax.net>
+/*
+ * Reported by Jean-Baptiste Onofre <jb@nanthrax.net>
* Support the following product :
* "Dane-Elec MediaTouch"
*/
@@ -971,7 +1025,8 @@ UNUSUAL_DEV( 0x071b, 0x32bb, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_WP_DETECT | US_FL_MAX_SECTORS_64),
-/* Reported by Massimiliano Ghilardi <massimiliano.ghilardi@gmail.com>
+/*
+ * Reported by Massimiliano Ghilardi <massimiliano.ghilardi@gmail.com>
* This USB MP3/AVI player device fails and disconnects if more than 128
* sectors (64kB) are read/written in a single command, and may be present
* at least in the following products:
@@ -1040,7 +1095,8 @@ UNUSUAL_DEV( 0x07af, 0x0006, 0x0100, 0x0100,
US_FL_SINGLE_LUN ),
#endif
-/* Datafab KECF-USB / Sagatek DCS-CF / Simpletech Flashlink UCF-100
+/*
+ * Datafab KECF-USB / Sagatek DCS-CF / Simpletech Flashlink UCF-100
* Only revision 1.13 tested (same for all of the above devices,
* based on the Datafab DF-UG-07 chip). Needed for US_FL_FIX_INQUIRY.
* Submitted by Marek Michalkiewicz <marekm@amelek.gda.pl>.
@@ -1052,7 +1108,8 @@ UNUSUAL_DEV( 0x07c4, 0xa400, 0x0000, 0xffff,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY ),
-/* Reported by Rauch Wolke <rauchwolke@gmx.net>
+/*
+ * Reported by Rauch Wolke <rauchwolke@gmx.net>
* and augmented by binbin <binbinsh@gmail.com> (Bugzilla #12882)
*/
UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
@@ -1061,7 +1118,8 @@ UNUSUAL_DEV( 0x07c4, 0xa4a5, 0x0000, 0xffff,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE | US_FL_MAX_SECTORS_64 ),
-/* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
+/*
+ * Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
* to the USB storage specification in two ways:
* - They tell us they are using transport protocol CBI. In reality they
* are using transport protocol CB.
@@ -1119,7 +1177,8 @@ UNUSUAL_DEV( 0x084b, 0xa001, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
-/* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
+/*
+ * Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
* Flag will support Bulk devices which use a standards-violating 32-byte
* Command Block Wrapper. Here, the "DC2MEGA" cameras (several brands) with
* Grandtech GT892x chip, which request "Proprietary SCSI Bulk" support.
@@ -1131,7 +1190,8 @@ UNUSUAL_DEV( 0x084d, 0x0011, 0x0110, 0x0110,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BULK32),
-/* Reported by <ttkspam@free.fr>
+/*
+ * Reported by <ttkspam@free.fr>
* The device reports a vendor-specific device class, requiring an
* explicit vendor/product match.
*/
@@ -1140,11 +1200,12 @@ UNUSUAL_DEV( 0x0851, 0x1542, 0x0002, 0x0002,
"FW_Omega2",
USB_SC_DEVICE, USB_PR_DEVICE, NULL, 0),
-/* Andrew Lunn <andrew@lunn.ch>
+/*
+ * Andrew Lunn <andrew@lunn.ch>
* PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL
* on LUN 4.
* Note: Vend:Prod clash with "Ltd Maxell WS30 Slim Digital Camera"
-*/
+ */
UNUSUAL_DEV( 0x0851, 0x1543, 0x0200, 0x0200,
"PanDigital",
"Photo Frame",
@@ -1170,7 +1231,8 @@ UNUSUAL_DEV( 0x08bd, 0x1100, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_SINGLE_LUN),
-/* Submitted by Dylan Taft <d13f00l@gmail.com>
+/*
+ * Submitted by Dylan Taft <d13f00l@gmail.com>
* US_FL_IGNORE_RESIDUE Needed
*/
UNUSUAL_DEV( 0x08ca, 0x3103, 0x0100, 0x0100,
@@ -1179,7 +1241,8 @@ UNUSUAL_DEV( 0x08ca, 0x3103, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE),
-/* Entry needed for flags. Moreover, all devices with this ID use
+/*
+ * Entry needed for flags. Moreover, all devices with this ID use
* bulk-only transport, but _some_ falsely report Control/Bulk instead.
* One example is "Trumpion Digital Research MYMP3".
* Submitted by Bjoern Brill <brill(at)fs.math.uni-frankfurt.de>
@@ -1190,7 +1253,8 @@ UNUSUAL_DEV( 0x090a, 0x1001, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_BULK, NULL,
US_FL_NEED_OVERRIDE ),
-/* Reported by Filippo Bardelli <filibard@libero.it>
+/*
+ * Reported by Filippo Bardelli <filibard@libero.it>
* The device reports a subclass of RBC, which is wrong.
*/
UNUSUAL_DEV( 0x090a, 0x1050, 0x0100, 0x0100,
@@ -1213,7 +1277,8 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
-/* Reported by Paul Hartman <paul.hartman+linux@gmail.com>
+/*
+ * Reported by Paul Hartman <paul.hartman+linux@gmail.com>
* This card reader returns "Illegal Request, Logical Block Address
* Out of Range" for the first READ(10) after a new card is inserted.
*/
@@ -1223,7 +1288,8 @@ UNUSUAL_DEV( 0x090c, 0x6000, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_INITIAL_READ10 ),
-/* This Pentax still camera is not conformant
+/*
+ * This Pentax still camera is not conformant
* to the USB storage specification: -
* - It does not like the INQUIRY command. So we must handle this command
* of the SCSI layer ourselves.
@@ -1236,8 +1302,10 @@ UNUSUAL_DEV( 0x0a17, 0x0004, 0x1000, 0x1000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
-/* These are virtual windows driver CDs, which the zd1211rw driver
- * automatically converts into WLAN devices. */
+/*
+ * These are virtual windows driver CDs, which the zd1211rw driver
+ * automatically converts into WLAN devices.
+ */
UNUSUAL_DEV( 0x0ace, 0x2011, 0x0101, 0x0101,
"ZyXEL",
"G-220F USB-WLAN Install",
@@ -1250,7 +1318,8 @@ UNUSUAL_DEV( 0x0ace, 0x20ff, 0x0101, 0x0101,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_DEVICE ),
-/* Reported by Dan Williams <dcbw@redhat.com>
+/*
+ * Reported by Dan Williams <dcbw@redhat.com>
* Option N.V. mobile broadband modems
* Ignore driver CD mode and force into modem mode by default.
*/
@@ -1262,20 +1331,24 @@ UNUSUAL_DEV( 0x0af0, 0x6971, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, option_ms_init,
0),
-/* Reported by F. Aben <f.aben@option.com>
+/*
+ * Reported by F. Aben <f.aben@option.com>
* This device (wrongly) has a vendor-specific device descriptor.
* The entry is needed so usb-storage can bind to it's mass-storage
- * interface as an interface driver */
+ * interface as an interface driver
+ */
UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0000,
"Option",
"GI 0401 SD-Card",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
0 ),
-/* Reported by Jan Dumon <j.dumon@option.com>
+/*
+ * Reported by Jan Dumon <j.dumon@option.com>
* These devices (wrongly) have a vendor-specific device descriptor.
* These entries are needed so usb-storage can bind to their mass-storage
- * interface as an interface driver */
+ * interface as an interface driver
+ */
UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
"Option",
"GI 0431 SD-Card",
@@ -1419,7 +1492,8 @@ UNUSUAL_DEV( 0x0dc4, 0x0073, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY),
-/* Reported by Lubomir Blaha <tritol@trilogic.cz>
+/*
+ * Reported by Lubomir Blaha <tritol@trilogic.cz>
* I _REALLY_ don't know what 3rd, 4th number and all defines mean, but this
* works for me. Can anybody correct these values? (I able to test corrected
* version.)
@@ -1430,8 +1504,10 @@ UNUSUAL_DEV( 0x0dd8, 0x1060, 0x0000, 0xffff,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
-/* Reported by Edward Chapman (taken from linux-usb mailing list)
- Netac OnlyDisk Mini U2CV2 512MB USB 2.0 Flash Drive */
+/*
+ * Reported by Edward Chapman (taken from linux-usb mailing list)
+ * Netac OnlyDisk Mini U2CV2 512MB USB 2.0 Flash Drive
+ */
UNUSUAL_DEV( 0x0dd8, 0xd202, 0x0000, 0x9999,
"Netac",
"USB Flash Disk",
@@ -1439,8 +1515,10 @@ UNUSUAL_DEV( 0x0dd8, 0xd202, 0x0000, 0x9999,
US_FL_IGNORE_RESIDUE ),
-/* Patch by Stephan Walter <stephan.walter@epfl.ch>
- * I don't know why, but it works... */
+/*
+ * Patch by Stephan Walter <stephan.walter@epfl.ch>
+ * I don't know why, but it works...
+ */
UNUSUAL_DEV( 0x0dda, 0x0001, 0x0012, 0x0012,
"WINWARD",
"Music Disk",
@@ -1468,8 +1546,10 @@ UNUSUAL_DEV( 0x0ed1, 0x6660, 0x0100, 0x0300,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
-/* Submitted by Daniel Drake <dsd@gentoo.org>
- * Reported by dayul on the Gentoo Forums */
+/*
+ * Submitted by Daniel Drake <dsd@gentoo.org>
+ * Reported by dayul on the Gentoo Forums
+ */
UNUSUAL_DEV( 0x0ea0, 0x2168, 0x0110, 0x0110,
"Ours Technology",
"Flash Disk",
@@ -1483,15 +1563,18 @@ UNUSUAL_DEV( 0x0ea0, 0x6828, 0x0110, 0x0110,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
-/* Reported by Benjamin Schiller <sbenni@gmx.de>
- * It is also sold by Easylite as DJ 20 */
+/*
+ * Reported by Benjamin Schiller <sbenni@gmx.de>
+ * It is also sold by Easylite as DJ 20
+ */
UNUSUAL_DEV( 0x0ed1, 0x7636, 0x0103, 0x0103,
"Typhoon",
"My DJ 1820",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE | US_FL_GO_SLOW | US_FL_MAX_SECTORS_64),
-/* Patch by Leonid Petrov mail at lpetrov.net
+/*
+ * Patch by Leonid Petrov mail at lpetrov.net
* Reported by Robert Spitzenpfeil <robert@spitzenpfeil.org>
* http://www.qbik.ch/usb/devices/showdev.php?id=1705
* Updated to 103 device by MJ Ray mjr at phonecoop.coop
@@ -1502,7 +1585,8 @@ UNUSUAL_DEV( 0x0f19, 0x0103, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
-/* David Kuehling <dvdkhlng@gmx.de>:
+/*
+ * David Kuehling <dvdkhlng@gmx.de>:
* for MP3-Player AVOX WSX-300ER (bought in Japan). Reports lots of SCSI
* errors when trying to write.
*/
@@ -1540,8 +1624,10 @@ UNUSUAL_DEV( 0x0fce, 0xd0e1, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_DEVICE),
-/* Reported by Jan Mate <mate@fiit.stuba.sk>
- * and by Soeren Sonnenburg <kernel@nn7.de> */
+/*
+ * Reported by Jan Mate <mate@fiit.stuba.sk>
+ * and by Soeren Sonnenburg <kernel@nn7.de>
+ */
UNUSUAL_DEV( 0x0fce, 0xe030, 0x0000, 0x0000,
"Sony Ericsson",
"P990i",
@@ -1562,7 +1648,8 @@ UNUSUAL_DEV( 0x0fce, 0xe092, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
-/* Reported by Kevin Cernekee <kpc-usbdev@gelato.uiuc.edu>
+/*
+ * Reported by Kevin Cernekee <kpc-usbdev@gelato.uiuc.edu>
* Tested on hardware version 1.10.
* Entry is needed only for the initializer function override.
* Devices with bcd > 110 seem to not need it while those
@@ -1586,7 +1673,8 @@ UNUSUAL_DEV(0x1058, 0x070a, 0x0000, 0x9999,
"My Passport HDD",
USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_WRITE_CACHE),
-/* Reported by Fabio Venturi <f.venturi@tdnet.it>
+/*
+ * Reported by Fabio Venturi <f.venturi@tdnet.it>
* The device reports a vendor-specific bDeviceClass.
*/
UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
@@ -1595,7 +1683,8 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
0),
-/* Reported by Pascal Terjan <pterjan@mandriva.com>
+/*
+ * Reported by Pascal Terjan <pterjan@mandriva.com>
* Ignore driver CD mode and force into modem mode by default.
*/
UNUSUAL_DEV( 0x1186, 0x3e04, 0x0000, 0x0000,
@@ -1603,7 +1692,8 @@ UNUSUAL_DEV( 0x1186, 0x3e04, 0x0000, 0x0000,
"USB Mass Storage",
USB_SC_DEVICE, USB_PR_DEVICE, option_ms_init, US_FL_IGNORE_DEVICE),
-/* Reported by Kevin Lloyd <linux@sierrawireless.com>
+/*
+ * Reported by Kevin Lloyd <linux@sierrawireless.com>
* Entry is needed for the initializer function override,
* which instructs the device to load as a modem
* device.
@@ -1614,7 +1704,8 @@ UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, sierra_ms_init,
0),
-/* Reported by Jaco Kroon <jaco@kroon.co.za>
+/*
+ * Reported by Jaco Kroon <jaco@kroon.co.za>
* The usb-storage module found on the Digitech GNX4 (and supposedly other
* devices) misbehaves and causes a bunch of invalid I/O errors.
*/
@@ -1624,7 +1715,8 @@ UNUSUAL_DEV( 0x1210, 0x0003, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
-/* Reported by fangxiaozhi <huananhu@huawei.com>
+/*
+ * Reported by fangxiaozhi <huananhu@huawei.com>
* This brings the HUAWEI data card devices into multi-port mode
*/
UNUSUAL_DEV( 0x12d1, 0x1001, 0x0000, 0x0000,
@@ -1993,7 +2085,8 @@ UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA ),
-/* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
+/*
+ * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
* JMicron responds to USN and several other SCSI ioctls with a
* residue that causes subsequent I/O requests to fail. */
UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
@@ -2009,7 +2102,8 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA ),
-/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
+/*
+ * Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
* and Mac USB Dock USB-SCSI */
UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133,
"Entrega Technologies",
@@ -2017,8 +2111,10 @@ UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133,
USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
-/* Reported by Robert Schedel <r.schedel@yahoo.de>
- * Note: this is a 'super top' device like the above 14cd/6600 device */
+/*
+ * Reported by Robert Schedel <r.schedel@yahoo.de>
+ * Note: this is a 'super top' device like the above 14cd/6600 device
+ */
UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201,
"Teac",
"HD-35PUK-B",
@@ -2045,10 +2141,12 @@ UNUSUAL_DEV( 0x1822, 0x0001, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
-/* Reported by Hans de Goede <hdegoede@redhat.com>
+/*
+ * Reported by Hans de Goede <hdegoede@redhat.com>
* These Appotech controllers are found in Picture Frames, they provide a
* (buggy) emulation of a cdrom drive which contains the windows software
- * Uploading of pictures happens over the corresponding /dev/sg device. */
+ * Uploading of pictures happens over the corresponding /dev/sg device.
+ */
UNUSUAL_DEV( 0x1908, 0x1315, 0x0000, 0x0000,
"BUILDWIN",
"Photo Frame",
@@ -2065,19 +2163,22 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_READ_DISC_INFO ),
-/* Reported by Oliver Neukum <oneukum@suse.com>
+/*
+ * Reported by Oliver Neukum <oneukum@suse.com>
* This device morphes spontaneously into another device if the access
* pattern of Windows isn't followed. Thus writable media would be dirty
* if the initial instance is used. So the device is limited to its
* virtual CD.
- * And yes, the concept that BCD goes up to 9 is not heeded */
+ * And yes, the concept that BCD goes up to 9 is not heeded
+ */
UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
"ZTE,Incorporated",
"ZTE WCDMA Technologies MSM",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_SINGLE_LUN ),
-/* Reported by Sven Geggus <sven-usbst@geggus.net>
+/*
+ * Reported by Sven Geggus <sven-usbst@geggus.net>
* This encrypted pen drive returns bogus data for the initial READ(10).
*/
UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200,
@@ -2086,7 +2187,8 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_INITIAL_READ10 ),
-/* Reported by Hans de Goede <hdegoede@redhat.com>
+/*
+ * Reported by Hans de Goede <hdegoede@redhat.com>
* These are mini projectors using USB for both power and video data transport
* The usb-storage interface is a virtual windows driver CD, which the gm12u320
* driver automatically converts into framebuffer & kms dri device nodes.
@@ -2097,9 +2199,11 @@ UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_DEVICE ),
-/* Patch by Richard Schütz <r.schtz@t-online.de>
+/*
+ * Patch by Richard Schütz <r.schtz@t-online.de>
* This external hard drive enclosure uses a JMicron chip which
- * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
+ * needs the US_FL_IGNORE_RESIDUE flag to work properly.
+ */
UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000,
"TrekStor GmbH & Co. KG",
"DataStation maxi g.u",
@@ -2126,7 +2230,8 @@ UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY),
-/* patch submitted by Davide Perini <perini.davide@dpsoftware.org>
+/*
+ * patch submitted by Davide Perini <perini.davide@dpsoftware.org>
* and Renato Perini <rperini@email.it>
*/
UNUSUAL_DEV( 0x22b8, 0x3010, 0x0001, 0x0001,
@@ -2153,7 +2258,8 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_GO_SLOW ),
-/* Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
+/*
+ * Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
* Mio Moov 330
*/
UNUSUAL_DEV( 0x3340, 0xffff, 0x0000, 0x0000,
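
[Editor's note: the header comments in unusual_devs.h above state that the file only works when included into another file that first defines the UNUSUAL_DEV, COMPLIANT_DEV, and USUAL_DEV macros. A minimal sketch of that consumer pattern, modeled on what usual-tables.c does — the table name here is hypothetical and the field layout is illustrative, not the patch's code:

#include <linux/usb.h>
#include <linux/usb_usual.h>

/* Expand each entry into a struct usb_device_id; the vendor/product
 * strings, protocol, transport, and init function arguments are
 * discarded at this expansion site and only the flags are kept. */
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
		    vendorName, productName, useProtocol, useTransport, \
		    initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
  .driver_info = (flags) }

#define COMPLIANT_DEV	UNUSUAL_DEV

#define USUAL_DEV(useProto, useTrans) \
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans) }

/* Every UNUSUAL_DEV(...) line in unusual_devs.h becomes one array entry. */
static const struct usb_device_id example_storage_ids[] = {
#	include "unusual_devs.h"
	{ }		/* terminating entry */
};

#undef UNUSUAL_DEV
#undef COMPLIANT_DEV
#undef USUAL_DEV
]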
diff --git a/drivers/usb/storage/unusual_freecom.h b/drivers/usb/storage/unusual_freecom.h
index 59a261155..1f5aab42e 100644
--- a/drivers/usb/storage/unusual_freecom.h
+++ b/drivers/usb/storage/unusual_freecom.h
@@ -1,4 +1,5 @@
-/* Unusual Devices File for the Freecom USB/IDE adaptor
+/*
+ * Unusual Devices File for the Freecom USB/IDE adaptor
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/usb/storage/unusual_isd200.h b/drivers/usb/storage/unusual_isd200.h
index 14cca0c48..9b6862ec3 100644
--- a/drivers/usb/storage/unusual_isd200.h
+++ b/drivers/usb/storage/unusual_isd200.h
@@ -1,4 +1,5 @@
-/* Unusual Devices File for In-System Design, Inc. ISD200 ASIC
+/*
+ * Unusual Devices File for In-System Design, Inc. ISD200 ASIC
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/usb/storage/unusual_jumpshot.h b/drivers/usb/storage/unusual_jumpshot.h
index 54be78b5d..413e64fa6 100644
--- a/drivers/usb/storage/unusual_jumpshot.h
+++ b/drivers/usb/storage/unusual_jumpshot.h
@@ -1,4 +1,5 @@
-/* Unusual Devices File for the Lexar "Jumpshot" Compact Flash reader
+/*
+ * Unusual Devices File for the Lexar "Jumpshot" Compact Flash reader
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/usb/storage/unusual_karma.h b/drivers/usb/storage/unusual_karma.h
index 6df03972a..e6fad3aea 100644
--- a/drivers/usb/storage/unusual_karma.h
+++ b/drivers/usb/storage/unusual_karma.h
@@ -1,4 +1,5 @@
-/* Unusual Devices File for the Rio Karma
+/*
+ * Unusual Devices File for the Rio Karma
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/usb/storage/unusual_onetouch.h b/drivers/usb/storage/unusual_onetouch.h
index 0abb819c7..425dc22f3 100644
--- a/drivers/usb/storage/unusual_onetouch.h
+++ b/drivers/usb/storage/unusual_onetouch.h
@@ -1,4 +1,5 @@
-/* Unusual Devices File for the Maxtor OneTouch USB hard drive's button
+/*
+ * Unusual Devices File for the Maxtor OneTouch USB hard drive's button
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -18,7 +19,8 @@
#if defined(CONFIG_USB_STORAGE_ONETOUCH) || \
defined(CONFIG_USB_STORAGE_ONETOUCH_MODULE)
-/* Submitted by: Nick Sillik <n.sillik@temple.edu>
+/*
+ * Submitted by: Nick Sillik <n.sillik@temple.edu>
* Needed for OneTouch extension to usb-storage
*/
UNUSUAL_DEV( 0x0d49, 0x7000, 0x0000, 0x9999,
diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
index e41f50c95..8fe624ad3 100644
--- a/drivers/usb/storage/unusual_realtek.h
+++ b/drivers/usb/storage/unusual_realtek.h
@@ -1,4 +1,5 @@
-/* Driver for Realtek RTS51xx USB card reader
+/*
+ * Driver for Realtek RTS51xx USB card reader
*
* Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
*
diff --git a/drivers/usb/storage/unusual_sddr09.h b/drivers/usb/storage/unusual_sddr09.h
index 59a7e37b6..d9d38ac4a 100644
--- a/drivers/usb/storage/unusual_sddr09.h
+++ b/drivers/usb/storage/unusual_sddr09.h
@@ -1,4 +1,5 @@
-/* Unusual Devices File for SanDisk SDDR-09 SmartMedia reader
+/*
+ * Unusual Devices File for SanDisk SDDR-09 SmartMedia reader
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/usb/storage/unusual_sddr55.h b/drivers/usb/storage/unusual_sddr55.h
index fcb7e12c5..ebb1d1c6c 100644
--- a/drivers/usb/storage/unusual_sddr55.h
+++ b/drivers/usb/storage/unusual_sddr55.h
@@ -1,4 +1,5 @@
-/* Unusual Devices File for SanDisk SDDR-55 SmartMedia reader
+/*
+ * Unusual Devices File for SanDisk SDDR-55 SmartMedia reader
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 53341a77d..cbea9f329 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -1,4 +1,5 @@
-/* Driver for USB Attached SCSI devices - Unusual Devices File
+/*
+ * Driver for USB Attached SCSI devices - Unusual Devices File
*
* (c) 2013 Hans de Goede <hdegoede@redhat.com>
*
diff --git a/drivers/usb/storage/unusual_usbat.h b/drivers/usb/storage/unusual_usbat.h
index 38e79c4e6..2044ad5ef 100644
--- a/drivers/usb/storage/unusual_usbat.h
+++ b/drivers/usb/storage/unusual_usbat.h
@@ -1,4 +1,5 @@
-/* Unusual Devices File for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable
+/*
+ * Unusual Devices File for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 9de988a0f..ef2d8cde6 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -1,4 +1,5 @@
-/* Driver for USB Mass Storage compliant devices
+/*
+ * Driver for USB Mass Storage compliant devices
*
* Current development and maintenance by:
* (c) 1999-2003 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
@@ -97,7 +98,8 @@ MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks");
* with the entries in usb_storage_usb_ids[], defined in usual-tables.c.
*/
-/* The vendor name should be kept at eight characters or less, and
+/*
+ * The vendor name should be kept at eight characters or less, and
* the product name should be kept at 16 characters or less. If a device
* has the US_FL_FIX_INQUIRY flag, then the vendor and product names
* normally generated by a device through the INQUIRY response will be
@@ -191,8 +193,10 @@ int usb_stor_suspend(struct usb_interface *iface, pm_message_t message)
if (us->suspend_resume_hook)
(us->suspend_resume_hook)(us, US_SUSPEND);
- /* When runtime PM is working, we'll set a flag to indicate
- * whether we should autoresume when a SCSI request arrives. */
+ /*
+ * When runtime PM is working, we'll set a flag to indicate
+ * whether we should autoresume when a SCSI request arrives.
+ */
mutex_unlock(&us->dev_mutex);
return 0;
@@ -220,8 +224,10 @@ int usb_stor_reset_resume(struct usb_interface *iface)
/* Report the reset to the SCSI core */
usb_stor_report_bus_reset(us);
- /* FIXME: Notify the subdrivers that they need to reinitialize
- * the device */
+ /*
+ * FIXME: Notify the subdrivers that they need to reinitialize
+ * the device
+ */
return 0;
}
EXPORT_SYMBOL_GPL(usb_stor_reset_resume);
@@ -250,8 +256,10 @@ int usb_stor_post_reset(struct usb_interface *iface)
/* Report the reset to the SCSI core */
usb_stor_report_bus_reset(us);
- /* FIXME: Notify the subdrivers that they need to reinitialize
- * the device */
+ /*
+ * FIXME: Notify the subdrivers that they need to reinitialize
+ * the device
+ */
mutex_unlock(&us->dev_mutex);
return 0;
@@ -274,15 +282,17 @@ void fill_inquiry_response(struct us_data *us, unsigned char *data,
return;
memset(data+8, ' ', 28);
- if (data[0]&0x20) { /* USB device currently not connected. Return
- peripheral qualifier 001b ("...however, the
- physical device is not currently connected
- to this logical unit") and leave vendor and
- product identification empty. ("If the target
- does store some of the INQUIRY data on the
- device, it may return zeros or ASCII spaces
- (20h) in those fields until the data is
- available from the device."). */
+ if (data[0]&0x20) { /*
+ * USB device currently not connected. Return
+ * peripheral qualifier 001b ("...however, the
+ * physical device is not currently connected
+ * to this logical unit") and leave vendor and
+ * product identification empty. ("If the target
+ * does store some of the INQUIRY data on the
+ * device, it may return zeros or ASCII spaces
+ * (20h) in those fields until the data is
+ * available from the device.").
+ */
} else {
u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice);
int n;
@@ -336,7 +346,8 @@ static int usb_stor_control_thread(void * __us)
scsi_unlock(host);
- /* reject the command if the direction indicator
+ /*
+ * reject the command if the direction indicator
* is UNKNOWN
*/
if (us->srb->sc_data_direction == DMA_BIDIRECTIONAL) {
@@ -344,7 +355,8 @@ static int usb_stor_control_thread(void * __us)
us->srb->result = DID_ERROR << 16;
}
- /* reject if target != 0 or if LUN is higher than
+ /*
+ * reject if target != 0 or if LUN is higher than
* the maximum known LUN
*/
else if (us->srb->device->id &&
@@ -362,8 +374,10 @@ static int usb_stor_control_thread(void * __us)
us->srb->result = DID_BAD_TARGET << 16;
}
- /* Handle those devices which need us to fake
- * their inquiry data */
+ /*
+ * Handle those devices which need us to fake
+ * their inquiry data
+ */
else if ((us->srb->cmnd[0] == INQUIRY) &&
(us->fflags & US_FL_FIX_INQUIRY)) {
unsigned char data_ptr[36] = {
@@ -395,11 +409,13 @@ SkipForAbort:
usb_stor_dbg(us, "scsi command aborted\n");
}
- /* If an abort request was received we need to signal that
+ /*
+ * If an abort request was received we need to signal that
* the abort has finished. The proper test for this is
* the TIMED_OUT flag, not srb->result == DID_ABORT, because
* the timeout might have occurred after the command had
- * already completed with a different result code. */
+ * already completed with a different result code.
+ */
if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
complete(&(us->notify));
@@ -610,7 +626,8 @@ static int get_device_info(struct us_data *us, const struct usb_device_id *id,
le16_to_cpu(dev->descriptor.idProduct),
us->fflags);
- /* Log a message if a non-generic unusual_dev entry contains an
+ /*
+ * Log a message if a non-generic unusual_dev entry contains an
* unnecessary subclass or protocol override. This may stimulate
* reports from users that will help us remove unneeded entries
* from the unusual_devs.h table.
@@ -782,8 +799,10 @@ static int usb_stor_acquire_resources(struct us_data *us)
return -ENOMEM;
}
- /* Just before we start our control thread, initialize
- * the device if it needs initialization */
+ /*
+ * Just before we start our control thread, initialize
+ * the device if it needs initialization
+ */
if (us->unusual_dev->initFunction) {
p = us->unusual_dev->initFunction(us);
if (p)
@@ -805,7 +824,8 @@ static int usb_stor_acquire_resources(struct us_data *us)
/* Release all our dynamic resources */
static void usb_stor_release_resources(struct us_data *us)
{
- /* Tell the control thread to exit. The SCSI host must
+ /*
+ * Tell the control thread to exit. The SCSI host must
* already have been removed and the DISCONNECTING flag set
* so that we won't accept any more commands.
*/
@@ -836,7 +856,8 @@ static void dissociate_dev(struct us_data *us)
usb_set_intfdata(us->pusb_intf, NULL);
}
-/* First stage of disconnect processing: stop SCSI scanning,
+/*
+ * First stage of disconnect processing: stop SCSI scanning,
* remove the host, and stop accepting new commands
*/
static void quiesce_and_remove_host(struct us_data *us)
@@ -849,7 +870,8 @@ static void quiesce_and_remove_host(struct us_data *us)
wake_up(&us->delay_wait);
}
- /* Prevent SCSI scanning (if it hasn't started yet)
+ /*
+ * Prevent SCSI scanning (if it hasn't started yet)
* or wait for the SCSI-scanning routine to stop.
*/
cancel_delayed_work_sync(&us->scan_dwork);
@@ -858,12 +880,14 @@ static void quiesce_and_remove_host(struct us_data *us)
if (test_bit(US_FLIDX_SCAN_PENDING, &us->dflags))
usb_autopm_put_interface_no_suspend(us->pusb_intf);
- /* Removing the host will perform an orderly shutdown: caches
+ /*
+ * Removing the host will perform an orderly shutdown: caches
* synchronized, disks spun down, etc.
*/
scsi_remove_host(host);
- /* Prevent any new commands from being accepted and cut short
+ /*
+ * Prevent any new commands from being accepted and cut short
* reset delays.
*/
scsi_lock(host);
@@ -878,8 +902,10 @@ static void release_everything(struct us_data *us)
usb_stor_release_resources(us);
dissociate_dev(us);
- /* Drop our reference to the host; the SCSI core will free it
- * (and "us" along with it) when the refcount becomes 0. */
+ /*
+ * Drop our reference to the host; the SCSI core will free it
+ * (and "us" along with it) when the refcount becomes 0.
+ */
scsi_host_put(us_to_host(us));
}
@@ -900,7 +926,8 @@ static void usb_stor_scan_dwork(struct work_struct *work)
us->max_lun = usb_stor_Bulk_max_lun(us);
/*
* Allow proper scanning of devices that present more than 8 LUNs
- * While not affecting other devices that may need the previous behavior
+ * While not affecting other devices that may need the previous
+ * behavior
*/
if (us->max_lun >= 8)
us_to_host(us)->max_lun = us->max_lun+1;
@@ -975,7 +1002,8 @@ int usb_stor_probe1(struct us_data **pus,
get_transport(us);
get_protocol(us);
- /* Give the caller a chance to fill in specialized transport
+ /*
+ * Give the caller a chance to fill in specialized transport
* or protocol settings.
*/
return 0;
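
[Editor's note: the usb.c comments above spell out the eight-character vendor and 16-character product limits and show fill_inquiry_response() doing memset(data+8, ' ', 28) before filling in identification strings. A minimal sketch of that padding for a caller-supplied 36-byte INQUIRY buffer; the helper name is hypothetical and the revision handling is simplified relative to the real function:

#include <linux/string.h>

static void example_fill_inquiry(unsigned char data[36],
				 const char *vendor,	/* <= 8 chars  */
				 const char *product)	/* <= 16 chars */
{
	/* Bytes 8..35 carry vendor (8), product (16) and revision (4),
	 * space-padded per SPC, exactly the region usb-storage blanks
	 * with memset(data+8, ' ', 28). */
	memset(data + 8, ' ', 28);
	memcpy(data + 8, vendor, strnlen(vendor, 8));
	memcpy(data + 16, product, strnlen(product, 16));
	/* data+32..35: revision; the real code derives it from bcdDevice. */
}
]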
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index da0ad3241..8fae28b40 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -1,4 +1,5 @@
-/* Driver for USB Mass Storage compliant devices
+/*
+ * Driver for USB Mass Storage compliant devices
* Main Header File
*
* Current development and maintenance by:
@@ -100,7 +101,8 @@ typedef void (*pm_hook)(struct us_data *, int); /* power management hook */
/* we allocate one of these for every device that we remember */
struct us_data {
- /* The device we're working with
+ /*
+ * The device we're working with
* It's important to note:
* (o) you must hold dev_mutex to change pusb_dev
*/
@@ -125,7 +127,7 @@ struct us_data {
u8 max_lun;
u8 ifnum; /* interface number */
- u8 ep_bInterval; /* interrupt interval */
+ u8 ep_bInterval; /* interrupt interval */
/* function pointers for this device */
trans_cmnd transport; /* transport function */
@@ -175,8 +177,10 @@ static inline struct us_data *host_to_us(struct Scsi_Host *host) {
extern void fill_inquiry_response(struct us_data *us,
unsigned char *data, unsigned int data_len);
-/* The scsi_lock() and scsi_unlock() macros protect the sm_state and the
- * single queue element srb for write access */
+/*
+ * The scsi_lock() and scsi_unlock() macros protect the sm_state and the
+ * single queue element srb for write access
+ */
#define scsi_unlock(host) spin_unlock_irq(host->host_lock)
#define scsi_lock(host) spin_lock_irq(host->host_lock)
diff --git a/drivers/usb/storage/usual-tables.c b/drivers/usb/storage/usual-tables.c
index 5ef8ce74a..499669bcf 100644
--- a/drivers/usb/storage/usual-tables.c
+++ b/drivers/usb/storage/usual-tables.c
@@ -1,4 +1,5 @@
-/* Driver for USB Mass Storage devices
+/*
+ * Driver for USB Mass Storage devices
* Usual Tables File for usb-storage and libusual
*
* Copyright (C) 2009 Alan Stern (stern@rowland.harvard.edu)
diff --git a/drivers/usb/usbip/Kconfig b/drivers/usb/usbip/Kconfig
index bd99e9e47..17646b253 100644
--- a/drivers/usb/usbip/Kconfig
+++ b/drivers/usb/usbip/Kconfig
@@ -1,6 +1,6 @@
config USBIP_CORE
tristate "USB/IP support"
- depends on USB && NET
+ depends on USB_COMMON && NET
---help---
This enables pushing USB packets over IP to allow remote
machines direct access to USB devices. It provides the
@@ -16,7 +16,7 @@ config USBIP_CORE
config USBIP_VHCI_HCD
tristate "VHCI hcd"
- depends on USBIP_CORE
+ depends on USBIP_CORE && USB
---help---
This enables the USB/IP virtual host controller driver,
which is run on the remote machine.
@@ -26,7 +26,7 @@ config USBIP_VHCI_HCD
config USBIP_HOST
tristate "Host driver"
- depends on USBIP_CORE
+ depends on USBIP_CORE && USB
---help---
This enables the USB/IP host driver, which is run on the
machine that is sharing the USB devices.
@@ -34,6 +34,17 @@ config USBIP_HOST
To compile this driver as a module, choose M here: the
module will be called usbip-host.
+config USBIP_VUDC
+ tristate "VUDC driver"
+ depends on USBIP_CORE && USB_GADGET
+ ---help---
+ This enables the USB/IP virtual USB device controller
+ driver, which is run on the host machine, allowing the
+ machine itself to act as a device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called usbip-vudc.
+
config USBIP_DEBUG
bool "Debug messages for USB/IP"
depends on USBIP_CORE
diff --git a/drivers/usb/usbip/Makefile b/drivers/usb/usbip/Makefile
index 9ecd61545..d843a9e68 100644
--- a/drivers/usb/usbip/Makefile
+++ b/drivers/usb/usbip/Makefile
@@ -8,3 +8,6 @@ vhci-hcd-y := vhci_sysfs.o vhci_tx.o vhci_rx.o vhci_hcd.o
obj-$(CONFIG_USBIP_HOST) += usbip-host.o
usbip-host-y := stub_dev.o stub_main.o stub_rx.o stub_tx.o
+
+obj-$(CONFIG_USBIP_VUDC) += usbip-vudc.o
+usbip-vudc-y := vudc_dev.o vudc_sysfs.o vudc_tx.o vudc_rx.o vudc_transfer.o vudc_main.o
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
index 266e2b0ce..910f02777 100644
--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -33,7 +33,6 @@
#define STUB_BUSID_ALLOC 3
struct stub_device {
- struct usb_interface *interface;
struct usb_device *udev;
struct usbip_device ud;
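
[Editor's note: with the interface pointer removed from struct stub_device, the reset paths in stub_dev.c and stub_rx.c below switch to usb_lock_device_for_reset(udev, NULL), i.e. device-scope locking with no bound interface. A hedged sketch of that pattern using only documented USB core calls; the function name is hypothetical:

#include <linux/usb.h>

static void example_device_reset(struct usb_device *udev)
{
	int ret;

	/* NULL tells the core the caller holds no particular interface
	 * on this device, so locking is negotiated at device scope. */
	ret = usb_lock_device_for_reset(udev, NULL);
	if (ret < 0)
		return;		/* could not safely acquire the device */

	usb_reset_device(udev);
	usb_unlock_device(udev);
}
]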
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index a3ec49bdc..c653ce533 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -219,7 +219,7 @@ static void stub_device_reset(struct usbip_device *ud)
dev_dbg(&udev->dev, "device reset");
- ret = usb_lock_device_for_reset(udev, sdev->interface);
+ ret = usb_lock_device_for_reset(udev, NULL);
if (ret < 0) {
dev_err(&udev->dev, "lock for reset\n");
spin_lock_irq(&ud->lock);
@@ -252,7 +252,7 @@ static void stub_device_unusable(struct usbip_device *ud)
/**
* stub_device_alloc - allocate a new stub_device struct
- * @interface: usb_interface of a new device
+ * @udev: usb_device of a new device
*
* Allocates and initializes a new stub_device struct.
*/
@@ -388,7 +388,6 @@ err_files:
err_port:
dev_set_drvdata(&udev->dev, NULL);
usb_put_dev(udev);
- kthread_stop_put(sdev->ud.eh);
busid_priv->sdev = NULL;
stub_device_free(sdev);
@@ -449,7 +448,7 @@ static void stub_disconnect(struct usb_device *udev)
}
/* If usb reset is called from event handler */
- if (busid_priv->sdev->ud.eh == current)
+ if (usbip_in_eh(current))
return;
/* shutdown the current connection */
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
index 00e475c51..2df63e305 100644
--- a/drivers/usb/usbip/stub_rx.c
+++ b/drivers/usb/usbip/stub_rx.c
@@ -165,12 +165,7 @@ static int tweak_reset_device_cmd(struct urb *urb)
dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
- /*
- * With the implementation of pre_reset and post_reset the driver no
- * longer unbinds. This allows the use of synchronous reset.
- */
-
- if (usb_lock_device_for_reset(sdev->udev, sdev->interface) < 0) {
+ if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
return 0;
}
@@ -321,7 +316,7 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
if (!priv) {
- dev_err(&sdev->interface->dev, "alloc stub_priv\n");
+ dev_err(&sdev->udev->dev, "alloc stub_priv\n");
spin_unlock_irqrestore(&sdev->priv_lock, flags);
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return NULL;
@@ -352,7 +347,7 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
else
ep = udev->ep_out[epnum & 0x7f];
if (!ep) {
- dev_err(&sdev->interface->dev, "no such endpoint?, %d\n",
+ dev_err(&sdev->udev->dev, "no such endpoint?, %d\n",
epnum);
BUG();
}
@@ -387,7 +382,7 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
}
/* NOT REACHED */
- dev_err(&sdev->interface->dev, "get pipe, epnum %d\n", epnum);
+ dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum);
return 0;
}
@@ -466,7 +461,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
priv->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!priv->urb) {
- dev_err(&sdev->interface->dev, "malloc urb\n");
+ dev_err(&udev->dev, "malloc urb\n");
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return;
}
@@ -486,7 +481,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
priv->urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
GFP_KERNEL);
if (!priv->urb->setup_packet) {
- dev_err(&sdev->interface->dev, "allocate setup_packet\n");
+ dev_err(&udev->dev, "allocate setup_packet\n");
usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
return;
}
@@ -517,7 +512,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
pdu->base.seqnum);
else {
- dev_err(&sdev->interface->dev, "submit_urb error, %d\n", ret);
+ dev_err(&udev->dev, "submit_urb error, %d\n", ret);
usbip_dump_header(pdu);
usbip_dump_urb(priv->urb);
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index dbcabc9db..6b1e8c3f0 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -97,7 +97,10 @@ void stub_complete(struct urb *urb)
/* link a urb to the queue of tx. */
spin_lock_irqsave(&sdev->priv_lock, flags);
- if (priv->unlinking) {
+ if (sdev->ud.tcp_socket == NULL) {
+ usbip_dbg_stub_tx("ignore urb for closed connection %p", urb);
+ /* It will be freed in stub_device_cleanup_urbs(). */
+ } else if (priv->unlinking) {
stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
stub_free_priv_and_urb(priv);
} else {
@@ -229,7 +232,7 @@ static int stub_send_ret_submit(struct stub_device *sdev)
}
if (txsize != sizeof(pdu_header) + urb->actual_length) {
- dev_err(&sdev->interface->dev,
+ dev_err(&sdev->udev->dev,
"actual length of urb %d does not match iso packet sizes %zu\n",
urb->actual_length,
txsize-sizeof(pdu_header));
@@ -261,7 +264,7 @@ static int stub_send_ret_submit(struct stub_device *sdev)
ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
iov, iovnum, txsize);
if (ret != txsize) {
- dev_err(&sdev->interface->dev,
+ dev_err(&sdev->udev->dev,
"sendmsg failed!, retval %d for %zd\n",
ret, txsize);
kfree(iov);
@@ -336,7 +339,7 @@ static int stub_send_ret_unlink(struct stub_device *sdev)
ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov,
1, txsize);
if (ret != txsize) {
- dev_err(&sdev->interface->dev,
+ dev_err(&sdev->udev->dev,
"sendmsg failed!, retval %d for %zd\n",
ret, txsize);
usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
index e40da7759..8b232290b 100644
--- a/drivers/usb/usbip/usbip_common.c
+++ b/drivers/usb/usbip/usbip_common.c
@@ -1,5 +1,7 @@
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
+ * Copyright (C) 2015-2016 Samsung Electronics
+ * Krzysztof Opasiak <k.opasiak@samsung.com>
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -643,7 +645,7 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
ret);
kfree(buff);
- if (ud->side == USBIP_STUB)
+ if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC)
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
else
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
@@ -665,7 +667,7 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
"total length of iso packets %d not equal to actual length of buffer %d\n",
total_length, urb->actual_length);
- if (ud->side == USBIP_STUB)
+ if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC)
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
else
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
@@ -723,7 +725,7 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
int ret;
int size;
- if (ud->side == USBIP_STUB) {
+ if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) {
/* the direction of urb must be OUT. */
if (usb_pipein(urb->pipe))
return 0;
@@ -755,7 +757,7 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
if (ret != size) {
dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
- if (ud->side == USBIP_STUB) {
+ if (ud->side == USBIP_STUB || ud->side == USBIP_VUDC) {
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
} else {
usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
@@ -769,12 +771,19 @@ EXPORT_SYMBOL_GPL(usbip_recv_xbuff);
static int __init usbip_core_init(void)
{
+ int ret;
+
pr_info(DRIVER_DESC " v" USBIP_VERSION "\n");
+ ret = usbip_init_eh();
+ if (ret)
+ return ret;
+
return 0;
}
static void __exit usbip_core_exit(void)
{
+ usbip_finish_eh();
return;
}
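
[Editor's note: the usbip_common.c hunks above extend each "is this the device side?" test from USBIP_STUB alone to USBIP_STUB or USBIP_VUDC, since the new virtual device controller behaves like the stub for URB direction and error events. A small sketch of that repeated test factored into one helper — the helper is hypothetical, not something this patch adds:

static inline int usbip_side_is_device(const struct usbip_device *ud)
{
	/* Both the stub and the virtual UDC act as the exported device. */
	return ud->side == USBIP_STUB || ud->side == USBIP_VUDC;
}
]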
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index 86b08475c..c7508cbce 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -1,5 +1,7 @@
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
+ * Copyright (C) 2015-2016 Samsung Electronics
+ * Krzysztof Opasiak <k.opasiak@samsung.com>
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -234,6 +236,7 @@ struct usbip_iso_packet_descriptor {
enum usbip_side {
USBIP_VHCI,
USBIP_STUB,
+ USBIP_VUDC,
};
/* event handler */
@@ -248,6 +251,13 @@ enum usbip_side {
#define SDEV_EVENT_ERROR_SUBMIT (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define SDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
+#define VUDC_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
+#define VUDC_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
+#define VUDC_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
+/* catastrophic emulated usb error */
+#define VUDC_EVENT_ERROR_USB (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
+#define VUDC_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
+
#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
#define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
@@ -267,7 +277,6 @@ struct usbip_device {
struct task_struct *tcp_tx;
unsigned long event;
- struct task_struct *eh;
wait_queue_head_t eh_waitq;
struct eh_ops {
@@ -313,10 +322,13 @@ void usbip_pad_iso(struct usbip_device *ud, struct urb *urb);
int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb);
/* usbip_event.c */
+int usbip_init_eh(void);
+void usbip_finish_eh(void);
int usbip_start_eh(struct usbip_device *ud);
void usbip_stop_eh(struct usbip_device *ud);
void usbip_event_add(struct usbip_device *ud, unsigned long event);
int usbip_event_happened(struct usbip_device *ud);
+int usbip_in_eh(struct task_struct *task);
static inline int interface_to_busnum(struct usb_interface *interface)
{
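
[Editor's note: usbip_common.h now declares usbip_init_eh()/usbip_finish_eh() for module-level setup plus usbip_in_eh() for recursion checks, and usbip_event.c below replaces the per-device event kthread with one shared workqueue. A hedged sketch of the life cycle a side driver follows with this API, assuming the declarations shown in this patch; the eh_ops callbacks and my_*/example_* names are hypothetical:

#include "usbip_common.h"

static void my_shutdown(struct usbip_device *ud) { /* close tcp_socket */ }
static void my_reset(struct usbip_device *ud)    { /* reinit device state */ }
static void my_unusable(struct usbip_device *ud) { /* mark device dead */ }

static int example_attach(struct usbip_device *ud)
{
	ud->eh_ops.shutdown = my_shutdown;
	ud->eh_ops.reset    = my_reset;
	ud->eh_ops.unusable = my_unusable;

	/* With this patch, just initializes the waitqueue and event mask. */
	return usbip_start_eh(ud);
}

static void example_error_path(struct usbip_device *ud)
{
	/* Queues a usbip_event on the shared "usbip_event" workqueue
	 * instead of waking a per-device kthread. */
	usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
}

static void example_detach(struct usbip_device *ud)
{
	/* Avoid waiting on ourselves when called from the event worker,
	 * as stub_disconnect() now does via usbip_in_eh(current). */
	if (usbip_in_eh(current))
		return;

	usbip_event_add(ud, SDEV_EVENT_REMOVED);
	usbip_stop_eh(ud);	/* waits until pending events are handled */
}
]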
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index 2580a32bc..f1635662c 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2003-2008 Takahiro Hirofuchi
+ * Copyright (C) 2015 Nobuo Iwata
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,17 +20,68 @@
#include <linux/kthread.h>
#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
#include "usbip_common.h"
-static int event_handler(struct usbip_device *ud)
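+/*
+ * Replaces the per-device handler thread: events are queued on one
+ * global list and drained by a single workqueue item, with each entry
+ * recording the usbip_device it belongs to.
+ */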
+struct usbip_event {
+ struct list_head node;
+ struct usbip_device *ud;
+};
+
+static DEFINE_SPINLOCK(event_lock);
+static LIST_HEAD(event_list);
+
+static void set_event(struct usbip_device *ud, unsigned long event)
{
- usbip_dbg_eh("enter\n");
+ unsigned long flags;
- /*
- * Events are handled by only this thread.
- */
- while (usbip_event_happened(ud)) {
+ spin_lock_irqsave(&ud->lock, flags);
+ ud->event |= event;
+ spin_unlock_irqrestore(&ud->lock, flags);
+}
+
+static void unset_event(struct usbip_device *ud, unsigned long event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ud->lock, flags);
+ ud->event &= ~event;
+ spin_unlock_irqrestore(&ud->lock, flags);
+}
+
+static struct usbip_device *get_event(void)
+{
+ struct usbip_event *ue = NULL;
+ struct usbip_device *ud = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&event_lock, flags);
+ if (!list_empty(&event_list)) {
+ ue = list_first_entry(&event_list, struct usbip_event, node);
+ list_del(&ue->node);
+ }
+ spin_unlock_irqrestore(&event_lock, flags);
+
+ if (ue) {
+ ud = ue->ud;
+ kfree(ue);
+ }
+ return ud;
+}
+
+static struct task_struct *worker_context;
+
+static void event_handler(struct work_struct *work)
+{
+ struct usbip_device *ud;
+
+ if (worker_context == NULL)
+ worker_context = current;
+
+ while ((ud = get_event()) != NULL) {
usbip_dbg_eh("pending event %lx\n", ud->event);
/*
@@ -38,79 +90,102 @@ static int event_handler(struct usbip_device *ud)
*/
if (ud->event & USBIP_EH_SHUTDOWN) {
ud->eh_ops.shutdown(ud);
- ud->event &= ~USBIP_EH_SHUTDOWN;
+ unset_event(ud, USBIP_EH_SHUTDOWN);
}
/* Reset the device. */
if (ud->event & USBIP_EH_RESET) {
ud->eh_ops.reset(ud);
- ud->event &= ~USBIP_EH_RESET;
+ unset_event(ud, USBIP_EH_RESET);
}
/* Mark the device as unusable. */
if (ud->event & USBIP_EH_UNUSABLE) {
ud->eh_ops.unusable(ud);
- ud->event &= ~USBIP_EH_UNUSABLE;
+ unset_event(ud, USBIP_EH_UNUSABLE);
}
/* Stop the error handler. */
if (ud->event & USBIP_EH_BYE)
- return -1;
+ usbip_dbg_eh("removed %p\n", ud);
+
+ wake_up(&ud->eh_waitq);
}
+}
+int usbip_start_eh(struct usbip_device *ud)
+{
+ init_waitqueue_head(&ud->eh_waitq);
+ ud->event = 0;
return 0;
}
+EXPORT_SYMBOL_GPL(usbip_start_eh);
-static int event_handler_loop(void *data)
+void usbip_stop_eh(struct usbip_device *ud)
{
- struct usbip_device *ud = data;
+ unsigned long pending = ud->event & ~USBIP_EH_BYE;
- while (!kthread_should_stop()) {
- wait_event_interruptible(ud->eh_waitq,
- usbip_event_happened(ud) ||
- kthread_should_stop());
- usbip_dbg_eh("wakeup\n");
+ if (!(ud->event & USBIP_EH_BYE))
+ usbip_dbg_eh("usbip_eh stopping but not removed\n");
- if (event_handler(ud) < 0)
- break;
- }
+ if (pending)
+ usbip_dbg_eh("usbip_eh waiting completion %lx\n", pending);
- return 0;
+ wait_event_interruptible(ud->eh_waitq, !(ud->event & ~USBIP_EH_BYE));
+ usbip_dbg_eh("usbip_eh has stopped\n");
}
+EXPORT_SYMBOL_GPL(usbip_stop_eh);
-int usbip_start_eh(struct usbip_device *ud)
-{
- init_waitqueue_head(&ud->eh_waitq);
- ud->event = 0;
+#define WORK_QUEUE_NAME "usbip_event"
- ud->eh = kthread_run(event_handler_loop, ud, "usbip_eh");
- if (IS_ERR(ud->eh)) {
- pr_warn("Unable to start control thread\n");
- return PTR_ERR(ud->eh);
- }
+static struct workqueue_struct *usbip_queue;
+static DECLARE_WORK(usbip_work, event_handler);
+int usbip_init_eh(void)
+{
+ usbip_queue = create_singlethread_workqueue(WORK_QUEUE_NAME);
+ if (usbip_queue == NULL) {
+ pr_err("failed to create usbip_event\n");
+ return -ENOMEM;
+ }
return 0;
}
-EXPORT_SYMBOL_GPL(usbip_start_eh);
-void usbip_stop_eh(struct usbip_device *ud)
+void usbip_finish_eh(void)
{
- if (ud->eh == current)
- return; /* do not wait for myself */
-
- kthread_stop(ud->eh);
- usbip_dbg_eh("usbip_eh has finished\n");
+ flush_workqueue(usbip_queue);
+ destroy_workqueue(usbip_queue);
+ usbip_queue = NULL;
}
-EXPORT_SYMBOL_GPL(usbip_stop_eh);
void usbip_event_add(struct usbip_device *ud, unsigned long event)
{
+ struct usbip_event *ue;
unsigned long flags;
- spin_lock_irqsave(&ud->lock, flags);
- ud->event |= event;
- wake_up(&ud->eh_waitq);
- spin_unlock_irqrestore(&ud->lock, flags);
+ if (ud->event & USBIP_EH_BYE)
+ return;
+
+ set_event(ud, event);
+
+ spin_lock_irqsave(&event_lock, flags);
+
+ list_for_each_entry_reverse(ue, &event_list, node) {
+ if (ue->ud == ud)
+ goto out;
+ }
+
+ ue = kmalloc(sizeof(struct usbip_event), GFP_ATOMIC);
+ if (ue == NULL)
+ goto out;
+
+ ue->ud = ud;
+
+ list_add_tail(&ue->node, &event_list);
+ queue_work(usbip_queue, &usbip_work);
+
+out:
+ spin_unlock_irqrestore(&event_lock, flags);
}
EXPORT_SYMBOL_GPL(usbip_event_add);
@@ -127,3 +202,12 @@ int usbip_event_happened(struct usbip_device *ud)
return happened;
}
EXPORT_SYMBOL_GPL(usbip_event_happened);
+
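+/*
+ * Report whether @task is the event-handler worker, so callers can
+ * avoid blocking on work that they themselves are running.
+ */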
+int usbip_in_eh(struct task_struct *task)
+{
+ if (task == worker_context)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usbip_in_eh);
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index fca511059..2e0450bec 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -941,7 +941,7 @@ static void vhci_stop(struct usb_hcd *hcd)
static int vhci_get_frame_number(struct usb_hcd *hcd)
{
- pr_err("Not yet implemented\n");
+ dev_err_ratelimited(&hcd->self.root_hub->dev, "Not yet implemented\n");
return 0;
}
diff --git a/drivers/usb/usbip/vudc.h b/drivers/usb/usbip/vudc.h
new file mode 100644
index 000000000..25e01b09c
--- /dev/null
+++ b/drivers/usb/usbip/vudc.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
+ * Copyright (C) 2015-2016 Samsung Electronics
+ * Igor Kotrasinski <i.kotrasinsk@samsung.com>
+ * Krzysztof Opasiak <k.opasiak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __USBIP_VUDC_H
+#define __USBIP_VUDC_H
+
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/ch9.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/sysfs.h>
+
+#include "usbip_common.h"
+
+#define GADGET_NAME "usbip-vudc"
+
+struct vep {
+ struct usb_ep ep;
+ unsigned type:2; /* type, as USB_ENDPOINT_XFER_* */
+ char name[8]; /* space for ep name */
+
+ const struct usb_endpoint_descriptor *desc;
+ struct usb_gadget *gadget;
+ struct list_head req_queue; /* Request queue */
+ unsigned halted:1;
+ unsigned wedged:1;
+ unsigned already_seen:1;
+ unsigned setup_stage:1;
+};
+
+struct vrequest {
+ struct usb_request req;
+ struct vudc *udc;
+ struct list_head req_entry; /* Request queue */
+};
+
+struct urbp {
+ struct urb *urb;
+ struct vep *ep;
+ struct list_head urb_entry; /* urb queue */
+ unsigned long seqnum;
+ unsigned type:2; /* for tx, since ep type can change after */
+ unsigned new:1;
+};
+
+struct v_unlink {
+ unsigned long seqnum;
+ __u32 status;
+};
+
+enum tx_type {
+ TX_UNLINK,
+ TX_SUBMIT,
+};
+
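+/* One queued response for the tx thread; 'type' selects the valid union member. */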
+struct tx_item {
+ struct list_head tx_entry;
+ enum tx_type type;
+ union {
+ struct urbp *s;
+ struct v_unlink *u;
+ };
+};
+
+enum tr_state {
+ VUDC_TR_RUNNING,
+ VUDC_TR_IDLE,
+ VUDC_TR_STOPPED,
+};
+
+struct transfer_timer {
+ struct timer_list timer;
+ enum tr_state state;
+ unsigned long frame_start;
+ int frame_limit;
+};
+
+struct vudc {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct platform_device *pdev;
+
+ struct usb_device_descriptor dev_desc;
+
+ struct usbip_device ud;
+ struct transfer_timer tr_timer;
+ struct timeval start_time;
+
+ struct list_head urb_queue;
+
+ spinlock_t lock_tx;
+ struct list_head tx_queue;
+ wait_queue_head_t tx_waitq;
+
+ spinlock_t lock;
+ struct vep *ep;
+ int address;
+ u16 devstatus;
+
+ unsigned pullup:1;
+ unsigned connected:1;
+ unsigned desc_cached:1;
+};
+
+struct vudc_device {
+ struct platform_device *pdev;
+ struct list_head dev_entry;
+};
+
+extern const struct attribute_group vudc_attr_group;
+
+/* visible everywhere */
+
+static inline struct vep *to_vep(struct usb_ep *_ep)
+{
+ return container_of(_ep, struct vep, ep);
+}
+
+static inline struct vrequest *to_vrequest(
+ struct usb_request *_req)
+{
+ return container_of(_req, struct vrequest, req);
+}
+
+static inline struct vudc *usb_gadget_to_vudc(
+ struct usb_gadget *_gadget)
+{
+ return container_of(_gadget, struct vudc, gadget);
+}
+
+static inline struct vudc *ep_to_vudc(struct vep *ep)
+{
+ return container_of(ep->gadget, struct vudc, gadget);
+}
+
+/* vudc_sysfs.c */
+
+int get_gadget_descs(struct vudc *udc);
+
+/* vudc_tx.c */
+
+int v_tx_loop(void *data);
+void v_enqueue_ret_unlink(struct vudc *udc, __u32 seqnum, __u32 status);
+void v_enqueue_ret_submit(struct vudc *udc, struct urbp *urb_p);
+
+/* vudc_rx.c */
+
+int v_rx_loop(void *data);
+
+/* vudc_transfer.c */
+
+void v_init_timer(struct vudc *udc);
+void v_start_timer(struct vudc *udc);
+void v_kick_timer(struct vudc *udc, unsigned long time);
+void v_stop_timer(struct vudc *udc);
+
+/* vudc_dev.c */
+
+struct urbp *alloc_urbp(void);
+void free_urbp_and_urb(struct urbp *urb_p);
+
+struct vep *vudc_find_endpoint(struct vudc *udc, u8 address);
+
+struct vudc_device *alloc_vudc_device(int devid);
+void put_vudc_device(struct vudc_device *udc_dev);
+
+int vudc_probe(struct platform_device *pdev);
+int vudc_remove(struct platform_device *pdev);
+
+#endif /* __USBIP_VUDC_H */
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
new file mode 100644
index 000000000..8994a1381
--- /dev/null
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -0,0 +1,661 @@
+/*
+ * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
+ * Copyright (C) 2015-2016 Samsung Electronics
+ * Igor Kotrasinski <i.kotrasinsk@samsung.com>
+ * Krzysztof Opasiak <k.opasiak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/kthread.h>
+#include <linux/file.h>
+#include <linux/byteorder/generic.h>
+
+#include "usbip_common.h"
+#include "vudc.h"
+
+#define VIRTUAL_ENDPOINTS (1 /* ep0 */ + 15 /* in eps */ + 15 /* out eps */)
+
+/* urb-related structures alloc / free */
+
+static void free_urb(struct urb *urb)
+{
+ if (!urb)
+ return;
+
+ kfree(urb->setup_packet);
+ urb->setup_packet = NULL;
+
+ kfree(urb->transfer_buffer);
+ urb->transfer_buffer = NULL;
+
+ usb_free_urb(urb);
+}
+
+struct urbp *alloc_urbp(void)
+{
+ struct urbp *urb_p;
+
+ urb_p = kzalloc(sizeof(*urb_p), GFP_KERNEL);
+ if (!urb_p)
+ return urb_p;
+
+ urb_p->urb = NULL;
+ urb_p->ep = NULL;
+ INIT_LIST_HEAD(&urb_p->urb_entry);
+ return urb_p;
+}
+
+static void free_urbp(struct urbp *urb_p)
+{
+ kfree(urb_p);
+}
+
+void free_urbp_and_urb(struct urbp *urb_p)
+{
+ if (!urb_p)
+ return;
+ free_urb(urb_p->urb);
+ free_urbp(urb_p);
+}
+
+/* utilities; almost verbatim from dummy_hcd.c */
+
+/* called with spinlock held */
+static void nuke(struct vudc *udc, struct vep *ep)
+{
+ struct vrequest *req;
+
+ while (!list_empty(&ep->req_queue)) {
+ req = list_first_entry(&ep->req_queue, struct vrequest,
+ req_entry);
+ list_del_init(&req->req_entry);
+ req->req.status = -ESHUTDOWN;
+
+ spin_unlock(&udc->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&udc->lock);
+ }
+}
+
+/* caller must hold lock */
+static void stop_activity(struct vudc *udc)
+{
+ int i;
+ struct urbp *urb_p, *tmp;
+
+ udc->address = 0;
+
+ for (i = 0; i < VIRTUAL_ENDPOINTS; i++)
+ nuke(udc, &udc->ep[i]);
+
+ list_for_each_entry_safe(urb_p, tmp, &udc->urb_queue, urb_entry) {
+ list_del(&urb_p->urb_entry);
+ free_urbp_and_urb(urb_p);
+ }
+}
+
+struct vep *vudc_find_endpoint(struct vudc *udc, u8 address)
+{
+ int i;
+
+ if ((address & ~USB_DIR_IN) == 0)
+ return &udc->ep[0];
+
+ for (i = 1; i < VIRTUAL_ENDPOINTS; i++) {
+ struct vep *ep = &udc->ep[i];
+
+ if (!ep->desc)
+ continue;
+ if (ep->desc->bEndpointAddress == address)
+ return ep;
+ }
+ return NULL;
+}
+
+/* gadget ops */
+
+/* FIXME - this will probably misbehave when suspend/resume is added */
+static int vgadget_get_frame(struct usb_gadget *_gadget)
+{
+ struct timeval now;
+ struct vudc *udc = usb_gadget_to_vudc(_gadget);
+
+ do_gettimeofday(&now);
+ return ((now.tv_sec - udc->start_time.tv_sec) * 1000 +
+ (now.tv_usec - udc->start_time.tv_usec) / 1000)
+ % 0x7FF;
+}
+
+static int vgadget_set_selfpowered(struct usb_gadget *_gadget, int value)
+{
+ struct vudc *udc = usb_gadget_to_vudc(_gadget);
+
+ if (value)
+ udc->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
+ else
+ udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
+ return 0;
+}
+
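+/*
+ * Software pull-up: on connect, fetch the gadget's descriptors and
+ * start the usbip event handler; on disconnect, queue a REMOVED event
+ * and wait for the handler to finish.
+ */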
+static int vgadget_pullup(struct usb_gadget *_gadget, int value)
+{
+ struct vudc *udc = usb_gadget_to_vudc(_gadget);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ value = !!value;
+ if (value == udc->pullup)
+ goto unlock;
+
+ udc->pullup = value;
+ if (value) {
+ udc->gadget.speed = min_t(u8, USB_SPEED_HIGH,
+ udc->driver->max_speed);
+ udc->ep[0].ep.maxpacket = 64;
+ /*
+ * This is the first place where we can ask our
+ * gadget driver for descriptors.
+ */
+ ret = get_gadget_descs(udc);
+ if (ret) {
+ dev_err(&udc->gadget.dev, "Unable to get desc: %d\n", ret);
+ goto unlock;
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ usbip_start_eh(&udc->ud);
+ } else {
+ /* Invalidate descriptors */
+ udc->desc_cached = 0;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ usbip_event_add(&udc->ud, VUDC_EVENT_REMOVED);
+ usbip_stop_eh(&udc->ud); /* Wait for eh completion */
+ }
+
+ return 0;
+
+unlock:
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+static int vgadget_udc_start(struct usb_gadget *g,
+ struct usb_gadget_driver *driver)
+{
+ struct vudc *udc = usb_gadget_to_vudc(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ udc->driver = driver;
+ udc->pullup = udc->connected = udc->desc_cached = 0;
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int vgadget_udc_stop(struct usb_gadget *g)
+{
+ struct vudc *udc = usb_gadget_to_vudc(g);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ udc->driver = NULL;
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+}
+
+static const struct usb_gadget_ops vgadget_ops = {
+ .get_frame = vgadget_get_frame,
+ .set_selfpowered = vgadget_set_selfpowered,
+ .pullup = vgadget_pullup,
+ .udc_start = vgadget_udc_start,
+ .udc_stop = vgadget_udc_stop,
+};
+
+
+
+static int vep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct vep *ep;
+ struct vudc *udc;
+ unsigned maxp;
+ unsigned long flags;
+
+ if (!_ep || !desc)
+ return -EINVAL;
+
+ ep = to_vep(_ep);
+ udc = ep_to_vudc(ep);
+
+ if (ep->desc || _ep->caps.type_control
+ || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+
+ if (!udc->driver)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ maxp = usb_endpoint_maxp(desc) & 0x7ff;
+ _ep->maxpacket = maxp;
+ ep->desc = desc;
+ ep->type = usb_endpoint_type(desc);
+ ep->halted = ep->wedged = 0;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int vep_disable(struct usb_ep *_ep)
+{
+ struct vep *ep;
+ struct vudc *udc;
+ unsigned long flags;
+
+ if (!_ep)
+ return -EINVAL;
+
+ ep = to_vep(_ep);
+ udc = ep_to_vudc(ep);
+ if (!ep->desc || _ep->caps.type_control)
+ return -EINVAL;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ ep->desc = NULL;
+ nuke(udc, ep);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static struct usb_request *vep_alloc_request(struct usb_ep *_ep,
+ gfp_t mem_flags)
+{
+ struct vep *ep;
+ struct vrequest *req;
+
+ if (!_ep)
+ return NULL;
+ ep = to_vep(_ep);
+
+ req = kzalloc(sizeof(*req), mem_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->req_entry);
+
+ return &req->req;
+}
+
+static void vep_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct vrequest *req;
+
+ if (WARN_ON(!_ep || !_req))
+ return;
+
+ req = to_vrequest(_req);
+ kfree(req);
+}
+
+static int vep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t mem_flags)
+{
+ struct vep *ep;
+ struct vrequest *req;
+ struct vudc *udc;
+ unsigned long flags;
+
+ if (!_ep || !_req)
+ return -EINVAL;
+
+ ep = to_vep(_ep);
+ req = to_vrequest(_req);
+ udc = ep_to_vudc(ep);
+
+ spin_lock_irqsave(&udc->lock, flags);
+ _req->actual = 0;
+ _req->status = -EINPROGRESS;
+
+ list_add_tail(&req->req_entry, &ep->req_queue);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int vep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct vep *ep;
+ struct vrequest *req;
+ struct vudc *udc;
+ struct vrequest *lst;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ if (!_ep || !_req)
+ return ret;
+
+ ep = to_vep(_ep);
+ req = to_vrequest(_req);
+ udc = req->udc;
+
+ if (!udc->driver)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ list_for_each_entry(lst, &ep->req_queue, req_entry) {
+ if (&lst->req == _req) {
+ list_del_init(&lst->req_entry);
+ _req->status = -ECONNRESET;
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ if (ret == 0)
+ usb_gadget_giveback_request(_ep, _req);
+
+ return ret;
+}
+
+static int
+vep_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
+{
+ struct vep *ep;
+ struct vudc *udc;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!_ep)
+ return -EINVAL;
+
+ ep = to_vep(_ep);
+
+ udc = ep_to_vudc(ep);
+ if (!udc->driver)
+ return -ESHUTDOWN;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (!value)
+ ep->halted = ep->wedged = 0;
+ else if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
+ !list_empty(&ep->req_queue))
+ ret = -EAGAIN;
+ else {
+ ep->halted = 1;
+ if (wedged)
+ ep->wedged = 1;
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return ret;
+}
+
+static int
+vep_set_halt(struct usb_ep *_ep, int value)
+{
+ return vep_set_halt_and_wedge(_ep, value, 0);
+}
+
+static int vep_set_wedge(struct usb_ep *_ep)
+{
+ return vep_set_halt_and_wedge(_ep, 1, 1);
+}
+
+static const struct usb_ep_ops vep_ops = {
+ .enable = vep_enable,
+ .disable = vep_disable,
+
+ .alloc_request = vep_alloc_request,
+ .free_request = vep_free_request,
+
+ .queue = vep_queue,
+ .dequeue = vep_dequeue,
+
+ .set_halt = vep_set_halt,
+ .set_wedge = vep_set_wedge,
+};
+
+
+/* shutdown / reset / error handlers */
+
+static void vudc_shutdown(struct usbip_device *ud)
+{
+ struct vudc *udc = container_of(ud, struct vudc, ud);
+ int call_disconnect = 0;
+ unsigned long flags;
+
+ dev_dbg(&udc->pdev->dev, "device shutdown");
+ if (ud->tcp_socket)
+ kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
+
+ if (ud->tcp_rx) {
+ kthread_stop_put(ud->tcp_rx);
+ ud->tcp_rx = NULL;
+ }
+ if (ud->tcp_tx) {
+ kthread_stop_put(ud->tcp_tx);
+ ud->tcp_tx = NULL;
+ }
+
+ if (ud->tcp_socket) {
+ sockfd_put(ud->tcp_socket);
+ ud->tcp_socket = NULL;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+ stop_activity(udc);
+ if (udc->connected && udc->driver->disconnect)
+ call_disconnect = 1;
+ udc->connected = 0;
+ spin_unlock_irqrestore(&udc->lock, flags);
+ if (call_disconnect)
+ udc->driver->disconnect(&udc->gadget);
+}
+
+static void vudc_device_reset(struct usbip_device *ud)
+{
+ struct vudc *udc = container_of(ud, struct vudc, ud);
+ unsigned long flags;
+
+ dev_dbg(&udc->pdev->dev, "device reset");
+ spin_lock_irqsave(&udc->lock, flags);
+ stop_activity(udc);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ if (udc->driver)
+ usb_gadget_udc_reset(&udc->gadget, udc->driver);
+ spin_lock_irqsave(&ud->lock, flags);
+ ud->status = SDEV_ST_AVAILABLE;
+ spin_unlock_irqrestore(&ud->lock, flags);
+}
+
+static void vudc_device_unusable(struct usbip_device *ud)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ud->lock, flags);
+ ud->status = SDEV_ST_ERROR;
+ spin_unlock_irqrestore(&ud->lock, flags);
+}
+
+/* device setup / cleanup */
+
+struct vudc_device *alloc_vudc_device(int devid)
+{
+ struct vudc_device *udc_dev = NULL;
+
+ udc_dev = kzalloc(sizeof(*udc_dev), GFP_KERNEL);
+ if (!udc_dev)
+ goto out;
+
+ INIT_LIST_HEAD(&udc_dev->dev_entry);
+
+ udc_dev->pdev = platform_device_alloc(GADGET_NAME, devid);
+ if (!udc_dev->pdev) {
+ kfree(udc_dev);
+ udc_dev = NULL;
+ }
+
+out:
+ return udc_dev;
+}
+
+void put_vudc_device(struct vudc_device *udc_dev)
+{
+ platform_device_put(udc_dev->pdev);
+ kfree(udc_dev);
+}
+
+static int init_vudc_hw(struct vudc *udc)
+{
+ int i;
+ struct usbip_device *ud = &udc->ud;
+ struct vep *ep;
+
+ udc->ep = kcalloc(VIRTUAL_ENDPOINTS, sizeof(*udc->ep), GFP_KERNEL);
+ if (!udc->ep)
+ goto nomem_ep;
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+
+ /* create ep0 and 15 in, 15 out general purpose eps */
+ for (i = 0; i < VIRTUAL_ENDPOINTS; ++i) {
+ int is_out = i % 2;
+ int num = (i + 1) / 2;
+
+ ep = &udc->ep[i];
+
+ sprintf(ep->name, "ep%d%s", num,
+ i ? (is_out ? "out" : "in") : "");
+ ep->ep.name = ep->name;
+ if (i == 0) {
+ ep->ep.caps.type_control = true;
+ ep->ep.caps.dir_out = true;
+ ep->ep.caps.dir_in = true;
+ } else {
+ ep->ep.caps.type_iso = true;
+ ep->ep.caps.type_int = true;
+ ep->ep.caps.type_bulk = true;
+ }
+
+ if (is_out)
+ ep->ep.caps.dir_out = true;
+ else
+ ep->ep.caps.dir_in = true;
+
+ ep->ep.ops = &vep_ops;
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+ ep->halted = ep->wedged = ep->already_seen =
+ ep->setup_stage = 0;
+ usb_ep_set_maxpacket_limit(&ep->ep, ~0);
+ ep->ep.max_streams = 16;
+ ep->gadget = &udc->gadget;
+ ep->desc = NULL;
+ INIT_LIST_HEAD(&ep->req_queue);
+ }
+
+ spin_lock_init(&udc->lock);
+ spin_lock_init(&udc->lock_tx);
+ INIT_LIST_HEAD(&udc->urb_queue);
+ INIT_LIST_HEAD(&udc->tx_queue);
+ init_waitqueue_head(&udc->tx_waitq);
+
+ spin_lock_init(&ud->lock);
+ ud->status = SDEV_ST_AVAILABLE;
+ ud->side = USBIP_VUDC;
+
+ ud->eh_ops.shutdown = vudc_shutdown;
+ ud->eh_ops.reset = vudc_device_reset;
+ ud->eh_ops.unusable = vudc_device_unusable;
+
+ udc->gadget.ep0 = &udc->ep[0].ep;
+ list_del_init(&udc->ep[0].ep.ep_list);
+
+ v_init_timer(udc);
+ return 0;
+
+nomem_ep:
+ return -ENOMEM;
+}
+
+static void cleanup_vudc_hw(struct vudc *udc)
+{
+ kfree(udc->ep);
+}
+
+/* platform driver ops */
+
+int vudc_probe(struct platform_device *pdev)
+{
+ struct vudc *udc;
+ int ret = -ENOMEM;
+
+ udc = kzalloc(sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ goto out;
+
+ udc->gadget.name = GADGET_NAME;
+ udc->gadget.ops = &vgadget_ops;
+ udc->gadget.max_speed = USB_SPEED_HIGH;
+ udc->gadget.dev.parent = &pdev->dev;
+ udc->pdev = pdev;
+
+ ret = init_vudc_hw(udc);
+ if (ret)
+ goto err_init_vudc_hw;
+
+ ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
+ if (ret < 0)
+ goto err_add_udc;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &vudc_attr_group);
+ if (ret) {
+ dev_err(&udc->pdev->dev, "create sysfs files\n");
+ goto err_sysfs;
+ }
+
+ platform_set_drvdata(pdev, udc);
+
+ return ret;
+
+err_sysfs:
+ usb_del_gadget_udc(&udc->gadget);
+err_add_udc:
+ cleanup_vudc_hw(udc);
+err_init_vudc_hw:
+ kfree(udc);
+out:
+ return ret;
+}
+
+int vudc_remove(struct platform_device *pdev)
+{
+ struct vudc *udc = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&pdev->dev.kobj, &vudc_attr_group);
+ usb_del_gadget_udc(&udc->gadget);
+ cleanup_vudc_hw(udc);
+ kfree(udc);
+ return 0;
+}
diff --git a/drivers/usb/usbip/vudc_main.c b/drivers/usb/usbip/vudc_main.c
new file mode 100644
index 000000000..9e655714e
--- /dev/null
+++ b/drivers/usb/usbip/vudc_main.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
+ * Copyright (C) 2015-2016 Samsung Electronics
+ * Igor Kotrasinski <i.kotrasinsk@samsung.com>
+ * Krzysztof Opasiak <k.opasiak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
+#include "vudc.h"
+
+static unsigned int vudc_number = 1;
+
+module_param_named(num, vudc_number, uint, S_IRUGO);
+MODULE_PARM_DESC(num, "number of emulated controllers");
+
+static struct platform_driver vudc_driver = {
+ .probe = vudc_probe,
+ .remove = vudc_remove,
+ .driver = {
+ .name = GADGET_NAME,
+ },
+};
+
+static struct list_head vudc_devices = LIST_HEAD_INIT(vudc_devices);
+
+static int __init init(void)
+{
+ int retval = -ENOMEM;
+ int i;
+ struct vudc_device *udc_dev = NULL, *udc_dev2 = NULL;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ if (vudc_number < 1) {
+ pr_err("Number of emulated UDC must be no less than 1");
+ return -EINVAL;
+ }
+
+ retval = platform_driver_register(&vudc_driver);
+ if (retval < 0)
+ goto out;
+
+ for (i = 0; i < vudc_number; i++) {
+ udc_dev = alloc_vudc_device(i);
+ if (!udc_dev) {
+ retval = -ENOMEM;
+ goto cleanup;
+ }
+
+ retval = platform_device_add(udc_dev->pdev);
+ if (retval < 0) {
+ put_vudc_device(udc_dev);
+ goto cleanup;
+ }
+
+ list_add_tail(&udc_dev->dev_entry, &vudc_devices);
+ if (!platform_get_drvdata(udc_dev->pdev)) {
+ /*
+ * The udc was added successfully but its probe
+ * function failed for some reason.
+ */
+ retval = -EINVAL;
+ goto cleanup;
+ }
+ }
+ goto out;
+
+cleanup:
+ list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) {
+ list_del(&udc_dev->dev_entry);
+ platform_device_del(udc_dev->pdev);
+ put_vudc_device(udc_dev);
+ }
+
+ platform_driver_unregister(&vudc_driver);
+out:
+ return retval;
+}
+module_init(init);
+
+static void __exit cleanup(void)
+{
+ struct vudc_device *udc_dev = NULL, *udc_dev2 = NULL;
+
+ list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) {
+ list_del(&udc_dev->dev_entry);
+ platform_device_unregister(udc_dev->pdev);
+ put_vudc_device(udc_dev);
+ }
+ platform_driver_unregister(&vudc_driver);
+}
+module_exit(cleanup);
+
+MODULE_DESCRIPTION("USB over IP Device Controller");
+MODULE_AUTHOR("Krzysztof Opasiak, Karol Kosik, Igor Kotrasinski");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c
new file mode 100644
index 000000000..344bd9473
--- /dev/null
+++ b/drivers/usb/usbip/vudc_rx.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
+ * Copyright (C) 2015-2016 Samsung Electronics
+ * Igor Kotrasinski <i.kotrasinsk@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <net/sock.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+
+#include "usbip_common.h"
+#include "vudc.h"
+
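+/* Build an urb, with transfer buffer and setup packet, from a CMD_SUBMIT pdu. */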
+static int alloc_urb_from_cmd(struct urb **urbp,
+ struct usbip_header *pdu, u8 type)
+{
+ struct urb *urb;
+
+ if (type == USB_ENDPOINT_XFER_ISOC)
+ urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
+ GFP_KERNEL);
+ else
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+
+ if (!urb)
+ goto err;
+
+ usbip_pack_pdu(pdu, urb, USBIP_CMD_SUBMIT, 0);
+
+ if (urb->transfer_buffer_length > 0) {
+ urb->transfer_buffer = kzalloc(urb->transfer_buffer_length,
+ GFP_KERNEL);
+ if (!urb->transfer_buffer)
+ goto free_urb;
+ }
+
+ urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
+ GFP_KERNEL);
+ if (!urb->setup_packet)
+ goto free_buffer;
+
+ /*
+ * FIXME - we only setup pipe enough for usbip functions
+ * to behave nicely
+ */
+ urb->pipe |= pdu->base.direction == USBIP_DIR_IN ?
+ USB_DIR_IN : USB_DIR_OUT;
+
+ *urbp = urb;
+ return 0;
+
+free_buffer:
+ kfree(urb->transfer_buffer);
+ urb->transfer_buffer = NULL;
+free_urb:
+ usb_free_urb(urb);
+err:
+ return -ENOMEM;
+}
+
+static int v_recv_cmd_unlink(struct vudc *udc,
+ struct usbip_header *pdu)
+{
+ unsigned long flags;
+ struct urbp *urb_p;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ list_for_each_entry(urb_p, &udc->urb_queue, urb_entry) {
+ if (urb_p->seqnum != pdu->u.cmd_unlink.seqnum)
+ continue;
+ urb_p->urb->unlinked = -ECONNRESET;
+ urb_p->seqnum = pdu->base.seqnum;
+ v_kick_timer(udc, jiffies);
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return 0;
+ }
+ /* Not found, completed / not queued */
+ spin_lock(&udc->lock_tx);
+ v_enqueue_ret_unlink(udc, pdu->base.seqnum, 0);
+ wake_up(&udc->tx_waitq);
+ spin_unlock(&udc->lock_tx);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int v_recv_cmd_submit(struct vudc *udc,
+ struct usbip_header *pdu)
+{
+ int ret = 0;
+ struct urbp *urb_p;
+ u8 address;
+ unsigned long flags;
+
+ urb_p = alloc_urbp();
+ if (!urb_p) {
+ usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
+ return -ENOMEM;
+ }
+
+ /* base.ep is pipeendpoint(pipe) */
+ address = pdu->base.ep;
+ if (pdu->base.direction == USBIP_DIR_IN)
+ address |= USB_DIR_IN;
+
+ spin_lock_irq(&udc->lock);
+ urb_p->ep = vudc_find_endpoint(udc, address);
+ if (!urb_p->ep) {
+ /* we don't know the type, there may be isoc data! */
+ dev_err(&udc->pdev->dev, "request to nonexistent endpoint");
+ spin_unlock_irq(&udc->lock);
+ usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
+ ret = -EPIPE;
+ goto free_urbp;
+ }
+ urb_p->type = urb_p->ep->type;
+ spin_unlock_irq(&udc->lock);
+
+ urb_p->new = 1;
+ urb_p->seqnum = pdu->base.seqnum;
+
+ ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type);
+ if (ret) {
+ usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
+ ret = -ENOMEM;
+ goto free_urbp;
+ }
+
+ urb_p->urb->status = -EINPROGRESS;
+
+ /* FIXME: more pipe setup to please usbip_common */
+ urb_p->urb->pipe &= ~(3 << 30);
+ switch (urb_p->ep->type) {
+ case USB_ENDPOINT_XFER_BULK:
+ urb_p->urb->pipe |= (PIPE_BULK << 30);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ urb_p->urb->pipe |= (PIPE_INTERRUPT << 30);
+ break;
+ case USB_ENDPOINT_XFER_CONTROL:
+ urb_p->urb->pipe |= (PIPE_CONTROL << 30);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ urb_p->urb->pipe |= (PIPE_ISOCHRONOUS << 30);
+ break;
+ }
+ ret = usbip_recv_xbuff(&udc->ud, urb_p->urb);
+ if (ret < 0)
+ goto free_urbp;
+
+ ret = usbip_recv_iso(&udc->ud, urb_p->urb);
+ if (ret < 0)
+ goto free_urbp;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ v_kick_timer(udc, jiffies);
+ list_add_tail(&urb_p->urb_entry, &udc->urb_queue);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+
+free_urbp:
+ free_urbp_and_urb(urb_p);
+ return ret;
+}
+
+static int v_rx_pdu(struct usbip_device *ud)
+{
+ int ret;
+ struct usbip_header pdu;
+ struct vudc *udc = container_of(ud, struct vudc, ud);
+
+ memset(&pdu, 0, sizeof(pdu));
+ ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
+ if (ret != sizeof(pdu)) {
+ usbip_event_add(ud, VUDC_EVENT_ERROR_TCP);
+ if (ret >= 0)
+ return -EPIPE;
+ return ret;
+ }
+ usbip_header_correct_endian(&pdu, 0);
+
+ spin_lock_irq(&ud->lock);
+ ret = (ud->status == SDEV_ST_USED);
+ spin_unlock_irq(&ud->lock);
+ if (!ret) {
+ usbip_event_add(ud, VUDC_EVENT_ERROR_TCP);
+ return -EBUSY;
+ }
+
+ switch (pdu.base.command) {
+ case USBIP_CMD_UNLINK:
+ ret = v_recv_cmd_unlink(udc, &pdu);
+ break;
+ case USBIP_CMD_SUBMIT:
+ ret = v_recv_cmd_submit(udc, &pdu);
+ break;
+ default:
+ ret = -EPIPE;
+ pr_err("rx: unknown command");
+ break;
+ }
+ return ret;
+}
+
+int v_rx_loop(void *data)
+{
+ struct usbip_device *ud = data;
+ int ret = 0;
+
+ while (!kthread_should_stop()) {
+ if (usbip_event_happened(ud))
+ break;
+ ret = v_rx_pdu(ud);
+ if (ret < 0) {
+ pr_warn("v_rx exit with error %d", ret);
+ break;
+ }
+ }
+ return ret;
+}
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
new file mode 100644
index 000000000..99397fa1e
--- /dev/null
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
+ * Copyright (C) 2015-2016 Samsung Electronics
+ * Igor Kotrasinski <i.kotrasinsk@samsung.com>
+ * Krzysztof Opasiak <k.opasiak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/ch9.h>
+#include <linux/sysfs.h>
+#include <linux/kthread.h>
+#include <linux/byteorder/generic.h>
+
+#include "usbip_common.h"
+#include "vudc.h"
+
+#include <net/sock.h>
+
+/* called with udc->lock held */
+int get_gadget_descs(struct vudc *udc)
+{
+ struct vrequest *usb_req;
+ struct vep *ep0;
+ struct usb_device_descriptor *ddesc;
+ struct usb_ctrlrequest req;
+ int ret;
+
+ if (!udc || !udc->driver || !udc->pullup)
+ return -EINVAL;
+
+ ep0 = to_vep(udc->gadget.ep0);
+ ddesc = &udc->dev_desc;
+
+ req.bRequestType = USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
+ req.bRequest = USB_REQ_GET_DESCRIPTOR;
+ req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
+ req.wIndex = cpu_to_le16(0);
+ req.wLength = cpu_to_le16(sizeof(*ddesc));
+
+ spin_unlock(&udc->lock);
+ ret = udc->driver->setup(&(udc->gadget), &req);
+ spin_lock(&udc->lock);
+ if (ret < 0)
+ goto out;
+
+ /* assuming request queue is empty; request is now on top */
+ usb_req = list_last_entry(&ep0->req_queue, struct vrequest, req_entry);
+ list_del(&usb_req->req_entry);
+
+ if (usb_req->req.length > sizeof(*ddesc)) {
+ ret = -EOVERFLOW;
+ goto giveback_req;
+ }
+
+ memcpy(ddesc, usb_req->req.buf, sizeof(*ddesc));
+ udc->desc_cached = 1;
+ ret = 0;
+giveback_req:
+ usb_req->req.status = 0;
+ usb_req->req.actual = usb_req->req.length;
+ usb_gadget_giveback_request(&(ep0->ep), &(usb_req->req));
+out:
+ return ret;
+}
+
+/*
+ * Exposes device descriptor from the gadget driver.
+ */
+static ssize_t dev_desc_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *out,
+ loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct vudc *udc = (struct vudc *)dev_get_drvdata(dev);
+ char *desc_ptr = (char *) &udc->dev_desc;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (!udc->desc_cached) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ memcpy(out, desc_ptr + off, count);
+ ret = count;
+unlock:
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return ret;
+}
+static BIN_ATTR_RO(dev_desc, sizeof(struct usb_device_descriptor));
+
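+/*
+ * Attach/detach a connection: userspace writes an open socket fd here
+ * to start the rx/tx threads, or -1 to bring the device down.
+ */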
+static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
+ const char *in, size_t count)
+{
+ struct vudc *udc = (struct vudc *) dev_get_drvdata(dev);
+ int rv;
+ int sockfd = 0;
+ int err;
+ struct socket *socket;
+ unsigned long flags;
+ int ret;
+
+ rv = kstrtoint(in, 0, &sockfd);
+ if (rv != 0)
+ return -EINVAL;
+
+ if (!udc) {
+ dev_err(dev, "no device\n");
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&udc->lock, flags);
+ /* Don't export what we don't have */
+ if (!udc->driver || !udc->pullup) {
+ dev_err(dev, "gadget not bound\n");
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ if (sockfd != -1) {
+ if (udc->connected) {
+ dev_err(dev, "Device already connected");
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ spin_lock_irq(&udc->ud.lock);
+
+ if (udc->ud.status != SDEV_ST_AVAILABLE) {
+ ret = -EINVAL;
+ goto unlock_ud;
+ }
+
+ socket = sockfd_lookup(sockfd, &err);
+ if (!socket) {
+ dev_err(dev, "failed to lookup sock");
+ ret = -EINVAL;
+ goto unlock_ud;
+ }
+
+ udc->ud.tcp_socket = socket;
+
+ spin_unlock_irq(&udc->ud.lock);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ udc->ud.tcp_rx = kthread_get_run(&v_rx_loop,
+ &udc->ud, "vudc_rx");
+ udc->ud.tcp_tx = kthread_get_run(&v_tx_loop,
+ &udc->ud, "vudc_tx");
+
+ spin_lock_irqsave(&udc->lock, flags);
+ spin_lock_irq(&udc->ud.lock);
+ udc->ud.status = SDEV_ST_USED;
+ spin_unlock_irq(&udc->ud.lock);
+
+ do_gettimeofday(&udc->start_time);
+ v_start_timer(udc);
+ udc->connected = 1;
+ } else {
+ if (!udc->connected) {
+ dev_err(dev, "Device not connected");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ spin_lock_irq(&udc->ud.lock);
+ if (udc->ud.status != SDEV_ST_USED) {
+ ret = -EINVAL;
+ goto unlock_ud;
+ }
+ spin_unlock_irq(&udc->ud.lock);
+
+ usbip_event_add(&udc->ud, VUDC_EVENT_DOWN);
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return count;
+
+unlock_ud:
+ spin_unlock_irq(&udc->ud.lock);
+unlock:
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return ret;
+}
+static DEVICE_ATTR(usbip_sockfd, S_IWUSR, NULL, store_sockfd);
+
+static ssize_t usbip_status_show(struct device *dev,
+ struct device_attribute *attr, char *out)
+{
+ struct vudc *udc = (struct vudc *) dev_get_drvdata(dev);
+ int status;
+
+ if (!udc) {
+ dev_err(dev, "no device");
+ return -ENODEV;
+ }
+ spin_lock_irq(&udc->ud.lock);
+ status = udc->ud.status;
+ spin_unlock_irq(&udc->ud.lock);
+
+ return snprintf(out, PAGE_SIZE, "%d\n", status);
+}
+static DEVICE_ATTR_RO(usbip_status);
+
+static struct attribute *dev_attrs[] = {
+ &dev_attr_usbip_sockfd.attr,
+ &dev_attr_usbip_status.attr,
+ NULL,
+};
+
+static struct bin_attribute *dev_bin_attrs[] = {
+ &bin_attr_dev_desc,
+ NULL,
+};
+
+const struct attribute_group vudc_attr_group = {
+ .attrs = dev_attrs,
+ .bin_attrs = dev_bin_attrs,
+};
diff --git a/drivers/usb/usbip/vudc_transfer.c b/drivers/usb/usbip/vudc_transfer.c
new file mode 100644
index 000000000..aba6bd478
--- /dev/null
+++ b/drivers/usb/usbip/vudc_transfer.c
@@ -0,0 +1,506 @@
+/*
+ * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
+ * Copyright (C) 2015-2016 Samsung Electronics
+ * Igor Kotrasinski <i.kotrasinsk@samsung.com>
+ *
+ * Based on dummy_hcd.c, which is:
+ * Copyright (C) 2003 David Brownell
+ * Copyright (C) 2003-2005 Alan Stern
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/usb.h>
+#include <linux/timer.h>
+#include <linux/usb/ch9.h>
+
+#include "vudc.h"
+
+#define DEV_REQUEST (USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+#define DEV_INREQUEST (DEV_REQUEST | USB_DIR_IN)
+#define INTF_REQUEST (USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
+#define INTF_INREQUEST (INTF_REQUEST | USB_DIR_IN)
+#define EP_REQUEST (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
+#define EP_INREQUEST (EP_REQUEST | USB_DIR_IN)
+
+static int get_frame_limit(enum usb_device_speed speed)
+{
+ switch (speed) {
+ case USB_SPEED_LOW:
+ return 8 /*bytes*/ * 12 /*packets*/;
+ case USB_SPEED_FULL:
+ return 64 /*bytes*/ * 19 /*packets*/;
+ case USB_SPEED_HIGH:
+ return 512 /*bytes*/ * 13 /*packets*/ * 8 /*uframes*/;
+ case USB_SPEED_SUPER:
+ /* Bus speed is 500000 bytes/ms, so use a little less */
+ return 490000;
+ default:
+ /* error */
+ return -1;
+ }
+}
+
+/*
+ * handle_control_request() - handles all control transfers
+ * @udc: pointer to vudc
+ * @urb: the urb request to handle
+ * @setup: pointer to the setup data for a USB device control
+ * request
+ * @status: pointer to request handling status
+ *
+ * Return 0 - if the request was handled
+ * 1 - if the request wasn't handled
+ * error code on error
+ *
+ * Adapted from drivers/usb/gadget/udc/dummy_hcd.c
+ */
+static int handle_control_request(struct vudc *udc, struct urb *urb,
+ struct usb_ctrlrequest *setup,
+ int *status)
+{
+ struct vep *ep2;
+ int ret_val = 1;
+ unsigned w_index;
+ unsigned w_value;
+
+ w_index = le16_to_cpu(setup->wIndex);
+ w_value = le16_to_cpu(setup->wValue);
+ switch (setup->bRequest) {
+ case USB_REQ_SET_ADDRESS:
+ if (setup->bRequestType != DEV_REQUEST)
+ break;
+ udc->address = w_value;
+ ret_val = 0;
+ *status = 0;
+ break;
+ case USB_REQ_SET_FEATURE:
+ if (setup->bRequestType == DEV_REQUEST) {
+ ret_val = 0;
+ switch (w_value) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ break;
+ case USB_DEVICE_B_HNP_ENABLE:
+ udc->gadget.b_hnp_enable = 1;
+ break;
+ case USB_DEVICE_A_HNP_SUPPORT:
+ udc->gadget.a_hnp_support = 1;
+ break;
+ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+ udc->gadget.a_alt_hnp_support = 1;
+ break;
+ default:
+ ret_val = -EOPNOTSUPP;
+ }
+ if (ret_val == 0) {
+ udc->devstatus |= (1 << w_value);
+ *status = 0;
+ }
+ } else if (setup->bRequestType == EP_REQUEST) {
+ /* endpoint halt */
+ ep2 = vudc_find_endpoint(udc, w_index);
+ if (!ep2 || ep2->ep.name == udc->ep[0].ep.name) {
+ ret_val = -EOPNOTSUPP;
+ break;
+ }
+ ep2->halted = 1;
+ ret_val = 0;
+ *status = 0;
+ }
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ if (setup->bRequestType == DEV_REQUEST) {
+ ret_val = 0;
+ switch (w_value) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ w_value = USB_DEVICE_REMOTE_WAKEUP;
+ break;
+
+ case USB_DEVICE_U1_ENABLE:
+ case USB_DEVICE_U2_ENABLE:
+ case USB_DEVICE_LTM_ENABLE:
+ ret_val = -EOPNOTSUPP;
+ break;
+ default:
+ ret_val = -EOPNOTSUPP;
+ break;
+ }
+ if (ret_val == 0) {
+ udc->devstatus &= ~(1 << w_value);
+ *status = 0;
+ }
+ } else if (setup->bRequestType == EP_REQUEST) {
+ /* endpoint halt */
+ ep2 = vudc_find_endpoint(udc, w_index);
+ if (!ep2) {
+ ret_val = -EOPNOTSUPP;
+ break;
+ }
+ if (!ep2->wedged)
+ ep2->halted = 0;
+ ret_val = 0;
+ *status = 0;
+ }
+ break;
+ case USB_REQ_GET_STATUS:
+ if (setup->bRequestType == DEV_INREQUEST
+ || setup->bRequestType == INTF_INREQUEST
+ || setup->bRequestType == EP_INREQUEST) {
+ char *buf;
+ /*
+ * device: remote wakeup, selfpowered
+ * interface: nothing
+ * endpoint: halt
+ */
+ buf = (char *)urb->transfer_buffer;
+ if (urb->transfer_buffer_length > 0) {
+ if (setup->bRequestType == EP_INREQUEST) {
+ ep2 = vudc_find_endpoint(udc, w_index);
+ if (!ep2) {
+ ret_val = -EOPNOTSUPP;
+ break;
+ }
+ buf[0] = ep2->halted;
+ } else if (setup->bRequestType ==
+ DEV_INREQUEST) {
+ buf[0] = (u8)udc->devstatus;
+ } else
+ buf[0] = 0;
+ }
+ if (urb->transfer_buffer_length > 1)
+ buf[1] = 0;
+ urb->actual_length = min_t(u32, 2,
+ urb->transfer_buffer_length);
+ ret_val = 0;
+ *status = 0;
+ }
+ break;
+ }
+ return ret_val;
+}
+
+/* Adapted from dummy_hcd.c ; caller must hold lock */
+static int transfer(struct vudc *udc,
+ struct urb *urb, struct vep *ep, int limit)
+{
+ struct vrequest *req;
+ int sent = 0;
+top:
+ /* if there's no request queued, the device is NAKing; return */
+ list_for_each_entry(req, &ep->req_queue, req_entry) {
+ unsigned host_len, dev_len, len;
+ void *ubuf_pos, *rbuf_pos;
+ int is_short, to_host;
+ int rescan = 0;
+
+ /*
+ * 1..N packets of ep->ep.maxpacket each ... the last one
+ * may be short (including zero length).
+ *
+ * writer can send a zlp explicitly (length 0) or implicitly
+ * (length mod maxpacket zero, and 'zero' flag); they always
+ * terminate reads.
+ */
+ host_len = urb->transfer_buffer_length - urb->actual_length;
+ dev_len = req->req.length - req->req.actual;
+ len = min(host_len, dev_len);
+
+ to_host = usb_pipein(urb->pipe);
+ if (unlikely(len == 0))
+ is_short = 1;
+ else {
+ /* send multiple of maxpacket first, then remainder */
+ if (len >= ep->ep.maxpacket) {
+ is_short = 0;
+ if (len % ep->ep.maxpacket > 0)
+ rescan = 1;
+ len -= len % ep->ep.maxpacket;
+ } else {
+ is_short = 1;
+ }
+
+ ubuf_pos = urb->transfer_buffer + urb->actual_length;
+ rbuf_pos = req->req.buf + req->req.actual;
+
+ if (urb->pipe & USB_DIR_IN)
+ memcpy(ubuf_pos, rbuf_pos, len);
+ else
+ memcpy(rbuf_pos, ubuf_pos, len);
+
+ urb->actual_length += len;
+ req->req.actual += len;
+ sent += len;
+ }
+
+ /*
+ * short packets terminate, maybe with overflow/underflow.
+ * it's only really an error to write too much.
+ *
+ * partially filling a buffer optionally blocks queue advances
+ * (so completion handlers can clean up the queue) but we don't
+ * need to emulate such data-in-flight.
+ */
+ if (is_short) {
+ if (host_len == dev_len) {
+ req->req.status = 0;
+ urb->status = 0;
+ } else if (to_host) {
+ req->req.status = 0;
+ if (dev_len > host_len)
+ urb->status = -EOVERFLOW;
+ else
+ urb->status = 0;
+ } else {
+ urb->status = 0;
+ if (host_len > dev_len)
+ req->req.status = -EOVERFLOW;
+ else
+ req->req.status = 0;
+ }
+
+ /* many requests terminate without a short packet */
+ /* also check if we need to send zlp */
+ } else {
+ if (req->req.length == req->req.actual) {
+ if (req->req.zero && to_host)
+ rescan = 1;
+ else
+ req->req.status = 0;
+ }
+ if (urb->transfer_buffer_length == urb->actual_length) {
+ if (urb->transfer_flags & URB_ZERO_PACKET &&
+ !to_host)
+ rescan = 1;
+ else
+ urb->status = 0;
+ }
+ }
+
+ /* device side completion --> continuable */
+ if (req->req.status != -EINPROGRESS) {
+
+ list_del_init(&req->req_entry);
+ spin_unlock(&udc->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&udc->lock);
+
+ /* requests might have been unlinked... */
+ rescan = 1;
+ }
+
+ /* host side completion --> terminate */
+ if (urb->status != -EINPROGRESS)
+ break;
+
+ /* rescan to continue with any other queued i/o */
+ if (rescan)
+ goto top;
+ }
+ return sent;
+}
+
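+/*
+ * Frame timer: runs roughly once per millisecond, moving data between
+ * queued urbs and gadget requests within the per-frame bandwidth budget.
+ */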
+static void v_timer(unsigned long _vudc)
+{
+ struct vudc *udc = (struct vudc *) _vudc;
+ struct transfer_timer *timer = &udc->tr_timer;
+ struct urbp *urb_p, *tmp;
+ unsigned long flags;
+ struct usb_ep *_ep;
+ struct vep *ep;
+ int ret = 0;
+ int total, limit;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ total = get_frame_limit(udc->gadget.speed);
+ if (total < 0) { /* unknown speed, or not set yet */
+ timer->state = VUDC_TR_IDLE;
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return;
+ }
+ /* is it next frame now? */
+ if (time_after(jiffies, timer->frame_start + msecs_to_jiffies(1))) {
+ timer->frame_limit = total;
+ /* FIXME: how to make it accurate? */
+ timer->frame_start = jiffies;
+ } else {
+ total = timer->frame_limit;
+ }
+
+ list_for_each_entry(_ep, &udc->gadget.ep_list, ep_list) {
+ ep = to_vep(_ep);
+ ep->already_seen = 0;
+ }
+
+restart:
+ list_for_each_entry_safe(urb_p, tmp, &udc->urb_queue, urb_entry) {
+ struct urb *urb = urb_p->urb;
+
+ ep = urb_p->ep;
+ if (urb->unlinked)
+ goto return_urb;
+ if (timer->state != VUDC_TR_RUNNING)
+ continue;
+
+ if (!ep) {
+ urb->status = -EPROTO;
+ goto return_urb;
+ }
+
+ /* Used up bandwidth? */
+ if (total <= 0 && ep->type == USB_ENDPOINT_XFER_BULK)
+ continue;
+
+ if (ep->already_seen)
+ continue;
+ ep->already_seen = 1;
+ if (ep == &udc->ep[0] && urb_p->new) {
+ ep->setup_stage = 1;
+ urb_p->new = 0;
+ }
+ if (ep->halted && !ep->setup_stage) {
+ urb->status = -EPIPE;
+ goto return_urb;
+ }
+
+ if (ep == &udc->ep[0] && ep->setup_stage) {
+ /* TODO - flush any stale requests */
+ ep->setup_stage = 0;
+ ep->halted = 0;
+
+ ret = handle_control_request(udc, urb,
+ (struct usb_ctrlrequest *) urb->setup_packet,
+ (&urb->status));
+ if (ret > 0) {
+ spin_unlock(&udc->lock);
+ ret = udc->driver->setup(&udc->gadget,
+ (struct usb_ctrlrequest *)
+ urb->setup_packet);
+ spin_lock(&udc->lock);
+ }
+ if (ret >= 0) {
+ /* no delays (max 64kb data stage) */
+ limit = 64 * 1024;
+ goto treat_control_like_bulk;
+ } else {
+ urb->status = -EPIPE;
+ urb->actual_length = 0;
+ goto return_urb;
+ }
+ }
+
+ limit = total;
+ switch (ep->type) {
+ case USB_ENDPOINT_XFER_ISOC:
+ /* TODO: support */
+ urb->status = -EXDEV;
+ break;
+
+ case USB_ENDPOINT_XFER_INT:
+ /*
+ * TODO: figure out bandwidth guarantees
+ * for now, give unlimited bandwidth
+ */
+ limit += urb->transfer_buffer_length;
+ /* fallthrough */
+ default:
+treat_control_like_bulk:
+ total -= transfer(udc, urb, ep, limit);
+ }
+ if (urb->status == -EINPROGRESS)
+ continue;
+
+return_urb:
+ if (ep)
+ ep->already_seen = ep->setup_stage = 0;
+
+ spin_lock(&udc->lock_tx);
+ list_del(&urb_p->urb_entry);
+ if (!urb->unlinked) {
+ v_enqueue_ret_submit(udc, urb_p);
+ } else {
+ v_enqueue_ret_unlink(udc, urb_p->seqnum,
+ urb->unlinked);
+ free_urbp_and_urb(urb_p);
+ }
+ wake_up(&udc->tx_waitq);
+ spin_unlock(&udc->lock_tx);
+
+ goto restart;
+ }
+
+ /* TODO - also wait on empty usb_request queues? */
+ if (list_empty(&udc->urb_queue))
+ timer->state = VUDC_TR_IDLE;
+ else
+ mod_timer(&timer->timer,
+ timer->frame_start + msecs_to_jiffies(1));
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+}
+
+/* All timer functions are run with udc->lock held */
+
+void v_init_timer(struct vudc *udc)
+{
+ struct transfer_timer *t = &udc->tr_timer;
+
+ setup_timer(&t->timer, v_timer, (unsigned long) udc);
+ t->state = VUDC_TR_STOPPED;
+}
+
+void v_start_timer(struct vudc *udc)
+{
+ struct transfer_timer *t = &udc->tr_timer;
+
+ dev_dbg(&udc->pdev->dev, "timer start");
+ switch (t->state) {
+ case VUDC_TR_RUNNING:
+ return;
+ case VUDC_TR_IDLE:
+ return v_kick_timer(udc, jiffies);
+ case VUDC_TR_STOPPED:
+ t->state = VUDC_TR_IDLE;
+ t->frame_start = jiffies;
+ t->frame_limit = get_frame_limit(udc->gadget.speed);
+ return v_kick_timer(udc, jiffies);
+ }
+}
+
+void v_kick_timer(struct vudc *udc, unsigned long time)
+{
+ struct transfer_timer *t = &udc->tr_timer;
+
+ dev_dbg(&udc->pdev->dev, "timer kick");
+ switch (t->state) {
+ case VUDC_TR_RUNNING:
+ return;
+ case VUDC_TR_IDLE:
+ t->state = VUDC_TR_RUNNING;
+ /* fallthrough */
+ case VUDC_TR_STOPPED:
+ /* we may want to kick timer to unqueue urbs */
+ mod_timer(&t->timer, time);
+ }
+}
+
+void v_stop_timer(struct vudc *udc)
+{
+ struct transfer_timer *t = &udc->tr_timer;
+
+ /* timer itself will take care of stopping */
+ dev_dbg(&udc->pdev->dev, "timer stop");
+ t->state = VUDC_TR_STOPPED;
+}
diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c
new file mode 100644
index 000000000..234661782
--- /dev/null
+++ b/drivers/usb/usbip/vudc_tx.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
+ * Copyright (C) 2015-2016 Samsung Electronics
+ * Igor Kotrasinski <i.kotrasinsk@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <net/sock.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+
+#include "usbip_common.h"
+#include "vudc.h"
+
+static inline void setup_base_pdu(struct usbip_header_basic *base,
+ __u32 command, __u32 seqnum)
+{
+ base->command = command;
+ base->seqnum = seqnum;
+ base->devid = 0;
+ base->ep = 0;
+ base->direction = 0;
+}
+
+static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urbp *urb_p)
+{
+ setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, urb_p->seqnum);
+ usbip_pack_pdu(rpdu, urb_p->urb, USBIP_RET_SUBMIT, 1);
+}
+
+static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
+ struct v_unlink *unlink)
+{
+ setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
+ rpdu->u.ret_unlink.status = unlink->status;
+}
+
+static int v_send_ret_unlink(struct vudc *udc, struct v_unlink *unlink)
+{
+ struct msghdr msg;
+ struct kvec iov[1];
+ size_t txsize;
+
+ int ret;
+ struct usbip_header pdu_header;
+
+ txsize = 0;
+ memset(&pdu_header, 0, sizeof(pdu_header));
+ memset(&msg, 0, sizeof(msg));
+ memset(&iov, 0, sizeof(iov));
+
+ /* 1. setup usbip_header */
+ setup_ret_unlink_pdu(&pdu_header, unlink);
+ usbip_header_correct_endian(&pdu_header, 1);
+
+ iov[0].iov_base = &pdu_header;
+ iov[0].iov_len = sizeof(pdu_header);
+ txsize += sizeof(pdu_header);
+
+ ret = kernel_sendmsg(udc->ud.tcp_socket, &msg, iov,
+ 1, txsize);
+ if (ret != txsize) {
+ usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
+ if (ret >= 0)
+ return -EPIPE;
+ return ret;
+ }
+ kfree(unlink);
+
+ return txsize;
+}
+
+static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
+{
+ struct urb *urb = urb_p->urb;
+ struct usbip_header pdu_header;
+ struct usbip_iso_packet_descriptor *iso_buffer = NULL;
+ struct kvec *iov = NULL;
+ int iovnum = 0;
+ int ret = 0;
+ size_t txsize;
+ struct msghdr msg;
+
+ txsize = 0;
+ memset(&pdu_header, 0, sizeof(pdu_header));
+ memset(&msg, 0, sizeof(msg));
+
+ if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
+ iovnum = 2 + urb->number_of_packets;
+ else
+ iovnum = 2;
+
+ iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL);
+ if (!iov) {
+ usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
+ ret = -ENOMEM;
+ goto out;
+ }
+ iovnum = 0;
+
+ /* 1. setup usbip_header */
+ setup_ret_submit_pdu(&pdu_header, urb_p);
+ usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
+ pdu_header.base.seqnum, urb);
+ usbip_header_correct_endian(&pdu_header, 1);
+
+ iov[iovnum].iov_base = &pdu_header;
+ iov[iovnum].iov_len = sizeof(pdu_header);
+ iovnum++;
+ txsize += sizeof(pdu_header);
+
+ /* 2. setup transfer buffer */
+ if (urb_p->type != USB_ENDPOINT_XFER_ISOC &&
+ usb_pipein(urb->pipe) && urb->actual_length > 0) {
+ iov[iovnum].iov_base = urb->transfer_buffer;
+ iov[iovnum].iov_len = urb->actual_length;
+ iovnum++;
+ txsize += urb->actual_length;
+ } else if (urb_p->type == USB_ENDPOINT_XFER_ISOC &&
+ usb_pipein(urb->pipe)) {
+ /* FIXME - copypasted from stub_tx, refactor */
+ int i;
+
+ for (i = 0; i < urb->number_of_packets; i++) {
+ iov[iovnum].iov_base = urb->transfer_buffer +
+ urb->iso_frame_desc[i].offset;
+ iov[iovnum].iov_len =
+ urb->iso_frame_desc[i].actual_length;
+ iovnum++;
+ txsize += urb->iso_frame_desc[i].actual_length;
+ }
+
+ if (txsize != sizeof(pdu_header) + urb->actual_length) {
+ usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
+ ret = -EPIPE;
+ goto out;
+ }
+ }
+ /* else - no buffer to send */
+
+ /* 3. setup iso_packet_descriptor */
+ if (urb_p->type == USB_ENDPOINT_XFER_ISOC) {
+ ssize_t len = 0;
+
+ iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
+ if (!iso_buffer) {
+ usbip_event_add(&udc->ud,
+ VUDC_EVENT_ERROR_MALLOC);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ iov[iovnum].iov_base = iso_buffer;
+ iov[iovnum].iov_len = len;
+ txsize += len;
+ iovnum++;
+ }
+
+ ret = kernel_sendmsg(udc->ud.tcp_socket, &msg,
+ iov, iovnum, txsize);
+ if (ret != txsize) {
+ usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
+ if (ret >= 0)
+ ret = -EPIPE;
+ goto out;
+ }
+
+out:
+ kfree(iov);
+ kfree(iso_buffer);
+ free_urbp_and_urb(urb_p);
+ if (ret < 0)
+ return ret;
+ return txsize;
+}
+
+static int v_send_ret(struct vudc *udc)
+{
+ unsigned long flags;
+ struct tx_item *txi;
+ size_t total_size = 0;
+ int ret = 0;
+
+ spin_lock_irqsave(&udc->lock_tx, flags);
+ while (!list_empty(&udc->tx_queue)) {
+ txi = list_first_entry(&udc->tx_queue, struct tx_item,
+ tx_entry);
+ list_del(&txi->tx_entry);
+ spin_unlock_irqrestore(&udc->lock_tx, flags);
+
+ switch (txi->type) {
+ case TX_SUBMIT:
+ ret = v_send_ret_submit(udc, txi->s);
+ break;
+ case TX_UNLINK:
+ ret = v_send_ret_unlink(udc, txi->u);
+ break;
+ }
+ kfree(txi);
+
+ if (ret < 0)
+ return ret;
+
+ total_size += ret;
+
+ spin_lock_irqsave(&udc->lock_tx, flags);
+ }
+
+ spin_unlock_irqrestore(&udc->lock_tx, flags);
+ return total_size;
+}
+
+
+int v_tx_loop(void *data)
+{
+ struct usbip_device *ud = (struct usbip_device *) data;
+ struct vudc *udc = container_of(ud, struct vudc, ud);
+ int ret;
+
+ while (!kthread_should_stop()) {
+ if (usbip_event_happened(&udc->ud))
+ break;
+ ret = v_send_ret(udc);
+ if (ret < 0) {
+ pr_warn("v_tx exit with error %d", ret);
+ break;
+ }
+ wait_event_interruptible(udc->tx_waitq,
+ (!list_empty(&udc->tx_queue) ||
+ kthread_should_stop()));
+ }
+
+ return 0;
+}
+
+/* called with spinlocks held */
+void v_enqueue_ret_unlink(struct vudc *udc, __u32 seqnum, __u32 status)
+{
+ struct tx_item *txi;
+ struct v_unlink *unlink;
+
+ txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
+ if (!txi) {
+ usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
+ return;
+ }
+ unlink = kzalloc(sizeof(*unlink), GFP_ATOMIC);
+ if (!unlink) {
+ kfree(txi);
+ usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
+ return;
+ }
+
+ unlink->seqnum = seqnum;
+ unlink->status = status;
+ txi->type = TX_UNLINK;
+ txi->u = unlink;
+
+ list_add_tail(&txi->tx_entry, &udc->tx_queue);
+}
+
+/* called with spinlocks held */
+void v_enqueue_ret_submit(struct vudc *udc, struct urbp *urb_p)
+{
+ struct tx_item *txi;
+
+ txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
+ if (!txi) {
+ usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
+ return;
+ }
+
+ txi->type = TX_SUBMIT;
+ txi->s = urb_p;
+
+ list_add_tail(&txi->tx_entry, &udc->tx_queue);
+}
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 8ed8e34c3..33acd1599 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -54,7 +54,7 @@
#include <linux/usb/wusb.h>
#include <linux/scatterlist.h>
-static int debug_crypto_verify = 0;
+static int debug_crypto_verify;
module_param(debug_crypto_verify, int, 0);
MODULE_PARM_DESC(debug_crypto_verify, "verify the key generation algorithms");
@@ -390,7 +390,7 @@ static int wusb_oob_mic_verify(void)
0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
0x2c, 0x2d, 0x2e, 0x2f },
.MIC = { 0x75, 0x6a, 0x97, 0x51, 0x0c, 0x8c,
- 0x14, 0x7b } ,
+ 0x14, 0x7b },
};
size_t hs_size;
@@ -480,7 +480,7 @@ static int wusb_key_derive_verify(void)
printk(KERN_ERR "E: keydvt in: key\n");
wusb_key_dump(stv_key_a1, sizeof(stv_key_a1));
printk(KERN_ERR "E: keydvt in: nonce\n");
- wusb_key_dump( &stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1));
+ wusb_key_dump(&stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1));
printk(KERN_ERR "E: keydvt in: hnonce & dnonce\n");
wusb_key_dump(&stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1));
printk(KERN_ERR "E: keydvt out: KCK\n");
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index 3f4f5fbde..bf9551735 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -893,7 +893,6 @@ out:
error_nodev:
return;
- wusb_dev_sysfs_rm(wusb_dev);
error_add_sysfs:
wusb_dev_bos_rm(wusb_dev);
error_bos_add:
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 712a84978..188b1ff03 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -113,6 +113,35 @@ static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);
+/*
+ * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
+ * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
+ * If a device implements the former but not the latter, we would typically
+ * expect broken_intx_masking to be set and require an exclusive interrupt.
+ * However, since we do have control of the device's ability to assert INTx,
+ * we can instead pretend that the device does not implement INTx, virtualizing
+ * the pin register to report zero and maintaining DisINTx set on the host.
+ */
+static bool vfio_pci_nointx(struct pci_dev *pdev)
+{
+ switch (pdev->vendor) {
+ case PCI_VENDOR_ID_INTEL:
+ switch (pdev->device) {
+ /* All i40e (XL710/X710) 10/20/40GbE NICs */
+ case 0x1572:
+ case 0x1574:
+ case 0x1580 ... 0x1581:
+ case 0x1583 ... 0x1589:
+ case 0x37d0 ... 0x37d2:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ return false;
+}
+
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
@@ -136,23 +165,29 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
pr_debug("%s: Couldn't store %s saved state\n",
__func__, dev_name(&pdev->dev));
- ret = vfio_config_init(vdev);
- if (ret) {
- kfree(vdev->pci_saved_state);
- vdev->pci_saved_state = NULL;
- pci_disable_device(pdev);
- return ret;
+ if (likely(!nointxmask)) {
+ if (vfio_pci_nointx(pdev)) {
+ dev_info(&pdev->dev, "Masking broken INTx support\n");
+ vdev->nointx = true;
+ pci_intx(pdev, 0);
+ } else
+ vdev->pci_2_3 = pci_intx_mask_supported(pdev);
}
- if (likely(!nointxmask))
- vdev->pci_2_3 = pci_intx_mask_supported(pdev);
-
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
cmd &= ~PCI_COMMAND_INTX_DISABLE;
pci_write_config_word(pdev, PCI_COMMAND, cmd);
}
+ ret = vfio_config_init(vdev);
+ if (ret) {
+ kfree(vdev->pci_saved_state);
+ vdev->pci_saved_state = NULL;
+ pci_disable_device(pdev);
+ return ret;
+ }
+
msix_pos = pdev->msix_cap;
if (msix_pos) {
u16 flags;
@@ -304,7 +339,7 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
u8 pin;
pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
- if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && pin)
+ if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin)
return 1;
} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 142c533ef..688691d90 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -408,6 +408,7 @@ static void vfio_bar_restore(struct vfio_pci_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
u32 *rbar = vdev->rbar;
+ u16 cmd;
int i;
if (pdev->is_virtfn)
@@ -420,6 +421,12 @@ static void vfio_bar_restore(struct vfio_pci_device *vdev)
pci_user_write_config_dword(pdev, i, *rbar);
pci_user_write_config_dword(pdev, PCI_ROM_ADDRESS, *rbar);
+
+ if (vdev->nointx) {
+ pci_user_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_INTX_DISABLE;
+ pci_user_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
}
static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar)
@@ -515,6 +522,23 @@ static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
return count;
}
+/* Test whether BARs match the value we think they should contain */
+static bool vfio_need_bar_restore(struct vfio_pci_device *vdev)
+{
+ int i = 0, pos = PCI_BASE_ADDRESS_0, ret;
+ u32 bar;
+
+ for (; pos <= PCI_BASE_ADDRESS_5; i++, pos += 4) {
+ if (vdev->rbar[i]) {
+ ret = pci_user_read_config_dword(vdev->pdev, pos, &bar);
+ if (ret || vdev->rbar[i] != bar)
+ return true;
+ }
+ }
+
+ return false;
+}
+
static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
int count, struct perm_bits *perm,
int offset, __le32 val)
@@ -553,7 +577,8 @@ static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
* SR-IOV devices will trigger this, but we catch them later
*/
if ((new_mem && virt_mem && !phys_mem) ||
- (new_io && virt_io && !phys_io))
+ (new_io && virt_io && !phys_io) ||
+ vfio_need_bar_restore(vdev))
vfio_bar_restore(vdev);
}
@@ -724,7 +749,8 @@ static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos,
if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4)
return count;
} else {
- if (pci_read_vpd(pdev, addr, 4, &data) != 4)
+ data = 0;
+ if (pci_read_vpd(pdev, addr, 4, &data) < 0)
return count;
*pdata = cpu_to_le32(data);
}
@@ -1124,9 +1150,12 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
return pcibios_err_to_errno(ret);
if (PCI_X_CMD_VERSION(word)) {
- /* Test for extended capabilities */
- pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
- vdev->extended_caps = (dword != 0);
+ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
+ /* Test for extended capabilities */
+ pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE,
+ &dword);
+ vdev->extended_caps = (dword != 0);
+ }
return PCI_CAP_PCIX_SIZEOF_V2;
} else
return PCI_CAP_PCIX_SIZEOF_V0;
@@ -1138,9 +1167,11 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
return byte;
case PCI_CAP_ID_EXP:
- /* Test for extended capabilities */
- pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
- vdev->extended_caps = (dword != 0);
+ if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) {
+ /* Test for extended capabilities */
+ pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &dword);
+ vdev->extended_caps = (dword != 0);
+ }
/* length based on version */
if ((pcie_caps_reg(pdev) & PCI_EXP_FLAGS_VERS) == 1)
@@ -1545,7 +1576,7 @@ int vfio_config_init(struct vfio_pci_device *vdev)
*(__le16 *)&vconfig[PCI_DEVICE_ID] = cpu_to_le16(pdev->device);
}
- if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX))
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
vconfig[PCI_INTERRUPT_PIN] = 0;
ret = vfio_cap_init(vdev);
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index e9ea3fef1..15ecfc9c5 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -228,9 +228,9 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
- vfio_intx_set_signal(vdev, -1);
vfio_virqfd_disable(&vdev->ctx[0].unmask);
vfio_virqfd_disable(&vdev->ctx[0].mask);
+ vfio_intx_set_signal(vdev, -1);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
vdev->num_ctx = 0;
kfree(vdev->ctx);
@@ -401,13 +401,13 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
struct pci_dev *pdev = vdev->pdev;
int i;
- vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
-
for (i = 0; i < vdev->num_ctx; i++) {
vfio_virqfd_disable(&vdev->ctx[i].unmask);
vfio_virqfd_disable(&vdev->ctx[i].mask);
}
+ vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
+
if (msix) {
pci_disable_msix(vdev->pdev);
kfree(vdev->msix);
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 8a7d546d1..016c14a1b 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -83,6 +83,7 @@ struct vfio_pci_device {
bool bardirty;
bool has_vga;
bool needs_reset;
+ bool nointx;
struct pci_saved_state *pci_saved_state;
int refcnt;
struct eventfd_ctx *err_trigger;
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 0582b72ef..80378ddad 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -331,14 +331,12 @@ static void tce_iommu_free_table(struct iommu_table *tbl);
static void tce_iommu_release(void *iommu_data)
{
struct tce_container *container = iommu_data;
- struct iommu_table_group *table_group;
struct tce_iommu_group *tcegrp;
long i;
while (tce_groups_attached(container)) {
tcegrp = list_first_entry(&container->group_list,
struct tce_iommu_group, next);
- table_group = iommu_group_get_iommudata(tcegrp->grp);
tce_iommu_detach_group(iommu_data, tcegrp->grp);
}
@@ -1188,7 +1186,8 @@ static int tce_iommu_attach_group(void *iommu_data,
goto unlock_exit;
}
table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
- if (table_group_tmp->ops != table_group->ops) {
+ if (table_group_tmp->ops->create_table !=
+ table_group->ops->create_table) {
pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
iommu_group_id(iommu_group),
iommu_group_id(tcegrp->grp));
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 75b24e93c..2ba19424e 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -407,7 +407,7 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
mutex_lock(&iommu->lock);
list_for_each_entry(domain, &iommu->domain_list, next)
- bitmap &= domain->domain->ops->pgsize_bitmap;
+ bitmap &= domain->domain->pgsize_bitmap;
mutex_unlock(&iommu->lock);
/*
@@ -515,7 +515,7 @@ static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
unsigned long pfn, long npage, int prot)
{
long i;
- int ret;
+ int ret = 0;
for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
ret = iommu_map(domain->domain, iova,
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 0e6fd556c..9d6320e8f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -333,16 +333,6 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
-static int vhost_scsi_shutdown_session(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void vhost_scsi_close_session(struct se_session *se_sess)
-{
- return;
-}
-
static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
return 0;
@@ -2114,8 +2104,6 @@ static struct target_core_fabric_ops vhost_scsi_ops = {
.tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
.release_cmd = vhost_scsi_release_cmd,
.check_stop_free = vhost_scsi_check_stop_free,
- .shutdown_session = vhost_scsi_shutdown_session,
- .close_session = vhost_scsi_close_session,
.sess_get_index = vhost_scsi_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = vhost_scsi_write_pending,
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index e0606c01e..3c20af999 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -8,10 +8,6 @@ menu "Graphics support"
config HAVE_FB_ATMEL
bool
-config SH_MIPI_DSI
- tristate
- depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
-
config SH_LCD_MIPI_DSI
bool
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index bddc8b17a..288318ad2 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -164,18 +164,10 @@ static ssize_t brightness_show(struct device *dev,
return sprintf(buf, "%d\n", bd->props.brightness);
}
-static ssize_t brightness_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+int backlight_device_set_brightness(struct backlight_device *bd,
+ unsigned long brightness)
{
- int rc;
- struct backlight_device *bd = to_backlight_device(dev);
- unsigned long brightness;
-
- rc = kstrtoul(buf, 0, &brightness);
- if (rc)
- return rc;
-
- rc = -ENXIO;
+ int rc = -ENXIO;
mutex_lock(&bd->ops_lock);
if (bd->ops) {
@@ -185,7 +177,7 @@ static ssize_t brightness_store(struct device *dev,
pr_debug("set brightness to %lu\n", brightness);
bd->props.brightness = brightness;
backlight_update_status(bd);
- rc = count;
+ rc = 0;
}
}
mutex_unlock(&bd->ops_lock);
@@ -194,6 +186,23 @@ static ssize_t brightness_store(struct device *dev,
return rc;
}
+EXPORT_SYMBOL(backlight_device_set_brightness);
+
+static ssize_t brightness_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int rc;
+ struct backlight_device *bd = to_backlight_device(dev);
+ unsigned long brightness;
+
+ rc = kstrtoul(buf, 0, &brightness);
+ if (rc)
+ return rc;
+
+ rc = backlight_device_set_brightness(bd, brightness);
+
+ return rc ? rc : count;
+}
static DEVICE_ATTR_RW(brightness);
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -380,7 +389,7 @@ struct backlight_device *backlight_device_register(const char *name,
}
EXPORT_SYMBOL(backlight_device_register);
-bool backlight_device_registered(enum backlight_type type)
+struct backlight_device *backlight_device_get_by_type(enum backlight_type type)
{
bool found = false;
struct backlight_device *bd;
@@ -394,9 +403,9 @@ bool backlight_device_registered(enum backlight_type type)
}
mutex_unlock(&backlight_dev_list_mutex);
- return found;
+ return found ? bd : NULL;
}
-EXPORT_SYMBOL(backlight_device_registered);
+EXPORT_SYMBOL(backlight_device_get_by_type);
/**
* backlight_device_unregister - unregisters a backlight device object.
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index 35fe4825a..60d6c2ac8 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -162,7 +162,7 @@ static int lm3630a_intr_config(struct lm3630a_chip *pchip)
static void lm3630a_pwm_ctrl(struct lm3630a_chip *pchip, int br, int br_max)
{
- unsigned int period = pwm_get_period(pchip->pwmd);
+ unsigned int period = pchip->pdata->pwm_period;
unsigned int duty = br * period / br_max;
pwm_config(pchip->pwmd, duty, period);
@@ -424,8 +424,13 @@ static int lm3630a_probe(struct i2c_client *client,
dev_err(&client->dev, "fail : get pwm device\n");
return PTR_ERR(pchip->pwmd);
}
+
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to
+ * the atomic PWM API.
+ */
+ pwm_apply_args(pchip->pwmd);
}
- pchip->pwmd->period = pdata->pwm_period;
/* interrupt enable : irq 0 is not allowed */
pchip->irq = client->irq;
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index daca9e6a2..e5b14f526 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -246,6 +246,12 @@ static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
return;
lp->pwm = pwm;
+
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to
+ * the atomic PWM API.
+ */
+ pwm_apply_args(pwm);
}
pwm_config(lp->pwm, duty, period);
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index 5d583d7a5..cf869ec90 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -145,6 +145,12 @@ static void lp8788_pwm_ctrl(struct lp8788_bl *bl, int br, int max_br)
}
bl->pwm = pwm;
+
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to
+ * the atomic PWM API.
+ */
+ pwm_apply_args(pwm);
}
pwm_config(bl->pwm, duty, period);
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 64f9e1b86..b2b366bb0 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -201,6 +201,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct pwm_bl_data *pb;
int initial_blank = FB_BLANK_UNBLANK;
+ struct pwm_args pargs;
int ret;
if (!data) {
@@ -307,16 +308,21 @@ static int pwm_backlight_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "got pwm for backlight\n");
/*
+ * FIXME: pwm_apply_args() should be removed when switching to
+ * the atomic PWM API.
+ */
+ pwm_apply_args(pb->pwm);
+
+ /*
* The DT case will set the pwm_period_ns field to 0 and store the
* period, parsed from the DT, in the PWM device. For the non-DT case,
* set the period from platform data if it has not already been set
* via the PWM lookup table.
*/
- pb->period = pwm_get_period(pb->pwm);
- if (!pb->period && (data->pwm_period_ns > 0)) {
+ pwm_get_args(pb->pwm, &pargs);
+ pb->period = pargs.period;
+ if (!pb->period && (data->pwm_period_ns > 0))
pb->period = data->pwm_period_ns;
- pwm_set_period(pb->pwm, data->pwm_period_ns);
- }
pb->lth_brightness = data->lth_brightness * (pb->period / pb->scale);
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 6e92917ba..afd3301ac 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -170,7 +170,7 @@ static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
int height, int width);
static int fbcon_switch(struct vc_data *vc);
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
-static int fbcon_set_palette(struct vc_data *vc, unsigned char *table);
+static int fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
static int fbcon_scrolldelta(struct vc_data *vc, int lines);
/*
@@ -2652,7 +2652,7 @@ static struct fb_cmap palette_cmap = {
0, 16, palette_red, palette_green, palette_blue, NULL
};
-static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
+static int fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
int i, j, k, depth;
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index 296e94561..8edc06253 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -481,7 +481,7 @@ static int mdacon_switch(struct vc_data *c)
return 1; /* redrawing needed */
}
-static int mdacon_set_palette(struct vc_data *c, unsigned char *table)
+static int mdacon_set_palette(struct vc_data *c, const unsigned char *table)
{
return -EINVAL;
}
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index bb4e96255..0553dfe68 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -574,7 +574,7 @@ static int newport_font_set(struct vc_data *vc, struct console_font *font, unsig
return newport_set_font(vc->vc_num, font);
}
-static int newport_set_palette(struct vc_data *vc, unsigned char *table)
+static int newport_set_palette(struct vc_data *vc, const unsigned char *table)
{
return -EINVAL;
}
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 026fd1215..e440c2d9f 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -79,7 +79,7 @@ static const char *sticon_startup(void)
return "STI console";
}
-static int sticon_set_palette(struct vc_data *c, unsigned char *table)
+static int sticon_set_palette(struct vc_data *c, const unsigned char *table)
{
return -EINVAL;
}
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 517f565b6..8bf911002 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -80,7 +80,6 @@ static void vgacon_deinit(struct vc_data *c);
static void vgacon_cursor(struct vc_data *c, int mode);
static int vgacon_switch(struct vc_data *c);
static int vgacon_blank(struct vc_data *c, int blank, int mode_switch);
-static int vgacon_set_palette(struct vc_data *vc, unsigned char *table);
static int vgacon_scrolldelta(struct vc_data *c, int lines);
static int vgacon_set_origin(struct vc_data *c);
static void vgacon_save_screen(struct vc_data *c);
@@ -847,7 +846,7 @@ static int vgacon_switch(struct vc_data *c)
return 0; /* Redrawing not needed */
}
-static void vga_set_palette(struct vc_data *vc, unsigned char *table)
+static void vga_set_palette(struct vc_data *vc, const unsigned char *table)
{
int i, j;
@@ -860,7 +859,7 @@ static void vga_set_palette(struct vc_data *vc, unsigned char *table)
}
}
-static int vgacon_set_palette(struct vc_data *vc, unsigned char *table)
+static int vgacon_set_palette(struct vc_data *vc, const unsigned char *table)
{
#ifdef CAN_LOAD_PALETTE
if (vga_video_type != VIDEO_TYPE_VGAC || vga_palette_blanked
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 983280e8d..88b008fb8 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -761,7 +761,7 @@ config FB_VESA
config FB_EFI
bool "EFI-based Framebuffer Support"
- depends on (FB = y) && X86 && EFI
+ depends on (FB = y) && !IA64 && EFI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -1993,7 +1993,6 @@ config FB_SH_MOBILE_LCDC
select FB_SYS_FOPS
select FB_DEFERRED_IO
select FB_BACKLIGHT
- select SH_MIPI_DSI if SH_LCD_MIPI_DSI
---help---
Frame buffer driver for the on-chip SH-Mobile LCD controller.
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index 65fb15075..f6731867d 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -117,7 +117,6 @@ obj-$(CONFIG_FB_SM501) += sm501fb.o
obj-$(CONFIG_FB_UDL) += udlfb.o
obj-$(CONFIG_FB_SMSCUFX) += smscufx.o
obj-$(CONFIG_FB_XILINX) += xilinxfb.o
-obj-$(CONFIG_SH_MIPI_DSI) += sh_mipi_dsi.o
obj-$(CONFIG_FB_SH_MOBILE_MERAM) += sh_mobile_meram.o
obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
obj-$(CONFIG_FB_OMAP) += omap/
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 93e66a914..9b158869c 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -34,8 +34,6 @@
#include <video/of_display_timing.h>
#include <video/videomode.h>
-#include <asm/sizes.h>
-
#define to_clcd(info) container_of(info, struct clcd_fb, fb)
/* This is limited to 16 characters when displayed by X startup */
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 57721c731..74b5bcac8 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -164,7 +164,7 @@ static const struct address_space_operations fb_deferred_io_aops = {
.set_page_dirty = fb_deferred_io_set_page_dirty,
};
-static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
+int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
vma->vm_ops = &fb_deferred_io_vm_ops;
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
@@ -173,6 +173,7 @@ static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma->vm_private_data = info;
return 0;
}
+EXPORT_SYMBOL(fb_deferred_io_mmap);
/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 4e73b6f6b..76c1ad96f 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1854,17 +1854,31 @@ EXPORT_SYMBOL(fb_set_suspend);
static int __init
fbmem_init(void)
{
- proc_create("fb", 0, NULL, &fb_proc_fops);
+ int ret;
+
+ if (!proc_create("fb", 0, NULL, &fb_proc_fops))
+ return -ENOMEM;
- if (register_chrdev(FB_MAJOR,"fb",&fb_fops))
+ ret = register_chrdev(FB_MAJOR, "fb", &fb_fops);
+ if (ret) {
printk("unable to get major %d for fb devs\n", FB_MAJOR);
+ goto err_chrdev;
+ }
fb_class = class_create(THIS_MODULE, "graphics");
if (IS_ERR(fb_class)) {
- printk(KERN_WARNING "Unable to create fb class; errno = %ld\n", PTR_ERR(fb_class));
+ ret = PTR_ERR(fb_class);
+ pr_warn("Unable to create fb class; errno = %d\n", ret);
fb_class = NULL;
+ goto err_class;
}
return 0;
+
+err_class:
+ unregister_chrdev(FB_MAJOR, "fb");
+err_chrdev:
+ remove_proc_entry("fb", NULL);
+ return ret;
}
#ifdef MODULE
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index d8d583d32..c229b1a0d 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -713,7 +713,7 @@ static int da8xx_fb_config_clk_divider(struct da8xx_fb_par *par,
if (par->lcdc_clk_rate != lcdc_clk_rate) {
ret = clk_set_rate(par->lcdc_clk, lcdc_clk_rate);
- if (IS_ERR_VALUE(ret)) {
+ if (ret) {
dev_err(par->dev,
"unable to set clock rate at %u\n",
lcdc_clk_rate);
@@ -784,7 +784,7 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
int ret = 0;
ret = da8xx_fb_calc_config_clk_divider(par, panel);
- if (IS_ERR_VALUE(ret)) {
+ if (ret) {
dev_err(par->dev, "unable to configure clock\n");
return ret;
}
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 95d293b74..924bad45c 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -6,16 +6,14 @@
*
*/
-#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/efi.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
-#include <linux/dmi.h>
-#include <linux/pci.h>
#include <video/vga.h>
-#include <asm/sysfb.h>
+#include <asm/efi.h>
static bool request_mem_succeeded = false;
@@ -85,21 +83,13 @@ static struct fb_ops efifb_ops = {
static int efifb_setup(char *options)
{
char *this_opt;
- int i;
if (options && *options) {
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt) continue;
- for (i = 0; i < M_UNKNOWN; i++) {
- if (efifb_dmi_list[i].base != 0 &&
- !strcmp(this_opt, efifb_dmi_list[i].optname)) {
- screen_info.lfb_base = efifb_dmi_list[i].base;
- screen_info.lfb_linelength = efifb_dmi_list[i].stride;
- screen_info.lfb_width = efifb_dmi_list[i].width;
- screen_info.lfb_height = efifb_dmi_list[i].height;
- }
- }
+ efifb_setup_from_dmi(&screen_info, this_opt);
+
if (!strncmp(this_opt, "base:", 5))
screen_info.lfb_base = simple_strtoul(this_opt+5, NULL, 0);
else if (!strncmp(this_opt, "stride:", 7))
@@ -247,10 +237,8 @@ static int efifb_probe(struct platform_device *dev)
goto err_release_fb;
}
- printk(KERN_INFO "efifb: framebuffer at 0x%lx, mapped to 0x%p, "
- "using %dk, total %dk\n",
- efifb_fix.smem_start, info->screen_base,
- size_remap/1024, size_total/1024);
+ printk(KERN_INFO "efifb: framebuffer at 0x%lx, using %dk, total %dk\n",
+ efifb_fix.smem_start, size_remap/1024, size_total/1024);
printk(KERN_INFO "efifb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
efifb_defined.xres, efifb_defined.yres,
efifb_defined.bits_per_pixel, efifb_fix.line_length,
@@ -338,5 +326,4 @@ static struct platform_driver efifb_driver = {
.remove = efifb_remove,
};
-module_platform_driver(efifb_driver);
-MODULE_LICENSE("GPL");
+builtin_platform_driver(efifb_driver);
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index e2451bdb4..2fd49b235 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -743,7 +743,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
err3:
iounmap(fb_virt);
err2:
- release_mem_region(par->mem->start, screen_fb_size);
+ vmbus_free_mmio(par->mem->start, screen_fb_size);
par->mem = NULL;
err1:
if (!gen2vm)
@@ -758,7 +758,7 @@ static void hvfb_putmem(struct fb_info *info)
struct hvfb_par *par = info->par;
iounmap(info->screen_base);
- release_mem_region(par->mem->start, screen_fb_size);
+ vmbus_free_mmio(par->mem->start, screen_fb_size);
par->mem = NULL;
}
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index 76b6a7784..fe0c4eeff 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -473,11 +473,12 @@ static int imxfb_set_par(struct fb_info *info)
return 0;
}
-static void imxfb_enable_controller(struct imxfb_info *fbi)
+static int imxfb_enable_controller(struct imxfb_info *fbi)
{
+ int ret;
if (fbi->enabled)
- return;
+ return 0;
pr_debug("Enabling LCD controller\n");
@@ -496,10 +497,29 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
*/
writel(RMCR_LCDC_EN_MX1, fbi->regs + LCDC_RMCR);
- clk_prepare_enable(fbi->clk_ipg);
- clk_prepare_enable(fbi->clk_ahb);
- clk_prepare_enable(fbi->clk_per);
+ ret = clk_prepare_enable(fbi->clk_ipg);
+ if (ret)
+ goto err_enable_ipg;
+
+ ret = clk_prepare_enable(fbi->clk_ahb);
+ if (ret)
+ goto err_enable_ahb;
+
+ ret = clk_prepare_enable(fbi->clk_per);
+ if (ret)
+ goto err_enable_per;
+
fbi->enabled = true;
+ return 0;
+
+err_enable_per:
+ clk_disable_unprepare(fbi->clk_ahb);
+err_enable_ahb:
+ clk_disable_unprepare(fbi->clk_ipg);
+err_enable_ipg:
+ writel(0, fbi->regs + LCDC_RMCR);
+
+ return ret;
}
static void imxfb_disable_controller(struct imxfb_info *fbi)
@@ -510,8 +530,8 @@ static void imxfb_disable_controller(struct imxfb_info *fbi)
pr_debug("Disabling LCD controller\n");
clk_disable_unprepare(fbi->clk_per);
- clk_disable_unprepare(fbi->clk_ipg);
clk_disable_unprepare(fbi->clk_ahb);
+ clk_disable_unprepare(fbi->clk_ipg);
fbi->enabled = false;
writel(0, fbi->regs + LCDC_RMCR);
@@ -532,8 +552,7 @@ static int imxfb_blank(int blank, struct fb_info *info)
break;
case FB_BLANK_UNBLANK:
- imxfb_enable_controller(fbi);
- break;
+ return imxfb_enable_controller(fbi);
}
return 0;
}
@@ -758,10 +777,11 @@ static int imxfb_lcd_get_power(struct lcd_device *lcddev)
{
struct imxfb_info *fbi = dev_get_drvdata(&lcddev->dev);
- if (!IS_ERR(fbi->lcd_pwr))
- return regulator_is_enabled(fbi->lcd_pwr);
+ if (!IS_ERR(fbi->lcd_pwr) &&
+ !regulator_is_enabled(fbi->lcd_pwr))
+ return FB_BLANK_POWERDOWN;
- return 1;
+ return FB_BLANK_UNBLANK;
}
static int imxfb_lcd_set_power(struct lcd_device *lcddev, int power)
@@ -769,7 +789,7 @@ static int imxfb_lcd_set_power(struct lcd_device *lcddev, int power)
struct imxfb_info *fbi = dev_get_drvdata(&lcddev->dev);
if (!IS_ERR(fbi->lcd_pwr)) {
- if (power)
+ if (power == FB_BLANK_UNBLANK)
return regulator_enable(fbi->lcd_pwr);
else
return regulator_disable(fbi->lcd_pwr);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index 0eec073b3..d63e59807 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -1180,13 +1180,11 @@ static int dsi_regulator_init(struct platform_device *dsidev)
return PTR_ERR(vdds_dsi);
}
- if (regulator_can_change_voltage(vdds_dsi)) {
- r = regulator_set_voltage(vdds_dsi, 1800000, 1800000);
- if (r) {
- devm_regulator_put(vdds_dsi);
- DSSERR("can't set the DSI regulator voltage\n");
- return r;
- }
+ r = regulator_set_voltage(vdds_dsi, 1800000, 1800000);
+ if (r) {
+ devm_regulator_put(vdds_dsi);
+ DSSERR("can't set the DSI regulator voltage\n");
+ return r;
}
dsi->vdds_dsi_reg = vdds_dsi;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 7103c659a..2e71aec83 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -114,13 +114,11 @@ static int hdmi_init_regulator(void)
return PTR_ERR(reg);
}
- if (regulator_can_change_voltage(reg)) {
- r = regulator_set_voltage(reg, 1800000, 1800000);
- if (r) {
- devm_regulator_put(reg);
- DSSWARN("can't set the regulator voltage\n");
- return r;
- }
+ r = regulator_set_voltage(reg, 1800000, 1800000);
+ if (r) {
+ devm_regulator_put(reg);
+ DSSWARN("can't set the regulator voltage\n");
+ return r;
}
hdmi.vdda_reg = reg;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index a955a2c4c..aade6d996 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -131,13 +131,11 @@ static int hdmi_init_regulator(void)
return PTR_ERR(reg);
}
- if (regulator_can_change_voltage(reg)) {
- r = regulator_set_voltage(reg, 1800000, 1800000);
- if (r) {
- devm_regulator_put(reg);
- DSSWARN("can't set the regulator voltage\n");
- return r;
- }
+ r = regulator_set_voltage(reg, 1800000, 1800000);
+ if (r) {
+ devm_regulator_put(reg);
+ DSSWARN("can't set the regulator voltage\n");
+ return r;
}
hdmi.vdda_reg = reg;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
index 8ea531d26..bbfe7e2d4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
@@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
- const unsigned ss_scl_high = 4000; /* ns */
- const unsigned ss_scl_low = 4700; /* ns */
+ const unsigned ss_scl_high = 4600; /* ns */
+ const unsigned ss_scl_low = 5400; /* ns */
const unsigned fs_scl_high = 600; /* ns */
const unsigned fs_scl_low = 1300; /* ns */
const unsigned sda_hold = 1000; /* ns */
@@ -442,7 +442,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
c = (ptr[1] >> 6) & 0x3;
m = (ptr[1] >> 4) & 0x3;
- r = (ptr[1] >> 0) & 0x3;
+ r = (ptr[1] >> 0) & 0xf;
itc = (ptr[2] >> 7) & 0x1;
ec = (ptr[2] >> 4) & 0x7;
diff --git a/drivers/video/fbdev/sh_mipi_dsi.c b/drivers/video/fbdev/sh_mipi_dsi.c
deleted file mode 100644
index 8f6e8ff62..000000000
--- a/drivers/video/fbdev/sh_mipi_dsi.c
+++ /dev/null
@@ -1,587 +0,0 @@
-/*
- * Renesas SH-mobile MIPI DSI support
- *
- * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- */
-
-#include <linux/bitmap.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/module.h>
-
-#include <video/mipi_display.h>
-#include <video/sh_mipi_dsi.h>
-#include <video/sh_mobile_lcdc.h>
-
-#include "sh_mobile_lcdcfb.h"
-
-#define SYSCTRL 0x0000
-#define SYSCONF 0x0004
-#define TIMSET 0x0008
-#define RESREQSET0 0x0018
-#define RESREQSET1 0x001c
-#define HSTTOVSET 0x0020
-#define LPRTOVSET 0x0024
-#define TATOVSET 0x0028
-#define PRTOVSET 0x002c
-#define DSICTRL 0x0030
-#define DSIINTE 0x0060
-#define PHYCTRL 0x0070
-
-/* relative to linkbase */
-#define DTCTR 0x0000
-#define VMCTR1 0x0020
-#define VMCTR2 0x0024
-#define VMLEN1 0x0028
-#define VMLEN2 0x002c
-#define CMTSRTREQ 0x0070
-#define CMTSRTCTR 0x00d0
-
-/* E.g., sh7372 has 2 MIPI-DSIs - one for each LCDC */
-#define MAX_SH_MIPI_DSI 2
-
-struct sh_mipi {
- struct sh_mobile_lcdc_entity entity;
-
- void __iomem *base;
- void __iomem *linkbase;
- struct clk *dsit_clk;
- struct platform_device *pdev;
-};
-
-#define to_sh_mipi(e) container_of(e, struct sh_mipi, entity)
-
-static struct sh_mipi *mipi_dsi[MAX_SH_MIPI_DSI];
-
-/* Protect the above array */
-static DEFINE_MUTEX(array_lock);
-
-static struct sh_mipi *sh_mipi_by_handle(int handle)
-{
- if (handle >= ARRAY_SIZE(mipi_dsi) || handle < 0)
- return NULL;
-
- return mipi_dsi[handle];
-}
-
-static int sh_mipi_send_short(struct sh_mipi *mipi, u8 dsi_cmd,
- u8 cmd, u8 param)
-{
- u32 data = (dsi_cmd << 24) | (cmd << 16) | (param << 8);
- int cnt = 100;
-
- /* transmit a short packet to LCD panel */
- iowrite32(1 | data, mipi->linkbase + CMTSRTCTR);
- iowrite32(1, mipi->linkbase + CMTSRTREQ);
-
- while ((ioread32(mipi->linkbase + CMTSRTREQ) & 1) && --cnt)
- udelay(1);
-
- return cnt ? 0 : -ETIMEDOUT;
-}
-
-#define LCD_CHAN2MIPI(c) ((c) < LCDC_CHAN_MAINLCD || (c) > LCDC_CHAN_SUBLCD ? \
- -EINVAL : (c) - 1)
-
-static int sh_mipi_dcs(int handle, u8 cmd)
-{
- struct sh_mipi *mipi = sh_mipi_by_handle(LCD_CHAN2MIPI(handle));
- if (!mipi)
- return -ENODEV;
- return sh_mipi_send_short(mipi, MIPI_DSI_DCS_SHORT_WRITE, cmd, 0);
-}
-
-static int sh_mipi_dcs_param(int handle, u8 cmd, u8 param)
-{
- struct sh_mipi *mipi = sh_mipi_by_handle(LCD_CHAN2MIPI(handle));
- if (!mipi)
- return -ENODEV;
- return sh_mipi_send_short(mipi, MIPI_DSI_DCS_SHORT_WRITE_PARAM, cmd,
- param);
-}
-
-static void sh_mipi_dsi_enable(struct sh_mipi *mipi, bool enable)
-{
- /*
- * enable LCDC data tx, transition to LPS after completion of each HS
- * packet
- */
- iowrite32(0x00000002 | enable, mipi->linkbase + DTCTR);
-}
-
-static void sh_mipi_shutdown(struct platform_device *pdev)
-{
- struct sh_mipi *mipi = to_sh_mipi(platform_get_drvdata(pdev));
-
- sh_mipi_dsi_enable(mipi, false);
-}
-
-static int sh_mipi_setup(struct sh_mipi *mipi, const struct fb_videomode *mode)
-{
- void __iomem *base = mipi->base;
- struct sh_mipi_dsi_info *pdata = mipi->pdev->dev.platform_data;
- u32 pctype, datatype, pixfmt, linelength, vmctr2;
- u32 tmp, top, bottom, delay, div;
- int bpp;
-
- /*
- * Select data format. MIPI DSI is not hot-pluggable, so, we just use
- * the default videomode. If this ever becomes a problem, We'll have to
- * move this to mipi_display_on() above and use info->var.xres
- */
- switch (pdata->data_format) {
- case MIPI_RGB888:
- pctype = 0;
- datatype = MIPI_DSI_PACKED_PIXEL_STREAM_24;
- pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
- linelength = mode->xres * 3;
- break;
- case MIPI_RGB565:
- pctype = 1;
- datatype = MIPI_DSI_PACKED_PIXEL_STREAM_16;
- pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
- linelength = mode->xres * 2;
- break;
- case MIPI_RGB666_LP:
- pctype = 2;
- datatype = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
- pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
- linelength = mode->xres * 3;
- break;
- case MIPI_RGB666:
- pctype = 3;
- datatype = MIPI_DSI_PACKED_PIXEL_STREAM_18;
- pixfmt = MIPI_DCS_PIXEL_FMT_18BIT;
- linelength = (mode->xres * 18 + 7) / 8;
- break;
- case MIPI_BGR888:
- pctype = 8;
- datatype = MIPI_DSI_PACKED_PIXEL_STREAM_24;
- pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
- linelength = mode->xres * 3;
- break;
- case MIPI_BGR565:
- pctype = 9;
- datatype = MIPI_DSI_PACKED_PIXEL_STREAM_16;
- pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
- linelength = mode->xres * 2;
- break;
- case MIPI_BGR666_LP:
- pctype = 0xa;
- datatype = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
- pixfmt = MIPI_DCS_PIXEL_FMT_24BIT;
- linelength = mode->xres * 3;
- break;
- case MIPI_BGR666:
- pctype = 0xb;
- datatype = MIPI_DSI_PACKED_PIXEL_STREAM_18;
- pixfmt = MIPI_DCS_PIXEL_FMT_18BIT;
- linelength = (mode->xres * 18 + 7) / 8;
- break;
- case MIPI_YUYV:
- pctype = 4;
- datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16;
- pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
- linelength = mode->xres * 2;
- break;
- case MIPI_UYVY:
- pctype = 5;
- datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16;
- pixfmt = MIPI_DCS_PIXEL_FMT_16BIT;
- linelength = mode->xres * 2;
- break;
- case MIPI_YUV420_L:
- pctype = 6;
- datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12;
- pixfmt = MIPI_DCS_PIXEL_FMT_12BIT;
- linelength = (mode->xres * 12 + 7) / 8;
- break;
- case MIPI_YUV420:
- pctype = 7;
- datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12;
- pixfmt = MIPI_DCS_PIXEL_FMT_12BIT;
- /* Length of U/V line */
- linelength = (mode->xres + 1) / 2;
- break;
- default:
- return -EINVAL;
- }
-
- if (!pdata->lane)
- return -EINVAL;
-
- /* reset DSI link */
- iowrite32(0x00000001, base + SYSCTRL);
- /* Hold reset for 100 cycles of the slowest of bus, HS byte and LP clock */
- udelay(50);
- iowrite32(0x00000000, base + SYSCTRL);
-
- /* setup DSI link */
-
- /*
- * T_wakeup = 0x7000
- * T_hs-trail = 3
- * T_hs-prepare = 3
- * T_clk-trail = 3
- * T_clk-prepare = 2
- */
- iowrite32(0x70003332, base + TIMSET);
- /* no responses requested */
- iowrite32(0x00000000, base + RESREQSET0);
- /* request response to packets of type 0x28 */
- iowrite32(0x00000100, base + RESREQSET1);
- /* High-speed transmission timeout, default 0xffffffff */
- iowrite32(0x0fffffff, base + HSTTOVSET);
- /* LP reception timeout, default 0xffffffff */
- iowrite32(0x0fffffff, base + LPRTOVSET);
- /* Turn-around timeout, default 0xffffffff */
- iowrite32(0x0fffffff, base + TATOVSET);
- /* Peripheral reset timeout, default 0xffffffff */
- iowrite32(0x0fffffff, base + PRTOVSET);
- /* Interrupts not used, disable all */
- iowrite32(0, base + DSIINTE);
- /* DSI-Tx bias on */
- iowrite32(0x00000001, base + PHYCTRL);
- udelay(200);
- /* Deassert resets, power on */
- iowrite32(0x03070001 | pdata->phyctrl, base + PHYCTRL);
-
- /*
- * Default = ULPS enable |
- * Contention detection enabled |
- * EoT packet transmission enable |
- * CRC check enable |
- * ECC check enable
- */
- bitmap_fill((unsigned long *)&tmp, pdata->lane);
- tmp |= 0x00003700;
- iowrite32(tmp, base + SYSCONF);
-
- /* setup l-bridge */
-
- /*
- * Enable transmission of all packets,
- * transmit LPS after each HS packet completion
- */
- iowrite32(0x00000006, mipi->linkbase + DTCTR);
- /* VSYNC width = 2 (<< 17) */
- iowrite32((mode->vsync_len << pdata->vsynw_offset) |
- (pdata->clksrc << 16) | (pctype << 12) | datatype,
- mipi->linkbase + VMCTR1);
-
- /*
- * Non-burst mode with sync pulses: VSE and HSE are output,
- * HSA period allowed, no commands in LP
- */
- vmctr2 = 0;
- if (pdata->flags & SH_MIPI_DSI_VSEE)
- vmctr2 |= 1 << 23;
- if (pdata->flags & SH_MIPI_DSI_HSEE)
- vmctr2 |= 1 << 22;
- if (pdata->flags & SH_MIPI_DSI_HSAE)
- vmctr2 |= 1 << 21;
- if (pdata->flags & SH_MIPI_DSI_BL2E)
- vmctr2 |= 1 << 17;
- if (pdata->flags & SH_MIPI_DSI_HSABM)
- vmctr2 |= 1 << 5;
- if (pdata->flags & SH_MIPI_DSI_HBPBM)
- vmctr2 |= 1 << 4;
- if (pdata->flags & SH_MIPI_DSI_HFPBM)
- vmctr2 |= 1 << 3;
- iowrite32(vmctr2, mipi->linkbase + VMCTR2);
-
- /*
- * VMLEN1 = RGBLEN | HSALEN
- *
- * see
- * Video mode - Blanking Packet setting
- */
- top = linelength << 16; /* RGBLEN */
- bottom = 0x00000001;
- if (pdata->flags & SH_MIPI_DSI_HSABM) /* HSALEN */
- bottom = (pdata->lane * mode->hsync_len) - 10;
- iowrite32(top | bottom , mipi->linkbase + VMLEN1);
-
- /*
- * VMLEN2 = HBPLEN | HFPLEN
- *
- * see
- * Video mode - Blanking Packet setting
- */
- top = 0x00010000;
- bottom = 0x00000001;
- delay = 0;
-
- div = 1; /* HSbyteCLK is calculation base
- * HS4divCLK = HSbyteCLK/2
- * HS6divCLK is not supported for now */
- if (pdata->flags & SH_MIPI_DSI_HS4divCLK)
- div = 2;
-
- if (pdata->flags & SH_MIPI_DSI_HFPBM) { /* HBPLEN */
- top = mode->hsync_len + mode->left_margin;
- top = ((pdata->lane * top / div) - 10) << 16;
- }
- if (pdata->flags & SH_MIPI_DSI_HBPBM) { /* HFPLEN */
- bottom = mode->right_margin;
- bottom = (pdata->lane * bottom / div) - 12;
- }
-
- bpp = linelength / mode->xres; /* byte / pixel */
- if ((pdata->lane / div) > bpp) {
- tmp = mode->xres / bpp; /* output cycle */
- tmp = mode->xres - tmp; /* (input - output) cycle */
- delay = (pdata->lane * tmp);
- }
-
- iowrite32(top | (bottom + delay) , mipi->linkbase + VMLEN2);
-
- msleep(5);
-
- /* setup LCD panel */
-
- /* cf. drivers/video/omap/lcd_mipid.c */
- sh_mipi_dcs(pdata->channel, MIPI_DCS_EXIT_SLEEP_MODE);
- msleep(120);
- /*
- * [7] - Page Address Mode
- * [6] - Column Address Mode
- * [5] - Page / Column Address Mode
- * [4] - Display Device Line Refresh Order
- * [3] - RGB/BGR Order
- * [2] - Display Data Latch Data Order
- * [1] - Flip Horizontal
- * [0] - Flip Vertical
- */
- sh_mipi_dcs_param(pdata->channel, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
- /* cf. set_data_lines() */
- sh_mipi_dcs_param(pdata->channel, MIPI_DCS_SET_PIXEL_FORMAT,
- pixfmt << 4);
- sh_mipi_dcs(pdata->channel, MIPI_DCS_SET_DISPLAY_ON);
-
- /* Enable timeout counters */
- iowrite32(0x00000f00, base + DSICTRL);
-
- return 0;
-}
-
-static int mipi_display_on(struct sh_mobile_lcdc_entity *entity)
-{
- struct sh_mipi *mipi = to_sh_mipi(entity);
- struct sh_mipi_dsi_info *pdata = mipi->pdev->dev.platform_data;
- int ret;
-
- pm_runtime_get_sync(&mipi->pdev->dev);
-
- ret = pdata->set_dot_clock(mipi->pdev, mipi->base, 1);
- if (ret < 0)
- goto mipi_display_on_fail1;
-
- ret = sh_mipi_setup(mipi, &entity->def_mode);
- if (ret < 0)
- goto mipi_display_on_fail2;
-
- sh_mipi_dsi_enable(mipi, true);
-
- return SH_MOBILE_LCDC_DISPLAY_CONNECTED;
-
-mipi_display_on_fail1:
- pm_runtime_put_sync(&mipi->pdev->dev);
-mipi_display_on_fail2:
- pdata->set_dot_clock(mipi->pdev, mipi->base, 0);
-
- return ret;
-}
-
-static void mipi_display_off(struct sh_mobile_lcdc_entity *entity)
-{
- struct sh_mipi *mipi = to_sh_mipi(entity);
- struct sh_mipi_dsi_info *pdata = mipi->pdev->dev.platform_data;
-
- sh_mipi_dsi_enable(mipi, false);
-
- pdata->set_dot_clock(mipi->pdev, mipi->base, 0);
-
- pm_runtime_put_sync(&mipi->pdev->dev);
-}
-
-static const struct sh_mobile_lcdc_entity_ops mipi_ops = {
- .display_on = mipi_display_on,
- .display_off = mipi_display_off,
-};
-
-static int __init sh_mipi_probe(struct platform_device *pdev)
-{
- struct sh_mipi *mipi;
- struct sh_mipi_dsi_info *pdata = pdev->dev.platform_data;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct resource *res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- unsigned long rate, f_current;
- int idx = pdev->id, ret;
-
- if (!res || !res2 || idx >= ARRAY_SIZE(mipi_dsi) || !pdata)
- return -ENODEV;
-
- if (!pdata->set_dot_clock)
- return -EINVAL;
-
- mutex_lock(&array_lock);
- if (idx < 0)
- for (idx = 0; idx < ARRAY_SIZE(mipi_dsi) && mipi_dsi[idx]; idx++)
- ;
-
- if (idx == ARRAY_SIZE(mipi_dsi)) {
- ret = -EBUSY;
- goto efindslot;
- }
-
- mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
- if (!mipi) {
- ret = -ENOMEM;
- goto ealloc;
- }
-
- mipi->entity.owner = THIS_MODULE;
- mipi->entity.ops = &mipi_ops;
-
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "MIPI register region already claimed\n");
- ret = -EBUSY;
- goto ereqreg;
- }
-
- mipi->base = ioremap(res->start, resource_size(res));
- if (!mipi->base) {
- ret = -ENOMEM;
- goto emap;
- }
-
- if (!request_mem_region(res2->start, resource_size(res2), pdev->name)) {
- dev_err(&pdev->dev, "MIPI register region 2 already claimed\n");
- ret = -EBUSY;
- goto ereqreg2;
- }
-
- mipi->linkbase = ioremap(res2->start, resource_size(res2));
- if (!mipi->linkbase) {
- ret = -ENOMEM;
- goto emap2;
- }
-
- mipi->pdev = pdev;
-
- mipi->dsit_clk = clk_get(&pdev->dev, "dsit_clk");
- if (IS_ERR(mipi->dsit_clk)) {
- ret = PTR_ERR(mipi->dsit_clk);
- goto eclktget;
- }
-
- f_current = clk_get_rate(mipi->dsit_clk);
- /* 80MHz required by the datasheet */
- rate = clk_round_rate(mipi->dsit_clk, 80000000);
- if (rate > 0 && rate != f_current)
- ret = clk_set_rate(mipi->dsit_clk, rate);
- else
- ret = rate;
- if (ret < 0)
- goto esettrate;
-
- dev_dbg(&pdev->dev, "DSI-T clk %lu -> %lu\n", f_current, rate);
-
- ret = clk_enable(mipi->dsit_clk);
- if (ret < 0)
- goto eclkton;
-
- mipi_dsi[idx] = mipi;
-
- pm_runtime_enable(&pdev->dev);
- pm_runtime_resume(&pdev->dev);
-
- mutex_unlock(&array_lock);
- platform_set_drvdata(pdev, &mipi->entity);
-
- return 0;
-
-eclkton:
-esettrate:
- clk_put(mipi->dsit_clk);
-eclktget:
- iounmap(mipi->linkbase);
-emap2:
- release_mem_region(res2->start, resource_size(res2));
-ereqreg2:
- iounmap(mipi->base);
-emap:
- release_mem_region(res->start, resource_size(res));
-ereqreg:
- kfree(mipi);
-ealloc:
-efindslot:
- mutex_unlock(&array_lock);
-
- return ret;
-}
-
-static int sh_mipi_remove(struct platform_device *pdev)
-{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct resource *res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- struct sh_mipi *mipi = to_sh_mipi(platform_get_drvdata(pdev));
- int i, ret;
-
- mutex_lock(&array_lock);
-
- for (i = 0; i < ARRAY_SIZE(mipi_dsi) && mipi_dsi[i] != mipi; i++)
- ;
-
- if (i == ARRAY_SIZE(mipi_dsi)) {
- ret = -EINVAL;
- } else {
- ret = 0;
- mipi_dsi[i] = NULL;
- }
-
- mutex_unlock(&array_lock);
-
- if (ret < 0)
- return ret;
-
- pm_runtime_disable(&pdev->dev);
- clk_disable(mipi->dsit_clk);
- clk_put(mipi->dsit_clk);
-
- iounmap(mipi->linkbase);
- if (res2)
- release_mem_region(res2->start, resource_size(res2));
- iounmap(mipi->base);
- if (res)
- release_mem_region(res->start, resource_size(res));
- kfree(mipi);
-
- return 0;
-}
-
-static struct platform_driver sh_mipi_driver = {
- .remove = sh_mipi_remove,
- .shutdown = sh_mipi_shutdown,
- .driver = {
- .name = "sh-mipi-dsi",
- },
-};
-
-module_platform_driver_probe(sh_mipi_driver, sh_mipi_probe);
-
-MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
-MODULE_DESCRIPTION("SuperH / ARM-shmobile MIPI DSI driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index fa3480815..a9c45c89b 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -286,6 +286,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
{
int ret;
u32 precharge, dclk, com_invdir, compins;
+ struct pwm_args pargs;
if (par->device_info->need_pwm) {
par->pwm = pwm_get(&par->client->dev, NULL);
@@ -294,7 +295,15 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
return PTR_ERR(par->pwm);
}
- par->pwm_period = pwm_get_period(par->pwm);
+ /*
+ * FIXME: pwm_apply_args() should be removed when switching to
+ * the atomic PWM API.
+ */
+ pwm_apply_args(par->pwm);
+
+ pwm_get_args(par->pwm, &pargs);
+
+ par->pwm_period = pargs.period;
/* Enable the PWM */
pwm_config(par->pwm, par->pwm_period / 2, par->pwm_period);
pwm_enable(par->pwm);
@@ -389,7 +398,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
return ret;
ret = ssd1307fb_write_cmd(par->client,
- (par->device_info->need_chargepump & 0x1 << 2) & 0x14);
+ BIT(4) | (par->device_info->need_chargepump ? BIT(2) : 0));
if (ret < 0)
return ret;
diff --git a/drivers/video/fbdev/via/accel.c b/drivers/video/fbdev/via/accel.c
index 4b67b8e60..eb3615c69 100644
--- a/drivers/video/fbdev/via/accel.c
+++ b/drivers/video/fbdev/via/accel.c
@@ -358,7 +358,7 @@ int viafb_setup_engine(struct fb_info *info)
viapar->shared->vq_vram_addr = viapar->fbmem_free;
viapar->fbmem_used += VQ_SIZE;
-#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_VIA_CAMERA)
/*
* Set aside a chunk of framebuffer memory for the camera
* driver. Someday this driver probably needs a proper allocator
diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c
index 6e274825f..1d28e1688 100644
--- a/drivers/video/fbdev/via/via-core.c
+++ b/drivers/video/fbdev/via/via-core.c
@@ -116,7 +116,7 @@ EXPORT_SYMBOL_GPL(viafb_irq_disable);
* most viafb systems will not need to have this extra code for a while.
* As soon as another user comes along, the ifdef can be removed.
*/
-#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_VIA_CAMERA)
/*
* Access to the DMA engine. This currently provides what the camera
* driver needs (i.e. outgoing only) but is easily expandable if need
@@ -542,7 +542,7 @@ static struct viafb_subdev_info {
{
.name = "viafb-i2c",
},
-#if defined(CONFIG_VIDEO_VIA_CAMERA) || defined(CONFIG_VIDEO_VIA_CAMERA_MODULE)
+#if IS_ENABLED(CONFIG_VIDEO_VIA_CAMERA)
{
.name = "viafb-camera",
},
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 5fbeab388..9f2c834e4 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -204,10 +204,6 @@ static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
/* Need pdev */
pdev = to_pci_dev(ca91cx42_bridge->parent);
- INIT_LIST_HEAD(&ca91cx42_bridge->vme_error_handlers);
-
- mutex_init(&ca91cx42_bridge->irq_mtx);
-
/* Disable interrupts from PCI to VME */
iowrite32(0, bridge->base + VINT_EN);
@@ -1626,6 +1622,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
retval = -ENOMEM;
goto err_struct;
}
+ vme_init_bridge(ca91cx42_bridge);
ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
@@ -1686,7 +1683,6 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add master windows to list */
- INIT_LIST_HEAD(&ca91cx42_bridge->master_resources);
for (i = 0; i < CA91C142_MAX_MASTER; i++) {
master_image = kmalloc(sizeof(struct vme_master_resource),
GFP_KERNEL);
@@ -1713,7 +1709,6 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add slave windows to list */
- INIT_LIST_HEAD(&ca91cx42_bridge->slave_resources);
for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
slave_image = kmalloc(sizeof(struct vme_slave_resource),
GFP_KERNEL);
@@ -1741,7 +1736,6 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add dma engines to list */
- INIT_LIST_HEAD(&ca91cx42_bridge->dma_resources);
for (i = 0; i < CA91C142_MAX_DMA; i++) {
dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
GFP_KERNEL);
@@ -1764,7 +1758,6 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add location monitor to list */
- INIT_LIST_HEAD(&ca91cx42_bridge->lm_resources);
lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
if (lm == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 60524834d..4bc5d451e 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -314,10 +314,6 @@ static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
bridge = tsi148_bridge->driver_priv;
- INIT_LIST_HEAD(&tsi148_bridge->vme_error_handlers);
-
- mutex_init(&tsi148_bridge->irq_mtx);
-
result = request_irq(pdev->irq,
tsi148_irqhandler,
IRQF_SHARED,
@@ -2301,6 +2297,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
retval = -ENOMEM;
goto err_struct;
}
+ vme_init_bridge(tsi148_bridge);
tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
if (tsi148_device == NULL) {
@@ -2387,7 +2384,6 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add master windows to list */
- INIT_LIST_HEAD(&tsi148_bridge->master_resources);
for (i = 0; i < master_num; i++) {
master_image = kmalloc(sizeof(struct vme_master_resource),
GFP_KERNEL);
@@ -2417,7 +2413,6 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add slave windows to list */
- INIT_LIST_HEAD(&tsi148_bridge->slave_resources);
for (i = 0; i < TSI148_MAX_SLAVE; i++) {
slave_image = kmalloc(sizeof(struct vme_slave_resource),
GFP_KERNEL);
@@ -2442,7 +2437,6 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add dma engines to list */
- INIT_LIST_HEAD(&tsi148_bridge->dma_resources);
for (i = 0; i < TSI148_MAX_DMA; i++) {
dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
GFP_KERNEL);
@@ -2467,7 +2461,6 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add location monitor to list */
- INIT_LIST_HEAD(&tsi148_bridge->lm_resources);
lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
if (lm == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 72924b063..37ac0a58e 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -782,7 +782,7 @@ struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
if (dma_list == NULL) {
- printk(KERN_ERR "Unable to allocate memory for new dma list\n");
+ printk(KERN_ERR "Unable to allocate memory for new DMA list\n");
return NULL;
}
INIT_LIST_HEAD(&dma_list->entries);
@@ -846,7 +846,7 @@ struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
if (pci_attr == NULL) {
- printk(KERN_ERR "Unable to allocate memory for pci attributes\n");
+ printk(KERN_ERR "Unable to allocate memory for PCI attributes\n");
goto err_pci;
}
@@ -884,7 +884,7 @@ struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
if (vme_attr == NULL) {
- printk(KERN_ERR "Unable to allocate memory for vme attributes\n");
+ printk(KERN_ERR "Unable to allocate memory for VME attributes\n");
goto err_vme;
}
@@ -975,8 +975,8 @@ int vme_dma_list_free(struct vme_dma_list *list)
}
/*
- * Empty out all of the entries from the dma list. We need to go to the
- * low level driver as dma entries are driver specific.
+ * Empty out all of the entries from the DMA list. We need to go to the
+ * low level driver as DMA entries are driver specific.
*/
retval = bridge->dma_list_empty(list);
if (retval) {
@@ -1091,7 +1091,7 @@ void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
if (call != NULL)
call(level, statid, priv_data);
else
- printk(KERN_WARNING "Spurilous VME interrupt, level:%x, vector:%x\n",
+ printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
level, statid);
}
EXPORT_SYMBOL(vme_irq_handler);
@@ -1429,6 +1429,20 @@ static void vme_dev_release(struct device *dev)
kfree(dev_to_vme_dev(dev));
}
+/* Common bridge initialization */
+struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge)
+{
+ INIT_LIST_HEAD(&bridge->vme_error_handlers);
+ INIT_LIST_HEAD(&bridge->master_resources);
+ INIT_LIST_HEAD(&bridge->slave_resources);
+ INIT_LIST_HEAD(&bridge->dma_resources);
+ INIT_LIST_HEAD(&bridge->lm_resources);
+ mutex_init(&bridge->irq_mtx);
+
+ return bridge;
+}
+EXPORT_SYMBOL(vme_init_bridge);
+
int vme_register_bridge(struct vme_bridge *bridge)
{
int i;
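
With vme_init_bridge() exported above, the per-driver list-head and mutex setup that ca91cx42 and tsi148 used to scatter across probe and irq_init collapses into a single call after allocation. A minimal sketch of the resulting pattern (error handling elided):

struct vme_bridge *bridge;

bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
if (!bridge)
        return -ENOMEM;

/* one call now covers the five INIT_LIST_HEAD() calls and the
 * mutex_init() that each bridge driver previously open-coded */
vme_init_bridge(bridge);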
diff --git a/drivers/vme/vme_bridge.h b/drivers/vme/vme_bridge.h
index b59cbee23..cb8246fd9 100644
--- a/drivers/vme/vme_bridge.h
+++ b/drivers/vme/vme_bridge.h
@@ -177,6 +177,7 @@ void vme_bus_error_handler(struct vme_bridge *bridge,
unsigned long long address, int am);
void vme_irq_handler(struct vme_bridge *, int, int);
+struct vme_bridge *vme_init_bridge(struct vme_bridge *);
int vme_register_bridge(struct vme_bridge *);
void vme_unregister_bridge(struct vme_bridge *);
struct vme_error_handler *vme_register_error_handler(
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index b05e8fefb..2e30db1b1 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -24,6 +24,19 @@
#include "../w1_int.h"
/**
+ * Allow the active pullup to be disabled; the default is enabled.
+ *
+ * Note from the DS2482 datasheet:
+ * The APU bit controls whether an active pullup (controlled slew-rate
+ * transistor) or a passive pullup (Rwpu resistor) will be used to drive
+ * a 1-Wire line from low to high. When APU = 0, active pullup is disabled
+ * (resistor mode). Active Pullup should always be selected unless there is
+ * only a single slave on the 1-Wire line.
+ */
+static int ds2482_active_pullup = 1;
+module_param_named(active_pullup, ds2482_active_pullup, int, 0644);
+
+/**
* The DS2482 registers - there are 3 registers that are addressed by a read
* pointer. The read pointer is set by the last command executed.
*
@@ -138,6 +151,9 @@ struct ds2482_data {
*/
static inline u8 ds2482_calculate_config(u8 conf)
{
+ if (ds2482_active_pullup)
+ conf |= DS2482_REG_CFG_APU;
+
return conf | ((~conf & 0x0f) << 4);
}
@@ -546,6 +562,8 @@ static int ds2482_remove(struct i2c_client *client)
module_i2c_driver(ds2482_driver);
+MODULE_PARM_DESC(active_pullup, "Active pullup (apply to all buses): " \
+ "0-disable, 1-enable (default)");
MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>");
MODULE_DESCRIPTION("DS2482 driver");
MODULE_LICENSE("GPL");
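
For reference, the DS2482 only accepts a configuration write when the upper nibble is the one's complement of the lower nibble, which is exactly what ds2482_calculate_config() builds. A userspace re-statement of that arithmetic for illustration, assuming APU is bit 0 as in the driver's DS2482_REG_CFG_APU:

#include <assert.h>
#include <stdint.h>

#define DS2482_REG_CFG_APU 0x01 /* active pullup, bit 0 (assumed per driver) */

/* userspace re-statement of ds2482_calculate_config() */
static uint8_t calc_config(uint8_t conf, int active_pullup)
{
        if (active_pullup)
                conf |= DS2482_REG_CFG_APU;
        return conf | (uint8_t)((~conf & 0x0f) << 4);
}

int main(void)
{
        assert(calc_config(0x00, 1) == 0xe1); /* APU only -> 0xE1 */
        assert(calc_config(0x00, 0) == 0xf0); /* nothing set -> 0xF0 */
        return 0;
}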
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 2f029e8f4..581a300fd 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -92,10 +92,13 @@ static void w1_therm_remove_slave(struct w1_slave *sl)
static ssize_t w1_slave_show(struct device *device,
struct device_attribute *attr, char *buf);
+static ssize_t w1_slave_store(struct device *device,
+ struct device_attribute *attr, const char *buf, size_t size);
+
static ssize_t w1_seq_show(struct device *device,
struct device_attribute *attr, char *buf);
-static DEVICE_ATTR_RO(w1_slave);
+static DEVICE_ATTR_RW(w1_slave);
static DEVICE_ATTR_RO(w1_seq);
static struct attribute *w1_therm_attrs[] = {
@@ -154,8 +157,17 @@ struct w1_therm_family_converter
u16 reserved;
struct w1_family *f;
int (*convert)(u8 rom[9]);
+ int (*precision)(struct device *device, int val);
+ int (*eeprom)(struct device *device);
};
+/* write configuration to eeprom */
+static inline int w1_therm_eeprom(struct device *device);
+
+/* Set precision for conversion */
+static inline int w1_DS18B20_precision(struct device *device, int val);
+static inline int w1_DS18S20_precision(struct device *device, int val);
+
/* The return value is millidegrees Centigrade. */
static inline int w1_DS18B20_convert_temp(u8 rom[9]);
static inline int w1_DS18S20_convert_temp(u8 rom[9]);
@@ -163,26 +175,194 @@ static inline int w1_DS18S20_convert_temp(u8 rom[9]);
static struct w1_therm_family_converter w1_therm_families[] = {
{
.f = &w1_therm_family_DS18S20,
- .convert = w1_DS18S20_convert_temp
+ .convert = w1_DS18S20_convert_temp,
+ .precision = w1_DS18S20_precision,
+ .eeprom = w1_therm_eeprom
},
{
.f = &w1_therm_family_DS1822,
- .convert = w1_DS18B20_convert_temp
+ .convert = w1_DS18B20_convert_temp,
+ .precision = w1_DS18S20_precision,
+ .eeprom = w1_therm_eeprom
},
{
.f = &w1_therm_family_DS18B20,
- .convert = w1_DS18B20_convert_temp
+ .convert = w1_DS18B20_convert_temp,
+ .precision = w1_DS18B20_precision,
+ .eeprom = w1_therm_eeprom
},
{
.f = &w1_therm_family_DS28EA00,
- .convert = w1_DS18B20_convert_temp
+ .convert = w1_DS18B20_convert_temp,
+ .precision = w1_DS18S20_precision,
+ .eeprom = w1_therm_eeprom
},
{
.f = &w1_therm_family_DS1825,
- .convert = w1_DS18B20_convert_temp
+ .convert = w1_DS18B20_convert_temp,
+ .precision = w1_DS18S20_precision,
+ .eeprom = w1_therm_eeprom
}
};
+static inline int w1_therm_eeprom(struct device *device)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ struct w1_master *dev = sl->master;
+ u8 rom[9], external_power;
+ int ret, max_trying = 10;
+ u8 *family_data = sl->family_data;
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret != 0)
+ return ret;
+
+ if (!sl->family_data) {
+ mutex_unlock(&dev->bus_mutex);
+ return -ENODEV;
+ }
+
+ /* prevent the slave from going away in sleep */
+ atomic_inc(THERM_REFCNT(family_data));
+ memset(rom, 0, sizeof(rom));
+
+ while (max_trying--) {
+ if (!w1_reset_select_slave(sl)) {
+ unsigned int tm = 10;
+ unsigned long sleep_rem;
+
+ /* check if in parasite mode */
+ w1_write_8(dev, W1_READ_PSUPPLY);
+ external_power = w1_read_8(dev);
+
+ if (w1_reset_select_slave(sl))
+ continue;
+
+ /* 10ms strong pullup/delay after the copy command */
+ if (w1_strong_pullup == 2 ||
+ (!external_power && w1_strong_pullup))
+ w1_next_pullup(dev, tm);
+
+ w1_write_8(dev, W1_COPY_SCRATCHPAD);
+
+ if (external_power) {
+ mutex_unlock(&dev->bus_mutex);
+
+ sleep_rem = msleep_interruptible(tm);
+ if (sleep_rem != 0) {
+ ret = -EINTR;
+ goto post_unlock;
+ }
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret != 0)
+ goto post_unlock;
+ } else if (!w1_strong_pullup) {
+ sleep_rem = msleep_interruptible(tm);
+ if (sleep_rem != 0) {
+ ret = -EINTR;
+ goto pre_unlock;
+ }
+ }
+
+ break;
+ }
+ }
+
+pre_unlock:
+ mutex_unlock(&dev->bus_mutex);
+
+post_unlock:
+ atomic_dec(THERM_REFCNT(family_data));
+ return ret;
+}
+
+/* DS18S20 does not feature a configuration register */
+static inline int w1_DS18S20_precision(struct device *device, int val)
+{
+ return 0;
+}
+
+static inline int w1_DS18B20_precision(struct device *device, int val)
+{
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ struct w1_master *dev = sl->master;
+ u8 rom[9], crc;
+ int ret, max_trying = 10;
+ u8 *family_data = sl->family_data;
+ uint8_t precision_bits;
+ uint8_t mask = 0x60;
+
+ if (val > 12 || val < 9) {
+ pr_warn("Unsupported precision\n");
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&dev->bus_mutex);
+ if (ret != 0)
+ return ret;
+
+ if (!sl->family_data) {
+ mutex_unlock(&dev->bus_mutex);
+ return -ENODEV;
+ }
+
+ /* prevent the slave from going away in sleep */
+ atomic_inc(THERM_REFCNT(family_data));
+ memset(rom, 0, sizeof(rom));
+
+ /* translate precision to bitmask (see datasheet page 9) */
+ switch (val) {
+ case 9:
+ precision_bits = 0x00;
+ break;
+ case 10:
+ precision_bits = 0x20;
+ break;
+ case 11:
+ precision_bits = 0x40;
+ break;
+ case 12:
+ default:
+ precision_bits = 0x60;
+ break;
+ }
+
+ while (max_trying--) {
+ crc = 0;
+
+ if (!w1_reset_select_slave(sl)) {
+ int count = 0;
+
+ /* read values to only alter precision bits */
+ w1_write_8(dev, W1_READ_SCRATCHPAD);
+ if ((count = w1_read_block(dev, rom, 9)) != 9)
+ dev_warn(device, "w1_read_block() returned %u instead of 9.\n", count);
+
+ crc = w1_calc_crc8(rom, 8);
+ if (rom[8] == crc) {
+ rom[4] = (rom[4] & ~mask) | (precision_bits & mask);
+
+ if (!w1_reset_select_slave(sl)) {
+ w1_write_8(dev, W1_WRITE_SCRATCHPAD);
+ w1_write_8(dev, rom[2]);
+ w1_write_8(dev, rom[3]);
+ w1_write_8(dev, rom[4]);
+
+ break;
+ }
+ }
+ }
+ }
+
+pre_unlock:
+ mutex_unlock(&dev->bus_mutex);
+
+post_unlock:
+ atomic_dec(THERM_REFCNT(family_data));
+ return ret;
+}
+
static inline int w1_DS18B20_convert_temp(u8 rom[9])
{
s16 t = le16_to_cpup((__le16 *)rom);
@@ -220,6 +400,30 @@ static inline int w1_convert_temp(u8 rom[9], u8 fid)
return 0;
}
+static ssize_t w1_slave_store(struct device *device,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ int val, ret;
+ struct w1_slave *sl = dev_to_w1_slave(device);
+ int i;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(w1_therm_families); ++i) {
+ if (w1_therm_families[i].f->fid == sl->family->fid) {
+ /* a value of zero writes the current configuration to EEPROM */
+ if (0 == val)
+ ret = w1_therm_families[i].eeprom(device);
+ else
+ ret = w1_therm_families[i].precision(device, val);
+ break;
+ }
+ }
+ return ret ? : size;
+}
static ssize_t w1_slave_show(struct device *device,
struct device_attribute *attr, char *buf)
@@ -311,7 +515,7 @@ static ssize_t w1_slave_show(struct device *device,
for (i = 0; i < 9; ++i)
c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", rom[i]);
c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
- crc, (verdict) ? "YES" : "NO");
+ crc, (verdict) ? "YES" : "NO");
if (verdict)
memcpy(family_data, rom, sizeof(rom));
else
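
The precision switch above maps 9..12 bits of resolution onto the R1:R0 field in bits 6:5 of scratchpad byte 4. Because the four cases are evenly spaced, the mapping also has a closed form; a small sketch verifying the same values:

#include <assert.h>
#include <stdint.h>

/* (val - 9) << 5 reproduces the driver's switch for val in 9..12 */
static uint8_t precision_to_bits(int val)
{
        return (uint8_t)((val - 9) << 5);
}

int main(void)
{
        assert(precision_to_bits(9)  == 0x00);
        assert(precision_to_bits(10) == 0x20);
        assert(precision_to_bits(11) == 0x40);
        assert(precision_to_bits(12) == 0x60);
        return 0;
}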
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 89a784751..bb34362e9 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -335,7 +335,7 @@ static ssize_t w1_master_attribute_store_max_slave_count(struct device *dev,
int tmp;
struct w1_master *md = dev_to_w1_master(dev);
- if (kstrtoint(buf, 0, &tmp) == -EINVAL || tmp < 1)
+ if (kstrtoint(buf, 0, &tmp) || tmp < 1)
return -EINVAL;
mutex_lock(&md->mutex);
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h
index 56a49ba41..129895f56 100644
--- a/drivers/w1/w1.h
+++ b/drivers/w1/w1.h
@@ -58,6 +58,8 @@ struct w1_reg_num
#define W1_ALARM_SEARCH 0xEC
#define W1_CONVERT_TEMP 0x44
#define W1_SKIP_ROM 0xCC
+#define W1_COPY_SCRATCHPAD 0x48
+#define W1_WRITE_SCRATCHPAD 0x4E
#define W1_READ_SCRATCHPAD 0xBE
#define W1_READ_ROM 0x33
#define W1_READ_PSUPPLY 0xB4
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
index 282092421..f4bc8c100 100644
--- a/drivers/w1/w1_io.c
+++ b/drivers/w1/w1_io.c
@@ -352,7 +352,7 @@ int w1_reset_bus(struct w1_master *dev)
w1_delay(70);
result = dev->bus_master->read_bit(dev->bus_master->data) & 0x1;
- /* minmum 70 (above) + 430 = 500 us
+ /* minimum 70 (above) + 430 = 500 us
* There aren't any timing requirements between a reset and
* the following transactions. Sleeping is safe here.
*/
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index fb947655b..b4b3e2564 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -661,6 +661,14 @@ config ATLAS7_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called atlas7_wdt.
+config RENESAS_WDT
+ tristate "Renesas WDT Watchdog"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ This driver adds watchdog support for the integrated watchdogs in the
+ Renesas R-Car and other SH-Mobile SoCs (usually named RWDT or SWDT).
+
# AVR32 Architecture
config AT32AP700X_WDT
@@ -738,7 +746,7 @@ config ALIM7101_WDT
config EBC_C384_WDT
tristate "WinSystems EBC-C384 Watchdog Timer"
- depends on X86
+ depends on X86 && ISA_BUS_API
select WATCHDOG_CORE
help
Enables watchdog timer support for the watchdog timer on the
@@ -1475,6 +1483,32 @@ config MT7621_WDT
help
Hardware driver for the Mediatek/Ralink MT7621/8 SoC Watchdog Timer.
+config PIC32_WDT
+ tristate "Microchip PIC32 hardware watchdog"
+ select WATCHDOG_CORE
+ depends on MACH_PIC32
+ help
+ Watchdog driver for the built-in watchdog hardware in a PIC32.
+
+ Configuration bits must be set appropriately for the watchdog to be
+ controlled by this driver.
+
+ To compile this driver as a loadable module, choose M here.
+ The module will be called pic32-wdt.
+
+config PIC32_DMT
+ tristate "Microchip PIC32 Deadman Timer"
+ select WATCHDOG_CORE
+ depends on MACH_PIC32
+ help
+ Watchdog driver for the PIC32 instruction fetch counting timer. This
+ specific timer is typically used in mission critical and safety critical
+ applications, where any single failure of the software functionality
+ and sequencing must be detected.
+
+ To compile this driver as a loadable module, choose M here.
+ The module will be called pic32-dmt.
+
# PARISC Architecture
# POWERPC Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index feb6270fd..a46e7c138 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o
obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o
obj-$(CONFIG_ATLAS7_WATCHDOG) += atlas7_wdt.o
+obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o
# AVR32 Architecture
obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -157,6 +158,8 @@ obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o
obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o
obj-$(CONFIG_IMGPDC_WDT) += imgpdc_wdt.o
obj-$(CONFIG_MT7621_WDT) += mt7621_wdt.o
+obj-$(CONFIG_PIC32_WDT) += pic32-wdt.o
+obj-$(CONFIG_PIC32_DMT) += pic32-dmt.o
# PARISC Architecture
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 02007689e..71ee07950 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -611,9 +611,7 @@ static int cpwd_probe(struct platform_device *op)
}
if (p->broken) {
- init_timer(&cpwd_timer);
- cpwd_timer.function = cpwd_brokentimer;
- cpwd_timer.data = (unsigned long) p;
+ setup_timer(&cpwd_timer, cpwd_brokentimer, (unsigned long)p);
cpwd_timer.expires = WD_BTIMEOUT;
pr_info("PLD defect workaround enabled for model %s\n",
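
setup_timer() folds the init_timer()/function/data triple into one call. In this kernel generation it is roughly equivalent to the following (a simplified sketch, not the verbatim definition, which also handles lockdep keys):

#define setup_timer(timer, fn, data)                    \
        do {                                            \
                init_timer(timer);                      \
                (timer)->function = (fn);               \
                (timer)->data = (data);                 \
        } while (0)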
diff --git a/drivers/watchdog/ebc-c384_wdt.c b/drivers/watchdog/ebc-c384_wdt.c
index 77fda0b4b..4b849b8e3 100644
--- a/drivers/watchdog/ebc-c384_wdt.c
+++ b/drivers/watchdog/ebc-c384_wdt.c
@@ -16,10 +16,10 @@
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
+#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/watchdog.h>
@@ -95,9 +95,8 @@ static const struct watchdog_info ebc_c384_wdt_info = {
.identity = MODULE_NAME
};
-static int __init ebc_c384_wdt_probe(struct platform_device *pdev)
+static int ebc_c384_wdt_probe(struct device *dev, unsigned int id)
{
- struct device *dev = &pdev->dev;
struct watchdog_device *wdd;
if (!devm_request_region(dev, BASE_ADDR, ADDR_EXTENT, dev_name(dev))) {
@@ -122,61 +121,39 @@ static int __init ebc_c384_wdt_probe(struct platform_device *pdev)
dev_warn(dev, "Invalid timeout (%u seconds), using default (%u seconds)\n",
timeout, WATCHDOG_TIMEOUT);
- platform_set_drvdata(pdev, wdd);
+ dev_set_drvdata(dev, wdd);
return watchdog_register_device(wdd);
}
-static int ebc_c384_wdt_remove(struct platform_device *pdev)
+static int ebc_c384_wdt_remove(struct device *dev, unsigned int id)
{
- struct watchdog_device *wdd = platform_get_drvdata(pdev);
+ struct watchdog_device *wdd = dev_get_drvdata(dev);
watchdog_unregister_device(wdd);
return 0;
}
-static struct platform_driver ebc_c384_wdt_driver = {
+static struct isa_driver ebc_c384_wdt_driver = {
+ .probe = ebc_c384_wdt_probe,
.driver = {
.name = MODULE_NAME
},
.remove = ebc_c384_wdt_remove
};
-static struct platform_device *ebc_c384_wdt_device;
-
static int __init ebc_c384_wdt_init(void)
{
- int err;
-
if (!dmi_match(DMI_BOARD_NAME, "EBC-C384 SBC"))
return -ENODEV;
- ebc_c384_wdt_device = platform_device_alloc(MODULE_NAME, -1);
- if (!ebc_c384_wdt_device)
- return -ENOMEM;
-
- err = platform_device_add(ebc_c384_wdt_device);
- if (err)
- goto err_platform_device;
-
- err = platform_driver_probe(&ebc_c384_wdt_driver, ebc_c384_wdt_probe);
- if (err)
- goto err_platform_driver;
-
- return 0;
-
-err_platform_driver:
- platform_device_del(ebc_c384_wdt_device);
-err_platform_device:
- platform_device_put(ebc_c384_wdt_device);
- return err;
+ return isa_register_driver(&ebc_c384_wdt_driver, 1);
}
static void __exit ebc_c384_wdt_exit(void)
{
- platform_device_unregister(ebc_c384_wdt_device);
- platform_driver_unregister(&ebc_c384_wdt_driver);
+ isa_unregister_driver(&ebc_c384_wdt_driver);
}
module_init(ebc_c384_wdt_init);
@@ -185,4 +162,4 @@ module_exit(ebc_c384_wdt_exit);
MODULE_AUTHOR("William Breathitt Gray <vilhelm.gray@gmail.com>");
MODULE_DESCRIPTION("WinSystems EBC-C384 watchdog timer driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" MODULE_NAME);
+MODULE_ALIAS("isa:" MODULE_NAME);
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 016bd9355..d4ba262da 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -38,7 +38,7 @@
#define SIO_F71808FG_LD_WDT 0x07 /* Watchdog timer logical device */
#define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */
-#define SIO_LOCK_KEY 0xAA /* Key to diasble Super-I/O */
+#define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */
#define SIO_REG_LDSEL 0x07 /* Logical device select */
#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */
@@ -59,6 +59,7 @@
#define SIO_F71869A_ID 0x1007 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
+#define SIO_F81865_ID 0x0704 /* Chipset ID */
#define F71808FG_REG_WDO_CONF 0xf0
#define F71808FG_REG_WDT_CONF 0xf5
@@ -66,11 +67,14 @@
#define F71808FG_FLAG_WDOUT_EN 7
-#define F71808FG_FLAG_WDTMOUT_STS 5
+#define F71808FG_FLAG_WDTMOUT_STS 6
#define F71808FG_FLAG_WD_EN 5
#define F71808FG_FLAG_WD_PULSE 4
#define F71808FG_FLAG_WD_UNIT 3
+#define F81865_REG_WDO_CONF 0xfa
+#define F81865_FLAG_WDOUT_EN 0
+
/* Default values */
#define WATCHDOG_TIMEOUT 60 /* 1 minute default timeout */
#define WATCHDOG_MAX_TIMEOUT (60 * 255)
@@ -112,7 +116,7 @@ module_param(start_withtimeout, uint, 0);
MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
" given initial timeout. Zero (default) disables this feature.");
-enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg };
+enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f81865 };
static const char *f71808e_names[] = {
"f71808fg",
@@ -121,6 +125,7 @@ static const char *f71808e_names[] = {
"f71869",
"f71882fg",
"f71889fg",
+ "f81865",
};
/* Super-I/O Function prototypes */
@@ -360,6 +365,11 @@ static int watchdog_start(void)
superio_inb(watchdog.sioaddr, SIO_REG_MFUNCT3) & 0xcf);
break;
+ case f81865:
+ /* Set pin 70 to WDTRST# */
+ superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 5);
+ break;
+
default:
/*
* 'default' label to shut up the compiler and catch
@@ -371,8 +381,13 @@ static int watchdog_start(void)
superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
superio_set_bit(watchdog.sioaddr, SIO_REG_ENABLE, 0);
- superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDO_CONF,
- F71808FG_FLAG_WDOUT_EN);
+
+ if (watchdog.type == f81865)
+ superio_set_bit(watchdog.sioaddr, F81865_REG_WDO_CONF,
+ F81865_FLAG_WDOUT_EN);
+ else
+ superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDO_CONF,
+ F71808FG_FLAG_WDOUT_EN);
superio_set_bit(watchdog.sioaddr, F71808FG_REG_WDT_CONF,
F71808FG_FLAG_WD_EN);
@@ -655,7 +670,7 @@ static int __init watchdog_init(int sioaddr)
superio_select(watchdog.sioaddr, SIO_F71808FG_LD_WDT);
wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF);
- watchdog.caused_reboot = wdt_conf & F71808FG_FLAG_WDTMOUT_STS;
+ watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS);
superio_exit(sioaddr);
@@ -770,6 +785,9 @@ static int __init f71808e_find(int sioaddr)
/* Confirmed (by datasheet) not to have a watchdog. */
err = -ENODEV;
goto exit;
+ case SIO_F81865_ID:
+ watchdog.type = f81865;
+ break;
default:
pr_info("Unrecognized Fintek device: %04x\n",
(unsigned int)devid);
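
The F71808FG_FLAG_WDTMOUT_STS change is really two fixes: the timeout-status flag lives in bit 6, not bit 5 (bit 5 is F71808FG_FLAG_WD_EN), and the old test and-ed the raw bit index into the register value instead of a mask. A minimal demonstration of why BIT() matters here:

#include <assert.h>
#include <stdint.h>

#define BIT(n) (1U << (n))
#define FLAG_STS 6 /* a bit index, not a mask */

int main(void)
{
        uint8_t wdt_conf = 0x40; /* only the status bit (bit 6) is set */

        /* buggy test: 0x40 & 6 == 0, so a genuine timeout goes unseen */
        assert((wdt_conf & FLAG_STS) == 0);

        /* fixed test: 0x40 & BIT(6) == 0x40, timeout correctly reported */
        assert((wdt_conf & BIT(FLAG_STS)) != 0);
        return 0;
}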
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 331aed831..62f346bb4 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -37,6 +37,8 @@
#define IMX2_WDT_WCR 0x00 /* Control Register */
#define IMX2_WDT_WCR_WT (0xFF << 8) /* -> Watchdog Timeout Field */
+#define IMX2_WDT_WCR_WDA (1 << 5) /* -> External Reset WDOG_B */
+#define IMX2_WDT_WCR_SRS (1 << 4) /* -> Software Reset Signal */
#define IMX2_WDT_WCR_WRE (1 << 3) /* -> WDOG Reset Enable */
#define IMX2_WDT_WCR_WDE (1 << 2) /* -> Watchdog Enable */
#define IMX2_WDT_WCR_WDZST (1 << 0) /* -> Watchdog timer Suspend */
@@ -59,6 +61,7 @@ struct imx2_wdt_device {
struct clk *clk;
struct regmap *regmap;
struct watchdog_device wdog;
+ bool ext_reset;
};
static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -83,6 +86,12 @@ static int imx2_wdt_restart(struct watchdog_device *wdog, unsigned long action,
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
unsigned int wcr_enable = IMX2_WDT_WCR_WDE;
+ /* Use internal reset or external - not both */
+ if (wdev->ext_reset)
+ wcr_enable |= IMX2_WDT_WCR_SRS; /* do not assert int reset */
+ else
+ wcr_enable |= IMX2_WDT_WCR_WDA; /* do not assert ext-reset */
+
/* Assert SRS signal */
regmap_write(wdev->regmap, IMX2_WDT_WCR, wcr_enable);
/*
@@ -112,8 +121,12 @@ static inline void imx2_wdt_setup(struct watchdog_device *wdog)
val |= IMX2_WDT_WCR_WDZST;
/* Strip the old watchdog Time-Out value */
val &= ~IMX2_WDT_WCR_WT;
- /* Generate reset if WDOG times out */
- val &= ~IMX2_WDT_WCR_WRE;
+ /* Generate internal chip-level reset if WDOG times out */
+ if (!wdev->ext_reset)
+ val &= ~IMX2_WDT_WCR_WRE;
+ /* Otherwise, assert the external WDOG_B reset only on time-out */
+ else
+ val |= IMX2_WDT_WCR_WRE;
/* Keep Watchdog Disabled */
val &= ~IMX2_WDT_WCR_WDE;
/* Set the watchdog's Time-Out value */
@@ -230,6 +243,8 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
regmap_read(wdev->regmap, IMX2_WDT_WRSR, &val);
wdog->bootstatus = val & IMX2_WDT_WRSR_TOUT ? WDIOF_CARDRESET : 0;
+ wdev->ext_reset = of_property_read_bool(pdev->dev.of_node,
+ "fsl,ext-reset-output");
wdog->timeout = clamp_t(unsigned, timeout, 1, IMX2_WDT_MAX_TIME);
if (wdog->timeout != timeout)
dev_warn(&pdev->dev, "Initial timeout out of range! Clamped from %u to %u\n",
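
In the WCR register the SRS and WDA bits are active low: writing a 1 suppresses the corresponding reset source, so the restart path sets the bit belonging to the path it does not want. A hypothetical helper condensing the restart hunk's logic:

#include <stdbool.h>
#include <stdint.h>

#define IMX2_WDT_WCR_WDE (1 << 2)
#define IMX2_WDT_WCR_SRS (1 << 4) /* 1 = do not assert internal reset */
#define IMX2_WDT_WCR_WDA (1 << 5) /* 1 = do not assert WDOG_B */

/* pick exactly one reset path by suppressing the other one */
static uint16_t imx2_wcr_for_restart(bool ext_reset)
{
        uint16_t wcr = IMX2_WDT_WCR_WDE;

        return wcr | (ext_reset ? IMX2_WDT_WCR_SRS : IMX2_WDT_WCR_WDA);
}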
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 6a7d5c365..c8d51ddb2 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -160,10 +160,8 @@ static int jz4740_wdt_probe(struct platform_device *pdev)
drvdata = devm_kzalloc(&pdev->dev, sizeof(struct jz4740_wdt_drvdata),
GFP_KERNEL);
- if (!drvdata) {
- dev_err(&pdev->dev, "Unable to alloacate watchdog device\n");
+ if (!drvdata)
return -ENOMEM;
- }
if (heartbeat < 1 || heartbeat > MAX_HEARTBEAT)
heartbeat = DEFAULT_HEARTBEAT;
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index 14521c8b3..b55981f88 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -431,7 +431,7 @@ static int octeon_wdt_cpu_callback(struct notifier_block *nfb,
{
unsigned int cpu = (unsigned long)hcpu;
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_PREPARE:
octeon_wdt_disable_interrupt(cpu);
break;
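
Hotplug notifications delivered during suspend/resume carry CPU_TASKS_FROZEN on top of the base action, so masking that bit lets a single case label serve both the hot-unplug and the freeze paths. Worked numbers, with the constant values as defined in linux/cpu.h of this era:

#include <assert.h>

#define CPU_DOWN_PREPARE 0x0005
#define CPU_TASKS_FROZEN 0x0010
#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)

int main(void)
{
        unsigned long action = CPU_DOWN_PREPARE_FROZEN; /* 0x15 */

        /* with the frozen bit stripped, the suspend-path event reaches
         * the same case label as ordinary hot-unplug */
        assert((action & ~CPU_TASKS_FROZEN) == CPU_DOWN_PREPARE);
        return 0;
}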
diff --git a/drivers/watchdog/pic32-dmt.c b/drivers/watchdog/pic32-dmt.c
new file mode 100644
index 000000000..962f58c03
--- /dev/null
+++ b/drivers/watchdog/pic32-dmt.c
@@ -0,0 +1,257 @@
+/*
+ * PIC32 deadman timer driver
+ *
+ * Purna Chandra Mandal <purna.mandal@microchip.com>
+ * Copyright (c) 2016, Microchip Technology Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/watchdog.h>
+
+#include <asm/mach-pic32/pic32.h>
+
+/* Deadman Timer Regs */
+#define DMTCON_REG 0x00
+#define DMTPRECLR_REG 0x10
+#define DMTCLR_REG 0x20
+#define DMTSTAT_REG 0x30
+#define DMTCNT_REG 0x40
+#define DMTPSCNT_REG 0x60
+#define DMTPSINTV_REG 0x70
+
+/* Deadman Timer Regs fields */
+#define DMT_ON BIT(15)
+#define DMT_STEP1_KEY BIT(6)
+#define DMT_STEP2_KEY BIT(3)
+#define DMTSTAT_WINOPN BIT(0)
+#define DMTSTAT_EVENT BIT(5)
+#define DMTSTAT_BAD2 BIT(6)
+#define DMTSTAT_BAD1 BIT(7)
+
+/* Reset Control Register fields for watchdog */
+#define RESETCON_DMT_TIMEOUT BIT(5)
+
+struct pic32_dmt {
+ void __iomem *regs;
+ struct clk *clk;
+};
+
+static inline void dmt_enable(struct pic32_dmt *dmt)
+{
+ writel(DMT_ON, PIC32_SET(dmt->regs + DMTCON_REG));
+}
+
+static inline void dmt_disable(struct pic32_dmt *dmt)
+{
+ writel(DMT_ON, PIC32_CLR(dmt->regs + DMTCON_REG));
+ /*
+ * Cannot touch registers in the CPU cycle following clearing the
+ * ON bit.
+ */
+ nop();
+}
+
+static inline int dmt_bad_status(struct pic32_dmt *dmt)
+{
+ u32 val;
+
+ val = readl(dmt->regs + DMTSTAT_REG);
+ val &= (DMTSTAT_BAD1 | DMTSTAT_BAD2 | DMTSTAT_EVENT);
+ if (val)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static inline int dmt_keepalive(struct pic32_dmt *dmt)
+{
+ u32 v;
+ u32 timeout = 500;
+
+ /* set pre-clear key */
+ writel(DMT_STEP1_KEY << 8, dmt->regs + DMTPRECLR_REG);
+
+ /* wait for DMT window to open */
+ while (--timeout) {
+ v = readl(dmt->regs + DMTSTAT_REG) & DMTSTAT_WINOPN;
+ if (v == DMTSTAT_WINOPN)
+ break;
+ }
+
+ /* apply key2 */
+ writel(DMT_STEP2_KEY, dmt->regs + DMTCLR_REG);
+
+ /* check whether keys are latched correctly */
+ return dmt_bad_status(dmt);
+}
+
+static inline u32 pic32_dmt_get_timeout_secs(struct pic32_dmt *dmt)
+{
+ unsigned long rate;
+
+ rate = clk_get_rate(dmt->clk);
+ if (rate)
+ return readl(dmt->regs + DMTPSCNT_REG) / rate;
+
+ return 0;
+}
+
+static inline u32 pic32_dmt_bootstatus(struct pic32_dmt *dmt)
+{
+ u32 v;
+ void __iomem *rst_base;
+
+ rst_base = ioremap(PIC32_BASE_RESET, 0x10);
+ if (!rst_base)
+ return 0;
+
+ v = readl(rst_base);
+
+ writel(RESETCON_DMT_TIMEOUT, PIC32_CLR(rst_base));
+
+ iounmap(rst_base);
+ return v & RESETCON_DMT_TIMEOUT;
+}
+
+static int pic32_dmt_start(struct watchdog_device *wdd)
+{
+ struct pic32_dmt *dmt = watchdog_get_drvdata(wdd);
+
+ dmt_enable(dmt);
+ return dmt_keepalive(dmt);
+}
+
+static int pic32_dmt_stop(struct watchdog_device *wdd)
+{
+ struct pic32_dmt *dmt = watchdog_get_drvdata(wdd);
+
+ dmt_disable(dmt);
+
+ return 0;
+}
+
+static int pic32_dmt_ping(struct watchdog_device *wdd)
+{
+ struct pic32_dmt *dmt = watchdog_get_drvdata(wdd);
+
+ return dmt_keepalive(dmt);
+}
+
+static const struct watchdog_ops pic32_dmt_fops = {
+ .owner = THIS_MODULE,
+ .start = pic32_dmt_start,
+ .stop = pic32_dmt_stop,
+ .ping = pic32_dmt_ping,
+};
+
+static const struct watchdog_info pic32_dmt_ident = {
+ .options = WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .identity = "PIC32 Deadman Timer",
+};
+
+static struct watchdog_device pic32_dmt_wdd = {
+ .info = &pic32_dmt_ident,
+ .ops = &pic32_dmt_fops,
+};
+
+static int pic32_dmt_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct pic32_dmt *dmt;
+ struct resource *mem;
+ struct watchdog_device *wdd = &pic32_dmt_wdd;
+
+ dmt = devm_kzalloc(&pdev->dev, sizeof(*dmt), GFP_KERNEL);
+ if (!dmt)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dmt->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dmt->regs))
+ return PTR_ERR(dmt->regs);
+
+ dmt->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dmt->clk)) {
+ dev_err(&pdev->dev, "clk not found\n");
+ return PTR_ERR(dmt->clk);
+ }
+
+ ret = clk_prepare_enable(dmt->clk);
+ if (ret)
+ return ret;
+
+ wdd->timeout = pic32_dmt_get_timeout_secs(dmt);
+ if (!wdd->timeout) {
+ dev_err(&pdev->dev,
+ "failed to read watchdog register timeout\n");
+ ret = -EINVAL;
+ goto out_disable_clk;
+ }
+
+ dev_info(&pdev->dev, "timeout %d\n", wdd->timeout);
+
+ wdd->bootstatus = pic32_dmt_bootstatus(dmt) ? WDIOF_CARDRESET : 0;
+
+ watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
+ watchdog_set_drvdata(wdd, dmt);
+
+ ret = watchdog_register_device(wdd);
+ if (ret) {
+ dev_err(&pdev->dev, "watchdog register failed, err %d\n", ret);
+ goto out_disable_clk;
+ }
+
+ platform_set_drvdata(pdev, wdd);
+ return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(dmt->clk);
+ return ret;
+}
+
+static int pic32_dmt_remove(struct platform_device *pdev)
+{
+ struct watchdog_device *wdd = platform_get_drvdata(pdev);
+ struct pic32_dmt *dmt = watchdog_get_drvdata(wdd);
+
+ watchdog_unregister_device(wdd);
+ clk_disable_unprepare(dmt->clk);
+
+ return 0;
+}
+
+static const struct of_device_id pic32_dmt_of_ids[] = {
+ { .compatible = "microchip,pic32mzda-dmt",},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pic32_dmt_of_ids);
+
+static struct platform_driver pic32_dmt_driver = {
+ .probe = pic32_dmt_probe,
+ .remove = pic32_dmt_remove,
+ .driver = {
+ .name = "pic32-dmt",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pic32_dmt_of_ids),
+ }
+};
+
+module_platform_driver(pic32_dmt_driver);
+
+MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
+MODULE_DESCRIPTION("Microchip PIC32 DMT Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/pic32-wdt.c b/drivers/watchdog/pic32-wdt.c
new file mode 100644
index 000000000..6047aa89a
--- /dev/null
+++ b/drivers/watchdog/pic32-wdt.c
@@ -0,0 +1,263 @@
+/*
+ * PIC32 watchdog driver
+ *
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (c) 2016, Microchip Technology Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/watchdog.h>
+
+#include <asm/mach-pic32/pic32.h>
+
+/* Watchdog Timer Registers */
+#define WDTCON_REG 0x00
+
+/* Watchdog Timer Control Register fields */
+#define WDTCON_WIN_EN BIT(0)
+#define WDTCON_RMCS_MASK 0x0003
+#define WDTCON_RMCS_SHIFT 0x0006
+#define WDTCON_RMPS_MASK 0x001F
+#define WDTCON_RMPS_SHIFT 0x0008
+#define WDTCON_ON BIT(15)
+#define WDTCON_CLR_KEY 0x5743
+
+/* Reset Control Register fields for watchdog */
+#define RESETCON_TIMEOUT_IDLE BIT(2)
+#define RESETCON_TIMEOUT_SLEEP BIT(3)
+#define RESETCON_WDT_TIMEOUT BIT(4)
+
+struct pic32_wdt {
+ void __iomem *regs;
+ void __iomem *rst_base;
+ struct clk *clk;
+};
+
+static inline bool pic32_wdt_is_win_enabled(struct pic32_wdt *wdt)
+{
+ return !!(readl(wdt->regs + WDTCON_REG) & WDTCON_WIN_EN);
+}
+
+static inline u32 pic32_wdt_get_post_scaler(struct pic32_wdt *wdt)
+{
+ u32 v = readl(wdt->regs + WDTCON_REG);
+
+ return (v >> WDTCON_RMPS_SHIFT) & WDTCON_RMPS_MASK;
+}
+
+static inline u32 pic32_wdt_get_clk_id(struct pic32_wdt *wdt)
+{
+ u32 v = readl(wdt->regs + WDTCON_REG);
+
+ return (v >> WDTCON_RMCS_SHIFT) & WDTCON_RMCS_MASK;
+}
+
+static int pic32_wdt_bootstatus(struct pic32_wdt *wdt)
+{
+ u32 v = readl(wdt->rst_base);
+
+ writel(RESETCON_WDT_TIMEOUT, PIC32_CLR(wdt->rst_base));
+
+ return v & RESETCON_WDT_TIMEOUT;
+}
+
+static u32 pic32_wdt_get_timeout_secs(struct pic32_wdt *wdt, struct device *dev)
+{
+ unsigned long rate;
+ u32 period, ps, terminal;
+
+ rate = clk_get_rate(wdt->clk);
+
+ dev_dbg(dev, "wdt: clk_id %d, clk_rate %lu (prescale)\n",
+ pic32_wdt_get_clk_id(wdt), rate);
+
+ /* by default, a prescaler of 32 (i.e. div-by-32) is implicit. */
+ rate >>= 5;
+ if (!rate)
+ return 0;
+
+ /* calculate terminal count from postscaler. */
+ ps = pic32_wdt_get_post_scaler(wdt);
+ terminal = BIT(ps);
+
+ /* find time taken (in secs) to reach terminal count */
+ period = terminal / rate;
+ dev_dbg(dev,
+ "wdt: clk_rate %lu (postscale) / terminal %d, timeout %dsec\n",
+ rate, terminal, period);
+
+ return period;
+}
+
+static void pic32_wdt_keepalive(struct pic32_wdt *wdt)
+{
+ /* write key through single half-word */
+ writew(WDTCON_CLR_KEY, wdt->regs + WDTCON_REG + 2);
+}
+
+static int pic32_wdt_start(struct watchdog_device *wdd)
+{
+ struct pic32_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ writel(WDTCON_ON, PIC32_SET(wdt->regs + WDTCON_REG));
+ pic32_wdt_keepalive(wdt);
+
+ return 0;
+}
+
+static int pic32_wdt_stop(struct watchdog_device *wdd)
+{
+ struct pic32_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ writel(WDTCON_ON, PIC32_CLR(wdt->regs + WDTCON_REG));
+
+ /*
+ * Cannot touch registers in the CPU cycle following clearing the
+ * ON bit.
+ */
+ nop();
+
+ return 0;
+}
+
+static int pic32_wdt_ping(struct watchdog_device *wdd)
+{
+ struct pic32_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ pic32_wdt_keepalive(wdt);
+
+ return 0;
+}
+
+static const struct watchdog_ops pic32_wdt_fops = {
+ .owner = THIS_MODULE,
+ .start = pic32_wdt_start,
+ .stop = pic32_wdt_stop,
+ .ping = pic32_wdt_ping,
+};
+
+static const struct watchdog_info pic32_wdt_ident = {
+ .options = WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE | WDIOF_CARDRESET,
+ .identity = "PIC32 Watchdog",
+};
+
+static struct watchdog_device pic32_wdd = {
+ .info = &pic32_wdt_ident,
+ .ops = &pic32_wdt_fops,
+};
+
+static const struct of_device_id pic32_wdt_dt_ids[] = {
+ { .compatible = "microchip,pic32mzda-wdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, pic32_wdt_dt_ids);
+
+static int pic32_wdt_drv_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct watchdog_device *wdd = &pic32_wdd;
+ struct pic32_wdt *wdt;
+ struct resource *mem;
+
+ wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdt->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(wdt->regs))
+ return PTR_ERR(wdt->regs);
+
+ wdt->rst_base = devm_ioremap(&pdev->dev, PIC32_BASE_RESET, 0x10);
+ if (!wdt->rst_base)
+ return -ENOMEM;
+
+ wdt->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(wdt->clk)) {
+ dev_err(&pdev->dev, "clk not found\n");
+ return PTR_ERR(wdt->clk);
+ }
+
+ ret = clk_prepare_enable(wdt->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk enable failed\n");
+ return ret;
+ }
+
+ if (pic32_wdt_is_win_enabled(wdt)) {
+ dev_err(&pdev->dev, "windowed-clear mode is not supported.\n");
+ ret = -ENODEV;
+ goto out_disable_clk;
+ }
+
+ wdd->timeout = pic32_wdt_get_timeout_secs(wdt, &pdev->dev);
+ if (!wdd->timeout) {
+ dev_err(&pdev->dev,
+ "failed to read watchdog register timeout\n");
+ ret = -EINVAL;
+ goto out_disable_clk;
+ }
+
+ dev_info(&pdev->dev, "timeout %d\n", wdd->timeout);
+
+ wdd->bootstatus = pic32_wdt_bootstatus(wdt) ? WDIOF_CARDRESET : 0;
+
+ watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
+ watchdog_set_drvdata(wdd, wdt);
+
+ ret = watchdog_register_device(wdd);
+ if (ret) {
+ dev_err(&pdev->dev, "watchdog register failed, err %d\n", ret);
+ goto out_disable_clk;
+ }
+
+ platform_set_drvdata(pdev, wdd);
+
+ return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(wdt->clk);
+
+ return ret;
+}
+
+static int pic32_wdt_drv_remove(struct platform_device *pdev)
+{
+ struct watchdog_device *wdd = platform_get_drvdata(pdev);
+ struct pic32_wdt *wdt = watchdog_get_drvdata(wdd);
+
+ watchdog_unregister_device(wdd);
+ clk_disable_unprepare(wdt->clk);
+
+ return 0;
+}
+
+static struct platform_driver pic32_wdt_driver = {
+ .probe = pic32_wdt_drv_probe,
+ .remove = pic32_wdt_drv_remove,
+ .driver = {
+ .name = "pic32-wdt",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pic32_wdt_dt_ids),
+ }
+};
+
+module_platform_driver(pic32_wdt_driver);
+
+MODULE_AUTHOR("Joshua Henderson <joshua.henderson@microchip.com>");
+MODULE_DESCRIPTION("Microchip PIC32 Watchdog Timer");
+MODULE_LICENSE("GPL");
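
Both PIC32 drivers register a standard watchdog character device, so they can be exercised with the usual /dev/watchdog protocol. A minimal userspace client, as a sketch with error handling trimmed (both drivers advertise WDIOF_MAGICCLOSE, so writing 'V' permits a clean close):

#include <fcntl.h>
#include <linux/watchdog.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/watchdog", O_WRONLY);
        int timeout = 0;

        if (fd < 0)
                return 1;

        ioctl(fd, WDIOC_GETTIMEOUT, &timeout); /* read back driver timeout */
        ioctl(fd, WDIOC_KEEPALIVE, 0);         /* ping */

        write(fd, "V", 1); /* magic close */
        close(fd);
        return 0;
}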
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index 20563ccb7..a043fa4f6 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -21,6 +21,7 @@
#define WDT_RST 0x38
#define WDT_EN 0x40
+#define WDT_STS 0x44
#define WDT_BITE_TIME 0x5C
struct qcom_wdt {
@@ -108,7 +109,8 @@ static const struct watchdog_ops qcom_wdt_ops = {
static const struct watchdog_info qcom_wdt_info = {
.options = WDIOF_KEEPALIVEPING
| WDIOF_MAGICCLOSE
- | WDIOF_SETTIMEOUT,
+ | WDIOF_SETTIMEOUT
+ | WDIOF_CARDRESET,
.identity = KBUILD_MODNAME,
};
@@ -171,6 +173,9 @@ static int qcom_wdt_probe(struct platform_device *pdev)
wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
wdt->wdd.parent = &pdev->dev;
+ if (readl(wdt->base + WDT_STS) & 1)
+ wdt->wdd.bootstatus = WDIOF_CARDRESET;
+
/*
* If 'timeout-sec' unspecified in devicetree, assume a 30 second
* default, unless the max timeout is less than 30 seconds, then use
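
With WDIOF_CARDRESET in place, userspace can ask whether the watchdog caused the last reboot. A hedged sketch of the query:

#include <fcntl.h>
#include <linux/watchdog.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/watchdog", O_WRONLY);
        int status = 0;

        if (fd < 0)
                return 1;

        /* reflects the bootstatus set from WDT_STS in the probe hunk */
        if (ioctl(fd, WDIOC_GETBOOTSTATUS, &status) == 0 &&
            (status & WDIOF_CARDRESET))
                puts("last reset was caused by the watchdog");

        write(fd, "V", 1); /* magic close */
        close(fd);
        return 0;
}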
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
new file mode 100644
index 000000000..cf61c92f7
--- /dev/null
+++ b/drivers/watchdog/renesas_wdt.c
@@ -0,0 +1,213 @@
+/*
+ * Watchdog driver for Renesas WDT watchdog
+ *
+ * Copyright (C) 2015-16 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com>
+ * Copyright (C) 2015-16 Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/watchdog.h>
+
+#define RWTCNT 0
+#define RWTCSRA 4
+#define RWTCSRA_WOVF BIT(4)
+#define RWTCSRA_WRFLG BIT(5)
+#define RWTCSRA_TME BIT(7)
+
+#define RWDT_DEFAULT_TIMEOUT 60U
+
+static const unsigned int clk_divs[] = { 1, 4, 16, 32, 64, 128, 1024 };
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct rwdt_priv {
+ void __iomem *base;
+ struct watchdog_device wdev;
+ struct clk *clk;
+ unsigned int clks_per_sec;
+ u8 cks;
+};
+
+static void rwdt_write(struct rwdt_priv *priv, u32 val, unsigned int reg)
+{
+ if (reg == RWTCNT)
+ val |= 0x5a5a0000;
+ else
+ val |= 0xa5a5a500;
+
+ writel_relaxed(val, priv->base + reg);
+}
+
+static int rwdt_init_timeout(struct watchdog_device *wdev)
+{
+ struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ rwdt_write(priv, 65536 - wdev->timeout * priv->clks_per_sec, RWTCNT);
+
+ return 0;
+}
+
+static int rwdt_start(struct watchdog_device *wdev)
+{
+ struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ clk_prepare_enable(priv->clk);
+
+ rwdt_write(priv, priv->cks, RWTCSRA);
+ rwdt_init_timeout(wdev);
+
+ while (readb_relaxed(priv->base + RWTCSRA) & RWTCSRA_WRFLG)
+ cpu_relax();
+
+ rwdt_write(priv, priv->cks | RWTCSRA_TME, RWTCSRA);
+
+ return 0;
+}
+
+static int rwdt_stop(struct watchdog_device *wdev)
+{
+ struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ rwdt_write(priv, priv->cks, RWTCSRA);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static unsigned int rwdt_get_timeleft(struct watchdog_device *wdev)
+{
+ struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+ u16 val = readw_relaxed(priv->base + RWTCNT);
+
+ return DIV_ROUND_CLOSEST(65536 - val, priv->clks_per_sec);
+}
+
+static const struct watchdog_info rwdt_ident = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+ .identity = "Renesas WDT Watchdog",
+};
+
+static const struct watchdog_ops rwdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rwdt_start,
+ .stop = rwdt_stop,
+ .ping = rwdt_init_timeout,
+ .get_timeleft = rwdt_get_timeleft,
+};
+
+static int rwdt_probe(struct platform_device *pdev)
+{
+ struct rwdt_priv *priv;
+ struct resource *res;
+ unsigned long rate;
+ unsigned int clks_per_sec;
+ int ret, i;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ rate = clk_get_rate(priv->clk);
+ if (!rate)
+ return -ENOENT;
+
+ for (i = ARRAY_SIZE(clk_divs) - 1; i >= 0; i--) {
+ clks_per_sec = DIV_ROUND_UP(rate, clk_divs[i]);
+ if (clks_per_sec) {
+ priv->clks_per_sec = clks_per_sec;
+ priv->cks = i;
+ break;
+ }
+ }
+
+ if (!clks_per_sec) {
+ dev_err(&pdev->dev, "Can't find suitable clock divider\n");
+ return -ERANGE;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ priv->wdev.info = &rwdt_ident;
+ priv->wdev.ops = &rwdt_ops;
+ priv->wdev.parent = &pdev->dev;
+ priv->wdev.min_timeout = 1;
+ priv->wdev.max_timeout = 65536 / clks_per_sec;
+ priv->wdev.timeout = min(priv->wdev.max_timeout, RWDT_DEFAULT_TIMEOUT);
+
+ platform_set_drvdata(pdev, priv);
+ watchdog_set_drvdata(&priv->wdev, priv);
+ watchdog_set_nowayout(&priv->wdev, nowayout);
+
+ /* This overrides the default timeout only if DT configuration was found */
+ ret = watchdog_init_timeout(&priv->wdev, 0, &pdev->dev);
+ if (ret)
+ dev_warn(&pdev->dev, "Specified timeout value invalid, using default\n");
+
+ ret = watchdog_register_device(&priv->wdev);
+ if (ret < 0) {
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rwdt_remove(struct platform_device *pdev)
+{
+ struct rwdt_priv *priv = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&priv->wdev);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+/*
+ * This driver also fits the R-Car Gen2 (r8a779[0-4]) WDT. However, for SMP
+ * to work there, one also needs a RESET (RST) driver which does not exist yet
+ * due to HW issues. This needs to be solved before adding compatibles here.
+ */
+static const struct of_device_id rwdt_ids[] = {
+ { .compatible = "renesas,rcar-gen3-wdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rwdt_ids);
+
+static struct platform_driver rwdt_driver = {
+ .driver = {
+ .name = "renesas_wdt",
+ .of_match_table = rwdt_ids,
+ },
+ .probe = rwdt_probe,
+ .remove = rwdt_remove,
+};
+module_platform_driver(rwdt_driver);
+
+MODULE_DESCRIPTION("Renesas WDT Watchdog Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfram Sang <wsa@sang-engineering.com>");
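
The RWDT counter counts up from a programmed start value and fires at 65536, so rwdt_init_timeout() stores 65536 - timeout * clks_per_sec and rwdt_get_timeleft() inverts that. Worked numbers under an assumed 32768 Hz input clock with the 1024 divider (32 counts per second):

#include <assert.h>

int main(void)
{
        unsigned int clks_per_sec = 32; /* 32768 Hz / 1024 (assumed) */
        unsigned int timeout = 60;      /* RWDT_DEFAULT_TIMEOUT */
        unsigned int cnt = 65536 - timeout * clks_per_sec;

        assert(cnt == 63616);                       /* programmed into RWTCNT */
        assert((65536 - cnt) / clks_per_sec == 60); /* get_timeleft inverse */
        return 0;
}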
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index f90812170..517a73317 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -275,9 +275,7 @@ static int sh_wdt_probe(struct platform_device *pdev)
return rc;
}
- init_timer(&wdt->timer);
- wdt->timer.function = sh_wdt_ping;
- wdt->timer.data = (unsigned long)wdt;
+ setup_timer(&wdt->timer, sh_wdt_ping, (unsigned long)wdt);
wdt->timer.expires = next_ping_period(clock_division_ratio);
dev_info(&pdev->dev, "initialized.\n");
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index c1658fe73..7c3ba58ae 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -104,7 +104,7 @@ static void watchdog_check_min_max_timeout(struct watchdog_device *wdd)
* timeout module parameter (if it is valid value) or the timeout-sec property
* (only if it is a valid value and the timeout_parm is out of bounds).
* If none of them are valid then we keep the old value (which should normally
- * be the default timeout value.
+ * be the default timeout value).
*
* A zero is returned on success and -EINVAL for failure.
*/
@@ -262,7 +262,7 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
ret = register_restart_handler(&wdd->restart_nb);
if (ret)
- pr_warn("watchog%d: Cannot register restart handler (%d)\n",
+ pr_warn("watchdog%d: Cannot register restart handler (%d)\n",
wdd->id, ret);
}
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 9b7a35c9e..030e91b38 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -8,6 +8,7 @@ nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_features.o := $(nostackp)
CFLAGS_efi.o += -fshort-wchar
+LDFLAGS += $(call ld-option, --no-wchar-size-warning)
dom0-$(CONFIG_PCI) += pci.o
dom0-$(CONFIG_USB_SUPPORT) += dbgp.o
diff --git a/drivers/xen/efi.c b/drivers/xen/efi.c
index be7e56a33..e9d213544 100644
--- a/drivers/xen/efi.c
+++ b/drivers/xen/efi.c
@@ -316,7 +316,6 @@ static const struct efi efi_xen __initconst = {
.get_next_high_mono_count = xen_efi_get_next_high_mono_count,
.reset_system = NULL, /* Functionality provided by Xen. */
.set_virtual_address_map = NULL, /* Not used under Xen. */
- .memmap = NULL, /* Not used under Xen. */
.flags = 0 /* Initialized later. */
};
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index dc495383a..67939578c 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -748,7 +748,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
return rc;
}
-#define GNTDEV_COPY_BATCH 24
+#define GNTDEV_COPY_BATCH 16
struct gntdev_copy_batch {
struct gnttab_copy ops[GNTDEV_COPY_BATCH];
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 8e67336f8..6a25533da 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
field_start = OFFSET(cfg_entry);
field_end = OFFSET(cfg_entry) + field->size;
- if ((req_start >= field_start && req_start < field_end)
- || (req_end > field_start && req_end <= field_end)) {
+ if (req_end > field_start && field_end > req_start) {
err = conf_space_read(dev, cfg_entry, field_start,
&tmp_val);
if (err)
@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
field_start = OFFSET(cfg_entry);
field_end = OFFSET(cfg_entry) + field->size;
- if ((req_start >= field_start && req_start < field_end)
- || (req_end > field_start && req_end <= field_end)) {
+ if (req_end > field_start && field_end > req_start) {
tmp_val = 0;
err = xen_pcibk_config_read(dev, field_start,
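
The rewritten condition is the canonical overlap test for half-open ranges. The old two-clause form required one of the request's endpoints to fall inside the field and therefore missed a request that strictly contains a field. A compact demonstration:

#include <assert.h>

static int old_overlap(int rs, int re, int fs, int fe)
{
        return (rs >= fs && rs < fe) || (re > fs && re <= fe);
}

static int new_overlap(int rs, int re, int fs, int fe)
{
        return re > fs && fe > rs;
}

int main(void)
{
        /* request [0,12) fully covers field [4,8):
         * the old test misses it, the new one catches it */
        assert(!old_overlap(0, 12, 4, 8));
        assert(new_overlap(0, 12, 4, 8));
        return 0;
}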
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index ad3d17d29..9ead1c2ff 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -145,7 +145,7 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
/* A write to obtain the length must happen as a 32-bit write.
* This does not (yet) support writing individual bytes
*/
- if (value == ~PCI_ROM_ADDRESS_ENABLE)
+ if ((value | ~PCI_ROM_ADDRESS_MASK) == ~0U)
bar->which = 1;
else {
u32 tmpval;
@@ -225,38 +225,42 @@ static inline void read_dev_bar(struct pci_dev *dev,
(PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_64))) {
bar_info->val = res[pos - 1].start >> 32;
- bar_info->len_val = res[pos - 1].end >> 32;
+ bar_info->len_val = -resource_size(&res[pos - 1]) >> 32;
return;
}
}
+ if (!res[pos].flags ||
+ (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET |
+ IORESOURCE_BUSY)))
+ return;
+
bar_info->val = res[pos].start |
(res[pos].flags & PCI_REGION_FLAG_MASK);
- bar_info->len_val = resource_size(&res[pos]);
+ bar_info->len_val = -resource_size(&res[pos]) |
+ (res[pos].flags & PCI_REGION_FLAG_MASK);
}
static void *bar_init(struct pci_dev *dev, int offset)
{
- struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+ struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
if (!bar)
return ERR_PTR(-ENOMEM);
read_dev_bar(dev, bar, offset, ~0);
- bar->which = 0;
return bar;
}
static void *rom_init(struct pci_dev *dev, int offset)
{
- struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+ struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
if (!bar)
return ERR_PTR(-ENOMEM);
read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
- bar->which = 0;
return bar;
}
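
Storing the negated resource size mimics what real PCI hardware returns during BAR sizing: after all-ones is written, the read-back value holds the two's complement of the BAR size in the address bits. Worked example for a hypothetical 4 KiB memory BAR:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t size = 0x1000;                   /* 4 KiB BAR */
        uint32_t len_val = (uint32_t)-size;       /* as read_dev_bar() stores */
        uint32_t sz_mask = len_val & 0xfffffff0u; /* strip mem BAR flag bits */

        assert(len_val == 0xfffff000u);
        assert(~sz_mask + 1 == size); /* guest recovers the size as usual */
        return 0;
}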
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ff932624e..d6950e080 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1399,15 +1399,6 @@ static void scsiback_release_cmd(struct se_cmd *se_cmd)
percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
-static int scsiback_shutdown_session(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void scsiback_close_session(struct se_session *se_sess)
-{
-}
-
static u32 scsiback_sess_get_index(struct se_session *se_sess)
{
return 0;
@@ -1841,8 +1832,6 @@ static const struct target_core_fabric_ops scsiback_ops = {
.tpg_get_inst_index = scsiback_tpg_get_inst_index,
.check_stop_free = scsiback_check_stop_free,
.release_cmd = scsiback_release_cmd,
- .shutdown_session = scsiback_shutdown_session,
- .close_session = scsiback_close_session,
.sess_get_index = scsiback_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = scsiback_write_pending,
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index cacf30d14..7487971f9 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -316,11 +316,18 @@ static int xenbus_write_transaction(unsigned msg_type,
rc = -ENOMEM;
goto out;
}
+ } else {
+ list_for_each_entry(trans, &u->transactions, list)
+ if (trans->handle.id == u->u.msg.tx_id)
+ break;
+ if (&trans->list == &u->transactions)
+ return -ESRCH;
}
reply = xenbus_dev_request_and_reply(&u->u.msg);
if (IS_ERR(reply)) {
- kfree(trans);
+ if (msg_type == XS_TRANSACTION_START)
+ kfree(trans);
rc = PTR_ERR(reply);
goto out;
}
@@ -333,12 +340,7 @@ static int xenbus_write_transaction(unsigned msg_type,
list_add(&trans->list, &u->transactions);
}
} else if (u->u.msg.type == XS_TRANSACTION_END) {
- list_for_each_entry(trans, &u->transactions, list)
- if (trans->handle.id == u->u.msg.tx_id)
- break;
- BUG_ON(&trans->list == &u->transactions);
list_del(&trans->list);
-
kfree(trans);
}
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 374b12af8..22f7cd711 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -232,10 +232,10 @@ static void transaction_resume(void)
void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
{
void *ret;
- struct xsd_sockmsg req_msg = *msg;
+ enum xsd_sockmsg_type type = msg->type;
int err;
- if (req_msg.type == XS_TRANSACTION_START)
+ if (type == XS_TRANSACTION_START)
transaction_start();
mutex_lock(&xs_state.request_mutex);
@@ -249,12 +249,8 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
mutex_unlock(&xs_state.request_mutex);
- if (IS_ERR(ret))
- return ret;
-
if ((msg->type == XS_TRANSACTION_END) ||
- ((req_msg.type == XS_TRANSACTION_START) &&
- (msg->type == XS_ERROR)))
+ ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR)))
transaction_end();
return ret;